author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers
parent     Adding upstream version 6.7.12. (diff)
download   linux-upstream.tar.xz
           linux-upstream.zip
Adding upstream version 6.8.9. (upstream/6.8.9) (upstream)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 3
-rw-r--r--  drivers/accel/drm_accel.c | 1
-rw-r--r--  drivers/accel/habanalabs/common/device.c | 8
-rw-r--r--  drivers/accel/habanalabs/common/firmware_if.c | 123
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs.h | 15
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_drv.c | 34
-rw-r--r--  drivers/accel/habanalabs/common/habanalabs_ioctl.c | 53
-rw-r--r--  drivers/accel/habanalabs/common/hwmon.c | 4
-rw-r--r--  drivers/accel/habanalabs/common/memory.c | 7
-rw-r--r--  drivers/accel/habanalabs/common/sysfs.c | 39
-rw-r--r--  drivers/accel/habanalabs/gaudi2/gaudi2.c | 70
-rw-r--r--  drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h | 13
-rw-r--r--  drivers/accel/ivpu/Kconfig | 11
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 73
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 193
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 23
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 78
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.h | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c | 662
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 76
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h | 26
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c | 124
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx_reg.h | 2
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 104
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c | 265
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.h | 33
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 255
-rw-r--r--  drivers/accel/ivpu/ivpu_job.h | 7
-rw-r--r--  drivers/accel/ivpu/ivpu_jsm_msg.c | 38
-rw-r--r--  drivers/accel/ivpu/ivpu_jsm_msg.h | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c | 99
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.h | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.c | 162
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.h | 11
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 154
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.h | 9
-rw-r--r--  drivers/accel/ivpu/vpu_boot_api.h | 90
-rw-r--r--  drivers/accel/ivpu/vpu_jsm_api.h | 309
-rw-r--r--  drivers/accel/qaic/Makefile | 3
-rw-r--r--  drivers/accel/qaic/mhi_controller.c | 44
-rw-r--r--  drivers/accel/qaic/mhi_controller.h | 2
-rw-r--r--  drivers/accel/qaic/qaic.h | 21
-rw-r--r--  drivers/accel/qaic/qaic_control.c | 7
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 147
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 98
-rw-r--r--  drivers/accel/qaic/qaic_timesync.c | 395
-rw-r--r--  drivers/accel/qaic/qaic_timesync.h | 11
-rw-r--r--  drivers/accessibility/speakup/main.c | 2
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/Makefile | 3
-rw-r--r--  drivers/acpi/acpi_lpss.c | 48
-rw-r--r--  drivers/acpi/acpi_processor.c | 18
-rw-r--r--  drivers/acpi/acpi_video.c | 56
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 2
-rw-r--r--  drivers/acpi/acpica/dbnames.c | 8
-rw-r--r--  drivers/acpi/apei/einj.c | 71
-rw-r--r--  drivers/acpi/arm64/Makefile | 1
-rw-r--r--  drivers/acpi/arm64/thermal_cpufreq.c | 22
-rw-r--r--  drivers/acpi/bus.c | 32
-rw-r--r--  drivers/acpi/button.c | 10
-rw-r--r--  drivers/acpi/cppc_acpi.c | 176
-rw-r--r--  drivers/acpi/ec.c | 4
-rw-r--r--  drivers/acpi/internal.h | 28
-rw-r--r--  drivers/acpi/mipi-disco-img.c | 725
-rw-r--r--  drivers/acpi/nfit/core.c | 65
-rw-r--r--  drivers/acpi/numa/hmat.c | 214
-rw-r--r--  drivers/acpi/numa/srat.c | 6
-rw-r--r--  drivers/acpi/osl.c | 77
-rw-r--r--  drivers/acpi/processor_thermal.c | 49
-rw-r--r--  drivers/acpi/property.c | 102
-rw-r--r--  drivers/acpi/resource.c | 7
-rw-r--r--  drivers/acpi/scan.c | 98
-rw-r--r--  drivers/acpi/sleep.c | 12
-rw-r--r--  drivers/acpi/tables.c | 5
-rw-r--r--  drivers/acpi/thermal.c | 80
-rw-r--r--  drivers/acpi/thermal_lib.c (renamed from drivers/thermal/thermal_acpi.c) | 80
-rw-r--r--  drivers/acpi/utils.c | 164
-rw-r--r--  drivers/acpi/x86/utils.c | 38
-rw-r--r--  drivers/android/binder.c | 31
-rw-r--r--  drivers/android/binder_alloc.c | 841
-rw-r--r--  drivers/android/binder_alloc.h | 61
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 18
-rw-r--r--  drivers/android/binder_trace.h | 2
-rw-r--r--  drivers/android/binderfs.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/libata-sata.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 9
-rw-r--r--  drivers/ata/pata_pxa.c | 7
-rw-r--r--  drivers/ata/sata_mv.c | 63
-rw-r--r--  drivers/ata/sata_sx4.c | 6
-rw-r--r--  drivers/atm/atmtcp.c | 1
-rw-r--r--  drivers/atm/eni.c | 1
-rw-r--r--  drivers/atm/idt77105.c | 1
-rw-r--r--  drivers/atm/iphase.c | 1
-rw-r--r--  drivers/atm/nicstar.c | 1
-rw-r--r--  drivers/atm/suni.c | 1
-rw-r--r--  drivers/auxdisplay/Kconfig | 10
-rw-r--r--  drivers/auxdisplay/cfag12864bfb.c | 10
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 10
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c | 12
-rw-r--r--  drivers/base/arch_topology.c | 107
-rw-r--r--  drivers/base/auxiliary.c | 2
-rw-r--r--  drivers/base/bus.c | 8
-rw-r--r--  drivers/base/container.c | 2
-rw-r--r--  drivers/base/core.c | 50
-rw-r--r--  drivers/base/cpu.c | 39
-rw-r--r--  drivers/base/dd.c | 2
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.c | 1
-rw-r--r--  drivers/base/init.c | 2
-rw-r--r--  drivers/base/isa.c | 2
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/node.c | 20
-rw-r--r--  drivers/base/power/Makefile | 1
-rw-r--r--  drivers/base/power/clock_ops.c | 2
-rw-r--r--  drivers/base/power/main.c | 117
-rw-r--r--  drivers/base/power/qos.c | 2
-rw-r--r--  drivers/base/power/runtime.c | 1
-rw-r--r--  drivers/base/property.c | 66
-rw-r--r--  drivers/base/regmap/internal.h | 1
-rw-r--r--  drivers/base/regmap/regcache-maple.c | 6
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 8
-rw-r--r--  drivers/base/regmap/regmap-kunit.c | 61
-rw-r--r--  drivers/base/regmap/regmap-ram.c | 4
-rw-r--r--  drivers/base/regmap/regmap-raw-ram.c | 31
-rw-r--r--  drivers/base/regmap/regmap.c | 2
-rw-r--r--  drivers/base/soc.c | 6
-rw-r--r--  drivers/base/swnode.c | 8
-rw-r--r--  drivers/bcma/driver_pci_host.c | 2
-rw-r--r--  drivers/block/aoe/aoeblk.c | 3
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 16
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/loop.c | 7
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/block/null_blk/main.c | 5
-rw-r--r--  drivers/block/null_blk/zoned.c | 2
-rw-r--r--  drivers/block/rbd.c | 10
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 13
-rw-r--r--  drivers/block/rnbd/rnbd-proto.h | 14
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c | 25
-rw-r--r--  drivers/block/ublk_drv.c | 13
-rw-r--r--  drivers/block/virtio_blk.c | 80
-rw-r--r--  drivers/block/xen-blkback/common.h | 2
-rw-r--r--  drivers/block/zram/Kconfig | 15
-rw-r--r--  drivers/block/zram/zram_drv.c | 58
-rw-r--r--  drivers/block/zram/zram_drv.h | 2
-rw-r--r--  drivers/bluetooth/btintel.c | 7
-rw-r--r--  drivers/bluetooth/btintel.h | 4
-rw-r--r--  drivers/bluetooth/btmtk.c | 8
-rw-r--r--  drivers/bluetooth/btmtk.h | 1
-rw-r--r--  drivers/bluetooth/btmtkuart.c | 4
-rw-r--r--  drivers/bluetooth/btnxpuart.c | 5
-rw-r--r--  drivers/bluetooth/btqca.c | 8
-rw-r--r--  drivers/bluetooth/btusb.c | 24
-rw-r--r--  drivers/bluetooth/hci_qca.c | 68
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 4
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 16
-rw-r--r--  drivers/bus/hisi_lpc.c | 6
-rw-r--r--  drivers/bus/imx-weim.c | 9
-rw-r--r--  drivers/bus/mhi/ep/internal.h | 4
-rw-r--r--  drivers/bus/mhi/ep/main.c | 262
-rw-r--r--  drivers/bus/mhi/ep/ring.c | 27
-rw-r--r--  drivers/bus/mhi/host/init.c | 2
-rw-r--r--  drivers/bus/mhi/host/internal.h | 11
-rw-r--r--  drivers/bus/mhi/host/main.c | 5
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c | 22
-rw-r--r--  drivers/bus/mhi/host/pm.c | 44
-rw-r--r--  drivers/bus/moxtet.c | 2
-rw-r--r--  drivers/bus/omap-ocp2scp.c | 6
-rw-r--r--  drivers/bus/omap_l3_smx.c | 6
-rw-r--r--  drivers/bus/qcom-ssc-block-bus.c | 6
-rw-r--r--  drivers/bus/simple-pm-bus.c | 7
-rw-r--r--  drivers/bus/sun50i-de2.c | 5
-rw-r--r--  drivers/bus/sunxi-rsb.c | 6
-rw-r--r--  drivers/bus/tegra-aconnect.c | 6
-rw-r--r--  drivers/bus/tegra-gmi.c | 6
-rw-r--r--  drivers/bus/ti-pwmss.c | 5
-rw-r--r--  drivers/bus/ti-sysc.c | 6
-rw-r--r--  drivers/bus/ts-nbus.c | 6
-rw-r--r--  drivers/cache/Kconfig | 6
-rw-r--r--  drivers/cache/Makefile | 3
-rw-r--r--  drivers/cache/sifive_ccache.c (renamed from drivers/soc/sifive/sifive_ccache.c) | 62
-rw-r--r--  drivers/cdx/cdx.c | 157
-rw-r--r--  drivers/char/agp/Makefile | 6
-rw-r--r--  drivers/char/agp/agp.h | 9
-rw-r--r--  drivers/char/agp/backend.c | 11
-rw-r--r--  drivers/char/agp/compat_ioctl.c | 291
-rw-r--r--  drivers/char/agp/compat_ioctl.h | 106
-rw-r--r--  drivers/char/agp/frontend.c | 1068
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 6
-rw-r--r--  drivers/char/hw_random/cctrng.c | 6
-rw-r--r--  drivers/char/hw_random/exynos-trng.c | 6
-rw-r--r--  drivers/char/hw_random/ingenic-rng.c | 8
-rw-r--r--  drivers/char/hw_random/jh7110-trng.c | 8
-rw-r--r--  drivers/char/hw_random/ks-sa-rng.c | 6
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c | 6
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 6
-rw-r--r--  drivers/char/hw_random/npcm-rng.c | 6
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 6
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 6
-rw-r--r--  drivers/char/hw_random/timeriomem-rng.c | 6
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 14
-rw-r--r--  drivers/char/hw_random/xgene-rng.c | 6
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_si_hardcode.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 12
-rw-r--r--  drivers/char/ppdev.c | 6
-rw-r--r--  drivers/char/random.c | 16
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c | 15
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c | 3
-rw-r--r--  drivers/char/ttyprintk.c | 6
-rw-r--r--  drivers/char/virtio_console.c | 58
-rw-r--r--  drivers/clk/Kconfig | 11
-rw-r--r--  drivers/clk/Makefile | 1
-rw-r--r--  drivers/clk/clk-renesas-pcie.c | 37
-rw-r--r--  drivers/clk/clk-si5351.c | 47
-rw-r--r--  drivers/clk/clk-versaclock3.c | 88
-rw-r--r--  drivers/clk/clk.c | 161
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 23
-rw-r--r--  drivers/clk/mediatek/Kconfig | 9
-rw-r--r--  drivers/clk/mediatek/Makefile | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-apmixed.c | 114
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-eth.c | 150
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-infracfg.c | 275
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-topckgen.c | 325
-rw-r--r--  drivers/clk/mediatek/clk-mt7988-xfipll.c | 82
-rw-r--r--  drivers/clk/mediatek/clk-mt8188-topckgen.c | 27
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-topckgen.c | 27
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 15
-rw-r--r--  drivers/clk/mediatek/clk-mux.c | 14
-rw-r--r--  drivers/clk/mediatek/clk-mux.h | 43
-rw-r--r--  drivers/clk/mediatek/clk-pll.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-pll.h | 1
-rw-r--r--  drivers/clk/meson/g12a.c | 115
-rw-r--r--  drivers/clk/meson/g12a.h | 1
-rw-r--r--  drivers/clk/microchip/clk-mpfs-ccc.c | 2
-rw-r--r--  drivers/clk/qcom/Kconfig | 63
-rw-r--r--  drivers/clk/qcom/Makefile | 7
-rw-r--r--  drivers/clk/qcom/apss-ipq-pll.c | 21
-rw-r--r--  drivers/clk/qcom/camcc-sc8280xp.c | 3066
-rw-r--r--  drivers/clk/qcom/clk-branch.c | 38
-rw-r--r--  drivers/clk/qcom/clk-branch.h | 21
-rw-r--r--  drivers/clk/qcom/clk-rpmh.c | 58
-rw-r--r--  drivers/clk/qcom/dispcc-sm8650.c | 1818
-rw-r--r--  drivers/clk/qcom/ecpricc-qdu1000.c | 2456
-rw-r--r--  drivers/clk/qcom/gcc-msm8939.c | 110
-rw-r--r--  drivers/clk/qcom/gcc-sm8650.c | 3849
-rw-r--r--  drivers/clk/qcom/gcc-x1e80100.c | 6808
-rw-r--r--  drivers/clk/qcom/gpucc-sm8650.c | 663
-rw-r--r--  drivers/clk/qcom/tcsrcc-sm8650.c | 182
-rw-r--r--  drivers/clk/qcom/videocc-sm8150.c | 24
-rw-r--r--  drivers/clk/renesas/r8a779g0-cpg-mssr.c | 3
-rw-r--r--  drivers/clk/renesas/r9a08g045-cpg.c | 13
-rw-r--r--  drivers/clk/rockchip/clk-rk3568.c | 3
-rw-r--r--  drivers/clk/samsung/Makefile | 1
-rw-r--r--  drivers/clk/samsung/clk-cpu.h | 30
-rw-r--r--  drivers/clk/samsung/clk-gs101.c | 2518
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 6
-rw-r--r--  drivers/clk/samsung/clk-pll.h | 3
-rw-r--r--  drivers/clk/samsung/clk.h | 157
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7100-audio.c | 2
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7100.c | 32
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-aon.c | 6
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-isp.c | 2
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh7110-sys.c | 26
-rw-r--r--  drivers/clk/starfive/clk-starfive-jh71x0.h | 4
-rw-r--r--  drivers/clk/stm32/Kconfig | 29
-rw-r--r--  drivers/clk/stm32/Makefile | 1
-rw-r--r--  drivers/clk/stm32/clk-stm32-core.c | 5
-rw-r--r--  drivers/clk/stm32/clk-stm32-core.h | 5
-rw-r--r--  drivers/clk/stm32/clk-stm32mp1.c (renamed from drivers/clk/clk-stm32mp1.c) | 127
-rw-r--r--  drivers/clk/stm32/clk-stm32mp13.c | 9
-rw-r--r--  drivers/clk/stm32/reset-stm32.c | 14
-rw-r--r--  drivers/clk/stm32/reset-stm32.h | 8
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.c | 5
-rw-r--r--  drivers/clk/xilinx/clk-xlnx-clock-wizard.c | 628
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c | 22
-rw-r--r--  drivers/clocksource/timer-riscv.c | 7
-rw-r--r--  drivers/comedi/comedi_fops.c | 4
-rw-r--r--  drivers/comedi/drivers/vmk80xx.c | 35
-rw-r--r--  drivers/connector/connector.c | 5
-rw-r--r--  drivers/counter/counter-core.c | 7
-rw-r--r--  drivers/cpufreq/armada-8k-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 139
-rw-r--r--  drivers/cpufreq/cpufreq.c | 21
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 36
-rw-r--r--  drivers/cpuidle/driver.c | 3
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 4
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 5
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 25
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 14
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 40
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 4
-rw-r--r--  drivers/crypto/aspeed/Kconfig | 4
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace-crypto.c | 230
-rw-r--r--  drivers/crypto/atmel-aes.c | 214
-rw-r--r--  drivers/crypto/atmel-tdes.c | 205
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 12
-rw-r--r--  drivers/crypto/bcm/cipher.c | 57
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 24
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 19
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 18
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 10
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 45
-rw-r--r--  drivers/crypto/gemini/sl3516-ce-cipher.c | 4
-rw-r--r--  drivers/crypto/hifn_795x.c | 126
-rw-r--r--  drivers/crypto/hisilicon/debugfs.c | 54
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 166
-rw-r--r--  drivers/crypto/hisilicon/qm_common.h | 4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 33
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 2
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 18
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 54
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 4
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 4
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 152
-rw-r--r--  drivers/crypto/intel/Kconfig | 1
-rw-r--r--  drivers/crypto/intel/Makefile | 1
-rw-r--r--  drivers/crypto/intel/iaa/Kconfig | 19
-rw-r--r--  drivers/crypto/intel/iaa/Makefile | 12
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto.h | 173
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c | 92
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 2197
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_stats.c | 312
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_stats.h | 53
-rw-r--r--  drivers/crypto/intel/qat/Kconfig | 11
-rw-r--r--  drivers/crypto/intel/qat/Makefile | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 536
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h | 55
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 202
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 313
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 52
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 277
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 15
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c | 37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.h | 4
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_common.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_fw_config.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_config.c | 287
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_config.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 241
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 87
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c | 153
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h | 158
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_telemetry.c | 288
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_telemetry.h | 99
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c | 710
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h | 117
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 14
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_hal.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c | 1
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c | 6
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 23
-rw-r--r--  drivers/crypto/marvell/octeontx2/cn10k_cpt.c | 86
-rw-r--r--  drivers/crypto/marvell/octeontx2/cn10k_cpt.h | 27
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 54
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c | 44
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h | 9
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 26
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h | 298
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 133
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 105
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 4
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 74
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 82
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 49
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 3
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf.h | 2
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 31
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h | 5
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 26
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 28
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 162
-rw-r--r--  drivers/crypto/n2_core.c | 36
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4
-rw-r--r--  drivers/crypto/sahara.c | 589
-rw-r--r--  drivers/crypto/starfive/Kconfig | 2
-rw-r--r--  drivers/crypto/starfive/jh7110-aes.c | 77
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.c | 8
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.h | 12
-rw-r--r--  drivers/crypto/starfive/jh7110-rsa.c | 58
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 2
-rw-r--r--  drivers/cxl/Kconfig | 3
-rw-r--r--  drivers/cxl/acpi.c | 157
-rw-r--r--  drivers/cxl/core/Makefile | 1
-rw-r--r--  drivers/cxl/core/cdat.c | 517
-rw-r--r--  drivers/cxl/core/core.h | 2
-rw-r--r--  drivers/cxl/core/mbox.c | 122
-rw-r--r--  drivers/cxl/core/memdev.c | 65
-rw-r--r--  drivers/cxl/core/pci.c | 36
-rw-r--r--  drivers/cxl/core/pmem.c | 8
-rw-r--r--  drivers/cxl/core/port.c | 167
-rw-r--r--  drivers/cxl/core/region.c | 13
-rw-r--r--  drivers/cxl/core/regs.c | 5
-rw-r--r--  drivers/cxl/core/trace.h | 14
-rw-r--r--  drivers/cxl/cxl.h | 53
-rw-r--r--  drivers/cxl/cxlmem.h | 130
-rw-r--r--  drivers/cxl/cxlpci.h | 13
-rw-r--r--  drivers/cxl/mem.c | 11
-rw-r--r--  drivers/cxl/pci.c | 29
-rw-r--r--  drivers/cxl/port.c | 8
-rw-r--r--  drivers/dax/bus.c | 3
-rw-r--r--  drivers/dax/bus.h | 1
-rw-r--r--  drivers/dax/cxl.c | 1
-rw-r--r--  drivers/dax/dax-private.h | 1
-rw-r--r--  drivers/dax/hmem/hmem.c | 1
-rw-r--r--  drivers/dax/kmem.c | 8
-rw-r--r--  drivers/dax/pmem.c | 1
-rw-r--r--  drivers/dax/super.c | 3
-rw-r--r--  drivers/devfreq/devfreq.c | 37
-rw-r--r--  drivers/dma-buf/dma-buf.c | 4
-rw-r--r--  drivers/dma-buf/dma-fence.c | 3
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c | 7
-rw-r--r--  drivers/dma-buf/st-dma-fence-chain.c | 6
-rw-r--r--  drivers/dma-buf/sw_sync.c | 82
-rw-r--r--  drivers/dma-buf/sync_debug.h | 2
-rw-r--r--  drivers/dma-buf/sync_file.c | 19
-rw-r--r--  drivers/dma/Kconfig | 14
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/at_hdmac.c | 32
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 280
-rw-r--r--  drivers/dma/dmatest.c | 17
-rw-r--r--  drivers/dma/fsl-qdma.c | 26
-rw-r--r--  drivers/dma/idma64.c | 4
-rw-r--r--  drivers/dma/idxd/Makefile | 2
-rw-r--r--  drivers/dma/idxd/bus.c | 6
-rw-r--r--  drivers/dma/idxd/cdev.c | 15
-rw-r--r--  drivers/dma/idxd/debugfs.c | 4
-rw-r--r--  drivers/dma/idxd/defaults.c | 53
-rw-r--r--  drivers/dma/idxd/device.c | 21
-rw-r--r--  drivers/dma/idxd/dma.c | 9
-rw-r--r--  drivers/dma/idxd/idxd.h | 85
-rw-r--r--  drivers/dma/idxd/init.c | 9
-rw-r--r--  drivers/dma/idxd/irq.c | 16
-rw-r--r--  drivers/dma/idxd/perfmon.c | 9
-rw-r--r--  drivers/dma/idxd/submit.c | 9
-rw-r--r--  drivers/dma/imx-sdma.c | 4
-rw-r--r--  drivers/dma/ls2x-apb-dma.c | 705
-rw-r--r--  drivers/dma/milbeaut-hdmac.c | 17
-rw-r--r--  drivers/dma/milbeaut-xdmac.c | 17
-rw-r--r--  drivers/dma/owl-dma.c | 4
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c | 44
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.h | 8
-rw-r--r--  drivers/dma/sh/rz-dmac.c | 8
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 10
-rw-r--r--  drivers/dma/ste_dma40.c | 12
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c | 11
-rw-r--r--  drivers/dma/tegra210-adma.c | 35
-rw-r--r--  drivers/dma/ti/Makefile | 3
-rw-r--r--  drivers/dma/ti/k3-psil-am62p.c | 325
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h | 1
-rw-r--r--  drivers/dma/ti/k3-psil.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma.c | 2
-rw-r--r--  drivers/dma/uniphier-mdmac.c | 17
-rw-r--r--  drivers/dma/uniphier-xdmac.c | 17
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h | 33
-rw-r--r--  drivers/dma/xilinx/xdma.c | 361
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 15
-rw-r--r--  drivers/dpll/Kconfig | 2
-rw-r--r--  drivers/dpll/dpll_core.c | 91
-rw-r--r--  drivers/dpll/dpll_core.h | 2
-rw-r--r--  drivers/dpll/dpll_netlink.c | 78
-rw-r--r--  drivers/edac/altera_edac.c | 21
-rw-r--r--  drivers/edac/amd64_edac.c | 66
-rw-r--r--  drivers/edac/amd64_edac.h | 1
-rw-r--r--  drivers/edac/armada_xp_edac.c | 16
-rw-r--r--  drivers/edac/aspeed_edac.c | 6
-rw-r--r--  drivers/edac/bluefield_edac.c | 6
-rw-r--r--  drivers/edac/cell_edac.c | 5
-rw-r--r--  drivers/edac/cpc925_edac.c | 6
-rw-r--r--  drivers/edac/dmc520_edac.c | 6
-rw-r--r--  drivers/edac/edac_device.h | 2
-rw-r--r--  drivers/edac/edac_device_sysfs.c | 2
-rw-r--r--  drivers/edac/edac_mc.c | 1
-rw-r--r--  drivers/edac/edac_module.c | 4
-rw-r--r--  drivers/edac/edac_pci_sysfs.c | 6
-rw-r--r--  drivers/edac/fsl_ddr_edac.c | 3
-rw-r--r--  drivers/edac/fsl_ddr_edac.h | 2
-rw-r--r--  drivers/edac/highbank_l2_edac.c | 5
-rw-r--r--  drivers/edac/highbank_mc_edac.c | 5
-rw-r--r--  drivers/edac/i7core_edac.c | 4
-rw-r--r--  drivers/edac/igen6_edac.c | 194
-rw-r--r--  drivers/edac/layerscape_edac.c | 2
-rw-r--r--  drivers/edac/mce_amd.c | 526
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 13
-rw-r--r--  drivers/edac/npcm_edac.c | 6
-rw-r--r--  drivers/edac/octeon_edac-l2c.c | 6
-rw-r--r--  drivers/edac/octeon_edac-lmc.c | 5
-rw-r--r--  drivers/edac/octeon_edac-pc.c | 5
-rw-r--r--  drivers/edac/octeon_edac-pci.c | 6
-rw-r--r--  drivers/edac/pnd2_edac.c | 55
-rw-r--r--  drivers/edac/ppc4xx_edac.c | 7
-rw-r--r--  drivers/edac/qcom_edac.c | 6
-rw-r--r--  drivers/edac/sb_edac.c | 10
-rw-r--r--  drivers/edac/skx_common.c | 4
-rw-r--r--  drivers/edac/synopsys_edac.c | 6
-rw-r--r--  drivers/edac/ti_edac.c | 6
-rw-r--r--  drivers/edac/versal_edac.c | 4
-rw-r--r--  drivers/edac/xgene_edac.c | 6
-rw-r--r--  drivers/edac/zynqmp_edac.c | 6
-rw-r--r--  drivers/extcon/extcon-qcom-spmi-misc.c | 2
-rw-r--r--  drivers/extcon/extcon-usbc-tusb320.c | 9
-rw-r--r--  drivers/firewire/.kunitconfig | 1
-rw-r--r--  drivers/firewire/Kconfig | 16
-rw-r--r--  drivers/firewire/core-device.c | 138
-rw-r--r--  drivers/firewire/device-attribute-test.c | 251
-rw-r--r--  drivers/firmware/Kconfig | 10
-rw-r--r--  drivers/firmware/Makefile | 2
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 75
-rw-r--r--  drivers/firmware/arm_scmi/Kconfig | 25
-rw-r--r--  drivers/firmware/arm_scmi/base.c | 6
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 30
-rw-r--r--  drivers/firmware/arm_scmi/optee.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 48
-rw-r--r--  drivers/firmware/arm_scmi/power.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/powercap.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/protocols.h | 11
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.c | 7
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 9
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 8
-rw-r--r--  drivers/firmware/arm_scmi/shmem.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/system.c | 6
-rw-r--r--  drivers/firmware/arm_scmi/voltage.c | 8
-rw-r--r--  drivers/firmware/arm_scpi.c | 6
-rw-r--r--  drivers/firmware/efi/Kconfig | 15
-rw-r--r--  drivers/firmware/efi/Makefile | 1
-rw-r--r--  drivers/firmware/efi/cper.c | 19
-rw-r--r--  drivers/firmware/efi/dev-path-parser.c | 7
-rw-r--r--  drivers/firmware/efi/efi.c | 18
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/Makefile.zboot | 4
-rw-r--r--  drivers/firmware/efi/libstub/alignedmem.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 8
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 2
-rw-r--r--  drivers/firmware/efi/libstub/loongarch-stub.c | 9
-rw-r--r--  drivers/firmware/efi/libstub/loongarch-stub.h | 4
-rw-r--r--  drivers/firmware/efi/libstub/loongarch.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/x86-5lvl.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 14
-rw-r--r--  drivers/firmware/efi/memmap.c | 8
-rw-r--r--  drivers/firmware/efi/stmm/mm_communication.h | 236
-rw-r--r--  drivers/firmware/efi/stmm/tee_stmm_efi.c | 616
-rw-r--r--  drivers/firmware/efi/vars.c | 8
-rw-r--r--  drivers/firmware/google/coreboot_table.c | 5
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c | 3
-rw-r--r--  drivers/firmware/imx/imx-dsp.c | 6
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 19
-rw-r--r--  drivers/firmware/microchip/Kconfig | 12
-rw-r--r--  drivers/firmware/microchip/Makefile | 3
-rw-r--r--  drivers/firmware/microchip/mpfs-auto-update.c | 495
-rw-r--r--  drivers/firmware/mtk-adsp-ipc.c | 6
-rw-r--r--  drivers/firmware/qcom/qcom_qseecom_uefisecapp.c | 137
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c | 37
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 5
-rw-r--r--  drivers/firmware/raspberrypi.c | 6
-rw-r--r--  drivers/firmware/stratix10-rsu.c | 5
-rw-r--r--  drivers/firmware/stratix10-svc.c | 6
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 2
-rw-r--r--  drivers/firmware/turris-mox-rwtm.c | 6
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 396
-rw-r--r--  drivers/fpga/altera-fpga2sdram.c | 6
-rw-r--r--  drivers/fpga/altera-freeze-bridge.c | 6
-rw-r--r--  drivers/fpga/altera-hps2fpga.c | 6
-rw-r--r--  drivers/fpga/dfl-afu-main.c | 6
-rw-r--r--  drivers/fpga/dfl-fme-br.c | 6
-rw-r--r--  drivers/fpga/dfl-fme-main.c | 6
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 6
-rw-r--r--  drivers/fpga/dfl.c | 4
-rw-r--r--  drivers/fpga/intel-m10-bmc-sec-update.c | 6
-rw-r--r--  drivers/fpga/of-fpga-region.c | 6
-rw-r--r--  drivers/fpga/socfpga-a10.c | 6
-rw-r--r--  drivers/fpga/stratix10-soc.c | 6
-rw-r--r--  drivers/fpga/xilinx-pr-decoupler.c | 6
-rw-r--r--  drivers/fpga/zynq-fpga.c | 6
-rw-r--r--  drivers/gnss/serial.c | 4
-rw-r--r--  drivers/gnss/sirf.c | 4
-rw-r--r--  drivers/gnss/ubx.c | 31
-rw-r--r--  drivers/gpio/Kconfig | 20
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 13
-rw-r--r--  drivers/gpio/gpio-elkhartlake.c | 14
-rw-r--r--  drivers/gpio/gpio-en7523.c | 6
-rw-r--r--  drivers/gpio/gpio-ixp4xx.c | 51
-rw-r--r--  drivers/gpio/gpio-max730x.c | 2
-rw-r--r--  drivers/gpio/gpio-mmio.c | 49
-rw-r--r--  drivers/gpio/gpio-mockup.c | 3
-rw-r--r--  drivers/gpio/gpio-npcm-sgpio.c | 619
-rw-r--r--  drivers/gpio/gpio-pmic-eic-sprd.c | 19
-rw-r--r--  drivers/gpio/gpio-rtd.c | 607
-rw-r--r--  drivers/gpio/gpio-sifive.c | 1
-rw-r--r--  drivers/gpio/gpio-sim.c | 24
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 6
-rw-r--r--  drivers/gpio/gpio-tangier.c | 72
-rw-r--r--  drivers/gpio/gpio-tangier.h | 4
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 20
-rw-r--r--  drivers/gpio/gpio-tps65219.c | 18
-rw-r--r--  drivers/gpio/gpio-wm831x.c | 14
-rw-r--r--  drivers/gpio/gpio-wm8994.c | 13
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 1
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 729
-rw-r--r--  drivers/gpio/gpiolib-of.c | 4
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 65
-rw-r--r--  drivers/gpio/gpiolib-sysfs.h | 4
-rw-r--r--  drivers/gpio/gpiolib.c | 184
-rw-r--r--  drivers/gpio/gpiolib.h | 5
-rw-r--r--  drivers/gpu/drm/Kconfig | 38
-rw-r--r--  drivers/gpu/drm/Makefile | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 220
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 146
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 97
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 247
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 249
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 414
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 676
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 181
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 191
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 475
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 116
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 829
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 72
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 82
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 56
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 232
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 214
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/conversion.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 81
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 461
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 185
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 476
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 880
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 147
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 122
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state.h | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 80
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream_priv.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 88
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 96
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/Makefile | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn315/Makefile | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn316/Makefile | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 85
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 148
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn321/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 158
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dsc.h (renamed from drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/Makefile | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 82
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 176
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c) | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 272
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c) | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 171
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 105
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/Makefile | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h) | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c) | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/Makefile | 199
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c) | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c) | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c) | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c) | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c) | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c) | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c) | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c) | 138
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h) | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c) | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c) | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h) | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 212
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/include/hdcp_msg_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_stats.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/amdgpu_reg_state.h | 153
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h | 102
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h | 184
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 18
-rw-r--r--  drivers/gpu/drm/amd/include/mes_v11_api_def.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 35
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 75
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 235
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 52
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 129
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 84
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 100
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 51
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 20
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 29
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 5
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 263
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 101
-rw-r--r--  drivers/gpu/drm/ast/ast_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 244
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 26
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 73
-rw-r--r--  drivers/gpu/drm/ast/ast_reg.h | 12
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/aux-bridge.c | 141
-rw-r--r--  drivers/gpu/drm/bridge/aux-hpd-bridge.c | 203
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 22
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt8912b.c | 58
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 20
-rw-r--r--  drivers/gpu/drm/bridge/ti-tpd12s015.c | 6
-rw-r--r--  drivers/gpu/drm/ci/arm64.config | 1
-rw-r--r--  drivers/gpu/drm/ci/build.sh | 19
-rw-r--r--  drivers/gpu/drm/ci/gitlab-ci.yml | 16
-rwxr-xr-x  drivers/gpu/drm/ci/igt_runner.sh | 10
-rw-r--r--  drivers/gpu/drm/ci/test.yml | 14
-rw-r--r--  drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt | 13
-rw-r--r--  drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt | 5
-rw-r--r--  drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt | 46
-rw-r--r--  drivers/gpu/drm/display/drm_dp_helper.c | 169
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 223
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 451
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 10
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 20
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 15
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 149
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 8
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 44
-rw-r--r--  drivers/gpu/drm/drm_bridge_connector.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 1627
-rw-r--r--  drivers/gpu/drm/drm_client.c | 12
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 3
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 6
-rw-r--r--  drivers/gpu/drm/drm_context.c | 513
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 4
-rw-r--r--drivers/gpu/drm/drm_debugfs.c65
-rw-r--r--drivers/gpu/drm/drm_dma.c178
-rw-r--r--drivers/gpu/drm/drm_drv.c17
-rw-r--r--drivers/gpu/drm/drm_edid.c43
-rw-r--r--drivers/gpu/drm/drm_edid_load.c16
-rw-r--r--drivers/gpu/drm/drm_eld.c55
-rw-r--r--drivers/gpu/drm/drm_encoder.c4
-rw-r--r--drivers/gpu/drm/drm_exec.c13
-rw-r--r--drivers/gpu/drm/drm_file.c66
-rw-r--r--drivers/gpu/drm/drm_flip_work.c27
-rw-r--r--drivers/gpu/drm/drm_format_helper.c215
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c80
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c9
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c1168
-rw-r--r--drivers/gpu/drm/drm_hashtab.c203
-rw-r--r--drivers/gpu/drm/drm_internal.h23
-rw-r--r--drivers/gpu/drm/drm_ioc32.c613
-rw-r--r--drivers/gpu/drm/drm_ioctl.c96
-rw-r--r--drivers/gpu/drm/drm_irq.c204
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c32
-rw-r--r--drivers/gpu/drm/drm_legacy.h290
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c105
-rw-r--r--drivers/gpu/drm/drm_lock.c373
-rw-r--r--drivers/gpu/drm/drm_memory.c138
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c19
-rw-r--r--drivers/gpu/drm/drm_mode_object.c2
-rw-r--r--drivers/gpu/drm/drm_modes.c6
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c19
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c12
-rw-r--r--drivers/gpu/drm/drm_pci.c204
-rw-r--r--drivers/gpu/drm/drm_plane.c137
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c32
-rw-r--r--drivers/gpu/drm/drm_prime.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c29
-rw-r--r--drivers/gpu/drm/drm_property.c59
-rw-r--r--drivers/gpu/drm/drm_scatter.c220
-rw-r--r--drivers/gpu/drm/drm_syncobj.c64
-rw-r--r--drivers/gpu/drm/drm_vblank.c101
-rw-r--r--drivers/gpu/drm/drm_vm.c665
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h9
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c80
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c30
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c8
-rw-r--r--drivers/gpu/drm/i915/Kconfig4
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug18
-rw-r--r--drivers/gpu/drm/i915/Makefile192
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c44
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c66
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c4
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c12
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c251
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c230
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c589
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c237
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c176
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c217
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h61
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h45
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c106
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c514
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c99
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_regs.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c662
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c171
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c98
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_buffer.c82
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_buffer.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c368
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c168
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c97
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c112
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c115
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c471
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c50
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c7
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c109
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c5
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c11
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c27
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c21
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c14
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c4
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c46
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h38
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c100
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c96
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c20
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c65
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c10
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c17
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h79
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c115
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c112
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c14
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c108
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.h42
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h20
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c199
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h34
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c2
-rw-r--r--drivers/gpu/drm/i915/i915_params.c89
-rw-r--r--drivers/gpu/drm/i915/i915_params.h22
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h9
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c77
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c79
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c50
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c19
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h1
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c243
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h13
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c35
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h73
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c18
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_syncmap.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c27
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c29
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h9
-rw-r--r--drivers/gpu/drm/imagination/Kconfig18
-rw-r--r--drivers/gpu/drm/imagination/Makefile35
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.c645
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.h71
-rw-r--r--drivers/gpu/drm/imagination/pvr_cccb.c267
-rw-r--r--drivers/gpu/drm/imagination/pvr_cccb.h110
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.c464
-rw-r--r--drivers/gpu/drm/imagination/pvr_context.h205
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.c53
-rw-r--r--drivers/gpu/drm/imagination/pvr_debugfs.h29
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c658
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h725
-rw-r--r--drivers/gpu/drm/imagination/pvr_device_info.c255
-rw-r--r--drivers/gpu/drm/imagination/pvr_device_info.h186
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c1501
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.h129
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.c625
-rw-r--r--drivers/gpu/drm/imagination/pvr_free_list.h195
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c1489
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.h509
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_info.h135
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c555
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.h14
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.c252
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_mips.h48
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.c306
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_startstop.h13
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c471
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.h78
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c414
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.h170
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.c550
-rw-r--r--drivers/gpu/drm/imagination/pvr_hwrt.h166
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c786
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.h161
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.c2640
-rw-r--r--drivers/gpu/drm/imagination/pvr_mmu.h108
-rw-r--r--drivers/gpu/drm/imagination/pvr_params.c147
-rw-r--r--drivers/gpu/drm/imagination/pvr_params.h72
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c433
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h41
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c1432
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h169
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h6193
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h159
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_defs.h179
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif.h2188
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h493
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h373
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h133
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h60
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h113
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h28
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h1648
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h258
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h108
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h78
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_heap_config.h113
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_meta.h356
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_mips.h335
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_mips_check.h58
-rw-r--r--drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h136
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.c285
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream.h75
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream_defs.c351
-rw-r--r--drivers/gpu/drm/imagination/pvr_stream_defs.h16
-rw-r--r--drivers/gpu/drm/imagination/pvr_sync.c289
-rw-r--r--drivers/gpu/drm/imagination/pvr_sync.h84
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c1090
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h66
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c237
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.h22
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c9
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c6
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c5
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c1
-rw-r--r--drivers/gpu/drm/lima/lima_device.c2
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c4
-rw-r--r--drivers/gpu/drm/loongson/Kconfig1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_i2c.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c1
-rw-r--r--drivers/gpu/drm/mediatek/Makefile3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c253
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ethdr.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mdp_rdma.c16
-rw-r--r--drivers/gpu/drm/mediatek/mtk_padding.c160
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c1
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c110
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h457
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h104
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h37
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h33
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c180
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c130
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c262
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h69
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c247
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h142
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c33
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c66
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c105
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c54
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c33
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c32
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c37
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c87
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c24
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c30
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c21
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c29
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c25
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c19
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c39
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c69
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h23
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c349
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c30
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.c32
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.h11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c17
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c27
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c1
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c41
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c94
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h15
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h17
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c235
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c44
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c4
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c105
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h1
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c3
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c5
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c68
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c239
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c386
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.h12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c14
-rw-r--r--drivers/gpu/drm/panel/Kconfig18
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c8
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c87
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c35
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c180
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c405
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c225
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c55
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c515
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c116
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c136
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c362
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h23
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c55
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c45
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h7
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c50
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/clearstate_evergreen.h8
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c1
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c1
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c1
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c1
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c1
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c1
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c47
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h18
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c505
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h100
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c227
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c9
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c494
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c40
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.h1
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c6
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c5
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c3
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c1
-rw-r--r--drivers/gpu/drm/tests/Makefile5
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c693
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c160
-rw-r--r--drivers/gpu/drm/tests/drm_exec_test.c16
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c72
-rw-r--r--drivers/gpu/drm/tests/drm_gem_shmem_test.c383
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c78
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c1913
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c79
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h3
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c16
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c54
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c9
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c6
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c3
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c10
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c17
-rw-r--r--drivers/gpu/drm/tiny/repaper.c10
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c44
-rw-r--r--drivers/gpu/drm/tiny/st7586.c19
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_device_test.c2
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c15
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c25
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c56
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c19
-rw-r--r--drivers/gpu/drm/v3d/Makefile4
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c51
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c178
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c50
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h165
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c779
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c89
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h94
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c397
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c1341
-rw-r--r--drivers/gpu/drm/v3d/v3d_sysfs.c69
-rw-r--r--drivers/gpu/drm/v3d/v3d_trace.h57
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c4
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c32
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_prime.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c44
-rw-r--r--drivers/gpu/drm/xe/.gitignore4
-rw-r--r--drivers/gpu/drm/xe/.kunitconfig13
-rw-r--r--drivers/gpu/drm/xe/Kconfig96
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug107
-rw-r--r--drivers/gpu/drm/xe/Kconfig.profile54
-rw-r--r--drivers/gpu/drm/xe/Makefile305
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_command_header_abi.h46
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_mkhi_commands_abi.h39
-rw-r--r--drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h59
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h219
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h249
-rw-r--r--drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h127
-rw-r--r--drivers/gpu/drm/xe/abi/guc_communication_mmio_abi.h49
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h37
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h322
-rw-r--r--drivers/gpu/drm/xe/abi/guc_messages_abi.h234
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h1
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h64
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h12
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_active.h22
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_active_types.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_config.h19
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h233
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h79
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h17
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_reg_defs.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h44
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h34
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h74
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_clock_gating.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_gt_types.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_mchbar_regs.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pci_config.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h16
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_step.h20
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h175
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h8
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h28
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_dram.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_gmch.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h6
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_irq.c77
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_utils.c26
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c78
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.h24
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c104
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.h21
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_misc.c16
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rps.c17
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c71
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c384
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c34
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c291
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h160
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gsc_commands.h36
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_instr_defs.h33
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h61
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h184
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gpu_commands.h70
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h41
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h478
-rw-r--r--drivers/gpu/drm/xe/regs/xe_guc_regs.h143
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h17
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h44
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h120
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h68
-rw-r--r--drivers/gpu/drm/xe/regs/xe_sriov_regs.h17
-rw-r--r--drivers/gpu/drm/xe/tests/Makefile10
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c352
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.c26
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo_test.h14
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c278
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.c25
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_lmtt_test.c73
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c444
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.c25
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c130
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.c25
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs_test.h13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c166
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c71
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h36
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c319
-rw-r--r--drivers/gpu/drm/xe/tests/xe_test.h67
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c167
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h174
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c110
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h25
-rw-r--r--drivers/gpu/drm/xe/xe_bb_types.h20
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c2235
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h356
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h179
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c228
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.h15
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h77
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c148
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c196
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h20
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump_types.h55
-rw-r--r--drivers/gpu/drm/xe/xe_device.c661
-rw-r--r--drivers/gpu/drm/xe/xe_device.h169
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c89
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h537
-rw-r--r--drivers/gpu/drm/xe/xe_display.c411
-rw-r--r--drivers/gpu/drm/xe/xe_display.h72
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c322
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.h15
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c196
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.h70
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h23
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c339
-rw-r--r--drivers/gpu/drm/xe/xe_exec.h14
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c862
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h69
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h211
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c472
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.h21
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h49
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c199
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.h38
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake_types.h86
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c165
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c428
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h33
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c101
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h73
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler_types.h57
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c438
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.h20
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.c184
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_submit.h30
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_types.h39
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c783
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h72
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c186
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.h24
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c85
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.h15
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c249
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c222
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c192
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h17
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle_types.h38
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c685
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h29
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c651
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h46
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.c61
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sysfs_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c251
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c418
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c169
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h363
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c916
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h72
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c672
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.h17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h25
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c1320
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h59
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h115
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.c74
-rw-r--r--drivers/gpu/drm/xe/xe_guc_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h54
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h361
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.c104
-rw-r--r--drivers/gpu/drm/xe/xe_guc_hwconfig.h17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c97
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h48
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h23
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c1002
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h31
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h34
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c1987
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h38
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit_types.h155
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h81
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c234
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.h35
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c300
-rw-r--r--drivers/gpu/drm/xe/xe_huc.h26
-rw-r--r--drivers/gpu/drm/xe/xe_huc_debugfs.c70
-rw-r--r--drivers/gpu/drm/xe/xe_huc_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_huc_types.h24
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c883
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.h70
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c675
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h36
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_types.h225
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c230
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.h30
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence_types.h72
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c776
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.h19
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c666
-rw-r--r--drivers/gpu/drm/xe/xe_irq.h19
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c506
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h27
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt_2l.c150
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt_ml.c161
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt_types.h63
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c1264
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h58
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h46
-rw-r--r--drivers/gpu/drm/xe/xe_macros.h18
-rw-r--r--drivers/gpu/drm/xe/xe_map.h93
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c1455
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h110
-rw-r--r--drivers/gpu/drm/xe/xe_migrate_doc.h88
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c524
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h107
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c580
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.h17
-rw-r--r--drivers/gpu/drm/xe/xe_module.c101
-rw-r--r--drivers/gpu/drm/xe/xe_module.h26
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c459
-rw-r--r--drivers/gpu/drm/xe/xe_pat.h61
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c951
-rw-r--r--drivers/gpu/drm/xe/xe_pci.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h46
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c296
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h30
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h49
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c405
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h35
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c158
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.h61
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence_types.h32
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c1671
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h48
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h77
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c160
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h148
-rw-r--r--drivers/gpu/drm/xe/xe_query.c552
-rw-r--r--drivers/gpu/drm/xe/xe_query.h14
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.c161
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.h75
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.c284
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr.h28
-rw-r--r--drivers/gpu/drm/xe/xe_reg_sr_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c146
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.h23
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h240
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c479
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.h17
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops_types.h22
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c325
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h430
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_helpers.h81
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h124
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c106
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h40
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c289
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h80
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h48
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c55
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h42
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_printk.h46
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_step.c264
-rw-r--r--drivers/gpu/drm/xe/xe_step.h23
-rw-r--r--drivers/gpu/drm/xe/xe_step_types.h50
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c380
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c186
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h18
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c57
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.h19
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs_types.h27
-rw-r--r--drivers/gpu/drm/xe/xe_trace.c9
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h631
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c334
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h21
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c118
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.h13
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c480
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h44
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h52
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c121
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h16
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c258
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h24
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.c26
-rw-r--r--drivers/gpu/drm/xe/xe_uc_debugfs.h14
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c882
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.h184
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h321
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h146
-rw-r--r--drivers/gpu/drm/xe/xe_uc_types.h28
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c3286
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h275
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h555
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h373
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c895
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h32
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules24
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.c179
-rw-r--r--drivers/gpu/drm/xe/xe_wait_user_fence.h15
-rw-r--r--drivers/gpu/drm/xe/xe_wopcm.c270
-rw-r--r--drivers/gpu/drm/xe/xe_wopcm.h16
-rw-r--r--drivers/gpu/drm/xe/xe_wopcm_types.h26
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c1
-rw-r--r--drivers/greybus/gb-beagleplay.c5
-rw-r--r--drivers/hid/Kconfig22
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h6
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c28
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c20
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c59
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h2
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c18
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h10
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-magicmouse.c3
-rw-r--r--drivers/hid/hid-mcp2200.c392
-rw-r--r--drivers/hid/hid-mcp2221.c72
-rw-r--r--drivers/hid/hid-nintendo.c897
-rw-r--r--drivers/hid/hid-picolcd_fb.c1
-rw-r--r--drivers/hid/hid-steam.c571
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c143
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c8
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c67
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c60
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c63
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c189
-rw-r--r--drivers/hid/uhid.c15
-rw-r--r--drivers/hid/wacom.h1
-rw-r--r--drivers/hid/wacom_sys.c8
-rw-r--r--drivers/hid/wacom_wac.c12
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c6
-rw-r--r--drivers/hv/channel.c176
-rw-r--r--drivers/hv/hv_common.c2
-rw-r--r--drivers/hv/hv_util.c31
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/Kconfig11
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c10
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c26
-rw-r--r--drivers/hwmon/corsair-cpro.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c604
-rw-r--r--drivers/hwmon/emc1403.c6
-rw-r--r--drivers/hwmon/gigabyte_waterforce.c430
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/lm75.c114
-rw-r--r--drivers/hwmon/ltc2991.c20
-rw-r--r--drivers/hwmon/max31827.c273
-rw-r--r--drivers/hwmon/max6650.c8
-rw-r--r--drivers/hwmon/nct6775-core.c34
-rw-r--r--drivers/hwmon/nct6775-i2c.c14
-rw-r--r--drivers/hwmon/nct6775-platform.c26
-rw-r--r--drivers/hwmon/nct6775.h2
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c37
-rw-r--r--drivers/hwmon/pc87360.c6
-rw-r--r--drivers/hwmon/peci/dimmtemp.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig28
-rw-r--r--drivers/hwmon/pmbus/Makefile3
-rw-r--r--drivers/hwmon/pmbus/lm25066.c14
-rw-r--r--drivers/hwmon/pmbus/ltc4286.c175
-rw-r--r--drivers/hwmon/pmbus/mp2856.c466
-rw-r--r--drivers/hwmon/pmbus/mp2975.c16
-rw-r--r--drivers/hwmon/pmbus/mp5990.c179
-rw-r--r--drivers/hwmon/pwm-fan.c8
-rw-r--r--drivers/hwmon/sht4x.c3
-rw-r--r--drivers/hwmon/smsc47m1.c67
-rw-r--r--drivers/hwmon/tmp513.c64
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c53
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c3
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c52
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c111
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c126
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c718
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h161
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.h2
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c76
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c19
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.h1
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-elektor.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c36
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c2
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-kempld.c3
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c2
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c49
-rw-r--r--drivers/i2c/busses/i2c-pasemi-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c51
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c21
-rw-r--r--drivers/i2c/busses/i2c-scmi.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c4
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c342
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/busses/i2c-wmt.c123
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/i2c-core-base.c25
-rw-r--r--drivers/i2c/i2c-smbus.c8
-rw-r--r--drivers/i2c/i2c-stub.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c4
-rw-r--r--drivers/i3c/master.c95
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c7
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c49
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h1
-rw-r--r--drivers/i3c/master/svc-i3c-master.c95
-rw-r--r--drivers/idle/intel_idle.c114
-rw-r--r--drivers/iio/accel/Kconfig7
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c2
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c2
-rw-r--r--drivers/iio/adc/Kconfig29
-rw-r--r--drivers/iio/adc/Makefile5
-rw-r--r--drivers/iio/adc/ad7091r-base.c113
-rw-r--r--drivers/iio/adc/ad7091r-base.h75
-rw-r--r--drivers/iio/adc/ad7091r5.c92
-rw-r--r--drivers/iio/adc/ad7091r8.c272
-rw-r--r--drivers/iio/adc/ad9467.c95
-rw-r--r--drivers/iio/adc/adi-axi-adc.c85
-rw-r--r--drivers/iio/adc/max34408.c276
-rw-r--r--drivers/iio/adc/mcp3911.c64
-rw-r--r--drivers/iio/amplifiers/hmc425a.c23
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c87
-rw-r--r--drivers/iio/chemical/Kconfig11
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/ags02ma.c165
-rw-r--r--drivers/iio/chemical/pms7003.c6
-rw-r--r--drivers/iio/chemical/scd30_serial.c6
-rw-r--r--drivers/iio/chemical/sps30_serial.c18
-rw-r--r--drivers/iio/dac/Kconfig10
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5791.c9
-rw-r--r--drivers/iio/dac/mcp4821.c236
-rw-r--r--drivers/iio/frequency/adf4377.c16
-rw-r--r--drivers/iio/frequency/admv1014.c31
-rw-r--r--drivers/iio/humidity/Kconfig12
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/hdc3020.c473
-rw-r--r--drivers/iio/imu/Kconfig1
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c18
-rw-r--r--drivers/iio/imu/bmi323/Kconfig33
-rw-r--r--drivers/iio/imu/bmi323/Makefile7
-rw-r--r--drivers/iio/imu/bmi323/bmi323.h209
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c2139
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c121
-rw-r--r--drivers/iio/imu/bmi323/bmi323_spi.c92
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c6
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c5
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c9
-rw-r--r--drivers/iio/industrialio-buffer.c22
-rw-r--r--drivers/iio/industrialio-core.c66
-rw-r--r--drivers/iio/light/Kconfig37
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/isl76682.c345
-rw-r--r--drivers/iio/light/ltr390.c196
-rw-r--r--drivers/iio/light/ltrf216a.c10
-rw-r--r--drivers/iio/light/pa12203001.c2
-rw-r--r--drivers/iio/light/rohm-bu27008.c201
-rw-r--r--drivers/iio/light/veml6075.c474
-rw-r--r--drivers/iio/magnetometer/tmag5273.c10
-rw-r--r--drivers/iio/pressure/Kconfig22
-rw-r--r--drivers/iio/pressure/Makefile3
-rw-r--r--drivers/iio/pressure/bmp280-core.c42
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c8
-rw-r--r--drivers/iio/pressure/bmp280-spi.c60
-rw-r--r--drivers/iio/pressure/bmp280.h6
-rw-r--r--drivers/iio/pressure/dlhl60d.c7
-rw-r--r--drivers/iio/pressure/hsc030pa.c494
-rw-r--r--drivers/iio/pressure/hsc030pa.h74
-rw-r--r--drivers/iio/pressure/hsc030pa_i2c.c69
-rw-r--r--drivers/iio/pressure/hsc030pa_spi.c61
-rw-r--r--drivers/iio/proximity/irsd200.c1
-rw-r--r--drivers/iio/proximity/sx9324.c24
-rw-r--r--drivers/iio/resolver/ad2s1210.c162
-rw-r--r--drivers/iio/temperature/Kconfig22
-rw-r--r--drivers/iio/temperature/Makefile2
-rw-r--r--drivers/iio/temperature/mcp9600.c139
-rw-r--r--drivers/iio/temperature/mlx90635.c1097
-rw-r--r--drivers/infiniband/core/cm.c23
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c271
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c50
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c215
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h35
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c21
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h117
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h67
-rw-r--r--drivers/infiniband/hw/efa/efa.h12
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h33
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c11
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h12
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c7
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c71
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h41
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c90
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h4
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c4
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hns/Makefile3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c13
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_debugfs.c110
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_debugfs.h33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c67
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c48
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c28
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c6
-rw-r--r--drivers/infiniband/hw/irdma/utils.c11
-rw-r--r--drivers/infiniband/hw/mana/cq.c57
-rw-r--r--drivers/infiniband/hw/mana/device.c31
-rw-r--r--drivers/infiniband/hw/mana/main.c143
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h76
-rw-r--r--drivers/infiniband/hw/mana/mr.c17
-rw-r--r--drivers/infiniband/hw/mana/qp.c113
-rw-r--r--drivers/infiniband/hw/mana/wq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c24
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c2
-rw-r--r--drivers/infiniband/sw/siw/siw.h14
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c145
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c30
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c121
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.h5
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c84
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c51
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c52
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c26
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c33
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h5
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c12
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c4
-rw-r--r--drivers/input/input.c8
-rw-r--r--drivers/input/joystick/Kconfig10
-rw-r--r--drivers/input/joystick/Makefile1
-rw-r--r--drivers/input/joystick/adafruit-seesaw.c315
-rw-r--r--drivers/input/joystick/as5011.c24
-rw-r--r--drivers/input/joystick/pxrc.c42
-rw-r--r--drivers/input/joystick/xpad.c9
-rw-r--r--drivers/input/keyboard/atkbd.c3
-rw-r--r--drivers/input/keyboard/cap11xx.c253
-rw-r--r--drivers/input/keyboard/gpio_keys.c75
-rw-r--r--drivers/input/keyboard/omap-keypad.c19
-rw-r--r--drivers/input/keyboard/omap4-keypad.c15
-rw-r--r--drivers/input/keyboard/qt1050.c2
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c1
-rw-r--r--drivers/input/misc/da7280.c4
-rw-r--r--drivers/input/misc/da9063_onkey.c69
-rw-r--r--drivers/input/misc/ims-pcu.c10
-rw-r--r--drivers/input/misc/iqs269a.c335
-rw-r--r--drivers/input/misc/max77693-haptic.c14
-rw-r--r--drivers/input/misc/pwm-beeper.c4
-rw-r--r--drivers/input/misc/pwm-vibra.c8
-rw-r--r--drivers/input/mouse/cyapa.c22
-rw-r--r--drivers/input/mouse/cyapa_gen3.c2
-rw-r--r--drivers/input/mouse/cyapa_gen5.c4
-rw-r--r--drivers/input/mouse/cyapa_gen6.c20
-rw-r--r--drivers/input/mouse/elan_i2c_core.c18
-rw-r--r--drivers/input/mouse/navpoint.c41
-rw-r--r--drivers/input/rmi4/rmi_driver.c12
-rw-r--r--drivers/input/rmi4/rmi_f01.c13
-rw-r--r--drivers/input/rmi4/rmi_spi.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c23
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c2
-rw-r--r--drivers/input/touchscreen/hideep.c6
-rw-r--r--drivers/input/touchscreen/hycon-hy46xx.c2
-rw-r--r--drivers/input/touchscreen/ilitek_ts_i2c.c16
-rw-r--r--drivers/input/touchscreen/imagis.c20
-rw-r--r--drivers/input/touchscreen/iqs5xx.c12
-rw-r--r--drivers/input/touchscreen/iqs7211.c12
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c16
-rw-r--r--drivers/input/touchscreen/sur40.c7
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c4
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c6
-rw-r--r--drivers/input/touchscreen/zforce_ts.c34
-rw-r--r--drivers/input/vivaldi-fmap.c6
-rw-r--r--drivers/interconnect/core.c8
-rw-r--r--drivers/interconnect/imx/imx8mm.c9
-rw-r--r--drivers/interconnect/imx/imx8mn.c9
-rw-r--r--drivers/interconnect/imx/imx8mp.c9
-rw-r--r--drivers/interconnect/imx/imx8mq.c9
-rw-r--r--drivers/interconnect/qcom/Kconfig27
-rw-r--r--drivers/interconnect/qcom/Makefile6
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c4
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h2
-rw-r--r--drivers/interconnect/qcom/msm8916.c2
-rw-r--r--drivers/interconnect/qcom/msm8939.c2
-rw-r--r--drivers/interconnect/qcom/msm8974.c6
-rw-r--r--drivers/interconnect/qcom/msm8996.c2
-rw-r--r--drivers/interconnect/qcom/osm-l3.c6
-rw-r--r--drivers/interconnect/qcom/qcm2290.c2
-rw-r--r--drivers/interconnect/qcom/qcs404.c2
-rw-r--r--drivers/interconnect/qcom/sdm660.c2
-rw-r--r--drivers/interconnect/qcom/sm6115.c1423
-rw-r--r--drivers/interconnect/qcom/sm8650.c1674
-rw-r--r--drivers/interconnect/qcom/sm8650.h143
-rw-r--r--drivers/interconnect/qcom/smd-rpm.c6
-rw-r--r--drivers/interconnect/qcom/x1e80100.c2303
-rw-r--r--drivers/interconnect/qcom/x1e80100.h192
-rw-r--r--drivers/interconnect/samsung/exynos.c6
-rw-r--r--drivers/iommu/Kconfig1
-rw-r--r--drivers/iommu/amd/amd_iommu.h8
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h6
-rw-r--r--drivers/iommu/amd/init.c8
-rw-r--r--drivers/iommu/amd/io_pgtable.c5
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c10
-rw-r--r--drivers/iommu/amd/iommu.c193
-rw-r--r--drivers/iommu/apple-dart.c40
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c101
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h10
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c1
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c151
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c26
-rw-r--r--drivers/iommu/dma-iommu.c2
-rw-r--r--drivers/iommu/intel/debugfs.c3
-rw-r--r--drivers/iommu/intel/dmar.c26
-rw-r--r--drivers/iommu/intel/iommu.c447
-rw-r--r--drivers/iommu/intel/iommu.h189
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/intel/nested.c88
-rw-r--r--drivers/iommu/intel/pasid.c338
-rw-r--r--drivers/iommu/intel/pasid.h224
-rw-r--r--drivers/iommu/intel/perfmon.c2
-rw-r--r--drivers/iommu/intel/svm.c53
-rw-r--r--drivers/iommu/io-pgtable-arm.c55
-rw-r--r--drivers/iommu/io-pgtable.c23
-rw-r--r--drivers/iommu/iommu-sva.c112
-rw-r--r--drivers/iommu/iommu.c200
-rw-r--r--drivers/iommu/iommufd/Kconfig1
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c43
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h10
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h24
-rw-r--r--drivers/iommu/iommufd/main.c3
-rw-r--r--drivers/iommu/iommufd/selftest.c223
-rw-r--r--drivers/iommu/mtk_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu_v1.c3
-rw-r--r--drivers/iommu/of_iommu.c66
-rw-r--r--drivers/iommu/omap-iommu.c1
-rw-r--r--drivers/iommu/sprd-iommu.c8
-rw-r--r--drivers/iommu/virtio-iommu.c36
-rw-r--r--drivers/ipack/devices/ipoctal.c14
-rw-r--r--drivers/ipack/ipack.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c13
-rw-r--r--drivers/irqchip/irq-gic-v4.c1
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c26
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c82
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c31
-rw-r--r--drivers/isdn/capi/capi.c4
-rw-r--r--drivers/leds/Kconfig34
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-triggers.c13
-rw-r--r--drivers/leds/leds-aw200xx.c91
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-max5970.c111
-rw-r--r--drivers/leds/leds-pwm.c2
-rw-r--r--drivers/leds/leds-sun50i-a100.c584
-rw-r--r--drivers/leds/leds-syscon.c3
-rw-r--r--drivers/leds/leds-tca6507.c30
-rw-r--r--drivers/leds/rgb/Kconfig2
-rw-r--r--drivers/leds/rgb/leds-pwm-multicolor.c4
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c63
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c26
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c47
-rw-r--r--drivers/leds/trigger/ledtrig-tty.c243
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/apple-mailbox.c441
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c6
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c5
-rw-r--r--drivers/mailbox/imx-mailbox.c6
-rw-r--r--drivers/mailbox/mailbox-test.c6
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c49
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c16
-rw-r--r--drivers/mailbox/qcom-ipcc.c6
-rw-r--r--drivers/mailbox/stm32-ipcc.c6
-rw-r--r--drivers/mailbox/sun6i-msgbox.c6
-rw-r--r--drivers/mailbox/tegra-hsp.c6
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c7
-rw-r--r--drivers/mcb/mcb-core.c5
-rw-r--r--drivers/md/Kconfig34
-rw-r--r--drivers/md/Makefile10
-rw-r--r--drivers/md/bcache/super.c1
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c4
-rw-r--r--drivers/md/dm-kcopyd.c2
-rw-r--r--drivers/md/dm-stats.c9
-rw-r--r--drivers/md/dm-table.c45
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-writecache.c8
-rw-r--r--drivers/md/dm-zoned-metadata.c7
-rw-r--r--drivers/md/dm-zoned-target.c4
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md-autodetect.c8
-rw-r--r--drivers/md/md-faulty.c365
-rw-r--r--drivers/md/md-linear.c318
-rw-r--r--drivers/md/md-multipath.c462
-rw-r--r--drivers/md/md.c243
-rw-r--r--drivers/md/raid1-10.c54
-rw-r--r--drivers/md/raid1.c22
-rw-r--r--drivers/md/raid10.c262
-rw-r--r--drivers/md/raid5-cache.c11
-rw-r--r--drivers/md/raid5-ppl.c16
-rw-r--r--drivers/md/raid5.c182
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/cec/core/cec-adap.c68
-rw-r--r--drivers/media/cec/platform/cros-ec/cros-ec-cec.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c515
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c138
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c21
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c5
-rw-r--r--drivers/media/i2c/Kconfig73
-rw-r--r--drivers/media/i2c/Makefile6
-rw-r--r--drivers/media/i2c/adv7180.c28
-rw-r--r--drivers/media/i2c/adv7183.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c6
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c6
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c4
-rw-r--r--drivers/media/i2c/adv7604.c4
-rw-r--r--drivers/media/i2c/adv7842.c4
-rw-r--r--drivers/media/i2c/ak7375.c132
-rw-r--r--drivers/media/i2c/alvium-csi2.c2558
-rw-r--r--drivers/media/i2c/alvium-csi2.h475
-rw-r--r--drivers/media/i2c/ar0521.c5
-rw-r--r--drivers/media/i2c/ccs/Kconfig1
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c134
-rw-r--r--drivers/media/i2c/ccs/ccs-reg-access.c213
-rw-r--r--drivers/media/i2c/ccs/ccs-regs.h906
-rw-r--r--drivers/media/i2c/ccs/ccs.h3
-rw-r--r--drivers/media/i2c/ccs/smiapp-reg-defs.h951
-rw-r--r--drivers/media/i2c/ds90ub913.c13
-rw-r--r--drivers/media/i2c/ds90ub953.c13
-rw-r--r--drivers/media/i2c/ds90ub960.c23
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c23
-rw-r--r--drivers/media/i2c/gc0308.c1451
-rw-r--r--drivers/media/i2c/gc2145.c1450
-rw-r--r--drivers/media/i2c/hi556.c13
-rw-r--r--drivers/media/i2c/hi846.c21
-rw-r--r--drivers/media/i2c/hi847.c9
-rw-r--r--drivers/media/i2c/imx208.c9
-rw-r--r--drivers/media/i2c/imx214.c207
-rw-r--r--drivers/media/i2c/imx219.c21
-rw-r--r--drivers/media/i2c/imx258.c9
-rw-r--r--drivers/media/i2c/imx274.c74
-rw-r--r--drivers/media/i2c/imx290.c18
-rw-r--r--drivers/media/i2c/imx296.c28
-rw-r--r--drivers/media/i2c/imx319.c19
-rw-r--r--drivers/media/i2c/imx334.c16
-rw-r--r--drivers/media/i2c/imx335.c223
-rw-r--r--drivers/media/i2c/imx355.c7
-rw-r--r--drivers/media/i2c/imx412.c16
-rw-r--r--drivers/media/i2c/imx415.c16
-rw-r--r--drivers/media/i2c/isl7998x.c6
-rw-r--r--drivers/media/i2c/max9286.c32
-rw-r--r--drivers/media/i2c/mt9m001.c16
-rw-r--r--drivers/media/i2c/mt9m111.c44
-rw-r--r--drivers/media/i2c/mt9m114.c102
-rw-r--r--drivers/media/i2c/mt9p031.c14
-rw-r--r--drivers/media/i2c/mt9t112.c1
-rw-r--r--drivers/media/i2c/mt9v011.c34
-rw-r--r--drivers/media/i2c/mt9v032.c10
-rw-r--r--drivers/media/i2c/mt9v111.c44
-rw-r--r--drivers/media/i2c/og01a1b.c10
-rw-r--r--drivers/media/i2c/ov01a10.c12
-rw-r--r--drivers/media/i2c/ov02a10.c16
-rw-r--r--drivers/media/i2c/ov08d10.c9
-rw-r--r--drivers/media/i2c/ov08x40.c7
-rw-r--r--drivers/media/i2c/ov13858.c10
-rw-r--r--drivers/media/i2c/ov13b10.c10
-rw-r--r--drivers/media/i2c/ov2640.c16
-rw-r--r--drivers/media/i2c/ov2659.c6
-rw-r--r--drivers/media/i2c/ov2680.c34
-rw-r--r--drivers/media/i2c/ov2685.c4
-rw-r--r--drivers/media/i2c/ov2740.c379
-rw-r--r--drivers/media/i2c/ov4689.c2
-rw-r--r--drivers/media/i2c/ov5640.c49
-rw-r--r--drivers/media/i2c/ov5645.c16
-rw-r--r--drivers/media/i2c/ov5647.c12
-rw-r--r--drivers/media/i2c/ov5648.c72
-rw-r--r--drivers/media/i2c/ov5670.c23
-rw-r--r--drivers/media/i2c/ov5675.c9
-rw-r--r--drivers/media/i2c/ov5693.c18
-rw-r--r--drivers/media/i2c/ov5695.c8
-rw-r--r--drivers/media/i2c/ov64a40.c3690
-rw-r--r--drivers/media/i2c/ov6650.c64
-rw-r--r--drivers/media/i2c/ov7251.c36
-rw-r--r--drivers/media/i2c/ov7670.c37
-rw-r--r--drivers/media/i2c/ov772x.c30
-rw-r--r--drivers/media/i2c/ov7740.c47
-rw-r--r--drivers/media/i2c/ov8856.c9
-rw-r--r--drivers/media/i2c/ov8858.c16
-rw-r--r--drivers/media/i2c/ov8865.c66
-rw-r--r--drivers/media/i2c/ov9282.c18
-rw-r--r--drivers/media/i2c/ov9640.c2
-rw-r--r--drivers/media/i2c/ov9650.c35
-rw-r--r--drivers/media/i2c/ov9734.c9
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c65
-rw-r--r--drivers/media/i2c/s5k5baf.c69
-rw-r--r--drivers/media/i2c/s5k6a3.c8
-rw-r--r--drivers/media/i2c/saa6752hs.c4
-rw-r--r--drivers/media/i2c/st-mipid02.c486
-rw-r--r--drivers/media/i2c/st-vgxy61.c34
-rw-r--r--drivers/media/i2c/tc358746.c22
-rw-r--r--drivers/media/i2c/tda1997x.c16
-rw-r--r--drivers/media/i2c/thp7312.c2256
-rw-r--r--drivers/media/i2c/tvp514x.c41
-rw-r--r--drivers/media/i2c/tvp5150.c8
-rw-r--r--drivers/media/i2c/tvp7002.c6
-rw-r--r--drivers/media/i2c/tw9900.c781
-rw-r--r--drivers/media/i2c/tw9910.c2
-rw-r--r--drivers/media/i2c/video-i2c.c7
-rw-r--r--drivers/media/mc/Kconfig7
-rw-r--r--drivers/media/mc/mc-device.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c7
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/pci/cx88/cx88-video.c4
-rw-r--r--drivers/media/pci/dt3155/dt3155.c4
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c26
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c83
-rw-r--r--drivers/media/pci/ivtv/Kconfig4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h1
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c6
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c2
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c5
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c9
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c7
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c7
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c6
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c30
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c8
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c2
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c15
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c14
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c3
-rw-r--r--drivers/media/platform/chips-media/Kconfig18
-rw-r--r--drivers/media/platform/chips-media/Makefile6
-rw-r--r--drivers/media/platform/chips-media/coda/Kconfig18
-rw-r--r--drivers/media/platform/chips-media/coda/Makefile6
-rw-r--r--drivers/media/platform/chips-media/coda/coda-bit.c (renamed from drivers/media/platform/chips-media/coda-bit.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c (renamed from drivers/media/platform/chips-media/coda-common.c)4
-rw-r--r--drivers/media/platform/chips-media/coda/coda-gdi.c (renamed from drivers/media/platform/chips-media/coda-gdi.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda-h264.c (renamed from drivers/media/platform/chips-media/coda-h264.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda-jpeg.c (renamed from drivers/media/platform/chips-media/coda-jpeg.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda-mpeg2.c (renamed from drivers/media/platform/chips-media/coda-mpeg2.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda-mpeg4.c (renamed from drivers/media/platform/chips-media/coda-mpeg4.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda.h (renamed from drivers/media/platform/chips-media/coda.h)0
-rw-r--r--drivers/media/platform/chips-media/coda/coda_regs.h (renamed from drivers/media/platform/chips-media/coda_regs.h)0
-rw-r--r--drivers/media/platform/chips-media/coda/imx-vdoa.c (renamed from drivers/media/platform/chips-media/imx-vdoa.c)0
-rw-r--r--drivers/media/platform/chips-media/coda/imx-vdoa.h (renamed from drivers/media/platform/chips-media/imx-vdoa.h)0
-rw-r--r--drivers/media/platform/chips-media/coda/trace.h (renamed from drivers/media/platform/chips-media/trace.h)2
-rw-r--r--drivers/media/platform/chips-media/wave5/Kconfig15
-rw-r--r--drivers/media/platform/chips-media/wave5/Makefile10
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c213
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h31
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c2551
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-regdefine.h732
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vdi.c205
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vdi.h35
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c1932
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c1794
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c291
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h83
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.c960
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.h870
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h77
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuerror.h292
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5.h114
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c7
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c16
-rw-r--r--drivers/media/platform/mediatek/vcodec/Kconfig1
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c24
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c31
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h16
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c168
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c9
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c2
-rw-r--r--drivers/media/platform/microchip/microchip-csi2dc.c25
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c41
-rw-r--r--drivers/media/platform/microchip/microchip-isc-scaler.c26
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c34
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/Kconfig1
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/v4l2.c2
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c20
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c58
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c20
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-debug.c27
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c28
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c4
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c23
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c31
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c20
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h7
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c15
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c36
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c36
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c31
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c69
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c115
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h26
-rw-r--r--drivers/media/platform/qcom/camss/camss.c122
-rw-r--r--drivers/media/platform/qcom/camss/camss.h10
-rw-r--r--drivers/media/platform/qcom/venus/core.c4
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c4
-rw-r--r--drivers/media/platform/qcom/venus/venc.c4
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c4
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c2
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c5
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c2
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c16
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c16
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c2
-rw-r--r--drivers/media/platform/renesas/sh_vou.c2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_brx.c43
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_clu.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.c138
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_entity.h12
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hgo.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hgt.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c24
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hsit.c12
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lif.c3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lut.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c8
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rwpf.c41
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_sru.c37
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uds.c40
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_uif.c25
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c10
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c162
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c146
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c174
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h35
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c26
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c6
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c115
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h9
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c63
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-capture.c12
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-isp.c24
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c16
-rw-r--r--drivers/media/platform/samsung/exynos4-is/mipi-csis.c3
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c8
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v12.h52
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h3
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c36
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h33
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c14
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c60
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c150
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h14
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c12
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c299
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h7
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c13
-rw-r--r--drivers/media/platform/st/stm32/Kconfig16
-rw-r--r--drivers/media/platform/st/stm32/Makefile1
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c10
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/Makefile4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c956
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c565
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.c111
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h217
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c604
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c440
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c1
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h1
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c2
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c17
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c18
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c18
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c18
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c4
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c4
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c7
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c28
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c9
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c7
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c7
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c2
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c7
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c19
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccp2.c13
-rw-r--r--drivers/media/platform/ti/omap3isp/ispcsi2.c9
-rw-r--r--drivers/media/platform/ti/omap3isp/isppreview.c18
-rw-r--r--drivers/media/platform/ti/omap3isp/ispresizer.c21
-rw-r--r--drivers/media/platform/verisilicon/Kconfig1
-rw-r--r--drivers/media/platform/verisilicon/hantro.h9
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c4
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c14
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c18
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c28
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h7
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c93
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c26
-rw-r--r--drivers/media/platform/video-mux.c28
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c74
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c9
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c4
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/rc/ir-hix5hd2.c10
-rw-r--r--drivers/media/rc/meson-ir-tx.c34
-rw-r--r--drivers/media/rc/pwm-ir-tx.c87
-rw-r--r--drivers/media/test-drivers/Kconfig1
-rw-r--r--drivers/media/test-drivers/vicodec/Kconfig1
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c20
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_pes.c1
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c2
-rw-r--r--drivers/media/test-drivers/vimc/vimc-debayer.c21
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c20
-rw-r--r--drivers/media/test-drivers/vimc/vimc-sensor.c17
-rw-r--r--drivers/media/test-drivers/visl/Kconfig1
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c21
-rw-r--r--drivers/media/test-drivers/visl/visl-dec.c104
-rw-r--r--drivers/media/test-drivers/visl/visl-dec.h8
-rw-r--r--drivers/media/test-drivers/visl/visl-trace-av1.h314
-rw-r--r--drivers/media/test-drivers/visl/visl-trace-points.c1
-rw-r--r--drivers/media/test-drivers/visl/visl-video.c18
-rw-r--r--drivers/media/test-drivers/visl/visl-video.h1
-rw-r--r--drivers/media/test-drivers/visl/visl.h1
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig1
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-cap.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c5
-rw-r--r--drivers/media/usb/airspy/airspy.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c7
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c9
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c6
-rw-r--r--drivers/media/usb/gspca/gspca.c6
-rw-r--r--drivers/media/usb/hackrf/hackrf.c5
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c12
-rw-r--r--drivers/media/usb/uvc/uvc_video.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c334
-rw-r--r--drivers/memory/brcmstb_dpfe.c6
-rw-r--r--drivers/memory/brcmstb_memc.c6
-rw-r--r--drivers/memory/emif.c6
-rw-r--r--drivers/memory/fsl-corenet-cf.c6
-rw-r--r--drivers/memory/fsl_ifc.c6
-rw-r--r--drivers/memory/jz4780-nemc.c5
-rw-r--r--drivers/memory/mtk-smi.c10
-rw-r--r--drivers/memory/omap-gpmc.c6
-rw-r--r--drivers/memory/renesas-rpc-if.c6
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c6
-rw-r--r--drivers/memory/stm32-fmc2-ebi.c6
-rw-r--r--drivers/memory/tegra/tegra186-emc.c6
-rw-r--r--drivers/memory/tegra/tegra186.c33
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c6
-rw-r--r--drivers/memory/ti-aemif.c5
-rw-r--r--drivers/memory/ti-emif-pm.c6
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/ab8500-sysctrl.c8
-rw-r--r--drivers/mfd/cros_ec_dev.c5
-rw-r--r--drivers/mfd/cs42l43-sdw.c2
-rw-r--r--drivers/mfd/da9062-core.c22
-rw-r--r--drivers/mfd/exynos-lpass.c6
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c6
-rw-r--r--drivers/mfd/hi655x-pmic.c5
-rw-r--r--drivers/mfd/intel-lpss-acpi.c33
-rw-r--r--drivers/mfd/intel-lpss-pci.c27
-rw-r--r--drivers/mfd/intel-lpss.c50
-rw-r--r--drivers/mfd/intel-lpss.h28
-rw-r--r--drivers/mfd/kempld-core.c6
-rw-r--r--drivers/mfd/mcp-sa11x0.c6
-rw-r--r--drivers/mfd/mxs-lradc.c6
-rw-r--r--drivers/mfd/omap-usb-host.c5
-rw-r--r--drivers/mfd/omap-usb-tll.c5
-rw-r--r--drivers/mfd/pcf50633-adc.c6
-rw-r--r--drivers/mfd/qcom-pm8xxx.c6
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c1
-rw-r--r--drivers/mfd/rave-sp.c14
-rw-r--r--drivers/mfd/sm501.c6
-rw-r--r--drivers/mfd/stm32-timers.c6
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c6
-rw-r--r--drivers/mfd/tps65086.c1
-rw-r--r--drivers/mfd/tps65911-comparator.c6
-rw-r--r--drivers/mfd/tps6594-spi.c2
-rw-r--r--drivers/mfd/twl4030-audio.c6
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c4
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rts5264.c886
-rw-r--r--drivers/misc/cardreader/rts5264.h278
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c32
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/cxl/cxl.h3
-rw-r--r--drivers/misc/dw-xdata-pcie.c6
-rw-r--r--drivers/misc/eeprom/at24.c66
-rw-r--r--drivers/misc/eeprom/ee1004.c113
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/lkdtm/heap.c60
-rw-r--r--drivers/misc/mei/Kconfig35
-rw-r--r--drivers/misc/mei/Makefile7
-rw-r--r--drivers/misc/mei/gsc_proxy/Kconfig2
-rw-r--r--drivers/misc/mei/hdcp/Kconfig2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/platform-vsc.c465
-rw-r--r--drivers/misc/mei/pxp/Kconfig3
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c770
-rw-r--r--drivers/misc/mei/vsc-tp.c593
-rw-r--r--drivers/misc/mei/vsc-tp.h53
-rw-r--r--drivers/misc/nsm.c506
-rw-r--r--drivers/misc/ocxl/afu_irq.c2
-rw-r--r--drivers/misc/ocxl/context.c2
-rw-r--r--drivers/misc/ocxl/link.c14
-rw-r--r--drivers/misc/ocxl/main.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c50
-rw-r--r--drivers/misc/pvpanic/pvpanic-mmio.c7
-rw-r--r--drivers/misc/pvpanic/pvpanic-pci.c6
-rw-r--r--drivers/misc/pvpanic/pvpanic.c12
-rw-r--r--drivers/misc/pvpanic/pvpanic.h5
-rw-r--r--drivers/misc/vmw_vmci/vmci_datagram.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.c12
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.h8
-rw-r--r--drivers/mmc/core/host.c3
-rw-r--r--drivers/mmc/core/mmc.c30
-rw-r--r--drivers/mmc/core/mmc_test.c33
-rw-r--r--drivers/mmc/core/sdio_uart.c22
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c69
-rw-r--r--drivers/mmc/host/mmci.h2
-rw-r--r--drivers/mmc/host/mtk-sd.c166
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c17
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c69
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c350
-rw-r--r--drivers/mmc/host/sdhci-omap.c2
-rw-r--r--drivers/mmc/host/sdhci-xenon.c31
-rw-r--r--drivers/mmc/host/sdhci-xenon.h3
-rw-r--r--drivers/mmc/host/sdhci.c3
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sdhci_am654.c37
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/mtdcore.c8
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcma_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c412
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.h2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/iproc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c14
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c8
-rw-r--r--drivers/mtd/nand/raw/nand_base.c10
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c7
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c7
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c2
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c13
-rw-r--r--drivers/mtd/nand/spi/core.c2
-rw-r--r--drivers/mtd/spi-nor/atmel.c16
-rw-r--r--drivers/mtd/spi-nor/core.c177
-rw-r--r--drivers/mtd/spi-nor/core.h24
-rw-r--r--drivers/mtd/spi-nor/debugfs.c2
-rw-r--r--drivers/mtd/spi-nor/micron-st.c59
-rw-r--r--drivers/mtd/spi-nor/sfdp.c29
-rw-r--r--drivers/mtd/spi-nor/sfdp.h7
-rw-r--r--drivers/mtd/spi-nor/spansion.c4
-rw-r--r--drivers/mtd/spi-nor/sst.c6
-rw-r--r--drivers/mtd/spi-nor/swp.c25
-rw-r--r--drivers/mtd/spi-nor/sysfs.c2
-rw-r--r--drivers/mtd/ssfdc.c1
-rw-r--r--drivers/mtd/ubi/Kconfig9
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/debug.c108
-rw-r--r--drivers/mtd/ubi/debug.h304
-rw-r--r--drivers/mtd/ubi/io.c86
-rw-r--r--drivers/mtd/ubi/ubi.h45
-rw-r--r--drivers/mux/mmio.c19
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/arcnet/arc-rawmode.c1
-rw-r--r--drivers/net/arcnet/arc-rimi.c1
-rw-r--r--drivers/net/arcnet/capmode.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c1
-rw-r--r--drivers/net/arcnet/com20020.c1
-rw-r--r--drivers/net/arcnet/com20020_cs.c1
-rw-r--r--drivers/net/arcnet/com90io.c1
-rw-r--r--drivers/net/arcnet/com90xx.c1
-rw-r--r--drivers/net/arcnet/rfc1051.c1
-rw-r--r--drivers/net/arcnet/rfc1201.c1
-rw-r--r--drivers/net/bonding/bond_main.c29
-rw-r--r--drivers/net/can/c_can/c_can_platform.c13
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c12
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/xilinx_can.c9
-rw-r--r--drivers/net/dsa/bcm_sf2.c7
-rw-r--r--drivers/net/dsa/bcm_sf2.h1
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c1
-rw-r--r--drivers/net/dsa/lantiq_gswip.c74
-rw-r--r--drivers/net/dsa/microchip/ksz8.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c152
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c34
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h21
-rw-r--r--drivers/net/dsa/mt7530.c269
-rw-r--r--drivers/net/dsa/mt7530.h10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c454
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h27
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c7
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h23
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c39
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c2
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c4
-rw-r--r--drivers/net/dsa/qca/qca8k.h1
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c2
-rw-r--r--drivers/net/dsa/realtek/rtl8366-core.c2
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c59
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c3
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c8
-rw-r--r--drivers/net/dummy.c1
-rw-r--r--drivers/net/ethernet/8390/8390.c1
-rw-r--r--drivers/net/ethernet/8390/8390p.c1
-rw-r--r--drivers/net/ethernet/8390/apne.c1
-rw-r--r--drivers/net/ethernet/8390/hydra.c1
-rw-r--r--drivers/net/ethernet/8390/stnic.c1
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c1
-rw-r--r--drivers/net/ethernet/adi/Kconfig1
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c50
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c728
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h99
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.c470
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_xdp.h151
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c14
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h5
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c33
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c31
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c2
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.h8
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c52
-rw-r--r--drivers/net/ethernet/broadcom/b44.c14
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c27
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2758
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h503
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c740
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h521
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c39
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.h15
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c42
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c28
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/adapter.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c24
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c25
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c3
-rw-r--r--drivers/net/ethernet/cortina/gemini.c15
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c28
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c132
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c16
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c48
-rw-r--r--drivers/net/ethernet/google/gve/gve.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c88
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c17
-rw-r--r--drivers/net/ethernet/google/gve/gve_register.h9
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c17
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c82
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c40
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/Kconfig11
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c45
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c56
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c35
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c198
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h165
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c229
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c213
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c285
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debug.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c304
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c772
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c172
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h51
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c94
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h5
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c86
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c39
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c96
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c57
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c20
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c41
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c330
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c667
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c568
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c482
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c470
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.c126
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hwmon.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h412
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c385
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c334
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c315
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c444
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c195
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c61
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c124
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c207
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c104
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c53
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c65
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c74
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c29
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c13
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c44
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c48
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h21
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c79
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c10
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c50
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c40
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c27
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c44
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c25
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c102
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c84
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c925
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h48
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c86
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h173
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c241
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h65
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c449
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h167
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h13
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h416
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c12
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h34
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h99
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h74
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c746
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c123
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c80
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c134
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c16
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c166
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c132
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c853
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c8
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c16
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c15
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c35
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c18
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c61
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c81
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c53
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c73
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c199
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c537
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c90
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c46
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c73
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c33
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c117
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c25
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c32
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c12
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c5
-rw-r--r--drivers/net/ethernet/realtek/Kconfig7
-rw-r--r--drivers/net/ethernet/realtek/Makefile3
-rw-r--r--drivers/net/ethernet/realtek/r8169.h9
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c168
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c261
-rw-r--r--drivers/net/ethernet/renesas/Kconfig12
-rw-r--r--drivers/net/ethernet/renesas/Makefile5
-rw-r--r--drivers/net/ethernet/renesas/ravb.h6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c146
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c40
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h9
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c381
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h43
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c126
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h13
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c26
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c30
-rw-r--r--drivers/net/ethernet/sfc/ptp.h7
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c126
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h13
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c30
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h7
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c109
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c117
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c165
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c153
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c8
-rw-r--r--drivers/net/ethernet/ti/Kconfig15
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c272
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c296
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h9
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c708
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h186
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c15
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c15
-rw-r--r--drivers/net/ethernet/ti/cpts.c17
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c16
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c8
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c177
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h30
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c238
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h27
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c275
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c157
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h94
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c82
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c86
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c114
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h1
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c82
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c63
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c65
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h15
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h35
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c667
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fjes/fjes_main.c6
-rw-r--r--drivers/net/geneve.c29
-rw-r--r--drivers/net/gtp.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c36
-rw-r--r--drivers/net/hyperv/rndis_filter.c1
-rw-r--r--drivers/net/ieee802154/fakelb.c6
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c6
-rw-r--r--drivers/net/ipa/Makefile4
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.5.c487
-rw-r--r--drivers/net/ipa/gsi_reg.c1
-rw-r--r--drivers/net/ipa/ipa_data.h1
-rw-r--r--drivers/net/ipa/ipa_main.c42
-rw-r--r--drivers/net/ipa/ipa_mem.c2
-rw-r--r--drivers/net/ipa/ipa_reg.c6
-rw-r--r--drivers/net/ipa/ipa_reg.h111
-rw-r--r--drivers/net/ipa/ipa_version.h1
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.5.c565
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c15
-rw-r--r--drivers/net/ipvlan/ipvtap.c1
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macsec.c172
-rw-r--r--drivers/net/macvlan.c15
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c21
-rw-r--r--drivers/net/mdio/mdio-gpio.c4
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c6
-rw-r--r--drivers/net/mdio/mdio-mux.c14
-rw-r--r--drivers/net/netdevsim/macsec.c5
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c6
-rw-r--r--drivers/net/pcs/pcs-xpcs.c4
-rw-r--r--drivers/net/phy/Kconfig37
-rw-r--r--drivers/net/phy/Makefile19
-rw-r--r--drivers/net/phy/adin.c53
-rw-r--r--drivers/net/phy/aquantia.h16
-rw-r--r--drivers/net/phy/aquantia/Kconfig6
-rw-r--r--drivers/net/phy/aquantia/Makefile6
-rw-r--r--drivers/net/phy/aquantia/aquantia.h122
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c374
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c (renamed from drivers/net/phy/aquantia_hwmon.c)14
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c (renamed from drivers/net/phy/aquantia_main.c)137
-rw-r--r--drivers/net/phy/at803x.c1090
-rw-r--r--drivers/net/phy/ax88796b_rust.rs135
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c15
-rw-r--r--drivers/net/phy/bcm54140.c16
-rw-r--r--drivers/net/phy/bcm84881.c12
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/dp83640.c24
-rw-r--r--drivers/net/phy/dp83869.c3
-rw-r--r--drivers/net/phy/dp83tg720.c188
-rw-r--r--drivers/net/phy/marvell10g.c203
-rw-r--r--drivers/net/phy/mdio_bus.c15
-rw-r--r--drivers/net/phy/mdio_device.c6
-rw-r--r--drivers/net/phy/mdio_devres.c1
-rw-r--r--drivers/net/phy/mediatek-ge-soc.c43
-rw-r--r--drivers/net/phy/micrel.c76
-rw-r--r--drivers/net/phy/mscc/mscc.h5
-rw-r--r--drivers/net/phy/mscc/mscc_main.c4
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c18
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c1729
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c94
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.h62
-rw-r--r--drivers/net/phy/nxp-tja11xx.c2
-rw-r--r--drivers/net/phy/phy-c45.c129
-rw-r--r--drivers/net/phy/phy-core.c204
-rw-r--r--drivers/net/phy/phy.c28
-rw-r--r--drivers/net/phy/phy_device.c60
-rw-r--r--drivers/net/phy/phylink.c324
-rw-r--r--drivers/net/phy/sfp-bus.c10
-rw-r--r--drivers/net/phy/sfp.c40
-rw-r--r--drivers/net/phy/smsc.c2
-rw-r--r--drivers/net/plip/plip.c1
-rw-r--r--drivers/net/ppp/bsd_comp.c1
-rw-r--r--drivers/net/ppp/ppp_async.c3
-rw-r--r--drivers/net/ppp/ppp_deflate.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c1
-rw-r--r--drivers/net/ppp/ppp_synctty.c1
-rw-r--r--drivers/net/ppp/pppoe.c23
-rw-r--r--drivers/net/slip/slhc.c1
-rw-r--r--drivers/net/slip/slip.c1
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/hso.c11
-rw-r--r--drivers/net/usb/lan78xx.c2
-rw-r--r--drivers/net/usb/r8152.c18
-rw-r--r--drivers/net/veth.c20
-rw-r--r--drivers/net/virtio_net.c350
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c32
-rw-r--r--drivers/net/vxlan/vxlan_core.c29
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c174
-rw-r--r--drivers/net/vxlan/vxlan_private.h2
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/Makefile2
-rw-r--r--drivers/net/wan/framer/Kconfig42
-rw-r--r--drivers/net/wan/framer/Makefile7
-rw-r--r--drivers/net/wan/framer/framer-core.c882
-rw-r--r--drivers/net/wan/framer/pef2256/Makefile8
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256-regs.h250
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c880
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c6
-rw-r--r--drivers/net/wan/ixp4xx_hss.c5
-rw-r--r--drivers/net/wan/slic_ds26522.c1
-rw-r--r--drivers/net/wireless/Kconfig3
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c17
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c250
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c149
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h64
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c15
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c151
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c184
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c16
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c172
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c19
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h64
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c7
-rw-r--r--drivers/net/wireless/atmel/Kconfig35
-rw-r--r--drivers/net/wireless/atmel/Makefile4
-rw-r--r--drivers/net/wireless/atmel/atmel.c4452
-rw-r--r--drivers/net/wireless/atmel/atmel.h31
-rw-r--r--drivers/net/wireless/atmel/atmel_cs.c292
-rw-r--r--drivers/net/wireless/atmel/atmel_pci.c65
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/cisco/Kconfig59
-rw-r--r--drivers/net/wireless/cisco/Makefile3
-rw-r--r--drivers/net/wireless/cisco/airo.c8288
-rw-r--r--drivers/net/wireless/cisco/airo.h10
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c218
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c34
-rw-r--r--drivers/net/wireless/intersil/Kconfig2
-rw-r--r--drivers/net/wireless/intersil/Makefile2
-rw-r--r--drivers/net/wireless/intersil/hostap/Kconfig95
-rw-r--r--drivers/net/wireless/intersil/hostap/Makefile8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap.h98
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211.h97
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_rx.c1116
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_tx.c554
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c3277
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.h264
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_common.h420
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_config.h49
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_cs.c710
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c810
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c3387
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_info.c509
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c3847
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c1123
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_pci.c445
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_plx.c617
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c411
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h1051
-rw-r--r--drivers/net/wireless/intersil/orinoco/Kconfig143
-rw-r--r--drivers/net/wireless/intersil/orinoco/Makefile15
-rw-r--r--drivers/net/wireless/intersil/orinoco/airport.c268
-rw-r--r--drivers/net/wireless/intersil/orinoco/cfg.c291
-rw-r--r--drivers/net/wireless/intersil/orinoco/cfg.h15
-rw-r--r--drivers/net/wireless/intersil/orinoco/fw.c387
-rw-r--r--drivers/net/wireless/intersil/orinoco/fw.h21
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.c778
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.h534
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_dld.c477
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_dld.h52
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes_rid.h165
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c1362
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.h60
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2414
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.h50
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c89
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.h23
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h251
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_cs.c350
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_nortel.c314
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_pci.c257
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_pci.h54
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_plx.c362
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_tmd.c237
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c1787
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.c259
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.h21
-rw-r--r--drivers/net/wireless/intersil/orinoco/spectrum_cs.c328
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.c1428
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.h13
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/legacy/Kconfig55
-rw-r--r--drivers/net/wireless/legacy/Makefile6
-rw-r--r--drivers/net/wireless/legacy/ray_cs.c2824
-rw-r--r--drivers/net/wireless/legacy/ray_cs.h74
-rw-r--r--drivers/net/wireless/legacy/rayctl.h734
-rw-r--r--drivers/net/wireless/legacy/rndis_wlan.c3760
-rw-r--r--drivers/net/wireless/legacy/wl3501.h615
-rw-r--r--drivers/net/wireless/legacy/wl3501_cs.c2036
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig7
-rw-r--r--drivers/net/wireless/marvell/libertas/Makefile1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_cs.c957
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h54
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h102
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c160
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c399
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c520
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c218
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c504
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h230
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c360
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h160
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c71
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h182
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c24
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c46
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h42
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c86
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00link.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c76
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c5
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h32
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c648
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c105
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h137
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c70
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c420
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c158
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h154
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c753
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h149
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c22
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c1841
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c405
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h525
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c509
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c511
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h3195
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c175
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c51
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c710
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.h73
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c89
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c7
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c27
-rw-r--r--drivers/net/wireless/zydas/Kconfig19
-rw-r--r--drivers/net/wireless/zydas/Makefile2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c1909
-rw-r--r--drivers/net/wireless/zydas/zd1201.h144
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c6
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c9
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c8
-rw-r--r--drivers/net/xen-netback/netback.c1
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/s3fwrn5/uart.c5
-rw-r--r--drivers/nfc/trf7970a.c42
-rw-r--r--drivers/nubus/bus.c3
-rw-r--r--drivers/nvdimm/btt.c17
-rw-r--r--drivers/nvdimm/btt_devs.c6
-rw-r--r--drivers/nvdimm/bus.c4
-rw-r--r--drivers/nvdimm/dax_devs.c4
-rw-r--r--drivers/nvdimm/dimm_devs.c17
-rw-r--r--drivers/nvdimm/namespace_devs.c19
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/nvdimm/virtio_pmem.c36
-rw-r--r--drivers/nvme/common/auth.c1
-rw-r--r--drivers/nvme/common/keyring.c3
-rw-r--r--drivers/nvme/host/apple.c13
-rw-r--r--drivers/nvme/host/auth.c19
-rw-r--r--drivers/nvme/host/constants.c10
-rw-r--r--drivers/nvme/host/core.c347
-rw-r--r--drivers/nvme/host/fabrics.c24
-rw-r--r--drivers/nvme/host/fabrics.h8
-rw-r--r--drivers/nvme/host/fc.c27
-rw-r--r--drivers/nvme/host/ioctl.c207
-rw-r--r--drivers/nvme/host/multipath.c17
-rw-r--r--drivers/nvme/host/nvme.h122
-rw-r--r--drivers/nvme/host/pci.c31
-rw-r--r--drivers/nvme/host/pr.c2
-rw-r--r--drivers/nvme/host/rdma.c26
-rw-r--r--drivers/nvme/host/sysfs.c168
-rw-r--r--drivers/nvme/host/tcp.c18
-rw-r--r--drivers/nvme/host/zns.c37
-rw-r--r--drivers/nvme/target/configfs.c4
-rw-r--r--drivers/nvme/target/core.c8
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c4
-rw-r--r--drivers/nvme/target/fc.c51
-rw-r--r--drivers/nvme/target/fcloop.c8
-rw-r--r--drivers/nvme/target/loop.c9
-rw-r--r--drivers/nvme/target/passthru.c4
-rw-r--r--drivers/nvme/target/rdma.c20
-rw-r--r--drivers/nvme/target/tcp.c35
-rw-r--r--drivers/nvme/target/trace.c6
-rw-r--r--drivers/nvme/target/trace.h28
-rw-r--r--drivers/nvmem/Kconfig1
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c360
-rw-r--r--drivers/nvmem/imx-ocotp.c11
-rw-r--r--drivers/nvmem/internals.h59
-rw-r--r--drivers/nvmem/layouts.c201
-rw-r--r--drivers/nvmem/layouts/Kconfig8
-rw-r--r--drivers/nvmem/layouts/onie-tlv.c29
-rw-r--r--drivers/nvmem/layouts/sl28vpd.c29
-rw-r--r--drivers/nvmem/mtk-efuse.c11
-rw-r--r--drivers/nvmem/stm32-romem.c16
-rw-r--r--drivers/nvmem/u-boot-env.c84
-rw-r--r--drivers/of/address.c1
-rw-r--r--drivers/of/device.c65
-rw-r--r--drivers/of/dynamic.c12
-rw-r--r--drivers/of/module.c8
-rw-r--r--drivers/of/overlay.c2
-rw-r--r--drivers/of/platform.c62
-rw-r--r--drivers/of/property.c7
-rw-r--r--drivers/opp/core.c281
-rw-r--r--drivers/opp/of.c57
-rw-r--r--drivers/opp/opp.h24
-rw-r--r--drivers/opp/ti-opp-supply.c13
-rw-r--r--drivers/parport/share.c8
-rw-r--r--drivers/pci/access.c12
-rw-r--r--drivers/pci/controller/cadence/Kconfig2
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c45
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c19
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h12
-rw-r--r--drivers/pci/controller/dwc/Kconfig2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c14
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c8
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c17
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c100
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c33
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c193
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-bt1.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c203
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c30
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c13
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h111
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-fu740.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c15
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c21
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c21
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c13
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c14
-rw-r--r--drivers/pci/controller/dwc/pcie-visconti.c2
-rw-r--r--drivers/pci/controller/pci-host-common.c4
-rw-r--r--drivers/pci/controller/pci-host-generic.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c7
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c96
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c7
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c18
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c23
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c8
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c52
-rw-r--r--drivers/pci/controller/vmd.c6
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-mhi.c254
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c6
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c16
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c9
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c6
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/pci.c126
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer.c12
-rw-r--r--drivers/pci/pcie/aspm.c65
-rw-r--r--drivers/pci/probe.c218
-rw-r--r--drivers/pci/quirks.c15
-rw-r--r--drivers/pci/setup-bus.c30
-rw-r--r--drivers/pci/setup-res.c72
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c5
-rw-r--r--drivers/pcmcia/db1xxx_ss.c6
-rw-r--r--drivers/pcmcia/electra_cf.c6
-rw-r--r--drivers/pcmcia/omap_cf.c5
-rw-r--r--drivers/pcmcia/pxa2xx_base.c6
-rw-r--r--drivers/pcmcia/sa1100_generic.c8
-rw-r--r--drivers/pcmcia/xxs1500_ss.c6
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c6
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c4
-rw-r--r--drivers/perf/arm_dsu_pmu.c6
-rw-r--r--drivers/perf/arm_pmu.c12
-rw-r--r--drivers/perf/arm_pmuv3.c236
-rw-r--r--drivers/perf/arm_spe_pmu.c22
-rw-r--r--drivers/perf/dwc_pcie_pmu.c792
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c45
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_uc_pmu.c42
-rw-r--r--drivers/perf/riscv_pmu.c4
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c6
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c9
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c25
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c2
-rw-r--r--drivers/phy/phy-can-transceiver.c10
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/phy/qualcomm/Kconfig2
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c166
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c244
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c65
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h17
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h87
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h51
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h78
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c114
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c598
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usbc.c1195
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h8
-rw-r--r--drivers/phy/renesas/Kconfig2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c156
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c36
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c31
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/ti/phy-tusb1210.c23
-rw-r--r--drivers/pinctrl/Kconfig31
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/aspeed/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c4
-rw-r--r--drivers/pinctrl/core.c59
-rw-r--r--drivers/pinctrl/core.h33
-rw-r--r--drivers/pinctrl/devicetree.c8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c44
-rw-r--r--drivers/pinctrl/intel/Kconfig21
-rw-r--r--drivers/pinctrl/intel/Makefile2
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c237
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel-platform.c225
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h14
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c72
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorpoint.c465
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-tangier.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c5
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c13
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2701.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2712.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6795.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8167.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8183.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8186.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8188.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8195.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8365.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8516.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c5
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c5
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.h10
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c6
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c9
-rw-r--r--drivers/pinctrl/pinconf-generic.c16
-rw-r--r--drivers/pinctrl/pinconf.c14
-rw-r--r--drivers/pinctrl/pinconf.h10
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c17
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c458
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c42
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c35
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c6
-rw-r--r--drivers/pinctrl/pinctrl-pef2256.c358
-rw-r--r--drivers/pinctrl/pinctrl-single.c5
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c373
-rw-r--r--drivers/pinctrl/pinctrl-utils.c26
-rw-r--r--drivers/pinctrl/pinctrl-utils.h18
-rw-r--r--drivers/pinctrl/pinmux.c36
-rw-r--r--drivers/pinctrl/pinmux.h20
-rw-r--r--drivers/pinctrl/qcom/Kconfig10
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm26
-rw-r--r--drivers/pinctrl/qcom/Makefile4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c70
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h13
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c46
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c20
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c1014
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c20
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c15
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c24
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c24
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c231
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c1762
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c1876
-rw-r--r--drivers/pinctrl/renesas/core.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c10
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c171
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c280
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c111
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h25
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h14
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c8
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c8
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c3
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c2
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c10
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c8
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c2
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c74
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c74
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c33
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c12
-rw-r--r--drivers/platform/chrome/wilco_ec/event.c4
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c6
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c5
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c2
-rw-r--r--drivers/platform/mips/rs780e-acpi.c12
-rw-r--r--drivers/platform/surface/aggregator/Kconfig2
-rw-r--r--drivers/platform/surface/aggregator/bus.c5
-rw-r--r--drivers/platform/surface/aggregator/controller.h4
-rw-r--r--drivers/platform/surface/aggregator/core.c4
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c4
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.h2
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c30
-rw-r--r--drivers/platform/x86/Kconfig19
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/acer-wmi.c394
-rw-r--r--drivers/platform/x86/amd/Kconfig14
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c9
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c25
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h1
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig2
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile3
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c56
-rw-r--r--drivers/platform/x86/amd/pmf/core.c59
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h198
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c194
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c5
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c494
-rw-r--r--drivers/platform/x86/amd/wbrf.c317
-rw-r--r--drivers/platform/x86/asus-laptop.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c5
-rw-r--r--drivers/platform/x86/dell/Kconfig2
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c4
-rw-r--r--drivers/platform/x86/dell/dcdbas.c2
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c173
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c1
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c6
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c2
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/pmc/Kconfig1
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile2
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c4
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c731
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c2
-rw-r--r--drivers/platform/x86/intel/pmc/core.c277
-rw-r--r--drivers/platform/x86/intel/pmc/core.h89
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c267
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c10
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c519
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c86
-rw-r--r--drivers/platform/x86/intel/pmc/spt.c10
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c48
-rw-r--r--drivers/platform/x86/intel/pmt/class.c43
-rw-r--r--drivers/platform/x86/intel/pmt/class.h30
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c2
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c193
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.h126
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c25
-rw-r--r--drivers/platform/x86/intel/tpmi.c35
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c15
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c13
-rw-r--r--drivers/platform/x86/intel/vbtn.c5
-rw-r--r--drivers/platform/x86/intel/vsec.c106
-rw-r--r--drivers/platform/x86/intel/vsec.h44
-rw-r--r--drivers/platform/x86/intel/wmi/sbl-fw-update.c17
-rw-r--r--drivers/platform/x86/intel/wmi/thunderbolt.c3
-rw-r--r--drivers/platform/x86/intel_ips.c33
-rw-r--r--drivers/platform/x86/lenovo-yogabook.c2
-rw-r--r--drivers/platform/x86/p2sb.c2
-rw-r--r--drivers/platform/x86/serdev_helpers.h80
-rw-r--r--drivers/platform/x86/silicom-platform.c1004
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c34
-rw-r--r--drivers/platform/x86/wmi.c648
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c97
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c124
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h9
-rw-r--r--drivers/pmdomain/Kconfig1
-rw-r--r--drivers/pmdomain/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-ee-pwrc.c16
-rw-r--r--drivers/pmdomain/arm/Kconfig37
-rw-r--r--drivers/pmdomain/arm/Makefile1
-rw-r--r--drivers/pmdomain/arm/scpi_pm_domain.c (renamed from drivers/firmware/scpi_pm_domain.c)0
-rw-r--r--drivers/pmdomain/core.c (renamed from drivers/base/power/domain.c)36
-rw-r--r--drivers/pmdomain/governor.c (renamed from drivers/base/power/domain_governor.c)8
-rw-r--r--drivers/pmdomain/imx/gpc.c28
-rw-r--r--drivers/pmdomain/imx/gpcv2.c6
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c6
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c16
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c6
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c6
-rw-r--r--drivers/pmdomain/qcom/cpr.c6
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c12
-rw-r--r--drivers/pmdomain/ti/omap_prm.c2
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c6
-rw-r--r--drivers/pnp/driver.c2
-rw-r--r--drivers/pnp/isapnp/Kconfig2
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c2
-rw-r--r--drivers/power/reset/as3722-poweroff.c6
-rw-r--r--drivers/power/reset/at91-poweroff.c13
-rw-r--r--drivers/power/reset/at91-reset.c11
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c13
-rw-r--r--drivers/power/reset/atc260x-poweroff.c6
-rw-r--r--drivers/power/reset/gpio-restart.c34
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c5
-rw-r--r--drivers/power/reset/mt6323-poweroff.c6
-rw-r--r--drivers/power/reset/pwr-mlxbf.c20
-rw-r--r--drivers/power/reset/qnap-poweroff.c5
-rw-r--r--drivers/power/reset/regulator-poweroff.c6
-rw-r--r--drivers/power/reset/restart-poweroff.c6
-rw-r--r--drivers/power/reset/rmobile-reset.c5
-rw-r--r--drivers/power/reset/syscon-poweroff.c6
-rw-r--r--drivers/power/reset/tps65086-restart.c12
-rw-r--r--drivers/power/supply/bq24190_charger.c457
-rw-r--r--drivers/power/supply/bq27xxx_battery.c22
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c1
-rw-r--r--drivers/power/supply/power_supply_core.c167
-rw-r--r--drivers/ptp/ptp_ines.c16
-rw-r--r--drivers/ptp/ptp_ocp.c34
-rw-r--r--drivers/pwm/core.c164
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c8
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c8
-rw-r--r--drivers/pwm/pwm-bcm-kona.c2
-rw-r--r--drivers/pwm/pwm-bcm2835.c36
-rw-r--r--drivers/pwm/pwm-berlin.c8
-rw-r--r--drivers/pwm/pwm-brcmstb.c8
-rw-r--r--drivers/pwm/pwm-crc.c16
-rw-r--r--drivers/pwm/pwm-cros-ec.c2
-rw-r--r--drivers/pwm/pwm-dwc.c6
-rw-r--r--drivers/pwm/pwm-img.c10
-rw-r--r--drivers/pwm/pwm-imx-tpm.c10
-rw-r--r--drivers/pwm/pwm-jz4740.c6
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c6
-rw-r--r--drivers/pwm/pwm-lpc32xx.c2
-rw-r--r--drivers/pwm/pwm-mediatek.c2
-rw-r--r--drivers/pwm/pwm-meson.c35
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c20
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c3
-rw-r--r--drivers/pwm/pwm-rockchip.c9
-rw-r--r--drivers/pwm/pwm-samsung.c6
-rw-r--r--drivers/pwm/pwm-sti.c2
-rw-r--r--drivers/pwm/pwm-stm32-lp.c10
-rw-r--r--drivers/pwm/pwm-stm32.c75
-rw-r--r--drivers/pwm/pwm-stmpe.c14
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c6
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c8
-rw-r--r--drivers/pwm/pwm-twl-led.c6
-rw-r--r--drivers/pwm/pwm-twl.c4
-rw-r--r--drivers/pwm/pwm-vt8500.c4
-rw-r--r--drivers/pwm/sysfs.c12
-rw-r--r--drivers/rapidio/devices/tsi721.c67
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c4
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/arizona-ldo1.c8
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c5
-rw-r--r--drivers/regulator/core.c74
-rw-r--r--drivers/regulator/db8500-prcmu.c6
-rw-r--r--drivers/regulator/event.c91
-rw-r--r--drivers/regulator/of_regulator.c9
-rw-r--r--drivers/regulator/palmas-regulator.c2
-rw-r--r--drivers/regulator/pwm-regulator.c44
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c177
-rw-r--r--drivers/regulator/qcom_smd-regulator.c35
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c34
-rw-r--r--drivers/regulator/regnl.h13
-rw-r--r--drivers/regulator/rk808-regulator.c10
-rw-r--r--drivers/regulator/stm32-vrefbuf.c6
-rw-r--r--drivers/regulator/stpmic1_regulator.c2
-rw-r--r--drivers/regulator/uniphier-regulator.c6
-rw-r--r--drivers/regulator/userspace-consumer.c6
-rw-r--r--drivers/regulator/virtual.c6
-rw-r--r--drivers/regulator/wm8350-regulator.c6
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c19
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c87
-rw-r--r--drivers/reset/reset-brcmstb.c3
-rw-r--r--drivers/reset/reset-meson-audio-arb.c4
-rw-r--r--drivers/reset/reset-meson.c1
-rw-r--r--drivers/reset/reset-npcm.c5
-rw-r--r--drivers/reset/reset-qcom-aoss.c4
-rw-r--r--drivers/reset/reset-qcom-pdc.c4
-rw-r--r--drivers/reset/reset-simple.c3
-rw-r--r--drivers/reset/reset-sunplus.c3
-rw-r--r--drivers/reset/reset-uniphier-glue.c3
-rw-r--r--drivers/reset/sti/reset-syscfg.c11
-rw-r--r--drivers/rtc/Kconfig37
-rw-r--r--drivers/rtc/Makefile3
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/rtc-ac100.c4
-rw-r--r--drivers/rtc/rtc-da9063.c88
-rw-r--r--drivers/rtc/rtc-ds3232.c4
-rw-r--r--drivers/rtc/rtc-ma35d1.c304
-rw-r--r--drivers/rtc/rtc-max31335.c697
-rw-r--r--drivers/rtc/rtc-nct3018y.c52
-rw-r--r--drivers/rtc/rtc-rv8803.c36
-rw-r--r--drivers/rtc/rtc-tps6594.c454
-rw-r--r--drivers/s390/block/dasd.c6
-rw-r--r--drivers/s390/char/con3215.c25
-rw-r--r--drivers/s390/char/con3270.c12
-rw-r--r--drivers/s390/char/uvdevice.c3
-rw-r--r--drivers/s390/cio/chsc.c18
-rw-r--r--drivers/s390/cio/chsc_sch.c6
-rw-r--r--drivers/s390/cio/cio.c6
-rw-r--r--drivers/s390/cio/cio.h2
-rw-r--r--drivers/s390/cio/css.c36
-rw-r--r--drivers/s390/cio/device.c79
-rw-r--r--drivers/s390/cio/device_pgid.c12
-rw-r--r--drivers/s390/cio/eadm_sch.c36
-rw-r--r--drivers/s390/cio/qdio_main.c28
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c8
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c24
-rw-r--r--drivers/s390/crypto/ap_bus.c72
-rw-r--r--drivers/s390/crypto/ap_bus.h22
-rw-r--r--drivers/s390/crypto/ap_card.c18
-rw-r--r--drivers/s390/crypto/ap_queue.c200
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c13
-rw-r--r--drivers/s390/crypto/zcrypt_api.c16
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c31
-rw-r--r--drivers/s390/net/ism.h7
-rw-r--r--drivers/s390/net/ism_drv.c94
-rw-r--r--drivers/s390/net/qeth_core_main.c38
-rw-r--r--drivers/s390/scsi/zfcp_fc.c15
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/a3000.c5
-rw-r--r--drivers/scsi/a4000t.c5
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h25
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c90
-rw-r--r--drivers/scsi/atari_scsi.c5
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c14
-rw-r--r--drivers/scsi/bvme6000_scsi.c6
-rw-r--r--drivers/scsi/ch.c12
-rw-r--r--drivers/scsi/csiostor/csio_init.c3
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_node.h12
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c28
-rw-r--r--drivers/scsi/fnic/fnic.h71
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c68
-rw-r--r--drivers/scsi/fnic/fnic_isr.c168
-rw-r--r--drivers/scsi/fnic/fnic_main.c145
-rw-r--r--drivers/scsi/fnic/fnic_res.c48
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c873
-rw-r--r--drivers/scsi/fnic/fnic_stats.h3
-rw-r--r--drivers/scsi/fnic/fnic_trace.c11
-rw-r--r--drivers/scsi/fnic/vnic_dev.c4
-rw-r--r--drivers/scsi/fnic/vnic_scsi.h13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c12
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/initio.c3
-rw-r--r--drivers/scsi/ipr.c55
-rw-r--r--drivers/scsi/isci/request.c5
-rw-r--r--drivers/scsi/isci/request.h2
-rw-r--r--drivers/scsi/isci/task.c4
-rw-r--r--drivers/scsi/jazz_esp.c6
-rw-r--r--drivers/scsi/libfc/fc_fcp.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_esp.c6
-rw-r--r--drivers/scsi/mac_scsi.c5
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h33
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c520
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c117
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c21
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h231
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_image.h32
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c35
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c42
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c56
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h44
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c3
-rw-r--r--drivers/scsi/mvme16x_scsi.c6
-rw-r--r--drivers/scsi/myrb.c20
-rw-r--r--drivers/scsi/myrs.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qlogicpti.c6
-rw-r--r--drivers/scsi/scsi_debug.c27
-rw-r--r--drivers/scsi/scsi_error.c1
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/scsi/sd.c51
-rw-r--r--drivers/scsi/sd_zbc.c16
-rw-r--r--drivers/scsi/sg.c20
-rw-r--r--drivers/scsi/sgiwd93.c5
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6
-rw-r--r--drivers/scsi/sni_53c710.c6
-rw-r--r--drivers/scsi/sun3_scsi.c5
-rw-r--r--drivers/scsi/sun3x_esp.c6
-rw-r--r--drivers/scsi/sun_esp.c6
-rw-r--r--drivers/scsi/virtio_scsi.c80
-rw-r--r--drivers/sh/maple/maple.c4
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/apple/Kconfig15
-rw-r--r--drivers/soc/apple/Makefile4
-rw-r--r--drivers/soc/apple/mailbox.c437
-rw-r--r--drivers/soc/apple/mailbox.h48
-rw-r--r--drivers/soc/apple/rtkit-internal.h8
-rw-r--r--drivers/soc/apple/rtkit.c133
-rw-r--r--drivers/soc/fsl/qe/qmc.c637
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c152
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h15
-rw-r--r--drivers/soc/mediatek/mt8188-mmsys.h210
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c39
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h32
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c51
-rw-r--r--drivers/soc/mediatek/mtk-svs.c1681
-rw-r--r--drivers/soc/microchip/Kconfig1
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c33
-rw-r--r--drivers/soc/pxa/ssp.c4
-rw-r--r--drivers/soc/qcom/Kconfig14
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c97
-rw-r--r--drivers/soc/qcom/pmic_glink.c24
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c37
-rw-r--r--drivers/soc/qcom/pmic_pdcharger_ulog.c166
-rw-r--r--drivers/soc/qcom/pmic_pdcharger_ulog.h36
-rw-r--r--drivers/soc/qcom/socinfo.c13
-rw-r--r--drivers/soc/renesas/Kconfig1
-rw-r--r--drivers/soc/renesas/renesas-soc.c4
-rw-r--r--drivers/soc/samsung/exynos-chipid.c1
-rw-r--r--drivers/soc/sifive/Kconfig10
-rw-r--r--drivers/soc/sifive/Makefile3
-rw-r--r--drivers/soc/ti/k3-socinfo.c73
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c33
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c18
-rw-r--r--drivers/soundwire/amd_manager.c19
-rw-r--r--drivers/soundwire/amd_manager.h3
-rw-r--r--drivers/soundwire/dmi-quirks.c8
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c8
-rw-r--r--drivers/soundwire/qcom.c71
-rw-r--r--drivers/soundwire/stream.c10
-rw-r--r--drivers/spi/atmel-quadspi.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c519
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-cadence-xspi.c1
-rw-r--r--drivers/spi/spi-cs42l43.c5
-rw-r--r--drivers/spi/spi-dw-mmio.c1
-rw-r--r--drivers/spi/spi-geni-qcom.c96
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-ingenic.c15
-rw-r--r--drivers/spi/spi-intel.c10
-rw-r--r--drivers/spi/spi-ljca.c2
-rw-r--r--drivers/spi/spi-mem.c53
-rw-r--r--drivers/spi/spi-mpc52xx.c1
-rw-r--r--drivers/spi/spi-mxs.c3
-rw-r--r--drivers/spi/spi-npcm-fiu.c2
-rw-r--r--drivers/spi/spi-pci1xxxx.c2
-rw-r--r--drivers/spi/spi-pl022.c382
-rw-r--r--drivers/spi/spi-ppc4xx.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c80
-rw-r--r--drivers/spi/spi-sprd-adi.c32
-rw-r--r--drivers/spi/spi-sprd.c4
-rw-r--r--drivers/spi/spi-st-ssc4.c70
-rw-r--r--drivers/spi/spi-stm32-qspi.c18
-rw-r--r--drivers/spi/spi-stm32.c638
-rw-r--r--drivers/spi/spi-sun4i.c72
-rw-r--r--drivers/spi/spi-sun6i.c148
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c88
-rw-r--r--drivers/spi/spi-synquacer.c82
-rw-r--r--drivers/spi/spi-tegra114.c118
-rw-r--r--drivers/spi/spi-tegra20-sflash.c76
-rw-r--r--drivers/spi/spi-tegra20-slink.c98
-rw-r--r--drivers/spi/spi-tegra210-quad.c80
-rw-r--r--drivers/spi/spi-ti-qspi.c103
-rw-r--r--drivers/spi/spi-topcliff-pch.c226
-rw-r--r--drivers/spi/spi-uniphier.c194
-rw-r--r--drivers/spi/spi-wpcm-fiu.c4
-rw-r--r--drivers/spi/spi-xcomm.c32
-rw-r--r--drivers/spi/spi-xilinx.c58
-rw-r--r--drivers/spi/spi-xlp.c40
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c30
-rw-r--r--drivers/spi/spi-zynq-qspi.c28
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c50
-rw-r--r--drivers/spi/spi.c322
-rw-r--r--drivers/spmi/Makefile2
-rw-r--r--drivers/spmi/hisi-spmi-controller.c32
-rw-r--r--drivers/spmi/spmi-devres.c64
-rw-r--r--drivers/spmi/spmi-mtk-pmif.c34
-rw-r--r--drivers/spmi/spmi-pmic-arb.c87
-rw-r--r--drivers/spmi/spmi.c6
-rw-r--r--drivers/staging/greybus/i2c.c2
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c16
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c16
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c16
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c62
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tpg.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c59
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-base.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c34
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c34
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c15
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c38
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c10
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c32
-rw-r--r--drivers/staging/media/imx/imx-media.h4
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c4
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c16
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c19
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c9
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c11
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c11
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c11
-rw-r--r--drivers/staging/media/rkvdec/Kconfig1
-rw-r--r--drivers/staging/media/starfive/Kconfig5
-rw-r--r--drivers/staging/media/starfive/Makefile2
-rw-r--r--drivers/staging/media/starfive/camss/Kconfig18
-rw-r--r--drivers/staging/media/starfive/camss/Makefile13
-rw-r--r--drivers/staging/media/starfive/camss/TODO.txt4
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c436
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.h134
-rw-r--r--drivers/staging/media/starfive/camss/stf-capture.c603
-rw-r--r--drivers/staging/media/starfive/camss/stf-capture.h86
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp-hw-ops.c445
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.c385
-rw-r--r--drivers/staging/media/starfive/camss/stf-isp.h428
-rw-r--r--drivers/staging/media/starfive/camss/stf-video.c572
-rw-r--r--drivers/staging/media/starfive/camss/stf-video.h100
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig1
-rw-r--r--drivers/staging/media/sunxi/cedrus/TODO23
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c9
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c2
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c2
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c18
-rw-r--r--drivers/staging/media/tegra-video/csi.c22
-rw-r--r--drivers/staging/media/tegra-video/vi.c22
-rw-r--r--drivers/staging/media/tegra-video/vip.c6
-rw-r--r--drivers/staging/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/dot11d.c165
-rw-r--r--drivers/staging/rtl8192e/dot11d.h84
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c41
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c101
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c22
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c228
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h35
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c371
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h22
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c156
-rw-r--r--drivers/staging/rtl8192e/rtllib.h71
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c82
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c387
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c11
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c66
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c3
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c9
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c3
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c60
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c159
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h37
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c203
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h2
-rw-r--r--drivers/staging/sm750fb/sm750.c65
-rw-r--r--drivers/staging/vc04_services/interface/TODO5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c271
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c668
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h54
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c10
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c113
-rw-r--r--drivers/staging/vme_user/Kconfig22
-rw-r--r--drivers/staging/vme_user/vme.c2
-rw-r--r--drivers/staging/vt6655/card.c90
-rw-r--r--drivers/staging/vt6655/card.h18
-rw-r--r--drivers/staging/vt6655/device.h4
-rw-r--r--drivers/staging/vt6655/device_main.c18
-rw-r--r--drivers/staging/vt6655/rxtx.c2
-rw-r--r--drivers/target/target_core_configfs.c48
-rw-r--r--drivers/target/target_core_file.c10
-rw-r--r--drivers/target/target_core_pr.c1
-rw-r--r--drivers/target/target_core_xcopy.c1
-rw-r--r--drivers/tee/optee/Kconfig2
-rw-r--r--drivers/tee/optee/call.c161
-rw-r--r--drivers/tee/optee/core.c62
-rw-r--r--drivers/tee/optee/ffa_abi.c107
-rw-r--r--drivers/tee/optee/optee_ffa.h28
-rw-r--r--drivers/tee/optee/optee_private.h40
-rw-r--r--drivers/tee/optee/smc_abi.c112
-rw-r--r--drivers/tee/tee_core.c8
-rw-r--r--drivers/tee/tee_shm.c78
-rw-r--r--drivers/thermal/Kconfig11
-rw-r--r--drivers/thermal/Makefile3
-rw-r--r--drivers/thermal/amlogic_thermal.c19
-rw-r--r--drivers/thermal/cpuidle_cooling.c4
-rw-r--r--drivers/thermal/gov_power_allocator.c358
-rw-r--r--drivers/thermal/intel/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c8
-rw-r--r--drivers/thermal/intel/intel_hfi.c17
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c32
-rw-r--r--drivers/thermal/loongson2_thermal.c3
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c529
-rw-r--r--drivers/thermal/sun8i_thermal.c13
-rw-r--r--drivers/thermal/thermal_core.c204
-rw-r--r--drivers/thermal/thermal_core.h10
-rw-r--r--drivers/thermal/thermal_debugfs.c840
-rw-r--r--drivers/thermal/thermal_debugfs.h28
-rw-r--r--drivers/thermal/thermal_helpers.c38
-rw-r--r--drivers/thermal/thermal_hwmon.c5
-rw-r--r--drivers/thermal/thermal_netlink.c139
-rw-r--r--drivers/thermal/thermal_netlink.h75
-rw-r--r--drivers/thermal/thermal_of.c18
-rw-r--r--drivers/thermal/thermal_sysfs.c121
-rw-r--r--drivers/thermal/thermal_trace_ipa.h50
-rw-r--r--drivers/thermal/thermal_trip.c95
-rw-r--r--drivers/thunderbolt/domain.c7
-rw-r--r--drivers/thunderbolt/icm.c4
-rw-r--r--drivers/thunderbolt/lc.c45
-rw-r--r--drivers/thunderbolt/nhi.c23
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/path.c13
-rw-r--r--drivers/thunderbolt/quirks.c14
-rw-r--r--drivers/thunderbolt/switch.c211
-rw-r--r--drivers/thunderbolt/tb.c167
-rw-r--r--drivers/thunderbolt/tb.h32
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/tmu.c2
-rw-r--r--drivers/thunderbolt/tunnel.c60
-rw-r--r--drivers/thunderbolt/usb4.c52
-rw-r--r--drivers/thunderbolt/xdomain.c54
-rw-r--r--drivers/tty/amiserial.c16
-rw-r--r--drivers/tty/ehv_bytechan.c18
-rw-r--r--drivers/tty/goldfish.c23
-rw-r--r--drivers/tty/hvc/Kconfig8
-rw-r--r--drivers/tty/hvc/hvc_console.c7
-rw-r--r--drivers/tty/hvc/hvc_console.h8
-rw-r--r--drivers/tty/hvc/hvc_dcc.c32
-rw-r--r--drivers/tty/hvc/hvc_iucv.c18
-rw-r--r--drivers/tty/hvc/hvc_opal.c22
-rw-r--r--drivers/tty/hvc/hvc_riscv_sbi.c46
-rw-r--r--drivers/tty/hvc/hvc_rtas.c11
-rw-r--r--drivers/tty/hvc/hvc_udbg.c9
-rw-r--r--drivers/tty/hvc/hvc_vio.c18
-rw-r--r--drivers/tty/hvc/hvc_xen.c23
-rw-r--r--drivers/tty/hvc/hvsi_lib.c20
-rw-r--r--drivers/tty/ipwireless/main.h3
-rw-r--r--drivers/tty/mips_ejtag_fdc.c12
-rw-r--r--drivers/tty/moxa.c15
-rw-r--r--drivers/tty/mxser.c8
-rw-r--r--drivers/tty/n_gsm.c17
-rw-r--r--drivers/tty/n_hdlc.c10
-rw-r--r--drivers/tty/nozomi.c27
-rw-r--r--drivers/tty/serdev/core.c31
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c5
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c6
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c6
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c5
-rw-r--r--drivers/tty/serial/8250/8250_core.c5
-rw-r--r--drivers/tty/serial/8250/8250_dw.c6
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c16
-rw-r--r--drivers/tty/serial/8250/8250_em.c5
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c8
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c5
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c5
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c6
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c7
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/8250/8250_of.c49
-rw-r--r--drivers/tty/serial/8250/8250_omap.c5
-rw-r--r--drivers/tty/serial/8250/8250_pci.c7
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c210
-rw-r--r--drivers/tty/serial/8250/8250_port.c24
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c6
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c6
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c6
-rw-r--r--drivers/tty/serial/8250/serial_cs.c6
-rw-r--r--drivers/tty/serial/Kconfig5
-rw-r--r--drivers/tty/serial/altera_jtaguart.c6
-rw-r--r--drivers/tty/serial/altera_uart.c8
-rw-r--r--drivers/tty/serial/amba-pl011.c272
-rw-r--r--drivers/tty/serial/ar933x_uart.c6
-rw-r--r--drivers/tty/serial/atmel_serial.c22
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/clps711x.c6
-rw-r--r--drivers/tty/serial/cpm_uart.c6
-rw-r--r--drivers/tty/serial/digicolor-usart.c6
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c27
-rw-r--r--drivers/tty/serial/esp32_acm.c7
-rw-r--r--drivers/tty/serial/esp32_uart.c16
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c6
-rw-r--r--drivers/tty/serial/fsl_lpuart.c5
-rw-r--r--drivers/tty/serial/imx.c23
-rw-r--r--drivers/tty/serial/jsm/jsm.h5
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c36
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c40
-rw-r--r--drivers/tty/serial/lantiq.c6
-rw-r--r--drivers/tty/serial/liteuart.c6
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c6
-rw-r--r--drivers/tty/serial/ma35d1_serial.c7
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mcf.c6
-rw-r--r--drivers/tty/serial/meson_uart.c8
-rw-r--r--drivers/tty/serial/milbeaut_usio.c6
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c7
-rw-r--r--drivers/tty/serial/msm_serial.c25
-rw-r--r--drivers/tty/serial/mxs-auart.c28
-rw-r--r--drivers/tty/serial/omap-serial.c6
-rw-r--r--drivers/tty/serial/owl-uart.c6
-rw-r--r--drivers/tty/serial/pic32_uart.c7
-rw-r--r--drivers/tty/serial/pmac_zilog.c14
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c6
-rw-r--r--drivers/tty/serial/rda-uart.c6
-rw-r--r--drivers/tty/serial/rp2.c1
-rw-r--r--drivers/tty/serial/sa1100.c6
-rw-r--r--drivers/tty/serial/samsung_tty.c22
-rw-r--r--drivers/tty/serial/sc16is7xx.c252
-rw-r--r--drivers/tty/serial/sccnxp.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c5
-rw-r--r--drivers/tty/serial/serial_base.h4
-rw-r--r--drivers/tty/serial/serial_core.c48
-rw-r--r--drivers/tty/serial/serial_port.c34
-rw-r--r--drivers/tty/serial/serial_txx9.c5
-rw-r--r--drivers/tty/serial/sh-sci.c13
-rw-r--r--drivers/tty/serial/sifive.c10
-rw-r--r--drivers/tty/serial/sprd_serial.c6
-rw-r--r--drivers/tty/serial/st-asc.c6
-rw-r--r--drivers/tty/serial/stm32-usart.c19
-rw-r--r--drivers/tty/serial/sunhv.c6
-rw-r--r--drivers/tty/serial/sunplus-uart.c6
-rw-r--r--drivers/tty/serial/sunsab.c22
-rw-r--r--drivers/tty/serial/sunsu.c6
-rw-r--r--drivers/tty/serial/sunzilog.c6
-rw-r--r--drivers/tty/serial/tegra-tcu.c6
-rw-r--r--drivers/tty/serial/timbuart.c6
-rw-r--r--drivers/tty/serial/uartlite.c20
-rw-r--r--drivers/tty/serial/ucc_uart.c34
-rw-r--r--drivers/tty/serial/xilinx_uartps.c7
-rw-r--r--drivers/tty/sysrq.c19
-rw-r--r--drivers/tty/tty_io.c46
-rw-r--r--drivers/tty/tty_port.c18
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/keyboard.c10
-rw-r--r--drivers/ufs/core/ufs-fault-injection.c19
-rw-r--r--drivers/ufs/core/ufs-fault-injection.h13
-rw-r--r--drivers/ufs/core/ufs-sysfs.c151
-rw-r--r--drivers/ufs/core/ufshcd.c154
-rw-r--r--drivers/ufs/host/ufs-exynos.c7
-rw-r--r--drivers/ufs/host/ufs-hisi.c11
-rw-r--r--drivers/ufs/host/ufs-mediatek.c12
-rw-r--r--drivers/ufs/host/ufs-qcom.c494
-rw-r--r--drivers/ufs/host/ufs-qcom.h57
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c69
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h10
-rw-r--r--drivers/usb/atm/ueagle-atm.c19
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c7
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c16
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h354
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c17
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/udc.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c6
-rw-r--r--drivers/usb/core/driver.c64
-rw-r--r--drivers/usb/core/generic.c16
-rw-r--r--drivers/usb/core/hub.c15
-rw-r--r--drivers/usb/core/port.c4
-rw-r--r--drivers/usb/core/quirks.c7
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h8
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c4
-rw-r--r--drivers/usb/dwc2/params.c1
-rw-r--r--drivers/usb/dwc3/core.c21
-rw-r--r--drivers/usb/dwc3/core.h5
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c4
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c22
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c11
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c12
-rw-r--r--drivers/usb/fotg210/fotg210-udc.c6
-rw-r--r--drivers/usb/gadget/configfs.c13
-rw-r--r--drivers/usb/gadget/function/f_fs.c5
-rw-r--r--drivers/usb/gadget/function/f_midi.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c73
-rw-r--r--drivers/usb/gadget/function/f_tcm.c4
-rw-r--r--drivers/usb/gadget/function/f_uac1.c83
-rw-r--r--drivers/usb/gadget/function/f_uac2.c6
-rw-r--r--drivers/usb/gadget/function/f_uvc.c11
-rw-r--r--drivers/usb/gadget/function/f_uvc.h2
-rw-r--r--drivers/usb/gadget/function/u_ncm.h2
-rw-r--r--drivers/usb/gadget/function/uvc.h14
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c16
-rw-r--r--drivers/usb/gadget/function/uvc_video.c453
-rw-r--r--drivers/usb/gadget/function/uvc_video.h3
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c13
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-debug.h138
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c15
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c13
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c13
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c12
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c2
-rw-r--r--drivers/usb/host/max3421-hcd.c18
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/xhci-dbgcap.c132
-rw-r--r--drivers/usb/host/xhci-dbgcap.h1
-rw-r--r--drivers/usb/host/xhci-debugfs.c2
-rw-r--r--drivers/usb/host/xhci-mem.c114
-rw-r--r--drivers/usb/host/xhci-pci.c140
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c163
-rw-r--r--drivers/usb/host/xhci.c216
-rw-r--r--drivers/usb/host/xhci.h22
-rw-r--r--drivers/usb/misc/iowarrior.c1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c32
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h6
-rw-r--r--drivers/usb/misc/qcom_eud.c3
-rw-r--r--drivers/usb/misc/yurex.c14
-rw-r--r--drivers/usb/mon/mon_stat.c6
-rw-r--r--drivers/usb/mon/mon_text.c28
-rw-r--r--drivers/usb/phy/phy-generic.c14
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c8
-rw-r--r--drivers/usb/serial/bus.c2
-rw-r--r--drivers/usb/serial/option.c40
-rw-r--r--drivers/usb/serial/usb-serial.c2
-rw-r--r--drivers/usb/storage/sierra_ms.c16
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/typec/class.c13
-rw-r--r--drivers/usb/typec/mux/Kconfig12
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/nb7vpq904m.c44
-rw-r--r--drivers/usb/typec/mux/wcd939x-usbss.c779
-rw-r--r--drivers/usb/typec/pd.c6
-rw-r--r--drivers/usb/typec/tcpm/Kconfig1
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c41
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c20
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c420
-rw-r--r--drivers/usb/typec/tipd/core.c177
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h18
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c29
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h14
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c15
-rw-r--r--drivers/usb/usbip/stub_main.c8
-rw-r--r--drivers/usb/usbip/vudc.h2
-rw-r--r--drivers/usb/usbip/vudc_dev.c3
-rw-r--r--drivers/usb/usbip/vudc_main.c2
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h10
-rw-r--r--drivers/vdpa/mlx5/core/mr.c69
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c209
-rw-r--r--drivers/vdpa/vdpa.c4
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c2
-rw-r--r--drivers/vfio/Kconfig10
-rw-r--r--drivers/vfio/Makefile1
-rw-r--r--drivers/vfio/debugfs.c92
-rw-r--r--drivers/vfio/pci/Kconfig2
-rw-r--r--drivers/vfio/pci/Makefile2
-rw-r--r--drivers/vfio/pci/pds/dirty.c309
-rw-r--r--drivers/vfio/pci/pds/dirty.h18
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c57
-rw-r--r--drivers/vfio/pci/virtio/Kconfig15
-rw-r--r--drivers/vfio/pci/virtio/Makefile3
-rw-r--r--drivers/vfio/pci/virtio/main.c576
-rw-r--r--drivers/vfio/vfio.h14
-rw-r--r--drivers/vfio/vfio_iommu_type1.c8
-rw-r--r--drivers/vfio/vfio_main.c4
-rw-r--r--drivers/vhost/vdpa.c26
-rw-r--r--drivers/vhost/vhost.c24
-rw-r--r--drivers/video/backlight/Kconfig18
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/cr_bllcd.c264
-rw-r--r--drivers/video/backlight/hx8357.c74
-rw-r--r--drivers/video/backlight/ili922x.c8
-rw-r--r--drivers/video/backlight/lm3630a_bl.c2
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/mp3309c.c444
-rw-r--r--drivers/video/backlight/pwm_bl.c46
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/fbdev/Kconfig125
-rw-r--r--drivers/video/fbdev/Makefile2
-rw-r--r--drivers/video/fbdev/amba-clcd.c984
-rw-r--r--drivers/video/fbdev/arcfb.c114
-rw-r--r--drivers/video/fbdev/au1100fb.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c11
-rw-r--r--drivers/video/fbdev/clps711x-fb.c4
-rw-r--r--drivers/video/fbdev/core/Kconfig7
-rw-r--r--drivers/video/fbdev/core/Makefile2
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c3
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c3
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c3
-rw-r--r--drivers/video/fbdev/core/fb_chrdev.c68
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c1
-rw-r--r--drivers/video/fbdev/core/fb_defio.c4
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c36
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c6
-rw-r--r--drivers/video/fbdev/core/fbcon.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c7
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c3
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c3
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c3
-rw-r--r--drivers/video/fbdev/cyber2000fb.c10
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c2
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c2
-rw-r--r--drivers/video/fbdev/gbefb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c13
-rw-r--r--drivers/video/fbdev/hyperv_fb.c28
-rw-r--r--drivers/video/fbdev/i740fb.c1
-rw-r--r--drivers/video/fbdev/imxfb.c156
-rw-r--r--drivers/video/fbdev/intelfb/Makefile8
-rw-r--r--drivers/video/fbdev/intelfb/intelfb.h382
-rw-r--r--drivers/video/fbdev/intelfb/intelfb_i2c.c209
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c1680
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.c2115
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.h609
-rw-r--r--drivers/video/fbdev/matrox/i2c-matroxfb.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/ps3fb.c11
-rw-r--r--drivers/video/fbdev/s3fb.c1
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/sbuslib.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c16
-rw-r--r--drivers/video/fbdev/simplefb.c132
-rw-r--r--drivers/video/fbdev/sis/sis_main.c37
-rw-r--r--drivers/video/fbdev/smscufx.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/fbdev/stifb.c111
-rw-r--r--drivers/video/fbdev/tdfxfb.c1
-rw-r--r--drivers/video/fbdev/tridentfb.c1
-rw-r--r--drivers/video/fbdev/udlfb.c2
-rw-r--r--drivers/video/fbdev/vermilion/Makefile6
-rw-r--r--drivers/video/fbdev/vermilion/cr_pll.c195
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c1173
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.h245
-rw-r--r--drivers/video/fbdev/vfb.c10
-rw-r--r--drivers/video/fbdev/via/accel.c4
-rw-r--r--drivers/video/fbdev/via/via_i2c.c1
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c5
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/logo/pnmtologo.c6
-rw-r--r--drivers/video/sticore.c5
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c6
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c154
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c22
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c33
-rw-r--r--drivers/virtio/Kconfig5
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c29
-rw-r--r--drivers/virtio/virtio_balloon.c59
-rw-r--r--drivers/virtio/virtio_mem.c8
-rw-r--r--drivers/virtio/virtio_pci_admin_legacy_io.c244
-rw-r--r--drivers/virtio/virtio_pci_common.c48
-rw-r--r--drivers/virtio/virtio_pci_common.h42
-rw-r--r--drivers/virtio/virtio_pci_modern.c259
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c24
-rw-r--r--drivers/w1/masters/Kconfig11
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/amd_axi_w1.c396
-rw-r--r--drivers/w1/masters/ds2490.c25
-rw-r--r--drivers/w1/masters/w1-gpio.c118
-rw-r--r--drivers/w1/slaves/w1_ds2433.c162
-rw-r--r--drivers/watchdog/at91sam9_wdt.c12
-rw-r--r--drivers/watchdog/hpwdt.c7
-rw-r--r--drivers/watchdog/it87_wdt.c15
-rw-r--r--drivers/watchdog/mlx_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c42
-rw-r--r--drivers/watchdog/s3c2410_wdt.c85
-rw-r--r--drivers/watchdog/starfive-wdt.c6
-rw-r--r--drivers/watchdog/txx9wdt.c11
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/privcmd.c15
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c66
5235 files changed, 329012 insertions, 141315 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index d828329c2..37fd6ce3b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -71,9 +71,8 @@ obj-y += gpu/
obj-$(CONFIG_CONNECTOR) += connector/
-# i810fb and intelfb depend on char/agp/
+# i810fb depends on char/agp/
obj-$(CONFIG_FB_I810) += video/fbdev/i810/
-obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
obj-$(CONFIG_PARPORT) += parport/
obj-y += base/ block/ misc/ mfd/ nfc/
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 294b572a9..24cac4c02 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -11,6 +11,7 @@
#include <linux/idr.h>
#include <drm/drm_accel.h>
+#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 83821e175..a73bd4be9 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1051,10 +1051,12 @@ static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
if (!prop->cpucp_info.eq_health_check_supported)
return 0;
- if (hdev->eq_heartbeat_received)
+ if (hdev->eq_heartbeat_received) {
hdev->eq_heartbeat_received = false;
- else
+ } else {
+ dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
return -EIO;
+ }
return 0;
}
@@ -2038,7 +2040,7 @@ device_reset:
if (ctx)
hl_ctx_put(ctx);
- return hl_device_reset(hdev, flags);
+ return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD);
}
static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
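Editor's note: the hl_device_eq_heartbeat_check() change above is a consume-a-flag heartbeat pattern — the event path sets a flag, the periodic checker consumes it and fails if nothing arrived since the last check. A minimal userspace sketch of that pattern follows; the names (heartbeat_received, check_heartbeat) are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

static bool heartbeat_received;

/* Called from the event path whenever a heartbeat event arrives. */
static void on_heartbeat_event(void)
{
	heartbeat_received = true;
}

/* Called periodically; returns 0 if alive, -EIO if the event was missed. */
static int check_heartbeat(void)
{
	if (heartbeat_received) {
		heartbeat_received = false;	/* consume, re-arm for next period */
		return 0;
	}
	fprintf(stderr, "EQ heartbeat event was not received!\n");
	return -EIO;
}

int main(void)
{
	on_heartbeat_event();
	printf("first check: %d\n", check_heartbeat());	/* 0 */
	printf("second check: %d\n", check_heartbeat());	/* -EIO */
	return 0;
}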
diff --git a/drivers/accel/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index 47e838413..3558a6a8e 100644
--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -646,39 +646,27 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
return rc;
}
-static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
- u32 sts_val)
+static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val, u32 sts_val)
{
bool err_exists = false;
if (!(err_val & CPU_BOOT_ERR0_ENABLED))
return false;
- if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
- dev_err(hdev->dev,
- "Device boot error - DRAM initialization failed\n");
- err_exists = true;
- }
+ if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ dev_err(hdev->dev, "Device boot error - DRAM initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
+ if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
- err_exists = true;
- }
- if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
- dev_err(hdev->dev,
- "Device boot error - Thermal Sensor initialization failed\n");
- err_exists = true;
- }
+ if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ dev_err(hdev->dev, "Device boot error - Thermal Sensor initialization failed\n");
if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
if (hdev->bmc_enable) {
- dev_err(hdev->dev,
- "Device boot error - Skipped waiting for BMC\n");
- err_exists = true;
+ dev_err(hdev->dev, "Device boot error - Skipped waiting for BMC\n");
} else {
- dev_info(hdev->dev,
- "Device boot message - Skipped waiting for BMC\n");
+ dev_info(hdev->dev, "Device boot message - Skipped waiting for BMC\n");
/* This is an info so we don't want it to disable the
* device
*/
@@ -686,48 +674,29 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
}
}
- if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
- dev_err(hdev->dev,
- "Device boot error - Serdes data from BMC not available\n");
- err_exists = true;
- }
+ if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+ dev_err(hdev->dev, "Device boot error - Serdes data from BMC not available\n");
- if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
- dev_err(hdev->dev,
- "Device boot error - NIC F/W initialization failed\n");
- err_exists = true;
- }
+ if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+ dev_err(hdev->dev, "Device boot error - NIC F/W initialization failed\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
- dev_err(hdev->dev,
- "Device boot warning - security not ready\n");
- err_exists = true;
- }
+ if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
+ dev_err(hdev->dev, "Device boot warning - security not ready\n");
- if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
+ if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
dev_err(hdev->dev, "Device boot error - security failure\n");
- err_exists = true;
- }
- if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
+ if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
dev_err(hdev->dev, "Device boot error - eFuse failure\n");
- err_exists = true;
- }
- if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
+ if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL)
dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
- err_exists = true;
- }
- if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
+ if (err_val & CPU_BOOT_ERR0_PLL_FAIL)
dev_err(hdev->dev, "Device boot error - PLL failure\n");
- err_exists = true;
- }
- if (err_val & CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL) {
+ if (err_val & CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL)
dev_err(hdev->dev, "Device boot error - Failed to set threshold for temperature sensor\n");
- err_exists = true;
- }
if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
/* Ignore this bit, don't prevent driver loading */
@@ -735,52 +704,32 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
}
- if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
+ if (err_val & CPU_BOOT_ERR0_BINNING_FAIL)
dev_err(hdev->dev, "Device boot error - binning failure\n");
- err_exists = true;
- }
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+ dev_err(hdev->dev, "Device boot warning - Skipped DRAM initialization\n");
+
+ if (err_val & CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
+ dev_err(hdev->dev, "Device boot error - ARC memory scrub failed\n");
+
+ /* All warnings should go here in order not to reach the unknown error validation */
if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
err_exists = true;
}
- /* All warnings should go here in order not to reach the unknown error validation */
- if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
- dev_warn(hdev->dev,
- "Device boot warning - Skipped DRAM initialization\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
- }
+ if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL)
+ dev_warn(hdev->dev, "Device boot warning - Failed to load preboot primary image\n");
- if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
- dev_warn(hdev->dev,
- "Device boot warning - Failed to load preboot primary image\n");
- /* This is a warning so we don't want it to disable the
- * device as we have a secondary preboot image
- */
- err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
- }
-
- if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
- dev_warn(hdev->dev,
- "Device boot warning - TPM failure\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
- }
+ if (err_val & CPU_BOOT_ERR0_TPM_FAIL)
+ dev_warn(hdev->dev, "Device boot warning - TPM failure\n");
- if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
- dev_err(hdev->dev,
- "Device boot error - unknown ERR0 error 0x%08x\n", err_val);
+ if (err_val & CPU_BOOT_ERR_FATAL_MASK)
err_exists = true;
- }
/* return error only if it's in the predefined mask */
if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
@@ -3295,6 +3244,14 @@ int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_in
HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
}
+int hl_fw_get_dev_info_signed(struct hl_device *hdev,
+ struct cpucp_dev_info_signed *dev_info_signed, u32 nonce)
+{
+ return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_INFO_SIGNED_GET, dev_info_signed,
+ sizeof(struct cpucp_dev_info_signed), nonce,
+ HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
+}
+
int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
dma_addr_t buff, u32 *size)
{
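Editor's note: the fw_report_boot_dev0() refactor above drops the per-flag err_exists bookkeeping in favor of logging every set bit and then testing a single fatal mask (CPU_BOOT_ERR_FATAL_MASK). A standalone sketch of that classification style follows; the bit values and FATAL_MASK below are made up for illustration — the real definitions live in the habanalabs firmware headers.

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define ERR_DRAM_INIT_FAIL	(1u << 0)	/* fatal */
#define ERR_FIT_CORRUPTED	(1u << 1)	/* fatal */
#define ERR_PRI_IMG_VER_FAIL	(1u << 2)	/* warning only */
#define ERR_TPM_FAIL		(1u << 3)	/* warning only */

#define ERR_FATAL_MASK		(ERR_DRAM_INIT_FAIL | ERR_FIT_CORRUPTED)

/* Log every reported bit, but fail the boot only on fatal ones. */
static bool report_boot_errors(uint32_t err_val)
{
	if (err_val & ERR_DRAM_INIT_FAIL)
		fprintf(stderr, "boot error - DRAM initialization failed\n");
	if (err_val & ERR_FIT_CORRUPTED)
		fprintf(stderr, "boot error - FIT image corrupted\n");
	if (err_val & ERR_PRI_IMG_VER_FAIL)
		fprintf(stderr, "boot warning - primary preboot image failed\n");
	if (err_val & ERR_TPM_FAIL)
		fprintf(stderr, "boot warning - TPM failure\n");

	/* One mask test replaces the per-flag err_exists = true lines. */
	return (err_val & ERR_FATAL_MASK) != 0;
}

int main(void)
{
	printf("warning only -> fatal? %d\n", report_boot_errors(ERR_TPM_FAIL));
	printf("DRAM fail    -> fatal? %d\n", report_boot_errors(ERR_DRAM_INIT_FAIL));
	return 0;
}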
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index d0fd77bb6..41c7aac2f 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -2547,7 +2547,7 @@ struct hl_state_dump_specs {
* DEVICES
*/
-#define HL_STR_MAX 32
+#define HL_STR_MAX 64
#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
@@ -3521,6 +3521,9 @@ struct hl_device {
u8 heartbeat;
};
+/* Retrieve the PCI device name for a PCI device, or the dev name when in the simulator */
+#define HL_DEV_NAME(hdev) \
+ ((hdev)->pdev ? dev_name(&(hdev)->pdev->dev) : "NA-DEVICE")
/**
* struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
@@ -3596,6 +3599,14 @@ static inline bool hl_is_fw_sw_ver_below(struct hl_device *hdev, u32 fw_sw_major
return false;
}
+static inline bool hl_is_fw_sw_ver_equal_or_greater(struct hl_device *hdev, u32 fw_sw_major,
+ u32 fw_sw_minor)
+{
+ return (hdev->fw_sw_major_ver > fw_sw_major ||
+ (hdev->fw_sw_major_ver == fw_sw_major &&
+ hdev->fw_sw_minor_ver >= fw_sw_minor));
+}
+
/*
* Kernel module functions that can be accessed by entire module
*/
@@ -3956,6 +3967,8 @@ long hl_fw_get_max_power(struct hl_device *hdev);
void hl_fw_set_max_power(struct hl_device *hdev);
int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
u32 nonce);
+int hl_fw_get_dev_info_signed(struct hl_device *hdev,
+ struct cpucp_dev_info_signed *dev_info_signed, u32 nonce);
int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev, int sensor_index, u32 attr, long value);
int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value);
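Editor's note: the new hl_is_fw_sw_ver_equal_or_greater() helper above is a lexicographic (major, minor) comparison. A self-contained sketch with illustrative inputs:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static bool ver_equal_or_greater(uint32_t cur_major, uint32_t cur_minor,
				 uint32_t major, uint32_t minor)
{
	/* Newer major wins outright; equal major falls back to minor. */
	return cur_major > major ||
	       (cur_major == major && cur_minor >= minor);
}

int main(void)
{
	printf("%d\n", ver_equal_or_greater(1, 12, 1, 11));	/* 1: newer minor */
	printf("%d\n", ver_equal_or_greater(1, 11, 1, 11));	/* 1: equal */
	printf("%d\n", ver_equal_or_greater(0, 99, 1, 0));	/* 0: older major */
	return 0;
}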
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 51fb04bbe..e542fd40e 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -673,6 +673,38 @@ static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
+static void hl_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct hl_device *hdev;
+
+ hdev = pci_get_drvdata(pdev);
+ if (!hdev)
+ return;
+
+ hdev->disabled = true;
+}
+
+static void hl_pci_reset_done(struct pci_dev *pdev)
+{
+ struct hl_device *hdev;
+ u32 flags;
+
+ hdev = pci_get_drvdata(pdev);
+ if (!hdev)
+ return;
+
+ /*
+ * Schedule a thread to trigger hard reset.
+	 * The reason for this handler is the rare case where the driver is up
+	 * and an FLR occurs. This is valid only when working with no VM, so FW handles
+	 * the FLR and resets the device. FW will go back to the preboot stage, so the
+	 * driver needs to perform a hard reset in order to load the FW fit again.
+ */
+ flags = HL_DRV_RESET_HARD | HL_DRV_RESET_BYPASS_REQ_TO_FW;
+
+ hl_device_reset(hdev, flags);
+}
+
static const struct dev_pm_ops hl_pm_ops = {
.suspend = hl_pmops_suspend,
.resume = hl_pmops_resume,
@@ -682,6 +714,8 @@ static const struct pci_error_handlers hl_pci_err_handler = {
.error_detected = hl_pci_err_detected,
.slot_reset = hl_pci_err_slot_reset,
.resume = hl_pci_err_resume,
+ .reset_prepare = hl_pci_reset_prepare,
+ .reset_done = hl_pci_reset_done,
};
static struct pci_driver hl_pci_driver = {
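Editor's note: the hunk above wires FLR handling through the PCI core's reset hooks. The general shape is a driver filling in reset_prepare/reset_done in its struct pci_error_handlers, so the core calls it around a function-level reset. Kernel-style sketch; the mydrv_* names are placeholders, not habanalabs symbols, and probe/remove plumbing is omitted.

#include <linux/pci.h>

static void mydrv_reset_prepare(struct pci_dev *pdev)
{
	/* Quiesce the device before the PCI core issues the FLR. */
}

static void mydrv_reset_done(struct pci_dev *pdev)
{
	/* Re-initialize; firmware is back in its boot stage after the FLR. */
}

static const struct pci_error_handlers mydrv_err_handler = {
	.reset_prepare = mydrv_reset_prepare,
	.reset_done    = mydrv_reset_done,
};

static struct pci_driver mydrv_pci_driver = {
	.name        = "mydrv",
	.err_handler = &mydrv_err_handler,
};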
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index a7cd625d8..1dd6e2317 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -19,6 +19,9 @@
#include <asm/msr.h>
+/* make sure there is space for all the signed info */
+static_assert(sizeof(struct cpucp_info) <= SEC_DEV_INFO_BUF_SZ);
+
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
@@ -719,6 +722,53 @@ free_sec_attest_info:
return rc;
}
+static int dev_info_signed(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct cpucp_dev_info_signed *dev_info_signed;
+ struct hl_info_signed *info;
+ u32 max_size = args->return_size;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ dev_info_signed = kzalloc(sizeof(*dev_info_signed), GFP_KERNEL);
+ if (!dev_info_signed)
+ return -ENOMEM;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto free_dev_info_signed;
+ }
+
+ rc = hl_fw_get_dev_info_signed(hpriv->hdev,
+ dev_info_signed, args->sec_attest_nonce);
+ if (rc)
+ goto free_info;
+
+ info->nonce = le32_to_cpu(dev_info_signed->nonce);
+ info->info_sig_len = dev_info_signed->info_sig_len;
+ info->pub_data_len = le16_to_cpu(dev_info_signed->pub_data_len);
+ info->certificate_len = le16_to_cpu(dev_info_signed->certificate_len);
+ info->dev_info_len = sizeof(struct cpucp_info);
+ memcpy(&info->info_sig, &dev_info_signed->info_sig, sizeof(info->info_sig));
+ memcpy(&info->public_data, &dev_info_signed->public_data, sizeof(info->public_data));
+ memcpy(&info->certificate, &dev_info_signed->certificate, sizeof(info->certificate));
+ memcpy(&info->dev_info, &dev_info_signed->info, info->dev_info_len);
+
+ rc = copy_to_user(out, info, min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;
+
+free_info:
+ kfree(info);
+free_dev_info_signed:
+ kfree(dev_info_signed);
+
+ return rc;
+}
+
+
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
int rc;
@@ -1089,6 +1139,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_FW_GENERIC_REQ:
return send_fw_generic_request(hdev, args);
+ case HL_INFO_DEV_SIGNED:
+ return dev_info_signed(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
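Editor's note: the dev_info_signed() ioctl above caps the user copy with min_t(size_t, max_size, sizeof(*info)), so an undersized caller buffer gets a truncated-but-safe copy. A minimal userspace analogue of that truncating-copy pattern follows; the struct and names are illustrative.

#include <stdio.h>
#include <string.h>

struct info {
	unsigned int nonce;
	char payload[64];
};

/* Copy at most dst_size bytes of *src; returns the bytes actually copied. */
static size_t copy_capped(void *dst, size_t dst_size, const struct info *src)
{
	size_t n = dst_size < sizeof(*src) ? dst_size : sizeof(*src);

	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	struct info src = { .nonce = 42, .payload = "signed device info" };
	char small[8];	/* deliberately smaller than struct info */

	printf("copied %zu of %zu bytes\n",
	       copy_capped(small, sizeof(small), &src), sizeof(src));
	return 0;
}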
diff --git a/drivers/accel/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 859805621..1ee2ee07e 100644
--- a/drivers/accel/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
@@ -578,10 +578,6 @@ int hl_get_temperature(struct hl_device *hdev,
CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
-
- dev_dbg(hdev->dev, "get temp, ctl 0x%x, sensor %d, type %d\n",
- pkt.ctl, pkt.sensor_index, pkt.type);
-
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 0b8689fe0..3348ad12c 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -955,8 +955,8 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
(i + 1) == phys_pg_pack->npages);
if (rc) {
dev_err(hdev->dev,
- "map failed for handle %u, npages: %llu, mapped: %llu",
- phys_pg_pack->handle, phys_pg_pack->npages,
+ "map failed (%d) for handle %u, npages: %llu, mapped: %llu\n",
+ rc, phys_pg_pack->handle, phys_pg_pack->npages,
mapped_pg_cnt);
goto err;
}
@@ -1186,7 +1186,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
- dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
+ dev_err(hdev->dev, "mapping page pack failed (%d) for handle %u\n",
+ rc, handle);
mutex_unlock(&hdev->mmu_lock);
goto map_err;
}
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 278606373..8a9f98832 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -8,6 +8,7 @@
#include "habanalabs.h"
#include <linux/pci.h>
+#include <linux/types.h>
static ssize_t clk_max_freq_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -80,12 +81,27 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
{
struct hl_device *hdev = dev_get_drvdata(dev);
struct cpucp_info *cpucp_info;
+ u32 infineon_second_stage_version;
+ u32 infineon_second_stage_first_instance;
+ u32 infineon_second_stage_second_instance;
+ u32 infineon_second_stage_third_instance;
+ u32 mask = 0xff;
cpucp_info = &hdev->asic_prop.cpucp_info;
+ infineon_second_stage_version = le32_to_cpu(cpucp_info->infineon_second_stage_version);
+ infineon_second_stage_first_instance = infineon_second_stage_version & mask;
+ infineon_second_stage_second_instance =
+ (infineon_second_stage_version >> 8) & mask;
+ infineon_second_stage_third_instance =
+ (infineon_second_stage_version >> 16) & mask;
+
if (cpucp_info->infineon_second_stage_version)
- return sprintf(buf, "%#04x %#04x\n", le32_to_cpu(cpucp_info->infineon_version),
- le32_to_cpu(cpucp_info->infineon_second_stage_version));
+ return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
+ le32_to_cpu(cpucp_info->infineon_version),
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
else
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
}
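
The new format splits the packed second-stage word into three per-instance byte fields. The same unpacking in isolation (a sketch mirroring the shifts above):

	/* Sketch: unpack three 8-bit instance versions from one u32. */
	static void unpack_second_stage(u32 ver, u8 inst[3])
	{
		inst[0] = ver & 0xff;		/* first instance  */
		inst[1] = (ver >> 8) & 0xff;	/* second instance */
		inst[2] = (ver >> 16) & 0xff;	/* third instance  */
	}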
@@ -386,6 +402,21 @@ static ssize_t security_enabled_show(struct device *dev,
return sprintf(buf, "%d\n", hdev->asic_prop.fw_security_enabled);
}
+static ssize_t module_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", le32_to_cpu(hdev->asic_prop.cpucp_info.card_location));
+}
+
+static ssize_t parent_device_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", HL_DEV_NAME(hdev));
+}
+
static DEVICE_ATTR_RO(armcp_kernel_ver);
static DEVICE_ATTR_RO(armcp_ver);
static DEVICE_ATTR_RO(cpld_ver);
@@ -405,6 +436,8 @@ static DEVICE_ATTR_RO(thermal_ver);
static DEVICE_ATTR_RO(uboot_ver);
static DEVICE_ATTR_RO(fw_os_ver);
static DEVICE_ATTR_RO(security_enabled);
+static DEVICE_ATTR_RO(module_id);
+static DEVICE_ATTR_RO(parent_device);
static struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
@@ -430,6 +463,8 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_uboot_ver.attr,
&dev_attr_fw_os_ver.attr,
&dev_attr_security_enabled.attr,
+ &dev_attr_module_id.attr,
+ &dev_attr_parent_device.attr,
NULL,
};
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index bc6e338ef..e0e5615ef 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -7858,39 +7858,44 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
return !!ecc_data->is_critical;
}
-static void handle_lower_qman_data_on_err(struct hl_device *hdev, u64 qman_base, u64 event_mask)
+static void handle_lower_qman_data_on_err(struct hl_device *hdev, u64 qman_base, u32 engine_id)
{
- u32 lo, hi, cq_ptr_size, arc_cq_ptr_size;
- u64 cq_ptr, arc_cq_ptr, cp_current_inst;
+ struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode;
+ u64 cq_ptr, cp_current_inst;
+ u32 lo, hi, cq_size, cp_sts;
+ bool is_arc_cq;
- lo = RREG32(qman_base + QM_CQ_PTR_LO_4_OFFSET);
- hi = RREG32(qman_base + QM_CQ_PTR_HI_4_OFFSET);
- cq_ptr = ((u64) hi) << 32 | lo;
- cq_ptr_size = RREG32(qman_base + QM_CQ_TSIZE_4_OFFSET);
+ cp_sts = RREG32(qman_base + QM_CP_STS_4_OFFSET);
+ is_arc_cq = FIELD_GET(PDMA0_QM_CP_STS_CUR_CQ_MASK, cp_sts); /* 0 - legacy CQ, 1 - ARC_CQ */
- lo = RREG32(qman_base + QM_ARC_CQ_PTR_LO_OFFSET);
- hi = RREG32(qman_base + QM_ARC_CQ_PTR_HI_OFFSET);
- arc_cq_ptr = ((u64) hi) << 32 | lo;
- arc_cq_ptr_size = RREG32(qman_base + QM_ARC_CQ_TSIZE_OFFSET);
+ if (is_arc_cq) {
+ lo = RREG32(qman_base + QM_ARC_CQ_PTR_LO_STS_OFFSET);
+ hi = RREG32(qman_base + QM_ARC_CQ_PTR_HI_STS_OFFSET);
+ cq_ptr = ((u64) hi) << 32 | lo;
+ cq_size = RREG32(qman_base + QM_ARC_CQ_TSIZE_STS_OFFSET);
+ } else {
+ lo = RREG32(qman_base + QM_CQ_PTR_LO_STS_4_OFFSET);
+ hi = RREG32(qman_base + QM_CQ_PTR_HI_STS_4_OFFSET);
+ cq_ptr = ((u64) hi) << 32 | lo;
+ cq_size = RREG32(qman_base + QM_CQ_TSIZE_STS_4_OFFSET);
+ }
lo = RREG32(qman_base + QM_CP_CURRENT_INST_LO_4_OFFSET);
hi = RREG32(qman_base + QM_CP_CURRENT_INST_HI_4_OFFSET);
cp_current_inst = ((u64) hi) << 32 | lo;
dev_info(hdev->dev,
- "LowerQM. CQ: {ptr %#llx, size %u}, ARC_CQ: {ptr %#llx, size %u}, CP: {instruction %#llx}\n",
- cq_ptr, cq_ptr_size, arc_cq_ptr, arc_cq_ptr_size, cp_current_inst);
+ "LowerQM. %sCQ: {ptr %#llx, size %u}, CP: {instruction %#018llx}\n",
+ is_arc_cq ? "ARC_" : "", cq_ptr, cq_size, cp_current_inst);
- if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
- if (arc_cq_ptr) {
- hdev->captured_err_info.undef_opcode.cq_addr = arc_cq_ptr;
- hdev->captured_err_info.undef_opcode.cq_size = arc_cq_ptr_size;
- } else {
- hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr;
- hdev->captured_err_info.undef_opcode.cq_size = cq_ptr_size;
- }
-
- hdev->captured_err_info.undef_opcode.stream_id = QMAN_STREAMS;
+ if (undef_opcode->write_enable) {
+ memset(undef_opcode, 0, sizeof(*undef_opcode));
+ undef_opcode->timestamp = ktime_get();
+ undef_opcode->cq_addr = cq_ptr;
+ undef_opcode->cq_size = cq_size;
+ undef_opcode->engine_id = engine_id;
+ undef_opcode->stream_id = QMAN_STREAMS;
+ undef_opcode->write_enable = 0;
}
}
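
The handler now asks the CP status register which CQ the command parser is using instead of dumping both unconditionally. FIELD_GET() from <linux/bitfield.h> does the mask-and-shift, so the result is already normalized to 0/1. The idiom in isolation (the bit position here is hypothetical, not the real register layout):

	#include <linux/bits.h>
	#include <linux/bitfield.h>

	#define CP_STS_CUR_CQ_MASK	BIT(20)	/* hypothetical position */

	static bool cp_uses_arc_cq(u32 cp_sts)
	{
		/* Masks and shifts down: 0 - legacy CQ, 1 - ARC CQ */
		return FIELD_GET(CP_STS_CUR_CQ_MASK, cp_sts);
	}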
@@ -7929,19 +7934,12 @@ static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type
error_count++;
}
- /* check for undefined opcode */
- if (glbl_sts_val & PDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_MASK) {
+ /* Check for undefined opcode error in lower QM */
+ if ((i == QMAN_STREAMS) &&
+ (glbl_sts_val & PDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_MASK)) {
+ handle_lower_qman_data_on_err(hdev, qman_base,
+ gaudi2_queue_id_to_engine_id[qid_base]);
*event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
- if (hdev->captured_err_info.undef_opcode.write_enable) {
- memset(&hdev->captured_err_info.undef_opcode, 0,
- sizeof(hdev->captured_err_info.undef_opcode));
- hdev->captured_err_info.undef_opcode.timestamp = ktime_get();
- hdev->captured_err_info.undef_opcode.engine_id =
- gaudi2_queue_id_to_engine_id[qid_base];
- }
-
- if (i == QMAN_STREAMS)
- handle_lower_qman_data_on_err(hdev, qman_base, *event_mask);
}
}
@@ -10005,6 +10003,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ if (hl_is_fw_sw_ver_equal_or_greater(hdev, 1, 13))
+ is_critical = true;
break;
case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
diff --git a/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index a08378d08..d21fcd388 100644
--- a/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -242,14 +242,15 @@
#define QM_FENCE2_OFFSET (mmPDMA0_QM_CP_FENCE2_RDATA_0 - mmPDMA0_QM_BASE)
#define QM_SEI_STATUS_OFFSET (mmPDMA0_QM_SEI_STATUS - mmPDMA0_QM_BASE)
-#define QM_CQ_PTR_LO_4_OFFSET (mmPDMA0_QM_CQ_PTR_LO_4 - mmPDMA0_QM_BASE)
-#define QM_CQ_PTR_HI_4_OFFSET (mmPDMA0_QM_CQ_PTR_HI_4 - mmPDMA0_QM_BASE)
-#define QM_CQ_TSIZE_4_OFFSET (mmPDMA0_QM_CQ_TSIZE_4 - mmPDMA0_QM_BASE)
+#define QM_CQ_TSIZE_STS_4_OFFSET (mmPDMA0_QM_CQ_TSIZE_STS_4 - mmPDMA0_QM_BASE)
+#define QM_CQ_PTR_LO_STS_4_OFFSET (mmPDMA0_QM_CQ_PTR_LO_STS_4 - mmPDMA0_QM_BASE)
+#define QM_CQ_PTR_HI_STS_4_OFFSET (mmPDMA0_QM_CQ_PTR_HI_STS_4 - mmPDMA0_QM_BASE)
-#define QM_ARC_CQ_PTR_LO_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_LO - mmPDMA0_QM_BASE)
-#define QM_ARC_CQ_PTR_HI_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_HI - mmPDMA0_QM_BASE)
-#define QM_ARC_CQ_TSIZE_OFFSET (mmPDMA0_QM_ARC_CQ_TSIZE - mmPDMA0_QM_BASE)
+#define QM_ARC_CQ_TSIZE_STS_OFFSET (mmPDMA0_QM_ARC_CQ_TSIZE_STS - mmPDMA0_QM_BASE)
+#define QM_ARC_CQ_PTR_LO_STS_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_LO_STS - mmPDMA0_QM_BASE)
+#define QM_ARC_CQ_PTR_HI_STS_OFFSET (mmPDMA0_QM_ARC_CQ_PTR_HI_STS - mmPDMA0_QM_BASE)
+#define QM_CP_STS_4_OFFSET (mmPDMA0_QM_CP_STS_4 - mmPDMA0_QM_BASE)
#define QM_CP_CURRENT_INST_LO_4_OFFSET (mmPDMA0_QM_CP_CURRENT_INST_LO_4 - mmPDMA0_QM_BASE)
#define QM_CP_CURRENT_INST_HI_4_OFFSET (mmPDMA0_QM_CP_CURRENT_INST_HI_4 - mmPDMA0_QM_BASE)
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 1a4c4ed9d..682c53245 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -1,16 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_ACCEL_IVPU
- tristate "Intel VPU for Meteor Lake and newer"
+ tristate "Intel NPU (Neural Processing Unit)"
depends on DRM_ACCEL
depends on X86_64 && !UML
depends on PCI && PCI_MSI
select FW_LOADER
- select SHMEM
+ select DRM_GEM_SHMEM_HELPER
select GENERIC_ALLOCATOR
help
- Choose this option if you have a system that has an 14th generation Intel CPU
- or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
- inference accelerator for Computer Vision and Deep Learning applications.
+ Choose this option if you have a system with a 14th generation
+ Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
+ is a CPU-integrated inference accelerator for Computer Vision
+ and Deep Learning applications.
If "M" is selected, the module will be called intel_vpu.
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index ea453b985..7cb962e21 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -14,6 +14,7 @@
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
+#include "ivpu_hw.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
@@ -101,7 +102,7 @@ static int reset_pending_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
- seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+ seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
return 0;
}
@@ -115,6 +116,33 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"reset_pending", reset_pending_show, 0},
};
+static ssize_t
+dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 dvfs_mode;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
+ if (ret < 0)
+ return ret;
+
+ fw->dvfs_mode = dvfs_mode;
+
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static const struct file_operations dvfs_mode_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = dvfs_mode_fops_write,
+};
+
static int fw_log_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
@@ -152,6 +180,33 @@ static const struct file_operations fw_log_fops = {
};
static ssize_t
+fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
+ size_t size, loff_t *pos)
+{
+ struct ivpu_device *vdev = file->private_data;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, size, &enable);
+ if (ret < 0)
+ return ret;
+
+ ivpu_hw_profiling_freq_drive(vdev, enable);
+
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static const struct file_operations fw_profiling_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = fw_profiling_freq_fops_write,
+};
+
+static ssize_t
fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *pos)
{
@@ -251,11 +306,18 @@ static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
+ int ret;
if (!size)
return -EINVAL;
- ivpu_pm_schedule_recovery(vdev);
+ ret = ivpu_rpm_get(vdev);
+ if (ret)
+ return ret;
+
+ ivpu_pm_trigger_recovery(vdev, "debugfs");
+ flush_work(&vdev->pm->recovery_work);
+ ivpu_rpm_put(vdev);
return size;
}
@@ -280,6 +342,9 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
&ivpu_force_recovery_fops);
+ debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
+ &dvfs_mode_fops);
+
debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
&fw_log_fops);
debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
@@ -291,4 +356,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
&ivpu_reset_engine_fops);
+
+ if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX)
+ debugfs_create_file("fw_profiling_freq_drive", 0200,
+ debugfs_root, vdev, &fw_profiling_freq_fops);
}
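
Both new write-only knobs share one pattern: parse the user string, stash the value where boot-params setup will pick it up, then issue a PCI function reset so the firmware reboots with the new setting. A condensed sketch of that pattern (not the literal patch code):

	/* Sketch: debugfs write that applies a u32 knob via function reset. */
	static ssize_t knob_write(struct file *file, const char __user *buf,
				  size_t size, loff_t *pos)
	{
		struct ivpu_device *vdev = file->private_data;
		u32 val;
		int ret;

		ret = kstrtou32_from_user(buf, size, 0, &val);
		if (ret < 0)
			return ret;

		vdev->fw->dvfs_mode = val;	/* consumed by boot-params setup */

		ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
		return ret ? ret : size;
	}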
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index c856c417a..b35c7aedc 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -6,6 +6,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
@@ -17,6 +18,7 @@
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@@ -31,8 +33,6 @@
__stringify(DRM_IVPU_DRIVER_MINOR) "."
#endif
-static const struct drm_driver driver;
-
static struct lock_class_key submitted_jobs_xa_lock_class_key;
int ivpu_dbg_mask;
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
-MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");
+MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@@ -67,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
return file_priv;
}
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
- struct ivpu_file_priv *file_priv;
-
- xa_lock_irq(&vdev->context_xa);
- file_priv = xa_load(&vdev->context_xa, id);
- /* file_priv may still be in context_xa during file_priv_release() */
- if (file_priv && !kref_get_unless_zero(&file_priv->ref))
- file_priv = NULL;
- xa_unlock_irq(&vdev->context_xa);
-
- if (file_priv)
- ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
- file_priv->ctx.id, kref_read(&file_priv->ref));
-
- return file_priv;
+ mutex_lock(&file_priv->lock);
+ if (file_priv->bound) {
+ ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+ ivpu_cmdq_release_all_locked(file_priv);
+ ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ file_priv->bound = false;
+ drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+ }
+ mutex_unlock(&file_priv->lock);
}
static void file_priv_release(struct kref *ref)
@@ -90,13 +88,15 @@ static void file_priv_release(struct kref *ref)
struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
struct ivpu_device *vdev = file_priv->vdev;
- ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+ ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+ file_priv->ctx.id, (bool)file_priv->bound);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+ mutex_lock(&vdev->context_list_lock);
+ file_priv_unbind(vdev, file_priv);
+ mutex_unlock(&vdev->context_list_lock);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
- ivpu_cmdq_release_all(file_priv);
- ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
- ivpu_jsm_context_release(vdev, file_priv->ctx.id);
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
- drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
}
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
return 0;
}
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
- int ret;
-
- ret = ivpu_rpm_get_if_active(vdev);
- if (ret < 0)
- return ret;
-
- *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
- if (ret)
- ivpu_rpm_put(vdev);
-
- return 0;
-}
-
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- ret = ivpu_get_core_clock_rate(vdev, &args->value);
+ args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -178,9 +162,6 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user.start;
break;
- case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- args->value = file_priv->priority;
- break;
case DRM_IVPU_PARAM_CONTEXT_ID:
args->value = file_priv->ctx.id;
break;
@@ -220,17 +201,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_param *args = data;
int ret = 0;
switch (args->param) {
- case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
- file_priv->priority = args->value;
- else
- ret = -EINVAL;
- break;
default:
ret = -EINVAL;
}
@@ -243,50 +217,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv;
u32 ctx_id;
- void *old;
- int ret;
+ int idx, ret;
- ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
- return ret;
- }
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv) {
ret = -ENOMEM;
- goto err_xa_erase;
+ goto err_dev_exit;
}
file_priv->vdev = vdev;
- file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+ file_priv->bound = true;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
+ mutex_lock(&vdev->context_list_lock);
+
+ ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+ vdev->context_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+ goto err_unlock;
+ }
+
ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
if (ret)
- goto err_mutex_destroy;
+ goto err_xa_erase;
- old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
- if (xa_is_err(old)) {
- ret = xa_err(old);
- ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
- goto err_ctx_fini;
- }
+ mutex_unlock(&vdev->context_list_lock);
+ drm_dev_exit(idx);
+
+ file->driver_priv = file_priv;
ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
ctx_id, current->comm, task_pid_nr(current));
- file->driver_priv = file_priv;
return 0;
-err_ctx_fini:
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
- mutex_destroy(&file_priv->lock);
- kfree(file_priv);
err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+ mutex_unlock(&vdev->context_list_lock);
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+err_dev_exit:
+ drm_dev_exit(idx);
return ret;
}
@@ -317,16 +294,14 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
unsigned long timeout;
int ret;
- if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
+ if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
return 0;
- ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
+ ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);
timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
while (1) {
- ret = ivpu_ipc_irq_handler(vdev);
- if (ret)
- break;
+ ivpu_ipc_irq_handler(vdev, NULL);
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
break;
@@ -344,8 +319,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
if (!ret)
ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
- else
- ivpu_hw_diagnose_failure(vdev);
return ret;
}
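
The ready-wait is a plain jiffies-bounded poll: service the IPC IRQ, try to receive the boot message, and retry on -ETIMEDOUT until the deadline. The timeout idiom in isolation (try_receive() and boot_timeout_ms are hypothetical stand-ins):

	/* Sketch: bounded retry loop on -ETIMEDOUT. */
	unsigned long timeout = jiffies + msecs_to_jiffies(boot_timeout_ms);
	int ret;

	while (1) {
		ret = try_receive();	/* hypothetical; -ETIMEDOUT means retry */
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;
		cond_resched();
	}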
@@ -362,7 +335,7 @@ int ivpu_boot(struct ivpu_device *vdev)
int ret;
/* Update boot params located at first 4KB of FW memory */
- ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
+ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
@@ -373,6 +346,9 @@ int ivpu_boot(struct ivpu_device *vdev)
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_mmu_evtq_dump(vdev);
+ ivpu_fw_log_dump(vdev);
return ret;
}
@@ -414,7 +390,9 @@ static const struct drm_driver driver = {
.open = ivpu_open,
.postclose = ivpu_postclose,
- .gem_prime_import = ivpu_gem_prime_import,
+
+ .gem_create_object = ivpu_gem_create_object,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
@@ -427,6 +405,13 @@ static const struct drm_driver driver = {
.minor = DRM_IVPU_DRIVER_MINOR,
};
+static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
+{
+ struct ivpu_device *vdev = arg;
+
+ return ivpu_ipc_irq_thread_handler(vdev);
+}
+
static int ivpu_irq_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -440,8 +425,8 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
- IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
+ ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -529,9 +514,18 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
atomic64_set(&vdev->unique_id_counter, 0);
- xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+ INIT_LIST_HEAD(&vdev->bo_list);
+
+ ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+ if (ret)
+ goto err_xa_destroy;
+
+ ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
+ if (ret)
+ goto err_xa_destroy;
ret = ivpu_pci_init(vdev);
if (ret)
@@ -549,7 +543,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
/* Power up early so the rest of init code can access VPU registers */
ret = ivpu_hw_power_up(vdev);
if (ret)
- goto err_xa_destroy;
+ goto err_power_down;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
@@ -573,20 +567,15 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
ivpu_pm_init(vdev);
- ret = ivpu_job_done_thread_init(vdev);
- if (ret)
- goto err_ipc_fini;
-
ret = ivpu_boot(vdev);
if (ret)
- goto err_job_done_thread_fini;
+ goto err_ipc_fini;
+ ivpu_job_done_consumer_init(vdev);
ivpu_pm_enable(vdev);
return 0;
-err_job_done_thread_fini:
- ivpu_job_done_thread_fini(vdev);
err_ipc_fini:
ivpu_ipc_fini(vdev);
err_fw_fini:
@@ -605,14 +594,30 @@ err_xa_destroy:
return ret;
}
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+
+ mutex_lock(&vdev->context_list_lock);
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+ file_priv_unbind(vdev, file_priv);
+
+ mutex_unlock(&vdev->context_list_lock);
+}
+
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
- ivpu_job_done_thread_fini(vdev);
+
+ ivpu_jobs_abort_all(vdev);
+ ivpu_job_done_consumer_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
+ ivpu_bo_unbind_all_user_contexts(vdev);
ivpu_ipc_fini(vdev);
ivpu_fw_fini(vdev);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 417ddeca8..069ace4ad 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -17,9 +17,10 @@
#include <uapi/drm/ivpu_accel.h>
#include "ivpu_mmu_context.h"
+#include "ivpu_ipc.h"
#define DRIVER_NAME "intel_vpu"
-#define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)"
+#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
@@ -55,6 +56,7 @@
#define IVPU_DBG_JSM BIT(10)
#define IVPU_DBG_KREF BIT(11)
#define IVPU_DBG_RPM BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
#define ivpu_err(vdev, fmt, ...) \
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -88,6 +90,7 @@ struct ivpu_wa_table {
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
+ bool disable_d0i3_msg;
};
struct ivpu_hw_info;
@@ -112,11 +115,15 @@ struct ivpu_device {
struct ivpu_mmu_context gctx;
struct ivpu_mmu_context rctx;
+ struct mutex context_list_lock; /* Protects user context addition/removal */
struct xarray context_xa;
struct xa_limit context_xa_limit;
+ struct mutex bo_list_lock; /* Protects bo_list */
+ struct list_head bo_list;
+
struct xarray submitted_jobs_xa;
- struct task_struct *job_done_thread;
+ struct ivpu_ipc_consumer job_done_consumer;
atomic64_t unique_id_counter;
@@ -126,6 +133,7 @@ struct ivpu_device {
int tdr;
int reschedule_suspend;
int autosuspend;
+ int d0i3_entry_msg;
} timeout;
};
@@ -139,8 +147,8 @@ struct ivpu_file_priv {
struct mutex lock; /* Protects cmdq */
struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
struct ivpu_mmu_context ctx;
- u32 priority;
bool has_mmu_faults;
+ bool bound;
};
extern int ivpu_dbg_mask;
@@ -148,13 +156,14 @@ extern u8 ivpu_pll_min_ratio;
extern u8 ivpu_pll_max_ratio;
extern bool ivpu_disable_mmu_cont_pages;
-#define IVPU_TEST_MODE_DISABLED 0
-#define IVPU_TEST_MODE_FW_TEST 1
-#define IVPU_TEST_MODE_NULL_HW 2
+#define IVPU_TEST_MODE_FW_TEST BIT(0)
+#define IVPU_TEST_MODE_NULL_HW BIT(1)
+#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
+#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
+#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
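
test_mode is now a bitmask rather than a three-value enum, so independent test behaviours can be stacked in one module parameter. A sketch of how the bits compose (values per the BIT() macros above; skip_hw_access() is hypothetical):

	/* e.g. null HW together with null submission: BIT(1) | BIT(2) = 0x6 */
	ivpu_test_mode = IVPU_TEST_MODE_NULL_HW | IVPU_TEST_MODE_NULL_SUBMISSION;

	if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)
		skip_hw_access();	/* each feature checks only its own bit */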
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 691da521d..5fa8bd460 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -33,12 +33,17 @@
#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
-#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
+/* Check if FW API is compatible with the driver */
+#define IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, name, min_major) \
ivpu_fw_check_api(vdev, fw_hdr, #name, \
VPU_##name##_API_VER_INDEX, \
VPU_##name##_API_VER_MAJOR, \
VPU_##name##_API_VER_MINOR, min_major)
+/* Check if the API version is lower than the given version */
+#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \
+ ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor)
+
static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
@@ -105,6 +110,19 @@ ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw
return 0;
}
+static bool
+ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
+ const char *str, int index, u16 major, u16 minor)
+{
+ u16 fw_major = (u16)(fw_hdr->api_version[index] >> 16);
+ u16 fw_minor = (u16)(fw_hdr->api_version[index]);
+
+ if (fw_major < major || (fw_major == major && fw_minor < minor))
+ return true;
+
+ return false;
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -164,9 +182,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
(const char *)fw_hdr + VPU_FW_HEADER_SIZE);
- if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
+ if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
- if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
+ if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
return -EINVAL;
fw->runtime_addr = runtime_addr;
@@ -182,6 +200,8 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
+ fw->dvfs_mode = 0;
+
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
@@ -195,6 +215,23 @@ static void ivpu_fw_release(struct ivpu_device *vdev)
release_firmware(vdev->fw->file);
}
+/* Initialize workarounds that depend on FW version */
+static void
+ivpu_fw_init_wa(struct ivpu_device *vdev)
+{
+ const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
+
+ if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
+ (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
+ vdev->wa.disable_d0i3_msg = true;
+
+ /* Force enable the feature for testing purposes */
+ if (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_ENABLE)
+ vdev->wa.disable_d0i3_msg = false;
+
+ IVPU_PRINT_WA(disable_d0i3_msg);
+}
+
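
The new helper orders (major, minor) pairs lexicographically and returns true when the firmware version sorts strictly below the requested one; checked against BOOT API (3, 17) above, firmware reporting 3.16 gets the disable_d0i3_msg workaround while 3.17 does not. The comparison as a standalone function with worked values (a sketch):

	static bool api_ver_lt(u16 fw_major, u16 fw_minor, u16 major, u16 minor)
	{
		return fw_major < major || (fw_major == major && fw_minor < minor);
	}

	/* api_ver_lt(3, 16, 3, 17) -> true  : D0i3 entry message disabled (WA)
	 * api_ver_lt(3, 17, 3, 17) -> false : firmware handles the D0i3 message
	 */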
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -248,7 +285,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (fw->shave_nn_size) {
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
- fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
+ fw->shave_nn_size, DRM_IVPU_BO_WC);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
ret = -ENOMEM;
@@ -297,6 +334,8 @@ int ivpu_fw_init(struct ivpu_device *vdev)
if (ret)
goto err_fw_release;
+ ivpu_fw_init_wa(vdev);
+
ret = ivpu_fw_mem_init(vdev);
if (ret)
goto err_fw_release;
@@ -422,14 +461,31 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->punit_telemetry_sram_size);
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
boot_params->vpu_telemetry_enable);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n",
+ boot_params->dvfs_mode);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n",
+ boot_params->d0i3_delayed_entry);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
+ boot_params->d0i3_residency_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
+ boot_params->d0i3_entry_vpu_ts);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;
- /* In case of warm boot we only have to reset the entrypoint addr */
+ /* In case of warm boot only update variable params */
if (!ivpu_fw_is_cold_boot(vdev)) {
+ boot_params->d0i3_residency_time_us =
+ ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
+ boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n",
+ boot_params->d0i3_residency_time_us);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n",
+ boot_params->d0i3_entry_vpu_ts);
+
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
wmb(); /* Flush WC buffers after writing save_restore_ret_address */
@@ -443,6 +499,13 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
/*
+ * This param is a debug firmware feature. It switches the default
+ * clock to a higher-resolution one for fine-grained and more accurate
+ * firmware task profiling.
+ */
+ boot_params->perf_clk_frequency = ivpu_hw_profiling_freq_get(vdev);
+
+ /*
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
@@ -493,6 +556,11 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+ boot_params->dvfs_mode = vdev->fw->dvfs_mode;
+ if (!IVPU_WA(disable_d0i3_msg))
+ boot_params->d0i3_delayed_entry = 1;
+ boot_params->d0i3_residency_time_us = 0;
+ boot_params->d0i3_entry_vpu_ts = 0;
wmb(); /* Flush WC buffers after writing bootparams */
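
The D0i3 residency handed to firmware is measured on the host: a CLOCK_BOOTTIME stamp is taken at D0i3 entry (d0i3_entry_host_ts) and the delta is computed at warm boot, so time spent in system suspend counts toward residency. The measurement in isolation (field names as in the patch):

	/* At D0i3 entry: */
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();

	/* At warm boot: microseconds spent in D0i3, suspend time included. */
	boot_params->d0i3_residency_time_us =
		ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);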
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 10ae2847f..66b60fa16 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -27,6 +27,7 @@ struct ivpu_fw_info {
u32 trace_level;
u32 trace_destination_mask;
u64 trace_hw_component_mask;
+ u32 dvfs_mode;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index c91852f2e..e9ddbe9f5 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -20,215 +20,15 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
-MODULE_IMPORT_NS(DMA_BUF);
-
static const struct drm_gem_object_funcs ivpu_gem_funcs;
-static struct lock_class_key prime_bo_lock_class_key;
-
-static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
-{
- /* Pages are managed by the underlying dma-buf */
- return 0;
-}
-
-static void prime_free_pages_locked(struct ivpu_bo *bo)
-{
- /* Pages are managed by the underlying dma-buf */
-}
-
-static int prime_map_pages_locked(struct ivpu_bo *bo)
-{
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- struct sg_table *sgt;
-
- sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
- return PTR_ERR(sgt);
- }
-
- bo->sgt = sgt;
- return 0;
-}
-
-static void prime_unmap_pages_locked(struct ivpu_bo *bo)
-{
- dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
- bo->sgt = NULL;
-}
-
-static const struct ivpu_bo_ops prime_ops = {
- .type = IVPU_BO_TYPE_PRIME,
- .name = "prime",
- .alloc_pages = prime_alloc_pages_locked,
- .free_pages = prime_free_pages_locked,
- .map_pages = prime_map_pages_locked,
- .unmap_pages = prime_unmap_pages_locked,
-};
-
-static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
-{
- int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
- struct page **pages;
-
- pages = drm_gem_get_pages(&bo->base);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- if (bo->flags & DRM_IVPU_BO_WC)
- set_pages_array_wc(pages, npages);
- else if (bo->flags & DRM_IVPU_BO_UNCACHED)
- set_pages_array_uc(pages, npages);
-
- bo->pages = pages;
- return 0;
-}
-
-static void shmem_free_pages_locked(struct ivpu_bo *bo)
-{
- if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
- set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- drm_gem_put_pages(&bo->base, bo->pages, true, false);
- bo->pages = NULL;
-}
-
-static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
-{
- int npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- struct sg_table *sgt;
- int ret;
-
- sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
- if (IS_ERR(sgt)) {
- ivpu_err(vdev, "Failed to allocate sgtable\n");
- return PTR_ERR(sgt);
- }
-
- ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
- if (ret) {
- ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
- goto err_free_sgt;
- }
-
- bo->sgt = sgt;
- return 0;
-
-err_free_sgt:
- kfree(sgt);
- return ret;
-}
-
-static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
-{
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-
- dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->sgt);
- kfree(bo->sgt);
- bo->sgt = NULL;
-}
-
-static const struct ivpu_bo_ops shmem_ops = {
- .type = IVPU_BO_TYPE_SHMEM,
- .name = "shmem",
- .alloc_pages = shmem_alloc_pages_locked,
- .free_pages = shmem_free_pages_locked,
- .map_pages = ivpu_bo_map_pages_locked,
- .unmap_pages = ivpu_bo_unmap_pages_locked,
-};
-
-static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
-{
- unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
- struct page **pages;
- int ret;
-
- pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < npages; i++) {
- pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (!pages[i]) {
- ret = -ENOMEM;
- goto err_free_pages;
- }
- cond_resched();
- }
-
- bo->pages = pages;
- return 0;
-
-err_free_pages:
- while (i--)
- put_page(pages[i]);
- kvfree(pages);
- return ret;
-}
-
-static void internal_free_pages_locked(struct ivpu_bo *bo)
+static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
- unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT;
-
- if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
- set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- for (i = 0; i < npages; i++)
- put_page(bo->pages[i]);
-
- kvfree(bo->pages);
- bo->pages = NULL;
-}
-
-static const struct ivpu_bo_ops internal_ops = {
- .type = IVPU_BO_TYPE_INTERNAL,
- .name = "internal",
- .alloc_pages = internal_alloc_pages_locked,
- .free_pages = internal_free_pages_locked,
- .map_pages = ivpu_bo_map_pages_locked,
- .unmap_pages = ivpu_bo_unmap_pages_locked,
-};
-
-static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
-{
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- int ret;
-
- lockdep_assert_held(&bo->lock);
- drm_WARN_ON(&vdev->drm, bo->sgt);
-
- ret = bo->ops->alloc_pages(bo);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
- return ret;
- }
-
- ret = bo->ops->map_pages(bo);
- if (ret) {
- ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
- goto err_free_pages;
- }
- return ret;
-
-err_free_pages:
- bo->ops->free_pages(bo);
- return ret;
-}
-
-static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
-{
- mutex_lock(&bo->lock);
-
- WARN_ON(!bo->sgt);
- bo->ops->unmap_pages(bo);
- WARN_ON(bo->sgt);
- bo->ops->free_pages(bo);
- WARN_ON(bo->pages);
-
- mutex_unlock(&bo->lock);
+ ivpu_dbg(vdev, BO,
+ "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+ (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+ (bool)bo->base.base.import_attach);
}
/*
@@ -245,21 +45,19 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
mutex_lock(&bo->lock);
- if (!bo->vpu_addr) {
- ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
- bo->ctx->id, bo->handle);
- ret = -EINVAL;
- goto unlock;
- }
+ ivpu_dbg_bo(vdev, bo, "pin");
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
+
+ if (!bo->mmu_mapped) {
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
- if (!bo->sgt) {
- ret = ivpu_bo_alloc_and_map_pages_locked(bo);
- if (ret)
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
goto unlock;
- }
+ }
- if (!bo->mmu_mapped) {
- ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
+ ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
ivpu_bo_is_snooped(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
@@ -279,250 +77,187 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
const struct ivpu_addr_range *range)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- int ret;
+ int idx, ret;
- if (!range) {
- if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
- range = &vdev->hw->ranges.shave;
- else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
- range = &vdev->hw->ranges.dma;
- else
- range = &vdev->hw->ranges.user;
- }
+ if (!drm_dev_enter(&vdev->drm, &idx))
+ return -ENODEV;
+
+ mutex_lock(&bo->lock);
- mutex_lock(&ctx->lock);
- ret = ivpu_mmu_context_insert_node_locked(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
+ ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
bo->vpu_addr = bo->mm_node.start;
- list_add_tail(&bo->ctx_node, &ctx->bo_list);
+ } else {
+ ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
}
- mutex_unlock(&ctx->lock);
+
+ ivpu_dbg_bo(vdev, bo, "alloc");
+
+ mutex_unlock(&bo->lock);
+
+ drm_dev_exit(idx);
return ret;
}
-static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
+static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- struct ivpu_mmu_context *ctx = bo->ctx;
-
- ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
- ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
- mutex_lock(&bo->lock);
+ lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
if (bo->mmu_mapped) {
- drm_WARN_ON(&vdev->drm, !bo->sgt);
- ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
+ drm_WARN_ON(&vdev->drm, !bo->vpu_addr);
+ drm_WARN_ON(&vdev->drm, !bo->base.sgt);
+ ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->base.sgt);
bo->mmu_mapped = false;
}
- mutex_lock(&ctx->lock);
- list_del(&bo->ctx_node);
- bo->vpu_addr = 0;
- bo->ctx = NULL;
- ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
- mutex_unlock(&ctx->lock);
+ if (bo->ctx) {
+ ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
+ bo->ctx = NULL;
+ }
- mutex_unlock(&bo->lock);
+ if (bo->base.base.import_attach)
+ return;
+
+ dma_resv_lock(bo->base.base.resv, NULL);
+ if (bo->base.sgt) {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+ }
+ dma_resv_unlock(bo->base.base.resv);
}
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
- struct ivpu_bo *bo, *tmp;
+ struct ivpu_bo *bo;
+
+ if (drm_WARN_ON(&vdev->drm, !ctx))
+ return;
- list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
- ivpu_bo_free_vpu_addr(bo);
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
+ mutex_lock(&bo->lock);
+ if (bo->ctx == ctx) {
+ ivpu_dbg_bo(vdev, bo, "unbind");
+ ivpu_bo_unbind_locked(bo);
+ }
+ mutex_unlock(&bo->lock);
+ }
+ mutex_unlock(&vdev->bo_list_lock);
}
-static struct ivpu_bo *
-ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
- u64 size, u32 flags, const struct ivpu_bo_ops *ops,
- const struct ivpu_addr_range *range, u64 user_ptr)
+struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size)
{
struct ivpu_bo *bo;
- int ret = 0;
- if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
+ if (size == 0 || !PAGE_ALIGNED(size))
return ERR_PTR(-EINVAL);
- switch (flags & DRM_IVPU_BO_CACHE_MASK) {
- case DRM_IVPU_BO_CACHED:
- case DRM_IVPU_BO_UNCACHED:
- case DRM_IVPU_BO_WC:
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
- mutex_init(&bo->lock);
- bo->base.funcs = &ivpu_gem_funcs;
- bo->flags = flags;
- bo->ops = ops;
- bo->user_ptr = user_ptr;
-
- if (ops->type == IVPU_BO_TYPE_SHMEM)
- ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
- else
- drm_gem_private_object_init(&vdev->drm, &bo->base, size);
+ bo->base.base.funcs = &ivpu_gem_funcs;
+ bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
- if (ret) {
- ivpu_err(vdev, "Failed to initialize drm object\n");
- goto err_free;
- }
-
- if (flags & DRM_IVPU_BO_MAPPABLE) {
- ret = drm_gem_create_mmap_offset(&bo->base);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate mmap offset\n");
- goto err_release;
- }
- }
-
- if (mmu_context) {
- ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
- if (ret) {
- ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
- goto err_release;
- }
- }
-
- return bo;
+ INIT_LIST_HEAD(&bo->bo_list_node);
+ mutex_init(&bo->lock);
-err_release:
- drm_gem_object_release(&bo->base);
-err_free:
- kfree(bo);
- return ERR_PTR(ret);
+ return &bo->base.base;
}
-static void ivpu_bo_free(struct drm_gem_object *obj)
+static struct ivpu_bo *
+ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
{
- struct ivpu_bo *bo = to_ivpu_bo(obj);
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-
- if (bo->ctx)
- ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
- bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
- else
- ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
- (bool)bo->sgt, bo->mmu_mapped);
-
- drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
-
- vunmap(bo->kvaddr);
+ struct drm_gem_shmem_object *shmem;
+ struct ivpu_bo *bo;
- if (bo->ctx)
- ivpu_bo_free_vpu_addr(bo);
+ switch (flags & DRM_IVPU_BO_CACHE_MASK) {
+ case DRM_IVPU_BO_CACHED:
+ case DRM_IVPU_BO_WC:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
- if (bo->sgt)
- ivpu_bo_unmap_and_free_pages(bo);
+ shmem = drm_gem_shmem_create(&vdev->drm, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
- if (bo->base.import_attach)
- drm_prime_gem_destroy(&bo->base, bo->sgt);
+ bo = to_ivpu_bo(&shmem->base);
+ bo->base.map_wc = flags & DRM_IVPU_BO_WC;
+ bo->flags = flags;
- drm_gem_object_release(&bo->base);
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
- mutex_destroy(&bo->lock);
- kfree(bo);
+ return bo;
}
-static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_bo *bo = to_ivpu_bo(obj);
- struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-
- ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
- bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo), bo->ops->name);
+ struct ivpu_addr_range *range;
- if (obj->import_attach) {
- /* Drop the reference drm_gem_mmap_obj() acquired.*/
- drm_gem_object_put(obj);
- vma->vm_private_data = NULL;
- return dma_buf_mmap(obj->dma_buf, vma, 0);
+ if (bo->ctx) {
+ ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+ file_priv->ctx.id, bo->ctx->id);
+ return -EALREADY;
}
- vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
- vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
-
- return 0;
-}
-
-static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
-{
- struct ivpu_bo *bo = to_ivpu_bo(obj);
- loff_t npages = obj->size >> PAGE_SHIFT;
- int ret = 0;
-
- mutex_lock(&bo->lock);
-
- if (!bo->sgt)
- ret = ivpu_bo_alloc_and_map_pages_locked(bo);
-
- mutex_unlock(&bo->lock);
-
- if (ret)
- return ERR_PTR(ret);
+ if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+ range = &vdev->hw->ranges.shave;
+ else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+ range = &vdev->hw->ranges.dma;
+ else
+ range = &vdev->hw->ranges.user;
- return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
+ return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
}
-static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
+static void ivpu_bo_free(struct drm_gem_object *obj)
{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
+ struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
- loff_t npages = obj->size >> PAGE_SHIFT;
- pgoff_t page_offset;
- struct page *page;
- vm_fault_t ret;
- int err;
- mutex_lock(&bo->lock);
+ ivpu_dbg_bo(vdev, bo, "free");
- if (!bo->sgt) {
- err = ivpu_bo_alloc_and_map_pages_locked(bo);
- if (err) {
- ret = vmf_error(err);
- goto unlock;
- }
- }
+ mutex_lock(&vdev->bo_list_lock);
+ list_del(&bo->bo_list_node);
+ mutex_unlock(&vdev->bo_list_lock);
- /* We don't use vmf->pgoff since that has the fake offset */
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- if (page_offset >= npages) {
- ret = VM_FAULT_SIGBUS;
- } else {
- page = bo->pages[page_offset];
- ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
- }
+ drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
-unlock:
- mutex_unlock(&bo->lock);
+ ivpu_bo_unbind_locked(bo);
+ mutex_destroy(&bo->lock);
- return ret;
+ drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
+ drm_gem_shmem_free(&bo->base);
}
-static const struct vm_operations_struct ivpu_vm_ops = {
- .fault = ivpu_vm_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_bo_free,
- .mmap = ivpu_bo_mmap,
- .vm_ops = &ivpu_vm_ops,
- .get_sg_table = ivpu_bo_get_sg_table,
+ .open = ivpu_bo_open,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
};
-int
-ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -537,23 +272,18 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (size == 0)
return -EINVAL;
- bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
+ bo = ivpu_bo_create(vdev, size, args->flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
- ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
- if (!ret) {
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (!ret)
args->vpu_addr = bo->vpu_addr;
- args->handle = bo->handle;
- }
-
- drm_gem_object_put(&bo->base);
- ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
- file_priv->ctx.id, bo->vpu_addr, ivpu_bo_size(bo), bo->flags);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
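
Note the refcount dance: drm_gem_handle_create() installs its own reference behind the returned handle, so the creation reference is dropped unconditionally afterwards, on success and failure alike. The pattern in isolation (a sketch of the lines above):

	/* Publish a GEM object via a handle, then drop our reference. */
	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
	if (!ret)
		args->vpu_addr = bo->vpu_addr;	/* handle now keeps the BO alive */

	drm_gem_object_put(&bo->base.base);	/* drop the creation reference */
	return ret;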
@@ -563,8 +293,8 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
{
const struct ivpu_addr_range *range;
struct ivpu_addr_range fixed_range;
+ struct iosys_map map;
struct ivpu_bo *bo;
- pgprot_t prot;
int ret;
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
@@ -578,81 +308,47 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
range = &vdev->hw->ranges.global;
}
- bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
+ bo = ivpu_bo_create(vdev, size, flags);
if (IS_ERR(bo)) {
ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
bo, vpu_addr, size, flags);
return NULL;
}
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_alloc_vpu_addr(bo, &vdev->gctx, range);
if (ret)
goto err_put;
- if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
- drm_clflush_pages(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- if (bo->flags & DRM_IVPU_BO_WC)
- set_pages_array_wc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
- else if (bo->flags & DRM_IVPU_BO_UNCACHED)
- set_pages_array_uc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT);
-
- prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
- bo->kvaddr = vmap(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT, VM_MAP, prot);
- if (!bo->kvaddr) {
- ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
+ ret = ivpu_bo_pin(bo);
+ if (ret)
goto err_put;
- }
- ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
- bo->vpu_addr, ivpu_bo_size(bo), flags);
+ dma_resv_lock(bo->base.base.resv, NULL);
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+ if (ret)
+ goto err_put;
return bo;
err_put:
- drm_gem_object_put(&bo->base);
+ drm_gem_object_put(&bo->base.base);
return NULL;
}
void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
- drm_gem_object_put(&bo->base);
-}
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
-struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
-{
- struct ivpu_device *vdev = to_ivpu_device(dev);
- struct dma_buf_attachment *attach;
- struct ivpu_bo *bo;
+ dma_resv_lock(bo->base.base.resv, NULL);
+ drm_gem_shmem_vunmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
- attach = dma_buf_attach(buf, dev->dev);
- if (IS_ERR(attach))
- return ERR_CAST(attach);
-
- get_dma_buf(buf);
-
- bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
- if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
- goto err_detach;
- }
-
- lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
-
- bo->base.import_attach = attach;
-
- return &bo->base;
-
-err_detach:
- dma_buf_detach(buf, attach);
- dma_buf_put(buf);
- return ERR_CAST(bo);
+ drm_gem_object_put(&bo->base.base);
}
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
- struct ivpu_device *vdev = to_ivpu_device(dev);
struct drm_ivpu_bo_info *args = data;
struct drm_gem_object *obj;
struct ivpu_bo *bo;
@@ -665,21 +361,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
bo = to_ivpu_bo(obj);
mutex_lock(&bo->lock);
-
- if (!bo->ctx) {
- ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
- goto unlock;
- }
- }
-
args->flags = bo->flags;
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
args->vpu_addr = bo->vpu_addr;
args->size = obj->size;
-unlock:
mutex_unlock(&bo->lock);
+
drm_gem_object_put(obj);
return ret;
}
@@ -712,43 +399,38 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
- unsigned long dma_refcount = 0;
+ mutex_lock(&bo->lock);
+
+ drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+ bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo->flags, kref_read(&bo->base.base.refcount));
+
+ if (bo->base.pages)
+ drm_printf(p, " has_pages");
- if (bo->base.dma_buf && bo->base.dma_buf->file)
- dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
+ if (bo->mmu_mapped)
+ drm_printf(p, " mmu_mapped");
- drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
- bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo),
- kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
+ if (bo->base.base.import_attach)
+ drm_printf(p, " imported");
+
+ drm_printf(p, "\n");
+
+ mutex_unlock(&bo->lock);
}
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
{
struct ivpu_device *vdev = to_ivpu_device(dev);
- struct ivpu_file_priv *file_priv;
- unsigned long ctx_id;
struct ivpu_bo *bo;
- drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
- "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
+ drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
+ "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");
- mutex_lock(&vdev->gctx.lock);
- list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
+ mutex_lock(&vdev->bo_list_lock);
+ list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
ivpu_bo_print_info(bo, p);
- mutex_unlock(&vdev->gctx.lock);
-
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
- if (!file_priv)
- continue;
-
- mutex_lock(&file_priv->ctx.lock);
- list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
- ivpu_bo_print_info(bo, p);
- mutex_unlock(&file_priv->ctx.lock);
-
- ivpu_file_priv_put(&file_priv);
- }
+ mutex_unlock(&vdev->bo_list_lock);
}
void ivpu_bo_list_print(struct drm_device *dev)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index a0b4d4a32..a8559211c 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -6,84 +6,51 @@
#define __IVPU_GEM_H__
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
-struct dma_buf;
-struct ivpu_bo_ops;
struct ivpu_file_priv;
struct ivpu_bo {
- struct drm_gem_object base;
- const struct ivpu_bo_ops *ops;
-
+ struct drm_gem_shmem_object base;
struct ivpu_mmu_context *ctx;
- struct list_head ctx_node;
+ struct list_head bo_list_node;
struct drm_mm_node mm_node;
- struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
- struct sg_table *sgt;
- struct page **pages;
- bool mmu_mapped;
-
- void *kvaddr;
+ struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
- u32 handle;
u32 flags;
- uintptr_t user_ptr;
- u32 job_status;
-};
-
-enum ivpu_bo_type {
- IVPU_BO_TYPE_SHMEM = 1,
- IVPU_BO_TYPE_INTERNAL,
- IVPU_BO_TYPE_PRIME,
-};
-
-struct ivpu_bo_ops {
- enum ivpu_bo_type type;
- const char *name;
- int (*alloc_pages)(struct ivpu_bo *bo);
- void (*free_pages)(struct ivpu_bo *bo);
- int (*map_pages)(struct ivpu_bo *bo);
- void (*unmap_pages)(struct ivpu_bo *bo);
+ u32 job_status; /* Valid only for command buffer */
+ bool mmu_mapped;
};
int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
-void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
-void ivpu_bo_list_print(struct drm_device *dev);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
-struct ivpu_bo *
-ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
+struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
+struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
void ivpu_bo_free_internal(struct ivpu_bo *bo);
-struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
-void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
+
static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
{
- return container_of(obj, struct ivpu_bo, base);
+ return container_of(obj, struct ivpu_bo, base.base);
}
static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo)
{
- return bo->kvaddr;
+ return bo->base.vaddr;
}
static inline size_t ivpu_bo_size(struct ivpu_bo *bo)
{
- return bo->base.size;
-}
-
-static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
-{
- if (offset > ivpu_bo_size(bo) || !bo->pages)
- return NULL;
-
- return bo->pages[offset / PAGE_SIZE];
+ return bo->base.base.size;
}
static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
@@ -96,20 +63,9 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
-static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
-{
- if (bo->flags & DRM_IVPU_BO_WC)
- return pgprot_writecombine(prot);
-
- if (bo->flags & DRM_IVPU_BO_UNCACHED)
- return pgprot_noncached(prot);
-
- return prot;
-}
-
static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
{
- return to_ivpu_device(bo->base.dev);
+ return to_ivpu_device(bo->base.base.dev);
}
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 1079e0625..094c659d2 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -15,9 +15,13 @@ struct ivpu_hw_ops {
int (*power_down)(struct ivpu_device *vdev);
int (*reset)(struct ivpu_device *vdev);
bool (*is_idle)(struct ivpu_device *vdev);
+ int (*wait_for_idle)(struct ivpu_device *vdev);
void (*wdt_disable)(struct ivpu_device *vdev);
void (*diagnose_failure)(struct ivpu_device *vdev);
+ u32 (*profiling_freq_get)(struct ivpu_device *vdev);
+ void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -58,6 +62,8 @@ struct ivpu_hw_info {
u32 sku;
u16 config;
int dma_bits;
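+ /* Captured on D0i3 entry: host boottime and the VPU free-running counter */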
+ ktime_t d0i3_entry_host_ts;
+ u64 d0i3_entry_vpu_ts;
};
extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
@@ -85,6 +91,11 @@ static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
return vdev->hw->ops->is_idle(vdev);
};
+static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->wait_for_idle(vdev);
+};
+
static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
{
ivpu_dbg(vdev, PM, "HW power down\n");
@@ -104,12 +115,27 @@ static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
vdev->hw->ops->wdt_disable(vdev);
};
+static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->profiling_freq_get(vdev);
+};
+
+static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ return vdev->hw->ops->profiling_freq_drive(vdev, enable);
+};
+
/* Register indirect accesses */
static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_pll_freq_get(vdev);
};
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index e658fcf84..32bb772e0 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -29,6 +29,7 @@
#define PLL_REF_CLK_FREQ (50 * 1000000)
#define PLL_SIMULATION_FREQ (10 * 1000000)
+#define PLL_PROF_CLK_FREQ (38400 * 1000)
#define PLL_DEFAULT_EPP_VALUE 0x80
#define TIM_SAFE_ENABLE 0xf1d0dead
@@ -37,7 +38,7 @@
#define TIMEOUT_US (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
-#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
@@ -96,6 +97,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = 10;
+ vdev->timeout.d0i3_entry_msg = 5;
}
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
@@ -508,16 +510,6 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-{
- ivpu_boot_dpu_active_drive(vdev, false);
- ivpu_boot_pwr_island_isolation_drive(vdev, true);
- ivpu_boot_pwr_island_trickle_drive(vdev, false);
- ivpu_boot_pwr_island_drive(vdev, false);
-
- return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-}
-
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
@@ -614,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
return 0;
}
+static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret = 0;
- if (ivpu_boot_pwr_domain_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable power domain\n");
+ if (ivpu_hw_37xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
@@ -659,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
+ /* PLL requests may fail when powering down, so issue WP 0 here */
+ ret = ivpu_pll_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
+
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -722,10 +744,23 @@ static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
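+/* Poll the buttress VPU_STATUS register until the IDLE bit is set or the timeout expires */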
+static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+}
+
+static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
+{
+ vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
+ vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
+}
+
static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
+ ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev);
+
if (!ivpu_hw_37xx_is_idle(vdev))
ivpu_warn(vdev, "VPU not idle during power down\n");
@@ -760,12 +795,22 @@ static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev)
+{
+ return PLL_PROF_CLK_FREQ;
+}
+
+static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ /* Profiling freq is a debug feature; it is unavailable on VPU 37XX */
+}
+
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
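+ /* e.g. ratio 32: pll_clock = 50 MHz * 32 = 1.6 GHz; cpu_clock is then 2/4 or 2/5 of that */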
- if ((config & 0xff) == PLL_RATIO_4_3)
+ if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
cpu_clock = pll_clock * 2 / 4;
else
cpu_clock = pll_clock * 2 / 5;
@@ -784,7 +829,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
- return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+ return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
}
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -850,38 +895,35 @@ static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
ivpu_hw_wdt_disable(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ if (!status)
+ return false;
+
REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
- ivpu_ipc_irq_handler(vdev);
+ ivpu_ipc_irq_handler(vdev, wake_thread);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
@@ -898,17 +940,17 @@ static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
- return status;
+ return true;
}
/* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
- if (status == 0)
- return 0;
+ if (!status)
+ return false;
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
@@ -942,25 +984,29 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
- return status;
+ return true;
}
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
- u32 ret_irqv, ret_irqb;
+ bool irqv_handled, irqb_handled, wake_thread = false;
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
- ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
+ irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread);
+ irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq);
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
- return IRQ_RETVAL(ret_irqb | ret_irqv);
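+ /* Waking the IRQ thread takes precedence: callback-based IPC messages are processed there */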
+ if (wake_thread)
+ return IRQ_WAKE_THREAD;
+ if (irqv_handled || irqb_handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
}
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
@@ -997,12 +1043,16 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.info_init = ivpu_hw_37xx_info_init,
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
+ .wait_for_idle = ivpu_hw_37xx_wait_for_idle,
.power_down = ivpu_hw_37xx_power_down,
.reset = ivpu_hw_37xx_reset,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
+ .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
+ .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
index 4083beb5e..f6fec1919 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h
@@ -240,6 +240,8 @@
#define VPU_37XX_CPU_SS_TIM_GEN_CONFIG 0x06021008u
#define VPU_37XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+#define VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT 0x06029000u
+
#define VPU_37XX_CPU_SS_DOORBELL_0 0x06300000u
#define VPU_37XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 40f9ee99e..0be353ad8 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -39,6 +39,7 @@
#define TIMEOUT_US (150 * USEC_PER_MSEC)
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u
@@ -139,18 +140,21 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
vdev->timeout.tdr = 2000000;
vdev->timeout.reschedule_suspend = 1000;
vdev->timeout.autosuspend = -1;
+ vdev->timeout.d0i3_entry_msg = 500;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = -1;
+ vdev->timeout.d0i3_entry_msg = 100;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
vdev->timeout.reschedule_suspend = 10;
vdev->timeout.autosuspend = 10;
+ vdev->timeout.d0i3_entry_msg = 5;
}
}
@@ -737,7 +741,7 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
return 0;
}
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@@ -759,6 +763,23 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
return ret;
}
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (ivpu_hw_40xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset VPU IP\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@@ -819,12 +840,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev)
{
int ret;
- ret = ivpu_hw_40xx_reset(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to reset HW: %d\n", ret);
- return ret;
- }
-
ret = ivpu_hw_40xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -893,11 +908,24 @@ static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev)
REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
+static int ivpu_hw_40xx_wait_for_idle(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
+}
+
+static void ivpu_hw_40xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev)
+{
+ vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
+ vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
+}
+
static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
- if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+ ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
+
+ if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
ivpu_warn(vdev, "Failed to reset the VPU\n");
if (ivpu_pll_disable(vdev)) {
@@ -928,6 +956,19 @@ static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
+static u32 ivpu_hw_40xx_profiling_freq_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->pll.profiling_freq;
+}
+
+static void ivpu_hw_40xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
+{
+ if (enable)
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
+ else
+ vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+}
+
/* Register indirect accesses */
static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
@@ -939,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio);
+}
+
static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1003,28 +1049,27 @@ static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
/* TODO: For LNN hang consider engine reset instead of full recovery */
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_hw_wdt_disable(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
-static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
{
u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
- irqreturn_t ret = IRQ_NONE;
if (!status)
- return IRQ_NONE;
+ return false;
REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
@@ -1032,7 +1077,7 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
- ret |= ivpu_ipc_irq_handler(vdev);
+ ivpu_ipc_irq_handler(vdev, wake_thread);
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
@@ -1049,17 +1094,17 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
- return ret;
+ return true;
}
/* Handler for IRQs from Buttress core (irqB) */
-static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
bool schedule_recovery = false;
u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
- if (status == 0)
- return IRQ_NONE;
+ if (!status)
+ return false;
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
@@ -1109,28 +1154,29 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
- return IRQ_HANDLED;
+ return true;
}
static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
{
+ bool irqv_handled, irqb_handled, wake_thread = false;
struct ivpu_device *vdev = ptr;
- irqreturn_t ret = IRQ_NONE;
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
- ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
- ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+ irqv_handled = ivpu_hw_40xx_irqv_handler(vdev, irq, &wake_thread);
+ irqb_handled = ivpu_hw_40xx_irqb_handler(vdev, irq);
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
- if (ret & IRQ_WAKE_THREAD)
+ if (wake_thread)
return IRQ_WAKE_THREAD;
-
- return ret;
+ if (irqv_handled || irqb_handled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
}
static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
@@ -1180,12 +1226,16 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.info_init = ivpu_hw_40xx_info_init,
.power_up = ivpu_hw_40xx_power_up,
.is_idle = ivpu_hw_40xx_is_idle,
+ .wait_for_idle = ivpu_hw_40xx_wait_for_idle,
.power_down = ivpu_hw_40xx_power_down,
.reset = ivpu_hw_40xx_reset,
.boot_fw = ivpu_hw_40xx_boot_fw,
.wdt_disable = ivpu_hw_40xx_wdt_disable,
.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
+ .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
+ .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
.reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+ .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
.reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index a4ca40b18..f4c539248 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/genalloc.h>
#include <linux/highmem.h>
-#include <linux/kthread.h>
+#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include "ivpu_drv.h"
@@ -17,19 +17,12 @@
#include "ivpu_pm.h"
#define IPC_MAX_RX_MSG 128
-#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD)
struct ivpu_ipc_tx_buf {
struct ivpu_ipc_hdr ipc;
struct vpu_jsm_msg jsm;
};
-struct ivpu_ipc_rx_msg {
- struct list_head link;
- struct ivpu_ipc_hdr *ipc_hdr;
- struct vpu_jsm_msg *jsm_msg;
-};
-
static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
@@ -139,8 +132,49 @@ static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
}
-void
-ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
+static void
+ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg;
+
+ lockdep_assert_held(&ipc->cons_lock);
+ lockdep_assert_irqs_disabled();
+
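+ /* Hard IRQ context: the allocation must be atomic; on failure the RX buffers are simply marked free again */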
+ rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
+ if (!rx_msg) {
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ return;
+ }
+
+ atomic_inc(&ipc->rx_msg_count);
+
+ rx_msg->ipc_hdr = ipc_hdr;
+ rx_msg->jsm_msg = jsm_msg;
+ rx_msg->callback = cons->rx_callback;
+
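+ /* Callback messages are queued for the IRQ thread; synchronous consumers are woken directly */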
+ if (rx_msg->callback) {
+ list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
+ } else {
+ spin_lock(&cons->rx_lock);
+ list_add_tail(&rx_msg->link, &cons->rx_msg_list);
+ spin_unlock(&cons->rx_lock);
+ wake_up(&cons->rx_msg_wq);
+ }
+}
+
+static void
+ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
+{
+ list_del(&rx_msg->link);
+ ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ atomic_dec(&vdev->ipc->rx_msg_count);
+ kfree(rx_msg);
+}
+
+void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
@@ -148,13 +182,15 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
cons->channel = channel;
cons->tx_vpu_addr = 0;
cons->request_id = 0;
- spin_lock_init(&cons->rx_msg_lock);
+ cons->aborted = false;
+ cons->rx_callback = rx_callback;
+ spin_lock_init(&cons->rx_lock);
INIT_LIST_HEAD(&cons->rx_msg_list);
init_waitqueue_head(&cons->rx_msg_wq);
- spin_lock_irq(&ipc->cons_list_lock);
+ spin_lock_irq(&ipc->cons_lock);
list_add_tail(&cons->link, &ipc->cons_list);
- spin_unlock_irq(&ipc->cons_list_lock);
+ spin_unlock_irq(&ipc->cons_lock);
}
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
@@ -162,18 +198,14 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
- spin_lock_irq(&ipc->cons_list_lock);
+ spin_lock_irq(&ipc->cons_lock);
list_del(&cons->link);
- spin_unlock_irq(&ipc->cons_list_lock);
-
- spin_lock_irq(&cons->rx_msg_lock);
- list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
- list_del(&rx_msg->link);
- ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
- atomic_dec(&ipc->rx_msg_count);
- kfree(rx_msg);
- }
- spin_unlock_irq(&cons->rx_msg_lock);
+ spin_unlock_irq(&ipc->cons_lock);
+
+ spin_lock_irq(&cons->rx_lock);
+ list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ spin_unlock_irq(&cons->rx_lock);
ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
@@ -202,52 +234,61 @@ unlock:
return ret;
}
+static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
+{
+ bool ret;
+
+ spin_lock_irq(&cons->rx_lock);
+ ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
+ spin_unlock_irq(&cons->rx_lock);
+
+ return ret;
+}
+
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf,
- struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
+ struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
- struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg;
int wait_ret, ret = 0;
+ if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
+ return -EINVAL;
+
wait_ret = wait_event_timeout(cons->rx_msg_wq,
- (IS_KTHREAD() && kthread_should_stop()) ||
- !list_empty(&cons->rx_msg_list),
+ ivpu_ipc_rx_need_wakeup(cons),
msecs_to_jiffies(timeout_ms));
- if (IS_KTHREAD() && kthread_should_stop())
- return -EINTR;
-
if (wait_ret == 0)
return -ETIMEDOUT;
- spin_lock_irq(&cons->rx_msg_lock);
+ spin_lock_irq(&cons->rx_lock);
+ if (cons->aborted) {
+ spin_unlock_irq(&cons->rx_lock);
+ return -ECANCELED;
+ }
rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
if (!rx_msg) {
- spin_unlock_irq(&cons->rx_msg_lock);
+ spin_unlock_irq(&cons->rx_lock);
return -EAGAIN;
}
- list_del(&rx_msg->link);
- spin_unlock_irq(&cons->rx_msg_lock);
if (ipc_buf)
memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
if (rx_msg->jsm_msg) {
- u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));
+ u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
ret = -EBADMSG;
}
- if (ipc_payload)
- memcpy(ipc_payload, rx_msg->jsm_msg, size);
+ if (jsm_msg)
+ memcpy(jsm_msg, rx_msg->jsm_msg, size);
}
- ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
- atomic_dec(&ipc->rx_msg_count);
- kfree(rx_msg);
-
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ spin_unlock_irq(&cons->rx_lock);
return ret;
}
@@ -260,7 +301,7 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
struct ivpu_ipc_consumer cons;
int ret;
- ivpu_ipc_consumer_add(vdev, &cons, channel);
+ ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
ret = ivpu_ipc_send(vdev, &cons, req);
if (ret) {
@@ -285,33 +326,41 @@ consumer_del:
return ret;
}
-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms)
+int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
{
struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg hb_resp;
int ret, hb_ret;
- ret = ivpu_rpm_get(vdev);
- if (ret < 0)
- return ret;
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
- ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp,
- channel, timeout_ms);
+ ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
if (ret != -ETIMEDOUT)
- goto rpm_put;
+ return ret;
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
vdev->timeout.jsm);
- if (hb_ret == -ETIMEDOUT) {
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
- }
+ if (hb_ret == -ETIMEDOUT)
+ ivpu_pm_trigger_recovery(vdev, "IPC timeout");
+
+ return ret;
+}
+
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
+{
+ int ret;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
-rpm_put:
ivpu_rpm_put(vdev);
return ret;
}
@@ -329,35 +378,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons
return false;
}
-static void
-ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
- struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
-{
- struct ivpu_ipc_info *ipc = vdev->ipc;
- struct ivpu_ipc_rx_msg *rx_msg;
- unsigned long flags;
-
- lockdep_assert_held(&ipc->cons_list_lock);
-
- rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
- if (!rx_msg) {
- ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
- return;
- }
-
- atomic_inc(&ipc->rx_msg_count);
-
- rx_msg->ipc_hdr = ipc_hdr;
- rx_msg->jsm_msg = jsm_msg;
-
- spin_lock_irqsave(&cons->rx_msg_lock, flags);
- list_add_tail(&rx_msg->link, &cons->rx_msg_list);
- spin_unlock_irqrestore(&cons->rx_msg_lock, flags);
-
- wake_up(&cons->rx_msg_wq);
-}
-
-int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons;
@@ -375,7 +396,7 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
if (vpu_addr == REG_IO_ERROR) {
ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
- return -EIO;
+ return;
}
ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
@@ -405,15 +426,15 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
dispatched = false;
- spin_lock_irqsave(&ipc->cons_list_lock, flags);
+ spin_lock_irqsave(&ipc->cons_lock, flags);
list_for_each_entry(cons, &ipc->cons_list, link) {
if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
- ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
+ ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
dispatched = true;
break;
}
}
- spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+ spin_unlock_irqrestore(&ipc->cons_lock, flags);
if (!dispatched) {
ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
@@ -421,7 +442,28 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- return 0;
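+ /* Wake the IRQ thread only when callback messages were actually queued */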
+ if (wake_thread)
+ *wake_thread = !list_empty(&ipc->cb_msg_list);
+}
+
+irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg, *r;
+ struct list_head cb_msg_list;
+
+ INIT_LIST_HEAD(&cb_msg_list);
+
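+ /* Splice pending callback messages under the lock, then run the callbacks without holding it */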
+ spin_lock_irq(&ipc->cons_lock);
+ list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
+ spin_unlock_irq(&ipc->cons_lock);
+
+ list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
+ rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ }
+
+ return IRQ_HANDLED;
}
int ivpu_ipc_init(struct ivpu_device *vdev)
@@ -456,10 +498,14 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
goto err_free_rx;
}
+ spin_lock_init(&ipc->cons_lock);
INIT_LIST_HEAD(&ipc->cons_list);
- spin_lock_init(&ipc->cons_list_lock);
- drmm_mutex_init(&vdev->drm, &ipc->lock);
-
+ INIT_LIST_HEAD(&ipc->cb_msg_list);
+ ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+ goto err_free_rx;
+ }
ivpu_ipc_reset(vdev);
return 0;
@@ -472,6 +518,13 @@ err_free_tx:
void ivpu_ipc_fini(struct ivpu_device *vdev)
{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ drm_WARN_ON(&vdev->drm, ipc->on);
+ drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
+ drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
+ drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
+
ivpu_ipc_mem_fini(vdev);
}
@@ -488,16 +541,27 @@ void ivpu_ipc_disable(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_consumer *cons, *c;
- unsigned long flags;
+ struct ivpu_ipc_rx_msg *rx_msg, *r;
+
+ drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
mutex_lock(&ipc->lock);
ipc->on = false;
mutex_unlock(&ipc->lock);
- spin_lock_irqsave(&ipc->cons_list_lock, flags);
- list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
+ spin_lock_irq(&ipc->cons_lock);
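+ /* Abort synchronous consumers so blocked receivers wake up and return -ECANCELED */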
+ list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
+ spin_lock(&cons->rx_lock);
+ if (!cons->rx_callback)
+ cons->aborted = true;
+ list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
+ ivpu_ipc_rx_msg_del(vdev, rx_msg);
+ spin_unlock(&cons->rx_lock);
wake_up(&cons->rx_msg_wq);
- spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+ }
+ spin_unlock_irq(&ipc->cons_lock);
+
+ drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}
void ivpu_ipc_reset(struct ivpu_device *vdev)
@@ -505,6 +569,7 @@ void ivpu_ipc_reset(struct ivpu_device *vdev)
struct ivpu_ipc_info *ipc = vdev->ipc;
mutex_lock(&ipc->lock);
+ drm_WARN_ON(&vdev->drm, ipc->on);
memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index 68f5b6668..40ca3cc4e 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -42,13 +42,26 @@ struct ivpu_ipc_hdr {
u8 status;
} __packed __aligned(IVPU_IPC_ALIGNMENT);
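+/* RX callbacks run from the IPC IRQ thread; consumers without a callback receive synchronously */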
+typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev,
+ struct ivpu_ipc_hdr *ipc_hdr,
+ struct vpu_jsm_msg *jsm_msg);
+
+struct ivpu_ipc_rx_msg {
+ struct list_head link;
+ struct ivpu_ipc_hdr *ipc_hdr;
+ struct vpu_jsm_msg *jsm_msg;
+ ivpu_ipc_rx_callback_t callback;
+};
+
struct ivpu_ipc_consumer {
struct list_head link;
u32 channel;
u32 tx_vpu_addr;
u32 request_id;
+ bool aborted;
+ ivpu_ipc_rx_callback_t rx_callback;
- spinlock_t rx_msg_lock; /* Protects rx_msg_list */
+ spinlock_t rx_lock; /* Protects rx_msg_list and aborted */
struct list_head rx_msg_list;
wait_queue_head_t rx_msg_wq;
};
@@ -60,8 +73,9 @@ struct ivpu_ipc_info {
atomic_t rx_msg_count;
- spinlock_t cons_list_lock; /* Protects cons_list */
+ spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */
struct list_head cons_list;
+ struct list_head cb_msg_list;
atomic_t request_id;
struct mutex lock; /* Lock on status */
@@ -75,19 +89,22 @@ void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
-int ivpu_ipc_irq_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
+irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
- u32 channel);
+ u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
- struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload,
+ struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
unsigned long timeout_ms);
+int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms);
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms);
#endif /* __IVPU_IPC_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 8983e3a4f..e70cfb859 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -7,7 +7,6 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
-#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>
@@ -24,10 +23,6 @@
#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535
-static unsigned int ivpu_tdr_timeout_ms;
-module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644);
-MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
-
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
ivpu_hw_reg_db_set(vdev, cmdq->db_id);
@@ -117,22 +112,20 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
}
}
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
int i;
- mutex_lock(&file_priv->lock);
+ lockdep_assert_held(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_release_locked(file_priv, i);
-
- mutex_unlock(&file_priv->lock);
}
/*
* Mark the doorbell as unregistered and reset job queue pointers.
* This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time the job queue is used it
* will be registered again.
*/
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -166,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
- if (!file_priv)
- continue;
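+ /* context_list_lock keeps every file_priv alive, so per-context reference counting is no longer needed */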
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
ivpu_cmdq_reset_all(file_priv);
- ivpu_file_priv_put(&file_priv);
- }
+ mutex_unlock(&vdev->context_list_lock);
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -196,6 +187,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
entry->batch_buf_addr = job->cmd_buf_vpu_addr;
entry->job_id = job->job_id;
entry->flags = 0;
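+ /* In null-submission test mode the job is flagged so the FW can complete it without executing it */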
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
+ entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
wmb(); /* Ensure that tail is updated after filling entry */
header->tail = next_entry;
wmb(); /* Flush WC buffer for jobq header */
@@ -246,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
return &fence->base;
}
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
-{
- struct ivpu_device *vdev = job->vdev;
-
- kref_get(&job->ref);
- *link = job;
-
- ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
+static void ivpu_job_destroy(struct ivpu_job *job)
{
- struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
struct ivpu_device *vdev = job->vdev;
u32 i;
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+ job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
- drm_gem_object_put(&job->bos[i]->base);
+ drm_gem_object_put(&job->bos[i]->base.base);
dma_fence_put(job->done_fence);
ivpu_file_priv_put(&job->file_priv);
-
- ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
kfree(job);
-
- /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
- ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
- struct ivpu_device *vdev = job->vdev;
-
- ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
- kref_put(&job->ref, job_release);
}
static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
- int ret;
-
- ret = ivpu_rpm_get(vdev);
- if (ret < 0)
- return NULL;
job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
if (!job)
- goto err_rpm_put;
-
- kref_init(&job->ref);
+ return NULL;
job->vdev = vdev;
job->engine_idx = engine_idx;
@@ -313,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->file_priv = ivpu_file_priv_get(file_priv);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
return job;
err_free_job:
kfree(job);
-err_rpm_put:
- ivpu_rpm_put(vdev);
return NULL;
}
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
struct ivpu_job *job;
@@ -332,7 +294,7 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
return -ENOENT;
if (job->file_priv->has_mmu_faults)
- job_status = VPU_JSM_STATUS_ABORTED;
+ job_status = DRM_IVPU_JOB_STATUS_ABORTED;
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
@@ -340,21 +302,11 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
- job_put(job);
- return 0;
-}
-
-static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
-{
- struct vpu_ipc_msg_payload_job_done *payload;
- struct vpu_jsm_msg *job_ret_msg = msg;
- int ret;
-
- payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;
+ ivpu_job_destroy(job);
+ ivpu_stop_job_timeout_detection(vdev);
- ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
- if (ret)
- ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
+ ivpu_rpm_put(vdev);
+ return 0;
}
void ivpu_jobs_abort_all(struct ivpu_device *vdev)
@@ -363,10 +315,10 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
unsigned long id;
xa_for_each(&vdev->submitted_jobs_xa, id, job)
- ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
@@ -374,51 +326,65 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
struct ivpu_cmdq *cmdq;
int ret;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
if (!cmdq) {
- ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
- file_priv->ctx.id, job->engine_idx);
+ ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d\n",
+ file_priv->ctx.id, job->engine_idx);
ret = -EINVAL;
- goto err_unlock;
+ goto err_unlock_file_priv;
}
job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
- job_get(job, &job);
- ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
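+ /* Hold the xarray lock from id allocation until the doorbell rings so a completion cannot observe a half-submitted job */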
+ xa_lock(&vdev->submitted_jobs_xa);
+ ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
if (ret) {
- ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
- goto err_job_put;
+ ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+ file_priv->ctx.id);
+ ret = -EBUSY;
+ goto err_unlock_submitted_jobs_xa;
}
ret = ivpu_cmdq_push_job(cmdq, job);
if (ret)
- goto err_xa_erase;
+ goto err_erase_xa;
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
- job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
- job->engine_idx, cmdq->jobq->header.tail);
+ ivpu_start_job_timeout_detection(vdev);
- if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
- ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
cmdq->jobq->header.head = cmdq->jobq->header.tail;
wmb(); /* Flush WC buffer for jobq header */
} else {
ivpu_cmdq_ring_db(vdev, cmdq);
}
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, job->engine_idx,
+ job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+ xa_unlock(&vdev->submitted_jobs_xa);
+
mutex_unlock(&file_priv->lock);
+
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
return 0;
-err_xa_erase:
- xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
- job_put(job);
-err_unlock:
+err_erase_xa:
+ __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+ xa_unlock(&vdev->submitted_jobs_xa);
+err_unlock_file_priv:
mutex_unlock(&file_priv->lock);
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -448,7 +414,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
}
bo = job->bos[CMD_BUF_IDX];
- if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
+ if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
ivpu_warn(vdev, "Buffer is already in use\n");
return -EBUSY;
}
@@ -468,7 +434,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
}
for (i = 0; i < buf_count; i++) {
- ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
+ ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
if (ret) {
ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
@@ -477,7 +443,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
for (i = 0; i < buf_count; i++) {
usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
- dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
+ dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
}
unlock_reservations:
@@ -500,6 +466,9 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (params->engine > DRM_IVPU_ENGINE_COPY)
return -EINVAL;
+ if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
@@ -521,102 +490,82 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
params->buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
- goto free_handles;
+ goto err_free_handles;
}
if (!drm_dev_enter(&vdev->drm, &idx)) {
ret = -ENODEV;
- goto free_handles;
+ goto err_free_handles;
}
ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
file_priv->ctx.id, params->buffer_count);
- job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
- goto dev_exit;
+ goto err_exit_dev;
}
ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
params->commands_offset);
if (ret) {
- ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
- goto job_put;
+ ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+ goto err_destroy_job;
}
- ret = ivpu_direct_job_submission(job);
- if (ret) {
- dma_fence_signal(job->done_fence);
- ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
- }
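+ /* Taking the reset lock for read excludes submission from a concurrent recovery */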
+ down_read(&vdev->pm->reset_lock);
+ ret = ivpu_job_submit(job);
+ up_read(&vdev->pm->reset_lock);
+ if (ret)
+ goto err_signal_fence;
-job_put:
- job_put(job);
-dev_exit:
drm_dev_exit(idx);
-free_handles:
kfree(buf_handles);
+ return ret;
+err_signal_fence:
+ dma_fence_signal(job->done_fence);
+err_destroy_job:
+ ivpu_job_destroy(job);
+err_exit_dev:
+ drm_dev_exit(idx);
+err_free_handles:
+ kfree(buf_handles);
return ret;
}
-static int ivpu_job_done_thread(void *arg)
+static void
+ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+ struct vpu_jsm_msg *jsm_msg)
{
- struct ivpu_device *vdev = (struct ivpu_device *)arg;
- struct ivpu_ipc_consumer cons;
- struct vpu_jsm_msg jsm_msg;
- bool jobs_submitted;
- unsigned int timeout;
+ struct vpu_ipc_msg_payload_job_done *payload;
int ret;
- ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
-
- ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
-
- while (!kthread_should_stop()) {
- timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
- jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
- ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
- if (!ret) {
- ivpu_job_done_message(vdev, &jsm_msg);
- } else if (ret == -ETIMEDOUT) {
- if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
- ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
- }
- }
+ if (!jsm_msg) {
+ ivpu_err(vdev, "IPC message has no JSM payload\n");
+ return;
}
- ivpu_ipc_consumer_del(vdev, &cons);
-
- ivpu_jobs_abort_all(vdev);
+ if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
+ ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
+ return;
+ }
- ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
- return 0;
+ payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
+ ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
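+ /* Re-arm hang detection only while other jobs remain in flight */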
+ if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
}
-int ivpu_job_done_thread_init(struct ivpu_device *vdev)
+void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
{
- struct task_struct *thread;
-
- thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
- if (IS_ERR(thread)) {
- ivpu_err(vdev, "Failed to start job completion thread\n");
- return -EIO;
- }
-
- get_task_struct(thread);
- wake_up_process(thread);
-
- vdev->job_done_thread = thread;
-
- return 0;
+ ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
+ VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
}
-void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
- kthread_stop_put(vdev->job_done_thread);
+ ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 5514c2d8a..ca4984071 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -43,7 +43,6 @@ struct ivpu_cmdq {
will update the job status
*/
struct ivpu_job {
- struct kref ref;
struct ivpu_device *vdev;
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
@@ -56,11 +55,11 @@ struct ivpu_job {
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
-int ivpu_job_done_thread_init(struct ivpu_device *vdev);
-void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
+void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
+void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 0c2fe7142..8cea0dd73 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -4,6 +4,7 @@
*/
#include "ivpu_drv.h"
+#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
@@ -36,6 +37,17 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
@@ -65,6 +77,12 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
}
#undef IVPU_CASE_TO_STR
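The IVPU_CASE_TO_STR entries added above rely on the standard preprocessor stringification idiom; a minimal standalone illustration (enumerator names invented for the example):

#include <stdio.h>

#define CASE_TO_STR(x) case x: return #x

enum msg_type { MSG_JOB_DONE = 0x2100, MSG_STATE_DUMP = 0x11FF };

static const char *msg_type_to_str(enum msg_type type)
{
	/* #x expands to the literal enumerator name at compile time. */
	switch (type) {
	CASE_TO_STR(MSG_JOB_DONE);
	CASE_TO_STR(MSG_STATE_DUMP);
	}
	return "Unknown";
}

int main(void)
{
	printf("%s\n", msg_type_to_str(MSG_STATE_DUMP)); /* MSG_STATE_DUMP */
	return 0;
}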
@@ -243,3 +261,23 @@ int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
+
+int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (IVPU_WA(disable_d0i3_msg))
+ return 0;
+
+ req.payload.pwr_d0i3_enter.send_response = 1;
+
+ ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
+ &resp, VPU_IPC_CHAN_GEN_CMD,
+ vdev->timeout.d0i3_entry_msg);
+ if (ret)
+ return ret;
+
+ return ivpu_hw_wait_for_idle(vdev);
+}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
index 66979a948..ae75e5dbc 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.h
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -22,4 +22,5 @@ int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destinati
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
u64 trace_hw_component_mask);
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
+int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev);
#endif
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 989894617..91bd64065 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,6 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
+#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@@ -71,10 +72,10 @@
#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
-#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
-#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
-#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
+#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
+#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT)
#define IVPU_MMU_CMDQ_CMD_SIZE 16
#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
@@ -230,7 +231,12 @@
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))
-static char *ivpu_mmu_event_to_str(u32 cmd)
+#define IVPU_MMU_CERROR_NONE 0x0
+#define IVPU_MMU_CERROR_ILL 0x1
+#define IVPU_MMU_CERROR_ABT 0x2
+#define IVPU_MMU_CERROR_ATC_INV_SYNC 0x3
+
+static const char *ivpu_mmu_event_to_str(u32 cmd)
{
switch (cmd) {
case IVPU_MMU_EVT_F_UUT:
@@ -276,6 +282,22 @@ static char *ivpu_mmu_event_to_str(u32 cmd)
}
}
+static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
+{
+ switch (err) {
+ case IVPU_MMU_CERROR_NONE:
+ return "No CMDQ Error";
+ case IVPU_MMU_CERROR_ILL:
+ return "Illegal command";
+ case IVPU_MMU_CERROR_ABT:
+ return "External abort on CMDQ read";
+ case IVPU_MMU_CERROR_ATC_INV_SYNC:
+ return "Sync failed to complete ATS invalidation";
+ default:
+ return "Unknown CMDQ Error";
+ }
+}
+
static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
u32 val_ref;
@@ -453,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
return 0;
}
+static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
+}
+
+static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
+}
+
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
- struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
- u64 *queue_buffer = q->base;
- int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+ struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+ u64 *queue_buffer = cmdq->base;
+ int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
- if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+ if (ivpu_mmu_queue_is_full(cmdq)) {
ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
return -EBUSY;
}
queue_buffer[idx] = data0;
queue_buffer[idx + 1] = data1;
- q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+ cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
@@ -479,10 +513,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
u64 val;
int ret;
- val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) |
- FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) |
- FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
- FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
+ val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);
ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
if (ret)
@@ -492,8 +523,16 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
- if (ret)
- ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
+ if (ret) {
+ u32 err;
+
+ val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
+ err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);
+
+ ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
+ ivpu_mmu_cmdq_err_to_str(err));
+ ivpu_hw_diagnose_failure(vdev);
+ }
return ret;
}
@@ -749,9 +788,12 @@ int ivpu_mmu_init(struct ivpu_device *vdev)
ivpu_dbg(vdev, MMU, "Init..\n");
- drmm_mutex_init(&vdev->drm, &mmu->lock);
ivpu_mmu_config_check(vdev);
+ ret = drmm_mutex_init(&vdev->drm, &mmu->lock);
+ if (ret)
+ return ret;
+
ret = ivpu_mmu_structs_alloc(vdev);
if (ret)
return ret;
@@ -843,18 +885,15 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
- if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ if (ivpu_mmu_queue_is_empty(evtq))
return NULL;
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
-
return evt;
}
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
- bool schedule_recovery = false;
u32 *event;
u32 ssid;
@@ -864,14 +903,22 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
ivpu_mmu_dump_event(vdev, event);
ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
- schedule_recovery = true;
- else
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ ivpu_pm_trigger_recovery(vdev, "MMU event");
+ return;
+ }
+
+ ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
}
+}
- if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+{
+ u32 *event;
+
+ while ((event = ivpu_mmu_get_event(vdev)) != NULL)
+ ivpu_mmu_dump_event(vdev, event);
}
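The queue rework above (IVPU_MMU_Q_WRAP_MASK, IVPU_MMU_Q_WRP and the is_full/is_empty helpers) uses the classic SMMU-style wrap-bit scheme: prod and cons are (LOG2+1)-bit counters whose low bits index the ring while the extra top bit distinguishes a full queue from an empty one. A standalone sketch of the invariants, assuming the same 16-entry geometry:

#include <assert.h>
#include <stdio.h>

#define Q_COUNT_LOG2 4
#define Q_COUNT      (1u << Q_COUNT_LOG2)	/* 16 entries */
#define Q_WRAP_MASK  ((Q_COUNT << 1) - 1)	/* == GENMASK(Q_COUNT_LOG2, 0) == 0x1f */
#define Q_IDX(v)     ((v) & (Q_COUNT - 1))
#define Q_WRP(v)     ((v) & Q_COUNT)		/* the wrap bit */

static int is_full(unsigned p, unsigned c)  { return Q_IDX(p) == Q_IDX(c) && Q_WRP(p) != Q_WRP(c); }
static int is_empty(unsigned p, unsigned c) { return Q_IDX(p) == Q_IDX(c) && Q_WRP(p) == Q_WRP(c); }

int main(void)
{
	unsigned prod = 0, cons = 0;

	assert(is_empty(prod, cons));
	for (unsigned i = 0; i < Q_COUNT; i++)
		prod = (prod + 1) & Q_WRAP_MASK;	/* 16 writes fill the ring */
	assert(is_full(prod, cons));		/* same index, different wrap bit */
	cons = (cons + 1) & Q_WRAP_MASK;
	assert(!is_full(prod, cons) && !is_empty(prod, cons));
	printf("wrap-bit queue invariants hold\n");
	return 0;
}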
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index cb5511268..6fa35c240 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -46,5 +46,6 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index c1050a2df..fe6161299 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -5,6 +5,9 @@
#include <linux/bitfield.h>
#include <linux/highmem.h>
+#include <linux/set_memory.h>
+
+#include <drm/drm_cache.h>
#include "ivpu_drv.h"
#include "ivpu_hw.h"
@@ -39,12 +42,57 @@
#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
+static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma)
+{
+ dma_addr_t dma_addr;
+ struct page *page;
+ void *cpu;
+
+ page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ set_pages_array_wc(&page, 1);
+
+ dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(vdev->drm.dev, dma_addr))
+ goto err_free_page;
+
+ cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!cpu)
+ goto err_dma_unmap_page;
+
+ *dma = dma_addr;
+ return cpu;
+
+err_dma_unmap_page:
+ dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+err_free_page:
+ put_page(page);
+ return NULL;
+}
+
+static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
+{
+ struct page *page;
+
+ if (cpu_addr) {
+ page = vmalloc_to_page(cpu_addr);
+ vunmap(cpu_addr);
+ dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ set_pages_array_wb(&page, 1);
+ put_page(page);
+ }
+}
+
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
dma_addr_t pgd_dma;
- pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
- GFP_KERNEL);
+ pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
if (!pgtable->pgd_dma_ptr)
return -ENOMEM;
@@ -53,13 +101,6 @@ static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtab
return 0;
}
-static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
-{
- if (cpu_addr)
- dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
- dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
-}
-
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
@@ -84,19 +125,19 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt
pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
- ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
+ ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma);
}
kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
- ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
+ ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
}
kfree(pgtable->pmd_ptrs[pgd_idx]);
kfree(pgtable->pte_ptrs[pgd_idx]);
- ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
+ ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
}
- ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+ ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
}
static u64*
@@ -108,7 +149,7 @@ ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
if (pud_dma_ptr)
return pud_dma_ptr;
- pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
+ pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma);
if (!pud_dma_ptr)
return NULL;
@@ -131,7 +172,7 @@ err_free_pmd_ptrs:
kfree(pgtable->pmd_ptrs[pgd_idx]);
err_free_pud_dma_ptr:
- ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
+ ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma);
return NULL;
}
@@ -145,7 +186,7 @@ ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
if (pmd_dma_ptr)
return pmd_dma_ptr;
- pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+ pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma);
if (!pmd_dma_ptr)
return NULL;
@@ -160,7 +201,7 @@ ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
return pmd_dma_ptr;
err_free_pmd_dma_ptr:
- ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
+ ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma);
return NULL;
}
@@ -174,7 +215,7 @@ ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
if (pte_dma_ptr)
return pte_dma_ptr;
- pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+ pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma);
if (!pte_dma_ptr)
return NULL;
@@ -249,38 +290,6 @@ static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_ad
ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}
-static void
-ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
-{
- struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
- u64 end_addr = vpu_addr + size;
-
- /* Align to PMD entry (2 MB) */
- vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
-
- while (vpu_addr < end_addr) {
- int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
- u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
-
- while (vpu_addr < end_addr && vpu_addr < pud_end) {
- int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
- u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
-
- while (vpu_addr < end_addr && vpu_addr < pmd_end) {
- int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
-
- clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
- IVPU_MMU_PGTABLE_SIZE);
- vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
- }
- clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
- IVPU_MMU_PGTABLE_SIZE);
- }
- clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
- }
- clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
-}
-
static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
@@ -327,6 +336,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 prot;
u64 i;
+ if (drm_WARN_ON(&vdev->drm, !ctx))
+ return -EINVAL;
+
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
return -EINVAL;
@@ -343,16 +355,21 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
+ ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, dma_addr, vpu_addr, size);
+
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
mutex_unlock(&ctx->lock);
return ret;
}
- ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
vpu_addr += size;
}
+ /* Ensure page table modifications are flushed from wc buffers to memory */
+ wmb();
+
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -369,19 +386,25 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
int ret;
u64 i;
- if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
- ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
+ if (drm_WARN_ON(&vdev->drm, !ctx))
+ return;
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
+ dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
+ ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, dma_addr, vpu_addr, size);
+
ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
- ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
vpu_addr += size;
}
+ /* Ensure page table modifications are flushed from wc buffers to memory */
+ wmb();
+
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -390,28 +413,34 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
}
int
-ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
- const struct ivpu_addr_range *range,
- u64 size, struct drm_mm_node *node)
+ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node)
{
- lockdep_assert_held(&ctx->lock);
+ int ret;
+
+ WARN_ON(!range);
+ mutex_lock(&ctx->lock);
if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) {
- if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
- range->start, range->end, DRM_MM_INSERT_BEST))
- return 0;
+ ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0,
+ range->start, range->end, DRM_MM_INSERT_BEST);
+ if (!ret)
+ goto unlock;
}
- return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
- range->start, range->end, DRM_MM_INSERT_BEST);
+ ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0,
+ range->start, range->end, DRM_MM_INSERT_BEST);
+unlock:
+ mutex_unlock(&ctx->lock);
+ return ret;
}
void
-ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
+ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
- lockdep_assert_held(&ctx->lock);
-
+ mutex_lock(&ctx->lock);
drm_mm_remove_node(node);
+ mutex_unlock(&ctx->lock);
}
static int
@@ -421,7 +450,6 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
int ret;
mutex_init(&ctx->lock);
- INIT_LIST_HEAD(&ctx->bo_list);
ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
if (ret) {
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index f15d8c630..535db3a1f 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -23,10 +23,9 @@ struct ivpu_mmu_pgtable {
};
struct ivpu_mmu_context {
- struct mutex lock; /* protects: mm, pgtable, bo_list */
+ struct mutex lock; /* Protects: mm, pgtable */
struct drm_mm mm;
struct ivpu_mmu_pgtable pgtable;
- struct list_head bo_list;
u32 id;
};
@@ -39,11 +38,9 @@ int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
- const struct ivpu_addr_range *range,
- u64 size, struct drm_mm_node *node);
-void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx,
- struct drm_mm_node *node);
+int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node);
+void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index e9b16cbc2..a15d30d09 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -13,8 +13,10 @@
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
@@ -22,6 +24,10 @@ static bool ivpu_disable_recovery;
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
+static unsigned long ivpu_tdr_timeout_ms;
+module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
@@ -52,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
+
ret = ivpu_shutdown(vdev);
- if (ret) {
+ if (ret)
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
- return ret;
- }
+
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
return ret;
}
@@ -66,30 +75,38 @@ static int ivpu_resume(struct ivpu_device *vdev)
int ret;
retry:
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+
ret = ivpu_hw_power_up(vdev);
if (ret) {
ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
- return ret;
+ goto err_power_down;
}
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
- ivpu_hw_power_down(vdev);
- return ret;
+ goto err_power_down;
}
ret = ivpu_boot(vdev);
- if (ret) {
- ivpu_mmu_disable(vdev);
- ivpu_hw_power_down(vdev);
- if (!ivpu_fw_is_cold_boot(vdev)) {
- ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret);
- ivpu_pm_prepare_cold_boot(vdev);
- goto retry;
- } else {
- ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
- }
+ if (ret)
+ goto err_mmu_disable;
+
+ return 0;
+
+err_mmu_disable:
+ ivpu_mmu_disable(vdev);
+err_power_down:
+ ivpu_hw_power_down(vdev);
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ ivpu_pm_prepare_cold_boot(vdev);
+ goto retry;
+ } else {
+ ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
}
return ret;
@@ -102,22 +119,37 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret;
-retry:
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
- cond_resched();
- goto retry;
- }
+ ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ret = pm_runtime_resume_and_get(vdev->drm.dev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+
+ ivpu_fw_log_dump(vdev);
+
+ atomic_inc(&vdev->pm->reset_counter);
+ atomic_set(&vdev->pm->reset_pending, 1);
+ down_write(&vdev->pm->reset_lock);
+
+ ivpu_suspend(vdev);
+ ivpu_pm_prepare_cold_boot(vdev);
+ ivpu_jobs_abort_all(vdev);
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
- if (ret && ret != -EAGAIN)
- ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{
- struct ivpu_pm_info *pm = vdev->pm;
+ ivpu_err(vdev, "Recovery triggered by %s\n", reason);
if (ivpu_disable_recovery) {
ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
@@ -129,13 +161,35 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
return;
}
- /* Schedule recovery if it's not in progress */
- if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
- ivpu_hw_irq_disable(vdev);
- queue_work(system_long_wq, &pm->recovery_work);
+ /* Trigger recovery if it's not in progress */
+ if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
+ queue_work(system_long_wq, &vdev->pm->recovery_work);
}
}
+static void ivpu_job_timeout_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
+ struct ivpu_device *vdev = pm->vdev;
+
+ ivpu_pm_trigger_recovery(vdev, "TDR");
+}
+
+void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
+{
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+
+ /* No-op if already queued */
+ queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
+}
+
+void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
+{
+ cancel_delayed_work_sync(&vdev->pm->job_timeout_work);
+}
+
int ivpu_pm_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
@@ -153,12 +207,11 @@ int ivpu_pm_suspend_cb(struct device *dev)
}
}
+ ivpu_jsm_pwr_d0i3_enter(vdev);
+
ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
- pci_save_state(to_pci_dev(dev));
- pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
-
ivpu_dbg(vdev, PM, "Suspend done.\n");
return 0;
@@ -172,9 +225,6 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_dbg(vdev, PM, "Resume..\n");
- pci_set_power_state(to_pci_dev(dev), PCI_D0);
- pci_restore_state(to_pci_dev(dev));
-
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
@@ -188,8 +238,12 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct ivpu_device *vdev = to_ivpu_device(drm);
+ bool hw_is_idle = true;
int ret;
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+ drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
@@ -200,12 +254,18 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
return -EAGAIN;
}
+ if (!vdev->pm->suspend_reschedule_counter)
+ hw_is_idle = false;
+ else if (ivpu_jsm_pwr_d0i3_enter(vdev))
+ hw_is_idle = false;
+
ret = ivpu_suspend(vdev);
if (ret)
ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
- if (!vdev->pm->suspend_reschedule_counter) {
- ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+ if (!hw_is_idle) {
+ ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
@@ -266,11 +326,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- pm_runtime_get_sync(vdev->drm.dev);
-
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->in_reset, 1);
+ atomic_set(&vdev->pm->reset_pending, 1);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+ down_write(&vdev->pm->reset_lock);
ivpu_prepare_for_reset(vdev);
ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
@@ -287,9 +348,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- atomic_set(&vdev->pm->in_reset, 0);
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -302,8 +365,12 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
- atomic_set(&pm->in_reset, 0);
+ init_rwsem(&pm->reset_lock);
+ atomic_set(&pm->reset_pending, 0);
+ atomic_set(&pm->reset_counter, 0);
+
INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
+ INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
if (ivpu_disable_recovery)
delay = -1;
@@ -318,6 +385,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
{
+ drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
cancel_work_sync(&vdev->pm->recovery_work);
}
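ivpu_pm_trigger_recovery() above relies on atomic_cmpxchg() on reset_pending so that concurrent triggers (TDR, MMU events) queue the recovery work only once. A standalone C11 analogue of that guard, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int reset_pending;

/* Returns 1 if this caller won the right to start recovery. */
static int trigger_recovery(const char *reason)
{
	int expected = 0;

	/* Analogue of atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0 */
	if (atomic_compare_exchange_strong(&reset_pending, &expected, 1)) {
		printf("recovery triggered by %s\n", reason);
		return 1;
	}
	printf("recovery already pending, %s ignored\n", reason);
	return 0;
}

int main(void)
{
	trigger_recovery("TDR");		/* wins, queues the work */
	trigger_recovery("MMU event");		/* no-op while first is pending */
	atomic_store(&reset_pending, 0);	/* cleared by the recovery worker */
	return 0;
}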
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 044db150b..ec60fbeef 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -6,15 +6,18 @@
#ifndef __IVPU_PM_H__
#define __IVPU_PM_H__
+#include <linux/rwsem.h>
#include <linux/types.h>
struct ivpu_device;
struct ivpu_pm_info {
struct ivpu_device *vdev;
+ struct delayed_work job_timeout_work;
struct work_struct recovery_work;
- atomic_t in_reset;
+ struct rw_semaphore reset_lock;
atomic_t reset_counter;
+ atomic_t reset_pending;
bool is_warmboot;
u32 suspend_reschedule_counter;
};
@@ -36,6 +39,8 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
+void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
+void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
#endif /* __IVPU_PM_H__ */
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 6b71be92b..04c954258 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -11,7 +11,10 @@
* The bellow values will be used to construct the version info this way:
* fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
* VPU_BOOT_API_VER_MINOR;
- * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes.
+ * VPU_BOOT_API_VER_PATCH will be ignored. KMD compatibility is not affected if this changes.
+ * This information is collected by using vpuip_2/application/vpuFirmware/make_std_fw_image.py.
+ * If a header is missing this info entirely, the header is ignored; if it contains only
+ * partial info, a build error will be generated.
*/
/*
@@ -24,12 +27,12 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 12
+#define VPU_BOOT_API_VER_MINOR 20
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 2
+#define VPU_BOOT_API_VER_PATCH 4
/*
* Index in the API version table
@@ -63,6 +66,12 @@ struct vpu_firmware_header {
/* Size of memory require for firmware execution */
u32 runtime_size;
u32 shave_nn_fw_size;
+ /* Size of primary preemption buffer. */
+ u32 preemption_buffer_1_size;
+ /* Size of secondary preemption buffer. */
+ u32 preemption_buffer_2_size;
+ /* Space reserved for future preemption-related fields. */
+ u32 preemption_reserved[6];
};
/*
@@ -89,6 +98,14 @@ enum VPU_BOOT_L2_CACHE_CFG_TYPE {
VPU_BOOT_L2_CACHE_CFG_NUM = 2
};
+/** VPU MCA ECC signalling mode. By default, no signalling is used */
+enum VPU_BOOT_MCA_ECC_SIGNAL_TYPE {
+ VPU_BOOT_MCA_ECC_NONE = 0,
+ VPU_BOOT_MCA_ECC_CORR = 1,
+ VPU_BOOT_MCA_ECC_FATAL = 2,
+ VPU_BOOT_MCA_ECC_BOTH = 3
+};
+
/**
* Logging destinations.
*
@@ -131,9 +148,11 @@ enum vpu_trace_destination {
#define VPU_TRACE_PROC_BIT_ACT_SHV_3 22
#define VPU_TRACE_PROC_NO_OF_HW_DEVS 23
-/* KMB HW component IDs are sequential, so define first and last IDs. */
-#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_LRT
-#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_SHV_15
+/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
+#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
+#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
+#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST
+#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST
struct vpu_boot_l2_cache_config {
u8 use;
@@ -148,6 +167,25 @@ struct vpu_warm_boot_section {
u32 is_clear_op;
};
+/*
+ * When HW scheduling mode is enabled, a present period is defined.
+ * It will be used by VPU to swap between normal and focus priorities
+ * to prevent starvation of the normal priority band (when implemented).
+ * Host must provide a valid value at boot time in
+ * `vpu_focus_present_timer_ms`. If the value provided by the host is not within the
+ * defined range a default value will be used. Here we define the min. and max.
+ * allowed values and the default value of the present period. Units are milliseconds.
+ */
+#define VPU_PRESENT_CALL_PERIOD_MS_DEFAULT 50
+#define VPU_PRESENT_CALL_PERIOD_MS_MIN 16
+#define VPU_PRESENT_CALL_PERIOD_MS_MAX 10000
+
+/**
+ * Macros to enable various operation modes within the VPU.
+ * To be defined as part of 32 bit mask.
+ */
+#define VPU_OP_MODE_SURVIVABILITY 0x1
+
struct vpu_boot_params {
u32 magic;
u32 vpu_id;
@@ -218,6 +256,7 @@ struct vpu_boot_params {
* the threshold will not be logged); applies to every enabled logging
* destination and loggable HW component. See 'mvLog_t' enum for acceptable
* values.
+ * TODO: EISW-33556: Move log level definition (mvLog_t) to this file.
*/
u32 default_trace_level;
u32 boot_type;
@@ -249,7 +288,36 @@ struct vpu_boot_params {
u32 temp_sensor_period_ms;
/** PLL ratio for efficient clock frequency */
u32 pn_freq_pll_ratio;
- u32 pad4[28];
+ /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
+ u32 dvfs_mode;
+ /**
+ * Depending on DVFS Mode:
+ * On-demand: Default if 0.
+ * Bit 0-7 - uint8_t: Highest residency percent
+ * Bit 8-15 - uint8_t: High residency percent
+ * Bit 16-23 - uint8_t: Low residency percent
+ * Bit 24-31 - uint8_t: Lowest residency percent
+ * Bit 32-35 - unsigned 4b: PLL Ratio increase amount on highest residency
+ * Bit 36-39 - unsigned 4b: PLL Ratio increase amount on high residency
+ * Bit 40-43 - unsigned 4b: PLL Ratio decrease amount on low residency
+ * Bit 44-47 - unsigned 4b: PLL Ratio decrease amount on lowest residency
+ * Bit 48-55 - uint8_t: Period (ms) for residency decisions
+ * Bit 56-63 - uint8_t: Averaging windows (as multiples of period. Max: 30 decimal)
+ * Power Save/Max Performance: Unused
+ */
+ u64 dvfs_param;
+ /**
+ * D0i3 delayed entry
+ * Bit0: Disable CPU state save on D0i2 entry flow.
+ * 0: Every D0i2 entry saves state. Save state IPC message ignored.
+ * 1: IPC message required to save state on D0i3 entry flow.
+ */
+ u32 d0i3_delayed_entry;
+ /* Time spent by VPU in D0i3 state */
+ u64 d0i3_residency_time_us;
+ /* Value of VPU perf counter at the time of entering D0i3 state. */
+ u64 d0i3_entry_vpu_ts;
+ u32 pad4[20];
/* Warm boot information: 0x400 - 0x43F */
u32 warm_boot_sections_count;
u32 warm_boot_start_address_reference;
@@ -274,8 +342,12 @@ struct vpu_boot_params {
u32 vpu_scheduling_mode;
/* Present call period in milliseconds. */
u32 vpu_focus_present_timer_ms;
- /* Unused/reserved: 0x478 - 0xFFF */
- u32 pad6[738];
+ /* VPU ECC Signaling */
+ u32 vpu_uses_ecc_mca_signal;
+ /* Values defined by VPU_OP_MODE* macros */
+ u32 vpu_operation_mode;
+ /* Unused/reserved: 0x480 - 0xFFF */
+ u32 pad6[736];
};
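The dvfs_param bit layout documented above can be packed with plain shifts. A hedged standalone sketch (field order taken from the comment; the values and the helper name are invented):

#include <stdint.h>
#include <stdio.h>

/* Packs the on-demand DVFS tuning fields into the u64 layout documented
 * for vpu_boot_params.dvfs_param above. */
static uint64_t pack_dvfs_param(uint8_t highest_res, uint8_t high_res,
				uint8_t low_res, uint8_t lowest_res,
				uint8_t inc_highest, uint8_t inc_high,
				uint8_t dec_low, uint8_t dec_lowest,
				uint8_t period_ms, uint8_t windows)
{
	return  (uint64_t)highest_res                 |
		(uint64_t)high_res              <<  8 |
		(uint64_t)low_res               << 16 |
		(uint64_t)lowest_res            << 24 |
		((uint64_t)inc_highest & 0xf)   << 32 |	/* 4-bit fields */
		((uint64_t)inc_high & 0xf)      << 36 |
		((uint64_t)dec_low & 0xf)       << 40 |
		((uint64_t)dec_lowest & 0xf)    << 44 |
		(uint64_t)period_ms             << 48 |
		(uint64_t)windows               << 56;
}

int main(void)
{
	uint64_t v = pack_dvfs_param(90, 75, 25, 10, 2, 1, 1, 2, 10, 8);

	printf("dvfs_param = 0x%016llx\n", (unsigned long long)v);
	return 0;
}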
/*
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 2949ec836..7da762274 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -22,12 +22,12 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 0
+#define VPU_JSM_API_VER_MINOR 15
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_JSM_API_VER_PATCH 1
+#define VPU_JSM_API_VER_PATCH 0
/*
* Index in the API version table
@@ -84,11 +84,13 @@
* Job flags bit masks.
*/
#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
+#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000
/*
* Sizes of the reserved areas in jobs, in bytes.
*/
-#define VPU_JOB_RESERVED_BYTES 16
+#define VPU_JOB_RESERVED_BYTES 8
+
/*
* Sizes of the reserved areas in job queues, in bytes.
*/
@@ -109,6 +111,20 @@
#define VPU_DYNDBG_CMD_MAX_LEN 96
/*
+ * For HWS command queue scheduling, we can prioritise command queues inside the
+ * same process with a relative in-process priority. Valid values for relative
+ * priority are given below - max and min.
+ */
+#define VPU_HWS_COMMAND_QUEUE_MAX_IN_PROCESS_PRIORITY 7
+#define VPU_HWS_COMMAND_QUEUE_MIN_IN_PROCESS_PRIORITY -7
+
+/*
+ * For HWS priority scheduling, we can have multiple realtime priority bands.
+ * They are numbered 0 to a MAX.
+ */
+#define VPU_HWS_MAX_REALTIME_PRIORITY_LEVEL 31U
+
+/*
* Job format.
*/
struct vpu_job_queue_entry {
@@ -117,8 +133,14 @@ struct vpu_job_queue_entry {
u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
u64 root_page_table_addr; /**< Address of root page table to use for this job */
u64 root_page_table_update_counter; /**< Page tables update events counter */
- u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */
- u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */
+ u64 primary_preempt_buf_addr;
+ /**< Address of the primary preemption buffer to use for this job */
+ u32 primary_preempt_buf_size;
+ /**< Size of the primary preemption buffer to use for this job */
+ u32 secondary_preempt_buf_size;
+ /**< Size of secondary preemption buffer to use for this job */
+ u64 secondary_preempt_buf_addr;
+ /**< Address of secondary preemption buffer to use for this job */
u8 reserved_0[VPU_JOB_RESERVED_BYTES];
};
@@ -153,6 +175,46 @@ enum vpu_trace_entity_type {
};
/*
+ * HWS specific log buffer header details.
+ * Total size is 32 bytes.
+ */
+struct vpu_hws_log_buffer_header {
+ /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ u32 first_free_entry_index;
+ /* Incremented by VPU every time the VPU overwrites the 0th entry;
+ * initialised by host to 0.
+ */
+ u32 wraparound_count;
+ /*
+ * This is the number of entries that can be stored in the log buffer provided by the host.
+ * It is written by host before passing buffer to VPU. VPU should consider it read-only.
+ */
+ u64 num_of_entries;
+ u64 reserved[2];
+};
+
+/*
+ * HWS specific log buffer entry details.
+ * Total size is 32 bytes.
+ */
+struct vpu_hws_log_buffer_entry {
+ /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ u64 vpu_timestamp;
+ /*
+ * Operation type:
+ * 0 - context state change
+ * 1 - queue new work
+ * 2 - queue unwait sync object
+ * 3 - queue no more work
+ * 4 - queue wait sync object
+ */
+ u32 operation_type;
+ u32 reserved;
+ /* Operation data depends on operation type */
+ u64 operation_data[2];
+};
+
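One plausible host-side way to walk a buffer built from the two structures above, shown below as standalone C. The iteration order after a wraparound is an assumption on my part; the header only defines the fields:

#include <stdint.h>
#include <stdio.h>

/* Standalone copies of the HWS log header and entry layouts above. */
struct hws_log_header {
	uint32_t first_free_entry_index;
	uint32_t wraparound_count;
	uint64_t num_of_entries;
	uint64_t reserved[2];
};

struct hws_log_entry {
	uint64_t vpu_timestamp;
	uint32_t operation_type;
	uint32_t reserved;
	uint64_t operation_data[2];
};

/* Before any wraparound only entries [0, first_free) are valid; after a
 * wraparound the buffer is full and the oldest entry is assumed to sit
 * at first_free_entry_index. */
static void dump_log(const struct hws_log_header *hdr,
		     const struct hws_log_entry *entries)
{
	uint64_t count = hdr->wraparound_count ? hdr->num_of_entries
					       : hdr->first_free_entry_index;
	uint64_t start = hdr->wraparound_count ? hdr->first_free_entry_index : 0;

	for (uint64_t i = 0; i < count; i++) {
		const struct hws_log_entry *e =
			&entries[(start + i) % hdr->num_of_entries];
		printf("ts=%llu op=%u\n",
		       (unsigned long long)e->vpu_timestamp,
		       (unsigned)e->operation_type);
	}
}

int main(void)
{
	struct hws_log_entry ring[4] = { { .vpu_timestamp = 1, .operation_type = 1 } };
	struct hws_log_header hdr = { .first_free_entry_index = 1, .num_of_entries = 4 };

	dump_log(&hdr, ring);	/* prints: ts=1 op=1 */
	return 0;
}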
+/*
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
@@ -228,6 +290,23 @@ enum vpu_ipc_msg_type {
* deallocated or reassigned to another context.
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
+ /** Control command: Log buffer setting */
+ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
+ /* Control command: Suspend command queue. */
+ VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
+ /* Control command: Resume command queue */
+ VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
+ /* Control command: Resume engine after reset */
+ VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
+ /* Control command: Enable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_ENABLE = 0x111c,
+ /* Control command: Disable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_DISABLE = 0x111d,
+ /**
+ * Dump VPU state. To be used for debug purposes only.
+ * NOTE: Please introduce new ASYNC commands before this one.
+ */
+ VPU_JSM_MSG_STATE_DUMP = 0x11FF,
/* IPC Host -> Device, General commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
@@ -236,6 +315,10 @@ enum vpu_ipc_msg_type {
* Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
+ /**
+ * Perform the save procedure for the D0i3 entry
+ */
+ VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
/* IPC Device -> Host, Job completion */
VPU_JSM_MSG_JOB_DONE = 0x2100,
/* IPC Device -> Host, Async command completion */
@@ -304,11 +387,35 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
/** Response to control command: Set context scheduling properties */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
+ /** Response to control command: Log buffer setting */
+ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
+ /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
+ /* IPC Device -> Host, HWS completion of a context suspend request */
+ VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
+ /* Response to control command: Resume command queue */
+ VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
+ /* Response to control command: Resume engine command response */
+ VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
+ /* Response to control command: Enable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
+ /* Response to control command: Disable survivability/DCT mode */
+ VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e,
+ /**
+ * Response to state dump control command.
+ * NOTE: Please introduce new ASYNC responses before this one.
+ */
+ VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
/* IPC Device -> Host, General command completion */
VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
/** Response to VPU_JSM_MSG_DYNDBG_CONTROL. */
VPU_JSM_MSG_DYNDBG_CONTROL_RSP = 0x2301,
+ /**
+ * Acknowledgment of completion of the save procedure initiated by
+ * VPU_JSM_MSG_PWR_D0I3_ENTER
+ */
+ VPU_JSM_MSG_PWR_D0I3_ENTER_DONE = 0x2302,
};
enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
@@ -593,12 +700,12 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* Default quantum in 100ns units for scheduling across processes
* within a priority band
*/
- u64 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
/*
* Default grace period in 100ns units for processes that preempt each
* other within a priority band
*/
- u64 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
/*
* For normal priority band, specifies the target VPU percentage
* in situations when it's starved by the focus band.
@@ -608,32 +715,51 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
u32 reserved_0;
};
-/* HWS create command queue request */
+/*
+ * @brief HWS create command queue request.
+ * Host will create a command queue via this command.
+ * Note: Cmdq group is a handle of an object which
+ * may contain one or more command queues.
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
u64 process_id;
/* Host SSID */
u32 host_ssid;
- /* Zero Padding */
- u32 reserved;
+ /* Engine for which queue is being created */
+ u32 engine_idx;
+ /*
+ * Cmdq group may be set to 0 or equal to
+ * cmdq_id while each priority band contains
+ * only single engine instances.
+ */
+ u64 cmdq_group;
/* Command queue id */
u64 cmdq_id;
/* Command queue base */
u64 cmdq_base;
/* Command queue size */
u32 cmdq_size;
- /* Reserved */
+ /* Zero padding */
u32 reserved_0;
};
-/* HWS create command queue response */
+/*
+ * @brief HWS create command queue response.
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
/* Process id */
u64 process_id;
/* Host SSID */
u32 host_ssid;
- /* Zero Padding */
- u32 reserved;
+ /* Engine for which queue is being created */
+ u32 engine_idx;
+ /* Command queue group */
+ u64 cmdq_group;
/* Command queue id */
u64 cmdq_id;
};
@@ -661,7 +787,7 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
/* Inside realtime band assigns a further priority */
u32 realtime_priority_level;
/* Priority relative to other contexts in the same process */
- u32 in_process_priority;
+ s32 in_process_priority;
/* Zero padding / Reserved */
u32 reserved_1;
/* Context quantum relative to other contexts of same priority in the same process */
@@ -694,6 +820,123 @@ struct vpu_jsm_hws_register_db {
u64 cmdq_size;
};
+/*
+ * @brief Structure to set another buffer to be used for scheduling-related logging.
+ * The size of the logging buffer and the number of entries is defined as part of the
+ * buffer itself as described next.
+ * The log buffer received from the host is made up of:
+ * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * The header contains the number of log entries in the buffer.
+ * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
+ * 'struct vpu_hws_log_buffer_entry'.
+ * The entry contains the VPU timestamp, operation type and data.
+ * The host should provide the notify index value of the log buffer to VPU. When an
+ * entry is written at this index, VPU generates the scheduling log
+ * notification.
+ * The host should set engine_idx and vpu_log_buffer_va to 0 to disable logging
+ * for a particular engine.
+ * VPU will handle one log buffer for each of supported engines.
+ * VPU should allow the logging to consume one host_ssid.
+ * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
+ * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP
+ * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
+ */
+struct vpu_ipc_msg_payload_hws_set_scheduling_log {
+ /* Engine ordinal */
+ u32 engine_idx;
+ /* Host SSID */
+ u32 host_ssid;
+ /*
+ * VPU log buffer virtual address.
+ * Set to 0 to disable logging for this engine.
+ */
+ u64 vpu_log_buffer_va;
+ /*
+ * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
+ * is generated when an event log is written to this index.
+ */
+ u64 notify_index;
+};
+
+/*
+ * @brief The scheduling log notification is generated by VPU when it writes
+ * an event into the log buffer at the notify_index. VPU notifies host with
+ * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
+ * message from VPU to host.
+ * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
+ * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
+ */
+struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
+ /* Engine ordinal */
+ u32 engine_idx;
+ /* Zero Padding */
+ u32 reserved_0;
+};
+
+/*
+ * @brief HWS suspend command queue request and done structure.
+ * Host will request the suspend of contexts and VPU will:
+ * - Suspend all work on this context
+ * - Preempt any running work
+ * - Asynchronously perform the above and return success immediately once
+ * all items above are started successfully
+ * - Notify the host of completion of these operations via
+ * VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
+ * - Reject any other context operations on a context with an in-flight
+ * suspend request running
+ * Same structure used when VPU notifies host of completion of a context suspend
+ * request. The ids and suspend fence value reported in this command will match
+ * the one in the request from the host to suspend the context. Once suspend is
+ * complete, VPU will not access any data relating to this command queue until
+ * it is resumed.
+ * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ
+ * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
+ */
+struct vpu_ipc_msg_payload_hws_suspend_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /*
+ * Suspend fence value - reported by the VPU suspend context
+ * completed once suspend is complete.
+ */
+ u64 suspend_fence_value;
+};
+
+/*
+ * @brief HWS Resume command queue request / response structure.
+ * Host will request the resume of a context:
+ * - VPU will resume all work on this context
+ * - Scheduler will allow this context to be scheduled
+ * @see VPU_JSM_MSG_HWS_RESUME_CMDQ
+ * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
+ */
+struct vpu_ipc_msg_payload_hws_resume_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/*
+ * @brief HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
+ * Host shall send this command to resume scheduling of any valid queue.
+ * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
+ */
+struct vpu_ipc_msg_payload_hws_resume_engine {
+ /* Engine to be resumed */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
/**
* Payload for VPU_JSM_MSG_TRACE_SET_CONFIG[_RSP] and
* VPU_JSM_MSG_TRACE_GET_CONFIG_RSP messages.
@@ -938,6 +1181,35 @@ struct vpu_ipc_msg_payload_dyndbg_control {
char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
};
+/**
+ * Payload for VPU_JSM_MSG_PWR_D0I3_ENTER
+ *
+ * This is a bi-directional payload.
+ */
+struct vpu_ipc_msg_payload_pwr_d0i3_enter {
+ /**
+ * 0: VPU_JSM_MSG_PWR_D0I3_ENTER_DONE is not sent to the host driver
+ * The driver will poll for D0i2 Idle state transitions.
+ * 1: VPU_JSM_MSG_PWR_D0I3_ENTER_DONE is sent after VPU state save is complete
+ */
+ u32 send_response;
+ u32 reserved_0;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ *
+ * Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
+ * corresponding to a roughly 15% active duty cycle (5.3 ms active out of every
+ * 35.3 ms). This payload allows the host to tune these values according to
+ * application requirements.
+ */
+struct vpu_ipc_msg_payload_pwr_dct_control {
+ /** Duty cycle active time in microseconds */
+ u32 dct_active_us;
+ /** Duty cycle inactive time in microseconds */
+ u32 dct_inactive_us;
+};
+
/*
* Payloads union, used to define complete message format.
*/
@@ -974,6 +1246,13 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_hws_destroy_cmdq hws_destroy_cmdq;
struct vpu_ipc_msg_payload_hws_set_context_sched_properties
hws_set_context_sched_properties;
+ struct vpu_ipc_msg_payload_hws_set_scheduling_log hws_set_scheduling_log;
+ struct vpu_ipc_msg_payload_hws_scheduling_log_notification hws_scheduling_log_notification;
+ struct vpu_ipc_msg_payload_hws_suspend_cmdq hws_suspend_cmdq;
+ struct vpu_ipc_msg_payload_hws_resume_cmdq hws_resume_cmdq;
+ struct vpu_ipc_msg_payload_hws_resume_engine hws_resume_engine;
+ struct vpu_ipc_msg_payload_pwr_d0i3_enter pwr_d0i3_enter;
+ struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
};
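A quick check of the duty-cycle arithmetic behind vpu_ipc_msg_payload_pwr_dct_control's defaults (5.3 ms active, 30 ms inactive):

#include <stdio.h>

int main(void)
{
	/* Default DCT timings from the comment above. */
	unsigned int dct_active_us = 5300;	/* 5.3 ms */
	unsigned int dct_inactive_us = 30000;	/* 30 ms  */

	double duty = 100.0 * dct_active_us / (dct_active_us + dct_inactive_us);

	printf("active duty cycle: %.1f%%\n", duty);	/* ~15.0% */
	return 0;
}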
/*
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 2418418f7..3f7f6dfde 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -9,4 +9,5 @@ qaic-y := \
mhi_controller.o \
qaic_control.o \
qaic_data.o \
- qaic_drv.o
+ qaic_drv.o \
+ qaic_timesync.o
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 1405623b0..cb77d048e 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -348,7 +348,7 @@ static struct mhi_channel_config aic100_channels[] = {
.local_elements = 0,
.event_ring = 0,
.dir = DMA_TO_DEVICE,
- .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .ee_mask = MHI_CH_EE_SBL,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
@@ -364,7 +364,39 @@ static struct mhi_channel_config aic100_channels[] = {
.local_elements = 0,
.event_ring = 0,
.dir = DMA_FROM_DEVICE,
- .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS,
+ .ee_mask = MHI_CH_EE_SBL,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num = 22,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .num = 23,
+ .name = "QAIC_TIMESYNC_PERIODIC",
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
.pollcfg = 0,
.doorbell = MHI_DB_BRST_DISABLE,
.lpm_notify = false,
@@ -450,7 +482,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback re
pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
/* this event occurs in non-atomic context */
if (reason == MHI_CB_SYS_ERROR)
- qaic_dev_reset_clean_local_state(qdev, true);
+ qaic_dev_reset_clean_local_state(qdev);
}
static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
@@ -481,7 +513,7 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
}
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq)
+ int mhi_irq, bool shared_msi)
{
struct mhi_controller *mhi_cntrl;
int ret;
@@ -513,6 +545,10 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi
return ERR_PTR(-ENOMEM);
mhi_cntrl->irq[0] = mhi_irq;
+
+ if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */
+ mhi_cntrl->irq_flags = IRQF_SHARED;
+
mhi_cntrl->fw_image = "qcom/aic100/sbl.bin";
/* use latest configured timeout */
diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h
index 2ae45d768..500e7f4af 100644
--- a/drivers/accel/qaic/mhi_controller.h
+++ b/drivers/accel/qaic/mhi_controller.h
@@ -8,7 +8,7 @@
#define MHICONTROLLERQAIC_H_
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
- int mhi_irq);
+ int mhi_irq, bool shared_msi);
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index e3f4c30f3..582836f95 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -31,6 +31,15 @@
#define to_drm(qddev) (&(qddev)->drm)
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
+enum __packed dev_states {
+ /* Device is offline or will be very soon */
+ QAIC_OFFLINE,
+ /* Device is booting, not clear if it's in a usable state */
+ QAIC_BOOT,
+ /* Device is fully operational */
+ QAIC_ONLINE,
+};
+
extern bool datapath_polling;
struct qaic_user {
@@ -121,8 +130,10 @@ struct qaic_device {
struct workqueue_struct *cntl_wq;
/* Synchronizes all the users of device during cleanup */
struct srcu_struct dev_lock;
- /* true: Device under reset; false: Device not under reset */
- bool in_reset;
+ /* Track the state of the device during resets */
+ enum dev_states dev_state;
+ /* true: single MSI is used to operate device */
+ bool single_msi;
/*
* true: A tx MHI transaction has failed and a rx buffer is still queued
* in control device. Such a buffer is considered lost rx buffer
@@ -137,6 +148,10 @@ struct qaic_device {
u32 (*gen_crc)(void *msg);
/* Validate the CRC of a control message */
bool (*valid_crc)(void *msg);
+ /* MHI "QAIC_TIMESYNC" channel device */
+ struct mhi_device *qts_ch;
+ /* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
+ struct workqueue_struct *qts_wq;
};
struct qaic_drm_device {
@@ -268,7 +283,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
void release_dbc(struct qaic_device *qdev, u32 dbc_id);
void wake_all_cntl(struct qaic_device *qdev);
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 388abd400..9e8a8cbad 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -1022,7 +1022,8 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
int xfer_count = 0;
int retry_count;
- if (qdev->in_reset) {
+ /* Allow QAIC_BOOT state since we need to check control protocol version */
+ if (qdev->dev_state == QAIC_OFFLINE) {
mutex_unlock(&qdev->cntl_mutex);
return ERR_PTR(-ENODEV);
}
@@ -1138,7 +1139,7 @@ static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrapper
if (!list_is_first(&wrapper->list, &wrappers->list))
kref_put(&wrapper->ref_count, free_wrapper);
- wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
+ wrapper = add_wrapper(wrappers, sizeof(*wrapper));
if (!wrapper)
return -ENOMEM;
@@ -1306,7 +1307,7 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
return -ENODEV;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index d42f002bc..03c9a793d 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -51,6 +51,7 @@
})
#define NUM_EVENTS 128
#define NUM_DELAYS 10
+#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())
static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
@@ -451,7 +452,7 @@ static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 s
* later
*/
buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
- max_order = min(MAX_ORDER - 1, get_order(size));
+ max_order = min(MAX_PAGE_ORDER, get_order(size));
} else {
/* allocate a single page for book keeping */
nr_pages = 1;
@@ -689,7 +690,7 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -748,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -967,7 +968,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1056,6 +1057,16 @@ unlock_usr_srcu:
return ret;
}
+static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
+{
+ u32 avail = head - tail - 1;
+
+ if (head <= tail)
+ avail += q_size;
+
+ return avail;
+}
+
static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
u32 head, u32 *ptail)
{
@@ -1064,27 +1075,20 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic
u32 tail = *ptail;
u32 avail;
- avail = head - tail;
- if (head <= tail)
- avail += dbc->nelem;
-
- --avail;
-
+ avail = fifo_space_avail(head, tail, dbc->nelem);
if (avail < slice->nents)
return -EAGAIN;
if (tail + slice->nents > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, slice->nents);
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * avail);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
reqs += avail;
avail = slice->nents - avail;
if (avail)
memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
} else {
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * slice->nents);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
}
*ptail = (tail + slice->nents) % dbc->nelem;
@@ -1092,46 +1096,31 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic
return 0;
}
-/*
- * Based on the value of resize we may only need to transmit first_n
- * entries and the last entry, with last_bytes to send from the last entry.
- * Note that first_n could be 0.
- */
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
- u64 resize, u32 dbc_id, u32 head, u32 *ptail)
+ u64 resize, struct dma_bridge_chan *dbc, u32 head,
+ u32 *ptail)
{
- struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
struct dbc_req *reqs = slice->reqs;
struct dbc_req *last_req;
u32 tail = *ptail;
- u64 total_bytes;
u64 last_bytes;
u32 first_n;
u32 avail;
- int ret;
- int i;
-
- avail = head - tail;
- if (head <= tail)
- avail += dbc->nelem;
- --avail;
+ avail = fifo_space_avail(head, tail, dbc->nelem);
- total_bytes = 0;
- for (i = 0; i < slice->nents; i++) {
- total_bytes += le32_to_cpu(reqs[i].len);
- if (total_bytes >= resize)
+ /*
+	 * After this loop completes, first_n is the index of the last
+	 * DMA request of this slice that needs to be transferred after
+	 * resizing, and last_bytes is the DMA size of that request.
+ */
+ last_bytes = resize;
+ for (first_n = 0; first_n < slice->nents; first_n++)
+ if (last_bytes > le32_to_cpu(reqs[first_n].len))
+ last_bytes -= le32_to_cpu(reqs[first_n].len);
+ else
break;
- }
-
- if (total_bytes < resize) {
- /* User space should have used the full buffer path. */
- ret = -EINVAL;
- return ret;
- }
-
- first_n = i;
- last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize;
if (avail < (first_n + 1))
return -EAGAIN;
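
/*
 * The rewritten loop above walks the slice's request lengths, subtracting
 * each from the remaining resize budget until a request is no longer fully
 * covered: first_n then indexes the last request to transmit and last_bytes
 * is its truncated length (0 means "send the descriptor with DMA disabled").
 * The same walk in standalone form (hypothetical helper, plain integers in
 * place of __le32 request fields):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t partial_walk(const uint32_t *len, uint32_t nents,
			     uint64_t resize, uint64_t *last_bytes)
{
	uint64_t remaining = resize;
	uint32_t first_n;

	for (first_n = 0; first_n < nents; first_n++)
		if (remaining > len[first_n])
			remaining -= len[first_n];
		else
			break;
	*last_bytes = remaining;
	return first_n;
}

int main(void)
{
	uint32_t len[] = { 4096, 4096, 4096 };
	uint64_t last_bytes;
	uint32_t first_n = partial_walk(len, 3, 6000, &last_bytes);

	/* 6000 covers request 0 fully and 1904 bytes of request 1. */
	printf("first_n=%u last_bytes=%llu\n", first_n,
	       (unsigned long long)last_bytes);
	return 0;
}
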
@@ -1140,22 +1129,21 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli
if (tail + first_n > dbc->nelem) {
avail = dbc->nelem - tail;
avail = min_t(u32, avail, first_n);
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * avail);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
last_req = reqs + avail;
avail = first_n - avail;
if (avail)
memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
} else {
- memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
- sizeof(*reqs) * first_n);
+ memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
}
}
- /* Copy over the last entry. Here we need to adjust len to the left over
+ /*
+ * Copy over the last entry. Here we need to adjust len to the left over
* size, and set src and dst to the entry it is copied to.
*/
- last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
+ last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));
/*
@@ -1166,6 +1154,9 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli
last_req->len = cpu_to_le32((u32)last_bytes);
last_req->src_addr = reqs[first_n].src_addr;
last_req->dest_addr = reqs[first_n].dest_addr;
+ if (!last_bytes)
+ /* Disable DMA transfer */
+ last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;
*ptail = (tail + first_n + 1) % dbc->nelem;
@@ -1225,26 +1216,17 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
bo->req_id = dbc->next_req_id++;
list_for_each_entry(slice, &bo->slices, slice) {
- /*
- * If this slice does not fall under the given
- * resize then skip this slice and continue the loop
- */
- if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset)
- continue;
-
for (j = 0; j < slice->nents; j++)
slice->reqs[j].req_id = cpu_to_le16(bo->req_id);
- /*
- * If it is a partial execute ioctl call then check if
- * resize has cut this slice short then do a partial copy
- * else do complete copy
- */
- if (is_partial && pexec[i].resize &&
- pexec[i].resize < slice->offset + slice->size)
+ if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
+ /* Configure the slice for no DMA transfer */
+ ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
+ else if (is_partial && pexec[i].resize < slice->offset + slice->size)
+ /* Configure the slice to be partially DMA transferred */
ret = copy_partial_exec_reqs(qdev, slice,
- pexec[i].resize - slice->offset,
- dbc->id, head, tail);
+ pexec[i].resize - slice->offset, dbc,
+ head, tail);
else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) {
@@ -1357,7 +1339,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1464,6 +1446,16 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (datapath_polling) {
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ /*
+ * Normally datapath_polling will not have irqs enabled, but
+ * when running with only one MSI the interrupt is shared with
+ * MHI so it cannot be disabled. Return ASAP instead.
+ */
+ return IRQ_HANDLED;
+ }
+
if (!dbc->usr) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_HANDLED;
@@ -1486,7 +1478,8 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- disable_irq_nosync(irq);
+ if (!dbc->qdev->single_msi)
+ disable_irq_nosync(irq);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return IRQ_WAKE_THREAD;
}
@@ -1502,7 +1495,7 @@ void irq_polling_work(struct work_struct *work)
rcu_id = srcu_read_lock(&dbc->ch_lock);
while (1) {
- if (dbc->qdev->in_reset) {
+ if (dbc->qdev->dev_state != QAIC_ONLINE) {
srcu_read_unlock(&dbc->ch_lock, rcu_id);
return;
}
@@ -1557,12 +1550,12 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
u32 tail;
rcu_id = srcu_read_lock(&dbc->ch_lock);
+ qdev = dbc->qdev;
head = readl(dbc->dbc_base + RSPHP_OFF);
if (head == U32_MAX) /* PCI link error */
goto error_out;
- qdev = dbc->qdev;
read_fifo:
if (!event_count) {
@@ -1643,14 +1636,14 @@ read_fifo:
goto read_fifo;
normal_out:
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
enable_irq(irq);
- else
+ else if (unlikely(datapath_polling))
schedule_work(&dbc->poll_work);
	/* Enabling the IRQ and checking the FIFO can race, so re-check for missed events */
tail = readl(dbc->dbc_base + RSPTP_OFF);
if (tail != U32_MAX && head != tail) {
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
disable_irq_nosync(irq);
goto read_fifo;
}
@@ -1659,9 +1652,9 @@ normal_out:
error_out:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
- if (likely(!datapath_polling))
+ if (!qdev->single_msi && likely(!datapath_polling))
enable_irq(irq);
- else
+ else if (unlikely(datapath_polling))
schedule_work(&dbc->poll_work);
return IRQ_HANDLED;
@@ -1692,7 +1685,7 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1761,7 +1754,7 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
@@ -1852,7 +1845,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
qdev = usr->qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto unlock_dev_srcu;
}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 6f5809576..2a313eb69 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -8,6 +8,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
@@ -27,6 +28,7 @@
#include "mhi_controller.h"
#include "qaic.h"
+#include "qaic_timesync.h"
MODULE_IMPORT_NS(DMA_BUF);
@@ -42,9 +44,6 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
-static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
-static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
-
static void free_usr(struct kref *kref)
{
struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -63,7 +62,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file)
int ret;
rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (qdev->in_reset) {
+ if (qdev->dev_state != QAIC_ONLINE) {
ret = -ENODEV;
goto dev_unlock;
}
@@ -120,7 +119,7 @@ static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
if (qddev) {
qdev = qddev->qdev;
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
- if (!qdev->in_reset) {
+ if (qdev->dev_state == QAIC_ONLINE) {
qaic_release_usr(qdev, usr);
for (i = 0; i < qdev->num_dbc; ++i)
if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
@@ -182,13 +181,6 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
qddev->partition_id = partition_id;
- /*
- * drm_dev_unregister() sets the driver data to NULL and
- * drm_dev_register() does not update the driver data. During a SOC
- * reset drm dev is unregistered and registered again leaving the
- * driver data to NULL.
- */
- dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
ret = drm_dev_register(drm, 0);
if (ret)
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
@@ -202,7 +194,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
- drm_dev_get(drm);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -231,7 +222,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
- drm_dev_put(drm);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
@@ -253,8 +243,6 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
- qdev->in_reset = false;
-
dev_set_drvdata(&mhi_dev->dev, qdev);
qdev->cntl_ch = mhi_dev;
@@ -264,6 +252,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
return ret;
}
+ qdev->dev_state = QAIC_BOOT;
ret = get_cntl_version(qdev, NULL, &major, &minor);
if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
@@ -271,8 +260,8 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
ret = -EINVAL;
goto close_control;
}
-
- ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ qdev->dev_state = QAIC_ONLINE;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);
return ret;
@@ -290,7 +279,8 @@ static void qaic_notify_reset(struct qaic_device *qdev)
{
int i;
- qdev->in_reset = true;
+ kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
+ qdev->dev_state = QAIC_OFFLINE;
/* wake up any waiters to avoid waiting for timeouts at sync */
wake_all_cntl(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
@@ -298,21 +288,15 @@ static void qaic_notify_reset(struct qaic_device *qdev)
synchronize_srcu(&qdev->dev_lock);
}
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
int i;
qaic_notify_reset(qdev);
- /* remove drmdevs to prevent new users from coming in */
- qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-
/* start tearing things down */
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
-
- if (exit_reset)
- qdev->in_reset = false;
}
static void cleanup_qdev(struct qaic_device *qdev)
@@ -324,6 +308,7 @@ static void cleanup_qdev(struct qaic_device *qdev)
cleanup_srcu_struct(&qdev->dev_lock);
pci_set_drvdata(qdev->pdev, NULL);
destroy_workqueue(qdev->cntl_wq);
+ destroy_workqueue(qdev->qts_wq);
}
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -336,6 +321,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev)
return NULL;
+ qdev->dev_state = QAIC_OFFLINE;
if (id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
@@ -347,6 +333,12 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
if (!qdev->cntl_wq)
return NULL;
+ qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0);
+ if (!qdev->qts_wq) {
+ destroy_workqueue(qdev->cntl_wq);
+ return NULL;
+ }
+
pci_set_drvdata(pdev, qdev);
qdev->pdev = pdev;
@@ -424,14 +416,24 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
int i;
/* Managed release since we use pcim_enable_device */
- ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
- if (ret < 0)
- return ret;
+ ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
+ if (ret == -ENOSPC) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0)
+ return ret;
- if (ret < 32) {
- pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n",
- __func__, ret);
- return -ENODEV;
+ /*
+		 * Operate in single-MSI mode. All interrupts are directed to
+		 * MSI0; every interrupt wakes up all the interrupt handlers
+		 * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
+		 * not disabled during the DBC threaded handler, but only one
+		 * thread is allowed to run per DBC, so while the handler can
+		 * be interrupted, it shouldn't race with itself.
+ */
+ qdev->single_msi = true;
+ pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
+ } else if (ret < 0) {
+ return ret;
}
mhi_irq = pci_irq_vector(pdev, 0);
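
/*
 * The hunk above makes MSI allocation all-or-one: request exactly 32
 * vectors and, only on -ENOSPC, retry with a single shared vector. A
 * hedged sketch of that pattern (pci_alloc_irq_vectors() is the real PCI
 * API; struct demo_dev stands in for the driver's device struct):
 */
#include <linux/pci.h>

struct demo_dev {
	bool single_msi;
};

static int demo_alloc_msi(struct pci_dev *pdev, struct demo_dev *d)
{
	int ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);

	if (ret == -ENOSPC) {
		/* Fall back to one vector shared by MHI and all DBCs. */
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (ret < 0)
			return ret;
		d->single_msi = true;
	} else if (ret < 0) {
		return ret;
	}
	return 0;
}
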
@@ -439,15 +441,17 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
return mhi_irq;
for (i = 0; i < qdev->num_dbc; ++i) {
- ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1),
+ ret = devm_request_threaded_irq(&pdev->dev,
+ pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
"qaic_dbc", &qdev->dbc[i]);
if (ret)
return ret;
if (datapath_polling) {
- qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1);
- disable_irq_nosync(qdev->dbc[i].irq);
+ qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
+ if (!qdev->single_msi)
+ disable_irq_nosync(qdev->dbc[i].irq);
INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
}
}
@@ -479,14 +483,21 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto cleanup_qdev;
}
- qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq);
+ ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+ if (ret)
+ goto cleanup_qdev;
+
+ qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
+ qdev->single_msi);
if (IS_ERR(qdev->mhi_cntrl)) {
ret = PTR_ERR(qdev->mhi_cntrl);
- goto cleanup_qdev;
+ goto cleanup_drm_dev;
}
return 0;
+cleanup_drm_dev:
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
cleanup_qdev:
cleanup_qdev(qdev);
return ret;
@@ -499,7 +510,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
if (!qdev)
return;
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
+ qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
cleanup_qdev(qdev);
}
@@ -522,14 +534,13 @@ static void qaic_pci_reset_prepare(struct pci_dev *pdev)
qaic_notify_reset(qdev);
qaic_mhi_start_reset(qdev->mhi_cntrl);
- qaic_dev_reset_clean_local_state(qdev, false);
+ qaic_dev_reset_clean_local_state(qdev);
}
static void qaic_pci_reset_done(struct pci_dev *pdev)
{
struct qaic_device *qdev = pci_get_drvdata(pdev);
- qdev->in_reset = false;
qaic_mhi_reset_done(qdev->mhi_cntrl);
}
@@ -586,6 +597,10 @@ static int __init qaic_init(void)
goto free_pci;
}
+ ret = qaic_timesync_init();
+ if (ret)
+ pr_debug("qaic: qaic_timesync_init failed %d\n", ret);
+
return 0;
free_pci:
@@ -611,6 +626,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_timesync_deinit();
mhi_driver_unregister(&qaic_mhi_driver);
pci_unregister_driver(&qaic_pci_driver);
}
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
new file mode 100644
index 000000000..301f4462d
--- /dev/null
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+
+#include "qaic.h"
+#include "qaic_timesync.h"
+
+#define QTIMER_REG_OFFSET 0xa28
+#define QAIC_TIMESYNC_SIGNATURE 0x55aa
+#define QAIC_CONV_QTIMER_TO_US(qtimer) (mul_u64_u32_div(qtimer, 10, 192))
+
+static unsigned int timesync_delay_ms = 1000; /* 1 sec default */
+module_param(timesync_delay_ms, uint, 0600);
+MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations");
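
/*
 * QAIC_CONV_QTIMER_TO_US() above multiplies by 10 and divides by 192,
 * i.e. it converts ticks of a 19.2 MHz QTimer into microseconds
 * (us = ticks / 19.2). A quick standalone check of the arithmetic (demo
 * only; the kernel uses mul_u64_u32_div() to avoid overflowing the
 * intermediate product):
 */
#include <assert.h>
#include <stdint.h>

static uint64_t qtimer_to_us(uint64_t ticks)
{
	return ticks * 10 / 192; /* may overflow for huge tick counts */
}

int main(void)
{
	assert(qtimer_to_us(19200000) == 1000000); /* 1 s of ticks -> 1e6 us */
	assert(qtimer_to_us(192) == 10);           /* 192 ticks -> 10 us */
	return 0;
}
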
+
+enum qts_msg_type {
+ QAIC_TS_CMD_TO_HOST,
+ QAIC_TS_SYNC_REQ,
+ QAIC_TS_ACK_TO_HOST,
+ QAIC_TS_MSG_TYPE_MAX
+};
+
+/**
+ * struct qts_hdr - Timesync message header structure.
+ * @signature: Unique signature to identify the timesync message.
+ * @reserved_1: Reserved for future use.
+ * @reserved_2: Reserved for future use.
+ * @msg_type: Sub-type of the timesync message.
+ * @reserved_3: Reserved for future use.
+ */
+struct qts_hdr {
+ __le16 signature;
+ __le16 reserved_1;
+ u8 reserved_2;
+ u8 msg_type;
+ __le16 reserved_3;
+} __packed;
+
+/**
+ * struct qts_timeval - Structure to carry time information.
+ * @tv_sec: Seconds part of the time.
+ * @tv_usec: uS (microseconds) part of the time.
+ */
+struct qts_timeval {
+ __le64 tv_sec;
+ __le64 tv_usec;
+} __packed;
+
+/**
+ * struct qts_host_time_sync_msg_data - Structure to denote the timesync message.
+ * @header: Header of the timesync message.
+ * @data: Time information.
+ */
+struct qts_host_time_sync_msg_data {
+ struct qts_hdr header;
+ struct qts_timeval data;
+} __packed;
+
+/**
+ * struct mqts_dev - MHI QAIC Timesync Control device.
+ * @qdev: Pointer to the root device struct driven by QAIC driver.
+ * @mhi_dev: Pointer to associated MHI device.
+ * @timer: Timer handle used for timesync.
+ * @qtimer_addr: Device QTimer register pointer.
+ * @buff_in_use: Atomic flag to track whether the sync_msg buffer is in use.
+ * @dev: Device pointer to qdev->pdev->dev stored for easy access.
+ * @sync_msg: Buffer used to send timesync message over MHI.
+ */
+struct mqts_dev {
+ struct qaic_device *qdev;
+ struct mhi_device *mhi_dev;
+ struct timer_list timer;
+ void __iomem *qtimer_addr;
+ atomic_t buff_in_use;
+ struct device *dev;
+ struct qts_host_time_sync_msg_data *sync_msg;
+};
+
+struct qts_resp_msg {
+ struct qts_hdr hdr;
+} __packed;
+
+struct qts_resp {
+ struct qts_resp_msg data;
+ struct work_struct work;
+ struct qaic_device *qdev;
+};
+
+#ifdef readq
+static u64 read_qtimer(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#else
+static u64 read_qtimer(const volatile void __iomem *addr)
+{
+ u64 low, high;
+
+ low = readl(addr);
+ high = readl(addr + sizeof(u32));
+ return low | (high << 32);
+}
+#endif
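
/*
 * On 32-bit builds without readq, the fallback above composes the 64-bit
 * QTimer value from two 32-bit reads. Because the counter keeps running,
 * the low word can wrap between the two reads; a common tear-resistant
 * variant (shown purely as an illustrative sketch, not what this driver
 * does) re-reads the high word until it is stable:
 */
static u64 read_qtimer_stable(const volatile void __iomem *addr)
{
	u32 high, low, high2;

	do {
		high = readl(addr + sizeof(u32));
		low = readl(addr);
		high2 = readl(addr + sizeof(u32));
	} while (high != high2);

	return low | ((u64)high << 32);
}
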
+
+static void qaic_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_dbg(mqtsdev->dev, "%s status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ atomic_set(&mqtsdev->buff_in_use, 0);
+}
+
+static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_err(mqtsdev->dev, "%s no data expected on dl channel\n", __func__);
+}
+
+static void qaic_timesync_timer(struct timer_list *t)
+{
+ struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
+ struct qts_host_time_sync_msg_data *sync_msg;
+ u64 device_qtimer_us;
+ u64 device_qtimer;
+ u64 host_time_us;
+ u64 offset_us;
+ u64 host_sec;
+ int ret;
+
+ if (atomic_read(&mqtsdev->buff_in_use)) {
+ dev_dbg(mqtsdev->dev, "%s buffer not free, schedule next cycle\n", __func__);
+ goto mod_timer;
+ }
+ atomic_set(&mqtsdev->buff_in_use, 1);
+
+ sync_msg = mqtsdev->sync_msg;
+ sync_msg->header.signature = cpu_to_le16(QAIC_TIMESYNC_SIGNATURE);
+ sync_msg->header.msg_type = QAIC_TS_SYNC_REQ;
+	/* Read host UTC time and convert to microseconds */
+ host_time_us = div_u64(ktime_get_real_ns(), NSEC_PER_USEC);
+ device_qtimer = read_qtimer(mqtsdev->qtimer_addr);
+ device_qtimer_us = QAIC_CONV_QTIMER_TO_US(device_qtimer);
+ /* Offset between host UTC and device time */
+ offset_us = host_time_us - device_qtimer_us;
+
+ host_sec = div_u64(offset_us, USEC_PER_SEC);
+ sync_msg->data.tv_usec = cpu_to_le64(offset_us - host_sec * USEC_PER_SEC);
+ sync_msg->data.tv_sec = cpu_to_le64(host_sec);
+ ret = mhi_queue_buf(mqtsdev->mhi_dev, DMA_TO_DEVICE, sync_msg, sizeof(*sync_msg), MHI_EOT);
+ if (ret && (ret != -EAGAIN)) {
+ dev_err(mqtsdev->dev, "%s unable to queue to mhi:%d\n", __func__, ret);
+ return;
+ } else if (ret == -EAGAIN) {
+ atomic_set(&mqtsdev->buff_in_use, 0);
+ }
+
+mod_timer:
+ ret = mod_timer(t, jiffies + msecs_to_jiffies(timesync_delay_ms));
+ if (ret)
+ dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
+}
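
/*
 * The timer callback above sends the host-minus-device offset split into
 * whole seconds and leftover microseconds, matching struct qts_timeval.
 * The split is a plain div/mod pair; a standalone check (demo only, plain
 * C division in place of div_u64()):
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_USEC_PER_SEC 1000000ULL

static void split_offset(uint64_t offset_us, uint64_t *sec, uint64_t *usec)
{
	*sec = offset_us / DEMO_USEC_PER_SEC;
	*usec = offset_us - *sec * DEMO_USEC_PER_SEC;
}

int main(void)
{
	uint64_t sec, usec;

	split_offset(2500123, &sec, &usec);
	assert(sec == 2 && usec == 500123);
	return 0;
}
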
+
+static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct mqts_dev *mqtsdev;
+ struct timer_list *timer;
+ int ret;
+
+ mqtsdev = kzalloc(sizeof(*mqtsdev), GFP_KERNEL);
+ if (!mqtsdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ timer = &mqtsdev->timer;
+ mqtsdev->mhi_dev = mhi_dev;
+ mqtsdev->qdev = qdev;
+ mqtsdev->dev = &qdev->pdev->dev;
+
+ mqtsdev->sync_msg = kzalloc(sizeof(*mqtsdev->sync_msg), GFP_KERNEL);
+ if (!mqtsdev->sync_msg) {
+ ret = -ENOMEM;
+ goto free_mqts_dev;
+ }
+ atomic_set(&mqtsdev->buff_in_use, 0);
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ goto free_sync_msg;
+
+ /* Qtimer register pointer */
+ mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
+ timer_setup(timer, qaic_timesync_timer, 0);
+ timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
+ add_timer(timer);
+ dev_set_drvdata(&mhi_dev->dev, mqtsdev);
+
+ return 0;
+
+free_sync_msg:
+ kfree(mqtsdev->sync_msg);
+free_mqts_dev:
+ kfree(mqtsdev);
+out:
+ return ret;
+}
+
+static void qaic_timesync_remove(struct mhi_device *mhi_dev)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ del_timer_sync(&mqtsdev->timer);
+ mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
+ kfree(mqtsdev->sync_msg);
+ kfree(mqtsdev);
+}
+
+static const struct mhi_device_id qaic_timesync_match_table[] = {
+ { .chan = "QAIC_TIMESYNC_PERIODIC"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(mhi, qaic_timesync_match_table);
+
+static struct mhi_driver qaic_timesync_driver = {
+ .id_table = qaic_timesync_match_table,
+ .remove = qaic_timesync_remove,
+ .probe = qaic_timesync_probe,
+ .ul_xfer_cb = qaic_timesync_ul_xfer_cb,
+ .dl_xfer_cb = qaic_timesync_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_timesync_periodic",
+ },
+};
+
+static void qaic_boot_timesync_worker(struct work_struct *work)
+{
+ struct qts_resp *resp = container_of(work, struct qts_resp, work);
+ struct qts_host_time_sync_msg_data *req;
+ struct qts_resp_msg data = resp->data;
+ struct qaic_device *qdev = resp->qdev;
+ struct mhi_device *mhi_dev;
+ struct timespec64 ts;
+ int ret;
+
+ mhi_dev = qdev->qts_ch;
+	/* Queue the response buffer beforehand to avoid racing with the device */
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ dev_warn(&mhi_dev->dev, "Failed to re-queue response buffer %d\n", ret);
+ return;
+ }
+
+ switch (data.hdr.msg_type) {
+ case QAIC_TS_CMD_TO_HOST:
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ break;
+
+ req->header = data.hdr;
+ req->header.msg_type = QAIC_TS_SYNC_REQ;
+ ktime_get_real_ts64(&ts);
+ req->data.tv_sec = cpu_to_le64(ts.tv_sec);
+ req->data.tv_usec = cpu_to_le64(div_u64(ts.tv_nsec, NSEC_PER_USEC));
+
+ ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, req, sizeof(*req), MHI_EOT);
+ if (ret) {
+ kfree(req);
+ dev_dbg(&mhi_dev->dev, "Failed to send request message. Error %d\n", ret);
+ }
+ break;
+ case QAIC_TS_ACK_TO_HOST:
+ dev_dbg(&mhi_dev->dev, "ACK received from device\n");
+ break;
+ default:
+ dev_err(&mhi_dev->dev, "Invalid message type %u.\n", data.hdr.msg_type);
+ }
+}
+
+static int qaic_boot_timesync_queue_resp(struct mhi_device *mhi_dev, struct qaic_device *qdev)
+{
+ struct qts_resp *resp;
+ int ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->qdev = qdev;
+ INIT_WORK(&resp->work, qaic_boot_timesync_worker);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ dev_warn(&mhi_dev->dev, "Failed to queue response buffer %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qaic_boot_timesync_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ mhi_unprepare_from_transfer(qdev->qts_ch);
+ qdev->qts_ch = NULL;
+}
+
+static int qaic_boot_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ qdev->qts_ch = mhi_dev;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+
+ ret = qaic_boot_timesync_queue_resp(mhi_dev, qdev);
+ if (ret) {
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+ qdev->qts_ch = NULL;
+ mhi_unprepare_from_transfer(mhi_dev);
+ }
+
+ return ret;
+}
+
+static void qaic_boot_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ kfree(mhi_result->buf_addr);
+}
+
+static void qaic_boot_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qts_resp *resp = container_of(mhi_result->buf_addr, struct qts_resp, data);
+
+ if (mhi_result->transaction_status || mhi_result->bytes_xferd != sizeof(resp->data)) {
+ kfree(resp);
+ return;
+ }
+
+ queue_work(resp->qdev->qts_wq, &resp->work);
+}
+
+static const struct mhi_device_id qaic_boot_timesync_match_table[] = {
+ { .chan = "QAIC_TIMESYNC"},
+ {},
+};
+
+static struct mhi_driver qaic_boot_timesync_driver = {
+ .id_table = qaic_boot_timesync_match_table,
+ .remove = qaic_boot_timesync_remove,
+ .probe = qaic_boot_timesync_probe,
+ .ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb,
+ .dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_timesync",
+ },
+};
+
+int qaic_timesync_init(void)
+{
+ int ret;
+
+ ret = mhi_driver_register(&qaic_timesync_driver);
+ if (ret)
+ return ret;
+ ret = mhi_driver_register(&qaic_boot_timesync_driver);
+
+ return ret;
+}
+
+void qaic_timesync_deinit(void)
+{
+ mhi_driver_unregister(&qaic_boot_timesync_driver);
+ mhi_driver_unregister(&qaic_timesync_driver);
+}
diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h
new file mode 100644
index 000000000..851b7acd4
--- /dev/null
+++ b/drivers/accel/qaic/qaic_timesync.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QAIC_TIMESYNC_H__
+#define __QAIC_TIMESYNC_H__
+
+int qaic_timesync_init(void);
+void qaic_timesync_deinit(void);
+#endif /* __QAIC_TIMESYNC_H__ */
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 1fbc9b921..736c2eb8c 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f819e760f..3c3f8037e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -61,6 +61,10 @@ config ACPI_CCA_REQUIRED
config ACPI_TABLE_LIB
bool
+config ACPI_THERMAL_LIB
+ depends on THERMAL
+ bool
+
config ACPI_DEBUGGER
bool "AML debugger interface"
select ACPI_DEBUG
@@ -310,7 +314,6 @@ config ACPI_HOTPLUG_CPU
bool
depends on ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_CONTAINER
- default y
config ACPI_PROCESSOR_AGGREGATOR
tristate "Processor Aggregator"
@@ -327,6 +330,7 @@ config ACPI_THERMAL
tristate "Thermal Zone"
depends on ACPI_PROCESSOR
select THERMAL
+ select ACPI_THERMAL_LIB
default y
help
This driver supports ACPI thermal zones. Most mobile and
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index eaa09bf52..12ef8180d 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,7 +37,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o
# ACPI Bus and Device Drivers
#
acpi-y += bus.o glue.o
-acpi-y += scan.o
+acpi-y += scan.o mipi-disco-img.o
acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
@@ -89,6 +89,7 @@ obj-$(CONFIG_ACPI_TAD) += acpi_tad.o
obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
+obj-$(CONFIG_ACPI_THERMAL_LIB) += thermal_lib.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_PLATFORM_PROFILE) += platform_profile.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index d48407472..04e273167 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -167,13 +167,9 @@ static struct pwm_lookup byt_pwm_lookup[] = {
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
- u64 uid;
-
/* Only call pwm_add_table for the first PWM controller */
- if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
- return;
-
- pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ if (acpi_dev_uid_match(pdata->adev, 1))
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
#define LPSS_I2C_ENABLE 0x6c
@@ -218,13 +214,9 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
- u64 uid;
-
/* Only call pwm_add_table for the first PWM controller */
- if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
- return;
-
- pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
+ if (acpi_dev_uid_match(pdata->adev, 1))
+ pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
static const struct property_entry lpt_spi_properties[] = {
@@ -571,34 +563,6 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
-static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
-{
- struct acpi_handle_list dep_devices;
- acpi_status status;
- bool ret = false;
- int i;
-
- if (!acpi_has_method(adev->handle, "_DEP"))
- return false;
-
- status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
- &dep_devices);
- if (ACPI_FAILURE(status)) {
- dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
- return false;
- }
-
- for (i = 0; i < dep_devices.count; i++) {
- if (dep_devices.handles[i] == handle) {
- ret = true;
- break;
- }
- }
-
- acpi_handle_list_free(&dep_devices);
- return ret;
-}
-
static void acpi_lpss_link_consumer(struct device *dev1,
const struct lpss_device_links *link)
{
@@ -609,7 +573,7 @@ static void acpi_lpss_link_consumer(struct device *dev1,
return;
if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
- || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ || acpi_device_dep(ACPI_HANDLE(dev2), ACPI_HANDLE(dev1)))
device_link_add(dev2, dev1, link->flags);
put_device(dev2);
@@ -625,7 +589,7 @@ static void acpi_lpss_link_supplier(struct device *dev1,
return;
if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
- || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ || acpi_device_dep(ACPI_HANDLE(dev1), ACPI_HANDLE(dev2)))
device_link_add(dev1, dev2, link->flags);
put_device(dev2);
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0f5218e36..4fe2ef540 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -184,24 +184,6 @@ static void __init acpi_pcc_cpufreq_init(void) {}
/* Initialization */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
-int __weak acpi_map_cpu(acpi_handle handle,
- phys_cpuid_t physid, u32 acpi_id, int *pcpu)
-{
- return -ENODEV;
-}
-
-int __weak acpi_unmap_cpu(int cpu)
-{
- return -ENODEV;
-}
-
-int __weak arch_register_cpu(int cpu)
-{
- return -ENODEV;
-}
-
-void __weak arch_unregister_cpu(int cpu) {}
-
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
unsigned long long sta;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 33ddb4477..4afdda9db 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(hw_changes_brightness,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static int only_lcd = -1;
+static int only_lcd;
module_param(only_lcd, int, 0444);
static bool may_report_brightness_keys;
@@ -2146,57 +2146,6 @@ static int __init intel_opregion_present(void)
return opregion;
}
-/* Check if the chassis-type indicates there is no builtin LCD panel */
-static bool dmi_is_desktop(void)
-{
- const char *chassis_type;
- unsigned long type;
-
- chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
- if (!chassis_type)
- return false;
-
- if (kstrtoul(chassis_type, 10, &type) != 0)
- return false;
-
- switch (type) {
- case 0x03: /* Desktop */
- case 0x04: /* Low Profile Desktop */
- case 0x05: /* Pizza Box */
- case 0x06: /* Mini Tower */
- case 0x07: /* Tower */
- case 0x10: /* Lunch Box */
- case 0x11: /* Main Server Chassis */
- return true;
- }
-
- return false;
-}
-
-/*
- * We're seeing a lot of bogus backlight interfaces on newer machines
- * without a LCD such as desktops, servers and HDMI sticks. Checking the
- * lcd flag fixes this, enable this by default on any machines which are:
- * 1. Win8 ready (where we also prefer the native backlight driver, so
- * normally the acpi_video code should not register there anyways); *and*
- * 2.1 Report a desktop/server DMI chassis-type, or
- * 2.2 Are an ACPI-reduced-hardware platform (and thus won't use the EC for
- backlight control)
- */
-static bool should_check_lcd_flag(void)
-{
- if (!acpi_osi_is_win8())
- return false;
-
- if (dmi_is_desktop())
- return true;
-
- if (acpi_reduced_hardware())
- return true;
-
- return false;
-}
-
int acpi_video_register(void)
{
int ret = 0;
@@ -2210,9 +2159,6 @@ int acpi_video_register(void)
goto leave;
}
- if (only_lcd == -1)
- only_lcd = should_check_lcd_flag();
-
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ca28183f4..8e9e001da 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -81,7 +81,7 @@ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
return wdat;
}
-/**
+/*
* Returns true if this system should prefer ACPI based watchdog instead of
* the native one (which are typically the same hardware).
*/
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index b91155ea9..c9131259f 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
ACPI_FREE(buffer.pointer);
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+ status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could not evaluate object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
/*
* Since this is a field unit, surround the output in braces
*/
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 013eb621d..89fb9331c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -73,6 +73,7 @@ static u32 notrigger;
static u32 vendor_flags;
static struct debugfs_blob_wrapper vendor_blob;
+static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
/*
@@ -182,6 +183,21 @@ static int einj_timedout(u64 *t)
return 0;
}
+static void get_oem_vendor_struct(u64 paddr, int offset,
+ struct vendor_error_type_extension *v)
+{
+ unsigned long vendor_size;
+ u64 target_pa = paddr + offset + sizeof(struct vendor_error_type_extension);
+
+ vendor_size = v->length - sizeof(struct vendor_error_type_extension);
+
+ if (vendor_size)
+ vendor_errors.data = acpi_os_map_memory(target_pa, vendor_size);
+
+ if (vendor_errors.data)
+ vendor_errors.size = vendor_size;
+}
+
static void check_vendor_extension(u64 paddr,
struct set_error_type_with_address *v5param)
{
@@ -194,6 +210,7 @@ static void check_vendor_extension(u64 paddr,
v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
if (!v)
return;
+ get_oem_vendor_struct(paddr, offset, v);
sbdf = v->pcie_sbdf;
sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
sbdf >> 24, (sbdf >> 16) & 0xff,
@@ -577,38 +594,40 @@ static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
-static const char * const einj_error_type_string[] = {
- "0x00000001\tProcessor Correctable\n",
- "0x00000002\tProcessor Uncorrectable non-fatal\n",
- "0x00000004\tProcessor Uncorrectable fatal\n",
- "0x00000008\tMemory Correctable\n",
- "0x00000010\tMemory Uncorrectable non-fatal\n",
- "0x00000020\tMemory Uncorrectable fatal\n",
- "0x00000040\tPCI Express Correctable\n",
- "0x00000080\tPCI Express Uncorrectable non-fatal\n",
- "0x00000100\tPCI Express Uncorrectable fatal\n",
- "0x00000200\tPlatform Correctable\n",
- "0x00000400\tPlatform Uncorrectable non-fatal\n",
- "0x00000800\tPlatform Uncorrectable fatal\n",
- "0x00001000\tCXL.cache Protocol Correctable\n",
- "0x00002000\tCXL.cache Protocol Uncorrectable non-fatal\n",
- "0x00004000\tCXL.cache Protocol Uncorrectable fatal\n",
- "0x00008000\tCXL.mem Protocol Correctable\n",
- "0x00010000\tCXL.mem Protocol Uncorrectable non-fatal\n",
- "0x00020000\tCXL.mem Protocol Uncorrectable fatal\n",
+static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
+ { BIT(0), "Processor Correctable" },
+ { BIT(1), "Processor Uncorrectable non-fatal" },
+ { BIT(2), "Processor Uncorrectable fatal" },
+ { BIT(3), "Memory Correctable" },
+ { BIT(4), "Memory Uncorrectable non-fatal" },
+ { BIT(5), "Memory Uncorrectable fatal" },
+ { BIT(6), "PCI Express Correctable" },
+ { BIT(7), "PCI Express Uncorrectable non-fatal" },
+ { BIT(8), "PCI Express Uncorrectable fatal" },
+ { BIT(9), "Platform Correctable" },
+ { BIT(10), "Platform Uncorrectable non-fatal" },
+ { BIT(11), "Platform Uncorrectable fatal"},
+ { BIT(12), "CXL.cache Protocol Correctable" },
+ { BIT(13), "CXL.cache Protocol Uncorrectable non-fatal" },
+ { BIT(14), "CXL.cache Protocol Uncorrectable fatal" },
+ { BIT(15), "CXL.mem Protocol Correctable" },
+ { BIT(16), "CXL.mem Protocol Uncorrectable non-fatal" },
+ { BIT(17), "CXL.mem Protocol Uncorrectable fatal" },
+ { BIT(31), "Vendor Defined Error Types" },
};
static int available_error_type_show(struct seq_file *m, void *v)
{
int rc;
- u32 available_error_type = 0;
+ u32 error_type = 0;
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_type(&error_type);
if (rc)
return rc;
for (int pos = 0; pos < ARRAY_SIZE(einj_error_type_string); pos++)
- if (available_error_type & BIT(pos))
- seq_puts(m, einj_error_type_string[pos]);
+ if (error_type & einj_error_type_string[pos].mask)
+ seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
+ einj_error_type_string[pos].str);
return 0;
}
@@ -767,6 +786,10 @@ static int __init einj_init(void)
einj_debug_dir, &vendor_flags);
}
+ if (vendor_errors.size)
+ debugfs_create_blob("oem_error", 0600, einj_debug_dir,
+ &vendor_errors);
+
pr_info("Error INJection is initialized.\n");
return 0;
@@ -792,6 +815,8 @@ static void __exit einj_exit(void)
sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
+ if (vendor_errors.size)
+ acpi_os_unmap_memory(vendor_errors.data, vendor_errors.size);
}
einj_exec_ctx_init(&ctx);
apei_exec_post_unmap_gars(&ctx);
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 143debc1b..726944648 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
+obj-y += thermal_cpufreq.o
diff --git a/drivers/acpi/arm64/thermal_cpufreq.c b/drivers/acpi/arm64/thermal_cpufreq.c
new file mode 100644
index 000000000..582854914
--- /dev/null
+++ b/drivers/acpi/arm64/thermal_cpufreq.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/acpi.h>
+#include <linux/export.h>
+
+#include "../internal.h"
+
+#define SMCCC_SOC_ID_T241 0x036b0241
+
+int acpi_arch_thermal_cpufreq_pctg(void)
+{
+ s32 soc_id = arm_smccc_get_soc_id_version();
+
+ /*
+	 * Check the JEP106 code for the NVIDIA Tegra241 chip (036b:0241)
+	 * and reduce the cpufreq thermal reduction percentage to 5%.
+ */
+ if (soc_id == SMCCC_SOC_ID_T241)
+ return 5;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_arch_thermal_cpufreq_pctg);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 72e64c071..569bd15f2 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -408,7 +408,7 @@ static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
static void acpi_bus_osc_negotiate_usb_control(void)
{
- u32 capbuf[3];
+ u32 capbuf[3], *capbuf_ret;
struct acpi_osc_context context = {
.uuid_str = sb_usb_uuid_str,
.rev = 1,
@@ -428,7 +428,12 @@ static void acpi_bus_osc_negotiate_usb_control(void)
control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
- capbuf[OSC_QUERY_DWORD] = 0;
+ /*
+ * Run _OSC first with query bit set, trying to get control over
+ * all tunneling. The platform can then clear out bits in the
+ * control dword that it does not want to grant to the OS.
+ */
+ capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_DWORD] = 0;
capbuf[OSC_CONTROL_DWORD] = control;
@@ -441,8 +446,29 @@ static void acpi_bus_osc_negotiate_usb_control(void)
goto out_free;
}
+ /*
+ * Run _OSC again now with query bit clear and the control dword
+ * matching what the platform granted (which may not have all
+ * the control bits set).
+ */
+ capbuf_ret = context.ret.pointer;
+
+ capbuf[OSC_QUERY_DWORD] = 0;
+ capbuf[OSC_CONTROL_DWORD] = capbuf_ret[OSC_CONTROL_DWORD];
+
+ kfree(context.ret.pointer);
+
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (context.ret.length != sizeof(capbuf)) {
+		pr_info("USB4 _OSC: returned a buffer of invalid length\n");
+ goto out_free;
+ }
+
osc_sb_native_usb4_control =
- control & acpi_osc_ctx_get_pci_control(&context);
+ control & acpi_osc_ctx_get_pci_control(&context);
acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 1e76a64cc..cc6102075 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -480,6 +480,7 @@ static int acpi_button_suspend(struct device *dev)
static int acpi_button_resume(struct device *dev)
{
+ struct input_dev *input;
struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
@@ -489,6 +490,14 @@ static int acpi_button_resume(struct device *dev)
button->last_time = ktime_get();
acpi_lid_initialize_state(device);
}
+
+ if (button->type == ACPI_BUTTON_TYPE_POWER) {
+ input = button->input;
+ input_report_key(input, KEY_WAKEUP, 1);
+ input_sync(input);
+ input_report_key(input, KEY_WAKEUP, 0);
+ input_sync(input);
+ }
return 0;
}
#endif
@@ -579,6 +588,7 @@ static int acpi_button_add(struct acpi_device *device)
switch (button->type) {
case ACPI_BUTTON_TYPE_POWER:
input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_KEY, KEY_WAKEUP);
break;
case ACPI_BUTTON_TYPE_SLEEP:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 7ff269a78..1b27ebc6d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,9 @@
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
+#include <linux/dmi.h>
+#include <linux/units.h>
+#include <asm/unaligned.h>
#include <acpi/cppc_acpi.h>
@@ -163,6 +166,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+/* Check for valid access_width, otherwise, fallback to using bit_width */
+#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
+
+/* Shift and apply the mask for CPC reads/writes */
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
+
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
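
/*
 * GET_BIT_WIDTH() above prefers the architected access_width encoding
 * (1 -> 8 bits, 2 -> 16, 3 -> 32, 4 -> 64) and falls back to bit_width;
 * MASK_VAL() then shifts and masks a raw system-memory read down to the
 * register field. The same extraction in standalone form (demo values):
 */
#include <assert.h>
#include <stdint.h>

static uint64_t mask_val(uint64_t raw, unsigned int bit_offset,
			 unsigned int bit_width)
{
	uint64_t mask = bit_width >= 64 ? ~0ULL : (1ULL << bit_width) - 1;

	return (raw >> bit_offset) & mask;
}

int main(void)
{
	assert(mask_val(0xABCD, 4, 8) == 0xBC); /* 8-bit field at bit 4 */
	assert((8 << (3 - 1)) == 32);           /* access_width 3 -> 32 bits */
	return 0;
}
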
@@ -777,6 +787,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (gas_t->address) {
void __iomem *addr;
+ size_t access_width;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
@@ -784,7 +795,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- addr = ioremap(gas_t->address, gas_t->bit_width/8);
+ access_width = GET_BIT_WIDTH(gas_t) / 8;
+ addr = ioremap(gas_t->address, access_width);
if (!addr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
@@ -980,6 +992,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
void __iomem *vaddr = NULL;
+ int size;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@@ -989,14 +1002,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1005,17 +1018,24 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
+ val, size);
- switch (reg->bit_width) {
+ switch (size) {
case 8:
*val = readb_relaxed(vaddr);
break;
@@ -1029,27 +1049,37 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ *val = MASK_VAL(reg, *val);
+
return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
+ int size;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1057,17 +1087,27 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
+ val, size);
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ val = MASK_VAL(reg, val);
- switch (reg->bit_width) {
+ switch (size) {
case 8:
writeb_relaxed(val, vaddr);
break;
@@ -1081,8 +1121,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
@@ -1760,3 +1805,104 @@ unsigned int cppc_get_transition_latency(int cpu_num)
return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
+
+/* Minimum struct length needed for the DMI processor entry we want */
+#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
+
+/* Offset in the DMI processor structure for the max frequency */
+#define DMI_PROCESSOR_MAX_SPEED 0x14
+
+/* Callback function used to retrieve the max frequency from DMI */
+static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
+{
+ const u8 *dmi_data = (const u8 *)dm;
+ u16 *mhz = (u16 *)private;
+
+ if (dm->type == DMI_ENTRY_PROCESSOR &&
+ dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
+ u16 val = (u16)get_unaligned((const u16 *)
+ (dmi_data + DMI_PROCESSOR_MAX_SPEED));
+ *mhz = val > *mhz ? val : *mhz;
+ }
+}
+
+/* Look up the max frequency in DMI */
+static u64 cppc_get_dmi_max_khz(void)
+{
+ u16 mhz = 0;
+
+ dmi_walk(cppc_find_dmi_mhz, &mhz);
+
+ /*
+	 * Fall back to 1 MHz in case DMI does not report an actual
+	 * value, so this function never returns 0.
+ */
+ mhz = mhz ? mhz : 1;
+
+ return KHZ_PER_MHZ * mhz;
+}
+
+/*
+ * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+ * use them to convert perf to freq and vice versa. The conversion is
+ * extrapolated as an affine function passing by the 2 points:
+ * - (Low perf, Low freq)
+ * - (Nominal perf, Nominal freq)
+ */
+unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
+{
+ s64 retval, offset = 0;
+ static u64 max_khz;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ mul *= KHZ_PER_MHZ;
+ div = caps->nominal_perf - caps->lowest_perf;
+ offset = caps->nominal_freq * KHZ_PER_MHZ -
+ div64_u64(caps->nominal_perf * mul, div);
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = max_khz;
+ div = caps->highest_perf;
+ }
+
+ retval = offset + div64_u64(perf * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
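
/*
 * With both (lowest_perf, lowest_freq) and (nominal_perf, nominal_freq)
 * exposed, cppc_perf_to_khz() above evaluates the line through those two
 * points: khz = offset + perf * mul / div. A numeric check with
 * hypothetical capability values (perf 10 <-> 500 MHz, perf 100 <-> 2 GHz):
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_KHZ_PER_MHZ 1000ULL

static int64_t perf_to_khz(uint64_t lo_perf, uint64_t lo_mhz,
			   uint64_t nom_perf, uint64_t nom_mhz, uint64_t perf)
{
	uint64_t mul = (nom_mhz - lo_mhz) * DEMO_KHZ_PER_MHZ;
	uint64_t div = nom_perf - lo_perf;
	int64_t offset = nom_mhz * DEMO_KHZ_PER_MHZ - nom_perf * mul / div;

	return offset + (int64_t)(perf * mul / div);
}

int main(void)
{
	assert(perf_to_khz(10, 500, 100, 2000, 10) == 500000);
	assert(perf_to_khz(10, 500, 100, 2000, 100) == 2000000);
	assert(perf_to_khz(10, 500, 100, 2000, 55) == 1250000);
	return 0;
}
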
+
+unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
+{
+ s64 retval, offset = 0;
+ static u64 max_khz;
+ u64 mul, div;
+
+ if (caps->lowest_freq && caps->nominal_freq) {
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ /*
+		 * We don't need to convert to kHz to compute the offset and
+		 * can use nominal_freq and lowest_freq directly, since
+		 * div64_u64 cancels out the frequency unit.
+ */
+ offset = caps->nominal_perf -
+ div64_u64(caps->nominal_freq * mul, div);
+ /* But we need it for computing the perf level. */
+ div *= KHZ_PER_MHZ;
+ } else {
+ if (!max_khz)
+ max_khz = cppc_get_dmi_max_khz();
+ mul = caps->highest_perf;
+ div = max_khz;
+ }
+
+ retval = offset + div64_u64(freq * mul, div);
+ if (retval >= 0)
+ return retval;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a59c11df7..02255795b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1458,8 +1458,8 @@ static bool install_gpe_event_handler(struct acpi_ec *ec)
static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
{
- return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
- "ACPI EC", ec) >= 0;
+ return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0;
}
/**
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 866c7c4ed..6588525c4 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -85,6 +85,20 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
void acpi_scan_table_notify(void);
+int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+
+#ifdef CONFIG_ARM64
+int acpi_arch_thermal_cpufreq_pctg(void);
+#else
+static inline int acpi_arch_thermal_cpufreq_pctg(void)
+{
+ return 0;
+}
+#endif
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
@@ -148,8 +162,11 @@ int acpi_wakeup_device_init(void);
#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
void acpi_early_processor_control_setup(void);
void acpi_early_processor_set_pdc(void);
-
+#ifdef CONFIG_X86
void acpi_proc_quirk_mwait_check(void);
+#else
+static inline void acpi_proc_quirk_mwait_check(void) {}
+#endif
bool processor_physically_present(acpi_handle handle);
#else
static inline void acpi_early_processor_control_setup(void) {}
@@ -276,4 +293,13 @@ void acpi_init_lpit(void);
static inline void acpi_init_lpit(void) { }
#endif
+/*--------------------------------------------------------------------------
+ ACPI _CRS CSI-2 and MIPI DisCo for Imaging
+ -------------------------------------------------------------------------- */
+
+void acpi_mipi_check_crs_csi2(acpi_handle handle);
+void acpi_mipi_scan_crs_csi2(void);
+void acpi_mipi_init_crs_csi2_swnodes(void);
+void acpi_mipi_crs_csi2_cleanup(void);
+
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
new file mode 100644
index 000000000..7286cf457
--- /dev/null
+++ b/drivers/acpi/mipi-disco-img.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPI DisCo for Imaging support.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ *
+ * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in
+ * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource
+ * Descriptor" of ACPI 6.5 and using device properties defined by the MIPI DisCo
+ * for Imaging specification.
+ *
+ * The implementation looks for the information in the ACPI namespace (CSI-2
+ * resource descriptors in _CRS) and constructs software nodes compatible with
+ * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2
+ * connection graph. The software nodes are then populated with the data
+ * extracted from the _CRS CSI-2 resource descriptors and the MIPI DisCo
+ * for Imaging device properties present in _DSD for the ACPI device objects
+ * with CSI-2 connections.
+ */
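+/*
+ * The resulting layout mirrors the DT graph bindings: for a hypothetical
+ * camera sensor with a single CSI-2 port, roughly:
+ *
+ *   \_SB.PCI0.SEN0 (root software node carrying the device properties)
+ *       port@0 ("reg" = local port instance)
+ *           endpoint@0 ("remote-endpoint", "bus-type", "data-lanes", ...)
+ */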
+
+#include <linux/acpi.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <media/v4l2-fwnode.h>
+
+#include "internal.h"
+
+static LIST_HEAD(acpi_mipi_crs_csi2_list);
+
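+/*
+ * The empty handler below only serves as a tag: its address is the key
+ * under which data is attached to and retrieved from ACPI namespace nodes
+ * via acpi_attach_data() and acpi_get_data_full().
+ */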
+static void acpi_mipi_data_tag(acpi_handle handle, void *context)
+{
+}
+
+/* Connection data extracted from one _CRS CSI-2 resource descriptor. */
+struct crs_csi2_connection {
+ struct list_head entry;
+ struct acpi_resource_csi2_serialbus csi2_data;
+ acpi_handle remote_handle;
+ char remote_name[];
+};
+
+/* Data extracted from _CRS CSI-2 resource descriptors for one device. */
+struct crs_csi2 {
+ struct list_head entry;
+ acpi_handle handle;
+ struct acpi_device_software_nodes *swnodes;
+ struct list_head connections;
+ u32 port_count;
+};
+
+struct csi2_resources_walk_data {
+ acpi_handle handle;
+ struct list_head connections;
+};
+
+static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context)
+{
+ struct csi2_resources_walk_data *crwd = context;
+ struct acpi_resource_csi2_serialbus *csi2_res;
+ struct acpi_resource_source *csi2_res_src;
+ u16 csi2_res_src_length;
+ struct crs_csi2_connection *conn;
+ acpi_handle remote_handle;
+
+ if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return AE_OK;
+
+ csi2_res = &res->data.csi2_serial_bus;
+
+ if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2)
+ return AE_OK;
+
+ csi2_res_src = &csi2_res->resource_source;
+ if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr,
+ &remote_handle))) {
+ acpi_handle_debug(crwd->handle,
+ "unable to find resource source\n");
+ return AE_OK;
+ }
+ csi2_res_src_length = csi2_res_src->string_length;
+ if (!csi2_res_src_length) {
+ acpi_handle_debug(crwd->handle,
+ "invalid resource source string length\n");
+ return AE_OK;
+ }
+
+ conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1),
+ GFP_KERNEL);
+ if (!conn)
+ return AE_OK;
+
+ conn->csi2_data = *csi2_res;
+ strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length);
+ conn->csi2_data.resource_source.string_ptr = conn->remote_name;
+ conn->remote_handle = remote_handle;
+
+ list_add(&conn->entry, &crwd->connections);
+
+ return AE_OK;
+}
+
+static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle,
+ struct list_head *list)
+{
+ struct crs_csi2 *csi2;
+
+ csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL);
+ if (!csi2)
+ return NULL;
+
+ csi2->handle = handle;
+ INIT_LIST_HEAD(&csi2->connections);
+ csi2->port_count = 1;
+
+ if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) {
+ kfree(csi2);
+ return NULL;
+ }
+
+ list_add(&csi2->entry, list);
+
+ return csi2;
+}
+
+static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle)
+{
+ struct crs_csi2 *csi2;
+
+ if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag,
+ (void **)&csi2, NULL)))
+ return NULL;
+
+ return csi2;
+}
+
+static void csi_csr2_release_connections(struct list_head *list)
+{
+ struct crs_csi2_connection *conn, *conn_tmp;
+
+ list_for_each_entry_safe(conn, conn_tmp, list, entry) {
+ list_del(&conn->entry);
+ kfree(conn);
+ }
+}
+
+static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2)
+{
+ list_del(&csi2->entry);
+ acpi_detach_data(csi2->handle, acpi_mipi_data_tag);
+ kfree(csi2->swnodes);
+ csi_csr2_release_connections(&csi2->connections);
+ kfree(csi2);
+}
+
+/**
+ * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS
+ * @handle: Device object handle to evaluate _CRS for.
+ *
+ * Find all CSI-2 resource descriptors in the given device's _CRS
+ * and collect them into a list.
+ */
+void acpi_mipi_check_crs_csi2(acpi_handle handle)
+{
+ struct csi2_resources_walk_data crwd = {
+ .handle = handle,
+ .connections = LIST_HEAD_INIT(crwd.connections),
+ };
+ struct crs_csi2 *csi2;
+
+ /*
+	 * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2
+	 * resource descriptors in _CRS to reduce overhead.
+	 */
+ acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd);
+ if (list_empty(&crwd.connections))
+ return;
+
+ /*
+ * Create a _CRS CSI-2 entry to store the extracted connection
+ * information and add it to the global list.
+ */
+ csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list);
+ if (!csi2) {
+ csi_csr2_release_connections(&crwd.connections);
+ return; /* Nothing really can be done about this. */
+ }
+
+ list_replace(&crwd.connections, &csi2->connections);
+}
+
+#define NO_CSI2_PORT (UINT_MAX - 1)
+
+static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2)
+{
+ size_t port_count = csi2->port_count;
+ struct acpi_device_software_nodes *swnodes;
+ size_t alloc_size;
+ unsigned int i;
+
+	/*
+	 * Allocate memory for the ports, the nodes (one root node plus two
+	 * nodes per port, because every port also has an endpoint node) and
+	 * the node pointers (the number of nodes plus one NULL guardian).
+	 */
+ if (check_mul_overflow(sizeof(*swnodes->ports) +
+ sizeof(*swnodes->nodes) * 2 +
+ sizeof(*swnodes->nodeptrs) * 2,
+ port_count, &alloc_size) ||
+ check_add_overflow(sizeof(*swnodes) +
+ sizeof(*swnodes->nodes) +
+ sizeof(*swnodes->nodeptrs) * 2,
+ alloc_size, &alloc_size)) {
+ acpi_handle_info(csi2->handle,
+ "too many _CRS CSI-2 resource handles (%zu)",
+ port_count);
+ return;
+ }
+
+ swnodes = kmalloc(alloc_size, GFP_KERNEL);
+ if (!swnodes)
+ return;
+
+ swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1);
+ swnodes->nodes = (struct software_node *)(swnodes->ports + port_count);
+ swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 +
+ 2 * port_count);
+ swnodes->num_ports = port_count;
+
+ for (i = 0; i < 2 * port_count + 1; i++)
+ swnodes->nodeptrs[i] = &swnodes->nodes[i];
+
+ swnodes->nodeptrs[i] = NULL;
+
+ for (i = 0; i < port_count; i++)
+ swnodes->ports[i].port_nr = NO_CSI2_PORT;
+
+ csi2->swnodes = swnodes;
+}
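+
+/*
+ * For example, with port_count == 2 the single allocation above is laid
+ * out as:
+ *
+ *   swnodes | ports[0..1] | nodes[0..4] | nodeptrs[0..5]
+ *
+ * where nodes[0] is the root node, each port contributes one port node
+ * and one endpoint node, and nodeptrs[5] is the NULL guardian.
+ */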
+
+#define ACPI_CRS_CSI2_PHY_TYPE_C 0
+#define ACPI_CRS_CSI2_PHY_TYPE_D 1
+
+static unsigned int next_csi2_port_index(struct acpi_device_software_nodes *swnodes,
+ unsigned int port_nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < swnodes->num_ports; i++) {
+ struct acpi_device_software_node_port *port = &swnodes->ports[i];
+
+ if (port->port_nr == port_nr)
+ return i;
+
+ if (port->port_nr == NO_CSI2_PORT) {
+ port->port_nr = port_nr;
+ return i;
+ }
+ }
+
+ return NO_CSI2_PORT;
+}
+
+/* Print graph port name into a buffer, return non-zero on failure. */
+#define GRAPH_PORT_NAME(var, num) \
+ (snprintf((var), sizeof(var), SWNODE_GRAPH_PORT_NAME_FMT, (num)) >= \
+ sizeof(var))
+
+static void extract_crs_csi2_conn_info(acpi_handle local_handle,
+ struct acpi_device_software_nodes *local_swnodes,
+ struct crs_csi2_connection *conn)
+{
+ struct crs_csi2 *remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle);
+ struct acpi_device_software_nodes *remote_swnodes;
+ struct acpi_device_software_node_port *local_port, *remote_port;
+ struct software_node *local_node, *remote_node;
+ unsigned int local_index, remote_index;
+ unsigned int bus_type;
+
+ /*
+ * If the previous steps have failed to make room for a _CRS CSI-2
+ * representation for the remote end of the given connection, skip it.
+ */
+ if (!remote_csi2)
+ return;
+
+ remote_swnodes = remote_csi2->swnodes;
+ if (!remote_swnodes)
+ return;
+
+ switch (conn->csi2_data.phy_type) {
+ case ACPI_CRS_CSI2_PHY_TYPE_C:
+ bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_CPHY;
+ break;
+
+ case ACPI_CRS_CSI2_PHY_TYPE_D:
+ bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_DPHY;
+ break;
+
+ default:
+ acpi_handle_info(local_handle, "unknown CSI-2 PHY type %u\n",
+ conn->csi2_data.phy_type);
+ return;
+ }
+
+ local_index = next_csi2_port_index(local_swnodes,
+ conn->csi2_data.local_port_instance);
+ if (WARN_ON_ONCE(local_index >= local_swnodes->num_ports))
+ return;
+
+ remote_index = next_csi2_port_index(remote_swnodes,
+ conn->csi2_data.resource_source.index);
+ if (WARN_ON_ONCE(remote_index >= remote_swnodes->num_ports))
+ return;
+
+ local_port = &local_swnodes->ports[local_index];
+ local_node = &local_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(local_index)];
+ local_port->crs_csi2_local = true;
+
+ remote_port = &remote_swnodes->ports[remote_index];
+ remote_node = &remote_swnodes->nodes[ACPI_DEVICE_SWNODE_EP(remote_index)];
+
+ local_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(remote_node);
+ remote_port->remote_ep[0] = SOFTWARE_NODE_REFERENCE(local_node);
+
+ local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] =
+ PROPERTY_ENTRY_REF_ARRAY("remote-endpoint",
+ local_port->remote_ep);
+
+ local_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] =
+ PROPERTY_ENTRY_U32("bus-type", bus_type);
+
+ local_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] =
+ PROPERTY_ENTRY_U32("reg", 0);
+
+ local_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] =
+ PROPERTY_ENTRY_U32("reg", conn->csi2_data.local_port_instance);
+
+ if (GRAPH_PORT_NAME(local_port->port_name,
+ conn->csi2_data.local_port_instance))
+ acpi_handle_info(local_handle, "local port %u name too long",
+ conn->csi2_data.local_port_instance);
+
+ remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REMOTE_EP] =
+ PROPERTY_ENTRY_REF_ARRAY("remote-endpoint",
+ remote_port->remote_ep);
+
+ remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_BUS_TYPE] =
+ PROPERTY_ENTRY_U32("bus-type", bus_type);
+
+ remote_port->ep_props[ACPI_DEVICE_SWNODE_EP_REG] =
+ PROPERTY_ENTRY_U32("reg", 0);
+
+ remote_port->port_props[ACPI_DEVICE_SWNODE_PORT_REG] =
+ PROPERTY_ENTRY_U32("reg", conn->csi2_data.resource_source.index);
+
+ if (GRAPH_PORT_NAME(remote_port->port_name,
+ conn->csi2_data.resource_source.index))
+ acpi_handle_info(local_handle, "remote port %u name too long",
+ conn->csi2_data.resource_source.index);
+}
+
+static void prepare_crs_csi2_swnodes(struct crs_csi2 *csi2)
+{
+ struct acpi_device_software_nodes *local_swnodes = csi2->swnodes;
+ acpi_handle local_handle = csi2->handle;
+ struct crs_csi2_connection *conn;
+
+ /* Bail out if the allocation of swnodes has failed. */
+ if (!local_swnodes)
+ return;
+
+ list_for_each_entry(conn, &csi2->connections, entry)
+ extract_crs_csi2_conn_info(local_handle, local_swnodes, conn);
+}
+
+/**
+ * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes
+ *
+ * Note that this function must be called before any struct acpi_device objects
+ * are bound to any ACPI drivers or scan handlers, so it cannot assume the
+ * existence of struct acpi_device objects for every device present in the ACPI
+ * namespace.
+ *
+ * acpi_scan_lock in scan.c must be held when calling this function.
+ */
+void acpi_mipi_scan_crs_csi2(void)
+{
+ struct crs_csi2 *csi2;
+ LIST_HEAD(aux_list);
+
+ /* Count references to each ACPI handle in the CSI-2 connection graph. */
+ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) {
+ struct crs_csi2_connection *conn;
+
+ list_for_each_entry(conn, &csi2->connections, entry) {
+ struct crs_csi2 *remote_csi2;
+
+ csi2->port_count++;
+
+ remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle);
+ if (remote_csi2) {
+ remote_csi2->port_count++;
+ continue;
+ }
+ /*
+ * The remote endpoint has no _CRS CSI-2 list entry yet,
+ * so create one for it and add it to the list.
+ */
+ acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list);
+ }
+ }
+ list_splice(&aux_list, &acpi_mipi_crs_csi2_list);
+
+ /*
+ * Allocate software nodes for representing the CSI-2 information.
+ *
+ * This needs to be done for all of the list entries in one go, because
+ * they may point to each other without restrictions and the next step
+ * relies on the availability of swnodes memory for each list entry.
+ */
+ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry)
+ alloc_crs_csi2_swnodes(csi2);
+
+ /*
+ * Set up software node properties using data from _CRS CSI-2 resource
+ * descriptors.
+ */
+ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry)
+ prepare_crs_csi2_swnodes(csi2);
+}
+
+/*
+ * Get the index of the next property in the property array, with a given
+ * maximum value.
+ */
+#define NEXT_PROPERTY(index, max) \
+ (WARN_ON((index) > ACPI_DEVICE_SWNODE_##max) ? \
+ ACPI_DEVICE_SWNODE_##max : (index)++)
+
+static void init_csi2_port_local(struct acpi_device *adev,
+ struct acpi_device_software_node_port *port,
+ struct fwnode_handle *port_fwnode,
+ unsigned int index)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ unsigned int num_link_freqs;
+ int ret;
+
+ ret = fwnode_property_count_u64(port_fwnode, "mipi-img-link-frequencies");
+ if (ret <= 0)
+ return;
+
+ num_link_freqs = ret;
+ if (num_link_freqs > ACPI_DEVICE_CSI2_DATA_LANES) {
+ acpi_handle_info(handle, "Too many link frequencies: %u\n",
+ num_link_freqs);
+ num_link_freqs = ACPI_DEVICE_CSI2_DATA_LANES;
+ }
+
+ ret = fwnode_property_read_u64_array(port_fwnode,
+ "mipi-img-link-frequencies",
+ port->link_frequencies,
+ num_link_freqs);
+ if (ret) {
+ acpi_handle_info(handle, "Unable to get link frequencies (%d)\n",
+ ret);
+ return;
+ }
+
+ port->ep_props[NEXT_PROPERTY(index, EP_LINK_FREQUENCIES)] =
+ PROPERTY_ENTRY_U64_ARRAY_LEN("link-frequencies",
+ port->link_frequencies,
+ num_link_freqs);
+}
+
+static void init_csi2_port(struct acpi_device *adev,
+ struct acpi_device_software_nodes *swnodes,
+ struct acpi_device_software_node_port *port,
+ struct fwnode_handle *port_fwnode,
+ unsigned int port_index)
+{
+ unsigned int ep_prop_index = ACPI_DEVICE_SWNODE_EP_CLOCK_LANES;
+ acpi_handle handle = acpi_device_handle(adev);
+ u8 val[ACPI_DEVICE_CSI2_DATA_LANES];
+ int num_lanes = 0;
+ int ret;
+
+ if (GRAPH_PORT_NAME(port->port_name, port->port_nr))
+ return;
+
+ swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)] =
+ SOFTWARE_NODE(port->port_name, port->port_props,
+ &swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT]);
+
+ ret = fwnode_property_read_u8(port_fwnode, "mipi-img-clock-lane", val);
+ if (!ret)
+ port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_CLOCK_LANES)] =
+ PROPERTY_ENTRY_U32("clock-lanes", val[0]);
+
+ ret = fwnode_property_count_u8(port_fwnode, "mipi-img-data-lanes");
+ if (ret > 0) {
+ num_lanes = ret;
+
+ if (num_lanes > ACPI_DEVICE_CSI2_DATA_LANES) {
+ acpi_handle_info(handle, "Too many data lanes: %u\n",
+ num_lanes);
+ num_lanes = ACPI_DEVICE_CSI2_DATA_LANES;
+ }
+
+ ret = fwnode_property_read_u8_array(port_fwnode,
+ "mipi-img-data-lanes",
+ val, num_lanes);
+ if (!ret) {
+ unsigned int i;
+
+ for (i = 0; i < num_lanes; i++)
+ port->data_lanes[i] = val[i];
+
+ port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_DATA_LANES)] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN("data-lanes",
+ port->data_lanes,
+ num_lanes);
+ }
+ }
+
+ ret = fwnode_property_count_u8(port_fwnode, "mipi-img-lane-polarities");
+ if (ret < 0) {
+ acpi_handle_debug(handle, "Lane polarity bytes missing\n");
+ } else if (ret * BITS_PER_TYPE(u8) < num_lanes + 1) {
+ acpi_handle_info(handle, "Too few lane polarity bits (%zu vs. %d)\n",
+ ret * BITS_PER_TYPE(u8), num_lanes + 1);
+ } else {
+ unsigned long mask = 0;
+ int byte_count = ret;
+ unsigned int i;
+
+ /*
+ * The total number of lanes is ACPI_DEVICE_CSI2_DATA_LANES + 1
+ * (data lanes + clock lane). It is not expected to ever be
+ * greater than the number of bits in an unsigned long
+ * variable, but ensure that this is the case.
+ */
+ BUILD_BUG_ON(BITS_PER_TYPE(unsigned long) <= ACPI_DEVICE_CSI2_DATA_LANES);
+
+ if (byte_count > sizeof(mask)) {
+ acpi_handle_info(handle, "Too many lane polarities: %d\n",
+ byte_count);
+ byte_count = sizeof(mask);
+ }
+ fwnode_property_read_u8_array(port_fwnode, "mipi-img-lane-polarities",
+ val, byte_count);
+
+ for (i = 0; i < byte_count; i++)
+ mask |= (unsigned long)val[i] << BITS_PER_TYPE(u8) * i;
+
+ for (i = 0; i <= num_lanes; i++)
+ port->lane_polarities[i] = test_bit(i, &mask);
+
+ port->ep_props[NEXT_PROPERTY(ep_prop_index, EP_LANE_POLARITIES)] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN("lane-polarities",
+ port->lane_polarities,
+ num_lanes + 1);
+ }
+
+ swnodes->nodes[ACPI_DEVICE_SWNODE_EP(port_index)] =
+ SOFTWARE_NODE("endpoint@0", swnodes->ports[port_index].ep_props,
+ &swnodes->nodes[ACPI_DEVICE_SWNODE_PORT(port_index)]);
+
+ if (port->crs_csi2_local)
+ init_csi2_port_local(adev, port, port_fwnode, ep_prop_index);
+}
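+
+/*
+ * Lane polarity example: with num_lanes == 2, three polarity bits are
+ * needed (the clock lane plus two data lanes), so a single
+ * "mipi-img-lane-polarities" byte suffices.  A hypothetical value of 0x05
+ * (0b101) yields lane_polarities[] = { 1, 0, 1 }: the clock lane and data
+ * lane 1 are inverted, data lane 0 is not.
+ */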
+
+#define MIPI_IMG_PORT_PREFIX "mipi-img-port-"
+
+static struct fwnode_handle *get_mipi_port_handle(struct fwnode_handle *adev_fwnode,
+ unsigned int port_nr)
+{
+ char port_name[sizeof(MIPI_IMG_PORT_PREFIX) + 2];
+
+ if (snprintf(port_name, sizeof(port_name), "%s%u",
+ MIPI_IMG_PORT_PREFIX, port_nr) >= sizeof(port_name))
+ return NULL;
+
+ return fwnode_get_named_child_node(adev_fwnode, port_name);
+}
+
+static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
+{
+ struct acpi_buffer buffer = { .length = ACPI_ALLOCATE_BUFFER };
+ struct acpi_device_software_nodes *swnodes = csi2->swnodes;
+ acpi_handle handle = csi2->handle;
+ unsigned int prop_index = 0;
+ struct fwnode_handle *adev_fwnode;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned int i;
+ u32 val;
+ int ret;
+
+ /*
+ * Bail out if the swnodes are not available (either they have not been
+ * allocated or they have been assigned to the device already).
+ */
+ if (!swnodes)
+ return;
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ return;
+
+ adev_fwnode = acpi_fwnode_handle(adev);
+
+ /*
+ * If the "rotation" property is not present, but _PLD is there,
+ * evaluate it to get the "rotation" value.
+ */
+ if (!fwnode_property_present(adev_fwnode, "rotation")) {
+ struct acpi_pld_info *pld;
+
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (ACPI_SUCCESS(status)) {
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
+ PROPERTY_ENTRY_U32("rotation",
+ pld->rotation * 45U);
+ kfree(pld);
+ }
+ }
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-clock-frequency", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_CLOCK_FREQUENCY)] =
+ PROPERTY_ENTRY_U32("clock-frequency", val);
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-led-max-current", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_LED_MAX_MICROAMP)] =
+ PROPERTY_ENTRY_U32("led-max-microamp", val);
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-current", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_MICROAMP)] =
+ PROPERTY_ENTRY_U32("flash-max-microamp", val);
+
+ if (!fwnode_property_read_u32(adev_fwnode, "mipi-img-flash-max-timeout-us", &val))
+ swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_FLASH_MAX_TIMEOUT_US)] =
+ PROPERTY_ENTRY_U32("flash-max-timeout-us", val);
+
+ status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_info(handle, "Unable to get the path name\n");
+ return;
+ }
+
+ swnodes->nodes[ACPI_DEVICE_SWNODE_ROOT] =
+ SOFTWARE_NODE(buffer.pointer, swnodes->dev_props, NULL);
+
+ for (i = 0; i < swnodes->num_ports; i++) {
+ struct acpi_device_software_node_port *port = &swnodes->ports[i];
+ struct fwnode_handle *port_fwnode;
+
+ /*
+ * The MIPI DisCo for Imaging specification defines _DSD device
+ * properties for providing CSI-2 port parameters that can be
+ * accessed through the generic device properties framework. To
+ * access them, it is first necessary to find the data node
+ * representing the port under the given ACPI device object.
+ */
+ port_fwnode = get_mipi_port_handle(adev_fwnode, port->port_nr);
+ if (!port_fwnode) {
+ acpi_handle_info(handle,
+ "MIPI port name too long for port %u\n",
+ port->port_nr);
+ continue;
+ }
+
+ init_csi2_port(adev, swnodes, port, port_fwnode, i);
+
+ fwnode_handle_put(port_fwnode);
+ }
+
+ ret = software_node_register_node_group(swnodes->nodeptrs);
+ if (ret < 0) {
+ acpi_handle_info(handle,
+ "Unable to register software nodes (%d)\n", ret);
+ return;
+ }
+
+ adev->swnodes = swnodes;
+ adev_fwnode->secondary = software_node_fwnode(swnodes->nodes);
+
+	/*
+	 * Clear the pointer so the swnodes of this csi2 entry are neither
+	 * assigned again nor freed prematurely.
+	 */
+ csi2->swnodes = NULL;
+}
+
+/**
+ * acpi_mipi_init_crs_csi2_swnodes - Initialize _CRS CSI-2 software nodes
+ *
+ * Use MIPI DisCo for Imaging device properties to finalize the initialization
+ * of CSI-2 software nodes for all ACPI device objects that have been already
+ * enumerated.
+ */
+void acpi_mipi_init_crs_csi2_swnodes(void)
+{
+ struct crs_csi2 *csi2, *csi2_tmp;
+
+ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry)
+ init_crs_csi2_swnodes(csi2);
+}
+
+/**
+ * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data
+ */
+void acpi_mipi_crs_csi2_cleanup(void)
+{
+ struct crs_csi2 *csi2, *csi2_tmp;
+
+ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry)
+ acpi_mipi_del_crs_csi2(csi2);
+}
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 992385537..802f8a56d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1186,7 +1186,7 @@ static ssize_t bus_dsm_mask_show(struct device *dev,
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
+ return sysfs_emit(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
@@ -1198,7 +1198,7 @@ static ssize_t revision_show(struct device *dev,
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
+ return sysfs_emit(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);
@@ -1209,7 +1209,7 @@ static ssize_t hw_error_scrub_show(struct device *dev,
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
+ return sysfs_emit(buf, "%d\n", acpi_desc->scrub_mode);
}
/*
@@ -1278,7 +1278,7 @@ static ssize_t scrub_show(struct device *dev,
mutex_lock(&acpi_desc->init_mutex);
busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+ rc = sysfs_emit(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
/* Allow an admin to poll the busy state at a higher rate */
if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
&acpi_desc->scrub_flags)) {
@@ -1382,7 +1382,7 @@ static ssize_t handle_show(struct device *dev,
{
struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
- return sprintf(buf, "%#x\n", memdev->device_handle);
+ return sysfs_emit(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);
@@ -1391,7 +1391,7 @@ static ssize_t phys_id_show(struct device *dev,
{
struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
- return sprintf(buf, "%#x\n", memdev->physical_id);
+ return sysfs_emit(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);
@@ -1400,7 +1400,7 @@ static ssize_t vendor_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);
@@ -1409,7 +1409,7 @@ static ssize_t rev_id_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);
@@ -1418,7 +1418,7 @@ static ssize_t device_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);
@@ -1427,7 +1427,7 @@ static ssize_t subsystem_vendor_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);
@@ -1436,7 +1436,7 @@ static ssize_t subsystem_rev_id_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n",
+ return sysfs_emit(buf, "0x%04x\n",
be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);
@@ -1446,7 +1446,7 @@ static ssize_t subsystem_device_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
+ return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);
@@ -1465,7 +1465,7 @@ static ssize_t format_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
+ return sysfs_emit(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
@@ -1498,7 +1498,7 @@ static ssize_t format1_show(struct device *dev,
continue;
if (nfit_dcr->dcr->code == dcr->code)
continue;
- rc = sprintf(buf, "0x%04x\n",
+ rc = sysfs_emit(buf, "0x%04x\n",
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
@@ -1515,7 +1515,7 @@ static ssize_t formats_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
+ return sysfs_emit(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);
@@ -1524,7 +1524,7 @@ static ssize_t serial_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
+ return sysfs_emit(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);
@@ -1536,7 +1536,7 @@ static ssize_t family_show(struct device *dev,
if (nfit_mem->family < 0)
return -ENXIO;
- return sprintf(buf, "%d\n", nfit_mem->family);
+ return sysfs_emit(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);
@@ -1548,7 +1548,7 @@ static ssize_t dsm_mask_show(struct device *dev,
if (nfit_mem->family < 0)
return -ENXIO;
- return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
+ return sysfs_emit(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);
@@ -1562,7 +1562,7 @@ static ssize_t flags_show(struct device *dev,
if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
- return sprintf(buf, "%s%s%s%s%s%s%s\n",
+ return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
@@ -1579,7 +1579,7 @@ static ssize_t id_show(struct device *dev,
struct nvdimm *nvdimm = to_nvdimm(dev);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- return sprintf(buf, "%s\n", nfit_mem->id);
+ return sysfs_emit(buf, "%s\n", nfit_mem->id);
}
static DEVICE_ATTR_RO(id);
@@ -1589,7 +1589,7 @@ static ssize_t dirty_shutdown_show(struct device *dev,
struct nvdimm *nvdimm = to_nvdimm(dev);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
+ return sysfs_emit(buf, "%d\n", nfit_mem->dirty_shutdown);
}
static DEVICE_ATTR_RO(dirty_shutdown);
@@ -2172,7 +2172,7 @@ static ssize_t range_index_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
- return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
+ return sysfs_emit(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);
@@ -2257,26 +2257,23 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
struct nd_region_desc *ndr_desc,
struct acpi_nfit_system_address *spa)
{
+ u16 nr = ndr_desc->num_mappings;
+ struct nfit_set_info2 *info2 __free(kfree) =
+ kcalloc(nr, sizeof(*info2), GFP_KERNEL);
+ struct nfit_set_info *info __free(kfree) =
+ kcalloc(nr, sizeof(*info), GFP_KERNEL);
struct device *dev = acpi_desc->dev;
struct nd_interleave_set *nd_set;
- u16 nr = ndr_desc->num_mappings;
- struct nfit_set_info2 *info2;
- struct nfit_set_info *info;
int i;
+ if (!info || !info2)
+ return -ENOMEM;
+
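+	/*
+	 * The __free(kfree) annotations above free info and info2
+	 * automatically on every return path, which replaces the explicit
+	 * devm allocations and devm_kfree() calls removed below.
+	 */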
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
import_guid(&nd_set->type_guid, spa->range_guid);
- info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
- if (!info2)
- return -ENOMEM;
-
for (i = 0; i < nr; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
@@ -2337,8 +2334,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}
ndr_desc->nd_set = nd_set;
- devm_kfree(dev, info);
- devm_kfree(dev, info2);
return 0;
}
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 9ef5f1bdc..75e9aac43 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -58,14 +58,21 @@ struct target_cache {
struct node_cache_attrs cache_attrs;
};
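+/*
+ * Additional, HMAT-internal access classes for generic port targets,
+ * appended after the generic ACCESS_COORDINATE_* classes: one for access
+ * from the closest initiator and one for access from the closest
+ * CPU-backed initiator.
+ */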
+enum {
+ NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL = ACCESS_COORDINATE_MAX,
+ NODE_ACCESS_CLASS_GENPORT_SINK_CPU,
+ NODE_ACCESS_CLASS_MAX,
+};
+
struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
struct resource memregions;
- struct node_hmem_attrs hmem_attrs[2];
+ struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
struct list_head caches;
struct node_cache_attrs cache_attrs;
+ u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
};
@@ -100,6 +107,51 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
+static struct memory_target *acpi_find_genport_target(u32 uid)
+{
+ struct memory_target *target;
+ u32 target_uid;
+ u8 *uid_ptr;
+
+ list_for_each_entry(target, &targets, node) {
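+		/*
+		 * In an ACPI device handle, bytes 0-7 hold the _HID and
+		 * bytes 8-11 the _UID, hence the offset of 8 below.
+		 */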
+ uid_ptr = target->gen_port_device_handle + 8;
+ target_uid = *(u32 *)uid_ptr;
+ if (uid == target_uid)
+ return target;
+ }
+
+ return NULL;
+}
+
+/**
+ * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
+ * @uid: ACPI unique id
+ * @coord: The access coordinates written back out for the generic port.
+ *	   Expects a two-entry array.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
+ */
+int acpi_get_genport_coordinates(u32 uid,
+ struct access_coordinate *coord)
+{
+ struct memory_target *target;
+
+ guard(mutex)(&target_lock);
+ target = acpi_find_genport_target(uid);
+ if (!target)
+ return -ENOENT;
+
+ coord[ACCESS_COORDINATE_LOCAL] =
+ target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL];
+ coord[ACCESS_COORDINATE_CPU] =
+ target->coord[NODE_ACCESS_CLASS_GENPORT_SINK_CPU];
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
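+
+#if 0	/* Hypothetical caller sketch (not built), e.g. a CXL consumer. */
+static int example_report_genport_perf(u32 uid)
+{
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+	int rc;
+
+	rc = acpi_get_genport_coordinates(uid, coord);
+	if (rc)
+		return rc;
+
+	pr_info("genport %u: read latency %u, read bandwidth %u\n", uid,
+		coord[ACCESS_COORDINATE_LOCAL].read_latency,
+		coord[ACCESS_COORDINATE_LOCAL].read_bandwidth);
+	return 0;
+}
+#endif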
+
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
struct memory_initiator *initiator;
@@ -120,8 +172,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
list_add_tail(&initiator->node, &initiators);
}
-static __init void alloc_memory_target(unsigned int mem_pxm,
- resource_size_t start, resource_size_t len)
+static __init struct memory_target *alloc_target(unsigned int mem_pxm)
{
struct memory_target *target;
@@ -129,7 +180,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
if (!target) {
target = kzalloc(sizeof(*target), GFP_KERNEL);
if (!target)
- return;
+ return NULL;
target->memory_pxm = mem_pxm;
target->processor_pxm = PXM_INVAL;
target->memregions = (struct resource) {
@@ -142,6 +193,19 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
INIT_LIST_HEAD(&target->caches);
}
+ return target;
+}
+
+static __init void alloc_memory_target(unsigned int mem_pxm,
+ resource_size_t start,
+ resource_size_t len)
+{
+ struct memory_target *target;
+
+ target = alloc_target(mem_pxm);
+ if (!target)
+ return;
+
/*
* There are potentially multiple ranges per PXM, so record each
* in the per-target memregions resource tree.
@@ -152,6 +216,18 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
start, start + len, mem_pxm);
}
+static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle)
+{
+ struct memory_target *target;
+
+ target = alloc_target(mem_pxm);
+ if (!target)
+ return;
+
+ memcpy(target->gen_port_device_handle, handle,
+ ACPI_SRAT_DEVICE_HANDLE_SIZE);
+}
+
static __init const char *hmat_data_type(u8 type)
{
switch (type) {
@@ -228,24 +304,24 @@ static void hmat_update_target_access(struct memory_target *target,
{
switch (type) {
case ACPI_HMAT_ACCESS_LATENCY:
- target->hmem_attrs[access].read_latency = value;
- target->hmem_attrs[access].write_latency = value;
+ target->coord[access].read_latency = value;
+ target->coord[access].write_latency = value;
break;
case ACPI_HMAT_READ_LATENCY:
- target->hmem_attrs[access].read_latency = value;
+ target->coord[access].read_latency = value;
break;
case ACPI_HMAT_WRITE_LATENCY:
- target->hmem_attrs[access].write_latency = value;
+ target->coord[access].write_latency = value;
break;
case ACPI_HMAT_ACCESS_BANDWIDTH:
- target->hmem_attrs[access].read_bandwidth = value;
- target->hmem_attrs[access].write_bandwidth = value;
+ target->coord[access].read_bandwidth = value;
+ target->coord[access].write_bandwidth = value;
break;
case ACPI_HMAT_READ_BANDWIDTH:
- target->hmem_attrs[access].read_bandwidth = value;
+ target->coord[access].read_bandwidth = value;
break;
case ACPI_HMAT_WRITE_BANDWIDTH:
- target->hmem_attrs[access].write_bandwidth = value;
+ target->coord[access].write_bandwidth = value;
break;
default:
break;
@@ -291,11 +367,28 @@ static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
}
}
+static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm,
+ u8 mem_hier, u8 type, u32 value)
+{
+ struct memory_target *target = find_mem_target(tgt_pxm);
+
+ if (mem_hier != ACPI_HMAT_MEMORY)
+ return;
+
+ if (target && target->processor_pxm == init_pxm) {
+ hmat_update_target_access(target, type, value,
+ ACCESS_COORDINATE_LOCAL);
+ /* If the node has a CPU, update access 1 */
+ if (node_state(pxm_to_node(init_pxm), N_CPU))
+ hmat_update_target_access(target, type, value,
+ ACCESS_COORDINATE_CPU);
+ }
+}
+
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_hmat_locality *hmat_loc = (void *)header;
- struct memory_target *target;
unsigned int init, targ, total_size, ipds, tpds;
u32 *inits, *targs, value;
u16 *entries;
@@ -336,15 +429,8 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
inits[init], targs[targ], value,
hmat_data_type_suffix(type));
- if (mem_hier == ACPI_HMAT_MEMORY) {
- target = find_mem_target(targs[targ]);
- if (target && target->processor_pxm == inits[init]) {
- hmat_update_target_access(target, type, value, 0);
- /* If the node has a CPU, update access 1 */
- if (node_state(pxm_to_node(inits[init]), N_CPU))
- hmat_update_target_access(target, type, value, 1);
- }
- }
+ hmat_update_target(targs[targ], inits[init],
+ mem_hier, type, value);
}
}
@@ -491,6 +577,27 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
return 0;
}
+static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_generic_affinity *ga = (void *)header;
+
+ if (!ga)
+ return -EINVAL;
+
+ if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
+ return 0;
+
+ /* Skip PCI device_handle for now */
+ if (ga->device_handle_type != 0)
+ return 0;
+
+ alloc_genport_target(ga->proximity_domain,
+ (u8 *)ga->device_handle);
+
+ return 0;
+}
+
static u32 hmat_initiator_perf(struct memory_target *target,
struct memory_initiator *initiator,
struct acpi_hmat_locality *hmat_loc)
@@ -592,6 +699,12 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
+ /* Don't update for generic port if there's no device handle */
+ if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
+ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+ !(*(u16 *)target->gen_port_device_handle))
+ return;
+
bitmap_zero(p_nodes, MAX_NUMNODES);
/*
* If the Address Range Structure provides a local processor pxm, set
@@ -600,7 +713,8 @@ static void hmat_update_target_attrs(struct memory_target *target,
*/
if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm);
- if (access == 0 || node_state(cpu_nid, N_CPU)) {
+ if (access == ACCESS_COORDINATE_LOCAL ||
+ node_state(cpu_nid, N_CPU)) {
set_bit(target->processor_pxm, p_nodes);
return;
}
@@ -628,7 +742,9 @@ static void hmat_update_target_attrs(struct memory_target *target,
list_for_each_entry(initiator, &initiators, node) {
u32 value;
- if (access == 1 && !initiator->has_cpu) {
+ if ((access == ACCESS_COORDINATE_CPU ||
+ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+ !initiator->has_cpu) {
clear_bit(initiator->processor_pxm, p_nodes);
continue;
}
@@ -661,12 +777,24 @@ static void __hmat_register_target_initiators(struct memory_target *target,
}
}
+static void hmat_update_generic_target(struct memory_target *target)
+{
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+ hmat_update_target_attrs(target, p_nodes,
+ NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL);
+ hmat_update_target_attrs(target, p_nodes,
+ NODE_ACCESS_CLASS_GENPORT_SINK_CPU);
+}
+
static void hmat_register_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
- __hmat_register_target_initiators(target, p_nodes, 0);
- __hmat_register_target_initiators(target, p_nodes, 1);
+ __hmat_register_target_initiators(target, p_nodes,
+ ACCESS_COORDINATE_LOCAL);
+ __hmat_register_target_initiators(target, p_nodes,
+ ACCESS_COORDINATE_CPU);
}
static void hmat_register_target_cache(struct memory_target *target)
@@ -681,7 +809,7 @@ static void hmat_register_target_cache(struct memory_target *target)
static void hmat_register_target_perf(struct memory_target *target, int access)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
- node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
+ node_set_perf_attrs(mem_nid, &target->coord[access], access);
}
static void hmat_register_target_devices(struct memory_target *target)
@@ -713,6 +841,17 @@ static void hmat_register_target(struct memory_target *target)
hmat_register_target_devices(target);
/*
+	 * Register the generic port performance numbers. The node ID may not
+	 * be initialized yet, in which case it is still NUMA_NO_NODE.
+ */
+ mutex_lock(&target_lock);
+ if (*(u16 *)target->gen_port_device_handle) {
+ hmat_update_generic_target(target);
+ target->registered = true;
+ }
+ mutex_unlock(&target_lock);
+
+ /*
* Skip offline nodes. This can happen when memory
* marked EFI_MEMORY_SP, "specific purpose", is applied
* to all the memory in a proximity domain leading to
@@ -726,8 +865,8 @@ static void hmat_register_target(struct memory_target *target)
if (!target->registered) {
hmat_register_target_initiators(target);
hmat_register_target_cache(target);
- hmat_register_target_perf(target, 0);
- hmat_register_target_perf(target, 1);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
target->registered = true;
}
mutex_unlock(&target_lock);
@@ -765,7 +904,7 @@ static int hmat_set_default_dram_perf(void)
int rc;
int nid, pxm;
struct memory_target *target;
- struct node_hmem_attrs *attrs;
+ struct access_coordinate *attrs;
if (!default_dram_type)
return -EIO;
@@ -775,7 +914,7 @@ static int hmat_set_default_dram_perf(void)
target = find_mem_target(pxm);
if (!target)
continue;
- attrs = &target->hmem_attrs[1];
+ attrs = &target->coord[1];
rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
if (rc)
return rc;
@@ -789,7 +928,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
struct memory_target *target;
- struct node_hmem_attrs *perf;
+ struct access_coordinate *perf;
int *adist = data;
int pxm;
@@ -799,10 +938,10 @@ static int hmat_calculate_adistance(struct notifier_block *self,
return NOTIFY_OK;
mutex_lock(&target_lock);
- hmat_update_target_attrs(target, p_nodes, 1);
+ hmat_update_target_attrs(target, p_nodes, ACCESS_COORDINATE_CPU);
mutex_unlock(&target_lock);
- perf = &target->hmem_attrs[1];
+ perf = &target->coord[1];
if (mt_perf_to_adistance(perf, adist))
return NOTIFY_OK;
@@ -870,6 +1009,13 @@ static __init int hmat_init(void)
ACPI_SRAT_TYPE_MEMORY_AFFINITY,
srat_parse_mem_affinity, 0) < 0)
goto out_put;
+
+ if (acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY,
+ srat_parse_genport_affinity, 0) < 0)
+ goto out_put;
+
acpi_put_table(tbl);
status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index b57de78fb..0214518fc 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -67,9 +67,9 @@ int acpi_map_pxm_to_node(int pxm)
node = pxm_to_node_map[pxm];
if (node == NUMA_NO_NODE) {
- if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
- return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
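+		/* first_unset_node() returns MAX_NUMNODES when the map is full. */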
+ if (node >= MAX_NUMNODES)
+ return NUMA_NO_NODE;
__acpi_map_pxm_to_node(pxm, node);
node_set(node, nodes_found_map);
}
@@ -430,7 +430,7 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
return -EINVAL;
node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
- if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ if (node == NUMA_NO_NODE) {
pr_err("SRAT: Too many proximity domains.\n");
return -EINVAL;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c09cc3c68..70af3fbbe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -544,11 +544,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
- u32 handled;
-
- handled = (*acpi_irq_handler) (acpi_irq_context);
-
- if (handled) {
+ if ((*acpi_irq_handler)(acpi_irq_context)) {
acpi_irq_handled++;
return IRQ_HANDLED;
} else {
@@ -582,7 +578,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
acpi_irq_handler = handler;
acpi_irq_context = context;
- if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+ if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
+ "acpi", acpi_irq)) {
pr_err("SCI (IRQ%d) allocation failed\n", irq);
acpi_irq_handler = NULL;
return AE_NOT_ACQUIRED;
@@ -1063,9 +1060,7 @@ int __init acpi_debugger_init(void)
acpi_status acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context)
{
- acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
- struct workqueue_struct *queue;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -1076,9 +1071,9 @@ acpi_status acpi_os_execute(acpi_execute_type type,
ret = acpi_debugger_create_thread(function, context);
if (ret) {
pr_err("Kernel thread creation failed\n");
- status = AE_ERROR;
+ return AE_ERROR;
}
- goto out_thread;
+ return AE_OK;
}
/*
@@ -1096,43 +1091,41 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
/*
* To prevent lockdep from complaining unnecessarily, make sure that
* there is a different static lockdep key for each workqueue by using
* INIT_WORK() for each of them separately.
*/
- if (type == OSL_NOTIFY_HANDLER) {
- queue = kacpi_notify_wq;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else if (type == OSL_GPE_HANDLER) {
- queue = kacpid_wq;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else {
+ switch (type) {
+ case OSL_NOTIFY_HANDLER:
+ ret = queue_work(kacpi_notify_wq, &dpc->work);
+ break;
+ case OSL_GPE_HANDLER:
+ /*
+ * On some machines, a software-initiated SMI causes corruption
+ * unless the SMI runs on CPU 0. An SMI can be initiated by
+ * any AML, but typically it's done in GPE-related methods that
+ * are run via workqueues, so we can avoid the known corruption
+ * cases by always queueing on CPU 0.
+ */
+ ret = queue_work_on(0, kacpid_wq, &dpc->work);
+ break;
+ default:
pr_err("Unsupported os_execute type %d.\n", type);
- status = AE_ERROR;
+ goto err;
}
-
- if (ACPI_FAILURE(status))
- goto err_workqueue;
-
- /*
- * On some machines, a software-initiated SMI causes corruption unless
- * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
- * typically it's done in GPE-related methods that are run via
- * workqueues, so we can avoid the known corruption cases by always
- * queueing on CPU 0.
- */
- ret = queue_work_on(0, queue, &dpc->work);
if (!ret) {
pr_err("Unable to queue work\n");
- status = AE_ERROR;
+ goto err;
}
-err_workqueue:
- if (ACPI_FAILURE(status))
- kfree(dpc);
-out_thread:
- return status;
+
+ return AE_OK;
+
+err:
+ kfree(dpc);
+ return AE_ERROR;
}
EXPORT_SYMBOL(acpi_os_execute);
@@ -1522,20 +1515,18 @@ void acpi_os_delete_lock(acpi_spinlock handle)
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
__acquires(lockp)
{
- acpi_cpu_flags flags;
-
- spin_lock_irqsave(lockp, flags);
- return flags;
+ spin_lock(lockp);
+ return 0;
}
/*
* Release a spinlock. See above.
*/
-void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
__releases(lockp)
{
- spin_unlock_irqrestore(lockp, flags);
+ spin_unlock(lockp);
}
#ifndef ACPI_USE_LOCAL_CACHE
@@ -1672,7 +1663,7 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
- kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index b7c6287ec..1219adb11 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -17,6 +17,8 @@
#include <acpi/processor.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
@@ -26,12 +28,21 @@
*/
#define CPUFREQ_THERMAL_MIN_STEP 0
-#define CPUFREQ_THERMAL_MAX_STEP 3
-static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
+static int cpufreq_thermal_max_step __read_mostly = 3;
+
+/*
+ * Minimum throttle percentage for processor_thermal cooling device.
+ * The processor_thermal driver uses it to calculate the percentage amount by
+ * which cpu frequency must be reduced for each cooling state. This is also used
+ * to calculate the maximum number of throttling steps or cooling states.
+ */
+static int cpufreq_thermal_reduction_pctg __read_mostly = 20;
-#define reduction_pctg(cpu) \
- per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_step);
+
+#define reduction_step(cpu) \
+ per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu))
/*
* Emulate "per package data" using per cpu data (which should really be
@@ -71,7 +82,7 @@ static int cpufreq_get_max_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
- return CPUFREQ_THERMAL_MAX_STEP;
+ return cpufreq_thermal_max_step;
}
static int cpufreq_get_cur_state(unsigned int cpu)
@@ -79,7 +90,7 @@ static int cpufreq_get_cur_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
- return reduction_pctg(cpu);
+ return reduction_step(cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
@@ -92,7 +103,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (!cpu_has_cpufreq(cpu))
return 0;
- reduction_pctg(cpu) = state;
+ reduction_step(cpu) = state;
/*
* Update all the CPUs in the same package because they all
@@ -113,7 +124,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (!policy)
return -EINVAL;
- max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
cpufreq_cpu_put(policy);
@@ -126,10 +138,29 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
return 0;
}
+static void acpi_thermal_cpufreq_config(void)
+{
+ int cpufreq_pctg = acpi_arch_thermal_cpufreq_pctg();
+
+ if (!cpufreq_pctg)
+ return;
+
+ cpufreq_thermal_reduction_pctg = cpufreq_pctg;
+
+ /*
+	 * Derive the maximum throttling step from the throttle percentage so
+	 * that the total reduction never exceeds 100%, and cap it so that the
+	 * CPU performance does not drop to 0.
+ */
+ cpufreq_thermal_max_step = (100 / cpufreq_pctg) - 2;
+}
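+
+/*
+ * For example, the default 20% per step gives 100 / 20 - 2 = 3 as the
+ * maximum step, so states 0..3 cut the maximum frequency by 0/20/40/60%.
+ * A hypothetical architecture value of 5% would give 18 steps, still
+ * leaving 10% of the frequency at the deepest state.
+ */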
+
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
+ acpi_thermal_cpufreq_config();
+
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
@@ -190,7 +221,7 @@ static int acpi_processor_max_state(struct acpi_processor *pr)
/*
* There exists four states according to
- * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3
+ * cpufreq_thermal_reduction_step. 0, 1, 2, 3
*/
max_state += cpufreq_get_max_state(pr->id);
if (pr->flags.throttling)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 4d042673d..a6ead5204 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -2,14 +2,17 @@
/*
* ACPI device specific properties support.
*
- * Copyright (C) 2014, Intel Corporation
+ * Copyright (C) 2014 - 2023, Intel Corporation
* All rights reserved.
*
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Darren Hart <dvhart@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Sakari Ailus <sakari.ailus@linux.intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -801,27 +804,15 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
u32 nargs = 0, i;
/*
- * Find the referred data extension node under the
- * referred device node.
- */
- for (; *element < end && (*element)->type == ACPI_TYPE_STRING;
- (*element)++) {
- const char *child_name = (*element)->string.pointer;
-
- ref_fwnode = acpi_fwnode_get_named_child_node(ref_fwnode, child_name);
- if (!ref_fwnode)
- return -EINVAL;
- }
-
- /*
* Assume the following integer elements are all args. Stop counting on
- * the first reference or end of the package arguments. In case of
- * neither reference, nor integer, return an error, we can't parse it.
+	 * the first reference (possibly represented as a string) or the end of
+	 * the package arguments. If an element is neither a reference nor an
+	 * integer, return an error, as the package cannot be parsed.
*/
for (i = 0; (*element) + i < end && i < num_args; i++) {
acpi_object_type type = (*element)[i].type;
- if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ if (type == ACPI_TYPE_LOCAL_REFERENCE || type == ACPI_TYPE_STRING)
break;
if (type == ACPI_TYPE_INTEGER)
@@ -845,6 +836,44 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args,
return 0;
}
+static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *fwnode,
+ const char *refstring)
+{
+ acpi_handle scope, handle;
+ struct acpi_data_node *dn;
+ struct acpi_device *device;
+ acpi_status status;
+
+ if (is_acpi_device_node(fwnode)) {
+ scope = to_acpi_device_node(fwnode)->handle;
+ } else if (is_acpi_data_node(fwnode)) {
+ scope = to_acpi_data_node(fwnode)->handle;
+ } else {
+ pr_debug("Bad node type for node %pfw\n", fwnode);
+ return NULL;
+ }
+
+ status = acpi_get_handle(scope, refstring, &handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(scope, "Unable to get an ACPI handle for %s\n",
+ refstring);
+ return NULL;
+ }
+
+ device = acpi_fetch_acpi_dev(handle);
+ if (device)
+ return acpi_fwnode_handle(device);
+
+ status = acpi_get_data_full(handle, acpi_nondev_subnode_tag,
+ (void **)&dn, NULL);
+ if (ACPI_FAILURE(status) || !dn) {
+ acpi_handle_debug(handle, "Subnode not found\n");
+ return NULL;
+ }
+
+ return &dn->fwnode;
+}
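+
+#if 0	/* Hypothetical usage sketch (not built); "sensor" is a made-up property. */
+static int example_resolve_string_ref(struct fwnode_handle *fwnode)
+{
+	struct fwnode_reference_args args;
+	int ret;
+
+	/*
+	 * With a _DSD entry such as Package () { "sensor", "\\_SB.PCI0.SEN0" },
+	 * the string is now resolved like a local reference.
+	 */
+	ret = fwnode_property_get_reference_args(fwnode, "sensor", NULL, 0, 0,
+						 &args);
+	if (ret)
+		return ret;
+
+	fwnode_handle_put(args.fwnode);
+	return 0;
+}
+#endif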
+
/**
* __acpi_node_get_property_reference - returns handle to the referenced object
* @fwnode: Firmware node to get the property from
@@ -888,6 +917,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const union acpi_object *element, *end;
const union acpi_object *obj;
const struct acpi_device_data *data;
+ struct fwnode_handle *ref_fwnode;
struct acpi_device *device;
int ret, idx = 0;
@@ -914,16 +944,30 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
args->fwnode = acpi_fwnode_handle(device);
args->nargs = 0;
+
+ return 0;
+ case ACPI_TYPE_STRING:
+ if (index)
+ return -ENOENT;
+
+ ref_fwnode = acpi_parse_string_ref(fwnode, obj->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+
+ args->fwnode = ref_fwnode;
+ args->nargs = 0;
+
return 0;
case ACPI_TYPE_PACKAGE:
/*
* If it is not a single reference, then it is a package of
- * references followed by number of ints as follows:
+ * references, followed by number of ints as follows:
*
* Package () { REF, INT, REF, INT, INT }
*
- * The index argument is then used to determine which reference
- * the caller wants (along with the arguments).
+ * Here, REF may be either a local reference or a string. The
+ * index argument is then used to determine which reference the
+ * caller wants (along with the arguments).
*/
break;
default:
@@ -955,6 +999,24 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return 0;
break;
+ case ACPI_TYPE_STRING:
+ ref_fwnode = acpi_parse_string_ref(fwnode,
+ element->string.pointer);
+ if (!ref_fwnode)
+ return -EINVAL;
+
+ element++;
+
+ ret = acpi_get_ref_args(idx == index ? args : NULL,
+ ref_fwnode, &element, end,
+ num_args);
+ if (ret < 0)
+ return ret;
+
+ if (idx == index)
+ return 0;
+
+ break;
case ACPI_TYPE_INTEGER:
if (idx == index)
return -ENOENT;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index c843feb02..b5193049d 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -490,6 +490,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus ExpertBook B2502FBA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502FBA"),
+ },
+ },
+ {
/* Asus Vivobook E1504GA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 35ad5781f..eb4ca85d1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1528,7 +1528,6 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
r->cpu_start = rentry->res->start;
r->dma_start = rentry->res->start - rentry->offset;
r->size = resource_size(rentry->res);
- r->offset = rentry->offset;
r++;
}
}
@@ -1558,8 +1557,7 @@ static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
return fwspec ? fwspec->ops : NULL;
}
-static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
- const u32 *id_in)
+static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
{
int err;
const struct iommu_ops *ops;
@@ -1573,7 +1571,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
ops = acpi_iommu_fwspec_ops(dev);
if (ops) {
mutex_unlock(&iommu_probe_device_lock);
- return ops;
+ return 0;
}
err = iort_iommu_configure_id(dev, id_in);
@@ -1590,12 +1588,14 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
/* Ignore all other errors apart from EPROBE_DEFER */
if (err == -EPROBE_DEFER) {
- return ERR_PTR(err);
+ return err;
} else if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
- return NULL;
+ return -ENODEV;
}
- return acpi_iommu_fwspec_ops(dev);
+ if (!acpi_iommu_fwspec_ops(dev))
+ return -ENODEV;
+ return 0;
}
#else /* !CONFIG_IOMMU_API */
@@ -1607,10 +1607,9 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id,
return -ENODEV;
}
-static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
- const u32 *id_in)
+static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
{
- return NULL;
+ return -ENODEV;
}
#endif /* !CONFIG_IOMMU_API */
@@ -1624,7 +1623,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id)
{
- const struct iommu_ops *iommu;
+ int ret;
if (attr == DEV_DMA_NOT_SUPPORTED) {
set_dma_ops(dev, &dma_dummy_ops);
@@ -1633,12 +1632,16 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
acpi_arch_dma_setup(dev);
- iommu = acpi_iommu_configure_id(dev, input_id);
- if (PTR_ERR(iommu) == -EPROBE_DEFER)
+ ret = acpi_iommu_configure_id(dev, input_id);
+ if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
- arch_setup_dma_ops(dev, 0, U64_MAX,
- iommu, attr == DEV_DMA_COHERENT);
+ /*
+	 * Historically, this routine doesn't fail driver probing due to
+	 * errors in acpi_iommu_configure_id().
+ */
+
+ arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
return 0;
}
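
The conversion above replaces a mixed NULL/ERR_PTR()/pointer contract with a plain error code, of which only -EPROBE_DEFER matters to callers. A condensed restatement of the resulting caller contract (a sketch, not new kernel code):

/* Sketch: the only error acpi_iommu_configure_id() callers propagate */
static int dma_setup_sketch(struct device *dev, const u32 *input_id)
{
	int ret = acpi_iommu_configure_id(dev, input_id);

	if (ret == -EPROBE_DEFER)	/* IOMMU driver not ready: retry probe */
		return -EPROBE_DEFER;

	/* 0 or -ENODEV: continue; DMA is set up with or without an IOMMU */
	return 0;
}
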
@@ -1728,6 +1731,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
* Some ACPI devs contain SerialBus resources even though they are not
* attached to a serial bus at all.
*/
+ {ACPI_VIDEO_HID, },
{"MSHW0028", },
/*
 * HIDs of devices with a UartSerialBusV2 resource for which userspace
@@ -1798,7 +1802,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
if (dep->honor_dep)
adev->flags.honor_deps = 1;
- adev->dep_unmet++;
+ if (!dep->met)
+ adev->dep_unmet++;
}
}
}
@@ -1977,10 +1982,9 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
}
}
-static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
+static u32 acpi_scan_check_dep(acpi_handle handle)
{
struct acpi_handle_list dep_devices;
- acpi_status status;
u32 count;
int i;
@@ -1990,12 +1994,10 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
* 2. ACPI nodes describing USB ports.
 * Still, checking for _HID catches more than just these cases ...
*/
- if (!check_dep || !acpi_has_method(handle, "_DEP") ||
- !acpi_has_method(handle, "_HID"))
+ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID"))
return 0;
- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) {
acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
return 0;
}
@@ -2004,6 +2006,7 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
struct acpi_device_info *info;
struct acpi_dep_data *dep;
bool skip, honor_dep;
+ acpi_status status;
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
@@ -2037,7 +2040,13 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
return count;
}
-static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
+static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c)
+{
+ acpi_mipi_check_crs_csi2(handle);
+ return AE_OK;
+}
+
+static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass,
struct acpi_device **adev_p)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
@@ -2055,9 +2064,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
if (acpi_device_should_be_hidden(handle))
return AE_OK;
- /* Bail out if there are dependencies. */
- if (acpi_scan_check_dep(handle, check_dep) > 0)
- return AE_CTRL_DEPTH;
+ if (first_pass) {
+ acpi_mipi_check_crs_csi2(handle);
+
+ /* Bail out if there are dependencies. */
+ if (acpi_scan_check_dep(handle) > 0) {
+ /*
+ * The entire CSI-2 connection graph needs to be
+ * extracted before any drivers or scan handlers
+ * are bound to struct device objects, so scan
+ * _CRS CSI-2 resource descriptors for all
+ * devices below the current handle.
+ */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX,
+ acpi_scan_check_crs_csi2_cb,
+ NULL, NULL, NULL);
+ return AE_CTRL_DEPTH;
+ }
+ }
fallthrough;
case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
@@ -2080,10 +2105,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
}
/*
- * If check_dep is true at this point, the device has no dependencies,
+ * If first_pass is true at this point, the device has no dependencies,
* or the creation of the device object would have been postponed above.
*/
- acpi_add_single_object(&device, handle, type, !check_dep);
+ acpi_add_single_object(&device, handle, type, !first_pass);
if (!device)
return AE_CTRL_DEPTH;
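
acpi_scan_check_crs_csi2_cb() above follows the standard acpi_walk_callback shape. A generic sketch of the acpi_walk_namespace() pattern it relies on (callback and context names are illustrative):

/* Generic namespace-walk pattern (illustrative names) */
static acpi_status count_devices_cb(acpi_handle handle, u32 depth,
				    void *context, void **ret)
{
	unsigned int *count = context;

	(*count)++;
	return AE_OK;	/* AE_CTRL_DEPTH instead would skip this subtree */
}

static unsigned int count_devices_below(acpi_handle root)
{
	unsigned int count = 0;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, root, ACPI_UINT32_MAX,
			    count_devices_cb, NULL, &count, NULL);
	return count;
}
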
@@ -2427,6 +2452,13 @@ static void acpi_scan_postponed_branch(acpi_handle handle)
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
acpi_bus_check_add_2, NULL, NULL, (void **)&adev);
+
+ /*
+ * Populate the ACPI _CRS CSI-2 software nodes for the ACPI devices that
+ * have been added above.
+ */
+ acpi_mipi_init_crs_csi2_swnodes();
+
acpi_bus_attach(adev, NULL);
}
@@ -2495,12 +2527,22 @@ int acpi_bus_scan(acpi_handle handle)
if (!device)
return -ENODEV;
+ /*
+ * Set up ACPI _CRS CSI-2 software nodes using information extracted
+ * from the _CRS CSI-2 resource descriptors during the ACPI namespace
+ * walk above and MIPI DisCo for Imaging device properties.
+ */
+ acpi_mipi_scan_crs_csi2();
+ acpi_mipi_init_crs_csi2_swnodes();
+
acpi_bus_attach(device, (void *)true);
/* Pass 2: Enumerate all of the remaining devices. */
acpi_scan_postponed();
+ acpi_mipi_crs_csi2_cleanup();
+
return 0;
}
EXPORT_SYMBOL(acpi_bus_scan);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 808484d11..728acfeb7 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
- /*
- * ASUS B1400CEAE hangs on resume from suspend (see
- * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
- */
- {
- .callback = init_default_s3,
- .ident = "ASUS B1400CEAE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
- },
- },
{},
};
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index c1516337f..b07f7d091 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -251,8 +251,9 @@ int __init_or_acpilib acpi_table_parse_entries_array(
return -ENODEV;
}
- count = acpi_parse_entries_array(id, table_size, table_header,
- proc, proc_num, max_entries);
+ count = acpi_parse_entries_array(id, table_size,
+ (union fw_table_header *)table_header,
+ proc, proc_num, max_entries);
acpi_put_table(table_header);
return count;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index f74d81abd..4748e8061 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -31,6 +31,8 @@
#include <linux/uaccess.h>
#include <linux/units.h>
+#include "internal.h"
+
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
@@ -90,7 +92,7 @@ struct acpi_thermal_passive {
struct acpi_thermal_trip trip;
unsigned long tc1;
unsigned long tc2;
- unsigned long tsp;
+ unsigned long delay;
};
struct acpi_thermal_active {
@@ -188,24 +190,19 @@ static int active_trip_index(struct acpi_thermal *tz,
static long get_passive_temp(struct acpi_thermal *tz)
{
- unsigned long long tmp;
- acpi_status status;
+ int temp;
- status = acpi_evaluate_integer(tz->device->handle, "_PSV", NULL, &tmp);
- if (ACPI_FAILURE(status))
+ if (acpi_passive_trip_temp(tz->device, &temp))
return THERMAL_TEMP_INVALID;
- return tmp;
+ return temp;
}
static long get_active_temp(struct acpi_thermal *tz, int index)
{
- char method[] = { '_', 'A', 'C', '0' + index, '\0' };
- unsigned long long tmp;
- acpi_status status;
+ int temp;
- status = acpi_evaluate_integer(tz->device->handle, method, NULL, &tmp);
- if (ACPI_FAILURE(status))
+ if (acpi_active_trip_temp(tz->device, index, &temp))
return THERMAL_TEMP_INVALID;
/*
@@ -215,10 +212,10 @@ static long get_active_temp(struct acpi_thermal *tz, int index)
if (act > 0) {
unsigned long long override = celsius_to_deci_kelvin(act);
- if (tmp > override)
- tmp = override;
+ if (temp > override)
+ return override;
}
- return tmp;
+ return temp;
}
static void acpi_thermal_update_trip(struct acpi_thermal *tz,
@@ -247,7 +244,6 @@ static bool update_trip_devices(struct acpi_thermal *tz,
{
struct acpi_handle_list devices = { 0 };
char method[] = "_PSL";
- acpi_status status;
if (index != ACPI_THERMAL_TRIP_PASSIVE) {
method[1] = 'A';
@@ -255,8 +251,7 @@ static bool update_trip_devices(struct acpi_thermal *tz,
method[3] = '0' + index;
}
- status = acpi_evaluate_reference(tz->device->handle, method, NULL, &devices);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_evaluate_reference(tz->device->handle, method, NULL, &devices)) {
acpi_handle_info(tz->device->handle, "%s evaluation failure\n", method);
return false;
}
@@ -297,6 +292,7 @@ static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data)
struct acpi_thermal_trip *acpi_trip = trip->priv;
struct adjust_trip_data *atd = data;
struct acpi_thermal *tz = atd->tz;
+ int temp;
if (!acpi_trip || !acpi_thermal_trip_valid(acpi_trip))
return 0;
@@ -307,9 +303,11 @@ static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data)
acpi_thermal_update_trip_devices(tz, trip);
if (acpi_thermal_trip_valid(acpi_trip))
- trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk);
+ temp = acpi_thermal_temp(tz, acpi_trip->temp_dk);
else
- trip->temperature = THERMAL_TEMP_INVALID;
+ temp = THERMAL_TEMP_INVALID;
+
+ thermal_zone_set_trip_temp(tz->thermal_zone, trip, temp);
return 0;
}
@@ -339,13 +337,12 @@ static void acpi_thermal_trips_update(struct acpi_thermal *tz, u32 event)
dev_name(&adev->dev), event, 0);
}
-static long acpi_thermal_get_critical_trip(struct acpi_thermal *tz)
+static int acpi_thermal_get_critical_trip(struct acpi_thermal *tz)
{
- unsigned long long tmp;
- acpi_status status;
+ int temp;
if (crt > 0) {
- tmp = celsius_to_deci_kelvin(crt);
+ temp = celsius_to_deci_kelvin(crt);
goto set;
}
if (crt == -1) {
@@ -353,38 +350,34 @@ static long acpi_thermal_get_critical_trip(struct acpi_thermal *tz)
return THERMAL_TEMP_INVALID;
}
- status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- acpi_handle_debug(tz->device->handle, "No critical threshold\n");
+ if (acpi_critical_trip_temp(tz->device, &temp))
return THERMAL_TEMP_INVALID;
- }
- if (tmp <= 2732) {
+
+ if (temp <= 2732) {
/*
* Below zero (Celsius) values clearly aren't right for sure,
* so discard them as invalid.
*/
- pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
+ pr_info(FW_BUG "Invalid critical threshold (%d)\n", temp);
return THERMAL_TEMP_INVALID;
}
set:
- acpi_handle_debug(tz->device->handle, "Critical threshold [%llu]\n", tmp);
- return tmp;
+ acpi_handle_debug(tz->device->handle, "Critical threshold [%d]\n", temp);
+ return temp;
}
-static long acpi_thermal_get_hot_trip(struct acpi_thermal *tz)
+static int acpi_thermal_get_hot_trip(struct acpi_thermal *tz)
{
- unsigned long long tmp;
- acpi_status status;
+ int temp;
- status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
+ if (acpi_hot_trip_temp(tz->device, &temp) || temp == THERMAL_TEMP_INVALID) {
acpi_handle_debug(tz->device->handle, "No hot threshold\n");
return THERMAL_TEMP_INVALID;
}
- acpi_handle_debug(tz->device->handle, "Hot threshold [%llu]\n", tmp);
- return tmp;
+ acpi_handle_debug(tz->device->handle, "Hot threshold [%d]\n", temp);
+ return temp;
}
static bool passive_trip_params_init(struct acpi_thermal *tz)
@@ -404,11 +397,17 @@ static bool passive_trip_params_init(struct acpi_thermal *tz)
tz->trips.passive.tc2 = tmp;
+ status = acpi_evaluate_integer(tz->device->handle, "_TFP", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ tz->trips.passive.delay = tmp;
+ return true;
+ }
+
status = acpi_evaluate_integer(tz->device->handle, "_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
return false;
- tz->trips.passive.tsp = tmp;
+ tz->trips.passive.delay = tmp * 100;
return true;
}
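
The unit handling is the subtle part of this hunk: _TFP reports the passive sampling period directly in milliseconds, while _TSP reports tenths of a second, hence the multiplication by 100. A restatement with example values:

/*
 * Restating the conversion (values are examples only):
 *   _TFP = 2000  ->  delay = 2000 ms
 *   _TSP = 20    ->  delay = 20 * 100 = 2000 ms
 */
static unsigned long passive_delay_ms(bool have_tfp, u64 tfp_ms, u64 tsp_ds)
{
	return have_tfp ? tfp_ms : tsp_ds * 100;
}
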
@@ -904,7 +903,7 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_trip = &tz->trips.passive.trip;
if (acpi_thermal_trip_valid(acpi_trip)) {
- passive_delay = tz->trips.passive.tsp * 100;
+ passive_delay = tz->trips.passive.delay;
trip->type = THERMAL_TRIP_PASSIVE;
trip->temperature = acpi_thermal_temp(tz, acpi_trip->temp_dk);
@@ -1142,6 +1141,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
+MODULE_IMPORT_NS(ACPI_THERMAL);
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/thermal_acpi.c b/drivers/acpi/thermal_lib.c
index 43eaf0f2f..4e0519ca9 100644
--- a/drivers/thermal/thermal_acpi.c
+++ b/drivers/acpi/thermal_lib.c
@@ -3,12 +3,13 @@
* Copyright 2023 Linaro Limited
* Copyright 2023 Intel Corporation
*
- * Library routines for populating a generic thermal trip point structure
- * with data obtained by evaluating a specific object in the ACPI Namespace.
+ * Library routines for retrieving trip point temperature values from the
+ * platform firmware via ACPI.
*/
#include <linux/acpi.h>
#include <linux/units.h>
#include <linux/thermal.h>
+#include "internal.h"
/*
 * Minimum temperature for full military grade is 218 K (-55 °C) and
@@ -17,11 +18,11 @@
* firmware. Any values out of these boundaries may be considered
* bogus and we can assume the firmware has no data to provide.
*/
-#define TEMP_MIN_DECIK 2180
-#define TEMP_MAX_DECIK 4480
+#define TEMP_MIN_DECIK 2180ULL
+#define TEMP_MAX_DECIK 4480ULL
-static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
- int *ret_temp)
+static int acpi_trip_temp(struct acpi_device *adev, char *obj_name,
+ int *ret_temp)
{
unsigned long long temp;
acpi_status status;
@@ -33,7 +34,7 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
}
if (temp >= TEMP_MIN_DECIK && temp <= TEMP_MAX_DECIK) {
- *ret_temp = deci_kelvin_to_millicelsius(temp);
+ *ret_temp = temp;
} else {
acpi_handle_debug(adev->handle, "%s result %llu out of range\n",
obj_name, temp);
@@ -43,6 +44,48 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
return 0;
}
+int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
+{
+ char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'};
+
+ if (id < 0 || id > 9)
+ return -EINVAL;
+
+ return acpi_trip_temp(adev, obj_name, ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL);
+
+int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return acpi_trip_temp(adev, "_PSV", ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL);
+
+int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return acpi_trip_temp(adev, "_HOT", ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL);
+
+int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return acpi_trip_temp(adev, "_CRT", ret_temp);
+}
+EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL);
+
+static int thermal_temp(int error, int temp_decik, int *ret_temp)
+{
+ if (error)
+ return error;
+
+ if (temp_decik == THERMAL_TEMP_INVALID)
+ *ret_temp = THERMAL_TEMP_INVALID;
+ else
+ *ret_temp = deci_kelvin_to_millicelsius(temp_decik);
+
+ return 0;
+}
+
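
After this split the library consistently returns raw deci-kelvin and thermal_temp() converts for thermal consumers. For reference, deci_kelvin_to_millicelsius(t) from <linux/units.h> computes t * 100 - 273150, which is why the thermal driver's 2732 threshold corresponds to roughly 0 °C:

/*
 * Worked examples for the conversion done by thermal_temp():
 *   2732 dK -> 2732 * 100 - 273150 =    50 m°C  (~0 °C)
 *   3032 dK -> 3032 * 100 - 273150 = 30050 m°C  (~30 °C)
 */
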
/**
* thermal_acpi_active_trip_temp - Retrieve active trip point temperature
* @adev: Target thermal zone ACPI device object.
@@ -57,12 +100,10 @@ static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
*/
int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
{
- char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'};
-
- if (id < 0 || id > 9)
- return -EINVAL;
+ int temp_decik;
+ int ret = acpi_active_trip_temp(adev, id, &temp_decik);
- return thermal_acpi_trip_temp(adev, obj_name, ret_temp);
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);
@@ -78,7 +119,10 @@ EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);
*/
int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- return thermal_acpi_trip_temp(adev, "_PSV", ret_temp);
+ int temp_decik;
+ int ret = acpi_passive_trip_temp(adev, &temp_decik);
+
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);
@@ -95,7 +139,10 @@ EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);
*/
int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- return thermal_acpi_trip_temp(adev, "_HOT", ret_temp);
+ int temp_decik;
+ int ret = acpi_hot_trip_temp(adev, &temp_decik);
+
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);
@@ -111,6 +158,9 @@ EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);
*/
int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
- return thermal_acpi_trip_temp(adev, "_CRT", ret_temp);
+ int temp_decik;
+ int ret = acpi_critical_trip_temp(adev, &temp_decik);
+
+ return thermal_temp(ret, temp_decik, ret_temp);
}
EXPORT_SYMBOL_GPL(thermal_acpi_critical_trip_temp);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 62944e35f..abac5cc25 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -329,21 +329,18 @@ const char *acpi_get_subsystem_id(acpi_handle handle)
}
EXPORT_SYMBOL_GPL(acpi_get_subsystem_id);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list)
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list)
{
- acpi_status status = AE_OK;
- union acpi_object *package = NULL;
- union acpi_object *element = NULL;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- u32 i = 0;
-
+ union acpi_object *package;
+ acpi_status status;
+ bool ret = false;
+ u32 i;
if (!list)
- return AE_BAD_PARAMETER;
+ return false;
/* Evaluate object. */
@@ -353,62 +350,47 @@ acpi_evaluate_reference(acpi_handle handle,
package = buffer.pointer;
- if ((buffer.length == 0) || !package) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- goto end;
- }
- if (package->type != ACPI_TYPE_PACKAGE) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- goto end;
- }
- if (!package->package.count) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- goto end;
- }
+ if (buffer.length == 0 || !package ||
+ package->type != ACPI_TYPE_PACKAGE || !package->package.count)
+ goto err;
- list->handles = kcalloc(package->package.count, sizeof(*list->handles), GFP_KERNEL);
- if (!list->handles) {
- kfree(package);
- return AE_NO_MEMORY;
- }
list->count = package->package.count;
+ list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL);
+ if (!list->handles)
+ goto err_clear;
/* Extract package data. */
for (i = 0; i < list->count; i++) {
+ union acpi_object *element = &(package->package.elements[i]);
- element = &(package->package.elements[i]);
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE ||
+ !element->reference.handle)
+ goto err_free;
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
- status = AE_BAD_DATA;
- acpi_util_eval_error(handle, pathname, status);
- break;
- }
-
- if (!element->reference.handle) {
- status = AE_NULL_ENTRY;
- acpi_util_eval_error(handle, pathname, status);
- break;
- }
/* Get the acpi_handle. */
list->handles[i] = element->reference.handle;
acpi_handle_debug(list->handles[i], "Found in reference list\n");
}
- if (ACPI_FAILURE(status)) {
- list->count = 0;
- kfree(list->handles);
- list->handles = NULL;
- }
+ ret = true;
end:
kfree(buffer.pointer);
- return status;
+ return ret;
+
+err_free:
+ kfree(list->handles);
+ list->handles = NULL;
+
+err_clear:
+ list->count = 0;
+
+err:
+ acpi_util_eval_error(handle, pathname, status);
+ goto end;
}
EXPORT_SYMBOL(acpi_evaluate_reference);
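
With the bool return, callers pair a successful evaluation with acpi_handle_list_free(). A minimal usage sketch (the "_PSL" method name is just an example):

static void log_psl_references(acpi_handle handle)
{
	struct acpi_handle_list devices;
	int i;

	if (!acpi_evaluate_reference(handle, "_PSL", NULL, &devices))
		return;	/* evaluation failed; @devices was not populated */

	for (i = 0; i < devices.count; i++)
		acpi_handle_debug(devices.handles[i], "referenced by _PSL\n");

	acpi_handle_list_free(&devices);
}
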
@@ -426,7 +408,7 @@ bool acpi_handle_list_equal(struct acpi_handle_list *list1,
{
return list1->count == list2->count &&
!memcmp(list1->handles, list2->handles,
- list1->count * sizeof(acpi_handle));
+ list1->count * sizeof(*list1->handles));
}
EXPORT_SYMBOL_GPL(acpi_handle_list_equal);
@@ -468,6 +450,40 @@ void acpi_handle_list_free(struct acpi_handle_list *list)
}
EXPORT_SYMBOL_GPL(acpi_handle_list_free);
+/**
+ * acpi_device_dep - Check ACPI device dependency
+ * @target: ACPI handle of the target ACPI device.
+ * @match: ACPI handle to look up in the target's _DEP list.
+ *
+ * Return true if @match is present in the list returned by _DEP for
+ * @target or false otherwise.
+ */
+bool acpi_device_dep(acpi_handle target, acpi_handle match)
+{
+ struct acpi_handle_list dep_devices;
+ bool ret = false;
+ int i;
+
+ if (!acpi_has_method(target, "_DEP"))
+ return false;
+
+ if (!acpi_evaluate_reference(target, "_DEP", NULL, &dep_devices)) {
+ acpi_handle_debug(target, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == match) {
+ ret = true;
+ break;
+ }
+ }
+
+ acpi_handle_list_free(&dep_devices);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(acpi_device_dep);
+
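
A short caller-side sketch for the new helper, assuming a hypothetical EC namespace path (the "\\_SB.EC0" path is an assumption):

/* Does @adev declare a _DEP dependency on the EC? (illustrative) */
static bool depends_on_ec(struct acpi_device *adev)
{
	acpi_handle ec_handle;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB.EC0", &ec_handle)))
		return false;

	return acpi_device_dep(adev->handle, ec_handle);
}
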
acpi_status
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
@@ -825,54 +841,6 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
/**
- * acpi_dev_uid_match - Match device by supplied UID
- * @adev: ACPI device to match.
- * @uid2: Unique ID of the device.
- *
- * Matches UID in @adev with given @uid2.
- *
- * Returns:
- * - %true if matches.
- * - %false otherwise.
- */
-bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2)
-{
- const char *uid1 = acpi_device_uid(adev);
-
- return uid1 && uid2 && !strcmp(uid1, uid2);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_uid_match);
-
-/**
- * acpi_dev_hid_uid_match - Match device by supplied HID and UID
- * @adev: ACPI device to match.
- * @hid2: Hardware ID of the device.
- * @uid2: Unique ID of the device, pass NULL to not check _UID.
- *
- * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
- * will be treated as a match. If user wants to validate @uid2, it should be
- * done before calling this function.
- *
- * Returns:
- * - %true if matches or @uid2 is NULL.
- * - %false otherwise.
- */
-bool acpi_dev_hid_uid_match(struct acpi_device *adev,
- const char *hid2, const char *uid2)
-{
- const char *hid1 = acpi_device_hid(adev);
-
- if (strcmp(hid1, hid2))
- return false;
-
- if (!uid2)
- return true;
-
- return acpi_dev_uid_match(adev, uid2);
-}
-EXPORT_SYMBOL(acpi_dev_hid_uid_match);
-
-/**
* acpi_dev_uid_to_integer - treat ACPI device _UID as integer
* @adev: ACPI device to get _UID from
* @integer: output buffer for integer
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index bc65ebfcd..90c3d2eab 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -428,7 +428,7 @@ bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);
-int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
{
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
@@ -436,8 +436,6 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
u64 uid;
int ret;
- *skip = false;
-
ret = acpi_dev_uid_to_integer(adev, &uid);
if (ret)
return 0;
@@ -463,7 +461,6 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
return 0;
}
-EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
bool acpi_quirk_skip_gpio_event_handlers(void)
{
@@ -478,8 +475,41 @@ bool acpi_quirk_skip_gpio_event_handlers(void)
return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
+#else
+static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ return 0;
+}
#endif
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ struct acpi_device *adev = ACPI_COMPANION(controller_parent);
+
+ *skip = false;
+
+ /*
+	 * The DELL0501 ACPI HID represents a UART (CID is set to PNP0501) with
+	 * a backlight-controller attached. There is no separate ACPI device with
+	 * a UartSerialBusV2() resource to model the backlight-controller.
+ * Set skip to true so that the tty core creates a serdev ctrl device.
+ * The backlight driver will manually create the serdev client device.
+ */
+ if (acpi_dev_hid_match(adev, "DELL0501")) {
+ *skip = true;
+ /*
+ * Create a platform dev for dell-uart-backlight to bind to.
+ * This is a static device, so no need to store the result.
+ */
+ platform_device_register_simple("dell-uart-backlight", PLATFORM_DEVID_NONE,
+ NULL, 0);
+ return 0;
+ }
+
+ return acpi_dmi_skip_serdev_enumeration(controller_parent, skip);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+
/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
static const struct {
const char *hid;
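
The platform device registered above does nothing until a driver binds to it. A hypothetical skeleton of the matching driver side (purely illustrative; the real dell-uart-backlight driver is not part of this patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int dell_uart_bl_probe(struct platform_device *pdev)
{
	/* create the serdev client for the backlight controller here */
	return 0;
}

static struct platform_driver dell_uart_bl_driver = {
	.probe = dell_uart_bl_probe,
	.driver = {
		.name = "dell-uart-backlight",	/* matches the registered device */
	},
};
module_platform_driver(dell_uart_bl_driver);

MODULE_LICENSE("GPL");
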
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8460458eb..d6f14c8e2 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1708,8 +1708,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
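
IS_ALIGNED(x, a) expands to ((x & (a - 1)) == 0) for power-of-two a, so the added check rejects any object offset that is not a multiple of 4 before the header is read. Spelled out as a standalone predicate:

/* Equivalent predicate for the new check (sizeof(u32) == 4) */
static inline bool object_offset_ok(size_t offset)
{
	return (offset & (sizeof(u32) - 1)) == 0;	/* IS_ALIGNED(offset, 4) */
}
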
@@ -1931,7 +1933,7 @@ static void binder_deferred_fd_close(int fd)
if (!twcb)
return;
init_task_work(&twcb->twork, binder_do_fd_close);
- twcb->file = close_fd_get_file(fd);
+ twcb->file = file_close_fd(fd);
if (twcb->file) {
// pin it until binder_do_fd_close(); see comments there
get_file(twcb->file);
@@ -2087,9 +2089,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* Convert the address to an offset relative to
* the base of the transaction buffer.
*/
- fda_offset =
- (parent->buffer - (uintptr_t)buffer->user_data) +
- fda->parent_offset;
+ fda_offset = parent->buffer - buffer->user_data +
+ fda->parent_offset;
for (fd_index = 0; fd_index < fda->num_fds;
fd_index++) {
u32 fd;
@@ -2607,7 +2608,7 @@ static int binder_translate_fd_array(struct list_head *pf_head,
* Convert the address to an offset relative to
* the base of the transaction buffer.
*/
- fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
+ fda_offset = parent->buffer - t->buffer->user_data +
fda->parent_offset;
sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
fda->parent_offset;
@@ -2682,8 +2683,9 @@ static int binder_fixup_parent(struct list_head *pf_head,
proc->pid, thread->pid);
return -EINVAL;
}
- buffer_offset = bp->parent_offset +
- (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+
+ buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
+
return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
@@ -3235,7 +3237,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
- !reply && (t->flags & TF_ONE_WAY), current->tgid);
+ !reply && (t->flags & TF_ONE_WAY));
if (IS_ERR(t->buffer)) {
char *s;
@@ -3260,7 +3262,7 @@ static void binder_transaction(struct binder_proc *proc,
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
- t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+ t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
secctx, secctx_sz);
@@ -3537,8 +3539,7 @@ static void binder_transaction(struct binder_proc *proc,
goto err_translate_failed;
}
/* Fixup buffer pointer to target proc address space */
- bp->buffer = (uintptr_t)
- t->buffer->user_data + sg_buf_offset;
+ bp->buffer = t->buffer->user_data + sg_buf_offset;
sg_buf_offset += ALIGN(bp->length, sizeof(u64));
num_valid = (buffer_offset - off_start_offset) /
@@ -4708,7 +4709,7 @@ retry:
}
trd->data_size = t->buffer->data_size;
trd->offsets_size = t->buffer->offsets_size;
- trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
+ trd->data.ptr.buffer = t->buffer->user_data;
trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
@@ -5991,9 +5992,9 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
if (buffer->target_node)
seq_printf(m, " node %d", buffer->target_node->debug_id);
- seq_printf(m, " size %zd:%zd data %pK\n",
+ seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- buffer->user_data);
+ proc->alloc.buffer - buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a56cbfd9b..e0e4dc38b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -26,7 +26,7 @@
#include "binder_alloc.h"
#include "binder_trace.h"
-struct list_lru binder_alloc_lru;
+struct list_lru binder_freelist;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
@@ -125,23 +125,20 @@ static void binder_insert_allocated_buffer_locked(
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
struct binder_alloc *alloc,
- uintptr_t user_ptr)
+ unsigned long user_ptr)
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- void __user *uptr;
-
- uptr = (void __user *)user_ptr;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (uptr < buffer->user_data)
+ if (user_ptr < buffer->user_data) {
n = n->rb_left;
- else if (uptr > buffer->user_data)
+ } else if (user_ptr > buffer->user_data) {
n = n->rb_right;
- else {
+ } else {
/*
* Guard against user threads attempting to
* free the buffer when in use by kernel or
@@ -168,145 +165,168 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
* Return: Pointer to buffer or NULL
*/
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
- uintptr_t user_ptr)
+ unsigned long user_ptr)
{
struct binder_buffer *buffer;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
return buffer;
}
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void __user *start, void __user *end)
+static inline void
+binder_set_installed_page(struct binder_lru_page *lru_page,
+ struct page *page)
+{
+ /* Pairs with acquire in binder_get_installed_page() */
+ smp_store_release(&lru_page->page_ptr, page);
+}
+
+static inline struct page *
+binder_get_installed_page(struct binder_lru_page *lru_page)
+{
+ /* Pairs with release in binder_set_installed_page() */
+ return smp_load_acquire(&lru_page->page_ptr);
+}
+
+static void binder_lru_freelist_add(struct binder_alloc *alloc,
+ unsigned long start, unsigned long end)
{
- void __user *page_addr;
- unsigned long user_page_addr;
struct binder_lru_page *page;
- struct vm_area_struct *vma = NULL;
- struct mm_struct *mm = NULL;
- bool need_mm = false;
+ unsigned long page_addr;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %pK-%pK\n", alloc->pid,
- allocate ? "allocate" : "free", start, end);
+ trace_binder_update_page_range(alloc, false, start, end);
- if (end <= start)
- return 0;
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ size_t index;
+ int ret;
- trace_binder_update_page_range(alloc, allocate, start, end);
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
- if (allocate == 0)
- goto free_range;
+ if (!binder_get_installed_page(page))
+ continue;
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (!page->page_ptr) {
- need_mm = true;
- break;
- }
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
}
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ struct binder_lru_page *lru_page,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret = 0;
- if (need_mm && mmget_not_zero(alloc->mm))
- mm = alloc->mm;
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
- if (mm) {
- mmap_write_lock(mm);
- vma = alloc->vma;
+ /*
+	 * Protected with mmap_lock in write mode as multiple tasks
+ * might race to install the same page.
+ */
+ mmap_write_lock(alloc->mm);
+ if (binder_get_installed_page(lru_page))
+ goto out;
+
+ if (!alloc->vma) {
+ pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
+ ret = -ESRCH;
+ goto out;
}
- if (!vma && need_mm) {
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- alloc->pid);
- goto err_no_vma;
+ page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page) {
+ pr_err("%d: failed to allocate page\n", alloc->pid);
+ ret = -ENOMEM;
+ goto out;
}
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ ret = vm_insert_page(alloc->vma, addr, page);
+ if (ret) {
+ pr_err("%d: %s failed to insert page at offset %lx with %d\n",
+ alloc->pid, __func__, addr - alloc->buffer, ret);
+ __free_page(page);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(lru_page, page);
+out:
+ mmap_write_unlock(alloc->mm);
+ mmput_async(alloc->mm);
+ return ret;
+}
+
+static int binder_install_buffer_pages(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ struct binder_lru_page *page;
+ unsigned long start, final;
+ unsigned long page_addr;
+
+ start = buffer->user_data & PAGE_MASK;
+ final = PAGE_ALIGN(buffer->user_data + size);
+
+ for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
+ unsigned long index;
int ret;
- bool on_lru;
- size_t index;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index];
- if (page->page_ptr) {
- trace_binder_alloc_lru_start(alloc, index);
-
- on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
- WARN_ON(!on_lru);
-
- trace_binder_alloc_lru_end(alloc, index);
+ if (binder_get_installed_page(page))
continue;
- }
-
- if (WARN_ON(!vma))
- goto err_page_ptr_cleared;
trace_binder_alloc_page_start(alloc, index);
- page->page_ptr = alloc_page(GFP_KERNEL |
- __GFP_HIGHMEM |
- __GFP_ZERO);
- if (!page->page_ptr) {
- pr_err("%d: binder_alloc_buf failed for page at %pK\n",
- alloc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- page->alloc = alloc;
- INIT_LIST_HEAD(&page->lru);
-
- user_page_addr = (uintptr_t)page_addr;
- ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- alloc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- if (index + 1 > alloc->pages_high)
- alloc->pages_high = index + 1;
+ ret = binder_install_single_page(alloc, page, page_addr);
+ if (ret)
+ return ret;
trace_binder_alloc_page_end(alloc, index);
}
- if (mm) {
- mmap_write_unlock(mm);
- mmput_async(mm);
- }
+
return 0;
+}
-free_range:
- for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
- bool ret;
- size_t index;
+/* The range of pages should exclude those shared with other buffers */
+static void binder_lru_freelist_del(struct binder_alloc *alloc,
+ unsigned long start, unsigned long end)
+{
+ struct binder_lru_page *page;
+ unsigned long page_addr;
+
+ trace_binder_update_page_range(alloc, true, start, end);
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ unsigned long index;
+ bool on_lru;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index];
- trace_binder_free_lru_start(alloc, index);
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
- ret = list_lru_add(&binder_alloc_lru, &page->lru);
- WARN_ON(!ret);
+ on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ WARN_ON(!on_lru);
- trace_binder_free_lru_end(alloc, index);
- if (page_addr == start)
- break;
- continue;
-
-err_vm_insert_page_failed:
- __free_page(page->page_ptr);
- page->page_ptr = NULL;
-err_alloc_page_failed:
-err_page_ptr_cleared:
- if (page_addr == start)
- break;
- }
-err_no_vma:
- if (mm) {
- mmap_write_unlock(mm);
- mmput_async(mm);
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (index + 1 > alloc->pages_high)
+ alloc->pages_high = index + 1;
}
- return vma ? -ENOMEM : -ESRCH;
}
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
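
The binder_set_installed_page()/binder_get_installed_page() pair above is the classic release/acquire publication idiom: the release store orders all prior initialization of the page before the pointer becomes visible, so any reader that observes a non-NULL pointer also observes a fully set-up page. Distilled to its generic shape (names are placeholders):

struct payload { int data; };
static struct payload *shared;

static void publisher(struct payload *p)
{
	p->data = 42;			/* initialize first ...          */
	smp_store_release(&shared, p);	/* ... then publish the pointer  */
}

static int consumer(void)
{
	struct payload *p = smp_load_acquire(&shared);

	return p ? p->data : -1;	/* non-NULL implies data visible */
}
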
@@ -323,7 +343,44 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return smp_load_acquire(&alloc->vma);
}
-static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static void debug_no_space_locked(struct binder_alloc *alloc)
+{
+ size_t largest_alloc_size = 0;
+ struct binder_buffer *buffer;
+ size_t allocated_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_alloc_size = 0;
+ size_t total_free_size = 0;
+ size_t free_buffers = 0;
+ size_t buffer_size;
+ struct rb_node *n;
+
+ for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+
+ for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers,
+ largest_alloc_size, total_free_size,
+ free_buffers, largest_free_size);
+}
+
+static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
/*
* Find the amount and size of buffers allocated by the current caller;
@@ -332,10 +389,20 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
* and at some point we'll catch them in the act. This is more efficient
* than keeping a map per pid.
*/
- struct rb_node *n;
struct binder_buffer *buffer;
size_t total_alloc_size = 0;
+ int pid = current->tgid;
size_t num_buffers = 0;
+ struct rb_node *n;
+
+ /*
+ * Only start detecting spammers once we have less than 20% of async
+ * space left (which is less than 10% of total buffer size).
+ */
+ if (alloc->free_async_space >= alloc->buffer_size / 10) {
+ alloc->oneway_spam_detected = false;
+ return false;
+ }
for (n = rb_first(&alloc->allocated_buffers); n != NULL;
n = rb_next(n)) {
@@ -365,56 +432,26 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
return false;
}
+/* Callers preallocate @new_buffer, it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async,
- int pid)
+ struct binder_buffer *new_buffer,
+ size_t size,
+ int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
+ struct rb_node *best_fit = NULL;
struct binder_buffer *buffer;
+ unsigned long next_used_page;
+ unsigned long curr_last_page;
size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void __user *has_page_addr;
- void __user *end_page_addr;
- size_t size, data_offsets_size;
- int ret;
-
- /* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%d: binder_alloc_buf, no vma\n",
- alloc->pid);
- return ERR_PTR(-ESRCH);
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: got transaction with invalid size %zd-%zd\n",
- alloc->pid, data_size, offsets_size);
- return ERR_PTR(-EINVAL);
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: got transaction with invalid extra_buffers_size %zd\n",
- alloc->pid, extra_buffers_size);
- return ERR_PTR(-EINVAL);
- }
-
- /* Pad 0-size buffers so they get assigned unique addresses */
- size = max(size, sizeof(void *));
if (is_async && alloc->free_async_space < size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
- return ERR_PTR(-ENOSPC);
+ buffer = ERR_PTR(-ENOSPC);
+ goto out;
}
while (n) {
@@ -425,121 +462,92 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
if (size < buffer_size) {
best_fit = n;
n = n->rb_left;
- } else if (size > buffer_size)
+ } else if (size > buffer_size) {
n = n->rb_right;
- else {
+ } else {
best_fit = n;
break;
}
}
- if (best_fit == NULL) {
- size_t allocated_buffers = 0;
- size_t largest_alloc_size = 0;
- size_t total_alloc_size = 0;
- size_t free_buffers = 0;
- size_t largest_free_size = 0;
- size_t total_free_size = 0;
-
- for (n = rb_first(&alloc->allocated_buffers); n != NULL;
- n = rb_next(n)) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- allocated_buffers++;
- total_alloc_size += buffer_size;
- if (buffer_size > largest_alloc_size)
- largest_alloc_size = buffer_size;
- }
- for (n = rb_first(&alloc->free_buffers); n != NULL;
- n = rb_next(n)) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- free_buffers++;
- total_free_size += buffer_size;
- if (buffer_size > largest_free_size)
- largest_free_size = buffer_size;
- }
+
+ if (unlikely(!best_fit)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf size %zd failed, no address space\n",
alloc->pid, size);
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
- total_alloc_size, allocated_buffers,
- largest_alloc_size, total_free_size,
- free_buffers, largest_free_size);
- return ERR_PTR(-ENOSPC);
+ debug_no_space_locked(alloc);
+ buffer = ERR_PTR(-ENOSPC);
+ goto out;
}
- if (n == NULL) {
+
+ if (buffer_size != size) {
+ /* Found an oversized buffer and needs to be split */
buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ WARN_ON(n || buffer_size == size);
+ new_buffer->user_data = buffer->user_data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ new_buffer = NULL;
}
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
alloc->pid, size, buffer, buffer_size);
- has_page_addr = (void __user *)
- (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
- WARN_ON(n && buffer_size != size);
- end_page_addr =
- (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- ret = binder_update_page_range(alloc, 1, (void __user *)
- PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
- if (ret)
- return ERR_PTR(ret);
-
- if (buffer_size != size) {
- struct binder_buffer *new_buffer;
-
- new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!new_buffer) {
- pr_err("%s: %d failed to alloc new buffer struct\n",
- __func__, alloc->pid);
- goto err_alloc_buf_struct_failed;
- }
- new_buffer->user_data = (u8 __user *)buffer->user_data + size;
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(alloc, new_buffer);
- }
+ /*
+	 * Now we remove the pages from the freelist. A calculation with
+	 * buffer_size determines whether the last page is shared with an
+	 * adjacent in-use buffer. In that case, the page has already been
+	 * removed from the freelist, so we trim our range short.
+ */
+ next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
+ curr_last_page = PAGE_ALIGN(buffer->user_data + size);
+ binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
+ min(next_used_page, curr_last_page));
- rb_erase(best_fit, &alloc->free_buffers);
+ rb_erase(&buffer->rb_node, &alloc->free_buffers);
buffer->free = 0;
buffer->allow_user_free = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %pK\n",
- alloc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->pid = pid;
buffer->oneway_spam_suspect = false;
if (is_async) {
alloc->free_async_space -= size;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
- if (alloc->free_async_space < alloc->buffer_size / 10) {
- /*
- * Start detecting spammers once we have less than 20%
- * of async space left (which is less than 10% of total
- * buffer size).
- */
- buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
- } else {
- alloc->oneway_spam_detected = false;
- }
+ if (debug_low_async_space_locked(alloc))
+ buffer->oneway_spam_suspect = true;
}
+
+out:
+ /* Discard possibly unused new_buffer */
+ kfree(new_buffer);
return buffer;
+}
-err_alloc_buf_struct_failed:
- binder_update_page_range(alloc, 0, (void __user *)
- PAGE_ALIGN((uintptr_t)buffer->user_data),
- end_page_addr);
- return ERR_PTR(-ENOMEM);
+/* Calculate the sanitized total size, returns 0 for invalid request */
+static inline size_t sanitized_size(size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size)
+{
+ size_t total, tmp;
+
+ /* Align to pointer size and check for overflows */
+ tmp = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+ if (tmp < data_size || tmp < offsets_size)
+ return 0;
+ total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
+ if (total < tmp || total < extra_buffers_size)
+ return 0;
+
+ /* Pad 0-sized buffers so they get a unique address */
+ total = max(total, sizeof(void *));
+
+ return total;
}
/**
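
The overflow checks in sanitized_size() rely on unsigned wraparound: if a sum wraps, the result is smaller than either operand, so comparing the sum against an input detects it. A standalone restatement:

/* Unsigned-add overflow test, as used by sanitized_size() */
static inline bool add_overflows(size_t a, size_t b)
{
	return a + b < a;	/* true iff the unsigned sum wrapped */
}
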
@@ -549,7 +557,6 @@ err_alloc_buf_struct_failed:
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
- * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -562,74 +569,89 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async,
- int pid)
+ int is_async)
{
- struct binder_buffer *buffer;
+ struct binder_buffer *buffer, *next;
+ size_t size;
+ int ret;
+
+ /* Check binder_alloc is fully initialized */
+ if (!binder_alloc_get_vma(alloc)) {
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ size = sanitized_size(data_size, offsets_size, extra_buffers_size);
+ if (unlikely(!size)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd-%zd\n",
+ alloc->pid, data_size, offsets_size,
+ extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Preallocate the next buffer */
+ next = kzalloc(sizeof(*next), GFP_KERNEL);
+ if (!next)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&alloc->lock);
+ buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
+ if (IS_ERR(buffer)) {
+ spin_unlock(&alloc->lock);
+ goto out;
+ }
+
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = current->tgid;
+ spin_unlock(&alloc->lock);
- mutex_lock(&alloc->mutex);
- buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async, pid);
- mutex_unlock(&alloc->mutex);
+ ret = binder_install_buffer_pages(alloc, buffer, size);
+ if (ret) {
+ binder_alloc_free_buf(alloc, buffer);
+ buffer = ERR_PTR(ret);
+ }
+out:
return buffer;
}
-static void __user *buffer_start_page(struct binder_buffer *buffer)
+static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
- return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
+ return buffer->user_data & PAGE_MASK;
}
-static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
+static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void __user *)
- (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
+ return (buffer->user_data - 1) & PAGE_MASK;
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
- struct binder_buffer *prev, *next = NULL;
- bool to_free = true;
+ struct binder_buffer *prev, *next;
+
+ if (PAGE_ALIGNED(buffer->user_data))
+ goto skip_freelist;
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
- to_free = false;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer->user_data,
- prev->user_data);
- }
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
+ goto skip_freelist;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
next = binder_buffer_next(buffer);
- if (buffer_start_page(next) == buffer_start_page(buffer)) {
- to_free = false;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid,
- buffer->user_data,
- next->user_data);
- }
- }
-
- if (PAGE_ALIGNED(buffer->user_data)) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer start %pK is page aligned\n",
- alloc->pid, buffer->user_data);
- to_free = false;
+ if (buffer_start_page(next) == buffer_start_page(buffer))
+ goto skip_freelist;
}
- if (to_free) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
- alloc->pid, buffer->user_data,
- prev->user_data,
- next ? next->user_data : NULL);
- binder_update_page_range(alloc, 0, buffer_start_page(buffer),
- buffer_start_page(buffer) + PAGE_SIZE);
- }
+ binder_lru_freelist_add(alloc, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE);
+skip_freelist:
list_del(&buffer->entry);
kfree(buffer);
}
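
binder_alloc_new_buf() preallocates the struct before taking alloc->lock because GFP_KERNEL allocations may sleep and spinlocks must not. The general shape of that pattern, with placeholder names:

static int add_item_sketch(struct list_head *head, spinlock_t *lock,
			   bool need_new)
{
	struct list_head *new = kzalloc(sizeof(*new), GFP_KERNEL); /* may sleep */

	if (!new)
		return -ENOMEM;

	spin_lock(lock);
	if (need_new) {
		list_add(new, head);
		new = NULL;	/* consumed; the final kfree() is a no-op */
	}
	spin_unlock(lock);

	kfree(new);		/* kfree(NULL) is safe */
	return 0;
}
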
@@ -662,10 +684,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
alloc->pid, size, alloc->free_async_space);
}
- binder_update_page_range(alloc, 0,
- (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
- (void __user *)(((uintptr_t)
- buffer->user_data + buffer_size) & PAGE_MASK));
+ binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
+ (buffer->user_data + buffer_size) & PAGE_MASK);
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -689,8 +709,68 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
}
+/**
+ * binder_alloc_get_page() - get kernel pointer for given buffer offset
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be accessed
+ * @buffer_offset: offset into @buffer data
+ * @pgoffp: address to copy final page offset to
+ *
+ * Lookup the struct page corresponding to the address
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
+ * NULL, the byte-offset into the page is written there.
+ *
+ * The caller is responsible for ensuring that the offset points
+ * to a valid address within the @buffer and that @buffer is
+ * not freeable by the user. Since it can't be freed, we are
+ * guaranteed that the corresponding elements of @alloc->pages[]
+ * cannot change.
+ *
+ * Return: struct page
+ */
+static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ pgoff_t *pgoffp)
+{
+ binder_size_t buffer_space_offset = buffer_offset +
+ (buffer->user_data - alloc->buffer);
+ pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
+ size_t index = buffer_space_offset >> PAGE_SHIFT;
+ struct binder_lru_page *lru_page;
+
+ lru_page = &alloc->pages[index];
+ *pgoffp = pgoff;
+ return lru_page->page_ptr;
+}
+
+/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffer);
+ struct binder_buffer *buffer)
+{
+ size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+ binder_size_t buffer_offset = 0;
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ memset_page(page, pgoff, 0, size);
+ bytes -= size;
+ buffer_offset += size;
+ }
+}
+
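
The index arithmetic in binder_alloc_get_page() is plain base-plus-offset math; a worked example with PAGE_SIZE == 4096 (the offsets are made up):

/*
 * buffer->user_data - alloc->buffer = 0x1800, buffer_offset = 0x900
 * buffer_space_offset = 0x1800 + 0x900 = 0x2100
 * index = 0x2100 >> 12 = 2      (third page of the mapping)
 * pgoff = 0x2100 & 0xfff = 0x100
 */
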
/**
* binder_alloc_free_buf() - free a binder buffer
* @alloc: binder_alloc for this proc
@@ -705,17 +785,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc mutex if clear_on_free
- * is used frequently for large buffers. The mutex is not
+ * increase contention for the alloc->lock if clear_on_free
+ * is used frequently for large buffers. This lock is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
binder_free_buf_locked(alloc, buffer);
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
}
/**
@@ -734,9 +814,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
- int ret;
- const char *failure_string;
struct binder_buffer *buffer;
+ const char *failure_string;
+ int ret, i;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -754,7 +834,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = (void __user *)vma->vm_start;
+ alloc->buffer = vma->vm_start;
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
@@ -765,6 +845,11 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_pages_failed;
}
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ alloc->pages[i].alloc = alloc;
+ INIT_LIST_HEAD(&alloc->pages[i].lru);
+ }
+
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -787,7 +872,7 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = NULL;
+ alloc->buffer = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -808,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -840,25 +925,25 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- void __user *page_addr;
+ unsigned long page_addr;
bool on_lru;
if (!alloc->pages[i].page_ptr)
continue;
- on_lru = list_lru_del(&binder_alloc_lru,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del_obj(&binder_freelist,
+ &alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK %s\n",
- __func__, alloc->pid, i, page_addr,
+ "%s: %d: page %d %s\n",
+ __func__, alloc->pid, i,
on_lru ? "on lru" : "active");
__free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
}
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -867,16 +952,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
__func__, alloc->pid, buffers, page_count);
}
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->user_data,
- buffer->data_size, buffer->offsets_size,
- buffer->extra_buffers_size,
- buffer->transaction ? "active" : "delivered");
-}
-
/**
* binder_alloc_print_allocated() - print buffer info
* @m: seq_file for output via seq_printf()
@@ -888,13 +963,20 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc)
{
+ struct binder_buffer *buffer;
struct rb_node *n;
- mutex_lock(&alloc->mutex);
- for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
- mutex_unlock(&alloc->mutex);
+ spin_lock(&alloc->lock);
+ for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
+ buffer->debug_id,
+ buffer->user_data - alloc->buffer,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+ }
+ spin_unlock(&alloc->lock);
}
/**
@@ -911,7 +993,7 @@ void binder_alloc_print_pages(struct seq_file *m,
int lru = 0;
int free = 0;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
@@ -927,7 +1009,7 @@ void binder_alloc_print_pages(struct seq_file *m,
lru++;
}
}
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -943,10 +1025,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
return count;
}
@@ -979,35 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
void *cb_arg)
__must_hold(lock)
{
- struct mm_struct *mm = NULL;
- struct binder_lru_page *page = container_of(item,
- struct binder_lru_page,
- lru);
- struct binder_alloc *alloc;
- uintptr_t page_addr;
- size_t index;
+ struct binder_lru_page *page = container_of(item, typeof(*page), lru);
+ struct binder_alloc *alloc = page->alloc;
+ struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
+ struct page *page_to_free;
+ unsigned long page_addr;
+ size_t index;
- alloc = page->alloc;
- if (!mutex_trylock(&alloc->mutex))
- goto err_get_alloc_mutex_failed;
-
+ if (!mmget_not_zero(mm))
+ goto err_mmget;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ if (!spin_trylock(&alloc->lock))
+ goto err_get_alloc_lock_failed;
if (!page->page_ptr)
goto err_page_already_freed;
index = page - alloc->pages;
- page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ page_addr = alloc->buffer + index * PAGE_SIZE;
- mm = alloc->mm;
- if (!mmget_not_zero(mm))
- goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
vma = vma_lookup(mm, page_addr);
if (vma && vma != binder_alloc_get_vma(alloc))
goto err_invalid_vma;
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ page_to_free = page->page_ptr;
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
list_lru_isolate(lru, item);
+ spin_unlock(&alloc->lock);
spin_unlock(lock);
if (vma) {
@@ -1017,41 +1103,35 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
+
mmap_read_unlock(mm);
mmput_async(mm);
-
- trace_binder_unmap_kernel_start(alloc, index);
-
- __free_page(page->page_ptr);
- page->page_ptr = NULL;
-
- trace_binder_unmap_kernel_end(alloc, index);
+ __free_page(page_to_free);
spin_lock(lock);
- mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
err_invalid_vma:
+err_page_already_freed:
+ spin_unlock(&alloc->lock);
+err_get_alloc_lock_failed:
mmap_read_unlock(mm);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
-err_page_already_freed:
- mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
return LRU_SKIP;
}
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- return list_lru_count(&binder_alloc_lru);
+ return list_lru_count(&binder_freelist);
}
static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ return list_lru_walk(&binder_freelist, binder_alloc_free_page,
NULL, sc->nr_to_scan);
}
@@ -1069,7 +1149,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- mutex_init(&alloc->mutex);
+ spin_lock_init(&alloc->lock);
INIT_LIST_HEAD(&alloc->buffers);
}
@@ -1077,13 +1157,13 @@ int binder_alloc_shrinker_init(void)
{
int ret;
- ret = list_lru_init(&binder_alloc_lru);
+ ret = list_lru_init(&binder_freelist);
if (ret)
return ret;
binder_shrinker = shrinker_alloc(0, "android-binder");
if (!binder_shrinker) {
- list_lru_destroy(&binder_alloc_lru);
+ list_lru_destroy(&binder_freelist);
return -ENOMEM;
}
@@ -1098,7 +1178,7 @@ int binder_alloc_shrinker_init(void)
void binder_alloc_shrinker_exit(void)
{
shrinker_free(binder_shrinker);
- list_lru_destroy(&binder_alloc_lru);
+ list_lru_destroy(&binder_freelist);
}
/**
@@ -1134,68 +1214,6 @@ static inline bool check_buffer(struct binder_alloc *alloc,
}
/**
- * binder_alloc_get_page() - get kernel pointer for given buffer offset
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @pgoffp: address to copy final page offset to
- *
- * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->user_data. If @pgoffp is not
- * NULL, the byte-offset into the page is written there.
- *
- * The caller is responsible to ensure that the offset points
- * to a valid address within the @buffer and that @buffer is
- * not freeable by the user. Since it can't be freed, we are
- * guaranteed that the corresponding elements of @alloc->pages[]
- * cannot change.
- *
- * Return: struct page
- */
-static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- pgoff_t *pgoffp)
-{
- binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
- pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
- size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
-
- lru_page = &alloc->pages[index];
- *pgoffp = pgoff;
- return lru_page->page_ptr;
-}
-
-/**
- * binder_alloc_clear_buf() - zero out buffer
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be cleared
- *
- * memset the given buffer to 0
- */
-static void binder_alloc_clear_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
-{
- size_t bytes = binder_alloc_buffer_size(alloc, buffer);
- binder_size_t buffer_offset = 0;
-
- while (bytes) {
- unsigned long size;
- struct page *page;
- pgoff_t pgoff;
-
- page = binder_alloc_get_page(alloc, buffer,
- buffer_offset, &pgoff);
- size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- memset_page(page, pgoff, 0, size);
- bytes -= size;
- buffer_offset += size;
- }
-}
-
-/**
* binder_alloc_copy_user_to_buffer() - copy src user to tgt user
* @alloc: binder_alloc for this proc
* @buffer: binder buffer to be accessed
@@ -1289,4 +1307,3 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
dest, bytes);
}
-
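Note: the binder_alloc_get_page() helper moved above boils down to splitting a
buffer-space offset into an index into alloc->pages[] and a byte offset within
that page. A standalone sketch of the same arithmetic, assuming 4 KiB pages
(user-space C, purely illustrative):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Split an offset into the mmap'd region into (page index, in-page offset). */
static void split_offset(unsigned long space_offset,
			 size_t *index, unsigned long *pgoff)
{
	*index = space_offset >> PAGE_SHIFT;	/* element of pages[] */
	*pgoff = space_offset & ~PAGE_MASK;	/* byte offset in that page */
}

int main(void)
{
	size_t index;
	unsigned long pgoff;

	split_offset(0x2345, &index, &pgoff);
	printf("index=%zu pgoff=%#lx\n", index, pgoff);	/* index=2 pgoff=0x345 */
	return 0;
}

binder_alloc_clear_buf() then walks the buffer page by page, clearing
min(bytes, PAGE_SIZE - pgoff) per iteration, so partial first and last pages
are handled correctly.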
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index dc1e2b01d..703872344 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,13 +9,13 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/rtmutex.h>
+#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>
-extern struct list_lru binder_alloc_lru;
+extern struct list_lru binder_freelist;
struct binder_transaction;
/**
@@ -49,21 +49,19 @@ struct binder_buffer {
unsigned async_transaction:1;
unsigned oneway_spam_suspect:1;
unsigned debug_id:27;
-
struct binder_transaction *transaction;
-
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- void __user *user_data;
- int pid;
+ unsigned long user_data;
+ int pid;
};
/**
* struct binder_lru_page - page object used for binder shrinker
* @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_alloc_lru
+ * @lru: entry in binder_freelist
* @alloc: binder_alloc for a proc
*/
struct binder_lru_page {
@@ -74,7 +72,7 @@ struct binder_lru_page {
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @mutex: protects binder_alloc fields
+ * @lock: protects binder_alloc fields
* @vma: vm_area_struct passed to mmap_handler
* (invariant after mmap)
* @mm: copy of task->mm (invariant after open)
@@ -98,10 +96,10 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- struct mutex mutex;
+ spinlock_t lock;
struct vm_area_struct *vma;
struct mm_struct *mm;
- void __user *buffer;
+ unsigned long buffer;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
@@ -121,27 +119,26 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
spinlock_t *lock, void *cb_arg);
-extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async,
- int pid);
-extern void binder_alloc_init(struct binder_alloc *alloc);
-extern int binder_alloc_shrinker_init(void);
-extern void binder_alloc_shrinker_exit(void);
-extern void binder_alloc_vma_close(struct binder_alloc *alloc);
-extern struct binder_buffer *
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+void binder_alloc_init(struct binder_alloc *alloc);
+int binder_alloc_shrinker_init(void);
+void binder_alloc_shrinker_exit(void);
+void binder_alloc_vma_close(struct binder_alloc *alloc);
+struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
- uintptr_t user_ptr);
-extern void binder_alloc_free_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffer);
-extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
- struct vm_area_struct *vma);
-extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
-extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
-extern void binder_alloc_print_allocated(struct seq_file *m,
- struct binder_alloc *alloc);
+ unsigned long user_ptr);
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+void binder_alloc_deferred_release(struct binder_alloc *alloc);
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc);
@@ -156,9 +153,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
- mutex_lock(&alloc->mutex);
+ spin_lock(&alloc->lock);
free_async_space = alloc->free_async_space;
- mutex_unlock(&alloc->mutex);
+ spin_unlock(&alloc->lock);
return free_async_space;
}
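Note: the mutex-to-spinlock conversion in this header is only safe because,
after the rework above, nothing that can sleep (page allocation, user copies,
freelist handling) runs with the lock held; the lock now guards only short
rbtree and counter updates. binder_alloc_get_free_async_space() is the usual
snapshot accessor, sketched generically below with hypothetical names:

#include <linux/spinlock.h>

struct counter {
	spinlock_t lock;
	size_t value;
};

/* Return a consistent snapshot; it may be stale by the time it is used. */
static size_t counter_read(struct counter *c)
{
	size_t v;

	spin_lock(&c->lock);
	v = c->value;
	spin_unlock(&c->lock);

	return v;
}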
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index c2b323bc3..81442fe20 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -72,6 +72,10 @@ enum buf_end_align_type {
* buf1 ]|[ buf2 | buf2 | buf2 ][ ...
*/
NEXT_NEXT_UNALIGNED,
+ /**
+ * @LOOP_END: The number of enum values in &buf_end_align_type.
+ * It is used for controlling loop termination.
+ */
LOOP_END,
};
@@ -93,11 +97,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- void __user *page_addr;
- void __user *end;
+ unsigned long page_addr;
+ unsigned long end;
int page_index;
- end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+ end = PAGE_ALIGN(buffer->user_data + size);
page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
@@ -119,7 +123,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
int i;
for (i = 0; i < BUFFER_NUM; i++) {
- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
if (IS_ERR(buffers[i]) ||
!check_buffer_pages_allocated(alloc, buffers[i],
sizes[i])) {
@@ -158,8 +162,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
int i;
unsigned long count;
- while ((count = list_lru_count(&binder_alloc_lru))) {
- list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ while ((count = list_lru_count(&binder_freelist))) {
+ list_lru_walk(&binder_freelist, binder_alloc_free_page,
NULL, count);
}
@@ -183,7 +187,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
/* Allocate from lru. */
binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- if (list_lru_count(&binder_alloc_lru))
+ if (list_lru_count(&binder_freelist))
pr_err("lru list should be empty but is not\n");
binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
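Note: the new @LOOP_END kernel-doc describes a common sentinel-enum idiom: the
last enumerator is not a real value but the count of the ones before it, so
loops can cover every case without a separate #define. A minimal sketch with
hypothetical names:

enum buf_align {
	ALIGN_FRONT,
	ALIGN_BACK,
	ALIGN_NONE,
	LOOP_END,	/* sentinel: number of real enumerators above */
};

static void for_each_align(void (*fn)(enum buf_align))
{
	int i;

	for (i = 0; i < LOOP_END; i++)
		fn(i);
}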
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 8cc07e6a4..fe38c6fc6 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -317,7 +317,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release,
TRACE_EVENT(binder_update_page_range,
TP_PROTO(struct binder_alloc *alloc, bool allocate,
- void __user *start, void __user *end),
+ unsigned long start, unsigned long end),
TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 1224ab7aa..3001d754a 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -29,7 +29,6 @@
#include <linux/uaccess.h>
#include <linux/user_namespace.h>
#include <linux/xarray.h>
-#include <uapi/asm-generic/errno-base.h>
#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index be3412cdb..c449d60d9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
bool cdl_enabled;
u64 val;
- if (ata_id_major_version(dev->id) < 12)
+ if (ata_id_major_version(dev->id) < 11)
goto not_supported;
if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index b6656c287..0fb193487 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -784,7 +784,7 @@ bool sata_lpm_ignore_phy_events(struct ata_link *link)
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
static const char *ata_lpm_policy_names[] = {
- [ATA_LPM_UNKNOWN] = "max_performance",
+ [ATA_LPM_UNKNOWN] = "keep_firmware_settings",
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2f4c58837..e95497689 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
- goto unlock;
+ goto unlock_ap;
if (!sdev)
continue;
@@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (do_resume) {
ret = scsi_resume_device(sdev);
if (ret == -EWOULDBLOCK)
- goto unlock;
+ goto unlock_scan;
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
@@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_lock_irqsave(ap->lock, flags);
if (ret)
- goto unlock;
+ goto unlock_ap;
}
}
-unlock:
+unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */
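Note: the unlock -> unlock_ap/unlock_scan split matters because the two error
sites hold different locks: the ATA_PFLAG_SUSPENDED check runs with both
ap->lock and scsi_scan_mutex held, while the scsi_resume_device() failure path
has already dropped ap->lock. One label per lock state keeps every goto
balanced. The shape of the fix as a standalone sketch, with stand-in helpers
(the real code uses spin_lock_irqsave()):

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(scan_mutex);
static DEFINE_SPINLOCK(ap_lock);

static bool device_ready(void) { return true; }	/* stand-in */
static int resume_device(void) { return 0; }	/* stand-in */

static int rescan(void)
{
	int ret = 0;

	mutex_lock(&scan_mutex);
	spin_lock(&ap_lock);

	if (!device_ready()) {
		ret = -EBUSY;
		goto unlock_ap;		/* both locks held here */
	}

	spin_unlock(&ap_lock);

	ret = resume_device();		/* may block; ap_lock not held */
	if (ret)
		goto unlock_scan;	/* only the mutex is held */

	spin_lock(&ap_lock);
unlock_ap:
	spin_unlock(&ap_lock);
unlock_scan:
	mutex_unlock(&scan_mutex);
	return ret;
}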
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 5275c6464..538bd3423 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -274,10 +274,9 @@ static int pxa_ata_probe(struct platform_device *pdev)
/*
* Request the DMA channel
*/
- data->dma_chan =
- dma_request_slave_channel(&pdev->dev, "data");
- if (!data->dma_chan)
- return -EBUSY;
+ data->dma_chan = dma_request_chan(&pdev->dev, "data");
+ if (IS_ERR(data->dma_chan))
+ return PTR_ERR(data->dma_chan);
ret = dmaengine_slave_config(data->dma_chan, &config);
if (ret < 0) {
dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
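Note: dma_request_chan() returns an ERR_PTR() rather than NULL on failure, so
the probe can propagate the real error, including -EPROBE_DEFER when the DMA
controller has not probed yet; the old NULL check collapsed every failure into
-EBUSY. A minimal sketch of the pattern (the "rx" channel name is
illustrative):

#include <linux/device.h>
#include <linux/dmaengine.h>

static int request_rx_dma(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		/* Keeps -EPROBE_DEFER intact, unlike a NULL check. */
		return dev_err_probe(dev, PTR_ERR(chan),
				     "failed to get rx DMA channel\n");

	*out = chan;
	return 0;
}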
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index e82786c63..9bec0aee9 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
},
};
-static const struct pci_device_id mv_pci_tbl[] = {
- { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
- { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
- { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
- /* RocketRAID 1720/174x have different identifiers */
- { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
- { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
-
- { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
- { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
- { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
- { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
-
- { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
-
- /* Adaptec 1430SA */
- { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
-
- /* Marvell 7042 support */
- { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
-
- /* Highpoint RocketRAID PCIe series */
- { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
- { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
-
- { } /* terminate list */
-};
-
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
@@ -4303,6 +4272,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
+static const struct pci_device_id mv_pci_tbl[] = {
+ { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+ { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+ /* RocketRAID 1720/174x have different identifiers */
+ { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+ { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+ { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+ { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+ { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+ /* Adaptec 1430SA */
+ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+ /* Marvell 7042 support */
+ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+ /* Highpoint RocketRAID PCIe series */
+ { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+ { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+ { } /* terminate list */
+};
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
@@ -4315,6 +4314,7 @@ static struct pci_driver mv_pci_driver = {
#endif
};
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
/**
* mv_print_info - Dump key info to kernel log for perusal.
@@ -4487,7 +4487,6 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
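Note: moving mv_pci_tbl next to mv_pci_driver also lets MODULE_DEVICE_TABLE()
sit beside the table it exports; that macro emits the alias metadata udev uses
to autoload the module when a matching PCI ID appears. The minimal shape of
the idiom, with hypothetical IDs:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_tbl[] = {
	{ PCI_DEVICE(0x1b4b, 0x9235) },	/* hypothetical vendor/device */
	{ }				/* terminate list */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);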
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index b51d7a9d0..a482741eb 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
offset -= (idx * window_size);
idx++;
- dist = ((long) (window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
psource += dist;
@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
- dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
- (long) (window_size - offset);
+ dist = min(size, window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
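Note: both hunks replace the same open-coded clamp: "copy size bytes unless
that runs past the DIMM window, else copy up to the window end". Given
offset < window_size, that is exactly min(size, window_size - offset). A quick
exhaustive check of the equivalence over a small made-up window (standalone C):

#include <assert.h>

static long old_dist(long window_size, long offset, long size)
{
	return window_size - (offset + size) >= 0 ? size
						  : window_size - offset;
}

static long new_dist(long window_size, long offset, long size)
{
	return size < window_size - offset ? size : window_size - offset;
}

int main(void)
{
	for (long off = 0; off < 64; off++)
		for (long sz = 0; sz < 128; sz++)
			assert(old_dist(64, off, sz) == new_dist(64, off, sz));
	return 0;
}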
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 96bea1ab1..d4aa0f353 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -494,6 +494,7 @@ static void __exit atmtcp_exit(void)
deregister_atm_ioctl(&atmtcp_ioctl_ops);
}
+MODULE_DESCRIPTION("ATM over TCP");
MODULE_LICENSE("GPL");
module_init(atmtcp_init);
module_exit(atmtcp_exit);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index a31ffe16e..3011cf1a8 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2318,4 +2318,5 @@ static int __init eni_init(void)
module_init(eni_init);
/* @@@ since exit routine not defined, this module can not be unloaded */
+MODULE_DESCRIPTION("Efficient Networks ENI155P ATM NIC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index bfca7b8a6..fcd70e094 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -372,4 +372,5 @@ static void __exit idt77105_exit(void)
module_exit(idt77105_exit);
+MODULE_DESCRIPTION("IDT77105 PHY driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 9bba8f280..d213adcef 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -90,6 +90,7 @@ module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);
+MODULE_DESCRIPTION("Driver for Interphase ATM PCI NICs");
MODULE_LICENSE("GPL");
/**************************** IA_LIB **********************************/
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 1a50de39f..27153d6bc 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -171,6 +171,7 @@ static const struct atmdev_ops atm_ops = {
static struct timer_list ns_timer;
static char *mac[NS_MAX_CARDS];
module_param_array(mac, charp, NULL, 0);
+MODULE_DESCRIPTION("ATM NIC driver for IDT 77201/77211 \"NICStAR\" and Fore ForeRunnerLE.");
MODULE_LICENSE("GPL");
/* Functions */
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 21e5acc76..32802ea95 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -387,4 +387,5 @@ int suni_init(struct atm_dev *dev)
EXPORT_SYMBOL(suni_init);
+MODULE_DESCRIPTION("S/UNI PHY driver");
MODULE_LICENSE("GPL");
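Note: this run of one-line additions across the ATM drivers backfills
MODULE_DESCRIPTION(), which modpost now flags when missing in W=1 builds. The
usual metadata block looks like this (author line hypothetical):

#include <linux/module.h>

MODULE_DESCRIPTION("Example widget driver");	/* what these hunks add */
MODULE_AUTHOR("Jane Doe");			/* hypothetical */
MODULE_LICENSE("GPL");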
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 64012cda4..d944d5298 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -112,10 +112,7 @@ config CFAG12864B
depends on X86
depends on FB
depends on KS0108
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
default n
help
If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
@@ -170,10 +167,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
depends on FB && I2C && INPUT
- select FB_SYS_FOPS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
select NEW_LEDS
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 729845bcc..5ba19c339 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -51,16 +51,15 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct page *pages = virt_to_page(cfag12864b_buffer);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops cfag12864bfb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = cfag12864bfb_mmap,
};
@@ -72,6 +71,7 @@ static int cfag12864bfb_probe(struct platform_device *device)
if (!info)
goto none;
+ info->flags = FBINFO_VIRTFB;
info->screen_buffer = cfag12864b_buffer;
info->screen_size = CFAG12864B_SIZE;
info->fbops = &cfag12864bfb_ops;
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 3a2d88387..a90430b7d 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -351,17 +351,16 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct ht16k33_priv *priv = info->par;
struct page *pages = virt_to_page(priv->fbdev.buffer);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return vm_map_pages_zero(vma, &pages, 1);
}
static const struct fb_ops ht16k33_fb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_blank = ht16k33_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ht16k33_mmap,
};
@@ -640,6 +639,7 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update);
fbdev->info->fbops = &ht16k33_fb_ops;
+ fbdev->info->flags |= FBINFO_VIRTFB;
fbdev->info->screen_buffer = fbdev->buffer;
fbdev->info->screen_size = HT16K33_FB_SIZE;
fbdev->info->fix = ht16k33_fb_fix;
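Note: __FB_DEFAULT_SYSMEM_OPS_RDWR and __FB_DEFAULT_SYSMEM_OPS_DRAW expand to
the same fb_sys_read/fb_sys_write and sys_fillrect/sys_copyarea/sys_imageblit
hooks these hunks delete, with the helpers pulled in once via the
FB_SYSMEM_HELPERS Kconfig select; FBINFO_VIRTFB tells the core the screen
buffer lives in system memory rather than I/O memory. A hedged sketch of a
driver using them (mmap stays driver-specific, mirroring the code above):

#include <linux/fb.h>
#include <linux/mm.h>

static int demo_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct page *page = virt_to_page(info->screen_buffer);

	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	return vm_map_pages_zero(vma, &page, 1);
}

static const struct fb_ops demo_fb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_SYSMEM_OPS_RDWR,	/* fb_sys_read()/fb_sys_write() */
	__FB_DEFAULT_SYSMEM_OPS_DRAW,	/* sys_fillrect()/copyarea/imageblit */
	.fb_mmap = demo_fb_mmap,
};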
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index fa23e415f..56efda074 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -8,9 +8,9 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -225,17 +225,11 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
*/
static int img_ascii_lcd_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- const struct img_ascii_lcd_config *cfg;
struct device *dev = &pdev->dev;
+ const struct img_ascii_lcd_config *cfg = device_get_match_data(dev);
struct img_ascii_lcd_ctx *ctx;
int err;
- match = of_match_device(img_ascii_lcd_matches, dev);
- if (!match)
- return -ENODEV;
-
- cfg = match->data;
ctx = devm_kzalloc(dev, sizeof(*ctx) + cfg->num_chars, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
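Note: device_get_match_data() folds the of_match_device() dance into one call
and also works for ACPI matches, which is why the OF-specific headers can be
dropped. A minimal probe sketch (struct demo_config is hypothetical; the NULL
check is optional when every match entry carries data):

#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_config {
	unsigned int num_chars;
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_config *cfg = device_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;

	dev_info(&pdev->dev, "%u characters\n", cfg->num_chars);
	return 0;
}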
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index b741b5ba8..024b78a0c 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#include <linux/units.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>
@@ -26,7 +27,8 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-static DEFINE_PER_CPU(u32, freq_factor) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
@@ -170,9 +172,9 @@ DEFINE_PER_CPU(unsigned long, thermal_pressure);
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
* thermal capping. It might be also a boost frequency value, which is bigger
- * than the internal 'freq_factor' max frequency. In such case the pressure
- * value should simply be removed, since this is an indication that there is
- * no thermal throttling. The @capped_freq must be provided in kHz.
+ * than the internal 'capacity_freq_ref' max frequency. In such case the
+ * pressure value should simply be removed, since this is an indication that
+ * there is no thermal throttling. The @capped_freq must be provided in kHz.
*/
void topology_update_thermal_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
@@ -183,10 +185,7 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
cpu = cpumask_first(cpus);
max_capacity = arch_scale_cpu_capacity(cpu);
- max_freq = per_cpu(freq_factor, cpu);
-
- /* Convert to MHz scale which is used in 'freq_factor' */
- capped_freq /= 1000;
+ max_freq = arch_scale_freq_ref(cpu);
/*
* Handle properly the boost frequencies, which should simply clean
@@ -220,20 +219,34 @@ static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
static DEVICE_ATTR_RO(cpu_capacity);
-static int register_cpu_capacity_sysctl(void)
+static int cpu_capacity_sysctl_add(unsigned int cpu)
{
- int i;
- struct device *cpu;
+ struct device *cpu_dev = get_cpu_device(cpu);
- for_each_possible_cpu(i) {
- cpu = get_cpu_device(i);
- if (!cpu) {
- pr_err("%s: too early to get CPU%d device!\n",
- __func__, i);
- continue;
- }
- device_create_file(cpu, &dev_attr_cpu_capacity);
- }
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ return -ENOENT;
+
+ device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+ return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+ cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
return 0;
}
@@ -279,13 +292,13 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
@@ -321,15 +334,15 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
cpu_node, raw_capacity[cpu]);
/*
- * Update freq_factor for calculating early boot cpu capacities.
+ * Update capacity_freq_ref for calculating early boot CPU capacities.
* For non-clk CPU DVFS mechanism, there's no way to get the
* frequency value now, assuming they are running at the same
- * frequency (by keeping the initial freq_factor value).
+ * frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
if (!PTR_ERR_OR_ZERO(cpu_clk)) {
- per_cpu(freq_factor, cpu) =
- clk_get_rate(cpu_clk) / 1000;
+ per_cpu(capacity_freq_ref, cpu) =
+ clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
}
} else {
@@ -345,11 +358,16 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
+{
+}
+
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
void topology_init_cpu_capacity_cppc(void)
{
+ u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
int cpu;
@@ -366,6 +384,10 @@ void topology_init_cpu_capacity_cppc(void)
(perf_caps.highest_perf >= perf_caps.nominal_perf) &&
(perf_caps.highest_perf >= perf_caps.lowest_perf)) {
raw_capacity[cpu] = perf_caps.highest_perf;
+ capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);
+
+ per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);
+
pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
cpu, raw_capacity[cpu]);
continue;
@@ -376,7 +398,18 @@ void topology_init_cpu_capacity_cppc(void)
goto exit;
}
- topology_normalize_cpu_scale();
+ for_each_possible_cpu(cpu) {
+ freq_inv_set_max_ratio(cpu,
+ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+
+ capacity = raw_capacity[cpu];
+ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+ capacity_scale);
+ topology_set_cpu_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, topology_get_cpu_scale(cpu));
+ }
+
schedule_work(&update_topology_flags_work);
pr_debug("cpu_capacity: cpu_capacity initialization done\n");
@@ -398,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (!raw_capacity)
- return 0;
-
if (val != CPUFREQ_CREATE_POLICY)
return 0;
@@ -410,13 +440,18 @@ init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus)
- per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
+ for_each_cpu(cpu, policy->related_cpus) {
+ per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
+ freq_inv_set_max_ratio(cpu,
+ per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
+ }
if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- schedule_work(&update_topology_flags_work);
- free_raw_capacity();
+ if (raw_capacity) {
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ free_raw_capacity();
+ }
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
}
@@ -436,7 +471,7 @@ static int __init register_cpufreq_notifier(void)
* On ACPI-based systems skip registering cpufreq notifier as cpufreq
* information is not needed for cpu capacity initialization.
*/
- if (!acpi_disabled || !raw_capacity)
+ if (!acpi_disabled)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 4d4c2c8d2..d3a2c40c2 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -244,7 +244,7 @@ static void auxiliary_bus_shutdown(struct device *dev)
auxdrv->shutdown(auxdev);
}
-static struct bus_type auxiliary_bus_type = {
+static const struct bus_type auxiliary_bus_type = {
.name = "auxiliary",
.probe = auxiliary_bus_probe,
.remove = auxiliary_bus_remove,
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 84a21084d..daee55c9b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1030,7 +1030,7 @@ static void device_insertion_sort_klist(struct device *a, struct list_head *list
list_move_tail(&a->p->knode_bus.n_node, list);
}
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b))
{
@@ -1194,7 +1194,7 @@ static void system_root_device_release(struct device *dev)
kfree(dev);
}
-static int subsys_register(struct bus_type *subsys,
+static int subsys_register(const struct bus_type *subsys,
const struct attribute_group **groups,
struct kobject *parent_of_root)
{
@@ -1264,7 +1264,7 @@ err_sp:
* directory itself and not some create fake root-device placed in
* /sys/devices/system/<name>.
*/
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups)
{
return subsys_register(subsys, groups, &system_kset->kobj);
@@ -1282,7 +1282,7 @@ EXPORT_SYMBOL_GPL(subsys_system_register);
* There's no restriction on device naming. This is for kernel software
* constructs which need sysfs interface.
*/
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups)
{
struct kobject *virtual_dir;
diff --git a/drivers/base/container.c b/drivers/base/container.c
index 1ba42d2d3..f40588ebc 100644
--- a/drivers/base/container.c
+++ b/drivers/base/container.c
@@ -24,7 +24,7 @@ static int container_offline(struct device *dev)
return cdev->offline ? cdev->offline(cdev) : 0;
}
-struct bus_type container_subsys = {
+const struct bus_type container_subsys = {
.name = CONTAINER_BUS_NAME,
.dev_name = CONTAINER_BUS_NAME,
.online = trivial_online,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f9fb69249..7f39c813c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,7 @@ static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
+static struct workqueue_struct *device_link_wq;
/**
* __fwnode_link_add - Create a link between two fwnode_handles.
@@ -125,7 +126,7 @@ static void __fwnode_link_del(struct fwnode_link *link)
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
- pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
@@ -300,7 +301,7 @@ static inline bool device_link_flag_is_sync_state_only(u32 flags)
* Check if @target depends on @dev or any device dependent on it (its child or
* its consumer etc). Return 1 if that is the case or 0 otherwise.
*/
-int device_is_dependent(struct device *dev, void *target)
+static int device_is_dependent(struct device *dev, void *target)
{
struct device_link *link;
int ret;
@@ -532,12 +533,26 @@ static void devlink_dev_release(struct device *dev)
/*
* It may take a while to complete this work because of the SRCU
* synchronization in device_link_release_fn() and if the consumer or
- * supplier devices get deleted when it runs, so put it into the "long"
- * workqueue.
+ * supplier devices get deleted when it runs, so put it into the
+ * dedicated workqueue.
*/
- queue_work(system_long_wq, &link->rm_work);
+ queue_work(device_link_wq, &link->rm_work);
}
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+ /*
+ * devlink removal jobs are queued in the dedicated work queue.
+ * To be sure that all removal jobs are terminated, ensure that any
+ * scheduled work has run to completion.
+ */
+ flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
static struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
@@ -1643,7 +1658,7 @@ static void device_links_purge(struct device *dev)
#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
DL_FLAG_PM_RUNTIME)
-static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
static int __init fw_devlink_setup(char *arg)
{
if (!arg)
@@ -1945,6 +1960,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
/* Termination condition. */
if (sup_dev == con) {
+ pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
@@ -1976,8 +1992,11 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
+ par_dev->fwnode);
ret = true;
+ }
if (!sup_dev)
goto out;
@@ -1993,6 +2012,8 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) {
+ pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
+ dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
@@ -2072,6 +2093,7 @@ static int fw_devlink_create_devlink(struct device *con,
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags);
+ pr_debug("----- cycle: end -----\n");
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle);
}
@@ -4091,9 +4113,14 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
+ device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ if (!device_link_wq)
+ goto wq_err;
return 0;
+ wq_err:
+ kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:
@@ -4951,13 +4978,14 @@ define_dev_printk_level(_dev_info, KERN_INFO);
*
* return dev_err_probe(dev, err, ...);
*
- * Note that it is deemed acceptable to use this function for error
- * prints during probe even if the @err is known to never be -EPROBE_DEFER.
+ * Using this helper in your probe function is totally fine even if @err is
+ * known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
- * of the error code and the fact that the error code is returned.
+ * of the error code, it being emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35") and the fact that the error code is returned which allows
+ * more compact error paths.
*
* Returns @err.
- *
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
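Note: in practice the documented benefit looks like this in a probe path; with
-EPROBE_DEFER the message is logged at debug level and recorded as the
deferral reason instead of spamming the console (the clock lookup is an
illustrative example):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}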
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index ef427ee78..0b33e81f9 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -525,19 +525,42 @@ bool cpu_is_hotpluggable(unsigned int cpu)
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
#ifdef CONFIG_GENERIC_CPU_DEVICES
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-#endif
+DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+bool __weak arch_cpu_is_hotpluggable(int cpu)
+{
+ return false;
+}
+
+int __weak arch_register_cpu(int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
+
+ return register_cpu(c, cpu);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __weak arch_unregister_cpu(int num)
+{
+ unregister_cpu(&per_cpu(cpu_devices, num));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_GENERIC_CPU_DEVICES */
static void __init cpu_dev_register_generic(void)
{
-#ifdef CONFIG_GENERIC_CPU_DEVICES
- int i;
+ int i, ret;
- for_each_possible_cpu(i) {
- if (register_cpu(&per_cpu(cpu_devices, i), i))
- panic("Failed to register CPU device");
+ if (!IS_ENABLED(CONFIG_GENERIC_CPU_DEVICES))
+ return;
+
+ for_each_present_cpu(i) {
+ ret = arch_register_cpu(i);
+ if (ret)
+ pr_warn("register_cpu %d failed (%d)\n", i, ret);
}
-#endif
}
#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
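Note: with these weak defaults an architecture only overrides what it needs: a
strong arch_cpu_is_hotpluggable() is enough to get hotplug-capable CPU
devices, while a full arch_register_cpu() replacement is reserved for
architectures that must do more. A hedged sketch of the minimal override (the
cpu != 0 policy is hypothetical):

#include <linux/cpu.h>

/* Strong definition; the linker prefers it over the __weak default. */
bool arch_cpu_is_hotpluggable(int cpu)
{
	return cpu != 0;	/* e.g. only the boot CPU is fixed */
}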
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 0c3725c3e..85152537d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
- dev_info(p->device, "deferred probe pending\n");
+ dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
mutex_unlock(&deferred_probe_mutex);
fw_devlink_probing_done();
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index a0af8f5f1..829270067 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -27,6 +27,7 @@ static const char * const fw_upload_err_str[] = {
[FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
[FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
[FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+ [FW_UPLOAD_ERR_FW_INVALID] = "firmware-invalid",
};
static const char *fw_upload_progress(struct device *dev,
diff --git a/drivers/base/init.c b/drivers/base/init.c
index 397eb9880..c49548351 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -35,8 +35,8 @@ void __init driver_init(void)
of_core_init();
platform_bus_init();
auxiliary_bus_init();
- cpu_dev_init();
memory_dev_init();
node_dev_init();
+ cpu_dev_init();
container_dev_init();
}
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 675ad3139..e23d0b49a 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -82,7 +82,7 @@ static int isa_bus_resume(struct device *dev)
return 0;
}
-static struct bus_type isa_bus_type = {
+static const struct bus_type isa_bus_type = {
.name = "isa",
.match = isa_bus_match,
.probe = isa_bus_probe,
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8a13babd8..14f964a77 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -68,7 +68,7 @@ static inline unsigned long phys_to_block_id(unsigned long phys)
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
-static struct bus_type memory_subsys = {
+static const struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
.dev_name = MEMORY_CLASS_NAME,
.online = memory_subsys_online,
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 4d588f465..a73b0c9a4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -21,7 +21,7 @@
#include <linux/swap.h>
#include <linux/slab.h>
-static struct bus_type node_subsys = {
+static const struct bus_type node_subsys = {
.name = "node",
.dev_name = "node",
};
@@ -74,14 +74,14 @@ static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
* @dev: Device for this memory access class
* @list_node: List element in the node's access list
* @access: The access class rank
- * @hmem_attrs: Heterogeneous memory performance attributes
+ * @coord: Heterogeneous memory performance coordinates
*/
struct node_access_nodes {
struct device dev;
struct list_head list_node;
unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
- struct node_hmem_attrs hmem_attrs;
+ struct access_coordinate coord;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
@@ -126,7 +126,7 @@ static void node_access_release(struct device *dev)
}
static struct node_access_nodes *node_init_node_access(struct node *node,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node_access_nodes *access_node;
struct device *dev;
@@ -167,7 +167,7 @@ static ssize_t property##_show(struct device *dev, \
char *buf) \
{ \
return sysfs_emit(buf, "%u\n", \
- to_access_nodes(dev)->hmem_attrs.property); \
+ to_access_nodes(dev)->coord.property); \
} \
static DEVICE_ATTR_RO(property)
@@ -187,11 +187,11 @@ static struct attribute *access_attrs[] = {
/**
* node_set_perf_attrs - Set the performance values for given access class
* @nid: Node identifier to be set
- * @hmem_attrs: Heterogeneous memory performance attributes
+ * @coord: Heterogeneous memory performance coordinates
* @access: The access class the for the given attributes
*/
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
- unsigned int access)
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
{
struct node_access_nodes *c;
struct node *node;
@@ -205,7 +205,7 @@ void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
if (!c)
return;
- c->hmem_attrs = *hmem_attrs;
+ c->coord = *coord;
for (i = 0; access_attrs[i] != NULL; i++) {
if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
"initiators")) {
@@ -689,7 +689,7 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
*/
int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned int access)
+ enum access_coordinate_class access)
{
struct node *init_node, *targ_node;
struct node_access_nodes *initiator, *target;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 8fdd0073e..01f11629d 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 4110c19c0..e18ba676c 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -793,7 +793,7 @@ static int pm_clk_notify(struct notifier_block *nb,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
-void pm_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9c5a5f4db..fadcd0379 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -579,7 +579,7 @@ bool dev_pm_skip_resume(struct device *dev)
}
/**
- * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -587,7 +587,7 @@ bool dev_pm_skip_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -674,16 +674,22 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
- if (!is_async(dev))
- return false;
-
- get_device(dev);
+ if (is_async(dev)) {
+ dev->power.async_in_progress = true;
- if (async_schedule_dev_nocall(func, dev))
- return true;
+ get_device(dev);
- put_device(dev);
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+ put_device(dev);
+ }
+ /*
+ * Because async_schedule_dev_nocall() above has returned false or it
+ * has not been called at all, func() is not running and it is safe to
+ * update the async_in_progress flag without extra synchronization.
+ */
+ dev->power.async_in_progress = false;
return false;
}
@@ -691,18 +697,10 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- __device_resume_noirq(dev, pm_transition, true);
+ device_resume_noirq(dev, pm_transition, true);
put_device(dev);
}
-static void device_resume_noirq(struct device *dev)
-{
- if (dpm_async_fn(dev, async_resume_noirq))
- return;
-
- __device_resume_noirq(dev, pm_transition, false);
-}
-
static void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
@@ -712,18 +710,28 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
+ /*
+ * Trigger the resume of "async" devices upfront so they don't have to
+ * wait for the "non-async" ones they don't depend on.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ dpm_async_fn(dev, async_resume_noirq);
+
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
- get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- mutex_unlock(&dpm_list_mtx);
+ if (!dev->power.async_in_progress) {
+ get_device(dev);
- device_resume_noirq(dev);
+ mutex_unlock(&dpm_list_mtx);
- put_device(dev);
+ device_resume_noirq(dev, state, false);
- mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
@@ -747,14 +755,14 @@ void dpm_resume_noirq(pm_message_t state)
}
/**
- * __device_resume_early - Execute an "early resume" callback for given device.
+ * device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -820,18 +828,10 @@ static void async_resume_early(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- __device_resume_early(dev, pm_transition, true);
+ device_resume_early(dev, pm_transition, true);
put_device(dev);
}
-static void device_resume_early(struct device *dev)
-{
- if (dpm_async_fn(dev, async_resume_early))
- return;
-
- __device_resume_early(dev, pm_transition, false);
-}
-
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -845,18 +845,28 @@ void dpm_resume_early(pm_message_t state)
mutex_lock(&dpm_list_mtx);
pm_transition = state;
+ /*
+ * Trigger the resume of "async" devices upfront so they don't have to
+ * wait for the "non-async" ones they don't depend on.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ dpm_async_fn(dev, async_resume_early);
+
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
- get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- mutex_unlock(&dpm_list_mtx);
+ if (!dev->power.async_in_progress) {
+ get_device(dev);
- device_resume_early(dev);
+ mutex_unlock(&dpm_list_mtx);
- put_device(dev);
+ device_resume_early(dev, state, false);
- mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
@@ -876,12 +886,12 @@ void dpm_resume_start(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
- * __device_resume - Execute "resume" callbacks for given device.
+ * device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
*/
-static void __device_resume(struct device *dev, pm_message_t state, bool async)
+static void device_resume(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -975,18 +985,10 @@ static void async_resume(void *data, async_cookie_t cookie)
{
struct device *dev = data;
- __device_resume(dev, pm_transition, true);
+ device_resume(dev, pm_transition, true);
put_device(dev);
}
-static void device_resume(struct device *dev)
-{
- if (dpm_async_fn(dev, async_resume))
- return;
-
- __device_resume(dev, pm_transition, false);
-}
-
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -1006,16 +1008,25 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
+ /*
+ * Trigger the resume of "async" devices upfront so they don't have to
+ * wait for the "non-async" ones they don't depend on.
+ */
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ dpm_async_fn(dev, async_resume);
+
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
get_device(dev);
- mutex_unlock(&dpm_list_mtx);
+ if (!dev->power.async_in_progress) {
+ mutex_unlock(&dpm_list_mtx);
- device_resume(dev);
+ device_resume(dev, state, false);
- mutex_lock(&dpm_list_mtx);
+ mutex_lock(&dpm_list_mtx);
+ }
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
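The three resume phases (noirq, early, full) now share one pattern: every device's async handler is kicked off upfront, and the synchronous walk then skips whatever the async path has already claimed via power.async_in_progress. A minimal sketch of that shape, with illustrative list and callback names and all locking elided:

    /* schedule async-capable devices first so they never wait on sync ones */
    list_for_each_entry(dev, &pending, power.entry)
            dpm_async_fn(dev, async_cb);    /* sets power.async_in_progress */

    while (!list_empty(&pending)) {
            dev = to_device(pending.next);
            list_move_tail(&dev->power.entry, &done);
            if (!dev->power.async_in_progress)
                    sync_cb(dev);           /* not in flight; run inline */
    }
    async_synchronize_full();               /* join all async handlers */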
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 8e93167f1..bd77f6734 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -201,7 +201,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
if (!qos)
return -ENOMEM;
- n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
+ n = kcalloc(3, sizeof(*n), GFP_KERNEL);
if (!n) {
kfree(qos);
return -ENOMEM;
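kcalloc(n, size, flags) behaves like kzalloc(n * size, flags) except that it returns NULL if the multiplication would overflow rather than silently allocating a truncated buffer; with a constant count of 3 both forms are safe, so this is purely the preferred idiom. A sketch of the difference:

    n = kzalloc(count * sizeof(*n), GFP_KERNEL); /* wraps on overflow */
    n = kcalloc(count, sizeof(*n), GFP_KERNEL);  /* NULL on overflow  */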
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 4545669cb..05793c9fb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
+#include <linux/rculist.h>
#include <trace/events/rpm.h>
#include "../base.h"
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 8c40abed7..a1b01ab42 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -473,7 +473,7 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char **values;
int nval, ret;
- nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+ nval = fwnode_property_string_array_count(fwnode, propname);
if (nval < 0)
return nval;
@@ -499,6 +499,41 @@ out_free:
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
/**
+ * fwnode_property_match_property_string - find a property string value in an array and return index
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property holding the string value
+ * @array: String array to search in
+ * @n: Size of the @array
+ *
+ * Find a property string value in a given @array and if it is found return
+ * the index back.
+ *
+ * Return: index, starting from %0, if the string value was found in the @array (success),
+ * %-ENOENT when the string value was not found in the @array,
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO or %-EILSEQ if the property is not a string,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+ const char *propname, const char * const *array, size_t n)
+{
+ const char *string;
+ int ret;
+
+ ret = fwnode_property_read_string(fwnode, propname, &string);
+ if (ret)
+ return ret;
+
+ ret = match_string(array, n, string);
+ if (ret < 0)
+ ret = -ENOENT;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
+
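A plausible caller of the new helper, matching a single-string property against a driver table; the property name and table contents here are invented for illustration:

    static const char * const modes[] = { "host", "peripheral", "otg" };
    int idx;

    idx = fwnode_property_match_property_string(fwnode, "dr_mode",
                                                modes, ARRAY_SIZE(modes));
    if (idx < 0)
            return idx;  /* -ENOENT, -EINVAL, -ENODATA, ... as documented */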
+/**
* fwnode_property_get_reference_args() - Find a reference with arguments
* @fwnode: Firmware node where to look for the reference
* @prop: The name of the property
@@ -508,6 +543,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
+ * May be NULL.
*
* Obtain a reference based on a named property in an fwnode, with
* integer arguments.
@@ -595,6 +631,34 @@ const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
}
/**
+ * fwnode_name_eq - Return true if node name is equal
+ * @fwnode: The firmware node
+ * @name: The name to which to compare the node name
+ *
+ * Compare the name provided as an argument to the name of the node, stopping
+ * the comparison at either NUL or '@' character, whichever comes first. This
+ * function is generally used for comparing node names while ignoring the
+ * possible unit address of the node.
+ *
+ * Return: true if the node name matches with the name provided in the @name
+ * argument, false otherwise.
+ */
+bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name)
+{
+ const char *node_name;
+ ptrdiff_t len;
+
+ node_name = fwnode_get_name(fwnode);
+ if (!node_name)
+ return false;
+
+ len = strchrnul(node_name, '@') - node_name;
+
+ return str_has_prefix(node_name, name) == len;
+}
+EXPORT_SYMBOL_GPL(fwnode_name_eq);
+
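The single comparison works because str_has_prefix() returns the length of the matched prefix (0 on mismatch), so equality with len proves both that @name prefixes the node name and that the prefix ends exactly at the NUL or '@' separator. With a hypothetical node named "ethernet@1f00" (len = 8):

    fwnode_name_eq(node, "ethernet");  /* str_has_prefix() == 8 -> true  */
    fwnode_name_eq(node, "ether");     /* str_has_prefix() == 5 -> false */
    fwnode_name_eq(node, "ethernetX"); /* str_has_prefix() == 0 -> false */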
+/**
 * fwnode_get_parent - Return parent firmware node
* @fwnode: Firmware whose parent is retrieved
*
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 9a9ea514c..583dd5d7d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -318,6 +318,7 @@ struct regmap_ram_data {
bool *read;
bool *written;
enum regmap_endian reg_endian;
+ bool (*noinc_reg)(struct regmap_ram_data *data, unsigned int reg);
};
/*
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 41edd6a43..55999a50c 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -112,7 +112,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
unsigned long *entry, *lower, *upper;
unsigned long lower_index, lower_last;
unsigned long upper_index, upper_last;
- int ret;
+ int ret = 0;
lower = NULL;
upper = NULL;
@@ -145,7 +145,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;
- upper = kmemdup(&entry[max + 1],
+ upper = kmemdup(&entry[max - mas.index + 1],
((mas.last - max) *
sizeof(unsigned long)),
map->alloc_flags);
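The fix reflects that the cached maple-tree entry is an array whose element 0 holds register mas.index, not register 0, so the slice surviving above max begins at offset max - mas.index + 1 within the entry. Worked example: a cached block for registers 8..15 gives mas.index = 8; dropping through max = 11 must copy entry[4..7] (registers 12..15), i.e. &entry[11 - 8 + 1], whereas the old &entry[max + 1] = &entry[12] read past the end of the 8-element array.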
@@ -244,7 +244,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
- int ret;
+ int ret = 0;
bool sync_needed = false;
map->cache_bypass = true;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index bdd80b73c..fb84cda92 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -226,8 +226,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << MAX_ORDER))
- count = PAGE_SIZE << MAX_ORDER;
+ if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
+ count = PAGE_SIZE << MAX_PAGE_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
@@ -373,8 +373,8 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file,
if (*ppos < 0 || !count)
return -EINVAL;
- if (count > (PAGE_SIZE << MAX_ORDER))
- count = PAGE_SIZE << MAX_ORDER;
+ if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
+ count = PAGE_SIZE << MAX_PAGE_ORDER;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
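MAX_ORDER was renamed to MAX_PAGE_ORDER in this release with unchanged meaning: the largest order the buddy allocator serves, so PAGE_SIZE << MAX_PAGE_ORDER is the biggest physically contiguous allocation. With the default MAX_PAGE_ORDER of 10 and 4 KiB pages, the cap these debugfs reads are clamped to works out to 4096 << 10 = 4 MiB.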
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 4c9e38ead..0d957c5f1 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -1203,6 +1203,66 @@ static void raw_write(struct kunit *test)
regmap_exit(map);
}
+static bool reg_zero(struct device *dev, unsigned int reg)
+{
+ return reg == 0;
+}
+
+static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
+{
+ return reg == 0;
+}
+
+static void raw_noinc_write(struct kunit *test)
+{
+ struct raw_test_types *t = (struct raw_test_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ u16 val_test, val_last;
+ u16 val_array[BLOCK_TEST_SIZE];
+
+ config = raw_regmap_config;
+ config.volatile_reg = reg_zero;
+ config.writeable_noinc_reg = reg_zero;
+ config.readable_noinc_reg = reg_zero;
+
+ map = gen_raw_regmap(&config, t, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ data->noinc_reg = ram_reg_zero;
+
+ get_random_bytes(&val_array, sizeof(val_array));
+
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
+ val_test = be16_to_cpu(val_array[1]) + 100;
+ val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
+ } else {
+ val_test = le16_to_cpu(val_array[1]) + 100;
+ val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
+ }
+
+ /* Put some data into the register following the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));
+
+ /* Write some data to the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
+ sizeof(val_array)));
+
+ /* We should read back the last value written */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
+ KUNIT_ASSERT_EQ(test, val_last, val);
+
+ /* Make sure we didn't touch the register after the noinc register */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_ASSERT_EQ(test, val_test, val);
+
+ regmap_exit(map);
+}
+
static void raw_sync(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
@@ -1306,6 +1366,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
+ KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
{}
};
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 85f34a5de..192d6b131 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -65,12 +65,12 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
return ERR_PTR(-EINVAL);
}
- data->read = kcalloc(sizeof(bool), config->max_register + 1,
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
- data->written = kcalloc(sizeof(bool), config->max_register + 1,
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
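kcalloc() takes its arguments in calloc() order, kcalloc(n, size, flags); the swapped form allocated the right number of bytes only because multiplication commutes, but the canonical order is what the API documents and what static checkers flag. The corrected call:

    /* void *kcalloc(size_t n, size_t size, gfp_t flags); */
    data->read = kcalloc(config->max_register + 1, sizeof(bool), GFP_KERNEL);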
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index c9b800885..93ae07b50 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -41,10 +41,15 @@ static int regmap_raw_ram_gather_write(void *context,
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
- memcpy(&our_buf[r], val, val_len);
-
- for (i = 0; i < val_len / 2; i++)
- data->written[r + i] = true;
+ if (data->noinc_reg && data->noinc_reg(data, r)) {
+ memcpy(&our_buf[r], val + val_len - 2, 2);
+ data->written[r] = true;
+ } else {
+ memcpy(&our_buf[r], val, val_len);
+
+ for (i = 0; i < val_len / 2; i++)
+ data->written[r + i] = true;
+ }
return 0;
}
@@ -70,10 +75,16 @@ static int regmap_raw_ram_read(void *context,
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
- memcpy(val, &our_buf[r], val_len);
-
- for (i = 0; i < val_len / 2; i++)
- data->read[r + i] = true;
+ if (data->noinc_reg && data->noinc_reg(data, r)) {
+ for (i = 0; i < val_len; i += 2)
+ memcpy(val + i, &our_buf[r], 2);
+ data->read[r] = true;
+ } else {
+ memcpy(val, &our_buf[r], val_len);
+
+ for (i = 0; i < val_len / 2; i++)
+ data->read[r + i] = true;
+ }
return 0;
}
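A "noinc" register is a fixed address fronting a stream (typically a FIFO), so the RAM backend emulates it by collapsing the transfer onto a single register: a multi-element write leaves only the last element stored, and a read replays the stored element for every requested position. Assuming 16-bit values, roughly:

    u16 vals[4] = { 1, 2, 3, 4 };
    unsigned int v;

    regmap_noinc_write(map, 0, vals, sizeof(vals)); /* RAM keeps only 4 */
    regmap_read(map, 0, &v);                        /* v == 4           */
    regmap_read(map, 1, &v);                        /* neighbour intact */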
@@ -111,12 +122,12 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
return ERR_PTR(-EINVAL);
}
- data->read = kcalloc(sizeof(bool), config->max_register + 1,
+ data->read = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
- data->written = kcalloc(sizeof(bool), config->max_register + 1,
+ data->written = kcalloc(config->max_register + 1, sizeof(bool),
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ea6157747..6db77d8e4 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -2136,7 +2136,7 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
}
/**
- * regmap_noinc_write(): Write data from a register without incrementing the
+ * regmap_noinc_write(): Write data to a register without incrementing the
* register number
*
* @map: Register map to write to
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 8dec5228f..282c38aec 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -28,7 +28,7 @@ struct soc_device {
int soc_dev_num;
};
-static struct bus_type soc_bus_type = {
+static const struct bus_type soc_bus_type = {
.name = "soc",
};
static bool soc_bus_registered;
@@ -106,7 +106,7 @@ static void soc_release(struct device *dev)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+ ida_free(&soc_ida, soc_dev->soc_dev_num);
kfree(soc_dev->dev.groups);
kfree(soc_dev);
}
@@ -155,7 +155,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
/* Fetch a unique (reclaimable) SOC ID. */
- ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&soc_ida, GFP_KERNEL);
if (ret < 0)
goto out3;
soc_dev->soc_dev_num = ret;
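The deprecated ida_simple_*() wrappers map directly onto the core IDA API, with one trap: ida_simple_get()'s end argument was exclusive while ida_alloc_max()'s max is inclusive. As a summary sketch:

    id = ida_simple_get(&ida, 0, 0, GFP_KERNEL);    /* old, unbounded     */
    id = ida_alloc(&ida, GFP_KERNEL);               /* new equivalent     */

    id = ida_simple_get(&ida, 0, end, GFP_KERNEL);  /* old, end exclusive */
    id = ida_alloc_max(&ida, end - 1, GFP_KERNEL);  /* new, max inclusive */

    ida_simple_remove(&ida, id);                    /* old */
    ida_free(&ida, id);                             /* new */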
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 079bd14bd..36512fb75 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -750,10 +750,10 @@ static void software_node_release(struct kobject *kobj)
struct swnode *swnode = kobj_to_swnode(kobj);
if (swnode->parent) {
- ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ ida_free(&swnode->parent->child_ids, swnode->id);
list_del(&swnode->entry);
} else {
- ida_simple_remove(&swnode_root_ids, swnode->id);
+ ida_free(&swnode_root_ids, swnode->id);
}
if (swnode->allocated)
@@ -779,8 +779,8 @@ swnode_register(const struct software_node *node, struct swnode *parent,
if (!swnode)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
- 0, 0, GFP_KERNEL);
+ ret = ida_alloc(parent ? &parent->child_ids : &swnode_root_ids,
+ GFP_KERNEL);
if (ret < 0) {
kfree(swnode);
return ERR_PTR(ret);
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index aa0581cda..ed3be52ab 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -280,7 +280,7 @@ static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
/* check for Header type 0 */
bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
sizeof(u8));
- if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
+ if ((byte_val & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_NORMAL)
return cap_ptr;
/* check if the capability pointer field exists */
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 37eff1c97..b1b47d88f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -384,7 +384,8 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
- blk_queue_max_hw_sectors(gd->queue, BLK_DEF_MAX_SECTORS);
+	/* value picked from the historical block layer max_sectors cap */
+ blk_queue_max_hw_sectors(gd->queue, 2560u);
blk_queue_io_opt(gd->queue, SZ_2M);
d->bufpool = mp;
d->blkq = gd->queue;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 64b3a1c76..742b2908f 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -838,8 +838,8 @@ static bool plausible_request_size(int size)
}
/* clear the bit corresponding to the piece of storage in question:
- * size byte of data starting from sector. Only clear a bits of the affected
- * one ore more _aligned_ BM_BLOCK_SIZE blocks.
+ * size byte of data starting from sector. Only clear bits of the affected
+ * one or more _aligned_ BM_BLOCK_SIZE blocks.
*
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
@@ -957,7 +957,9 @@ static int _is_in_al(struct drbd_device *device, unsigned int enr)
* @device: DRBD device.
* @sector: The sector number.
*
- * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
+ * This function sleeps on al_wait.
+ *
+ * Returns: %0 on success, -EINTR if interrupted.
*/
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
@@ -1004,11 +1006,13 @@ retry:
/**
* drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
- * @device: DRBD device.
+ * @peer_device: DRBD device.
* @sector: The sector number.
*
* Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
- * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
+ * tries to set it to BME_LOCKED.
+ *
+ * Returns: %0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector)
@@ -1190,7 +1194,7 @@ void drbd_rs_cancel_all(struct drbd_device *device)
* drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
* @device: DRBD device.
*
- * Returns 0 upon success, -EAGAIN if at least one reference count was
+ * Returns: %0 upon success, -EAGAIN if at least one reference count was
* not zero.
*/
int drbd_rs_del_all(struct drbd_device *device)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 11114a5d9..d0e41d52d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3079,7 +3079,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
}
}
-#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+#define MAX_LEN (1UL << MAX_PAGE_ORDER << PAGE_SHIFT)
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 552f56a84..f8145499d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,9 +243,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
- file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
- file_end_write(file);
if (likely(bw == bvec->bv_len))
return 0;
@@ -1301,8 +1299,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_set_size(lo, new_size);
}
- loop_config_discard(lo);
-
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
@@ -2036,7 +2032,8 @@ static int loop_add(int i)
}
lo->lo_queue = lo->lo_disk->queue;
- blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+	/* value picked from the historical block layer max_sectors cap */
+ blk_queue_max_hw_sectors(lo->lo_queue, 2560u);
/*
* By default, we do buffer IO, so it doesn't make sense to enable
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index df738eab0..b7c332528 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -334,10 +334,8 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
if (!nbd->pid)
return 0;
- if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
- nbd->disk->queue->limits.discard_granularity = blksize;
+ if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
- }
blk_queue_logical_block_size(nbd->disk->queue, blksize);
blk_queue_physical_block_size(nbd->disk->queue, blksize);
@@ -1353,7 +1351,6 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->config = NULL;
nbd->tag_set.timeout = 0;
- nbd->disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(nbd->disk->queue, 0);
mutex_unlock(&nbd->config_lock);
@@ -1846,7 +1843,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- disk->queue->limits.discard_granularity = 0;
blk_queue_max_discard_sectors(disk->queue, 0);
blk_queue_max_segment_size(disk->queue, UINT_MAX);
blk_queue_max_segments(disk->queue, USHRT_MAX);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 13ed446b5..36755f263 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1840,7 +1840,7 @@ static void null_del_dev(struct nullb *nullb)
dev = nullb->dev;
- ida_simple_remove(&nullb_indexes, nullb->index);
+ ida_free(&nullb_indexes, nullb->index);
list_del_init(&nullb->list);
@@ -1880,7 +1880,6 @@ static void null_config_discard(struct nullb *nullb)
return;
}
- nullb->q->limits.discard_granularity = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
}
@@ -2175,7 +2174,7 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
mutex_lock(&lock);
- rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0) {
mutex_unlock(&lock);
goto out_cleanup_zone;
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 55c5b48bc..6f5e09948 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -159,7 +159,7 @@ int null_register_zoned_dev(struct nullb *nullb)
struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- disk_set_zoned(nullb->disk, BLK_ZONED_HM);
+ disk_set_zoned(nullb->disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
blk_queue_chunk_sectors(q, dev->zone_size_sects);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1e2596c5e..12b5d53ec 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5332,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
if (need_put) {
destroy_workqueue(rbd_dev->task_wq);
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
}
rbd_dev_free(rbd_dev);
@@ -5408,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
/* get an id and fill in device name */
- rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
- minor_to_rbd_dev_id(1 << MINORBITS),
- GFP_KERNEL);
+ rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
+ minor_to_rbd_dev_id(1 << MINORBITS) - 1,
+ GFP_KERNEL);
if (rbd_dev->dev_id < 0)
goto fail_rbd_dev;
@@ -5431,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return rbd_dev;
fail_dev_id:
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
rbd_dev_free(rbd_dev);
return NULL;
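The "- 1" in the rbd conversion above is exactly the exclusive-to-inclusive adjustment noted earlier: ida_simple_get() stopped short of minor_to_rbd_dev_id(1 << MINORBITS), so ida_alloc_max() must be capped one below it to keep the same ID space.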
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index b0550b686..4044c369d 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1006,10 +1006,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
msg.prio = cpu_to_le16(req_get_ioprio(rq));
/*
- * We only support discards with single segment for now.
+	 * We only support discards/WRITE_ZEROES with a single segment for now.
* See queue limits.
*/
- if (req_op(rq) != REQ_OP_DISCARD)
+ if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
if (sg_cnt == 0)
@@ -1362,6 +1362,8 @@ static void setup_request_queue(struct rnbd_clt_dev *dev,
blk_queue_write_cache(dev->queue,
!!(rsp->cache_policy & RNBD_WRITEBACK),
!!(rsp->cache_policy & RNBD_FUA));
+ blk_queue_max_write_zeroes_sectors(dev->queue,
+ le32_to_cpu(rsp->max_write_zeroes_sectors));
}
static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
@@ -1567,8 +1569,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
dev = init_dev(sess, access_mode, pathname, nr_poll_queues);
if (IS_ERR(dev)) {
- pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
- pathname, sess->sessname, PTR_ERR(dev));
+ pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %pe\n",
+ pathname, sess->sessname, dev);
ret = PTR_ERR(dev);
goto put_sess;
}
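The %pe printk format prints an ERR_PTR symbolically, which is why these messages can pass the error pointer itself instead of a separate PTR_ERR() value. Sketch:

    void *p = ERR_PTR(-ENOMEM);

    pr_err("map failed: %pe\n", p);          /* "map failed: -ENOMEM" */
    pr_err("map failed: %ld\n", PTR_ERR(p)); /* old style: "-12"      */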
@@ -1626,10 +1628,11 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
+ "map_device: Device mapped as %s (nsectors: %llu, logical_block_size: %d, physical_block_size: %d, max_write_zeroes_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, le64_to_cpu(rsp->nsectors),
le16_to_cpu(rsp->logical_block_size),
le16_to_cpu(rsp->physical_block_size),
+ le32_to_cpu(rsp->max_write_zeroes_sectors),
le32_to_cpu(rsp->max_discard_sectors),
le32_to_cpu(rsp->discard_granularity),
le32_to_cpu(rsp->discard_alignment),
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index e32f8f2c8..f35be51d2 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -128,7 +128,7 @@ enum rnbd_cache_policy {
* @device_id: device_id on server side to identify the device
* @nsectors: number of sectors in the usual 512b unit
* @max_hw_sectors: max hardware sectors in the usual 512b unit
- * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
+ * @max_write_zeroes_sectors: max sectors for WRITE ZEROES in the 512b unit
* @max_discard_sectors: max. sectors that can be discarded at once in 512b
* unit.
* @discard_granularity: size of the internal discard allocation unit in bytes
@@ -145,7 +145,7 @@ struct rnbd_msg_open_rsp {
__le32 device_id;
__le64 nsectors;
__le32 max_hw_sectors;
- __le32 max_write_same_sectors;
+ __le32 max_write_zeroes_sectors;
__le32 max_discard_sectors;
__le32 discard_granularity;
__le32 discard_alignment;
@@ -186,7 +186,7 @@ struct rnbd_msg_io {
* @RNBD_OP_FLUSH: flush the volatile write cache
* @RNBD_OP_DISCARD: discard sectors
* @RNBD_OP_SECURE_ERASE: securely erase sectors
- * @RNBD_OP_WRITE_SAME: write the same sectors many times
+ * @RNBD_OP_WRITE_ZEROES: write zeroes to the given sectors
* @RNBD_F_SYNC: request is sync (sync write or read)
* @RNBD_F_FUA: forced unit access
@@ -199,7 +199,7 @@ enum rnbd_io_flags {
RNBD_OP_FLUSH = 2,
RNBD_OP_DISCARD = 3,
RNBD_OP_SECURE_ERASE = 4,
- RNBD_OP_WRITE_SAME = 5,
+ RNBD_OP_WRITE_ZEROES = 5,
/* Flags */
RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
@@ -236,6 +236,9 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
case RNBD_OP_SECURE_ERASE:
bio_opf = REQ_OP_SECURE_ERASE;
break;
+ case RNBD_OP_WRITE_ZEROES:
+ bio_opf = REQ_OP_WRITE_ZEROES;
+ break;
default:
WARN(1, "Unknown RNBD type: %d (flags %d)\n",
rnbd_op(rnbd_opf), rnbd_opf);
@@ -268,6 +271,9 @@ static inline u32 rq_to_rnbd_flags(struct request *rq)
case REQ_OP_SECURE_ERASE:
rnbd_opf = RNBD_OP_SECURE_ERASE;
break;
+ case REQ_OP_WRITE_ZEROES:
+ rnbd_opf = RNBD_OP_WRITE_ZEROES;
+ break;
case REQ_OP_FLUSH:
rnbd_opf = RNBD_OP_FLUSH;
break;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index ab78eab97..3a0d5dcec 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -136,8 +136,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
if (IS_ERR(sess_dev)) {
- pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
- srv_sess->sessname, dev_id);
+ pr_err_ratelimited("Got I/O request on session %s for unknown device id %d: %pe\n",
+ srv_sess->sessname, dev_id, sess_dev);
err = -ENOTCONN;
goto err;
}
@@ -544,7 +544,8 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev));
rsp->max_hw_sectors =
cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));
- rsp->max_write_same_sectors = 0;
+ rsp->max_write_zeroes_sectors =
+ cpu_to_le32(bdev_write_zeroes_sectors(bdev));
rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev));
rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev));
rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev));
@@ -710,24 +711,24 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
if (IS_ERR(full_path)) {
ret = PTR_ERR(full_path);
- pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
- open_msg->dev_name, srv_sess->sessname, ret);
+ pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %pe\n",
+ open_msg->dev_name, srv_sess->sessname, full_path);
goto reject;
}
bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
if (IS_ERR(bdev_handle)) {
ret = PTR_ERR(bdev_handle);
- pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
- full_path, srv_sess->sessname, ret);
+ pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
+ full_path, srv_sess->sessname, bdev_handle);
goto free_path;
}
srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
- pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
- full_path, srv_sess->sessname, PTR_ERR(srv_dev));
+ pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
+ full_path, srv_sess->sessname, srv_dev);
ret = PTR_ERR(srv_dev);
goto blkdev_put;
}
@@ -737,8 +738,8 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
- pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
- full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
+ pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %pe\n",
+ full_path, srv_sess->sessname, srv_sess_dev);
ret = PTR_ERR(srv_sess_dev);
goto srv_dev_put;
}
@@ -819,7 +820,7 @@ static int __init rnbd_srv_init_module(void)
};
rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
if (IS_ERR(rtrs_ctx)) {
- pr_err("rtrs_srv_open(), err: %d\n", err);
+ pr_err("rtrs_srv_open(), err: %pe\n", rtrs_ctx);
return PTR_ERR(rtrs_ctx);
}
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 83600b45e..1dfb2e778 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -36,7 +36,7 @@
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -250,7 +250,7 @@ static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
const struct ublk_param_zoned *p = &ub->params.zoned;
- disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
+ disk_set_zoned(ub->ub_disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
blk_queue_required_elevator_features(ub->ub_disk->queue,
ELEVATOR_F_ZBD_SEQ_WRITE);
@@ -893,12 +893,9 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
*/
if (ublk_need_map_req(req)) {
struct iov_iter iter;
- struct iovec iov;
const int dir = ITER_DEST;
- import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
- &iov, &iter);
-
+ import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
@@ -915,13 +912,11 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
if (ublk_need_unmap_req(req)) {
struct iov_iter iter;
- struct iovec iov;
const int dir = ITER_SOURCE;
WARN_ON_ONCE(io->res > rq_bytes);
- import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
- &iov, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
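import_ubuf() builds an iov_iter directly over one user buffer, removing the throwaway struct iovec that import_single_range() required. The conversion shape, roughly:

    struct iov_iter iter;

    /* old:
     *   struct iovec iov;
     *   import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
     */
    import_ubuf(ITER_DEST, ubuf, len, &iter);   /* no iovec needed */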
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2c846eed5..2bf14a0e2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -367,8 +367,6 @@ static void virtblk_done(struct virtqueue *vq)
blk_mq_complete_request(req);
req_done = true;
}
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
/* In case queue is stopped waiting for more buffers. */
@@ -722,52 +720,15 @@ fail_report:
return ret;
}
-static void virtblk_revalidate_zones(struct virtio_blk *vblk)
-{
- u8 model;
-
- virtio_cread(vblk->vdev, struct virtio_blk_config,
- zoned.model, &model);
- switch (model) {
- default:
- dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
- fallthrough;
- case VIRTIO_BLK_Z_NONE:
- case VIRTIO_BLK_Z_HA:
- disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
- return;
- case VIRTIO_BLK_Z_HM:
- WARN_ON_ONCE(!vblk->zone_sectors);
- if (!blk_revalidate_disk_zones(vblk->disk, NULL))
- set_capacity_and_notify(vblk->disk, 0);
- }
-}
-
static int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk,
struct request_queue *q)
{
u32 v, wg;
- u8 model;
-
- virtio_cread(vdev, struct virtio_blk_config,
- zoned.model, &model);
-
- switch (model) {
- case VIRTIO_BLK_Z_NONE:
- case VIRTIO_BLK_Z_HA:
- /* Present the host-aware device as non-zoned */
- return 0;
- case VIRTIO_BLK_Z_HM:
- break;
- default:
- dev_err(&vdev->dev, "unsupported zone model %d\n", model);
- return -EINVAL;
- }
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- disk_set_zoned(vblk->disk, BLK_ZONED_HM);
+ disk_set_zoned(vblk->disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
virtio_cread(vdev, struct virtio_blk_config,
@@ -839,23 +800,12 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
*/
#define virtblk_report_zones NULL
-static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
-{
-}
-
static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
struct virtio_blk *vblk, struct request_queue *q)
{
- u8 model;
-
- virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
- if (model == VIRTIO_BLK_Z_HM) {
- dev_err(&vdev->dev,
- "virtio_blk: zoned devices are not supported");
- return -EOPNOTSUPP;
- }
-
- return 0;
+ dev_err(&vdev->dev,
+ "virtio_blk: zoned devices are not supported");
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -1005,7 +955,6 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct virtio_blk *vblk =
container_of(work, struct virtio_blk, config_work);
- virtblk_revalidate_zones(vblk);
virtblk_update_capacity(vblk, true);
}
@@ -1570,9 +1519,26 @@ static int virtblk_probe(struct virtio_device *vdev)
* placed after the virtio_device_ready() call above.
*/
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
- err = virtblk_probe_zoned_device(vdev, vblk, q);
- if (err)
+ u8 model;
+
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model,
+ &model);
+ switch (model) {
+ case VIRTIO_BLK_Z_NONE:
+ case VIRTIO_BLK_Z_HA:
+ /* Present the host-aware device as non-zoned */
+ break;
+ case VIRTIO_BLK_Z_HM:
+ err = virtblk_probe_zoned_device(vdev, vblk, q);
+ if (err)
+ goto out_cleanup_disk;
+ break;
+ default:
+ dev_err(&vdev->dev, "unsupported zone model %d\n",
+ model);
+ err = -EINVAL;
goto out_cleanup_disk;
+ }
}
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 5ff50e76c..1432c8318 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -132,7 +132,7 @@ struct blkif_x86_32_request {
struct blkif_x86_64_request_rw {
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
- uint32_t _pad1; /* offsetof(blkif_reqest..,u.rw.id)==8 */
+ uint32_t _pad1; /* offsetof(blkif_request..,u.rw.id)==8 */
uint64_t id;
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 0386b7da0..7b29cce60 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -59,8 +59,8 @@ config ZRAM_WRITEBACK
bool "Write back incompressible or idle page to backing device"
depends on ZRAM
help
- With incompressible page, there is no memory saving to keep it
- in memory. Instead, write it out to backing device.
+ This lets zram entries (incompressible or idle pages) be written
+ back to a backing device, helping save memory.
For this feature, admin should set up backing device via
/sys/block/zramX/backing_dev.
@@ -69,9 +69,18 @@ config ZRAM_WRITEBACK
See Documentation/admin-guide/blockdev/zram.rst for more information.
+config ZRAM_TRACK_ENTRY_ACTIME
+ bool "Track access time of zram entries"
+ depends on ZRAM
+ help
+	  With this feature, zram tracks the access time of every stored
+	  entry (page), which can be used for more fine-grained IDLE
+	  page writeback.
+
config ZRAM_MEMORY_TRACKING
bool "Track zRam block status"
depends on ZRAM && DEBUG_FS
+ select ZRAM_TRACK_ENTRY_ACTIME
help
With this feature, admin can track the state of allocated blocks
of zRAM. Admin could see the information via
@@ -86,4 +95,4 @@ config ZRAM_MULTI_COMP
This will enable multi-compression streams, so that ZRAM can
re-compress pages using a potentially slower but more effective
compression algorithm. Note, that IDLE page recompression
- requires ZRAM_MEMORY_TRACKING.
+ requires ZRAM_TRACK_ENTRY_ACTIME.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d77d3664c..6772e0c65 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -174,6 +174,14 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
return prio & ZRAM_COMP_PRIORITY_MASK;
}
+static void zram_accessed(struct zram *zram, u32 index)
+{
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ zram->table[index].ac_time = ktime_get_boottime();
+#endif
+}
+
static inline void update_used_max(struct zram *zram,
const unsigned long pages)
{
@@ -293,8 +301,9 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
zram_slot_lock(zram, index);
if (zram_allocated(zram, index) &&
!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
- is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
+ is_idle = !cutoff || ktime_after(cutoff,
+ zram->table[index].ac_time);
#endif
if (is_idle)
zram_set_flag(zram, index, ZRAM_IDLE);
@@ -317,7 +326,7 @@ static ssize_t idle_store(struct device *dev,
*/
u64 age_sec;
- if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec))
+ if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
cutoff_time = ktime_sub(ktime_get_boottime(),
ns_to_ktime(age_sec * NSEC_PER_SEC));
else
@@ -841,12 +850,6 @@ static void zram_debugfs_destroy(void)
debugfs_remove_recursive(zram_debugfs_root);
}
-static void zram_accessed(struct zram *zram, u32 index)
-{
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram->table[index].ac_time = ktime_get_boottime();
-}
-
static ssize_t read_block_state(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -930,10 +933,6 @@ static void zram_debugfs_unregister(struct zram *zram)
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
-static void zram_accessed(struct zram *zram, u32 index)
-{
- zram_clear_flag(zram, index, ZRAM_IDLE);
-};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
@@ -1254,7 +1253,7 @@ static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = 0;
#endif
if (zram_test_flag(zram, index, ZRAM_IDLE))
@@ -1322,9 +1321,9 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
void *mem;
value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_atomic(page);
+ mem = kmap_local_page(page);
zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_atomic(mem);
+ kunmap_local(mem);
return 0;
}
@@ -1337,14 +1336,14 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
- dst = kmap_atomic(page);
+ dst = kmap_local_page(page);
memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ kunmap_local(dst);
ret = 0;
} else {
- dst = kmap_atomic(page);
+ dst = kmap_local_page(page);
ret = zcomp_decompress(zstrm, src, size, dst);
- kunmap_atomic(dst);
+ kunmap_local(dst);
zcomp_stream_put(zram->comps[prio]);
}
zs_unmap_object(zram->mem_pool, handle);
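kmap_local_page()/kunmap_local() supersede kmap_atomic()/kunmap_atomic(): the mapping is still thread-local, but acquiring it no longer disables preemption or page faults, which is why this mechanical substitution is safe here. The pattern used throughout the file:

    void *dst = kmap_local_page(page);  /* was kmap_atomic(page)  */
    memcpy(dst, src, PAGE_SIZE);
    kunmap_local(dst);                  /* was kunmap_atomic(dst) */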
@@ -1417,21 +1416,21 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element = 0;
enum zram_pageflags flags = 0;
- mem = kmap_atomic(page);
+ mem = kmap_local_page(page);
if (page_same_filled(mem, &element)) {
- kunmap_atomic(mem);
+ kunmap_local(mem);
/* Free memory associated with this sector now. */
flags = ZRAM_SAME;
atomic64_inc(&zram->stats.same_pages);
goto out;
}
- kunmap_atomic(mem);
+ kunmap_local(mem);
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
ret = zcomp_compress(zstrm, src, &comp_len);
- kunmap_atomic(src);
+ kunmap_local(src);
if (unlikely(ret)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1495,10 +1494,10 @@ compress_again:
src = zstrm->buffer;
if (comp_len == PAGE_SIZE)
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
memcpy(dst, src, comp_len);
if (comp_len == PAGE_SIZE)
- kunmap_atomic(src);
+ kunmap_local(src);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_unmap_object(zram->mem_pool, handle);
@@ -1615,9 +1614,9 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
num_recomps++;
zstrm = zcomp_stream_get(zram->comps[prio]);
- src = kmap_atomic(page);
+ src = kmap_local_page(page);
ret = zcomp_compress(zstrm, src, &comp_len_new);
- kunmap_atomic(src);
+ kunmap_local(src);
if (ret) {
zcomp_stream_put(zram->comps[prio]);
@@ -2227,7 +2226,6 @@ static int zram_add(void)
ZRAM_LOGICAL_BLOCK_SIZE);
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
- zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index d090753f9..3b94d12f4 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -69,7 +69,7 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
-#ifdef CONFIG_ZRAM_MEMORY_TRACKING
+#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
};
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 2462796a5..e532e2be1 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -441,7 +441,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
return PTR_ERR(skb);
}
- if (skb->len != sizeof(*ver)) {
+ if (!skb || skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;
@@ -535,6 +535,8 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
2000 + (version->timestamp >> 8), version->timestamp & 0xff,
version->build_type, version->build_num);
+ if (version->img_type == 0x03)
+ bt_dev_info(hdev, "Firmware SHA1: 0x%8.8x", version->git_sha1);
return 0;
}
@@ -630,6 +632,9 @@ static int btintel_parse_version_tlv(struct hci_dev *hdev,
memcpy(&version->otp_bd_addr, tlv->val,
sizeof(bdaddr_t));
break;
+ case INTEL_TLV_GIT_SHA1:
+ version->git_sha1 = get_unaligned_le32(tlv->val);
+ break;
default:
/* Ignore rest of information */
break;
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 3a2d5b421..d19fcdb9f 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -41,7 +41,8 @@ enum {
INTEL_TLV_LIMITED_CCE,
INTEL_TLV_SBE_TYPE,
INTEL_TLV_OTP_BDADDR,
- INTEL_TLV_UNLOCKED_STATE
+ INTEL_TLV_UNLOCKED_STATE,
+ INTEL_TLV_GIT_SHA1
};
struct intel_tlv {
@@ -69,6 +70,7 @@ struct intel_version_tlv {
u8 min_fw_build_yy;
u8 limited_cce;
u8 sbe_type;
+ u32 git_sha1;
bdaddr_t otp_bd_addr;
};
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 285418dbb..812fd2a8f 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(skb);
break;
+ }
data->cd_info.cnt = 0;
/* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
break;
}
- if (err < 0)
- kfree_skb(skb);
-
return err;
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
@@ -422,5 +421,6 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7922);
MODULE_FIRMWARE(FIRMWARE_MT7961);
MODULE_FIRMWARE(FIRMWARE_MT7925);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 56f5502ba..cbcdb99a2 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -4,6 +4,7 @@
#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7922 "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
#define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin"
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 203a000a8..3c84fcbda 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -383,8 +383,8 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
}
}
-static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
- size_t count)
+static ssize_t btmtkuart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index abccd571c..c19dc8a29 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1267,8 +1267,8 @@ static const struct h4_recv_pkt nxp_recv_pkts[] = {
{ NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 },
};
-static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data,
- size_t count)
+static ssize_t btnxpuart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
@@ -1281,7 +1281,6 @@ static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data,
/* Safe to ignore out-of-sync bootloader signatures */
if (!is_fw_downloading(nxpdev))
bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
- nxpdev->rx_skb = NULL;
return count;
}
if (!is_fw_downloading(nxpdev))
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index b40b32fa7..19cfc342f 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -826,11 +826,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
+ bdaddr_t bdaddr_swapped;
struct sk_buff *skb;
int err;
- skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
- HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ baswap(&bdaddr_swapped, bdaddr);
+
+ skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
+ &bdaddr_swapped, HCI_EV_VENDOR,
+ HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 81ccb88c9..effa54629 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -550,6 +552,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -3461,13 +3465,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+	err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+	if (err < 0)
+		bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
/*
@@ -4481,6 +4484,7 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
}
if (!reset)
@@ -4631,6 +4635,10 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
BT_DBG("intf %p", intf);
+ /* Don't suspend if there are connections */
+ if (hci_conn_count(data->hdev))
+ return -EBUSY;
+
if (data->suspend_count++)
return 0;
@@ -4792,10 +4800,8 @@ static struct usb_driver btusb_driver = {
.disable_hub_initiated_lpm = 1,
#ifdef CONFIG_DEV_COREDUMP
- .drvwrap = {
- .driver = {
- .coredump = btusb_coredump,
- },
+ .driver = {
+ .coredump = btusb_coredump,
},
#endif
};
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bb69b7ea8..ce5c2aa74 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,7 +7,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
+ bool bdaddr_property_broken;
const char *firmware_name;
};
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
+ if (!hu->serdev)
+ return true;
+
/* BT SoC attached through the serial bus is handled by the serdev driver.
* So we need to use the device handle of the serdev driver to get the
* status of device may wakeup.
@@ -1815,6 +1818,24 @@ static void hci_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
+static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+ /* QCA uses 1 as non-HCI data path id for HFP */
+ *data_path_id = 1;
+ return 0;
+}
+
+static int qca_configure_hfp_offload(struct hci_dev *hdev)
+{
+ bt_dev_info(hdev, "HFP non-HCI data transport is supported");
+ hdev->get_data_path_id = qca_get_data_path_id;
+	/* There is no need to send HCI_Configure_Data_Path to configure the
+	 * non-HCI data transport path for QCA controllers, so set the field below
+ */
+ hdev->get_codec_config_data = NULL;
+ return 0;
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
@@ -1825,6 +1846,7 @@ static int qca_setup(struct hci_uart *hu)
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
struct qca_btsoc_version ver;
+ struct qca_serdev *qcadev;
const char *soc_name;
ret = qca_check_speeds(hu);
@@ -1886,16 +1908,11 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- /* Set BDA quirk bit for reading BDA value from fwnode property
- * only if that property exist in DT.
- */
- if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
- bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
- } else {
- bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
- }
+ qcadev = serdev_device_get_drvdata(hu->serdev);
+ if (qcadev->bdaddr_property_broken)
+ set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
hci_set_aosp_capable(hdev);
@@ -1943,8 +1960,10 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
- hu->hdev->wakeup = qca_wakeup;
+ if (hu->serdev) {
+ if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+ hu->hdev->wakeup = qca_wakeup;
+ }
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
@@ -1979,6 +1998,10 @@ out:
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
else
hu->hdev->set_bdaddr = qca_set_bdaddr;
+
+ if (soc_type == QCA_QCA2066)
+ qca_configure_hfp_offload(hdev);
+
qca->fw_version = le16_to_cpu(ver.patch_ver);
qca->controller_id = le16_to_cpu(ver.rom_ver);
hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL);
@@ -2273,6 +2296,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
+ qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
+ "qcom,local-bd-address-broken");
+
if (data)
qcadev->btsoc_type = data->soc_type;
else
@@ -2308,16 +2334,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- power_ctrl_enabled = false;
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
- data->soc_type == QCA_WCN7850))
- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ data->soc_type == QCA_WCN7850)) {
+ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ return PTR_ERR(qcadev->sw_ctrl);
+ }
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@@ -2336,10 +2367,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
- power_ctrl_enabled = false;
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
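
The probe hunks above all follow the same optional-resource idiom: an ERR_PTR
return from devm_gpiod_get_optional() (for example -EPROBE_DEFER) now fails the
probe, while a NULL return ("not wired up") merely disables power control. A
minimal sketch of the pattern, with hypothetical names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_bt_en(struct device *dev, bool *power_ctrl_enabled)
{
	struct gpio_desc *bt_en;

	bt_en = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(bt_en))
		return PTR_ERR(bt_en);	/* real lookup error: abort probe */

	*power_ctrl_enabled = bt_en != NULL;	/* absent GPIO: feature off */
	return 0;
}
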
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 611a11fbb..214fff876 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -271,8 +271,8 @@ static void hci_uart_write_wakeup(struct serdev_device *serdev)
*
* Return: number of processed bytes
*/
-static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data,
- size_t count)
+static ssize_t hci_uart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct hci_uart *hu = serdev_device_get_drvdata(serdev);
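
This follows the serdev core change that widened the receive callback's return
type, so large byte counts and error returns stay unambiguous. A hedged sketch
of a handler under the new signature (example_priv and example_process are
hypothetical):

static ssize_t example_receive_buf(struct serdev_device *serdev,
				   const u8 *data, size_t count)
{
	struct example_priv *priv = serdev_device_get_drvdata(serdev);

	/* Return the number of bytes actually consumed */
	return example_process(priv, data, count);
}
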
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 2f6d5002e..78b96cd63 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -1167,14 +1167,11 @@ error_cleanup_mc_io:
* fsl_mc_bus_remove - callback invoked when the root MC bus is being
* removed
*/
-static int fsl_mc_bus_remove(struct platform_device *pdev)
+static void fsl_mc_bus_remove(struct platform_device *pdev)
{
struct fsl_mc *mc = platform_get_drvdata(pdev);
struct fsl_mc_io *mc_io;
- if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
- return -EINVAL;
-
mc_io = mc->root_mc_bus_dev->mc_io;
fsl_mc_device_remove(mc->root_mc_bus_dev);
fsl_destroy_mc_io(mc_io);
@@ -1190,13 +1187,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
(GCR1_P1_STOP | GCR1_P2_STOP),
mc->fsl_mc_regs + FSL_MC_GCR1);
}
-
- return 0;
-}
-
-static void fsl_mc_bus_shutdown(struct platform_device *pdev)
-{
- fsl_mc_bus_remove(pdev);
}
static const struct of_device_id fsl_mc_bus_match_table[] = {
@@ -1220,8 +1210,8 @@ static struct platform_driver fsl_mc_bus_driver = {
.acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
- .remove = fsl_mc_bus_remove,
- .shutdown = fsl_mc_bus_shutdown,
+ .remove_new = fsl_mc_bus_remove,
+ .shutdown = fsl_mc_bus_remove,
};
static int fsl_mc_bus_notifier(struct notifier_block *nb,
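
This and most of the bus-driver hunks that follow are instances of the same
treewide conversion: platform remove callbacks whose return value the driver
core ignores anyway are switched to a void-returning callback registered
through .remove_new. A minimal sketch of the converted shape (names
hypothetical):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* teardown that cannot meaningfully fail; nothing to return */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver		= {
		.name	= "example",
	},
};
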
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index cdc4e38c1..09340adba 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -657,7 +657,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_lpc_remove(struct platform_device *pdev)
+static void hisi_lpc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
@@ -669,8 +669,6 @@ static int hisi_lpc_remove(struct platform_device *pdev)
of_platform_depopulate(dev);
logic_pio_unregister_range(range);
-
- return 0;
}
static const struct of_device_id hisi_lpc_of_match[] = {
@@ -691,6 +689,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
- .remove = hisi_lpc_remove,
+ .remove_new = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index f9fd1582f..837bf9d51 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -11,7 +11,10 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>
@@ -202,9 +205,7 @@ static int weim_timing_setup(struct device *dev, struct device_node *np,
static int weim_parse_dt(struct platform_device *pdev)
{
- const struct of_device_id *of_id = of_match_device(weim_id_table,
- &pdev->dev);
- const struct imx_weim_devtype *devtype = of_id->data;
+ const struct imx_weim_devtype *devtype = device_get_match_data(&pdev->dev);
int ret = 0, have_child = 0;
struct device_node *child;
struct weim_priv *priv;
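
device_get_match_data() fetches the match data through the fwnode layer, so
the same probe path works whether the device matched via DT or ACPI, and the
of_device.h include can be dropped. A short sketch, with a hypothetical
devtype struct:

#include <linux/platform_device.h>
#include <linux/property.h>

static int example_probe(struct platform_device *pdev)
{
	const struct example_devtype *devtype;

	devtype = device_get_match_data(&pdev->dev);
	if (!devtype)	/* defensive: some match entries carry no data */
		return -ENODEV;

	return 0;
}
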
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
index a2125fa5f..577965f95 100644
--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -126,6 +126,7 @@ struct mhi_ep_ring {
union mhi_ep_ring_ctx *ring_ctx;
struct mhi_ring_element *ring_cache;
enum mhi_ep_ring_type type;
+ struct delayed_work intmodt_work;
u64 rbase;
size_t rd_offset;
size_t wr_offset;
@@ -135,7 +136,9 @@ struct mhi_ep_ring {
u32 ch_id;
u32 er_index;
u32 irq_vector;
+ u32 intmodt;
bool started;
+ bool irq_pending;
};
struct mhi_ep_cmd {
@@ -159,6 +162,7 @@ struct mhi_ep_chan {
void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
enum mhi_ch_state state;
enum dma_data_direction dir;
+ size_t rd_offset;
u64 tre_loc;
u32 tre_size;
u32 tre_bytes_left;
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 934cdbca0..65f1f6b9b 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -54,11 +54,27 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
mutex_unlock(&mhi_cntrl->event_lock);
/*
- * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
- * set this flag for interrupt moderation as per MHI protocol.
+ * As per the MHI specification, section 4.3, Interrupt moderation:
+ *
+ * 1. If BEI flag is not set, cancel any pending intmodt work if started
+ * for the event ring and raise IRQ immediately.
+ *
+ * 2. If both BEI and intmodt are set, and if no IRQ is pending for the
+ * same event ring, start the IRQ delayed work as per the value of
+ * intmodt. If previous IRQ is pending, then do nothing as the pending
+ * IRQ is enough for the host to process the current event ring element.
+ *
+ * 3. If BEI is set and intmodt is not set, no need to raise IRQ.
*/
- if (!bei)
+ if (!bei) {
+ if (READ_ONCE(ring->irq_pending))
+ cancel_delayed_work(&ring->intmodt_work);
+
mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+ } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) {
+ WRITE_ONCE(ring->irq_pending, true);
+ schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt));
+ }
return 0;
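
Condensed, the three cases from the comment above reduce to the branch below
(a restatement of the logic in this hunk, not a separate helper in the patch):

static void example_moderate_irq(struct mhi_ep_ring *ring, bool bei)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;

	if (!bei) {
		/* Case 1: IRQ wanted now; drop any armed delayed IRQ */
		if (READ_ONCE(ring->irq_pending))
			cancel_delayed_work(&ring->intmodt_work);
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
	} else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) {
		/* Case 2: coalesce; one delayed IRQ covers the window */
		WRITE_ONCE(ring->irq_pending, true);
		schedule_delayed_work(&ring->intmodt_work,
				      msecs_to_jiffies(ring->intmodt));
	}
	/* Case 3: BEI set and intmodt == 0, so no IRQ at all */
}

With an intmodt of 5 ms and BEI-flagged events arriving every millisecond, the
host now sees one interrupt per 5 ms window instead of five.
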
@@ -183,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
goto err_unlock;
}
+
+ mhi_chan->rd_offset = ch_ring->rd_offset;
}
/* Set channel state to RUNNING */
@@ -312,21 +330,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio
struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
- return !!(ring->rd_offset == ring->wr_offset);
+ return !!(mhi_chan->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+ struct mhi_result result = {};
+ int ret;
+
+ if (mhi_chan->xfer_cb) {
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+ result.bytes_xferd = buf_info->size;
+
+ mhi_chan->xfer_cb(mhi_dev, &result);
+ }
+
+ /*
+ * The host will split the data packet into multiple TREs if it can't fit
+ * the packet in a single TRE. In that case, CHAIN flag will be set by the
+ * host for all TREs except the last one.
+ */
+ if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+ if (MHI_TRE_DATA_GET_CHAIN(el)) {
+ /*
+ * IEOB (Interrupt on End of Block) flag will be set by the host if
+ * it expects the completion event for all TREs of a TD.
+ */
+ if (MHI_TRE_DATA_GET_IEOB(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOB);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ goto err_free_tre_buf;
+ }
+ }
+ } else {
+ /*
+ * IEOT (Interrupt on End of Transfer) flag will be set by the host
+ * for the last TRE of the TD and expects the completion event for
+ * the same.
+ */
+ if (MHI_TRE_DATA_GET_IEOT(el)) {
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+ MHI_TRE_DATA_GET_LEN(el),
+ MHI_EV_CC_EOT);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev,
+ "Error sending transfer compl. event\n");
+ goto err_free_tre_buf;
+ }
+ }
+ }
+ }
+
+ mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
- struct mhi_ep_ring *ring,
- struct mhi_result *result,
- u32 len)
+ struct mhi_ep_ring *ring)
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
size_t tr_len, read_offset, write_offset;
struct mhi_ep_buf_info buf_info = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
bool tr_done = false;
+ void *buf_addr;
u32 buf_left;
int ret;
@@ -339,7 +421,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
return -ENODEV;
}
- el = &ring->ring_cache[ring->rd_offset];
+ el = &ring->ring_cache[mhi_chan->rd_offset];
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
@@ -356,82 +438,50 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
+ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ if (!buf_addr)
+ return -ENOMEM;
+
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = result->buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr + write_offset;
buf_info.size = tr_len;
+ buf_info.cb = mhi_ep_read_completion;
+ buf_info.cb_buf = buf_addr;
+ buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+ if (mhi_chan->tre_bytes_left - tr_len)
+ buf_info.code = MHI_EV_CC_OVERFLOW;
dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
- return ret;
+ goto err_free_buf_addr;
}
buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- /*
- * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
- * read completely:
- *
- * 1. Send completion event to the host based on the flags set in TRE.
- * 2. Increment the local read offset of the transfer ring.
- */
if (!mhi_chan->tre_bytes_left) {
- /*
- * The host will split the data packet into multiple TREs if it can't fit
- * the packet in a single TRE. In that case, CHAIN flag will be set by the
- * host for all TREs except the last one.
- */
- if (MHI_TRE_DATA_GET_CHAIN(el)) {
- /*
- * IEOB (Interrupt on End of Block) flag will be set by the host if
- * it expects the completion event for all TREs of a TD.
- */
- if (MHI_TRE_DATA_GET_IEOB(el)) {
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
- MHI_TRE_DATA_GET_LEN(el),
- MHI_EV_CC_EOB);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev,
- "Error sending transfer compl. event\n");
- return ret;
- }
- }
- } else {
- /*
- * IEOT (Interrupt on End of Transfer) flag will be set by the host
- * for the last TRE of the TD and expects the completion event for
- * the same.
- */
- if (MHI_TRE_DATA_GET_IEOT(el)) {
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
- MHI_TRE_DATA_GET_LEN(el),
- MHI_EV_CC_EOT);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev,
- "Error sending transfer compl. event\n");
- return ret;
- }
- }
-
+ if (MHI_TRE_DATA_GET_IEOT(el))
tr_done = true;
- }
- mhi_ep_ring_inc_index(ring);
+ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
}
-
- result->bytes_xferd += tr_len;
} while (buf_left && !tr_done);
return 0;
+
+err_free_buf_addr:
+ kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+ return ret;
}
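
The controller's read_async() implementation is expected to start the
host-to-device transfer and invoke buf_info->cb (mhi_ep_read_completion()
above) once it completes. A hedged sketch of that contract, with hypothetical
DMA helpers:

static void example_dma_done(struct mhi_ep_buf_info *buf_info)
{
	buf_info->cb(buf_info);		/* runs mhi_ep_read_completion() */
}

static int example_read_async(struct mhi_ep_cntrl *mhi_cntrl,
			      struct mhi_ep_buf_info *buf_info)
{
	/* DMA from buf_info->host_addr into buf_info->dev_addr, size bytes,
	 * completing asynchronously into example_dma_done().
	 */
	return example_start_dma(mhi_cntrl, buf_info, example_dma_done);
}

Note that the channel-level rd_offset added to struct mhi_ep_chan tracks how
far reads have been issued, while ring->rd_offset is only advanced from the
completion path once the data and its EOB/EOT event have been handled.
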
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
{
struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
struct mhi_result result = {};
- u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ep_chan *mhi_chan;
int ret;
@@ -452,32 +502,49 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
- if (!result.buf_addr)
- return -ENOMEM;
-
do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
if (ret < 0) {
dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
return ret;
}
- result.dir = mhi_chan->dir;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
- result.bytes_xferd = 0;
- memset(result.buf_addr, 0, len);
-
/* Read until the ring becomes empty */
} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
- kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
}
return 0;
}
+static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info)
+{
+ struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+ struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+ struct device *dev = &mhi_dev->dev;
+ struct mhi_result result = {};
+ int ret;
+
+ if (mhi_chan->xfer_cb) {
+ result.buf_addr = buf_info->cb_buf;
+ result.dir = mhi_chan->dir;
+ result.bytes_xferd = buf_info->size;
+
+ mhi_chan->xfer_cb(mhi_dev, &result);
+ }
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size,
+ buf_info->code);
+ if (ret) {
+ dev_err(dev, "Error sending transfer completion event\n");
+ return;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+}
+
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
@@ -488,7 +555,6 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
struct mhi_ring_element *el;
u32 buf_left, read_offset;
struct mhi_ep_ring *ring;
- enum mhi_ev_ccs code;
size_t tr_len;
u32 tre_len;
int ret;
@@ -512,7 +578,7 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
goto err_exit;
}
- el = &ring->ring_cache[ring->rd_offset];
+ el = &ring->ring_cache[mhi_chan->rd_offset];
tre_len = MHI_TRE_DATA_GET_LEN(el);
tr_len = min(buf_left, tre_len);
@@ -521,33 +587,35 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
buf_info.dev_addr = skb->data + read_offset;
buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el);
buf_info.size = tr_len;
+ buf_info.cb = mhi_ep_skb_completion;
+ buf_info.cb_buf = skb;
+ buf_info.mhi_dev = mhi_dev;
- dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
- ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
- if (ret < 0) {
- dev_err(dev, "Error writing to the channel\n");
- goto err_exit;
- }
-
- buf_left -= tr_len;
/*
* For all TREs queued by the host for DL channel, only the EOT flag will be set.
* If the packet doesn't fit into a single TRE, send the OVERFLOW event to
* the host so that the host can adjust the packet boundary to next TREs. Else send
* the EOT event to the host indicating the packet boundary.
*/
- if (buf_left)
- code = MHI_EV_CC_OVERFLOW;
+ if (buf_left - tr_len)
+ buf_info.code = MHI_EV_CC_OVERFLOW;
else
- code = MHI_EV_CC_EOT;
+ buf_info.code = MHI_EV_CC_EOT;
- ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
- if (ret) {
- dev_err(dev, "Error sending transfer completion event\n");
+ dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
+ ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
+ if (ret < 0) {
+ dev_err(dev, "Error writing to the channel\n");
goto err_exit;
}
- mhi_ep_ring_inc_index(ring);
+ buf_left -= tr_len;
+
+ /*
+ * Update the read offset cached in mhi_chan. Actual read offset
+ * will be updated by the completion handler.
+ */
+ mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
} while (buf_left);
mutex_unlock(&mhi_chan->lock);
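
A worked example of the loop above (illustrative numbers): queueing a
5000-byte skb against host TREs of 2048 bytes issues three write_async()
calls of 2048, 2048 and 904 bytes; the first two carry MHI_EV_CC_OVERFLOW
since buf_left - tr_len is still non-zero, and the last carries
MHI_EV_CC_EOT. mhi_chan->rd_offset advances three slots immediately, while
ring->rd_offset and the completion events trail behind in
mhi_ep_skb_completion().
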
@@ -748,7 +816,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ep_ring_item *itr, *tmp;
- struct mhi_ring_element *el;
struct mhi_ep_ring *ring;
struct mhi_ep_chan *chan;
unsigned long flags;
@@ -787,16 +854,14 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
}
/* Sanity check to make sure there are elements in the ring */
- if (ring->rd_offset == ring->wr_offset) {
+ if (chan->rd_offset == ring->wr_offset) {
mutex_unlock(&chan->lock);
kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
continue;
}
- el = &ring->ring_cache[ring->rd_offset];
-
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
- ret = mhi_ep_process_ch_ring(ring, el);
+ ret = mhi_ep_process_ch_ring(ring);
if (ret) {
dev_err(dev, "Error processing ring for channel (%u): %d\n",
ring->ch_id, ret);
@@ -1399,6 +1464,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
return -EINVAL;
+ if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync ||
+ !mhi_cntrl->read_async || !mhi_cntrl->write_async)
+ return -EINVAL;
+
ret = mhi_ep_chan_init(mhi_cntrl, config);
if (ret)
return ret;
@@ -1431,6 +1500,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
+
INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index c673d7200..aeb53b2c3 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -48,7 +48,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
buf_info.dev_addr = &ring->ring_cache[start];
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
} else {
@@ -56,7 +56,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
buf_info.dev_addr = &ring->ring_cache[start];
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
@@ -65,7 +65,7 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
buf_info.dev_addr = &ring->ring_cache[0];
buf_info.size = end * sizeof(struct mhi_ring_element);
- ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
+ ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
if (ret < 0)
return ret;
}
@@ -143,7 +143,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
buf_info.dev_addr = el;
buf_info.size = sizeof(*el);
- return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
+ return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
@@ -162,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32
}
}
+static void mhi_ep_raise_irq(struct work_struct *work)
+{
+ struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work);
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+
+ mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+ WRITE_ONCE(ring->irq_pending, false);
+}
+
int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
union mhi_ep_ring_ctx *ctx)
{
@@ -178,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
if (ring->type == RING_TYPE_CH)
ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
- if (ring->type == RING_TYPE_ER)
+ if (ring->type == RING_TYPE_ER) {
ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+ ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK,
+ le32_to_cpu(ring->ring_ctx->ev.intmod));
+
+ INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq);
+ }
/* During ring init, both rp and wp are equal */
memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
@@ -206,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
{
+ if (ring->type == RING_TYPE_ER)
+ cancel_delayed_work_sync(&ring->intmodt_work);
+
ring->started = false;
kfree(ring->ring_cache);
ring->ring_cache = NULL;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index f78aefd2d..8e5ec1a40 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -62,6 +62,7 @@ static const char * const mhi_pm_state_str[] = {
[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
+ [MHI_PM_STATE_SYS_ERR_FAIL] = "SYS ERROR Failure",
[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};
@@ -881,6 +882,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl,
if (!mhi_cntrl->timeout_ms)
mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+ mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
mhi_cntrl->bounce_buf = config->use_bounce_buf;
mhi_cntrl->buffer_len = config->buf_len;
if (!mhi_cntrl->buffer_len)
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 2e139e76d..4b6deea17 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -88,6 +88,7 @@ enum mhi_pm_state {
MHI_PM_STATE_FW_DL_ERR,
MHI_PM_STATE_SYS_ERR_DETECT,
MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SYS_ERR_FAIL,
MHI_PM_STATE_SHUTDOWN_PROCESS,
MHI_PM_STATE_LD_ERR_FATAL_DETECT,
MHI_PM_STATE_MAX
@@ -104,14 +105,16 @@ enum mhi_pm_state {
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
-#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+#define MHI_PM_SYS_ERR_FAIL BIT(10)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
/* link not accessible */
-#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+ MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
+ MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
@@ -321,7 +324,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 mask,
- u32 val, u32 delayus);
+ u32 val, u32 delayus, u32 timeout_ms);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index d6653cbcf..abb561db9 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset,
- u32 mask, u32 val, u32 delayus)
+ u32 mask, u32 val, u32 delayus,
+ u32 timeout_ms)
{
int ret;
- u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+ u32 out, retry = (timeout_ms * 1000) / delayus;
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
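
The retry count is simply (timeout_ms * 1000) / delayus, so with the usual
25000 us poll interval a caller passing the default 8000 ms timeout gets 320
polls, while the 50000 ms ready timeout used by SDX75 below yields 2000. An
illustrative call, matching the READY poll later in this series:

	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, 25000, timeout_ms);
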
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 08f3f039d..cd6cd14b3 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -269,6 +269,16 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
+static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .ready_timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+ .ch_cfg = modem_qcom_v1_mhi_channels,
+ .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+ .event_cfg = modem_qcom_v1_mhi_events,
+};
+
static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
@@ -278,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
+ .name = "qcom-sdx75m",
+ .fw = "qcom/sdx75m/xbl.elf",
+ .edl = "qcom/sdx75m/edl.mbn",
+ .config = &modem_qcom_v2_mhiv_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.name = "qcom-sdx65m",
.fw = "qcom/sdx65m/xbl.elf",
@@ -600,6 +620,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 8a4362d75..d0d033ce9 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -36,7 +36,10 @@
* M0 <--> M0
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
+ * SYS_ERR_PROCESS -> SYS_ERR_FAIL
+ * SYS_ERR_FAIL -> SYS_ERR_DETECT
+ * SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
* SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
@@ -93,7 +96,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
},
{
MHI_PM_SYS_ERR_PROCESS,
- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_FAIL,
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
/* L2 States */
@@ -163,6 +171,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
enum mhi_pm_state cur_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+ u32 timeout_ms;
int ret, i;
/* Check if device entered error state */
@@ -173,14 +182,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
/* Wait for RESET to be cleared and READY bit to be set by the device */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 0, interval_us);
+ MHICTRL_RESET_MASK, 0, interval_us,
+ mhi_cntrl->timeout_ms);
if (ret) {
dev_err(dev, "Device failed to clear MHI Reset\n");
return ret;
}
+ timeout_ms = mhi_cntrl->ready_timeout_ms ?
+ mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
- MHISTATUS_READY_MASK, 1, interval_us);
+ MHISTATUS_READY_MASK, 1, interval_us,
+ timeout_ms);
if (ret) {
dev_err(dev, "Device failed to enter MHI Ready\n");
return ret;
@@ -479,7 +492,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
/* Wait for the reset bit to be cleared by the device */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 0, 25000);
+ MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
if (ret)
dev_err(dev, "Device failed to clear MHI Reset\n");
@@ -492,8 +505,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
/* wait for ready to be set */
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
- MHISTATUS,
- MHISTATUS_READY_MASK, 1, 25000);
+ MHISTATUS, MHISTATUS_READY_MASK,
+ 1, 25000, mhi_cntrl->timeout_ms);
if (ret)
dev_err(dev, "Device failed to enter READY state\n");
}
@@ -624,7 +637,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
!in_reset, timeout);
if (!ret || in_reset) {
dev_err(dev, "Device failed to exit MHI Reset state\n");
- goto exit_sys_error_transition;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_FAIL);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /* Shutdown may have occurred, otherwise cleanup now */
+ if (cur_state != MHI_PM_SYS_ERR_FAIL)
+ goto exit_sys_error_transition;
}
/*
@@ -1111,7 +1130,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
if (state == MHI_STATE_SYS_ERR) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_RESET_MASK, 0, interval_us);
+ MHICTRL_RESET_MASK, 0, interval_us,
+ mhi_cntrl->timeout_ms);
if (ret) {
dev_info(dev, "Failed to reset MHI due to syserr state\n");
goto error_exit;
@@ -1202,14 +1222,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
int ret = mhi_async_power_up(mhi_cntrl);
+ u32 timeout_ms;
if (ret)
return ret;
+	/* Some devices need more time to become ready during power-up */
+ timeout_ms = mhi_cntrl->ready_timeout_ms ?
+ mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
if (ret)
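
Both hunks above use the same fallback, which could equally be written with
the GNU ?: shorthand common in kernel code (a stylistic sketch, not part of
the patch):

	timeout_ms = mhi_cntrl->ready_timeout_ms ?: mhi_cntrl->timeout_ms;

The distinction matters for devices such as SDX75 in pci_generic.c below,
which advertises an 8000 ms command timeout but needs up to 50000 ms to
signal READY after power-up.
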
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index e384fbc6c..641c1a6ad 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -102,7 +102,7 @@ static int moxtet_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static struct bus_type moxtet_bus_type = {
+static const struct bus_type moxtet_bus_type = {
.name = "moxtet",
.dev_groups = moxtet_dev_groups,
.match = moxtet_match,
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index e02d06562..7d7479ba0 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -84,12 +84,10 @@ err0:
return ret;
}
-static int omap_ocp2scp_remove(struct platform_device *pdev)
+static void omap_ocp2scp_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -103,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove = omap_ocp2scp_remove,
+ .remove_new = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
index 31774648b..ee6d29925 100644
--- a/drivers/bus/omap_l3_smx.c
+++ b/drivers/bus/omap_l3_smx.c
@@ -261,7 +261,7 @@ err0:
return ret;
}
-static int omap3_l3_remove(struct platform_device *pdev)
+static void omap3_l3_remove(struct platform_device *pdev)
{
struct omap3_l3 *l3 = platform_get_drvdata(pdev);
@@ -269,13 +269,11 @@ static int omap3_l3_remove(struct platform_device *pdev)
free_irq(l3->debug_irq, l3);
iounmap(l3->rt);
kfree(l3);
-
- return 0;
}
static struct platform_driver omap3_l3_driver = {
.probe = omap3_l3_probe,
- .remove = omap3_l3_remove,
+ .remove_new = omap3_l3_remove,
.driver = {
.name = "omap_l3_smx",
.of_match_table = of_match_ptr(omap3_l3_match),
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 3fef18a43..5931974a2 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -350,7 +350,7 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
+static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
{
struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev);
@@ -363,8 +363,6 @@ static int qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
pm_clk_destroy(&pdev->dev);
-
- return 0;
}
static const struct of_device_id qcom_ssc_block_bus_of_match[] = {
@@ -375,7 +373,7 @@ MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
static struct platform_driver qcom_ssc_block_bus_driver = {
.probe = qcom_ssc_block_bus_probe,
- .remove = qcom_ssc_block_bus_remove,
+ .remove_new = qcom_ssc_block_bus_remove,
.driver = {
.name = "qcom-ssc-block-bus",
.of_match_table = qcom_ssc_block_bus_of_match,
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index aafcc481d..50870c827 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -74,17 +74,16 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
return 0;
}
-static int simple_pm_bus_remove(struct platform_device *pdev)
+static void simple_pm_bus_remove(struct platform_device *pdev)
{
const void *data = of_device_get_match_data(&pdev->dev);
if (pdev->driver_override || data)
- return 0;
+ return;
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int simple_pm_bus_runtime_suspend(struct device *dev)
@@ -129,7 +128,7 @@ MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
static struct platform_driver simple_pm_bus_driver = {
.probe = simple_pm_bus_probe,
- .remove = simple_pm_bus_remove,
+ .remove_new = simple_pm_bus_remove,
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 414f29cde..3339311ce 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -24,10 +24,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev)
return 0;
}
-static int sun50i_de2_bus_remove(struct platform_device *pdev)
+static void sun50i_de2_bus_remove(struct platform_device *pdev)
{
sunxi_sram_release(&pdev->dev);
- return 0;
}
static const struct of_device_id sun50i_de2_bus_of_match[] = {
@@ -37,7 +36,7 @@ static const struct of_device_id sun50i_de2_bus_of_match[] = {
static struct platform_driver sun50i_de2_bus_driver = {
.probe = sun50i_de2_bus_probe,
- .remove = sun50i_de2_bus_remove,
+ .remove_new = sun50i_de2_bus_remove,
.driver = {
.name = "sun50i-de2-bus",
.of_match_table = sun50i_de2_bus_of_match,
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index db0ed4e5d..fd3e9d823 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -817,15 +817,13 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return 0;
}
-static int sunxi_rsb_remove(struct platform_device *pdev)
+static void sunxi_rsb_remove(struct platform_device *pdev)
{
struct sunxi_rsb *rsb = platform_get_drvdata(pdev);
device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices);
pm_runtime_disable(&pdev->dev);
sunxi_rsb_hw_exit(rsb);
-
- return 0;
}
static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = {
@@ -842,7 +840,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
- .remove = sunxi_rsb_remove,
+ .remove_new = sunxi_rsb_remove,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index ac5814230..de80008bf 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -53,11 +53,9 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_aconnect_remove(struct platform_device *pdev)
+static void tegra_aconnect_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int tegra_aconnect_runtime_resume(struct device *dev)
@@ -106,7 +104,7 @@ MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
static struct platform_driver tegra_aconnect_driver = {
.probe = tegra_aconnect_probe,
- .remove = tegra_aconnect_remove,
+ .remove_new = tegra_aconnect_remove,
.driver = {
.name = "tegra-aconnect",
.of_match_table = tegra_aconnect_of_match,
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index 59919e99f..f5d6414df 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -258,14 +258,12 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_gmi_remove(struct platform_device *pdev)
+static void tegra_gmi_remove(struct platform_device *pdev)
{
struct tegra_gmi *gmi = platform_get_drvdata(pdev);
of_platform_depopulate(gmi->dev);
tegra_gmi_disable(gmi);
-
- return 0;
}
static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
@@ -305,7 +303,7 @@ MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
static struct platform_driver tegra_gmi_driver = {
.probe = tegra_gmi_probe,
- .remove = tegra_gmi_remove,
+ .remove_new = tegra_gmi_remove,
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
index 480a4de76..4969c556e 100644
--- a/drivers/bus/ti-pwmss.c
+++ b/drivers/bus/ti-pwmss.c
@@ -33,10 +33,9 @@ static int pwmss_probe(struct platform_device *pdev)
return ret;
}
-static int pwmss_remove(struct platform_device *pdev)
+static void pwmss_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
- return 0;
}
static struct platform_driver pwmss_driver = {
@@ -45,7 +44,7 @@ static struct platform_driver pwmss_driver = {
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
- .remove = pwmss_remove,
+ .remove_new = pwmss_remove,
};
module_platform_driver(pwmss_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 9ed9239b1..245e5e827 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3397,7 +3397,7 @@ unprepare:
return error;
}
-static int sysc_remove(struct platform_device *pdev)
+static void sysc_remove(struct platform_device *pdev)
{
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
@@ -3422,8 +3422,6 @@ static int sysc_remove(struct platform_device *pdev)
unprepare:
sysc_unprepare(ddata);
-
- return 0;
}
static const struct of_device_id sysc_match[] = {
@@ -3449,7 +3447,7 @@ MODULE_DEVICE_TABLE(of, sysc_match);
static struct platform_driver sysc_driver = {
.probe = sysc_probe,
- .remove = sysc_remove,
+ .remove_new = sysc_remove,
.driver = {
.name = "ti-sysc",
.of_match_table = sysc_match,
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 38c886dc2..4fa932cb0 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -331,7 +331,7 @@ static int ts_nbus_probe(struct platform_device *pdev)
return 0;
}
-static int ts_nbus_remove(struct platform_device *pdev)
+static void ts_nbus_remove(struct platform_device *pdev)
{
struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev);
@@ -339,8 +339,6 @@ static int ts_nbus_remove(struct platform_device *pdev)
mutex_lock(&ts_nbus->lock);
pwm_disable(ts_nbus->pwm);
mutex_unlock(&ts_nbus->lock);
-
- return 0;
}
static const struct of_device_id ts_nbus_of_match[] = {
@@ -351,7 +349,7 @@ MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
static struct platform_driver ts_nbus_driver = {
.probe = ts_nbus_probe,
- .remove = ts_nbus_remove,
+ .remove_new = ts_nbus_remove,
.driver = {
.name = "ts_nbus",
.of_match_table = ts_nbus_of_match,
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index d6e5e3aba..9345ce497 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -8,4 +8,10 @@ config AX45MP_L2_CACHE
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
+config SIFIVE_CCACHE
+ bool "Sifive Composable Cache controller"
+ depends on ARCH_SIFIVE || ARCH_STARFIVE
+ help
+ Support for the composable cache controller on SiFive platforms.
+
endmenu
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 2012e7fb9..7657cff3b 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
+obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
+obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index 3684f5b40..89ed6cd6b 100644
--- a/drivers/soc/sifive/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -8,13 +8,16 @@
#define pr_fmt(fmt) "CCACHE: " fmt
+#include <linux/align.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/device.h>
#include <linux/bitfield.h>
+#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
+#include <asm/dma-noncoherent.h>
#include <soc/sifive/sifive_ccache.h>
#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
@@ -39,10 +42,14 @@
#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16)
#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24)
+#define SIFIVE_CCACHE_FLUSH64 0x200
+#define SIFIVE_CCACHE_FLUSH32 0x240
+
#define SIFIVE_CCACHE_WAYENABLE 0x08
#define SIFIVE_CCACHE_ECCINJECTERR 0x40
#define SIFIVE_CCACHE_MAX_ECCINTR 4
+#define SIFIVE_CCACHE_LINE_SIZE 64
static void __iomem *ccache_base;
static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR];
@@ -56,6 +63,11 @@ enum {
DIR_UNCORR,
};
+enum {
+ QUIRK_NONSTANDARD_CACHE_OPS = BIT(0),
+ QUIRK_BROKEN_DATA_UNCORR = BIT(1),
+};
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *sifive_test;
@@ -106,6 +118,8 @@ static void ccache_config_read(void)
static const struct of_device_id sifive_ccache_ids[] = {
{ .compatible = "sifive,fu540-c000-ccache" },
{ .compatible = "sifive,fu740-c000-ccache" },
+ { .compatible = "starfive,jh7100-ccache",
+ .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS | QUIRK_BROKEN_DATA_UNCORR) },
{ .compatible = "sifive,ccache0" },
{ /* end of table */ }
};
@@ -124,6 +138,34 @@ int unregister_sifive_ccache_error_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+static void ccache_flush_range(phys_addr_t start, size_t len)
+{
+ phys_addr_t end = start + len;
+ phys_addr_t line;
+
+ if (!len)
+ return;
+
+ mb();
+ for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
+ line += SIFIVE_CCACHE_LINE_SIZE) {
+#ifdef CONFIG_32BIT
+ writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+#else
+ writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+#endif
+ mb();
+ }
+}
+
+static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
+ .wback = &ccache_flush_range,
+ .inv = &ccache_flush_range,
+ .wback_inv = &ccache_flush_range,
+};
+#endif /* CONFIG_RISCV_NONSTANDARD_CACHE_OPS */
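
A worked example of the flush loop (illustrative addresses): flushing 0x50
bytes starting at physical address 0x80000030 gives end = 0x80000080;
ALIGN_DOWN picks 0x80000000, and the loop issues flushes for lines
0x80000000 and 0x80000040 before stopping at the end boundary. On 32-bit the
address is written right-shifted by 4 to SIFIVE_CCACHE_FLUSH32, presumably so
physical addresses wider than 32 bits still fit in the register; 64-bit
kernels write the full address to SIFIVE_CCACHE_FLUSH64. The mb() barriers
order the MMIO flush writes against surrounding memory accesses.
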
+
static int ccache_largest_wayenabled(void)
{
return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF;
@@ -210,11 +252,15 @@ static int __init sifive_ccache_init(void)
struct device_node *np;
struct resource res;
int i, rc, intr_num;
+ const struct of_device_id *match;
+ unsigned long quirks;
- np = of_find_matching_node(NULL, sifive_ccache_ids);
+ np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
if (!np)
return -ENODEV;
+ quirks = (uintptr_t)match->data;
+
if (of_address_to_resource(np, 0, &res)) {
rc = -ENODEV;
goto err_node_put;
@@ -240,6 +286,10 @@ static int __init sifive_ccache_init(void)
for (i = 0; i < intr_num; i++) {
g_irq[i] = irq_of_parse_and_map(np, i);
+
+ if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+ continue;
+
rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
NULL);
if (rc) {
@@ -249,6 +299,14 @@ static int __init sifive_ccache_init(void)
}
of_node_put(np);
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+ if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
+ riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
+ riscv_noncoherent_supported();
+ riscv_noncoherent_register_cache_ops(&ccache_mgmt_ops);
+ }
+#endif
+
ccache_config_read();
ccache_cache_ops.get_priv_group = ccache_get_priv_group;
@@ -269,4 +327,4 @@ err_node_put:
return rc;
}
-device_initcall(sifive_ccache_init);
+arch_initcall(sifive_ccache_init);
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 7c1c1f82a..b74d76afc 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -67,6 +67,7 @@
#include <linux/cdx/cdx_bus.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
+#include <linux/debugfs.h>
#include "cdx.h"
/* Default DMA mask for devices on a CDX bus */
@@ -77,9 +78,13 @@
static DEFINE_IDA(cdx_controller_ida);
/* Lock to protect controller ops */
static DEFINE_MUTEX(cdx_controller_lock);
+/* Debugfs dir for cdx bus */
+static struct dentry *cdx_debugfs_dir;
static char *compat_node_name = "xlnx,versal-net-cdx";
+static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num);
+
/**
* cdx_dev_reset - Reset a CDX device
* @dev: CDX device
@@ -148,6 +153,8 @@ static int cdx_unregister_device(struct device *dev,
if (cdx_dev->enabled && cdx->ops->bus_disable)
cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
} else {
+ cdx_destroy_res_attr(cdx_dev, MAX_CDX_DEV_RESOURCES);
+ debugfs_remove_recursive(cdx_dev->debugfs_dir);
kfree(cdx_dev->driver_override);
cdx_dev->driver_override = NULL;
}
@@ -551,6 +558,31 @@ static const struct attribute_group *cdx_dev_groups[] = {
NULL,
};
+static int cdx_debug_resource_show(struct seq_file *s, void *data)
+{
+ struct cdx_device *cdx_dev = s->private;
+ int i;
+
+ for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) {
+ struct resource *res = &cdx_dev->res[i];
+
+ seq_printf(s, "%pr\n", res);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cdx_debug_resource);
+
+static void cdx_device_debugfs_init(struct cdx_device *cdx_dev)
+{
+ cdx_dev->debugfs_dir = debugfs_create_dir(dev_name(&cdx_dev->dev), cdx_debugfs_dir);
+ if (IS_ERR(cdx_dev->debugfs_dir))
+ return;
+
+ debugfs_create_file("resource", 0444, cdx_dev->debugfs_dir, cdx_dev,
+ &cdx_debug_resource_fops);
+}
+
static ssize_t rescan_store(const struct bus_type *bus,
const char *buf, size_t count)
{
@@ -644,11 +676,105 @@ static void cdx_device_release(struct device *dev)
kfree(cdx_dev);
}
+static const struct vm_operations_struct cdx_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+/**
+ * cdx_mmap_resource - map a CDX resource into user memory space
+ * @fp: File pointer. Not used in this function, but required where
+ * this API is registered as a callback.
+ * @kobj: kobject for mapping
+ * @attr: struct bin_attribute for the file being mapped
+ * @vma: struct vm_area_struct passed into the mmap
+ *
+ * Use the regular CDX mapping routines to map a CDX resource into userspace.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int cdx_mmap_resource(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj));
+ int num = (unsigned long)attr->private;
+ struct resource *res;
+ unsigned long size;
+
+ res = &cdx_dev->res[num];
+ if (iomem_is_exclusive(res->start))
+ return -EINVAL;
+
+ /* Make sure the caller is mapping a valid resource for this device */
+ size = ((cdx_resource_len(cdx_dev, num) - 1) >> PAGE_SHIFT) + 1;
+ if (vma->vm_pgoff + vma_pages(vma) > size)
+ return -EINVAL;
+
+ /*
+ * Map memory region and vm->vm_pgoff is expected to be an
+ * offset within that region.
+ */
+ vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+ vma->vm_pgoff += (cdx_resource_start(cdx_dev, num) >> PAGE_SHIFT);
+ vma->vm_ops = &cdx_phys_vm_ops;
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
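
From userspace, the resulting sysfs file can be mapped much like a PCI
resource file. A hedged sketch (the device name and 4 KiB length are
illustrative):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/cdx/devices/cdx-00:00/resource0", O_RDWR);
	void *regs;

	if (fd < 0)
		return 1;

	regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping survives the close */
	if (regs == MAP_FAILED)
		return 1;

	/* access device registers through 'regs' here */

	munmap(regs, 4096);
	return 0;
}
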
+
+static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num)
+{
+ int i;
+
+ /* removing the bin attributes */
+ for (i = 0; i < num; i++) {
+ struct bin_attribute *res_attr;
+
+ res_attr = cdx_dev->res_attr[i];
+ if (res_attr) {
+ sysfs_remove_bin_file(&cdx_dev->dev.kobj, res_attr);
+ kfree(res_attr);
+ }
+ }
+}
+
+#define CDX_RES_ATTR_NAME_LEN 10
+static int cdx_create_res_attr(struct cdx_device *cdx_dev, int num)
+{
+ struct bin_attribute *res_attr;
+ char *res_attr_name;
+ int ret;
+
+ res_attr = kzalloc(sizeof(*res_attr) + CDX_RES_ATTR_NAME_LEN, GFP_ATOMIC);
+ if (!res_attr)
+ return -ENOMEM;
+
+ res_attr_name = (char *)(res_attr + 1);
+
+ sysfs_bin_attr_init(res_attr);
+
+ cdx_dev->res_attr[num] = res_attr;
+ sprintf(res_attr_name, "resource%d", num);
+
+ res_attr->mmap = cdx_mmap_resource;
+ res_attr->attr.name = res_attr_name;
+ res_attr->attr.mode = 0600;
+ res_attr->size = cdx_resource_len(cdx_dev, num);
+ res_attr->private = (void *)(unsigned long)num;
+ ret = sysfs_create_bin_file(&cdx_dev->dev.kobj, res_attr);
+ if (ret)
+ kfree(res_attr);
+
+ return ret;
+}
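
The name buffer here is co-allocated with the attribute itself, so the single
kfree() in cdx_destroy_res_attr() releases both. The resulting layout
(illustrative diagram):

	[ struct bin_attribute ][ "resource<N>\0" ... ]
	  ^res_attr              ^res_attr_name == (char *)(res_attr + 1)

With CDX_RES_ATTR_NAME_LEN of 10, the buffer comfortably holds "resource"
plus a single-digit index and its terminating NUL.
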
+
int cdx_device_add(struct cdx_dev_params *dev_params)
{
struct cdx_controller *cdx = dev_params->cdx;
struct cdx_device *cdx_dev;
- int ret;
+ int ret, i;
cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL);
if (!cdx_dev)
@@ -691,7 +817,28 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
goto fail;
}
+ /* Create resource<N> attributes */
+ for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) {
+ if (cdx_resource_flags(cdx_dev, i) & IORESOURCE_MEM) {
+ /* skip empty resources */
+ if (!cdx_resource_len(cdx_dev, i))
+ continue;
+
+ ret = cdx_create_res_attr(cdx_dev, i);
+ if (ret != 0) {
+ dev_err(&cdx_dev->dev,
+ "cdx device resource<%d> file creation failed: %d", i, ret);
+ goto resource_create_fail;
+ }
+ }
+ }
+
+ cdx_device_debugfs_init(cdx_dev);
+
return 0;
+resource_create_fail:
+ cdx_destroy_res_attr(cdx_dev, i);
+ device_del(&cdx_dev->dev);
fail:
/*
* Do not free cdx_dev here as it would be freed in
@@ -792,6 +939,12 @@ EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER);
static int __init cdx_bus_init(void)
{
- return bus_register(&cdx_bus_type);
+ int ret;
+
+ ret = bus_register(&cdx_bus_type);
+ if (!ret)
+ cdx_debugfs_dir = debugfs_create_dir(cdx_bus_type.name, NULL);
+
+ return ret;
}
postcore_initcall(cdx_bus_init);
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 25834557e..43b09cf19 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,12 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
agpgart-y := backend.o generic.o isoch.o
-ifeq ($(CONFIG_DRM_LEGACY),y)
-agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
-agpgart-y += frontend.o
-endif
-
-
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
obj-$(CONFIG_AGP_ATI) += ati-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8771dcc9b..5c36ab85f 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -185,15 +185,6 @@ void agp_put_bridge(struct agp_bridge_data *bridge);
int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
-/* Frontend routines. */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int agp_frontend_initialize(void);
-void agp_frontend_cleanup(void);
-#else
-static inline int agp_frontend_initialize(void) { return 0; }
-static inline void agp_frontend_cleanup(void) {}
-#endif
-
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 0e19c600d..1776afd3e 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -293,13 +293,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
}
if (list_empty(&agp_bridges)) {
- error = agp_frontend_initialize();
- if (error) {
- dev_info(&bridge->dev->dev,
- "agp_frontend_initialize() failed\n");
- goto frontend_err;
- }
-
dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
bridge->driver->fetch_size(), bridge->gart_bus_addr);
@@ -308,8 +301,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
list_add(&bridge->list, &agp_bridges);
return 0;
-frontend_err:
- agp_backend_cleanup(bridge);
err_out:
module_put(bridge->driver->owner);
err_put_bridge:
@@ -323,8 +314,6 @@ void agp_remove_bridge(struct agp_bridge_data *bridge)
{
agp_backend_cleanup(bridge);
list_del(&bridge->list);
- if (list_empty(&agp_bridges))
- agp_frontend_cleanup();
module_put(bridge->driver->owner);
}
EXPORT_SYMBOL_GPL(agp_remove_bridge);
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
deleted file mode 100644
index 52ffe1706..000000000
--- a/drivers/char/agp/compat_ioctl.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * AGPGART driver frontend compatibility ioctls
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include "agp.h"
-#include "compat_ioctl.h"
-
-static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_info32 userinfo;
- struct agp_kern_info kerninfo;
-
- agp_copy_info(agp_bridge, &kerninfo);
-
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
- (kerninfo.device->device << 16);
- userinfo.agp_mode = kerninfo.mode;
- userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
- userinfo.aper_size = kerninfo.aper_size;
- userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
- userinfo.pg_used = kerninfo.current_memory;
-
- if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_region32 ureserve;
- struct agp_region kreserve;
- struct agp_client *client;
- struct agp_file_private *client_priv;
-
- DBG("");
- if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
- return -EFAULT;
-
- if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
- return -EFAULT;
-
- kreserve.pid = ureserve.pid;
- kreserve.seg_count = ureserve.seg_count;
-
- client = agp_find_client_by_pid(kreserve.pid);
-
- if (kreserve.seg_count == 0) {
- /* remove a client */
- client_priv = agp_find_private(kreserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- if (client == NULL) {
- /* client is already removed */
- return 0;
- }
- return agp_remove_client(kreserve.pid);
- } else {
- struct agp_segment32 *usegment;
- struct agp_segment *ksegment;
- int seg;
-
- if (ureserve.seg_count >= 16384)
- return -EINVAL;
-
- usegment = kmalloc_array(ureserve.seg_count,
- sizeof(*usegment),
- GFP_KERNEL);
- if (!usegment)
- return -ENOMEM;
-
- ksegment = kmalloc_array(kreserve.seg_count,
- sizeof(*ksegment),
- GFP_KERNEL);
- if (!ksegment) {
- kfree(usegment);
- return -ENOMEM;
- }
-
- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
- sizeof(*usegment) * ureserve.seg_count)) {
- kfree(usegment);
- kfree(ksegment);
- return -EFAULT;
- }
-
- for (seg = 0; seg < ureserve.seg_count; seg++) {
- ksegment[seg].pg_start = usegment[seg].pg_start;
- ksegment[seg].pg_count = usegment[seg].pg_count;
- ksegment[seg].prot = usegment[seg].prot;
- }
-
- kfree(usegment);
- kreserve.seg_list = ksegment;
-
- if (client == NULL) {
- /* Create the client and add the segment */
- client = agp_create_client(kreserve.pid);
-
- if (client == NULL) {
- kfree(ksegment);
- return -ENOMEM;
- }
- client_priv = agp_find_private(kreserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- }
- return agp_create_segment(client, &kreserve);
- }
- /* Will never really happen */
- return -EINVAL;
-}
-
-static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_allocate32 alloc;
-
- DBG("");
- if (copy_from_user(&alloc, arg, sizeof(alloc)))
- return -EFAULT;
-
- memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
- if (memory == NULL)
- return -ENOMEM;
-
- alloc.key = memory->key;
- alloc.physical = memory->physical;
-
- if (copy_to_user(arg, &alloc, sizeof(alloc))) {
- agp_free_memory_wrap(memory);
- return -EFAULT;
- }
- return 0;
-}
-
-static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_bind32 bind_info;
- struct agp_memory *memory;
-
- DBG("");
- if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(bind_info.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_unbind32 unbind;
-
- DBG("");
- if (copy_from_user(&unbind, arg, sizeof(unbind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(unbind.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_unbind_memory(memory);
-}
-
-long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct agp_file_private *curr_priv = file->private_data;
- int ret_val = -ENOTTY;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- if ((agp_fe.current_controller == NULL) &&
- (cmd != AGPIOC_ACQUIRE32)) {
- ret_val = -EINVAL;
- goto ioctl_out;
- }
- if ((agp_fe.backend_acquired != true) &&
- (cmd != AGPIOC_ACQUIRE32)) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- if (cmd != AGPIOC_ACQUIRE32) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
- ret_val = -EPERM;
- goto ioctl_out;
- }
- /* Use the original pid of the controller,
- * in case it's threaded */
-
- if (agp_fe.current_controller->pid != curr_priv->my_pid) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- }
-
- switch (cmd) {
- case AGPIOC_INFO32:
- ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_ACQUIRE32:
- ret_val = agpioc_acquire_wrap(curr_priv);
- break;
-
- case AGPIOC_RELEASE32:
- ret_val = agpioc_release_wrap(curr_priv);
- break;
-
- case AGPIOC_SETUP32:
- ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_RESERVE32:
- ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_PROTECT32:
- ret_val = agpioc_protect_wrap(curr_priv);
- break;
-
- case AGPIOC_ALLOCATE32:
- ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_DEALLOCATE32:
- ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
- break;
-
- case AGPIOC_BIND32:
- ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_UNBIND32:
- ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_CHIPSET_FLUSH32:
- break;
- }
-
-ioctl_out:
- DBG("ioctl returns %d\n", ret_val);
- mutex_unlock(&(agp_fe.agp_mutex));
- return ret_val;
-}
-
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
deleted file mode 100644
index f30e0fd97..000000000
--- a/drivers/char/agp/compat_ioctl.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _AGP_COMPAT_IOCTL_H
-#define _AGP_COMPAT_IOCTL_H
-
-#include <linux/compat.h>
-#include <linux/agpgart.h>
-
-#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
-#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
-#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
-#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
-#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
-#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
-#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
-#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
-#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
-#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
-#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
-
-struct agp_info32 {
- struct agp_version version; /* version of the driver */
- u32 bridge_id; /* bridge vendor/device */
- u32 agp_mode; /* mode info of bridge */
- compat_long_t aper_base; /* base of aperture */
- compat_size_t aper_size; /* size of aperture */
- compat_size_t pg_total; /* max pages (swap + system) */
- compat_size_t pg_system; /* max pages (system) */
- compat_size_t pg_used; /* current pages used */
-};
-
-/*
- * The "prot" down below needs still a "sleep" flag somehow ...
- */
-struct agp_segment32 {
- compat_off_t pg_start; /* starting page to populate */
- compat_size_t pg_count; /* number of pages */
- compat_int_t prot; /* prot flags for mmap */
-};
-
-struct agp_region32 {
- compat_pid_t pid; /* pid of process */
- compat_size_t seg_count; /* number of segments */
- struct agp_segment32 *seg_list;
-};
-
-struct agp_allocate32 {
- compat_int_t key; /* tag of allocation */
- compat_size_t pg_count; /* number of pages */
- u32 type; /* 0 == normal, other devspec */
- u32 physical; /* device specific (some devices
- * need a phys address of the
- * actual page behind the gatt
- * table) */
-};
-
-struct agp_bind32 {
- compat_int_t key; /* tag of allocation */
- compat_off_t pg_start; /* starting page to populate */
-};
-
-struct agp_unbind32 {
- compat_int_t key; /* tag of allocation */
- u32 priority; /* priority for paging out */
-};
-
-extern struct agp_front_data agp_fe;
-
-int agpioc_acquire_wrap(struct agp_file_private *priv);
-int agpioc_release_wrap(struct agp_file_private *priv);
-int agpioc_protect_wrap(struct agp_file_private *priv);
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
-struct agp_file_private *agp_find_private(pid_t pid);
-struct agp_client *agp_create_client(pid_t id);
-int agp_remove_client(pid_t id);
-int agp_create_segment(struct agp_client *client, struct agp_region *region);
-void agp_free_memory_wrap(struct agp_memory *memory);
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
-struct agp_memory *agp_find_mem_by_key(int key);
-struct agp_client *agp_find_client_by_pid(pid_t id);
-
-#endif /* _AGP_COMPAT_IOCTL_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
deleted file mode 100644
index 321118a9c..000000000
--- a/drivers/char/agp/frontend.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-/*
- * AGPGART driver frontend
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-
-#include "agp.h"
-#include "compat_ioctl.h"
-
-struct agp_front_data agp_fe;
-
-struct agp_memory *agp_find_mem_by_key(int key)
-{
- struct agp_memory *curr;
-
- if (agp_fe.current_controller == NULL)
- return NULL;
-
- curr = agp_fe.current_controller->pool;
-
- while (curr != NULL) {
- if (curr->key == key)
- break;
- curr = curr->next;
- }
-
- DBG("key=%d -> mem=%p", key, curr);
- return curr;
-}
-
-static void agp_remove_from_pool(struct agp_memory *temp)
-{
- struct agp_memory *prev;
- struct agp_memory *next;
-
- /* Check to see if this is even in the memory pool */
-
- DBG("mem=%p", temp);
- if (agp_find_mem_by_key(temp->key) != NULL) {
- next = temp->next;
- prev = temp->prev;
-
- if (prev != NULL) {
- prev->next = next;
- if (next != NULL)
- next->prev = prev;
-
- } else {
- /* This is the first item on the list */
- if (next != NULL)
- next->prev = NULL;
-
- agp_fe.current_controller->pool = next;
- }
- }
-}
-
-/*
- * Routines for managing each client's segment list -
- * These routines handle adding and removing segments
- * to each auth'ed client.
- */
-
-static struct
-agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
- unsigned long offset,
- int size, pgprot_t page_prot)
-{
- struct agp_segment_priv *seg;
- int i;
- off_t pg_start;
- size_t pg_count;
-
- pg_start = offset / 4096;
- pg_count = size / 4096;
- seg = *(client->segments);
-
- for (i = 0; i < client->num_segments; i++) {
- if ((seg[i].pg_start == pg_start) &&
- (seg[i].pg_count == pg_count) &&
- (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
- return seg + i;
- }
- }
-
- return NULL;
-}
-
-static void agp_remove_seg_from_client(struct agp_client *client)
-{
- DBG("client=%p", client);
-
- if (client->segments != NULL) {
- if (*(client->segments) != NULL) {
- DBG("Freeing %p from client %p", *(client->segments), client);
- kfree(*(client->segments));
- }
- DBG("Freeing %p from client %p", client->segments, client);
- kfree(client->segments);
- client->segments = NULL;
- }
-}
-
-static void agp_add_seg_to_client(struct agp_client *client,
- struct agp_segment_priv ** seg, int num_segments)
-{
- struct agp_segment_priv **prev_seg;
-
- prev_seg = client->segments;
-
- if (prev_seg != NULL)
- agp_remove_seg_from_client(client);
-
- DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
- client->num_segments = num_segments;
- client->segments = seg;
-}
-
-static pgprot_t agp_convert_mmap_flags(int prot)
-{
- unsigned long prot_bits;
-
- prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
- return vm_get_page_prot(prot_bits);
-}
-
-int agp_create_segment(struct agp_client *client, struct agp_region *region)
-{
- struct agp_segment_priv **ret_seg;
- struct agp_segment_priv *seg;
- struct agp_segment *user_seg;
- size_t i;
-
- seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
- if (seg == NULL) {
- kfree(region->seg_list);
- region->seg_list = NULL;
- return -ENOMEM;
- }
- user_seg = region->seg_list;
-
- for (i = 0; i < region->seg_count; i++) {
- seg[i].pg_start = user_seg[i].pg_start;
- seg[i].pg_count = user_seg[i].pg_count;
- seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
- }
- kfree(region->seg_list);
- region->seg_list = NULL;
-
- ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
- if (ret_seg == NULL) {
- kfree(seg);
- return -ENOMEM;
- }
- *ret_seg = seg;
- agp_add_seg_to_client(client, ret_seg, region->seg_count);
- return 0;
-}
-
-/* End - Routines for managing each client's segment list */
-
-/* This function must only be called when current_controller != NULL */
-static void agp_insert_into_pool(struct agp_memory * temp)
-{
- struct agp_memory *prev;
-
- prev = agp_fe.current_controller->pool;
-
- if (prev != NULL) {
- prev->prev = temp;
- temp->next = prev;
- }
- agp_fe.current_controller->pool = temp;
-}
-
-
-/* File private list routines */
-
-struct agp_file_private *agp_find_private(pid_t pid)
-{
- struct agp_file_private *curr;
-
- curr = agp_fe.file_priv_list;
-
- while (curr != NULL) {
- if (curr->my_pid == pid)
- return curr;
- curr = curr->next;
- }
-
- return NULL;
-}
-
-static void agp_insert_file_private(struct agp_file_private * priv)
-{
- struct agp_file_private *prev;
-
- prev = agp_fe.file_priv_list;
-
- if (prev != NULL)
- prev->prev = priv;
- priv->next = prev;
- agp_fe.file_priv_list = priv;
-}
-
-static void agp_remove_file_private(struct agp_file_private * priv)
-{
- struct agp_file_private *next;
- struct agp_file_private *prev;
-
- next = priv->next;
- prev = priv->prev;
-
- if (prev != NULL) {
- prev->next = next;
-
- if (next != NULL)
- next->prev = prev;
-
- } else {
- if (next != NULL)
- next->prev = NULL;
-
- agp_fe.file_priv_list = next;
- }
-}
-
-/* End - File private list routines */
-
-/*
- * Wrappers for agp_free_memory & agp_allocate_memory
- * These make sure that internal lists are kept updated.
- */
-void agp_free_memory_wrap(struct agp_memory *memory)
-{
- agp_remove_from_pool(memory);
- agp_free_memory(memory);
-}
-
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
-{
- struct agp_memory *memory;
-
- memory = agp_allocate_memory(agp_bridge, pg_count, type);
- if (memory == NULL)
- return NULL;
-
- agp_insert_into_pool(memory);
- return memory;
-}
-
-/* Routines for managing the list of controllers -
- * These routines manage the current controller, and the list of
- * controllers
- */
-
-static struct agp_controller *agp_find_controller_by_pid(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = agp_fe.controllers;
-
- while (controller != NULL) {
- if (controller->pid == id)
- return controller;
- controller = controller->next;
- }
-
- return NULL;
-}
-
-static struct agp_controller *agp_create_controller(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
- if (controller == NULL)
- return NULL;
-
- controller->pid = id;
- return controller;
-}
-
-static int agp_insert_controller(struct agp_controller *controller)
-{
- struct agp_controller *prev_controller;
-
- prev_controller = agp_fe.controllers;
- controller->next = prev_controller;
-
- if (prev_controller != NULL)
- prev_controller->prev = controller;
-
- agp_fe.controllers = controller;
-
- return 0;
-}
-
-static void agp_remove_all_clients(struct agp_controller *controller)
-{
- struct agp_client *client;
- struct agp_client *temp;
-
- client = controller->clients;
-
- while (client) {
- struct agp_file_private *priv;
-
- temp = client;
- agp_remove_seg_from_client(temp);
- priv = agp_find_private(temp->pid);
-
- if (priv != NULL) {
- clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
- clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- }
- client = client->next;
- kfree(temp);
- }
-}
-
-static void agp_remove_all_memory(struct agp_controller *controller)
-{
- struct agp_memory *memory;
- struct agp_memory *temp;
-
- memory = controller->pool;
-
- while (memory) {
- temp = memory;
- memory = memory->next;
- agp_free_memory_wrap(temp);
- }
-}
-
-static int agp_remove_controller(struct agp_controller *controller)
-{
- struct agp_controller *prev_controller;
- struct agp_controller *next_controller;
-
- prev_controller = controller->prev;
- next_controller = controller->next;
-
- if (prev_controller != NULL) {
- prev_controller->next = next_controller;
- if (next_controller != NULL)
- next_controller->prev = prev_controller;
-
- } else {
- if (next_controller != NULL)
- next_controller->prev = NULL;
-
- agp_fe.controllers = next_controller;
- }
-
- agp_remove_all_memory(controller);
- agp_remove_all_clients(controller);
-
- if (agp_fe.current_controller == controller) {
- agp_fe.current_controller = NULL;
- agp_fe.backend_acquired = false;
- agp_backend_release(agp_bridge);
- }
- kfree(controller);
- return 0;
-}
-
-static void agp_controller_make_current(struct agp_controller *controller)
-{
- struct agp_client *clients;
-
- clients = controller->clients;
-
- while (clients != NULL) {
- struct agp_file_private *priv;
-
- priv = agp_find_private(clients->pid);
-
- if (priv != NULL) {
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- }
- clients = clients->next;
- }
-
- agp_fe.current_controller = controller;
-}
-
-static void agp_controller_release_current(struct agp_controller *controller,
- struct agp_file_private *controller_priv)
-{
- struct agp_client *clients;
-
- clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
- clients = controller->clients;
-
- while (clients != NULL) {
- struct agp_file_private *priv;
-
- priv = agp_find_private(clients->pid);
-
- if (priv != NULL)
- clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-
- clients = clients->next;
- }
-
- agp_fe.current_controller = NULL;
- agp_fe.used_by_controller = false;
- agp_backend_release(agp_bridge);
-}
-
-/*
- * Routines for managing client lists -
- * These routines are for managing the list of auth'ed clients.
- */
-
-static struct agp_client
-*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
-{
- struct agp_client *client;
-
- if (controller == NULL)
- return NULL;
-
- client = controller->clients;
-
- while (client != NULL) {
- if (client->pid == id)
- return client;
- client = client->next;
- }
-
- return NULL;
-}
-
-static struct agp_controller *agp_find_controller_for_client(pid_t id)
-{
- struct agp_controller *controller;
-
- controller = agp_fe.controllers;
-
- while (controller != NULL) {
- if ((agp_find_client_in_controller(controller, id)) != NULL)
- return controller;
- controller = controller->next;
- }
-
- return NULL;
-}
-
-struct agp_client *agp_find_client_by_pid(pid_t id)
-{
- struct agp_client *temp;
-
- if (agp_fe.current_controller == NULL)
- return NULL;
-
- temp = agp_find_client_in_controller(agp_fe.current_controller, id);
- return temp;
-}
-
-static void agp_insert_client(struct agp_client *client)
-{
- struct agp_client *prev_client;
-
- prev_client = agp_fe.current_controller->clients;
- client->next = prev_client;
-
- if (prev_client != NULL)
- prev_client->prev = client;
-
- agp_fe.current_controller->clients = client;
- agp_fe.current_controller->num_clients++;
-}
-
-struct agp_client *agp_create_client(pid_t id)
-{
- struct agp_client *new_client;
-
- new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
- if (new_client == NULL)
- return NULL;
-
- new_client->pid = id;
- agp_insert_client(new_client);
- return new_client;
-}
-
-int agp_remove_client(pid_t id)
-{
- struct agp_client *client;
- struct agp_client *prev_client;
- struct agp_client *next_client;
- struct agp_controller *controller;
-
- controller = agp_find_controller_for_client(id);
- if (controller == NULL)
- return -EINVAL;
-
- client = agp_find_client_in_controller(controller, id);
- if (client == NULL)
- return -EINVAL;
-
- prev_client = client->prev;
- next_client = client->next;
-
- if (prev_client != NULL) {
- prev_client->next = next_client;
- if (next_client != NULL)
- next_client->prev = prev_client;
-
- } else {
- if (next_client != NULL)
- next_client->prev = NULL;
- controller->clients = next_client;
- }
-
- controller->num_clients--;
- agp_remove_seg_from_client(client);
- kfree(client);
- return 0;
-}
-
-/* End - Routines for managing client lists */
-
-/* File Operations */
-
-static int agp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- unsigned int size, current_size;
- unsigned long offset;
- struct agp_client *client;
- struct agp_file_private *priv = file->private_data;
- struct agp_kern_info kerninfo;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- if (agp_fe.backend_acquired != true)
- goto out_eperm;
-
- if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
- goto out_eperm;
-
- agp_copy_info(agp_bridge, &kerninfo);
- size = vma->vm_end - vma->vm_start;
- current_size = kerninfo.aper_size;
- current_size = current_size * 0x100000;
- offset = vma->vm_pgoff << PAGE_SHIFT;
- DBG("%lx:%lx", offset, offset+size);
-
- if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
- if ((size + offset) > current_size)
- goto out_inval;
-
- client = agp_find_client_by_pid(current->pid);
-
- if (client == NULL)
- goto out_eperm;
-
- if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
- goto out_inval;
-
- DBG("client vm_ops=%p", kerninfo.vm_ops);
- if (kerninfo.vm_ops) {
- vma->vm_ops = kerninfo.vm_ops;
- } else if (io_remap_pfn_range(vma, vma->vm_start,
- (kerninfo.aper_base + offset) >> PAGE_SHIFT,
- size,
- pgprot_writecombine(vma->vm_page_prot))) {
- goto out_again;
- }
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
- }
-
- if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
- if (size != current_size)
- goto out_inval;
-
- DBG("controller vm_ops=%p", kerninfo.vm_ops);
- if (kerninfo.vm_ops) {
- vma->vm_ops = kerninfo.vm_ops;
- } else if (io_remap_pfn_range(vma, vma->vm_start,
- kerninfo.aper_base >> PAGE_SHIFT,
- size,
- pgprot_writecombine(vma->vm_page_prot))) {
- goto out_again;
- }
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
- }
-
-out_eperm:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EPERM;
-
-out_inval:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EINVAL;
-
-out_again:
- mutex_unlock(&(agp_fe.agp_mutex));
- return -EAGAIN;
-}
-
-static int agp_release(struct inode *inode, struct file *file)
-{
- struct agp_file_private *priv = file->private_data;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- DBG("priv=%p", priv);
-
- if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
- struct agp_controller *controller;
-
- controller = agp_find_controller_by_pid(priv->my_pid);
-
- if (controller != NULL) {
- if (controller == agp_fe.current_controller)
- agp_controller_release_current(controller, priv);
- agp_remove_controller(controller);
- controller = NULL;
- }
- }
-
- if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
- agp_remove_client(priv->my_pid);
-
- agp_remove_file_private(priv);
- kfree(priv);
- file->private_data = NULL;
- mutex_unlock(&(agp_fe.agp_mutex));
- return 0;
-}
-
-static int agp_open(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct agp_file_private *priv;
- struct agp_client *client;
-
- if (minor != AGPGART_MINOR)
- return -ENXIO;
-
- mutex_lock(&(agp_fe.agp_mutex));
-
- priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
- if (priv == NULL) {
- mutex_unlock(&(agp_fe.agp_mutex));
- return -ENOMEM;
- }
-
- set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
- priv->my_pid = current->pid;
-
- if (capable(CAP_SYS_RAWIO))
- /* Root priv, can be controller */
- set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
-
- client = agp_find_client_by_pid(current->pid);
-
- if (client != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- }
- file->private_data = (void *) priv;
- agp_insert_file_private(priv);
- DBG("private=%p, client=%p", priv, client);
-
- mutex_unlock(&(agp_fe.agp_mutex));
-
- return 0;
-}
-
-static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_info userinfo;
- struct agp_kern_info kerninfo;
-
- agp_copy_info(agp_bridge, &kerninfo);
-
- memset(&userinfo, 0, sizeof(userinfo));
- userinfo.version.major = kerninfo.version.major;
- userinfo.version.minor = kerninfo.version.minor;
- userinfo.bridge_id = kerninfo.device->vendor |
- (kerninfo.device->device << 16);
- userinfo.agp_mode = kerninfo.mode;
- userinfo.aper_base = kerninfo.aper_base;
- userinfo.aper_size = kerninfo.aper_size;
- userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
- userinfo.pg_used = kerninfo.current_memory;
-
- if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
- return -EFAULT;
-
- return 0;
-}
-
-int agpioc_acquire_wrap(struct agp_file_private *priv)
-{
- struct agp_controller *controller;
-
- DBG("");
-
- if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
- return -EPERM;
-
- if (agp_fe.current_controller != NULL)
- return -EBUSY;
-
- if (!agp_bridge)
- return -ENODEV;
-
- if (atomic_read(&agp_bridge->agp_in_use))
- return -EBUSY;
-
- atomic_inc(&agp_bridge->agp_in_use);
-
- agp_fe.backend_acquired = true;
-
- controller = agp_find_controller_by_pid(priv->my_pid);
-
- if (controller != NULL) {
- agp_controller_make_current(controller);
- } else {
- controller = agp_create_controller(priv->my_pid);
-
- if (controller == NULL) {
- agp_fe.backend_acquired = false;
- agp_backend_release(agp_bridge);
- return -ENOMEM;
- }
- agp_insert_controller(controller);
- agp_controller_make_current(controller);
- }
-
- set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &priv->access_flags);
- return 0;
-}
-
-int agpioc_release_wrap(struct agp_file_private *priv)
-{
- DBG("");
- agp_controller_release_current(agp_fe.current_controller, priv);
- return 0;
-}
-
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_setup mode;
-
- DBG("");
- if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
- return -EFAULT;
-
- agp_enable(agp_bridge, mode.agp_mode);
- return 0;
-}
-
-static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_region reserve;
- struct agp_client *client;
- struct agp_file_private *client_priv;
-
- DBG("");
- if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
- return -EFAULT;
-
- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
- return -EFAULT;
-
- client = agp_find_client_by_pid(reserve.pid);
-
- if (reserve.seg_count == 0) {
- /* remove a client */
- client_priv = agp_find_private(reserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- if (client == NULL) {
- /* client is already removed */
- return 0;
- }
- return agp_remove_client(reserve.pid);
- } else {
- struct agp_segment *segment;
-
- if (reserve.seg_count >= 16384)
- return -EINVAL;
-
- segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
- GFP_KERNEL);
-
- if (segment == NULL)
- return -ENOMEM;
-
- if (copy_from_user(segment, (void __user *) reserve.seg_list,
- sizeof(struct agp_segment) * reserve.seg_count)) {
- kfree(segment);
- return -EFAULT;
- }
- reserve.seg_list = segment;
-
- if (client == NULL) {
- /* Create the client and add the segment */
- client = agp_create_client(reserve.pid);
-
- if (client == NULL) {
- kfree(segment);
- return -ENOMEM;
- }
- client_priv = agp_find_private(reserve.pid);
-
- if (client_priv != NULL) {
- set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
- set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
- }
- }
- return agp_create_segment(client, &reserve);
- }
- /* Will never really happen */
- return -EINVAL;
-}
-
-int agpioc_protect_wrap(struct agp_file_private *priv)
-{
- DBG("");
- /* This function is not currently implemented */
- return -EINVAL;
-}
-
-static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_allocate alloc;
-
- DBG("");
- if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
- return -EFAULT;
-
- if (alloc.type >= AGP_USER_TYPES)
- return -EINVAL;
-
- memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
- if (memory == NULL)
- return -ENOMEM;
-
- alloc.key = memory->key;
- alloc.physical = memory->physical;
-
- if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
- agp_free_memory_wrap(memory);
- return -EFAULT;
- }
- return 0;
-}
-
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
-{
- struct agp_memory *memory;
-
- DBG("");
- memory = agp_find_mem_by_key(arg);
-
- if (memory == NULL)
- return -EINVAL;
-
- agp_free_memory_wrap(memory);
- return 0;
-}
-
-static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_bind bind_info;
- struct agp_memory *memory;
-
- DBG("");
- if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(bind_info.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
- struct agp_memory *memory;
- struct agp_unbind unbind;
-
- DBG("");
- if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
- return -EFAULT;
-
- memory = agp_find_mem_by_key(unbind.key);
-
- if (memory == NULL)
- return -EINVAL;
-
- return agp_unbind_memory(memory);
-}
-
-static long agp_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct agp_file_private *curr_priv = file->private_data;
- int ret_val = -ENOTTY;
-
- DBG("priv=%p, cmd=%x", curr_priv, cmd);
- mutex_lock(&(agp_fe.agp_mutex));
-
- if ((agp_fe.current_controller == NULL) &&
- (cmd != AGPIOC_ACQUIRE)) {
- ret_val = -EINVAL;
- goto ioctl_out;
- }
- if ((agp_fe.backend_acquired != true) &&
- (cmd != AGPIOC_ACQUIRE)) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- if (cmd != AGPIOC_ACQUIRE) {
- if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
- ret_val = -EPERM;
- goto ioctl_out;
- }
- /* Use the original pid of the controller,
- * in case it's threaded */
-
- if (agp_fe.current_controller->pid != curr_priv->my_pid) {
- ret_val = -EBUSY;
- goto ioctl_out;
- }
- }
-
- switch (cmd) {
- case AGPIOC_INFO:
- ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_ACQUIRE:
- ret_val = agpioc_acquire_wrap(curr_priv);
- break;
-
- case AGPIOC_RELEASE:
- ret_val = agpioc_release_wrap(curr_priv);
- break;
-
- case AGPIOC_SETUP:
- ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_RESERVE:
- ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_PROTECT:
- ret_val = agpioc_protect_wrap(curr_priv);
- break;
-
- case AGPIOC_ALLOCATE:
- ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_DEALLOCATE:
- ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
- break;
-
- case AGPIOC_BIND:
- ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_UNBIND:
- ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
- break;
-
- case AGPIOC_CHIPSET_FLUSH:
- break;
- }
-
-ioctl_out:
- DBG("ioctl returns %d\n", ret_val);
- mutex_unlock(&(agp_fe.agp_mutex));
- return ret_val;
-}
-
-static const struct file_operations agp_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = agp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_agp_ioctl,
-#endif
- .mmap = agp_mmap,
- .open = agp_open,
- .release = agp_release,
-};
-
-static struct miscdevice agp_miscdev =
-{
- .minor = AGPGART_MINOR,
- .name = "agpgart",
- .fops = &agp_fops
-};
-
-int agp_frontend_initialize(void)
-{
- memset(&agp_fe, 0, sizeof(struct agp_front_data));
- mutex_init(&(agp_fe.agp_mutex));
-
- if (misc_register(&agp_miscdev)) {
- printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
- return -EIO;
- }
- return 0;
-}
-
-void agp_frontend_cleanup(void)
-{
- misc_deregister(&agp_miscdev);
-}
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index a37367ebc..e9157255f 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -161,15 +161,13 @@ static int atmel_trng_probe(struct platform_device *pdev)
return ret;
}
-static int atmel_trng_remove(struct platform_device *pdev)
+static void atmel_trng_remove(struct platform_device *pdev)
{
struct atmel_trng *trng = platform_get_drvdata(pdev);
atmel_trng_cleanup(trng);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
-
- return 0;
}
static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
@@ -218,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove = atmel_trng_remove,
+ .remove_new = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.pm = pm_ptr(&atmel_trng_pm_ops),
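The atmel-trng change above, like the hw_random conversions that follow, moves from the int-returning .remove() platform callback to the void .remove_new() callback; the driver core ignored the returned value, so drivers that cannot fail removal now say so in the prototype. A minimal sketch of the converted shape, with hypothetical foo_* names standing in for the per-driver functions:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* claim resources, register the hwrng, etc. */
	return 0;
}

/*
 * New-style remove callback: returns void. The driver core ignored
 * the old int return value, so "failing" removal never worked; the
 * new prototype makes that explicit.
 */
static void foo_remove(struct platform_device *pdev)
{
	/* undo foo_probe() in reverse order */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,	/* was: .remove (int-returning) */
	.driver     = {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");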
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 1abbff04a..c0d2f8247 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -560,7 +560,7 @@ post_pm_err:
return rc;
}
-static int cctrng_remove(struct platform_device *pdev)
+static void cctrng_remove(struct platform_device *pdev)
{
struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -570,8 +570,6 @@ static int cctrng_remove(struct platform_device *pdev)
cc_trng_pm_fini(drvdata);
dev_info(dev, "ARM cctrng device terminated\n");
-
- return 0;
}
static int __maybe_unused cctrng_suspend(struct device *dev)
@@ -654,7 +652,7 @@ static struct platform_driver cctrng_driver = {
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
- .remove = cctrng_remove,
+ .remove_new = cctrng_remove,
};
module_platform_driver(cctrng_driver);
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 30207b7ac..0ed5d22fe 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -173,7 +173,7 @@ err_pm_get:
return ret;
}
-static int exynos_trng_remove(struct platform_device *pdev)
+static void exynos_trng_remove(struct platform_device *pdev)
{
struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
@@ -181,8 +181,6 @@ static int exynos_trng_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static int exynos_trng_suspend(struct device *dev)
@@ -223,7 +221,7 @@ static struct platform_driver exynos_trng_driver = {
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
- .remove = exynos_trng_remove,
+ .remove_new = exynos_trng_remove,
};
module_platform_driver(exynos_trng_driver);
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index 4f18c3fa5..2f9b6483c 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -114,15 +114,13 @@ static int ingenic_rng_probe(struct platform_device *pdev)
return 0;
}
-static int ingenic_rng_remove(struct platform_device *pdev)
+static void ingenic_rng_remove(struct platform_device *pdev)
{
struct ingenic_rng *priv = platform_get_drvdata(pdev);
hwrng_unregister(&priv->rng);
writel(0, priv->base + RNG_REG_ERNG_OFFSET);
-
- return 0;
}
static const struct of_device_id ingenic_rng_of_match[] = {
@@ -134,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
static struct platform_driver ingenic_rng_driver = {
.probe = ingenic_rng_probe,
- .remove = ingenic_rng_remove,
+ .remove_new = ingenic_rng_remove,
.driver = {
.name = "ingenic-rng",
.of_match_table = ingenic_rng_of_match,
diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c
index b1f94e3c0..9776f4daa 100644
--- a/drivers/char/hw_random/jh7110-trng.c
+++ b/drivers/char/hw_random/jh7110-trng.c
@@ -369,8 +369,12 @@ static int __maybe_unused starfive_trng_resume(struct device *dev)
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend,
- starfive_trng_resume);
+static const struct dev_pm_ops starfive_trng_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(starfive_trng_suspend,
+ starfive_trng_resume)
+ SET_RUNTIME_PM_OPS(starfive_trng_suspend,
+ starfive_trng_resume, NULL)
+};
static const struct of_device_id trng_dt_ids[] __maybe_unused = {
{ .compatible = "starfive,jh7110-trng" },
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index dff7b9db7..36c34252b 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -241,12 +241,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
}
-static int ks_sa_rng_remove(struct platform_device *pdev)
+static void ks_sa_rng_remove(struct platform_device *pdev)
{
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id ks_sa_rng_dt_match[] = {
@@ -263,7 +261,7 @@ static struct platform_driver ks_sa_rng_driver = {
.of_match_table = ks_sa_rng_dt_match,
},
.probe = ks_sa_rng_probe,
- .remove = ks_sa_rng_remove,
+ .remove_new = ks_sa_rng_remove,
};
module_platform_driver(ks_sa_rng_driver);
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 008763c98..07ec000e4 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -176,15 +176,13 @@ err_ioremap:
return err;
}
-static int __exit mxc_rnga_remove(struct platform_device *pdev)
+static void __exit mxc_rnga_remove(struct platform_device *pdev)
{
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
hwrng_unregister(&mxc_rng->rng);
clk_disable_unprepare(mxc_rng->clk);
-
- return 0;
}
static const struct of_device_id mxc_rnga_of_match[] = {
@@ -199,7 +197,7 @@ static struct platform_driver mxc_rnga_driver = {
.name = "mxc_rnga",
.of_match_table = mxc_rnga_of_match,
},
- .remove = __exit_p(mxc_rnga_remove),
+ .remove_new = __exit_p(mxc_rnga_remove),
};
module_platform_driver_probe(mxc_rnga_driver, mxc_rnga_probe);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index aaae16b98..2e669e7c1 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -781,7 +781,7 @@ out:
return err;
}
-static int n2rng_remove(struct platform_device *op)
+static void n2rng_remove(struct platform_device *op)
{
struct n2rng *np = platform_get_drvdata(op);
@@ -790,8 +790,6 @@ static int n2rng_remove(struct platform_device *op)
cancel_delayed_work_sync(&np->work);
sun4v_hvapi_unregister(HV_GRP_RNG);
-
- return 0;
}
static struct n2rng_template n2_template = {
@@ -860,7 +858,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove = n2rng_remove,
+ .remove_new = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 8a304b754..bce8c4829 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -126,15 +126,13 @@ static int npcm_rng_probe(struct platform_device *pdev)
return 0;
}
-static int npcm_rng_remove(struct platform_device *pdev)
+static void npcm_rng_remove(struct platform_device *pdev)
{
struct npcm_rng *priv = platform_get_drvdata(pdev);
devm_hwrng_unregister(&pdev->dev, &priv->rng);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -178,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
.of_match_table = of_match_ptr(rng_dt_id),
},
.probe = npcm_rng_probe,
- .remove = npcm_rng_remove,
+ .remove_new = npcm_rng_remove,
};
module_platform_driver(npcm_rng_driver);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index be03f76a2..d4c02e900 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -509,7 +509,7 @@ err_ioremap:
return ret;
}
-static int omap_rng_remove(struct platform_device *pdev)
+static void omap_rng_remove(struct platform_device *pdev)
{
struct omap_rng_dev *priv = platform_get_drvdata(pdev);
@@ -521,8 +521,6 @@ static int omap_rng_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
clk_disable_unprepare(priv->clk_reg);
-
- return 0;
}
static int __maybe_unused omap_rng_suspend(struct device *dev)
@@ -560,7 +558,7 @@ static struct platform_driver omap_rng_driver = {
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
- .remove = omap_rng_remove,
+ .remove_new = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index efd6edcd7..379bc245c 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -363,11 +363,9 @@ static int stm32_rng_init(struct hwrng *rng)
return 0;
}
-static int stm32_rng_remove(struct platform_device *ofdev)
+static void stm32_rng_remove(struct platform_device *ofdev)
{
pm_runtime_disable(&ofdev->dev);
-
- return 0;
}
static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
@@ -558,7 +556,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
- .remove = stm32_rng_remove,
+ .remove_new = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 3db9d868e..65b826033 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -174,13 +174,11 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
return 0;
}
-static int timeriomem_rng_remove(struct platform_device *pdev)
+static void timeriomem_rng_remove(struct platform_device *pdev)
{
struct timeriomem_rng_private *priv = platform_get_drvdata(pdev);
hrtimer_cancel(&priv->timer);
-
- return 0;
}
static const struct of_device_id timeriomem_rng_match[] = {
@@ -195,7 +193,7 @@ static struct platform_driver timeriomem_rng_driver = {
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
- .remove = timeriomem_rng_remove,
+ .remove_new = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index e41a84e6b..7a4b45393 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -135,7 +135,7 @@ static int probe_common(struct virtio_device *vdev)
if (!vi)
return -ENOMEM;
- vi->index = index = ida_simple_get(&rng_index_ida, 0, 0, GFP_KERNEL);
+ vi->index = index = ida_alloc(&rng_index_ida, GFP_KERNEL);
if (index < 0) {
err = index;
goto err_ida;
@@ -166,7 +166,7 @@ static int probe_common(struct virtio_device *vdev)
return 0;
err_find:
- ida_simple_remove(&rng_index_ida, index);
+ ida_free(&rng_index_ida, index);
err_ida:
kfree(vi);
return err;
@@ -184,7 +184,7 @@ static void remove_common(struct virtio_device *vdev)
hwrng_unregister(&vi->hwrng);
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
- ida_simple_remove(&rng_index_ida, vi->index);
+ ida_free(&rng_index_ida, vi->index);
kfree(vi);
}
@@ -208,7 +208,6 @@ static void virtrng_scan(struct virtio_device *vdev)
vi->hwrng_register_done = true;
}
-#ifdef CONFIG_PM_SLEEP
static int virtrng_freeze(struct virtio_device *vdev)
{
remove_common(vdev);
@@ -238,7 +237,6 @@ static int virtrng_restore(struct virtio_device *vdev)
return err;
}
-#endif
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
@@ -252,10 +250,8 @@ static struct virtio_driver virtio_rng_driver = {
.probe = virtrng_probe,
.remove = virtrng_remove,
.scan = virtrng_scan,
-#ifdef CONFIG_PM_SLEEP
- .freeze = virtrng_freeze,
- .restore = virtrng_restore,
-#endif
+ .freeze = pm_sleep_ptr(virtrng_freeze),
+ .restore = pm_sleep_ptr(virtrng_restore),
};
module_virtio_driver(virtio_rng_driver);
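The ida_simple_get()/ida_simple_remove() calls replaced above (and in the ppdev and ipmi hunks below) are deprecated wrappers; ida_alloc() hands out the lowest free non-negative ID and ida_free() returns it. A minimal usage sketch, with a hypothetical example_ida standing in for rng_index_ida:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_index(void)
{
	/*
	 * Equivalent to the old ida_simple_get(&ida, 0, 0, GFP_KERNEL):
	 * returns the lowest free ID, or a negative errno on failure.
	 */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_index(int index)
{
	ida_free(&example_ida, index);	/* was ida_simple_remove() */
}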
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 7382724bf..642d13519 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -357,15 +357,13 @@ static int xgene_rng_probe(struct platform_device *pdev)
return 0;
}
-static int xgene_rng_remove(struct platform_device *pdev)
+static void xgene_rng_remove(struct platform_device *pdev)
{
int rc;
rc = device_init_wakeup(&pdev->dev, 0);
if (rc)
dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc);
-
- return 0;
}
static const struct of_device_id xgene_rng_of_match[] = {
@@ -377,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
static struct platform_driver xgene_rng_driver = {
.probe = xgene_rng_probe,
- .remove = xgene_rng_remove,
+ .remove_new = xgene_rng_remove,
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d6f142796..b0eedc459 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3053,7 +3053,7 @@ static void cleanup_bmc_work(struct work_struct *work)
int id = bmc->pdev.id; /* Unregister overwrites id */
platform_device_unregister(&bmc->pdev);
- ida_simple_remove(&ipmi_bmc_ida, id);
+ ida_free(&ipmi_bmc_ida, id);
}
static void
@@ -3169,7 +3169,7 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
bmc->pdev.name = "ipmi_bmc";
- rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+ rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL);
if (rv < 0) {
kfree(bmc);
goto out;
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
index ed5e91b1e..0c92fa3ee 100644
--- a/drivers/char/ipmi/ipmi_si_hardcode.c
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -80,10 +80,10 @@ static void __init ipmi_hardcode_init_one(const char *si_type_str,
}
p.regsize = regsizes[i];
+ p.regspacing = regspacings[i];
p.slave_addr = slave_addrs[i];
p.addr_source = SI_HARDCODED;
p.regshift = regshifts[i];
- p.regsize = regsizes[i];
p.addr = addr;
p.space = addr_space;
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index c3d8ac787..cd2edd8f8 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -11,10 +11,11 @@
#include <linux/types.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/acpi.h>
#include "ipmi_si.h"
#include "ipmi_dmi.h"
@@ -224,7 +225,6 @@ MODULE_DEVICE_TABLE(of, of_ipmi_match);
static int of_ipmi_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct si_sm_io io;
struct resource resource;
const __be32 *regsize, *regspacing, *regshift;
@@ -237,10 +237,6 @@ static int of_ipmi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "probing via device tree\n");
- match = of_match_device(of_ipmi_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
if (!of_device_is_available(np))
return -EINVAL;
@@ -269,7 +265,7 @@ static int of_ipmi_probe(struct platform_device *pdev)
}
memset(&io, 0, sizeof(io));
- io.si_type = (unsigned long) match->data;
+ io.si_type = (enum si_type)device_get_match_data(&pdev->dev);
io.addr_source = SI_DEVICETREE;
io.irq_setup = ipmi_std_irq_setup;
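The of_match_device() boilerplate dropped above is subsumed by device_get_match_data(), which returns the .data pointer of whatever of_device_id (or other fwnode table entry) the core already matched for the device. A hedged sketch of the pattern, with hypothetical vendor/foo names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

enum foo_type { FOO_A, FOO_B };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-a", .data = (void *)FOO_A },
	{ .compatible = "vendor,foo-b", .data = (void *)FOO_B },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Replaces of_match_device() + match->data: the core matched
	 * the device before probe ran, so just fetch the match data.
	 */
	enum foo_type type =
		(enum foo_type)(uintptr_t)device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "probed variant %d\n", type);
	return 0;
}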
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 4c188e9e4..ee951b265 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -299,7 +299,7 @@ static int register_device(int minor, struct pp_struct *pp)
goto err;
}
- index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ index = ida_alloc(&ida_index, GFP_KERNEL);
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
ppdev_cb.irq_func = pp_irq;
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
@@ -310,7 +310,7 @@ static int register_device(int minor, struct pp_struct *pp)
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
rc = -ENXIO;
- ida_simple_remove(&ida_index, index);
+ ida_free(&ida_index, index);
goto err;
}
@@ -750,7 +750,7 @@ static int pp_release(struct inode *inode, struct file *file)
if (pp->pdev) {
parport_unregister_device(pp->pdev);
- ida_simple_remove(&ida_index, pp->index);
+ ida_free(&ida_index, pp->index);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4a9c79391..2597cb43f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
- static struct execute_work set_ready;
+ static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized)
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized && system_unbound_wq)
+ queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
- * are initialized, then we should enable the static branch here, where
- * it's guaranteed that jump labels have been initialized.
+ * or workqueues are initialized, then we should enable the static
+ * branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
@@ -1364,7 +1364,6 @@ static void __cold try_to_generate_entropy(void)
SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
struct iov_iter iter;
- struct iovec iov;
int ret;
if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
@@ -1385,7 +1384,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
return ret;
}
- ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
+ ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
if (unlikely(ret))
return ret;
return get_random_bytes_user(&iter);
@@ -1491,7 +1490,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return 0;
case RNDADDENTROPY: {
struct iov_iter iter;
- struct iovec iov;
ssize_t ret;
int len;
@@ -1503,7 +1501,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (get_user(len, p++))
return -EFAULT;
- ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
+ ret = import_ubuf(ITER_SOURCE, p, len, &iter);
if (unlikely(ret))
return ret;
ret = write_pool_user(&iter);
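import_single_range() with an on-stack struct iovec was the long way of describing a single user buffer; import_ubuf() builds the iov_iter over the buffer directly, so both hunks above lose the iovec local. A minimal sketch mirroring the getrandom()/RNDADDENTROPY shape, with a hypothetical helper name:

#include <linux/uaccess.h>
#include <linux/uio.h>

static ssize_t example_fill_user(char __user *ubuf, size_t len)
{
	struct iov_iter iter;
	int ret;

	/* One user buffer becomes an ITER_UBUF iterator, no iovec. */
	ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
	if (unlikely(ret))
		return ret;
	/* ... then copy_to_iter(src, n, &iter) as needed ... */
	return 0;
}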
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index d7be03c41..5490f7e0f 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -19,7 +19,8 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include "tpm.h"
/* I2C interface offsets */
@@ -524,7 +525,6 @@ static int get_vid(struct i2c_client *client, u32 *res)
static int i2c_nuvoton_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
int rc;
struct tpm_chip *chip;
struct device *dev = &client->dev;
@@ -546,15 +546,8 @@ static int i2c_nuvoton_probe(struct i2c_client *client)
if (!priv)
return -ENOMEM;
- if (dev->of_node) {
- const struct of_device_id *of_id;
-
- of_id = of_match_device(dev->driver->of_match_table, dev);
- if (of_id && of_id->data == OF_IS_TPM2)
- chip->flags |= TPM_CHIP_FLAG_TPM2;
- } else
- if (id->driver_data == I2C_IS_TPM2)
- chip->flags |= TPM_CHIP_FLAG_TPM2;
+ if (i2c_get_match_data(client))
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
init_waitqueue_head(&priv->read_queue);
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index e70abd69e..adf229921 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -235,7 +235,7 @@ out:
* @len: Number of bytes to write.
*
* The provided address is prepended to the data in 'buffer', the
- * cobined address+data is sent to the TPM, then wait for TPM to
+ * combined address+data is sent to the TPM, then wait for TPM to
* indicate it is done writing.
*
* Return:
@@ -671,7 +671,6 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
/**
* tpm_cr50_i2c_probe() - Driver probe function.
* @client: I2C client information.
- * @id: I2C device id.
*
* Return:
* - 0: Success.
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 5af804c17..4c806a189 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -40,7 +40,7 @@ static struct ttyprintk_port tpk_port;
static int tpk_curr;
-static char tpk_buffer[TPK_STR_SIZE + 4];
+static u8 tpk_buffer[TPK_STR_SIZE + 4];
static void tpk_flush(void)
{
@@ -51,9 +51,9 @@ static void tpk_flush(void)
}
}
-static int tpk_printk(const u8 *buf, int count)
+static int tpk_printk(const u8 *buf, size_t count)
{
- int i;
+ size_t i;
for (i = 0; i < count; i++) {
if (tpk_curr >= TPK_STR_SIZE) {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 431e9e5bf..035f89f1a 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -230,9 +230,6 @@ struct port {
bool guest_connected;
};
-/* This is the very early arch-specified put chars function. */
-static int (*early_put_chars)(u32, const char *, int);
-
static struct port *find_port_by_vtermno(u32 vtermno)
{
struct port *port;
@@ -653,7 +650,7 @@ done:
* Give out the data that's requested from the buffer that we have
* queued up.
*/
-static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
+static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf,
size_t out_count, bool to_user)
{
struct port_buffer *buf;
@@ -672,7 +669,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
if (ret)
return -EFAULT;
} else {
- memcpy((__force char *)out_buf, buf->buf + buf->offset,
+ memcpy((__force u8 *)out_buf, buf->buf + buf->offset,
out_count);
}
@@ -1107,16 +1104,13 @@ static const struct file_operations port_fops = {
* it to finish: inefficient in theory, but in practice
* implementations will do it immediately.
*/
-static int put_chars(u32 vtermno, const char *buf, int count)
+static ssize_t put_chars(u32 vtermno, const u8 *buf, size_t count)
{
struct port *port;
struct scatterlist sg[1];
void *data;
int ret;
- if (unlikely(early_put_chars))
- return early_put_chars(vtermno, buf, count);
-
port = find_port_by_vtermno(vtermno);
if (!port)
return -EPIPE;
@@ -1138,14 +1132,10 @@ static int put_chars(u32 vtermno, const char *buf, int count)
* We call out to fill_readbuf that gets us the required data from the
* buffers that are queued up.
*/
-static int get_chars(u32 vtermno, char *buf, int count)
+static ssize_t get_chars(u32 vtermno, u8 *buf, size_t count)
{
struct port *port;
- /* If we've not set up the port yet, we have no input to give. */
- if (unlikely(early_put_chars))
- return 0;
-
port = find_port_by_vtermno(vtermno);
if (!port)
return -EPIPE;
@@ -1153,7 +1143,7 @@ static int get_chars(u32 vtermno, char *buf, int count)
/* If we don't have an input queue yet, we can't get input. */
BUG_ON(!port->in_vq);
- return fill_readbuf(port, (__force char __user *)buf, count, false);
+ return fill_readbuf(port, (__force u8 __user *)buf, count, false);
}
static void resize_console(struct port *port)
@@ -1201,21 +1191,6 @@ static const struct hv_ops hv_ops = {
.notifier_hangup = notifier_del_vio,
};
-/*
- * Console drivers are initialized very early so boot messages can go
- * out, so we do things slightly differently from the generic virtio
- * initialization of the net and block drivers.
- *
- * At this stage, the console is output-only. It's too early to set
- * up a virtqueue, so we let the drivers do some boutique early-output
- * thing.
- */
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
-{
- early_put_chars = put_chars;
- return hvc_instantiate(0, 0, &hv_ops);
-}
-
static int init_port_console(struct port *port)
{
int ret;
@@ -1256,13 +1231,6 @@ static int init_port_console(struct port *port)
spin_unlock_irq(&pdrvdata_lock);
port->guest_connected = true;
- /*
- * Start using the new console output if this is the first
- * console to come up.
- */
- if (early_put_chars)
- early_put_chars = NULL;
-
/* Notify host of port being opened */
send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
@@ -1999,7 +1967,6 @@ static int virtcons_probe(struct virtio_device *vdev)
struct ports_device *portdev;
int err;
bool multiport;
- bool early = early_put_chars != NULL;
/* We only need a config space if features are offered */
if (!vdev->config->get &&
@@ -2010,9 +1977,6 @@ static int virtcons_probe(struct virtio_device *vdev)
return -EINVAL;
}
- /* Ensure to read early_put_chars now */
- barrier();
-
portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
if (!portdev) {
err = -ENOMEM;
@@ -2100,18 +2064,6 @@ static int virtcons_probe(struct virtio_device *vdev)
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 1);
- /*
- * If there was an early virtio console, assume that there are no
- * other consoles. We need to wait until the hvc_alloc matches the
- * hvc_instantiate, otherwise tty_open will complain, resulting in
- * a "Warning: unable to open an initial console" boot failure.
- * Without multiport this is done in add_port above. With multiport
- * this might take some host<->guest communication - thus we have to
- * wait.
- */
- if (multiport && early)
- wait_for_completion(&early_console_added);
-
return 0;
free_chrdev:
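
Two things happen across the virtio_console hunks above: the early_put_chars
boot-output hook is deleted (its registration helper,
virtio_cons_early_init(), appears to have had no remaining in-tree callers),
and the hvc callbacks move to the tty-wide u8/size_t types. A sketch of the
resulting callback shapes, assuming the struct hv_ops declaration from the
driver-private hvc_console.h header; names here are invented:

#include <linux/types.h>

static ssize_t example_put_chars(u32 vtermno, const u8 *buf, size_t count)
{
	/* Hand count bytes to the transport; return bytes consumed or -errno. */
	return count;
}

static ssize_t example_get_chars(u32 vtermno, u8 *buf, size_t count)
{
	/* Fill buf with up to count bytes of input; 0 means nothing pending. */
	return 0;
}

Returning ssize_t instead of int lets a backend report -EPIPE and friends
without the sign ambiguity of an int count, which is exactly what the new
put_chars() above does when no port matches the vtermno.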
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c30d0d396..50af5fc7f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -414,16 +414,6 @@ config COMMON_CLK_VC7
Renesas Versaclock7 is a family of configurable clock generator
and jitter attenuator ICs with fractional and integer dividers.
-config COMMON_CLK_STM32MP135
- def_bool COMMON_CLK && MACH_STM32MP13
- help
- Support for stm32mp135 SoC family clocks
-
-config COMMON_CLK_STM32MP157
- def_bool COMMON_CLK && MACH_STM32MP157
- help
- Support for stm32mp157 SoC family clocks
-
config COMMON_CLK_STM32F
def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
help
@@ -504,6 +494,7 @@ source "drivers/clk/starfive/Kconfig"
source "drivers/clk/sunxi/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
+source "drivers/clk/stm32/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
source "drivers/clk/visconti/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ed71f2e0e..14fa8d4ec 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -70,7 +70,6 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_SP7021) += clk-sp7021.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
-obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
obj-$(CONFIG_COMMON_CLK_TPS68470) += clk-tps68470.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_CLK_TWL) += clk-twl.o
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index 6606aba25..53e21ac30 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -7,6 +7,7 @@
* Currently supported:
* - 9FGV0241
* - 9FGV0441
+ * - 9FGV0841
*
* Copyright (C) 2022 Marek Vasut <marex@denx.de>
*/
@@ -42,6 +43,7 @@
#define RS9_REG_DID 0x6
#define RS9_REG_BCP 0x7
+#define RS9_REG_VID_MASK GENMASK(3, 0)
#define RS9_REG_VID_IDT 0x01
#define RS9_REG_DID_TYPE_FGV (0x0 << RS9_REG_DID_TYPE_SHIFT)
@@ -49,16 +51,10 @@
#define RS9_REG_DID_TYPE_DMV (0x2 << RS9_REG_DID_TYPE_SHIFT)
#define RS9_REG_DID_TYPE_SHIFT 0x6
-/* Supported Renesas 9-series models. */
-enum rs9_model {
- RENESAS_9FGV0241,
- RENESAS_9FGV0441,
-};
-
/* Structure to describe features of a particular 9-series model */
struct rs9_chip_info {
- const enum rs9_model model;
unsigned int num_clks;
+ u8 outshift;
u8 did;
};
@@ -160,14 +156,12 @@ static const struct regmap_config rs9_regmap_config = {
static u8 rs9_calc_dif(const struct rs9_driver_data *rs9, int idx)
{
- enum rs9_model model = rs9->chip_info->model;
-
- if (model == RENESAS_9FGV0241)
- return BIT(idx + 1);
- else if (model == RENESAS_9FGV0441)
- return BIT(idx);
-
- return 0;
+ /*
+ * On 9FGV0241, the DIF OE0 is BIT(1) and DIF OE1 is BIT(2),
+ * on 9FGV0441 and 9FGV0841 the DIF OE0 is BIT(0) and so on.
+ * Increment the index in the 9FGV0241 special case here.
+ */
+ return BIT(idx + rs9->chip_info->outshift);
}
static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx)
@@ -333,6 +327,7 @@ static int rs9_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ vid &= RS9_REG_VID_MASK;
if (vid != RS9_REG_VID_IDT || did != rs9->chip_info->did)
return dev_err_probe(&client->dev, -ENODEV,
"Incorrect VID/DID: %#02x, %#02x. Expected %#02x, %#02x\n",
@@ -380,20 +375,27 @@ static int __maybe_unused rs9_resume(struct device *dev)
}
static const struct rs9_chip_info renesas_9fgv0241_info = {
- .model = RENESAS_9FGV0241,
.num_clks = 2,
+ .outshift = 1,
.did = RS9_REG_DID_TYPE_FGV | 0x02,
};
static const struct rs9_chip_info renesas_9fgv0441_info = {
- .model = RENESAS_9FGV0441,
.num_clks = 4,
+ .outshift = 0,
.did = RS9_REG_DID_TYPE_FGV | 0x04,
};
+static const struct rs9_chip_info renesas_9fgv0841_info = {
+ .num_clks = 8,
+ .outshift = 0,
+ .did = RS9_REG_DID_TYPE_FGV | 0x08,
+};
+
static const struct i2c_device_id rs9_id[] = {
{ "9fgv0241", .driver_data = (kernel_ulong_t)&renesas_9fgv0241_info },
{ "9fgv0441", .driver_data = (kernel_ulong_t)&renesas_9fgv0441_info },
+ { "9fgv0841", .driver_data = (kernel_ulong_t)&renesas_9fgv0841_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, rs9_id);
@@ -401,6 +403,7 @@ MODULE_DEVICE_TABLE(i2c, rs9_id);
static const struct of_device_id clk_rs9_of_match[] = {
{ .compatible = "renesas,9fgv0241", .data = &renesas_9fgv0241_info },
{ .compatible = "renesas,9fgv0441", .data = &renesas_9fgv0441_info },
+ { .compatible = "renesas,9fgv0841", .data = &renesas_9fgv0841_info },
{ }
};
MODULE_DEVICE_TABLE(of, clk_rs9_of_match);
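
The rework above trades the rs9_model enum for a per-chip "outshift": each
chip_info records where its first DIF output-enable bit sits, and the VID
comparison now masks the register to its low nibble (RS9_REG_VID_MASK)
before matching RS9_REG_VID_IDT. A sketch of the lookup with invented
names, using the values from the hunks (9FGV0241 starts at BIT(1), the
4- and 8-output parts at BIT(0)):

#include <linux/bits.h>
#include <linux/types.h>

struct example_chip_info {
	unsigned int num_clks;
	u8 outshift;		/* bit position of DIF OE0 */
};

static u8 example_calc_dif(const struct example_chip_info *info, int idx)
{
	/* OE bit for output idx; replaces the old per-model if/else. */
	return BIT(idx + info->outshift);
}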
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index a9a0bc448..4ce83c526 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -506,6 +506,8 @@ static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
+ struct si5351_platform_data *pdata =
+ hwdata->drvdata->client->dev.platform_data;
u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS :
SI5351_PLLB_PARAMETERS;
@@ -518,9 +520,10 @@ static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate,
(hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0);
/* Do a pll soft reset on the affected pll */
- si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET,
- hwdata->num == 0 ? SI5351_PLL_RESET_A :
- SI5351_PLL_RESET_B);
+ if (pdata->pll_reset[hwdata->num])
+ si5351_reg_write(hwdata->drvdata, SI5351_PLL_RESET,
+ hwdata->num == 0 ? SI5351_PLL_RESET_A :
+ SI5351_PLL_RESET_B);
dev_dbg(&hwdata->drvdata->client->dev,
"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
@@ -1222,6 +1225,44 @@ static int si5351_dt_parse(struct i2c_client *client,
}
}
+ /*
+ * Parse PLL reset mode. For compatibility with older device trees, the
+ * default is to always reset a PLL after setting its rate.
+ */
+ pdata->pll_reset[0] = true;
+ pdata->pll_reset[1] = true;
+
+ of_property_for_each_u32(np, "silabs,pll-reset-mode", prop, p, num) {
+ if (num >= 2) {
+ dev_err(&client->dev,
+ "invalid pll %d on pll-reset-mode prop\n", num);
+ return -EINVAL;
+ }
+
+ p = of_prop_next_u32(prop, p, &val);
+ if (!p) {
+ dev_err(&client->dev,
+ "missing pll-reset-mode for pll %d\n", num);
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 0:
+ /* Reset PLL whenever its rate is adjusted */
+ pdata->pll_reset[num] = true;
+ break;
+ case 1:
+ /* Don't reset PLL whenever its rate is adjusted */
+ pdata->pll_reset[num] = false;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid pll-reset-mode %d for pll %d\n", val,
+ num);
+ return -EINVAL;
+ }
+ }
+
/* per clkout properties */
for_each_child_of_node(np, child) {
if (of_property_read_u32(child, "reg", &num)) {
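
The parse loop above walks "silabs,pll-reset-mode" as a list of <pll mode>
pairs: of_property_for_each_u32() yields one u32 per iteration, and the
explicit of_prop_next_u32() call inside the body consumes the pair's second
half, so the next iteration lands on the next pair. A minimal sketch of
that idiom (property name invented, 6.8-era iterator signature):

#include <linux/of.h>

static int example_parse_pairs(struct device_node *np)
{
	struct property *prop;
	const __be32 *p;
	u32 idx, val;

	of_property_for_each_u32(np, "example,pairs", prop, p, idx) {
		p = of_prop_next_u32(prop, p, &val);
		if (!p)
			return -EINVAL;	/* odd-length property */
		/* use the (idx, val) pair here */
	}

	return 0;
}

With this in place, a board that must not glitch PLLB on rate changes can
set mode 1 for PLL index 1, while omitting the property keeps the old
always-reset behaviour for both PLLs.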
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 00930d7bc..76d7ea196 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -37,7 +37,7 @@
#define VC3_PLL1_M_DIV(n) ((n) & GENMASK(5, 0))
#define VC3_PLL1_VCO_N_DIVIDER 0x9
-#define VC3_PLL1_LOOP_FILTER_N_DIV_MSB 0x0a
+#define VC3_PLL1_LOOP_FILTER_N_DIV_MSB 0xa
#define VC3_OUT_DIV1_DIV2_CTRL 0xf
@@ -148,16 +148,16 @@ struct vc3_pfd_data {
};
struct vc3_pll_data {
+ unsigned long vco_min;
+ unsigned long vco_max;
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
- unsigned long vco_min;
- unsigned long vco_max;
};
struct vc3_div_data {
- u8 offs;
const struct clk_div_table *table;
+ u8 offs;
u8 shift;
u8 width;
u8 flags;
@@ -210,7 +210,7 @@ static const struct clk_div_table div3_divs[] = {
static struct clk_hw *clk_out[6];
-static unsigned char vc3_pfd_mux_get_parent(struct clk_hw *hw)
+static u8 vc3_pfd_mux_get_parent(struct clk_hw *hw)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *pfd_mux = vc3->data;
@@ -226,9 +226,8 @@ static int vc3_pfd_mux_set_parent(struct clk_hw *hw, u8 index)
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *pfd_mux = vc3->data;
- regmap_update_bits(vc3->regmap, pfd_mux->offs, pfd_mux->bitmsk,
- index ? pfd_mux->bitmsk : 0);
- return 0;
+ return regmap_update_bits(vc3->regmap, pfd_mux->offs, pfd_mux->bitmsk,
+ index ? pfd_mux->bitmsk : 0);
}
static const struct clk_ops vc3_pfd_mux_ops = {
@@ -440,7 +439,7 @@ static const struct clk_ops vc3_pll_ops = {
.set_rate = vc3_pll_set_rate,
};
-static unsigned char vc3_div_mux_get_parent(struct clk_hw *hw)
+static u8 vc3_div_mux_get_parent(struct clk_hw *hw)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *div_mux = vc3->data;
@@ -456,10 +455,8 @@ static int vc3_div_mux_set_parent(struct clk_hw *hw, u8 index)
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *div_mux = vc3->data;
- regmap_update_bits(vc3->regmap, div_mux->offs, div_mux->bitmsk,
- index ? div_mux->bitmsk : 0);
-
- return 0;
+ return regmap_update_bits(vc3->regmap, div_mux->offs, div_mux->bitmsk,
+ index ? div_mux->bitmsk : 0);
}
static const struct clk_ops vc3_div_mux_ops = {
@@ -477,7 +474,7 @@ static unsigned int vc3_get_div(const struct clk_div_table *table,
if (clkt->val == val)
return clkt->div;
- return 0;
+ return 1;
}
static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
@@ -524,10 +521,9 @@ static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
value = divider_get_val(rate, parent_rate, div_data->table,
div_data->width, div_data->flags);
- regmap_update_bits(vc3->regmap, div_data->offs,
- VC3_DIV_MASK(div_data->width) << div_data->shift,
- value << div_data->shift);
- return 0;
+ return regmap_update_bits(vc3->regmap, div_data->offs,
+ VC3_DIV_MASK(div_data->width) << div_data->shift,
+ value << div_data->shift);
}
static const struct clk_ops vc3_div_ops = {
@@ -539,11 +535,9 @@ static const struct clk_ops vc3_div_ops = {
static int vc3_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- int ret;
int frc;
- ret = clk_mux_determine_rate_flags(hw, req, CLK_SET_RATE_PARENT);
- if (ret) {
+ if (clk_mux_determine_rate_flags(hw, req, CLK_SET_RATE_PARENT)) {
/* The below check is equivalent to (best_parent_rate/rate) */
if (req->best_parent_rate >= req->rate) {
frc = DIV_ROUND_CLOSEST_ULL(req->best_parent_rate,
@@ -552,13 +546,12 @@ static int vc3_clk_mux_determine_rate(struct clk_hw *hw,
return clk_mux_determine_rate_flags(hw, req,
CLK_SET_RATE_PARENT);
}
- ret = 0;
}
- return ret;
+ return 0;
}
-static unsigned char vc3_clk_mux_get_parent(struct clk_hw *hw)
+static u8 vc3_clk_mux_get_parent(struct clk_hw *hw)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *clk_mux = vc3->data;
@@ -574,9 +567,8 @@ static int vc3_clk_mux_set_parent(struct clk_hw *hw, u8 index)
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_clk_data *clk_mux = vc3->data;
- regmap_update_bits(vc3->regmap, clk_mux->offs,
- clk_mux->bitmsk, index ? clk_mux->bitmsk : 0);
- return 0;
+ return regmap_update_bits(vc3->regmap, clk_mux->offs, clk_mux->bitmsk,
+ index ? clk_mux->bitmsk : 0);
}
static const struct clk_ops vc3_clk_mux_ops = {
@@ -605,7 +597,7 @@ static struct vc3_hw_data clk_pfd_mux[] = {
.offs = VC3_PLL_OP_CTRL,
.bitmsk = BIT(VC3_PLL_OP_CTRL_PLL2_REFIN_SEL)
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pfd2_mux",
.ops = &vc3_pfd_mux_ops,
.parent_data = pfd_mux_parent_data,
@@ -618,7 +610,7 @@ static struct vc3_hw_data clk_pfd_mux[] = {
.offs = VC3_GENERAL_CTR,
.bitmsk = BIT(VC3_GENERAL_CTR_PLL3_REFIN_SEL)
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pfd3_mux",
.ops = &vc3_pfd_mux_ops,
.parent_data = pfd_mux_parent_data,
@@ -636,7 +628,7 @@ static struct vc3_hw_data clk_pfd[] = {
.mdiv1_bitmsk = VC3_PLL1_M_DIV1,
.mdiv2_bitmsk = VC3_PLL1_M_DIV2
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pfd1",
.ops = &vc3_pfd_ops,
.parent_data = &(const struct clk_parent_data) {
@@ -653,7 +645,7 @@ static struct vc3_hw_data clk_pfd[] = {
.mdiv1_bitmsk = VC3_PLL2_M_DIV1,
.mdiv2_bitmsk = VC3_PLL2_M_DIV2
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pfd2",
.ops = &vc3_pfd_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -670,7 +662,7 @@ static struct vc3_hw_data clk_pfd[] = {
.mdiv1_bitmsk = VC3_PLL3_M_DIV1,
.mdiv2_bitmsk = VC3_PLL3_M_DIV2
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pfd3",
.ops = &vc3_pfd_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -691,7 +683,7 @@ static struct vc3_hw_data clk_pll[] = {
.vco_min = VC3_PLL1_VCO_MIN,
.vco_max = VC3_PLL1_VCO_MAX
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pll1",
.ops = &vc3_pll_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -709,7 +701,7 @@ static struct vc3_hw_data clk_pll[] = {
.vco_min = VC3_PLL2_VCO_MIN,
.vco_max = VC3_PLL2_VCO_MAX
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pll2",
.ops = &vc3_pll_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -727,7 +719,7 @@ static struct vc3_hw_data clk_pll[] = {
.vco_min = VC3_PLL3_VCO_MIN,
.vco_max = VC3_PLL3_VCO_MAX
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "pll3",
.ops = &vc3_pll_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -760,7 +752,7 @@ static struct vc3_hw_data clk_div_mux[] = {
.offs = VC3_GENERAL_CTR,
.bitmsk = VC3_GENERAL_CTR_DIV1_SRC_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div1_mux",
.ops = &vc3_div_mux_ops,
.parent_data = div_mux_parent_data[VC3_DIV1_MUX],
@@ -773,7 +765,7 @@ static struct vc3_hw_data clk_div_mux[] = {
.offs = VC3_PLL3_CHARGE_PUMP_CTRL,
.bitmsk = VC3_PLL3_CHARGE_PUMP_CTRL_OUTDIV3_SRC_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div3_mux",
.ops = &vc3_div_mux_ops,
.parent_data = div_mux_parent_data[VC3_DIV3_MUX],
@@ -786,7 +778,7 @@ static struct vc3_hw_data clk_div_mux[] = {
.offs = VC3_OUTPUT_CTR,
.bitmsk = VC3_OUTPUT_CTR_DIV4_SRC_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div4_mux",
.ops = &vc3_div_mux_ops,
.parent_data = div_mux_parent_data[VC3_DIV4_MUX],
@@ -805,7 +797,7 @@ static struct vc3_hw_data clk_div[] = {
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div1",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -823,7 +815,7 @@ static struct vc3_hw_data clk_div[] = {
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div2",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -841,7 +833,7 @@ static struct vc3_hw_data clk_div[] = {
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div3",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -859,7 +851,7 @@ static struct vc3_hw_data clk_div[] = {
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div4",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -877,7 +869,7 @@ static struct vc3_hw_data clk_div[] = {
.width = 4,
.flags = CLK_DIVIDER_READ_ONLY
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "div5",
.ops = &vc3_div_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -895,7 +887,7 @@ static struct vc3_hw_data clk_mux[] = {
.offs = VC3_SE1_DIV4_CTRL,
.bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "se1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -911,7 +903,7 @@ static struct vc3_hw_data clk_mux[] = {
.offs = VC3_SE2_CTRL_REG0,
.bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "se2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -927,7 +919,7 @@ static struct vc3_hw_data clk_mux[] = {
.offs = VC3_SE3_DIFF1_CTRL_REG,
.bitmsk = VC3_SE3_DIFF1_CTRL_REG_SE3_CLK_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "se3_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -943,7 +935,7 @@ static struct vc3_hw_data clk_mux[] = {
.offs = VC3_DIFF1_CTRL_REG,
.bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "diff1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
@@ -959,7 +951,7 @@ static struct vc3_hw_data clk_mux[] = {
.offs = VC3_DIFF2_CTRL_REG,
.bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
},
- .hw.init = &(struct clk_init_data){
+ .hw.init = &(struct clk_init_data) {
.name = "diff2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
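
Two small behaviour fixes run through the versaclock3 hunks: the mux and
divider setters now return the regmap_update_bits() status instead of
discarding it, and vc3_get_div() falls back to 1 rather than 0 on a table
miss. The latter matters because the result feeds a division; a sketch of
the failure mode it avoids (simplified, invented name):

static unsigned long example_recalc_rate(unsigned long parent_rate,
					 unsigned int div)
{
	/*
	 * With a fallback of 1, a bogus register value can no longer
	 * produce a divide-by-zero here; the rate passes through.
	 */
	return parent_rate / div;
}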
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 20c4b28fe..cf1fc0edf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -1362,9 +1450,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1373,8 +1458,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1390,9 +1473,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1417,8 +1497,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1434,6 +1512,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
@@ -1442,6 +1521,13 @@ static int __init clk_disable_unused(void)
pr_info("clk: Disabling unused clocks\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1458,6 +1544,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
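
The ordering the hunks above establish matters more than the individual
lines: all runtime-PM "gets" happen before prepare_lock is taken, and all
"puts" after it is dropped, because a provider's runtime-PM callback may
itself call clk_prepare_enable() and block on prepare_lock. A condensed
sketch of the sequence, using the helpers introduced earlier in this file:

static int example_disable_unused(void)
{
	int ret;

	/* 1: resume every provider while holding only clk_rpm_list_lock */
	ret = clk_pm_runtime_get_all();
	if (ret)
		return ret;

	/* 2: only now take prepare_lock; the walk triggers no PM resume */
	clk_prepare_lock();
	/* ... unprepare/disable unused clks ... */
	clk_prepare_unlock();

	/* 3: drop the PM references and the list lock */
	clk_pm_runtime_put_all();

	return 0;
}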
@@ -3233,9 +3321,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3245,11 +3331,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
struct hlist_head **lists = s->private;
+ int ret;
seq_puts(s, " enable prepare protect duty hardware connection\n");
seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3258,6 +3348,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3305,8 +3396,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
struct clk_core *c;
bool first_node = true;
struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3319,6 +3416,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3962,8 +4060,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4192,6 +4288,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4212,6 +4324,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4224,9 +4338,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4266,12 +4379,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4351,18 +4462,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
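
Moving kref_init() to the top of __clk_register() lets every error path
unwind through a single kref_put(&core->ref, __clk_release) instead of
duplicating the frees, and __clk_release drops its lockdep assertion since
it can now run without prepare_lock held. A condensed sketch of the
resulting early-exit shape (fragment, mirroring the hunks above):

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core)
		return ERR_PTR(-ENOMEM);

	kref_init(&core->ref);	/* before anything __clk_release() frees */

	core->name = kstrdup_const(init->name, GFP_KERNEL);
	if (!core->name) {
		/*
		 * Safe: kfree_const(NULL) and freeing a NULL parent map
		 * are no-ops, so a partially built core unwinds cleanly.
		 */
		kref_put(&core->ref, __clk_release);
		return ERR_PTR(-ENOMEM);
	}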
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 0d58d85c3..d63564dbb 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -104,15 +104,15 @@ static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
static long pll14xx_calc_rate(struct clk_pll14xx *pll, int mdiv, int pdiv,
int sdiv, int kdiv, unsigned long prate)
{
- u64 fvco = prate;
+ u64 fout = prate;
- /* fvco = (m * 65536 + k) * Fin / (p * 65536) */
- fvco *= (mdiv * 65536 + kdiv);
+ /* fout = (m * 65536 + k) * Fin / (p * 65536) / (1 << sdiv) */
+ fout *= (mdiv * 65536 + kdiv);
pdiv *= 65536;
- do_div(fvco, pdiv << sdiv);
+ do_div(fout, pdiv << sdiv);
- return fvco;
+ return fout;
}
static long pll1443x_calc_kdiv(int mdiv, int pdiv, int sdiv,
@@ -131,7 +131,7 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
{
u32 pll_div_ctl0, pll_div_ctl1;
int mdiv, pdiv, sdiv, kdiv;
- long fvco, rate_min, rate_max, dist, best = LONG_MAX;
+ long fout, rate_min, rate_max, dist, best = LONG_MAX;
const struct imx_pll14xx_rate_table *tt;
/*
@@ -143,6 +143,7 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
* d) -32768 <= k <= 32767
*
* fvco = (m * 65536 + k) * prate / (p * 65536)
+ * fout = (m * 65536 + k) * prate / (p * 65536) / (1 << sdiv)
*/
/* First try if we can get the desired rate from one of the static entries */
@@ -173,8 +174,8 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
pr_debug("%s: in=%ld, want=%ld Only adjust kdiv %ld -> %d\n",
clk_hw_get_name(&pll->hw), prate, rate,
FIELD_GET(KDIV_MASK, pll_div_ctl1), kdiv);
- fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
- t->rate = (unsigned int)fvco;
+ fout = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
+ t->rate = (unsigned int)fout;
t->mdiv = mdiv;
t->pdiv = pdiv;
t->sdiv = sdiv;
@@ -190,13 +191,13 @@ static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
mdiv = clamp(mdiv, 64, 1023);
kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
- fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
+ fout = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
/* best match */
- dist = abs((long)rate - (long)fvco);
+ dist = abs((long)rate - (long)fout);
if (dist < best) {
best = dist;
- t->rate = (unsigned int)fvco;
+ t->rate = (unsigned int)fout;
t->mdiv = mdiv;
t->pdiv = pdiv;
t->sdiv = sdiv;
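
The rename is purely cosmetic: the helper always divided the VCO rate by
2^sdiv before returning, so it was returning the output rate all along. A
worked example with invented values, prate = 24 MHz, mdiv = 200, kdiv = 0,
pdiv = 2, sdiv = 1:

	fvco = (200 * 65536 + 0) * 24 MHz / (2 * 65536) = 2400 MHz
	fout = fvco / (1 << 1)                          = 1200 MHz

Calling the result fout, and extending the comment with the final
/ (1 << sdiv) term, makes the code match the arithmetic it performs.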
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 48b42d111..70a005e7e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -423,6 +423,15 @@ config COMMON_CLK_MT7986_ETHSYS
This driver adds support for clocks for Ethernet and SGMII
required on MediaTek MT7986 SoC.
+config COMMON_CLK_MT7988
+ tristate "Clock driver for MediaTek MT7988"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT7988 basic clocks and clocks
+ required for various peripherals found on this SoC.
+
config COMMON_CLK_MT8135
tristate "Clock driver for MediaTek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index dbeaa5b41..eeccfa039 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -62,6 +62,11 @@ obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-apmixed.o
obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-topckgen.o
obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-infracfg.o
obj-$(CONFIG_COMMON_CLK_MT7986_ETHSYS) += clk-mt7986-eth.o
+obj-$(CONFIG_COMMON_CLK_MT7988) += clk-mt7988-apmixed.o
+obj-$(CONFIG_COMMON_CLK_MT7988) += clk-mt7988-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT7988) += clk-mt7988-infracfg.o
+obj-$(CONFIG_COMMON_CLK_MT7988) += clk-mt7988-eth.o
+obj-$(CONFIG_COMMON_CLK_MT7988) += clk-mt7988-xfipll.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135-apmixedsys.o clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167-apmixedsys.o clk-mt8167.o
obj-$(CONFIG_COMMON_CLK_MT8167_AUDSYS) += clk-mt8167-aud.o
diff --git a/drivers/clk/mediatek/clk-mt7988-apmixed.c b/drivers/clk/mediatek/clk-mt7988-apmixed.c
new file mode 100644
index 000000000..baf956435
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7988-apmixed.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+#include <dt-bindings/clock/mediatek,mt7988-clk.h>
+
+#define MT7988_PLL_FMAX (2500UL * MHZ)
+#define MT7988_PCW_CHG_BIT 2
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _rst_bar_mask, _pcwbits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = BIT(_rst_bar_mask), \
+ .fmax = MT7988_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .pcw_chg_bit = MT7988_PCW_CHG_BIT, \
+ .parent_name = "clkxtal", \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_NETSYSPLL, "netsyspll", 0x0104, 0x0110, 0x00000001, 0, 0, 32, 0x0104, 4, 0,
+ 0, 0, 0x0108, 0, 0x0104),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x0114, 0x0120, 0xff000001, HAVE_RST_BAR, 23, 32, 0x0114, 4,
+ 0, 0, 0, 0x0118, 0, 0x0114),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0124, 0x0130, 0xff000001, HAVE_RST_BAR, 23, 32, 0x0124, 4,
+ 0, 0, 0, 0x0128, 0, 0x0124),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0134, 0x0140, 0x00000001, 0, 0, 32, 0x0134, 4, 0x0704,
+ 0x0700, 1, 0x0138, 0, 0x0134),
+ PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0144, 0x0150, 0xff000001, HAVE_RST_BAR, 23, 32,
+ 0x0144, 4, 0, 0, 0, 0x0148, 0, 0x0144),
+ PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0154, 0x0160, 0xff000001, (HAVE_RST_BAR | PLL_AO), 23,
+ 32, 0x0154, 4, 0, 0, 0, 0x0158, 0, 0x0154),
+ PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0164, 0x0170, 0x00000001, 0, 0, 32, 0x0164, 4, 0,
+ 0, 0, 0x0168, 0, 0x0164),
+ PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0174, 0x0180, 0x00000001, 0, 0, 32, 0x0174, 4, 0, 0, 0,
+ 0x0178, 0, 0x0174),
+ PLL(CLK_APMIXED_ARM_B, "arm_b", 0x0204, 0x0210, 0xff000001, (HAVE_RST_BAR | PLL_AO), 23, 32,
+ 0x0204, 4, 0, 0, 0, 0x0208, 0, 0x0204),
+ PLL(CLK_APMIXED_CCIPLL2_B, "ccipll2_b", 0x0214, 0x0220, 0xff000001, HAVE_RST_BAR, 23, 32,
+ 0x0214, 4, 0, 0, 0, 0x0218, 0, 0x0214),
+ PLL(CLK_APMIXED_USXGMIIPLL, "usxgmiipll", 0x0304, 0x0310, 0xff000001, HAVE_RST_BAR, 23, 32,
+ 0x0304, 4, 0, 0, 0, 0x0308, 0, 0x0304),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0314, 0x0320, 0x00000001, 0, 0, 32, 0x0314, 4, 0, 0,
+ 0, 0x0318, 0, 0x0314),
+};
+
+static const struct of_device_id of_match_clk_mt7988_apmixed[] = {
+ { .compatible = "mediatek,mt7988-apmixedsys" },
+ { /* sentinel */ }
+};
+
+static int clk_mt7988_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(plls));
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt7988_apmixed_drv = {
+ .probe = clk_mt7988_apmixed_probe,
+ .driver = {
+ .name = "clk-mt7988-apmixed",
+ .of_match_table = of_match_clk_mt7988_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt7988_apmixed_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-eth.c b/drivers/clk/mediatek/clk-mt7988-eth.c
new file mode 100644
index 000000000..adf4a9d39
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7988-eth.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "reset.h"
+#include <dt-bindings/clock/mediatek,mt7988-clk.h>
+#include <dt-bindings/reset/mediatek,mt7988-resets.h>
+
+static const struct mtk_gate_regs ethdma_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+#define GATE_ETHDMA(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ethdma_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate ethdma_clks[] = {
+ GATE_ETHDMA(CLK_ETHDMA_XGP1_EN, "ethdma_xgp1_en", "top_xtal", 0),
+ GATE_ETHDMA(CLK_ETHDMA_XGP2_EN, "ethdma_xgp2_en", "top_xtal", 1),
+ GATE_ETHDMA(CLK_ETHDMA_XGP3_EN, "ethdma_xgp3_en", "top_xtal", 2),
+ GATE_ETHDMA(CLK_ETHDMA_FE_EN, "ethdma_fe_en", "netsys_2x_sel", 6),
+ GATE_ETHDMA(CLK_ETHDMA_GP2_EN, "ethdma_gp2_en", "top_xtal", 7),
+ GATE_ETHDMA(CLK_ETHDMA_GP1_EN, "ethdma_gp1_en", "top_xtal", 8),
+ GATE_ETHDMA(CLK_ETHDMA_GP3_EN, "ethdma_gp3_en", "top_xtal", 10),
+ GATE_ETHDMA(CLK_ETHDMA_ESW_EN, "ethdma_esw_en", "netsys_gsw_sel", 16),
+ GATE_ETHDMA(CLK_ETHDMA_CRYPT0_EN, "ethdma_crypt0_en", "eip197_sel", 29),
+};
+
+static const struct mtk_clk_desc ethdma_desc = {
+ .clks = ethdma_clks,
+ .num_clks = ARRAY_SIZE(ethdma_clks),
+};
+
+static const struct mtk_gate_regs sgmii_cg_regs = {
+ .set_ofs = 0xe4,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe4,
+};
+
+#define GATE_SGMII(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &sgmii_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii0_clks[] = {
+ GATE_SGMII(CLK_SGM0_TX_EN, "sgm0_tx_en", "top_xtal", 2),
+ GATE_SGMII(CLK_SGM0_RX_EN, "sgm0_rx_en", "top_xtal", 3),
+};
+
+static const struct mtk_clk_desc sgmii0_desc = {
+ .clks = sgmii0_clks,
+ .num_clks = ARRAY_SIZE(sgmii0_clks),
+};
+
+static const struct mtk_gate sgmii1_clks[] = {
+ GATE_SGMII(CLK_SGM1_TX_EN, "sgm1_tx_en", "top_xtal", 2),
+ GATE_SGMII(CLK_SGM1_RX_EN, "sgm1_rx_en", "top_xtal", 3),
+};
+
+static const struct mtk_clk_desc sgmii1_desc = {
+ .clks = sgmii1_clks,
+ .num_clks = ARRAY_SIZE(sgmii1_clks),
+};
+
+static const struct mtk_gate_regs ethwarp_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_ETHWARP(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ethwarp_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate ethwarp_clks[] = {
+ GATE_ETHWARP(CLK_ETHWARP_WOCPU2_EN, "ethwarp_wocpu2_en", "netsys_mcu_sel", 13),
+ GATE_ETHWARP(CLK_ETHWARP_WOCPU1_EN, "ethwarp_wocpu1_en", "netsys_mcu_sel", 14),
+ GATE_ETHWARP(CLK_ETHWARP_WOCPU0_EN, "ethwarp_wocpu0_en", "netsys_mcu_sel", 15),
+};
+
+static u16 ethwarp_rst_ofs[] = { 0x8 };
+
+static u16 ethwarp_idx_map[] = {
+ [MT7988_ETHWARP_RST_SWITCH] = 9,
+};
+
+static const struct mtk_clk_rst_desc ethwarp_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = ethwarp_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ethwarp_rst_ofs),
+ .rst_idx_map = ethwarp_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(ethwarp_idx_map),
+};
+
+static const struct mtk_clk_desc ethwarp_desc = {
+ .clks = ethwarp_clks,
+ .num_clks = ARRAY_SIZE(ethwarp_clks),
+ .rst_desc = &ethwarp_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt7988_eth[] = {
+ { .compatible = "mediatek,mt7988-ethsys", .data = &ethdma_desc },
+ { .compatible = "mediatek,mt7988-sgmiisys0", .data = &sgmii0_desc },
+ { .compatible = "mediatek,mt7988-sgmiisys1", .data = &sgmii1_desc },
+ { .compatible = "mediatek,mt7988-ethwarp", .data = &ethwarp_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_eth);
+
+static struct platform_driver clk_mt7988_eth_drv = {
+ .driver = {
+ .name = "clk-mt7988-eth",
+ .of_match_table = of_match_clk_mt7988_eth,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove_new = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt7988_eth_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7988 Ethernet clocks driver");
+MODULE_LICENSE("GPL");
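
A note on the gate ops used throughout this file: every mtk_gate_regs block
above sets set_ofs, clr_ofs and sta_ofs to the same offset, meaning the
hardware has no separate set/clear banks, so mtk_clk_gate_ops_no_setclr_inv
read-modify-writes a single register with inverted polarity (bit set =
clock running). A sketch of what enabling such a gate amounts to, based on
that register layout (invented helper name):

#include <linux/bits.h>
#include <linux/regmap.h>

static int example_gate_enable(struct regmap *map, u32 ofs, u8 shift)
{
	/* single-register gate, inverted: setting the bit ungates the clock */
	return regmap_update_bits(map, ofs, BIT(shift), BIT(shift));
}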
diff --git a/drivers/clk/mediatek/clk-mt7988-infracfg.c b/drivers/clk/mediatek/clk-mt7988-infracfg.c
new file mode 100644
index 000000000..df02997c6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7988-infracfg.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+#include <dt-bindings/clock/mediatek,mt7988-clk.h>
+
+static DEFINE_SPINLOCK(mt7988_clk_lock);
+
+static const char *const infra_mux_uart0_parents[] __initconst = { "csw_infra_f26m_sel",
+ "uart_sel" };
+
+static const char *const infra_mux_uart1_parents[] __initconst = { "csw_infra_f26m_sel",
+ "uart_sel" };
+
+static const char *const infra_mux_uart2_parents[] __initconst = { "csw_infra_f26m_sel",
+ "uart_sel" };
+
+static const char *const infra_mux_spi0_parents[] __initconst = { "i2c_sel", "spi_sel" };
+
+static const char *const infra_mux_spi1_parents[] __initconst = { "i2c_sel", "spim_mst_sel" };
+
+static const char *const infra_pwm_bck_parents[] __initconst = { "top_rtc_32p7k",
+ "csw_infra_f26m_sel", "sysaxi_sel",
+ "pwm_sel" };
+
+static const char *const infra_pcie_gfmux_tl_ck_o_p0_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_infra_f26m_sel", "csw_infra_f26m_sel", "pextp_tl_sel"
+};
+
+static const char *const infra_pcie_gfmux_tl_ck_o_p1_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_infra_f26m_sel", "csw_infra_f26m_sel", "pextp_tl_p1_sel"
+};
+
+static const char *const infra_pcie_gfmux_tl_ck_o_p2_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_infra_f26m_sel", "csw_infra_f26m_sel", "pextp_tl_p2_sel"
+};
+
+static const char *const infra_pcie_gfmux_tl_ck_o_p3_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_infra_f26m_sel", "csw_infra_f26m_sel", "pextp_tl_p3_sel"
+};
+
+static const struct mtk_mux infra_muxes[] = {
+ /* MODULE_CLK_SEL_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_MUX_UART0_SEL, "infra_mux_uart0_sel",
+ infra_mux_uart0_parents, 0x0018, 0x0010, 0x0014, 0, 1, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_MUX_UART1_SEL, "infra_mux_uart1_sel",
+ infra_mux_uart1_parents, 0x0018, 0x0010, 0x0014, 1, 1, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_MUX_UART2_SEL, "infra_mux_uart2_sel",
+ infra_mux_uart2_parents, 0x0018, 0x0010, 0x0014, 2, 1, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_MUX_SPI0_SEL, "infra_mux_spi0_sel", infra_mux_spi0_parents,
+ 0x0018, 0x0010, 0x0014, 4, 1, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_MUX_SPI1_SEL, "infra_mux_spi1_sel", infra_mux_spi1_parents,
+ 0x0018, 0x0010, 0x0014, 5, 1, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_MUX_SPI2_SEL, "infra_mux_spi2_sel", infra_mux_spi0_parents,
+ 0x0018, 0x0010, 0x0014, 6, 1, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_SEL, "infra_pwm_sel", infra_pwm_bck_parents, 0x0018,
+ 0x0010, 0x0014, 14, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK1_SEL, "infra_pwm_ck1_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 16, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK2_SEL, "infra_pwm_ck2_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 18, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK3_SEL, "infra_pwm_ck3_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 20, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK4_SEL, "infra_pwm_ck4_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 22, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK5_SEL, "infra_pwm_ck5_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 24, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK6_SEL, "infra_pwm_ck6_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 26, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK7_SEL, "infra_pwm_ck7_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 28, 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_CK8_SEL, "infra_pwm_ck8_sel", infra_pwm_bck_parents,
+ 0x0018, 0x0010, 0x0014, 30, 2, -1, -1, -1),
+ /* MODULE_CLK_SEL_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_GFMUX_TL_O_P0_SEL, "infra_pcie_gfmux_tl_o_p0_sel",
+ infra_pcie_gfmux_tl_ck_o_p0_parents, 0x0028, 0x0020, 0x0024, 0, 2, -1,
+ -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_GFMUX_TL_O_P1_SEL, "infra_pcie_gfmux_tl_o_p1_sel",
+ infra_pcie_gfmux_tl_ck_o_p1_parents, 0x0028, 0x0020, 0x0024, 2, 2, -1,
+ -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_GFMUX_TL_O_P2_SEL, "infra_pcie_gfmux_tl_o_p2_sel",
+ infra_pcie_gfmux_tl_ck_o_p2_parents, 0x0028, 0x0020, 0x0024, 4, 2, -1,
+ -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_GFMUX_TL_O_P3_SEL, "infra_pcie_gfmux_tl_o_p3_sel",
+ infra_pcie_gfmux_tl_ck_o_p3_parents, 0x0028, 0x0020, 0x0024, 6, 2, -1,
+ -1, -1),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0x50,
+ .clr_ofs = 0x54,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs infra3_cg_regs = {
+ .set_ofs = 0x60,
+ .clr_ofs = 0x64,
+ .sta_ofs = 0x68,
+};
+
+#define GATE_INFRA0_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr, \
+ _flags)
+
+#define GATE_INFRA1_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr, \
+ _flags)
+
+#define GATE_INFRA2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr, \
+ _flags)
+
+#define GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra3_cg_regs, _shift, &mtk_clk_gate_ops_setclr, \
+ _flags)
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) GATE_INFRA0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) GATE_INFRA1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) GATE_INFRA2_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA3(_id, _name, _parent, _shift) GATE_INFRA3_FLAGS(_id, _name, _parent, _shift, 0)
+
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P0, "infra_pcie_peri_ck_26m_ck_p0",
+ "csw_infra_f26m_sel", 7),
+ GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
+ "csw_infra_f26m_sel", 8),
+ GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
+ "infra_pcie_peri_ck_26m_ck_p3", 9),
+ GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
+ "csw_infra_f26m_sel", 10),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_66M_GPT_BCK, "infra_hf_66m_gpt_bck", "sysaxi_sel", 0),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_HCK, "infra_hf_66m_pwm_hck", "sysaxi_sel", 1),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_BCK, "infra_hf_66m_pwm_bck", "infra_pwm_sel", 2),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK1, "infra_hf_66m_pwm_ck1", "infra_pwm_ck1_sel", 3),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK2, "infra_hf_66m_pwm_ck2", "infra_pwm_ck2_sel", 4),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK3, "infra_hf_66m_pwm_ck3", "infra_pwm_ck3_sel", 5),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK4, "infra_hf_66m_pwm_ck4", "infra_pwm_ck4_sel", 6),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK5, "infra_hf_66m_pwm_ck5", "infra_pwm_ck5_sel", 7),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK6, "infra_hf_66m_pwm_ck6", "infra_pwm_ck6_sel", 8),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK7, "infra_hf_66m_pwm_ck7", "infra_pwm_ck7_sel", 9),
+ GATE_INFRA1(CLK_INFRA_66M_PWM_CK8, "infra_hf_66m_pwm_ck8", "infra_pwm_ck8_sel", 10),
+ GATE_INFRA1(CLK_INFRA_133M_CQDMA_BCK, "infra_hf_133m_cqdma_bck", "sysaxi_sel", 12),
+ GATE_INFRA1(CLK_INFRA_66M_AUD_SLV_BCK, "infra_66m_aud_slv_bck", "sysaxi_sel", 13),
+ GATE_INFRA1(CLK_INFRA_AUD_26M, "infra_f_faud_26m", "csw_infra_f26m_sel", 14),
+ GATE_INFRA1(CLK_INFRA_AUD_L, "infra_f_faud_l", "aud_l_sel", 15),
+ GATE_INFRA1(CLK_INFRA_AUD_AUD, "infra_f_aud_aud", "a1sys_sel", 16),
+ GATE_INFRA1(CLK_INFRA_AUD_EG2, "infra_f_faud_eg2", "a_tuner_sel", 18),
+ GATE_INFRA1_FLAGS(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "csw_infra_f26m_sel", 19,
+ CLK_IS_CRITICAL),
+ /* JTAG */
+ GATE_INFRA1_FLAGS(CLK_INFRA_133M_DBG_ACKM, "infra_hf_133m_dbg_ackm", "sysaxi_sel", 20,
+ CLK_IS_CRITICAL),
+ GATE_INFRA1(CLK_INFRA_66M_AP_DMA_BCK, "infra_66m_ap_dma_bck", "sysaxi_sel", 21),
+ GATE_INFRA1(CLK_INFRA_66M_SEJ_BCK, "infra_hf_66m_sej_bck", "sysaxi_sel", 29),
+ GATE_INFRA1(CLK_INFRA_PRE_CK_SEJ_F13M, "infra_pre_ck_sej_f13m", "csw_infra_f26m_sel", 30),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_26M_THERM_SYSTEM, "infra_hf_26m_therm_system", "csw_infra_f26m_sel",
+ 0),
+ GATE_INFRA2(CLK_INFRA_I2C_BCK, "infra_i2c_bck", "i2c_sel", 1),
+ GATE_INFRA2(CLK_INFRA_52M_UART0_CK, "infra_f_52m_uart0", "infra_mux_uart0_sel", 3),
+ GATE_INFRA2(CLK_INFRA_52M_UART1_CK, "infra_f_52m_uart1", "infra_mux_uart1_sel", 4),
+ GATE_INFRA2(CLK_INFRA_52M_UART2_CK, "infra_f_52m_uart2", "infra_mux_uart2_sel", 5),
+ GATE_INFRA2(CLK_INFRA_NFI, "infra_f_fnfi", "nfi1x_sel", 9),
+ GATE_INFRA2(CLK_INFRA_SPINFI, "infra_f_fspinfi", "spinfi_sel", 10),
+ GATE_INFRA2_FLAGS(CLK_INFRA_66M_NFI_HCK, "infra_hf_66m_nfi_hck", "sysaxi_sel", 11,
+ CLK_IS_CRITICAL),
+ GATE_INFRA2_FLAGS(CLK_INFRA_104M_SPI0, "infra_hf_104m_spi0", "infra_mux_spi0_sel", 12,
+ CLK_IS_CRITICAL),
+ GATE_INFRA2(CLK_INFRA_104M_SPI1, "infra_hf_104m_spi1", "infra_mux_spi1_sel", 13),
+ GATE_INFRA2(CLK_INFRA_104M_SPI2_BCK, "infra_hf_104m_spi2_bck", "infra_mux_spi2_sel", 14),
+ GATE_INFRA2_FLAGS(CLK_INFRA_66M_SPI0_HCK, "infra_hf_66m_spi0_hck", "sysaxi_sel", 15,
+ CLK_IS_CRITICAL),
+ GATE_INFRA2(CLK_INFRA_66M_SPI1_HCK, "infra_hf_66m_spi1_hck", "sysaxi_sel", 16),
+ GATE_INFRA2(CLK_INFRA_66M_SPI2_HCK, "infra_hf_66m_spi2_hck", "sysaxi_sel", 17),
+ GATE_INFRA2(CLK_INFRA_66M_FLASHIF_AXI, "infra_hf_66m_flashif_axi", "sysaxi_sel", 18),
+ GATE_INFRA2_FLAGS(CLK_INFRA_RTC, "infra_f_frtc", "top_rtc_32k", 19, CLK_IS_CRITICAL),
+ GATE_INFRA2(CLK_INFRA_26M_ADC_BCK, "infra_f_26m_adc_bck", "csw_infra_f26m_sel", 20),
+ GATE_INFRA2(CLK_INFRA_RC_ADC, "infra_f_frc_adc", "infra_f_26m_adc_bck", 21),
+ GATE_INFRA2(CLK_INFRA_MSDC400, "infra_f_fmsdc400", "emmc_400m_sel", 22),
+ GATE_INFRA2(CLK_INFRA_MSDC2_HCK, "infra_f_fmsdc2_hck", "emmc_250m_sel", 23),
+ GATE_INFRA2(CLK_INFRA_133M_MSDC_0_HCK, "infra_hf_133m_msdc_0_hck", "sysaxi_sel", 24),
+ GATE_INFRA2(CLK_INFRA_66M_MSDC_0_HCK, "infra_66m_msdc_0_hck", "sysaxi_sel", 25),
+ GATE_INFRA2(CLK_INFRA_133M_CPUM_BCK, "infra_hf_133m_cpum_bck", "sysaxi_sel", 26),
+ GATE_INFRA2(CLK_INFRA_BIST2FPC, "infra_hf_fbist2fpc", "nfi1x_sel", 27),
+ GATE_INFRA2(CLK_INFRA_I2C_X16W_MCK_CK_P1, "infra_hf_i2c_x16w_mck_ck_p1", "sysaxi_sel", 29),
+ GATE_INFRA2(CLK_INFRA_I2C_X16W_PCK_CK_P1, "infra_hf_i2c_x16w_pck_ck_p1", "sysaxi_sel", 31),
+ /* INFRA3 */
+ GATE_INFRA3(CLK_INFRA_133M_USB_HCK, "infra_133m_usb_hck", "sysaxi_sel", 0),
+ GATE_INFRA3(CLK_INFRA_133M_USB_HCK_CK_P1, "infra_133m_usb_hck_ck_p1", "sysaxi_sel", 1),
+ GATE_INFRA3(CLK_INFRA_66M_USB_HCK, "infra_66m_usb_hck", "sysaxi_sel", 2),
+ GATE_INFRA3(CLK_INFRA_66M_USB_HCK_CK_P1, "infra_66m_usb_hck_ck_p1", "sysaxi_sel", 3),
+ GATE_INFRA3(CLK_INFRA_USB_SYS, "infra_usb_sys", "usb_sys_sel", 4),
+ GATE_INFRA3(CLK_INFRA_USB_SYS_CK_P1, "infra_usb_sys_ck_p1", "usb_sys_p1_sel", 5),
+ GATE_INFRA3(CLK_INFRA_USB_REF, "infra_usb_ref", "top_xtal", 6),
+ GATE_INFRA3(CLK_INFRA_USB_CK_P1, "infra_usb_ck_p1", "top_xtal", 7),
+ GATE_INFRA3_FLAGS(CLK_INFRA_USB_FRMCNT, "infra_usb_frmcnt", "usb_frmcnt_sel", 8,
+ CLK_IS_CRITICAL),
+ GATE_INFRA3_FLAGS(CLK_INFRA_USB_FRMCNT_CK_P1, "infra_usb_frmcnt_ck_p1", "usb_frmcnt_p1_sel",
+ 9, CLK_IS_CRITICAL),
+ GATE_INFRA3(CLK_INFRA_USB_PIPE, "infra_usb_pipe", "sspxtp_sel", 10),
+ GATE_INFRA3(CLK_INFRA_USB_PIPE_CK_P1, "infra_usb_pipe_ck_p1", "usb_phy_sel", 11),
+ GATE_INFRA3(CLK_INFRA_USB_UTMI, "infra_usb_utmi", "top_xtal", 12),
+ GATE_INFRA3(CLK_INFRA_USB_UTMI_CK_P1, "infra_usb_utmi_ck_p1", "top_xtal", 13),
+ GATE_INFRA3(CLK_INFRA_USB_XHCI, "infra_usb_xhci", "usb_xhci_sel", 14),
+ GATE_INFRA3(CLK_INFRA_USB_XHCI_CK_P1, "infra_usb_xhci_ck_p1", "usb_xhci_p1_sel", 15),
+ GATE_INFRA3(CLK_INFRA_PCIE_GFMUX_TL_P0, "infra_pcie_gfmux_tl_ck_p0",
+ "infra_pcie_gfmux_tl_o_p0_sel", 20),
+ GATE_INFRA3(CLK_INFRA_PCIE_GFMUX_TL_P1, "infra_pcie_gfmux_tl_ck_p1",
+ "infra_pcie_gfmux_tl_o_p1_sel", 21),
+ GATE_INFRA3(CLK_INFRA_PCIE_GFMUX_TL_P2, "infra_pcie_gfmux_tl_ck_p2",
+ "infra_pcie_gfmux_tl_o_p2_sel", 22),
+ GATE_INFRA3(CLK_INFRA_PCIE_GFMUX_TL_P3, "infra_pcie_gfmux_tl_ck_p3",
+ "infra_pcie_gfmux_tl_o_p3_sel", 23),
+ GATE_INFRA3(CLK_INFRA_PCIE_PIPE_P0, "infra_pcie_pipe_ck_p0", "top_xtal", 24),
+ GATE_INFRA3(CLK_INFRA_PCIE_PIPE_P1, "infra_pcie_pipe_ck_p1", "top_xtal", 25),
+ GATE_INFRA3(CLK_INFRA_PCIE_PIPE_P2, "infra_pcie_pipe_ck_p2", "top_xtal", 26),
+ GATE_INFRA3(CLK_INFRA_PCIE_PIPE_P3, "infra_pcie_pipe_ck_p3", "top_xtal", 27),
+ GATE_INFRA3(CLK_INFRA_133M_PCIE_CK_P0, "infra_133m_pcie_ck_p0", "sysaxi_sel", 28),
+ GATE_INFRA3(CLK_INFRA_133M_PCIE_CK_P1, "infra_133m_pcie_ck_p1", "sysaxi_sel", 29),
+ GATE_INFRA3(CLK_INFRA_133M_PCIE_CK_P2, "infra_133m_pcie_ck_p2", "sysaxi_sel", 30),
+ GATE_INFRA3(CLK_INFRA_133M_PCIE_CK_P3, "infra_133m_pcie_ck_p3", "sysaxi_sel", 31),
+};
+
+static const struct mtk_clk_desc infra_desc = {
+ .clks = infra_clks,
+ .num_clks = ARRAY_SIZE(infra_clks),
+ .mux_clks = infra_muxes,
+ .num_mux_clks = ARRAY_SIZE(infra_muxes),
+ .clk_lock = &mt7988_clk_lock,
+};
+
+static const struct of_device_id of_match_clk_mt7988_infracfg[] = {
+ { .compatible = "mediatek,mt7988-infracfg", .data = &infra_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_infracfg);
+
+static struct platform_driver clk_mt7988_infracfg_drv = {
+ .driver = {
+ .name = "clk-mt7988-infracfg",
+ .of_match_table = of_match_clk_mt7988_infracfg,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove_new = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt7988_infracfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt7988-topckgen.c b/drivers/clk/mediatek/clk-mt7988-topckgen.c
new file mode 100644
index 000000000..760f8e0d2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7988-topckgen.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+#include <dt-bindings/clock/mediatek,mt7988-clk.h>
+
+static DEFINE_SPINLOCK(mt7988_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_XTAL, "top_xtal", "clkxtal", 40000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_XTAL_D2, "top_xtal_d2", "top_xtal", 1, 2),
+ FACTOR(CLK_TOP_RTC_32K, "top_rtc_32k", "top_xtal", 1, 1250),
+ FACTOR(CLK_TOP_RTC_32P7K, "top_rtc_32p7k", "top_xtal", 1, 1220),
+ FACTOR(CLK_TOP_MPLL_D2, "mpll_d2", "mpll", 1, 2),
+ FACTOR(CLK_TOP_MPLL_D3_D2, "mpll_d3_d2", "mpll", 1, 2),
+ FACTOR(CLK_TOP_MPLL_D4, "mpll_d4", "mpll", 1, 4),
+ FACTOR(CLK_TOP_MPLL_D8, "mpll_d8", "mpll", 1, 8),
+ FACTOR(CLK_TOP_MPLL_D8_D2, "mpll_d8_d2", "mpll", 1, 16),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D3_D5, "mmpll_d3_d5", "mmpll", 1, 15),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D6_D2, "mmpll_d6_d2", "mmpll", 1, 12),
+ FACTOR(CLK_TOP_MMPLL_D8, "mmpll_d8", "mmpll", 1, 8),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_NET1PLL_D4, "net1pll_d4", "net1pll", 1, 4),
+ FACTOR(CLK_TOP_NET1PLL_D5, "net1pll_d5", "net1pll", 1, 5),
+ FACTOR(CLK_TOP_NET1PLL_D5_D2, "net1pll_d5_d2", "net1pll", 1, 10),
+ FACTOR(CLK_TOP_NET1PLL_D5_D4, "net1pll_d5_d4", "net1pll", 1, 20),
+ FACTOR(CLK_TOP_NET1PLL_D8, "net1pll_d8", "net1pll", 1, 8),
+ FACTOR(CLK_TOP_NET1PLL_D8_D2, "net1pll_d8_d2", "net1pll", 1, 16),
+ FACTOR(CLK_TOP_NET1PLL_D8_D4, "net1pll_d8_d4", "net1pll", 1, 32),
+ FACTOR(CLK_TOP_NET1PLL_D8_D8, "net1pll_d8_d8", "net1pll", 1, 64),
+ FACTOR(CLK_TOP_NET1PLL_D8_D16, "net1pll_d8_d16", "net1pll", 1, 128),
+ FACTOR(CLK_TOP_NET2PLL_D2, "net2pll_d2", "net2pll", 1, 2),
+ FACTOR(CLK_TOP_NET2PLL_D4, "net2pll_d4", "net2pll", 1, 4),
+ FACTOR(CLK_TOP_NET2PLL_D4_D4, "net2pll_d4_d4", "net2pll", 1, 16),
+ FACTOR(CLK_TOP_NET2PLL_D4_D8, "net2pll_d4_d8", "net2pll", 1, 32),
+ FACTOR(CLK_TOP_NET2PLL_D6, "net2pll_d6", "net2pll", 1, 6),
+ FACTOR(CLK_TOP_NET2PLL_D8, "net2pll_d8", "net2pll", 1, 8),
+};
+
+static const char *const netsys_parents[] = { "top_xtal", "net2pll_d2", "mmpll_d2" };
+static const char *const netsys_500m_parents[] = { "top_xtal", "net1pll_d5", "net1pll_d5_d2" };
+static const char *const netsys_2x_parents[] = { "top_xtal", "net2pll", "mmpll" };
+static const char *const netsys_gsw_parents[] = { "top_xtal", "net1pll_d4", "net1pll_d5" };
+static const char *const eth_gmii_parents[] = { "top_xtal", "net1pll_d5_d4" };
+static const char *const netsys_mcu_parents[] = { "top_xtal", "net2pll", "mmpll",
+ "net1pll_d4", "net1pll_d5", "mpll" };
+static const char *const eip197_parents[] = { "top_xtal", "netsyspll", "net2pll",
+ "mmpll", "net1pll_d4", "net1pll_d5" };
+static const char *const axi_infra_parents[] = { "top_xtal", "net1pll_d8_d2" };
+static const char *const uart_parents[] = { "top_xtal", "mpll_d8", "mpll_d8_d2" };
+static const char *const emmc_250m_parents[] = { "top_xtal", "net1pll_d5_d2", "mmpll_d4" };
+static const char *const emmc_400m_parents[] = { "top_xtal", "msdcpll", "mmpll_d2",
+ "mpll_d2", "mmpll_d4", "net1pll_d8_d2" };
+static const char *const spi_parents[] = { "top_xtal", "mpll_d2", "mmpll_d4",
+ "net1pll_d8_d2", "net2pll_d6", "net1pll_d5_d4",
+ "mpll_d4", "net1pll_d8_d4" };
+static const char *const nfi1x_parents[] = { "top_xtal", "mmpll_d4", "net1pll_d8_d2", "net2pll_d6",
+ "mpll_d4", "mmpll_d8", "net1pll_d8_d4", "mpll_d8" };
+static const char *const spinfi_parents[] = { "top_xtal_d2", "top_xtal", "net1pll_d5_d4",
+ "mpll_d4", "mmpll_d8", "net1pll_d8_d4",
+ "mmpll_d6_d2", "mpll_d8" };
+static const char *const pwm_parents[] = { "top_xtal", "net1pll_d8_d2", "net1pll_d5_d4",
+ "mpll_d4", "mpll_d8_d2", "top_rtc_32k" };
+static const char *const i2c_parents[] = { "top_xtal", "net1pll_d5_d4", "mpll_d4",
+ "net1pll_d8_d4" };
+static const char *const pcie_mbist_250m_parents[] = { "top_xtal", "net1pll_d5_d2" };
+static const char *const pextp_tl_ck_parents[] = { "top_xtal", "net2pll_d6", "mmpll_d8",
+ "mpll_d8_d2", "top_rtc_32k" };
+static const char *const usb_frmcnt_parents[] = { "top_xtal", "mmpll_d3_d5" };
+static const char *const aud_parents[] = { "top_xtal", "apll2" };
+static const char *const a1sys_parents[] = { "top_xtal", "apll2_d4" };
+static const char *const aud_l_parents[] = { "top_xtal", "apll2", "mpll_d8_d2" };
+static const char *const sspxtp_parents[] = { "top_xtal_d2", "mpll_d8_d2" };
+static const char *const usxgmii_sbus_0_parents[] = { "top_xtal", "net1pll_d8_d4" };
+static const char *const sgm_0_parents[] = { "top_xtal", "sgmpll" };
+static const char *const sysapb_parents[] = { "top_xtal", "mpll_d3_d2" };
+static const char *const eth_refck_50m_parents[] = { "top_xtal", "net2pll_d4_d4" };
+static const char *const eth_sys_200m_parents[] = { "top_xtal", "net2pll_d4" };
+static const char *const eth_xgmii_parents[] = { "top_xtal_d2", "net1pll_d8_d8", "net1pll_d8_d16" };
+static const char *const bus_tops_parents[] = { "top_xtal", "net1pll_d5", "net2pll_d2" };
+static const char *const npu_tops_parents[] = { "top_xtal", "net2pll" };
+static const char *const dramc_md32_parents[] = { "top_xtal", "mpll_d2", "wedmcupll" };
+static const char *const da_xtp_glb_p0_parents[] = { "top_xtal", "net2pll_d8" };
+static const char *const mcusys_backup_625m_parents[] = { "top_xtal", "net1pll_d4" };
+static const char *const macsec_parents[] = { "top_xtal", "sgmpll", "net1pll_d8" };
+static const char *const netsys_tops_400m_parents[] = { "top_xtal", "net2pll_d2" };
+static const char *const eth_mii_parents[] = { "top_xtal_d2", "net2pll_d4_d8" };
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_SEL, "netsys_sel", netsys_parents, 0x000, 0x004, 0x008,
+ 0, 2, 7, 0x1c0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_500M_SEL, "netsys_500m_sel", netsys_500m_parents, 0x000,
+ 0x004, 0x008, 8, 2, 15, 0x1C0, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_2X_SEL, "netsys_2x_sel", netsys_2x_parents, 0x000,
+ 0x004, 0x008, 16, 2, 23, 0x1C0, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_GSW_SEL, "netsys_gsw_sel", netsys_gsw_parents, 0x000,
+ 0x004, 0x008, 24, 2, 31, 0x1C0, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_GMII_SEL, "eth_gmii_sel", eth_gmii_parents, 0x010, 0x014,
+ 0x018, 0, 1, 7, 0x1C0, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_MCU_SEL, "netsys_mcu_sel", netsys_mcu_parents, 0x010,
+ 0x014, 0x018, 8, 3, 15, 0x1C0, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_PAO_2X_SEL, "netsys_pao_2x_sel", netsys_mcu_parents,
+ 0x010, 0x014, 0x018, 16, 3, 23, 0x1C0, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP197_SEL, "eip197_sel", eip197_parents, 0x010, 0x014, 0x018,
+ 24, 3, 31, 0x1c0, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_INFRA_SEL, "axi_infra_sel", axi_infra_parents, 0x020,
+ 0x024, 0x028, 0, 1, 7, 0x1C0, 8, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x020, 0x024, 0x028, 8, 2,
+ 15, 0x1c0, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_250M_SEL, "emmc_250m_sel", emmc_250m_parents, 0x020,
+ 0x024, 0x028, 16, 2, 23, 0x1C0, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_400M_SEL, "emmc_400m_sel", emmc_400m_parents, 0x020,
+ 0x024, 0x028, 24, 3, 31, 0x1C0, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x030, 0x034, 0x038, 0, 3, 7,
+ 0x1c0, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPIM_MST_SEL, "spim_mst_sel", spi_parents, 0x030, 0x034, 0x038,
+ 8, 3, 15, 0x1c0, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI1X_SEL, "nfi1x_sel", nfi1x_parents, 0x030, 0x034, 0x038, 16,
+ 3, 23, 0x1c0, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINFI_SEL, "spinfi_sel", spinfi_parents, 0x030, 0x034, 0x038,
+ 24, 3, 31, 0x1c0, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x040, 0x044, 0x048, 0, 3, 7,
+ 0x1c0, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, 0x040, 0x044, 0x048, 8, 2, 15,
+ 0x1c0, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PCIE_MBIST_250M_SEL, "pcie_mbist_250m_sel",
+ pcie_mbist_250m_parents, 0x040, 0x044, 0x048, 16, 1, 23, 0x1C0, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_SEL, "pextp_tl_sel", pextp_tl_ck_parents, 0x040,
+ 0x044, 0x048, 24, 3, 31, 0x1C0, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_P1_SEL, "pextp_tl_p1_sel", pextp_tl_ck_parents, 0x050,
+ 0x054, 0x058, 0, 3, 7, 0x1C0, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_P2_SEL, "pextp_tl_p2_sel", pextp_tl_ck_parents, 0x050,
+ 0x054, 0x058, 8, 3, 15, 0x1C0, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_P3_SEL, "pextp_tl_p3_sel", pextp_tl_ck_parents, 0x050,
+ 0x054, 0x058, 16, 3, 23, 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_SYS_SEL, "usb_sys_sel", eth_gmii_parents, 0x050, 0x054,
+ 0x058, 24, 1, 31, 0x1C0, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_SYS_P1_SEL, "usb_sys_p1_sel", eth_gmii_parents, 0x060,
+ 0x064, 0x068, 0, 1, 7, 0x1C0, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_XHCI_SEL, "usb_xhci_sel", eth_gmii_parents, 0x060, 0x064,
+ 0x068, 8, 1, 15, 0x1C0, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_XHCI_P1_SEL, "usb_xhci_p1_sel", eth_gmii_parents, 0x060,
+ 0x064, 0x068, 16, 1, 23, 0x1C0, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_FRMCNT_SEL, "usb_frmcnt_sel", usb_frmcnt_parents, 0x060,
+ 0x064, 0x068, 24, 1, 31, 0x1C0, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_FRMCNT_P1_SEL, "usb_frmcnt_p1_sel", usb_frmcnt_parents,
+ 0x070, 0x074, 0x078, 0, 1, 7, 0x1C0, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_SEL, "aud_sel", aud_parents, 0x070, 0x074, 0x078, 8, 1, 15,
+ 0x1c0, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_SEL, "a1sys_sel", a1sys_parents, 0x070, 0x074, 0x078, 16,
+ 1, 23, 0x1c0, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_L_SEL, "aud_l_sel", aud_l_parents, 0x070, 0x074, 0x078, 24,
+ 2, 31, 0x1c4, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A_TUNER_SEL, "a_tuner_sel", a1sys_parents, 0x080, 0x084, 0x088,
+ 0, 1, 7, 0x1c4, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSPXTP_SEL, "sspxtp_sel", sspxtp_parents, 0x080, 0x084, 0x088,
+ 8, 1, 15, 0x1c4, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_PHY_SEL, "usb_phy_sel", sspxtp_parents, 0x080, 0x084,
+ 0x088, 16, 1, 23, 0x1c4, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USXGMII_SBUS_0_SEL, "usxgmii_sbus_0_sel",
+ usxgmii_sbus_0_parents, 0x080, 0x084, 0x088, 24, 1, 31, 0x1C4, 4),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USXGMII_SBUS_1_SEL, "usxgmii_sbus_1_sel",
+ usxgmii_sbus_0_parents, 0x090, 0x094, 0x098, 0, 1, 7, 0x1C4, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_0_SEL, "sgm_0_sel", sgm_0_parents, 0x090, 0x094, 0x098, 8,
+ 1, 15, 0x1c4, 6),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SGM_SBUS_0_SEL, "sgm_sbus_0_sel", usxgmii_sbus_0_parents,
+ 0x090, 0x094, 0x098, 16, 1, 23, 0x1C4, 7, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_1_SEL, "sgm_1_sel", sgm_0_parents, 0x090, 0x094, 0x098, 24,
+ 1, 31, 0x1c4, 8),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SGM_SBUS_1_SEL, "sgm_sbus_1_sel", usxgmii_sbus_0_parents,
+ 0x0a0, 0x0a4, 0x0a8, 0, 1, 7, 0x1C4, 9, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_XFI_PHY_0_XTAL_SEL, "xfi_phy_0_xtal_sel", sspxtp_parents,
+ 0x0a0, 0x0a4, 0x0a8, 8, 1, 15, 0x1C4, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_XFI_PHY_1_XTAL_SEL, "xfi_phy_1_xtal_sel", sspxtp_parents,
+ 0x0a0, 0x0a4, 0x0a8, 16, 1, 23, 0x1C4, 11),
+ /* CLK_CFG_11 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SYSAXI_SEL, "sysaxi_sel", axi_infra_parents, 0x0a0,
+ 0x0a4, 0x0a8, 24, 1, 31, 0x1C4, 12, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SYSAPB_SEL, "sysapb_sel", sysapb_parents, 0x0b0, 0x0b4,
+ 0x0b8, 0, 1, 7, 0x1c4, 13, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_REFCK_50M_SEL, "eth_refck_50m_sel", eth_refck_50m_parents,
+ 0x0b0, 0x0b4, 0x0b8, 8, 1, 15, 0x1C4, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_SYS_200M_SEL, "eth_sys_200m_sel", eth_sys_200m_parents,
+ 0x0b0, 0x0b4, 0x0b8, 16, 1, 23, 0x1C4, 15),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_SYS_SEL, "eth_sys_sel", pcie_mbist_250m_parents, 0x0b0,
+ 0x0b4, 0x0b8, 24, 1, 31, 0x1C4, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_XGMII_SEL, "eth_xgmii_sel", eth_xgmii_parents, 0x0c0,
+ 0x0c4, 0x0c8, 0, 2, 7, 0x1C4, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_BUS_TOPS_SEL, "bus_tops_sel", bus_tops_parents, 0x0c0, 0x0c4,
+ 0x0c8, 8, 2, 15, 0x1C4, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NPU_TOPS_SEL, "npu_tops_sel", npu_tops_parents, 0x0c0, 0x0c4,
+ 0x0c8, 16, 1, 23, 0x1C4, 19),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DRAMC_SEL, "dramc_sel", sspxtp_parents, 0x0c0, 0x0c4,
+ 0x0c8, 24, 1, 31, 0x1C4, 20, CLK_IS_CRITICAL),
+ /* CLK_CFG_13 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DRAMC_MD32_SEL, "dramc_md32_sel", dramc_md32_parents,
+ 0x0d0, 0x0d4, 0x0d8, 0, 2, 7, 0x1C4, 21, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_INFRA_F26M_SEL, "csw_infra_f26m_sel", sspxtp_parents,
+ 0x0d0, 0x0d4, 0x0d8, 8, 1, 15, 0x1C4, 22, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_P0_SEL, "pextp_p0_sel", sspxtp_parents, 0x0d0, 0x0d4,
+ 0x0d8, 16, 1, 23, 0x1C4, 23),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_P1_SEL, "pextp_p1_sel", sspxtp_parents, 0x0d0, 0x0d4,
+ 0x0d8, 24, 1, 31, 0x1C4, 24),
+ /* CLK_CFG_14 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_P2_SEL, "pextp_p2_sel", sspxtp_parents, 0x0e0, 0x0e4,
+ 0x0e8, 0, 1, 7, 0x1C4, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_P3_SEL, "pextp_p3_sel", sspxtp_parents, 0x0e0, 0x0e4,
+ 0x0e8, 8, 1, 15, 0x1C4, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_XTP_GLB_P0_SEL, "da_xtp_glb_p0_sel", da_xtp_glb_p0_parents,
+ 0x0e0, 0x0e4, 0x0e8, 16, 1, 23, 0x1C4, 27),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_XTP_GLB_P1_SEL, "da_xtp_glb_p1_sel", da_xtp_glb_p0_parents,
+ 0x0e0, 0x0e4, 0x0e8, 24, 1, 31, 0x1C4, 28),
+ /* CLK_CFG_15 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_XTP_GLB_P2_SEL, "da_xtp_glb_p2_sel", da_xtp_glb_p0_parents,
+ 0x0f0, 0x0f4, 0x0f8, 0, 1, 7, 0x1C4, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_XTP_GLB_P3_SEL, "da_xtp_glb_p3_sel", da_xtp_glb_p0_parents,
+ 0x0f0, 0x0f4, 0x0f8, 8, 1, 15, 0x1C4, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CKM_SEL, "ckm_sel", sspxtp_parents, 0x0F0, 0x0f4, 0x0f8, 16, 1,
+ 23, 0x1c8, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_SEL, "da_sel", sspxtp_parents, 0x0f0, 0x0f4, 0x0f8, 24, 1,
+ 31, 0x1C8, 1),
+ /* CLK_CFG_16 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_SEL, "pextp_sel", sspxtp_parents, 0x0100, 0x104, 0x108,
+ 0, 1, 7, 0x1c8, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TOPS_P2_26M_SEL, "tops_p2_26m_sel", sspxtp_parents, 0x0100,
+ 0x104, 0x108, 8, 1, 15, 0x1C8, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MCUSYS_BACKUP_625M_SEL, "mcusys_backup_625m_sel",
+ mcusys_backup_625m_parents, 0x0100, 0x104, 0x108, 16, 1, 23, 0x1C8, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_SYNC_250M_SEL, "netsys_sync_250m_sel",
+ pcie_mbist_250m_parents, 0x0100, 0x104, 0x108, 24, 1, 31, 0x1c8, 5),
+ /* CLK_CFG_17 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MACSEC_SEL, "macsec_sel", macsec_parents, 0x0110, 0x114, 0x118,
+ 0, 2, 7, 0x1c8, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_TOPS_400M_SEL, "netsys_tops_400m_sel",
+ netsys_tops_400m_parents, 0x0110, 0x114, 0x118, 8, 1, 15, 0x1c8, 7),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_PPEFB_250M_SEL, "netsys_ppefb_250m_sel",
+ pcie_mbist_250m_parents, 0x0110, 0x114, 0x118, 16, 1, 23, 0x1c8, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_WARP_SEL, "netsys_warp_sel", netsys_parents, 0x0110,
+ 0x114, 0x118, 24, 2, 31, 0x1C8, 9),
+ /* CLK_CFG_18 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_MII_SEL, "eth_mii_sel", eth_mii_parents, 0x0120, 0x124,
+ 0x128, 0, 1, 7, 0x1c8, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NPU_SEL, "ck_npu_sel", netsys_2x_parents, 0x0120, 0x124, 0x128,
+ 8, 2, 15, 0x1c8, 11),
+};
+
+static const struct mtk_composite top_aud_divs[] = {
+ DIV_GATE(CLK_TOP_AUD_I2S_M, "aud_i2s_m", "aud_sel", 0x0420, 0, 0x0420, 8, 8),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .fixed_clks = top_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(top_fixed_clks),
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs),
+ .clk_lock = &mt7988_clk_lock,
+};
+
+static const char *const mcu_bus_div_parents[] = { "top_xtal", "ccipll2_b", "net1pll_d4" };
+
+static const char *const mcu_arm_div_parents[] = { "top_xtal", "arm_b", "net1pll_d4" };
+
+static struct mtk_composite mcu_muxes[] = {
+ /* bus_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_BUS_DIV_SEL, "mcu_bus_div_sel", mcu_bus_div_parents, 0x7C0, 9, 2, -1,
+ CLK_IS_CRITICAL),
+ /* mp2_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_ARM_DIV_SEL, "mcu_arm_div_sel", mcu_arm_div_parents, 0x7A8, 9, 2, -1,
+ CLK_IS_CRITICAL),
+};
+
+static const struct mtk_clk_desc mcusys_desc = {
+ .composite_clks = mcu_muxes,
+ .num_composite_clks = ARRAY_SIZE(mcu_muxes),
+};
+
+static const struct of_device_id of_match_clk_mt7988_topckgen[] = {
+ { .compatible = "mediatek,mt7988-topckgen", .data = &topck_desc },
+ { .compatible = "mediatek,mt7988-mcusys", .data = &mcusys_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_topckgen);
+
+static struct platform_driver clk_mt7988_topckgen_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove_new = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt7988-topckgen",
+ .of_match_table = of_match_clk_mt7988_topckgen,
+ },
+};
+module_platform_driver(clk_mt7988_topckgen_drv);
+MODULE_LICENSE("GPL");
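[Editor's note: for readers decoding the tables above, the positional arguments of MUX_GATE_CLR_SET_UPD map one-to-one onto mtk_mux fields (see the clk-mux.h hunks later in this patch). A sketch annotating the first entry, netsys_sel, with the field each argument fills:]

	/*
	 * MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_SEL, "netsys_sel", netsys_parents,
	 *	0x000,	.mux_ofs:     plain read/modify register
	 *	0x004,	.mux_set_ofs: write-1-to-set companion register
	 *	0x008,	.mux_clr_ofs: write-1-to-clear companion register
	 *	0,	.mux_shift:   selector field starts at bit 0
	 *	2,	.mux_width:   2-bit selector, enough for the 3 parents
	 *	7,	.gate_shift:  bit 7 gates the mux output
	 *	0x1c0,	.upd_ofs:     "update" register
	 *	0)	.upd_shift:   bit 0 latches the newly written selector
	 */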
diff --git a/drivers/clk/mediatek/clk-mt7988-xfipll.c b/drivers/clk/mediatek/clk-mt7988-xfipll.c
new file mode 100644
index 000000000..9b9ca5471
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7988-xfipll.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include <dt-bindings/clock/mediatek,mt7988-clk.h>
+
+/* Register to control USXGMII XFI PLL analog */
+#define XFI_PLL_ANA_GLB8 0x108
+#define RG_XFI_PLL_ANA_SWWA 0x02283248
+
+static const struct mtk_gate_regs xfipll_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_XFIPLL(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &xfipll_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_fixed_factor xfipll_divs[] = {
+ FACTOR(CLK_XFIPLL_PLL, "xfipll_pll", "top_xtal", 125, 32),
+};
+
+static const struct mtk_gate xfipll_clks[] = {
+ GATE_XFIPLL(CLK_XFIPLL_PLL_EN, "xfipll_pll_en", "xfipll_pll", 31),
+};
+
+static const struct mtk_clk_desc xfipll_desc = {
+ .clks = xfipll_clks,
+ .num_clks = ARRAY_SIZE(xfipll_clks),
+ .factor_clks = xfipll_divs,
+ .num_factor_clks = ARRAY_SIZE(xfipll_divs),
+};
+
+static int clk_mt7988_xfipll_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base = of_iomap(node, 0);
+
+ if (!base)
+ return -ENOMEM;
+
+ /* Apply software workaround for USXGMII PLL TCL issue */
+ writel(RG_XFI_PLL_ANA_SWWA, base + XFI_PLL_ANA_GLB8);
+ iounmap(base);
+
+ return mtk_clk_simple_probe(pdev);
+}
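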
+
+static const struct of_device_id of_match_clk_mt7988_xfipll[] = {
+ { .compatible = "mediatek,mt7988-xfi-pll", .data = &xfipll_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt7988_xfipll);
+
+static struct platform_driver clk_mt7988_xfipll_drv = {
+ .driver = {
+ .name = "clk-mt7988-xfipll",
+ .of_match_table = of_match_clk_mt7988_xfipll,
+ },
+ .probe = clk_mt7988_xfipll_probe,
+ .remove_new = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt7988_xfipll_drv);
+
+MODULE_DESCRIPTION("MediaTek MT7988 XFI PLL clock driver");
+MODULE_LICENSE("GPL");
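[Editor's note: the 125/32 fixed factor above is easy to sanity-check by hand. Assuming the 40 MHz top_xtal registered by the topckgen driver as parent, a standalone sketch of the rate computation:]

	#include <stdio.h>

	/* xfipll_pll rate = parent_rate * mult / div, per the FACTOR() entry */
	int main(void)
	{
		unsigned long long parent = 40000000ULL;	/* top_xtal, 40 MHz */
		unsigned long long rate = parent * 125 / 32;

		printf("%llu\n", rate);	/* 156250000: the 156.25 MHz XFI reference */
		return 0;
	}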
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
index e330a4f9a..f7ec599b2 100644
--- a/drivers/clk/mediatek/clk-mt8188-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -475,29 +475,28 @@ static const char * const sspm_parents[] = {
"mainpll_d4_d2"
};
+/*
+ * Both DP/eDP can be parented to TVDPLL1 and TVDPLL2, but we force using
+ * TVDPLL1 on eDP and TVDPLL2 on DP to avoid changing the "other" PLL rate
+ * in the dual output case, which would lead to corruption or functionality loss.
+ */
static const char * const dp_parents[] = {
"clk26m",
- "tvdpll1_d2",
"tvdpll2_d2",
- "tvdpll1_d4",
"tvdpll2_d4",
- "tvdpll1_d8",
"tvdpll2_d8",
- "tvdpll1_d16",
"tvdpll2_d16"
};
+static const u8 dp_parents_idx[] = { 0, 2, 4, 6, 8 };
static const char * const edp_parents[] = {
"clk26m",
"tvdpll1_d2",
- "tvdpll2_d2",
"tvdpll1_d4",
- "tvdpll2_d4",
"tvdpll1_d8",
- "tvdpll2_d8",
- "tvdpll1_d16",
- "tvdpll2_d16"
+ "tvdpll1_d16"
};
+static const u8 edp_parents_idx[] = { 0, 1, 3, 5, 7 };
static const char * const dpi_parents[] = {
"clk26m",
@@ -1038,10 +1037,12 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_SSPM, "top_sspm",
sspm_parents, 0x080, 0x084, 0x088, 24, 4, 31, 0x08, 3),
/* CLK_CFG_9 */
- MUX_GATE_CLR_SET_UPD(CLK_TOP_DP, "top_dp",
- dp_parents, 0x08C, 0x090, 0x094, 0, 4, 7, 0x08, 4),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_EDP, "top_edp",
- edp_parents, 0x08C, 0x090, 0x094, 8, 4, 15, 0x08, 5),
+ MUX_GATE_CLR_SET_UPD_INDEXED(CLK_TOP_DP, "top_dp",
+ dp_parents, dp_parents_idx, 0x08C, 0x090, 0x094,
+ 0, 4, 7, 0x08, 4),
+ MUX_GATE_CLR_SET_UPD_INDEXED(CLK_TOP_EDP, "top_edp",
+ edp_parents, edp_parents_idx, 0x08C, 0x090, 0x094,
+ 8, 4, 15, 0x08, 5),
MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi",
dpi_parents, 0x08C, 0x090, 0x094, 16, 4, 23, 0x08, 6),
MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM0, "top_disp_pwm0",
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index 5c426a1c9..8f713a334 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -415,17 +415,28 @@ static const char * const pwrmcu_parents[] = {
"mainpll_d4_d2"
};
+/*
+ * Both DP/eDP can be parented to TVDPLL1 and TVDPLL2, but we force using
+ * TVDPLL1 on eDP and TVDPLL2 on DP to avoid changing the "other" PLL rate
+ * in the dual output case, which would lead to corruption or functionality loss.
+ */
static const char * const dp_parents[] = {
"clk26m",
- "tvdpll1_d2",
"tvdpll2_d2",
- "tvdpll1_d4",
"tvdpll2_d4",
- "tvdpll1_d8",
"tvdpll2_d8",
- "tvdpll1_d16",
"tvdpll2_d16"
};
+static const u8 dp_parents_idx[] = { 0, 2, 4, 6, 8 };
+
+static const char * const edp_parents[] = {
+ "clk26m",
+ "tvdpll1_d2",
+ "tvdpll1_d4",
+ "tvdpll1_d8",
+ "tvdpll1_d16"
+};
+static const u8 edp_parents_idx[] = { 0, 1, 3, 5, 7 };
static const char * const disp_pwm_parents[] = {
"clk26m",
@@ -957,11 +968,11 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_PWRMCU, "top_pwrmcu",
pwrmcu_parents, 0x08C, 0x090, 0x094, 16, 3, 23, 0x08, 6,
CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_DP, "top_dp",
- dp_parents, 0x08C, 0x090, 0x094, 24, 4, 31, 0x08, 7),
+ MUX_GATE_CLR_SET_UPD_INDEXED(CLK_TOP_DP, "top_dp",
+ dp_parents, dp_parents_idx, 0x08C, 0x090, 0x094, 24, 4, 31, 0x08, 7),
/* CLK_CFG_10 */
- MUX_GATE_CLR_SET_UPD(CLK_TOP_EDP, "top_edp",
- dp_parents, 0x098, 0x09C, 0x0A0, 0, 4, 7, 0x08, 8),
+ MUX_GATE_CLR_SET_UPD_INDEXED(CLK_TOP_EDP, "top_edp",
+ edp_parents, edp_parents_idx, 0x098, 0x09C, 0x0A0, 0, 4, 7, 0x08, 8),
MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi",
dp_parents, 0x098, 0x09C, 0x0A0, 8, 4, 15, 0x08, 9),
MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM0, "top_disp_pwm0",
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 2e55368dc..bd37ab4d1 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -494,6 +495,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
}
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+	 * Do a pm_runtime_resume_and_get() to work around a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
/* Calculate how many clk_hw_onecell_data entries to allocate */
num_clks = mcd->num_clks + mcd->num_composite_clks;
num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
@@ -574,6 +585,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
@@ -604,6 +617,8 @@ free_data:
free_base:
if (mcd->shared_io && base)
iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
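[Editor's note: condensed, the runtime-PM bracket added above follows the shape sketched below. register_all_clocks() is a hypothetical stand-in for the allocation and registration body of __mtk_clk_simple_probe(); the point is that the active PM reference covers clk_register() and is dropped on success and error paths alike.]

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	/* hypothetical stand-in for the real allocation/registration body */
	static int register_all_clocks(struct platform_device *pdev);

	static int probe_pm_bracket(struct platform_device *pdev)
	{
		int r;

		devm_pm_runtime_enable(&pdev->dev);

		/* Hold an active PM reference for the whole registration phase */
		r = pm_runtime_resume_and_get(&pdev->dev);
		if (r)
			return r;

		r = register_all_clocks(pdev);

		/* Dropped whether registration succeeded or failed */
		pm_runtime_put(&pdev->dev);
		return r;
	}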
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index c93bc7f92..609902964 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -89,6 +89,17 @@ static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
regmap_read(mux->regmap, mux->data->mux_ofs, &val);
val = (val >> mux->data->mux_shift) & mask;
+ if (mux->data->parent_index) {
+ int i;
+
+ for (i = 0; i < mux->data->num_parents; i++)
+ if (mux->data->parent_index[i] == val)
+ return i;
+
+		/* Not found: return an impossible index to generate an error */
+ return mux->data->num_parents + 1;
+ }
+
return val;
}
@@ -104,6 +115,9 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
else
__acquire(mux->lock);
+ if (mux->data->parent_index)
+ index = mux->data->parent_index[index];
+
regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
val = (orig & ~(mask << mux->data->mux_shift))
| (index << mux->data->mux_shift);
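[Editor's note: the two hunks above add a forward and a reverse translation between the clock framework's dense parent index and the sparse hardware selector values. A self-contained sketch of the same mapping, using the mt8188 dp_parents_idx table from earlier in this patch:]

	#include <stdio.h>

	static const unsigned char dp_parents_idx[] = { 0, 2, 4, 6, 8 };
	#define NUM_PARENTS (sizeof(dp_parents_idx) / sizeof(dp_parents_idx[0]))

	/* set_parent direction: framework index -> hardware selector value */
	static unsigned int to_hw(unsigned int index)
	{
		return dp_parents_idx[index];
	}

	/* get_parent direction: hardware selector value -> framework index */
	static int to_index(unsigned int val)
	{
		unsigned int i;

		for (i = 0; i < NUM_PARENTS; i++)
			if (dp_parents_idx[i] == val)
				return i;

		return NUM_PARENTS + 1;	/* impossible index, flags a bogus readback */
	}

	int main(void)
	{
		printf("index 1 -> hw %u\n", to_hw(1));    /* "tvdpll2_d2" -> selector 2 */
		printf("hw 6 -> index %d\n", to_index(6)); /* selector 6 -> index 3 */
		return 0;
	}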
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 7ecb963b0..943ad1d7c 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -21,6 +21,7 @@ struct mtk_mux {
int id;
const char *name;
const char * const *parent_names;
+ const u8 *parent_index;
unsigned int flags;
u32 mux_ofs;
@@ -37,9 +38,10 @@ struct mtk_mux {
signed char num_parents;
};
-#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
- _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
- _gate, _upd_ofs, _upd, _flags, _ops) { \
+#define __GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ _num_parents, _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, \
+ _upd, _flags, _ops) { \
.id = _id, \
.name = _name, \
.mux_ofs = _mux_ofs, \
@@ -51,11 +53,28 @@ struct mtk_mux {
.gate_shift = _gate, \
.upd_shift = _upd, \
.parent_names = _parents, \
- .num_parents = ARRAY_SIZE(_parents), \
+ .parent_index = _paridx, \
+ .num_parents = _num_parents, \
.flags = _flags, \
.ops = &_ops, \
}
+#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags, _ops) \
+ __GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ NULL, ARRAY_SIZE(_parents), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+			_gate, _upd_ofs, _upd, _flags, _ops)
+
+#define GATE_CLR_SET_UPD_FLAGS_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, _flags, _ops) \
+ __GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _paridx, ARRAY_SIZE(_paridx), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+			_gate, _upd_ofs, _upd, _flags, _ops)
+
extern const struct clk_ops mtk_mux_clr_set_upd_ops;
extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
@@ -67,6 +86,14 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
_gate, _upd_ofs, _upd, _flags, \
mtk_mux_gate_clr_set_upd_ops)
+#define MUX_GATE_CLR_SET_UPD_FLAGS_INDEXED(_id, _name, _parents, \
+ _paridx, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, _flags) \
+ GATE_CLR_SET_UPD_FLAGS_INDEXED(_id, _name, _parents, \
+ _paridx, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, _flags, \
+ mtk_mux_gate_clr_set_upd_ops)
+
#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
_gate, _upd_ofs, _upd) \
@@ -75,6 +102,14 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
_width, _gate, _upd_ofs, _upd, \
CLK_SET_RATE_PARENT)
+#define MUX_GATE_CLR_SET_UPD_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd) \
+ MUX_GATE_CLR_SET_UPD_FLAGS_INDEXED(_id, _name, \
+ _parents, _paridx, _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, \
+ _upd, CLK_SET_RATE_PARENT)
+
#define MUX_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
_upd_ofs, _upd) \
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 513ab6b1b..ce453e171 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -23,7 +23,7 @@
#define CON0_BASE_EN BIT(0)
#define CON0_PWR_ON BIT(0)
#define CON0_ISO_EN BIT(1)
-#define PCW_CHG_MASK BIT(31)
+#define PCW_CHG_BIT 31
#define AUDPLL_TUNER_EN BIT(31)
@@ -114,7 +114,8 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
pll->data->pcw_shift);
val |= pcw << pll->data->pcw_shift;
writel(val, pll->pcw_addr);
- chg = readl(pll->pcw_chg_addr) | PCW_CHG_MASK;
+ chg = readl(pll->pcw_chg_addr) |
+ BIT(pll->data->pcw_chg_bit ? : PCW_CHG_BIT);
writel(chg, pll->pcw_chg_addr);
if (pll->tuner_addr)
writel(val + 1, pll->tuner_addr);
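[Editor's note: the `pll->data->pcw_chg_bit ? : PCW_CHG_BIT` expression relies on the GNU C "elvis" extension (`a ?: b` yields a when a is non-zero, else b), so a zero, i.e. unset, pcw_chg_bit keeps the legacy bit 31. A small sketch of the idiom, GNU C as used throughout the kernel:]

	#include <stdio.h>

	#define PCW_CHG_BIT	31
	#define BIT(n)		(1UL << (n))

	static unsigned long pcw_chg_mask(unsigned char pcw_chg_bit)
	{
		/* zero means "not set in mtk_pll_data": fall back to bit 31 */
		return BIT(pcw_chg_bit ? : PCW_CHG_BIT);
	}

	int main(void)
	{
		printf("%#lx\n", pcw_chg_mask(0));	/* 0x80000000: legacy default */
		printf("%#lx\n", pcw_chg_mask(5));	/* 0x20: per-SoC override */
		return 0;
	}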
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index f17278ff1..285c8db95 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -48,6 +48,7 @@ struct mtk_pll_data {
const char *parent_name;
u32 en_reg;
u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
+ u8 pcw_chg_bit;
};
/*
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index f373a8d48..90f4c6103 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -3549,6 +3549,22 @@ static struct clk_regmap g12a_cts_encp_sel = {
},
};
+static struct clk_regmap g12a_cts_encl_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encl_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = g12a_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct clk_regmap g12a_cts_vdac_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VIID_CLK_DIV,
@@ -3628,6 +3644,22 @@ static struct clk_regmap g12a_cts_encp = {
},
};
+static struct clk_regmap g12a_cts_encl = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_encl",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_cts_encl_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
static struct clk_regmap g12a_cts_vdac = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL2,
@@ -3722,6 +3754,66 @@ static struct clk_regmap g12a_mipi_dsi_pxclk = {
},
};
+/* MIPI ISP Clocks */
+
+static const struct clk_parent_data g12b_mipi_isp_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_gp0_pll.hw },
+ { .hw = &g12a_hifi_pll.hw },
+ { .hw = &g12a_fclk_div2p5.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_fclk_div4.hw },
+ { .hw = &g12a_fclk_div5.hw },
+ { .hw = &g12a_fclk_div7.hw },
+};
+
+static struct clk_regmap g12b_mipi_isp_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_ISP_CLK_CNTL,
+ .mask = 7,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mipi_isp_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = g12b_mipi_isp_parent_data,
+ .num_parents = ARRAY_SIZE(g12b_mipi_isp_parent_data),
+ },
+};
+
+static struct clk_regmap g12b_mipi_isp_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_ISP_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mipi_isp_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_mipi_isp_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12b_mipi_isp = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_ISP_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "mipi_isp",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12b_mipi_isp_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* HDMI Clocks */
static const struct clk_parent_data g12a_hdmi_parent_data[] = {
@@ -4214,9 +4306,12 @@ static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
+static MESON_GATE(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17);
static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
+static MESON_GATE(g12b_csi_phy1, HHI_GCLK_MPEG2, 28);
+static MESON_GATE(g12b_csi_phy0, HHI_GCLK_MPEG2, 29);
static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
@@ -4407,10 +4502,12 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
[CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
[CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
+ [CLKID_CTS_ENCL_SEL] = &g12a_cts_encl_sel.hw,
[CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
[CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
[CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
[CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
+ [CLKID_CTS_ENCL] = &g12a_cts_encl.hw,
[CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
[CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
[CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
@@ -4632,10 +4729,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
[CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
[CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
+ [CLKID_CTS_ENCL_SEL] = &g12a_cts_encl_sel.hw,
[CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
[CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
[CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
[CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
+ [CLKID_CTS_ENCL] = &g12a_cts_encl.hw,
[CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
[CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
[CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
@@ -4729,6 +4828,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
[CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
[CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
+ [CLKID_MIPI_ISP_SEL] = &g12b_mipi_isp_sel.hw,
+ [CLKID_MIPI_ISP_DIV] = &g12b_mipi_isp_div.hw,
+ [CLKID_MIPI_ISP] = &g12b_mipi_isp.hw,
+ [CLKID_MIPI_ISP_GATE] = &g12b_mipi_isp_gate.hw,
+ [CLKID_MIPI_ISP_CSI_PHY0] = &g12b_csi_phy0.hw,
+ [CLKID_MIPI_ISP_CSI_PHY1] = &g12b_csi_phy1.hw,
};
static struct clk_hw *sm1_hw_clks[] = {
@@ -4892,10 +4997,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw,
[CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw,
[CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw,
+ [CLKID_CTS_ENCL_SEL] = &g12a_cts_encl_sel.hw,
[CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw,
[CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw,
[CLKID_CTS_ENCI] = &g12a_cts_enci.hw,
[CLKID_CTS_ENCP] = &g12a_cts_encp.hw,
+ [CLKID_CTS_ENCL] = &g12a_cts_encl.hw,
[CLKID_CTS_VDAC] = &g12a_cts_vdac.hw,
[CLKID_HDMI_TX] = &g12a_hdmi_tx.hw,
[CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw,
@@ -5123,10 +5230,12 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&g12a_vclk2_div12_en,
&g12a_cts_enci_sel,
&g12a_cts_encp_sel,
+ &g12a_cts_encl_sel,
&g12a_cts_vdac_sel,
&g12a_hdmi_tx_sel,
&g12a_cts_enci,
&g12a_cts_encp,
+ &g12a_cts_encl,
&g12a_cts_vdac,
&g12a_hdmi_tx,
&g12a_hdmi_sel,
@@ -5221,6 +5330,12 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&g12a_mipi_dsi_pxclk_sel,
&g12a_mipi_dsi_pxclk_div,
&g12a_mipi_dsi_pxclk,
+ &g12b_mipi_isp_sel,
+ &g12b_mipi_isp_div,
+ &g12b_mipi_isp,
+ &g12b_mipi_isp_gate,
+ &g12b_csi_phy1,
+ &g12b_csi_phy0,
};
static const struct reg_sequence g12a_init_regs[] = {
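[Editor's note: the new MIPI ISP clocks above form the usual three-stage composite within a single register, HHI_ISP_CLK_CNTL: a mux (bits 11:9) feeding a 7-bit divider (bits 6:0) feeding a gate (bit 8), with CLK_SET_RATE_PARENT letting a rate request on the gate walk back to the divider. A hedged arithmetic sketch, assuming the common clk_divider encoding in which a register value v divides by v + 1, and an fclk_div3 parent of roughly 666.67 MHz (2 GHz fixed_pll / 3):]

	#include <stdio.h>

	int main(void)
	{
		unsigned long long parent = 2000000000ULL / 3;	/* assumed fclk_div3 */
		unsigned int div_val = 1;			/* divider field value */
		unsigned long long rate = parent / (div_val + 1);

		printf("%llu\n", rate);	/* ~333 MHz out of the mipi_isp gate */
		return 0;
	}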
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index f11ee3c59..27df99c45 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -70,6 +70,7 @@
#define HHI_MALI_CLK_CNTL 0x1b0
#define HHI_VPU_CLKC_CNTL 0x1b4
#define HHI_VPU_CLK_CNTL 0x1bC
+#define HHI_ISP_CLK_CNTL 0x1C0
#define HHI_NNA_CLK_CNTL 0x1C8
#define HHI_HDMI_CLK_CNTL 0x1CC
#define HHI_VDEC_CLK_CNTL 0x1E0
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index bce61c45e..3a3ea2d14 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -4,8 +4,8 @@
*
* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
*/
-#include "asm-generic/errno-base.h"
#include <linux/clk-provider.h>
+#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index dbc3950c5..2a9da0939 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -20,6 +20,16 @@ menuconfig COMMON_CLK_QCOM
if COMMON_CLK_QCOM
+config CLK_X1E80100_GCC
+ tristate "X1E80100 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on Qualcomm Technologies, Inc
+ X1E80100 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI, I2C,
+ USB, UFS, SD/eMMC, PCIe, etc.
+
config QCOM_A53PLL
tristate "MSM8916 A53 PLL"
help
@@ -427,6 +437,15 @@ config SC_CAMCC_7280
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SC_CAMCC_8280XP
+ tristate "SC8280XP Camera Clock Controller"
+ select SC_GCC_8280XP
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SC8280XP devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -668,6 +687,15 @@ config QDU_GCC_1000
QRU1000 devices. Say Y if you want to use peripheral
devices such as UART, SPI, I2C, USB, SD, PCIe, etc.
+config QDU_ECPRICC_1000
+ tristate "QDU1000/QRU1000 ECPRI Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QDU_GCC_1000
+ help
+ Support for the ECPRI clock controller on QDU1000 and
+ QRU1000 devices. Say Y if you want to support the ECPRI
+ clock controller functionality such as Ethernet.
+
config SDM_GCC_845
tristate "SDM845/SDM670 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -843,6 +871,16 @@ config SM_DISPCC_8550
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_8650
+ tristate "SM8650 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_8650
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM8650 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_4450
tristate "SM4450 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -939,6 +977,15 @@ config SM_GCC_8550
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8650
+ tristate "SM8650 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8650 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_6115
tristate "SM6115 Graphics Clock Controller"
select SM_GCC_6115
@@ -1020,6 +1067,14 @@ config SM_GPUCC_8550
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_8650
+ tristate "SM8650 Graphics Clock Controller"
+ select SM_GCC_8650
+ help
+ Support for the graphics clock controller on SM8650 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_TCSRCC_8550
tristate "SM8550 TCSR Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1028,6 +1083,14 @@ config SM_TCSRCC_8550
Support for the TCSR clock controller on SM8550 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config SM_TCSRCC_8650
+ tristate "SM8650 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on SM8650 devices.
+ Say Y if you want to use peripheral devices such as SD/UFS.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
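[Editor's note: as a usage sketch, enabling the whole SM8650 family added above as modules would look like the fragment below; the `select` lines in the entries pull in SM_GCC_8650 and QCOM_GDSC automatically, so listing GCC explicitly is belt-and-braces.]

	CONFIG_COMMON_CLK_QCOM=y
	CONFIG_SM_GCC_8650=m
	CONFIG_SM_DISPCC_8650=m
	CONFIG_SM_GPUCC_8650=m
	CONFIG_SM_TCSRCC_8650=m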
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 17edd73f9..582e06dc1 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,6 +21,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
+obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
@@ -65,9 +66,11 @@ obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o
obj-$(CONFIG_QDU_GCC_1000) += gcc-qdu1000.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
+obj-$(CONFIG_SC_CAMCC_8280XP) += camcc-sc8280xp.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_8280XP) += dispcc-sc8280xp.o
@@ -110,6 +113,7 @@ obj-$(CONFIG_SM_DISPCC_6375) += dispcc-sm6375.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
+obj-$(CONFIG_SM_DISPCC_8650) += dispcc-sm8650.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -121,6 +125,7 @@ obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
+obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
@@ -130,7 +135,9 @@ obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
+obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
+obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8350) += videocc-sm8350.o
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index 41279e543..678b805f1 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -73,6 +73,20 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
},
};
+static const struct alpha_pll_config ipq5018_pll_config = {
+ .l = 0x32,
+ .config_ctl_val = 0x4001075b,
+ .config_ctl_hi_val = 0x304,
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .early_output_mask = BIT(3),
+ .alpha_en_mask = BIT(24),
+ .status_val = 0x3,
+ .status_mask = GENMASK(10, 8),
+ .lock_det = BIT(2),
+ .test_ctl_hi_val = 0x00400003,
+};
+
static const struct alpha_pll_config ipq5332_pll_config = {
.l = 0x2d,
.config_ctl_val = 0x4001075b,
@@ -129,6 +143,12 @@ struct apss_pll_data {
const struct alpha_pll_config *pll_config;
};
+static const struct apss_pll_data ipq5018_pll_data = {
+ .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
+ .pll = &ipq_pll_stromer_plus,
+ .pll_config = &ipq5018_pll_config,
+};
+
static struct apss_pll_data ipq5332_pll_data = {
.pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
.pll = &ipq_pll_stromer_plus,
@@ -195,6 +215,7 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
}
static const struct of_device_id apss_ipq_pll_match_table[] = {
+ { .compatible = "qcom,ipq5018-a53pll", .data = &ipq5018_pll_data },
{ .compatible = "qcom,ipq5332-a53pll", .data = &ipq5332_pll_data },
{ .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_data },
{ .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_data },
diff --git a/drivers/clk/qcom/camcc-sc8280xp.c b/drivers/clk/qcom/camcc-sc8280xp.c
new file mode 100644
index 000000000..7f0ae9a5f
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc8280xp.c
@@ -0,0 +1,3066 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sc8280xp-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAMCC_PLL0_OUT_EVEN,
+ P_CAMCC_PLL0_OUT_MAIN,
+ P_CAMCC_PLL0_OUT_ODD,
+ P_CAMCC_PLL1_OUT_EVEN,
+ P_CAMCC_PLL2_OUT_AUX,
+ P_CAMCC_PLL2_OUT_EARLY,
+ P_CAMCC_PLL3_OUT_EVEN,
+ P_CAMCC_PLL4_OUT_EVEN,
+ P_CAMCC_PLL5_OUT_EVEN,
+ P_CAMCC_PLL6_OUT_EVEN,
+ P_CAMCC_PLL7_OUT_EVEN,
+ P_CAMCC_PLL7_OUT_ODD,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 1800000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+ { 595200000, 3600000000, 0 },
+};
+
+static struct alpha_pll_config camcc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll0_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll0_out_odd[] = {
+ { 0x3, 3 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_camcc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct alpha_pll_config camcc_pll1_config = {
+ .l = 0x21,
+ .alpha = 0x5555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll1_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct alpha_pll_config camcc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x08200800,
+ .config_ctl_hi_val = 0x05028011,
+ .config_ctl_hi1_val = 0x08000000,
+};
+
+static struct clk_alpha_pll camcc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = zonda_vco,
+ .num_vco = ARRAY_SIZE(zonda_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll2",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config camcc_pll3_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll3",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll3_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct alpha_pll_config camcc_pll4_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll4",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll4_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct alpha_pll_config camcc_pll5_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll5 = {
+ .offset = 0x10000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll5",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll5_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll5_out_even = {
+ .offset = 0x10000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct alpha_pll_config camcc_pll6_config = {
+ .l = 0x29,
+ .alpha = 0xaaaa,
+ .config_ctl_val = 0x20486699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll6 = {
+ .offset = 0x11000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll6",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll6_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll6_out_even = {
+ .offset = 0x11000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct alpha_pll_config camcc_pll7_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll camcc_pll7 = {
+ .offset = 0x12000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_pll7",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll7_out_even[] = {
+ { 0x1, 2 },
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll7_out_even = {
+ .offset = 0x12000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_camcc_pll7_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll7_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll7_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_camcc_pll7_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv camcc_pll7_out_odd = {
+ .offset = 0x12000,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_camcc_pll7_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_camcc_pll7_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_pll7_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
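+
+/*
+ * Each clk_div_table row is { register value, divisor } and the trailing
+ * { } sentinel terminates the walk done by the common divider helpers.
+ * camcc_pll7 is configured for 19.2 MHz * 0x32 = 960 MHz, so the /2 even
+ * and /3 odd taps above yield the 480 MHz and 320 MHz rates referenced in
+ * the frequency tables below.
+ */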
+
+static const struct parent_map camcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL0_OUT_EVEN, 2 },
+ { P_CAMCC_PLL0_OUT_ODD, 3 },
+ { P_CAMCC_PLL7_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data camcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll0_out_even.clkr.hw },
+ { .hw = &camcc_pll0_out_odd.clkr.hw },
+ { .hw = &camcc_pll7_out_even.clkr.hw },
+};
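+
+/*
+ * A parent_map table pairs each P_* source identifier with the hardware
+ * mux select value; the clk_parent_data array next to it must list the
+ * matching parents in exactly the same order, one entry per map row.
+ */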
+
+static const struct parent_map camcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL2_OUT_AUX, 2 },
+ { P_CAMCC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data camcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll2.clkr.hw },
+ { .hw = &camcc_pll2.clkr.hw },
+};
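+
+/*
+ * Both rows resolve to the same camcc_pll2 hw: the PLL's aux and early
+ * output taps appear to be modeled by a single clock, so mux selects 2
+ * and 5 differ only in which physical tap feeds the RCG.
+ */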
+
+static const struct parent_map camcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL0_OUT_EVEN, 2 },
+ { P_CAMCC_PLL0_OUT_ODD, 3 },
+ { P_CAMCC_PLL7_OUT_ODD, 4 },
+ { P_CAMCC_PLL7_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data camcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll0_out_even.clkr.hw },
+ { .hw = &camcc_pll0_out_odd.clkr.hw },
+ { .hw = &camcc_pll7_out_odd.clkr.hw },
+ { .hw = &camcc_pll7_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL0_OUT_MAIN, 1 },
+ { P_CAMCC_PLL0_OUT_EVEN, 2 },
+ { P_CAMCC_PLL0_OUT_ODD, 3 },
+ { P_CAMCC_PLL7_OUT_EVEN, 5 },
+ { P_CAMCC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data camcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll0.clkr.hw },
+ { .hw = &camcc_pll0_out_even.clkr.hw },
+ { .hw = &camcc_pll0_out_odd.clkr.hw },
+ { .hw = &camcc_pll7_out_even.clkr.hw },
+ { .hw = &camcc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data camcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data camcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data camcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data camcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAMCC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data camcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &camcc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map camcc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data camcc_parent_data_9[] = {
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map camcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data camcc_parent_data_10_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+};
+
+static const struct freq_tbl ftbl_camcc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAMCC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ F(760000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
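+
+/*
+ * F(rate, src, pre_div, m, n) describes one supported RCG frequency.  The
+ * pre-divider is encoded as 2 * pre_div - 1, which is how half-integer
+ * dividers such as 1.5 and 2.5 are expressed, and m/n program the MND
+ * counter when non-zero.  200 MHz here, for instance, is PLL0_OUT_ODD
+ * (400 MHz) divided by 2.
+ */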
+
+static struct clk_rcg2 camcc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_3,
+ .freq_tbl = ftbl_camcc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_clk_src",
+ .parent_data = camcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
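+
+/*
+ * clk_rcg2_shared_ops parks the RCG on the XO parent while it is disabled
+ * and restores the cached mux/divider configuration on re-enable; RCGs
+ * that must hold their programmed source use plain clk_rcg2_ops instead.
+ */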
+
+static const struct freq_tbl ftbl_camcc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAMCC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(266666667, P_CAMCC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_camnoc_axi_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAMCC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_0_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_1_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_cci_2_clk_src = {
+ .cmd_rcgr = 0xc204,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_2_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_cci_3_clk_src = {
+ .cmd_rcgr = 0xc220,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_3_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_CAMCC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_cphy_rx_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAMCC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x6004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi0phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi1phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x604c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi2phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x6074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_csi3phytimer_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAMCC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAMCC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAMCC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_fast_ahb_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_icp_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAMCC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_4,
+ .freq_tbl = ftbl_camcc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_clk_src",
+ .parent_data = camcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
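+
+/*
+ * Each full IFE gets a dedicated PLL (pll3..pll6 for IFE 0..3).  With
+ * CLK_SET_RATE_PARENT set, a rate request here propagates through the
+ * out_even post-divider down to the PLL itself, which is how the
+ * 558/637/760 MHz operating points are reached from one table source.
+ */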
+
+static const struct freq_tbl ftbl_camcc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAMCC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAMCC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_5,
+ .freq_tbl = ftbl_camcc_ife_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_clk_src",
+ .parent_data = camcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_2_clk_src[] = {
+ F(400000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAMCC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_2_clk_src = {
+ .cmd_rcgr = 0xf010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_6,
+ .freq_tbl = ftbl_camcc_ife_2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_clk_src",
+ .parent_data = camcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_2_csid_clk_src[] = {
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xf03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_2_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_3_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAMCC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_3_clk_src = {
+ .cmd_rcgr = 0xf07c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_7,
+ .freq_tbl = ftbl_camcc_ife_3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_clk_src",
+ .parent_data = camcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_3_csid_clk_src = {
+ .cmd_rcgr = 0xf0a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_2_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ife_lite_0_clk_src[] = {
+ F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_0_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_2_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_0_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_1_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_2_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_1_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_2_clk_src = {
+ .cmd_rcgr = 0xc240,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_2_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_2_csid_clk_src = {
+ .cmd_rcgr = 0xc25c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_2_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_2_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_3_clk_src = {
+ .cmd_rcgr = 0xc284,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_3_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_ife_lite_3_csid_clk_src = {
+ .cmd_rcgr = 0xc2a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_ife_2_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_3_csid_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_8,
+ .freq_tbl = ftbl_camcc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_clk_src",
+ .parent_data = camcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_jpeg_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_CAMCC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAMCC_PLL7_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAMCC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_jpeg_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_jpeg_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_lrme_clk_src[] = {
+ F(240000000, P_CAMCC_PLL7_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_CAMCC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAMCC_PLL7_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAMCC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_lrme_clk_src = {
+ .cmd_rcgr = 0xc144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_2,
+ .freq_tbl = ftbl_camcc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_lrme_clk_src",
+ .parent_data = camcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAMCC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(64000000, P_CAMCC_PLL2_OUT_EARLY, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_mclk0_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk0_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk1_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk1_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk2_clk_src = {
+ .cmd_rcgr = 0x5044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk2_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk3_clk_src = {
+ .cmd_rcgr = 0x5064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk3_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk4_clk_src = {
+ .cmd_rcgr = 0x5084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk4_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk5_clk_src = {
+ .cmd_rcgr = 0x50a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk5_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk6_clk_src = {
+ .cmd_rcgr = 0x50c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk6_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camcc_mclk7_clk_src = {
+ .cmd_rcgr = 0x50e4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_1,
+ .freq_tbl = ftbl_camcc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk7_clk_src",
+ .parent_data = camcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_sleep_clk_src = {
+ .cmd_rcgr = 0xc1e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_9,
+ .freq_tbl = ftbl_camcc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_sleep_clk_src",
+ .parent_data = camcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAMCC_PLL7_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_0,
+ .freq_tbl = ftbl_camcc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_slow_ahb_clk_src",
+ .parent_data = camcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camcc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camcc_xo_clk_src = {
+ .cmd_rcgr = 0xc1cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = camcc_parent_map_10,
+ .freq_tbl = ftbl_camcc_xo_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camcc_xo_clk_src",
+ .parent_data = camcc_parent_data_10_ao,
+ .num_parents = ARRAY_SIZE(camcc_parent_data_10_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
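+
+/*
+ * The XO RCG takes the active-only TCXO handle ("bi_tcxo_ao"), so keeping
+ * it running does not hold the shared crystal vote while the system
+ * sleeps.
+ */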
+
+static struct clk_branch camcc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
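+
+/*
+ * Branch clocks are plain gates: enable_mask toggles bit 0 of the CBCR,
+ * and BRANCH_HALT makes the framework poll the status bit in halt_reg
+ * until the clock has actually started (or stopped) before returning.
+ */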
+
+static struct clk_branch camcc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_camnoc_axi_clk = {
+ .halt_reg = 0xc18c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc18c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc194,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc194,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cci_0_clk = {
+ .halt_reg = 0xc120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc120,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cci_1_clk = {
+ .halt_reg = 0xc13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cci_2_clk = {
+ .halt_reg = 0xc21c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc21c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_cci_3_clk = {
+ .halt_reg = 0xc238,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc238,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cci_3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cci_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_core_ahb_clk = {
+ .halt_reg = 0xc1c8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xc1c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
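+
+/*
+ * BRANCH_HALT_DELAY: this branch has no reliable status bit to poll, so
+ * the framework just inserts a fixed delay after toggling the enable.
+ */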
+
+static struct clk_branch camcc_cpas_ahb_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi0phytimer_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi1phytimer_clk = {
+ .halt_reg = 0x6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi2phytimer_clk = {
+ .halt_reg = 0x6064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csi3phytimer_clk = {
+ .halt_reg = 0x608c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x608c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy0_clk = {
+ .halt_reg = 0x6020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy1_clk = {
+ .halt_reg = 0x6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy2_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_csiphy3_clk = {
+ .halt_reg = 0x6090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_gdsc_clk = {
+ .halt_reg = 0xc1e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc1e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_gdsc_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_axi_clk = {
+ .halt_reg = 0xb068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_csid_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_1_dsp_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_axi_clk = {
+ .halt_reg = 0xf068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_clk = {
+ .halt_reg = 0xf028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xf064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_csid_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_2_dsp_clk = {
+ .halt_reg = 0xf038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_2_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_3_axi_clk = {
+ .halt_reg = 0xf0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_3_clk = {
+ .halt_reg = 0xf094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_3_cphy_rx_clk = {
+ .halt_reg = 0xf0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_3_csid_clk = {
+ .halt_reg = 0xf0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_3_dsp_clk = {
+ .halt_reg = 0xf0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_3_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_2_clk = {
+ .halt_reg = 0xc258,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc258,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_2_cphy_rx_clk = {
+ .halt_reg = 0xc27c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc27c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_2_csid_clk = {
+ .halt_reg = 0xc274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc274,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_3_clk = {
+ .halt_reg = 0xc29c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc29c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_3_cphy_rx_clk = {
+ .halt_reg = 0xc2c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ife_lite_3_csid_clk = {
+ .halt_reg = 0xc2b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ife_lite_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ife_lite_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_1_ahb_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_1_areg_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_1_areg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_1_axi_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_ipe_1_clk = {
+ .halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_ipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_lrme_clk = {
+ .halt_reg = 0xc15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk0_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk1_clk = {
+ .halt_reg = 0x503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk2_clk = {
+ .halt_reg = 0x505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk3_clk = {
+ .halt_reg = 0x507c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x507c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk4_clk = {
+ .halt_reg = 0x509c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk5_clk = {
+ .halt_reg = 0x50bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk5_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk6_clk = {
+ .halt_reg = 0x50dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk6_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_mclk7_clk = {
+ .halt_reg = 0x50fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_mclk7_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch camcc_sleep_clk = {
+ .halt_reg = 0xc200,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc200,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "camcc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc titan_top_gdsc;
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_3_gdsc = {
+ .gdscr = 0xf070,
+ .pd = {
+ .name = "ife_3_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x9004,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .parent = &titan_top_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xc1bc,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .flags = RETAIN_FF_ENABLE,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *camcc_sc8280xp_clocks[] = {
+ [CAMCC_BPS_AHB_CLK] = &camcc_bps_ahb_clk.clkr,
+ [CAMCC_BPS_AREG_CLK] = &camcc_bps_areg_clk.clkr,
+ [CAMCC_BPS_AXI_CLK] = &camcc_bps_axi_clk.clkr,
+ [CAMCC_BPS_CLK] = &camcc_bps_clk.clkr,
+ [CAMCC_BPS_CLK_SRC] = &camcc_bps_clk_src.clkr,
+ [CAMCC_CAMNOC_AXI_CLK] = &camcc_camnoc_axi_clk.clkr,
+ [CAMCC_CAMNOC_AXI_CLK_SRC] = &camcc_camnoc_axi_clk_src.clkr,
+ [CAMCC_CAMNOC_DCD_XO_CLK] = &camcc_camnoc_dcd_xo_clk.clkr,
+ [CAMCC_CCI_0_CLK] = &camcc_cci_0_clk.clkr,
+ [CAMCC_CCI_0_CLK_SRC] = &camcc_cci_0_clk_src.clkr,
+ [CAMCC_CCI_1_CLK] = &camcc_cci_1_clk.clkr,
+ [CAMCC_CCI_1_CLK_SRC] = &camcc_cci_1_clk_src.clkr,
+ [CAMCC_CCI_2_CLK] = &camcc_cci_2_clk.clkr,
+ [CAMCC_CCI_2_CLK_SRC] = &camcc_cci_2_clk_src.clkr,
+ [CAMCC_CCI_3_CLK] = &camcc_cci_3_clk.clkr,
+ [CAMCC_CCI_3_CLK_SRC] = &camcc_cci_3_clk_src.clkr,
+ [CAMCC_CORE_AHB_CLK] = &camcc_core_ahb_clk.clkr,
+ [CAMCC_CPAS_AHB_CLK] = &camcc_cpas_ahb_clk.clkr,
+ [CAMCC_CPHY_RX_CLK_SRC] = &camcc_cphy_rx_clk_src.clkr,
+ [CAMCC_CSI0PHYTIMER_CLK] = &camcc_csi0phytimer_clk.clkr,
+ [CAMCC_CSI0PHYTIMER_CLK_SRC] = &camcc_csi0phytimer_clk_src.clkr,
+ [CAMCC_CSI1PHYTIMER_CLK] = &camcc_csi1phytimer_clk.clkr,
+ [CAMCC_CSI1PHYTIMER_CLK_SRC] = &camcc_csi1phytimer_clk_src.clkr,
+ [CAMCC_CSI2PHYTIMER_CLK] = &camcc_csi2phytimer_clk.clkr,
+ [CAMCC_CSI2PHYTIMER_CLK_SRC] = &camcc_csi2phytimer_clk_src.clkr,
+ [CAMCC_CSI3PHYTIMER_CLK] = &camcc_csi3phytimer_clk.clkr,
+ [CAMCC_CSI3PHYTIMER_CLK_SRC] = &camcc_csi3phytimer_clk_src.clkr,
+ [CAMCC_CSIPHY0_CLK] = &camcc_csiphy0_clk.clkr,
+ [CAMCC_CSIPHY1_CLK] = &camcc_csiphy1_clk.clkr,
+ [CAMCC_CSIPHY2_CLK] = &camcc_csiphy2_clk.clkr,
+ [CAMCC_CSIPHY3_CLK] = &camcc_csiphy3_clk.clkr,
+ [CAMCC_FAST_AHB_CLK_SRC] = &camcc_fast_ahb_clk_src.clkr,
+ [CAMCC_GDSC_CLK] = &camcc_gdsc_clk.clkr,
+ [CAMCC_ICP_AHB_CLK] = &camcc_icp_ahb_clk.clkr,
+ [CAMCC_ICP_CLK] = &camcc_icp_clk.clkr,
+ [CAMCC_ICP_CLK_SRC] = &camcc_icp_clk_src.clkr,
+ [CAMCC_IFE_0_AXI_CLK] = &camcc_ife_0_axi_clk.clkr,
+ [CAMCC_IFE_0_CLK] = &camcc_ife_0_clk.clkr,
+ [CAMCC_IFE_0_CLK_SRC] = &camcc_ife_0_clk_src.clkr,
+ [CAMCC_IFE_0_CPHY_RX_CLK] = &camcc_ife_0_cphy_rx_clk.clkr,
+ [CAMCC_IFE_0_CSID_CLK] = &camcc_ife_0_csid_clk.clkr,
+ [CAMCC_IFE_0_CSID_CLK_SRC] = &camcc_ife_0_csid_clk_src.clkr,
+ [CAMCC_IFE_0_DSP_CLK] = &camcc_ife_0_dsp_clk.clkr,
+ [CAMCC_IFE_1_AXI_CLK] = &camcc_ife_1_axi_clk.clkr,
+ [CAMCC_IFE_1_CLK] = &camcc_ife_1_clk.clkr,
+ [CAMCC_IFE_1_CLK_SRC] = &camcc_ife_1_clk_src.clkr,
+ [CAMCC_IFE_1_CPHY_RX_CLK] = &camcc_ife_1_cphy_rx_clk.clkr,
+ [CAMCC_IFE_1_CSID_CLK] = &camcc_ife_1_csid_clk.clkr,
+ [CAMCC_IFE_1_CSID_CLK_SRC] = &camcc_ife_1_csid_clk_src.clkr,
+ [CAMCC_IFE_1_DSP_CLK] = &camcc_ife_1_dsp_clk.clkr,
+ [CAMCC_IFE_2_AXI_CLK] = &camcc_ife_2_axi_clk.clkr,
+ [CAMCC_IFE_2_CLK] = &camcc_ife_2_clk.clkr,
+ [CAMCC_IFE_2_CLK_SRC] = &camcc_ife_2_clk_src.clkr,
+ [CAMCC_IFE_2_CPHY_RX_CLK] = &camcc_ife_2_cphy_rx_clk.clkr,
+ [CAMCC_IFE_2_CSID_CLK] = &camcc_ife_2_csid_clk.clkr,
+ [CAMCC_IFE_2_CSID_CLK_SRC] = &camcc_ife_2_csid_clk_src.clkr,
+ [CAMCC_IFE_2_DSP_CLK] = &camcc_ife_2_dsp_clk.clkr,
+ [CAMCC_IFE_3_AXI_CLK] = &camcc_ife_3_axi_clk.clkr,
+ [CAMCC_IFE_3_CLK] = &camcc_ife_3_clk.clkr,
+ [CAMCC_IFE_3_CLK_SRC] = &camcc_ife_3_clk_src.clkr,
+ [CAMCC_IFE_3_CPHY_RX_CLK] = &camcc_ife_3_cphy_rx_clk.clkr,
+ [CAMCC_IFE_3_CSID_CLK] = &camcc_ife_3_csid_clk.clkr,
+ [CAMCC_IFE_3_CSID_CLK_SRC] = &camcc_ife_3_csid_clk_src.clkr,
+ [CAMCC_IFE_3_DSP_CLK] = &camcc_ife_3_dsp_clk.clkr,
+ [CAMCC_IFE_LITE_0_CLK] = &camcc_ife_lite_0_clk.clkr,
+ [CAMCC_IFE_LITE_0_CLK_SRC] = &camcc_ife_lite_0_clk_src.clkr,
+ [CAMCC_IFE_LITE_0_CPHY_RX_CLK] = &camcc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAMCC_IFE_LITE_0_CSID_CLK] = &camcc_ife_lite_0_csid_clk.clkr,
+ [CAMCC_IFE_LITE_0_CSID_CLK_SRC] = &camcc_ife_lite_0_csid_clk_src.clkr,
+ [CAMCC_IFE_LITE_1_CLK] = &camcc_ife_lite_1_clk.clkr,
+ [CAMCC_IFE_LITE_1_CLK_SRC] = &camcc_ife_lite_1_clk_src.clkr,
+ [CAMCC_IFE_LITE_1_CPHY_RX_CLK] = &camcc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAMCC_IFE_LITE_1_CSID_CLK] = &camcc_ife_lite_1_csid_clk.clkr,
+ [CAMCC_IFE_LITE_1_CSID_CLK_SRC] = &camcc_ife_lite_1_csid_clk_src.clkr,
+ [CAMCC_IFE_LITE_2_CLK] = &camcc_ife_lite_2_clk.clkr,
+ [CAMCC_IFE_LITE_2_CLK_SRC] = &camcc_ife_lite_2_clk_src.clkr,
+ [CAMCC_IFE_LITE_2_CPHY_RX_CLK] = &camcc_ife_lite_2_cphy_rx_clk.clkr,
+ [CAMCC_IFE_LITE_2_CSID_CLK] = &camcc_ife_lite_2_csid_clk.clkr,
+ [CAMCC_IFE_LITE_2_CSID_CLK_SRC] = &camcc_ife_lite_2_csid_clk_src.clkr,
+ [CAMCC_IFE_LITE_3_CLK] = &camcc_ife_lite_3_clk.clkr,
+ [CAMCC_IFE_LITE_3_CLK_SRC] = &camcc_ife_lite_3_clk_src.clkr,
+ [CAMCC_IFE_LITE_3_CPHY_RX_CLK] = &camcc_ife_lite_3_cphy_rx_clk.clkr,
+ [CAMCC_IFE_LITE_3_CSID_CLK] = &camcc_ife_lite_3_csid_clk.clkr,
+ [CAMCC_IFE_LITE_3_CSID_CLK_SRC] = &camcc_ife_lite_3_csid_clk_src.clkr,
+ [CAMCC_IPE_0_AHB_CLK] = &camcc_ipe_0_ahb_clk.clkr,
+ [CAMCC_IPE_0_AREG_CLK] = &camcc_ipe_0_areg_clk.clkr,
+ [CAMCC_IPE_0_AXI_CLK] = &camcc_ipe_0_axi_clk.clkr,
+ [CAMCC_IPE_0_CLK] = &camcc_ipe_0_clk.clkr,
+ [CAMCC_IPE_0_CLK_SRC] = &camcc_ipe_0_clk_src.clkr,
+ [CAMCC_IPE_1_AHB_CLK] = &camcc_ipe_1_ahb_clk.clkr,
+ [CAMCC_IPE_1_AREG_CLK] = &camcc_ipe_1_areg_clk.clkr,
+ [CAMCC_IPE_1_AXI_CLK] = &camcc_ipe_1_axi_clk.clkr,
+ [CAMCC_IPE_1_CLK] = &camcc_ipe_1_clk.clkr,
+ [CAMCC_JPEG_CLK] = &camcc_jpeg_clk.clkr,
+ [CAMCC_JPEG_CLK_SRC] = &camcc_jpeg_clk_src.clkr,
+ [CAMCC_LRME_CLK] = &camcc_lrme_clk.clkr,
+ [CAMCC_LRME_CLK_SRC] = &camcc_lrme_clk_src.clkr,
+ [CAMCC_MCLK0_CLK] = &camcc_mclk0_clk.clkr,
+ [CAMCC_MCLK0_CLK_SRC] = &camcc_mclk0_clk_src.clkr,
+ [CAMCC_MCLK1_CLK] = &camcc_mclk1_clk.clkr,
+ [CAMCC_MCLK1_CLK_SRC] = &camcc_mclk1_clk_src.clkr,
+ [CAMCC_MCLK2_CLK] = &camcc_mclk2_clk.clkr,
+ [CAMCC_MCLK2_CLK_SRC] = &camcc_mclk2_clk_src.clkr,
+ [CAMCC_MCLK3_CLK] = &camcc_mclk3_clk.clkr,
+ [CAMCC_MCLK3_CLK_SRC] = &camcc_mclk3_clk_src.clkr,
+ [CAMCC_MCLK4_CLK] = &camcc_mclk4_clk.clkr,
+ [CAMCC_MCLK4_CLK_SRC] = &camcc_mclk4_clk_src.clkr,
+ [CAMCC_MCLK5_CLK] = &camcc_mclk5_clk.clkr,
+ [CAMCC_MCLK5_CLK_SRC] = &camcc_mclk5_clk_src.clkr,
+ [CAMCC_MCLK6_CLK] = &camcc_mclk6_clk.clkr,
+ [CAMCC_MCLK6_CLK_SRC] = &camcc_mclk6_clk_src.clkr,
+ [CAMCC_MCLK7_CLK] = &camcc_mclk7_clk.clkr,
+ [CAMCC_MCLK7_CLK_SRC] = &camcc_mclk7_clk_src.clkr,
+ [CAMCC_PLL0] = &camcc_pll0.clkr,
+ [CAMCC_PLL0_OUT_EVEN] = &camcc_pll0_out_even.clkr,
+ [CAMCC_PLL0_OUT_ODD] = &camcc_pll0_out_odd.clkr,
+ [CAMCC_PLL1] = &camcc_pll1.clkr,
+ [CAMCC_PLL1_OUT_EVEN] = &camcc_pll1_out_even.clkr,
+ [CAMCC_PLL2] = &camcc_pll2.clkr,
+ [CAMCC_PLL3] = &camcc_pll3.clkr,
+ [CAMCC_PLL3_OUT_EVEN] = &camcc_pll3_out_even.clkr,
+ [CAMCC_PLL4] = &camcc_pll4.clkr,
+ [CAMCC_PLL4_OUT_EVEN] = &camcc_pll4_out_even.clkr,
+ [CAMCC_PLL5] = &camcc_pll5.clkr,
+ [CAMCC_PLL5_OUT_EVEN] = &camcc_pll5_out_even.clkr,
+ [CAMCC_PLL6] = &camcc_pll6.clkr,
+ [CAMCC_PLL6_OUT_EVEN] = &camcc_pll6_out_even.clkr,
+ [CAMCC_PLL7] = &camcc_pll7.clkr,
+ [CAMCC_PLL7_OUT_EVEN] = &camcc_pll7_out_even.clkr,
+ [CAMCC_PLL7_OUT_ODD] = &camcc_pll7_out_odd.clkr,
+ [CAMCC_SLEEP_CLK] = &camcc_sleep_clk.clkr,
+ [CAMCC_SLEEP_CLK_SRC] = &camcc_sleep_clk_src.clkr,
+ [CAMCC_SLOW_AHB_CLK_SRC] = &camcc_slow_ahb_clk_src.clkr,
+ [CAMCC_XO_CLK_SRC] = &camcc_xo_clk_src.clkr,
+};
+
+static struct gdsc *camcc_sc8280xp_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [IFE_3_GDSC] = &ife_3_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_reset_map camcc_sc8280xp_resets[] = {
+ [CAMCC_BPS_BCR] = { 0x7000 },
+ [CAMCC_CAMNOC_BCR] = { 0xc16c },
+ [CAMCC_CCI_BCR] = { 0xc104 },
+ [CAMCC_CPAS_BCR] = { 0xc164 },
+ [CAMCC_CSI0PHY_BCR] = { 0x6000 },
+ [CAMCC_CSI1PHY_BCR] = { 0x6024 },
+ [CAMCC_CSI2PHY_BCR] = { 0x6048 },
+ [CAMCC_CSI3PHY_BCR] = { 0x6070 },
+ [CAMCC_ICP_BCR] = { 0xc0b4 },
+ [CAMCC_IFE_0_BCR] = { 0xa000 },
+ [CAMCC_IFE_1_BCR] = { 0xb000 },
+ [CAMCC_IFE_2_BCR] = { 0xf000 },
+ [CAMCC_IFE_3_BCR] = { 0xf06c },
+ [CAMCC_IFE_LITE_0_BCR] = { 0xc000 },
+ [CAMCC_IFE_LITE_1_BCR] = { 0xc044 },
+ [CAMCC_IFE_LITE_2_BCR] = { 0xc23c },
+ [CAMCC_IFE_LITE_3_BCR] = { 0xc280 },
+ [CAMCC_IPE_0_BCR] = { 0x8000 },
+ [CAMCC_IPE_1_BCR] = { 0x9000 },
+ [CAMCC_JPEG_BCR] = { 0xc088 },
+ [CAMCC_LRME_BCR] = { 0xc140 },
+};
+
+static const struct regmap_config camcc_sc8280xp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x13020,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc camcc_sc8280xp_desc = {
+ .config = &camcc_sc8280xp_regmap_config,
+ .clks = camcc_sc8280xp_clocks,
+ .num_clks = ARRAY_SIZE(camcc_sc8280xp_clocks),
+ .resets = camcc_sc8280xp_resets,
+ .num_resets = ARRAY_SIZE(camcc_sc8280xp_resets),
+ .gdscs = camcc_sc8280xp_gdscs,
+ .num_gdscs = ARRAY_SIZE(camcc_sc8280xp_gdscs),
+};
+
+static const struct of_device_id camcc_sc8280xp_match_table[] = {
+ { .compatible = "qcom,sc8280xp-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, camcc_sc8280xp_match_table);
+
+static int camcc_sc8280xp_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &camcc_sc8280xp_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
+ clk_lucid_pll_configure(&camcc_pll0, regmap, &camcc_pll0_config);
+ clk_lucid_pll_configure(&camcc_pll1, regmap, &camcc_pll1_config);
+ clk_zonda_pll_configure(&camcc_pll2, regmap, &camcc_pll2_config);
+ clk_lucid_pll_configure(&camcc_pll3, regmap, &camcc_pll3_config);
+ clk_lucid_pll_configure(&camcc_pll4, regmap, &camcc_pll4_config);
+ clk_lucid_pll_configure(&camcc_pll5, regmap, &camcc_pll5_config);
+ clk_lucid_pll_configure(&camcc_pll6, regmap, &camcc_pll6_config);
+ clk_lucid_pll_configure(&camcc_pll7, regmap, &camcc_pll7_config);
+
+ /*
+ * Keep camcc_gdsc_clk always enabled.
+ */
+ regmap_update_bits(regmap, 0xc1e4, BIT(0), 1);
+
+ ret = qcom_cc_really_probe(pdev, &camcc_sc8280xp_desc, regmap);
+ if (ret)
+ goto err_disable;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_disable:
+ regmap_update_bits(regmap, 0xc1e4, BIT(0), 0);
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver camcc_sc8280xp_driver = {
+ .probe = camcc_sc8280xp_probe,
+ .driver = {
+ .name = "camcc-sc8280xp",
+ .of_match_table = camcc_sc8280xp_match_table,
+ },
+};
+
+module_platform_driver(camcc_sc8280xp_driver);
+
+MODULE_DESCRIPTION("QCOM CAMCC SC8280XP Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index fc4735f74..c1dba33ac 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -134,6 +135,43 @@ static void clk_branch2_disable(struct clk_hw *hw)
clk_branch_toggle(hw, false, clk_branch2_check_halt);
}
+static int clk_branch2_mem_enable(struct clk_hw *hw)
+{
+ struct clk_mem_branch *mem_br = to_clk_mem_branch(hw);
+ struct clk_branch branch = mem_br->branch;
+ u32 val;
+ int ret;
+
+ regmap_update_bits(branch.clkr.regmap, mem_br->mem_enable_reg,
+ mem_br->mem_enable_ack_mask, mem_br->mem_enable_ack_mask);
+
+ ret = regmap_read_poll_timeout(branch.clkr.regmap, mem_br->mem_ack_reg,
+ val, val & mem_br->mem_enable_ack_mask, 0, 200);
+ if (ret) {
+ WARN(1, "%s mem enable failed\n", clk_hw_get_name(&branch.clkr.hw));
+ return ret;
+ }
+
+ return clk_branch2_enable(hw);
+}
+
+static void clk_branch2_mem_disable(struct clk_hw *hw)
+{
+ struct clk_mem_branch *mem_br = to_clk_mem_branch(hw);
+
+ regmap_update_bits(mem_br->branch.clkr.regmap, mem_br->mem_enable_reg,
+ mem_br->mem_enable_ack_mask, 0);
+
+ return clk_branch2_disable(hw);
+}
+
+const struct clk_ops clk_branch2_mem_ops = {
+ .enable = clk_branch2_mem_enable,
+ .disable = clk_branch2_mem_disable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_mem_ops);
+
const struct clk_ops clk_branch2_ops = {
.enable = clk_branch2_enable,
.disable = clk_branch2_disable,
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 0cf800b9d..8ffed603c 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -38,6 +38,23 @@ struct clk_branch {
struct clk_regmap clkr;
};
+/**
+ * struct clk_mem_branch - gating clock associated with memories
+ *
+ * @mem_enable_reg: branch clock memory gating register
+ * @mem_ack_reg: branch clock memory ack register
+ * @mem_enable_ack_mask: memory enable/ack field, set in @mem_enable_reg and polled in @mem_ack_reg
+ * @branch: branch clock gating handle
+ *
+ * Clock which can gate its memories.
+ */
+struct clk_mem_branch {
+ u32 mem_enable_reg;
+ u32 mem_ack_reg;
+ u32 mem_enable_ack_mask;
+ struct clk_branch branch;
+};
+
/* Branch clock common bits for HLOS-owned clocks */
#define CBCR_CLK_OFF BIT(31)
#define CBCR_NOC_FSM_STATUS GENMASK(30, 28)
@@ -85,8 +102,12 @@ extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_branch_simple_ops;
extern const struct clk_ops clk_branch2_aon_ops;
+extern const struct clk_ops clk_branch2_mem_ops;
#define to_clk_branch(_hw) \
container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
+#define to_clk_mem_branch(_hw) \
+ container_of(to_clk_branch(_hw), struct clk_mem_branch, branch)
+
#endif
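
The two hunks above introduce clk_branch2_mem_ops and struct clk_mem_branch without adding a user. A minimal sketch of how a Qualcomm clock-controller driver could declare a memory-gated branch on top of them; every register offset, the mask, and the clock name below are invented for illustration and not taken from any real controller:

	/* Hypothetical memory-gated branch clock (all register values assumed) */
	static struct clk_mem_branch gcc_example_mem_clk = {
		.mem_enable_reg = 0x10010,		/* assumed MEM enable register */
		.mem_ack_reg = 0x10014,			/* assumed MEM ack register */
		.mem_enable_ack_mask = BIT(10),		/* assumed enable/ack bit */
		.branch = {
			.halt_reg = 0x10000,
			.halt_check = BRANCH_HALT,
			.clkr = {
				.enable_reg = 0x10000,
				.enable_mask = BIT(0),
				.hw.init = &(const struct clk_init_data) {
					.name = "gcc_example_mem_clk",
					.ops = &clk_branch2_mem_ops,
				},
			},
		},
	};

With these ops, enable first sets the mask in mem_enable_reg and polls mem_ack_reg for the same mask (for up to 200 us) before ungating the branch, so the memory array is powered before the first clock pulse; disable clears the memory enable bit and then gates the branch via clk_branch2_disable().
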
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 5d853fd43..bb82abeed 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -372,6 +372,9 @@ DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1);
DEFINE_CLK_RPMH_VRM(clk4, _a1, "clka4", 1);
DEFINE_CLK_RPMH_VRM(clk5, _a1, "clka5", 1);
+DEFINE_CLK_RPMH_VRM(clk3, _a2, "clka3", 2);
+DEFINE_CLK_RPMH_VRM(clk4, _a2, "clka4", 2);
+DEFINE_CLK_RPMH_VRM(clk5, _a2, "clka5", 2);
DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2);
DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2);
DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2);
@@ -630,6 +633,37 @@ static const struct clk_rpmh_desc clk_rpmh_sm8550 = {
.num_clks = ARRAY_SIZE(sm8550_rpmh_clocks),
};
+static struct clk_hw *sm8650_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw,
+ /*
+ * The clka3 RPMh resource is missing from cmd-db
+ * on current platforms; while clka3 exists on
+ * the PMK8550, the clock is unconnected and
+ * unused.
+ */
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a2.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a2_ao.hw,
+ [RPMH_RF_CLK5] = &clk_rpmh_clk5_a2.hw,
+ [RPMH_RF_CLK5_A] = &clk_rpmh_clk5_a2_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8650 = {
+ .clks = sm8650_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8650_rpmh_clocks),
+};
+
static struct clk_hw *sc7280_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw,
@@ -737,6 +771,28 @@ static const struct clk_rpmh_desc clk_rpmh_sm4450 = {
.num_clks = ARRAY_SIZE(sm4450_rpmh_clocks),
};
+static struct clk_hw *x1e80100_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a2.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a2_ao.hw,
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a2.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a2_ao.hw,
+ [RPMH_RF_CLK5] = &clk_rpmh_clk5_a2.hw,
+ [RPMH_RF_CLK5_A] = &clk_rpmh_clk5_a2_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_x1e80100 = {
+ .clks = x1e80100_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(x1e80100_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -837,7 +893,9 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550},
+ { .compatible = "qcom,sm8650-rpmh-clk", .data = &clk_rpmh_sm8650},
{ .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
+ { .compatible = "qcom,x1e80100-rpmh-clk", .data = &clk_rpmh_x1e80100},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
new file mode 100644
index 000000000..f3b1d9d16
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8650.c
@@ -0,0 +1,1818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8650-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2100000000, 0 },
+};
+
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x82e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+ F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x818c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8220,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81ec,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x8284,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x8238,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x8254,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x82d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x82b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x829c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x8158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x80a8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8120,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x813c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8188,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x821c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x8250,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x82cc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_clk = {
+ .halt_reg = 0xe058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0xe074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
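+/*
+ * MDSS power domains: HW_CTRL hands on/off control of the GDSC to the
+ * display hardware at runtime, and RETAIN_FF_ENABLE keeps the domain's
+ * flip-flop (register) state retained across power collapse.
+ */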
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm8650_clocks[] = {
+ [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8650_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8650_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8650_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8650_desc = {
+ .config = &disp_cc_sm8650_regmap_config,
+ .clks = disp_cc_sm8650_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8650_clocks),
+ .resets = disp_cc_sm8650_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8650_resets),
+ .gdscs = disp_cc_sm8650_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8650_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,sm8650-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8650_match_table);
+
+static int disp_cc_sm8650_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
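+ /*
+ * Runtime PM keeps the power domain supplying this clock controller
+ * (attached via the DT power-domains property) up for the register
+ * accesses below.
+ */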
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8650_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
+ clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /* Keep clocks always enabled */
+ regmap_update_bits(regmap, 0xe054, BIT(0), BIT(0)); /* disp_cc_xo_clk */
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm8650_desc, regmap);
+ if (ret)
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8650_driver = {
+ .probe = disp_cc_sm8650_probe,
+ .driver = {
+ .name = "disp_cc-sm8650",
+ .of_match_table = disp_cc_sm8650_match_table,
+ },
+};
+
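+/*
+ * Register at subsys_initcall time so the display clocks are available
+ * before their MDSS/DSI/DP consumers probe.
+ */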
+static int __init disp_cc_sm8650_init(void)
+{
+ return platform_driver_register(&disp_cc_sm8650_driver);
+}
+subsys_initcall(disp_cc_sm8650_init);
+
+static void __exit disp_cc_sm8650_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sm8650_driver);
+}
+module_exit(disp_cc_sm8650_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8650 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/ecpricc-qdu1000.c b/drivers/clk/qcom/ecpricc-qdu1000.c
new file mode 100644
index 000000000..c628054a7
--- /dev/null
+++ b/drivers/clk/qcom/ecpricc-qdu1000.c
@@ -0,0 +1,2456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qdu1000-ecpricc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GCC_ECPRI_CC_GPLL0_OUT_MAIN,
+ DT_GCC_ECPRI_CC_GPLL1_OUT_EVEN,
+ DT_GCC_ECPRI_CC_GPLL2_OUT_MAIN,
+ DT_GCC_ECPRI_CC_GPLL3_OUT_MAIN,
+ DT_GCC_ECPRI_CC_GPLL4_OUT_MAIN,
+ DT_GCC_ECPRI_CC_GPLL5_OUT_EVEN,
+};
+
+enum {
+ P_BI_TCXO,
+ P_ECPRI_CC_PLL0_OUT_MAIN,
+ P_ECPRI_CC_PLL1_OUT_MAIN,
+ P_GCC_ECPRI_CC_GPLL0_OUT_MAIN,
+ P_GCC_ECPRI_CC_GPLL1_OUT_EVEN,
+ P_GCC_ECPRI_CC_GPLL2_OUT_MAIN,
+ P_GCC_ECPRI_CC_GPLL3_OUT_MAIN,
+ P_GCC_ECPRI_CC_GPLL4_OUT_MAIN,
+ P_GCC_ECPRI_CC_GPLL5_OUT_EVEN,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
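+/*
+ * PLL output = 19.2 MHz TCXO * (L + alpha / 2^16): 0x24 + 0x7555/0x10000
+ * is ~36.458, giving the 700 MHz rate below; likewise 0x29 + 0xfaaa/0x10000
+ * is ~41.979 for the 806 MHz PLL1.
+ */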
+/* 700 MHz configuration */
+static const struct alpha_pll_config ecpri_cc_pll0_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll ecpri_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+/* 806 MHz configuration */
+static const struct alpha_pll_config ecpri_cc_pll1_config = {
+ .l = 0x29,
+ .alpha = 0xfaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll ecpri_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
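+/*
+ * Each parent_map entry pairs a P_* identifier with the value written to
+ * the RCG source-select field; the matching clk_parent_data array lists
+ * the actual parents in the same order.
+ */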
+static const struct parent_map ecpri_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_ECPRI_CC_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_ECPRI_CC_GPLL5_OUT_EVEN, 3 },
+ { P_ECPRI_CC_PLL1_OUT_MAIN, 4 },
+ { P_GCC_ECPRI_CC_GPLL4_OUT_MAIN, 5 },
+ { P_ECPRI_CC_PLL0_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data ecpri_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_ECPRI_CC_GPLL0_OUT_MAIN },
+ { .index = DT_GCC_ECPRI_CC_GPLL2_OUT_MAIN },
+ { .index = DT_GCC_ECPRI_CC_GPLL5_OUT_EVEN },
+ { .hw = &ecpri_cc_pll1.clkr.hw },
+ { .index = DT_GCC_ECPRI_CC_GPLL4_OUT_MAIN },
+ { .hw = &ecpri_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map ecpri_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_ECPRI_CC_GPLL1_OUT_EVEN, 2 },
+ { P_GCC_ECPRI_CC_GPLL3_OUT_MAIN, 3 },
+ { P_ECPRI_CC_PLL1_OUT_MAIN, 4 },
+ { P_GCC_ECPRI_CC_GPLL4_OUT_MAIN, 5 },
+ { P_ECPRI_CC_PLL0_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data ecpri_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_ECPRI_CC_GPLL0_OUT_MAIN },
+ { .index = DT_GCC_ECPRI_CC_GPLL1_OUT_EVEN },
+ { .index = DT_GCC_ECPRI_CC_GPLL3_OUT_MAIN },
+ { .hw = &ecpri_cc_pll1.clkr.hw },
+ { .index = DT_GCC_ECPRI_CC_GPLL4_OUT_MAIN },
+ { .hw = &ecpri_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map ecpri_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_ECPRI_CC_GPLL5_OUT_EVEN, 3 },
+ { P_ECPRI_CC_PLL1_OUT_MAIN, 4 },
+ { P_GCC_ECPRI_CC_GPLL4_OUT_MAIN, 5 },
+ { P_ECPRI_CC_PLL0_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data ecpri_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_ECPRI_CC_GPLL0_OUT_MAIN },
+ { .index = DT_GCC_ECPRI_CC_GPLL5_OUT_EVEN },
+ { .hw = &ecpri_cc_pll1.clkr.hw },
+ { .index = DT_GCC_ECPRI_CC_GPLL4_OUT_MAIN },
+ { .hw = &ecpri_cc_pll0.clkr.hw },
+};
+
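+/*
+ * F(rate, source, div, m, n): the pre-divider is stored as 2 * div - 1,
+ * and m/n of 0 leave the MND counter bypassed.
+ */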
+static const struct freq_tbl ftbl_ecpri_cc_ecpri_clk_src[] = {
+ F(466500000, P_GCC_ECPRI_CC_GPLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_ecpri_clk_src = {
+ .cmd_rcgr = 0x9034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_2,
+ .freq_tbl = ftbl_ecpri_cc_ecpri_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_clk_src",
+ .parent_data = ecpri_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_ecpri_dma_clk_src[] = {
+ F(466500000, P_GCC_ECPRI_CC_GPLL5_OUT_EVEN, 1, 0, 0),
+ F(500000000, P_GCC_ECPRI_CC_GPLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_ecpri_dma_clk_src = {
+ .cmd_rcgr = 0x9080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_ecpri_dma_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_dma_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_ecpri_fast_clk_src[] = {
+ F(500000000, P_GCC_ECPRI_CC_GPLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_ecpri_fast_clk_src = {
+ .cmd_rcgr = 0x904c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_ecpri_fast_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_fast_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_ecpri_oran_clk_src[] = {
+ F(500000000, P_GCC_ECPRI_CC_GPLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_ecpri_oran_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_ecpri_oran_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_oran_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_eth_100g_c2c0_hm_ff_clk_src[] = {
+ F(201500000, P_ECPRI_CC_PLL1_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_ECPRI_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(466500000, P_GCC_ECPRI_CC_GPLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_c2c0_hm_ff_clk_src = {
+ .cmd_rcgr = 0x81b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_c2c0_hm_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c0_hm_ff_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_eth_100g_c2c_hm_macsec_clk_src[] = {
+ F(100000000, P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_c2c_hm_macsec_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_c2c_hm_macsec_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c_hm_macsec_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_dbg_c2c_hm_ff_clk_src = {
+ .cmd_rcgr = 0x81c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_c2c0_hm_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_dbg_c2c_hm_ff_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_fh0_hm_ff_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_c2c0_hm_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh0_hm_ff_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_fh0_macsec_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_c2c_hm_macsec_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh0_macsec_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_fh1_hm_ff_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_ecpri_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh1_hm_ff_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_eth_100g_fh1_macsec_clk_src[] = {
+ F(200000000, P_GCC_ECPRI_CC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_fh1_macsec_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_fh1_macsec_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh1_macsec_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_fh2_hm_ff_clk_src = {
+ .cmd_rcgr = 0x8198,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_c2c0_hm_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh2_hm_ff_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_fh2_macsec_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_0,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_fh1_macsec_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh2_macsec_clk_src",
+ .parent_data = ecpri_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src[] = {
+ F(533000000, P_GCC_ECPRI_CC_GPLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_GCC_ECPRI_CC_GPLL3_OUT_MAIN, 1, 0, 0),
+ F(806000000, P_GCC_ECPRI_CC_GPLL4_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src = {
+ .cmd_rcgr = 0x8228,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_1,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src",
+ .parent_data = ecpri_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk_src = {
+ .cmd_rcgr = 0x8240,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_1,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk_src",
+ .parent_data = ecpri_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_mac_fh0_hm_ref_clk_src = {
+ .cmd_rcgr = 0x81e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_1,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_fh0_hm_ref_clk_src",
+ .parent_data = ecpri_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_mac_fh1_hm_ref_clk_src = {
+ .cmd_rcgr = 0x81f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_1,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_fh1_hm_ref_clk_src",
+ .parent_data = ecpri_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 ecpri_cc_eth_100g_mac_fh2_hm_ref_clk_src = {
+ .cmd_rcgr = 0x8210,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_1,
+ .freq_tbl = ftbl_ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_fh2_hm_ref_clk_src",
+ .parent_data = ecpri_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_ecpri_cc_mss_emac_clk_src[] = {
+ F(403000000, P_GCC_ECPRI_CC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ecpri_cc_mss_emac_clk_src = {
+ .cmd_rcgr = 0xe00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = ecpri_cc_parent_map_2,
+ .freq_tbl = ftbl_ecpri_cc_mss_emac_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_mss_emac_clk_src",
+ .parent_data = ecpri_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(ecpri_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
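+/*
+ * These dividers use clk_regmap_div_ro_ops: the ratio is programmed
+ * elsewhere (firmware/boot) and is only read back here, never changed
+ * by Linux.
+ */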
+static struct clk_regmap_div ecpri_cc_ecpri_fast_div2_clk_src = {
+ .reg = 0x907c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_fast_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_fast_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_c2c_hm_ff_0_div_clk_src = {
+ .reg = 0x8290,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c_hm_ff_0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_c2c0_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_c2c_hm_ff_1_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c_hm_ff_1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_c2c0_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_div_clk_src = {
+ .reg = 0x8298,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_div_clk_src = {
+ .reg = 0x829c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_0_hm_ff_0_div_clk_src = {
+ .reg = 0x8260,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh0_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_0_hm_ff_1_div_clk_src = {
+ .reg = 0x8264,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh0_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_0_hm_ff_2_div_clk_src = {
+ .reg = 0x8268,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh0_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_0_hm_ff_3_div_clk_src = {
+ .reg = 0x826c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_3_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh0_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_1_hm_ff_0_div_clk_src = {
+ .reg = 0x8270,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh1_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_1_hm_ff_1_div_clk_src = {
+ .reg = 0x8274,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh1_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_1_hm_ff_2_div_clk_src = {
+ .reg = 0x8278,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh1_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_1_hm_ff_3_div_clk_src = {
+ .reg = 0x827c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_3_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh1_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_2_hm_ff_0_div_clk_src = {
+ .reg = 0x8280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh2_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_2_hm_ff_1_div_clk_src = {
+ .reg = 0x8284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh2_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_2_hm_ff_2_div_clk_src = {
+ .reg = 0x8288,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh2_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div ecpri_cc_eth_100g_fh_2_hm_ff_3_div_clk_src = {
+ .reg = 0x828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_3_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh2_hm_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_cg_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_cg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_dma_clk = {
+ .halt_reg = 0x902c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x902c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_dma_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_dma_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_dma_noc_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_dma_noc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_dma_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_fast_clk = {
+ .halt_reg = 0x9014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_fast_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_fast_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_fast_div2_clk = {
+ .halt_reg = 0x901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_fast_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_fast_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_fast_div2_noc_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_fast_div2_noc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_fast_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_fr_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_fr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_ecpri_oran_div2_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_ecpri_oran_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_oran_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_100g_c2c0_udp_fifo_clk = {
+ .halt_reg = 0x80cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c0_udp_fifo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_100g_c2c1_udp_fifo_clk = {
+ .halt_reg = 0x80d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c1_udp_fifo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
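+/*
+ * clk_mem_branch: clk_branch2_mem_ops sets the enable bit in
+ * mem_enable_reg and polls mem_ack_reg for the same mask before ungating
+ * the branch, so the attached memories are powered up first.
+ */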
+static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk = {
+ .mem_enable_reg = 0x8410,
+ .mem_ack_reg = 0x8424,
+ .mem_enable_ack_mask = BIT(0),
+ .branch = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_c2c_hm_ff_0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk = {
+ .mem_enable_reg = 0x8410,
+ .mem_ack_reg = 0x8424,
+ .mem_enable_ack_mask = BIT(1),
+ .branch = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_c2c_hm_ff_1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_c2c_hm_macsec_clk = {
+ .mem_enable_reg = 0x8410,
+ .mem_ack_reg = 0x8424,
+ .mem_enable_ack_mask = BIT(4),
+ .branch = {
+ .halt_reg = 0x80ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_c2c_hm_macsec_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_c2c_hm_macsec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk = {
+ .mem_enable_reg = 0x8414,
+ .mem_ack_reg = 0x8428,
+ .mem_enable_ack_mask = BIT(0),
+ .branch = {
+ .halt_reg = 0x80d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_clk = {
+ .mem_enable_reg = 0x8414,
+ .mem_ack_reg = 0x8428,
+ .mem_enable_ack_mask = BIT(1),
+ .branch = {
+ .halt_reg = 0x80e0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_100g_dbg_c2c_udp_fifo_clk = {
+ .halt_reg = 0x80f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_dbg_c2c_udp_fifo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_0_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(0),
+ .branch = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_0_hm_ff_0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_1_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(1),
+ .branch = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_0_hm_ff_1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_2_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(2),
+ .branch = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_0_hm_ff_2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_3_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(3),
+ .branch = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_hm_ff_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_0_hm_ff_3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_100g_fh_0_udp_fifo_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_0_udp_fifo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_0_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(0),
+ .branch = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_1_hm_ff_0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_1_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(1),
+ .branch = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_1_hm_ff_1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_2_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(2),
+ .branch = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_1_hm_ff_2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_3_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(3),
+ .branch = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_hm_ff_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_1_hm_ff_3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_100g_fh_1_udp_fifo_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_1_udp_fifo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_0_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(0),
+ .branch = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_2_hm_ff_0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_1_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(1),
+ .branch = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_2_hm_ff_1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_2_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(2),
+ .branch = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_2_hm_ff_2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_3_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(3),
+ .branch = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_hm_ff_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh_2_hm_ff_3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_100g_fh_2_udp_fifo_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_2_udp_fifo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_0_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(4),
+ .branch = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_macsec_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh0_macsec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_1_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(4),
+ .branch = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_macsec_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh1_macsec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_2_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(4),
+ .branch = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_fh_macsec_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_fh2_macsec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_mac_c2c_hm_ref_clk = {
+ .mem_enable_reg = 0x8410,
+ .mem_ack_reg = 0x8424,
+ .mem_enable_ack_mask = BIT(5),
+ .branch = {
+ .halt_reg = 0x80c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_c2c_hm_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk = {
+ .mem_enable_reg = 0x8414,
+ .mem_ack_reg = 0x8428,
+ .mem_enable_ack_mask = BIT(5),
+ .branch = {
+ .halt_reg = 0x80e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh0_hm_ref_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(5),
+ .branch = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_fh0_hm_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_mac_fh0_hm_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh1_hm_ref_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(5),
+ .branch = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_fh1_hm_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_mac_fh1_hm_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh2_hm_ref_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(5),
+ .branch = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_100g_mac_fh2_hm_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_eth_100g_mac_fh2_hm_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_dbg_nfapi_axi_clk = {
+ .halt_reg = 0x80f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_dbg_nfapi_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_dma_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_eth_dbg_noc_axi_clk = {
+ .halt_reg = 0x80fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_dbg_noc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_mss_emac_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_phy_0_ock_sram_clk = {
+ .mem_enable_reg = 0x8404,
+ .mem_ack_reg = 0x8418,
+ .mem_enable_ack_mask = BIT(6),
+ .branch = {
+ .halt_reg = 0xd140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_phy_0_ock_sram_clk",
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_phy_1_ock_sram_clk = {
+ .mem_enable_reg = 0x8408,
+ .mem_ack_reg = 0x841c,
+ .mem_enable_ack_mask = BIT(6),
+ .branch = {
+ .halt_reg = 0xd148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_phy_1_ock_sram_clk",
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_phy_2_ock_sram_clk = {
+ .mem_enable_reg = 0x840c,
+ .mem_ack_reg = 0x8420,
+ .mem_enable_ack_mask = BIT(6),
+ .branch = {
+ .halt_reg = 0xd150,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_phy_2_ock_sram_clk",
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_phy_3_ock_sram_clk = {
+ .mem_enable_reg = 0x8410,
+ .mem_ack_reg = 0x8424,
+ .mem_enable_ack_mask = BIT(6),
+ .branch = {
+ .halt_reg = 0xd158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd158,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_phy_3_ock_sram_clk",
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_mem_branch ecpri_cc_eth_phy_4_ock_sram_clk = {
+ .mem_enable_reg = 0x8414,
+ .mem_ack_reg = 0x8428,
+ .mem_enable_ack_mask = BIT(6),
+ .branch = {
+ .halt_reg = 0xd160,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_eth_phy_4_ock_sram_clk",
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_mss_emac_clk = {
+ .halt_reg = 0xe008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_mss_emac_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_mss_emac_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_mss_oran_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_mss_oran_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &ecpri_cc_ecpri_oran_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane0_rx_clk = {
+ .halt_reg = 0xd000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane0_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane0_tx_clk = {
+ .halt_reg = 0xd050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane0_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane1_rx_clk = {
+ .halt_reg = 0xd004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane1_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane1_tx_clk = {
+ .halt_reg = 0xd054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane1_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane2_rx_clk = {
+ .halt_reg = 0xd008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane2_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane2_tx_clk = {
+ .halt_reg = 0xd058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane2_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane3_rx_clk = {
+ .halt_reg = 0xd00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane3_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy0_lane3_tx_clk = {
+ .halt_reg = 0xd05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy0_lane3_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane0_rx_clk = {
+ .halt_reg = 0xd010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane0_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane0_tx_clk = {
+ .halt_reg = 0xd060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane0_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane1_rx_clk = {
+ .halt_reg = 0xd014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane1_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane1_tx_clk = {
+ .halt_reg = 0xd064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane1_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane2_rx_clk = {
+ .halt_reg = 0xd018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane2_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane2_tx_clk = {
+ .halt_reg = 0xd068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane2_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane3_rx_clk = {
+ .halt_reg = 0xd01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane3_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy1_lane3_tx_clk = {
+ .halt_reg = 0xd06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy1_lane3_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane0_rx_clk = {
+ .halt_reg = 0xd020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane0_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane0_tx_clk = {
+ .halt_reg = 0xd070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane0_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane1_rx_clk = {
+ .halt_reg = 0xd024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane1_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane1_tx_clk = {
+ .halt_reg = 0xd074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane1_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane2_rx_clk = {
+ .halt_reg = 0xd028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane2_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane2_tx_clk = {
+ .halt_reg = 0xd078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane2_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane3_rx_clk = {
+ .halt_reg = 0xd02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane3_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy2_lane3_tx_clk = {
+ .halt_reg = 0xd07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy2_lane3_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane0_rx_clk = {
+ .halt_reg = 0xd030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane0_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane0_tx_clk = {
+ .halt_reg = 0xd080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane0_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane1_rx_clk = {
+ .halt_reg = 0xd034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane1_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane1_tx_clk = {
+ .halt_reg = 0xd084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane1_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane2_rx_clk = {
+ .halt_reg = 0xd038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane2_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane2_tx_clk = {
+ .halt_reg = 0xd088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane2_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane3_rx_clk = {
+ .halt_reg = 0xd03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane3_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy3_lane3_tx_clk = {
+ .halt_reg = 0xd08c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd08c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy3_lane3_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane0_rx_clk = {
+ .halt_reg = 0xd040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane0_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane0_tx_clk = {
+ .halt_reg = 0xd090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane0_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane1_rx_clk = {
+ .halt_reg = 0xd044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane1_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane1_tx_clk = {
+ .halt_reg = 0xd094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane1_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane2_rx_clk = {
+ .halt_reg = 0xd048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane2_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane2_tx_clk = {
+ .halt_reg = 0xd098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane2_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane3_rx_clk = {
+ .halt_reg = 0xd04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane3_rx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ecpri_cc_phy4_lane3_tx_clk = {
+ .halt_reg = 0xd09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "ecpri_cc_phy4_lane3_tx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *ecpri_cc_qdu1000_clocks[] = {
+ [ECPRI_CC_ECPRI_CG_CLK] = &ecpri_cc_ecpri_cg_clk.clkr,
+ [ECPRI_CC_ECPRI_CLK_SRC] = &ecpri_cc_ecpri_clk_src.clkr,
+ [ECPRI_CC_ECPRI_DMA_CLK] = &ecpri_cc_ecpri_dma_clk.clkr,
+ [ECPRI_CC_ECPRI_DMA_CLK_SRC] = &ecpri_cc_ecpri_dma_clk_src.clkr,
+ [ECPRI_CC_ECPRI_DMA_NOC_CLK] = &ecpri_cc_ecpri_dma_noc_clk.clkr,
+ [ECPRI_CC_ECPRI_FAST_CLK] = &ecpri_cc_ecpri_fast_clk.clkr,
+ [ECPRI_CC_ECPRI_FAST_CLK_SRC] = &ecpri_cc_ecpri_fast_clk_src.clkr,
+ [ECPRI_CC_ECPRI_FAST_DIV2_CLK] = &ecpri_cc_ecpri_fast_div2_clk.clkr,
+ [ECPRI_CC_ECPRI_FAST_DIV2_CLK_SRC] = &ecpri_cc_ecpri_fast_div2_clk_src.clkr,
+ [ECPRI_CC_ECPRI_FAST_DIV2_NOC_CLK] = &ecpri_cc_ecpri_fast_div2_noc_clk.clkr,
+ [ECPRI_CC_ECPRI_FR_CLK] = &ecpri_cc_ecpri_fr_clk.clkr,
+ [ECPRI_CC_ECPRI_ORAN_CLK_SRC] = &ecpri_cc_ecpri_oran_clk_src.clkr,
+ [ECPRI_CC_ECPRI_ORAN_DIV2_CLK] = &ecpri_cc_ecpri_oran_div2_clk.clkr,
+ [ECPRI_CC_ETH_100G_C2C0_HM_FF_CLK_SRC] = &ecpri_cc_eth_100g_c2c0_hm_ff_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_C2C0_UDP_FIFO_CLK] = &ecpri_cc_eth_100g_c2c0_udp_fifo_clk.clkr,
+ [ECPRI_CC_ETH_100G_C2C1_UDP_FIFO_CLK] = &ecpri_cc_eth_100g_c2c1_udp_fifo_clk.clkr,
+ [ECPRI_CC_ETH_100G_C2C_0_HM_FF_0_CLK] = &ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_C2C_0_HM_FF_1_CLK] = &ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_C2C_HM_FF_0_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_c2c_hm_ff_0_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_C2C_HM_FF_1_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_c2c_hm_ff_1_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK] = &ecpri_cc_eth_100g_c2c_hm_macsec_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK_SRC] = &ecpri_cc_eth_100g_c2c_hm_macsec_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_CLK] =
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_CLK] =
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_CLK_SRC] = &ecpri_cc_eth_100g_dbg_c2c_hm_ff_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_DBG_C2C_UDP_FIFO_CLK] = &ecpri_cc_eth_100g_dbg_c2c_udp_fifo_clk.clkr,
+ [ECPRI_CC_ETH_100G_FH0_HM_FF_CLK_SRC] = &ecpri_cc_eth_100g_fh0_hm_ff_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH0_MACSEC_CLK_SRC] = &ecpri_cc_eth_100g_fh0_macsec_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH1_HM_FF_CLK_SRC] = &ecpri_cc_eth_100g_fh1_hm_ff_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH1_MACSEC_CLK_SRC] = &ecpri_cc_eth_100g_fh1_macsec_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH2_HM_FF_CLK_SRC] = &ecpri_cc_eth_100g_fh2_hm_ff_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH2_MACSEC_CLK_SRC] = &ecpri_cc_eth_100g_fh2_macsec_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_0_CLK] = &ecpri_cc_eth_100g_fh_0_hm_ff_0_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_0_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_0_hm_ff_0_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_1_CLK] = &ecpri_cc_eth_100g_fh_0_hm_ff_1_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_1_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_0_hm_ff_1_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_2_CLK] = &ecpri_cc_eth_100g_fh_0_hm_ff_2_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_2_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_0_hm_ff_2_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_3_CLK] = &ecpri_cc_eth_100g_fh_0_hm_ff_3_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_HM_FF_3_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_0_hm_ff_3_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_0_UDP_FIFO_CLK] = &ecpri_cc_eth_100g_fh_0_udp_fifo_clk.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_0_CLK] = &ecpri_cc_eth_100g_fh_1_hm_ff_0_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_0_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_1_hm_ff_0_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_1_CLK] = &ecpri_cc_eth_100g_fh_1_hm_ff_1_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_1_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_1_hm_ff_1_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_2_CLK] = &ecpri_cc_eth_100g_fh_1_hm_ff_2_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_2_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_1_hm_ff_2_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_3_CLK] = &ecpri_cc_eth_100g_fh_1_hm_ff_3_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_HM_FF_3_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_1_hm_ff_3_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_1_UDP_FIFO_CLK] = &ecpri_cc_eth_100g_fh_1_udp_fifo_clk.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_0_CLK] = &ecpri_cc_eth_100g_fh_2_hm_ff_0_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_0_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_2_hm_ff_0_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_1_CLK] = &ecpri_cc_eth_100g_fh_2_hm_ff_1_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_1_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_2_hm_ff_1_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_2_CLK] = &ecpri_cc_eth_100g_fh_2_hm_ff_2_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_2_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_2_hm_ff_2_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_3_CLK] = &ecpri_cc_eth_100g_fh_2_hm_ff_3_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_HM_FF_3_DIV_CLK_SRC] =
+ &ecpri_cc_eth_100g_fh_2_hm_ff_3_div_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_FH_2_UDP_FIFO_CLK] = &ecpri_cc_eth_100g_fh_2_udp_fifo_clk.clkr,
+ [ECPRI_CC_ETH_100G_FH_MACSEC_0_CLK] = &ecpri_cc_eth_100g_fh_macsec_0_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_MACSEC_1_CLK] = &ecpri_cc_eth_100g_fh_macsec_1_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_FH_MACSEC_2_CLK] = &ecpri_cc_eth_100g_fh_macsec_2_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK] = &ecpri_cc_eth_100g_mac_c2c_hm_ref_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK_SRC] = &ecpri_cc_eth_100g_mac_c2c_hm_ref_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK] =
+ &ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK_SRC] =
+ &ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK] = &ecpri_cc_eth_100g_mac_fh0_hm_ref_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK_SRC] = &ecpri_cc_eth_100g_mac_fh0_hm_ref_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK] = &ecpri_cc_eth_100g_mac_fh1_hm_ref_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK_SRC] = &ecpri_cc_eth_100g_mac_fh1_hm_ref_clk_src.clkr,
+ [ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK] = &ecpri_cc_eth_100g_mac_fh2_hm_ref_clk.branch.clkr,
+ [ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK_SRC] = &ecpri_cc_eth_100g_mac_fh2_hm_ref_clk_src.clkr,
+ [ECPRI_CC_ETH_DBG_NFAPI_AXI_CLK] = &ecpri_cc_eth_dbg_nfapi_axi_clk.clkr,
+ [ECPRI_CC_ETH_DBG_NOC_AXI_CLK] = &ecpri_cc_eth_dbg_noc_axi_clk.clkr,
+ [ECPRI_CC_ETH_PHY_0_OCK_SRAM_CLK] = &ecpri_cc_eth_phy_0_ock_sram_clk.branch.clkr,
+ [ECPRI_CC_ETH_PHY_1_OCK_SRAM_CLK] = &ecpri_cc_eth_phy_1_ock_sram_clk.branch.clkr,
+ [ECPRI_CC_ETH_PHY_2_OCK_SRAM_CLK] = &ecpri_cc_eth_phy_2_ock_sram_clk.branch.clkr,
+ [ECPRI_CC_ETH_PHY_3_OCK_SRAM_CLK] = &ecpri_cc_eth_phy_3_ock_sram_clk.branch.clkr,
+ [ECPRI_CC_ETH_PHY_4_OCK_SRAM_CLK] = &ecpri_cc_eth_phy_4_ock_sram_clk.branch.clkr,
+ [ECPRI_CC_MSS_EMAC_CLK] = &ecpri_cc_mss_emac_clk.clkr,
+ [ECPRI_CC_MSS_EMAC_CLK_SRC] = &ecpri_cc_mss_emac_clk_src.clkr,
+ [ECPRI_CC_MSS_ORAN_CLK] = &ecpri_cc_mss_oran_clk.clkr,
+ [ECPRI_CC_PHY0_LANE0_RX_CLK] = &ecpri_cc_phy0_lane0_rx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE0_TX_CLK] = &ecpri_cc_phy0_lane0_tx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE1_RX_CLK] = &ecpri_cc_phy0_lane1_rx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE1_TX_CLK] = &ecpri_cc_phy0_lane1_tx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE2_RX_CLK] = &ecpri_cc_phy0_lane2_rx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE2_TX_CLK] = &ecpri_cc_phy0_lane2_tx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE3_RX_CLK] = &ecpri_cc_phy0_lane3_rx_clk.clkr,
+ [ECPRI_CC_PHY0_LANE3_TX_CLK] = &ecpri_cc_phy0_lane3_tx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE0_RX_CLK] = &ecpri_cc_phy1_lane0_rx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE0_TX_CLK] = &ecpri_cc_phy1_lane0_tx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE1_RX_CLK] = &ecpri_cc_phy1_lane1_rx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE1_TX_CLK] = &ecpri_cc_phy1_lane1_tx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE2_RX_CLK] = &ecpri_cc_phy1_lane2_rx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE2_TX_CLK] = &ecpri_cc_phy1_lane2_tx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE3_RX_CLK] = &ecpri_cc_phy1_lane3_rx_clk.clkr,
+ [ECPRI_CC_PHY1_LANE3_TX_CLK] = &ecpri_cc_phy1_lane3_tx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE0_RX_CLK] = &ecpri_cc_phy2_lane0_rx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE0_TX_CLK] = &ecpri_cc_phy2_lane0_tx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE1_RX_CLK] = &ecpri_cc_phy2_lane1_rx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE1_TX_CLK] = &ecpri_cc_phy2_lane1_tx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE2_RX_CLK] = &ecpri_cc_phy2_lane2_rx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE2_TX_CLK] = &ecpri_cc_phy2_lane2_tx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE3_RX_CLK] = &ecpri_cc_phy2_lane3_rx_clk.clkr,
+ [ECPRI_CC_PHY2_LANE3_TX_CLK] = &ecpri_cc_phy2_lane3_tx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE0_RX_CLK] = &ecpri_cc_phy3_lane0_rx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE0_TX_CLK] = &ecpri_cc_phy3_lane0_tx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE1_RX_CLK] = &ecpri_cc_phy3_lane1_rx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE1_TX_CLK] = &ecpri_cc_phy3_lane1_tx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE2_RX_CLK] = &ecpri_cc_phy3_lane2_rx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE2_TX_CLK] = &ecpri_cc_phy3_lane2_tx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE3_RX_CLK] = &ecpri_cc_phy3_lane3_rx_clk.clkr,
+ [ECPRI_CC_PHY3_LANE3_TX_CLK] = &ecpri_cc_phy3_lane3_tx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE0_RX_CLK] = &ecpri_cc_phy4_lane0_rx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE0_TX_CLK] = &ecpri_cc_phy4_lane0_tx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE1_RX_CLK] = &ecpri_cc_phy4_lane1_rx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE1_TX_CLK] = &ecpri_cc_phy4_lane1_tx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE2_RX_CLK] = &ecpri_cc_phy4_lane2_rx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE2_TX_CLK] = &ecpri_cc_phy4_lane2_tx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE3_RX_CLK] = &ecpri_cc_phy4_lane3_rx_clk.clkr,
+ [ECPRI_CC_PHY4_LANE3_TX_CLK] = &ecpri_cc_phy4_lane3_tx_clk.clkr,
+ [ECPRI_CC_PLL0] = &ecpri_cc_pll0.clkr,
+ [ECPRI_CC_PLL1] = &ecpri_cc_pll1.clkr,
+};
+
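[Editor's note] Every entry in ecpri_cc_qdu1000_clocks sits at the index defined by the ECPRI_CC_* constants in the DT binding header, so a consumer's clock cell selects the entry directly; slots left empty by the sparse designated initializers come out as NULL and must be skipped by the provider. A toy, runnable model of that index lookup (the index 42 and the trimmed struct are illustrative, not the real binding values):

	#include <stdio.h>

	struct clk_regmap { const char *name; };

	static struct clk_regmap oran = { "ecpri_cc_mss_oran_clk" };

	/* Sparse, index-keyed table like ecpri_cc_qdu1000_clocks; unused slots
	 * default to NULL. Index 42 is a stand-in, not the real binding value. */
	static struct clk_regmap *clocks[] = {
		[42] = &oran,
	};

	static struct clk_regmap *lookup(unsigned int idx)
	{
		if (idx >= sizeof(clocks) / sizeof(clocks[0]) || !clocks[idx])
			return NULL;
		return clocks[idx];
	}

	int main(void)
	{
		struct clk_regmap *c = lookup(42);

		printf("%s\n", c ? c->name : "(hole in table)");
		return 0;
	}
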
+static const struct qcom_reset_map ecpri_cc_qdu1000_resets[] = {
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR] = { 0x9000 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_C2C_BCR] = { 0x80a8 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH0_BCR] = { 0x8000 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH1_BCR] = { 0x8038 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH2_BCR] = { 0x8070 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_WRAPPER_TOP_BCR] = { 0x8104 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR] = { 0xe000 },
+ [ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_NOC_BCR] = { 0xf000 },
+};
+
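[Editor's note] Each reset entry maps a binding constant to a BCR (block control reset) register offset. A self-contained sketch of BCR-style control, assuming the common Qualcomm convention of asserting via bit 0 of the named register (the actual behavior lives in the shared qcom reset driver):

	#include <stdint.h>

	static uint32_t fake_mmio[0x10000 / 4];

	/* BCR-style reset control: set bit 0 to hold the block in reset, clear
	 * it to release (assumed default; the shared driver can override it). */
	static void bcr_assert(uint32_t reg)   { fake_mmio[reg / 4] |=  1u; }
	static void bcr_deassert(uint32_t reg) { fake_mmio[reg / 4] &= ~1u; }

	int main(void)
	{
		bcr_assert(0xe000);	/* ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR */
		bcr_deassert(0xe000);
		return 0;
	}
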
+static const struct regmap_config ecpri_cc_qdu1000_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x31bf0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc ecpri_cc_qdu1000_desc = {
+ .config = &ecpri_cc_qdu1000_regmap_config,
+ .clks = ecpri_cc_qdu1000_clocks,
+ .num_clks = ARRAY_SIZE(ecpri_cc_qdu1000_clocks),
+ .resets = ecpri_cc_qdu1000_resets,
+ .num_resets = ARRAY_SIZE(ecpri_cc_qdu1000_resets),
+};
+
+static const struct of_device_id ecpri_cc_qdu1000_match_table[] = {
+ { .compatible = "qcom,qdu1000-ecpricc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ecpri_cc_qdu1000_match_table);
+
+static int ecpri_cc_qdu1000_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &ecpri_cc_qdu1000_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_evo_pll_configure(&ecpri_cc_pll0, regmap, &ecpri_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&ecpri_cc_pll1, regmap, &ecpri_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &ecpri_cc_qdu1000_desc, regmap);
+}
+
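[Editor's note] The probe maps the register block, brings both PLLs to a known configuration, then hands off to the common qcom_cc_really_probe(), which registers the clock and reset tables above. After that, consumers use the generic clk API; a hypothetical in-kernel consumer sketch, where the "oran" connection id is made up for illustration and is not defined by this patch:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Hypothetical consumer; "oran" is an illustrative clock-names entry. */
	static int example_consumer_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(&pdev->dev, "oran");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);	/* ungates ecpri_cc_mss_oran_clk */
		if (ret)
			return ret;

		clk_disable_unprepare(clk);
		return 0;
	}
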
+static struct platform_driver ecpri_cc_qdu1000_driver = {
+ .probe = ecpri_cc_qdu1000_probe,
+ .driver = {
+ .name = "ecpri_cc-qdu1000",
+ .of_match_table = ecpri_cc_qdu1000_match_table,
+ },
+};
+
+module_platform_driver(ecpri_cc_qdu1000_driver);
+
+MODULE_DESCRIPTION("QTI ECPRICC QDU1000 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index b45f97c07..7b9a3e99b 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -696,7 +696,7 @@ static struct clk_rcg2 apss_ahb_clk_src = {
},
};
-static const struct freq_tbl ftbl_gcc_camss_csi0_1_clk[] = {
+static const struct freq_tbl ftbl_gcc_camss_csi0_1_2_clk[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
{ }
@@ -706,7 +706,7 @@ static struct clk_rcg2 csi0_clk_src = {
.cmd_rcgr = 0x4e020,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
@@ -719,7 +719,7 @@ static struct clk_rcg2 csi1_clk_src = {
.cmd_rcgr = 0x4f020,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
@@ -728,6 +728,19 @@ static struct clk_rcg2 csi1_clk_src = {
},
};
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3c020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
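[Editor's note] The rows of the renamed ftbl_gcc_camss_csi0_1_2_clk decode as plain dividers off GPLL0 (M = N = 0). Assuming GPLL0 runs at 800 MHz on this SoC family, both rates fall out exactly; a runnable check:

	#include <stdio.h>

	/* F(rate, src, pre_div, m, n): with m = n = 0 the RCG is a pure divider.
	 * Assumption: GPLL0 = 800 MHz on MSM8939, as on related SoCs of this era. */
	int main(void)
	{
		const unsigned long gpll0 = 800000000UL;

		printf("%lu\n", gpll0 / 8);	/* F(100000000, P_GPLL0, 8, 0, 0) */
		printf("%lu\n", gpll0 / 4);	/* F(200000000, P_GPLL0, 4, 0, 0) */
		return 0;
	}
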
static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0, 16, 0, 0),
@@ -2385,6 +2398,91 @@ static struct clk_branch gcc_camss_csi1rdi_clk = {
},
};
+static struct clk_branch gcc_camss_csi2_ahb_clk = {
+ .halt_reg = 0x3c040,
+ .clkr = {
+ .enable_reg = 0x3c040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2_clk = {
+ .halt_reg = 0x3c03c,
+ .clkr = {
+ .enable_reg = 0x3c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phy_clk = {
+ .halt_reg = 0x3c048,
+ .clkr = {
+ .enable_reg = 0x3c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phy_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2pix_clk = {
+ .halt_reg = 0x3c058,
+ .clkr = {
+ .enable_reg = 0x3c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2rdi_clk = {
+ .halt_reg = 0x3c050,
+ .clkr = {
+ .enable_reg = 0x3c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_camss_csi_vfe0_clk = {
.halt_reg = 0x58050,
.clkr = {
@@ -3682,6 +3780,7 @@ static struct clk_regmap *gcc_msm8939_clocks[] = {
[APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
[CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
[GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -3751,6 +3850,11 @@ static struct clk_regmap *gcc_msm8939_clocks[] = {
[GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
[GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
[GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
[GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
[GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
[GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
new file mode 100644
index 000000000..9174dd823
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -0,0 +1,3849 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8650-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+
+ DT_PCIE_0_PIPE,
+ DT_PCIE_1_PIPE,
+ DT_PCIE_1_PHY_AUX,
+
+ DT_UFS_PHY_RX_SYMBOL_0,
+ DT_UFS_PHY_RX_SYMBOL_1,
+ DT_UFS_PHY_TX_SYMBOL_0,
+
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL3_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL6_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PHY_AUX_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll0_ao = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_ao",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO_AO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
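[Editor's note] The single { 0x1, 2 } row says register field value 0x1 selects a divide-by-2, making gcc_gpll0_out_even a fixed half-rate tap of gcc_gpll0. A runnable check, assuming a 600 MHz GPLL0 (consistent with the frequency tables later in this file):

	#include <stdio.h>

	struct clk_div_row { unsigned int val; unsigned int div; };

	int main(void)
	{
		/* Mirrors post_div_table_gcc_gpll0_out_even: field value 0x1 => /2. */
		const struct clk_div_row row = { 0x1, 2 };
		const unsigned long gpll0 = 600000000UL;	/* assumed PLL rate */

		printf("gcc_gpll0_out_even = %lu\n", gpll0 / row.div);	/* 300000000 */
		return 0;
	}
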
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even_ao = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even_ao",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_ao.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1_ao = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1_ao",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO_AO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll3_ao = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll3_ao",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO_AO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4_ao = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4_ao",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO_AO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll6_ao = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll6_ao",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO_AO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
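[Editor's note] Each parent_map row pairs a P_* parent with the hardware mux select value, while the companion clk_parent_data array lists the same parents, in the same order, for the clk framework; note the select values need not be contiguous (0, 1, 6 here). A toy translation from framework parent index to mux field value:

	#include <stdio.h>

	struct parent_map { int src; unsigned int cfg; };

	/* Mirrors gcc_parent_map_0: parent index (array position) -> mux select. */
	static const struct parent_map map0[] = {
		{ 0, 0 },	/* P_BI_TCXO            -> select 0 */
		{ 1, 1 },	/* P_GCC_GPLL0_OUT_MAIN -> select 1 */
		{ 2, 6 },	/* P_GCC_GPLL0_OUT_EVEN -> select 6 */
	};

	int main(void)
	{
		unsigned int parent = 2;	/* framework picks GPLL0_OUT_EVEN */

		printf("CFG mux field = %u\n", map0[parent].cfg);	/* prints 6 */
		return 0;
	}
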
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_PCIE_1_PHY_AUX_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_PCIE_1_PHY_AUX },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b070,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_0_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
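[Editor's note] clk_regmap_phy_mux models a two-input pipe-clock mux: the PHY's pipe output and a safe reference. The intent (a simplified model, not the driver's code) is that enabling selects the PHY source and disabling parks the mux on the reference, so the clock stays valid while the PHY is powered down:

	#include <stdio.h>

	/* Two-input pipe-clock mux model: PHY pipe output vs. a safe reference. */
	enum pipe_src { SRC_SAFE_REF, SRC_PHY_PIPE };

	static enum pipe_src mux_sel = SRC_SAFE_REF;

	static void pipe_mux_enable(void)  { mux_sel = SRC_PHY_PIPE; }
	static void pipe_mux_disable(void) { mux_sel = SRC_SAFE_REF; }	/* park */

	int main(void)
	{
		pipe_mux_enable();
		printf("%s\n", mux_sel == SRC_PHY_PIPE ? "phy pipe" : "safe ref");
		pipe_mux_disable();
		return 0;
	}
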
+static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
+ .reg = 0x8d094,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x8d078,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_1_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770e0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x8d07c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
+ .cmd_rcgr = 0x17008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
+ .cmd_rcgr = 0x17040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
+ .cmd_rcgr = 0x1705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
+ .cmd_rcgr = 0x17078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
+ .cmd_rcgr = 0x17094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
+ .cmd_rcgr = 0x170b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
+ .cmd_rcgr = 0x170cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
+ .cmd_rcgr = 0x170e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
+ .cmd_rcgr = 0x17104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src[] = {
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x188a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
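[Editor's note] Rows with nonzero M/N engage the RCG2 fractional divider: rate = src / pre_div * M / N. Assuming gcc_gpll0_out_even is 300 MHz (half of a 600 MHz GPLL0), the UART-style rates in this table work out exactly; a runnable check of two rows:

	#include <stdio.h>

	/* rate = src / pre_div * m / n for an RCG2 row with nonzero M/N.
	 * Assumption: gcc_gpll0_out_even = 300 MHz. */
	static unsigned long long rcg_rate(unsigned long long src,
					   unsigned int pre_div,
					   unsigned int m, unsigned int n)
	{
		return src / pre_div * m / n;
	}

	int main(void)
	{
		const unsigned long long even = 300000000ULL;

		/* F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625) */
		printf("%llu\n", rcg_rate(even, 1, 384, 15625));
		/* F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25) */
		printf("%llu\n", rcg_rate(even, 1, 4, 25));
		return 0;
	}
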
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x18290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x183c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18770,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src[] = {
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
+ .cmd_rcgr = 0x1e9d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e280,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e4f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e628,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
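+/* Note: unlike its sibling SEs, wrap2 s6 uses parent map/data 10 below. */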
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_s6_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(128000000, P_GCC_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e760,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e898,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap3_qspi_ref_clk_src[] = {
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ F(400000000, P_GCC_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap3_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap3_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap3_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x19018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap3_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap3_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x16018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
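+/*
+ * Register-mapped post-dividers on the RCGs above. clk_regmap_div_ro_ops
+ * is read-only: Linux reads back the firmware-programmed divide value but
+ * never reprograms it.
+ */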
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x18280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap3_s0_clk_src = {
+ .reg = 0x19010,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
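+/*
+ * Branch (gate) clocks. halt_check selects how the gate is verified:
+ * BRANCH_HALT polls the halt bit in halt_reg; BRANCH_HALT_VOTED is for
+ * clocks toggled through the shared vote registers (0x52000/0x52008/...),
+ * where the halt bit is only polled on enable since other votes may keep
+ * the clock running; BRANCH_HALT_SKIP and BRANCH_HALT_DELAY do not poll.
+ */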
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x10064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x2601c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2601c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x39088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x10074,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
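+/*
+ * GPLL0 feeds handed off to another clock controller (presumably the GPU
+ * CC, going by the names); they have no local halt register, hence
+ * BRANCH_HALT_DELAY.
+ */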
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b03c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x8d034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_aux_clk = {
+ .halt_reg = 0x8d044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x8d05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x8d050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_core_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s0_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s1_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s2_clk = {
+ .halt_reg = 0x1703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s3_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s4_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s5_clk = {
+ .halt_reg = 0x17090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s6_clk = {
+ .halt_reg = 0x170ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s7_clk = {
+ .halt_reg = 0x170c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s8_clk = {
+ .halt_reg = 0x170e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s9_clk = {
+ .halt_reg = 0x17100,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x1889c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x18284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x183bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x184f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x1862c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x232a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x23294,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_2_clk = {
+ .halt_reg = 0x1e9cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_3_clk = {
+ .halt_reg = 0x1e9d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9d0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e3ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e61c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e754,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e88c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_2x_clk = {
+ .halt_reg = 0x233f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_clk = {
+ .halt_reg = 0x233e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_qspi_ref_clk = {
+ .halt_reg = 0x19014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_s0_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x2313c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2313c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_2_ahb_clk = {
+ .halt_reg = 0x1e9c4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9c4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_3_ahb_clk = {
+ .halt_reg = 0x1e9c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9c8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x2328c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2328c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x23290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23290,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_m_ahb_clk = {
+ .halt_reg = 0x233dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x233dc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_3_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_s_ahb_clk = {
+ .halt_reg = 0x233e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x233e0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_3_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x39068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
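+/* GDSCs (globally distributed switch controllers) implementing the power domains below. */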
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(3),
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x8d004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(1),
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0x8e000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(4),
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .pd = {
+ .name = "ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb3_phy_gdsc = {
+ .gdscr = 0x50018,
+ .pd = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sm8650_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL3] = &gcc_gpll3.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL6] = &gcc_gpll6.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_I2C_CORE_CLK] = &gcc_qupv3_i2c_core_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK] = &gcc_qupv3_i2c_s0_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK_SRC] = &gcc_qupv3_i2c_s0_clk_src.clkr,
+ [GCC_QUPV3_I2C_S1_CLK] = &gcc_qupv3_i2c_s1_clk.clkr,
+ [GCC_QUPV3_I2C_S1_CLK_SRC] = &gcc_qupv3_i2c_s1_clk_src.clkr,
+ [GCC_QUPV3_I2C_S2_CLK] = &gcc_qupv3_i2c_s2_clk.clkr,
+ [GCC_QUPV3_I2C_S2_CLK_SRC] = &gcc_qupv3_i2c_s2_clk_src.clkr,
+ [GCC_QUPV3_I2C_S3_CLK] = &gcc_qupv3_i2c_s3_clk.clkr,
+ [GCC_QUPV3_I2C_S3_CLK_SRC] = &gcc_qupv3_i2c_s3_clk_src.clkr,
+ [GCC_QUPV3_I2C_S4_CLK] = &gcc_qupv3_i2c_s4_clk.clkr,
+ [GCC_QUPV3_I2C_S4_CLK_SRC] = &gcc_qupv3_i2c_s4_clk_src.clkr,
+ [GCC_QUPV3_I2C_S5_CLK] = &gcc_qupv3_i2c_s5_clk.clkr,
+ [GCC_QUPV3_I2C_S5_CLK_SRC] = &gcc_qupv3_i2c_s5_clk_src.clkr,
+ [GCC_QUPV3_I2C_S6_CLK] = &gcc_qupv3_i2c_s6_clk.clkr,
+ [GCC_QUPV3_I2C_S6_CLK_SRC] = &gcc_qupv3_i2c_s6_clk_src.clkr,
+ [GCC_QUPV3_I2C_S7_CLK] = &gcc_qupv3_i2c_s7_clk.clkr,
+ [GCC_QUPV3_I2C_S7_CLK_SRC] = &gcc_qupv3_i2c_s7_clk_src.clkr,
+ [GCC_QUPV3_I2C_S8_CLK] = &gcc_qupv3_i2c_s8_clk.clkr,
+ [GCC_QUPV3_I2C_S8_CLK_SRC] = &gcc_qupv3_i2c_s8_clk_src.clkr,
+ [GCC_QUPV3_I2C_S9_CLK] = &gcc_qupv3_i2c_s9_clk.clkr,
+ [GCC_QUPV3_I2C_S9_CLK_SRC] = &gcc_qupv3_i2c_s9_clk_src.clkr,
+ [GCC_QUPV3_I2C_S_AHB_CLK] = &gcc_qupv3_i2c_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC] = &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_2_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_CORE_2X_CLK] = &gcc_qupv3_wrap3_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP3_CORE_CLK] = &gcc_qupv3_wrap3_core_clk.clkr,
+ [GCC_QUPV3_WRAP3_QSPI_REF_CLK] = &gcc_qupv3_wrap3_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP3_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap3_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK] = &gcc_qupv3_wrap3_s0_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK_SRC] = &gcc_qupv3_wrap3_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_2_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_3_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_M_AHB_CLK] = &gcc_qupv3_wrap_3_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_S_AHB_CLK] = &gcc_qupv3_wrap_3_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [GCC_GPLL0_AO] = &gcc_gpll0_ao.clkr,
+ [GCC_GPLL0_OUT_EVEN_AO] = &gcc_gpll0_out_even_ao.clkr,
+ [GCC_GPLL1_AO] = &gcc_gpll1_ao.clkr,
+ [GCC_GPLL3_AO] = &gcc_gpll3_ao.clkr,
+ [GCC_GPLL4_AO] = &gcc_gpll4_ao.clkr,
+ [GCC_GPLL6_AO] = &gcc_gpll6_ao.clkr,
+};
+
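+/* Block Control Register (BCR) offsets; a second field selects a single ARES bit instead of the default BIT(0). */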
+static const struct qcom_reset_map gcc_sm8650_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x8d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e024 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_3_BCR] = { 0x19000 },
+ [GCC_QUPV3_WRAPPER_I2C_BCR] = { 0x17000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32024, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
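+/* QUP RCGs with hardware Dynamic Frequency Switching (DFS) support; qcom_cc_register_rcg_dfs() switches their ops at probe when DFS is enabled in hardware. */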
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap3_qspi_ref_clk_src),
+};
+
+static struct gdsc *gcc_sm8650_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [UFS_MEM_PHY_GDSC] = &ufs_mem_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB3_PHY_GDSC] = &usb3_phy_gdsc,
+};
+
+static const struct regmap_config gcc_sm8650_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8650_desc = {
+ .config = &gcc_sm8650_regmap_config,
+ .clks = gcc_sm8650_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8650_clocks),
+ .resets = gcc_sm8650_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8650_resets),
+ .gdscs = gcc_sm8650_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8650_gdscs),
+};
+
+static const struct of_device_id gcc_sm8650_match_table[] = {
+ { .compatible = "qcom,sm8650-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8650_match_table);
+
+static int gcc_sm8650_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8650_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+	/* Keep the critical clocks always on */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0)); /* gcc_camera_ahb_clk */
+ regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0)); /* gcc_camera_xo_clk */
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0)); /* gcc_disp_ahb_clk */
+ regmap_update_bits(regmap, 0x27018, BIT(0), BIT(0)); /* gcc_disp_xo_clk */
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); /* gcc_gpu_cfg_ahb_clk */
+ regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0)); /* gcc_video_ahb_clk */
+ regmap_update_bits(regmap, 0x32030, BIT(0), BIT(0)); /* gcc_video_xo_clk */
+
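+	/* Force the ICE core clock's memories on so their contents are retained while the clock is gated */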
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+
+	/* Clear GDSC_SLEEP_ENA_VOTE to stop votes from being auto-removed in sleep. */
+ regmap_write(regmap, 0x52150, 0x0);
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8650_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8650_driver = {
+ .probe = gcc_sm8650_probe,
+ .driver = {
+ .name = "gcc-sm8650",
+ .of_match_table = gcc_sm8650_match_table,
+ },
+};
+
+static int __init gcc_sm8650_init(void)
+{
+ return platform_driver_register(&gcc_sm8650_driver);
+}
+subsys_initcall(gcc_sm8650_init);
+
+static void __exit gcc_sm8650_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8650_driver);
+}
+module_exit(gcc_sm8650_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8650 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
new file mode 100644
index 000000000..d7182d6e9
--- /dev/null
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -0,0 +1,6808 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
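+/* Indices of the parent clocks supplied via the "clocks" property in the device tree. */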
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_PCIE_3_PIPE,
+ DT_PCIE_4_PIPE,
+ DT_PCIE_5_PIPE,
+ DT_PCIE_6A_PIPE,
+ DT_PCIE_6B_PIPE,
+ DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE,
+ DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE,
+ DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE,
+};
+
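+/* Internal parent selector identifiers used by the parent_map tables below. */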
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+ P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
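+/* { register value, divider } pairs: 0x1 selects the fixed divide-by-2 tap of GPLL0. */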
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52030,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52030,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52030,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52030,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+	{ .index = DT_BI_TCXO },
+	{ .hw = &gcc_gpll0.clkr.hw },
+	{ .hw = &gcc_gpll9.clkr.hw },
+	{ .hw = &gcc_gpll4.clkr.hw },
+	{ .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+	{ P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+	{ P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+	{ .index = DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE },
+	{ .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+	{ P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+	{ P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+	{ .index = DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE },
+	{ .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+	{ P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+	{ P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+	{ .index = DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE },
+	{ .index = DT_BI_TCXO },
+};
+
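+/* F(rate, parent, divider, m, n): integer pre-divider plus an optional M/N fraction (0/0 means no MND). */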
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xa0180,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa0054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x2c180,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x2c054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0x13180,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x13054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3_aux_clk_src = {
+ .cmd_rcgr = 0x5808c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x58070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_aux_clk_src = {
+ .cmd_rcgr = 0x6b080,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_aux_clk_src = {
+ .cmd_rcgr = 0x2f080,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x2f064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6a_aux_clk_src = {
+ .cmd_rcgr = 0x3108c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x31070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6b_aux_clk_src = {
+ .cmd_rcgr = 0x8d08c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8d070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_rscc_xo_clk_src = {
+ .cmd_rcgr = 0xa400c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
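+/* The init data lives in a separate, non-const struct so DFS registration can update the ops at probe time (assuming this driver registers DFS like gcc-sm8650 above). */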
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x42010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x42148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x42288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x423c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x42500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x42638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x42770,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x428a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18770,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x188a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e770,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e8a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x16018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
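+/*
+ * UFS PHY clocks (AXI, ICE inline crypto engine, UniPro core and PHY
+ * aux) use clk_rcg2_shared_ops, which parks the RCG on the safe XO
+ * source instead of gating it outright, so the root stays alive for
+ * hardware handshakes while UFS is suspended.
+ */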
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0x2902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
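+/*
+ * The mock UTMI RCGs reuse ftbl_gcc_pcie_0_aux_clk_src (i.e. they run
+ * at the 19.2 MHz XO rate) and are further divided by the read-only
+ * *_mock_utmi_postdiv_clk_src dividers defined later in this file.
+ */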
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x29158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0x1702c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x17158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0xa102c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa1044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_master_clk_src = {
+ .cmd_rcgr = 0xa202c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa2044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0x172a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa1074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa2074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
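+/*
+ * Each USB4 router instance gets the same four RCGs: a master clock, a
+ * PHY PCIe pipe clock, a sideband interface (sb_if) clock and a time
+ * management unit (tmu) clock; instances 1 and 2 simply reuse the
+ * instance 0 frequency tables.
+ */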
+static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
+ F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
+ F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_master_clk_src = {
+ .cmd_rcgr = 0x9f024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x9f0e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_sb_if_clk_src = {
+ .cmd_rcgr = 0x9f08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_tmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_tmu_clk_src = {
+ .cmd_rcgr = 0x9f070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb4_0_tmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_master_clk_src = {
+ .cmd_rcgr = 0x2b024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2b0e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = {
+ .cmd_rcgr = 0x2b08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = {
+ .cmd_rcgr = 0x2b070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb4_0_tmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_master_clk_src = {
+ .cmd_rcgr = 0x11024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x110e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_sb_if_clk_src = {
+ .cmd_rcgr = 0x1108c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_tmu_clk_src = {
+ .cmd_rcgr = 0x11070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb4_0_tmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
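+/*
+ * PCIe pipe clocks are modelled with clk_regmap_phy_mux: a mux that
+ * selects between the PHY's pipe output (the DT_PCIE_*_PIPE clock from
+ * DT) and a safe reference, so the clock can be parked while the PHY
+ * is powered down.
+ */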
+static struct clk_regmap_phy_mux gcc_pcie_3_pipe_clk_src = {
+ .reg = 0x58088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
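+/*
+ * The *_pipe_div_clk_src dividers sit on the pipe clocks above and
+ * feed the *_pipediv2_clk branches; their divide value is fixed by
+ * hardware, so clk_regmap_div_ro_ops only ever reads the register.
+ */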
+static struct clk_regmap_div gcc_pcie_3_pipe_div_clk_src = {
+ .reg = 0x5806c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
+ .reg = 0x6b07c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = {
+ .reg = 0x6b060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_5_pipe_clk_src = {
+ .reg = 0x2f07c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_5_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_5_pipe_div_clk_src = {
+ .reg = 0x2f060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_6a_pipe_clk_src = {
+ .reg = 0x31088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_6A_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_6a_pipe_div_clk_src = {
+ .reg = 0x3106c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_6b_pipe_clk_src = {
+ .reg = 0x8d088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_6B_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_6b_pipe_div_clk_src = {
+ .reg = 0x8d06c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
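+/*
+ * CDIV dividers on the QUP serial-engine and USB mock UTMI RCGs.
+ * These are left at their reset/firmware-programmed value, hence the
+ * read-only clk_regmap_div_ro_ops; CLK_SET_RATE_PARENT forwards rate
+ * requests to the parent RCG instead.
+ */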
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_div_clk_src = {
+ .reg = 0x42284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s3_div_clk_src = {
+ .reg = 0x423c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_div_clk_src = {
+ .reg = 0x18284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s3_div_clk_src = {
+ .reg = 0x183c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s2_div_clk_src = {
+ .reg = 0x1e284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s3_div_clk_src = {
+ .reg = 0x1e3c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0x29284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = {
+ .reg = 0x17284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0xa105c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_tert_mock_utmi_postdiv_clk_src = {
+ .reg = 0xa205c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
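+/*
+ * Branch (gate) clocks.  enable_reg/enable_mask control the gate and
+ * halt_reg is polled to confirm the state change.  BRANCH_HALT_VOTED
+ * gates are enabled through the shared voting registers in the 0x52000
+ * range, where other agents may hold their own vote, so the halt bit
+ * is not expected to clear on disable; BRANCH_HALT_SKIP omits polling
+ * entirely for clocks whose status bit is unreliable.  hwcg_reg and
+ * hwcg_bit turn on hardware auto clock gating for the branch.
+ */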
+static struct clk_branch gcc_aggre_noc_usb_north_axi_clk = {
+ .halt_reg = 0x2d17c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d17c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d17c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_usb_north_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_usb_south_axi_clk = {
+ .halt_reg = 0x2d174,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d174,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d174,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_usb_south_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0x2928c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2928c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0x173d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x173d0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x173d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0xa1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa1090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_tert_axi_clk = {
+ .halt_reg = 0xa2090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa2090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_0_axi_clk = {
+ .halt_reg = 0x9f118,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9f118,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_1_axi_clk = {
+ .halt_reg = 0x2b118,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b118,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_2_axi_clk = {
+ .halt_reg = 0x11118,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x11118,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x11118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb_noc_axi_clk = {
+ .halt_reg = 0x2d034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb_noc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_ahb_clk = {
+ .halt_reg = 0x4a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_axi_clk = {
+ .halt_reg = 0x4a008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x4a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_xo_clk = {
+ .halt_reg = 0x4a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x2601c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2601c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_north_ahb_clk = {
+ .halt_reg = 0x1002c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1002c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_north_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_south_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0x29288,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x29288,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x29288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0x173cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x173cc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x173cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0xa108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa108c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_tert_axi_clk = {
+ .halt_reg = 0xa208c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa208c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_ahb_clk = {
+ .halt_reg = 0x2d024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_north_ahb_clk = {
+ .halt_reg = 0x2d028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_north_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_south_ahb_clk = {
+ .halt_reg = 0x2d02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie1_tunnel_clk = {
+ .halt_reg = 0x2c2b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie1_tunnel_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie2_tunnel_clk = {
+ .halt_reg = 0x132b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie2_tunnel_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_north_sf_axi_clk = {
+ .halt_reg = 0x10014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_north_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_south_sf_axi_clk = {
+ .halt_reg = 0x10018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_south_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_tunnel_clk = {
+ .halt_reg = 0xa02b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa02b4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_tunnel_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7115c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7115c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7115c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x71004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
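+/*
+ * These branches export GPLL0 (and its even divider) towards the GPU
+ * clock controller.  There is no status register for them, so
+ * BRANCH_HALT_DELAY makes the framework simply delay after toggling
+ * the vote instead of polling a halt bit.
+ */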
+static struct clk_branch gcc_gpu_gpll0_cph_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_cph_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_cph_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_cph_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
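+/*
+ * The *_phy_rchng_clk branches clock the PCIe PHY rate-change logic;
+ * their RCG parents are typically programmed to 100 MHz from GPLL0.
+ */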
+static struct clk_branch gcc_pcie0_phy_rchng_clk = {
+ .halt_reg = 0xa0050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_phy_rchng_clk = {
+ .halt_reg = 0x2c050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_phy_rchng_clk = {
+ .halt_reg = 0x13050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xa0038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xa0034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa0034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xa0028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xa0028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xa0044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xa001c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa001c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xa0018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x2c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x2c034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2c034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x2c028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2c028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x2c044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x2c01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2c01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x2c018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x13038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x13034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x13034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x13028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x13028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x13044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x1301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1301c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_aux_clk = {
+ .halt_reg = 0x58038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_cfg_ahb_clk = {
+ .halt_reg = 0x58034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x58034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_mstr_axi_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x58028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_phy_aux_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_phy_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_phy_rchng_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_pipe_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_pipediv2_clk = {
+ .halt_reg = 0x58060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_slv_axi_clk = {
+ .halt_reg = 0x5801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x5801c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3_slv_q2a_axi_clk = {
+ .halt_reg = 0x58018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_aux_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_cfg_ahb_clk = {
+ .halt_reg = 0x6b034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_mstr_axi_clk = {
+ .halt_reg = 0x6b028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_phy_rchng_clk = {
+ .halt_reg = 0x6b050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipediv2_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_aux_clk = {
+ .halt_reg = 0x2f038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_cfg_ahb_clk = {
+ .halt_reg = 0x2f034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2f034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_mstr_axi_clk = {
+ .halt_reg = 0x2f028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2f028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_phy_rchng_clk = {
+ .halt_reg = 0x2f050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_clk = {
+ .halt_reg = 0x2f044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipediv2_clk = {
+ .halt_reg = 0x2f054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_axi_clk = {
+ .halt_reg = 0x2f01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2f01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_q2a_axi_clk = {
+ .halt_reg = 0x2f018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_aux_clk = {
+ .halt_reg = 0x31038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_cfg_ahb_clk = {
+ .halt_reg = 0x31034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x31034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_mstr_axi_clk = {
+ .halt_reg = 0x31028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x31028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_phy_aux_clk = {
+ .halt_reg = 0x31044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_phy_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_phy_rchng_clk = {
+ .halt_reg = 0x3105c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_pipe_clk = {
+ .halt_reg = 0x31050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_pipediv2_clk = {
+ .halt_reg = 0x31060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6a_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_slv_axi_clk = {
+ .halt_reg = 0x3101c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3101c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6a_slv_q2a_axi_clk = {
+ .halt_reg = 0x31018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_aux_clk = {
+ .halt_reg = 0x8d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_cfg_ahb_clk = {
+ .halt_reg = 0x8d034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_mstr_axi_clk = {
+ .halt_reg = 0x8d028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8d028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_phy_aux_clk = {
+ .halt_reg = 0x8d044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_phy_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_phy_rchng_clk = {
+ .halt_reg = 0x8d05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_pipe_clk = {
+ .halt_reg = 0x8d050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_pipediv2_clk = {
+ .halt_reg = 0x8d060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_slv_axi_clk = {
+ .halt_reg = 0x8d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6b_slv_q2a_axi_clk = {
+ .halt_reg = 0x8d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_ahb_clk = {
+ .halt_reg = 0xa4008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa4008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0xa4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_rscc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
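+/*
+ * Branches such as gcc_pdm2_clk list their RCG as the sole parent via
+ * parent_hws and set CLK_SET_RATE_PARENT, so a clk_set_rate() on the
+ * branch is forwarded up to the RCG; the branch itself only gates the
+ * output.
+ */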
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_av1e_ahb_clk = {
+ .halt_reg = 0x4a018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4a018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
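+/*
+ * Serial engines 2 and 3 of each QUP wrapper expose two branches: the
+ * qspi_s{2,3} branch is fed directly from the s{2,3} RCG, while the plain
+ * s{2,3} branch is fed from an intermediate s{2,3}_div clock, presumably so
+ * the QSPI path can run at the full RCG rate while the SE core runs at a
+ * divided rate.
+ */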
+static struct clk_branch gcc_qupv3_wrap0_qspi_s2_clk = {
+ .halt_reg = 0x42280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s3_clk = {
+ .halt_reg = 0x423c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x42004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x4213c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x42274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x423b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x424f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x4262c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x42764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x4289c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23168,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23158,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s2_clk = {
+ .halt_reg = 0x18280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s3_clk = {
+ .halt_reg = 0x183c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x1862c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x1889c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x232b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x232a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s2_clk = {
+ .halt_reg = 0x1e280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s3_clk = {
+ .halt_reg = 0x1e3c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e3b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e62c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e89c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23150,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x232a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x232a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x232a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x232a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb_axi_clk = {
+ .halt_reg = 0x2d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_usb_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
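+/*
+ * The UFS rx/tx symbol branches carry no parent: their sources are the
+ * symbol clocks recovered by the UFS PHY, outside this clock controller,
+ * so the GCC merely gates them.
+ */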
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x29018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
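+/*
+ * The mock UTMI branches (the controller-generated stand-in for the PHY's
+ * UTMI clock) are parented to a postdiv clock source, i.e. an RCG followed
+ * by a fixed post-divider, again with CLK_SET_RATE_PARENT so rate requests
+ * reach the divider/RCG pair.
+ */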
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x29028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x29024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0xa1018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa1018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0xa1028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0xa1024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa1024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_master_clk = {
+ .halt_reg = 0xa2018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_mock_utmi_clk = {
+ .halt_reg = 0xa2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_sleep_clk = {
+ .halt_reg = 0xa2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0x17288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0x1728c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1728c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17290,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0x17298,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17298,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
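+/*
+ * The USB3 PHY pipe clocks are routed through regmap muxes: a 2-bit field
+ * at the given register selects among the parents in gcc_parent_data_10/11/12,
+ * typically the PHY's pipe clock output or the always-on XO, so the branch
+ * keeps a valid source while the PHY is down. clk_regmap_mux_closest_ops
+ * picks the parent whose rate is closest to the requested rate.
+ */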
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0xa1060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa1060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0xa1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0xa106c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0xa1068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa1068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa1068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_aux_clk = {
+ .halt_reg = 0xa2060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_com_aux_clk = {
+ .halt_reg = 0xa2064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa2064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
+ .reg = 0xa206c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
+ .halt_reg = 0xa2068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa2068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa2068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
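+/*
+ * Three USB4 controller instances (0, 1, 2) follow with an identical set of
+ * branches: cfg_ahb, dp0/dp1, master, the PHY p2rr2p/pcie/usb pipe clocks,
+ * rx0/rx1, sideband interface (sb_if), sys and tmu. Only the register bases
+ * differ (0x9fxxx, 0x2bxxx, 0x11xxx), plus which vote register carries the
+ * phy_pcie_pipe enable.
+ */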
+static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
+ .halt_reg = 0x9f0a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9f0a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp0_clk = {
+ .halt_reg = 0x9f060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp1_clk = {
+ .halt_reg = 0x9f108,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f108,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_master_clk = {
+ .halt_reg = 0x9f018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x9f0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
+ .halt_reg = 0x9f048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
+ .halt_reg = 0x9f0b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
+ .halt_reg = 0x9f0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
+ .halt_reg = 0x9f0a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9f0a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sb_if_clk = {
+ .halt_reg = 0x9f044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sys_clk = {
+ .halt_reg = 0x9f054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sys_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_tmu_clk = {
+ .halt_reg = 0x9f088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9f088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
+ .halt_reg = 0x2b0a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b0a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp0_clk = {
+ .halt_reg = 0x2b060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp1_clk = {
+ .halt_reg = 0x2b108,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b108,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_master_clk = {
+ .halt_reg = 0x2b018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2b0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
+ .halt_reg = 0x2b0b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
+ .halt_reg = 0x2b0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
+ .halt_reg = 0x2b0a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b0a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sb_if_clk = {
+ .halt_reg = 0x2b044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sys_clk = {
+ .halt_reg = 0x2b054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sys_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_tmu_clk = {
+ .halt_reg = 0x2b088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
+ .halt_reg = 0x110a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x110a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x110a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp0_clk = {
+ .halt_reg = 0x11060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp1_clk = {
+ .halt_reg = 0x11108,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11108,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_master_clk = {
+ .halt_reg = 0x11018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x110d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x110d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
+ .halt_reg = 0x11048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
+ .halt_reg = 0x110b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x110b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
+ .halt_reg = 0x110c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x110c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
+ .halt_reg = 0x110a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x110a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x110a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sb_if_clk = {
+ .halt_reg = 0x11044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sys_clk = {
+ .halt_reg = 0x11054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sys_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_tmu_clk = {
+ .halt_reg = 0x11088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x11088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x11088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_pcie_0_tunnel_gdsc = {
+ .gdscr = 0xa0004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_tunnel_gdsc = {
+ .gdscr = 0x2c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_2_tunnel_gdsc = {
+ .gdscr = 0x13004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_2_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3_gdsc = {
+ .gdscr = 0x58004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3_phy_gdsc = {
+ .gdscr = 0x3e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_4_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_4_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_gdsc = {
+ .gdscr = 0x2f004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_5_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_phy_gdsc = {
+ .gdscr = 0x30000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_5_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_phy_gdsc = {
+ .gdscr = 0x8e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_6_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6a_gdsc = {
+ .gdscr = 0x31004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_6a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6b_gdsc = {
+ .gdscr = 0x8d004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_6b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0x29004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_mp_gdsc = {
+ .gdscr = 0x17004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0xa1004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_tert_gdsc = {
+ .gdscr = 0xa2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_tert_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss0_phy_gdsc = {
+ .gdscr = 0x1900c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
+ .gdscr = 0x5400c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_0_gdsc = {
+ .gdscr = 0x9f004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_1_gdsc = {
+ .gdscr = 0x2b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_2_gdsc = {
+ .gdscr = 0x11004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_0_phy_gdsc = {
+ .gdscr = 0x50024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_1_phy_gdsc = {
+ .gdscr = 0x2a024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_2_phy_gdsc = {
+ .gdscr = 0xa3024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_2_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_x1e80100_clocks[] = {
+ [GCC_AGGRE_NOC_USB_NORTH_AXI_CLK] = &gcc_aggre_noc_usb_north_axi_clk.clkr,
+ [GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK] = &gcc_aggre_noc_usb_south_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_TERT_AXI_CLK] = &gcc_aggre_usb3_tert_axi_clk.clkr,
+ [GCC_AGGRE_USB4_0_AXI_CLK] = &gcc_aggre_usb4_0_axi_clk.clkr,
+ [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr,
+ [GCC_AGGRE_USB4_2_AXI_CLK] = &gcc_aggre_usb4_2_axi_clk.clkr,
+ [GCC_AGGRE_USB_NOC_AXI_CLK] = &gcc_aggre_usb_noc_axi_clk.clkr,
+ [GCC_AV1E_AHB_CLK] = &gcc_av1e_ahb_clk.clkr,
+ [GCC_AV1E_AXI_CLK] = &gcc_av1e_axi_clk.clkr,
+ [GCC_AV1E_XO_CLK] = &gcc_av1e_xo_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_north_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_south_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_TERT_AXI_CLK] = &gcc_cfg_noc_usb3_tert_axi_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_AHB_CLK] = &gcc_cfg_noc_usb_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_NORTH_AHB_CLK] = &gcc_cfg_noc_usb_anoc_north_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_usb_anoc_south_ahb_clk.clkr,
+ [GCC_CNOC_PCIE1_TUNNEL_CLK] = &gcc_cnoc_pcie1_tunnel_clk.clkr,
+ [GCC_CNOC_PCIE2_TUNNEL_CLK] = &gcc_cnoc_pcie2_tunnel_clk.clkr,
+ [GCC_CNOC_PCIE_NORTH_SF_AXI_CLK] = &gcc_cnoc_pcie_north_sf_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SOUTH_SF_AXI_CLK] = &gcc_cnoc_pcie_south_sf_axi_clk.clkr,
+ [GCC_CNOC_PCIE_TUNNEL_CLK] = &gcc_cnoc_pcie_tunnel_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CPH_CLK_SRC] = &gcc_gpu_gpll0_cph_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CPH_CLK_SRC] = &gcc_gpu_gpll0_div_cph_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr,
+ [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr,
+ [GCC_PCIE2_PHY_RCHNG_CLK] = &gcc_pcie2_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3_AUX_CLK] = &gcc_pcie_3_aux_clk.clkr,
+ [GCC_PCIE_3_AUX_CLK_SRC] = &gcc_pcie_3_aux_clk_src.clkr,
+ [GCC_PCIE_3_CFG_AHB_CLK] = &gcc_pcie_3_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3_MSTR_AXI_CLK] = &gcc_pcie_3_mstr_axi_clk.clkr,
+ [GCC_PCIE_3_PHY_AUX_CLK] = &gcc_pcie_3_phy_aux_clk.clkr,
+ [GCC_PCIE_3_PHY_RCHNG_CLK] = &gcc_pcie_3_phy_rchng_clk.clkr,
+ [GCC_PCIE_3_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3_PIPE_CLK] = &gcc_pcie_3_pipe_clk.clkr,
+ [GCC_PCIE_3_PIPE_CLK_SRC] = &gcc_pcie_3_pipe_clk_src.clkr,
+ [GCC_PCIE_3_PIPE_DIV_CLK_SRC] = &gcc_pcie_3_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3_PIPEDIV2_CLK] = &gcc_pcie_3_pipediv2_clk.clkr,
+ [GCC_PCIE_3_SLV_AXI_CLK] = &gcc_pcie_3_slv_axi_clk.clkr,
+ [GCC_PCIE_3_SLV_Q2A_AXI_CLK] = &gcc_pcie_3_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr,
+ [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr,
+ [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK] = &gcc_pcie_4_phy_rchng_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr,
+ [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr,
+ [GCC_PCIE_4_PIPEDIV2_CLK] = &gcc_pcie_4_pipediv2_clk.clkr,
+ [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr,
+ [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK] = &gcc_pcie_5_aux_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK_SRC] = &gcc_pcie_5_aux_clk_src.clkr,
+ [GCC_PCIE_5_CFG_AHB_CLK] = &gcc_pcie_5_cfg_ahb_clk.clkr,
+ [GCC_PCIE_5_MSTR_AXI_CLK] = &gcc_pcie_5_mstr_axi_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK] = &gcc_pcie_5_phy_rchng_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK_SRC] = &gcc_pcie_5_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_CLK] = &gcc_pcie_5_pipe_clk.clkr,
+ [GCC_PCIE_5_PIPE_CLK_SRC] = &gcc_pcie_5_pipe_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_DIV_CLK_SRC] = &gcc_pcie_5_pipe_div_clk_src.clkr,
+ [GCC_PCIE_5_PIPEDIV2_CLK] = &gcc_pcie_5_pipediv2_clk.clkr,
+ [GCC_PCIE_5_SLV_AXI_CLK] = &gcc_pcie_5_slv_axi_clk.clkr,
+ [GCC_PCIE_5_SLV_Q2A_AXI_CLK] = &gcc_pcie_5_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_6A_AUX_CLK] = &gcc_pcie_6a_aux_clk.clkr,
+ [GCC_PCIE_6A_AUX_CLK_SRC] = &gcc_pcie_6a_aux_clk_src.clkr,
+ [GCC_PCIE_6A_CFG_AHB_CLK] = &gcc_pcie_6a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_6A_MSTR_AXI_CLK] = &gcc_pcie_6a_mstr_axi_clk.clkr,
+ [GCC_PCIE_6A_PHY_AUX_CLK] = &gcc_pcie_6a_phy_aux_clk.clkr,
+ [GCC_PCIE_6A_PHY_RCHNG_CLK] = &gcc_pcie_6a_phy_rchng_clk.clkr,
+ [GCC_PCIE_6A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_6a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_6A_PIPE_CLK] = &gcc_pcie_6a_pipe_clk.clkr,
+ [GCC_PCIE_6A_PIPE_CLK_SRC] = &gcc_pcie_6a_pipe_clk_src.clkr,
+ [GCC_PCIE_6A_PIPE_DIV_CLK_SRC] = &gcc_pcie_6a_pipe_div_clk_src.clkr,
+ [GCC_PCIE_6A_PIPEDIV2_CLK] = &gcc_pcie_6a_pipediv2_clk.clkr,
+ [GCC_PCIE_6A_SLV_AXI_CLK] = &gcc_pcie_6a_slv_axi_clk.clkr,
+ [GCC_PCIE_6A_SLV_Q2A_AXI_CLK] = &gcc_pcie_6a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_6B_AUX_CLK] = &gcc_pcie_6b_aux_clk.clkr,
+ [GCC_PCIE_6B_AUX_CLK_SRC] = &gcc_pcie_6b_aux_clk_src.clkr,
+ [GCC_PCIE_6B_CFG_AHB_CLK] = &gcc_pcie_6b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_6B_MSTR_AXI_CLK] = &gcc_pcie_6b_mstr_axi_clk.clkr,
+ [GCC_PCIE_6B_PHY_AUX_CLK] = &gcc_pcie_6b_phy_aux_clk.clkr,
+ [GCC_PCIE_6B_PHY_RCHNG_CLK] = &gcc_pcie_6b_phy_rchng_clk.clkr,
+ [GCC_PCIE_6B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_6b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_6B_PIPE_CLK] = &gcc_pcie_6b_pipe_clk.clkr,
+ [GCC_PCIE_6B_PIPE_CLK_SRC] = &gcc_pcie_6b_pipe_clk_src.clkr,
+ [GCC_PCIE_6B_PIPE_DIV_CLK_SRC] = &gcc_pcie_6b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_6B_PIPEDIV2_CLK] = &gcc_pcie_6b_pipediv2_clk.clkr,
+ [GCC_PCIE_6B_SLV_AXI_CLK] = &gcc_pcie_6b_slv_axi_clk.clkr,
+ [GCC_PCIE_6B_SLV_Q2A_AXI_CLK] = &gcc_pcie_6b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_RSCC_AHB_CLK] = &gcc_pcie_rscc_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK_SRC] = &gcc_pcie_rscc_xo_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_AV1E_AHB_CLK] = &gcc_qmip_av1e_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK] = &gcc_qupv3_wrap0_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK] = &gcc_qupv3_wrap0_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_DIV_CLK_SRC] = &gcc_qupv3_wrap0_s2_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_DIV_CLK_SRC] = &gcc_qupv3_wrap0_s3_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK] = &gcc_qupv3_wrap1_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK] = &gcc_qupv3_wrap1_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_DIV_CLK_SRC] = &gcc_qupv3_wrap1_s2_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_DIV_CLK_SRC] = &gcc_qupv3_wrap1_s3_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK] = &gcc_qupv3_wrap2_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK] = &gcc_qupv3_wrap2_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_DIV_CLK_SRC] = &gcc_qupv3_wrap2_s2_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_DIV_CLK_SRC] = &gcc_qupv3_wrap2_s3_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_USB_AXI_CLK] = &gcc_sys_noc_usb_axi_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK] = &gcc_usb30_tert_master_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK_SRC] = &gcc_usb30_tert_master_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK] = &gcc_usb30_tert_mock_utmi_clk.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK] = &gcc_usb3_tert_phy_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK_SRC] = &gcc_usb3_tert_phy_aux_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_COM_AUX_CLK] = &gcc_usb3_tert_phy_com_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK] = &gcc_usb3_tert_phy_pipe_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb3_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB4_0_CFG_AHB_CLK] = &gcc_usb4_0_cfg_ahb_clk.clkr,
+ [GCC_USB4_0_DP0_CLK] = &gcc_usb4_0_dp0_clk.clkr,
+ [GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
+ [GCC_USB4_0_SYS_CLK] = &gcc_usb4_0_sys_clk.clkr,
+ [GCC_USB4_0_TMU_CLK] = &gcc_usb4_0_tmu_clk.clkr,
+ [GCC_USB4_0_TMU_CLK_SRC] = &gcc_usb4_0_tmu_clk_src.clkr,
+ [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr,
+ [GCC_USB4_1_DP0_CLK] = &gcc_usb4_1_dp0_clk.clkr,
+ [GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
+ [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr,
+ [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr,
+ [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr,
+ [GCC_USB4_2_CFG_AHB_CLK] = &gcc_usb4_2_cfg_ahb_clk.clkr,
+ [GCC_USB4_2_DP0_CLK] = &gcc_usb4_2_dp0_clk.clkr,
+ [GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
+ [GCC_USB4_2_SYS_CLK] = &gcc_usb4_2_sys_clk.clkr,
+ [GCC_USB4_2_TMU_CLK] = &gcc_usb4_2_tmu_clk.clkr,
+ [GCC_USB4_2_TMU_CLK_SRC] = &gcc_usb4_2_tmu_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_x1e80100_gdscs[] = {
+ [GCC_PCIE_0_TUNNEL_GDSC] = &gcc_pcie_0_tunnel_gdsc,
+ [GCC_PCIE_1_TUNNEL_GDSC] = &gcc_pcie_1_tunnel_gdsc,
+ [GCC_PCIE_2_TUNNEL_GDSC] = &gcc_pcie_2_tunnel_gdsc,
+ [GCC_PCIE_3_GDSC] = &gcc_pcie_3_gdsc,
+ [GCC_PCIE_3_PHY_GDSC] = &gcc_pcie_3_phy_gdsc,
+ [GCC_PCIE_4_GDSC] = &gcc_pcie_4_gdsc,
+ [GCC_PCIE_4_PHY_GDSC] = &gcc_pcie_4_phy_gdsc,
+ [GCC_PCIE_5_GDSC] = &gcc_pcie_5_gdsc,
+ [GCC_PCIE_5_PHY_GDSC] = &gcc_pcie_5_phy_gdsc,
+ [GCC_PCIE_6_PHY_GDSC] = &gcc_pcie_6_phy_gdsc,
+ [GCC_PCIE_6A_GDSC] = &gcc_pcie_6a_gdsc,
+ [GCC_PCIE_6B_GDSC] = &gcc_pcie_6b_gdsc,
+ [GCC_UFS_MEM_PHY_GDSC] = &gcc_ufs_mem_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_MP_GDSC] = &gcc_usb30_mp_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [GCC_USB30_TERT_GDSC] = &gcc_usb30_tert_gdsc,
+ [GCC_USB3_MP_SS0_PHY_GDSC] = &gcc_usb3_mp_ss0_phy_gdsc,
+ [GCC_USB3_MP_SS1_PHY_GDSC] = &gcc_usb3_mp_ss1_phy_gdsc,
+ [GCC_USB4_0_GDSC] = &gcc_usb4_0_gdsc,
+ [GCC_USB4_1_GDSC] = &gcc_usb4_1_gdsc,
+ [GCC_USB4_2_GDSC] = &gcc_usb4_2_gdsc,
+ [GCC_USB_0_PHY_GDSC] = &gcc_usb_0_phy_gdsc,
+ [GCC_USB_1_PHY_GDSC] = &gcc_usb_1_phy_gdsc,
+ [GCC_USB_2_PHY_GDSC] = &gcc_usb_2_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_x1e80100_resets[] = {
+ [GCC_AV1E_BCR] = { 0x4a000 },
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_0_TUNNEL_BCR] = { 0xa0000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e024 },
+ [GCC_PCIE_1_TUNNEL_BCR] = { 0x2c000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0xa5014 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0xa5020 },
+ [GCC_PCIE_2_PHY_BCR] = { 0xa501c },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0xa5028 },
+ [GCC_PCIE_2_TUNNEL_BCR] = { 0x13000 },
+ [GCC_PCIE_3_BCR] = { 0x58000 },
+ [GCC_PCIE_3_LINK_DOWN_BCR] = { 0xab014 },
+ [GCC_PCIE_3_NOCSR_COM_PHY_BCR] = { 0xab020 },
+ [GCC_PCIE_3_PHY_BCR] = { 0xab01c },
+ [GCC_PCIE_3_PHY_NOCSR_COM_PHY_BCR] = { 0xab024 },
+ [GCC_PCIE_4_BCR] = { 0x6b000 },
+ [GCC_PCIE_4_LINK_DOWN_BCR] = { 0xb3014 },
+ [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0xb3020 },
+ [GCC_PCIE_4_PHY_BCR] = { 0xb301c },
+ [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0xb3028 },
+ [GCC_PCIE_5_BCR] = { 0x2f000 },
+ [GCC_PCIE_5_LINK_DOWN_BCR] = { 0xaa014 },
+ [GCC_PCIE_5_NOCSR_COM_PHY_BCR] = { 0xaa020 },
+ [GCC_PCIE_5_PHY_BCR] = { 0xaa01c },
+ [GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR] = { 0xaa028 },
+ [GCC_PCIE_6A_BCR] = { 0x31000 },
+ [GCC_PCIE_6A_LINK_DOWN_BCR] = { 0xac014 },
+ [GCC_PCIE_6A_NOCSR_COM_PHY_BCR] = { 0xac020 },
+ [GCC_PCIE_6A_PHY_BCR] = { 0xac01c },
+ [GCC_PCIE_6A_PHY_NOCSR_COM_PHY_BCR] = { 0xac024 },
+ [GCC_PCIE_6B_BCR] = { 0x8d000 },
+ [GCC_PCIE_6B_LINK_DOWN_BCR] = { 0xb5014 },
+ [GCC_PCIE_6B_NOCSR_COM_PHY_BCR] = { 0xb5020 },
+ [GCC_PCIE_6B_PHY_BCR] = { 0xb501c },
+ [GCC_PCIE_6B_PHY_NOCSR_COM_PHY_BCR] = { 0xb5024 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0xa4000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x42000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_HS0_MP_BCR] = { 0x1200c },
+ [GCC_QUSB2PHY_HS1_MP_BCR] = { 0x12010 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_QUSB2PHY_TERT_BCR] = { 0x12008 },
+ [GCC_QUSB2PHY_USB20_HS_BCR] = { 0x12014 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB20_PRIM_BCR] = { 0x29000 },
+ [GCC_USB30_MP_BCR] = { 0x17000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB30_SEC_BCR] = { 0xa1000 },
+ [GCC_USB30_TERT_BCR] = { 0xa2000 },
+ [GCC_USB3_MP_SS0_PHY_BCR] = { 0x19008 },
+ [GCC_USB3_MP_SS1_PHY_BCR] = { 0x54008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x2a000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0xa3000 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x19000 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x54000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x2a004 },
+ [GCC_USB3PHY_PHY_TERT_BCR] = { 0xa3004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x19004 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54004 },
+ [GCC_USB4_0_BCR] = { 0x9f000 },
+ [GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0x50010 },
+ [GCC_USB4_1_DP0_PHY_SEC_BCR] = { 0x2a010 },
+ [GCC_USB4_2_DP0_PHY_TERT_BCR] = { 0xa3010 },
+ [GCC_USB4_1_BCR] = { 0x2b000 },
+ [GCC_USB4_2_BCR] = { 0x11000 },
+ [GCC_USB_0_PHY_BCR] = { 0x50020 },
+ [GCC_USB_1_PHY_BCR] = { 0x2a020 },
+ [GCC_USB_2_PHY_BCR] = { 0xa3020 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
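+/*
+ * These QUP serial-engine RCGs support hardware Dynamic Frequency Switching
+ * (DFS); qcom_cc_register_rcg_dfs() populates their frequency tables from
+ * the DFS registers at probe time.
+ */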
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static const struct regmap_config gcc_x1e80100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_x1e80100_desc = {
+ .config = &gcc_x1e80100_regmap_config,
+ .clks = gcc_x1e80100_clocks,
+ .num_clks = ARRAY_SIZE(gcc_x1e80100_clocks),
+ .resets = gcc_x1e80100_resets,
+ .num_resets = ARRAY_SIZE(gcc_x1e80100_resets),
+ .gdscs = gcc_x1e80100_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_x1e80100_gdscs),
+};
+
+static const struct of_device_id gcc_x1e80100_match_table[] = {
+ { .compatible = "qcom,x1e80100-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_x1e80100_match_table);
+
+static int gcc_x1e80100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_x1e80100_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+	/* Keep the critical clocks always on */
+ regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0)); /* gcc_camera_ahb_clk */
+ regmap_update_bits(regmap, 0x26028, BIT(0), BIT(0)); /* gcc_camera_xo_clk */
+ regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0)); /* gcc_disp_ahb_clk */
+ regmap_update_bits(regmap, 0x27018, BIT(0), BIT(0)); /* gcc_disp_xo_clk */
+ regmap_update_bits(regmap, 0x32004, BIT(0), BIT(0)); /* gcc_video_ahb_clk */
+ regmap_update_bits(regmap, 0x32030, BIT(0), BIT(0)); /* gcc_video_xo_clk */
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); /* gcc_gpu_cfg_ahb_clk */
+
+ /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
+ regmap_write(regmap, 0x52224, 0x0);
+
+ return qcom_cc_really_probe(pdev, &gcc_x1e80100_desc, regmap);
+}
+
+static struct platform_driver gcc_x1e80100_driver = {
+ .probe = gcc_x1e80100_probe,
+ .driver = {
+ .name = "gcc-x1e80100",
+ .of_match_table = gcc_x1e80100_match_table,
+ },
+};
+
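+/*
+ * Register at subsys_initcall time so the GCC is available before the
+ * consumer devices that depend on its clocks are probed.
+ */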
+static int __init gcc_x1e80100_init(void)
+{
+ return platform_driver_register(&gcc_x1e80100_driver);
+}
+subsys_initcall(gcc_x1e80100_init);
+
+static void __exit gcc_x1e80100_exit(void)
+{
+ platform_driver_unregister(&gcc_x1e80100_driver);
+}
+module_exit(gcc_x1e80100_exit);
+
+MODULE_DESCRIPTION("QTI GCC X1E80100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sm8650.c b/drivers/clk/qcom/gpucc-sm8650.c
new file mode 100644
index 000000000..03307e482
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sm8650.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8650-gpucc.h>
+#include <dt-bindings/reset/qcom,sm8650-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2100000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x20,
+ .alpha = 0x4aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1b,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
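+/* Shared RCGs (clk_rcg2_shared_ops) are parked on a safe source while disabled. */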
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(260000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(625000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x9160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_demet_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_demet_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x90a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_rdvm_clk = {
+ .halt_reg = 0x90c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_rdvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x90d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_ff_clk = {
+ .halt_reg = 0x90c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_dpm_clk = {
+ .halt_reg = 0x9164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_dpm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x9168,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
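+/*
+ * The GX power domain is normally brought up by the GMU firmware, so the
+ * power_on callback (gdsc_gx_do_nothing_enable) only updates the genpd
+ * bookkeeping without touching the GDSCR.
+ */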
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .resets = (unsigned int []){ GPUCC_GPU_CC_GX_BCR,
+ GPUCC_GPU_CC_ACD_BCR,
+ GPUCC_GPU_CC_GX_ACD_IROOT_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sm8650_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DEMET_CLK] = &gpu_cc_demet_clk.clkr,
+ [GPU_CC_DPM_CLK] = &gpu_cc_dpm_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_FF_CLK] = &gpu_cc_gx_ff_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_RDVM_CLK] = &gpu_cc_gx_gfx3d_rdvm_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_DIV_CLK_SRC] = &gpu_cc_hub_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sm8650_resets[] = {
+ [GPUCC_GPU_CC_XO_BCR] = { 0x9000 },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+ [GPUCC_GPU_CC_CX_BCR] = { 0x9104 },
+ [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPUCC_GPU_CC_FF_BCR] = { 0x9470 },
+ [GPUCC_GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPUCC_GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c },
+};
+
+static struct gdsc *gpu_cc_sm8650_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sm8650_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sm8650_desc = {
+ .config = &gpu_cc_sm8650_regmap_config,
+ .clks = gpu_cc_sm8650_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sm8650_clocks),
+ .resets = gpu_cc_sm8650_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sm8650_resets),
+ .gdscs = gpu_cc_sm8650_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sm8650_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,sm8650-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sm8650_match_table);
+
+static int gpu_cc_sm8650_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sm8650_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sm8650_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sm8650_driver = {
+ .probe = gpu_cc_sm8650_probe,
+ .driver = {
+ .name = "sm8650-gpucc",
+ .of_match_table = gpu_cc_sm8650_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sm8650_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC SM8650 Driver");
+MODULE_LICENSE("GPL");
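The probe above follows the usual Qualcomm clock-controller shape: map the regmap, statically configure both Lucid OLE PLLs, then let qcom_cc_really_probe() register every clock, reset and GDSC from the descriptor in one pass. For the consumer side, here is a minimal sketch of how a driver would pick up one of the clocks registered above through the common clock framework; the "gmu" connection name is only an assumption for illustration, real consumers use the names from their devicetree binding.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: look up and enable a clock registered above. */
static int example_enable_gpu_clk(struct device *dev)
{
	struct clk *clk;
	int ret;

	/* Resolved through the consumer's DT "clocks"/"clock-names". */
	clk = devm_clk_get(dev, "gmu");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... use the hardware ... */

	clk_disable_unprepare(clk);
	return 0;
}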
diff --git a/drivers/clk/qcom/tcsrcc-sm8650.c b/drivers/clk/qcom/tcsrcc-sm8650.c
new file mode 100644
index 000000000..11c7d6df4
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-sm8650.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8650-tcsr.h>
+
+#include "clk-branch.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_pcie_0_clkref_en = {
+ .halt_reg = 0x31100,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x31100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_1_clkref_en = {
+ .halt_reg = 0x31114,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x31114,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_pcie_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_clkref_en = {
+ .halt_reg = 0x31110,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x31110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_ufs_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_pad_clkref_en = {
+ .halt_reg = 0x31104,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x31104,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_ufs_pad_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_clkref_en = {
+ .halt_reg = 0x31118,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x31118,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_clkref_en = {
+ .halt_reg = 0x31108,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x31108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_usb3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_sm8650_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_UFS_CLKREF_EN] = &tcsr_ufs_clkref_en.clkr,
+ [TCSR_UFS_PAD_CLKREF_EN] = &tcsr_ufs_pad_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_sm8650_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3b000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_sm8650_desc = {
+ .config = &tcsr_cc_sm8650_regmap_config,
+ .clks = tcsr_cc_sm8650_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sm8650_clocks),
+};
+
+static const struct of_device_id tcsr_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,sm8650-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_sm8650_match_table);
+
+static int tcsr_cc_sm8650_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_sm8650_desc);
+}
+
+static struct platform_driver tcsr_cc_sm8650_driver = {
+ .probe = tcsr_cc_sm8650_probe,
+ .driver = {
+ .name = "tcsr_cc-sm8650",
+ .of_match_table = tcsr_cc_sm8650_match_table,
+ },
+};
+
+static int __init tcsr_cc_sm8650_init(void)
+{
+ return platform_driver_register(&tcsr_cc_sm8650_driver);
+}
+subsys_initcall(tcsr_cc_sm8650_init);
+
+static void __exit tcsr_cc_sm8650_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_sm8650_driver);
+}
+module_exit(tcsr_cc_sm8650_exit);
+
+MODULE_DESCRIPTION("QTI TCSRCC SM8650 Driver");
+MODULE_LICENSE("GPL");
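One detail worth noting against the GPU clock controller above: this driver registers at subsys_initcall() instead of using module_platform_driver(), so when built in, the clkref gates come up before the PCIe, UFS and USB PHY drivers probe. For comparison, a sketch of roughly what module_platform_driver(tcsr_cc_sm8650_driver) would have expanded to, hooked to the later device-level initcall:

static int __init tcsr_cc_sm8650_driver_init(void)
{
	return platform_driver_register(&tcsr_cc_sm8650_driver);
}
module_init(tcsr_cc_sm8650_driver_init);	/* device_initcall when built in */

static void __exit tcsr_cc_sm8650_driver_exit(void)
{
	platform_driver_unregister(&tcsr_cc_sm8650_driver);
}
module_exit(tcsr_cc_sm8650_driver_exit);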
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
index 5579463f7..f1456eaa8 100644
--- a/drivers/clk/qcom/videocc-sm8150.c
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,videocc-sm8150.h>
@@ -215,6 +216,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = {
static const struct qcom_reset_map video_cc_sm8150_resets[] = {
[VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
+ [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x870 },
+ [VIDEO_CC_MVS1_BCR] = { 0x8b0 },
+ [VIDEO_CC_MVSC_BCR] = { 0x810 },
};
static const struct qcom_cc_desc video_cc_sm8150_desc = {
@@ -236,17 +241,32 @@ MODULE_DEVICE_TABLE(of, video_cc_sm8150_match_table);
static int video_cc_sm8150_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
regmap = qcom_cc_map(pdev, &video_cc_sm8150_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put_sync(&pdev->dev);
return PTR_ERR(regmap);
+ }
clk_trion_pll_configure(&video_pll0, regmap, &video_pll0_config);
/* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
regmap_update_bits(regmap, 0x984, 0x1, 0x1);
- return qcom_cc_really_probe(pdev, &video_cc_sm8150_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &video_cc_sm8150_desc, regmap);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
}
static struct platform_driver video_cc_sm8150_driver = {
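The probe rework above is the standard runtime-PM bracket around register access: devm_pm_runtime_enable() arranges automatic disable on unbind, pm_runtime_resume_and_get() powers the domain up before the regmap is touched, and pm_runtime_put_sync() drops the reference on every exit path, success or failure. Stripped of the videocc specifics, the pattern is (all names illustrative):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_touch_registers(struct platform_device *pdev)
{
	return 0;	/* stand-in for the real regmap work */
}

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);	/* auto-disabled on unbind */
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);	/* power up before MMIO */
	if (ret)
		return ret;

	ret = example_touch_registers(pdev);

	pm_runtime_put_sync(&pdev->dev);		/* balance the get above */
	return ret;
}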
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 7999faa9a..c4b1938db 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -193,6 +193,8 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("msi3", 621, R8A779G0_CLK_MSO),
DEF_MOD("msi4", 622, R8A779G0_CLK_MSO),
DEF_MOD("msi5", 623, R8A779G0_CLK_MSO),
+ DEF_MOD("pciec0", 624, R8A779G0_CLK_S0D2_HSC),
+ DEF_MOD("pciec1", 625, R8A779G0_CLK_S0D2_HSC),
DEF_MOD("pwm", 628, R8A779G0_CLK_SASYNCPERD4),
DEF_MOD("rpc-if", 629, R8A779G0_CLK_RPCD2),
DEF_MOD("scif0", 702, R8A779G0_CLK_SASYNCPERD4),
@@ -236,6 +238,7 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
};
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 4394cb241..2582ba952 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -181,13 +181,16 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_G3S_DIV("P3", R9A08G045_CLK_P3, CLK_PLL3_DIV2_4, DIVPL3C, G3S_DIVPL3C_STS,
dtable_1_32, 0, 0, 0, NULL),
DEF_FIXED("P3_DIV2", CLK_P3_DIV2, R9A08G045_CLK_P3, 1, 2),
+ DEF_FIXED("ZT", R9A08G045_CLK_ZT, CLK_PLL3_DIV2_8, 1, 1),
DEF_FIXED("S0", R9A08G045_CLK_S0, CLK_SEL_PLL4, 1, 2),
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
DEF_FIXED("OSC2", R9A08G045_OSCCLK2, CLK_EXTAL, 1, 3),
+ DEF_FIXED("HP", R9A08G045_CLK_HP, CLK_PLL6, 1, 2),
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0),
+ DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
@@ -202,6 +205,12 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
+ DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0),
+ DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
+ DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
+ DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1),
+ DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1),
+ DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9),
DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
};
@@ -209,9 +218,12 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_GIC600_GICRESET_N, 0x814, 0),
DEF_RST(R9A08G045_GIC600_DBG_GICRESET_N, 0x814, 1),
+ DEF_RST(R9A08G045_IA55_RESETN, 0x818, 0),
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_ETH0_RST_HW_N, 0x87c, 0),
+ DEF_RST(R9A08G045_ETH1_RST_HW_N, 0x87c, 1),
DEF_RST(R9A08G045_SCIF0_RST_SYSTEM_N, 0x884, 0),
DEF_RST(R9A08G045_GPIO_RSTN, 0x898, 0),
DEF_RST(R9A08G045_GPIO_PORT_RESETN, 0x898, 1),
@@ -220,6 +232,7 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_GIC600_GICCLK,
+ MOD_CLK_BASE + R9A08G045_IA55_PCLK,
MOD_CLK_BASE + R9A08G045_IA55_CLK,
MOD_CLK_BASE + R9A08G045_DMAC_ACLK,
};
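The new Ethernet clocks use DEF_COUPLED: eth0_axi and eth0_chi share bit 0 of register 0x57c, eth1_axi and eth1_chi share bit 1, so two logical clocks sit behind a single hardware gate and the driver must only clear the bit once both halves are off. A minimal sketch of that shared-gate idea, assuming simple reference counting (the real rzg2l code also serializes access with the CPG lock):

#include <linux/types.h>

struct coupled_gate {
	unsigned int users;	/* coupled clocks currently enabled */
};

static void hw_write_gate(bool on)
{
	/* stand-in for setting or clearing the shared enable bit */
}

static void coupled_enable(struct coupled_gate *g)
{
	if (g->users++ == 0)
		hw_write_gate(true);	/* first user turns the gate on */
}

static void coupled_disable(struct coupled_gate *g)
{
	if (--g->users == 0)
		hw_write_gate(false);	/* last user turns it off */
}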
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index db713e152..b786ddc9a 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -78,7 +78,9 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
RK3036_PLL_RATE(135000000, 2, 45, 4, 1, 1, 0),
+ RK3036_PLL_RATE(126400000, 1, 79, 5, 3, 1, 0),
RK3036_PLL_RATE(119000000, 3, 119, 4, 2, 1, 0),
+ RK3036_PLL_RATE(115200000, 1, 24, 5, 1, 1, 0),
RK3036_PLL_RATE(108000000, 2, 45, 5, 1, 1, 0),
RK3036_PLL_RATE(101000000, 1, 101, 6, 4, 1, 0),
RK3036_PLL_RATE(100000000, 1, 150, 6, 6, 1, 0),
@@ -1593,6 +1595,7 @@ static const char *const rk3568_cru_critical_clocks[] __initconst = {
"hclk_php",
"pclk_php",
"hclk_usb",
+ "pclk_usb",
"hclk_vo",
};
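The two new rate-table entries are easy to check by hand, assuming the usual 24 MHz xin24m reference and the integer-mode (dsmpd = 1) RK3036 PLL relation rate = fref * fbdiv / (refdiv * postdiv1 * postdiv2), with the macro arguments ordered (rate, refdiv, fbdiv, postdiv1, postdiv2, dsmpd, frac):

#include <assert.h>

static unsigned long rk3036_pll_rate(unsigned long fref, unsigned int refdiv,
				     unsigned int fbdiv, unsigned int postdiv1,
				     unsigned int postdiv2)
{
	return fref * fbdiv / (refdiv * postdiv1 * postdiv2);
}

int main(void)
{
	/* RK3036_PLL_RATE(126400000, 1, 79, 5, 3, 1, 0) */
	assert(rk3036_pll_rate(24000000UL, 1, 79, 5, 3) == 126400000UL);
	/* RK3036_PLL_RATE(115200000, 1, 24, 5, 1, 1, 0) */
	assert(rk3036_pll_rate(24000000UL, 1, 24, 5, 1) == 115200000UL);
	return 0;
}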
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index ebbeacabe..3056944a5 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index fc9f67a3b..0164bd9ad 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -11,10 +11,10 @@
#include "clk.h"
/**
- * struct exynos_cpuclk_data: config data to setup cpu clocks.
- * @prate: frequency of the primary parent clock (in KHz).
- * @div0: value to be programmed in the div_cpu0 register.
- * @div1: value to be programmed in the div_cpu1 register.
+ * struct exynos_cpuclk_cfg_data - config data to setup cpu clocks
+ * @prate: frequency of the primary parent clock (in KHz)
+ * @div0: value to be programmed in the div_cpu0 register
+ * @div1: value to be programmed in the div_cpu1 register
*
* This structure holds the divider configuration data for dividers in the CPU
* clock domain. The parent frequency at which these divider values are valid is
@@ -29,17 +29,17 @@ struct exynos_cpuclk_cfg_data {
};
/**
- * struct exynos_cpuclk: information about clock supplied to a CPU core.
- * @hw: handle between CCF and CPU clock.
- * @alt_parent: alternate parent clock to use when switching the speed
- * of the primary parent clock.
- * @ctrl_base: base address of the clock controller.
- * @lock: cpu clock domain register access lock.
- * @cfg: cpu clock rate configuration data.
- * @num_cfgs: number of array elements in @cfg array.
- * @clk_nb: clock notifier registered for changes in clock speed of the
- * primary parent clock.
- * @flags: configuration flags for the CPU clock.
+ * struct exynos_cpuclk - information about clock supplied to a CPU core
+ * @hw: handle between CCF and CPU clock
+ * @alt_parent: alternate parent clock to use when switching the speed
+ * of the primary parent clock
+ * @ctrl_base: base address of the clock controller
+ * @lock: cpu clock domain register access lock
+ * @cfg: cpu clock rate configuration data
+ * @num_cfgs: number of array elements in @cfg array
+ * @clk_nb: clock notifier registered for changes in clock speed of the
+ * primary parent clock
+ * @flags: configuration flags for the CPU clock
*
* This structure holds information required for programming the CPU clock for
* various clock speeds.
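The hunk above is purely a kernel-doc fix: scripts/kernel-doc expects the summary line in the form "struct name - description" (a hyphen, not a colon), and the patch also drops the trailing periods on member lines to match the style used elsewhere. A minimal well-formed example for reference:

/**
 * struct example_cfg - one-line summary joined by a hyphen
 * @rate: target rate in Hz
 * @flags: behaviour flags
 *
 * Longer free-form description paragraphs follow the member list.
 */
struct example_cfg {
	unsigned long rate;
	unsigned int flags;
};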
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
new file mode 100644
index 000000000..782993951
--- /dev/null
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -0,0 +1,2518 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Common Clock Framework support for GS101.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/google,gs101.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the highest clock ID plus one */
+#define CLKS_NR_TOP (CLK_GOUT_CMU_TPU_UART + 1)
+#define CLKS_NR_APM (CLK_APM_PLL_DIV16_APM + 1)
+#define CLKS_NR_MISC (CLK_GOUT_MISC_XIU_D_MISC_ACLK + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x1e080000) */
+
+#define PLL_LOCKTIME_PLL_SHARED0 0x0000
+#define PLL_LOCKTIME_PLL_SHARED1 0x0004
+#define PLL_LOCKTIME_PLL_SHARED2 0x0008
+#define PLL_LOCKTIME_PLL_SHARED3 0x000c
+#define PLL_LOCKTIME_PLL_SPARE 0x0010
+#define PLL_CON0_PLL_SHARED0 0x0100
+#define PLL_CON1_PLL_SHARED0 0x0104
+#define PLL_CON2_PLL_SHARED0 0x0108
+#define PLL_CON3_PLL_SHARED0 0x010c
+#define PLL_CON4_PLL_SHARED0 0x0110
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define PLL_CON1_PLL_SHARED1 0x0144
+#define PLL_CON2_PLL_SHARED1 0x0148
+#define PLL_CON3_PLL_SHARED1 0x014c
+#define PLL_CON4_PLL_SHARED1 0x0150
+#define PLL_CON0_PLL_SHARED2 0x0180
+#define PLL_CON1_PLL_SHARED2 0x0184
+#define PLL_CON2_PLL_SHARED2 0x0188
+#define PLL_CON3_PLL_SHARED2 0x018c
+#define PLL_CON4_PLL_SHARED2 0x0190
+#define PLL_CON0_PLL_SHARED3 0x01c0
+#define PLL_CON1_PLL_SHARED3 0x01c4
+#define PLL_CON2_PLL_SHARED3 0x01c8
+#define PLL_CON3_PLL_SHARED3 0x01cc
+#define PLL_CON4_PLL_SHARED3 0x01d0
+#define PLL_CON0_PLL_SPARE 0x0200
+#define PLL_CON1_PLL_SPARE 0x0204
+#define PLL_CON2_PLL_SPARE 0x0208
+#define PLL_CON3_PLL_SPARE 0x020c
+#define PLL_CON4_PLL_SPARE 0x0210
+#define CMU_CMU_TOP_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_CMU_CMU_TOP_CLKOUT0 0x0810
+#define CMU_HCHGEN_CLKMUX_CMU_BOOST 0x0840
+#define CMU_HCHGEN_CLKMUX_TOP_BOOST 0x0844
+#define CMU_HCHGEN_CLKMUX 0x0850
+#define POWER_FAIL_DETECT_PLL 0x0864
+#define EARLY_WAKEUP_FORCED_0_ENABLE 0x0870
+#define EARLY_WAKEUP_FORCED_1_ENABLE 0x0874
+#define EARLY_WAKEUP_APM_CTRL 0x0878
+#define EARLY_WAKEUP_CLUSTER0_CTRL 0x087c
+#define EARLY_WAKEUP_DPU_CTRL 0x0880
+#define EARLY_WAKEUP_CSIS_CTRL 0x0884
+#define EARLY_WAKEUP_APM_DEST 0x0890
+#define EARLY_WAKEUP_CLUSTER0_DEST 0x0894
+#define EARLY_WAKEUP_DPU_DEST 0x0898
+#define EARLY_WAKEUP_CSIS_DEST 0x089c
+#define EARLY_WAKEUP_SW_TRIG_APM 0x08c0
+#define EARLY_WAKEUP_SW_TRIG_APM_SET 0x08c4
+#define EARLY_WAKEUP_SW_TRIG_APM_CLEAR 0x08c8
+#define EARLY_WAKEUP_SW_TRIG_CLUSTER0 0x08d0
+#define EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET 0x08d4
+#define EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR 0x08d8
+#define EARLY_WAKEUP_SW_TRIG_DPU 0x08e0
+#define EARLY_WAKEUP_SW_TRIG_DPU_SET 0x08e4
+#define EARLY_WAKEUP_SW_TRIG_DPU_CLEAR 0x08e8
+#define EARLY_WAKEUP_SW_TRIG_CSIS 0x08f0
+#define EARLY_WAKEUP_SW_TRIG_CSIS_SET 0x08f4
+#define EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR 0x08f8
+#define CLK_CON_MUX_MUX_CLKCMU_BO_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUS2_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_OPTION1 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_DISP_BUS 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DNS_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_BUS 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_EH_BUS 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_G3AA_G3AA 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_BUSD 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_GLB 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_GDC_GDC0 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_GDC_GDC1 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_GDC_SCSC 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDPDBG 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_MMC_CARD 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_IPP_BUS 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_ITP_BUS 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_ITSC 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_MFC 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_MISC_BUS 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_MISC_SSS 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PDP_BUS 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PDP_VRA 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10e8
+#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10ec
+#define CLK_CON_MUX_MUX_CLKCMU_TOP_BOOST_OPTION1 0x10f0
+#define CLK_CON_MUX_MUX_CLKCMU_TOP_CMUREF 0x10f4
+#define CLK_CON_MUX_MUX_CLKCMU_TPU_BUS 0x10f8
+#define CLK_CON_MUX_MUX_CLKCMU_TPU_TPU 0x10fc
+#define CLK_CON_MUX_MUX_CLKCMU_TPU_TPUCTL 0x1100
+#define CLK_CON_MUX_MUX_CLKCMU_TPU_UART 0x1104
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x1108
+#define CLK_CON_DIV_CLKCMU_BO_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUS2_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x181c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK4 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK5 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK6 0x1828
+#define CLK_CON_DIV_CLKCMU_CIS_CLK7 0x182c
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1840
+#define CLK_CON_DIV_CLKCMU_CSIS_BUS 0x1844
+#define CLK_CON_DIV_CLKCMU_DISP_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_DNS_BUS 0x184c
+#define CLK_CON_DIV_CLKCMU_DPU_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_EH_BUS 0x1854
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1858
+#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x185c
+#define CLK_CON_DIV_CLKCMU_G3AA_G3AA 0x1860
+#define CLK_CON_DIV_CLKCMU_G3D_BUSD 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_GLB 0x1868
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x186c
+#define CLK_CON_DIV_CLKCMU_GDC_GDC0 0x1870
+#define CLK_CON_DIV_CLKCMU_GDC_GDC1 0x1874
+#define CLK_CON_DIV_CLKCMU_GDC_SCSC 0x1878
+#define CLK_CON_DIV_CLKCMU_HPM 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI0_BUS 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI0_USB31DRD 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI0_USBDPDBG 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI1_BUS 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1894
+#define CLK_CON_DIV_CLKCMU_HSI2_BUS 0x1898
+#define CLK_CON_DIV_CLKCMU_HSI2_MMC_CARD 0x189c
+#define CLK_CON_DIV_CLKCMU_HSI2_PCIE 0x18a0
+#define CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD 0x18a4
+#define CLK_CON_DIV_CLKCMU_IPP_BUS 0x18a8
+#define CLK_CON_DIV_CLKCMU_ITP_BUS 0x18ac
+#define CLK_CON_DIV_CLKCMU_MCSC_ITSC 0x18b0
+#define CLK_CON_DIV_CLKCMU_MCSC_MCSC 0x18b4
+#define CLK_CON_DIV_CLKCMU_MFC_MFC 0x18b8
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x18bc
+#define CLK_CON_DIV_CLKCMU_MISC_BUS 0x18c0
+#define CLK_CON_DIV_CLKCMU_MISC_SSS 0x18c4
+#define CLK_CON_DIV_CLKCMU_OTP 0x18c8
+#define CLK_CON_DIV_CLKCMU_PDP_BUS 0x18cc
+#define CLK_CON_DIV_CLKCMU_PDP_VRA 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18d4
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18d8
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18dc
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18e0
+#define CLK_CON_DIV_CLKCMU_TNR_BUS 0x18e4
+#define CLK_CON_DIV_CLKCMU_TPU_BUS 0x18e8
+#define CLK_CON_DIV_CLKCMU_TPU_TPU 0x18ec
+#define CLK_CON_DIV_CLKCMU_TPU_TPUCTL 0x18f0
+#define CLK_CON_DIV_CLKCMU_TPU_UART 0x18f4
+#define CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST 0x18f8
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x18fc
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x1900
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x1904
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x1908
+#define CLK_CON_DIV_PLL_SHARED0_DIV5 0x190c
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1910
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x1914
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x1918
+#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x191c
+#define CLK_CON_DIV_PLL_SHARED3_DIV2 0x1920
+#define CLK_CON_GAT_CLKCMU_BUS0_BOOST 0x2000
+#define CLK_CON_GAT_CLKCMU_BUS1_BOOST 0x2004
+#define CLK_CON_GAT_CLKCMU_BUS2_BOOST 0x2008
+#define CLK_CON_GAT_CLKCMU_CORE_BOOST 0x200c
+#define CLK_CON_GAT_CLKCMU_CPUCL0_BOOST 0x2010
+#define CLK_CON_GAT_CLKCMU_CPUCL1_BOOST 0x2014
+#define CLK_CON_GAT_CLKCMU_CPUCL2_BOOST 0x2018
+#define CLK_CON_GAT_CLKCMU_MIF_BOOST 0x201c
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_BO_BUS 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS2_BUS 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_DISP_BUS 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_DNS_BUS 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_EH_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_G3AA_G3AA 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_BUSD 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_GLB 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_GDC_GDC0 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_GDC_GDC1 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_GDC_SCSC 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDPDBG 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS 0x20b8
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_MMCCARD 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_UFS_EMBD 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_IPP_BUS 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_ITP_BUS 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_ITSC 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_MFC_MFC 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_MISC_BUS 0x20e8
+#define CLK_CON_GAT_GATE_CLKCMU_MISC_SSS 0x20ec
+#define CLK_CON_GAT_GATE_CLKCMU_PDP_BUS 0x20f0
+#define CLK_CON_GAT_GATE_CLKCMU_PDP_VRA 0x20f4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20f8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20fc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x2100
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x2104
+#define CLK_CON_GAT_GATE_CLKCMU_TNR_BUS 0x2108
+#define CLK_CON_GAT_GATE_CLKCMU_TOP_CMUREF 0x210c
+#define CLK_CON_GAT_GATE_CLKCMU_TPU_BUS 0x2110
+#define CLK_CON_GAT_GATE_CLKCMU_TPU_TPU 0x2114
+#define CLK_CON_GAT_GATE_CLKCMU_TPU_TPUCTL 0x2118
+#define CLK_CON_GAT_GATE_CLKCMU_TPU_UART 0x211c
+#define DMYQCH_CON_CMU_TOP_CMUREF_QCH 0x3000
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK0 0x3004
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK1 0x3008
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK2 0x300c
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK3 0x3010
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK4 0x3014
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK5 0x3018
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK6 0x301c
+#define DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK7 0x3020
+#define DMYQCH_CON_OTP_QCH 0x3024
+#define QUEUE_CTRL_REG_BLK_CMU_CMU_TOP 0x3c00
+#define QUEUE_ENTRY0_BLK_CMU_CMU_TOP 0x3c10
+#define QUEUE_ENTRY1_BLK_CMU_CMU_TOP 0x3c14
+#define QUEUE_ENTRY2_BLK_CMU_CMU_TOP 0x3c18
+#define QUEUE_ENTRY3_BLK_CMU_CMU_TOP 0x3c1c
+#define QUEUE_ENTRY4_BLK_CMU_CMU_TOP 0x3c20
+#define QUEUE_ENTRY5_BLK_CMU_CMU_TOP 0x3c24
+#define QUEUE_ENTRY6_BLK_CMU_CMU_TOP 0x3c28
+#define QUEUE_ENTRY7_BLK_CMU_CMU_TOP 0x3c2c
+#define MIFMIRROR_QUEUE_CTRL_REG 0x3e00
+#define MIFMIRROR_QUEUE_ENTRY0 0x3e10
+#define MIFMIRROR_QUEUE_ENTRY1 0x3e14
+#define MIFMIRROR_QUEUE_ENTRY2 0x3e18
+#define MIFMIRROR_QUEUE_ENTRY3 0x3e1c
+#define MIFMIRROR_QUEUE_ENTRY4 0x3e20
+#define MIFMIRROR_QUEUE_ENTRY5 0x3e24
+#define MIFMIRROR_QUEUE_ENTRY6 0x3e28
+#define MIFMIRROR_QUEUE_ENTRY7 0x3e2c
+#define MIFMIRROR_QUEUE_BUSY 0x3e30
+#define GENERALIO_ACD_CHANNEL_0 0x3f00
+#define GENERALIO_ACD_CHANNEL_1 0x3f04
+#define GENERALIO_ACD_CHANNEL_2 0x3f08
+#define GENERALIO_ACD_CHANNEL_3 0x3f0c
+#define GENERALIO_ACD_MASK 0x3f14
+
+static const unsigned long cmu_top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SPARE,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON1_PLL_SHARED0,
+ PLL_CON2_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON4_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON1_PLL_SHARED1,
+ PLL_CON2_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON4_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON1_PLL_SHARED2,
+ PLL_CON2_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON4_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON1_PLL_SHARED3,
+ PLL_CON2_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON4_PLL_SHARED3,
+ PLL_CON0_PLL_SPARE,
+ PLL_CON1_PLL_SPARE,
+ PLL_CON2_PLL_SPARE,
+ PLL_CON3_PLL_SPARE,
+ PLL_CON4_PLL_SPARE,
+ CMU_CMU_TOP_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_CMU_CMU_TOP_CLKOUT0,
+ CMU_HCHGEN_CLKMUX_CMU_BOOST,
+ CMU_HCHGEN_CLKMUX_TOP_BOOST,
+ CMU_HCHGEN_CLKMUX,
+ POWER_FAIL_DETECT_PLL,
+ EARLY_WAKEUP_FORCED_0_ENABLE,
+ EARLY_WAKEUP_FORCED_1_ENABLE,
+ EARLY_WAKEUP_APM_CTRL,
+ EARLY_WAKEUP_CLUSTER0_CTRL,
+ EARLY_WAKEUP_DPU_CTRL,
+ EARLY_WAKEUP_CSIS_CTRL,
+ EARLY_WAKEUP_APM_DEST,
+ EARLY_WAKEUP_CLUSTER0_DEST,
+ EARLY_WAKEUP_DPU_DEST,
+ EARLY_WAKEUP_CSIS_DEST,
+ EARLY_WAKEUP_SW_TRIG_APM,
+ EARLY_WAKEUP_SW_TRIG_APM_SET,
+ EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
+ EARLY_WAKEUP_SW_TRIG_CLUSTER0,
+ EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
+ EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
+ EARLY_WAKEUP_SW_TRIG_DPU,
+ EARLY_WAKEUP_SW_TRIG_DPU_SET,
+ EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
+ EARLY_WAKEUP_SW_TRIG_CSIS,
+ EARLY_WAKEUP_SW_TRIG_CSIS_SET,
+ EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
+ CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_OPTION1,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DISP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DNS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_EH_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL,
+ CLK_CON_MUX_MUX_CLKCMU_G3AA_G3AA,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_BUSD,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_GLB,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_GDC_GDC0,
+ CLK_CON_MUX_MUX_CLKCMU_GDC_GDC1,
+ CLK_CON_MUX_MUX_CLKCMU_GDC_SCSC,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDPDBG,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_IPP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ITP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_ITSC,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_MFC,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_MISC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MISC_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_PDP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PDP_VRA,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_TOP_BOOST_OPTION1,
+ CLK_CON_MUX_MUX_CLKCMU_TOP_CMUREF,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_TPU,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_TPUCTL,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_UART,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_BO_BUS,
+ CLK_CON_DIV_CLKCMU_BUS0_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUS2_BUS,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_CLKCMU_CIS_CLK6,
+ CLK_CON_DIV_CLKCMU_CIS_CLK7,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_CSIS_BUS,
+ CLK_CON_DIV_CLKCMU_DISP_BUS,
+ CLK_CON_DIV_CLKCMU_DNS_BUS,
+ CLK_CON_DIV_CLKCMU_DPU_BUS,
+ CLK_CON_DIV_CLKCMU_EH_BUS,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_MSCL,
+ CLK_CON_DIV_CLKCMU_G3AA_G3AA,
+ CLK_CON_DIV_CLKCMU_G3D_BUSD,
+ CLK_CON_DIV_CLKCMU_G3D_GLB,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_GDC_GDC0,
+ CLK_CON_DIV_CLKCMU_GDC_GDC1,
+ CLK_CON_DIV_CLKCMU_GDC_SCSC,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_HSI0_BUS,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_DIV_CLKCMU_HSI0_USBDPDBG,
+ CLK_CON_DIV_CLKCMU_HSI1_BUS,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_HSI2_BUS,
+ CLK_CON_DIV_CLKCMU_HSI2_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE,
+ CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_IPP_BUS,
+ CLK_CON_DIV_CLKCMU_ITP_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_ITSC,
+ CLK_CON_DIV_CLKCMU_MCSC_MCSC,
+ CLK_CON_DIV_CLKCMU_MFC_MFC,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_MISC_BUS,
+ CLK_CON_DIV_CLKCMU_MISC_SSS,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PDP_BUS,
+ CLK_CON_DIV_CLKCMU_PDP_VRA,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_TNR_BUS,
+ CLK_CON_DIV_CLKCMU_TPU_BUS,
+ CLK_CON_DIV_CLKCMU_TPU_TPU,
+ CLK_CON_DIV_CLKCMU_TPU_TPUCTL,
+ CLK_CON_DIV_CLKCMU_TPU_UART,
+ CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED0_DIV5,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_PLL_SHARED3_DIV2,
+ CLK_CON_GAT_CLKCMU_BUS0_BOOST,
+ CLK_CON_GAT_CLKCMU_BUS1_BOOST,
+ CLK_CON_GAT_CLKCMU_BUS2_BOOST,
+ CLK_CON_GAT_CLKCMU_CORE_BOOST,
+ CLK_CON_GAT_CLKCMU_CPUCL0_BOOST,
+ CLK_CON_GAT_CLKCMU_CPUCL1_BOOST,
+ CLK_CON_GAT_CLKCMU_CPUCL2_BOOST,
+ CLK_CON_GAT_CLKCMU_MIF_BOOST,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_BO_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7,
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DISP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_EH_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ CLK_CON_GAT_GATE_CLKCMU_G3AA_G3AA,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_BUSD,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_GLB,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_GDC_GDC0,
+ CLK_CON_GAT_GATE_CLKCMU_GDC_GDC1,
+ CLK_CON_GAT_GATE_CLKCMU_GDC_SCSC,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDPDBG,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_MMCCARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_ITSC,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC,
+ CLK_CON_GAT_GATE_CLKCMU_MFC_MFC,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_MISC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MISC_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_PDP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PDP_VRA,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_TOP_CMUREF,
+ CLK_CON_GAT_GATE_CLKCMU_TPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_TPU_TPU,
+ CLK_CON_GAT_GATE_CLKCMU_TPU_TPUCTL,
+ CLK_CON_GAT_GATE_CLKCMU_TPU_UART,
+ DMYQCH_CON_CMU_TOP_CMUREF_QCH,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK0,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK1,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK2,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK3,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK4,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK5,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK6,
+ DMYQCH_CON_DFTMUX_CMU_QCH_CIS_CLK7,
+ DMYQCH_CON_OTP_QCH,
+ QUEUE_CTRL_REG_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY0_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY1_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY2_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY3_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY4_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY5_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY6_BLK_CMU_CMU_TOP,
+ QUEUE_ENTRY7_BLK_CMU_CMU_TOP,
+ MIFMIRROR_QUEUE_CTRL_REG,
+ MIFMIRROR_QUEUE_ENTRY0,
+ MIFMIRROR_QUEUE_ENTRY1,
+ MIFMIRROR_QUEUE_ENTRY2,
+ MIFMIRROR_QUEUE_ENTRY3,
+ MIFMIRROR_QUEUE_ENTRY4,
+ MIFMIRROR_QUEUE_ENTRY5,
+ MIFMIRROR_QUEUE_ENTRY6,
+ MIFMIRROR_QUEUE_ENTRY7,
+ MIFMIRROR_QUEUE_BUSY,
+ GENERALIO_ACD_CHANNEL_0,
+ GENERALIO_ACD_CHANNEL_1,
+ GENERALIO_ACD_CHANNEL_2,
+ GENERALIO_ACD_CHANNEL_3,
+ GENERALIO_ACD_MASK,
+};
+
+static const struct samsung_pll_clock cmu_top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_0517x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0,
+ NULL),
+ PLL(pll_0517x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1,
+ NULL),
+ PLL(pll_0518x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2,
+ NULL),
+ PLL(pll_0518x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3,
+ NULL),
+ PLL(pll_0518x, CLK_FOUT_SPARE_PLL, "fout_spare_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SPARE, PLL_CON3_PLL_SPARE,
+ NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_spare_p) = { "oscclk", "fout_spare_pll" };
+PNAME(mout_cmu_bo_bus_p) = { "fout_shared2_pll", "dout_cmu_shared0_div3",
+ "fout_shared3_pll", "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_bus0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2",
+ "fout_spare_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_bus1_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_bus2_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div5", "fout_spare_pll" };
+PNAME(mout_cmu_cis_clk0_7_p) = { "oscclk", "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cmu_boost_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2" };
+PNAME(mout_cmu_cmu_boost_option1_p) = { "dout_cmu_cmu_boost",
+ "gout_cmu_boost_option1" };
+PNAME(mout_cmu_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div5", "fout_spare_pll" };
+PNAME(mout_cmu_cpucl0_dbg_p) = { "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2", "fout_spare_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "fout_shared1_pll", "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2", "fout_shared2_pll",
+ "fout_shared3_pll", "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3", "fout_spare_pll" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "fout_shared1_pll", "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2", "fout_shared2_pll",
+ "fout_shared3_pll", "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3", "fout_spare_pll" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "fout_shared1_pll", "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2", "fout_shared2_pll",
+ "fout_shared3_pll", "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3", "fout_spare_pll" };
+PNAME(mout_cmu_csis_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_disp_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_dns_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_dpu_p) = { "dout_cmu_shared0_div3",
+ "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_eh_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div5", "fout_spare_pll" };
+PNAME(mout_cmu_g2d_g2d_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_g2d_mscl_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2",
+ "fout_spare_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g3aa_g3aa_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_g3d_busd_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4", "fout_spare_pll" };
+PNAME(mout_cmu_g3d_glb_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4", "fout_spare_pll" };
+PNAME(mout_cmu_g3d_switch_p) = { "fout_shared2_pll", "dout_cmu_shared0_div3",
+ "fout_shared3_pll", "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "fout_spare_pll", "fout_spare_pll"};
+PNAME(mout_cmu_gdc_gdc0_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_gdc_gdc1_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_gdc_scsc_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_hpm_p) = { "oscclk", "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2",
+ "fout_spare_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_hsi0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2", "fout_spare_pll" };
+PNAME(mout_cmu_hsi0_usb31drd_p) = { "oscclk", "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi0_usbdpdbg_p) = { "oscclk", "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2",
+ "fout_spare_pll" };
+PNAME(mout_cmu_hsi1_pcie_p) = { "oscclk", "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi2_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2",
+ "fout_spare_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_hsi2_mmc_card_p) = { "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div4", "fout_spare_pll" };
+PNAME(mout_cmu_hsi2_pcie0_p) = { "oscclk", "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi2_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2", "fout_spare_pll" };
+PNAME(mout_cmu_ipp_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_itp_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_mcsc_itsc_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_mcsc_mcsc_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_mfc_mfc_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2", "fout_spare_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mif_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared0_div5", "fout_spare_pll" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll", "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "dout_cmu_shared0_div3",
+ "fout_shared3_pll", "fout_spare_pll" };
+PNAME(mout_cmu_misc_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_misc_sss_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_pdp_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_pdp_vra_p) = { "fout_shared2_pll", "dout_cmu_shared0_div3",
+ "fout_shared3_pll", "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_peric0_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_peric1_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_tnr_bus_p) = { "dout_cmu_shared0_div3", "fout_shared3_pll",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "fout_spare_pll", "oscclk" };
+PNAME(mout_cmu_top_boost_option1_p) = { "oscclk",
+ "gout_cmu_boost_option1" };
+PNAME(mout_cmu_top_cmuref_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2" };
+PNAME(mout_cmu_tpu_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "fout_spare_pll" };
+PNAME(mout_cmu_tpu_tpu_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4", "fout_spare_pll" };
+PNAME(mout_cmu_tpu_tpuctl_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll", "fout_shared3_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4", "fout_spare_pll" };
+PNAME(mout_cmu_tpu_uart_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared3_div2", "fout_spare_pll" };
+PNAME(mout_cmu_cmuref_p) = { "mout_cmu_top_boost_option1",
+ "dout_cmu_cmuref" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON0_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ *
+ * For gates, remove _UID, _BLK, _IPCLKPORT and _RSTNSYNC
+ */
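+
+/*
+ * For example, applying that strategy to two registers defined in this
+ * file:
+ * CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS -> CLK_MOUT_CMU_BUS0_BUS, "mout_cmu_bus0_bus"
+ * CLK_CON_DIV_CLKCMU_BO_BUS -> CLK_DOUT_CMU_BO_BUS, "dout_cmu_bo_bus"
+ */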
+
+static const struct samsung_mux_clock cmu_top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SPARE, "mout_pll_spare", mout_pll_spare_p,
+ PLL_CON0_PLL_SPARE, 4, 1),
+ MUX(CLK_MOUT_CMU_BO_BUS, "mout_cmu_bo_bus", mout_cmu_bo_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BO_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_BUS0_BUS, "mout_cmu_bus0_bus", mout_cmu_bus0_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus", mout_cmu_bus1_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_BUS2_BUS, "mout_cmu_bus2_bus", mout_cmu_bus2_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUS2_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK4, "mout_cmu_cis_clk4", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK5, "mout_cmu_cis_clk5", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK6, "mout_cmu_cis_clk6", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK6, 0, 3),
+ MUX(CLK_MOUT_CMU_CIS_CLK7, "mout_cmu_cis_clk7", mout_cmu_cis_clk0_7_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK7, 0, 3),
+ MUX(CLK_MOUT_CMU_CMU_BOOST, "mout_cmu_cmu_boost", mout_cmu_cmu_boost_p,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_BOOST_OPTION1, "mout_cmu_boost_option1",
+ mout_cmu_cmu_boost_option1_p,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_OPTION1, 0, 1),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_cmu_core_bus", mout_cmu_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG, "mout_cmu_cpucl0_dbg",
+ mout_cmu_cpucl0_dbg_p, CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_CSIS_BUS, "mout_cmu_csis_bus", mout_cmu_csis_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DISP_BUS, "mout_cmu_disp_bus", mout_cmu_disp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DISP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DNS_BUS, "mout_cmu_dns_bus", mout_cmu_dns_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DNS_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus", mout_cmu_dpu_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_EH_BUS, "mout_cmu_eh_bus", mout_cmu_eh_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_EH_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d", mout_cmu_g2d_g2d_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 3),
+ MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl", mout_cmu_g2d_mscl_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 3),
+ MUX(CLK_MOUT_CMU_G3AA_G3AA, "mout_cmu_g3aa_g3aa", mout_cmu_g3aa_g3aa_p,
+ CLK_CON_MUX_MUX_CLKCMU_G3AA_G3AA, 0, 3),
+ MUX(CLK_MOUT_CMU_G3D_BUSD, "mout_cmu_g3d_busd", mout_cmu_g3d_busd_p,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_BUSD, 0, 3),
+ MUX(CLK_MOUT_CMU_G3D_GLB, "mout_cmu_g3d_glb", mout_cmu_g3d_glb_p,
+ CLK_CON_MUX_MUX_CLKCMU_G3D_GLB, 0, 3),
+ MUX(CLK_MOUT_CMU_G3D_SWITCH, "mout_cmu_g3d_switch",
+ mout_cmu_g3d_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_GDC_GDC0, "mout_cmu_gdc_gdc0", mout_cmu_gdc_gdc0_p,
+ CLK_CON_MUX_MUX_CLKCMU_GDC_GDC0, 0, 3),
+ MUX(CLK_MOUT_CMU_GDC_GDC1, "mout_cmu_gdc_gdc1", mout_cmu_gdc_gdc1_p,
+ CLK_CON_MUX_MUX_CLKCMU_GDC_GDC1, 0, 3),
+ MUX(CLK_MOUT_CMU_GDC_SCSC, "mout_cmu_gdc_scsc", mout_cmu_gdc_scsc_p,
+ CLK_CON_MUX_MUX_CLKCMU_GDC_SCSC, 0, 3),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm", mout_cmu_hpm_p,
+ CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus", mout_cmu_hsi0_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI0_DPGTC, "mout_cmu_hsi0_dpgtc",
+ mout_cmu_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USB31DRD, "mout_cmu_hsi0_usb31drd",
+ mout_cmu_hsi0_usb31drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_HSI0_USBDPDBG, "mout_cmu_hsi0_usbdpdbg",
+ mout_cmu_hsi0_usbdpdbg_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDPDBG,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus", mout_cmu_hsi1_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI1_PCIE, "mout_cmu_hsi1_pcie", mout_cmu_hsi1_pcie_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus", mout_cmu_hsi2_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI2_MMC_CARD, "mout_cmu_hsi2_mmc_card",
+ mout_cmu_hsi2_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie", mout_cmu_hsi2_pcie0_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_UFS_EMBD, "mout_cmu_hsi2_ufs_embd",
+ mout_cmu_hsi2_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_UFS_EMBD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_IPP_BUS, "mout_cmu_ipp_bus", mout_cmu_ipp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IPP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_ITP_BUS, "mout_cmu_itp_bus", mout_cmu_itp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_ITP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_ITSC, "mout_cmu_mcsc_itsc", mout_cmu_mcsc_itsc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_ITSC, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_MCSC, "mout_cmu_mcsc_mcsc", mout_cmu_mcsc_mcsc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_MCSC, 0, 3),
+ MUX(CLK_MOUT_CMU_MFC_MFC, "mout_cmu_mfc_mfc", mout_cmu_mfc_mfc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, 0, 3),
+ MUX(CLK_MOUT_CMU_MIF_BUSP, "mout_cmu_mif_busp", mout_cmu_mif_busp_p,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_MISC_BUS, "mout_cmu_misc_bus", mout_cmu_misc_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_MISC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_MISC_SSS, "mout_cmu_misc_sss", mout_cmu_misc_sss_p,
+ CLK_CON_MUX_MUX_CLKCMU_MISC_SSS, 0, 2),
+ MUX(CLK_MOUT_CMU_PDP_BUS, "mout_cmu_pdp_bus", mout_cmu_pdp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PDP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_PDP_VRA, "mout_cmu_pdp_vra", mout_cmu_pdp_vra_p,
+ CLK_CON_MUX_MUX_CLKCMU_PDP_VRA, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_IP, "mout_cmu_peric0_ip", mout_cmu_peric0_ip_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_IP, "mout_cmu_peric1_ip", mout_cmu_peric1_ip_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 2),
+ MUX(CLK_MOUT_CMU_TNR_BUS, "mout_cmu_tnr_bus", mout_cmu_tnr_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_TOP_BOOST_OPTION1, "mout_cmu_top_boost_option1",
+ mout_cmu_top_boost_option1_p,
+ CLK_CON_MUX_MUX_CLKCMU_TOP_BOOST_OPTION1, 0, 1),
+ MUX(CLK_MOUT_CMU_TOP_CMUREF, "mout_cmu_top_cmuref",
+ mout_cmu_top_cmuref_p, CLK_CON_MUX_MUX_CLKCMU_TOP_CMUREF, 0, 2),
+ MUX(CLK_MOUT_CMU_TPU_BUS, "mout_cmu_tpu_bus", mout_cmu_tpu_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_TPU_TPU, "mout_cmu_tpu_tpu", mout_cmu_tpu_tpu_p,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_TPU, 0, 3),
+ MUX(CLK_MOUT_CMU_TPU_TPUCTL, "mout_cmu_tpu_tpuctl",
+ mout_cmu_tpu_tpuctl_p, CLK_CON_MUX_MUX_CLKCMU_TPU_TPUCTL, 0, 3),
+ MUX(CLK_MOUT_CMU_TPU_UART, "mout_cmu_tpu_uart", mout_cmu_tpu_uart_p,
+ CLK_CON_MUX_MUX_CLKCMU_TPU_UART, 0, 2),
+ MUX(CLK_MOUT_CMU_CMUREF, "mout_cmu_cmuref", mout_cmu_cmuref_p,
+ CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+};
+
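+/*
+ * Divider clocks: DIV(id, name, parent, reg, shift, width). The divide
+ * ratio is the register field value plus one, so a 4-bit field selects
+ * ratios 1..16 and a 3-bit field ratios 1..8.
+ */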
+static const struct samsung_div_clock cmu_top_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMU_BO_BUS, "dout_cmu_bo_bus", "gout_cmu_bo_bus",
+ CLK_CON_DIV_CLKCMU_BO_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
+ CLK_CON_DIV_CLKCMU_BUS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS2_BUS, "dout_cmu_bus2_bus", "gout_cmu_bus2_bus",
+ CLK_CON_DIV_CLKCMU_BUS2_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK4, "dout_cmu_cis_clk4", "gout_cmu_cis_clk4",
+ CLK_CON_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK5, "dout_cmu_cis_clk5", "gout_cmu_cis_clk5",
+ CLK_CON_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK6, "dout_cmu_cis_clk6", "gout_cmu_cis_clk6",
+ CLK_CON_DIV_CLKCMU_CIS_CLK6, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK7, "dout_cmu_cis_clk7", "gout_cmu_cis_clk7",
+ CLK_CON_DIV_CLKCMU_CIS_CLK7, 0, 5),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG, "dout_cmu_cpucl0_dbg",
+ "gout_cmu_cpucl0_dbg", CLK_CON_DIV_CLKCMU_CPUCL0_DBG, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL2_SWITCH, "dout_cmu_cpucl2_switch",
+ "gout_cmu_cpucl2_switch", CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CSIS_BUS, "dout_cmu_csis_bus", "gout_cmu_csis_bus",
+ CLK_CON_DIV_CLKCMU_CSIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DISP_BUS, "dout_cmu_disp_bus", "gout_cmu_disp_bus",
+ CLK_CON_DIV_CLKCMU_DISP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DNS_BUS, "dout_cmu_dns_bus", "gout_cmu_dns_bus",
+ CLK_CON_DIV_CLKCMU_DNS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU_BUS, "dout_cmu_dpu_bus", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_CLKCMU_DPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_EH_BUS, "dout_cmu_eh_bus", "gout_cmu_eh_bus",
+ CLK_CON_DIV_CLKCMU_EH_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_MSCL, "dout_cmu_g2d_mscl", "gout_cmu_g2d_mscl",
+ CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+ DIV(CLK_DOUT_CMU_G3AA_G3AA, "dout_cmu_g3aa_g3aa", "gout_cmu_g3aa_g3aa",
+ CLK_CON_DIV_CLKCMU_G3AA_G3AA, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_BUSD, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
+ CLK_CON_DIV_CLKCMU_G3D_BUSD, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_GLB, "dout_cmu_g3d_glb", "gout_cmu_g3d_glb",
+ CLK_CON_DIV_CLKCMU_G3D_GLB, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_GDC_GDC0, "dout_cmu_gdc_gdc0", "gout_cmu_gdc_gdc0",
+ CLK_CON_DIV_CLKCMU_GDC_GDC0, 0, 4),
+ DIV(CLK_DOUT_CMU_GDC_GDC1, "dout_cmu_gdc_gdc1", "gout_cmu_gdc_gdc1",
+ CLK_CON_DIV_CLKCMU_GDC_GDC1, 0, 4),
+ DIV(CLK_DOUT_CMU_GDC_SCSC, "dout_cmu_gdc_scsc", "gout_cmu_gdc_scsc",
+ CLK_CON_DIV_CLKCMU_GDC_SCSC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_HSI0_BUS, "dout_cmu_hsi0_bus", "gout_cmu_hsi0_bus",
+ CLK_CON_DIV_CLKCMU_HSI0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc",
+ "gout_cmu_hsi0_dpgtc", CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
+ "gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 5),
+ DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
+ CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
+ CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI2_MMC_CARD, "dout_cmu_hsi2_mmc_card",
+ "gout_cmu_hsi2_mmc_card", CLK_CON_DIV_CLKCMU_HSI2_MMC_CARD, 0, 9),
+ DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI2_UFS_EMBD, "dout_cmu_hsi2_ufs_embd",
+ "gout_cmu_hsi2_ufs_embd", CLK_CON_DIV_CLKCMU_HSI2_UFS_EMBD, 0, 4),
+ DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
+ CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
+ CLK_CON_DIV_CLKCMU_ITP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_ITSC, "dout_cmu_mcsc_itsc", "gout_cmu_mcsc_itsc",
+ CLK_CON_DIV_CLKCMU_MCSC_ITSC, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_MCSC, "dout_cmu_mcsc_mcsc", "gout_cmu_mcsc_mcsc",
+ CLK_CON_DIV_CLKCMU_MCSC_MCSC, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC_MFC, "dout_cmu_mfc_mfc", "gout_cmu_mfc_mfc",
+ CLK_CON_DIV_CLKCMU_MFC_MFC, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_cmu_mif_busp", "gout_cmu_mif_busp",
+ CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_MISC_BUS, "dout_cmu_misc_bus", "gout_cmu_misc_bus",
+ CLK_CON_DIV_CLKCMU_MISC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MISC_SSS, "dout_cmu_misc_sss", "gout_cmu_misc_sss",
+ CLK_CON_DIV_CLKCMU_MISC_SSS, 0, 4),
+ DIV(CLK_DOUT_CMU_PDP_BUS, "dout_cmu_pdp_bus", "gout_cmu_pdp_bus",
+ CLK_CON_DIV_CLKCMU_PDP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PDP_VRA, "dout_cmu_pdp_vra", "gout_cmu_pdp_vra",
+ CLK_CON_DIV_CLKCMU_PDP_VRA, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus",
+ "gout_cmu_peric0_bus", CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP, "dout_cmu_peric0_ip", "gout_cmu_peric0_ip",
+ CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus",
+ "gout_cmu_peric1_bus", CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP, "dout_cmu_peric1_ip", "gout_cmu_peric1_ip",
+ CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_TNR_BUS, "dout_cmu_tnr_bus", "gout_cmu_tnr_bus",
+ CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_TPU_BUS, "dout_cmu_tpu_bus", "gout_cmu_tpu_bus",
+ CLK_CON_DIV_CLKCMU_TPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_TPU_TPU, "dout_cmu_tpu_tpu", "gout_cmu_tpu_tpu",
+ CLK_CON_DIV_CLKCMU_TPU_TPU, 0, 4),
+ DIV(CLK_DOUT_CMU_TPU_TPUCTL, "dout_cmu_tpu_tpuctl",
+ "gout_cmu_tpu_tpuctl", CLK_CON_DIV_CLKCMU_TPU_TPUCTL, 0, 4),
+ DIV(CLK_DOUT_CMU_TPU_UART, "dout_cmu_tpu_uart", "gout_cmu_tpu_uart",
+ CLK_CON_DIV_CLKCMU_TPU_UART, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost", "gout_cmu_cmu_boost",
+ CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, 0, 2),
+ DIV(CLK_DOUT_CMU_CMU_CMUREF, "dout_cmu_cmuref", "gout_cmu_cmuref",
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2",
+ "mout_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV3, "dout_cmu_shared0_div3",
+ "mout_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4",
+ "dout_cmu_shared0_div2", CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV5, "dout_cmu_shared0_div5",
+ "mout_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2",
+ "mout_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV3, "dout_cmu_shared1_div3",
+ "mout_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4",
+ "mout_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2",
+ "mout_pll_shared2", CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED3_DIV2, "dout_cmu_shared3_div2",
+ "mout_pll_shared3", CLK_CON_DIV_PLL_SHARED3_DIV2, 0, 1),
+};
+
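+/*
+ * Fixed-factor clocks: FFACTOR(id, name, parent, mult, div, flags) yields
+ * rate = parent_rate * mult / div, with no register control behind it.
+ */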
+static const struct samsung_fixed_factor_clock cmu_top_ffactor[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_HSI0_USBDPDBG, "dout_cmu_hsi0_usbdpdbg",
+ "gout_cmu_hsi0_usbdpdbg", 1, 4, 0),
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_cmu_otp", "oscclk", 1, 8, 0),
+};
+
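+/*
+ * Gate clocks: GATE(id, name, parent, reg, bit, flags, gate_flags). Bit 21
+ * is the enable bit in every CMU gate register used below.
+ */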
+static const struct samsung_gate_clock cmu_top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_BUS0_BOOST, "gout_cmu_bus0_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_BUS0_BOOST, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BOOST, "gout_cmu_bus1_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_BUS1_BOOST, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS2_BOOST, "gout_cmu_bus2_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_BUS2_BOOST, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BOOST, "gout_cmu_core_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_CORE_BOOST, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_BOOST, "gout_cmu_cpucl0_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_CPUCL0_BOOST,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_BOOST, "gout_cmu_cpucl1_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_CPUCL1_BOOST,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_BOOST, "gout_cmu_cpucl2_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_CPUCL2_BOOST,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_BOOST, "gout_cmu_mif_boost",
+ "mout_cmu_boost_option1", CLK_CON_GAT_CLKCMU_MIF_BOOST,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_SWITCH, "gout_cmu_mif_switch",
+ "mout_cmu_mif_switch", CLK_CON_GAT_CLKCMU_MIF_SWITCH, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BO_BUS, "gout_cmu_bo_bus", "mout_cmu_bo_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BO_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS0_BUS, "gout_cmu_bus0_bus", "mout_cmu_bus0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS2_BUS, "gout_cmu_bus2_bus", "mout_cmu_bus2_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS2_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK4, "gout_cmu_cis_clk4", "mout_cmu_cis_clk4",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK5, "gout_cmu_cis_clk5", "mout_cmu_cis_clk5",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK6, "gout_cmu_cis_clk6", "mout_cmu_cis_clk6",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK6, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK7, "gout_cmu_cis_clk7", "mout_cmu_cis_clk7",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK7, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CMU_BOOST, "gout_cmu_cmu_boost", "mout_cmu_cmu_boost",
+ CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_DBG, "gout_cmu_cpucl0_dbg",
+ "mout_cmu_cpucl0_dbg", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_SWITCH, "gout_cmu_cpucl2_switch",
+ "mout_cmu_cpucl2_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CSIS_BUS, "gout_cmu_csis_bus", "mout_cmu_csis_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DISP_BUS, "gout_cmu_disp_bus", "mout_cmu_disp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DISP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNS_BUS, "gout_cmu_dns_bus", "mout_cmu_dns_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_EH_BUS, "gout_cmu_eh_bus", "mout_cmu_eh_bus",
+ CLK_CON_GAT_GATE_CLKCMU_EH_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_MSCL, "gout_cmu_g2d_mscl", "mout_cmu_g2d_mscl",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3AA_G3AA, "gout_cmu_g3aa_g3aa", "mout_cmu_g3aa_g3aa",
+ CLK_CON_GAT_GATE_CLKCMU_G3AA_G3AA, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_BUSD, "gout_cmu_g3d_busd", "mout_cmu_g3d_busd",
+ CLK_CON_GAT_GATE_CLKCMU_G3D_BUSD, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_GLB, "gout_cmu_g3d_glb", "mout_cmu_g3d_glb",
+ CLK_CON_GAT_GATE_CLKCMU_G3D_GLB, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "mout_cmu_g3d_switch", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_GDC_GDC0, "gout_cmu_gdc_gdc0", "mout_cmu_gdc_gdc0",
+ CLK_CON_GAT_GATE_CLKCMU_GDC_GDC0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_GDC_GDC1, "gout_cmu_gdc_gdc1", "mout_cmu_gdc_gdc1",
+ CLK_CON_GAT_GATE_CLKCMU_GDC_GDC1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_GDC_SCSC, "gout_cmu_gdc_scsc", "mout_cmu_gdc_scsc",
+ CLK_CON_GAT_GATE_CLKCMU_GDC_SCSC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_BUS, "gout_cmu_hsi0_bus", "mout_cmu_hsi0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_DPGTC, "gout_cmu_hsi0_dpgtc",
+ "mout_cmu_hsi0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USB31DRD, "gout_cmu_hsi0_usb31drd",
+ "mout_cmu_hsi0_usb31drd", CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USBDPDBG, "gout_cmu_hsi0_usbdpdbg",
+ "mout_cmu_hsi0_usbdpdbg", CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDPDBG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_BUS, "gout_cmu_hsi1_bus", "mout_cmu_hsi1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_PCIE, "gout_cmu_hsi1_pcie", "mout_cmu_hsi1_pcie",
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_BUS, "gout_cmu_hsi2_bus", "mout_cmu_hsi2_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_MMC_CARD, "gout_cmu_hsi2_mmc_card",
+ "mout_cmu_hsi2_mmc_card", CLK_CON_GAT_GATE_CLKCMU_HSI2_MMCCARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_PCIE, "gout_cmu_hsi2_pcie", "mout_cmu_hsi2_pcie",
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_UFS_EMBD, "gout_cmu_hsi2_ufs_embd",
+ "mout_cmu_hsi2_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_HSI2_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IPP_BUS, "gout_cmu_ipp_bus", "mout_cmu_ipp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ITP_BUS, "gout_cmu_itp_bus", "mout_cmu_itp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_ITSC, "gout_cmu_mcsc_itsc", "mout_cmu_mcsc_itsc",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_ITSC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_MCSC, "gout_cmu_mcsc_mcsc", "mout_cmu_mcsc_mcsc",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_MCSC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC_MFC, "gout_cmu_mfc_mfc", "mout_cmu_mfc_mfc",
+ CLK_CON_GAT_GATE_CLKCMU_MFC_MFC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_BUSP, "gout_cmu_mif_busp", "mout_cmu_mif_busp",
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MISC_BUS, "gout_cmu_misc_bus", "mout_cmu_misc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MISC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MISC_SSS, "gout_cmu_misc_sss", "mout_cmu_misc_sss",
+ CLK_CON_GAT_GATE_CLKCMU_MISC_SSS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PDP_BUS, "gout_cmu_pdp_bus", "mout_cmu_pdp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_PDP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PDP_VRA, "gout_cmu_pdp_vra", "mout_cmu_pdp_vra",
+ CLK_CON_GAT_GATE_CLKCMU_PDP_VRA, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_IP, "gout_cmu_peric0_ip", "mout_cmu_peric0_ip",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_IP, "gout_cmu_peric1_ip", "mout_cmu_peric1_ip",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TNR_BUS, "gout_cmu_tnr_bus", "mout_cmu_tnr_bus",
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TOP_CMUREF, "gout_cmu_top_cmuref",
+ "mout_cmu_top_cmuref", CLK_CON_GAT_GATE_CLKCMU_TOP_CMUREF,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TPU_BUS, "gout_cmu_tpu_bus", "mout_cmu_tpu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_TPU_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TPU_TPU, "gout_cmu_tpu_tpu", "mout_cmu_tpu_tpu",
+ CLK_CON_GAT_GATE_CLKCMU_TPU_TPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TPU_TPUCTL, "gout_cmu_tpu_tpuctl",
+ "mout_cmu_tpu_tpuctl", CLK_CON_GAT_GATE_CLKCMU_TPU_TPUCTL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TPU_UART, "gout_cmu_tpu_uart", "mout_cmu_tpu_uart",
+ CLK_CON_GAT_GATE_CLKCMU_TPU_UART, 21, 0, 0),
+};
+
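+/*
+ * Bundle every CMU_TOP clock table; the common Samsung clock code walks
+ * these arrays and registers all the clocks in a single pass.
+ */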
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = cmu_top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_top_pll_clks),
+ .mux_clks = cmu_top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_top_mux_clks),
+ .div_clks = cmu_top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_top_div_clks),
+ .fixed_factor_clks = cmu_top_ffactor,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_top_ffactor),
+ .gate_clks = cmu_top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = cmu_top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_top_clk_regs),
+};
+
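+/*
+ * exynos_arm64_register_cmu() maps the CMU register block from the device
+ * tree node and registers the clock provider described by top_cmu_info.
+ */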
+static void __init gs101_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(gs101_cmu_top, "google,gs101-cmu-top",
+ gs101_cmu_top_init);
+
+/* ---- CMU_APM ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_APM (0x17400000) */
+#define APM_CMU_APM_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_APM_CMU_APM_CLKOUT0 0x0810
+#define CLK_CON_MUX_MUX_CLKCMU_APM_FUNC 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_FUNCSRC 0x1004
+#define CLK_CON_DIV_DIV_CLK_APM_BOOST 0x1800
+#define CLK_CON_DIV_DIV_CLK_APM_USI0_UART 0x1804
+#define CLK_CON_DIV_DIV_CLK_APM_USI0_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_APM_USI1_UART 0x180c
+#define CLK_CON_GAT_CLK_BLK_APM_UID_APM_CMU_APM_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BUS0_BOOST_OPTION1 0x2004
+#define CLK_CON_GAT_CLK_CMU_BOOST_OPTION1 0x2008
+#define CLK_CON_GAT_CLK_CORE_BOOST_OPTION1 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_APM_FUNC 0x2010
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_GPIO_FAR_ALIVE_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_RTC_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_TRTC_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_UART_IPCLKPORT_IPCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_UART_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_USI_IPCLKPORT_IPCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_USI_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI1_UART_IPCLKPORT_IPCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI1_UART_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_D_TZPC_APM_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_GPC_APM_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_GREBEINTEGRATION_IPCLKPORT_HCLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_INTMEM_IPCLKPORT_ACLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_INTMEM_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_G_SWD_IPCLKPORT_I_CLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_P_AOCAPM_IPCLKPORT_I_CLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_P_APM_IPCLKPORT_I_CLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_D_APM_IPCLKPORT_I_CLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_G_DBGCORE_IPCLKPORT_I_CLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_G_SCAN2DRAM_IPCLKPORT_I_CLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_AOC_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_GSA_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_SWD_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_TPU_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_AP_AOC_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_PMU_INTR_GEN_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_ROM_CRC32_HOST_IPCLKPORT_ACLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_ROM_CRC32_HOST_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_BUS_IPCLKPORT_CLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI0_UART_IPCLKPORT_CLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI0_USI_IPCLKPORT_CLK 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI1_UART_IPCLKPORT_CLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SPEEDY_APM_IPCLKPORT_PCLK 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SPEEDY_SUB_APM_IPCLKPORT_PCLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_D_APM_IPCLKPORT_ACLK 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_D_APM_IPCLKPORT_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_G_DBGCORE_IPCLKPORT_ACLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_G_DBGCORE_IPCLKPORT_PCLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SS_DBGCORE_IPCLKPORT_SS_DBGCORE_IPCLKPORT_HCLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SYSMMU_D_APM_IPCLKPORT_CLK_S2 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_SYSREG_APM_IPCLKPORT_PCLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_APM_IPCLKPORT_ACLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_APM_IPCLKPORT_PCLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_DBGCORE_IPCLKPORT_ACLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_DBGCORE_IPCLKPORT_PCLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_ACLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_PCLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_AOCAPM_IPCLKPORT_ACLK 0x20e8
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_AOCAPM_IPCLKPORT_PCLK 0x20ec
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK 0x20f0
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK 0x20f4
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_WDT_APM_IPCLKPORT_PCLK 0x20f8
+#define CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK 0x20fc
+#define PCH_CON_LHM_AXI_G_SWD_PCH 0x3000
+#define PCH_CON_LHM_AXI_P_AOCAPM_PCH 0x3004
+#define PCH_CON_LHM_AXI_P_APM_PCH 0x3008
+#define PCH_CON_LHS_AXI_D_APM_PCH 0x300c
+#define PCH_CON_LHS_AXI_G_DBGCORE_PCH 0x3010
+#define PCH_CON_LHS_AXI_G_SCAN2DRAM_PCH 0x3014
+#define QCH_CON_APBIF_GPIO_ALIVE_QCH 0x3018
+#define QCH_CON_APBIF_GPIO_FAR_ALIVE_QCH 0x301c
+#define QCH_CON_APBIF_PMU_ALIVE_QCH 0x3020
+#define QCH_CON_APBIF_RTC_QCH 0x3024
+#define QCH_CON_APBIF_TRTC_QCH 0x3028
+#define QCH_CON_APM_CMU_APM_QCH 0x302c
+#define QCH_CON_APM_USI0_UART_QCH 0x3030
+#define QCH_CON_APM_USI0_USI_QCH 0x3034
+#define QCH_CON_APM_USI1_UART_QCH 0x3038
+#define QCH_CON_D_TZPC_APM_QCH 0x303c
+#define QCH_CON_GPC_APM_QCH 0x3040
+#define QCH_CON_GREBEINTEGRATION_QCH_DBG 0x3044
+#define QCH_CON_GREBEINTEGRATION_QCH_GREBE 0x3048
+#define QCH_CON_INTMEM_QCH 0x304c
+#define QCH_CON_LHM_AXI_G_SWD_QCH 0x3050
+#define QCH_CON_LHM_AXI_P_AOCAPM_QCH 0x3054
+#define QCH_CON_LHM_AXI_P_APM_QCH 0x3058
+#define QCH_CON_LHS_AXI_D_APM_QCH 0x305c
+#define QCH_CON_LHS_AXI_G_DBGCORE_QCH 0x3060
+#define QCH_CON_LHS_AXI_G_SCAN2DRAM_QCH 0x3064
+#define QCH_CON_MAILBOX_APM_AOC_QCH 0x3068
+#define QCH_CON_MAILBOX_APM_AP_QCH 0x306c
+#define QCH_CON_MAILBOX_APM_GSA_QCH 0x3070
+#define QCH_CON_MAILBOX_APM_SWD_QCH 0x3078
+#define QCH_CON_MAILBOX_APM_TPU_QCH 0x307c
+#define QCH_CON_MAILBOX_AP_AOC_QCH 0x3080
+#define QCH_CON_MAILBOX_AP_DBGCORE_QCH 0x3084
+#define QCH_CON_PMU_INTR_GEN_QCH 0x3088
+#define QCH_CON_ROM_CRC32_HOST_QCH 0x308c
+#define QCH_CON_RSTNSYNC_CLK_APM_BUS_QCH_GREBE 0x3090
+#define QCH_CON_RSTNSYNC_CLK_APM_BUS_QCH_GREBE_DBG 0x3094
+#define QCH_CON_SPEEDY_APM_QCH 0x3098
+#define QCH_CON_SPEEDY_SUB_APM_QCH 0x309c
+#define QCH_CON_SSMT_D_APM_QCH 0x30a0
+#define QCH_CON_SSMT_G_DBGCORE_QCH 0x30a4
+#define QCH_CON_SS_DBGCORE_QCH_DBG 0x30a8
+#define QCH_CON_SS_DBGCORE_QCH_GREBE 0x30ac
+#define QCH_CON_SYSMMU_D_APM_QCH 0x30b0
+#define QCH_CON_SYSREG_APM_QCH 0x30b8
+#define QCH_CON_UASC_APM_QCH 0x30bc
+#define QCH_CON_UASC_DBGCORE_QCH 0x30c0
+#define QCH_CON_UASC_G_SWD_QCH 0x30c4
+#define QCH_CON_UASC_P_AOCAPM_QCH 0x30c8
+#define QCH_CON_UASC_P_APM_QCH 0x30cc
+#define QCH_CON_WDT_APM_QCH 0x30d0
+#define QUEUE_CTRL_REG_BLK_APM_CMU_APM 0x3c00
+
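+/*
+ * Registers listed here are saved on suspend and restored on resume by the
+ * common Samsung clock code.
+ */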
+static const unsigned long apm_clk_regs[] __initconst = {
+ APM_CMU_APM_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_APM_CMU_APM_CLKOUT0,
+ CLK_CON_MUX_MUX_CLKCMU_APM_FUNC,
+ CLK_CON_MUX_MUX_CLKCMU_APM_FUNCSRC,
+ CLK_CON_DIV_DIV_CLK_APM_BOOST,
+ CLK_CON_DIV_DIV_CLK_APM_USI0_UART,
+ CLK_CON_DIV_DIV_CLK_APM_USI0_USI,
+ CLK_CON_DIV_DIV_CLK_APM_USI1_UART,
+ CLK_CON_GAT_CLK_BLK_APM_UID_APM_CMU_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BUS0_BOOST_OPTION1,
+ CLK_CON_GAT_CLK_CMU_BOOST_OPTION1,
+ CLK_CON_GAT_CLK_CORE_BOOST_OPTION1,
+ CLK_CON_GAT_GATE_CLKCMU_APM_FUNC,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_GPIO_FAR_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_RTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_TRTC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_USI_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_USI_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI1_UART_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI1_UART_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_D_TZPC_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_GPC_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_GREBEINTEGRATION_IPCLKPORT_HCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_INTMEM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_INTMEM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_G_SWD_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_P_AOCAPM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_P_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_D_APM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_G_DBGCORE_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_G_SCAN2DRAM_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_AOC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_GSA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_SWD_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_TPU_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_AP_AOC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_PMU_INTR_GEN_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_ROM_CRC32_HOST_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_ROM_CRC32_HOST_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI0_UART_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI0_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI1_UART_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SPEEDY_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SPEEDY_SUB_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_D_APM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_D_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_G_DBGCORE_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_G_DBGCORE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SS_DBGCORE_IPCLKPORT_SS_DBGCORE_IPCLKPORT_HCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SYSMMU_D_APM_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SYSREG_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_APM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_DBGCORE_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_DBGCORE_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_AOCAPM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_AOCAPM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_WDT_APM_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_apm_func_p) = { "oscclk", "mout_apm_funcsrc",
+ "pad_clk_apm", "oscclk" };
+PNAME(mout_apm_funcsrc_p) = { "pll_alv_div2_apm", "pll_alv_div4_apm",
+ "pll_alv_div16_apm" };
+
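+/*
+ * Fixed-rate clocks: FRATE(id, name, parent, flags, rate) registers a clock
+ * with a hard-coded rate. The rates below are consistent with the /2, /4
+ * and /16 taps of a (presumably) 786.432 MHz always-on alive PLL, which is
+ * not controlled from this CMU.
+ */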
+static const struct samsung_fixed_rate_clock apm_fixed_clks[] __initconst = {
+ FRATE(CLK_APM_PLL_DIV2_APM, "pll_alv_div2_apm", NULL, 0, 393216000),
+ FRATE(CLK_APM_PLL_DIV4_APM, "pll_alv_div4_apm", NULL, 0, 196608000),
+ FRATE(CLK_APM_PLL_DIV16_APM, "pll_alv_div16_apm", NULL, 0, 49152000),
+};
+
+static const struct samsung_mux_clock apm_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_APM_FUNC, "mout_apm_func", mout_apm_func_p,
+ CLK_CON_MUX_MUX_CLKCMU_APM_FUNC, 4, 2),
+ MUX(CLK_MOUT_APM_FUNCSRC, "mout_apm_funcsrc", mout_apm_funcsrc_p,
+ CLK_CON_MUX_MUX_CLKCMU_APM_FUNCSRC, 3, 2),
+};
+
+static const struct samsung_div_clock apm_div_clks[] __initconst = {
+ DIV(CLK_DOUT_APM_BOOST, "dout_apm_boost", "gout_apm_func",
+ CLK_CON_DIV_DIV_CLK_APM_BOOST, 0, 1),
+ DIV(CLK_DOUT_APM_USI0_UART, "dout_apm_usi0_uart", "gout_apm_func",
+ CLK_CON_DIV_DIV_CLK_APM_USI0_UART, 0, 7),
+ DIV(CLK_DOUT_APM_USI0_USI, "dout_apm_usi0_usi", "gout_apm_func",
+ CLK_CON_DIV_DIV_CLK_APM_USI0_USI, 0, 7),
+ DIV(CLK_DOUT_APM_USI1_UART, "dout_apm_usi1_uart", "gout_apm_func",
+ CLK_CON_DIV_DIV_CLK_APM_USI1_UART, 0, 7),
+};
+
+static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_APM_APM_CMU_APM_PCLK,
+ "gout_apm_apm_cmu_apm_pclk", "mout_apm_func",
+ CLK_CON_GAT_CLK_BLK_APM_UID_APM_CMU_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_BUS0_BOOST_OPTION1, "gout_bus0_boost_option1",
+ "dout_apm_boost", CLK_CON_GAT_CLK_BUS0_BOOST_OPTION1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BOOST_OPTION1, "gout_cmu_boost_option1",
+ "dout_apm_boost", CLK_CON_GAT_CLK_CMU_BOOST_OPTION1, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_BOOST_OPTION1, "gout_core_boost_option1",
+ "dout_apm_boost", CLK_CON_GAT_CLK_CORE_BOOST_OPTION1, 21, 0, 0),
+ GATE(CLK_GOUT_APM_FUNC, "gout_apm_func", "mout_apm_func",
+ CLK_CON_GAT_GATE_CLKCMU_APM_FUNC, 21, 0, 0),
+ GATE(CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK,
+ "gout_apm_apbif_gpio_alive_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_GPIO_ALIVE_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK,
+ "gout_apm_apbif_gpio_far_alive_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_GPIO_FAR_ALIVE_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APBIF_PMU_ALIVE_PCLK,
+ "gout_apm_apbif_pmu_alive_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_PMU_ALIVE_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APBIF_RTC_PCLK,
+ "gout_apm_apbif_rtc_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_RTC_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_APBIF_TRTC_PCLK,
+ "gout_apm_apbif_trtc_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APBIF_TRTC_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_APM_USI0_UART_IPCLK,
+ "gout_apm_apm_usi0_uart_ipclk", "dout_apm_usi0_uart",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_UART_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APM_USI0_UART_PCLK,
+ "gout_apm_apm_usi0_uart_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_UART_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APM_USI0_USI_IPCLK,
+ "gout_apm_apm_usi0_usi_ipclk", "dout_apm_usi0_usi",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_USI_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APM_USI0_USI_PCLK,
+ "gout_apm_apm_usi0_usi_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI0_USI_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APM_USI1_UART_IPCLK,
+ "gout_apm_apm_usi1_uart_ipclk", "dout_apm_usi1_uart",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI1_UART_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_APM_USI1_UART_PCLK,
+ "gout_apm_apm_usi1_uart_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_APM_USI1_UART_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_D_TZPC_APM_PCLK,
+ "gout_apm_d_tzpc_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_D_TZPC_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_GPC_APM_PCLK,
+ "gout_apm_gpc_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_GPC_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_GREBEINTEGRATION_HCLK,
+ "gout_apm_grebeintegration_hclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_GREBEINTEGRATION_IPCLKPORT_HCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_INTMEM_ACLK,
+ "gout_apm_intmem_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_INTMEM_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_INTMEM_PCLK,
+ "gout_apm_intmem_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_INTMEM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_LHM_AXI_G_SWD_I_CLK,
+ "gout_apm_lhm_axi_g_swd_i_clk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_G_SWD_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_LHM_AXI_P_AOCAPM_I_CLK,
+ "gout_apm_lhm_axi_p_aocapm_i_clk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_P_AOCAPM_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_LHM_AXI_P_APM_I_CLK,
+ "gout_apm_lhm_axi_p_apm_i_clk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHM_AXI_P_APM_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_LHS_AXI_D_APM_I_CLK,
+ "gout_apm_lhs_axi_d_apm_i_clk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_D_APM_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_LHS_AXI_G_DBGCORE_I_CLK,
+ "gout_apm_lhs_axi_g_dbgcore_i_clk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_G_DBGCORE_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_LHS_AXI_G_SCAN2DRAM_I_CLK,
+ "gout_apm_lhs_axi_g_scan2dram_i_clk",
+ "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_LHS_AXI_G_SCAN2DRAM_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_APM_AOC_PCLK,
+ "gout_apm_mailbox_apm_aoc_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_AOC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_APM_AP_PCLK,
+ "gout_apm_mailbox_apm_ap_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_AP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_APM_GSA_PCLK,
+ "gout_apm_mailbox_apm_gsa_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_GSA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_APM_SWD_PCLK,
+ "gout_apm_mailbox_apm_swd_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_SWD_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_APM_TPU_PCLK,
+ "gout_apm_mailbox_apm_tpu_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_APM_TPU_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_AP_AOC_PCLK,
+ "gout_apm_mailbox_ap_aoc_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_AP_AOC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_MAILBOX_AP_DBGCORE_PCLK,
+ "gout_apm_mailbox_ap_dbgcore_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_MAILBOX_AP_DBGCORE_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_PMU_INTR_GEN_PCLK,
+ "gout_apm_pmu_intr_gen_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_PMU_INTR_GEN_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_ROM_CRC32_HOST_ACLK,
+ "gout_apm_rom_crc32_host_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_ROM_CRC32_HOST_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_ROM_CRC32_HOST_PCLK,
+ "gout_apm_rom_crc32_host_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_ROM_CRC32_HOST_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_CLK_APM_BUS_CLK,
+ "gout_apm_clk_apm_bus_clk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_CLK_APM_USI0_UART_CLK,
+ "gout_apm_clk_apm_usi0_uart_clk",
+ "dout_apm_usi0_uart",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI0_UART_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_CLK_APM_USI0_USI_CLK,
+ "gout_apm_clk_apm_usi0_usi_clk",
+ "dout_apm_usi0_usi",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI0_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_CLK_APM_USI1_UART_CLK,
+ "gout_apm_clk_apm_usi1_uart_clk",
+ "dout_apm_usi1_uart",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_RSTNSYNC_CLK_APM_USI1_UART_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_SPEEDY_APM_PCLK,
+ "gout_apm_speedy_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SPEEDY_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_SPEEDY_SUB_APM_PCLK,
+ "gout_apm_speedy_sub_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SPEEDY_SUB_APM_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_SSMT_D_APM_ACLK,
+ "gout_apm_ssmt_d_apm_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_D_APM_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_SSMT_D_APM_PCLK,
+ "gout_apm_ssmt_d_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_D_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_SSMT_G_DBGCORE_ACLK,
+ "gout_apm_ssmt_g_dbgcore_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_G_DBGCORE_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_SSMT_G_DBGCORE_PCLK,
+ "gout_apm_ssmt_g_dbgcore_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SSMT_G_DBGCORE_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_SS_DBGCORE_SS_DBGCORE_HCLK,
+ "gout_apm_ss_dbgcore_ss_dbgcore_hclk",
+ "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SS_DBGCORE_IPCLKPORT_SS_DBGCORE_IPCLKPORT_HCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_SYSMMU_D_APM_CLK_S2,
+ "gout_apm_sysmmu_d_dpm_clk_s2", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SYSMMU_D_APM_IPCLKPORT_CLK_S2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_SYSREG_APM_PCLK,
+ "gout_apm_sysreg_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_SYSREG_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_APM_ACLK,
+ "gout_apm_uasc_apm_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_APM_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_APM_PCLK,
+ "gout_apm_uasc_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_DBGCORE_ACLK,
+ "gout_apm_uasc_dbgcore_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_DBGCORE_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_DBGCORE_PCLK,
+ "gout_apm_uasc_dbgcore_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_DBGCORE_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_G_SWD_ACLK,
+ "gout_apm_uasc_g_swd_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_G_SWD_PCLK,
+ "gout_apm_uasc_g_swd_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_G_SWD_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_P_AOCAPM_ACLK,
+ "gout_apm_uasc_p_aocapm_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_AOCAPM_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_P_AOCAPM_PCLK,
+ "gout_apm_uasc_p_aocapm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_AOCAPM_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_P_APM_ACLK,
+ "gout_apm_uasc_p_apm_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_UASC_P_APM_PCLK,
+ "gout_apm_uasc_p_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_UASC_P_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_WDT_APM_PCLK,
+ "gout_apm_wdt_apm_pclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_WDT_APM_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_APM_XIU_DP_APM_ACLK,
+ "gout_apm_xiu_dp_apm_aclk", "gout_apm_func",
+ CLK_CON_GAT_GOUT_BLK_APM_UID_XIU_DP_APM_IPCLKPORT_ACLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info apm_cmu_info __initconst = {
+ .mux_clks = apm_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(apm_mux_clks),
+ .div_clks = apm_div_clks,
+ .nr_div_clks = ARRAY_SIZE(apm_div_clks),
+ .gate_clks = apm_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(apm_gate_clks),
+ .fixed_clks = apm_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(apm_fixed_clks),
+ .nr_clk_ids = CLKS_NR_APM,
+ .clk_regs = apm_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(apm_clk_regs),
+};
+
+/* ---- CMU_MISC ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_MISC (0x10010000) */
+#define PLL_CON0_MUX_CLKCMU_MISC_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_MISC_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_MISC_SSS_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_MISC_SSS_USER 0x0614
+#define MISC_CMU_MISC_CONTROLLER_OPTION 0x0800
+#define CLKOUT_CON_BLK_MISC_CMU_MISC_CLKOUT0 0x0810
+#define CLK_CON_MUX_MUX_CLK_MISC_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_MISC_BUSP 0x1800
+#define CLK_CON_DIV_DIV_CLK_MISC_GIC 0x1804
+#define CLK_CON_GAT_CLK_BLK_MISC_UID_MISC_CMU_MISC_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_BISR_IPCLKPORT_I_OSCCLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_MISC_UID_RSTNSYNC_CLK_MISC_OSCCLK_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM 0x2014
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_AD_APB_DIT_IPCLKPORT_PCLKM 0x2018
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_AD_APB_PUF_IPCLKPORT_PCLKM 0x201c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_DIT_IPCLKPORT_ICLKL2A 0x2020
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_D_TZPC_MISC_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_GIC_IPCLKPORT_GICCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_GPC_MISC_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AST_ICC_CPUGIC_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_D_SSS_IPCLKPORT_I_CLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_P_GIC_IPCLKPORT_I_CLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_P_MISC_IPCLKPORT_I_CLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_ACEL_D_MISC_IPCLKPORT_I_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_AST_IRI_GICCPU_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_AXI_D_SSS_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_MCT_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_BISR_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_PDMA_IPCLKPORT_ACLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_DMA_IPCLKPORT_ACLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_MISC_IPCLKPORT_ACLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_MISC_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_PUF_IPCLKPORT_I_CLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_DIT_IPCLKPORT_ACLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_DIT_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PDMA_IPCLKPORT_ACLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PDMA_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PPMU_DMA_IPCLKPORT_ACLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PPMU_DMA_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_RTIC_IPCLKPORT_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_RTIC_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SPDMA_IPCLKPORT_ACLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SPDMA_IPCLKPORT_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SSS_IPCLKPORT_ACLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SSS_IPCLKPORT_PCLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_BUSD_IPCLKPORT_CLK 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_BUSP_IPCLKPORT_CLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_GIC_IPCLKPORT_CLK 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_SSS_IPCLKPORT_CLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_RTIC_IPCLKPORT_I_ACLK 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_RTIC_IPCLKPORT_I_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SPDMA_IPCLKPORT_ACLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_DIT_IPCLKPORT_ACLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_DIT_IPCLKPORT_PCLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PDMA_IPCLKPORT_ACLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PDMA_IPCLKPORT_PCLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PPMU_DMA_IPCLKPORT_ACLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PPMU_DMA_IPCLKPORT_PCLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_RTIC_IPCLKPORT_ACLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_RTIC_IPCLKPORT_PCLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SPDMA_IPCLKPORT_ACLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SPDMA_IPCLKPORT_PCLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SSS_IPCLKPORT_ACLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SSS_IPCLKPORT_PCLK 0x20e8
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSS_IPCLKPORT_I_ACLK 0x20ec
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SSS_IPCLKPORT_I_PCLK 0x20f0
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSMMU_MISC_IPCLKPORT_CLK_S2 0x20f4
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSMMU_SSS_IPCLKPORT_CLK_S1 0x20f8
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSREG_MISC_IPCLKPORT_PCLK 0x20fc
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_TMU_SUB_IPCLKPORT_PCLK 0x2100
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_TMU_TOP_IPCLKPORT_PCLK 0x2104
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_WDT_CLUSTER1_IPCLKPORT_PCLK 0x210c
+#define CLK_CON_GAT_GOUT_BLK_MISC_UID_XIU_D_MISC_IPCLKPORT_ACLK 0x2110
+#define DMYQCH_CON_PPMU_DMA_QCH 0x3000
+#define DMYQCH_CON_PUF_QCH 0x3004
+#define PCH_CON_LHM_AXI_D_SSS_PCH 0x300c
+#define PCH_CON_LHM_AXI_P_GIC_PCH 0x3010
+#define PCH_CON_LHM_AXI_P_MISC_PCH 0x3014
+#define PCH_CON_LHS_ACEL_D_MISC_PCH 0x3018
+#define PCH_CON_LHS_AST_IRI_GICCPU_PCH 0x301c
+#define PCH_CON_LHS_AXI_D_SSS_PCH 0x3020
+#define QCH_CON_ADM_AHB_SSS_QCH 0x3024
+#define QCH_CON_DIT_QCH 0x3028
+#define QCH_CON_GIC_QCH 0x3030
+#define QCH_CON_LHM_AST_ICC_CPUGIC_QCH 0x3038
+#define QCH_CON_LHM_AXI_D_SSS_QCH 0x303c
+#define QCH_CON_LHM_AXI_P_GIC_QCH 0x3040
+#define QCH_CON_LHM_AXI_P_MISC_QCH 0x3044
+#define QCH_CON_LHS_ACEL_D_MISC_QCH 0x3048
+#define QCH_CON_LHS_AST_IRI_GICCPU_QCH 0x304c
+#define QCH_CON_LHS_AXI_D_SSS_QCH 0x3050
+#define QCH_CON_MCT_QCH 0x3054
+#define QCH_CON_MISC_CMU_MISC_QCH 0x3058
+#define QCH_CON_OTP_CON_BIRA_QCH 0x305c
+#define QCH_CON_OTP_CON_BISR_QCH 0x3060
+#define QCH_CON_OTP_CON_TOP_QCH 0x3064
+#define QCH_CON_PDMA_QCH 0x3068
+#define QCH_CON_PPMU_MISC_QCH 0x306c
+#define QCH_CON_QE_DIT_QCH 0x3070
+#define QCH_CON_QE_PDMA_QCH 0x3074
+#define QCH_CON_QE_PPMU_DMA_QCH 0x3078
+#define QCH_CON_QE_RTIC_QCH 0x307c
+#define QCH_CON_QE_SPDMA_QCH 0x3080
+#define QCH_CON_QE_SSS_QCH 0x3084
+#define QCH_CON_RTIC_QCH 0x3088
+#define QCH_CON_SPDMA_QCH 0x308c
+#define QCH_CON_SSMT_DIT_QCH 0x3090
+#define QCH_CON_SSMT_PDMA_QCH 0x3094
+#define QCH_CON_SSMT_PPMU_DMA_QCH 0x3098
+#define QCH_CON_SSMT_RTIC_QCH 0x309c
+#define QCH_CON_SSMT_SPDMA_QCH 0x30a0
+#define QCH_CON_SSMT_SSS_QCH 0x30a4
+#define QCH_CON_SSS_QCH 0x30a8
+#define QCH_CON_SYSMMU_MISC_QCH 0x30ac
+#define QCH_CON_SYSMMU_SSS_QCH 0x30b0
+#define QCH_CON_SYSREG_MISC_QCH 0x30b4
+#define QCH_CON_TMU_SUB_QCH 0x30b8
+#define QCH_CON_TMU_TOP_QCH 0x30bc
+#define QCH_CON_WDT_CLUSTER0_QCH 0x30c0
+#define QCH_CON_WDT_CLUSTER1_QCH 0x30c4
+#define QUEUE_CTRL_REG_BLK_MISC_CMU_MISC 0x3c00
+
+static const unsigned long misc_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MISC_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_MISC_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_MISC_SSS_USER,
+ PLL_CON1_MUX_CLKCMU_MISC_SSS_USER,
+ MISC_CMU_MISC_CONTROLLER_OPTION,
+ CLKOUT_CON_BLK_MISC_CMU_MISC_CLKOUT0,
+ CLK_CON_MUX_MUX_CLK_MISC_GIC,
+ CLK_CON_DIV_DIV_CLK_MISC_BUSP,
+ CLK_CON_DIV_DIV_CLK_MISC_GIC,
+ CLK_CON_GAT_CLK_BLK_MISC_UID_MISC_CMU_MISC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_BISR_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ CLK_CON_GAT_CLK_BLK_MISC_UID_RSTNSYNC_CLK_MISC_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_AD_APB_DIT_IPCLKPORT_PCLKM,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_AD_APB_PUF_IPCLKPORT_PCLKM,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_DIT_IPCLKPORT_ICLKL2A,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_D_TZPC_MISC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_GIC_IPCLKPORT_GICCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_GPC_MISC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AST_ICC_CPUGIC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_D_SSS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_P_GIC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_P_MISC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_ACEL_D_MISC_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_AST_IRI_GICCPU_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_AXI_D_SSS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_BISR_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PDMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_DMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_MISC_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_MISC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PUF_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_DIT_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_DIT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PDMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PDMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PPMU_DMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PPMU_DMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_RTIC_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_RTIC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SPDMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SPDMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SSS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_BUSD_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_SSS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RTIC_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RTIC_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SPDMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_DIT_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_DIT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PDMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PDMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PPMU_DMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PPMU_DMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_RTIC_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_RTIC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SPDMA_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SPDMA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SSS_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SSS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSS_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSMMU_MISC_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSMMU_SSS_IPCLKPORT_CLK_S1,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSREG_MISC_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_TMU_SUB_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_TMU_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_XIU_D_MISC_IPCLKPORT_ACLK,
+ DMYQCH_CON_PPMU_DMA_QCH,
+ DMYQCH_CON_PUF_QCH,
+ PCH_CON_LHM_AXI_D_SSS_PCH,
+ PCH_CON_LHM_AXI_P_GIC_PCH,
+ PCH_CON_LHM_AXI_P_MISC_PCH,
+ PCH_CON_LHS_ACEL_D_MISC_PCH,
+ PCH_CON_LHS_AST_IRI_GICCPU_PCH,
+ PCH_CON_LHS_AXI_D_SSS_PCH,
+ QCH_CON_ADM_AHB_SSS_QCH,
+ QCH_CON_DIT_QCH,
+ QCH_CON_GIC_QCH,
+ QCH_CON_LHM_AST_ICC_CPUGIC_QCH,
+ QCH_CON_LHM_AXI_D_SSS_QCH,
+ QCH_CON_LHM_AXI_P_GIC_QCH,
+ QCH_CON_LHM_AXI_P_MISC_QCH,
+ QCH_CON_LHS_ACEL_D_MISC_QCH,
+ QCH_CON_LHS_AST_IRI_GICCPU_QCH,
+ QCH_CON_LHS_AXI_D_SSS_QCH,
+ QCH_CON_MCT_QCH,
+ QCH_CON_MISC_CMU_MISC_QCH,
+ QCH_CON_OTP_CON_BIRA_QCH,
+ QCH_CON_OTP_CON_BISR_QCH,
+ QCH_CON_OTP_CON_TOP_QCH,
+ QCH_CON_PDMA_QCH,
+ QCH_CON_PPMU_MISC_QCH,
+ QCH_CON_QE_DIT_QCH,
+ QCH_CON_QE_PDMA_QCH,
+ QCH_CON_QE_PPMU_DMA_QCH,
+ QCH_CON_QE_RTIC_QCH,
+ QCH_CON_QE_SPDMA_QCH,
+ QCH_CON_QE_SSS_QCH,
+ QCH_CON_RTIC_QCH,
+ QCH_CON_SPDMA_QCH,
+ QCH_CON_SSMT_DIT_QCH,
+ QCH_CON_SSMT_PDMA_QCH,
+ QCH_CON_SSMT_PPMU_DMA_QCH,
+ QCH_CON_SSMT_RTIC_QCH,
+ QCH_CON_SSMT_SPDMA_QCH,
+ QCH_CON_SSMT_SSS_QCH,
+ QCH_CON_SSS_QCH,
+ QCH_CON_SYSMMU_MISC_QCH,
+ QCH_CON_SYSMMU_SSS_QCH,
+ QCH_CON_SYSREG_MISC_QCH,
+ QCH_CON_TMU_SUB_QCH,
+ QCH_CON_TMU_TOP_QCH,
+ QCH_CON_WDT_CLUSTER0_QCH,
+ QCH_CON_WDT_CLUSTER1_QCH,
+ QUEUE_CTRL_REG_BLK_MISC_CMU_MISC,
+};
+
+/* List of parent clocks for Muxes in CMU_MISC */
+PNAME(mout_misc_bus_user_p) = { "oscclk", "dout_cmu_misc_bus" };
+PNAME(mout_misc_sss_user_p) = { "oscclk", "dout_cmu_misc_sss" };
+PNAME(mout_misc_gic_p) = { "dout_misc_gic", "oscclk" };
+
+static const struct samsung_mux_clock misc_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MISC_BUS_USER, "mout_misc_bus_user", mout_misc_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_MISC_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_MISC_SSS_USER, "mout_misc_sss_user", mout_misc_sss_user_p,
+ PLL_CON0_MUX_CLKCMU_MISC_SSS_USER, 4, 1),
+ MUX(CLK_MOUT_MISC_GIC, "mout_misc_gic", mout_misc_gic_p,
+ CLK_CON_MUX_MUX_CLK_MISC_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock misc_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MISC_BUSP, "dout_misc_busp", "mout_misc_bus_user",
+ CLK_CON_DIV_DIV_CLK_MISC_BUSP, 0, 3),
+ DIV(CLK_DOUT_MISC_GIC, "dout_misc_gic", "mout_misc_bus_user",
+ CLK_CON_DIV_DIV_CLK_MISC_GIC, 0, 3),
+};
+
+static const struct samsung_gate_clock misc_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MISC_MISC_CMU_MISC_PCLK,
+ "gout_misc_misc_cmu_misc_pclk", "dout_misc_busp",
+ CLK_CON_GAT_CLK_BLK_MISC_UID_MISC_CMU_MISC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_OTP_CON_BIRA_I_OSCCLK,
+ "gout_misc_otp_con_bira_i_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_BIRA_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_OTP_CON_BISR_I_OSCCLK,
+ "gout_misc_otp_con_bisr_i_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_BISR_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_OTP_CON_TOP_I_OSCCLK,
+ "gout_misc_otp_con_top_i_oscclk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_MISC_UID_OTP_CON_TOP_IPCLKPORT_I_OSCCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_CLK_MISC_OSCCLK_CLK,
+ "gout_misc_clk_misc_oscclk_clk", "oscclk",
+ CLK_CON_GAT_CLK_BLK_MISC_UID_RSTNSYNC_CLK_MISC_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_ADM_AHB_SSS_HCLKM,
+ "gout_misc_adm_ahb_sss_hclkm", "mout_misc_sss_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_AD_APB_DIT_PCLKM,
+ "gout_misc_ad_apb_dit_pclkm", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_AD_APB_DIT_IPCLKPORT_PCLKM,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_D_TZPC_MISC_PCLK,
+ "gout_misc_d_tzpc_misc_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_D_TZPC_MISC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_GIC_GICCLK,
+ "gout_misc_gic_gicclk", "mout_misc_gic",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_GIC_IPCLKPORT_GICCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_GPC_MISC_PCLK,
+ "gout_misc_gpc_misc_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_GPC_MISC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHM_AST_ICC_CPUGIC_I_CLK,
+ "gout_misc_lhm_ast_icc_gpugic_i_clk", "mout_misc_gic",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AST_ICC_CPUGIC_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHM_AXI_D_SSS_I_CLK,
+ "gout_misc_lhm_axi_d_sss_i_clk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_D_SSS_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHM_AXI_P_GIC_I_CLK,
+ "gout_misc_lhm_axi_p_gic_i_clk", "mout_misc_gic",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_P_GIC_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHM_AXI_P_MISC_I_CLK,
+ "gout_misc_lhm_axi_p_misc_i_clk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHM_AXI_P_MISC_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHS_ACEL_D_MISC_I_CLK,
+ "gout_misc_lhs_acel_d_misc_i_clk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_ACEL_D_MISC_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHS_AST_IRI_GICCPU_I_CLK,
+ "gout_misc_lhs_ast_iri_giccpu_i_clk", "mout_misc_gic",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_AST_IRI_GICCPU_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_LHS_AXI_D_SSS_I_CLK,
+ "gout_misc_lhs_axi_d_sss_i_clk", "mout_misc_sss_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_LHS_AXI_D_SSS_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_MCT_PCLK, "gout_misc_mct_pclk",
+ "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_MCT_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_OTP_CON_BIRA_PCLK,
+ "gout_misc_otp_con_bira_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_OTP_CON_BISR_PCLK,
+ "gout_misc_otp_con_bisr_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_BISR_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_OTP_CON_TOP_PCLK,
+ "gout_misc_otp_con_top_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_PDMA_ACLK, "gout_misc_pdma_aclk",
+ "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PDMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_PPMU_MISC_ACLK,
+ "gout_misc_ppmu_misc_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_MISC_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_PPMU_MISC_PCLK,
+ "gout_misc_ppmu_misc_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PPMU_MISC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_PUF_I_CLK,
+ "gout_misc_puf_i_clk", "mout_misc_sss_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_PUF_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_DIT_ACLK,
+ "gout_misc_qe_dit_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_DIT_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_DIT_PCLK,
+ "gout_misc_qe_dit_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_DIT_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_PDMA_ACLK,
+ "gout_misc_qe_pdma_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PDMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_PDMA_PCLK,
+ "gout_misc_qe_pdma_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PDMA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_PPMU_DMA_ACLK,
+ "gout_misc_qe_ppmu_dma_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PPMU_DMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_PPMU_DMA_PCLK,
+ "gout_misc_qe_ppmu_dma_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_PPMU_DMA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_RTIC_ACLK,
+ "gout_misc_qe_rtic_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_RTIC_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_RTIC_PCLK,
+ "gout_misc_qe_rtic_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_RTIC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_SPDMA_ACLK,
+ "gout_misc_qe_spdma_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SPDMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_SPDMA_PCLK,
+ "gout_misc_qe_spdma_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SPDMA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_SSS_ACLK,
+ "gout_misc_qe_sss_aclk", "mout_misc_sss_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SSS_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_QE_SSS_PCLK,
+ "gout_misc_qe_sss_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_QE_SSS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_CLK_MISC_BUSD_CLK,
+ "gout_misc_clk_misc_busd_clk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_BUSD_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_CLK_MISC_BUSP_CLK,
+ "gout_misc_clk_misc_busp_clk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_CLK_MISC_GIC_CLK,
+ "gout_misc_clk_misc_gic_clk", "mout_misc_gic",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_GIC_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_CLK_MISC_SSS_CLK,
+ "gout_misc_clk_misc_sss_clk", "mout_misc_sss_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RSTNSYNC_CLK_MISC_SSS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_RTIC_I_ACLK,
+ "gout_misc_rtic_i_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RTIC_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_RTIC_I_PCLK, "gout_misc_rtic_i_pclk",
+ "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_RTIC_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SPDMA_ACLK,
+ "gout_misc_spdma_ipclockport_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SPDMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_DIT_ACLK,
+ "gout_misc_ssmt_dit_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_DIT_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_DIT_PCLK,
+ "gout_misc_ssmt_dit_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_DIT_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_PDMA_ACLK,
+ "gout_misc_ssmt_pdma_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PDMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_PDMA_PCLK,
+ "gout_misc_ssmt_pdma_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PDMA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_PPMU_DMA_ACLK,
+ "gout_misc_ssmt_ppmu_dma_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PPMU_DMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_PPMU_DMA_PCLK,
+ "gout_misc_ssmt_ppmu_dma_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_PPMU_DMA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_RTIC_ACLK,
+ "gout_misc_ssmt_rtic_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_RTIC_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_RTIC_PCLK,
+ "gout_misc_ssmt_rtic_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_RTIC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_SPDMA_ACLK,
+ "gout_misc_ssmt_spdma_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SPDMA_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_SPDMA_PCLK,
+ "gout_misc_ssmt_spdma_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SPDMA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_SSS_ACLK,
+ "gout_misc_ssmt_sss_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SSS_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSMT_SSS_PCLK,
+ "gout_misc_ssmt_sss_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSMT_SSS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSS_I_ACLK,
+ "gout_misc_sss_i_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSS_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SSS_I_PCLK,
+ "gout_misc_sss_i_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SSS_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SYSMMU_MISC_CLK_S2,
+ "gout_misc_sysmmu_misc_clk_s2", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSMMU_MISC_IPCLKPORT_CLK_S2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SYSMMU_SSS_CLK_S1,
+ "gout_misc_sysmmu_sss_clk_s1", "mout_misc_sss_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSMMU_SSS_IPCLKPORT_CLK_S1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_SYSREG_MISC_PCLK,
+ "gout_misc_sysreg_misc_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_SYSREG_MISC_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_TMU_SUB_PCLK,
+ "gout_misc_tmu_sub_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_TMU_SUB_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_TMU_TOP_PCLK,
+ "gout_misc_tmu_top_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_TMU_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_WDT_CLUSTER0_PCLK,
+ "gout_misc_wdt_cluster0_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_WDT_CLUSTER1_PCLK,
+ "gout_misc_wdt_cluster1_pclk", "dout_misc_busp",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MISC_XIU_D_MISC_ACLK,
+ "gout_misc_xiu_d_misc_aclk", "mout_misc_bus_user",
+ CLK_CON_GAT_GOUT_BLK_MISC_UID_XIU_D_MISC_IPCLKPORT_ACLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info misc_cmu_info __initconst = {
+ .mux_clks = misc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(misc_mux_clks),
+ .div_clks = misc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(misc_div_clks),
+ .gate_clks = misc_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(misc_gate_clks),
+ .nr_clk_ids = CLKS_NR_MISC,
+ .clk_regs = misc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- platform_driver ----------------------------------------------------- */
+
+static int __init gs101_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id gs101_cmu_of_match[] = {
+ {
+ .compatible = "google,gs101-cmu-apm",
+ .data = &apm_cmu_info,
+ }, {
+ .compatible = "google,gs101-cmu-misc",
+ .data = &misc_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver gs101_cmu_driver __refdata = {
+ .driver = {
+ .name = "gs101-cmu",
+ .of_match_table = gs101_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = gs101_cmu_probe,
+};
+
+static int __init gs101_cmu_init(void)
+{
+ return platform_driver_register(&gs101_cmu_driver);
+}
+core_initcall(gs101_cmu_init);
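
As a side note on the registration above: .clk_name = "bus" names the parent clock that must be running for CMU register access (it is what the runtime-PM wrapper grabs before touching the block). A minimal, hypothetical sketch of a consumer claiming one of these gates — the "watchdog" connection name is illustrative and assumes a matching clock-names entry in the device tree:

	static int example_probe(struct platform_device *pdev)
	{
		/* e.g. wired to <&cmu_misc CLK_GOUT_MISC_WDT_CLUSTER0_PCLK> in DT */
		struct clk *pclk = devm_clk_get_enabled(&pdev->dev, "watchdog");

		return PTR_ERR_OR_ZERO(pclk);
	}
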
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 74934c618..4bbdf5e91 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -443,6 +443,9 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
fvco *= mdiv;
+ if (pll->type == pll_0516x)
+ fvco *= 2;
+
do_div(fvco, (pdiv << sdiv));
return (unsigned long)fvco;
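
For reference, the hunk above means a pll_0516x output rate is computed with the shared PLL0822X field layout but a doubled VCO:

	f_out = (2 * MDIV * f_in) / (PDIV * 2^SDIV)

whereas plain pll_0822x (and the pll_0517x/pll_0518x types added below, which take the same code path without the doubling) uses f_out = (MDIV * f_in) / (PDIV * 2^SDIV).
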
@@ -1316,6 +1319,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1417x:
case pll_0818x:
case pll_0822x:
+ case pll_0516x:
+ case pll_0517x:
+ case pll_0518x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
if (!pll->rate_table)
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 0725d485c..ffd3d52c0 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -38,6 +38,9 @@ enum samsung_pll_type {
pll_0822x,
pll_0831x,
pll_142xx,
+ pll_0516x,
+ pll_0517x,
+ pll_0518x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index ab9c3d7a2..516b71640 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -14,11 +14,11 @@
#include "clk-pll.h"
/**
- * struct samsung_clk_provider: information about clock provider
- * @reg_base: virtual address for the register base.
- * @dev: clock provider device needed for runtime PM.
- * @lock: maintains exclusion between callbacks for a given clock-provider.
- * @clk_data: holds clock related data like clk_hw* and number of clocks.
+ * struct samsung_clk_provider - information about clock provider
+ * @reg_base: virtual address for the register base
+ * @dev: clock provider device needed for runtime PM
+ * @lock: maintains exclusion between callbacks for a given clock-provider
+ * @clk_data: holds clock related data like clk_hw* and number of clocks
*/
struct samsung_clk_provider {
void __iomem *reg_base;
@@ -29,10 +29,10 @@ struct samsung_clk_provider {
};
/**
- * struct samsung_clock_alias: information about mux clock
- * @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
- * @alias: optional clock alias name to be assigned to this clock.
+ * struct samsung_clock_alias - information about mux clock
+ * @id: platform specific id of the clock
+ * @dev_name: name of the device to which this clock belongs
+ * @alias: optional clock alias name to be assigned to this clock
*/
struct samsung_clock_alias {
unsigned int id;
@@ -50,12 +50,12 @@ struct samsung_clock_alias {
#define MHZ (1000 * 1000)
/**
- * struct samsung_fixed_rate_clock: information about fixed-rate clock
- * @id: platform specific id of the clock.
- * @name: name of this fixed-rate clock.
- * @parent_name: optional parent clock name.
- * @flags: optional fixed-rate clock flags.
- * @fixed-rate: fixed clock rate of this clock.
+ * struct samsung_fixed_rate_clock - information about fixed-rate clock
+ * @id: platform specific id of the clock
+ * @name: name of this fixed-rate clock
+ * @parent_name: optional parent clock name
+ * @flags: optional fixed-rate clock flags
+ * @fixed_rate: fixed clock rate of this clock
*/
struct samsung_fixed_rate_clock {
unsigned int id;
@@ -74,14 +74,14 @@ struct samsung_fixed_rate_clock {
.fixed_rate = frate, \
}
-/*
- * struct samsung_fixed_factor_clock: information about fixed-factor clock
- * @id: platform specific id of the clock.
- * @name: name of this fixed-factor clock.
- * @parent_name: parent clock name.
- * @mult: fixed multiplication factor.
- * @div: fixed division factor.
- * @flags: optional fixed-factor clock flags.
+/**
+ * struct samsung_fixed_factor_clock - information about fixed-factor clock
+ * @id: platform specific id of the clock
+ * @name: name of this fixed-factor clock
+ * @parent_name: parent clock name
+ * @mult: fixed multiplication factor
+ * @div: fixed division factor
+ * @flags: optional fixed-factor clock flags
*/
struct samsung_fixed_factor_clock {
unsigned int id;
@@ -103,16 +103,16 @@ struct samsung_fixed_factor_clock {
}
/**
- * struct samsung_mux_clock: information about mux clock
- * @id: platform specific id of the clock.
- * @name: name of this mux clock.
- * @parent_names: array of pointer to parent clock names.
- * @num_parents: number of parents listed in @parent_names.
- * @flags: optional flags for basic clock.
- * @offset: offset of the register for configuring the mux.
- * @shift: starting bit location of the mux control bit-field in @reg.
- * @width: width of the mux control bit-field in @reg.
- * @mux_flags: flags for mux-type clock.
+ * struct samsung_mux_clock - information about mux clock
+ * @id: platform specific id of the clock
+ * @name: name of this mux clock
+ * @parent_names: array of pointer to parent clock names
+ * @num_parents: number of parents listed in @parent_names
+ * @flags: optional flags for basic clock
+ * @offset: offset of the register for configuring the mux
+ * @shift: starting bit location of the mux control bit-field in @reg
+ * @width: width of the mux control bit-field in @reg
+ * @mux_flags: flags for mux-type clock
*/
struct samsung_mux_clock {
unsigned int id;
@@ -146,14 +146,16 @@ struct samsung_mux_clock {
__MUX(_id, cname, pnames, o, s, w, f, mf)
/**
- * @id: platform specific id of the clock.
- * struct samsung_div_clock: information about div clock
- * @name: name of this div clock.
- * @parent_name: name of the parent clock.
- * @flags: optional flags for basic clock.
- * @offset: offset of the register for configuring the div.
- * @shift: starting bit location of the div control bit-field in @reg.
- * @div_flags: flags for div-type clock.
+ * struct samsung_div_clock - information about div clock
+ * @id: platform specific id of the clock
+ * @name: name of this div clock
+ * @parent_name: name of the parent clock
+ * @flags: optional flags for basic clock
+ * @offset: offset of the register for configuring the div
+ * @shift: starting bit location of the div control bit-field in @reg
+ * @width: width of the bitfield
+ * @div_flags: flags for div-type clock
+ * @table: array of divider/value pairs ending with a div set to 0
*/
struct samsung_div_clock {
unsigned int id;
@@ -190,14 +192,14 @@ struct samsung_div_clock {
__DIV(_id, cname, pname, o, s, w, 0, 0, t)
/**
- * struct samsung_gate_clock: information about gate clock
- * @id: platform specific id of the clock.
- * @name: name of this gate clock.
- * @parent_name: name of the parent clock.
- * @flags: optional flags for basic clock.
- * @offset: offset of the register for configuring the gate.
- * @bit_idx: bit index of the gate control bit-field in @reg.
- * @gate_flags: flags for gate-type clock.
+ * struct samsung_gate_clock - information about gate clock
+ * @id: platform specific id of the clock
+ * @name: name of this gate clock
+ * @parent_name: name of the parent clock
+ * @flags: optional flags for basic clock
+ * @offset: offset of the register for configuring the gate
+ * @bit_idx: bit index of the gate control bit-field in @reg
+ * @gate_flags: flags for gate-type clock
*/
struct samsung_gate_clock {
unsigned int id;
@@ -226,9 +228,9 @@ struct samsung_gate_clock {
#define PNAME(x) static const char * const x[] __initconst
/**
- * struct samsung_clk_reg_dump: register dump of clock controller registers.
- * @offset: clock register offset from the controller base address.
- * @value: the value to be register at offset.
+ * struct samsung_clk_reg_dump - register dump of clock controller registers
+ * @offset: clock register offset from the controller base address
+ * @value: the value to be written at @offset
*/
struct samsung_clk_reg_dump {
u32 offset;
@@ -236,14 +238,15 @@ struct samsung_clk_reg_dump {
};
/**
- * struct samsung_pll_clock: information about pll clock
- * @id: platform specific id of the clock.
- * @name: name of this pll clock.
- * @parent_name: name of the parent clock.
- * @flags: optional flags for basic clock.
- * @con_offset: offset of the register for configuring the PLL.
- * @lock_offset: offset of the register for locking the PLL.
- * @type: Type of PLL to be registered.
+ * struct samsung_pll_clock - information about pll clock
+ * @id: platform specific id of the clock
+ * @name: name of this pll clock
+ * @parent_name: name of the parent clock
+ * @flags: optional flags for basic clock
+ * @con_offset: offset of the register for configuring the PLL
+ * @lock_offset: offset of the register for locking the PLL
+ * @type: type of PLL to be registered
+ * @rate_table: array of PLL settings for possible PLL rates
*/
struct samsung_pll_clock {
unsigned int id;
@@ -302,39 +305,51 @@ struct samsung_clock_reg_cache {
unsigned int rsuspend_num;
};
+/**
+ * struct samsung_cmu_info - all clocks information needed for CMU registration
+ * @pll_clks: list of PLL clocks
+ * @nr_pll_clks: count of clocks in @pll_clks
+ * @mux_clks: list of mux clocks
+ * @nr_mux_clks: count of clocks in @mux_clks
+ * @div_clks: list of div clocks
+ * @nr_div_clks: count of clocks in @div_clks
+ * @gate_clks: list of gate clocks
+ * @nr_gate_clks: count of clocks in @gate_clks
+ * @fixed_clks: list of fixed clocks
+ * @nr_fixed_clks: count of clocks in @fixed_clks
+ * @fixed_factor_clks: list of fixed factor clocks
+ * @nr_fixed_factor_clks: count of clocks in @fixed_factor_clks
+ * @nr_clk_ids: total number of clocks with IDs assigned
+ * @cpu_clks: list of CPU clocks
+ * @nr_cpu_clks: count of clocks in @cpu_clks
+ * @clk_regs: list of clock registers
+ * @nr_clk_regs: count of clock registers in @clk_regs
+ * @suspend_regs: list of clock registers to set before suspend
+ * @nr_suspend_regs: count of clock registers in @suspend_regs
+ * @clk_name: name of the parent clock needed for CMU register access
+ */
struct samsung_cmu_info {
- /* list of pll clocks and respective count */
const struct samsung_pll_clock *pll_clks;
unsigned int nr_pll_clks;
- /* list of mux clocks and respective count */
const struct samsung_mux_clock *mux_clks;
unsigned int nr_mux_clks;
- /* list of div clocks and respective count */
const struct samsung_div_clock *div_clks;
unsigned int nr_div_clks;
- /* list of gate clocks and respective count */
const struct samsung_gate_clock *gate_clks;
unsigned int nr_gate_clks;
- /* list of fixed clocks and respective count */
const struct samsung_fixed_rate_clock *fixed_clks;
unsigned int nr_fixed_clks;
- /* list of fixed factor clocks and respective count */
const struct samsung_fixed_factor_clock *fixed_factor_clks;
unsigned int nr_fixed_factor_clks;
- /* total number of clocks with IDs assigned*/
unsigned int nr_clk_ids;
- /* list of cpu clocks and respective count */
const struct samsung_cpu_clock *cpu_clks;
unsigned int nr_cpu_clks;
- /* list and number of clocks registers */
const unsigned long *clk_regs;
unsigned int nr_clk_regs;
- /* list and number of clocks registers to set before suspend */
const struct samsung_clk_reg_dump *suspend_regs;
unsigned int nr_suspend_regs;
- /* name of the parent clock needed for CMU register access */
const char *clk_name;
};
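
A minimal sketch of the @suspend_regs mechanism documented above — the register name and value here are hypothetical, not taken from any driver:

	/* value restored to the register at .offset around suspend/resume */
	static const struct samsung_clk_reg_dump example_suspend_regs[] = {
		{ .offset = PLL_CON0_EXAMPLE_MUX, .value = 0 },
	};

	/* hooked up via .suspend_regs / .nr_suspend_regs in samsung_cmu_info */
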
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
index ee4bda14a..1fcf4e62f 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100-audio.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
@@ -79,7 +79,7 @@ static const struct jh71x0_clk_data jh7100_audclk_data[] = {
JH71X0_GDIV(JH7100_AUDCLK_USB_LPM, "usb_lpm", CLK_IGNORE_UNUSED, 4, JH7100_AUDCLK_USB_APB),
JH71X0_GDIV(JH7100_AUDCLK_USB_STB, "usb_stb", CLK_IGNORE_UNUSED, 3, JH7100_AUDCLK_USB_APB),
JH71X0__DIV(JH7100_AUDCLK_APB_EN, "apb_en", 8, JH7100_AUDCLK_DOM7AHB_BUS),
- JH71X0__MUX(JH7100_AUDCLK_VAD_MEM, "vad_mem", 2,
+ JH71X0__MUX(JH7100_AUDCLK_VAD_MEM, "vad_mem", 0, 2,
JH7100_AUDCLK_VAD_INTMEM,
JH7100_AUDCLK_AUDIO_12288),
};
diff --git a/drivers/clk/starfive/clk-starfive-jh7100.c b/drivers/clk/starfive/clk-starfive-jh7100.c
index 69cc11ea7..03f6f26a1 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100.c
@@ -24,48 +24,48 @@
#define JH7100_CLK_GMAC_GR_MII_RX (JH7100_CLK_END + 3)
static const struct jh71x0_clk_data jh7100_clk_data[] __initconst = {
- JH71X0__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 4,
+ JH71X0__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 0, 4,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_DLA_ROOT, "dla_root", 3,
+ JH71X0__MUX(JH7100_CLK_DLA_ROOT, "dla_root", 0, 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_DSP_ROOT, "dsp_root", 4,
+ JH71X0__MUX(JH7100_CLK_DSP_ROOT, "dsp_root", 0, 4,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_GMACUSB_ROOT, "gmacusb_root", 3,
+ JH71X0__MUX(JH7100_CLK_GMACUSB_ROOT, "gmacusb_root", 0, 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_PERH0_ROOT, "perh0_root", 2,
+ JH71X0__MUX(JH7100_CLK_PERH0_ROOT, "perh0_root", 0, 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT),
- JH71X0__MUX(JH7100_CLK_PERH1_ROOT, "perh1_root", 2,
+ JH71X0__MUX(JH7100_CLK_PERH1_ROOT, "perh1_root", 0, 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_VIN_ROOT, "vin_root", 3,
+ JH71X0__MUX(JH7100_CLK_VIN_ROOT, "vin_root", 0, 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_VOUT_ROOT, "vout_root", 3,
+ JH71X0__MUX(JH7100_CLK_VOUT_ROOT, "vout_root", 0, 3,
JH7100_CLK_OSC_AUD,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL2_OUT),
JH71X0_GDIV(JH7100_CLK_AUDIO_ROOT, "audio_root", 0, 8, JH7100_CLK_PLL0_OUT),
- JH71X0__MUX(JH7100_CLK_CDECHIFI4_ROOT, "cdechifi4_root", 3,
+ JH71X0__MUX(JH7100_CLK_CDECHIFI4_ROOT, "cdechifi4_root", 0, 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL1_OUT,
JH7100_CLK_PLL2_OUT),
- JH71X0__MUX(JH7100_CLK_CDEC_ROOT, "cdec_root", 3,
+ JH71X0__MUX(JH7100_CLK_CDEC_ROOT, "cdec_root", 0, 3,
JH7100_CLK_OSC_SYS,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL1_OUT),
- JH71X0__MUX(JH7100_CLK_VOUTBUS_ROOT, "voutbus_root", 3,
+ JH71X0__MUX(JH7100_CLK_VOUTBUS_ROOT, "voutbus_root", 0, 3,
JH7100_CLK_OSC_AUD,
JH7100_CLK_PLL0_OUT,
JH7100_CLK_PLL2_OUT),
@@ -76,7 +76,7 @@ static const struct jh71x0_clk_data jh7100_clk_data[] __initconst = {
JH71X0_GDIV(JH7100_CLK_PLL0_TESTOUT, "pll0_testout", 0, 31, JH7100_CLK_PERH0_SRC),
JH71X0_GDIV(JH7100_CLK_PLL1_TESTOUT, "pll1_testout", 0, 31, JH7100_CLK_DLA_ROOT),
JH71X0_GDIV(JH7100_CLK_PLL2_TESTOUT, "pll2_testout", 0, 31, JH7100_CLK_PERH1_SRC),
- JH71X0__MUX(JH7100_CLK_PLL2_REF, "pll2_refclk", 2,
+ JH71X0__MUX(JH7100_CLK_PLL2_REF, "pll2_refclk", 0, 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_OSC_AUD),
JH71X0__DIV(JH7100_CLK_CPU_CORE, "cpu_core", 8, JH7100_CLK_CPUNBUS_ROOT_DIV),
@@ -142,7 +142,7 @@ static const struct jh71x0_clk_data jh7100_clk_data[] __initconst = {
JH71X0__DIV(JH7100_CLK_NOC_COG, "noc_cog", 8, JH7100_CLK_DLA_ROOT),
JH71X0_GATE(JH7100_CLK_NNE_AHB, "nne_ahb", 0, JH7100_CLK_AHB_BUS),
JH71X0__DIV(JH7100_CLK_NNEBUS_SRC1, "nnebus_src1", 4, JH7100_CLK_DSP_ROOT),
- JH71X0__MUX(JH7100_CLK_NNE_BUS, "nne_bus", 2,
+ JH71X0__MUX(JH7100_CLK_NNE_BUS, "nne_bus", 0, 2,
JH7100_CLK_CPU_AXI,
JH7100_CLK_NNEBUS_SRC1),
JH71X0_GATE(JH7100_CLK_NNE_AXI, "nne_axi", 0, JH7100_CLK_NNE_BUS),
@@ -166,7 +166,7 @@ static const struct jh71x0_clk_data jh7100_clk_data[] __initconst = {
JH71X0_GDIV(JH7100_CLK_USBPHY_125M, "usbphy_125m", 0, 8, JH7100_CLK_USBPHY_ROOTDIV),
JH71X0_GDIV(JH7100_CLK_USBPHY_PLLDIV25M, "usbphy_plldiv25m", 0, 32,
JH7100_CLK_USBPHY_ROOTDIV),
- JH71X0__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2,
+ JH71X0__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 0, 2,
JH7100_CLK_OSC_SYS,
JH7100_CLK_USBPHY_PLLDIV25M),
JH71X0_FDIV(JH7100_CLK_AUDIO_DIV, "audio_div", JH7100_CLK_AUDIO_ROOT),
@@ -200,12 +200,12 @@ static const struct jh71x0_clk_data jh7100_clk_data[] __initconst = {
JH71X0_GDIV(JH7100_CLK_GMAC_GTX, "gmac_gtxclk", 0, 255, JH7100_CLK_GMAC_ROOT_DIV),
JH71X0_GDIV(JH7100_CLK_GMAC_RMII_TX, "gmac_rmii_txclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
JH71X0_GDIV(JH7100_CLK_GMAC_RMII_RX, "gmac_rmii_rxclk", 0, 8, JH7100_CLK_GMAC_RMII_REF),
- JH71X0__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", 3,
+ JH71X0__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, 3,
JH7100_CLK_GMAC_GTX,
JH7100_CLK_GMAC_TX_INV,
JH7100_CLK_GMAC_RMII_TX),
JH71X0__INV(JH7100_CLK_GMAC_TX_INV, "gmac_tx_inv", JH7100_CLK_GMAC_TX),
- JH71X0__MUX(JH7100_CLK_GMAC_RX_PRE, "gmac_rx_pre", 2,
+ JH71X0__MUX(JH7100_CLK_GMAC_RX_PRE, "gmac_rx_pre", 0, 2,
JH7100_CLK_GMAC_GR_MII_RX,
JH7100_CLK_GMAC_RMII_RX),
JH71X0__INV(JH7100_CLK_GMAC_RX_INV, "gmac_rx_inv", JH7100_CLK_GMAC_RX_PRE),
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-aon.c b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
index 62954eb7b..418efdad7 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-aon.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
@@ -26,7 +26,7 @@
static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
/* source */
JH71X0__DIV(JH7110_AONCLK_OSC_DIV4, "osc_div4", 4, JH7110_AONCLK_OSC),
- JH71X0__MUX(JH7110_AONCLK_APB_FUNC, "apb_func", 2,
+ JH71X0__MUX(JH7110_AONCLK_APB_FUNC, "apb_func", 0, 2,
JH7110_AONCLK_OSC_DIV4,
JH7110_AONCLK_OSC),
/* gmac0 */
@@ -39,7 +39,7 @@ static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
JH7110_AONCLK_GMAC0_GTXCLK,
JH7110_AONCLK_GMAC0_RMII_RTX),
JH71X0__INV(JH7110_AONCLK_GMAC0_TX_INV, "gmac0_tx_inv", JH7110_AONCLK_GMAC0_TX),
- JH71X0__MUX(JH7110_AONCLK_GMAC0_RX, "gmac0_rx", 2,
+ JH71X0__MUX(JH7110_AONCLK_GMAC0_RX, "gmac0_rx", 0, 2,
JH7110_AONCLK_GMAC0_RGMII_RXIN,
JH7110_AONCLK_GMAC0_RMII_RTX),
JH71X0__INV(JH7110_AONCLK_GMAC0_RX_INV, "gmac0_rx_inv", JH7110_AONCLK_GMAC0_RX),
@@ -48,7 +48,7 @@ static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
/* rtc */
JH71X0_GATE(JH7110_AONCLK_RTC_APB, "rtc_apb", 0, JH7110_AONCLK_APB_BUS),
JH71X0__DIV(JH7110_AONCLK_RTC_INTERNAL, "rtc_internal", 1022, JH7110_AONCLK_OSC),
- JH71X0__MUX(JH7110_AONCLK_RTC_32K, "rtc_32k", 2,
+ JH71X0__MUX(JH7110_AONCLK_RTC_32K, "rtc_32k", 0, 2,
JH7110_AONCLK_RTC_OSC,
JH7110_AONCLK_RTC_INTERNAL),
JH71X0_GATE(JH7110_AONCLK_RTC_CAL, "rtc_cal", 0, JH7110_AONCLK_OSC),
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index ce034ed28..929b87882 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -53,7 +53,7 @@ static const struct jh71x0_clk_data jh7110_ispclk_data[] = {
JH7110_ISPCLK_MIPI_RX0_PXL),
JH71X0_GATE(JH7110_ISPCLK_VIN_PIXEL_IF3, "vin_pixel_if3", 0,
JH7110_ISPCLK_MIPI_RX0_PXL),
- JH71X0__MUX(JH7110_ISPCLK_VIN_P_AXI_WR, "vin_p_axi_wr", 2,
+ JH71X0__MUX(JH7110_ISPCLK_VIN_P_AXI_WR, "vin_p_axi_wr", 0, 2,
JH7110_ISPCLK_MIPI_RX0_PXL,
JH7110_ISPCLK_DVP_INV),
/* ispv2_top_wrapper */
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 3884eff9f..8f5e5abfa 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -36,18 +36,18 @@
static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
/* root */
- JH71X0__MUX(JH7110_SYSCLK_CPU_ROOT, "cpu_root", 2,
+ JH71X0__MUX(JH7110_SYSCLK_CPU_ROOT, "cpu_root", 0, 2,
JH7110_SYSCLK_OSC,
JH7110_SYSCLK_PLL0_OUT),
JH71X0__DIV(JH7110_SYSCLK_CPU_CORE, "cpu_core", 7, JH7110_SYSCLK_CPU_ROOT),
JH71X0__DIV(JH7110_SYSCLK_CPU_BUS, "cpu_bus", 2, JH7110_SYSCLK_CPU_CORE),
- JH71X0__MUX(JH7110_SYSCLK_GPU_ROOT, "gpu_root", 2,
+ JH71X0__MUX(JH7110_SYSCLK_GPU_ROOT, "gpu_root", 0, 2,
JH7110_SYSCLK_PLL2_OUT,
JH7110_SYSCLK_PLL1_OUT),
JH71X0_MDIV(JH7110_SYSCLK_PERH_ROOT, "perh_root", 2, 2,
JH7110_SYSCLK_PLL0_OUT,
JH7110_SYSCLK_PLL2_OUT),
- JH71X0__MUX(JH7110_SYSCLK_BUS_ROOT, "bus_root", 2,
+ JH71X0__MUX(JH7110_SYSCLK_BUS_ROOT, "bus_root", 0, 2,
JH7110_SYSCLK_OSC,
JH7110_SYSCLK_PLL2_OUT),
JH71X0__DIV(JH7110_SYSCLK_NOCSTG_BUS, "nocstg_bus", 3, JH7110_SYSCLK_BUS_ROOT),
@@ -62,7 +62,7 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0__DIV(JH7110_SYSCLK_PLL2_DIV2, "pll2_div2", 2, JH7110_SYSCLK_PLL2_OUT),
JH71X0__DIV(JH7110_SYSCLK_AUDIO_ROOT, "audio_root", 8, JH7110_SYSCLK_PLL2_OUT),
JH71X0__DIV(JH7110_SYSCLK_MCLK_INNER, "mclk_inner", 64, JH7110_SYSCLK_AUDIO_ROOT),
- JH71X0__MUX(JH7110_SYSCLK_MCLK, "mclk", 2,
+ JH71X0__MUX(JH7110_SYSCLK_MCLK, "mclk", 0, 2,
JH7110_SYSCLK_MCLK_INNER,
JH7110_SYSCLK_MCLK_EXT),
JH71X0_GATE(JH7110_SYSCLK_MCLK_OUT, "mclk_out", 0, JH7110_SYSCLK_MCLK_INNER),
@@ -96,7 +96,7 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0__DIV(JH7110_SYSCLK_OSC_DIV2, "osc_div2", 2, JH7110_SYSCLK_OSC),
JH71X0__DIV(JH7110_SYSCLK_PLL1_DIV4, "pll1_div4", 2, JH7110_SYSCLK_PLL1_DIV2),
JH71X0__DIV(JH7110_SYSCLK_PLL1_DIV8, "pll1_div8", 2, JH7110_SYSCLK_PLL1_DIV4),
- JH71X0__MUX(JH7110_SYSCLK_DDR_BUS, "ddr_bus", 4,
+ JH71X0__MUX(JH7110_SYSCLK_DDR_BUS, "ddr_bus", 0, 4,
JH7110_SYSCLK_OSC_DIV2,
JH7110_SYSCLK_PLL1_DIV2,
JH7110_SYSCLK_PLL1_DIV4,
@@ -186,7 +186,7 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0__DIV(JH7110_SYSCLK_GMAC1_RMII_RTX, "gmac1_rmii_rtx", 30,
JH7110_SYSCLK_GMAC1_RMII_REFIN),
JH71X0_GDIV(JH7110_SYSCLK_GMAC1_PTP, "gmac1_ptp", 0, 31, JH7110_SYSCLK_GMAC_SRC),
- JH71X0__MUX(JH7110_SYSCLK_GMAC1_RX, "gmac1_rx", 2,
+ JH71X0__MUX(JH7110_SYSCLK_GMAC1_RX, "gmac1_rx", 0, 2,
JH7110_SYSCLK_GMAC1_RGMII_RXIN,
JH7110_SYSCLK_GMAC1_RMII_RTX),
JH71X0__INV(JH7110_SYSCLK_GMAC1_RX_INV, "gmac1_rx_inv", JH7110_SYSCLK_GMAC1_RX),
@@ -270,11 +270,11 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0_MDIV(JH7110_SYSCLK_I2STX0_LRCK_MST, "i2stx0_lrck_mst", 64, 2,
JH7110_SYSCLK_I2STX0_BCLK_MST_INV,
JH7110_SYSCLK_I2STX0_BCLK_MST),
- JH71X0__MUX(JH7110_SYSCLK_I2STX0_BCLK, "i2stx0_bclk", 2,
+ JH71X0__MUX(JH7110_SYSCLK_I2STX0_BCLK, "i2stx0_bclk", 0, 2,
JH7110_SYSCLK_I2STX0_BCLK_MST,
JH7110_SYSCLK_I2STX_BCLK_EXT),
JH71X0__INV(JH7110_SYSCLK_I2STX0_BCLK_INV, "i2stx0_bclk_inv", JH7110_SYSCLK_I2STX0_BCLK),
- JH71X0__MUX(JH7110_SYSCLK_I2STX0_LRCK, "i2stx0_lrck", 2,
+ JH71X0__MUX(JH7110_SYSCLK_I2STX0_LRCK, "i2stx0_lrck", 0, 2,
JH7110_SYSCLK_I2STX0_LRCK_MST,
JH7110_SYSCLK_I2STX_LRCK_EXT),
/* i2stx1 */
@@ -285,11 +285,11 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0_MDIV(JH7110_SYSCLK_I2STX1_LRCK_MST, "i2stx1_lrck_mst", 64, 2,
JH7110_SYSCLK_I2STX1_BCLK_MST_INV,
JH7110_SYSCLK_I2STX1_BCLK_MST),
- JH71X0__MUX(JH7110_SYSCLK_I2STX1_BCLK, "i2stx1_bclk", 2,
+ JH71X0__MUX(JH7110_SYSCLK_I2STX1_BCLK, "i2stx1_bclk", 0, 2,
JH7110_SYSCLK_I2STX1_BCLK_MST,
JH7110_SYSCLK_I2STX_BCLK_EXT),
JH71X0__INV(JH7110_SYSCLK_I2STX1_BCLK_INV, "i2stx1_bclk_inv", JH7110_SYSCLK_I2STX1_BCLK),
- JH71X0__MUX(JH7110_SYSCLK_I2STX1_LRCK, "i2stx1_lrck", 2,
+ JH71X0__MUX(JH7110_SYSCLK_I2STX1_LRCK, "i2stx1_lrck", 0, 2,
JH7110_SYSCLK_I2STX1_LRCK_MST,
JH7110_SYSCLK_I2STX_LRCK_EXT),
/* i2srx */
@@ -300,11 +300,11 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0_MDIV(JH7110_SYSCLK_I2SRX_LRCK_MST, "i2srx_lrck_mst", 64, 2,
JH7110_SYSCLK_I2SRX_BCLK_MST_INV,
JH7110_SYSCLK_I2SRX_BCLK_MST),
- JH71X0__MUX(JH7110_SYSCLK_I2SRX_BCLK, "i2srx_bclk", 2,
+ JH71X0__MUX(JH7110_SYSCLK_I2SRX_BCLK, "i2srx_bclk", 0, 2,
JH7110_SYSCLK_I2SRX_BCLK_MST,
JH7110_SYSCLK_I2SRX_BCLK_EXT),
JH71X0__INV(JH7110_SYSCLK_I2SRX_BCLK_INV, "i2srx_bclk_inv", JH7110_SYSCLK_I2SRX_BCLK),
- JH71X0__MUX(JH7110_SYSCLK_I2SRX_LRCK, "i2srx_lrck", 2,
+ JH71X0__MUX(JH7110_SYSCLK_I2SRX_LRCK, "i2srx_lrck", 0, 2,
JH7110_SYSCLK_I2SRX_LRCK_MST,
JH7110_SYSCLK_I2SRX_LRCK_EXT),
/* pdm */
@@ -314,7 +314,7 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH71X0_GATE(JH7110_SYSCLK_TDM_AHB, "tdm_ahb", 0, JH7110_SYSCLK_AHB0),
JH71X0_GATE(JH7110_SYSCLK_TDM_APB, "tdm_apb", 0, JH7110_SYSCLK_APB0),
JH71X0_GDIV(JH7110_SYSCLK_TDM_INTERNAL, "tdm_internal", 0, 64, JH7110_SYSCLK_MCLK),
- JH71X0__MUX(JH7110_SYSCLK_TDM_TDM, "tdm_tdm", 2,
+ JH71X0__MUX(JH7110_SYSCLK_TDM_TDM, "tdm_tdm", 0, 2,
JH7110_SYSCLK_TDM_INTERNAL,
JH7110_SYSCLK_TDM_EXT),
JH71X0__INV(JH7110_SYSCLK_TDM_TDM_INV, "tdm_tdm_inv", JH7110_SYSCLK_TDM_TDM),
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index 34bb11c72..23e052fc1 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -61,10 +61,10 @@ struct jh71x0_clk_data {
.parents = { [0] = _parent }, \
}
-#define JH71X0__MUX(_idx, _name, _nparents, ...) \
+#define JH71X0__MUX(_idx, _name, _flags, _nparents, ...) \
[_idx] = { \
.name = _name, \
- .flags = 0, \
+ .flags = _flags, \
.max = ((_nparents) - 1) << JH71X0_CLK_MUX_SHIFT, \
.parents = { __VA_ARGS__ }, \
}
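
Net effect of the macro change: every mux caller now passes the clk framework flags explicitly instead of getting a hardcoded 0, so e.g. the gmac_tx entry from the jh7100 hunk above becomes:

	/* before: .flags were always 0 */
	JH71X0__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", 3, ...),
	/* after: rate requests propagate to the currently selected parent */
	JH71X0__MUX(JH7100_CLK_GMAC_TX, "gmac_tx",
		    CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, 3, ...),
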
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
new file mode 100644
index 000000000..3c8493a94
--- /dev/null
+++ b/drivers/clk/stm32/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Common clock support for the STMicroelectronics SoC family.
+
+menuconfig COMMON_CLK_STM32MP
+ bool "Clock support for common STM32MP clocks"
+ depends on ARCH_STM32 || COMPILE_TEST
+ default y
+ select RESET_CONTROLLER
+ help
+ Support for STM32MP SoC family clocks.
+
+if COMMON_CLK_STM32MP
+
+config COMMON_CLK_STM32MP135
+ bool "Clock driver for stm32mp13x clocks"
+ depends on ARM || COMPILE_TEST
+ default y
+ help
+ Support for stm32mp13x SoC family clocks.
+
+config COMMON_CLK_STM32MP157
+ bool "Clock driver for stm32mp15x clocks"
+ depends on ARM || COMPILE_TEST
+ default y
+ help
+ Support for stm32mp15x SoC family clocks.
+
+endif
+
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
index 95bd2230b..5ced7fe3d 100644
--- a/drivers/clk/stm32/Makefile
+++ b/drivers/clk/stm32/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
+obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index 067b918a8..58705fcad 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -70,6 +70,7 @@ static int stm32_rcc_clock_init(struct device *dev,
int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
void __iomem *base)
{
+ const struct stm32_rcc_match_data *rcc_match_data;
const struct of_device_id *match;
int err;
@@ -79,8 +80,10 @@ int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
return -ENODEV;
}
+ rcc_match_data = match->data;
+
/* RCC Reset Configuration */
- err = stm32_rcc_reset_init(dev, match, base);
+ err = stm32_rcc_reset_init(dev, rcc_match_data->reset_data, base);
if (err) {
pr_err("stm32 reset failed to initialize\n");
return err;
diff --git a/drivers/clk/stm32/clk-stm32-core.h b/drivers/clk/stm32/clk-stm32-core.h
index 76cffda02..bb5aa19a7 100644
--- a/drivers/clk/stm32/clk-stm32-core.h
+++ b/drivers/clk/stm32/clk-stm32-core.h
@@ -70,15 +70,12 @@ struct stm32_rcc_match_data {
const struct clock_config *tab_clocks;
unsigned int maxbinding;
struct clk_stm32_clock_data *clock_data;
- u32 clear_offset;
+ struct clk_stm32_reset_data *reset_data;
int (*check_security)(void __iomem *base,
const struct clock_config *cfg);
int (*multi_mux)(void __iomem *base, const struct clock_config *cfg);
};
-int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
- void __iomem *base);
-
int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data,
void __iomem *base);
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index 939779f66..7e2337297 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -20,6 +20,10 @@
#include <dt-bindings/clock/stm32mp1-clks.h>
+#include "reset-stm32.h"
+
+#define STM32MP1_RESET_ID_MASK GENMASK(15, 0)
+
static DEFINE_SPINLOCK(rlock);
#define RCC_OCENSETR 0x0C
@@ -2137,22 +2141,27 @@ struct stm32_rcc_match_data {
const struct clock_config *cfg;
unsigned int num;
unsigned int maxbinding;
- u32 clear_offset;
+ struct clk_stm32_reset_data *reset_data;
bool (*check_security)(const struct clock_config *cfg);
};
+static struct clk_stm32_reset_data stm32mp1_reset_data = {
+ .nr_lines = STM32MP1_RESET_ID_MASK,
+ .clear_offset = RCC_CLR,
+};
+
static struct stm32_rcc_match_data stm32mp1_data = {
.cfg = stm32mp1_clock_cfg,
.num = ARRAY_SIZE(stm32mp1_clock_cfg),
.maxbinding = STM32MP1_LAST_CLK,
- .clear_offset = RCC_CLR,
+ .reset_data = &stm32mp1_reset_data,
};
static struct stm32_rcc_match_data stm32mp1_data_secure = {
.cfg = stm32mp1_clock_cfg,
.num = ARRAY_SIZE(stm32mp1_clock_cfg),
.maxbinding = STM32MP1_LAST_CLK,
- .clear_offset = RCC_CLR,
+ .reset_data = &stm32mp1_reset_data,
.check_security = &stm32_check_security
};
@@ -2193,113 +2202,6 @@ static int stm32_register_hw_clk(struct device *dev,
return 0;
}
-#define STM32_RESET_ID_MASK GENMASK(15, 0)
-
-struct stm32_reset_data {
- /* reset lock */
- spinlock_t lock;
- struct reset_controller_dev rcdev;
- void __iomem *membase;
- u32 clear_offset;
-};
-
-static inline struct stm32_reset_data *
-to_stm32_reset_data(struct reset_controller_dev *rcdev)
-{
- return container_of(rcdev, struct stm32_reset_data, rcdev);
-}
-
-static int stm32_reset_update(struct reset_controller_dev *rcdev,
- unsigned long id, bool assert)
-{
- struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
-
- if (data->clear_offset) {
- void __iomem *addr;
-
- addr = data->membase + (bank * reg_width);
- if (!assert)
- addr += data->clear_offset;
-
- writel(BIT(offset), addr);
-
- } else {
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * reg_width));
-
- if (assert)
- reg |= BIT(offset);
- else
- reg &= ~BIT(offset);
-
- writel(reg, data->membase + (bank * reg_width));
-
- spin_unlock_irqrestore(&data->lock, flags);
- }
-
- return 0;
-}
-
-static int stm32_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return stm32_reset_update(rcdev, id, true);
-}
-
-static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return stm32_reset_update(rcdev, id, false);
-}
-
-static int stm32_reset_status(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- u32 reg;
-
- reg = readl(data->membase + (bank * reg_width));
-
- return !!(reg & BIT(offset));
-}
-
-static const struct reset_control_ops stm32_reset_ops = {
- .assert = stm32_reset_assert,
- .deassert = stm32_reset_deassert,
- .status = stm32_reset_status,
-};
-
-static int stm32_rcc_reset_init(struct device *dev, void __iomem *base,
- const struct of_device_id *match)
-{
- const struct stm32_rcc_match_data *data = match->data;
- struct stm32_reset_data *reset_data = NULL;
-
- reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
- if (!reset_data)
- return -ENOMEM;
-
- spin_lock_init(&reset_data->lock);
- reset_data->membase = base;
- reset_data->rcdev.owner = THIS_MODULE;
- reset_data->rcdev.ops = &stm32_reset_ops;
- reset_data->rcdev.of_node = dev_of_node(dev);
- reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK;
- reset_data->clear_offset = data->clear_offset;
-
- return reset_controller_register(&reset_data->rcdev);
-}
-
static int stm32_rcc_clock_init(struct device *dev, void __iomem *base,
const struct of_device_id *match)
{
@@ -2342,6 +2244,7 @@ static int stm32_rcc_clock_init(struct device *dev, void __iomem *base,
static int stm32_rcc_init(struct device *dev, void __iomem *base,
const struct of_device_id *match_data)
{
+ const struct stm32_rcc_match_data *rcc_match_data;
const struct of_device_id *match;
int err;
@@ -2351,8 +2254,10 @@ static int stm32_rcc_init(struct device *dev, void __iomem *base,
return -ENODEV;
}
+ rcc_match_data = match->data;
+
/* RCC Reset Configuration */
- err = stm32_rcc_reset_init(dev, base, match);
+ err = stm32_rcc_reset_init(dev, rcc_match_data->reset_data, base);
if (err) {
pr_err("stm32mp1 reset failed to initialize\n");
return err;
diff --git a/drivers/clk/stm32/clk-stm32mp13.c b/drivers/clk/stm32/clk-stm32mp13.c
index c4a737482..d4ecb3c34 100644
--- a/drivers/clk/stm32/clk-stm32mp13.c
+++ b/drivers/clk/stm32/clk-stm32mp13.c
@@ -10,8 +10,10 @@
#include <linux/platform_device.h>
#include <dt-bindings/clock/stm32mp13-clks.h>
#include "clk-stm32-core.h"
+#include "reset-stm32.h"
#include "stm32mp13_rcc.h"
+#define STM32MP1_RESET_ID_MASK GENMASK(15, 0)
#define RCC_CLR_OFFSET 0x4
/* STM32 Gates definition */
@@ -1511,13 +1513,18 @@ static struct clk_stm32_clock_data stm32mp13_clock_data = {
.is_multi_mux = stm32mp13_is_multi_mux,
};
+static struct clk_stm32_reset_data stm32mp13_reset_data = {
+ .nr_lines = STM32MP1_RESET_ID_MASK,
+ .clear_offset = RCC_CLR_OFFSET,
+};
+
static const struct stm32_rcc_match_data stm32mp13_data = {
.tab_clocks = stm32mp13_clock_cfg,
.num_clocks = ARRAY_SIZE(stm32mp13_clock_cfg),
.clock_data = &stm32mp13_clock_data,
.check_security = &stm32mp13_clock_is_provided_by_secure,
.maxbinding = STM32MP1_LAST_CLK,
- .clear_offset = RCC_CLR_OFFSET,
+ .reset_data = &stm32mp13_reset_data,
};
static const struct of_device_id stm32mp13_match_data[] = {
diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c
index e89381528..14c2ee1ee 100644
--- a/drivers/clk/stm32/reset-stm32.c
+++ b/drivers/clk/stm32/reset-stm32.c
@@ -11,9 +11,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include "clk-stm32-core.h"
-
-#define STM32_RESET_ID_MASK GENMASK(15, 0)
+#include "reset-stm32.h"
struct stm32_reset_data {
/* reset lock */
@@ -99,24 +97,22 @@ static const struct reset_control_ops stm32_reset_ops = {
.status = stm32_reset_status,
};
-int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
+int stm32_rcc_reset_init(struct device *dev, struct clk_stm32_reset_data *data,
void __iomem *base)
{
- const struct stm32_rcc_match_data *data = match->data;
- struct stm32_reset_data *reset_data = NULL;
-
- data = match->data;
+ struct stm32_reset_data *reset_data;
reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
spin_lock_init(&reset_data->lock);
+
reset_data->membase = base;
reset_data->rcdev.owner = THIS_MODULE;
reset_data->rcdev.ops = &stm32_reset_ops;
reset_data->rcdev.of_node = dev_of_node(dev);
- reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK;
+ reset_data->rcdev.nr_resets = data->nr_lines;
reset_data->clear_offset = data->clear_offset;
return reset_controller_register(&reset_data->rcdev);
diff --git a/drivers/clk/stm32/reset-stm32.h b/drivers/clk/stm32/reset-stm32.h
index 6eb6ea4b5..8cf1cc9be 100644
--- a/drivers/clk/stm32/reset-stm32.h
+++ b/drivers/clk/stm32/reset-stm32.h
@@ -4,5 +4,11 @@
* Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
*/
-int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match,
+struct clk_stm32_reset_data {
+ const struct reset_control_ops *ops;
+ unsigned int nr_lines;
+ u32 clear_offset;
+};
+
+int stm32_rcc_reset_init(struct device *dev, struct clk_stm32_reset_data *data,
void __iomem *base);
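
The new contract in one hypothetical sketch, modeled on the stm32mp13 hunk above (names are illustrative):

	static struct clk_stm32_reset_data example_reset_data = {
		.nr_lines     = GENMASK(15, 0),	/* cf. STM32MP1_RESET_ID_MASK */
		.clear_offset = 0x4,		/* 0 selects the read-modify-write path */
	};

	/* called from the RCC probe with the ioremapped register base */
	err = stm32_rcc_reset_init(dev, &example_reset_data, base);
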
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index eed64547a..853f84398 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -21,17 +21,16 @@ static unsigned long ccu_nkm_find_best_with_parent_adj(struct ccu_common *common
unsigned long *parent, unsigned long rate,
struct _ccu_nkm *nkm)
{
- unsigned long best_rate = 0, best_parent_rate = *parent, tmp_parent = *parent;
+ unsigned long best_rate = 0, best_parent_rate = *parent;
unsigned long best_n = 0, best_k = 0, best_m = 0;
unsigned long _n, _k, _m;
for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
- unsigned long tmp_rate;
+ unsigned long tmp_rate, tmp_parent;
tmp_parent = clk_hw_round_rate(parent_hw, rate * _m / (_n * _k));
-
tmp_rate = tmp_parent * _n * _k / _m;
if (ccu_is_better_rate(common, rate, tmp_rate, best_rate) ||
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index d56822ce6..6a6e5d929 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -23,15 +23,41 @@
#define WZRD_NUM_OUTPUTS 7
#define WZRD_ACLK_MAX_FREQ 250000000UL
-#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
+#define WZRD_CLK_CFG_REG(v, n) (0x200 + 0x130 * (v) + 4 * (n))
#define WZRD_CLKOUT0_FRAC_EN BIT(18)
-#define WZRD_CLKFBOUT_FRAC_EN BIT(26)
+#define WZRD_CLKFBOUT_1 0
+#define WZRD_CLKFBOUT_2 1
+#define WZRD_CLKOUT0_1 2
+#define WZRD_CLKOUT0_2 3
+#define WZRD_DESKEW_2 20
+#define WZRD_DIVCLK 21
+#define WZRD_CLKFBOUT_4 51
+#define WZRD_CLKFBOUT_3 48
+#define WZRD_DUTY_CYCLE 2
+#define WZRD_O_DIV 4
+
+#define WZRD_CLKFBOUT_FRAC_EN BIT(1)
+#define WZRD_CLKFBOUT_PREDIV2 (BIT(11) | BIT(12) | BIT(9))
+#define WZRD_MULT_PREDIV2 (BIT(10) | BIT(9) | BIT(12))
+#define WZRD_CLKFBOUT_EDGE BIT(8)
+#define WZRD_P5EN BIT(13)
+#define WZRD_P5EN_SHIFT 13
+#define WZRD_P5FEDGE BIT(15)
+#define WZRD_DIVCLK_EDGE BIT(10)
+#define WZRD_P5FEDGE_SHIFT 15
+#define WZRD_CLKOUT0_PREDIV2 BIT(11)
+#define WZRD_EDGE_SHIFT 8
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_L_SHIFT 0
+#define WZRD_CLKFBOUT_H_SHIFT 8
+#define WZRD_CLKFBOUT_L_MASK GENMASK(7, 0)
+#define WZRD_CLKFBOUT_H_MASK GENMASK(15, 8)
#define WZRD_CLKFBOUT_FRAC_SHIFT 16
#define WZRD_CLKFBOUT_FRAC_MASK (0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
+#define WZRD_VERSAL_FRAC_MASK GENMASK(5, 0)
#define WZRD_DIVCLK_DIVIDE_SHIFT 0
#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT 0
@@ -45,6 +71,7 @@
#define WZRD_DR_STATUS_REG_OFFSET 0x04
#define WZRD_DR_LOCK_BIT_MASK 0x00000001
#define WZRD_DR_INIT_REG_OFFSET 0x25C
+#define WZRD_DR_INIT_VERSAL_OFFSET 0x14
#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
#define WZRD_DR_BEGIN_DYNA_RECONF_5_2 0x07
@@ -52,6 +79,8 @@
#define WZRD_USEC_POLL 10
#define WZRD_TIMEOUT_POLL 1000
+#define WZRD_FRAC_GRADIENT 64
+#define PREDIV2_MULT 2
/* Divider limits, from UG572 Table 3-4 for Ultrascale+ */
#define DIV_O 0x01
@@ -65,6 +94,14 @@
#define WZRD_VCO_MAX 1600000000
#define WZRD_O_MIN 1
#define WZRD_O_MAX 128
+#define VER_WZRD_M_MIN 4
+#define VER_WZRD_M_MAX 432
+#define VER_WZRD_D_MIN 1
+#define VER_WZRD_D_MAX 123
+#define VER_WZRD_VCO_MIN 2160000000ULL
+#define VER_WZRD_VCO_MAX 4320000000ULL
+#define VER_WZRD_O_MIN 2
+#define VER_WZRD_O_MAX 511
#define WZRD_MIN_ERR 20000
#define WZRD_FRAC_POINTS 1000
@@ -135,6 +172,10 @@ struct clk_wzrd_divider {
spinlock_t *lock; /* divider lock */
};
+struct versal_clk_data {
+ bool is_versal;
+};
+
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
/* maximum frequencies for input/output clocks per speed grade */
@@ -147,6 +188,31 @@ static const unsigned long clk_wzrd_max_freq[] = {
/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);
+static unsigned long clk_wzrd_recalc_rate_ver(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+ u32 div, p5en, edge, prediv2, all;
+ unsigned int vall, valh;
+
+ edge = !!(readl(div_addr) & WZRD_CLKFBOUT_EDGE);
+ p5en = !!(readl(div_addr) & WZRD_P5EN);
+ prediv2 = !!(readl(div_addr) & WZRD_CLKOUT0_PREDIV2);
+ vall = readl(div_addr + 4) & WZRD_CLKFBOUT_L_MASK;
+ valh = readl(div_addr + 4) >> WZRD_CLKFBOUT_H_SHIFT;
+ all = valh + vall + edge;
+ if (!all)
+ all = 1;
+
+ if (prediv2)
+ div = 2 * all + prediv2 * p5en;
+ else
+ div = all;
+
+ return DIV_ROUND_UP_ULL((u64)parent_rate, div);
+}
+
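+/*
+ * Example for clk_wzrd_recalc_rate_ver() above: vall = 2, valh = 2 and
+ * edge = 0 give all = 4; with prediv2 and p5en both set the divider is
+ * 2 * 4 + 1 = 9, so a 900 MHz parent is reported as 100 MHz.
+ */
+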
static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -161,19 +227,64 @@ static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
+static int clk_wzrd_ver_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+ u32 value, regh, edged, p5en, p5fedge, regval, regval1;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ value = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ regh = (value / 4);
+ regval1 = readl(div_addr);
+ regval1 |= WZRD_CLKFBOUT_PREDIV2;
+ regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
+ if (value % 4 > 1) {
+ edged = 1;
+ regval1 |= (edged << WZRD_EDGE_SHIFT);
+ }
+ p5fedge = value % 2;
+ p5en = value % 2;
+ regval1 = regval1 | p5en << WZRD_P5EN_SHIFT | p5fedge << WZRD_P5FEDGE_SHIFT;
+ writel(regval1, div_addr);
+
+ regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
+ writel(regval, div_addr + 4);
+ /* Check status register */
+ err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
+ value, value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+ if (err)
+ goto err_reconfig;
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_VERSAL_OFFSET);
+
+ /* Check status register */
+ err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
+ value, value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+err_reconfig:
+ spin_unlock_irqrestore(divider->lock, flags);
+ return err;
+}
+
static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- int err;
- u32 value;
- unsigned long flags = 0;
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
void __iomem *div_addr = divider->base + divider->offset;
+ unsigned long flags;
+ u32 value;
+ int err;
- if (divider->lock)
- spin_lock_irqsave(divider->lock, flags);
- else
- __acquire(divider->lock);
+ spin_lock_irqsave(divider->lock, flags);
value = DIV_ROUND_CLOSEST(parent_rate, rate);
@@ -185,9 +296,9 @@ static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
/* Check status register */
- err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
- value, value & WZRD_DR_LOCK_BIT_MASK,
- WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+ err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
+ value, value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
if (err)
goto err_reconfig;
@@ -198,14 +309,11 @@ static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
divider->base + WZRD_DR_INIT_REG_OFFSET);
/* Check status register */
- err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
- value, value & WZRD_DR_LOCK_BIT_MASK,
- WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+ err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
+ value, value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
- if (divider->lock)
- spin_unlock_irqrestore(divider->lock, flags);
- else
- __release(divider->lock);
+ spin_unlock_irqrestore(divider->lock, flags);
return err;
}
@@ -223,18 +331,66 @@ static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
return *prate / div;
}
+static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u64 vco_freq, freq, diff, vcomin, vcomax;
+ u32 m, d, o;
+ u32 mmin, mmax, dmin, dmax, omin, omax;
+
+ mmin = VER_WZRD_M_MIN;
+ mmax = VER_WZRD_M_MAX;
+ dmin = VER_WZRD_D_MIN;
+ dmax = VER_WZRD_D_MAX;
+ omin = VER_WZRD_O_MIN;
+ omax = VER_WZRD_O_MAX;
+ vcomin = VER_WZRD_VCO_MIN;
+ vcomax = VER_WZRD_VCO_MAX;
+
+ for (m = mmin; m <= mmax; m++) {
+ for (d = dmin; d <= dmax; d++) {
+ vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
+ if (vco_freq >= vcomin && vco_freq <= vcomax) {
+ for (o = omin; o <= omax; o++) {
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs(freq - rate);
+
+ if (diff < WZRD_MIN_ERR) {
+ divider->m = m;
+ divider->d = d;
+ divider->o = o;
+ return 0;
+ }
+ }
+ }
+ }
+ }
+ return -EBUSY;
+}
+
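The Versal divisor search is a plain brute force: for each multiplier M and pre-divider D whose VCO lands in range, scan the output dividers O until the produced frequency is within WZRD_MIN_ERR of the request. A minimal userspace sketch of the same loop follows; the numeric bounds and error threshold are placeholders, not the driver's VER_WZRD_* constants.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bounds for illustration only. */
#define DEMO_M_MIN 4
#define DEMO_M_MAX 432
#define DEMO_D_MIN 1
#define DEMO_D_MAX 123
#define DEMO_O_MIN 2
#define DEMO_O_MAX 511
#define DEMO_VCO_MIN 2160000000ULL
#define DEMO_VCO_MAX 4320000000ULL
#define DEMO_MIN_ERR 20000

static int demo_find_divisors(uint64_t parent, uint64_t rate,
			      unsigned int *m_out, unsigned int *d_out,
			      unsigned int *o_out)
{
	unsigned int m, d, o;

	for (m = DEMO_M_MIN; m <= DEMO_M_MAX; m++) {
		for (d = DEMO_D_MIN; d <= DEMO_D_MAX; d++) {
			uint64_t vco = (parent * m + d / 2) / d; /* DIV_ROUND_CLOSEST */

			if (vco < DEMO_VCO_MIN || vco > DEMO_VCO_MAX)
				continue;
			for (o = DEMO_O_MIN; o <= DEMO_O_MAX; o++) {
				uint64_t freq = (vco + o / 2) / o;
				uint64_t diff = freq > rate ? freq - rate : rate - freq;

				if (diff < DEMO_MIN_ERR) {
					*m_out = m;
					*d_out = d;
					*o_out = o;
					return 0;	/* first acceptable hit wins */
				}
			}
		}
	}
	return -1;
}

int main(void)
{
	unsigned int m, d, o;

	/* 100 MHz in, 300 MHz out: finds M = 24, D = 1, O = 8 (VCO 2.4 GHz). */
	if (!demo_find_divisors(100000000ULL, 300000000ULL, &m, &d, &o))
		printf("M=%u D=%u O=%u\n", m, d, o);
	return 0;
}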
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- unsigned long vco_freq, freq, diff;
+ u64 vco_freq, freq, diff, vcomin, vcomax;
u32 m, d, o;
-
- for (m = WZRD_M_MIN; m <= WZRD_M_MAX; m++) {
- for (d = WZRD_D_MIN; d <= WZRD_D_MAX; d++) {
+ u32 mmin, mmax, dmin, dmax, omin, omax;
+
+ mmin = WZRD_M_MIN;
+ mmax = WZRD_M_MAX;
+ dmin = WZRD_D_MIN;
+ dmax = WZRD_D_MAX;
+ omin = WZRD_O_MIN;
+ omax = WZRD_O_MAX;
+ vcomin = WZRD_VCO_MIN;
+ vcomax = WZRD_VCO_MAX;
+
+ for (m = mmin; m <= mmax; m++) {
+ for (d = dmin; d <= dmax; d++) {
vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= WZRD_VCO_MIN && vco_freq <= WZRD_VCO_MAX) {
- for (o = WZRD_O_MIN; o <= WZRD_O_MAX; o++) {
+ if (vco_freq >= vcomin && vco_freq <= vcomax) {
+ for (o = omin; o <= omax; o++) {
freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
diff = abs(freq - rate);
@@ -251,12 +407,99 @@ static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
return -EBUSY;
}
+static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
+{
+ u32 value;
+ int err;
+
+ /* Check status register */
+ err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
+ value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF, div_addr);
+ /* Check status register */
+ return readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
+ value & WZRD_DR_LOCK_BIT_MASK,
+ WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+}
+
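clk_wzrd_reconfig() captures the handshake used throughout this file: wait for the lock bit, write the reconfiguration trigger, then wait for lock to return. The _atomic poll variant is required because every caller holds divider->lock with interrupts off, so the wait must busy-spin rather than sleep. A minimal sketch of that pattern against a hypothetical device:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_STATUS_OFF	0x04		/* hypothetical status register */
#define DEMO_LOCK	BIT(0)		/* hypothetical lock bit */
#define DEMO_GO		0x07		/* hypothetical trigger value */

static int demo_reconfig(void __iomem *base, void __iomem *init_reg)
{
	u32 val;
	int err;

	/* Spin (never sleep): this may run under a spinlock with IRQs off. */
	err = readl_poll_timeout_atomic(base + DEMO_STATUS_OFF, val,
					val & DEMO_LOCK, 1, 10000);
	if (err)
		return err;

	writel(DEMO_GO, init_reg);	/* kick off the reconfiguration */

	/* The trigger drops the lock bit; wait for it to come back. */
	return readl_poll_timeout_atomic(base + DEMO_STATUS_OFF, val,
					 val & DEMO_LOCK, 1, 10000);
}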
+static int clk_wzrd_dynamic_ver_all_nolock(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 regh, edged, p5en, p5fedge, value2, m, regval, regval1, value;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr;
+ int err;
+
+ err = clk_wzrd_get_divisors_ver(hw, rate, parent_rate);
+ if (err)
+ return err;
+
+ writel(0, divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4));
+
+ m = divider->m;
+ edged = m % WZRD_DUTY_CYCLE;
+ regh = m / WZRD_DUTY_CYCLE;
+ regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_CLKFBOUT_1));
+ regval1 |= WZRD_MULT_PREDIV2;
+ if (edged)
+ regval1 = regval1 | WZRD_CLKFBOUT_EDGE;
+ else
+ regval1 = regval1 & ~WZRD_CLKFBOUT_EDGE;
+
+ writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_CLKFBOUT_1));
+ regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
+ writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_CLKFBOUT_2));
+
+ value2 = divider->d;
+ edged = value2 % WZRD_DUTY_CYCLE;
+ regh = (value2 / WZRD_DUTY_CYCLE);
+ regval1 = FIELD_PREP(WZRD_DIVCLK_EDGE, edged);
+ writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_DESKEW_2));
+ regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
+ writel(regval1, divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
+
+ value = divider->o;
+ regh = value / WZRD_O_DIV;
+ regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_CLKOUT0_1));
+ regval1 |= WZRD_CLKFBOUT_PREDIV2;
+ regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
+
+ if (value % WZRD_O_DIV > 1) {
+ edged = 1;
+ regval1 |= edged << WZRD_CLKFBOUT_H_SHIFT;
+ }
+
+ p5fedge = value % WZRD_DUTY_CYCLE;
+ p5en = value % WZRD_DUTY_CYCLE;
+
+ regval1 = regval1 | FIELD_PREP(WZRD_P5EN, p5en) | FIELD_PREP(WZRD_P5FEDGE, p5fedge);
+ writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_CLKOUT0_1));
+ regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
+ writel(regval, divider->base + WZRD_CLK_CFG_REG(1,
+ WZRD_CLKOUT0_2));
+ div_addr = divider->base + WZRD_DR_INIT_VERSAL_OFFSET;
+
+ return clk_wzrd_reconfig(divider, div_addr);
+}
+
static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long vco_freq, rate_div, clockout0_div;
- u32 reg, pre, value, f;
+ void __iomem *div_addr = divider->base;
+ u32 reg, pre, f;
int err;
err = clk_wzrd_get_divisors(hw, rate, parent_rate);
@@ -275,35 +518,22 @@ static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
- writel(reg, divider->base + WZRD_CLK_CFG_REG(2));
+ writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
/* Set divisor and clear phase offset */
reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
- writel(reg, divider->base + WZRD_CLK_CFG_REG(0));
- writel(divider->o, divider->base + WZRD_CLK_CFG_REG(2));
- writel(0, divider->base + WZRD_CLK_CFG_REG(3));
- /* Check status register */
- err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
- value & WZRD_DR_LOCK_BIT_MASK,
- WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
- if (err)
- return -ETIMEDOUT;
-
- /* Initiate reconfiguration */
- writel(WZRD_DR_BEGIN_DYNA_RECONF,
- divider->base + WZRD_DR_INIT_REG_OFFSET);
-
- /* Check status register */
- return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
- value & WZRD_DR_LOCK_BIT_MASK,
- WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
+ writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
+ writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
+ writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
+ div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
+ return clk_wzrd_reconfig(divider, div_addr);
}
static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- unsigned long flags = 0;
+ unsigned long flags;
int ret;
spin_lock_irqsave(divider->lock, flags);
@@ -315,21 +545,103 @@ static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
return ret;
}
+static int clk_wzrd_dynamic_all_ver(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ ret = clk_wzrd_dynamic_ver_all_nolock(hw, rate, parent_rate);
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return ret;
+}
+
static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
u32 m, d, o, div, reg, f;
- reg = readl(divider->base + WZRD_CLK_CFG_REG(0));
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
- reg = readl(divider->base + WZRD_CLK_CFG_REG(2));
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
+ divider->flags, divider->width);
+}
+
+static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u32 edged, div2, p5en, edge, prediv2, all, regl, regh, mult;
+ u32 div, reg;
+
+ edge = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_1)) &
+ WZRD_CLKFBOUT_EDGE);
+
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_2));
+ regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
+ regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
+
+ mult = regl + regh + edge;
+ if (!mult)
+ mult = 1;
+
+ regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4)) &
+ WZRD_CLKFBOUT_FRAC_EN;
+ if (regl) {
+ regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_3))
+ & WZRD_VERSAL_FRAC_MASK;
+ mult = mult * WZRD_FRAC_GRADIENT + regl;
+ parent_rate = DIV_ROUND_CLOSEST((parent_rate * mult), WZRD_FRAC_GRADIENT);
+ } else {
+ parent_rate = parent_rate * mult;
+ }
+
+ /* O Calculation */
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_1));
+ edged = FIELD_GET(WZRD_CLKFBOUT_EDGE, reg);
+ p5en = FIELD_GET(WZRD_P5EN, reg);
+ prediv2 = FIELD_GET(WZRD_CLKOUT0_PREDIV2, reg);
+
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_2));
+ /* Low time */
+ regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
+ /* High time */
+ regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
+ all = regh + regl + edged;
+ if (!all)
+ all = 1;
+
+ if (prediv2)
+ div2 = PREDIV2_MULT * all + p5en;
+ else
+ div2 = all;
+
+ /* D calculation */
+ edged = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DESKEW_2)) &
+ WZRD_DIVCLK_EDGE);
+ reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
+ /* Low time */
+ regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
+ /* High time */
+ regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
+ div = regl + regh + edged;
+ if (!div)
+ div = 1;
+
+ div = div * div2;
+ return divider_recalc_rate(hw, parent_rate, div, divider->table,
divider->flags, divider->width);
}
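Putting the three decodes of clk_wzrd_recalc_rate_all_ver() together (and assuming PREDIV2_MULT is 2, which is not shown in this hunk): with feedback high/low counts of 2/2 and edge clear, mult = 4; with CLKOUT0 high/low of 1/1, edge clear, prediv2 and p5en set, div2 = 2 * 2 + 1 = 5; with DIVCLK high/low of 1/0 and edge set, d = 2. Ignoring the fractional path, a 100 MHz parent then recalculates as roughly 100 MHz * 4 / (2 * 5) = 40 MHz.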
@@ -360,6 +672,18 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
return rate;
}
+static const struct clk_ops clk_wzrd_ver_divider_ops = {
+ .round_rate = clk_wzrd_round_rate,
+ .set_rate = clk_wzrd_ver_dynamic_reconfig,
+ .recalc_rate = clk_wzrd_recalc_rate_ver,
+};
+
+static const struct clk_ops clk_wzrd_ver_div_all_ops = {
+ .round_rate = clk_wzrd_round_rate_all,
+ .set_rate = clk_wzrd_dynamic_all_ver,
+ .recalc_rate = clk_wzrd_recalc_rate_all_ver,
+};
+
static const struct clk_ops clk_wzrd_clk_divider_ops = {
.round_rate = clk_wzrd_round_rate,
.set_rate = clk_wzrd_dynamic_reconfig,
@@ -484,6 +808,53 @@ static struct clk *clk_wzrd_register_divf(struct device *dev,
return hw->clk;
}
+static struct clk *clk_wzrd_ver_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base,
+ u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ u32 div_type,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else if (div_type == DIV_O)
+ init.ops = &clk_wzrd_ver_divider_ops;
+ else
+ init.ops = &clk_wzrd_ver_div_all_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+
+ hw = &div->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return hw->clk;
+}
+
static struct clk *clk_wzrd_register_divider(struct device *dev,
const char *name,
const char *parent_name,
@@ -588,18 +959,24 @@ static int __maybe_unused clk_wzrd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
clk_wzrd_resume);
+static const struct versal_clk_data versal_data = {
+ .is_versal = true,
+};
+
static int clk_wzrd_probe(struct platform_device *pdev)
{
- int i, ret;
+ const char *clkout_name, *clk_name, *clk_mul_name;
+ u32 regl, regh, edge, regld, reghd, edged, div;
+ struct device_node *np = pdev->dev.of_node;
+ const struct versal_clk_data *data;
+ struct clk_wzrd *clk_wzrd;
+ unsigned long flags = 0;
+ void __iomem *ctrl_reg;
u32 reg, reg_f, mult;
+ bool is_versal = false;
unsigned long rate;
- const char *clk_name;
- void __iomem *ctrl_reg;
- struct clk_wzrd *clk_wzrd;
- const char *clkout_name;
- struct device_node *np = pdev->dev.of_node;
int nr_outputs;
- unsigned long flags = 0;
+ int i, ret;
clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
if (!clk_wzrd)
@@ -641,6 +1018,10 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
+ data = device_get_match_data(&pdev->dev);
+ if (data)
+ is_versal = data->is_versal;
+
ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
ret = -EINVAL;
@@ -653,26 +1034,61 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- if (nr_outputs == 1) {
- clk_wzrd->clkout[0] = clk_wzrd_register_divider
+ if (is_versal) {
+ if (nr_outputs == 1) {
+ clk_wzrd->clkout[0] = clk_wzrd_ver_register_divider
(&pdev->dev, clkout_name,
__clk_get_name(clk_wzrd->clk_in1), 0,
- clk_wzrd->base, WZRD_CLK_CFG_REG(3),
+ clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_ALL, &clkwzrd_lock);
- goto out;
- }
-
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0));
- reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
- reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;
+ goto out;
+ }
+ /* register multiplier */
+ edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
+ BIT(8));
+ regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
+ WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
+ regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
+ WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
+ mult = regl + regh + edge;
+ if (!mult)
+ mult = 1;
+ mult = mult * WZRD_FRAC_GRADIENT;
+
+ regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 51)) &
+ WZRD_CLKFBOUT_FRAC_EN;
+ if (regl) {
+ regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 48)) &
+ WZRD_VERSAL_FRAC_MASK;
+ mult = mult + regl;
+ }
+ div = 64;
+ } else {
+ if (nr_outputs == 1) {
+ clk_wzrd->clkout[0] = clk_wzrd_register_divider
+ (&pdev->dev, clkout_name,
+ __clk_get_name(clk_wzrd->clk_in1), 0,
+ clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ DIV_ALL, &clkwzrd_lock);
- reg = reg & WZRD_CLKFBOUT_MULT_MASK;
- reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
- mult = (reg * 1000) + reg_f;
+ goto out;
+ }
+ reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
+ reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
+ reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;
+
+ reg = reg & WZRD_CLKFBOUT_MULT_MASK;
+ reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
+ mult = (reg * 1000) + reg_f;
+ div = 1000;
+ }
clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
if (!clk_name) {
ret = -ENOMEM;
@@ -681,7 +1097,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
(&pdev->dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
- 0, mult, 1000);
+ 0, mult, div);
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
@@ -694,13 +1110,29 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_rm_int_clk;
}
- ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(0);
- /* register div */
- clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
+ if (is_versal) {
+ edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
+ BIT(10));
+ regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
+ WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
+ reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
+ WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
+ div = (regld + reghd + edged);
+ if (!div)
+ div = 1;
+
+ clk_mul_name = __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
+ clk_wzrd->clks_internal[wzrd_clk_mul_div] =
+ clk_register_fixed_factor(&pdev->dev, clk_name,
+ clk_mul_name, 0, 1, div);
+ } else {
+ ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
+ clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
(&pdev->dev, clk_name,
__clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
+ }
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
dev_err(&pdev->dev, "unable to register divider clock\n");
ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
@@ -716,24 +1148,35 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_rm_int_clk;
}
- if (!i)
- clk_wzrd->clkout[i] = clk_wzrd_register_divf
- (&pdev->dev, clkout_name,
- clk_name, flags,
- clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
- WZRD_CLKOUT_DIVIDE_SHIFT,
- WZRD_CLKOUT_DIVIDE_WIDTH,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
- DIV_O, &clkwzrd_lock);
- else
- clk_wzrd->clkout[i] = clk_wzrd_register_divider
- (&pdev->dev, clkout_name,
- clk_name, 0,
- clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
- WZRD_CLKOUT_DIVIDE_SHIFT,
- WZRD_CLKOUT_DIVIDE_WIDTH,
- CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
- DIV_O, &clkwzrd_lock);
+ if (is_versal) {
+ clk_wzrd->clkout[i] = clk_wzrd_ver_register_divider
+ (&pdev->dev,
+ clkout_name, clk_name, 0,
+ clk_wzrd->base,
+ (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ DIV_O, &clkwzrd_lock);
+ } else {
+ if (!i)
+ clk_wzrd->clkout[i] = clk_wzrd_register_divf
+ (&pdev->dev, clkout_name, clk_name, flags, clk_wzrd->base,
+ (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ DIV_O, &clkwzrd_lock);
+ else
+ clk_wzrd->clkout[i] = clk_wzrd_register_divider
+ (&pdev->dev, clkout_name, clk_name, 0, clk_wzrd->base,
+ (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ DIV_O, &clkwzrd_lock);
+ }
if (IS_ERR(clk_wzrd->clkout[i])) {
int j;
@@ -799,9 +1242,10 @@ static void clk_wzrd_remove(struct platform_device *pdev)
}
static const struct of_device_id clk_wzrd_ids[] = {
- { .compatible = "xlnx,clocking-wizard" },
- { .compatible = "xlnx,clocking-wizard-v5.2" },
- { .compatible = "xlnx,clocking-wizard-v6.0" },
+ { .compatible = "xlnx,versal-clk-wizard", .data = &versal_data },
+ { .compatible = "xlnx,clocking-wizard" },
+ { .compatible = "xlnx,clocking-wizard-v5.2" },
+ { .compatible = "xlnx,clocking-wizard-v6.0" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
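The new compatible entry is what feeds device_get_match_data() in the probe path above: .data rides along with the OF match and selects the Versal register layout. The idiom in miniature, with hypothetical names:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_data {			/* hypothetical match data */
	bool is_new_ip;
};

static const struct demo_data demo_new = { .is_new_ip = true };

static const struct of_device_id demo_ids[] = {
	{ .compatible = "vendor,demo-v2", .data = &demo_new },
	{ .compatible = "vendor,demo" },	/* .data defaults to NULL */
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_data *data = device_get_match_data(&pdev->dev);
	bool is_new_ip = data ? data->is_new_ip : false;

	dev_info(&pdev->dev, "new IP: %d\n", is_new_ip);
	return 0;
}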
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 32daaac9b..ca7a06489 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -69,7 +69,7 @@
* @base_addr: Base address of timer
* @freq: Timer input clock frequency
* @clk: Associated clock source
- * @clk_rate_change_nb Notifier block for clock rate changes
+ * @clk_rate_change_nb: Notifier block for clock rate changes
*/
struct ttc_timer {
void __iomem *base_addr;
@@ -134,7 +134,7 @@ static void ttc_set_interval(struct ttc_timer *timer,
* @irq: IRQ number of the Timer
* @dev_id: void pointer to the ttc_timer instance
*
- * returns: Always IRQ_HANDLED - success
+ * Returns: Always IRQ_HANDLED - success
**/
static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
{
@@ -151,8 +151,9 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
/**
* __ttc_clocksource_read - Reads the timer counter register
+ * @cs: &clocksource to read from
*
- * returns: Current timer counter register value
+ * Returns: Current timer counter register value
**/
static u64 __ttc_clocksource_read(struct clocksource *cs)
{
@@ -173,7 +174,7 @@ static u64 notrace ttc_sched_clock_read(void)
* @cycles: Timer interval ticks
* @evt: Address of clock event instance
*
- * returns: Always 0 - success
+ * Returns: Always %0 - success
**/
static int ttc_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
@@ -186,9 +187,12 @@ static int ttc_set_next_event(unsigned long cycles,
}
/**
- * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer
- *
+ * ttc_shutdown - Sets the state of timer
* @evt: Address of clock event instance
+ *
+ * Used for shutdown or oneshot.
+ *
+ * Returns: Always %0 - success
**/
static int ttc_shutdown(struct clock_event_device *evt)
{
@@ -202,6 +206,12 @@ static int ttc_shutdown(struct clock_event_device *evt)
return 0;
}
+/**
+ * ttc_set_periodic - Sets the state of timer
+ * @evt: Address of clock event instance
+ *
+ * Returns: Always %0 - success
+ */
static int ttc_set_periodic(struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 1c732479a..79bb9a98b 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -61,12 +61,19 @@ static int riscv_clock_next_event(unsigned long delta,
return 0;
}
+static int riscv_clock_shutdown(struct clock_event_device *evt)
+{
+ riscv_clock_event_stop();
+ return 0;
+}
+
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = riscv_clock_next_event,
+ .set_state_shutdown = riscv_clock_shutdown,
};
/*
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 1548dea15..1b481731d 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1714,8 +1714,8 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
lockdep_assert_held(&dev->mutex);
cmd->chanlist = NULL;
- chanlist = memdup_user(user_chanlist,
- cmd->chanlist_len * sizeof(unsigned int));
+ chanlist = memdup_array_user(user_chanlist,
+ cmd->chanlist_len, sizeof(unsigned int));
if (IS_ERR(chanlist))
return PTR_ERR(chanlist);
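memdup_array_user() differs from the open-coded memdup_user() call only in that the element-count multiply is overflow-checked, so an absurd chanlist_len from userspace fails cleanly instead of wrapping into a short allocation. A sketch of the pattern, with a hypothetical copy-in helper:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical copy-in of a user-supplied array of n_chans u32 entries. */
static u32 *demo_copy_chanlist(const u32 __user *uptr, size_t n_chans)
{
	/*
	 * n_chans * sizeof(u32) is computed with overflow checking;
	 * a wrapping product returns ERR_PTR(-EOVERFLOW) rather than
	 * under-allocating.
	 */
	return memdup_array_user(uptr, n_chans, sizeof(u32));
}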
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f..84dce5184 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
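usb_find_common_endpoints() takes the four endpoint slots in the order bulk-in, bulk-out, int-in, int-out; NULL skips a slot, and a negative errno comes back if any requested endpoint is missing. That is why the VMK8061 (bulk) and the other models (interrupt) pass the descriptor pointers in different positions. A reduced example requiring one bulk pair:

#include <linux/usb.h>

/* Require one bulk-in and one bulk-out endpoint on the interface. */
static int demo_find_bulk_pair(struct usb_interface *intf,
			       struct usb_endpoint_descriptor **rx,
			       struct usb_endpoint_descriptor **tx)
{
	/* Slots: bulk_in, bulk_out, int_in, int_out; NULL = don't care. */
	return usb_find_common_endpoints(intf->cur_altsetting,
					 rx, tx, NULL, NULL);
}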
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 7f7b94f61..4028e8eeb 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -59,9 +59,8 @@ static int cn_already_initialized;
* both, or if both are zero then the group is looked up and sent there.
*/
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
- gfp_t gfp_mask,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data)
+ gfp_t gfp_mask, netlink_filter_fn filter,
+ void *filter_data)
{
struct cn_callback_entry *__cbq;
unsigned int size;
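netlink_filter_fn is just a typedef for the function-pointer signature the old prototype spelled out, so callers are unchanged; per include/linux/netlink.h in this release it should be:

/* From include/linux/netlink.h (6.8): */
typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data);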
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 09c77afb3..3f24481fc 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -31,10 +31,11 @@ struct counter_device_allochelper {
struct counter_device counter;
/*
- * This is cache line aligned to ensure private data behaves like if it
- * were kmalloced separately.
+ * This ensures private data behaves like if it were kmalloced
+ * separately. Also ensures the minimum alignment for safe DMA
+ * operations (which may or may not mean cache alignment).
*/
- unsigned long privdata[] ____cacheline_aligned;
+ unsigned long privdata[] __aligned(ARCH_DMA_MINALIGN);
};
static void counter_device_release(struct device *dev)
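The flexible-array trick only works if privdata starts on a boundary at least as strict as what kmalloc() would give a separate allocation; __aligned(ARCH_DMA_MINALIGN) pins that down for DMA as well, where a bare ____cacheline_aligned may be weaker. A sketch of the container pattern, with hypothetical names:

#include <linux/cache.h>
#include <linux/slab.h>

struct demo_allochelper {
	struct { int id; } core;	/* hypothetical framework data */
	/* privdata starts ARCH_DMA_MINALIGN-aligned within the allocation. */
	unsigned long privdata[] __aligned(ARCH_DMA_MINALIGN);
};

static void *demo_alloc(size_t priv_size)
{
	struct demo_allochelper *h;

	h = kzalloc(sizeof(*h) + priv_size, GFP_KERNEL);
	return h ? h->privdata : NULL;	/* caller only sees its private area */
}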
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 8afefdea4..ce5a5641b 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -57,7 +57,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_warn("Cannot get clock for CPU %d\n", cpu);
} else {
@@ -165,7 +165,7 @@ static int __init armada_8k_cpufreq_init(void)
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
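clk_get()'s second parameter is a const char *con_id, so passing NULL rather than the integer literal 0 is the type-correct way to say "no connection ID" and silences sparse's 0-as-NULL-pointer warning; the behavior is identical.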
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fe08ca419..64420d9cf 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -16,7 +16,6 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
@@ -27,12 +26,6 @@
#include <acpi/cppc_acpi.h>
-/* Minimum struct length needed for the DMI processor entry we want */
-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
-
-/* Offset in the DMI processor structure for the max frequency */
-#define DMI_PROCESSOR_MAX_SPEED 0x14
-
/*
* This list contains information parsed from per CPU ACPI _CPC and _PSD
* structures: e.g. the highest and lowest supported performance, capabilities,
@@ -291,97 +284,9 @@ static inline void cppc_freq_invariance_exit(void)
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
-/* Callback function used to retrieve the max frequency from DMI */
-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
-{
- const u8 *dmi_data = (const u8 *)dm;
- u16 *mhz = (u16 *)private;
-
- if (dm->type == DMI_ENTRY_PROCESSOR &&
- dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
- u16 val = (u16)get_unaligned((const u16 *)
- (dmi_data + DMI_PROCESSOR_MAX_SPEED));
- *mhz = val > *mhz ? val : *mhz;
- }
-}
-
-/* Look up the max frequency in DMI */
-static u64 cppc_get_dmi_max_khz(void)
-{
- u16 mhz = 0;
-
- dmi_walk(cppc_find_dmi_mhz, &mhz);
-
- /*
- * Real stupid fallback value, just in case there is no
- * actual value set.
- */
- mhz = mhz ? mhz : 1;
-
- return (1000 * mhz);
-}
-
-/*
- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
- * use them to convert perf to freq and vice versa. The conversion is
- * extrapolated as an affine function passing by the 2 points:
- * - (Low perf, Low freq)
- * - (Nominal perf, Nominal perf)
- */
-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
- unsigned int perf)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
- div = caps->nominal_perf - caps->lowest_perf;
- offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = max_khz;
- div = caps->highest_perf;
- }
-
- retval = offset + div64_u64(perf * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
- unsigned int freq)
-{
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- s64 retval, offset = 0;
- static u64 max_khz;
- u64 mul, div;
-
- if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
- offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
- } else {
- if (!max_khz)
- max_khz = cppc_get_dmi_max_khz();
- mul = caps->highest_perf;
- div = max_khz;
- }
-
- retval = offset + div64_u64(freq * mul, div);
- if (retval >= 0)
- return retval;
- return 0;
-}
-
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
-
{
struct cppc_cpudata *cpu_data = policy->driver_data;
unsigned int cpu = policy->cpu;
@@ -389,7 +294,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
u32 desired_perf;
int ret = 0;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
/* Return if it is exactly the same perf */
if (desired_perf == cpu_data->perf_ctrls.desired_perf)
return ret;
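The replacement cppc_perf_to_khz()/cppc_khz_to_perf() helpers (now taking the perf_caps directly, presumably from the ACPI CPPC core) keep the same affine interpolation the deleted code documented, through the points (lowest perf, lowest freq) and (nominal perf, nominal freq). Worked through: with lowest perf 100 at 1,000,000 kHz and nominal perf 300 at 2,600,000 kHz, mul/div = 1,600,000 / 200 = 8,000 and offset = 2,600,000 - 300 * 8,000 = 200,000, so a requested perf of 250 converts to 200,000 + 250 * 8,000 = 2,200,000 kHz, and the kHz-to-perf direction inverts the same line.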
@@ -417,7 +322,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
u32 desired_perf;
int ret;
- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
+ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
cpu_data->perf_ctrls.desired_perf = desired_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -530,7 +435,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
min_step = min_cap / CPPC_EM_CAP_STEP;
max_step = max_cap / CPPC_EM_CAP_STEP;
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
step = perf_prev / perf_step;
if (step > max_step)
@@ -550,8 +455,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
perf = step * perf_step;
}
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
/*
@@ -561,8 +466,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
*/
while ((*KHz == prev_freq) || (step_check != step)) {
perf++;
- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
+ *KHz = cppc_perf_to_khz(perf_caps, perf);
+ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
step_check = perf_check / perf_step;
}
@@ -591,7 +496,7 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
+ perf_prev = cppc_khz_to_perf(perf_caps, KHz);
perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
step = perf_prev / perf_step;
@@ -679,10 +584,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- /* Convert the lowest and nominal freq from MHz to KHz */
- cpu_data->perf_caps.lowest_freq *= 1000;
- cpu_data->perf_caps.nominal_freq *= 1000;
-
list_add(&cpu_data->node, &cpu_data_list);
return cpu_data;
@@ -724,20 +625,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
- policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_nonlinear_perf);
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
- policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
+ policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
@@ -773,7 +670,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
- policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
+ policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
@@ -863,7 +760,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
- return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
@@ -878,11 +775,9 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
}
if (state)
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->highest_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
- caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
policy->cpuinfo.max_freq = policy->max;
ret = freq_qos_update_request(policy->max_freq_req, policy->max);
@@ -937,7 +832,7 @@ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
if (ret < 0)
return -EIO;
- return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
+ return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
}
static void cppc_check_hisi_workaround(void)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5104f853a..3c2c955fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -454,7 +454,7 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
arch_set_freq_scale(policy->related_cpus,
policy->cur,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
@@ -1576,7 +1576,8 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
- if (cpufreq_thermal_control_enabled(cpufreq_driver))
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
@@ -1660,11 +1661,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
else
policy->last_policy = policy->policy;
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
- }
-
if (has_target())
cpufreq_exit_governor(policy);
@@ -1725,6 +1721,15 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return;
}
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy are
+ * removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
@@ -2179,7 +2184,7 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
policy->cur = freq;
arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
+ arch_scale_freq_ref(policy->cpu));
cpufreq_stats_record_transition(policy, freq);
if (trace_cpu_frequency_enabled()) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 357e01a27..79619227e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -302,7 +302,10 @@ static bool hwp_forced __read_mostly;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR_MTL 80000
+
+static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
static inline int core_get_scaling(void)
{
@@ -422,7 +425,7 @@ static int intel_pstate_cppc_get_scaling(int cpu)
*/
if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return HYBRID_SCALING_FACTOR;
+ return hybrid_scaling_factor;
return core_get_scaling();
}
@@ -1717,13 +1720,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
/*
- * If this CPU gen doesn't call for change in balance_perf
- * EPP return.
- */
- if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
- return;
-
- /*
* If the EPP is set by firmware, which means that firmware enabled HWP
* - Is equal or less than 0x80 (default balance_perf EPP)
* - But less performance oriented than performance EPP
@@ -1736,6 +1732,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
}
/*
+ * If this CPU gen doesn't call for change in balance_perf
+ * EPP return.
+ */
+ if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
+ return;
+
+ /*
* Use hard coded value per gen to update the balance_perf
* and default EPP.
*/
@@ -1993,7 +1996,7 @@ static int hwp_get_cpu_scaling(int cpu)
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
/* P-cores have a smaller perf level-to-frequency scaling factor. */
if (cpu_type == 0x40)
- return HYBRID_SCALING_FACTOR;
+ return hybrid_scaling_factor;
/* Use default core scaling for E-cores */
if (cpu_type == 0x20)
@@ -2431,6 +2434,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(ICELAKE_X, core_funcs),
X86_MATCH(TIGERLAKE, core_funcs),
X86_MATCH(SAPPHIRERAPIDS_X, core_funcs),
+ X86_MATCH(EMERALDRAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -3414,6 +3418,11 @@ static const struct x86_cpu_id intel_epp_balance_perf[] = {
{}
};
+static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
+ {}
+};
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3504,9 +3513,16 @@ hwp_cpu_matched:
if (hwp_active) {
const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
+ const struct x86_cpu_id *hybrid_id = x86_match_cpu(intel_hybrid_scaling_factor);
if (id)
epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
+
+ if (hybrid_id) {
+ hybrid_scaling_factor = hybrid_id->driver_data;
+ pr_debug("hybrid scaling factor: %d\n", hybrid_scaling_factor);
+ }
+
}
mutex_lock(&intel_pstate_driver_lock);
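The hybrid scaling-factor lookup is the standard x86 match-table idiom: driver_data carries the per-model value and x86_match_cpu() returns the first entry matching the running CPU. Reduced to its essentials (the 78741/80000 values are the ones from this patch; everything else is illustrative):

#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static int demo_scaling_factor = 78741;	/* default, HYBRID_SCALING_FACTOR */

static const struct x86_cpu_id demo_scaling_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, 80000),
	{}
};

static void demo_pick_scaling_factor(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(demo_scaling_ids);

	if (id)	/* driver_data is a kernel_ulong_t holding the factor */
		demo_scaling_factor = (int)id->driver_data;
}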
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index d9cda7f6c..cf5873cc4 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
#include <linux/cpumask.h>
#include <linux/tick.h>
#include <linux/cpu.h>
+#include <linux/math64.h>
#include "cpuidle.h"
@@ -187,7 +188,7 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
if (s->exit_latency > 0)
- s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
+ s->exit_latency_ns = mul_u32_u32(s->exit_latency, NSEC_PER_USEC);
else if (s->exit_latency_ns < 0)
s->exit_latency_ns = 0;
else
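The point of mul_u32_u32() here: exit_latency is an unsigned int holding microseconds and NSEC_PER_USEC is 1000L, so on 32-bit builds the old multiply was done in 32 bits and wrapped once the latency exceeded 2^32 / 1000 us (about 4.29 s); for example 5,000,000 us came out as 5,000,000,000 mod 2^32 = 705,032,704 ns. mul_u32_u32() performs the multiply in 64 bits regardless of the width of long.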
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 79c3bb9c9..0991f026c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -306,6 +306,7 @@ config CRYPTO_DEV_SAHARA
select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
+ select CRYPTO_ENGINE
help
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index d2cf96190..de50c00ba 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -431,8 +431,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
memcpy(algt->fbname,
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
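crypto_skcipher_set_reqsize() (from crypto/internal/skcipher.h) is the accessor drivers are expected to use instead of writing the tfm's reqsize field directly; the value itself is unchanged, the driver's per-request context plus whatever the fallback tfm needs. The same one-line conversion repeats below for sun8i-ss and amlogic-gxl. In isolation:

#include <crypto/internal/skcipher.h>

struct demo_req_ctx {			/* hypothetical per-request state */
	u32 flags;
};

static void demo_set_reqsize(struct crypto_skcipher *tfm,
			     struct crypto_skcipher *fallback)
{
	/* Room for our context plus the fallback's request tail. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct demo_req_ctx) +
					 crypto_skcipher_reqsize(fallback));
}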
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 7fa359725..9b9605ce8 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -405,9 +405,8 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
-
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
memcpy(algt->fbname,
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index d70b105dc..753f67a36 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -30,33 +30,16 @@ static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
unsigned int keylen)
{
struct crypto_shash *xtfm;
- struct shash_desc *sdesc;
- size_t len;
- int ret = 0;
+ int ret;
xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(xtfm))
return PTR_ERR(xtfm);
- len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
- sdesc = kmalloc(len, GFP_KERNEL);
- if (!sdesc) {
- ret = -ENOMEM;
- goto err_hashkey_sdesc;
- }
- sdesc->tfm = xtfm;
-
- ret = crypto_shash_init(sdesc);
- if (ret) {
- dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
- goto err_hashkey;
- }
- ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
+ ret = crypto_shash_tfm_digest(xtfm, key, keylen, tfmctx->key);
if (ret)
- dev_err(tfmctx->ss->dev, "shash finup error\n");
-err_hashkey:
- kfree(sdesc);
-err_hashkey_sdesc:
+ dev_err(tfmctx->ss->dev, "shash digest error ret=%d\n", ret);
+
crypto_free_shash(xtfm);
return ret;
}
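crypto_shash_tfm_digest() is the one-shot helper: it supplies its own descriptor and runs init/update/final internally, which is exactly what the removed kmalloc + crypto_shash_init() + crypto_shash_finup() sequence spelled out. A self-contained sketch of hashing an oversized key down to a SHA-1 digest:

#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <linux/err.h>

static int demo_hash_key(const u8 *key, unsigned int keylen,
			 u8 digest[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One call replaces desc alloc + init + finup + free. */
	ret = crypto_shash_tfm_digest(tfm, key, keylen, digest);

	crypto_free_shash(tfm);
	return ret;
}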
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index ded732242..e0af611a9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -181,13 +181,6 @@ int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
- CRYPTO_FEEDBACK_MODE_128BIT_CFB);
-}
-
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
@@ -195,13 +188,6 @@ int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
- CRYPTO_FEEDBACK_MODE_64BIT_OFB);
-}
-
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 8d5337224..6006703fb 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1211,26 +1211,6 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv_stream,
- .decrypt = crypto4xx_decrypt_iv_stream,
- .init = crypto4xx_sk_init,
- .exit = crypto4xx_sk_exit,
- } },
- { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
- .base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
@@ -1289,26 +1269,6 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
- .base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv_stream,
- .decrypt = crypto4xx_decrypt_iv_stream,
- .init = crypto4xx_sk_init,
- .exit = crypto4xx_sk_exit,
- } },
/* AEAD */
{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 56c10668c..96355d463 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -162,14 +162,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *dst_tmp);
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen);
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 330840661..29048da6f 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -327,8 +327,8 @@ int meson_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct meson_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
return 0;
}
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
index db6c5b4cd..e93f2f82b 100644
--- a/drivers/crypto/aspeed/Kconfig
+++ b/drivers/crypto/aspeed/Kconfig
@@ -38,14 +38,12 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
select CRYPTO_DES
select CRYPTO_ECB
select CRYPTO_CBC
- select CRYPTO_CFB
- select CRYPTO_OFB
select CRYPTO_CTR
help
Select here to enable Aspeed Hash & Crypto Engine (HACE)
crypto driver.
Supports AES/DES symmetric-key encryption and decryption
- with ECB/CBC/CFB/OFB/CTR options.
+ with ECB/CBC/CTR options.
config CRYPTO_DEV_ASPEED_ACRY
bool "Enable Aspeed ACRY RSA Engine"
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index f0eddb785..a72dfebc5 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -473,30 +473,6 @@ static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
HACE_CMD_TRIPLE_DES);
}
-static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
- HACE_CMD_TRIPLE_DES);
-}
-
-static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
- HACE_CMD_TRIPLE_DES);
-}
-
-static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
- HACE_CMD_TRIPLE_DES);
-}
-
-static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
- HACE_CMD_TRIPLE_DES);
-}
-
static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
{
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
@@ -533,30 +509,6 @@ static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
HACE_CMD_SINGLE_DES);
}
-static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
- HACE_CMD_SINGLE_DES);
-}
-
-static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
- HACE_CMD_SINGLE_DES);
-}
-
-static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
- HACE_CMD_SINGLE_DES);
-}
-
-static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
- HACE_CMD_SINGLE_DES);
-}
-
static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
{
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
@@ -659,26 +611,6 @@ static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
}
-static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
-}
-
-static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
-}
-
-static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
-}
-
-static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
-}
-
static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
{
return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
@@ -792,60 +724,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
},
{
.alg.skcipher.base = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aspeed_aes_setkey,
- .encrypt = aspeed_aes_cfb_encrypt,
- .decrypt = aspeed_aes_cfb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "aspeed-cfb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aspeed_aes_setkey,
- .encrypt = aspeed_aes_ofb_encrypt,
- .decrypt = aspeed_aes_ofb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "aspeed-ofb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -899,60 +777,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
},
{
.alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_des_cfb_encrypt,
- .decrypt = aspeed_des_cfb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "cfb(des)",
- .cra_driver_name = "aspeed-cfb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_des_ofb_encrypt,
- .decrypt = aspeed_des_ofb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "ofb(des)",
- .cra_driver_name = "aspeed-ofb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -1004,60 +828,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.do_one_request = aspeed_crypto_do_request,
},
},
- {
- .alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_tdes_cfb_encrypt,
- .decrypt = aspeed_tdes_cfb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "cfb(des3_ede)",
- .cra_driver_name = "aspeed-cfb-tdes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_tdes_ofb_encrypt,
- .decrypt = aspeed_tdes_ofb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "aspeed-ofb-tdes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
};
static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index d1d93e897..8bd64fc37 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -46,11 +46,6 @@
#define ATMEL_AES_BUFFER_ORDER 2
#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
-#define CFB8_BLOCK_SIZE 1
-#define CFB16_BLOCK_SIZE 2
-#define CFB32_BLOCK_SIZE 4
-#define CFB64_BLOCK_SIZE 8
-
#define SIZE_IN_WORDS(x) ((x) >> 2)
/* AES flags */
@@ -60,12 +55,6 @@
#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC AES_MR_OPMOD_CBC
-#define AES_FLAGS_OFB AES_MR_OPMOD_OFB
-#define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
-#define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
-#define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
-#define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
-#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
@@ -87,7 +76,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
- bool has_cfb64;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -860,22 +848,6 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
int err;
switch (dd->ctx->block_size) {
- case CFB8_BLOCK_SIZE:
- addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- maxburst = 1;
- break;
-
- case CFB16_BLOCK_SIZE:
- addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- maxburst = 1;
- break;
-
- case CFB32_BLOCK_SIZE:
- case CFB64_BLOCK_SIZE:
- addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- maxburst = 1;
- break;
-
case AES_BLOCK_SIZE:
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
maxburst = dd->caps.max_burst_size;
@@ -1103,7 +1075,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
}
/*
- * ECB, CBC, CFB, OFB or CTR mode require the plaintext and ciphertext
+ * ECB, CBC or CTR modes require the plaintext and ciphertext
 * to have a positive integer length.
*/
if (!req->cryptlen && opmode != AES_FLAGS_XTS)
@@ -1113,27 +1085,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
return -EINVAL;
- switch (mode & AES_FLAGS_OPMODE_MASK) {
- case AES_FLAGS_CFB8:
- ctx->block_size = CFB8_BLOCK_SIZE;
- break;
-
- case AES_FLAGS_CFB16:
- ctx->block_size = CFB16_BLOCK_SIZE;
- break;
-
- case AES_FLAGS_CFB32:
- ctx->block_size = CFB32_BLOCK_SIZE;
- break;
-
- case AES_FLAGS_CFB64:
- ctx->block_size = CFB64_BLOCK_SIZE;
- break;
-
- default:
- ctx->block_size = AES_BLOCK_SIZE;
- break;
- }
+ ctx->block_size = AES_BLOCK_SIZE;
ctx->is_aead = false;
rctx = skcipher_request_ctx(req);
@@ -1188,66 +1140,6 @@ static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
return atmel_aes_crypt(req, AES_FLAGS_CBC);
}
-static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_OFB);
-}
-
-static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB128);
-}
-
-static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB64);
-}
-
-static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB32);
-}
-
-static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB16);
-}
-
-static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB8);
-}
-
static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
@@ -1319,76 +1211,6 @@ static struct skcipher_alg aes_algs[] = {
.ivsize = AES_BLOCK_SIZE,
},
{
- .base.cra_name = "ofb(aes)",
- .base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ofb_encrypt,
- .decrypt = atmel_aes_ofb_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb_encrypt,
- .decrypt = atmel_aes_cfb_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb32(aes)",
- .base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb32_encrypt,
- .decrypt = atmel_aes_cfb32_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb16(aes)",
- .base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb16_encrypt,
- .decrypt = atmel_aes_cfb16_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb8(aes)",
- .base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb8_encrypt,
- .decrypt = atmel_aes_cfb8_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
.base.cra_blocksize = 1,
@@ -1404,21 +1226,6 @@ static struct skcipher_alg aes_algs[] = {
},
};
-static struct skcipher_alg aes_cfb64_alg = {
- .base.cra_name = "cfb64(aes)",
- .base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_blocksize = CFB64_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb64_encrypt,
- .decrypt = atmel_aes_cfb64_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-};
-
/* gcm aead functions */
@@ -2407,9 +2214,6 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
- if (dd->caps.has_cfb64)
- crypto_unregister_skcipher(&aes_cfb64_alg);
-
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_skcipher(&aes_algs[i]);
}
@@ -2434,14 +2238,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_algs;
}
- if (dd->caps.has_cfb64) {
- atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
-
- err = crypto_register_skcipher(&aes_cfb64_alg);
- if (err)
- goto err_aes_cfb64_alg;
- }
-
if (dd->caps.has_gcm) {
atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
@@ -2482,8 +2278,6 @@ err_aes_authenc_alg:
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
- crypto_unregister_skcipher(&aes_cfb64_alg);
-err_aes_cfb64_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -2495,7 +2289,6 @@ err_aes_algs:
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
- dd->caps.has_cfb64 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2507,7 +2300,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x600:
case 0x500:
dd->caps.has_dualbuff = 1;
- dd->caps.has_cfb64 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2515,13 +2307,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
break;
case 0x200:
dd->caps.has_dualbuff = 1;
- dd->caps.has_cfb64 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
case 0x130:
dd->caps.has_dualbuff = 1;
- dd->caps.has_cfb64 = 1;
dd->caps.max_burst_size = 4;
break;
case 0x120:
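With the CFB8/16/32/64 paths removed, atmel_aes_dma_start() only ever sees AES_BLOCK_SIZE, so the DMA slave configuration collapses to a single case. A minimal sketch of the resulting setup, assuming standard dmaengine fields (cfg and max_burst are illustrative names, not the driver's):

	#include <linux/dmaengine.h>

	/* Sketch: the sole remaining block size is AES_BLOCK_SIZE, so the
	 * slave config can always use a 4-byte bus width. */
	struct dma_slave_config cfg = {
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = max_burst,	/* from device caps */
		.src_maxburst   = max_burst,
	};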
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 27b7000e2..dcc2380a5 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -45,11 +45,6 @@
#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
-#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
-#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
-#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
-#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
-#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
@@ -60,13 +55,8 @@
#define ATMEL_TDES_QUEUE_LENGTH 50
-#define CFB8_BLOCK_SIZE 1
-#define CFB16_BLOCK_SIZE 2
-#define CFB32_BLOCK_SIZE 4
-
struct atmel_tdes_caps {
bool has_dma;
- u32 has_cfb_3keys;
};
struct atmel_tdes_dev;
@@ -376,7 +366,6 @@ static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -386,19 +375,7 @@ static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
DMA_TO_DEVICE);
}
- switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
- case TDES_FLAGS_CFB8:
- len32 = DIV_ROUND_UP(length, sizeof(u8));
- break;
-
- case TDES_FLAGS_CFB16:
- len32 = DIV_ROUND_UP(length, sizeof(u16));
- break;
-
- default:
- len32 = DIV_ROUND_UP(length, sizeof(u32));
- break;
- }
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -419,7 +396,6 @@ static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
enum dma_slave_buswidth addr_width;
@@ -431,19 +407,7 @@ static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
DMA_TO_DEVICE);
}
- switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
- case TDES_FLAGS_CFB8:
- addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- break;
-
- case TDES_FLAGS_CFB16:
- addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- break;
-
- default:
- addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- break;
- }
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
@@ -680,39 +644,11 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
if (!req->cryptlen)
return 0;
- switch (mode & TDES_FLAGS_OPMODE_MASK) {
- case TDES_FLAGS_CFB8:
- if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
- return -EINVAL;
- }
- ctx->block_size = CFB8_BLOCK_SIZE;
- break;
-
- case TDES_FLAGS_CFB16:
- if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
- return -EINVAL;
- }
- ctx->block_size = CFB16_BLOCK_SIZE;
- break;
-
- case TDES_FLAGS_CFB32:
- if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
- return -EINVAL;
- }
- ctx->block_size = CFB32_BLOCK_SIZE;
- break;
-
- default:
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of DES blocks\n");
- return -EINVAL;
- }
- ctx->block_size = DES_BLOCK_SIZE;
- break;
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
+ dev_dbg(dev, "request size is not exact amount of DES blocks\n");
+ return -EINVAL;
}
+ ctx->block_size = DES_BLOCK_SIZE;
rctx->mode = mode;
@@ -832,55 +768,6 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
-static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
-}
-
-static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
-}
-
-static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
-}
-
-static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
-}
-
-static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
-}
static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
@@ -932,71 +819,6 @@ static struct skcipher_alg tdes_algs[] = {
.decrypt = atmel_tdes_cbc_decrypt,
},
{
- .base.cra_name = "cfb(des)",
- .base.cra_driver_name = "atmel-cfb-des",
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_alignmask = 0x7,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
-},
-{
- .base.cra_name = "cfb8(des)",
- .base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_alignmask = 0,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
-},
-{
- .base.cra_name = "cfb16(des)",
- .base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_alignmask = 0x1,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
-},
-{
- .base.cra_name = "cfb32(des)",
- .base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_alignmask = 0x3,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
-},
-{
- .base.cra_name = "ofb(des)",
- .base.cra_driver_name = "atmel-ofb-des",
- .base.cra_blocksize = 1,
- .base.cra_alignmask = 0x7,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
-},
-{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -1021,19 +843,6 @@ static struct skcipher_alg tdes_algs[] = {
.decrypt = atmel_tdes_cbc_decrypt,
.ivsize = DES_BLOCK_SIZE,
},
-{
- .base.cra_name = "ofb(des3_ede)",
- .base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_alignmask = 0x7,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- .ivsize = DES_BLOCK_SIZE,
-},
};
static void atmel_tdes_queue_task(unsigned long data)
@@ -1121,14 +930,12 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
dd->caps.has_dma = 0;
- dd->caps.has_cfb_3keys = 0;
/* keep only major version number */
switch (dd->hw_version & 0xf00) {
case 0x800:
case 0x700:
dd->caps.has_dma = 1;
- dd->caps.has_cfb_3keys = 1;
break;
case 0x600:
break;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index ef9fe13ff..dbc1d483f 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1535,7 +1535,8 @@ static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
return 0;
@@ -1551,7 +1552,8 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
return 0;
@@ -1561,7 +1563,8 @@ static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
return 0;
@@ -1571,7 +1574,8 @@ static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
return 0;
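The artpec6 hunks above (and the sl3516 hunk further down) replace direct writes to tfm->reqsize with the crypto_skcipher_set_reqsize() accessor, the supported way to reserve per-request driver state. A minimal sketch of the pattern in an init callback; my_req_ctx and my_skcipher_init are illustrative names, not from this patch:

	#include <crypto/internal/skcipher.h>

	struct my_req_ctx {
		unsigned int flags;	/* per-request driver state */
	};

	static int my_skcipher_init(struct crypto_skcipher *tfm)
	{
		/* Reserve room behind each skcipher_request for my_req_ctx. */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx));
		return 0;
	}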
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 10968ddb1..1a3ecd44c 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3517,25 +3517,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ofb(des)",
- .base.cra_driver_name = "ofb-des-iproc",
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_DES,
- .mode = CIPHER_MODE_OFB,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-iproc",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -3574,25 +3555,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ofb(des3_ede)",
- .base.cra_driver_name = "ofb-des3-iproc",
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_3DES,
- .mode = CIPHER_MODE_OFB,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-iproc",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -3631,25 +3593,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ofb(aes)",
- .base.cra_driver_name = "ofb-aes-iproc",
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_AES,
- .mode = CIPHER_MODE_OFB,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-iproc",
.base.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index ee476c6c7..219fe9be7 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -311,12 +311,6 @@ static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
return cvm_setkey(cipher, key, keylen, AES_ECB);
}
-static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
- u32 keylen)
-{
- return cvm_setkey(cipher, key, keylen, AES_CFB);
-}
-
static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
@@ -394,24 +388,6 @@ static struct skcipher_alg algs[] = { {
}, {
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .base.cra_alignmask = 7,
- .base.cra_priority = 4001,
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "cavium-cfb-aes",
- .base.cra_module = THIS_MODULE,
-
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cfb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- .init = cvm_enc_dec_init,
-}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.base.cra_alignmask = 7,
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 138261dcd..6e5e667ba 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -421,25 +421,6 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.exit = nitrox_skcipher_exit,
}, {
.base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "n5_cfb(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
.cra_name = "xts(aes)",
.cra_driver_name = "n5_xts(aes)",
.cra_priority = PRIO,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 918e223f2..d11daaf47 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -267,24 +267,6 @@ static struct ccp_aes_def aes_algs[] = {
.alg_defaults = &ccp_aes_defaults,
},
{
- .mode = CCP_AES_MODE_CFB,
- .version = CCP_VERSION(3, 0),
- .name = "cfb(aes)",
- .driver_name = "cfb-aes-ccp",
- .blocksize = 1,
- .ivsize = AES_BLOCK_SIZE,
- .alg_defaults = &ccp_aes_defaults,
- },
- {
- .mode = CCP_AES_MODE_OFB,
- .version = CCP_VERSION(3, 0),
- .name = "ofb(aes)",
- .driver_name = "ofb-aes-ccp",
- .blocksize = 1,
- .ivsize = AES_BLOCK_SIZE,
- .alg_defaults = &ccp_aes_defaults,
- },
- {
.mode = CCP_AES_MODE_CTR,
.version = CCP_VERSION(3, 0),
.name = "ctr(aes)",
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 53b217a62..b04bc1d3d 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -912,7 +912,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
/*
* The length of the ID shouldn't be assumed by software since
* it may change in the future. The allocation size is limited
- * to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator.
+ * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator.
* If the allocation fails, simply return ENOMEM rather than
* warning in the kernel log.
*/
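The comment above pairs the MAX_PAGE_ORDER rename with a deliberately quiet failure mode. A hedged sketch of that allocation style (the helper name and length source are illustrative, not the driver's exact code): __GFP_NOWARN suppresses the page allocator's failure warning, so an oversized firmware-reported length degrades to a plain -ENOMEM for the caller:

	#include <linux/slab.h>

	/* Illustrative only: allocate a blob of firmware-reported length and
	 * fail silently rather than spamming the kernel log. */
	static void *alloc_id_blob(size_t len)
	{
		return kzalloc(len, GFP_KERNEL | __GFP_NOWARN);
	}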
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 109ffb375..5ef39d682 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2569,9 +2569,13 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg = &tmpl->template_aead;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- tmpl->driver_name);
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CC_CRA_PRIO;
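The hunk above turns silent snprintf() truncation into a hard error. The general pattern, as a sketch (buf and name are placeholders): snprintf() returns the length it would have written, so a return value of at least the buffer size means the string did not fit:

	char buf[CRYPTO_MAX_ALG_NAME];

	if (snprintf(buf, sizeof(buf), "%s", name) >= sizeof(buf))
		return ERR_PTR(-EINVAL);	/* reject truncated names */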
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 2cd44d745..cd66a580e 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1080,24 +1080,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "ofb(paes)",
- .driver_name = "ofb-paes-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_OFB,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
.name = "cts(cbc(paes))",
.driver_name = "cts-cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1206,23 +1188,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "ofb(aes)",
- .driver_name = "ofb-aes-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_OFB,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_630,
- .std_body = CC_STD_NIST,
- },
- {
.name = "cts(cbc(aes))",
.driver_name = "cts-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1427,9 +1392,13 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- tmpl->driver_name);
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_blocksize = tmpl->blocksize;
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index 49dce9e0a..583010b2d 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -332,8 +332,8 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct sl3516_ce_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
dev_info(op->ce->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 7bddc3c78..b4a4ec35b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2096,16 +2096,6 @@ static inline int hifn_encrypt_aes_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_aes_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_encrypt_aes_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
-}
/*
* AES decryption functions.
@@ -2120,16 +2110,6 @@ static inline int hifn_decrypt_aes_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_aes_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_decrypt_aes_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
-}
/*
* DES encryption functions.
@@ -2144,16 +2124,6 @@ static inline int hifn_encrypt_des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_encrypt_des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
-}
/*
* DES decryption functions.
@@ -2168,16 +2138,6 @@ static inline int hifn_decrypt_des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_decrypt_des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
-}
/*
* 3DES encryption functions.
@@ -2192,16 +2152,6 @@ static inline int hifn_encrypt_3des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_3des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_encrypt_3des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
-}
/* 3DES decryption functions. */
static inline int hifn_decrypt_3des_ecb(struct skcipher_request *req)
@@ -2214,16 +2164,6 @@ static inline int hifn_decrypt_3des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_3des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_decrypt_3des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
-}
struct hifn_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
@@ -2234,29 +2174,9 @@ struct hifn_alg_template {
static const struct hifn_alg_template hifn_alg_templates[] = {
/*
- * 3DES ECB, CBC, CFB and OFB modes.
+ * 3DES ECB and CBC modes.
*/
{
- .name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_3DES_KEY_LENGTH,
- .max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_des3_setkey,
- .encrypt = hifn_encrypt_3des_cfb,
- .decrypt = hifn_decrypt_3des_cfb,
- },
- },
- {
- .name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_3DES_KEY_LENGTH,
- .max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_des3_setkey,
- .encrypt = hifn_encrypt_3des_ofb,
- .decrypt = hifn_decrypt_3des_ofb,
- },
- },
- {
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
.skcipher = {
.ivsize = HIFN_IV_LENGTH,
@@ -2279,29 +2199,9 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
},
/*
- * DES ECB, CBC, CFB and OFB modes.
+ * DES ECB and CBC modes.
*/
{
- .name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_DES_KEY_LENGTH,
- .max_keysize = HIFN_DES_KEY_LENGTH,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_des_cfb,
- .decrypt = hifn_decrypt_des_cfb,
- },
- },
- {
- .name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_DES_KEY_LENGTH,
- .max_keysize = HIFN_DES_KEY_LENGTH,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_des_ofb,
- .decrypt = hifn_decrypt_des_ofb,
- },
- },
- {
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
.skcipher = {
.ivsize = HIFN_IV_LENGTH,
@@ -2324,7 +2224,7 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
},
/*
- * AES ECB, CBC, CFB and OFB modes.
+ * AES ECB and CBC modes.
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
@@ -2347,26 +2247,6 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
.decrypt = hifn_decrypt_aes_cbc,
},
},
- {
- .name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_aes_cfb,
- .decrypt = hifn_decrypt_aes_cfb,
- },
- },
- {
- .name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_aes_ofb,
- .decrypt = hifn_decrypt_aes_ofb,
- },
- },
};
static int hifn_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 7e8186fe0..80ed4b2d2 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -31,6 +31,10 @@ static const char * const qm_debug_file_name[] = {
[CLEAR_ENABLE] = "clear_enable",
};
+static const char * const qm_s[] = {
+ "work", "stop",
+};
+
struct qm_dfx_item {
const char *name;
u32 offset;
@@ -53,34 +57,34 @@ static struct qm_dfx_item qm_dfx_files[] = {
#define CNT_CYC_REGS_NUM 10
static const struct debugfs_reg32 qm_dfx_regs[] = {
/* XXX_CNT are read-clear registers */
- {"QM_ECC_1BIT_CNT ", 0x104000ull},
- {"QM_ECC_MBIT_CNT ", 0x104008ull},
- {"QM_DFX_MB_CNT ", 0x104018ull},
- {"QM_DFX_DB_CNT ", 0x104028ull},
- {"QM_DFX_SQE_CNT ", 0x104038ull},
- {"QM_DFX_CQE_CNT ", 0x104048ull},
- {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
- {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
- {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
- {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
- {"QM_ECC_1BIT_INF ", 0x104004ull},
- {"QM_ECC_MBIT_INF ", 0x10400cull},
- {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
- {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
- {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
- {"QM_DFX_FF_ST0 ", 0x1040c8ull},
- {"QM_DFX_FF_ST1 ", 0x1040ccull},
- {"QM_DFX_FF_ST2 ", 0x1040d0ull},
- {"QM_DFX_FF_ST3 ", 0x1040d4ull},
- {"QM_DFX_FF_ST4 ", 0x1040d8ull},
- {"QM_DFX_FF_ST5 ", 0x1040dcull},
- {"QM_DFX_FF_ST6 ", 0x1040e0ull},
- {"QM_IN_IDLE_ST ", 0x1040e4ull},
+ {"QM_ECC_1BIT_CNT ", 0x104000},
+ {"QM_ECC_MBIT_CNT ", 0x104008},
+ {"QM_DFX_MB_CNT ", 0x104018},
+ {"QM_DFX_DB_CNT ", 0x104028},
+ {"QM_DFX_SQE_CNT ", 0x104038},
+ {"QM_DFX_CQE_CNT ", 0x104048},
+ {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050},
+ {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058},
+ {"QM_DFX_ACC_FINISH_CNT ", 0x104060},
+ {"QM_DFX_CQE_ERR_CNT ", 0x1040b4},
+ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200},
+ {"QM_ECC_1BIT_INF ", 0x104004},
+ {"QM_ECC_MBIT_INF ", 0x10400c},
+ {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0},
+ {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4},
+ {"QM_DFX_AXI_RDY_VLD ", 0x1040a8},
+ {"QM_DFX_FF_ST0 ", 0x1040c8},
+ {"QM_DFX_FF_ST1 ", 0x1040cc},
+ {"QM_DFX_FF_ST2 ", 0x1040d0},
+ {"QM_DFX_FF_ST3 ", 0x1040d4},
+ {"QM_DFX_FF_ST4 ", 0x1040d8},
+ {"QM_DFX_FF_ST5 ", 0x1040dc},
+ {"QM_DFX_FF_ST6 ", 0x1040e0},
+ {"QM_IN_IDLE_ST ", 0x1040e4},
};
static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
+ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200},
};
/* define the QM's dfx regs region and region length */
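struct debugfs_reg32 stores its offset as an unsigned long, so the ull suffixes dropped above never affected the stored values; plain integer constants suffice. For reference, a sketch (example_regs is an illustrative name):

	#include <linux/debugfs.h>

	static const struct debugfs_reg32 example_regs[] = {
		{"EXAMPLE_CNT ", 0x104000},	/* offset fits unsigned long */
	};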
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 40da95dba..4b20b94e6 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -129,16 +129,21 @@
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
#define QM_FIFO_OVERFLOW_VF 0x3f
+#define QM_FIFO_OVERFLOW_QP_SHIFT 16
#define QM_ABNORMAL_INF01 0x100014
#define QM_DB_TIMEOUT_TYPE 0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT 6
#define QM_DB_TIMEOUT_VF 0x3f
+#define QM_DB_TIMEOUT_QP_SHIFT 16
+#define QM_ABNORMAL_INF02 0x100018
+#define QM_AXI_POISON_ERR BIT(22)
#define QM_RAS_CE_ENABLE 0x1000ec
#define QM_RAS_FE_ENABLE 0x1000f0
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
+#define QM_AXI_RRESP_ERR BIT(0)
#define QM_ECC_MBIT BIT(2)
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
@@ -402,7 +407,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
- { /* sentinel */ }
};
static const char * const qm_db_timeout[] = {
@@ -413,10 +417,6 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq",
};
-static const char * const qp_s[] = {
- "none", "init", "start", "stop", "close",
-};
-
struct qm_typical_qos_table {
u32 start;
u32 end;
@@ -444,85 +444,6 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
-static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
-{
- enum qm_state curr = atomic_read(&qm->status.flags);
- bool avail = false;
-
- switch (curr) {
- case QM_INIT:
- if (new == QM_START || new == QM_CLOSE)
- avail = true;
- break;
- case QM_START:
- if (new == QM_STOP)
- avail = true;
- break;
- case QM_STOP:
- if (new == QM_CLOSE || new == QM_START)
- avail = true;
- break;
- default:
- break;
- }
-
- dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
- qm_s[curr], qm_s[new]);
-
- if (!avail)
- dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
- qm_s[curr], qm_s[new]);
-
- return avail;
-}
-
-static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
- enum qp_state new)
-{
- enum qm_state qm_curr = atomic_read(&qm->status.flags);
- enum qp_state qp_curr = 0;
- bool avail = false;
-
- if (qp)
- qp_curr = atomic_read(&qp->qp_status.flags);
-
- switch (new) {
- case QP_INIT:
- if (qm_curr == QM_START || qm_curr == QM_INIT)
- avail = true;
- break;
- case QP_START:
- if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
- (qm_curr == QM_START && qp_curr == QP_STOP))
- avail = true;
- break;
- case QP_STOP:
- if ((qm_curr == QM_START && qp_curr == QP_START) ||
- (qp_curr == QP_INIT))
- avail = true;
- break;
- case QP_CLOSE:
- if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
- (qm_curr == QM_START && qp_curr == QP_STOP) ||
- (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
- (qm_curr == QM_STOP && qp_curr == QP_INIT))
- avail = true;
- break;
- default:
- break;
- }
-
- dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
- qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
-
- if (!avail)
- dev_warn(&qm->pdev->dev,
- "Can not change qp state from %s to %s in QM %s\n",
- qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
-
- return avail;
-}
-
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -676,9 +597,6 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
struct qm_mailbox mailbox;
int ret;
- dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
- queue, cmd, (unsigned long long)dma_addr);
-
qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
mutex_lock(&qm->mailbox_lock);
@@ -1456,7 +1374,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
const struct hisi_qm_hw_error *err;
struct device *dev = &qm->pdev->dev;
- u32 reg_val, type, vf_num;
+ u32 reg_val, type, vf_num, qp_id;
int i;
for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
@@ -1472,19 +1390,24 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
QM_DB_TIMEOUT_TYPE_SHIFT;
vf_num = reg_val & QM_DB_TIMEOUT_VF;
- dev_err(dev, "qm %s doorbell timeout in function %u\n",
- qm_db_timeout[type], vf_num);
+ qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
+ dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
+ qm_db_timeout[type], vf_num, qp_id);
} else if (err->int_msk & QM_OF_FIFO_OF) {
reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
QM_FIFO_OVERFLOW_TYPE_SHIFT;
vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
-
+ qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
if (type < ARRAY_SIZE(qm_fifo_overflow))
- dev_err(dev, "qm %s fifo overflow in function %u\n",
- qm_fifo_overflow[type], vf_num);
+ dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
+ qm_fifo_overflow[type], vf_num, qp_id);
else
dev_err(dev, "unknown error type\n");
+ } else if (err->int_msk & QM_AXI_RRESP_ERR) {
+ reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
+ if (reg_val & QM_AXI_POISON_ERR)
+ dev_err(dev, "qm axi poison error happened\n");
}
}
}
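The new *_QP_SHIFT macros expose a queue-pair id that the hardware reports in the upper half of the abnormal-info registers. The field layout implied by the hunk above, as a sketch: bits [5:0] carry the VF number, bits [7:6] the type, and bits [31:16] the queue-pair id:

	u32 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
	u32 type    = (reg_val & QM_DB_TIMEOUT_TYPE) >> QM_DB_TIMEOUT_TYPE_SHIFT;
	u32 vf_num  = reg_val & QM_DB_TIMEOUT_VF;
	u32 qp_id   = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;	/* bits [31:16] */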
@@ -1893,8 +1816,10 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
struct hisi_qp *qp;
int qp_id;
- if (!qm_qp_avail_state(qm, NULL, QP_INIT))
+ if (atomic_read(&qm->status.flags) == QM_STOP) {
+ dev_info_ratelimited(dev, "failed to create qp as qm is stop!\n");
return ERR_PTR(-EPERM);
+ }
if (qm->qp_in_used == qm->qp_num) {
dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
@@ -1921,7 +1846,6 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->alg_type = alg_type;
qp->is_in_kernel = true;
qm->qp_in_used++;
- atomic_set(&qp->qp_status.flags, QP_INIT);
return qp;
}
@@ -1964,11 +1888,6 @@ static void hisi_qm_release_qp(struct hisi_qp *qp)
down_write(&qm->qps_lock);
- if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
- up_write(&qm->qps_lock);
- return;
- }
-
qm->qp_in_used--;
idr_remove(&qm->qp_idr, qp->qp_id);
@@ -2016,6 +1935,11 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
cqc.w8 = 0; /* rand_qc */
}
+ /*
+ * Enable request-finishing interrupts by default, so
+ * interrupts keep arriving until this is disabled.
+ */
cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
@@ -2048,8 +1972,10 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
u32 pasid = arg;
int ret;
- if (!qm_qp_avail_state(qm, qp, QP_START))
+ if (atomic_read(&qm->status.flags) == QM_STOP) {
+ dev_info_ratelimited(dev, "failed to start qp as qm is stop!\n");
return -EPERM;
+ }
ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
if (ret)
@@ -2171,21 +2097,17 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
* is_resetting flag should be set negative so that this qp will not
* be restarted after reset.
*/
- if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
+ if (atomic_read(&qp->qp_status.flags) != QP_START) {
qp->is_resetting = false;
return 0;
}
- if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
- return -EPERM;
-
atomic_set(&qp->qp_status.flags, QP_STOP);
ret = qm_drain_qp(qp);
if (ret)
dev_err(dev, "Failed to drain out data for stopping!\n");
-
flush_workqueue(qp->qm->wq);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
@@ -2905,13 +2827,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
{
qm_cmd_uninit(qm);
hisi_qm_unint_work(qm);
- down_write(&qm->qps_lock);
-
- if (!qm_avail_state(qm, QM_CLOSE)) {
- up_write(&qm->qps_lock);
- return;
- }
+ down_write(&qm->qps_lock);
hisi_qm_memory_uninit(qm);
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
@@ -3085,11 +3002,6 @@ int hisi_qm_start(struct hisi_qm *qm)
down_write(&qm->qps_lock);
- if (!qm_avail_state(qm, QM_START)) {
- up_write(&qm->qps_lock);
- return -EPERM;
- }
-
dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
@@ -3099,10 +3011,12 @@ int hisi_qm_start(struct hisi_qm *qm)
}
ret = __hisi_qm_start(qm);
- if (!ret)
- atomic_set(&qm->status.flags, QM_START);
+ if (ret)
+ goto err_unlock;
+ atomic_set(&qm->status.flags, QM_WORK);
hisi_qm_set_state(qm, QM_READY);
+
err_unlock:
up_write(&qm->qps_lock);
return ret;
@@ -3199,10 +3113,11 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
down_write(&qm->qps_lock);
qm->status.stop_reason = r;
- if (!qm_avail_state(qm, QM_STOP)) {
- ret = -EPERM;
+ if (atomic_read(&qm->status.flags) == QM_STOP)
goto err_unlock;
- }
+
+ /* Stop all request submission first. */
+ atomic_set(&qm->status.flags, QM_STOP);
if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_DOWN) {
@@ -3226,7 +3141,6 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
qm_clear_queues(qm);
- atomic_set(&qm->status.flags, QM_STOP);
err_unlock:
up_write(&qm->qps_lock);
@@ -4016,6 +3930,11 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
int pos;
int i;
+ /*
+ * Since qm_set_vf_mse() is called only after SR-IOV is enabled,
+ * pci_find_ext_capability() cannot return 0, so pos need not be
+ * checked.
+ */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
if (set)
@@ -5418,7 +5337,6 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_free_qm_memory;
qm_cmd_init(qm);
- atomic_set(&qm->status.flags, QM_INIT);
return 0;
diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
index 7b0b15c83..0760bf55f 100644
--- a/drivers/crypto/hisilicon/qm_common.h
+++ b/drivers/crypto/hisilicon/qm_common.h
@@ -72,10 +72,6 @@ struct qm_aeqc {
__le32 dw6;
};
-static const char * const qm_s[] = {
- "init", "start", "close", "stop",
-};
-
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op);
void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
void hisi_qm_set_algqos_init(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index ba7f305d4..f028dcfd0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -850,6 +850,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
+ dev_err(dev, "sec c_alg err!\n");
return -EINVAL;
}
@@ -879,15 +880,11 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
-GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
-GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
@@ -1176,7 +1173,8 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
}
- if (crypto_authenc_extractkeys(&keys, key, keylen))
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (ret)
goto bad_key;
ret = sec_aead_aes_set_key(c_ctx, &keys);
@@ -1193,6 +1191,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
(ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ ret = -EINVAL;
dev_err(dev, "MAC or AUTH key length error!\n");
goto bad_key;
}
@@ -1201,7 +1200,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
bad_key:
memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
- return -EINVAL;
+ return ret;
}
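The setkey hunk above stops flattening every failure to -EINVAL: the return code of crypto_authenc_extractkeys() (or of the later length checks) is captured in ret and travels through the shared cleanup label. The pattern, as a sketch:

	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	if (ret)
		goto bad_key;
	/* ... further validation sets ret before jumping ... */
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return ret;	/* propagate the real error code */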
@@ -2032,8 +2031,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
ret = -EINVAL;
}
break;
- case SEC_CMODE_CFB:
- case SEC_CMODE_OFB:
case SEC_CMODE_CTR:
if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
dev_err(dev, "skcipher HW version error!\n");
@@ -2198,16 +2195,6 @@ static struct sec_skcipher sec_skciphers[] = {
SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
},
{
- .alg_msk = BIT(4),
- .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
- AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
- .alg_msk = BIT(5),
- .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
- AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
.alg_msk = BIT(12),
.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
@@ -2223,16 +2210,6 @@ static struct sec_skcipher sec_skciphers[] = {
SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
},
{
- .alg_msk = BIT(15),
- .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
- AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
- .alg_msk = BIT(16),
- .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
- AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
.alg_msk = BIT(23),
.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index d033f63b5..27a0ee5ad 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -37,8 +37,6 @@ enum sec_mac_len {
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
- SEC_CMODE_CFB = 0x2,
- SEC_CMODE_OFB = 0x3,
SEC_CMODE_CTR = 0x4,
SEC_CMODE_CCM = 0x5,
SEC_CMODE_GCM = 0x6,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 878d94ab5..7bb99381b 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -153,7 +153,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
- {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+ {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
{SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3df7a256e..0beca257c 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -70,11 +70,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
HISI_ACC_SGL_ALIGN_SIZE);
/*
- * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER,
+ * The pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_PAGE_ORDER;
 * the block size may exceed 2^31 on ia64, so it is capped at 2^31.
*/
- block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ?
- PAGE_SHIFT + MAX_ORDER : 31);
+ block_size = 1 << (PAGE_SHIFT + MAX_PAGE_ORDER < 32 ?
+ PAGE_SHIFT + MAX_PAGE_ORDER : 31);
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;
@@ -121,10 +121,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
return pool;
err_free_mem:
- for (j = 0; j < i; j++) {
+ for (j = 0; j < i; j++)
dma_free_coherent(dev, block_size, block[j].sgl,
block[j].sgl_dma);
- }
+
kfree_sensitive(pool);
return ERR_PTR(-ENOMEM);
}
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
struct mem_block *block;
- int i;
+ u32 i;
if (!dev || !pool)
return;
@@ -196,9 +196,10 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
+ u16 entry_sum = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
int i;
- for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
+ for (i = 0; i < entry_sum; i++) {
hw_sge[i].page_ctrl = NULL;
hw_sge[i].buf = 0;
hw_sge[i].len = 0;
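The loop above now reads entry_sum_in_sgl once instead of converting it from little-endian on every iteration; hoisting the invariant also makes the bound's type explicit. A sketch of the same idiom (clear_one_sge is a hypothetical helper):

	u16 entry_sum = le16_to_cpu(hw_sgl->entry_sum_in_sgl);	/* convert once */
	int i;

	for (i = 0; i < entry_sum; i++)
		clear_one_sge(&hw_sge[i]);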
@@ -223,10 +224,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
u32 index, dma_addr_t *hw_sgl_dma)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
+ unsigned int i, sg_n_mapped;
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, sg_n, sg_n_mapped;
+ int sg_n;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 403b07468..479ba8a1d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -292,28 +292,28 @@ static const u64 core_offsets[] = {
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
- {"HZIP_GET_BD_NUM ", 0x00ull},
- {"HZIP_GET_RIGHT_BD ", 0x04ull},
- {"HZIP_GET_ERROR_BD ", 0x08ull},
- {"HZIP_DONE_BD_NUM ", 0x0cull},
- {"HZIP_WORK_CYCLE ", 0x10ull},
- {"HZIP_IDLE_CYCLE ", 0x18ull},
- {"HZIP_MAX_DELAY ", 0x20ull},
- {"HZIP_MIN_DELAY ", 0x24ull},
- {"HZIP_AVG_DELAY ", 0x28ull},
- {"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
- {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
- {"HZIP_CONSUMED_BYTE ", 0x38ull},
- {"HZIP_PRODUCED_BYTE ", 0x40ull},
- {"HZIP_COMP_INF ", 0x70ull},
- {"HZIP_PRE_OUT ", 0x78ull},
- {"HZIP_BD_RD ", 0x7cull},
- {"HZIP_BD_WR ", 0x80ull},
- {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull},
- {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull},
- {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull},
- {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
- {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
+ {"HZIP_GET_BD_NUM ", 0x00},
+ {"HZIP_GET_RIGHT_BD ", 0x04},
+ {"HZIP_GET_ERROR_BD ", 0x08},
+ {"HZIP_DONE_BD_NUM ", 0x0c},
+ {"HZIP_WORK_CYCLE ", 0x10},
+ {"HZIP_IDLE_CYCLE ", 0x18},
+ {"HZIP_MAX_DELAY ", 0x20},
+ {"HZIP_MIN_DELAY ", 0x24},
+ {"HZIP_AVG_DELAY ", 0x28},
+ {"HZIP_MEM_VISIBLE_DATA ", 0x30},
+ {"HZIP_MEM_VISIBLE_ADDR ", 0x34},
+ {"HZIP_CONSUMED_BYTE ", 0x38},
+ {"HZIP_PRODUCED_BYTE ", 0x40},
+ {"HZIP_COMP_INF ", 0x70},
+ {"HZIP_PRE_OUT ", 0x78},
+ {"HZIP_BD_RD ", 0x7c},
+ {"HZIP_BD_WR ", 0x80},
+ {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84},
+ {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88},
+ {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8c},
+ {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94},
+ {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9c},
};
static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
@@ -325,11 +325,11 @@ static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
};
static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
- {"HZIP_GET_BD_NUM ", 0x00ull},
- {"HZIP_GET_RIGHT_BD ", 0x04ull},
- {"HZIP_GET_ERROR_BD ", 0x08ull},
- {"HZIP_DONE_BD_NUM ", 0x0cull},
- {"HZIP_MAX_DELAY ", 0x20ull},
+ {"HZIP_GET_BD_NUM ", 0x00},
+ {"HZIP_GET_RIGHT_BD ", 0x04},
+ {"HZIP_GET_ERROR_BD ", 0x08},
+ {"HZIP_DONE_BD_NUM ", 0x0c},
+ {"HZIP_MAX_DELAY ", 0x20},
};
/* define the ZIP's dfx regs region and region length */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 76da14af7..f5c1912aa 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1191,8 +1191,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
- &safexcel_alg_cfb_aes,
- &safexcel_alg_ofb_aes,
&safexcel_alg_ctr_aes,
&safexcel_alg_md5,
&safexcel_alg_sha1,
@@ -1231,8 +1229,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sm3,
&safexcel_alg_ecb_sm4,
&safexcel_alg_cbc_sm4,
- &safexcel_alg_ofb_sm4,
- &safexcel_alg_cfb_sm4,
&safexcel_alg_ctr_sm4,
&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 47ef6c7cd..d0059ce95 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -933,8 +933,6 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
extern struct safexcel_alg_template safexcel_alg_cbc_aes;
-extern struct safexcel_alg_template safexcel_alg_cfb_aes;
-extern struct safexcel_alg_template safexcel_alg_ofb_aes;
extern struct safexcel_alg_template safexcel_alg_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
@@ -973,8 +971,6 @@ extern struct safexcel_alg_template safexcel_alg_sm3;
extern struct safexcel_alg_template safexcel_alg_hmac_sm3;
extern struct safexcel_alg_template safexcel_alg_ecb_sm4;
extern struct safexcel_alg_template safexcel_alg_cbc_sm4;
-extern struct safexcel_alg_template safexcel_alg_ofb_sm4;
-extern struct safexcel_alg_template safexcel_alg_cfb_sm4;
extern struct safexcel_alg_template safexcel_alg_ctr_sm4;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4;
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index b83818634..42677f745 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1352,82 +1352,6 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
};
-static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_AES;
- ctx->blocksz = AES_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_cfb_aes = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_aes_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "safexcel-cfb-aes",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_aes_cfb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_AES;
- ctx->blocksz = AES_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_ofb_aes = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_aes_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "safexcel-ofb-aes",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_aes_ofb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
@@ -3186,82 +3110,6 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
},
};
-static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_SM4;
- ctx->blocksz = SM4_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_sm4_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .base = {
- .cra_name = "ofb(sm4)",
- .cra_driver_name = "safexcel-ofb-sm4",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_sm4_ofb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_SM4;
- ctx->blocksz = SM4_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_sm4_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .base = {
- .cra_name = "cfb(sm4)",
- .cra_driver_name = "safexcel-cfb-sm4",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_sm4_cfb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
index 3d90c87d4..f38cd62a3 100644
--- a/drivers/crypto/intel/Kconfig
+++ b/drivers/crypto/intel/Kconfig
@@ -3,3 +3,4 @@
source "drivers/crypto/intel/keembay/Kconfig"
source "drivers/crypto/intel/ixp4xx/Kconfig"
source "drivers/crypto/intel/qat/Kconfig"
+source "drivers/crypto/intel/iaa/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
index b3d0352ae..2f56f6d34 100644
--- a/drivers/crypto/intel/Makefile
+++ b/drivers/crypto/intel/Makefile
@@ -3,3 +3,4 @@
obj-y += keembay/
obj-y += ixp4xx/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) += iaa/
diff --git a/drivers/crypto/intel/iaa/Kconfig b/drivers/crypto/intel/iaa/Kconfig
new file mode 100644
index 000000000..d53f4b1d4
--- /dev/null
+++ b/drivers/crypto/intel/iaa/Kconfig
@@ -0,0 +1,19 @@
+config CRYPTO_DEV_IAA_CRYPTO
+ tristate "Support for Intel(R) IAA Compression Accelerator"
+ depends on CRYPTO_DEFLATE
+ depends on INTEL_IDXD
+ default n
+ help
+ This driver supports acceleration for compression and
+ decompression with the Intel Analytics Accelerator (IAA)
+ hardware using the cryptographic API. If you choose 'M'
+ here, the module will be called iaa_crypto.
+
+config CRYPTO_DEV_IAA_CRYPTO_STATS
+ bool "Enable Intel(R) IAA Compression Accelerator Statistics"
+ depends on CRYPTO_DEV_IAA_CRYPTO
+ default n
+ help
+ Enable statistics for the IAA compression accelerator.
+ These include per-device and per-workqueue statistics in
+ addition to global driver statistics.
diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile
new file mode 100644
index 000000000..b64b208d2
--- /dev/null
+++ b/drivers/crypto/intel/iaa/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for IAA crypto device drivers
+#
+
+ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
+obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o
+
+iaa_crypto-y := iaa_crypto_main.o iaa_crypto_comp_fixed.o
+
+iaa_crypto-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS) += iaa_crypto_stats.o
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
new file mode 100644
index 000000000..014420f7b
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#ifndef __IAA_CRYPTO_H__
+#define __IAA_CRYPTO_H__
+
+#include <linux/crypto.h>
+#include <linux/idxd.h>
+#include <uapi/linux/idxd.h>
+
+#define IDXD_SUBDRIVER_NAME "crypto"
+
+#define IAA_DECOMP_ENABLE BIT(0)
+#define IAA_DECOMP_FLUSH_OUTPUT BIT(1)
+#define IAA_DECOMP_CHECK_FOR_EOB BIT(2)
+#define IAA_DECOMP_STOP_ON_EOB BIT(3)
+#define IAA_DECOMP_SUPPRESS_OUTPUT BIT(9)
+
+#define IAA_COMP_FLUSH_OUTPUT BIT(1)
+#define IAA_COMP_APPEND_EOB BIT(2)
+
+#define IAA_COMPLETION_TIMEOUT 1000000
+
+#define IAA_ANALYTICS_ERROR 0x0a
+#define IAA_ERROR_DECOMP_BUF_OVERFLOW 0x0b
+#define IAA_ERROR_COMP_BUF_OVERFLOW 0x19
+#define IAA_ERROR_WATCHDOG_EXPIRED 0x24
+
+#define IAA_COMP_MODES_MAX 2
+
+#define FIXED_HDR 0x2
+#define FIXED_HDR_SIZE 3
+
+#define IAA_COMP_FLAGS (IAA_COMP_FLUSH_OUTPUT | \
+ IAA_COMP_APPEND_EOB)
+
+#define IAA_DECOMP_FLAGS (IAA_DECOMP_ENABLE | \
+ IAA_DECOMP_FLUSH_OUTPUT | \
+ IAA_DECOMP_CHECK_FOR_EOB | \
+ IAA_DECOMP_STOP_ON_EOB)
+
+/* Representation of IAA workqueue */
+struct iaa_wq {
+ struct list_head list;
+
+ struct idxd_wq *wq;
+ int ref;
+ bool remove;
+
+ struct iaa_device *iaa_device;
+
+ u64 comp_calls;
+ u64 comp_bytes;
+ u64 decomp_calls;
+ u64 decomp_bytes;
+};
+
+struct iaa_device_compression_mode {
+ const char *name;
+
+ struct aecs_comp_table_record *aecs_comp_table;
+ struct aecs_decomp_table_record *aecs_decomp_table;
+
+ dma_addr_t aecs_comp_table_dma_addr;
+ dma_addr_t aecs_decomp_table_dma_addr;
+};
+
+/* Representation of IAA device with wqs, populated by probe */
+struct iaa_device {
+ struct list_head list;
+ struct idxd_device *idxd;
+
+ struct iaa_device_compression_mode *compression_modes[IAA_COMP_MODES_MAX];
+
+ int n_wq;
+ struct list_head wqs;
+
+ u64 comp_calls;
+ u64 comp_bytes;
+ u64 decomp_calls;
+ u64 decomp_bytes;
+};
+
+struct wq_table_entry {
+ struct idxd_wq **wqs;
+ int max_wqs;
+ int n_wqs;
+ int cur_wq;
+};
+
+#define IAA_AECS_ALIGN 32
+
+/*
+ * Analytics Engine Configuration and State (AECS) contains parameters and
+ * internal state of the analytics engine.
+ */
+struct aecs_comp_table_record {
+ u32 crc;
+ u32 xor_checksum;
+ u32 reserved0[5];
+ u32 num_output_accum_bits;
+ u8 output_accum[256];
+ u32 ll_sym[286];
+ u32 reserved1;
+ u32 reserved2;
+ u32 d_sym[30];
+ u32 reserved_padding[2];
+} __packed;
+
+/* AECS for decompress */
+struct aecs_decomp_table_record {
+ u32 crc;
+ u32 xor_checksum;
+ u32 low_filter_param;
+ u32 high_filter_param;
+ u32 output_mod_idx;
+ u32 drop_init_decomp_out_bytes;
+ u32 reserved[36];
+ u32 output_accum_data[2];
+ u32 out_bits_valid;
+ u32 bit_off_indexing;
+ u32 input_accum_data[64];
+ u8 size_qw[32];
+ u32 decomp_state[1220];
+} __packed;
+
+int iaa_aecs_init_fixed(void);
+void iaa_aecs_cleanup_fixed(void);
+
+typedef int (*iaa_dev_comp_init_fn_t) (struct iaa_device_compression_mode *mode);
+typedef int (*iaa_dev_comp_free_fn_t) (struct iaa_device_compression_mode *mode);
+
+struct iaa_compression_mode {
+ const char *name;
+ u32 *ll_table;
+ int ll_table_size;
+ u32 *d_table;
+ int d_table_size;
+ u32 *header_table;
+ int header_table_size;
+ u16 gen_decomp_table_flags;
+ iaa_dev_comp_init_fn_t init;
+ iaa_dev_comp_free_fn_t free;
+};
+
+int add_iaa_compression_mode(const char *name,
+ const u32 *ll_table,
+ int ll_table_size,
+ const u32 *d_table,
+ int d_table_size,
+ const u8 *header_table,
+ int header_table_size,
+ u16 gen_decomp_table_flags,
+ iaa_dev_comp_init_fn_t init,
+ iaa_dev_comp_free_fn_t free);
+
+void remove_iaa_compression_mode(const char *name);
+
+enum iaa_mode {
+ IAA_MODE_FIXED,
+};
+
+struct iaa_compression_ctx {
+ enum iaa_mode mode;
+ bool verify_compress;
+ bool async_mode;
+ bool use_irq;
+};
+
+extern struct list_head iaa_devices;
+extern struct mutex iaa_devices_lock;
+
+#endif
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
new file mode 100644
index 000000000..45cf5d74f
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#include "idxd.h"
+#include "iaa_crypto.h"
+
+/*
+ * Fixed Huffman tables the IAA hardware requires to implement RFC-1951.
+ */
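+/*
+ * Each entry appears to pack a canonical Huffman code as
+ * (code_length << 15) | code: 0x40030 is the 8-bit code 0x30 for
+ * literal 0, 0x38000 the 7-bit code 0 for the end-of-block symbol
+ * 256, and 0x28000 a 5-bit code 0 for distance symbol 0, matching
+ * the fixed code lengths of RFC 1951, section 3.2.6. (Inferred from
+ * the values; the layout is not documented in this patch.)
+ */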
+static const u32 fixed_ll_sym[286] = {
+ 0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037,
+ 0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F,
+ 0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047,
+ 0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F,
+ 0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057,
+ 0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F,
+ 0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067,
+ 0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F,
+ 0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077,
+ 0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F,
+ 0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087,
+ 0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F,
+ 0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097,
+ 0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F,
+ 0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7,
+ 0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF,
+ 0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7,
+ 0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF,
+ 0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197,
+ 0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F,
+ 0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7,
+ 0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF,
+ 0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7,
+ 0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF,
+ 0x481C0, 0x481C1, 0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7,
+ 0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF,
+ 0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7,
+ 0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF,
+ 0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7,
+ 0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF,
+ 0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7,
+ 0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF,
+ 0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007,
+ 0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F,
+ 0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017,
+ 0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5
+};
+
+static const u32 fixed_d_sym[30] = {
+ 0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007,
+ 0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F,
+ 0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017,
+ 0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D
+};
+
+static int init_fixed_mode(struct iaa_device_compression_mode *mode)
+{
+ struct aecs_comp_table_record *comp_table = mode->aecs_comp_table;
+ u32 bfinal = 1;
+ u32 offset;
+
+ /* Configure aecs table using fixed Huffman table */
+ comp_table->crc = 0;
+ comp_table->xor_checksum = 0;
+ offset = comp_table->num_output_accum_bits / 8;
+ comp_table->output_accum[offset] = FIXED_HDR | bfinal;
+ comp_table->num_output_accum_bits = FIXED_HDR_SIZE;
+
+ return 0;
+}
+
+int iaa_aecs_init_fixed(void)
+{
+ int ret;
+
+ ret = add_iaa_compression_mode("fixed",
+ fixed_ll_sym,
+ sizeof(fixed_ll_sym),
+ fixed_d_sym,
+ sizeof(fixed_d_sym),
+ NULL, 0, 0,
+ init_fixed_mode, NULL);
+ if (!ret)
+ pr_debug("IAA fixed compression mode initialized\n");
+
+ return ret;
+}
+
+void iaa_aecs_cleanup_fixed(void)
+{
+ remove_iaa_compression_mode("fixed");
+}
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
new file mode 100644
index 000000000..64a2e87a5
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -0,0 +1,2197 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <uapi/linux/idxd.h>
+#include <linux/highmem.h>
+#include <linux/sched/smt.h>
+#include <crypto/internal/acompress.h>
+
+#include "idxd.h"
+#include "iaa_crypto.h"
+#include "iaa_crypto_stats.h"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt
+
+#define IAA_ALG_PRIORITY 300
+
+/* number of iaa instances probed */
+static unsigned int nr_iaa;
+static unsigned int nr_cpus;
+static unsigned int nr_nodes;
+static unsigned int nr_cpus_per_node;
+
+/* Number of physical cpus sharing each iaa instance */
+static unsigned int cpus_per_iaa;
+
+static struct crypto_comp *deflate_generic_tfm;
+
+/* Per-cpu lookup table for balanced wqs */
+static struct wq_table_entry __percpu *wq_table;
+
+static struct idxd_wq *wq_table_next_wq(int cpu)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ if (++entry->cur_wq >= entry->n_wqs)
+ entry->cur_wq = 0;
+
+ if (!entry->wqs[entry->cur_wq])
+ return NULL;
+
+ pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
+ entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id,
+ entry->wqs[entry->cur_wq]->id, cpu);
+
+ return entry->wqs[entry->cur_wq];
+}
+
+static void wq_table_add(int cpu, struct idxd_wq *wq)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ if (WARN_ON(entry->n_wqs == entry->max_wqs))
+ return;
+
+ entry->wqs[entry->n_wqs++] = wq;
+
+ pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
+ entry->wqs[entry->n_wqs - 1]->idxd->id,
+ entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu);
+}
+
+static void wq_table_free_entry(int cpu)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ kfree(entry->wqs);
+ memset(entry, 0, sizeof(*entry));
+}
+
+static void wq_table_clear_entry(int cpu)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ entry->n_wqs = 0;
+ entry->cur_wq = 0;
+ memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *));
+}
+
+LIST_HEAD(iaa_devices);
+DEFINE_MUTEX(iaa_devices_lock);
+
+/* If enabled, IAA hw crypto algos are registered, unavailable otherwise */
+static bool iaa_crypto_enabled;
+static bool iaa_crypto_registered;
+
+/* Verify results of IAA compress or not */
+static bool iaa_verify_compress = true;
+
+static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", iaa_verify_compress);
+}
+
+static ssize_t verify_compress_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (iaa_crypto_enabled)
+ goto out;
+
+ ret = kstrtobool(buf, &iaa_verify_compress);
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ mutex_unlock(&iaa_devices_lock);
+
+ return ret;
+}
+static DRIVER_ATTR_RW(verify_compress);
+
+/*
+ * The iaa crypto driver supports three 'sync' methods determining how
+ * compressions and decompressions are performed:
+ *
+ * - sync: the compression or decompression completes before
+ * returning. This is the mode used by the async crypto
+ * interface when the sync mode is set to 'sync' and by
+ * the sync crypto interface regardless of setting.
+ *
+ * - async: the compression or decompression is submitted and returns
+ * immediately. Completion interrupts are not used so
+ * the caller is responsible for polling the descriptor
+ * for completion. This mode is applicable to only the
+ * async crypto interface and is ignored for anything
+ * else.
+ *
+ * - async_irq: the compression or decompression is submitted and
+ * returns immediately. Completion interrupts are
+ * enabled so the caller can wait for the completion and
+ * yield to other threads. When the compression or
+ * decompression completes, the completion is signaled
+ * and the caller awakened. This mode is applicable to
+ * only the async crypto interface and is ignored for
+ * anything else.
+ *
+ * These modes can be set using the iaa_crypto sync_mode driver
+ * attribute.
+ */
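+
+/*
+ * For example, assuming the usual idxd sysfs layout (the exact path
+ * is not spelled out in this patch):
+ *
+ *	# echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
+ *
+ * The store only succeeds before the algorithms are registered;
+ * sync_mode_store() below returns -EBUSY once iaa_crypto_enabled
+ * is set.
+ */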
+
+/* Use async mode */
+static bool async_mode;
+/* Use interrupts */
+static bool use_irq;
+
+/**
+ * set_iaa_sync_mode - Set IAA sync mode
+ * @name: The name of the sync mode
+ *
+ * Make the IAA sync mode named @name the current sync mode used by
+ * compression/decompression.
+ */
+
+static int set_iaa_sync_mode(const char *name)
+{
+ int ret = 0;
+
+ if (sysfs_streq(name, "sync")) {
+ async_mode = false;
+ use_irq = false;
+ } else if (sysfs_streq(name, "async")) {
+ async_mode = true;
+ use_irq = false;
+ } else if (sysfs_streq(name, "async_irq")) {
+ async_mode = true;
+ use_irq = true;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
+{
+ int ret = 0;
+
+ if (!async_mode && !use_irq)
+ ret = sprintf(buf, "%s\n", "sync");
+ else if (async_mode && !use_irq)
+ ret = sprintf(buf, "%s\n", "async");
+ else if (async_mode && use_irq)
+ ret = sprintf(buf, "%s\n", "async_irq");
+
+ return ret;
+}
+
+static ssize_t sync_mode_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (iaa_crypto_enabled)
+ goto out;
+
+ ret = set_iaa_sync_mode(buf);
+ if (ret == 0)
+ ret = count;
+out:
+ mutex_unlock(&iaa_devices_lock);
+
+ return ret;
+}
+static DRIVER_ATTR_RW(sync_mode);
+
+static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];
+
+static int find_empty_iaa_compression_mode(void)
+{
+	int i;
+
+	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+		if (!iaa_compression_modes[i])
+			return i;
+	}
+
+	/* All slots taken; don't return IAA_COMP_MODES_MAX as a valid index */
+	return -EINVAL;
+}
+
+static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
+{
+ struct iaa_compression_mode *mode;
+ int i;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ mode = iaa_compression_modes[i];
+ if (!mode)
+ continue;
+
+ if (!strcmp(mode->name, name)) {
+ *idx = i;
+ return iaa_compression_modes[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
+{
+ kfree(mode->name);
+ kfree(mode->ll_table);
+ kfree(mode->d_table);
+ kfree(mode->header_table);
+
+ kfree(mode);
+}
+
+/*
+ * IAA Compression modes are defined by an ll_table, a d_table, and an
+ * optional header_table. These tables are typically generated and
+ * captured using statistics collected from running actual
+ * compress/decompress workloads.
+ *
+ * A module or other kernel code can add and remove compression modes
+ * with a given name using the exported @add_iaa_compression_mode()
+ * and @remove_iaa_compression_mode() functions.
+ *
+ * When a new compression mode is added, the tables are saved in a
+ * global compression mode list. When IAA devices are added, a
+ * per-IAA device dma mapping is created for each IAA device, for each
+ * compression mode. These are the tables used to do the actual
+ * compression/decompression and are unmapped if/when the devices are
+ * removed. Currently, compression modes must be added before any
+ * device is added, and removed after all devices have been removed.
+ */
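+
+/*
+ * A minimal caller sketch (hypothetical table names and mode name,
+ * not part of this patch):
+ *
+ *	static u32 my_ll[286], my_d[30];	// tables trained offline
+ *
+ *	ret = add_iaa_compression_mode("mymode",
+ *				       my_ll, sizeof(my_ll),
+ *				       my_d, sizeof(my_d),
+ *				       NULL, 0, 0, NULL, NULL);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	remove_iaa_compression_mode("mymode");
+ */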
+
+/**
+ * remove_iaa_compression_mode - Remove an IAA compression mode
+ * @name: The name the compression mode will be known as
+ *
+ * Remove the IAA compression mode named @name.
+ */
+void remove_iaa_compression_mode(const char *name)
+{
+ struct iaa_compression_mode *mode;
+ int idx;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (!list_empty(&iaa_devices))
+ goto out;
+
+ mode = find_iaa_compression_mode(name, &idx);
+ if (mode) {
+ free_iaa_compression_mode(mode);
+ iaa_compression_modes[idx] = NULL;
+ }
+out:
+ mutex_unlock(&iaa_devices_lock);
+}
+EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);
+
+/**
+ * add_iaa_compression_mode - Add an IAA compression mode
+ * @name: The name the compression mode will be known as
+ * @ll_table: The ll table
+ * @ll_table_size: The ll table size in bytes
+ * @d_table: The d table
+ * @d_table_size: The d table size in bytes
+ * @header_table: Optional header table
+ * @header_table_size: Optional header table size in bytes
+ * @gen_decomp_table_flags: Optional flags used to generate the decomp table
+ * @init: Optional callback function to init the compression mode data
+ * @free: Optional callback function to free the compression mode data
+ *
+ * Add a new IAA compression mode named @name.
+ *
+ * Returns 0 if successful, errcode otherwise.
+ */
+int add_iaa_compression_mode(const char *name,
+ const u32 *ll_table,
+ int ll_table_size,
+ const u32 *d_table,
+ int d_table_size,
+ const u8 *header_table,
+ int header_table_size,
+ u16 gen_decomp_table_flags,
+ iaa_dev_comp_init_fn_t init,
+ iaa_dev_comp_free_fn_t free)
+{
+ struct iaa_compression_mode *mode;
+ int idx, ret = -ENOMEM;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (!list_empty(&iaa_devices)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ goto out;
+
+ mode->name = kstrdup(name, GFP_KERNEL);
+ if (!mode->name)
+ goto free;
+
+ if (ll_table) {
+ mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL);
+ if (!mode->ll_table)
+ goto free;
+ memcpy(mode->ll_table, ll_table, ll_table_size);
+ mode->ll_table_size = ll_table_size;
+ }
+
+ if (d_table) {
+ mode->d_table = kzalloc(d_table_size, GFP_KERNEL);
+ if (!mode->d_table)
+ goto free;
+ memcpy(mode->d_table, d_table, d_table_size);
+ mode->d_table_size = d_table_size;
+ }
+
+ if (header_table) {
+ mode->header_table = kzalloc(header_table_size, GFP_KERNEL);
+ if (!mode->header_table)
+ goto free;
+ memcpy(mode->header_table, header_table, header_table_size);
+ mode->header_table_size = header_table_size;
+ }
+
+ mode->gen_decomp_table_flags = gen_decomp_table_flags;
+
+ mode->init = init;
+ mode->free = free;
+
+ idx = find_empty_iaa_compression_mode();
+ if (idx < 0)
+ goto free;
+
+ pr_debug("IAA compression mode %s added at idx %d\n",
+ mode->name, idx);
+
+ iaa_compression_modes[idx] = mode;
+
+ ret = 0;
+out:
+ mutex_unlock(&iaa_devices_lock);
+
+ return ret;
+free:
+ free_iaa_compression_mode(mode);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(add_iaa_compression_mode);
+
+static struct iaa_device_compression_mode *
+get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx)
+{
+ return iaa_device->compression_modes[idx];
+}
+
+static void free_device_compression_mode(struct iaa_device *iaa_device,
+ struct iaa_device_compression_mode *device_mode)
+{
+ size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
+ struct device *dev = &iaa_device->idxd->pdev->dev;
+
+ kfree(device_mode->name);
+
+ if (device_mode->aecs_comp_table)
+ dma_free_coherent(dev, size, device_mode->aecs_comp_table,
+ device_mode->aecs_comp_table_dma_addr);
+ if (device_mode->aecs_decomp_table)
+ dma_free_coherent(dev, size, device_mode->aecs_decomp_table,
+ device_mode->aecs_decomp_table_dma_addr);
+
+ kfree(device_mode);
+}
+
+#define IDXD_OP_FLAG_AECS_RW_TGLS 0x400000
+#define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC)
+#define IAX_AECS_COMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
+#define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
+#define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \
+ IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \
+ IDXD_OP_FLAG_AECS_RW_TGLS)
+
+static int check_completion(struct device *dev,
+ struct iax_completion_record *comp,
+ bool compress,
+ bool only_once);
+
+static int decompress_header(struct iaa_device_compression_mode *device_mode,
+ struct iaa_compression_mode *mode,
+ struct idxd_wq *wq)
+{
+ dma_addr_t src_addr, src2_addr;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct device *dev;
+ int ret = 0;
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc))
+ return PTR_ERR(idxd_desc);
+
+ desc = idxd_desc->iax_hw;
+
+ dev = &wq->idxd->pdev->dev;
+
+ src_addr = dma_map_single(dev, (void *)mode->header_table,
+ mode->header_table_size, DMA_TO_DEVICE);
+ dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n",
+ __func__, mode->name, src_addr, dev,
+ mode->header_table, mode->header_table_size);
+ if (unlikely(dma_mapping_error(dev, src_addr))) {
+ dev_dbg(dev, "dma_map_single err, exiting\n");
+		ret = -ENOMEM;
+		idxd_free_desc(wq, idxd_desc);
+		return ret;
+ }
+
+ desc->flags = IAX_AECS_GEN_FLAG;
+ desc->opcode = IAX_OPCODE_DECOMPRESS;
+
+ desc->src1_addr = (u64)src_addr;
+ desc->src1_size = mode->header_table_size;
+
+ src2_addr = device_mode->aecs_decomp_table_dma_addr;
+ desc->src2_addr = (u64)src2_addr;
+ desc->src2_size = 1088;
+ dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n",
+ __func__, mode->name, desc->src2_addr, dev, desc->src2_size);
+	desc->max_dst_size = 0; /* suppressed output */
+
+ desc->decompr_flags = mode->gen_decomp_table_flags;
+
+ desc->priv = 0;
+
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret);
+ goto out;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, false, false);
+ if (ret)
+ dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n",
+ __func__, mode->name, ret);
+ else
+ dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__,
+ mode->name);
+out:
+	dma_unmap_single(dev, src_addr, mode->header_table_size, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int init_device_compression_mode(struct iaa_device *iaa_device,
+ struct iaa_compression_mode *mode,
+ int idx, struct idxd_wq *wq)
+{
+ size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
+ struct device *dev = &iaa_device->idxd->pdev->dev;
+ struct iaa_device_compression_mode *device_mode;
+ int ret = -ENOMEM;
+
+ device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL);
+ if (!device_mode)
+ return -ENOMEM;
+
+ device_mode->name = kstrdup(mode->name, GFP_KERNEL);
+ if (!device_mode->name)
+ goto free;
+
+ device_mode->aecs_comp_table = dma_alloc_coherent(dev, size,
+ &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL);
+ if (!device_mode->aecs_comp_table)
+ goto free;
+
+ device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size,
+ &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL);
+ if (!device_mode->aecs_decomp_table)
+ goto free;
+
+ /* Add Huffman table to aecs */
+ memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
+ memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
+ memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);
+
+ if (mode->header_table) {
+ ret = decompress_header(device_mode, mode, wq);
+ if (ret) {
+ pr_debug("iaa header decompression failed: ret=%d\n", ret);
+ goto free;
+ }
+ }
+
+ if (mode->init) {
+ ret = mode->init(device_mode);
+ if (ret)
+ goto free;
+ }
+
+ /* mode index should match iaa_compression_modes idx */
+ iaa_device->compression_modes[idx] = device_mode;
+
+ pr_debug("IAA %s compression mode initialized for iaa device %d\n",
+ mode->name, iaa_device->idxd->id);
+
+ ret = 0;
+out:
+ return ret;
+free:
+ pr_debug("IAA %s compression mode initialization failed for iaa device %d\n",
+ mode->name, iaa_device->idxd->id);
+
+ free_device_compression_mode(iaa_device, device_mode);
+ goto out;
+}
+
+static int init_device_compression_modes(struct iaa_device *iaa_device,
+ struct idxd_wq *wq)
+{
+ struct iaa_compression_mode *mode;
+ int i, ret = 0;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ mode = iaa_compression_modes[i];
+ if (!mode)
+ continue;
+
+ ret = init_device_compression_mode(iaa_device, mode, i, wq);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void remove_device_compression_modes(struct iaa_device *iaa_device)
+{
+ struct iaa_device_compression_mode *device_mode;
+ int i;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ device_mode = iaa_device->compression_modes[i];
+ if (!device_mode)
+ continue;
+
+		/* Run the mode's free callback before freeing device_mode */
+		if (iaa_compression_modes[i]->free)
+			iaa_compression_modes[i]->free(device_mode);
+		free_device_compression_mode(iaa_device, device_mode);
+		iaa_device->compression_modes[i] = NULL;
+ }
+}
+
+static struct iaa_device *iaa_device_alloc(void)
+{
+ struct iaa_device *iaa_device;
+
+ iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL);
+ if (!iaa_device)
+ return NULL;
+
+ INIT_LIST_HEAD(&iaa_device->wqs);
+
+ return iaa_device;
+}
+
+static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
+{
+ struct iaa_wq *iaa_wq;
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
+ if (iaa_wq->wq == wq)
+ return true;
+ }
+
+ return false;
+}
+
+static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
+{
+ struct iaa_device *iaa_device;
+
+ iaa_device = iaa_device_alloc();
+ if (!iaa_device)
+ return NULL;
+
+ iaa_device->idxd = idxd;
+
+ list_add_tail(&iaa_device->list, &iaa_devices);
+
+ nr_iaa++;
+
+ return iaa_device;
+}
+
+static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
+{
+	return init_device_compression_modes(iaa_device, iaa_wq->wq);
+}
+
+static void del_iaa_device(struct iaa_device *iaa_device)
+{
+ list_del(&iaa_device->list);
+
+ nr_iaa--;
+}
+
+static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
+ struct iaa_wq **new_wq)
+{
+ struct idxd_device *idxd = iaa_device->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iaa_wq *iaa_wq;
+
+ iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL);
+ if (!iaa_wq)
+ return -ENOMEM;
+
+ iaa_wq->wq = wq;
+ iaa_wq->iaa_device = iaa_device;
+ idxd_wq_set_private(wq, iaa_wq);
+
+ list_add_tail(&iaa_wq->list, &iaa_device->wqs);
+
+ iaa_device->n_wq++;
+
+ if (new_wq)
+ *new_wq = iaa_wq;
+
+ dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n",
+ wq->id, iaa_device->idxd->id, iaa_device->n_wq);
+
+ return 0;
+}
+
+static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = iaa_device->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iaa_wq *iaa_wq;
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
+ if (iaa_wq->wq == wq) {
+ list_del(&iaa_wq->list);
+ iaa_device->n_wq--;
+
+ dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n",
+ wq->id, iaa_device->idxd->id,
+ iaa_device->n_wq, nr_iaa);
+
+ if (iaa_device->n_wq == 0)
+ del_iaa_device(iaa_device);
+ break;
+ }
+ }
+}
+
+static void clear_wq_table(void)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ wq_table_clear_entry(cpu);
+
+ pr_debug("cleared wq table\n");
+}
+
+static void free_iaa_device(struct iaa_device *iaa_device)
+{
+ if (!iaa_device)
+ return;
+
+ remove_device_compression_modes(iaa_device);
+ kfree(iaa_device);
+}
+
+static void __free_iaa_wq(struct iaa_wq *iaa_wq)
+{
+ struct iaa_device *iaa_device;
+
+ if (!iaa_wq)
+ return;
+
+ iaa_device = iaa_wq->iaa_device;
+ if (iaa_device->n_wq == 0)
+ free_iaa_device(iaa_wq->iaa_device);
+}
+
+static void free_iaa_wq(struct iaa_wq *iaa_wq)
+{
+ struct idxd_wq *wq;
+
+ __free_iaa_wq(iaa_wq);
+
+ wq = iaa_wq->wq;
+
+ kfree(iaa_wq);
+ idxd_wq_set_private(wq, NULL);
+}
+
+static int iaa_wq_get(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct iaa_wq *iaa_wq;
+ int ret = 0;
+
+ spin_lock(&idxd->dev_lock);
+ iaa_wq = idxd_wq_get_private(wq);
+ if (iaa_wq && !iaa_wq->remove) {
+ iaa_wq->ref++;
+ idxd_wq_get(wq);
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock(&idxd->dev_lock);
+
+ return ret;
+}
+
+static int iaa_wq_put(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct iaa_wq *iaa_wq;
+ bool free = false;
+ int ret = 0;
+
+ spin_lock(&idxd->dev_lock);
+ iaa_wq = idxd_wq_get_private(wq);
+ if (iaa_wq) {
+ iaa_wq->ref--;
+ if (iaa_wq->ref == 0 && iaa_wq->remove) {
+ idxd_wq_set_private(wq, NULL);
+ free = true;
+ }
+ idxd_wq_put(wq);
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock(&idxd->dev_lock);
+ if (free) {
+ __free_iaa_wq(iaa_wq);
+ kfree(iaa_wq);
+ }
+
+ return ret;
+}
+
+static void free_wq_table(void)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ wq_table_free_entry(cpu);
+
+ free_percpu(wq_table);
+
+ pr_debug("freed wq table\n");
+}
+
+static int alloc_wq_table(int max_wqs)
+{
+ struct wq_table_entry *entry;
+ int cpu;
+
+ wq_table = alloc_percpu(struct wq_table_entry);
+ if (!wq_table)
+ return -ENOMEM;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ entry = per_cpu_ptr(wq_table, cpu);
+		entry->wqs = kcalloc(max_wqs, sizeof(struct idxd_wq *), GFP_KERNEL);
+ if (!entry->wqs) {
+ free_wq_table();
+ return -ENOMEM;
+ }
+
+ entry->max_wqs = max_wqs;
+ }
+
+ pr_debug("initialized wq table\n");
+
+ return 0;
+}
+
+static int save_iaa_wq(struct idxd_wq *wq)
+{
+ struct iaa_device *iaa_device, *found = NULL;
+ struct idxd_device *idxd;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ list_for_each_entry(iaa_device, &iaa_devices, list) {
+ if (iaa_device->idxd == wq->idxd) {
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+ /*
+ * Check to see that we don't already have this wq.
+ * Shouldn't happen but we don't control probing.
+ */
+ if (iaa_has_wq(iaa_device, wq)) {
+ dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n",
+ iaa_device);
+ goto out;
+ }
+
+ found = iaa_device;
+
+ ret = add_iaa_wq(iaa_device, wq, NULL);
+ if (ret)
+ goto out;
+
+ break;
+ }
+ }
+
+ if (!found) {
+ struct iaa_device *new_device;
+ struct iaa_wq *new_wq;
+
+ new_device = add_iaa_device(wq->idxd);
+ if (!new_device) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = add_iaa_wq(new_device, wq, &new_wq);
+ if (ret) {
+ del_iaa_device(new_device);
+ free_iaa_device(new_device);
+ goto out;
+ }
+
+ ret = init_iaa_device(new_device, new_wq);
+ if (ret) {
+ del_iaa_wq(new_device, new_wq->wq);
+ del_iaa_device(new_device);
+ free_iaa_wq(new_wq);
+ goto out;
+ }
+ }
+
+ if (WARN_ON(nr_iaa == 0))
+ return -EINVAL;
+
+ cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+out:
+	return ret;
+}
+
+static void remove_iaa_wq(struct idxd_wq *wq)
+{
+ struct iaa_device *iaa_device;
+
+ list_for_each_entry(iaa_device, &iaa_devices, list) {
+ if (iaa_has_wq(iaa_device, wq)) {
+ del_iaa_wq(iaa_device, wq);
+ break;
+ }
+ }
+
+ if (nr_iaa) {
+ cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+ } else
+ cpus_per_iaa = 1;
+}
+
+static int wq_table_add_wqs(int iaa, int cpu)
+{
+ struct iaa_device *iaa_device, *found_device = NULL;
+ int ret = 0, cur_iaa = 0, n_wqs_added = 0;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ list_for_each_entry(iaa_device, &iaa_devices, list) {
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ if (cur_iaa != iaa) {
+ cur_iaa++;
+ continue;
+ }
+
+ found_device = iaa_device;
+ dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n",
+ found_device->idxd->id, cur_iaa);
+ break;
+ }
+
+ if (!found_device) {
+ found_device = list_first_entry_or_null(&iaa_devices,
+ struct iaa_device, list);
+ if (!found_device) {
+ pr_debug("couldn't find any iaa devices with wqs!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ cur_iaa = 0;
+
+ idxd = found_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+ dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n",
+ found_device->idxd->id, cur_iaa);
+ }
+
+ list_for_each_entry(iaa_wq, &found_device->wqs, list) {
+ wq_table_add(cpu, iaa_wq->wq);
+ pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n",
+ cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
+ n_wqs_added++;
+ }
+
+ if (!n_wqs_added) {
+ pr_debug("couldn't find any iaa wqs!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Rebalance the wq table so that given a cpu, it's easy to find the
+ * closest IAA instance. The idea is to try to choose the most
+ * appropriate IAA instance for a caller and spread available
+ * workqueues around to clients.
+ */
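+/*
+ * Worked example with illustrative numbers: 2 nodes of 64 cpus and
+ * 4 IAA instances gives cpus_per_iaa = (2 * 64) / 4 = 32, so cpus
+ * 0-31 of each node share that node's first IAA instance and cpus
+ * 32-63 the next one.
+ */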
+static void rebalance_wq_table(void)
+{
+ const struct cpumask *node_cpus;
+ int node, cpu, iaa = -1;
+
+ if (nr_iaa == 0)
+ return;
+
+ pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n",
+ nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa);
+
+ clear_wq_table();
+
+ if (nr_iaa == 1) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ if (WARN_ON(wq_table_add_wqs(0, cpu))) {
+ pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
+ return;
+ }
+ }
+
+ return;
+ }
+
+ for_each_node_with_cpus(node) {
+ node_cpus = cpumask_of_node(node);
+
+ for (cpu = 0; cpu < nr_cpus_per_node; cpu++) {
+ int node_cpu = cpumask_nth(cpu, node_cpus);
+
+ if (WARN_ON(node_cpu >= nr_cpu_ids)) {
+ pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
+ return;
+ }
+
+ if ((cpu % cpus_per_iaa) == 0)
+ iaa++;
+
+ if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
+ pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
+ return;
+ }
+ }
+ }
+}
+
+static inline int check_completion(struct device *dev,
+ struct iax_completion_record *comp,
+ bool compress,
+ bool only_once)
+{
+ char *op_str = compress ? "compress" : "decompress";
+ int ret = 0;
+
+ while (!comp->status) {
+ if (only_once)
+ return -EAGAIN;
+ cpu_relax();
+ }
+
+ if (comp->status != IAX_COMP_SUCCESS) {
+ if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) {
+ ret = -ETIMEDOUT;
+ dev_dbg(dev, "%s timed out, size=0x%x\n",
+ op_str, comp->output_size);
+ update_completion_timeout_errs();
+ goto out;
+ }
+
+ if (comp->status == IAA_ANALYTICS_ERROR &&
+ comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) {
+ ret = -E2BIG;
+ dev_dbg(dev, "compressed > uncompressed size,"
+ " not compressing, size=0x%x\n",
+ comp->output_size);
+ update_completion_comp_buf_overflow_errs();
+ goto out;
+ }
+
+ if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n",
+ op_str, comp->status, comp->error_code, comp->output_size);
+ print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0);
+ update_completion_einval_errs();
+
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int deflate_generic_decompress(struct acomp_req *req)
+{
+ void *src, *dst;
+ int ret;
+
+ src = kmap_local_page(sg_page(req->src)) + req->src->offset;
+ dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
+
+ ret = crypto_comp_decompress(deflate_generic_tfm,
+ src, req->slen, dst, &req->dlen);
+
+ kunmap_local(src);
+ kunmap_local(dst);
+
+ update_total_sw_decomp_calls();
+
+ return ret;
+}
+
+static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
+ struct acomp_req *req,
+ dma_addr_t *src_addr, dma_addr_t *dst_addr);
+
+static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ u32 compression_crc);
+
+static void iaa_desc_complete(struct idxd_desc *idxd_desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc, void *__ctx,
+ u32 *status)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *compression_ctx;
+ struct crypto_ctx *ctx = __ctx;
+ struct iaa_device *iaa_device;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret, err = 0;
+
+ compression_ctx = crypto_tfm_ctx(ctx->tfm);
+
+ iaa_wq = idxd_wq_get_private(idxd_desc->wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device,
+ compression_ctx->mode);
+ dev_dbg(dev, "%s: compression mode %s,"
+ " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
+ active_compression_mode->name,
+ ctx->src_addr, ctx->dst_addr);
+
+ ret = check_completion(dev, idxd_desc->iax_completion,
+ ctx->compress, false);
+ if (ret) {
+ dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
+ if (!ctx->compress &&
+ idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
+ pr_warn("%s: falling back to deflate-generic decompress, "
+ "analytics error code %x\n", __func__,
+ idxd_desc->iax_completion->error_code);
+ ret = deflate_generic_decompress(ctx->req);
+ if (ret) {
+ dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
+ __func__, ret);
+ err = -EIO;
+ goto err;
+ }
+ } else {
+ err = -EIO;
+ goto err;
+ }
+ } else {
+ ctx->req->dlen = idxd_desc->iax_completion->output_size;
+ }
+
+ /* Update stats */
+ if (ctx->compress) {
+ update_total_comp_bytes_out(ctx->req->dlen);
+ update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
+ } else {
+ update_total_decomp_bytes_in(ctx->req->dlen);
+ update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen);
+ }
+
+ if (ctx->compress && compression_ctx->verify_compress) {
+ dma_addr_t src_addr, dst_addr;
+ u32 compression_crc;
+
+ compression_crc = idxd_desc->iax_completion->crc;
+
+ ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
+ if (ret) {
+ dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
+ err = -EIO;
+ goto out;
+ }
+
+ ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
+ ctx->req->slen, dst_addr, &ctx->req->dlen,
+ compression_crc);
+ if (ret) {
+ dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
+ err = -EIO;
+ }
+
+ dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
+ dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);
+
+ goto out;
+ }
+err:
+ dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
+out:
+ if (ret != 0)
+ dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
+
+ if (ctx->req->base.complete)
+ acomp_request_complete(ctx->req, err);
+
+ if (free_desc)
+ idxd_free_desc(idxd_desc->wq, idxd_desc);
+ iaa_wq_put(idxd_desc->wq);
+}
+
+static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ u32 *compression_crc,
+ bool disable_async)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct iaa_device *iaa_device;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ iaa_wq = idxd_wq_get_private(wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc)) {
+ dev_dbg(dev, "idxd descriptor allocation failed\n");
+ dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc));
+ return PTR_ERR(idxd_desc);
+ }
+ desc = idxd_desc->iax_hw;
+
+ desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
+ IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC;
+ desc->opcode = IAX_OPCODE_COMPRESS;
+ desc->compr_flags = IAA_COMP_FLAGS;
+ desc->priv = 0;
+
+ desc->src1_addr = (u64)src_addr;
+ desc->src1_size = slen;
+ desc->dst_addr = (u64)dst_addr;
+ desc->max_dst_size = *dlen;
+ desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr;
+ desc->src2_size = sizeof(struct aecs_comp_table_record);
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ if (ctx->use_irq && !disable_async) {
+ desc->flags |= IDXD_OP_FLAG_RCI;
+
+ idxd_desc->crypto.req = req;
+ idxd_desc->crypto.tfm = tfm;
+ idxd_desc->crypto.src_addr = src_addr;
+ idxd_desc->crypto.dst_addr = dst_addr;
+ idxd_desc->crypto.compress = true;
+
+ dev_dbg(dev, "%s use_async_irq: compression mode %s,"
+ " src_addr %llx, dst_addr %llx\n", __func__,
+ active_compression_mode->name,
+ src_addr, dst_addr);
+ } else if (ctx->async_mode && !disable_async)
+ req->base.data = idxd_desc;
+
+ dev_dbg(dev, "%s: compression mode %s,"
+ " desc->src1_addr %llx, desc->src1_size %d,"
+ " desc->dst_addr %llx, desc->max_dst_size %d,"
+ " desc->src2_addr %llx, desc->src2_size %d\n", __func__,
+ active_compression_mode->name,
+ desc->src1_addr, desc->src1_size, desc->dst_addr,
+ desc->max_dst_size, desc->src2_addr, desc->src2_size);
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
+ goto err;
+ }
+
+ /* Update stats */
+ update_total_comp_calls();
+ update_wq_comp_calls(wq);
+
+ if (ctx->async_mode && !disable_async) {
+ ret = -EINPROGRESS;
+ dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+ goto out;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, true, false);
+ if (ret) {
+ dev_dbg(dev, "check_completion failed ret=%d\n", ret);
+ goto err;
+ }
+
+ *dlen = idxd_desc->iax_completion->output_size;
+
+ /* Update stats */
+ update_total_comp_bytes_out(*dlen);
+ update_wq_comp_bytes(wq, *dlen);
+
+ *compression_crc = idxd_desc->iax_completion->crc;
+
+ if (!ctx->async_mode || disable_async)
+ idxd_free_desc(wq, idxd_desc);
+out:
+ return ret;
+err:
+ idxd_free_desc(wq, idxd_desc);
+ dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
+
+ goto out;
+}
+
+static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
+ struct acomp_req *req,
+ dma_addr_t *src_addr, dma_addr_t *dst_addr)
+{
+ int ret = 0;
+ int nr_sgs;
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "verify: couldn't map src sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto out;
+ }
+ *src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
+ goto out;
+ }
+ *dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+out:
+ return ret;
+}
+
+static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ u32 compression_crc)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct iaa_device *iaa_device;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ iaa_wq = idxd_wq_get_private(wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc)) {
+ dev_dbg(dev, "idxd descriptor allocation failed\n");
+ dev_dbg(dev, "iaa compress failed: ret=%ld\n",
+ PTR_ERR(idxd_desc));
+ return PTR_ERR(idxd_desc);
+ }
+ desc = idxd_desc->iax_hw;
+
+ /* Verify (optional) - decompress and check crc, suppress dest write */
+
+ desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
+ desc->opcode = IAX_OPCODE_DECOMPRESS;
+ desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT;
+ desc->priv = 0;
+
+ desc->src1_addr = (u64)dst_addr;
+ desc->src1_size = *dlen;
+ desc->dst_addr = (u64)src_addr;
+ desc->max_dst_size = slen;
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ dev_dbg(dev, "(verify) compression mode %s,"
+ " desc->src1_addr %llx, desc->src1_size %d,"
+ " desc->dst_addr %llx, desc->max_dst_size %d,"
+ " desc->src2_addr %llx, desc->src2_size %d\n",
+ active_compression_mode->name,
+ desc->src1_addr, desc->src1_size, desc->dst_addr,
+ desc->max_dst_size, desc->src2_addr, desc->src2_size);
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret);
+ goto err;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, false, false);
+ if (ret) {
+ dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret);
+ goto err;
+ }
+
+ if (compression_crc != idxd_desc->iax_completion->crc) {
+ ret = -EINVAL;
+ dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
+ " comp=0x%x, decomp=0x%x\n", compression_crc,
+ idxd_desc->iax_completion->crc);
+ print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
+ 8, 1, idxd_desc->iax_completion, 64, 0);
+ goto err;
+ }
+
+ idxd_free_desc(wq, idxd_desc);
+out:
+ return ret;
+err:
+ idxd_free_desc(wq, idxd_desc);
+ dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
+
+ goto out;
+}
+
+static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ bool disable_async)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct iaa_device *iaa_device;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ iaa_wq = idxd_wq_get_private(wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc)) {
+ dev_dbg(dev, "idxd descriptor allocation failed\n");
+ dev_dbg(dev, "iaa decompress failed: ret=%ld\n",
+ PTR_ERR(idxd_desc));
+ return PTR_ERR(idxd_desc);
+ }
+ desc = idxd_desc->iax_hw;
+
+ desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
+ desc->opcode = IAX_OPCODE_DECOMPRESS;
+ desc->max_dst_size = PAGE_SIZE;
+ desc->decompr_flags = IAA_DECOMP_FLAGS;
+ desc->priv = 0;
+
+ desc->src1_addr = (u64)src_addr;
+ desc->dst_addr = (u64)dst_addr;
+ desc->max_dst_size = *dlen;
+ desc->src1_size = slen;
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ if (ctx->use_irq && !disable_async) {
+ desc->flags |= IDXD_OP_FLAG_RCI;
+
+ idxd_desc->crypto.req = req;
+ idxd_desc->crypto.tfm = tfm;
+ idxd_desc->crypto.src_addr = src_addr;
+ idxd_desc->crypto.dst_addr = dst_addr;
+ idxd_desc->crypto.compress = false;
+
+ dev_dbg(dev, "%s: use_async_irq compression mode %s,"
+ " src_addr %llx, dst_addr %llx\n", __func__,
+ active_compression_mode->name,
+ src_addr, dst_addr);
+ } else if (ctx->async_mode && !disable_async)
+ req->base.data = idxd_desc;
+
+ dev_dbg(dev, "%s: decompression mode %s,"
+ " desc->src1_addr %llx, desc->src1_size %d,"
+ " desc->dst_addr %llx, desc->max_dst_size %d,"
+ " desc->src2_addr %llx, desc->src2_size %d\n", __func__,
+ active_compression_mode->name,
+ desc->src1_addr, desc->src1_size, desc->dst_addr,
+ desc->max_dst_size, desc->src2_addr, desc->src2_size);
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
+ goto err;
+ }
+
+ /* Update stats */
+ update_total_decomp_calls();
+ update_wq_decomp_calls(wq);
+
+ if (ctx->async_mode && !disable_async) {
+ ret = -EINPROGRESS;
+ dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+ goto out;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, false, false);
+ if (ret) {
+ dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
+ if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
+ pr_warn("%s: falling back to deflate-generic decompress, "
+ "analytics error code %x\n", __func__,
+ idxd_desc->iax_completion->error_code);
+ ret = deflate_generic_decompress(req);
+ if (ret) {
+ dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
+ __func__, ret);
+ goto err;
+ }
+ } else {
+ goto err;
+ }
+ } else {
+ req->dlen = idxd_desc->iax_completion->output_size;
+ }
+
+ *dlen = req->dlen;
+
+ if (!ctx->async_mode || disable_async)
+ idxd_free_desc(wq, idxd_desc);
+
+ /* Update stats */
+ update_total_decomp_bytes_in(slen);
+ update_wq_decomp_bytes(wq, slen);
+out:
+ return ret;
+err:
+ idxd_free_desc(wq, idxd_desc);
+ dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret);
+
+ goto out;
+}
+
+static int iaa_comp_acompress(struct acomp_req *req)
+{
+ struct iaa_compression_ctx *compression_ctx;
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ bool disable_async = false;
+ int nr_sgs, cpu, ret = 0;
+ struct iaa_wq *iaa_wq;
+ u32 compression_crc;
+ struct idxd_wq *wq;
+ struct device *dev;
+ int order = -1;
+
+ compression_ctx = crypto_tfm_ctx(tfm);
+
+ if (!iaa_crypto_enabled) {
+ pr_debug("iaa_crypto disabled, not compressing\n");
+ return -ENODEV;
+ }
+
+ if (!req->src || !req->slen) {
+ pr_debug("invalid src, not compressing\n");
+ return -EINVAL;
+ }
+
+ cpu = get_cpu();
+ wq = wq_table_next_wq(cpu);
+ put_cpu();
+ if (!wq) {
+ pr_debug("no wq configured for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ ret = iaa_wq_get(wq);
+ if (ret) {
+ pr_debug("no wq available for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ iaa_wq = idxd_wq_get_private(wq);
+
+ if (!req->dst) {
+ gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+
+ /* incompressible data will always be < 2 * slen */
+ req->dlen = 2 * req->slen;
+ order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
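+		/*
+		 * Example with 4K pages: slen = 4096 gives dlen = 8192,
+		 * round_up(8192, PAGE_SIZE) / PAGE_SIZE = 2, and
+		 * order_base_2(2) = 1, i.e. an order-1 (two page)
+		 * allocation.
+		 */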
+ req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ order = -1;
+ goto out;
+ }
+ disable_async = true;
+ }
+
+ dev = &wq->idxd->pdev->dev;
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+			" wq %d: nr_sgs=%d\n", iaa_wq->iaa_device->idxd->id,
+			iaa_wq->wq->id, nr_sgs);
+ ret = -EIO;
+ goto out;
+ }
+ src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+			" wq %d: nr_sgs=%d\n", iaa_wq->iaa_device->idxd->id,
+			iaa_wq->wq->id, nr_sgs);
+ ret = -EIO;
+ goto err_map_dst;
+ }
+ dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+
+ ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
+ &req->dlen, &compression_crc, disable_async);
+ if (ret == -EINPROGRESS)
+ return ret;
+
+ if (!ret && compression_ctx->verify_compress) {
+ ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr);
+ if (ret) {
+ dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, compression_crc);
+ if (ret)
+ dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
+
+ goto out;
+ }
+
+ if (ret)
+ dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+err_map_dst:
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+out:
+ iaa_wq_put(wq);
+
+ if (order >= 0)
+ sgl_free_order(req->dst, order);
+
+ return ret;
+}
+
+static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
+{
+ gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ int nr_sgs, cpu, ret = 0;
+ struct iaa_wq *iaa_wq;
+ struct device *dev;
+ struct idxd_wq *wq;
+ int order = -1;
+
+ cpu = get_cpu();
+ wq = wq_table_next_wq(cpu);
+ put_cpu();
+ if (!wq) {
+ pr_debug("no wq configured for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ ret = iaa_wq_get(wq);
+ if (ret) {
+ pr_debug("no wq available for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ iaa_wq = idxd_wq_get_private(wq);
+
+ dev = &wq->idxd->pdev->dev;
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+			" wq %d: nr_sgs=%d\n", iaa_wq->iaa_device->idxd->id,
+			iaa_wq->wq->id, nr_sgs);
+ ret = -EIO;
+ goto out;
+ }
+ src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+	req->dlen = 4 * req->slen; /* start with ~avg comp ratio */
+alloc_dest:
+ order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
+ req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ order = -1;
+ goto out;
+ }
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+			" wq %d: nr_sgs=%d\n", iaa_wq->iaa_device->idxd->id,
+			iaa_wq->wq->id, nr_sgs);
+ ret = -EIO;
+ goto err_map_dst;
+ }
+
+ dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+ ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, true);
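+	/*
+	 * On -EOVERFLOW, double the destination and retry; attempts are
+	 * bounded by CRYPTO_ACOMP_DST_MAX, so the loop terminates after
+	 * at most log2(CRYPTO_ACOMP_DST_MAX / (4 * slen)) passes.
+	 */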
+ if (ret == -EOVERFLOW) {
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ req->dlen *= 2;
+ if (req->dlen > CRYPTO_ACOMP_DST_MAX)
+ goto err_map_dst;
+ goto alloc_dest;
+ }
+
+ if (ret != 0)
+ dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+err_map_dst:
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+out:
+ iaa_wq_put(wq);
+
+ if (order >= 0)
+ sgl_free_order(req->dst, order);
+
+ return ret;
+}
+
+static int iaa_comp_adecompress(struct acomp_req *req)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ int nr_sgs, cpu, ret = 0;
+ struct iaa_wq *iaa_wq;
+ struct device *dev;
+ struct idxd_wq *wq;
+
+ if (!iaa_crypto_enabled) {
+ pr_debug("iaa_crypto disabled, not decompressing\n");
+ return -ENODEV;
+ }
+
+ if (!req->src || !req->slen) {
+ pr_debug("invalid src, not decompressing\n");
+ return -EINVAL;
+ }
+
+ if (!req->dst)
+ return iaa_comp_adecompress_alloc_dest(req);
+
+ cpu = get_cpu();
+ wq = wq_table_next_wq(cpu);
+ put_cpu();
+ if (!wq) {
+ pr_debug("no wq configured for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ ret = iaa_wq_get(wq);
+ if (ret) {
+ pr_debug("no wq available for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ iaa_wq = idxd_wq_get_private(wq);
+
+ dev = &wq->idxd->pdev->dev;
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+			" wq %d: nr_sgs=%d\n", iaa_wq->iaa_device->idxd->id,
+			iaa_wq->wq->id, nr_sgs);
+ ret = -EIO;
+ goto out;
+ }
+ src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+			" wq %d: nr_sgs=%d\n", iaa_wq->iaa_device->idxd->id,
+			iaa_wq->wq->id, nr_sgs);
+ ret = -EIO;
+ goto err_map_dst;
+ }
+ dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+
+ ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, false);
+ if (ret == -EINPROGRESS)
+ return ret;
+
+ if (ret != 0)
+ dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+err_map_dst:
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+out:
+ iaa_wq_put(wq);
+
+ return ret;
+}
+
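+/*
+ * Snapshot the driver-wide tunables (the verify_compress and sync_mode
+ * driver attributes registered at module init) into the per-tfm
+ * context.
+ */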
+static void compression_ctx_init(struct iaa_compression_ctx *ctx)
+{
+ ctx->verify_compress = iaa_verify_compress;
+ ctx->async_mode = async_mode;
+ ctx->use_irq = use_irq;
+}
+
+static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ compression_ctx_init(ctx);
+
+ ctx->mode = IAA_MODE_FIXED;
+
+ return 0;
+}
+
+static void dst_free(struct scatterlist *sgl)
+{
+ /*
+ * Called for req->dst = NULL cases but we free elsewhere
+ * using sgl_free_order().
+ */
+}
+
+static struct acomp_alg iaa_acomp_fixed_deflate = {
+ .init = iaa_comp_init_fixed,
+ .compress = iaa_comp_acompress,
+ .decompress = iaa_comp_adecompress,
+ .dst_free = dst_free,
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "deflate-iaa",
+ .cra_ctxsize = sizeof(struct iaa_compression_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_priority = IAA_ALG_PRIORITY,
+ }
+};
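+/*
+ * Illustrative usage sketch (not part of this driver): a kernel
+ * consumer reaches this implementation through the generic acomp API,
+ * either by algorithm name, where cra_priority decides, or explicitly
+ * by driver name:
+ *
+ *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate-iaa", 0, 0);
+ *	struct acomp_req *req = acomp_request_alloc(tfm);
+ *
+ *	acomp_request_set_params(req, src_sgl, dst_sgl, slen, dlen);
+ *	ret = crypto_acomp_compress(req);  (may return -EINPROGRESS)
+ */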
+
+static int iaa_register_compression_device(void)
+{
+ int ret;
+
+ ret = crypto_register_acomp(&iaa_acomp_fixed_deflate);
+ if (ret) {
+ pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret);
+ goto out;
+ }
+
+ iaa_crypto_registered = true;
+out:
+ return ret;
+}
+
+static int iaa_unregister_compression_device(void)
+{
+ if (iaa_crypto_registered)
+ crypto_unregister_acomp(&iaa_acomp_fixed_deflate);
+
+ return 0;
+}
+
+static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_driver_data *data = idxd->data;
+ struct device *dev = &idxd_dev->conf_dev;
+ bool first_wq = false;
+ int ret = 0;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ if (data->type != IDXD_TYPE_IAX)
+ return -ENODEV;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd_wq_get_private(wq)) {
+ mutex_unlock(&wq->wq_lock);
+ return -EBUSY;
+ }
+
+ if (!idxd_wq_driver_name_match(wq, dev)) {
+ dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n",
+ idxd->id, wq->id, wq->driver_name, dev->driver->name);
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+ ret = -ENODEV;
+ goto err;
+ }
+
+ wq->type = IDXD_WQT_KERNEL;
+
+ ret = idxd_drv_enable_wq(wq);
+ if (ret < 0) {
+ dev_dbg(dev, "enable wq %d.%d failed: %d\n",
+ idxd->id, wq->id, ret);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (list_empty(&iaa_devices)) {
+ ret = alloc_wq_table(wq->idxd->max_wqs);
+ if (ret)
+ goto err_alloc;
+ first_wq = true;
+ }
+
+ ret = save_iaa_wq(wq);
+ if (ret)
+ goto err_save;
+
+ rebalance_wq_table();
+
+ if (first_wq) {
+ iaa_crypto_enabled = true;
+ ret = iaa_register_compression_device();
+ if (ret != 0) {
+ iaa_crypto_enabled = false;
+ dev_dbg(dev, "IAA compression device registration failed\n");
+ goto err_register;
+ }
+ try_module_get(THIS_MODULE);
+
+ pr_info("iaa_crypto now ENABLED\n");
+ }
+
+ mutex_unlock(&iaa_devices_lock);
+out:
+ mutex_unlock(&wq->wq_lock);
+
+ return ret;
+
+err_register:
+ remove_iaa_wq(wq);
+ free_iaa_wq(idxd_wq_get_private(wq));
+err_save:
+ if (first_wq)
+ free_wq_table();
+err_alloc:
+ mutex_unlock(&iaa_devices_lock);
+ idxd_drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+
+ goto out;
+}
+
+static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ struct iaa_wq *iaa_wq;
+ bool free = false;
+
+ idxd_wq_quiesce(wq);
+
+ mutex_lock(&wq->wq_lock);
+ mutex_lock(&iaa_devices_lock);
+
+ remove_iaa_wq(wq);
+
+ spin_lock(&idxd->dev_lock);
+ iaa_wq = idxd_wq_get_private(wq);
+ if (!iaa_wq) {
+ spin_unlock(&idxd->dev_lock);
+ pr_err("%s: no iaa_wq available to remove\n", __func__);
+ goto out;
+ }
+
+ if (iaa_wq->ref) {
+ iaa_wq->remove = true;
+ } else {
+ wq = iaa_wq->wq;
+ idxd_wq_set_private(wq, NULL);
+ free = true;
+ }
+ spin_unlock(&idxd->dev_lock);
+ if (free) {
+ __free_iaa_wq(iaa_wq);
+ kfree(iaa_wq);
+ }
+
+ idxd_drv_disable_wq(wq);
+ rebalance_wq_table();
+
+ if (nr_iaa == 0) {
+ iaa_crypto_enabled = false;
+ free_wq_table();
+ module_put(THIS_MODULE);
+
+ pr_info("iaa_crypto now DISABLED\n");
+ }
+out:
+ mutex_unlock(&iaa_devices_lock);
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+static struct idxd_device_driver iaa_crypto_driver = {
+ .probe = iaa_crypto_probe,
+ .remove = iaa_crypto_remove,
+ .name = IDXD_SUBDRIVER_NAME,
+ .type = dev_types,
+ .desc_complete = iaa_desc_complete,
+};
+
+static int __init iaa_crypto_init_module(void)
+{
+ int ret = 0;
+ int node;
+
+ nr_cpus = num_online_cpus();
+ for_each_node_with_cpus(node)
+ nr_nodes++;
+ if (!nr_nodes) {
+ pr_err("IAA couldn't find any nodes with cpus\n");
+ return -ENODEV;
+ }
+ nr_cpus_per_node = nr_cpus / nr_nodes;
+
+ if (crypto_has_comp("deflate-generic", 0, 0))
+ deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0);
+
+ if (IS_ERR_OR_NULL(deflate_generic_tfm)) {
+ pr_err("IAA could not alloc %s tfm: errcode = %ld\n",
+ "deflate-generic", PTR_ERR(deflate_generic_tfm));
+ return -ENOMEM;
+ }
+
+ ret = iaa_aecs_init_fixed();
+ if (ret < 0) {
+ pr_debug("IAA fixed compression mode init failed\n");
+ goto err_aecs_init;
+ }
+
+ ret = idxd_driver_register(&iaa_crypto_driver);
+ if (ret) {
+ pr_debug("IAA wq sub-driver registration failed\n");
+ goto err_driver_reg;
+ }
+
+ ret = driver_create_file(&iaa_crypto_driver.drv,
+ &driver_attr_verify_compress);
+ if (ret) {
+ pr_debug("IAA verify_compress attr creation failed\n");
+ goto err_verify_attr_create;
+ }
+
+ ret = driver_create_file(&iaa_crypto_driver.drv,
+ &driver_attr_sync_mode);
+ if (ret) {
+ pr_debug("IAA sync mode attr creation failed\n");
+ goto err_sync_attr_create;
+ }
+
+ if (iaa_crypto_debugfs_init())
+ pr_warn("debugfs init failed, stats not available\n");
+
+ pr_debug("initialized\n");
+out:
+ return ret;
+
+err_sync_attr_create:
+ driver_remove_file(&iaa_crypto_driver.drv,
+ &driver_attr_verify_compress);
+err_verify_attr_create:
+ idxd_driver_unregister(&iaa_crypto_driver);
+err_driver_reg:
+ iaa_aecs_cleanup_fixed();
+err_aecs_init:
+ crypto_free_comp(deflate_generic_tfm);
+
+ goto out;
+}
+
+static void __exit iaa_crypto_cleanup_module(void)
+{
+ if (iaa_unregister_compression_device())
+ pr_debug("IAA compression device unregister failed\n");
+
+ iaa_crypto_debugfs_cleanup();
+ driver_remove_file(&iaa_crypto_driver.drv,
+ &driver_attr_sync_mode);
+ driver_remove_file(&iaa_crypto_driver.drv,
+ &driver_attr_verify_compress);
+ idxd_driver_unregister(&iaa_crypto_driver);
+ iaa_aecs_cleanup_fixed();
+ crypto_free_comp(deflate_generic_tfm);
+
+ pr_debug("cleaned up\n");
+}
+
+MODULE_IMPORT_NS(IDXD);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IDXD_DEVICE(0);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver");
+
+module_init(iaa_crypto_init_module);
+module_exit(iaa_crypto_cleanup_module);
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
new file mode 100644
index 000000000..2e3b7b73a
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <uapi/linux/idxd.h>
+#include <linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../../dma/idxd/idxd.h"
+#include <linux/debugfs.h>
+#include <crypto/internal/acompress.h>
+#include "iaa_crypto.h"
+#include "iaa_crypto_stats.h"
+
+static u64 total_comp_calls;
+static u64 total_decomp_calls;
+static u64 total_sw_decomp_calls;
+static u64 max_comp_delay_ns;
+static u64 max_decomp_delay_ns;
+static u64 max_acomp_delay_ns;
+static u64 max_adecomp_delay_ns;
+static u64 total_comp_bytes_out;
+static u64 total_decomp_bytes_in;
+static u64 total_completion_einval_errors;
+static u64 total_completion_timeout_errors;
+static u64 total_completion_comp_buf_overflow_errors;
+
+static struct dentry *iaa_crypto_debugfs_root;
+
+void update_total_comp_calls(void)
+{
+ total_comp_calls++;
+}
+
+void update_total_comp_bytes_out(int n)
+{
+ total_comp_bytes_out += n;
+}
+
+void update_total_decomp_calls(void)
+{
+ total_decomp_calls++;
+}
+
+void update_total_sw_decomp_calls(void)
+{
+ total_sw_decomp_calls++;
+}
+
+void update_total_decomp_bytes_in(int n)
+{
+ total_decomp_bytes_in += n;
+}
+
+void update_completion_einval_errs(void)
+{
+ total_completion_einval_errors++;
+}
+
+void update_completion_timeout_errs(void)
+{
+ total_completion_timeout_errors++;
+}
+
+void update_completion_comp_buf_overflow_errs(void)
+{
+ total_completion_comp_buf_overflow_errors++;
+}
+
+void update_max_comp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_comp_delay_ns)
+ max_comp_delay_ns = time_diff;
+}
+
+void update_max_decomp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_decomp_delay_ns)
+ max_decomp_delay_ns = time_diff;
+}
+
+void update_max_acomp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_acomp_delay_ns)
+ max_acomp_delay_ns = time_diff;
+}
+
+void update_max_adecomp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_adecomp_delay_ns)
+ max_adecomp_delay_ns = time_diff;
+}
+
+void update_wq_comp_calls(struct idxd_wq *idxd_wq)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->comp_calls++;
+ wq->iaa_device->comp_calls++;
+}
+
+void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->comp_bytes += n;
+ wq->iaa_device->comp_bytes += n;
+}
+
+void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->decomp_calls++;
+ wq->iaa_device->decomp_calls++;
+}
+
+void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->decomp_bytes += n;
+ wq->iaa_device->decomp_bytes += n;
+}
+
+static void reset_iaa_crypto_stats(void)
+{
+ total_comp_calls = 0;
+ total_decomp_calls = 0;
+ total_sw_decomp_calls = 0;
+ max_comp_delay_ns = 0;
+ max_decomp_delay_ns = 0;
+ max_acomp_delay_ns = 0;
+ max_adecomp_delay_ns = 0;
+ total_comp_bytes_out = 0;
+ total_decomp_bytes_in = 0;
+ total_completion_einval_errors = 0;
+ total_completion_timeout_errors = 0;
+ total_completion_comp_buf_overflow_errors = 0;
+}
+
+static void reset_wq_stats(struct iaa_wq *wq)
+{
+ wq->comp_calls = 0;
+ wq->comp_bytes = 0;
+ wq->decomp_calls = 0;
+ wq->decomp_bytes = 0;
+}
+
+static void reset_device_stats(struct iaa_device *iaa_device)
+{
+ struct iaa_wq *iaa_wq;
+
+ iaa_device->comp_calls = 0;
+ iaa_device->comp_bytes = 0;
+ iaa_device->decomp_calls = 0;
+ iaa_device->decomp_bytes = 0;
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
+ reset_wq_stats(iaa_wq);
+}
+
+static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
+{
+ seq_printf(m, " name: %s\n", iaa_wq->wq->name);
+ seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls);
+ seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes);
+ seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls);
+ seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
+}
+
+static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
+{
+ struct iaa_wq *iaa_wq;
+
+ seq_puts(m, "iaa device:\n");
+ seq_printf(m, " id: %d\n", iaa_device->idxd->id);
+ seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
+ seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls);
+ seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes);
+ seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls);
+ seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes);
+ seq_puts(m, " wqs:\n");
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
+ wq_show(m, iaa_wq);
+}
+
+static void global_stats_show(struct seq_file *m)
+{
+ seq_puts(m, "global stats:\n");
+ seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls);
+ seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls);
+ seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
+ seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out);
+ seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
+ seq_printf(m, " total_completion_einval_errors: %llu\n",
+ total_completion_einval_errors);
+ seq_printf(m, " total_completion_timeout_errors: %llu\n",
+ total_completion_timeout_errors);
+ seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
+ total_completion_comp_buf_overflow_errors);
+}
+
+static int wq_stats_show(struct seq_file *m, void *v)
+{
+ struct iaa_device *iaa_device;
+
+ mutex_lock(&iaa_devices_lock);
+
+ global_stats_show(m);
+
+ list_for_each_entry(iaa_device, &iaa_devices, list)
+ device_stats_show(m, iaa_device);
+
+ mutex_unlock(&iaa_devices_lock);
+
+ return 0;
+}
+
+static int iaa_crypto_stats_reset(void *data, u64 value)
+{
+ struct iaa_device *iaa_device;
+
+ reset_iaa_crypto_stats();
+
+ mutex_lock(&iaa_devices_lock);
+
+ list_for_each_entry(iaa_device, &iaa_devices, list)
+ reset_device_stats(iaa_device);
+
+ mutex_unlock(&iaa_devices_lock);
+
+ return 0;
+}
+
+static int wq_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wq_stats_show, file);
+}
+
+static const struct file_operations wq_stats_fops = {
+ .open = wq_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");
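+/*
+ * Typical usage from userspace, assuming debugfs is mounted at the
+ * conventional /sys/kernel/debug:
+ *
+ *	cat /sys/kernel/debug/iaa_crypto/wq_stats
+ *	echo 1 > /sys/kernel/debug/iaa_crypto/stats_reset
+ */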
+
+int __init iaa_crypto_debugfs_init(void)
+{
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
+	/* debugfs_create_dir() returns an ERR_PTR on failure, never NULL */
+	if (IS_ERR(iaa_crypto_debugfs_root))
+		return PTR_ERR(iaa_crypto_debugfs_root);
+
+ debugfs_create_u64("max_comp_delay_ns", 0644,
+ iaa_crypto_debugfs_root, &max_comp_delay_ns);
+ debugfs_create_u64("max_decomp_delay_ns", 0644,
+ iaa_crypto_debugfs_root, &max_decomp_delay_ns);
+	debugfs_create_u64("max_acomp_delay_ns", 0644,
+			   iaa_crypto_debugfs_root, &max_acomp_delay_ns);
+	debugfs_create_u64("max_adecomp_delay_ns", 0644,
+			   iaa_crypto_debugfs_root, &max_adecomp_delay_ns);
+ debugfs_create_u64("total_comp_calls", 0644,
+ iaa_crypto_debugfs_root, &total_comp_calls);
+ debugfs_create_u64("total_decomp_calls", 0644,
+ iaa_crypto_debugfs_root, &total_decomp_calls);
+ debugfs_create_u64("total_sw_decomp_calls", 0644,
+ iaa_crypto_debugfs_root, &total_sw_decomp_calls);
+ debugfs_create_u64("total_comp_bytes_out", 0644,
+ iaa_crypto_debugfs_root, &total_comp_bytes_out);
+ debugfs_create_u64("total_decomp_bytes_in", 0644,
+ iaa_crypto_debugfs_root, &total_decomp_bytes_in);
+ debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
+ &wq_stats_fops);
+ debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
+ &wq_stats_reset_fops);
+
+ return 0;
+}
+
+void __exit iaa_crypto_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(iaa_crypto_debugfs_root);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
new file mode 100644
index 000000000..c10b87b86
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#ifndef __CRYPTO_DEV_IAA_CRYPTO_STATS_H__
+#define __CRYPTO_DEV_IAA_CRYPTO_STATS_H__
+
+#if defined(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS)
+int iaa_crypto_debugfs_init(void);
+void iaa_crypto_debugfs_cleanup(void);
+
+void update_total_comp_calls(void);
+void update_total_comp_bytes_out(int n);
+void update_total_decomp_calls(void);
+void update_total_sw_decomp_calls(void);
+void update_total_decomp_bytes_in(int n);
+void update_max_comp_delay_ns(u64 start_time_ns);
+void update_max_decomp_delay_ns(u64 start_time_ns);
+void update_max_acomp_delay_ns(u64 start_time_ns);
+void update_max_adecomp_delay_ns(u64 start_time_ns);
+void update_completion_einval_errs(void);
+void update_completion_timeout_errs(void);
+void update_completion_comp_buf_overflow_errs(void);
+
+void update_wq_comp_calls(struct idxd_wq *idxd_wq);
+void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
+void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
+void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
+
+#else
+static inline int iaa_crypto_debugfs_init(void) { return 0; }
+static inline void iaa_crypto_debugfs_cleanup(void) {}
+
+static inline void update_total_comp_calls(void) {}
+static inline void update_total_comp_bytes_out(int n) {}
+static inline void update_total_decomp_calls(void) {}
+static inline void update_total_sw_decomp_calls(void) {}
+static inline void update_total_decomp_bytes_in(int n) {}
+static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
+static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
+static inline void update_max_acomp_delay_ns(u64 start_time_ns) {}
+static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {}
+static inline void update_completion_einval_errs(void) {}
+static inline void update_completion_timeout_errs(void) {}
+static inline void update_completion_comp_buf_overflow_errs(void) {}
+
+static inline void update_wq_comp_calls(struct idxd_wq *idxd_wq) {}
+static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
+static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
+static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
+
+#endif	/* CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS */
+
+#endif
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 1220cc86f..c120f6715 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX
To compile this as a module, choose M here: the module
will be called qat_4xxx.
+config CRYPTO_DEV_QAT_420XX
+ tristate "Support for Intel(R) QAT_420XX"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_420xx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_420xx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 258c8a626..235b69f4f 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
new file mode 100644
index 000000000..a90fbe00b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
+qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
new file mode 100644
index 000000000..7909b51e9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/iopoll.h>
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen4_config.h>
+#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
+#include <adf_gen4_ras.h>
+#include <adf_gen4_timer.h>
+#include <adf_gen4_tl.h>
+#include "adf_420xx_hw_data.h"
+#include "icp_qat_hw.h"
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 GENMASK(11, 8)
+#define ADF_AE_GROUP_3 GENMASK(15, 12)
+#define ADF_AE_GROUP_4 BIT(16)
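+/* Group 4 is the lone admin AE: bit 16, matching ADF_420XX_ADMIN_AE_MASK */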
+
+#define ENA_THD_MASK_ASYM GENMASK(1, 0)
+#define ENA_THD_MASK_SYM GENMASK(3, 0)
+#define ENA_THD_MASK_DC GENMASK(1, 0)
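+/*
+ * Per-object thread enable masks: asym and dc firmware objects use two
+ * threads per assigned AE, sym uses four (see get_ena_thd_mask()).
+ */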
+
+static const char * const adf_420xx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_fw_cy_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+ {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dcc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+
+static struct adf_hw_device_class adf_420xx_class = {
+ .name = ADF_420XX_DEVICE_NAME,
+ .type = DEV_420XX,
+ .instances = 0,
+};
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 me_disable = self->fuses;
+
+ return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return ARRAY_SIZE(adf_fw_cy_config);
+ case SVC_DC:
+ return ARRAY_SIZE(adf_fw_dc_config);
+ case SVC_DCC:
+ return ARRAY_SIZE(adf_fw_dcc_config);
+ case SVC_SYM:
+ return ARRAY_SIZE(adf_fw_sym_config);
+ case SVC_ASYM:
+ return ARRAY_SIZE(adf_fw_asym_config);
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return ARRAY_SIZE(adf_fw_asym_dc_config);
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return ARRAY_SIZE(adf_fw_sym_dc_config);
+ default:
+ return 0;
+ }
+}
+
+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+{
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return adf_fw_cy_config;
+ case SVC_DC:
+ return adf_fw_dc_config;
+ case SVC_DCC:
+ return adf_fw_dcc_config;
+ case SVC_SYM:
+ return adf_fw_sym_config;
+ case SVC_ASYM:
+ return adf_fw_asym_config;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return adf_fw_asym_dc_config;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return adf_fw_sym_dc_config;
+ default:
+ return NULL;
+ }
+}
+
+static void update_ae_mask(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ const struct adf_fw_config *fw_config;
+ u32 config_ae_mask = 0;
+ u32 ae_mask, num_objs;
+ int i;
+
+ ae_mask = get_ae_mask(hw_data);
+
+ /* Modify the AE mask based on the firmware configuration loaded */
+ fw_config = get_fw_config(accel_dev);
+ num_objs = uof_get_num_objs(accel_dev);
+
+ config_ae_mask |= ADF_420XX_ADMIN_AE_MASK;
+ for (i = 0; i < num_objs; i++)
+ config_ae_mask |= fw_config[i].ae_mask;
+
+ hw_data->ae_mask = ae_mask & config_ae_mask;
+}
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_dcc;
+ u32 fusectl1;
+
+ /* As a side effect, update ae_mask based on configuration */
+ update_ae_mask(accel_dev);
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);
+
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SM4 |
+ ICP_ACCEL_CAPABILITIES_AES_V2 |
+ ICP_ACCEL_CAPABILITIES_ZUC |
+ ICP_ACCEL_CAPABILITIES_ZUC_256 |
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
+
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
+ case SVC_DC:
+ return capabilities_dc;
+ case SVC_DCC:
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported
+ */
+ capabilities_dcc = capabilities_dc | capabilities_sym;
+ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ return capabilities_dcc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+ return capabilities_asym;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return capabilities_asym | capabilities_dc;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return capabilities_sym | capabilities_dc;
+ default:
+ return 0;
+ }
+}
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen4_init_thd2arb_map(accel_dev))
+		dev_warn(&GET_DEV(accel_dev),
+			 "Generation of the thread to arbiter map failed\n");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;
+
+ rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
+ rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+}
+
+static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
+{
+ switch (ae_mask) {
+ case ADF_AE_GROUP_0:
+ return RP_GROUP_0;
+ case ADF_AE_GROUP_1:
+ case ADF_AE_GROUP_3:
+ return RP_GROUP_1;
+ case ADF_AE_GROUP_2:
+ if (get_fw_config(accel_dev) == adf_fw_cy_config)
+ return RP_GROUP_0;
+ else
+ return RP_GROUP_1;
+ default:
+		dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized\n");
+ return -EINVAL;
+ }
+}
+
+static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
+ const struct adf_fw_config *fw_config;
+ u16 ring_to_svc_map;
+ int i, j;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ /* If dcc, all rings handle compression requests */
+ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
+ for (i = 0; i < RP_GROUP_COUNT; i++)
+ rps[i] = COMP;
+ goto set_mask;
+ }
+
+ for (i = 0; i < RP_GROUP_COUNT; i++) {
+ switch (fw_config[i].ae_mask) {
+ case ADF_AE_GROUP_0:
+ j = RP_GROUP_0;
+ break;
+ case ADF_AE_GROUP_1:
+ j = RP_GROUP_1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (fw_config[i].obj) {
+ case ADF_FW_SYM_OBJ:
+ rps[j] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[j] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[j] = COMP;
+ break;
+ default:
+ rps[j] = 0;
+ break;
+ }
+ }
+
+set_mask:
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ const char * const fw_objs[], int num_objs)
+{
+ const struct adf_fw_config *fw_config;
+ int id;
+
+ fw_config = get_fw_config(accel_dev);
+ if (fw_config)
+ id = fw_config[obj_num].obj;
+ else
+ id = -EINVAL;
+
+	if (id < 0 || id >= num_objs)
+ return NULL;
+
+ return fw_objs[id];
+}
+
+static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ return fw_config[obj_num].ae_mask;
+}
+
+static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
+{
+ dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK;
+ dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK;
+ dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
+ dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
+ dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
+ dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
+}
+
+void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
+{
+ hw_data->dev_class = &adf_420xx_class;
+ hw_data->instance_id = adf_420xx_class.instances++;
+ hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_gen4_enable_error_correction;
+ hw_data->get_accel_mask = adf_gen4_get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = adf_gen4_get_num_accels;
+ hw_data->get_num_aes = adf_gen4_get_num_aes;
+ hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
+ hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
+ hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
+ hw_data->get_arb_info = adf_gen4_get_arb_info;
+ hw_data->get_admin_info = adf_gen4_get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = adf_gen4_get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_gen4_enable_ints;
+ hw_data->init_device = adf_gen4_init_device;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK;
+ hw_data->num_rps = ADF_GEN4_MAX_RPS;
+ hw_data->fw_name = ADF_420XX_FW;
+ hw_data->fw_mmp_name = ADF_420XX_MMP;
+ hw_data->uof_get_name = uof_get_name_420xx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->get_rp_group = get_rp_group;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+ hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->clock_frequency = ADF_420XX_AE_FREQ;
+
+ adf_gen4_set_err_mask(&hw_data->dev_err_mask);
+ adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen4_init_dc_ops(&hw_data->dc_ops);
+ adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_init_rl_data(&hw_data->rl_data);
+}
+
+void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
new file mode 100644
index 000000000..99abbfc14
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_420XX_HW_DATA_H_
+#define ADF_420XX_HW_DATA_H_
+
+#include <adf_accel_devices.h>
+
+#define ADF_420XX_MAX_ACCELENGINES 17
+
+#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF
+#define ADF_420XX_ADMIN_AE_MASK 0x10000
+
+#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF)
+#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF)
+#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001)
+#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007)
+#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF)
+#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF)
+
+/*
+ * SSMFEATREN bit mask
+ * BIT(4) - enables parity detection on CPP
+ * BIT(12) - enables the logging of push/pull data errors
+ * in pperr register
+ * BIT(16) - BIT(27) - enable parity detection on SPPs
+ */
+#define ADF_420XX_SSMFEATREN_MASK \
+ (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27))
+
+/* Firmware Binaries */
+#define ADF_420XX_FW "qat_420xx.bin"
+#define ADF_420XX_MMP "qat_420xx_mmp.bin"
+#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin"
+#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin"
+#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin"
+#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin"
+
+/* RL constants */
+#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_420XX_RL_DCPR_CORRECTION 1
+#define ADF_420XX_RL_SCANS_PER_SEC 954
+#define ADF_420XX_RL_MAX_TP_ASYM 173750UL
+#define ADF_420XX_RL_MAX_TP_SYM 95000UL
+#define ADF_420XX_RL_MAX_TP_DC 40000UL
+#define ADF_420XX_RL_SLICE_REF 1000UL
+
+/* Clocks frequency */
+#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ)
+
+void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id);
+void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
new file mode 100644
index 000000000..2a3598409
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <adf_accel_devices.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_config.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_420xx_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->hw_device) {
+ adf_clean_hw_data_420xx(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ struct adf_bar *bar;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /*
+ * Add accel device to accel table
+ * This should be called before adf_cleanup_accel is called
+ */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and initialise device hardware meta-data structure */
+ hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_420xx(accel_dev->hw_device, ent->device);
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found.\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable PCI device.\n");
+ goto out_err;
+ }
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto out_err;
+ }
+
+ ret = adf_gen4_cfg_dev_init(accel_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+ goto out_err;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Find and map all the device's BARS */
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
+
+ ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ goto out_err;
+ }
+
+ i = 0;
+ for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
+ bar = &accel_pci_dev->pci_bars[i++];
+ bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->ras_errors.enabled = true;
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ ret = adf_sysfs_init(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+}
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_420XX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_420XX_FW);
+MODULE_FIRMWARE(ADF_420XX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index b64aaecdd..e171cddf6 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -7,12 +7,15 @@
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
+#include <adf_gen4_tl.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -20,12 +23,10 @@
#define ADF_AE_GROUP_1 GENMASK(7, 4)
#define ADF_AE_GROUP_2 BIT(8)
-enum adf_fw_objs {
- ADF_FW_SYM_OBJ,
- ADF_FW_ASYM_OBJ,
- ADF_FW_DC_OBJ,
- ADF_FW_ADMIN_OBJ,
-};
+#define ENA_THD_MASK_ASYM GENMASK(1, 0)
+#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0)
+#define ENA_THD_MASK_SYM GENMASK(6, 0)
+#define ENA_THD_MASK_DC GENMASK(1, 0)
static const char * const adf_4xxx_fw_objs[] = {
[ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
@@ -41,11 +42,6 @@ static const char * const adf_402xx_fw_objs[] = {
[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};
-struct adf_fw_config {
- u32 ae_mask;
- enum adf_fw_objs obj;
-};
-
static const struct adf_fw_config adf_fw_cy_config[] = {
{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
@@ -95,36 +91,12 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config))
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
-/* Worker thread to service arbiter mappings */
-static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x5555555, 0x5555555, 0x5555555, 0x5555555,
- 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
- 0x0
-};
-
-static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
- 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
- 0x0
-};
-
-static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
- 0x0
-};
-
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
.instances = 0,
};
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_ACCELERATORS_MASK;
-}
-
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
u32 me_disable = self->fuses;
@@ -132,55 +104,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- if (!self || !self->ae_mask)
- return 0;
-
- return hweight32(self->ae_mask);
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_SRAM_BAR;
-}
-
-/*
- * The vector routing table is used to select the MSI-X entry to use for each
- * interrupt source.
- * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
- * The final entry corresponds to VF2PF or error interrupts.
- * This vector table could be used to configure one MSI-X entry to be shared
- * between multiple interrupt sources.
- *
- * The default routing is set to have a one to one correspondence between the
- * interrupt source and the MSI-X entry used.
- */
-static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
-{
- void __iomem *csr;
- int i;
-
- csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
- for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
- ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
-}
-
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
@@ -189,7 +112,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
u32 fusectl1;
/* Read accelerator capabilities mask */
- pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);
capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
@@ -204,27 +127,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
}
@@ -234,7 +157,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM2 |
ICP_ACCEL_CAPABILITIES_ECEDMONT;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
@@ -245,7 +168,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
@@ -281,43 +204,13 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
}
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
- return DEV_SKU_1;
-}
-
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
- switch (adf_get_service_enabled(accel_dev)) {
- case SVC_DC:
- return thrd_to_arb_map_dc;
- case SVC_DCC:
- return thrd_to_arb_map_dcc;
- default:
- return default_thrd_to_arb_map;
- }
-}
+ if (adf_gen4_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+		 "Generation of the thread to arbiter map failed");
-static void get_arb_info(struct arb_info *arb_info)
-{
- arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
- arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
- arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
-}
-
-static void get_admin_info(struct admin_info *admin_csrs_info)
-{
- admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
- admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
- admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
-}
-
-static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
-{
- /*
- * 4XXX uses KPT counter for HB
- */
- return ADF_4XXX_KPT_COUNTER_FREQ;
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
@@ -338,59 +231,7 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
}
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
- void __iomem *csr = misc_bar->virt_addr;
-
- /* Enable all in errsou3 except VFLR notification on host */
- ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
-}
-
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
- /* Enable bundle interrupts */
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
-
- /* Enable misc interrupts */
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
-}
-
-static int adf_init_device(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
- u32 status;
- u32 csr;
- int ret;
-
- addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
- /* Temporarily mask PM interrupt */
- csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
- csr |= ADF_GEN4_PM_SOU;
- ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
-
- /* Set DRV_ACTIVE bit to power up the device */
- ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
-
- /* Poll status register to make sure the device is powered up */
- ret = read_poll_timeout(ADF_CSR_RD, status,
- status & ADF_GEN4_PM_INIT_STATE,
- ADF_GEN4_PM_POLL_DELAY_US,
- ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
- ADF_GEN4_PM_STATUS);
- if (ret)
- dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
-
- return ret;
-}
-
-static u32 uof_get_num_objs(void)
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
return ARRAY_SIZE(adf_fw_cy_config);
}
@@ -420,11 +261,64 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
}
}
-enum adf_rp_groups {
- RP_GROUP_0 = 0,
- RP_GROUP_1,
- RP_GROUP_COUNT
-};
+static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
+{
+ switch (ae_mask) {
+ case ADF_AE_GROUP_0:
+ return RP_GROUP_0;
+ case ADF_AE_GROUP_1:
+ return RP_GROUP_1;
+ default:
+ dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
+ return -EINVAL;
+ }
+}
+
+static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
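+
+/*
+ * Illustrative walk-through (derived from the tables above, not part of the
+ * upstream change): with adf_fw_cy_config, obj 0 (ADF_FW_SYM_OBJ) resolves to
+ * ENA_THD_MASK_SYM (GENMASK(6, 0), threads 0-6) and obj 1 (ADF_FW_ASYM_OBJ)
+ * to ENA_THD_MASK_ASYM (GENMASK(1, 0), threads 0-1).
+ */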
+
+static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM_401XX;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
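+
+/*
+ * Illustrative difference from get_ena_thd_mask(): 401xx parts enable six
+ * asym threads (ENA_THD_MASK_ASYM_401XX, GENMASK(5, 0)) instead of two,
+ * while the sym and dc masks are unchanged.
+ */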
static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
@@ -538,54 +432,64 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
- hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
- hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
- hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
- hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
+ hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
hw_data->num_logical_accel = 1;
- hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
- hw_data->get_accel_mask = get_accel_mask;
+ hw_data->enable_error_correction = adf_gen4_enable_error_correction;
+ hw_data->get_accel_mask = adf_gen4_get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
- hw_data->get_sram_bar_id = get_sram_bar_id;
- hw_data->get_etr_bar_id = get_etr_bar_id;
- hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_arb_info = get_arb_info;
- hw_data->get_admin_info = get_admin_info;
+ hw_data->get_num_accels = adf_gen4_get_num_accels;
+ hw_data->get_num_aes = adf_gen4_get_num_aes;
+ hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
+ hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
+ hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
+ hw_data->get_arb_info = adf_gen4_get_arb_info;
+ hw_data->get_admin_info = adf_gen4_get_admin_info;
hw_data->get_accel_cap = get_accel_cap;
- hw_data->get_sku = get_sku;
+ hw_data->get_sku = adf_gen4_get_sku;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
- hw_data->init_device = adf_init_device;
+ hw_data->enable_ints = adf_gen4_enable_ints;
+ hw_data->init_device = adf_gen4_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
case ADF_402XX_PCI_DEVICE_ID:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ break;
+ case ADF_401XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_4xxx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx;
break;
-
default:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ break;
}
hw_data->uof_get_num_objs = uof_get_num_objs;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
- hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->get_rp_group = get_rp_group;
+ hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
@@ -595,7 +499,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->dev_config = adf_gen4_dev_config;
hw_data->start_timer = adf_gen4_timer_start;
hw_data->stop_timer = adf_gen4_timer_stop;
- hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
@@ -604,6 +508,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_gen4_init_tl_data(&hw_data->tl_data);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index 33423295e..76388363e 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -6,25 +6,8 @@
#include <linux/units.h>
#include <adf_accel_devices.h>
-/* PCIe configuration space */
-#define ADF_4XXX_SRAM_BAR 0
-#define ADF_4XXX_PMISC_BAR 1
-#define ADF_4XXX_ETR_BAR 2
-#define ADF_4XXX_RX_RINGS_OFFSET 1
-#define ADF_4XXX_TX_RINGS_MASK 0x1
-#define ADF_4XXX_MAX_ACCELERATORS 1
#define ADF_4XXX_MAX_ACCELENGINES 9
-#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
-/* Physical function fuses */
-#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8)
-#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC)
-#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0)
-#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4)
-#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8)
-#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC)
-
-#define ADF_4XXX_ACCELERATORS_MASK (0x1)
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
#define ADF_4XXX_ADMIN_AE_MASK (0x100)
@@ -45,28 +28,6 @@
(BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \
BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23))
-#define ADF_4XXX_ETR_MAX_BANKS 64
-
-/* MSIX interrupt */
-#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040)
-#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044)
-#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084)
-#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
-
-/* Bank and ring configuration */
-#define ADF_4XXX_NUM_RINGS_PER_BANK 2
-#define ADF_4XXX_NUM_BANKS_PER_VF 4
-
-/* Arbiter configuration */
-#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
-#define ADF_4XXX_ARB_OFFSET (0x0)
-#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400)
-
-/* Admin Interface Reg Offset */
-#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574)
-#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
-#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
-
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
@@ -93,22 +54,9 @@
#define ADF_4XXX_RL_SLICE_REF 1000UL
/* Clocks frequency */
-#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ)
-/* qat_4xxx fuse bits are different from old GENs, redefine them */
-enum icp_qat_4xxx_slice_mask {
- ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
- ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
- ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
- ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
- ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
- ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
- ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
-};
-
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 8f483d119..9762f2bf7 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -8,13 +8,10 @@
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
-#include <adf_heartbeat.h>
+#include <adf_gen4_config.h>
+#include <adf_gen4_hw_data.h>
#include "adf_4xxx_hw_data.h"
-#include "adf_cfg_services.h"
-#include "qat_compression.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
@@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
-static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
- const char *config;
- int ret;
-
- config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
-
- ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
- if (ret)
- return ret;
-
- /* Default configuration is crypto only for even devices
- * and compression for odd devices
- */
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, config,
- ADF_STR);
- if (ret)
- return ret;
-
- adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
-
- return 0;
-}
-
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long bank, val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_crypto(accel_dev))
- instances = min(cpus, banks / 2);
- else
- instances = 0;
-
- for (i = 0; i < instances; i++) {
- val = i;
- bank = i * 2;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &bank, ADF_DEC);
- if (ret)
- goto err;
-
- bank += 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &bank, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
- i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
- val = 128;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
- return ret;
-}
-
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_compression(accel_dev))
- instances = min(cpus, banks);
- else
- instances = 0;
-
- for (i = 0; i < instances; i++) {
- val = i;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
- return ret;
-}
-
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
-{
- unsigned long val;
- int ret;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- return ret;
-
- return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
-}
-
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
- if (ret)
- goto err;
-
- ret = adf_cfg_section_add(accel_dev, "Accelerator0");
- if (ret)
- goto err;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret)
- goto err;
-
- ret = sysfs_match_string(adf_cfg_services, services);
- if (ret < 0)
- goto err;
-
- switch (ret) {
- case SVC_CY:
- case SVC_CY2:
- ret = adf_crypto_dev_config(accel_dev);
- break;
- case SVC_DC:
- case SVC_DCC:
- ret = adf_comp_dev_config(accel_dev);
- break;
- default:
- ret = adf_no_dev_config(accel_dev);
- break;
- }
-
- if (ret)
- goto err;
-
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
- return ret;
-
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
- return ret;
-}
-
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
@@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
@@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- ret = adf_cfg_dev_init(accel_dev);
+ ret = adf_gen4_cfg_dev_init(accel_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize configuration.\n");
goto out_err;
@@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Find and map all the device's BARS */
- bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
if (ret) {
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 779a8aa0b..6908727bf 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_ras_counters.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
+ adf_gen4_config.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
@@ -40,9 +41,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
adf_cnv_dbgfs.o \
adf_gen4_pm_debugfs.o \
+ adf_gen4_tl.o \
adf_heartbeat.o \
adf_heartbeat_dbgfs.o \
adf_pm_dbgfs.o \
+ adf_telemetry.o \
+ adf_tl_debugfs.o \
adf_dbgfs.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 9d5fdd529..a16c7e6ed 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -6,11 +6,14 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
+#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
+#include "icp_qat_hw.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
@@ -19,12 +22,15 @@
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
+#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_402XX_PCI_DEVICE_ID 0x4944
#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
+#define ADF_420XX_PCI_DEVICE_ID 0x4946
+#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -241,8 +247,10 @@ struct adf_hw_device_data {
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
- u32 (*uof_get_num_objs)(void);
+ u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
+ u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
@@ -250,6 +258,7 @@ struct adf_hw_device_data {
struct adf_ras_ops ras_ops;
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
+ struct adf_tl_hw_data tl_data;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -264,6 +273,7 @@ struct adf_hw_device_data {
u32 admin_ae_mask;
u16 tx_rings_mask;
u16 ring_to_svc_map;
+ u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
u8 tx_rx_gap;
u8 num_banks;
u16 num_banks_per_vf;
@@ -272,6 +282,7 @@ struct adf_hw_device_data {
u8 num_logical_accel;
u8 num_engines;
u32 num_hb_ctrs;
+ u8 num_rps;
};
/* CSR write macro */
@@ -304,6 +315,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
@@ -352,6 +364,7 @@ struct adf_accel_dev {
struct adf_cfg_device_data *cfg;
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
+ struct adf_telemetry *telemetry;
struct adf_dc_data *dc_data;
struct adf_pm power_management;
struct list_head crypto_list;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 6be064dc6..4b5d0350f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
int i;
loader = loader_data->fw_loader;
- num_objs = hw_device->uof_get_num_objs();
+ num_objs = hw_device->uof_get_num_objs(accel_dev);
for (i = 0; i < num_objs; i++) {
obj_name = hw_device->uof_get_name(accel_dev, i);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 54b673ec2..acad526eb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -498,6 +498,43 @@ int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt,
return ret;
}
+int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
+ dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
+ struct icp_qat_fw_init_admin_slice_cnt *slice_count)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_TL_START;
+ req.init_cfg_ptr = tl_dma_addr;
+ req.init_cfg_sz = layout_sz;
+
+ if (rp_indexes)
+ memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes));
+
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ memcpy(slice_count, &resp.slices, sizeof(*slice_count));
+
+ return 0;
+}
+
+int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ req.cmd_id = ICP_QAT_FW_TL_STOP;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
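+
+/*
+ * Illustrative call order, assuming the telemetry core added elsewhere in
+ * this series drives these helpers: adf_send_admin_tl_start() hands the
+ * firmware a DMA-mapped layout buffer plus the ring-pair indexes to sample,
+ * and adf_send_admin_tl_stop() ends collection when telemetry is disabled.
+ */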
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h
index 55cbcbc66..647c8e196 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h
@@ -23,5 +23,9 @@ int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size);
int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err);
+int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
+ dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
+ struct icp_qat_fw_init_admin_slice_cnt *slice_count);
+int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 6e5de1dab..89df3888d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -47,6 +47,7 @@ enum adf_device_type {
DEV_C3XXX,
DEV_C3XXXVF,
DEV_4XXX,
+ DEV_420XX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index 477efcc81..c42f5c25a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -10,6 +10,7 @@
#include "adf_fw_counters.h"
#include "adf_heartbeat_dbgfs.h"
#include "adf_pm_dbgfs.h"
+#include "adf_tl_debugfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
@@ -66,6 +67,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
adf_heartbeat_dbgfs_add(accel_dev);
adf_pm_dbgfs_add(accel_dev);
adf_cnv_dbgfs_add(accel_dev);
+ adf_tl_dbgfs_add(accel_dev);
}
}
@@ -79,6 +81,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
return;
if (!accel_dev->is_vf) {
+ adf_tl_dbgfs_rm(accel_dev);
adf_cnv_dbgfs_rm(accel_dev);
adf_pm_dbgfs_rm(accel_dev);
adf_heartbeat_dbgfs_rm(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
new file mode 100644
index 000000000..4f8669680
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_CONFIG_H_
+#define ADF_FW_CONFIG_H_
+
+enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+ ADF_FW_DC_OBJ,
+ ADF_FW_ADMIN_OBJ,
+};
+
+struct adf_fw_config {
+ u32 ae_mask;
+ enum adf_fw_objs obj;
+};
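+
+/*
+ * Illustrative pairing (taken from the 4xxx tables in this patch): an entry
+ * such as { ADF_AE_GROUP_1, ADF_FW_SYM_OBJ } loads the symmetric-crypto
+ * firmware object onto acceleration engines 4-7 (GENMASK(7, 4)).
+ */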
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
new file mode 100644
index 000000000..fe1f3d727
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_services.h"
+#include "adf_cfg_strings.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_config.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_access_macros.h"
+#include "qat_compression.h"
+#include "qat_crypto.h"
+
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long bank, val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_crypto(accel_dev))
+ instances = min(cpus, banks / 2);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ bank = i * 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ bank += 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val;
+ int ret;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+}
+
+/**
+ * adf_gen4_dev_config() - create dev config required to create instances
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates the device configuration required to create instances.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ goto err;
+
+ ret = sysfs_match_string(adf_cfg_services, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+ case SVC_CY:
+ case SVC_CY2:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_no_dev_config(accel_dev);
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_dev_config);
+
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ /* Default configuration is crypto only for even devices
+ * and compression for odd devices
+ */
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
new file mode 100644
index 000000000..bb87655f6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_GEN4_CONFIG_H_
+#define ADF_GEN4_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 3148a6293..f752653cc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pm.h"
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
@@ -102,6 +104,131 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_ACCELERATORS_MASK;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);
+
+u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_MAX_ACCELERATORS;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);
+
+u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);
+
+u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_PMISC_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);
+
+u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_ETR_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);
+
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_SRAM_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);
+
+enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_sku);
+
+void adf_gen4_get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);
+
+void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);
+
+u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * GEN4 uses KPT counter for HB
+ */
+ return ADF_GEN4_KPT_COUNTER_FREQ;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);
+
+void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
+ void __iomem *csr = misc_bar->virt_addr;
+
+ /* Enable all in errsou3 except VFLR notification on host */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);
+
+void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+
+ addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);
+
+int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+ u32 status;
+ u32 csr;
+ int ret;
+
+ addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_device);
+
static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
u32 *lower)
{
@@ -135,6 +262,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr;
+ int i;
+
+ csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+ for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);
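+
+/*
+ * Illustrative effect: with ADF_GEN4_ETR_MAX_BANKS == 64, the loop above
+ * programs 65 identity-mapped entries, one MSI-X vector per ring bank plus
+ * the final entry for VF2PF/error interrupts.
+ */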
+
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -192,3 +341,95 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
+
+static const u32 thrd_to_arb_map_dcc[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0
+};
+
+static const u16 rp_group_to_arb_mask[] = {
+ [RP_GROUP_0] = 0x5,
+ [RP_GROUP_1] = 0xA,
+};
+
+static bool is_single_service(int service_id)
+{
+ switch (service_id) {
+ case SVC_DC:
+ case SVC_SYM:
+ case SVC_ASYM:
+ return true;
+ case SVC_CY:
+ case SVC_CY2:
+ case SVC_DCC:
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ default:
+ return false;
+ }
+}
+
+int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int ae_cnt, worker_obj_cnt, i, j;
+ unsigned long ae_mask, thds_mask;
+ int srv_id, rp_group;
+ u32 thd2arb_map_base;
+ u16 arb_mask;
+
+ if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
+ !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
+ !hw_data->uof_get_ae_mask)
+ return -EFAULT;
+
+ srv_id = adf_get_service_enabled(accel_dev);
+ if (srv_id < 0)
+ return srv_id;
+
+ ae_cnt = hw_data->get_num_aes(hw_data);
+ worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
+ ADF_GEN4_ADMIN_ACCELENGINES;
+
+ if (srv_id == SVC_DCC) {
+ if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
+ return -EINVAL;
+
+ memcpy(thd2arb_map, thrd_to_arb_map_dcc,
+ array_size(sizeof(*thd2arb_map), ae_cnt));
+ return 0;
+ }
+
+ for (i = 0; i < worker_obj_cnt; i++) {
+ ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
+ rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
+ thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
+ thd2arb_map_base = 0;
+
+ if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
+ return -EINVAL;
+
+ if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
+ return -EINVAL;
+
+ if (is_single_service(srv_id))
+ arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
+ rp_group_to_arb_mask[RP_GROUP_1];
+ else
+ arb_mask = rp_group_to_arb_mask[rp_group];
+
+ for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_map_base |= arb_mask << (j * 4);
+
+ for_each_set_bit(j, &ae_mask, ae_cnt)
+ thd2arb_map[j] = thd2arb_map_base;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
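+
+/*
+ * Worked example based on the masks in this patch: for the cy service,
+ * obj 0 (ADF_FW_SYM_OBJ on ADF_AE_GROUP_1) uses arb_mask 0xA across seven
+ * enabled threads, giving a thd2arb_map value of 0xAAAAAAA for AEs 4-7;
+ * obj 1 (ADF_FW_ASYM_OBJ on ADF_AE_GROUP_0) uses arb_mask 0x5 across two
+ * threads, giving 0x55 for AEs 0-3. Single services (dc, sym or asym alone)
+ * OR both group masks together, so each enabled thread's nibble becomes 0xF.
+ */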
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 1813fe1d5..7d8a774ca 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -3,9 +3,57 @@
#ifndef ADF_GEN4_HW_CSR_DATA_H_
#define ADF_GEN4_HW_CSR_DATA_H_
+#include <linux/units.h>
+
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+/* PCIe configuration space */
+#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN4_SRAM_BAR 0
+#define ADF_GEN4_PMISC_BAR 1
+#define ADF_GEN4_ETR_BAR 2
+
+/* Clocks frequency */
+#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0
+#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4
+#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8
+#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC
+
+/* Accelerators */
+#define ADF_GEN4_ACCELERATORS_MASK 0x1
+#define ADF_GEN4_MAX_ACCELERATORS 1
+#define ADF_GEN4_ADMIN_ACCELENGINES 1
+
+/* MSIX interrupt */
+#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
+
+/* Bank and ring configuration */
+#define ADF_GEN4_MAX_RPS 64
+#define ADF_GEN4_NUM_RINGS_PER_BANK 2
+#define ADF_GEN4_NUM_BANKS_PER_VF 4
+#define ADF_GEN4_ETR_MAX_BANKS 64
+#define ADF_GEN4_RX_RINGS_OFFSET 1
+#define ADF_GEN4_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN4_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN4_ARB_OFFSET 0x0
+#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin Interface Reg Offset */
+#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970
+
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
#define ADF_RING_CSR_RING_CONFIG 0x1000
@@ -146,7 +194,46 @@ do { \
#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+/* Arbiter threads mask with error value */
+#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+
+enum icp_qat_gen4_slice_mask {
+ ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0),
+ ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3),
+ ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4),
+ ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5),
+ ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7),
+ ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8),
+ ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9),
+};
+
+enum adf_gen4_rp_groups {
+ RP_GROUP_0,
+ RP_GROUP_1,
+ RP_GROUP_COUNT
+};
+
+void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev);
+void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev);
+u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self);
+void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info);
+void adf_gen4_get_arb_info(struct arb_info *arb_info);
+u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self);
+u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self);
+u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self);
+u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self);
+enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self);
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self);
+int adf_gen4_init_device(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
new file mode 100644
index 000000000..7fc7a77f6
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include "adf_gen4_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+
+#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4)
+
+#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4)
+
+#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, \
+ ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4))
+
+#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, \
+ ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4))
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max get to put latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* Get to put latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)),
+};
+
+/* Slice utilization counters. */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(cpr),
+ /* Translator slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(xlt),
+ /* Decompression slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr),
+ /* PKE utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(ucs),
+ /* Cipher slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(cph),
+ /* Authentication slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Slice execution counters. */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(cpr),
+ /* Translator slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(xlt),
+ /* Decompression slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr),
+ /* PKE execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(ucs),
+ /* Cipher slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(cph),
+ /* Authentication slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(ath),
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)),
+ /* Get to put latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+};
+
+void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
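
A usage sketch of the exported helper (the caller name here is assumed; the real call site is the per-generation hw_data setup): a GEN4 device driver fills in its telemetry hw_data once at init time.

	static void sketch_init_device_hw_data(struct adf_hw_device_data *hw_data)
	{
		/* Populate gen4 layout sizes, limits and counter tables. */
		adf_gen4_init_tl_data(&hw_data->tl_data);
	}
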
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h
new file mode 100644
index 000000000..32df4163b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_GEN4_TL_H
+#define ADF_GEN4_TL_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN4_CPP_NS_PER_CYCLE 2
+#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value in milliseconds. */
+#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000
+/* Num of buffers to store historic values. */
+#define ADF_GEN4_TL_NUM_HIST_BUFFS \
+ (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
+
+/* Max number of HW resources of one type. */
+#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN4_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen4_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs)
+
+/**
+ * struct adf_gen4_tl_device_data_regs - Device telemetry counter values,
+ * as periodically populated by the device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_pci_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_tlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cph_slices: array of Cipher slices utilization registers
+ * @cpr_slices: array of Compression slices utilization registers
+ * @xlt_slices: array of Translator slices utilization registers
+ * @dcpr_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ */
+struct adf_gen4_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_pci_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_tlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+};
+
+/**
+ * struct adf_gen4_tl_ring_pair_data_regs - Ring Pair telemetry counter
+ * values, as periodically populated by the device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reserved: reserved
+ * @reg_tl_pci_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen4_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reserved;
+ __u32 reg_tl_pci_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen4_tl_layout - Layout of the complete telemetry counter
+ * data (device counters plus 4 ring pairs), as periodically populated
+ * by the device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry messages counter
+ * @reserved: reserved
+ */
+struct adf_gen4_tl_layout {
+ struct adf_gen4_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen4_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout)
+#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN4_TL_H */
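
As a sanity check of the layout arithmetic, a minimal sketch, assuming this header and <linux/build_bug.h> are on the include path: tl_device_data_regs is the first member of the layout, so the device-level offset of reg_tl_pci_trans_cnt is just the five __u64 accumulators that precede it.

	#include <linux/build_bug.h>
	#include "adf_gen4_tl.h"

	/* 5 * sizeof(__u64) == 40 bytes precede reg_tl_pci_trans_cnt. */
	static_assert(offsetof(struct adf_gen4_tl_layout, tl_device_data_regs) +
		      offsetof(struct adf_gen4_tl_device_data_regs,
			       reg_tl_pci_trans_cnt) == 40);
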
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 81c39f3d0..f43ae9111 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -11,6 +11,7 @@
#include "adf_heartbeat.h"
#include "adf_rl.h"
#include "adf_sysfs_ras_counters.h"
+#include "adf_telemetry.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -142,6 +143,10 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
if (ret && ret != -EOPNOTSUPP)
return ret;
+ ret = adf_tl_init(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
@@ -220,6 +225,10 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
if (ret && ret != -EOPNOTSUPP)
return ret;
+ ret = adf_tl_start(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
@@ -279,6 +288,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
+ adf_tl_stop(accel_dev);
adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
adf_sysfs_stop_ras(accel_dev);
@@ -374,6 +384,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_heartbeat_shutdown(accel_dev);
+ adf_tl_shutdown(accel_dev);
+
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
new file mode 100644
index 000000000..2ff714d11
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#define dev_fmt(fmt) "Telemetry: " fmt
+
+#include <asm/errno.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "adf_admin.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_telemetry.h"
+
+#define TL_IS_ZERO(input) ((input) == 0)
+
+static bool is_tl_supported(struct adf_accel_dev *accel_dev)
+{
+ u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
+
+ return fw_caps & TL_CAPABILITY_BIT;
+}
+
+static int validate_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ if (!tl_data->dev_counters ||
+ TL_IS_ZERO(tl_data->num_dev_counters) ||
+ !tl_data->sl_util_counters ||
+ !tl_data->sl_exec_counters ||
+ !tl_data->rp_counters ||
+ TL_IS_ZERO(tl_data->num_rp_counters))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t regs_sz = tl_data->layout_sz;
+ struct adf_telemetry *telemetry;
+ int node = dev_to_node(dev);
+ void *tl_data_regs;
+ unsigned int i;
+
+ telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
+ if (!telemetry)
+ return -ENOMEM;
+
+ telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
+ sizeof(*telemetry->rp_num_indexes),
+ GFP_KERNEL);
+ if (!telemetry->rp_num_indexes)
+ goto err_free_tl;
+
+ telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff,
+ sizeof(*telemetry->regs_hist_buff),
+ GFP_KERNEL);
+ if (!telemetry->regs_hist_buff)
+ goto err_free_rp_indexes;
+
+ telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
+ &telemetry->regs_data_p,
+ GFP_KERNEL);
+ if (!telemetry->regs_data)
+ goto err_free_regs_hist_buff;
+
+ for (i = 0; i < tl_data->num_hbuff; i++) {
+ tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
+ if (!tl_data_regs)
+ goto err_free_dma;
+
+ telemetry->regs_hist_buff[i] = tl_data_regs;
+ }
+
+ accel_dev->telemetry = telemetry;
+
+ return 0;
+
+err_free_dma:
+ dma_free_coherent(dev, regs_sz, telemetry->regs_data,
+ telemetry->regs_data_p);
+
+ while (i--)
+ kfree(telemetry->regs_hist_buff[i]);
+
+err_free_regs_hist_buff:
+ kfree(telemetry->regs_hist_buff);
+err_free_rp_indexes:
+ kfree(telemetry->rp_num_indexes);
+err_free_tl:
+ kfree(telemetry);
+
+ return -ENOMEM;
+}
+
+static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t regs_sz = tl_data->layout_sz;
+ unsigned int i;
+
+ for (i = 0; i < tl_data->num_hbuff; i++)
+ kfree(telemetry->regs_hist_buff[i]);
+
+ dma_free_coherent(dev, regs_sz, telemetry->regs_data,
+ telemetry->regs_data_p);
+
+ kfree(telemetry->regs_hist_buff);
+ kfree(telemetry->rp_num_indexes);
+ kfree(telemetry);
+ accel_dev->telemetry = NULL;
+}
+
+static unsigned long get_next_timeout(void)
+{
+ return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
+}
+
+static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
+{
+ void *dst = telemetry->regs_hist_buff[telemetry->hb_num];
+ void *src = telemetry->regs_data;
+
+ memcpy(dst, src, size);
+}
+
+static void tl_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct adf_telemetry *telemetry;
+ struct adf_tl_hw_data *tl_data;
+ u32 msg_cnt, old_msg_cnt;
+ size_t layout_sz;
+ u32 *regs_data;
+ size_t id;
+
+ delayed_work = to_delayed_work(work);
+ telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
+ tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ regs_data = telemetry->regs_data;
+
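+	/*
+	 * msg_cnt_off is a byte offset into the layout (for gen4,
+	 * offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt));
+	 * dividing by sizeof(u32) turns it into an index into the
+	 * DMA region viewed as an array of 32-bit words.
+	 */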
+ id = tl_data->msg_cnt_off / sizeof(*regs_data);
+ layout_sz = tl_data->layout_sz;
+
+ if (!atomic_read(&telemetry->state)) {
+ cancel_delayed_work_sync(&telemetry->work_ctx);
+ return;
+ }
+
+ msg_cnt = regs_data[id];
+ old_msg_cnt = msg_cnt;
+ if (msg_cnt == telemetry->msg_cnt)
+ goto out;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ snapshot_regs(telemetry, layout_sz);
+
+	/* If the device updated the region mid-copy, snapshot it again. */
+ msg_cnt = regs_data[id];
+ if (old_msg_cnt != msg_cnt)
+ snapshot_regs(telemetry, layout_sz);
+
+ telemetry->msg_cnt = msg_cnt;
+ telemetry->hb_num++;
+ telemetry->hb_num %= telemetry->hbuffs;
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+out:
+ adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
+}
+
+int adf_tl_halt(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ int ret;
+
+ cancel_delayed_work_sync(&telemetry->work_ctx);
+ atomic_set(&telemetry->state, 0);
+
+ ret = adf_send_admin_tl_stop(accel_dev);
+ if (ret)
+ dev_err(dev, "failed to stop telemetry\n");
+
+ return ret;
+}
+
+int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t layout_sz = tl_data->layout_sz;
+ int ret;
+
+ ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
+ layout_sz, telemetry->rp_num_indexes,
+ &telemetry->slice_cnt);
+ if (ret) {
+ dev_err(dev, "failed to start telemetry\n");
+ return ret;
+ }
+
+ telemetry->hbuffs = state;
+ atomic_set(&telemetry->state, state);
+
+ adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
+
+ return 0;
+}
+
+int adf_tl_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
+ struct device *dev = &GET_DEV(accel_dev);
+ struct adf_telemetry *telemetry;
+ unsigned int i;
+ int ret;
+
+ ret = validate_tl_data(tl_data);
+ if (ret)
+ return ret;
+
+ ret = adf_tl_alloc_mem(accel_dev);
+ if (ret) {
+ dev_err(dev, "failed to initialize: %d\n", ret);
+ return ret;
+ }
+
+ telemetry = accel_dev->telemetry;
+ telemetry->accel_dev = accel_dev;
+
+ mutex_init(&telemetry->wr_lock);
+ mutex_init(&telemetry->regs_hist_lock);
+ INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);
+
+ for (i = 0; i < max_rp; i++)
+ telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;
+
+ return 0;
+}
+
+int adf_tl_start(struct adf_accel_dev *accel_dev)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+
+ if (!accel_dev->telemetry)
+ return -EOPNOTSUPP;
+
+ if (!is_tl_supported(accel_dev)) {
+ dev_info(dev, "feature not supported by FW\n");
+ adf_tl_free_mem(accel_dev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void adf_tl_stop(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->telemetry)
+ return;
+
+ if (atomic_read(&accel_dev->telemetry->state))
+ adf_tl_halt(accel_dev);
+}
+
+void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->telemetry)
+ return;
+
+ adf_tl_free_mem(accel_dev);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
new file mode 100644
index 000000000..9be81cd3b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_TELEMETRY_H
+#define ADF_TELEMETRY_H
+
+#include <linux/bits.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "icp_qat_fw_init_admin.h"
+
+struct adf_accel_dev;
+struct adf_tl_dbg_counter;
+struct dentry;
+
+#define ADF_TL_SL_CNT_COUNT \
+ (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8))
+
+#define TL_CAPABILITY_BIT BIT(1)
+/* Interval at which the device writes data to the DMA region, in milliseconds. */
+#define ADF_TL_DATA_WR_INTERVAL_MS 1000
+/* Interval at which the timer work handler runs, in milliseconds. */
+#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2)
+
+#define ADF_TL_RP_REGS_DISABLED (0xff)
+
+struct adf_tl_hw_data {
+ size_t layout_sz;
+ size_t slice_reg_sz;
+ size_t rp_reg_sz;
+ size_t msg_cnt_off;
+ const struct adf_tl_dbg_counter *dev_counters;
+ const struct adf_tl_dbg_counter *sl_util_counters;
+ const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter *rp_counters;
+ u8 num_hbuff;
+ u8 cpp_ns_per_cycle;
+ u8 bw_units_to_bytes;
+ u8 num_dev_counters;
+ u8 num_rp_counters;
+ u8 max_rp;
+};
+
+struct adf_telemetry {
+ struct adf_accel_dev *accel_dev;
+ atomic_t state;
+ u32 hbuffs;
+ int hb_num;
+ u32 msg_cnt;
+ dma_addr_t regs_data_p; /* bus address for DMA mapping */
+ void *regs_data; /* virtual address for DMA mapping */
+ /**
+ * @regs_hist_buff: array of pointers to copies of the last @hbuffs
+ * values of @regs_data
+ */
+ void **regs_hist_buff;
+ struct dentry *dbg_dir;
+ u8 *rp_num_indexes;
+ /**
+	 * @regs_hist_lock: protects against concurrent reads and writes of
+	 * the copies referenced by @regs_hist_buff
+ */
+ struct mutex regs_hist_lock;
+ /**
+	 * @wr_lock: protects against concurrent writes to the debugfs telemetry files
+ */
+ struct mutex wr_lock;
+ struct delayed_work work_ctx;
+ struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_tl_init(struct adf_accel_dev *accel_dev);
+int adf_tl_start(struct adf_accel_dev *accel_dev);
+void adf_tl_stop(struct adf_accel_dev *accel_dev);
+void adf_tl_shutdown(struct adf_accel_dev *accel_dev);
+int adf_tl_run(struct adf_accel_dev *accel_dev, int state);
+int adf_tl_halt(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_tl_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline int adf_tl_start(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_tl_stop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_TELEMETRY_H */
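
A minimal sketch of the history-window arithmetic implied by @hb_num and @hbuffs (example values assumed): @hb_num is the next slot to be written, so once the ring has wrapped, the oldest valid snapshot sits at @hb_num itself.

	static unsigned int oldest_snapshot(u32 msg_cnt, u32 hbuffs, u32 hb_num)
	{
		/* e.g. msg_cnt = 7, hbuffs = 4, hb_num = 3 */
		unsigned int samples = min(msg_cnt, hbuffs);	/* 4 */

		return (hb_num + hbuffs - samples) % hbuffs;	/* 3 */
	}
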
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
new file mode 100644
index 000000000..c8241f5a0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#define dev_fmt(fmt) "Telemetry debugfs: " fmt
+
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_strings.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+
+#define TL_VALUE_MIN_PADDING 20
+#define TL_KEY_MIN_PADDING 23
+#define TL_RP_SRV_UNKNOWN "Unknown"
+
+static int tl_collect_values_u32(struct adf_telemetry *telemetry,
+ size_t counter_offset, u64 *arr)
+{
+ unsigned int samples, hb_idx, i;
+ u32 *regs_hist_buff;
+ u32 counter_val;
+
+ samples = min(telemetry->msg_cnt, telemetry->hbuffs);
+ hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ for (i = 0; i < samples; i++) {
+ regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
+ counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
+ arr[i] = counter_val;
+ hb_idx++;
+ }
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+ return samples;
+}
+
+static int tl_collect_values_u64(struct adf_telemetry *telemetry,
+ size_t counter_offset, u64 *arr)
+{
+ unsigned int samples, hb_idx, i;
+ u64 *regs_hist_buff;
+ u64 counter_val;
+
+ samples = min(telemetry->msg_cnt, telemetry->hbuffs);
+ hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ for (i = 0; i < samples; i++) {
+ regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
+ counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
+ arr[i] = counter_val;
+ hb_idx++;
+ }
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+ return samples;
+}
+
+/**
+ * avg_array() - Return average of values within an array.
+ * @array: Array of values.
+ * @len: Number of elements.
+ *
+ * This algorithm computes the average of an array without intermediate overflow.
+ *
+ * Return: average of values.
+ */
+#define avg_array(array, len) ( \
+{ \
+ typeof(&(array)[0]) _array = (array); \
+ __unqual_scalar_typeof(_array[0]) _x = 0; \
+ __unqual_scalar_typeof(_array[0]) _y = 0; \
+ __unqual_scalar_typeof(_array[0]) _a, _b; \
+ typeof(len) _len = (len); \
+ size_t _i; \
+ \
+ for (_i = 0; _i < _len; _i++) { \
+ _a = _array[_i]; \
+ _b = do_div(_a, _len); \
+ _x += _a; \
+ if (_y >= _len - _b) { \
+ _x++; \
+ _y -= _len - _b; \
+ } else { \
+ _y += _b; \
+ } \
+ } \
+ do_div(_y, _len); \
+ (_x + _y); \
+})
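+
+/*
+ * Example: for array = {3, 4, 6} and len = 3, the per-element quotients
+ * sum to _x = 1 + 1 + 2 = 4 while the remainders accumulate to _y = 1,
+ * which truncates to 0 in the final division, giving 4, i.e. 13 / 3
+ * rounded down, without any intermediate 64-bit overflow.
+ */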
+
+/* Calculation function for simple counter. */
+static int tl_calc_count(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u64 *hist_vals;
+ int sample_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
+ GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_vals;
+
+ vals->curr = hist_vals[sample_cnt - 1];
+ vals->min = min_array(hist_vals, sample_cnt);
+ vals->max = max_array(hist_vals, sample_cnt);
+ vals->avg = avg_array(hist_vals, sample_cnt);
+
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+/* Convert CPP bus cycles to ns. */
+static int tl_cycles_to_ns(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
+ int ret;
+
+ ret = tl_calc_count(telemetry, ctr, vals);
+ if (ret)
+ return ret;
+
+ vals->curr *= cpp_ns_per_cycle;
+ vals->min *= cpp_ns_per_cycle;
+ vals->max *= cpp_ns_per_cycle;
+ vals->avg *= cpp_ns_per_cycle;
+
+ return 0;
+}
+
+/*
+ * Compute latency cumulative average with division of accumulated value
+ * by sample count. Returned value is in ns.
+ */
+static int tl_lat_acc_avg(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
+ u8 num_hbuff = tl_data->num_hbuff;
+ int sample_cnt, i;
+ u64 *hist_vals;
+ u64 *hist_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL);
+ if (!hist_cnt) {
+ ret = -ENOMEM;
+ goto out_free_hist_vals;
+ }
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_cnt;
+
+ tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt);
+
+ for (i = 0; i < sample_cnt; i++) {
+ /* Avoid division by 0 if count is 0. */
+ if (hist_cnt[i])
+ hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle,
+ hist_cnt[i]);
+ else
+ hist_vals[i] = 0;
+ }
+
+ vals->curr = hist_vals[sample_cnt - 1];
+ vals->min = min_array(hist_vals, sample_cnt);
+ vals->max = max_array(hist_vals, sample_cnt);
+ vals->avg = avg_array(hist_vals, sample_cnt);
+
+out_free_hist_cnt:
+ kfree(hist_cnt);
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+/* Convert HW raw bandwidth units to Mbps. */
+static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE;
+ u64 *hist_vals;
+ int sample_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
+ GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_vals;
+
+ vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA);
+ vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+ vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+ vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+static void tl_seq_printf_counter(struct adf_telemetry *telemetry,
+ struct seq_file *s, const char *name,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr);
+ if (atomic_read(&telemetry->state) > 1) {
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg);
+ }
+ seq_puts(s, "\n");
+}
+
+static int tl_calc_and_print_counter(struct adf_telemetry *telemetry,
+ struct seq_file *s,
+ const struct adf_tl_dbg_counter *ctr,
+ const char *name)
+{
+ const char *counter_name = name ? name : ctr->name;
+ enum adf_tl_counter_type type = ctr->type;
+ struct adf_tl_dbg_aggr_values vals;
+ int ret;
+
+ switch (type) {
+ case ADF_TL_SIMPLE_COUNT:
+ ret = tl_calc_count(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_NS:
+ ret = tl_cycles_to_ns(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_NS_AVG:
+ ret = tl_lat_acc_avg(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_MBPS:
+ ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ tl_seq_printf_counter(telemetry, s, counter_name, &vals);
+
+ return 0;
+}
+
+static int tl_print_sl_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id)
+{
+ size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ size_t offset_inc = cnt_id * sl_regs_sz;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id);
+ slice_ctr = *ctr;
+ slice_ctr.offset1 += offset_inc;
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type, u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *sl_tl_util_counters;
+ const struct adf_tl_dbg_counter *sl_tl_exec_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ int ret;
+
+ sl_tl_util_counters = tl_data->sl_util_counters;
+ sl_tl_exec_counters = tl_data->sl_exec_counters;
+
+ ctr = &sl_tl_util_counters[cnt_type];
+
+ ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+
+ ctr = &sl_tl_exec_counters[cnt_type];
+
+ ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice execution counter type\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
+{
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
+ seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt);
+}
+
+static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
+ struct seq_file *s)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *dev_tl_counters;
+ u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
+ const struct adf_tl_dbg_counter *ctr;
+ unsigned int i;
+ int ret;
+ u8 j;
+
+ if (!atomic_read(&telemetry->state)) {
+ dev_info(&GET_DEV(accel_dev), "not enabled\n");
+ return -EPERM;
+ }
+
+ dev_tl_counters = tl_data->dev_counters;
+
+ tl_print_msg_cnt(s, telemetry->msg_cnt);
+
+ /* Print device level telemetry. */
+ for (i = 0; i < num_dev_counters; i++) {
+ ctr = &dev_tl_counters[i];
+ ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid counter type\n");
+ return ret;
+ }
+ }
+
+ /* Print per slice telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < sl_cnt[i]; j++) {
+ ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tl_dev_data_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ return tl_print_dev_data(accel_dev, s);
+}
+DEFINE_SHOW_ATTRIBUTE(tl_dev_data);
+
+static int tl_control_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state));
+
+ return 0;
+}
+
+static ssize_t tl_control_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq_f = file->private_data;
+ struct adf_accel_dev *accel_dev;
+ struct adf_telemetry *telemetry;
+ struct adf_tl_hw_data *tl_data;
+ struct device *dev;
+ u32 input;
+ int ret;
+
+ accel_dev = seq_f->private;
+ if (!accel_dev)
+ return -EINVAL;
+
+ tl_data = &GET_TL_DATA(accel_dev);
+ telemetry = accel_dev->telemetry;
+ dev = &GET_DEV(accel_dev);
+
+ mutex_lock(&telemetry->wr_lock);
+
+ ret = kstrtou32_from_user(userbuf, count, 10, &input);
+ if (ret)
+ goto unlock_and_exit;
+
+ if (input > tl_data->num_hbuff) {
+ dev_info(dev, "invalid control input\n");
+ ret = -EINVAL;
+ goto unlock_and_exit;
+ }
+
+ /* If input is 0, just stop telemetry. */
+ if (!input) {
+ ret = adf_tl_halt(accel_dev);
+ if (!ret)
+ ret = count;
+
+ goto unlock_and_exit;
+ }
+
+ /* If TL is already enabled, stop it. */
+ if (atomic_read(&telemetry->state)) {
+ dev_info(dev, "already enabled, restarting.\n");
+ ret = adf_tl_halt(accel_dev);
+ if (ret)
+ goto unlock_and_exit;
+ }
+
+ ret = adf_tl_run(accel_dev, input);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = count;
+
+unlock_and_exit:
+ mutex_unlock(&telemetry->wr_lock);
+ return ret;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
+
+static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
+{
+ char alpha;
+ u8 index;
+ int ret;
+
+ ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
+ *rp_id = index;
+
+ return 0;
+}
+
+static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
+ unsigned int new_rp_num,
+ unsigned int rp_regs_index)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ unsigned int i;
+ u8 curr_state;
+ int ret;
+
+ if (new_rp_num >= hw_data->num_rps) {
+ dev_info(dev, "invalid Ring Pair number selected\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw_data->tl_data.max_rp; i++) {
+ if (telemetry->rp_num_indexes[i] == new_rp_num) {
+ dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n",
+ new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i));
+ return 0;
+ }
+ }
+
+ dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n",
+ new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
+
+ curr_state = atomic_read(&telemetry->state);
+
+ if (curr_state) {
+ ret = adf_tl_halt(accel_dev);
+ if (ret)
+ return ret;
+
+ telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
+
+ ret = adf_tl_run(accel_dev, curr_state);
+ if (ret)
+ return ret;
+ } else {
+ telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
+ }
+
+ return 0;
+}
+
+static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
+ u8 rp_idx)
+{
+ u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf;
+ enum adf_cfg_service_type svc;
+
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE);
+
+ svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf);
+ switch (svc) {
+ case COMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC);
+ break;
+ case SYM:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM);
+ break;
+ case ASYM:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
+ break;
+ default:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
+ break;
+ }
+}
+
+static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s,
+ u8 rp_regs_index)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *rp_tl_counters;
+ u8 num_rp_counters = tl_data->num_rp_counters;
+ size_t rp_regs_sz = tl_data->rp_reg_sz;
+ struct adf_tl_dbg_counter ctr;
+ unsigned int i;
+ u8 rp_idx;
+ int ret;
+
+ if (!atomic_read(&telemetry->state)) {
+ dev_info(&GET_DEV(accel_dev), "not enabled\n");
+ return -EPERM;
+ }
+
+ rp_tl_counters = tl_data->rp_counters;
+ rp_idx = telemetry->rp_num_indexes[rp_regs_index];
+
+ if (rp_idx == ADF_TL_RP_REGS_DISABLED) {
+ dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n",
+ ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
+ return -EPERM;
+ }
+
+ tl_print_msg_cnt(s, telemetry->msg_cnt);
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX);
+ seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx);
+ tl_print_rp_srv(accel_dev, s, rp_idx);
+
+ for (i = 0; i < num_rp_counters; i++) {
+ ctr = rp_tl_counters[i];
+ ctr.offset1 += rp_regs_sz * rp_regs_index;
+ ctr.offset2 += rp_regs_sz * rp_regs_index;
+ ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "invalid RP counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tl_rp_data_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+ u8 rp_regs_index;
+ u8 max_rp;
+ int ret;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ max_rp = GET_TL_DATA(accel_dev).max_rp;
+ ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
+ return ret;
+ }
+
+ return tl_print_rp_data(accel_dev, s, rp_regs_index);
+}
+
+static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq_f = file->private_data;
+ struct adf_accel_dev *accel_dev;
+ struct adf_telemetry *telemetry;
+ unsigned int new_rp_num;
+ u8 rp_regs_index;
+ u8 max_rp;
+ int ret;
+
+ accel_dev = seq_f->private;
+ if (!accel_dev)
+ return -EINVAL;
+
+ telemetry = accel_dev->telemetry;
+ max_rp = GET_TL_DATA(accel_dev).max_rp;
+
+ mutex_lock(&telemetry->wr_lock);
+
+ ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
+ goto unlock_and_exit;
+ }
+
+ ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = count;
+
+unlock_and_exit:
+ mutex_unlock(&telemetry->wr_lock);
+ return ret;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data);
+
+void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct dentry *parent = accel_dev->debugfs_dir;
+ u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
+ char name[ADF_TL_RP_REGS_FNAME_SIZE];
+ struct dentry *dir;
+ unsigned int i;
+
+ if (!telemetry)
+ return;
+
+ dir = debugfs_create_dir("telemetry", parent);
+ accel_dev->telemetry->dbg_dir = dir;
+ debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops);
+ debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops);
+
+ for (i = 0; i < max_rp; i++) {
+ snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
+ ADF_TL_DBG_RP_ALPHA_INDEX(i));
+ debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ }
+}
+
+void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct dentry *dbg_dir;
+
+ if (!telemetry)
+ return;
+
+ dbg_dir = telemetry->dbg_dir;
+
+ debugfs_remove_recursive(dbg_dir);
+
+ if (atomic_read(&telemetry->state))
+ adf_tl_halt(accel_dev);
+}
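
A hypothetical userspace probe for the files created above (the device directory name under debugfs is an assumption; it depends on the probed device):

	#include <stdio.h>

	int main(void)
	{
		const char *base = "/sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry";
		char path[256], line[256];
		FILE *f;

		/* Enable telemetry, keeping up to 4 history buffers. */
		snprintf(path, sizeof(path), "%s/control", base);
		f = fopen(path, "w");
		if (!f)
			return 1;
		fputs("4", f);
		fclose(f);

		/* Dump the device-level counters. */
		snprintf(path, sizeof(path), "%s/device_data", base);
		f = fopen(path, "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
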
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
new file mode 100644
index 000000000..11cc9eae1
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_TL_DEBUGFS_H
+#define ADF_TL_DEBUGFS_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+#define MAX_COUNT_NAME_SIZE 32
+#define SNAPSHOT_CNT_MSG "sample_cnt"
+#define RP_NUM_INDEX "rp_num"
+#define PCI_TRANS_CNT_NAME "pci_trans_cnt"
+#define MAX_RD_LAT_NAME "max_rd_lat"
+#define RD_LAT_ACC_NAME "rd_lat_acc_avg"
+#define MAX_LAT_NAME "max_gp_lat"
+#define LAT_ACC_NAME "gp_lat_acc_avg"
+#define BW_IN_NAME "bw_in"
+#define BW_OUT_NAME "bw_out"
+#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
+#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
+#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
+#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit"
+#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss"
+#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit"
+#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss"
+#define RP_SERVICE_TYPE "service_type"
+
+#define ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A')
+#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A')
+
+#define ADF_TL_RP_REGS_FNAME "rp_%c_data"
+#define ADF_TL_RP_REGS_FNAME_SIZE 16
+
+#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \
+ offsetof(struct adf_##qat_gen##_tl_layout, reg)
+
+#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \
+ (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg))
+
+#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+
+#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
+ (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
+
+/**
+ * enum adf_tl_counter_type - telemetry counter types
+ * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter
+ * @ADF_TL_SIMPLE_COUNT: simple counter
+ * @ADF_TL_COUNTER_NS: latency counter, value in ns
+ * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns
+ * @ADF_TL_COUNTER_MBPS: bandwidth, value in Mbps
+ */
+enum adf_tl_counter_type {
+ ADF_TL_COUNTER_UNSUPPORTED,
+ ADF_TL_SIMPLE_COUNT,
+ ADF_TL_COUNTER_NS,
+ ADF_TL_COUNTER_NS_AVG,
+ ADF_TL_COUNTER_MBPS,
+};
+
+/**
+ * struct adf_tl_dbg_counter - telemetry counter definition
+ * @name: name of the counter as printed in the report
+ * @type: type of the counter
+ * @offset1: offset of 1st register
+ * @offset2: offset of 2nd optional register
+ */
+struct adf_tl_dbg_counter {
+ const char *name;
+ enum adf_tl_counter_type type;
+ size_t offset1;
+ size_t offset2;
+};
+
+#define ADF_TL_COUNTER(_name, _type, _offset) \
+{ .name = _name, \
+ .type = _type, \
+ .offset1 = _offset \
+}
+
+#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \
+{ .name = _name, \
+ .type = _type, \
+ .offset1 = _offset1, \
+ .offset2 = _offset2 \
+}
+
+/* Telemetry counter aggregated values. */
+struct adf_tl_dbg_aggr_values {
+ u64 curr;
+ u64 min;
+ u64 max;
+ u64 avg;
+};
+
+/**
+ * adf_tl_dbgfs_add() - Add telemetry debugfs entries.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Creates the telemetry debugfs directory and attributes under the QAT debugfs root.
+ */
+void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev);
+
+/**
+ * adf_tl_dbgfs_rm() - Remove telemetry debugfs entries.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Removes the telemetry debugfs directory and attributes from the QAT debugfs root.
+ */
+void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_TL_DEBUGFS_H */
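
For instance, the ring-pair name helpers round-trip a slot index through the debugfs file name (a small sketch):

	char name[ADF_TL_RP_REGS_FNAME_SIZE];

	/* Slot 2 yields the file name "rp_C_data" ... */
	snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
		 ADF_TL_DBG_RP_ALPHA_INDEX(2));
	/* ... and parsing 'C' back recovers slot 2. */
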
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index cd418b51d..63cf18e2a 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -29,6 +29,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_RL_ADD = 134,
ICP_QAT_FW_RL_UPDATE = 135,
ICP_QAT_FW_RL_REMOVE = 136,
+ ICP_QAT_FW_TL_START = 137,
+ ICP_QAT_FW_TL_STOP = 138,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -36,6 +38,13 @@ enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
};
+struct icp_qat_fw_init_admin_tl_rp_indexes {
+ __u8 rp_num_index_0;
+ __u8 rp_num_index_1;
+ __u8 rp_num_index_2;
+ __u8 rp_num_index_3;
+};
+
struct icp_qat_fw_init_admin_slice_cnt {
__u8 cpr_cnt;
__u8 xlt_cnt;
@@ -87,6 +96,7 @@ struct icp_qat_fw_init_admin_req {
__u8 rp_count;
};
__u32 idle_filter;
+ struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes;
};
__u32 resrvd4;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index eb2ef225b..b8f1c4ffb 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -18,7 +18,12 @@ enum icp_qat_hw_ae_id {
ICP_QAT_HW_AE_9 = 9,
ICP_QAT_HW_AE_10 = 10,
ICP_QAT_HW_AE_11 = 11,
- ICP_QAT_HW_AE_DELIMITER = 12
+ ICP_QAT_HW_AE_12 = 12,
+ ICP_QAT_HW_AE_13 = 13,
+ ICP_QAT_HW_AE_14 = 14,
+ ICP_QAT_HW_AE_15 = 15,
+ ICP_QAT_HW_AE_16 = 16,
+ ICP_QAT_HW_AE_DELIMITER = 17
};
enum icp_qat_hw_qat_id {
@@ -95,7 +100,7 @@ enum icp_qat_capabilities_mask {
/* Bits 10-11 are currently reserved */
ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
- /* Bit 14 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14),
ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
@@ -107,7 +112,10 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
- ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
+ ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26),
+ /* Bits 27-28 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29),
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30),
};
#define QAT_AUTH_MODE_BITPOS 4
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 69482abdb..e28241bdd 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,7 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
-#define ICP_QAT_UCLO_MAX_AE 12
+#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
#define ICP_QAT_UCLO_MAX_USTORE 0x4000
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index cbb946a80..317cafa9d 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
+ case ADF_420XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- handle->chip_info->icp_rst_mask = 0x100015;
+ if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ handle->chip_info->icp_rst_mask = 0x100155;
+ else
+ handle->chip_info->icp_rst_mask = 0x100015;
handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
handle->chip_info->wakeup_event_val = 0x80000000;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index e27ea7e28..ad2c64af7 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
+ case ADF_420XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5744df30c..5fd31ba71 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -488,7 +488,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
for (i = 0; i < caps->nengines; i++) {
struct mv_cesa_engine *engine = &cesa->engines[i];
- char res_name[7];
+ char res_name[16];
engine->id = i;
spin_lock_init(&engine->lock);
@@ -509,7 +509,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
* Not all platforms can gate the CESA clocks: do not complain
* if the clock does not exist.
*/
- snprintf(res_name, sizeof(res_name), "cesa%d", i);
+ snprintf(res_name, sizeof(res_name), "cesa%u", i);
engine->clk = devm_clk_get(dev, res_name);
if (IS_ERR(engine->clk)) {
engine->clk = devm_clk_get(dev, NULL);
@@ -517,7 +517,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine->clk = NULL;
}
- snprintf(res_name, sizeof(res_name), "cesaz%d", i);
+ snprintf(res_name, sizeof(res_name), "cesaz%u", i);
engine->zclk = devm_clk_get(dev, res_name);
if (IS_ERR(engine->zclk))
engine->zclk = NULL;
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 1c2c870e8..3c5d577d8 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -473,12 +473,6 @@ static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}
-static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
- const u8 *key, u32 keylen)
-{
- return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
-}
-
static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
@@ -1352,23 +1346,6 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "cpt_cfb_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
- .base.cra_alignmask = 7,
- .base.cra_priority = 4001,
- .base.cra_module = THIS_MODULE,
-
- .init = otx_cpt_enc_dec_init,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = otx_cpt_skcipher_cfb_aes_setkey,
- .encrypt = otx_cpt_skcipher_encrypt,
- .decrypt = otx_cpt_skcipher_decrypt,
-}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cpt_cbc_des3_ede",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 93d22b328..79b4e7480 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -14,12 +14,14 @@ static struct cpt_hw_ops otx2_hw_ops = {
.send_cmd = otx2_cpt_send_cmd,
.cpt_get_compcode = otx2_cpt_get_compcode,
.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
+ .cpt_sg_info_create = otx2_sg_info_create,
};
static struct cpt_hw_ops cn10k_hw_ops = {
.send_cmd = cn10k_cpt_send_cmd,
.cpt_get_compcode = cn10k_cpt_get_compcode,
.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+ .cpt_sg_info_create = otx2_sg_info_create,
};
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
@@ -78,12 +80,9 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
struct pci_dev *pdev = cptvf->pdev;
resource_size_t offset, size;
- if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
- cptvf->lfs.ops = &otx2_hw_ops;
+ if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
return 0;
- }
- cptvf->lfs.ops = &cn10k_hw_ops;
offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
/* Map VF LMTLINE region */
@@ -96,3 +95,82 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx)
+{
+ u64 cptr_dma;
+
+ if (!is_dev_cn10ka_ax(pdev))
+ return;
+
+ cptr_dma = er_ctx->cptr_dma & ~(BIT_ULL(60));
+ cn10k_cpt_ctx_flush(pdev, cptr_dma, true);
+ dma_unmap_single(&pdev->dev, cptr_dma, CN10K_CPT_HW_CTX_SIZE,
+ DMA_BIDIRECTIONAL);
+ kfree(er_ctx->hw_ctx);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
+{
+ hctx->w0.aop_valid = 1;
+ hctx->w0.ctx_hdr_sz = 0;
+ hctx->w0.ctx_sz = ctx_sz;
+ hctx->w0.ctx_push_sz = 1;
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);
+
+int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx)
+{
+ union cn10k_cpt_hw_ctx *hctx;
+ u64 cptr_dma;
+
+ er_ctx->cptr_dma = 0;
+ er_ctx->hw_ctx = NULL;
+
+ if (!is_dev_cn10ka_ax(pdev))
+ return 0;
+
+ hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
+ if (unlikely(!hctx))
+ return -ENOMEM;
+ cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ cn10k_cpt_hw_ctx_set(hctx, 1);
+ er_ctx->hw_ctx = hctx;
+ er_ctx->cptr_dma = cptr_dma | BIT_ULL(60);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ u64 reg;
+
+ reg = (uintptr_t)cptr >> 7;
+ if (inval)
+ reg = reg | BIT_ULL(46);
+
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
+ OTX2_CPT_LF_CTX_FLUSH, reg);
+ /* Make sure that the FLUSH operation is complete */
+ wmb();
+ otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
+ OTX2_CPT_LF_CTX_ERR);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
+{
+ if (test_bit(CN10K_LMTST, &cptvf->cap_flag))
+ cptvf->lfs.ops = &cn10k_hw_ops;
+ else
+ cptvf->lfs.ops = &otx2_hw_ops;
+}
+EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);
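
A sketch of the intended lifecycle of the CN10KA A-step errata helpers above (the caller context is assumed): allocate and map the dummy context at setup, hand the tagged DMA address to the CPT instruction, then flush and unmap at teardown.

	static int sketch_errata_ctx_use(struct pci_dev *pdev)
	{
		struct cn10k_cpt_errata_ctx er_ctx;
		int ret;

		ret = cn10k_cpt_hw_ctx_init(pdev, &er_ctx);	/* no-op off A-step */
		if (ret)
			return ret;
		/* ... program er_ctx.cptr_dma (bit 60 tags it) into the instruction ... */
		cn10k_cpt_hw_ctx_clear(pdev, &er_ctx);		/* flush, unmap, free */
		return 0;
	}
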
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index aaefc7e38..92be3ecf5 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -8,6 +8,26 @@
#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
+#define CN10K_CPT_HW_CTX_SIZE 256
+
+union cn10k_cpt_hw_ctx {
+ u64 u;
+ struct {
+ u64 reserved_0_47:48;
+ u64 ctx_push_sz:7;
+ u64 reserved_55:1;
+ u64 ctx_hdr_sz:2;
+ u64 aop_valid:1;
+ u64 reserved_59:1;
+ u64 ctx_sz:4;
+ } w0;
+};
+
+struct cn10k_cpt_errata_ctx {
+ union cn10k_cpt_hw_ctx *hw_ctx;
+ u64 cptr_dma;
+};
+
static inline u8 cn10k_cpt_get_compcode(union otx2_cpt_res_s *result)
{
return ((struct cn10k_cpt_res_s *)result)->compcode;
@@ -30,5 +50,12 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval);
+int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx);
+void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx);
+void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz);
+void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf);
#endif /* __CN10K_CPTLF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 46b778bbb..c5b7c5757 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -56,7 +56,11 @@ struct otx2_cpt_rx_inline_lf_cfg {
u16 param2;
u16 opcode;
u32 credit;
+ u32 credit_th;
+ u16 bpid;
u32 reserved;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
};
/*
@@ -102,7 +106,10 @@ union otx2_cpt_eng_caps {
u64 kasumi:1;
u64 des:1;
u64 crc:1;
- u64 reserved_14_63:50;
+ u64 mmul:1;
+ u64 reserved_15_33:19;
+ u64 pdcp_chain:1;
+ u64 reserved_35_63:29;
};
};
@@ -145,6 +152,35 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
return false;
}
+static inline bool is_dev_cn10ka(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A;
+}
+
+static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
+ (pdev->revision & 0xFF) == 0x51))
+ return true;
+
+ return false;
+}
+
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_B;
+}
+
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
unsigned long *cap_flag)
{
@@ -154,6 +190,21 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
}
}
+static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
+{
+ if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
+ return true;
+
+ return false;
+}
+
+static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
+{
+ if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
+ return true;
+
+ return false;
+}
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
@@ -171,5 +222,6 @@ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
+int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
#endif /* __OTX2_CPT_COMMON_H */
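
The new predicates decode the part from the PCI subsystem ID (0xB900 is CN10KA, 0xBD00 is CN10KB) and the revision byte (0x04/0x50/0x51 are CN10KA A-step, 0x54 is CN10KA B0). The rest of the series keys off two derived facts: erratum 38550 applies to OTX2 and CN10KA Ax, and the SGv2 request format is available on every part that is neither. An illustrative helper showing how the predicates compose:

    /* Illustrative only: which SG format the driver will use for a device. */
    static const char *cpt_sg_variant(struct pci_dev *pdev)
    {
            return cpt_feature_sgv2(pdev) ?
                   "SGv2: 3 segments per component, native endian" :
                   "SGv1: 4 segments per component, big endian";
    }
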
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index a2aba0b0d..d2b8d26db 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -24,10 +24,45 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
+ ctx->val.vstr[0] = '\0';
+
+ return 0;
+}
+
+static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+ struct pci_dev *pdev = cptpf->pdev;
+ u64 reg_val = 0;
+
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
+ BLKADDR_CPT0);
+ ctx->val.vu8 = (reg_val >> 18) & 0x1;
+
+ return 0;
+}
- otx2_cpt_print_uc_dbg_info(cptpf);
+static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+ struct pci_dev *pdev = cptpf->pdev;
+ u64 reg_val = 0;
+
+ if (cptpf->enabled_vfs != 0 || cptpf->eng_grps.is_grps_created)
+ return -EPERM;
+
+ if (cpt_feature_sgv2(pdev)) {
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
+ &reg_val, BLKADDR_CPT0);
+ reg_val &= ~(0x1ULL << 18);
+ reg_val |= ((u64)ctx->val.vu8 & 0x1) << 18;
+ return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev,
+ CPT_AF_CTL, reg_val, BLKADDR_CPT0);
+ }
return 0;
}
@@ -36,6 +71,7 @@ enum otx2_cpt_dl_param_id {
OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+ OTX2_CPT_DEVLINK_PARAM_ID_T106_MODE,
};
static const struct devlink_param otx2_cpt_dl_params[] = {
@@ -49,6 +85,11 @@ static const struct devlink_param otx2_cpt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete,
NULL),
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_T106_MODE,
+ "t106_mode", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_t106_mode_get, otx2_cpt_dl_t106_mode_set,
+ NULL),
};
static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req,
@@ -120,7 +161,6 @@ int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf)
devlink_free(dl);
return ret;
}
-
devlink_register(dl);
return 0;
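
The new t106_mode parameter surfaces bit 18 of CPT_AF_CTL as a runtime devlink knob: the getter reports the bit, while the setter returns -EPERM once VFs are enabled or engine groups exist, and quietly does nothing on parts without the SGv2 feature. Since cptpf_get_rid() later in this series reads the same bit, flipping it steers a CN10KB-class part onto the CN10KA (cpt04) microcode. A toggle would look like this (the PCI address is an example):

    devlink dev param set pci/0002:20:00.0 name t106_mode value 1 cmode runtime
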
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
index 6f947978e..7e746a4de 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -13,6 +13,9 @@
#define CN10K_CPT_PCI_PF_DEVICE_ID 0xA0F2
#define CN10K_CPT_PCI_VF_DEVICE_ID 0xA0F3
+#define CPT_PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define CPT_PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+
/* Mailbox interrupts offset */
#define OTX2_CPT_PF_MBOX_INT 6
#define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
@@ -99,6 +102,9 @@
#define OTX2_CPT_LF_Q_INST_PTR (0x110)
#define OTX2_CPT_LF_Q_GRP_PTR (0x120)
#define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3)
+#define OTX2_CPT_LF_CTX_CTL (0x500)
+#define OTX2_CPT_LF_CTX_FLUSH (0x510)
+#define OTX2_CPT_LF_CTX_ERR (0x520)
#define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20
/* LMT LF registers */
#define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT)
@@ -467,7 +473,8 @@ union otx2_cptx_af_lf_ctrl {
u64 cont_err:1;
u64 reserved_11_15:5;
u64 nixtx_en:1;
- u64 reserved_17_47:31;
+ u64 ctx_ilen:3;
+ u64 reserved_17_47:28;
u64 grp:8;
u64 reserved_56_63:8;
} s;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 273ee5352..5be0103c1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -229,3 +229,29 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+
+int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct cpt_lf_rst_req *req;
+
+ req = (struct cpt_lf_rst_req *)otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_CPT_LF_RESET;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->slot = slot;
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
index dbb1ee746..e27e849b0 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -27,6 +27,13 @@
#define OTX2_CPT_MAX_REQ_SIZE 65535
+#define SG_COMPS_MAX 4
+#define SGV2_COMPS_MAX 3
+
+#define SG_COMP_3 3
+#define SG_COMP_2 2
+#define SG_COMP_1 1
+
union otx2_cpt_opcode {
u16 flags;
struct {
@@ -40,6 +47,8 @@ struct otx2_cptvf_request {
u32 param2;
u16 dlen;
union otx2_cpt_opcode opcode;
+ dma_addr_t cptr_dma;
+ void *cptr;
};
/*
@@ -143,6 +152,8 @@ struct otx2_cpt_inst_info {
unsigned long time_in;
u32 dlen;
u32 dma_len;
+ u64 gthr_sz;
+ u64 sctr_sz;
u8 extra_time;
};
@@ -157,6 +168,16 @@ struct otx2_cpt_sglist_component {
__be64 ptr3;
};
+struct cn10kb_cpt_sglist_component {
+ u16 len0;
+ u16 len1;
+ u16 len2;
+ u16 valid_segs;
+ u64 ptr0;
+ u64 ptr1;
+ u64 ptr2;
+};
+
static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
struct otx2_cpt_inst_info *info)
{
@@ -188,6 +209,283 @@ static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
kfree(info);
}
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx2_cpt_sglist_component *sg_ptr;
+ int components;
+ int i, j;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ goto sg_cleanup;
+ }
+ }
+ components = buf_count / SG_COMPS_MAX;
+ sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = cpu_to_be16(list[i * SG_COMPS_MAX + 0].size);
+ sg_ptr->len1 = cpu_to_be16(list[i * SG_COMPS_MAX + 1].size);
+ sg_ptr->len2 = cpu_to_be16(list[i * SG_COMPS_MAX + 2].size);
+ sg_ptr->len3 = cpu_to_be16(list[i * SG_COMPS_MAX + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * SG_COMPS_MAX + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * SG_COMPS_MAX + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * SG_COMPS_MAX + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * SG_COMPS_MAX + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % SG_COMPS_MAX;
+
+ switch (components) {
+ case SG_COMP_3:
+ sg_ptr->len2 = cpu_to_be16(list[i * SG_COMPS_MAX + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * SG_COMPS_MAX + 2].dma_addr);
+ fallthrough;
+ case SG_COMP_2:
+ sg_ptr->len1 = cpu_to_be16(list[i * SG_COMPS_MAX + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * SG_COMPS_MAX + 1].dma_addr);
+ fallthrough;
+ case SG_COMP_1:
+ sg_ptr->len0 = cpu_to_be16(list[i * SG_COMPS_MAX + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * SG_COMPS_MAX + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return -EIO;
+}
+
+static inline int sgv2io_components_setup(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct cn10kb_cpt_sglist_component *sg_ptr;
+ int components;
+ int i, j;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ goto sg_cleanup;
+ }
+ }
+ components = buf_count / SGV2_COMPS_MAX;
+ sg_ptr = (struct cn10kb_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = list[i * SGV2_COMPS_MAX + 0].size;
+ sg_ptr->len1 = list[i * SGV2_COMPS_MAX + 1].size;
+ sg_ptr->len2 = list[i * SGV2_COMPS_MAX + 2].size;
+ sg_ptr->ptr0 = list[i * SGV2_COMPS_MAX + 0].dma_addr;
+ sg_ptr->ptr1 = list[i * SGV2_COMPS_MAX + 1].dma_addr;
+ sg_ptr->ptr2 = list[i * SGV2_COMPS_MAX + 2].dma_addr;
+ sg_ptr->valid_segs = SGV2_COMPS_MAX;
+ sg_ptr++;
+ }
+ components = buf_count % SGV2_COMPS_MAX;
+
+ sg_ptr->valid_segs = components;
+ switch (components) {
+ case SG_COMP_2:
+ sg_ptr->len1 = list[i * SGV2_COMPS_MAX + 1].size;
+ sg_ptr->ptr1 = list[i * SGV2_COMPS_MAX + 1].dma_addr;
+ fallthrough;
+ case SG_COMP_1:
+ sg_ptr->len0 = list[i * SGV2_COMPS_MAX + 0].size;
+ sg_ptr->ptr0 = list[i * SGV2_COMPS_MAX + 0].dma_addr;
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return -EIO;
+}
+
+static inline struct otx2_cpt_inst_info *
+cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ u32 dlen = 0, g_len, sg_len, info_len;
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+ int i;
+
+ g_sz_bytes = ((req->in_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+
+ g_len = ALIGN(g_sz_bytes, align);
+ sg_len = ALIGN(g_len + s_sz_bytes, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ for (i = 0; i < req->in_cnt; i++)
+ dlen += req->in[i].size;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+ info->gthr_sz = req->in_cnt;
+ info->sctr_sz = req->out_cnt;
+
+ /* Setup gather (input) components */
+ if (sgv2io_components_setup(pdev, req->in, req->in_cnt,
+ info->in_buffer)) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
+ &info->in_buffer[g_len])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ info->rptr_baddr = info->dptr_baddr + g_len;
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + sg_len;
+ info->comp_baddr = info->dptr_baddr + sg_len;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+static inline struct otx2_cpt_inst_info *
+otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u32 dlen, align_dlen, info_len;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+
+ if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+ req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return NULL;
+ }
+
+ g_sz_bytes = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
+ ((u16 *)info->in_buffer)[0] = req->out_cnt;
+ ((u16 *)info->in_buffer)[1] = req->in_cnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ cpu_to_be64s((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->in_cnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->out_cnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + align_dlen;
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
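
The two inline builders above differ only in packing, not in flow. Legacy SG (otx2_sg_info_create) prefixes an 8-byte big-endian header carrying out_cnt/in_cnt and packs four length/pointer pairs per 40-byte component, all converted with cpu_to_be16()/cpu_to_be64(). SGv2 (cn10k_sgv2_info_create) drops the header, packs three native-endian pairs per 32-byte component, tags each component with valid_segs, and reports the component counts through gthr_sz/sctr_sz for the request manager to fold into DPTR/RPTR. The component-count arithmetic, spelled out:

    /* Components needed for n buffers (ceiling division), per the code above. */
    static inline u32 sgv1_comps(u32 n) { return (n + 3) / 4; } /* 4 buffers each */
    static inline u32 sgv2_comps(u32 n) { return (n + 2) / 3; } /* 3 buffers each */
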
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index e4bd3f030..b52728e3c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -106,6 +106,32 @@ static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
return ret;
}
+static int cptlf_set_ctx_ilen(struct otx2_cptlfs_info *lfs, int ctx_ilen)
+{
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ struct otx2_cptlf_info *lf;
+ int slot, ret = 0;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lf = &lfs->lf[slot];
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u, lfs->blkaddr);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.ctx_ilen = ctx_ilen;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u, lfs->blkaddr);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
/* Disable instruction queues */
@@ -151,26 +177,14 @@ static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
irq_misc.u);
}
-static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
+static void cptlf_set_done_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
+ u64 reg = enable ? OTX2_CPT_LF_DONE_INT_ENA_W1S :
+ OTX2_CPT_LF_DONE_INT_ENA_W1C;
int slot;
- /* Enable done interrupts */
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
- OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
- /* Enable Misc interrupts */
- cptlf_set_misc_intrs(lfs, true);
-}
-
-static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
-{
- int slot;
-
- for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
- OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
- cptlf_set_misc_intrs(lfs, false);
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg, 0x1);
}
static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
@@ -257,24 +271,44 @@ static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
- int i, offs, vector;
+ int i, irq_offs, vector;
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
for (i = 0; i < lfs->lfs_num; i++) {
- for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
- if (!lfs->lf[i].is_irq_reg[offs])
- continue;
+ if (!lfs->lf[i].is_irq_reg[irq_offs])
+ continue;
- vector = pci_irq_vector(lfs->pdev,
- lfs->lf[i].msix_offset + offs);
- free_irq(vector, &lfs->lf[i]);
- lfs->lf[i].is_irq_reg[offs] = false;
- }
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + irq_offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[irq_offs] = false;
}
- cptlf_disable_intrs(lfs);
+
+ cptlf_set_misc_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
+
+void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int i, irq_offs, vector;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (!lfs->lf[i].is_irq_reg[irq_offs])
+ continue;
+
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + irq_offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[irq_offs] = false;
+ }
+
+ cptlf_set_done_intrs(lfs, false);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts,
CRYPTO_DEV_OCTEONTX2_CPT);
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
@@ -296,34 +330,53 @@ static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
return ret;
}
-int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
+int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
+ bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
int irq_offs, ret, i;
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
for (i = 0; i < lfs->lfs_num; i++) {
- irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
- snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPT%dLF Misc%d",
+ is_cpt1, i);
ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
cptlf_misc_intr_handler);
if (ret)
goto free_irq;
+ }
+ cptlf_set_misc_intrs(lfs, true);
+ return 0;
- irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
- snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
- i);
+free_irq:
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
+
+int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
+ int irq_offs, ret, i;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ for (i = 0; i < lfs->lfs_num; i++) {
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32,
+ "OTX2_CPT%dLF Done%d", is_cpt1, i);
ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
cptlf_done_intr_handler);
if (ret)
goto free_irq;
}
- cptlf_enable_intrs(lfs);
+ cptlf_set_done_intrs(lfs, true);
return 0;
free_irq:
- otx2_cptlf_unregister_interrupts(lfs);
+ otx2_cptlf_unregister_done_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -416,6 +469,12 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
if (ret)
goto free_iq;
+ if (lfs->ctx_ilen_ovrd) {
+ ret = cptlf_set_ctx_ilen(lfs, lfs->ctx_ilen);
+ if (ret)
+ goto free_iq;
+ }
+
return 0;
free_iq:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 5302fe3d0..bd8604be2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -5,6 +5,7 @@
#define __OTX2_CPTLF_H
#include <linux/soc/marvell/octeontx2/asm.h>
+#include <linux/bitfield.h>
#include <mbox.h>
#include <rvu.h>
#include "otx2_cpt_common.h"
@@ -99,6 +100,9 @@ struct cpt_hw_ops {
struct otx2_cptlf_info *lf);
u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
+ struct otx2_cpt_inst_info *
+ (*cpt_sg_info_create)(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ gfp_t gfp);
};
struct otx2_cptlfs_info {
@@ -116,6 +120,9 @@ struct otx2_cptlfs_info {
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
+ int global_slot; /* Global slot across the blocks */
+ u8 ctx_ilen;
+ u8 ctx_ilen_ovrd;
};
static inline void otx2_cpt_free_instruction_queues(
@@ -203,48 +210,71 @@ static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
}
+#define INFLIGHT GENMASK_ULL(8, 0)
+#define GRB_CNT GENMASK_ULL(39, 32)
+#define GWB_CNT GENMASK_ULL(47, 40)
+#define XQ_XOR GENMASK_ULL(63, 63)
+#define DQPTR GENMASK_ULL(19, 0)
+#define NQPTR GENMASK_ULL(51, 32)
+
static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
- union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
- union otx2_cptx_lf_inprog lf_inprog;
+ void __iomem *reg_base = lf->lfs->reg_base;
+ struct pci_dev *pdev = lf->lfs->pdev;
u8 blkaddr = lf->lfs->blkaddr;
- int timeout = 20;
+ int timeout = 1000000;
+ u64 inprog, inst_ptr;
+ u64 slot = lf->slot;
+ u64 qsize, pending;
+ int i = 0;
/* Disable instructions enqueuing */
- otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
- OTX2_CPT_LF_CTL, lf_ctl.u);
+ otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, 0x0);
+
+ inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
+ inprog |= BIT_ULL(16);
+ otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG, inprog);
- /* Wait for instruction queue to become empty */
+ qsize = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_SIZE) & 0x7FFF;
+ do {
+ inst_ptr = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_INST_PTR);
+ pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
+ FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr);
+ udelay(1);
+ timeout--;
+ } while ((pending != 0) && (timeout != 0));
+
+ if (timeout == 0)
+ dev_warn(&pdev->dev, "TIMEOUT: CPT poll on pending instructions\n");
+
+ timeout = 1000000;
+ /* Wait for CPT queue to become execution-quiescent */
do {
- lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr,
- lf->slot, OTX2_CPT_LF_INPROG);
- if (!lf_inprog.s.inflight)
- break;
-
- usleep_range(10000, 20000);
- if (timeout-- < 0) {
- dev_err(&lf->lfs->pdev->dev,
- "Error LF %d is still busy.\n", lf->slot);
- break;
+ inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
+
+ if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
+ (FIELD_GET(GRB_CNT, inprog) == 0)) {
+ i++;
+ } else {
+ i = 0;
+ timeout--;
}
+ } while ((timeout != 0) && (i < 10));
- } while (1);
-
- /*
- * Disable executions in the LF's queue,
- * the queue should be empty at this point
- */
- lf_inprog.s.eena = 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
- OTX2_CPT_LF_INPROG, lf_inprog.u);
+ if (timeout == 0)
+ dev_warn(&pdev->dev, "TIMEOUT: CPT poll on inflight count\n");
+ /* Wait for 2 us to flush all queue writes to memory */
+ udelay(2);
}
static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
{
int slot;
- for (slot = 0; slot < lfs->lfs_num; slot++)
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
+ otx2_cpt_lf_reset_msg(lfs, lfs->global_slot + slot);
+ }
}
static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
@@ -282,6 +312,19 @@ static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
OTX2_CPT_LF_INPROG, lf_inprog.u);
}
+static inline void otx2_cptlf_set_ctx_flr_flush(struct otx2_cptlf_info *lf)
+{
+ u8 blkaddr = lf->lfs->blkaddr;
+ u64 val;
+
+ val = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTX_CTL);
+ val |= BIT_ULL(0);
+
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTX_CTL, val);
+}
+
static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
{
otx2_cptlf_set_iqueue_exec(lf, true);
@@ -297,6 +340,10 @@ static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++) {
+ /* Enable flush on FLR for Errata */
+ if (is_dev_cn10kb(lfs->pdev))
+ otx2_cptlf_set_ctx_flr_flush(&lfs->lf[slot]);
+
otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
}
@@ -382,8 +429,10 @@ static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
-int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs);
-void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);
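
The rewritten otx2_cptlf_do_disable_iqueue() replaces the old "sleep until INFLIGHT clears" loop with a two-stage drain: stop enqueue via OTX2_CPT_LF_CTL, set INPROG bit 16, busy-wait until no fetched-but-unexecuted instructions remain, then require INFLIGHT and GRB_CNT to read zero on ten consecutive polls. The pending count comes from OTX2_CPT_LF_Q_INST_PTR, where NQPTR/DQPTR are the enqueue/dequeue indexes and XQ_XOR flips on wrap; Q_SIZE appears to be kept in 40-instruction units, hence the multiply by 40. As a sketch:

    #include <linux/bitfield.h>

    /* Outstanding instructions implied by OTX2_CPT_LF_Q_INST_PTR (sketch). */
    static u64 cpt_pending_insts(u64 inst_ptr, u64 qsize_div40)
    {
            return FIELD_GET(XQ_XOR, inst_ptr) * qsize_div40 * 40 +
                   FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr);
    }
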
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index a209ec5af..e5859a1e1 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -71,4 +71,8 @@ void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
+int otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs);
+void otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs);
+
#endif /* __OTX2_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index e34223daa..400e36d99 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -14,6 +14,8 @@
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
#define CPT_UC_RID_CN9K_B0 1
+#define CPT_UC_RID_CN10K_A 4
+#define CPT_UC_RID_CN10K_B 5
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
@@ -587,43 +589,22 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
-static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
+static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf)
{
- int timeout = 10, ret;
- u64 reg = 0;
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ u64 reg_val = 0x0;
- ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, 0x1, blkaddr);
- if (ret)
- return ret;
-
- do {
- ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, &reg, blkaddr);
- if (ret)
- return ret;
-
- if (!((reg >> 63) & 0x1))
- break;
-
- usleep_range(10000, 20000);
- if (timeout-- < 0)
- return -EBUSY;
- } while (1);
-
- return ret;
-}
-
-static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
-{
- int ret = 0;
-
- if (cptpf->has_cpt1) {
- ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
- if (ret)
- return ret;
+ if (is_dev_otx2(pdev)) {
+ eng_grps->rid = pdev->revision;
+ return;
}
- return cptx_device_reset(cptpf, BLKADDR_CPT0);
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
+ BLKADDR_CPT0);
+ if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
+ is_dev_cn10ka_ax(pdev))
+ eng_grps->rid = CPT_UC_RID_CN10K_A;
+ else if (cpt_feature_sgv2(pdev))
+ eng_grps->rid = CPT_UC_RID_CN10K_B;
}
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
@@ -643,10 +624,6 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* check if 'implemented' bit is set for block BLKADDR_CPT1 */
cptpf_check_block_implemented(cptpf);
- /* Reset the CPT PF device */
- ret = cptpf_device_reset(cptpf);
- if (ret)
- return ret;
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
@@ -701,6 +678,7 @@ static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
if (ret)
goto destroy_flr;
+ cptpf_get_rid(pdev, cptpf);
/* Get CPT HW capabilities using LOAD_FVC operation. */
ret = otx2_cpt_discover_eng_capabilities(cptpf);
if (ret)
@@ -744,7 +722,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
{
struct device *dev = &pdev->dev;
struct otx2_cptpf_dev *cptpf;
- int err;
+ int err, num_vec;
cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
if (!cptpf)
@@ -779,8 +757,13 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
if (err)
goto clear_drvdata;
- err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ num_vec = pci_msix_vec_count(cptpf->pdev);
+ if (num_vec <= 0) {
+ err = -EINVAL;
+ goto clear_drvdata;
+ }
+
+ err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "Request for %d msix vectors failed\n",
RVU_PF_INT_VEC_CNT);
@@ -797,6 +780,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
goto destroy_afpf_mbox;
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+ cptpf->kvf_limits = 1;
err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
@@ -844,6 +828,14 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
cptpf_sriov_disable(pdev);
otx2_cpt_unregister_dl(cptpf);
+
+ /* Cleanup Inline CPT LF's if attached */
+ if (cptpf->lfs.lfs_num)
+ otx2_inline_cptlf_cleanup(&cptpf->lfs);
+
+ if (cptpf->cpt1_lfs.lfs_num)
+ otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
+
/* Delete sysfs entry created for kernel VF limits */
sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
/* Cleanup engine groups */
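
cptpf_get_rid() decouples the microcode id from the raw PCI revision: OTX2 parts keep pdev->revision, CN10KA A-step parts (or any SGv2 part with the t106 bit set in CPT_AF_CTL) report CPT_UC_RID_CN10K_A (4), and the remaining SGv2 parts report CPT_UC_RID_CN10K_B (5). The rid then drives both the firmware path and the version-string prefix checked at load time:

    /* From the ucode loader below; e.g. rid 4 requests "mrvl/cpt04/<eng>.out". */
    snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out", rid, eng_type);
    sprintf(ver_str_prefix, "ocpt-%02d", rid);  /* must match the ucode header */
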
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 480b3720f..ec1ac7e83 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -78,7 +78,7 @@ static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
- rsp->cpt_revision = cptpf->pdev->revision;
+ rsp->cpt_revision = cptpf->eng_grps.rid;
memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
return 0;
@@ -171,6 +171,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
nix_req->enable = 1;
+ nix_req->credit_th = req->credit_th;
+ nix_req->bpid = req->bpid;
if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
else
@@ -197,12 +199,53 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
}
+int
+otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs)
+{
+ int ret;
+
+ ret = otx2_cptlf_init(lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ return ret;
+ }
+
+ /* Get msix offsets for attached LFs */
+ ret = otx2_cpt_msix_offset_msg(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Register for CPT LF Misc interrupts */
+ ret = otx2_cptlf_register_misc_interrupts(lfs);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+free_irq:
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+cleanup_lf:
+ otx2_cptlf_shutdown(lfs);
+ return ret;
+}
+
+void
+otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ /* Unregister misc interrupt */
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+
+ /* Cleanup LFs */
+ otx2_cptlf_shutdown(lfs);
+}
+
static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *req)
{
struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+ int num_lfs = 1, ret;
u8 egrp;
- int ret;
cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
if (cptpf->lfs.lfs_num) {
@@ -223,11 +266,13 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
&cptpf->afpf_mbox, BLKADDR_CPT0);
- ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
- 1);
+ cptpf->lfs.global_slot = 0;
+ cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
+ cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
+
+ ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs);
if (ret) {
- dev_err(&cptpf->pdev->dev,
- "LF configuration failed for RX inline ipsec.\n");
+ dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n");
return ret;
}
@@ -236,11 +281,13 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
cptpf->reg_base, &cptpf->afpf_mbox,
BLKADDR_CPT1);
- ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
- OTX2_CPT_QUEUE_HI_PRIO, 1);
+ cptpf->cpt1_lfs.global_slot = num_lfs;
+ cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
+ cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
+ ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp,
+ num_lfs);
if (ret) {
- dev_err(&cptpf->pdev->dev,
- "LF configuration failed for RX inline ipsec.\n");
+ dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n");
goto lf_cleanup;
}
cptpf->rsrc_req_blkaddr = 0;
@@ -253,9 +300,9 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
return 0;
lf1_cleanup:
- otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
+ otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
lf_cleanup:
- otx2_cptlf_shutdown(&cptpf->lfs);
+ otx2_inline_cptlf_cleanup(&cptpf->lfs);
return ret;
}
@@ -410,6 +457,8 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct device *dev = &cptpf->pdev->dev;
struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+ struct msix_offset_rsp *rsp_msix;
+ int i;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
@@ -428,6 +477,14 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
RVU_PFVF_PF_MASK;
break;
+ case MBOX_MSG_MSIX_OFFSET:
+ rsp_msix = (struct msix_offset_rsp *) msg;
+ for (i = 0; i < rsp_msix->cptlfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+
+ for (i = 0; i < rsp_msix->cpt1_lfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i];
+ break;
case MBOX_MSG_CPT_RD_WR_REGISTER:
rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
if (msg->rc) {
@@ -449,6 +506,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
break;
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
+ case MBOX_MSG_CPT_LF_RESET:
break;
default:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1958b797a..5c9484646 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -16,7 +16,11 @@
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08
-#define CTX_FLUSH_TIMER_CNT 0xFFFFFF
+/*
+ * Interval to flush dirty data for the next CTX entry. The interval is
+ * measured in increments of 10 ns (interval time = CTX_FLUSH_TIMER_CNT * 10 ns).
+ */
+#define CTX_FLUSH_TIMER_CNT 0x2FAF0
struct fw_info_t {
struct list_head ucodes;
@@ -117,12 +121,10 @@ static char *get_ucode_type_str(int ucode_type)
static int get_ucode_type(struct device *dev,
struct otx2_cpt_ucode_hdr *ucode_hdr,
- int *ucode_type)
+ int *ucode_type, u16 rid)
{
- struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
- struct pci_dev *pdev = cptpf->pdev;
int i, val = 0;
u8 nn;
@@ -130,7 +132,7 @@ static int get_ucode_type(struct device *dev,
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
- sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
+ sprintf(ver_str_prefix, "ocpt-%02d", rid);
if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
return -EINVAL;
@@ -359,7 +361,7 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
}
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
- char *filename)
+ char *filename, u16 rid)
{
struct otx2_cpt_ucode_hdr *ucode_hdr;
struct otx2_cpt_uc_info_t *uc_info;
@@ -375,7 +377,7 @@ static int load_fw(struct device *dev, struct fw_info_t *fw_info,
goto free_uc_info;
ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
- ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
+ ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
if (ret)
goto release_fw;
@@ -389,6 +391,7 @@ static int load_fw(struct device *dev, struct fw_info_t *fw_info,
set_ucode_filename(&uc_info->ucode, filename);
memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
OTX2_CPT_UCODE_VER_STR_SZ);
+ uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
uc_info->ucode.ver_num = ucode_hdr->ver_num;
uc_info->ucode.type = ucode_type;
uc_info->ucode.size = ucode_size;
@@ -448,7 +451,8 @@ static void print_uc_info(struct fw_info_t *fw_info)
}
}
-static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
+static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
+ u16 rid)
{
char filename[OTX2_CPT_NAME_LENGTH];
char eng_type[8] = {0};
@@ -462,9 +466,9 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
eng_type[i] = tolower(eng_type[i]);
snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
- pdev->revision, eng_type);
+ rid, eng_type);
/* Request firmware for each engine type */
- ret = load_fw(&pdev->dev, fw_info, filename);
+ ret = load_fw(&pdev->dev, fw_info, filename, rid);
if (ret)
goto release_fw;
}
@@ -1155,7 +1159,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
if (eng_grps->is_grps_created)
goto unlock;
- ret = cpt_ucode_load_fw(pdev, &fw_info);
+ ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
if (ret)
goto unlock;
@@ -1230,14 +1234,16 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
rnm_to_cpt_errata_fixup(&pdev->dev);
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
+ BLKADDR_CPT0);
/*
* Configure engine group mask to allow context prefetching
* for the groups and enable random number request, to enable
* CPT to request random numbers from RNM.
*/
+ reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
- OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
- BLKADDR_CPT0);
+ reg_val, BLKADDR_CPT0);
/*
* Set interval to periodically flush dirty data for the next
* CTX cache entry. Set the interval count to maximum supported
@@ -1252,10 +1258,12 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
* encounters a fault/poison, a rare case may result in
* unpredictable data being delivered to a CPT engine.
*/
- otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
- BLKADDR_CPT0);
- otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
- reg_val | BIT_ULL(24), BLKADDR_CPT0);
+ if (cpt_is_errata_38550_exists(pdev)) {
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ &reg_val, BLKADDR_CPT0);
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ reg_val | BIT_ULL(24), BLKADDR_CPT0);
+ }
mutex_unlock(&eng_grps->lock);
return 0;
@@ -1412,7 +1420,7 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
int ret;
mutex_lock(&eng_grps->lock);
- ret = cpt_ucode_load_fw(pdev, &fw_info);
+ ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
if (ret) {
mutex_unlock(&eng_grps->lock);
return ret;
@@ -1686,13 +1694,14 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
goto err_unlock;
}
INIT_LIST_HEAD(&fw_info.ucodes);
- ret = load_fw(dev, &fw_info, ucode_filename[0]);
+
+ ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
if (ret) {
dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
goto err_unlock;
}
if (ucode_idx > 1) {
- ret = load_fw(dev, &fw_info, ucode_filename[1]);
+ ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
if (ret) {
dev_err(dev, "Unable to load firmware %s\n",
ucode_filename[1]);
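
Two details worth noting in the hunks above. CTX_FLUSH_TIMER_CNT drops from 0xFFFFFF to 0x2FAF0: in the 10 ns units the comment describes, that shortens the dirty-CTX flush interval from 16,777,215 * 10 ns (about 167.8 ms) to 195,312 * 10 ns (about 1.95 ms). And the CPT_AF_CTL engine-group/RNM write becomes a read-modify-write, preserving bits such as the t106 mode flag instead of overwriting the whole register; the CPT_AF_DIAG bit-24 fixup is likewise now applied only where erratum 38550 exists (OTX2 and CN10KA Ax).
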
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index e69320a54..365fe8943 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -73,7 +73,7 @@ struct otx2_cpt_ucode_hdr {
};
struct otx2_cpt_ucode {
- u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ + 1];/*
* ucode version in readable
* format
*/
@@ -150,6 +150,7 @@ struct otx2_cpt_eng_grps {
int engs_num; /* total number of engines supported */
u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
bool is_grps_created; /* Is the engine groups are already created */
+ u16 rid;
};
struct otx2_cptpf_dev;
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 994291e90..11ab9af1d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -22,6 +22,7 @@ struct otx2_cptvf_dev {
int blkaddr;
void *bbuf_base;
unsigned long cap_flag;
+ u64 eng_caps[OTX2_CPT_MAX_ENG_TYPES];
};
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
@@ -29,5 +30,6 @@ void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev);
+int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf);
#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index e27ddd3c4..1604fc58d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -17,6 +17,7 @@
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"
+#include "cn10k_cpt.h"
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
@@ -384,6 +385,9 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
req_info->is_trunc_hmac = false;
req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->req.cptr = ctx->er_ctx.hw_ctx;
+ req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
+
/*
* We perform an asynchronous send and once
* the request is completed the driver would
@@ -530,6 +534,8 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct crypto_alg *alg = tfm->__crt_alg;
+ struct pci_dev *pdev;
+ int ret, cpu_num;
memset(ctx, 0, sizeof(*ctx));
/*
@@ -541,6 +547,15 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
stfm, sizeof(struct otx2_cpt_req_ctx) +
sizeof(struct skcipher_request));
+ ret = get_se_device(&pdev, &cpu_num);
+ if (ret)
+ return ret;
+
+ ctx->pdev = pdev;
+ ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
+ if (ret)
+ return ret;
+
return cpt_skcipher_fallback_init(ctx, alg);
}
@@ -552,6 +567,7 @@ static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->fbk_cipher);
ctx->fbk_cipher = NULL;
}
+ cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
}
static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
@@ -576,6 +592,8 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
struct crypto_alg *alg = tfm->__crt_alg;
+ struct pci_dev *pdev;
+ int ret, cpu_num;
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
@@ -632,6 +650,15 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
}
crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
+ ret = get_se_device(&pdev, &cpu_num);
+ if (ret)
+ return ret;
+
+ ctx->pdev = pdev;
+ ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
+ if (ret)
+ return ret;
+
return cpt_aead_fallback_init(ctx, alg);
}
@@ -694,6 +721,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
crypto_free_aead(ctx->fbk_cipher);
ctx->fbk_cipher = NULL;
}
+ cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
}
static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
@@ -1299,6 +1327,9 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
+ req_info->req.cptr = ctx->er_ctx.hw_ctx;
+ req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
+
switch (reg_type) {
case OTX2_CPT_AEAD_ENC_DEC_REQ:
status = create_aead_input_list(req, enc);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
index f04184bd1..d29f84f01 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
@@ -9,6 +9,7 @@
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include "otx2_cpt_common.h"
+#include "cn10k_cpt.h"
#define OTX2_CPT_MAX_ENC_KEY_SIZE 32
#define OTX2_CPT_MAX_HASH_KEY_SIZE 64
@@ -123,6 +124,8 @@ struct otx2_cpt_enc_ctx {
u8 key_type;
u8 enc_align_len;
struct crypto_skcipher *fbk_cipher;
+ struct pci_dev *pdev;
+ struct cn10k_cpt_errata_ctx er_ctx;
};
union otx2_cpt_offset_ctrl {
@@ -161,6 +164,8 @@ struct otx2_cpt_aead_ctx {
struct crypto_shash *hashalg;
struct otx2_cpt_sdesc *sdesc;
struct crypto_aead *fbk_cipher;
+ struct cn10k_cpt_errata_ctx er_ctx;
+ struct pci_dev *pdev;
u8 *ipad;
u8 *opad;
u32 enc_key_len;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 215a1b17b..527d34cc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -246,7 +246,8 @@ static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
/* Unregister crypto algorithms */
otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
/* Unregister LFs interrupts */
- otx2_cptlf_unregister_interrupts(lfs);
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+ otx2_cptlf_unregister_done_interrupts(lfs);
/* Cleanup LFs software side */
lf_sw_cleanup(lfs);
/* Free instruction queues */
@@ -280,8 +281,7 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
if (ret)
return ret;
- lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
- num_online_cpus();
+ lfs_num = cptvf->lfs.kvf_limits;
otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
&cptvf->pfvf_mbox, cptvf->blkaddr);
@@ -301,7 +301,11 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
goto cleanup_lf;
/* Register LFs interrupts */
- ret = otx2_cptlf_register_interrupts(lfs);
+ ret = otx2_cptlf_register_misc_interrupts(lfs);
+ if (ret)
+ goto cleanup_lf_sw;
+
+ ret = otx2_cptlf_register_done_interrupts(lfs);
if (ret)
goto cleanup_lf_sw;
@@ -322,7 +326,8 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
disable_irqs:
otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
- otx2_cptlf_unregister_interrupts(lfs);
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+ otx2_cptlf_unregister_done_interrupts(lfs);
cleanup_lf_sw:
lf_sw_cleanup(lfs);
cleanup_lf:
@@ -383,6 +388,17 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
goto destroy_pfvf_mbox;
cptvf->blkaddr = BLKADDR_CPT0;
+
+ cptvf_hw_ops_get(cptvf);
+
+ ret = otx2_cptvf_send_caps_msg(cptvf);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
+ goto unregister_interrupts;
+ }
+ if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
+ cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
+
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
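
At probe the VF now queries the PF for per-engine capabilities before bringing up its LFs, and keys the scatter/gather backend off bit 35 of the SE capability word, a bit not named in the otx2_cpt_eng_caps layout shown earlier but evidently the SGv2 indicator. The dispatch reduces to something like:

    /* Sketch: capability-driven SG backend selection (the bit name is assumed). */
    #define CPT_SE_CAP_SGV2 BIT_ULL(35)

    static void cptvf_pick_sg_backend(struct otx2_cptvf_dev *cptvf)
    {
            if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & CPT_SE_CAP_SGV2)
                    cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
            /* otherwise the ops table default (otx2_sg_info_create) stays */
    }
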
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 75c403f2b..d9fa5f6e2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -72,6 +72,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
struct otx2_cpt_kvf_limits_rsp *rsp_limits;
struct otx2_cpt_egrp_num_rsp *rsp_grp;
+ struct otx2_cpt_caps_rsp *eng_caps;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
int i;
@@ -127,6 +128,13 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
break;
+ case MBOX_MSG_GET_CAPS:
+ eng_caps = (struct otx2_cpt_caps_rsp *)msg;
+ memcpy(cptvf->eng_caps, eng_caps->eng_caps,
+ sizeof(cptvf->eng_caps));
+ break;
+ case MBOX_MSG_CPT_LF_RESET:
+ break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
msg->id);
@@ -205,3 +213,23 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+
+int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct mbox_msghdr *req;
+
+ req = (struct mbox_msghdr *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_caps_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_GET_CAPS;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 811ded72c..5387c68f3 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -4,9 +4,6 @@
#include "otx2_cptvf.h"
#include "otx2_cpt_common.h"
-/* SG list header size in bytes */
-#define SG_LIST_HDR_SIZE 8
-
/* Default timeout when waiting for free pending entry in us */
#define CPT_PENTRY_TIMEOUT 1000
#define CPT_PENTRY_STEP 50
@@ -26,9 +23,9 @@ static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
pr_debug("Gather list size %d\n", req->in_cnt);
for (i = 0; i < req->in_cnt; i++) {
- pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%llx\n", i,
req->in[i].size, req->in[i].vptr,
- (void *) req->in[i].dma_addr);
+ req->in[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n",
req->in[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
@@ -36,9 +33,9 @@ static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
}
pr_debug("Scatter list size %d\n", req->out_cnt);
for (i = 0; i < req->out_cnt; i++) {
- pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%llx\n", i,
req->out[i].size, req->out[i].vptr,
- (void *) req->out[i].dma_addr);
+ req->out[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->out[i].vptr, req->out[i].size, false);
@@ -84,149 +81,6 @@ static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
pentry->busy = false;
}
-static inline int setup_sgio_components(struct pci_dev *pdev,
- struct otx2_cpt_buf_ptr *list,
- int buf_count, u8 *buffer)
-{
- struct otx2_cpt_sglist_component *sg_ptr = NULL;
- int ret = 0, i, j;
- int components;
-
- if (unlikely(!list)) {
- dev_err(&pdev->dev, "Input list pointer is NULL\n");
- return -EFAULT;
- }
-
- for (i = 0; i < buf_count; i++) {
- if (unlikely(!list[i].vptr))
- continue;
- list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
- list[i].size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
- dev_err(&pdev->dev, "Dma mapping failed\n");
- ret = -EIO;
- goto sg_cleanup;
- }
- }
- components = buf_count / 4;
- sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
- for (i = 0; i < components; i++) {
- sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
- sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
- sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
- sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
- sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
- sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
- sg_ptr++;
- }
- components = buf_count % 4;
-
- switch (components) {
- case 3:
- sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
- sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- fallthrough;
- case 2:
- sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
- sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- fallthrough;
- case 1:
- sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
- sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
- break;
- default:
- break;
- }
- return ret;
-
-sg_cleanup:
- for (j = 0; j < i; j++) {
- if (list[j].dma_addr) {
- dma_unmap_single(&pdev->dev, list[j].dma_addr,
- list[j].size, DMA_BIDIRECTIONAL);
- }
-
- list[j].dma_addr = 0;
- }
- return ret;
-}
-
-static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
- struct otx2_cpt_req_info *req,
- gfp_t gfp)
-{
- int align = OTX2_CPT_DMA_MINALIGN;
- struct otx2_cpt_inst_info *info;
- u32 dlen, align_dlen, info_len;
- u16 g_sz_bytes, s_sz_bytes;
- u32 total_mem_len;
-
- if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
- req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
- dev_err(&pdev->dev, "Error too many sg components\n");
- return NULL;
- }
-
- g_sz_bytes = ((req->in_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
-
- dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- align_dlen = ALIGN(dlen, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
-
- info = kzalloc(total_mem_len, gfp);
- if (unlikely(!info))
- return NULL;
-
- info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
-
- ((u16 *)info->in_buffer)[0] = req->out_cnt;
- ((u16 *)info->in_buffer)[1] = req->in_cnt;
- ((u16 *)info->in_buffer)[2] = 0;
- ((u16 *)info->in_buffer)[3] = 0;
- cpu_to_be64s((u64 *)info->in_buffer);
-
- /* Setup gather (input) components */
- if (setup_sgio_components(pdev, req->in, req->in_cnt,
- &info->in_buffer[8])) {
- dev_err(&pdev->dev, "Failed to setup gather list\n");
- goto destroy_info;
- }
-
- if (setup_sgio_components(pdev, req->out, req->out_cnt,
- &info->in_buffer[8 + g_sz_bytes])) {
- dev_err(&pdev->dev, "Failed to setup scatter list\n");
- goto destroy_info;
- }
-
- info->dma_len = total_mem_len - info_len;
- info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
- info->dma_len, DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
- dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
- goto destroy_info;
- }
- /*
- * Get buffer for union otx2_cpt_res_s response
- * structure and its physical address
- */
- info->completion_addr = info->in_buffer + align_dlen;
- info->comp_baddr = info->dptr_baddr + align_dlen;
-
- return info;
-
-destroy_info:
- otx2_cpt_info_destroy(pdev, info);
- return NULL;
-}
-
static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
struct otx2_cpt_pending_queue *pqueue,
struct otx2_cptlf_info *lf)
@@ -247,7 +101,7 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
if (unlikely(!otx2_cptlf_started(lf->lfs)))
return -ENODEV;
- info = info_create(pdev, req, gfp);
+ info = lf->lfs->ops->cpt_sg_info_create(pdev, req, gfp);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Setting up cpt inst info failed");
return -ENOMEM;
@@ -303,9 +157,9 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
	/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
- iq_cmd.dptr = info->dptr_baddr;
- iq_cmd.rptr = 0;
- iq_cmd.cptr.u = 0;
+ iq_cmd.dptr = info->dptr_baddr | info->gthr_sz << 60;
+ iq_cmd.rptr = info->rptr_baddr | info->sctr_sz << 60;
+ iq_cmd.cptr.s.cptr = cpt_req->cptr_dma;
iq_cmd.cptr.s.grp = ctrl->s.grp;
/* Fill in the CPT_INST_S type command for HW interpretation */
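
A note on the new encoding: the gather and scatter list size codes now ride in bits 63:60 of the 64-bit dptr/rptr queue words, leaving the low 60 bits for the DMA address. A minimal user-space sketch of that packing (gthr_sz is taken from the hunk above; the 4-bit field width and the sample values are assumptions):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dptr_baddr = 0x1234567000ULL;	/* DMA address of SG list */
		uint64_t gthr_sz    = 0x3;		/* size code, assumed 4 bits */
		uint64_t dptr       = dptr_baddr | (gthr_sz << 60);

		printf("dptr = %#018llx\n", (unsigned long long)dptr);
		printf("addr = %#llx, size code = %llu\n",
		       (unsigned long long)(dptr & ((1ULL << 60) - 1)),
		       (unsigned long long)(dptr >> 60));
		return 0;
	}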
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index caea98622..7a3083deb 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1121,19 +1121,6 @@ static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
.decrypt = n2_decrypt_chaining,
},
},
- { .name = "cfb(des)",
- .drv_name = "cfb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CFB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-	/* 3DES: ECB CBC and CFB are supported */
+	/* 3DES: ECB and CBC are supported */
{ .name = "ecb(des3_ede)",
@@ -1163,19 +1150,7 @@ static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
.decrypt = n2_decrypt_chaining,
},
},
- { .name = "cfb(des3_ede)",
- .drv_name = "cfb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CFB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
+
/* AES: ECB CBC and CTR are supported */
{ .name = "ecb(aes)",
.drv_name = "ecb-aes",
@@ -1382,8 +1357,12 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
ahash->setkey = n2_hmac_async_setkey;
base = &ahash->halg.base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+ if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+ p->child_alg) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_p;
+ if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
+ p->child_alg) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_p;
base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
base->cra_init = n2_hmac_cra_init;
@@ -1394,6 +1373,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
if (err) {
pr_err("%s alg registration failed\n", base->cra_name);
list_del(&p->derived.entry);
+out_free_p:
kfree(p);
} else {
pr_info("%s alg registered\n", base->cra_name);
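
The truncation fix works because snprintf() returns the length the output would have had without the size limit, so any return value >= CRYPTO_MAX_ALG_NAME means the name was silently cut short. A standalone illustration of the check (the 16-byte limit here is arbitrary):

	#include <stdio.h>

	#define ALG_NAME_MAX 16	/* stand-in for CRYPTO_MAX_ALG_NAME */

	static int build_name(char *buf, const char *child)
	{
		/* snprintf reports the untruncated length, not bytes written */
		if (snprintf(buf, ALG_NAME_MAX, "hmac(%s)", child) >= ALG_NAME_MAX)
			return -1;	/* name would have been truncated */
		return 0;
	}

	int main(void)
	{
		char name[ALG_NAME_MAX];

		printf("short: %d\n", build_name(name, "sha1"));	/* fits: 0 */
		printf("long:  %d\n", build_name(name, "sha3-512-hmac")); /* -1 */
		return 0;
	}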
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index da95747d9..9393e1067 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -445,8 +445,8 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
return PTR_ERR(ctx->fallback_tfm);
}
- tfm->reqsize = sizeof(struct rk_cipher_rctx) +
- crypto_skcipher_reqsize(ctx->fallback_tfm);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct rk_cipher_rctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
return 0;
}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index fabe4f381..3423b5cde 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -15,6 +15,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/engine.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -24,105 +25,101 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#define SHA_BUFFER_LEN PAGE_SIZE
-#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
-
-#define SAHARA_NAME "sahara"
-#define SAHARA_VERSION_3 3
-#define SAHARA_VERSION_4 4
-#define SAHARA_TIMEOUT_MS 1000
-#define SAHARA_MAX_HW_DESC 2
-#define SAHARA_MAX_HW_LINK 20
-
-#define FLAGS_MODE_MASK 0x000f
-#define FLAGS_ENCRYPT BIT(0)
-#define FLAGS_CBC BIT(1)
-
-#define SAHARA_HDR_BASE 0x00800000
-#define SAHARA_HDR_SKHA_ALG_AES 0
-#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
-#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
-#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
-#define SAHARA_HDR_FORM_DATA (5 << 16)
-#define SAHARA_HDR_FORM_KEY (8 << 16)
-#define SAHARA_HDR_LLO (1 << 24)
-#define SAHARA_HDR_CHA_SKHA (1 << 28)
-#define SAHARA_HDR_CHA_MDHA (2 << 28)
-#define SAHARA_HDR_PARITY_BIT (1 << 31)
-
-#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
-#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
-#define SAHARA_HDR_MDHA_HASH 0xA0850000
-#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
-#define SAHARA_HDR_MDHA_ALG_SHA1 0
-#define SAHARA_HDR_MDHA_ALG_MD5 1
-#define SAHARA_HDR_MDHA_ALG_SHA256 2
-#define SAHARA_HDR_MDHA_ALG_SHA224 3
-#define SAHARA_HDR_MDHA_PDATA (1 << 2)
-#define SAHARA_HDR_MDHA_HMAC (1 << 3)
-#define SAHARA_HDR_MDHA_INIT (1 << 5)
-#define SAHARA_HDR_MDHA_IPAD (1 << 6)
-#define SAHARA_HDR_MDHA_OPAD (1 << 7)
-#define SAHARA_HDR_MDHA_SWAP (1 << 8)
-#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
-#define SAHARA_HDR_MDHA_SSL (1 << 10)
-
-/* SAHARA can only process one request at a time */
-#define SAHARA_QUEUE_LENGTH 1
-
-#define SAHARA_REG_VERSION 0x00
-#define SAHARA_REG_DAR 0x04
-#define SAHARA_REG_CONTROL 0x08
-#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
-#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
-#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
-#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
-#define SAHARA_REG_CMD 0x0C
-#define SAHARA_CMD_RESET (1 << 0)
-#define SAHARA_CMD_CLEAR_INT (1 << 8)
-#define SAHARA_CMD_CLEAR_ERR (1 << 9)
-#define SAHARA_CMD_SINGLE_STEP (1 << 10)
-#define SAHARA_CMD_MODE_BATCH (1 << 16)
-#define SAHARA_CMD_MODE_DEBUG (1 << 18)
-#define SAHARA_REG_STATUS 0x10
-#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
-#define SAHARA_STATE_IDLE 0
-#define SAHARA_STATE_BUSY 1
-#define SAHARA_STATE_ERR 2
-#define SAHARA_STATE_FAULT 3
-#define SAHARA_STATE_COMPLETE 4
-#define SAHARA_STATE_COMP_FLAG (1 << 2)
-#define SAHARA_STATUS_DAR_FULL (1 << 3)
-#define SAHARA_STATUS_ERROR (1 << 4)
-#define SAHARA_STATUS_SECURE (1 << 5)
-#define SAHARA_STATUS_FAIL (1 << 6)
-#define SAHARA_STATUS_INIT (1 << 7)
-#define SAHARA_STATUS_RNG_RESEED (1 << 8)
-#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
-#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
-#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
-#define SAHARA_STATUS_MODE_BATCH (1 << 16)
-#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
-#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
-#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
-#define SAHARA_REG_ERRSTATUS 0x14
-#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
-#define SAHARA_ERRSOURCE_CHA 14
-#define SAHARA_ERRSOURCE_DMA 15
-#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
-#define SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
-#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
-#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
-#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
-#define SAHARA_REG_FADDR 0x18
-#define SAHARA_REG_CDAR 0x1C
-#define SAHARA_REG_IDAR 0x20
+#define SHA_BUFFER_LEN PAGE_SIZE
+#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+#define SAHARA_NAME "sahara"
+#define SAHARA_VERSION_3 3
+#define SAHARA_VERSION_4 4
+#define SAHARA_TIMEOUT_MS 1000
+#define SAHARA_MAX_HW_DESC 2
+#define SAHARA_MAX_HW_LINK 20
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+
+#define SAHARA_HDR_BASE 0x00800000
+#define SAHARA_HDR_SKHA_ALG_AES 0
+#define SAHARA_HDR_SKHA_MODE_ECB 0
+#define SAHARA_HDR_SKHA_OP_ENC BIT(2)
+#define SAHARA_HDR_SKHA_MODE_CBC BIT(3)
+#define SAHARA_HDR_FORM_DATA (5 << 16)
+#define SAHARA_HDR_FORM_KEY BIT(19)
+#define SAHARA_HDR_LLO BIT(24)
+#define SAHARA_HDR_CHA_SKHA BIT(28)
+#define SAHARA_HDR_CHA_MDHA BIT(29)
+#define SAHARA_HDR_PARITY_BIT BIT(31)
+
+#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
+#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
+#define SAHARA_HDR_MDHA_HASH 0xA0850000
+#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
+#define SAHARA_HDR_MDHA_ALG_SHA1 0
+#define SAHARA_HDR_MDHA_ALG_MD5 1
+#define SAHARA_HDR_MDHA_ALG_SHA256 2
+#define SAHARA_HDR_MDHA_ALG_SHA224 3
+#define SAHARA_HDR_MDHA_PDATA BIT(2)
+#define SAHARA_HDR_MDHA_HMAC BIT(3)
+#define SAHARA_HDR_MDHA_INIT BIT(5)
+#define SAHARA_HDR_MDHA_IPAD BIT(6)
+#define SAHARA_HDR_MDHA_OPAD BIT(7)
+#define SAHARA_HDR_MDHA_SWAP BIT(8)
+#define SAHARA_HDR_MDHA_MAC_FULL BIT(9)
+#define SAHARA_HDR_MDHA_SSL BIT(10)
+
+#define SAHARA_REG_VERSION 0x00
+#define SAHARA_REG_DAR 0x04
+#define SAHARA_REG_CONTROL 0x08
+#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
+#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
+#define SAHARA_CONTROL_RNG_AUTORSD BIT(7)
+#define SAHARA_CONTROL_ENABLE_INT BIT(4)
+#define SAHARA_REG_CMD 0x0C
+#define SAHARA_CMD_RESET BIT(0)
+#define SAHARA_CMD_CLEAR_INT BIT(8)
+#define SAHARA_CMD_CLEAR_ERR BIT(9)
+#define SAHARA_CMD_SINGLE_STEP BIT(10)
+#define SAHARA_CMD_MODE_BATCH BIT(16)
+#define SAHARA_CMD_MODE_DEBUG BIT(18)
+#define SAHARA_REG_STATUS 0x10
+#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
+#define SAHARA_STATE_IDLE 0
+#define SAHARA_STATE_BUSY 1
+#define SAHARA_STATE_ERR 2
+#define SAHARA_STATE_FAULT 3
+#define SAHARA_STATE_COMPLETE 4
+#define SAHARA_STATE_COMP_FLAG BIT(2)
+#define SAHARA_STATUS_DAR_FULL BIT(3)
+#define SAHARA_STATUS_ERROR BIT(4)
+#define SAHARA_STATUS_SECURE BIT(5)
+#define SAHARA_STATUS_FAIL BIT(6)
+#define SAHARA_STATUS_INIT BIT(7)
+#define SAHARA_STATUS_RNG_RESEED BIT(8)
+#define SAHARA_STATUS_ACTIVE_RNG BIT(9)
+#define SAHARA_STATUS_ACTIVE_MDHA BIT(10)
+#define SAHARA_STATUS_ACTIVE_SKHA BIT(11)
+#define SAHARA_STATUS_MODE_BATCH BIT(16)
+#define SAHARA_STATUS_MODE_DEDICATED BIT(17)
+#define SAHARA_STATUS_MODE_DEBUG BIT(18)
+#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
+#define SAHARA_REG_ERRSTATUS 0x14
+#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
+#define SAHARA_ERRSOURCE_CHA 14
+#define SAHARA_ERRSOURCE_DMA 15
+#define SAHARA_ERRSTATUS_DMA_DIR BIT(8)
+#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
+#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
+#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
+#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
+#define SAHARA_REG_FADDR 0x18
+#define SAHARA_REG_CDAR 0x1C
+#define SAHARA_REG_IDAR 0x20
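
The register block above is a pure notation change: every open-coded (1 << n) becomes BIT(n), which the kernel defines as 1UL << (n), and (2 << 28) becomes the equivalent BIT(29). Besides being shorter, BIT(31) avoids the undefined behaviour of shifting into the sign bit of a 32-bit int. A quick user-space equivalence check, with BIT() re-declared locally:

	#include <assert.h>
	#include <stdio.h>

	#define BIT(nr) (1UL << (nr))	/* as in include/vdso/bits.h */

	int main(void)
	{
		assert(BIT(4)  == (1 << 4));		/* SAHARA_CONTROL_ENABLE_INT */
		assert(BIT(29) == (2UL << 28));		/* SAHARA_HDR_CHA_MDHA */
		assert(BIT(31) == 0x80000000UL);	/* SAHARA_HDR_PARITY_BIT */
		printf("BIT() conversions are value-identical\n");
		return 0;
	}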
struct sahara_hw_desc {
u32 hdr;
@@ -168,7 +165,6 @@ struct sahara_aes_reqctx {
* @total: total number of bytes for transfer
* @last: is this the last block
* @first: is this the first block
- * @active: inside a transfer
*/
struct sahara_sha_reqctx {
u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
@@ -184,7 +180,6 @@ struct sahara_sha_reqctx {
size_t total;
unsigned int last;
unsigned int first;
- unsigned int active;
};
struct sahara_dev {
@@ -193,12 +188,9 @@ struct sahara_dev {
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- spinlock_t queue_spinlock;
- struct task_struct *kthread;
struct completion dma_completion;
struct sahara_ctx *ctx;
- struct crypto_queue queue;
unsigned long flags;
struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
@@ -222,7 +214,7 @@ struct sahara_dev {
struct scatterlist *out_sg;
int nb_out_sg;
- u32 error;
+ struct crypto_engine *engine;
};
static struct sahara_dev *dev_ptr;
@@ -675,7 +667,6 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct sahara_dev *dev = dev_ptr;
- int err = 0;
if (!req->cryptlen)
return 0;
@@ -686,21 +677,12 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
- dev_err(dev->device,
- "request size is not exact amount of AES blocks\n");
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
- }
rctx->mode = mode;
- spin_lock_bh(&dev->queue_spinlock);
- err = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_bh(&dev->queue_spinlock);
-
- wake_up_process(dev->kthread);
-
- return err;
+ return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
}
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
@@ -1001,45 +983,26 @@ static int sahara_sha_process(struct ahash_request *req)
return 0;
}
-static int sahara_queue_manage(void *data)
+static int sahara_do_one_request(struct crypto_engine *engine, void *areq)
{
- struct sahara_dev *dev = data;
- struct crypto_async_request *async_req;
- struct crypto_async_request *backlog;
- int ret = 0;
-
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_bh(&dev->queue_spinlock);
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
- spin_unlock_bh(&dev->queue_spinlock);
-
- if (backlog)
- crypto_request_complete(backlog, -EINPROGRESS);
-
- if (async_req) {
- if (crypto_tfm_alg_type(async_req->tfm) ==
- CRYPTO_ALG_TYPE_AHASH) {
- struct ahash_request *req =
- ahash_request_cast(async_req);
-
- ret = sahara_sha_process(req);
- } else {
- struct skcipher_request *req =
- skcipher_request_cast(async_req);
-
- ret = sahara_aes_process(req);
- }
+ struct crypto_async_request *async_req = areq;
+ int err;
- crypto_request_complete(async_req, ret);
+ if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_AHASH) {
+ struct ahash_request *req = ahash_request_cast(async_req);
- continue;
- }
+ err = sahara_sha_process(req);
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, req, err);
+ local_bh_enable();
+ } else {
+ struct skcipher_request *req = skcipher_request_cast(async_req);
- schedule();
- } while (!kthread_should_stop());
+		err = sahara_aes_process(req);
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, req, err);
+ local_bh_enable();
+ }
return 0;
}
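
For readers new to the crypto_engine API, the handler above follows the usual do_one_request shape; a hedged sketch, with process_request_on_hw() standing in for the driver-specific work:

	static int example_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct crypto_async_request *async_req = areq;
		struct skcipher_request *req = skcipher_request_cast(async_req);
		int err;

		err = process_request_on_hw(req);	/* hypothetical driver work */

		/*
		 * Completion callbacks generally expect softirq context, so
		 * mask bottom halves while finalizing from the engine kthread.
		 */
		local_bh_disable();
		crypto_finalize_skcipher_request(engine, req, err);
		local_bh_enable();

		return 0;	/* 0 means the request was dispatched and finalized */
	}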
@@ -1048,25 +1011,13 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
- int ret;
if (!req->nbytes && !last)
return 0;
rctx->last = last;
- if (!rctx->active) {
- rctx->active = 1;
- rctx->first = 1;
- }
-
- spin_lock_bh(&dev->queue_spinlock);
- ret = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_bh(&dev->queue_spinlock);
-
- wake_up_process(dev->kthread);
-
- return ret;
+ return crypto_transfer_hash_request_to_engine(dev->engine, req);
}
static int sahara_sha_init(struct ahash_request *req)
@@ -1090,7 +1041,7 @@ static int sahara_sha_init(struct ahash_request *req)
}
rctx->context_size = rctx->digest_size + 4;
- rctx->active = 0;
+ rctx->first = 1;
return 0;
}
@@ -1144,94 +1095,114 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct skcipher_alg aes_algs[] = {
+static struct skcipher_engine_alg aes_algs[] = {
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "sahara-ecb-aes",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct sahara_ctx),
- .base.cra_alignmask = 0x0,
- .base.cra_module = THIS_MODULE,
-
- .init = sahara_aes_init_tfm,
- .exit = sahara_aes_exit_tfm,
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_ecb_encrypt,
- .decrypt = sahara_aes_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "sahara-ecb-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
}, {
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "sahara-cbc-aes",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct sahara_ctx),
- .base.cra_alignmask = 0x0,
- .base.cra_module = THIS_MODULE,
-
- .init = sahara_aes_init_tfm,
- .exit = sahara_aes_exit_tfm,
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_cbc_encrypt,
- .decrypt = sahara_aes_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "sahara-cbc-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
}
};
-static struct ahash_alg sha_v3_algs[] = {
+static struct ahash_engine_alg sha_v3_algs[] = {
{
- .init = sahara_sha_init,
- .update = sahara_sha_update,
- .final = sahara_sha_final,
- .finup = sahara_sha_finup,
- .digest = sahara_sha_digest,
- .export = sahara_sha_export,
- .import = sahara_sha_import,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct sahara_sha_reqctx),
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "sahara-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_sha_cra_init,
- }
+ .base = {
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sahara-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ }
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
},
};
-static struct ahash_alg sha_v4_algs[] = {
+static struct ahash_engine_alg sha_v4_algs[] = {
{
- .init = sahara_sha_init,
- .update = sahara_sha_update,
- .final = sahara_sha_final,
- .finup = sahara_sha_finup,
- .digest = sahara_sha_digest,
- .export = sahara_sha_export,
- .import = sahara_sha_import,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct sahara_sha_reqctx),
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "sahara-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_sha_cra_init,
- }
+ .base = {
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sahara-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ }
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
},
};
@@ -1246,14 +1217,11 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
sahara_decode_status(dev, stat);
- if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
+ if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
return IRQ_NONE;
- } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
- dev->error = 0;
- } else {
+
+ if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
sahara_decode_error(dev, err);
- dev->error = -EINVAL;
- }
complete(&dev->dma_completion);
@@ -1264,57 +1232,42 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
static int sahara_register_algs(struct sahara_dev *dev)
{
int err;
- unsigned int i, j, k, l;
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_skcipher(&aes_algs[i]);
- if (err)
- goto err_aes_algs;
- }
+ err = crypto_engine_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+ if (err)
+ return err;
- for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
- err = crypto_register_ahash(&sha_v3_algs[k]);
+ err = crypto_engine_register_ahashes(sha_v3_algs,
+ ARRAY_SIZE(sha_v3_algs));
+ if (err)
+ goto err_aes_algs;
+
+ if (dev->version > SAHARA_VERSION_3) {
+ err = crypto_engine_register_ahashes(sha_v4_algs,
+ ARRAY_SIZE(sha_v4_algs));
if (err)
goto err_sha_v3_algs;
}
- if (dev->version > SAHARA_VERSION_3)
- for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
- err = crypto_register_ahash(&sha_v4_algs[l]);
- if (err)
- goto err_sha_v4_algs;
- }
-
return 0;
-err_sha_v4_algs:
- for (j = 0; j < l; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
-
err_sha_v3_algs:
- for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v3_algs[j]);
+ crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
err_aes_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_skcipher(&aes_algs[j]);
+ crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
return err;
}
static void sahara_unregister_algs(struct sahara_dev *dev)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_skcipher(&aes_algs[i]);
-
- for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
- crypto_unregister_ahash(&sha_v3_algs[i]);
+ crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+ crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
if (dev->version > SAHARA_VERSION_3)
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
- crypto_unregister_ahash(&sha_v4_algs[i]);
+ crypto_engine_unregister_ahashes(sha_v4_algs,
+ ARRAY_SIZE(sha_v4_algs));
}
static const struct of_device_id sahara_dt_ids[] = {
@@ -1351,32 +1304,27 @@ static int sahara_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
0, dev_name(&pdev->dev), dev);
- if (err) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request irq\n");
/* clocks */
- dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(dev->clk_ipg)) {
- dev_err(&pdev->dev, "Could not get ipg clock\n");
- return PTR_ERR(dev->clk_ipg);
- }
+ dev->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
+ if (IS_ERR(dev->clk_ipg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ipg),
+ "Could not get ipg clock\n");
- dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(dev->clk_ahb)) {
- dev_err(&pdev->dev, "Could not get ahb clock\n");
- return PTR_ERR(dev->clk_ahb);
- }
+ dev->clk_ahb = devm_clk_get_enabled(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ahb),
+ "Could not get ahb clock\n");
/* Allocate HW descriptors */
dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
&dev->hw_phys_desc[0], GFP_KERNEL);
- if (!dev->hw_desc[0]) {
- dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
+ if (!dev->hw_desc[0])
return -ENOMEM;
- }
dev->hw_desc[1] = dev->hw_desc[0] + 1;
dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
sizeof(struct sahara_hw_desc);
@@ -1384,10 +1332,8 @@ static int sahara_probe(struct platform_device *pdev)
/* Allocate space for iv and key */
dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
&dev->key_phys_base, GFP_KERNEL);
- if (!dev->key_base) {
- dev_err(&pdev->dev, "Could not allocate memory for key\n");
+ if (!dev->key_base)
return -ENOMEM;
- }
dev->iv_base = dev->key_base + AES_KEYSIZE_128;
dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
@@ -1395,45 +1341,36 @@ static int sahara_probe(struct platform_device *pdev)
dev->context_base = dmam_alloc_coherent(&pdev->dev,
SHA256_DIGEST_SIZE + 4,
&dev->context_phys_base, GFP_KERNEL);
- if (!dev->context_base) {
- dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
+ if (!dev->context_base)
return -ENOMEM;
- }
/* Allocate space for HW links */
dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
&dev->hw_phys_link[0], GFP_KERNEL);
- if (!dev->hw_link[0]) {
- dev_err(&pdev->dev, "Could not allocate hw links\n");
+ if (!dev->hw_link[0])
return -ENOMEM;
- }
for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
sizeof(struct sahara_hw_link);
dev->hw_link[i] = dev->hw_link[i - 1] + 1;
}
- crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
-
- spin_lock_init(&dev->queue_spinlock);
-
dev_ptr = dev;
- dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
- if (IS_ERR(dev->kthread)) {
- return PTR_ERR(dev->kthread);
+ dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
+ if (!dev->engine)
+ return -ENOMEM;
+
+ err = crypto_engine_start(dev->engine);
+ if (err) {
+ crypto_engine_exit(dev->engine);
+ return dev_err_probe(&pdev->dev, err,
+ "Could not start crypto engine\n");
}
init_completion(&dev->dma_completion);
- err = clk_prepare_enable(dev->clk_ipg);
- if (err)
- return err;
- err = clk_prepare_enable(dev->clk_ahb);
- if (err)
- goto clk_ipg_disable;
-
version = sahara_read(dev, SAHARA_REG_VERSION);
if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
if (version != SAHARA_VERSION_3)
@@ -1445,8 +1382,8 @@ static int sahara_probe(struct platform_device *pdev)
version = (version >> 8) & 0xff;
}
if (err == -ENODEV) {
- dev_err(&pdev->dev, "SAHARA version %d not supported\n",
- version);
+ dev_err_probe(&pdev->dev, err,
+ "SAHARA version %d not supported\n", version);
goto err_algs;
}
@@ -1469,11 +1406,7 @@ static int sahara_probe(struct platform_device *pdev)
return 0;
err_algs:
- kthread_stop(dev->kthread);
- dev_ptr = NULL;
- clk_disable_unprepare(dev->clk_ahb);
-clk_ipg_disable:
- clk_disable_unprepare(dev->clk_ipg);
+ crypto_engine_exit(dev->engine);
return err;
}
@@ -1482,14 +1415,8 @@ static void sahara_remove(struct platform_device *pdev)
{
struct sahara_dev *dev = platform_get_drvdata(pdev);
- kthread_stop(dev->kthread);
-
+ crypto_engine_exit(dev->engine);
sahara_unregister_algs(dev);
-
- clk_disable_unprepare(dev->clk_ipg);
- clk_disable_unprepare(dev->clk_ahb);
-
- dev_ptr = NULL;
}
static struct platform_driver sahara_driver = {
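
Condensed, the whole sahara conversion follows the standard crypto_engine lifecycle; a sketch, assuming the second crypto_engine_alloc_init() argument still selects a realtime kthread as in crypto/crypto_engine.c:

	/* probe: allocate and start the engine (replaces kthread_run()) */
	dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!dev->engine)
		return -ENOMEM;
	err = crypto_engine_start(dev->engine);
	if (err) {
		crypto_engine_exit(dev->engine);
		return err;
	}

	/* request path (replaces crypto_enqueue_request() + wake_up_process()) */
	return crypto_transfer_skcipher_request_to_engine(dev->engine, req);

	/* remove: stop the kthread and free the engine (replaces kthread_stop()) */
	crypto_engine_exit(dev->engine);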
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index 2cb192502..cb59357b5 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -4,7 +4,7 @@
config CRYPTO_DEV_JH7110
tristate "StarFive JH7110 cryptographic engine driver"
- depends on SOC_STARFIVE || AMBA_PL08X || COMPILE_TEST
+ depends on (SOC_STARFIVE && AMBA_PL08X) || COMPILE_TEST
depends on HAS_DMA
select CRYPTO_ENGINE
select CRYPTO_HMAC
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 9378e6682..1ac15cc4e 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -262,12 +262,7 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
rctx->csr.aes.mode = hw_mode;
rctx->csr.aes.cmode = !is_encrypt(cryp);
rctx->csr.aes.ie = 1;
-
- if (hw_mode == STARFIVE_AES_MODE_CFB ||
- hw_mode == STARFIVE_AES_MODE_OFB)
- rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128;
- else
- rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
+ rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
if (cryp->side_chan) {
rctx->csr.aes.delay_aes = 1;
@@ -294,8 +289,6 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
starfive_aes_ccm_init(ctx);
starfive_aes_aead_hw_start(ctx, hw_mode);
break;
- case STARFIVE_AES_MODE_OFB:
- case STARFIVE_AES_MODE_CFB:
case STARFIVE_AES_MODE_CBC:
case STARFIVE_AES_MODE_CTR:
starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv);
@@ -500,7 +493,7 @@ static int starfive_aes_prepare_req(struct skcipher_request *req,
scatterwalk_start(&cryp->out_walk, rctx->out_sg);
if (cryp->assoclen) {
- rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL);
+ rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
if (!rctx->adata)
return dev_err_probe(cryp->dev, -ENOMEM,
"Failed to alloc memory for adata");
@@ -569,7 +562,7 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
struct starfive_cryp_ctx *ctx =
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_request_ctx *rctx;
u32 block[AES_BLOCK_32];
u32 stat;
int err;
@@ -579,6 +572,8 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
if (err)
return err;
+ rctx = ctx->rctx;
+
if (!cryp->assoclen)
goto write_text;
@@ -783,26 +778,6 @@ static int starfive_aes_cbc_decrypt(struct skcipher_request *req)
return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC);
}
-static int starfive_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT);
-}
-
-static int starfive_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB);
-}
-
-static int starfive_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT);
-}
-
-static int starfive_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB);
-}
-
static int starfive_aes_ctr_encrypt(struct skcipher_request *req)
{
return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT);
@@ -908,48 +883,6 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.op = {
.do_one_request = starfive_aes_do_one_req,
},
-}, {
- .base.init = starfive_aes_init_tfm,
- .base.setkey = starfive_aes_setkey,
- .base.encrypt = starfive_aes_cfb_encrypt,
- .base.decrypt = starfive_aes_cfb_decrypt,
- .base.min_keysize = AES_MIN_KEY_SIZE,
- .base.max_keysize = AES_MAX_KEY_SIZE,
- .base.ivsize = AES_BLOCK_SIZE,
- .base.base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "starfive-cfb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- },
- .op = {
- .do_one_request = starfive_aes_do_one_req,
- },
-}, {
- .base.init = starfive_aes_init_tfm,
- .base.setkey = starfive_aes_setkey,
- .base.encrypt = starfive_aes_ofb_encrypt,
- .base.decrypt = starfive_aes_ofb_decrypt,
- .base.min_keysize = AES_MIN_KEY_SIZE,
- .base.max_keysize = AES_MAX_KEY_SIZE,
- .base.ivsize = AES_BLOCK_SIZE,
- .base.base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "starfive-ofb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- },
- .op = {
- .do_one_request = starfive_aes_do_one_req,
- },
},
};
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 4f5b68182..425fddf3a 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -109,12 +109,6 @@ static irqreturn_t starfive_cryp_irq(int irq, void *priv)
tasklet_schedule(&cryp->hash_done);
}
- if (status & STARFIVE_IE_FLAG_PKA_DONE) {
- mask |= STARFIVE_IE_MASK_PKA_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- complete(&cryp->pka_done);
- }
-
return IRQ_HANDLED;
}
@@ -159,8 +153,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
"Error getting hardware reset line\n");
- init_completion(&cryp->pka_done);
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index fe011d504..6cdf6db5d 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -50,8 +50,6 @@ union starfive_aes_csr {
u32 ccm_start :1;
#define STARFIVE_AES_MODE_ECB 0x0
#define STARFIVE_AES_MODE_CBC 0x1
-#define STARFIVE_AES_MODE_CFB 0x2
-#define STARFIVE_AES_MODE_OFB 0x3
#define STARFIVE_AES_MODE_CTR 0x4
#define STARFIVE_AES_MODE_CCM 0x5
#define STARFIVE_AES_MODE_GCM 0x6
@@ -125,6 +123,15 @@ union starfive_pka_cacr {
};
};
+union starfive_pka_casr {
+ u32 v;
+ struct {
+#define STARFIVE_PKA_DONE BIT(0)
+ u32 done :1;
+ u32 rsvd_0 :31;
+ };
+};
+
struct starfive_rsa_key {
u8 *n;
u8 *e;
@@ -183,7 +190,6 @@ struct starfive_cryp_dev {
struct crypto_engine *engine;
struct tasklet_struct aes_done;
struct tasklet_struct hash_done;
- struct completion pka_done;
size_t assoclen;
size_t total_in;
size_t total_out;
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index f31bbd825..cf8bda7f0 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -6,13 +6,7 @@
*/
#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-direct.h>
-#include <linux/interrupt.h>
#include <linux/iopoll.h>
-#include <linux/io.h>
-#include <linux/mod_devicetable.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
@@ -28,13 +22,13 @@
#define STARFIVE_PKA_CAER_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x108)
#define STARFIVE_PKA_CANR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x208)
-// R^2 mod N and N0'
+/* R ^ 2 mod N and N0' */
#define CRYPTO_CMD_PRE 0x0
-// A * R mod N ==> A
+/* A * R mod N ==> A */
#define CRYPTO_CMD_ARN 0x5
-// A * E * R mod N ==> A
+/* A * E * R mod N ==> A */
#define CRYPTO_CMD_AERN 0x6
-// A * A * R mod N ==> A
+/* A * A * R mod N ==> A */
#define CRYPTO_CMD_AARN 0x7
#define STARFIVE_RSA_MAX_KEYSZ 256
@@ -43,31 +37,17 @@
static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 status;
- return wait_for_completion_timeout(&cryp->pka_done,
- usecs_to_jiffies(100000));
-}
-
-static inline void starfive_pka_irq_mask_clear(struct starfive_cryp_ctx *ctx)
-{
- struct starfive_cryp_dev *cryp = ctx->cryp;
- u32 stat;
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_PKA_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
-
- reinit_completion(&cryp->pka_done);
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_PKA_CASR_OFFSET, status,
+ status & STARFIVE_PKA_DONE, 10, 100000);
}
static void starfive_rsa_free_key(struct starfive_rsa_key *key)
{
- if (key->d)
- kfree_sensitive(key->d);
- if (key->e)
- kfree_sensitive(key->e);
- if (key->n)
- kfree_sensitive(key->n);
+ kfree_sensitive(key->d);
+ kfree_sensitive(key->e);
+ kfree_sensitive(key->n);
memset(key, 0, sizeof(*key));
}
@@ -114,10 +94,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.not_r2 = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
for (loop = 0; loop <= opsize; loop++)
@@ -136,10 +115,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
} else {
rctx->csr.pka.v = 0;
@@ -151,10 +129,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.pre_expf = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
for (loop = 0; loop <= count; loop++)
@@ -172,10 +149,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
}
@@ -226,11 +202,10 @@ static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
ret = -ETIMEDOUT;
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
goto rsa_err;
if (mlen) {
@@ -242,10 +217,9 @@ static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
goto rsa_err;
}
}
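
The sign flip in every starfive_pka_wait_done() caller is deliberate: wait_for_completion_timeout() returns 0 on timeout (hence the old `if (!...)`), whereas readl_relaxed_poll_timeout() returns 0 on success and -ETIMEDOUT on timeout (hence the new `if (...)`). Side by side, as a sketch of the two idioms:

	/* before: IRQ + completion; zero return means the wait expired */
	if (!wait_for_completion_timeout(&cryp->pka_done,
					 usecs_to_jiffies(100000)))
		return -ETIMEDOUT;

	/* after: poll the CASR done bit every 10us for up to 100ms;
	 * a nonzero return already is the error code (-ETIMEDOUT) */
	ret = readl_relaxed_poll_timeout(cryp->base + STARFIVE_PKA_CASR_OFFSET,
					 status, status & STARFIVE_PKA_DONE,
					 10, 100000);
	if (ret)
		return ret;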
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c3cbc2673..11ad4ffdc 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -838,7 +838,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
{
- tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
return 0;
}
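
Both the rk3288 and stm32 hunks replace direct writes to tfm->reqsize with the crypto_*_set_reqsize() accessors, the supported way to reserve per-request context. A hedged sketch of the usual tfm-init pattern (example_ctx and example_rctx are invented names):

	static int example_init_tfm(struct crypto_skcipher *tfm)
	{
		struct example_ctx *ctx = crypto_skcipher_ctx(tfm);

		ctx->fallback = crypto_alloc_skcipher("cbc(aes)", 0,
						      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback))
			return PTR_ERR(ctx->fallback);

		/* room for our rctx plus whatever the fallback needs */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct example_rctx) +
					    crypto_skcipher_reqsize(ctx->fallback));
		return 0;
	}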
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 8ea1d340e..67998dbd1 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -5,6 +5,7 @@ menuconfig CXL_BUS
select FW_LOADER
select FW_UPLOAD
select PCI_DOE
+ select FIRMWARE_TABLE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -54,8 +55,10 @@ config CXL_MEM_RAW_COMMANDS
config CXL_ACPI
tristate "CXL ACPI: Platform Support"
depends on ACPI
+ depends on ACPI_NUMA
default CXL_BUS
select ACPI_TABLE_LIB
+ select ACPI_HMAT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index b3cec8912..af5cb818f 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"
@@ -17,6 +18,10 @@ struct cxl_cxims_data {
u64 xormaps[] __counted_by(nr_maps);
};
+static const guid_t acpi_cxl_qtg_id_guid =
+ GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
+ 0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
+
/*
* Find a targets entry (n) in the host bridge interleave list.
* CXL Specification 3.0 Table 9-22
@@ -194,6 +199,123 @@ struct cxl_cfmws_context {
int id;
};
+/**
+ * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
+ * @handle: ACPI handle
+ * @coord: performance access coordinates
+ * @entries: number of QTG IDs to return
+ * @qos_class: int array provided by caller to return QTG IDs
+ *
+ * Return: number of QTG IDs returned, or -errno for errors
+ *
+ * Issue the QTG _DSM with the accompanying bandwidth and latency data to get
+ * the QTG IDs suitable for the given performance point, ordered from most to
+ * least suitable. Write the IDs back into @qos_class and return the actual
+ * number of IDs written.
+ */
+static int
+cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
+ int entries, int *qos_class)
+{
+ union acpi_object *out_obj, *out_buf, *obj;
+ union acpi_object in_array[4] = {
+ [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
+ [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
+ [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
+ [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
+ };
+ union acpi_object in_obj = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = 4,
+ .elements = in_array,
+ },
+ };
+ int count, pkg_entries, i;
+ u16 max_qtg;
+ int rc;
+
+ if (!entries)
+ return -EINVAL;
+
+ out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
+ if (!out_obj)
+ return -ENXIO;
+
+ if (out_obj->type != ACPI_TYPE_PACKAGE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* Check Max QTG ID */
+ obj = &out_obj->package.elements[0];
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ max_qtg = obj->integer.value;
+
+ /* It's legal to have 0 QTG entries */
+ pkg_entries = out_obj->package.count;
+ if (pkg_entries <= 1) {
+ rc = 0;
+ goto out;
+ }
+
+ /* Retrieve QTG IDs package */
+ obj = &out_obj->package.elements[1];
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ pkg_entries = obj->package.count;
+ count = min(entries, pkg_entries);
+ for (i = 0; i < count; i++) {
+ u16 qtg_id;
+
+ out_buf = &obj->package.elements[i];
+ if (out_buf->type != ACPI_TYPE_INTEGER) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ qtg_id = out_buf->integer.value;
+ if (qtg_id > max_qtg)
+ pr_warn("QTG ID %u greater than MAX %u\n",
+ qtg_id, max_qtg);
+
+ qos_class[i] = qtg_id;
+ }
+ rc = count;
+
+out:
+ ACPI_FREE(out_obj);
+ return rc;
+}
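
A caller-side sketch of the helper above, assuming a valid ACPI handle and invented coordinate values; per the kernel-doc, a negative return is an error and a non-negative return is the number of QTG IDs written back:

	int qos_class[2];
	struct access_coordinate coord = {
		.read_latency	 = 100,
		.write_latency	 = 500,
		.read_bandwidth	 = 16000,
		.write_bandwidth = 16000,
	};
	int rc;

	rc = cxl_acpi_evaluate_qtg_dsm(handle, &coord,
				       ARRAY_SIZE(qos_class), qos_class);
	if (rc < 0)
		return rc;	/* _DSM failed or package was malformed */
	/* qos_class[0..rc-1] hold QTG IDs, most suitable first */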
+
+static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
+ struct access_coordinate *coord, int entries,
+ int *qos_class)
+{
+ struct device *dev = cxl_root->port.uport_dev;
+ acpi_handle handle;
+
+ if (!dev_is_platform(dev))
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
+}
+
+static const struct cxl_root_ops acpi_root_ops = {
+ .qos_class = cxl_acpi_qos_class,
+};
+
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
@@ -399,8 +521,31 @@ static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
return 0;
}
+static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
+{
+ struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
+ u32 uid;
+ int rc;
+
+ if (kstrtou32(acpi_device_uid(hb), 0, &uid))
+ return -EINVAL;
+
+ rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
+ if (rc < 0)
+ return rc;
+
+ /* Adjust back to picoseconds from nanoseconds */
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ dport->hb_coord[i].read_latency *= 1000;
+ dport->hb_coord[i].write_latency *= 1000;
+ }
+
+ return 0;
+}
+
static int add_host_bridge_dport(struct device *match, void *arg)
{
+ int ret;
acpi_status rc;
struct device *bridge;
struct cxl_dport *dport;
@@ -450,6 +595,10 @@ static int add_host_bridge_dport(struct device *match, void *arg)
if (IS_ERR(dport))
return PTR_ERR(dport);
+ ret = get_genport_coordinates(match, dport);
+ if (ret)
+ dev_dbg(match, "Failed to get generic port perf coordinates.\n");
+
return 0;
}
@@ -666,6 +815,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
struct resource *cxl_res;
+ struct cxl_root *cxl_root;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@@ -685,9 +835,10 @@ static int cxl_acpi_probe(struct platform_device *pdev)
cxl_res->end = -1;
cxl_res->flags = IORESOURCE_MEM;
- root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
- if (IS_ERR(root_port))
- return PTR_ERR(root_port);
+ cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
+ if (IS_ERR(cxl_root))
+ return PTR_ERR(cxl_root);
+ root_port = &cxl_root->port;
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport);
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 1f66b5d4d..9259bcc67 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -13,5 +13,6 @@ cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
cxl_core-y += pmu.o
+cxl_core-y += cdat.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
new file mode 100644
index 000000000..fbf167f9d
--- /dev/null
+++ b/drivers/cxl/core/cdat.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
+#include <linux/acpi.h>
+#include <linux/xarray.h>
+#include <linux/fw_table.h>
+#include <linux/node.h>
+#include <linux/overflow.h>
+#include "cxlpci.h"
+#include "cxlmem.h"
+#include "core.h"
+#include "cxl.h"
+
+struct dsmas_entry {
+ struct range dpa_range;
+ u8 handle;
+ struct access_coordinate coord;
+
+ int entries;
+ int qos_class;
+};
+
+static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cdat_header *hdr = &header->cdat;
+ struct acpi_cdat_dsmas *dsmas;
+ int size = sizeof(*hdr) + sizeof(*dsmas);
+ struct xarray *dsmas_xa = arg;
+ struct dsmas_entry *dent;
+ u16 len;
+ int rc;
+
+ len = le16_to_cpu((__force __le16)hdr->length);
+ if (len != size || (unsigned long)hdr + len > end) {
+ pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
+ return -EINVAL;
+ }
+
+ /* Skip common header */
+ dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);
+
+ dent = kzalloc(sizeof(*dent), GFP_KERNEL);
+ if (!dent)
+ return -ENOMEM;
+
+ dent->handle = dsmas->dsmad_handle;
+ dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
+ dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
+ le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
+
+ rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
+ if (rc) {
+ kfree(dent);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
+{
+ switch (access) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ coord->read_latency = val;
+ coord->write_latency = val;
+ break;
+ case ACPI_HMAT_READ_LATENCY:
+ coord->read_latency = val;
+ break;
+ case ACPI_HMAT_WRITE_LATENCY:
+ coord->write_latency = val;
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ coord->read_bandwidth = val;
+ coord->write_bandwidth = val;
+ break;
+ case ACPI_HMAT_READ_BANDWIDTH:
+ coord->read_bandwidth = val;
+ break;
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ coord->write_bandwidth = val;
+ break;
+ }
+}
+
+static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cdat_header *hdr = &header->cdat;
+ struct acpi_cdat_dslbis *dslbis;
+ int size = sizeof(*hdr) + sizeof(*dslbis);
+ struct xarray *dsmas_xa = arg;
+ struct dsmas_entry *dent;
+ __le64 le_base;
+ __le16 le_val;
+ u64 val;
+ u16 len;
+ int rc;
+
+ len = le16_to_cpu((__force __le16)hdr->length);
+ if (len != size || (unsigned long)hdr + len > end) {
+ pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
+ return -EINVAL;
+ }
+
+ /* Skip common header */
+ dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);
+
+ /* Skip unrecognized data type */
+ if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
+ return 0;
+
+ /* Not a memory type, skip */
+ if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
+ return 0;
+
+ dent = xa_load(dsmas_xa, dslbis->handle);
+ if (!dent) {
+ pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
+ return 0;
+ }
+
+ le_base = (__force __le64)dslbis->entry_base_unit;
+ le_val = (__force __le16)dslbis->entry[0];
+ rc = check_mul_overflow(le64_to_cpu(le_base),
+ le16_to_cpu(le_val), &val);
+ if (rc)
+ pr_warn("DSLBIS value overflowed.\n");
+
+ cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+
+ return 0;
+}
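
check_mul_overflow() wraps __builtin_mul_overflow(): the (possibly wrapped) product is stored through the third argument and the return value is true only on overflow, which is why the handler above just warns and keeps the truncated value. A user-space equivalent:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t base = 1000;	/* e.g. DSLBIS entry_base_unit */
		uint64_t entry = 300;	/* e.g. dslbis->entry[0] */
		uint64_t val;
		bool ovf;

		ovf = __builtin_mul_overflow(base, entry, &val);
		printf("val=%llu overflow=%d\n", (unsigned long long)val, ovf);

		ovf = __builtin_mul_overflow(UINT64_MAX, 2ULL, &val);
		printf("val=%llu overflow=%d\n", (unsigned long long)val, ovf);
		return 0;
	}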
+
+static int cdat_table_parse_output(int rc)
+{
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int cxl_cdat_endpoint_process(struct cxl_port *port,
+ struct xarray *dsmas_xa)
+{
+ int rc;
+
+ rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
+ dsmas_xa, port->cdat.table);
+ rc = cdat_table_parse_output(rc);
+ if (rc)
+ return rc;
+
+ rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
+ dsmas_xa, port->cdat.table);
+ return cdat_table_parse_output(rc);
+}
+
+static int cxl_port_perf_data_calculate(struct cxl_port *port,
+ struct xarray *dsmas_xa)
+{
+ struct access_coordinate ep_c;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct dsmas_entry *dent;
+ int valid_entries = 0;
+ unsigned long index;
+ int rc;
+
+ rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+ if (rc) {
+ dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
+ return rc;
+ }
+
+ rc = cxl_hb_get_perf_coordinates(port, coord);
+ if (rc) {
+ dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
+ return rc;
+ }
+
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+
+ if (!cxl_root)
+ return -ENODEV;
+
+ if (!cxl_root->ops || !cxl_root->ops->qos_class)
+ return -EOPNOTSUPP;
+
+ xa_for_each(dsmas_xa, index, dent) {
+ int qos_class;
+
+ cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
+		/*
+		 * Keep the host bridge coordinates separate from the DSMAS
+		 * coordinates so that access classes 0 and 1 can still be
+		 * calculated for the region later.
+		 */
+ cxl_coordinates_combine(&coord[ACCESS_COORDINATE_LOCAL],
+ &coord[ACCESS_COORDINATE_LOCAL],
+ &dent->coord);
+ dent->entries = 1;
+ rc = cxl_root->ops->qos_class(cxl_root,
+ &coord[ACCESS_COORDINATE_LOCAL],
+ 1, &qos_class);
+ if (rc != 1)
+ continue;
+
+ valid_entries++;
+ dent->qos_class = qos_class;
+ }
+
+ if (!valid_entries)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ dpa_perf->dpa_range = dent->dpa_range;
+ dpa_perf->coord = dent->coord;
+ dpa_perf->qos_class = dent->qos_class;
+ dev_dbg(dev,
+		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw: %d read_lat: %d write_lat: %d\n",
+ dent->dpa_range.start, dpa_perf->qos_class,
+ dent->coord.read_bandwidth, dent->coord.write_bandwidth,
+ dent->coord.read_latency, dent->coord.write_latency);
+}
+
+static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
+ struct xarray *dsmas_xa)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = cxlds->dev;
+ struct range pmem_range = {
+ .start = cxlds->pmem_res.start,
+ .end = cxlds->pmem_res.end,
+ };
+ struct range ram_range = {
+ .start = cxlds->ram_res.start,
+ .end = cxlds->ram_res.end,
+ };
+ struct dsmas_entry *dent;
+ unsigned long index;
+
+ xa_for_each(dsmas_xa, index, dent) {
+ if (resource_size(&cxlds->ram_res) &&
+ range_contains(&ram_range, &dent->dpa_range))
+ update_perf_entry(dev, dent, &mds->ram_perf);
+ else if (resource_size(&cxlds->pmem_res) &&
+ range_contains(&pmem_range, &dent->dpa_range))
+ update_perf_entry(dev, dent, &mds->pmem_perf);
+ else
+ dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
+ dent->dpa_range.start);
+ }
+}
+
+static int match_cxlrd_qos_class(struct device *dev, void *data)
+{
+ int dev_qos_class = *(int *)data;
+ struct cxl_root_decoder *cxlrd;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxlrd = to_cxl_root_decoder(dev);
+ if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ if (cxlrd->qos_class == dev_qos_class)
+ return 1;
+
+ return 0;
+}
+
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
+{
+ *dpa_perf = (struct cxl_dpa_perf) {
+ .qos_class = CXL_QOS_CLASS_INVALID,
+ };
+}
+
+static bool cxl_qos_match(struct cxl_port *root_port,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+ return false;
+
+ if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+ match_cxlrd_qos_class))
+ return false;
+
+ return true;
+}
+
+static int match_cxlrd_hb(struct device *dev, void *data)
+{
+ struct device *host_bridge = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_root_decoder *cxlrd;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxlrd = to_cxl_root_decoder(dev);
+ cxlsd = &cxlrd->cxlsd;
+
+ guard(rwsem_read)(&cxl_region_rwsem);
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ if (host_bridge == cxlsd->target[i]->dport_dev)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct cxl_port *root_port;
+ int rc;
+
+ struct cxl_root *cxl_root __free(put_cxl_root) =
+ find_cxl_root(cxlmd->endpoint);
+
+ if (!cxl_root)
+ return -ENODEV;
+
+ root_port = &cxl_root->port;
+
+ /* Check that the QTG IDs are all sane between end device and root decoders */
+ if (!cxl_qos_match(root_port, &mds->ram_perf))
+ reset_dpa_perf(&mds->ram_perf);
+ if (!cxl_qos_match(root_port, &mds->pmem_perf))
+ reset_dpa_perf(&mds->pmem_perf);
+
+	/* Make sure the device's host bridge is under a root decoder */
+ rc = device_for_each_child(&root_port->dev,
+ cxlmd->endpoint->host_bridge, match_cxlrd_hb);
+ if (!rc) {
+ reset_dpa_perf(&mds->ram_perf);
+ reset_dpa_perf(&mds->pmem_perf);
+ }
+
+ return rc;
+}
+
+static void discard_dsmas(struct xarray *xa)
+{
+ unsigned long index;
+ void *ent;
+
+ xa_for_each(xa, index, ent) {
+ xa_erase(xa, index);
+ kfree(ent);
+ }
+ xa_destroy(xa);
+}
+DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
+
+void cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct xarray __dsmas_xa;
+ struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
+ int rc;
+
+ xa_init(&__dsmas_xa);
+ if (!port->cdat.table)
+ return;
+
+ rc = cxl_cdat_endpoint_process(port, dsmas_xa);
+ if (rc < 0) {
+ dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
+ return;
+ }
+
+ rc = cxl_port_perf_data_calculate(port, dsmas_xa);
+ if (rc) {
+ dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
+ return;
+ }
+
+ cxl_memdev_set_qos_class(cxlds, dsmas_xa);
+ cxl_qos_class_verify(cxlmd);
+ cxl_memdev_update_perf(cxlmd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
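
The early returns in cxl_endpoint_parse_cdat() leak nothing because of the cleanup.h machinery used above: DEFINE_FREE(dsmas, ...) binds a destructor to the name, and the __free(dsmas) annotation runs it when the variable goes out of scope. A minimal sketch of the same pattern (something_failed() is a placeholder):

	DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))

	static void example(void)
	{
		struct xarray storage;
		struct xarray *xa __free(dsmas) = &storage;

		xa_init(&storage);
		if (something_failed())
			return;	/* discard_dsmas(&storage) still runs here */
		/* ... use xa; the cleanup also runs on the normal path ... */
	}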
+
+static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cdat_sslbis_table {
+ struct acpi_cdat_header header;
+ struct acpi_cdat_sslbis sslbis_header;
+ struct acpi_cdat_sslbe entries[];
+ } *tbl = (struct acpi_cdat_sslbis_table *)header;
+ int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
+ struct acpi_cdat_sslbis *sslbis;
+ struct cxl_port *port = arg;
+ struct device *dev = &port->dev;
+ int remain, entries, i;
+ u16 len;
+
+ len = le16_to_cpu((__force __le16)header->cdat.length);
+ remain = len - size;
+ if (!remain || remain % sizeof(tbl->entries[0]) ||
+ (unsigned long)header + len > end) {
+ dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
+ return -EINVAL;
+ }
+
+ sslbis = &tbl->sslbis_header;
+ /* Unrecognized data type, we can skip */
+ if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
+ return 0;
+
+ entries = remain / sizeof(tbl->entries[0]);
+ if (struct_size(tbl, entries, entries) != len)
+ return -EINVAL;
+
+ for (i = 0; i < entries; i++) {
+ u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
+ u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
+ __le64 le_base;
+ __le16 le_val;
+ struct cxl_dport *dport;
+ unsigned long index;
+ u16 dsp_id;
+ u64 val;
+
+ switch (x) {
+ case ACPI_CDAT_SSLBIS_US_PORT:
+ dsp_id = y;
+ break;
+ case ACPI_CDAT_SSLBIS_ANY_PORT:
+ switch (y) {
+ case ACPI_CDAT_SSLBIS_US_PORT:
+ dsp_id = x;
+ break;
+ case ACPI_CDAT_SSLBIS_ANY_PORT:
+ dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
+ break;
+ default:
+ dsp_id = y;
+ break;
+ }
+ break;
+ default:
+ dsp_id = x;
+ break;
+ }
+
+ le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
+ le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
+
+ if (check_mul_overflow(le64_to_cpu(le_base),
+ le16_to_cpu(le_val), &val))
+ dev_warn(dev, "SSLBIS value overflowed!\n");
+
+ xa_for_each(&port->dports, index, dport) {
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id)
+ cxl_access_coordinate_set(&dport->sw_coord,
+ sslbis->data_type,
+ val);
+ }
+ }
+
+ return 0;
+}
+
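The nested switch above encodes the SSLBIS port-pair convention: one side of the (portX, portY) pair marks the upstream switch port and the other names the downstream switch port the entry applies to, with a wildcard matching every dport. A minimal standalone sketch of that selection logic; the two marker values are assumptions mirroring the CDAT specification, not taken from the kernel headers:

```c
#include <assert.h>

/* Assumed marker values, stand-ins for ACPI_CDAT_SSLBIS_US_PORT and
 * ACPI_CDAT_SSLBIS_ANY_PORT. */
#define US_PORT		0x0100
#define ANY_PORT	0xffff

/* Mirrors the dsp_id selection in cdat_sslbis_handler(). */
static int select_dsp_id(int x, int y)
{
	if (x == US_PORT)
		return y;		/* X is upstream, Y names the DSP */
	if (x != ANY_PORT)
		return x;		/* X names the DSP directly */
	if (y == US_PORT)
		return ANY_PORT;	/* Y is upstream, X is the wildcard DSP */
	return y;			/* covers y == ANY_PORT as well */
}

int main(void)
{
	assert(select_dsp_id(US_PORT, 3) == 3);
	assert(select_dsp_id(7, US_PORT) == 7);
	assert(select_dsp_id(ANY_PORT, US_PORT) == ANY_PORT);
	assert(select_dsp_id(ANY_PORT, ANY_PORT) == ANY_PORT);
	return 0;
}
```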
+void cxl_switch_parse_cdat(struct cxl_port *port)
+{
+ int rc;
+
+ if (!port->cdat.table)
+ return;
+
+ rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
+ port, port->cdat.table);
+ rc = cdat_table_parse_output(rc);
+ if (rc)
+ dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+
+/**
+ * cxl_coordinates_combine - Combine the two input coordinates
+ *
+ * @out: Output coordinate of c1 and c2 combined
+ * @c1: input coordinates
+ * @c2: input coordinates
+ */
+void cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ if (c1->write_bandwidth && c2->write_bandwidth)
+ out->write_bandwidth = min(c1->write_bandwidth,
+ c2->write_bandwidth);
+ out->write_latency = c1->write_latency + c2->write_latency;
+
+ if (c1->read_bandwidth && c2->read_bandwidth)
+ out->read_bandwidth = min(c1->read_bandwidth,
+ c2->read_bandwidth);
+ out->read_latency = c1->read_latency + c2->read_latency;
+}
+
+MODULE_IMPORT_NS(CXL);
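cxl_coordinates_combine() above treats bandwidth as a bottleneck and latency as additive. A standalone sketch of the same rule with made-up numbers; note that a zero ("unknown") bandwidth on either side leaves the output's value untouched, exactly as in the kernel function when the output aliases the first input:

```c
#include <stdio.h>

struct coord {
	unsigned int read_bw, write_bw;		/* MB/s; 0 means "unknown" */
	unsigned int read_lat, write_lat;	/* picoseconds */
};

/* Same rule as cxl_coordinates_combine(): min() the bandwidths only when
 * both hops report one, accumulate the latencies unconditionally. */
static struct coord combine(struct coord a, struct coord b)
{
	struct coord out = a;

	if (a.read_bw && b.read_bw)
		out.read_bw = a.read_bw < b.read_bw ? a.read_bw : b.read_bw;
	if (a.write_bw && b.write_bw)
		out.write_bw = a.write_bw < b.write_bw ? a.write_bw : b.write_bw;
	out.read_lat = a.read_lat + b.read_lat;
	out.write_lat = a.write_lat + b.write_lat;
	return out;
}

int main(void)
{
	struct coord link = { 8000, 8000, 200, 200 };
	struct coord sw = { 4000, 0, 150, 150 };	/* no write BW reported */
	struct coord path = combine(link, sw);

	/* read: 4000 MB/s (bottleneck), write: 8000 MB/s (kept), 350 ps each */
	printf("r=%u w=%u rl=%u wl=%u\n",
	       path.read_bw, path.write_bw, path.read_lat, path.write_lat);
	return 0;
}
```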
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 86d7ba232..3b64fb1b9 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -88,4 +88,6 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
+long cxl_pci_get_latency(struct pci_dev *pdev);
+
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 36270dcfb..65185c9fa 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -63,6 +63,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
+ CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
/*
@@ -836,54 +837,37 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-static const uuid_t gen_media_event_uuid =
- UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
- 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6);
-
-/*
- * DRAM Event Record
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
- */
-static const uuid_t dram_event_uuid =
- UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
- 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24);
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-static const uuid_t mem_mod_event_uuid =
- UUID_INIT(0xfe927475, 0xdd59, 0x4339,
- 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
-
-static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
- enum cxl_event_log_type type,
- struct cxl_event_record_raw *record)
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ enum cxl_event_type event_type,
+ const uuid_t *uuid, union cxl_event *evt)
{
- uuid_t *id = &record->hdr.id;
-
- if (uuid_equal(id, &gen_media_event_uuid)) {
- struct cxl_event_gen_media *rec =
- (struct cxl_event_gen_media *)record;
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ trace_cxl_general_media(cxlmd, type, &evt->gen_media);
+ else if (event_type == CXL_CPER_EVENT_DRAM)
+ trace_cxl_dram(cxlmd, type, &evt->dram);
+ else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+ trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
+ else
+ trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
- trace_cxl_general_media(cxlmd, type, rec);
- } else if (uuid_equal(id, &dram_event_uuid)) {
- struct cxl_event_dram *rec = (struct cxl_event_dram *)record;
+static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ struct cxl_event_record_raw *record)
+{
+ enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
+ const uuid_t *uuid = &record->id;
- trace_cxl_dram(cxlmd, type, rec);
- } else if (uuid_equal(id, &mem_mod_event_uuid)) {
- struct cxl_event_mem_module *rec =
- (struct cxl_event_mem_module *)record;
+ if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
+ ev_type = CXL_CPER_EVENT_GEN_MEDIA;
+ else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
+ ev_type = CXL_CPER_EVENT_DRAM;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_MODULE;
- trace_cxl_memory_module(cxlmd, type, rec);
- } else {
- /* For unknown record types print just the header */
- trace_cxl_generic_event(cxlmd, type, record);
- }
+ cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
static int cxl_clear_event_record(struct cxl_memdev_state *mds,
@@ -926,9 +910,12 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
*/
i = 0;
for (cnt = 0; cnt < total; cnt++) {
- payload->handles[i++] = get_pl->records[cnt].hdr.handle;
+ struct cxl_event_record_raw *raw = &get_pl->records[cnt];
+ struct cxl_event_generic *gen = &raw->event.generic;
+
+ payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
- le16_to_cpu(payload->handles[i]));
+ le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@@ -959,24 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .size_out = mds->payload_size,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -991,8 +976,8 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
break;
for (i = 0; i < nr_rec; i++)
- cxl_event_trace_record(cxlmd, type,
- &payload->records[i]);
+ __cxl_event_trace_record(cxlmd, type,
+ &payload->records[i]);
if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
trace_cxl_overflow(cxlmd, type, payload);
@@ -1309,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1321,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
@@ -1404,6 +1388,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
+ mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 2f43d368b..d4e259f3a 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -114,7 +114,7 @@ static DEVICE_ATTR_RO(serial);
static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_to_node(dev));
+ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static ssize_t pmem_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+ __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
+ &dev_attr_pmem_qos_class.attr,
NULL,
};
+static ssize_t ram_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+ __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
+ &dev_attr_ram_qos_class.attr,
NULL,
};
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
.is_visible = cxl_memdev_visible,
};
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_ram_qos_class.attr)
+ if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
+ .is_visible = cxl_ram_visible,
};
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_pmem_qos_class.attr)
+ if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
+ .is_visible = cxl_pmem_visible,
};
static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
NULL,
};
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
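The two is_visible() callbacks plus cxl_memdev_update_perf() form a common sysfs pattern: hide an attribute until its backing data is valid, then re-evaluate the group. A minimal sketch of that pattern with hypothetical names (example_qos_class stands in for the mds->*_perf.qos_class fields):

```c
#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical stand-in for the QoS class; negative means "not yet valid". */
static int example_qos_class = -1;

static ssize_t qos_class_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", example_qos_class);
}
static DEVICE_ATTR_RO(qos_class);

static struct attribute *example_attrs[] = {
	&dev_attr_qos_class.attr,
	NULL,
};

/* Runs whenever the group is (re)evaluated; returning 0 hides the file. */
static umode_t example_visible(struct kobject *kobj, struct attribute *a, int n)
{
	if (a == &dev_attr_qos_class.attr && example_qos_class < 0)
		return 0;
	return a->mode;
}

static const struct attribute_group example_group = {
	.name = "example",
	.attrs = example_attrs,
	.is_visible = example_visible,
};

/* Once example_qos_class becomes valid, re-run the visibility checks:
 *	sysfs_update_group(&dev->kobj, &example_group);
 */
```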
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index deae3289d..e9e6c81ce 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -998,3 +999,38 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
+
+static int cxl_flit_size(struct pci_dev *pdev)
+{
+ if (cxl_pci_flit_256(pdev))
+ return 256;
+
+ return 68;
+}
+
+/**
+ * cxl_pci_get_latency - calculate the link latency for the PCIe link
+ * @pdev: PCI device
+ *
+ * return: calculated latency or 0 for no latency
+ *
+ * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
+ * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
+ * LinkPropagationLatency is negligible, so 0 will be used
+ * RetimerLatency is assumed to be negligible and 0 will be used
+ * FlitLatency = FlitSize / LinkBandwidth
+ * FlitSize is defined by the spec: CXL rev 3.0 4.2.1.
+ * A 68B flit is used up to 32 GT/s; above 32 GT/s, a 256B flit is used.
+ * The FlitLatency is converted to picoseconds.
+ */
+long cxl_pci_get_latency(struct pci_dev *pdev)
+{
+ long bw;
+
+ bw = pcie_link_speed_mbps(pdev);
+ if (bw < 0)
+ return 0;
+ bw /= BITS_PER_BYTE;
+
+ return cxl_flit_size(pdev) * MEGA / bw;
+}
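The conversion works out to FlitSize * 10^6 / (Mbps / 8), yielding picoseconds. A standalone sketch of the arithmetic; the 16 GT/s figure mirrors the 128b/130b-encoded rate pcie_link_speed_mbps() is expected to report, while the 64 GT/s value ignores encoding overhead for simplicity:

```c
#include <stdio.h>

#define MEGA 1000000L

/* Mirrors cxl_pci_get_latency(): FlitLatency = FlitSize / LinkBandwidth,
 * with bandwidth in MB/s and the result in picoseconds. */
static long flit_latency_ps(long flit_bytes, long link_mbps)
{
	long bw = link_mbps / 8;	/* Mb/s -> MB/s, as BITS_PER_BYTE does */

	if (bw <= 0)
		return 0;		/* "no latency" fallback */
	return flit_bytes * MEGA / bw;
}

int main(void)
{
	/* 16 GT/s per lane after 128b/130b encoding overhead. */
	long gen4 = 16000L * 128 / 130;

	printf("68B flit @ 16 GT/s: %ld ps\n",
	       flit_latency_ps(68, gen4));	/* ~34535 ps */
	/* Links above 32 GT/s use 256B flits. */
	printf("256B flit @ 64 GT/s: %ld ps\n",
	       flit_latency_ps(256, 64000));	/* 32000 ps */
	return 0;
}
```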
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index fc94f5240..e69625a8d 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -64,14 +64,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
- struct cxl_port *port = find_cxl_root(cxlmd->endpoint);
+ struct cxl_root *cxl_root __free(put_cxl_root) =
+ find_cxl_root(cxlmd->endpoint);
struct device *dev;
- if (!port)
+ if (!cxl_root)
return NULL;
- dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
- put_device(&port->dev);
+ dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
if (!dev)
return NULL;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 6b386771c..4ae441ef3 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/node.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
@@ -537,7 +538,10 @@ static void cxl_port_release(struct device *dev)
xa_destroy(&port->dports);
xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
- kfree(port);
+ if (is_cxl_root(port))
+ kfree(to_cxl_root(port));
+ else
+ kfree(port);
}
static ssize_t decoders_committed_show(struct device *dev,
@@ -665,17 +669,31 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
struct cxl_dport *parent_dport)
{
- struct cxl_port *port;
+ struct cxl_root *cxl_root __free(kfree) = NULL;
+ struct cxl_port *port, *_port __free(kfree) = NULL;
struct device *dev;
int rc;
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port)
- return ERR_PTR(-ENOMEM);
+ /* No parent_dport, root cxl_port */
+ if (!parent_dport) {
+ cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
+ if (!cxl_root)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ _port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!_port)
+ return ERR_PTR(-ENOMEM);
+ }
rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
if (rc < 0)
- goto err;
+ return ERR_PTR(rc);
+
+ if (cxl_root)
+ port = &no_free_ptr(cxl_root)->port;
+ else
+ port = no_free_ptr(_port);
+
port->id = rc;
port->uport_dev = uport_dev;
@@ -727,10 +745,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
dev->type = &cxl_port_type;
return port;
-
-err:
- kfree(port);
- return ERR_PTR(rc);
}
static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
@@ -837,6 +851,9 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
if (rc)
return ERR_PTR(rc);
+ if (parent_dport && dev_is_pci(uport_dev))
+ port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));
+
return port;
err:
@@ -880,6 +897,22 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
+struct cxl_root *devm_cxl_add_root(struct device *host,
+ const struct cxl_root_ops *ops)
+{
+ struct cxl_root *cxl_root;
+ struct cxl_port *port;
+
+ port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
+ if (IS_ERR(port))
+ return (struct cxl_root *)port;
+
+ cxl_root = to_cxl_root(port);
+ cxl_root->ops = ops;
+ return cxl_root;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
+
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
/* There is no pci_bus associated with a CXL platform-root port */
@@ -935,7 +968,7 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false;
}
-struct cxl_port *find_cxl_root(struct cxl_port *port)
+struct cxl_root *find_cxl_root(struct cxl_port *port)
{
struct cxl_port *iter = port;
@@ -945,10 +978,19 @@ struct cxl_port *find_cxl_root(struct cxl_port *port)
if (!iter)
return NULL;
get_device(&iter->dev);
- return iter;
+ return to_cxl_root(iter);
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
+void put_cxl_root(struct cxl_root *cxl_root)
+{
+ if (!cxl_root)
+ return;
+
+ put_device(&cxl_root->port.dev);
+}
+EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
+
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
@@ -1104,6 +1146,9 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
+ if (dev_is_pci(dport_dev))
+ dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));
+
return dport;
}
@@ -2051,6 +2096,104 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
+/**
+ * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
+ * and host bridge
+ *
+ * @port: endpoint cxl_port
+ * @coord: output access coordinates
+ *
+ * Return: errno on failure, 0 on success.
+ */
+int cxl_hb_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord)
+{
+ struct cxl_port *iter = port;
+ struct cxl_dport *dport;
+
+ if (!is_cxl_endpoint(port))
+ return -EINVAL;
+
+ dport = iter->parent_dport;
+ while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
+ iter = to_cxl_port(iter->dev.parent);
+ dport = iter->parent_dport;
+ }
+
+ coord[ACCESS_COORDINATE_LOCAL] =
+ dport->hb_coord[ACCESS_COORDINATE_LOCAL];
+ coord[ACCESS_COORDINATE_CPU] =
+ dport->hb_coord[ACCESS_COORDINATE_CPU];
+
+ return 0;
+}
+
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+ return is_cxl_root(to_cxl_port(port->dev.parent));
+}
+
+/**
+ * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
+ * of CXL path
+ * @port: endpoint cxl_port
+ * @coord: output performance data
+ *
+ * Return: errno on failure, 0 on success.
+ */
+int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord)
+{
+ struct access_coordinate c = {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ };
+ struct cxl_port *iter = port;
+ struct cxl_dport *dport;
+ struct pci_dev *pdev;
+ unsigned int bw;
+ bool is_cxl_root;
+
+ if (!is_cxl_endpoint(port))
+ return -EINVAL;
+
+ /*
+ * Exit the loop when the parent port of the current iter port is cxl
+ * root. The iterative loop starts at the endpoint and gathers the
+ * latency of the CXL link from the current device/port to the connected
+ * downstream port each iteration.
+ */
+ do {
+ dport = iter->parent_dport;
+ iter = to_cxl_port(iter->dev.parent);
+ is_cxl_root = parent_port_is_cxl_root(iter);
+
+ /*
+ * There is no valid access_coordinate for a root port since RPs do not
+ * have CDAT, so they need to be skipped.
+ */
+ if (!is_cxl_root)
+ cxl_coordinates_combine(&c, &c, &dport->sw_coord);
+ c.write_latency += dport->link_latency;
+ c.read_latency += dport->link_latency;
+ } while (!is_cxl_root);
+
+ /* Get the calculated PCI paths bandwidth */
+ pdev = to_pci_dev(port->uport_dev->parent);
+ bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
+ if (bw == 0)
+ return -ENXIO;
+ bw /= BITS_PER_BYTE;
+
+ c.write_bandwidth = min(c.write_bandwidth, bw);
+ c.read_bandwidth = min(c.read_bandwidth, bw);
+
+ *coord = c;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+
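A standalone sketch of the walk above, with invented numbers: fold in each switch's CDAT coordinates and every link's latency, skip the CDAT-less root-adjacent hop, then clamp bandwidth to what pcie_bandwidth_available() reports for the path:

```c
#include <stdio.h>

struct hop {
	unsigned int sw_bw;	/* switch CDAT bandwidth, MB/s (0 = none) */
	unsigned int sw_lat;	/* switch CDAT latency, ps */
	unsigned int link_lat;	/* computed PCIe link latency, ps */
};

int main(void)
{
	/* Endpoint toward root; all numbers are made up. */
	struct hop path[] = {
		{ 4000, 150, 17000 },	/* endpoint link + first switch */
		{ 8000, 200, 17000 },	/* second switch */
		{ 0,    0,   34000 },	/* root-adjacent hop: link only, no CDAT */
	};
	unsigned int bw = ~0u, lat = 0;
	unsigned int pcie_bw = 3900;	/* pcie_bandwidth_available(), MB/s */

	for (int i = 0; i < 3; i++) {
		if (path[i].sw_bw && path[i].sw_bw < bw)
			bw = path[i].sw_bw;	/* bottleneck bandwidth */
		lat += path[i].sw_lat + path[i].link_lat;
	}
	if (pcie_bw < bw)	/* final clamp against the PCIe path */
		bw = pcie_bw;

	printf("path: %u MB/s, %u ps\n", bw, lat);	/* 3900 MB/s, 68350 ps */
	return 0;
}
```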
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 8f0a2507d..4c7fd2d5c 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -552,8 +552,9 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
dev_name(&cxlr->dev));
if (IS_ERR(res)) {
- dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
- PTR_ERR(res));
+ dev_dbg(&cxlr->dev,
+ "HPA allocation error (%ld) for size:%pap in %s %pr\n",
+ PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
return PTR_ERR(res);
}
@@ -2113,13 +2114,13 @@ static struct cxl_region *to_cxl_region(struct device *dev)
return container_of(dev, struct cxl_region, dev);
}
-static void unregister_region(void *dev)
+static void unregister_region(void *_cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region *cxlr = _cxlr;
struct cxl_region_params *p = &cxlr->params;
int i;
- device_del(dev);
+ device_del(&cxlr->dev);
/*
* Now that region sysfs is shutdown, the parameter block is now
@@ -2130,7 +2131,7 @@ static void unregister_region(void *dev)
detach_target(cxlr, i);
cxl_region_iomem_release(cxlr);
- put_device(dev);
+ put_device(&cxlr->dev);
}
static struct lock_class_key cxl_region_key;
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 372786f80..3c42f984e 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
+ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
- &pdev->resource[bar], &offset, map->reg_type);
+ &pdev->resource[bar], &offset, reg_type);
return false;
}
- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index f01d0709c..e5f13260f 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -181,6 +181,7 @@ TRACE_EVENT(cxl_overflow,
* 1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
* 2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
* pass the dev, log, and CXL event header
+ * NOTE: The uuid must be assigned by the specific trace event
* 3) Use CXL_EVT_TP_printk() instead of TP_printk()
*
* See the generic_event tracepoint as an example.
@@ -203,7 +204,6 @@ TRACE_EVENT(cxl_overflow,
__assign_str(host, dev_name((cxlmd)->dev.parent)); \
__entry->log = (l); \
__entry->serial = (cxlmd)->cxlds->serial; \
- memcpy(&__entry->hdr_uuid, &(hdr).id, sizeof(uuid_t)); \
__entry->hdr_length = (hdr).length; \
__entry->hdr_flags = get_unaligned_le24((hdr).flags); \
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
@@ -225,9 +225,9 @@ TRACE_EVENT(cxl_overflow,
TRACE_EVENT(cxl_generic_event,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_record_raw *rec),
+ const uuid_t *uuid, struct cxl_event_generic *gen_rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, uuid, gen_rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -235,8 +235,9 @@ TRACE_EVENT(cxl_generic_event,
),
TP_fast_assign(
- CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(__entry->data, &rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
+ CXL_EVT_TP_fast_assign(cxlmd, log, gen_rec->hdr);
+ memcpy(&__entry->hdr_uuid, uuid, sizeof(uuid_t));
+ memcpy(__entry->data, gen_rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
),
CXL_EVT_TP_printk("%s",
@@ -337,6 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -423,6 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -570,6 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
/* Memory Module Event */
__entry->event_type = rec->event_type;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 62fa96f85..de477eb7f 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/log2.h>
+#include <linux/node.h>
#include <linux/io.h>
/**
@@ -588,6 +589,7 @@ struct cxl_dax_region {
* @depth: How deep this port is relative to the root. depth 0 is the root.
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
+ * @pci_latency: Upstream latency in picoseconds
*/
struct cxl_port {
struct device dev;
@@ -610,6 +612,30 @@ struct cxl_port {
size_t length;
} cdat;
bool cdat_available;
+ long pci_latency;
+};
+
+/**
+ * struct cxl_root - logical collection of root cxl_port items
+ *
+ * @port: cxl_port member
+ * @ops: cxl root operations
+ */
+struct cxl_root {
+ struct cxl_port port;
+ const struct cxl_root_ops *ops;
+};
+
+static inline struct cxl_root *
+to_cxl_root(const struct cxl_port *port)
+{
+ return container_of(port, struct cxl_root, port);
+}
+
+struct cxl_root_ops {
+ int (*qos_class)(struct cxl_root *cxl_root,
+ struct access_coordinate *coord, int entries,
+ int *qos_class);
};
static inline struct cxl_dport *
@@ -632,6 +658,9 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
+ * @sw_coord: access coordinates (performance) for switch from CDAT
+ * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
struct device *dport_dev;
@@ -641,6 +670,9 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
+ struct access_coordinate sw_coord;
+ struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+ long link_latency;
};
/**
@@ -698,7 +730,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct cxl_port *port);
+struct cxl_root *devm_cxl_add_root(struct device *host,
+ const struct cxl_root_ops *ops);
+struct cxl_root *find_cxl_root(struct cxl_port *port);
+void put_cxl_root(struct cxl_root *cxl_root);
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
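The DEFINE_FREE()/__free() pairing above gives find_cxl_root() callers scope-based cleanup, so no exit path needs an explicit put_cxl_root(). A hypothetical userspace approximation of what the macros reduce to, built on the GCC/Clang cleanup attribute:

```c
#include <stdio.h>

struct root { int refs; };

static struct root the_root = { .refs = 1 };

static struct root *find_root(void)	/* stand-in for find_cxl_root() */
{
	the_root.refs++;
	return &the_root;
}

static void put_root(struct root **p)	/* stand-in for put_cxl_root() */
{
	if (*p) {
		(*p)->refs--;
		printf("reference dropped, refs=%d\n", (*p)->refs);
	}
}

/* Roughly what `struct cxl_root *r __free(put_cxl_root) = ...` expands to. */
#define __free(fn) __attribute__((cleanup(fn)))

int main(void)
{
	struct root *r __free(put_root) = find_root();

	if (!r)
		return 1;	/* put_root() still runs on this path */
	printf("using root, refs=%d\n", r->refs);
	return 0;		/* ...and on this one, automatically */
}
```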
@@ -837,6 +874,20 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
}
#endif
+void cxl_endpoint_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_port *port);
+
+int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord);
+int cxl_hb_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord);
+
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
+void cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index a2fcbca25..20fb3b35e 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -6,6 +6,8 @@
#include <linux/cdev.h>
#include <linux/uuid.h>
#include <linux/rcuwait.h>
+#include <linux/cxl-event.h>
+#include <linux/node.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -392,6 +394,18 @@ enum cxl_devtype {
};
/**
+ * struct cxl_dpa_perf - DPA performance property entry
+ * @dpa_range: range for DPA address
+ * @coord: QoS performance data (i.e. latency, bandwidth)
+ * @qos_class: QoS Class cookies
+ */
+struct cxl_dpa_perf {
+ struct range dpa_range;
+ struct access_coordinate coord;
+ int qos_class;
+};
+
+/**
* struct cxl_dev_state - The driver device state
*
* cxl_dev_state represents the CXL driver/device state. It provides an
@@ -455,6 +469,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -475,6 +491,10 @@ struct cxl_memdev_state {
u64 active_persistent_bytes;
u64 next_volatile_bytes;
u64 next_persistent_bytes;
+
+ struct cxl_dpa_perf ram_perf;
+ struct cxl_dpa_perf pmem_perf;
+
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
@@ -503,6 +523,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_FW_INFO = 0x0200,
CXL_MBOX_OP_TRANSFER_FW = 0x0201,
CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
+ CXL_MBOX_OP_GET_TIMESTAMP = 0x0300,
CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
CXL_MBOX_OP_GET_LOG = 0x0401,
@@ -580,25 +601,28 @@ struct cxl_mbox_identify {
} __packed;
/*
- * Common Event Record Format
- * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ * General Media Event Record UUID
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
-struct cxl_event_record_hdr {
- uuid_t id;
- u8 length;
- u8 flags[3];
- __le16 handle;
- __le16 related_handle;
- __le64 timestamp;
- u8 maint_op_class;
- u8 reserved[15];
-} __packed;
+#define CXL_EVENT_GEN_MEDIA_UUID \
+ UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, 0x85, 0xa9, 0x08, 0x8b, 0x16, \
+ 0x21, 0xeb, 0xa6)
-#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
-struct cxl_event_record_raw {
- struct cxl_event_record_hdr hdr;
- u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
-} __packed;
+/*
+ * DRAM Event Record UUID
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CXL_EVENT_DRAM_UUID \
+ UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, \
+ 0x5c, 0x96, 0x24)
+
+/*
+ * Memory Module Event Record UUID
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CXL_EVENT_MEM_MODULE_UUID \
+ UUID_INIT(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86, 0x79, 0xba, 0xb1, \
+ 0x13, 0xb7, 0x74)
/*
* Get Event Records output payload
@@ -641,74 +665,6 @@ struct cxl_mbox_clear_event_payload {
} __packed;
#define CXL_CLEAR_EVENT_MAX_HANDLES U8_MAX
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
-struct cxl_event_gen_media {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
- u8 descriptor;
- u8 type;
- u8 transaction_type;
- u8 validity_flags[2];
- u8 channel;
- u8 rank;
- u8 device[3];
- u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
- u8 reserved[46];
-} __packed;
-
-/*
- * DRAM Event Record - DER
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
- */
-#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
-struct cxl_event_dram {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
- u8 descriptor;
- u8 type;
- u8 transaction_type;
- u8 validity_flags[2];
- u8 channel;
- u8 rank;
- u8 nibble_mask[3];
- u8 bank_group;
- u8 bank;
- u8 row[3];
- u8 column[2];
- u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
- u8 reserved[0x17];
-} __packed;
-
-/*
- * Get Health Info Record
- * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
- */
-struct cxl_get_health_info {
- u8 health_status;
- u8 media_status;
- u8 add_status;
- u8 life_used;
- u8 device_temp[2];
- u8 dirty_shutdown_cnt[4];
- u8 cor_vol_err_cnt[4];
- u8 cor_per_err_cnt[4];
-} __packed;
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-struct cxl_event_mem_module {
- struct cxl_event_record_hdr hdr;
- u8 event_type;
- struct cxl_get_health_info info;
- u8 reserved[0x3d];
-} __packed;
-
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
__le64 active_persistent_cap;
@@ -866,6 +822,10 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ enum cxl_event_type event_type,
+ const uuid_t *uuid, union cxl_event *evt);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 0fa4799ea..711b05d9a 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -85,6 +85,19 @@ struct cdat_entry_header {
__le16 length;
} __packed;
+/*
+ * CXL v3.0 6.2.3 Table 6-4
+ * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flit
+ * mode; otherwise it is in 68B flit mode.
+ */
+static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
+{
+ u16 lnksta2;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
+ return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
+}
+
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index e087febf9..0c79d9ce8 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -217,16 +217,15 @@ static DEVICE_ATTR_WO(trigger_poison_list);
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
- if (a == &dev_attr_trigger_poison_list.attr) {
- struct device *dev = kobj_to_dev(kobj);
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds =
- to_cxl_memdev_state(cxlmd->cxlds);
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ if (a == &dev_attr_trigger_poison_list.attr)
if (!test_bit(CXL_POISON_ENABLED_LIST,
mds->poison.enabled_cmds))
return 0;
- }
+
return a->mode;
}
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 0155fb66b..2ff361e75 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <asm-generic/unaligned.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
@@ -381,7 +382,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
return rc;
}
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
@@ -440,7 +441,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
/* background command interrupts are optional */
- if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
return 0;
msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
@@ -587,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
int nvecs;
@@ -604,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nvecs < 1) {
dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
- return -ENXIO;
+ return false;
}
- return 0;
+ return true;
}
static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -742,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
}
static int cxl_event_config(struct pci_host_bridge *host_bridge,
- struct cxl_memdev_state *mds)
+ struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_event_interrupt_policy policy;
int rc;
@@ -754,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (!host_bridge->native_cxl_error)
return 0;
+ if (!irq_avail) {
+ dev_info(mds->cxlds.dev, "No interrupt support, disabling event processing.\n");
+ return 0;
+ }
+
rc = cxl_mem_alloc_event_buf(mds);
if (rc)
return rc;
@@ -788,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
int i, rc, pmu_count;
+ bool irq_avail;
/*
* Double check the anonymous union trickery in struct cxl_regs
@@ -845,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
- rc = cxl_alloc_irq_vectors(pdev);
- if (rc)
- return rc;
+ irq_avail = cxl_alloc_irq_vectors(pdev);
- rc = cxl_pci_setup_mailbox(mds);
+ rc = cxl_pci_setup_mailbox(mds, irq_avail);
if (rc)
return rc;
@@ -908,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- rc = cxl_event_config(host_bridge, mds);
+ rc = cxl_event_config(host_bridge, mds, irq_avail);
if (rc)
return rc;
@@ -969,6 +974,6 @@ static struct pci_driver cxl_pci_driver = {
},
};
-MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
+MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 47bc8e0b8..97c215666 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -69,6 +69,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
if (rc < 0)
return rc;
+ cxl_switch_parse_cdat(port);
+
cxlhdm = devm_cxl_setup_hdm(port, NULL);
if (!IS_ERR(cxlhdm))
return devm_cxl_enumerate_decoders(cxlhdm, NULL);
@@ -109,6 +111,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
+ cxl_endpoint_parse_cdat(port);
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
@@ -127,14 +130,15 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
* This can't fail in practice as CXL root exit unregisters all
* descendant ports and that in turn synchronizes with cxl_port_probe()
*/
- root = find_cxl_root(port);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+
+ root = &cxl_root->port;
/*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
device_for_each_child(&port->dev, root, discover_region);
- put_device(&root->dev);
return 0;
}
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 1659b787b..1ff1ab5fa 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -367,6 +367,7 @@ static ssize_t create_store(struct device *dev, struct device_attribute *attr,
.dax_region = dax_region,
.size = 0,
.id = -1,
+ .memmap_on_memory = false,
};
struct dev_dax *dev_dax = devm_create_dev_dax(&data);
@@ -1400,6 +1401,8 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
dev_dax->align = dax_region->align;
ida_init(&dev_dax->ida);
+ dev_dax->memmap_on_memory = data->memmap_on_memory;
+
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
dev->bus = &dax_bus_type;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 1ccd23360..cbbf64443 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -23,6 +23,7 @@ struct dev_dax_data {
struct dev_pagemap *pgmap;
resource_size_t size;
int id;
+ bool memmap_on_memory;
};
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data);
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
index 8bc9d0403..c696837ab 100644
--- a/drivers/dax/cxl.c
+++ b/drivers/dax/cxl.c
@@ -26,6 +26,7 @@ static int cxl_dax_region_probe(struct device *dev)
.dax_region = dax_region,
.id = -1,
.size = range_len(&cxlr_dax->hpa_range),
+ .memmap_on_memory = true,
};
return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 27cf2daaa..446617b73 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -70,6 +70,7 @@ struct dev_dax {
struct ida ida;
struct device dev;
struct dev_pagemap *pgmap;
+ bool memmap_on_memory;
int nr_range;
struct dev_dax_range {
unsigned long pgoff;
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 5d2ddef0f..b9da69f92 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -36,6 +36,7 @@ static int dax_hmem_probe(struct platform_device *pdev)
.dax_region = dax_region,
.id = -1,
.size = region_idle ? 0 : range_len(&mri->range),
+ .memmap_on_memory = false,
};
return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 369c698b7..42ee360cf 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory-tiers.h>
+#include <linux/memory_hotplug.h>
#include "dax-private.h"
#include "bus.h"
@@ -93,6 +94,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
struct dax_kmem_data *data;
struct memory_dev_type *mtype;
int i, rc, mapped = 0;
+ mhp_t mhp_flags;
int numa_node;
int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;
@@ -179,12 +181,16 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
*/
res->flags = IORESOURCE_SYSTEM_RAM;
+ mhp_flags = MHP_NID_IS_MGID;
+ if (dev_dax->memmap_on_memory)
+ mhp_flags |= MHP_MEMMAP_ON_MEMORY;
+
/*
* Ensure that future kexec'd kernels will not treat
* this as RAM automatically.
*/
rc = add_memory_driver_managed(data->mgid, range.start,
- range_len(&range), kmem_name, MHP_NID_IS_MGID);
+ range_len(&range), kmem_name, mhp_flags);
if (rc) {
dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index ae0cb113a..f3c6c67b8 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -63,6 +63,7 @@ static struct dev_dax *__dax_pmem_probe(struct device *dev)
.id = id,
.pgmap = &pgmap,
.size = range_len(&range),
+ .memmap_on_memory = false,
};
return devm_create_dev_dax(&data);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0da9232ea..f4b635526 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -326,7 +326,8 @@ void kill_dax(struct dax_device *dax_dev)
return;
if (dax_dev->holder_data != NULL)
- dax_holder_notify_failure(dax_dev, 0, U64_MAX, 0);
+ dax_holder_notify_failure(dax_dev, 0, U64_MAX,
+ MF_MEM_PRE_REMOVE);
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7162d2bad..98657d3b9 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1717,7 +1717,7 @@ static ssize_t trans_stat_show(struct device *dev,
max_state = df->max_state;
if (max_state == 0)
- return scnprintf(buf, PAGE_SIZE, "Not Supported.\n");
+ return sysfs_emit(buf, "Not Supported.\n");
mutex_lock(&df->lock);
if (!df->stop_polling &&
@@ -1727,47 +1727,44 @@ static ssize_t trans_stat_show(struct device *dev,
}
mutex_unlock(&df->lock);
- len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
- len += scnprintf(buf + len, PAGE_SIZE - len, " :");
+ len += sysfs_emit_at(buf, len, " From : To\n");
+ len += sysfs_emit_at(buf, len, " :");
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu",
- df->freq_table[i]);
+ len += sysfs_emit_at(buf, len, "%10lu",
+ df->freq_table[i]);
}
+
if (len >= PAGE_SIZE - 1)
return PAGE_SIZE - 1;
-
- len += scnprintf(buf + len, PAGE_SIZE - len, " time(ms)\n");
+ len += sysfs_emit_at(buf, len, " time(ms)\n");
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
- if (df->freq_table[i] == df->previous_freq)
- len += scnprintf(buf + len, PAGE_SIZE - len, "*");
+ if (df->freq_table[i] == df->previous_freq)
+ len += sysfs_emit_at(buf, len, "*");
else
- len += scnprintf(buf + len, PAGE_SIZE - len, " ");
+ len += sysfs_emit_at(buf, len, " ");
if (len >= PAGE_SIZE - 1)
break;
-
- len += scnprintf(buf + len, PAGE_SIZE - len, "%10lu:",
- df->freq_table[i]);
+ len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]);
for (j = 0; j < max_state; j++) {
if (len >= PAGE_SIZE - 1)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%10u",
- df->stats.trans_table[(i * max_state) + j]);
+ len += sysfs_emit_at(buf, len, "%10u",
+ df->stats.trans_table[(i * max_state) + j]);
}
if (len >= PAGE_SIZE - 1)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%10llu\n", (u64)
- jiffies64_to_msecs(df->stats.time_in_state[i]));
+ len += sysfs_emit_at(buf, len, "%10llu\n", (u64)
+ jiffies64_to_msecs(df->stats.time_in_state[i]));
}
if (len < PAGE_SIZE - 1)
- len += scnprintf(buf + len, PAGE_SIZE - len, "Total transition : %u\n",
- df->stats.total_trans);
-
+ len += sysfs_emit_at(buf, len, "Total transition : %u\n",
+ df->stats.total_trans);
if (len >= PAGE_SIZE - 1) {
pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG;
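sysfs_emit_at() appends at a caller-supplied offset and refuses to write past PAGE_SIZE internally, which is what lets most of the manual "PAGE_SIZE - len" arithmetic above disappear. A minimal hypothetical attribute using the same pattern:

```c
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t freq_table_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	/* Made-up frequencies, stand-ins for df->freq_table[]. */
	static const unsigned long freqs[] = { 100000, 200000, 400000 };
	ssize_t len = 0;
	int i;

	/* Each call appends at the running offset; no bounds bookkeeping. */
	for (i = 0; i < ARRAY_SIZE(freqs); i++)
		len += sysfs_emit_at(buf, len, "%10lu\n", freqs[i]);

	return len;
}
static DEVICE_ATTR_RO(freq_table);
```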
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 21916bba7..8fe5aa67b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -46,12 +46,12 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
struct dma_buf *dmabuf;
char name[DMA_BUF_NAME_LEN];
- size_t ret = 0;
+ ssize_t ret = 0;
dmabuf = dentry->d_fsdata;
spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
- ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
+ ret = strscpy(name, dmabuf->name, sizeof(name));
spin_unlock(&dmabuf->name_lock);
return dynamic_dname(buffer, buflen, "/%s:%s",
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 8aa8f8cb7..e0fd99e61 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -934,7 +934,8 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
* the GPU's devfreq to reduce frequency, when in fact the opposite is what is
* needed.
*
- * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
+ * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline
+ * (or indirectly via userspace facing ioctls like &sync_set_deadline).
* The deadline hint provides a way for the waiting driver, or userspace, to
* convey an appropriate sense of urgency to the signaling driver.
*
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index ee899f8e6..4a63567e9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
- vmf->page = buffer->pages[vmf->pgoff];
- get_page(vmf->page);
-
- return 0;
+ return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}
static const struct vm_operations_struct dma_heap_vm_ops = {
@@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;
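With the VMA marked VM_PFNMAP, the core MM no longer takes page references at fault time, so the handler installs a raw PFN rather than returning a refcounted page. A hypothetical fault handler following the same pattern (the buffer type is invented for illustration):

```c
#include <linux/mm.h>

struct pinned_buffer {
	struct page **pages;
	pgoff_t pagecount;
};

static vm_fault_t pinned_buffer_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct pinned_buffer *buf = vma->vm_private_data;

	if (vmf->pgoff >= buf->pagecount)
		return VM_FAULT_SIGBUS;

	/* Map the PFN directly; no get_page(), nothing for MM to put. */
	return vmf_insert_pfn(vma, vmf->address,
			      page_to_pfn(buf->pages[vmf->pgoff]));
}
```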
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 9c2a0c082..ed4b32388 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -84,11 +84,11 @@ static int sanitycheck(void *arg)
return -ENOMEM;
chain = mock_chain(NULL, f, 1);
- if (!chain)
+ if (chain)
+ dma_fence_enable_sw_signaling(chain);
+ else
err = -ENOMEM;
- dma_fence_enable_sw_signaling(chain);
-
dma_fence_signal(f);
dma_fence_put(f);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f0a35277f..c35302978 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -52,12 +52,33 @@ struct sw_sync_create_fence_data {
__s32 fence; /* fd of new fence */
};
+/**
+ * struct sw_sync_get_deadline - get the deadline hint of a sw_sync fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ * @fence_fd: the sw_sync fence fd (in)
+ *
+ * Return the earliest deadline set on the fence. The timebase for the
+ * deadline is CLOCK_MONOTONIC (same as vblank). If there is no deadline
+ * set on the fence, this ioctl will return -ENOENT.
+ */
+struct sw_sync_get_deadline {
+ __u64 deadline_ns;
+ __u32 pad;
+ __s32 fence_fd;
+};
+
#define SW_SYNC_IOC_MAGIC 'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+#define SW_SYNC_GET_DEADLINE _IOWR(SW_SYNC_IOC_MAGIC, 2, \
+ struct sw_sync_get_deadline)
+
+
+#define SW_SYNC_HAS_DEADLINE_BIT DMA_FENCE_FLAG_USER_BITS
static const struct dma_fence_ops timeline_fence_ops;
@@ -171,6 +192,22 @@ static void timeline_fence_timeline_value_str(struct dma_fence *fence,
snprintf(str, size, "%d", parent->value);
}
+static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
+{
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ if (ktime_before(deadline, pt->deadline))
+ pt->deadline = deadline;
+ } else {
+ pt->deadline = deadline;
+ __set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+}
+
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
@@ -179,6 +216,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
.timeline_value_str = timeline_fence_timeline_value_str,
+ .set_deadline = timeline_fence_set_deadline,
};
/**
@@ -387,6 +425,47 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
return 0;
}
+static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long arg)
+{
+ struct sw_sync_get_deadline data;
+ struct dma_fence *fence;
+ unsigned long flags;
+ struct sync_pt *pt;
+ int ret = 0;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+
+ if (data.deadline_ns || data.pad)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(data.fence_fd);
+ if (!fence)
+ return -EINVAL;
+
+ pt = dma_fence_to_sync_pt(fence);
+ if (!pt)
+ return -EINVAL;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+ data.deadline_ns = ktime_to_ns(pt->deadline);
+ } else {
+ ret = -ENOENT;
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ dma_fence_put(fence);
+
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -399,6 +478,9 @@ static long sw_sync_ioctl(struct file *file, unsigned int cmd,
case SW_SYNC_IOC_INC:
return sw_sync_ioctl_inc(obj, arg);
+ case SW_SYNC_GET_DEADLINE:
+ return sw_sync_ioctl_get_deadline(obj, arg);
+
default:
return -ENOTTY;
}
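sw_sync is debugfs-only test infrastructure, so userspace mirrors its private ABI locally. A hypothetical helper querying a fence's deadline; note the ioctl is issued on the timeline fd while the fence is identified by fd inside the payload, and the struct layout below simply restates the definition added above:

```c
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Local mirror of the private sw_sync ABI shown above. */
struct sw_sync_get_deadline {
	uint64_t deadline_ns;
	uint32_t pad;
	int32_t  fence_fd;
};

#define SW_SYNC_IOC_MAGIC    'W'
#define SW_SYNC_GET_DEADLINE _IOWR(SW_SYNC_IOC_MAGIC, 2, \
				   struct sw_sync_get_deadline)

/* deadline_ns and pad must be zero on input or the kernel rejects the
 * call with -EINVAL; -ENOENT means no deadline hint was ever set. */
static int get_fence_deadline(int timeline_fd, int fence_fd, uint64_t *out_ns)
{
	struct sw_sync_get_deadline req = { .fence_fd = fence_fd };

	if (ioctl(timeline_fd, SW_SYNC_GET_DEADLINE, &req))
		return -errno;
	*out_ns = req.deadline_ns;
	return 0;
}
```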
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 6176e52ba..a1bdd62ef 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -55,11 +55,13 @@ static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
* @base: base fence object
* @link: link on the sync timeline's list
* @node: node in the sync timeline's tree
+ * @deadline: the earliest fence deadline hint
*/
struct sync_pt {
struct dma_fence base;
struct list_head link;
struct rb_node node;
+ ktime_t deadline;
};
extern const struct file_operations sw_sync_debugfs_fops;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2e9a316c5..d9b1c1b2a 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -347,6 +347,22 @@ out:
return ret;
}
+static int sync_file_ioctl_set_deadline(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ struct sync_set_deadline ts;
+
+ if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
+ return -EFAULT;
+
+ if (ts.pad)
+ return -EINVAL;
+
+ dma_fence_set_deadline(sync_file->fence, ns_to_ktime(ts.deadline_ns));
+
+ return 0;
+}
+
static long sync_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -359,6 +375,9 @@ static long sync_file_ioctl(struct file *file, unsigned int cmd,
case SYNC_IOC_FILE_INFO:
return sync_file_ioctl_fence_info(sync_file, arg);
+ case SYNC_IOC_SET_DEADLINE:
+ return sync_file_ioctl_set_deadline(sync_file, arg);
+
default:
return -ENOTTY;
}
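For the public half of the interface, a hypothetical userspace helper attaching a deadline hint to any sync_file fd; it assumes kernel UAPI headers new enough to carry SYNC_IOC_SET_DEADLINE and struct sync_set_deadline:

```c
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>	/* struct sync_set_deadline, SYNC_IOC_SET_DEADLINE */

/* Attach a CLOCK_MONOTONIC deadline hint to a fence fd (e.g. one returned
 * by a GPU driver or sw_sync). */
static int set_fence_deadline(int fence_fd, uint64_t deadline_ns)
{
	struct sync_set_deadline ts = {
		.deadline_ns = deadline_ns,
		/* .pad must stay zero or the kernel returns -EINVAL */
	};

	return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &ts) ? -errno : 0;
}
```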
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de6eb370d..002a5ec80 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -378,6 +378,20 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config LS2X_APB_DMA
+ tristate "Loongson LS2X APB DMA support"
+ depends on LOONGARCH || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Loongson LS2X APB DMA controller driver. The
+ DMA controller has a single DMA channel that can be configured
+ for different peripherals, such as audio, NAND and SDIO, on the
+ APB bus.
+
+ This DMA controller transfers data from memory to peripheral fifo.
+ It does not support memory to memory data transfer.
+
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 83553a97a..dfd40d14e 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fb89ecbf0..40052d1bd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
- * @sg_len: number of sg entries.
+ * @sglen: number of sg entries.
* @sg: array of sgs.
+ * @boundary: number of transfers to perform before the automatic address increment operation
+ * @dst_hole: value to add to the destination address when the boundary has been reached
+ * @src_hole: value to add to the source address when the boundary has been reached
+ * @memset_buffer: buffer used for the memset operation
+ * @memset_paddr: physical address of the buffer used for the memset operation
+ * @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
- * atc_status - information bits stored in channel status flag
+ * enum atc_status - information bits stored in channel status flag
+ *
+ * @ATC_IS_PAUSED: If channel is paused
+ * @ATC_IS_CYCLIC: If channel is cyclic
*
* Manipulated with atomic operations.
*/
@@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
- bool cyclic;
struct at_desc *desc;
};
@@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
- * @atdma_devtype: identifier of DMA controller compatibility
- * @ch_regs: memory mapped register base
+ * @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
+ * @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
+ * @atchan: pointer to an atmel hdmac channel.
+ * @desc: pointer to the descriptor for which the residue is calculated.
+ * @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
 * two DSCR values are different, we read the CTRLA and then the DSCR again,
 * until two consecutive DSCR reads are equal or the maximum number of trials
 * is reached. This algorithm is very unlikely to fail to find a stable DSCR value.
- * @atchan: pointer to an atmel hdmac channel.
- * @desc: pointer to the descriptor for which the residue is calculated.
- * @residue: residue to be set to dma_tx_state.
- * Returns 0 on success, -errno otherwise.
+ *
+ * Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
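
The loop shape the comment above describes looks roughly like this — a sketch only, reusing the driver's own channel_readl() helper and ATC_MAX_DSCR_TRIALS limit, not the exact upstream body:

	u32 dscr = channel_readl(atchan, DSCR);
	u32 ctrla = channel_readl(atchan, CTRLA);
	unsigned int i;

	for (i = 0; i < ATC_MAX_DSCR_TRIALS; i++) {
		u32 new_dscr = channel_readl(atchan, DSCR);

		if (new_dscr == dscr)
			break;			/* CTRLA/DSCR pair is coherent */

		dscr = new_dscr;		/* raced with the engine; retry */
		ctrla = channel_readl(atchan, CTRLA);
	}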
@@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
- * Return 0 on success, -errono otherwise.
+ *
+ * Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
- * return - the number of allocated descriptors
+ * Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 2457a420c..4e339c04f 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -81,9 +81,13 @@
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450
+#define AXI_DMAC_REG_CURRENT_SG_ID 0x454
+#define AXI_DMAC_REG_SG_ADDRESS 0x47c
+#define AXI_DMAC_REG_SG_ADDRESS_HIGH 0x4bc
#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)
+#define AXI_DMAC_CTRL_ENABLE_SG BIT(2)
#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)
@@ -97,20 +101,35 @@
/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
+/* Flags for axi_dmac_hw_desc.flags */
+#define AXI_DMAC_HW_FLAG_LAST BIT(0)
+#define AXI_DMAC_HW_FLAG_IRQ BIT(1)
+
+struct axi_dmac_hw_desc {
+ u32 flags;
+ u32 id;
+ u64 dest_addr;
+ u64 src_addr;
+ u64 next_sg_addr;
+ u32 y_len;
+ u32 x_len;
+ u32 src_stride;
+ u32 dst_stride;
+ u64 __pad[2];
+};
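
As laid out above, the hardware descriptor packs to 64 bytes (2 x u32, then 3 x u64, then 4 x u32, then 16 bytes of padding), so consecutive array entries stay naturally aligned for the engine's descriptor fetcher. A hedged compile-time guard one could place alongside the struct:

static_assert(sizeof(struct axi_dmac_hw_desc) == 64,
	      "hw descriptor layout must stay 64 bytes");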
+
struct axi_dmac_sg {
- dma_addr_t src_addr;
- dma_addr_t dest_addr;
- unsigned int x_len;
- unsigned int y_len;
- unsigned int dest_stride;
- unsigned int src_stride;
- unsigned int id;
unsigned int partial_len;
bool schedule_when_free;
+
+ struct axi_dmac_hw_desc *hw;
+ dma_addr_t hw_phys;
};
struct axi_dmac_desc {
struct virt_dma_desc vdesc;
+ struct axi_dmac_chan *chan;
+
bool cyclic;
bool have_partial_xfer;
@@ -139,6 +158,7 @@ struct axi_dmac_chan {
bool hw_partial_xfer;
bool hw_cyclic;
bool hw_2d;
+ bool hw_sg;
};
struct axi_dmac {
@@ -213,9 +233,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
unsigned int flags = 0;
unsigned int val;
- val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
- if (val) /* Queue is full, wait for the next SOT IRQ */
- return;
+ if (!chan->hw_sg) {
+ val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+ if (val) /* Queue is full, wait for the next SOT IRQ */
+ return;
+ }
desc = chan->next_desc;
@@ -229,14 +251,15 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
sg = &desc->sg[desc->num_submitted];
/* Already queued in cyclic mode. Wait for it to finish */
- if (sg->id != AXI_DMAC_SG_UNUSED) {
+ if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
sg->schedule_when_free = true;
return;
}
- desc->num_submitted++;
- if (desc->num_submitted == desc->num_sgs ||
- desc->have_partial_xfer) {
+ if (chan->hw_sg) {
+ chan->next_desc = NULL;
+ } else if (++desc->num_submitted == desc->num_sgs ||
+ desc->have_partial_xfer) {
if (desc->cyclic)
desc->num_submitted = 0; /* Start again */
else
@@ -246,32 +269,42 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
chan->next_desc = desc;
}
- sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+ sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
- if (axi_dmac_dest_is_mem(chan)) {
- axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
- axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
- }
+ if (!chan->hw_sg) {
+ if (axi_dmac_dest_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
+ }
- if (axi_dmac_src_is_mem(chan)) {
- axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
- axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+ if (axi_dmac_src_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+ }
}
/*
* If the hardware supports cyclic transfers and there is no callback to
- * call and only a single segment, enable hw cyclic mode to avoid
- * unnecessary interrupts.
+ * call, enable hw cyclic mode to avoid unnecessary interrupts.
*/
- if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
- desc->num_sgs == 1)
- flags |= AXI_DMAC_FLAG_CYCLIC;
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
+ if (chan->hw_sg)
+ desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
+ else if (desc->num_sgs == 1)
+ flags |= AXI_DMAC_FLAG_CYCLIC;
+ }
if (chan->hw_partial_xfer)
flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
- axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
- axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+ if (chan->hw_sg) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
+ (u64)sg->hw_phys >> 32);
+ } else {
+ axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+ axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+ }
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
@@ -286,9 +319,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
struct axi_dmac_sg *sg)
{
if (chan->hw_2d)
- return sg->x_len * sg->y_len;
+ return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
else
- return sg->x_len;
+ return (sg->hw->x_len + 1);
}
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@@ -307,9 +340,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
for (i = 0; i < desc->num_sgs; i++) {
sg = &desc->sg[i];
- if (sg->id == AXI_DMAC_SG_UNUSED)
+ if (sg->hw->id == AXI_DMAC_SG_UNUSED)
continue;
- if (sg->id == id) {
+ if (sg->hw->id == id) {
desc->have_partial_xfer = true;
sg->partial_len = len;
found_sg = true;
@@ -348,6 +381,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
rslt->result = DMA_TRANS_NOERROR;
rslt->residue = 0;
+ if (chan->hw_sg)
+ return;
+
/*
* We get here if the last completed segment is partial, which
* means we can compute the residue from that segment onwards
@@ -374,36 +410,47 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
(completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
axi_dmac_dequeue_partial_xfers(chan);
- do {
- sg = &active->sg[active->num_completed];
- if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
- break;
- if (!(BIT(sg->id) & completed_transfers))
- break;
- active->num_completed++;
- sg->id = AXI_DMAC_SG_UNUSED;
- if (sg->schedule_when_free) {
- sg->schedule_when_free = false;
- start_next = true;
+ if (chan->hw_sg) {
+ if (active->cyclic) {
+ vchan_cyclic_callback(&active->vdesc);
+ } else {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ start_next = !!active;
}
+ } else {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+ break;
+ if (!(BIT(sg->hw->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ sg->hw->id = AXI_DMAC_SG_UNUSED;
+ if (sg->schedule_when_free) {
+ sg->schedule_when_free = false;
+ start_next = true;
+ }
- if (sg->partial_len)
- axi_dmac_compute_residue(chan, active);
+ if (sg->partial_len)
+ axi_dmac_compute_residue(chan, active);
- if (active->cyclic)
- vchan_cyclic_callback(&active->vdesc);
+ if (active->cyclic)
+ vchan_cyclic_callback(&active->vdesc);
- if (active->num_completed == active->num_sgs ||
- sg->partial_len) {
- if (active->cyclic) {
- active->num_completed = 0; /* wrap around */
- } else {
- list_del(&active->vdesc.node);
- vchan_cookie_complete(&active->vdesc);
- active = axi_dmac_active_desc(chan);
+ if (active->num_completed == active->num_sgs ||
+ sg->partial_len) {
+ if (active->cyclic) {
+ active->num_completed = 0; /* wrap around */
+ } else {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ }
}
- }
- } while (active);
+ } while (active);
+ }
return start_next;
}
@@ -467,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
unsigned long flags;
+ u32 ctrl = AXI_DMAC_CTRL_ENABLE;
- axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+ if (chan->hw_sg)
+ ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan))
@@ -476,22 +527,58 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
-static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+static struct axi_dmac_desc *
+axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
+ struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+ struct device *dev = dmac->dma_dev.dev;
+ struct axi_dmac_hw_desc *hws;
struct axi_dmac_desc *desc;
+ dma_addr_t hw_phys;
unsigned int i;
desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
desc->num_sgs = num_sgs;
+ desc->chan = chan;
+
+ hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
+ &hw_phys, GFP_ATOMIC);
+ if (!hws) {
+ kfree(desc);
+ return NULL;
+ }
- for (i = 0; i < num_sgs; i++)
- desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+ for (i = 0; i < num_sgs; i++) {
+ desc->sg[i].hw = &hws[i];
+ desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+
+ hws[i].id = AXI_DMAC_SG_UNUSED;
+ hws[i].flags = 0;
+
+ /* Link hardware descriptors */
+ hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
+ }
+
+ /* The last hardware descriptor will trigger an interrupt */
+ desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
return desc;
}
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+ struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+ struct device *dev = dmac->dma_dev.dev;
+ struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+ dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+ dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+ hw, hw_phys);
+ kfree(desc);
+}
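
Because every hardware descriptor of a transfer comes from the single dma_alloc_coherent() block above, entry i always sits at hw_phys + i * sizeof(struct axi_dmac_hw_desc) — exactly the address chained into next_sg_addr — and freeing through sg[0] releases the whole array. A hypothetical debugging helper that walks the chain, for illustration only:

static void axi_dmac_dump_chain(struct axi_dmac_desc *desc)
{
	unsigned int i;

	for (i = 0; i < desc->num_sgs; i++)
		pr_debug("sg[%u]: desc %pad -> next %llx flags %x\n",
			 i, &desc->sg[i].hw_phys,
			 (unsigned long long)desc->sg[i].hw->next_sg_addr,
			 desc->sg[i].hw->flags);
}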
+
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
enum dma_transfer_direction direction, dma_addr_t addr,
unsigned int num_periods, unsigned int period_len,
@@ -508,26 +595,24 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
for (i = 0; i < num_periods; i++) {
- len = period_len;
-
- while (len > segment_size) {
+ for (len = period_len; len > segment_size; sg++) {
if (direction == DMA_DEV_TO_MEM)
- sg->dest_addr = addr;
+ sg->hw->dest_addr = addr;
else
- sg->src_addr = addr;
- sg->x_len = segment_size;
- sg->y_len = 1;
- sg++;
+ sg->hw->src_addr = addr;
+ sg->hw->x_len = segment_size - 1;
+ sg->hw->y_len = 0;
+ sg->hw->flags = 0;
addr += segment_size;
len -= segment_size;
}
if (direction == DMA_DEV_TO_MEM)
- sg->dest_addr = addr;
+ sg->hw->dest_addr = addr;
else
- sg->src_addr = addr;
- sg->x_len = len;
- sg->y_len = 1;
+ sg->hw->src_addr = addr;
+ sg->hw->x_len = len - 1;
+ sg->hw->y_len = 0;
sg++;
addr += len;
}
@@ -554,7 +639,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i)
num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
- desc = axi_dmac_alloc_desc(num_sgs);
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
@@ -563,7 +648,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
!axi_dmac_check_len(chan, sg_dma_len(sg))) {
- kfree(desc);
+ axi_dmac_free_desc(desc);
return NULL;
}
@@ -583,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
- unsigned int num_periods, num_segments;
+ unsigned int num_periods, num_segments, num_sgs;
if (direction != chan->direction)
return NULL;
@@ -597,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
num_periods = buf_len / period_len;
num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+ num_sgs = num_periods * num_segments;
- desc = axi_dmac_alloc_desc(num_periods * num_segments);
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
+ /* Chain the last descriptor to the first, and remove its "last" flag */
+ desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
+ desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
+
axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
period_len, desc->sg);
@@ -653,26 +743,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
return NULL;
}
- desc = axi_dmac_alloc_desc(1);
+ desc = axi_dmac_alloc_desc(chan, 1);
if (!desc)
return NULL;
if (axi_dmac_src_is_mem(chan)) {
- desc->sg[0].src_addr = xt->src_start;
- desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+ desc->sg[0].hw->src_addr = xt->src_start;
+ desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
}
if (axi_dmac_dest_is_mem(chan)) {
- desc->sg[0].dest_addr = xt->dst_start;
- desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+ desc->sg[0].hw->dest_addr = xt->dst_start;
+ desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
}
if (chan->hw_2d) {
- desc->sg[0].x_len = xt->sgl[0].size;
- desc->sg[0].y_len = xt->numf;
+ desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
+ desc->sg[0].hw->y_len = xt->numf - 1;
} else {
- desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
- desc->sg[0].y_len = 1;
+ desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
+ desc->sg[0].hw->y_len = 0;
}
if (flags & DMA_CYCLIC)
@@ -688,7 +778,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
- kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+ axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
@@ -714,6 +804,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
case AXI_DMAC_REG_CURRENT_DEST_ADDR:
case AXI_DMAC_REG_PARTIAL_XFER_LEN:
case AXI_DMAC_REG_PARTIAL_XFER_ID:
+ case AXI_DMAC_REG_CURRENT_SG_ID:
+ case AXI_DMAC_REG_SG_ADDRESS:
+ case AXI_DMAC_REG_SG_ADDRESS_HIGH:
return true;
default:
return false;
@@ -866,6 +959,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
chan->hw_cyclic = true;
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
+ chan->hw_sg = true;
+
axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
chan->hw_2d = true;
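
The probe above relies on a write/read-back trick: registers that were not synthesized into a given core instance read back as zero, so writing all-ones and seeing a non-zero value implies the feature (here, scatter-gather support) is present. Generalized as a sketch with a hypothetical helper name — the driver itself open-codes this per register:

static bool axi_dmac_has_reg(struct axi_dmac *dmac, unsigned int reg)
{
	axi_dmac_write(dmac, reg, 0xffffffff);
	return axi_dmac_read(dmac, reg) != 0;
}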
@@ -911,6 +1008,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct axi_dmac *dmac;
struct regmap *regmap;
unsigned int version;
+ u32 irq_mask = 0;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -966,6 +1064,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
dma_dev->directions = BIT(dmac->chan.direction);
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
INIT_LIST_HEAD(&dma_dev->channels);
dmac->chan.vchan.desc_free = axi_dmac_desc_free;
@@ -977,7 +1076,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
- axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (dmac->chan.hw_sg)
+ irq_mask |= AXI_DMAC_IRQ_SOT;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);
if (of_dma_is_coherent(pdev->dev.of_node)) {
ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ffe621695..a4f608837 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -21,6 +21,10 @@
#include <linux/slab.h>
#include <linux/wait.h>
+static bool nobounce;
+module_param(nobounce, bool, 0644);
+MODULE_PARM_DESC(nobounce, "Prevent using swiotlb buffer (default: use swiotlb buffer)");
+
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
@@ -90,6 +94,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
* struct dmatest_params - test parameters.
+ * @nobounce: prevent using swiotlb buffer
* @buf_size: size of the memcpy test buffer
* @channel: bus ID of the channel to test
* @device: bus ID of the DMA Engine to test
@@ -106,6 +111,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
* @polled: use polling for completion instead of interrupts
*/
struct dmatest_params {
+ bool nobounce;
unsigned int buf_size;
char channel[20];
char device[32];
@@ -215,6 +221,7 @@ struct dmatest_done {
struct dmatest_data {
u8 **raw;
u8 **aligned;
+ gfp_t gfp_flags;
unsigned int cnt;
unsigned int off;
};
@@ -533,7 +540,7 @@ static int dmatest_alloc_test_data(struct dmatest_data *d,
goto err;
for (i = 0; i < d->cnt; i++) {
- d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+ d->raw[i] = kmalloc(buf_size + align, d->gfp_flags);
if (!d->raw[i])
goto err;
@@ -655,6 +662,13 @@ static int dmatest_func(void *data)
goto err_free_coefs;
}
+ src->gfp_flags = GFP_KERNEL;
+ dst->gfp_flags = GFP_KERNEL;
+ if (params->nobounce) {
+ src->gfp_flags = GFP_DMA;
+ dst->gfp_flags = GFP_DMA;
+ }
+
if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
@@ -1093,6 +1107,7 @@ static void add_threaded_test(struct dmatest_info *info)
struct dmatest_params *params = &info->params;
/* Copy test parameters */
+ params->nobounce = nobounce;
params->buf_size = test_buf_size;
strscpy(params->channel, strim(test_channel), sizeof(params->channel));
strscpy(params->device, strim(test_device), sizeof(params->device));
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 7a2427931..5005e138f 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -161,6 +161,10 @@ struct fsl_qdma_format {
u8 __reserved1[2];
u8 cfg8b_w1;
} __packed;
+ struct {
+ __le32 __reserved2;
+ __le32 cmd;
+ } __packed;
__le64 data;
};
} __packed;
@@ -355,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
- u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
@@ -384,15 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET) |
- FSL_QDMA_CMD_PF;
- sdf->data = QDMA_SDDF_CMD(cmd);
-
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
- ddf->data = QDMA_SDDF_CMD(cmd);
+ sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF);
+
+ ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
}
/*
@@ -570,10 +569,9 @@ static struct fsl_qdma_queue
status_size,
&status_head->bus_addr,
GFP_KERNEL);
- if (!status_head->cq) {
- devm_kfree(&pdev->dev, status_head);
+ if (!status_head->cq)
return NULL;
- }
+
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@@ -627,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
- void *block,
+ __iomem void *block,
int id)
{
bool duplicate;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 78a938969..1398814d8 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -171,6 +171,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
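
An all-ones read is the conventional sign that a device behind a shared interrupt line is powered down (or gone), since reads then float high; the handler must decline the interrupt so the other sharers still run. The general pattern, sketched with assumed names:

static irqreturn_t example_irq(int irq, void *dev_id)
{
	void __iomem *regs = dev_id;	/* hypothetical per-device mapping */
	u32 status = readl(regs);	/* status register at offset 0, assumed */

	if (status == GENMASK(31, 0))
		return IRQ_NONE;	/* device is off; not our interrupt */

	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}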
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index c5e679070..2b4a0d406 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
idxd_bus-y := bus.o
obj-$(CONFIG_INTEL_IDXD) += idxd.o
-idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o defaults.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c
index 6f8462105..0c9e689a2 100644
--- a/drivers/dma/idxd/bus.c
+++ b/drivers/dma/idxd/bus.c
@@ -67,11 +67,17 @@ static void idxd_config_bus_remove(struct device *dev)
idxd_drv->remove(idxd_dev);
}
+static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0);
+}
+
struct bus_type dsa_bus_type = {
.name = "dsa",
.match = idxd_config_bus_match,
.probe = idxd_config_bus_probe,
.remove = idxd_config_bus_remove,
+ .uevent = idxd_bus_uevent,
};
EXPORT_SYMBOL_GPL(dsa_bus_type);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 3dd25a9a0..59456f217 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev)
struct idxd_wq *wq = idxd_cdev->wq;
cdev_ctx = &ictx[wq->idxd->data->type];
- ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
kfree(idxd_cdev);
}
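
The conversion above moves from the deprecated ida_simple_*() helpers to the current IDA API; note the range semantics differ: ida_simple_get(ida, 0, end, gfp) allocated from [0, end), while ida_alloc_max(ida, max, gfp) treats max as inclusive. A minimal sketch of the modern pairing:

#include <linux/idr.h>
#include <linux/kdev_t.h>

static DEFINE_IDA(example_minor_ida);

static int example_get_minor(void)
{
	return ida_alloc_max(&example_minor_ida, MINORMASK, GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	ida_free(&example_minor_ida, minor);
}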
@@ -342,7 +342,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
if (!evl)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
h = status.head;
@@ -354,9 +354,8 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
- spin_unlock(&evl->lock);
-
drain_workqueue(wq->wq);
+ mutex_unlock(&evl->lock);
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
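
The lock conversion in this hunk exists because drain_workqueue() can sleep while it waits for queued work, which is illegal under a spinlock; making evl->lock a mutex lets the drain sit inside the critical section so no new event-log entries are scanned concurrently. The shape, as a hedged sketch with a hypothetical helper name:

static void example_drain_under_mutex(struct idxd_evl *evl, struct idxd_wq *wq)
{
	mutex_lock(&evl->lock);		/* a mutex may be held across sleeps */
	/* ... scan event-log entries as above ... */
	drain_workqueue(wq->wq);	/* sleeps; would splat under a spinlock */
	mutex_unlock(&evl->lock);
}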
@@ -463,7 +462,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
cdev = &idxd_cdev->cdev;
dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
- minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0) {
kfree(idxd_cdev);
return minor;
@@ -550,7 +549,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
}
wq->type = IDXD_WQT_USER;
- rc = drv_enable_wq(wq);
+ rc = idxd_drv_enable_wq(wq);
if (rc < 0)
goto err;
@@ -565,7 +564,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_cdev:
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
err:
destroy_workqueue(wq->wq);
wq->type = IDXD_WQT_NONE;
@@ -580,7 +579,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
idxd_wq_del_cdev(wq);
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
destroy_workqueue(wq->wq);
wq->wq = NULL;
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index f3f25ee67..ad4245cb3 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -66,7 +66,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
if (!evl || !evl->log)
return 0;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
@@ -87,7 +87,7 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
dump_event_entry(idxd, s, i, &count, processed);
}
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
}
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
new file mode 100644
index 000000000..c607ae8dd
--- /dev/null
+++ b/drivers/dma/idxd/defaults.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Intel Corporation. All rights rsvd. */
+#include <linux/kernel.h>
+#include "idxd.h"
+
+int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct idxd_group *group;
+ struct idxd_wq *wq;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return 0;
+
+ wq = idxd->wqs[0];
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ /* set mode to "dedicated" */
+ set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ wq->threshold = 0;
+
+ /* only setting up 1 wq, so give it all the wq space */
+ wq->size = idxd->max_wq_size;
+
+ /* set priority to 10 */
+ wq->priority = 10;
+
+ /* set type to "kernel" */
+ wq->type = IDXD_WQT_KERNEL;
+
+ /* set wq group to 0 */
+ group = idxd->groups[0];
+ wq->group = group;
+ group->num_wqs++;
+
+ /* set name to "iaa_crypto" */
+ memset(wq->name, 0, WQ_NAME_SIZE + 1);
+ strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+
+ /* set driver_name to "crypto" */
+ memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
+ strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+
+ engine = idxd->engines[0];
+
+ /* set engine group to 0 */
+ engine->group = idxd->groups[0];
+ engine->group->num_engines++;
+
+ return 0;
+}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index fa0f880be..c41ef195e 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -161,6 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
free_hw_descs(wq);
return rc;
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
void idxd_wq_free_resources(struct idxd_wq *wq)
{
@@ -174,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
sbitmap_queue_free(&wq->sbq);
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
int idxd_wq_enable(struct idxd_wq *wq)
{
@@ -405,6 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
reinit_completion(&wq->wq_resurrect);
return 0;
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -414,6 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
complete_all(&wq->wq_resurrect);
wait_for_completion(&wq->wq_dead);
}
+EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
void idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -421,6 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
__idxd_wq_quiesce(wq);
mutex_unlock(&wq->wq_lock);
}
+EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
@@ -770,7 +775,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
evl->log_size = size;
@@ -791,7 +796,7 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
gencfg.evl_en = 1;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
return 0;
err_alloc:
@@ -814,7 +819,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
if (!gencfg.evl_en)
return;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
gencfg.evl_en = 0;
iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
@@ -831,7 +836,7 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
@@ -1273,7 +1278,7 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
tx = &desc->txd;
tx->callback = NULL;
tx->callback_result = NULL;
- idxd_dma_complete_txd(desc, ctype, true);
+ idxd_dma_complete_txd(desc, ctype, true, NULL, NULL);
}
}
@@ -1357,7 +1362,7 @@ err_irq:
return rc;
}
-int drv_enable_wq(struct idxd_wq *wq)
+int idxd_drv_enable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1489,8 +1494,9 @@ err_map_portal:
err:
return rc;
}
+EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD);
-void drv_disable_wq(struct idxd_wq *wq)
+void idxd_drv_disable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
@@ -1510,6 +1516,7 @@ void drv_disable_wq(struct idxd_wq *wq)
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
}
+EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 47a01893c..cd835eabd 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -22,7 +22,7 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type,
- bool free_desc)
+ bool free_desc, void *ctx, u32 *status)
{
struct idxd_device *idxd = desc->wq->idxd;
struct dma_async_tx_descriptor *tx;
@@ -314,7 +314,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
wq->type = IDXD_WQT_KERNEL;
- rc = drv_enable_wq(wq);
+ rc = idxd_drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
rc = -ENXIO;
@@ -333,7 +333,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_dma:
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
err:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
@@ -347,7 +347,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
__idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
- drv_disable_wq(wq);
+ idxd_drv_disable_wq(wq);
mutex_unlock(&wq->wq_lock);
}
@@ -359,6 +359,7 @@ static enum idxd_dev_type dev_types[] = {
struct idxd_device_driver idxd_dmaengine_drv = {
.probe = idxd_dmaengine_drv_probe,
.remove = idxd_dmaengine_drv_remove,
+ .desc_complete = idxd_dma_complete_txd,
.name = "dmaengine",
.type = dev_types,
};
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 96062ae39..df91472f0 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -13,6 +13,7 @@
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <linux/iommu.h>
+#include <linux/crypto.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -57,11 +58,23 @@ enum idxd_type {
#define IDXD_ENQCMDS_RETRIES 32
#define IDXD_ENQCMDS_MAX_RETRIES 64
+enum idxd_complete_type {
+ IDXD_COMPLETE_NORMAL = 0,
+ IDXD_COMPLETE_ABORT,
+ IDXD_COMPLETE_DEV_FAIL,
+};
+
+struct idxd_desc;
+
struct idxd_device_driver {
const char *name;
enum idxd_dev_type *type;
int (*probe)(struct idxd_dev *idxd_dev);
void (*remove)(struct idxd_dev *idxd_dev);
+ void (*desc_complete)(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc,
+ void *ctx, u32 *status);
struct device_driver drv;
};
@@ -174,12 +187,6 @@ enum idxd_op_type {
IDXD_OP_NONBLOCK = 1,
};
-enum idxd_complete_type {
- IDXD_COMPLETE_NORMAL = 0,
- IDXD_COMPLETE_ABORT,
- IDXD_COMPLETE_DEV_FAIL,
-};
-
struct idxd_dma_chan {
struct dma_chan chan;
struct idxd_wq *wq;
@@ -270,6 +277,8 @@ struct idxd_dma_dev {
struct dma_device dma;
};
+typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
+
struct idxd_driver_data {
const char *name_prefix;
enum idxd_type type;
@@ -279,11 +288,12 @@ struct idxd_driver_data {
int evl_cr_off;
int cr_status_off;
int cr_result_off;
+ load_device_defaults_fn_t load_device_defaults;
};
struct idxd_evl {
/* Lock to protect event log access. */
- spinlock_t lock;
+ struct mutex lock;
void *log;
dma_addr_t dma;
/* Total size of event log = number of entries * entry size. */
@@ -377,6 +387,14 @@ static inline unsigned int evl_size(struct idxd_device *idxd)
return idxd->evl->size * evl_ent_size(idxd);
}
+struct crypto_ctx {
+ struct acomp_req *req;
+ struct crypto_tfm *tfm;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ bool compress;
+};
+
/* IDXD software descriptor */
struct idxd_desc {
union {
@@ -389,7 +407,10 @@ struct idxd_desc {
struct iax_completion_record *iax_completion;
};
dma_addr_t compl_dma;
- struct dma_async_tx_descriptor txd;
+ union {
+ struct dma_async_tx_descriptor txd;
+ struct crypto_ctx crypto;
+ };
struct llist_node llnode;
struct list_head list;
int id;
@@ -416,6 +437,15 @@ enum idxd_completion_status {
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
+static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
+{
+ struct device *dev = wq_confdev(wq);
+ struct idxd_device_driver *idxd_drv =
+ container_of(dev->driver, struct idxd_device_driver, drv);
+
+ return idxd_drv;
+}
+
static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
@@ -617,6 +647,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
+static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
+{
+ dev_set_drvdata(wq_confdev(wq), private);
+}
+
+static inline void *idxd_wq_get_private(struct idxd_wq *wq)
+{
+ return dev_get_drvdata(wq_confdev(wq));
+}
+
/*
* Intel IAA does not support batch processing.
* The max batch size of device, max batch size of wq and
@@ -654,6 +694,9 @@ static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *d
return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
}
+#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*")
+#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d"
+
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
@@ -664,6 +707,24 @@ void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
#define module_idxd_driver(__idxd_driver) \
module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc, void *ctx, u32 *status);
+
+static inline void idxd_desc_complete(struct idxd_desc *desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc)
+{
+ struct idxd_device_driver *drv;
+ u32 status;
+
+ drv = wq_to_idxd_drv(desc->wq);
+ if (drv->desc_complete)
+ drv->desc_complete(desc, comp_type, free_desc,
+ &desc->txd, &status);
+}
+
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
@@ -671,6 +732,7 @@ void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
+int idxd_load_iaa_device_defaults(struct idxd_device *idxd);
/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
@@ -681,8 +743,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
-int drv_enable_wq(struct idxd_wq *wq);
-void drv_disable_wq(struct idxd_wq *wq);
+int idxd_drv_enable_wq(struct idxd_wq *wq);
+void idxd_drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
@@ -717,14 +779,11 @@ int idxd_wq_request_irq(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
-void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-void idxd_dma_complete_txd(struct idxd_desc *desc,
- enum idxd_complete_type comp_type, bool free_desc);
/* cdev */
int idxd_cdev_register(void);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index d09a8553e..264c4e47d 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -59,6 +59,7 @@ static struct idxd_driver_data idxd_driver_data[] = {
.evl_cr_off = offsetof(struct iax_evl_entry, cr),
.cr_status_off = offsetof(struct iax_completion_record, status),
.cr_result_off = offsetof(struct iax_completion_record, error_code),
+ .load_device_defaults = idxd_load_iaa_device_defaults,
},
};
@@ -353,7 +354,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
if (!evl)
return -ENOMEM;
- spin_lock_init(&evl->lock);
+ mutex_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
idxd_name = dev_name(idxd_confdev(idxd));
@@ -754,6 +755,12 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ if (data->load_device_defaults) {
+ rc = data->load_device_defaults(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD loading device defaults failed\n");
+ }
+
rc = idxd_register_devices(idxd);
if (rc) {
dev_err(dev, "IDXD sysfs setup failed\n");
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 3bdfc1797..8dc029c86 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -123,7 +123,7 @@ static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie)
list_for_each_entry_safe(d, t, &flist, list) {
list_del(&d->list);
- idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+ idxd_desc_complete(d, IDXD_COMPLETE_ABORT, true);
}
}
@@ -363,7 +363,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.bits = 0;
evl_status.int_pending = 1;
- spin_lock(&evl->lock);
+ mutex_lock(&evl->lock);
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
@@ -380,7 +380,7 @@ static void process_evl_entries(struct idxd_device *idxd)
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
- spin_unlock(&evl->lock);
+ mutex_unlock(&evl->lock);
}
irqreturn_t idxd_misc_thread(int vec, void *data)
@@ -533,7 +533,7 @@ static void idxd_int_handle_resubmit_work(struct work_struct *work)
*/
if (rc != -EAGAIN) {
desc->completion->status = IDXD_COMP_DESC_ABORT;
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false);
+ idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, false);
}
idxd_free_desc(wq, desc);
}
@@ -574,11 +574,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true);
} else {
spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
@@ -617,11 +617,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
+ idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true);
}
}
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index fdda6d604..5e94247e1 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -528,14 +528,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 3f922518e..817a56441 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -61,6 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
return __get_desc(wq, idx, cpu);
}
+EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
@@ -69,6 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
desc->cpu = -1;
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
+EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD);
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
struct idxd_desc *desc)
@@ -125,7 +127,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
spin_unlock(&ie->list_lock);
if (found)
- idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false,
+ NULL, NULL);
/*
* completing the descriptor will return desc to allocator and
@@ -135,7 +138,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
- idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true,
+ NULL, NULL);
}
}
@@ -215,3 +219,4 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
percpu_ref_put(&wq->wq_active);
return 0;
}
+EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD);
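
All of the idxd exports in this series land in the IDXD symbol namespace, so an out-of-file consumer (such as the iaa_crypto user the defaults above configure) must import the namespace explicitly or modpost refuses the link. Consumer-side sketch, with the include path assumed:

#include <linux/module.h>
#include "idxd.h"			/* in-tree consumer include, assumed */

MODULE_IMPORT_NS(IDXD);

static int example_use_wq(struct idxd_wq *wq)
{
	return idxd_wq_alloc_resources(wq);	/* namespaced export from above */
}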
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f81ecf586..9b42f5e96 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -421,9 +421,7 @@ struct sdma_desc {
* @shp_addr: value for gReg[6]
* @per_addr: value for gReg[2]
* @status: status of dma channel
- * @context_loaded: ensure context is only loaded once
* @data: specific sdma interface structure
- * @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
* @terminated: terminated list
* @is_ram_script: flag for script in ram
@@ -486,8 +484,6 @@ struct sdma_channel {
* @num_script_addrs: Number of script addresses in this image
* @ram_code_start: offset of SDMA ram image in this firmware image
* @ram_code_size: size of SDMA ram image
- * @script_addrs: Stores the start address of the SDMA scripts
- * (in SDMA memory space)
*/
struct sdma_firmware_header {
u32 magic;
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
new file mode 100644
index 000000000..a49913f3e
--- /dev/null
+++ b/drivers/dma/ls2x-apb-dma.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Loongson LS2X APB DMA Controller
+ *
+ * Copyright (C) 2017-2023 Loongson Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Global Configuration Register */
+#define LDMA_ORDER_ERG 0x0
+
+/* Bitfield definitions */
+
+/* Bitfields in Global Configuration Register */
+#define LDMA_64BIT_EN BIT(0) /* 1: 64 bit support */
+#define LDMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
+#define LDMA_ASK_VALID BIT(2)
+#define LDMA_START BIT(3) /* DMA start operation */
+#define LDMA_STOP BIT(4) /* DMA stop operation */
+#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
+
+/* Bitfields in ndesc_addr field of HW descriptor */
+#define LDMA_DESC_EN BIT(0) /* 1: The next descriptor is valid */
+#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
+
+/* Bitfields in cmd field of HW descriptor */
+#define LDMA_INT BIT(1) /* Enable DMA interrupts */
+#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
+
+#define LDMA_SLAVE_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define LDMA_MAX_TRANS_LEN U32_MAX
+
+/*-- descriptors -----------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_hw_desc - DMA HW descriptor
+ * @ndesc_addr: the next descriptor low address.
+ * @mem_addr: memory low address.
+ * @apb_addr: device buffer address.
+ * @len: length of a piece of carried content, in words.
+ * @step_len: length between two moved memory data blocks.
+ * @step_times: number of blocks to be carried in a single DMA operation.
+ * @cmd: descriptor command or state.
+ * @stats: DMA status.
+ * @high_ndesc_addr: the next descriptor high address.
+ * @high_mem_addr: memory high address.
+ * @reserved: reserved
+ */
+struct ls2x_dma_hw_desc {
+ u32 ndesc_addr;
+ u32 mem_addr;
+ u32 apb_addr;
+ u32 len;
+ u32 step_len;
+ u32 step_times;
+ u32 cmd;
+ u32 stats;
+ u32 high_ndesc_addr;
+ u32 high_mem_addr;
+ u32 reserved[2];
+} __packed;
+
+/*
+ * struct ls2x_dma_sg - ls2x dma scatter gather entry
+ * @hw: the pointer to DMA HW descriptor.
+ * @llp: physical address of the DMA HW descriptor.
+ * @phys: destination or source address(mem).
+ * @len: number of bytes to read.
+ */
+struct ls2x_dma_sg {
+ struct ls2x_dma_hw_desc *hw;
+ dma_addr_t llp;
+ dma_addr_t phys;
+ u32 len;
+};
+
+/*
+ * struct ls2x_dma_desc - software descriptor
+ * @vdesc: pointer to the virtual dma descriptor.
+ * @cyclic: flag set for cyclic transfers
+ * @burst_size: burst size of transaction, in words.
+ * @desc_num: number of sg entries.
+ * @direction: transfer direction, to or from device.
+ * @status: dma controller status.
+ * @sg: array of sgs.
+ */
+struct ls2x_dma_desc {
+ struct virt_dma_desc vdesc;
+ bool cyclic;
+ size_t burst_size;
+ u32 desc_num;
+ enum dma_transfer_direction direction;
+ enum dma_status status;
+ struct ls2x_dma_sg sg[] __counted_by(desc_num);
+};
+
+/*-- Channels --------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
+ * @vchan: virtual dma channel entry.
+ * @desc: pointer to the ls2x sw dma descriptor.
+ * @pool: hw desc table
+ * @irq: irq line
+ * @sconfig: configuration for slave transfers, passed via .device_config
+ */
+struct ls2x_dma_chan {
+ struct virt_dma_chan vchan;
+ struct ls2x_dma_desc *desc;
+ void *pool;
+ int irq;
+ struct dma_slave_config sconfig;
+};
+
+/*-- Controller ------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_priv - LS2X APB DMAC specific information
+ * @ddev: dmaengine dma_device object members
+ * @dma_clk: DMAC clock source
+ * @regs: memory mapped register base
+ * @lchan: channel to store ls2x_dma_chan structures
+ */
+struct ls2x_dma_priv {
+ struct dma_device ddev;
+ struct clk *dma_clk;
+ void __iomem *regs;
+ struct ls2x_dma_chan lchan;
+};
+
+/*-- Helper functions ------------------------------------------------*/
+
+static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct ls2x_dma_desc, vdesc);
+}
+
+static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ls2x_dma_chan, vchan.chan);
+}
+
+static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
+{
+ return container_of(ddev, struct ls2x_dma_priv, ddev);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
+ struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
+ int i;
+
+ for (i = 0; i < desc->desc_num; i++) {
+ if (desc->sg[i].hw)
+ dma_pool_free(lchan->pool, desc->sg[i].hw,
+ desc->sg[i].llp);
+ }
+
+ kfree(desc);
+}
+
+static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ u64 val;
+
+ val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
+ val |= LDMA_64BIT_EN | cmd;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ struct ls2x_dma_sg *ldma_sg;
+ struct virt_dma_desc *vdesc;
+ u64 val;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&lchan->vchan);
+ if (!vdesc) {
+ lchan->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ lchan->desc = to_ldma_desc(vdesc);
+ ldma_sg = &lchan->desc->sg[0];
+
+ /* Start DMA */
+ lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
+ val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
+{
+ u32 maxburst, buswidth;
+
+ /* Reject definitely invalid configurations */
+ if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
+ (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
+ return 0;
+
+ if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
+ maxburst = lchan->sconfig.dst_maxburst;
+ buswidth = lchan->sconfig.dst_addr_width;
+ } else {
+ maxburst = lchan->sconfig.src_maxburst;
+ buswidth = lchan->sconfig.src_addr_width;
+ }
+
+ /* If maxburst is zero, fall back to LDMA_MAX_TRANS_LEN */
+ return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
+}
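
A worked example of the conversion above, for illustration: the engine's length fields count 32-bit words, so the byte-sized burst is divided by four.

/*
 *   maxburst = 8 beats, dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES (4)
 *   burst_size = (8 * 4) >> 2 = 8 words, i.e. 32 bytes per segment
 */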
+
+static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
+ struct ls2x_dma_desc *desc)
+{
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
+ u32 num_segments, segment_size;
+
+ if (desc->direction == DMA_MEM_TO_DEV) {
+ ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
+ ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
+ } else {
+ ldma_sg->hw->cmd = LDMA_INT;
+ ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
+ }
+
+ ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
+ ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
+
+ /* Split into multiple equally sized segments if necessary */
+ num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
+ segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
+
+ /* Word count register takes input in words */
+ ldma_sg->hw->len = segment_size;
+ ldma_sg->hw->step_times = num_segments;
+ ldma_sg->hw->step_len = 0;
+
+ /* Let's make a linked list */
+ if (sg_index) {
+ desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
+ desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
+ }
+}
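
For illustration, a worked instance of the split above:

/*
 * 4096-byte sg entry -> (4096 + 3) >> 2 = 1024 words
 * burst_size = 8 words:
 *   num_segments = DIV_ROUND_UP(1024, 8)   = 128
 *   segment_size = DIV_ROUND_UP(1024, 128) = 8
 * One hw descriptor: len = 8, step_times = 128, step_len = 0 (contiguous).
 */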
+
+/*-- DMA Engine API --------------------------------------------------*/
+
+/*
+ * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
+ * @chan: allocate descriptor resources for this channel
+ *
+ * Return: the number of allocated descriptors
+ */
+static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->device->dev, PAGE_SIZE,
+ __alignof__(struct ls2x_dma_hw_desc), 0);
+ if (!lchan->pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
+
+ return 1;
+}
+
+/*
+ * ls2x_dma_free_chan_resources - free all channel resources
+ * @chan: DMA channel
+ */
+static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+ dma_pool_destroy(lchan->pool);
+ lchan->pool = NULL;
+}
+
+/*
+ * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ struct scatterlist *sg;
+ size_t burst_size;
+ int i;
+
+ if (unlikely(!sg_len || !is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = sg_len;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = sg_dma_address(sg);
+ ldma_sg->len = sg_dma_len(sg);
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+ /* Setting the last descriptor enable bit */
+ desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ * @flags: tx descriptor status flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ size_t burst_size;
+ u32 num_periods;
+ int i;
+
+ if (unlikely(!buf_len || !period_len))
+ return NULL;
+
+ if (unlikely(!is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+ desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = num_periods;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ /* Build cyclic linked list */
+ for (i = 0; i < num_periods; i++) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = buf_addr + period_len * i;
+ ldma_sg->len = period_len;
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+ /* Link the last descriptor back to the first to close the ring */
+ desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
+ desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
+ desc->cyclic = true;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
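+/*
+ * Cyclic example (illustrative numbers): a client with a 48000-byte
+ * ring buffer and 4800-byte periods gets num_periods = 10 linked
+ * descriptors, and the interrupt handler invokes
+ * vchan_cyclic_callback() once per completed period until the channel
+ * is terminated.
+ */
+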
+/*
+ * ls2x_dma_slave_config - set slave configuration for a channel
+ * @chan: DMA channel
+ * @config: slave configuration to apply
+ */
+static int ls2x_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ memcpy(&lchan->sconfig, config, sizeof(*config));
+ return 0;
+}
+
+/*
+ * ls2x_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ls2x_dma_issue_pending(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
+ ls2x_dma_start_transfer(lchan);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+}
+
+/*
+ * ls2x_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+ * Stops all DMA transactions.
+ */
+static int ls2x_dma_terminate_all(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ /* Issue the stop command */
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ if (lchan->desc) {
+ vchan_terminate_vdesc(&lchan->desc->vdesc);
+ lchan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&lchan->vchan, &head);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&lchan->vchan, &head);
+ return 0;
+}
+
+/*
+ * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ */
+static void ls2x_dma_synchronize(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_synchronize(&lchan->vchan);
+}
+
+static int ls2x_dma_pause(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ lchan->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int ls2x_dma_resume(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
+ lchan->desc->status = DMA_IN_PROGRESS;
+ ls2x_dma_write_cmd(lchan, LDMA_START);
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_isr - LS2X DMA Interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to ls2x_dma_chan
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
+{
+ struct ls2x_dma_chan *lchan = dev_id;
+ struct ls2x_dma_desc *desc;
+
+ spin_lock(&lchan->vchan.lock);
+ desc = lchan->desc;
+ if (desc) {
+ if (desc->cyclic) {
+ vchan_cyclic_callback(&desc->vdesc);
+ } else {
+ desc->status = DMA_COMPLETE;
+ vchan_cookie_complete(&desc->vdesc);
+ ls2x_dma_start_transfer(lchan);
+ }
+
+ /* ls2x_dma_start_transfer() updates lchan->desc */
+ if (!lchan->desc)
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ }
+ spin_unlock(&lchan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ls2x_dma_chan_init(struct platform_device *pdev,
+ struct ls2x_dma_priv *priv)
+{
+ struct ls2x_dma_chan *lchan = &priv->lchan;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ lchan->irq = platform_get_irq(pdev, 0);
+ if (lchan->irq < 0)
+ return lchan->irq;
+
+ ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), lchan);
+ if (ret)
+ return ret;
+
+ /* Initialize channel-related values */
+ INIT_LIST_HEAD(&priv->ddev.channels);
+ lchan->vchan.desc_free = ls2x_dma_desc_free;
+ vchan_init(&lchan->vchan, &priv->ddev);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ls2x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ls2x_dma_priv *priv;
+ struct dma_device *ddev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return dev_err_probe(dev, PTR_ERR(priv->regs),
+ "devm_platform_ioremap_resource failed.\n");
+
+ priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->dma_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
+
+ ret = clk_prepare_enable(priv->dma_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
+
+ ret = ls2x_dma_chan_init(pdev, priv);
+ if (ret)
+ goto disable_clk;
+
+ ddev = &priv->ddev;
+ ddev->dev = dev;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+
+ ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = ls2x_dma_issue_pending;
+ ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
+ ddev->device_config = ls2x_dma_slave_config;
+ ddev->device_terminate_all = ls2x_dma_terminate_all;
+ ddev->device_synchronize = ls2x_dma_synchronize;
+ ddev->device_pause = ls2x_dma_pause;
+ ddev->device_resume = ls2x_dma_resume;
+
+ ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&priv->ddev);
+ if (ret < 0)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
+ if (ret < 0)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(&priv->ddev);
+disable_clk:
+ clk_disable_unprepare(priv->dma_clk);
+
+ return ret;
+}
+
+/*
+ * ls2x_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+static void ls2x_dma_remove(struct platform_device *pdev)
+{
+ struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&priv->ddev);
+ clk_disable_unprepare(priv->dma_clk);
+}
+
+static const struct of_device_id ls2x_dma_of_match_table[] = {
+ { .compatible = "loongson,ls2k1000-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
+
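+/*
+ * Illustrative device-tree node (register, interrupt and clock values
+ * are placeholders):
+ *
+ *	apbdma0: dma-controller@... {
+ *		compatible = "loongson,ls2k1000-apbdma";
+ *		reg = <...>;
+ *		interrupts = <...>;
+ *		clocks = <...>;
+ *		#dma-cells = <1>;
+ *	};
+ *
+ * With of_dma_xlate_by_chan_id() the single DMA cell selects a channel
+ * by index.
+ */
+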
+static struct platform_driver ls2x_dmac_driver = {
+ .probe = ls2x_dma_probe,
+ .remove_new = ls2x_dma_remove,
+ .driver = {
+ .name = "ls2x-apbdma",
+ .of_match_table = ls2x_dma_of_match_table,
+ },
+};
+module_platform_driver(ls2x_dmac_driver);
+
+MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
index 1b0a95892..7b41c6709 100644
--- a/drivers/dma/milbeaut-hdmac.c
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -531,7 +531,7 @@ disable_clk:
return ret;
}
-static int milbeaut_hdmac_remove(struct platform_device *pdev)
+static void milbeaut_hdmac_remove(struct platform_device *pdev)
{
struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -546,16 +546,21 @@ static int milbeaut_hdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leaks and possibly also
+ * use-after-free errors, as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
milbeaut_hdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id milbeaut_hdmac_match[] = {
@@ -566,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
static struct platform_driver milbeaut_hdmac_driver = {
.probe = milbeaut_hdmac_probe,
- .remove = milbeaut_hdmac_remove,
+ .remove_new = milbeaut_hdmac_remove,
.driver = {
.name = "milbeaut-m10v-hdmac",
.of_match_table = milbeaut_hdmac_match,
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index d29d01e73..2cce529b4 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -368,7 +368,7 @@ disable_xdmac:
return ret;
}
-static int milbeaut_xdmac_remove(struct platform_device *pdev)
+static void milbeaut_xdmac_remove(struct platform_device *pdev)
{
struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -383,8 +383,15 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leaks and possibly also
+ * use-after-free errors, as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
milbeaut_xdmac_free_chan_resources(chan);
}
@@ -392,8 +399,6 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&mdev->ddev);
disable_xdmac(mdev);
-
- return 0;
}
static const struct of_device_id milbeaut_xdmac_match[] = {
@@ -404,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
static struct platform_driver milbeaut_xdmac_driver = {
.probe = milbeaut_xdmac_probe,
- .remove = milbeaut_xdmac_remove,
+ .remove_new = milbeaut_xdmac_remove,
.driver = {
.name = "milbeaut-m10v-xdmac",
.of_match_table = milbeaut_xdmac_match,
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 4e76c4ec2..e001f4f7a 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -250,7 +250,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -274,7 +274,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 3125a2f16..428473611 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -20,10 +20,13 @@
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/slab.h>
#include "sf-pdma.h"
+#define PDMA_QUIRK_NO_STRICT_ORDERING BIT(0)
+
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
@@ -65,7 +68,7 @@ static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
u64 dst, u64 src, u64 size)
{
- desc->xfer_type = PDMA_FULL_SPEED;
+ desc->xfer_type = desc->chan->pdma->transfer_type;
desc->xfer_size = size;
desc->dst_addr = dst;
desc->src_addr = src;
@@ -492,6 +495,7 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
+ const struct sf_pdma_driver_platdata *ddata;
struct sf_pdma *pdma;
int ret, n_chans;
const enum dma_slave_buswidth widths =
@@ -517,6 +521,14 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
+ pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
+
+ ddata = device_get_match_data(&pdev->dev);
+ if (ddata) {
+ if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
+ pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
+ }
+
pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
@@ -563,7 +575,20 @@ static int sf_pdma_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id, pdma);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform OF_DMA. (%d)\n", ret);
+ goto err_unregister;
+ }
+
return 0;
+
+err_unregister:
+ dma_async_device_unregister(&pdma->dma_dev);
+
+ return ret;
}
static void sf_pdma_remove(struct platform_device *pdev)
@@ -583,12 +608,25 @@ static void sf_pdma_remove(struct platform_device *pdev)
tasklet_kill(&ch->err_tasklet);
}
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
dma_async_device_unregister(&pdma->dma_dev);
}
+static const struct sf_pdma_driver_platdata mpfs_pdma = {
+ .quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
+};
+
static const struct of_device_id sf_pdma_dt_ids[] = {
- { .compatible = "sifive,fu540-c000-pdma" },
- { .compatible = "sifive,pdma0" },
+ {
+ .compatible = "sifive,fu540-c000-pdma",
+ }, {
+ .compatible = "sifive,pdma0",
+ }, {
+ .compatible = "microchip,mpfs-pdma",
+ .data = &mpfs_pdma,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index d05772b5d..215e07183 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -48,7 +48,8 @@
#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
/* Transfer Type */
-#define PDMA_FULL_SPEED 0xFF000008
+#define PDMA_FULL_SPEED 0xFF000000
+#define PDMA_STRICT_ORDERING BIT(3)
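+/* PDMA_FULL_SPEED | PDMA_STRICT_ORDERING equals the previous 0xFF000008 value */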
/* Error Recovery */
#define MAX_RETRY 1
@@ -112,8 +113,13 @@ struct sf_pdma {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *mappedbase;
+ u32 transfer_type;
u32 n_chans;
struct sf_pdma_chan chans[] __counted_by(n_chans);
};
+struct sf_pdma_driver_platdata {
+ u32 quirks;
+};
+
#endif /* _SF_PDMA_H */
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index fea5bda34..1f1e86ba5 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -755,11 +755,11 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
static int rz_dmac_chan_probe(struct rz_dmac *dmac,
struct rz_dmac_chan *channel,
- unsigned int index)
+ u8 index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
struct rz_lmdesc *lmdesc;
- char pdev_irqname[5];
+ char pdev_irqname[6];
char *irqname;
int ret;
@@ -767,7 +767,7 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
channel->mid_rid = -EINVAL;
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (channel->irq < 0)
return channel->irq;
@@ -845,9 +845,9 @@ static int rz_dmac_probe(struct platform_device *pdev)
struct dma_device *engine;
struct rz_dmac *dmac;
int channel_num;
- unsigned int i;
int ret;
int irq;
+ u8 i;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index a9b4302f6..f7cd0cad0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -706,10 +706,10 @@ static const struct dev_pm_ops usb_dmac_pm = {
static int usb_dmac_chan_probe(struct usb_dmac *dmac,
struct usb_dmac_chan *uchan,
- unsigned int index)
+ u8 index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
- char pdev_irqname[5];
+ char pdev_irqname[6];
char *irqname;
int ret;
@@ -717,7 +717,7 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (uchan->irq < 0)
return -ENODEV;
@@ -768,8 +768,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
- unsigned int i;
int ret;
+ u8 i;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
@@ -869,7 +869,7 @@ static void usb_dmac_chan_remove(struct usb_dmac *dmac,
static void usb_dmac_remove(struct platform_device *pdev)
{
struct usb_dmac *dmac = platform_get_drvdata(pdev);
- int i;
+ u8 i;
for (i = 0; i < dmac->n_channels; ++i)
usb_dmac_chan_remove(dmac, &dmac->channels[i]);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 002833fb1..2c4892991 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -31,13 +31,11 @@
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
* @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
* which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
+ * SoftLLI introduces relink overhead that could impact performance for
* certain use cases.
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
@@ -184,7 +182,7 @@ static __maybe_unused u32 d40_backup_regs[] = {
/*
* since 9540 and 8540 has the same HW revision
- * use v4a for 9540 or ealier
+ * use v4a for 9540 or earlier
* use v4b for 8540 or later
* HW revision:
* DB8500ed has revision 0
@@ -411,7 +409,7 @@ struct d40_desc {
*
* @base: The virtual address of LCLA. 18 bit aligned.
* @dma_addr: DMA address, if mapped
- * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
+ * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
* This pointer is only there for clean-up on error.
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
@@ -1655,7 +1653,7 @@ static void dma_tasklet(struct tasklet_struct *t)
return;
check_pending_tx:
- /* Rescue manouver if receiving double interrupts */
+ /* Rescue maneuver if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -3412,7 +3410,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.base = (void *)page_list[i];
} else {
/*
- * After many attempts and no succees with finding the correct
+ * After many attempts and no success with finding the correct
* alignment, try with allocating a big buffer.
*/
dev_warn(base->dev,
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index fa4d4142a..3642508e8 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -746,6 +746,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
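+ /*
+ * A fully transferred request must be caught here: below,
+ * bytes_xfer % bytes_req would be 0 and the residual would wrongly
+ * read as bytes_req.
+ */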
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
@@ -1348,8 +1351,8 @@ static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
static int tegra_dma_probe(struct platform_device *pdev)
{
const struct tegra_dma_chip_data *cdata = NULL;
- struct iommu_fwspec *iommu_spec;
- unsigned int stream_id, i;
+ unsigned int i;
+ u32 stream_id;
struct tegra_dma *tdma;
int ret;
@@ -1378,12 +1381,10 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->dma_dev.dev = &pdev->dev;
- iommu_spec = dev_iommu_fwspec_get(&pdev->dev);
- if (!iommu_spec) {
+ if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
dev_err(&pdev->dev, "Missing iommu stream-id\n");
return -EINVAL;
}
- stream_id = iommu_spec->ids[0] & 0xffff;
ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
&tdma->chan_mask);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 7a0586633..24ad7077c 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -153,6 +153,7 @@ struct tegra_adma {
void __iomem *base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
+ unsigned long *dma_chan_mask;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -741,6 +742,10 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!tdc->tdma)
+ continue;
+
ch_reg = &tdc->ch_regs;
ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
/* skip if channel is not active */
@@ -779,6 +784,9 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!tdc->tdma)
+ continue;
ch_reg = &tdc->ch_regs;
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
@@ -867,10 +875,31 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
+ tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
+ BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!tdma->dma_chan_mask)
+ return -ENOMEM;
+
+ /* Enable all channels by default */
+ bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
+
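+ /* e.g. dma-channel-mask = <0xfffffffe> in DT leaves channel 0 reserved (bit clear) */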
+ ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
+ (u32 *)tdma->dma_chan_mask,
+ BITS_TO_U32(tdma->nr_channels));
+ if (ret < 0 && (ret != -EINVAL)) {
+ dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
+ return ret;
+ }
+
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!test_bit(i, tdma->dma_chan_mask))
+ continue;
+
tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ (cdata->ch_reg_size * i);
@@ -957,8 +986,10 @@ static void tegra_adma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
- for (i = 0; i < tdma->nr_channels; ++i)
- irq_dispose_mapping(tdma->channels[i].irq);
+ for (i = 0; i < tdma->nr_channels; ++i) {
+ if (tdma->channels[i].irq)
+ irq_dispose_mapping(tdma->channels[i].irq);
+ }
pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index acc950bf6..d376c117c 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -12,6 +12,7 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j721s2.o \
k3-psil-am62.o \
k3-psil-am62a.o \
- k3-psil-j784s4.o
+ k3-psil-j784s4.o \
+ k3-psil-am62p.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/k3-psil-am62p.c b/drivers/dma/ti/k3-psil-am62p.c
new file mode 100644
index 000000000..0f338e16d
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62p.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62p_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+ /* CSIRX 1-3 (only for J722S) */
+ PSIL_CSI2RX(0x5100),
+ PSIL_CSI2RX(0x5101),
+ PSIL_CSI2RX(0x5102),
+ PSIL_CSI2RX(0x5103),
+ PSIL_CSI2RX(0x5104),
+ PSIL_CSI2RX(0x5105),
+ PSIL_CSI2RX(0x5106),
+ PSIL_CSI2RX(0x5107),
+ PSIL_CSI2RX(0x5108),
+ PSIL_CSI2RX(0x5109),
+ PSIL_CSI2RX(0x510a),
+ PSIL_CSI2RX(0x510b),
+ PSIL_CSI2RX(0x510c),
+ PSIL_CSI2RX(0x510d),
+ PSIL_CSI2RX(0x510e),
+ PSIL_CSI2RX(0x510f),
+ PSIL_CSI2RX(0x5110),
+ PSIL_CSI2RX(0x5111),
+ PSIL_CSI2RX(0x5112),
+ PSIL_CSI2RX(0x5113),
+ PSIL_CSI2RX(0x5114),
+ PSIL_CSI2RX(0x5115),
+ PSIL_CSI2RX(0x5116),
+ PSIL_CSI2RX(0x5117),
+ PSIL_CSI2RX(0x5118),
+ PSIL_CSI2RX(0x5119),
+ PSIL_CSI2RX(0x511a),
+ PSIL_CSI2RX(0x511b),
+ PSIL_CSI2RX(0x511c),
+ PSIL_CSI2RX(0x511d),
+ PSIL_CSI2RX(0x511e),
+ PSIL_CSI2RX(0x511f),
+ PSIL_CSI2RX(0x5200),
+ PSIL_CSI2RX(0x5201),
+ PSIL_CSI2RX(0x5202),
+ PSIL_CSI2RX(0x5203),
+ PSIL_CSI2RX(0x5204),
+ PSIL_CSI2RX(0x5205),
+ PSIL_CSI2RX(0x5206),
+ PSIL_CSI2RX(0x5207),
+ PSIL_CSI2RX(0x5208),
+ PSIL_CSI2RX(0x5209),
+ PSIL_CSI2RX(0x520a),
+ PSIL_CSI2RX(0x520b),
+ PSIL_CSI2RX(0x520c),
+ PSIL_CSI2RX(0x520d),
+ PSIL_CSI2RX(0x520e),
+ PSIL_CSI2RX(0x520f),
+ PSIL_CSI2RX(0x5210),
+ PSIL_CSI2RX(0x5211),
+ PSIL_CSI2RX(0x5212),
+ PSIL_CSI2RX(0x5213),
+ PSIL_CSI2RX(0x5214),
+ PSIL_CSI2RX(0x5215),
+ PSIL_CSI2RX(0x5216),
+ PSIL_CSI2RX(0x5217),
+ PSIL_CSI2RX(0x5218),
+ PSIL_CSI2RX(0x5219),
+ PSIL_CSI2RX(0x521a),
+ PSIL_CSI2RX(0x521b),
+ PSIL_CSI2RX(0x521c),
+ PSIL_CSI2RX(0x521d),
+ PSIL_CSI2RX(0x521e),
+ PSIL_CSI2RX(0x521f),
+ PSIL_CSI2RX(0x5300),
+ PSIL_CSI2RX(0x5301),
+ PSIL_CSI2RX(0x5302),
+ PSIL_CSI2RX(0x5303),
+ PSIL_CSI2RX(0x5304),
+ PSIL_CSI2RX(0x5305),
+ PSIL_CSI2RX(0x5306),
+ PSIL_CSI2RX(0x5307),
+ PSIL_CSI2RX(0x5308),
+ PSIL_CSI2RX(0x5309),
+ PSIL_CSI2RX(0x530a),
+ PSIL_CSI2RX(0x530b),
+ PSIL_CSI2RX(0x530c),
+ PSIL_CSI2RX(0x530d),
+ PSIL_CSI2RX(0x530e),
+ PSIL_CSI2RX(0x530f),
+ PSIL_CSI2RX(0x5310),
+ PSIL_CSI2RX(0x5311),
+ PSIL_CSI2RX(0x5312),
+ PSIL_CSI2RX(0x5313),
+ PSIL_CSI2RX(0x5314),
+ PSIL_CSI2RX(0x5315),
+ PSIL_CSI2RX(0x5316),
+ PSIL_CSI2RX(0x5317),
+ PSIL_CSI2RX(0x5318),
+ PSIL_CSI2RX(0x5319),
+ PSIL_CSI2RX(0x531a),
+ PSIL_CSI2RX(0x531b),
+ PSIL_CSI2RX(0x531c),
+ PSIL_CSI2RX(0x531d),
+ PSIL_CSI2RX(0x531e),
+ PSIL_CSI2RX(0x531f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62p_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62p_ep_map = {
+ .name = "am62p",
+ .src = am62p_src_ep_map,
+ .src_count = ARRAY_SIZE(am62p_src_ep_map),
+ .dst = am62p_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62p_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index c383723d1..a577be97e 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -45,5 +45,6 @@ extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
extern struct psil_ep_map am62a_ep_map;
extern struct psil_ep_map j784s4_ep_map;
+extern struct psil_ep_map am62p_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index c11389d67..25148d952 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -26,6 +26,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am62_ep_map },
{ .family = "AM62AX", .data = &am62a_ep_map },
{ .family = "J784S4", .data = &j784s4_ep_map },
+ { .family = "AM62PX", .data = &am62p_ep_map },
+ { .family = "J722S", .data = &am62p_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 037f1408e..6400d0658 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4447,6 +4447,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am64_soc_data },
{ .family = "AM62AX", .data = &am64_soc_data },
{ .family = "J784S4", .data = &j721e_soc_data },
+ { .family = "AM62PX", .data = &am64_soc_data },
+ { .family = "J722S", .data = &am64_soc_data },
{ /* sentinel */ }
};
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index 618839df0..ad7125f6e 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -453,7 +453,7 @@ disable_clk:
return ret;
}
-static int uniphier_mdmac_remove(struct platform_device *pdev)
+static void uniphier_mdmac_remove(struct platform_device *pdev)
{
struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -468,16 +468,21 @@ static int uniphier_mdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leaks and possibly also
+ * use-after-free errors, as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
uniphier_mdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id uniphier_mdmac_match[] = {
@@ -488,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
static struct platform_driver uniphier_mdmac_driver = {
.probe = uniphier_mdmac_probe,
- .remove = uniphier_mdmac_remove,
+ .remove_new = uniphier_mdmac_remove,
.driver = {
.name = "uniphier-mio-dmac",
.of_match_table = uniphier_mdmac_match,
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 3a8ee2b17..3ce2dc2ad 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -563,7 +563,7 @@ out_unregister_dmac:
return ret;
}
-static int uniphier_xdmac_remove(struct platform_device *pdev)
+static void uniphier_xdmac_remove(struct platform_device *pdev)
{
struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
struct dma_device *ddev = &xdev->ddev;
@@ -579,15 +579,20 @@ static int uniphier_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &ddev->channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leaks and possibly also
+ * use-after-free errors, as e.g. *xdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
uniphier_xdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(ddev);
-
- return 0;
}
static const struct of_device_id uniphier_xdmac_match[] = {
@@ -598,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
.probe = uniphier_xdmac_probe,
- .remove = uniphier_xdmac_remove,
+ .remove_new = uniphier_xdmac_remove,
.driver = {
.name = "uniphier-xdmac",
.of_match_table = uniphier_xdmac_match,
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index e641a5083..6ad08878e 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -64,9 +64,10 @@ struct xdma_hw_desc {
__le64 next_desc;
};
-#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
-#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
-#define XDMA_DESC_BLOCK_ALIGN 4096
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 32
+#define XDMA_DESC_BLOCK_BOUNDARY 4096
/*
* Channel registers
@@ -76,6 +77,7 @@ struct xdma_hw_desc {
#define XDMA_CHAN_CONTROL_W1S 0x8
#define XDMA_CHAN_CONTROL_W1C 0xc
#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_STATUS_RC 0x44
#define XDMA_CHAN_COMPLETED_DESC 0x48
#define XDMA_CHAN_ALIGNMENTS 0x4c
#define XDMA_CHAN_INTR_ENABLE 0x90
@@ -101,6 +103,7 @@ struct xdma_hw_desc {
#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14)
#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
#define CHAN_CTRL_POLL_MODE_WB BIT(26)
@@ -111,8 +114,20 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
CHAN_CTRL_IE_MAGIC_STOPPED | \
CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY BIT(0)
+
+#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
+
+#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
/* bits of the channel interrupt enable mask */
#define CHAN_IM_DESC_ERROR BIT(19)
#define CHAN_IM_READ_ERROR BIT(9)
@@ -134,18 +149,6 @@ struct xdma_hw_desc {
#define XDMA_SGDMA_DESC_ADJ 0x4088
#define XDMA_SGDMA_DESC_CREDIT 0x408c
-/* bits of the SG DMA control register */
-#define XDMA_CTRL_RUN_STOP BIT(0)
-#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
-#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
-#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
-#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
-#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
-#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
-#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
-#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
-#define XDMA_CTRL_POLL_MODE_WB BIT(26)
-
/*
* interrupt registers
*/
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 2c9c72d4b..5a3a3293b 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
enum dma_transfer_direction dir;
struct dma_slave_config cfg;
u32 irq;
+ struct completion last_interrupt;
+ bool stop_requested;
};
/**
@@ -78,27 +80,31 @@ struct xdma_chan {
* @vdesc: Virtual DMA descriptor
* @chan: DMA channel pointer
* @dir: Transferring direction of the request
- * @dev_addr: Physical address on DMA device side
* @desc_blocks: Hardware descriptor blocks
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
* @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
* @periods: Number of periods in the cyclic transfer
* @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
struct xdma_chan *chan;
enum dma_transfer_direction dir;
- u64 dev_addr;
struct xdma_desc_block *desc_blocks;
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
bool cyclic;
+ bool interleaved_dma;
u32 periods;
u32 period_size;
+ u32 frames_left;
+ bool error;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
@@ -276,6 +282,7 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
sw_desc->cyclic = cyclic;
+ sw_desc->error = false;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
@@ -371,10 +378,30 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+ xchan->stop_requested = false;
+ reinit_completion(&xchan->last_interrupt);
+
return 0;
}
/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+ struct xdma_device *xdev = xchan->xdev_hdl;
+
+ /* Clear the run-stop bit to prevent any further auto-triggering */
+ return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+}
+
+/**
* xdma_alloc_channels - Detect and allocate DMA channels
* @xdev: DMA device pointer
* @dir: Channel direction
@@ -444,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
xchan->xdev_hdl = xdev;
xchan->base = base + i * XDMA_CHAN_STRIDE;
xchan->dir = dir;
+ xchan->stop_requested = false;
+ init_completion(&xchan->last_interrupt);
ret = xdma_channel_init(xchan);
if (ret)
@@ -476,6 +505,92 @@ static void xdma_issue_pending(struct dma_chan *chan)
}
/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ xdma_xfer_stop(xdma_chan);
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ xdma_chan->busy = false;
+ xdma_chan->stop_requested = true;
+ vd = vchan_next_desc(&xdma_chan->vchan);
+ if (vd) {
+ list_del(&vd->node);
+ dma_cookie_complete(&vd->tx);
+ vchan_terminate_vdesc(vd);
+ }
+ vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+ list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ int st = 0;
+
+ /* If the engine continues running, wait for the last interrupt */
+ regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+ if (st & XDMA_CHAN_STATUS_BUSY)
+ wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
+
+ vchan_synchronize(&xdma_chan->vchan);
+}
+
+/**
+ * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
+ * @sw_desc: tx descriptor state container
+ * @src_addr: Value for the ->src_addr field of the first descriptor
+ * @dst_addr: Value for the ->dst_addr field of the first descriptor
+ * @size: Total size of the contiguous memory block
+ * @filled_descs_num: Number of hardware descriptors already filled for @sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+ u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+ u32 left = size, len, desc_num = filled_descs_num;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+
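+ /*
+ * Hardware descriptors live in blocks of XDMA_DESC_ADJACENT entries;
+ * locate the block and the offset within it from desc_num.
+ */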
+ dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+ desc = dblk->virt_addr;
+ desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+ do {
+ len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(src_addr);
+ desc->dst_addr = cpu_to_le64(dst_addr);
+ if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+ desc = (++dblk)->virt_addr;
+ else
+ desc++;
+
+ src_addr += len;
+ dst_addr += len;
+ left -= len;
+ } while (left);
+
+ return desc_num - filled_descs_num;
+}
+
+/**
* xdma_prep_device_sg - prepare a descriptor for a DMA transaction
* @chan: DMA channel pointer
* @sgl: Transfer scatter gather list
@@ -491,13 +606,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct dma_async_tx_descriptor *tx_desc;
- u32 desc_num = 0, i, len, rest;
- struct xdma_desc_block *dblk;
- struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
- u64 dev_addr, *src, *dst;
+ u32 desc_num = 0, i;
+ u64 addr, dev_addr, *src, *dst;
struct scatterlist *sg;
- u64 addr;
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@@ -506,6 +618,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
+ sw_desc->cyclic = false;
+ sw_desc->interleaved_dma = false;
if (dir == DMA_MEM_TO_DEV) {
dev_addr = xdma_chan->cfg.dst_addr;
@@ -517,32 +631,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = &addr;
}
- dblk = sw_desc->desc_blocks;
- desc = dblk->virt_addr;
- desc_num = 1;
+ desc_num = 0;
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
- rest = sg_dma_len(sg);
-
- do {
- len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
- /* set hardware descriptor */
- desc->bytes = cpu_to_le32(len);
- desc->src_addr = cpu_to_le64(*src);
- desc->dst_addr = cpu_to_le64(*dst);
-
- if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
- dblk++;
- desc = dblk->virt_addr;
- } else {
- desc++;
- }
-
- desc_num++;
- dev_addr += len;
- addr += len;
- rest -= len;
- } while (rest);
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+ dev_addr += sg_dma_len(sg);
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -576,9 +669,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
struct xdma_device *xdev = xdma_chan->xdev_hdl;
unsigned int periods = size / period_size;
struct dma_async_tx_descriptor *tx_desc;
- struct xdma_desc_block *dblk;
- struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
+ u64 addr, dev_addr, *src, *dst;
+ u32 desc_num;
unsigned int i;
/*
@@ -602,22 +695,23 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
sw_desc->periods = periods;
sw_desc->period_size = period_size;
sw_desc->dir = dir;
+ sw_desc->interleaved_dma = false;
- dblk = sw_desc->desc_blocks;
- desc = dblk->virt_addr;
+ addr = address;
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
- /* fill hardware descriptor */
+ desc_num = 0;
for (i = 0; i < periods; i++) {
- desc->bytes = cpu_to_le32(period_size);
- if (dir == DMA_MEM_TO_DEV) {
- desc->src_addr = cpu_to_le64(address + i * period_size);
- desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
- } else {
- desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
- desc->dst_addr = cpu_to_le64(address + i * period_size);
- }
-
- desc++;
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -633,6 +727,57 @@ failed:
}
/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ int i;
+ u32 desc_num = 0, period_size = 0;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+ struct xdma_desc *sw_desc;
+ u64 src_addr, dst_addr;
+
+ for (i = 0; i < xt->frame_size; ++i)
+ desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = xt->dir;
+ sw_desc->interleaved_dma = true;
+ sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+ sw_desc->frames_left = xt->numf;
+ sw_desc->periods = xt->numf;
+
+ desc_num = 0;
+ src_addr = xt->src_start;
+ dst_addr = xt->dst_start;
+ for (i = 0; i < xt->frame_size; ++i) {
+ desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+ src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+ xt->sgl[i].size : 0);
+ dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+ xt->sgl[i].size : 0);
+ period_size += xt->sgl[i].size;
+ }
+ sw_desc->period_size = period_size;
+
+ tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+ if (tx_desc)
+ return tx_desc;
+
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
* @cfg: channel configuration
@@ -677,9 +822,8 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
return -EINVAL;
}
- xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
- dev, XDMA_DESC_BLOCK_SIZE,
- XDMA_DESC_BLOCK_ALIGN, 0);
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
+ XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
if (!xdma_chan->desc_pool) {
xdma_err(xdev, "unable to allocate descriptor pool");
return -ENOMEM;
@@ -706,20 +850,20 @@ static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
vd = vchan_find_desc(&xdma_chan->vchan, cookie);
- if (vd)
- desc = to_xdma_desc(vd);
- if (!desc || !desc->cyclic) {
- spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
- return ret;
- }
-
- period_idx = desc->completed_desc_num % desc->periods;
- residue = (desc->periods - period_idx) * desc->period_size;
+ if (!vd)
+ goto out;
+ desc = to_xdma_desc(vd);
+ if (desc->error) {
+ ret = DMA_ERROR;
+ } else if (desc->cyclic) {
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+ dma_set_residue(state, residue);
+ }
+out:
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
- dma_set_residue(state, residue);
-
return ret;
}
@@ -732,11 +876,15 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
struct xdma_chan *xchan = dev_id;
u32 complete_desc_num = 0;
- struct xdma_device *xdev;
- struct virt_dma_desc *vd;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct virt_dma_desc *vd, *next_vd;
struct xdma_desc *desc;
int ret;
u32 st;
+ bool repeat_tx;
+
+ if (xchan->stop_requested)
+ complete(&xchan->last_interrupt);
spin_lock(&xchan->vchan.lock);
@@ -745,47 +893,76 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
if (!vd)
goto out;
- xchan->busy = false;
+ /* Clear-on-read the status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+ if (ret)
+ goto out;
+
desc = to_xdma_desc(vd);
- xdev = xchan->xdev_hdl;
+
+ st &= XDMA_CHAN_STATUS_MASK;
+ if ((st & XDMA_CHAN_ERROR_MASK) ||
+ !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+ desc->error = true;
+ xdma_err(xdev, "channel error, status register value: 0x%x", st);
+ goto out;
+ }
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
&complete_desc_num);
if (ret)
goto out;
- if (desc->cyclic) {
- desc->completed_desc_num = complete_desc_num;
-
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
- &st);
- if (ret)
+ if (desc->interleaved_dma) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+ if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+ xdma_xfer_start(xchan);
goto out;
+ }
- regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+ /* last desc of any frame */
+ desc->frames_left--;
+ if (desc->frames_left)
+ goto out;
- vchan_cyclic_callback(vd);
- goto out;
- }
+ /* last desc of the last frame */
+ repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+ next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+ if (next_vd)
+ repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+ if (repeat_tx) {
+ desc->frames_left = desc->periods;
+ desc->completed_desc_num = 0;
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ /* start (or continue) the tx of the first desc on the vc.desc_issued list, if any */
+ xdma_xfer_start(xchan);
+ } else if (!desc->cyclic) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+
+ /* if all data blocks are transferred, remove and complete the request */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
- desc->completed_desc_num += complete_desc_num;
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
- /*
- * if all data blocks are transferred, remove and complete the request
- */
- if (desc->completed_desc_num == desc->desc_num) {
- list_del(&vd->node);
- vchan_cookie_complete(vd);
- goto out;
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+ } else {
+ desc->completed_desc_num = complete_desc_num;
+ vchan_cyclic_callback(vd);
}
- if (desc->completed_desc_num > desc->desc_num ||
- complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
- goto out;
-
- /* transfer the rest of data (SG only) */
- xdma_xfer_start(xchan);
-
out:
spin_unlock(&xchan->vchan.lock);
return IRQ_HANDLED;
@@ -1082,6 +1259,9 @@ static int xdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -1091,10 +1271,13 @@ static int xdma_probe(struct platform_device *pdev)
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+ xdev->dma_dev.device_synchronize = xdma_synchronize;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+ xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 69587d85a..eb0637d90 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -309,7 +310,7 @@ static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
- out_str_len);
+ out_str_len + 1);
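+ /* +1: snprintf() reserves one byte for the terminating NUL */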
snprintf(buf, out_str_len, "%d",
dpdma_debugfs.xilinx_dpdma_irq_done_count);
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
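All three dpdma hunks above establish a single nesting order, chan->lock outermost and chan->vchan.lock inside it, matching the lock documentation added to the struct. The same discipline sketched with pthreads (the driver itself uses spinlocks and IRQ-save variants; names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vchan_lock = PTHREAD_MUTEX_INITIALIZER;

static void issue_pending(void)
{
	pthread_mutex_lock(&chan_lock);		/* outer lock first ... */
	pthread_mutex_lock(&vchan_lock);	/* ... inner lock nested */
	/* queue_transfer() would run here with both locks held */
	pthread_mutex_unlock(&vchan_lock);	/* release in reverse order */
	pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
	issue_pending();
	puts("one global order, so no ABBA deadlock between the two paths");
	return 0;
}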
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index a4cae73f2..20607ed54 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -4,4 +4,4 @@
#
config DPLL
-        bool
+	bool
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index e72ebdf62..a6856730c 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -22,12 +22,15 @@ DEFINE_MUTEX(dpll_lock);
DEFINE_XARRAY_FLAGS(dpll_device_xa, XA_FLAGS_ALLOC);
DEFINE_XARRAY_FLAGS(dpll_pin_xa, XA_FLAGS_ALLOC);
-static u32 dpll_xa_id;
+static u32 dpll_device_xa_id;
+static u32 dpll_pin_xa_id;
#define ASSERT_DPLL_REGISTERED(d) \
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+#define ASSERT_DPLL_PIN_REGISTERED(p) \
+ WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@@ -39,6 +42,7 @@ struct dpll_pin_registration {
struct list_head list;
const struct dpll_pin_ops *ops;
void *priv;
+ void *cookie;
};
struct dpll_device *dpll_device_get_by_id(int id)
@@ -51,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
static struct dpll_pin_registration *
dpll_pin_registration_find(struct dpll_pin_ref *ref,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
list_for_each_entry(reg, &ref->registration_list, list) {
- if (reg->ops == ops && reg->priv == priv)
+ if (reg->ops == ops && reg->priv == priv &&
+ reg->cookie == cookie)
return reg;
}
return NULL;
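The new cookie widens a registration's identity from the pair (ops, priv) to a triple, so one pin registered under several parents with identical ops and priv no longer aliases to a single entry. A self-contained sketch of the matching rule (flat array instead of the xarray/list machinery):

#include <stddef.h>
#include <stdio.h>

struct reg { const void *ops, *priv, *cookie; };

static const struct reg *reg_find(const struct reg *regs, size_t n,
				  const void *ops, const void *priv,
				  const void *cookie)
{
	for (size_t i = 0; i < n; i++)
		if (regs[i].ops == ops && regs[i].priv == priv &&
		    regs[i].cookie == cookie)
			return &regs[i];
	return NULL;
}

int main(void)
{
	int ops, priv, parent_a, parent_b;
	struct reg regs[] = {
		{ &ops, &priv, &parent_a },	/* same ops/priv ... */
		{ &ops, &priv, &parent_b },	/* ... distinct parents */
	};
	const struct reg *hit = reg_find(regs, 2, &ops, &priv, &parent_b);

	/* Without the cookie, both entries would match the first lookup. */
	printf("lookup for parent_b hit entry %td\n", hit - regs);
	return 0;
}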
@@ -64,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
static int
dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -75,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -108,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -116,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
}
static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv,
+ void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -125,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
xa_for_each(xa_pins, i, ref) {
if (ref->pin != pin)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return -EINVAL;
list_del(&reg->list);
@@ -143,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
static int
dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -154,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (reg) {
refcount_inc(&ref->refcount);
return 0;
@@ -187,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
}
reg->ops = ops;
reg->priv = priv;
+ reg->cookie = cookie;
if (ref_exists)
refcount_inc(&ref->refcount);
list_add_tail(&reg->list, &ref->registration_list);
@@ -196,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
static void
dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
struct dpll_pin_registration *reg;
struct dpll_pin_ref *ref;
@@ -205,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
xa_for_each(xa_dplls, i, ref) {
if (ref->dpll != dpll)
continue;
- reg = dpll_pin_registration_find(ref, ops, priv);
+ reg = dpll_pin_registration_find(ref, ops, priv, cookie);
if (WARN_ON(!reg))
return;
list_del(&reg->list);
@@ -244,7 +254,7 @@ dpll_device_alloc(const u64 clock_id, u32 device_idx, struct module *module)
dpll->clock_id = clock_id;
dpll->module = module;
ret = xa_alloc_cyclic(&dpll_device_xa, &dpll->id, dpll, xa_limit_32b,
- &dpll_xa_id, GFP_KERNEL);
+ &dpll_device_xa_id, GFP_KERNEL);
if (ret < 0) {
kfree(dpll);
return ERR_PTR(ret);
@@ -493,7 +503,8 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
- ret = xa_alloc(&dpll_pin_xa, &pin->id, pin, xa_limit_16b, GFP_KERNEL);
+ ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
+ &dpll_pin_xa_id, GFP_KERNEL);
if (ret)
goto err_xa_alloc;
return pin;
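Switching the pin IDs from xa_alloc() to xa_alloc_cyclic() means a freed ID is not handed out again immediately: allocation resumes after the last ID used, which keeps a just-deleted pin from reappearing under the same handle to userspace. A toy model of cyclic versus lowest-free allocation (illustrative, not the XArray implementation):

#include <stdio.h>

#define NIDS 4
static int used[NIDS];
static unsigned int next_hint;

static int alloc_cyclic(void)
{
	for (unsigned int i = 0; i < NIDS; i++) {
		unsigned int id = (next_hint + i) % NIDS;

		if (!used[id]) {
			used[id] = 1;
			next_hint = id + 1;	/* keep advancing */
			return (int)id;
		}
	}
	return -1;	/* space exhausted */
}

int main(void)
{
	int a = alloc_cyclic();			/* 0 */
	int b = alloc_cyclic();			/* 1 */

	used[a] = 0;				/* delete the first pin */
	printf("freed %d while %d is busy; next id: %d\n",
	       a, b, alloc_cyclic());		/* 2, not the recycled 0 */
	return 0;
}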
@@ -506,6 +517,26 @@ err_pin_prop:
return ERR_PTR(ret);
}
+static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ rtnl_lock();
+ rcu_assign_pointer(dev->dpll_pin, dpll_pin);
+ rtnl_unlock();
+}
+
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ WARN_ON(!dpll_pin);
+ dpll_netdev_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_set);
+
+void dpll_netdev_pin_clear(struct net_device *dev)
+{
+ dpll_netdev_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_clear);
+
/**
* dpll_pin_get - find existing or create new dpll pin
* @clock_id: clock_id of creator
@@ -562,7 +593,7 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_destroy(&pin->parent_refs);
xa_erase(&dpll_pin_xa, pin->id);
dpll_pin_prop_free(&pin->prop);
- kfree(pin);
+ kfree_rcu(pin, rcu);
}
mutex_unlock(&dpll_lock);
}
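kfree_rcu() here pairs with the rcu_assign_pointer()/rcu_dereference_rtnl() accessors added above: the netdev's dpll_pin pointer is swapped atomically, and the old pin stays valid until every reader that might still hold it has left its read-side section. A userspace analogue of the publish/retire pattern using liburcu (assumed installed; build with -lurcu; names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>	/* liburcu: rcu_assign_pointer() and friends */

struct pin { int id; };

static struct pin *active_pin;

int main(void)
{
	struct pin *old, *p = malloc(sizeof(*p));

	rcu_register_thread();
	p->id = 1;
	rcu_assign_pointer(active_pin, p);	/* publish */

	rcu_read_lock();			/* reader side */
	printf("reader sees pin %d\n", rcu_dereference(active_pin)->id);
	rcu_read_unlock();

	old = active_pin;
	rcu_assign_pointer(active_pin, NULL);	/* unpublish */
	synchronize_rcu();	/* wait out readers; kfree_rcu() defers this */
	free(old);

	rcu_unregister_thread();
	return 0;
}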
@@ -570,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
static int
__dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
int ret;
- ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+ ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
if (ret)
return ret;
- ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+ ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
if (ret)
goto ref_pin_del;
xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -586,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
return ret;
ref_pin_del:
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
return ret;
}
@@ -618,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
dpll->clock_id == pin->clock_id)))
ret = -EINVAL;
else
- ret = __dpll_pin_register(dpll, pin, ops, priv);
+ ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
return ret;
@@ -627,10 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
- const struct dpll_pin_ops *ops, void *priv)
+ const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
- dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
- dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+ ASSERT_DPLL_PIN_REGISTERED(pin);
+ dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+ dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
}
@@ -655,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- __dpll_pin_unregister(dpll, pin, ops, priv);
+ __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -691,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
return -EINVAL;
mutex_lock(&dpll_lock);
- ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+ ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
if (ret)
goto unlock;
refcount_inc(&pin->refcount);
xa_for_each(&parent->dpll_refs, i, ref) {
- ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+ ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
if (ret) {
stop = i;
goto dpll_unregister;
@@ -710,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
dpll_unregister:
xa_for_each(&parent->dpll_refs, i, ref)
if (i < stop) {
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+ parent);
dpll_pin_delete_ntf(pin);
}
refcount_dec(&pin->refcount);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
unlock:
mutex_unlock(&dpll_lock);
return ret;
@@ -739,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
mutex_lock(&dpll_lock);
dpll_pin_delete_ntf(pin);
- dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+ dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
refcount_dec(&pin->refcount);
xa_for_each(&pin->dpll_refs, i, ref)
- __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+ __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
mutex_unlock(&dpll_lock);
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 717f71501..2b6d8ef1c 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -47,6 +47,7 @@ struct dpll_device {
* @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
+ * @rcu: rcu_head for kfree_rcu()
**/
struct dpll_pin {
u32 id;
@@ -57,6 +58,7 @@ struct dpll_pin {
struct xarray parent_refs;
struct dpll_pin_properties prop;
refcount_t refcount;
+ struct rcu_head rcu;
};
/**
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index c8c2e8361..b57355e0c 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <net/genetlink.h>
#include "dpll_core.h"
#include "dpll_netlink.h"
@@ -48,18 +49,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
}
/**
- * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
- * @pin: pin pointer
- *
- * Return: byte size of pin handle attribute for given pin.
- */
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
-{
- return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
-}
-EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
-
-/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
* @pin: pin pointer
@@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
* * 0 - success
* * -EMSGSIZE - no space in message to attach pin handle
*/
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
{
if (!pin)
return 0;
@@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
return -EMSGSIZE;
return 0;
}
-EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
+/**
+ * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
+ * @dev: netdev from which to get the pin
+ *
+ * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
+ */
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev)
+{
+ return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
+}
static int
dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
@@ -101,13 +111,17 @@ dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
{
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
enum dpll_mode mode;
+ int ret;
- if (!ops->mode_supported)
- return 0;
- for (mode = DPLL_MODE_MANUAL; mode <= DPLL_MODE_MAX; mode++)
- if (ops->mode_supported(dpll, dpll_priv(dpll), mode, extack))
- if (nla_put_u32(msg, DPLL_A_MODE_SUPPORTED, mode))
- return -EMSGSIZE;
+ /* No mode change is supported now, so the only supported mode is the
+ * one obtained by mode_get().
+ */
+
+ ret = ops->mode_get(dpll, dpll_priv(dpll), &mode, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_MODE_SUPPORTED, mode))
+ return -EMSGSIZE;
return 0;
}
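Since no dpll driver can change modes at runtime yet, the supported-modes dump above collapses to echoing whatever mode_get() reports instead of probing every mode through a mode_supported() op. The collapsed control flow in miniature (illustrative callback, not the dpll API):

#include <stdio.h>

enum mode { MODE_MANUAL = 1, MODE_AUTOMATIC = 2 };

static int mode_get(enum mode *mode)
{
	*mode = MODE_AUTOMATIC;		/* pretend hardware state */
	return 0;
}

static int dump_supported_modes(void)
{
	enum mode mode;
	int ret = mode_get(&mode);

	if (ret)
		return ret;
	printf("supported mode: %d\n", mode);	/* exactly one entry */
	return 0;
}

int main(void)
{
	return dump_supported_modes();
}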
@@ -259,6 +273,27 @@ dpll_msg_add_phase_offset(struct sk_buff *msg, struct dpll_pin *pin,
return 0;
}
+static int dpll_msg_add_ffo(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ s64 ffo;
+ int ret;
+
+ if (!ops->ffo_get)
+ return 0;
+ ret = ops->ffo_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ dpll, dpll_priv(dpll), &ffo, extack);
+ if (ret) {
+ if (ret == -ENODATA)
+ return 0;
+ return ret;
+ }
+ return nla_put_sint(msg, DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET, ffo);
+}
+
static int
dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
struct dpll_pin_ref *ref, struct netlink_ext_ack *extack)
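dpll_msg_add_ffo() treats -ENODATA from the driver as "attribute not applicable" and keeps dumping, reserving real error codes for aborting the message. A minimal sketch of that optional-attribute convention (errno constants reused; the dpll callbacks themselves are kernel-only):

#include <errno.h>
#include <stdio.h>

/* Pretend driver callback: even-numbered pins expose no offset. */
static int ffo_get(int pin_id, long long *ffo)
{
	if ((pin_id & 1) == 0)
		return -ENODATA;
	*ffo = -42;
	return 0;
}

static int add_ffo_attr(int pin_id)
{
	long long ffo;
	int ret = ffo_get(pin_id, &ffo);

	if (ret) {
		if (ret == -ENODATA)
			return 0;	/* optional: silently omit */
		return ret;		/* real failure: stop the dump */
	}
	printf("pin %d: ffo attribute = %lld\n", pin_id, ffo);
	return 0;
}

int main(void)
{
	for (int pin = 0; pin < 3; pin++)
		if (add_ffo_attr(pin))
			return 1;
	return 0;
}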
@@ -438,6 +473,9 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_pin_phase_adjust(msg, pin, ref, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_ffo(msg, pin, ref, extack);
+ if (ret)
+ return ret;
if (xa_empty(&pin->parent_refs))
ret = dpll_msg_add_pin_dplls(msg, pin, extack);
else
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 8b31cd54b..ae17ce4d9 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/uaccess.h>
@@ -279,7 +280,6 @@ release:
static int altr_sdram_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct edac_mc_layer layers[2];
struct mem_ctl_info *mci;
struct altr_sdram_mc_data *drvdata;
@@ -290,10 +290,6 @@ static int altr_sdram_probe(struct platform_device *pdev)
int irq, irq2, res = 0;
unsigned long mem_size, irqflags = 0;
- id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
- if (!id)
- return -ENODEV;
-
/* Grab the register range from the sdr controller in device tree */
mc_vbase = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sdr-syscon");
@@ -304,8 +300,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
}
/* Check specific dependencies for the module */
- priv = of_match_node(altr_sdram_ctrl_of_match,
- pdev->dev.of_node)->data;
+ priv = device_get_match_data(&pdev->dev);
/* Validate the SDRAM controller has ECC enabled */
if (regmap_read(mc_vbase, priv->ecc_ctrl_offset, &read_reg) ||
@@ -459,15 +454,13 @@ free:
return res;
}
-static int altr_sdram_remove(struct platform_device *pdev)
+static void altr_sdram_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
/*
@@ -489,7 +482,7 @@ static const struct dev_pm_ops altr_sdram_pm_ops = {
static struct platform_driver altr_sdram_edac_driver = {
.probe = altr_sdram_probe,
- .remove = altr_sdram_remove,
+ .remove_new = altr_sdram_remove,
.driver = {
.name = "altr_sdram_edac",
#ifdef CONFIG_PM
@@ -812,7 +805,7 @@ fail:
return res;
}
-static int altr_edac_device_remove(struct platform_device *pdev)
+static void altr_edac_device_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct altr_edac_device_dev *drvdata = dci->pvt_info;
@@ -820,13 +813,11 @@ static int altr_edac_device_remove(struct platform_device *pdev)
debugfs_remove_recursive(drvdata->debugfs_dir);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
-
- return 0;
}
static struct platform_driver altr_edac_device_driver = {
.probe = altr_edac_device_probe,
- .remove = altr_edac_device_remove,
+ .remove_new = altr_edac_device_remove,
.driver = {
.name = "altr_edac_device",
.of_match_table = altr_edac_device_of_match,
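Every EDAC conversion in this series follows the same recipe: the remove callback drops its always-ignored int return and is registered through .remove_new, the void-returning hook the platform core offers during the transition. Skeleton of the resulting shape (kernel-only sketch against 6.8 headers, illustrative names):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* tear down; errors can no longer be (pointlessly) returned */
}

static struct platform_driver demo_driver = {
	.probe		= demo_probe,
	.remove_new	= demo_remove,	/* void-returning variant */
	.driver = {
		.name	= "demo_edac",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("remove_new conversion sketch");
MODULE_LICENSE("GPL");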
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9b6642d00..537b9987a 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -996,15 +996,23 @@ static struct local_node_map {
#define LNTM_NODE_COUNT GENMASK(27, 16)
#define LNTM_BASE_NODE_ID GENMASK(11, 0)
-static int gpu_get_node_map(void)
+static int gpu_get_node_map(struct amd64_pvt *pvt)
{
struct pci_dev *pdev;
int ret;
u32 tmp;
/*
- * Node ID 0 is reserved for CPUs.
- * Therefore, a non-zero Node ID means we've already cached the values.
+ * Mapping of nodes from hardware-provided AMD Node ID to a
+ * Linux logical one is applicable for MI200 models. Therefore,
+ * return early for other heterogeneous systems.
+ */
+ if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3)
+ return 0;
+
+ /*
+ * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID
+ * means the values have been already cached.
*/
if (gpu_node_map.base_node_id)
return 0;
@@ -3851,7 +3859,7 @@ static void gpu_init_csrows(struct mem_ctl_info *mci)
dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
dimm->edac_mode = EDAC_SECDED;
- dimm->mtype = MEM_HBM2;
+ dimm->mtype = pvt->dram_type;
dimm->dtype = DEV_X16;
dimm->grain = 64;
}
@@ -3880,7 +3888,7 @@ static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
return true;
}
-static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
+static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel)
{
/*
* On CPUs, there is one channel per UMC, so UMC numbering equals
@@ -3893,13 +3901,16 @@ static inline u32 gpu_get_umc_base(u8 umc, u8 channel)
* On GPU nodes channels are selected in 3rd nibble
* HBM chX[3:0]= [Y ]5X[3:0]000;
* HBM chX[7:4]= [Y+1]5X[3:0]000
+ *
+ * On MI300 APU nodes, same as GPU nodes but channels are selected
+ * in the base address of 0x90000
*/
umc *= 2;
if (channel >= 4)
umc++;
- return 0x50000 + (umc << 20) + ((channel % 4) << 12);
+ return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12);
}
static void gpu_read_mc_regs(struct amd64_pvt *pvt)
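With the base parameterized, gpu_get_umc_base() is pure bit arithmetic: the UMC pair index lands in bits 20 and up, the channel-within-range in bits 12 and up, above a per-family base (0x50000 on MI200, 0x90000 on MI300). The arithmetic extracted into a standalone, runnable form (values as in the patch; not a register accessor):

#include <stdint.h>
#include <stdio.h>

static uint32_t umc_base(uint32_t gpu_umc_base, uint8_t umc, uint8_t channel)
{
	umc *= 2;		/* two address ranges per UMC */
	if (channel >= 4)
		umc++;		/* channels 4-7 live in the odd range */
	return gpu_umc_base + ((uint32_t)umc << 20) + ((channel % 4) << 12);
}

int main(void)
{
	printf("MI200 umc0 ch0: 0x%x\n", umc_base(0x50000, 0, 0));
	printf("MI200 umc0 ch5: 0x%x\n", umc_base(0x50000, 0, 5));
	printf("MI300 umc1 ch2: 0x%x\n", umc_base(0x90000, 1, 2));
	return 0;
}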
@@ -3910,7 +3921,7 @@ static void gpu_read_mc_regs(struct amd64_pvt *pvt)
/* Read registers from each UMC */
for_each_umc(i) {
- umc_base = gpu_get_umc_base(i, 0);
+ umc_base = gpu_get_umc_base(pvt, i, 0);
umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
@@ -3927,7 +3938,7 @@ static void gpu_read_base_mask(struct amd64_pvt *pvt)
for_each_umc(umc) {
for_each_chip_select(cs, umc, pvt) {
- base_reg = gpu_get_umc_base(umc, cs) + UMCCH_BASE_ADDR;
+ base_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_BASE_ADDR;
base = &pvt->csels[umc].csbases[cs];
if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
@@ -3935,7 +3946,7 @@ static void gpu_read_base_mask(struct amd64_pvt *pvt)
umc, cs, *base, base_reg);
}
- mask_reg = gpu_get_umc_base(umc, cs) + UMCCH_ADDR_MASK;
+ mask_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_ADDR_MASK;
mask = &pvt->csels[umc].csmasks[cs];
if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
@@ -3960,7 +3971,7 @@ static int gpu_hw_info_get(struct amd64_pvt *pvt)
{
int ret;
- ret = gpu_get_node_map();
+ ret = gpu_get_node_map(pvt);
if (ret)
return ret;
@@ -4125,6 +4136,8 @@ static int per_family_init(struct amd64_pvt *pvt)
if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
pvt->ctl_name = "MI200";
pvt->max_mcs = 4;
+ pvt->dram_type = MEM_HBM2;
+ pvt->gpu_umc_base = 0x50000;
pvt->ops = &gpu_ops;
} else {
pvt->ctl_name = "F19h_M30h";
@@ -4142,6 +4155,13 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->ctl_name = "F19h_M70h";
pvt->flags.zn_regs_v2 = 1;
break;
+ case 0x90 ... 0x9f:
+ pvt->ctl_name = "F19h_M90h";
+ pvt->max_mcs = 4;
+ pvt->dram_type = MEM_HBM3;
+ pvt->gpu_umc_base = 0x90000;
+ pvt->ops = &gpu_ops;
+ break;
case 0xa0 ... 0xaf:
pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
@@ -4180,23 +4200,33 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
+/*
+ * For heterogeneous and APU models EDAC CHIP_SELECT and CHANNEL layers
+ * should be swapped to fit into the layers.
+ */
+static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer)
+{
+ bool is_gpu = (pvt->ops == &gpu_ops);
+
+ if (!layer)
+ return is_gpu ? pvt->max_mcs
+ : pvt->csels[0].b_cnt;
+ else
+ return is_gpu ? pvt->csels[0].b_cnt
+ : pvt->max_mcs;
+}
+
static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int ret = -ENOMEM;
- /*
- * For Heterogeneous family EDAC CHIP_SELECT and CHANNEL layers should
- * be swapped to fit into the layers.
- */
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
- pvt->max_mcs : pvt->csels[0].b_cnt;
+ layers[0].size = get_layer_size(pvt, 0);
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) ?
- pvt->csels[0].b_cnt : pvt->max_mcs;
+ layers[1].size = get_layer_size(pvt, 1);
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 5a4e4a596..1665f7932 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -362,6 +362,7 @@ struct amd64_pvt {
u32 dct_sel_lo; /* DRAM Controller Select Low */
u32 dct_sel_hi; /* DRAM Controller Select High */
u32 online_spare; /* On-Line spare Reg */
+ u32 gpu_umc_base; /* Base address used for channel selection on GPUs */
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index c4bd2fb9c..25517c99b 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -5,7 +5,9 @@
#include <linux/kernel.h>
#include <linux/edac.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>
@@ -351,20 +353,18 @@ static int axp_mc_probe(struct platform_device *pdev)
return 0;
}
-static int axp_mc_remove(struct platform_device *pdev)
+static void axp_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver axp_mc_driver = {
.probe = axp_mc_probe,
- .remove = axp_mc_remove,
+ .remove_new = axp_mc_remove,
.driver = {
.name = "armada_xp_mc_edac",
.of_match_table = of_match_ptr(axp_mc_of_match),
@@ -564,7 +564,7 @@ static int aurora_l2_probe(struct platform_device *pdev)
return 0;
}
-static int aurora_l2_remove(struct platform_device *pdev)
+static void aurora_l2_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
@@ -575,13 +575,11 @@ static int aurora_l2_remove(struct platform_device *pdev)
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver aurora_l2_driver = {
.probe = aurora_l2_probe,
- .remove = aurora_l2_remove,
+ .remove_new = aurora_l2_remove,
.driver = {
.name = "aurora_l2_edac",
.of_match_table = of_match_ptr(aurora_l2_of_match),
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 6bd5f8815..157a480eb 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -357,7 +357,7 @@ probe_exit02:
}
-static int aspeed_remove(struct platform_device *pdev)
+static void aspeed_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
@@ -369,8 +369,6 @@ static int aspeed_remove(struct platform_device *pdev)
mci = edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
-
- return 0;
}
@@ -389,7 +387,7 @@ static struct platform_driver aspeed_driver = {
.of_match_table = aspeed_of_match
},
.probe = aspeed_probe,
- .remove = aspeed_remove
+ .remove_new = aspeed_remove
};
module_platform_driver(aspeed_driver);
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index e4736eb37..5b3164560 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -323,14 +323,12 @@ err:
}
-static int bluefield_edac_mc_remove(struct platform_device *pdev)
+static void bluefield_edac_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static const struct acpi_device_id bluefield_mc_acpi_ids[] = {
@@ -346,7 +344,7 @@ static struct platform_driver bluefield_edac_mc_driver = {
.acpi_match_table = bluefield_mc_acpi_ids,
},
.probe = bluefield_edac_mc_probe,
- .remove = bluefield_edac_mc_remove,
+ .remove_new = bluefield_edac_mc_remove,
};
module_platform_driver(bluefield_edac_mc_driver);
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index bc1f34164..2000f66fb 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -234,12 +234,11 @@ static int cell_edac_probe(struct platform_device *pdev)
return 0;
}
-static int cell_edac_remove(struct platform_device *pdev)
+static void cell_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
if (mci)
edac_mc_free(mci);
- return 0;
}
static struct platform_driver cell_edac_driver = {
@@ -247,7 +246,7 @@ static struct platform_driver cell_edac_driver = {
.name = "cbe-mic",
},
.probe = cell_edac_probe,
- .remove = cell_edac_remove,
+ .remove_new = cell_edac_remove,
};
static int __init cell_edac_init(void)
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 9797e6d60..5075dc752 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -1010,7 +1010,7 @@ out:
return res;
}
-static int cpc925_remove(struct platform_device *pdev)
+static void cpc925_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
@@ -1023,13 +1023,11 @@ static int cpc925_remove(struct platform_device *pdev)
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver cpc925_edac_driver = {
.probe = cpc925_probe,
- .remove = cpc925_remove,
+ .remove_new = cpc925_remove,
.driver = {
.name = "cpc925_edac",
}
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index 1fa5ca57e..4e30b989a 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -602,7 +602,7 @@ err:
return ret;
}
-static int dmc520_edac_remove(struct platform_device *pdev)
+static void dmc520_edac_remove(struct platform_device *pdev)
{
u32 reg_val, idx, irq_mask_all = 0;
struct mem_ctl_info *mci;
@@ -626,8 +626,6 @@ static int dmc520_edac_remove(struct platform_device *pdev)
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static const struct of_device_id dmc520_edac_driver_id[] = {
@@ -644,7 +642,7 @@ static struct platform_driver dmc520_edac_driver = {
},
.probe = dmc520_edac_probe,
- .remove = dmc520_edac_remove
+ .remove_new = dmc520_edac_remove
};
module_platform_driver(dmc520_edac_driver);
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
index 3f44e6b9d..7db22a4c8 100644
--- a/drivers/edac/edac_device.h
+++ b/drivers/edac/edac_device.h
@@ -176,7 +176,7 @@ struct edac_device_ctl_info {
struct edac_dev_sysfs_attribute *sysfs_attributes;
/* pointer to main 'edac' subsys in sysfs */
- struct bus_type *edac_subsys;
+ const struct bus_type *edac_subsys;
/* the internal state of this controller instance */
int op_state;
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 010c26be5..237a542e0 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -229,7 +229,7 @@ static struct kobj_type ktype_device_ctrl = {
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
struct device *dev_root;
- struct bus_type *edac_subsys;
+ const struct bus_type *edac_subsys;
int err = -ENODEV;
edac_dbg(1, "\n");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 6faeb2ab3..d6eed727b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -166,6 +166,7 @@ const char * const edac_mem_types[] = {
[MEM_NVDIMM] = "Non-volatile-RAM",
[MEM_WIO2] = "Wide-IO-2",
[MEM_HBM2] = "High-bandwidth-memory-Gen2",
+ [MEM_HBM3] = "High-bandwidth-memory-Gen3",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 32a931d0c..1c9f62382 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -67,7 +67,7 @@ char *edac_op_state_to_string(int opstate)
* sysfs object: /sys/devices/system/edac
* need to export to other files
*/
-static struct bus_type edac_subsys = {
+static const struct bus_type edac_subsys = {
.name = "edac",
.dev_name = "edac",
};
@@ -90,7 +90,7 @@ static void edac_subsys_exit(void)
}
/* return pointer to the 'edac' node in sysfs */
-struct bus_type *edac_get_sysfs_subsys(void)
+const struct bus_type *edac_get_sysfs_subsys(void)
{
return &edac_subsys;
}
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 287cc51db..7b44afcf4 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -338,7 +338,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
static int edac_pci_main_kobj_setup(void)
{
int err = -ENODEV;
- struct bus_type *edac_subsys;
+ const struct bus_type *edac_subsys;
struct device *dev_root;
edac_dbg(0, "\n");
@@ -521,7 +521,7 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev)
/* read the device TYPE, looking for bridges */
pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE)
get_pci_parity_status(dev, 1);
}
@@ -583,7 +583,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
header_type, dev_name(&dev->dev));
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* On bridges, need to examine secondary status register */
status = get_pci_parity_status(dev, 1);
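Both hunks above swap the open-coded 0x7F for PCI_HEADER_TYPE_MASK: bit 7 of the header-type byte only flags a multi-function device, so it has to be masked off before comparing against the layout codes. A self-contained illustration with the real register values:

#include <stdint.h>
#include <stdio.h>

#define PCI_HEADER_TYPE_MASK	0x7f
#define PCI_HEADER_TYPE_BRIDGE	1
#define PCI_HEADER_TYPE_MFD	0x80	/* multi-function flag, not a type */

int main(void)
{
	uint8_t header_type = PCI_HEADER_TYPE_MFD | PCI_HEADER_TYPE_BRIDGE;

	printf("raw compare:    %s\n",
	       header_type == PCI_HEADER_TYPE_BRIDGE ? "bridge" : "missed");
	printf("masked compare: %s\n",
	       (header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE
	       ? "bridge" : "missed");
	return 0;
}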
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index b81757555..d148d262d 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -612,7 +612,7 @@ err:
return res;
}
-int fsl_mc_err_remove(struct platform_device *op)
+void fsl_mc_err_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
@@ -629,5 +629,4 @@ int fsl_mc_err_remove(struct platform_device *op)
edac_mc_del_mc(&op->dev);
edac_mc_free(mci);
- return 0;
}
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
index 332439d7b..c0994a2a0 100644
--- a/drivers/edac/fsl_ddr_edac.h
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -72,5 +72,5 @@ struct fsl_mc_pdata {
int irq;
};
int fsl_mc_err_probe(struct platform_device *op);
-int fsl_mc_err_remove(struct platform_device *op);
+void fsl_mc_err_remove(struct platform_device *op);
#endif
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 140d4431b..5646c049a 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -118,18 +118,17 @@ err:
return res;
}
-static int highbank_l2_err_remove(struct platform_device *pdev)
+static void highbank_l2_err_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
- return 0;
}
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
- .remove = highbank_l2_err_remove,
+ .remove_new = highbank_l2_err_remove,
.driver = {
.name = "hb_l2_edac",
.of_match_table = hb_l2_err_of_match,
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index a0c04a7f9..1c5b888ab 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -251,18 +251,17 @@ free:
return res;
}
-static int highbank_mc_remove(struct platform_device *pdev)
+static void highbank_mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
- return 0;
}
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
- .remove = highbank_mc_remove,
+ .remove_new = highbank_mc_remove,
.driver = {
.name = "hb_mc_edac",
.of_match_table = hb_ddr_ctrl_of_match,
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 23d25724b..91e0a88ef 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -376,7 +376,7 @@ static const struct pci_id_table pci_dev_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/*
@@ -385,7 +385,7 @@ static const struct pci_id_table pci_dev_table[] = {
static const struct pci_device_id i7core_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
- {0,} /* 0 terminated list. */
+ { 0, }
};
/****************************************************************************
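Both tables end in an all-zero sentinel; the rewrite merely spells it as { NULL, } / { 0, } to match the member types instead of the bare {0,}. Why the sentinel matters, in a reduced form any such table walker shares:

#include <stdio.h>

struct id_entry { unsigned int vendor, device; };

static const struct id_entry tbl[] = {
	{ 0x8086, 0x2c41 },
	{ 0x8086, 0x2c01 },
	{ 0, }			/* sentinel terminates the walk */
};

int main(void)
{
	for (const struct id_entry *e = tbl; e->vendor; e++)
		printf("candidate %04x:%04x\n", e->vendor, e->device);
	return 0;
}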
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 1a1869329..2b0ecdeba 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -58,6 +58,7 @@
/* Capability register E */
#define CAPID_E_OFFSET 0xf0
#define CAPID_E_IBECC BIT(12)
+#define CAPID_E_IBECC_BIT18 BIT(18)
/* Error Status */
#define ERRSTS_OFFSET 0xc8
@@ -80,6 +81,7 @@
#define ECC_ERROR_LOG_UE BIT_ULL(63)
#define ECC_ERROR_LOG_ADDR_SHIFT 5
#define ECC_ERROR_LOG_ADDR(v) GET_BITFIELD(v, 5, 38)
+#define ECC_ERROR_LOG_ADDR45(v) GET_BITFIELD(v, 5, 45)
#define ECC_ERROR_LOG_SYND(v) GET_BITFIELD(v, 46, 61)
/* Host MMIO base address */
@@ -133,6 +135,8 @@ static struct res_config {
u32 ibecc_base;
u32 ibecc_error_log_offset;
bool (*ibecc_available)(struct pci_dev *pdev);
+ /* Extract error address logged in IBECC */
+ u64 (*err_addr)(u64 ecclog);
/* Convert error address logged in IBECC to system physical address */
u64 (*err_addr_to_sys_addr)(u64 eaddr, int mc);
/* Convert error address logged in IBECC to integrated memory controller address */
@@ -222,6 +226,67 @@ static struct work_struct ecclog_work;
#define DID_ADL_SKU3 0x4621
#define DID_ADL_SKU4 0x4641
+/* Compute die IDs for Alder Lake-N with IBECC */
+#define DID_ADL_N_SKU1 0x4614
+#define DID_ADL_N_SKU2 0x4617
+#define DID_ADL_N_SKU3 0x461b
+#define DID_ADL_N_SKU4 0x461c
+#define DID_ADL_N_SKU5 0x4673
+#define DID_ADL_N_SKU6 0x4674
+#define DID_ADL_N_SKU7 0x4675
+#define DID_ADL_N_SKU8 0x4677
+#define DID_ADL_N_SKU9 0x4678
+#define DID_ADL_N_SKU10 0x4679
+#define DID_ADL_N_SKU11 0x467c
+
+/* Compute die IDs for Raptor Lake-P with IBECC */
+#define DID_RPL_P_SKU1 0xa706
+#define DID_RPL_P_SKU2 0xa707
+#define DID_RPL_P_SKU3 0xa708
+#define DID_RPL_P_SKU4 0xa716
+#define DID_RPL_P_SKU5 0xa718
+
+/* Compute die IDs for Meteor Lake-PS with IBECC */
+#define DID_MTL_PS_SKU1 0x7d21
+#define DID_MTL_PS_SKU2 0x7d22
+#define DID_MTL_PS_SKU3 0x7d23
+#define DID_MTL_PS_SKU4 0x7d24
+
+/* Compute die IDs for Meteor Lake-P with IBECC */
+#define DID_MTL_P_SKU1 0x7d01
+#define DID_MTL_P_SKU2 0x7d02
+#define DID_MTL_P_SKU3 0x7d14
+
+static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
+{
+ union {
+ u64 v;
+ struct {
+ u32 v_lo;
+ u32 v_hi;
+ };
+ } u;
+
+ if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
+ igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
+ return -ENODEV;
+ }
+
+ if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
+ igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
+ return -ENODEV;
+ }
+
+ if (!(u.v & MCHBAR_EN)) {
+ igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
+ return -ENODEV;
+ }
+
+ *mchbar = MCHBAR_BASE(u.v);
+
+ return 0;
+}
+
static bool ehl_ibecc_available(struct pci_dev *pdev)
{
u32 v;
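The factored-out get_mchbar() composes a 64-bit base from two 32-bit config-space reads through an anonymous-struct union, then refuses to proceed unless the enable bit is set. The composition on its own, runnable on a little-endian host (MCHBAR_EN/MCHBAR_BASE modelled loosely on the driver's macros, alignment illustrative):

#include <inttypes.h>
#include <stdio.h>

#define MCHBAR_EN	(1ULL << 0)
#define MCHBAR_BASE(v)	((v) & ~0x7fffULL)	/* strip low control bits */

int main(void)
{
	union {
		uint64_t v;
		struct { uint32_t v_lo, v_hi; };	/* little-endian */
	} u;

	u.v_lo = 0xfed10001;	/* lower config dword, enable bit set */
	u.v_hi = 0x00000000;	/* upper config dword */

	if (!(u.v & MCHBAR_EN)) {
		puts("MCHBAR is disabled");
		return 1;
	}
	printf("MCHBAR base: 0x%" PRIx64 "\n", MCHBAR_BASE(u.v));
	return 0;
}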
@@ -272,6 +337,39 @@ static bool tgl_ibecc_available(struct pci_dev *pdev)
return !(CAPID_E_IBECC & v);
}
+static bool mtl_p_ibecc_available(struct pci_dev *pdev)
+{
+ u32 v;
+
+ if (pci_read_config_dword(pdev, CAPID_E_OFFSET, &v))
+ return false;
+
+ return !(CAPID_E_IBECC_BIT18 & v);
+}
+
+static bool mtl_ps_ibecc_available(struct pci_dev *pdev)
+{
+#define MCHBAR_MEMSS_IBECCDIS 0x13c00
+ void __iomem *window;
+ u64 mchbar;
+ u32 val;
+
+ if (get_mchbar(pdev, &mchbar))
+ return false;
+
+ window = ioremap(mchbar, MCHBAR_SIZE * 2);
+ if (!window) {
+ igen6_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", mchbar);
+ return false;
+ }
+
+ val = readl(window + MCHBAR_MEMSS_IBECCDIS);
+ iounmap(window);
+
+ /* Bit6: 1 - IBECC is disabled, 0 - IBECC isn't disabled */
+ return !GET_BITFIELD(val, 6, 6);
+}
+
static u64 mem_addr_to_sys_addr(u64 maddr)
{
if (maddr < igen6_tolud)
@@ -358,6 +456,11 @@ static u64 adl_err_addr_to_imc_addr(u64 eaddr, int mc)
return imc_addr;
}
+static u64 rpl_p_err_addr(u64 ecclog)
+{
+ return ECC_ERROR_LOG_ADDR45(ecclog);
+}
+
static struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
@@ -403,6 +506,51 @@ static struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
+static struct res_config adl_n_cfg = {
+ .machine_check = true,
+ .num_imc = 1,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x68,
+ .ibecc_available = tgl_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct res_config rpl_p_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x68,
+ .ibecc_available = tgl_ibecc_available,
+ .err_addr = rpl_p_err_addr,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct res_config mtl_ps_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = mtl_ps_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct res_config mtl_p_cfg = {
+ .machine_check = true,
+ .num_imc = 2,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = mtl_p_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
@@ -424,6 +572,29 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_SKU2), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU3), (kernel_ulong_t)&adl_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_SKU4), (kernel_ulong_t)&adl_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU1), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU2), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU3), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU4), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU5), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU6), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU7), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU8), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU4), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_RPL_P_SKU5), (kernel_ulong_t)&rpl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU1), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU2), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU3), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_PS_SKU4), (kernel_ulong_t)&mtl_ps_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_P_SKU1), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_P_SKU2), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_MTL_P_SKU3), (kernel_ulong_t)&mtl_p_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -679,8 +850,11 @@ static void ecclog_work_cb(struct work_struct *work)
llist_for_each_entry_safe(node, tmp, head, llnode) {
memset(&res, 0, sizeof(res));
- eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
- ECC_ERROR_LOG_ADDR_SHIFT;
+ if (res_cfg->err_addr)
+ eaddr = res_cfg->err_addr(node->ecclog);
+ else
+ eaddr = ECC_ERROR_LOG_ADDR(node->ecclog) <<
+ ECC_ERROR_LOG_ADDR_SHIFT;
res.mc = node->mc;
res.sys_addr = res_cfg->err_addr_to_sys_addr(eaddr, res.mc);
res.imc_addr = res_cfg->err_addr_to_imc_addr(eaddr, res.mc);
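The err_addr hook exists because Raptor Lake-P logs a wider address: the legacy decode takes bits 5..38 of the log word and shifts them up by ECC_ERROR_LOG_ADDR_SHIFT, while ECC_ERROR_LOG_ADDR45() keeps bits 5..45 as the address directly. Both extractions side by side, with GET_BITFIELD reimplemented to match the kernel helper's semantics:

#include <inttypes.h>
#include <stdio.h>

#define GET_BITFIELD(v, lo, hi) \
	(((v) & ((~0ULL >> (63 - (hi))) & ~((1ULL << (lo)) - 1))) >> (lo))

int main(void)
{
	uint64_t ecclog = 0x000003ffffffffe0ULL;	/* sample log word */

	uint64_t legacy = GET_BITFIELD(ecclog, 5, 38) << 5;	/* 34 bits */
	uint64_t wide   = GET_BITFIELD(ecclog, 5, 45);		/* 41 bits */

	printf("legacy eaddr: 0x%" PRIx64 "\n", legacy);
	printf("rpl-p  eaddr: 0x%" PRIx64 "\n", wide);
	return 0;
}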
@@ -969,22 +1143,8 @@ static int igen6_pci_setup(struct pci_dev *pdev, u64 *mchbar)
igen6_tom = u.v & GENMASK_ULL(38, 20);
- if (pci_read_config_dword(pdev, MCHBAR_OFFSET, &u.v_lo)) {
- igen6_printk(KERN_ERR, "Failed to read lower MCHBAR\n");
+ if (get_mchbar(pdev, mchbar))
goto fail;
- }
-
- if (pci_read_config_dword(pdev, MCHBAR_OFFSET + 4, &u.v_hi)) {
- igen6_printk(KERN_ERR, "Failed to read upper MCHBAR\n");
- goto fail;
- }
-
- if (!(u.v & MCHBAR_EN)) {
- igen6_printk(KERN_ERR, "MCHBAR is disabled\n");
- goto fail;
- }
-
- *mchbar = MCHBAR_BASE(u.v);
#ifdef CONFIG_EDAC_DEBUG
if (pci_read_config_dword(pdev, TOUUD_OFFSET, &u.v_lo))
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 7c5e2b3c0..d2f895033 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -27,7 +27,7 @@ MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);
static struct platform_driver fsl_ddr_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove = fsl_mc_err_remove,
+ .remove_new = fsl_mc_err_remove,
.driver = {
.name = "fsl_ddr_mc_err",
.of_match_table = fsl_ddr_mc_err_of_match,
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 9215c0678..ec8b6c9fe 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -143,482 +143,6 @@ static const char * const mc6_mce_desc[] = {
"Status Register File",
};
-/* Scalable MCA error strings */
-static const char * const smca_ls_mce_desc[] = {
- "Load queue parity error",
- "Store queue parity error",
- "Miss address buffer payload parity error",
- "Level 1 TLB parity error",
- "DC Tag error type 5",
- "DC Tag error type 6",
- "DC Tag error type 1",
- "Internal error type 1",
- "Internal error type 2",
- "System Read Data Error Thread 0",
- "System Read Data Error Thread 1",
- "DC Tag error type 2",
- "DC Data error type 1 and poison consumption",
- "DC Data error type 2",
- "DC Data error type 3",
- "DC Tag error type 4",
- "Level 2 TLB parity error",
- "PDC parity error",
- "DC Tag error type 3",
- "DC Tag error type 5",
- "L2 Fill Data error",
-};
-
-static const char * const smca_ls2_mce_desc[] = {
- "An ECC error was detected on a data cache read by a probe or victimization",
- "An ECC error or L2 poison was detected on a data cache read by a load",
- "An ECC error was detected on a data cache read-modify-write by a store",
- "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization",
- "An ECC error or poison bit mismatch was detected on a tag read by a load",
- "An ECC error or poison bit mismatch was detected on a tag read by a store",
- "An ECC error was detected on an EMEM read by a load",
- "An ECC error was detected on an EMEM read-modify-write by a store",
- "A parity error was detected in an L1 TLB entry by any access",
- "A parity error was detected in an L2 TLB entry by any access",
- "A parity error was detected in a PWC entry by any access",
- "A parity error was detected in an STQ entry by any access",
- "A parity error was detected in an LDQ entry by any access",
- "A parity error was detected in a MAB entry by any access",
- "A parity error was detected in an SCB entry state field by any access",
- "A parity error was detected in an SCB entry address field by any access",
- "A parity error was detected in an SCB entry data field by any access",
- "A parity error was detected in a WCB entry by any access",
- "A poisoned line was detected in an SCB entry by any access",
- "A SystemReadDataError error was reported on read data returned from L2 for a load",
- "A SystemReadDataError error was reported on read data returned from L2 for an SCB store",
- "A SystemReadDataError error was reported on read data returned from L2 for a WCB store",
- "A hardware assertion error was reported",
- "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access",
-};
-
-static const char * const smca_if_mce_desc[] = {
- "Op Cache Microtag Probe Port Parity Error",
- "IC Microtag or Full Tag Multi-hit Error",
- "IC Full Tag Parity Error",
- "IC Data Array Parity Error",
- "Decoupling Queue PhysAddr Parity Error",
- "L0 ITLB Parity Error",
- "L1 ITLB Parity Error",
- "L2 ITLB Parity Error",
- "BPQ Thread 0 Snoop Parity Error",
- "BPQ Thread 1 Snoop Parity Error",
- "L1 BTB Multi-Match Error",
- "L2 BTB Multi-Match Error",
- "L2 Cache Response Poison Error",
- "System Read Data Error",
- "Hardware Assertion Error",
- "L1-TLB Multi-Hit",
- "L2-TLB Multi-Hit",
- "BSR Parity Error",
- "CT MCE",
-};
-
-static const char * const smca_l2_mce_desc[] = {
- "L2M Tag Multiple-Way-Hit error",
- "L2M Tag or State Array ECC Error",
- "L2M Data Array ECC Error",
- "Hardware Assert Error",
-};
-
-static const char * const smca_de_mce_desc[] = {
- "Micro-op cache tag parity error",
- "Micro-op cache data parity error",
- "Instruction buffer parity error",
- "Micro-op queue parity error",
- "Instruction dispatch queue parity error",
- "Fetch address FIFO parity error",
- "Patch RAM data parity error",
- "Patch RAM sequencer parity error",
- "Micro-op buffer parity error",
- "Hardware Assertion MCA Error",
-};
-
-static const char * const smca_ex_mce_desc[] = {
- "Watchdog Timeout error",
- "Physical register file parity error",
- "Flag register file parity error",
- "Immediate displacement register file parity error",
- "Address generator payload parity error",
- "EX payload parity error",
- "Checkpoint queue parity error",
- "Retire dispatch queue parity error",
- "Retire status queue parity error",
- "Scheduling queue parity error",
- "Branch buffer queue parity error",
- "Hardware Assertion error",
- "Spec Map parity error",
- "Retire Map parity error",
-};
-
-static const char * const smca_fp_mce_desc[] = {
- "Physical register file (PRF) parity error",
- "Freelist (FL) parity error",
- "Schedule queue parity error",
- "NSQ parity error",
- "Retire queue (RQ) parity error",
- "Status register file (SRF) parity error",
- "Hardware assertion",
-};
-
-static const char * const smca_l3_mce_desc[] = {
- "Shadow Tag Macro ECC Error",
- "Shadow Tag Macro Multi-way-hit Error",
- "L3M Tag ECC Error",
- "L3M Tag Multi-way-hit Error",
- "L3M Data ECC Error",
- "SDP Parity Error or SystemReadDataError from XI",
- "L3 Victim Queue Parity Error",
- "L3 Hardware Assertion",
-};
-
-static const char * const smca_cs_mce_desc[] = {
- "Illegal Request",
- "Address Violation",
- "Security Violation",
- "Illegal Response",
- "Unexpected Response",
- "Request or Probe Parity Error",
- "Read Response Parity Error",
- "Atomic Request Parity Error",
- "Probe Filter ECC Error",
-};
-
-static const char * const smca_cs2_mce_desc[] = {
- "Illegal Request",
- "Address Violation",
- "Security Violation",
- "Illegal Response",
- "Unexpected Response",
- "Request or Probe Parity Error",
- "Read Response Parity Error",
- "Atomic Request Parity Error",
- "SDP read response had no match in the CS queue",
- "Probe Filter Protocol Error",
- "Probe Filter ECC Error",
- "SDP read response had an unexpected RETRY error",
- "Counter overflow error",
- "Counter underflow error",
-};
-
-static const char * const smca_pie_mce_desc[] = {
- "Hardware Assert",
- "Register security violation",
- "Link Error",
- "Poison data consumption",
- "A deferred error was detected in the DF"
-};
-
-static const char * const smca_umc_mce_desc[] = {
- "DRAM ECC error",
- "Data poison error",
- "SDP parity error",
- "Advanced peripheral bus error",
- "Address/Command parity error",
- "Write data CRC error",
- "DCQ SRAM ECC error",
- "AES SRAM ECC error",
-};
-
-static const char * const smca_umc2_mce_desc[] = {
- "DRAM ECC error",
- "Data poison error",
- "SDP parity error",
- "Reserved",
- "Address/Command parity error",
- "Write data parity error",
- "DCQ SRAM ECC error",
- "Reserved",
- "Read data parity error",
- "Rdb SRAM ECC error",
- "RdRsp SRAM ECC error",
- "LM32 MP errors",
-};
-
-static const char * const smca_pb_mce_desc[] = {
- "An ECC error in the Parameter Block RAM array",
-};
-
-static const char * const smca_psp_mce_desc[] = {
- "An ECC or parity error in a PSP RAM instance",
-};
-
-static const char * const smca_psp2_mce_desc[] = {
- "High SRAM ECC or parity error",
- "Low SRAM ECC or parity error",
- "Instruction Cache Bank 0 ECC or parity error",
- "Instruction Cache Bank 1 ECC or parity error",
- "Instruction Tag Ram 0 parity error",
- "Instruction Tag Ram 1 parity error",
- "Data Cache Bank 0 ECC or parity error",
- "Data Cache Bank 1 ECC or parity error",
- "Data Cache Bank 2 ECC or parity error",
- "Data Cache Bank 3 ECC or parity error",
- "Data Tag Bank 0 parity error",
- "Data Tag Bank 1 parity error",
- "Data Tag Bank 2 parity error",
- "Data Tag Bank 3 parity error",
- "Dirty Data Ram parity error",
- "TLB Bank 0 parity error",
- "TLB Bank 1 parity error",
- "System Hub Read Buffer ECC or parity error",
-};
-
-static const char * const smca_smu_mce_desc[] = {
- "An ECC or parity error in an SMU RAM instance",
-};
-
-static const char * const smca_smu2_mce_desc[] = {
- "High SRAM ECC or parity error",
- "Low SRAM ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "System Hub Read Buffer ECC or parity error",
- "PHY RAM ECC error",
-};
-
-static const char * const smca_mp5_mce_desc[] = {
- "High SRAM ECC or parity error",
- "Low SRAM ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
-};
-
-static const char * const smca_mpdma_mce_desc[] = {
- "Main SRAM [31:0] bank ECC or parity error",
- "Main SRAM [63:32] bank ECC or parity error",
- "Main SRAM [95:64] bank ECC or parity error",
- "Main SRAM [127:96] bank ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "Data Cache Bank A ECC or parity error",
- "Data Cache Bank B ECC or parity error",
- "Data Tag Cache Bank A ECC or parity error",
- "Data Tag Cache Bank B ECC or parity error",
- "Instruction Cache Bank A ECC or parity error",
- "Instruction Cache Bank B ECC or parity error",
- "Instruction Tag Cache Bank A ECC or parity error",
- "Instruction Tag Cache Bank B ECC or parity error",
- "System Hub Read Buffer ECC or parity error",
- "MPDMA TVF DVSEC Memory ECC or parity error",
- "MPDMA TVF MMIO Mailbox0 ECC or parity error",
- "MPDMA TVF MMIO Mailbox1 ECC or parity error",
- "MPDMA TVF Doorbell Memory ECC or parity error",
- "MPDMA TVF SDP Slave Memory 0 ECC or parity error",
- "MPDMA TVF SDP Slave Memory 1 ECC or parity error",
- "MPDMA TVF SDP Slave Memory 2 ECC or parity error",
- "MPDMA TVF SDP Master Memory 0 ECC or parity error",
- "MPDMA TVF SDP Master Memory 1 ECC or parity error",
- "MPDMA TVF SDP Master Memory 2 ECC or parity error",
- "MPDMA TVF SDP Master Memory 3 ECC or parity error",
- "MPDMA TVF SDP Master Memory 4 ECC or parity error",
- "MPDMA TVF SDP Master Memory 5 ECC or parity error",
- "MPDMA TVF SDP Master Memory 6 ECC or parity error",
- "MPDMA PTE Command FIFO ECC or parity error",
- "MPDMA PTE Hub Data FIFO ECC or parity error",
- "MPDMA PTE Internal Data FIFO ECC or parity error",
- "MPDMA PTE Command Memory DMA ECC or parity error",
- "MPDMA PTE Command Memory Internal ECC or parity error",
- "MPDMA PTE DMA Completion FIFO ECC or parity error",
- "MPDMA PTE Tablewalk Completion FIFO ECC or parity error",
- "MPDMA PTE Descriptor Completion FIFO ECC or parity error",
- "MPDMA PTE ReadOnly Completion FIFO ECC or parity error",
- "MPDMA PTE DirectWrite Completion FIFO ECC or parity error",
- "SDP Watchdog Timer expired",
-};
-
-static const char * const smca_nbio_mce_desc[] = {
- "ECC or Parity error",
- "PCIE error",
- "SDP ErrEvent error",
- "SDP Egress Poison Error",
- "IOHC Internal Poison Error",
-};
-
-static const char * const smca_pcie_mce_desc[] = {
- "CCIX PER Message logging",
- "CCIX Read Response with Status: Non-Data Error",
- "CCIX Write Response with Status: Non-Data Error",
- "CCIX Read Response with Status: Data Error",
- "CCIX Non-okay write response with data error",
-};
-
-static const char * const smca_pcie2_mce_desc[] = {
- "SDP Parity Error logging",
-};
-
-static const char * const smca_xgmipcs_mce_desc[] = {
- "Data Loss Error",
- "Training Error",
- "Flow Control Acknowledge Error",
- "Rx Fifo Underflow Error",
- "Rx Fifo Overflow Error",
- "CRC Error",
- "BER Exceeded Error",
- "Tx Vcid Data Error",
- "Replay Buffer Parity Error",
- "Data Parity Error",
- "Replay Fifo Overflow Error",
- "Replay Fifo Underflow Error",
- "Elastic Fifo Overflow Error",
- "Deskew Error",
- "Flow Control CRC Error",
- "Data Startup Limit Error",
- "FC Init Timeout Error",
- "Recovery Timeout Error",
- "Ready Serial Timeout Error",
- "Ready Serial Attempt Error",
- "Recovery Attempt Error",
- "Recovery Relock Attempt Error",
- "Replay Attempt Error",
- "Sync Header Error",
- "Tx Replay Timeout Error",
- "Rx Replay Timeout Error",
- "LinkSub Tx Timeout Error",
- "LinkSub Rx Timeout Error",
- "Rx CMD Packet Error",
-};
-
-static const char * const smca_xgmiphy_mce_desc[] = {
- "RAM ECC Error",
- "ARC instruction buffer parity error",
- "ARC data buffer parity error",
- "PHY APB error",
-};
-
-static const char * const smca_nbif_mce_desc[] = {
- "Timeout error from GMI",
- "SRAM ECC error",
- "NTB Error Event",
- "SDP Parity error",
-};
-
-static const char * const smca_sata_mce_desc[] = {
- "Parity error for port 0",
- "Parity error for port 1",
- "Parity error for port 2",
- "Parity error for port 3",
- "Parity error for port 4",
- "Parity error for port 5",
- "Parity error for port 6",
- "Parity error for port 7",
-};
-
-static const char * const smca_usb_mce_desc[] = {
- "Parity error or ECC error for S0 RAM0",
- "Parity error or ECC error for S0 RAM1",
- "Parity error or ECC error for S0 RAM2",
- "Parity error for PHY RAM0",
- "Parity error for PHY RAM1",
- "AXI Slave Response error",
-};
-
-static const char * const smca_gmipcs_mce_desc[] = {
- "Data Loss Error",
- "Training Error",
- "Replay Parity Error",
- "Rx Fifo Underflow Error",
- "Rx Fifo Overflow Error",
- "CRC Error",
- "BER Exceeded Error",
- "Tx Fifo Underflow Error",
- "Replay Buffer Parity Error",
- "Tx Overflow Error",
- "Replay Fifo Overflow Error",
- "Replay Fifo Underflow Error",
- "Elastic Fifo Overflow Error",
- "Deskew Error",
- "Offline Error",
- "Data Startup Limit Error",
- "FC Init Timeout Error",
- "Recovery Timeout Error",
- "Ready Serial Timeout Error",
- "Ready Serial Attempt Error",
- "Recovery Attempt Error",
- "Recovery Relock Attempt Error",
- "Deskew Abort Error",
- "Rx Buffer Error",
- "Rx LFDS Fifo Overflow Error",
- "Rx LFDS Fifo Underflow Error",
- "LinkSub Tx Timeout Error",
- "LinkSub Rx Timeout Error",
- "Rx CMD Packet Error",
- "LFDS Training Timeout Error",
- "LFDS FC Init Timeout Error",
- "Data Loss Error",
-};
-
-struct smca_mce_desc {
- const char * const *descs;
- unsigned int num_descs;
-};
-
-static struct smca_mce_desc smca_mce_descs[] = {
- [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) },
- [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) },
- [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) },
- [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) },
- [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) },
- [SMCA_EX] = { smca_ex_mce_desc, ARRAY_SIZE(smca_ex_mce_desc) },
- [SMCA_FP] = { smca_fp_mce_desc, ARRAY_SIZE(smca_fp_mce_desc) },
- [SMCA_L3_CACHE] = { smca_l3_mce_desc, ARRAY_SIZE(smca_l3_mce_desc) },
- [SMCA_CS] = { smca_cs_mce_desc, ARRAY_SIZE(smca_cs_mce_desc) },
- [SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) },
- [SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) },
- [SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) },
- [SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) },
- [SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) },
- [SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) },
- [SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) },
- [SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) },
- [SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) },
- [SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
- [SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) },
- [SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
- [SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
- [SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
- [SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
- /* NBIF and SHUB have the same error descriptions, for now. */
- [SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
- [SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
- [SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) },
- [SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) },
- [SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) },
- /* All the PHY bank types have the same error descriptions, for now. */
- [SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
- [SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
- [SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
-};
-
static bool f12h_mc0_mce(u16 ec, u8 xec)
{
bool ret = false;
@@ -1163,11 +687,51 @@ static void decode_mc6_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n");
}
+static const char * const smca_long_names[] = {
+ [SMCA_LS ... SMCA_LS_V2] = "Load Store Unit",
+ [SMCA_IF] = "Instruction Fetch Unit",
+ [SMCA_L2_CACHE] = "L2 Cache",
+ [SMCA_DE] = "Decode Unit",
+ [SMCA_RESERVED] = "Reserved",
+ [SMCA_EX] = "Execution Unit",
+ [SMCA_FP] = "Floating Point Unit",
+ [SMCA_L3_CACHE] = "L3 Cache",
+ [SMCA_CS ... SMCA_CS_V2] = "Coherent Slave",
+ [SMCA_PIE] = "Power, Interrupts, etc.",
+
+ /* UMC v2 is separate because both of them can exist in a single system. */
+ [SMCA_UMC] = "Unified Memory Controller",
+ [SMCA_UMC_V2] = "Unified Memory Controller v2",
+ [SMCA_PB] = "Parameter Block",
+ [SMCA_PSP ... SMCA_PSP_V2] = "Platform Security Processor",
+ [SMCA_SMU ... SMCA_SMU_V2] = "System Management Unit",
+ [SMCA_MP5] = "Microprocessor 5 Unit",
+ [SMCA_MPDMA] = "MPDMA Unit",
+ [SMCA_NBIO] = "Northbridge IO Unit",
+ [SMCA_PCIE ... SMCA_PCIE_V2] = "PCI Express Unit",
+ [SMCA_XGMI_PCS] = "Ext Global Memory Interconnect PCS Unit",
+ [SMCA_NBIF] = "NBIF Unit",
+ [SMCA_SHUB] = "System Hub Unit",
+ [SMCA_SATA] = "SATA Unit",
+ [SMCA_USB] = "USB Unit",
+ [SMCA_GMI_PCS] = "Global Memory Interconnect PCS Unit",
+ [SMCA_XGMI_PHY] = "Ext Global Memory Interconnect PHY Unit",
+ [SMCA_WAFL_PHY] = "WAFL PHY Unit",
+ [SMCA_GMI_PHY] = "Global Memory Interconnect PHY Unit",
+};
+
+static const char *smca_get_long_name(enum smca_bank_types t)
+{
+ if (t >= N_SMCA_BANK_TYPES)
+ return NULL;
+
+ return smca_long_names[t];
+}
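
Two details of the added lookup are worth noting: the ranged designators ([SMCA_LS ... SMCA_LS_V2] = ...) are a GNU C extension that lets consecutive enum values share one initializer, and the helper bounds-checks the index so an unknown bank type yields NULL rather than reading past the table. A minimal sketch of the same pattern, with hypothetical names (not from this patch):

enum bank { BANK_A, BANK_A_V2, BANK_B, N_BANK_TYPES };

static const char * const bank_names[] = {
	[BANK_A ... BANK_A_V2] = "Bank A",	/* both revisions share a name */
	[BANK_B] = "Bank B",
};

static const char *bank_name(enum bank t)
{
	if (t >= N_BANK_TYPES)	/* guard before indexing the table */
		return NULL;
	return bank_names[t];
}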
+
/* Decode errors according to Scalable MCA specification */
static void decode_smca_error(struct mce *m)
{
enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
- const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
if (bank_type >= N_SMCA_BANK_TYPES)
@@ -1178,13 +742,7 @@ static void decode_smca_error(struct mce *m)
return;
}
- ip_name = smca_get_long_name(bank_type);
-
- pr_emerg(HW_ERR "%s Ext. Error Code: %d", ip_name, xec);
-
- /* Only print the decode of valid error codes */
- if (xec < smca_mce_descs[bank_type].num_descs)
- pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
+ pr_emerg(HW_ERR "%s Ext. Error Code: %d", smca_get_long_name(bank_type), xec);
if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
xec == 0 && decode_dram_ecc)
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 2b5703e50..c1bc53f4e 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -300,7 +300,7 @@ err:
return res;
}
-static int mpc85xx_pci_err_remove(struct platform_device *op)
+static void mpc85xx_pci_err_remove(struct platform_device *op)
{
struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
@@ -312,8 +312,6 @@ static int mpc85xx_pci_err_remove(struct platform_device *op)
edac_pci_del_device(&op->dev);
edac_pci_free_ctl_info(pci);
-
- return 0;
}
static const struct platform_device_id mpc85xx_pci_err_match[] = {
@@ -325,7 +323,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
- .remove = mpc85xx_pci_err_remove,
+ .remove_new = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
@@ -591,7 +589,7 @@ err:
return res;
}
-static int mpc85xx_l2_err_remove(struct platform_device *op)
+static void mpc85xx_l2_err_remove(struct platform_device *op)
{
struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
@@ -606,7 +604,6 @@ static int mpc85xx_l2_err_remove(struct platform_device *op)
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
edac_device_del_device(&op->dev);
edac_device_free_ctl_info(edac_dev);
- return 0;
}
static const struct of_device_id mpc85xx_l2_err_of_match[] = {
@@ -630,7 +627,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
- .remove = mpc85xx_l2_err_remove,
+ .remove_new = mpc85xx_l2_err_remove,
.driver = {
.name = "mpc85xx_l2_err",
.of_match_table = mpc85xx_l2_err_of_match,
@@ -659,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct platform_driver mpc85xx_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove = fsl_mc_err_remove,
+ .remove_new = fsl_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
.of_match_table = mpc85xx_mc_err_of_match,
diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c
index 6d15c1550..2e2133b78 100644
--- a/drivers/edac/npcm_edac.c
+++ b/drivers/edac/npcm_edac.c
@@ -410,7 +410,7 @@ free_edac_mc:
return rc;
}
-static int edac_remove(struct platform_device *pdev)
+static void edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct priv_data *priv = mci->pvt_info;
@@ -426,8 +426,6 @@ static int edac_remove(struct platform_device *pdev)
regmap_write(npcm_regmap, pdata->ctl_int_mask_master,
pdata->int_mask_master_global_mask);
regmap_update_bits(npcm_regmap, pdata->ctl_ecc_en, pdata->ecc_en_mask, 0);
-
- return 0;
}
static const struct npcm_platform_data npcm750_edac = {
@@ -533,7 +531,7 @@ static struct platform_driver npcm_edac_driver = {
.of_match_table = npcm_edac_of_match,
},
.probe = edac_probe,
- .remove = edac_remove,
+ .remove_new = edac_remove,
};
module_platform_driver(npcm_edac_driver);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index c33059e9b..4015eb9af 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -184,19 +184,17 @@ err:
return -ENXIO;
}
-static int octeon_l2c_remove(struct platform_device *pdev)
+static void octeon_l2c_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *l2c = platform_get_drvdata(pdev);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(l2c);
-
- return 0;
}
static struct platform_driver octeon_l2c_driver = {
.probe = octeon_l2c_probe,
- .remove = octeon_l2c_remove,
+ .remove_new = octeon_l2c_remove,
.driver = {
.name = "octeon_l2c_edac",
}
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index aeb222ca3..18615cbcd 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -302,18 +302,17 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
return 0;
}
-static int octeon_lmc_edac_remove(struct platform_device *pdev)
+static void octeon_lmc_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
- return 0;
}
static struct platform_driver octeon_lmc_edac_driver = {
.probe = octeon_lmc_edac_probe,
- .remove = octeon_lmc_edac_remove,
+ .remove_new = octeon_lmc_edac_remove,
.driver = {
.name = "octeon_lmc_edac",
}
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index 754eced59..ea8a8e337 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -119,19 +119,18 @@ err:
return -ENXIO;
}
-static int co_cache_error_remove(struct platform_device *pdev)
+static void co_cache_error_remove(struct platform_device *pdev)
{
struct co_cache_error *p = platform_get_drvdata(pdev);
unregister_co_cache_error_notifier(&p->notifier);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(p->ed);
- return 0;
}
static struct platform_driver co_cache_error_driver = {
.probe = co_cache_error_probe,
- .remove = co_cache_error_remove,
+ .remove_new = co_cache_error_remove,
.driver = {
.name = "octeon_pc_edac",
}
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 28b238eec..108ad9493 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -87,19 +87,17 @@ err:
return res;
}
-static int octeon_pci_remove(struct platform_device *pdev)
+static void octeon_pci_remove(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
edac_pci_del_device(&pdev->dev);
edac_pci_free_ctl_info(pci);
-
- return 0;
}
static struct platform_driver octeon_pci_driver = {
.probe = octeon_pci_probe,
- .remove = octeon_pci_remove,
+ .remove_new = octeon_pci_remove,
.driver = {
.name = "octeon_pci_edac",
}
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 2b306f2cc..2afcd148f 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -16,18 +16,20 @@
* rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
*/
-#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/mmzone.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/edac.h>
-#include <linux/mmzone.h>
#include <linux/smp.h>
-#include <linux/bitmap.h>
-#include <linux/math64.h>
-#include <linux/mod_devicetable.h>
+
#include <linux/platform_data/x86/p2sb.h>
#include <asm/cpu_device_id.h>
@@ -109,7 +111,6 @@ static struct mem_ctl_info *pnd2_mci;
#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
-#define _4GB (1ul << 32)
#define PMI_ADDRESS_WIDTH 31
#define PND_MAX_PHYS_BIT 39
@@ -183,7 +184,7 @@ static int _apl_rd_reg(int port, int off, int op, u32 *data)
}
P2SB_READ(dword, P2SB_DATA_OFF, data);
- ret = (status >> 1) & 0x3;
+ ret = (status >> 1) & GENMASK(1, 0);
out:
/* Hide the P2SB device, if it was hidden before */
if (hidden)
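
GENMASK(h, l), from linux/bits.h, expands to a mask with bits h down to l set, so the new expression is bit-for-bit identical to the old literal 0x3 while naming which status bits are being extracted:

/* GENMASK(1, 0) == 0x3  -- the two response-status bits used above
 * GENMASK(7, 6) == 0xc0 -- another example of the h..l convention  */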
@@ -307,7 +308,7 @@ static bool two_channels; /* Both PMI channels in one slice enabled */
static u8 sym_chan_mask;
static u8 asym_chan_mask;
-static u8 chan_mask;
+static unsigned long chan_mask;
static int slice_selector = -1;
static int chan_selector = -1;
@@ -329,7 +330,7 @@ static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
return;
}
if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
- pr_info(FW_BUG "MOT mask not power of two\n");
+ pr_info(FW_BUG "MOT mask is invalid\n");
return;
}
if (base & ~mask) {
@@ -587,7 +588,7 @@ static int get_registers(void)
/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
- return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
+ return (sys < SZ_4G) ? sys : sys - (SZ_4G - top_lm);
}
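
The helper folds the 32-bit MMIO hole out of the system address map. A worked example, assuming (hypothetically) that low DRAM ends at top_lm = 2 GiB:

/* Map: [0, 2G) low DRAM | [2G, 4G) MMIO hole | [4G, ...) high DRAM.
 * sys = 5 GiB lies above the hole, so the contiguous DRAM address is
 * 5G - (4G - 2G) = 3 GiB; any sys below 4 GiB passes through as-is. */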
/* Squeeze out one address bit, shift upper part down to fill gap */
@@ -598,7 +599,7 @@ static void remove_addr_bit(u64 *addr, int bitidx)
if (bitidx == -1)
return;
- mask = (1ull << bitidx) - 1;
+ mask = BIT_ULL(bitidx) - 1;
*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}
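
remove_addr_bit() deletes one bit position and closes the gap: bits below bitidx stay put while bits above shift down by one. A worked example with bitidx = 2:

/* addr = 0b1101, bitidx = 2:
 *   mask        = BIT_ULL(2) - 1   = 0b011  (bits kept in place)
 *   (addr >> 1) & ~mask            = 0b0100 (upper bits pulled down)
 *   addr & mask                    = 0b001
 *   result      = 0b0100 | 0b001   = 0b0101 -- bits 3,1,0 of the input */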
@@ -642,8 +643,8 @@ static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
int sym_chan_shift = sym_channels >> 1;
/* Give up if address is out of range, or in MMIO gap */
- if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
- (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
+ if (addr >= BIT(PND_MAX_PHYS_BIT) ||
+ (addr >= top_lm && addr < SZ_4G) || addr >= top_hm) {
snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
return -EINVAL;
}
@@ -727,10 +728,10 @@ static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
}
/* Translate PMI address to memory (rank, row, bank, column) */
-#define C(n) (0x10 | (n)) /* column */
-#define B(n) (0x20 | (n)) /* bank */
-#define R(n) (0x40 | (n)) /* row */
-#define RS (0x80) /* rank */
+#define C(n) (BIT(4) | (n)) /* column */
+#define B(n) (BIT(5) | (n)) /* bank */
+#define R(n) (BIT(6) | (n)) /* row */
+#define RS (BIT(7)) /* rank */
/* addrdec values */
#define AMAP_1KB 0
@@ -1064,9 +1065,9 @@ static int apl_check_ecc_active(void)
int i, ret = 0;
/* Check dramtype and ECC mode for each present DIMM */
- for (i = 0; i < APL_NUM_CHANNELS; i++)
- if (chan_mask & BIT(i))
- ret += check_channel(i);
+ for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS)
+ ret += check_channel(i);
+
return ret ? -EINVAL : 0;
}
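
for_each_set_bit() visits only the set positions of an unsigned long bitmap, which is why chan_mask is widened from u8 to unsigned long above. A minimal sketch (hypothetical names):

#include <linux/bitops.h>
#include <linux/printk.h>

static unsigned long chan_mask = 0x5;	/* channels 0 and 2 populated */

static void walk_channels(void)
{
	unsigned int i;

	for_each_set_bit(i, &chan_mask, 8)	/* visits i = 0 and i = 2 */
		pr_info("channel %u present\n", i);
}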
@@ -1205,10 +1206,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci)
u64 capacity;
int i, g;
- for (i = 0; i < APL_NUM_CHANNELS; i++) {
- if (!(chan_mask & BIT(i)))
- continue;
-
+ for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS) {
dimm = edac_get_dimm(mci, i, 0, 0);
if (!dimm) {
edac_dbg(0, "No allocated DIMM for channel %d\n", i);
@@ -1228,8 +1226,7 @@ static void apl_get_dimm_config(struct mem_ctl_info *mci)
}
pvt->dimm_geom[i] = g;
- capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
- (1ul << dimms[g].colbits);
+ capacity = (d->rken0 + d->rken1) * 8 * BIT(dimms[g].rowbits + dimms[g].colbits);
edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
dimm->grain = 32;
@@ -1295,7 +1292,7 @@ static void dnv_get_dimm_config(struct mem_ctl_info *mci)
continue;
}
- capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
+ capacity = ranks_of_dimm[j] * banks * BIT(rowbits + colbits);
edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
dimm->grain = 32;
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 046969b4e..1eea3341a 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1329,8 +1329,7 @@ static int ppc4xx_edac_probe(struct platform_device *op)
*
* Unconditionally returns 0.
*/
-static int
-ppc4xx_edac_remove(struct platform_device *op)
+static void ppc4xx_edac_remove(struct platform_device *op)
{
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct ppc4xx_edac_pdata *pdata = mci->pvt_info;
@@ -1344,8 +1343,6 @@ ppc4xx_edac_remove(struct platform_device *op)
edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
-
- return 0;
}
/**
@@ -1379,7 +1376,7 @@ ppc4xx_edac_opstate_init(void)
static struct platform_driver ppc4xx_edac_driver = {
.probe = ppc4xx_edac_probe,
- .remove = ppc4xx_edac_remove,
+ .remove_new = ppc4xx_edac_remove,
.driver = {
.name = PPC4XX_EDAC_MODULE_NAME,
.of_match_table = ppc4xx_edac_match,
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index b2db545c6..5539917c0 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -390,14 +390,12 @@ irq_done:
return rc;
}
-static int qcom_llcc_edac_remove(struct platform_device *pdev)
+static void qcom_llcc_edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *edev_ctl = dev_get_drvdata(&pdev->dev);
edac_device_del_device(edev_ctl->dev);
edac_device_free_ctl_info(edev_ctl);
-
- return 0;
}
static const struct platform_device_id qcom_llcc_edac_id_table[] = {
@@ -408,7 +406,7 @@ MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
- .remove = qcom_llcc_edac_remove,
+ .remove_new = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 0c779a032..26cca5a93 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -439,7 +439,7 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/* This changes depending if 1HA or 2HA:
@@ -505,7 +505,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/* Haswell support */
@@ -576,7 +576,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
/* Knight's Landing Support */
@@ -620,7 +620,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
static const struct pci_id_table pci_dev_descr_knl_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
- {0,}
+ { NULL, }
};
/*
@@ -686,7 +686,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
- {0,} /* 0 terminated list. */
+ { NULL, }
};
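
These tables are walked until a sentinel entry whose first member, the pointer to the descriptor array, is NULL; writing the terminator as { NULL, } instead of {0,} makes the pointer type explicit and quiets sparse's "using plain integer as NULL pointer" warning. A hedged sketch of such a walk (hypothetical helper, assuming the first member is the descr pointer as in sb_edac):

static void probe_tables(const struct pci_id_table *t)
{
	for (; t->descr; t++)	/* stop at the { NULL, } sentinel */
		probe_one_table(t);
}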
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index ce3e0069e..9c5b6f8bd 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -648,6 +648,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
+ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) {
+ pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
+ return NOTIFY_DONE;
+ }
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index c4fc64cbe..709babce4 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -1410,7 +1410,7 @@ free_edac_mc:
*
* Return: Unconditionally 0
*/
-static int mc_remove(struct platform_device *pdev)
+static void mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct synps_edac_priv *priv = mci->pvt_info;
@@ -1425,8 +1425,6 @@ static int mc_remove(struct platform_device *pdev)
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver synps_edac_mc_driver = {
@@ -1435,7 +1433,7 @@ static struct platform_driver synps_edac_mc_driver = {
.of_match_table = synps_edac_match,
},
.probe = mc_probe,
- .remove = mc_remove,
+ .remove_new = mc_remove,
};
module_platform_driver(synps_edac_mc_driver);
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 6971ded59..29723c959 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -312,19 +312,17 @@ err:
return ret;
}
-static int ti_edac_remove(struct platform_device *pdev)
+static void ti_edac_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver ti_edac_driver = {
.probe = ti_edac_probe,
- .remove = ti_edac_remove,
+ .remove_new = ti_edac_remove,
.driver = {
.name = EDAC_MOD_NAME,
.of_match_table = ti_edac_of_match,
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 8625de20f..62caf454b 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -1005,7 +1005,7 @@ static int mc_probe(struct platform_device *pdev)
goto free_edac_mc;
}
- rc = xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+ rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR |
XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR,
false, err_callback, mci);
@@ -1042,7 +1042,7 @@ static int mc_remove(struct platform_device *pdev)
debugfs_remove_recursive(priv->debugfs);
#endif
- xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+ xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
XPM_EVENT_ERROR_MASK_DDRMC_CR |
XPM_EVENT_ERROR_MASK_NOC_CR |
XPM_EVENT_ERROR_MASK_NOC_NCR |
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index c52b9dd91..1b50f8160 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1960,7 +1960,7 @@ out_err:
return rc;
}
-static int xgene_edac_remove(struct platform_device *pdev)
+static void xgene_edac_remove(struct platform_device *pdev)
{
struct xgene_edac *edac = dev_get_drvdata(&pdev->dev);
struct xgene_edac_mc_ctx *mcu;
@@ -1981,8 +1981,6 @@ static int xgene_edac_remove(struct platform_device *pdev)
list_for_each_entry_safe(node, temp_node, &edac->socs, next)
xgene_edac_soc_remove(node);
-
- return 0;
}
static const struct of_device_id xgene_edac_of_match[] = {
@@ -1993,7 +1991,7 @@ MODULE_DEVICE_TABLE(of, xgene_edac_of_match);
static struct platform_driver xgene_edac_driver = {
.probe = xgene_edac_probe,
- .remove = xgene_edac_remove,
+ .remove_new = xgene_edac_remove,
.driver = {
.name = "xgene-edac",
.of_match_table = xgene_edac_of_match,
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
index ac7d1e0b3..2d9a5cfd8 100644
--- a/drivers/edac/zynqmp_edac.c
+++ b/drivers/edac/zynqmp_edac.c
@@ -426,7 +426,7 @@ free_dev_ctl:
return ret;
}
-static int edac_remove(struct platform_device *pdev)
+static void edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct edac_priv *priv = dci->pvt_info;
@@ -440,8 +440,6 @@ static int edac_remove(struct platform_device *pdev)
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
-
- return 0;
}
static const struct of_device_id zynqmp_ocm_edac_match[] = {
@@ -457,7 +455,7 @@ static struct platform_driver zynqmp_ocm_edac_driver = {
.of_match_table = zynqmp_ocm_edac_match,
},
.probe = edac_probe,
- .remove = edac_remove,
+ .remove_new = edac_remove,
};
module_platform_driver(zynqmp_ocm_edac_driver);
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index f72e90cec..53de581a3 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID
* and VBUS detection based on extcon-usb-gpio.c.
*
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index 4d08c2123..2eab341de 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -17,6 +17,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/role.h>
+#include <linux/irq.h>
#define TUSB320_REG8 0x8
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6)
@@ -515,6 +516,8 @@ static int tusb320_probe(struct i2c_client *client)
const void *match_data;
unsigned int revision;
int ret;
+ u32 irq_trigger_type = IRQF_TRIGGER_FALLING;
+ struct irq_data *irq_d;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -568,9 +571,13 @@ static int tusb320_probe(struct i2c_client *client)
*/
tusb320_state_update_handler(priv, true);
+ irq_d = irq_get_irq_data(client->irq);
+ if (irq_d)
+ irq_trigger_type = irqd_get_trigger_type(irq_d);
+
ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
tusb320_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_ONESHOT | irq_trigger_type,
client->name, priv);
if (ret)
tusb320_typec_remove(priv);
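
Instead of hard-coding a falling-edge trigger, the probe now honors whatever trigger the interrupt line is already configured with (typically from the device tree), keeping IRQF_TRIGGER_FALLING only as a fallback when no irq_data is available. The same pattern as a standalone sketch (hypothetical helper):

#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned long tusb_irq_flags(int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	/* honor the firmware-described trigger, else fall back */
	u32 type = d ? irqd_get_trigger_type(d) : IRQF_TRIGGER_FALLING;

	return IRQF_ONESHOT | type;
}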
diff --git a/drivers/firewire/.kunitconfig b/drivers/firewire/.kunitconfig
index 1599e0693..76444a2d5 100644
--- a/drivers/firewire/.kunitconfig
+++ b/drivers/firewire/.kunitconfig
@@ -2,3 +2,4 @@ CONFIG_KUNIT=y
CONFIG_PCI=y
CONFIG_FIREWIRE=y
CONFIG_FIREWIRE_KUNIT_UAPI_TEST=y
+CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST=y
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 0a6596b02..552a39df8 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -34,6 +34,22 @@ config FIREWIRE_KUNIT_UAPI_TEST
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
+config FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST
+ tristate "KUnit tests for device attributes" if !KUNIT_ALL_TESTS
+ depends on FIREWIRE && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the KUnit tests for the device attributes of node and
+ unit devices.
+
+ KUnit tests run during boot and output the results to the debug
+ log in TAP format (https://testanything.org/). They are only useful
+ for kernel developers running the KUnit test harness and are not
+ intended for inclusion in a production build.
+
+ For more information on KUnit and unit tests in general, refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE && MMU
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index da8a4c8f2..7d3346b3a 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -31,6 +31,8 @@
#include "core.h"
+#define ROOT_DIR_OFFSET 5
+
void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
@@ -47,6 +49,22 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
}
EXPORT_SYMBOL(fw_csr_iterator_next);
+static const u32 *search_directory(const u32 *directory, int search_key)
+{
+ struct fw_csr_iterator ci;
+ int key, value;
+
+ search_key |= CSR_DIRECTORY;
+
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (key == search_key)
+ return ci.p - 1 + value;
+ }
+
+ return NULL;
+}
+
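
In a 1394 configuration ROM, a directory entry stores its target as a quadlet offset relative to the entry's own position; since fw_csr_iterator_next() leaves ci.p one past the entry just returned, ci.p - 1 + value points at the first quadlet of the referenced directory. A hedged usage sketch:

/* Look for an optional Vendor directory below the root directory;
 * NULL means the ROM uses the flat, non-legacy layout. */
const u32 *vendor_dir = search_directory(root_directory, CSR_VENDOR);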
static const u32 *search_leaf(const u32 *directory, int search_key)
{
struct fw_csr_iterator ci;
@@ -134,8 +152,25 @@ static void get_ids(const u32 *directory, int *id)
static void get_modalias_ids(const struct fw_unit *unit, int *id)
{
- get_ids(&fw_parent_device(unit)->config_rom[5], id);
- get_ids(unit->directory, id);
+ const u32 *root_directory = &fw_parent_device(unit)->config_rom[ROOT_DIR_OFFSET];
+ const u32 *directories[] = {NULL, NULL, NULL};
+ const u32 *vendor_directory;
+ int i;
+
+ directories[0] = root_directory;
+
+ // Legacy layout of configuration ROM described in Annex 1 of 'Configuration ROM for AV/C
+ // Devices 1.0 (December 12, 2000, 1394 Trading Association, TA Document 1999027)'.
+ vendor_directory = search_directory(root_directory, CSR_VENDOR);
+ if (!vendor_directory) {
+ directories[1] = unit->directory;
+ } else {
+ directories[1] = vendor_directory;
+ directories[2] = unit->directory;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i)
+ get_ids(directories[i], id);
}
static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
@@ -170,7 +205,7 @@ static const struct ieee1394_device_id *unit_match(struct device *dev,
return NULL;
}
-static bool is_fw_unit(struct device *dev);
+static bool is_fw_unit(const struct device *dev);
static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
@@ -218,7 +253,7 @@ static int fw_unit_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct bus_type fw_bus_type = {
+const struct bus_type fw_bus_type = {
.name = "firewire",
.match = fw_unit_match,
.probe = fw_unit_probe,
@@ -250,27 +285,44 @@ static ssize_t show_immediate(struct device *dev,
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
- const u32 *dir;
- int key, value, ret = -ENOENT;
+ const u32 *directories[] = {NULL, NULL};
+ int i, value = -1;
down_read(&fw_device_rwsem);
- if (is_fw_unit(dev))
- dir = fw_unit(dev)->directory;
- else
- dir = fw_device(dev)->config_rom + 5;
+ if (is_fw_unit(dev)) {
+ directories[0] = fw_unit(dev)->directory;
+ } else {
+ const u32 *root_directory = fw_device(dev)->config_rom + ROOT_DIR_OFFSET;
+ const u32 *vendor_directory = search_directory(root_directory, CSR_VENDOR);
- fw_csr_iterator_init(&ci, dir);
- while (fw_csr_iterator_next(&ci, &key, &value))
- if (attr->key == key) {
- ret = snprintf(buf, buf ? PAGE_SIZE : 0,
- "0x%06x\n", value);
- break;
+ if (!vendor_directory) {
+ directories[0] = root_directory;
+ } else {
+ // Legacy layout of configuration ROM described in Annex 1 of
+ // 'Configuration ROM for AV/C Devices 1.0 (December 12, 2000, 1394 Trading
+ // Association, TA Document 1999027)'.
+ directories[0] = vendor_directory;
+ directories[1] = root_directory;
}
+ }
+
+ for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
+ int key, val;
+
+ fw_csr_iterator_init(&ci, directories[i]);
+ while (fw_csr_iterator_next(&ci, &key, &val)) {
+ if (attr->key == key)
+ value = val;
+ }
+ }
up_read(&fw_device_rwsem);
- return ret;
+ if (value < 0)
+ return -ENOENT;
+
+ return snprintf(buf, buf ? PAGE_SIZE : 0, "0x%06x\n", value);
}
#define IMMEDIATE_ATTR(name, key) \
@@ -281,17 +333,29 @@ static ssize_t show_text_leaf(struct device *dev,
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
- const u32 *dir;
+ const u32 *directories[] = {NULL, NULL};
size_t bufsize;
char dummy_buf[2];
- int ret;
+ int i, ret = -ENOENT;
down_read(&fw_device_rwsem);
- if (is_fw_unit(dev))
- dir = fw_unit(dev)->directory;
- else
- dir = fw_device(dev)->config_rom + 5;
+ if (is_fw_unit(dev)) {
+ directories[0] = fw_unit(dev)->directory;
+ } else {
+ const u32 *root_directory = fw_device(dev)->config_rom + ROOT_DIR_OFFSET;
+ const u32 *vendor_directory = search_directory(root_directory, CSR_VENDOR);
+
+ if (!vendor_directory) {
+ directories[0] = root_directory;
+ } else {
+ // Legacy layout of configuration ROM described in Annex 1 of
+ // 'Configuration ROM for AV/C Devices 1.0 (December 12, 2000, 1394
+ // Trading Association, TA Document 1999027)'.
+ directories[0] = root_directory;
+ directories[1] = vendor_directory;
+ }
+ }
if (buf) {
bufsize = PAGE_SIZE - 1;
@@ -300,7 +364,21 @@ static ssize_t show_text_leaf(struct device *dev,
bufsize = 1;
}
- ret = fw_csr_string(dir, attr->key, buf, bufsize);
+ for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
+ int result = fw_csr_string(directories[i], attr->key, buf, bufsize);
+ // Detected.
+ if (result >= 0) {
+ ret = result;
+ } else if (i == 0 && attr->key == CSR_VENDOR) {
+ // Sony DVMC-DA1 has configuration ROM such that the descriptor leaf entry
+ // in the root directory follows to the directory entry for vendor ID
+ // instead of the immediate value for vendor ID.
+ result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf,
+ bufsize);
+ if (result >= 0)
+ ret = result;
+ }
+ }
if (ret >= 0) {
/* Strip trailing whitespace and add newline. */
@@ -445,7 +523,7 @@ static ssize_t units_show(struct device *dev,
int key, value, i = 0;
down_read(&fw_device_rwsem);
- fw_csr_iterator_init(&ci, &device->config_rom[5]);
+ fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
continue;
@@ -678,7 +756,7 @@ static struct device_type fw_unit_type = {
.release = fw_unit_release,
};
-static bool is_fw_unit(struct device *dev)
+static bool is_fw_unit(const struct device *dev)
{
return dev->type == &fw_unit_type;
}
@@ -690,7 +768,7 @@ static void create_units(struct fw_device *device)
int key, value, i;
i = 0;
- fw_csr_iterator_init(&ci, &device->config_rom[5]);
+ fw_csr_iterator_init(&ci, &device->config_rom[ROOT_DIR_OFFSET]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
continue;
@@ -834,7 +912,7 @@ static struct device_type fw_device_type = {
.release = fw_device_release,
};
-static bool is_fw_device(struct device *dev)
+static bool is_fw_device(const struct device *dev)
{
return dev->type == &fw_device_type;
}
@@ -1307,3 +1385,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
}
}
+
+#ifdef CONFIG_FIREWIRE_KUNIT_DEVICE_ATTRIBUTE_TEST
+#include "device-attribute-test.c"
+#endif
diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
new file mode 100644
index 000000000..2f123c6b0
--- /dev/null
+++ b/drivers/firewire/device-attribute-test.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// device-attribute-test.c - An application of KUnit to test the implementation of device attributes.
+//
+// Copyright (c) 2023 Takashi Sakamoto
+//
+// This file cannot be built independently since it is intentionally included in core-device.c.
+
+#include <kunit/test.h>
+
+// Configuration ROM for AV/C Devices 1.0 (Dec. 12, 2000, 1394 Trading Association)
+// Annex C: Configuration ROM example (informative)
+// C.1 Simple AV/C device
+//
+// Copied from the documentation.
+static const u32 simple_avc_config_rom[] = {
+ 0x0404eabf,
+ 0x31333934,
+ 0xe0646102,
+ 0xffffffff,
+ 0xffffffff,
+ 0x00063287, // root directory.
+ 0x03ffffff,
+ 0x8100000a,
+ 0x17ffffff,
+ 0x8100000e,
+ 0x0c0083c0,
+ 0xd1000001,
+ 0x0004442d, // unit 0 directory.
+ 0x1200a02d,
+ 0x13010001,
+ 0x17ffffff,
+ 0x81000007,
+ 0x0005c915, // leaf for textual descriptor.
+ 0x00000000,
+ 0x00000000,
+ 0x56656e64,
+ 0x6f72204e,
+ 0x616d6500,
+ 0x00057f16, // leaf for textual descriptor.
+ 0x00000000,
+ 0x00000000,
+ 0x4d6f6465,
+ 0x6c204e61,
+ 0x6d650000,
+};
+
+// Ibid.
+// Annex A: Consideration for configuration ROM reader design (informative)
+// A.1 Vendor directory
+//
+// Written by hand.
+static const u32 legacy_avc_config_rom[] = {
+ 0x04199fe7,
+ 0x31333934,
+ 0xe0644000,
+ 0x00112233,
+ 0x44556677,
+ 0x0005dace, // root directory.
+ 0x03012345,
+ 0x0c0083c0,
+ 0x8d000009,
+ 0xd1000002,
+ 0xc3000004,
+ 0x0002e107, // unit 0 directory.
+ 0x12abcdef,
+ 0x13543210,
+ 0x0002cb73, // vendor directory.
+ 0x17fedcba,
+ 0x81000004,
+ 0x00026dc1, // leaf for EUI-64.
+ 0x00112233,
+ 0x44556677,
+ 0x00050e84, // leaf for textual descriptor.
+ 0x00000000,
+ 0x00000000,
+ 0x41424344,
+ 0x45464748,
+ 0x494a0000,
+};
+
+static void device_attr_simple_avc(struct kunit *test)
+{
+ static const struct fw_device node = {
+ .device = {
+ .type = &fw_device_type,
+ },
+ .config_rom = simple_avc_config_rom,
+ .config_rom_length = sizeof(simple_avc_config_rom),
+ };
+ static const struct fw_unit unit0 = {
+ .device = {
+ .type = &fw_unit_type,
+ .parent = (struct device *)&node.device,
+ },
+ .directory = &simple_avc_config_rom[12],
+ };
+ struct device *node_dev = (struct device *)&node.device;
+ struct device *unit0_dev = (struct device *)&unit0.device;
+ static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
+ char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ int ids[4] = {0, 0, 0, 0};
+
+ // Ensure associations for node and unit devices.
+
+ KUNIT_ASSERT_TRUE(test, is_fw_device(node_dev));
+ KUNIT_ASSERT_FALSE(test, is_fw_unit(node_dev));
+ KUNIT_ASSERT_PTR_EQ(test, fw_device(node_dev), &node);
+
+ KUNIT_ASSERT_FALSE(test, is_fw_device(unit0_dev));
+ KUNIT_ASSERT_TRUE(test, is_fw_unit(unit0_dev));
+ KUNIT_ASSERT_PTR_EQ(test, fw_parent_device((&unit0)), &node);
+ KUNIT_ASSERT_PTR_EQ(test, fw_unit(unit0_dev), &unit0);
+
+ // For entries in root directory.
+
+ // Vendor immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[0].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0xffffff\n");
+
+ // Model immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[4].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0xffffff\n");
+
+ // Descriptor leaf entry for vendor is found.
+ KUNIT_EXPECT_GT(test, show_text_leaf(node_dev, &config_rom_attributes[5].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "Vendor Name\n");
+
+ // Descriptor leaf entry for model is found.
+ KUNIT_EXPECT_GT(test, show_text_leaf(node_dev, &config_rom_attributes[6].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "Model Name\n");
+
+ // For entries in unit 0 directory.
+
+ // Vendor immediate entry is not found.
+ KUNIT_EXPECT_LT(test, show_immediate(unit0_dev, &config_rom_attributes[0].attr, buf), 0);
+
+ // Model immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[4].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0xffffff\n");
+
+ // Descriptor leaf entry for vendor is not found.
+ KUNIT_EXPECT_LT(test, show_text_leaf(unit0_dev, &config_rom_attributes[5].attr, buf), 0);
+
+ // Descriptor leaf entry for model is found.
+ KUNIT_EXPECT_GT(test, show_text_leaf(unit0_dev, &config_rom_attributes[6].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "Model Name\n");
+
+ // Specifier_ID immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[2].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0x00a02d\n");
+
+ // Version immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[3].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0x010001\n");
+
+ kunit_kfree(test, buf);
+
+ get_modalias_ids(&unit0, ids);
+ KUNIT_EXPECT_MEMEQ(test, ids, unit0_expected_ids, sizeof(ids));
+}
+
+static void device_attr_legacy_avc(struct kunit *test)
+{
+ static const struct fw_device node = {
+ .device = {
+ .type = &fw_device_type,
+ },
+ .config_rom = legacy_avc_config_rom,
+ .config_rom_length = sizeof(legacy_avc_config_rom),
+ };
+ static const struct fw_unit unit0 = {
+ .device = {
+ .type = &fw_unit_type,
+ .parent = (struct device *)&node.device,
+ },
+ .directory = &legacy_avc_config_rom[11],
+ };
+ struct device *node_dev = (struct device *)&node.device;
+ struct device *unit0_dev = (struct device *)&unit0.device;
+ static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
+ char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ int ids[4] = {0, 0, 0, 0};
+
+ // Ensure associations for node and unit devices.
+
+ KUNIT_ASSERT_TRUE(test, is_fw_device(node_dev));
+ KUNIT_ASSERT_FALSE(test, is_fw_unit(node_dev));
+ KUNIT_ASSERT_PTR_EQ(test, fw_device((node_dev)), &node);
+
+ KUNIT_ASSERT_FALSE(test, is_fw_device(unit0_dev));
+ KUNIT_ASSERT_TRUE(test, is_fw_unit(unit0_dev));
+ KUNIT_ASSERT_PTR_EQ(test, fw_parent_device((&unit0)), &node);
+ KUNIT_ASSERT_PTR_EQ(test, fw_unit(unit0_dev), &unit0);
+
+ // For entries in root directory.
+
+ // Vendor immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[0].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0x012345\n");
+
+ // Model immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(node_dev, &config_rom_attributes[4].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0xfedcba\n");
+
+ // Descriptor leaf entry for vendor is not found.
+ KUNIT_EXPECT_LT(test, show_text_leaf(node_dev, &config_rom_attributes[5].attr, buf), 0);
+
+ // Descriptor leaf entry for model is found.
+ KUNIT_EXPECT_GT(test, show_text_leaf(node_dev, &config_rom_attributes[6].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "ABCDEFGHIJ\n");
+
+ // For entries in unit 0 directory.
+
+ // Vendor immediate entry is not found.
+ KUNIT_EXPECT_LT(test, show_immediate(unit0_dev, &config_rom_attributes[0].attr, buf), 0);
+
+ // Model immediate entry is not found.
+ KUNIT_EXPECT_LT(test, show_immediate(unit0_dev, &config_rom_attributes[4].attr, buf), 0);
+
+ // Descriptor leaf entry for vendor is not found.
+ KUNIT_EXPECT_LT(test, show_text_leaf(unit0_dev, &config_rom_attributes[5].attr, buf), 0);
+
+ // Descriptor leaf entry for model is not found.
+ KUNIT_EXPECT_LT(test, show_text_leaf(unit0_dev, &config_rom_attributes[6].attr, buf), 0);
+
+ // Specifier_ID immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[2].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0xabcdef\n");
+
+ // Version immediate entry is found.
+ KUNIT_EXPECT_GT(test, show_immediate(unit0_dev, &config_rom_attributes[3].attr, buf), 0);
+ KUNIT_EXPECT_STREQ(test, buf, "0x543210\n");
+
+ kunit_kfree(test, buf);
+
+ get_modalias_ids(&unit0, ids);
+ KUNIT_EXPECT_MEMEQ(test, ids, unit0_expected_ids, sizeof(ids));
+}
+
+static struct kunit_case device_attr_test_cases[] = {
+ KUNIT_CASE(device_attr_simple_avc),
+ KUNIT_CASE(device_attr_legacy_avc),
+ {}
+};
+
+static struct kunit_suite device_attr_test_suite = {
+ .name = "firewire-device-attribute",
+ .test_cases = device_attr_test_cases,
+};
+kunit_test_suite(device_attr_test_suite);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 4a98a859d..afd38539b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -28,15 +28,6 @@ config ARM_SCPI_PROTOCOL
This protocol library provides interface for all the client drivers
making use of the features offered by the SCP.
-config ARM_SCPI_POWER_DOMAIN
- tristate "SCPI power domain driver"
- depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
- default y
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the SCPI power domains which can be
- enabled or disabled via the SCP firmware
-
config ARM_SDE_INTERFACE
bool "ARM Software Delegated Exception Interface (SDEI)"
depends on ARM64
@@ -272,6 +263,7 @@ source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/imx/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
source "drivers/firmware/smccc/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5f9dab82e..7a8d486e7 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -3,7 +3,6 @@
# Makefile for the linux kernel.
#
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
-obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
@@ -28,6 +27,7 @@ obj-y += arm_scmi/
obj-y += broadcom/
obj-y += cirrus/
obj-y += meson/
+obj-y += microchip/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-y += efi/
obj-y += imx/
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 0ea1dd6e5..9bc2e1038 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -107,12 +107,12 @@ struct ffa_drv_info {
struct work_struct notif_pcpu_work;
struct work_struct irq_work;
struct xarray partition_info;
- unsigned int partition_count;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
};
static struct ffa_drv_info *drv_info;
+static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
part_id = packed_id_list[ids_processed++];
- if (!ids_count[list]) { /* Global Notification */
+ if (ids_count[list] == 1) { /* Global Notification */
__do_sched_recv_cb(part_id, 0, false);
continue;
}
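
In the FF-A notification-info layout each ID list begins with a partition ID, and any further entries are vCPU IDs for per-vCPU notifications; a count of 1 therefore means a global notification, whereas the old !ids_count[list] test compared against a count that can never be zero. Schematically:

/* ids_count[list] == 1 : [ part_id ]                       -> global
 * ids_count[list] == n : [ part_id, vcpu_0 .. vcpu_(n-2) ] -> per-vCPU */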
@@ -1196,9 +1196,9 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
kfree(pbuf);
}
-static void ffa_setup_partitions(void)
+static int ffa_setup_partitions(void)
{
- int count, idx;
+ int count, idx, ret;
uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
@@ -1207,7 +1207,7 @@ static void ffa_setup_partitions(void)
count = ffa_partition_probe(&uuid_null, &pbuf);
if (count <= 0) {
pr_info("%s: No partitions found, error %d\n", __func__, count);
- return;
+ return -EINVAL;
}
xa_init(&drv_info->partition_info);
@@ -1237,41 +1237,52 @@ static void ffa_setup_partitions(void)
continue;
}
rwlock_init(&info->rw_lock);
- xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
+ ret = xa_insert(&drv_info->partition_info, tpbuf->id,
+ info, GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
+ __func__, tpbuf->id, ret);
+ ffa_device_unregister(ffa_dev);
+ kfree(info);
+ }
}
- drv_info->partition_count = count;
kfree(pbuf);
/* Allocate for the host */
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
+ if (!info) {
+ pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
+ __func__, drv_info->vm_id);
+ /* Already registered devices are freed on bus_exit */
+ ffa_partitions_cleanup();
+ return -ENOMEM;
+ }
+
rwlock_init(&info->rw_lock);
- xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
- drv_info->partition_count++;
+ ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
+ info, GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
+ __func__, drv_info->vm_id, ret);
+ kfree(info);
+ /* Already registered devices are freed on bus_exit */
+ ffa_partitions_cleanup();
+ }
+
+ return ret;
}
static void ffa_partitions_cleanup(void)
{
- struct ffa_dev_part_info **info;
- int idx, count = drv_info->partition_count;
-
- if (!count)
- return;
-
- info = kcalloc(count, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
- count, XA_PRESENT);
+ struct ffa_dev_part_info *info;
+ unsigned long idx;
- for (idx = 0; idx < count; idx++)
- kfree(info[idx]);
- kfree(info);
+ xa_for_each(&drv_info->partition_info, idx, info) {
+ xa_erase(&drv_info->partition_info, idx);
+ kfree(info);
+ }
- drv_info->partition_count = 0;
xa_destroy(&drv_info->partition_info);
}
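
Unlike xa_store(), which silently replaces an existing entry (leaking the old info if firmware reports a duplicate partition ID), xa_insert() fails with -EBUSY when the index is already occupied, so the duplicate can be reported and unwound. A minimal sketch (hypothetical names):

#include <linux/xarray.h>

static DEFINE_XARRAY(partitions);

static int save_partition(unsigned long id, void *info)
{
	/* 0 on success; -EBUSY flags a duplicate instead of overwriting */
	return xa_insert(&partitions, id, info, GFP_KERNEL);
}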
@@ -1520,7 +1531,11 @@ static int __init ffa_init(void)
ffa_notifications_setup();
- ffa_setup_partitions();
+ ret = ffa_setup_partitions();
+ if (ret) {
+ pr_err("failed to setup partitions\n");
+ goto cleanup_notifs;
+ }
ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
drv_info, true);
@@ -1528,6 +1543,9 @@ static int __init ffa_init(void)
pr_info("Failed to register driver sched callback %d\n", ret);
return 0;
+
+cleanup_notifs:
+ ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@@ -1547,7 +1565,6 @@ static void __exit ffa_exit(void)
ffa_rxtx_unmap(drv_info->vm_id);
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
- xa_destroy(&drv_info->partition_info);
kfree(drv_info);
arm_ffa_bus_exit();
}
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
index 706d1264d..aa5842be1 100644
--- a/drivers/firmware/arm_scmi/Kconfig
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -168,31 +168,6 @@ config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
endif #ARM_SCMI_PROTOCOL
-config ARM_SCMI_POWER_DOMAIN
- tristate "SCMI power domain driver"
- depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
- default y
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the SCMI power domains which can be
- enabled or disabled via the SCP firmware
-
- This driver can also be built as a module. If so, the module
- will be called scmi_pm_domain. Note this may be needed early in boot
- before rootfs is available.
-
-config ARM_SCMI_PERF_DOMAIN
- tristate "SCMI performance domain driver"
- depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
- default y
- select PM_GENERIC_DOMAINS if PM
- help
- This enables support for the SCMI performance domains which can be
- enabled or disabled via the SCP firmware.
-
- This driver can also be built as a module. If so, the module will be
- called scmi_perf_domain.
-
config ARM_SCMI_POWER_CONTROL
tristate "SCMI system power control driver"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index a52f084a6..97254de35 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -13,6 +13,9 @@
#include "common.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+
#define SCMI_BASE_NUM_SOURCES 1
#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
@@ -385,7 +388,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
rev->major_ver = PROTOCOL_REV_MAJOR(version),
rev->minor_ver = PROTOCOL_REV_MINOR(version);
- ph->set_priv(ph, rev);
+ ph->set_priv(ph, rev, version);
ret = scmi_base_attributes_get(ph);
if (ret)
@@ -423,6 +426,7 @@ static const struct scmi_protocol scmi_base = {
.instance_init = &scmi_base_protocol_init,
.ops = NULL,
.events = &base_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(base, scmi_base)
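
The per-protocol SCMI_PROTOCOL_SUPPORTED_VERSION constants pack the major version into the upper 16 bits and the minor into the lower 16, so 0x20000 reads as v2.0; the core compares this against the version the platform reports at runtime and warns when the platform is ahead of the driver. An illustrative helper (hypothetical macro, not from this patch):

/* 0xMMMMmmmm: major in bits [31:16], minor in bits [15:0] */
#define SCMI_VER(major, minor)	(((major) << 16) | ((minor) & 0xffff))
/* SCMI_VER(2, 0) == 0x20000; SCMI_VER(4, 0) == 0x40000 */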
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 96b4f0694..e2050adbf 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -12,6 +12,9 @@
#include "protocols.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
CLOCK_DESCRIBE_RATES = 0x4,
@@ -318,7 +321,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
if (SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
- clk->name,
+ NULL, clk->name,
SCMI_MAX_STR_SIZE);
if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
@@ -960,7 +963,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
}
cinfo->version = version;
- return ph->set_priv(ph, cinfo);
+ return ph->set_priv(ph, cinfo, version);
}
static const struct scmi_protocol scmi_clock = {
@@ -969,6 +972,7 @@ static const struct scmi_protocol scmi_clock = {
.instance_init = &scmi_clock_protocol_init,
.ops = &clk_proto_ops,
.events = &clk_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 09371f40d..3ea64b22c 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -85,6 +85,7 @@ struct scmi_xfers_info {
* @gid: A reference for per-protocol devres management.
* @users: A refcount to track effective users of this protocol.
* @priv: Reference for optional protocol private data.
+ * @version: Protocol version supported by the platform as detected at runtime.
* @ph: An embedded protocol handle that will be passed down to protocol
* initialization code to identify this instance.
*
@@ -97,6 +98,7 @@ struct scmi_protocol_instance {
void *gid;
refcount_t users;
void *priv;
+ unsigned int version;
struct scmi_protocol_handle ph;
};
@@ -1392,15 +1394,17 @@ static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
*
* @ph: A reference to the protocol handle.
* @priv: The private data to set.
+ * @version: The detected protocol version for the core to register.
*
* Return: 0 on Success
*/
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
- void *priv)
+ void *priv, u32 version)
{
struct scmi_protocol_instance *pi = ph_to_pi(ph);
pi->priv = priv;
+ pi->version = version;
return 0;
}
@@ -1438,6 +1442,7 @@ struct scmi_msg_resp_domain_name_get {
* @ph: A protocol handle reference.
* @cmd_id: The specific command ID to use.
* @res_id: The specific resource ID to use.
+ * @flags: A pointer to specific flags to use, if any.
* @name: A pointer to the preallocated area where the retrieved name will be
* stored as a NULL terminated string.
* @len: The len in bytes of the @name char array.
@@ -1445,19 +1450,22 @@ struct scmi_msg_resp_domain_name_get {
* Return: 0 on Success
*/
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
- u8 cmd_id, u32 res_id, char *name,
- size_t len)
+ u8 cmd_id, u32 res_id, u32 *flags,
+ char *name, size_t len)
{
int ret;
+ size_t txlen;
struct scmi_xfer *t;
struct scmi_msg_resp_domain_name_get *resp;
- ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
- sizeof(*resp), &t);
+ txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
+ ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
if (ret)
goto out;
put_unaligned_le32(res_id, t->tx.buf);
+ if (flags)
+ put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
resp = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
@@ -1845,6 +1853,12 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
devres_close_group(handle->dev, pi->gid);
dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
+ if (pi->version > proto->supported_version)
+ dev_warn(handle->dev,
+ "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X."
+ "Backward compatibility is NOT assured.\n",
+ pi->version, pi->proto->id);
+
return pi;
clean:
@@ -2820,7 +2834,7 @@ clear_ida:
return ret;
}
-static int scmi_remove(struct platform_device *pdev)
+static void scmi_remove(struct platform_device *pdev)
{
int id;
struct scmi_info *info = platform_get_drvdata(pdev);
@@ -2854,8 +2868,6 @@ static int scmi_remove(struct platform_device *pdev)
scmi_cleanup_txrx_channels(info);
ida_free(&scmi_id, info->id);
-
- return 0;
}
static ssize_t protocol_version_show(struct device *dev,
@@ -2933,7 +2945,7 @@ static struct platform_driver scmi_driver = {
.dev_groups = versions_groups,
},
.probe = scmi_probe,
- .remove = scmi_remove,
+ .remove_new = scmi_remove,
};
/**
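The scmi_remove() conversion above is part of the tree-wide move to void-returning platform-driver remove callbacks: the driver core ignores the old int return value, so converted drivers drop it and register through .remove_new instead. The shape of the conversion, with hypothetical foo_* names:

	/* After the conversion: remove cannot fail, so it returns void... */
	static void foo_remove(struct platform_device *pdev)
	{
		struct foo *priv = platform_get_drvdata(pdev);

		foo_teardown(priv);	/* tear down unconditionally */
	}

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.remove_new = foo_remove,	/* ...and is wired up here */
	};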
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index e123de6e8..25bfb4654 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -440,6 +440,10 @@ static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *de
if (ret)
goto err_free_shm;
+ ret = tee_client_system_session(scmi_optee_private->tee_ctx, channel->tee_session);
+ if (ret)
+ dev_warn(dev, "Could not switch to system session, do best effort\n");
+
ret = get_channel(channel);
if (ret)
goto err_close_sess;
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index d26eca37d..211e8e0ae 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -24,7 +24,10 @@
#include "protocols.h"
#include "notify.h"
-#define MAX_OPPS 16
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x40000
+
+#define MAX_OPPS 32
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
@@ -289,7 +292,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags))
ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
- dom_info->id, dom_info->info.name,
+ dom_info->id, NULL, dom_info->info.name,
SCMI_MAX_STR_SIZE);
if (dom_info->level_indexing_mode) {
@@ -518,6 +521,9 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
if (IS_ERR(dom))
return PTR_ERR(dom);
+ if (!dom->set_limits)
+ return -EOPNOTSUPP;
+
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
return -EINVAL;
@@ -668,6 +674,9 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
if (IS_ERR(dom))
return PTR_ERR(dom);
+ if (!dom->info.set_perf)
+ return -EOPNOTSUPP;
+
if (dom->level_indexing_mode) {
struct scmi_opp *opp;
@@ -767,7 +776,7 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
}
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
- u32 domain, struct scmi_fc_info **p_fc)
+ struct perf_dom_info *dom)
{
struct scmi_fc_info *fc;
@@ -776,24 +785,26 @@ static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
return;
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- PERF_LEVEL_SET, 4, domain,
- &fc[PERF_FC_LEVEL].set_addr,
- &fc[PERF_FC_LEVEL].set_db);
-
- ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- PERF_LEVEL_GET, 4, domain,
+ PERF_LEVEL_GET, 4, dom->id,
&fc[PERF_FC_LEVEL].get_addr, NULL);
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- PERF_LIMITS_SET, 8, domain,
- &fc[PERF_FC_LIMIT].set_addr,
- &fc[PERF_FC_LIMIT].set_db);
-
- ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
- PERF_LIMITS_GET, 8, domain,
+ PERF_LIMITS_GET, 8, dom->id,
&fc[PERF_FC_LIMIT].get_addr, NULL);
- *p_fc = fc;
+ if (dom->info.set_perf)
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LEVEL_SET, 4, dom->id,
+ &fc[PERF_FC_LEVEL].set_addr,
+ &fc[PERF_FC_LEVEL].set_db);
+
+ if (dom->set_limits)
+ ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
+ PERF_LIMITS_SET, 8, dom->id,
+ &fc[PERF_FC_LIMIT].set_addr,
+ &fc[PERF_FC_LIMIT].set_db);
+
+ dom->fc_info = fc;
}
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
@@ -1104,14 +1115,14 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
scmi_perf_describe_levels_get(ph, dom, version);
if (dom->perf_fastchannels)
- scmi_perf_domain_init_fc(ph, dom->id, &dom->fc_info);
+ scmi_perf_domain_init_fc(ph, dom);
}
ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
if (ret)
return ret;
- return ph->set_priv(ph, pinfo);
+ return ph->set_priv(ph, pinfo, version);
}
static const struct scmi_protocol scmi_perf = {
@@ -1120,6 +1131,7 @@ static const struct scmi_protocol scmi_perf = {
.instance_init = &scmi_perf_protocol_init,
.ops = &perf_proto_ops,
.events = &perf_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
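The perf hunks above all hang off one idea: a per-domain capability discovered during enumeration gates both the optional set-side fastchannel setup and the set operations themselves, which now fail fast with -EOPNOTSUPP instead of forwarding a doomed request to the platform. A sketch of the gating, with hypothetical foo_* names:

	static int foo_level_set(const struct scmi_protocol_handle *ph,
				 struct foo_dom_info *dom, u32 level)
	{
		/* Capability probed once, when the domain was enumerated. */
		if (!dom->can_set_level)
			return -EOPNOTSUPP;

		return foo_send_level(ph, dom->id, level);
	}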
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 356e83631..c2e6b9b4d 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -13,6 +13,9 @@
#include "protocols.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
+
enum scmi_power_protocol_cmd {
POWER_DOMAIN_ATTRIBUTES = 0x3,
POWER_STATE_SET = 0x4,
@@ -133,7 +136,7 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags)) {
ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,
- domain, dom_info->name,
+ domain, NULL, dom_info->name,
SCMI_MAX_STR_SIZE);
}
@@ -328,7 +331,7 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
pinfo->version = version;
- return ph->set_priv(ph, pinfo);
+ return ph->set_priv(ph, pinfo, version);
}
static const struct scmi_protocol scmi_power = {
@@ -337,6 +340,7 @@ static const struct scmi_protocol scmi_power = {
.instance_init = &scmi_power_protocol_init,
.ops = &power_proto_ops,
.events = &power_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(power, scmi_power)
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index cb5617443..a4c6cd471 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -17,6 +17,9 @@
#include "protocols.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+
enum scmi_powercap_protocol_cmd {
POWERCAP_DOMAIN_ATTRIBUTES = 0x3,
POWERCAP_CAP_GET = 0x4,
@@ -270,7 +273,7 @@ clean:
*/
if (!ret && SUPPORTS_EXTENDED_NAMES(flags))
ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,
- domain, dom_info->name,
+ domain, NULL, dom_info->name,
SCMI_MAX_STR_SIZE);
return ret;
@@ -975,7 +978,7 @@ scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
}
pinfo->version = version;
- return ph->set_priv(ph, pinfo);
+ return ph->set_priv(ph, pinfo, version);
}
static const struct scmi_protocol scmi_powercap = {
@@ -984,6 +987,7 @@ static const struct scmi_protocol scmi_powercap = {
.instance_init = &scmi_powercap_protocol_init,
.ops = &powercap_proto_ops,
.events = &powercap_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 78e1a01eb..e683c26f2 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -174,7 +174,8 @@ struct scmi_protocol_handle {
struct device *dev;
const struct scmi_xfer_ops *xops;
const struct scmi_proto_helpers_ops *hops;
- int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+ int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv,
+ u32 version);
void *(*get_priv)(const struct scmi_protocol_handle *ph);
};
@@ -256,7 +257,8 @@ struct scmi_fc_info {
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
- u8 cmd_id, u32 res_id, char *name, size_t len);
+ u8 cmd_id, u32 res_id, u32 *flags, char *name,
+ size_t len);
void *(*iter_response_init)(const struct scmi_protocol_handle *ph,
struct scmi_iterator_ops *ops,
unsigned int max_resources, u8 msg_id,
@@ -310,6 +312,10 @@ typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
* @ops: Optional reference to the operations provided by the protocol and
* exposed in scmi_protocol.h.
* @events: An optional reference to the events supported by this protocol.
+ * @supported_version: The highest version currently supported for this
+ * protocol by the agent. Each protocol implementation
+ * in the agent is supposed to downgrade to match the
+ * protocol version supported by the platform.
*/
struct scmi_protocol {
const u8 id;
@@ -318,6 +324,7 @@ struct scmi_protocol {
const scmi_prot_init_ph_fn_t instance_deinit;
const void *ops;
const struct scmi_protocol_events *events;
+ unsigned int supported_version;
};
#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
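The extended_name_get() helper gains an optional flags word in the hunks above: a NULL pointer keeps the TX payload at the bare 4-byte resource ID, while a non-NULL pointer appends the flags in little-endian order after it. Sketched call sites, where FOO_NAME_GET is a hypothetical command ID:

	char name[SCMI_MAX_STR_SIZE];
	u32 flags = 0;

	/* Common case: no flags, TX carries only the resource ID. */
	ph->hops->extended_name_get(ph, FOO_NAME_GET, res_id, NULL,
				    name, SCMI_MAX_STR_SIZE);

	/* With flags: res_id is followed by the flags word in the TX buffer. */
	ph->hops->extended_name_get(ph, FOO_NAME_GET, res_id, &flags,
				    name, SCMI_MAX_STR_SIZE);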
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 350573518..130d13e9c 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
rd->raw = raw;
filp->private_data = rd;
- return 0;
+ return nonseekable_open(inode, filp);
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
+ .llseek = no_llseek,
.owner = THIS_MODULE,
};
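The raw-mode debugfs files become explicitly non-seekable through two cooperating changes above: open() now goes through nonseekable_open(), which clears FMODE_LSEEK on the struct file, and every fops sets .llseek = no_llseek so an explicit seek fails with -ESPIPE. A minimal sketch of the pairing, with hypothetical foo_* handlers:

	static int foo_open(struct inode *inode, struct file *filp)
	{
		filp->private_data = inode->i_private;
		return nonseekable_open(inode, filp);	/* clears FMODE_LSEEK */
	}

	static const struct file_operations foo_fops = {
		.open = foo_open,
		.read = foo_read,
		.llseek = no_llseek,	/* explicit seeks return -ESPIPE */
		.owner = THIS_MODULE,
	};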
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index e9afa8cab..19970d9f9 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -13,6 +13,9 @@
#include "protocols.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
+
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
RESET = 0x4,
@@ -128,7 +131,8 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,
- dom_info->name, SCMI_MAX_STR_SIZE);
+ NULL, dom_info->name,
+ SCMI_MAX_STR_SIZE);
return ret;
}
@@ -342,7 +346,7 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
}
pinfo->version = version;
- return ph->set_priv(ph, pinfo);
+ return ph->set_priv(ph, pinfo, version);
}
static const struct scmi_protocol scmi_reset = {
@@ -351,6 +355,7 @@ static const struct scmi_protocol scmi_reset = {
.instance_init = &scmi_reset_protocol_init,
.ops = &reset_proto_ops,
.events = &reset_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(reset, scmi_reset)
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 0b5853fa9..311149965 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -14,6 +14,9 @@
#include "protocols.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
+
#define SCMI_MAX_NUM_SENSOR_AXIS 63
#define SCMIv2_SENSOR_PROTOCOL 0x10000
@@ -644,7 +647,7 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(attrl))
ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id,
- s->name, SCMI_MAX_STR_SIZE);
+ NULL, s->name, SCMI_MAX_STR_SIZE);
if (s->extended_scalar_attrs) {
s->sensor_power = le32_to_cpu(sdesc->power);
@@ -1138,7 +1141,7 @@ static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
- return ph->set_priv(ph, sinfo);
+ return ph->set_priv(ph, sinfo, version);
}
static const struct scmi_protocol scmi_sensors = {
@@ -1147,6 +1150,7 @@ static const struct scmi_protocol scmi_sensors = {
.instance_init = &scmi_sensors_protocol_init,
.ops = &sensor_proto_ops,
.events = &sensor_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(sensors, scmi_sensors)
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 517d52fb3..8bf495bca 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -10,7 +10,7 @@
#include <linux/processor.h>
#include <linux/types.h>
-#include <asm-generic/bug.h>
+#include <linux/bug.h>
#include "common.h"
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 9383d7584..1621da97b 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -13,6 +13,9 @@
#include "protocols.h"
#include "notify.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+
#define SCMI_SYSTEM_NUM_SOURCES 1
enum scmi_system_protocol_cmd {
@@ -144,7 +147,7 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
pinfo->graceful_timeout_supported = true;
- return ph->set_priv(ph, pinfo);
+ return ph->set_priv(ph, pinfo, version);
}
static const struct scmi_protocol scmi_system = {
@@ -153,6 +156,7 @@ static const struct scmi_protocol scmi_system = {
.instance_init = &scmi_system_protocol_init,
.ops = NULL,
.events = &system_protocol_events,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(system, scmi_system)
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index eaa8d9449..2175ffd6c 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -10,6 +10,9 @@
#include "protocols.h"
+/* Updated only after ALL the mandatory features for that version are merged */
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+
#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
#define REMAINING_LEVELS_MASK GENMASK(31, 16)
#define RETURNED_LEVELS_MASK GENMASK(11, 0)
@@ -242,7 +245,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
if (SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph,
VOLTAGE_DOMAIN_NAME_GET,
- v->id, v->name,
+ v->id, NULL, v->name,
SCMI_MAX_STR_SIZE);
if (SUPPORTS_ASYNC_LEVEL_SET(attributes))
v->async_level_set = true;
@@ -432,7 +435,7 @@ static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph)
dev_warn(ph->dev, "No Voltage domains found.\n");
}
- return ph->set_priv(ph, vinfo);
+ return ph->set_priv(ph, vinfo, version);
}
static const struct scmi_protocol scmi_voltage = {
@@ -440,6 +443,7 @@ static const struct scmi_protocol scmi_voltage = {
.owner = THIS_MODULE,
.instance_init = &scmi_voltage_protocol_init,
.ops = &voltage_proto_ops,
+ .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(voltage, scmi_voltage)
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 3f123f592..94a6b4e66 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -863,7 +863,7 @@ static void scpi_free_channels(void *data)
mbox_free_channel(info->channels[i].chan);
}
-static int scpi_remove(struct platform_device *pdev)
+static void scpi_remove(struct platform_device *pdev)
{
int i;
struct scpi_drvinfo *info = platform_get_drvdata(pdev);
@@ -874,8 +874,6 @@ static int scpi_remove(struct platform_device *pdev)
kfree(info->dvfs[i]->opps);
kfree(info->dvfs[i]);
}
-
- return 0;
}
#define MAX_SCPI_XFERS 10
@@ -1048,7 +1046,7 @@ static struct platform_driver scpi_driver = {
.dev_groups = versions_groups,
},
.probe = scpi_probe,
- .remove = scpi_remove,
+ .remove_new = scpi_remove,
};
module_platform_driver(scpi_driver);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index cb374b2da..72f2537d9 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -301,3 +301,18 @@ config UEFI_CPER_X86
bool
depends on UEFI_CPER && X86
default y
+
+config TEE_STMM_EFI
+ tristate "TEE-based EFI runtime variable service driver"
+ depends on EFI && OPTEE
+ help
+ Select this config option if TEE is compiled to include StandAloneMM
+ as a separate secure partition. It has the ability to check and store
+ EFI variables on an RPMB or any other non-volatile medium used by
+ StandAloneMM.
+
+ Enabling this will change the EFI runtime services from the firmware
+ provided functions to TEE calls.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tee_stmm_efi.
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index e489fefd2..a2d000956 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -42,3 +42,4 @@ obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o
obj-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o
+obj-$(CONFIG_TEE_STMM_EFI) += stmm/tee_stmm_efi.o
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 35c37f667..9b3884ff8 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx,
}
}
+struct ignore_section {
+ guid_t guid;
+ const char *name;
+};
+
+static const struct ignore_section ignore_sections[] = {
+ { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
+ { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
+ { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
+};
+
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
@@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
+ if (guid_equal(sec_type, &ignore_sections[i].guid)) {
+ printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
+ return;
+ }
+ }
+
if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index f80d87c19..937be269f 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -18,8 +18,6 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
struct acpi_device *adev;
struct device *phys_dev;
char hid[ACPI_ID_LEN];
- u64 uid;
- int ret;
if (node->header.length != 12)
return -EINVAL;
@@ -31,10 +29,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
node->acpi.hid >> 16);
for_each_acpi_dev_match(adev, hid, NULL, -1) {
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret == 0 && node->acpi.uid == uid)
+ if (acpi_dev_uid_match(adev, node->acpi.uid))
break;
- if (ret == -ENODATA && node->acpi.uid == 0)
+ if (!acpi_device_uid(adev) && node->acpi.uid == 0)
break;
}
if (!adev)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index abdfcb5aa..1ea14e86a 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -32,6 +32,7 @@
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>
+#include <linux/notifier.h>
#include <asm/early_ioremap.h>
@@ -187,6 +188,9 @@ static const struct attribute_group efi_subsys_attr_group = {
.is_visible = efi_attr_is_visible,
};
+struct blocking_notifier_head efivar_ops_nh;
+EXPORT_SYMBOL_GPL(efivar_ops_nh);
+
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
@@ -233,6 +237,18 @@ static void generic_ops_unregister(void)
efivars_unregister(&generic_efivars);
}
+void efivars_generic_ops_register(void)
+{
+ generic_ops_register();
+}
+EXPORT_SYMBOL_GPL(efivars_generic_ops_register);
+
+void efivars_generic_ops_unregister(void)
+{
+ generic_ops_unregister();
+}
+EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
+
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX 16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
@@ -421,6 +437,8 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efivars", 0, NULL, 0);
}
+ BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);
+
error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
if (error) {
pr_err("efi: Sysfs attribute export failed with error %d.\n",
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d561d7de4..73f4810f6 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# exist.
STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX
# For LoongArch, keep all the symbols in .init section and make sure that no
# absolute symbols references exist.
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 2c489627a..65ffd0b76 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -5,8 +5,8 @@
# EFI_ZBOOT_FORWARD_CFI
quiet_cmd_copy_and_pad = PAD $@
- cmd_copy_and_pad = cp $< $@ && \
- truncate -s $(shell hexdump -s16 -n4 -e '"%u"' $<) $@
+ cmd_copy_and_pad = cp $< $@; \
+ truncate -s $$(hexdump -s16 -n4 -e '"%u"' $<) $@
# Pad the file to the size of the uncompressed image in memory, including BSS
$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
index 6b83c492c..31928bd87 100644
--- a/drivers/firmware/efi/libstub/alignedmem.c
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -14,6 +14,7 @@
* @max: the address that the last allocated memory page shall not
* exceed
* @align: minimum alignment of the base of the allocation
+ * @memory_type: the type of memory to allocate
*
* Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
* to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index bfa30625f..3dc2f9aaf 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -24,6 +24,8 @@ static bool efi_noinitrd;
static bool efi_nosoftreserve;
static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+int efi_mem_encrypt;
+
bool __pure __efi_soft_reserve_enabled(void)
{
return !efi_nosoftreserve;
@@ -75,6 +77,12 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_noinitrd = true;
} else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
efi_no5lvl = true;
+ } else if (IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT) &&
+ !strcmp(param, "mem_encrypt") && val) {
+ if (parse_option_str(val, "on"))
+ efi_mem_encrypt = 1;
+ else if (parse_option_str(val, "off"))
+ efi_mem_encrypt = -1;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
efi_novamap |= parse_option_str(val, "novamap");
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index c04b82ea4..fc18fd649 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -37,8 +37,8 @@ extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
extern int efi_loglevel;
+extern int efi_mem_encrypt;
extern bool efi_novamap;
-
extern const efi_system_table_t *efi_system_table;
typedef union efi_dxe_services_table efi_dxe_services_table_t;
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
index d6ec5d4b8..736b6aae3 100644
--- a/drivers/firmware/efi/libstub/loongarch-stub.c
+++ b/drivers/firmware/efi/libstub/loongarch-stub.c
@@ -8,10 +8,10 @@
#include <asm/efi.h>
#include <asm/addrspace.h>
#include "efistub.h"
+#include "loongarch-stub.h"
extern int kernel_asize;
extern int kernel_fsize;
-extern int kernel_offset;
extern int kernel_entry;
efi_status_t handle_kernel_image(unsigned long *image_addr,
@@ -24,7 +24,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_status_t status;
unsigned long kernel_addr = 0;
- kernel_addr = (unsigned long)&kernel_offset - kernel_offset;
+ kernel_addr = (unsigned long)image->image_base;
status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize,
EFI_KIMG_PREFERRED_ADDRESS, efi_get_kimg_min_align(), 0x0);
@@ -35,9 +35,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
return status;
}
-unsigned long kernel_entry_address(unsigned long kernel_addr)
+unsigned long kernel_entry_address(unsigned long kernel_addr,
+ efi_loaded_image_t *image)
{
- unsigned long base = (unsigned long)&kernel_offset - kernel_offset;
+ unsigned long base = (unsigned long)image->image_base;
return (unsigned long)&kernel_entry - base + kernel_addr;
}
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.h b/drivers/firmware/efi/libstub/loongarch-stub.h
new file mode 100644
index 000000000..cd015955a
--- /dev/null
+++ b/drivers/firmware/efi/libstub/loongarch-stub.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+unsigned long kernel_entry_address(unsigned long kernel_addr,
+ efi_loaded_image_t *image);
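The kernel_entry_address() split above leans on weak linkage: loongarch.c keeps a __weak default (see the hunk below) and loongarch-stub.c supplies a strong definition that wins at link time whenever that object is part of the build. The pattern in isolation, with hypothetical names:

	/* shared.c: weak fallback, used when nothing overrides it */
	unsigned long __weak get_entry(unsigned long base)
	{
		return base;
	}

	/* stub.c: a strong definition silently replaces the weak one */
	unsigned long get_entry(unsigned long base)
	{
		return base + 0x1000;	/* hypothetical stub-specific offset */
	}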
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index 0e0aa6cda..684c93546 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -8,6 +8,7 @@
#include <asm/efi.h>
#include <asm/addrspace.h>
#include "efistub.h"
+#include "loongarch-stub.h"
typedef void __noreturn (*kernel_entry_t)(bool efi, unsigned long cmdline,
unsigned long systab);
@@ -37,7 +38,8 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
return EFI_SUCCESS;
}
-unsigned long __weak kernel_entry_address(unsigned long kernel_addr)
+unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
+ efi_loaded_image_t *image)
{
return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
}
@@ -73,7 +75,7 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
- real_kernel_entry = (void *)kernel_entry_address(kernel_addr);
+ real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image);
real_kernel_entry(true, (unsigned long)cmdline_ptr,
(unsigned long)efi_system_table);
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index 479dd445a..77359e802 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -13,8 +13,8 @@ bool efi_no5lvl;
static void (*la57_toggle)(void *cr3);
static const struct desc_struct gdt[] = {
- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
};
/*
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index dd80082aa..19b51dc34 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -238,6 +238,15 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
+ status = efi_call_proto(memattr, set_memory_attributes,
+ rounded_start,
+ rounded_end - rounded_start,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
+ return status;
+ }
+
status = efi_call_proto(memattr, clear_memory_attributes,
rounded_start,
rounded_end - rounded_start,
@@ -818,7 +827,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_total_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
@@ -890,6 +899,9 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
}
}
+ if (efi_mem_encrypt > 0)
+ hdr->xloadflags |= XLF_MEM_ENCRYPTION;
+
status = efi_decompress_kernel(&kernel_entry);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index a1180461a..3365944f7 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -32,7 +32,7 @@
* space isn't setup. Once the kernel is fully booted we can fallback
* to the more robust memremap*() API.
*
- * Returns zero on success, a negative error code on failure.
+ * Returns: zero on success, a negative error code on failure.
*/
int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
@@ -77,6 +77,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
*
* Use early_memremap() to map the passed in EFI memory map and assign
* it to efi.memmap.
+ *
+ * Returns: zero on success, a negative error code on failure.
*/
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
@@ -107,7 +109,7 @@ void __init efi_memmap_unmap(void)
/**
* efi_memmap_init_late - Map efi.memmap with memremap()
- * @phys_addr: Physical address of the new EFI memory map
+ * @addr: Physical address of the new EFI memory map
* @size: Size in bytes of the new EFI memory map
*
* Setup a mapping of the EFI memory map using ioremap_cache(). This
@@ -126,7 +128,7 @@ void __init efi_memmap_unmap(void)
* runtime so that things like efi_mem_desc_lookup() and
* efi_mem_attributes() always work.
*
- * Returns zero on success, a negative error code on failure.
+ * Returns: zero on success, a negative error code on failure.
*/
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
diff --git a/drivers/firmware/efi/stmm/mm_communication.h b/drivers/firmware/efi/stmm/mm_communication.h
new file mode 100644
index 000000000..52a1f32cd
--- /dev/null
+++ b/drivers/firmware/efi/stmm/mm_communication.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Headers for EFI variable service via StandAloneMM, EDK2 application running
+ * in OP-TEE. Most of the structs and defines resemble the EDK2 naming.
+ *
+ * Copyright (c) 2017, Intel Corporation. All rights reserved.
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#ifndef _MM_COMMUNICATION_H_
+#define _MM_COMMUNICATION_H_
+
+/*
+ * Interface to the pseudo Trusted Application (TA), which provides a
+ * communication channel with the Standalone MM (Management Mode)
+ * Secure Partition running at Secure-EL0
+ */
+
+#define PTA_STMM_CMD_COMMUNICATE 0
+
+/*
+ * Defined in OP-TEE, this UUID is used to identify the pseudo-TA.
+ * OP-TEE uses big-endian GUIDs while UEFI uses little-endian ones
+ */
+#define PTA_STMM_UUID \
+ UUID_INIT(0xed32d533, 0x99e6, 0x4209, \
+ 0x9c, 0xc0, 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7)
+
+#define EFI_MM_VARIABLE_GUID \
+ EFI_GUID(0xed32d533, 0x99e6, 0x4209, \
+ 0x9c, 0xc0, 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7)
+
+/**
+ * struct efi_mm_communicate_header - Header used for SMM variable communication
+ *
+ * @header_guid: header used for disambiguation of content
+ * @message_len: length of the message. Does not include the size of the
+ * header
+ * @data: payload of the message
+ *
+ * Defined in the PI spec as EFI_MM_COMMUNICATE_HEADER.
+ * To avoid confusion in interpreting frames, the communication buffer should
+ * always begin with efi_mm_communicate_header.
+ */
+struct efi_mm_communicate_header {
+ efi_guid_t header_guid;
+ size_t message_len;
+ u8 data[];
+} __packed;
+
+#define MM_COMMUNICATE_HEADER_SIZE \
+ (sizeof(struct efi_mm_communicate_header))
+
+/* SPM return error codes */
+#define ARM_SVC_SPM_RET_SUCCESS 0
+#define ARM_SVC_SPM_RET_NOT_SUPPORTED -1
+#define ARM_SVC_SPM_RET_INVALID_PARAMS -2
+#define ARM_SVC_SPM_RET_DENIED -3
+#define ARM_SVC_SPM_RET_NO_MEMORY -5
+
+#define SMM_VARIABLE_FUNCTION_GET_VARIABLE 1
+/*
+ * The payload for this function is
+ * SMM_VARIABLE_COMMUNICATE_GET_NEXT_VARIABLE_NAME.
+ */
+#define SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME 2
+/*
+ * The payload for this function is SMM_VARIABLE_COMMUNICATE_ACCESS_VARIABLE.
+ */
+#define SMM_VARIABLE_FUNCTION_SET_VARIABLE 3
+/*
+ * The payload for this function is
+ * SMM_VARIABLE_COMMUNICATE_QUERY_VARIABLE_INFO.
+ */
+#define SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO 4
+/*
+ * It is a notify event, no extra payload for this function.
+ */
+#define SMM_VARIABLE_FUNCTION_READY_TO_BOOT 5
+/*
+ * It is a notify event, no extra payload for this function.
+ */
+#define SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE 6
+/*
+ * The payload for this function is VARIABLE_INFO_ENTRY.
+ * The GUID in EFI_SMM_COMMUNICATE_HEADER is gEfiSmmVariableProtocolGuid.
+ */
+#define SMM_VARIABLE_FUNCTION_GET_STATISTICS 7
+/*
+ * The payload for this function is SMM_VARIABLE_COMMUNICATE_LOCK_VARIABLE
+ */
+#define SMM_VARIABLE_FUNCTION_LOCK_VARIABLE 8
+
+#define SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_SET 9
+
+#define SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET 10
+
+#define SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE 11
+/*
+ * The payload for this function is
+ * SMM_VARIABLE_COMMUNICATE_RUNTIME_VARIABLE_CACHE_CONTEXT
+ */
+#define SMM_VARIABLE_FUNCTION_INIT_RUNTIME_VARIABLE_CACHE_CONTEXT 12
+
+#define SMM_VARIABLE_FUNCTION_SYNC_RUNTIME_CACHE 13
+/*
+ * The payload for this function is
+ * SMM_VARIABLE_COMMUNICATE_GET_RUNTIME_CACHE_INFO
+ */
+#define SMM_VARIABLE_FUNCTION_GET_RUNTIME_CACHE_INFO 14
+
+/**
+ * struct smm_variable_communicate_header - Used for SMM variable communication
+ *
+ * @function: function to call in SMM.
+ * @ret_status: return status
+ * @data: payload
+ */
+struct smm_variable_communicate_header {
+ size_t function;
+ efi_status_t ret_status;
+ u8 data[];
+};
+
+#define MM_VARIABLE_COMMUNICATE_SIZE \
+ (sizeof(struct smm_variable_communicate_header))
+
+/**
+ * struct smm_variable_access - Used to communicate with StMM by
+ * SetVariable and GetVariable.
+ *
+ * @guid: vendor GUID
+ * @data_size: size of EFI variable data
+ * @name_size: size of EFI name
+ * @attr: attributes
+ * @name: variable name
+ *
+ */
+struct smm_variable_access {
+ efi_guid_t guid;
+ size_t data_size;
+ size_t name_size;
+ u32 attr;
+ u16 name[];
+};
+
+#define MM_VARIABLE_ACCESS_HEADER_SIZE \
+ (sizeof(struct smm_variable_access))
+/**
+ * struct smm_variable_payload_size - Used to get the max allowed
+ * payload used in StMM.
+ *
+ * @size: size to fill in
+ *
+ */
+struct smm_variable_payload_size {
+ size_t size;
+};
+
+/**
+ * struct smm_variable_getnext - Used to communicate with StMM for
+ * GetNextVariableName.
+ *
+ * @guid: vendor GUID
+ * @name_size: size of the name of the variable
+ * @name: variable name
+ *
+ */
+struct smm_variable_getnext {
+ efi_guid_t guid;
+ size_t name_size;
+ u16 name[];
+};
+
+#define MM_VARIABLE_GET_NEXT_HEADER_SIZE \
+ (sizeof(struct smm_variable_getnext))
+
+/**
+ * struct smm_variable_query_info - Used to communicate with StMM for
+ * QueryVariableInfo.
+ *
+ * @max_variable_storage: max available storage
+ * @remaining_variable_storage: remaining available storage
+ * @max_variable_size: max variable supported size
+ * @attr: attributes to query storage for
+ *
+ */
+struct smm_variable_query_info {
+ u64 max_variable_storage;
+ u64 remaining_variable_storage;
+ u64 max_variable_size;
+ u32 attr;
+};
+
+#define VAR_CHECK_VARIABLE_PROPERTY_REVISION 0x0001
+#define VAR_CHECK_VARIABLE_PROPERTY_READ_ONLY BIT(0)
+/**
+ * struct var_check_property - Used to store variable properties in StMM
+ *
+ * @revision: magic revision number for variable property checking
+ * @property: properties mask for the variable used in StMM.
+ * Currently only the RO flag is supported
+ * @attributes: variable attributes used in StMM checking when properties
+ * for a variable are enabled
+ * @minsize: minimum allowed size for variable payload checked against
+ * smm_variable_access->data_size in StMM
+ * @maxsize: maximum allowed size for variable payload checked against
+ * smm_variable_access->data_size in StMM
+ *
+ */
+struct var_check_property {
+ u16 revision;
+ u16 property;
+ u32 attributes;
+ size_t minsize;
+ size_t maxsize;
+};
+
+/**
+ * struct smm_variable_var_check_property - Used to communicate variable
+ * properties with StMM
+ *
+ * @guid: vendor GUID
+ * @name_size: size of EFI name
+ * @property: variable properties struct
+ * @name: variable name
+ *
+ */
+struct smm_variable_var_check_property {
+ efi_guid_t guid;
+ size_t name_size;
+ struct var_check_property property;
+ u16 name[];
+};
+
+#endif /* _MM_COMMUNICATION_H_ */
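Every message defined by this header travels as an efi_mm_communicate_header immediately followed by a smm_variable_communicate_header and then the function-specific payload, so buffer sizes are always the sum of the three. A sketch of the bookkeeping that setup_mm_hdr() and mm_communicate() in the driver below perform (stmm_buf_size() is an illustrative helper, not a function from the patch):

	/* total allocation for a given function payload */
	static size_t stmm_buf_size(size_t payload_size)
	{
		return MM_COMMUNICATE_HEADER_SIZE +	/* outer EFI MM header */
		       MM_VARIABLE_COMMUNICATE_SIZE +	/* variable-service header */
		       payload_size;
	}

	/* message_len, by contrast, excludes the outer MM header: */
	mm_hdr->message_len = MM_VARIABLE_COMMUNICATE_SIZE + payload_size;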
diff --git a/drivers/firmware/efi/stmm/tee_stmm_efi.c b/drivers/firmware/efi/stmm/tee_stmm_efi.c
new file mode 100644
index 000000000..f741ca279
--- /dev/null
+++ b/drivers/firmware/efi/stmm/tee_stmm_efi.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * EFI variable service via TEE
+ *
+ * Copyright (C) 2022 Linaro
+ */
+
+#include <linux/efi.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tee.h>
+#include <linux/tee_drv.h>
+#include <linux/ucs2_string.h>
+#include "mm_communication.h"
+
+static struct efivars tee_efivars;
+static struct efivar_operations tee_efivar_ops;
+
+static size_t max_buffer_size; /* comm + var + func + data */
+static size_t max_payload_size; /* func + data */
+
+struct tee_stmm_efi_private {
+ struct tee_context *ctx;
+ u32 session;
+ struct device *dev;
+};
+
+static struct tee_stmm_efi_private pvt_data;
+
+/* UUID of the stmm PTA */
+static const struct tee_client_device_id tee_stmm_efi_id_table[] = {
+ {PTA_STMM_UUID},
+ {}
+};
+
+static int tee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ /* currently only OP-TEE is supported as a communication path */
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * tee_mm_communicate() - Pass a buffer to StandaloneMM running in TEE
+ *
+ * @comm_buf: locally allocated communication buffer
+ * @dsize: buffer size
+ * Return: status code
+ */
+static efi_status_t tee_mm_communicate(void *comm_buf, size_t dsize)
+{
+ size_t buf_size;
+ struct efi_mm_communicate_header *mm_hdr;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[4];
+ struct tee_shm *shm = NULL;
+ int rc;
+
+ if (!comm_buf)
+ return EFI_INVALID_PARAMETER;
+
+ mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
+ buf_size = mm_hdr->message_len + sizeof(efi_guid_t) + sizeof(size_t);
+
+ if (dsize != buf_size)
+ return EFI_INVALID_PARAMETER;
+
+ shm = tee_shm_register_kernel_buf(pvt_data.ctx, comm_buf, buf_size);
+ if (IS_ERR(shm)) {
+ dev_err(pvt_data.dev, "Unable to register shared memory\n");
+ return EFI_UNSUPPORTED;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ arg.func = PTA_STMM_CMD_COMMUNICATE;
+ arg.session = pvt_data.session;
+ arg.num_params = 4;
+
+ memset(param, 0, sizeof(param));
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.size = buf_size;
+ param[0].u.memref.shm = shm;
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+ param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ param[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+
+ rc = tee_client_invoke_func(pvt_data.ctx, &arg, param);
+ tee_shm_free(shm);
+
+ if (rc < 0 || arg.ret != 0) {
+ dev_err(pvt_data.dev,
+ "PTA_STMM_CMD_COMMUNICATE invoke error: 0x%x\n", arg.ret);
+ return EFI_DEVICE_ERROR;
+ }
+
+ switch (param[1].u.value.a) {
+ case ARM_SVC_SPM_RET_SUCCESS:
+ return EFI_SUCCESS;
+
+ case ARM_SVC_SPM_RET_INVALID_PARAMS:
+ return EFI_INVALID_PARAMETER;
+
+ case ARM_SVC_SPM_RET_DENIED:
+ return EFI_ACCESS_DENIED;
+
+ case ARM_SVC_SPM_RET_NO_MEMORY:
+ return EFI_OUT_OF_RESOURCES;
+
+ default:
+ return EFI_ACCESS_DENIED;
+ }
+}
+
+/**
+ * mm_communicate() - Adjust the communication buffer to StandAloneMM and send
+ * it to TEE
+ *
+ * @comm_buf: locally allocated communication buffer; must be big
+ * enough to hold the headers and the payload
+ * @payload_size: payload size
+ * Return: status code
+ */
+static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
+{
+ size_t dsize;
+ efi_status_t ret;
+ struct efi_mm_communicate_header *mm_hdr;
+ struct smm_variable_communicate_header *var_hdr;
+
+ dsize = payload_size + MM_COMMUNICATE_HEADER_SIZE +
+ MM_VARIABLE_COMMUNICATE_SIZE;
+ mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
+ var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
+
+ ret = tee_mm_communicate(comm_buf, dsize);
+ if (ret != EFI_SUCCESS) {
+ dev_err(pvt_data.dev, "%s failed!\n", __func__);
+ return ret;
+ }
+
+ return var_hdr->ret_status;
+}
+
+/**
+ * setup_mm_hdr() - Allocate a buffer for StandAloneMM and initialize the
+ * header data.
+ *
+ * @dptr: pointer address to store allocated buffer
+ * @payload_size: payload size
+ * @func: StandAloneMM function number
+ * @ret: EFI return code
+ * Return: pointer to corresponding StandAloneMM function buffer or NULL
+ */
+static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
+ efi_status_t *ret)
+{
+ const efi_guid_t mm_var_guid = EFI_MM_VARIABLE_GUID;
+ struct efi_mm_communicate_header *mm_hdr;
+ struct smm_variable_communicate_header *var_hdr;
+ u8 *comm_buf;
+
+ /*
+ * In the init function we initialize max_buffer_size with
+ * get_max_payload(), so skip the size test if max_buffer_size is not
+ * yet initialized. StandAloneMM will perform similar checks and drop
+ * the buffer if it's too long.
+ */
+ if (max_buffer_size &&
+ max_buffer_size < (MM_COMMUNICATE_HEADER_SIZE +
+ MM_VARIABLE_COMMUNICATE_SIZE + payload_size)) {
+ *ret = EFI_INVALID_PARAMETER;
+ return NULL;
+ }
+
+ comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
+ MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
+ GFP_KERNEL);
+ if (!comm_buf) {
+ *ret = EFI_OUT_OF_RESOURCES;
+ return NULL;
+ }
+
+ mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
+ memcpy(&mm_hdr->header_guid, &mm_var_guid, sizeof(mm_hdr->header_guid));
+ mm_hdr->message_len = MM_VARIABLE_COMMUNICATE_SIZE + payload_size;
+
+ var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
+ var_hdr->function = func;
+ if (dptr)
+ *dptr = comm_buf;
+ *ret = EFI_SUCCESS;
+
+ return var_hdr->data;
+}
+
+/**
+ * get_max_payload() - Get variable payload size from StandAloneMM.
+ *
+ * @size: maximum payload size, filled in on successful return
+ * Return: status code
+ */
+static efi_status_t get_max_payload(size_t *size)
+{
+ struct smm_variable_payload_size *var_payload = NULL;
+ size_t payload_size;
+ u8 *comm_buf = NULL;
+ efi_status_t ret;
+
+ if (!size)
+ return EFI_INVALID_PARAMETER;
+
+ payload_size = sizeof(*var_payload);
+ var_payload = setup_mm_hdr(&comm_buf, payload_size,
+ SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE,
+ &ret);
+ if (!var_payload)
+ return EFI_OUT_OF_RESOURCES;
+
+ ret = mm_communicate(comm_buf, payload_size);
+ if (ret != EFI_SUCCESS)
+ goto out;
+
+ /* Make sure the buffer is big enough for storing variables */
+ if (var_payload->size < MM_VARIABLE_ACCESS_HEADER_SIZE + 0x20) {
+ ret = EFI_DEVICE_ERROR;
+ goto out;
+ }
+ *size = var_payload->size;
+ /*
+ * There seems to be a bug in EDK2 miscalculating the boundaries and
+ * size checks, so deduct 2 more bytes to fulfill this requirement. Fix
+ * it up here to ensure backwards compatibility with older versions
+ * (cf. StandaloneMmPkg/Drivers/StandaloneMmCpu/AArch64/EventHandle.c.
+ * sizeof (EFI_MM_COMMUNICATE_HEADER) instead of the size minus the
+ * flexible array member).
+ *
+ * size is guaranteed to be > 2 due to the checks at the beginning.
+ */
+ *size -= 2;
+out:
+ kfree(comm_buf);
+ return ret;
+}
+
+static efi_status_t get_property_int(u16 *name, size_t name_size,
+ const efi_guid_t *vendor,
+ struct var_check_property *var_property)
+{
+ struct smm_variable_var_check_property *smm_property;
+ size_t payload_size;
+ u8 *comm_buf = NULL;
+ efi_status_t ret;
+
+ memset(var_property, 0, sizeof(*var_property));
+ payload_size = sizeof(*smm_property) + name_size;
+ if (payload_size > max_payload_size)
+ return EFI_INVALID_PARAMETER;
+
+ smm_property = setup_mm_hdr(
+ &comm_buf, payload_size,
+ SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET, &ret);
+ if (!smm_property)
+ return EFI_OUT_OF_RESOURCES;
+
+ memcpy(&smm_property->guid, vendor, sizeof(smm_property->guid));
+ smm_property->name_size = name_size;
+ memcpy(smm_property->name, name, name_size);
+
+ ret = mm_communicate(comm_buf, payload_size);
+ /*
+ * Currently only R/O property is supported in StMM.
+ * Variables that are not set to R/O will not set the property in StMM
+ * and the call will return EFI_NOT_FOUND. We are setting the
+ * properties to 0x0 so checking against that is enough for the
+ * EFI_NOT_FOUND case.
+ */
+ if (ret == EFI_NOT_FOUND)
+ ret = EFI_SUCCESS;
+ if (ret != EFI_SUCCESS)
+ goto out;
+ memcpy(var_property, &smm_property->property, sizeof(*var_property));
+
+out:
+ kfree(comm_buf);
+ return ret;
+}
+
+static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
+ u32 *attributes, unsigned long *data_size,
+ void *data)
+{
+ struct var_check_property var_property;
+ struct smm_variable_access *var_acc;
+ size_t payload_size;
+ size_t name_size;
+ size_t tmp_dsize;
+ u8 *comm_buf = NULL;
+ efi_status_t ret;
+
+ if (!name || !vendor || !data_size)
+ return EFI_INVALID_PARAMETER;
+
+ name_size = (ucs2_strnlen(name, EFI_VAR_NAME_LEN) + 1) * sizeof(u16);
+ if (name_size > max_payload_size - MM_VARIABLE_ACCESS_HEADER_SIZE)
+ return EFI_INVALID_PARAMETER;
+
+ /* Trim output buffer size */
+ tmp_dsize = *data_size;
+ if (name_size + tmp_dsize >
+ max_payload_size - MM_VARIABLE_ACCESS_HEADER_SIZE) {
+ tmp_dsize = max_payload_size - MM_VARIABLE_ACCESS_HEADER_SIZE -
+ name_size;
+ }
+
+ payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + tmp_dsize;
+ var_acc = setup_mm_hdr(&comm_buf, payload_size,
+ SMM_VARIABLE_FUNCTION_GET_VARIABLE, &ret);
+ if (!var_acc)
+ return EFI_OUT_OF_RESOURCES;
+
+ /* Fill in contents */
+ memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));
+ var_acc->data_size = tmp_dsize;
+ var_acc->name_size = name_size;
+ var_acc->attr = attributes ? *attributes : 0;
+ memcpy(var_acc->name, name, name_size);
+
+ ret = mm_communicate(comm_buf, payload_size);
+ if (ret == EFI_SUCCESS || ret == EFI_BUFFER_TOO_SMALL)
+ /* Update with reported data size for trimmed case */
+ *data_size = var_acc->data_size;
+ if (ret != EFI_SUCCESS)
+ goto out;
+
+ ret = get_property_int(name, name_size, vendor, &var_property);
+ if (ret != EFI_SUCCESS)
+ goto out;
+
+ if (attributes)
+ *attributes = var_acc->attr;
+
+ if (!data) {
+ ret = EFI_INVALID_PARAMETER;
+ goto out;
+ }
+ memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
+ var_acc->data_size);
+out:
+ kfree(comm_buf);
+ return ret;
+}
+
+static efi_status_t tee_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *guid)
+{
+ struct smm_variable_getnext *var_getnext;
+ size_t payload_size;
+ size_t out_name_size;
+ size_t in_name_size;
+ u8 *comm_buf = NULL;
+ efi_status_t ret;
+
+ if (!name_size || !name || !guid)
+ return EFI_INVALID_PARAMETER;
+
+ out_name_size = *name_size;
+ in_name_size = (ucs2_strnlen(name, EFI_VAR_NAME_LEN) + 1) * sizeof(u16);
+
+ if (out_name_size < in_name_size)
+ return EFI_INVALID_PARAMETER;
+
+ if (in_name_size > max_payload_size - MM_VARIABLE_GET_NEXT_HEADER_SIZE)
+ return EFI_INVALID_PARAMETER;
+
+ /* Trim output buffer size */
+ if (out_name_size > max_payload_size - MM_VARIABLE_GET_NEXT_HEADER_SIZE)
+ out_name_size =
+ max_payload_size - MM_VARIABLE_GET_NEXT_HEADER_SIZE;
+
+ payload_size = MM_VARIABLE_GET_NEXT_HEADER_SIZE + out_name_size;
+ var_getnext = setup_mm_hdr(&comm_buf, payload_size,
+ SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME,
+ &ret);
+ if (!var_getnext)
+ return EFI_OUT_OF_RESOURCES;
+
+ /* Fill in contents */
+ memcpy(&var_getnext->guid, guid, sizeof(var_getnext->guid));
+ var_getnext->name_size = out_name_size;
+ memcpy(var_getnext->name, name, in_name_size);
+ memset((u8 *)var_getnext->name + in_name_size, 0x0,
+ out_name_size - in_name_size);
+
+ ret = mm_communicate(comm_buf, payload_size);
+ if (ret == EFI_SUCCESS || ret == EFI_BUFFER_TOO_SMALL) {
+ /* Update with reported data size for trimmed case */
+ *name_size = var_getnext->name_size;
+ }
+ if (ret != EFI_SUCCESS)
+ goto out;
+
+ memcpy(guid, &var_getnext->guid, sizeof(*guid));
+ memcpy(name, var_getnext->name, var_getnext->name_size);
+
+out:
+ kfree(comm_buf);
+ return ret;
+}
+
+static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attributes, unsigned long data_size,
+ void *data)
+{
+ efi_status_t ret;
+ struct var_check_property var_property;
+ struct smm_variable_access *var_acc;
+ size_t payload_size;
+ size_t name_size;
+ u8 *comm_buf = NULL;
+
+ if (!name || name[0] == 0 || !vendor)
+ return EFI_INVALID_PARAMETER;
+
+ if (data_size > 0 && !data)
+ return EFI_INVALID_PARAMETER;
+
+ /* Check payload size */
+ name_size = (ucs2_strnlen(name, EFI_VAR_NAME_LEN) + 1) * sizeof(u16);
+ payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + data_size;
+ if (payload_size > max_payload_size)
+ return EFI_INVALID_PARAMETER;
+
+ /*
+ * Allocate the buffer early, before switching to RW (if needed),
+ * so we won't need to account for any failures in reading/setting
+ * the properties if the allocation fails
+ */
+ var_acc = setup_mm_hdr(&comm_buf, payload_size,
+ SMM_VARIABLE_FUNCTION_SET_VARIABLE, &ret);
+ if (!var_acc)
+ return EFI_OUT_OF_RESOURCES;
+
+ /*
+ * The API has the ability to override RO flags. If no RO check was
+ * requested switch the variable to RW for the duration of this call
+ */
+ ret = get_property_int(name, name_size, vendor, &var_property);
+ if (ret != EFI_SUCCESS) {
+ dev_err(pvt_data.dev, "Getting variable property failed\n");
+ goto out;
+ }
+
+ if (var_property.property & VAR_CHECK_VARIABLE_PROPERTY_READ_ONLY) {
+ ret = EFI_WRITE_PROTECTED;
+ goto out;
+ }
+
+ /* Fill in contents */
+ memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));
+ var_acc->data_size = data_size;
+ var_acc->name_size = name_size;
+ var_acc->attr = attributes;
+ memcpy(var_acc->name, name, name_size);
+ memcpy((u8 *)var_acc->name + name_size, data, data_size);
+
+ ret = mm_communicate(comm_buf, payload_size);
+ dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
+out:
+ kfree(comm_buf);
+ return ret;
+}
+
+static efi_status_t tee_set_variable_nonblocking(efi_char16_t *name,
+ efi_guid_t *vendor,
+ u32 attributes,
+ unsigned long data_size,
+ void *data)
+{
+ return EFI_UNSUPPORTED;
+}
+
+static efi_status_t tee_query_variable_info(u32 attributes,
+ u64 *max_variable_storage_size,
+ u64 *remain_variable_storage_size,
+ u64 *max_variable_size)
+{
+ struct smm_variable_query_info *mm_query_info;
+ size_t payload_size;
+ efi_status_t ret;
+ u8 *comm_buf;
+
+ payload_size = sizeof(*mm_query_info);
+ mm_query_info = setup_mm_hdr(&comm_buf, payload_size,
+ SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO,
+ &ret);
+ if (!mm_query_info)
+ return EFI_OUT_OF_RESOURCES;
+
+ mm_query_info->attr = attributes;
+ ret = mm_communicate(comm_buf, payload_size);
+ if (ret != EFI_SUCCESS)
+ goto out;
+ *max_variable_storage_size = mm_query_info->max_variable_storage;
+ *remain_variable_storage_size =
+ mm_query_info->remaining_variable_storage;
+ *max_variable_size = mm_query_info->max_variable_size;
+
+out:
+ kfree(comm_buf);
+ return ret;
+}
+
+static void tee_stmm_efi_close_context(void *data)
+{
+ tee_client_close_context(pvt_data.ctx);
+}
+
+static void tee_stmm_efi_close_session(void *data)
+{
+ tee_client_close_session(pvt_data.ctx, pvt_data.session);
+}
+
+static void tee_stmm_restore_efivars_generic_ops(void)
+{
+ efivars_unregister(&tee_efivars);
+ efivars_generic_ops_register();
+}
+
+static int tee_stmm_efi_probe(struct device *dev)
+{
+ struct tee_ioctl_open_session_arg sess_arg;
+ efi_status_t ret;
+ int rc;
+
+ pvt_data.ctx = tee_client_open_context(NULL, tee_ctx_match, NULL, NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ rc = devm_add_action_or_reset(dev, tee_stmm_efi_close_context, NULL);
+ if (rc)
+ return rc;
+
+ /* Open session with StMM PTA */
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ export_uuid(sess_arg.uuid, &tee_stmm_efi_id_table[0].uuid);
+ rc = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ return -EINVAL;
+ }
+ pvt_data.session = sess_arg.session;
+ pvt_data.dev = dev;
+ rc = devm_add_action_or_reset(dev, tee_stmm_efi_close_session, NULL);
+ if (rc)
+ return rc;
+
+ ret = get_max_payload(&max_payload_size);
+ if (ret != EFI_SUCCESS)
+ return -EIO;
+
+ max_buffer_size = MM_COMMUNICATE_HEADER_SIZE +
+ MM_VARIABLE_COMMUNICATE_SIZE +
+ max_payload_size;
+
+ tee_efivar_ops.get_variable = tee_get_variable;
+ tee_efivar_ops.get_next_variable = tee_get_next_variable;
+ tee_efivar_ops.set_variable = tee_set_variable;
+ tee_efivar_ops.set_variable_nonblocking = tee_set_variable_nonblocking;
+ tee_efivar_ops.query_variable_store = efi_query_variable_store;
+ tee_efivar_ops.query_variable_info = tee_query_variable_info;
+
+ efivars_generic_ops_unregister();
+ pr_info("Using TEE-based EFI runtime variable services\n");
+ efivars_register(&tee_efivars, &tee_efivar_ops);
+
+ return 0;
+}
+
+static int tee_stmm_efi_remove(struct device *dev)
+{
+ tee_stmm_restore_efivars_generic_ops();
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(tee, tee_stmm_efi_id_table);
+
+static struct tee_client_driver tee_stmm_efi_driver = {
+ .id_table = tee_stmm_efi_id_table,
+ .driver = {
+ .name = "tee-stmm-efi",
+ .bus = &tee_bus_type,
+ .probe = tee_stmm_efi_probe,
+ .remove = tee_stmm_efi_remove,
+ },
+};
+
+static int __init tee_stmm_efi_mod_init(void)
+{
+ return driver_register(&tee_stmm_efi_driver.driver);
+}
+
+static void __exit tee_stmm_efi_mod_exit(void)
+{
+ driver_unregister(&tee_stmm_efi_driver.driver);
+}
+
+module_init(tee_stmm_efi_mod_init);
+module_exit(tee_stmm_efi_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ilias Apalodimas <ilias.apalodimas@linaro.org>");
+MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
+MODULE_DESCRIPTION("TEE based EFI runtime variable service driver");
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index e9dc7116d..f654e6f6a 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -63,6 +63,7 @@ int efivars_register(struct efivars *efivars,
const struct efivar_operations *ops)
{
int rv;
+ int event;
if (down_interruptible(&efivars_lock))
return -EINTR;
@@ -77,6 +78,13 @@ int efivars_register(struct efivars *efivars,
__efivars = efivars;
+ if (efivar_supports_writes())
+ event = EFIVAR_OPS_RDWR;
+ else
+ event = EFIVAR_OPS_RDONLY;
+
+ blocking_notifier_call_chain(&efivar_ops_nh, event, NULL);
+
pr_info("Registered efivars operations\n");
rv = 0;
out:
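The efivar_ops_nh chain added here lets late consumers learn whether the freshly registered operations are writable; efivarfs registers something similar so it can flip between read-only and read-write. A hedged sketch of such a consumer (all names below are illustrative):

    static int example_ops_notifier(struct notifier_block *nb,
    				    unsigned long event, void *data)
    {
    	/* event is EFIVAR_OPS_RDWR or EFIVAR_OPS_RDONLY */
    	pr_info("efivars are now %s\n",
    		event == EFIVAR_OPS_RDWR ? "read-write" : "read-only");
    	return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
    	.notifier_call = example_ops_notifier,
    };

    /* at init time, assuming efivar_ops_nh is visible to the consumer: */
    blocking_notifier_chain_register(&efivar_ops_nh, &example_nb);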
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 33ae94745..2a4469bf1 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -176,10 +176,9 @@ static int __cb_dev_unregister(struct device *dev, void *dummy)
return 0;
}
-static int coreboot_table_remove(struct platform_device *pdev)
+static void coreboot_table_remove(struct platform_device *pdev)
{
bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
- return 0;
}
#ifdef CONFIG_ACPI
@@ -201,7 +200,7 @@ MODULE_DEVICE_TABLE(of, coreboot_of_match);
static struct platform_driver coreboot_table_driver = {
.probe = coreboot_table_probe,
- .remove = coreboot_table_remove,
+ .remove_new = coreboot_table_remove,
.driver = {
.name = "coreboot_table",
.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
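This is the first of several conversions in this series to the transitional .remove_new callback: the int return of .remove was ignored by the platform core anyway, so the callback becomes void. The shape is identical everywhere (foo is a placeholder):

    /* Before: returning non-zero did not stop the unbind. */
    static int foo_remove(struct platform_device *pdev)
    {
    	foo_teardown(pdev);
    	return 0;
    }

    /* After: wired up via .remove_new, until .remove itself turns void. */
    static void foo_remove(struct platform_device *pdev)
    {
    	foo_teardown(pdev);
    }

The same transformation recurs below for imx-dsp, mtk-adsp-ipc, qemu_fw_cfg, raspberrypi, stratix10-rsu/svc, turris-mox-rwtm and mpfs-auto-update.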
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index c323a8188..5c84bbebf 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -36,6 +36,9 @@ static int framebuffer_probe(struct coreboot_device *dev)
.format = NULL,
};
+ if (!fb->physical_address)
+ return -ENODEV;
+
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
if (fb->bits_per_pixel == formats[i].bits_per_pixel &&
fb->red_mask_pos == formats[i].red.offset &&
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index a48a58e0c..01c8ef14e 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -160,7 +160,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
return 0;
}
-static int imx_dsp_remove(struct platform_device *pdev)
+static void imx_dsp_remove(struct platform_device *pdev)
{
struct imx_dsp_chan *dsp_chan;
struct imx_dsp_ipc *dsp_ipc;
@@ -173,8 +173,6 @@ static int imx_dsp_remove(struct platform_device *pdev)
mbox_free_channel(dsp_chan->ch);
kfree(dsp_chan->name);
}
-
- return 0;
}
static struct platform_driver imx_dsp_driver = {
@@ -182,7 +180,7 @@ static struct platform_driver imx_dsp_driver = {
.name = "imx-dsp",
},
.probe = imx_dsp_probe,
- .remove = imx_dsp_remove,
+ .remove_new = imx_dsp_remove,
};
builtin_platform_driver(imx_dsp_driver);
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index ed60f1103..5d7f62fe1 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -274,14 +274,11 @@ static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(serial);
-static struct attribute *meson_sm_sysfs_attributes[] = {
+static struct attribute *meson_sm_sysfs_attrs[] = {
&dev_attr_serial.attr,
NULL,
};
-
-static const struct attribute_group meson_sm_sysfs_attr_group = {
- .attrs = meson_sm_sysfs_attributes,
-};
+ATTRIBUTE_GROUPS(meson_sm_sysfs);
static const struct of_device_id meson_sm_ids[] = {
{ .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
@@ -313,7 +310,7 @@ static int __init meson_sm_probe(struct platform_device *pdev)
fw->sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
chip->shmem_size);
if (WARN_ON(!fw->sm_shmem_out_base))
- goto out_in_base;
+ goto unmap_in_base;
}
fw->chip = chip;
@@ -321,16 +318,15 @@ static int __init meson_sm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fw);
if (devm_of_platform_populate(dev))
- goto out_in_base;
-
- if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
- goto out_in_base;
+ goto unmap_out_base;
pr_info("secure-monitor enabled\n");
return 0;
-out_in_base:
+unmap_out_base:
+ iounmap(fw->sm_shmem_out_base);
+unmap_in_base:
iounmap(fw->sm_shmem_in_base);
out:
return -EINVAL;
@@ -340,6 +336,7 @@ static struct platform_driver meson_sm_driver = {
.driver = {
.name = "meson-sm",
.of_match_table = of_match_ptr(meson_sm_ids),
+ .dev_groups = meson_sm_sysfs_groups,
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
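ATTRIBUTE_GROUPS(meson_sm_sysfs) expands to roughly the following (modulo macro plumbing), which is what defines the meson_sm_sysfs_groups array now wired into .dev_groups, letting the driver core create and remove the sysfs files itself:

    static const struct attribute_group meson_sm_sysfs_group = {
    	.attrs = meson_sm_sysfs_attrs,
    };

    static const struct attribute_group *meson_sm_sysfs_groups[] = {
    	&meson_sm_sysfs_group,
    	NULL,
    };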
diff --git a/drivers/firmware/microchip/Kconfig b/drivers/firmware/microchip/Kconfig
new file mode 100644
index 000000000..434b923e0
--- /dev/null
+++ b/drivers/firmware/microchip/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config POLARFIRE_SOC_AUTO_UPDATE
+ tristate "Microchip PolarFire SoC AUTO UPDATE"
+ depends on POLARFIRE_SOC_SYS_CTRL
+ select FW_LOADER
+ select FW_UPLOAD
+ help
+ Support for reprogramming PolarFire SoC from within Linux, using the
+ Auto Upgrade feature of the system controller.
+
+ If built as a module, it will be called mpfs-auto-update.
diff --git a/drivers/firmware/microchip/Makefile b/drivers/firmware/microchip/Makefile
new file mode 100644
index 000000000..38796fd82
--- /dev/null
+++ b/drivers/firmware/microchip/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_POLARFIRE_SOC_AUTO_UPDATE) += mpfs-auto-update.o
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
new file mode 100644
index 000000000..fbeeaee4a
--- /dev/null
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC "Auto Update" FPGA reprogramming.
+ *
+ * Documentation of this functionality is available in the "PolarFire® FPGA and
+ * PolarFire SoC FPGA Programming" User Guide.
+ *
+ * Copyright (c) 2022-2023 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+#include <soc/microchip/mpfs.h>
+
+#define AUTO_UPDATE_DEFAULT_MBOX_OFFSET 0u
+#define AUTO_UPDATE_DEFAULT_RESP_OFFSET 0u
+
+#define AUTO_UPDATE_FEATURE_CMD_OPCODE 0x05u
+#define AUTO_UPDATE_FEATURE_CMD_DATA_SIZE 0u
+#define AUTO_UPDATE_FEATURE_RESP_SIZE 33u
+#define AUTO_UPDATE_FEATURE_CMD_DATA NULL
+#define AUTO_UPDATE_FEATURE_ENABLED BIT(5)
+
+#define AUTO_UPDATE_AUTHENTICATE_CMD_OPCODE 0x22u
+#define AUTO_UPDATE_AUTHENTICATE_CMD_DATA_SIZE 0u
+#define AUTO_UPDATE_AUTHENTICATE_RESP_SIZE 1u
+#define AUTO_UPDATE_AUTHENTICATE_CMD_DATA NULL
+
+#define AUTO_UPDATE_PROGRAM_CMD_OPCODE 0x46u
+#define AUTO_UPDATE_PROGRAM_CMD_DATA_SIZE 0u
+#define AUTO_UPDATE_PROGRAM_RESP_SIZE 1u
+#define AUTO_UPDATE_PROGRAM_CMD_DATA NULL
+
+/*
+ * SPI Flash layout example:
+ * |------------------------------| 0x0000000
+ * | 1 KiB |
+ * | SPI "directories" |
+ * |------------------------------| 0x0000400
+ * | 1 MiB |
+ * | Reserved area |
+ * | Used for bitstream info |
+ * |------------------------------| 0x0100400
+ * | 20 MiB |
+ * | Golden Image |
+ * |------------------------------| 0x1500400
+ * | 20 MiB |
+ * | Auto Upgrade Image |
+ * |------------------------------| 0x2900400
+ * | 20 MiB |
+ * | Reserved for multi-image IAP |
+ * | Unused for Auto Upgrade |
+ * |------------------------------| 0x3D00400
+ * | ? B |
+ * | Unused |
+ * |------------------------------| 0x?
+ */
+#define AUTO_UPDATE_DIRECTORY_BASE 0u
+#define AUTO_UPDATE_DIRECTORY_WIDTH 4u
+#define AUTO_UPDATE_GOLDEN_INDEX 0u
+#define AUTO_UPDATE_UPGRADE_INDEX 1u
+#define AUTO_UPDATE_BLANK_INDEX 2u
+#define AUTO_UPDATE_GOLDEN_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_GOLDEN_INDEX)
+#define AUTO_UPDATE_UPGRADE_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_UPGRADE_INDEX)
+#define AUTO_UPDATE_BLANK_DIRECTORY (AUTO_UPDATE_DIRECTORY_WIDTH * AUTO_UPDATE_BLANK_INDEX)
+#define AUTO_UPDATE_DIRECTORY_SIZE SZ_1K
+#define AUTO_UPDATE_RESERVED_SIZE SZ_1M
+#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_RESERVED_SIZE)
+
+#define AUTO_UPDATE_TIMEOUT_MS 60000
+
+struct mpfs_auto_update_priv {
+ struct mpfs_sys_controller *sys_controller;
+ struct device *dev;
+ struct mtd_info *flash;
+ struct fw_upload *fw_uploader;
+ struct completion programming_complete;
+ size_t size_per_bitstream;
+ bool cancel_request;
+};
+
+static enum fw_upload_err mpfs_auto_update_prepare(struct fw_upload *fw_uploader, const u8 *data,
+ u32 size)
+{
+ struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
+ size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE;
+
+ /*
+ * Verifying the Golden Image is idealistic. It will be evaluated
+ * against the currently programmed image and thus may fail - due to
+ * either rollback protection (if it's an older version than that in use)
+ * or if the version is the same as that of the in-use image.
+ * Extracting the information as to why a failure occurred is not
+ * currently possible due to limitations of the system controller
+ * driver. If those are fixed, verification of the Golden Image should
+ * be added here.
+ */
+
+ priv->flash = mpfs_sys_controller_get_flash(priv->sys_controller);
+ if (!priv->flash)
+ return FW_UPLOAD_ERR_HW_ERROR;
+
+ erase_size = round_up(erase_size, (u64)priv->flash->erasesize);
+
+ /*
+ * We need to calculate if we have enough space in the flash for the
+ * new image.
+ * First, chop off the first 1 KiB as it's reserved for the directory.
+ * The 1 MiB reserved for design info needs to be ignored also.
+ * All that remains is carved into 3 & rounded down to the erasesize.
+ * If this is smaller than the image size, we abort.
+ * There's also no need to consume more than 20 MiB per image.
+ */
+ priv->size_per_bitstream = priv->flash->size - SZ_1K - SZ_1M;
+ priv->size_per_bitstream = round_down(priv->size_per_bitstream / 3, erase_size);
+ if (priv->size_per_bitstream > 20 * SZ_1M)
+ priv->size_per_bitstream = 20 * SZ_1M;
+
+ if (priv->size_per_bitstream < size) {
+ dev_err(priv->dev,
+ "flash device has insufficient capacity to store this bitstream\n");
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+ }
+
+ priv->cancel_request = false;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader)
+{
+ struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
+
+ priv->cancel_request = true;
+}
+
+static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader)
+{
+ struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
+ int ret;
+
+ /*
+ * There is no meaningful way to get the status of the programming while
+ * it is in progress, so attempting anything other than waiting for it
+ * to complete would be misplaced.
+ */
+ ret = wait_for_completion_timeout(&priv->programming_complete,
+ msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS));
+ /* wait_for_completion_timeout() returns 0 on timeout */
+ if (!ret)
+ return FW_UPLOAD_ERR_TIMEOUT;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
+{
+ struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
+ struct mpfs_mss_response *response;
+ struct mpfs_mss_msg *message;
+ u32 *response_msg;
+ int ret;
+
+ response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
+ GFP_KERNEL);
+ if (!response_msg)
+ return -ENOMEM;
+
+ response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL);
+ if (!response) {
+ ret = -ENOMEM;
+ goto free_response_msg;
+ }
+
+ message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL);
+ if (!message) {
+ ret = -ENOMEM;
+ goto free_response;
+ }
+
+ /*
+ * The system controller can verify that an image in the flash is valid.
+ * Rather than duplicate the check in this driver, call the relevant
+ * service from the system controller instead.
+ * This service has no command data and no response data. It overloads
+ * mbox_offset with the image index in the flash's SPI directory where
+ * the bitstream is located.
+ */
+ response->resp_msg = response_msg;
+ response->resp_size = AUTO_UPDATE_AUTHENTICATE_RESP_SIZE;
+ message->cmd_opcode = AUTO_UPDATE_AUTHENTICATE_CMD_OPCODE;
+ message->cmd_data_size = AUTO_UPDATE_AUTHENTICATE_CMD_DATA_SIZE;
+ message->response = response;
+ message->cmd_data = AUTO_UPDATE_AUTHENTICATE_CMD_DATA;
+ message->mbox_offset = AUTO_UPDATE_UPGRADE_INDEX;
+ message->resp_offset = AUTO_UPDATE_DEFAULT_RESP_OFFSET;
+
+ dev_info(priv->dev, "Running verification of Upgrade Image\n");
+ ret = mpfs_blocking_transaction(priv->sys_controller, message);
+ if (ret || response->resp_status) {
+ dev_warn(priv->dev, "Verification of Upgrade Image failed!\n");
+ ret = ret ? ret : -EBADMSG;
+ goto free_message;
+ }
+
+ dev_info(priv->dev, "Verification of Upgrade Image passed!\n");
+
+free_message:
+ devm_kfree(priv->dev, message);
+free_response:
+ devm_kfree(priv->dev, response);
+free_response_msg:
+ devm_kfree(priv->dev, response_msg);
+
+ return ret;
+}
+
+static int mpfs_auto_update_set_image_address(struct mpfs_auto_update_priv *priv, char *buffer,
+ u32 image_address, loff_t directory_address)
+{
+ struct erase_info erase;
+ size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE;
+ size_t bytes_written = 0, bytes_read = 0;
+ int ret;
+
+ erase_size = round_up(erase_size, (u64)priv->flash->erasesize);
+
+ erase.addr = AUTO_UPDATE_DIRECTORY_BASE;
+ erase.len = erase_size;
+
+ /*
+ * We need to write the "SPI DIRECTORY" to the first 1 KiB, telling
+ * the system controller where to find the actual bitstream. Since
+ * this is spi-nor, we have to read the first eraseblock, erase that
+ * portion of the flash, modify the data and then write it back.
+ * There's no need to do this though if things are already the way they
+ * should be, so check and save the write in that case.
+ */
+ ret = mtd_read(priv->flash, AUTO_UPDATE_DIRECTORY_BASE, erase_size, &bytes_read,
+ (u_char *)buffer);
+ if (ret)
+ return ret;
+
+ if (bytes_read != erase_size)
+ return -EIO;
+
+ if ((*(u32 *)(buffer + AUTO_UPDATE_UPGRADE_DIRECTORY) == image_address) &&
+ !(*(u32 *)(buffer + AUTO_UPDATE_BLANK_DIRECTORY)))
+ return 0;
+
+ ret = mtd_erase(priv->flash, &erase);
+ if (ret)
+ return ret;
+
+ /*
+ * Populate the image address and then zero out the next directory so
+ * that the system controller doesn't complain if in "Single Image"
+ * mode.
+ */
+ memcpy(buffer + AUTO_UPDATE_UPGRADE_DIRECTORY, &image_address,
+ AUTO_UPDATE_DIRECTORY_WIDTH);
+ memset(buffer + AUTO_UPDATE_BLANK_DIRECTORY, 0x0, AUTO_UPDATE_DIRECTORY_WIDTH);
+
+ dev_info(priv->dev, "Writing the image address (%x) to the flash directory (%llx)\n",
+ image_address, directory_address);
+
+ ret = mtd_write(priv->flash, 0x0, erase_size, &bytes_written, (u_char *)buffer);
+ if (ret)
+ return ret;
+
+ if (bytes_written != erase_size)
+ return -EIO;
+
+ return 0;
+}
+
+static int mpfs_auto_update_write_bitstream(struct fw_upload *fw_uploader, const u8 *data,
+ u32 offset, u32 size, u32 *written)
+{
+ struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
+ struct erase_info erase;
+ char *buffer;
+ loff_t directory_address = AUTO_UPDATE_UPGRADE_DIRECTORY;
+ size_t erase_size = AUTO_UPDATE_DIRECTORY_SIZE;
+ size_t bytes_written = 0;
+ u32 image_address;
+ int ret;
+
+ erase_size = round_up(erase_size, (u64)priv->flash->erasesize);
+
+ image_address = AUTO_UPDATE_BITSTREAM_BASE +
+ AUTO_UPDATE_UPGRADE_INDEX * priv->size_per_bitstream;
+
+ buffer = devm_kzalloc(priv->dev, erase_size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ ret = mpfs_auto_update_set_image_address(priv, buffer, image_address, directory_address);
+ if (ret) {
+ dev_err(priv->dev, "failed to set image address in the SPI directory: %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * Now the .spi image itself can be written to the flash. Preservation
+ * of contents is not important here, unlike the SPI "directory"
+ * which must be RMWed.
+ */
+ erase.len = round_up(size, (size_t)priv->flash->erasesize);
+ erase.addr = image_address;
+
+ dev_info(priv->dev, "Erasing the flash at address (%x)\n", image_address);
+ ret = mtd_erase(priv->flash, &erase);
+ if (ret)
+ goto out;
+
+ /*
+ * No parsing etc of the bitstream is required. The system controller
+ * will do all of that itself - including verifying that the bitstream
+ * is valid.
+ */
+ dev_info(priv->dev, "Writing the image to the flash at address (%x)\n", image_address);
+ ret = mtd_write(priv->flash, (loff_t)image_address, size, &bytes_written, data);
+ if (ret)
+ goto out;
+
+ if (bytes_written != size) {
+ ret = -EIO;
+ goto out;
+ }
+
+ *written = bytes_written;
+
+out:
+ devm_kfree(priv->dev, buffer);
+ return ret;
+}
+
+static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader, const u8 *data,
+ u32 offset, u32 size, u32 *written)
+{
+ struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
+ enum fw_upload_err err = FW_UPLOAD_ERR_NONE;
+ int ret;
+
+ reinit_completion(&priv->programming_complete);
+
+ ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written);
+ if (ret) {
+ err = FW_UPLOAD_ERR_RW_ERROR;
+ goto out;
+ }
+
+ if (priv->cancel_request) {
+ err = FW_UPLOAD_ERR_CANCELED;
+ goto out;
+ }
+
+ ret = mpfs_auto_update_verify_image(fw_uploader);
+ if (ret)
+ err = FW_UPLOAD_ERR_FW_INVALID;
+
+out:
+ complete(&priv->programming_complete);
+
+ return err;
+}
+
+static const struct fw_upload_ops mpfs_auto_update_ops = {
+ .prepare = mpfs_auto_update_prepare,
+ .write = mpfs_auto_update_write,
+ .poll_complete = mpfs_auto_update_poll_complete,
+ .cancel = mpfs_auto_update_cancel,
+};
+
+static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
+{
+ struct mpfs_mss_response *response;
+ struct mpfs_mss_msg *message;
+ u32 *response_msg;
+ int ret;
+
+ response_msg = devm_kzalloc(priv->dev,
+ AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
+ GFP_KERNEL);
+ if (!response_msg)
+ return -ENOMEM;
+
+ response = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_response), GFP_KERNEL);
+ if (!response)
+ return -ENOMEM;
+
+ message = devm_kzalloc(priv->dev, sizeof(struct mpfs_mss_msg), GFP_KERNEL);
+ if (!message)
+ return -ENOMEM;
+
+ /*
+ * To verify that Auto Update is possible, the "Query Security Service
+ * Request" is performed.
+ * This service has no command data & does not overload mbox_offset.
+ */
+ response->resp_msg = response_msg;
+ response->resp_size = AUTO_UPDATE_FEATURE_RESP_SIZE;
+ message->cmd_opcode = AUTO_UPDATE_FEATURE_CMD_OPCODE;
+ message->cmd_data_size = AUTO_UPDATE_FEATURE_CMD_DATA_SIZE;
+ message->response = response;
+ message->cmd_data = AUTO_UPDATE_FEATURE_CMD_DATA;
+ message->mbox_offset = AUTO_UPDATE_DEFAULT_MBOX_OFFSET;
+ message->resp_offset = AUTO_UPDATE_DEFAULT_RESP_OFFSET;
+
+ ret = mpfs_blocking_transaction(priv->sys_controller, message);
+ if (ret)
+ return ret;
+
+ /*
+ * Currently, the system controller's firmware does not generate any
+ * interrupts for failed services, so mpfs_blocking_transaction() should
+ * time out & therefore return an error.
+ * Hitting this check is highly unlikely at present, but if the system
+ * controller's behaviour changes so that it does generate interrupts
+ * for failed services, it will be required.
+ */
+ if (response->resp_status)
+ return -EIO;
+
+ /*
+ * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is
+ * not possible.
+ */
+ if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int mpfs_auto_update_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_auto_update_priv *priv;
+ struct fw_upload *fw_uploader;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sys_controller = mpfs_sys_controller_get(dev);
+ if (IS_ERR(priv->sys_controller))
+ return dev_err_probe(dev, PTR_ERR(priv->sys_controller),
+ "Could not register as a sub device of the system controller\n");
+
+ priv->dev = dev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = mpfs_auto_update_available(priv);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "The current bitstream does not support auto-update\n");
+
+ init_completion(&priv->programming_complete);
+
+ fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update",
+ &mpfs_auto_update_ops, priv);
+ if (IS_ERR(fw_uploader))
+ return dev_err_probe(dev, PTR_ERR(fw_uploader),
+ "Failed to register the bitstream uploader\n");
+
+ priv->fw_uploader = fw_uploader;
+
+ return 0;
+}
+
+static void mpfs_auto_update_remove(struct platform_device *pdev)
+{
+ struct mpfs_auto_update_priv *priv = platform_get_drvdata(pdev);
+
+ firmware_upload_unregister(priv->fw_uploader);
+}
+
+static struct platform_driver mpfs_auto_update_driver = {
+ .driver = {
+ .name = "mpfs-auto-update",
+ },
+ .probe = mpfs_auto_update_probe,
+ .remove_new = mpfs_auto_update_remove,
+};
+module_platform_driver(mpfs_auto_update_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC Auto Update FPGA reprogramming");
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index 85e94ddc7..a76230297 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -116,7 +116,7 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_adsp_ipc_remove(struct platform_device *pdev)
+static void mtk_adsp_ipc_remove(struct platform_device *pdev)
{
struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev);
struct mtk_adsp_chan *adsp_chan;
@@ -126,8 +126,6 @@ static int mtk_adsp_ipc_remove(struct platform_device *pdev)
adsp_chan = &adsp_ipc->chans[i];
mbox_free_channel(adsp_chan->ch);
}
-
- return 0;
}
static struct platform_driver mtk_adsp_ipc_driver = {
@@ -135,7 +133,7 @@ static struct platform_driver mtk_adsp_ipc_driver = {
.name = "mtk-adsp-ipc",
},
.probe = mtk_adsp_ipc_probe,
- .remove = mtk_adsp_ipc_remove,
+ .remove_new = mtk_adsp_ipc_remove,
};
builtin_platform_driver(mtk_adsp_ipc_driver);
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 32188f098..bc550ad0d 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -221,6 +221,19 @@ struct qsee_rsp_uefi_query_variable_info {
* alignment of 8 bytes (64 bits) for GUIDs. Our definition of efi_guid_t,
* however, has an alignment of 4 byte (32 bits). So far, this seems to work
* fine here. See also the comment on the typedef of efi_guid_t.
+ *
+ * Note: It looks like uefisecapp is quite picky about how the memory passed to
+ * it is structured and aligned. This is especially true of the
+ * request/response setup used for QSEE_CMD_UEFI_GET_VARIABLE. While
+ * qcom_qseecom_app_send(), in theory,
+ * accepts separate buffers/addresses for the request and response parts, in
+ * practice, however, it seems to expect them to be both part of a larger
+ * contiguous block. We initially allocated separate buffers for the request
+ * and response but this caused the QSEE_CMD_UEFI_GET_VARIABLE command to
+ * either not write any response to the response buffer or outright crash the
+ * device. Therefore, we now allocate a single contiguous block of DMA memory
+ * for both and properly align the data using the macros below. In particular,
+ * request and response structs are aligned to 8 bytes (via __reqdata_offs()),
+ * following the driver that this has been reverse-engineered from.
*/
#define qcuefi_buf_align_fields(fields...) \
({ \
@@ -244,6 +257,12 @@ struct qsee_rsp_uefi_query_variable_info {
#define __array_offs(type, count, offset) \
__field_impl(sizeof(type) * (count), __alignof__(type), offset)
+#define __array_offs_aligned(type, count, align, offset) \
+ __field_impl(sizeof(type) * (count), align, offset)
+
+#define __reqdata_offs(size, offset) \
+ __array_offs_aligned(u8, size, 8, offset)
+
#define __array(type, count) __array_offs(type, count, NULL)
#define __field_offs(type, offset) __array_offs(type, 1, offset)
#define __field(type) __array_offs(type, 1, NULL)
@@ -277,10 +296,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
unsigned long buffer_size = *data_size;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -304,17 +328,19 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__array(u8, buffer_size)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_VARIABLE;
req_data->data_size = buffer_size;
@@ -332,7 +358,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -407,9 +435,7 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -422,10 +448,15 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
struct qsee_rsp_uefi_set_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
size_t req_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name || !guid)
@@ -450,17 +481,19 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__array_offs(u8, data_size, &data_offs)
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_SET_VARIABLE;
req_data->attributes = attributes;
@@ -483,8 +516,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
if (data_size)
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -507,9 +541,7 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -521,10 +553,15 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
size_t rsp_size;
+ size_t req_offs;
+ size_t rsp_offs;
ssize_t status;
if (!name_size || !name || !guid)
@@ -545,17 +582,19 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__array(*name, *name_size / sizeof(*name))
);
- req_data = kzalloc(req_size, GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(req_size, &req_offs)
+ __reqdata_offs(rsp_size, &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(rsp_size, GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_GET_NEXT_VARIABLE;
req_data->guid_offset = guid_offs;
@@ -572,7 +611,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
goto out_free;
}
- status = qcom_qseecom_app_send(qcuefi->client, req_data, req_size, rsp_data, rsp_size);
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, req_size,
+ cmd_buf_dma + rsp_offs, rsp_size);
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -645,9 +686,7 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
}
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
@@ -659,26 +698,34 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
efi_status_t efi_status = EFI_SUCCESS;
+ dma_addr_t cmd_buf_dma;
+ size_t cmd_buf_size;
+ void *cmd_buf;
+ size_t req_offs;
+ size_t rsp_offs;
int status;
- req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
- if (!req_data) {
+ cmd_buf_size = qcuefi_buf_align_fields(
+ __reqdata_offs(sizeof(*req_data), &req_offs)
+ __reqdata_offs(sizeof(*rsp_data), &rsp_offs)
+ );
+
+ cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
+ if (!cmd_buf) {
efi_status = EFI_OUT_OF_RESOURCES;
goto out;
}
- rsp_data = kzalloc(sizeof(*rsp_data), GFP_KERNEL);
- if (!rsp_data) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out_free_req;
- }
+ req_data = cmd_buf + req_offs;
+ rsp_data = cmd_buf + rsp_offs;
req_data->command_id = QSEE_CMD_UEFI_QUERY_VARIABLE_INFO;
req_data->attributes = attr;
req_data->length = sizeof(*req_data);
- status = qcom_qseecom_app_send(qcuefi->client, req_data, sizeof(*req_data), rsp_data,
- sizeof(*rsp_data));
+ status = qcom_qseecom_app_send(qcuefi->client,
+ cmd_buf_dma + req_offs, sizeof(*req_data),
+ cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
if (status) {
efi_status = EFI_DEVICE_ERROR;
goto out_free;
@@ -711,9 +758,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
*max_variable_size = rsp_data->max_variable_size;
out_free:
- kfree(rsp_data);
-out_free_req:
- kfree(req_data);
+ qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
out:
return efi_status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 520de9b56..90283f160 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1576,9 +1576,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req: DMA address of the request buffer sent to the app.
* @req_size: Size of the request buffer.
- * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp: DMA address of the response buffer, written to by the app.
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and read back
@@ -1589,33 +1589,13 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
- size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
+ dma_addr_t rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
- dma_addr_t req_phys;
- dma_addr_t rsp_phys;
int status;
- /* Map request buffer */
- req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, req_phys);
- if (status) {
- dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
- return status;
- }
-
- /* Map response buffer */
- rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
- status = dma_mapping_error(__scm->dev, rsp_phys);
- if (status) {
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
- dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
- return status;
- }
-
- /* Set up SCM call data */
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1623,18 +1603,13 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req_phys;
+ desc.args[1] = req;
desc.args[2] = req_size;
- desc.args[3] = rsp_phys;
+ desc.args[3] = rsp;
desc.args[4] = rsp_size;
- /* Perform call */
status = qcom_scm_qseecom_call(&desc, &res);
- /* Unmap buffers */
- dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
- dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
-
if (status)
return status;
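Hoisting the dma_map_single() calls out of qcom_scm_qseecom_app_send() means callers now own one coherent buffer across the whole round trip. The resulting calling convention, as exercised by uefisecapp above (sketch, error handling elided):

    void *cmd_buf;
    dma_addr_t cmd_buf_dma;

    cmd_buf = qseecom_dma_alloc(client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
    if (!cmd_buf)
    	return -ENOMEM;

    /* build the request at cmd_buf + req_offs ... */
    status = qcom_qseecom_app_send(client,
    			       cmd_buf_dma + req_offs, req_size,
    			       cmd_buf_dma + rsp_offs, rsp_size);
    /* ... parse the response at cmd_buf + rsp_offs */

    qseecom_dma_free(client, cmd_buf_size, cmd_buf, cmd_buf_dma);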
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 1448f6117..03da9a435 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -731,7 +731,7 @@ err_sel:
return err;
}
-static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+static void fw_cfg_sysfs_remove(struct platform_device *pdev)
{
pr_debug("fw_cfg: unloading.\n");
fw_cfg_sysfs_cache_cleanup();
@@ -739,7 +739,6 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev)
fw_cfg_io_cleanup();
fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
- return 0;
}
static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
@@ -758,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
static struct platform_driver fw_cfg_sysfs_driver = {
.probe = fw_cfg_sysfs_probe,
- .remove = fw_cfg_sysfs_remove,
+ .remove_new = fw_cfg_sysfs_remove,
.driver = {
.name = "fw_cfg",
.of_match_table = fw_cfg_sysfs_mmio_match,
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 4cd290a60..322aada20 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -317,7 +317,7 @@ static void rpi_firmware_shutdown(struct platform_device *pdev)
rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
}
-static int rpi_firmware_remove(struct platform_device *pdev)
+static void rpi_firmware_remove(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
@@ -327,8 +327,6 @@ static int rpi_firmware_remove(struct platform_device *pdev)
rpi_clk = NULL;
rpi_firmware_put(fw);
-
- return 0;
}
static const struct of_device_id rpi_firmware_of_match[] = {
@@ -406,7 +404,7 @@ static struct platform_driver rpi_firmware_driver = {
},
.probe = rpi_firmware_probe,
.shutdown = rpi_firmware_shutdown,
- .remove = rpi_firmware_remove,
+ .remove_new = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 4f7a7abad..e20cee9c2 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -793,17 +793,16 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
return ret;
}
-static int stratix10_rsu_remove(struct platform_device *pdev)
+static void stratix10_rsu_remove(struct platform_device *pdev)
{
struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
stratix10_svc_free_channel(priv->chan);
- return 0;
}
static struct platform_driver stratix10_rsu_driver = {
.probe = stratix10_rsu_probe,
- .remove = stratix10_rsu_remove,
+ .remove_new = stratix10_rsu_remove,
.driver = {
.name = "stratix10-rsu",
.dev_groups = rsu_groups,
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index c693da60e..528f37417 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1251,7 +1251,7 @@ err_destroy_pool:
return ret;
}
-static int stratix10_svc_drv_remove(struct platform_device *pdev)
+static void stratix10_svc_drv_remove(struct platform_device *pdev)
{
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
@@ -1267,13 +1267,11 @@ static int stratix10_svc_drv_remove(struct platform_device *pdev)
if (ctrl->genpool)
gen_pool_destroy(ctrl->genpool);
list_del(&ctrl->node);
-
- return 0;
}
static struct platform_driver stratix10_svc_driver = {
.probe = stratix10_svc_drv_probe,
- .remove = stratix10_svc_drv_remove,
+ .remove_new = stratix10_svc_drv_remove,
.driver = {
.name = "stratix10-svc",
.of_match_table = stratix10_svc_drv_match,
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index bbcdd9fed..4221fed70 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -77,7 +77,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
- goto out;
+ return NULL;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
root_path_buf_len);
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index 2de0fb139..31d962cdd 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -554,7 +554,7 @@ put_kobj:
return ret;
}
-static int turris_mox_rwtm_remove(struct platform_device *pdev)
+static void turris_mox_rwtm_remove(struct platform_device *pdev)
{
struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
@@ -562,8 +562,6 @@ static int turris_mox_rwtm_remove(struct platform_device *pdev)
sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
kobject_put(rwtm_to_kobj(rwtm));
mbox_free_channel(rwtm->mbox);
-
- return 0;
}
static const struct of_device_id turris_mox_rwtm_match[] = {
@@ -576,7 +574,7 @@ MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
static struct platform_driver turris_mox_rwtm_driver = {
.probe = turris_mox_rwtm_probe,
- .remove = turris_mox_rwtm_remove,
+ .remove_new = turris_mox_rwtm_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = turris_mox_rwtm_match,
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index b0d22d445..79789f056 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -92,6 +92,8 @@ static int zynqmp_pm_ret_code(u32 ret_status)
return 0;
case XST_PM_NO_FEATURE:
return -ENOTSUPP;
+ case XST_PM_INVALID_VERSION:
+ return -EOPNOTSUPP;
case XST_PM_NO_ACCESS:
return -EACCES;
case XST_PM_ABORT_SUSPEND:
@@ -101,13 +103,13 @@ static int zynqmp_pm_ret_code(u32 ret_status)
case XST_PM_INTERNAL:
case XST_PM_CONFLICT:
case XST_PM_INVALID_NODE:
+ case XST_PM_INVALID_CRC:
default:
return -EINVAL;
}
}
-static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
- u32 *ret_payload)
+static noinline int do_fw_call_fail(u32 *ret_payload, u32 num_args, ...)
{
return -ENODEV;
}
@@ -116,25 +118,35 @@ static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
* PM function call wrapper
* Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration
*/
-static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail;
+static int (*do_fw_call)(u32 *ret_payload, u32, ...) = do_fw_call_fail;
/**
* do_fw_call_smc() - Call system-level platform management layer (SMC)
- * @arg0: Argument 0 to SMC call
- * @arg1: Argument 1 to SMC call
- * @arg2: Argument 2 to SMC call
+ * @num_args: Number of variable arguments (must be <= 8)
* @ret_payload: Returned value array
*
* Invoke platform management function via SMC call (no hypervisor present).
*
* Return: Returns status, either success or error+reason
*/
-static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
- u32 *ret_payload)
+static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...)
{
struct arm_smccc_res res;
+ u64 args[8] = {0};
+ va_list arg_list;
+ u8 i;
- arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+ if (num_args > 8)
+ return -EINVAL;
+
+ va_start(arg_list, num_args);
+
+ for (i = 0; i < num_args; i++)
+ args[i] = va_arg(arg_list, u64);
+
+ va_end(arg_list);
+
+ arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
if (ret_payload) {
ret_payload[0] = lower_32_bits(res.a0);
@@ -148,9 +160,7 @@ static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
/**
* do_fw_call_hvc() - Call system-level platform management layer (HVC)
- * @arg0: Argument 0 to HVC call
- * @arg1: Argument 1 to HVC call
- * @arg2: Argument 2 to HVC call
+ * @num_args: Number of variable arguments (must be <= 8)
* @ret_payload: Returned value array
*
* Invoke platform management function via HVC
@@ -159,12 +169,24 @@ static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
- u32 *ret_payload)
+static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...)
{
struct arm_smccc_res res;
+ u64 args[8] = {0};
+ va_list arg_list;
+ u8 i;
+
+ if (num_args > 8)
+ return -EINVAL;
+
+ va_start(arg_list, num_args);
+
+ for (i = 0; i < num_args; i++)
+ args[i] = va_arg(arg_list, u64);
+
+ va_end(arg_list);
- arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
+ arm_smccc_hvc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
if (ret_payload) {
ret_payload[0] = lower_32_bits(res.a0);
@@ -180,11 +202,31 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
{
int ret;
u64 smc_arg[2];
+ u32 module_id;
+ u32 feature_check_api_id;
- smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
- smc_arg[1] = api_id;
+ module_id = FIELD_GET(MODULE_ID_MASK, api_id);
- ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ /*
+ * Feature check of APIs belonging to PM, XSEM, and TF-A are handled by calling
+ * PM_FEATURE_CHECK API. For other modules, call PM_API_FEATURES API.
+ */
+ if (module_id == PM_MODULE_ID || module_id == XSEM_MODULE_ID || module_id == TF_A_MODULE_ID)
+ feature_check_api_id = PM_FEATURE_CHECK;
+ else
+ feature_check_api_id = PM_API_FEATURES;
+
+ /*
+ * Feature check of TF-A APIs is done in the TF-A layer and it expects for
+ * MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID.
+ */
+ if (module_id == TF_A_MODULE_ID)
+ module_id = PM_MODULE_ID;
+
+ smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id;
+ smc_arg[1] = (api_id & API_ID_MASK);
+
+ ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]);
if (ret)
ret = -EOPNOTSUPP;
else
@@ -295,11 +337,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
- * @arg0: Argument 0 to requested PM-API call
- * @arg1: Argument 1 to requested PM-API call
- * @arg2: Argument 2 to requested PM-API call
- * @arg3: Argument 3 to requested PM-API call
* @ret_payload: Returned value array
+ * @num_args: Number of arguments to requested PM-API call
*
* Invoke platform management function for SMC or HVC call, depending on
* configuration.
@@ -316,26 +355,38 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 *ret_payload)
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
{
/*
* Added SIP service call Function Identifier
* Make sure to stay in x0 register
*/
- u64 smc_arg[4];
- int ret;
+ u64 smc_arg[8];
+ int ret, i;
+ va_list arg_list;
+ u32 args[14] = {0};
+
+ if (num_args > 14)
+ return -EINVAL;
+
/* Check if feature is supported or not */
ret = zynqmp_pm_feature(pm_api_id);
if (ret < 0)
return ret;
+
+ va_start(arg_list, num_args);
+
+ for (i = 0; i < num_args; i++)
+ args[i] = va_arg(arg_list, u32);
+
+ va_end(arg_list);
+
smc_arg[0] = PM_SIP_SVC | pm_api_id;
- smc_arg[1] = ((u64)arg1 << 32) | arg0;
- smc_arg[2] = ((u64)arg3 << 32) | arg2;
+ for (i = 0; i < 7; i++)
+ smc_arg[i + 1] = ((u64)args[(i * 2) + 1] << 32) | args[i * 2];
- return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload);
+ return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3],
+ smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]);
}
static u32 pm_api_version;
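The packing in zynqmp_pm_invoke_fn() folds two u32 arguments into each 64-bit SMC register. For example, a three-argument call such as zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SD_DLL_RESET, type) ends up as:

    smc_arg[0] = PM_SIP_SVC | PM_IOCTL;
    smc_arg[1] = ((u64)IOCTL_SD_DLL_RESET << 32) | node_id;  /* args[1]:args[0] */
    smc_arg[2] = ((u64)0 << 32) | type;                      /* args[3]:args[2] */
    /* smc_arg[3..7] = 0 */

which is why the call sites below translate mechanically once the argument count is stated explicitly.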
@@ -347,14 +398,12 @@ int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
int ret;
- ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
- NULL);
- if (!ret)
+ ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, NULL, 2, sgi_num, reset);
+ if (ret != -EOPNOTSUPP && !ret)
return ret;
/* try old implementation as fallback strategy if above fails */
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
- reset, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, IOCTL_REGISTER_SGI, sgi_num, reset);
}
/**
@@ -376,7 +425,7 @@ int zynqmp_pm_get_api_version(u32 *version)
*version = pm_api_version;
return 0;
}
- ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, ret_payload, 0);
*version = ret_payload[1];
return ret;
@@ -399,7 +448,7 @@ int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
if (!idcode || !version)
return -EINVAL;
- ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
*idcode = ret_payload[1];
*version = ret_payload[2];
@@ -414,7 +463,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
u32 idcode;
@@ -427,7 +476,7 @@ static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
return 0;
}
- ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
if (ret < 0)
return ret;
@@ -439,6 +488,7 @@ static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
return 0;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info);
/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
@@ -459,8 +509,7 @@ static int zynqmp_pm_get_trustzone_version(u32 *version)
*version = pm_tz_version;
return 0;
}
- ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, ret_payload, 0);
*version = ret_payload[1];
return ret;
@@ -507,8 +556,8 @@ int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
int ret;
- ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1,
- qdata.arg2, qdata.arg3, out);
+ ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2,
+ qdata.arg3);
/*
* For clock name query, all bytes in SMC response are clock name
@@ -530,7 +579,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
*/
int zynqmp_pm_clock_enable(u32 clock_id)
{
- return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, NULL, 1, clock_id);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
@@ -545,7 +594,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
*/
int zynqmp_pm_clock_disable(u32 clock_id)
{
- return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, NULL, 1, clock_id);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
@@ -564,8 +613,7 @@ int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, ret_payload, 1, clock_id);
*state = ret_payload[1];
return ret;
@@ -584,8 +632,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
*/
int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
- return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
- 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, NULL, 2, clock_id, divider);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
@@ -604,8 +651,7 @@ int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, ret_payload, 1, clock_id);
*divider = ret_payload[1];
return ret;
@@ -613,47 +659,6 @@ int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
/**
- * zynqmp_pm_clock_setrate() - Set the clock rate for given id
- * @clock_id: ID of the clock
- * @rate: rate value in hz
- *
- * This function is used by master to set rate for any clock.
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
- return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
- lower_32_bits(rate),
- upper_32_bits(rate),
- 0, NULL);
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
-
-/**
- * zynqmp_pm_clock_getrate() - Get the clock rate for given id
- * @clock_id: ID of the clock
- * @rate: rate value in hz
- *
- * This function is used by master to get rate
- * for any clock.
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
- u32 ret_payload[PAYLOAD_ARG_CNT];
- int ret;
-
- ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0,
- 0, 0, ret_payload);
- *rate = ((u64)ret_payload[2] << 32) | ret_payload[1];
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
-
-/**
* zynqmp_pm_clock_setparent() - Set the clock parent for given id
* @clock_id: ID of the clock
* @parent_id: parent id
@@ -664,8 +669,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
*/
int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
- return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
- parent_id, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, NULL, 2, clock_id, parent_id);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
@@ -684,8 +688,7 @@ int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, ret_payload, 1, clock_id);
*parent_id = ret_payload[1];
return ret;
@@ -704,8 +707,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
*/
int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
- clk_id, mode, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
@@ -721,8 +723,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
*/
int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
- clk_id, 0, mode);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, mode, 3, 0, IOCTL_GET_PLL_FRAC_MODE, clk_id);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
@@ -739,8 +740,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
*/
int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
- clk_id, data, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_PLL_FRAC_DATA, clk_id, data);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
@@ -756,8 +756,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
*/
int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
- clk_id, 0, data);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, data, 3, 0, IOCTL_GET_PLL_FRAC_DATA, clk_id);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
@@ -778,9 +777,8 @@ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
if (value) {
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
- IOCTL_SET_SD_TAPDELAY,
- type, value, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node_id, IOCTL_SET_SD_TAPDELAY, type,
+ value);
}
/*
@@ -798,7 +796,7 @@ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
* Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter
* part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits.
*/
- return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, NULL, 2, reg, mask);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
@@ -814,8 +812,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
- type, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SD_DLL_RESET, type);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
@@ -831,8 +828,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
*/
int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
- select, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, dev_id, IOCTL_OSPI_MUX_SELECT, select);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
@@ -847,8 +843,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
*/
int zynqmp_pm_write_ggs(u32 index, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
- index, value, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_WRITE_GGS, index, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
@@ -863,8 +858,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
*/
int zynqmp_pm_read_ggs(u32 index, u32 *value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
- index, 0, value);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, value, 3, 0, IOCTL_READ_GGS, index);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
@@ -880,8 +874,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
*/
int zynqmp_pm_write_pggs(u32 index, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
- NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_WRITE_PGGS, index, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
@@ -897,15 +890,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
*/
int zynqmp_pm_read_pggs(u32 index, u32 *value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
- value);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, value, 3, 0, IOCTL_READ_PGGS, index);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_TAPDELAY_BYPASS,
- index, value, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_TAPDELAY_BYPASS, index, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
@@ -920,8 +911,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
*/
int zynqmp_pm_set_boot_health_status(u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
- value, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, 0, IOCTL_SET_BOOT_HEALTH_STATUS, value);
}
/**
@@ -935,8 +925,7 @@ int zynqmp_pm_set_boot_health_status(u32 value)
int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
const enum zynqmp_pm_reset_action assert_flag)
{
- return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
- 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
@@ -955,8 +944,7 @@ int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
if (!status)
return -EINVAL;
- ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, ret_payload, 1, reset);
*status = ret_payload[1];
return ret;
@@ -981,9 +969,8 @@ int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
- upper_32_bits(address), size, flags,
- ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, ret_payload, 4, lower_32_bits(address),
+ upper_32_bits(address), size, flags);
if (ret_payload[0])
return -ret_payload[0];
@@ -1008,7 +995,7 @@ int zynqmp_pm_fpga_get_status(u32 *value)
if (!value)
return -EINVAL;
- ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, ret_payload, 0);
*value = ret_payload[1];
return ret;
@@ -1036,11 +1023,9 @@ int zynqmp_pm_fpga_get_config_status(u32 *value)
lower_addr = lower_32_bits((u64)&buf);
upper_addr = upper_32_bits((u64)&buf);
- ret = zynqmp_pm_invoke_fn(PM_FPGA_READ,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET,
- lower_addr, upper_addr,
- XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG,
- ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];
@@ -1058,7 +1043,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_config_status);
*/
int zynqmp_pm_pinctrl_request(const u32 pin)
{
- return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, NULL, 1, pin);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
@@ -1072,36 +1057,11 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
*/
int zynqmp_pm_pinctrl_release(const u32 pin)
{
- return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, NULL, 1, pin);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release);
/**
- * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
- * @pin: Pin number
- * @id: Buffer to store function ID
- *
- * This function provides the function currently set for the given pin.
- *
- * Return: Returns status, either success or error+reason
- */
-int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
-{
- u32 ret_payload[PAYLOAD_ARG_CNT];
- int ret;
-
- if (!id)
- return -EINVAL;
-
- ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
- 0, 0, ret_payload);
- *id = ret_payload[1];
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
-
-/**
* zynqmp_pm_pinctrl_set_function - Set requested function for the pin
* @pin: Pin number
* @id: Function ID to set
@@ -1112,8 +1072,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
*/
int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
{
- return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
- 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, NULL, 2, pin, id);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function);
@@ -1136,8 +1095,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
if (!value)
return -EINVAL;
- ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, ret_payload, 2, pin, param);
*value = ret_payload[1];
return ret;
@@ -1166,8 +1124,7 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
return -EOPNOTSUPP;
}
- return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
- param, value, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config);
@@ -1185,8 +1142,7 @@ unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
unsigned int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
- ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, CRL_APB_BOOT_PIN_CTRL, 0,
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, ret_payload, 1, CRL_APB_BOOT_PIN_CTRL);
*ps_mode = ret_payload[1];
@@ -1205,8 +1161,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_read);
*/
int zynqmp_pm_bootmode_write(u32 ps_mode)
{
- return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, CRL_APB_BOOT_PIN_CTRL,
- CRL_APB_BOOTPIN_CTRL_MASK, ps_mode, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, NULL, 3, CRL_APB_BOOT_PIN_CTRL,
+ CRL_APB_BOOTPIN_CTRL_MASK, ps_mode);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
@@ -1221,7 +1177,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
*/
int zynqmp_pm_init_finalize(void)
{
- return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, NULL, 0);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
@@ -1235,7 +1191,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
*/
int zynqmp_pm_set_suspend_mode(u32 mode)
{
- return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, NULL, 1, mode);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
@@ -1254,8 +1210,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
const u32 qos, const enum zynqmp_pm_request_ack ack)
{
- return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
- qos, ack, NULL);
+ return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, NULL, 4, node, capabilities, qos, ack);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
@@ -1271,7 +1226,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
*/
int zynqmp_pm_release_node(const u32 node)
{
- return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, NULL, 1, node);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
@@ -1290,8 +1245,7 @@ int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
- IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 2, node_id, IOCTL_GET_RPU_OPER_MODE);
/* only set rpu_mode if no error */
if (ret == XST_PM_SUCCESS)
@@ -1313,9 +1267,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode);
*/
int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
- IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode,
- 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SET_RPU_OPER_MODE,
+ (u32)rpu_mode);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);
@@ -1331,9 +1284,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);
*/
int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
- IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0,
- NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_TCM_COMB_CONFIG,
+ (u32)tcm_mode);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
@@ -1348,7 +1300,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
int zynqmp_pm_force_pwrdwn(const u32 node,
const enum zynqmp_pm_request_ack ack)
{
- return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, NULL, 2, node, ack);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn);
@@ -1367,8 +1319,8 @@ int zynqmp_pm_request_wake(const u32 node,
const enum zynqmp_pm_request_ack ack)
{
/* set_addr flag is encoded into 1st bit of address */
- return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
- address >> 32, ack, NULL);
+ return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, NULL, 4, node, address | set_addr,
+ address >> 32, ack);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake);
@@ -1388,15 +1340,14 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack)
{
- return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
- qos, ack, NULL);
+ return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, NULL, 4, node, capabilities, qos, ack);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
/**
* zynqmp_pm_load_pdi - Load and process PDI
- * @src: Source device where PDI is located
- * @address: PDI src address
+ * @src: Source device where PDI is located
+ * @address: PDI src address
*
* This function provides support to load PDI from Linux
*
@@ -1404,9 +1355,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
*/
int zynqmp_pm_load_pdi(const u32 src, const u64 address)
{
- return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src,
- lower_32_bits(address),
- upper_32_bits(address), 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_LOAD_PDI, NULL, 3, src, lower_32_bits(address),
+ upper_32_bits(address));
}
EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi);
@@ -1426,9 +1376,8 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
if (!out)
return -EINVAL;
- ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
- lower_32_bits(address),
- 0, 0, ret_payload);
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, ret_payload, 2, upper_32_bits(address),
+ lower_32_bits(address));
*out = ret_payload[1];
return ret;
@@ -1456,8 +1405,7 @@ int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
u32 lower_addr = lower_32_bits(address);
u32 upper_addr = upper_32_bits(address);
- return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
- size, flags, NULL);
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, NULL, 4, upper_addr, lower_addr, size, flags);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
@@ -1479,8 +1427,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
const u32 wake, const u32 enable)
{
- return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
- wake, enable, NULL);
+ return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, NULL, 4, node, event, wake, enable);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
@@ -1493,8 +1440,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
*/
int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
{
- return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
- 0, 0, NULL);
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, NULL, 2, type, subtype);
}
/**
@@ -1506,8 +1452,7 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
*/
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG,
- id, value, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_FEATURE_CONFIG, id, value);
}
/**
@@ -1520,8 +1465,7 @@ int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
u32 *payload)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG,
- id, 0, payload);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, payload, 3, 0, IOCTL_GET_FEATURE_CONFIG, id);
}
/**
@@ -1534,8 +1478,7 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
*/
int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
- config, value, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_SD_CONFIG, config, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
@@ -1550,8 +1493,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
u32 value)
{
- return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
- config, value, NULL);
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_GEM_CONFIG, config, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
@@ -1916,7 +1858,6 @@ ATTRIBUTE_GROUPS(zynqmp_firmware);
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np;
struct zynqmp_devinfo *devinfo;
int ret;
@@ -1924,22 +1865,9 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
if (ret)
return ret;
- np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
- if (!np) {
- np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
- if (!np)
- return 0;
-
+ ret = do_feature_check_call(PM_FEATURE_CHECK);
+ if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1))
feature_check_enabled = true;
- }
-
- if (!feature_check_enabled) {
- ret = do_feature_check_call(PM_FEATURE_CHECK);
- if (ret >= 0)
- feature_check_enabled = true;
- }
-
- of_node_put(np);
devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
if (!devinfo)
@@ -1992,19 +1920,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
- np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
- if (np) {
+ if (pm_family_code == VERSAL_FAMILY_CODE) {
em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
-1, NULL, 0);
if (IS_ERR(em_dev))
dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "failed to register the event manager\n");
}
- of_node_put(np);
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
-static int zynqmp_firmware_remove(struct platform_device *pdev)
+static void zynqmp_firmware_remove(struct platform_device *pdev)
{
struct pm_api_feature_data *feature_data;
struct hlist_node *tmp;
@@ -2019,8 +1945,6 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
}
platform_device_unregister(em_dev);
-
- return 0;
}
static const struct of_device_id zynqmp_firmware_of_match[] = {
@@ -2037,6 +1961,6 @@ static struct platform_driver zynqmp_firmware_driver = {
.dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
- .remove = zynqmp_firmware_remove,
+ .remove_new = zynqmp_firmware_remove,
};
module_platform_driver(zynqmp_firmware_driver);
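
All of the zynqmp hunks above are one mechanical conversion: zynqmp_pm_invoke_fn() no longer takes five fixed SMC arguments with the return payload last, but takes the payload pointer and an explicit argument count up front, followed only by the arguments actually used. A minimal sketch of the new calling convention, inferred from these call sites rather than quoted from the header (example_read_reg is a hypothetical helper):

	/* old: zynqmp_pm_invoke_fn(api_id, arg0, arg1, arg2, arg3, ret_payload);
	 * new: zynqmp_pm_invoke_fn(api_id, ret_payload, num_args, ...);
	 */
	static int example_read_reg(u32 reg, u32 *val)
	{
		u32 ret_payload[PAYLOAD_ARG_CNT];
		int ret;

		ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, ret_payload, 1, reg);
		*val = ret_payload[1];	/* ret_payload[0] carries the firmware status */

		return ret;
	}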
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index 1fa2ccc32..6b60ca004 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -147,20 +147,18 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
return ret;
}
-static int alt_fpga_bridge_remove(struct platform_device *pdev)
+static void alt_fpga_bridge_remove(struct platform_device *pdev)
{
struct fpga_bridge *br = platform_get_drvdata(pdev);
fpga_bridge_unregister(br);
-
- return 0;
}
MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
static struct platform_driver altera_fpga_driver = {
.probe = alt_fpga_bridge_probe,
- .remove = alt_fpga_bridge_remove,
+ .remove_new = alt_fpga_bridge_remove,
.driver = {
.name = "altera_fpga2sdram_bridge",
.of_match_table = of_match_ptr(altera_fpga_of_match),
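
This .remove conversion, repeated for the remaining FPGA drivers below, follows the treewide platform-driver cleanup: the int return value of the remove callback was effectively ignored by the driver core (a non-zero value only produced a warning), so the callback becomes void and is registered through the transitional .remove_new member. A minimal sketch of the pattern, with hypothetical example_* names:

	static void example_remove(struct platform_device *pdev)
	{
		/* teardown only; there is no meaningful status to return */
	}

	static struct platform_driver example_driver = {
		.probe      = example_probe,
		.remove_new = example_remove,	/* void-returning callback */
	};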
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 0c3fb8226..44061cb16 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -253,18 +253,16 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
return 0;
}
-static int altera_freeze_br_remove(struct platform_device *pdev)
+static void altera_freeze_br_remove(struct platform_device *pdev)
{
struct fpga_bridge *br = platform_get_drvdata(pdev);
fpga_bridge_unregister(br);
-
- return 0;
}
static struct platform_driver altera_freeze_br_driver = {
.probe = altera_freeze_br_probe,
- .remove = altera_freeze_br_remove,
+ .remove_new = altera_freeze_br_remove,
.driver = {
.name = "altera_freeze_br",
.of_match_table = altera_freeze_br_of_match,
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 578663503..6f8e24be1 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -191,7 +191,7 @@ err:
return ret;
}
-static int alt_fpga_bridge_remove(struct platform_device *pdev)
+static void alt_fpga_bridge_remove(struct platform_device *pdev)
{
struct fpga_bridge *bridge = platform_get_drvdata(pdev);
struct altera_hps2fpga_data *priv = bridge->priv;
@@ -199,15 +199,13 @@ static int alt_fpga_bridge_remove(struct platform_device *pdev)
fpga_bridge_unregister(bridge);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
static struct platform_driver alt_fpga_bridge_driver = {
.probe = alt_fpga_bridge_probe,
- .remove = alt_fpga_bridge_remove,
+ .remove_new = alt_fpga_bridge_remove,
.driver = {
.name = "altera_hps2fpga_bridge",
.of_match_table = of_match_ptr(altera_fpga_of_match),
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 7f621e96d..c0a75ca36 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -932,15 +932,13 @@ exit:
return ret;
}
-static int afu_remove(struct platform_device *pdev)
+static void afu_remove(struct platform_device *pdev)
{
dev_dbg(&pdev->dev, "%s\n", __func__);
dfl_fpga_dev_ops_unregister(pdev);
dfl_fpga_dev_feature_uinit(pdev);
afu_dev_destroy(pdev);
-
- return 0;
}
static const struct attribute_group *afu_dev_groups[] = {
@@ -956,7 +954,7 @@ static struct platform_driver afu_driver = {
.dev_groups = afu_dev_groups,
},
.probe = afu_probe,
- .remove = afu_remove,
+ .remove_new = afu_remove,
};
static int __init afu_init(void)
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 808d1f4d7..0b01b3895 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -78,7 +78,7 @@ static int fme_br_probe(struct platform_device *pdev)
return 0;
}
-static int fme_br_remove(struct platform_device *pdev)
+static void fme_br_remove(struct platform_device *pdev)
{
struct fpga_bridge *br = platform_get_drvdata(pdev);
struct fme_br_priv *priv = br->priv;
@@ -89,8 +89,6 @@ static int fme_br_remove(struct platform_device *pdev)
put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);
-
- return 0;
}
static struct platform_driver fme_br_driver = {
@@ -98,7 +96,7 @@ static struct platform_driver fme_br_driver = {
.name = DFL_FPGA_FME_BRIDGE,
},
.probe = fme_br_probe,
- .remove = fme_br_remove,
+ .remove_new = fme_br_remove,
};
module_platform_driver(fme_br_driver);
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 3dcf990bd..a2b5da009 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -730,13 +730,11 @@ exit:
return ret;
}
-static int fme_remove(struct platform_device *pdev)
+static void fme_remove(struct platform_device *pdev)
{
dfl_fpga_dev_ops_unregister(pdev);
dfl_fpga_dev_feature_uinit(pdev);
fme_dev_destroy(pdev);
-
- return 0;
}
static const struct attribute_group *fme_dev_groups[] = {
@@ -751,7 +749,7 @@ static struct platform_driver fme_driver = {
.dev_groups = fme_dev_groups,
},
.probe = fme_probe,
- .remove = fme_remove,
+ .remove_new = fme_remove,
};
module_platform_driver(fme_driver);
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 4aebde0a7..71616f8b4 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -61,15 +61,13 @@ eprobe_mgr_put:
return ret;
}
-static int fme_region_remove(struct platform_device *pdev)
+static void fme_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
fpga_mgr_put(mgr);
-
- return 0;
}
static struct platform_driver fme_region_driver = {
@@ -77,7 +75,7 @@ static struct platform_driver fme_region_driver = {
.name = DFL_FPGA_FME_REGION,
},
.probe = fme_region_probe,
- .remove = fme_region_remove,
+ .remove_new = fme_region_remove,
};
module_platform_driver(fme_region_driver);
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index e73f88050..e6d12fbab 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -2008,8 +2008,8 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
(hdr.start + hdr.count < hdr.start))
return -EINVAL;
- fds = memdup_user((void __user *)(arg + sizeof(hdr)),
- array_size(hdr.count, sizeof(s32)));
+ fds = memdup_array_user((void __user *)(arg + sizeof(hdr)),
+ hdr.count, sizeof(s32));
if (IS_ERR(fds))
return PTR_ERR(fds);
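
memdup_array_user() is the array-aware variant of memdup_user(): it multiplies element count by element size internally with overflow checking, so the caller no longer open-codes array_size(). A sketch of the intent, where uptr and n stand in for the ioctl pointer and header count above:

	s32 *fds = memdup_array_user(uptr, n, sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);	/* covers allocation, copy and overflow errors */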
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 31af2e08c..89851b133 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -730,15 +730,13 @@ fw_name_fail:
return ret;
}
-static int m10bmc_sec_remove(struct platform_device *pdev)
+static void m10bmc_sec_remove(struct platform_device *pdev)
{
struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev);
firmware_upload_unregister(sec->fwl);
kfree(sec->fw_name);
xa_erase(&fw_upload_xa, sec->fw_name_id);
-
- return 0;
}
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
@@ -760,7 +758,7 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
static struct platform_driver intel_m10bmc_sec_driver = {
.probe = m10bmc_sec_probe,
- .remove = m10bmc_sec_remove,
+ .remove_new = m10bmc_sec_remove,
.driver = {
.name = "intel-m10bmc-sec-update",
.dev_groups = m10bmc_sec_attr_groups,
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index a6affd83f..8526a5a86 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -425,20 +425,18 @@ eprobe_mgr_put:
return ret;
}
-static int of_fpga_region_remove(struct platform_device *pdev)
+static void of_fpga_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
fpga_mgr_put(mgr);
-
- return 0;
}
static struct platform_driver of_fpga_region_driver = {
.probe = of_fpga_region_probe,
- .remove = of_fpga_region_remove,
+ .remove_new = of_fpga_region_remove,
.driver = {
.name = "of-fpga-region",
.of_match_table = of_match_ptr(fpga_region_of_match),
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index cc4861e34..4c03513b8 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -517,15 +517,13 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return 0;
}
-static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+static void socfpga_a10_fpga_remove(struct platform_device *pdev)
{
struct fpga_manager *mgr = platform_get_drvdata(pdev);
struct a10_fpga_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id socfpga_a10_fpga_of_match[] = {
@@ -537,7 +535,7 @@ MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
static struct platform_driver socfpga_a10_fpga_driver = {
.probe = socfpga_a10_fpga_probe,
- .remove = socfpga_a10_fpga_remove,
+ .remove_new = socfpga_a10_fpga_remove,
.driver = {
.name = "socfpga_a10_fpga_manager",
.of_match_table = socfpga_a10_fpga_of_match,
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index cacb9cc57..2c0def7d7 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -436,15 +436,13 @@ probe_err:
return ret;
}
-static int s10_remove(struct platform_device *pdev)
+static void s10_remove(struct platform_device *pdev)
{
struct fpga_manager *mgr = platform_get_drvdata(pdev);
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
stratix10_svc_free_channel(priv->chan);
-
- return 0;
}
static const struct of_device_id s10_of_match[] = {
@@ -457,7 +455,7 @@ MODULE_DEVICE_TABLE(of, s10_of_match);
static struct platform_driver s10_driver = {
.probe = s10_probe,
- .remove = s10_remove,
+ .remove_new = s10_remove,
.driver = {
.name = "Stratix10 SoC FPGA manager",
.of_match_table = of_match_ptr(s10_of_match),
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 68835896f..788dd2f63 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -150,7 +150,7 @@ err_clk:
return err;
}
-static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
+static void xlnx_pr_decoupler_remove(struct platform_device *pdev)
{
struct fpga_bridge *bridge = platform_get_drvdata(pdev);
struct xlnx_pr_decoupler_data *p = bridge->priv;
@@ -158,13 +158,11 @@ static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
fpga_bridge_unregister(bridge);
clk_unprepare(p->clk);
-
- return 0;
}
static struct platform_driver xlnx_pr_decoupler_driver = {
.probe = xlnx_pr_decoupler_probe,
- .remove = xlnx_pr_decoupler_remove,
+ .remove_new = xlnx_pr_decoupler_remove,
.driver = {
.name = "xlnx_pr_decoupler",
.of_match_table = xlnx_pr_decoupler_of_match,
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 96611d424..0ac93183d 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -618,7 +618,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
return 0;
}
-static int zynq_fpga_remove(struct platform_device *pdev)
+static void zynq_fpga_remove(struct platform_device *pdev)
{
struct zynq_fpga_priv *priv;
struct fpga_manager *mgr;
@@ -629,8 +629,6 @@ static int zynq_fpga_remove(struct platform_device *pdev)
fpga_mgr_unregister(mgr);
clk_unprepare(priv->clk);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -644,7 +642,7 @@ MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
static struct platform_driver zynq_fpga_driver = {
.probe = zynq_fpga_probe,
- .remove = zynq_fpga_remove,
+ .remove_new = zynq_fpga_remove,
.driver = {
.name = "zynq_fpga_manager",
.of_match_table = of_match_ptr(zynq_fpga_of_match),
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index 5d8e9bfb2..baa956494 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -80,8 +80,8 @@ static const struct gnss_operations gnss_serial_gnss_ops = {
.write_raw = gnss_serial_write_raw,
};
-static int gnss_serial_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t count)
+static ssize_t gnss_serial_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t count)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
struct gnss_device *gdev = gserial->gdev;
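
This callback change (and the identical one in sirf.c below) tracks the serdev core: receive_buf in struct serdev_device_ops now returns ssize_t, the number of bytes consumed, and takes the data as const u8 * rather than const unsigned char *. The updated shape, with a hypothetical my_receive_buf:

	static ssize_t my_receive_buf(struct serdev_device *serdev,
				      const u8 *buf, size_t count)
	{
		/* consume up to count bytes; return how many were taken */
		return count;
	}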
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index bcb53ccfe..6801a8fb2 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -160,8 +160,8 @@ static const struct gnss_operations sirf_gnss_ops = {
.write_raw = sirf_write_raw,
};
-static int sirf_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t count)
+static ssize_t sirf_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t count)
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
struct gnss_device *gdev = data->gdev;
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index c951be202..92402f608 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -7,6 +7,7 @@
#include <linux/errno.h>
#include <linux/gnss.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,7 +18,6 @@
#include "serial.h"
struct ubx_data {
- struct regulator *v_bckp;
struct regulator *vcc;
};
@@ -66,6 +66,7 @@ static const struct gnss_serial_ops ubx_gserial_ops = {
static int ubx_probe(struct serdev_device *serdev)
{
struct gnss_serial *gserial;
+ struct gpio_desc *reset;
struct ubx_data *data;
int ret;
@@ -87,30 +88,23 @@ static int ubx_probe(struct serdev_device *serdev)
goto err_free_gserial;
}
- data->v_bckp = devm_regulator_get_optional(&serdev->dev, "v-bckp");
- if (IS_ERR(data->v_bckp)) {
- ret = PTR_ERR(data->v_bckp);
- if (ret == -ENODEV)
- data->v_bckp = NULL;
- else
- goto err_free_gserial;
- }
+ ret = devm_regulator_get_enable_optional(&serdev->dev, "v-bckp");
+ if (ret < 0 && ret != -ENODEV)
+ goto err_free_gserial;
- if (data->v_bckp) {
- ret = regulator_enable(data->v_bckp);
- if (ret)
- goto err_free_gserial;
+ /* Deassert reset */
+ reset = devm_gpiod_get_optional(&serdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ goto err_free_gserial;
}
ret = gnss_serial_register(gserial);
if (ret)
- goto err_disable_v_bckp;
+ goto err_free_gserial;
return 0;
-err_disable_v_bckp:
- if (data->v_bckp)
- regulator_disable(data->v_bckp);
err_free_gserial:
gnss_serial_free(gserial);
@@ -120,11 +114,8 @@ err_free_gserial:
static void ubx_remove(struct serdev_device *serdev)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
- struct ubx_data *data = gnss_serial_get_drvdata(gserial);
gnss_serial_deregister(gserial);
- if (data->v_bckp)
- regulator_disable(data->v_bckp);
gnss_serial_free(gserial);
}
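
The ubx rework leans on devm_regulator_get_enable_optional(), which collapses get, enable and disable-on-unbind into one devres-managed call; that is why struct ubx_data loses its v_bckp member and ubx_remove() no longer disables anything by hand. The idiom as used above:

	ret = devm_regulator_get_enable_optional(dev, "v-bckp");
	if (ret < 0 && ret != -ENODEV)	/* -ENODEV just means the supply is absent */
		return ret;
	/* no pointer to keep and no regulator_disable() on remove: devres handles both */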
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 41b51536b..353af1a4d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -478,6 +478,13 @@ config GPIO_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
+config GPIO_NPCM_SGPIO
+ bool "Nuvoton SGPIO support"
+ depends on ARCH_NPCM || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support Nuvoton NPCM7XX/NPCM8XX SGPIO functionality.
+
config GPIO_OCTEON
tristate "Cavium OCTEON GPIO"
depends on CAVIUM_OCTEON_SOC
@@ -553,6 +560,19 @@ config GPIO_ROCKCHIP
help
Say yes here to support GPIO on Rockchip SoCs.
+config GPIO_RTD
+ tristate "Realtek DHC GPIO support"
+ depends on ARCH_REALTEK
+ default y
+ select GPIOLIB_IRQCHIP
+ help
+	  This option enables support for GPIOs found on the Realtek DHC (Digital
+	  Home Center) SoC family, including RTD1295, RTD1315E, RTD1319,
+ RTD1319D, RTD1395, RTD1619 and RTD1619B.
+
+	  Say yes here to support GPIO functionality and GPIO interrupts on
+ Realtek DHC SoCs.
+
config GPIO_SAMA5D2_PIOBU
tristate "SAMA5D2 PIOBU GPIO support"
depends on MFD_SYSCON
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index eb73b5d63..9e40af196 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -116,6 +116,7 @@ obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
@@ -137,6 +138,7 @@ obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_REALTEK_OTTO) += gpio-realtek-otto.o
obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_GPIO_ROCKCHIP) += gpio-rockchip.o
+obj-$(CONFIG_GPIO_RTD) += gpio-rtd.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 8c5933242..798235791 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -416,11 +416,12 @@ static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
{
u32 debounce;
- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
- return -ENOTSUPP;
+ if (pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE) {
+ debounce = pinconf_to_config_argument(config);
+ return dwapb_gpio_set_debounce(gc, offset, debounce);
+ }
- debounce = pinconf_to_config_argument(config);
- return dwapb_gpio_set_debounce(gc, offset, debounce);
+ return gpiochip_generic_config(gc, offset, config);
}
static int dwapb_convert_irqs(struct dwapb_gpio_port_irqchip *pirq,
@@ -530,10 +531,14 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
port->gc.fwnode = pp->fwnode;
port->gc.ngpio = pp->ngpio;
port->gc.base = pp->gpio_base;
+ port->gc.request = gpiochip_generic_request;
+ port->gc.free = gpiochip_generic_free;
/* Only port A supports debounce */
if (pp->idx == 0)
port->gc.set_config = dwapb_gpio_set_config;
+ else
+ port->gc.set_config = gpiochip_generic_config;
/* Only port A can provide interrupts in all configurations of the IP */
if (pp->idx == 0)
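
The dwapb change stops rejecting every pin configuration other than debounce: unsupported parameters now fall through to gpiochip_generic_config(), and request/free route through the generic helpers, so settings such as bias can reach a backing pinctrl driver where one exists. The resulting dispatch, compressed into one view:

	if (pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE)
		return dwapb_gpio_set_debounce(gc, offset,
					       pinconf_to_config_argument(config));
	return gpiochip_generic_config(gc, offset, config);	/* hand off to pinctrl */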
diff --git a/drivers/gpio/gpio-elkhartlake.c b/drivers/gpio/gpio-elkhartlake.c
index a9c8b1621..887c0fe99 100644
--- a/drivers/gpio/gpio-elkhartlake.c
+++ b/drivers/gpio/gpio-elkhartlake.c
@@ -55,18 +55,6 @@ static int ehl_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int ehl_gpio_suspend(struct device *dev)
-{
- return tng_gpio_suspend(dev);
-}
-
-static int ehl_gpio_resume(struct device *dev)
-{
- return tng_gpio_resume(dev);
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(ehl_gpio_pm_ops, ehl_gpio_suspend, ehl_gpio_resume);
-
static const struct platform_device_id ehl_gpio_ids[] = {
{ "gpio-elkhartlake" },
{ }
@@ -76,7 +64,7 @@ MODULE_DEVICE_TABLE(platform, ehl_gpio_ids);
static struct platform_driver ehl_gpio_driver = {
.driver = {
.name = "gpio-elkhartlake",
- .pm = pm_sleep_ptr(&ehl_gpio_pm_ops),
+ .pm = pm_sleep_ptr(&tng_gpio_pm_ops),
},
.probe = ehl_gpio_probe,
.id_table = ehl_gpio_ids,
diff --git a/drivers/gpio/gpio-en7523.c b/drivers/gpio/gpio-en7523.c
index f836a8db4..69834db2c 100644
--- a/drivers/gpio/gpio-en7523.c
+++ b/drivers/gpio/gpio-en7523.c
@@ -12,11 +12,11 @@
#define AIROHA_GPIO_MAX 32
/**
- * airoha_gpio_ctrl - Airoha GPIO driver data
+ * struct airoha_gpio_ctrl - Airoha GPIO driver data
* @gc: Associated gpio_chip instance.
* @data: The data register.
- * @dir0: The direction register for the lower 16 pins.
- * @dir1: The direction register for the higher 16 pins.
+ * @dir: [0]: The direction register for the lower 16 pins.
+ *       [1]: The direction register for the higher 16 pins.
* @output: The output enable register.
*/
struct airoha_gpio_ctrl {
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index dde6cf3a5..c5a9fa640 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -38,6 +38,18 @@
#define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0)
#define IXP4XX_GPIO_STYLE_SIZE 3
+/*
+ * Clock output control register defines.
+ */
+#define IXP4XX_GPCLK_CLK0DC_SHIFT 0
+#define IXP4XX_GPCLK_CLK0TC_SHIFT 4
+#define IXP4XX_GPCLK_CLK0_MASK GENMASK(7, 0)
+#define IXP4XX_GPCLK_MUX14 BIT(8)
+#define IXP4XX_GPCLK_CLK1DC_SHIFT 16
+#define IXP4XX_GPCLK_CLK1TC_SHIFT 20
+#define IXP4XX_GPCLK_CLK1_MASK GENMASK(23, 16)
+#define IXP4XX_GPCLK_MUX15 BIT(24)
+
/**
* struct ixp4xx_gpio - IXP4 GPIO state container
* @dev: containing device for this instance
@@ -202,6 +214,8 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
struct ixp4xx_gpio *g;
struct gpio_irq_chip *girq;
struct device_node *irq_parent;
+ bool clk_14, clk_15;
+ u32 val;
int ret;
g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
@@ -226,12 +240,47 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
g->fwnode = of_node_to_fwnode(np);
/*
+	 * If either clock output is enabled explicitly in the device tree,
+	 * we take full control of the clock by masking off all of the
+	 * clock control bits and selectively enabling them. Otherwise we
+	 * leave the hardware at its default settings.
+	 *
+	 * Clock outputs are enabled with the default timings for the
+	 * requested clock. If you need control over TC and DC, add these
+	 * to the device tree bindings and use them here.
+ */
+ clk_14 = of_property_read_bool(np, "intel,ixp4xx-gpio14-clkout");
+ clk_15 = of_property_read_bool(np, "intel,ixp4xx-gpio15-clkout");
+
+ /*
* Make sure GPIO 14 and 15 are NOT used as clocks but as GPIOs on
* specific machines.
*/
if (of_machine_is_compatible("dlink,dsm-g600-a") ||
of_machine_is_compatible("iom,nas-100d"))
- __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK);
+ val = 0;
+ else {
+ val = __raw_readl(g->base + IXP4XX_REG_GPCLK);
+
+ if (clk_14 || clk_15) {
+ val &= ~(IXP4XX_GPCLK_MUX14 | IXP4XX_GPCLK_MUX15);
+ val &= ~IXP4XX_GPCLK_CLK0_MASK;
+ val &= ~IXP4XX_GPCLK_CLK1_MASK;
+ if (clk_14) {
+ /* IXP4XX_GPCLK_CLK0DC implicit low */
+ val |= (1 << IXP4XX_GPCLK_CLK0TC_SHIFT);
+ val |= IXP4XX_GPCLK_MUX14;
+ }
+
+ if (clk_15) {
+ /* IXP4XX_GPCLK_CLK1DC implicit low */
+ val |= (1 << IXP4XX_GPCLK_CLK1TC_SHIFT);
+ val |= IXP4XX_GPCLK_MUX15;
+ }
+ }
+ }
+
+ __raw_writel(val, g->base + IXP4XX_REG_GPCLK);
/*
* This is a very special big-endian ARM issue: when the IXP4xx is
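
The new GPCLK defines describe two clock generators packed into one register: each has a 4-bit duty-cycle field (DC), a 4-bit terminal-count field (TC) and a mux bit routing the generated clock out on GPIO 14 or 15. Under the defaults programmed above, enabling the GPIO 14 clock reduces to (a restatement of the hunk, not additional behavior):

	val &= ~(IXP4XX_GPCLK_MUX14 | IXP4XX_GPCLK_CLK0_MASK);
	val |= 1 << IXP4XX_GPCLK_CLK0TC_SHIFT;	/* TC = 1, DC left at 0 */
	val |= IXP4XX_GPCLK_MUX14;		/* route the clock to GPIO 14 */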
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index bb5cf14ae..701795b9d 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
* Copyright (C) 2009 Wolfram Sang, Pengutronix
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 3ff0ea1e3..71e1af7c2 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -40,25 +40,22 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
* `.......````.```
*/
-#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
#include <linux/err.h>
-#include <linux/bug.h>
-#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/log2.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/log2.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
+
#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
-#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include "gpiolib.h"
@@ -688,7 +685,6 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, r);
}
-#ifdef CONFIG_OF
static const struct of_device_id bgpio_of_match[] = {
{ .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
@@ -697,36 +693,27 @@ static const struct of_device_id bgpio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, bgpio_of_match);
-static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev,
- unsigned long *flags)
+static struct bgpio_pdata *bgpio_parse_fw(struct device *dev, unsigned long *flags)
{
struct bgpio_pdata *pdata;
- if (!pdev->dev.of_node)
+ if (!dev_fwnode(dev))
return NULL;
- pdata = devm_kzalloc(&pdev->dev, sizeof(struct bgpio_pdata),
- GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
pdata->base = -1;
- if (of_device_is_big_endian(pdev->dev.of_node))
+ if (device_is_big_endian(dev))
*flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
- if (of_property_read_bool(pdev->dev.of_node, "no-output"))
+ if (device_property_read_bool(dev, "no-output"))
*flags |= BGPIOF_NO_OUTPUT;
return pdata;
}
-#else
-static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev,
- unsigned long *flags)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
static int bgpio_pdev_probe(struct platform_device *pdev)
{
@@ -743,7 +730,7 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct bgpio_pdata *pdata;
- pdata = bgpio_parse_dt(pdev, &flags);
+ pdata = bgpio_parse_fw(dev, &flags);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
@@ -814,7 +801,7 @@ MODULE_DEVICE_TABLE(platform, bgpio_id_table);
static struct platform_driver bgpio_driver = {
.driver = {
.name = "basic-mmio-gpio",
- .of_match_table = of_match_ptr(bgpio_of_match),
+ .of_match_table = bgpio_of_match,
},
.id_table = bgpio_id_table,
.probe = bgpio_pdev_probe,
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 4870e267a..455eecf63 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -354,7 +354,6 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
static void gpio_mockup_debugfs_setup(struct device *dev,
struct gpio_mockup_chip *chip)
{
- struct device *child __free(put_device) = NULL;
struct gpio_mockup_dbgfs_private *priv;
struct gpio_chip *gc;
const char *devname;
@@ -367,7 +366,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
* There can only be a single GPIO device per platform device in
* gpio-mockup so using device_find_any_child() is OK.
*/
- child = device_find_any_child(dev);
+ struct device *child __free(put_device) = device_find_any_child(dev);
if (!child)
return;
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
new file mode 100644
index 000000000..d31788b43
--- /dev/null
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM Serial GPIO Driver
+ *
+ * Copyright (C) 2021 Nuvoton Technologies
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/units.h>
+
+#define MAX_NR_HW_SGPIO 64
+
+#define NPCM_IOXCFG1 0x2A
+#define NPCM_IOXCFG1_SFT_CLK GENMASK(3, 0)
+#define NPCM_IOXCFG1_SCLK_POL BIT(4)
+#define NPCM_IOXCFG1_LDSH_POL BIT(5)
+
+#define NPCM_IOXCTS 0x28
+#define NPCM_IOXCTS_IOXIF_EN BIT(7)
+#define NPCM_IOXCTS_RD_MODE GENMASK(2, 1)
+#define NPCM_IOXCTS_RD_MODE_PERIODIC BIT(2)
+
+#define NPCM_IOXCFG2 0x2B
+#define NPCM_IOXCFG2_PORT GENMASK(3, 0)
+
+#define NPCM_IXOEVCFG_MASK GENMASK(1, 0)
+#define NPCM_IXOEVCFG_FALLING BIT(1)
+#define NPCM_IXOEVCFG_RISING BIT(0)
+#define NPCM_IXOEVCFG_BOTH (NPCM_IXOEVCFG_FALLING | NPCM_IXOEVCFG_RISING)
+
+#define NPCM_CLK_MHZ (8 * HZ_PER_MHZ)
+#define NPCM_750_OPT 6
+#define NPCM_845_OPT 5
+
+#define GPIO_BANK(x) ((x) / 8)
+#define GPIO_BIT(x) ((x) % 8)
+
+/*
+ * Select the frequency of shift clock.
+ * The shift clock is a division of the APB clock.
+ */
+struct npcm_clk_cfg {
+ unsigned int *sft_clk;
+ unsigned int *clk_sel;
+ unsigned int cfg_opt;
+};
+
+struct npcm_sgpio {
+ struct gpio_chip chip;
+ struct clk *pclk;
+ struct irq_chip intc;
+ raw_spinlock_t lock;
+
+ void __iomem *base;
+ int irq;
+ u8 nin_sgpio;
+ u8 nout_sgpio;
+ u8 in_port;
+ u8 out_port;
+ u8 int_type[MAX_NR_HW_SGPIO];
+};
+
+struct npcm_sgpio_bank {
+ u8 rdata_reg;
+ u8 wdata_reg;
+ u8 event_config;
+ u8 event_status;
+};
+
+enum npcm_sgpio_reg {
+ READ_DATA,
+ WRITE_DATA,
+ EVENT_CFG,
+ EVENT_STS,
+};
+
+static const struct npcm_sgpio_bank npcm_sgpio_banks[] = {
+ {
+ .wdata_reg = 0x00,
+ .rdata_reg = 0x08,
+ .event_config = 0x10,
+ .event_status = 0x20,
+ },
+ {
+ .wdata_reg = 0x01,
+ .rdata_reg = 0x09,
+ .event_config = 0x12,
+ .event_status = 0x21,
+ },
+ {
+ .wdata_reg = 0x02,
+ .rdata_reg = 0x0a,
+ .event_config = 0x14,
+ .event_status = 0x22,
+ },
+ {
+ .wdata_reg = 0x03,
+ .rdata_reg = 0x0b,
+ .event_config = 0x16,
+ .event_status = 0x23,
+ },
+ {
+ .wdata_reg = 0x04,
+ .rdata_reg = 0x0c,
+ .event_config = 0x18,
+ .event_status = 0x24,
+ },
+ {
+ .wdata_reg = 0x05,
+ .rdata_reg = 0x0d,
+ .event_config = 0x1a,
+ .event_status = 0x25,
+ },
+ {
+ .wdata_reg = 0x06,
+ .rdata_reg = 0x0e,
+ .event_config = 0x1c,
+ .event_status = 0x26,
+ },
+ {
+ .wdata_reg = 0x07,
+ .rdata_reg = 0x0f,
+ .event_config = 0x1e,
+ .event_status = 0x27,
+ },
+};
+
+static void __iomem *bank_reg(struct npcm_sgpio *gpio,
+ const struct npcm_sgpio_bank *bank,
+ const enum npcm_sgpio_reg reg)
+{
+ switch (reg) {
+ case READ_DATA:
+ return gpio->base + bank->rdata_reg;
+ case WRITE_DATA:
+ return gpio->base + bank->wdata_reg;
+ case EVENT_CFG:
+ return gpio->base + bank->event_config;
+ case EVENT_STS:
+ return gpio->base + bank->event_status;
+ default:
+		/* reaching here indicates a driver bug */
+		dev_WARN(gpio->chip.parent, "invalid register type requested\n");
+ return NULL;
+ }
+}
+
+static const struct npcm_sgpio_bank *offset_to_bank(unsigned int offset)
+{
+ unsigned int bank = GPIO_BANK(offset);
+
+ return &npcm_sgpio_banks[bank];
+}
+
+static void npcm_sgpio_irqd_to_data(struct irq_data *d,
+ struct npcm_sgpio **gpio,
+ const struct npcm_sgpio_bank **bank,
+ u8 *bit, unsigned int *offset)
+{
+ struct npcm_sgpio *internal;
+
+ *offset = irqd_to_hwirq(d);
+ internal = irq_data_get_irq_chip_data(d);
+
+ *gpio = internal;
+ *offset -= internal->nout_sgpio;
+ *bank = offset_to_bank(*offset);
+ *bit = GPIO_BIT(*offset);
+}
+
+static int npcm_sgpio_init_port(struct npcm_sgpio *gpio)
+{
+ u8 in_port, out_port, set_port, reg;
+
+ in_port = GPIO_BANK(gpio->nin_sgpio);
+ if (GPIO_BIT(gpio->nin_sgpio) > 0)
+ in_port += 1;
+
+ out_port = GPIO_BANK(gpio->nout_sgpio);
+ if (GPIO_BIT(gpio->nout_sgpio) > 0)
+ out_port += 1;
+
+ gpio->in_port = in_port;
+ gpio->out_port = out_port;
+ set_port = (out_port & NPCM_IOXCFG2_PORT) << 4 |
+ (in_port & NPCM_IOXCFG2_PORT);
+ iowrite8(set_port, gpio->base + NPCM_IOXCFG2);
+
+ reg = ioread8(gpio->base + NPCM_IOXCFG2);
+
+ return reg == set_port ? 0 : -EINVAL;
+}
+
+static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
+{
+ struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+
+ return offset < gpio->nout_sgpio ? -EINVAL : 0;
+}
+
+static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ gc->set(gc, offset, val);
+
+ return 0;
+}
+
+static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+
+ if (offset < gpio->nout_sgpio)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+ const struct npcm_sgpio_bank *bank = offset_to_bank(offset);
+ void __iomem *addr;
+ u8 reg = 0;
+
+ addr = bank_reg(gpio, bank, WRITE_DATA);
+ reg = ioread8(addr);
+
+ if (val)
+ reg |= BIT(GPIO_BIT(offset));
+ else
+ reg &= ~BIT(GPIO_BIT(offset));
+
+ iowrite8(reg, addr);
+}
+
+static int npcm_sgpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+ const struct npcm_sgpio_bank *bank;
+ void __iomem *addr;
+ u8 reg;
+
+ if (offset < gpio->nout_sgpio) {
+ bank = offset_to_bank(offset);
+ addr = bank_reg(gpio, bank, WRITE_DATA);
+ } else {
+ offset -= gpio->nout_sgpio;
+ bank = offset_to_bank(offset);
+ addr = bank_reg(gpio, bank, READ_DATA);
+ }
+
+ reg = ioread8(addr);
+
+ return !!(reg & BIT(GPIO_BIT(offset)));
+}
+
+static void npcm_sgpio_setup_enable(struct npcm_sgpio *gpio, bool enable)
+{
+ u8 reg;
+
+ reg = ioread8(gpio->base + NPCM_IOXCTS);
+ reg = (reg & ~NPCM_IOXCTS_RD_MODE) | NPCM_IOXCTS_RD_MODE_PERIODIC;
+
+ if (enable)
+ reg |= NPCM_IOXCTS_IOXIF_EN;
+ else
+ reg &= ~NPCM_IOXCTS_IOXIF_EN;
+
+ iowrite8(reg, gpio->base + NPCM_IOXCTS);
+}
+
+static int npcm_sgpio_setup_clk(struct npcm_sgpio *gpio,
+ const struct npcm_clk_cfg *clk_cfg)
+{
+ unsigned long apb_freq;
+ u32 val;
+ u8 tmp;
+ int i;
+
+ apb_freq = clk_get_rate(gpio->pclk);
+ tmp = ioread8(gpio->base + NPCM_IOXCFG1) & ~NPCM_IOXCFG1_SFT_CLK;
+
+ for (i = clk_cfg->cfg_opt-1; i > 0; i--) {
+ val = apb_freq / clk_cfg->sft_clk[i];
+ if (NPCM_CLK_MHZ > val) {
+ iowrite8(clk_cfg->clk_sel[i] | tmp,
+ gpio->base + NPCM_IOXCFG1);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void npcm_sgpio_irq_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+
+ /* input GPIOs in the high range */
+ bitmap_set(valid_mask, gpio->nout_sgpio, gpio->nin_sgpio);
+ bitmap_clear(valid_mask, 0, gpio->nout_sgpio);
+}
+
+static void npcm_sgpio_irq_set_mask(struct irq_data *d, bool set)
+{
+ const struct npcm_sgpio_bank *bank;
+ struct npcm_sgpio *gpio;
+ unsigned long flags;
+ void __iomem *addr;
+ unsigned int offset;
+ u16 reg, type;
+ u8 bit;
+
+ npcm_sgpio_irqd_to_data(d, &gpio, &bank, &bit, &offset);
+ addr = bank_reg(gpio, bank, EVENT_CFG);
+
+ reg = ioread16(addr);
+ if (set) {
+ reg &= ~(NPCM_IXOEVCFG_MASK << (bit * 2));
+ } else {
+ type = gpio->int_type[offset];
+ reg |= (type << (bit * 2));
+ }
+
+ raw_spin_lock_irqsave(&gpio->lock, flags);
+
+ npcm_sgpio_setup_enable(gpio, false);
+
+ iowrite16(reg, addr);
+
+ npcm_sgpio_setup_enable(gpio, true);
+
+ addr = bank_reg(gpio, bank, EVENT_STS);
+ reg = ioread8(addr);
+ reg |= BIT(bit);
+ iowrite8(reg, addr);
+
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void npcm_sgpio_irq_ack(struct irq_data *d)
+{
+ const struct npcm_sgpio_bank *bank;
+ struct npcm_sgpio *gpio;
+ unsigned long flags;
+ void __iomem *status_addr;
+ unsigned int offset;
+ u8 bit;
+
+ npcm_sgpio_irqd_to_data(d, &gpio, &bank, &bit, &offset);
+ status_addr = bank_reg(gpio, bank, EVENT_STS);
+ raw_spin_lock_irqsave(&gpio->lock, flags);
+ iowrite8(BIT(bit), status_addr);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+}
+
+static void npcm_sgpio_irq_mask(struct irq_data *d)
+{
+ npcm_sgpio_irq_set_mask(d, true);
+}
+
+static void npcm_sgpio_irq_unmask(struct irq_data *d)
+{
+ npcm_sgpio_irq_set_mask(d, false);
+}
+
+static int npcm_sgpio_set_type(struct irq_data *d, unsigned int type)
+{
+ const struct npcm_sgpio_bank *bank;
+ irq_flow_handler_t handler;
+ struct npcm_sgpio *gpio;
+ unsigned long flags;
+ void __iomem *addr;
+ unsigned int offset;
+ u16 reg, val;
+ u8 bit;
+
+ npcm_sgpio_irqd_to_data(d, &gpio, &bank, &bit, &offset);
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = NPCM_IXOEVCFG_BOTH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = NPCM_IXOEVCFG_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ val = NPCM_IXOEVCFG_FALLING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ handler = handle_level_irq;
+ else
+ handler = handle_edge_irq;
+
+ gpio->int_type[offset] = val;
+
+ raw_spin_lock_irqsave(&gpio->lock, flags);
+ npcm_sgpio_setup_enable(gpio, false);
+ addr = bank_reg(gpio, bank, EVENT_CFG);
+ reg = ioread16(addr);
+
+ reg |= (val << (bit * 2));
+
+ iowrite16(reg, addr);
+ npcm_sgpio_setup_enable(gpio, true);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ irq_set_handler_locked(d, handler);
+
+ return 0;
+}
+
+static void npcm_sgpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ struct npcm_sgpio *gpio = gpiochip_get_data(gc);
+ unsigned int i, j, girq;
+ unsigned long reg;
+
+ chained_irq_enter(ic, desc);
+
+ for (i = 0; i < ARRAY_SIZE(npcm_sgpio_banks); i++) {
+ const struct npcm_sgpio_bank *bank = &npcm_sgpio_banks[i];
+
+ reg = ioread8(bank_reg(gpio, bank, EVENT_STS));
+ for_each_set_bit(j, &reg, 8) {
+			girq = i * 8 + gpio->nout_sgpio + j;
+			generic_handle_domain_irq(gc->irq.domain, girq);
+ }
+ }
+
+ chained_irq_exit(ic, desc);
+}
+
+static const struct irq_chip sgpio_irq_chip = {
+ .name = "sgpio-irq",
+ .irq_ack = npcm_sgpio_irq_ack,
+ .irq_mask = npcm_sgpio_irq_mask,
+ .irq_unmask = npcm_sgpio_irq_unmask,
+ .irq_set_type = npcm_sgpio_set_type,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int npcm_sgpio_setup_irqs(struct npcm_sgpio *gpio,
+ struct platform_device *pdev)
+{
+ int rc, i;
+ struct gpio_irq_chip *irq;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
+ return rc;
+
+ gpio->irq = rc;
+
+ npcm_sgpio_setup_enable(gpio, false);
+
+	/* Disable IRQs and clear interrupt status registers for all SGPIO pins. */
+ for (i = 0; i < ARRAY_SIZE(npcm_sgpio_banks); i++) {
+ const struct npcm_sgpio_bank *bank = &npcm_sgpio_banks[i];
+
+ iowrite16(0, bank_reg(gpio, bank, EVENT_CFG));
+ iowrite8(0xff, bank_reg(gpio, bank, EVENT_STS));
+ }
+
+ irq = &gpio->chip.irq;
+ gpio_irq_chip_set_chip(irq, &sgpio_irq_chip);
+ irq->init_valid_mask = npcm_sgpio_irq_init_valid_mask;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->parent_handler = npcm_sgpio_irq_handler;
+ irq->parent_handler_data = gpio;
+ irq->parents = &gpio->irq;
+ irq->num_parents = 1;
+
+ return 0;
+}
+
+static int npcm_sgpio_probe(struct platform_device *pdev)
+{
+ struct npcm_sgpio *gpio;
+ const struct npcm_clk_cfg *clk_cfg;
+ int rc;
+ u32 nin_gpios, nout_gpios;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ clk_cfg = device_get_match_data(&pdev->dev);
+ if (!clk_cfg)
+ return -EINVAL;
+
+ rc = device_property_read_u32(&pdev->dev, "nuvoton,input-ngpios",
+ &nin_gpios);
+ if (rc < 0)
+		return dev_err_probe(&pdev->dev, rc, "Could not read nuvoton,input-ngpios property\n");
+
+ rc = device_property_read_u32(&pdev->dev, "nuvoton,output-ngpios",
+ &nout_gpios);
+ if (rc < 0)
+		return dev_err_probe(&pdev->dev, rc, "Could not read nuvoton,output-ngpios property\n");
+
+ gpio->nin_sgpio = nin_gpios;
+ gpio->nout_sgpio = nout_gpios;
+ if (gpio->nin_sgpio > MAX_NR_HW_SGPIO ||
+ gpio->nout_sgpio > MAX_NR_HW_SGPIO)
+		return dev_err_probe(&pdev->dev, -EINVAL,
+				     "Number of GPIOs exceeds the maximum of %d: input: %d output: %d\n",
+				     MAX_NR_HW_SGPIO, nin_gpios, nout_gpios);
+
+ gpio->pclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gpio->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpio->pclk), "Could not get pclk\n");
+
+ rc = npcm_sgpio_setup_clk(gpio, clk_cfg);
+ if (rc < 0)
+ return dev_err_probe(&pdev->dev, rc, "Failed to setup clock\n");
+
+ raw_spin_lock_init(&gpio->lock);
+ gpio->chip.parent = &pdev->dev;
+ gpio->chip.ngpio = gpio->nin_sgpio + gpio->nout_sgpio;
+ gpio->chip.direction_input = npcm_sgpio_dir_in;
+ gpio->chip.direction_output = npcm_sgpio_dir_out;
+ gpio->chip.get_direction = npcm_sgpio_get_direction;
+ gpio->chip.get = npcm_sgpio_get;
+ gpio->chip.set = npcm_sgpio_set;
+ gpio->chip.label = dev_name(&pdev->dev);
+ gpio->chip.base = -1;
+
+ rc = npcm_sgpio_init_port(gpio);
+ if (rc < 0)
+ return rc;
+
+ rc = npcm_sgpio_setup_irqs(gpio, pdev);
+ if (rc < 0)
+ return rc;
+
+ rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "GPIO registering failed\n");
+
+ npcm_sgpio_setup_enable(gpio, true);
+
+ return 0;
+}
+
+static unsigned int npcm750_SFT_CLK[NPCM_750_OPT] = {
+ 1024, 32, 8, 4, 3, 2,
+};
+
+static unsigned int npcm750_CLK_SEL[NPCM_750_OPT] = {
+ 0x00, 0x05, 0x07, 0x0C, 0x0D, 0x0E,
+};
+
+static unsigned int npcm845_SFT_CLK[NPCM_845_OPT] = {
+ 1024, 32, 16, 8, 4,
+};
+
+static unsigned int npcm845_CLK_SEL[NPCM_845_OPT] = {
+ 0x00, 0x05, 0x06, 0x07, 0x0C,
+};
+
+static struct npcm_clk_cfg npcm750_sgpio_pdata = {
+ .sft_clk = npcm750_SFT_CLK,
+ .clk_sel = npcm750_CLK_SEL,
+ .cfg_opt = NPCM_750_OPT,
+};
+
+static const struct npcm_clk_cfg npcm845_sgpio_pdata = {
+ .sft_clk = npcm845_SFT_CLK,
+ .clk_sel = npcm845_CLK_SEL,
+ .cfg_opt = NPCM_845_OPT,
+};
+
+static const struct of_device_id npcm_sgpio_of_table[] = {
+ { .compatible = "nuvoton,npcm750-sgpio", .data = &npcm750_sgpio_pdata, },
+ { .compatible = "nuvoton,npcm845-sgpio", .data = &npcm845_sgpio_pdata, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, npcm_sgpio_of_table);
+
+static struct platform_driver npcm_sgpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = npcm_sgpio_of_table,
+ },
+ .probe = npcm_sgpio_probe,
+};
+module_platform_driver(npcm_sgpio_driver);
+
+MODULE_AUTHOR("Jim Liu <jjliu0@nuvoton.com>");
+MODULE_AUTHOR("Joseph Liu <kwliu@nuvoton.com>");
+MODULE_DESCRIPTION("Nuvoton NPCM Serial GPIO Driver");
+MODULE_LICENSE("GPL v2");
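
A note on the line numbering the new driver uses throughout: serial GPIO offsets are split into a low output range and a high input range, which is why npcm_sgpio_get() subtracts nout_sgpio before indexing the read banks, direction_input rejects offsets below nout_sgpio, and the IRQ valid mask covers only the inputs. In sketch form, using the driver's own names:

	/* offsets [0, nout_sgpio)                 -> outputs, WRITE_DATA banks
	 * offsets [nout_sgpio, nout_sgpio + nin)  -> inputs, READ_DATA banks, IRQ-capable
	 */
	offset -= gpio->nout_sgpio;			/* inputs only */
	bank = &npcm_sgpio_banks[GPIO_BANK(offset)];
	bit  = GPIO_BIT(offset);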
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 01c0fd0a9..d9b228bea 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -151,8 +151,8 @@ static void sprd_pmic_eic_irq_mask(struct irq_data *data)
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
u32 offset = irqd_to_hwirq(data);
- pmic_eic->reg[REG_IE] = 0;
- pmic_eic->reg[REG_TRIG] = 0;
+ pmic_eic->reg[REG_IE] &= ~BIT(offset);
+ pmic_eic->reg[REG_TRIG] &= ~BIT(offset);
gpiochip_disable_irq(chip, offset);
}
@@ -165,8 +165,8 @@ static void sprd_pmic_eic_irq_unmask(struct irq_data *data)
gpiochip_enable_irq(chip, offset);
- pmic_eic->reg[REG_IE] = 1;
- pmic_eic->reg[REG_TRIG] = 1;
+ pmic_eic->reg[REG_IE] |= BIT(offset);
+ pmic_eic->reg[REG_TRIG] |= BIT(offset);
}
static int sprd_pmic_eic_irq_set_type(struct irq_data *data,
@@ -174,13 +174,14 @@ static int sprd_pmic_eic_irq_set_type(struct irq_data *data,
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+ u32 offset = irqd_to_hwirq(data);
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
- pmic_eic->reg[REG_IEV] = 1;
+ pmic_eic->reg[REG_IEV] |= BIT(offset);
break;
case IRQ_TYPE_LEVEL_LOW:
- pmic_eic->reg[REG_IEV] = 0;
+ pmic_eic->reg[REG_IEV] &= ~BIT(offset);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
@@ -222,15 +223,15 @@ static void sprd_pmic_eic_bus_sync_unlock(struct irq_data *data)
sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV, 1);
} else {
sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IEV,
- pmic_eic->reg[REG_IEV]);
+ !!(pmic_eic->reg[REG_IEV] & BIT(offset)));
}
/* Set irq unmask */
sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_IE,
- pmic_eic->reg[REG_IE]);
+ !!(pmic_eic->reg[REG_IE] & BIT(offset)));
/* Generate trigger start pulse for debounce EIC */
sprd_pmic_eic_update(chip, offset, SPRD_PMIC_EIC_TRIG,
- pmic_eic->reg[REG_TRIG]);
+ !!(pmic_eic->reg[REG_TRIG] & BIT(offset)));
mutex_unlock(&pmic_eic->buslock);
}
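The fix above replaces whole-register 0/1 assignments with per-line BIT(offset) updates, so masking one EIC line no longer clobbers the cached enable/trigger state of its siblings. A runnable sketch of the cached-bitmask pattern, with illustrative names rather than the driver's API:

/* Sketch of the per-bit cache pattern used in the fix above; the cache
 * variable and helpers are illustrative, not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

static uint32_t reg_ie;	/* cached interrupt-enable bits, one per line */

static void eic_mask(unsigned int offset)
{
	reg_ie &= ~BIT(offset);	/* clear only this line's bit */
}

static void eic_unmask(unsigned int offset)
{
	reg_ie |= BIT(offset);	/* set only this line's bit */
}

int main(void)
{
	eic_unmask(3);
	eic_unmask(5);
	eic_mask(3);		/* line 5 stays enabled */
	printf("line 5 enabled: %d\n", !!(reg_ie & BIT(5)));
	return 0;
}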
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
new file mode 100644
index 000000000..bf7f008f5
--- /dev/null
+++ b/drivers/gpio/gpio-rtd.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Realtek DHC gpio driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corp.
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define RTD_GPIO_DEBOUNCE_1US 0
+#define RTD_GPIO_DEBOUNCE_10US 1
+#define RTD_GPIO_DEBOUNCE_100US 2
+#define RTD_GPIO_DEBOUNCE_1MS 3
+#define RTD_GPIO_DEBOUNCE_10MS 4
+#define RTD_GPIO_DEBOUNCE_20MS 5
+#define RTD_GPIO_DEBOUNCE_30MS 6
+
+/**
+ * struct rtd_gpio_info - Specific GPIO register information
+ * @name: GPIO device name
+ * @gpio_base: GPIO base number
+ * @num_gpios: The number of GPIOs
+ * @dir_offset: Offset for GPIO direction registers
+ * @dato_offset: Offset for GPIO data output registers
+ * @dati_offset: Offset for GPIO data input registers
+ * @ie_offset: Offset for GPIO interrupt enable registers
+ * @dp_offset: Offset for GPIO detection polarity registers
+ * @gpa_offset: Offset for GPIO assert interrupt status registers
+ * @gpda_offset: Offset for GPIO deassert interrupt status registers
+ * @deb_offset: Offset for GPIO debounce registers
+ * @deb_val: Register values representing the GPIO debounce time
+ * @get_deb_setval: Used to get the corresponding value for setting the debounce register
+ */
+struct rtd_gpio_info {
+ const char *name;
+ unsigned int gpio_base;
+ unsigned int num_gpios;
+ u8 *dir_offset;
+ u8 *dato_offset;
+ u8 *dati_offset;
+ u8 *ie_offset;
+ u8 *dp_offset;
+ u8 *gpa_offset;
+ u8 *gpda_offset;
+ u8 *deb_offset;
+ u8 *deb_val;
+ u8 (*get_deb_setval)(const struct rtd_gpio_info *info,
+ unsigned int offset, u8 deb_index,
+ u8 *reg_offset, u8 *shift);
+};
+
+struct rtd_gpio {
+ struct gpio_chip gpio_chip;
+ const struct rtd_gpio_info *info;
+ void __iomem *base;
+ void __iomem *irq_base;
+ unsigned int irqs[2];
+ raw_spinlock_t lock;
+};
+
+static u8 rtd_gpio_get_deb_setval(const struct rtd_gpio_info *info, unsigned int offset,
+ u8 deb_index, u8 *reg_offset, u8 *shift)
+{
+ *reg_offset = info->deb_offset[offset / 8];
+ *shift = (offset % 8) * 4;
+ return info->deb_val[deb_index];
+}
+
+static u8 rtd1295_misc_gpio_get_deb_setval(const struct rtd_gpio_info *info, unsigned int offset,
+ u8 deb_index, u8 *reg_offset, u8 *shift)
+{
+ *reg_offset = info->deb_offset[0];
+ *shift = (offset % 8) * 4;
+ return info->deb_val[deb_index];
+}
+
+static u8 rtd1295_iso_gpio_get_deb_setval(const struct rtd_gpio_info *info, unsigned int offset,
+ u8 deb_index, u8 *reg_offset, u8 *shift)
+{
+ *reg_offset = info->deb_offset[0];
+ *shift = 0;
+ return info->deb_val[deb_index];
+}
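rtd_gpio_get_deb_setval() packs eight lines into each debounce register, four bits per line: the register is deb_offset[offset / 8] and the field shift is (offset % 8) * 4. A quick runnable check of that mapping, assuming the deb_offset table from the rtd_iso_gpio_info entry defined below:

/* Worked example of the offset -> (register, shift) mapping used by
 * rtd_gpio_get_deb_setval(); offsets 0-7 share register 0x44, and so on.
 */
#include <stdio.h>

static const unsigned char deb_offset[] = {
	0x44, 0x48, 0x4c, 0x50, 0x54, 0x58, 0x5c, 0x60, 0x64, 0x68, 0x6c,
};

int main(void)
{
	for (unsigned int offset = 0; offset < 82; offset += 13)
		printf("gpio %2u -> reg 0x%02x, shift %u\n", offset,
		       deb_offset[offset / 8], (offset % 8) * 4);
	return 0;
}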
+
+static const struct rtd_gpio_info rtd_iso_gpio_info = {
+ .name = "rtd_iso_gpio",
+ .gpio_base = 0,
+ .num_gpios = 82,
+ .dir_offset = (u8 []){ 0x0, 0x18, 0x2c },
+ .dato_offset = (u8 []){ 0x4, 0x1c, 0x30 },
+ .dati_offset = (u8 []){ 0x8, 0x20, 0x34 },
+ .ie_offset = (u8 []){ 0xc, 0x24, 0x38 },
+ .dp_offset = (u8 []){ 0x10, 0x28, 0x3c },
+ .gpa_offset = (u8 []){ 0x8, 0xe0, 0x90 },
+ .gpda_offset = (u8 []){ 0xc, 0xe4, 0x94 },
+ .deb_offset = (u8 []){ 0x44, 0x48, 0x4c, 0x50, 0x54, 0x58, 0x5c,
+ 0x60, 0x64, 0x68, 0x6c },
+ .deb_val = (u8 []){ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 },
+ .get_deb_setval = rtd_gpio_get_deb_setval,
+};
+
+static const struct rtd_gpio_info rtd1619_iso_gpio_info = {
+ .name = "rtd1619_iso_gpio",
+ .gpio_base = 0,
+ .num_gpios = 86,
+ .dir_offset = (u8 []){ 0x0, 0x18, 0x2c },
+ .dato_offset = (u8 []){ 0x4, 0x1c, 0x30 },
+ .dati_offset = (u8 []){ 0x8, 0x20, 0x34 },
+ .ie_offset = (u8 []){ 0xc, 0x24, 0x38 },
+ .dp_offset = (u8 []){ 0x10, 0x28, 0x3c },
+ .gpa_offset = (u8 []){ 0x8, 0xe0, 0x90 },
+ .gpda_offset = (u8 []){ 0xc, 0xe4, 0x94 },
+ .deb_offset = (u8 []){ 0x44, 0x48, 0x4c, 0x50, 0x54, 0x58, 0x5c,
+ 0x60, 0x64, 0x68, 0x6c },
+ .deb_val = (u8 []){ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 },
+ .get_deb_setval = rtd_gpio_get_deb_setval,
+};
+
+static const struct rtd_gpio_info rtd1395_iso_gpio_info = {
+ .name = "rtd1395_iso_gpio",
+ .gpio_base = 0,
+ .num_gpios = 57,
+ .dir_offset = (u8 []){ 0x0, 0x18 },
+ .dato_offset = (u8 []){ 0x4, 0x1c },
+ .dati_offset = (u8 []){ 0x8, 0x20 },
+ .ie_offset = (u8 []){ 0xc, 0x24 },
+ .dp_offset = (u8 []){ 0x10, 0x28 },
+ .gpa_offset = (u8 []){ 0x8, 0xe0 },
+ .gpda_offset = (u8 []){ 0xc, 0xe4 },
+ .deb_offset = (u8 []){ 0x30, 0x34, 0x38, 0x3c, 0x40, 0x44, 0x48, 0x4c },
+ .deb_val = (u8 []){ 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 },
+ .get_deb_setval = rtd_gpio_get_deb_setval,
+};
+
+static const struct rtd_gpio_info rtd1295_misc_gpio_info = {
+ .name = "rtd1295_misc_gpio",
+ .gpio_base = 0,
+ .num_gpios = 101,
+ .dir_offset = (u8 []){ 0x0, 0x4, 0x8, 0xc },
+ .dato_offset = (u8 []){ 0x10, 0x14, 0x18, 0x1c },
+ .dati_offset = (u8 []){ 0x20, 0x24, 0x28, 0x2c },
+ .ie_offset = (u8 []){ 0x30, 0x34, 0x38, 0x3c },
+ .dp_offset = (u8 []){ 0x40, 0x44, 0x48, 0x4c },
+ .gpa_offset = (u8 []){ 0x40, 0x44, 0xa4, 0xb8 },
+	.gpda_offset = (u8 []){ 0x54, 0x58, 0xa8, 0xbc },
+ .deb_offset = (u8 []){ 0x50 },
+ .deb_val = (u8 []){ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 },
+ .get_deb_setval = rtd1295_misc_gpio_get_deb_setval,
+};
+
+static const struct rtd_gpio_info rtd1295_iso_gpio_info = {
+ .name = "rtd1295_iso_gpio",
+ .gpio_base = 101,
+ .num_gpios = 35,
+ .dir_offset = (u8 []){ 0x0, 0x18 },
+ .dato_offset = (u8 []){ 0x4, 0x1c },
+ .dati_offset = (u8 []){ 0x8, 0x20 },
+ .ie_offset = (u8 []){ 0xc, 0x24 },
+ .dp_offset = (u8 []){ 0x10, 0x28 },
+ .gpa_offset = (u8 []){ 0x8, 0xe0 },
+ .gpda_offset = (u8 []){ 0xc, 0xe4 },
+ .deb_offset = (u8 []){ 0x14 },
+ .deb_val = (u8 []){ 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 },
+ .get_deb_setval = rtd1295_iso_gpio_get_deb_setval,
+};
+
+static int rtd_gpio_dir_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ return data->info->dir_offset[offset / 32];
+}
+
+static int rtd_gpio_dato_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ return data->info->dato_offset[offset / 32];
+}
+
+static int rtd_gpio_dati_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ return data->info->dati_offset[offset / 32];
+}
+
+static int rtd_gpio_ie_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ return data->info->ie_offset[offset / 32];
+}
+
+static int rtd_gpio_dp_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ return data->info->dp_offset[offset / 32];
+}
+
+static int rtd_gpio_gpa_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ /* Each GPIO assert interrupt status register contains 31 GPIOs. */
+ return data->info->gpa_offset[offset / 31];
+}
+
+static int rtd_gpio_gpda_offset(struct rtd_gpio *data, unsigned int offset)
+{
+ /* Each GPIO deassert interrupt status register contains 31 GPIOs. */
+ return data->info->gpda_offset[offset / 31];
+}
+
+static int rtd_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned int debounce)
+{
+ struct rtd_gpio *data = gpiochip_get_data(chip);
+ u8 deb_val, deb_index, reg_offset, shift;
+ unsigned int write_en;
+ u32 val;
+
+ switch (debounce) {
+ case 1:
+ deb_index = RTD_GPIO_DEBOUNCE_1US;
+ break;
+ case 10:
+ deb_index = RTD_GPIO_DEBOUNCE_10US;
+ break;
+ case 100:
+ deb_index = RTD_GPIO_DEBOUNCE_100US;
+ break;
+ case 1000:
+ deb_index = RTD_GPIO_DEBOUNCE_1MS;
+ break;
+ case 10000:
+ deb_index = RTD_GPIO_DEBOUNCE_10MS;
+ break;
+ case 20000:
+ deb_index = RTD_GPIO_DEBOUNCE_20MS;
+ break;
+ case 30000:
+ deb_index = RTD_GPIO_DEBOUNCE_30MS;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ deb_val = data->info->get_deb_setval(data->info, offset, deb_index, &reg_offset, &shift);
+ write_en = BIT(shift + 3);
+ val = (deb_val << shift) | write_en;
+
+ guard(raw_spinlock_irqsave)(&data->lock);
+ writel_relaxed(val, data->base + reg_offset);
+
+ return 0;
+}
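Each 4-bit debounce field holds three value bits plus a write-enable bit at shift + 3, so a single writel_relaxed() updates one line's field without a read-modify-write of its neighbours. A runnable sketch of the encoding for an example line:

/* Sketch of the debounce register encoding: 3 value bits plus a
 * write-enable bit at shift + 3, so one write updates one line only.
 */
#include <stdio.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	unsigned int offset = 5;	/* example line */
	unsigned int deb_val = 0x4;	/* e.g. the 10 ms setting */
	unsigned int shift = (offset % 8) * 4;
	unsigned int val = (deb_val << shift) | BIT(shift + 3);

	printf("write 0x%08x to the debounce register\n", val); /* 0x00c00000 */
	return 0;
}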
+
+static int rtd_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ int debounce;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return gpiochip_generic_config(chip, offset, config);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ debounce = pinconf_to_config_argument(config);
+ return rtd_gpio_set_debounce(chip, offset, debounce);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct rtd_gpio *data = gpiochip_get_data(chip);
+ u32 mask = BIT(offset % 32);
+ int dato_reg_offset;
+ u32 val;
+
+ dato_reg_offset = rtd_gpio_dato_offset(data, offset);
+
+ guard(raw_spinlock_irqsave)(&data->lock);
+
+ val = readl_relaxed(data->base + dato_reg_offset);
+ if (value)
+ val |= mask;
+ else
+ val &= ~mask;
+ writel_relaxed(val, data->base + dato_reg_offset);
+}
+
+static int rtd_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rtd_gpio *data = gpiochip_get_data(chip);
+ int dato_reg_offset = rtd_gpio_dato_offset(data, offset);
+ int dati_reg_offset = rtd_gpio_dati_offset(data, offset);
+ int dir_reg_offset = rtd_gpio_dir_offset(data, offset);
+ int dat_reg_offset;
+ u32 val;
+
+ guard(raw_spinlock_irqsave)(&data->lock);
+
+ val = readl_relaxed(data->base + dir_reg_offset);
+ dat_reg_offset = (val & BIT(offset % 32)) ? dato_reg_offset : dati_reg_offset;
+ val = readl_relaxed(data->base + dat_reg_offset);
+
+ return !!(val & BIT(offset % 32));
+}
+
+static int rtd_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rtd_gpio *data = gpiochip_get_data(chip);
+ int reg_offset;
+ u32 val;
+
+ reg_offset = rtd_gpio_dir_offset(data, offset);
+ val = readl_relaxed(data->base + reg_offset);
+ if (val & BIT(offset % 32))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int rtd_gpio_set_direction(struct gpio_chip *chip, unsigned int offset, bool out)
+{
+ struct rtd_gpio *data = gpiochip_get_data(chip);
+ u32 mask = BIT(offset % 32);
+ int reg_offset;
+ u32 val;
+
+ reg_offset = rtd_gpio_dir_offset(data, offset);
+
+ guard(raw_spinlock_irqsave)(&data->lock);
+
+ val = readl_relaxed(data->base + reg_offset);
+ if (out)
+ val |= mask;
+ else
+ val &= ~mask;
+ writel_relaxed(val, data->base + reg_offset);
+
+ return 0;
+}
+
+static int rtd_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return rtd_gpio_set_direction(chip, offset, false);
+}
+
+static int rtd_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ rtd_gpio_set(chip, offset, value);
+
+ return rtd_gpio_set_direction(chip, offset, true);
+}
+
+static bool rtd_gpio_check_ie(struct rtd_gpio *data, int irq)
+{
+ int mask = BIT(irq % 32);
+ int ie_reg_offset;
+ u32 enable;
+
+ ie_reg_offset = rtd_gpio_ie_offset(data, irq);
+ enable = readl_relaxed(data->base + ie_reg_offset);
+
+ return enable & mask;
+}
+
+static void rtd_gpio_irq_handle(struct irq_desc *desc)
+{
+ int (*get_reg_offset)(struct rtd_gpio *gpio, unsigned int offset);
+ struct rtd_gpio *data = irq_desc_get_handler_data(desc);
+ struct irq_domain *domain = data->gpio_chip.irq.domain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
+ unsigned long status;
+ int reg_offset, i, j;
+ unsigned int hwirq;
+
+ if (irq == data->irqs[0])
+ get_reg_offset = &rtd_gpio_gpa_offset;
+ else if (irq == data->irqs[1])
+ get_reg_offset = &rtd_gpio_gpda_offset;
+
+ chained_irq_enter(chip, desc);
+
+ /* Each GPIO interrupt status register contains 31 GPIOs. */
+ for (i = 0; i < data->info->num_gpios; i += 31) {
+ reg_offset = get_reg_offset(data, i);
+
+ /*
+		 * Bit 0 is the write_en bit; bits 1 to 31 correspond to the 31 GPIOs.
+ * When bit 0 is set to 0, write 1 to the other bits to clear the status.
+ * When bit 0 is set to 1, write 1 to the other bits to set the status.
+ */
+ status = readl_relaxed(data->irq_base + reg_offset);
+ status &= ~BIT(0);
+ writel_relaxed(status, data->irq_base + reg_offset);
+
+ for_each_set_bit(j, &status, 32) {
+ hwirq = i + j - 1;
+ if (rtd_gpio_check_ie(data, hwirq)) {
+ int girq = irq_find_mapping(domain, hwirq);
+ u32 irq_type = irq_get_trigger_type(girq);
+
+ if ((irq == data->irqs[1]) && (irq_type != IRQ_TYPE_EDGE_BOTH))
+ break;
+ generic_handle_domain_irq(domain, hwirq);
+ }
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
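Because bit 0 of each status register is write_en and bits 1 to 31 carry 31 lines, the handler translates a set bit j in the register for base line i back to hwirq = i + j - 1. A runnable decode sketch with an illustrative status value:

/* Decode sketch for the 31-lines-per-register interrupt status layout:
 * bit 0 is write_en, bit j (j >= 1) maps to hwirq = base + j - 1.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int base = 31;		/* second status register */
	uint32_t status = 0x00000012;	/* bits 1 and 4 pending (example) */

	status &= ~1u;			/* drop the write_en bit */
	for (unsigned int j = 1; j < 32; j++)
		if (status & (1u << j))
			printf("pending hwirq %u\n", base + j - 1);
	return 0;
}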
+
+static void rtd_gpio_enable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct rtd_gpio *data = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+	/* Bit 0 is write_en and bits 1 to 31 correspond to the 31 GPIOs. */
+ u32 clr_mask = BIT(hwirq % 31) << 1;
+
+ u32 ie_mask = BIT(hwirq % 32);
+ int gpda_reg_offset;
+ int gpa_reg_offset;
+ int ie_reg_offset;
+ u32 val;
+
+ ie_reg_offset = rtd_gpio_ie_offset(data, hwirq);
+ gpa_reg_offset = rtd_gpio_gpa_offset(data, hwirq);
+ gpda_reg_offset = rtd_gpio_gpda_offset(data, hwirq);
+
+ gpiochip_enable_irq(gc, hwirq);
+
+ guard(raw_spinlock_irqsave)(&data->lock);
+
+ writel_relaxed(clr_mask, data->irq_base + gpa_reg_offset);
+ writel_relaxed(clr_mask, data->irq_base + gpda_reg_offset);
+
+ val = readl_relaxed(data->base + ie_reg_offset);
+ val |= ie_mask;
+ writel_relaxed(val, data->base + ie_reg_offset);
+}
+
+static void rtd_gpio_disable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct rtd_gpio *data = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 ie_mask = BIT(hwirq % 32);
+ int ie_reg_offset;
+ u32 val;
+
+ ie_reg_offset = rtd_gpio_ie_offset(data, hwirq);
+
+ scoped_guard(raw_spinlock_irqsave, &data->lock) {
+ val = readl_relaxed(data->base + ie_reg_offset);
+ val &= ~ie_mask;
+ writel_relaxed(val, data->base + ie_reg_offset);
+ }
+
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static int rtd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct rtd_gpio *data = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 mask = BIT(hwirq % 32);
+ int dp_reg_offset;
+ bool polarity;
+ u32 val;
+
+ dp_reg_offset = rtd_gpio_dp_offset(data, hwirq);
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ polarity = 1;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ polarity = 0;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ polarity = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ scoped_guard(raw_spinlock_irqsave, &data->lock) {
+ val = readl_relaxed(data->base + dp_reg_offset);
+ if (polarity)
+ val |= mask;
+ else
+ val &= ~mask;
+ writel_relaxed(val, data->base + dp_reg_offset);
+ }
+
+ irq_set_handler_locked(d, handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_chip rtd_gpio_irq_chip = {
+ .name = "rtd-gpio",
+ .irq_enable = rtd_gpio_enable_irq,
+ .irq_disable = rtd_gpio_disable_irq,
+ .irq_set_type = rtd_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+};
+
+static int rtd_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *irq_chip;
+ struct rtd_gpio *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ data->irqs[0] = ret;
+
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0)
+ return ret;
+ data->irqs[1] = ret;
+
+ data->info = device_get_match_data(dev);
+ if (!data->info)
+ return -EINVAL;
+
+ raw_spin_lock_init(&data->lock);
+
+ data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->irq_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(data->irq_base))
+ return PTR_ERR(data->irq_base);
+
+ data->gpio_chip.label = dev_name(dev);
+ data->gpio_chip.base = -1;
+ data->gpio_chip.ngpio = data->info->num_gpios;
+ data->gpio_chip.request = gpiochip_generic_request;
+ data->gpio_chip.free = gpiochip_generic_free;
+ data->gpio_chip.get_direction = rtd_gpio_get_direction;
+ data->gpio_chip.direction_input = rtd_gpio_direction_input;
+ data->gpio_chip.direction_output = rtd_gpio_direction_output;
+ data->gpio_chip.set = rtd_gpio_set;
+ data->gpio_chip.get = rtd_gpio_get;
+ data->gpio_chip.set_config = rtd_gpio_set_config;
+ data->gpio_chip.parent = dev;
+
+ irq_chip = &data->gpio_chip.irq;
+ irq_chip->handler = handle_bad_irq;
+ irq_chip->default_type = IRQ_TYPE_NONE;
+ irq_chip->parent_handler = rtd_gpio_irq_handle;
+ irq_chip->parent_handler_data = data;
+ irq_chip->num_parents = 2;
+ irq_chip->parents = data->irqs;
+
+ gpio_irq_chip_set_chip(irq_chip, &rtd_gpio_irq_chip);
+
+ return devm_gpiochip_add_data(dev, &data->gpio_chip, data);
+}
+
+static const struct of_device_id rtd_gpio_of_matches[] = {
+ { .compatible = "realtek,rtd1295-misc-gpio", .data = &rtd1295_misc_gpio_info },
+ { .compatible = "realtek,rtd1295-iso-gpio", .data = &rtd1295_iso_gpio_info },
+ { .compatible = "realtek,rtd1395-iso-gpio", .data = &rtd1395_iso_gpio_info },
+ { .compatible = "realtek,rtd1619-iso-gpio", .data = &rtd1619_iso_gpio_info },
+ { .compatible = "realtek,rtd1319-iso-gpio", .data = &rtd_iso_gpio_info },
+ { .compatible = "realtek,rtd1619b-iso-gpio", .data = &rtd_iso_gpio_info },
+ { .compatible = "realtek,rtd1319d-iso-gpio", .data = &rtd_iso_gpio_info },
+ { .compatible = "realtek,rtd1315e-iso-gpio", .data = &rtd_iso_gpio_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rtd_gpio_of_matches);
+
+static struct platform_driver rtd_gpio_platform_driver = {
+ .driver = {
+ .name = "gpio-rtd",
+ .of_match_table = rtd_gpio_of_matches,
+ },
+ .probe = rtd_gpio_probe,
+};
+module_platform_driver(rtd_gpio_platform_driver);
+
+MODULE_DESCRIPTION("Realtek DHC SoC gpio driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 8decd9b5d..067c8edb6 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -250,7 +250,6 @@ static int sifive_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- platform_set_drvdata(pdev, chip);
return gpiochip_add_data(&chip->gc, chip);
}
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 192820949..c4106e37e 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
@@ -28,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
@@ -224,6 +226,25 @@ static void gpio_sim_free(struct gpio_chip *gc, unsigned int offset)
}
}
+static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
+{
+ struct gpio_sim_chip *chip = gpiochip_get_data(gc);
+ const char *label;
+ int i;
+
+ guard(mutex)(&chip->lock);
+
+ for_each_requested_gpio(gc, i, label)
+ seq_printf(seq, " gpio-%-3d (%s) %s,%s\n",
+ gc->base + i,
+ label,
+ test_bit(i, chip->direction_map) ? "input" :
+ test_bit(i, chip->value_map) ? "output-high" :
+ "output-low",
+ test_bit(i, chip->pull_map) ? "pull-up" :
+ "pull-down");
+}
+
static ssize_t gpio_sim_sysfs_val_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -460,6 +481,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->to_irq = gpio_sim_to_irq;
gc->request = gpio_sim_request;
gc->free = gpio_sim_free;
+ gc->dbg_show = PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), gpio_sim_dbg_show);
gc->can_sleep = true;
ret = devm_gpiochip_add_data(dev, gc, chip);
@@ -1546,6 +1568,6 @@ static void __exit gpio_sim_exit(void)
}
module_exit(gpio_sim_exit);
-MODULE_AUTHOR("Bartosz Golaszewski <brgl@bgdev.pl");
+MODULE_AUTHOR("Bartosz Golaszewski <brgl@bgdev.pl>");
MODULE_DESCRIPTION("GPIO Simulator Module");
MODULE_LICENSE("GPL");
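PTR_IF(cond, ptr) from linux/kernel.h evaluates to ptr when cond is true and NULL otherwise, which lets the chip install its dbg_show callback only when debugfs is built in, without an #ifdef. A minimal user-space model of the pattern:

/* User-space model of the PTR_IF() pattern used above: the callback is
 * installed only when the feature flag is set, with no #ifdef needed.
 */
#include <stdio.h>

#define PTR_IF(cond, ptr)	((cond) ? (ptr) : NULL)
#define FEATURE_ENABLED		1 /* stands in for IS_ENABLED(CONFIG_DEBUG_FS) */

static void dbg_show(void)
{
	puts("debug output");
}

int main(void)
{
	void (*show)(void) = PTR_IF(FEATURE_ENABLED, dbg_show);

	if (show)	/* callers must still handle the NULL case */
		show();
	return 0;
}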
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 27cc4da53..6c5ee81d7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -5,6 +5,7 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -255,7 +256,6 @@ static void stmpe_dbg_show_one(struct seq_file *s,
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
- const char *label = gpiochip_is_requested(gc, offset);
bool val = !!stmpe_gpio_get(gc, offset);
u8 bank = offset / 8;
u8 dir_reg = stmpe->regs[STMPE_IDX_GPDR_LSB + bank];
@@ -263,6 +263,10 @@ static void stmpe_dbg_show_one(struct seq_file *s,
int ret;
u8 dir;
+ char *label __free(kfree) = gpiochip_dup_line_label(gc, offset);
+ if (IS_ERR(label))
+ return;
+
ret = stmpe_reg_read(stmpe, dir_reg);
if (ret < 0)
return;
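The __free(kfree) annotation attaches a cleanup action to the variable's scope, so the label duplicated by gpiochip_dup_line_label() is released on every exit path, including the early returns. The kernel macro is built on the compiler's cleanup attribute; a runnable user-space equivalent:

/* User-space equivalent of the __free(kfree) pattern: the cleanup
 * attribute frees the string on every exit path, including early returns.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void free_ptr(char **p)
{
	free(*p);
}

#define __free_str __attribute__((cleanup(free_ptr)))

static void show_label(int requested)
{
	char *label __free_str = strdup(requested ? "my-line" : "Unrequested");

	if (!label)
		return;			/* nothing leaks on this path either */
	printf("label: %s\n", label);
}					/* label is freed here automatically */

int main(void)
{
	show_label(1);
	show_label(0);
	return 0;
}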
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index 7ce3eddae..4b29abafe 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -10,6 +10,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -19,6 +20,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pm.h>
#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
@@ -91,37 +93,31 @@ static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *reg;
u8 shift;
reg = gpio_reg_and_bit(chip, offset, value ? GPSR : GPCR, &shift);
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
writel(BIT(shift), reg);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
}
static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *gpdr;
u32 value;
u8 shift;
gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift);
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
value = readl(gpdr);
value &= ~BIT(shift);
writel(value, gpdr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
@@ -129,21 +125,18 @@ static int tng_gpio_direction_output(struct gpio_chip *chip, unsigned int offset
int value)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *gpdr;
u8 shift;
gpdr = gpio_reg_and_bit(chip, offset, GPDR, &shift);
tng_gpio_set(chip, offset, value);
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
value = readl(gpdr);
value |= BIT(shift);
writel(value, gpdr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
@@ -164,14 +157,13 @@ static int tng_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
unsigned int debounce)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *gfbr;
u32 value;
u8 shift;
gfbr = gpio_reg_and_bit(chip, offset, GFBR, &shift);
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
value = readl(gfbr);
if (debounce)
@@ -180,8 +172,6 @@ static int tng_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
value |= BIT(shift);
writel(value, gfbr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
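The conversions in this file replace raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with guard(raw_spinlock_irqsave)(&priv->lock), which releases the lock when the enclosing scope exits, so the explicit unlock calls and the flags variable disappear. A user-space model of the idea using pthreads and the same cleanup attribute (guard_mutex() is illustrative, not the kernel macro):

/* User-space model of guard(): a scope-bound lock that is released on
 * every exit path, mirroring the raw_spinlock_irqsave conversions above.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_guard(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define guard_mutex(m) \
	pthread_mutex_t *_g __attribute__((cleanup(unlock_guard), unused)) = \
		(pthread_mutex_lock(m), (m))

static int counter;

static int bump(void)
{
	guard_mutex(&lock);	/* locked from here to end of scope */

	if (counter > 100)
		return -1;	/* the unlock happens here too */
	return ++counter;
}

int main(void)
{
	printf("%d\n", bump());
	return 0;
}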
@@ -205,29 +195,28 @@ static int tng_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void tng_irq_ack(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
- unsigned long flags;
void __iomem *gisr;
u8 shift;
gisr = gpio_reg_and_bit(&priv->chip, gpio, GISR, &shift);
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
+
writel(BIT(shift), gisr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
}
static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
{
- unsigned long flags;
void __iomem *gimr;
u32 value;
u8 shift;
gimr = gpio_reg_and_bit(&priv->chip, gpio, GIMR, &shift);
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
value = readl(gimr);
if (unmask)
@@ -235,13 +224,12 @@ static void tng_irq_unmask_mask(struct tng_gpio *priv, u32 gpio, bool unmask)
else
value &= ~BIT(shift);
writel(value, gimr);
-
- raw_spin_unlock_irqrestore(&priv->lock, flags);
}
static void tng_irq_mask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
tng_irq_unmask_mask(priv, gpio, false);
@@ -250,7 +238,8 @@ static void tng_irq_mask(struct irq_data *d)
static void tng_irq_unmask(struct irq_data *d)
{
- struct tng_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct tng_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t gpio = irqd_to_hwirq(d);
gpiochip_enable_irq(&priv->chip, gpio);
@@ -267,10 +256,9 @@ static int tng_irq_set_type(struct irq_data *d, unsigned int type)
void __iomem *gitr = gpio_reg(&priv->chip, gpio, GITR);
void __iomem *glpr = gpio_reg(&priv->chip, gpio, GLPR);
u8 shift = gpio % 32;
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
value = readl(grer);
if (type & IRQ_TYPE_EDGE_RISING)
@@ -311,8 +299,6 @@ static int tng_irq_set_type(struct irq_data *d, unsigned int type)
irq_set_handler_locked(d, handle_edge_irq);
}
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
@@ -324,10 +310,11 @@ static int tng_irq_set_wake(struct irq_data *d, unsigned int on)
void __iomem *gwmr = gpio_reg(&priv->chip, gpio, priv->wake_regs.gwmr);
void __iomem *gwsr = gpio_reg(&priv->chip, gpio, priv->wake_regs.gwsr);
u8 shift = gpio % 32;
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&priv->lock, flags);
+ dev_dbg(priv->dev, "%s wake for gpio %lu\n", str_enable_disable(on), gpio);
+
+ guard(raw_spinlock_irqsave)(&priv->lock);
/* Clear the existing wake status */
writel(BIT(shift), gwsr);
@@ -339,9 +326,6 @@ static int tng_irq_set_wake(struct irq_data *d, unsigned int on)
value &= ~BIT(shift);
writel(value, gwmr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
- dev_dbg(priv->dev, "%s wake for gpio %lu\n", str_enable_disable(on), gpio);
return 0;
}
@@ -477,14 +461,13 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
}
EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER);
-int tng_gpio_suspend(struct device *dev)
+static int tng_gpio_suspend(struct device *dev)
{
struct tng_gpio *priv = dev_get_drvdata(dev);
struct tng_gpio_context *ctx = priv->ctx;
- unsigned long flags;
unsigned int base;
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
for (base = 0; base < priv->chip.ngpio; base += 32, ctx++) {
/* GPLR is RO, values read will be restored using GPSR */
@@ -498,20 +481,16 @@ int tng_gpio_suspend(struct device *dev)
ctx->gwmr = readl(gpio_reg(&priv->chip, base, priv->wake_regs.gwmr));
}
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
-EXPORT_SYMBOL_NS_GPL(tng_gpio_suspend, GPIO_TANGIER);
-int tng_gpio_resume(struct device *dev)
+static int tng_gpio_resume(struct device *dev)
{
struct tng_gpio *priv = dev_get_drvdata(dev);
struct tng_gpio_context *ctx = priv->ctx;
- unsigned long flags;
unsigned int base;
- raw_spin_lock_irqsave(&priv->lock, flags);
+ guard(raw_spinlock_irqsave)(&priv->lock);
for (base = 0; base < priv->chip.ngpio; base += 32, ctx++) {
/* GPLR is RO, values read will be restored using GPSR */
@@ -525,11 +504,10 @@ int tng_gpio_resume(struct device *dev)
writel(ctx->gwmr, gpio_reg(&priv->chip, base, priv->wake_regs.gwmr));
}
- raw_spin_unlock_irqrestore(&priv->lock, flags);
-
return 0;
}
-EXPORT_SYMBOL_NS_GPL(tng_gpio_resume, GPIO_TANGIER);
+
+EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(tng_gpio_pm_ops, tng_gpio_suspend, tng_gpio_resume, GPIO_TANGIER);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
diff --git a/drivers/gpio/gpio-tangier.h b/drivers/gpio/gpio-tangier.h
index 16c4f2290..ca7ab6cf6 100644
--- a/drivers/gpio/gpio-tangier.h
+++ b/drivers/gpio/gpio-tangier.h
@@ -13,6 +13,7 @@
#define _GPIO_TANGIER_H_
#include <linux/gpio/driver.h>
+#include <linux/pm.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
@@ -111,7 +112,6 @@ struct tng_gpio {
int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio);
-int tng_gpio_suspend(struct device *dev);
-int tng_gpio_resume(struct device *dev);
+extern const struct dev_pm_ops tng_gpio_pm_ops;
#endif /* _GPIO_TANGIER_H_ */
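With suspend/resume now static, the Tangier core exports a single tng_gpio_pm_ops table via EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS, and consumers reference the table rather than the individual functions. A hedged in-tree sketch of how a driver in the GPIO_TANGIER namespace might wire it up; the driver name and probe body are illustrative, and pm_sleep_ptr() is itself built on the PTR_IF() pattern shown earlier:

/* Hedged sketch: how a consumer of the Tangier GPIO core might hook up
 * the exported PM table; the driver name here is illustrative.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "gpio-tangier.h"

static int example_tng_probe(struct platform_device *pdev)
{
	/* a real driver would fill a struct tng_gpio and call
	 * devm_tng_gpio_probe() here
	 */
	return -ENODEV;
}

static struct platform_driver example_tng_driver = {
	.probe = example_tng_probe,
	.driver = {
		.name = "example-tangier-gpio",
		/* compiled out entirely when CONFIG_PM_SLEEP is disabled */
		.pm = pm_sleep_ptr(&tng_gpio_pm_ops),
	},
};
module_platform_driver(example_tng_driver);

MODULE_IMPORT_NS(GPIO_TANGIER);	/* required for the namespaced export */
MODULE_LICENSE("GPL");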
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d87dd06db..9130c691a 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -36,12 +36,6 @@
#define TEGRA186_GPIO_SCR_SEC_REN BIT(27)
#define TEGRA186_GPIO_SCR_SEC_G1W BIT(9)
#define TEGRA186_GPIO_SCR_SEC_G1R BIT(1)
-#define TEGRA186_GPIO_FULL_ACCESS (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN | \
- TEGRA186_GPIO_SCR_SEC_G1R | \
- TEGRA186_GPIO_SCR_SEC_G1W)
-#define TEGRA186_GPIO_SCR_SEC_ENABLE (TEGRA186_GPIO_SCR_SEC_WEN | \
- TEGRA186_GPIO_SCR_SEC_REN)
/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
@@ -177,10 +171,18 @@ static inline bool tegra186_gpio_is_accessible(struct tegra_gpio *gpio, unsigned
value = __raw_readl(secure + TEGRA186_GPIO_SCR);
- if ((value & TEGRA186_GPIO_SCR_SEC_ENABLE) == 0)
- return true;
+ /*
+	 * When SCR_SEC_[R|W]EN is unset, we have full read/write access to all the
+	 * registers for the given GPIO pin.
+	 * When SCR_SEC_[R|W]EN is set, we must further check the accompanying
+	 * SCR_SEC_G1[R|W] bit to determine read/write access to all the registers
+	 * for the given GPIO pin.
+ */
- if ((value & TEGRA186_GPIO_FULL_ACCESS) == TEGRA186_GPIO_FULL_ACCESS)
+ if (((value & TEGRA186_GPIO_SCR_SEC_REN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_REN) && (value & TEGRA186_GPIO_SCR_SEC_G1R))) &&
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) == 0 ||
+ ((value & TEGRA186_GPIO_SCR_SEC_WEN) && (value & TEGRA186_GPIO_SCR_SEC_G1W))))
return true;
return false;
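The rewritten check grants access only when, independently for read and write, either the secure-enable bit is clear or its group-1 override bit is set. Distilled into a runnable predicate (register bits reduced to booleans for clarity):

/* Runnable distillation of the rewritten accessibility check:
 * access requires (!REN || G1R) && (!WEN || G1W).
 */
#include <stdbool.h>
#include <stdio.h>

static bool accessible(bool ren, bool g1r, bool wen, bool g1w)
{
	return (!ren || g1r) && (!wen || g1w);
}

int main(void)
{
	/* REN set without G1R: reads are locked, so the pin is inaccessible */
	printf("%d\n", accessible(true, false, false, false));	/* 0 */
	/* both enables set with both overrides: fully accessible */
	printf("%d\n", accessible(true, true, true, true));	/* 1 */
	return 0;
}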
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index 7b38aa360..cd1f17041 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -96,16 +96,16 @@ static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int off
* Below can be used for test purpose only.
*/
- if (IS_ENABLED(CONFIG_DEBUG_GPIO)) {
- int ret = regmap_update_bits(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG,
- TPS65219_GPIO0_DIR_MASK, direction);
- if (ret) {
- dev_err(dev,
- "GPIO DEBUG enabled: Fail to change direction to %u for GPIO%d.\n",
- direction, offset);
- return ret;
- }
+#if 0
+ int ret = regmap_update_bits(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG,
+ TPS65219_GPIO0_DIR_MASK, direction);
+ if (ret) {
+ dev_err(dev,
+ "GPIO DEBUG enabled: Fail to change direction to %u for GPIO%d.\n",
+ direction, offset);
+ return ret;
}
+#endif
dev_err(dev,
"GPIO%d direction set by NVM, change to %u failed, not allowed by specification\n",
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 7eaf8a286..f7d5120ff 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -160,18 +161,21 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
for (i = 0; i < chip->ngpio; i++) {
int gpio = i + chip->base;
int reg;
- const char *label, *pull, *powerdomain;
+ const char *pull, *powerdomain;
/* We report the GPIO even if it's not requested since
* we're also reporting things like alternate
* functions which apply even when the GPIO is not in
* use as a GPIO.
*/
- label = gpiochip_is_requested(chip, i);
- if (!label)
- label = "Unrequested";
+ char *label __free(kfree) = gpiochip_dup_line_label(chip, i);
+ if (IS_ERR(label)) {
+ dev_err(wm831x->dev, "Failed to duplicate label\n");
+ continue;
+ }
- seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);
+ seq_printf(s, " gpio-%-3d (%-20.20s) ",
+ gpio, label ?: "Unrequested");
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index f4a474cef..bf05c9b58 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -193,18 +194,20 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
for (i = 0; i < chip->ngpio; i++) {
int gpio = i + chip->base;
int reg;
- const char *label;
/* We report the GPIO even if it's not requested since
* we're also reporting things like alternate
* functions which apply even when the GPIO is not in
* use as a GPIO.
*/
- label = gpiochip_is_requested(chip, i);
- if (!label)
- label = "Unrequested";
+ char *label __free(kfree) = gpiochip_dup_line_label(chip, i);
+ if (IS_ERR(label)) {
+ dev_err(wm8994->dev, "Failed to duplicate label\n");
+ continue;
+ }
- seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);
+ seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio,
+ label ?: "Unrequested");
reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
if (reg < 0) {
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 823198368..7348df385 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -52,7 +52,6 @@
* @dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
* @irq: IRQ used by GPIO device
- * @irqchip: IRQ chip
* @enable: GPIO IRQ enable/disable bitfield
* @rising_edge: GPIO IRQ rising edge enable/disable bitfield
* @falling_edge: GPIO IRQ falling edge enable/disable bitfield
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f713d1ef7..1438fdca0 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -5,6 +5,7 @@
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
+#include <linux/cleanup.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
@@ -19,8 +20,11 @@
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/overflow.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
@@ -62,45 +66,6 @@ typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
typedef ssize_t (*read_fn)(struct file *, char __user *,
size_t count, loff_t *);
-static __poll_t call_poll_locked(struct file *file,
- struct poll_table_struct *wait,
- struct gpio_device *gdev, poll_fn func)
-{
- __poll_t ret;
-
- down_read(&gdev->sem);
- ret = func(file, wait);
- up_read(&gdev->sem);
-
- return ret;
-}
-
-static long call_ioctl_locked(struct file *file, unsigned int cmd,
- unsigned long arg, struct gpio_device *gdev,
- ioctl_fn func)
-{
- long ret;
-
- down_read(&gdev->sem);
- ret = func(file, cmd, arg);
- up_read(&gdev->sem);
-
- return ret;
-}
-
-static ssize_t call_read_locked(struct file *file, char __user *buf,
- size_t count, loff_t *f_ps,
- struct gpio_device *gdev, read_fn func)
-{
- ssize_t ret;
-
- down_read(&gdev->sem);
- ret = func(file, buf, count, f_ps);
- up_read(&gdev->sem);
-
- return ret;
-}
-
/*
* GPIO line handle management
*/
@@ -235,8 +200,8 @@ static long linehandle_set_config(struct linehandle_state *lh,
return 0;
}
-static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long linehandle_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct linehandle_state *lh = file->private_data;
void __user *ip = (void __user *)arg;
@@ -245,6 +210,8 @@ static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
unsigned int i;
int ret;
+ guard(rwsem_read)(&lh->gdev->sem);
+
if (!lh->gdev->chip)
return -ENODEV;
@@ -294,15 +261,6 @@ static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
}
}
-static long linehandle_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct linehandle_state *lh = file->private_data;
-
- return call_ioctl_locked(file, cmd, arg, lh->gdev,
- linehandle_ioctl_unlocked);
-}
-
#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -461,6 +419,7 @@ out_free_lh:
/**
* struct line - contains the state of a requested line
+ * @node: the rbtree node used to store the line in the supinfo_tree if it has
+ *	supplemental information
* @desc: the GPIO descriptor for this line.
* @req: the corresponding line request
* @irq: the interrupt triggered in response to events on this GPIO
@@ -473,6 +432,7 @@ out_free_lh:
* @line_seqno: the seqno for the current edge event in the sequence of
* events for this line.
* @work: the worker that implements software debouncing
+ * @debounce_period_us: the debounce period in microseconds
* @sw_debounced: flag indicating if the software debouncer is active
* @level: the current debounced physical level of the line
* @hdesc: the Hardware Timestamp Engine (HTE) descriptor
@@ -481,6 +441,7 @@ out_free_lh:
* @last_seqno: the last sequence number before debounce period expires
*/
struct line {
+ struct rb_node node;
struct gpio_desc *desc;
/*
* -- edge detector specific fields --
@@ -515,6 +476,15 @@ struct line {
*/
struct delayed_work work;
/*
+ * debounce_period_us is accessed by debounce_irq_handler() and
+ * process_hw_ts() which are disabled when modified by
+ * debounce_setup(), edge_detector_setup() or edge_detector_stop()
+ * or can live with a stale version when updated by
+ * edge_detector_update().
+ * The modifying functions are themselves mutually exclusive.
+ */
+ unsigned int debounce_period_us;
+ /*
* sw_debounce is accessed by linereq_set_config(), which is the
* only setter, and linereq_get_values(), which can live with a
* slightly stale value.
@@ -546,6 +516,17 @@ struct line {
#endif /* CONFIG_HTE */
};
+/*
+ * An rbtree of the struct lines containing supplemental info.
+ * Used to populate gpio_v2_line_info with cdev specific fields not contained
+ * in the struct gpio_desc.
+ * A line is determined to contain supplemental information by
+ * line_has_supinfo().
+ */
+static struct rb_root supinfo_tree = RB_ROOT;
+/* covers supinfo_tree */
+static DEFINE_SPINLOCK(supinfo_lock);
+
/**
* struct linereq - contains the state of a userspace line request
* @gdev: the GPIO device the line request pertains to
@@ -559,7 +540,8 @@ struct line {
* this line request. Note that this is not used when @num_lines is 1, as
* the line_seqno is then the same and is cheaper to calculate.
* @config_mutex: mutex for serializing ioctl() calls to ensure consistency
- * of configuration, particularly multi-step accesses to desc flags.
+ * of configuration, particularly multi-step accesses to desc flags and
+ * changes to supinfo status.
* @lines: the lines held by this line request, with @num_lines elements.
*/
struct linereq {
@@ -575,6 +557,103 @@ struct linereq {
struct line lines[] __counted_by(num_lines);
};
+static void supinfo_insert(struct line *line)
+{
+ struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
+ struct line *entry;
+
+ guard(spinlock)(&supinfo_lock);
+
+ while (*new) {
+ entry = container_of(*new, struct line, node);
+
+ parent = *new;
+ if (line->desc < entry->desc) {
+ new = &((*new)->rb_left);
+ } else if (line->desc > entry->desc) {
+ new = &((*new)->rb_right);
+ } else {
+ /* this should never happen */
+ WARN(1, "duplicate line inserted");
+ return;
+ }
+ }
+
+ rb_link_node(&line->node, parent, new);
+ rb_insert_color(&line->node, &supinfo_tree);
+}
+
+static void supinfo_erase(struct line *line)
+{
+ guard(spinlock)(&supinfo_lock);
+
+ rb_erase(&line->node, &supinfo_tree);
+}
+
+static struct line *supinfo_find(struct gpio_desc *desc)
+{
+ struct rb_node *node = supinfo_tree.rb_node;
+ struct line *line;
+
+ while (node) {
+ line = container_of(node, struct line, node);
+ if (desc < line->desc)
+ node = node->rb_left;
+ else if (desc > line->desc)
+ node = node->rb_right;
+ else
+ return line;
+ }
+ return NULL;
+}
+
+static void supinfo_to_lineinfo(struct gpio_desc *desc,
+ struct gpio_v2_line_info *info)
+{
+ struct gpio_v2_line_attribute *attr;
+ struct line *line;
+
+ guard(spinlock)(&supinfo_lock);
+
+ line = supinfo_find(desc);
+ if (!line)
+ return;
+
+ attr = &info->attrs[info->num_attrs];
+ attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+ attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
+ info->num_attrs++;
+}
+
+static inline bool line_has_supinfo(struct line *line)
+{
+ return READ_ONCE(line->debounce_period_us);
+}
+
+/*
+ * Checks line_has_supinfo() before and after the change to avoid unnecessary
+ * supinfo_tree access.
+ * Called indirectly by linereq_create() or linereq_set_config() so line
+ * is already protected from concurrent changes.
+ */
+static void line_set_debounce_period(struct line *line,
+ unsigned int debounce_period_us)
+{
+ bool was_suppl = line_has_supinfo(line);
+
+ WRITE_ONCE(line->debounce_period_us, debounce_period_us);
+
+ /* if supinfo status is unchanged then we're done */
+ if (line_has_supinfo(line) == was_suppl)
+ return;
+
+ /* supinfo status has changed, so update the tree */
+ if (was_suppl)
+ supinfo_erase(line);
+ else
+ supinfo_insert(line);
+}
+
#define GPIO_V2_LINE_BIAS_FLAGS \
(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
@@ -625,13 +704,13 @@ static void linereq_put_event(struct linereq *lr,
{
bool overflow = false;
- spin_lock(&lr->wait.lock);
- if (kfifo_is_full(&lr->events)) {
- overflow = true;
- kfifo_skip(&lr->events);
+ scoped_guard(spinlock, &lr->wait.lock) {
+ if (kfifo_is_full(&lr->events)) {
+ overflow = true;
+ kfifo_skip(&lr->events);
+ }
+ kfifo_in(&lr->events, le, 1);
}
- kfifo_in(&lr->events, le, 1);
- spin_unlock(&lr->wait.lock);
if (!overflow)
wake_up_poll(&lr->wait, EPOLLIN);
else
@@ -655,6 +734,25 @@ static u32 line_event_id(int level)
GPIO_V2_LINE_EVENT_FALLING_EDGE;
}
+static inline char *make_irq_label(const char *orig)
+{
+ char *new;
+
+ if (!orig)
+ return NULL;
+
+ new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ return new;
+}
+
+static inline void free_irq_label(const char *label)
+{
+ kfree(label);
+}
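make_irq_label() duplicates the consumer label with '/' replaced by ':' before passing it to request_irq(), since the string becomes the IRQ action name and a '/' would break the procfs entry created under /proc/irq; duplicating also ensures the name outlives the request, hence the matching free_irq_label() on teardown. A runnable user-space model of the replacement step:

/* User-space model of make_irq_label(): duplicate the label and replace
 * '/' with ':' so it is safe to use as a procfs entry name.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_and_replace(const char *src, char old, char new)
{
	char *s = strdup(src);

	if (!s)
		return NULL;
	for (char *p = s; *p; p++)
		if (*p == old)
			*p = new;
	return s;
}

int main(void)
{
	char *label = dup_and_replace("gpio/consumer", '/', ':');

	if (label) {
		printf("%s\n", label);	/* gpio:consumer */
		free(label);
	}
	return 0;
}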
+
#ifdef CONFIG_HTE
static enum hte_return process_hw_ts_thread(void *p)
@@ -723,7 +821,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
line->total_discard_seq++;
line->last_seqno = ts->seq;
mod_delayed_work(system_wq, &line->work,
- usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
+ usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
} else {
if (unlikely(ts->seq < line->line_seqno))
return HTE_CB_HANDLED;
@@ -864,7 +962,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
struct line *line = p;
mod_delayed_work(system_wq, &line->work,
- usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
+ usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
return IRQ_HANDLED;
}
@@ -942,11 +1040,12 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
+ char *label;
/* try hardware */
ret = gpiod_set_debounce(line->desc, debounce_period_us);
if (!ret) {
- WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
+ line_set_debounce_period(line, debounce_period_us);
return ret;
}
if (ret != -ENOTSUPP)
@@ -964,11 +1063,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
if (irq < 0)
return -ENXIO;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return -ENOMEM;
+
irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
ret = request_irq(irq, debounce_irq_handler, irqflags,
- line->req->label, line);
- if (ret)
+ label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
} else {
ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
@@ -1013,7 +1118,7 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
static void edge_detector_stop(struct line *line)
{
if (line->irq) {
- free_irq(line->irq, line);
+ free_irq_label(free_irq(line->irq, line));
line->irq = 0;
}
@@ -1025,8 +1130,7 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->edflags, 0);
- if (line->desc)
- WRITE_ONCE(line->desc->debounce_period_us, 0);
+ line_set_debounce_period(line, 0);
/* do not change line->level - see comment in debounced_value() */
}
@@ -1038,6 +1142,7 @@ static int edge_detector_setup(struct line *line,
unsigned long irqflags = 0;
u64 eflags;
int irq, ret;
+ char *label;
eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
if (eflags && !kfifo_initialized(&line->req->events)) {
@@ -1051,7 +1156,7 @@ static int edge_detector_setup(struct line *line,
ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
- WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
+ line_set_debounce_period(line, debounce_period_us);
}
/* detection disabled or sw debouncer will provide edge detection */
@@ -1074,11 +1179,17 @@ static int edge_detector_setup(struct line *line,
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
+ label = make_irq_label(line->req->label);
+ if (IS_ERR(label))
+ return PTR_ERR(label);
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
- irqflags, line->req->label, line);
- if (ret)
+ irqflags, label, line);
+ if (ret) {
+ free_irq_label(label);
return ret;
+ }
line->irq = irq;
return 0;
@@ -1093,12 +1204,12 @@ static int edge_detector_update(struct line *line,
gpio_v2_line_config_debounce_period(lc, line_idx);
if ((active_edflags == edflags) &&
- (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
+ (READ_ONCE(line->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
+ line_set_debounce_period(line, debounce_period_us);
return 0;
}
@@ -1272,9 +1383,18 @@ static long linereq_get_values(struct linereq *lr, void __user *ip)
if (copy_from_user(&lv, ip, sizeof(lv)))
return -EFAULT;
+ /*
+ * gpiod_get_array_value_complex() requires compacted desc and val
+ * arrays, rather than the sparse ones in lv.
+ * Calculation of num_get and construction of the desc array is
+ * optimized to avoid allocation for the desc array for the common
+ * num_get == 1 case.
+ */
+ /* scan requested lines to calculate the subset to get */
for (num_get = 0, i = 0; i < lr->num_lines; i++) {
if (lv.mask & BIT_ULL(i)) {
num_get++;
+ /* capture desc for the num_get == 1 case */
descs = &lr->lines[i].desc;
}
}
@@ -1283,6 +1403,7 @@ static long linereq_get_values(struct linereq *lr, void __user *ip)
return -EINVAL;
if (num_get != 1) {
+ /* build compacted desc array */
descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
@@ -1303,6 +1424,7 @@ static long linereq_get_values(struct linereq *lr, void __user *ip)
lv.bits = 0;
for (didx = 0, i = 0; i < lr->num_lines; i++) {
+ /* unpack compacted vals for the response */
if (lv.mask & BIT_ULL(i)) {
if (lr->lines[i].sw_debounced)
val = debounced_value(&lr->lines[i]);
@@ -1320,22 +1442,38 @@ static long linereq_get_values(struct linereq *lr, void __user *ip)
return 0;
}
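Both the get and set paths compact the sparse selection in lv.mask into dense arrays because gpiod_get_array_value_complex() and gpiod_set_array_value_complex() expect contiguous desc/val inputs; the single-line case reuses a pointer into lr->lines to skip the allocation. A runnable sketch of the compaction step:

/* Runnable sketch of the mask-compaction step: collect the indices
 * selected by a sparse 64-bit mask into a dense array.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t mask = BIT_ULL(1) | BIT_ULL(4) | BIT_ULL(9);
	unsigned int num_lines = 16, dense[16], num_get = 0;

	for (unsigned int i = 0; i < num_lines; i++)
		if (mask & BIT_ULL(i))
			dense[num_get++] = i;	/* compacted selection */

	for (unsigned int i = 0; i < num_get; i++)
		printf("slot %u -> line %u\n", i, dense[i]);
	return 0;
}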
-static long linereq_set_values_unlocked(struct linereq *lr,
- struct gpio_v2_line_values *lv)
+static long linereq_set_values(struct linereq *lr, void __user *ip)
{
DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
+ struct gpio_v2_line_values lv;
struct gpio_desc **descs;
unsigned int i, didx, num_set;
int ret;
+ if (copy_from_user(&lv, ip, sizeof(lv)))
+ return -EFAULT;
+
+ guard(mutex)(&lr->config_mutex);
+
+ /*
+ * gpiod_set_array_value_complex() requires compacted desc and val
+ * arrays, rather than the sparse ones in lv.
+ * Calculation of num_set and construction of the descs and vals arrays
+ * is optimized to minimize scanning the lv->mask, and to avoid
+ * allocation for the desc array for the common num_set == 1 case.
+ */
bitmap_zero(vals, GPIO_V2_LINES_MAX);
+ /* scan requested lines to determine the subset to be set */
for (num_set = 0, i = 0; i < lr->num_lines; i++) {
- if (lv->mask & BIT_ULL(i)) {
+ if (lv.mask & BIT_ULL(i)) {
+ /* setting inputs is not allowed */
if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
return -EPERM;
- if (lv->bits & BIT_ULL(i))
+ /* add to compacted values */
+ if (lv.bits & BIT_ULL(i))
__set_bit(num_set, vals);
num_set++;
+ /* capture desc for the num_set == 1 case */
descs = &lr->lines[i].desc;
}
}
@@ -1343,12 +1481,12 @@ static long linereq_set_values_unlocked(struct linereq *lr,
return -EINVAL;
if (num_set != 1) {
- /* build compacted desc array and values */
+ /* build compacted desc array */
descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
if (!descs)
return -ENOMEM;
for (didx = 0, i = 0; i < lr->num_lines; i++) {
- if (lv->mask & BIT_ULL(i)) {
+ if (lv.mask & BIT_ULL(i)) {
descs[didx] = lr->lines[i].desc;
didx++;
}
@@ -1362,36 +1500,28 @@ static long linereq_set_values_unlocked(struct linereq *lr,
return ret;
}
-static long linereq_set_values(struct linereq *lr, void __user *ip)
-{
- struct gpio_v2_line_values lv;
- int ret;
-
- if (copy_from_user(&lv, ip, sizeof(lv)))
- return -EFAULT;
-
- mutex_lock(&lr->config_mutex);
-
- ret = linereq_set_values_unlocked(lr, &lv);
-
- mutex_unlock(&lr->config_mutex);
-
- return ret;
-}
-
-static long linereq_set_config_unlocked(struct linereq *lr,
- struct gpio_v2_line_config *lc)
+static long linereq_set_config(struct linereq *lr, void __user *ip)
{
+ struct gpio_v2_line_config lc;
struct gpio_desc *desc;
struct line *line;
unsigned int i;
u64 flags, edflags;
int ret;
+ if (copy_from_user(&lc, ip, sizeof(lc)))
+ return -EFAULT;
+
+ ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&lr->config_mutex);
+
for (i = 0; i < lr->num_lines; i++) {
line = &lr->lines[i];
desc = lr->lines[i].desc;
- flags = gpio_v2_line_config_flags(lc, i);
+ flags = gpio_v2_line_config_flags(&lc, i);
gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
/*
@@ -1399,7 +1529,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
* or output, else the line will be treated "as is".
*/
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
- int val = gpio_v2_line_config_output_value(lc, i);
+ int val = gpio_v2_line_config_output_value(&lc, i);
edge_detector_stop(line);
ret = gpiod_direction_output(desc, val);
@@ -1410,7 +1540,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
if (ret)
return ret;
- ret = edge_detector_update(line, lc, i, edflags);
+ ret = edge_detector_update(line, &lc, i, edflags);
if (ret)
return ret;
}
@@ -1422,33 +1552,14 @@ static long linereq_set_config_unlocked(struct linereq *lr,
return 0;
}
-static long linereq_set_config(struct linereq *lr, void __user *ip)
-{
- struct gpio_v2_line_config lc;
- int ret;
-
- if (copy_from_user(&lc, ip, sizeof(lc)))
- return -EFAULT;
-
- ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
- if (ret)
- return ret;
-
- mutex_lock(&lr->config_mutex);
-
- ret = linereq_set_config_unlocked(lr, &lc);
-
- mutex_unlock(&lr->config_mutex);
-
- return ret;
-}
-
-static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long linereq_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct linereq *lr = file->private_data;
void __user *ip = (void __user *)arg;
+ guard(rwsem_read)(&lr->gdev->sem);
+
if (!lr->gdev->chip)
return -ENODEV;
@@ -1464,15 +1575,6 @@ static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
}
}
-static long linereq_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct linereq *lr = file->private_data;
-
- return call_ioctl_locked(file, cmd, arg, lr->gdev,
- linereq_ioctl_unlocked);
-}
-
#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -1481,12 +1583,14 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
}
#endif
-static __poll_t linereq_poll_unlocked(struct file *file,
- struct poll_table_struct *wait)
+static __poll_t linereq_poll(struct file *file,
+ struct poll_table_struct *wait)
{
struct linereq *lr = file->private_data;
__poll_t events = 0;
+ guard(rwsem_read)(&lr->gdev->sem);
+
if (!lr->gdev->chip)
return EPOLLHUP | EPOLLERR;
@@ -1499,22 +1603,16 @@ static __poll_t linereq_poll_unlocked(struct file *file,
return events;
}
-static __poll_t linereq_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct linereq *lr = file->private_data;
-
- return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
-}
-
-static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
- size_t count, loff_t *f_ps)
+static ssize_t linereq_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_ps)
{
struct linereq *lr = file->private_data;
struct gpio_v2_line_event le;
ssize_t bytes_read = 0;
int ret;
+ guard(rwsem_read)(&lr->gdev->sem);
+
if (!lr->gdev->chip)
return -ENODEV;
@@ -1522,28 +1620,22 @@ static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
return -EINVAL;
do {
- spin_lock(&lr->wait.lock);
- if (kfifo_is_empty(&lr->events)) {
- if (bytes_read) {
- spin_unlock(&lr->wait.lock);
- return bytes_read;
- }
-
- if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&lr->wait.lock);
- return -EAGAIN;
+ scoped_guard(spinlock, &lr->wait.lock) {
+ if (kfifo_is_empty(&lr->events)) {
+ if (bytes_read)
+ return bytes_read;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible_locked(lr->wait,
+ !kfifo_is_empty(&lr->events));
+ if (ret)
+ return ret;
}
- ret = wait_event_interruptible_locked(lr->wait,
- !kfifo_is_empty(&lr->events));
- if (ret) {
- spin_unlock(&lr->wait.lock);
- return ret;
- }
+ ret = kfifo_out(&lr->events, &le, 1);
}
-
- ret = kfifo_out(&lr->events, &le, 1);
- spin_unlock(&lr->wait.lock);
if (ret != 1) {
/*
* This should never happen - we were holding the
@@ -1562,17 +1654,9 @@ static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
return bytes_read;
}
-static ssize_t linereq_read(struct file *file, char __user *buf,
- size_t count, loff_t *f_ps)
-{
- struct linereq *lr = file->private_data;
-
- return call_read_locked(file, buf, count, f_ps, lr->gdev,
- linereq_read_unlocked);
-}
-
static void linereq_free(struct linereq *lr)
{
+ struct line *line;
unsigned int i;
if (lr->device_unregistered_nb.notifier_call)
@@ -1580,15 +1664,19 @@ static void linereq_free(struct linereq *lr)
&lr->device_unregistered_nb);
for (i = 0; i < lr->num_lines; i++) {
- if (lr->lines[i].desc) {
- edge_detector_stop(&lr->lines[i]);
- gpiod_free(lr->lines[i].desc);
- }
+ line = &lr->lines[i];
+ if (!line->desc)
+ continue;
+
+ edge_detector_stop(line);
+ if (line_has_supinfo(line))
+ supinfo_erase(line);
+ gpiod_free(line->desc);
}
kfifo_free(&lr->events);
kfree(lr->label);
gpio_device_put(lr->gdev);
- kfree(lr);
+ kvfree(lr);
}
static int linereq_release(struct inode *inode, struct file *file)
@@ -1653,7 +1741,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (ret)
return ret;
- lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
+ lr = kvzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
if (!lr)
return -ENOMEM;
lr->num_lines = ulr.num_lines;
@@ -1818,12 +1906,14 @@ struct lineevent_state {
(GPIOEVENT_REQUEST_RISING_EDGE | \
GPIOEVENT_REQUEST_FALLING_EDGE)
-static __poll_t lineevent_poll_unlocked(struct file *file,
- struct poll_table_struct *wait)
+static __poll_t lineevent_poll(struct file *file,
+ struct poll_table_struct *wait)
{
struct lineevent_state *le = file->private_data;
__poll_t events = 0;
+ guard(rwsem_read)(&le->gdev->sem);
+
if (!le->gdev->chip)
return EPOLLHUP | EPOLLERR;
@@ -1835,14 +1925,6 @@ static __poll_t lineevent_poll_unlocked(struct file *file,
return events;
}
-static __poll_t lineevent_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct lineevent_state *le = file->private_data;
-
- return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
-}
-
static int lineevent_unregistered_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -1859,8 +1941,8 @@ struct compat_gpioeevent_data {
u32 id;
};
-static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
- size_t count, loff_t *f_ps)
+static ssize_t lineevent_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_ps)
{
struct lineevent_state *le = file->private_data;
struct gpioevent_data ge;
@@ -1868,6 +1950,8 @@ static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
ssize_t ge_size;
int ret;
+ guard(rwsem_read)(&le->gdev->sem);
+
if (!le->gdev->chip)
return -ENODEV;
@@ -1888,28 +1972,22 @@ static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
return -EINVAL;
do {
- spin_lock(&le->wait.lock);
- if (kfifo_is_empty(&le->events)) {
- if (bytes_read) {
- spin_unlock(&le->wait.lock);
- return bytes_read;
- }
-
- if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&le->wait.lock);
- return -EAGAIN;
+ scoped_guard(spinlock, &le->wait.lock) {
+ if (kfifo_is_empty(&le->events)) {
+ if (bytes_read)
+ return bytes_read;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible_locked(le->wait,
+ !kfifo_is_empty(&le->events));
+ if (ret)
+ return ret;
}
- ret = wait_event_interruptible_locked(le->wait,
- !kfifo_is_empty(&le->events));
- if (ret) {
- spin_unlock(&le->wait.lock);
- return ret;
- }
+ ret = kfifo_out(&le->events, &ge, 1);
}
-
- ret = kfifo_out(&le->events, &ge, 1);
- spin_unlock(&le->wait.lock);
if (ret != 1) {
/*
* This should never happen - we were holding the lock
@@ -1928,22 +2006,13 @@ static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
return bytes_read;
}
-static ssize_t lineevent_read(struct file *file, char __user *buf,
- size_t count, loff_t *f_ps)
-{
- struct lineevent_state *le = file->private_data;
-
- return call_read_locked(file, buf, count, f_ps, le->gdev,
- lineevent_read_unlocked);
-}
-
static void lineevent_free(struct lineevent_state *le)
{
if (le->device_unregistered_nb.notifier_call)
blocking_notifier_chain_unregister(&le->gdev->device_notifier,
&le->device_unregistered_nb);
if (le->irq)
- free_irq(le->irq, le);
+ free_irq_label(free_irq(le->irq, le));
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
@@ -1957,13 +2026,15 @@ static int lineevent_release(struct inode *inode, struct file *file)
return 0;
}
-static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long lineevent_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct lineevent_state *le = file->private_data;
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
+ guard(rwsem_read)(&le->gdev->sem);
+
if (!le->gdev->chip)
return -ENODEV;
@@ -1989,15 +2060,6 @@ static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
return -EINVAL;
}
-static long lineevent_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct lineevent_state *le = file->private_data;
-
- return call_ioctl_locked(file, cmd, arg, le->gdev,
- lineevent_ioctl_unlocked);
-}
-
#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -2091,6 +2153,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
int fd;
int ret;
int irq, irqflags = 0;
+ char *label;
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
@@ -2175,15 +2238,23 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ label = make_irq_label(le->label);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
+ goto out_free_le;
+ }
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
- le->label,
+ label,
le);
- if (ret)
+ if (ret) {
+ free_irq_label(label);
goto out_free_le;
+ }
le->irq = irq;
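A plausible shape for the make_irq_label()/free_irq_label() helpers assumed
above (a sketch only; the actual definitions sit outside this hunk). The
label is duplicated so it outlives the request, and free_irq() returning
the devname cookie is what makes the free_irq_label(free_irq(...)) idiom
seen elsewhere in this patch work.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

static char *make_irq_label(const char *orig)
{
	char *new;

	if (!orig)
		return NULL;

	/* '/' cannot appear in /proc/irq/ entries; replace it */
	new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	return new;
}

static void free_irq_label(const char *label)
{
	kfree(label);
}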
@@ -2272,84 +2343,72 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
struct gpio_v2_line_info *info)
{
struct gpio_chip *gc = desc->gdev->chip;
- bool ok_for_pinctrl;
- unsigned long flags;
- u32 debounce_period_us;
- unsigned int num_attrs = 0;
+ unsigned long dflags;
memset(info, 0, sizeof(*info));
info->offset = gpio_chip_hwgpio(desc);
- /*
- * This function takes a mutex so we must check this before taking
- * the spinlock.
- *
- * FIXME: find a non-racy way to retrieve this information. Maybe a
- * lock common to both frameworks?
- */
- ok_for_pinctrl = pinctrl_gpio_can_use_line(gc, info->offset);
+ scoped_guard(spinlock_irqsave, &gpio_lock) {
+ if (desc->name)
+ strscpy(info->name, desc->name, sizeof(info->name));
- spin_lock_irqsave(&gpio_lock, flags);
+ if (desc->label)
+ strscpy(info->consumer, desc->label,
+ sizeof(info->consumer));
- if (desc->name)
- strscpy(info->name, desc->name, sizeof(info->name));
-
- if (desc->label)
- strscpy(info->consumer, desc->label, sizeof(info->consumer));
+ dflags = READ_ONCE(desc->flags);
+ }
/*
- * Userspace only need to know that the kernel is using this GPIO so
- * it can't use it.
+ * Userspace need only know that the kernel is using this GPIO so it
+ * can't use it.
+ * The calculation of the used flag is slightly racy, as it may read
+ * desc, gc and pinctrl state without a lock covering all three at
+ * once. Worst case if the line is in transition and the calculation
+ * is inconsistent then it looks to the user like they performed the
+ * read on the other side of the transition - but that can always
+ * happen.
+ * The definitive test that a line is available to userspace is to
+ * request it.
*/
- info->flags = 0;
- if (test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_IS_HOGGED, &desc->flags) ||
- test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags) ||
- test_bit(FLAG_SYSFS, &desc->flags) ||
+ if (test_bit(FLAG_REQUESTED, &dflags) ||
+ test_bit(FLAG_IS_HOGGED, &dflags) ||
+ test_bit(FLAG_USED_AS_IRQ, &dflags) ||
+ test_bit(FLAG_EXPORT, &dflags) ||
+ test_bit(FLAG_SYSFS, &dflags) ||
!gpiochip_line_is_valid(gc, info->offset) ||
- !ok_for_pinctrl)
+ !pinctrl_gpio_can_use_line(gc, info->offset))
info->flags |= GPIO_V2_LINE_FLAG_USED;
- if (test_bit(FLAG_IS_OUT, &desc->flags))
+ if (test_bit(FLAG_IS_OUT, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
else
info->flags |= GPIO_V2_LINE_FLAG_INPUT;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(FLAG_ACTIVE_LOW, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ if (test_bit(FLAG_OPEN_DRAIN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
- if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ if (test_bit(FLAG_OPEN_SOURCE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
- if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ if (test_bit(FLAG_BIAS_DISABLE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
- if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ if (test_bit(FLAG_PULL_DOWN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &desc->flags))
+ if (test_bit(FLAG_PULL_UP, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
- if (test_bit(FLAG_EDGE_RISING, &desc->flags))
+ if (test_bit(FLAG_EDGE_RISING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
- if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
+ if (test_bit(FLAG_EDGE_FALLING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
+ if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
+ else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
-
- debounce_period_us = READ_ONCE(desc->debounce_period_us);
- if (debounce_period_us) {
- info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
- info->attrs[num_attrs].debounce_period_us = debounce_period_us;
- num_attrs++;
- }
- info->num_attrs = num_attrs;
-
- spin_unlock_irqrestore(&gpio_lock, flags);
}
struct gpio_chardev_data {
@@ -2455,6 +2514,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return -EBUSY;
}
gpio_desc_to_lineinfo(desc, &lineinfo);
+ supinfo_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
if (watch)
@@ -2481,12 +2541,17 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
return 0;
}
-static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
+/*
+ * gpio_ioctl() - ioctl handler for the GPIO chardev
+ */
+static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
void __user *ip = (void __user *)arg;
+ guard(rwsem_read)(&gdev->sem);
+
/* We fail any subsequent ioctl():s when the chip is gone */
if (!gdev->chip)
return -ENODEV;
@@ -2518,17 +2583,6 @@ static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned lo
}
}
-/*
- * gpio_ioctl() - ioctl handler for the GPIO chardev
- */
-static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct gpio_chardev_data *cdev = file->private_data;
-
- return call_ioctl_locked(file, cmd, arg, cdev->gdev,
- gpio_ioctl_unlocked);
-}
-
#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -2553,6 +2607,7 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
chg.event_type = action;
chg.timestamp_ns = ktime_get_ns();
gpio_desc_to_lineinfo(desc, &chg.info);
+ supinfo_to_lineinfo(desc, &chg.info);
ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
if (ret)
@@ -2575,12 +2630,14 @@ static int gpio_device_unregistered_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
-static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
- struct poll_table_struct *pollt)
+static __poll_t lineinfo_watch_poll(struct file *file,
+ struct poll_table_struct *pollt)
{
struct gpio_chardev_data *cdev = file->private_data;
__poll_t events = 0;
+ guard(rwsem_read)(&cdev->gdev->sem);
+
if (!cdev->gdev->chip)
return EPOLLHUP | EPOLLERR;
@@ -2593,17 +2650,8 @@ static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
return events;
}
-static __poll_t lineinfo_watch_poll(struct file *file,
- struct poll_table_struct *pollt)
-{
- struct gpio_chardev_data *cdev = file->private_data;
-
- return call_poll_locked(file, pollt, cdev->gdev,
- lineinfo_watch_poll_unlocked);
-}
-
-static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
- size_t count, loff_t *off)
+static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
+ size_t count, loff_t *off)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_v2_line_info_changed event;
@@ -2611,6 +2659,8 @@ static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
int ret;
size_t event_size;
+ guard(rwsem_read)(&cdev->gdev->sem);
+
if (!cdev->gdev->chip)
return -ENODEV;
@@ -2621,38 +2671,30 @@ static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
#endif
do {
- spin_lock(&cdev->wait.lock);
- if (kfifo_is_empty(&cdev->events)) {
- if (bytes_read) {
- spin_unlock(&cdev->wait.lock);
- return bytes_read;
+ scoped_guard(spinlock, &cdev->wait.lock) {
+ if (kfifo_is_empty(&cdev->events)) {
+ if (bytes_read)
+ return bytes_read;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible_locked(cdev->wait,
+ !kfifo_is_empty(&cdev->events));
+ if (ret)
+ return ret;
}
-
- if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&cdev->wait.lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_locked(cdev->wait,
- !kfifo_is_empty(&cdev->events));
- if (ret) {
- spin_unlock(&cdev->wait.lock);
- return ret;
- }
- }
#ifdef CONFIG_GPIO_CDEV_V1
- /* must be after kfifo check so watch_abi_version is set */
- if (atomic_read(&cdev->watch_abi_version) == 2)
- event_size = sizeof(struct gpio_v2_line_info_changed);
- else
- event_size = sizeof(struct gpioline_info_changed);
- if (count < event_size) {
- spin_unlock(&cdev->wait.lock);
- return -EINVAL;
- }
+ /* must be after kfifo check so watch_abi_version is set */
+ if (atomic_read(&cdev->watch_abi_version) == 2)
+ event_size = sizeof(struct gpio_v2_line_info_changed);
+ else
+ event_size = sizeof(struct gpioline_info_changed);
+ if (count < event_size)
+ return -EINVAL;
#endif
- ret = kfifo_out(&cdev->events, &event, 1);
- spin_unlock(&cdev->wait.lock);
+ ret = kfifo_out(&cdev->events, &event, 1);
+ }
if (ret != 1) {
ret = -EIO;
break;
@@ -2681,15 +2723,6 @@ static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
return bytes_read;
}
-static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
- size_t count, loff_t *off)
-{
- struct gpio_chardev_data *cdev = file->private_data;
-
- return call_read_locked(file, buf, count, off, cdev->gdev,
- lineinfo_watch_read_unlocked);
-}
-
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -2703,17 +2736,15 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
struct gpio_chardev_data *cdev;
int ret = -ENOMEM;
- down_read(&gdev->sem);
+ guard(rwsem_read)(&gdev->sem);
/* Fail on open if the backing gpiochip is gone */
- if (!gdev->chip) {
- ret = -ENODEV;
- goto out_unlock;
- }
+ if (!gdev->chip)
+ return -ENODEV;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
- goto out_unlock;
+ return -ENOMEM;
cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
if (!cdev->watched_lines)
@@ -2742,8 +2773,6 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
if (ret)
goto out_unregister_device_notifier;
- up_read(&gdev->sem);
-
return ret;
out_unregister_device_notifier:
@@ -2757,8 +2786,6 @@ out_free_bitmap:
bitmap_free(cdev->watched_lines);
out_free_cdev:
kfree(cdev);
-out_unlock:
- up_read(&gdev->sem);
return ret;
}
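The recurring conversion throughout this file, reduced to its essentials as
a sketch with hypothetical names: guard(rwsem_read)() takes gdev->sem for
the rest of the scope and releases it on every return path, so the paired
*_unlocked/call_*_locked wrappers collapse into single functions.

#include <linux/cleanup.h>
#include <linux/rwsem.h>

struct foo_data { struct gpio_device *gdev; };
static long foo_ioctl_body(struct foo_data *d, unsigned int cmd,
			   unsigned long arg);

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct foo_data *d = file->private_data;

	guard(rwsem_read)(&d->gdev->sem);	/* up_read() on all returns */

	if (!d->gdev->chip)	/* chip may have been removed */
		return -ENODEV;

	return foo_ioctl_body(d, cmd, arg);
}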
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 402f7d99b..e7770eedd 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -184,7 +184,7 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
const char *propname;
bool active_high;
} gpios[] = {
-#if !IS_ENABLED(CONFIG_LCD_HX8357)
+#if IS_ENABLED(CONFIG_LCD_HX8357)
/*
* Himax LCD controllers used incorrectly named
* "gpios-reset" property and also specified wrong
@@ -478,7 +478,7 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
*/
const char *compatible;
} gpios[] = {
-#if !IS_ENABLED(CONFIG_LCD_HX8357)
+#if IS_ENABLED(CONFIG_LCD_HX8357)
/* Himax LCD controllers used "gpios-reset" */
{ "reset", "gpios-reset", "himax,hx8357" },
{ "reset", "gpios-reset", "himax,hx8369" },
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 12d853845..6bf533213 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -165,10 +165,10 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
/* Caller holds gpiod-data mutex. */
static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
- struct gpio_desc *desc = data->desc;
- unsigned long irq_flags;
- int ret;
+ struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpio_desc *desc = data->desc;
+ unsigned long irq_flags;
+ int ret;
data->irq = gpiod_to_irq(desc);
if (data->irq < 0)
@@ -259,7 +259,7 @@ static ssize_t edge_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct gpiod_data *data = dev_get_drvdata(dev);
- ssize_t status = size;
+ ssize_t status = size;
int flags;
flags = sysfs_match_string(trigger_names, buf);
@@ -292,10 +292,11 @@ static DEVICE_ATTR_RW(edge);
/* Caller holds gpiod-data mutex. */
static int gpio_sysfs_set_active_low(struct device *dev, int value)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
- struct gpio_desc *desc = data->desc;
- int status = 0;
- unsigned int flags = data->irq_flags;
+ struct gpiod_data *data = dev_get_drvdata(dev);
+ unsigned int flags = data->irq_flags;
+ struct gpio_desc *desc = data->desc;
+ int status = 0;
+
if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
@@ -331,9 +332,9 @@ static ssize_t active_low_show(struct device *dev,
static ssize_t active_low_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
- ssize_t status;
- long value;
+ struct gpiod_data *data = dev_get_drvdata(dev);
+ ssize_t status;
+ long value;
status = kstrtol(buf, 0, &value);
if (status)
@@ -399,7 +400,7 @@ static const struct attribute_group *gpio_groups[] = {
static ssize_t base_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
+ const struct gpio_chip *chip = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", chip->base);
}
@@ -408,7 +409,7 @@ static DEVICE_ATTR_RO(base);
static ssize_t label_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
+ const struct gpio_chip *chip = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", chip->label ?: "");
}
@@ -417,7 +418,7 @@ static DEVICE_ATTR_RO(label);
static ssize_t ngpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
+ const struct gpio_chip *chip = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", chip->ngpio);
}
@@ -441,11 +442,10 @@ static ssize_t export_store(const struct class *class,
const struct class_attribute *attr,
const char *buf, size_t len)
{
- long gpio;
- struct gpio_desc *desc;
- int status;
- struct gpio_chip *gc;
- int offset;
+ struct gpio_desc *desc;
+ struct gpio_chip *gc;
+ int status, offset;
+ long gpio;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -496,9 +496,9 @@ static ssize_t unexport_store(const struct class *class,
const struct class_attribute *attr,
const char *buf, size_t len)
{
- long gpio;
- struct gpio_desc *desc;
- int status;
+ struct gpio_desc *desc;
+ int status;
+ long gpio;
status = kstrtol(buf, 0, &gpio);
if (status < 0)
@@ -559,14 +559,13 @@ static struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
- struct gpio_chip *chip;
- struct gpio_device *gdev;
- struct gpiod_data *data;
- unsigned long flags;
- int status;
- const char *ioname = NULL;
- struct device *dev;
- int offset;
+ const char *ioname = NULL;
+ struct gpio_device *gdev;
+ struct gpiod_data *data;
+ struct gpio_chip *chip;
+ unsigned long flags;
+ struct device *dev;
+ int status, offset;
/* can't export until sysfs is available ... */
if (!class_is_registered(&gpio_class)) {
@@ -733,9 +732,9 @@ EXPORT_SYMBOL_GPL(gpiod_unexport);
int gpiochip_sysfs_register(struct gpio_device *gdev)
{
- struct device *dev;
- struct device *parent;
struct gpio_chip *chip = gdev->chip;
+ struct device *parent;
+ struct device *dev;
/*
* Many systems add gpio chips for SOC support very early,
diff --git a/drivers/gpio/gpiolib-sysfs.h b/drivers/gpio/gpiolib-sysfs.h
index 0f213bdb4..b794b396d 100644
--- a/drivers/gpio/gpiolib-sysfs.h
+++ b/drivers/gpio/gpiolib-sysfs.h
@@ -3,10 +3,10 @@
#ifndef GPIOLIB_SYSFS_H
#define GPIOLIB_SYSFS_H
-#ifdef CONFIG_GPIO_SYSFS
-
struct gpio_device;
+#ifdef CONFIG_GPIO_SYSFS
+
int gpiochip_sysfs_register(struct gpio_device *gdev);
void gpiochip_sysfs_unregister(struct gpio_device *gdev);
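Moving the forward declaration above the #ifdef lets both branches of the
header see the type; a sketch of the resulting layout, assuming the usual
static-inline stubs in the #else branch:

struct gpio_device;

#ifdef CONFIG_GPIO_SYSFS
int gpiochip_sysfs_register(struct gpio_device *gdev);
void gpiochip_sysfs_unregister(struct gpio_device *gdev);
#else
static inline int gpiochip_sysfs_register(struct gpio_device *gdev)
{
	return 0;
}
static inline void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
}
#endif /* CONFIG_GPIO_SYSFS */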
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 2d8c07a44..d9da0331e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -45,19 +45,6 @@
* GPIOs can sometimes cost only an instruction or two per bit.
*/
-
-/* When debugging, extend minimal trust to callers and platform code.
- * Also emit diagnostic messages that may help initial bringup, when
- * board setup or driver bugs are most common.
- *
- * Otherwise, minimize overhead in what may be bitbanging codepaths.
- */
-#ifdef DEBUG
-#define extra_checks 1
-#else
-#define extra_checks 0
-#endif
-
/* Device and char device-related information */
static DEFINE_IDA(gpio_ida);
static dev_t gpio_devt;
@@ -255,6 +242,20 @@ int gpio_device_get_base(struct gpio_device *gdev)
EXPORT_SYMBOL_GPL(gpio_device_get_base);
/**
+ * gpio_device_get_label() - Get the label of this GPIO device
+ * @gdev: GPIO device
+ *
+ * Returns:
+ * Pointer to the string containing the GPIO device label. The string's
+ * lifetime is tied to that of the underlying GPIO device.
+ */
+const char *gpio_device_get_label(struct gpio_device *gdev)
+{
+ return gdev->label;
+}
+EXPORT_SYMBOL(gpio_device_get_label);
+
+/**
* gpio_device_get_chip() - Get the gpio_chip implementation of this GPIO device
* @gdev: GPIO device
*
@@ -276,7 +277,7 @@ struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev)
EXPORT_SYMBOL_GPL(gpio_device_get_chip);
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
-static int gpiochip_find_base(int ngpio)
+static int gpiochip_find_base_unlocked(int ngpio)
{
struct gpio_device *gdev;
int base = GPIO_DYNAMIC_BASE;
@@ -349,7 +350,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction);
* Return -EBUSY if the new chip overlaps with some other chip's integer
* space.
*/
-static int gpiodev_add_to_list(struct gpio_device *gdev)
+static int gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
{
struct gpio_device *prev, *next;
@@ -655,11 +656,6 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
- list_del(&gdev->list);
- spin_unlock_irqrestore(&gpio_lock, flags);
ida_free(&gpio_ida, gdev->id);
kfree_const(gdev->label);
@@ -893,7 +889,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
*/
base = gc->base;
if (base < 0) {
- base = gpiochip_find_base(gc->ngpio);
+ base = gpiochip_find_base_unlocked(gc->ngpio);
if (base < 0) {
spin_unlock_irqrestore(&gpio_lock, flags);
ret = base;
@@ -913,7 +909,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
gdev->base = base;
- ret = gpiodev_add_to_list(gdev);
+ ret = gpiodev_add_to_list_unlocked(gdev);
if (ret) {
spin_unlock_irqrestore(&gpio_lock, flags);
chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
@@ -1009,15 +1005,15 @@ err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
gpiochip_free_valid_mask(gc);
+err_remove_from_list:
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&gdev->list);
+ spin_unlock_irqrestore(&gpio_lock, flags);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
gpio_device_put(gdev);
goto err_print_message;
}
-err_remove_from_list:
- spin_lock_irqsave(&gpio_lock, flags);
- list_del(&gdev->list);
- spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@@ -1048,8 +1044,8 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
void gpiochip_remove(struct gpio_chip *gc)
{
struct gpio_device *gdev = gc->gpiodev;
- unsigned long flags;
- unsigned int i;
+ unsigned long flags;
+ unsigned int i;
down_write(&gdev->sem);
@@ -1071,7 +1067,7 @@ void gpiochip_remove(struct gpio_chip *gc)
spin_lock_irqsave(&gpio_lock, flags);
for (i = 0; i < gdev->ngpio; i++) {
- if (gpiochip_is_requested(gc, i))
+ if (test_bit(FLAG_REQUESTED, &gdev->descs[i].flags))
break;
}
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1080,6 +1076,9 @@ void gpiochip_remove(struct gpio_chip *gc)
dev_crit(&gdev->dev,
"REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
+ scoped_guard(spinlock_irqsave, &gpio_lock)
+ list_del(&gdev->list);
+
/*
* The gpiochip side puts its use of the device to rest here:
* if there are no userspace clients, the chardev and device will
@@ -2190,10 +2189,10 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
*/
static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *gc = desc->gdev->chip;
- int ret;
- unsigned long flags;
- unsigned offset;
+ struct gpio_chip *gc = desc->gdev->chip;
+ unsigned long flags;
+ unsigned int offset;
+ int ret;
if (label) {
label = kstrdup_const(label, GFP_KERNEL);
@@ -2305,9 +2304,9 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
static bool gpiod_free_commit(struct gpio_desc *desc)
{
- bool ret = false;
- unsigned long flags;
- struct gpio_chip *gc;
+ struct gpio_chip *gc;
+ unsigned long flags;
+ bool ret = false;
might_sleep();
@@ -2336,9 +2335,6 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
#ifdef CONFIG_OF_DYNAMIC
desc->hog = NULL;
#endif
-#ifdef CONFIG_GPIO_CDEV
- WRITE_ONCE(desc->debounce_period_us, 0);
-#endif
ret = true;
}
@@ -2358,38 +2354,58 @@ void gpiod_free(struct gpio_desc *desc)
return;
if (!gpiod_free_commit(desc))
- WARN_ON(extra_checks);
+ WARN_ON(1);
module_put(desc->gdev->owner);
gpio_device_put(desc->gdev);
}
/**
- * gpiochip_is_requested - return string iff signal was requested
- * @gc: controller managing the signal
- * @offset: of signal within controller's 0..(ngpio - 1) range
+ * gpiochip_dup_line_label - Get a copy of the consumer label.
+ * @gc: GPIO chip controlling this line.
+ * @offset: Hardware offset of the line.
*
- * Returns NULL if the GPIO is not currently requested, else a string.
- * The string returned is the label passed to gpio_request(); if none has been
- * passed it is a meaningless, non-NULL constant.
+ * Returns:
+ * Pointer to a copy of the consumer label if the line is requested or NULL
+ * if it's not. If a valid pointer was returned, it must be freed using
+ * kfree(). In case of a memory allocation error, the function returns
+ * ERR_PTR(-ENOMEM).
*
- * This function is for use by GPIO controller drivers. The label can
- * help with diagnostics, and knowing that the signal is used as a GPIO
- * can help avoid accidentally multiplexing it to another controller.
+ * Must not be called from atomic context.
*/
-const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset)
+char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
+ char *label;
desc = gpiochip_get_desc(gc, offset);
if (IS_ERR(desc))
return NULL;
- if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
+ guard(spinlock_irqsave)(&gpio_lock);
+
+ if (!test_bit(FLAG_REQUESTED, &desc->flags))
return NULL;
- return desc->label;
+
+ /*
+ * FIXME: Once we mark gpiod_direction_input/output() and
+ * gpiod_get_direction() with might_sleep(), we'll be able to protect
+ * the GPIO descriptors with a mutex (while value setting operations will
+ * become lockless).
+ *
+ * Until this happens, this allocation needs to be atomic.
+ */
+ label = kstrdup(desc->label, GFP_ATOMIC);
+ if (!label)
+ return ERR_PTR(-ENOMEM);
+
+ return label;
+}
+EXPORT_SYMBOL_GPL(gpiochip_dup_line_label);
+
+static inline const char *function_name_or_default(const char *con_id)
+{
+ return con_id ?: "(default)";
}
-EXPORT_SYMBOL_GPL(gpiochip_is_requested);
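A usage sketch for the new API from a controller driver's debug path
(function and variable names illustrative): unlike gpiochip_is_requested(),
the returned string is a copy that the caller owns and must free.

#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

static void show_line_consumer(struct seq_file *s, struct gpio_chip *gc,
			       unsigned int offset)
{
	char *label;

	label = gpiochip_dup_line_label(gc, offset);
	if (IS_ERR(label))
		return;		/* allocation failure */
	if (!label)
		return;		/* line not requested */

	seq_printf(s, "gpio-%u: consumer %s\n", offset, label);
	kfree(label);		/* caller owns the copy */
}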
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
@@ -2419,10 +2435,11 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags)
{
struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);
+ const char *name = function_name_or_default(label);
int ret;
if (IS_ERR(desc)) {
- chip_err(gc, "failed to get GPIO descriptor\n");
+ chip_err(gc, "failed to get GPIO %s descriptor\n", name);
return desc;
}
@@ -2432,8 +2449,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
- chip_err(gc, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
+ chip_err(gc, "setup of own GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -2569,8 +2586,8 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- struct gpio_chip *gc;
- int ret = 0;
+ struct gpio_chip *gc;
+ int ret = 0;
VALIDATE_DESC(desc);
gc = desc->gdev->chip;
@@ -2919,7 +2936,7 @@ static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *des
static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
{
- struct gpio_chip *gc;
+ struct gpio_chip *gc;
int value;
gc = desc->gdev->chip;
@@ -3214,7 +3231,7 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- struct gpio_chip *gc;
+ struct gpio_chip *gc;
gc = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
@@ -3721,7 +3738,7 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
*/
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
- might_sleep_if(extra_checks);
+ might_sleep();
VALIDATE_DESC(desc);
return gpiod_get_raw_value_commit(desc);
}
@@ -3740,7 +3757,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
int value;
- might_sleep_if(extra_checks);
+ might_sleep();
VALIDATE_DESC(desc);
value = gpiod_get_raw_value_commit(desc);
if (value < 0)
@@ -3771,7 +3788,7 @@ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
- might_sleep_if(extra_checks);
+ might_sleep();
if (!desc_array)
return -EINVAL;
return gpiod_get_array_value_complex(true, true, array_size,
@@ -3797,7 +3814,7 @@ int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
- might_sleep_if(extra_checks);
+ might_sleep();
if (!desc_array)
return -EINVAL;
return gpiod_get_array_value_complex(false, true, array_size,
@@ -3818,7 +3835,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
*/
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
- might_sleep_if(extra_checks);
+ might_sleep();
VALIDATE_DESC_VOID(desc);
gpiod_set_raw_value_commit(desc, value);
}
@@ -3836,7 +3853,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
*/
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
- might_sleep_if(extra_checks);
+ might_sleep();
VALIDATE_DESC_VOID(desc);
gpiod_set_value_nocheck(desc, value);
}
@@ -3859,7 +3876,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
- might_sleep_if(extra_checks);
+ might_sleep();
if (!desc_array)
return -EINVAL;
return gpiod_set_array_value_complex(true, true, array_size, desc_array,
@@ -3901,7 +3918,7 @@ int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
- might_sleep_if(extra_checks);
+ might_sleep();
if (!desc_array)
return -EINVAL;
return gpiod_set_array_value_complex(false, true, array_size,
@@ -4108,19 +4125,17 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
enum gpiod_flags *flags,
unsigned long *lookupflags)
{
+ const char *name = function_name_or_default(con_id);
struct gpio_desc *desc = ERR_PTR(-ENOENT);
if (is_of_node(fwnode)) {
- dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags);
} else if (is_acpi_node(fwnode)) {
- dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags);
} else if (is_software_node(fwnode)) {
- dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n",
- fwnode, con_id);
+ dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n", fwnode, name);
desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags);
}
@@ -4136,6 +4151,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
bool platform_lookup_allowed)
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ const char *name = function_name_or_default(con_id);
struct gpio_desc *desc;
int ret;
@@ -4151,7 +4167,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
}
if (IS_ERR(desc)) {
- dev_dbg(consumer, "No GPIO consumer %s found\n", con_id);
+ dev_dbg(consumer, "No GPIO consumer %s found\n", name);
return desc;
}
@@ -4172,15 +4188,14 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
*
* FIXME: Make this more sane and safe.
*/
- dev_info(consumer,
- "nonexclusive access to GPIO for %s\n", con_id);
+ dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
return desc;
}
ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (ret < 0) {
- dev_dbg(consumer, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
+ dev_dbg(consumer, "setup of GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -4296,6 +4311,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags)
{
+ const char *name = function_name_or_default(con_id);
int ret;
if (lflags & GPIO_ACTIVE_LOW)
@@ -4339,7 +4355,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- gpiod_dbg(desc, "no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for GPIO %s\n", name);
return 0;
}
@@ -4701,13 +4717,11 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
- struct gpio_chip *gc = gdev->chip;
- struct gpio_desc *desc;
- unsigned gpio = gdev->base;
- int value;
- bool is_out;
- bool is_irq;
- bool active_low;
+ struct gpio_chip *gc = gdev->chip;
+ bool active_low, is_irq, is_out;
+ unsigned int gpio = gdev->base;
+ struct gpio_desc *desc;
+ int value;
for_each_gpio_desc(gc, desc) {
if (test_bit(FLAG_REQUESTED, &desc->flags)) {
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 87ec7ba5b..c6e5fb9aa 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -147,7 +147,6 @@ void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
* @label: Name of the consumer
* @name: Line name
* @hog: Pointer to the device node that hogs this line (if any)
- * @debounce_period_us: Debounce period in microseconds
*
* These are obtained using gpiod_get() and are preferable to the old
* integer-based handles.
@@ -185,10 +184,6 @@ struct gpio_desc {
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
-#ifdef CONFIG_GPIO_CDEV
- /* debounce period in microseconds */
- unsigned int debounce_period_us;
-#endif
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 9b079f3a1..c7edba18a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,16 +74,17 @@ config DRM_KUNIT_TEST_HELPERS
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT
- select PRIME_NUMBERS
+ depends on DRM && KUNIT && MMU
+ select DRM_BUDDY
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_LIB_RANDOM
- select DRM_KMS_HELPER
- select DRM_BUDDY
+ select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
- select DRM_EXEC
+ select DRM_LIB_RANDOM
+ select PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -276,6 +277,8 @@ source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
+source "drivers/gpu/drm/xe/Kconfig"
+
source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
@@ -395,6 +398,8 @@ source "drivers/gpu/drm/solomon/Kconfig"
source "drivers/gpu/drm/sprd/Kconfig"
+source "drivers/gpu/drm/imagination/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && MMU && HYPERV
@@ -408,27 +413,6 @@ config DRM_HYPERV
If M is selected the module will be called hyperv_drm.
-# Keep legacy drivers last
-
-menuconfig DRM_LEGACY
- bool "Enable legacy drivers (DANGEROUS)"
- depends on DRM && MMU
- help
- Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
- APIs to user-space, which can be used to circumvent access
- restrictions and other security measures. For backwards compatibility
- those drivers are still available, but their use is highly
- inadvisable and might harm your system.
-
- You are recommended to use the safe modeset-only drivers instead, and
- perform 3D emulation in user-space.
-
- Unless you have strong reasons to go rogue, say "N".
-
-if DRM_LEGACY
-# leave here to list legacy drivers
-endif # DRM_LEGACY
-
config DRM_EXPORT_FOR_TESTS
bool
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8e1bde059..104b42df2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -22,6 +22,7 @@ drm-y := \
drm_drv.o \
drm_dumb_buffers.o \
drm_edid.o \
+ drm_eld.o \
drm_encoder.o \
drm_file.o \
drm_fourcc.o \
@@ -46,18 +47,6 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
-drm-$(CONFIG_DRM_LEGACY) += \
- drm_agpsupport.o \
- drm_bufs.o \
- drm_context.o \
- drm_dma.o \
- drm_hashtab.o \
- drm_irq.o \
- drm_legacy_misc.o \
- drm_lock.o \
- drm_memory.o \
- drm_scatter.o \
- drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -145,6 +134,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdxcp/
obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_XE) += xe/
obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
@@ -198,3 +188,4 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/
obj-y += solomon/
obj-$(CONFIG_DRM_SPRD) += sprd/
obj-$(CONFIG_DRM_LOONGSON) += loongson/
+obj-$(CONFIG_DRM_POWERVR) += imagination/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2afecc550..260e32ef7 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
- amdgpu_ring_mux.o amdgpu_xcp.o
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 13c62a26a..79827a6dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -109,6 +109,8 @@
#include "amdgpu_mca.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
+#include "amdgpu_seq64.h"
+#include "amdgpu_reg_state.h"
#define MAX_GPU_INSTANCE 64
@@ -251,6 +253,8 @@ extern int amdgpu_seamless;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
+extern int amdgpu_wbrf;
+
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -469,6 +473,7 @@ struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct amdgpu_bo_va *prt_va;
struct amdgpu_bo_va *csa_va;
+ struct amdgpu_bo_va *seq64_va;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
@@ -507,6 +512,31 @@ struct amdgpu_allowed_register_entry {
bool grbm_indexed;
};
+/**
+ * enum amd_reset_method - Methods for resetting AMD GPU devices
+ *
+ * @AMD_RESET_METHOD_NONE: The device will not be reset.
+ * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs.
+ * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for
+ * any device.
+ * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.)
+ * individually. Suitable only for some discrete GPUs, not
+ * available for all ASICs.
+ * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs
+ * are reset depends on the ASIC. Notably doesn't reset IPs
+ * shared with the CPU on APUs or the memory controllers (so
+ * VRAM is not lost). Not available on all ASICs.
+ * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) powers the card off and on
+ * without powering off the PCI bus. Suitable only for
+ * discrete GPUs.
+ * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset
+ * and does a secondary bus reset or FLR, depending on what the
+ * underlying hardware supports.
+ *
+ * Methods available to the AMD GPU driver for resetting the device. Not all
+ * methods are suitable for every device. The user can override the method
+ * using the module parameter `reset_method`.
+ */
enum amd_reset_method {
AMD_RESET_METHOD_NONE = -1,
AMD_RESET_METHOD_LEGACY = 0,
@@ -586,6 +616,10 @@ struct amdgpu_asic_funcs {
const struct amdgpu_video_codecs **codecs);
/* encode "> 32bits" smn addressing */
u64 (*encode_ext_smn_addressing)(int ext_id);
+
+ ssize_t (*get_reg_state)(struct amdgpu_device *adev,
+ enum amdgpu_reg_state reg_state, void *buf,
+ size_t max_size);
};
/*
@@ -988,6 +1022,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* for userq and VM fences */
+ struct amdgpu_seq64 seq64;
+
/* KFD */
struct amdgpu_kfd_dev kfd;
@@ -1110,6 +1147,7 @@ struct amdgpu_device {
bool debug_vm;
bool debug_largebar;
bool debug_disable_soft_recovery;
+ bool debug_use_vram_fw_buf;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
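The enum values line up with the amdgpu.reset_method module parameter; a
simplified sketch of how an ASIC callback typically honors the override
(not taken from this patch, and the fallback policy is illustrative):

/* assume: extern int amdgpu_reset_method; NONE (-1) means auto-select */
static enum amd_reset_method pick_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method != AMD_RESET_METHOD_NONE)
		return amdgpu_reset_method;	/* explicit user choice */

	/* otherwise choose per-ASIC, e.g. prefer BACO when supported */
	return amdgpu_dpm_is_baco_supported(adev) ?
	       AMD_RESET_METHOD_BACO : AMD_RESET_METHOD_MODE1;
}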
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3d126f296..131983ed4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -138,6 +138,30 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
+static const struct drm_client_funcs kfd_client_funcs = {
+ .unregister = drm_client_release,
+};
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!adev->kfd.init_complete || adev->kfd.client.dev)
+ return 0;
+
+ ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
+ &kfd_client_funcs);
+ if (ret) {
+ dev_err(adev->dev, "Failed to init DRM client: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_client_register(&adev->kfd.client);
+
+ return 0;
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
@@ -708,35 +732,6 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
return false;
}
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
- uint16_t vmid)
-{
- if (adev->family == AMDGPU_FAMILY_AI) {
- int i;
-
- for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
- } else {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0);
- }
-
- return 0;
-}
-
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
- uint16_t pasid,
- enum TLB_FLUSH_TYPE flush_type,
- uint32_t inst)
-{
- bool all_hub = false;
-
- if (adev->family == AMDGPU_FAMILY_AI ||
- adev->family == AMDGPU_FAMILY_RV)
- all_hub = true;
-
- return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst);
-}
-
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
return adev->have_atomics_support;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index dac983da9..27c61c535 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -33,6 +33,7 @@
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
+#include <drm/drm_client.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"
@@ -83,6 +84,7 @@ struct kgd_mem {
struct amdgpu_sync sync;
+ uint32_t gem_handle;
bool aql_queue;
bool is_imported;
};
@@ -105,6 +107,9 @@ struct amdgpu_kfd_dev {
/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
struct dev_pagemap pgmap;
+
+ /* Client for KFD BO GEM handle allocations */
+ struct drm_client_dev client;
};
enum kgd_engine_type {
@@ -162,11 +167,6 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
- uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
- uint16_t pasid, enum TLB_FLUSH_TYPE flush_type,
- uint32_t inst);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
@@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm,
struct svm_range_bo *svm_bo);
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
@@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
@@ -311,14 +313,13 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
- struct dma_fence **ef);
+ struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
- struct dma_buf *dmabuf,
- uint64_t va, void *drm_priv,
- struct kgd_mem **mem, uint64_t *size,
- uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 625db444d..3a3f3ce09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -200,7 +200,7 @@ int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (!(ring && ring->sched.thread))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
/* stop scheduler and drain ring. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
index f6598b9e4..a5c7259cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -141,7 +141,7 @@ static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 6bf448ab3..ca4a6b828 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -214,7 +214,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -301,7 +301,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index cd06e4a6d..0f3e2944e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -238,7 +238,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -324,7 +324,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 00fbc0f44..5a35a8ca8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -363,7 +363,7 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -460,7 +460,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 41fbc4fd0..daa66eb4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
+#include <linux/fdtable.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
@@ -806,13 +807,22 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
if (!mem->dmabuf) {
- struct dma_buf *ret = amdgpu_gem_prime_export(
- &mem->bo->tbo.base,
+ struct amdgpu_device *bo_adev;
+ struct dma_buf *dmabuf;
+ int r, fd;
+
+ bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
+ mem->gem_handle,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
- DRM_RDWR : 0);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- mem->dmabuf = ret;
+ DRM_RDWR : 0, &fd);
+ if (r)
+ return r;
+ dmabuf = dma_buf_get(fd);
+ close_fd(fd);
+ if (WARN_ON_ONCE(IS_ERR(dmabuf)))
+ return PTR_ERR(dmabuf);
+ mem->dmabuf = dmabuf;
}
return 0;
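The export path now routes through the DRM client file so the dma-buf stays
tied to KFD's own GEM handle; the fd is only a temporary vehicle. Reduced
to a sketch (names illustrative, error handling as in the hunk above):

#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>

static int export_bo_sketch(struct drm_device *dev, struct drm_file *file,
			    u32 handle, bool writable, struct dma_buf **out)
{
	struct dma_buf *buf;
	int fd, ret;

	ret = drm_gem_prime_handle_to_fd(dev, file, handle,
					 writable ? DRM_RDWR : 0, &fd);
	if (ret)
		return ret;

	buf = dma_buf_get(fd);	/* take our own reference... */
	close_fd(fd);		/* ...then drop the fd; the ref survives */
	if (WARN_ON_ONCE(IS_ERR(buf)))
		return PTR_ERR(buf);

	*out = buf;
	return 0;
}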
@@ -1137,7 +1147,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->n_vms = 1;
ctx->sync = &mem->sync;
- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&ctx->exec) {
ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
drm_exec_retry_on_contention(&ctx->exec);
@@ -1176,7 +1186,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
int ret;
ctx->sync = &mem->sync;
- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&ctx->exec) {
ctx->n_vms = 0;
list_for_each_entry(entry, &mem->attachments, list) {
@@ -1384,7 +1394,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
amdgpu_amdkfd_restore_userptr_worker);
*process_info = info;
- *ef = dma_fence_get(&info->eviction_fence->base);
}
vm->process_info = *process_info;
@@ -1415,6 +1424,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
list_add_tail(&vm->vm_list_node,
&(vm->process_info->vm_list_head));
vm->process_info->n_vms++;
+
+ *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
mutex_unlock(&vm->process_info->lock);
return 0;
@@ -1426,10 +1437,7 @@ validate_pd_fail:
reserve_pd_fail:
vm->process_info = NULL;
if (info) {
- /* Two fence references: one in info and one in *ef */
dma_fence_put(&info->eviction_fence->base);
- dma_fence_put(*ef);
- *ef = NULL;
*process_info = NULL;
put_pid(info->pid);
create_evict_fence_fail:
@@ -1623,7 +1631,8 @@ int amdgpu_amdkfd_criu_resume(void *p)
goto out_unlock;
}
WRITE_ONCE(pinfo->block_mmu_notifications, false);
- schedule_delayed_work(&pinfo->restore_userptr_work, 0);
+ queue_delayed_work(system_freezable_wq,
+ &pinfo->restore_userptr_work, 0);
out_unlock:
mutex_unlock(&pinfo->lock);
@@ -1779,6 +1788,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("Failed to allow vma node access. ret %d\n", ret);
goto err_node_allow;
}
+ ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
+ if (ret)
+ goto err_gem_handle_create;
bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
@@ -1830,6 +1842,8 @@ allocate_init_user_pages_failed:
err_pin_bo:
err_validate_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+ drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
+err_gem_handle_create:
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
/* Don't unreserve system mem limit twice */
@@ -1837,6 +1851,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -1942,8 +1957,11 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the BO*/
drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
- if (mem->dmabuf)
+ drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
+ if (mem->dmabuf) {
dma_buf_put(mem->dmabuf);
+ mem->dmabuf = NULL;
+ }
mutex_destroy(&mem->lock);
/* If this releases the last reference, it will end up calling
@@ -2068,21 +2086,35 @@ out:
return ret;
}
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
struct kfd_mem_attachment *entry;
struct amdgpu_vm *vm;
+ int ret;
vm = drm_priv_to_vm(drm_priv);
mutex_lock(&mem->lock);
+ ret = amdgpu_bo_reserve(mem->bo, true);
+ if (ret)
+ goto out;
+
list_for_each_entry(entry, &mem->attachments, list) {
- if (entry->bo_va->base.vm == vm)
- kfd_mem_dmaunmap_attachment(mem, entry);
+ if (entry->bo_va->base.vm != vm)
+ continue;
+ if (entry->bo_va->base.bo->tbo.ttm &&
+ !entry->bo_va->base.bo->tbo.ttm->sg)
+ continue;
+
+ kfd_mem_dmaunmap_attachment(mem, entry);
}
+ amdgpu_bo_unreserve(mem->bo);
+out:
mutex_unlock(&mem->lock);
+
+ return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -2295,34 +2327,26 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
return 0;
}
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
- struct dma_buf *dma_buf,
- uint64_t va, void *drm_priv,
- struct kgd_mem **mem, uint64_t *size,
- uint64_t *mmap_offset)
+static int import_obj_create(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf,
+ struct drm_gem_object *obj,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
- struct drm_gem_object *obj;
struct amdgpu_bo *bo;
int ret;
- obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT))) {
+ AMDGPU_GEM_DOMAIN_GTT)))
/* Only VRAM and GTT BOs are supported */
- ret = -EINVAL;
- goto err_put_obj;
- }
+ return -EINVAL;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem) {
- ret = -ENOMEM;
- goto err_put_obj;
- }
+ if (!*mem)
+ return -ENOMEM;
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
if (ret)
@@ -2372,8 +2396,41 @@ err_remove_mem:
drm_vma_node_revoke(&obj->vma_node, drm_priv);
err_free_mem:
kfree(*mem);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ int ret;
+
+ ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
+ &handle);
+ if (ret)
+ return ret;
+ obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
+ if (!obj) {
+ ret = -EINVAL;
+ goto err_release_handle;
+ }
+
+ ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
+ mmap_offset);
+ if (ret)
+ goto err_put_obj;
+
+ (*mem)->gem_handle = handle;
+
+ return 0;
+
err_put_obj:
drm_gem_object_put(obj);
+err_release_handle:
+ drm_gem_handle_delete(adev->kfd.client.file, handle);
return ret;
}
@@ -2426,7 +2483,8 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
if (r)
pr_err("Failed to quiesce KFD\n");
- schedule_delayed_work(&process_info->restore_userptr_work,
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
mutex_unlock(&process_info->notifier_lock);
@@ -2552,7 +2610,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
amdgpu_sync_create(&sync);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
/* Reserve all BOs and page tables for validation */
drm_exec_until_all_locked(&exec) {
/* Reserve all the page directories */
@@ -2749,7 +2807,8 @@ unlock_out:
/* If validation failed, reschedule another attempt */
if (evicted_bos) {
- schedule_delayed_work(&process_info->restore_userptr_work,
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
kfd_smi_event_queue_restore_rescheduled(mm);
@@ -2758,6 +2817,23 @@ unlock_out:
put_task_struct(usertask);
}
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
+ struct dma_fence *new_ef)
+{
+ struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
+ /* protected by process_info->lock */);
+
+ /* If we're replacing an unsignaled eviction fence, that fence will
+ * never be signaled, and anyone still waiting on it will hang
+ * forever. This should never happen: the fence is only replaced in
+ * restore_work, which is scheduled only after the eviction work has
+ * signaled the fence.
+ */
+ WARN_ONCE(!dma_fence_is_signaled(old_ef),
+ "Replacing unsignaled eviction fence");
+ dma_fence_put(old_ef);
+}
+
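replace_eviction_fence() publishes the new fence through an __rcu pointer while holding process_info->lock; a reader that is not taking that lock would pair with it roughly like this (a hedged sketch — kfd_process_ef_get is a hypothetical helper, not part of this patch):

/* Take a reference to the current eviction fence under RCU.
 * dma_fence_get_rcu() fails if the fence is already being freed,
 * hence the retry loop.
 */
static struct dma_fence *kfd_process_ef_get(struct dma_fence __rcu **ef)
{
	struct dma_fence *fence;

	rcu_read_lock();
	do {
		fence = rcu_dereference(*ef);
	} while (fence && !dma_fence_get_rcu(fence));
	rcu_read_unlock();

	return fence;
}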
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
* KFD process identified by process_info
*
@@ -2776,12 +2852,11 @@ unlock_out:
* 7. Add fence to all PD and PT BOs.
* 8. Unreserve all BOs
*/
-int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
{
struct amdkfd_process_info *process_info = info;
struct amdgpu_vm *peer_vm;
struct kgd_mem *mem;
- struct amdgpu_amdkfd_fence *new_fence;
struct list_head duplicate_save;
struct amdgpu_sync sync_obj;
unsigned long failed_size = 0;
@@ -2793,7 +2868,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
mutex_lock(&process_info->lock);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
@@ -2825,12 +2900,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (ret)
goto validate_map_fail;
- ret = process_sync_pds_resv(process_info, &sync_obj);
- if (ret) {
- pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
- goto validate_map_fail;
- }
-
/* Validate BOs and map them to GPUVM (update VM page tables). */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
@@ -2881,6 +2950,19 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+ /* Update mappings not managed by KFD */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(
+ peer_vm->root.bo->tbo.bdev);
+
+ ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
+ if (ret) {
+ pr_debug("Memory eviction: handle moved failed. Try again\n");
+ goto validate_map_fail;
+ }
+ }
+
/* Update page directories */
ret = process_update_pds(process_info, &sync_obj);
if (ret) {
@@ -2888,25 +2970,47 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
+ /* Sync with fences on all the page tables. They implicitly depend on any
+ * move fences from amdgpu_vm_handle_moved above.
+ */
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
+ }
+
/* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
- /* Release old eviction fence and create new one, because fence only
- * goes from unsignaled to signaled, fence cannot be reused.
- * Use context and mm from the old fence.
+ /* The old eviction fence may be unsignaled if restore happens
+ * after a GPU reset or suspend/resume. Keep the old fence in that
+ * case. Otherwise release the old eviction fence and create new
+ * one, because fence only goes from unsignaled to signaled once
+ * and cannot be reused. Use context and mm from the old fence.
+ *
+ * If an old eviction fence signals after this check, that's OK.
+ * Anyone signaling an eviction fence must stop the queues first
+ * and schedule another restore worker.
*/
- new_fence = amdgpu_amdkfd_fence_create(
+ if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
+ struct amdgpu_amdkfd_fence *new_fence =
+ amdgpu_amdkfd_fence_create(
process_info->eviction_fence->base.context,
process_info->eviction_fence->mm,
NULL);
- if (!new_fence) {
- pr_err("Failed to create eviction fence\n");
- ret = -ENOMEM;
- goto validate_map_fail;
+
+ if (!new_fence) {
+ pr_err("Failed to create eviction fence\n");
+ ret = -ENOMEM;
+ goto validate_map_fail;
+ }
+ dma_fence_put(&process_info->eviction_fence->base);
+ process_info->eviction_fence = new_fence;
+ replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
+ } else {
+ WARN_ONCE(*ef != &process_info->eviction_fence->base,
+ "KFD eviction fence doesn't match KGD process_info");
}
- dma_fence_put(&process_info->eviction_fence->base);
- process_info->eviction_fence = new_fence;
- *ef = dma_fence_get(&new_fence->base);
/* Attach new eviction fence to all BOs except pinned ones */
list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 7473a42f7..9caba1031 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -103,7 +103,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
int bpc = 8;
- unsigned mode_clock, max_tmds_clock;
+ unsigned int mode_clock, max_tmds_clock;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
@@ -255,6 +255,7 @@ struct edid *amdgpu_connector_edid(struct drm_connector *connector)
return amdgpu_connector->edid;
} else if (edid_blob) {
struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+
if (edid)
amdgpu_connector->edid = edid;
}
@@ -581,6 +582,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
} else {
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
}
@@ -797,6 +799,7 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
else {
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
}
@@ -979,6 +982,41 @@ amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
return false;
}
+static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
+ struct drm_connector *connector,
+ struct amdgpu_connector *amdgpu_connector)
+{
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+ struct amdgpu_connector *list_amdgpu_connector;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ if (amdgpu_connector->shared_ddc && *status == connector_status_connected) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(list_connector,
+ &iter) {
+ if (connector == list_connector)
+ continue;
+ list_amdgpu_connector = to_amdgpu_connector(list_connector);
+ if (list_amdgpu_connector->shared_ddc &&
+ list_amdgpu_connector->ddc_bus->rec.i2c_id ==
+ amdgpu_connector->ddc_bus->rec.i2c_id) {
+ /* cases where both connectors are digital */
+ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+ /* hpd is our only option in this case */
+ if (!amdgpu_display_hpd_sense(adev,
+ amdgpu_connector->hpd.hpd)) {
+ amdgpu_connector_free_edid(connector);
+ *status = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+}
+
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
@@ -1065,32 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
* DDC line. The latter is more complex because with DVI<->HDMI adapters
* you don't really know what's connected to which port as both are digital.
*/
- if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
- struct drm_connector *list_connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *list_amdgpu_connector;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(list_connector,
- &iter) {
- if (connector == list_connector)
- continue;
- list_amdgpu_connector = to_amdgpu_connector(list_connector);
- if (list_amdgpu_connector->shared_ddc &&
- (list_amdgpu_connector->ddc_bus->rec.i2c_id ==
- amdgpu_connector->ddc_bus->rec.i2c_id)) {
- /* cases where both connectors are digital */
- if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
- /* hpd is our only option in this case */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- amdgpu_connector_free_edid(connector);
- ret = connector_status_disconnected;
- }
- }
- }
- }
- drm_connector_list_iter_end(&iter);
- }
+ amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
}
}
@@ -1192,6 +1205,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
static void amdgpu_connector_dvi_force(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
if (connector->force == DRM_FORCE_ON)
amdgpu_connector->use_digital = false;
if (connector->force == DRM_FORCE_ON_DIGITAL)
@@ -1426,6 +1440,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
else if (amdgpu_connector->dac_load_detect) { /* try load detection */
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
ret = encoder_funcs->detect(encoder, connector);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e50be6500..116d3756c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -66,7 +66,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
amdgpu_sync_create(&p->sync);
drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
return 0;
}
@@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -870,9 +870,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo = e->bo;
int i;
- e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
+ e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
+ sizeof(struct page *),
+ GFP_KERNEL);
if (!e->user_pages) {
DRM_ERROR("kvmalloc_array failure\n");
r = -ENOMEM;
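The kvcalloc() conversion above is purely cosmetic: kvcalloc(n, size, flags) is defined as kvmalloc_array(n, size, flags | __GFP_ZERO), so the allocation behavior, including the n * size overflow check, is unchanged. A minimal sketch of the equivalence:

/* These two calls allocate the same zeroed array; kvcalloc simply
 * spells the zeroing intent in the function name.
 */
pages = kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
pages = kvcalloc(n, sizeof(struct page *), GFP_KERNEL);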
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 720011019..796fa6f14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -70,7 +70,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
@@ -110,7 +110,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e2ae9ba14..5cb33ac99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return DRM_SCHED_PRIORITY_MIN;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_LOW:
- return DRM_SCHED_PRIORITY_MIN;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_NORMAL:
return DRM_SCHED_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8d4a3ff65..1afbb2e93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -540,7 +540,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_PCIE(*pos);
+ if (upper_32_bits(*pos))
+ value = RREG32_PCIE_EXT(*pos);
+ else
+ value = RREG32_PCIE(*pos);
+
r = put_user(value, (uint32_t *)buf);
if (r)
goto out;
@@ -600,7 +604,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r)
goto out;
- WREG32_PCIE(*pos, value);
+ if (upper_32_bits(*pos))
+ WREG32_PCIE_EXT(*pos, value);
+ else
+ WREG32_PCIE(*pos, value);
result += 4;
buf += 4;
@@ -1671,9 +1678,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
- kthread_park(ring->sched.thread);
+ drm_sched_wqueue_stop(&ring->sched);
}
seq_puts(m, "run ib test:\n");
@@ -1687,9 +1694,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
- kthread_unpark(ring->sched.thread);
+ drm_sched_wqueue_start(&ring->sched);
}
up_write(&adev->reset_domain->sem);
@@ -1909,7 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
- if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring) ||
+ !ring->funcs->preempt_ib)
return -EINVAL;
/* the last preemption failed */
@@ -1927,7 +1935,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
goto pro_end;
/* stop the scheduler */
- kthread_park(ring->sched.thread);
+ drm_sched_wqueue_stop(&ring->sched);
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
@@ -1961,7 +1969,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
failure:
/* restart the scheduler */
- kthread_unpark(ring->sched.thread);
+ drm_sched_wqueue_start(&ring->sched);
up_read(&adev->reset_domain->sem);
@@ -2146,6 +2154,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_firmware_init(adev);
amdgpu_ta_if_debugfs_init(adev);
+ amdgpu_debugfs_mes_event_log_init(adev);
+
#if defined(CONFIG_DRM_AMD_DC)
if (adev->dc_enabled)
dtn_debugfs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 371a6f0de..0425432d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,3 +32,5 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7f48c7ec4..d0afb9ba3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -162,6 +162,65 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
static DEVICE_ATTR(pcie_replay_count, 0444,
amdgpu_device_get_pcie_replay_count, NULL);
+static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t ppos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t bytes_read;
+
+ switch (ppos) {
+ case AMDGPU_SYS_REG_STATE_XGMI:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_WAFL:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_PCIE:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_USR:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_USR_1:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bytes_read;
+}
+
+BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+ AMDGPU_SYS_REG_STATE_END);
+
+int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!amdgpu_asic_get_reg_state_supported(adev))
+ return 0;
+
+ ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+
+ return ret;
+}
+
+void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_asic_get_reg_state_supported(adev))
+ return;
+ sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+}
+
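Note that the bin attribute above overloads the file offset as a selector: the ppos passed in picks which register-state dump to return rather than a byte position. A hedged userspace sketch (the sysfs path and the offset value are illustrative; the real offsets are the AMDGPU_SYS_REG_STATE_* constants):

/* Hypothetical reader: pread() at a state-selector offset. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/reg_state", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	/* 0 selects AMDGPU_SYS_REG_STATE_XGMI in this sketch */
	n = pread(fd, buf, sizeof(buf), 0);
	if (n > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}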
/**
* DOC: board_info
*
@@ -1541,7 +1600,7 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
if (adev->mman.keep_stolen_vga_memory)
return false;
- return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0);
+ return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}
/*
@@ -1552,11 +1611,15 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
-static bool amdgpu_device_pcie_dynamic_switching_supported(void)
+static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
+ /* eGPU change speeds based on USB4 fabric conditions */
+ if (dev_is_removable(adev->dev))
+ return true;
+
if (c->x86_vendor == X86_VENDOR_INTEL)
return false;
#endif
@@ -2389,7 +2452,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
- if (!amdgpu_device_pcie_dynamic_switching_supported())
+ if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
total = true;
@@ -2567,7 +2630,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
break;
}
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
ring->num_hw_submission, 0,
timeout, adev->reset_domain->wq,
@@ -2670,6 +2733,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
goto init_failed;
}
}
+
+ r = amdgpu_seq64_init(adev);
+ if (r) {
+ DRM_ERROR("allocate seq64 failed %d\n", r);
+ goto init_failed;
+ }
}
}
@@ -3132,6 +3201,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_device_wb_fini(adev);
amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
+ amdgpu_seq64_fini(adev);
}
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
@@ -4202,6 +4272,7 @@ fence_driver_init:
"Could not create amdgpu board attributes\n");
amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4324,6 +4395,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
amdgpu_fru_sysfs_fini(adev);
+ amdgpu_reg_state_sysfs_fini(adev);
+
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4451,6 +4524,8 @@ int amdgpu_device_prepare(struct drm_device *dev)
if (r)
goto unprepare;
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -4954,7 +5029,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
spin_lock(&ring->sched.job_list_lock);
@@ -5093,7 +5168,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
/* Clear job fence from fence drv to avoid force_completion
@@ -5560,7 +5635,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5629,7 +5704,7 @@ skip_hw_reset:
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
@@ -5691,6 +5766,39 @@ skip_sched_resume:
}
/**
+ * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * first physical partner to an AMD dGPU.
+ * This will exclude any virtual switches and links.
+ */
+static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while ((parent = pci_upstream_bridge(parent))) {
+ /* skip upstream/downstream switches internal to dGPU */
+ if (parent->vendor == PCI_VENDOR_ID_ATI)
+ continue;
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ break;
+ }
+}
+
+/**
* amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
* @adev: amdgpu_device pointer
@@ -5723,8 +5831,8 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
return;
- pcie_bandwidth_available(adev->pdev, NULL,
- &platform_speed_cap, &platform_link_width);
+ amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
+ &platform_link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
@@ -5951,7 +6059,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, NULL);
@@ -6001,6 +6109,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
+ struct amdgpu_hive_info *hive;
+ int hive_ras_recovery = 0;
+ struct amdgpu_ras *ras;
+
+ /* PCI error slot reset should be skipped during RAS recovery */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+ ras = amdgpu_ras_get_context(adev);
+ if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
+ ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ return PCI_ERS_RESULT_RECOVERED;
DRM_INFO("PCI error: slot reset callback!!\n");
@@ -6079,7 +6201,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e7e87a3b2..decbbe3d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -42,6 +42,7 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
+#include "amdgpu_trace.h"
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
@@ -63,6 +64,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
attach->peer2peer = false;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ trace_amdgpu_runpm_reference_dumps(1, __func__);
if (r < 0)
goto out;
@@ -70,6 +72,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
out:
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ trace_amdgpu_runpm_reference_dumps(0, __func__);
return r;
}
@@ -90,6 +93,7 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ trace_amdgpu_runpm_reference_dumps(0, __func__);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 855ab5963..64b1bb240 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -128,6 +128,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_VM = BIT(0),
AMDGPU_DEBUG_LARGEBAR = BIT(1),
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+ AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -209,6 +210,7 @@ int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
+int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -985,6 +987,22 @@ module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
module_param_named(agp, amdgpu_agp, int, 0444);
+/**
+ * DOC: wbrf (int)
+ * Enable Wifi RFI interference mitigation feature.
+ * Due to electrical and mechanical constraints, relatively high-powered harmonics of
+ * the (G-)DDR memory clocks are likely to interfere with the local radio module
+ * frequency bands used by Wifi 6/6e/7. To mitigate possible RFI, with this feature
+ * enabled, PMFW will use either a “shadowed P-State” or a “P-State”, based on the
+ * active list of in-use frequencies (to be avoided), both at initial setup and on
+ * P-state transitions. Note that enabling this feature may have a performance cost.
+ * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
+ */
+MODULE_PARM_DESC(wbrf,
+ "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
+module_param_named(wbrf, amdgpu_wbrf, int, 0444);
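A sketch of how a tri-state parameter like this is usually resolved at runtime (hypothetical helper names — the real wbrf plumbing lives in the SMU code, outside this hunk):

/* Hypothetical: -1 (auto) defers to a firmware capability probe,
 * anything else is an explicit user override.
 */
static bool amdgpu_wbrf_enabled(struct amdgpu_device *adev)
{
	if (amdgpu_wbrf >= 0)
		return amdgpu_wbrf != 0;

	return smu_wbrf_is_supported(adev);	/* hypothetical probe */
}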
+
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2113,6 +2131,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: soft reset for GPU recovery disabled\n");
adev->debug_disable_soft_recovery = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+ pr_info("debug: place fw in vram for frontdoor loading\n");
+ adev->debug_use_vram_fw_buf = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2224,6 +2247,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ddev);
+ amdgpu_init_debug_options(adev);
+
ret = amdgpu_driver_load_kms(adev, flags);
if (ret)
goto err_pci;
@@ -2243,6 +2268,10 @@ retry_init:
if (ret)
goto err_pci;
+ ret = amdgpu_amdkfd_drm_client_create(adev);
+ if (ret)
+ goto err_pci;
+
/*
* 1. don't init fbdev on hw without DCE
* 2. don't init fbdev if there are no connectors
@@ -2304,8 +2333,6 @@ retry_init:
amdgpu_get_secondary_funcs(adev);
}
- amdgpu_init_debug_options(adev);
-
return 0;
err_pci:
@@ -2424,8 +2451,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
}
for (i = 0; i < mgpu_info.num_dgpu; i++) {
adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_amdkfd_drm_client_create(adev);
+ }
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 5706b282a..c7df7fa34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -97,6 +97,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
stats.requested_visible_vram/1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats.requested_gtt/1024UL);
+ drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
+ drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
+ drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index dc2302127..70bff8cec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -183,6 +183,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
+ trace_amdgpu_runpm_reference_dumps(1, __func__);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -310,6 +311,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ trace_amdgpu_runpm_reference_dumps(0, __func__);
} while (last_seq != seq);
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 84beeaa4d..49a5f1c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -203,7 +203,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_exec exec;
long r;
- drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
drm_exec_retry_on_contention(&exec);
@@ -739,7 +739,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
if (gobj) {
r = drm_exec_lock_obj(&exec, gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 57516a8c5..431ec7265 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -202,8 +202,8 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
hmm_range->start, hmm_range->end);
- /* Assuming 128MB takes maximum 1 second to fault page address */
- timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL);
+ /* Assume faulting in a 64MB range takes at most 1 second */
+ timeout = max((hmm_range->end - hmm_range->start) >> 26, 1UL);
timeout *= HMM_RANGE_DEFAULT_TIMEOUT;
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -211,6 +211,7 @@ retry:
hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
r = hmm_range_fault(hmm_range);
if (unlikely(r)) {
+ schedule();
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
@@ -224,7 +225,6 @@ retry:
break;
hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT;
hmm_range->start = hmm_range->end;
- schedule();
} while (hmm_range->end < end);
hmm_range->start = start;
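A worked example for the retuned heuristic above, assuming HMM_RANGE_DEFAULT_TIMEOUT is 1000 (milliseconds):

/* Sketch of the timeout math for a 1 GiB range with the new shift.
 * (1 << 30) >> 26 == 16, so the fault gets 16 * 1000 ms == 16 s;
 * ranges of 64 MiB or less clamp to the 1 s minimum.
 */
unsigned long range = 1UL << 30;                 /* 1 GiB */
unsigned long t = max(range >> 26, 1UL);         /* 16 */
t *= HMM_RANGE_DEFAULT_TIMEOUT;                  /* 16000 ms */
timeout = jiffies + msecs_to_jiffies(t);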
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 82608df43..d79cb13e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -175,7 +175,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 1f3571985..71a5cf37b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -115,7 +115,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner);
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 598867919..bf4f48fe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1433,6 +1433,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
fpriv->csa_va = NULL;
}
+ amdgpu_seq64_unmap(adev, fpriv);
+
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.bo);
if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 061d88f44..59fafb839 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -218,6 +218,7 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
{
struct amdgpu_smuio_mcm_config_info mcm_info;
+ struct ras_err_addr err_addr = {0};
struct mca_bank_set mca_set;
struct mca_bank_node *node;
struct mca_bank_entry *entry;
@@ -246,10 +247,18 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
mcm_info.socket_id = entry->info.socket_id;
mcm_info.die_id = entry->info.aid;
+ if (blk == AMDGPU_RAS_BLOCK__UMC) {
+ err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
+ err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
+ err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
+ }
+
if (type == AMDGPU_MCA_ERROR_TYPE_UE)
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count);
+ amdgpu_ras_error_statistic_ue_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
else
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count);
+ amdgpu_ras_error_statistic_ce_count(err_data,
+ &mcm_info, &err_addr, (uint64_t)count);
}
out_mca_release:
@@ -377,7 +386,7 @@ static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
struct amdgpu_device *adev = (struct amdgpu_device *)data;
int ret;
- ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false);
+ ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
if (ret)
return ret;
@@ -485,7 +494,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_se
void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
- if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
+ if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6))
return;
debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index e51e8918e..b399f1b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -46,6 +46,8 @@
#define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16)
#define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0)
+#define MCA_REG__MISC0__ERRCNT(x) MCA_REG_FIELD(x, 43, 32)
+
#define MCA_REG__SYND__ERRORINFORMATION(x) MCA_REG_FIELD(x, 17, 0)
enum amdgpu_mca_ip {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 30c010836..da48b6da0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -98,6 +98,26 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
return 0;
}
+static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.event_log_gpu_obj,
+ &adev->mes.event_log_gpu_addr,
+ &adev->mes.event_log_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
+ return r;
+ }
+
+ memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
bitmap_free(adev->mes.doorbell_bitmap);
@@ -182,8 +202,14 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error;
+ r = amdgpu_mes_event_log_init(adev);
+ if (r)
+ goto error_doorbell;
+
return 0;
+error_doorbell:
+ amdgpu_mes_doorbell_free(adev);
error:
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
@@ -199,6 +225,10 @@ error_ids:
void amdgpu_mes_fini(struct amdgpu_device *adev)
{
+ amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
+ &adev->mes.event_log_gpu_addr,
+ &adev->mes.event_log_cpu_addr);
+
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
@@ -1153,7 +1183,7 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
amdgpu_sync_create(&sync);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec,
&ctx_data->meta_data_obj->tbo.base);
@@ -1224,7 +1254,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
struct drm_exec exec;
long r;
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec,
&ctx_data->meta_data_obj->tbo.base);
@@ -1510,3 +1540,34 @@ out:
amdgpu_ucode_release(&adev->mes.fw[pipe]);
return r;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+ uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
+
+ seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
+ mem, PAGE_SIZE, false);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
+
+#endif
+
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+ adev, &amdgpu_debugfs_mes_event_log_fops);
+
+#endif
+}
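Once registered, the whole event-log page can be read back from userspace; a minimal sketch, assuming the default debugfs mount point and DRM minor 0:

/* Hedged userspace sketch: dump the MES event log. The kernel side
 * (seq_hex_dump above) already formats the page as hex text, so a
 * plain read is enough.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_mes_event_log", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}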
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c2c88b772..7d4f93fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -133,6 +133,11 @@ struct amdgpu_mes {
uint32_t num_mes_dbs;
unsigned long *doorbell_bitmap;
+ /* MES event log buffer */
+ struct amdgpu_bo *event_log_gpu_obj;
+ uint64_t event_log_gpu_addr;
+ void *event_log_cpu_addr;
+
/* ip specific functions */
const struct amdgpu_mes_funcs *funcs;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 32fe05c81..2e4911050 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -32,7 +32,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_framebuffer.h>
@@ -51,6 +50,7 @@ struct amdgpu_device;
struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;
+struct edid;
#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
@@ -343,6 +343,97 @@ struct amdgpu_mode_info {
int disp_priority;
const struct amdgpu_display_funcs *funcs;
const enum drm_plane_type *plane_type;
+
+ /* Driver-private color mgmt props */
+
+ /**
+ * @plane_degamma_lut_property: Plane property to set a degamma LUT to
+ * convert encoded values to light linear values before sampling or
+ * blending.
+ */
+ struct drm_property *plane_degamma_lut_property;
+ /**
+ * @plane_degamma_lut_size_property: Plane property to define the max
+ * size of degamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_degamma_lut_size_property;
+ /**
+ * @plane_degamma_tf_property: Plane pre-defined transfer function
+ * to go from scanout/encoded values to linear values.
+ */
+ struct drm_property *plane_degamma_tf_property;
+ /**
+ * @plane_hdr_mult_property:
+ */
+ struct drm_property *plane_hdr_mult_property;
+
+ struct drm_property *plane_ctm_property;
+ /**
+ * @shaper_lut_property: Plane property to set pre-blending shaper LUT
+ * that converts color content before 3D LUT. If
+ * plane_shaper_tf_property != Identity TF, AMD color module will
+ * combine the user LUT values with pre-defined TF into the LUT
+ * parameters to be programmed.
+ */
+ struct drm_property *plane_shaper_lut_property;
+ /**
+ * @shaper_lut_size_property: Plane property for the size of
+ * pre-blending shaper LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_shaper_lut_size_property;
+ /**
+ * @plane_shaper_tf_property: Plane property to set a predefined
+ * transfer function for pre-blending shaper (before applying 3D LUT)
+ * with or without LUT. There is no shaper ROM, but we can use AMD
+ * color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *plane_shaper_tf_property;
+ /**
+ * @plane_lut3d_property: Plane property for color transformation using
+ * a 3D LUT (pre-blending), a three-dimensional array where each
+ * element is an RGB triplet. Each dimension has the size of
+ * lut3d_size. The array contains samples from the approximated
+ * function. On AMD, values between samples are estimated by
+ * tetrahedral interpolation. The array is accessed with three indices,
+ * one for each input dimension (color channel), blue being the
+ * outermost dimension, red the innermost.
+ */
+ struct drm_property *plane_lut3d_property;
+ /**
+ * @plane_lut3d_size_property: Plane property to define the max
+ * size of 3D LUT as supported by the driver (read-only). The max size
+ * is the max size of one dimension and, therefore, the max number of
+ * entries for the 3D LUT array is the 3D LUT size cubed.
+ */
+ struct drm_property *plane_lut3d_size_property;
+ /**
+ * @plane_blend_lut_property: Plane property for output gamma before
+ * blending. Userspace sets a blend LUT to convert colors after 3D LUT
+ * conversion. It works as a post-3DLUT 1D LUT. Together with the shaper
+ * LUT, the two 1D LUTs sandwich the 3D LUT. If plane_blend_tf_property
+ * != Identity TF, AMD color module will combine the user LUT values
+ * with pre-defined TF into the LUT parameters to be programmed.
+ */
+ struct drm_property *plane_blend_lut_property;
+ /**
+ * @plane_blend_lut_size_property: Plane property to define the max
+ * size of blend LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_blend_lut_size_property;
+ /**
+ * @plane_blend_tf_property: Plane property to set a predefined
+ * transfer function for pre-blending blend/out_gamma (after applying
+ * 3D LUT) with or without LUT. There is no blend ROM, but we can use
+ * AMD color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *plane_blend_tf_property;
+ /**
+ * @regamma_tf_property: Transfer function for CRTC regamma
+ * (post-blending). Possible values are defined by `enum
+ * amdgpu_transfer_function`. There is no regamma ROM, but we can use
+ * AMD color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *regamma_tf_property;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -416,6 +507,10 @@ struct amdgpu_crtc {
int otg_inst;
struct drm_pending_vblank_event *event;
+
+ bool wb_pending;
+ bool wb_enabled;
+ struct drm_writeback_connector *wb_conn;
};
struct amdgpu_encoder_atom_dig {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 425cebcc5..866bfde1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -620,8 +620,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- amdgpu_bo_in_cpu_visible_vram(bo))
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1275,26 +1274,39 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
+ struct drm_gem_object *obj;
unsigned int domain;
+ bool shared;
/* Abort if the BO doesn't currently have a backing store */
- if (!bo->tbo.resource)
+ if (!res)
return;
- domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ obj = &bo->tbo.base;
+ shared = drm_gem_object_is_shared_for_memory_stats(obj);
+
+ domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
+ if (shared)
+ stats->vram_shared += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
stats->gtt += size;
+ if (shared)
+ stats->gtt_shared += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
stats->cpu += size;
+ if (shared)
+ stats->cpu_shared += size;
break;
}
@@ -1381,10 +1393,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->resource->mem_type != TTM_PL_VRAM)
- return 0;
-
- if (amdgpu_bo_in_cpu_visible_vram(abo))
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1408,7 +1417,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- !amdgpu_bo_in_cpu_visible_vram(abo))
+ !amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1572,6 +1581,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@@ -1580,10 +1590,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
+
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
- if (amdgpu_bo_in_cpu_visible_vram(bo))
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a3ea8a82d..fa03d9e48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -138,12 +138,18 @@ struct amdgpu_bo_vm {
struct amdgpu_mem_stats {
/* current VRAM usage, includes visible VRAM */
uint64_t vram;
+ /* current shared VRAM usage, includes visible VRAM */
+ uint64_t vram_shared;
/* current visible VRAM usage */
uint64_t visible_vram;
/* current GTT usage */
uint64_t gtt;
+ /* current shared GTT usage */
+ uint64_t gtt_shared;
/* current system memory usage */
uint64_t cpu;
+ /* current shared system memory usage */
+ uint64_t cpu_shared;
/* sum of evicted buffers, includes visible VRAM */
uint64_t evicted_vram;
/* sum of evicted buffers due to CPU access */
@@ -245,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
}
/**
- * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
- */
-static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_res_cursor cursor;
-
- if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
- return false;
-
- amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
- while (cursor.remaining) {
- if (cursor.start < adev->gmc.visible_vram_size)
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
-
- return false;
-}
-
-/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index a21045d01..032861647 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -466,7 +466,7 @@ static int psp_sw_init(void *handle)
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- amdgpu_sriov_vf(adev) ?
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
@@ -1433,8 +1433,8 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
get_extended_data) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
IP_VERSION(13, 0, 6);
- bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps &
- EXTEND_PEER_LINK_INFO_CMD_FLAG;
+ bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
+ psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
/* populate the shared output buffer rather than the cmd input buffer
* with node_ids as the input for GET_PEER_LINKS command execution.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 4a3726bb6..31823a30d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -305,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return -EINVAL;
data->head.block = block_id;
- /* only ue and ce errors are supported */
+ /* only ue, ce and poison errors are supported */
if (!memcmp("ue", err, 2))
data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
else if (!memcmp("ce", err, 2))
data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ else if (!memcmp("poison", err, 6))
+ data->head.type = AMDGPU_RAS_ERROR__POISON;
else
return -EINVAL;
@@ -431,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
* The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details
*
- * The error type is one of: ue, ce, where,
+ * The error type is one of: ue, ce or poison, where,
* ue is multi-uncorrectable
* ce is single-correctable
+ * poison is data poisoning
*
* The sub-block is the sub-block index; pass 0 if there is no sub-block.
* The address and value are hexadecimal numbers, leading 0x is optional.
@@ -1067,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
mcm_info = &err_info->mcm_info;
if (err_info->ce_count) {
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block, "
- "no user action is needed\n",
+ "%lld new correctable hardware errors detected in %s block\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ce_count,
@@ -1080,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block, "
- "no user action is needed\n",
+ "%lld correctable hardware errors detected in total in %s block\n",
mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
}
}
@@ -1108,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
"%ld correctable hardware errors "
- "detected in %s block, no user "
- "action is needed.\n",
+ "detected in %s block\n",
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
ras_mgr->err_data.ce_count,
blk_name);
} else {
dev_info(adev->dev, "%ld correctable hardware errors "
- "detected in %s block, no user "
- "action is needed.\n",
+ "detected in %s block\n",
ras_mgr->err_data.ce_count,
blk_name);
}
@@ -1156,8 +1155,10 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
- amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count);
- amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count);
+ amdgpu_ras_error_statistic_ce_count(&obj->err_data,
+ &err_info->mcm_info, NULL, err_info->ce_count);
+ amdgpu_ras_error_statistic_ue_count(&obj->err_data,
+ &err_info->mcm_info, NULL, err_info->ue_count);
}
} else {
/* for legacy asic path which doesn't has error source info */
@@ -1918,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
struct amdgpu_iv_entry *entry)
{
dev_info(obj->adev->dev,
- "Poison is created, no user action is needed.\n");
+ "Poison is created\n");
}
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2541,7 +2542,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
return 0;
data = &con->eh_data;
- *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ *data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!*data) {
ret = -ENOMEM;
goto out;
@@ -2828,10 +2829,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (con)
return 0;
- con = kmalloc(sizeof(struct amdgpu_ras) +
+ con = kzalloc(sizeof(*con) +
sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
- GFP_KERNEL|__GFP_ZERO);
+ GFP_KERNEL);
if (!con)
return -ENOMEM;
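Both conversions above rely on kzalloc(size, GFP_KERNEL) being equivalent to kmalloc(size, GFP_KERNEL | __GFP_ZERO); a minimal sketch of the preferred form (hypothetical struct, not part of this driver):

    struct foo {
            int count;
            void *buf;
    };

    static struct foo *foo_alloc(void)
    {
            /* memory is zeroed on return, so foo->buf starts out NULL */
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }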
@@ -2918,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_query_poison_mode(adev);
+ /* Pack the socket_id into RAS feature mask bits [31:29] */
+ if (adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+
/* Get RAS schema for particular SOC */
con->schema = amdgpu_get_ras_schema(adev);
@@ -3136,6 +3142,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ amdgpu_ras_set_mca_debug_mode(adev, false);
+
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
@@ -3409,12 +3417,18 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
+int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
- if (con)
- con->is_mca_debug_mode = enable;
+ if (con) {
+ ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+ if (!ret)
+ con->is_mca_debug_mode = enable;
+ }
+
+ return ret;
}
bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
@@ -3685,7 +3699,8 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct
}
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr)
{
struct ras_err_node *err_node;
@@ -3699,6 +3714,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
+ if (err_addr)
+ memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
+
err_data->err_list_count++;
list_add_tail(&err_node->node, &err_data->err_node_list);
list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
@@ -3707,7 +3725,8 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
}
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count)
{
struct ras_err_info *err_info;
@@ -3717,7 +3736,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
if (!err_info)
return -EINVAL;
@@ -3728,7 +3747,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
}
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count)
{
struct ras_err_info *err_info;
@@ -3738,7 +3758,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
if (!count)
return 0;
- err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
if (!err_info)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 19161916a..76fb85628 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -452,10 +452,17 @@ struct ras_fs_data {
char debugfs_name[32];
};
+struct ras_err_addr {
+ uint64_t err_status;
+ uint64_t err_ipid;
+ uint64_t err_addr;
+};
+
struct ras_err_info {
struct amdgpu_smuio_mcm_config_info mcm_info;
u64 ce_count;
u64 ue_count;
+ struct ras_err_addr err_addr;
};
struct ras_err_node {
@@ -773,7 +780,7 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);
int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
-void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
+int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);
bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
unsigned int *mode);
@@ -806,8 +813,10 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count);
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
- struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ struct ras_err_addr *err_addr, u64 count);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 4e526d773..06f0a6534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -647,6 +647,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
ring->name);
ring->sched.ready = !r;
+
return r;
}
@@ -729,3 +730,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
+
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
+{
+ if (!ring)
+ return false;
+
+ if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
+ return false;
+
+ return true;
+}
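A hedged sketch of the intended call-site pattern for the new helper (caller context illustrative):

    /* bail out early when the ring cannot accept work */
    if (!amdgpu_ring_sched_ready(ring))
            return -ENODEV;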
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bbb53720a..fe1a61eb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 35e0ae9ac..2c3675d91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -531,13 +531,12 @@ int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
if (version_major == 2 && version_minor == 1)
adev->gfx.rlc.is_rlc_v2_1 = true;
- if (version_minor >= 0) {
- err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
- if (err) {
- dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
- return err;
- }
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
}
+
if (version_minor >= 1)
amdgpu_gfx_rlc_init_microcode_v2_1(adev);
if (version_minor >= 2)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
new file mode 100644
index 000000000..7a6a67275
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_seq64.h"
+
+#include <drm/drm_exec.h>
+
+/**
+ * DOC: amdgpu_seq64
+ *
+ * amdgpu_seq64 allocates a 64-bit memory slot for each request, in sequence
+ * order. The seq64 driver is required for user-queue fence memory allocation,
+ * TLB counters and VM updates. It has a maximum of 32768 64-bit slots.
+ */
+
+/**
+ * amdgpu_seq64_map - Map the seq64 memory to VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: vm pointer
+ * @bo_va: bo_va pointer
+ * @seq64_addr: seq64 vaddr start address
+ * @size: seq64 pool size
+ *
+ * Map the seq64 memory to the given VM.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va, u64 seq64_addr,
+ uint32_t size)
+{
+ struct amdgpu_bo *bo;
+ struct drm_exec exec;
+ int r;
+
+ bo = adev->seq64.sbo;
+ if (!bo)
+ return -EINVAL;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
+
+ *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!*bo_va) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
+ if (r) {
+ DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
+ amdgpu_vm_bo_del(adev, *bo_va);
+ goto error;
+ }
+
+ r = amdgpu_vm_bo_update(adev, *bo_va, false);
+ if (r) {
+ DRM_ERROR("failed to do vm_bo_update on userq sem\n");
+ amdgpu_vm_bo_del(adev, *bo_va);
+ goto error;
+ }
+
+error:
+ drm_exec_fini(&exec);
+ return r;
+}
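The drm_exec_until_all_locked()/drm_exec_retry_on_contention() pair above is the standard DRM execution-context retry loop; a minimal sketch for a single GEM object (error handling elided, object name illustrative):

    struct drm_exec exec;
    int r;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
            r = drm_exec_lock_obj(&exec, obj);
            /* on contention, drop all locks and restart the loop */
            drm_exec_retry_on_contention(&exec);
            if (unlikely(r))
                    break;
    }
    /* ... use the locked object ... */
    drm_exec_fini(&exec);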
+
+/**
+ * amdgpu_seq64_unmap - Unmap the seq64 memory
+ *
+ * @adev: amdgpu_device pointer
+ * @fpriv: DRM file private
+ *
+ * Unmap the seq64 memory from the given VM.
+ */
+void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
+{
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo *bo;
+ struct drm_exec exec;
+ int r;
+
+ if (!fpriv->seq64_va)
+ return;
+
+ bo = adev->seq64.sbo;
+ if (!bo)
+ return;
+
+ vm = &fpriv->vm;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
+
+ amdgpu_vm_bo_del(adev, fpriv->seq64_va);
+
+ fpriv->seq64_va = NULL;
+
+error:
+ drm_exec_fini(&exec);
+}
+
+/**
+ * amdgpu_seq64_alloc - Allocate a 64-bit memory slot
+ *
+ * @adev: amdgpu_device pointer
+ * @gpu_addr: allocated gpu VA start address
+ * @cpu_addr: allocated cpu VA start address
+ *
+ * Allocate a 64-bit slot from the seq64 pool.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
+ u64 **cpu_addr)
+{
+ unsigned long bit_pos;
+ u32 offset;
+
+ bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
+
+ if (bit_pos < adev->seq64.num_sem) {
+ __set_bit(bit_pos, adev->seq64.used);
+ offset = bit_pos << 6; /* convert to qw offset */
+ } else {
+ return -EINVAL;
+ }
+
+ *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START;
+ *cpu_addr = offset + adev->seq64.cpu_base_addr;
+
+ return 0;
+}
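The allocator above is a plain first-fit bitmap scan; the same pattern in isolation (helper name illustrative, and the caller must serialize since __set_bit() is non-atomic):

    static int slot_alloc(unsigned long *used, unsigned int num_slots,
                          unsigned int *slot)
    {
            unsigned long bit = find_first_zero_bit(used, num_slots);

            if (bit >= num_slots)
                    return -EINVAL;         /* pool exhausted */

            __set_bit(bit, used);
            *slot = bit;
            return 0;
    }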
+
+/**
+ * amdgpu_seq64_free - Free the given 64-bit memory slot
+ *
+ * @adev: amdgpu_device pointer
+ * @gpu_addr: gpu start address to be freed
+ *
+ * Return the given 64-bit slot to the seq64 pool.
+ *
+ */
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr)
+{
+ u32 offset;
+
+ offset = gpu_addr - AMDGPU_SEQ64_VADDR_START;
+
+ offset >>= 6;
+ if (offset < adev->seq64.num_sem)
+ __clear_bit(offset, adev->seq64.used);
+}
+
+/**
+ * amdgpu_seq64_fini - Cleanup seq64 driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the memory space allocated for seq64.
+ *
+ */
+void amdgpu_seq64_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->seq64.sbo,
+ NULL,
+ (void **)&adev->seq64.cpu_base_addr);
+}
+
+/**
+ * amdgpu_seq64_init - Initialize seq64 driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the required memory space for seq64.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->seq64.sbo)
+ return 0;
+
+ /*
+ * AMDGPU_SEQ64_SIZE = AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8,
+ * i.e. the BO holds AMDGPU_MAX_SEQ64_SLOTS 64-bit slots
+ */
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->seq64.sbo, NULL,
+ (void **)&adev->seq64.cpu_base_addr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
+ return r;
+ }
+
+ memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE);
+
+ adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
+ memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
new file mode 100644
index 000000000..2196e72be
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_SEQ64_H__
+#define __AMDGPU_SEQ64_H__
+
+#define AMDGPU_SEQ64_SIZE (2ULL << 20)
+#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8))
+#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000
+#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET)
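For reference, the slot count follows directly from the pool size: AMDGPU_MAX_SEQ64_SLOTS = (2 << 20) / (sizeof(u64) * 8) = 2097152 / 64 = 32768, matching the 32768-slot maximum stated in the amdgpu_seq64 DOC comment.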
+
+struct amdgpu_seq64 {
+ struct amdgpu_bo *sbo;
+ u32 num_sem;
+ u64 *cpu_base_addr;
+ DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
+};
+
+void amdgpu_seq64_fini(struct amdgpu_device *adev);
+int amdgpu_seq64_init(struct amdgpu_device *adev);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
+int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size);
+void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 2fd1bfb35..f539b1d00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -554,6 +554,21 @@ TRACE_EVENT(amdgpu_reset_reg_dumps,
__entry->value)
);
+TRACE_EVENT(amdgpu_runpm_reference_dumps,
+ TP_PROTO(uint32_t index, const char *func),
+ TP_ARGS(index, func),
+ TP_STRUCT__entry(
+ __field(uint32_t, index)
+ __string(func, func)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __assign_str(func, func);
+ ),
+ TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n",
+ __entry->index,
+ __get_str(func))
+);
#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b0ed10f4d..851509c6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -137,7 +137,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- amdgpu_bo_in_cpu_visible_vram(abo)) {
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -408,40 +408,55 @@ error:
return r;
}
-/*
- * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
*
- * Called by amdgpu_bo_move()
+ * Returns: true if the full resource is CPU visible, false otherwise.
*/
-static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_resource *mem)
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
- u64 end;
- if (mem->mem_type == TTM_PL_SYSTEM ||
- mem->mem_type == TTM_PL_TT)
+ if (!res)
+ return false;
+
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT)
return true;
- if (mem->mem_type != TTM_PL_VRAM)
+
+ if (res->mem_type != TTM_PL_VRAM)
return false;
- amdgpu_res_first(mem, 0, mem_size, &cursor);
- end = cursor.start + cursor.size;
+ amdgpu_res_first(res, 0, res->size, &cursor);
while (cursor.remaining) {
+ if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+ return false;
amdgpu_res_next(&cursor, cursor.size);
+ }
- if (!cursor.remaining)
- break;
+ return true;
+}
- /* ttm_resource_ioremap only supports contiguous memory */
- if (end != cursor.start)
- return false;
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
+{
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- end = cursor.start + cursor.size;
- }
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
- return end <= adev->gmc.visible_vram_size;
+ return true;
}
/*
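The amdgpu_res_cursor walk used by amdgpu_res_cpu_visible() generalizes to any per-chunk predicate; a hedged standalone sketch (helper name illustrative):

    static bool res_all_chunks_below(struct ttm_resource *res, u64 limit)
    {
            struct amdgpu_res_cursor cursor;

            amdgpu_res_first(res, 0, res->size, &cursor);
            while (cursor.remaining) {
                    /* reject as soon as one chunk crosses the limit */
                    if (cursor.start + cursor.size > limit)
                            return false;
                    amdgpu_res_next(&cursor, cursor.size);
            }
            return true;
    }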
@@ -534,8 +549,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
- if (!amdgpu_mem_visible(adev, old_mem) ||
- !amdgpu_mem_visible(adev, new_mem)) {
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
@@ -562,7 +577,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -573,9 +587,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
- return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141..32cf6b6f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 0efb2568c..3e12763e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1062,7 +1062,8 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
&adev->firmware.fw_buf_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index ca45ba8ac..1b5ef3210 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -86,7 +86,7 @@ static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_sync_create(&sync);
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
@@ -149,7 +149,7 @@ static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
long r;
- drm_exec_init(&exec, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
@@ -766,6 +766,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
return umsch_mm_test(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3a632c3b1..0dcff2889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1099,7 +1099,8 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
{
bool xnack_mode = true;
- if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
xnack_mode = false;
return xnack_mode;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index db6fc0cb1..453a4b786 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5baefb548..5b50f7359 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1439,6 +1439,51 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @flush_type: flush type
+ * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
+ *
+ * Flush TLB if needed for a compute VM.
+ *
+ * Returns:
+ * 0 for success.
+ */
+int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint32_t flush_type,
+ uint32_t xcc_mask)
+{
+ uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
+ bool all_hub = false;
+ int xcc = 0, r = 0;
+
+ WARN_ON_ONCE(!vm->is_compute_context);
+
+ /*
+ * We may race and lose here, but that is extremely unlikely, and the
+ * worst that can happen is that we flush the changes into the TLB
+ * once more, which is harmless.
+ */
+ if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
+ return 0;
+
+ if (adev->family == AMDGPU_FAMILY_AI ||
+ adev->family == AMDGPU_FAMILY_RV)
+ all_hub = true;
+
+ for_each_inst(xcc, xcc_mask) {
+ r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
+ all_hub, xcc);
+ if (r)
+ break;
+ }
+ return r;
+}
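The atomic64_xchg() above implements a lock-free "flush at most once per TLB sequence" guard; the pattern in isolation (names illustrative):

    static bool tlb_flush_needed(atomic64_t *last_flushed, u64 cur_seq)
    {
            /* publish cur_seq; skip if it was already recorded */
            return atomic64_xchg(last_flushed, cur_seq) != cur_seq;
    }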
+
+/**
* amdgpu_vm_bo_add - add a bo to a specific vm
*
* @adev: amdgpu_device pointer
@@ -1514,6 +1559,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure object fit at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure the last pfn does not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
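check_add_overflow() (from linux/overflow.h) computes the sum, stores it, and returns true on wraparound; a minimal sketch of the idiom (helper name illustrative):

    static int range_end(u64 start, u64 size, u64 *end)
    {
            /* true means start + size wrapped past U64_MAX */
            if (check_add_overflow(start, size, end))
                    return -EINVAL;
            return 0;
    }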
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1540,21 +1616,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1607,17 +1676,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1631,7 +1692,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1718,10 +1779,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2cd86d2bf..4740dd65b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -116,7 +116,7 @@ struct amdgpu_mem_stats;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-/* Reserve 4MB VRAM for page tables */
+/* How much VRAM to reserve for page tables */
#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)
/*
@@ -324,6 +324,7 @@ struct amdgpu_vm {
/* Last finished delayed update */
atomic64_t tlb_seq;
struct dma_fence *last_tlb_flush;
+ atomic64_t kfd_last_flushed_seq;
/* How many times we had to re-generate the page tables */
uint64_t generation;
@@ -445,6 +446,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket);
+int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint32_t flush_type,
+ uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index e81579708..ad44012cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
+#include "amdgpu_smu.h"
#include "soc15_common.h"
#include "vpe_v6_1.h"
@@ -33,8 +34,180 @@
/* VPE CSA resides in the 4th page of CSA */
#define AMDGPU_CSA_VPE_OFFSET (4096 * 3)
+/* 1 second timeout */
+#define VPE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define VPE_MAX_DPM_LEVEL 4
+#define FIXED1_8_BITS_PER_FRACTIONAL_PART 8
+#define GET_PRATIO_INTEGER_PART(x) ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART)
+
static void vpe_set_ring_funcs(struct amdgpu_device *adev);
+static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+static inline uint16_t complete_integer_division_u16(
+ uint16_t dividend,
+ uint16_t divisor,
+ uint16_t *remainder)
+{
+ return div16_u16_rem(dividend, divisor, (uint16_t *)remainder);
+}
+
+static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
+{
+ u16 arg1_value = numerator;
+ u16 arg2_value = denominator;
+
+ uint16_t remainder;
+
+ /* determine integer part */
+ uint16_t res_value = complete_integer_division_u16(
+ arg1_value, arg2_value, &remainder);
+
+ if (res_value > 127 /* CHAR_MAX */)
+ return 0;
+
+ /* determine fractional part */
+ {
+ unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint16_t summand = (remainder << 1) >= arg2_value;
+
+ if ((res_value + summand) > 32767 /* SHRT_MAX */)
+ return 0;
+
+ res_value += summand;
+ }
+
+ return res_value;
+}
+
+static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency)
+{
+ uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency);
+
+ if (GET_PRATIO_INTEGER_PART(pratio) > 1)
+ pratio = 0;
+
+ return pratio;
+}
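A worked example of the U1.8 encoding above: for from_frequency = 800 and to_frequency = 1000, the integer part is 0 with remainder 800, the eight fractional iterations yield 0b11001100 = 204, and the final round-up adds 1 (since 2 * 800 >= 1000), giving 205, i.e. 205/256 ≈ 0.80 as expected for an 800/1000 ratio.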
+
+/*
+ * VPE has 4 DPM levels, from level 0 (lowest) to 3 (highest); the VPE FW
+ * dynamically decides which level to use according to the current load.
+ *
+ * Get the VPE and SOC clocks from PM, select the four appropriate clock
+ * values, and calculate the ratios for adjusting from one clock to another.
+ * The VPE FW can then request the appropriate frequency from the PMFW.
+ */
+int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
+{
+ struct amdgpu_device *adev = vpe->ring.adev;
+ uint32_t dpm_ctl;
+
+ if (adev->pm.dpm_enabled) {
+ struct dpm_clocks clock_table = { 0 };
+ struct dpm_clock *VPEClks;
+ struct dpm_clock *SOCClks;
+ uint32_t idx;
+ uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0;
+ uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0;
+
+ dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
+ dpm_ctl |= 1; /* DPM enablement */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
+
+ /* Get VPECLK and SOCCLK */
+ if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) {
+ dev_dbg(adev->dev, "%s: get clock failed!\n", __func__);
+ goto disable_dpm;
+ }
+
+ SOCClks = clock_table.SocClocks;
+ VPEClks = clock_table.VPEClocks;
+
+ /* VPE DPM only cares about 4 levels. */
+ for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) {
+ uint32_t soc_dpm_level;
+ uint32_t min_freq;
+
+ if (idx == 0)
+ soc_dpm_level = 0;
+ else
+ soc_dpm_level = (idx * 2) + 1;
+
+ /* clamp the max level */
+ if (soc_dpm_level > PP_SMU_NUM_VPECLK_DPM_LEVELS - 1)
+ soc_dpm_level = PP_SMU_NUM_VPECLK_DPM_LEVELS - 1;
+
+ min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
+ SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq;
+
+ switch (idx) {
+ case 0:
+ pratio_vmin_freq = min_freq;
+ break;
+ case 1:
+ pratio_vmid_freq = min_freq;
+ break;
+ case 2:
+ pratio_vnorm_freq = min_freq;
+ break;
+ case 3:
+ pratio_vmax_freq = min_freq;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) {
+ uint32_t pratio_ctl;
+
+ pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq);
+ pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq);
+ pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq);
+
+ pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18);
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */
+ dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
+ } else {
+ dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__);
+ goto disable_dpm;
+ }
+ }
+ return 0;
+
+disable_dpm:
+ dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
+ dpm_ctl &= 0xfffffffe; /* Disable DPM */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
+ dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
+ return 0;
+}
+
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info ucode = {
@@ -134,6 +307,19 @@ static int vpe_early_init(void *handle)
return 0;
}
+static void vpe_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vpe.idle_work.work);
+ unsigned int fences = 0;
+
+ fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+
+ if (fences == 0)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ else
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+}
static int vpe_common_init(struct amdgpu_vpe *vpe)
{
@@ -150,6 +336,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe)
return r;
}
+ vpe->context_started = false;
+ INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);
+
return 0;
}
@@ -201,6 +390,12 @@ static int vpe_hw_init(void *handle)
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
+ /* Power on VPE */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
ret = vpe_load_microcode(vpe);
if (ret)
return ret;
@@ -219,6 +414,9 @@ static int vpe_hw_fini(void *handle)
vpe_ring_stop(vpe);
+ /* Power off VPE */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+
return 0;
}
@@ -226,6 +424,8 @@ static int vpe_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
return vpe_hw_fini(adev);
}
@@ -430,6 +630,21 @@ static int vpe_set_clockgating_state(void *handle,
static int vpe_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ dev_err(adev->dev, "Without PM, cannot support powergating\n");
+
+ dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE");
+
+ if (state == AMD_PG_STATE_GATE) {
+ amdgpu_dpm_enable_vpe(adev, false);
+ vpe->context_started = false;
+ } else {
+ amdgpu_dpm_enable_vpe(adev, true);
+ }
+
return 0;
}
@@ -595,6 +810,38 @@ err0:
return ret;
}
+static void vpe_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
+ /* Power on VPE and notify VPE of new context */
+ if (!vpe->context_started) {
+ uint32_t context_notify;
+
+ /* Power on VPE */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE);
+
+ /* Indicates that a job from a new context has been submitted. */
+ context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
+ if ((context_notify & 0x1) == 0)
+ context_notify |= 0x1;
+ else
+ context_notify &= ~(0x1);
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
+ vpe->context_started = true;
+ }
+}
+
+static void vpe_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+}
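The begin_use/end_use pair above is the usual delayed-work powergating pattern; a hedged sketch with illustrative names (struct my_dev, my_power_ungate() and MY_IDLE_TIMEOUT are assumptions, not part of this driver):

    struct my_dev {
            struct delayed_work idle_work;
    };

    static void my_ring_begin_use(struct my_dev *d)
    {
            /* a pending idle timer must not gate the block mid-job */
            cancel_delayed_work_sync(&d->idle_work);
            my_power_ungate(d);
    }

    static void my_ring_end_use(struct my_dev *d)
    {
            /* re-arm; the handler gates only if still idle on expiry */
            schedule_delayed_work(&d->idle_work, MY_IDLE_TIMEOUT);
    }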
+
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.type = AMDGPU_RING_TYPE_VPE,
.align_mask = 0xf,
@@ -625,6 +872,8 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.init_cond_exec = vpe_ring_init_cond_exec,
.patch_cond_exec = vpe_ring_patch_cond_exec,
.preempt_ib = vpe_ring_preempt_ib,
+ .begin_use = vpe_ring_begin_use,
+ .end_use = vpe_ring_end_use,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
index 29d56f7ae..1153ddaea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
@@ -47,6 +47,15 @@ struct vpe_regs {
uint32_t queue0_rb_wptr_lo;
uint32_t queue0_rb_wptr_hi;
uint32_t queue0_preempt;
+
+ uint32_t dpm_enable;
+ uint32_t dpm_pratio;
+ uint32_t dpm_request_interval;
+ uint32_t dpm_decision_threshold;
+ uint32_t dpm_busy_clamp_threshold;
+ uint32_t dpm_idle_clamp_threshold;
+ uint32_t dpm_request_lv;
+ uint32_t context_indicator;
};
struct amdgpu_vpe {
@@ -63,12 +72,15 @@ struct amdgpu_vpe {
struct amdgpu_bo *cmdbuf_obj;
uint64_t cmdbuf_gpu_addr;
uint32_t *cmdbuf_cpu_addr;
+ struct delayed_work idle_work;
+ bool context_started;
};
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
+int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);
#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index bd20cb3b9..a6c88f2fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -413,6 +413,38 @@ static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
return sysfs_emit(buf, "%s\n", buf);
}
+static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i, j, size = 0;
+ int current_node;
+ /*
+ * Get the node id used in sysfs for the current socket and show it
+ * in the port num info output for easy reading.
+ * It is NOT the node id retrieved from the XGMI TA.
+ */
+ for (i = 0; i < top->num_nodes; i++) {
+ if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) {
+ current_node = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < top->num_nodes; i++) {
+ for (j = 0; j < top->nodes[i].num_links; j++)
+ /* node id in sysfs starts from 1 rather than 0 so +1 here */
+ size += sysfs_emit_at(buf, size, "%02x:%02x -> %02x:%02x\n", current_node + 1,
+ top->nodes[i].port_num[j].src_xgmi_port_num, i + 1,
+ top->nodes[i].port_num[j].dst_xgmi_port_num);
+ }
+
+ return size;
+}
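With the "%02x:%02x -> %02x:%02x" format above, reading the file on node 1 of a two-node hive would produce lines such as (port numbers illustrative):

    01:02 -> 02:05
    01:03 -> 02:06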
+
#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
struct device_attribute *attr,
@@ -452,6 +484,7 @@ static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
+static DEVICE_ATTR(xgmi_port_num, S_IRUGO, amdgpu_xgmi_show_connected_port_num, NULL);
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
@@ -487,6 +520,13 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
if (ret)
pr_err("failed to create xgmi_num_links\n");
+ /* Create xgmi port num file if supported */
+ if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num);
+ if (ret)
+ dev_err(adev->dev, "failed to create xgmi_port_num\n");
+ }
+
/* Create sysfs link to hive info folder on the first device */
if (hive->kobj.parent != (&adev->dev->kobj)) {
ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
@@ -517,6 +557,8 @@ remove_file:
device_remove_file(adev->dev, &dev_attr_xgmi_error);
device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
+ device_remove_file(adev->dev, &dev_attr_xgmi_port_num);
success:
return ret;
@@ -533,6 +575,8 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
device_remove_file(adev->dev, &dev_attr_xgmi_error);
device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
+ device_remove_file(adev->dev, &dev_attr_xgmi_port_num);
if (hive->kobj.parent != (&adev->dev->kobj))
sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
@@ -779,6 +823,28 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf
return 0;
}
+static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
+ struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
+
+ for (int i = 0; i < peer_info->num_nodes; i++) {
+ if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
+ for (int j = 0; j < top_info->num_nodes; j++) {
+ if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
+ peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
+ peer_info->nodes[i].is_sharing_enabled =
+ top_info->nodes[j].is_sharing_enabled;
+ peer_info->nodes[i].num_links =
+ top_info->nodes[j].num_links;
+ return;
+ }
+ }
+ }
+ }
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -853,18 +919,38 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit_unlock;
}
- /* get latest topology info for each device from psp */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
- &tmp_adev->psp.xgmi_context.top_info, false);
+ if (amdgpu_sriov_vf(adev) &&
+ adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
+ /* only get topology for the VF being initialized if it supports full duplex */
+ ret = psp_xgmi_get_topology_info(&adev->psp, count,
+ &adev->psp.xgmi_context.top_info, false);
if (ret) {
- dev_err(tmp_adev->dev,
+ dev_err(adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.node_id,
- tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ /* TODO: continue with some nodes failed or disable the whole hive */
goto exit_unlock;
}
+
+ /* fill the topology info for peers instead of getting from PSP */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
+ }
+ } else {
+ /* get latest topology info for each device from psp */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info, false);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
+ tmp_adev->gmc.xgmi.hive_id, ret);
+ /* To do : continue with some node failed or disable the whole hive */
+ goto exit_unlock;
+ }
+ }
}
/* get topology again for hives that support extended data */
@@ -1227,10 +1313,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
case AMDGPU_MCA_ERROR_TYPE_UE:
- amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
+ amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
break;
case AMDGPU_MCA_ERROR_TYPE_CE:
- amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
+ amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 3f715e7fe..d6f808acf 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -24,6 +24,7 @@
#include "soc15.h"
#include "soc15_common.h"
+#include "amdgpu_reg_state.h"
#include "amdgpu_xcp.h"
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
@@ -656,3 +657,416 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
return 0;
}
+
+static void aqua_read_smn(struct amdgpu_device *adev,
+ struct amdgpu_smn_reg_data *regdata,
+ uint64_t smn_addr)
+{
+ regdata->addr = smn_addr;
+ regdata->value = RREG32_PCIE(smn_addr);
+}
+
+struct aqua_reg_list {
+ uint64_t start_addr;
+ uint32_t num_regs;
+ uint32_t incrx;
+};
+
+#define DW_ADDR_INCR 4
+
+static void aqua_read_smn_ext(struct amdgpu_device *adev,
+ struct amdgpu_smn_reg_data *regdata,
+ uint64_t smn_addr, int i)
+{
+ regdata->addr =
+ smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
+ regdata->value = RREG32_PCIE_EXT(regdata->addr);
+}
+
+#define smnreg_0x1A340218 0x1A340218
+#define smnreg_0x1A3402E4 0x1A3402E4
+#define smnreg_0x1A340294 0x1A340294
+#define smreg_0x1A380088 0x1A380088
+
+#define NUM_PCIE_SMN_REGS 14
+
+static struct aqua_reg_list pcie_reg_addrs[] = {
+ { smnreg_0x1A340218, 1, 0 },
+ { smnreg_0x1A3402E4, 1, 0 },
+ { smnreg_0x1A340294, 6, DW_ADDR_INCR },
+ { smreg_0x1A380088, 6, DW_ADDR_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_pcie_v1_0 *pcie_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ struct pci_dev *us_pdev, *ds_pdev;
+ int aer_cap, r, n;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;
+
+ szbuf = sizeof(*pcie_reg_state) +
+ amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
+ /* Only one instance of pcie regs */
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
+ sizeof(*pcie_reg_state));
+ pcie_regs->inst_header.instance = 0;
+ pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
+ pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;
+
+ reg_data = pcie_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
+ start_addr = pcie_reg_addrs[r].start_addr;
+ incrx = pcie_reg_addrs[r].incrx;
+ num_regs = pcie_reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn(adev, reg_data, start_addr + n * incrx);
+ ++reg_data;
+ }
+ }
+
+ ds_pdev = pci_upstream_bridge(adev->pdev);
+ us_pdev = pci_upstream_bridge(ds_pdev);
+
+ pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
+ &pcie_regs->device_status);
+ pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
+ &pcie_regs->link_status);
+
+ aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer_cap) {
+ pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
+ &pcie_regs->pcie_corr_err_status);
+ pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
+ &pcie_regs->pcie_uncorr_err_status);
+ }
+
+ pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
+ &pcie_regs->sub_bus_number_latency);
+
+ pcie_reg_state->common_header.structure_size = szbuf;
+ pcie_reg_state->common_header.format_revision = 1;
+ pcie_reg_state->common_header.content_revision = 0;
+ pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
+ pcie_reg_state->common_header.num_instances = 1;
+
+ return pcie_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x11A00050 0x11A00050
+#define smnreg_0x11A00180 0x11A00180
+#define smnreg_0x11A00070 0x11A00070
+#define smnreg_0x11A00200 0x11A00200
+#define smnreg_0x11A0020C 0x11A0020C
+#define smnreg_0x11A00210 0x11A00210
+#define smnreg_0x11A00108 0x11A00108
+
+#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
+
+#define NUM_XGMI_SMN_REGS 25
+
+static struct aqua_reg_list xgmi_reg_addrs[] = {
+ { smnreg_0x11A00050, 1, 0 },
+ { smnreg_0x11A00180, 16, DW_ADDR_INCR },
+ { smnreg_0x11A00070, 4, DW_ADDR_INCR },
+ { smnreg_0x11A00200, 1, 0 },
+ { smnreg_0x11A0020C, 1, 0 },
+ { smnreg_0x11A00210, 1, 0 },
+ { smnreg_0x11A00108, 1, 0 },
+};
+
+static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_xgmi_instances = 8;
+ int inst = 0, i, j, r, n;
+ const int xgmi_inst = 2;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;
+
+ szbuf = sizeof(*xgmi_reg_state) +
+ amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
+ NUM_XGMI_SMN_REGS);
+ /* Make sure the buffer can hold all XGMI instances */
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &xgmi_reg_state->xgmi_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ for (j = 0; j < xgmi_inst; ++j) {
+ xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
+ xgmi_regs->inst_header.instance = inst++;
+
+ xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
+ xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;
+
+ reg_data = xgmi_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
+ start_addr = xgmi_reg_addrs[r].start_addr;
+ incrx = xgmi_reg_addrs[r].incrx;
+ num_regs = xgmi_reg_addrs[r].num_regs;
+
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(
+ adev, reg_data,
+ XGMI_LINK_REG(start_addr, j) +
+ n * incrx,
+ i);
+ ++reg_data;
+ }
+ }
+ p = reg_data;
+ }
+ }
+
+ xgmi_reg_state->common_header.structure_size = szbuf;
+ xgmi_reg_state->common_header.format_revision = 1;
+ xgmi_reg_state->common_header.content_revision = 0;
+ xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
+ xgmi_reg_state->common_header.num_instances = max_xgmi_instances;
+
+ return xgmi_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x11C00070 0x11C00070
+#define smnreg_0x11C00210 0x11C00210
+
+static struct aqua_reg_list wafl_reg_addrs[] = {
+ { smnreg_0x11C00070, 4, DW_ADDR_INCR },
+ { smnreg_0x11C00210, 1, 0 },
+};
+
+#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
+
+#define NUM_WAFL_SMN_REGS 5
+
+static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_wafl_v1_0 *wafl_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_wafl_instances = 8;
+ int inst = 0, i, j, r, n;
+ const int wafl_inst = 2;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
+
+ szbuf = sizeof(*wafl_reg_state) +
+ amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
+ NUM_WAFL_SMN_REGS);
+
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &wafl_reg_state->wafl_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ for (j = 0; j < wafl_inst; ++j) {
+ wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
+ wafl_regs->inst_header.instance = inst++;
+
+ wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
+ wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
+
+ reg_data = wafl_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
+ start_addr = wafl_reg_addrs[r].start_addr;
+ incrx = wafl_reg_addrs[r].incrx;
+ num_regs = wafl_reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(
+ adev, reg_data,
+ WAFL_LINK_REG(start_addr, j) +
+ n * incrx,
+ i);
+ ++reg_data;
+ }
+ }
+ p = reg_data;
+ }
+ }
+
+ wafl_reg_state->common_header.structure_size = szbuf;
+ wafl_reg_state->common_header.format_revision = 1;
+ wafl_reg_state->common_header.content_revision = 0;
+ wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
+ wafl_reg_state->common_header.num_instances = max_wafl_instances;
+
+ return wafl_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x1B311060 0x1B311060
+#define smnreg_0x1B411060 0x1B411060
+#define smnreg_0x1B511060 0x1B511060
+#define smnreg_0x1B611060 0x1B611060
+
+#define smnreg_0x1C307120 0x1C307120
+#define smnreg_0x1C317120 0x1C317120
+
+#define smnreg_0x1C320830 0x1C320830
+#define smnreg_0x1C380830 0x1C380830
+#define smnreg_0x1C3D0830 0x1C3D0830
+#define smnreg_0x1C420830 0x1C420830
+
+#define smnreg_0x1C320100 0x1C320100
+#define smnreg_0x1C380100 0x1C380100
+#define smnreg_0x1C3D0100 0x1C3D0100
+#define smnreg_0x1C420100 0x1C420100
+
+#define smnreg_0x1B310500 0x1B310500
+#define smnreg_0x1C300400 0x1C300400
+
+#define USR_CAKE_INCR 0x11000
+#define USR_LINK_INCR 0x100000
+#define USR_CP_INCR 0x10000
+
+#define NUM_USR_SMN_REGS 20
+
+static struct aqua_reg_list usr_reg_addrs[] = {
+ { smnreg_0x1B311060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B411060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B511060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B611060, 4, DW_ADDR_INCR },
+ { smnreg_0x1C307120, 2, DW_ADDR_INCR },
+ { smnreg_0x1C317120, 2, DW_ADDR_INCR },
+};
+
+#define NUM_USR1_SMN_REGS 46
+static struct aqua_reg_list usr1_reg_addrs[] = {
+ { smnreg_0x1C320830, 6, USR_CAKE_INCR },
+ { smnreg_0x1C380830, 5, USR_CAKE_INCR },
+ { smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
+ { smnreg_0x1C420830, 4, USR_CAKE_INCR },
+ { smnreg_0x1C320100, 6, USR_CAKE_INCR },
+ { smnreg_0x1C380100, 5, USR_CAKE_INCR },
+ { smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
+ { smnreg_0x1C420100, 4, USR_CAKE_INCR },
+ { smnreg_0x1B310500, 4, USR_LINK_INCR },
+ { smnreg_0x1C300400, 2, USR_CP_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size,
+ int reg_state)
+{
+ uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
+ struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
+ struct amdgpu_regs_usr_v1_0 *usr_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_usr_instances = 4;
+ struct aqua_reg_list *reg_addrs;
+ int inst = 0, i, n, r, arr_size;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ switch (reg_state) {
+ case AMDGPU_REG_STATE_TYPE_USR:
+ arr_size = ARRAY_SIZE(usr_reg_addrs);
+ reg_addrs = usr_reg_addrs;
+ num_smn = NUM_USR_SMN_REGS;
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR_1:
+ arr_size = ARRAY_SIZE(usr1_reg_addrs);
+ reg_addrs = usr1_reg_addrs;
+ num_smn = NUM_USR1_SMN_REGS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
+
+ szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
+ sizeof(*usr_regs),
+ num_smn);
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &usr_reg_state->usr_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
+ usr_regs->inst_header.instance = inst++;
+ usr_regs->inst_header.state = AMDGPU_INST_S_OK;
+ usr_regs->inst_header.num_smn_regs = num_smn;
+ reg_data = usr_regs->smn_reg_values;
+
+ for (r = 0; r < arr_size; r++) {
+ start_addr = reg_addrs[r].start_addr;
+ incrx = reg_addrs[r].incrx;
+ num_regs = reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(adev, reg_data,
+ start_addr + n * incrx, i);
+ reg_data++;
+ }
+ }
+ p = reg_data;
+ }
+
+ usr_reg_state->common_header.structure_size = szbuf;
+ usr_reg_state->common_header.format_revision = 1;
+ usr_reg_state->common_header.content_revision = 0;
+ usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
+ usr_reg_state->common_header.num_instances = max_usr_instances;
+
+ return usr_reg_state->common_header.structure_size;
+}
+
+ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
+ enum amdgpu_reg_state reg_state, void *buf,
+ size_t max_size)
+{
+ ssize_t size;
+
+ switch (reg_state) {
+ case AMDGPU_REG_STATE_TYPE_PCIE:
+ size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_XGMI:
+ size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_WAFL:
+ size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR:
+ size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
+ AMDGPU_REG_STATE_TYPE_USR);
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR_1:
+ size = aqua_vanjaram_read_usr_state(
+ adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
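
The three aqua_vanjaram_read_*_state() helpers above all serialize into the same variable-length layout: a fixed common header, then one per-instance block per link/AID, each block ending in a flexible array of struct amdgpu_smn_reg_data entries, with the untyped cursor p stepped past each block's tail. One quirk worth noting from the code as shown: the USR_1 path stamps the common header's state_type as AMDGPU_REG_STATE_TYPE_USR too, so a consumer has to key off the type it requested rather than the header field. Below is a minimal, self-contained sketch of the layout walk; the struct names, the reginst_size() formula, and the zero-fill standing in for the register reads are simplified stand-ins, not the driver's real definitions:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-ins for the amdgpu structures (illustrative only). */
    struct smn_reg_data  { uint64_t addr; uint32_t value; uint32_t pad; };
    struct inst_header   { uint32_t instance; uint32_t num_smn_regs; };
    struct inst_regs     { struct inst_header hdr; struct smn_reg_data vals[]; };
    struct common_header { uint32_t structure_size; uint32_t num_instances; };
    struct reg_state     { struct common_header hdr; uint8_t payload[]; };

    /* Plausible stand-in for amdgpu_reginst_size(): bytes for n instance
     * blocks, each carrying num_regs trailing register entries. */
    static size_t reginst_size(size_t n, size_t inst_size, size_t num_regs)
    {
        return n * (inst_size + num_regs * sizeof(struct smn_reg_data));
    }

    static long serialize(void *buf, size_t max_size, uint32_t n, uint32_t num_regs)
    {
        struct reg_state *state = buf;
        size_t szbuf = sizeof(*state) +
                       reginst_size(n, sizeof(struct inst_regs), num_regs);
        uint8_t *p;

        if (!buf || max_size < szbuf)
            return -1;                  /* -EINVAL / -EOVERFLOW upstream */

        p = state->payload;
        for (uint32_t i = 0; i < n; i++) {
            struct inst_regs *regs = (struct inst_regs *)p;

            regs->hdr.instance = i;
            regs->hdr.num_smn_regs = num_regs;
            /* The driver fills vals[] via aqua_read_smn_ext() here. */
            memset(regs->vals, 0, num_regs * sizeof(struct smn_reg_data));
            /* Step past this block, including its variable-length tail. */
            p = (uint8_t *)&regs->vals[num_regs];
        }

        state->hdr.structure_size = (uint32_t)szbuf;
        state->hdr.num_instances = n;
        return (long)szbuf;
    }

    int main(void)
    {
        static uint64_t buf[64];        /* 8-byte aligned scratch buffer */
        printf("bytes written: %ld\n", serialize(buf, sizeof(buf), 2, 4));
        return 0;
    }

The same cursor discipline is what makes the up-front szbuf computation safe: the walk can never write past the bound the size check already validated.
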
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index f0737fb3a..d1bba9c64 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -30,6 +30,8 @@
#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+#define regATHUB_MISC_CNTL_V3_3_0 0x00d8
+#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX 0
static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
@@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
break;
+ case IP_VERSION(3, 3, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0);
+ break;
default:
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
break;
@@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
case IP_VERSION(3, 0, 1):
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
break;
+ case IP_VERSION(3, 3, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data);
+ break;
default:
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 1b51be9b5..ac51d19b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -395,7 +395,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
(*ptr)++;
return;
}
- return;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 3ee219aa2..7672abe6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -28,6 +28,7 @@
#include <acpi/video.h>
+#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bb666cb75..587ee632a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7af277f61..f22ec2736 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 143efc37a..4dbe9b325 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index adeddfb7f..05bcce233 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -21,6 +21,7 @@
*
*/
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 806a8cf90..0afe86bcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -67,6 +67,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
@@ -106,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
-static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
- SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
-};
-
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -293,6 +277,9 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
@@ -300,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
- case IP_VERSION(11, 5, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_5_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
- break;
default:
break;
}
@@ -564,7 +546,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
if (!amdgpu_sriov_vf(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
+ adev->pdev->revision == 0xCE)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
@@ -1644,7 +1630,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
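
The one-character fix in the hunk above (the '|=' that became '&=') matters: the per-SA loop optimistically marks every RB behind an active SA as enabled, while global_active_rb_bitmap is what the hardware actually reports after harvesting, so the final mask must be the intersection, not the union. A tiny runnable illustration with invented bitmap values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical harvested part: the per-SA loop assumed both RBs
         * are present (0b11), but the global bitmap read back from the
         * hardware says only RB0 survived harvesting (0b01). */
        uint32_t active_rb_bitmap = 0x3;
        uint32_t global_active_rb_bitmap = 0x1;

        printf("old |= : 0x%x\n", active_rb_bitmap | global_active_rb_bitmap); /* 0x3, over-reports */
        printf("new &= : 0x%x\n", active_rb_bitmap & global_active_rb_bitmap); /* 0x1, correct */
        return 0;
    }

With the union, backend_enable_mask and num_rbs would count a fused-off RB as usable.
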
@@ -4466,11 +4452,43 @@ static int gfx_v11_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ int req)
+{
+ u32 i, tmp, val;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* Request with MeId=2, PipeId=0 */
+ tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
+ WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
+
+ val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
+ if (req) {
+ if (val == tmp)
+ break;
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
+ REQUEST, 1);
+
+ /* unlocked or locked by firmware */
+ if (val != tmp)
+ break;
+ }
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
+
static int gfx_v11_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- int i, j, k;
+ int r, i, j, k;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
@@ -4510,6 +4528,13 @@ static int gfx_v11_0_soft_reset(void *handle)
}
}
+ /* Try to acquire the gfx mutex before access to CP_VMID_RESET */
+ r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+ if (r) {
+ DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
+ return r;
+ }
+
WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
// Read CP_VMID_RESET register three times.
@@ -4518,6 +4543,13 @@ static int gfx_v11_0_soft_reset(void *handle)
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
RREG32_SOC15(GC, 0, regCP_VMID_RESET);
+ /* release the gfx mutex */
+ r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
+ if (r) {
+ DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
+ return r;
+ }
+
for (i = 0; i < adev->usec_timeout; i++) {
if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
!RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
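
The new gfx_v11_0_request_gfx_index_mutex() above brackets the CP_VMID_RESET access with a hardware mutex register: to lock, write REQUEST=1 together with a client id and poll until the readback equals the written value (our request latched and nobody else holds the lock); to unlock, write REQUEST=0 and poll until the readback no longer looks like a pending request from us, which also covers the register having been re-grabbed by firmware. A compilable toy model of that write-then-readback protocol follows; the bit layout, accessor names, and the single-variable "hardware" are all invented for illustration, while the real code goes through REG_SET_FIELD()/WREG32_SOC15() on CP_GFX_INDEX_MUTEX:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout: bit 0 = REQUEST, bits 7:1 = CLIENTID. */
    #define MUTEX_REQUEST   (1u << 0)
    #define MUTEX_CLIENT(c) ((uint32_t)(c) << 1)

    static uint32_t mutex_reg;          /* stand-in for the MMIO register */
    static uint32_t mmio_read(void)    { return mutex_reg; }
    static void mmio_write(uint32_t v) { mutex_reg = v; }

    static int hw_mutex_request(unsigned int client, bool lock,
                                unsigned int timeout_us)
    {
        uint32_t want = MUTEX_CLIENT(client) | (lock ? MUTEX_REQUEST : 0);

        for (unsigned int i = 0; i < timeout_us; i++) {
            mmio_write(want);
            uint32_t val = mmio_read();

            if (lock && val == want)
                return 0;               /* our request latched: locked */
            if (!lock && val != (want | MUTEX_REQUEST))
                return 0;               /* no pending request from us: released */
            /* udelay(1) between retries in the real driver */
        }
        return -1;                      /* -EINVAL upstream */
    }

    int main(void)
    {
        printf("lock:   %d\n", hw_mutex_request(4, true, 10));
        printf("unlock: %d\n", hw_mutex_request(4, false, 10));
        return 0;
    }
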
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 4a09cc0d8..131cddbdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -3828,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
/* the caller should make sure initialize value of
* err_data->ue_count and err_data->ce_count
*/
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
}
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
@@ -3882,150 +3882,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
mutex_unlock(&adev->grbm_idx_mutex);
}
-static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- uint32_t data;
-
- data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS);
- if (data) {
- dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
- }
-
- data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS);
- if (data) {
- dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
- }
-
- data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
- regVML2_WALKER_MEM_ECC_STATUS);
- if (data) {
- dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS,
- 0x3);
- }
-}
-
-static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev,
- uint32_t status, int xcc_id)
-{
- struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
- uint32_t i, simd, wave;
- uint32_t wave_status;
- uint32_t wave_pc_lo, wave_pc_hi;
- uint32_t wave_exec_lo, wave_exec_hi;
- uint32_t wave_inst_dw0, wave_inst_dw1;
- uint32_t wave_ib_sts;
-
- for (i = 0; i < 32; i++) {
- if (!((i << 1) & status))
- continue;
-
- simd = i / cu_info->max_waves_per_simd;
- wave = i % cu_info->max_waves_per_simd;
-
- wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
- wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
- wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
- wave_exec_lo =
- wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
- wave_exec_hi =
- wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
- wave_inst_dw0 =
- wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
- wave_inst_dw1 =
- wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
- wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
-
- dev_info(
- adev->dev,
- "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
- simd, wave, wave_status,
- ((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
- ((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
- ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
- wave_ib_sts);
- }
-}
-
-static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- uint32_t se_idx, sh_idx, cu_idx;
- uint32_t status;
-
- mutex_lock(&adev->grbm_idx_mutex);
- for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
- for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
- for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
- gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
- cu_idx, xcc_id);
- status = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
- regSQ_TIMEOUT_STATUS);
- if (status != 0) {
- dev_info(
- adev->dev,
- "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
- se_idx, sh_idx, cu_idx);
- gfx_v9_4_3_log_cu_timeout_status(
- adev, status, xcc_id);
- }
- /* clear old status */
- WREG32_SOC15(GC, GET_INST(GC, xcc_id),
- regSQ_TIMEOUT_STATUS, 0);
- }
- }
- }
- gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
- xcc_id);
- mutex_unlock(&adev->grbm_idx_mutex);
-}
-
-static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev,
- void *ras_error_status, int xcc_id)
-{
- gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id);
- gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id);
-}
-
-static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3);
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3);
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3);
-}
-
-static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev,
- int xcc_id)
-{
- uint32_t se_idx, sh_idx, cu_idx;
-
- mutex_lock(&adev->grbm_idx_mutex);
- for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) {
- for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) {
- for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) {
- gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx,
- cu_idx, xcc_id);
- WREG32_SOC15(GC, GET_INST(GC, xcc_id),
- regSQ_TIMEOUT_STATUS, 0);
- }
- }
- }
- gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
- xcc_id);
- mutex_unlock(&adev->grbm_idx_mutex);
-}
-
-static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev,
- void *ras_error_status, int xcc_id)
-{
- gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id);
- gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id);
-}
-
static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
void *ras_error_status, int xcc_id)
{
@@ -4067,16 +3923,6 @@ static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
}
-static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev)
-{
- amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status);
-}
-
-static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev)
-{
- amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status);
-}
-
static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
{
amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
@@ -4394,8 +4240,6 @@ struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = {
.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
- .query_ras_error_status = &gfx_v9_4_3_query_ras_error_status,
- .reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status,
};
struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 95d06da54..49aecdcee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -456,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
- tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
- WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index ced2e1bdc..e67a62db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -883,7 +883,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* GRBM interface.
*/
if ((vmhub == AMDGPU_GFXHUB(0)) &&
- (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
+ (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2)))
RREG32_NO_KIQ(req);
for (j = 0; j < adev->usec_timeout; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index bc38b90f8..88ea58d5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
@@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
- .set = jpeg_v4_0_set_interrupt_state,
.process = jpeg_v4_0_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 6ede85b28..78b74daf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
- amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
return 0;
}
@@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
- .set = jpeg_v4_0_5_set_interrupt_state,
.process = jpeg_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 4dfec56e1..26d71a223 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -408,6 +408,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 9b0146732..fb53aacdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -652,8 +652,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
}
static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 6d24c8492..19986ff6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
+ "errors detected in %s block\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index db013f861..b4723d68e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -603,8 +603,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
+ "errors detected in %s block\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 8eed9a73d..1caed0163 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -366,7 +366,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+ << (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
@@ -2135,7 +2136,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
&ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
}
static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 0058f3f7c..46f9f58d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -292,17 +292,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
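
This sdma_v5_2 change and the sdma_v4_4_2 hunk earlier in the patch work around the same limitation from two sides: the NBIO hdp_flush_reg table presumably only defines ref_and_mask bits for the first SDMA engines, so shifting ref_and_mask_sdma0 left by a raw ring->me walks off the defined bits on parts with more instances. On aqua vanjaram each AID brings its own NBIO, so sdma_v4_4_2 wraps the shift with ring->me % adev->sdma.num_inst_per_aid; sdma_v5_2 instead falls back to a direct amdgpu_asic_flush_hdp() for me > 1. As a concrete (hypothetical) failure mode: if sdma0's done/req bit were bit 10 and sdma1's bit 11, a shift for me == 2 would have the POLL_REGMEM packet watch bit 12, which the HDP flush registers never assert, so the wait would only end when the 0xfff retry count ran out.
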
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f9ba18030..1c614451d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -925,6 +925,7 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
.pre_asic_init = &soc15_pre_asic_init,
.query_video_codecs = &soc15_query_video_codecs,
.encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
+ .get_reg_state = &aqua_vanjaram_get_reg_state,
};
static int soc15_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index eac54042c..1444b7765 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -27,6 +27,7 @@
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+#include "amdgpu_reg_state.h"
extern const struct amdgpu_ip_block_version vega10_common_ip_block;
@@ -114,6 +115,9 @@ int aldebaran_reg_base_init(struct amdgpu_device *adev);
void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
+ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
+ enum amdgpu_reg_state reg_state, void *buf,
+ size_t max_size);
void vega10_doorbell_index_init(struct amdgpu_device *adev);
void vega20_doorbell_index_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 4d7188912..fd4505fa4 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -450,10 +450,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- return false;
default:
return true;
}
@@ -714,7 +712,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_GFX_PG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ if (adev->rev_id == 0)
+ adev->external_rev_id = 0x1;
+ else
+ adev->external_rev_id = adev->rev_id + 0x10;
break;
default:
/* FIXME: not supported yet */
@@ -832,10 +833,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg1, sol_reg2;
+
+ /* Reset on resume only when both conditions hold:
+ * 1) this is a dGPU (APUs are skipped), and
+ * 2) the S3 entry was aborted while the PSP trusted OS is still active.
+ */
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+ !adev->suspend_complete) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+ msleep(100);
+ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return (sol_reg1 != sol_reg2);
+ }
+
+ return false;
+}
+
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc21_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend aborted, resetting...");
+ soc21_asic_reset(adev);
+ }
+
return soc21_common_hw_init(adev);
}
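
soc21_need_reset_on_resume() infers an aborted S3 entry from PSP liveness: MP0_SMN_C2PMSG_81 acts as a sign-of-life scratch, so two samples taken 100 ms apart that differ mean the trusted OS kept running, i.e. suspend never completed, and the dGPU must be reset before hw_init is replayed. A runnable toy version of the double-sample idiom, with the register and the sleep faked by stubs (all names below are invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins: a counter fakes firmware that is alive and ticking. */
    static uint32_t fake_sol = 0x1234;
    static uint32_t read_sol_scratch(void) { return fake_sol++; }
    static void fake_msleep(unsigned int ms) { (void)ms; }

    static bool suspend_was_aborted(bool is_dgpu, bool in_s3, bool suspend_complete)
    {
        if (!is_dgpu || !in_s3 || suspend_complete)
            return false;

        uint32_t a = read_sol_scratch();
        fake_msleep(100);
        uint32_t b = read_sol_scratch();
        /* A changing value means the trusted OS never went down. */
        return a != b;
    }

    int main(void)
    {
        printf("needs reset: %d\n", suspend_was_aborted(true, true, false));
        return 0;
    }
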
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e9c2ff74f..7458a218e 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
+#include "mp/mp_13_0_6_sh_mask.h"
const uint32_t
umc_v12_0_channel_idx_tbl[]
@@ -88,16 +89,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
umc_v12_0_reset_error_count_per_channel, NULL);
}
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
+ if (amdgpu_ras_is_poison_mode_supported(adev) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+ return true;
+
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
+ if (amdgpu_ras_is_poison_mode_supported(adev) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+ return false;
+
return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
@@ -105,7 +116,7 @@ bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
/* Identify data parity error in replay mode */
((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
- !(umc_v12_0_is_uncorrectable_error(mc_umc_status)))));
+ !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}
static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
@@ -124,7 +135,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
- if (umc_v12_0_is_correctable_error(mc_umc_status))
+ if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
*error_count += 1;
}
@@ -142,7 +153,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
- if (umc_v12_0_is_uncorrectable_error(mc_umc_status))
+ if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
*error_count += 1;
}
@@ -166,8 +177,8 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
- amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
- amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
return 0;
}
@@ -360,6 +371,59 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
return 0;
}
+static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_mca_smu_log_ras_error(adev,
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+ amdgpu_mca_smu_log_ras_error(adev,
+ AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+}
+
+static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_node *err_node;
+ uint64_t mc_umc_status;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ for_each_ras_error(err_node, err_data) {
+ mc_umc_status = err_node->err_info.err_addr.err_status;
+ if (!mc_umc_status)
+ continue;
+
+ if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
+ uint64_t mca_addr, err_addr, mca_ipid;
+ uint32_t InstanceIdLo;
+ struct amdgpu_smuio_mcm_config_info *mcm_info;
+
+ mcm_info = &err_node->err_info.mcm_info;
+ mca_addr = err_node->err_info.err_addr.err_addr;
+ mca_ipid = err_node->err_info.err_addr.err_ipid;
+
+ err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+ dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+ mca_ipid,
+ mcm_info->die_id,
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ err_addr);
+
+ umc_v12_0_convert_error_address(adev,
+ err_data, err_addr,
+ MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+ MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+ mcm_info->die_id);
+
+ /* Clear umc error address content */
+ memset(&err_node->err_info.err_addr,
+ 0, sizeof(err_node->err_info.err_addr));
+ }
+ }
+}
+
static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
@@ -386,4 +450,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
},
.err_cnt_init = umc_v12_0_err_cnt_init,
.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
+ .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
+ .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
};
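
Threading adev into umc_v12_0_is_{un,}correctable_error() lets the classification depend on amdgpu_ras_is_poison_mode_supported(): with poison mode on, a status word with Val and Deferred set is now reported as uncorrectable and explicitly rejected by the correctable check, so a deferred (poisoned) error is counted exactly once, as a UE, instead of leaking into the CE statistics. Reading the two helpers together, the evaluation order is roughly:

    Val && Deferred, poison mode on     ->  uncorrectable (deferred poison)
    Val && (PCC || UC || TCC)           ->  uncorrectable
    Val && CECC, or the UECC/replay
    data-parity cases (ErrorCodeExt
    0x5 or 0xb) that are not UEs        ->  correctable

The new ecc_info_query_* callbacks add a second reporting path that consumes MCA records already logged by the SMU via amdgpu_mca_smu_log_ras_error(), rather than reading the UMC status registers directly: each uncorrectable record has its channel/instance decoded from the IPID, its address converted, and the record cleared once consumed.
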
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index b34b1e358..e8de3a922 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -117,8 +117,12 @@
(pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
} while (0)
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status);
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status);
+#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
+ (((_ipid_lo) >> 12) & 0xF))
+#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
extern const uint32_t
umc_v12_0_channel_idx_tbl[]
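
The two IPID macros added to the header pack the UMC topology into the low dword of the MCA IPID: the instance is bits [23:21], and the channel is bits [15:12] plus 4 when bit 20 is set. A runnable decode of a made-up IPID value:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from umc_v12_0.h above. */
    #define MCA_IPID_LO_2_UMC_CH(lo)   (((((lo) >> 20) & 0x1) * 4) + (((lo) >> 12) & 0xF))
    #define MCA_IPID_LO_2_UMC_INST(lo) (((lo) >> 21) & 0x7)

    int main(void)
    {
        /* Hypothetical IPID low dword: inst = 5, bit 20 set, nibble 15:12 = 3. */
        uint32_t ipid_lo = (5u << 21) | (1u << 20) | (3u << 12); /* 0x00b03000 */

        printf("ch=%u inst=%u\n",
               (unsigned)MCA_IPID_LO_2_UMC_CH(ipid_lo),          /* 7 = 4 + 3 */
               (unsigned)MCA_IPID_LO_2_UMC_INST(ipid_lo));       /* 5 */
        return 0;
    }
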
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 530549314..a3ee3c4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
uint64_t reg_value;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
- dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+ dev_info(adev->dev, "Deferred error\n");
if (mc_umc_status)
dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
index 8e7b763cf..fd23cd348 100644
--- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
+ ring->wptr = 0;
+
data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 48bfcd0d5..8ab01ae91 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -100,6 +100,31 @@ static int vcn_v4_0_early_init(void *handle)
return amdgpu_vcn_early_init(adev);
}
+static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
+ IP_VERSION(4, 0, 2)) {
+ fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+ fw_shared->drm_key_wa.method =
+ AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+ }
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+ return 0;
+}
+
/**
* vcn_v4_0_sw_init - sw init for VCN block
*
@@ -124,8 +149,6 @@ static int vcn_v4_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -161,23 +184,7 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = 1;
-
- fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
- fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
- AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
-
- if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
- IP_VERSION(4, 0, 2)) {
- fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
- fw_shared->drm_key_wa.method =
- AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
- }
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v4_0_fw_shared_init(adev, i);
}
if (amdgpu_sriov_vf(adev)) {
@@ -1273,6 +1280,9 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
+ /* fw_shared must be (re)initialized before building the MMSCH init table */
+ vcn_v4_0_fw_shared_init(adev, i);
+
table_size = 0;
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
@@ -2008,22 +2018,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
}
/**
- * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
- unsigned type, enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
-/**
* vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
*
* @adev: amdgpu_device pointer
@@ -2087,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
- .set = vcn_v4_0_set_interrupt_state,
.process = vcn_v4_0_process_interrupt,
};
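
Hoisting the per-instance fw_shared programming into vcn_v4_0_fw_shared_init() is what makes the second call site in vcn_v4_0_start_sriov() possible: under SR-IOV the shared buffer contents evidently cannot be assumed intact by the time the MMSCH init table is built (the comment ahead of the call spells this out), so the unified-queue flag, the SMU DPM interface type, and the VCN 4.0.2 DRM-key-injection workaround are re-written on every SR-IOV start. The helper is effectively idempotent, plain stores plus OR-ed flag bits (presumably the fwlog re-init is tolerated as well), so calling it from both sw_init and start_sriov is safe.
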
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 2eda30e78..49e4c3c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle)
vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
-
- amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@@ -1669,22 +1667,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s
}
/**
- * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
- unsigned type, enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
-/**
* vcn_v4_0_5_process_interrupt - process VCN block interrupt
*
* @adev: amdgpu_device pointer
@@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
- .set = vcn_v4_0_5_set_interrupt_state,
.process = vcn_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
index 174f13eff..d20060a51 100644
--- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
@@ -96,6 +96,10 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
amdgpu_vpe_psp_update_sram(adev);
+
+ /* Config DPM */
+ amdgpu_vpe_configure_dpm(vpe);
+
return 0;
}
@@ -128,6 +132,8 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
+ /* Config DPM */
+ amdgpu_vpe_configure_dpm(vpe);
return 0;
}
@@ -264,6 +270,15 @@ static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe)
vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI;
vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT;
+ vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;
+ vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4;
+ vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3;
+ vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4;
+ vpe->regs.dpm_busy_clamp_threshold = regVPEC_QUEUE7_DUMMY2;
+ vpe->regs.dpm_idle_clamp_threshold = regVPEC_QUEUE7_DUMMY3;
+ vpe->regs.dpm_request_lv = regVPEC_QUEUE7_DUMMY1;
+ vpe->regs.context_indicator = regVPEC_QUEUE6_DUMMY3;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index d7cd5fa31..d1caaf0e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9eef807, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
0x87ea6a6a, 0xb9faf802,
- 0xbe80226c, 0xbf810000,
+ 0xbe80226c, 0xbf9b0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
@@ -2065,11 +2065,11 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
- 0xbf820001, 0xbf820220,
+ 0xbf820001, 0xbf820221,
0xb0804004, 0xb978f802,
0x8a78ff78, 0x00020006,
0xb97bf803, 0x876eff78,
@@ -2118,391 +2118,391 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf900004, 0xbf8cc07f,
0x877aff7f, 0x04000000,
0x8f7a857a, 0x886d7a6d,
- 0xbefa037e, 0x877bff7f,
- 0x0000ffff, 0xbefe03c1,
- 0xbeff03c1, 0xdc5f8000,
- 0x007a0000, 0x7e000280,
- 0xbefe037a, 0xbeff037b,
- 0xb97b02dc, 0x8f7b997b,
- 0xb97a3a05, 0x807a817a,
- 0xbf0d997b, 0xbf850002,
- 0x8f7a897a, 0xbf820001,
- 0x8f7a8a7a, 0xb97b1e06,
- 0x8f7b8a7b, 0x807a7b7a,
+ 0x7e008200, 0xbefa037e,
0x877bff7f, 0x0000ffff,
- 0x807aff7a, 0x00000200,
- 0x807a7e7a, 0x827b807b,
- 0xd7610000, 0x00010870,
- 0xd7610000, 0x00010a71,
- 0xd7610000, 0x00010c72,
- 0xd7610000, 0x00010e73,
- 0xd7610000, 0x00011074,
- 0xd7610000, 0x00011275,
- 0xd7610000, 0x00011476,
- 0xd7610000, 0x00011677,
- 0xd7610000, 0x00011a79,
- 0xd7610000, 0x00011c7e,
- 0xd7610000, 0x00011e7f,
- 0xbefe03ff, 0x00003fff,
- 0xbeff0380, 0xdc5f8040,
- 0x007a0000, 0xd760007a,
- 0x00011d00, 0xd760007b,
- 0x00011f00, 0xbefe037a,
- 0xbeff037b, 0xbef4037e,
- 0x8775ff7f, 0x0000ffff,
- 0x8875ff75, 0x00040000,
- 0xbef60380, 0xbef703ff,
- 0x10807fac, 0xbef1037c,
- 0xbef00380, 0xb97302dc,
- 0x8f739973, 0xbefe03c1,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820002,
- 0xbeff03c1, 0xbf820009,
+ 0xbefe03c1, 0xbeff03c1,
+ 0xdc5f8000, 0x007a0000,
+ 0x7e000280, 0xbefe037a,
+ 0xbeff037b, 0xb97b02dc,
+ 0x8f7b997b, 0xb97a3a05,
+ 0x807a817a, 0xbf0d997b,
+ 0xbf850002, 0x8f7a897a,
+ 0xbf820001, 0x8f7a8a7a,
+ 0xb97b1e06, 0x8f7b8a7b,
+ 0x807a7b7a, 0x877bff7f,
+ 0x0000ffff, 0x807aff7a,
+ 0x00000200, 0x807a7e7a,
+ 0x827b807b, 0xd7610000,
+ 0x00010870, 0xd7610000,
+ 0x00010a71, 0xd7610000,
+ 0x00010c72, 0xd7610000,
+ 0x00010e73, 0xd7610000,
+ 0x00011074, 0xd7610000,
+ 0x00011275, 0xd7610000,
+ 0x00011476, 0xd7610000,
+ 0x00011677, 0xd7610000,
+ 0x00011a79, 0xd7610000,
+ 0x00011c7e, 0xd7610000,
+ 0x00011e7f, 0xbefe03ff,
+ 0x00003fff, 0xbeff0380,
+ 0xdc5f8040, 0x007a0000,
+ 0xd760007a, 0x00011d00,
+ 0xd760007b, 0x00011f00,
+ 0xbefe037a, 0xbeff037b,
+ 0xbef4037e, 0x8775ff7f,
+ 0x0000ffff, 0x8875ff75,
+ 0x00040000, 0xbef60380,
+ 0xbef703ff, 0x10807fac,
+ 0xbef1037c, 0xbef00380,
+ 0xb97302dc, 0x8f739973,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0xbeff0380,
+ 0xbf820002, 0xbeff03c1,
+ 0xbf820009, 0xbef603ff,
+ 0x01000000, 0xe0704080,
+ 0x705d0100, 0xe0704100,
+ 0x705d0200, 0xe0704180,
+ 0x705d0300, 0xbf820008,
0xbef603ff, 0x01000000,
- 0xe0704080, 0x705d0100,
- 0xe0704100, 0x705d0200,
- 0xe0704180, 0x705d0300,
- 0xbf820008, 0xbef603ff,
- 0x01000000, 0xe0704100,
- 0x705d0100, 0xe0704200,
- 0x705d0200, 0xe0704300,
- 0x705d0300, 0xb9703a05,
- 0x80708170, 0xbf0d9973,
- 0xbf850002, 0x8f708970,
- 0xbf820001, 0x8f708a70,
- 0xb97a1e06, 0x8f7a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0x7e000280,
- 0x7e020280, 0x7e040280,
- 0xbefc0380, 0xd7610002,
- 0x0000f871, 0x807c817c,
- 0xd7610002, 0x0000f86c,
- 0x807c817c, 0x8a7aff6d,
- 0x80000000, 0xd7610002,
- 0x0000f87a, 0x807c817c,
- 0xd7610002, 0x0000f86e,
- 0x807c817c, 0xd7610002,
- 0x0000f86f, 0x807c817c,
- 0xd7610002, 0x0000f878,
- 0x807c817c, 0xb97af803,
- 0xd7610002, 0x0000f87a,
- 0x807c817c, 0xd7610002,
- 0x0000f87b, 0x807c817c,
- 0xb971f801, 0xd7610002,
- 0x0000f871, 0x807c817c,
- 0xb971f814, 0xd7610002,
- 0x0000f871, 0x807c817c,
- 0xb971f815, 0xd7610002,
- 0x0000f871, 0x807c817c,
- 0xbefe03ff, 0x0000ffff,
- 0xbeff0380, 0xe0704000,
- 0x705d0200, 0xbefe03c1,
+ 0xe0704100, 0x705d0100,
+ 0xe0704200, 0x705d0200,
+ 0xe0704300, 0x705d0300,
0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
0x8f7a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0xbef603ff, 0x01000000,
- 0xbef90380, 0xbefc0380,
- 0xbf800000, 0xbe802f00,
- 0xbe822f02, 0xbe842f04,
- 0xbe862f06, 0xbe882f08,
- 0xbe8a2f0a, 0xbe8c2f0c,
- 0xbe8e2f0e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
+ 0x7e000280, 0x7e020280,
+ 0x7e040280, 0xbefc0380,
+ 0xd7610002, 0x0000f871,
+ 0x807c817c, 0xd7610002,
+ 0x0000f86c, 0x807c817c,
+ 0x8a7aff6d, 0x80000000,
+ 0xd7610002, 0x0000f87a,
+ 0x807c817c, 0xd7610002,
+ 0x0000f86e, 0x807c817c,
+ 0xd7610002, 0x0000f86f,
+ 0x807c817c, 0xd7610002,
+ 0x0000f878, 0x807c817c,
+ 0xb97af803, 0xd7610002,
+ 0x0000f87a, 0x807c817c,
+ 0xd7610002, 0x0000f87b,
+ 0x807c817c, 0xb971f801,
+ 0xd7610002, 0x0000f871,
+ 0x807c817c, 0xb971f814,
+ 0xd7610002, 0x0000f871,
+ 0x807c817c, 0xb971f815,
+ 0xd7610002, 0x0000f871,
+ 0x807c817c, 0xbefe03ff,
+ 0x0000ffff, 0xbeff0380,
+ 0xe0704000, 0x705d0200,
+ 0xbefe03c1, 0xb9703a05,
+ 0x80708170, 0xbf0d9973,
+ 0xbf850002, 0x8f708970,
+ 0xbf820001, 0x8f708a70,
+ 0xb97a1e06, 0x8f7a8a7a,
+ 0x80707a70, 0xbef603ff,
+ 0x01000000, 0xbef90380,
+ 0xbefc0380, 0xbf800000,
+ 0xbe802f00, 0xbe822f02,
+ 0xbe842f04, 0xbe862f06,
+ 0xbe882f08, 0xbe8a2f0a,
+ 0xbe8c2f0c, 0xbe8e2f0e,
+ 0xd7610002, 0x0000f200,
0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
+ 0x0000f201, 0x80798179,
+ 0xd7610002, 0x0000f202,
0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
+ 0x0000f203, 0x80798179,
+ 0xd7610002, 0x0000f204,
0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
+ 0x0000f205, 0x80798179,
+ 0xd7610002, 0x0000f206,
0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
+ 0x0000f207, 0x80798179,
+ 0xd7610002, 0x0000f208,
0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
+ 0x0000f209, 0x80798179,
+ 0xd7610002, 0x0000f20a,
0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
+ 0x0000f20b, 0x80798179,
+ 0xd7610002, 0x0000f20c,
0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbf840006, 0xe0704000,
- 0x705d0200, 0x8070ff70,
- 0x00000080, 0xbef90380,
- 0x7e040280, 0x807c907c,
- 0xbf0aff7c, 0x00000060,
- 0xbf85ffbc, 0xbe802f00,
- 0xbe822f02, 0xbe842f04,
- 0xbe862f06, 0xbe882f08,
- 0xbe8a2f0a, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
+ 0x0000f20d, 0x80798179,
+ 0xd7610002, 0x0000f20e,
0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
+ 0x0000f20f, 0x80798179,
+ 0xbf06a079, 0xbf840006,
+ 0xe0704000, 0x705d0200,
+ 0x8070ff70, 0x00000080,
+ 0xbef90380, 0x7e040280,
+ 0x807c907c, 0xbf0aff7c,
+ 0x00000060, 0xbf85ffbc,
+ 0xbe802f00, 0xbe822f02,
+ 0xbe842f04, 0xbe862f06,
+ 0xbe882f08, 0xbe8a2f0a,
+ 0xd7610002, 0x0000f200,
0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
+ 0x0000f201, 0x80798179,
+ 0xd7610002, 0x0000f202,
0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
+ 0x0000f203, 0x80798179,
+ 0xd7610002, 0x0000f204,
0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
+ 0x0000f205, 0x80798179,
+ 0xd7610002, 0x0000f206,
0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xe0704000,
- 0x705d0200, 0xbefe03c1,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb97b4306,
- 0x877bc17b, 0xbf840044,
- 0xbf8a0000, 0x877aff6d,
- 0x80000000, 0xbf840040,
- 0x8f7b867b, 0x8f7b827b,
- 0xbef6037b, 0xb9703a05,
- 0x80708170, 0xbf0d9973,
- 0xbf850002, 0x8f708970,
- 0xbf820001, 0x8f708a70,
- 0xb97a1e06, 0x8f7a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef603ff,
- 0x01000000, 0xd7650000,
- 0x000100c1, 0xd7660000,
- 0x000200c1, 0x16000084,
- 0x907c9973, 0x877c817c,
- 0xbf06817c, 0xbefc0380,
- 0xbf850012, 0xbe8303ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8c0000, 0xe0704000,
- 0x705d0100, 0x807c037c,
- 0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000080,
- 0xbf0a7b7c, 0xbf85fff4,
- 0xbf820011, 0xbe8303ff,
- 0x00000100, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8c0000, 0xe0704000,
- 0x705d0100, 0x807c037c,
- 0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
- 0xbf0a7b7c, 0xbf85fff4,
+ 0x0000f207, 0x80798179,
+ 0xd7610002, 0x0000f208,
+ 0x80798179, 0xd7610002,
+ 0x0000f209, 0x80798179,
+ 0xd7610002, 0x0000f20a,
+ 0x80798179, 0xd7610002,
+ 0x0000f20b, 0x80798179,
+ 0xe0704000, 0x705d0200,
0xbefe03c1, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbf850004, 0xbef003ff,
- 0x00000200, 0xbeff0380,
- 0xbf820003, 0xbef003ff,
- 0x00000400, 0xbeff03c1,
- 0xb97b3a05, 0x807b817b,
- 0x8f7b827b, 0x907c9973,
+ 0xbf850002, 0xbeff0380,
+ 0xbf820001, 0xbeff03c1,
+ 0xb97b4306, 0x877bc17b,
+ 0xbf840044, 0xbf8a0000,
+ 0x877aff6d, 0x80000000,
+ 0xbf840040, 0x8f7b867b,
+ 0x8f7b827b, 0xbef6037b,
+ 0xb9703a05, 0x80708170,
+ 0xbf0d9973, 0xbf850002,
+ 0x8f708970, 0xbf820001,
+ 0x8f708a70, 0xb97a1e06,
+ 0x8f7a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000080,
+ 0xbef603ff, 0x01000000,
+ 0xd7650000, 0x000100c1,
+ 0xd7660000, 0x000200c1,
+ 0x16000084, 0x907c9973,
0x877c817c, 0xbf06817c,
- 0xbf850017, 0xbef603ff,
- 0x01000000, 0xbefc0384,
- 0xbf0a7b7c, 0xbf840037,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xe0704000, 0x705d0000,
- 0xe0704080, 0x705d0100,
- 0xe0704100, 0x705d0200,
- 0xe0704180, 0x705d0300,
- 0x807c847c, 0x8070ff70,
- 0x00000200, 0xbf0a7b7c,
- 0xbf85ffef, 0xbf820025,
+ 0xbefc0380, 0xbf850012,
+ 0xbe8303ff, 0x00000080,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf8c0000,
+ 0xe0704000, 0x705d0100,
+ 0x807c037c, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000080, 0xbf0a7b7c,
+ 0xbf85fff4, 0xbf820011,
+ 0xbe8303ff, 0x00000100,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf8c0000,
+ 0xe0704000, 0x705d0100,
+ 0x807c037c, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7c,
+ 0xbf85fff4, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850004,
+ 0xbef003ff, 0x00000200,
+ 0xbeff0380, 0xbf820003,
+ 0xbef003ff, 0x00000400,
+ 0xbeff03c1, 0xb97b3a05,
+ 0x807b817b, 0x8f7b827b,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850017,
0xbef603ff, 0x01000000,
0xbefc0384, 0xbf0a7b7c,
- 0xbf840011, 0x7e008700,
+ 0xbf840037, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0704000,
- 0x705d0000, 0xe0704100,
- 0x705d0100, 0xe0704200,
- 0x705d0200, 0xe0704300,
+ 0x705d0000, 0xe0704080,
+ 0x705d0100, 0xe0704100,
+ 0x705d0200, 0xe0704180,
0x705d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7c, 0xbf85ffef,
- 0xb97b1e06, 0x877bc17b,
- 0xbf84000c, 0x8f7b837b,
- 0x807b7c7b, 0xbefe03c1,
- 0xbeff0380, 0x7e008700,
+ 0xbf820025, 0xbef603ff,
+ 0x01000000, 0xbefc0384,
+ 0xbf0a7b7c, 0xbf840011,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xe0704000, 0x705d0000,
- 0x807c817c, 0x8070ff70,
- 0x00000080, 0xbf0a7b7c,
- 0xbf85fff8, 0xbf82013b,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x10807fac,
- 0xb97202dc, 0x8f729972,
- 0x876eff7f, 0x04000000,
- 0xbf840034, 0xbefe03c1,
- 0x907c9972, 0x877c817c,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb96f4306,
- 0x876fc16f, 0xbf840029,
- 0x8f6f866f, 0x8f6f826f,
- 0xbef6036f, 0xb9783a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x8078ff78,
- 0x00000080, 0xbef603ff,
- 0x01000000, 0x907c9972,
- 0x877c817c, 0xbf06817c,
- 0xbefc0380, 0xbf850009,
- 0xe0310000, 0x781d0000,
- 0x807cff7c, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff8,
- 0xbf820008, 0xe0310000,
- 0x781d0000, 0x807cff7c,
- 0x00000100, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fff8, 0xbef80380,
+ 0xe0704100, 0x705d0100,
+ 0xe0704200, 0x705d0200,
+ 0xe0704300, 0x705d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xb97b1e06,
+ 0x877bc17b, 0xbf84000c,
+ 0x8f7b837b, 0x807b7c7b,
+ 0xbefe03c1, 0xbeff0380,
+ 0x7e008700, 0xe0704000,
+ 0x705d0000, 0x807c817c,
+ 0x8070ff70, 0x00000080,
+ 0xbf0a7b7c, 0xbf85fff8,
+ 0xbf82013b, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0xb97202dc,
+ 0x8f729972, 0x876eff7f,
+ 0x04000000, 0xbf840034,
0xbefe03c1, 0x907c9972,
0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
- 0xb96f3a05, 0x806f816f,
- 0x8f6f826f, 0x907c9972,
- 0x877c817c, 0xbf06817c,
- 0xbf850024, 0xbef603ff,
- 0x01000000, 0xbeee0378,
+ 0xb96f4306, 0x876fc16f,
+ 0xbf840029, 0x8f6f866f,
+ 0x8f6f826f, 0xbef6036f,
+ 0xb9783a05, 0x80788178,
+ 0xbf0d9972, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb96e1e06,
+ 0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbefc0384, 0xbf0a6f7c,
- 0xbf840050, 0xe0304000,
- 0x785d0000, 0xe0304080,
- 0x785d0100, 0xe0304100,
- 0x785d0200, 0xe0304180,
- 0x785d0300, 0xbf8c3f70,
- 0x7e008500, 0x7e028501,
- 0x7e048502, 0x7e068503,
- 0x807c847c, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85ffee, 0xe0304000,
- 0x6e5d0000, 0xe0304080,
- 0x6e5d0100, 0xe0304100,
- 0x6e5d0200, 0xe0304180,
- 0x6e5d0300, 0xbf8c3f70,
- 0xbf820034, 0xbef603ff,
- 0x01000000, 0xbeee0378,
- 0x8078ff78, 0x00000400,
- 0xbefc0384, 0xbf0a6f7c,
- 0xbf840012, 0xe0304000,
- 0x785d0000, 0xe0304100,
- 0x785d0100, 0xe0304200,
- 0x785d0200, 0xe0304300,
- 0x785d0300, 0xbf8c3f70,
- 0x7e008500, 0x7e028501,
- 0x7e048502, 0x7e068503,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xb96f1e06,
- 0x876fc16f, 0xbf84000e,
- 0x8f6f836f, 0x806f7c6f,
- 0xbefe03c1, 0xbeff0380,
+ 0x8078ff78, 0x00000080,
+ 0xbef603ff, 0x01000000,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbefc0380,
+ 0xbf850009, 0xe0310000,
+ 0x781d0000, 0x807cff7c,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7c,
+ 0xbf85fff8, 0xbf820008,
+ 0xe0310000, 0x781d0000,
+ 0x807cff7c, 0x00000100,
+ 0x8078ff78, 0x00000100,
+ 0xbf0a6f7c, 0xbf85fff8,
+ 0xbef80380, 0xbefe03c1,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb96f3a05,
+ 0x806f816f, 0x8f6f826f,
+ 0x907c9972, 0x877c817c,
+ 0xbf06817c, 0xbf850024,
+ 0xbef603ff, 0x01000000,
+ 0xbeee0378, 0x8078ff78,
+ 0x00000200, 0xbefc0384,
+ 0xbf0a6f7c, 0xbf840050,
0xe0304000, 0x785d0000,
+ 0xe0304080, 0x785d0100,
+ 0xe0304100, 0x785d0200,
+ 0xe0304180, 0x785d0300,
0xbf8c3f70, 0x7e008500,
- 0x807c817c, 0x8078ff78,
- 0x00000080, 0xbf0a6f7c,
- 0xbf85fff7, 0xbeff03c1,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807c847c,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85ffee,
0xe0304000, 0x6e5d0000,
- 0xe0304100, 0x6e5d0100,
- 0xe0304200, 0x6e5d0200,
- 0xe0304300, 0x6e5d0300,
- 0xbf8c3f70, 0xb9783a05,
- 0x80788178, 0xbf0d9972,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb96e1e06, 0x8f6e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef603ff,
- 0x01000000, 0xbefc03ff,
- 0x0000006c, 0x80f89078,
- 0xf429003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc847c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0x80f8a078,
- 0xf42d003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc887c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0x80f8c078,
- 0xf431003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0xbe883108,
- 0xbe8a310a, 0xbe8c310c,
- 0xbe8e310e, 0xbf06807c,
- 0xbf84fff0, 0xba80f801,
- 0x00000000, 0xbf8a0000,
+ 0xe0304080, 0x6e5d0100,
+ 0xe0304100, 0x6e5d0200,
+ 0xe0304180, 0x6e5d0300,
+ 0xbf8c3f70, 0xbf820034,
+ 0xbef603ff, 0x01000000,
+ 0xbeee0378, 0x8078ff78,
+ 0x00000400, 0xbefc0384,
+ 0xbf0a6f7c, 0xbf840012,
+ 0xe0304000, 0x785d0000,
+ 0xe0304100, 0x785d0100,
+ 0xe0304200, 0x785d0200,
+ 0xe0304300, 0x785d0300,
+ 0xbf8c3f70, 0x7e008500,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xb96f1e06, 0x876fc16f,
+ 0xbf84000e, 0x8f6f836f,
+ 0x806f7c6f, 0xbefe03c1,
+ 0xbeff0380, 0xe0304000,
+ 0x785d0000, 0xbf8c3f70,
+ 0x7e008500, 0x807c817c,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7c, 0xbf85fff7,
+ 0xbeff03c1, 0xe0304000,
+ 0x6e5d0000, 0xe0304100,
+ 0x6e5d0100, 0xe0304200,
+ 0x6e5d0200, 0xe0304300,
+ 0x6e5d0300, 0xbf8c3f70,
0xb9783a05, 0x80788178,
0xbf0d9972, 0xbf850002,
0x8f788978, 0xbf820001,
0x8f788a78, 0xb96e1e06,
0x8f6e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
0xbef603ff, 0x01000000,
- 0xf4211bfa, 0xf0000000,
- 0x80788478, 0xf4211b3a,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xba80f801, 0x00000000,
+ 0xbf8a0000, 0xb9783a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0xbef603ff,
+ 0x01000000, 0xf4211bfa,
0xf0000000, 0x80788478,
- 0xf4211b7a, 0xf0000000,
- 0x80788478, 0xf4211c3a,
+ 0xf4211b3a, 0xf0000000,
+ 0x80788478, 0xf4211b7a,
0xf0000000, 0x80788478,
- 0xf4211c7a, 0xf0000000,
- 0x80788478, 0xf4211eba,
+ 0xf4211c3a, 0xf0000000,
+ 0x80788478, 0xf4211c7a,
0xf0000000, 0x80788478,
- 0xf4211efa, 0xf0000000,
- 0x80788478, 0xf4211e7a,
+ 0xf4211eba, 0xf0000000,
+ 0x80788478, 0xf4211efa,
0xf0000000, 0x80788478,
- 0xf4211cfa, 0xf0000000,
- 0x80788478, 0xf4211bba,
+ 0xf4211e7a, 0xf0000000,
+ 0x80788478, 0xf4211cfa,
0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef815, 0xbefc036f,
- 0xbefe0370, 0xbeff0371,
- 0x876f7bff, 0x000003ff,
- 0xb9ef4803, 0x876f7bff,
- 0xfffff800, 0x906f8b6f,
- 0xb9efa2c3, 0xb9f3f801,
- 0xb96e3a05, 0x806e816e,
- 0xbf0d9972, 0xbf850002,
- 0x8f6e896e, 0xbf820001,
- 0x8f6e8a6e, 0xb96f1e06,
- 0x8f6f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x876fff6f, 0x0000ffff,
- 0xf4091c37, 0xfa000050,
- 0xf4091d37, 0xfa000060,
- 0xf4011e77, 0xfa000074,
- 0xbf8cc07f, 0x876dff6d,
- 0x0000ffff, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9faf802,
- 0xbe80226c, 0xbf810000,
+ 0xb9eef814, 0xf4211bba,
+ 0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef815,
+ 0xbefc036f, 0xbefe0370,
+ 0xbeff0371, 0x876f7bff,
+ 0x000003ff, 0xb9ef4803,
+ 0x876f7bff, 0xfffff800,
+ 0x906f8b6f, 0xb9efa2c3,
+ 0xb9f3f801, 0xb96e3a05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbf850002, 0x8f6e896e,
+ 0xbf820001, 0x8f6e8a6e,
+ 0xb96f1e06, 0x8f6f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x876fff6f,
+ 0x0000ffff, 0xf4091c37,
+ 0xfa000050, 0xf4091d37,
+ 0xfa000060, 0xf4011e77,
+ 0xfa000074, 0xbf8cc07f,
+ 0x876dff6d, 0x0000ffff,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9faf802, 0xbe80226c,
+ 0xbf9b0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx11_hex[] = {
@@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xb8eef802, 0xbf0d866e,
0xbfa20002, 0xb97af802,
0xbe80486c, 0xb97af802,
- 0xbe804a6c, 0xbfb00000,
+ 0xbe804a6c, 0xbfb10000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index fdab64624..71b3dc0c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -369,6 +369,12 @@ L_SLEEP:
s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
#if NO_SQC_STORE
+#if ASIC_FAMILY <= CHIP_SIENNA_CICHLID
+ // gfx10: If there was a VALU exception, the exception state must be
+ // cleared before executing the VALU instructions below.
+ v_clrexcp
+#endif
+
// Trap temporaries must be saved via VGPR but all VGPRs are in use.
// There is no ttmp space to hold the resource constant for VGPR save.
// Save v0 by itself since it requires only two SGPRs.
@@ -1098,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV:
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
- s_endpgm
+ s_endpgm_saved
end
function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index e506411ad..bb2633820 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -921,7 +921,7 @@ L_RESTORE:
/* the END */
/**************************************************************************/
L_END_PGM:
- s_endpgm
+ s_endpgm_saved
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f6d4748c1..08da993df 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -778,8 +778,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
- args->num_of_nodes), GFP_KERNEL);
+ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+ GFP_KERNEL);
if (!pa)
return -ENOMEM;
@@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
- amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+ err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+ if (err)
+ goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
@@ -1564,16 +1566,11 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
{
struct kfd_ioctl_import_dmabuf_args *args = data;
struct kfd_process_device *pdd;
- struct dma_buf *dmabuf;
int idr_handle;
uint64_t size;
void *mem;
int r;
- dmabuf = dma_buf_get(args->dmabuf_fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
@@ -1587,10 +1584,10 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
goto err_unlock;
}
- r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
- args->va_addr, pdd->drm_priv,
- (struct kgd_mem **)&mem, &size,
- NULL);
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
+ args->va_addr, pdd->drm_priv,
+ (struct kgd_mem **)&mem, &size,
+ NULL);
if (r)
goto err_unlock;
@@ -1601,7 +1598,6 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
}
mutex_unlock(&p->mutex);
- dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1612,7 +1608,6 @@ err_free:
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
- dma_buf_put(dmabuf);
return r;
}
@@ -1855,8 +1850,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p)
return num_of_bos;
}
-static int criu_get_prime_handle(struct kgd_mem *mem, int flags,
- u32 *shared_fd)
+static int criu_get_prime_handle(struct kgd_mem *mem,
+ int flags, u32 *shared_fd)
{
struct dma_buf *dmabuf;
int ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c0e715433..c0ae1a974 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1997,6 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
+ kfd_hws_hang(dqm);
return -ETIME;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 0f58be651..739721254 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -880,6 +880,10 @@ static int copy_signaled_event_data(uint32_t num_events,
dst = &data[i].memory_exception_data;
src = &event->memory_exception_data;
size = sizeof(struct kfd_hsa_memory_exception_data);
+ } else if (event->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
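+ /* hw_exception_data shares a union with memory_exception_data
+ * in struct kfd_event_data, so the same destination slot is used
+ */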
+ dst = &data[i].memory_exception_data;
+ src = &event->hw_exception_data;
+ size = sizeof(struct kfd_hsa_hw_exception_data);
} else if (event->type == KFD_EVENT_TYPE_SIGNAL &&
waiter->event_age_enabled) {
dst = &data[i].signal_event_data.last_event_age;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b8680e075..bdc01ca96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
-{
- unsigned long cpages = 0;
- unsigned long i;
-
- for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- migrate->src[i] & MIGRATE_PFN_MIGRATE)
- cpages++;
- }
- return cpages;
-}
-
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
unsigned long upages = 0;
@@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
+ unsigned long mpages = 0;
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -442,20 +430,21 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
- pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
cpages, npages);
else
- pr_debug("0x%lx pages migrated\n", cpages);
+ pr_debug("0x%lx pages collected\n", cpages);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
-
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
+ mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
+ pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
+
kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger);
@@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free:
kvfree(buf);
out:
- if (!r && cpages) {
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
- WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+ WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
- return cpages;
+ return mpages;
}
return r;
}
@@ -479,6 +468,8 @@ out:
* svm_migrate_ram_to_vram - migrate svm range from system to device
* @prange: range structure
* @best_loc: the device to migrate to
+ * @start_mgr: start page to migrate
+ * @last_mgr: last page to migrate
* @mm: the process mm structure
* @trigger: reason of migration
*
@@ -489,19 +480,20 @@ out:
*/
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start_mgr, unsigned long last_mgr,
struct mm_struct *mm, uint32_t trigger)
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
uint64_t ttm_res_offset;
struct kfd_node *node;
- unsigned long cpages = 0;
+ unsigned long mpages = 0;
long r = 0;
- if (prange->actual_loc == best_loc) {
- pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
- prange->svms, prange->start, prange->last, best_loc);
- return 0;
+ if (start_mgr < prange->start || last_mgr > prange->last) {
+ pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+ start_mgr, last_mgr, prange->start, prange->last);
+ return -EFAULT;
}
node = svm_range_get_node_by_id(prange, best_loc);
@@ -510,18 +502,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
return -ENODEV;
}
- pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
- prange->start, prange->last, best_loc);
+ pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
+ prange->svms, start_mgr, last_mgr, prange->start, prange->last,
+ best_loc);
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = start_mgr << PAGE_SHIFT;
+ end = (last_mgr + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
- ttm_res_offset = prange->offset << PAGE_SHIFT;
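+ /* byte offset of the first migrated page within the vram ttm
+ * resource: pages into the prange plus the prange's own offset
+ */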
+ ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -536,16 +529,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("failed %ld to migrate\n", r);
break;
} else {
- cpages += r;
+ mpages += r;
}
ttm_res_offset += next - addr;
addr = next;
}
- if (cpages) {
+ if (mpages) {
prange->actual_loc = best_loc;
- svm_range_dma_unmap(prange);
- } else {
+ prange->vram_pages += mpages;
+ } else if (!prange->actual_loc) {
+ /* if no pages were migrated and all pages of prange are in
+ * sys ram, drop the svm_bo obtained from svm_range_vram_node_new
+ */
svm_range_vram_node_free(prange);
}
@@ -578,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
- addr = prange->start << PAGE_SHIFT;
+ addr = migrate->start;
src = (uint64_t *)(scratch + npages);
dst = scratch;
@@ -663,9 +659,8 @@ out_oom:
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
- * 0 - success with all pages migrated
* negative values - indicate error
- * positive values - partial migration, number of pages not migrated
+ * positive values or zero - number of pages got migrated
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
@@ -676,6 +671,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
+ unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -725,10 +721,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
- pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
cpages, npages);
else
- pr_debug("0x%lx pages migrated\n", cpages);
+ pr_debug("0x%lx pages collected\n", cpages);
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
@@ -751,17 +747,21 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
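+ /* mpages: pages that actually moved, i.e. collected pages minus
+ * those that stayed in place (upages)
+ */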
+ mpages = cpages - upages;
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
- WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+ WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
}
- return r ? r : upages;
+
+ return r ? r : mpages;
}
/**
* svm_migrate_vram_to_ram - migrate svm range from device to system
* @prange: range structure
* @mm: process mm, use current->mm if NULL
+ * @start_mgr: first page to be migrated to sys ram
+ * @last_mgr: last page to be migrated to sys ram
* @trigger: reason of migration
* @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
*
@@ -771,6 +771,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+ unsigned long start_mgr, unsigned long last_mgr,
uint32_t trigger, struct page *fault_page)
{
struct kfd_node *node;
@@ -778,26 +779,33 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long addr;
unsigned long start;
unsigned long end;
- unsigned long upages = 0;
+ unsigned long mpages = 0;
long r = 0;
+ /* this prange has no vram pages to migrate to sys ram */
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
prange->start, prange->last);
return 0;
}
+ if (start_mgr < prange->start || last_mgr > prange->last) {
+ pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+ start_mgr, last_mgr, prange->start, prange->last);
+ return -EFAULT;
+ }
+
node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
- prange->svms, prange, prange->start, prange->last,
+ prange->svms, prange, start_mgr, last_mgr,
prange->actual_loc);
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = start_mgr << PAGE_SHIFT;
+ end = (last_mgr + 1) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@@ -816,14 +824,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
- upages += r;
+ mpages += r;
}
addr = next;
}
- if (r >= 0 && !upages) {
- svm_range_vram_node_free(prange);
- prange->actual_loc = 0;
+ if (r >= 0) {
+ prange->vram_pages -= mpages;
+
+ /* prange no longer holds any vram pages: set its actual_loc to
+ * system memory and drop its svm_bo ref
+ */
+ if (prange->vram_pages == 0 && prange->ttm_res) {
+ prange->actual_loc = 0;
+ svm_range_vram_node_free(prange);
+ }
}
return r < 0 ? r : 0;
@@ -833,17 +848,23 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
* svm_migrate_vram_to_vram - migrate svm range from device to device
* @prange: range structure
* @best_loc: the device to migrate to
+ * @start: first page to be migrated to sys ram
+ * @last: last page to be migrated to sys ram
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
*
* Context: Process context, caller hold mmap read lock, svms lock, prange lock
*
+ * migrate all vram pages in prange to sys ram, then migrate
+ * [start, last] pages from sys ram to gpu node best_loc.
+ *
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
- struct mm_struct *mm, uint32_t trigger)
+ unsigned long start, unsigned long last,
+ struct mm_struct *mm, uint32_t trigger)
{
int r, retries = 3;
@@ -855,7 +876,8 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
+ r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
+ trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -863,17 +885,21 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (prange->actual_loc)
return -EDEADLK;
- return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+ return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
{
- if (!prange->actual_loc)
- return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
+ if (!prange->actual_loc || prange->actual_loc == best_loc)
+ return svm_migrate_ram_to_vram(prange, best_loc, start, last,
+ mm, trigger);
+
else
- return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
+ return svm_migrate_vram_to_vram(prange, best_loc, start, last,
+ mm, trigger);
}
@@ -889,10 +915,9 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
+ unsigned long start, last, size;
unsigned long addr = vmf->address;
struct svm_range_bo *svm_bo;
- enum svm_work_list_ops op;
- struct svm_range *parent;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
@@ -929,51 +954,31 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
mutex_lock(&p->svms.lock);
- prange = svm_range_from_addr(&p->svms, addr, &parent);
+ prange = svm_range_from_addr(&p->svms, addr, NULL);
if (!prange) {
pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
goto out_unlock_svms;
}
- mutex_lock(&parent->migrate_mutex);
- if (prange != parent)
- mutex_lock_nested(&prange->migrate_mutex, 1);
+ mutex_lock(&prange->migrate_mutex);
if (!prange->actual_loc)
goto out_unlock_prange;
- svm_range_lock(parent);
- if (prange != parent)
- mutex_lock_nested(&prange->lock, 1);
- r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
- if (prange != parent)
- mutex_unlock(&prange->lock);
- svm_range_unlock(parent);
- if (r) {
- pr_debug("failed %d to split range by granularity\n", r);
- goto out_unlock_prange;
- }
+ /* Align migration range start and size to granularity size */
+ size = 1UL << prange->granularity;
+ start = max(ALIGN_DOWN(addr, size), prange->start);
+ last = min(ALIGN(addr + 1, size) - 1, prange->last);
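+ /* e.g. with granularity 9 the window is 512 pages, so a fault at
+ * page 0x1234 yields start 0x1200 and last 0x13ff, clamped to the
+ * prange bounds
+ */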
- r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
- vmf->page);
+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange, prange->start, prange->last);
-
- /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
- if (p->xnack_enabled && parent == prange)
- op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
- else
- op = SVM_OP_UPDATE_RANGE_NOTIFIER;
- svm_range_add_list_work(&p->svms, parent, mm, op);
- schedule_deferred_list_work(&p->svms);
+ r, prange->svms, prange, start, last);
out_unlock_prange:
- if (prange != parent)
- mutex_unlock(&prange->migrate_mutex);
- mutex_unlock(&parent->migrate_mutex);
+ mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 487f26368..2eebf67f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -41,9 +41,13 @@ enum MIGRATION_COPY_DIR {
};
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
+ unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger);
+
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+ unsigned long start, unsigned long last,
uint32_t trigger, struct page *fault_page);
+
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index d722cbd31..826bc4f6c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
if (has_wa_flag) {
- uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
- 0xffff : 0xffffffff;
+ uint32_t wa_mask =
+ (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 42d881809..697b6d530 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
+ if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
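+ /* GWS queues need the FORCE_SIMD_DIST compute resource limit on
+ * gfx9.4.2+; set it while the queue holds GWS, clear it otherwise
+ */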
+ if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
+ m->compute_resource_limits |=
+ COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ else
+ m->compute_resource_limits &=
+ ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ }
+
q->is_active = QUEUE_IS_ACTIVE(*q);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 02d1d9afb..fedeba3e1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -532,6 +532,7 @@ struct queue_properties {
enum mqd_update_flag {
UPDATE_FLAG_DBG_WA_ENABLE = 1,
UPDATE_FLAG_DBG_WA_DISABLE = 2,
+ UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};
struct mqd_update_info {
@@ -748,7 +749,6 @@ struct kfd_process_device {
/* VM context for GPUVM allocations */
struct file *drm_file;
void *drm_priv;
- atomic64_t tlb_seq;
/* GPUVM allocations storage */
struct idr alloc_idr;
@@ -918,7 +918,7 @@ struct kfd_process {
* fence will be triggered during eviction and new one will be created
* during restore
*/
- struct dma_fence *ef;
+ struct dma_fence __rcu *ef;
/* Work items for evicting and restoring BOs */
struct delayed_work eviction_work;
@@ -1462,7 +1462,14 @@ void kfd_signal_reset_event(struct kfd_node *dev);
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
-void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
+ enum TLB_FLUSH_TYPE type)
+{
+ struct amdgpu_device *adev = pdd->dev->adev;
+ struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
+
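+ /* the flush sequence is now tracked inside the amdgpu_vm, so no
+ * per-pdd tlb_seq bookkeeping is needed here
+ */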
+ amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
+}
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7a33e06f5..58c1fe542 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -664,7 +664,8 @@ int kfd_process_create_wq(void)
if (!kfd_process_wq)
kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
if (!kfd_restore_wq)
- kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
+ kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
+ WQ_FREEZABLE);
if (!kfd_process_wq || !kfd_restore_wq) {
kfd_process_destroy_wq();
@@ -818,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
- return ERR_PTR(-EINVAL);
+ process = ERR_PTR(-EINVAL);
+ goto out;
}
/* A prior open of /dev/kfd could have already created the process. */
@@ -1109,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct dma_fence *ef;
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
@@ -1117,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work)
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
*/
- dma_fence_signal(p->ef);
+ synchronize_rcu();
+ ef = rcu_access_pointer(p->ef);
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
@@ -1126,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work)
svm_range_list_fini(p);
kfd_process_destroy_pdds(p);
- dma_fence_put(p->ef);
+ dma_fence_put(ef);
kfd_event_free_process(p);
@@ -1642,6 +1646,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
struct kfd_process *p;
+ struct dma_fence *ef;
struct kfd_node *dev;
int ret;
@@ -1661,13 +1666,13 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
- &p->ef);
+ &ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
}
+ RCU_INIT_POINTER(p->ef, ef);
pdd->drm_priv = drm_file->private_data;
- atomic64_set(&pdd->tlb_seq, 0);
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
@@ -1909,6 +1914,23 @@ kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
return -EINVAL;
}
+static int signal_eviction_fence(struct kfd_process *p)
+{
+ struct dma_fence *ef;
+ int ret;
+
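+ /* dma_fence_get_rcu_safe() retries until it holds a reference to
+ * the current fence, so a concurrent replacement of p->ef cannot
+ * hand back a stale pointer
+ */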
+ rcu_read_lock();
+ ef = dma_fence_get_rcu_safe(&p->ef);
+ rcu_read_unlock();
+ if (!ef)
+ return -EINVAL;
+
+ ret = dma_fence_signal(ef);
+ dma_fence_put(ef);
+
+ return ret;
+}
+
static void evict_process_worker(struct work_struct *work)
{
int ret;
@@ -1921,31 +1943,45 @@ static void evict_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, eviction_work);
- WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
- "Eviction fence mismatch\n");
-
- /* Narrow window of overlap between restore and evict work
- * item is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
- * unreserves KFD BOs, it is possible to evicted again. But
- * restore has few more steps of finish. So lets wait for any
- * previous restore work to complete
- */
- flush_delayed_work(&p->restore_work);
pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
if (!ret) {
- dma_fence_signal(p->ef);
- dma_fence_put(p->ef);
- p->ef = NULL;
- queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+ /* If another thread already signaled the eviction fence,
+ * it is responsible for stopping the queues and scheduling
+ * the restore work.
+ */
+ if (signal_eviction_fence(p) ||
+ mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
+static int restore_process_helper(struct kfd_process *p)
+{
+ int ret = 0;
+
+ /* VMs may not have been acquired yet during debugging. */
+ if (p->kgd_process_info) {
+ ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
+ p->kgd_process_info, &p->ef);
+ if (ret)
+ return ret;
+ }
+
+ ret = kfd_process_restore_queues(p);
+ if (!ret)
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
+ else
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
+
+ return ret;
+}
+
static void restore_process_worker(struct work_struct *work)
{
struct delayed_work *dwork;
@@ -1971,24 +2007,15 @@ static void restore_process_worker(struct work_struct *work)
*/
p->last_restore_timestamp = get_jiffies_64();
- /* VMs may not have been acquired yet during debugging. */
- if (p->kgd_process_info)
- ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
- &p->ef);
+
+ ret = restore_process_helper(p);
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
- ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
- msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
- WARN(!ret, "reschedule restore work failed\n");
- return;
+ if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+ msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+ kfd_process_restore_queues(p);
}
-
- ret = kfd_process_restore_queues(p);
- if (!ret)
- pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
- else
- pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1999,14 +2026,9 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- cancel_delayed_work_sync(&p->eviction_work);
- flush_delayed_work(&p->restore_work);
-
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
pr_err("Failed to suspend process 0x%x\n", p->pasid);
- dma_fence_signal(p->ef);
- dma_fence_put(p->ef);
- p->ef = NULL;
+ signal_eviction_fence(p);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}
@@ -2018,7 +2040,7 @@ int kfd_resume_all_processes(void)
int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+ if (restore_process_helper(p)) {
pr_err("Restore process %d failed during resume\n",
p->pasid);
ret = -EFAULT;
@@ -2059,36 +2081,6 @@ int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
-void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
-{
- struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
- uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
- struct kfd_node *dev = pdd->dev;
- uint32_t xcc_mask = dev->xcc_mask;
- int xcc = 0;
-
- /*
- * It can be that we race and lose here, but that is extremely unlikely
- * and the worst thing which could happen is that we flush the changes
- * into the TLB once more which is harmless.
- */
- if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
- return;
-
- if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
- /* Nothing to flush until a VMID is assigned, which
- * only happens when the first queue is created.
- */
- if (pdd->qpd.vmid)
- amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
- pdd->qpd.vmid);
- } else {
- for_each_inst(xcc, xcc_mask)
- amdgpu_amdkfd_flush_gpu_tlb_pasid(
- dev->adev, pdd->process->pasid, type, xcc);
- }
-}
-
/* assumes caller holds process lock. */
int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 43eff221e..4858112f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
+ struct mqd_update_info minfo = {0};
struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
@@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
}
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
+ minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q, NULL);
+ pqn->q, &minfo);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9af1d0943..c50a0dc9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -198,6 +198,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
+
return 0;
}
@@ -349,6 +350,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
INIT_LIST_HEAD(&prange->child_list);
atomic_set(&prange->invalid, 0);
prange->validate_timestamp = 0;
+ prange->vram_pages = 0;
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
@@ -395,6 +397,8 @@ static void svm_range_bo_release(struct kref *kref)
prange->start, prange->last);
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
+ /* prange should not hold any vram pages at this point */
+ WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
mutex_unlock(&prange->lock);
spin_lock(&svm_bo->list_lock);
@@ -873,14 +877,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
static void *
svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
- uint64_t offset)
+ uint64_t offset, uint64_t *vram_pages)
{
+ unsigned char *src = (unsigned char *)psrc + offset;
unsigned char *dst;
+ uint64_t i;
dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
if (!dst)
return NULL;
- memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+ if (!vram_pages) {
+ memcpy(dst, src, num_elements * size);
+ return (void *)dst;
+ }
+
+ *vram_pages = 0;
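+ /* count entries whose dma address carries the SVM_RANGE_VRAM_DOMAIN
+ * flag, i.e. pages resident in vram
+ */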
+ for (i = 0; i < num_elements; i++) {
+ dma_addr_t *temp;
+
+ temp = (dma_addr_t *)dst + i;
+ *temp = *((dma_addr_t *)src + i);
+ if (*temp & SVM_RANGE_VRAM_DOMAIN)
+ (*vram_pages)++;
+ }
return (void *)dst;
}
@@ -894,7 +913,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
if (!src->dma_addr[i])
continue;
dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
- sizeof(*src->dma_addr[i]), src->npages, 0);
+ sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
if (!dst->dma_addr[i])
return -ENOMEM;
}
@@ -905,7 +924,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
- uint64_t new_start, uint64_t new_n)
+ uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
{
unsigned char *new, *old, *pold;
uint64_t d;
@@ -917,11 +936,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
return 0;
d = (new_start - old_start) * size;
- new = svm_range_copy_array(pold, size, new_n, d);
+ /* get the dma addr array for the new range and calculate its vram page count */
+ new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
if (!new)
return -ENOMEM;
d = (new_start == old_start) ? new_n * size : 0;
- old = svm_range_copy_array(pold, size, old_n, d);
+ old = svm_range_copy_array(pold, size, old_n, d, NULL);
if (!old) {
kvfree(new);
return -ENOMEM;
@@ -943,10 +963,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old,
for (i = 0; i < MAX_GPU_INSTANCE; i++) {
r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
sizeof(*old->dma_addr[i]), old->start,
- npages, new->start, new->npages);
+ npages, new->start, new->npages,
+ old->actual_loc ? &new->vram_pages : NULL);
if (r)
return r;
}
+ if (old->actual_loc)
+ old->vram_pages -= new->vram_pages;
return 0;
}
@@ -1092,7 +1115,7 @@ static int
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
struct list_head *insert_list, struct list_head *remap_list)
{
- struct svm_range *tail;
+ struct svm_range *tail = NULL;
int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r) {
@@ -1107,7 +1130,7 @@ static int
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
struct list_head *insert_list, struct list_head *remap_list)
{
- struct svm_range *head;
+ struct svm_range *head = NULL;
int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r) {
@@ -1130,66 +1153,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
list_add_tail(&pchild->child_list, &prange->child_list);
}
-/**
- * svm_range_split_by_granularity - collect ranges within granularity boundary
- *
- * @p: the process with svms list
- * @mm: mm structure
- * @addr: the vm fault address in pages, to split the prange
- * @parent: parent range if prange is from child list
- * @prange: prange to split
- *
- * Trims @prange to be a single aligned block of prange->granularity if
- * possible. The head and tail are added to the child_list in @parent.
- *
- * Context: caller must hold mmap_read_lock and prange->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-int
-svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
- unsigned long addr, struct svm_range *parent,
- struct svm_range *prange)
-{
- struct svm_range *head, *tail;
- unsigned long start, last, size;
- int r;
-
- /* Align splited range start and size to granularity size, then a single
- * PTE will be used for whole range, this reduces the number of PTE
- * updated and the L1 TLB space used for translation.
- */
- size = 1UL << prange->granularity;
- start = ALIGN_DOWN(addr, size);
- last = ALIGN(addr + 1, size) - 1;
-
- pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
- prange->svms, prange->start, prange->last, start, last, size);
-
- if (start > prange->start) {
- r = svm_range_split(prange, start, prange->last, &head);
- if (r)
- return r;
- svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
- }
-
- if (last < prange->last) {
- r = svm_range_split(prange, prange->start, last, &tail);
- if (r)
- return r;
- svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
- }
-
- /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
- if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
- prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
- pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
- prange, prange->start, prange->last,
- SVM_OP_ADD_RANGE_AND_MAP);
- }
- return 0;
-}
static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
@@ -1524,7 +1487,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
uint32_t gpuidx;
int r;
- drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
+ drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
drm_exec_until_all_locked(&ctx->exec) {
for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
@@ -1609,6 +1572,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
* 5. Release page table (and SVM BO) reservation
*/
static int svm_range_validate_and_map(struct mm_struct *mm,
+ unsigned long map_start, unsigned long map_last,
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
@@ -1689,10 +1653,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
}
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = map_start << PAGE_SHIFT;
+ end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
struct hmm_range *hmm_range;
+ unsigned long map_start_vma;
+ unsigned long map_last_vma;
struct vm_area_struct *vma;
unsigned long next = 0;
unsigned long offset;
@@ -1720,7 +1686,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
if (!r) {
- offset = (addr - start) >> PAGE_SHIFT;
+ offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r)
@@ -1738,9 +1704,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
r = -EAGAIN;
}
- if (!r)
- r = svm_range_map_to_gpus(prange, offset, npages, readonly,
- ctx->bitmap, wait, flush_tlb);
+ if (!r) {
+ map_start_vma = max(map_start, prange->start + offset);
+ map_last_vma = min(map_last, prange->start + offset + npages - 1);
+ if (map_start_vma <= map_last_vma) {
+ offset = map_start_vma - prange->start;
+ npages = map_last_vma - map_start_vma + 1;
+ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+ ctx->bitmap, wait, flush_tlb);
+ }
+ }
if (!r && next == end)
prange->mapped_to_gpu = true;
@@ -1833,8 +1806,8 @@ static void svm_range_restore_work(struct work_struct *work)
*/
mutex_lock(&prange->migrate_mutex);
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- false, true, false);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, false, true, false);
if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
@@ -1871,7 +1844,7 @@ out_reschedule:
/* If validation failed, reschedule another attempt */
if (evicted_ranges) {
pr_debug("reschedule to restore svm range\n");
- schedule_delayed_work(&svms->restore_work,
+ queue_delayed_work(system_freezable_wq, &svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
kfd_smi_event_queue_restore_rescheduled(mm);
@@ -1947,7 +1920,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed to quiesce KFD\n");
pr_debug("schedule to restore svm %p ranges\n", svms);
- schedule_delayed_work(&svms->restore_work,
+ queue_delayed_work(system_freezable_wq, &svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
} else {
unsigned long s, l;
@@ -2002,6 +1975,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
+ new->vram_pages = old->vram_pages;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
@@ -2911,6 +2885,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
+ unsigned long start, last, size;
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
struct svm_range *prange;
@@ -3046,40 +3021,44 @@ retry_write_locked:
kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
- if (prange->actual_loc != best_loc) {
+ /* Align migration range start and size to granularity size */
+ size = 1UL << prange->granularity;
+ start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
+ last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
+ if (prange->actual_loc != 0 || best_loc != 0) {
migration = true;
+
if (best_loc) {
- r = svm_migrate_to_vram(prange, best_loc, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ r = svm_migrate_to_vram(prange, best_loc, start, last,
+ mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
if (r) {
pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
r, addr);
/* Fallback to system memory if migration to
* VRAM failed
*/
- if (prange->actual_loc)
- r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- NULL);
+ if (prange->actual_loc && prange->actual_loc != best_loc)
+ r = svm_migrate_vram_to_ram(prange, mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
else
r = 0;
}
} else {
- r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- NULL);
+ r = svm_migrate_vram_to_ram(prange, mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
- r, svms, prange->start, prange->last);
+ r, svms, start, last);
goto out_unlock_range;
}
}
- r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+ r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
+ false, false);
if (r)
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
- r, svms, prange->start, prange->last);
+ r, svms, start, last);
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
@@ -3425,18 +3404,24 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
*migrated = false;
best_loc = svm_range_best_prefetch_location(prange);
- if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
- best_loc == prange->actual_loc)
+ /* when best_loc is a gpu node and the same as prange->actual_loc,
+ * we still need to migrate, since prange->actual_loc != 0 does not
+ * mean all pages in prange are in vram. hmm migrate will pick up
+ * the right pages during migration.
+ */
+ if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
+ (best_loc == 0 && prange->actual_loc == 0))
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm,
+ r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
- r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
+ mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
return r;
@@ -3490,7 +3475,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
+ /* migrate all vram pages in this prange to sys ram;
+ * after that, prange->actual_loc should be zero
+ */
r = svm_migrate_vram_to_ram(prange, mm,
+ prange->start, prange->last,
KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
@@ -3614,8 +3603,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- true, true, flush_tlb);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, true, true, flush_tlb);
if (r)
pr_debug("failed %d to map svm range\n", r);
@@ -3629,8 +3618,8 @@ out_unlock_range:
pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
prange, prange->start, prange->last);
mutex_lock(&prange->migrate_mutex);
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- true, true, prange->mapped_to_gpu);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
if (r)
pr_debug("failed %d on remap svm range\n", r);
mutex_unlock(&prange->migrate_mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c528df1d0..026863a0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -78,6 +78,7 @@ struct svm_work_list_item {
* @update_list:link list node used to add to update_list
* @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages
+ * @vram_pages: number of vram pages in this svm_range
* @dma_addr: dma mapping address on each GPU for system memory physical page
* @ttm_res: vram ttm resource map
* @offset: range start offset within mm_nodes
@@ -88,7 +89,9 @@ struct svm_work_list_item {
* @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
* @perferred_loc: perferred location, 0 for CPU, or GPU id
* @perfetch_loc: last prefetch location, 0 for CPU, or GPU id
- * @actual_loc: the actual location, 0 for CPU, or GPU id
+ * @actual_loc: location of this svm_range. 0: all pages are in sys ram;
+ * GPU id: this svm_range may include vram pages from the GPU with
+ * id actual_loc.
* @granularity:migration granularity, log2 num pages
* @invalid: not 0 means cpu page table is invalidated
* @validate_timestamp: system timestamp when range is validated
@@ -112,6 +115,7 @@ struct svm_range {
struct list_head list;
struct list_head update_list;
uint64_t npages;
+ uint64_t vram_pages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
uint64_t offset;
@@ -168,9 +172,6 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
-int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
- unsigned long addr, struct svm_range *parent,
- struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id, uint64_t addr,
bool write_fault);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e5f7c92ee..6ed2ec381 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
- if (pcache->cache_level == 2)
- pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
- else if (mode)
- pcache->cache_size = pcache_info[cache_type].cache_size / mode;
- else
- pcache->cache_size = pcache_info[cache_type].cache_size;
+ pcache->cache_size = pcache_info[cache_type].cache_size;
+ /* Partition mode only affects L3 cache size */
+ if (mode && pcache->cache_level == 3)
+ pcache->cache_size /= mode;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
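
The rewritten sizing rule above is small enough to check by hand: only the L3 size is divided by the memory-partition mode, while L2 keeps the reported value. A hedged standalone sketch (the sizes and mode are made up, not from the patch):

    #include <stdio.h>

    /* After the patch: partition mode only divides the L3 size. */
    static unsigned int effective_cache_size(unsigned int reported_kb,
                                             unsigned int cache_level,
                                             unsigned int partition_mode)
    {
        unsigned int size = reported_kb;

        if (partition_mode && cache_level == 3)
            size /= partition_mode;
        return size;
    }

    int main(void)
    {
        /* Hypothetical 256 MiB L3 with partition mode 4. */
        printf("L3: %u KiB\n", effective_cache_size(262144, 3, 4)); /* 65536 */
        printf("L2: %u KiB\n", effective_cache_size(8192, 2, 4));   /* 8192 */
        return 0;
    }
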
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index af17ab802..92a5c5efc 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -30,6 +30,9 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 8bf94920d..ab2a97e35 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -25,22 +25,25 @@
+ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM = \
amdgpu_dm.o \
amdgpu_dm_plane.o \
amdgpu_dm_crtc.o \
amdgpu_dm_irq.o \
amdgpu_dm_mst_types.o \
- amdgpu_dm_color.o
+ amdgpu_dm_color.o \
+ amdgpu_dm_services.o \
+ amdgpu_dm_helpers.o \
+ amdgpu_dm_pp_smu.o \
+ amdgpu_dm_psr.o \
+ amdgpu_dm_replay.o \
+ amdgpu_dm_wb.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
endif
-ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o
-endif
-
AMDGPUDM += amdgpu_dm_hdcp.o
ifneq ($(CONFIG_DEBUG_FS),)
@@ -52,3 +55,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
AMD_DISPLAY_FILES += $(AMDGPU_DM)
+endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dafe9562a..718e533ab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -37,6 +37,7 @@
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
+#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
@@ -54,6 +55,7 @@
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
@@ -84,12 +86,13 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <acpi/video.h>
@@ -269,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
u32 v_blank_start, v_blank_end, h_position, v_position;
struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -281,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
/*
* TODO rework base driver to use values directly.
* for now parse it back into reg-format
@@ -574,6 +581,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
+ struct drm_writeback_job *job;
struct amdgpu_crtc *acrtc;
unsigned long flags;
int vrr_active;
@@ -582,6 +590,33 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (!acrtc)
return;
+ if (acrtc->wb_pending) {
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+ if (job) {
+ unsigned int v_total, refresh_hz;
+ struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
+ v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
+ mdelay(1000 / refresh_hz);
+
+ drm_writeback_signal_completion(acrtc->wb_conn, 0);
+ dc_stream_fc_disable_writeback(adev->dm.dc,
+ acrtc->dm_irq_params.stream, 0);
+ }
+ } else
+ DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+ acrtc->wb_pending = false;
+ }
+
vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
drm_dbg_vbl(adev_to_drm(adev),
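
The one-frame wait in the writeback branch above follows from the timing arithmetic refresh_hz = pix_clk / (h_total * v_total). A standalone check with assumed 1080p CEA-861 numbers (not taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed timing: 148.5 MHz pixel clock (1485000 in 100 Hz units),
         * 2200 x 1125 total pixels -> 60 Hz, so the wait is ~16 ms. */
        uint64_t pix_clk_100hz = 1485000;
        uint32_t h_total = 2200, v_total = 1125;
        uint32_t refresh_hz;

        refresh_hz = (uint32_t)(pix_clk_100hz * 100 / (v_total * h_total));
        printf("refresh=%u Hz, frame wait=%u ms\n", refresh_hz,
               1000 / refresh_hz);
        return 0;
    }
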
@@ -724,6 +759,10 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
if (notify->type == DMUB_NOTIFICATION_HPD)
@@ -893,8 +932,7 @@ static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct dm_compressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode;
@@ -948,6 +986,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->audio_inst != port)
continue;
@@ -988,8 +1030,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev,
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
struct device *hda_kdev, void *data)
{
- struct drm_device *dev = dev_get_drvdata(kdev);
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
struct drm_audio_component *acomp = data;
acomp->ops = NULL;
@@ -1678,6 +1719,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
+ init_data.flags.disable_ips_in_vpb = 1;
+
+ /* Enable DWB for tested platforms only */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ init_data.num_virtual_links = 1;
+
INIT_LIST_HEAD(&adev->dm.da_list);
retrieve_dmi_info(&adev->dm);
@@ -1720,23 +1770,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
- /* TODO: There is a new drm mst change where the freedom of
- * vc_next_start_slot update is revoked/moved into drm, instead of in
- * driver. This forces us to make sure to get vc_next_start_slot updated
- * in drm function each time without considering if mst_state is active
- * or not. Otherwise, next time hotplug will give wrong start_slot
- * number. We are implementing a temporary solution to even notify drm
- * mst deallocation when link is no longer of MST type when uncommitting
- * the stream so we will have more time to work on a proper solution.
- * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we
- * should notify drm to do a complete "reset" of its states and stop
- * calling further drm mst functions when link is no longer of an MST
- * type. This could happen when we unplug an MST hubs/displays. When
- * uncommit stream comes later after unplug, we should just reset
- * hardware states only.
- */
- adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
-
if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
DRM_INFO("DP-HDMI FRL PCON supported\n");
@@ -1912,7 +1945,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
- if (adev->dm.hpd_rx_offload_wq) {
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
if (adev->dm.hpd_rx_offload_wq[i].wq) {
destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
@@ -2262,6 +2295,10 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
@@ -2390,6 +2427,10 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
@@ -2569,12 +2610,10 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
memset(del_streams, 0, sizeof(del_streams));
- context = dc_create_state(dc);
+ context = dc_state_create_current_copy(dc);
if (context == NULL)
goto context_alloc_fail;
- dc_resource_state_copy_construct_current(dc, context);
-
/* First remove from context all streams */
for (i = 0; i < context->stream_count; i++) {
struct dc_stream_state *stream = context->streams[i];
@@ -2584,12 +2623,12 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
/* Remove all planes for removed streams and then remove the streams */
for (i = 0; i < del_streams_count; i++) {
- if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
res = DC_FAIL_DETACH_SURFACES;
goto fail;
}
- res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
if (res != DC_OK)
goto fail;
}
@@ -2597,7 +2636,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
res = dc_commit_streams(dc, context->streams, context->stream_count);
fail:
- dc_release_state(context);
+ dc_state_release(context);
context_alloc_fail:
return res;
@@ -2624,7 +2663,7 @@ static int dm_suspend(void *handle)
dc_allow_idle_optimizations(adev->dm.dc, false);
- dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+ dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
@@ -2871,7 +2910,7 @@ static int dm_resume(void *handle)
dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
- dc_release_state(dm->cached_dc_state);
+ dc_state_release(dm->cached_dc_state);
dm->cached_dc_state = NULL;
amdgpu_dm_irq_resume_late(adev);
@@ -2881,10 +2920,9 @@ static int dm_resume(void *handle)
return 0;
}
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
- dc_release_state(dm_state->context);
- dm_state->context = dc_create_state(dm->dc);
+ dc_state_release(dm_state->context);
+ dm_state->context = dc_state_create(dm->dc);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
- dc_resource_state_construct(dm->dc, dm_state->context);
/* Before powering on DC we need to re-initialize DMUB. */
dm_dmub_hw_resume(adev);
@@ -2914,6 +2952,10 @@ static int dm_resume(void *handle)
/* Do detection*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->dc_link)
@@ -3495,6 +3537,9 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
@@ -3957,7 +4002,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
old_state = to_dm_atomic_state(obj->state);
if (old_state && old_state->context)
- new_state->context = dc_copy_state(old_state->context);
+ new_state->context = dc_state_create_copy(old_state->context);
if (!new_state->context) {
kfree(new_state);
@@ -3973,7 +4018,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj,
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
if (dm_state && dm_state->context)
- dc_release_state(dm_state->context);
+ dc_state_release(dm_state->context);
kfree(dm_state);
}
@@ -4009,14 +4054,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
if (!state)
return -ENOMEM;
- state->context = dc_create_state(adev->dm.dc);
+ state->context = dc_state_create_current_copy(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
}
- dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
-
drm_atomic_private_obj_init(adev_to_drm(adev),
&adev->dm.atomic_obj,
&state->base,
@@ -4024,14 +4067,19 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
r = amdgpu_display_modeset_create_props(adev);
if (r) {
- dc_release_state(state->context);
+ dc_state_release(state->context);
kfree(state);
return r;
}
+#ifdef AMD_PRIVATE_COLOR
+ if (amdgpu_dm_create_color_properties(adev))
+ return -ENOMEM;
+#endif
+
r = amdgpu_dm_audio_init(adev);
if (r) {
- dc_release_state(state->context);
+ dc_state_release(state->context);
kfree(state);
return r;
}
@@ -4467,6 +4515,28 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
continue;
}
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+ if (!wbcon) {
+ DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ continue;
+ }
+
+ if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+ DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ kfree(wbcon);
+ continue;
+ }
+
+ link->psr_settings.psr_feature_enabled = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ continue;
+ }
+
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
goto fail;
@@ -4485,8 +4555,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- link = dc_get_link_at_index(dm->dc, i);
-
if (dm->hpd_rx_offload_wq)
dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
aconnector;
@@ -5089,7 +5157,9 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
* Always set input transfer function, since plane state is refreshed
* every time.
*/
- ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
+ plane_state,
+ dc_plane_state);
if (ret)
return ret;
@@ -5503,10 +5573,13 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *aconnector = NULL;
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
memset(&hv_frame, 0, sizeof(hv_frame));
memset(&avi_frame, 0, sizeof(avi_frame));
@@ -5676,13 +5749,13 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
}
static struct dc_sink *
-create_fake_sink(struct amdgpu_dm_connector *aconnector)
+create_fake_sink(struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
- sink_init_data.link = aconnector->dc_link;
- sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = link->connector_signal;
sink = dc_sink_create(&sink_init_data);
if (!sink) {
@@ -6053,6 +6126,7 @@ create_stream_for_sink(struct drm_connector *connector,
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
struct dsc_dec_dpcd_caps dsc_caps;
+ struct dc_link *link = NULL;
struct dc_sink *sink = NULL;
drm_mode_init(&mode, drm_mode);
@@ -6066,14 +6140,24 @@ create_stream_for_sink(struct drm_connector *connector,
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
aconnector = NULL;
aconnector = to_amdgpu_dm_connector(connector);
- if (!aconnector->dc_sink) {
- sink = create_fake_sink(aconnector);
- if (!sink)
- return stream;
- } else {
- sink = aconnector->dc_sink;
- dc_sink_retain(sink);
- }
+ link = aconnector->dc_link;
+ } else {
+ struct drm_writeback_connector *wbcon = NULL;
+ struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
+
+ wbcon = drm_connector_to_writeback(connector);
+ dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+ link = dm_wbcon->link;
+ }
+
+ if (!aconnector || !aconnector->dc_sink) {
+ sink = create_fake_sink(link);
+ if (!sink)
+ return stream;
+
+ } else {
+ sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
}
stream = dc_create_stream_for_sink(sink);
@@ -6173,19 +6257,16 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide whether the stream supports VSC SDP colorimetry
// capability before building the VSC info packet
//
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
- stream->use_vsc_sdp_for_colorimetry = true;
- }
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
@@ -6455,7 +6536,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct edid *edid;
struct i2c_adapter *ddc;
- if (dc_link->aux_mode)
+ if (dc_link && dc_link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
else
ddc = &aconnector->i2c->base;
@@ -6581,7 +6662,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
if (!dc_plane_state)
goto cleanup;
- dc_state = dc_create_state(dc);
+ dc_state = dc_state_create(dc);
if (!dc_state)
goto cleanup;
@@ -6608,9 +6689,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = dc_validate_plane(dc, dc_plane_state);
if (dc_result == DC_OK)
- dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
+ dc_result = dc_state_add_stream(dc, dc_state, stream);
- if (dc_result == DC_OK && !dc_add_plane_to_context(
+ if (dc_result == DC_OK && !dc_state_add_plane(
dc,
stream,
dc_plane_state,
@@ -6622,7 +6703,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
cleanup:
if (dc_state)
- dc_release_state(dc_state);
+ dc_state_release(dc_state);
if (dc_plane_state)
dc_plane_state_release(dc_plane_state);
@@ -6930,7 +7011,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -6974,6 +7055,9 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->mst_output_port)
@@ -7553,7 +7637,6 @@ create_i2c(struct ddc_service *ddc_service,
if (!i2c)
return NULL;
i2c->base.owner = THIS_MODULE;
- i2c->base.class = I2C_CLASS_DDC;
i2c->base.dev.parent = &adev->pdev->dev;
i2c->base.algo = &amdgpu_dm_i2c_algo;
snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
@@ -7579,6 +7662,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct dc_link *link = dc_get_link_at_index(dc, link_index);
struct amdgpu_i2c_adapter *i2c;
+ /* Not needed for writeback connector */
link->priv = aconnector;
@@ -8189,6 +8273,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
+ bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
+ bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf;
}
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
@@ -8402,6 +8490,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&acrtc_state->stream->csc_color_matrix;
bundle->stream_update.out_transfer_func =
acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.lut3d_func =
+ (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
+ bundle->stream_update.func_shaper =
+ (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
}
acrtc_state->stream->abm_level = acrtc_state->abm_level;
@@ -8535,6 +8627,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
notify:
aconnector = to_amdgpu_dm_connector(connector);
@@ -8568,6 +8663,9 @@ notify:
if (!status)
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
mutex_lock(&adev->dm.audio_lock);
@@ -8593,6 +8691,12 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state)
+{
+ dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct dc_state *dc_state)
{
@@ -8602,9 +8706,38 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_connector_state *old_con_state;
+ struct drm_connector *connector;
bool mode_set_reset_required = false;
u32 i;
+ /* Disable writeback */
+ for_each_old_connector_in_state(state, connector, old_con_state, i) {
+ struct dm_connector_state *dm_old_con_state;
+ struct amdgpu_crtc *acrtc;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ old_crtc_state = NULL;
+
+ dm_old_con_state = to_dm_connector_state(old_con_state);
+ if (!dm_old_con_state->base.crtc)
+ continue;
+
+ acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+ if (acrtc)
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+ if (!acrtc->wb_enabled)
+ continue;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ dm_clear_writeback(dm, dm_old_crtc_state);
+ acrtc->wb_enabled = false;
+ }
+
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8729,7 +8862,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
dc_stream_get_status(dm_new_crtc_state->stream);
if (!status)
- status = dc_stream_get_status_from_state(dc_state,
+ status = dc_state_get_stream_status(dc_state,
dm_new_crtc_state->stream);
if (!status)
drm_err(dev,
@@ -8741,6 +8874,105 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
}
}
+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state,
+ struct drm_connector *connector,
+ struct drm_connector_state *new_con_state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dc_writeback_info *wb_info;
+ struct pipe_ctx *pipe = NULL;
+ struct amdgpu_framebuffer *afb;
+ int i = 0;
+
+ wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+ if (!wb_info) {
+ DRM_ERROR("Failed to allocate wb_info\n");
+ return;
+ }
+
+ acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+ if (!acrtc) {
+ DRM_ERROR("no amdgpu_crtc found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+ if (!afb) {
+ DRM_ERROR("No amdgpu_framebuffer found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+ pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ /* fill in wb_info */
+ wb_info->wb_enabled = true;
+
+ wb_info->dwb_pipe_inst = 0;
+ wb_info->dwb_params.dwbscl_black_color = 0;
+ wb_info->dwb_params.hdr_mult = 0x1F000;
+ wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+ wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+ wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+ wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+ /* width & height from crtc */
+ wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+ wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+ wb_info->dwb_params.cnv_params.crop_en = false;
+ wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+ wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
+ wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+ wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+ wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+ wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+ wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+ wb_info->dwb_params.scaler_taps.h_taps = 4;
+ wb_info->dwb_params.scaler_taps.v_taps = 4;
+ wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+ wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+ wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+ wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+ wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
+ for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+ wb_info->mcif_buf_params.luma_address[i] = afb->address;
+ wb_info->mcif_buf_params.chroma_address[i] = 0;
+ }
+
+ wb_info->mcif_buf_params.p_vmid = 1;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
+ wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+ wb_info->mcif_warmup_params.region_size =
+ wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+ }
+ wb_info->mcif_warmup_params.p_vmid = 1;
+ wb_info->writeback_source_plane = pipe->plane_state;
+
+ dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+ acrtc->wb_pending = true;
+ acrtc->wb_conn = wb_conn;
+ drm_writeback_queue_job(wb_conn, new_con_state);
+}
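
For the MCIF warm-up sizing above (region_size = luma_pitch * dest_height), a quick sanity check with an assumed 1080p ARGB8888 writeback target (values are illustrative, not from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed 1920x1080 ARGB8888 target: 4 bytes per pixel. */
        uint32_t width = 1920, height = 1080, cpp = 4;
        uint32_t luma_pitch = width * cpp;
        uint64_t region_size = (uint64_t)luma_pitch * height;

        printf("pitch=%u bytes, warmup region=%llu bytes (~%.1f MiB)\n",
               luma_pitch, (unsigned long long)region_size,
               region_size / (1024.0 * 1024.0));
        return 0;
    }
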
+
/**
* amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
* @state: The atomic state to commit
@@ -8768,16 +9000,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support) {
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- if (new_con_state->crtc &&
- new_con_state->crtc->state->active &&
- drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
- dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
- break;
- }
- }
- }
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -8791,7 +9015,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *aconnector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
if (!adev->dm.hdcp_workqueue)
continue;
@@ -8975,6 +9204,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
@@ -9068,6 +9301,31 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
}
+ /* Enable writeback */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (!new_con_state->writeback_job)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (acrtc->wb_enabled)
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
+ acrtc->wb_enabled = true;
+ }
+
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
@@ -9185,10 +9443,15 @@ out:
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector)
{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *aconnector;
struct amdgpu_crtc *disconnected_acrtc;
struct dm_crtc_state *acrtc_state;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
if (!aconnector->dc_sink || !connector->state || !connector->encoder)
return;
@@ -9265,12 +9528,16 @@ static void get_freesync_config_for_crtc(
struct dm_connector_state *new_con_state)
{
struct mod_freesync_config config = {0};
- struct amdgpu_dm_connector *aconnector =
- to_amdgpu_dm_connector(new_con_state->base.connector);
+ struct amdgpu_dm_connector *aconnector;
struct drm_display_mode *mode = &new_crtc_state->base.mode;
int vrefresh = drm_mode_vrefresh(mode);
bool fs_vid_mode = false;
+ if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
+
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
vrefresh >= aconnector->min_vfreq &&
vrefresh <= aconnector->max_vfreq;
@@ -9516,7 +9783,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
crtc->base.id);
/* i.e. reset mode */
- if (dc_remove_stream_from_ctx(
+ if (dc_state_remove_stream(
dm->dc,
dm_state->context,
dm_old_crtc_state->stream) != DC_OK) {
@@ -9559,7 +9826,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
crtc->base.id);
- if (dc_add_stream_to_ctx(
+ if (dc_state_add_stream(
dm->dc,
dm_state->context,
dm_new_crtc_state->stream) != DC_OK) {
@@ -9606,6 +9873,7 @@ skip_modeset:
* when a modeset is needed, to ensure it gets reprogrammed.
*/
if (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
if (ret)
@@ -9639,7 +9907,8 @@ static bool should_reset_plane(struct drm_atomic_state *state,
* TODO: Remove this hack for all asics once it proves that the
* fast updates works fine on DCN3.2+.
*/
- if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
+ state->allow_modeset)
return true;
/* Exit early if we know that we're adding or removing the plane. */
@@ -9673,6 +9942,10 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;
+ struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
+
+ dm_new_other_state = to_dm_plane_state(new_other_state);
+ dm_old_other_state = to_dm_plane_state(old_other_state);
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -9709,6 +9982,18 @@ static bool should_reset_plane(struct drm_atomic_state *state,
old_other_state->color_encoding != new_other_state->color_encoding)
return true;
+ /* HDR/Transfer Function changes. */
+ if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
+ dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
+ dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
+ dm_old_other_state->ctm != dm_new_other_state->ctm ||
+ dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
+ dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
+ dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
+ dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
+ dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
+ return true;
+
/* Framebuffer checks fall at the end. */
if (!old_other_state->fb || !new_other_state->fb)
continue;
@@ -9863,7 +10148,7 @@ static int dm_update_plane_state(struct dc *dc,
if (ret)
return ret;
- if (!dc_remove_plane_from_context(
+ if (!dc_state_remove_plane(
dc,
dm_old_crtc_state->stream,
dm_old_plane_state->dc_state,
@@ -9941,7 +10226,7 @@ static int dm_update_plane_state(struct dc *dc,
* state. It'll be released when the atomic state is
* cleaned.
*/
- if (!dc_add_plane_to_context(
+ if (!dc_state_add_plane(
dc,
dm_new_crtc_state->stream,
dc_new_plane_state,
@@ -10103,6 +10388,9 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (conn_state->crtc != crtc)
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconnector = to_amdgpu_dm_connector(connector);
if (!aconnector->mst_output_port || !aconnector->mst_root)
aconnector = NULL;
@@ -10815,8 +11103,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct dm_connector_state *dm_con_state = NULL;
struct dc_sink *sink;
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
bool freesync_capable = false;
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 3710f4d0f..9c1871b86 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include "link_service_types.h"
+#include <drm/drm_writeback.h>
/*
* This file contains the definition for amdgpu_display_manager
@@ -54,6 +55,9 @@
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3
+
+#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -714,11 +718,107 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status,
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+struct amdgpu_dm_wb_connector {
+ struct drm_writeback_connector base;
+ struct dc_link *link;
+};
+
+#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base)
+
extern const struct amdgpu_ip_block_version dm_ip_block;
+/* enum amdgpu_transfer_function: pre-defined transfer functions supported by AMD.
+ *
+ * It includes standardized transfer functions and pure power functions. The
+ * transfer function coefficients are available at modules/color/color_gamma.c
+ */
+enum amdgpu_transfer_function {
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT,
+ AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF,
+ AMDGPU_TRANSFER_FUNCTION_PQ_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_IDENTITY,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_BT709_OETF,
+ AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF,
+ AMDGPU_TRANSFER_FUNCTION_COUNT
+};
+
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
+
+ /* Plane color mgmt */
+ /**
+ * @degamma_lut:
+ *
+ * 1D LUT for mapping framebuffer/plane pixel data before sampling or
+ * blending operations. It's usually applied to linearize input space.
+ * The blob (if not NULL) is an array of &struct drm_color_lut.
+ */
+ struct drm_property_blob *degamma_lut;
+ /**
+ * @degamma_tf:
+ *
+ * Predefined transfer function to tell DC driver the input space to
+ * linearize.
+ */
+ enum amdgpu_transfer_function degamma_tf;
+ /**
+ * @hdr_mult:
+ *
+ * Multiplier to 'gain' the plane. When PQ is decoded using the fixed
+ * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on
+ * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously.
+ * Therefore, 1.0 multiplier = 80 nits for SDR content. So if you
+ * want 203 nits for SDR content, pass in (203.0 / 80.0). Format is
+ * S31.32 sign-magnitude.
+ *
+ * The HDR multiplier can range widely beyond [0.0, 1.0]. This means that PQ
+ * TF is needed for any subsequent linear-to-non-linear transforms.
+ */
+ __u64 hdr_mult;
+ /**
+ * @ctm:
+ *
+ * Color transformation matrix. The blob (if not NULL) is a &struct
+ * drm_color_ctm_3x4.
+ */
+ struct drm_property_blob *ctm;
+ /**
+ * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an
+ * array of &struct drm_color_lut.
+ */
+ struct drm_property_blob *shaper_lut;
+ /**
+ * @shaper_tf:
+ *
+ * Predefined transfer function to delinearize color space.
+ */
+ enum amdgpu_transfer_function shaper_tf;
+ /**
+ * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of
+ * &struct drm_color_lut.
+ */
+ struct drm_property_blob *lut3d;
+ /**
+ * @blend_lut: blend lut lookup table blob. The blob (if not NULL) is an
+ * array of &struct drm_color_lut.
+ */
+ struct drm_property_blob *blend_lut;
+ /**
+ * @blend_tf:
+ *
+ * Pre-defined transfer function for converting plane pixel data before
+ * applying blend LUT.
+ */
+ enum amdgpu_transfer_function blend_tf;
};
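
The @hdr_mult doc in the struct above gives 203-nit SDR white as (203.0 / 80.0) encoded in S31.32 sign-magnitude. A hedged sketch of that userspace-side encoding (the helper is an assumption for illustration, not code from the patch):

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    /* Encode a double as S31.32 sign-magnitude. */
    static uint64_t to_s3132(double v)
    {
        uint64_t sign = 0;

        if (v < 0) {
            sign = 1ULL << 63;
            v = -v;
        }
        return sign | (uint64_t)llround(v * 4294967296.0); /* v * 2^32 */
    }

    int main(void)
    {
        /* 203 nits for SDR content, given the 1.0 -> 80 nits convention. */
        printf("hdr_mult = 0x%016llx\n",
               (unsigned long long)to_s3132(203.0 / 80.0));
        return 0;
    }
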
struct dm_crtc_state {
@@ -743,6 +843,14 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
+
+ /**
+ * @regamma_tf:
+ *
+ * Pre-defined transfer function for converting internal FB -> wire
+ * encoding.
+ */
+ enum amdgpu_transfer_function regamma_tf;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -804,14 +912,22 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
+/* 3D LUT max size is 17x17x17 (4913 entries) */
+#define MAX_COLOR_3DLUT_SIZE 17
+#define MAX_COLOR_3DLUT_BITDEPTH 12
+int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
+ struct drm_plane_state *plane_state);
+/* 1D LUT size */
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
void amdgpu_dm_update_connector_after_detect(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a4cb23d05..c87b64e46 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -72,6 +72,7 @@
*/
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
* amdgpu_dm_init_color_mod - Initialize the color module.
@@ -84,6 +85,247 @@ void amdgpu_dm_init_color_mod(void)
setup_x_points_distribution();
}
+static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x)
+{
+ struct fixed31_32 val;
+
+ /* If negative, convert to 2's complement. */
+ if (x & (1ULL << 63))
+ x = -(x & ~(1ULL << 63));
+
+ val.value = x;
+ return val;
+}
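
amdgpu_dm_fixpt_from_s3132() above converts sign-magnitude S31.32 into the two's-complement representation used by DC's fixed31_32. A standalone restatement of the bit manipulation, plus one spot check:

    #include <stdio.h>
    #include <stdint.h>

    static int64_t s3132_to_twos_complement(uint64_t x)
    {
        /* If the sign bit is set, negate the 63-bit magnitude. */
        if (x & (1ULL << 63))
            return -(int64_t)(x & ~(1ULL << 63));
        return (int64_t)x;
    }

    int main(void)
    {
        /* -1.5 in S31.32 sign-magnitude: sign bit | 1.5 * 2^32. */
        uint64_t minus_one_and_half = (1ULL << 63) | (3ULL << 31);
        int64_t v = s3132_to_twos_complement(minus_one_and_half);

        printf("%lld == -(3 << 31) -> %s\n", (long long)v,
               v == -(3LL << 31) ? "ok" : "mismatch");
        return 0;
    }
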
+
+#ifdef AMD_PRIVATE_COLOR
+/* Pre-defined Transfer Functions (TF)
+ *
+ * AMD driver supports pre-defined mathematical functions for transferring
+ * between encoded values and optical/linear space. Depending on HW color caps,
+ * ROMs and curves built by the AMD color module support these transforms.
+ *
+ * The driver-specific color implementation exposes properties for pre-blending
+ * degamma TF, shaper TF (before 3D LUT), and blend(dpp.ogam) TF and
+ * post-blending regamma (mpc.ogam) TF. However, only pre-blending degamma
+ * supports ROM curves. AMD color module uses pre-defined coefficients to build
+ * curves for the other blocks. What can be done by each color block is
+ * described by struct dpp_color_caps and struct mpc_color_caps.
+ *
+ * AMD driver-specific color API exposes the following pre-defined transfer
+ * functions:
+ *
+ * - Identity: linear/identity relationship between pixel value and
+ * luminance value;
+ * - Gamma 2.2, Gamma 2.4, Gamma 2.6: pure power functions;
+ * - sRGB: 2.4: The piece-wise transfer function from IEC 61966-2-1:1999;
+ * - BT.709: has a linear segment in the bottom part and then a power function
+ * with a 0.45 (~1/2.22) gamma for the rest of the range; standardized by
+ * ITU-R BT.709-6;
+ * - PQ (Perceptual Quantizer): used for HDR display, allows luminance range
+ * capability of 0 to 10,000 nits; standardized by SMPTE ST 2084.
+ *
+ * The AMD color model is designed with an assumption that SDR (sRGB, BT.709,
+ * Gamma 2.2, etc.) peak white maps (normalized to 1.0 FP) to 80 nits in the PQ
+ * system. This has the implication that PQ EOTF (non-linear to linear) maps to
+ * [0.0..125.0] where 125.0 = 10,000 nits / 80 nits.
+ *
+ * Non-linear and linear forms are described in the table below:
+ *
+ * ┌───────────┬─────────────────────┬──────────────────────┐
+ * │ │ Non-linear │ Linear │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ sRGB │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ BT709 │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ Gamma 2.x │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ PQ │ UNORM or FP16 CCCS* │ [0.0, 125.0] │
+ * ├───────────┼─────────────────────┼──────────────────────┤
+ * │ Identity │ UNORM or FP16 CCCS* │ [0.0, 1.0] or CCCS** │
+ * └───────────┴─────────────────────┴──────────────────────┘
+ * * CCCS: Windows canonical composition color space
+ * ** Respectively: [0.0, 1.0] for UNORM, or CCCS for FP16 CCCS
+ *
+ * In the driver-specific API, color block names attached to TF properties
+ * suggest the intention regarding non-linear encoding pixel's luminance
+ * values. As some newer encodings don't use gamma curve, we make encoding and
+ * decoding explicit by defining an enum list of transfer functions supported
+ * in terms of EOTF and inverse EOTF, where:
+ *
+ * - EOTF (electro-optical transfer function): is the transfer function to go
+ * from the encoded value to an optical (linear) value. De-gamma functions
+ * traditionally do this.
+ * - Inverse EOTF (simply the inverse of the EOTF): is usually intended to go
+ * from an optical/linear space (which might have been used for blending)
+ * back to the encoded values. Gamma functions traditionally do this.
+ */
+static const char * const
+amdgpu_transfer_function_names[] = {
+ [AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default",
+ [AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity",
+ [AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF",
+ [AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF",
+ [AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF",
+ [AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF",
+};
+
+static const u32 amdgpu_eotf =
+ BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF);
+
+static const u32 amdgpu_inv_eotf =
+ BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF);
+
+static struct drm_property *
+amdgpu_create_tf_property(struct drm_device *dev,
+ const char *name,
+ u32 supported_tf)
+{
+ u32 transfer_functions = supported_tf |
+ BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) |
+ BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY);
+ struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT];
+ int i, len;
+
+ len = 0;
+ for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) {
+ if ((transfer_functions & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = amdgpu_transfer_function_names[i];
+ len++;
+ }
+
+ return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ name, enum_list, len);
+}
+
+int
+amdgpu_dm_create_color_properties(struct amdgpu_device *adev)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_degamma_lut_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_DEGAMMA_LUT_SIZE",
+ 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_degamma_lut_size_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_PLANE_DEGAMMA_TF",
+ amdgpu_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_degamma_tf_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_hdr_mult_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_ctm_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_SHAPER_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_shaper_lut_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_SHAPER_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_shaper_lut_size_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_PLANE_SHAPER_TF",
+ amdgpu_inv_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_shaper_tf_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_LUT3D", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_lut3d_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_lut3d_size_property = prop;
+
+ prop = drm_property_create(adev_to_drm(adev),
+ DRM_MODE_PROP_BLOB,
+ "AMD_PLANE_BLEND_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_blend_lut_property = prop;
+
+ prop = drm_property_create_range(adev_to_drm(adev),
+ DRM_MODE_PROP_IMMUTABLE,
+ "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_blend_lut_size_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_PLANE_BLEND_TF",
+ amdgpu_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.plane_blend_tf_property = prop;
+
+ prop = amdgpu_create_tf_property(adev_to_drm(adev),
+ "AMD_CRTC_REGAMMA_TF",
+ amdgpu_inv_eotf);
+ if (!prop)
+ return -ENOMEM;
+ adev->mode_info.regamma_tf_property = prop;
+
+ return 0;
+}
+#endif
+
/**
* __extract_blob_lut - Extracts the DRM lut and lut size from a blob.
* @blob: DRM color mgmt property blob
@@ -182,7 +424,6 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
struct fixed31_32 *matrix)
{
- int64_t val;
int i;
/*
@@ -201,12 +442,29 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
}
/* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
- val = ctm->matrix[i - (i / 4)];
- /* If negative, convert to 2's complement. */
- if (val & (1ULL << 63))
- val = -(val & ~(1ULL << 63));
+ matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]);
+ }
+}
- matrix[i].value = val;
+/**
+ * __drm_ctm_3x4_to_dc_matrix - converts a DRM CTM 3x4 to a DC CSC float matrix
+ * @ctm: DRM color transformation matrix with 3x4 dimensions
+ * @matrix: DC CSC float matrix
+ *
+ * The matrix needs to be a 3x4 (12 entry) matrix.
+ */
+static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm,
+ struct fixed31_32 *matrix)
+{
+ int i;
+
+ /* The format provided is S31.32, using signed-magnitude representation.
+ * Our fixed31_32 is also S31.32, but is using 2's complement. We have
+ * to convert from signed-magnitude to 2's complement.
+ */
+ for (i = 0; i < 12; i++) {
+ /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
+ matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]);
}
}
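
The ctm[i - floor(i/4)] indexing above spreads a 3x3 DRM CTM across DC's 12-entry (3x4) gamut-remap matrix; the 3x4 variant maps one-to-one. A tiny table generator makes the 3x3 case explicit (treating every fourth entry as the translation column, which the surrounding kernel code handles separately and is elided in this hunk):

    #include <stdio.h>

    int main(void)
    {
        for (int i = 0; i < 12; i++) {
            if (i % 4 == 3)
                printf("matrix[%2d] <- translation column\n", i);
            else
                printf("matrix[%2d] <- ctm[%d]\n", i, i - i / 4);
        }
        return 0;
    }
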
@@ -268,16 +526,18 @@ static int __set_output_tf(struct dc_transfer_func *func,
struct calculate_buffer cal_buffer = {0};
bool res;
- ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
-
cal_buffer.buffer_index = -1;
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (lut_size) {
+ ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
- gamma->num_entries = lut_size;
- __drm_lut_to_dc_gamma(lut, gamma, false);
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+ }
if (func->tf == TRANSFER_FUNCTION_LINEAR) {
/*
@@ -285,27 +545,68 @@ static int __set_output_tf(struct dc_transfer_func *func,
* on top of a linear input. But degamma params can be used
* instead to simulate this.
*/
- gamma->type = GAMMA_CUSTOM;
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
res = mod_color_calculate_degamma_params(NULL, func,
- gamma, true);
+ gamma, gamma != NULL);
} else {
/*
* Assume sRGB. The actual mapping will depend on whether the
* input was legacy or not.
*/
- gamma->type = GAMMA_CS_TFM_1D;
- res = mod_color_calculate_regamma_params(func, gamma, false,
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
has_rom, NULL, &cal_buffer);
}
- dc_gamma_release(&gamma);
+ if (gamma)
+ dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
}
+static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+ const struct drm_color_lut *regamma_lut,
+ uint32_t regamma_size, bool has_rom,
+ enum dc_transfer_func_predefined tf)
+{
+ struct dc_transfer_func *out_tf = stream->out_transfer_func;
+ int ret = 0;
+
+ if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * CRTC RGM goes into RGM LUT.
+ *
+ * Note: there is no implicit sRGB regamma here. We are using
+ * degamma calculation from color module to calculate the curve
+ * from a linear base if gamma TF is not set. However, if gamma
+ * TF (!= Linear) and LUT are set at the same time, we will use
+ * regamma calculation, and the color module will combine the
+ * pre-defined TF and the custom LUT values into the LUT that's
+ * actually programmed.
+ */
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = tf;
+ out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom);
+ } else {
+ /*
+ * No CRTC RGM means we can just put the block into bypass
+ * since we don't have any plane level adjustments using it.
+ */
+ out_tf->type = TF_TYPE_BYPASS;
+ out_tf->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
/**
* __set_input_tf - calculates the input transfer function based on expected
* input space.
+ * @caps: dc color capabilities
* @func: transfer function
* @lut: lookup table that defines the color space
* @lut_size: size of respective lut.
@@ -313,27 +614,240 @@ static int __set_output_tf(struct dc_transfer_func *func,
* Returns:
* 0 in case of success. -ENOMEM if fails.
*/
-static int __set_input_tf(struct dc_transfer_func *func,
+static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size)
{
struct dc_gamma *gamma = NULL;
bool res;
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
- gamma->type = GAMMA_CUSTOM;
- gamma->num_entries = lut_size;
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+ }
- __drm_lut_to_dc_gamma(lut, gamma, false);
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
- res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
- dc_gamma_release(&gamma);
+ if (gamma)
+ dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
}
+static enum dc_transfer_func_predefined
+amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
+{
+ switch (tf) {
+ default:
+ case AMDGPU_TRANSFER_FUNCTION_DEFAULT:
+ case AMDGPU_TRANSFER_FUNCTION_IDENTITY:
+ return TRANSFER_FUNCTION_LINEAR;
+ case AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case AMDGPU_TRANSFER_FUNCTION_BT709_OETF:
+ case AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case AMDGPU_TRANSFER_FUNCTION_PQ_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF:
+ return TRANSFER_FUNCTION_GAMMA22;
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF:
+ return TRANSFER_FUNCTION_GAMMA24;
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF:
+ case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF:
+ return TRANSFER_FUNCTION_GAMMA26;
+ }
+}
+
+static void __to_dc_lut3d_color(struct dc_rgb *rgb,
+ const struct drm_color_lut lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+ * The 3D LUT RGB values are distributed round-robin along four
+ * arrays lut0-3, where the first holds 1229 entries and the
+ * others 1228 each. The bit depth supported per 3D LUT channel
+ * is 12-bit, but DC also supports 10-bit.
+ *
+ * TODO: improve the color pipeline API to let userspace set the
+ * bit depth and 3D LUT size/stride, as specified by VA-API.
+ */
+ __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
+}
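
The round-robin split above is easier to see with concrete numbers. Below is a standalone sketch (illustration only, not driver code) that mirrors the loop shape of __drm_3dlut_to_dc_3dlut() for the 17x17x17 LUT the comment assumes, and counts how many entries land in each of the four arrays:

#include <stdio.h>

int main(void)
{
	/* A 17x17x17 3D LUT flattens to 17^3 = 4913 RGB entries. */
	const int lut3d_size = 17 * 17 * 17;
	int counts[4] = { 0, 0, 0, 0 };
	int lut_i, i;

	/* Entries are dealt round-robin to lut0..lut3, four at a time,
	 * with the same bound as the driver loop above. */
	for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
		counts[0]++;
		counts[1]++;
		counts[2]++;
		counts[3]++;
	}
	counts[0]++; /* the one leftover entry goes to lut0 */

	/* Prints "1229 1228 1228 1228": lut0 holds lut3d_size/4 + 1. */
	printf("%d %d %d %d\n", counts[0], counts[1], counts[2], counts[3]);
	return 0;
}
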
+
+/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut3d: DC 3D LUT
+ *
+ * Map user 3D LUT data to the DC 3D LUT and set all the bits needed to
+ * program it on DCN.
+ */
+static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ } else {
+ /* Stride and bit depth are not programmable by API yet.
+ * Therefore, only a 17x17x17 3D LUT (12-bit) is supported.
+ */
+ lut->lut_3d.use_tetrahedral_9 = false;
+ lut->lut_3d.use_12bits = true;
+ lut->state.bits.initialized = 1;
+ __drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9,
+ MAX_COLOR_3DLUT_BITDEPTH);
+ }
+}
+
+static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut,
+ bool has_rom,
+ enum dc_transfer_func_predefined tf,
+ uint32_t shaper_size,
+ struct dc_transfer_func *func_shaper)
+{
+ int ret = 0;
+
+ if (shaper_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * If a user shaper LUT is set, we assume the content color space
+ * is already linear (whether linearized by a degamma 1D LUT or
+ * linear to begin with).
+ */
+ func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS;
+ func_shaper->tf = tf;
+ func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom);
+ } else {
+ func_shaper->type = TF_TYPE_BYPASS;
+ func_shaper->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
+static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut,
+ bool has_rom,
+ enum dc_transfer_func_predefined tf,
+ uint32_t blend_size,
+ struct dc_transfer_func *func_blend)
+{
+ int ret = 0;
+
+ if (blend_size || tf != TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * DRM plane gamma LUT or TF means we are linearizing color
+ * space before blending (similar to degamma programming). As
+ * we don't have hardcoded curve support, we use the AMD color
+ * module to fill in the parameters that will be translated to
+ * HW points.
+ */
+ func_blend->type = TF_TYPE_DISTRIBUTED_POINTS;
+ func_blend->tf = tf;
+ func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+
+ ret = __set_input_tf(NULL, func_blend, blend_lut, blend_size);
+ } else {
+ func_blend->type = TF_TYPE_BYPASS;
+ func_blend->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return ret;
+}
+
+/**
+ * amdgpu_dm_verify_lut3d_size - verifies if 3D LUT is supported and if user
+ * shaper and 3D LUTs match the hw supported size
+ * @adev: amdgpu device
+ * @plane_state: the DRM plane state
+ *
+ * Verifies if pre-blending (DPP) 3D LUT is supported by the HW (DCN 2.0 or
+ * newer) and if the user shaper and 3D LUTs match the supported size.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if a LUT size is invalid.
+ */
+int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
+ struct drm_plane_state *plane_state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
+ uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+
+ /* shaper LUT is only available if the HW has 3D LUT color caps */
+ exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
+ shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &size);
+
+ if (shaper && size != exp_size) {
+ drm_dbg(&adev->ddev,
+ "Invalid Shaper LUT size. Should be %u but got %u.\n",
+ exp_size, size);
+ return -EINVAL;
+ }
+
+ /* The number of 3D LUT entries is the dimension size cubed */
+ exp_size = has_3dlut ? dim_size * dim_size * dim_size : 0;
+ lut3d = __extract_blob_lut(dm_plane_state->lut3d, &size);
+
+ if (lut3d && size != exp_size) {
+ drm_dbg(&adev->ddev,
+ "Invalid 3D LUT size. Should be %u but got %u.\n",
+ exp_size, size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
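
In byte terms, each drm_color_lut entry is four u16 fields (8 bytes), so the expected blob sizes checked above work out as in this hedged sketch; the 4096-entry MAX_COLOR_LUT_ENTRIES and 17-entry MAX_COLOR_3DLUT_SIZE values are assumptions based on this driver's limits, not shown in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct drm_color_lut from <drm/drm_mode.h>: 8 bytes/entry. */
struct color_lut_entry {
	uint16_t red, green, blue, reserved;
};

int main(void)
{
	const unsigned int lut1d = 4096;	/* MAX_COLOR_LUT_ENTRIES (assumed) */
	const unsigned int dim = 17;		/* MAX_COLOR_3DLUT_SIZE (assumed) */

	/* Sizes a compositor would pass to drmModeCreatePropertyBlob(). */
	printf("shaper blob: %zu bytes\n",
	       lut1d * sizeof(struct color_lut_entry));			/* 32768 */
	printf("3D LUT blob: %zu bytes\n",
	       dim * dim * dim * sizeof(struct color_lut_entry));	/* 39304 */
	return 0;
}
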
+
/**
* amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes
* @crtc_state: the DRM CRTC state
@@ -401,9 +915,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR;
bool is_legacy;
int r;
+ tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf);
+
r = amdgpu_dm_verify_lut_sizes(&crtc->base);
if (r)
return r;
@@ -439,27 +956,23 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_is_degamma_srgb = true;
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-
+ /*
+ * Note: although we pass has_rom as parameter here, we never
+ * actually use ROM because the color module only takes the ROM
+ * path if transfer_func->type == PREDEFINED.
+ *
+ * See more in mod_color_calculate_regamma_params()
+ */
r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
regamma_size, has_rom);
if (r)
return r;
- } else if (has_regamma) {
- /* If atomic regamma, CRTC RGM goes into RGM LUT. */
- stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
-
- r = __set_output_tf(stream->out_transfer_func, regamma_lut,
- regamma_size, has_rom);
+ } else {
+ regamma_size = has_regamma ? regamma_size : 0;
+ r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ regamma_size, has_rom, tf);
if (r)
return r;
- } else {
- /*
- * No CRTC RGM means we can just put the block into bypass
- * since we don't have any plane level adjustments using it.
- */
- stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
}
/*
@@ -495,20 +1008,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
return 0;
}
-/**
- * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
- * @crtc: amdgpu_dm crtc state
- * @dc_plane_state: target DC surface
- *
- * Update the underlying dc_stream_state's input transfer function (ITF) in
- * preparation for hardware commit. The transfer function used depends on
- * the preparation done on the stream for color management.
- *
- * Returns:
- * 0 on success. -ENOMEM if mem allocation fails.
- */
-int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
- struct dc_plane_state *dc_plane_state)
+static int
+map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc,
+ struct dc_plane_state *dc_plane_state,
+ struct dc_color_caps *caps)
{
const struct drm_color_lut *degamma_lut;
enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
@@ -531,8 +1034,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
&degamma_size);
ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
- dc_plane_state->in_transfer_func->type =
- TF_TYPE_DISTRIBUTED_POINTS;
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
/*
* This case isn't fully correct, but also fairly
@@ -564,11 +1066,11 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->in_transfer_func->tf =
TRANSFER_FUNCTION_LINEAR;
- r = __set_input_tf(dc_plane_state->in_transfer_func,
+ r = __set_input_tf(caps, dc_plane_state->in_transfer_func,
degamma_lut, degamma_size);
if (r)
return r;
- } else if (crtc->cm_is_degamma_srgb) {
+ } else {
/*
* For legacy gamma support we need the regamma input
* in linear space. Assume that the input is sRGB.
@@ -577,14 +1079,209 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->in_transfer_func->tf = tf;
if (tf != TRANSFER_FUNCTION_SRGB &&
- !mod_color_calculate_degamma_params(NULL,
- dc_plane_state->in_transfer_func, NULL, false))
+ !mod_color_calculate_degamma_params(caps,
+ dc_plane_state->in_transfer_func,
+ NULL, false))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct dc_color_caps *color_caps)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ const struct drm_color_lut *degamma_lut;
+ enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ uint32_t degamma_size;
+ bool has_degamma_lut;
+ int ret;
+
+ degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut,
+ &degamma_size);
+
+ has_degamma_lut = degamma_lut &&
+ !__is_lut_linear(degamma_lut, degamma_size);
+
+ tf = dm_plane_state->degamma_tf;
+
+ /* If we have neither a plane degamma LUT nor a TF to set on DC,
+ * there is nothing to do here; return.
+ */
+ if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT)
+ return -EINVAL;
+
+ dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf);
+
+ if (has_degamma_lut) {
+ ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
+
+ dc_plane_state->in_transfer_func->type =
+ TF_TYPE_DISTRIBUTED_POINTS;
+
+ ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func,
+ degamma_lut, degamma_size);
+ if (ret)
+ return ret;
+ } else {
+ dc_plane_state->in_transfer_func->type =
+ TF_TYPE_PREDEFINED;
+
+ if (!mod_color_calculate_degamma_params(color_caps,
+ dc_plane_state->in_transfer_func, NULL, false))
return -ENOMEM;
- } else {
- /* ...Otherwise we can just bypass the DGM block. */
- dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+ return 0;
+}
+
+static int
+amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut;
+ uint32_t shaper_size, lut3d_size, blend_size;
+ int ret;
+
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult);
+
+ shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+ shaper_tf = dm_plane_state->shaper_tf;
+ lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+
+ amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func);
+ ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false,
+ amdgpu_tf_to_dc_tf(shaper_tf),
+ shaper_size,
+ dc_plane_state->in_shaper_func);
+ if (ret) {
+ drm_dbg_kms(plane_state->plane->dev,
+ "setting plane %d shaper LUT failed.\n",
+ plane_state->plane->index);
+
+ return ret;
+ }
+
+ blend_tf = dm_plane_state->blend_tf;
+ blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ ret = amdgpu_dm_atomic_blend_lut(blend_lut, false,
+ amdgpu_tf_to_dc_tf(blend_tf),
+ blend_size, dc_plane_state->blend_tf);
+ if (ret) {
+ drm_dbg_kms(plane_state->plane->dev,
+ "setting plane %d gamma lut failed.\n",
+ plane_state->plane->index);
+
+ return ret;
}
return 0;
}
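
The hdr_mult value consumed at the top of amdgpu_dm_plane_set_color_properties() is a signed 31.32 fixed-point number, hence the s3132 conversion helper. A standalone sketch of the encoding, assuming (as AMDGPU_HDR_MULT_DEFAULT suggests) that 1.0 is represented as 1ULL << 32:

#include <stdint.h>
#include <stdio.h>

/* Encode a double as S31.32: 32 integer bits (incl. sign), 32 fractional. */
static int64_t to_s3132(double v)
{
	return (int64_t)(v * 4294967296.0);	/* v * 2^32 */
}

int main(void)
{
	/* 1.0 -> 0x100000000: the identity multiplier (assumed default). */
	printf("1.0  -> 0x%llx\n", (unsigned long long)to_s3132(1.0));
	/* A compositor could boost an SDR plane 4x next to HDR content. */
	printf("4.0  -> 0x%llx\n", (unsigned long long)to_s3132(4.0));
	printf("0.25 -> 0x%llx\n", (unsigned long long)to_s3132(0.25));
	return 0;
}
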
+
+/**
+ * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
+ * @crtc: amdgpu_dm crtc state
+ * @plane_state: DRM plane state
+ * @dc_plane_state: target DC surface
+ *
+ * Update the underlying dc_stream_state's input transfer function (ITF) in
+ * preparation for hardware commit. The transfer function used depends on
+ * the preparation done on the stream for color management.
+ *
+ * Returns:
+ * 0 on success. -ENOMEM if mem allocation fails.
+ */
+int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ struct dc_color_caps *color_caps = NULL;
+ bool has_crtc_cm_degamma;
+ int ret;
+
+ ret = amdgpu_dm_verify_lut3d_size(adev, plane_state);
+ if (ret) {
+ drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n");
+ return ret;
+ }
+
+ if (dc_plane_state->ctx && dc_plane_state->ctx->dc)
+ color_caps = &dc_plane_state->ctx->dc->caps.color;
+
+ /* Initially, we can just bypass the DGM block. */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+
+ /* Afterwards, update values according to the color properties */
+ has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb);
+
+ ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps);
+ if (ret == -ENOMEM)
+ return ret;
+
+ /* We only have one degamma block available (pre-blending) for the
+ * whole color correction pipeline, so we can't perform plane and
+ * CRTC degamma at the same time. Explicitly reject atomic
+ * updates when userspace sets both plane and CRTC degamma properties.
+ */
+ if (has_crtc_cm_degamma && ret != -EINVAL) {
+ drm_dbg_kms(crtc->base.crtc->dev,
+ "doesn't support plane and CRTC degamma at the same time\n");
+ return -EINVAL;
+ }
+
+ /* If we reach this point, there are no plane degamma settings; check
+ * whether CRTC degamma is waiting to be mapped to the pre-blending
+ * degamma block.
+ */
+ if (has_crtc_cm_degamma) {
+ /*
+ * AMD HW doesn't have post-blending degamma caps. When DRM
+ * CRTC atomic degamma is set, we map it to the DPP degamma block
+ * (pre-blending) or, on legacy gamma, we use DPP degamma to
+ * linearize (implicit degamma) from sRGB/BT709 according to
+ * the input space.
+ */
+ ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup CRTC CTM. */
+ if (dm_plane_state->ctm) {
+ ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data;
+ /*
+ * DCN2 and older don't support both pre-blending and
+ * post-blending gamut remap. For this HW family, if we have
+ * the plane and CRTC CTMs simultaneously, CRTC CTM takes
+ * priority, and we discard plane CTM, as implemented in
+ * dcn10_program_gamut_remap(). However, DCN3+ has DPP
+ * (pre-blending) and MPC (post-blending) `gamut remap` blocks;
+ * therefore, we can program plane and CRTC CTMs together by
+ * mapping CRTC CTM to MPC and keeping plane CTM setup at DPP,
+ * as it's done by dcn30_program_gamut_remap().
+ */
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ /* Bypass CTM. */
+ dc_plane_state->gamut_remap_matrix.enable_remap = false;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ }
+
+ return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
+}
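
For context, userspace reaches this code path through the driver-private plane properties attached further down in this series (AMD_PRIVATE_COLOR builds only). The libdrm sketch below is a hedged illustration: the property name "AMD_PLANE_HDR_MULT" and the lookup helper are assumptions made for the example, not confirmed interface:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Find a plane property by name; returns 0 when absent. */
static uint32_t find_prop(int fd, uint32_t plane_id, const char *name)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	uint32_t id = 0;

	for (uint32_t i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

		if (p && !strcmp(p->name, name))
			id = p->prop_id;
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);
	return id;
}

int set_hdr_mult(int fd, uint32_t plane_id, uint64_t mult_s3132)
{
	/* "AMD_PLANE_HDR_MULT" is an assumed name for illustration. */
	uint32_t prop = find_prop(fd, plane_id, "AMD_PLANE_HDR_MULT");
	drmModeAtomicReq *req;
	int ret;

	if (!prop)
		return -1;
	req = drmModeAtomicAlloc();
	drmModeAtomicAddProperty(req, plane_id, prop, mult_s3132);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	drmModeAtomicFree(req);
	return ret;
}

A TEST_ONLY commit is used so the sketch only validates the state without flipping anything on screen.
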
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 52ecfa746..f936a35fa 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -326,6 +326,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
if (!connector->state || connector->state->crtc != crtc)
continue;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
aconn = to_amdgpu_dm_connector(connector);
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index d2834ad85..6e715ef3a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -253,6 +253,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->regamma_tf = cur->regamma_tf;
state->crc_skip_count = cur->crc_skip_count;
state->mpo_requested = cur->mpo_requested;
/* TODO: Duplicate dc_stream after the stream object is flattened */
@@ -289,6 +290,70 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
}
#endif
+#ifdef AMD_PRIVATE_COLOR
+/**
+ * dm_crtc_additional_color_mgmt - enable additional color properties
+ * @crtc: DRM CRTC
+ *
+ * This function lets the driver enable the post-blending CRTC regamma
+ * transfer function property in addition to the DRM CRTC gamma LUT. The
+ * default value means a linear transfer function, which is the default
+ * CRTC gamma LUT behaviour without this property.
+ */
+static void
+dm_crtc_additional_color_mgmt(struct drm_crtc *crtc)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+
+ if (adev->dm.dc->caps.color.mpc.ogam_ram)
+ drm_object_attach_property(&crtc->base,
+ adev->mode_info.regamma_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+}
+
+static int
+amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);
+
+ if (property == adev->mode_info.regamma_tf_property) {
+ if (acrtc_state->regamma_tf != val) {
+ acrtc_state->regamma_tf = val;
+ acrtc_state->base.color_mgmt_changed |= 1;
+ }
+ } else {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state);
+
+ if (property == adev->mode_info.regamma_tf_property)
+ *val = acrtc_state->regamma_tf;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = amdgpu_dm_crtc_reset_state,
@@ -307,6 +372,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
#endif
+#ifdef AMD_PRIVATE_COLOR
+ .atomic_set_property = amdgpu_dm_atomic_crtc_set_property,
+ .atomic_get_property = amdgpu_dm_atomic_crtc_get_property,
+#endif
};
static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -482,6 +551,9 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
+#ifdef AMD_PRIVATE_COLOR
+ dm_crtc_additional_color_mgmt(&acrtc->base);
+#endif
return 0;
fail:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 417c9cb47..85fc61813 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -2971,6 +2971,85 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
return 0;
}
+static int dmub_trace_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+ struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
+ enum dmub_gpint_command cmd;
+ u64 mask = 0xffff;
+ u8 shift = 0;
+ u32 res;
+ int i;
+
+ if (!srv->fw_version)
+ return -EINVAL;
+
+ for (i = 0; i < 4; i++) {
+ res = (val & mask) >> shift;
+
+ switch (i) {
+ case 0:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0;
+ break;
+ case 1:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1;
+ break;
+ case 2:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2;
+ break;
+ case 3:
+ cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3;
+ break;
+ }
+
+ if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT))
+ return -EIO;
+
+ usleep_range(100, 1000);
+
+ mask <<= 16;
+ shift += 16;
+ }
+
+ return 0;
+}
+
+static int dmub_trace_mask_show(void *data, u64 *val)
+{
+ enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0;
+ struct amdgpu_device *adev = data;
+ struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
+ u8 shift = 0;
+ u64 raw = 0;
+ u64 res = 0;
+ int i = 0;
+
+ if (!srv->fw_version)
+ return -EINVAL;
+
+ while (i < 4) {
+ uint32_t response;
+
+ if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return -EIO;
+
+ raw = response;
+ usleep_range(100, 1000);
+
+ cmd++;
+ res |= (raw << shift);
+ shift += 16;
+ i++;
+ }
+
+ *val = res;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show,
+ dmub_trace_mask_set, "0x%llx\n");
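
The attribute pair above shuttles one 64-bit trace mask across four 16-bit GPINT words. A standalone sketch of the split and reassembly arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	uint16_t words[4];
	uint64_t out = 0;
	int i;

	/* Split: WORD0 gets bits 15:0, WORD1 bits 31:16, and so on. */
	for (i = 0; i < 4; i++)
		words[i] = (val >> (16 * i)) & 0xffff;

	/* Reassemble the way dmub_trace_mask_show() does. */
	for (i = 0; i < 4; i++)
		out |= (uint64_t)words[i] << (16 * i);

	printf("0x%llx\n", (unsigned long long)out); /* 0x1122334455667788 */
	return 0;
}

Usage follows the neighbouring attributes, e.g. echo 0xffff > /sys/kernel/debug/dri/0/amdgpu_dm_dmub_trace_mask once the file is created below.
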
+
/*
* Set dmcub trace event IRQ enable or disable.
* Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en
@@ -3884,6 +3963,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
adev, &force_timing_sync_ops);
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root,
+ adev, &dmub_trace_mask_fops);
+
debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
adev, &dmcub_trace_event_state_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 7a09a72e1..c27063305 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -31,6 +31,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include "dm_services.h"
#include "amdgpu.h"
@@ -218,7 +219,7 @@ static void dm_helpers_construct_old_payload(
struct drm_dp_mst_atomic_payload *old_payload)
{
struct drm_dp_mst_atomic_payload *pos;
- int pbn_per_slot = mst_state->pbn_div;
+ int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
u8 next_payload_vc_start = mgr->next_start_slot;
u8 payload_vc_start = new_payload->vc_start_slot;
u8 allocated_time_slots;
@@ -341,15 +342,14 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
return ACT_SUCCESS;
}
-bool dm_helpers_dp_mst_send_payload_allocation(
+void dm_helpers_dp_mst_send_payload_allocation(
struct dc_context *ctx,
- const struct dc_stream_state *stream,
- bool enable)
+ const struct dc_stream_state *stream)
{
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_atomic_payload *new_payload, old_payload;
+ struct drm_dp_mst_atomic_payload *new_payload;
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
int ret = 0;
@@ -357,25 +357,13 @@ bool dm_helpers_dp_mst_send_payload_allocation(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->mst_root)
- return false;
+ return;
mst_mgr = &aconnector->mst_root->mst_mgr;
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
-
new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- if (!enable) {
- set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
- clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
- }
-
- if (enable) {
- ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
- } else {
- dm_helpers_construct_old_payload(mst_mgr, mst_state,
- new_payload, &old_payload);
- drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
- }
+ ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
if (ret) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
@@ -386,10 +374,36 @@ bool dm_helpers_dp_mst_send_payload_allocation(
amdgpu_dm_set_mst_status(&aconnector->mst_status,
clr_flag, false);
}
-
- return true;
}
+void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_atomic_payload *new_payload, old_payload;
+ enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
+ enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
+
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector || !aconnector->mst_root)
+ return;
+
+ mst_mgr = &aconnector->mst_root->mst_mgr;
+ mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+ new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+ dm_helpers_construct_old_payload(mst_mgr, mst_state,
+ new_payload, &old_payload);
+
+ drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
+
+ amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
+ amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
+ }
+
void dm_dtn_log_begin(struct dc_context *ctx,
struct dc_log_buffer_ctx *log_ctx)
{
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index d595030c3..3390f0d84 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -897,10 +897,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *amdgpu_dm_connector;
+ const struct dc_link *dc_link;
- const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
@@ -933,9 +938,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_dm_connector *amdgpu_dm_connector =
- to_amdgpu_dm_connector(connector);
- const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+ struct amdgpu_dm_connector *amdgpu_dm_connector;
+ const struct dc_link *dc_link;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 602a2ab98..941e96f10 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -27,6 +27,8 @@
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_edid.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -44,7 +46,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
-#include "dc/dcn20/dcn20_resource.h"
+#include "dc/resource/dcn20/dcn20_resource.h"
#define PEAK_FACTOR_X1000 1006
@@ -424,8 +426,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector,
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
return &adev->dm.mst_encoders[acrtc->crtc_id].base;
@@ -941,10 +942,10 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
link_timeslots_used = 0;
for (i = 0; i < count; i++)
- link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
+ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div));
fair_pbn_alloc =
- (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
+ (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div);
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
@@ -1604,9 +1605,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct dc_link_settings cur_link_settings;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
- unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
- uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
+ struct dc_dsc_config_options dsc_options = {0};
/*
* Consider the case where the depth of the MST topology tree is less than or equal to 2
@@ -1622,30 +1622,39 @@ enum dc_status dm_dp_mst_is_port_support_mode(
(aconnector->mst_output_port->passthrough_aux ||
aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
cur_link_settings = stream->link->verified_link_cap;
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
+ down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
- upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
- &cur_link_settings);
- down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
-
- /* pick the bottleneck */
- end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
- down_link_bw_in_kbps);
-
- /*
- * use the maximum dsc compression bandwidth as the required
- * bandwidth for the mode
- */
- max_compressed_bw_in_kbps = bw_range.min_kbps;
+ /* pick the end to end bw bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps);
- if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
- DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
+ if (end_to_end_bw_in_kbps < bw_range.min_kbps) {
+ DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
+
+ if (end_to_end_bw_in_kbps < bw_range.stream_kbps) {
+ dc_dsc_get_default_config_option(stream->link->dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16;
+ if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0],
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &dsc_options,
+ end_to_end_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(stream->link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n");
+ } else {
+ DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ }
} else {
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4);
- if (pbn > full_pbn)
+ if (pbn > aconnector->mst_output_port->full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
}
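
The dfixed_trunc() calls sprinkled through this file follow pbn_div in drm_dp_mst_topology_state becoming a 20.12 fixed-point value; truncation recovers the integer part. A standalone sketch of that representation; the helpers mirror the <drm/drm_fixed.h> macros in spirit but are local stand-ins:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point: 20 integer bits, 12 fractional bits. */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 dfixed_init(uint32_t i)	/* integer -> 20.12 */
{
	fixed20_12 f = { .full = i << 12 };
	return f;
}

static uint32_t dfixed_trunc(fixed20_12 f)	/* 20.12 -> integer */
{
	return f.full >> 12;
}

int main(void)
{
	/* A PBN-per-slot value of 60 stored as 20.12 is 60 << 12 = 245760. */
	fixed20_12 pbn_div = dfixed_init(60);

	printf("raw=%u trunc=%u\n", pbn_div.full, dfixed_trunc(pbn_div));
	return 0;
}
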
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 116121e64..8a4c40b4c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1337,8 +1337,14 @@ static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
WARN_ON(amdgpu_state == NULL);
- if (amdgpu_state)
- __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
+ if (!amdgpu_state)
+ return;
+
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
+ amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT;
+ amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
+ amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT;
}
static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
@@ -1357,6 +1363,27 @@ static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct
dc_plane_state_retain(dm_plane_state->dc_state);
}
+ if (old_dm_plane_state->degamma_lut)
+ dm_plane_state->degamma_lut =
+ drm_property_blob_get(old_dm_plane_state->degamma_lut);
+ if (old_dm_plane_state->ctm)
+ dm_plane_state->ctm =
+ drm_property_blob_get(old_dm_plane_state->ctm);
+ if (old_dm_plane_state->shaper_lut)
+ dm_plane_state->shaper_lut =
+ drm_property_blob_get(old_dm_plane_state->shaper_lut);
+ if (old_dm_plane_state->lut3d)
+ dm_plane_state->lut3d =
+ drm_property_blob_get(old_dm_plane_state->lut3d);
+ if (old_dm_plane_state->blend_lut)
+ dm_plane_state->blend_lut =
+ drm_property_blob_get(old_dm_plane_state->blend_lut);
+
+ dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf;
+ dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult;
+ dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf;
+ dm_plane_state->blend_tf = old_dm_plane_state->blend_tf;
+
return &dm_plane_state->base;
}
@@ -1424,12 +1451,206 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ if (dm_plane_state->degamma_lut)
+ drm_property_blob_put(dm_plane_state->degamma_lut);
+ if (dm_plane_state->ctm)
+ drm_property_blob_put(dm_plane_state->ctm);
+ if (dm_plane_state->lut3d)
+ drm_property_blob_put(dm_plane_state->lut3d);
+ if (dm_plane_state->shaper_lut)
+ drm_property_blob_put(dm_plane_state->shaper_lut);
+ if (dm_plane_state->blend_lut)
+ drm_property_blob_put(dm_plane_state->blend_lut);
+
if (dm_plane_state->dc_state)
dc_plane_state_release(dm_plane_state->dc_state);
drm_atomic_helper_plane_destroy_state(plane, state);
}
+#ifdef AMD_PRIVATE_COLOR
+static void
+dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane)
+{
+ struct amdgpu_mode_info mode_info = dm->adev->mode_info;
+ struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp;
+
+ /* Check HW color pipeline capabilities on DPP block (pre-blending)
+ * before exposing related properties.
+ */
+ if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_degamma_lut_property,
+ 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_degamma_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_degamma_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ }
+ /* HDR MULT is always available */
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_hdr_mult_property,
+ AMDGPU_HDR_MULT_DEFAULT);
+
+ /* Only enable plane CTM if both DPP and MPC gamut remap is available. */
+ if (dm->dc->caps.color.mpc.gamut_remap)
+ drm_object_attach_property(&plane->base,
+ dm->adev->mode_info.plane_ctm_property, 0);
+
+ if (dpp_color_caps.hw_3d_lut) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_lut_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_shaper_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_lut3d_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_lut3d_size_property,
+ MAX_COLOR_3DLUT_SIZE);
+ }
+
+ if (dpp_color_caps.ogam_ram) {
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_lut_property, 0);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_lut_size_property,
+ MAX_COLOR_LUT_ENTRIES);
+ drm_object_attach_property(&plane->base,
+ mode_info.plane_blend_tf_property,
+ AMDGPU_TRANSFER_FUNCTION_DEFAULT);
+ }
+}
+
+static int
+dm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ bool replaced = false;
+ int ret;
+
+ if (property == adev->mode_info.plane_degamma_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->degamma_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_degamma_tf_property) {
+ if (dm_plane_state->degamma_tf != val) {
+ dm_plane_state->degamma_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_hdr_mult_property) {
+ if (dm_plane_state->hdr_mult != val) {
+ dm_plane_state->hdr_mult = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_ctm_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->ctm,
+ val,
+ sizeof(struct drm_color_ctm_3x4), -1,
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_shaper_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->shaper_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_shaper_tf_property) {
+ if (dm_plane_state->shaper_tf != val) {
+ dm_plane_state->shaper_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else if (property == adev->mode_info.plane_lut3d_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->lut3d,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_blend_lut_property) {
+ ret = drm_property_replace_blob_from_id(plane->dev,
+ &dm_plane_state->blend_lut,
+ val, -1,
+ sizeof(struct drm_color_lut),
+ &replaced);
+ dm_plane_state->base.color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == adev->mode_info.plane_blend_tf_property) {
+ if (dm_plane_state->blend_tf != val) {
+ dm_plane_state->blend_tf = val;
+ dm_plane_state->base.color_mgmt_changed = 1;
+ }
+ } else {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dm_atomic_plane_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+
+ if (property == adev->mode_info.plane_degamma_lut_property) {
+ *val = (dm_plane_state->degamma_lut) ?
+ dm_plane_state->degamma_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_degamma_tf_property) {
+ *val = dm_plane_state->degamma_tf;
+ } else if (property == adev->mode_info.plane_hdr_mult_property) {
+ *val = dm_plane_state->hdr_mult;
+ } else if (property == adev->mode_info.plane_ctm_property) {
+ *val = (dm_plane_state->ctm) ?
+ dm_plane_state->ctm->base.id : 0;
+ } else if (property == adev->mode_info.plane_shaper_lut_property) {
+ *val = (dm_plane_state->shaper_lut) ?
+ dm_plane_state->shaper_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_shaper_tf_property) {
+ *val = dm_plane_state->shaper_tf;
+ } else if (property == adev->mode_info.plane_lut3d_property) {
+ *val = (dm_plane_state->lut3d) ?
+ dm_plane_state->lut3d->base.id : 0;
+ } else if (property == adev->mode_info.plane_blend_lut_property) {
+ *val = (dm_plane_state->blend_lut) ?
+ dm_plane_state->blend_lut->base.id : 0;
+ } else if (property == adev->mode_info.plane_blend_tf_property) {
+ *val = dm_plane_state->blend_tf;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
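
The blob-property setters above rely on drm_property_replace_blob_from_id() validating sizes in two modes: an exact byte size (with -1 for the element size) for fixed-layout blobs like the 3x4 CTM, or -1 with an element size for variable-length LUTs, which only requires a whole number of entries. A simplified stand-in for that check, for illustration only:

#include <stddef.h>
#include <stdio.h>

/* Approximates the size validation applied by the DRM core helper:
 * expected_size > 0 demands an exact match, expected_elem_size > 0
 * demands a whole number of elements. */
static int blob_size_ok(size_t blob_len, long expected_size,
			long expected_elem_size)
{
	if (expected_size > 0 && blob_len != (size_t)expected_size)
		return 0;
	if (expected_elem_size > 0 && blob_len % (size_t)expected_elem_size)
		return 0;
	return 1;
}

int main(void)
{
	/* CTM: exactly 12 s31.32 coefficients, 12 * 8 = 96 bytes. */
	printf("ctm 96B: %d\n", blob_size_ok(96, 96, -1));		/* 1 */
	/* LUT: any whole number of 8-byte drm_color_lut entries. */
	printf("lut 4096*8B: %d\n", blob_size_ok(4096 * 8, -1, 8));	/* 1 */
	printf("lut 100B: %d\n", blob_size_ok(100, -1, 8));		/* 0 */
	return 0;
}
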
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1438,6 +1659,10 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
+#ifdef AMD_PRIVATE_COLOR
+ .atomic_set_property = dm_atomic_plane_set_property,
+ .atomic_get_property = dm_atomic_plane_get_property,
+#endif
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@@ -1517,6 +1742,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+#ifdef AMD_PRIVATE_COLOR
+ dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#endif
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 08ce3bb8f..286ecd28c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+ return false;
+
return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}
@@ -138,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
* amdgpu_dm_psr_enable() - enable psr f/w
* @stream: stream state
*
- * Return: true if success
*/
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
unsigned int vsync_rate_hz = 0;
@@ -187,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
power_opt |= psr_power_opt_z10_static_screen;
- return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+ dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+ if (link->ctx->dc->caps.ips_support)
+ dc_allow_idle_optimizations(link->ctx->dc, true);
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c..1fdfd183c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
#define AMDGPU_DM_PSR_ENTRY_DELAY 5
void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
new file mode 100644
index 000000000..08c494a7a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_wb.h"
+#include "amdgpu_display.h"
+#include "dc.h"
+
+#include <drm/drm_edid.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+static const u32 amdgpu_dm_wb_formats[] = {
+ DRM_FORMAT_XRGB2101010,
+};
+
+static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ bool found = false;
+ uint8_t i;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) {
+ if (fb->format->format == amdgpu_dm_wb_formats[i])
+ found = true;
+ }
+
+ if (!found) {
+ DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
+ &fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
+{
+ /* Maximum resolution supported by DWB */
+ return drm_add_modes_noedid(connector, 3840, 2160);
+}
+
+static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_device *adev;
+ struct amdgpu_bo *rbo;
+ uint32_t domain;
+ int r;
+
+ if (!job->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ afb = to_amdgpu_framebuffer(job->fb);
+ obj = job->fb->obj[0];
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+
+ r = amdgpu_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ return r;
+ }
+
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
+ if (r) {
+ dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
+ goto error_unlock;
+ }
+
+ domain = amdgpu_display_supported_domains(adev, rbo->flags);
+
+ r = amdgpu_bo_pin(rbo, domain);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ goto error_unlock;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("%p bind failed\n", rbo);
+ goto error_unpin;
+ }
+
+ amdgpu_bo_unreserve(rbo);
+
+ afb->address = amdgpu_bo_gpu_offset(rbo);
+
+ amdgpu_bo_ref(rbo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(rbo);
+
+error_unlock:
+ amdgpu_bo_unreserve(rbo);
+ return r;
+}
+
+static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct amdgpu_bo *rbo;
+ int r;
+
+ if (!job->fb)
+ return;
+
+ rbo = gem_to_amdgpu_bo(job->fb->obj[0]);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
+static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = {
+ .atomic_check = amdgpu_dm_wb_encoder_atomic_check,
+};
+
+static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = {
+ .get_modes = amdgpu_dm_wb_connector_get_modes,
+ .prepare_writeback_job = amdgpu_dm_wb_prepare_job,
+ .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job,
+};
+
+int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_wb_connector *wbcon,
+ uint32_t link_index)
+{
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ int res = 0;
+
+ wbcon->link = link;
+
+ drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs);
+
+ res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base,
+ &amdgpu_dm_wb_connector_funcs,
+ &amdgpu_dm_wb_encoder_helper_funcs,
+ amdgpu_dm_wb_formats,
+ ARRAY_SIZE(amdgpu_dm_wb_formats),
+ amdgpu_dm_get_encoder_crtc_mask(dm->adev));
+
+ if (res)
+ return res;
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (wbcon->base.base.funcs->reset)
+ wbcon->base.base.funcs->reset(&wbcon->base.base);
+
+ return 0;
+}
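
Once registered, this connector is driven entirely through the DRM core writeback properties rather than anything driver-specific. A hedged userspace sketch; it assumes the caller has already looked up the core "CRTC_ID" and "WRITEBACK_FB_ID" property IDs (e.g. via drmModeObjectGetProperties()):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Queue one writeback capture into fb_id on the given connector. */
int queue_writeback(int fd, uint32_t conn_id, uint32_t crtc_id,
		    uint32_t prop_crtc_id, uint32_t prop_wb_fb_id,
		    uint32_t fb_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -1;
	/* Core writeback properties: CRTC_ID routes the connector,
	 * WRITEBACK_FB_ID receives the captured frame. */
	drmModeAtomicAddProperty(req, conn_id, prop_crtc_id, crtc_id);
	drmModeAtomicAddProperty(req, conn_id, prop_wb_fb_id, fb_id);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
}
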
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h
new file mode 100644
index 000000000..13d31c857
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_WB_H__
+#define __AMDGPU_DM_WB_H__
+
+#include <drm/drm_writeback.h>
+
+int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_wb_connector *dm_wbcon,
+ uint32_t link_index);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3a169b78e..7991ae468 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc
ifdef CONFIG_DRM_AMD_DC_FP
@@ -34,12 +34,8 @@ DC_LIBS += dcn21
DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
-DC_LIBS += dcn302
-DC_LIBS += dcn303
DC_LIBS += dcn31
DC_LIBS += dcn314
-DC_LIBS += dcn315
-DC_LIBS += dcn316
DC_LIBS += dcn32
DC_LIBS += dcn321
DC_LIBS += dcn35
@@ -51,7 +47,6 @@ DC_LIBS += dce120
DC_LIBS += dce112
DC_LIBS += dce110
-DC_LIBS += dce100
DC_LIBS += dce80
ifdef CONFIG_DRM_AMD_DC_SI
@@ -65,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
include $(AMD_DC)
DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index e295a839a..1090d2350 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -103,7 +103,8 @@ void convert_float_matrix(
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
- uint32_t remainder = 0;
+ uint32_t remainder;
+
while (b != 0) {
remainder = a % b;
a = b;
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index f2dfa96f9..39530b2ea 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -94,7 +94,7 @@ static void calculate_bandwidth(
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
- uint32_t max_chunks_fbc_mode;
+ uint32_t max_chunks_fbc_mode = 0;
int32_t num_cursor_lines;
int32_t i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index bc7a375f4..05f392501 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2223,22 +2223,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
switch (bp->object_info_tbl.revision.minor) {
case 4:
- default:
- object = get_bios_object(bp, object_id);
-
- if (!object)
- return BP_RESULT_BADINPUT;
-
- record = get_disp_connector_caps_record(bp, object);
- if (!record)
- return BP_RESULT_NORECORD;
-
- info->INTERNAL_DISPLAY =
- (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
- info->INTERNAL_DISPLAY_BL =
- (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
- break;
- case 5:
+ default:
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_disp_connector_caps_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->INTERNAL_DISPLAY =
+ (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
+ info->INTERNAL_DISPLAY_BL =
+ (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
+ break;
+ case 5:
object_path_v3 = get_bios_object_from_path_v3(bp, object_id);
if (!object_path_v3)
@@ -2400,7 +2400,6 @@ static enum bp_result get_vram_info_v30(
return result;
}
-
/*
* get_integrated_info_v11
*
@@ -3336,27 +3335,28 @@ static enum bp_result get_bracket_layout_record(
DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
return BP_RESULT_BADINPUT;
}
+
tbl = &bp->object_info_tbl;
v1_4 = tbl->v1_4;
v1_5 = tbl->v1_5;
result = BP_RESULT_NORECORD;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
- for (i = 0; i < v1_4->number_of_path; ++i) {
- if (bracket_layout_id ==
- v1_4->display_path[i].display_objid) {
- result = update_slot_layout_info(dcb, i, slot_layout_info);
- break;
- }
+ case 4:
+ default:
+ for (i = 0; i < v1_4->number_of_path; ++i) {
+ if (bracket_layout_id == v1_4->display_path[i].display_objid) {
+ result = update_slot_layout_info(dcb, i, slot_layout_info);
+ break;
}
- break;
- case 5:
- for (i = 0; i < v1_5->number_of_path; ++i)
- result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
- break;
+ }
+ break;
+ case 5:
+ for (i = 0; i < v1_5->number_of_path; ++i)
+ result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
+ break;
}
+
return result;
}
@@ -3365,9 +3365,7 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
-
struct bios_parser *bp;
-
static enum bp_result record_result;
unsigned int max_slots;
@@ -3377,7 +3375,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
-
bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
@@ -3558,7 +3555,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_board_layout_info = bios_get_board_layout_info,
- /* TODO: use this fn in hw init?*/
.pack_data_tables = bios_parser_pack_data_tables,
.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 3e73c4e59..28a2a837d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -29,6 +29,7 @@
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
+#include "dc_state_priv.h"
#include "link.h"
#include "dce100/dce_clk_mgr.h"
@@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt(
/* Don't count SubVP phantom pipes as part of active
* display count
*/
- if (stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
/*
@@ -368,7 +369,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
-#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
break;
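
A recurring theme in this patch is replacing direct reads of stream->mall_stream_config with the new dc_state_get_stream_subvp_type()/dc_state_get_pipe_subvp_type() accessors, so SubVP classification is owned by the dc_state. The accessor bodies are not in this hunk; the sketch below is an assumed shape (a lookup of the stream's status within the given state), not the real dc_state_priv implementation.

/* Assumed accessor shape; the real body lives behind dc_state_priv.h. */
static enum mall_stream_type get_stream_subvp_type_sketch(
		const struct dc_state *state,
		const struct dc_stream_state *stream)
{
	int i;

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == stream)
			/* assumed: SubVP config now hangs off the stream status */
			return state->stream_status[i].mall_stream_config.type;

	return SUBVP_NONE;
}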
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index a5489fe68..aa9fd1dc5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
+ if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
+ break;
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
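
The added break covers the case where the voltage table (VG_NUM_SOC_VOLTAGE_LEVELS entries) is longer than the DCF clock table (VG_NUM_DCFCLK_DPM_LEVELS entries), so a match at a high index can no longer read past DcfClocks[]. The same guard in isolation, with made-up table sizes:

/* Sketch: walk two parallel tables bounded by the shorter one. */
#define N_VOLT 8	/* hypothetical sizes for illustration */
#define N_CLK  4

static unsigned int lookup_clk_sketch(const unsigned int volt[N_VOLT],
				      const unsigned int clk[N_CLK],
				      unsigned int target)
{
	int i;

	for (i = 0; i < N_VOLT; i++) {
		if (i >= N_CLK)
			break;		/* never index clk[] out of bounds */
		if (volt[i] == target)
			return clk[i];
	}
	return 0;			/* no match */
}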
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa4..6ad4f4efe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
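
The workaround now chooses which pipe context to scan based on safe_to_lower: when clocks may be lowered, the incoming context describes the pipes that will remain; otherwise the committed state is authoritative. Reduced to the bare idiom (a sketch, not driver code):

/* Sketch of the context-selection idiom used above. */
static struct pipe_ctx *pick_pipe_sketch(struct dc *dc,
					 struct dc_state *context,
					 bool safe_to_lower, int i)
{
	/* lowering: trust the new context; otherwise: trust what is live */
	return safe_to_lower ? &context->res_ctx.pipe_ctx[i]
			     : &dc->current_state->res_ctx.pipe_ctx[i];
}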
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 289918ea7..bbdbc7816 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -25,7 +25,6 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
-
#include "dcn32/dcn32_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
@@ -34,7 +33,7 @@
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
-
+#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "smu13_driver_if.h"
@@ -472,20 +471,56 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
-static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
+static bool dcn32_check_native_scaling(struct pipe_ctx *pipe)
{
- unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
- unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
- unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
- unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
- unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
- unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
+ bool is_native_scaling = false;
+ int width = pipe->plane_state->src_rect.width;
+ int height = pipe->plane_state->src_rect.height;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
+
+static void dcn32_auto_dpm_test_log(
+ struct dc_clocks *new_clocks,
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context)
+{
+ unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg,
+ fclk_khz_reg, mall_ss_size_bytes;
+ int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
+
+ struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+ int active_pipe_count = 0;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+ pipe_ctx_list[active_pipe_count] = pipe_ctx;
+ active_pipe_count++;
+ }
+ }
+
+ mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
+
+ dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
+ dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
+ dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
+ dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
+ dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
+ fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
// Overrides for these clocks in case there is no p_state change support
- int dramclk_khz_override = new_clocks->dramclk_khz;
- int fclk_khz_override = new_clocks->fclk_khz;
+ dramclk_khz_override = new_clocks->dramclk_khz;
+ fclk_khz_override = new_clocks->fclk_khz;
- int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
+ num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
if (!new_clocks->p_state_change_support) {
dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;
@@ -502,16 +537,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
//
// AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
////////////////////////////////////////////////////////////////////////////
- if (new_clocks &&
+ if (new_clocks && active_pipe_count > 0 &&
new_clocks->dramclk_khz > 0 &&
new_clocks->fclk_khz > 0 &&
new_clocks->dcfclk_khz > 0 &&
new_clocks->dppclk_khz > 0) {
+ uint32_t pix_clk_list[MAX_PIPES] = {0};
+ int p_state_list[MAX_PIPES] = {0};
+ int disp_src_width_list[MAX_PIPES] = {0};
+ int disp_src_height_list[MAX_PIPES] = {0};
+ uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+ bool is_scaled_list[MAX_PIPES] = {0};
+
+ for (int i = 0; i < active_pipe_count; i++) {
+ struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+ uint64_t refresh_rate;
+
+ pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+ p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+ refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+ disp_src_refresh_list[i] = refresh_rate;
+
+ if (curr_pipe_ctx->plane_state) {
+ is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx));
+ disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+ disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+ }
+ }
+
DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
"dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
"dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
- "dtbclk_hw:%d - fclk_hw:%d\n",
+ "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+ "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+ "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+ "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+ "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+ "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+ "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
dramclk_khz_override,
fclk_khz_override,
new_clocks->dcfclk_khz,
@@ -521,7 +589,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
dprefclk_khz_reg,
dcfclk_khz_reg,
dtbclk_khz_reg,
- fclk_khz_reg);
+ fclk_khz_reg,
+ pix_clk_list[0], pix_clk_list[1], pix_clk_list[3], pix_clk_list[2],
+ mall_ss_size_bytes,
+ p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+ disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+ disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+ disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+ disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
}
}
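
The refresh-rate arithmetic above is a ceiling division, rate = ceil(pix_clk_100hz * 100 / (v_total * h_total)), split into two div_u64() steps. As a worked check with illustrative numbers: a 148.5 MHz pixel clock (pix_clk_100hz = 1485000) over a 2200 x 1125 total gives 148500000 / 2475000 = 60 Hz.

/* Sketch of the same rounding-up division, values as in the example. */
static uint64_t refresh_hz_sketch(uint64_t pix_clk_100hz,
				  uint32_t h_total, uint32_t v_total)
{
	uint64_t r = pix_clk_100hz * 100 +
		     (uint64_t)v_total * h_total - 1;	/* round up */

	r = div_u64(r, v_total);
	r = div_u64(r, h_total);
	return r;	/* refresh_hz_sketch(1485000, 2200, 1125) == 60 */
}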
@@ -694,6 +769,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* DCCG requires KHz precision for DTBCLK */
clk_mgr_base->clks.ref_dtbclk_khz =
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+
dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
}
@@ -722,7 +798,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
if (dc->config.enable_auto_dpm_test_logs) {
- dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
+ dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 353d5fb9e..9cbab880c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -50,6 +50,7 @@
#include "dc_dmub_srv.h"
#include "link.h"
#include "logger_types.h"
+
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
@@ -80,12 +81,12 @@
static int dcn35_get_active_display_cnt_wa(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ int *all_active_disps)
{
- int i, display_count;
+ int i, display_count = 0;
bool tmds_present = false;
- display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
@@ -103,7 +104,8 @@ static int dcn35_get_active_display_cnt_wa(
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
-
+ if (all_active_disps != NULL)
+ *all_active_disps = display_count;
/* WA for hang on HDMI after display off then back on */
if (display_count == 0 && tmds_present)
display_count = 1;
@@ -216,15 +218,16 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
+ int all_active_disps = 0;
if (dc->work_arounds.skip_clock_update)
return;
- /* DTBCLK is fixed, so set a default if unspecified. */
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
@@ -246,7 +249,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn35_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -382,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn35_smu_enable_pme_wa(clk_mgr);
}
-void dcn35_init_clocks(struct clk_mgr *clk_mgr)
-{
- uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
-
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
bool dcn35_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
@@ -416,11 +405,22 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
}
static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
- struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+ struct clk_mgr_dcn35 *clk_mgr)
{
-
}
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
@@ -436,32 +436,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -473,32 +473,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -825,7 +825,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn35_get_active_display_cnt_wa(dc, context);
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -992,7 +992,6 @@ void dcn35_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
- struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn35_funcs;
@@ -1045,7 +1044,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index b6b8c3ca1..6d4a1ffab 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -116,6 +116,9 @@ static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
+
+ if (clk_mgr->base.ctx->dc->debug.disable_timeout)
+ max_retries++;
} while (max_retries--);
return res_val;
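
The added lines make the wait effectively unbounded when the disable_timeout debug flag is set: incrementing max_retries once per iteration cancels the post-decrement in the loop condition, so the loop exits only when the SMU responds. The idiom in isolation (poll() here is a stand-in parameter, not a driver symbol):

/* Sketch: a bounded retry loop that a debug knob can make unbounded. */
static int wait_for_response_sketch(int (*poll)(void), bool disable_timeout)
{
	unsigned int max_retries = 10;
	int res;

	do {
		res = poll();			/* hypothetical status read */
		if (res)			/* non-zero: response arrived */
			break;

		udelay(10);

		if (disable_timeout)
			max_retries++;		/* cancels the decrement below */
	} while (max_retries--);

	return res;
}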
@@ -276,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
- smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info);
+ smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info);
}
void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
@@ -295,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
- smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0);
+ smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}
void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
@@ -307,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
+ smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__);
}
void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
@@ -347,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
- unsigned int msg_id, param;
+ unsigned int msg_id, param, retv;
if (!clk_mgr->smu_present)
return;
@@ -357,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@@ -387,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
}
- dcn35_smu_send_msg_with_param(
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
msg_id,
param);
- smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param);
+ smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
}
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
@@ -405,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDprefclkFreq,
0);
- smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk);
+ smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk);
return dprefclk * 1000;
}
@@ -420,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDtbclkFreq,
0);
- smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk);
+ smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk);
return dtbclk * 1000;
}
/* Arg = 1: turn DTB CLK on; 0: turn DTB CLK off. When on, it runs at 600 MHz. */
@@ -433,7 +442,7 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
clk_mgr,
VBIOSSMC_MSG_SetDtbClk,
enable);
- smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0);
+ smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0);
}
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
@@ -442,30 +451,45 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
enable);
+ smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}
int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
- return dcn35_smu_send_msg_with_param(
+ int retv;
+
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
0);
+ smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
+ return retv;
}
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
- return dcn35_smu_send_msg_with_param(
+ int retv;
+
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
0);
+
+ //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
+ return retv;
}
void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
REG_WRITE(MP1_SMN_C2PMSG_71, param);
+ //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}
uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
- return REG_READ(MP1_SMN_C2PMSG_71);
+ uint32_t retv;
+
+ retv = REG_READ(MP1_SMN_C2PMSG_71);
+ //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
+ return retv;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b51208f44..3c3d613c5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -34,6 +34,8 @@
#include "dce/dce_hwseq.h"
#include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -409,9 +411,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->optimized_required)
return false;
+ if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
+ return true;
+
+ dc_exit_ips_for_hw_access(dc);
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -452,6 +459,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
int i = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -482,6 +491,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
bool ret = false;
struct crtc_position position;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -601,6 +612,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
if (pipe == NULL)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
/* By default, capture the full frame */
param.windowa_x_start = 0;
param.windowa_y_start = 0;
@@ -660,6 +673,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
struct pipe_ctx *pipe;
struct timing_generator *tg;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
@@ -684,6 +699,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
int i;
struct pipe_ctx *pipe_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -719,6 +736,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
if (option > DITHER_OPTION_MAX)
return;
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
stream->dither_option = option;
memset(&params, 0, sizeof(params));
@@ -743,6 +762,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -760,6 +781,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -786,6 +809,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < num_streams; i++) {
struct dc_stream_state *stream = streams[i];
@@ -808,7 +833,7 @@ static void dc_destruct(struct dc *dc)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
- dc_release_state(dc->current_state);
+ dc_state_release(dc->current_state);
dc->current_state = NULL;
}
@@ -1020,29 +1045,27 @@ static bool dc_construct(struct dc *dc,
}
#endif
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
- dc->current_state = dc_create_state(dc);
+ dc->current_state = dc_state_create(dc);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}
- if (!create_links(dc, init_params->num_virtual_links))
- goto fail;
-
- /* Create additional DIG link encoder objects if fewer than the platform
- * supports were created during link construction.
- */
- if (!create_link_encoders(dc))
- goto fail;
-
- dc_resource_state_construct(dc, dc->current_state);
-
return true;
fail:
@@ -1085,7 +1108,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
}
}
-static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1105,9 +1128,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
- get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -1115,7 +1138,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state(dc);
+ struct dc_state *dangling_context = dc_state_create_current_copy(dc);
struct dc_state *current_ctx;
struct pipe_ctx *pipe;
struct timing_generator *tg;
@@ -1123,8 +1146,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (dangling_context == NULL)
return;
- dc_resource_state_copy_construct(dc->current_state, dangling_context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
@@ -1161,6 +1182,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
+ bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
/* When disabling plane for a phantom pipe, we must turn on the
@@ -1169,22 +1191,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* state that can result in underflow or hang when enabling it
* again for different use.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->enable_crtc) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
- main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
if (dc->hwss.blank_phantom)
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
}
}
- dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+
+ if (is_phantom)
+ dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+ else
+ dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
@@ -1203,7 +1232,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* The OTG is set to disable on falling edge of VUPDATE so the plane disable
* will still get its double buffer update.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
}
@@ -1212,7 +1241,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
current_ctx = dc->current_state;
dc->current_state = dangling_context;
- dc_release_state(current_ctx);
+ dc_state_release(current_ctx);
}
static void disable_vbios_mode_if_required(
@@ -1276,6 +1305,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require to reprogram blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1284,7 +1361,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1510,7 +1587,7 @@ static void program_timing_sync(
}
for (k = 0; k < group_size; k++) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
+ struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
status->timing_sync_info.group_id = num_group;
status->timing_sync_info.group_size = group_size;
@@ -1521,7 +1598,7 @@ static void program_timing_sync(
}
- /* remove any other pipes that are already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
if (dc->config.use_pipe_ctx_sync_logic) {
/* check pipe's syncd to decide which pipe to be removed */
for (j = 1; j < group_size; j++) {
@@ -1534,6 +1611,7 @@ static void program_timing_sync(
pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
} else {
+ /* remove any other pipes by checking for a valid plane */
for (j = j + 1; j < group_size; j++) {
bool is_blanked;
@@ -1554,7 +1632,7 @@ static void program_timing_sync(
if (group_size > 1) {
if (sync_type == TIMING_SYNCHRONIZABLE) {
dc->hwss.enable_timing_synchronization(
- dc, group_index, group_size, pipe_set);
+ dc, ctx, group_index, group_size, pipe_set);
} else
if (sync_type == VBLANK_SYNCHRONIZABLE) {
dc->hwss.enable_vblanks_synchronization(
@@ -1759,6 +1837,8 @@ void dc_enable_stereo(
int i, j;
struct pipe_ctx *pipe;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1778,6 +1858,8 @@ void dc_enable_stereo(
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
@@ -1836,7 +1918,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* Check old context for SubVP */
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -1962,6 +2044,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * The optimized dispclk depends on the ODM setup, so wait for any
+ * pending ODM update to complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -1994,9 +2081,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
old_state = dc->current_state;
dc->current_state = context;
- dc_release_state(old_state);
+ dc_state_release(old_state);
- dc_retain_state(dc->current_state);
+ dc_state_retain(dc->current_state);
return result;
}
@@ -2034,6 +2121,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (!streams_changed(dc, streams, stream_count))
return res;
+ dc_exit_ips_for_hw_access(dc);
+
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
for (i = 0; i < stream_count; i++) {
@@ -2067,12 +2156,10 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (handle_exit_odm2to1)
res = commit_minimal_transition_state(dc, dc->current_state);
- context = dc_create_state(dc);
+ context = dc_state_create_current_copy(dc);
if (!context)
goto context_alloc_fail;
- dc_resource_state_copy_construct_current(dc, context);
-
res = dc_validate_with_context(dc, set, stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
@@ -2087,7 +2174,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+ struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
if (dc->hwss.is_abm_supported)
status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
@@ -2098,7 +2185,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
fail:
- dc_release_state(context);
+ dc_state_release(context);
context_alloc_fail:
@@ -2152,7 +2239,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
- if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
continue;
/* Must set to false to start with, due to OR in update function */
@@ -2210,7 +2297,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
}
process_deferred_updates(dc);
@@ -2222,104 +2309,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
-}
-
-static void init_state(struct dc *dc, struct dc_state *context)
-{
- /* Each context must have their own instance of VBA and in order to
- * initialize and obtain IP and SOC the base DML instance from DC is
- * initially copied into every context
- */
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-}
-
-struct dc_state *dc_create_state(struct dc *dc)
-{
- struct dc_state *context = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
-
- if (!context)
- return NULL;
-
- init_state(dc, context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2) {
- dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
- }
-#endif
- kref_init(&context->refcount);
-
- return context;
-}
-
-struct dc_state *dc_copy_state(struct dc_state *src_ctx)
-{
- int i, j;
- struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-
- if (!new_ctx)
- return NULL;
- memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
- dc_release_state(new_ctx);
- return NULL;
- }
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- }
-
- for (i = 0; i < new_ctx->stream_count; i++) {
- dc_stream_retain(new_ctx->streams[i]);
- for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- new_ctx->stream_status[i].plane_states[j]);
- }
-
- kref_init(&new_ctx->refcount);
-
- return new_ctx;
-}
-
-void dc_retain_state(struct dc_state *context)
-{
- kref_get(&context->refcount);
-}
-
-static void dc_state_free(struct kref *kref)
-{
- struct dc_state *context = container_of(kref, struct dc_state, refcount);
- dc_resource_state_destruct(context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- dml2_destroy(context->bw_ctx.dml2);
- context->bw_ctx.dml2 = 0;
-#endif
-
- kvfree(context);
-}
-
-void dc_release_state(struct dc_state *context)
-{
- kref_put(&context->refcount, dc_state_free);
}
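
The block removed above is the old in-file dc_state lifecycle (create, copy, retain, release); callers now go through the dc_state_* API, which, judging by every call site in this patch, keeps the same kref discipline. The contract in miniature, sketched from the removed code rather than the new implementation:

/* Sketch of the kref lifecycle the dc_state_* helpers preserve. */
struct obj {
	struct kref refcount;
	/* ... payload ... */
};

static void obj_free(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	kvfree(o);
}

static struct obj *obj_create(void)
{
	struct obj *o = kvzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		kref_init(&o->refcount);	/* starts at refcount 1 */
	return o;
}

static void obj_retain(struct obj *o)
{
	kref_get(&o->refcount);
}

static void obj_release(struct obj *o)
{
	kref_put(&o->refcount, obj_free);	/* frees on the last put */
}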
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2742,8 +2731,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
-
- dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -2951,9 +2938,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
- stream->adjust = *update->crtc_timing_adjust;
-
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -2994,11 +2978,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config->num_slices_v != 0);
/* Use temporary context for validating new DSC config */
- struct dc_state *dsc_validate_context = dc_create_state(dc);
+ struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
if (dsc_validate_context) {
- dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
-
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
@@ -3007,7 +2989,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
- dc_release_state(dsc_validate_context);
+ dc_state_release(dsc_validate_context);
} else {
DC_ERROR("Failed to allocate new validate context for DSC change\n");
update->dsc_config = NULL;
@@ -3106,30 +3088,27 @@ static bool update_planes_and_stream_state(struct dc *dc,
new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(dc->current_state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return false;
}
- dc_resource_state_copy_construct(
- dc->current_state, context);
-
/* For each full update, remove all existing phantom pipes first.
* Ensures that we have enough pipes for newly added MPO planes
*/
- if (dc->res_pool->funcs->remove_phantom_pipes)
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
/*remove old surfaces from context */
- if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
BREAK_TO_DEBUGGER();
goto fail;
}
/* add surface to context */
- if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -3154,19 +3133,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- /* For phantom pipes we remove and create a new set of phantom pipes
- * for each full update (because we don't know if we'll need phantom
- * pipes until after the first round of validation). However, if validation
- * fails we need to keep the existing phantom pipes (because we don't update
- * the dc->current_state).
- *
- * The phantom stream/plane refcount is decremented for validation because
- * we assume it'll be removed (the free comes when the dc_state is freed),
- * but if validation fails we have to increment back the refcount so it's
- * consistent.
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3187,7 +3153,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
return true;
fail:
- dc_release_state(context);
+ dc_state_release(context);
return false;
@@ -3488,18 +3454,26 @@ static void commit_planes_for_stream_fast(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
+ struct dc_stream_status *stream_status = NULL;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
- if (dc->debug.visual_confirm) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ if (!top_pipe_to_program)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
}
@@ -3523,6 +3497,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
}
}
+ stream_status = dc_state_get_stream_status(context, stream);
+
build_dmub_cmd_list(dc,
srf_updates,
surface_count,
@@ -3535,7 +3511,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->dmub_cmd_count,
context->block_sequence,
&(context->block_sequence_steps),
- top_pipe_to_program);
+ top_pipe_to_program,
+ stream_status);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -3548,7 +3525,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3586,6 +3563,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -3607,6 +3585,8 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
@@ -3631,7 +3611,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
// Check old context for SubVP
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -3639,19 +3619,22 @@ static void commit_planes_for_stream(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
subvp_curr_use = true;
break;
}
}
- if (dc->debug.visual_confirm)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
+ }
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
@@ -3918,7 +3901,9 @@ static void commit_planes_for_stream(struct dc *dc,
* programming has completed (we turn on phantom OTG in order
* to complete the plane disable for phantom pipes).
*/
- dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
@@ -4024,7 +4009,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
subvp_active = true;
break;
}
@@ -4061,7 +4046,7 @@ struct pipe_split_policy_backup {
static void release_minimal_transition_state(struct dc *dc,
struct dc_state *context, struct pipe_split_policy_backup *policy)
{
- dc_release_state(context);
+ dc_state_release(context);
/* restore previous pipe split and odm policy */
if (!dc->config.is_vmin_only_asic)
dc->debug.pipe_split_policy = policy->mpc_policy;
@@ -4072,7 +4057,7 @@ static void release_minimal_transition_state(struct dc *dc,
static struct dc_state *create_minimal_transition_state(struct dc *dc,
struct dc_state *base_context, struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = dc_create_state(dc);
+ struct dc_state *minimal_transition_context = NULL;
unsigned int i, j;
if (!dc->config.is_vmin_only_asic) {
@@ -4084,7 +4069,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
- dc_resource_state_copy_construct(base_context, minimal_transition_context);
+ minimal_transition_context = dc_state_create_copy(base_context);
+ if (!minimal_transition_context)
+ return NULL;
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
@@ -4116,7 +4103,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
bool success = false;
struct dc_state *minimal_transition_context;
struct pipe_split_policy_backup policy;
- struct mall_temp_config mall_temp_config;
/* commit based on new context */
/* Since all phantom pipes are removed in full validation,
@@ -4125,8 +4111,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
minimal_transition_context = create_minimal_transition_state(dc,
context, &policy);
if (minimal_transition_context) {
@@ -4139,16 +4123,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
}
if (!success) {
@@ -4216,7 +4190,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
@@ -4403,8 +4377,7 @@ static bool full_update_required(struct dc *dc,
stream_update->mst_bw_update ||
stream_update->func_shaper ||
stream_update->lut3d_func ||
- stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust))
+ stream_update->pending_test_pattern))
return true;
if (stream) {
@@ -4484,7 +4457,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *context;
enum surface_update_type update_type;
int i;
- struct mall_temp_config mall_temp_config;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
@@ -4495,6 +4467,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
bool is_plane_addition = 0;
bool is_fast_update_only;
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
@@ -4528,23 +4502,10 @@ bool dc_update_planes_and_stream(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
if (!commit_minimal_transition_state(dc, context)) {
- dc_release_state(context);
+ dc_state_release(context);
return false;
}
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
update_type = UPDATE_TYPE_FULL;
}
@@ -4601,7 +4562,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
// clear any forced full updates
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4628,6 +4589,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
int i, j;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4660,14 +4623,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
}
- dc_resource_state_copy_construct(state, context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4706,7 +4667,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
+ dc_state_release(context);
return;
}
}
@@ -4739,7 +4700,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -4812,7 +4773,9 @@ void dc_set_power_state(
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
- dc_resource_state_construct(dc, dc->current_state);
+ dc_state_construct(dc, dc->current_state);
+
+ dc_exit_ips_for_hw_access(dc);
dc_z10_restore(dc);
@@ -4827,7 +4790,7 @@ void dc_set_power_state(
default:
ASSERT(dc->current_state->stream_count == 0);
- dc_resource_state_destruct(dc->current_state);
+ dc_state_destruct(dc->current_state);
break;
}
@@ -4904,6 +4867,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
return true;
}
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->replay_settings.replay_feature_enabled) {
+ if (active && !link->replay_settings.replay_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ false, false, NULL))
+ return false;
+ } else if (!active && link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
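As a quick illustration of how this new export is meant to be driven — a hypothetical DM-side sketch, not part of this patch (dm_set_display_idle() is an invented name; only dc_set_replay_allow_active() comes from the hunk above):

/* Hypothetical caller: toggle eDP Replay device-wide when the compositor
 * reports idleness. Per the loop above, only links with
 * replay_settings.replay_feature_enabled are affected.
 * Assumes drm_print.h for DRM_DEBUG_DRIVER.
 */
static void dm_set_display_idle(struct dc *dc, bool idle)
{
        if (!dc_set_replay_allow_active(dc, idle))
                DRM_DEBUG_DRIVER("eDP Replay toggle rejected by DC\n");
}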
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -4923,6 +4918,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+void dc_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, false);
+}
+
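This helper gives the IPS exit — sprinkled through the dc_update_planes_and_stream(), dc_commit_updates_for_stream(), and dc_set_power_state() hunks earlier — a single home. The intended pattern looks roughly like this (illustrative sketch, not a function from the patch):

/* Illustrative pattern: any DC entry point that is about to touch
 * hardware registers or issue DMUB commands first forces an IPS exit.
 * On parts without IPS support (dc->caps.ips_support == false), the
 * call is a no-op.
 */
void dc_some_hw_entry_point(struct dc *dc)
{
        dc_exit_ips_for_hw_access(dc);

        /* ... hardware programming is now safe ... */
}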
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -5443,6 +5444,8 @@ bool dc_abm_save_restore(
struct dc_link *link = stream->sink->link;
struct dc_link *edp_links[MAX_NUM_EDP];
+ if (link->replay_settings.replay_feature_enabled)
+ return false;
/*find primary pipe associated with stream*/
for (i = 0; i < MAX_PIPES; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index fc18b9dc9..9c05b1a07 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -31,6 +31,7 @@
#include "basics/dc_common.h"
#include "resource.h"
#include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
@@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color(
}
void get_subvp_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
uint32_t color_value = MAX_TG_COLOR_VALUE;
- bool enable_subvp = false;
- int i;
-
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context)
- return;
+ if (pipe_ctx) {
+ switch (pipe_ctx->p_state_type) {
+ case P_STATE_SUB_VP:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_DRR_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_V_BLANK_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ default:
+ break;
+ }
+ }
+}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+void get_mclk_switch_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
- if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- /* SubVP enable - red */
- color->color_g_y = 0;
+ if (pipe_ctx) {
+ switch (pipe_ctx->p_state_type) {
+ case P_STATE_V_BLANK:
+ color->color_r_cr = color_value;
+ color->color_g_y = color_value;
color->color_b_cb = 0;
+ break;
+ case P_STATE_FPO:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = color_value;
+ break;
+ case P_STATE_V_ACTIVE:
color->color_r_cr = color_value;
- enable_subvp = true;
-
- if (pipe_ctx->stream == pipe->stream)
- return;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ case P_STATE_SUB_VP:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_DRR_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_V_BLANK_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ default:
break;
}
}
+}
- if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
- color->color_r_cr = 0;
- if (pipe_ctx->stream->allow_freesync == 1) {
- /* SubVP enable and DRR on - green */
- color->color_b_cb = 0;
- color->color_g_y = color_value;
+void set_p_state_switch_method(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ bool enable_subvp;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+ return;
+
+ if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+ dm_dram_clock_change_unsupported) {
+ /* MCLK switching is supported */
+ if (!pipe_ctx->has_vactive_margin) {
+ /* In Vblank - yellow */
+ pipe_ctx->p_state_type = P_STATE_V_BLANK;
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ /* FPO + Vblank - cyan */
+ pipe_ctx->p_state_type = P_STATE_FPO;
+ }
} else {
- /* SubVP enable and No DRR - blue */
- color->color_g_y = 0;
- color->color_b_cb = color_value;
+ /* In Vactive - pink */
+ pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+ }
+
+ /* SubVP */
+ enable_subvp = false;
+
+ for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) &&
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ /* SubVP enable - red */
+ pipe_ctx->p_state_type = P_STATE_SUB_VP;
+ enable_subvp = true;
+
+ if (pipe_ctx->stream == pipe->stream)
+ return;
+ break;
+ }
+ }
+
+ if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) {
+ if (pipe_ctx->stream->allow_freesync == 1) {
+ /* SubVP enable and DRR on - green */
+ pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+ } else {
+ /* SubVP enable and No DRR - blue */
+ pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+ }
}
}
}
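For reference, the p-state-type to confirm-color mapping consolidated by this hunk, read directly from the switch cases above:

  P_STATE_V_BLANK        -> yellow (R+G)  MCLK switch in vblank
  P_STATE_FPO            -> cyan   (G+B)  FPO + vblank
  P_STATE_V_ACTIVE       -> pink   (R+B)  MCLK switch in vactive
  P_STATE_SUB_VP         -> red    (R)    SubVP main
  P_STATE_DRR_SUB_VP     -> green  (G)    SubVP with DRR
  P_STATE_V_BLANK_SUB_VP -> blue   (B)    SubVP without DRR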
@@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
int *num_steps,
- struct pipe_ctx *pipe_ctx)
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_status *stream_status)
{
struct dc_plane_state *plane = pipe_ctx->plane_state;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+ plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
- current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ stream_status->mall_stream_config.type == SUBVP_MAIN) {
block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
@@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+ plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
-void get_mclk_switch_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color)
-{
- uint32_t color_value = MAX_TG_COLOR_VALUE;
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
- return;
-
- if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
- dm_dram_clock_change_unsupported) {
- /* MCLK switching is supported */
- if (!pipe_ctx->has_vactive_margin) {
- /* In Vblank - yellow */
- color->color_r_cr = color_value;
- color->color_g_y = color_value;
-
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- /* FPO + Vblank - cyan */
- color->color_r_cr = 0;
- color->color_g_y = color_value;
- color->color_b_cb = color_value;
- }
- } else {
- /* In Vactive - pink */
- color->color_r_cr = color_value;
- color->color_b_cb = color_value;
- }
- /* SubVP */
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, color);
- }
-}
-
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index f365773d5..c6c35037b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link,
return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
}
+bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait,
+ force_static, power_opts);
+}
+
bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
{
return link->dc->link_srv->edp_get_replay_state(link, state);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 990d775e4..9fbdb0969 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -42,6 +42,7 @@
#include "link_enc_cfg.h"
#include "link.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#include "virtual/virtual_link_hwss.h"
#include "link/hwss/link_hwss_dio.h"
#include "link/hwss/link_hwss_dpia.h"
@@ -69,8 +70,8 @@
#include "dcn314/dcn314_resource.h"
#include "dcn315/dcn315_resource.h"
#include "dcn316/dcn316_resource.h"
-#include "../dcn32/dcn32_resource.h"
-#include "../dcn321/dcn321_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
#define VISUAL_CONFIRM_BASE_DEFAULT 3
@@ -1764,6 +1765,29 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx(
return free_pipe_idx;
}
+int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, OTG_MASTER) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
+ }
+
+ return free_pipe_idx;
+}
+
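The new helper slots in ahead of the existing fallbacks; the acquisition order it enables (wired up in acquire_otg_master_pipe_for_stream() further down in this patch) is:

/* OTG master acquisition order after this patch:
 *   1. recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx()
 *        - reuse a pipe that served as OTG master in the current context
 *   2. recource_find_free_pipe_not_used_in_cur_res_ctx()
 *        - any pipe idle in both the old and new contexts
 *   3. resource_find_any_free_pipe()
 *        - last resort: any free pipe, even if taking it disturbs an
 *          existing ODM/MPC/MPO configuration
 */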
int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
@@ -2440,6 +2464,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
+ if (!otg_master)
+ return;
+
ASSERT(resource_get_odm_slice_count(otg_master) == 1);
ASSERT(otg_master->plane_state == NULL);
ASSERT(otg_master->stream_res.stream_enc);
@@ -2974,190 +3001,6 @@ bool resource_update_pipes_for_plane_with_slice_count(
return result;
}
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context)
-{
- struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *otg_master_pipe;
- struct dc_stream_status *stream_status = NULL;
- bool added = false;
-
- stream_status = dc_stream_get_status_from_state(context, stream);
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to attach surface!\n");
- goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
- dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
- goto out;
- }
-
- otg_master_pipe = resource_get_otg_master_for_stream(
- &context->res_ctx, stream);
- if (otg_master_pipe)
- added = resource_append_dpp_pipes_for_plane_composition(context,
- dc->current_state, pool, otg_master_pipe, plane_state);
-
- if (added) {
- stream_status->plane_states[stream_status->plane_count] =
- plane_state;
- stream_status->plane_count++;
- dc_plane_state_retain(plane_state);
- }
-
-out:
- return added;
-}
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context)
-{
- int i;
- struct dc_stream_status *stream_status = NULL;
- struct resource_pool *pool = dc->res_pool;
-
- if (!plane_state)
- return true;
-
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
- }
-
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to remove plane.\n");
- return false;
- }
-
- resource_remove_dpp_pipes_for_plane_composition(
- context, pool, plane_state);
-
- for (i = 0; i < stream_status->plane_count; i++) {
- if (stream_status->plane_states[i] == plane_state) {
- dc_plane_state_release(stream_status->plane_states[i]);
- break;
- }
- }
-
- if (i == stream_status->plane_count) {
- dm_error("Existing plane_state not found; failed to detach it!\n");
- return false;
- }
-
- stream_status->plane_count--;
-
- /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
- for (; i < stream_status->plane_count; i++)
- stream_status->plane_states[i] = stream_status->plane_states[i + 1];
-
- stream_status->plane_states[stream_status->plane_count] = NULL;
-
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- context, dc->current_state, dc->res_pool, stream, 1);
-
- return true;
-}
-
-/**
- * dc_rem_all_planes_for_stream - Remove planes attached to the target stream.
- *
- * @dc: Current dc state.
- * @stream: Target stream, which we want to remove the attached plans.
- * @context: New context.
- *
- * Return:
- * Return true if DC was able to remove all planes from the target
- * stream, otherwise, return false.
- */
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context)
-{
- int i, old_plane_count;
- struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
-
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
- }
-
- if (stream_status == NULL) {
- dm_error("Existing stream %p not found!\n", stream);
- return false;
- }
-
- old_plane_count = stream_status->plane_count;
-
- for (i = 0; i < old_plane_count; i++)
- del_planes[i] = stream_status->plane_states[i];
-
- for (i = 0; i < old_plane_count; i++)
- if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
- return false;
-
- return true;
-}
-
-static bool add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- const struct dc_validation_set set[],
- int set_count,
- struct dc_state *context)
-{
- int i, j;
-
- for (i = 0; i < set_count; i++)
- if (set[i].stream == stream)
- break;
-
- if (i == set_count) {
- dm_error("Stream %p not found in set!\n", stream);
- return false;
- }
-
- for (j = 0; j < set[i].plane_count; j++)
- if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
- return false;
-
- return true;
-}
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context)
-{
- struct dc_validation_set set;
- int i;
-
- set.stream = stream;
- set.plane_count = plane_count;
-
- for (i = 0; i < plane_count; i++)
- set.plane_states[i] = plane_states[i];
-
- return add_all_planes_for_stream(dc, stream, &set, 1, context);
-}
-
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@@ -3309,84 +3152,6 @@ static struct audio *find_first_free_audio(
return NULL;
}
-/*
- * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
- */
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream)
-{
- enum dc_status res;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
- DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- new_ctx->streams[new_ctx->stream_count] = stream;
- dc_stream_retain(stream);
- new_ctx->stream_count++;
-
- res = resource_add_otg_master_for_stream_output(
- new_ctx, dc->res_pool, stream);
- if (res != DC_OK)
- DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
-
- return res;
-}
-
-/*
- * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
- */
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream)
-{
- int i;
- struct dc_context *dc_ctx = dc->ctx;
- struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
- &new_ctx->res_ctx, stream);
-
- if (!del_pipe) {
- DC_ERROR("Pipe not found for stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- resource_update_pipes_for_stream_with_slice_count(new_ctx,
- dc->current_state, dc->res_pool, stream, 1);
- resource_remove_otg_master_for_stream_output(
- new_ctx, dc->res_pool, stream);
-
- for (i = 0; i < new_ctx->stream_count; i++)
- if (new_ctx->streams[i] == stream)
- break;
-
- if (new_ctx->streams[i] != stream) {
- DC_ERROR("Context doesn't have stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- dc_stream_release(new_ctx->streams[i]);
- new_ctx->stream_count--;
-
- /* Trim back arrays */
- for (; i < new_ctx->stream_count; i++) {
- new_ctx->streams[i] = new_ctx->streams[i + 1];
- new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
- }
-
- new_ctx->streams[new_ctx->stream_count] = NULL;
- memset(
- &new_ctx->stream_status[new_ctx->stream_count],
- 0,
- sizeof(new_ctx->stream_status[0]));
-
- return DC_OK;
-}
-
static struct dc_stream_state *find_pll_sharable_stream(
struct dc_stream_state *stream_needs_pll,
struct dc_state *context)
@@ -3594,6 +3359,7 @@ static void mark_seamless_boot_stream(
* |________|_______________|___________|_____________|
*/
static bool acquire_otg_master_pipe_for_stream(
+ const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream)
@@ -3607,7 +3373,22 @@ static bool acquire_otg_master_pipe_for_stream(
int pipe_idx;
struct pipe_ctx *pipe_ctx = NULL;
- pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
+ /*
+ * Upper level code is responsible for optimizing away unnecessary
+ * addition and removal of unchanged streams, so an unchanged stream
+ * keeps the same OTG master instance allocated. When the current
+ * stream is removed and a new one is added, we want to reuse the OTG
+ * instance made available by the removed stream first. Failing that,
+ * we try to avoid free pipes already used in the current context, as
+ * taking one could tear down an existing ODM/MPC/MPO configuration.
+ */
+ pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
@@ -3667,7 +3448,7 @@ enum dc_status resource_map_pool_resources(
if (!acquired)
/* acquire new resources */
- acquired = acquire_otg_master_pipe_for_stream(
+ acquired = acquire_otg_master_pipe_for_stream(dc->current_state,
context, pool, stream);
pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
@@ -3750,35 +3531,6 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
-/**
- * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
- *
- * @dc: copy out of dc->current_state
- * @dst_ctx: copy into this
- *
- * This function makes a shallow copy of the current DC state and increments
- * refcounts on existing streams and planes.
- */
-void dc_resource_state_copy_construct_current(
- const struct dc *dc,
- struct dc_state *dst_ctx)
-{
- dc_resource_state_copy_construct(dc->current_state, dst_ctx);
-}
-
-
-void dc_resource_state_construct(
- const struct dc *dc,
- struct dc_state *dst_ctx)
-{
- dst_ctx->clk_mgr = dc->clk_mgr;
-
- /* Initialise DIG link encoder resource tracking variables. */
- if (dc->res_pool)
- link_enc_cfg_init(dc, dst_ctx);
-}
-
-
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
{
if (dc->res_pool == NULL)
@@ -3822,6 +3574,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
return false;
}
+static bool add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *state)
+{
+ int i, j;
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count) {
+ dm_error("Stream %p not found in set!\n", stream);
+ return false;
+ }
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state))
+ return false;
+
+ return true;
+}
+
/**
* dc_validate_with_context - Validate and update the potential new stream in the context object
*
@@ -3927,7 +3704,8 @@ enum dc_status dc_validate_with_context(struct dc *dc,
unchanged_streams[i],
set,
set_count)) {
- if (!dc_rem_all_planes_for_stream(dc,
+
+ if (!dc_state_rem_all_planes_for_stream(dc,
unchanged_streams[i],
context)) {
res = DC_FAIL_DETACH_SURFACES;
@@ -3949,12 +3727,24 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
- if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
+ if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) {
+ /* remove phantoms specifically */
+ if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_phantom_stream(dc, context, del_streams[i]);
+ dc_state_release_phantom_stream(dc, context, del_streams[i]);
+ } else {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
}
- res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
if (res != DC_OK)
goto fail;
}
@@ -3977,7 +3767,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
/* Add new streams and then add all planes for the new stream */
for (i = 0; i < add_streams_count; i++) {
calculate_phy_pix_clks(add_streams[i]);
- res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
+ res = dc_state_add_stream(dc, context, add_streams[i]);
if (res != DC_OK)
goto fail;
@@ -4483,84 +4273,6 @@ static void set_vtem_info_packet(
*info_packet = stream->vtem_infopacket;
}
-void dc_resource_state_destruct(struct dc_state *context)
-{
- int i, j;
-
- for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++)
- dc_plane_state_release(
- context->stream_status[i].plane_states[j]);
-
- context->stream_status[i].plane_count = 0;
- dc_stream_release(context->streams[i]);
- context->streams[i] = NULL;
- }
- context->stream_count = 0;
- context->stream_mask = 0;
- memset(&context->res_ctx, 0, sizeof(context->res_ctx));
- memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg));
- memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars));
- context->clk_mgr = NULL;
- memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw));
- memset(context->block_sequence, 0, sizeof(context->block_sequence));
- context->block_sequence_steps = 0;
- memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd));
- context->dmub_cmd_count = 0;
- memset(&context->perf_params, 0, sizeof(context->perf_params));
- memset(&context->scratch, 0, sizeof(context->scratch));
-}
-
-void dc_resource_state_copy_construct(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx)
-{
- int i, j;
- struct kref refcount = dst_ctx->refcount;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_context *dml2 = NULL;
-
- // Need to preserve allocated dml2 context
- if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
- dml2 = dst_ctx->bw_ctx.dml2;
-#endif
-
- *dst_ctx = *src_ctx;
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- // Preserve allocated dml2 context
- if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
- dst_ctx->bw_ctx.dml2 = dml2;
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
- }
-
- for (i = 0; i < dst_ctx->stream_count; i++) {
- dc_stream_retain(dst_ctx->streams[i]);
- for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- dst_ctx->stream_status[i].plane_states[j]);
- }
-
- /* context refcount should not be overridden */
- dst_ctx->refcount = refcount;
-
-}
-
struct clock_source *dc_resource_find_first_free_pll(
struct resource_context *res_ctx,
const struct resource_pool *pool)
@@ -4740,7 +4452,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option = DITHER_OPTION_SPATIAL8;
break;
case COLOR_DEPTH_101010:
- option = DITHER_OPTION_SPATIAL10;
+ option = DITHER_OPTION_TRUN10;
break;
default:
option = DITHER_OPTION_DISABLE;
@@ -4766,6 +4478,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ if (option == DITHER_OPTION_TRUN10)
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
}
/* special case - Formatter can only reduce by 4 bits at most.
@@ -5283,7 +4997,7 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st
if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 &&
+ else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
new file mode 100644
index 000000000..61986e5cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -0,0 +1,880 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "core_types.h"
+#include "core_status.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
+#include "dc_plane_priv.h"
+
+#include "dm_services.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+
+#include "dml2/dml2_wrapper.h"
+#include "dml2/dml2_internal_types.h"
+
+#define DC_LOGGER \
+ dc->ctx->logger
+#define DC_LOGGER_INIT(logger)
+
+/* Private dc_state helper functions */
+static bool dc_state_track_phantom_stream(struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ if (state->phantom_stream_count >= MAX_PHANTOM_PIPES)
+ return false;
+
+ state->phantom_streams[state->phantom_stream_count++] = phantom_stream;
+
+ return true;
+}
+
+static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+ bool res = false;
+ int i;
+
+ /* first find phantom stream in the dc_state */
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ if (state->phantom_streams[i] == phantom_stream) {
+ state->phantom_streams[i] = NULL;
+ res = true;
+ break;
+ }
+ }
+
+ /* failed to find stream in state */
+ if (!res)
+ return res;
+
+ /* trim back phantom streams */
+ state->phantom_stream_count--;
+ for (; i < state->phantom_stream_count; i++)
+ state->phantom_streams[i] = state->phantom_streams[i + 1];
+
+ return res;
+}
+
+static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ if (state->phantom_streams[i] == phantom_stream)
+ return true;
+ }
+
+ return false;
+}
+
+static bool dc_state_track_phantom_plane(struct dc_state *state,
+ struct dc_plane_state *phantom_plane)
+{
+ if (state->phantom_plane_count >= MAX_PHANTOM_PIPES)
+ return false;
+
+ state->phantom_planes[state->phantom_plane_count++] = phantom_plane;
+
+ return true;
+}
+
+static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+ bool res = false;
+ int i;
+
+ /* first find phantom plane in the dc_state */
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ if (state->phantom_planes[i] == phantom_plane) {
+ state->phantom_planes[i] = NULL;
+ res = true;
+ break;
+ }
+ }
+
+ /* failed to find plane in state */
+ if (!res)
+ return res;
+
+ /* trim back phantom planes */
+ state->phantom_plane_count--;
+ for (; i < state->phantom_plane_count; i++)
+ state->phantom_planes[i] = state->phantom_planes[i + 1];
+
+ return res;
+}
+
+static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ if (state->phantom_planes[i] == phantom_plane)
+ return true;
+ }
+
+ return false;
+}
+
+static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state)
+{
+ int i, j;
+
+ memcpy(dst_state, src_state, sizeof(struct dc_state));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ if (cur_pipe->prev_odm_pipe)
+ cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
+
+ if (cur_pipe->next_odm_pipe)
+ cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+ }
+
+ /* retain phantoms */
+ for (i = 0; i < dst_state->phantom_stream_count; i++)
+ dc_stream_retain(dst_state->phantom_streams[i]);
+
+ for (i = 0; i < dst_state->phantom_plane_count; i++)
+ dc_plane_state_retain(dst_state->phantom_planes[i]);
+
+ /* retain streams and planes */
+ for (i = 0; i < dst_state->stream_count; i++) {
+ dc_stream_retain(dst_state->streams[i]);
+ for (j = 0; j < dst_state->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ dst_state->stream_status[i].plane_states[j]);
+ }
+
+}
+
+static void init_state(struct dc *dc, struct dc_state *state)
+{
+ /* Each context must have its own instance of VBA, and in order to
+ * initialize it and obtain the IP and SOC parameters, the base DML
+ * instance from DC is initially copied into every context.
+ */
+ memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+}
+
+/* Public dc_state functions */
+struct dc_state *dc_state_create(struct dc *dc)
+{
+ struct dc_state *state = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ init_state(dc, state);
+ dc_state_construct(dc, state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (dc->debug.using_dml2)
+ dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+#endif
+
+ kref_init(&state->refcount);
+
+ return state;
+}
+
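A minimal usage sketch for the lifecycle this file introduces (hypothetical caller; build_and_drop_state() is an invented name, while the helpers it calls are defined throughout this file):

/* Hypothetical caller: the copy-modify-release flow replacing the old
 * dc_create_state() + dc_resource_state_copy_construct() +
 * dc_release_state() sequence removed from dc.c and dc_resource.c above.
 */
static bool build_and_drop_state(struct dc *dc)
{
        struct dc_state *state = dc_state_create_copy(dc->current_state);

        if (!state)
                return false;   /* allocation failure */

        /* ... mutate the copy via dc_state_add_stream() /
         *     dc_state_add_plane() ... */

        dc_state_release(state); /* kref_put; freed when the last ref drops */
        return true;
}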
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
+{
+ struct kref refcount = dst_state->refcount;
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+#endif
+
+ dc_state_copy_internal(dst_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dst_state->bw_ctx.dml2 = dst_dml2;
+ if (src_state->bw_ctx.dml2)
+ dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+#endif
+
+ /* context refcount should not be overridden */
+ dst_state->refcount = refcount;
+}
+
+struct dc_state *dc_state_create_copy(struct dc_state *src_state)
+{
+ struct dc_state *new_state;
+
+ new_state = kvmalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ dc_state_copy_internal(new_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (src_state->bw_ctx.dml2 &&
+ !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
+ dc_state_release(new_state);
+ return NULL;
+ }
+#endif
+
+ kref_init(&new_state->refcount);
+
+ return new_state;
+}
+
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state)
+{
+ dc_state_copy(dst_state, dc->current_state);
+}
+
+struct dc_state *dc_state_create_current_copy(struct dc *dc)
+{
+ return dc_state_create_copy(dc->current_state);
+}
+
+void dc_state_construct(struct dc *dc, struct dc_state *state)
+{
+ state->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+ if (dc->res_pool)
+ link_enc_cfg_init(dc, state);
+}
+
+void dc_state_destruct(struct dc_state *state)
+{
+ int i, j;
+
+ for (i = 0; i < state->stream_count; i++) {
+ for (j = 0; j < state->stream_status[i].plane_count; j++)
+ dc_plane_state_release(
+ state->stream_status[i].plane_states[j]);
+
+ state->stream_status[i].plane_count = 0;
+ dc_stream_release(state->streams[i]);
+ state->streams[i] = NULL;
+ }
+ state->stream_count = 0;
+
+ /* release tracked phantoms */
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ dc_stream_release(state->phantom_streams[i]);
+ state->phantom_streams[i] = NULL;
+ }
+ state->phantom_stream_count = 0;
+
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ dc_plane_state_release(state->phantom_planes[i]);
+ state->phantom_planes[i] = NULL;
+ }
+ state->phantom_plane_count = 0;
+
+ state->stream_mask = 0;
+ memset(&state->res_ctx, 0, sizeof(state->res_ctx));
+ memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
+ memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars));
+ state->clk_mgr = NULL;
+ memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw));
+ memset(state->block_sequence, 0, sizeof(state->block_sequence));
+ state->block_sequence_steps = 0;
+ memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
+ state->dmub_cmd_count = 0;
+ memset(&state->perf_params, 0, sizeof(state->perf_params));
+ memset(&state->scratch, 0, sizeof(state->scratch));
+}
+
+void dc_state_retain(struct dc_state *state)
+{
+ kref_get(&state->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+ struct dc_state *state = container_of(kref, struct dc_state, refcount);
+
+ dc_state_destruct(state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dml2_destroy(state->bw_ctx.dml2);
+ state->bw_ctx.dml2 = 0;
+#endif
+
+ kvfree(state);
+}
+
+void dc_state_release(struct dc_state *state)
+{
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
+}
+/*
+ * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
+ */
+enum dc_status dc_state_add_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ enum dc_status res;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (state->stream_count >= dc->res_pool->timing_generator_count) {
+ DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ state->streams[state->stream_count] = stream;
+ dc_stream_retain(stream);
+ state->stream_count++;
+
+ res = resource_add_otg_master_for_stream_output(
+ state, dc->res_pool, stream);
+ if (res != DC_OK)
+ DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
+
+ return res;
+}
+
+/*
+ * dc_state_remove_stream() - Remove a stream from a dc_state.
+ */
+enum dc_status dc_state_remove_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+
+ if (!del_pipe) {
+ dm_error("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ resource_update_pipes_for_stream_with_slice_count(state,
+ dc->current_state, dc->res_pool, stream, 1);
+ resource_remove_otg_master_for_stream_output(
+ state, dc->res_pool, stream);
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream)
+ break;
+
+ if (state->streams[i] != stream) {
+ dm_error("Context doesn't have stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ dc_stream_release(state->streams[i]);
+ state->stream_count--;
+
+ /* Trim back arrays */
+ for (; i < state->stream_count; i++) {
+ state->streams[i] = state->streams[i + 1];
+ state->stream_status[i] = state->stream_status[i + 1];
+ }
+
+ state->streams[state->stream_count] = NULL;
+ memset(
+ &state->stream_status[state->stream_count],
+ 0,
+ sizeof(state->stream_status[0]));
+
+ return DC_OK;
+}
+
+bool dc_state_add_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state)
+{
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *otg_master_pipe;
+ struct dc_stream_status *stream_status = NULL;
+ bool added = false;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ goto out;
+ } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ goto out;
+ }
+
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes;
+ * we reset the ODM slice count back to 1 once all planes have
+ * been removed, to maximize the number of planes supported when
+ * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+ if (otg_master_pipe)
+ added = resource_append_dpp_pipes_for_plane_composition(state,
+ dc->current_state, pool, otg_master_pipe, plane_state);
+
+ if (added) {
+ stream_status->plane_states[stream_status->plane_count] =
+ plane_state;
+ stream_status->plane_count++;
+ dc_plane_state_retain(plane_state);
+ }
+
+out:
+ return added;
+}
+
+bool dc_state_remove_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state)
+{
+ int i;
+ struct dc_stream_status *stream_status = NULL;
+ struct resource_pool *pool = dc->res_pool;
+
+ if (!plane_state)
+ return true;
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to remove plane.\n");
+ return false;
+ }
+
+ resource_remove_dpp_pipes_for_plane_composition(
+ state, pool, plane_state);
+
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i] == plane_state) {
+ dc_plane_state_release(stream_status->plane_states[i]);
+ break;
+ }
+ }
+
+ if (i == stream_status->plane_count) {
+ dm_error("Existing plane_state not found; failed to detach it!\n");
+ return false;
+ }
+
+ stream_status->plane_count--;
+
+ /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+ for (; i < stream_status->plane_count; i++)
+ stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+ stream_status->plane_states[stream_status->plane_count] = NULL;
+
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes;
+ * we reset the ODM slice count back to 1 once all planes have
+ * been removed, to maximize the number of planes supported when
+ * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
+ return true;
+}
+
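Add and remove are intended to pair up; a sketch of error unwinding with the helpers above (hypothetical snippet, error paths simplified):

/* Hypothetical sketch: attach a plane to a stream in a candidate state,
 * unwinding the stream if the plane attach fails. All three helpers are
 * defined above in this file.
 */
if (dc_state_add_stream(dc, state, stream) == DC_OK) {
        if (!dc_state_add_plane(dc, stream, plane_state, state))
                dc_state_remove_stream(dc, state, stream);
}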
+/**
+ * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream.
+ *
+ * @dc: Current dc instance.
+ * @stream: Target stream from which we want to remove the attached planes.
+ * @state: Context from which the planes are to be removed.
+ *
+ * Return:
+ * Return true if DC was able to remove all planes from the target
+ * stream, otherwise, return false.
+ */
+bool dc_state_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++)
+ if (!dc_state_remove_plane(dc, stream, del_planes[i], state))
+ return false;
+
+ return true;
+}
+
+bool dc_state_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *state)
+{
+ int i;
+ bool result = true;
+
+ for (i = 0; i < plane_count; i++)
+ if (!dc_state_add_plane(dc, stream, plane_states[i], state)) {
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+/* Private dc_state functions */
+
+/**
+ * dc_state_get_stream_status - Get stream status from given dc state
+ * @state: DC state to find the stream status in
+ * @stream: The stream to get the stream status for
+ *
+ * The given stream is expected to exist in the given dc state. Otherwise, NULL
+ * will be returned.
+ */
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ if (state == NULL)
+ return NULL;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i])
+ return &state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx)
+{
+ return dc_state_get_stream_subvp_type(state, pipe_ctx->stream);
+}
+
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ enum mall_stream_type type = SUBVP_NONE;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] == stream) {
+ type = state->stream_status[i].mall_stream_config.type;
+ break;
+ }
+ }
+
+ return type;
+}
+
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ struct dc_stream_state *paired_stream = NULL;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] == stream) {
+ paired_stream = state->stream_status[i].mall_stream_config.paired_stream;
+ break;
+ }
+ }
+
+ return paired_stream;
+}
+
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream)
+{
+ struct dc_stream_state *phantom_stream;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ phantom_stream = dc_create_stream_for_sink(main_stream->sink);
+
+ if (!phantom_stream) {
+ DC_LOG_ERROR("Failed to allocate phantom stream.\n");
+ return NULL;
+ }
+
+ /* track phantom stream in dc_state */
+ dc_state_track_phantom_stream(state, phantom_stream);
+
+ phantom_stream->is_phantom = true;
+ phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+ phantom_stream->dpms_off = true;
+
+ return phantom_stream;
+}
+
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!dc_state_untrack_phantom_stream(state, phantom_stream)) {
+ DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state);
+ return;
+ }
+
+ dc_stream_release(phantom_stream);
+}
+
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane)
+{
+ struct dc_plane_state *phantom_plane = dc_create_plane_state(dc);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!phantom_plane) {
+ DC_LOG_ERROR("Failed to allocate phantom plane.\n");
+ return NULL;
+ }
+
+ /* track phantom inside dc_state */
+ dc_state_track_phantom_plane(state, phantom_plane);
+
+ phantom_plane->is_phantom = true;
+
+ return phantom_plane;
+}
+
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane)
+{
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!dc_state_untrack_phantom_plane(state, phantom_plane)) {
+ DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state);
+ return;
+ }
+
+ dc_plane_state_release(phantom_plane);
+}
+
+/* add phantom streams to context and generate correct meta inside dc_state */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream)
+{
+ struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *phantom_stream_status;
+ enum dc_status res = dc_state_add_stream(dc, state, phantom_stream);
+
+ /* check if stream is tracked */
+ if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) {
+ /* stream must be tracked if added to state */
+ dc_state_track_phantom_stream(state, phantom_stream);
+ }
+
+ /* setup subvp meta */
+ main_stream_status = dc_state_get_stream_status(state, main_stream);
+ phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ main_stream_status->mall_stream_config.type = SUBVP_MAIN;
+ main_stream_status->mall_stream_config.paired_stream = phantom_stream;
+
+ return res;
+}
+
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *phantom_stream_status;
+
+ /* reset subvp meta */
+ phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+ main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
+ phantom_stream_status->mall_stream_config.paired_stream = NULL;
+ if (main_stream_status) {
+ main_stream_status->mall_stream_config.type = SUBVP_NONE;
+ main_stream_status->mall_stream_config.paired_stream = NULL;
+ }
+
+ /* remove stream from state */
+ return dc_state_remove_stream(dc, state, phantom_stream);
+}
+
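Putting the phantom helpers together, the SubVP lifecycle this file supports looks roughly like the following (hypothetical sequence; the teardown half is what dc_validate_with_context() now uses in the dc_resource.c hunk above):

/* Hypothetical phantom (SubVP) stream lifecycle using the helpers above. */
struct dc_stream_state *phantom =
        dc_state_create_phantom_stream(dc, state, main_stream);

if (phantom) {
        /* links main <-> phantom via the mall_stream_config meta */
        dc_state_add_phantom_stream(dc, state, phantom, main_stream);

        /* ... dc_state_add_phantom_plane(), validate, commit ... */

        dc_state_remove_phantom_stream(dc, state, phantom);
        dc_state_release_phantom_stream(dc, state, phantom);
}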
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state)
+{
+ bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state);
+
+ /* check if plane is tracked */
+ if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) {
+ /* plane must be tracked if added to state */
+ dc_state_track_phantom_plane(state, phantom_plane);
+ }
+
+ return res;
+}
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state)
+{
+ return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state);
+}
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == phantom_stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", phantom_stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++) {
+ if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state))
+ return false;
+ if (should_release_planes)
+ dc_state_release_phantom_plane(dc, state, del_planes[i]);
+ }
+
+ return true;
+}
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state)
+{
+ return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state);
+}
+
+bool dc_state_remove_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+ bool removed_phantom = false;
+ struct dc_stream_state *phantom_stream = NULL;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
+ phantom_stream = pipe->stream;
+
+ dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false);
+ dc_state_remove_phantom_stream(dc, state, phantom_stream);
+ removed_phantom = true;
+ }
+ }
+ return removed_phantom;
+}
+
+void dc_state_release_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_stream_count; i++)
+ dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+
+ for (i = 0; i < state->phantom_plane_count; i++)
+ dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+}
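Taken together, these helpers move phantom-resource ownership into dc_state. Below is a minimal caller-side sketch of the intended lifecycle, assuming the dc_state_priv.h interface introduced by this patch; the function itself is hypothetical and error handling is abbreviated.

#include "dc_state_priv.h"

static void example_phantom_lifecycle(struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *main_stream)
{
	/* Allocation and subvp pairing are now separate, explicit steps. */
	struct dc_stream_state *phantom =
		dc_state_create_phantom_stream(dc, state, main_stream);

	if (phantom == NULL)
		return;

	/* Tags both stream statuses (SUBVP_MAIN / SUBVP_PHANTOM) and links them. */
	if (dc_state_add_phantom_stream(dc, state, phantom, main_stream) == DC_OK) {
		/* ... validate and commit the state ... */

		/* Removal clears the subvp metadata on both ends first. */
		dc_state_remove_phantom_stream(dc, state, phantom);
	}

	dc_state_release_phantom_stream(dc, state, phantom);
}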
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4bdf105d1..51a970fcb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -31,6 +31,8 @@
#include "ipp.h"
#include "timing_generator.h"
#include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
#define DC_LOGGER dc->ctx->logger
@@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
}
}
-static bool dc_stream_construct(struct dc_stream_state *stream,
+bool dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data)
{
uint32_t i = 0;
@@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
}
stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->stream_id = stream->ctx->dc_stream_id_count;
- stream->ctx->dc_stream_id_count++;
+ dc_stream_assign_stream_id(stream);
return true;
}
-static void dc_stream_destruct(struct dc_stream_state *stream)
+void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
@@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream)
}
}
+void dc_stream_assign_stream_id(struct dc_stream_state *stream)
+{
+ /* MSB is reserved to indicate phantoms */
+ stream->stream_id = stream->ctx->dc_stream_id_count;
+ stream->ctx->dc_stream_id_count++;
+}
+
void dc_stream_retain(struct dc_stream_state *stream)
{
kref_get(&stream->refcount);
@@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->out_transfer_func)
dc_transfer_func_retain(new_stream->out_transfer_func);
- new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
- new_stream->ctx->dc_stream_id_count++;
+ dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
@@ -209,31 +216,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
}
/**
- * dc_stream_get_status_from_state - Get stream status from given dc state
- * @state: DC state to find the stream status in
- * @stream: The stream to get the stream status for
- *
- * The given stream is expected to exist in the given dc state. Otherwise, NULL
- * will be returned.
- */
-struct dc_stream_status *dc_stream_get_status_from_state(
- struct dc_state *state,
- struct dc_stream_state *stream)
-{
- uint8_t i;
-
- if (state == NULL)
- return NULL;
-
- for (i = 0; i < state->stream_count; i++) {
- if (stream == state->streams[i])
- return &state->stream_status[i];
- }
-
- return NULL;
-}
-
-/**
* dc_stream_get_status() - Get current stream status of the given stream state
* @stream: The stream to get the stream status for.
*
@@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
- return dc_stream_get_status_from_state(dc->current_state, stream);
+ return dc_state_get_stream_status(dc->current_state, stream);
}
static void program_cursor_attributes(
@@ -441,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
@@ -465,16 +449,37 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dc->hwss.enable_writeback) {
struct dc_stream_status *stream_status = dc_stream_get_status(stream);
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
- dwb->otg_inst = stream_status->primary_otg_inst;
+ if (stream_status)
+ dwb->otg_inst = stream_status->primary_otg_inst;
+ }
+
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+ } else {
+			/* Enable writeback pipe from scratch */
+ dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+ }
}
+
return true;
}
-bool dc_stream_remove_writeback(struct dc *dc,
+bool dc_stream_fc_disable_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
- int i = 0, j = 0;
+ struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
@@ -490,27 +495,67 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
-// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
- for (i = 0; i < stream->num_wb_info; i++) {
- /*dynamic update*/
- if (stream->writeback_info[i].wb_enabled &&
- stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) {
- stream->writeback_info[i].wb_enabled = false;
- }
+ dc_exit_ips_for_hw_access(dc);
+
+ if (dwb->funcs->set_fc_enable)
+ dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
+
+ return true;
+}
+
+bool dc_stream_remove_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst)
+{
+ int i = 0, j = 0;
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ if (dwb_pipe_inst >= MAX_DWB_PIPES) {
+ dm_error("DC: writeback pipe is invalid!\n");
+ return false;
+ }
+
+ if (stream->num_wb_info > MAX_DWB_PIPES) {
+ dm_error("DC: num_wb_info is invalid!\n");
+ return false;
}
/* remove writeback info for disabled writeback pipes from stream */
for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (j < i)
- /* trim the array */
+
+ if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst)
+ stream->writeback_info[i].wb_enabled = false;
+
+			/* trim the array */
+			if (j < i)
 				memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
 						sizeof(struct dc_writeback_info));
-				j++;
+			j++;
}
}
stream->num_wb_info = j;
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+
+ /* disable writeback */
+ if (dc->hwss.disable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb))
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ }
+
return true;
}
@@ -518,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,
int num_dwb,
struct dc_writeback_info *wb_info)
{
+ dc_exit_ips_for_hw_access(dc);
+
if (dc->hwss.mmhubbub_warmup)
return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
else
@@ -530,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -558,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -589,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -625,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
+ dc_exit_ips_for_hw_access(dc);
+
return dc->hwss.dmdata_status_done(pipe);
}
@@ -659,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.program_dmdata_engine(pipe_ctx);
if (hubp->funcs->dmdata_set_attributes != NULL &&
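The writeback trim above merges the disable and compaction passes into one loop. For reference, here is a standalone sketch of the same stable in-place filter, with hypothetical stand-in types so it can be exercised outside the driver.

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-in for struct dc_writeback_info (illustration only). */
struct wb_info {
	bool wb_enabled;
	unsigned int dwb_pipe_inst;
};

/* Disable one pipe instance and compact enabled entries; returns new count. */
static int wb_disable_and_trim(struct wb_info *info, int count, unsigned int inst)
{
	int i, j;

	for (i = 0, j = 0; i < count; i++) {
		if (!info[i].wb_enabled)
			continue; /* previously disabled entries are dropped */

		if (info[i].dwb_pipe_inst == inst)
			info[i].wb_enabled = false;

		if (j < i)
			memcpy(&info[j], &info[i], sizeof(info[0]));
		j++;
	}
	return j;
}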
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index a80e45300..19140fb65 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -32,10 +32,12 @@
#include "transform.h"
#include "dpp.h"
+#include "dc_plane_priv.h"
+
/*******************************************************************************
* Private functions
******************************************************************************/
-static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
{
plane_state->ctx = ctx;
@@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
}
-static void dc_plane_destruct(struct dc_plane_state *plane_state)
+void dc_plane_destruct(struct dc_plane_state *plane_state)
{
if (plane_state->gamma_correction != NULL) {
dc_gamma_release(&plane_state->gamma_correction);
@@ -159,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8164a5340..fc60fa581 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -27,6 +27,8 @@
#define DC_INTERFACE_H_
#include "dc_types.h"
+#include "dc_state.h"
+#include "dc_plane.h"
#include "grph_object_defs.h"
#include "logger_types.h"
#include "hdcp_msg_types.h"
@@ -49,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.259"
+#define DC_VER "3.2.266"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -432,6 +434,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_in_vpb;
};
enum visual_confirm {
@@ -461,6 +464,12 @@ enum dml_hostvm_override_opts {
DML_HOSTVM_OVERRIDE_TRUE = 0x2,
};
+enum dc_replay_power_opts {
+ replay_power_opt_invalid = 0x0,
+ replay_power_opt_smu_opt_static_screen = 0x1,
+ replay_power_opt_z10_static_screen = 0x10,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -956,7 +965,6 @@ struct dc_debug_options {
unsigned int min_prefetch_in_strobe_ns;
bool disable_unbounded_requesting;
bool dig_fifo_off_in_blank;
- bool temp_mst_deallocation_sequence;
bool override_dispclk_programming;
bool otg_crc_db;
bool disallow_dispclk_dppclk_ds;
@@ -979,6 +987,10 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool disable_dmub_reallow_idle;
+ bool disable_timeout;
+ bool disable_extblankadj;
+ unsigned int static_screen_wait_frames;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1026,7 +1038,6 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
@@ -1389,13 +1400,6 @@ struct dc_surface_update {
/*
* Create a new surface with default parameters;
*/
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
-const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
-
-void dc_plane_state_retain(struct dc_plane_state *plane_state);
-void dc_plane_state_release(struct dc_plane_state *plane_state);
-
void dc_gamma_retain(struct dc_gamma *dc_gamma);
void dc_gamma_release(struct dc_gamma **dc_gamma);
struct dc_gamma *dc_create_gamma(void);
@@ -1459,37 +1463,20 @@ enum dc_status dc_validate_global_state(
struct dc_state *new_ctx,
bool fast_validate);
-
-void dc_resource_state_construct(
- const struct dc *dc,
- struct dc_state *dst_ctx);
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-void dc_resource_state_copy_construct(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx);
-
-void dc_resource_state_copy_construct_current(
- const struct dc *dc,
- struct dc_state *dst_ctx);
-
-void dc_resource_state_destruct(struct dc_state *context);
-
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *aud_chk);
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
-struct dc_state *dc_create_state(struct dc *dc);
-struct dc_state *dc_copy_state(struct dc_state *src_ctx);
-void dc_retain_state(struct dc_state *context);
-void dc_release_state(struct dc_state *context);
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
struct dc_stream_state *stream,
@@ -1541,7 +1528,13 @@ struct dc_link {
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a new received hpd */
- bool is_automated; /* Indicates automated testing */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
bool edp_sink_present;
@@ -2092,6 +2085,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link:
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable (active) or disable (inactive) Replay
+ * @wait: wait until the active-state transition completes
+ * @force_static: force Replay to be disabled (inactive)
+ * @power_opts: power optimization parameters to pass to DMUB
+ *
+ * return: true if Replay active is allowed, otherwise false.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
/* On eDP links this function call will stall until T12 has elapsed.
@@ -2318,6 +2325,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_exit_ips_for_hw_access(struct dc *dc);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
@@ -2336,6 +2344,9 @@ void dc_hardware_release(struct dc *dc);
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
+bool dc_set_replay_allow_active(struct dc *dc, bool active);
+
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
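A hypothetical DM-side sketch of the new Replay entry point together with the IPS exit helper declared above; the wrapper function and its policy choices are illustrative, not part of the patch.

#include "dc.h"

static bool example_enable_replay(struct dc *dc, struct dc_link *link)
{
	const bool enable = true;
	const unsigned int opts = replay_power_opt_smu_opt_static_screen;

	/* Bring DCN out of IPS before any register access. */
	dc_exit_ips_for_hw_access(dc);

	/* wait = true: block until the active-state transition completes. */
	return dc_link_set_replay_allow_active(link, &enable, true, false, &opts);
}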
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 05b3433cb..9084b3208 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -33,6 +33,7 @@
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -141,7 +142,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_cmd_execute(dmub);
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
dmub_srv_wait_for_idle(dmub, 100000);
/* Requeue the command. */
@@ -149,16 +153,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
}
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
@@ -219,7 +227,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_cmd_execute(dmub);
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
dmub_srv_wait_for_idle(dmub, 100000);
/* Requeue the command. */
@@ -227,22 +238,31 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
}
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
}
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
// Wait for DMUB to process command
if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (status != DMUB_STATUS_OK);
+		} else {
+			status = dmub_srv_wait_for_idle(dmub, 100000);
+		}
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
@@ -500,10 +520,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
/**
* populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
*
- * @dc: [in] current dc state
+ * @dc: [in] pointer to dc object
* @subvp_pipe: [in] pipe_ctx for the SubVP pipe
* @vblank_pipe: [in] pipe_ctx for the DRR pipe
* @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
+ * @context: [in] DC state for access to phantom stream
*
* Populate the DMCUB SubVP command with DRR pipe info. All the information
* required for calculating the SubVP + DRR microschedule is populated here.
@@ -514,12 +535,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
* 3. Populate the drr_info with the min and max supported vtotal values
*/
static void populate_subvp_cmd_drr_info(struct dc *dc,
+ struct dc_state *context,
struct pipe_ctx *subvp_pipe,
struct pipe_ctx *vblank_pipe,
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
uint16_t drr_frame_us = 0;
uint16_t min_drr_supported_us = 0;
@@ -607,7 +630,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
break;
}
@@ -624,7 +647,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
if (vblank_pipe->stream->ignore_msa_timing_param &&
(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
- populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data);
+ populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}
/**
@@ -649,10 +672,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
uint32_t subvp0_prefetch_us = 0;
uint32_t subvp1_prefetch_us = 0;
uint32_t prefetch_delta_us = 0;
- struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing;
- struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
+ struct dc_stream_state *phantom_stream0 = NULL;
+ struct dc_stream_state *phantom_stream1 = NULL;
+ struct dc_crtc_timing *phantom_timing0 = NULL;
+ struct dc_crtc_timing *phantom_timing1 = NULL;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
+ phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+ phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+ phantom_timing0 = &phantom_stream0->timing;
+ phantom_timing1 = &phantom_stream1->timing;
+
subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
(uint64_t)phantom_timing0->h_total * 1000000),
(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
@@ -702,8 +732,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
uint32_t j;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
pipe_data->mode = SUBVP;
@@ -757,7 +788,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
- if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
+ if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
@@ -791,6 +822,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
uint32_t wm_val_refclk = 0;
+ enum mall_stream_type pipe_mall_type;
memset(&cmd, 0, sizeof(cmd));
// FW command for SUBVP
@@ -806,7 +838,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
*/
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -814,6 +846,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
@@ -824,12 +857,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
*/
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_mall_type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -1142,10 +1174,16 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
dc_ctx = dc_dmub_srv->ctx;
if (wait) {
- status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
- if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
- return false;
+ if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
+ do {
+ status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
+ } while (status != DMUB_STATUS_OK);
+ } else {
+ status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
+ return false;
+ }
}
} else
return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);
@@ -1175,22 +1213,18 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
}
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ /* We also do not perform a wait since DMCUB could enter idle after the notification. */
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
- const uint32_t max_num_polls = 10000;
uint32_t allow_state = 0;
uint32_t commit_state = 0;
- uint32_t i;
if (dc->debug.dmcub_emulation)
return;
- if (!dc->idle_optimizations_allowed)
- return;
-
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
@@ -1203,8 +1237,16 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
- udelay(dc->debug.ips2_eval_delay_us);
- commit_state = dc->hwss.get_idle_state(dc);
+ for (;;) {
+ udelay(dc->debug.ips2_eval_delay_us);
+ commit_state = dc->hwss.get_idle_state(dc);
+ if (commit_state & DMUB_IPS2_ALLOW_MASK)
+ break;
+
+ /* allow was still set, retry eval delay */
+ dc->hwss.set_idle_state(dc, false);
+ }
+
if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1213,14 +1255,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(dc->debug.ips2_entry_delay_us);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
- for (i = 0; i < max_num_polls; ++i) {
+ for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
udelay(1);
}
- ASSERT(i < max_num_polls);
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
@@ -1235,14 +1276,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
- for (i = 0; i < max_num_polls; ++i) {
+ for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
udelay(1);
}
- ASSERT(i < max_num_polls);
}
}
@@ -1324,7 +1364,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1373,7 +1413,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
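The two hunks above apply the same wake/execute/re-allow pattern; a condensed sketch of that wrapper, assuming the helpers already named in this file (the function itself is illustrative only):

static bool example_wake_and_execute(const struct dc_context *ctx,
		union dmub_rb_cmd *cmd)
{
	bool result;

	/* Exit idle optimizations so DMCUB can service the command. */
	dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);

	result = dm_execute_dmub_cmd(ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT);

	/* Re-allow idle unless the new debug switch keeps it off. */
	if (result && !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}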
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
new file mode 100644
index 000000000..ef380cae8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_H_
+#define _DC_PLANE_H_
+
+#include "dc.h"
+#include "dc_hw_types.h"
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state);
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_H_ */
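The split leaves only the refcounted plane lifecycle in the public header. A hypothetical usage sketch of that lifecycle:

#include "dc_plane.h"

static void example_plane_refcounting(struct dc *dc)
{
	/* Created with one reference held by the caller. */
	struct dc_plane_state *plane = dc_create_plane_state(dc);

	if (plane == NULL)
		return;

	dc_plane_state_retain(plane);	/* a second owner takes a reference */
	dc_plane_state_release(plane);	/* second owner is done */
	dc_plane_state_release(plane);	/* last reference frees the plane */
}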
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
new file mode 100644
index 000000000..9ee184c1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_PRIV_H_
+#define _DC_PLANE_PRIV_H_
+
+#include "dc_plane.h"
+
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
+void dc_plane_destruct(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
new file mode 100644
index 000000000..d167fdbfa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_H_
+#define _DC_STATE_H_
+
+#include "dc.h"
+#include "inc/core_status.h"
+
+struct dc_state *dc_state_create(struct dc *dc);
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
+struct dc_state *dc_state_create_copy(struct dc_state *src_state);
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
+struct dc_state *dc_state_create_current_copy(struct dc *dc);
+void dc_state_construct(struct dc *dc, struct dc_state *state);
+void dc_state_destruct(struct dc_state *state);
+void dc_state_retain(struct dc_state *state);
+void dc_state_release(struct dc_state *state);
+
+enum dc_status dc_state_add_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_state_remove_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+bool dc_state_add_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state);
+
+bool dc_state_remove_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state);
+
+bool dc_state_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *state);
+
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+#endif /* _DC_STATE_H_ */
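A hypothetical sketch of the renamed state lifecycle (create a scratch copy, mutate, release), assuming the interface above; error handling is abbreviated and the helper name is illustrative.

#include "dc_state.h"

static enum dc_status example_add_stream(struct dc *dc,
		struct dc_stream_state *stream)
{
	/* Mutate a copy so a failure leaves dc->current_state untouched. */
	struct dc_state *state = dc_state_create_current_copy(dc);
	enum dc_status res;

	if (state == NULL)
		return DC_ERROR_UNEXPECTED;

	res = dc_state_add_stream(dc, state, stream);
	if (res == DC_OK)
		res = dc_commit_streams(dc, &stream, 1);

	dc_state_release(state);	/* drop the local reference */
	return res;
}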
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
new file mode 100644
index 000000000..c1f44e09a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, gets the main if phantom is provided. */
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Allocates a phantom stream or plane and returns a pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+
+/* Deallocates a phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane);
+
+/* Adds/removes a phantom stream to/from the state and generates subvp metadata */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
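A hypothetical sketch of querying subvp pairings through these accessors rather than reading mall_stream_config off the stream directly:

#include "dc_state_priv.h"

static int example_count_subvp_mains(struct dc *dc, struct dc_state *state)
{
	int i, mains = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];

		if (pipe->stream == NULL)
			continue;

		if (dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_MAIN) {
			/* The phantom's timing drives the SubVP microschedule. */
			struct dc_stream_state *phantom =
				dc_state_get_paired_subvp_stream(state, pipe->stream);

			if (phantom != NULL)
				mains++;
		}
	}
	return mains;
}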
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e61eea6db..a23eebd99 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -38,6 +38,14 @@ struct timing_sync_info {
bool master;
};
+struct mall_stream_config {
+ /* MALL stream config to indicate if the stream is phantom or not.
+ * We will use a phantom stream to indicate that the pipe is phantom.
+ */
+ enum mall_stream_type type;
+ struct dc_stream_state *paired_stream; // master / slave stream
+};
+
struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;
@@ -50,6 +58,7 @@ struct dc_stream_status {
struct timing_sync_info timing_sync_info;
struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
bool is_abm_supported;
+ struct mall_stream_config mall_stream_config;
};
enum hubp_dmdata_mode {
@@ -130,7 +139,6 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
- uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;
@@ -147,31 +155,6 @@ struct test_pattern {
#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
-enum mall_stream_type {
- SUBVP_NONE, // subvp not in use
- SUBVP_MAIN, // subvp in use, this stream is main stream
- SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
-};
-
-struct mall_stream_config {
- /* MALL stream config to indicate if the stream is phantom or not.
- * We will use a phantom stream to indicate that the pipe is phantom.
- */
- enum mall_stream_type type;
- struct dc_stream_state *paired_stream; // master / slave stream
-};
-
-/* Temp struct used to save and restore MALL config
- * during validation.
- *
- * TODO: Move MALL config into dc_state instead of stream struct
- * to avoid needing to save/restore.
- */
-struct mall_temp_config {
- struct mall_stream_config mall_stream_config[MAX_PIPES];
- bool is_phantom_plane[MAX_PIPES];
-};
-
struct dc_stream_debug_options {
char force_odm_combine_segments;
};
@@ -301,7 +284,7 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
bool fpo_in_use;
- struct mall_stream_config mall_stream_config;
+ bool is_phantom;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -342,7 +325,6 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
- struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
@@ -415,45 +397,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *h_position,
uint32_t *v_position);
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context);
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context);
-
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info);
+bool dc_stream_fc_disable_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst);
+
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
@@ -514,9 +465,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status_from_state(
- struct dc_state *state,
- struct dc_stream_state *stream);
struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
new file mode 100644
index 000000000..7476fd52c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STREAM_PRIV_H_
+#define _DC_STREAM_PRIV_H_
+
+#include "dc_stream.h"
+
+bool dc_stream_construct(struct dc_stream_state *stream,
+ struct dc_sink *dc_sink_data);
+void dc_stream_destruct(struct dc_stream_state *stream);
+
+void dc_stream_assign_stream_id(struct dc_stream_state *stream);
+
+#endif // _DC_STREAM_PRIV_H_
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 66d0774be..be2ac5c44 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1018,6 +1018,25 @@ enum replay_coasting_vtotal_type {
PR_COASTING_TYPE_NUM,
};
+enum replay_link_off_frame_count_level {
+ PR_LINK_OFF_FRAME_COUNT_FAIL = 0x0,
+ PR_LINK_OFF_FRAME_COUNT_GOOD = 0x2,
+ PR_LINK_OFF_FRAME_COUNT_BEST = 0x6,
+};
+
+/*
+ * This is the general interface for Replay to
+ * pass a 32-bit variable to DMUB.
+ * The message type indicates which variable
+ * is being passed.
+ */
+enum replay_FW_Message_type {
+ Replay_Msg_Not_Support = -1,
+ Replay_Set_Timing_Sync_Supported,
+ Replay_Set_Residency_Frameupdate_Timer,
+ Replay_Set_Pseudo_VTotal,
+};
+
union replay_error_status {
struct {
unsigned char STATE_TRANSITION_ERROR :1;
@@ -1029,26 +1048,52 @@ union replay_error_status {
};
struct replay_config {
- bool replay_supported; // Replay feature is supported
- unsigned int replay_power_opt_supported; // Power opt flags that are supported
- bool replay_smu_opt_supported; // SMU optimization is supported
- unsigned int replay_enable_option; // Replay enablement option
- uint32_t debug_flags; // Replay debug flags
- bool replay_timing_sync_supported; // Replay desync is supported
- bool force_disable_desync_error_check; // Replay desync is supported
- bool received_desync_error_hpd; //Replay Received Desync Error HPD.
- union replay_error_status replay_error_status; // Replay error status
-};
-
-/* Replay feature flags */
+ /* Replay feature is supported */
+ bool replay_supported;
+ /* Power opt flags that are supported */
+ unsigned int replay_power_opt_supported;
+ /* SMU optimization is supported */
+ bool replay_smu_opt_supported;
+ /* Replay enablement option */
+ unsigned int replay_enable_option;
+ /* Replay debug flags */
+ uint32_t debug_flags;
+	/* Replay timing sync is supported */
+ bool replay_timing_sync_supported;
+	/* Disable Replay desync error check. */
+ bool force_disable_desync_error_check;
+ /* Replay Received Desync Error HPD. */
+ bool received_desync_error_hpd;
+	/* Fast resync in ultra sleep mode (long vblank) is supported */
+ bool replay_support_fast_resync_in_ultra_sleep_mode;
+ /* Replay error status */
+ union replay_error_status replay_error_status;
+};
+
+/* Replay feature flags */
struct replay_settings {
- struct replay_config config; // Replay configuration
- bool replay_feature_enabled; // Replay feature is ready for activating
- bool replay_allow_active; // Replay is currently active
- unsigned int replay_power_opt_active; // Power opt flags that are activated currently
- bool replay_smu_opt_enable; // SMU optimization is enabled
- uint16_t coasting_vtotal; // Current Coasting vtotal
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
+ /* Replay configuration */
+ struct replay_config config;
+ /* Replay feature is ready for activating */
+ bool replay_feature_enabled;
+ /* Replay is currently active */
+ bool replay_allow_active;
+	/* Replay long vblank is allowed */
+ bool replay_allow_long_vblank;
+ /* Power opt flags that are activated currently */
+ unsigned int replay_power_opt_active;
+ /* SMU optimization is enabled */
+ bool replay_smu_opt_enable;
+ /* Current Coasting vtotal */
+ uint32_t coasting_vtotal;
+ /* Coasting vtotal table */
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Maximum link off frame count */
+ enum replay_link_off_frame_count_level link_off_frame_count_level;
+	/* Replay pseudo vtotal for ABM + IPS on full-screen video, which can improve IPS residency */
+ uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay last pseudo vtotal set to DMUB */
+ uint16_t last_pseudo_vtotal;
};
/* To split out "global" and "per-panel" config settings.
@@ -1125,4 +1170,9 @@ enum dc_hpd_enable_select {
HPD_EN_FOR_SECONDARY_EDP_ONLY,
};
+enum mall_stream_type {
+ SUBVP_NONE, // subvp not in use
+ SUBVP_MAIN, // subvp in use, this stream is main stream
+ SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
+};
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 874b132fe..a60067763 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -135,7 +135,7 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
-static void dce_abm_init(struct abm *abm, uint32_t backlight)
+static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
@@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight)
BL1_PWM_TARGET_ABM_LEVEL, backlight);
REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ BL1_PWM_USER_LEVEL, user_level);
REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 930fd929e..ccc154b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -57,18 +57,22 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst
return ret;
}
-static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
- dmub_abm_init(abm, backlight);
+ dmub_abm_init(abm, backlight, user_level);
}
static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
{
+ dc_allow_idle_optimizations(abm->ctx->dc, false);
+
return dmub_abm_get_current_backlight(abm);
}
static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm)
{
+ dc_allow_idle_optimizations(abm->ctx->dc, false);
+
return dmub_abm_get_target_backlight(abm);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 4cff36351..f9d6a1811 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -79,7 +79,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-void dmub_abm_init(struct abm *abm, uint32_t backlight)
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
@@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight)
BL1_PWM_TARGET_ABM_LEVEL, backlight);
REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ BL1_PWM_USER_LEVEL, user_level);
REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
index 07ea6c8d4..761685e5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -30,7 +30,7 @@
struct abm_save_restore;
-void dmub_abm_init(struct abm *abm, uint32_t backlight);
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level);
bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
unsigned int dmub_abm_get_current_backlight(struct abm *abm);
unsigned int dmub_abm_get_target_backlight(struct abm *abm);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 28149e53c..38e4797e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
*residency = 0;
}
+/*
+ * Set REPLAY power optimization flags and coasting vtotal.
+ */
+static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
+ unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type =
+ DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt;
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst;
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/*
+ * Send a general Replay command to DMUB.
+ */
+static void dmub_replay_send_cmd(struct dmub_replay *dmub,
+ enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *ctx = NULL;
+
+ if (dmub == NULL || cmd_element == NULL)
+ return;
+
+ ctx = dmub->ctx;
+	if (ctx == NULL || msg == Replay_Msg_Not_Support)
+		return;
+
+	memset(&cmd, 0, sizeof(cmd));
+	//Header
+	cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY;
+
+ switch (msg) {
+ case Replay_Set_Timing_Sync_Supported:
+ //Header
+ cmd.replay_set_timing_sync.header.sub_type =
+ DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
+ cmd.replay_set_timing_sync.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ //Cmd Body
+ cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
+ cmd_element->sync_data.panel_inst;
+ cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported =
+ cmd_element->sync_data.timing_sync_supported;
+ break;
+ case Replay_Set_Residency_Frameupdate_Timer:
+ //Header
+ cmd.replay_set_frameupdate_timer.header.sub_type =
+ DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
+ cmd.replay_set_frameupdate_timer.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ //Cmd Body
+ cmd.replay_set_frameupdate_timer.data.panel_inst =
+ cmd_element->panel_inst;
+ cmd.replay_set_frameupdate_timer.data.enable =
+ cmd_element->timer_data.enable;
+ cmd.replay_set_frameupdate_timer.data.frameupdate_count =
+ cmd_element->timer_data.frameupdate_count;
+ break;
+ case Replay_Msg_Not_Support:
+ default:
+ return;
+ }
+
+ dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
static const struct dmub_replay_funcs replay_funcs = {
- .replay_copy_settings = dmub_replay_copy_settings,
- .replay_enable = dmub_replay_enable,
- .replay_get_state = dmub_replay_get_state,
- .replay_set_power_opt = dmub_replay_set_power_opt,
- .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
- .replay_residency = dmub_replay_residency,
+ .replay_copy_settings = dmub_replay_copy_settings,
+ .replay_enable = dmub_replay_enable,
+ .replay_get_state = dmub_replay_get_state,
+ .replay_set_power_opt = dmub_replay_set_power_opt,
+ .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
+ .replay_residency = dmub_replay_residency,
+ .replay_set_power_opt_and_coasting_vtotal = dmub_replay_set_power_opt_and_coasting_vtotal,
+ .replay_send_cmd = dmub_replay_send_cmd,
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e8385bbf5..3613aff99 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -45,10 +45,14 @@ struct dmub_replay_funcs {
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
uint8_t panel_inst);
+ void (*replay_send_cmd)(struct dmub_replay *dmub,
+ enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal,
uint8_t panel_inst);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+ void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
+ unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
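For illustration, a minimal caller sketch for the two new hooks (hypothetical usage, not part of the patch; assumes a struct dmub_replay *replay from dmub_replay_create() and valid power_opt/panel_inst/coasting_vtotal values in scope):

    union dmub_replay_cmd_set cmd_element;

    /* Combined power-opt + coasting-vtotal update in one DMUB command */
    replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
        power_opt, panel_inst, coasting_vtotal);

    /* Generic message path: fill the element, then dispatch by type */
    memset(&cmd_element, 0, sizeof(cmd_element));
    cmd_element.sync_data.panel_inst = panel_inst;
    cmd_element.sync_data.timing_sync_supported = true;
    replay->funcs->replay_send_cmd(replay, Replay_Set_Timing_Sync_Supported,
        &cmd_element);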
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
deleted file mode 100644
index 0d2f6bbf7..000000000
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for the 'controller' sub-component of DAL.
-# It provides the control and status of HW CRTC block.
-
-CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)
-
-DCE100 = dce100_resource.o
-
-AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
-
-
-###############################################################################
-# DCE 10x
-###############################################################################
-ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
-TG_DCE100 = dce100_resource.o
-
-AMD_DAL_TG_DCE100 = $(addprefix \
- $(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
-endif
-
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index 695a50ed5..c307f040e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,11 +23,11 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
DCE110 = dce110_timing_generator.o \
-dce110_compressor.o dce110_resource.o \
-dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
+dce110_compressor.o dce110_opp_regamma_v.o \
+dce110_opp_csc_v.o dce110_timing_generator_v.o \
dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index e846ef58c..683866797 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,10 +23,9 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
-DCE112 = dce112_compressor.o \
-dce112_resource.o
+DCE112 = dce112_compressor.o
AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 097cf407a..8f508e662 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,9 +24,9 @@
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
-DCE120 = dce120_resource.o dce120_timing_generator.o \
+DCE120 = dce120_timing_generator.o
AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331acc..eede83ad9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 93dd68c31..fba189d26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,10 +23,9 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
-DCE80 = dce80_timing_generator.o \
- dce80_resource.o
+DCE80 = dce80_timing_generator.o
AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 2d2007c3e..ae6a131be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,9 +22,9 @@
#
# Makefile for DCN.
-DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o \
+DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+ dcn10_dpp.o dcn10_opp.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 92fdab731..9033b39e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -32,7 +32,7 @@
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
-#include "dcn10_optc.h"
+#include "dcn10/dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0dec57679..86bfed5de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -377,6 +377,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index d7dc9696a..3dae3943b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,13 +2,11 @@
#
# Makefile for DCN.
-DCN20 = dcn20_resource.o dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
- dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
+DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
-DCN20 += dcn20_dsc.o
-
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN20)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index ab6d09c6f..ef5c22f41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -291,7 +291,43 @@
type SYMCLKB_FE_SRC_SEL;\
type SYMCLKC_FE_SRC_SEL;\
type SYMCLKD_FE_SRC_SEL;\
- type SYMCLKE_FE_SRC_SEL;
+ type SYMCLKE_FE_SRC_SEL;\
+ type DTBCLK_P0_GATE_DISABLE;\
+ type DTBCLK_P1_GATE_DISABLE;\
+ type DTBCLK_P2_GATE_DISABLE;\
+ type DTBCLK_P3_GATE_DISABLE;\
+ type DSCCLK0_ROOT_GATE_DISABLE;\
+ type DSCCLK1_ROOT_GATE_DISABLE;\
+ type DSCCLK2_ROOT_GATE_DISABLE;\
+ type DSCCLK3_ROOT_GATE_DISABLE;\
+ type SYMCLKA_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKB_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKC_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKD_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKE_FE_ROOT_GATE_DISABLE;\
+ type DPPCLK0_ROOT_GATE_DISABLE;\
+ type DPPCLK1_ROOT_GATE_DISABLE;\
+ type DPPCLK2_ROOT_GATE_DISABLE;\
+ type DPPCLK3_ROOT_GATE_DISABLE;\
+ type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\
+ type SYMCLKA_ROOT_GATE_DISABLE;\
+ type SYMCLKB_ROOT_GATE_DISABLE;\
+ type SYMCLKC_ROOT_GATE_DISABLE;\
+ type SYMCLKD_ROOT_GATE_DISABLE;\
+ type SYMCLKE_ROOT_GATE_DISABLE;\
+ type PHYA_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYB_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYC_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYD_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYE_REFCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK0_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK1_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK2_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK3_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK0_GATE_DISABLE;\
+ type DPSTREAMCLK1_GATE_DISABLE;\
+ type DPSTREAMCLK2_GATE_DISABLE;\
+ type DPSTREAMCLK3_GATE_DISABLE;\
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d0198..fbf1b6370 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
(double_buffer_pending == 0);
}
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
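As a usage sketch for the new hook (hypothetical polling loop; note the hook is NULL on ASICs such as DCN10 above, and udelay() comes from <linux/delay.h>):

    int timeout_us = 1000;

    /* Wait for a pending DPG double-buffer update to latch */
    while (opp->funcs->dpg_is_pending && opp->funcs->dpg_is_pending(opp) &&
           timeout_us-- > 0)
        udelay(1);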
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd..8f186abd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
index 3a41a97b0..2b0b4f32e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: MIT
#
# Makefile for DCN.
-DCN201 = dcn201_init.o dcn201_resource.o \
- dcn201_hubbub.o\
- dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \
+DCN201 = dcn201_hubbub.o\
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
dcn201_dccg.o dcn201_link_encoder.o
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a..6a71ba3df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ce1be0afa..ca92f5c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
+DCN21 = dcn21_hubp.o dcn21_hubbub.o \
dcn21_link_encoder.o dcn21_dccg.o
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index af4d2065d..b5b2aa3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -23,12 +23,9 @@
#
#
-DCN30 := \
- dcn30_init.o \
- dcn30_hubbub.o \
+DCN30 := dcn30_hubbub.o \
dcn30_hubp.o \
dcn30_dpp.o \
- dcn30_optc.o \
dcn30_dccg.o \
dcn30_mpc.o dcn30_vpg.o \
dcn30_afmt.o \
@@ -38,7 +35,6 @@ DCN30 := \
dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
- dcn30_resource.o \
dcn30_dio_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index e43f77c11..5f97a868a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
- enum dc_lut_mode mode;
+ enum dc_lut_mode mode = LUT_BYPASS;
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
- if (state_mode == 0)
- mode = LUT_BYPASS;
-
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index 0d98918bf..1b9d9495f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc)
return true;
}
+void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int pre_locked;
+
+ REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);
+
+ /* Lock DWB registers */
+ if (pre_locked == 0)
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);
+
+ /* Program frame capture enable */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable);
+
+ /* Unlock DWB registers */
+ if (pre_locked == 0)
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
+
+ DC_LOG_DWB("%s dwb3 fc_enable=%d at inst = %d", __func__, enable, dwbc->inst);
+}
+
+
bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
@@ -226,6 +248,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.disable = dwb3_disable,
.update = dwb3_update,
.is_enabled = dwb3_is_enabled,
+ .set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
.dwb_program_output_csc = NULL,
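A hypothetical caller sketch for the new hook (the enum values are assumed from dwb.h, so treat the names as illustrative):

    /* Disable frame capture through the funcs table rather than
     * writing FC_MODE_CTRL directly; dwbc comes from the resource pool. */
    if (dwbc->funcs->set_fc_enable)
        dwbc->funcs->set_fc_enable(dwbc, DWB_FRAME_CAPTURE_DISABLE);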
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index a5d1b81e7..332634b76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -877,6 +877,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params);
bool dwb3_is_enabled(struct dwbc *dwbc);
+void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable);
+
void dwb3_set_stereo(struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
index 701c7d8bc..03a50c32f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut(
return false;
}
+ if (params->hw_points_num == 0)
+ return false;
+
REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
current_mode = dwb3_get_ogam_current(dwbc30);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 30fbc5e06..d241f665e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -10,9 +10,8 @@
#
# Makefile for dcn30.
-DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
- dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o \
- dcn301_optc.o
+DCN301 = dcn301_dccg.o \
+ dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
deleted file mode 100644
index 95b66baf3..000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-# Authors: AMD
-#
-# Makefile for dcn302.
-
-DCN3_02 = dcn302_init.o dcn302_resource.o
-
-AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
index d7b3ad780..a954e316a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
@@ -6,7 +6,7 @@
#
# Makefile for dcn303.
-DCN3_03 = dcn303_init.o dcn303_resource.o
+DCN3_03 = dcn303_init.o
AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 96e45c9ef..5d93ac16c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -10,8 +10,8 @@
#
# Makefile for dcn31.
-DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
- dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
+DCN31 = dcn31_hubbub.o dcn31_hubp.o \
+ dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
dcn31_afmt.o dcn31_vpg.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index 72456debb..b134ab05a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -10,8 +10,7 @@
#
# Makefile for dcn314.
-DCN314 = dcn314_resource.o dcn314_init.o \
- dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
+DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
deleted file mode 100644
index 59381d248..000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright © 2021 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dcn315.
-
-DCN315 = dcn315_resource.o
-
-AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN315)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
deleted file mode 100644
index 819d44a94..000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright 2021 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dcn316.
-
-DCN316 = dcn316_resource.o
-
-AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN316)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
index 8bb251307..5314770ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
@@ -10,10 +10,10 @@
#
# Makefile for dcn32.
-DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
- dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \
- dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \
- dcn32_resource_helpers.o dcn32_mpc.o
+DCN32 = dcn32_hubbub.o dcn32_dccg.o \
+ dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
+ dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
+ dcn32_hpo_dp_link_encoder.o
AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d761b0df2..8a0460e86 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -34,6 +34,7 @@
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
#include "gpio_service_interface.h"
#ifndef MIN
@@ -61,6 +62,38 @@
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
void enc32_hw_init(struct link_encoder *enc)
{
@@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t dp_alt_mode_disable = 0;
- bool is_usb_c_alt_mode = false;
- if (enc->features.flags.bits.DP_IS_USB_C) {
- /* if value == 1 alt mode is disabled, otherwise it is enabled */
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
- }
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
- return is_usb_c_alt_mode;
+ return true;
}
-static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return false;
+
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+}
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t is_in_usb_c_dp4_mode = 0;
+ union dmub_rb_cmd cmd;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- /* in usb c dp2 mode, max lane count is 2 */
- if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- if (!is_in_usb_c_dp4_mode)
- link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
- }
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return;
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
+
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -203,12 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index bbcfce06b..2d5f25290 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
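The effect of the DMUB-backed query on link capability can be shown with a small worked sketch (illustrative values; is_usb and is_dp4 stand in for the cmd.query_dp_alt.data fields):

    /* A 4-lane capable PHY in USB-C DP2 alt mode (is_usb = 1, is_dp4 = 0)
     * is clamped to 2 lanes by the logic above. */
    struct dc_link_settings ls = { .lane_count = LANE_COUNT_FOUR };

    if (is_usb && !is_dp4)
        ls.lane_count = MIN(LANE_COUNT_TWO, ls.lane_count); /* -> LANE_COUNT_TWO */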
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 3279b6102..e408e859b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut(
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on);
+
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
} else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
- ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
* dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 1f8942849..f98def6c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -24,10 +24,11 @@
*/
// header file of functions being implemented
-#include "dcn32_resource.h"
+#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
static bool is_dual_plane(enum surface_pixel_format format)
{
@@ -190,7 +191,7 @@ bool dcn32_subvp_in_use(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
+ if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
return true;
}
return false;
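The recurring transformation throughout this patch can be summarized as follows (the accessor comes from dc_state_priv.h; its NULL handling is inferred from the dropped pipe->stream guard):

    /* before: pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE
     * after:  dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE
     * The accessor appears to cover the NULL-stream case itself, which is
     * why the explicit pipe->stream check could be dropped. */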
@@ -264,18 +265,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
// Do not override if a stream has multiple planes
for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count > 1) {
+ if (context->stream_status[i].plane_count > 1)
return;
- }
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
stream_count++;
- }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {
if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
@@ -290,7 +290,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
if (pipe_segments[i] > 4)
pipe_segments[i] = 4;
@@ -337,14 +337,14 @@ void dcn32_determine_det_override(struct dc *dc,
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
stream_count++;
}
if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
continue;
if (context->stream_status[i].plane_count > 0)
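A worked example of the split above (illustrative stream mix):

    /* 18 DET segments total: with 3 non-phantom streams,
     * stream_segments = 18 / 3 = 6 segments per stream;
     * SUBVP_PHANTOM streams are skipped and receive no DET share here. */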
@@ -430,71 +430,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
dcn32_determine_det_override(dc, context, pipes);
}
-/**
- * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
- *
- * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
- * there are situations where a shallow copy of the dc->current_state is created for the
- * validation. In this case we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
- * fast validation). If we don't restore the subvp config in cases of fast validation +
- * shallow copy of the dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- *
- * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
- * validation. We don't expect this to happen in fast_validation=1 cases.
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @temp_config: struct used to cache the existing MALL state
- *
- * Return: void
- */
-void dcn32_save_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream)
- temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
-
- if (pipe->plane_state)
- temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
- }
-}
-
-/**
- * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
- *
- * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed, restore MALL state into here
- * @temp_config: struct that has the cached MALL state
- *
- * Return: void
- */
-void dcn32_restore_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream)
- pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
-
- if (pipe->plane_state)
- pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
- }
-}
-
#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW)
/*
* Scaling factor for v_blank stretch calculations considering timing in
@@ -589,13 +524,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
*
* Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
*/
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
{
int refresh_rate = 0;
const int minimum_refreshrate_supported = 120;
struct dc_stream_state *fpo_candidate_stream = NULL;
bool is_fpo_vactive = false;
uint32_t fpo_vactive_margin_us = 0;
+ struct dc_stream_status *fpo_stream_status = NULL;
if (context == NULL)
return NULL;
@@ -618,16 +554,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
DC_FP_START();
dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
DC_FP_END();
-
+ if (fpo_candidate_stream)
+ fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
DC_FP_START();
is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
DC_FP_END();
if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
return NULL;
- } else
+ } else {
fpo_candidate_stream = context->streams[0];
+ if (fpo_candidate_stream)
+ fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
+ }
- if (!fpo_candidate_stream)
+ /* In DCN32/321, FPO uses per-pipe P-State force.
+ * If there are no planes, HUBP is power gated and
+ * therefore programming UCLK_PSTATE_FORCE does
+ * nothing (P-State will always be asserted naturally
+ * on a pipe that has HUBP power gated). Therefore we
+ * only want to enable FPO if the FPO pipe has both
+ * a stream and a plane.
+ */
+ if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
return NULL;
if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
@@ -666,6 +614,30 @@ bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int widt
}
/**
+ * disallow_subvp_in_active_plus_blank() - Determine whether a SubVP + DRR/Vblank config is disallowed
+ *
+ * @pipe: SubVP pipe to be used for the SubVP + DRR/Vblank config
+ *
+ * Since SubVP is being enabled on more configs (such as 1080p60), we want
+ * to explicitly block any configs that we don't want to enable. We do not
+ * want to enable any 1080p60 (SubVP) + DRR / Vblank configs since these
+ * are already covered by FPO.
+ *
+ * Return: True if disallowed, false otherwise
+ */
+static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe)
+{
+ bool disallow = false;
+
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+ resource_is_pipe_type(pipe, DPP_PIPE)) {
+ if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920)
+ disallow = true;
+ }
+ return disallow;
+}
+
+/**
* dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
*
* @dc: Current DC state
@@ -688,21 +660,24 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
bool drr_pipe_found = false;
bool drr_psr_capable = false;
uint64_t refresh_rate = 0;
+ bool subvp_disallow = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
resource_is_pipe_type(pipe, DPP_PIPE)) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_mall_type == SUBVP_MAIN) {
subvp_count++;
+ subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE) {
non_subvp_pipes++;
drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
if (pipe->stream->ignore_msa_timing_param &&
@@ -713,7 +688,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
}
}
- if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+ if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
((uint32_t)refresh_rate < 120))
result = true;
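The rounded-up refresh-rate computation above is ceil(pix_clk_hz / (h_total * v_total)); a worked example with standard 1080p60 CEA timing:

    /* pix_clk_100hz = 1485000 (148.5 MHz), h_total = 2200, v_total = 1125:
     * refresh = (1485000 * 100 + 2200 * 1125 - 1) / (1125 * 2200)
     *         = (148500000 + 2474999) / 2475000 = 60 Hz,
     * which satisfies the "< 120" admissibility check. */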
@@ -746,21 +721,24 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool vblank_psr_capable = false;
uint64_t refresh_rate = 0;
+ bool subvp_disallow = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
resource_is_pipe_type(pipe, DPP_PIPE)) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_mall_type == SUBVP_MAIN) {
subvp_count++;
+ subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE) {
non_subvp_pipes++;
vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
if (pipe->stream->ignore_msa_timing_param &&
@@ -772,7 +750,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
}
if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
- ((uint32_t)refresh_rate < 120) &&
+ ((uint32_t)refresh_rate < 120) && !subvp_disallow &&
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
result = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
index 0a199c83b..c195c47f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
@@ -10,7 +10,7 @@
#
# Makefile for dcn321.
-DCN321 = dcn321_resource.o dcn321_dio_link_encoder.o
+DCN321 = dcn321_dio_link_encoder.o
AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
index 20d0eef1a..0e317e0c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
@@ -10,9 +10,9 @@
#
# Makefile for DCN35.
-DCN35 = dcn35_resource.o dcn35_init.o dcn35_dio_stream_encoder.o \
- dcn35_dio_link_encoder.o dcn35_dccg.o dcn35_optc.o \
- dcn35_dsc.o dcn35_hubp.o dcn35_hubbub.o \
+DCN35 = dcn35_dio_stream_encoder.o \
+ dcn35_dio_link_encoder.o dcn35_dccg.o \
+ dcn35_hubp.o dcn35_hubbub.o \
dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index 479f3683c..f1ba7bb79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -256,6 +256,21 @@ static void dccg35_set_dtbclk_dto(
if (params->ref_dtbclk_khz && req_dtbclk_khz) {
uint32_t modulo, phase;
+ switch (params->otg_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1);
+ break;
+ }
+
// phase / modulo = dtbclk / dtbclk ref
modulo = params->ref_dtbclk_khz * 1000;
phase = req_dtbclk_khz * 1000;
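The DTO programming follows the ratio in the comment above, phase / modulo = dtbclk / dtbclk_ref; for example:

    /* Illustrative values: ref_dtbclk_khz = 600000, req_dtbclk_khz = 480000:
     * modulo = 600000 * 1000 = 600000000
     * phase  = 480000 * 1000 = 480000000
     * output = ref * (phase / modulo) = 600000 * 0.8 = 480000 kHz,
     * i.e. exactly the requested DTB clock. */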
@@ -280,6 +295,21 @@ static void dccg35_set_dtbclk_dto(
REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
PIPE_DTO_SRC_SEL[params->otg_inst], 2);
} else {
+ switch (params->otg_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 0);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 0);
+ break;
+ }
+
REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
DTBCLK_DTO_ENABLE[params->otg_inst], 0,
PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
@@ -476,6 +506,64 @@ static void dccg35_dpp_root_clock_control(
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
}
+static void dccg35_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, 0,
+ SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
+ }
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, 0,
+ SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
+ }
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, 0,
+ SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
+ }
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, 0,
+ SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
+ }
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -484,7 +572,7 @@ void dccg35_init(struct dccg *dccg)
* will cause DCN to hang.
*/
for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg31_disable_symclk32_se(dccg, otg_inst);
+ dccg35_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
for (otg_inst = 0; otg_inst < 2; otg_inst++)
@@ -758,7 +846,7 @@ static const struct dccg_funcs dccg35_funcs = {
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
.enable_symclk32_se = dccg31_enable_symclk32_se,
- .disable_symclk32_se = dccg31_disable_symclk32_se,
+ .disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
.set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
index 423feb4c2..1586a45ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
@@ -34,6 +34,8 @@
#define DCCG_REG_LIST_DCN35() \
DCCG_REG_LIST_DCN314(),\
SR(DPPCLK_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL4),\
+ SR(DCCG_GATE_DISABLE_CNTL5),\
SR(DCCG_GATE_DISABLE_CNTL6),\
SR(DCCG_GLOBAL_FGCG_REP_CNTL),\
SR(SYMCLKA_CLOCK_ENABLE),\
@@ -174,7 +176,61 @@
DCCG_SF(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, mask_sh),\
- DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh)
+ DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d58..da94e5309 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
index d19db8e9b..53bd0ae4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
@@ -342,13 +342,6 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
pg_cntl->pg_res_enable[PG_DCIO] = power_on;
}
-void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on)
-{
- struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
-
- REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0);
-}
-
static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl)
{
struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
@@ -518,8 +511,7 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = {
.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
.opp_pg_control = pg_cntl35_opp_pg_control,
.optc_pg_control = pg_cntl35_optc_pg_control,
- .dwb_pg_control = pg_cntl35_dwb_pg_control,
- .set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22
+ .dwb_pg_control = pg_cntl35_dwb_pg_control
};
struct pg_cntl *pg_cntl35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
index 069dae08e..3de240884 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
@@ -183,7 +183,6 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
unsigned int optc_inst, bool power_on);
void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on);
void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl);
-void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on);
struct pg_cntl *pg_cntl35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 7ce9a5b6c..6d7a15dcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -103,10 +103,16 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
/*
* Sends ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_send_payload_allocation(
+void dm_helpers_dp_mst_send_payload_allocation(
struct dc_context *ctx,
- const struct dc_stream_state *stream,
- bool enable);
+ const struct dc_stream_state *stream);
+
+/*
+ * Update mst manager relevant variables
+ */
+void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
bool dm_helpers_dp_mst_start_top_mgr(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 4440d0874..bd7ba0a25 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -247,6 +247,7 @@ struct pp_smu_funcs_nv {
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
#define PP_SMU_NUM_DCLK_DPM_LEVELS 8
#define PP_SMU_NUM_VCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8
struct dpm_clock {
uint32_t Freq; // In MHz
@@ -262,6 +263,7 @@ struct dpm_clocks {
struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
+ struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS];
};
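With PP_SMU_NUM_VPECLK_DPM_LEVELS, VPE clock levels are exposed through dpm_clocks just like the other rails. A small illustrative sketch — assuming, as with the other clock tables, that unpopulated levels report Freq == 0 — of scanning the table for the peak VPE frequency:

#include <stdint.h>
#include <stdio.h>

#define NUM_VPECLK_DPM_LEVELS 8

struct dpm_clock {
	uint32_t Freq;	/* In MHz */
	uint32_t Vol;	/* voltage code; illustrative */
};

struct dpm_clocks {
	struct dpm_clock VPEClocks[NUM_VPECLK_DPM_LEVELS];
};

static uint32_t max_vpeclk_mhz(const struct dpm_clocks *t)
{
	uint32_t max = 0;

	for (int i = 0; i < NUM_VPECLK_DPM_LEVELS; i++)
		if (t->VPEClocks[i].Freq > max)	/* zero entries are unused */
			max = t->VPEClocks[i].Freq;
	return max;
}

int main(void)
{
	struct dpm_clocks t = { .VPEClocks = { { 480, 0 }, { 960, 1 } } };

	printf("max VPECLK: %u MHz\n", max_vpeclk_mhz(&t));
	return 0;
}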
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index 50b043435..0c4a8fe8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -30,7 +30,7 @@
#include "dcn_calc_auto.h"
#include "dal_asic_id.h"
#include "resource.h"
-#include "dcn10/dcn10_resource.h"
+#include "resource/dcn10/dcn10_resource.h"
#include "dcn10/dcn10_hubbub.h"
#include "dml/dml1_display_rq_dlg_calc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index d2271e308..38ab9ad60 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -33,6 +33,7 @@
#include "link.h"
#include "dcn20_fpu.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -1182,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1532,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
*/
if (res_ctx->pipe_ctx[i].plane_state &&
(res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM))
+ dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM))
pipes[pipe_cnt].pipe.src.num_cursors = 0;
else
pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
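The recurring substitution in this file — pipe->stream->mall_stream_config.type replaced by dc_state_get_pipe_subvp_type(context, pipe) — moves SubVP classification out of the stream object and into state-owned bookkeeping behind an accessor. A compact sketch of that pattern with hypothetical types:

#include <stdio.h>

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

#define MAX_STREAMS 6

struct stream { int id; };

struct state {
	struct stream *streams[MAX_STREAMS];
	enum mall_stream_type subvp_type[MAX_STREAMS];	/* owned by the state */
	int stream_count;
};

static enum mall_stream_type get_stream_subvp_type(const struct state *st,
						   const struct stream *s)
{
	for (int i = 0; i < st->stream_count; i++)
		if (st->streams[i] == s)
			return st->subvp_type[i];
	return SUBVP_NONE;	/* unknown streams are not SubVP */
}

int main(void)
{
	struct stream a = { .id = 0 };
	struct state st = {
		.streams = { &a },
		.subvp_type = { SUBVP_PHANTOM },
		.stream_count = 1,
	};

	if (get_stream_subvp_type(&st, &a) == SUBVP_PHANTOM)
		printf("phantom: DET size 0, unbounded requests off\n");
	return 0;
}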
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 3686f1e7d..63c48c29b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3542,7 +3542,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
struct vba_vars_st *v = &mode_lib->vba;
int MinPrefetchMode, MaxPrefetchMode;
- int i;
+ int i, start_state;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
bool WritebackModeSupport = true;
@@ -3553,6 +3553,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
+
CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
@@ -3851,7 +3856,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
@@ -4007,7 +4012,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Total Available Pipes Support Check*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
@@ -4046,7 +4051,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
@@ -4174,7 +4179,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
@@ -4185,7 +4190,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
@@ -4197,7 +4202,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -4217,7 +4222,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
@@ -4333,7 +4338,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
@@ -5075,7 +5080,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*PTE Buffer Size Check*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5136,7 +5141,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*Mode Support, Voltage State and SOC Configuration*/
- for (i = v->soc.num_states - 1; i >= 0; i--) {
+ for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
@@ -5158,7 +5163,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
{
unsigned int MaximumMPCCombine = 0;
- for (i = v->soc.num_states; i >= 0; i--) {
+ for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
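The new start_state lets dml30_ModeSupportAndSystemConfigurationFull validate only the highest voltage state when mode_lib->validate_max_state is set, simply by rebasing every per-state loop. The idea reduces to a few lines:

#include <stdbool.h>
#include <stdio.h>

#define NUM_STATES 5

static void validate_states(bool validate_max_state_only)
{
	int start_state = validate_max_state_only ? NUM_STATES - 1 : 0;

	for (int i = start_state; i < NUM_STATES; i++)
		printf("validating voltage state %d\n", i);
}

int main(void)
{
	validate_states(false);	/* full sweep: states 0..4 */
	validate_states(true);	/* max-state only: state 4 */
	return 0;
}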
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b315ca6f1..ba76dd4a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -32,6 +32,7 @@
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -45,6 +46,14 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = {
{.width = 1920, .height = 1080, }},
};
+static const struct subvp_active_margin_list subvp_active_margin_list = {
+ .min_refresh = 55,
+ .max_refresh = 65,
+ .res = {
+ {.width = 2560, .height = 1440, },
+ {.width = 1920, .height = 1080, }},
+};
+
struct _vcs_dpi_ip_params_st dcn3_2_ip = {
.gpuvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -333,7 +342,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
pipes[pipe_idx].pipe.dest.vstartup_start =
get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset =
@@ -616,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
!pipe->plane_state->address.tmz_surface &&
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
@@ -674,7 +683,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
// Find the minimum pipe split count for non SubVP pipes
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
@@ -727,8 +736,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- phantom = pipe->stream->mall_stream_config.paired_stream;
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -796,6 +805,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
+ struct dc_stream_state *phantom_stream;
+ bool subvp_found = false;
+ bool drr_found = false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -808,8 +820,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ subvp_found = true;
break;
+ }
}
// Find the DRR pipe
@@ -817,32 +831,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
drr_pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe only
- if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
- !resource_is_pipe_type(pipe, DPP_PIPE))
+ if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(drr_pipe, DPP_PIPE))
continue;
- if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
- (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed))
+ if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
+ (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
+ drr_found = true;
break;
+ }
}
- main_timing = &pipe->stream->timing;
- phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
- drr_timing = &drr_pipe->stream->timing;
- prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
- (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us;
- subvp_active_us = main_timing->v_addressable * main_timing->h_total /
- (double)(main_timing->pix_clk_100hz * 100) * 1000000;
- drr_frame_us = drr_timing->v_total * drr_timing->h_total /
- (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
- // P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
- (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
- stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
- (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
- max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+ if (subvp_found && drr_found) {
+ phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+ main_timing = &pipe->stream->timing;
+ phantom_timing = &phantom_stream->timing;
+ drr_timing = &drr_pipe->stream->timing;
+ prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
+ (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
+ dc->caps.subvp_prefetch_end_to_mall_start_us;
+ subvp_active_us = main_timing->v_addressable * main_timing->h_total /
+ (double)(main_timing->pix_clk_100hz * 100) * 1000000;
+ drr_frame_us = drr_timing->v_total * drr_timing->h_total /
+ (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
+ // P-State allow width and FW delays are already included in phantom_timing->v_addressable
+ mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
+ (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
+ stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
+ drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
+ (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
+ max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+ }
/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
* highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
@@ -887,6 +906,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
+ struct dc_stream_state *phantom_stream;
+ enum mall_stream_type pipe_mall_type;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -896,6 +917,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
@@ -903,18 +925,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
!resource_is_pipe_type(pipe, DPP_PIPE))
continue;
- if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
- if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
if (found) {
+ phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
- phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
@@ -969,7 +992,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
continue;
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
@@ -1018,23 +1041,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (pipe_mall_type == SUBVP_MAIN)
subvp_count++;
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE)
non_subvp_pipes++;
- }
}
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
@@ -1070,7 +1093,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
pipe_ctx->subvp_index = index++;
} else {
pipe_ctx->subvp_index = 0;
@@ -1414,6 +1437,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
unsigned int dc_pipe_idx = 0;
int i = 0;
bool found_supported_config = false;
+ int vlevel_temp = 0;
dc_assert_fp_enabled();
@@ -1446,13 +1470,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
*/
if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
!dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) &&
- (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] &&
+ vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
memset(merge, 0, MAX_PIPES * sizeof(bool));
+ vlevel_temp = *vlevel;
/* to re-initialize viewport after the pipe merge */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1521,10 +1547,14 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
+ if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp])
+ found_supported_config = false;
+
// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
// remove phantom pipes and repopulate dml pipes
if (!found_supported_config) {
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
@@ -1676,7 +1706,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1708,7 +1738,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
@@ -1922,7 +1952,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
return false;
// For each full update, remove all existing phantom pipes first
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
@@ -2729,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
- unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
+ unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
@@ -3305,25 +3336,24 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
{
bool allow = false;
uint32_t refresh_rate = 0;
+ uint32_t min_refresh = subvp_active_margin_list.min_refresh;
+ uint32_t max_refresh = subvp_active_margin_list.max_refresh;
+ uint32_t i;
- /* Allow subvp on displays that have active margin for 2560x1440@60hz displays
- * only for now. There must be no scaling as well.
- *
- * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs
- * for p-state switching.
- */
- if (pipe->stream && pipe->plane_state) {
- refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
- / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
- if (pipe->stream->timing.v_addressable == 1440 &&
- pipe->stream->timing.h_addressable == 2560 &&
- refresh_rate >= 55 && refresh_rate <= 65 &&
- pipe->plane_state->src_rect.height == 1440 &&
- pipe->plane_state->src_rect.width == 2560 &&
- pipe->plane_state->dst_rect.height == 1440 &&
- pipe->plane_state->dst_rect.width == 2560)
+ for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) {
+ uint32_t width = subvp_active_margin_list.res[i].width;
+ uint32_t height = subvp_active_margin_list.res[i].height;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+ if (refresh_rate >= min_refresh && refresh_rate <= max_refresh &&
+ dcn32_check_native_scaling_for_res(pipe, width, height)) {
allow = true;
+ break;
+ }
}
return allow;
}
@@ -3442,7 +3472,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
+ /* In DCN32/321, FPO uses per-pipe P-State force.
+ * If there are no planes, HUBP is power gated and
+ * therefore programming UCLK_PSTATE_FORCE does
+ * nothing (P-State will always be asserted naturally
+ * on a pipe that has HUBP power gated). Therefore we
+ * only want to enable FPO if the FPO pipe has both
+ * a stream and a plane.
+ */
+ if (!pipe->stream || !pipe->plane_state)
continue;
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
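Several hunks above recompute refresh rate with 64-bit integer math and div_u64 instead of doubles. Roughly, it is a ceiling division of the pixel clock by v_total * h_total, widened to 64 bits and split into two divides because div_u64 wants a 32-bit divisor; a userspace approximation (illustrative timing values):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t refresh_rate_hz(uint32_t pix_clk_100hz,
				uint32_t h_total, uint32_t v_total)
{
	uint64_t r = (uint64_t)pix_clk_100hz * 100 +
		     (uint64_t)v_total * h_total - 1;	/* ceiling bias */

	r /= v_total;	/* div_u64(r, v_total) in the kernel code */
	r /= h_total;	/* div_u64(r, h_total) */
	return (uint32_t)r;
}

int main(void)
{
	/* 2560x1440-class mode: ~241.5 MHz pixel clock, 2720x1481 totals */
	printf("%" PRIu32 " Hz\n", refresh_rate_hz(2415000, 2720, 1481));
	return 0;
}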
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index f154a3eb1..7ea2bd537 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -164,11 +164,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
- .sr_exit_z8_time_us = 525.0,
- .sr_enter_plus_exit_z8_time_us = 715.0,
- .fclk_change_latency_us = 20.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 210.0,
+ .sr_enter_plus_exit_z8_time_us = 320.0,
+ .fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -326,6 +326,25 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
dcn3_5_soc.dram_clock_change_latency_us =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_5_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_5_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_5_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
/*temp till dml2 fully work without dml1*/
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
DML_PROJECT_DCN31);
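This hunk wires the standard bb_overrides knobs into the DCN3.5 SoC bounding box: each override is stored in nanoseconds, applied only when positive, and converted to the microseconds the table uses. The pattern, trimmed to two fields (the real struct carries more):

#include <stdio.h>

struct bb_overrides {
	int sr_exit_time_ns;		/* <= 0 means no override */
	int sr_enter_plus_exit_time_ns;
};

struct soc_bb {
	double sr_exit_time_us;
	double sr_enter_plus_exit_time_us;
};

static void apply_overrides(const struct bb_overrides *o, struct soc_bb *soc)
{
	if (o->sr_exit_time_ns > 0)
		soc->sr_exit_time_us = o->sr_exit_time_ns / 1000.0;
	if (o->sr_enter_plus_exit_time_ns > 0)
		soc->sr_enter_plus_exit_time_us =
			o->sr_enter_plus_exit_time_ns / 1000.0;
}

int main(void)
{
	struct soc_bb soc = {
		.sr_exit_time_us = 28.0,
		.sr_enter_plus_exit_time_us = 30.0,
	};
	struct bb_overrides o = { .sr_exit_time_ns = 31500 };

	apply_overrides(&o, &soc);
	printf("sr_exit: %.1f us\n", soc.sr_exit_time_us);	/* 31.5 */
	return 0;
}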
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 1a2b24cc6..0baf39d64 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -772,18 +772,29 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
const struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_stream_status *status, unsigned int stream_id,
+ const struct dc_stream_status *status,
+ const struct dc_stream_state *stream,
int plane_idx)
{
unsigned int plane_id;
unsigned int cfg_idx;
+ unsigned int mpc_factor;
- get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id);
+ get_plane_id(ctx, state, status->plane_states[plane_idx],
+ stream->stream_id, plane_idx, &plane_id);
cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
- if (ctx->architecture == dml2_architecture_20)
- return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
- ASSERT(false);
- return 1;
+ if (ctx->architecture == dml2_architecture_20) {
+ mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
+ } else {
+ mpc_factor = 1;
+ ASSERT(false);
+ }
+
+ /* For stereo timings, we always need a pipe split */
+ if (dml2_is_stereo_timing(stream))
+ mpc_factor = 2;
+
+ return mpc_factor;
}
static unsigned int get_odm_factor(
@@ -820,14 +831,13 @@ static void populate_mpc_factors_for_stream(
unsigned int mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
- unsigned int stream_id = state->streams[stream_idx]->stream_id;
int i;
for (i = 0; i < status->plane_count; i++)
if (odm_factor == 1)
mpc_factors[i] = get_mpc_factor(
ctx, state, disp_cfg, mapping, status,
- stream_id, i);
+ state->streams[stream_idx], i);
else
mpc_factors[i] = 1;
}
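get_mpc_factor now receives the stream itself so it can force a factor of 2 for stereo timings — which always need a pipe split — regardless of the DPPPerSurface value DML produced. Reduced to stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct stream { bool stereo; };

static unsigned int get_mpc_factor(const struct stream *s,
				   unsigned int dpp_per_surface)
{
	unsigned int mpc_factor = dpp_per_surface ? dpp_per_surface : 1;

	if (s->stereo)		/* stereo timings always pipe-split */
		mpc_factor = 2;
	return mpc_factor;
}

int main(void)
{
	struct stream mono = { .stereo = false };
	struct stream s3d = { .stereo = true };

	printf("mono: %u, stereo: %u\n",
	       get_mpc_factor(&mono, 1), get_mpc_factor(&s3d, 1));
	return 0;
}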
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
index e85866db8..7ca7f2a74 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
@@ -38,5 +38,6 @@
#include "core_types.h"
#include "dsc.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#endif //__DML2_DC_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 32f8a43af..282d70e2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx,
// Find the phantom pipes
if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
mblk_width = ctx->config.mall_cfg.mblk_width_pixels;
mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels;
@@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context
* to combine this with SubVP can cause issues with the scheduling).
*/
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
@@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st
// Find the minimum pipe split count for non SubVP pipes
if (pipe->stream && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
@@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- phantom = pipe->stream->mall_stream_config.paired_stream;
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
struct pipe_ctx *pipe = NULL;
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
+ struct dc_stream_state *phantom_stream;
int16_t prefetch_us = 0;
int16_t mall_region_us = 0;
int16_t drr_frame_us = 0; // nominal frame time
@@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
break;
}
+ phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
main_timing = &pipe->stream->timing;
- phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us;
@@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
+ struct dc_stream_state *phantom_stream;
+ enum mall_stream_type pipe_mall_type;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
*/
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
- if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
// Use ignore_msa_timing_param flag to identify as DRR
@@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
// SUBVP + DRR case
schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing);
} else if (found) {
+ phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
- phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
@@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc
for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ pipe_mall_type == SUBVP_MAIN)
subvp_count++;
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
@@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup)
{
struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx];
- struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink);
-
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+ struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream(
+ ctx->config.svp_pstate.callbacks.dc,
+ state,
+ ref_pipe->stream);
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s
memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup);
- ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+ ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc,
+ state,
+ phantom_stream,
+ ref_pipe->stream);
return phantom_stream;
}
@@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx,
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) {
phantom_plane = prev_phantom_plane;
} else {
- phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc);
+ phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane(
+ ctx->config.svp_pstate.callbacks.dc,
+ state,
+ curr_pipe->plane_state);
}
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
@@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
- phantom_plane->is_phantom = true;
-
- ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
+ ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
curr_pipe = curr_pipe->bottom_pipe;
prev_phantom_plane = phantom_plane;
@@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
// We determine which phantom pipes were added by comparing with
// the phantom stream.
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
pipe->stream->use_dynamic_meta = false;
pipe->plane_state->flip_immediate = false;
if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) {
@@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
}
}
-static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
+static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
@@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str
for (i = 0; i < old_plane_count; i++)
del_planes[i] = stream_status->plane_states[i];
- for (i = 0; i < old_plane_count; i++)
- if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
+ for (i = 0; i < old_plane_count; i++) {
+ if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
return false;
+ ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]);
+ }
return true;
}
@@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
{
int i;
bool removed_pipe = false;
- struct dc_plane_state *phantom_plane = NULL;
struct dc_stream_state *phantom_stream = NULL;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
// build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
+ if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
phantom_stream = pipe->stream;
- remove_all_planes_for_stream(ctx, pipe->stream, state);
- ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream);
-
- /* Ref count is incremented on allocation and also when added to the context.
- * Therefore we must call release for the the phantom plane and stream once
- * they are removed from the ctx to finally decrement the refcount to 0 to free.
- */
- ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane);
- ctx->config.svp_pstate.callbacks.stream_release(phantom_stream);
+ remove_all_phantom_planes_for_stream(ctx, phantom_stream, state);
+ ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+ ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
removed_pipe = true;
}
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
-
if (pipe->plane_state) {
pipe->plane_state->is_phantom = false;
}
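The teardown path above swaps direct remove/release calls for paired remove_phantom_*/release_phantom_* callbacks. The comment deleted from the old code explains the two-step drop: one reference comes from allocation and one from membership in the context, so both a remove and a release are needed before the object is freed. A self-contained refcount sketch of that lifecycle:

#include <stdio.h>
#include <stdlib.h>

struct phantom { int refcount; };

static struct phantom *phantom_create(void)
{
	struct phantom *p = calloc(1, sizeof(*p));

	if (!p)
		abort();
	p->refcount = 1;	/* allocation reference */
	return p;
}

static void phantom_get(struct phantom *p)
{
	p->refcount++;
}

static void phantom_put(struct phantom *p)
{
	if (--p->refcount == 0) {
		printf("phantom freed\n");
		free(p);
	}
}

int main(void)
{
	struct phantom *p = phantom_create();

	phantom_get(p);	/* add to state: the context takes a reference */
	phantom_put(p);	/* remove from state: the context drops its reference */
	phantom_put(p);	/* release: final reference, object is freed */
	return 0;
}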
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index b6744ad77..a20f28a5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -1055,8 +1055,10 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
- int i = 0, j = 0;
+ int i = 0, j = 0, k = 0;
int disp_cfg_stream_location, disp_cfg_plane_location;
+ enum mall_stream_type stream_mall_type;
+ struct pipe_ctx *current_pipe_context;
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false;
@@ -1076,7 +1078,17 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
for (i = 0; i < context->stream_count; i++) {
+ current_pipe_context = NULL;
+ for (k = 0; k < MAX_PIPES; k++) {
+ /* find one pipe allocated to this stream so we can
+ * query info about its link later */
+ if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) {
+ current_pipe_context = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+ }
disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg);
+ stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]);
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
@@ -1084,7 +1096,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
@@ -1121,10 +1133,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context);
- if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) {
+ if (stream_mall_type == SUBVP_MAIN) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize;
- } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) {
+ } else if (stream_mall_type == SUBVP_PHANTOM) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable;
dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required;
@@ -1141,7 +1153,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (j >= 1) {
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
@@ -1153,9 +1165,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
break;
}
- if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN)
+ if (stream_mall_type == SUBVP_MAIN)
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
- else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ else if (stream_mall_type == SUBVP_PHANTOM)
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id;
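map_dc_state_into_dml_display_cfg now scans res_ctx.pipe_ctx for the first pipe carrying each stream instead of indexing pipes by the stream index, since any pipe on the stream exposes the same link info. The lookup boils down to this (stand-in types):

#include <stdio.h>

#define MAX_PIPES 6

struct stream { int id; };
struct pipe_ctx { struct stream *stream; };

static struct pipe_ctx *first_pipe_for_stream(struct pipe_ctx pipes[],
					      const struct stream *s)
{
	for (int k = 0; k < MAX_PIPES; k++)
		if (pipes[k].stream == s)
			return &pipes[k];
	return NULL;	/* no pipe allocated yet; callers must tolerate this */
}

int main(void)
{
	struct stream s = { .id = 7 };
	struct pipe_ctx pipes[MAX_PIPES] = { { NULL }, { &s } };
	struct pipe_ctx *p = first_pipe_for_stream(pipes, &s);

	printf("stream %d on pipe %ld\n", s.id, p ? (long)(p - pipes) : -1L);
	return 0;
}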
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index d6a684841..1068b962d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -155,8 +155,12 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e
bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
{
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return false;
+
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
+
/* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
@@ -283,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str
void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt)
{
unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id;
+ enum mall_stream_type pipe_mall_type;
bool unbounded_req_enabled = false;
struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch;
@@ -330,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
*/
populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
- if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]);
+ if (pipe_mall_type == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false;
@@ -357,7 +363,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
- if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_mall_type != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes;
} else {
/* SUBVP: phantom surfaces only stored in MALL */
@@ -476,7 +482,7 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
return need_recalculation;
}
-bool dml2_is_stereo_timing(struct dc_stream_state *stream)
+bool dml2_is_stereo_timing(const struct dc_stream_state *stream)
{
bool is_stereo = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
index 23b902833..5842d6d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
@@ -42,7 +42,7 @@ void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_st
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx);
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context);
-bool dml2_is_stereo_timing(struct dc_stream_state *stream);
+bool dml2_is_stereo_timing(const struct dc_stream_state *stream);
/*
* dml2_dc_construct_pipes - This function will determine if we need additional pipes based
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 269bfb14c..72cca3670 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -423,7 +423,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state)
int i;
for (i = 0; i < display_state->stream_count; i++) {
- if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE
+ if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
&& display_state->streams[i]->ignore_msa_timing_param) {
// Use ignore_msa_timing_param flag to identify as DRR
return i;
@@ -639,6 +639,8 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+ // copy StutterPeriod for deciding Z-state use
+ context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
}
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 548504d7d..cc662d682 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -93,15 +93,34 @@ struct dml2_dc_callbacks {
struct dml2_dc_svp_callbacks {
struct dc *dc;
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
- struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data);
- struct dc_plane_state* (*create_plane)(struct dc *dc);
- enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
- bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
- bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
- enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream);
- void (*plane_state_release)(struct dc_plane_state *plane_state);
- void (*stream_release)(struct dc_stream_state *stream);
+ struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+ struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+ enum dc_status (*add_phantom_stream)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+ bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
+ bool (*remove_phantom_plane)(const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+ enum dc_status (*remove_phantom_stream)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+ void (*release_phantom_plane)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *plane);
+ void (*release_phantom_stream)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc);
+ enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
+ enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
+ struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
};
struct dml2_clks_table_entry {
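The reworked dml2_dc_svp_callbacks keeps DML2 decoupled from DC internals: phantom streams and planes are created, added, removed, and released only through function pointers supplied by the DC side. A minimal sketch of wiring and invoking such a table — illustrative init code, not the actual registration path:

#include <stdio.h>

struct dc { int unused; };
struct state { int unused; };
struct stream { int unused; };

struct svp_callbacks {
	struct dc *dc;
	struct stream *(*create_phantom_stream)(const struct dc *dc,
						struct state *st,
						struct stream *main_stream);
	void (*release_phantom_stream)(const struct dc *dc,
				       struct state *st,
				       struct stream *s);
};

static struct stream g_phantom;

static struct stream *create_phantom_stream(const struct dc *dc,
					    struct state *st,
					    struct stream *main_stream)
{
	printf("phantom stream created for main stream\n");
	return &g_phantom;
}

static void release_phantom_stream(const struct dc *dc, struct state *st,
				   struct stream *s)
{
	printf("phantom stream released\n");
}

int main(void)
{
	struct dc dc = { 0 };
	struct state st = { 0 };
	struct stream main_stream = { 0 };
	struct svp_callbacks cb = {
		.dc = &dc,
		.create_phantom_stream = create_phantom_stream,
		.release_phantom_stream = release_phantom_stream,
	};
	struct stream *ph = cb.create_phantom_stream(cb.dc, &st, &main_stream);

	cb.release_phantom_stream(cb.dc, &st, ph);
	return 0;
}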
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index a2537229e..b183ba5a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,8 +1,34 @@
# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
+
+ifdef CONFIG_DRM_AMD_DC_FP
+
+###############################################################################
+# DCN20
+###############################################################################
+DSC_DCN20 = dcn20_dsc.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn20/,$(DSC_DCN20))
+
+
+
+
+###############################################################################
+# DCN35
+###############################################################################
+
+DSC_DCN35 = dcn35_dsc.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35))
+
+
+
+endif
+
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC))
AMD_DISPLAY_FILES += $(AMD_DAL_DSC)
+
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index e8b5f17be..0df6c55eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -331,8 +331,9 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
int buff_block_size;
int buff_size;
- if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT],
- &buff_block_size))
+ if (!dsc_buff_block_size_from_dpcd(
+ dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] & 0x03,
+ &buff_block_size))
return false;
buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1;
@@ -357,10 +358,15 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
{
int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+ int dsc_throughput_granular_delta;
+
+ dsc_throughput_granular_delta = dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] >> 3;
+ dsc_throughput_granular_delta *= 2;
if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK,
&dsc_sink_caps->throughput_mode_0_mps))
return false;
+ dsc_sink_caps->throughput_mode_0_mps += dsc_throughput_granular_delta;
dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT;
if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps))
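Note: the added lines derive a finer-grained peak-throughput figure: the bits above the block-size field (byte >> 3) are read as an increment in units of 2 MP/s and added on top of the coarse DP_DSC_PEAK_THROUGHPUT value. A worked standalone sketch of the same arithmetic:

/* Standalone sketch of the granular-delta arithmetic shown above. */
static int dsc_granular_delta_mps(unsigned char rc_buf_blk_size_byte)
{
	/* e.g. 0x1A: 0x1A >> 3 == 3, so the sink reports an extra
	 * 3 * 2 == 6 MP/s on top of the coarse throughput_mode_0 value. */
	return (rc_buf_blk_size_byte >> 3) * 2;
}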
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index c9ae2d8f0..c9ae2d8f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index ba869387c..ba869387c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 71d2dff99..71d2dff99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h
index 133ad3884..133ad3884 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 4b27f29d0..4b27f29d0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index bccd46bd1..254136f8e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP
# DCN
###############################################################################
-HWSS_DCN10 = dcn10_hwseq.o
+HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o
AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10))
@@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10)
###############################################################################
-HWSS_DCN20 = dcn20_hwseq.o
+HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o
AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20))
@@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20)
###############################################################################
-HWSS_DCN201 = dcn201_hwseq.o
+HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o
AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201))
@@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201)
###############################################################################
-HWSS_DCN21 = dcn21_hwseq.o
+HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o
AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21))
@@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21)
###############################################################################
-HWSS_DCN30 = dcn30_hwseq.o
+HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o
AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30))
@@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30)
###############################################################################
-HWSS_DCN301 = dcn301_hwseq.o
+HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o
AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301))
@@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301)
###############################################################################
-HWSS_DCN302 = dcn302_hwseq.o
+HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o
AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302)
+
+
###############################################################################
-HWSS_DCN303 = dcn303_hwseq.o
+HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o
AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303))
@@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303)
###############################################################################
-HWSS_DCN31 = dcn31_hwseq.o
+HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o
AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31))
@@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31)
###############################################################################
-HWSS_DCN314 = dcn314_hwseq.o
+HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o
AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314))
@@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314)
###############################################################################
-HWSS_DCN32 = dcn32_hwseq.o
+HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o
AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32))
@@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32)
###############################################################################
-HWSS_DCN35 = dcn35_hwseq.o
+HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o
AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35))
@@ -180,4 +182,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
-endif
\ No newline at end of file
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
index 44b4df646..52f045cfd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
@@ -682,6 +682,7 @@ struct dce_hwseq_registers {
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
uint32_t HPO_TOP_HW_CONTROL;
uint32_t DMU_CLK_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
uint32_t DCCG_GATE_DISABLE_CNTL5;
};
/* set field name */
@@ -1199,7 +1200,19 @@ struct dce_hwseq_registers {
type PHYBSYMCLK_ROOT_GATE_DISABLE;\
type PHYCSYMCLK_ROOT_GATE_DISABLE;\
type PHYDSYMCLK_ROOT_GATE_DISABLE;\
- type PHYESYMCLK_ROOT_GATE_DISABLE;
+ type PHYESYMCLK_ROOT_GATE_DISABLE;\
+ type DTBCLK_P0_GATE_DISABLE;\
+ type DTBCLK_P1_GATE_DISABLE;\
+ type DTBCLK_P2_GATE_DISABLE;\
+ type DTBCLK_P3_GATE_DISABLE;\
+ type DPSTREAMCLK0_GATE_DISABLE;\
+ type DPSTREAMCLK1_GATE_DISABLE;\
+ type DPSTREAMCLK2_GATE_DISABLE;\
+ type DPSTREAMCLK3_GATE_DISABLE;\
+ type DPIASYMCLK0_GATE_DISABLE;\
+ type DPIASYMCLK1_GATE_DISABLE;\
+ type DPIASYMCLK2_GATE_DISABLE;\
+ type DPIASYMCLK3_GATE_DISABLE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 3642c069b..12af6bb9f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -55,6 +55,7 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
+#include "dc_state_priv.h"
#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -1476,7 +1477,7 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -1597,7 +1598,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* is constructed with the same sink). Make sure not to override
* and link programming on the main.
*/
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false;
}
@@ -1685,7 +1686,7 @@ static void disable_vga_and_power_gate_all_controllers(
true);
dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc,
+ dc->hwss.disable_plane(dc, dc->current_state,
&dc->current_state->res_ctx.pipe_ctx[i]);
}
}
@@ -2135,7 +2136,7 @@ static void dce110_reset_hw_ctx_wrap(
old_clk))
old_clk->funcs->cs_power_down(old_clk);
- dc->hwss.disable_plane(dc, pipe_ctx_old);
+ dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old);
pipe_ctx_old->stream = NULL;
}
@@ -2302,7 +2303,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;
- status = apply_single_controller_ctx_to_hw(
+ status = dce110_apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
dc);
@@ -2499,6 +2500,7 @@ static bool wait_for_reset_trigger_to_occur(
/* Enable timing synchronization for a group of Timing Generators. */
static void dce110_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -2592,6 +2594,7 @@ static void init_hw(struct dc *dc)
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2641,13 +2644,15 @@ static void init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
abm = dc->res_pool->abm;
if (abm != NULL)
- abm->funcs->abm_init(abm, backlight);
+ abm->funcs->abm_init(abm, backlight, user_level);
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
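Note: abm_init() now takes a second argument. The pattern repeated across the dce110/dcn10/dcn30/dcn31/dcn32 init paths is to capture both the hardware backlight level returned by panel_cntl->funcs->hw_init() and the user level saved in stored_backlight_registers, so ABM can restore the user's setting instead of assuming maximum brightness. Consolidated, the shape is (sketch using the types above):

/* Sketch of the recurring init pattern; both levels default to max
 * when no panel controller is present. */
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
uint32_t user_level = MAX_BACKLIGHT_LEVEL;

if (link->panel_cntl) {
	backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
	user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
}
if (abm)
	abm->funcs->abm_init(abm, backlight, user_level);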
@@ -2844,7 +2849,7 @@ static void dce110_post_unlock_program_front_end(
{
}
-static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
int fe_idx = pipe_ctx->plane_res.mi ?
@@ -3117,7 +3122,8 @@ void dce110_disable_link_output(struct dc_link *link,
struct dmcu *dmcu = dc->res_pool->dmcu;
if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control &&
+ !link->skip_implict_edp_power_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
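Note: the disable path now honours the link's opt-out flag (upstream spells the field skip_implict_edp_power_control), so the eDP backlight is only dropped when a backlight hook exists and implicit power control is still in effect. Boiled down to a standalone predicate:

/* Standalone sketch of the new guard in dce110_disable_link_output(). */
static bool should_drop_edp_backlight(bool is_edp, bool has_backlight_hook,
				      bool skip_implicit_power_control)
{
	return is_edp && has_backlight_hook && !skip_implicit_power_control;
}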
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 08028a177..ed3cc3648 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 2b3ef5cdb..c45f84aa3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -56,6 +56,7 @@
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc_logger
@@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
!pipe_ctx->stream ||
(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg) ||
- pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
continue;
if (lock)
@@ -1181,7 +1182,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1201,7 +1204,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
// so don't wait for MPCC_IDLE in the programming sequence
- if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
+ if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
@@ -1291,7 +1294,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1417,12 +1420,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -1487,6 +1490,7 @@ void dcn10_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bool is_optimized_init_done = false;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -1584,12 +1588,14 @@ void dcn10_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
if (abm != NULL)
- abm->funcs->abm_init(abm, backlight);
+ abm->funcs->abm_init(abm, backlight, user_level);
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
@@ -2266,6 +2272,7 @@ void dcn10_enable_vblanks_synchronization(
void dcn10_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -2280,7 +2287,7 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
@@ -2300,14 +2307,14 @@ void dcn10_enable_timing_synchronization(
if (grouped_pipes[i]->stream == NULL)
continue;
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream->vblank_synchronized = false;
}
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
@@ -2321,11 +2328,11 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
@@ -2333,7 +2340,7 @@ void dcn10_enable_timing_synchronization(
}
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
@@ -3025,7 +3032,7 @@ void dcn10_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
@@ -3072,7 +3079,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index ef6d56da4..bc5dd68a2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn10_lock_all_pipes(
struct dc *dc,
struct dc_state *context,
@@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc);
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
void dce110_power_down(struct dc *dc);
void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dcn10_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index a5bdac79a..a5bdac79a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h
index 8c6fd7b84..8c6fd7b84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index e3f547e06..868a086c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -55,6 +55,7 @@
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc_logger
@@ -623,9 +624,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
}
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
- bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -847,7 +848,7 @@ enum dc_status dcn20_enable_stream_timing(
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
@@ -1368,8 +1369,14 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
+static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
new_pipe->update_flags.raw = 0;
/* If non-phantom pipe is being transitioned to a phantom pipe,
@@ -1379,8 +1386,8 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
* be different). The post_unlock sequence will set the correct
* update flags to enable the phantom pipe.
*/
- if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
- new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
new_pipe->update_flags.bits.disable = 1;
return;
}
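Note: dcn20_detect_pipe_changes() now receives both contexts because phantom-ness is a property of (state, pipe) rather than a flag on the plane. Classifying the pipe in the old and new states independently is what lets a real-to-phantom transition be detected and forced through a full disable first. Sketch of the classification, assuming the dc_state_priv helper included above:

/* Sketch: a real->phantom transition is treated as a disable in the
 * new context; the post-unlock sequence re-enables the phantom pipe. */
bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

if (old_pipe->plane_state && !old_is_phantom &&
    new_pipe->plane_state && new_is_phantom)
	new_pipe->update_flags.bits.disable = 1;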
@@ -1405,6 +1412,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
new_pipe->update_flags.bits.odm = 1;
new_pipe->update_flags.bits.global_sync = 1;
@@ -1417,14 +1428,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
* The remove-add sequence of the phantom pipe always results in the pipe
* being blanked in enable_stream_timing (DPG).
*/
- if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
new_pipe->update_flags.bits.enable = 1;
/* Phantom pipes are effectively disabled, if the pipe was previously phantom
* we have to enable
*/
- if (old_pipe->plane_state && old_pipe->plane_state->is_phantom &&
- new_pipe->plane_state && !new_pipe->plane_state->is_phantom)
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
new_pipe->update_flags.bits.enable = 1;
if (old_pipe->plane_state && !new_pipe->plane_state) {
@@ -1557,6 +1568,7 @@ static void dcn20_update_dchubp_dpp(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dccg *dccg = dc->res_pool->dccg;
bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1702,7 +1714,7 @@ static void dcn20_update_dchubp_dpp(
pipe_ctx->update_flags.bits.plane_changed ||
plane_state->update_flags.bits.addr_update) {
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_mall_type == SUBVP_MAIN) {
union block_sequence_params params;
params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
@@ -1716,7 +1728,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
/* If the stream paired with this plane is phantom, the plane is also phantom */
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM
+ if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM
&& hubp->funcs->phantom_hubp_post_enable)
hubp->funcs->phantom_hubp_post_enable(hubp);
}
@@ -1774,7 +1786,7 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
@@ -1914,7 +1926,7 @@ void dcn20_program_front_end_for_ctx(
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
@@ -1924,15 +1936,16 @@ void dcn20_program_front_end_for_ctx(
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
if (dc->hwss.blank_phantom) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
- main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = phantom_stream->dst.width;
+ main_pipe_height = phantom_stream->dst.height;
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
}
tg->funcs->enable_crtc(tg);
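Note: with mall_stream_config gone from the stream object, the paired stream is now looked up through the state, and the phantom pipe is blanked to that stream's destination size before its OTG is enabled. A hedged sketch of the lookup (the NULL check is added here for illustration; the caller context above already guarantees a SubVP pairing):

/* Sketch: size the phantom blank from the paired stream resolved
 * against the current state rather than a field cached on the stream. */
struct dc_stream_state *paired =
	dc_state_get_paired_subvp_stream(dc->current_state, pipe->stream);

if (dc->hwss.blank_phantom && paired)
	dc->hwss.blank_phantom(dc, tg, paired->dst.width, paired->dst.height);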
@@ -1961,9 +1974,9 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom)))
+ (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
- hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -1996,7 +2009,7 @@ void dcn20_program_front_end_for_ctx(
* but the MPO still exists until the double buffered update of the main pipe so we
* will get a frame of underflow if the phantom pipe is programmed here.
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
@@ -2035,7 +2048,7 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -2047,7 +2060,7 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
@@ -2070,7 +2083,7 @@ void dcn20_post_unlock_program_front_end(
* programming sequence).
*/
while (pipe) {
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
/* When turning on the phantom pipe we want to run through the
* entire enable sequence, so apply all the "enable" flags.
*/
@@ -2140,17 +2153,17 @@ void dcn20_prepare_bandwidth(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't restore the original watermark value
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2163,10 +2176,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
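Note: the comment above is the crux of the wm_optimized_required -> optimized_required rename: prepare_bandwidth can run more than once before the matching optimize step, so every site ORs into dc->optimized_required rather than assigning. Standalone illustration of why a plain '=' would be wrong:

/* Standalone sketch: a sticky flag accumulated with |=. */
bool optimized_required = false;

optimized_required |= true;	/* first prepare: compbuf size changed */
optimized_required |= false;	/* second prepare: nothing changed     */
/* still true; '=' on the second call would have lost the request. */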
@@ -2184,7 +2197,7 @@ void dcn20_optimize_bandwidth(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't need to restore the original watermark value
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
@@ -2218,7 +2231,8 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
- if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+ !dc->debug.disable_extblankadj) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2331,7 +2345,7 @@ bool dcn20_wait_for_blank_complete(
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
@@ -2342,7 +2356,7 @@ bool dcn20_wait_for_blank_complete(
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
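Note: dcn20_wait_for_blank_complete() now waits for the double-buffered DPG update to stop being pending and only then samples the blanked status once, instead of polling dpg_is_blanked() directly and possibly reading a stale pre-update value. The reworked loop in isolation (same 1000 x 100 us budget as above; the timeout error path is trimmed):

/* Sketch of the reworked wait: spin on "pending", then report the
 * settled state with a single final read. */
for (counter = 0; counter < 1000; counter++) {
	if (!opp->funcs->dpg_is_pending(opp))
		break;		/* double-buffered update has landed */
	udelay(100);
}
return opp->funcs->dpg_is_blanked(opp);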
@@ -2548,7 +2562,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2944,7 +2958,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
/*to do*/
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -2961,7 +2975,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index ab02e4e9c..d950b3e54 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -52,7 +52,7 @@ void dcn20_program_output_csc(struct dc *dc,
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn20_disable_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing(
void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 884e3e323..884e3e323 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h
index 12277797c..12277797c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index d3fe6092f..d5769f388 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc)
res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = res_pool->opps[i];
/*To do: number of MPCC != number of opp*/
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc)
for (i = 0; i < res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn201_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
index 26cd62be6..6a50a9894 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
@@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc);
void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn201_pipe_control_lock(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index a13bf6c93..a13bf6c93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h
index 1168887b0..1168887b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 5c7f380a8..7252f5f78 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -211,7 +211,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
uint32_t otg_inst;
- if (!abm && !tg && !panel_cntl)
+ if (!abm || !tg || !panel_cntl)
return;
otg_inst = tg->inst;
@@ -245,7 +245,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
uint32_t otg_inst;
- if (!abm && !tg && !panel_cntl)
+ if (!abm || !tg || !panel_cntl)
return false;
otg_inst = tg->inst;
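Note: this is a genuine logic fix, not a refactor. The old guard `!abm && !tg && !panel_cntl` bailed out only when all three pointers were NULL, so a single NULL still reached `tg->inst`; by De Morgan, rejecting "not all present" requires ||. Standalone form of the corrected guard:

/* Standalone sketch: proceed only when every required object exists. */
static bool can_program_abm(const void *abm, const void *tg,
			    const void *panel_cntl)
{
	/* The old guard fired only for (NULL, NULL, NULL); this rejects
	 * any missing object before tg->inst would be dereferenced. */
	return abm && tg && panel_cntl;
}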
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index 18249c6b6..18249c6b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h
index 3ed242926..3ed242926 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index c89149d15..55cf4c9e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -51,7 +51,7 @@
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
-
+#include "dc_state_priv.h"
@@ -367,6 +367,10 @@ void dcn30_enable_writeback(
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
+
+ /* Warmup interface */
+ dcn30_mmhubbub_warmup(dc, 1, wb_info);
+
/* Update writeback pipe */
dcn30_set_writeback(dc, wb_info, context);
@@ -472,6 +476,7 @@ void dcn30_init_hw(struct dc *dc)
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -608,13 +613,15 @@ void dcn30_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -972,7 +979,7 @@ void dcn30_hardware_release(struct dc *dc)
if (!pipe->stream)
continue;
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 9894caedf..9894caedf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h
index c280ff90b..c280ff90b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 6477009ce..6477009ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
index 0bca48ccb..0bca48ccb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c
index 637f9514d..637f9514d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h
index 899587b93..899587b93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c
index edb4d68b8..edb4d68b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h
index 494998112..494998112 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 3a70a3cbc..7423880fa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -96,7 +96,8 @@ static void enable_memory_low_power(struct dc *dc)
if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
- dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+ if (dc->res_pool->stream_enc[i]->vpg)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
@@ -112,6 +113,7 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -223,13 +225,15 @@ void dcn31_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 669f524bd..669f524bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h
index a3db08c8b..a3db08c8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index ccb7e317e..ccb7e317e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h
index 8f92e6657..8f92e6657 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 580afb008..766822943 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -51,6 +51,7 @@
#include "dcn32/dcn32_resource.h"
#include "link.h"
#include "../dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -348,8 +349,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
// There is at least 1 SubVP pipe, so enable SubVP
enable_subvp = true;
break;
@@ -375,18 +375,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
bool subvp_immediate_flip = false;
bool subvp_in_use = false;
struct pipe_ctx *pipe;
+ enum mall_stream_type pipe_mall_type = SUBVP_NONE;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
- if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
}
if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) {
- if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN &&
+ if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN &&
top_pipe_to_program->plane_state->flip_immediate)
subvp_immediate_flip = true;
}
@@ -398,7 +400,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
if (!lock) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+ if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN &&
should_lock_all_pipes)
pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
}
@@ -416,14 +418,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params)
{
struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc;
bool lock = params->subvp_pipe_control_lock_fast_params.lock;
- struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx;
- bool subvp_immediate_flip = false;
-
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) {
- if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN &&
- pipe_ctx->plane_state->flip_immediate)
- subvp_immediate_flip = true;
- }
+ bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip;
// Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
if (subvp_immediate_flip) {
@@ -609,7 +604,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
@@ -624,7 +619,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
@@ -671,8 +666,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (cursor_size > 16384)
cache_cursor = true;
- if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
// MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
@@ -714,9 +709,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
* see if CURSOR_REQ_MODE will be back to 1 for SubVP
* when it should be 0 for MPO
*/
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
hubp->funcs->hubp_prepare_subvp_buffering(hubp, true);
- }
}
}
}
@@ -759,6 +753,7 @@ void dcn32_init_hw(struct dc *dc)
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -913,13 +908,15 @@ void dcn32_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL && abms[i]->funcs != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -1194,7 +1191,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
continue;
if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
- && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -1345,8 +1342,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
- pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN &&
+ dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) {
if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
@@ -1371,21 +1368,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
{
phantom_pipe->update_flags.raw = 0;
- if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
- phantom_pipe->update_flags.bits.enable = 1;
- phantom_pipe->update_flags.bits.mpcc = 1;
- phantom_pipe->update_flags.bits.dppclk = 1;
- phantom_pipe->update_flags.bits.hubp_interdependent = 1;
- phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
- phantom_pipe->update_flags.bits.gamut_remap = 1;
- phantom_pipe->update_flags.bits.scaler = 1;
- phantom_pipe->update_flags.bits.viewport = 1;
- phantom_pipe->update_flags.bits.det_size = 1;
- if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
- phantom_pipe->update_flags.bits.odm = 1;
- phantom_pipe->update_flags.bits.global_sync = 1;
- }
+ if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
+ phantom_pipe->update_flags.bits.enable = 1;
+ phantom_pipe->update_flags.bits.mpcc = 1;
+ phantom_pipe->update_flags.bits.dppclk = 1;
+ phantom_pipe->update_flags.bits.hubp_interdependent = 1;
+ phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ phantom_pipe->update_flags.bits.gamut_remap = 1;
+ phantom_pipe->update_flags.bits.scaler = 1;
+ phantom_pipe->update_flags.bits.viewport = 1;
+ phantom_pipe->update_flags.bits.det_size = 1;
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
+ phantom_pipe->update_flags.bits.odm = 1;
+ phantom_pipe->update_flags.bits.global_sync = 1;
}
}
}
@@ -1445,9 +1440,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+ (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
{
unsigned int i;
+ enum dc_status status = DC_OK;
+ struct dce_hwseq *hws = dc->hwseq;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1457,8 +1487,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
* pipe, wait for the double buffer update to complete first before we do
* ANY phantom pipe programming.
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM &&
+ old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) {
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VBLANK);
@@ -1468,16 +1498,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream &&
+ pipe_ctx->stream->link->link_state_valid) {
+ continue;
}
+
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+ continue;
+
+ if (hws->funcs.apply_single_controller_ctx_to_hw)
+ status = hws->funcs.apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (hws->funcs.resync_fifo_dccg_dio)
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
}
}
@@ -1691,3 +1744,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
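
The new interdependent_update_lock hook replaces dcn10_lock_all_pipes (see the dcn32_init.c hunk below) and narrows the set of locked pipes; since the lock flag passes through unchanged, the body is effectively one predicate plus one pipe_control_lock() call. Distilled as a sketch:

    #include <stdbool.h>

    /* lock only enabled, non-phantom OTG masters */
    static bool should_lock(bool otg_master, bool tg_enabled, bool phantom)
    {
            return otg_master && tg_enabled && !phantom;
    }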
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cecf7f0f5..f55c11fc5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
@@ -127,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 427cfc8c2..03253faea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.enable_phantom_streams = dcn32_enable_phantom_streams,
+ .disable_phantom_streams = dcn32_disable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+ .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h
index 89a591eb2..89a591eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 325a711a1..1e67374b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -56,6 +56,7 @@
#include "dcn30/dcn30_cm_common.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger) \
struct dal_logger *dc_logger = logger
@@ -133,6 +134,7 @@ void dcn35_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -145,17 +147,36 @@ void dcn35_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
}
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
- PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYESYMCLK_ROOT_GATE_DISABLE, 1);
+ if (!dc->debug.disable_clock_gate) {
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
+ REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYESYMCLK_ROOT_GATE_DISABLE, 1);
+
+ REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
+ DPIASYMCLK0_GATE_DISABLE, 0,
+ DPIASYMCLK1_GATE_DISABLE, 0,
+ DPIASYMCLK2_GATE_DISABLE, 0,
+ DPIASYMCLK3_GATE_DISABLE, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
+ REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
+ DTBCLK_P0_GATE_DISABLE, 0,
+ DTBCLK_P1_GATE_DISABLE, 0,
+ DTBCLK_P2_GATE_DISABLE, 0,
+ DTBCLK_P3_GATE_DISABLE, 0);
+ REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK0_GATE_DISABLE, 0,
+ DPSTREAMCLK1_GATE_DISABLE, 0,
+ DPSTREAMCLK2_GATE_DISABLE, 0,
+ DPSTREAMCLK3_GATE_DISABLE, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf);
+ }
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
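
For readers unfamiliar with the register macros in the hunk above: REG_WRITE stores a whole register, while REG_UPDATE_N performs a read-modify-write of N named fields. A rough model of one field update (the real macros derive mask and shift from generated headers; these names are illustrative):

    #include <stdint.h>

    static uint32_t reg_update_field(uint32_t reg, uint32_t mask,
                                     unsigned int shift, uint32_t value)
    {
            return (reg & ~mask) | ((value << shift) & mask);
    }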
@@ -260,13 +281,15 @@ void dcn35_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
if (dc->ctx->dmub_srv) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL && abms[i]->funcs != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
}
@@ -332,9 +355,6 @@ void dcn35_init_hw(struct dc *dc)
if (dc->res_pool->pg_cntl) {
if (dc->res_pool->pg_cntl->funcs->init_pg_status)
dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
-
- if (dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22)
- dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22(dc->res_pool->pg_cntl, false);
}
}
@@ -619,7 +639,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
@@ -627,6 +647,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 0 || edp_num > 1)
return false;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ return false;
+ }
}
// TODO: review other cases when idle optimization is allowed
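
The added loop tightens the IPS entry condition: besides requiring exactly one eDP link, every stream still lit (dpms on) must be on an embedded signal. As a pure function, with arrays standing in for the stream list:

    #include <stdbool.h>

    static bool streams_allow_idle(int n, const bool dpms_off[],
                                   const bool embedded[])
    {
            for (int i = 0; i < n; i++)
                    if (!dpms_off[i] && !embedded[i])
                            return false;
            return true;
    }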
@@ -756,12 +783,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -888,10 +915,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
- bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -918,6 +945,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
bool hpo_frl_stream_enc_acquired = false;
bool hpo_dp_stream_enc_acquired = false;
int i = 0, j = 0;
+ int edp_num = 0;
+ struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
memset(update_state, 0, sizeof(struct pg_block_update));
@@ -958,10 +987,24 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+ }
+ /* domain24 controls all the OTG, MPC, and OPP; as long as one OTG is still up, avoid enabling OTG PG */
REPLACED_BY_GR_REPLACE
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ if (tg && tg->funcs->is_tg_enabled(tg)) {
+ update_state->pg_pipe_res_update[PG_OPTC][i] = false;
+ break;
+ }
+ }
- if (pipe_ctx->stream_res.tg)
- update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false;
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (edp_num == 0 ||
+ ((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
+ (!edp_links[1] || !edp_links[1]->edp_sink_present))) {
+ /* no eDP exists in this config; keep Domain24 powered on. For S0i3 this will be handled in dmubfw */
+ update_state->pg_pipe_res_update[PG_OPTC][0] = false;
}
+
}
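
The reworked OPTC gating decision above reduces to two rules: domain 24 must stay powered while any OTG is enabled, and OPTC instance 0 stays powered when no eDP sink is present (S0i3 handling then falls to DMUB firmware). A compact model, simplified to instance 0:

    #include <stdbool.h>

    static bool may_gate_optc0(int tg_count, const bool tg_enabled[],
                               bool edp_sink_present)
    {
            for (int i = 0; i < tg_count; i++)
                    if (tg_enabled[i])
                            return false;   /* domain24 shared by all OTGs */
            return edp_sink_present;        /* no eDP: keep OPTC0 powered  */
    }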
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
@@ -1047,8 +1090,29 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
}
-void dcn35_block_power_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on)
+/**
+ * dcn35_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the lono timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn35_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state)
{
int i = 0;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
@@ -1057,64 +1121,106 @@ void dcn35_block_power_control(struct dc *dc,
return;
if (dc->debug.ignore_pg)
return;
+
if (update_state->pg_res_update[PG_HPO]) {
if (pg_cntl->funcs->hpo_pg_control)
- pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on);
+ pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (pg_cntl->funcs->hubp_dpp_pg_control)
- pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on);
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
}
-
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (pg_cntl->funcs->dsc_pg_control)
- pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on);
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
}
- if (update_state->pg_pipe_res_update[PG_MPCC][i]) {
- if (pg_cntl->funcs->mpcc_pg_control)
- pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on);
- }
-
- if (update_state->pg_pipe_res_update[PG_OPP][i]) {
- if (pg_cntl->funcs->opp_pg_control)
- pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on);
- }
- if (update_state->pg_pipe_res_update[PG_OPTC][i]) {
- if (pg_cntl->funcs->optc_pg_control)
- pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on);
- }
- }
+ /* this will need all the clients to unregister optc interrupts; let dmubfw handle this */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
- if (update_state->pg_res_update[PG_DWB]) {
- if (pg_cntl->funcs->dwb_pg_control)
- pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on);
- }
+ //domain22, 23, 25 currently always on.
- if (pg_cntl->funcs->plane_otg_pg_control)
- pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on);
}
-void dcn35_root_clock_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on)
+/**
+ * dcn35_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn35_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state)
{
int i = 0;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
if (!pg_cntl)
return;
+ if (dc->debug.ignore_pg)
+ return;
+ //domain22, 23, 25 currently always on.
+ /* this will need all the clients to unregister optc interrupts; let dmubfw handle this */
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
- if (dc->hwseq->funcs.dpp_root_clock_control)
- dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
}
+ }
+ if (update_state->pg_res_update[PG_HPO]) {
+ if (pg_cntl->funcs->hpo_pg_control)
+ pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
+ }
+}
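
The two kernel-doc sequences mirror each other: power-up reverses the power-down group order while keeping instance order ascending within each group, SKIPPED entries included. The DCPG ids, copied from the comments into two tables for side-by-side comparison (illustrative only):

    /* DCPG ids in the documented power-down and power-up orders */
    static const int dcpg_down[] = { 25, 0, 1, 2, 3, 23, 16, 17, 18, 19, 24, 22 };
    static const int dcpg_up[]   = { 22, 24, 16, 17, 18, 19, 23, 0, 1, 2, 3, 25 };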
+void dcn35_root_clock_control(struct dc *dc,
+ struct pg_block_update *update_state, bool power_on)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+ if (!pg_cntl)
+ return;
+ /* enable root clock first when powering up */
+ if (power_on)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (dc->hwseq->funcs.dpp_root_clock_control)
+ dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ }
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (power_on) {
if (dc->res_pool->dccg->funcs->enable_dsc)
@@ -1125,6 +1231,15 @@ void dcn35_root_clock_control(struct dc *dc,
}
}
}
+ /* disable root clock first when powering down */
+ if (!power_on)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (dc->hwseq->funcs.dpp_root_clock_control)
+ dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ }
+ }
}
void dcn35_prepare_bandwidth(
@@ -1138,9 +1253,9 @@ void dcn35_prepare_bandwidth(
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, true);
-
- if (dc->hwss.block_power_control)
- dc->hwss.block_power_control(dc, &pg_update_state, true);
+ /* power up required HW blocks */
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
dcn20_prepare_bandwidth(dc, context);
@@ -1156,9 +1271,9 @@ void dcn35_optimize_bandwidth(
if (dc->hwss.calc_blocks_to_gate) {
dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
-
- if (dc->hwss.block_power_control)
- dc->hwss.block_power_control(dc, &pg_update_state, false);
+ /* try to power down unused blocks */
+ if (dc->hwss.hw_block_power_down)
+ dc->hwss.hw_block_power_down(dc, &pg_update_state);
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
@@ -1180,3 +1295,44 @@ uint32_t dcn35_get_idle_state(const struct dc *dc)
return 0;
}
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, struct dc_crtc_timing_adjust adjust)
+{
+ int i = 0;
+ struct drr_params params = {0};
+ // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
+ unsigned int event_triggers = 0x800;
+ // Note: DRR trigger events are generated regardless of whether num_frames is met.
+ unsigned int num_frames = 2;
+
+ params.vertical_total_max = adjust.v_total_max;
+ params.vertical_total_min = adjust.v_total_min;
+ params.vertical_total_mid = adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
+
+ for (i = 0; i < num_pipes; i++) {
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+ struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
+
+ if (dc->debug.static_screen_wait_frames) {
+ unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
+
+ if (frame_rate >= 120 && dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+ /*ips enable case*/
+ num_frames = 2 * (frame_rate % 60);
+ }
+ }
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(
+ pipe_ctx[i]->stream_res.tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx[i]->stream_res.tg,
+ event_triggers, num_frames);
+ }
+ }
+}
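
dcn35_set_drr keeps the default of two static-screen frames and only recomputes it on IPS-capable configs at 120 Hz and above. The expression below is reproduced verbatim from the hunk (note that frame_rate is pix_clk_100hz / (h_total * v_total), and that the modulo yields 0 for exact multiples of 60). As a pure function:

    #include <stdbool.h>

    /* verbatim model of the num_frames selection above */
    static unsigned int drr_num_frames(unsigned int frame_rate, bool ips_enabled)
    {
            unsigned int num_frames = 2;

            if (ips_enabled && frame_rate >= 120)
                    num_frames = 2 * (frame_rate % 60);
            return num_frames;
    }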
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0dff10d17..fd66316e3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -57,14 +57,16 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context);
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
-void dcn35_block_power_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on);
+void dcn35_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state);
+void dcn35_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state);
void dcn35_root_clock_control(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
@@ -84,4 +86,8 @@ void dcn35_dsc_pg_control(
void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
uint32_t dcn35_get_idle_state(const struct dc *dc);
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, struct dc_crtc_timing_adjust adjust);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 296bf3a38..a630aa77d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -68,7 +68,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
@@ -118,7 +118,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
- .block_power_control = dcn35_block_power_control,
+ .hw_block_power_up = dcn35_hw_block_power_up,
+ .hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
.set_idle_state = dcn35_set_idle_state,
.get_idle_state = dcn35_get_idle_state
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h
index b67015032..b67015032 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
new file mode 100644
index 000000000..951ca2da4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn351_init.c
+ dcn351_init.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
new file mode 100644
index 000000000..b24ad27fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
@@ -0,0 +1,17 @@
+#
+# (c) Copyright 2022 Advanced Micro Devices, Inc. All rights reserved.
+#
+# All rights reserved. This notice is intended as a precaution against
+# inadvertent publication and does not imply publication or any waiver
+# of confidentiality. The year included in the foregoing notice is the
+# year of creation of the work.
+#
+# Authors: AMD
+#
+# Makefile for DCN351.
+
+DCN351 = dcn351_init.o
+
+AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN351)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
new file mode 100644
index 000000000..143d3fc02
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hwseq.h"
+#include "dcn10/dcn10_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dcn301/dcn301_hwseq.h"
+#include "dcn31/dcn31_hwseq.h"
+#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#include "dcn351_init.h"
+
+static const struct hw_sequencer_funcs dcn351_funcs = {
+ .program_gamut_remap = dcn30_program_gamut_remap,
+ .init_hw = dcn35_init_hw,
+ .power_down_on_boot = dcn35_power_down_on_boot,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dcn31_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn32_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn35_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn35_prepare_bandwidth,
+ .optimize_bandwidth = dcn35_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn30_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dcn30_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn30_enable_writeback,
+ .disable_writeback = dcn30_disable_writeback,
+ .update_writeback = dcn30_update_writeback,
+ .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn30_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn31_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ .set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dcn32_disable_link_output,
+ .z10_restore = dcn35_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
+ .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
+ .update_dsc_pg = dcn32_update_dsc_pg,
+ .calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
+ .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
+ .hw_block_power_up = dcn35_hw_block_power_up,
+ .hw_block_power_down = dcn35_hw_block_power_down,
+ .root_clock_control = dcn35_root_clock_control,
+ .set_idle_state = dcn35_set_idle_state,
+ .get_idle_state = dcn35_get_idle_state
+};
+
+static const struct hwseq_private_funcs dcn351_private_funcs = {
+ .init_pipes = dcn35_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn32_set_input_transfer_func,
+ .set_output_transfer_func = dcn32_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = NULL,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn35_plane_atomic_disable,
+ //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
+ //.hubp_pg_control = dcn35_hubp_pg_control,
+ .enable_power_gating_plane = dcn35_enable_power_gating_plane,
+ .dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .update_odm = dcn35_update_odm,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_mcm_luts = dcn32_set_mcm_luts,
+ .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .dsc_pg_control = dcn35_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
+ .enable_plane = dcn35_enable_plane,
+};
+
+void dcn351_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn351_funcs;
+ dc->hwseq->funcs = dcn351_private_funcs;
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
new file mode 100644
index 000000000..970b01008
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN351_INIT_H__
+#define __DC_DCN351_INIT_H__
+
+struct dc;
+
+void dcn351_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN351_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 452680fe9..64ca7c665 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -50,7 +50,7 @@ struct pg_block_update;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
bool lock;
- struct pipe_ctx *pipe_ctx;
+ bool subvp_immediate_flip;
};
struct pipe_control_lock_params {
@@ -200,7 +200,7 @@ struct hw_sequencer_funcs {
struct dc_state *context);
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
- void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -248,6 +248,7 @@ struct hw_sequencer_funcs {
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,
+ struct dc_state *state,
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*enable_vblanks_synchronization)(struct dc *dc,
@@ -378,6 +379,7 @@ struct hw_sequencer_funcs {
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+ void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
@@ -414,8 +416,10 @@ struct hw_sequencer_funcs {
struct pg_block_update *update_state);
void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
- void (*block_power_control)(struct dc *dc,
- struct pg_block_update *update_state, bool power_on);
+ void (*hw_block_power_up)(struct dc *dc,
+ struct pg_block_update *update_state);
+ void (*hw_block_power_down)(struct dc *dc,
+ struct pg_block_update *update_state);
void (*root_clock_control)(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
void (*set_idle_state)(const struct dc *dc, bool allow_idle);
@@ -452,17 +456,18 @@ void get_mpctree_visual_confirm_color(
struct tg_color *color);
void get_subvp_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
void get_mclk_switch_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void set_p_state_switch_method(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
void hwss_execute_sequence(struct dc *dc,
struct block_sequence block_sequence[],
int num_steps);
@@ -472,7 +477,8 @@ void hwss_build_fast_sequence(struct dc *dc,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
int *num_steps,
- struct pipe_ctx *pipe_ctx);
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_status *stream_status);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 82c592166..b3c62a82c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -79,6 +79,7 @@ struct hwseq_private_funcs {
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*plane_atomic_disconnect)(struct dc *dc,
+ struct dc_state *state,
struct pipe_ctx *pipe_ctx);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool (*set_input_transfer_func)(struct dc *dc,
@@ -164,8 +165,15 @@ struct hwseq_private_funcs {
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
struct dc_state *context);
+ enum dc_status (*apply_single_controller_ctx_to_hw)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
#endif
+ void (*reset_back_end_for_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index bc9cda329..3a6bf77a6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -200,11 +200,7 @@ struct resource_funcs {
unsigned int pipe_cnt,
unsigned int index);
- bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update);
- void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
- void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
- void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
};
@@ -385,6 +381,16 @@ union pipe_update_flags {
uint32_t raw;
};
+enum p_state_switch_method {
+ P_STATE_UNKNOWN = 0,
+ P_STATE_V_BLANK = 1,
+ P_STATE_FPO,
+ P_STATE_V_ACTIVE,
+ P_STATE_SUB_VP,
+ P_STATE_DRR_SUB_VP,
+ P_STATE_V_BLANK_SUB_VP
+};
+
struct pipe_ctx {
struct dc_plane_state *plane_state;
struct dc_stream_state *stream;
@@ -433,6 +439,7 @@ struct pipe_ctx {
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
union pipe_update_flags update_flags;
+ enum p_state_switch_method p_state_type;
struct tg_color visual_confirm_color;
bool has_vactive_margin;
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
@@ -528,6 +535,14 @@ struct dc_state {
* @stream_status: Planes status on a given stream
*/
struct dc_stream_status stream_status[MAX_PIPES];
+ /**
+ * @phantom_streams: Stream state properties for phantoms
+ */
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ /**
+ * @phantom_planes: Planes state properties for phantoms
+ */
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
/**
* @stream_count: Total of streams in use
@@ -536,6 +551,14 @@ struct dc_state {
uint8_t stream_mask;
/**
+ * @phantom_stream_count: Total phantom streams in use
+ */
+ uint8_t phantom_stream_count;
+ /**
+ * @phantom_plane_count: Total phantom planes in use
+ */
+ uint8_t phantom_plane_count;
+ /**
* @res_ctx: Persistent state of resources
*/
struct resource_context res_ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 9f521cf0f..3f0161d64 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -36,7 +36,7 @@ struct abm {
};
struct abm_funcs {
- void (*abm_init)(struct abm *abm, uint32_t back_light);
+ void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 55ded5fb8..17e014d3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -62,6 +62,25 @@ struct dcn3_clk_internal {
uint32_t CLK4_CLK0_CURRENT_CNT; //fclk
};
+struct dcn35_clk_internal {
+ int dummy;
+ uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
+ uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk
+ uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk
+ uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk
+ uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk
+ //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk
+ //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk
+ uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow
+
+ uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass
+ uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass
+ uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass
+ uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass
+ uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass
+};
+
struct dcn301_clk_internal {
int dummy;
uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 6b44557fc..b9a06bf84 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -59,8 +59,8 @@ enum dentist_dispclk_change_mode {
struct dp_dto_params {
int otg_inst;
enum signal_type signal;
- long long pixclk_hz;
- long long refclk_hz;
+ uint64_t pixclk_hz;
+ uint64_t refclk_hz;
};
enum pixel_rate_div {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 86b711dcc..729ca0064 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -188,6 +188,10 @@ struct dwbc_funcs {
bool (*is_enabled)(
struct dwbc *dwbc);
+ void (*set_fc_enable)(
+ struct dwbc *dwbc,
+ enum dwb_frame_capture_enable enable);
+
void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b95ae9596..dcae23fae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -43,6 +43,7 @@
* to be used inside loops and for determining array sizes.
*/
#define MAX_PIPES 6
+#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
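
The new bound encodes the SubVP pairing assumption: each phantom pipe shadows exactly one main pipe, so at most half the pipes can be phantoms. A hypothetical compile-time check, with the macros duplicated so the snippet stands alone:

    #define MAX_PIPES 6
    #define MAX_PHANTOM_PIPES (MAX_PIPES / 2)

    _Static_assert(2 * MAX_PHANTOM_PIPES <= MAX_PIPES,
                   "every phantom pipe must pair with a main pipe");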
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7617fabbd..071792081 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -321,6 +321,9 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 660897e12..e97d964a1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers {
unsigned int BL_PWM_PERIOD_CNTL;
unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
unsigned int PANEL_PWRSEQ_REF_DIV2;
+ unsigned int USER_LEVEL;
};
struct panel_cntl_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index b9812afb8..00ea3864d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -47,8 +47,6 @@ struct pg_cntl_funcs {
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
-
- void (*set_force_poweron_domain22)(struct pg_cntl *pg_cntl, bool power_on);
};
#endif //__DC_PG_CNTL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 9a00a9931..cad3e5f14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -333,6 +333,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index d76853681..bf29fc58e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -281,11 +281,16 @@ struct link_service {
const unsigned int *power_opts);
bool (*edp_setup_replay)(struct dc_link *link,
const struct dc_stream_state *stream);
+ bool (*edp_send_replay_cmd)(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
+ bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 3d7244393..77a60aa9f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -497,6 +497,18 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx(
const struct resource_pool *pool);
/*
+ * Look for a free pipe in new resource context that is used in current resource
+ * context as an OTG master pipe.
+ *
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
* Look for a free pipe in new resource context that is used as a secondary DPP
* pipe in any MPCC combine in current resource context.
* return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
@@ -557,9 +569,6 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
-void get_audio_check(struct audio_info *aud_modes,
- struct audio_check *aud_chk);
-
bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
@@ -606,5 +615,4 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 007ee32c2..3cbfbf8d1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -1279,86 +1279,6 @@ static void remove_stream_from_alloc_table(
}
}
-static enum dc_status deallocate_mst_payload_with_temp_drm_wa(
- struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- int i;
- bool mst_mode = (link->type == dc_connection_mst_branch);
- /* adjust for drm changes*/
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- const struct dc_link_settings empty_link_settings = {0};
- DC_LOGGER_INIT(link->ctx->logger);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &empty_link_settings,
- avg_time_slots_per_mtp);
-
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- false))
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- else
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
-
- DC_LOG_MST("%s"
- "stream_count: %d: ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_DEBUG("Unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- if (mst_mode) {
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
- }
-
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
-
- return DC_OK;
-}
-
static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -1371,9 +1291,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
const struct dc_link_settings empty_link_settings = {0};
DC_LOGGER_INIT(link->ctx->logger);
- if (link->dc->debug.temp_mst_deallocation_sequence)
- return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx);
-
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
* stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -1446,16 +1363,14 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
&link->mst_stream_alloc_table);
- if (mst_mode) {
+ if (mst_mode)
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
- }
+ dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+ stream->ctx,
+ stream);
return DC_OK;
}
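
With the temporary DRM workaround path deleted, deallocation has a single flow: program the HW stream allocation table, poll for the ACT trigger only on real MST links, then hand the DRM-side payload state to the new dedicated deallocation helper instead of calling send_payload_allocation(false). Sketch with hypothetical step callbacks:

    #include <stdbool.h>

    typedef void (*mst_step)(void);

    static void deallocate_flow(bool mst_mode, mst_step update_hw_table,
                                mst_step poll_act, mst_step update_mst_mgr)
    {
            update_hw_table();      /* HW stream allocation table        */
            if (mst_mode)
                    poll_act();     /* ACT trigger, MST branch only      */
            update_mst_mgr();       /* dm_helpers ..._for_deallocation() */
    }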
@@ -1536,12 +1451,10 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
stream->ctx,
stream);
- if (ret != ACT_LINK_LOST) {
+ if (ret != ACT_LINK_LOST)
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
- stream,
- true);
- }
+ stream);
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -1801,8 +1714,7 @@ enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in
/* send ALLOCATE_PAYLOAD sideband message with updated pbn */
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
- stream,
- true);
+ stream);
/* notify immediate branch device table update */
if (dm_helpers_dp_mst_write_payload_allocation_table(
@@ -1931,8 +1843,7 @@ enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_
/* send ALLOCATE_PAYLOAD sideband message with updated pbn */
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
- stream,
- true);
+ stream);
}
/* increase throttled vcp size */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 41f8230f9..cf22b8f28 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -213,8 +213,10 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
link_srv->edp_get_replay_state = edp_get_replay_state;
link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active;
link_srv->edp_setup_replay = edp_setup_replay;
+ link_srv->edp_send_replay_cmd = edp_send_replay_cmd;
link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal;
link_srv->edp_replay_residency = edp_replay_residency;
+ link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal;
link_srv->edp_wait_for_t12 = edp_wait_for_t12;
link_srv->edp_is_ilr_optimization_required =
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 4a954317d..595fb0594 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -25,6 +25,7 @@
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
#include "link.h"
+
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 2f11eaabb..289f5d133 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -412,12 +412,18 @@ static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
{
enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
- if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+ if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) {
cable_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+ } else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) {
cable_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
- cable_max_link_rate = LINK_RATE_UHBR10;
+ } else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) {
+ // allow DP40 cables to do UHBR13.5 for passive or unknown cable type
+ if (link->dpcd_caps.cable_id.bits.CABLE_TYPE < 2) {
+ cable_max_link_rate = LINK_RATE_UHBR13_5;
+ } else {
+ cable_max_link_rate = LINK_RATE_UHBR10;
+ }
+ }
return cable_max_link_rate;
}
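
The cable policy change is easiest to see as a pure function: UHBR20- and UHBR13.5-capable cables are unchanged, but a cable reporting only UHBR10 may now be promoted to UHBR13.5 when its CABLE_TYPE is below 2 (passive or unknown, per the comment). Self-contained model with illustrative names:

    #include <stdbool.h>

    enum model_rate { RATE_UNKNOWN, RATE_UHBR10, RATE_UHBR13_5, RATE_UHBR20 };

    static enum model_rate cable_max_rate(bool uhbr20, bool uhbr13_5,
                                          bool uhbr10, unsigned int cable_type)
    {
            if (uhbr20)
                    return RATE_UHBR20;
            if (uhbr13_5)
                    return RATE_UHBR13_5;
            if (uhbr10)
                    return cable_type < 2 ? RATE_UHBR13_5 : RATE_UHBR10;
            return RATE_UNKNOWN;
    }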
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 982eda3c4..6af42ba98 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -82,25 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link)
{
union dmub_rb_cmd cmd = {0};
struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
- bool is_hpd_high = false;
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
- /* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
- cmd.query_hpd.data.status == AUX_RET_SUCCESS)
- is_hpd_high = cmd.query_hpd.data.result;
-
- DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
- __func__,
- link->link_index,
- link->link_id.enum_id - ENUM_ID_1,
- cmd.query_hpd.data.status,
- cmd.query_hpd.data.result);
-
- return is_hpd_high;
+ /* Query dpia hpd status from dmub */
+ if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.query_hpd.data.status == AUX_RET_SUCCESS) {
+ DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->hpd_status,
+ cmd.query_hpd.data.result);
+ link->hpd_status = cmd.query_hpd.data.result;
+ } else {
+ DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ cmd.query_hpd.data.status,
+ link->hpd_status);
+ link->hpd_status = false;
+ }
+
+ return link->hpd_status;
}
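Since the helper now writes its result through link->hpd_status before returning it, the cache and the return value always agree. A minimal caller sketch, assuming only the names visible in this hunk:

static bool sketch_refresh_dpia_hpd(struct dc_link *link)
{
	bool plugged = dpia_query_hpd_status(link);

	/* On a failed DMUB query the helper forces link->hpd_status to
	 * false before returning it, so this comparison always holds.
	 */
	return plugged == link->hpd_status;
}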
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 9eadc2c7f..ba69874be 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -265,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link)
for (i = count - 1; i >= 0; i--) {
// Always use max settings here for DP 1.4a LL Compliance CTS
- if (link->is_automated) {
+ if (link->skip_fallback_on_link_loss) {
pipes[i]->link_config.dp_link_settings.lane_count =
link->verified_link_cap.lane_count;
pipes[i]->link_config.dp_link_settings.link_rate =
@@ -404,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- link->is_automated = true;
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->skip_fallback_on_link_loss = true;
+
device_service_clear.bits.AUTOMATED_TEST = 1;
core_link_write_dpcd(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 4f4e899e5..5d36bab00 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
uint32_t retries_eq = 0;
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
- uint32_t wait_time_microsec;
+ uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
@@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent(
/* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4
* has to share encoders unlike DP and USBC
*/
- if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) {
+ if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) {
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -1037,7 +1037,7 @@ enum link_training_result dpia_perform_link_training(
*/
if (result == LINK_TRAINING_SUCCESS) {
fsleep(5000);
- if (!link->is_automated)
+ if (!link->skip_fallback_on_link_loss)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT)
dpia_training_abort(link, &lt_settings, repeater_id);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 68096d12f..7087cdc9e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -205,6 +205,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
+ const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -293,6 +294,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_dpmf[0],
+ sizeof(vendor_lttpr_write_data_dpmf));
+
if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
@@ -552,6 +557,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
+ const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -639,6 +645,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_dpmf[0],
+ sizeof(vendor_lttpr_write_data_dpmf));
+
if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 7c4a93d3c..d01b77fb9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -529,6 +529,9 @@ bool edp_set_backlight_level(const struct dc_link *link,
if (dc_is_embedded_signal(link->connector_signal)) {
struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
+ if (link->panel_cntl)
+ link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16;
+
if (pipe_ctx) {
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
@@ -1001,7 +1004,37 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+/*
+ * This is a general interface for Replay to set a 32-bit variable to DMUB.
+ * replay_FW_Message_type: indicates which instruction or variable is passed to DMUB.
+ * cmd_data: value of the config.
+ */
+bool edp_send_replay_cmd(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!replay)
+ return false;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ cmd_data->panel_inst = panel_inst;
+ else {
+ DC_LOG_DC("%s(): failed to get edp panel inst\n", __func__);
+ return false;
+ }
+
+ replay->funcs->replay_send_cmd(replay, msg, cmd_data);
+
+ return true;
+}
+
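A minimal caller sketch for the new helper, assuming nothing beyond the names in this hunk; the message-specific payload fields of the union are not visible here and are left untouched:

static void sketch_send_replay_config(struct dc_link *link,
				      enum replay_FW_Message_type msg)
{
	union dmub_replay_cmd_set cmd_data = {0};

	DC_LOGGER_INIT(link->ctx->logger);

	/* panel_inst is resolved and filled in by the helper itself; the
	 * caller supplies only the message id and any payload fields.
	 */
	if (!edp_send_replay_cmd(link, msg, &cmd_data))
		DC_LOG_DC("%s: replay cmd was not sent\n", __func__);
}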
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1039,6 +1072,33 @@ bool edp_replay_residency(const struct dc_link *link,
return true;
}
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ /* Return true only when both the power opt and the coasting vtotal changed */
+ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
+ coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ if (link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
+ replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
+ *power_opts, panel_inst, coasting_vtotal);
+ link->replay_settings.replay_power_opt_active = *power_opts;
+ link->replay_settings.coasting_vtotal = coasting_vtotal;
+ } else
+ return false;
+ } else
+ return false;
+
+ return true;
+}
+
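The success path of the combined setter requires every condition at once. A condensed sketch of the predicate, using only names from this hunk:

/* Hedged sketch: all of these must hold, or the setter returns false. */
static bool sketch_combined_setter_applies(const struct dc_link *link,
					   const struct dmub_replay *replay,
					   const unsigned int *power_opts,
					   uint32_t coasting_vtotal)
{
	return power_opts &&
	       link->replay_settings.replay_power_opt_active != *power_opts &&
	       coasting_vtotal &&
	       link->replay_settings.coasting_vtotal != coasting_vtotal &&
	       link->replay_settings.replay_feature_enabled &&
	       replay->funcs->replay_set_power_opt_and_coasting_vtotal != NULL;
}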
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index a034288ad..a158c6234 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -56,10 +56,15 @@ bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
bool wait, bool force_static, const unsigned int *power_opts);
bool edp_setup_replay(struct dc_link *link,
const struct dc_stream_state *stream);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_send_replay_cmd(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/Makefile b/drivers/gpu/drm/amd/display/dc/optc/Makefile
new file mode 100644
index 000000000..bb213335f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/Makefile
@@ -0,0 +1,108 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'optc' sub-component of DAL.
+#
+
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+OPTC_DCN10 = dcn10_optc.o
+
+AMD_DAL_OPTC_DCN10 = $(addprefix $(AMDDALPATH)/dc/optc/dcn10/,$(OPTC_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN10)
+
+###############################################################################
+
+OPTC_DCN20 = dcn20_optc.o
+
+AMD_DAL_OPTC_DCN20 = $(addprefix $(AMDDALPATH)/dc/optc/dcn20/,$(OPTC_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN20)
+
+###############################################################################
+
+OPTC_DCN201 = dcn201_optc.o
+
+AMD_DAL_OPTC_DCN201 = $(addprefix $(AMDDALPATH)/dc/optc/dcn201/,$(OPTC_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN201)
+
+###############################################################################
+
+###############################################################################
+
+###############################################################################
+
+OPTC_DCN30 = dcn30_optc.o
+
+AMD_DAL_OPTC_DCN30 = $(addprefix $(AMDDALPATH)/dc/optc/dcn30/,$(OPTC_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN30)
+
+###############################################################################
+
+OPTC_DCN301 = dcn301_optc.o
+
+AMD_DAL_OPTC_DCN301 = $(addprefix $(AMDDALPATH)/dc/optc/dcn301/,$(OPTC_DCN301))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN301)
+
+###############################################################################
+
+OPTC_DCN31 = dcn31_optc.o
+
+AMD_DAL_OPTC_DCN31 = $(addprefix $(AMDDALPATH)/dc/optc/dcn31/,$(OPTC_DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN31)
+
+###############################################################################
+
+OPTC_DCN314 = dcn314_optc.o
+
+AMD_DAL_OPTC_DCN314 = $(addprefix $(AMDDALPATH)/dc/optc/dcn314/,$(OPTC_DCN314))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN314)
+
+###############################################################################
+
+OPTC_DCN32 = dcn32_optc.o
+
+AMD_DAL_OPTC_DCN32 = $(addprefix $(AMDDALPATH)/dc/optc/dcn32/,$(OPTC_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN32)
+
+###############################################################################
+
+OPTC_DCN35 = dcn35_optc.o
+
+AMD_DAL_OPTC_DCN35 = $(addprefix $(AMDDALPATH)/dc/optc/dcn35/,$(OPTC_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN35)
+
+###############################################################################
+
+###############################################################################
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 0e8f4f36c..0e8f4f36c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index ab81594a7..6c2e84d39 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -557,7 +557,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 58bdbd859..58bdbd859 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
index f7968b9ca..c2e03ced3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
@@ -26,7 +26,7 @@
#ifndef __DC_OPTC_DCN20_H__
#define __DC_OPTC_DCN20_H__
-#include "../dcn10/dcn10_optc.h"
+#include "dcn10/dcn10_optc.h"
#define TG_COMMON_REG_LIST_DCN2_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
index 70fcbec03..70fcbec03 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h
index e9545b735..e9545b735 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index b97bdb868..b97bdb868 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
index d3a056c12..d3a056c12 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index b3cfcb887..b3cfcb887 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h
index b49585682..b49585682 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 63a677c8e..63a677c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 30b81a448..30b81a448 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 0086cafb0..0086cafb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
index 99c098e76..99c098e76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 823493543..52eab8fcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
}
}
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
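+ /* Wait for the ODM h-timing-div double-buffer update to stop pending;
+  * assuming DC's usual REG_WAIT(reg, field, value, delay, max_try)
+  * semantics, this polls every 2 us for up to 50000 tries (~100 ms).
+  */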
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -260,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
@@ -345,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 8ce3b178c..0c2c14695 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 5b1547508..5b1547508 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index 1f422e4c4..1f422e4c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
new file mode 100644
index 000000000..0a75ed896
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -0,0 +1,199 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'resource' sub-component of DAL.
+#
+
+
+###############################################################################
+# DCE
+###############################################################################
+
+RESOURCE_DCE100 = dce100_resource.o
+
+AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE100)
+
+###############################################################################
+
+RESOURCE_DCE110 = dce110_resource.o
+
+AMD_DAL_RESOURCE_DCE110 = $(addprefix $(AMDDALPATH)/dc/resource/dce110/,$(RESOURCE_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE110)
+
+###############################################################################
+
+RESOURCE_DCE112 = dce112_resource.o
+
+AMD_DAL_RESOURCE_DCE112 = $(addprefix $(AMDDALPATH)/dc/resource/dce112/,$(RESOURCE_DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE112)
+
+###############################################################################
+
+RESOURCE_DCE120 = dce120_resource.o
+
+AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOURCE_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+RESOURCE_DCN10 = dcn10_resource.o
+
+AMD_DAL_RESOURCE_DCN10 = $(addprefix $(AMDDALPATH)/dc/resource/dcn10/,$(RESOURCE_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN10)
+
+###############################################################################
+
+RESOURCE_DCN20 = dcn20_resource.o
+
+AMD_DAL_RESOURCE_DCN20 = $(addprefix $(AMDDALPATH)/dc/resource/dcn20/,$(RESOURCE_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN20)
+
+###############################################################################
+
+RESOURCE_DCN201 = dcn201_resource.o
+
+AMD_DAL_RESOURCE_DCN201 = $(addprefix $(AMDDALPATH)/dc/resource/dcn201/,$(RESOURCE_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN201)
+
+###############################################################################
+
+RESOURCE_DCN21 = dcn21_resource.o
+
+AMD_DAL_RESOURCE_DCN21 = $(addprefix $(AMDDALPATH)/dc/resource/dcn21/,$(RESOURCE_DCN21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21)
+
+###############################################################################
+
+###############################################################################
+
+###############################################################################
+
+RESOURCE_DCN30 = dcn30_resource.o
+
+AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN30)
+
+###############################################################################
+
+RESOURCE_DCN301 = dcn301_resource.o
+
+AMD_DAL_RESOURCE_DCN301 = $(addprefix $(AMDDALPATH)/dc/resource/dcn301/,$(RESOURCE_DCN301))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN301)
+
+###############################################################################
+
+RESOURCE_DCN302 = dcn302_resource.o
+
+AMD_DAL_RESOURCE_DCN302 = $(addprefix $(AMDDALPATH)/dc/resource/dcn302/,$(RESOURCE_DCN302))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN302)
+
+###############################################################################
+
+RESOURCE_DCN303 = dcn303_resource.o
+
+AMD_DAL_RESOURCE_DCN303 = $(addprefix $(AMDDALPATH)/dc/resource/dcn303/,$(RESOURCE_DCN303))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN303)
+
+###############################################################################
+
+RESOURCE_DCN31 = dcn31_resource.o
+
+AMD_DAL_RESOURCE_DCN31 = $(addprefix $(AMDDALPATH)/dc/resource/dcn31/,$(RESOURCE_DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN31)
+
+###############################################################################
+
+RESOURCE_DCN314 = dcn314_resource.o
+
+AMD_DAL_RESOURCE_DCN314 = $(addprefix $(AMDDALPATH)/dc/resource/dcn314/,$(RESOURCE_DCN314))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN314)
+
+###############################################################################
+
+RESOURCE_DCN315 = dcn315_resource.o
+
+AMD_DAL_RESOURCE_DCN315 = $(addprefix $(AMDDALPATH)/dc/resource/dcn315/,$(RESOURCE_DCN315))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN315)
+
+###############################################################################
+
+RESOURCE_DCN316 = dcn316_resource.o
+
+AMD_DAL_RESOURCE_DCN316 = $(addprefix $(AMDDALPATH)/dc/resource/dcn316/,$(RESOURCE_DCN316))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN316)
+
+###############################################################################
+
+RESOURCE_DCN32 = dcn32_resource.o
+
+AMD_DAL_RESOURCE_DCN32 = $(addprefix $(AMDDALPATH)/dc/resource/dcn32/,$(RESOURCE_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN32)
+
+###############################################################################
+
+RESOURCE_DCN321 = dcn321_resource.o
+
+AMD_DAL_RESOURCE_DCN321 = $(addprefix $(AMDDALPATH)/dc/resource/dcn321/,$(RESOURCE_DCN321))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN321)
+
+###############################################################################
+
+RESOURCE_DCN35 = dcn35_resource.o
+
+AMD_DAL_RESOURCE_DCN35 = $(addprefix $(AMDDALPATH)/dc/resource/dcn35/,$(RESOURCE_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35)
+
+###############################################################################
+
+###############################################################################
+
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 53a5f4cb6..53a5f4cb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
index fecab7c56..fecab7c56 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index fe518fd27..fe518fd27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h
index aa4531e08..aa4531e08 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index d1edac46c..d1edac46c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f..1f57ebc6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 962de79be..20662edd0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -36,7 +36,7 @@
#include "dce110/dce110_resource.h"
#include "virtual/virtual_stream_encoder.h"
-#include "dce120_timing_generator.h"
+#include "dce120/dce120_timing_generator.h"
#include "irq/dce120/irq_service_dce120.h"
#include "dce/dce_opp.h"
#include "dce/dce_clock_source.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h
index 3d1f3cf01..3d1f3cf01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt
new file mode 100644
index 000000000..19dd73bc9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dce80_resource.c
+ dce80_resource.h
+ ) \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 35a2cce0c..35a2cce0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h
index eff31ab83..eff31ab83 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index b94c5c97e..d08d10969 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -26,29 +26,32 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn10_init.h"
+#include "dcn10/dcn10_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
-#include "dcn10_resource.h"
-#include "dcn10_ipp.h"
-#include "dcn10_mpc.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_ipp.h"
+#include "dcn10/dcn10_mpc.h"
+
+#include "dcn10/dcn10_dwb.h"
+
#include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10_dpp.h"
-#include "dcn10_optc.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_optc.h"
#include "dcn10/dcn10_hwseq.h"
#include "dce110/dce110_hwseq.h"
-#include "dcn10_opp.h"
-#include "dcn10_link_encoder.h"
-#include "dcn10_stream_encoder.h"
+#include "dcn10/dcn10_opp.h"
+#include "dcn10/dcn10_link_encoder.h"
+#include "dcn10/dcn10_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#include "dcn10_hubp.h"
-#include "dcn10_hubbub.h"
+#include "dcn10/dcn10_hubp.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dce/dce_panel_cntl.h"
#include "soc15_hw_ip.h"
@@ -1247,7 +1250,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
/* Store first available for MST second display
* in daisy chain use case
*/
- j = i;
+
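+ /* Keep only real encoders as the stored MST daisy-chain fallback;
+  * a virtual stream encoder must not be remembered in j.
+  */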
+ if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL)
+ j = i;
+
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
index bf8e33cd8..bf8e33cd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e73e59754..f9c5bc624 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -29,7 +29,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn20_init.h"
+#include "dcn20/dcn20_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -39,29 +39,29 @@
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
-#include "dcn20_hubbub.h"
-#include "dcn20_mpc.h"
-#include "dcn20_hubp.h"
+#include "dcn20/dcn20_hubbub.h"
+#include "dcn20/dcn20_mpc.h"
+#include "dcn20/dcn20_hubp.h"
#include "irq/dcn20/irq_service_dcn20.h"
-#include "dcn20_dpp.h"
-#include "dcn20_optc.h"
+#include "dcn20/dcn20_dpp.h"
+#include "dcn20/dcn20_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dce110/dce110_hwseq.h"
#include "dcn10/dcn10_resource.h"
-#include "dcn20_opp.h"
+#include "dcn20/dcn20_opp.h"
-#include "dcn20_dsc.h"
+#include "dcn20/dcn20_dsc.h"
-#include "dcn20_link_encoder.h"
-#include "dcn20_stream_encoder.h"
+#include "dcn20/dcn20_link_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
-#include "dcn20_dccg.h"
-#include "dcn20_vmid.h"
+#include "dcn20/dcn20_dccg.h"
+#include "dcn20/dcn20_vmid.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11..4cee3fa11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index bca22d867..914b234d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn201_init.h"
+#include "dcn201/dcn201_init.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -36,16 +36,16 @@
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
-#include "dcn201_mpc.h"
-#include "dcn201_hubp.h"
+#include "dcn201/dcn201_mpc.h"
+#include "dcn201/dcn201_hubp.h"
#include "irq/dcn201/irq_service_dcn201.h"
#include "dcn201/dcn201_dpp.h"
#include "dcn201/dcn201_hubbub.h"
-#include "dcn201_dccg.h"
-#include "dcn201_optc.h"
+#include "dcn201/dcn201_dccg.h"
+#include "dcn201/dcn201_optc.h"
#include "dcn201/dcn201_hwseq.h"
#include "dce110/dce110_hwseq.h"
-#include "dcn201_opp.h"
+#include "dcn201/dcn201_opp.h"
#include "dcn201/dcn201_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
@@ -55,7 +55,7 @@
#include "dce110/dce110_resource.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
-#include "dcn201_hubbub.h"
+#include "dcn201/dcn201_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "cyan_skillfish_ip_offset.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h
index e0467d17d..e0467d17d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 42277b280..65d337731 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -29,7 +29,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn21_init.h"
+#include "dcn21/dcn21_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -44,7 +44,7 @@
#include "dcn20/dcn20_hubbub.h"
#include "dcn20/dcn20_mpc.h"
#include "dcn20/dcn20_hubp.h"
-#include "dcn21_hubp.h"
+#include "dcn21/dcn21_hubp.h"
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
@@ -61,7 +61,7 @@
#include "dml/display_mode_vba.h"
#include "dcn20/dcn20_dccg.h"
#include "dcn21/dcn21_dccg.h"
-#include "dcn21_hubbub.h"
+#include "dcn21/dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -713,9 +713,8 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
pool->base.hubps[i] = NULL;
}
- if (pool->base.irqs != NULL) {
+ if (pool->base.irqs != NULL)
dal_irq_service_destroy(&pool->base.irqs);
- }
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index f7ecc002c..f7ecc002c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 7b259cb5f..37a64186f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn30_init.h"
+#include "dcn30/dcn30_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -1682,6 +1682,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
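+ /* Scope this DML flag to the fast-validate pass; it is cleared again
+  * further down, once the pipe split/merge retry completes.
+  */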
+ context->bw_ctx.dml.validate_max_state = fast_validate;
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1691,6 +1692,7 @@ noinline bool dcn30_internal_validate_bw(
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
+ context->bw_ctx.dml.validate_max_state = false;
}
dml_log_mode_support_params(&context->bw_ctx.dml);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b736..8e6b8b736 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index ce04caf35..7538b548c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn301_init.h"
+#include "dcn301/dcn301_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -61,7 +61,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn301/dcn301_dio_link_encoder.h"
-#include "dcn301_panel_cntl.h"
+#include "dcn301/dcn301_panel_cntl.h"
#include "vangogh_ip_offset.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h
index ae8672680..ae8672680 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 63ac984a0..5791b5cc2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -23,9 +23,9 @@
*
*/
-#include "dcn302_init.h"
+#include "dcn302/dcn302_init.h"
#include "dcn302_resource.h"
-#include "dcn302_dccg.h"
+#include "dcn302/dcn302_dccg.h"
#include "irq/dcn302/irq_service_dcn302.h"
#include "dcn30/dcn30_dio_link_encoder.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h
index 9f24e73b9..9f24e73b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 49cb7fde4..25cd6236b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -23,9 +23,9 @@
* Authors: AMD
*/
-#include "dcn303_init.h"
+#include "dcn303/dcn303_init.h"
#include "dcn303_resource.h"
-#include "dcn303_dccg.h"
+#include "dcn303/dcn303_dccg.h"
#include "irq/dcn303/irq_service_dcn303.h"
#include "dcn30/dcn30_dio_link_encoder.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h
index 37cf15258..37cf15258 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 79416cfb2..31035fc3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -70,7 +70,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dcn31_panel_cntl.h"
+#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 901436591..901436591 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index c97391edb..c97391edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe7101..49ffe7101 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index cb8024eee..515ba435f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1631,8 +1631,10 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
int i;
struct resource_context *res_ctx = &context->res_ctx;
- /*Don't apply for single stream*/
- if (context->stream_count < 2)
+ /* Only apply for dual-stream scenarios with eDP */
+ if (context->stream_count != 2)
+ return false;
+ if (context->streams[0]->signal != SIGNAL_TYPE_EDP && context->streams[1]->signal != SIGNAL_TYPE_EDP)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
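Restated positively, the new gate admits exactly the dual-stream case with at least one eDP stream. An equivalent sketch (illustrative name, same logic as the hunk above):

static bool sketch_crb_gate_passes(const struct dc_state *context)
{
	return context->stream_count == 2 &&
	       (context->streams[0]->signal == SIGNAL_TYPE_EDP ||
		context->streams[1]->signal == SIGNAL_TYPE_EDP);
}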
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h
index 22849eaa6..22849eaa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index b9753d460..b9753d460 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h
index aba6d6341..aba6d6341 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index f663de1cd..9042378fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn32_init.h"
+#include "dcn32/dcn32_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -41,7 +41,7 @@
#include "dcn31/dcn31_hubbub.h"
#include "dcn32/dcn32_hubbub.h"
#include "dcn32/dcn32_mpc.h"
-#include "dcn32_hubp.h"
+#include "dcn32/dcn32_hubp.h"
#include "irq/dcn32/irq_service_dcn32.h"
#include "dcn32/dcn32_dpp.h"
#include "dcn32/dcn32_optc.h"
@@ -89,6 +89,8 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
+
#include "dml2/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -1644,7 +1646,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
phantom_plane = prev_phantom_plane;
else
- phantom_plane = dc_create_plane_state(dc);
+ phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state);
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
@@ -1665,9 +1667,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->src.height;
- phantom_plane->is_phantom = true;
-
- dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+ dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
prev_phantom_plane = phantom_plane;
@@ -1683,13 +1683,7 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
struct dc_stream_state *phantom_stream = NULL;
struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
- phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+ phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -1699,81 +1693,10 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx);
DC_FP_END();
- dc_add_stream_to_ctx(dc, context, phantom_stream);
+ dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream);
return phantom_stream;
}
-void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i;
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_stream_state *phantom_stream = NULL;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
- phantom_stream = pipe->stream;
-
- dc_plane_state_retain(phantom_plane);
- dc_stream_retain(phantom_stream);
- }
- }
-}
-
-// return true if removed piped from ctx, false otherwise
-bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update)
-{
- int i;
- bool removed_pipe = false;
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_stream_state *phantom_stream = NULL;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
- phantom_stream = pipe->stream;
-
- dc_rem_all_planes_for_stream(dc, pipe->stream, context);
- dc_remove_stream_from_ctx(dc, context, pipe->stream);
-
- /* Ref count is incremented on allocation and also when added to the context.
- * Therefore we must call release for the the phantom plane and stream once
- * they are removed from the ctx to finally decrement the refcount to 0 to free.
- */
- dc_plane_state_release(phantom_plane);
- dc_stream_release(phantom_stream);
-
- removed_pipe = true;
- }
-
- /* For non-full updates, a shallow copy of the current state
- * is created. In this case we don't want to erase the current
- * state (there can be 2 HIRQL threads, one in flip, and one in
- * checkMPO) that can cause a race condition.
- *
- * This is just a workaround, needs a proper fix.
- */
- if (!fast_update) {
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
-
- if (pipe->plane_state) {
- pipe->plane_state->is_phantom = false;
- }
- }
- }
- return removed_pipe;
-}
-
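With the open-coded retain/remove paths deleted, phantom lifetime flows through the dc_state_* helpers named in these hunks. A condensed sketch of the creation path, assuming only the signatures visible above; the pairing the deleted code did by hand via mall_stream_config is presumably handled inside the helpers:

static void sketch_enable_phantom(struct dc *dc, struct dc_state *context,
				  struct pipe_ctx *ref_pipe)
{
	struct dc_stream_state *phantom_stream =
		dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
	struct dc_plane_state *phantom_plane =
		dc_state_create_phantom_plane(dc, context,
					      ref_pipe->plane_state);

	dc_state_add_phantom_stream(dc, context, phantom_stream,
				    ref_pipe->stream);
	dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context);
}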
/* TODO: Input to this function should indicate which pipe indexes (or streams)
* require a phantom pipe / stream
*/
@@ -1798,7 +1721,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
// We determine which phantom pipes were added by comparing with
// the phantom stream.
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
pipe->stream->use_dynamic_meta = false;
pipe->plane_state->flip_immediate = false;
if (!resource_build_scaling_params(pipe)) {
@@ -1817,7 +1740,6 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
- struct mall_temp_config mall_temp_config;
/* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation
@@ -1827,29 +1749,12 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
DC_LOGGER_INIT(dc->ctx->logger);
- /* For fast validation, there are situations where a shallow copy of
- * of the dc->current_state is created for the validation. In this case
- * we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt
- * to add it back if it's fast validation). If we don't restore the
- * subvp config in cases of fast validation + shallow copy of the
- * dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- */
- if (fast_validate) {
- memset(&mall_temp_config, 0, sizeof(mall_temp_config));
- dcn32_save_mall_state(dc, context, &mall_temp_config);
- }
-
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
- if (fast_validate)
- dcn32_restore_mall_state(dc, context, &mall_temp_config);
-
if (pipe_cnt == 0)
goto validate_out;
@@ -1948,7 +1853,7 @@ int dcn32_populate_dml_pipes_from_context(
* This is just a workaround -- needs a proper fix.
*/
if (!fast_validate) {
- switch (pipe->stream->mall_stream_config.type) {
+ switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
subvp_in_use = true;
@@ -2054,10 +1959,6 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
- .remove_phantom_pipes = dcn32_remove_phantom_pipes,
- .retain_phantom_pipes = dcn32_retain_phantom_pipes,
- .save_mall_state = dcn32_save_mall_state,
- .restore_mall_state = dcn32_restore_mall_state,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
};
@@ -2471,16 +2372,19 @@ static bool dcn32_resource_construct(
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
- dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
- dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
- dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
- dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
- dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
- dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+ dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 351c8a284..2258c5c72 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -39,6 +39,7 @@
#define DCN3_2_MBLK_HEIGHT_8BPE 64
#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq
#define SUBVP_HIGH_REFRESH_LIST_LEN 4
+#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define MIN_SUBVP_DCFCLK_KHZ 400000
@@ -58,6 +59,15 @@ struct subvp_high_refresh_list {
} res[SUBVP_HIGH_REFRESH_LIST_LEN];
};
+struct subvp_active_margin_list {
+ int min_refresh;
+ int max_refresh;
+ struct {
+ int width;
+ int height;
+ } res[SUBVP_ACTIVE_MARGIN_LIST_LEN];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -82,12 +92,6 @@ bool dcn32_release_post_bldn_3dlut(
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-bool dcn32_remove_phantom_pipes(struct dc *dc,
- struct dc_state *context, bool fast_update);
-
-void dcn32_retain_phantom_pipes(struct dc *dc,
- struct dc_state *context);
-
void dcn32_add_phantom_pipes(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -160,15 +164,7 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
-void dcn32_save_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config);
-
-void dcn32_restore_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config);
-
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context);
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
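As an aside, a minimal sketch of how the subvp_active_margin_list added above could be populated; the refresh window and resolutions here are hypothetical, not taken from the driver:

struct subvp_active_margin_list subvp_active_margin = {
	.min_refresh = 55,	/* hypothetical lower bound, Hz */
	.max_refresh = 65,	/* hypothetical upper bound, Hz */
	.res = {
		{ .width = 3840, .height = 2160 },
		{ .width = 2560, .height = 1440 },
	},
};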
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 3b7505b5f..f4dd6443a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -63,7 +63,7 @@
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
-#include "dcn321_dio_link_encoder.h"
+#include "dcn321/dcn321_dio_link_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -92,6 +92,8 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "dc_state_priv.h"
+
#define DC_LOGGER_INIT(logger)
enum dcn321_clk_src_array_id {
@@ -1607,10 +1609,6 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
- .remove_phantom_pipes = dcn32_remove_phantom_pipes,
- .retain_phantom_pipes = dcn32_retain_phantom_pipes,
- .save_mall_state = dcn32_save_mall_state,
- .restore_mall_state = dcn32_restore_mall_state,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
};
@@ -2010,16 +2008,19 @@ static bool dcn321_resource_construct(
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
- dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
- dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
- dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
- dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
- dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
- dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+ dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h
index 82cbf009f..82cbf009f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 70ef1e7ff..78c315541 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -78,7 +78,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn35/dcn35_hwseq.h"
-#include "dcn35_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
#include "dml/dcn31/dcn31_fpu.h" /*todo*/
#include "dml/dcn35/dcn35_fpu.h"
#include "dcn35/dcn35_dwb.h"
@@ -96,12 +96,15 @@
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "dc_state_priv.h"
+
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -626,7 +629,19 @@ static struct dce_hwseq_registers hwseq_reg;
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
- HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh)
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN35_MASK_SH_LIST(__SHIFT)
@@ -686,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {
// 6:1 downscaling ratio: 1000/6 = 166.666
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -705,7 +720,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
- .disable_clock_gate = true,
+	.disable_optc_power_gate = true, /* should be the same as the two gates above */
+	.disable_hpo_power_gate = true, /* dmub fw forces domain25 on */
+ .disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
.performance_trace = false,
@@ -724,7 +741,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.i2c = true,
.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
.dscl = true,
- .cm = false,
+ .cm = true,
.mpc = true,
.optc = true,
.vpg = true,
@@ -752,7 +769,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = false,
- .disable_idle_power_optimizations = true,
+ .disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
.disable_unbounded_requesting = false,
@@ -763,14 +780,17 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
+ .static_screen_wait_frames = 2,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1529,6 +1549,9 @@ static void dcn35_resource_destruct(struct dcn35_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.pg_cntl != NULL)
dcn_pg_cntl_destroy(&pool->base.pg_cntl);
@@ -2013,6 +2036,14 @@ static bool dcn35_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
@@ -2100,6 +2131,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index 99aea102e..a51c4a9ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -166,6 +166,7 @@ struct resource_pool *dcn35_create_resource_pool(
SR(MMHUBBUB_MEM_PWR_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCCG_GATE_DISABLE_CNTL4), \
SR(DCCG_GATE_DISABLE_CNTL5), \
SR(DCFCLK_CNTL),\
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index d1a4ed6f5..c78c9224a 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -86,6 +86,7 @@ enum dmub_status {
DMUB_STATUS_TIMEOUT,
DMUB_STATUS_INVALID,
DMUB_STATUS_HW_FAILURE,
+ DMUB_STATUS_POWER_STATE_D3
};
/* enum dmub_asic - dmub asic identifier */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 3cea96a36..bb1f69a54 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -185,8 +185,7 @@ union abm_flags {
unsigned int disable_abm_requested : 1;
/**
- * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled
- * immediately.
+ * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled immediately.
*/
unsigned int disable_abm_immediately : 1;
@@ -654,7 +653,7 @@ union dmub_fw_boot_options {
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
+ uint32_t reserved0: 1;
uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
@@ -818,18 +817,61 @@ enum dmub_gpint_command {
* RETURN: Lower 32-bit mask.
*/
DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101,
+
/**
- * DESC: Updates the trace buffer lower 32-bit mask.
+ * DESC: Updates the trace buffer mask bit0~bit15.
* ARGS: The new mask
* RETURN: Lower 32-bit mask.
*/
DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102,
+
/**
- * DESC: Updates the trace buffer mask bi0~bit15.
+ * DESC: Updates the trace buffer mask bit16~bit31.
* ARGS: The new mask
* RETURN: Lower 32-bit mask.
*/
DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103,
+
+ /**
+ * DESC: Updates the trace buffer mask bit32~bit47.
+ * ARGS: The new mask
+ * RETURN: Lower 32-bit mask.
+ */
+ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2 = 114,
+
+ /**
+ * DESC: Updates the trace buffer mask bit48~bit63.
+ * ARGS: The new mask
+ * RETURN: Lower 32-bit mask.
+ */
+ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3 = 115,
+
+ /**
+	 * DESC: Read the trace buffer mask bit0~bit15.
+ */
+ DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0 = 116,
+
+ /**
+ * DESC: Read the trace buffer mask bit16~bit31.
+ */
+ DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD1 = 117,
+
+ /**
+	 * DESC: Read the trace buffer mask bit32~bit47.
+ */
+ DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD2 = 118,
+
+ /**
+	 * DESC: Read the trace buffer mask bit48~bit63.
+ */
+ DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119,
+
+ /**
+	 * DESC: Enable measurement of various task durations
+ * ARGS: 0 - Disable measurement
+ * 1 - Enable measurement
+ */
+ DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123,
};
/**
@@ -1303,6 +1345,10 @@ enum dmub_cmd_cab_type {
* Fit surfaces in CAB (i.e. CAB enable)
*/
DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2,
+ /**
+ * Do not fit surfaces in CAB (i.e. no CAB)
+ */
+ DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB = 3,
};
/**
@@ -2786,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt {
#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_IPS 0x10
#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
@@ -2840,6 +2887,18 @@ enum dmub_cmd_replay_type {
* Set power opt and coasting vtotal.
*/
DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL = 4,
+ /**
+	 * Set disabled timing sync.
+ */
+ DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED = 5,
+ /**
+ * Set Residency Frameupdate Timer.
+ */
+ DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6,
+ /**
+ * Set pseudo vtotal
+ */
+ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
};
/**
@@ -3003,6 +3062,46 @@ struct dmub_cmd_replay_set_power_opt_data {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command.
+ */
+struct dmub_cmd_replay_set_timing_sync_data {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+	 * Currently only instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+ * REPLAY set_timing_sync
+ */
+ uint8_t timing_sync_supported;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+	 * Currently only instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+	 * Source vtotal used by Replay + IPS + ABM for full screen video
+ */
+ uint16_t vtotal;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
*/
struct dmub_rb_cmd_replay_set_power_opt {
@@ -3034,6 +3133,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+	 * 16-bit value dictated by the driver carrying the upper 16 bits of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -3069,6 +3176,91 @@ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command.
+ */
+struct dmub_rb_cmd_replay_set_timing_sync {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command.
+ */
+ struct dmub_cmd_replay_set_timing_sync_data replay_set_timing_sync_data;
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal data;
+};
+
+/**
+ * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
+ */
+struct dmub_cmd_replay_frameupdate_timer_data {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+	 * Currently only instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+ * Replay Frameupdate Timer Enable or not
+ */
+ uint8_t enable;
+ /**
+	 * Replay forced refresh frame update count
+ */
+ uint16_t frameupdate_count;
+};
+/**
+ * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER
+ */
+struct dmub_rb_cmd_replay_set_frameupdate_timer {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+	 * Data passed with a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
+ */
+ struct dmub_cmd_replay_frameupdate_timer_data data;
+};
+
+/**
+ * Definition union of replay command set
+ */
+union dmub_replay_cmd_set {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+	 * Currently only instances 0 and 1 are supported
+ */
+ uint8_t panel_inst;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command data.
+ */
+ struct dmub_cmd_replay_set_timing_sync_data sync_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
+ */
+ struct dmub_cmd_replay_frameupdate_timer_data timer_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
+};
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -4211,6 +4403,16 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL command.
*/
struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal replay_set_power_opt_and_coasting_vtotal;
+
+ struct dmub_rb_cmd_replay_set_timing_sync replay_set_timing_sync;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
+ */
+ struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
};
/**
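The coasting vtotal is now carried as two 16-bit halves. A sketch of how a driver-side helper might split a 32-bit value across them, assuming the pre-existing low half is a coasting_vtotal field not shown in this hunk:

static void fill_coasting_vtotal(struct dmub_cmd_replay_set_coasting_vtotal_data *data,
				 uint32_t coasting_vtotal)
{
	/* Low 16 bits go in the assumed pre-existing field... */
	data->coasting_vtotal = coasting_vtotal & 0xFFFF;
	/* ...and the field added above carries the upper 16 bits. */
	data->coasting_vtotal_high = (coasting_vtotal >> 16) & 0xFFFF;
}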
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 59d4e6484..9ad738805 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -64,7 +64,7 @@
/* Default scratch mem size. */
-#define DMUB_SCRATCH_MEM_SIZE (256)
+#define DMUB_SCRATCH_MEM_SIZE (1024)
/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
@@ -768,7 +768,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
return DMUB_STATUS_INVALID;
if (dmub->power_state != DMUB_POWER_STATE_D0)
- return DMUB_STATUS_INVALID;
+ return DMUB_STATUS_POWER_STATE_D3;
if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
@@ -789,7 +789,7 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
return DMUB_STATUS_INVALID;
if (dmub->power_state != DMUB_POWER_STATE_D0)
- return DMUB_STATUS_INVALID;
+ return DMUB_STATUS_POWER_STATE_D3;
/**
* Read back all the queued commands to ensure that they've
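With queue and execute now reporting a distinct status when the firmware is not in D0, a caller can tell a powered-down DMUB apart from a malformed request. A hedged sketch of such a caller (the helper name is illustrative, not the driver's):

static bool dmub_queue_cmd_checked(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
	enum dmub_status status = dmub_srv_cmd_queue(dmub, cmd);

	if (status == DMUB_STATUS_POWER_STATE_D3) {
		/* DMUB refused because it is powered down; a caller could
		 * wake the firmware here and retry instead of failing hard. */
		return false;
	}

	return status == DMUB_STATUS_OK;
}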
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
index 42229b4ef..eced9ad91 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h
@@ -69,6 +69,11 @@ enum hdcp_message_id {
HDCP_MESSAGE_ID_READ_RXSTATUS,
HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+ /* PS175 chip */
+
+ HDCP_MESSAGE_ID_WRITE_PS175_CMD,
+ HDCP_MESSAGE_ID_READ_PS175_RSP,
+
HDCP_MESSAGE_ID_MAX
};
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ccecddafe..3955b7e4b 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -81,6 +81,7 @@ fail_alloc_context:
void mod_freesync_destroy(struct mod_freesync *mod_freesync)
{
struct core_freesync *core_freesync = NULL;
+
if (mod_freesync == NULL)
return;
core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
@@ -278,9 +279,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
}
} else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- if (!in_out_vrr->btr.btr_active) {
+ if (!in_out_vrr->btr.btr_active)
in_out_vrr->btr.btr_active = true;
- }
}
/* BTR set to "not active" so disengage */
@@ -693,10 +693,12 @@ static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
- if (app_tf != TRANSFER_FUNC_PQ2084) {
+ if (app_tf == TRANSFER_FUNC_PQ2084)
+ infopacket->sb[9] |= 0x20; // PB9 = [Bit 5 = PQ EOTF Active]
+ else {
infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
if (app_tf == TRANSFER_FUNC_GAMMA_22)
- infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
+ infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 1ddb4f5ea..182e7532d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -63,6 +63,7 @@ static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
+
if (is_dp_hdcp(hdcp)) {
status = (hdcp->auth.msg.hdcp1.bstatus &
DP_BSTATUS_R0_PRIME_READY) ?
@@ -131,9 +132,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* Avoid device count == 0 to do authentication */
- if (0 == get_device_count(hdcp)) {
+ if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
- }
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 91c22b96e..733f22bed 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,9 +208,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* Avoid device count == 0 to do authentication */
- if (0 == get_device_count(hdcp)) {
+ if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
- }
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
@@ -689,9 +688,8 @@ static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp,
if (is_hdmi_dvi_sl_hdcp(hdcp)) {
if (!process_rxstatus(hdcp, event_ctx, input, &status))
goto out;
- if (event_ctx->rx_id_list_ready) {
+ if (event_ctx->rx_id_list_ready)
goto out;
- }
}
if (is_hdmi_dvi_sl_hdcp(hdcp))
if (!mod_hdcp_execute_and_set(check_stream_ready_available,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index c62df3bcc..1d83c1b9d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -86,10 +86,12 @@
#define HDCP_CPIRQ_TRACE(hdcp) \
HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
#define HDCP_EVENT_TRACE(hdcp, event) \
- if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
- HDCP_TIMEOUT_TRACE(hdcp); \
- else if (event == MOD_HDCP_EVENT_CPIRQ) \
- HDCP_CPIRQ_TRACE(hdcp)
+ do { \
+ if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+ HDCP_TIMEOUT_TRACE(hdcp); \
+ else if (event == MOD_HDCP_EVENT_CPIRQ) \
+ HDCP_CPIRQ_TRACE(hdcp); \
+ } while (0)
/* TODO: find some way to tell if logging is off to save time */
#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
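The do { } while (0) wrapper matters because the bare if/else version mis-parses at call sites that themselves sit under an if. A minimal illustration (the call site and helper below are hypothetical):

/* With the old macro, the expansion's internal `else` would bind to this
 * outer `if`, silently changing control flow; do { } while (0) prevents it. */
if (have_event)
	HDCP_EVENT_TRACE(hdcp, event);
else
	mod_hdcp_log_idle(hdcp);	/* hypothetical helper */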
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index ff930a71e..7c9805705 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -443,7 +443,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
- continue;
+ continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -929,7 +929,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
- continue;
+ continue;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 5b71bc96b..7844ea916 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -98,9 +98,9 @@ enum ta_dtm_encoder_type {
* This enum defines software value for dio_output_type
*/
typedef enum {
- TA_DTM_DIO_OUTPUT_TYPE__INVALID,
- TA_DTM_DIO_OUTPUT_TYPE__DIRECT,
- TA_DTM_DIO_OUTPUT_TYPE__DPIA
+ TA_DTM_DIO_OUTPUT_TYPE__INVALID,
+ TA_DTM_DIO_OUTPUT_TYPE__DIRECT,
+ TA_DTM_DIO_OUTPUT_TYPE__DPIA
} ta_dtm_dio_output_type;
struct ta_dtm_topology_update_input_v3 {
@@ -237,11 +237,11 @@ enum ta_hdcp2_hdcp2_msg_id_max_size {
#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
#define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \
- TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6
+ (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6)
// 64 bits boundaries
#define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \
- TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4
+ (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4)
enum ta_hdcp_status {
TA_HDCP_STATUS__SUCCESS = 0x00,
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index afe1f6cce..cc3dc9b58 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -23,34 +23,6 @@
*
*/
-
-
-
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
#ifndef MOD_FREESYNC_H_
#define MOD_FREESYNC_H_
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 5960dd760..8ce6c22e5 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
unsigned int length);
void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 1675314a3..2a3698fd2 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -31,7 +31,7 @@
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
#define bswap16_based_on_endian(big_endian, value) \
- (big_endian) ? cpu_to_be16(value) : cpu_to_le16(value)
+ ((big_endian) ? cpu_to_be16(value) : cpu_to_le16(value))
/* Possible Min Reduction config from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
@@ -973,6 +973,39 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+void set_replay_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t vtotal)
+{
+ link->replay_settings.coasting_vtotal_table[type] = vtotal;
+}
+
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+{
+ link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+}
+
+void calculate_replay_link_off_frame_count(struct dc_link *link,
+ uint16_t vtotal, uint16_t htotal)
+{
+ uint8_t max_link_off_frame_count = 0;
+ uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
+
+ max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
+ pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
+
+ if (htotal != 0 && vtotal != 0)
+ max_link_off_frame_count = htotal * max_deviation_line / (pixel_deviation_per_line * vtotal);
+ else
+ ASSERT(0);
+
+ link->replay_settings.link_off_frame_count_level =
+ max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_BEST ? PR_LINK_OFF_FRAME_COUNT_BEST :
+ max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_GOOD ? PR_LINK_OFF_FRAME_COUNT_GOOD :
+ PR_LINK_OFF_FRAME_COUNT_FAIL;
+
+}
+
bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
{
unsigned int data_points_size;
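For a concrete feel of the frame-count derivation in calculate_replay_link_off_frame_count() above, a worked example with made-up DPCD numbers:

/* Made-up panel values, for illustration only:
 *   htotal = 4400, vtotal = 2250
 *   max_deviation_line = 100, pixel_deviation_per_line = 2
 * max_link_off_frame_count = 4400 * 100 / (2 * 2250) = 97
 * so link_off_frame_count_level resolves to the BEST bucket,
 * assuming PR_LINK_OFF_FRAME_COUNT_BEST is below 97. */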
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index d9e0d67d6..ff7e6f3cd 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -54,6 +54,12 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
unsigned int inst);
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
+void set_replay_coasting_vtotal(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t vtotal);
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
+void calculate_replay_link_off_frame_count(struct dc_link *link,
+ uint16_t vtotal, uint16_t htotal);
bool is_psr_su_specific_panel(struct dc_link *link);
void mod_power_calc_psr_configs(struct psr_config *psr_config,
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 579977f6a..df2c7ffe1 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -255,6 +255,10 @@ enum DC_DEBUG_MASK {
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
DC_ENABLE_DPIA_TRACE = 0x80,
+ DC_ENABLE_DML2 = 0x100,
+ DC_DISABLE_PSR_SU = 0x200,
+ DC_DISABLE_REPLAY = 0x400,
+ DC_DISABLE_IPS = 0x800,
};
enum amd_dpm_forced_level;
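These DC_DEBUG_MASK values are single bits meant to be OR-ed together; on amdgpu they are typically fed in through the dcdebugmask module parameter. A small sketch of combining and testing the new bits (variable names are illustrative):

uint32_t dcdebugmask = DC_DISABLE_PSR_SU | DC_DISABLE_REPLAY;	/* 0x200 | 0x400 = 0x600 */

if (dcdebugmask & DC_DISABLE_REPLAY)
	replay_feature_enabled = false;	/* hypothetical driver flag */

Booting with amdgpu.dcdebugmask=0x600 would have the same effect, assuming the parameter is wired to this mask.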
diff --git a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
new file mode 100644
index 000000000..335980e2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_REG_STATE_H__
+#define __AMDGPU_REG_STATE_H__
+
+enum amdgpu_reg_state {
+ AMDGPU_REG_STATE_TYPE_INVALID = 0,
+ AMDGPU_REG_STATE_TYPE_XGMI = 1,
+ AMDGPU_REG_STATE_TYPE_WAFL = 2,
+ AMDGPU_REG_STATE_TYPE_PCIE = 3,
+ AMDGPU_REG_STATE_TYPE_USR = 4,
+ AMDGPU_REG_STATE_TYPE_USR_1 = 5
+};
+
+enum amdgpu_sysfs_reg_offset {
+ AMDGPU_SYS_REG_STATE_XGMI = 0x0000,
+ AMDGPU_SYS_REG_STATE_WAFL = 0x1000,
+ AMDGPU_SYS_REG_STATE_PCIE = 0x2000,
+ AMDGPU_SYS_REG_STATE_USR = 0x3000,
+ AMDGPU_SYS_REG_STATE_USR_1 = 0x4000,
+ AMDGPU_SYS_REG_STATE_END = 0x5000,
+};
+
+struct amdgpu_reg_state_header {
+ uint16_t structure_size;
+ uint8_t format_revision;
+ uint8_t content_revision;
+ uint8_t state_type;
+ uint8_t num_instances;
+ uint16_t pad;
+};
+
+enum amdgpu_reg_inst_state {
+ AMDGPU_INST_S_OK,
+ AMDGPU_INST_S_EDISABLED,
+ AMDGPU_INST_S_EACCESS,
+};
+
+struct amdgpu_smn_reg_data {
+ uint64_t addr;
+ uint32_t value;
+ uint32_t pad;
+};
+
+struct amdgpu_reg_inst_header {
+ uint16_t instance;
+ uint16_t state;
+ uint16_t num_smn_regs;
+ uint16_t pad;
+};
+
+
+struct amdgpu_regs_xgmi_v1_0 {
+ struct amdgpu_reg_inst_header inst_header;
+
+ struct amdgpu_smn_reg_data smn_reg_values[];
+};
+
+struct amdgpu_reg_state_xgmi_v1_0 {
+ /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_XGMI */
+ struct amdgpu_reg_state_header common_header;
+
+ struct amdgpu_regs_xgmi_v1_0 xgmi_state_regs[];
+};
+
+struct amdgpu_regs_wafl_v1_0 {
+ struct amdgpu_reg_inst_header inst_header;
+
+ struct amdgpu_smn_reg_data smn_reg_values[];
+};
+
+struct amdgpu_reg_state_wafl_v1_0 {
+ /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_WAFL */
+ struct amdgpu_reg_state_header common_header;
+
+ struct amdgpu_regs_wafl_v1_0 wafl_state_regs[];
+};
+
+struct amdgpu_regs_pcie_v1_0 {
+ struct amdgpu_reg_inst_header inst_header;
+
+ uint16_t device_status;
+ uint16_t link_status;
+ uint32_t sub_bus_number_latency;
+ uint32_t pcie_corr_err_status;
+ uint32_t pcie_uncorr_err_status;
+
+ struct amdgpu_smn_reg_data smn_reg_values[];
+};
+
+struct amdgpu_reg_state_pcie_v1_0 {
+ /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_PCIE */
+ struct amdgpu_reg_state_header common_header;
+
+ struct amdgpu_regs_pcie_v1_0 pci_state_regs[];
+};
+
+struct amdgpu_regs_usr_v1_0 {
+ struct amdgpu_reg_inst_header inst_header;
+
+ struct amdgpu_smn_reg_data smn_reg_values[];
+};
+
+struct amdgpu_reg_state_usr_v1_0 {
+ /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_USR */
+ struct amdgpu_reg_state_header common_header;
+
+ struct amdgpu_regs_usr_v1_0 usr_state_regs[];
+};
+
+static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size,
+ uint16_t num_regs)
+{
+ return num_inst *
+ (inst_size + num_regs * sizeof(struct amdgpu_smn_reg_data));
+}
+
+#define amdgpu_asic_get_reg_state_supported(adev) \
+ (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0)
+
+#define amdgpu_asic_get_reg_state(adev, state, buf, size) \
+ ((adev)->asic_funcs->get_reg_state ? \
+ (adev)->asic_funcs->get_reg_state((adev), (state), (buf), \
+ (size)) : \
+ 0)
+
+
+int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev);
+
+#endif
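A sketch of sizing a snapshot buffer with the amdgpu_reginst_size() helper defined above; the instance and register counts are illustrative:

uint16_t num_inst = 2, num_regs = 8;	/* illustrative counts */
size_t payload = amdgpu_reginst_size(num_inst,
				     sizeof(struct amdgpu_regs_xgmi_v1_0),
				     num_regs);
size_t total = sizeof(struct amdgpu_reg_state_header) + payload;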
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
index b64664879..fca72e2ec 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
@@ -6220,12 +6220,20 @@
#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x3
#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x4
#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE__SHIFT 0x19
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE__SHIFT 0x1a
#define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000001L
#define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000002L
#define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000004L
#define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000008L
#define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000010L
#define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE_MASK 0x00800000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE_MASK 0x01000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE_MASK 0x02000000L
+#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE_MASK 0x04000000L
#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL__SHIFT 0x0
#define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN__SHIFT 0x3
#define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL__SHIFT 0x4
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index 7ee3d2911..6f80bfa7e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -8707,10 +8707,10 @@
#define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 2
#define regBIF_BX1_BX_RESET_CNTL 0x00f0
#define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 2
-#define regBIF_BX1_INTERRUPT_CNTL 0x8e11
-#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 5
-#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12
-#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 5
+#define regBIF_BX1_INTERRUPT_CNTL 0x00f1
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 2
+#define regBIF_BX1_INTERRUPT_CNTL2 0x00f2
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 2
#define regBIF_BX1_CLKREQB_PAD_CNTL 0x00f8
#define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 2
#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x00fb
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h
new file mode 100644
index 000000000..a4dd372c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_10_0_2_OFFSET_HEADER
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+// base address: 0x5a000
+#define mmSMUIO_MCM_CONFIG 0x0023
+#define mmSMUIO_MCM_CONFIG_BASE_IDX 0
+#define mmIP_DISCOVERY_VERSION 0x0000
+#define mmIP_DISCOVERY_VERSION_BASE_IDX 1
+#define mmIO_SMUIO_PINSTRAP 0x01b1
+#define mmIO_SMUIO_PINSTRAP_BASE_IDX 1
+#define mmSCRATCH_REGISTER0 0x01b2
+#define mmSCRATCH_REGISTER0_BASE_IDX 1
+#define mmSCRATCH_REGISTER1 0x01b3
+#define mmSCRATCH_REGISTER1_BASE_IDX 1
+#define mmSCRATCH_REGISTER2 0x01b4
+#define mmSCRATCH_REGISTER2_BASE_IDX 1
+#define mmSCRATCH_REGISTER3 0x01b5
+#define mmSCRATCH_REGISTER3_BASE_IDX 1
+#define mmSCRATCH_REGISTER4 0x01b6
+#define mmSCRATCH_REGISTER4_BASE_IDX 1
+#define mmSCRATCH_REGISTER5 0x01b7
+#define mmSCRATCH_REGISTER5_BASE_IDX 1
+#define mmSCRATCH_REGISTER6 0x01b8
+#define mmSCRATCH_REGISTER6_BASE_IDX 1
+#define mmSCRATCH_REGISTER7 0x01b9
+#define mmSCRATCH_REGISTER7_BASE_IDX 1
+
+
+// addressBlock: smuio_smuio_reset_SmuSmuioDec
+// base address: 0x5a300
+#define mmSMUIO_MP_RESET_INTR 0x00c1
+#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
+#define mmSMUIO_SOC_HALT 0x00c2
+#define mmSMUIO_SOC_HALT_BASE_IDX 0
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+
+
+// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
+// base address: 0x5a000
+#define mmPWROK_REFCLK_GAP_CYCLES 0x0001
+#define mmPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1
+#define mmGOLDEN_TSC_INCREMENT_UPPER 0x0004
+#define mmGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1
+#define mmGOLDEN_TSC_INCREMENT_LOWER 0x0005
+#define mmGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_UPPER 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1
+#define mmGFX_GOLDEN_TSC_SHADOW_UPPER 0x0029
+#define mmGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
+#define mmGFX_GOLDEN_TSC_SHADOW_LOWER 0x002a
+#define mmGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
+#define mmSOC_GOLDEN_TSC_SHADOW_UPPER 0x002b
+#define mmSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1
+#define mmSOC_GOLDEN_TSC_SHADOW_LOWER 0x002c
+#define mmSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1
+#define mmSOC_GAP_PWROK 0x002d
+#define mmSOC_GAP_PWROK_BASE_IDX 1
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+// base address: 0x5ac40
+#define mmPWR_VIRT_RESET_REQ 0x0110
+#define mmPWR_VIRT_RESET_REQ_BASE_IDX 1
+#define mmPWR_DISP_TIMER_CONTROL 0x0111
+#define mmPWR_DISP_TIMER_CONTROL_BASE_IDX 1
+#define mmPWR_DISP_TIMER2_CONTROL 0x0113
+#define mmPWR_DISP_TIMER2_CONTROL_BASE_IDX 1
+#define mmPWR_DISP_TIMER_GLOBAL_CONTROL 0x0115
+#define mmPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1
+#define mmPWR_IH_CONTROL 0x0116
+#define mmPWR_IH_CONTROL_BASE_IDX 1
+
+// addressBlock: smuio_smuio_svi0_SmuSmuioDec
+// base address: 0x6f000
+#define mmSMUSVI0_TEL_PLANE0 0x520e
+#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 1
+#define mmSMUSVI0_PLANE0_CURRENTVID 0x5217
+#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h
new file mode 100644
index 000000000..d10ae61c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_10_0_2_SH_MASK_HEADER
+
+// addressBlock: smuio_smuio_misc_SmuSmuioDec
+//SMUIO_MCM_CONFIG
+#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0
+#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2
+#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0x6
+#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10
+#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11
+#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L
+#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL
+#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L
+#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L
+#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L
+#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L
+//IP_DISCOVERY_VERSION
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0
+#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL
+//IO_SMUIO_PINSTRAP
+#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT 0x0
+#define IO_SMUIO_PINSTRAP__AUD__SHIFT 0x3
+#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK 0x00000007L
+#define IO_SMUIO_PINSTRAP__AUD_MASK 0x00000018L
+//SCRATCH_REGISTER0
+#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0
+#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER1
+#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0
+#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER2
+#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0
+#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER3
+#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0
+#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER4
+#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0
+#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER5
+#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0
+#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER6
+#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0
+#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL
+//SCRATCH_REGISTER7
+#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0
+#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL
+
+// addressBlock: smuio_smuio_reset_SmuSmuioDec
+//SMUIO_MP_RESET_INTR
+#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
+#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
+//SMUIO_SOC_HALT
+#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2
+#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3
+#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L
+#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT 0x3
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT 0x4
+#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK 0x00000008L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK 0x00000010L
+
+// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec
+//PWROK_REFCLK_GAP_CYCLES
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL
+#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L
+//GOLDEN_TSC_INCREMENT_UPPER
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_INCREMENT_LOWER
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0
+#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL
+//GOLDEN_TSC_COUNT_UPPER
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL
+//GOLDEN_TSC_COUNT_LOWER
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0
+#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL
+//GFX_GOLDEN_TSC_SHADOW_UPPER
+#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0
+#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//GFX_GOLDEN_TSC_SHADOW_LOWER
+#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0
+#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_UPPER
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL
+//SOC_GOLDEN_TSC_SHADOW_LOWER
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0
+#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL
+//SOC_GAP_PWROK
+#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0
+#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L
+
+// addressBlock: smuio_smuio_swtimer_SmuSmuioDec
+//PWR_VIRT_RESET_REQ
+#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f
+#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L
+//PWR_DISP_TIMER_CONTROL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER2_CONTROL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L
+//PWR_DISP_TIMER_GLOBAL_CONTROL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL
+#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L
+//PWR_IH_CONTROL
+#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f
+#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL
+#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L
+#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L
+#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L
+
+// addressBlock: smuio_smuio_svi0_SmuSmuioDec
+//SMUSVI0_TEL_PLANE0
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT 0x0
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK 0x000000FFL
+#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L
+//SMUSVI0_PLANE0_CURRENTVID
+#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18
+#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L
+
+#endif
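
These SHIFT/MASK pairs follow the usual register-field convention: the mask selects the bits, the shift normalizes them. As a hedged illustration of how such pairs are typically consumed (a standalone userspace sketch, not code from this patch; it reuses the PWR_IH_CONTROL field names defined above):

#include <stdint.h>
#include <stdio.h>

/* Field definitions mirroring the PWR_IH_CONTROL pair above */
#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0
#define PWR_IH_CONTROL__MAX_CREDIT_MASK   0x0000001FL

/* Extract a field from a register value */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Read-modify-write a field into a register value */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, PWR_IH_CONTROL__MAX_CREDIT_MASK,
			PWR_IH_CONTROL__MAX_CREDIT__SHIFT, 0x10);
	printf("MAX_CREDIT = 0x%x\n",
	       get_field(reg, PWR_IH_CONTROL__MAX_CREDIT_MASK,
			 PWR_IH_CONTROL__MAX_CREDIT__SHIFT));
	return 0;
}
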
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index c2ccf3724..edcb85560 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -422,7 +422,7 @@ struct amd_pm_funcs {
int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
- int (*get_asic_baco_capability)(void *handle, bool *cap);
+ bool (*get_asic_baco_capability)(void *handle);
int (*get_asic_baco_state)(void *handle, int *state);
int (*set_asic_baco_state)(void *handle, int state);
int (*get_ppfeature_status)(void *handle, char *buf);
@@ -432,6 +432,7 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
struct pp_smu_wm_range_sets *ranges);
int (*display_disable_memory_clock_switch)(void *handle,
@@ -1225,4 +1226,19 @@ struct gpu_metrics_v3_0 {
/* Metrics table alpha filter time constant [us] */
uint32_t time_filter_alphavalue;
};
+
+struct amdgpu_pmmetrics_header {
+ uint16_t structure_size;
+ uint16_t pad;
+ uint32_t mp1_ip_discovery_version;
+ uint32_t pmfw_version;
+ uint32_t pmmetrics_version;
+};
+
+struct amdgpu_pm_metrics {
+ struct amdgpu_pmmetrics_header common_header;
+
+ uint8_t data[];
+};
+
#endif
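
The amdgpu_pm_metrics layout added above is a self-describing blob: a fixed header carrying version fields and structure_size, followed by a flexible data array. A minimal sketch of how a consumer might validate such a sample, assuming only the layout shown in this hunk (all helper names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the amdgpu_pm_metrics layout introduced above (sketch only) */
struct pmmetrics_header {
	uint16_t structure_size;
	uint16_t pad;
	uint32_t mp1_ip_discovery_version;
	uint32_t pmfw_version;
	uint32_t pmmetrics_version;
};

struct pm_metrics {
	struct pmmetrics_header common_header;
	uint8_t data[];          /* flexible array: raw PMFW metrics table */
};

/* Validate a blob returned by the pm_metrics interface */
static int parse_metrics(const void *buf, size_t len)
{
	const struct pm_metrics *m = buf;

	if (len < sizeof(m->common_header) ||
	    m->common_header.structure_size > len)
		return -1;       /* truncated sample */

	printf("pmfw 0x%x, table v%u, %zu payload bytes\n",
	       m->common_header.pmfw_version,
	       m->common_header.pmmetrics_version,
	       (size_t)m->common_header.structure_size -
	       sizeof(m->common_header));
	return 0;
}

int main(void)
{
	size_t len = sizeof(struct pmmetrics_header) + 8;
	struct pm_metrics *m = calloc(1, len);

	if (!m)
		return 1;
	m->common_header.structure_size = (uint16_t)len;
	m->common_header.pmfw_version = 0x554500;  /* made-up version */
	m->common_header.pmmetrics_version = 1;
	parse_metrics(m, len);
	free(m);
	return 0;
}
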
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index e07e93167..ec5b9ab67 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -232,6 +232,7 @@ union MESAPI_SET_HW_RESOURCES {
};
uint32_t oversubscription_timer;
uint64_t doorbell_info;
+ uint64_t event_intr_history_gpu_mc_ptr;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8ec11da03..6627ee07d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -203,8 +203,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- bool baco_cap;
- int ret = 0;
+ bool ret;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
@@ -222,12 +221,11 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
mutex_lock(&adev->pm.mutex);
- ret = pp_funcs->get_asic_baco_capability(pp_handle,
- &baco_cap);
+ ret = pp_funcs->get_asic_baco_capability(pp_handle);
mutex_unlock(&adev->pm.mutex);
- return ret ? false : baco_cap;
+ return ret;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
@@ -618,6 +616,16 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
enable ? "enable" : "disable", ret);
}
+void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
+}
+
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1319,6 +1327,23 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
return ret;
}
+ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
+ size_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_pm_metrics)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode)
{
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index fee86be55..b4698f985 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1799,6 +1799,44 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return count;
}
+static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attr,
+ uint32_t mask,
+ enum amdgpu_device_attr_states *states)
+{
+ if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
+ *states = ATTR_STATE_UNSUPPORTED;
+
+ return 0;
+}
+
+static ssize_t amdgpu_get_pm_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size = 0;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
/**
* DOC: gpu_metrics
*
@@ -2096,6 +2134,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
.attr_update = ss_bias_attr_update),
AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
+ .attr_update = amdgpu_pm_metrics_attr_update),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
@@ -4177,6 +4217,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
+ /*
+ * If gpu_od is the only member in the list, that means gpu_od is an
+ * empty directory, so remove it.
+ */
+ if (list_is_singular(&adev->pm.od_kobj_list))
+ goto err_out;
+
return 0;
err_out:
@@ -4319,11 +4366,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
+ if (adev->flags & AMD_IS_APU)
+ seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
+ else
+ seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
+ }
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
+ if (adev->flags & AMD_IS_APU)
+ seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
+ else
+ seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
+ }
size = sizeof(value);
seq_printf(m, "\n");
@@ -4349,9 +4404,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* VCN clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "VCN: Disabled\n");
+ seq_printf(m, "VCN: Powered down\n");
} else {
- seq_printf(m, "VCN: Enabled\n");
+ seq_printf(m, "VCN: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4363,9 +4418,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* UVD clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "UVD: Disabled\n");
+ seq_printf(m, "UVD: Powered down\n");
} else {
- seq_printf(m, "UVD: Enabled\n");
+ seq_printf(m, "UVD: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4377,9 +4432,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* VCE clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "VCE: Disabled\n");
+ seq_printf(m, "VCE: Powered down\n");
} else {
- seq_printf(m, "VCE: Enabled\n");
+ seq_printf(m, "VCE: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
}
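
The new pm_metrics sysfs handler above uses the common runtime-PM bracketing idiom: take a reference with pm_runtime_get_sync(), do the read, and drop the reference on every path, including the failed-resume path. A standalone sketch of that shape with stubbed PM calls (assumption: the stubs merely stand in for the real pm_runtime_* API):

#include <stdio.h>

/* Stubs standing in for pm_runtime_* (illustrative only) */
static int pm_get(void)  { puts("resume device"); return 0; }
static void pm_put(void) { puts("allow autosuspend"); }

/* The sysfs-show idiom used by amdgpu_get_pm_metrics() above:
 * take a runtime-PM reference, do the work, always drop the
 * reference -- including on the error path, since get_sync
 * bumps the usage count even when resume fails.
 */
static long show_metrics(char *buf, long len)
{
	long size;
	int ret = pm_get();

	if (ret < 0) {
		pm_put();        /* drop the failed reference too */
		return ret;
	}

	size = snprintf(buf, len, "metrics sample\n");

	pm_put();
	return size;
}

int main(void)
{
	char buf[32];

	return show_metrics(buf, sizeof(buf)) < 0;
}
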
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 482ea3014..3047ffe7f 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -445,6 +445,7 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
@@ -513,6 +514,18 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
long *input, uint32_t size);
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+
+/**
+ * @get_pm_metrics: Get one snapshot of power management metrics from PMFW.
+ * The sample is copied into the pm_metrics buffer, which is expected to be
+ * allocated by the caller; the size of the allocated buffer is passed in.
+ * The maximum size expected for a metrics sample is 4096 bytes.
+ *
+ * Return: Actual size of the metrics sample
+ */
+ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
+ size_t size);
+
int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
uint32_t *fan_mode);
int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
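
Per the contract documented in the comment above (caller-allocated buffer, at most 4096 bytes per sample, actual size returned), a caller would typically look like the following hedged sketch; the helper here is a stub standing in for amdgpu_dpm_get_pm_metrics():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define PM_METRICS_MAX 4096   /* documented max sample size */

/* Stub: pretends a 16-byte sample is available (illustrative) */
static long get_pm_metrics(void *buf, size_t size)
{
	return size >= 16 ? 16 : -EOVERFLOW;
}

int main(void)
{
	void *buf = malloc(PM_METRICS_MAX);   /* caller allocates */
	long n;

	if (!buf)
		return 1;

	n = get_pm_metrics(buf, PM_METRICS_MAX);
	if (n < 0)
		fprintf(stderr, "no metrics: %ld\n", n);
	else
		printf("got %ld bytes\n", n);  /* actual sample size */

	free(buf);
	return 0;
}
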
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 914c15387..aed0e2cef 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1371,21 +1371,18 @@ static int pp_set_active_display_count(void *handle, uint32_t count)
return phm_set_active_display_count(hwmgr, count);
}
-static int pp_get_asic_baco_capability(void *handle, bool *cap)
+static bool pp_get_asic_baco_capability(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- *cap = false;
if (!hwmgr)
- return -EINVAL;
+ return false;
if (!(hwmgr->not_vf && amdgpu_dpm) ||
!hwmgr->hwmgr_func->get_asic_baco_capability)
- return 0;
+ return false;
- hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
-
- return 0;
+ return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr);
}
static int pp_get_asic_baco_state(void *handle, int *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
index 044cda005..e8a9471c1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
@@ -33,21 +33,20 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
- *cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
return 0;
reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
- *cap = true;
+ return true;
- return 0;
+ return false;
}
int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
index be0d98abb..73a773f4c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr);
extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
index de0a37f7c..c66ef9741 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
@@ -28,14 +28,13 @@
#include "vega10_inc.h"
#include "smu9_baco.h"
-int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg, data;
- *cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
+ return false;
WREG32(0x12074, 0xFFF0003B);
data = RREG32(0x12075);
@@ -44,10 +43,10 @@ int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
+ return true;
}
- return 0;
+ return false;
}
int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
index 84e90f801..9ff7c2ea1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr);
extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index 994c0d374..dad4c80ae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
@@ -36,23 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = {
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint32_t reg;
- *cap = false;
if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
+ return false;
if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) {
reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
+ return true;
}
- return 0;
+ return false;
}
int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
index f06471e71..bdad9c915 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
@@ -25,7 +25,7 @@
#include "hwmgr.h"
#include "common_baco.h"
-extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 81650727a..6f536159d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -351,7 +351,7 @@ struct pp_hwmgr_func {
int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
- int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap);
+ bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr);
int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 9e4228232..ad1fd3150 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -2298,6 +2298,7 @@ static uint32_t ci_get_mac_definition(uint32_t value)
case SMU_MAX_ENTRIES_SMIO:
return SMU7_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
+ case SMU_MAX_LEVELS_VDDGFX:
return SMU7_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
return SMU7_MAX_LEVELS_VDDCI;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 97d9802fe..17d2f5bff 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2263,6 +2263,7 @@ static uint32_t iceland_get_mac_definition(uint32_t value)
case SMU_MAX_ENTRIES_SMIO:
return SMU71_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
+ case SMU_MAX_LEVELS_VDDGFX:
return SMU71_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
return SMU71_MAX_LEVELS_VDDCI;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 60dce148b..0ad947df7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1315,6 +1315,187 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu)
return ret;
}
+/**
+ * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
+ *
+ * @smu: smu_context pointer
+ *
+ * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
+ * Returns 0 on success, error on failure.
+ */
+static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
+{
+ struct wbrf_ranges_in_out wbrf_exclusion = {0};
+ struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
+ uint64_t start, end;
+ int ret, i, j;
+
+ ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
+ return ret;
+ }
+
+ /*
+ * The exclusion ranges array we got might be filled with holes and duplicate
+ * entries. For example:
+ * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
+	 * We need to do some sorting to eliminate those holes and duplicate entries.
+ * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
+ */
+ for (i = 0; i < num_of_wbrf_ranges; i++) {
+ start = wifi_bands[i].start;
+ end = wifi_bands[i].end;
+
+ /* get the last valid entry to fill the intermediate hole */
+ if (!start && !end) {
+ for (j = num_of_wbrf_ranges - 1; j > i; j--)
+ if (wifi_bands[j].start && wifi_bands[j].end)
+ break;
+
+ /* no valid entry left */
+ if (j <= i)
+ break;
+
+ start = wifi_bands[i].start = wifi_bands[j].start;
+ end = wifi_bands[i].end = wifi_bands[j].end;
+ wifi_bands[j].start = 0;
+ wifi_bands[j].end = 0;
+ num_of_wbrf_ranges = j;
+ }
+
+ /* eliminate duplicate entries */
+ for (j = i + 1; j < num_of_wbrf_ranges; j++) {
+ if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
+ wifi_bands[j].start = 0;
+ wifi_bands[j].end = 0;
+ }
+ }
+ }
+
+ /* Send the sorted wifi_bands to PMFW */
+ ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
+ /* Try to set the wifi_bands again */
+ if (unlikely(ret == -EBUSY)) {
+ mdelay(5);
+ ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
+ }
+
+ return ret;
+}
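
The compaction loop above is easier to follow outside diff form. The standalone sketch below reproduces the same idea, fill holes from the tail and zero out duplicates, on a plain array using the example input from the comment; sizes and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define NRANGES 8

struct range { uint64_t start, end; };

static void compact(struct range *r, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		if (!r[i].start && !r[i].end) {
			/* fill the hole with the last valid entry */
			for (j = n - 1; j > i; j--)
				if (r[j].start && r[j].end)
					break;
			if (j <= i)
				break;           /* nothing valid left */
			r[i] = r[j];
			r[j].start = r[j].end = 0;
			n = j;
		}
		/* zero out later duplicates of this entry */
		for (j = i + 1; j < n; j++)
			if (r[j].start == r[i].start && r[j].end == r[i].end)
				r[j].start = r[j].end = 0;
	}
}

int main(void)
{
	struct range r[NRANGES] = {
		{2400, 2500}, {0, 0}, {6882, 6962}, {2400, 2500},
		{0, 0}, {6117, 6189}, {0, 0}, {0, 0},
	};
	int i;

	compact(r, NRANGES);
	for (i = 0; i < NRANGES; i++)
		printf("(%llu, %llu) ", (unsigned long long)r[i].start,
		       (unsigned long long)r[i].end);
	puts("");       /* prints (2400, 2500) (6117, 6189) (6882, 6962) ... */
	return 0;
}
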
+
+/**
+ * smu_wbrf_event_handler - handle notify events
+ *
+ * @nb: notifier block
+ * @action: event type
+ * @_arg: event data
+ *
+ * Calls the relevant amdgpu function in response to a wbrf event
+ * notification from the kernel.
+ */
+static int smu_wbrf_event_handler(struct notifier_block *nb,
+ unsigned long action, void *_arg)
+{
+ struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
+
+ switch (action) {
+ case WBRF_CHANGED:
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * smu_wbrf_delayed_work_handler - callback invoked when the delayed work timer expires
+ *
+ * @work: struct work_struct pointer
+ *
+ * The event flood is over; the driver consumes the latest exclusion ranges.
+ */
+static void smu_wbrf_delayed_work_handler(struct work_struct *work)
+{
+ struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
+
+ smu_wbrf_handle_exclusion_ranges(smu);
+}
+
+/**
+ * smu_wbrf_support_check - check wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Verifies via the ACPI interface whether wbrf is supported.
+ */
+static void smu_wbrf_support_check(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
+ acpi_amd_wbrf_supported_consumer(adev->dev);
+
+ if (smu->wbrf_supported)
+ dev_info(adev->dev, "RF interference mitigation is supported\n");
+}
+
+/**
+ * smu_wbrf_init - init driver wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the wbrf
+ * notifier chain if wbrf feature is supported.
+ * Returns 0 on success, error on failure.
+ */
+static int smu_wbrf_init(struct smu_context *smu)
+{
+ int ret;
+
+ if (!smu->wbrf_supported)
+ return 0;
+
+ INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
+
+ smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
+ ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
+ if (ret)
+ return ret;
+
+ /*
+	 * Some wifiband exclusion ranges may already be there
+	 * before our driver loaded. Schedule the work to make
+	 * sure our driver is aware of those exclusion ranges.
+ */
+ schedule_delayed_work(&smu->wbrf_delayed_work,
+ msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
+
+ return 0;
+}
+
+/**
+ * smu_wbrf_fini - tear down driver wbrf support
+ *
+ * @smu: smu_context pointer
+ *
+ * Unregisters with the wbrf notifier chain.
+ */
+static void smu_wbrf_fini(struct smu_context *smu)
+{
+ if (!smu->wbrf_supported)
+ return;
+
+ amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
+
+ cancel_delayed_work_sync(&smu->wbrf_delayed_work);
+}
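
smu_wbrf_init()/smu_wbrf_fini() above follow the standard notifier-plus-deferred-work shape: the notifier callback only schedules debounced work, and teardown unregisters before cancelling the work. A minimal userspace-style sketch of that shape (all names illustrative; the pending flag stands in for the delayed work item):

#include <stdio.h>

#define EVT_CHANGED 1

struct ctx {
	int pending;             /* stands in for the delayed work item */
};

/* Notifier callback: do no real work here, just schedule it */
static int on_event(struct ctx *c, unsigned long action)
{
	if (action != EVT_CHANGED)
		return 0;        /* NOTIFY_DONE analogue */
	c->pending = 1;          /* schedule_delayed_work() analogue */
	return 1;                /* NOTIFY_OK analogue */
}

/* Deferred handler: consume the latest state exactly once */
static void work_fn(struct ctx *c)
{
	if (c->pending) {
		c->pending = 0;
		puts("consume latest exclusion ranges");
	}
}

int main(void)
{
	struct ctx c = { 0 };

	on_event(&c, EVT_CHANGED);   /* burst of events... */
	on_event(&c, EVT_CHANGED);   /* ...coalesces into one run */
	work_fn(&c);
	return 0;
}
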
+
static int smu_smc_hw_setup(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -1407,6 +1588,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (ret)
return ret;
+	/* Enable UclkShadow when wbrf is supported */
+ if (smu->wbrf_supported) {
+ ret = smu_enable_uclk_shadow(smu, true);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
+ return ret;
+ }
+ }
+
/*
* With SCPM enabled, these actions(and relevant messages) are
* not needed and permitted.
@@ -1505,6 +1695,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
*/
ret = smu_set_min_dcef_deep_sleep(smu,
smu->smu_table.boot_values.dcefclk / 100);
+ if (ret) {
+ dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
+ return ret;
+ }
+
+	/* Init wbrf support. Properly set up the notifier */
+ ret = smu_wbrf_init(smu);
+ if (ret)
+ dev_err(adev->dev, "Error during wbrf init call\n");
return ret;
}
@@ -1560,6 +1759,13 @@ static int smu_hw_init(void *handle)
return ret;
}
+ /*
+ * Check whether wbrf is supported. This needs to be done
+ * before SMU setup starts since part of SMU configuration
+ * relies on this.
+ */
+ smu_wbrf_support_check(smu);
+
if (smu->is_apu) {
ret = smu_set_gfx_imu_enable(smu);
if (ret)
@@ -1726,6 +1932,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ smu_wbrf_fini(smu);
+
cancel_work_sync(&smu->throttling_logging_work);
cancel_work_sync(&smu->interrupt_work);
@@ -2980,19 +3188,17 @@ static int smu_set_xgmi_pstate(void *handle,
return ret;
}
-static int smu_get_baco_capability(void *handle, bool *cap)
+static bool smu_get_baco_capability(void *handle)
{
struct smu_context *smu = handle;
- *cap = false;
-
if (!smu->pm_enabled)
- return 0;
+ return false;
- if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
- *cap = smu->ppt_funcs->baco_is_support(smu);
+ if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
+ return false;
- return 0;
+ return smu->ppt_funcs->baco_is_support(smu);
}
static int smu_baco_set_state(void *handle, int state)
@@ -3166,6 +3372,20 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
+static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
+ size_t size)
+{
+ struct smu_context *smu = handle;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->ppt_funcs->get_pm_metrics)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
+}
+
static int smu_enable_mgpu_fan_boost(void *handle)
{
struct smu_context *smu = handle;
@@ -3307,6 +3527,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.set_df_cstate = smu_set_df_cstate,
.set_xgmi_pstate = smu_set_xgmi_pstate,
.get_gpu_metrics = smu_sys_get_gpu_metrics,
+ .get_pm_metrics = smu_sys_get_pm_metrics,
.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index e8329d157..66e84defd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -22,6 +22,9 @@
#ifndef __AMDGPU_SMU_H__
#define __AMDGPU_SMU_H__
+#include <linux/acpi_amd_wbrf.h>
+#include <linux/units.h>
+
#include "amdgpu.h"
#include "kgd_pp_interface.h"
#include "dm_pp_interface.h"
@@ -253,6 +256,7 @@ struct smu_table {
uint64_t mc_address;
void *cpu_addr;
struct amdgpu_bo *bo;
+ uint32_t version;
};
enum smu_perf_level_designation {
@@ -317,6 +321,7 @@ enum smu_table_id {
SMU_TABLE_PACE,
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
+ SMU_TABLE_WIFIBAND,
SMU_TABLE_COUNT,
};
@@ -469,6 +474,12 @@ struct stb_context {
#define WORKLOAD_POLICY_MAX 7
+/*
+ * Configure the wbrf event handling pace: at most one event is
+ * processed every SMU_WBRF_EVENT_HANDLING_PACE ms.
+ */
+#define SMU_WBRF_EVENT_HANDLING_PACE 10
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -568,6 +579,11 @@ struct smu_context {
struct delayed_work swctf_delayed_work;
enum pp_xgmi_plpd_mode plpd_mode;
+
+ /* data structures for wbrf feature support */
+ bool wbrf_supported;
+ struct notifier_block wbrf_notifier;
+ struct delayed_work wbrf_delayed_work;
};
struct i2c_adapter;
@@ -1252,6 +1268,15 @@ struct pptable_funcs {
ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
/**
+ * @get_pm_metrics: Get one snapshot of power management metrics from
+ * PMFW.
+ *
+ * Return: Size of the metrics sample
+ */
+ ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics,
+ size_t size);
+
+ /**
* @enable_mgpu_fan_boost: Enable multi-GPU fan boost.
*/
int (*enable_mgpu_fan_boost)(struct smu_context *smu);
@@ -1364,6 +1389,22 @@ struct pptable_funcs {
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
+
+ /**
+ * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature
+ */
+ bool (*is_asic_wbrf_supported)(struct smu_context *smu);
+
+ /**
+	 * @enable_uclk_shadow: Enable the uclk shadow feature when wbrf is supported
+ */
+ int (*enable_uclk_shadow)(struct smu_context *smu, bool enable);
+
+ /**
+	 * @set_wbrf_exclusion_ranges: notify SMU of the occupied wifi bands
+ */
+ int (*set_wbrf_exclusion_ranges)(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges);
};
typedef enum {
@@ -1490,6 +1531,17 @@ enum smu_baco_seq {
__dst_size); \
})
+typedef struct {
+ uint16_t LowFreq;
+ uint16_t HighFreq;
+} WifiOneBand_t;
+
+typedef struct {
+ uint32_t WifiBandEntryNum;
+ WifiOneBand_t WifiBandEntry[11];
+ uint32_t MmHubPadding[8];
+} WifiBandEntryTable_t;
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
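
WifiBandEntryTable_t above is a firmware ABI structure, so its size and layout must match what PMFW expects byte for byte; MmHubPadding exists only to pad the table out. A hedged sketch of the compile-time size check that typically guards such shared structs (the 80-byte figure is derived from the definition above, not quoted from firmware documentation):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint16_t LowFreq;
	uint16_t HighFreq;
} WifiOneBand_t;

typedef struct {
	uint32_t WifiBandEntryNum;
	WifiOneBand_t WifiBandEntry[11];
	uint32_t MmHubPadding[8];
} WifiBandEntryTable_t;

/* Firmware ABI structs must not silently change size or layout:
 * 4 + 11 * 4 + 8 * 4 = 80 bytes, computed from the definition above.
 */
static_assert(sizeof(WifiBandEntryTable_t) == 80,
	      "WifiBandEntryTable_t layout changed");

int main(void)
{
	printf("table size: %zu bytes\n", sizeof(WifiBandEntryTable_t));
	return 0;
}
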
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index 9dd1ed5b8..b114d14fc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -1615,7 +1615,8 @@ typedef struct {
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
-#define TABLE_COUNT 12
+#define TABLE_WIFIBAND 12
+#define TABLE_COUNT 13
//IH Interupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index 62b7c0daf..8b1496f8c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -1605,7 +1605,8 @@ typedef struct {
#define TABLE_I2C_COMMANDS 9
#define TABLE_DRIVER_INFO 10
#define TABLE_ECCINFO 11
-#define TABLE_COUNT 12
+#define TABLE_WIFIBAND 12
+#define TABLE_COUNT 13
//IH Interupt ID
#define IH_INTERRUPT_ID_TO_DRIVER 0xFE
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
index 8f42771e1..5bb7a63c0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
@@ -24,11 +24,6 @@
#ifndef SMU14_DRIVER_IF_V14_0_0_H
#define SMU14_DRIVER_IF_V14_0_0_H
-// *** IMPORTANT ***
-// SMU TEAM: Always increment the interface version if
-// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 7
-
typedef struct {
int32_t value;
uint32_t numFractionalBits;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index e2ee855c7..e862d323c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -138,10 +138,9 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-
#define PPSMC_MSG_DALNotPresent 0x4E
-
-#define PPSMC_Message_Count 0x4F
+#define PPSMC_MSG_EnableUCLKShadow 0x51
+#define PPSMC_Message_Count 0x52
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
index 6aaefca9b..a6bf9cdd1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
@@ -134,6 +134,7 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-#define PPSMC_Message_Count 0x4D
+#define PPSMC_MSG_EnableUCLKShadow 0x51
+#define PPSMC_Message_Count 0x52
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 9dd47d910..953a76761 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -259,7 +259,9 @@
__SMU_DUMMY_MAP(PowerUpUmsch), \
__SMU_DUMMY_MAP(PowerDownUmsch), \
__SMU_DUMMY_MAP(SetSoftMaxVpe), \
- __SMU_DUMMY_MAP(SetSoftMinVpe),
+ __SMU_DUMMY_MAP(SetSoftMinVpe), \
+ __SMU_DUMMY_MAP(GetMetricsVersion), \
+ __SMU_DUMMY_MAP(EnableUCLKShadow),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 95cb91971..fbd57fa1a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -210,15 +210,8 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu);
int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
-int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
- enum smu_baco_seq baco_seq);
-
bool smu_v13_0_baco_is_support(struct smu_context *smu);
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
-
-int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
-
int smu_v13_0_baco_enter(struct smu_context *smu);
int smu_v13_0_baco_exit(struct smu_context *smu);
@@ -301,5 +294,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
+int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable);
+
+int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index a5b569976..3f7463c1c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -26,8 +26,8 @@
#include "amdgpu_smu.h"
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6
#define FEATURE_MASK(feature) (1ULL << feature)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index ac04ed1e4..40ba7227c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2411,8 +2411,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
.baco_exit = smu_v11_0_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 082101845..836b1df79 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3539,8 +3539,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = navi10_baco_enter,
.baco_exit = navi10_baco_exit,
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 6f37ca7a0..1f18b6188 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -4432,8 +4432,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
.baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = sienna_cichlid_baco_enter,
.baco_exit = sienna_cichlid_baco_exit,
.mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index e3579a8fd..ca0d5a5b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1530,7 +1530,6 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
smu_i2c->port = 0;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &aldebaran_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 68981086b..c486182ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -2201,7 +2201,7 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
return ret;
}
-int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
+static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
enum smu_baco_seq baco_seq)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -2223,33 +2223,14 @@ int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
return 0;
}
-bool smu_v13_0_baco_is_support(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
-
- if (amdgpu_sriov_vf(smu->adev) ||
- !smu_baco->platform_support)
- return false;
-
- /* return true if ASIC is in BACO state already */
- if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
- return true;
-
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
- !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
- return false;
-
- return true;
-}
-
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
+static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
return smu_baco->state;
}
-int smu_v13_0_baco_set_state(struct smu_context *smu,
+static int smu_v13_0_baco_set_state(struct smu_context *smu,
enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -2283,24 +2264,60 @@ int smu_v13_0_baco_set_state(struct smu_context *smu,
return ret;
}
-int smu_v13_0_baco_enter(struct smu_context *smu)
+bool smu_v13_0_baco_is_support(struct smu_context *smu)
{
- int ret = 0;
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
- ret = smu_v13_0_baco_set_state(smu,
- SMU_BACO_STATE_ENTER);
- if (ret)
- return ret;
+ if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
+ return false;
+
+ /* return true if ASIC is in BACO state already */
+ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return true;
- msleep(10);
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
+ !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
+ return false;
- return ret;
+ return true;
+}
+
+int smu_v13_0_baco_enter(struct smu_context *smu)
+{
+ struct smu_baco_context *smu_baco = &smu->smu_baco;
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ return smu_v13_0_baco_set_armd3_sequence(smu,
+ (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO);
+ } else {
+ ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
+ if (!ret)
+ usleep_range(10000, 11000);
+
+ return ret;
+ }
}
int smu_v13_0_baco_exit(struct smu_context *smu)
{
- return smu_v13_0_baco_set_state(smu,
- SMU_BACO_STATE_EXIT);
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
+ /* Wait for PMFW handling for the Dstate change */
+ usleep_range(10000, 11000);
+ ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+ } else {
+ ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
+ }
+
+ if (!ret)
+ adev->gfx.is_poweron = false;
+
+ return ret;
}
int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
@@ -2492,3 +2509,51 @@ int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
return ret == 0 ? 0 : -EINVAL;
}
+
+int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable)
+{
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL);
+}
+
+int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
+ struct freq_band_range *exclusion_ranges)
+{
+ WifiBandEntryTable_t wifi_bands;
+ int valid_entries = 0;
+ int ret, i;
+
+ memset(&wifi_bands, 0, sizeof(wifi_bands));
+ for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) {
+ if (!exclusion_ranges[i].start && !exclusion_ranges[i].end)
+ break;
+
+		/* PMFW expects the inputs to be in MHz units */
+ wifi_bands.WifiBandEntry[valid_entries].LowFreq =
+ DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ);
+ wifi_bands.WifiBandEntry[valid_entries++].HighFreq =
+ DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ);
+ }
+ wifi_bands.WifiBandEntryNum = valid_entries;
+
+ /*
+	 * Per confirmation with the PMFW team, WifiBandEntryNum = 0
+ * is a valid setting.
+ *
+ * Considering the scenarios below:
+ * - At first the wifi device adds an exclusion range e.g. (2400,2500) to
+ * BIOS and our driver gets notified. We will set WifiBandEntryNum = 1
+ * and pass the WifiBandEntry (2400, 2500) to PMFW.
+ *
+ * - Later the wifi device removes the wifiband list added above and
+ * our driver gets notified again. At this time, driver will set
+ * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW.
+ *
+	 * - PMFW may still need to do some uclk shadow updates (e.g. switching
+ * from shadow clock back to primary clock) on receiving this.
+ */
+ ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true);
+ if (ret)
+ dev_warn(smu->adev->dev, "Failed to set wifiband!");
+
+ return ret;
+}
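
The DIV_ROUND_DOWN_ULL/DIV_ROUND_UP_ULL pair in smu_v13_0_set_wbrf_exclusion_ranges() rounds each range outward when converting Hz to MHz, so integer truncation can only widen an exclusion band, never shrink it. A standalone illustration (values are made up):

#include <stdint.h>
#include <stdio.h>

#define HZ_PER_MHZ 1000000ULL

static uint64_t div_round_down(uint64_t n, uint64_t d) { return n / d; }
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* 2400.5 MHz .. 2499.2 MHz, expressed in Hz */
	uint64_t start = 2400500000ULL, end = 2499200000ULL;

	/* floor the lower edge, ceil the upper edge: the band only widens */
	printf("excluded: %llu..%llu MHz\n",
	       (unsigned long long)div_round_down(start, HZ_PER_MHZ),
	       (unsigned long long)div_round_up(end, HZ_PER_MHZ));
	return 0;   /* prints 2400..2500 MHz */
}
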
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9ac6c408d..9c03296f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -169,6 +169,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
+ MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -253,6 +254,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(I2C_COMMANDS),
TAB_MAP(ECCINFO),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(WIFIBAND),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -498,6 +500,9 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
+ sizeof(WifiBandEntryTable_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -2544,16 +2549,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask = 1 << workload_type;
- /* Add optimizations for SMU13.0.0. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7400))) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
+ /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
+ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500)) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ workload_mask |= 1 << workload_type;
+ }
}
return smu_cmn_send_smc_msg_with_param(smu,
@@ -2562,38 +2570,6 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
-static int smu_v13_0_0_baco_enter(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
- return smu_v13_0_baco_set_armd3_sequence(smu,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
- BACO_SEQ_BAMACO : BACO_SEQ_BACO);
- else
- return smu_v13_0_baco_enter(smu);
-}
-
-static int smu_v13_0_0_baco_exit(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
- /* Wait for PMFW handling for the Dstate change */
- usleep_range(10000, 11000);
- ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
- } else {
- ret = smu_v13_0_baco_exit(smu);
- }
-
- if (!ret)
- adev->gfx.is_poweron = false;
-
- return ret;
-}
-
static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2724,7 +2700,6 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v13_0_0_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -2968,6 +2943,20 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
return ret;
}
+static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ return smu->smc_fw_version >= 0x004e6300;
+ case IP_VERSION(13, 0, 10):
+ return smu->smc_fw_version >= 0x00503300;
+ default:
+ return false;
+ }
+}
+
static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
@@ -3082,10 +3071,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.deep_sleep_control = smu_v13_0_deep_sleep_control,
.gfx_ulv_control = smu_v13_0_gfx_ulv_control,
.baco_is_support = smu_v13_0_baco_is_support,
- .baco_get_state = smu_v13_0_baco_get_state,
- .baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_0_baco_enter,
- .baco_exit = smu_v13_0_0_baco_exit,
+ .baco_enter = smu_v13_0_baco_enter,
+ .baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_0_mode1_reset,
.mode2_reset = smu_v13_0_0_mode2_reset,
@@ -3097,6 +3084,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
.notify_display_change = smu_v13_0_notify_display_change,
+ .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
+ .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
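
smu_v13_0_0_wbrf_support_check() above gates the feature on both the MP1 IP version and a minimum PMFW version. The sketch below shows the same gating pattern in isolation; the IP_VER packing is an assumption made for this sketch, mirroring the driver's IP_VERSION macro:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed packing, mirroring IP_VERSION(maj, min, rev) */
#define IP_VER(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static bool wbrf_supported(uint32_t ip_ver, uint32_t fw_ver)
{
	switch (ip_ver) {
	case IP_VER(13, 0, 0):
		return fw_ver >= 0x004e6300;
	case IP_VER(13, 0, 10):
		return fw_ver >= 0x00503300;
	default:
		return false;   /* unknown ASICs opt out */
	}
}

int main(void)
{
	printf("%d\n", wbrf_supported(IP_VER(13, 0, 0), 0x004e7400));
	printf("%d\n", wbrf_supported(IP_VER(13, 0, 10), 0x00500000));
	return 0;
}
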
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index bb98156b2..949131bd1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && !adev->in_s0ix)
+ if (!en && !adev->in_s0ix) {
+		/* Add a GFX reset as a workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 58f63b7a7..78491b04d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -120,6 +120,7 @@ struct mca_ras_info {
#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
+// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -128,6 +129,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0),
MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1),
+ MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
@@ -158,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
- MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
- MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
@@ -171,6 +173,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
};
+// clang-format on
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(SOCCLK, PPCLK_SOCCLK),
CLK_MAP(FCLK, PPCLK_FCLK),
@@ -432,6 +435,41 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
return 0;
}
+static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
+ void *metrics, size_t max_size)
+{
+ struct smu_table_context *smu_tbl_ctxt = &smu->smu_table;
+ uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version;
+ uint32_t table_size = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size;
+ struct amdgpu_pm_metrics *pm_metrics = metrics;
+ uint32_t pmfw_version;
+ int ret;
+
+ if (!pm_metrics || !max_size)
+ return -EINVAL;
+
+ if (max_size < (table_size + sizeof(pm_metrics->common_header)))
+ return -EOVERFLOW;
+
+ /* Don't use cached metrics data */
+ ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
+
+ memset(&pm_metrics->common_header, 0,
+ sizeof(pm_metrics->common_header));
+ pm_metrics->common_header.mp1_ip_discovery_version =
+ IP_VERSION(13, 0, 6);
+ pm_metrics->common_header.pmfw_version = pmfw_version;
+ pm_metrics->common_header.pmmetrics_version = table_version;
+ pm_metrics->common_header.structure_size =
+ sizeof(pm_metrics->common_header) + table_size;
+
+ return pm_metrics->common_header.structure_size;
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -441,6 +479,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
(struct PPTable_t *)smu_table->driver_pptable;
struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
+ uint32_t table_version;
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
@@ -459,6 +498,13 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
if (!retry)
return -ETIME;
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+ &table_version);
+ if (ret)
+ return ret;
+ smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+ table_version;
+
pptable->MaxSocketPowerLimit =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
@@ -1479,7 +1525,6 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
if (smu->smc_fw_version < 0x554800)
return 0;
- amdgpu_ras_set_mca_debug_mode(smu->adev, enable);
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK,
NULL);
@@ -1893,7 +1938,6 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
smu_i2c->port = i;
mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v13_0_6_i2c_algo;
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -2332,16 +2376,6 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_6_post_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
-
- if (!amdgpu_sriov_vf(adev) && adev->ras_enabled)
- return smu_v13_0_6_mca_set_debug_mode(smu, false);
-
- return 0;
-}
-
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -2424,8 +2458,8 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT
static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
{
- uint64_t ipid = entry->regs[MCA_REG_IDX_IPID];
- uint32_t insthi;
+ u64 ipid = entry->regs[MCA_REG_IDX_IPID];
+ u32 instidhi, instid;
/* NOTE: All MCA IPID register share the same format,
* so the driver can share the MCMP1 register header file.
@@ -2434,9 +2468,15 @@ static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_
info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
- info->aid = ((insthi >> 2) & 0x03);
- info->socket_id = insthi & 0x03;
+ /*
+	 * Unified DieID Format: SAASS. A:AID, S:Socket.
+	 * Unified DieID[4] = InstanceId[0]
+	 * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi);
+ instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo);
+ info->aid = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03);
}
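The decode follows the Unified DieID layout from the comment above: InstanceIdLo bit 0 supplies the top socket bit, while InstanceIdHi carries the AID bits and the low socket bits. The same bit arithmetic as a standalone sketch (plain C, illustrative only):

    #include <stdint.h>

    static void decode_unified_die_id(uint32_t instidhi, uint32_t instidlo,
                                      uint32_t *aid, uint32_t *socket)
    {
            *aid = (instidhi >> 2) & 0x03;                         /* AA bits */
            *socket = ((instidlo & 0x1) << 2) | (instidhi & 0x03); /* S..SS bits */
    }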
static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
@@ -2515,9 +2555,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
return 0;
}
- if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
*count = 1;
- else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+ else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
*count = 1;
return 0;
@@ -2528,13 +2568,15 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st
uint32_t *count)
{
u32 ext_error_code;
+ u32 err_cnt;
ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
+ err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
- *count = 1;
+ *count = err_cnt;
else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
- *count = 1;
+ *count = err_cnt;
return 0;
}
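With this hunk the XGMI PCS error count comes from the ERRCNT field of the bank's MISC0 register instead of being hard-coded to one, so a single MCA entry can report several accumulated errors. Schematically (the sample count in the comment is made up):

    u32 err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);

    if (ext_error_code == 0)        /* UE bank */
            *count = err_cnt;       /* e.g. 3, not always 1 */
    else if (ext_error_code == 6)   /* CE bank */
            *count = err_cnt;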
@@ -2610,6 +2652,7 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct
uint32_t instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ instlo &= GENMASK(31, 1);
switch (instlo) {
case 0x36430400: /* SMNAID XCD 0 */
case 0x38430400: /* SMNAID XCD 1 */
@@ -2629,6 +2672,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
uint32_t errcode, instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+ instlo &= GENMASK(31, 1);
if (instlo != 0x03b30400)
return false;
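The added GENMASK(31, 1) mask strips bit 0 of InstanceIdLo before the comparison; under the Unified DieID scheme that bit now encodes part of the socket ID, so instance constants such as 0x03b30400 must match on either socket. Equivalently (value illustrative):

    uint32_t instlo = 0x03b30401;   /* same SMU instance, other socket */

    instlo &= 0xfffffffeu;          /* GENMASK(31, 1): clear the socket bit */
    /* instlo == 0x03b30400, so the per-IP comparison still matches */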
@@ -2851,6 +2895,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
return ret;
}
+static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+	/* ECC info is supported by default */
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2895,6 +2946,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .get_pm_metrics = smu_v13_0_6_get_pm_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
@@ -2904,7 +2956,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_init = smu_v13_0_6_i2c_control_init,
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
- .post_init = smu_v13_0_6_post_init,
+ .get_ecc_info = smu_v13_0_6_get_ecc_info,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 402e0d184..7318964f1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -140,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -222,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
TAB_MAP(OVERDRIVE),
+ TAB_MAP(WIFIBAND),
};
static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -512,6 +514,9 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu)
AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND,
+ sizeof(WifiBandEntryTable_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -2513,38 +2518,6 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
return ret;
}
-static int smu_v13_0_7_baco_enter(struct smu_context *smu)
-{
- struct smu_baco_context *smu_baco = &smu->smu_baco;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
- return smu_v13_0_baco_set_armd3_sequence(smu,
- (smu_baco->maco_support && amdgpu_runtime_pm != 1) ?
- BACO_SEQ_BAMACO : BACO_SEQ_BACO);
- else
- return smu_v13_0_baco_enter(smu);
-}
-
-static int smu_v13_0_7_baco_exit(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
- /* Wait for PMFW handling for the Dstate change */
- usleep_range(10000, 11000);
- ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
- } else {
- ret = smu_v13_0_baco_exit(smu);
- }
-
- if (!ret)
- adev->gfx.is_poweron = false;
-
- return ret;
-}
-
static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2565,6 +2538,11 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu,
NULL);
}
+static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
+{
+ return smu->smc_fw_version > 0x00524600;
+}
+
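WBRF (Wi-Fi RFI mitigation) support on SMU v13.0.7 is thus a pure PMFW version gate: firmware newer than build 0x00524600 is assumed to implement the interface. A hedged sketch of the caller-side check (the wbrf call sites themselves are outside this hunk):

    /* Illustrative: skip WBRF handling on firmware without support. */
    if (!smu_is_asic_wbrf_supported(smu))
            return -EOPNOTSUPP;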
static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
@@ -2673,15 +2651,16 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.baco_is_support = smu_v13_0_baco_is_support,
- .baco_get_state = smu_v13_0_baco_get_state,
- .baco_set_state = smu_v13_0_baco_set_state,
- .baco_enter = smu_v13_0_7_baco_enter,
- .baco_exit = smu_v13_0_7_baco_exit,
+ .baco_enter = smu_v13_0_baco_enter,
+ .baco_exit = smu_v13_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
.set_mp1_state = smu_v13_0_7_set_mp1_state,
.set_df_cstate = smu_v13_0_7_set_df_cstate,
.gpo_control = smu_v13_0_gpo_control,
+ .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
+ .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 01f2ab456..6dae5ad74 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -224,7 +224,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
@@ -233,7 +233,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
- adev->ip_versions[MP1_HWIP][0]);
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
break;
}
@@ -731,7 +731,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 0):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 24a43374a..9310c4758 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1088,6 +1088,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
0, NULL);
}
+static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ uint8_t idx;
+
+	/* Only the SOC and VPE clock information is copied, to provide the VPE DPM settings. */
+ for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+		clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+ clock_table->SocClocks[idx].Vol = 0;
+ }
+
+ for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+		clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+ clock_table->VPEClocks[idx].Vol = 0;
+ }
+
+ return 0;
+}
+
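The loop publishes only the DPM levels PMFW reports as enabled and zero-fills the remainder, so consumers always see fixed-size SOC and VPE tables. The generic shape of the pattern (names are placeholders):

    for (idx = 0; idx < MAX_LEVELS; idx++)
            out[idx].Freq = (idx < enabled_levels) ? in[idx] : 0;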
static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.check_fw_version = smu_v14_0_check_fw_version,
@@ -1118,6 +1137,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
.set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
.dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
.dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
+ .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
};
static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 64766ac69..6f4d21260 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -98,6 +98,9 @@
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
+#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu)
+#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable)
+#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range)
#endif
#endif
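The three new wrappers follow the smu_ppt_funcs() dispatch convention used throughout this header: invoke the ASIC's ppt hook when present, otherwise return the stated default (false, 0, or -EOPNOTSUPP). A simplified model of that convention (the authoritative expansion lives earlier in smu_internal.h):

    /* Simplified: smu_ppt_funcs(intf, default, smu, args...) */
    #define my_ppt_call(smu, intf, def, args...) \
            ((smu)->ppt_funcs && (smu)->ppt_funcs->intf ? \
             (smu)->ppt_funcs->intf(smu, ##args) : (def))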
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index dc01c43f6..d72c22dcf 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -221,7 +221,7 @@ static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
/*
* The size of the ctm is checked in
- * drm_atomic_replace_property_blob_from_id.
+ * drm_property_replace_blob_from_id.
*/
ctm = (struct drm_color_ctm *)state->ctm->data;
for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 15dd667aa..c78687c75 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,8 +7,9 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
int irq = platform_get_irq(pdev, 0);
const struct armada_variant *variant;
struct device_node *port = NULL;
+ struct device_node *np, *parent = dev->of_node;
if (irq < 0)
return irq;
- if (!dev->of_node) {
- const struct platform_device_id *id;
- id = platform_get_device_id(pdev);
- if (!id)
- return -ENXIO;
-
- variant = (const struct armada_variant *)id->driver_data;
- } else {
- const struct of_device_id *match;
- struct device_node *np, *parent = dev->of_node;
-
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match)
- return -ENXIO;
+ variant = device_get_match_data(dev);
+ if (!variant)
+ return -ENXIO;
+ if (parent) {
np = of_get_child_by_name(parent, "ports");
if (np)
parent = np;
@@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "no port node found in %pOF\n", parent);
return -ENXIO;
}
-
- variant = match->data;
}
return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
@@ -1066,10 +1056,9 @@ static int armada_lcd_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &armada_lcd_ops);
}
-static int armada_lcd_remove(struct platform_device *pdev)
+static void armada_lcd_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &armada_lcd_ops);
- return 0;
}
static const struct of_device_id armada_lcd_of_match[] = {
@@ -1095,7 +1084,7 @@ MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
struct platform_driver armada_lcd_platform_driver = {
.probe = armada_lcd_probe,
- .remove = armada_lcd_remove,
+ .remove_new = armada_lcd_remove,
.driver = {
.name = "armada-lcd",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index fa1c67598..e51ecc4f7 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -226,10 +226,9 @@ static int armada_drm_probe(struct platform_device *pdev)
match);
}
-static int armada_drm_remove(struct platform_device *pdev)
+static void armada_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &armada_master_ops);
- return 0;
}
static void armada_drm_shutdown(struct platform_device *pdev)
@@ -249,7 +248,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
- .remove = armada_drm_remove,
+ .remove_new = armada_drm_remove,
.shutdown = armada_drm_shutdown,
.driver = {
.name = "armada-drm",
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 78122b35a..a7a6b7022 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -6,10 +6,10 @@
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
- const struct of_device_id *match;
struct resource *res;
int ret;
@@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- match = of_match_device(aspeed_gfx_match, &pdev->dev);
- if (!match)
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
return -EINVAL;
- config = match->data;
priv->dac_reg = config->dac_reg;
priv->int_clr_reg = config->int_clear_reg;
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index ebb6d8ebd..1e9259416 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
+ u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
+ if (++i > 200)
+ break;
}
}
}
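Before this change the mirror-status poll could spin forever if the DP firmware never reflected the video-enable bit; the new counter bounds the wait at roughly 200 ms (200 iterations of mdelay(1)). The same bounded-poll shape, generically (helper name hypothetical):

    static bool poll_bounded_ms(bool (*done)(void *), void *ctx, unsigned int max_ms)
    {
            unsigned int i;

            for (i = 0; i < max_ms; i++) {
                    if (done(ctx))
                            return true;
                    mdelay(1);      /* busy-wait, acceptable for short windows */
            }
            return false;           /* timed out */
    }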
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index cf5b754f0..90bcb1eb9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
+static bool ast_is_vga_enabled(void __iomem *ioregs)
+{
+ u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER);
+
+ return vgaer & AST_IO_VGAER_VGA_ENABLE;
+}
+
+static void ast_enable_vga(void __iomem *ioregs)
+{
+ __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE);
+ __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL);
+}
+
+/*
+ * Run this function as part of the HW device cleanup; not
+ * when the DRM device gets released.
+ */
+static void ast_enable_mmio_release(void *data)
+{
+ void __iomem *ioregs = (void __force __iomem *)data;
+
+ /* enable standard VGA decode */
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED);
+}
+
+static int ast_enable_mmio(struct device *dev, void __iomem *ioregs)
+{
+ void *data = (void __force *)ioregs;
+
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1,
+ AST_IO_VGACRA1_MMIO_ENABLED |
+ AST_IO_VGACRA1_VGAIO_DISABLED);
+
+ return devm_add_action_or_reset(dev, ast_enable_mmio_release, data);
+}
+
+static void ast_open_key(void __iomem *ioregs)
+{
+ __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD);
+}
+
+static int ast_detect_chip(struct pci_dev *pdev,
+ void __iomem *regs, void __iomem *ioregs,
+ enum ast_chip *chip_out,
+ enum ast_config_mode *config_mode_out)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ enum ast_config_mode config_mode = ast_use_defaults;
+ uint32_t scu_rev = 0xffffffff;
+ enum ast_chip chip;
+ u32 data;
+ u8 vgacrd0, vgacrd1;
+
+ /*
+ * Find configuration mode and read SCU revision
+ */
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
+ /* We do, disable P2A access */
+ config_mode = ast_use_dt;
+ scu_rev = data;
+ } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
+ /*
+	 * The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
+	 * is disabled. We force using P2A if the VGA-only mode bit
+	 * D[7] is set.
+ */
+ vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);
+ vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) {
+
+ /*
+ * We have a P2A bridge and it is enabled.
+ */
+
+ /* Patch AST2500/AST2510 */
+ if ((pdev->revision & 0xf0) == 0x40) {
+ if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
+ ast_patch_ahb_2500(regs);
+ }
+
+ /* Double check that it's actually working */
+ data = __ast_read32(regs, 0xf004);
+ if ((data != 0xffffffff) && (data != 0x00)) {
+ config_mode = ast_use_p2a;
+
+ /* Read SCU7c (silicon revision register) */
+ __ast_write32(regs, 0xf004, 0x1e6e0000);
+ __ast_write32(regs, 0xf000, 0x1);
+ scu_rev = __ast_read32(regs, 0x1207c);
+ }
+ }
+ }
+
+ switch (config_mode) {
+ case ast_use_defaults:
+ dev_info(dev, "Using default configuration\n");
+ break;
+ case ast_use_dt:
+ dev_info(dev, "Using device-tree for configuration\n");
+ break;
+ case ast_use_p2a:
+ dev_info(dev, "Using P2A bridge for configuration\n");
+ break;
+ }
+
+ /*
+ * Identify chipset
+ */
+
+ if (pdev->revision >= 0x50) {
+ chip = AST2600;
+ dev_info(dev, "AST 2600 detected\n");
+ } else if (pdev->revision >= 0x40) {
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ chip = AST2510;
+ dev_info(dev, "AST 2510 detected\n");
+ break;
+ default:
+ chip = AST2500;
+ dev_info(dev, "AST 2500 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x30) {
+ switch (scu_rev & 0x300) {
+ case 0x0100:
+ chip = AST1400;
+ dev_info(dev, "AST 1400 detected\n");
+ break;
+ default:
+ chip = AST2400;
+ dev_info(dev, "AST 2400 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x20) {
+ switch (scu_rev & 0x300) {
+ case 0x0000:
+ chip = AST1300;
+ dev_info(dev, "AST 1300 detected\n");
+ break;
+ default:
+ chip = AST2300;
+ dev_info(dev, "AST 2300 detected\n");
+ break;
+ }
+ } else if (pdev->revision >= 0x10) {
+ switch (scu_rev & 0x0300) {
+ case 0x0200:
+ chip = AST1100;
+ dev_info(dev, "AST 1100 detected\n");
+ break;
+ case 0x0100:
+ chip = AST2200;
+ dev_info(dev, "AST 2200 detected\n");
+ break;
+ case 0x0000:
+ chip = AST2150;
+ dev_info(dev, "AST 2150 detected\n");
+ break;
+ default:
+ chip = AST2100;
+ dev_info(dev, "AST 2100 detected\n");
+ break;
+ }
+ } else {
+ chip = AST2000;
+ dev_info(dev, "AST 2000 detected\n");
+ }
+
+ *chip_out = chip;
+ *config_mode_out = config_mode;
+
+ return 0;
+}
+
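Chip identification keys primarily off the PCI revision ID, with SCU revision bits [9:8] selecting the sibling within a family. Condensed as a lookup (illustrative only; the function above remains authoritative):

    /* rev >= 0x50: AST2600; 0x40: AST2500/2510; 0x30: AST2400/1400;
     * 0x20: AST2300/1300; 0x10: AST2100/2200/2150/1100; else AST2000.
     */
    static const char *ast_family_name(u8 rev)
    {
            if (rev >= 0x50) return "AST2600";
            if (rev >= 0x40) return "AST25xx";
            if (rev >= 0x30) return "AST24xx/14xx";
            if (rev >= 0x20) return "AST23xx/13xx";
            if (rev >= 0x10) return "AST21xx/11xx";
            return "AST2000";
    }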
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ast_device *ast;
- struct drm_device *dev;
+ struct device *dev = &pdev->dev;
int ret;
+ void __iomem *regs;
+ void __iomem *ioregs;
+ enum ast_config_mode config_mode;
+ enum ast_chip chip;
+ struct drm_device *drm;
+ bool need_post = false;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
if (ret)
@@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
- if (IS_ERR(ast))
- return PTR_ERR(ast);
- dev = &ast->base;
+ regs = pcim_iomap(pdev, 1, 0);
+ if (!regs)
+ return -EIO;
+
+ if (pdev->revision >= 0x40) {
+ /*
+ * On AST2500 and later models, MMIO is enabled by
+ * default. Adopt it to be compatible with ARM.
+	 * default. Use it to stay compatible with ARM platforms.
+ resource_size_t len = pci_resource_len(pdev, 1);
+
+ if (len < AST_IO_MM_OFFSET)
+ return -EIO;
+ if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = regs + AST_IO_MM_OFFSET;
+ } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
+ /*
+ * Map I/O registers if we have a PCI BAR for I/O.
+ */
+ resource_size_t len = pci_resource_len(pdev, 2);
+
+ if (len < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = pcim_iomap(pdev, 2, 0);
+ if (!ioregs)
+ return -EIO;
+ } else {
+ /*
+ * Anything else is best effort.
+ */
+ resource_size_t len = pci_resource_len(pdev, 1);
+
+ if (len < AST_IO_MM_OFFSET)
+ return -EIO;
+ if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+ return -EIO;
+ ioregs = regs + AST_IO_MM_OFFSET;
+
+ dev_info(dev, "Platform has no I/O space, using MMIO\n");
+ }
+
+ if (!ast_is_vga_enabled(ioregs)) {
+ dev_info(dev, "VGA not enabled on entry, requesting chip POST\n");
+ need_post = true;
+ }
+
+ /*
+	 * If VGA isn't enabled, we need to enable it now or subsequent
+ * access to the scratch registers will fail.
+ */
+ if (need_post)
+ ast_enable_vga(ioregs);
+ /* Enable extended register access */
+ ast_open_key(ioregs);
+
+ ret = ast_enable_mmio(dev, ioregs);
+ if (ret)
+ return ret;
+
+ ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
+ if (ret)
+ return ret;
+
+ drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+ pci_set_drvdata(pdev, drm);
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(drm, ent->driver_data);
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 32);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 772f3b049..3be5ccf1f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,12 @@ enum ast_tx_chip {
#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
+enum ast_config_mode {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+};
+
#define AST_DRAM_512Mx16 0
#define AST_DRAM_1Gx16 1
#define AST_DRAM_512Mx32 2
@@ -192,12 +198,13 @@ to_ast_bmc_connector(struct drm_connector *connector)
struct ast_device {
struct drm_device base;
- struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
+ enum ast_config_mode config_mode;
enum ast_chip chip;
+
uint32_t dram_bus_width;
uint32_t dram_type;
uint32_t mclk;
@@ -207,6 +214,8 @@ struct ast_device {
unsigned long vram_size;
unsigned long vram_fb_available;
+ struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
+
struct ast_plane primary_plane;
struct ast_plane cursor_plane;
struct drm_crtc crtc;
@@ -234,11 +243,6 @@ struct ast_device {
} output;
bool support_wide_screen;
- enum {
- ast_use_p2a,
- ast_use_dt,
- ast_use_defaults
- } config_mode;
unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */
u8 *dp501_fw_addr;
@@ -250,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
return container_of(dev, struct ast_device, base);
}
-struct ast_device *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags);
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
static inline unsigned long __ast_gen(struct ast_device *ast)
{
@@ -272,55 +280,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
#define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6)
#define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7)
+static inline u8 __ast_read8(const void __iomem *addr, u32 reg)
+{
+ return ioread8(addr + reg);
+}
+
+static inline u32 __ast_read32(const void __iomem *addr, u32 reg)
+{
+ return ioread32(addr + reg);
+}
+
+static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val)
+{
+ iowrite8(val, addr + reg);
+}
+
+static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val)
+{
+ iowrite32(val, addr + reg);
+}
+
+static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index)
+{
+ __ast_write8(addr, reg, index);
+ return __ast_read8(addr, reg + 1);
+}
+
+static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask)
+{
+ u8 val = __ast_read8_i(addr, reg, index);
+
+ return val & read_mask;
+}
+
+static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
+{
+ __ast_write8(addr, reg, index);
+ __ast_write8(addr, reg + 1, val);
+}
+
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+ u8 val)
+{
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+
+ tmp |= val;
+ __ast_write8_i(addr, reg, index, tmp);
+}
+
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
{
- return ioread32(ast->regs + reg);
+ return __ast_read32(ast->regs, reg);
}
static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
{
- iowrite32(val, ast->regs + reg);
+ __ast_write32(ast->regs, reg, val);
}
static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
{
- return ioread8(ast->ioregs + reg);
+ return __ast_read8(ast->ioregs, reg);
}
static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
{
- iowrite8(val, ast->ioregs + reg);
+ __ast_write8(ast->ioregs, reg, val);
}
static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index)
{
- ast_io_write8(ast, base, index);
- ++base;
- return ast_io_read8(ast, base);
+ return __ast_read8_i(ast->ioregs, base, index);
}
static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
u8 preserve_mask)
{
- u8 val = ast_get_index_reg(ast, base, index);
-
- return val & preserve_mask;
+ return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask);
}
static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val)
{
- ast_io_write8(ast, base, index);
- ++base;
- ast_io_write8(ast, base, val);
+ __ast_write8_i(ast->ioregs, base, index, val);
}
static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
u8 preserve_mask, u8 val)
{
- u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask);
-
- tmp |= val;
- ast_set_index_reg(ast, base, index, tmp);
+ __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
}
#define AST_VIDMEM_SIZE_8M 0x00800000
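The __ast_*_i() helpers capture the classic indexed-VGA access pattern: the index byte is written to the base port and the data byte is then transferred through base + 1. Usage, assuming an ioregs mapping as set up in ast_pci_probe() (index values illustrative):

    /* Read scratch register VGACR index 0xd0 */
    u8 vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);

    /* Set bit 2 at VGACR index 0xa1 while preserving all other bits */
    __ast_write8_i_masked(ioregs, AST_IO_VGACRI, 0xa1, 0xff, BIT(2));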
@@ -442,7 +489,7 @@ int ast_mm_init(struct ast_device *ast);
void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_device *ast);
+void ast_patch_ahb_2500(void __iomem *regs);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
index 0e845e7ac..e5d3f7121 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -120,7 +120,6 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
return NULL;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f4ab40e22..2f3ad5f94 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,180 +35,6 @@
#include "ast_drv.h"
-static bool ast_is_vga_enabled(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
- u8 ch;
-
- ch = ast_io_read8(ast, AST_IO_VGAER);
-
- return !!(ch & 0x01);
-}
-
-static void ast_enable_vga(struct drm_device *dev)
-{
- struct ast_device *ast = to_ast_device(dev);
-
- ast_io_write8(ast, AST_IO_VGAER, 0x01);
- ast_io_write8(ast, AST_IO_VGAMR_W, 0x01);
-}
-
-/*
- * Run this function as part of the HW device cleanup; not
- * when the DRM device gets released.
- */
-static void ast_enable_mmio_release(void *data)
-{
- struct ast_device *ast = data;
-
- /* enable standard VGA decode */
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04);
-}
-
-static int ast_enable_mmio(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
-
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
-
- return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
-}
-
-static void ast_open_key(struct ast_device *ast)
-{
- ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8);
-}
-
-static int ast_device_config_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct device_node *np = dev->dev->of_node;
- uint32_t scu_rev = 0xffffffff;
- u32 data;
- u8 jregd0, jregd1;
-
- /*
- * Find configuration mode and read SCU revision
- */
-
- ast->config_mode = ast_use_defaults;
-
- /* Check if we have device-tree properties */
- if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
- /* We do, disable P2A access */
- ast->config_mode = ast_use_dt;
- scu_rev = data;
- } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
- /*
- * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
- * is disabled. We force using P2A if VGA only mode bit
- * is set D[7]
- */
- jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
- jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
-
- /*
- * We have a P2A bridge and it is enabled.
- */
-
- /* Patch AST2500/AST2510 */
- if ((pdev->revision & 0xf0) == 0x40) {
- if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
- ast_patch_ahb_2500(ast);
- }
-
- /* Double check that it's actually working */
- data = ast_read32(ast, 0xf004);
- if ((data != 0xffffffff) && (data != 0x00)) {
- ast->config_mode = ast_use_p2a;
-
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- scu_rev = ast_read32(ast, 0x1207c);
- }
- }
- }
-
- switch (ast->config_mode) {
- case ast_use_defaults:
- drm_info(dev, "Using default configuration\n");
- break;
- case ast_use_dt:
- drm_info(dev, "Using device-tree for configuration\n");
- break;
- case ast_use_p2a:
- drm_info(dev, "Using P2A bridge for configuration\n");
- break;
- }
-
- /*
- * Identify chipset
- */
-
- if (pdev->revision >= 0x50) {
- ast->chip = AST2600;
- drm_info(dev, "AST 2600 detected\n");
- } else if (pdev->revision >= 0x40) {
- switch (scu_rev & 0x300) {
- case 0x0100:
- ast->chip = AST2510;
- drm_info(dev, "AST 2510 detected\n");
- break;
- default:
- ast->chip = AST2500;
- drm_info(dev, "AST 2500 detected\n");
- }
- } else if (pdev->revision >= 0x30) {
- switch (scu_rev & 0x300) {
- case 0x0100:
- ast->chip = AST1400;
- drm_info(dev, "AST 1400 detected\n");
- break;
- default:
- ast->chip = AST2400;
- drm_info(dev, "AST 2400 detected\n");
- }
- } else if (pdev->revision >= 0x20) {
- switch (scu_rev & 0x300) {
- case 0x0000:
- ast->chip = AST1300;
- drm_info(dev, "AST 1300 detected\n");
- break;
- default:
- ast->chip = AST2300;
- drm_info(dev, "AST 2300 detected\n");
- break;
- }
- } else if (pdev->revision >= 0x10) {
- switch (scu_rev & 0x0300) {
- case 0x0200:
- ast->chip = AST1100;
- drm_info(dev, "AST 1100 detected\n");
- break;
- case 0x0100:
- ast->chip = AST2200;
- drm_info(dev, "AST 2200 detected\n");
- break;
- case 0x0000:
- ast->chip = AST2150;
- drm_info(dev, "AST 2150 detected\n");
- break;
- default:
- ast->chip = AST2100;
- drm_info(dev, "AST 2100 detected\n");
- break;
- }
- } else {
- ast->chip = AST2000;
- drm_info(dev, "AST 2000 detected\n");
- }
-
- return 0;
-}
-
static void ast_detect_widescreen(struct ast_device *ast)
{
u8 jreg;
@@ -424,69 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-struct ast_device *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags)
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
{
struct drm_device *dev;
struct ast_device *ast;
- bool need_post = false;
- int ret = 0;
+ int ret;
ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
- return ast;
+ return ERR_CAST(ast);
dev = &ast->base;
- pci_set_drvdata(pdev, dev);
-
- ret = drmm_mutex_init(dev, &ast->ioregs_lock);
- if (ret)
- return ERR_PTR(ret);
-
- ast->regs = pcim_iomap(pdev, 1, 0);
- if (!ast->regs)
- return ERR_PTR(-EIO);
-
- /*
- * After AST2500, MMIO is enabled by default, and it should be adopted
- * to be compatible with Arm.
- */
- if (pdev->revision >= 0x40) {
- ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
- } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
- drm_info(dev, "platform has no IO space, trying MMIO\n");
- ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
- }
-
- /* "map" IO regs if the above hasn't done so already */
- if (!ast->ioregs) {
- ast->ioregs = pcim_iomap(pdev, 2, 0);
- if (!ast->ioregs)
- return ERR_PTR(-EIO);
- }
-
- if (!ast_is_vga_enabled(dev)) {
- drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
- need_post = true;
- }
-
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail.
- */
- if (need_post)
- ast_enable_vga(dev);
-
- /* Enable extended register access */
- ast_open_key(ast);
- ret = ast_enable_mmio(ast);
- if (ret)
- return ERR_PTR(ret);
-
- ret = ast_device_config_init(ast);
- if (ret)
- return ERR_PTR(ret);
+ ast->chip = chip;
+ ast->config_mode = config_mode;
+ ast->regs = regs;
+ ast->ioregs = ioregs;
ast_detect_widescreen(ast);
ast_detect_tx_chip(ast, need_post);
@@ -517,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);
- return ast;
+ return dev;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c20534d0e..a718646a6 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
@@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
if (!edid)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
count = drm_add_edid_modes(connector, edid);
kfree(edid);
@@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
return 0;
@@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
* Protect access to I/O registers from concurrent modesetting
* by acquiring the I/O-register lock.
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
succ = ast_astdp_read_edid(connector->dev, edid);
if (succ < 0)
goto err_mutex_unlock;
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
@@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
return count;
err_mutex_unlock:
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
kfree(edid);
err_drm_connector_update_edid_property:
drm_connector_update_edid_property(connector, NULL);
@@ -1870,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
* display modes. Protect access to I/O registers by acquiring
* the I/O-register lock. Released in atomic_flush().
*/
- mutex_lock(&ast->ioregs_lock);
+ mutex_lock(&ast->modeset_lock);
drm_atomic_helper_commit_tail_rpm(state);
- mutex_unlock(&ast->ioregs_lock);
+ mutex_unlock(&ast->modeset_lock);
}
static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
@@ -1910,6 +1910,10 @@ int ast_mode_config_init(struct ast_device *ast)
struct drm_connector *physical_connector = NULL;
int ret;
+ ret = drmm_mutex_init(dev, &ast->modeset_lock);
+ if (ret)
+ return ret;
+
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 7a993a384..22f548805 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
}
-u32 ast_mindwm(struct ast_device *ast, u32 r)
+static u32 __ast_mindwm(void __iomem *regs, u32 r)
{
- uint32_t data;
+ u32 data;
- ast_write32(ast, 0xf004, r & 0xffff0000);
- ast_write32(ast, 0xf000, 0x1);
+ __ast_write32(regs, 0xf004, r & 0xffff0000);
+ __ast_write32(regs, 0xf000, 0x1);
do {
- data = ast_read32(ast, 0xf004) & 0xffff0000;
+ data = __ast_read32(regs, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
- return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+
+ return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
}
-void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
{
- uint32_t data;
- ast_write32(ast, 0xf004, r & 0xffff0000);
- ast_write32(ast, 0xf000, 0x1);
+ u32 data;
+
+ __ast_write32(regs, 0xf004, r & 0xffff0000);
+ __ast_write32(regs, 0xf000, 0x1);
+
do {
- data = ast_read32(ast, 0xf004) & 0xffff0000;
+ data = __ast_read32(regs, 0xf004) & 0xffff0000;
} while (data != (r & 0xffff0000));
- ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+
+ __ast_write32(regs, 0x10000 + (r & 0x0000ffff), v);
+}
+
+u32 ast_mindwm(struct ast_device *ast, u32 r)
+{
+ return __ast_mindwm(ast->regs, r);
+}
+
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+{
+ __ast_moutdwm(ast->regs, r, v);
}
/*
@@ -1987,17 +2001,18 @@ static bool ast_dram_init_2500(struct ast_device *ast)
return true;
}
-void ast_patch_ahb_2500(struct ast_device *ast)
+void ast_patch_ahb_2500(void __iomem *regs)
{
- u32 data;
+ u32 data;
/* Clear bus lock condition */
- ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
- ast_moutdwm(ast, 0x1e600084, 0x00010000);
- ast_moutdwm(ast, 0x1e600088, 0x00000000);
- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1e6e2070);
- if (data & 0x08000000) { /* check fast reset */
+ __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+ __ast_moutdwm(regs, 0x1e600084, 0x00010000);
+ __ast_moutdwm(regs, 0x1e600088, 0x00000000);
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+ data = __ast_mindwm(regs, 0x1e6e2070);
+ if (data & 0x08000000) { /* check fast reset */
/*
* If "Fast restet" is enabled for ARM-ICE debugger,
* then WDT needs to enable, that
@@ -2009,16 +2024,18 @@ void ast_patch_ahb_2500(struct ast_device *ast)
	 * [1]:= 1:WDT will be cleared and disabled after timeout occurs
* [0]:= 1:WDT enable
*/
- ast_moutdwm(ast, 0x1E785004, 0x00000010);
- ast_moutdwm(ast, 0x1E785008, 0x00004755);
- ast_moutdwm(ast, 0x1E78500c, 0x00000033);
+ __ast_moutdwm(regs, 0x1E785004, 0x00000010);
+ __ast_moutdwm(regs, 0x1E785008, 0x00004755);
+ __ast_moutdwm(regs, 0x1E78500c, 0x00000033);
udelay(1000);
}
+
do {
- ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
- data = ast_mindwm(ast, 0x1e6e2000);
- } while (data != 1);
- ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */
+ __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+ data = __ast_mindwm(regs, 0x1e6e2000);
+ } while (data != 1);
+
+ __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
}
void ast_post_chip_2500(struct drm_device *dev)
@@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev)
reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
/* Clear bus lock condition */
- ast_patch_ahb_2500(ast);
+ ast_patch_ahb_2500(ast->regs);
/* Disable watchdog */
ast_moutdwm(ast, 0x1E78502C, 0x00000000);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 555286ecf..62dddbf3f 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -10,10 +10,17 @@
*/
#define AST_IO_MM_OFFSET (0x380)
+#define AST_IO_MM_LENGTH (128)
#define AST_IO_VGAARI_W (0x40)
+
#define AST_IO_VGAMR_W (0x42)
+#define AST_IO_VGAMR_R (0x4c)
+#define AST_IO_VGAMR_IOSEL BIT(0)
+
#define AST_IO_VGAER (0x43)
+#define AST_IO_VGAER_VGA_ENABLE BIT(0)
+
#define AST_IO_VGASRI (0x44)
#define AST_IO_VGADRR (0x47)
#define AST_IO_VGADWR (0x48)
@@ -21,14 +28,15 @@
#define AST_IO_VGAGRI (0x4E)
#define AST_IO_VGACRI (0x54)
+#define AST_IO_VGACR80_PASSWORD (0xa8)
+#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
+#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-#define AST_IO_VGAMR_R (0x4C)
-
/*
* Display Transmitter Type
*/
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3e6a4e204..efd996f6c 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -12,6 +12,23 @@ config DRM_PANEL_BRIDGE
help
DRM bridge wrapper of DRM panels
+config DRM_AUX_BRIDGE
+ tristate
+ depends on DRM_BRIDGE && OF
+ select AUXILIARY_BUS
+ select DRM_PANEL_BRIDGE
+ help
+ Simple transparent bridge that is used by several non-DRM drivers to
+	  build bridge chains.
+
+config DRM_AUX_HPD_BRIDGE
+ tristate
+ depends on DRM_BRIDGE && OF
+ select AUXILIARY_BUS
+ help
+ Simple bridge that terminates the bridge chain and provides HPD
+ support.
+
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 2b892b7ed..017b58327 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o
+obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
new file mode 100644
index 000000000..b29980f95
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_bridge_ida);
+
+static void drm_aux_bridge_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&drm_aux_bridge_ida, adev->id);
+
+ kfree(adev);
+}
+
+static void drm_aux_bridge_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_aux_bridge_register - Create a simple bridge device to link the chain
+ * @parent: device instance providing this bridge
+ *
+ * Creates a simple DRM bridge that doesn't implement any drm_bridge
+ * operations. Such bridges merely fill a place in the bridge chain linking
+ * surrounding DRM bridges.
+ *
+ * Return: zero on success, negative error code on failure
+ */
+int drm_aux_bridge_register(struct device *parent)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(adev);
+ return ret;
+ }
+
+ adev->id = ret;
+ adev->name = "aux_bridge";
+ adev->dev.parent = parent;
+ adev->dev.of_node = of_node_get(parent->of_node);
+ adev->dev.release = drm_aux_bridge_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ ida_free(&drm_aux_bridge_ida, adev->id);
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev);
+}
+EXPORT_SYMBOL_GPL(drm_aux_bridge_register);
+
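Registration ties the auxiliary device's lifetime to the parent through devm, while the release callback returns the IDA slot and frees the allocation when the last reference drops. Typical use from a wrapping driver's probe (the caller below is hypothetical):

    static int my_encoder_probe(struct platform_device *pdev)
    {
            int ret;

            ret = drm_aux_bridge_register(&pdev->dev);
            if (ret)
                    return ret;
            /* an "aux_bridge" auxiliary device now fills the chain */
            return 0;
    }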
+struct drm_aux_bridge_data {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct device *dev;
+};
+
+static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct drm_aux_bridge_data *data;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ data = container_of(bridge, struct drm_aux_bridge_data, bridge);
+
+ return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs drm_aux_bridge_funcs = {
+ .attach = drm_aux_bridge_attach,
+};
+
+static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct drm_aux_bridge_data *data;
+
+ data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &auxdev->dev;
+ data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
+ if (IS_ERR(data->next_bridge))
+ return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
+ "failed to acquire drm_bridge\n");
+
+ data->bridge.funcs = &drm_aux_bridge_funcs;
+ data->bridge.of_node = data->dev->of_node;
+
+ return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_bridge_table[] = {
+ { .name = KBUILD_MODNAME ".aux_bridge" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table);
+
+static struct auxiliary_driver drm_aux_bridge_drv = {
+ .name = "aux_bridge",
+ .id_table = drm_aux_bridge_table,
+ .probe = drm_aux_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM transparent bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
new file mode 100644
index 000000000..6886db2d9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_hpd_bridge_ida);
+
+struct drm_aux_hpd_bridge_data {
+ struct drm_bridge bridge;
+ struct device *dev;
+};
+
+static void drm_aux_hpd_bridge_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+
+ of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
+
+ kfree(adev);
+}
+
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
+{
+ auxiliary_device_uninit(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_alloc - allocate an HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Creates a simple DRM bridge with the type set to
+ * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
+ * able to send HPD events.
+ *
+ * Return: bridge auxiliary device pointer or an error pointer
+ */
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ adev->id = ret;
+ adev->name = "dp_hpd_bridge";
+ adev->dev.parent = parent;
+ adev->dev.of_node = of_node_get(parent->of_node);
+ adev->dev.release = drm_aux_hpd_bridge_release;
+ adev->dev.platform_data = of_node_get(np);
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
+ ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+ auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register an HPD DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Returns: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ int ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register an HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+ if (IS_ERR(adev))
+ return ERR_CAST(adev);
+
+ ret = devm_drm_dp_hpd_bridge_add(parent, adev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &adev->dev;
+}
+EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register);
+
+/**
+ * drm_aux_hpd_bridge_notify - notify hot plug detection events
+ * @dev: device created for the HPD bridge
+ * @status: output connection status
+ *
+ * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug
+ * detection events for bridges created via drm_dp_hpd_bridge_register().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev);
+
+ if (!data)
+ return;
+
+ drm_bridge_hpd_notify(&data->bridge, status);
+}
+EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
+
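A consumer registers the bridge once at probe time and then forwards connector state changes from its own interrupt or notification path. A hedged usage sketch (device and OF-node names are assumptions):

    struct device *hpd_dev;

    hpd_dev = drm_dp_hpd_bridge_register(dev, dp_of_node);
    if (IS_ERR(hpd_dev))
            return PTR_ERR(hpd_dev);

    /* later, e.g. from a threaded IRQ handler: */
    drm_aux_hpd_bridge_notify(hpd_dev, connector_status_connected);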
+static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = {
+ .attach = drm_aux_hpd_bridge_attach,
+};
+
+static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct drm_aux_hpd_bridge_data *data;
+
+ data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &auxdev->dev;
+ data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
+ data->bridge.of_node = dev_get_platdata(data->dev);
+ data->bridge.ops = DRM_BRIDGE_OP_HPD;
+ data->bridge.type = id->driver_data;
+
+ auxiliary_set_drvdata(auxdev, data);
+
+ return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = {
+ { .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table);
+
+static struct auxiliary_driver drm_aux_hpd_bridge_drv = {
+ .name = "aux_hpd_bridge",
+ .id_table = drm_aux_hpd_bridge_table,
+ .probe = drm_aux_hpd_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_hpd_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM HPD bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 6af565ac3..7d4705274 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2596,11 +2596,10 @@ clk_disable:
return ret;
}
-static int cdns_mhdp_remove(struct platform_device *pdev)
+static void cdns_mhdp_remove(struct platform_device *pdev)
{
struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
unsigned long timeout = msecs_to_jiffies(100);
- bool stop_fw = false;
int ret;
drm_bridge_remove(&mhdp->bridge);
@@ -2608,18 +2607,19 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
ret = wait_event_timeout(mhdp->fw_load_wq,
mhdp->hw_state == MHDP_HW_READY,
timeout);
- if (ret == 0)
- dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
- __func__);
- else
- stop_fw = true;
-
spin_lock(&mhdp->start_lock);
mhdp->hw_state = MHDP_HW_STOPPED;
spin_unlock(&mhdp->start_lock);
- if (stop_fw)
+ if (ret == 0) {
+ dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
+ __func__);
+ } else {
ret = cdns_mhdp_set_firmware_active(mhdp, false);
+ if (ret)
+ dev_err(mhdp->dev, "Failed to stop firmware (%pe)\n",
+ ERR_PTR(ret));
+ }
phy_exit(mhdp->phy);
@@ -2634,8 +2634,6 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
clk_disable_unprepare(mhdp->clk);
-
- return ret;
}
static const struct of_device_id mhdp_ids[] = {
@@ -2658,7 +2656,7 @@ static struct platform_driver mhdp_driver = {
.of_match_table = mhdp_ids,
},
.probe = cdns_mhdp_probe,
- .remove = cdns_mhdp_remove,
+ .remove_new = cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e5839c89a..97d4af3d1 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -43,6 +43,8 @@ struct lt8912 {
struct videomode mode;
+ struct regulator_bulk_data supplies[7];
+
u8 data_lanes;
bool is_power_on;
};
@@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt)
static int lt8912_hard_power_on(struct lt8912 *lt)
{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies);
+ if (ret)
+ return ret;
+
gpiod_set_value_cansleep(lt->gp_reset, 0);
msleep(20);
@@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt)
{
gpiod_set_value_cansleep(lt->gp_reset, 1);
msleep(20);
+
+ regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies);
+
lt->is_power_on = false;
}
@@ -632,6 +643,48 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
.get_edid = lt8912_bridge_get_edid,
};
+static int lt8912_bridge_resume(struct device *dev)
+{
+ struct lt8912 *lt = dev_get_drvdata(dev);
+ int ret;
+
+ ret = lt8912_hard_power_on(lt);
+ if (ret)
+ return ret;
+
+ ret = lt8912_soft_power_on(lt);
+ if (ret)
+ return ret;
+
+ return lt8912_video_on(lt);
+}
+
+static int lt8912_bridge_suspend(struct device *dev)
+{
+ struct lt8912 *lt = dev_get_drvdata(dev);
+
+ lt8912_hard_power_off(lt);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume);
+
+static int lt8912_get_regulators(struct lt8912 *lt)
+{
+ unsigned int i;
+ const char * const supply_names[] = {
+ "vdd", "vccmipirx", "vccsysclk", "vcclvdstx",
+ "vcchdmitx", "vcclvdspll", "vcchdmipll"
+ };
+
+ for (i = 0; i < ARRAY_SIZE(lt->supplies); i++)
+ lt->supplies[i].supply = supply_names[i];
+
+ return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies),
+ lt->supplies);
+}
+
static int lt8912_parse_dt(struct lt8912 *lt)
{
struct gpio_desc *gp_reset;
@@ -683,6 +736,10 @@ static int lt8912_parse_dt(struct lt8912 *lt)
goto err_free_host_node;
}
+ ret = lt8912_get_regulators(lt);
+ if (ret)
+ goto err_free_host_node;
+
of_node_put(port_node);
return 0;
@@ -768,6 +825,7 @@ static struct i2c_driver lt8912_i2c_driver = {
.driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
+ .pm = pm_sleep_ptr(&lt8912_bridge_pm_ops),
},
.probe = lt8912_probe,
.remove = lt8912_remove,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 52d91a0df..aca5bb086 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -515,7 +515,6 @@ static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi)
init_completion(&i2c->cmp);
adap = &i2c->adap;
- adap->class = I2C_CLASS_DDC;
adap->owner = THIS_MODULE;
adap->dev.parent = hdmi->dev;
adap->algo = &dw_hdmi_algorithm;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index b5464199b..62cc3893d 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1415,11 +1415,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (!pdata->pwm_enabled) {
- ret = pm_runtime_get_sync(pdata->dev);
- if (ret < 0) {
- pm_runtime_put_sync(pdata->dev);
+ ret = pm_runtime_resume_and_get(chip->dev);
+ if (ret < 0)
return ret;
- }
}
if (state->enabled) {
@@ -1433,7 +1431,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX),
SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX));
if (ret) {
- dev_err(pdata->dev, "failed to mux in PWM function\n");
+ dev_err(chip->dev, "failed to mux in PWM function\n");
goto out;
}
}
@@ -1509,7 +1507,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div);
if (ret) {
- dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n");
+ dev_err(chip->dev, "failed to update PWM_PRE_DIV\n");
goto out;
}
@@ -1521,7 +1519,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED);
ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv);
if (ret) {
- dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n");
+ dev_err(chip->dev, "failed to update PWM_EN/PWM_INV\n");
goto out;
}
@@ -1529,7 +1527,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
out:
if (!pdata->pwm_enabled)
- pm_runtime_put_sync(pdata->dev);
+ pm_runtime_put_sync(chip->dev);
return ret;
}
@@ -1589,12 +1587,14 @@ static int ti_sn_pwm_probe(struct auxiliary_device *adev,
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
- pdata->pchip.dev = pdata->dev;
+ pdata->pchip.dev = &adev->dev;
pdata->pchip.ops = &ti_sn_pwm_ops;
pdata->pchip.npwm = 1;
pdata->pchip.of_xlate = of_pwm_single_xlate;
pdata->pchip.of_pwm_n_cells = 1;
+ devm_pm_runtime_enable(&adev->dev);
+
return pwmchip_add(&pdata->pchip);
}
@@ -1605,7 +1605,7 @@ static void ti_sn_pwm_remove(struct auxiliary_device *adev)
pwmchip_remove(&pdata->pchip);
if (pdata->pwm_enabled)
- pm_runtime_put_sync(pdata->dev);
+ pm_runtime_put_sync(&adev->dev);
}
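The ti-sn65dsi86 hunks above also switch from pm_runtime_get_sync(), which leaves its usage-count reference behind even when the resume fails, to pm_runtime_resume_and_get(), which drops the reference itself on failure. The idiom in isolation, with a hypothetical accessor:

    #include <linux/pm_runtime.h>

    static int example_hw_access(struct device *dev)
    {
            int ret;

            ret = pm_runtime_resume_and_get(dev);
            if (ret < 0)
                    return ret;     /* reference already dropped on failure */

            /* ... touch the hardware ... */

            pm_runtime_put_sync(dev);
            return 0;
    }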
static const struct auxiliary_device_id ti_sn_pwm_id_table[] = {
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index b588fea12..f9fb35683 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -179,13 +179,11 @@ static int tpd12s015_probe(struct platform_device *pdev)
return 0;
}
-static int tpd12s015_remove(struct platform_device *pdev)
+static void tpd12s015_remove(struct platform_device *pdev)
{
struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
drm_bridge_remove(&tpd->bridge);
-
- return 0;
}
static const struct of_device_id tpd12s015_of_match[] = {
@@ -197,7 +195,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
static struct platform_driver tpd12s015_driver = {
.probe = tpd12s015_probe,
- .remove = tpd12s015_remove,
+ .remove_new = tpd12s015_remove,
.driver = {
.name = "tpd12s015",
.of_match_table = tpd12s015_of_match,
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index b4f653417..8dbce9919 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -186,6 +186,7 @@ CONFIG_HW_RANDOM_MTK=y
CONFIG_MTK_DEVAPC=y
CONFIG_PWM_MTK_DISP=y
CONFIG_MTK_CMDQ=y
+CONFIG_REGULATOR_DA9211=y
# For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware.
CONFIG_ARCH_TEGRA=y
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index e5c5dcedd..f73f3471e 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -19,7 +19,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
- DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc-usb-host.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtb"
@@ -58,6 +58,9 @@ git config --global user.email "fdo@example.com"
git config --global user.name "freedesktop.org CI"
git config --global pull.rebase true
+# cleanup git state on the worker
+rm -rf .git/rebase-merge
+
# Try to merge fixes from target repo
if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then
git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes
@@ -75,19 +78,19 @@ else
fi
fi
-for opt in $ENABLE_KCONFIGS; do
- echo CONFIG_$opt=y >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
-done
-for opt in $DISABLE_KCONFIGS; do
- echo CONFIG_$opt=n >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config
-done
-
if [[ -n "${MERGE_FRAGMENT}" ]]; then
./scripts/kconfig/merge_config.sh ${DEFCONFIG} drivers/gpu/drm/ci/${MERGE_FRAGMENT}
else
make `basename ${DEFCONFIG}`
fi
+for opt in $ENABLE_KCONFIGS; do
+ ./scripts/config --enable CONFIG_$opt
+done
+for opt in $DISABLE_KCONFIGS; do
+ ./scripts/config --disable CONFIG_$opt
+done
+
make ${KERNEL_IMAGE_NAME}
mkdir -p /lava-files/
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index aeb9bab1b..084e3ff8e 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,11 +1,11 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha edfbf74df1d4d6ce54ffe24566108be0e1a98c3d
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 9d162de9a05155e1c4041857a5848842749164cf
UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm
TARGET_BRANCH: drm-next
- IGT_VERSION: d1db7333d9c5fbbb05e50b0804123950d9dc1c46
+ IGT_VERSION: d2af13d9f5be5ce23d996e4afd3e45990f5ab977
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git
DEQP_RUNNER_GIT_TAG: v0.15.0
@@ -25,7 +25,9 @@ variables:
# per-job artifact storage on MinIO
JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
# default kernel for rootfs before injecting the current kernel tree
- KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/v6.4.12-for-mesa-ci-f6b4ad45f48d
+ KERNEL_REPO: "gfx-ci/linux"
+ KERNEL_TAG: "v6.6.4-for-mesa-ci-e4f4c500f7fb"
+ KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
LAVA_TAGS: subset-1-gfx
LAVA_JOB_PRIORITY: 30
@@ -133,6 +135,11 @@ stages:
- if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
+.never-post-merge-rules:
+ rules:
+ - if: *is-post-merge
+ when: never
+
# Rule to filter for only scheduled pipelines.
.scheduled_pipeline-rules:
rules:
@@ -150,6 +157,7 @@ stages:
.build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
# Run automatically once all dependency jobs have passed
- when: on_success
@@ -157,6 +165,7 @@ stages:
.container+build-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
- when: manual
.ci-deqp-artifacts:
@@ -175,6 +184,7 @@ stages:
.container-rules:
rules:
- !reference [.no_scheduled_pipelines-rules, rules]
+ - !reference [.never-post-merge-rules, rules]
# Run pipeline by default in the main project if any CI pipeline
# configuration files were changed, to ensure docker images are up to date
- if: *is-post-merge
diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh
index 2f815ee3a..f1a08b9b1 100755
--- a/drivers/gpu/drm/ci/igt_runner.sh
+++ b/drivers/gpu/drm/ci/igt_runner.sh
@@ -15,15 +15,21 @@ cat /sys/kernel/debug/device_component/*
'
# Dump drm state to confirm that kernel was able to find a connected display:
-# TODO this path might not exist for all drivers.. maybe run modetest instead?
set +e
cat /sys/kernel/debug/dri/*/state
set -e
case "$DRIVER_NAME" in
- rockchip|mediatek|meson)
+ rockchip|meson)
export IGT_FORCE_DRIVER="panfrost"
;;
+ mediatek)
+ if [ "$GPU_VERSION" = "mt8173" ]; then
+ export IGT_FORCE_DRIVER=${DRIVER_NAME}
+ elif [ "$GPU_VERSION" = "mt8183" ]; then
+ export IGT_FORCE_DRIVER="panfrost"
+ fi
+ ;;
amdgpu)
# Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib
mv /install/modules/lib/modules/* /lib/modules/.
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 2aa4b1916..9faf76e55 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -102,7 +102,7 @@ msm:apq8016:
stage: msm
variables:
DRIVER_NAME: msm
- BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb
+ BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc-usb-host.dtb
GPU_VERSION: apq8016
# disabling unused clocks conflicts with the MDSS runtime PM trying to
# disable those clocks and causes boot to fail.
@@ -111,9 +111,6 @@ msm:apq8016:
RUNNER_TAG: google-freedreno-db410c
script:
- ./install/bare-metal/fastboot.sh
- rules:
- # TODO: current issue: it is not fiding the NFS root. Fix and remove this rule.
- - when: never
msm:apq8096:
extends:
@@ -283,9 +280,6 @@ mediatek:mt8173:
DEVICE_TYPE: mt8173-elm-hana
GPU_VERSION: mt8173
RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana
- rules:
- # TODO: current issue: device is hanging. Fix and remove this rule.
- - when: never
mediatek:mt8183:
extends:
@@ -333,16 +327,16 @@ virtio_gpu:none:
GPU_VERSION: none
extends:
- .test-gl
+ - .test-rules
tags:
- kvm
script:
- ln -sf $CI_PROJECT_DIR/install /install
- mv install/bzImage /lava-files/bzImage
+ - mkdir -p $CI_PROJECT_DIR/results
+ - ln -sf $CI_PROJECT_DIR/results /results
- install/crosvm-runner.sh install/igt_runner.sh
needs:
- debian/x86_64_test-gl
- testing:x86_64
- igt:x86_64
- rules:
- # TODO: current issue: malloc(): corrupted top size. Fix and remove this rule.
- - when: never
\ No newline at end of file
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index 671916067..ef0cb7c36 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -1,5 +1,4 @@
kms_3d,Fail
-kms_addfb_basic@addfb25-bad-modifier,Fail
kms_bw@linear-tiling-1-displays-1920x1080p,Fail
kms_bw@linear-tiling-1-displays-2560x1440p,Fail
kms_bw@linear-tiling-1-displays-3840x2160p,Fail
@@ -9,13 +8,19 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail
kms_bw@linear-tiling-3-displays-1920x1080p,Fail
kms_bw@linear-tiling-3-displays-2560x1440p,Fail
kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_color@invalid-gamma-lut-sizes,Fail
kms_color@pipe-A-invalid-gamma-lut-sizes,Fail
kms_color@pipe-B-invalid-gamma-lut-sizes,Fail
-kms_force_connector_basic@force-connector-state,Fail
+kms_cursor_legacy@cursor-vs-flip-atomic,Fail
+kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-suspend,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
kms_force_connector_basic@force-edid,Fail
kms_force_connector_basic@force-load-detect,Fail
kms_force_connector_basic@prune-stale-modes,Fail
-kms_invalid_mode@int-max-clock,Fail
+kms_hdmi_inject@inject-4k,Fail
kms_plane_scaling@planes-upscale-20x20,Fail
kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail
kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail
@@ -27,3 +32,5 @@ kms_properties@get_properties-sanity-atomic,Fail
kms_properties@plane-properties-atomic,Fail
kms_properties@plane-properties-legacy,Fail
kms_rmfb@close-fd,Fail
+kms_selftest@drm_format,Timeout
+kms_selftest@drm_format_helper,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 9981682fe..d39d254c9 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -6,10 +6,15 @@ kms_cursor_legacy@all-pipes-single-bo,Fail
kms_cursor_legacy@all-pipes-single-move,Fail
kms_cursor_legacy@all-pipes-torture-bo,Fail
kms_cursor_legacy@all-pipes-torture-move,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
kms_cursor_legacy@pipe-A-forked-bo,Fail
kms_cursor_legacy@pipe-A-forked-move,Fail
kms_cursor_legacy@pipe-A-single-bo,Fail
kms_cursor_legacy@pipe-A-single-move,Fail
kms_cursor_legacy@pipe-A-torture-bo,Fail
kms_cursor_legacy@pipe-A-torture-move,Fail
+kms_force_connector_basic@force-edid,Fail
kms_hdmi_inject@inject-4k,Fail
+kms_selftest@drm_format,Timeout
+kms_selftest@drm_format_helper,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
index 9586b2339..007f21e56 100644
--- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt
@@ -10,6 +10,49 @@ kms_bw@linear-tiling-1-displays-3840x2160p,Fail
kms_bw@linear-tiling-2-displays-1920x1080p,Fail
kms_bw@linear-tiling-2-displays-2560x1440p,Fail
kms_bw@linear-tiling-2-displays-3840x2160p,Fail
+kms_bw@linear-tiling-3-displays-1920x1080p,Fail
+kms_bw@linear-tiling-3-displays-2560x1440p,Fail
+kms_bw@linear-tiling-3-displays-3840x2160p,Fail
+kms_bw@linear-tiling-4-displays-1920x1080p,Fail
+kms_bw@linear-tiling-4-displays-2560x1440p,Fail
+kms_bw@linear-tiling-4-displays-3840x2160p,Fail
+kms_bw@linear-tiling-5-displays-1920x1080p,Fail
+kms_bw@linear-tiling-5-displays-2560x1440p,Fail
+kms_bw@linear-tiling-5-displays-3840x2160p,Fail
+kms_bw@linear-tiling-6-displays-1920x1080p,Fail
+kms_bw@linear-tiling-6-displays-2560x1440p,Fail
+kms_bw@linear-tiling-6-displays-3840x2160p,Fail
+kms_bw@linear-tiling-7-displays-1920x1080p,Fail
+kms_bw@linear-tiling-7-displays-2560x1440p,Fail
+kms_bw@linear-tiling-7-displays-3840x2160p,Fail
+kms_bw@linear-tiling-8-displays-1920x1080p,Fail
+kms_bw@linear-tiling-8-displays-2560x1440p,Fail
+kms_bw@linear-tiling-8-displays-3840x2160p,Fail
+kms_flip@absolute-wf_vblank,Fail
+kms_flip@absolute-wf_vblank-interruptible,Fail
+kms_flip@basic-flip-vs-wf_vblank,Fail
+kms_flip@blocking-absolute-wf_vblank,Fail
+kms_flip@blocking-absolute-wf_vblank-interruptible,Fail
+kms_flip@blocking-wf_vblank,Fail
+kms_flip@busy-flip,Fail
+kms_flip@dpms-vs-vblank-race,Fail
+kms_flip@dpms-vs-vblank-race-interruptible,Fail
+kms_flip@flip-vs-absolute-wf_vblank,Fail
+kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail
+kms_flip@flip-vs-blocking-wf-vblank,Fail
+kms_flip@flip-vs-expired-vblank,Fail
+kms_flip@flip-vs-expired-vblank-interruptible,Fail
+kms_flip@flip-vs-modeset-vs-hang,Fail
+kms_flip@flip-vs-panning-vs-hang,Fail
+kms_flip@flip-vs-wf_vblank-interruptible,Fail
+kms_flip@modeset-vs-vblank-race,Fail
+kms_flip@modeset-vs-vblank-race-interruptible,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip@plain-flip-fb-recreate-interruptible,Fail
+kms_flip@plain-flip-ts-check,Fail
+kms_flip@plain-flip-ts-check-interruptible,Fail
+kms_flip@wf_vblank-ts-check,Fail
+kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail
kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail
@@ -22,6 +65,9 @@ kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail
kms_plane_scaling@upscale-with-pixel-format-20x20,Fail
kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail
kms_plane_scaling@upscale-with-rotation-20x20,Fail
+kms_selftest@drm_format,Timeout
+kms_selftest@drm_format_helper,Timeout
+kms_setmode@basic,Fail
kms_vblank@crtc-id,Fail
kms_vblank@invalid,Fail
kms_vblank@pipe-A-accuracy-idle,Fail
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index f3680f4e6..26c188ce5 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2102,7 +2102,6 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
if (!aux->ddc.algo)
drm_dp_aux_init(aux);
- aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
@@ -2245,6 +2244,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
/* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
{ OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
+ /* Synaptics DP1.4 MST hubs require DSC for some modes where they apply HBLANK expansion. */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};
@@ -2327,6 +2328,33 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
EXPORT_SYMBOL(drm_dp_read_desc);
/**
+ * drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment
+ * @dsc_dpcd: DSC capabilities from DPCD
+ *
+ * Returns the bpp precision supported by the DP sink.
+ */
+u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
+
+ switch (bpp_increment_dpcd) {
+ case DP_DSC_BITS_PER_PIXEL_1_16:
+ return 16;
+ case DP_DSC_BITS_PER_PIXEL_1_8:
+ return 8;
+ case DP_DSC_BITS_PER_PIXEL_1_4:
+ return 4;
+ case DP_DSC_BITS_PER_PIXEL_1_2:
+ return 2;
+ case DP_DSC_BITS_PER_PIXEL_1_1:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr);
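Put differently, the returned value is the denominator N of the sink's 1/N bpp granularity: a return of 8 corresponds to steps of 2 in the .4 fixed-point bpp_x16 units used elsewhere in this file (16 / 8). A hedged helper sketch, not part of the patch:

    /* Round a compressed bpp in .4 fixed point down to the sink's
     * advertised granularity; hypothetical helper for illustration. */
    static int example_align_dsc_bpp_x16(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
                                         int bpp_x16)
    {
            u8 incr = drm_dp_dsc_sink_bpp_incr(dsc_dpcd);
            int step;

            if (!incr)
                    return bpp_x16; /* no/unknown precision advertised */

            step = 16 / incr;       /* e.g. incr = 8 -> steps of 2 in x16 units */
            return bpp_x16 - (bpp_x16 % step);
    }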
+
+/**
* drm_dp_dsc_sink_max_slice_count() - Get the max slice count
* supported by the DSC sink.
* @dsc_dpcd: DSC capabilities from DPCD
@@ -3898,3 +3926,142 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux)
EXPORT_SYMBOL(drm_panel_dp_aux_backlight);
#endif
+
+/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */
+static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16,
+ int symbol_size, bool is_mst)
+{
+ int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count);
+ int align = is_mst ? 4 / lane_count : 1;
+
+ return ALIGN(cycles, align);
+}
+
+static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count,
+ int bpp_x16, int symbol_size, bool is_mst)
+{
+ int slice_pixels = DIV_ROUND_UP(pixels, slice_count);
+ int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels,
+ bpp_x16, symbol_size, is_mst);
+ int slice_eoc_cycles = is_mst ? 4 / lane_count : 1;
+
+ return slice_count * (slice_data_cycles + slice_eoc_cycles);
+}
+
+/**
+ * drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream
+ * @lane_count: DP link lane count
+ * @hactive: pixel count of the active period in one scanline of the stream
+ * @dsc_slice_count: DSC slice count if @flags/%DRM_DP_BW_OVERHEAD_DSC is set
+ * @bpp_x16: bits per pixel in .4 binary fixed point
+ * @flags: DRM_DP_BW_OVERHEAD_x flags
+ *
+ * Calculate the BW allocation overhead of a DP link stream, depending
+ * on the link's
+ * - @lane_count
+ * - SST/MST mode (@flags / %DRM_DP_BW_OVERHEAD_MST)
+ * - symbol size (@flags / %DRM_DP_BW_OVERHEAD_UHBR)
+ * - FEC mode (@flags / %DRM_DP_BW_OVERHEAD_FEC)
+ * - SSC/REF_CLK mode (@flags / %DRM_DP_BW_OVERHEAD_SSC_REF_CLK)
+ * as well as the stream's
+ * - @hactive timing
+ * - @bpp_x16 color depth
+ * - compression mode (@flags / %DRM_DP_BW_OVERHEAD_DSC).
+ * Note that this overhead doesn't account for the 8b/10b, 128b/132b
+ * channel coding efficiency; for that, see
+ * drm_dp_bw_channel_coding_efficiency().
+ *
+ * Returns the overhead as 100% + overhead% in 1ppm units.
+ */
+int drm_dp_bw_overhead(int lane_count, int hactive,
+ int dsc_slice_count,
+ int bpp_x16, unsigned long flags)
+{
+ int symbol_size = flags & DRM_DP_BW_OVERHEAD_UHBR ? 32 : 8;
+ bool is_mst = flags & DRM_DP_BW_OVERHEAD_MST;
+ u32 overhead = 1000000;
+ int symbol_cycles;
+
+ if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) {
+ DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n",
+ lane_count, hactive,
+ bpp_x16 >> 4, (bpp_x16 & 0xf) * 625);
+ return 0;
+ }
+
+ /*
+ * DP Standard v2.1 2.6.4.1
+ * SSC downspread and ref clock variation margin:
+ * 5300ppm + 300ppm ~ 0.6%
+ */
+ if (flags & DRM_DP_BW_OVERHEAD_SSC_REF_CLK)
+ overhead += 6000;
+
+ /*
+ * DP Standard v2.1 2.6.4.1.1, 3.5.1.5.4:
+ * FEC symbol insertions for 8b/10b channel coding:
+ * After each 250 data symbols on 2-4 lanes:
+ * 250 LL + 5 FEC_PARITY_PH + 1 CD_ADJ (256 byte FEC block)
+ * After each 2 x 250 data symbols on 1 lane:
+ * 2 * 250 LL + 11 FEC_PARITY_PH + 1 CD_ADJ (512 byte FEC block)
+ * After 256 (2-4 lanes) or 128 (1 lane) FEC blocks:
+ * 256 * 256 bytes + 1 FEC_PM
+ * or
+ * 128 * 512 bytes + 1 FEC_PM
+ * (256 * 6 + 1) / (256 * 250) = 2.4015625 %
+ */
+ if (flags & DRM_DP_BW_OVERHEAD_FEC)
+ overhead += 24016;
+
+ /*
+ * DP Standard v2.1 2.7.9, 5.9.7
+ * The FEC overhead for UHBR is accounted for in its 96.71% channel
+ * coding efficiency.
+ */
+ WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) &&
+ (flags & DRM_DP_BW_OVERHEAD_FEC));
+
+ if (flags & DRM_DP_BW_OVERHEAD_DSC)
+ symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive,
+ dsc_slice_count,
+ bpp_x16, symbol_size,
+ is_mst);
+ else
+ symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive,
+ bpp_x16, symbol_size,
+ is_mst);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count,
+ overhead * 16),
+ hactive * bpp_x16);
+}
+EXPORT_SYMBOL(drm_dp_bw_overhead);
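A worked example of the arithmetic above (not from the patch itself): a 4-lane 8b/10b SST link with 1920 active pixels per line at 24 bpp (bpp_x16 = 384). The symbol-cycle count divides evenly, so only the SSC and FEC terms contribute:

    /* cycles = DIV_ROUND_UP(1920 * 384, 16 * 8 * 4) = 1440 (no padding),
     * so the result is 1000000 + 6000 + 24016 = 1030016 ppm (103.0016%). */
    int overhead = drm_dp_bw_overhead(4, 1920, 0, 24 << 4,
                                      DRM_DP_BW_OVERHEAD_SSC_REF_CLK |
                                      DRM_DP_BW_OVERHEAD_FEC);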
+
+/**
+ * drm_dp_bw_channel_coding_efficiency - Get a DP link's channel coding efficiency
+ * @is_uhbr: Whether the link uses 128b/132b channel coding
+ *
+ * Return the channel coding efficiency of the given DP link type, which is
+ * either 8b/10b or 128b/132b (aka UHBR). The corresponding overhead includes
+ * the 8b -> 10b, 128b -> 132b pixel data to link symbol conversion overhead
+ * and for 128b/132b any link or PHY level control symbol insertion overhead
+ * (LLCP, FEC, PHY sync, see DP Standard v2.1 3.5.2.18). For 8b/10b the
+ * corresponding FEC overhead is BW allocation specific, included in the value
+ * returned by drm_dp_bw_overhead().
+ *
+ * Returns the efficiency as the ratio of 100% to (100% + coding
+ * overhead%), in 1ppm units.
+ */
+int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
+{
+ if (is_uhbr)
+ return 967100;
+ else
+ /*
+ * Note that on 8b/10b MST the efficiency is only
+ * 78.75% due to the 1 out of 64 MTPH packet overhead,
+ * not accounted for here.
+ */
+ return 800000;
+}
+EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
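Combining the two helpers gives an estimate of a link's usable payload rate. A sketch under assumed units (raw wire rate per lane in kbps); the wrapper and its names are illustrative, the two helper calls are the ones added above:

    #include <linux/math64.h>

    static u64 example_payload_kbps(u64 raw_kbps_per_lane, int lanes,
                                    bool is_uhbr, int overhead_ppm)
    {
            u64 bw = raw_kbps_per_lane * lanes;

            /* scale by coding efficiency (e.g. 800000 ppm for 8b/10b) ... */
            bw = div_u64(bw * drm_dp_bw_channel_coding_efficiency(is_uhbr),
                         1000000);
            /* ... then divide out the stream overhead from drm_dp_bw_overhead() */
            return div_u64(bw * 1000000, overhead_ppm);
    }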
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 772b00ebd..f7c6b6062 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -43,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -3578,16 +3579,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
* value is in units of PBNs/(timeslots/1 MTP). This value can be used to
* convert the number of PBNs required for a given stream to the number of
* timeslots this stream requires in each MTP.
+ *
+ * Returns the BW / timeslot value in 20.12 fixed point format.
*/
-int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count)
{
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
+ fixed20_12 ret;
+
if (link_rate == 0 || link_lane_count == 0)
drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
link_rate, link_lane_count);
- /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
- return link_rate * link_lane_count / 54000;
+ /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
+ ch_coding_efficiency),
+ (1000000ULL * 8 * 5400) >> 12);
+
+ return ret;
}
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
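A worked example of the new fixed-point result, using the 8b/10b efficiency of 800000 ppm and assuming link_rate is in the units the old link_rate * lane_count / 54000 formula used:

    /* HBR2 x4: 540000 * 4 * 800000 / ((1000000 * 8 * 5400) >> 12) = 163840,
     * i.e. 40.0 in 20.12 fixed point, matching the old integer formula's
     * 540000 * 4 / 54000 = 40. 'mgr' is assumed to be in scope. */
    fixed20_12 pbn_div = drm_dp_get_vc_payload_bw(mgr, 540000, 4);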
@@ -4335,7 +4346,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
}
}
- req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
+ req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full);
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
port->connector->base.id, port->connector->name,
@@ -4726,17 +4737,28 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
/*
- * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
* common multiplier to render an integer PBN for all link rate/lane
* counts combinations
* calculate
- * peak_kbps *= (1006/1000)
- * peak_kbps *= (64/54)
- * peak_kbps *= 8 convert to bytes
+ * peak_kbps = clock * bpp / 16
+ * peak_kbps *= SSC overhead / 1000000
+ * peak_kbps /= 8 convert to Kbytes
+ * peak_kBps *= (64/54) / 1000 convert to PBN
+ */
+ /*
+ * TODO: Use the actual link and mode parameters to calculate
+ * the overhead. For now it's assumed that these are
+ * 4 link lanes, 4096 hactive pixels, which don't add any
+ * significant data padding overhead and that there is no DSC
+ * or FEC overhead.
*/
- return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
- 1000 * 8 * 54 * 1000);
+ int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp,
+ DRM_DP_BW_OVERHEAD_MST |
+ DRM_DP_BW_OVERHEAD_SSC_REF_CLK);
+
+ return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4),
+ 1000000ULL * 8 * 54 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
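A worked example of the updated formula, with the arithmetic spelled out; the bpp-in-.4-fixed-point convention follows the peak_kbps = clock * bpp / 16 comment above:

    /* 1080p60: 148.5 MHz pixel clock, 24 bpp passed in .4 fixed point.
     * The assumed 4-lane/4096-pixel geometry adds no padding, so the
     * overhead is the 0.6% SSC margin alone (1006000 ppm), and
     * 148500 * 384 * ((64 * 1006000) >> 4) / (1000000 * 8 * 54 * 1000)
     * = 531.17..., rounded up to 532 PBN. */
    int pbn = drm_dp_calc_pbn_mode(148500, 24 << 4);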
@@ -4861,7 +4883,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
state = to_drm_dp_mst_topology_state(mgr->base.state);
seq_printf(m, "\n*** Atomic state info ***\n");
seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
- state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
+ state->payload_mask, mgr->max_payloads, state->start_slot,
+ dfixed_trunc(state->pbn_div));
seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n");
for (i = 0; i < mgr->max_payloads; i++) {
@@ -5126,13 +5149,67 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
return false;
}
+static bool
+drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent)
+{
+ if (!mgr->mst_primary)
+ return false;
+
+ port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
+ port);
+ if (!port)
+ return false;
+
+ if (!parent)
+ return true;
+
+ parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
+ parent);
+ if (!parent)
+ return false;
+
+ if (!parent->mstb)
+ return false;
+
+ return drm_dp_mst_port_downstream_of_branch(port, parent->mstb);
+}
+
+/**
+ * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
+ * @mgr: MST topology manager
+ * @port: the port being looked up
+ * @parent: the parent port
+ *
+ * The function returns %true if @port is downstream of @parent. If @parent is
+ * %NULL - denoting the root port - the function returns %true if @port is in
+ * @mgr's topology.
+ */
+bool
+drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent)
+{
+ bool ret;
+
+ mutex_lock(&mgr->lock);
+ ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);
+ mutex_unlock(&mgr->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent);
+
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
- struct drm_dp_mst_topology_state *state);
+ struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port **failing_port);
static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_topology_state *state)
+ struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port **failing_port)
{
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_port *port;
@@ -5161,7 +5238,7 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
list_for_each_entry(port, &mstb->ports, next) {
- ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+ ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port);
if (ret < 0)
return ret;
@@ -5173,7 +5250,8 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
- struct drm_dp_mst_topology_state *state)
+ struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port **failing_port)
{
struct drm_dp_mst_atomic_payload *payload;
int pbn_used = 0;
@@ -5194,13 +5272,15 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
port->parent, port);
+ *failing_port = port;
return -EINVAL;
}
pbn_used = payload->pbn;
} else {
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
- state);
+ state,
+ failing_port);
if (pbn_used <= 0)
return pbn_used;
}
@@ -5209,6 +5289,7 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
port->parent, port, pbn_used, port->full_pbn);
+ *failing_port = port;
return -ENOSPC;
}
@@ -5261,10 +5342,10 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr
}
if (!payload_count)
- mst_state->pbn_div = 0;
+ mst_state->pbn_div.full = dfixed_const(0);
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
- mgr, mst_state, mst_state->pbn_div, avail_slots,
+ mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
mst_state->total_avail_slots - avail_slots);
return 0;
@@ -5387,19 +5468,83 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
/**
+ * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
+ * @state: The global atomic state
+ * @mgr: Manager to check
+ * @mst_state: The MST atomic state for @mgr
+ * @failing_port: Returns the port with a BW limitation
+ *
+ * Checks the given MST manager's topology state for an atomic update to ensure
+ * that it's valid. This includes checking whether there's enough bandwidth to
+ * support the new timeslot allocations in the atomic update.
+ *
+ * Any atomic drivers supporting DP MST must make sure to call this or
+ * the drm_dp_mst_atomic_check() function after checking the rest of their state
+ * in their &drm_mode_config_funcs.atomic_check() callback.
+ *
+ * See also:
+ * drm_dp_mst_atomic_check()
+ * drm_dp_atomic_find_time_slots()
+ * drm_dp_atomic_release_time_slots()
+ *
+ * Returns:
+ * - 0 if the new state is valid
+ * - %-ENOSPC, if the new state is invalid, because of BW limitation
+ * @failing_port is set to:
+ *
+ * - The non-root port where a BW limit check failed
+ * with all the ports downstream of @failing_port passing
+ * the BW limit check.
+ * The returned port pointer is valid until at least
+ * one payload downstream of it exists.
+ * - %NULL if the BW limit check failed at the root port
+ * with all the ports downstream of the root port passing
+ * the BW limit check.
+ *
+ * - %-EINVAL, if the new state is invalid, because the root port has
+ * too many payloads.
+ */
+int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_port **failing_port)
+{
+ int ret;
+
+ *failing_port = NULL;
+
+ if (!mgr->mst_state)
+ return 0;
+
+ mutex_lock(&mgr->lock);
+ ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+ mst_state,
+ failing_port);
+ mutex_unlock(&mgr->lock);
+
+ if (ret < 0)
+ return ret;
+
+ return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
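A sketch of how a driver's atomic_check path might consume the new entry point together with drm_dp_mst_port_downstream_of_parent() added above; the retry strategy and all example_* names are illustrative, not from the patch:

    static int example_mst_bw_check(struct drm_atomic_state *state,
                                    struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_topology_state *mst_state,
                                    struct drm_dp_mst_port *my_port)
    {
            struct drm_dp_mst_port *failing_port;
            int ret;

            ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &failing_port);
            if (ret != -ENOSPC)
                    return ret;

            /* A NULL failing_port means the root port: every stream is affected. */
            if (drm_dp_mst_port_downstream_of_parent(mgr, my_port, failing_port))
                    return -EAGAIN; /* e.g. retry this stream at a lower bpp */

            return ret;
    }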
+
+/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
* @state: Pointer to the new &struct drm_dp_mst_topology_state
*
* Checks the given topology state for an atomic update to ensure that it's
- * valid. This includes checking whether there's enough bandwidth to support
- * the new timeslot allocations in the atomic update.
+ * valid, calling drm_dp_mst_atomic_check_mgr() for each MST manager in the
+ * atomic state. This includes checking whether there's enough bandwidth to
+ * support the new timeslot allocations in the atomic update.
*
* Any atomic drivers supporting DP MST must make sure to call this after
* checking the rest of their state in their
* &drm_mode_config_funcs.atomic_check() callback.
*
* See also:
+ * drm_dp_mst_atomic_check_mgr()
* drm_dp_atomic_find_time_slots()
* drm_dp_atomic_release_time_slots()
*
@@ -5414,21 +5559,11 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
int i, ret = 0;
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- if (!mgr->mst_state)
- continue;
+ struct drm_dp_mst_port *tmp_port;
- ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
+ ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
if (ret)
break;
-
- mutex_lock(&mgr->lock);
- ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
- mst_state);
- mutex_unlock(&mgr->lock);
- if (ret < 0)
- break;
- else
- ret = 0;
}
return ret;
@@ -5793,7 +5928,6 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
- aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
/* FIXME: set the kdev of the port's connector as parent */
aux->ddc.dev.parent = parent_dev;
@@ -5884,6 +6018,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *immediate_upstream_port;
+ struct drm_dp_aux *immediate_upstream_aux;
struct drm_dp_mst_port *fec_port;
struct drm_dp_desc desc = {};
u8 endpoint_fec;
@@ -5948,21 +6083,25 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
* - Port is on primary branch device
* - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
*/
- if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+ if (immediate_upstream_port)
+ immediate_upstream_aux = &immediate_upstream_port->aux;
+ else
+ immediate_upstream_aux = port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
- port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
- port->parent == port->mgr->mst_primary) {
+ if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
+ if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
return NULL;
- if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
+ if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+ ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
- != DP_DWN_STRM_PORT_TYPE_ANALOG))
- return port->mgr->aux;
+ != DP_DWN_STRM_PORT_TYPE_ANALOG)))
+ return immediate_upstream_aux;
}
/*
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
deleted file mode 100644
index a4ad6fd13..000000000
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-/*
- * Get AGP information.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
- struct agp_kern_info *kern;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- kern = &dev->agp->agp_info;
- info->agp_version_major = kern->version.major;
- info->agp_version_minor = kern->version.minor;
- info->mode = kern->mode;
- info->aperture_base = kern->aper_base;
- info->aperture_size = kern->aper_size * 1024 * 1024;
- info->memory_allowed = kern->max_memory << PAGE_SHIFT;
- info->memory_used = kern->current_memory << PAGE_SHIFT;
- info->id_vendor = kern->device->vendor;
- info->id_device = kern->device->device;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_info);
-
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_info *info = data;
- int err;
-
- err = drm_legacy_agp_info(dev, info);
- if (err)
- return err;
-
- return 0;
-}
-
-/*
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire(struct drm_device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- if (!dev->agp)
- return -ENODEV;
- if (dev->agp->acquired)
- return -EBUSY;
- dev->agp->bridge = agp_backend_acquire(pdev);
- if (!dev->agp->bridge)
- return -ENODEV;
- dev->agp->acquired = 1;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_acquire);
-
-/*
- * Acquire the AGP device (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev);
-}
-
-/*
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_legacy_agp_release(struct drm_device *dev)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- agp_backend_release(dev->agp->bridge);
- dev->agp->acquired = 0;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_release);
-
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return drm_legacy_agp_release(dev);
-}
-
-/*
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
-{
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
-
- dev->agp->mode = mode.mode;
- agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->enabled = 1;
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_enable);
-
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_mode *mode = data;
-
- return drm_legacy_agp_enable(dev, *mode);
-}
-
-/*
- * Allocate AGP memory.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
- */
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
- struct agp_memory *memory;
- unsigned long pages;
- u32 type;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
- type = (u32) request->type;
- memory = agp_allocate_memory(dev->agp->bridge, pages, type);
- if (!memory) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->handle = (unsigned long)memory->key + 1;
- entry->memory = memory;
- entry->bound = 0;
- entry->pages = pages;
- list_add(&entry->head, &dev->agp->memory);
-
- request->handle = entry->handle;
- request->physical = memory->physical;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_alloc);
-
-
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_legacy_agp_alloc(dev, request);
-}
-
-/*
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
- unsigned long handle)
-{
- struct drm_agp_mem *entry;
-
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == handle)
- return entry;
- }
- return NULL;
-}
-
-/*
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks-up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int ret;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry || !entry->bound)
- return -EINVAL;
- ret = agp_unbind_memory(entry->memory);
- if (ret == 0)
- entry->bound = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_legacy_agp_unbind);
-
-
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_legacy_agp_unbind(dev, request);
-}
-
-/*
- * Bind AGP memory into the GATT (ioctl)
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks-up the AGP memory entry and passes
- * it to bind_agp() function.
- */
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
- struct drm_agp_mem *entry;
- int retcode;
- int page;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry || entry->bound)
- return -EINVAL;
- page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
- retcode = agp_bind_memory(entry->memory, page);
- if (retcode)
- return retcode;
- entry->bound = dev->agp->base + (page << PAGE_SHIFT);
- DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
- dev->agp->base, entry->bound);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_bind);
-
-
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_binding *request = data;
-
- return drm_legacy_agp_bind(dev, request);
-}
-
-/*
- * Free AGP memory (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory is currently bound, unbind it via
- * unbind_agp(). Frees it via free_agp() as well as the entry itself
- * and unlinks from the doubly linked list it's inserted in.
- */
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
- struct drm_agp_mem *entry;
-
- if (!dev->agp || !dev->agp->acquired)
- return -EINVAL;
- entry = drm_legacy_agp_lookup_entry(dev, request->handle);
- if (!entry)
- return -EINVAL;
- if (entry->bound)
- agp_unbind_memory(entry->memory);
-
- list_del(&entry->head);
-
- agp_free_memory(entry->memory);
- kfree(entry);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_free);
-
-
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_agp_buffer *request = data;
-
- return drm_legacy_agp_free(dev, request);
-}
-
-/*
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the drm_agp_t structure which is made available by the agpgart module
- * via the inter_module_* functions. Creates and initializes a drm_agp_head
- * structure.
- *
- * Note that final cleanup of the kmalloced structure is directly done in
- * drm_pci_agp_destroy.
- */
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct drm_agp_head *head = NULL;
-
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (!head)
- return NULL;
- head->bridge = agp_find_bridge(pdev);
- if (!head->bridge) {
- head->bridge = agp_backend_acquire(pdev);
- if (!head->bridge) {
- kfree(head);
- return NULL;
- }
- agp_copy_info(head->bridge, &head->agp_info);
- agp_backend_release(head->bridge);
- } else {
- agp_copy_info(head->bridge, &head->agp_info);
- }
- if (head->agp_info.chipset == NOT_SUPPORTED) {
- kfree(head);
- return NULL;
- }
- INIT_LIST_HEAD(&head->memory);
- head->cant_use_aperture = head->agp_info.cant_use_aperture;
- head->page_mask = head->agp_info.page_mask;
- head->base = head->agp_info.aper_base;
- return head;
-}
-/* Only exported for i810.ko */
-EXPORT_SYMBOL(drm_legacy_agp_init);
-
-/**
- * drm_legacy_agp_clear - Clear AGP resource list
- * @dev: DRM device
- *
- * Iterate over all AGP resources and remove them. But keep the AGP head
- * intact so it can still be used. It is safe to call this if AGP is disabled or
- * was already removed.
- *
- * Cleanup is only done for drivers who have DRIVER_LEGACY set.
- */
-void drm_legacy_agp_clear(struct drm_device *dev)
-{
- struct drm_agp_mem *entry, *tempe;
-
- if (!dev->agp)
- return;
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- agp_unbind_memory(entry->memory);
- agp_free_memory(entry->memory);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
-
- if (dev->agp->acquired)
- drm_legacy_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f1a503aaf..a91737adf 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -733,6 +733,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_get_color_encoding_name(state->color_encoding));
drm_printf(p, "\tcolor-range=%s\n",
drm_get_color_range_name(state->color_range));
+ drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
@@ -1773,6 +1774,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct drm_private_obj *obj;
if (!drm_drv_uses_atomic_modeset(dev))
return;
@@ -1801,6 +1803,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
if (take_locks)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
drm_connector_list_iter_end(&conn_iter);
+
+ list_for_each_entry(obj, &config->privobj_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&obj->lock, NULL);
+ drm_atomic_private_obj_print_state(p, obj->state);
+ if (take_locks)
+ drm_modeset_unlock(&obj->lock);
+ }
}
/**
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 68ffcc0b0..39ef0a6ad 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -795,9 +795,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
- * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
- * @encoder: encoder state to check
- * @conn_state: connector state to check
+ * drm_atomic_helper_check_wb_connector_state() - Check writeback connector state
+ * @connector: corresponding connector
+ * @state: the driver state object
*
* Checks if the writeback connector state is valid, and returns an error if it
* isn't.
@@ -806,9 +806,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* Zero for success or -errno
*/
int
-drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
- struct drm_connector_state *conn_state)
+drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
struct drm_writeback_job *wb_job = conn_state->writeback_job;
struct drm_property_blob *pixel_format_blob;
struct drm_framebuffer *fb;
@@ -827,11 +829,11 @@ drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
if (fb->format->format == formats[i])
return 0;
- drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
+ drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
return -EINVAL;
}
-EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
+EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state);
/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
@@ -2382,10 +2384,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
- * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
+ * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
* @old_state: atomic state object with old state structures
*
- * This function waits for all preceeding commits that touch the same CRTC as
+ * This function waits for all preceding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled
* by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 784e63d70..519228eb1 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
plane_state->normalized_zpos = val;
}
}
+
+ if (plane->hotspot_x_property) {
+ if (!drm_object_property_get_default_value(&plane->base,
+ plane->hotspot_x_property,
+ &val))
+ plane_state->hotspot_x = val;
+ }
+
+ if (plane->hotspot_y_property) {
+ if (!drm_object_property_get_default_value(&plane->base,
+ plane->hotspot_y_property,
+ &val))
+ plane_state->hotspot_y = val;
+ }
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
@@ -338,6 +352,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
state->fence = NULL;
state->commit = NULL;
state->fb_damage_clips = NULL;
+ state->color_mgmt_changed = false;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 98d3b10c0..29d494018 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -362,48 +362,6 @@ static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
return fence_ptr;
}
-static int
-drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
- struct drm_property_blob **blob,
- uint64_t blob_id,
- ssize_t expected_size,
- ssize_t expected_elem_size,
- bool *replaced)
-{
- struct drm_property_blob *new_blob = NULL;
-
- if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL) {
- drm_dbg_atomic(dev,
- "cannot find blob ID %llu\n", blob_id);
- return -EINVAL;
- }
-
- if (expected_size > 0 &&
- new_blob->length != expected_size) {
- drm_dbg_atomic(dev,
- "[BLOB:%d] length %zu different from expected %zu\n",
- new_blob->base.id, new_blob->length, expected_size);
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- if (expected_elem_size > 0 &&
- new_blob->length % expected_elem_size != 0) {
- drm_dbg_atomic(dev,
- "[BLOB:%d] length %zu not divisible by element size %zu\n",
- new_blob->base.id, new_blob->length, expected_elem_size);
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- }
-
- *replaced |= drm_property_replace_blob(blob, new_blob);
- drm_property_blob_put(new_blob);
-
- return 0;
-}
-
static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val)
@@ -424,7 +382,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
} else if (property == config->prop_vrr_enabled) {
state->vrr_enabled = val;
} else if (property == config->degamma_lut_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->degamma_lut,
val,
-1, sizeof(struct drm_color_lut),
@@ -432,7 +390,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->ctm_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->ctm,
val,
sizeof(struct drm_color_ctm), -1,
@@ -440,7 +398,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->gamma_lut_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->gamma_lut,
val,
-1, sizeof(struct drm_color_lut),
@@ -581,7 +539,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == plane->color_range_property) {
state->color_range = val;
} else if (property == config->prop_fb_damage_clips) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->fb_damage_clips,
val,
-1,
@@ -593,6 +551,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
+ } else if (property == plane->hotspot_x_property) {
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->hotspot_x = val;
+ } else if (property == plane->hotspot_y_property) {
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ drm_dbg_atomic(plane->dev,
+ "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->hotspot_y = val;
} else {
drm_dbg_atomic(plane->dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -653,6 +627,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->scaling_filter;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
+ } else if (property == plane->hotspot_x_property) {
+ *val = state->hotspot_x;
+ } else if (property == plane->hotspot_y_property) {
+ *val = state->hotspot_y;
} else {
drm_dbg_atomic(dev,
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -758,7 +736,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
if (state->link_status != DRM_LINK_STATUS_GOOD)
state->link_status = val;
} else if (property == config->hdr_output_metadata_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
+ ret = drm_property_replace_blob_from_id(dev,
&state->hdr_output_metadata,
val,
sizeof(struct hdr_output_metadata), -1,
@@ -1006,13 +984,28 @@ out:
return ret;
}
+static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value,
+ struct drm_property *prop)
+{
+ if (ret != 0 || old_val != prop_value) {
+ drm_dbg_atomic(prop->dev,
+ "[PROP:%d:%s] No prop can be changed during async flip\n",
+ prop->base.id, prop->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
- uint64_t prop_value)
+ u64 prop_value,
+ bool async_flip)
{
struct drm_mode_object *ref;
+ u64 old_val;
int ret;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
@@ -1029,6 +1022,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip) {
+ ret = drm_atomic_connector_get_property(connector, connector_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
ret = drm_atomic_connector_set_property(connector,
connector_state, file_priv,
prop, prop_value);
@@ -1044,6 +1044,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip) {
+ ret = drm_atomic_crtc_get_property(crtc, crtc_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
ret = drm_atomic_crtc_set_property(crtc,
crtc_state, prop, prop_value);
break;
@@ -1051,6 +1058,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
+ struct drm_mode_config *config = &plane->dev->mode_config;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -1058,6 +1066,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
+ if (async_flip && prop != config->prop_fb_id) {
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+ break;
+ }
+
+ if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ drm_dbg_atomic(prop->dev,
+ "[OBJECT:%d] Only primary planes can be changed during async flip\n",
+ obj->id);
+ ret = -EINVAL;
+ break;
+ }
+
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
@@ -1323,6 +1346,18 @@ static void complete_signaling(struct drm_device *dev,
kfree(fence_state);
}
+static void
+set_async_flip(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ crtc_state->async_flip = true;
+ }
+}
+
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
@@ -1337,6 +1372,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_out_fence_state *fence_state;
int ret = 0;
unsigned int i, j, num_fences;
+ bool async_flip = false;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1363,9 +1399,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
}
if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
- drm_dbg_atomic(dev,
- "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
- return -EINVAL;
+ if (!dev->mode_config.async_page_flip) {
+ drm_dbg_atomic(dev,
+ "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n");
+ return -EINVAL;
+ }
+
+ async_flip = true;
}
/* can't test and expect an event at the same time. */
@@ -1450,8 +1490,8 @@ retry:
goto out;
}
- ret = drm_atomic_set_property(state, file_priv,
- obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj,
+ prop, prop_value, async_flip);
if (ret) {
drm_mode_object_put(obj);
goto out;
@@ -1468,6 +1508,9 @@ retry:
if (ret)
goto out;
+ if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ set_async_flip(state);
+
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
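Taken together, the uapi changes above accept DRM_MODE_PAGE_FLIP_ASYNC on atomic commits when the driver advertises mode_config.async_page_flip, but only if the commit changes nothing except FB_ID on a primary plane. A hedged userspace sketch of such a flip (fd, plane_id and prop_fb_id assumed known):

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Illustrative async atomic flip: only the primary plane's FB_ID changes.
 * Any other property in the request must keep its current value, or the
 * kernel rejects the commit with -EINVAL per the checks above. */
static int async_flip(int fd, uint32_t plane_id, uint32_t prop_fb_id,
		      uint32_t new_fb_id)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, plane_id, prop_fb_id, new_fb_id);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_PAGE_FLIP_ASYNC, NULL);
	drmModeAtomicFree(req);
	return ret;
}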
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 6899b3dc1..22aa015df 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -37,13 +37,12 @@
#include <drm/drm_print.h>
#include "drm_internal.h"
-#include "drm_legacy.h"
/**
* DOC: master and authentication
*
* &struct drm_master is used to track groups of clients with open
- * primary/legacy device nodes. For every &struct drm_file which has had at
+ * primary device nodes. For every &struct drm_file which has at
least once successfully become the device master (either through the
* SET_MASTER IOCTL, or implicitly through opening the primary device node when
* no one else is the current master that time) there exists one &drm_master.
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return NULL;
kref_init(&master->refcount);
- drm_master_legacy_init(master);
idr_init_base(&master->magic_map, 1);
master->dev = dev;
@@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master_locked(file_priv))
goto out;
- drm_legacy_lock_master_cleanup(dev, master);
-
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
out:
@@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- drm_legacy_master_rmmaps(dev, master);
-
idr_destroy(&master->magic_map);
idr_destroy(&master->leases);
idr_destroy(&master->lessee_idr);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index e1cfba2ff..4f6f8c662 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -1391,50 +1391,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
-#ifdef CONFIG_DEBUG_FS
-static int drm_bridge_chains_info(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_encoder *encoder;
- unsigned int bridge_idx = 0;
-
- list_for_each_entry(encoder, &config->encoder_list, head) {
- struct drm_bridge *bridge;
-
- drm_printf(&p, "encoder[%u]\n", encoder->base.id);
-
- drm_for_each_bridge_in_chain(encoder, bridge) {
- drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
- bridge_idx, bridge->type, bridge->ops);
-
-#ifdef CONFIG_OF
- if (bridge->of_node)
- drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
-#endif
-
- drm_printf(&p, "\n");
-
- bridge_idx++;
- }
- }
-
- return 0;
-}
-
-static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
- { "bridge_chains", drm_bridge_chains_info, 0 },
-};
-
-void drm_bridge_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_add_files(dev, drm_bridge_debugfs_list,
- ARRAY_SIZE(drm_bridge_debugfs_list));
-}
-#endif
-
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 8239ad43a..3acd67021 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector)
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- if (bridge_connector->bridge_hpd) {
- struct drm_bridge *hpd = bridge_connector->bridge_hpd;
-
- drm_bridge_hpd_disable(hpd);
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
deleted file mode 100644
index 86700560f..000000000
--- a/drivers/gpu/drm/drm_bufs.c
+++ /dev/null
@@ -1,1627 +0,0 @@
-/*
- * Legacy: Generic DRM Buffer Management
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/log2.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/nospec.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include <asm/shmparam.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- struct drm_local_map *map)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- /*
- * Because the kernel-userspace ABI is fixed at a 32-bit offset
- * while PCI resources may live above that, we only compare the
- * lower 32 bits of the map offset for maps of type
- * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
- * It is assumed that if a driver have more than one resource
- * of each type, the lower 32 bits are different.
- */
- if (!entry->map ||
- map->type != entry->map->type ||
- entry->master != dev->master)
- continue;
- switch (map->type) {
- case _DRM_SHM:
- if (map->flags != _DRM_CONTAINS_LOCK)
- break;
- return entry;
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- if ((entry->map->offset & 0xffffffff) ==
- (map->offset & 0xffffffff))
- return entry;
- break;
- default: /* Make gcc happy */
- break;
- }
- if (entry->map->offset == map->offset)
- return entry;
- }
-
- return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
- unsigned long user_token, int hashed_handle, int shm)
-{
- int use_hashed_handle, shift;
- unsigned long add;
-
-#if (BITS_PER_LONG == 64)
- use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
- use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
- if (!use_hashed_handle) {
- int ret;
-
- hash->key = user_token >> PAGE_SHIFT;
- ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
- return ret;
- }
-
- shift = 0;
- add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
- if (shm && (SHMLBA > PAGE_SIZE)) {
- int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
- /* For shared memory, we have to preserve the SHMLBA
- * bits of the eventual vma->vm_pgoff value during
- * mmap(). Otherwise we run into cache aliasing problems
- * on some platforms. On these platforms, the pgoff of
- * a mmap() request is used to pick a suitable virtual
- * address for the mmap() region such that it will not
- * cause cache aliasing problems.
- *
- * Therefore, make sure the SHMLBA relevant bits of the
- * hash value we use are equal to those in the original
- * kernel virtual address.
- */
- shift = bits;
- add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
- }
-
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- shift, add);
-}
-
-/*
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags,
- struct drm_map_list **maplist)
-{
- struct drm_local_map *map;
- struct drm_map_list *list;
- unsigned long user_token;
- int ret;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return -ENOMEM;
-
- map->offset = offset;
- map->size = size;
- map->flags = flags;
- map->type = type;
-
- /* Only allow shared memory to be removable since we only keep enough
- * bookkeeping information about shared memory to allow for removal
- * when processes fork.
- */
- if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
- kfree(map);
- return -EINVAL;
- }
- DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
- (unsigned long long)map->offset, map->size, map->type);
-
- /* page-align _DRM_SHM maps. They are allocated here so there is no security
- * hole created by that and it works around various broken drivers that use
- * a non-aligned quantity to map the SAREA. --BenH
- */
- if (map->type == _DRM_SHM)
- map->size = PAGE_ALIGN(map->size);
-
- if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
- kfree(map);
- return -EINVAL;
- }
- map->mtrr = -1;
- map->handle = NULL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
- if (map->offset + (map->size-1) < map->offset ||
- map->offset < virt_to_phys(high_memory)) {
- kfree(map);
- return -EINVAL;
- }
-#endif
- /* Some drivers preinitialize some maps, without the X Server
- * needing to be aware of it. Therefore, we just return success
- * when the server tries to create a duplicate map.
- */
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size,
- list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
-
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr =
- arch_phys_wc_add(map->offset, map->size);
- }
- if (map->type == _DRM_REGISTERS) {
- if (map->flags & _DRM_WRITE_COMBINING)
- map->handle = ioremap_wc(map->offset,
- map->size);
- else
- map->handle = ioremap(map->offset, map->size);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- }
-
- break;
- case _DRM_SHM:
- list = drm_find_matching_map(dev, map);
- if (list != NULL) {
- if (list->map->size != map->size) {
- DRM_DEBUG("Matching maps of type %d with "
- "mismatched sizes, (%ld vs %ld)\n",
- map->type, map->size, list->map->size);
- list->map->size = map->size;
- }
-
- kfree(map);
- *maplist = list;
- return 0;
- }
- map->handle = vmalloc_user(map->size);
- DRM_DEBUG("%lu %d %p\n",
- map->size, order_base_2(map->size), map->handle);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- map->offset = (unsigned long)map->handle;
- if (map->flags & _DRM_CONTAINS_LOCK) {
- /* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->master->lock.hw_lock != NULL) {
- vfree(map->handle);
- kfree(map);
- return -EBUSY;
- }
- dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
- }
- break;
- case _DRM_AGP: {
- struct drm_agp_mem *entry;
- int valid = 0;
-
- if (!dev->agp) {
- kfree(map);
- return -EINVAL;
- }
-#ifdef __alpha__
- map->offset += dev->hose->mem_space->start;
-#endif
- /* In some cases (i810 driver), user space may have already
- * added the AGP base itself, because dev->agp->base previously
- * only got set during AGP enable. So, only add the base
- * address if the map's offset isn't already within the
- * aperture.
- */
- if (map->offset < dev->agp->base ||
- map->offset > dev->agp->base +
- dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
- map->offset += dev->agp->base;
- }
- map->mtrr = dev->agp->agp_mtrr; /* for getmap */
-
- /* This assumes the DRM is in total control of AGP space.
- * It's not always the case as AGP can be in the control
- * of user space (e.g. the i810 driver). So this loop will get
- * skipped and we double-check that dev->agp->memory is
- * actually set as well as being invalid before EPERM'ing
- */
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if ((map->offset >= entry->bound) &&
- (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- kfree(map);
- return -EPERM;
- }
- DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
- (unsigned long long)map->offset, map->size);
-
- break;
- }
- case _DRM_SCATTER_GATHER:
- if (!dev->sg) {
- kfree(map);
- return -EINVAL;
- }
- map->offset += (unsigned long)dev->sg->virtual;
- break;
- case _DRM_CONSISTENT:
- /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
- * As we're limiting the address to 2^32-1 (or less),
- * casting it down to 32 bits is no problem, but we
- * need to point to a 64bit variable first.
- */
- map->handle = dma_alloc_coherent(dev->dev,
- map->size,
- &map->offset,
- GFP_KERNEL);
- if (!map->handle) {
- kfree(map);
- return -ENOMEM;
- }
- break;
- default:
- kfree(map);
- return -EINVAL;
- }
-
- list = kzalloc(sizeof(*list), GFP_KERNEL);
- if (!list) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- return -EINVAL;
- }
- list->map = map;
-
- mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist);
-
- /* Assign a 32-bit handle */
- /* We do it here so that dev->struct_mutex protects the increment */
- user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
- map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0,
- (map->type == _DRM_SHM));
- if (ret) {
- if (map->type == _DRM_REGISTERS)
- iounmap(map->handle);
- kfree(map);
- kfree(list);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- list->user_token = list->hash.key << PAGE_SHIFT;
- mutex_unlock(&dev->struct_mutex);
-
- if (!(map->flags & _DRM_DRIVER))
- list->master = dev->master;
- *maplist = list;
- return 0;
-}
-
-int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_ptr)
-{
- struct drm_map_list *list;
- int rc;
-
- rc = drm_addmap_core(dev, offset, size, type, flags, &list);
- if (!rc)
- *map_ptr = list->map;
- return rc;
-}
-EXPORT_SYMBOL(drm_legacy_addmap);
-
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
- unsigned int token)
-{
- struct drm_map_list *_entry;
-
- list_for_each_entry(_entry, &dev->maplist, head)
- if (_entry->user_token == token)
- return _entry->map;
- return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_findmap);
-
-/*
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *maplist;
- int err;
-
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
- return -EPERM;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- err = drm_addmap_core(dev, map->offset, map->size, map->type,
- map->flags, &maplist);
-
- if (err)
- return err;
-
- /* avoid a warning on 64-bit; this casting isn't very nice, but the API is already set, so it's too late to change */
- map->handle = (void *)(unsigned long)maplist->user_token;
-
- /*
- * It appears that there are no users of this value whatsoever --
- * drmAddMap just discards it. Let's not encourage its use.
- * (Keeping drm_addmap_core's returned mtrr value would be wrong --
- * it's not a real mtrr index anymore.)
- */
- map->mtrr = -1;
-
- return 0;
-}
-
-/*
- * Get mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- idx = map->offset;
- if (idx < 0)
- return -EINVAL;
-
- i = 0;
- mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*
- * Remove a map private from list and deallocate resources if the mapping
- * isn't in use.
- *
- * Searches for the map on drm_device::maplist, removes it from the list, sees if
- * it's being used, and frees any associated resources (such as MTRRs) if it's
- * not in use.
- *
- * \sa drm_legacy_addmap
- */
-int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
- struct drm_map_list *r_list = NULL, *list_t;
- int found = 0;
- struct drm_master *master;
-
- /* Find the list entry for the map and remove it */
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (r_list->map == map) {
- master = r_list->master;
- list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
- r_list->user_token >> PAGE_SHIFT);
- kfree(r_list);
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- switch (map->type) {
- case _DRM_REGISTERS:
- iounmap(map->handle);
- fallthrough;
- case _DRM_FRAME_BUFFER:
- arch_phys_wc_del(map->mtrr);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- if (master) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL; /* SHM removed */
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dma_free_coherent(dev->dev,
- map->size,
- map->handle,
- map->offset);
- break;
- }
- kfree(map);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_rmmap_locked);
-
-void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- drm_legacy_rmmap_locked(dev, map);
- mutex_unlock(&dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_legacy_rmmap);
-
-void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
-{
- struct drm_map_list *r_list, *list_temp;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_legacy_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-void drm_legacy_rmmaps(struct drm_device *dev)
-{
- struct drm_map_list *r_list, *list_temp;
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_legacy_rmmap(dev, r_list->map);
-}
-
-/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly. Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about. This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->user_token == (unsigned long)request->handle &&
- r_list->map->flags & _DRM_REMOVABLE) {
- map = r_list->map;
- break;
- }
- }
-
- /* List has wrapped around to the head pointer, or it's empty and we didn't
- * find anything.
- */
- if (list_empty(&dev->maplist) || !map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- /* Register and framebuffer maps are permanent */
- if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- ret = drm_legacy_rmmap_locked(dev, map);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/*
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device *dev,
- struct drm_buf_entry *entry)
-{
- drm_dma_handle_t *dmah;
- int i;
-
- if (entry->seg_count) {
- for (i = 0; i < entry->seg_count; i++) {
- if (entry->seglist[i]) {
- dmah = entry->seglist[i];
- dma_free_coherent(dev->dev,
- dmah->size,
- dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
- }
- }
- kfree(entry->seglist);
-
- entry->seg_count = 0;
- }
-
- if (entry->buf_count) {
- for (i = 0; i < entry->buf_count; i++) {
- kfree(entry->buflist[i].dev_private);
- }
- kfree(entry->buflist);
-
- entry->buf_count = 0;
- }
-}
-
-#if IS_ENABLED(CONFIG_AGP)
-/*
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_legacy_addbufs_agp(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_agp_mem *agp_entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i, valid;
- struct drm_buf **temp_buflist;
-
- if (!dma)
- return -EINVAL;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = dev->agp->base + request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lx\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- /* Make sure buffers are located in AGP memory that we own */
- valid = 0;
- list_for_each_entry(agp_entry, &dev->agp->memory, head) {
- if ((agp_offset >= agp_entry->bound) &&
- (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
- valid = 1;
- break;
- }
- }
- if (!list_empty(&dev->agp->memory) && !valid) {
- DRM_DEBUG("zone invalid\n");
- return -EINVAL;
- }
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_AGP;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_agp);
-#endif /* CONFIG_AGP */
-
-int drm_legacy_addbufs_pci(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- int count;
- int order;
- int size;
- int total;
- int page_order;
- struct drm_buf_entry *entry;
- drm_dma_handle_t *dmah;
- struct drm_buf *buf;
- int alignment;
- unsigned long offset;
- int i;
- int byte_count;
- int page_count;
- unsigned long *temp_pagelist;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
- request->count, request->size, size, order);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
- if (!entry->seglist) {
- kfree(entry->buflist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- /* Keep the original pagelist until we know all the allocations
- * have succeeded
- */
- temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
- sizeof(*dma->pagelist),
- GFP_KERNEL);
- if (!temp_pagelist) {
- kfree(entry->buflist);
- kfree(entry->seglist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- memcpy(temp_pagelist,
- dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
- DRM_DEBUG("pagelist: %d entries\n",
- dma->page_count + (count << page_order));
-
- entry->buf_size = size;
- entry->page_order = page_order;
- byte_count = 0;
- page_count = 0;
-
- while (entry->buf_count < count) {
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- dmah->size = total;
- dmah->vaddr = dma_alloc_coherent(dev->dev,
- dmah->size,
- &dmah->busaddr,
- GFP_KERNEL);
- if (!dmah->vaddr) {
- kfree(dmah);
-
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- entry->seglist[entry->seg_count++] = dmah;
- for (i = 0; i < (1 << page_order); i++) {
- DRM_DEBUG("page %d @ 0x%08lx\n",
- dma->page_count + page_count,
- (unsigned long)dmah->vaddr + PAGE_SIZE * i);
- temp_pagelist[dma->page_count + page_count++]
- = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
- }
- for (offset = 0;
- offset + size <= total && entry->buf_count < count;
- offset += alignment, ++entry->buf_count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
- buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(dmah->vaddr + offset);
- buf->bus_address = dmah->busaddr + offset;
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size,
- GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- entry->seg_count = count;
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n",
- entry->buf_count, buf->address);
- }
- byte_count += PAGE_SIZE << page_order;
- }
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- kfree(temp_pagelist);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- /* No allocations failed, so now we can replace the original pagelist
- * with the new one.
- */
- if (dma->page_count) {
- kfree(dma->pagelist);
- }
- dma->pagelist = temp_pagelist;
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += entry->seg_count << page_order;
- dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- if (request->flags & _DRM_PCI_BUFFER_RO)
- dma->flags = _DRM_DMA_USE_PCI_RO;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_pci);
-
-static int drm_legacy_addbufs_sg(struct drm_device *dev,
- struct drm_buf_desc *request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = order_base_2(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->buf_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset
- + (unsigned long)dev->sg->virtual);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_SG;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-/*
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According to the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_legacy_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_desc *request = data;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
-#if IS_ENABLED(CONFIG_AGP)
- if (request->flags & _DRM_AGP_BUFFER)
- ret = drm_legacy_addbufs_agp(dev, request);
- else
-#endif
- if (request->flags & _DRM_SG_BUFFER)
- ret = drm_legacy_addbufs_sg(dev, request);
- else if (request->flags & _DRM_FB_BUFFER)
- ret = -EINVAL;
- else
- ret = drm_legacy_addbufs_pci(dev, request);
-
- return ret;
-}
-
-/*
- * Get information about the buffer mappings.
- *
- * This was originally meant for debugging purposes, or for use by a
- * sophisticated client library to determine how best to use the available
- * buffers (e.g., large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::buf_lock
- * lock, preventing allocation of more buffers after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int __drm_legacy_infobufs(struct drm_device *dev,
- void *data, int *p,
- int (*f)(void *, int, struct drm_buf_entry *))
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int count;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- ++dev->buf_use; /* Can't allocate more after this call */
- spin_unlock(&dev->buf_lock);
-
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count)
- ++count;
- }
-
- DRM_DEBUG("count = %d\n", count);
-
- if (*p >= count) {
- for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- struct drm_buf_entry *from = &dma->bufs[i];
-
- if (from->buf_count) {
- if (f(data, count, from) < 0)
- return -EFAULT;
- DRM_DEBUG("%d %d %d %d %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].buf_size,
- dma->bufs[i].low_mark,
- dma->bufs[i].high_mark);
- ++count;
- }
- }
- }
- *p = count;
-
- return 0;
-}
-
-static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
-{
- struct drm_buf_info *request = data;
- struct drm_buf_desc __user *to = &request->list[count];
- struct drm_buf_desc v = {.count = from->buf_count,
- .size = from->buf_size,
- .low_mark = from->low_mark,
- .high_mark = from->high_mark};
-
- if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
- return -EFAULT;
- return 0;
-}
-
-int drm_legacy_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_info *request = data;
-
- return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
-}
-
-/*
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order falls within the admissible range and
- * updates the respective drm_device_dma::bufs entry's low and high water marks.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_legacy_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_desc *request = data;
- int order;
- struct drm_buf_entry *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d, %d, %d\n",
- request->size, request->low_mark, request->high_mark);
- order = order_base_2(request->size);
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
- entry = &dma->bufs[order];
-
- if (request->low_mark < 0 || request->low_mark > entry->buf_count)
- return -EINVAL;
- if (request->high_mark < 0 || request->high_mark > entry->buf_count)
- return -EINVAL;
-
- entry->low_mark = request->low_mark;
- entry->high_mark = request->high_mark;
-
- return 0;
-}
-
-/*
- * Unreserve the buffers in the list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_legacy_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_free *request = data;
- int i;
- int idx;
- struct drm_buf *buf;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- DRM_DEBUG("%d\n", request->count);
- for (i = 0; i < request->count; i++) {
- if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
- return -EFAULT;
- if (idx < 0 || idx >= dma->buf_count) {
- DRM_ERROR("Index %d (of %d max)\n",
- idx, dma->buf_count - 1);
- return -EINVAL;
- }
- idx = array_index_nospec(idx, dma->buf_count);
- buf = dma->buflist[idx];
- if (buf->file_priv != file_priv) {
- DRM_ERROR("Process %d freeing buffer not owned\n",
- task_pid_nr(current));
- return -EINVAL;
- }
- drm_legacy_free_buffer(dev, buf);
- }
-
- return 0;
-}
-
-/*
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
- void __user **v,
- int (*f)(void *, int, unsigned long,
- struct drm_buf *),
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int retcode = 0;
- unsigned long virtual;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EOPNOTSUPP;
-
- if (!dma)
- return -EINVAL;
-
- spin_lock(&dev->buf_lock);
- if (atomic_read(&dev->buf_alloc)) {
- spin_unlock(&dev->buf_lock);
- return -EBUSY;
- }
- dev->buf_use++; /* Can't allocate more after this call */
- spin_unlock(&dev->buf_lock);
-
- if (*p >= dma->buf_count) {
- if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
- || (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))) {
- struct drm_local_map *map = dev->agp_buffer_map;
- unsigned long token = dev->agp_buffer_token;
-
- if (!map) {
- retcode = -EINVAL;
- goto done;
- }
- virtual = vm_mmap(file_priv->filp, 0, map->size,
- PROT_READ | PROT_WRITE,
- MAP_SHARED,
- token);
- } else {
- virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, 0);
- }
- if (virtual > -1024UL) {
- /* Real error */
- retcode = (signed long)virtual;
- goto done;
- }
- *v = (void __user *)virtual;
-
- for (i = 0; i < dma->buf_count; i++) {
- if (f(data, i, virtual, dma->buflist[i]) < 0) {
- retcode = -EFAULT;
- goto done;
- }
- }
- }
- done:
- *p = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
-
- return retcode;
-}
-
-static int map_one_buf(void *data, int idx, unsigned long virtual,
- struct drm_buf *buf)
-{
- struct drm_buf_map *request = data;
- unsigned long address = virtual + buf->offset; /* *** */
-
- if (copy_to_user(&request->list[idx].idx, &buf->idx,
- sizeof(request->list[0].idx)))
- return -EFAULT;
- if (copy_to_user(&request->list[idx].total, &buf->total,
- sizeof(request->list[0].total)))
- return -EFAULT;
- if (clear_user(&request->list[idx].used, sizeof(int)))
- return -EFAULT;
- if (copy_to_user(&request->list[idx].address, &address,
- sizeof(address)))
- return -EFAULT;
- return 0;
-}
-
-int drm_legacy_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_buf_map *request = data;
-
- return __drm_legacy_mapbufs(dev, data, &request->count,
- &request->virtual, map_one_buf,
- file_priv);
-}
-
-int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (dev->driver->dma_ioctl)
- return dev->driver->dma_ioctl(dev, data, file_priv);
- else
- return -EINVAL;
-}
-
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_getsarea);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index c3027115d..9403b3f57 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -5,7 +5,6 @@
#include <linux/iosys-map.h>
#include <linux/list.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -84,16 +83,13 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
return -EOPNOTSUPP;
- if (funcs && !try_module_get(funcs->owner))
- return -ENODEV;
-
client->dev = dev;
client->name = name;
client->funcs = funcs;
ret = drm_client_modeset_create(client);
if (ret)
- goto err_put_module;
+ return ret;
ret = drm_client_open(client);
if (ret)
@@ -105,10 +101,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
err_free:
drm_client_modeset_free(client);
-err_put_module:
- if (funcs)
- module_put(funcs->owner);
-
return ret;
}
EXPORT_SYMBOL(drm_client_init);
@@ -177,8 +169,6 @@ void drm_client_release(struct drm_client_dev *client)
drm_client_modeset_free(client);
drm_client_close(client);
drm_dev_put(dev);
- if (client->funcs)
- module_put(client->funcs->owner);
}
EXPORT_SYMBOL(drm_client_release);
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 871e4e212..0683a129b 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
+ /* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index c3725086f..b0516505f 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1198,6 +1198,12 @@ static const u32 dp_colorspaces =
* drm_connector_set_path_property(), in the case of DP MST with the
* path property the MST manager created. Userspace cannot change this
* property.
+ *
+ * In the case of DP MST, the property has the format
+ * ``mst:<parent>-<ports>`` where ``<parent>`` is the KMS object ID of the
+ * parent connector and ``<ports>`` is a hyphen-separated list of DP MST
+ * port numbers. Note that KMS object IDs are not guaranteed to be stable
+ * across reboots.
* TILE:
* Connector tile group property to indicate how a set of DRM connector
* compose together into one logical screen. This is used by both high-res
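
For userspace, the documented PATH format is easy to take apart. A minimal,
self-contained sketch in C; parse_mst_path() and the sample string are
illustrative only, not part of any DRM API:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Split "mst:<parent>-<port>-..." into parent object ID and ports. */
	static int parse_mst_path(const char *path, unsigned long *parent)
	{
		char *end;

		if (strncmp(path, "mst:", 4))
			return -1;			/* not a DP MST path */

		*parent = strtoul(path + 4, &end, 10);	/* KMS object ID */
		while (*end == '-')			/* hyphen-separated ports */
			printf("port %lu\n", strtoul(end + 1, &end, 10));

		return 0;
	}

	int main(void)
	{
		unsigned long parent;

		if (!parse_mst_path("mst:42-1-3", &parent))
			printf("parent connector object ID %lu\n", parent);
		return 0;
	}
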
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
deleted file mode 100644
index a0fc779e5..000000000
--- a/drivers/gpu/drm/drm_context.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Legacy: Generic DRM Contexts
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-struct drm_ctx_list {
- struct list_head head;
- drm_context_t handle;
- struct drm_file *tag;
-};
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/*
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
- * lock.
- */
-void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- idr_remove(&dev->ctx_idr, ctx_handle);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new idr from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
- GFP_KERNEL);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/*
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr
- */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- idr_init(&dev->ctx_idr);
-}
-
-/*
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Free all idr members using drm_ctx_sarea_free helper function
- * while holding the drm_device::struct_mutex lock.
- */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->struct_mutex);
- idr_destroy(&dev->ctx_idr);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
- * @dev: DRM device to operate on
- * @file: Open file to flush contexts for
- *
- * This iterates over all contexts on @dev and drops them if they're owned by
- * @file. Note that after this call returns, new contexts might be added if
- * the file is still alive.
- */
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_ctx_list *pos, *tmp;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- mutex_lock(&dev->ctxlist_mutex);
-
- list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
- if (pos->tag == file &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, pos->handle);
-
- drm_legacy_ctxbitmap_free(dev, pos->handle);
- list_del(&pos->head);
- kfree(pos);
- }
- }
-
- mutex_unlock(&dev->ctxlist_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/*
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Gets the map from drm_device::ctx_idr with the handle specified and
- * returns its handle.
- */
-int drm_legacy_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map;
- struct drm_map_list *_entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
-
- map = idr_find(&dev->ctx_idr, request->ctx_id);
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- request->handle = NULL;
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == map) {
- request->handle =
- (void *)(unsigned long)_entry->user_token;
- break;
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- if (request->handle == NULL)
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches the mapping specified in \p arg and update the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_legacy_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_priv_map *request = data;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list = NULL;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map
- && r_list->user_token == (unsigned long) request->handle)
- goto found;
- }
- bad:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
-
- found:
- map = r_list->map;
- if (!map)
- goto bad;
-
- if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
- goto bad;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/*
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
- if (test_and_set_bit(0, &dev->context_flag)) {
- DRM_ERROR("Reentering -- FIXME\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
- if (new == dev->last_context) {
- clear_bit(0, &dev->context_flag);
- return 0;
- }
-
- return 0;
-}
-
-/*
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
- struct drm_file *file_priv, int new)
-{
- dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
-
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
- DRM_ERROR("Lock isn't held after context switch\n");
- }
-
- /* If a context switch is ever initiated
- when the kernel holds the lock, release
- that lock here.
- */
- clear_bit(0, &dev->context_flag);
-
- return 0;
-}
-
-/*
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_res *res = data;
- struct drm_ctx ctx;
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (res->count >= DRM_RESERVED_CONTEXTS) {
- memset(&ctx, 0, sizeof(ctx));
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- ctx.handle = i;
- if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
- return -EFAULT;
- }
- }
- res->count = DRM_RESERVED_CONTEXTS;
-
- return 0;
-}
-
-/*
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_legacy_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx_list *ctx_entry;
- struct drm_ctx *ctx = data;
- int tmp_handle;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- tmp_handle = drm_legacy_ctxbitmap_next(dev);
- if (tmp_handle == DRM_KERNEL_CONTEXT) {
- /* Skip kernel's context and get a new one. */
- tmp_handle = drm_legacy_ctxbitmap_next(dev);
- }
- DRM_DEBUG("%d\n", tmp_handle);
- if (tmp_handle < 0) {
- DRM_DEBUG("Not enough free contexts.\n");
- /* Should this return -EBUSY instead? */
- return tmp_handle;
- }
-
- ctx->handle = tmp_handle;
-
- ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
- if (!ctx_entry) {
- DRM_DEBUG("out of memory\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx->handle;
- ctx_entry->tag = file_priv;
-
- mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist);
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_getctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- /* This is 0, because we don't handle any context flags */
- ctx->flags = 0;
-
- return 0;
-}
-
-/*
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_legacy_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/*
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_legacy_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, file_priv, ctx->handle);
-
- return 0;
-}
-
-/*
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_legacy_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_ctx *ctx = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx->handle);
- drm_legacy_ctxbitmap_free(dev, ctx->handle);
- }
-
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx->handle) {
- list_del(&pos->head);
- kfree(pos);
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
-
- return 0;
-}
-
-/*@}*/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index cb90e70d8..65f9f6693 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -904,6 +904,7 @@ out:
connector_set = NULL;
fb = NULL;
mode = NULL;
+ num_connectors = 0;
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a209659a9..2dafc39a2 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode);
* @state: atomic state object
*
* Provides a default CRTC-state check handler for CRTCs that only have
- * one primary plane attached to it.
- *
- * This is often the case for the CRTC of simple framebuffers. See also
- * drm_plane_helper_atomic_check() for the respective plane-state check
- * helper function.
+ * one primary plane attached to it. This is often the case for the CRTC
+ * of simple framebuffers.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 8556c3b3f..a514d5207 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -222,6 +222,8 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_mode_closefb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb2_ioctl(struct drm_device *dev,
@@ -251,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
- uint64_t prop_value);
+ u64 prop_value, bool async_flip);
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f291fb4b3..f4715a67e 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev)
drm_framebuffer_debugfs_init(dev);
drm_client_debugfs_init(dev);
}
- if (drm_drv_uses_atomic_modeset(dev)) {
+ if (drm_drv_uses_atomic_modeset(dev))
drm_atomic_debugfs_init(dev);
- drm_bridge_debugfs_init(dev);
- }
}
int drm_debugfs_register(struct drm_minor *minor, int minor_id,
@@ -589,4 +587,65 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
crtc->debugfs_entry = NULL;
}
+static int bridges_show(struct seq_file *m, void *data)
+{
+ struct drm_encoder *encoder = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_bridge *bridge;
+ unsigned int idx = 0;
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
+ drm_printf(&p, "\ttype: [%d] %s\n",
+ bridge->type,
+ drm_get_connector_type_name(bridge->type));
+#ifdef CONFIG_OF
+ if (bridge->of_node)
+ drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
+#endif
+ drm_printf(&p, "\tops: [0x%x]", bridge->ops);
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ drm_puts(&p, " detect");
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ drm_puts(&p, " edid");
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_puts(&p, " hpd");
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ drm_puts(&p, " modes");
+ drm_puts(&p, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bridges);
+
+void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+ struct drm_minor *minor = encoder->dev->primary;
+ struct dentry *root;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index);
+ if (!name)
+ return;
+
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ kfree(name);
+
+ encoder->debugfs_entry = root;
+
+ /* bridges list */
+ debugfs_create_file("bridges", 0444, root, encoder,
+ &bridges_fops);
+
+ if (encoder->funcs && encoder->funcs->debugfs_init)
+ encoder->funcs->debugfs_init(encoder, root);
+}
+
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+ debugfs_remove_recursive(encoder->debugfs_entry);
+ encoder->debugfs_entry = NULL;
+}
+
#endif /* CONFIG_DEBUG_FS */
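
With these additions every registered encoder gets its own debugfs directory
under the DRM minor, and the new bridges file walks the encoder's bridge
chain. Illustrative output, assuming a DisplayPort bridge whose funcs symbol
is ti_sn_bridge_funcs (names and values invented for the example, following
the format strings above):

	# cat /sys/kernel/debug/dri/0/encoder-0/bridges
	bridge[0]: ti_sn_bridge_funcs
		type: [10] DP
		ops: [0xf] detect edid hpd modes
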
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
deleted file mode 100644
index eb6b741a6..000000000
--- a/drivers/gpu/drm/drm_dma.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-/**
- * drm_legacy_dma_setup() - Initialize the DMA data.
- *
- * @dev: DRM device.
- * Return: zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_legacy_dma_setup(struct drm_device *dev)
-{
- int i;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- !drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
- if (!dev->dma)
- return -ENOMEM;
-
- for (i = 0; i <= DRM_MAX_ORDER; i++)
- memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
-
- return 0;
-}
-
-/**
- * drm_legacy_dma_takedown() - Cleanup the DMA resources.
- *
- * @dev: DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_legacy_dma_takedown(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_dma_handle_t *dmah;
- int i, j;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- !drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- if (!dma)
- return;
-
- /* Clear dma buffers */
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].seg_count) {
- DRM_DEBUG("order %d: buf_count = %d,"
- " seg_count = %d\n",
- i,
- dma->bufs[i].buf_count,
- dma->bufs[i].seg_count);
- for (j = 0; j < dma->bufs[i].seg_count; j++) {
- if (dma->bufs[i].seglist[j]) {
- dmah = dma->bufs[i].seglist[j];
- dma_free_coherent(dev->dev,
- dmah->size,
- dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
- }
- }
- kfree(dma->bufs[i].seglist);
- }
- if (dma->bufs[i].buf_count) {
- for (j = 0; j < dma->bufs[i].buf_count; j++) {
- kfree(dma->bufs[i].buflist[j].dev_private);
- }
- kfree(dma->bufs[i].buflist);
- }
- }
-
- kfree(dma->buflist);
- kfree(dma->pagelist);
- kfree(dev->dma);
- dev->dma = NULL;
-}
-
-/**
- * drm_legacy_free_buffer() - Free a buffer.
- *
- * @dev: DRM device.
- * @buf: buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
- if (!buf)
- return;
-
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
- buf->used = 0;
-}
-
-/**
- * drm_legacy_reclaim_buffers() - Reclaim the buffers.
- *
- * @dev: DRM device.
- * @file_priv: DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->file_priv == file_priv) {
- switch (dma->buflist[i]->list) {
- case DRM_LIST_NONE:
- drm_legacy_free_buffer(dev, dma->buflist[i]);
- break;
- case DRM_LIST_WAIT:
- dma->buflist[i]->list = DRM_LIST_RECLAIM;
- break;
- default:
- /* Buffer already on hardware. */
- break;
- }
- }
- }
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3c835c99d..243cacb35 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -48,7 +48,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
@@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode)
static void drm_dev_init_release(struct drm_device *dev, void *res)
{
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
put_device(dev->dev);
@@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
}
static int drm_dev_init(struct drm_device *dev,
@@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev,
return -EINVAL;
}
- drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
@@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev,
goto err;
}
- ret = drm_legacy_create_map_hash(dev);
- if (ret)
- goto err;
-
- drm_legacy_ctxbitmap_init(dev);
-
if (drm_core_check_feature(dev, DRIVER_GEM)) {
ret = drm_gem_init(dev);
if (ret) {
@@ -996,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register);
*/
void drm_dev_unregister(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_lastclose(dev);
-
dev->registered = false;
drm_client_dev_unregister(dev);
@@ -1009,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
- drm_legacy_pci_agp_destroy(dev);
- drm_legacy_rmmaps(dev);
-
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_ACCEL);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3b4065099..69c688040 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -41,10 +41,12 @@
#include <drm/drm_displayid.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
static int oui(u8 first, u8 second, u8 third)
{
@@ -5511,6 +5513,27 @@ static void clear_eld(struct drm_connector *connector)
}
/*
+ * Pack a struct cea_sad into a 3-byte SAD buffer.
+ */
+void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad)
+{
+ sad[0] = cta_sad->format << 3 | cta_sad->channels;
+ sad[1] = cta_sad->freq;
+ sad[2] = cta_sad->byte2;
+}
+
+/*
+ * Unpack a 3-byte SAD buffer into a struct cea_sad.
+ */
+void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad)
+{
+ cta_sad->format = (sad[0] & 0x78) >> 3;
+ cta_sad->channels = sad[0] & 0x07;
+ cta_sad->freq = sad[1] & 0x7f;
+ cta_sad->byte2 = sad[2];
+}
+
+/*
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @drm_edid: EDID to parse
@@ -5594,7 +5617,7 @@ static void drm_edid_to_eld(struct drm_connector *connector,
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
- struct cea_sad **sads)
+ struct cea_sad **psads)
{
const struct cea_db *db;
struct cea_db_iter iter;
@@ -5603,20 +5626,16 @@ static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
if (cea_db_tag(db) == CTA_DB_AUDIO) {
- int j;
+ struct cea_sad *sads;
+ int i;
count = cea_db_payload_len(db) / 3; /* SAD is 3B */
- *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
- if (!*sads)
+ sads = kcalloc(count, sizeof(*sads), GFP_KERNEL);
+ *psads = sads;
+ if (!sads)
return -ENOMEM;
- for (j = 0; j < count; j++) {
- const u8 *sad = &db->data[j * 3];
-
- (*sads)[j].format = (sad[0] & 0x78) >> 3;
- (*sads)[j].channels = sad[0] & 0x7;
- (*sads)[j].freq = sad[1] & 0x7F;
- (*sads)[j].byte2 = sad[2];
- }
+ for (i = 0; i < count; i++)
+ drm_edid_cta_sad_set(&sads[i], &db->data[i * 3]);
break;
}
}
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 5d9ef267e..60fcb80bc 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -23,22 +23,6 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
-/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
-int __drm_set_edid_firmware_path(const char *path)
-{
- scnprintf(edid_firmware, sizeof(edid_firmware), "%s", path);
-
- return 0;
-}
-EXPORT_SYMBOL(__drm_set_edid_firmware_path);
-
-/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
-int __drm_get_edid_firmware_path(char *buf, size_t bufsize)
-{
- return scnprintf(buf, bufsize, "%s", edid_firmware);
-}
-EXPORT_SYMBOL(__drm_get_edid_firmware_path);
-
#define GENERIC_EDIDS 6
static const char * const generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c
new file mode 100644
index 000000000..5177991aa
--- /dev/null
+++ b/drivers/gpu/drm/drm_eld.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
+
+#include "drm_internal.h"
+
+/**
+ * drm_eld_sad_get - get SAD from ELD to struct cea_sad
+ * @eld: ELD buffer
+ * @sad_index: SAD index
+ * @cta_sad: destination struct cea_sad
+ *
+ * Returns: 0 on success, or a negative error code otherwise
+ */
+int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad)
+{
+ const u8 *sad;
+
+ if (sad_index >= drm_eld_sad_count(eld))
+ return -EINVAL;
+
+ sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index);
+
+ drm_edid_cta_sad_set(cta_sad, sad);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_eld_sad_get);
+
+/**
+ * drm_eld_sad_set - set SAD to ELD from struct cea_sad
+ * @eld: ELD buffer
+ * @sad_index: SAD index
+ * @cta_sad: source struct cea_sad
+ *
+ * Returns: 0 on success, or a negative error code otherwise
+ */
+int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad)
+{
+ u8 *sad;
+
+ if (sad_index >= drm_eld_sad_count(eld))
+ return -EINVAL;
+
+ sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index);
+
+ drm_edid_cta_sad_get(cta_sad, sad);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_eld_sad_set);
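
As a usage sketch for the new helpers, iterating over all SADs of an ELD
might look like the following; dump_eld_sads() is an illustrative name, and
the eld buffer is assumed to have been filled by the EDID parser:

	#include <linux/printk.h>

	#include <drm/drm_edid.h>
	#include <drm/drm_eld.h>

	/* Log every short audio descriptor (SAD) found in an ELD buffer. */
	static void dump_eld_sads(const u8 *eld)
	{
		struct cea_sad sad;
		int i;

		for (i = 0; i < drm_eld_sad_count(eld); i++) {
			if (drm_eld_sad_get(eld, i, &sad) < 0)
				break;
			pr_info("SAD %d: format %u, channels %u, freq 0x%02x\n",
				i, sad.format, sad.channels, sad.freq);
		}
	}
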
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 1143bc7f3..8f2bc6a28 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -30,6 +30,7 @@
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
+#include "drm_internal.h"
/**
* DOC: overview
@@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_encoder(encoder, dev) {
+ drm_debugfs_encoder_add(encoder);
+
if (encoder->funcs && encoder->funcs->late_register)
ret = encoder->funcs->late_register(encoder);
if (ret)
@@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev)
drm_for_each_encoder(encoder, dev) {
if (encoder->funcs && encoder->funcs->early_unregister)
encoder->funcs->early_unregister(encoder);
+ drm_debugfs_encoder_remove(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 5d2809de4..48ee851b6 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -69,16 +69,23 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
* drm_exec_init - initialize a drm_exec object
* @exec: the drm_exec object to initialize
* @flags: controls locking behavior, see DRM_EXEC_* defines
+ * @nr: the initial number of objects
*
* Initialize the object and make sure that we can track locked objects.
+ *
+ * If @nr is non-zero, it is used as the initial size of the objects table.
+ * In either case, the table will grow (be re-allocated) on demand.
*/
-void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
{
+ if (!nr)
+ nr = PAGE_SIZE / sizeof(void *);
+
exec->flags = flags;
- exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);
/* If allocation here fails, just delay that till the first use */
- exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+ exec->max_objects = exec->objects ? nr : 0;
exec->num_objects = 0;
exec->contended = DRM_EXEC_DUMMY;
exec->prelocked = NULL;
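
For callers, the new parameter only changes the pre-allocation; passing 0
keeps the old one-page default. A minimal sketch of the updated calling
convention, with lock_two() and the object pointers as illustrative
placeholders:

	#include <drm/drm_exec.h>
	#include <drm/drm_gem.h>

	/* Lock two GEM objects, pre-sizing the table for exactly two entries. */
	static int lock_two(struct drm_gem_object *a, struct drm_gem_object *b)
	{
		struct drm_exec exec;
		int ret;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 2);
		drm_exec_until_all_locked(&exec) {
			ret = drm_exec_lock_obj(&exec, a);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				break;
			ret = drm_exec_lock_obj(&exec, b);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				break;
		}
		drm_exec_fini(&exec);
		return ret;
	}
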
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 54a7103c1..8c87287c3 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -47,7 +47,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -55,14 +54,6 @@ DEFINE_MUTEX(drm_global_mutex);
bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
/*
- * Legacy drivers rely on all kinds of BKL locking semantics, don't
- * bother. They also still need BKL locking for their ioctls, so better
- * safe than sorry.
- */
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- return true;
-
- /*
* The deprecated ->load callback must be called after the driver is
* already registered. This means such drivers rely on the BKL to make
* sure an open can't proceed until the driver is actually fully set up.
@@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* drm_send_event() as the main starting points.
*
* The memory mapping implementation will vary depending on how the driver
- * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
- * function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ * manages memory. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
* following is an example &file_operations structure::
@@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file)
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
-#ifdef CONFIG_DRM_LEGACY
- if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
- dev->driver->preclose)
- dev->driver->preclose(dev, file);
-#endif
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_lock_release(dev, file->filp);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- drm_legacy_reclaim_buffers(dev, file);
-
drm_events_release(file);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
- drm_legacy_ctxbitmap_flush(dev, file);
-
if (drm_is_primary_client(file))
drm_master_release(file);
@@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->filelist_mutex);
-#ifdef CONFIG_DRM_LEGACY
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
-
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = list_entry(pci_root_buses.next,
- struct pci_bus, node);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-#endif
-
return 0;
}
@@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp)
struct drm_device *dev;
struct drm_minor *minor;
int retcode;
- int need_setup = 0;
minor = drm_minor_acquire(iminor(inode));
if (IS_ERR(minor))
@@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp)
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
- if (!atomic_fetch_inc(&dev->open_count))
- need_setup = 1;
+ atomic_fetch_inc(&dev->open_count);
/* share address_space across all char-devs of a single device */
filp->f_mapping = dev->anon_inode->i_mapping;
@@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(filp, minor);
if (retcode)
goto err_undo;
- if (need_setup) {
- retcode = drm_legacy_setup(dev);
- if (retcode) {
- drm_close_helper(filp);
- goto err_undo;
- }
- }
if (drm_dev_needs_global_mutex(dev))
mutex_unlock(&drm_global_mutex);
@@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
drm_dbg_core(dev, "driver lastclose completed\n");
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- drm_legacy_dev_reinit(dev);
-
drm_client_dev_restore(dev);
}
@@ -913,7 +853,7 @@ static void print_size(struct drm_printer *p, const char *stat,
unsigned u;
for (u = 0; u < ARRAY_SIZE(units) - 1; u++) {
- if (sz < SZ_1K)
+ if (sz == 0 || !IS_ALIGNED(sz, SZ_1K))
break;
sz = div_u64(sz, SZ_1K);
}
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 060b75388..8c6090a90 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -27,14 +27,12 @@
#include <drm/drm_print.h>
#include <drm/drm_util.h>
-/**
- * drm_flip_work_allocate_task - allocate a flip-work task
- * @data: data associated to the task
- * @flags: allocator flags
- *
- * Allocate a drm_flip_task object and attach private data to it.
- */
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
+static struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
{
struct drm_flip_task *task;
@@ -44,18 +42,8 @@ struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
return task;
}
-EXPORT_SYMBOL(drm_flip_work_allocate_task);
-/**
- * drm_flip_work_queue_task - queue a specific task
- * @work: the flip-work
- * @task: the task to handle
- *
- * Queues task, that will later be run (passed back to drm_flip_func_t
- * func) on a work queue after drm_flip_work_commit() is called.
- */
-void drm_flip_work_queue_task(struct drm_flip_work *work,
- struct drm_flip_task *task)
+static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
{
unsigned long flags;
@@ -63,7 +51,6 @@ void drm_flip_work_queue_task(struct drm_flip_work *work,
list_add_tail(&task->node, &work->queued);
spin_unlock_irqrestore(&work->lock, flags);
}
-EXPORT_SYMBOL(drm_flip_work_queue_task);
/**
* drm_flip_work_queue - queue work
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index f93a4efce..b1be458ed 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -20,6 +20,97 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
+/**
+ * drm_format_conv_state_init - Initialize format-conversion state
+ * @state: The state to initialize
+ *
+ * Clears all fields in struct drm_format_conv_state. The state will
+ * be empty with no preallocated resources.
+ */
+void drm_format_conv_state_init(struct drm_format_conv_state *state)
+{
+ state->tmp.mem = NULL;
+ state->tmp.size = 0;
+ state->tmp.preallocated = false;
+}
+EXPORT_SYMBOL(drm_format_conv_state_init);
+
+/**
+ * drm_format_conv_state_copy - Copy format-conversion state
+ * @state: Destination state
+ * @old_state: Source state
+ *
+ * Copies format-conversion state from @old_state to @state, except for
+ * the temporary storage, which is not duplicated.
+ */
+void drm_format_conv_state_copy(struct drm_format_conv_state *state,
+ const struct drm_format_conv_state *old_state)
+{
+ /*
+ * So far, there's only temporary storage here, which we don't
+ * duplicate. Just clear the fields.
+ */
+ state->tmp.mem = NULL;
+ state->tmp.size = 0;
+ state->tmp.preallocated = false;
+}
+EXPORT_SYMBOL(drm_format_conv_state_copy);
+
+/**
+ * drm_format_conv_state_reserve - Allocates storage for format conversion
+ * @state: The format-conversion state
+ * @new_size: The minimum allocation size
+ * @flags: Flags for kmalloc()
+ *
+ * Allocates at least @new_size bytes and returns a pointer to the memory
+ * range. After calling this function, previously returned memory blocks
+ * are invalid. It's best to collect all memory requirements of a format
+ * conversion and call this function once to allocate the range.
+ *
+ * Returns:
+ * A pointer to the allocated memory range, or NULL otherwise.
+ */
+void *drm_format_conv_state_reserve(struct drm_format_conv_state *state,
+ size_t new_size, gfp_t flags)
+{
+ void *mem;
+
+ if (new_size <= state->tmp.size)
+ goto out;
+ else if (state->tmp.preallocated)
+ return NULL;
+
+ mem = krealloc(state->tmp.mem, new_size, flags);
+ if (!mem)
+ return NULL;
+
+ state->tmp.mem = mem;
+ state->tmp.size = new_size;
+
+out:
+ return state->tmp.mem;
+}
+EXPORT_SYMBOL(drm_format_conv_state_reserve);
+
+/**
+ * drm_format_conv_state_release - Releases a format-conversion state's storage
+ * @state: The format-conversion state
+ *
+ * Releases the memory range referenced by the format-conversion state.
+ * After this call, all pointers to the memory are invalid. Prefer
+ * drm_format_conv_state_init() for cleaning up and unloading a driver.
+ */
+void drm_format_conv_state_release(struct drm_format_conv_state *state)
+{
+ if (state->tmp.preallocated)
+ return;
+
+ kfree(state->tmp.mem);
+ state->tmp.mem = NULL;
+ state->tmp.size = 0;
+}
+EXPORT_SYMBOL(drm_format_conv_state_release);
+
static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp)
{
return clip->y1 * pitch + clip->x1 * cpp;
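
A short sketch of the intended lifecycle of the conversion state, assuming
the updated drm_fb_blit() signature introduced below and a driver that blits
to RGB565 once per update; blit_with_state() is an illustrative name, and
long-lived callers would keep the state around rather than release it
immediately:

	#include <linux/iosys-map.h>

	#include <drm/drm_format_helper.h>
	#include <drm/drm_fourcc.h>
	#include <drm/drm_framebuffer.h>
	#include <drm/drm_rect.h>

	/* One conversion state per blit; drm_fb_blit() reserves from it. */
	static int blit_with_state(struct iosys_map *dst, const unsigned int *dst_pitch,
				   const struct iosys_map *src,
				   const struct drm_framebuffer *fb,
				   const struct drm_rect *clip)
	{
		struct drm_format_conv_state state;
		int ret;

		drm_format_conv_state_init(&state);
		ret = drm_fb_blit(dst, dst_pitch, DRM_FORMAT_RGB565, src, fb,
				  clip, &state);
		drm_format_conv_state_release(&state);

		return ret;
	}
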
@@ -45,6 +136,7 @@ EXPORT_SYMBOL(drm_fb_clip_offset);
static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
+ struct drm_format_conv_state *state,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
@@ -60,7 +152,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p
* one line at a time.
*/
if (!vaddr_cached_hint) {
- stmp = kmalloc(sbuf_len, GFP_KERNEL);
+ stmp = drm_format_conv_state_reserve(state, sbuf_len, GFP_KERNEL);
if (!stmp)
return -ENOMEM;
}
@@ -79,8 +171,6 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p
dst += dst_pitch;
}
- kfree(stmp);
-
return 0;
}
@@ -88,6 +178,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p
static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
const void *vaddr, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
+ struct drm_format_conv_state *state,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
@@ -101,9 +192,9 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign
void *dbuf;
if (vaddr_cached_hint) {
- dbuf = kmalloc(dbuf_len, GFP_KERNEL);
+ dbuf = drm_format_conv_state_reserve(state, dbuf_len, GFP_KERNEL);
} else {
- dbuf = kmalloc(stmp_off + sbuf_len, GFP_KERNEL);
+ dbuf = drm_format_conv_state_reserve(state, stmp_off + sbuf_len, GFP_KERNEL);
stmp = dbuf + stmp_off;
}
if (!dbuf)
@@ -124,8 +215,6 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign
dst += dst_pitch;
}
- kfree(dbuf);
-
return 0;
}
@@ -134,6 +223,7 @@ static int drm_fb_xfrm(struct iosys_map *dst,
const unsigned int *dst_pitch, const u8 *dst_pixsize,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool vaddr_cached_hint,
+ struct drm_format_conv_state *state,
void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
@@ -146,10 +236,12 @@ static int drm_fb_xfrm(struct iosys_map *dst,
/* TODO: handle src in I/O memory here */
if (dst[0].is_iomem)
return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0],
- src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
+ src[0].vaddr, fb, clip, vaddr_cached_hint, state,
+ xfrm_line);
else
return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0],
- src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
+ src[0].vaddr, fb, clip, vaddr_cached_hint, state,
+ xfrm_line);
}
/**
@@ -235,6 +327,7 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @cached: Source buffer is mapped cached (eg. not write-combined)
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and swaps per-pixel
* bytes during the process. Destination and framebuffer formats must match. The
@@ -249,7 +342,8 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels
*/
void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool cached)
+ const struct drm_rect *clip, bool cached,
+ struct drm_format_conv_state *state)
{
const struct drm_format_info *format = fb->format;
u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8);
@@ -268,7 +362,7 @@ void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
return;
}
- drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line);
+ drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, state, swab_line);
}
EXPORT_SYMBOL(drm_fb_swab);
@@ -295,6 +389,7 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -309,13 +404,13 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne
*/
void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
1,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_rgb332_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
@@ -364,6 +459,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
* @swab: Swap bytes
*
* This function copies parts of a framebuffer to display memory and converts the
@@ -379,7 +475,8 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
*/
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab)
+ const struct drm_rect *clip, struct drm_format_conv_state *state,
+ bool swab)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
@@ -392,7 +489,7 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi
else
xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line);
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
@@ -421,6 +518,7 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -436,13 +534,13 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xrgb1555_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
@@ -473,6 +571,7 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -488,13 +587,13 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_argb1555_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
@@ -525,6 +624,7 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -540,13 +640,13 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
2,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_rgba5551_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
@@ -575,6 +675,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -590,13 +691,13 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
*/
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
3,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_rgb888_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
@@ -623,6 +724,7 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig
* @src: Array of XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. The parameters @dst, @dst_pitch and @src refer
@@ -638,13 +740,13 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig
*/
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_argb8888_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
@@ -669,13 +771,14 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src,
const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
@@ -699,13 +802,14 @@ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsig
static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src,
const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
@@ -735,6 +839,7 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -750,13 +855,14 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un
*/
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xrgb2101010_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
@@ -788,6 +894,7 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts
* the color format during the process. The parameters @dst, @dst_pitch and
@@ -803,13 +910,14 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un
*/
void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_argb2101010_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
@@ -839,6 +947,7 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -858,13 +967,13 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned
*/
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
1,
};
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_gray8_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
@@ -878,6 +987,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
* @src: The framebuffer memory to copy from
* @fb: The framebuffer to copy from
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory. If the
* formats of the display and the framebuffer mismatch, the blit function
@@ -896,7 +1006,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
*/
int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
uint32_t fb_format = fb->format->format;
@@ -904,44 +1014,44 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
} else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
} else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
} else if (fb_format == DRM_FORMAT_XRGB8888) {
if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB1555) {
- drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB1555) {
- drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_RGBA5551) {
- drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB8888) {
- drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XBGR8888) {
- drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ABGR8888) {
- drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_XRGB2101010) {
- drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_ARGB2101010) {
- drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip);
+ drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state);
return 0;
} else if (dst_format == DRM_FORMAT_BGRX8888) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
}
}
@@ -978,6 +1088,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int
* @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
*
* This function copies parts of a framebuffer to display memory and converts the
* color format during the process. Destination and framebuffer formats must match. The
@@ -1002,7 +1113,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int
*/
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
{
static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
0, 0, 0, 0
@@ -1042,7 +1153,7 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
	 * Allocate a buffer to be used both for copying from the CMA
	 * memory and for storing the intermediate grayscale line pixels.
*/
- src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL);
+ src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL);
if (!src32)
return;
@@ -1056,8 +1167,6 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
vaddr += fb->pitches[0];
mono += dst_pitch_0;
}
-
- kfree(src32);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
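A minimal sketch of the new calling convention for drivers that use these helpers outside of the shadow-plane infrastructure might look as follows; everything except the DRM helpers themselves (the function name, the buffers and the RGB565 target) is a hypothetical assumption:

/* Hedged sketch: driving drm_fb_blit() with an explicit conversion state.
 * All identifiers except the DRM helpers are hypothetical.
 */
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>

static void example_flush_damage(struct iosys_map *dst, unsigned int dst_pitch,
				 const struct iosys_map *src,
				 const struct drm_framebuffer *fb,
				 const struct drm_rect *damage)
{
	/* The state owns the scratch buffer that the per-line helpers
	 * previously kmalloc'ed on every call; releasing it frees the buffer.
	 */
	struct drm_format_conv_state state = DRM_FORMAT_CONV_STATE_INIT;

	if (drm_fb_blit(dst, &dst_pitch, DRM_FORMAT_RGB565, src, fb,
			damage, &state))
		drm_warn(fb->dev, "unsupported format conversion\n");

	drm_format_conv_state_release(&state);
}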
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 36c5629af..888aadb6a 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -394,6 +394,31 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
}
}
+static int drm_mode_closefb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fbl;
+ bool found = false;
+
+ mutex_lock(&file_priv->fbs_lock);
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = true;
+
+ if (!found) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -ENOENT;
+ }
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* Drop the reference that was stored in the fbs list */
+ drm_framebuffer_put(fb);
+
+ return 0;
+}
+
/**
* drm_mode_rmfb - remove an FB from the configuration
* @dev: drm device
@@ -410,9 +435,8 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
struct drm_file *file_priv)
{
- struct drm_framebuffer *fb = NULL;
- struct drm_framebuffer *fbl = NULL;
- int found = 0;
+ struct drm_framebuffer *fb;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -421,24 +445,13 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
if (!fb)
return -ENOENT;
- mutex_lock(&file_priv->fbs_lock);
- list_for_each_entry(fbl, &file_priv->fbs, filp_head)
- if (fb == fbl)
- found = 1;
- if (!found) {
- mutex_unlock(&file_priv->fbs_lock);
- goto fail_unref;
+ ret = drm_mode_closefb(fb, file_priv);
+ if (ret != 0) {
+ drm_framebuffer_put(fb);
+ return ret;
}
- list_del_init(&fb->filp_head);
- mutex_unlock(&file_priv->fbs_lock);
-
- /* drop the reference we picked up in framebuffer lookup */
- drm_framebuffer_put(fb);
-
/*
- * we now own the reference that was stored in the fbs list
- *
* drm_framebuffer_remove may fail with -EINTR on pending signals,
* so run this in a separate stack as there's no way to correctly
* handle this after the fb is already removed from the lookup table.
@@ -448,6 +461,7 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
INIT_LIST_HEAD(&arg.fbs);
+ drm_WARN_ON(dev, !list_empty(&fb->filp_head));
list_add_tail(&fb->filp_head, &arg.fbs);
schedule_work(&arg.work);
@@ -457,10 +471,6 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
drm_framebuffer_put(fb);
return 0;
-
-fail_unref:
- drm_framebuffer_put(fb);
- return -ENOENT;
}
int drm_mode_rmfb_ioctl(struct drm_device *dev,
@@ -471,6 +481,28 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
return drm_mode_rmfb(dev, *fb_id, file_priv);
}
+int drm_mode_closefb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_closefb *r = data;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EOPNOTSUPP;
+
+ if (r->pad)
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ ret = drm_mode_closefb(fb, file_priv);
+ drm_framebuffer_put(fb);
+ return ret;
+}
+
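On the userspace side the new ioctl takes only the FB ID; a hedged sketch, assuming the DRM_IOCTL_MODE_CLOSEFB wrapper and the two-field struct drm_mode_closefb uapi implied by the kernel code above:

/* Hedged userspace sketch: drop this file's reference to an FB without the
 * RMFB side effect of synchronously disabling CRTCs still scanning it out.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int close_fb(int drm_fd, uint32_t fb_id)
{
	struct drm_mode_closefb req = {
		.fb_id = fb_id,
		.pad = 0,	/* must be zero, see the -EINVAL check above */
	};

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CLOSEFB, &req) < 0) {
		perror("DRM_IOCTL_MODE_CLOSEFB");
		return -1;
	}
	return 0;
}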
/**
* drm_mode_getfb - get FB info
* @dev: drm device for the ioctl
@@ -796,6 +828,8 @@ void drm_framebuffer_free(struct kref *kref)
container_of(kref, struct drm_framebuffer, base.refcount);
struct drm_device *dev = fb->dev;
+ drm_WARN_ON(dev, !list_empty(&fb->filp_head));
+
/*
* The lookup idr holds a weak reference, which has not necessarily been
* removed at this point. Check for that.
@@ -1088,7 +1122,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
dev = fb->dev;
- WARN_ON(!list_empty(&fb->filp_head));
+ drm_WARN_ON(dev, !list_empty(&fb->filp_head));
/*
* drm ABI mandates that we remove any deleted framebuffers from active
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 5d4b9cd07..93337543a 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -218,7 +218,14 @@ void
__drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
struct drm_shadow_plane_state *new_shadow_plane_state)
{
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(plane_state);
+
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
+
+ drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+ &shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
@@ -266,6 +273,7 @@ EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
*/
void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state)
{
+ drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state);
__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
}
EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state);
@@ -302,6 +310,7 @@ void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
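Shadow-plane drivers inherit the conversion state through their plane state; a minimal sketch of an atomic_update hook using it, where the example device, its destination mapping and the RGB565 target are hypothetical:

/* Hedged sketch: reusing the per-plane-state conversion buffer across blits. */
static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow =
		to_drm_shadow_plane_state(new_state);
	struct example_device *edev = to_example_device(plane->dev); /* hypothetical */
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &damage) {
		/* shadow->fmtcnv_state lives as long as the plane state, so
		 * the scratch buffer is allocated once and reused per rect.
		 */
		drm_fb_blit(&edev->screen_map, &edev->pitch, DRM_FORMAT_RGB565,
			    &shadow->data[0], new_state->fb, &damage,
			    &shadow->fmtcnv_state);
	}
}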
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index b80d4e1cc..f9eb56f24 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -61,6 +61,42 @@
* contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
* entries from within dma-fence signalling critical sections it is enough to
* pre-allocate the &drm_gpuva structures.
+ *
+ * &drm_gem_objects which are private to a single VM can share a common
+ * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec).
+ * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in
+ * the following called 'resv object', which serves as the container of the
+ * GPUVM's shared &dma_resv. This resv object can be a driver specific
+ * &drm_gem_object, such as the &drm_gem_object containing the root page table,
+ * but it can also be a 'dummy' object, which can be allocated with
+ * drm_gpuvm_resv_object_alloc().
+ *
+ * In order to connect a struct drm_gpuva to its backing &drm_gem_object, each
+ * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
+ * &drm_gpuvm_bo contains a list of &drm_gpuva structures.
+ *
+ * A &drm_gpuvm_bo is an abstraction that represents a combination of a
+ * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique.
+ * This is ensured by the API through drm_gpuvm_bo_obtain() and
+ * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
+ * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
+ * particular combination. If none exists, a new instance is created and linked
+ * to the &drm_gem_object.
+ *
+ * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
+ * as entry for the &drm_gpuvm's lists of external and evicted objects. Those
+ * lists are maintained in order to accelerate locking of dma-resv locks and
+ * validation of evicted objects bound in a &drm_gpuvm. For instance, all
+ * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling
+ * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in
+ * order to validate all evicted &drm_gem_objects. It is also possible to lock
+ * additional &drm_gem_objects by providing the corresponding parameters to
+ * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making
+ * use of helper functions such as drm_gpuvm_prepare_range() or
+ * drm_gpuvm_prepare_objects().
+ *
+ * Every bound &drm_gem_object is treated as an external object when its
+ * &dma_resv structure differs from the &drm_gpuvm's common &dma_resv structure.
*/
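Put together, VM creation against the new interface could look roughly like this sketch; the example_gpuvm wrapper, the VA-space size and the reserved range are hypothetical, while the mandatory vm_free callback and the reference handling follow the semantics introduced below:

/* Hedged sketch: creating a GPUVM backed by a dummy resv object. */
struct example_gpuvm {
	struct drm_gpuvm base;	/* hypothetical driver wrapper */
};

static void example_vm_free(struct drm_gpuvm *gpuvm)
{
	kfree(container_of(gpuvm, struct example_gpuvm, base));
}

static const struct drm_gpuvm_ops example_gpuvm_ops = {
	.vm_free = example_vm_free,	/* required, see drm_gpuvm_free() */
};

static struct drm_gpuvm *example_create_vm(struct drm_device *drm)
{
	struct example_gpuvm *evm;
	struct drm_gem_object *r_obj;

	evm = kzalloc(sizeof(*evm), GFP_KERNEL);
	if (!evm)
		return ERR_PTR(-ENOMEM);

	r_obj = drm_gpuvm_resv_object_alloc(drm);
	if (!r_obj) {
		kfree(evm);
		return ERR_PTR(-ENOMEM);
	}

	drm_gpuvm_init(&evm->base, "example", 0, drm, r_obj,
		       0, 1ull << 48,	/* managed VA space, hypothetical */
		       0, SZ_2M,	/* kernel reserved node, hypothetical */
		       &example_gpuvm_ops);

	/* drm_gpuvm_init() takes its own reference on the resv object. */
	drm_gem_object_put(r_obj);

	return &evm->base;
}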
/**
@@ -386,21 +422,42 @@
/**
* DOC: Locking
*
- * Generally, the GPU VA manager does not take care of locking itself, it is
- * the drivers responsibility to take care about locking. Drivers might want to
- * protect the following operations: inserting, removing and iterating
- * &drm_gpuva objects as well as generating all kinds of operations, such as
- * split / merge or prefetch.
- *
- * The GPU VA manager also does not take care of the locking of the backing
- * &drm_gem_object buffers GPU VA lists by itself; drivers are responsible to
- * enforce mutual exclusion using either the GEMs dma_resv lock or alternatively
- * a driver specific external lock. For the latter see also
- * drm_gem_gpuva_set_lock().
- *
- * However, the GPU VA manager contains lockdep checks to ensure callers of its
- * API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is
- * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
+ * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
+ * locking itself; it is the driver's responsibility to take care of locking.
+ * Drivers might want to protect the following operations: inserting, removing
+ * and iterating &drm_gpuva objects as well as generating all kinds of
+ * operations, such as split / merge or prefetch.
+ *
+ * DRM GPUVM also does not take care of the locking of the backing
+ * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
+ * itself; drivers are responsible for enforcing mutual exclusion using either
+ * the GEM's dma_resv lock or, alternatively, a driver-specific external lock. For the
+ * latter see also drm_gem_gpuva_set_lock().
+ *
+ * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
+ * the corresponding lock whenever the &drm_gem_object's GPU VA list is accessed
+ * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also
+ * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put().
+ *
+ * The latter is required since on creation and destruction of a &drm_gpuvm_bo
+ * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_object's gpuva list.
+ * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and
+ * &drm_gem_object must be able to observe previous creations and destructions
+ * of &drm_gpuvm_bos in order to keep instances unique.
+ *
+ * The &drm_gpuvm's lists for keeping track of external and evicted objects are
+ * protected against concurrent insertion / removal and iteration internally.
+ *
+ * However, drivers still need to protect concurrent calls to functions
+ * iterating those lists, namely drm_gpuvm_prepare_objects() and
+ * drm_gpuvm_validate().
+ *
+ * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate
+ * that the corresponding &dma_resv locks are held in order to protect the
+ * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and
+ * the corresponding lockdep checks are enabled. This is an optimization for
+ * drivers which are capable of taking the corresponding &dma_resv locks and
+ * hence do not require internal locking.
*/
/**
@@ -430,6 +487,7 @@
* {
* struct drm_gpuva_ops *ops;
 * struct drm_gpuva_op *op;
+ * struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
* ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
@@ -437,6 +495,10 @@
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
+ * vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
+ * if (IS_ERR(vm_bo))
+ * return PTR_ERR(vm_bo);
+ *
* drm_gpuva_for_each_op(op, ops) {
* struct drm_gpuva *va;
*
@@ -449,7 +511,7 @@
*
* driver_vm_map();
* drm_gpuva_map(gpuvm, va, &op->map);
- * drm_gpuva_link(va);
+ * drm_gpuva_link(va, vm_bo);
*
* break;
* case DRM_GPUVA_OP_REMAP: {
@@ -476,11 +538,11 @@
* driver_vm_remap();
* drm_gpuva_remap(prev, next, &op->remap);
*
- * drm_gpuva_unlink(va);
* if (prev)
- * drm_gpuva_link(prev);
+ * drm_gpuva_link(prev, va->vm_bo);
* if (next)
- * drm_gpuva_link(next);
+ * drm_gpuva_link(next, va->vm_bo);
+ * drm_gpuva_unlink(va);
*
* break;
* }
@@ -496,6 +558,7 @@
* break;
* }
* }
+ * drm_gpuvm_bo_put(vm_bo);
* driver_unlock_va_space();
*
* return 0;
@@ -505,6 +568,7 @@
*
* struct driver_context {
* struct drm_gpuvm *gpuvm;
+ * struct drm_gpuvm_bo *vm_bo;
* struct drm_gpuva *new_va;
* struct drm_gpuva *prev_va;
* struct drm_gpuva *next_va;
@@ -525,6 +589,7 @@
* struct drm_gem_object *obj, u64 offset)
* {
* struct driver_context ctx;
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op;
* int ret = 0;
@@ -534,16 +599,23 @@
* ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
* ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
* ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
- * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
+ * ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
 * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) {
* ret = -ENOMEM;
* goto out;
* }
*
+ * // Typically protected with a driver specific GEM gpuva lock
+ * // used in the fence signaling path for drm_gpuva_link() and
+ * // drm_gpuva_unlink(), hence pre-allocate.
+ * ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo);
+ *
* driver_lock_va_space();
* ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
* driver_unlock_va_space();
*
* out:
+ * drm_gpuvm_bo_put(ctx.vm_bo);
* kfree(ctx.new_va);
* kfree(ctx.prev_va);
* kfree(ctx.next_va);
@@ -556,7 +628,7 @@
*
* drm_gpuva_map(ctx->vm, ctx->new_va, &op->map);
*
- * drm_gpuva_link(ctx->new_va);
+ * drm_gpuva_link(ctx->new_va, ctx->vm_bo);
*
* // prevent the new GPUVA from being freed in
* // driver_mapping_create()
@@ -568,22 +640,23 @@
* int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
* {
* struct driver_context *ctx = __ctx;
+ * struct drm_gpuva *va = op->remap.unmap->va;
*
* drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
*
- * drm_gpuva_unlink(op->remap.unmap->va);
- * kfree(op->remap.unmap->va);
- *
* if (op->remap.prev) {
- * drm_gpuva_link(ctx->prev_va);
+ * drm_gpuva_link(ctx->prev_va, va->vm_bo);
* ctx->prev_va = NULL;
* }
*
* if (op->remap.next) {
- * drm_gpuva_link(ctx->next_va);
+ * drm_gpuva_link(ctx->next_va, va->vm_bo);
* ctx->next_va = NULL;
* }
*
+ * drm_gpuva_unlink(va);
+ * kfree(va);
+ *
* return 0;
* }
*
@@ -597,6 +670,201 @@
* }
*/
+/**
+ * get_next_vm_bo_from_list() - get the next vm_bo element
+ * @__gpuvm: the &drm_gpuvm
+ * @__list_name: the name of the list we're iterating on
+ * @__local_list: a pointer to the local list used to store already iterated items
+ * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list()
+ *
+ * This helper is here to provide lockless list iteration. Lockless as in, the
+ * iterator releases the lock immediately after picking the first element from
+ * the list, so list insertion and deletion can happen concurrently.
+ *
+ * Elements popped from the original list are kept in a local list, so removal
+ * and is_empty checks can still happen while we're iterating the list.
+ */
+#define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo) \
+ ({ \
+ struct drm_gpuvm_bo *__vm_bo = NULL; \
+ \
+ drm_gpuvm_bo_put(__prev_vm_bo); \
+ \
+ spin_lock(&(__gpuvm)->__list_name.lock); \
+ if (!(__gpuvm)->__list_name.local_list) \
+ (__gpuvm)->__list_name.local_list = __local_list; \
+ else \
+ drm_WARN_ON((__gpuvm)->drm, \
+ (__gpuvm)->__list_name.local_list != __local_list); \
+ \
+ while (!list_empty(&(__gpuvm)->__list_name.list)) { \
+ __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \
+ struct drm_gpuvm_bo, \
+ list.entry.__list_name); \
+ if (kref_get_unless_zero(&__vm_bo->kref)) { \
+ list_move_tail(&(__vm_bo)->list.entry.__list_name, \
+ __local_list); \
+ break; \
+ } else { \
+ list_del_init(&(__vm_bo)->list.entry.__list_name); \
+ __vm_bo = NULL; \
+ } \
+ } \
+ spin_unlock(&(__gpuvm)->__list_name.lock); \
+ \
+ __vm_bo; \
+ })
+
+/**
+ * for_each_vm_bo_in_list() - internal vm_bo list iterator
+ * @__gpuvm: the &drm_gpuvm
+ * @__list_name: the name of the list we're iterating on
+ * @__local_list: a pointer to the local list used to store already iterated items
+ * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step
+ *
+ * This helper is here to provide lockless list iteration. Lockless as in, the
+ * iterator releases the lock immediately after picking the first element from the
+ * list, hence list insertion and deletion can happen concurrently.
+ *
+ * It is not allowed to re-assign the vm_bo pointer from inside this loop.
+ *
+ * Typical use:
+ *
+ * struct drm_gpuvm_bo *vm_bo;
+ * LIST_HEAD(my_local_list);
+ *
+ * ret = 0;
+ * for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) {
+ * ret = do_something_with_vm_bo(..., vm_bo);
+ * if (ret)
+ * break;
+ * }
+ * // Drop ref in case we break out of the loop.
+ * drm_gpuvm_bo_put(vm_bo);
+ * restore_vm_bo_list(gpuvm, <list_name>, &my_local_list);
+ *
+ *
+ * Only used for internal list iterations, not meant to be exposed to the outside
+ * world.
+ */
+#define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo) \
+ for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
+ __local_list, NULL); \
+ __vm_bo; \
+ __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \
+ __local_list, __vm_bo))
+
+static void
+__restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock,
+ struct list_head *list, struct list_head **local_list)
+{
+ /* Merge back the two lists, moving local list elements to the
+ * head to preserve previous ordering, in case it matters.
+ */
+ spin_lock(lock);
+ if (*local_list) {
+ list_splice(*local_list, list);
+ *local_list = NULL;
+ }
+ spin_unlock(lock);
+}
+
+/**
+ * restore_vm_bo_list() - move vm_bo elements back to their original list
+ * @__gpuvm: the &drm_gpuvm
+ * @__list_name: the name of the list we're iterating on
+ *
+ * When we're done iterating a vm_bo list, we should call restore_vm_bo_list()
+ * to restore the original state and let new iterations take place.
+ */
+#define restore_vm_bo_list(__gpuvm, __list_name) \
+ __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \
+ &(__gpuvm)->__list_name.list, \
+ &(__gpuvm)->__list_name.local_list)
+
+static void
+cond_spin_lock(spinlock_t *lock, bool cond)
+{
+ if (cond)
+ spin_lock(lock);
+}
+
+static void
+cond_spin_unlock(spinlock_t *lock, bool cond)
+{
+ if (cond)
+ spin_unlock(lock);
+}
+
+static void
+__drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
+ struct list_head *entry, struct list_head *list)
+{
+ cond_spin_lock(lock, !!lock);
+ if (list_empty(entry))
+ list_add_tail(entry, list);
+ cond_spin_unlock(lock, !!lock);
+}
+
+/**
+ * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
+ * @__vm_bo: the &drm_gpuvm_bo
+ * @__list_name: the name of the list to insert into
+ * @__lock: whether to lock with the internal spinlock
+ *
+ * Inserts the given @__vm_bo into the list specified by @__list_name.
+ */
+#define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \
+ __drm_gpuvm_bo_list_add((__vm_bo)->vm, \
+ __lock ? &(__vm_bo)->vm->__list_name.lock : \
+ NULL, \
+ &(__vm_bo)->list.entry.__list_name, \
+ &(__vm_bo)->vm->__list_name.list)
+
+static void
+__drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock,
+ struct list_head *entry, bool init)
+{
+ cond_spin_lock(lock, !!lock);
+ if (init) {
+ if (!list_empty(entry))
+ list_del_init(entry);
+ } else {
+ list_del(entry);
+ }
+ cond_spin_unlock(lock, !!lock);
+}
+
+/**
+ * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list
+ * @__vm_bo: the &drm_gpuvm_bo
+ * @__list_name: the name of the list to insert into
+ * @__lock: whether to lock with the internal spinlock
+ *
+ * Removes the given @__vm_bo from the list specified by @__list_name.
+ */
+#define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \
+ __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
+ __lock ? &(__vm_bo)->vm->__list_name.lock : \
+ NULL, \
+ &(__vm_bo)->list.entry.__list_name, \
+ true)
+
+/**
+ * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list
+ * @__vm_bo: the &drm_gpuvm_bo
+ * @__list_name: the name of the list to insert into
+ * @__lock: whether to lock with the internal spinlock
+ *
+ * Removes the given @__vm_bo from the list specified by @__list_name.
+ */
+#define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \
+ __drm_gpuvm_bo_list_del((__vm_bo)->vm, \
+ __lock ? &(__vm_bo)->vm->__list_name.lock : \
+ NULL, \
+ &(__vm_bo)->list.entry.__list_name, \
+ false)
+
#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
#define GPUVA_START(node) ((node)->va.addr)
@@ -618,8 +886,14 @@ drm_gpuvm_check_overflow(u64 addr, u64 range)
{
u64 end;
- return WARN(check_add_overflow(addr, range, &end),
- "GPUVA address limited to %zu bytes.\n", sizeof(end));
+ return check_add_overflow(addr, range, &end);
+}
+
+static bool
+drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
+{
+ return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range),
+ "GPUVA address limited to %zu bytes.\n", sizeof(addr));
}
static bool
@@ -643,7 +917,18 @@ drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
return krange && addr < kend && kstart < end;
}
-static bool
+/**
+ * drm_gpuvm_range_valid() - checks whether the given range is valid for the
+ * given &drm_gpuvm
+ * @gpuvm: the GPUVM to check the range for
+ * @addr: the base address
+ * @range: the range starting from the base address
+ *
+ * Checks whether the range is within the GPUVM's managed boundaries.
+ *
+ * Returns: true for a valid range, false otherwise
+ */
+bool
drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
u64 addr, u64 range)
{
@@ -651,11 +936,52 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
!drm_gpuvm_in_kernel_node(gpuvm, addr, range);
}
+EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid);
+
+static void
+drm_gpuvm_gem_object_free(struct drm_gem_object *obj)
+{
+ drm_gem_object_release(obj);
+ kfree(obj);
+}
+
+static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = {
+ .free = drm_gpuvm_gem_object_free,
+};
+
+/**
+ * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object
+ * @drm: the drivers &drm_device
+ *
+ * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in
+ * order to serve as root GEM object providing the &dma_resv shared across
+ * &drm_gem_objects local to a single GPUVM.
+ *
+ * Returns: the &drm_gem_object on success, NULL on failure
+ */
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm)
+{
+ struct drm_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ obj->funcs = &drm_gpuvm_object_funcs;
+ drm_gem_private_object_init(drm, obj, 0);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc);
/**
* drm_gpuvm_init() - initialize a &drm_gpuvm
* @gpuvm: pointer to the &drm_gpuvm to initialize
* @name: the name of the GPU VA space
+ * @flags: the &drm_gpuvm_flags for this GPUVM
+ * @drm: the &drm_device this VM resides in
+ * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv
* @start_offset: the start offset of the GPU VA space
* @range: the size of the GPU VA space
* @reserve_offset: the start of the kernel reserved GPU VA area
@@ -668,8 +994,10 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
* &name is expected to be managed by the surrounding driver structures.
*/
void
-drm_gpuvm_init(struct drm_gpuvm *gpuvm,
- const char *name,
+drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ enum drm_gpuvm_flags flags,
+ struct drm_device *drm,
+ struct drm_gem_object *r_obj,
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
const struct drm_gpuvm_ops *ops)
@@ -677,45 +1005,713 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm,
gpuvm->rb.tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpuvm->rb.list);
- drm_gpuvm_check_overflow(start_offset, range);
- gpuvm->mm_start = start_offset;
- gpuvm->mm_range = range;
+ INIT_LIST_HEAD(&gpuvm->extobj.list);
+ spin_lock_init(&gpuvm->extobj.lock);
+
+ INIT_LIST_HEAD(&gpuvm->evict.list);
+ spin_lock_init(&gpuvm->evict.lock);
+
+ kref_init(&gpuvm->kref);
gpuvm->name = name ? name : "unknown";
+ gpuvm->flags = flags;
gpuvm->ops = ops;
+ gpuvm->drm = drm;
+ gpuvm->r_obj = r_obj;
- memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+ drm_gem_object_get(r_obj);
+
+ drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);
+ gpuvm->mm_start = start_offset;
+ gpuvm->mm_range = range;
+ memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
if (reserve_range) {
gpuvm->kernel_alloc_node.va.addr = reserve_offset;
gpuvm->kernel_alloc_node.va.range = reserve_range;
- if (likely(!drm_gpuvm_check_overflow(reserve_offset,
- reserve_range)))
+ if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset,
+ reserve_range)))
__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
}
}
EXPORT_SYMBOL_GPL(drm_gpuvm_init);
+static void
+drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
+{
+ gpuvm->name = NULL;
+
+ if (gpuvm->kernel_alloc_node.va.range)
+ __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
+
+ drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
+ "GPUVA tree is not empty, potentially leaking memory.\n");
+
+ drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list),
+ "Extobj list should be empty.\n");
+ drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
+ "Evict list should be empty.\n");
+
+ drm_gem_object_put(gpuvm->r_obj);
+}
+
+static void
+drm_gpuvm_free(struct kref *kref)
+{
+ struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
+
+ drm_gpuvm_fini(gpuvm);
+
+ if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
+ return;
+
+ gpuvm->ops->vm_free(gpuvm);
+}
+
/**
- * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
- * @gpuvm: pointer to the &drm_gpuvm to clean up
+ * drm_gpuvm_put() - drop a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to release the reference of
+ *
+ * This releases a reference to @gpuvm.
*
- * Note that it is a bug to call this function on a manager that still
- * holds GPU VA mappings.
+ * This function may be called from atomic context.
*/
void
-drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
+drm_gpuvm_put(struct drm_gpuvm *gpuvm)
{
- gpuvm->name = NULL;
+ if (gpuvm)
+ kref_put(&gpuvm->kref, drm_gpuvm_free);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_put);
- if (gpuvm->kernel_alloc_node.va.range)
- __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
+static int
+exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences)
+{
+ return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
+ drm_exec_lock_obj(exec, obj);
+}
- WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
- "GPUVA tree is not empty, potentially leaking memory.");
+/**
+ * drm_gpuvm_prepare_vm() - prepare the GPUVM's common dma-resv
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec context
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
+ * @num_fences is zero drm_exec_lock_obj() is called instead.
+ *
+ * Using this function directly, it is the driver's responsibility to call
+ * drm_exec_init() and drm_exec_fini() accordingly.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
+
+static int
+__drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gpuvm_bo *vm_bo;
+ LIST_HEAD(extobjs);
+ int ret = 0;
+
+ for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
+ ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
+ if (ret)
+ break;
+ }
+ /* Drop ref in case we break out of the loop. */
+ drm_gpuvm_bo_put(vm_bo);
+ restore_vm_bo_list(gpuvm, extobj);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
+
+static int
+drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct drm_gpuvm_bo *vm_bo;
+ int ret = 0;
+
+ drm_gpuvm_resv_assert_held(gpuvm);
+ list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
+ ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
+ if (ret)
+ break;
+
+ if (vm_bo->evicted)
+ drm_gpuvm_bo_list_add(vm_bo, evict, false);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec locking context
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
+ * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
+ * is called instead.
+ *
+ * Using this function directly, it is the driver's responsibility to call
+ * drm_exec_init() and drm_exec_fini() accordingly.
+ *
+ * Note: This function is safe against concurrent insertion and removal of
+ * external objects; however, it is not safe against concurrent usage itself.
+ *
+ * Drivers need to make sure to protect this case with either an outer VM lock
+ * or by calling drm_gpuvm_prepare_vm() before this function within the
+ * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures
+ * mutual exclusion.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ if (drm_gpuvm_resv_protected(gpuvm))
+ return drm_gpuvm_prepare_objects_locked(gpuvm, exec,
+ num_fences);
+ else
+ return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
+
+/**
+ * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec locking context
+ * @addr: the start address within the VA space
+ * @range: the range to iterate within the VA space
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
+ * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
+ * instead.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 addr, u64 range, unsigned int num_fences)
+{
+ struct drm_gpuva *va;
+ u64 end = addr + range;
+ int ret;
+
+ drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
+ struct drm_gem_object *obj = va->gem.obj;
+
+ ret = exec_prepare_obj(exec, obj, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
+
+/**
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Acquires all dma-resv locks of all &drm_gem_objects the given
+ * &drm_gpuvm contains mappings of.
+ *
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
+ * being set, the driver receives the given @fn callback to lock additional
+ * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
+ * would call drm_exec_prepare_obj() from within this callback.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec)
+{
+ struct drm_gpuvm *gpuvm = vm_exec->vm;
+ struct drm_exec *exec = &vm_exec->exec;
+ unsigned int num_fences = vm_exec->num_fences;
+ int ret;
+
+ drm_exec_init(exec, vm_exec->flags, 0);
+
+ drm_exec_until_all_locked(exec) {
+ ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+
+ ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+
+ if (vm_exec->extra.fn) {
+ ret = vm_exec->extra.fn(vm_exec);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ drm_exec_fini(exec);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock);
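A hedged usage sketch for the wrapper, assuming the drm_gpuvm_exec_unlock() counterpart from the corresponding header; the interruptible-wait flag and the single fence slot are illustrative choices:

/* Hedged sketch: lock the VM's resv plus all external objects in one loop. */
static int example_lock_for_submit(struct drm_gpuvm *gpuvm)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,	/* fence slots reserved per locked object */
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	/* ... validate and submit, see drm_gpuvm_validate() below ... */

	drm_gpuvm_exec_unlock(&vm_exec);
	return 0;
}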
+
+static int
+fn_lock_array(struct drm_gpuvm_exec *vm_exec)
+{
+ struct {
+ struct drm_gem_object **objs;
+ unsigned int num_objs;
+ } *args = vm_exec->extra.priv;
+
+ return drm_exec_prepare_array(&vm_exec->exec, args->objs,
+ args->num_objs, vm_exec->num_fences);
+}
+
+/**
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @objs: additional &drm_gem_objects to lock
+ * @num_objs: the number of additional &drm_gem_objects to lock
+ *
+ * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm
+ * contains mappings of, plus the ones given through @objs.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+ struct drm_gem_object **objs,
+ unsigned int num_objs)
+{
+ struct {
+ struct drm_gem_object **objs;
+ unsigned int num_objs;
+ } args;
+
+ args.objs = objs;
+ args.num_objs = num_objs;
+
+ vm_exec->extra.fn = fn_lock_array;
+ vm_exec->extra.priv = &args;
+
+ return drm_gpuvm_exec_lock(vm_exec);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array);
+
+/**
+ * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @addr: the start address within the VA space
+ * @range: the range to iterate within the VA space
+ *
+ * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and
+ * @addr + @range.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+ u64 addr, u64 range)
+{
+ struct drm_gpuvm *gpuvm = vm_exec->vm;
+ struct drm_exec *exec = &vm_exec->exec;
+ int ret;
+
+ drm_exec_init(exec, vm_exec->flags, 0);
+
+ drm_exec_until_all_locked(exec) {
+ ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
+ vm_exec->num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ goto err;
+ }
+
+ return ret;
+
+err:
+ drm_exec_fini(exec);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range);
+
+static int
+__drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+ LIST_HEAD(evict);
+ int ret = 0;
+
+ for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
+ ret = ops->vm_bo_validate(vm_bo, exec);
+ if (ret)
+ break;
+ }
+ /* Drop ref in case we break out of the loop. */
+ drm_gpuvm_bo_put(vm_bo);
+ restore_vm_bo_list(gpuvm, evict);
+
+ return ret;
+}
+
+static int
+drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo, *next;
+ int ret = 0;
+
+ drm_gpuvm_resv_assert_held(gpuvm);
+
+ list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
+ list.entry.evict) {
+ ret = ops->vm_bo_validate(vm_bo, exec);
+ if (ret)
+ break;
+
+ dma_resv_assert_held(vm_bo->obj->resv);
+ if (!vm_bo->evicted)
+ drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
+ }
+
+ return ret;
+}
+
+/**
+ * drm_gpuvm_validate() - validate all BOs marked as evicted
+ * @gpuvm: the &drm_gpuvm to validate evicted BOs
+ * @exec: the &drm_exec instance used for locking the GPUVM
+ *
+ * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer
+ * objects being mapped in the given &drm_gpuvm.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+
+ if (unlikely(!ops || !ops->vm_bo_validate))
+ return -EOPNOTSUPP;
+
+ if (drm_gpuvm_resv_protected(gpuvm))
+ return drm_gpuvm_validate_locked(gpuvm, exec);
+ else
+ return __drm_gpuvm_validate(gpuvm, exec);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_validate);
+
+/**
+ * drm_gpuvm_resv_add_fence - add fence to private and all extobj
+ * dma-resv
+ * @gpuvm: the &drm_gpuvm to add a fence to
+ * @exec: the &drm_exec locking context
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ */
+void
+drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ struct drm_gem_object *obj;
+ unsigned long index;
+
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ dma_resv_assert_held(obj->resv);
+ dma_resv_add_fence(obj->resv, fence,
+ drm_gpuvm_is_extobj(gpuvm, obj) ?
+ extobj_usage : private_usage);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence);
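Within such a locked section the pieces compose as in this hedged sketch; the job-fence origin is hypothetical, and the usage split between private and external BOs is a driver-specific choice:

/* Hedged sketch: validate evicted BOs, then publish the job fence to the
 * VM-private resv and to every external object's resv in one pass.
 */
static int example_run_job(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
			   struct dma_fence *job_fence)
{
	int ret;

	ret = drm_gpuvm_validate(gpuvm, exec);
	if (ret)
		return ret;

	/* ... push the job to the HW ring, yielding job_fence ... */

	drm_gpuvm_resv_add_fence(gpuvm, exec, job_fence,
				 DMA_RESV_USAGE_BOOKKEEP, /* private BOs */
				 DMA_RESV_USAGE_WRITE);   /* external BOs */
	return 0;
}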
+
+/**
+ * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * If provided by the driver, this function uses the &drm_gpuvm_ops
+ * vm_bo_alloc() callback to allocate.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+
+ if (ops && ops->vm_bo_alloc)
+ vm_bo = ops->vm_bo_alloc();
+ else
+ vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);
+
+ if (unlikely(!vm_bo))
+ return NULL;
+
+ vm_bo->vm = drm_gpuvm_get(gpuvm);
+ vm_bo->obj = obj;
+ drm_gem_object_get(obj);
+
+ kref_init(&vm_bo->kref);
+ INIT_LIST_HEAD(&vm_bo->list.gpuva);
+ INIT_LIST_HEAD(&vm_bo->list.entry.gem);
+
+ INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
+ INIT_LIST_HEAD(&vm_bo->list.entry.evict);
+
+ return vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
+
+static void
+drm_gpuvm_bo_destroy(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gem_object *obj = vm_bo->obj;
+ bool lock = !drm_gpuvm_resv_protected(gpuvm);
+
+ if (!lock)
+ drm_gpuvm_resv_assert_held(gpuvm);
+
+ drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
+ drm_gpuvm_bo_list_del(vm_bo, evict, lock);
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_del(&vm_bo->list.entry.gem);
+
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+}
+
+/**
+ * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to release the reference of
+ *
+ * This releases a reference to @vm_bo.
+ *
+ * If the reference count drops to zero, the &drm_gpuvm_bo is destroyed, which
+ * includes removing it from the GEM's gpuva list. Hence, if a call to this
+ * function can potentially let the reference count drop to zero, the caller
+ * must hold the dma-resv or driver-specific GEM gpuva lock.
+ *
+ * This function may only be called from non-atomic context.
+ *
+ * Returns: true if vm_bo was destroyed, false otherwise.
+ */
+bool
+drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
+{
+ might_sleep();
+
+ if (vm_bo)
+ return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
+
+static struct drm_gpuvm_bo *
+__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ if (vm_bo->vm == gpuvm)
+ return vm_bo;
+
+ return NULL;
+}
+
+/**
+ * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given
+ * &drm_gpuvm and &drm_gem_object
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the &drm_gpuvm_bo accordingly.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj);
+
+ return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
+
+/**
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
+ * given &drm_gpuvm and &drm_gem_object
+ * @gpuvm: The &drm_gpuvm the @obj is mapped in.
+ * @obj: The &drm_gem_object being mapped in the @gpuvm.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new
+ * &drm_gpuvm_bo.
+ *
+ * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
+ *
+ * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
+ if (vm_bo)
+ return vm_bo;
+
+ vm_bo = drm_gpuvm_bo_create(gpuvm, obj);
+ if (!vm_bo)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
+
+ return vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
+
+/**
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
+ * for the given &drm_gpuvm and &drm_gem_object
+ * @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
+ *
+ * Find the &drm_gpuvm_bo representing the combination of the given
+ * &drm_gpuvm and &drm_gem_object. If found, increases the reference
+ * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference
+ * count is decreased. If not found, @__vm_bo is returned without further
+ * increase of the reference count.
+ *
+ * A new &drm_gpuvm_bo is added to the GEM's gpuva list.
+ *
+ * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
+ * &drm_gpuvm_bo was found
+ */
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
+{
+ struct drm_gpuvm *gpuvm = __vm_bo->vm;
+ struct drm_gem_object *obj = __vm_bo->obj;
+ struct drm_gpuvm_bo *vm_bo;
+
+ vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
+ if (vm_bo) {
+ drm_gpuvm_bo_put(__vm_bo);
+ return vm_bo;
+ }
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
+
+ return __vm_bo;
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc);
+
+/**
+ * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's
+ * extobj list
+ * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's the extobj list.
+ *
+ * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the
+ * list already and the corresponding &drm_gem_object is actually an external
+ * object.
+ */
+void
+drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ bool lock = !drm_gpuvm_resv_protected(gpuvm);
+
+ if (!lock)
+ drm_gpuvm_resv_assert_held(gpuvm);
+
+ if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj))
+ drm_gpuvm_bo_list_add(vm_bo, extobj, lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
+
+/**
+ * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the
+ * &drm_gpuvm's evicted list
+ * @vm_bo: the &drm_gpuvm_bo to add or remove
+ * @evict: indicates whether the object is evicted
+ *
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
+ */
+void
+drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
+{
+ struct drm_gpuvm *gpuvm = vm_bo->vm;
+ struct drm_gem_object *obj = vm_bo->obj;
+ bool lock = !drm_gpuvm_resv_protected(gpuvm);
+
+ dma_resv_assert_held(obj->resv);
+ vm_bo->evicted = evict;
+
+ /* Can't add external objects to the evicted list directly if not using
+ * internal spinlocks, since in this case the evicted list is protected
+ * with the VM's common dma-resv lock.
+ */
+ if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock)
+ return;
+
+ if (evict)
+ drm_gpuvm_bo_list_add(vm_bo, evict, lock);
+ else
+ drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict);
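The expected caller is the driver's BO move/eviction path; a hedged sketch, assuming the BO's dma-resv is already held as the function asserts:

/* Hedged sketch: mark every VM mapping of a BO as (un)evicted from a
 * move-notify style callback. Iterating the GEM's gpuvm_bo list requires
 * the GEM's gpuva lock, which for resv-protected VMs is the held dma-resv.
 */
static void example_bo_move_notify(struct drm_gem_object *obj, bool evicted)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evicted);
}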
static int
__drm_gpuva_insert(struct drm_gpuvm *gpuvm,
@@ -764,11 +1760,21 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm,
{
u64 addr = va->va.addr;
u64 range = va->va.range;
+ int ret;
if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
return -EINVAL;
- return __drm_gpuva_insert(gpuvm, va);
+ ret = __drm_gpuva_insert(gpuvm, va);
+ if (likely(!ret))
+ /* Take a reference of the GPUVM for the successfully inserted
+ * drm_gpuva. We can't take the reference in
+ * __drm_gpuva_insert() itself, since we don't want to increase
+ * the reference count for the GPUVM's kernel_alloc_node.
+ */
+ drm_gpuvm_get(gpuvm);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);
@@ -795,35 +1801,46 @@ drm_gpuva_remove(struct drm_gpuva *va)
struct drm_gpuvm *gpuvm = va->vm;
if (unlikely(va == &gpuvm->kernel_alloc_node)) {
- WARN(1, "Can't destroy kernel reserved node.\n");
+ drm_WARN(gpuvm->drm, 1,
+ "Can't destroy kernel reserved node.\n");
return;
}
__drm_gpuva_remove(va);
+ drm_gpuvm_put(va->vm);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);
/**
* drm_gpuva_link() - link a &drm_gpuva
* @va: the &drm_gpuva to link
+ * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to
*
- * This adds the given &va to the GPU VA list of the &drm_gem_object it is
- * associated with.
+ * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the
+ * &drm_gpuvm_bo to the &drm_gem_object it is associated with.
+ *
+ * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional
+ * reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * concurrent access using either the GEMs dma_resv lock or a driver specific
+ * lock set through drm_gem_gpuva_set_lock().
*/
void
-drm_gpuva_link(struct drm_gpuva *va)
+drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
{
struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm *gpuvm = va->vm;
if (unlikely(!obj))
return;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj);
- list_add_tail(&va->gem.entry, &obj->gpuva.list);
+ va->vm_bo = drm_gpuvm_bo_get(vm_bo);
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -834,20 +1851,31 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* This removes the given &va from the GPU VA list of the &drm_gem_object it is
* associated with.
*
+ * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and
+ * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case
+ * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo.
+ *
+ * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of
+ * the latter is dropped.
+ *
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * concurrent access using either the GEMs dma_resv lock or a driver specific
+ * lock set through drm_gem_gpuva_set_lock().
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
{
struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm_bo *vm_bo = va->vm_bo;
if (unlikely(!obj))
return;
drm_gem_gpuva_assert_lock_held(obj);
-
list_del_init(&va->gem.entry);
+
+ va->vm_bo = NULL;
+ drm_gpuvm_bo_put(vm_bo);
}
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
@@ -992,10 +2020,10 @@ drm_gpuva_remap(struct drm_gpuva *prev,
struct drm_gpuva *next,
struct drm_gpuva_op_remap *op)
{
- struct drm_gpuva *curr = op->unmap->va;
- struct drm_gpuvm *gpuvm = curr->vm;
+ struct drm_gpuva *va = op->unmap->va;
+ struct drm_gpuvm *gpuvm = va->vm;
- drm_gpuva_remove(curr);
+ drm_gpuva_remove(va);
if (op->prev) {
drm_gpuva_init_from_op(prev, op->prev);
@@ -1637,9 +2665,8 @@ err_free_ops:
EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
/**
- * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
- * @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @obj: the &drm_gem_object to unmap
+ * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
+ * @vm_bo: the &drm_gpuvm_bo abstraction
*
* This function creates a list of operations to perform unmapping for every
* GPUVA attached to a GEM.
@@ -1656,15 +2683,14 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
-drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj)
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
{
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *op;
struct drm_gpuva *va;
int ret;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
@@ -1672,8 +2698,8 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
INIT_LIST_HEAD(&ops->list);
- drm_gem_for_each_gpuva(va, obj) {
- op = gpuva_op_alloc(gpuvm);
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ op = gpuva_op_alloc(vm_bo->vm);
if (!op) {
ret = -ENOMEM;
goto err_free_ops;
@@ -1687,10 +2713,10 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
return ops;
err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
+ drm_gpuva_ops_free(vm_bo->vm, ops);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create);
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create);
/**
* drm_gpuva_ops_free() - free the given &drm_gpuva_ops
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
deleted file mode 100644
index 60afa1865..000000000
--- a/drivers/gpu/drm/drm_hashtab.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/hash.h>
-#include <linux/mm.h>
-#include <linux/rculist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
- unsigned int size = 1 << order;
-
- ht->order = order;
- ht->table = NULL;
- if (size <= PAGE_SIZE / sizeof(*ht->table))
- ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
- else
- ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
- if (!ht->table) {
- DRM_ERROR("Out of memory for hash table\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
- int count = 0;
-
- hashed_key = hash_long(key, ht->order);
- DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head)
- DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
- unsigned long key)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- unsigned int hashed_key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- hlist_for_each_entry_rcu(entry, h_list, head) {
- if (entry->key == key)
- return &entry->head;
- if (entry->key > key)
- break;
- }
- return NULL;
-}
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- struct drm_hash_item *entry;
- struct hlist_head *h_list;
- struct hlist_node *parent;
- unsigned int hashed_key;
- unsigned long key = item->key;
-
- hashed_key = hash_long(key, ht->order);
- h_list = &ht->table[hashed_key];
- parent = NULL;
- hlist_for_each_entry(entry, h_list, head) {
- if (entry->key == key)
- return -EINVAL;
- if (entry->key > key)
- break;
- parent = &entry->head;
- }
- if (parent) {
- hlist_add_behind_rcu(&item->head, parent);
- } else {
- hlist_add_head_rcu(&item->head, h_list);
- }
- return 0;
-}
-
-/*
- * Just insert an item and return any "bits" bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add)
-{
- int ret;
- unsigned long mask = (1UL << bits) - 1;
- unsigned long first, unshifted_key;
-
- unshifted_key = hash_long(seed, bits);
- first = unshifted_key;
- do {
- item->key = (unshifted_key << shift) + add;
- ret = drm_ht_insert_item(ht, item);
- if (ret)
- unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
-
- if (ret) {
- DRM_ERROR("Available key bit space exhausted\n");
- return -EINVAL;
- }
- return 0;
-}
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
- struct drm_hash_item **item)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key_rcu(ht, key);
- if (!list)
- return -EINVAL;
-
- *item = hlist_entry(list, struct drm_hash_item, head);
- return 0;
-}
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
- struct hlist_node *list;
-
- list = drm_ht_find_key(ht, key);
- if (list) {
- hlist_del_init_rcu(list);
- return 0;
- }
- return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
- hlist_del_init_rcu(&item->head);
- return 0;
-}
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
- if (ht->table) {
- kvfree(ht->table);
- ht->table = NULL;
- }
-}
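The deleted drm_ht_just_insert_please() above hand-rolled a probe loop to find an unused key; a minimal sketch of the same "give me a free id" pattern with the XArray allocator, purely illustrative and not part of this patch:

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(my_table);	/* hypothetical table */

	static int my_table_insert(void *entry, u32 *id)
	{
		/* stores entry at the lowest free id in [0, U32_MAX] */
		return xa_alloc(&my_table, id, entry, xa_limit_32b, GFP_KERNEL);
	}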
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8462b657c..8e4faf0a2 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -22,6 +22,7 @@
*/
#include <linux/kthread.h>
+#include <linux/types.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
@@ -31,6 +32,7 @@
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+struct cea_sad;
struct dentry;
struct dma_buf;
struct iosys_map;
@@ -115,17 +117,10 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/* drm_irq.c */
/* IOCTLS */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#endif
-
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -192,6 +187,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector);
void drm_debugfs_crtc_add(struct drm_crtc *crtc);
void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
+void drm_debugfs_encoder_add(struct drm_encoder *encoder);
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder);
#else
static inline void drm_debugfs_dev_fini(struct drm_device *dev)
{
@@ -229,6 +226,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
}
+static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+}
+
+static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+}
+
#endif
drm_ioctl_t drm_version;
@@ -267,3 +272,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
void drm_framebuffer_debugfs_init(struct drm_device *dev);
+
+/* drm_edid.c */
+void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad);
+void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 025dc558c..129e2b91d 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -31,12 +31,12 @@
#include <linux/ratelimit.h>
#include <linux/export.h>
+#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -163,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EINVAL;
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_map32 {
- u32 offset; /* Requested physical address (0 for SAREA) */
- u32 size; /* Requested physical size (bytes) */
- enum drm_map_type type; /* Type of memory to map */
- enum drm_map_flags flags; /* Flags */
- u32 handle; /* User-space: "Handle" to pass to mmap() */
- int mtrr; /* MTRR slot used */
-} drm_map32_t;
-
-static int compat_drm_getmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map map;
- int err;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map.offset = m32.offset;
- err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
- if (err)
- return err;
-
- m32.offset = map.offset;
- m32.size = map.size;
- m32.type = map.type;
- m32.flags = map.flags;
- m32.handle = ptr_to_compat((void __user *)map.handle);
- m32.mtrr = map.mtrr;
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
- return 0;
-
-}
-
-static int compat_drm_addmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- drm_map32_t m32;
- struct drm_map map;
- int err;
-
- if (copy_from_user(&m32, argp, sizeof(m32)))
- return -EFAULT;
-
- map.offset = m32.offset;
- map.size = m32.size;
- map.type = m32.type;
- map.flags = m32.flags;
-
- err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- m32.offset = map.offset;
- m32.mtrr = map.mtrr;
- m32.handle = ptr_to_compat((void __user *)map.handle);
- if (map.handle != compat_ptr(m32.handle))
- pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
- map.handle, m32.type, m32.offset);
-
- if (copy_to_user(argp, &m32, sizeof(m32)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_rmmap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_map32_t __user *argp = (void __user *)arg;
- struct drm_map map;
- u32 handle;
-
- if (get_user(handle, &argp->handle))
- return -EFAULT;
- map.handle = compat_ptr(handle);
- return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
-}
-#endif
-
typedef struct drm_client32 {
int idx; /* Which client desired? */
int auth; /* Is client authenticated? */
@@ -308,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_buf_desc32 {
- int count; /* Number of buffers of this size */
- int size; /* Size in bytes */
- int low_mark; /* Low water mark */
- int high_mark; /* High water mark */
- int flags;
- u32 agp_start; /* Start address in the AGP aperture */
-} drm_buf_desc32_t;
-
-static int compat_drm_addbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc32_t desc32;
- struct drm_buf_desc desc;
- int err;
-
- if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- desc = (struct drm_buf_desc){
- desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
- desc32.flags, desc32.agp_start
- };
-
- err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- desc32 = (drm_buf_desc32_t){
- desc.count, desc.size, desc.low_mark, desc.high_mark,
- desc.flags, desc.agp_start
- };
- if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_markbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_desc32_t b32;
- drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc buf;
-
- if (copy_from_user(&b32, argp, sizeof(b32)))
- return -EFAULT;
-
- buf.size = b32.size;
- buf.low_mark = b32.low_mark;
- buf.high_mark = b32.high_mark;
-
- return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_buf_info32 {
- int count; /**< Entries in list */
- u32 list;
-} drm_buf_info32_t;
-
-static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
-{
- drm_buf_info32_t *request = data;
- drm_buf_desc32_t __user *to = compat_ptr(request->list);
- drm_buf_desc32_t v = {.count = from->buf_count,
- .size = from->buf_size,
- .low_mark = from->low_mark,
- .high_mark = from->high_mark};
-
- if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
- return -EFAULT;
- return 0;
-}
-
-static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_buf_info32_t *request = data;
-
- return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
-}
-
-static int compat_drm_infobufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_info32_t req32;
- drm_buf_info32_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- if (req32.count < 0)
- req32.count = 0;
-
- err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(req32.count, &argp->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_pub32 {
- int idx; /**< Index into the master buffer list */
- int total; /**< Buffer size */
- int used; /**< Amount of buffer in use (for DMA) */
- u32 address; /**< Address of buffer */
-} drm_buf_pub32_t;
-
-typedef struct drm_buf_map32 {
- int count; /**< Length of the buffer list */
- u32 virtual; /**< Mmap'd area in user-virtual */
- u32 list; /**< Buffer information */
-} drm_buf_map32_t;
-
-static int map_one_buf32(void *data, int idx, unsigned long virtual,
- struct drm_buf *buf)
-{
- drm_buf_map32_t *request = data;
- drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
- drm_buf_pub32_t v;
-
- v.idx = buf->idx;
- v.total = buf->total;
- v.used = 0;
- v.address = virtual + buf->offset;
- if (copy_to_user(to, &v, sizeof(v)))
- return -EFAULT;
- return 0;
-}
-
-static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_buf_map32_t *request = data;
- void __user *v;
- int err = __drm_legacy_mapbufs(dev, data, &request->count,
- &v, map_one_buf32,
- file_priv);
- request->virtual = ptr_to_compat(v);
- return err;
-}
-
-static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_map32_t __user *argp = (void __user *)arg;
- drm_buf_map32_t req32;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
- if (req32.count < 0)
- return -EINVAL;
-
- err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(req32.count, &argp->count)
- || put_user(req32.virtual, &argp->virtual))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_buf_free32 {
- int count;
- u32 list;
-} drm_buf_free32_t;
-
-static int compat_drm_freebufs(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_buf_free32_t req32;
- struct drm_buf_free request;
- drm_buf_free32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.count = req32.count;
- request.list = compat_ptr(req32.list);
- return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
-}
-
-typedef struct drm_ctx_priv_map32 {
- unsigned int ctx_id; /**< Context requesting private mapping */
- u32 handle; /**< Handle of map */
-} drm_ctx_priv_map32_t;
-
-static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_priv_map32_t req32;
- struct drm_ctx_priv_map request;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.ctx_id = req32.ctx_id;
- request.handle = compat_ptr(req32.handle);
- return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_ctx_priv_map req;
- drm_ctx_priv_map32_t req32;
- drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- req.ctx_id = req32.ctx_id;
- err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
- if (err)
- return err;
-
- req32.handle = ptr_to_compat((void __user *)req.handle);
- if (copy_to_user(argp, &req32, sizeof(req32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_ctx_res32 {
- int count;
- u32 contexts;
-} drm_ctx_res32_t;
-
-static int compat_drm_resctx(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_ctx_res32_t __user *argp = (void __user *)arg;
- drm_ctx_res32_t res32;
- struct drm_ctx_res res;
- int err;
-
- if (copy_from_user(&res32, argp, sizeof(res32)))
- return -EFAULT;
-
- res.count = res32.count;
- res.contexts = compat_ptr(res32.contexts);
- err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
- if (err)
- return err;
-
- res32.count = res.count;
- if (copy_to_user(argp, &res32, sizeof(res32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_dma32 {
- int context; /**< Context handle */
- int send_count; /**< Number of buffers to send */
- u32 send_indices; /**< List of handles to buffers */
- u32 send_sizes; /**< Lengths of data to send */
- enum drm_dma_flags flags; /**< Flags */
- int request_count; /**< Number of buffers requested */
- int request_size; /**< Desired size for buffers */
- u32 request_indices; /**< Buffer information */
- u32 request_sizes;
- int granted_count; /**< Number of buffers granted */
-} drm_dma32_t;
-
-static int compat_drm_dma(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_dma32_t d32;
- drm_dma32_t __user *argp = (void __user *)arg;
- struct drm_dma d;
- int err;
-
- if (copy_from_user(&d32, argp, sizeof(d32)))
- return -EFAULT;
-
- d.context = d32.context;
- d.send_count = d32.send_count;
- d.send_indices = compat_ptr(d32.send_indices);
- d.send_sizes = compat_ptr(d32.send_sizes);
- d.flags = d32.flags;
- d.request_count = d32.request_count;
- d.request_indices = compat_ptr(d32.request_indices);
- d.request_sizes = compat_ptr(d32.request_sizes);
- err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
- if (err)
- return err;
-
- if (put_user(d.request_size, &argp->request_size)
- || put_user(d.granted_count, &argp->granted_count))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#if IS_ENABLED(CONFIG_AGP)
-typedef struct drm_agp_mode32 {
- u32 mode; /**< AGP mode */
-} drm_agp_mode32_t;
-
-static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_mode32_t __user *argp = (void __user *)arg;
- struct drm_agp_mode mode;
-
- if (get_user(mode.mode, &argp->mode))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_info32 {
- int agp_version_major;
- int agp_version_minor;
- u32 mode;
- u32 aperture_base; /* physical address */
- u32 aperture_size; /* bytes */
- u32 memory_allowed; /* bytes */
- u32 memory_used;
-
- /* PCI information */
- unsigned short id_vendor;
- unsigned short id_device;
-} drm_agp_info32_t;
-
-static int compat_drm_agp_info(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_info32_t __user *argp = (void __user *)arg;
- drm_agp_info32_t i32;
- struct drm_agp_info info;
- int err;
-
- err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH);
- if (err)
- return err;
-
- i32.agp_version_major = info.agp_version_major;
- i32.agp_version_minor = info.agp_version_minor;
- i32.mode = info.mode;
- i32.aperture_base = info.aperture_base;
- i32.aperture_size = info.aperture_size;
- i32.memory_allowed = info.memory_allowed;
- i32.memory_used = info.memory_used;
- i32.id_vendor = info.id_vendor;
- i32.id_device = info.id_device;
- if (copy_to_user(argp, &i32, sizeof(i32)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm_agp_buffer32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for binding / unbinding */
- u32 type; /**< Type of memory to allocate */
- u32 physical; /**< Physical used by i810 */
-} drm_agp_buffer32_t;
-
-static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- drm_agp_buffer32_t req32;
- struct drm_agp_buffer request;
- int err;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.size = req32.size;
- request.type = req32.type;
- err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- req32.handle = request.handle;
- req32.physical = request.physical;
- if (copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int compat_drm_agp_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_buffer32_t __user *argp = (void __user *)arg;
- struct drm_agp_buffer request;
-
- if (get_user(request.handle, &argp->handle))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_binding32 {
- u32 handle; /**< From drm_agp_buffer */
- u32 offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding32_t;
-
-static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- drm_agp_binding32_t req32;
- struct drm_agp_binding request;
-
- if (copy_from_user(&req32, argp, sizeof(req32)))
- return -EFAULT;
-
- request.handle = req32.handle;
- request.offset = req32.offset;
- return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_agp_binding32_t __user *argp = (void __user *)arg;
- struct drm_agp_binding request;
-
- if (get_user(request.handle, &argp->handle))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif /* CONFIG_AGP */
-
-typedef struct drm_scatter_gather32 {
- u32 size; /**< In bytes -- will round to page boundary */
- u32 handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather32_t;
-
-static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather request;
- int err;
-
- if (get_user(request.size, &argp->size))
- return -EFAULT;
-
- err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- /* XXX not sure about the handle conversion here... */
- if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
- return -EFAULT;
-
- return 0;
-}
-
-static int compat_drm_sg_free(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather request;
- unsigned long x;
-
- if (get_user(x, &argp->handle))
- return -EFAULT;
- request.handle = x << PAGE_SHIFT;
- return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
@@ -854,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
- err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0);
req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence;
@@ -914,37 +333,9 @@ static struct {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
-#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
- DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
- DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
- DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
- DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
- DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
- DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
-#if IS_ENABLED(CONFIG_AGP)
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
- DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
-#endif
-#endif
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
- DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
-#endif
#if defined(CONFIG_X86)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 77590b0f3..e368fc084 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -42,7 +42,6 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-#include "drm_legacy.h"
/**
* DOC: getunique and setversion story
@@ -301,6 +300,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_CRTC_IN_VBLANK_EVENT:
req->value = 1;
break;
+ case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP:
+ req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) &&
+ dev->mode_config.async_page_flip;
+ break;
default:
return -EINVAL;
}
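From the userspace side, the new capability is visible through the usual DRM_IOCTL_GET_CAP path; a sketch using libdrm (drmGetCap() is the existing wrapper, the surrounding function is hypothetical):

	#include <stdint.h>
	#include <xf86drm.h>

	static int supports_atomic_async_flip(int fd)
	{
		uint64_t value = 0;

		/* older kernels return -EINVAL for an unknown cap */
		if (drmGetCap(fd, DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP, &value))
			return 0;
		return value == 1;
	}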
@@ -361,6 +364,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->writeback_connectors = req->value;
break;
+ case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT:
+ if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT))
+ return -EOPNOTSUPP;
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->supports_virtualized_cursor_plane = req->value;
+ break;
default:
return -EINVAL;
}
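Note the ordering the checks above impose on clients: the file must already be atomic before this cap can be set. A hedged libdrm sketch (the drmSetClientCap() wrapper exists; the enable_cursor_hotspot() helper is hypothetical):

	#include <xf86drm.h>

	static int enable_cursor_hotspot(int fd)
	{
		int ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);

		if (ret)
			return ret;
		/* fails with -EOPNOTSUPP unless the driver sets DRIVER_CURSOR_HOTSPOT */
		return drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
	}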
@@ -559,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
.name = #ioctl \
}
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
-#else
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
-#endif
-
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid,
- DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
@@ -586,63 +588,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
-
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if IS_ENABLED(CONFIG_AGP)
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -675,6 +629,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CLOSEFB, drm_mode_closefb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
@@ -774,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
{
struct drm_file *file_priv = file->private_data;
struct drm_device *dev = file_priv->minor->dev;
- int retcode;
+ int ret;
/* Update drm_file owner if fd was passed along. */
drm_file_update_pid(file_priv);
@@ -782,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
- retcode = drm_ioctl_permit(flags, file_priv);
- if (unlikely(retcode))
- return retcode;
-
- /* Enforce sane locking for modern driver ioctls. */
- if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
- (flags & DRM_UNLOCKED))
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
- }
- return retcode;
+ ret = drm_ioctl_permit(flags, file_priv);
+ if (unlikely(ret))
+ return ret;
+
+ return func(dev, kdata, file_priv);
}
EXPORT_SYMBOL(drm_ioctl_kernel);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
deleted file mode 100644
index d327638e1..000000000
--- a/drivers/gpu/drm/drm_irq.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * drm_irq.c IRQ and vblank support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-
-#include <linux/export.h>
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/pci.h>
-#include <linux/vgaarb.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
-
-#include "drm_internal.h"
-
-static int drm_legacy_irq_install(struct drm_device *dev, int irq)
-{
- int ret;
- unsigned long sh_flags = 0;
-
- if (irq == 0)
- return -EINVAL;
-
- if (dev->irq_enabled)
- return -EBUSY;
- dev->irq_enabled = true;
-
- DRM_DEBUG("irq=%d\n", irq);
-
- /* Before installing handler */
- if (dev->driver->irq_preinstall)
- dev->driver->irq_preinstall(dev);
-
- /* PCI devices require shared interrupts. */
- if (dev_is_pci(dev->dev))
- sh_flags = IRQF_SHARED;
-
- ret = request_irq(irq, dev->driver->irq_handler,
- sh_flags, dev->driver->name, dev);
-
- if (ret < 0) {
- dev->irq_enabled = false;
- return ret;
- }
-
- /* After installing handler */
- if (dev->driver->irq_postinstall)
- ret = dev->driver->irq_postinstall(dev);
-
- if (ret < 0) {
- dev->irq_enabled = false;
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_unregister(to_pci_dev(dev->dev));
- free_irq(irq, dev);
- } else {
- dev->irq = irq;
- }
-
- return ret;
-}
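With this ioctl-driven path gone, a KMS driver requests its interrupt directly at probe time instead; a minimal sketch, assuming a PCI-backed device and a hypothetical my_driver_irq_handler():

	static int my_driver_irq_init(struct drm_device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev->dev);

		/* shared line, as PCI requires */
		return devm_request_irq(dev->dev, pdev->irq, my_driver_irq_handler,
					IRQF_SHARED, dev->driver->name, dev);
	}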
-
-int drm_legacy_irq_uninstall(struct drm_device *dev)
-{
- unsigned long irqflags;
- bool irq_enabled;
- int i;
-
- irq_enabled = dev->irq_enabled;
- dev->irq_enabled = false;
-
- /*
- * Wake up any waiters so they don't hang. This is just to paper over
- * issues for UMS drivers which aren't in full control of their
- * vblank/irq handling. KMS drivers must ensure that vblanks are all
- * disabled when uninstalling the irq handler.
- */
- if (drm_dev_has_vblank(dev)) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- for (i = 0; i < dev->num_crtcs; i++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[i];
-
- if (!vblank->enabled)
- continue;
-
- WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
-
- drm_vblank_disable_and_save(dev, i);
- wake_up(&vblank->queue);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- }
-
- if (!irq_enabled)
- return -EINVAL;
-
- DRM_DEBUG("irq=%d\n", dev->irq);
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY))
- vga_client_unregister(to_pci_dev(dev->dev));
-
- if (dev->driver->irq_uninstall)
- dev->driver->irq_uninstall(dev);
-
- free_irq(dev->irq, dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_irq_uninstall);
-
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_control *ctl = data;
- int ret = 0, irq;
- struct pci_dev *pdev;
-
- /* If we don't have an IRQ, fall back for compatibility reasons -
- * this used to be a separate function in drm_dma.h.
- */
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
- /* UMS was only ever supported on pci devices. */
- if (WARN_ON(!dev_is_pci(dev->dev)))
- return -EINVAL;
-
- switch (ctl->func) {
- case DRM_INST_HANDLER:
- pdev = to_pci_dev(dev->dev);
- irq = pdev->irq;
-
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != irq)
- return -EINVAL;
- mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_irq_install(dev, irq);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
- case DRM_UNINST_HANDLER:
- mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_irq_uninstall(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
- default:
- return -EINVAL;
- }
-}
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 0bf0fc1ab..0c7550c04 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -27,38 +27,6 @@
#include <linux/module.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_print.h>
-
-#include "drm_crtc_helper_internal.h"
-
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
-
-#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE)
-
-/* Backward compatibility for drm_kms_helper.edid_firmware */
-static int edid_firmware_set(const char *val, const struct kernel_param *kp)
-{
- DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
-
- return __drm_set_edid_firmware_path(val);
-}
-
-static int edid_firmware_get(char *buffer, const struct kernel_param *kp)
-{
- return __drm_get_edid_firmware_path(buffer, PAGE_SIZE);
-}
-
-static const struct kernel_param_ops edid_firmware_ops = {
- .set = edid_firmware_set,
- .get = edid_firmware_get,
-};
-
-module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644);
-__MODULE_PARM_TYPE(edid_firmware, "charp");
-MODULE_PARM_DESC(edid_firmware,
- "DEPRECATED. Use drm.edid_firmware module parameter instead.");
-
-#endif
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
deleted file mode 100644
index 70c9dba11..000000000
--- a/drivers/gpu/drm/drm_legacy.h
+++ /dev/null
@@ -1,290 +0,0 @@
-#ifndef __DRM_LEGACY_H__
-#define __DRM_LEGACY_H__
-
-/*
- * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * This file contains legacy interfaces that modern drm drivers
- * should no longer be using. They cannot be removed while legacy
- * drivers still use them, as removing them would break the API.
- */
-#include <linux/list.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-
-struct agp_memory;
-struct drm_buf_desc;
-struct drm_device;
-struct drm_file;
-struct drm_hash_item;
-struct drm_open_hash;
-
-/*
- * Hash-table Support
- */
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-/* drm_hashtab.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-#endif
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
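The contract the comment above described, spelled out as a sketch (the API itself is removed by this patch; this only illustrates how the RCU lookup was meant to be paired with a read-side section):

	struct drm_hash_item *item;
	int err;

	rcu_read_lock();
	err = drm_ht_find_item_rcu(ht, key, &item);
	rcu_read_unlock();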
-
-/*
- * Generic DRM Contexts
- */
-
-#define DRM_KERNEL_CONTEXT 0
-#define DRM_RESERVED_CONTEXTS 1
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
-#else
-static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
-#endif
-
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-/*
- * Generic Buffer Management
- */
-
-#define DRM_MAP_HASH_OFFSET 0x10000000
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
- return drm_ht_create(&dev->map_hash, 12);
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
-{
- drm_ht_remove(&dev->map_hash);
-}
-#else
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
-#endif
-
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-int __drm_legacy_infobufs(struct drm_device *, void *, int *,
- int (*)(void *, int, struct drm_buf_entry *));
-int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
- void __user **,
- int (*)(void *, int, unsigned long, struct drm_buf *),
- struct drm_file *);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master);
-void drm_legacy_rmmaps(struct drm_device *dev);
-#else
-static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master) {}
-static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *d);
-#else
-static inline void drm_legacy_vma_flush(struct drm_device *d)
-{
- /* do nothing */
-}
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_mem {
- unsigned long handle;
- struct agp_memory *memory;
- unsigned long bound;
- int pages;
- struct list_head head;
-};
-
-/* drm_agpsupport.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-void drm_legacy_agp_clear(struct drm_device *dev);
-
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#else
-static inline void drm_legacy_agp_clear(struct drm_device *dev) {}
-#endif
-
-/* drm_lock.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
-#else
-static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
-#endif
-
-/* DMA support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_dma_setup(struct drm_device *dev);
-void drm_legacy_dma_takedown(struct drm_device *dev);
-#else
-static inline int drm_legacy_dma_setup(struct drm_device *dev)
-{
- return 0;
-}
-#endif
-
-void drm_legacy_free_buffer(struct drm_device *dev,
- struct drm_buf * buf);
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp);
-#else
-static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
- struct drm_file *filp) {}
-#endif
-
-/* Scatter Gather Support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_sg_cleanup(struct drm_device *dev);
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_init_members(struct drm_device *dev);
-void drm_legacy_destroy_members(struct drm_device *dev);
-void drm_legacy_dev_reinit(struct drm_device *dev);
-int drm_legacy_setup(struct drm_device * dev);
-#else
-static inline void drm_legacy_init_members(struct drm_device *dev) {}
-static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
-static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
-static inline int drm_legacy_setup(struct drm_device * dev) { return 0; }
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
-#else
-static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_master_legacy_init(struct drm_master *master);
-#else
-static inline void drm_master_legacy_init(struct drm_master *master) {}
-#endif
-
-/* drm_pci.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void drm_legacy_pci_agp_destroy(struct drm_device *dev);
-#else
-static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
-#endif
-
-#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
deleted file mode 100644
index d4c543406..000000000
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * \file drm_legacy_misc.c
- * Misc legacy support functions.
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-void drm_legacy_init_members(struct drm_device *dev)
-{
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- spin_lock_init(&dev->buf_lock);
- mutex_init(&dev->ctxlist_mutex);
-}
-
-void drm_legacy_destroy_members(struct drm_device *dev)
-{
- mutex_destroy(&dev->ctxlist_mutex);
-}
-
-int drm_legacy_setup(struct drm_device * dev)
-{
- int ret;
-
- if (dev->driver->firstopen &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- ret = dev->driver->firstopen(dev);
- if (ret != 0)
- return ret;
- }
-
- ret = drm_legacy_dma_setup(dev);
- if (ret < 0)
- return ret;
-
-
- DRM_DEBUG("\n");
- return 0;
-}
-
-void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- if (dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_legacy_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- DRM_DEBUG("lastclose completed\n");
-}
-
-void drm_master_legacy_init(struct drm_master *master)
-{
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
-}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
deleted file mode 100644
index 1efbd5389..000000000
--- a/drivers/gpu/drm/drm_lock.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/sched/signal.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-
-/*
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
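The loop above is the classic cmpxchg() retry idiom: recompute the new value whenever another CPU changed the word under us, and retry until the value we read is the value we replaced. Reduced to its core, outside the DRM lock specifics:

	static void set_flag_atomically(unsigned int *word, unsigned int flag)
	{
		unsigned int old, prev;

		do {
			old = READ_ONCE(*word);
			prev = cmpxchg(word, old, old | flag);
		} while (prev != old);	/* raced: recompute and retry */
	}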
-
-/*
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-
-/*
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take the lock.
- */
-int drm_legacy_lock(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- ++file_priv->lock_count;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock->context, task_pid_nr(current),
- master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
- lock->flags);
-
- add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters++;
- spin_unlock_bh(&master->lock.spinlock);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!master->lock.hw_lock) {
- /* Device has been unregistered */
- send_sig(SIGTERM, current, 0);
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&master->lock, lock->context)) {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- break; /* Got lock */
- }
-
- /* Contention */
- mutex_unlock(&drm_global_mutex);
- schedule();
- mutex_lock(&drm_global_mutex);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- spin_lock_bh(&master->lock.spinlock);
- master->lock.user_waiters--;
- spin_unlock_bh(&master->lock.spinlock);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&master->lock.lock_queue, &entry);
-
- DRM_DEBUG("%d %s\n", lock->context,
- ret ? "interrupted" : "has lock");
- if (ret) return ret;
-
- /* don't set the block all signals on the master process for now
- * really probably not the correct answer but lets us debug xkb
- * xserver for now */
- if (!drm_is_current_master(file_priv)) {
- dev->sigdata.context = lock->context;
- dev->sigdata.lock = master->lock.hw_lock;
- }
-
- if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
- {
- if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG("%d waiting for DMA quiescent\n",
- lock->context);
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-/*
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_lock *lock = data;
- struct drm_master *master = file_priv->master;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (lock->context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- task_pid_nr(current), lock->context);
- return -EINVAL;
- }
-
- if (drm_legacy_lock_free(&master->lock, lock->context)) {
- /* FIXME: Should really bail out here. */
- }
-
- return 0;
-}
-
-/*
- * This function returns immediately and takes the hw lock
- * with the kernel context if it is free, otherwise it gets the highest priority when and if
- * it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
- * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
- * a deadlock, which is why the "idlelock" was invented).
- *
- * This should be sufficient to wait for GPU idle without
- * having to worry about starvation.
- */
-void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
-{
- int ret;
-
- spin_lock_bh(&lock_data->spinlock);
- lock_data->kernel_waiters++;
- if (!lock_data->idle_has_lock) {
-
- spin_unlock_bh(&lock_data->spinlock);
- ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock_bh(&lock_data->spinlock);
-
- if (ret == 1)
- lock_data->idle_has_lock = 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_take);
-
-void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
-{
- unsigned int old, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (--lock_data->kernel_waiters == 0) {
- if (lock_data->idle_has_lock) {
- do {
- old = *lock;
- prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
- } while (prev != old);
- wake_up_interruptible(&lock_data->lock_queue);
- lock_data->idle_has_lock = 0;
- }
- }
- spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_release);
-
-static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
-
- return (file_priv->lock_count && master->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
- master->lock.file_priv == file_priv);
-}
-
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- /* if the master has gone away we can't do anything with the lock */
- if (!dev->master)
- return;
-
- if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_legacy_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-}
-
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
-{
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return;
-
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
-}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
deleted file mode 100644
index d2e1dccd8..000000000
--- a/drivers/gpu/drm/drm_memory.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_cache.h>
-#include <drm/drm_device.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#ifdef HAVE_PAGE_AGP
-# include <asm/agp.h>
-#else
-# ifdef __powerpc__
-# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
-# else
-# define PAGE_AGP PAGE_KERNEL
-# endif
-#endif
-
-static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device *dev)
-{
- unsigned long i, num_pages =
- PAGE_ALIGN(size) / PAGE_SIZE;
- struct drm_agp_mem *agpmem;
- struct page **page_map;
- struct page **phys_page_map;
- void *addr;
-
- size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
- offset -= dev->hose->mem_space->start;
-#endif
-
- list_for_each_entry(agpmem, &dev->agp->memory, head)
- if (agpmem->bound <= offset
- && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
- (offset + size))
- break;
- if (&agpmem->head == &dev->agp->memory)
- return NULL;
-
- /*
- * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
- * the CPU do not get remapped by the GART. We fix this by using the kernel's
- * page-table instead (that's probably faster anyhow...).
- */
- /* note: use vmalloc() because num_pages could be large... */
- page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
- if (!page_map)
- return NULL;
-
- phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
- for (i = 0; i < num_pages; ++i)
- page_map[i] = phys_page_map[i];
- addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
- vfree(page_map);
-
- return addr;
-}
-
-#else /* CONFIG_AGP */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_AGP */
-
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap);
-
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- map->handle = agp_remap(map->offset, map->size, dev);
- else
- map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap_wc);
-
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
- if (!map->handle || !map->size)
- return;
-
- if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
- vunmap(map->handle);
- else
- iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_legacy_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index e90f0bf89..daac649aa 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -197,12 +197,14 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
* @fb: The source framebuffer
* @clip: Clipping rectangle of the area to be copied
* @swap: When true, swap MSB/LSB of 16-bit values
+ * @fmtcnv_state: Format-conversion state
*
* Returns:
* Zero on success, negative error code on failure.
*/
int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap)
+ struct drm_rect *clip, bool swap,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
@@ -215,12 +217,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach,
+ fmtcnv_state);
else
drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -252,7 +255,7 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
}
static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
@@ -270,7 +273,7 @@ static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state);
if (ret)
goto err_msg;
} else {
@@ -332,7 +335,8 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
drm_dev_exit(idx);
}
@@ -368,7 +372,8 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
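
The hunks above thread a struct drm_format_conv_state through mipi_dbi_buf_copy() and mipi_dbi_fb_dirty(), so format conversion can reuse the scratch buffer cached in the shadow-plane state instead of allocating one on every flush. A minimal caller sketch, assuming the fields visible in this diff (dbidev->tx_buf, the shadow-plane state's data[] and fmtcnv_state) plus mipi_dbi's swap_bytes flag; not a verbatim excerpt of the driver:

/* Hedged sketch: flush one damage rectangle through the new
 * mipi_dbi_buf_copy() signature, borrowing the format-conversion
 * state cached in the shadow-plane state. */
static void sketch_flush_rect(struct mipi_dbi_dev *dbidev,
			      struct drm_shadow_plane_state *sps,
			      struct drm_framebuffer *fb,
			      struct drm_rect *clip)
{
	int ret;

	ret = mipi_dbi_buf_copy(dbidev->tx_buf, &sps->data[0], fb, clip,
				dbidev->dbi.swap_bytes, &sps->fmtcnv_state);
	if (ret)
		drm_err_once(fb->dev, "Failed to copy buffer: %d\n", ret);
}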
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ac0d2ce3f..0e8355063 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -538,7 +538,7 @@ retry:
obj_to_connector(obj),
prop_value);
} else {
- ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false);
if (ret)
goto out;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac9a40625..893f52ee4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2617,8 +2617,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
break;
}
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ strscpy_pad(out->name, in->name, sizeof(out->name));
}
/**
@@ -2659,8 +2658,7 @@ int drm_mode_convert_umode(struct drm_device *dev,
* useful for the kernel->userspace direction anyway.
*/
out->type = in->type & DRM_MODE_TYPE_ALL;
- strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
- out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ strscpy_pad(out->name, in->name, sizeof(out->name));
/* Clearing picture aspect ratio bits from out flags,
* as the aspect-ratio information is not stored in
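
The strncpy() to strscpy_pad() conversions above are more than style: strncpy() does not NUL-terminate when the source fills the buffer, which is why the old code patched the last byte by hand, and the zero padding matters because struct drm_mode_modeinfo is copied out to userspace. A short sketch of the replacement's semantics (DRM_DISPLAY_MODE_LEN comes from the DRM uAPI headers):

#include <linux/printk.h>
#include <linux/string.h>

/* strscpy_pad() copies at most sizeof(dst) - 1 bytes, always writes the
 * terminating NUL, zero-fills the rest of dst, and returns the number of
 * bytes copied or -E2BIG when the source had to be truncated. */
static void sketch_copy_mode_name(char dst[DRM_DISPLAY_MODE_LEN],
				  const char *src)
{
	ssize_t len = strscpy_pad(dst, src, DRM_DISPLAY_MODE_LEN);

	if (len == -E2BIG)
		pr_debug("mode name truncated\n"); /* dst is still terminated */
}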
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index f858dfedf..2c582020c 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (!dev)
return 0;
+ /*
+ * Don't disable polling if it was never initialized
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
- drm_kms_helper_poll_disable(dev);
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
- drm_kms_helper_poll_enable(dev);
+ /*
+ * Don't enable polling if it was never initialized
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
+
return PTR_ERR(state);
}
@@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
dev->mode_config.suspend_state = NULL;
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
- drm_kms_helper_poll_enable(dev);
+ /*
+ * Don't enable polling if it is not initialized
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
return ret;
}
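
With the poll_enabled checks above, these suspend/resume helpers stay safe for drivers that never call drm_kms_helper_poll_init(), which matters because drm_kms_helper_poll_enable()/_disable() now warn when polling was not initialized (see the drm_probe_helper.c hunks later in this diff). A hedged sketch of the driver-side pattern this keeps working, with all foo_* names invented for illustration:

#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_drv.h>
#include <drm/drm_modeset_helper.h>

static int foo_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Only touches connector polling if the driver initialized it. */
	return drm_mode_config_helper_suspend(drm);
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);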
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 3d92f66e5..aa93129c3 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
+ .width = 1080,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -279,6 +285,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /* GPD Win Mini */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01")
+ },
+ .driver_data = (void *)&lcd1080x1920_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 39d35fc3a..c585f1e88 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -29,18 +29,12 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/drm_auth.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
-#include "drm_legacy.h"
-
-#ifdef CONFIG_DRM_LEGACY
-/* List of devices hanging off drivers with stealth attach. */
-static LIST_HEAD(legacy_dev_list);
-static DEFINE_MUTEX(legacy_dev_list_lock);
-#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
@@ -71,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
master->unique_len = strlen(master->unique);
return 0;
}
-
-#ifdef CONFIG_DRM_LEGACY
-
-static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != pdev->bus->number ||
- p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
- return -EINVAL;
-
- p->irq = pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
- return 0;
-}
-
-/**
- * drm_legacy_irq_by_busid - Get interrupt from bus ID
- * @dev: DRM device
- * @data: IOCTL parameter pointing to a drm_irq_busid structure
- * @file_priv: DRM file private.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_irq_busid *p = data;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- /* UMS was only ever support on PCI devices. */
- if (WARN_ON(!dev_is_pci(dev->dev)))
- return -EINVAL;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EOPNOTSUPP;
-
- return drm_legacy_pci_irq_by_busid(dev, p);
-}
-
-void drm_legacy_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-static void drm_legacy_pci_agp_init(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
- if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
- dev->agp = drm_legacy_agp_init(dev);
- if (dev->agp) {
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
- }
- }
-}
-
-static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = drm_dev_alloc(driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_free;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
- drm_legacy_pci_agp_init(dev);
-
- ret = drm_dev_register(dev, ent->driver_data);
- if (ret)
- goto err_agp;
-
- if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
- mutex_lock(&legacy_dev_list_lock);
- list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
- mutex_unlock(&legacy_dev_list_lock);
- }
-
- return 0;
-
-err_agp:
- drm_legacy_pci_agp_destroy(dev);
- pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
- return ret;
-}
-
-/**
- * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * This is only used by legacy dri1 drivers and deprecated.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
- DRM_DEBUG("\n");
-
- if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
- return -EINVAL;
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
- pid = &pdriver->id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_legacy_get_pci_dev(pdev, pid, driver);
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_legacy_pci_init);
-
-/**
- * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
- * is deprecated and only used by dri1 drivers.
- */
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- struct drm_device *dev, *tmp;
-
- DRM_DEBUG("\n");
-
- if (!(driver->driver_features & DRIVER_LEGACY)) {
- WARN_ON(1);
- } else {
- mutex_lock(&legacy_dev_list_lock);
- list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
- legacy_dev_list) {
- if (dev->driver == driver) {
- list_del(&dev->legacy_dev_list);
- drm_put_dev(dev);
- }
- }
- mutex_unlock(&legacy_dev_list_lock);
- }
- DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_legacy_pci_exit);
-
-#endif
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 311e17990..672c655c7 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -230,6 +230,103 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
return 0;
}
+/**
+ * DOC: hotspot properties
+ *
+ * HOTSPOT_X: property to set mouse hotspot x offset.
+ * HOTSPOT_Y: property to set mouse hotspot y offset.
+ *
+ * When the plane is being used as a cursor image to display a mouse pointer,
+ * the "hotspot" is the offset within the cursor image where mouse events
+ * are expected to go.
+ *
+ * Positive values move the hotspot from the top-left corner of the cursor
+ * plane towards the right and bottom.
+ *
+ * Most display drivers do not need this information because the
+ * hotspot is not actually connected to anything visible on screen.
+ * However, this is necessary for display drivers like the para-virtualized
+ * drivers (e.g. qxl, vbox, virtio, vmwgfx) that are attached to a user console
+ * with a mouse pointer. Since these consoles are often being remoted over a
+ * network, they would otherwise have to wait to display the pointer movement to
+ * the user until a full network round-trip has occurred. New mouse events have
+ * to be sent from the user's console, over the network to the virtual input
+ * devices, forwarded to the desktop for processing, and then the cursor plane's
+ * position can be updated and sent back to the user's console over the network.
+ * Instead, with the hotspot information, the console can anticipate the new
+ * location, and draw the mouse cursor there before the confirmation comes in.
+ * To do that correctly, the user's console must be able to predict how the
+ * desktop will process mouse events, which normally requires the desktop's
+ * mouse topology information, i.e. where each CRTC sits in the mouse coordinate
+ * space. This is typically sent to the para-virtualized drivers using some
+ * driver-specific method, and the driver then forwards it to the console by
+ * way of the virtual display device or hypervisor.
+ *
+ * The assumption is generally made that there is only one cursor plane being
+ * used this way at a time, and that the desktop is feeding all mouse devices
+ * into the same global pointer. Para-virtualized drivers that require this
+ * should only be exposing a single cursor plane, or find some other way
+ * to coordinate with a userspace desktop that supports multiple pointers.
+ * If the hotspot properties are set, the cursor plane is assumed to be used
+ * only for displaying a mouse cursor image, and the position of the combined
+ * cursor plane + offset can therefore be used for coordinating with input from a
+ * mouse device.
+ *
+ * The cursor will then be drawn either at the location of the plane in the CRTC
+ * console, or as a free-floating cursor plane on the user's console
+ * corresponding to their desktop mouse position.
+ *
+ * DRM clients which would like to work correctly on drivers which expose
+ * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT.
+ * Setting this client cap on drivers which do not special-case
+ * cursor planes will return EOPNOTSUPP, which can be used by userspace to
+ * gauge requirements of the hardware/drivers they're running on. Advertising
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be
+ * correctly setting the hotspot properties.
+ */
+
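A hedged client-side sketch of the properties documented above, using libdrm; the hotspot_x_id/hotspot_y_id property IDs are assumed to have been looked up by name ("HOTSPOT_X"/"HOTSPOT_Y") from the cursor plane beforehand:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_cursor_hotspot(int fd, uint32_t cursor_plane_id,
			      uint32_t hotspot_x_id, uint32_t hotspot_y_id,
			      int32_t hot_x, int32_t hot_y)
{
	drmModeAtomicReq *req;
	int ret;

	/* Without this cap the driver may hide its cursor plane from the
	 * client or reject the hotspot properties with EOPNOTSUPP. */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
	if (ret)
		return ret;

	req = drmModeAtomicAlloc();
	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_x_id, hot_x);
	drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_y_id, hot_y);
	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);

	return ret;
}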
+/**
+ * drm_plane_create_hotspot_properties - creates the mouse hotspot
+ * properties and attaches them to the given cursor plane
+ *
+ * @plane: drm cursor plane
+ *
+ * This function enables the mouse hotspot properties on a given
+ * cursor plane. Look at the documentation for hotspot properties
+ * to get a better understanding of what they're used for.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int drm_plane_create_hotspot_properties(struct drm_plane *plane)
+{
+ struct drm_property *prop_x;
+ struct drm_property *prop_y;
+
+ drm_WARN_ON(plane->dev,
+ !drm_core_check_feature(plane->dev,
+ DRIVER_CURSOR_HOTSPOT));
+
+ prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X",
+ INT_MIN, INT_MAX);
+ if (IS_ERR(prop_x))
+ return PTR_ERR(prop_x);
+
+ prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y",
+ INT_MIN, INT_MAX);
+ if (IS_ERR(prop_y)) {
+ drm_property_destroy(plane->dev, prop_x);
+ return PTR_ERR(prop_y);
+ }
+
+ drm_object_attach_property(&plane->base, prop_x, 0);
+ drm_object_attach_property(&plane->base, prop_y, 0);
+ plane->hotspot_x_property = prop_x;
+ plane->hotspot_y_property = prop_y;
+
+ return 0;
+}
+
__printf(9, 0)
static int __drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
@@ -348,6 +445,10 @@ static int __drm_universal_plane_init(struct drm_device *dev,
drm_object_attach_property(&plane->base, config->prop_src_w, 0);
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
+ if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+ type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_hotspot_properties(plane);
+ }
if (format_modifier_count)
create_in_format_blob(dev, plane);
@@ -1065,8 +1166,10 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
return PTR_ERR(fb);
}
- fb->hot_x = req->hot_x;
- fb->hot_y = req->hot_y;
+ if (plane->hotspot_x_property && plane->state)
+ plane->state->hotspot_x = req->hot_x;
+ if (plane->hotspot_y_property && plane->state)
+ plane->state->hotspot_y = req->hot_y;
} else {
fb = NULL;
}
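
On the driver side, the hunk above routes the legacy cursor ioctl's hot_x/hot_y into the plane state, so a driver that declares DRIVER_CURSOR_HOTSPOT sees one coherent value regardless of whether the client used the atomic HOTSPOT_X/Y properties or DRM_IOCTL_MODE_CURSOR2. A hedged consumer sketch; foo_hw_move_cursor_hotspot() is a made-up hardware hook:

static void foo_cursor_plane_atomic_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* hotspot_x/y were filled in from the HOTSPOT_X/Y properties or,
	 * for legacy clients, from the cursor ioctl's hot_x/hot_y. */
	foo_hw_move_cursor_hotspot(plane->dev, new_state->hotspot_x,
				   new_state->hotspot_y);
}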
@@ -1456,6 +1559,36 @@ out:
* Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and
* drm_atomic_helper_damage_iter_next() helper iterator function to get damage
* rectangles clipped to &drm_plane_state.src.
+ *
+ * Note that there are two types of damage handling: frame damage and buffer
+ * damage. The type of damage handling implemented depends on a driver's upload
+ * target. Drivers implementing a per-plane or per-CRTC upload target need to
+ * handle frame damage, while drivers implementing a per-buffer upload target
+ * need to handle buffer damage.
+ *
+ * The existing damage helpers only support the frame damage type; there is no
+ * buffer age support or similar damage accumulation algorithm implemented yet.
+ *
+ * Only drivers handling frame damage can use the mentioned damage helpers to
+ * iterate over the damaged regions. Drivers that handle buffer damage must set
+ * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init()
+ * to know that damage clips should be ignored and return &drm_plane_state.src
+ * as the damage rectangle, to force a full plane update.
+ *
+ * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb
+ * of the old and new plane states to determine if the framebuffer attached to a
+ * plane has changed or not since the last plane update. If &drm_plane_state.fb
+ * has changed, then &drm_plane_state.ignore_damage_clips must be set to true.
+ *
+ * That is because drivers with a per-buffer upload target expect the backing
+ * storage buffer to not change for a given plane. If the upload buffer changes
+ * between page flips, the new upload buffer has to be updated as a whole. This
+ * can be improved in the future if support for buffer damage is added to the DRM
+ * damage helpers, similarly to how user-space already handles this case as it is
+ * explained in the following documents:
+ *
+ * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt
+ * https://emersion.fr/blog/2019/intro-to-damage-tracking/
*/
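
A hedged sketch of the buffer-damage rule spelled out above, in a hypothetical foo driver's plane atomic_check: whenever the framebuffer changed since the last update, the damage clips only describe the new buffer, so they are ignored to force a full plane update:

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* Only meaningful for drivers with a per-buffer upload target. */
	new_state->ignore_damage_clips = old_state->fb != new_state->fb;

	return 0;
}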
/**
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5e9508967..7982be4b0 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane)
kfree(plane);
}
EXPORT_SYMBOL(drm_plane_helper_destroy);
-
-/**
- * drm_plane_helper_atomic_check() - Helper to check plane atomic-state
- * @plane: plane to check
- * @state: atomic state object
- *
- * Provides a default plane-state check handler for planes whose atomic-state
- * scale and positioning are not expected to change since the plane is always
- * a fullscreen scanout buffer.
- *
- * This is often the case for the primary plane of simple framebuffers. See
- * also drm_crtc_helper_atomic_check() for the respective CRTC-state check
- * helper function.
- *
- * RETURNS:
- * Zero on success, or an errno code otherwise.
- */
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
- struct drm_crtc *new_crtc = new_plane_state->crtc;
- struct drm_crtc_state *new_crtc_state = NULL;
-
- if (new_crtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
- DRM_PLANE_NO_SCALING,
- DRM_PLANE_NO_SCALING,
- false, false);
-}
-EXPORT_SYMBOL(drm_plane_helper_atomic_check);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7352bde29..03bd3c7bd 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -582,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;
- if (!obj->funcs->get_sg_table)
+ /*
+ * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
+ * that implement their own ->map_dma_buf() do not.
+ */
+ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
+ !obj->funcs->get_sg_table)
return -ENOSYS;
return drm_gem_pin(obj);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 15ed974bc..36433fa68 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev)
* Drivers can call this helper from their device resume implementation. It is
* not an error to call this even when output polling isn't enabled.
*
+ * If device polling was never initialized before, this call will trigger a
+ * warning and return.
+ *
* Note that calls to enable and disable polling must be strictly ordered, which
 * is automatically the case when they're only called from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
- dev->mode_config.poll_running)
+ if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) ||
+ !drm_kms_helper_poll || dev->mode_config.poll_running)
return;
if (drm_kms_helper_enable_hpd(dev) ||
@@ -626,8 +629,12 @@ retry:
0);
}
- /* Re-enable polling in case the global poll config changed. */
- drm_kms_helper_poll_enable(dev);
+ /*
+ * Re-enable polling in case the global poll config changed but polling
+ * is still initialized.
+ */
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -767,9 +774,11 @@ static void output_poll_execute(struct work_struct *work)
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
- if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
- drm_kms_helper_disable_hpd(dev);
- dev->mode_config.poll_running = false;
+ if (!drm_kms_helper_poll) {
+ if (dev->mode_config.poll_running) {
+ drm_kms_helper_disable_hpd(dev);
+ dev->mode_config.poll_running = false;
+ }
goto out;
}
@@ -878,12 +887,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
* not an error to call this even when output polling isn't enabled or already
* disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
*
+ * If, however, the polling was never initialized, this call will trigger a
+ * warning and return.
+ *
* Note that calls to enable and disable polling must be strictly ordered, which
 * is automatically the case when they're only called from suspend/resume
* callbacks.
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
+ if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled))
+ return;
+
if (dev->mode_config.poll_running)
drm_kms_helper_disable_hpd(dev);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index dfec47983..596272149 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -27,6 +27,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_property.h>
#include "drm_crtc_internal.h"
@@ -751,6 +752,64 @@ bool drm_property_replace_blob(struct drm_property_blob **blob,
}
EXPORT_SYMBOL(drm_property_replace_blob);
+/**
+ * drm_property_replace_blob_from_id - replace a blob property taking a reference
+ * @dev: DRM device
+ * @blob: a pointer to the member blob to be replaced
+ * @blob_id: the id of the new blob to replace with
+ * @expected_size: expected size of the blob property
+ * @expected_elem_size: expected size of an element in the blob property
+ * @replaced: if the blob was in fact replaced
+ *
+ * Look up the new blob from id, take its reference, check expected sizes of
+ * the blob and its element and replace the old blob by the new one. Advertise
+ * if the replacement operation was successful.
+ *
+ * Return: 0 on success, with @replaced advertising whether the blob was
+ * replaced; -EINVAL if the new blob was not found or the sizes don't match.
+ */
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced)
+{
+ struct drm_property_blob *new_blob = NULL;
+
+ if (blob_id != 0) {
+ new_blob = drm_property_lookup_blob(dev, blob_id);
+ if (new_blob == NULL) {
+ drm_dbg_atomic(dev,
+ "cannot find blob ID %llu\n", blob_id);
+ return -EINVAL;
+ }
+
+ if (expected_size > 0 &&
+ new_blob->length != expected_size) {
+ drm_dbg_atomic(dev,
+ "[BLOB:%d] length %zu different from expected %zu\n",
+ new_blob->base.id, new_blob->length, expected_size);
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ if (expected_elem_size > 0 &&
+ new_blob->length % expected_elem_size != 0) {
+ drm_dbg_atomic(dev,
+ "[BLOB:%d] length %zu not divisible by element size %zu\n",
+ new_blob->base.id, new_blob->length, expected_elem_size);
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ }
+
+ *replaced |= drm_property_replace_blob(blob, new_blob);
+ drm_property_blob_put(new_blob);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_property_replace_blob_from_id);
+
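A hedged call-site sketch for the new helper, inside a hypothetical CRTC's atomic_set_property handler; the foo_* types, the lut state member, and the 256-entry size check are invented for illustration:

static int foo_crtc_atomic_set_property(struct drm_crtc *crtc,
					struct drm_crtc_state *state,
					struct drm_property *property,
					uint64_t val)
{
	struct foo_crtc_state *foo_state = to_foo_crtc_state(state);
	struct foo_device *foo = to_foo_device(crtc->dev);

	if (property == foo->lut_property) {
		bool replaced = false;
		int ret;

		/* Takes a reference on the new blob, validates its total and
		 * per-element sizes, and replaces (and unreferences) the old
		 * blob on success. */
		ret = drm_property_replace_blob_from_id(crtc->dev,
							&foo_state->lut, val,
							sizeof(struct drm_color_lut) * 256,
							sizeof(struct drm_color_lut),
							&replaced);
		foo_state->lut_changed |= replaced;
		return ret;
	}

	return -EINVAL;
}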
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
deleted file mode 100644
index f4e6184d1..000000000
--- a/drivers/gpu/drm/drm_scatter.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#define DEBUG_SCATTER 0
-
-static void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
- struct page *page;
- int i;
-
- for (i = 0; i < entry->pages; i++) {
- page = entry->pagelist[i];
- if (page)
- ClearPageReserved(page);
- }
-
- vfree(entry->virtual);
-
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
-}
-
-void drm_legacy_sg_cleanup(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- drm_core_check_feature(dev, DRIVER_LEGACY)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
-}
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
- unsigned long pages, i, j;
-
- DRM_DEBUG("\n");
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- if (request->size > SIZE_MAX - PAGE_SIZE)
- return -EINVAL;
-
- if (dev->sg)
- return -EINVAL;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
- entry->pages = pages;
- entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
- if (!entry->pagelist) {
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
- if (!entry->busaddr) {
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
- if (!entry->virtual) {
- kfree(entry->busaddr);
- kfree(entry->pagelist);
- kfree(entry);
- return -ENOMEM;
- }
-
- /* This also forces the mapping of COW pages, so our page list
- * will be valid. Please don't remove it...
- */
- memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
- entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
- DRM_DEBUG("handle = %08lx\n", entry->handle);
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
- i += PAGE_SIZE, j++) {
- entry->pagelist[j] = vmalloc_to_page((void *)i);
- if (!entry->pagelist[j])
- goto failed;
- SetPageReserved(entry->pagelist[j]);
- }
-
- request->handle = entry->handle;
-
- dev->sg = entry;
-
-#if DEBUG_SCATTER
- /* Verify that each page points to its virtual address, and vice
- * versa.
- */
- {
- int error = 0;
-
- for (i = 0; i < pages; i++) {
- unsigned long *tmp;
-
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0xcafebabe;
- }
- tmp = (unsigned long *)((u8 *) entry->virtual +
- (PAGE_SIZE * i));
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- if (*tmp != 0xcafebabe && error == 0) {
- error = 1;
- DRM_ERROR("Scatter allocation error, "
- "pagelist does not match "
- "virtual mapping\n");
- }
- }
- tmp = page_address(entry->pagelist[i]);
- for (j = 0;
- j < PAGE_SIZE / sizeof(unsigned long);
- j++, tmp++) {
- *tmp = 0;
- }
- }
- if (error == 0)
- DRM_ERROR("Scatter allocation matches pagelist\n");
- }
-#endif
-
- return 0;
-
- failed:
- drm_sg_cleanup(entry);
- return -ENOMEM;
-}
-
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
- struct drm_sg_mem *entry;
-
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EOPNOTSUPP;
-
- if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EOPNOTSUPP;
-
- entry = dev->sg;
- dev->sg = NULL;
-
- if (!entry || entry->handle != request->handle)
- return -EINVAL;
-
- DRM_DEBUG("virtual = %p\n", entry->virtual);
-
- drm_sg_cleanup(entry);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f0bdcfc0c..a6c19de46 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -126,6 +126,11 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set
+ * a fence deadline hint on the backing fences before waiting, to provide the
+ * fence signaler with an appropriate sense of urgency. The deadline is
+ * specified as an absolute &CLOCK_MONOTONIC value in units of ns.
+ *
* Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
* handles as well as an array of u64 points and does a host-side wait on all
* of syncobj fences at the given points simultaneously.
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint32_t count,
uint32_t flags,
signed long timeout,
- uint32_t *idx)
+ uint32_t *idx,
+ ktime_t *deadline)
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
@@ -1110,6 +1116,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
+ if (deadline) {
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+ dma_fence_set_deadline(fence, *deadline);
+ }
+ }
+
do {
set_current_state(TASK_INTERRUPTIBLE);
@@ -1208,7 +1223,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
struct drm_syncobj_timeline_wait *timeline_wait,
- struct drm_syncobj **syncobjs, bool timeline)
+ struct drm_syncobj **syncobjs, bool timeline,
+ ktime_t *deadline)
{
signed long timeout = 0;
uint32_t first = ~0;
@@ -1219,7 +1235,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
NULL,
wait->count_handles,
wait->flags,
- timeout, &first);
+ timeout, &first,
+ deadline);
if (timeout < 0)
return timeout;
wait->first_signaled = first;
@@ -1229,7 +1246,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
u64_to_user_ptr(timeline_wait->points),
timeline_wait->count_handles,
timeline_wait->flags,
- timeout, &first);
+ timeout, &first,
+ deadline);
if (timeout < 0)
return timeout;
timeline_wait->first_signaled = first;
@@ -1300,17 +1318,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_syncobj_wait *args = data;
struct drm_syncobj **syncobjs;
+ unsigned int possible_flags;
+ ktime_t t, *tp = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -EOPNOTSUPP;
- if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+ if (args->flags & ~possible_flags)
return -EINVAL;
if (args->count_handles == 0)
- return -EINVAL;
+ return 0;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
@@ -1319,8 +1342,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+ t = ns_to_ktime(args->deadline_nsec);
+ tp = &t;
+ }
+
ret = drm_syncobj_array_wait(dev, file_private,
- args, NULL, syncobjs, false);
+ args, NULL, syncobjs, false, tp);
drm_syncobj_array_free(syncobjs, args->count_handles);
@@ -1333,18 +1361,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_syncobj_timeline_wait *args = data;
struct drm_syncobj **syncobjs;
+ unsigned int possible_flags;
+ ktime_t t, *tp = NULL;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
- DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+ if (args->flags & ~possible_flags)
return -EINVAL;
if (args->count_handles == 0)
- return -EINVAL;
+ return 0;
ret = drm_syncobj_array_find(file_private,
u64_to_user_ptr(args->handles),
@@ -1353,8 +1386,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
+ if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+ t = ns_to_ktime(args->deadline_nsec);
+ tp = &t;
+ }
+
ret = drm_syncobj_array_wait(dev, file_private,
- NULL, args, syncobjs, true);
+ NULL, args, syncobjs, true, tp);
drm_syncobj_array_free(syncobjs, args->count_handles);
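
A hedged userspace sketch of the new deadline flag in action, waiting on a single syncobj with a hint one frame (~16.6 ms) out; assumes a drm.h (from libdrm) new enough to carry DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE and the deadline_nsec field:

#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm.h>

static int wait_syncobj_with_deadline(int fd, uint32_t handle,
				      int64_t abs_timeout_nsec)
{
	struct drm_syncobj_wait wait;
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&handle;
	wait.count_handles = 1;
	wait.timeout_nsec = abs_timeout_nsec;	/* absolute, CLOCK_MONOTONIC */
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
	/* Deadline hint: absolute CLOCK_MONOTONIC time in nanoseconds. */
	wait.deadline_nsec = (uint64_t)now.tv_sec * 1000000000ull +
			     now.tv_nsec + 16600000;

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}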
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 877e20675..702a12bc9 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else if (dev->driver->get_vblank_counter) {
- return dev->driver->get_vblank_counter(dev, pipe);
- }
-#endif
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else {
- dev->driver->disable_vblank(dev, pipe);
- }
-#endif
}
/*
@@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
}
-#ifdef CONFIG_DRM_LEGACY
- else if (dev->driver->enable_vblank) {
- return dev->driver->enable_vblank(dev, pipe);
- }
-#endif
return -EINVAL;
}
@@ -1574,88 +1559,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
-static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!drm_dev_has_vblank(dev))
- return;
-
- if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
- return;
-
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatch calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!vblank->inmodeset) {
- vblank->inmodeset = 0x1;
- if (drm_vblank_get(dev, pipe) == 0)
- vblank->inmodeset |= 0x2;
- }
-}
-
-static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!drm_dev_has_vblank(dev))
- return;
-
- if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
- return;
-
- if (vblank->inmodeset) {
- spin_lock_irq(&dev->vbl_lock);
- drm_reset_vblank_timestamp(dev, pipe);
- spin_unlock_irq(&dev->vbl_lock);
-
- if (vblank->inmodeset & 0x2)
- drm_vblank_put(dev, pipe);
-
- vblank->inmodeset = 0;
- }
-}
-
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- unsigned int pipe;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!drm_dev_has_vblank(dev))
- return 0;
-
- /* KMS drivers handle this internally */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- pipe = modeset->crtc;
- if (pipe >= dev->num_crtcs)
- return -EINVAL;
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_legacy_vblank_pre_modeset(dev, pipe);
- break;
- case _DRM_POST_MODESET:
- drm_legacy_vblank_post_modeset(dev, pipe);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
u64 req_seq,
union drm_wait_vblank *vblwait,
@@ -1780,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
static bool drm_wait_vblank_supported(struct drm_device *dev)
{
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY)))
- return dev->irq_enabled;
-#endif
return drm_dev_has_vblank(dev);
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
deleted file mode 100644
index 87c9fe55d..000000000
--- a/drivers/gpu/drm/drm_vm.c
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-#include <linux/pgtable.h>
-
-#if defined(__ia64__)
-#include <linux/efi.h>
-#include <linux/slab.h>
-#endif
-#include <linux/mem_encrypt.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-struct drm_vma_entry {
- struct list_head head;
- struct vm_area_struct *vma;
- pid_t pid;
-};
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(struct drm_local_map *map,
- struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__) || defined(__loongarch__)
- if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
- tmp = pgprot_noncached(tmp);
- else
- tmp = pgprot_writecombine(tmp);
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
- pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp = pgprot_noncached_wc(tmp);
-#endif
- return tmp;
-}
-
-/*
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Find the right map and if it's AGP memory find the real physical page to
- * map, get the page, increment the use count and return it.
- */
-#if IS_ENABLED(CONFIG_AGP)
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- struct drm_map_list *r_list;
- struct drm_hash_item *hash;
-
- /*
- * Find the right map
- */
- if (!dev->agp)
- goto vm_fault_error;
-
- if (!dev->agp || !dev->agp->cant_use_aperture)
- goto vm_fault_error;
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
- goto vm_fault_error;
-
- r_list = drm_hash_entry(hash, struct drm_map_list, hash);
- map = r_list->map;
-
- if (map && map->type == _DRM_AGP) {
- /*
- * Using vm_pgoff as a selector forces us to use this unusual
- * addressing scheme.
- */
- resource_size_t offset = vmf->address - vma->vm_start;
- resource_size_t baddr = map->offset + offset;
- struct drm_agp_mem *agpmem;
- struct page *page;
-
-#ifdef __alpha__
- /*
- * Adjust to a bus-relative address
- */
- baddr -= dev->hose->mem_space->start;
-#endif
-
- /*
- * It's AGP memory - find the real physical page to map
- */
- list_for_each_entry(agpmem, &dev->agp->memory, head) {
- if (agpmem->bound <= baddr &&
- agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
- break;
- }
-
- if (&agpmem->head == &dev->agp->memory)
- goto vm_fault_error;
-
- /*
- * Get the page, inc the use count, and return it
- */
- offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = agpmem->memory->pages[offset];
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG
- ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
- (unsigned long long)baddr,
- agpmem->memory->pages[offset],
- (unsigned long long)offset,
- page_count(page));
- return 0;
- }
-vm_fault_error:
- return VM_FAULT_SIGBUS; /* Disallow mremap */
-}
-#else
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-#endif
-
-/*
- * \c fault method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * return it.
- */
-static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_local_map *map = vma->vm_private_data;
- unsigned long offset;
- unsigned long i;
- struct page *page;
-
- if (!map)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
- if (!page)
- return VM_FAULT_SIGBUS;
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("shm_fault 0x%lx\n", offset);
- return 0;
-}
-
-/*
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_vma_entry *pt, *temp;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
- int found_maps = 0;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- map = vma->vm_private_data;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma->vm_private_data == map)
- found_maps++;
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- }
- }
-
- /* We were the only map that was found */
- if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
- /* Check to see if we are in the maplist; if we are not, then
- * we delete this mapping's information.
- */
- found_maps = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map == map)
- found_maps++;
- }
-
- if (!found_maps) {
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
- arch_phys_wc_del(map->mtrr);
- iounmap(map->handle);
- break;
- case _DRM_SHM:
- vfree(map->handle);
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
- break;
- case _DRM_CONSISTENT:
- dma_free_coherent(dev->dev,
- map->size,
- map->handle,
- map->offset);
- break;
- }
- kfree(map);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * \c fault method for DMA virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
- */
-static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_device_dma *dma = dev->dma;
- unsigned long offset;
- unsigned long page_nr;
- struct page *page;
-
- if (!dma)
- return VM_FAULT_SIGBUS; /* Error */
- if (!dma->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- /* vm_[pg]off[set] should be 0 */
- page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
- page = virt_to_page((void *)dma->pagelist[page_nr]);
-
- get_page(page);
- vmf->page = page;
-
- DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
- return 0;
-}
-
-/*
- * \c fault method for scatter-gather virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
- */
-static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_local_map *map = vma->vm_private_data;
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_sg_mem *entry = dev->sg;
- unsigned long offset;
- unsigned long map_offset;
- unsigned long page_offset;
- struct page *page;
-
- if (!entry)
- return VM_FAULT_SIGBUS; /* Error */
- if (!entry->pagelist)
- return VM_FAULT_SIGBUS; /* Nothing allocated */
-
- offset = vmf->address - vma->vm_start;
- map_offset = map->offset - (unsigned long)dev->sg->virtual;
- page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
- page = entry->pagelist[page_offset];
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
- .fault = drm_vm_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
- .fault = drm_vm_shm_fault,
- .open = drm_vm_open,
- .close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
- .fault = drm_vm_dma_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
- .fault = drm_vm_sg_fault,
- .open = drm_vm_open,
- .close = drm_vm_close,
-};
-
-static void drm_vm_open_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
-{
- struct drm_vma_entry *vma_entry;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (vma_entry) {
- vma_entry->vma = vma;
- vma_entry->pid = current->pid;
- list_add(&vma_entry->head, &dev->vmalist);
- }
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(dev, vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void drm_vm_close_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
-{
- struct drm_vma_entry *pt, *temp;
-
- DRM_DEBUG("0x%08lx,0x%08lx\n",
- vma->vm_start, vma->vm_end - vma->vm_start);
-
- list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
- if (pt->vma == vma) {
- list_del(&pt->head);
- kfree(pt);
- break;
- }
- }
-}
-
-/*
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(dev, vma);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to vm_dma_ops, the file
- * pointer, and calls vm_open().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- struct drm_device_dma *dma;
- unsigned long length = vma->vm_end - vma->vm_start;
-
- dev = priv->minor->dev;
- dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- /* Length must match exact page count */
- if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN) &&
- (dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- vma->vm_ops = &drm_vm_dma_ops;
-
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
- drm_vm_open_locked(dev, vma);
- return 0;
-}
-
-static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
- return dev->hose->dense_mem_base;
-#else
- return 0;
-#endif
-}
-
-/*
- * mmap DRM memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
- * checks that the restricted flag is not set, sets the virtual memory operations
- * according to the mapping type and remaps the pages. Finally sets the file
- * pointer and calls vm_open().
- */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_local_map *map = NULL;
- resource_size_t offset = 0;
- struct drm_hash_item *hash;
-
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
- if (!priv->authenticated)
- return -EACCES;
-
- /* We check for "dma". On Apple's UniNorth, it's valid to have
- * the AGP mapped at physical address 0
- * --BenH.
- */
- if (!vma->vm_pgoff
-#if IS_ENABLED(CONFIG_AGP)
- && (!dev->agp
- || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
- )
- return drm_mmap_dma(filp, vma);
-
- if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
- DRM_ERROR("Could not find map\n");
- return -EINVAL;
- }
-
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
- return -EPERM;
-
- /* Check for valid size. */
- if (map->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
- /* Ye gads this is ugly. With more thought
- we could move this up higher and use
- `protection_map' instead. */
- vma->vm_page_prot =
- __pgprot(pte_val
- (pte_wrprotect
- (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
- }
-
- switch (map->type) {
-#if !defined(__arm__)
- case _DRM_AGP:
- if (dev->agp && dev->agp->cant_use_aperture) {
- /*
- * On some platforms the CPU cannot access bus DMA addresses directly, so for
- * memory of type DRM_AGP, we'll deal with sorting out the real physical
- * pages and mappings in fault()
- */
-#if defined(__powerpc__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
- vma->vm_ops = &drm_vm_ops;
- break;
- }
- fallthrough; /* to _DRM_FRAME_BUFFER... */
-#endif
- case _DRM_FRAME_BUFFER:
- case _DRM_REGISTERS:
- offset = drm_core_get_reg_ofs(dev);
- vma->vm_page_prot = drm_io_prot(map, vma);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
- DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
- " offset = 0x%llx\n",
- map->type,
- vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
-
- vma->vm_ops = &drm_vm_ops;
- break;
- case _DRM_CONSISTENT:
- /* Consistent memory is really like shared memory. But
- * it's allocated in a different way, so avoid fault */
- if (remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(virt_to_page(map->handle)),
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- fallthrough; /* to _DRM_SHM */
- case _DRM_SHM:
- vma->vm_ops = &drm_vm_shm_ops;
- vma->vm_private_data = (void *)map;
- break;
- case _DRM_SCATTER_GATHER:
- vma->vm_ops = &drm_vm_sg_ops;
- vma->vm_private_data = (void *)map;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
- default:
- return -EINVAL; /* This should never happen. */
- }
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
- drm_vm_open_locked(dev, vma);
- return 0;
-}
-
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- int ret;
-
- if (drm_dev_is_unplugged(dev))
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_mmap_locked(filp, vma);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_legacy_mmap);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *dev)
-{
- struct drm_vma_entry *vma, *vma_temp;
-
- /* Clear vma list (only needed for legacy drivers) */
- list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
- list_del(&vma->head);
- kfree(vma);
- }
-}
-#endif
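
The deletion above retires the last legacy, map-list based mmap path. For
contrast, a minimal sketch of the GEM-based path that non-legacy drivers use
instead; this is not part of the patch, and "my_driver" is a hypothetical name:

	#include <drm/drm_drv.h>
	#include <drm/drm_gem.h>

	/* Wires .mmap to drm_gem_mmap(), which resolves the fake offset to a
	 * GEM object and installs that object's vm_ops. */
	DEFINE_DRM_GEM_FOPS(my_driver_fops);

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		.fops		 = &my_driver_fops,
	};
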
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index f9bc837e2..9a2965741 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -640,16 +640,14 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
-static int etnaviv_pdev_remove(struct platform_device *pdev)
+static void etnaviv_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &etnaviv_master_ops);
-
- return 0;
}
static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
- .remove = etnaviv_pdev_remove,
+ .remove_new = etnaviv_pdev_remove,
.driver = {
.name = "etnaviv",
},
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 2416c526f..3d0f8d182 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -535,7 +535,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- submit->ctx);
+ 1, submit->ctx);
if (ret)
goto err_submit_put;
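
The added "1" reflects drm_sched_job_init() gaining a credits parameter in
this release: each job now declares its cost against the scheduler's credit
limit. Annotated sketch of the updated call (the comments are an
interpretation, not quoted from the scheduler header):

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 1,		/* credits: cost of this job */
				 submit->ctx);	/* owner */
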
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 9276756e1..9b8445d2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1904,11 +1904,10 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
return 0;
}
-static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
+static void etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &gpu_ops);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int etnaviv_gpu_rpm_suspend(struct device *dev)
@@ -1917,7 +1916,7 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
u32 idle, mask;
/* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&gpu->sched.hw_rq_count))
+ if (atomic_read(&gpu->sched.credit_count))
return -EBUSY;
/* Check whether the hardware (except FE and MC) is idle */
@@ -1970,6 +1969,6 @@ struct platform_driver etnaviv_gpu_driver = {
.of_match_table = etnaviv_gpu_match,
},
.probe = etnaviv_gpu_platform_probe,
- .remove = etnaviv_gpu_platform_remove,
+ .remove_new = etnaviv_gpu_platform_remove,
.id_table = gpu_ids,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 9b79f218e..c4b04b0de 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
int ret;
- ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+ ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
msecs_to_jiffies(500), NULL, NULL,
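
drm_sched_init() changes in step: it gains a submit_wq argument (the new NULL
above, which asks the scheduler to allocate its own ordered workqueue), and
the old hw_submission count becomes a credit limit, which is why the runtime
suspend hunk above now reads sched.credit_count. A sketch of the resulting
signature; treat the exact parameter names as an approximation:

	int drm_sched_init(struct drm_gpu_scheduler *sched,
			   const struct drm_sched_backend_ops *ops,
			   struct workqueue_struct *submit_wq, /* NULL: allocate one */
			   u32 num_rqs,
			   u32 credit_limit,		/* was hw_submission */
			   unsigned int hang_limit,
			   long timeout,
			   struct workqueue_struct *timeout_wq,
			   atomic_t *score,
			   const char *name,
			   struct device *dev);
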
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index bce027552..0ef7bc884 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -862,18 +862,16 @@ err_disable_pm_runtime:
return ret;
}
-static int exynos5433_decon_remove(struct platform_device *pdev)
+static void exynos5433_decon_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &decon_component_ops);
-
- return 0;
}
struct platform_driver exynos5433_decon_driver = {
.probe = exynos5433_decon_probe,
- .remove = exynos5433_decon_remove,
+ .remove_new = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
.pm = pm_ptr(&exynos5433_decon_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 0156a5e94..0d185c056 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -765,7 +765,7 @@ err_iounmap:
return ret;
}
-static int decon_remove(struct platform_device *pdev)
+static void decon_remove(struct platform_device *pdev)
{
struct decon_context *ctx = dev_get_drvdata(&pdev->dev);
@@ -774,8 +774,6 @@ static int decon_remove(struct platform_device *pdev)
iounmap(ctx->regs);
component_del(&pdev->dev, &decon_component_ops);
-
- return 0;
}
static int exynos7_decon_suspend(struct device *dev)
@@ -840,7 +838,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
struct platform_driver decon_driver = {
.probe = decon_probe,
- .remove = decon_remove,
+ .remove_new = decon_remove,
.driver = {
.name = "exynos-decon",
.pm = pm_ptr(&exynos7_decon_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3404ec136..ca31bad6c 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -250,14 +250,12 @@ out:
return component_add(&pdev->dev, &exynos_dp_ops);
}
-static int exynos_dp_remove(struct platform_device *pdev)
+static void exynos_dp_remove(struct platform_device *pdev)
{
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
component_del(&pdev->dev, &exynos_dp_ops);
analogix_dp_remove(dp->adp);
-
- return 0;
}
static int exynos_dp_suspend(struct device *dev)
@@ -285,7 +283,7 @@ MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
- .remove = exynos_dp_remove,
+ .remove_new = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 378e53819..0dc36df6a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -101,7 +101,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(encoder->dev, connector,
&exynos_dpi_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
if (ret) {
DRM_DEV_ERROR(ctx->dev,
"failed to initialize connector with drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 5380fb6c5..7c59e1164 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -346,10 +346,9 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
match);
}
-static int exynos_drm_platform_remove(struct platform_device *pdev)
+static void exynos_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &exynos_drm_ops);
- return 0;
}
static void exynos_drm_platform_shutdown(struct platform_device *pdev)
@@ -362,7 +361,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev)
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove = exynos_drm_platform_remove,
+ .remove_new = exynos_drm_platform_remove,
.shutdown = exynos_drm_platform_shutdown,
.driver = {
.name = "exynos-drm",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 8de271459..e81a576de 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1367,7 +1367,7 @@ err_pm_dis:
return ret;
}
-static int fimc_remove(struct platform_device *pdev)
+static void fimc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1377,8 +1377,6 @@ static int fimc_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
fimc_put_clocks(ctx);
-
- return 0;
}
static int fimc_runtime_suspend(struct device *dev)
@@ -1410,7 +1408,7 @@ MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove = fimc_remove,
+ .remove_new = fimc_remove,
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 5bdc246f5..f2145227a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -480,7 +480,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
struct fimd_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
const struct fimd_driver_data *driver_data = ctx->driver_data;
- void *timing_base = ctx->regs + driver_data->timing_base;
+ void __iomem *timing_base = ctx->regs + driver_data->timing_base;
u32 val;
if (ctx->suspended)
@@ -1277,13 +1277,11 @@ err_disable_pm_runtime:
return ret;
}
-static int fimd_remove(struct platform_device *pdev)
+static void fimd_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &fimd_component_ops);
-
- return 0;
}
static int exynos_fimd_suspend(struct device *dev)
@@ -1325,7 +1323,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove = fimd_remove,
+ .remove_new = fimd_remove,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
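
The void to "void __iomem *" change in the first fimd hunk is a sparse
annotation fix: pointers into MMIO space must only be dereferenced through
the I/O accessors. A minimal sketch, with REG_OFFSET standing in for whichever
timing register is actually written:

	void __iomem *timing_base = ctx->regs + driver_data->timing_base;
	u32 val;

	val = readl(timing_base + REG_OFFSET);	/* read via accessor */
	writel(val, timing_base + REG_OFFSET);	/* write via accessor */
	/* *(u32 *)timing_base = val;  -- a direct store; sparse flags this */
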
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 414e585ec..f31384236 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1530,7 +1530,7 @@ err_destroy_slab:
return ret;
}
-static int g2d_remove(struct platform_device *pdev)
+static void g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
@@ -1545,8 +1545,6 @@ static int g2d_remove(struct platform_device *pdev)
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
-
- return 0;
}
static int g2d_suspend(struct device *dev)
@@ -1609,7 +1607,7 @@ MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove = g2d_remove,
+ .remove_new = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 5302bebbe..180507a47 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -11,9 +11,10 @@
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <drm/drm_fourcc.h>
@@ -103,7 +104,7 @@ struct gsc_context {
unsigned int num_formats;
void __iomem *regs;
- const char **clk_names;
+ const char *const *clk_names;
struct clk *clocks[GSC_MAX_CLOCKS];
int num_clocks;
struct gsc_scaler sc;
@@ -1217,7 +1218,7 @@ static const unsigned int gsc_tiled_formats[] = {
static int gsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct gsc_driverdata *driver_data;
+ const struct gsc_driverdata *driver_data;
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
int num_formats, ret, i, j;
@@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
+ driver_data = device_get_match_data(dev);
ctx->dev = dev;
ctx->num_clocks = driver_data->num_clocks;
ctx->clk_names = driver_data->clk_names;
@@ -1308,15 +1309,13 @@ err_pm_dis:
return ret;
}
-static int gsc_remove(struct platform_device *pdev)
+static void gsc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &gsc_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int __maybe_unused gsc_runtime_suspend(struct device *dev)
@@ -1421,7 +1420,7 @@ MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove = gsc_remove,
+ .remove_new = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 17bab5b16..e29209601 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -442,7 +442,7 @@ err:
return ret;
}
-static int exynos_mic_remove(struct platform_device *pdev)
+static void exynos_mic_remove(struct platform_device *pdev)
{
struct exynos_mic *mic = platform_get_drvdata(pdev);
@@ -450,8 +450,6 @@ static int exynos_mic_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
drm_bridge_remove(&mic->bridge);
-
- return 0;
}
static const struct of_device_id exynos_mic_of_match[] = {
@@ -462,7 +460,7 @@ MODULE_DEVICE_TABLE(of, exynos_mic_of_match);
struct platform_driver mic_driver = {
.probe = exynos_mic_probe,
- .remove = exynos_mic_remove,
+ .remove_new = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ffb327c51..5f7516655 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -329,15 +329,13 @@ err_component:
return ret;
}
-static int rotator_remove(struct platform_device *pdev)
+static void rotator_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &rotator_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int rotator_runtime_suspend(struct device *dev)
@@ -453,7 +451,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
struct platform_driver rotator_driver = {
.probe = rotator_probe,
- .remove = rotator_remove,
+ .remove_new = rotator_remove,
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index f2b8b09a6..392f721f1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -539,15 +539,13 @@ err_ippdrv_register:
return ret;
}
-static int scaler_remove(struct platform_device *pdev)
+static void scaler_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
component_del(dev, &scaler_component_ops);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
-
- return 0;
}
static int clk_disable_unprepare_wrapper(struct clk *clk)
@@ -721,7 +719,7 @@ MODULE_DEVICE_TABLE(of, exynos_scaler_match);
struct platform_driver scaler_driver = {
.probe = scaler_probe,
- .remove = scaler_remove,
+ .remove_new = scaler_remove,
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index fb941a8c9..f5bbba9ad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -462,7 +462,7 @@ static int vidi_probe(struct platform_device *pdev)
return component_add(dev, &vidi_component_ops);
}
-static int vidi_remove(struct platform_device *pdev)
+static void vidi_remove(struct platform_device *pdev)
{
struct vidi_context *ctx = platform_get_drvdata(pdev);
@@ -472,13 +472,11 @@ static int vidi_remove(struct platform_device *pdev)
}
component_del(&pdev->dev, &vidi_component_ops);
-
- return 0;
}
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove = vidi_remove,
+ .remove_new = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index eff51bfc4..b1d02dec3 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2069,7 +2069,7 @@ err_ddc:
return ret;
}
-static int hdmi_remove(struct platform_device *pdev)
+static void hdmi_remove(struct platform_device *pdev)
{
struct hdmi_context *hdata = platform_get_drvdata(pdev);
@@ -2092,8 +2092,6 @@ static int hdmi_remove(struct platform_device *pdev)
put_device(&hdata->ddc_adpt->dev);
mutex_destroy(&hdata->mutex);
-
- return 0;
}
static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
@@ -2125,7 +2123,7 @@ static const struct dev_pm_ops exynos_hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove = hdmi_remove,
+ .remove_new = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index b302392ff..6822333fd 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1258,13 +1258,11 @@ static int mixer_probe(struct platform_device *pdev)
return ret;
}
-static int mixer_remove(struct platform_device *pdev)
+static void mixer_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mixer_component_ops);
-
- return 0;
}
static int __maybe_unused exynos_mixer_suspend(struct device *dev)
@@ -1338,5 +1336,5 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove = mixer_remove,
+ .remove_new = mixer_remove,
};
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 4f302cd5e..58fed80c7 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
- psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 8992a9507..dd1eb7e98 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -855,7 +855,6 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
intel_dp->adapter.owner = THIS_MODULE;
- intel_dp->adapter.class = I2C_CLASS_DDC;
strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
intel_dp->adapter.algo_data = &intel_dp->algo;
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 09cedabf4..aa4550985 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -411,7 +411,6 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
struct intel_gmbus *bus = &dev_priv->gmbus[i];
bus->adapter.owner = THIS_MODULE;
- bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"gma500 gmbus %s",
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index fc9a34ed5..6daa6669e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -168,7 +168,6 @@ static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
.name = "oaktrail_hdmi_i2c",
.nr = 3,
.owner = THIS_MODULE,
- .class = I2C_CLASS_DDC,
.algo = &oaktrail_hdmi_i2c_algorithm,
};
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index dcfcd7b89..6dece8f0e 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
- /* This must occur after the backlight is properly initialised */
- psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index c5edfa4aa..83c17689c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
- /* LID-Switch */
- spinlock_t lid_lock;
- struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index d6fd5d726..e4f914dec 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -2426,7 +2426,6 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
struct drm_device *dev)
{
sdvo->ddc.owner = THIS_MODULE;
- sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
sdvo->ddc.dev.parent = dev->dev;
sdvo->ddc.algo_data = sdvo;
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644
index 58a7fe392..000000000
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
- struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
- u32 __iomem *lid_state = dev_priv->opregion.lid_state;
- u32 pp_status;
-
- if (readl(lid_state) == dev_priv->lid_last_state)
- goto lid_timer_schedule;
-
- if ((readl(lid_state)) & 0x01) {
- /*lid state is open*/
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0 &&
- (pp_status & PP_SEQUENCE_MASK) != 0);
-
- if (REG_READ(PP_STATUS) & PP_ON) {
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
- } else {
- DRM_DEBUG("LVDS panel never powered up");
- return;
- }
- } else {
- psb_intel_lvds_set_brightness(dev, 0);
-
- REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
- do {
- pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
- }
- dev_priv->lid_last_state = readl(lid_state);
-
-lid_timer_schedule:
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- if (!timer_pending(lid_timer)) {
- lid_timer->expires = jiffies + PSB_LID_DELAY;
- add_timer(lid_timer);
- }
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
- struct timer_list *lid_timer = &dev_priv->lid_timer;
- unsigned long irq_flags;
-
- spin_lock_init(&dev_priv->lid_lock);
- spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
- timer_setup(lid_timer, psb_lid_timer_func, 0);
-
- lid_timer->expires = jiffies + PSB_LID_DELAY;
-
- add_timer(lid_timer);
- spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
- del_timer_sync(&dev_priv->lid_timer);
-}
-
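
For reference, the deleted lid-switch code used the timer_setup()/from_timer()
pattern to poll hardware state. A minimal self-rearming sketch with
hypothetical foo names; mod_timer() replaces the open-coded
timer_pending()/add_timer() dance seen above:

	#include <linux/timer.h>

	struct foo {
		struct timer_list poll_timer;
	};

	static void foo_poll(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer member */
		struct foo *foo = from_timer(foo, t, poll_timer);

		/* ... sample the hardware state here ... */

		mod_timer(&foo->poll_timer, jiffies + HZ / 10);	/* re-arm */
	}

	/* at init time: */
	timer_setup(&foo->poll_timer, foo_poll, 0);
	mod_timer(&foo->poll_timer, jiffies + HZ / 10);
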
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index a02f75be8..e16364981 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -51,7 +51,8 @@ static bool gud_is_big_endian(void)
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
void *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
{
unsigned int block_width = drm_format_info_block_width(format, 0);
unsigned int bits_per_pixel = 8 / block_width;
@@ -75,7 +76,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
iosys_map_set_vaddr(&dst_map, buf);
iosys_map_set_vaddr(&vmap, src);
- drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect, fmtcnv_state);
pix8 = buf;
for (y = 0; y < height; y++) {
@@ -152,7 +153,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect,
- struct gud_set_buffer_req *req)
+ struct gud_set_buffer_req *req,
+ struct drm_format_conv_state *fmtcnv_state)
{
u8 compression = gdrm->compression;
struct iosys_map dst;
@@ -178,23 +180,23 @@ retry:
*/
if (format != fb->format) {
if (format->format == GUD_DRM_FORMAT_R1) {
- len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
+ len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect, fmtcnv_state);
if (!len)
return -ENOMEM;
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+ drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads, fmtcnv_state);
} else if (compression && cached_reads && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
@@ -266,7 +268,8 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
const struct iosys_map *src, bool cached_reads,
- const struct drm_format_info *format, struct drm_rect *rect)
+ const struct drm_format_info *format, struct drm_rect *rect,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct gud_set_buffer_req req;
size_t len, trlen;
@@ -274,7 +277,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
+ ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req, fmtcnv_state);
if (ret)
return ret;
@@ -318,6 +321,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
const struct iosys_map *src, bool cached_reads,
struct drm_rect *damage)
{
+ struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
const struct drm_format_info *format;
unsigned int i, lines;
size_t pitch;
@@ -340,7 +344,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
rect.y1 += i * lines;
rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
- ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
+ ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect, &fmtcnv_state);
if (ret) {
if (ret != -ENODEV && ret != -ECONNRESET &&
ret != -ESHUTDOWN && ret != -EPROTO)
@@ -350,6 +354,8 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb
break;
}
}
+
+ drm_format_conv_state_release(&fmtcnv_state);
}
void gud_flush_work(struct work_struct *work)
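
The gud changes thread a struct drm_format_conv_state through the format
helpers so that one temporary conversion buffer is allocated lazily and then
reused for every rectangle of a damage flush. The usage pattern distilled
from the hunks above:

	struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;

	/* each helper may allocate into, then reuse, the state's buffer */
	drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
				  &fmtcnv_state, false /* no byte swap */);

	drm_format_conv_state_release(&fmtcnv_state);	/* free the buffer */
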
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 410bd019b..e6e48651c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -81,7 +81,6 @@ int hibmc_ddc_create(struct drm_device *drm_dev,
struct hibmc_connector *connector)
{
connector->adapter.owner = THIS_MODULE;
- connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
connector->adapter.dev.parent = drm_dev->dev;
i2c_set_adapdata(&connector->adapter, connector);
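
This .class = I2C_CLASS_DDC removal, like the gma500 ones above, follows from
dropping class-based I2C device instantiation for DDC buses: an adapter that
only carries EDID transfers leaves .class unset, and EDID reads keep working
because they address the adapter directly. Sketched, with a hypothetical
adapter name:

	connector->adapter.owner = THIS_MODULE;
	/* no .class: I2C_CLASS_DDC auto-probing is gone; EDID transfers
	 * do not depend on it */
	snprintf(connector->adapter.name, I2C_NAME_SIZE, "my ddc bus");
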
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index d511d17c5..cff85086f 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -7,7 +7,6 @@
#include <linux/hyperv.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/screen_info.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
@@ -73,11 +72,6 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv,
struct drm_device *dev = &hv->dev;
int ret;
- if (IS_ENABLED(CONFIG_SYSFB))
- drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
- screen_info.lfb_size,
- &hyperv_driver);
-
hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;
ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000,
@@ -130,6 +124,8 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
goto err_hv_set_drv_data;
}
+ drm_aperture_remove_framebuffers(&hyperv_driver);
+
ret = hyperv_setup_vram(hv, hdev);
if (ret)
goto err_vmbus_close;
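
The hyperv hunk swaps the screen_info-based range removal for the driver-wide
helper: drm_aperture_remove_framebuffers() evicts any conflicting firmware
framebuffer (efifb, simpledrm, ...) without the caller needing to know its
physical address, which is why the CONFIG_SYSFB block could go. As used above:

	/* before claiming the VRAM aperture */
	drm_aperture_remove_framebuffers(&hyperv_driver);
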
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ce397a879..3089029ab 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -94,7 +94,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang for triaging according to:
- https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
If in doubt, say "Y".
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
Note that this driver only supports newer device from Broadwell on.
For further information and setup guide, you can visit:
- http://01.org/igvt-g.
+ https://github.com/intel/gvt-linux/wiki.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 2d21930d5..5b7162076 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
@@ -38,6 +40,7 @@ config DRM_I915_DEBUG
select DRM_I915_DEBUG_GEM_ONCE
select DRM_I915_DEBUG_MMIO
select DRM_I915_DEBUG_RUNTIME_PM
+ select DRM_I915_DEBUG_WAKEREF
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
@@ -231,7 +234,9 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
@@ -240,3 +245,16 @@ config DRM_I915_DEBUG_RUNTIME_PM
Recommended for driver developers only.
If in doubt, say "N"
+
+config DRM_I915_DEBUG_WAKEREF
+ bool "Enable extra tracking for wakerefs"
+ depends on DRM_I915
+ select REF_TRACKER
+ select STACKDEPOT
+ select STACKTRACE
+ help
+ Choose this option to turn on extra state checking and usage
+ tracking for the wakeref PM functionality. This may introduce
+ overhead during driver runtime.
+
+ If in doubt, say "N"
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 88b2bb005..2d08baa91 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
# The following turn off the warnings enabled by -Wextra
ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
@@ -34,9 +33,9 @@ endif
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
-CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
+CFLAGS_i915_pci.o = -Wno-override-init
+CFLAGS_display/intel_display_device.o = -Wno-override-init
+CFLAGS_display/intel_fbdev.o = -Wno-override-init
# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
@@ -47,33 +46,34 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
-i915-y += i915_driver.o \
- i915_drm_client.o \
- i915_config.o \
- i915_getparam.o \
- i915_ioctl.o \
- i915_irq.o \
- i915_mitigations.o \
- i915_module.o \
- i915_params.o \
- i915_pci.o \
- i915_scatterlist.o \
- i915_suspend.o \
- i915_switcheroo.o \
- i915_sysfs.o \
- i915_utils.o \
- intel_clock_gating.o \
- intel_device_info.o \
- intel_memory_region.o \
- intel_pcode.o \
- intel_region_ttm.o \
- intel_runtime_pm.o \
- intel_sbi.o \
- intel_step.o \
- intel_uncore.o \
- intel_wakeref.o \
- vlv_sideband.o \
- vlv_suspend.o
+i915-y += \
+ i915_config.o \
+ i915_driver.o \
+ i915_drm_client.o \
+ i915_getparam.o \
+ i915_ioctl.o \
+ i915_irq.o \
+ i915_mitigations.o \
+ i915_module.o \
+ i915_params.o \
+ i915_pci.o \
+ i915_scatterlist.o \
+ i915_suspend.o \
+ i915_switcheroo.o \
+ i915_sysfs.o \
+ i915_utils.o \
+ intel_clock_gating.o \
+ intel_device_info.o \
+ intel_memory_region.o \
+ intel_pcode.o \
+ intel_region_ttm.o \
+ intel_runtime_pm.o \
+ intel_sbi.o \
+ intel_step.o \
+ intel_uncore.o \
+ intel_wakeref.o \
+ vlv_sideband.o \
+ vlv_suspend.o
# core peripheral code
i915-y += \
@@ -90,13 +90,13 @@ i915-y += \
i915_syncmap.o \
i915_user_extensions.o
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_COMPAT) += \
+ i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += \
i915_debugfs.o \
- i915_debugfs_params.o \
- display/intel_display_debugfs.o \
- display/intel_pipe_crc.o
-i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
+ i915_debugfs_params.o
+i915-$(CONFIG_PERF_EVENTS) += \
+ i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
gt-y += \
@@ -118,6 +118,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
@@ -153,7 +154,8 @@ gt-y += \
gt/sysfs_engines.o
# x86 intel-gtt module support
-gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o
+gt-$(CONFIG_X86) += \
+ gt/intel_ggtt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -172,9 +174,9 @@ gem-y += \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
gem/i915_gem_internal.o \
- gem/i915_gem_object.o \
gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
+ gem/i915_gem_object.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
@@ -191,57 +193,61 @@ gem-y += \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
i915-y += \
- $(gem-y) \
- i915_active.o \
- i915_cmd_parser.o \
- i915_deps.o \
- i915_gem_evict.o \
- i915_gem_gtt.o \
- i915_gem_ww.o \
- i915_gem.o \
- i915_query.o \
- i915_request.o \
- i915_scheduler.o \
- i915_trace_points.o \
- i915_ttm_buddy_manager.o \
- i915_vma.o \
- i915_vma_resource.o
+ $(gem-y) \
+ i915_active.o \
+ i915_cmd_parser.o \
+ i915_deps.o \
+ i915_gem.o \
+ i915_gem_evict.o \
+ i915_gem_gtt.o \
+ i915_gem_ww.o \
+ i915_query.o \
+ i915_request.o \
+ i915_scheduler.o \
+ i915_trace_points.o \
+ i915_ttm_buddy_manager.o \
+ i915_vma.o \
+ i915_vma_resource.o
# general-purpose microcontroller (GuC) support
i915-y += \
- gt/uc/intel_gsc_fw.o \
- gt/uc/intel_gsc_proxy.o \
- gt/uc/intel_gsc_uc.o \
- gt/uc/intel_gsc_uc_debugfs.o \
- gt/uc/intel_gsc_uc_heci_cmd_submit.o \
- gt/uc/intel_guc.o \
- gt/uc/intel_guc_ads.o \
- gt/uc/intel_guc_capture.o \
- gt/uc/intel_guc_ct.o \
- gt/uc/intel_guc_debugfs.o \
- gt/uc/intel_guc_fw.o \
- gt/uc/intel_guc_hwconfig.o \
- gt/uc/intel_guc_log.o \
- gt/uc/intel_guc_log_debugfs.o \
- gt/uc/intel_guc_rc.o \
- gt/uc/intel_guc_slpc.o \
- gt/uc/intel_guc_submission.o \
- gt/uc/intel_huc.o \
- gt/uc/intel_huc_debugfs.o \
- gt/uc/intel_huc_fw.o \
- gt/uc/intel_uc.o \
- gt/uc/intel_uc_debugfs.o \
- gt/uc/intel_uc_fw.o
+ gt/uc/intel_gsc_fw.o \
+ gt/uc/intel_gsc_proxy.o \
+ gt/uc/intel_gsc_uc.o \
+ gt/uc/intel_gsc_uc_debugfs.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o\
+ gt/uc/intel_guc.o \
+ gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_capture.o \
+ gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_debugfs.o \
+ gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_hwconfig.o \
+ gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_log_debugfs.o \
+ gt/uc/intel_guc_rc.o \
+ gt/uc/intel_guc_slpc.o \
+ gt/uc/intel_guc_submission.o \
+ gt/uc/intel_huc.o \
+ gt/uc/intel_huc_debugfs.o \
+ gt/uc/intel_huc_fw.o \
+ gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
+ gt/uc/intel_uc_fw.o
# graphics system controller (GSC) support
-i915-y += gt/intel_gsc.o
+i915-y += \
+ gt/intel_gsc.o
# graphics hardware monitoring (HWMON) support
-i915-$(CONFIG_HWMON) += i915_hwmon.o
+i915-$(CONFIG_HWMON) += \
+ i915_hwmon.o
# modesetting core code
i915-y += \
display/hsw_ips.o \
+ display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/intel_atomic.o \
display/intel_atomic_plane.o \
display/intel_audio.o \
@@ -257,6 +263,7 @@ i915-y += \
display/intel_display.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
+ display/intel_display_params.o \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
@@ -268,9 +275,12 @@ i915-y += \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dpt.o \
+ display/intel_dpt_common.o \
display/intel_drrs.o \
display/intel_dsb.o \
+ display/intel_dsb_buffer.o \
display/intel_fb.o \
+ display/intel_fb_bo.o \
display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
@@ -287,8 +297,8 @@ i915-y += \
display/intel_load_detect.o \
display/intel_lpe_audio.o \
display/intel_modeset_lock.o \
- display/intel_modeset_verify.o \
display/intel_modeset_setup.o \
+ display/intel_modeset_verify.o \
display/intel_overlay.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
@@ -302,8 +312,6 @@ i915-y += \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
- display/i9xx_plane.o \
- display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
@@ -311,7 +319,12 @@ i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
- display/intel_fbdev.o
+ display/intel_fbdev.o \
+ display/intel_fbdev_fb.o
+i915-$(CONFIG_DEBUG_FS) += \
+ display/intel_display_debugfs.o \
+ display/intel_display_debugfs_params.o \
+ display/intel_pipe_crc.o
# modesetting output/encoder code
i915-y += \
@@ -357,13 +370,14 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
-i915-y += i915_perf.o
+i915-y += \
+ i915_perf.o
# Protected execution platform (PXP) support. Base support is required for HuC
i915-y += \
pxp/intel_pxp.o \
- pxp/intel_pxp_tee.o \
- pxp/intel_pxp_huc.o
+ pxp/intel_pxp_huc.o \
+ pxp/intel_pxp_tee.o
i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_cmd.o \
@@ -374,11 +388,11 @@ i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_session.o
# Post-mortem debug and GPU hang state capture
-i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += \
+ i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
gem/selftests/i915_gem_client_blt.o \
gem/selftests/igt_gem_utils.o \
- selftests/intel_scheduler_helpers.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_atomic.o \
@@ -387,10 +401,12 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/igt_mmap.o \
selftests/igt_reset.o \
selftests/igt_spinner.o \
+ selftests/intel_scheduler_helpers.o \
selftests/librapl.o
# virtual gpu code
-i915-y += i915_vgpu.o
+i915-y += \
+ i915_vgpu.o
i915-$(CONFIG_DRM_I915_GVT) += \
intel_gvt.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index e8ee0a089..06ec04e66 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -432,7 +432,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
@@ -475,6 +475,40 @@ intel_dp_link_down(struct intel_encoder *encoder,
}
}
+static void g4x_dp_audio_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ /* Enable audio presence detect */
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void g4x_dp_audio_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /* Disable audio presence detect */
+ intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+}
+
static void intel_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -484,8 +518,6 @@ static void intel_disable_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
/*
* Make sure the panel is off before trying to change the mode.
* But also ensure that we have vdd while we switch off the panel.
@@ -631,8 +663,6 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
- if (crtc_state->has_audio)
- intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
@@ -686,7 +716,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
@@ -695,7 +724,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
@@ -1325,6 +1353,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->disable = g4x_disable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
}
+ intel_encoder->audio_enable = g4x_dp_audio_enable;
+ intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
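The hunks above move the DP audio codec handling out of the port enable/disable sequence and into the new encoder->audio_enable/audio_disable hooks, so audio can be toggled independently of a full modeset. A minimal sketch of how a common-layer caller could dispatch these hooks (the dispatcher below is an assumption for illustration; only the hook fields themselves appear in this patch):

static void intel_encoder_audio_enable(struct intel_encoder *encoder,
				       const struct intel_crtc_state *crtc_state,
				       const struct drm_connector_state *conn_state)
{
	/* hypothetical common-layer dispatcher, not from this patch */
	if (encoder->audio_enable)
		encoder->audio_enable(encoder, crtc_state, conn_state);
}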
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 45e044b4a..8096492b3 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -228,25 +228,51 @@ static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
+static void g4x_hdmi_audio_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink);
+
+ /* Enable audio presence detect */
+ intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /* Disable audio presence detect */
+ intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
+}
+
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
g4x_hdmi_enable_port(encoder, pipe_config);
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_atomic_state *state,
@@ -262,8 +288,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
/*
* HW workaround, need to write this twice for issue
@@ -296,10 +320,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_atomic_state *state,
@@ -317,8 +337,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
/*
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
@@ -351,10 +369,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_atomic_state *state,
@@ -362,11 +376,6 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void intel_disable_hdmi(struct intel_atomic_state *state,
@@ -384,7 +393,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
- temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
+ temp &= ~SDVO_ENABLE;
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
@@ -433,8 +442,6 @@ static void g4x_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
@@ -443,7 +450,6 @@ static void pch_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_atomic_state *state,
@@ -750,6 +756,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
else
intel_encoder->enable = g4x_enable_hdmi;
}
+ intel_encoder->audio_enable = g4x_hdmi_audio_enable;
+ intel_encoder->audio_disable = g4x_hdmi_audio_disable;
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
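The HDMI variant above toggles the audio presence bit with intel_de_rmw(i915, reg, clear, set), i915's read-modify-write register helper. A sketch of its effective semantics (assuming the helper behaves as its name suggests; the real implementation also returns the old value):

	u32 val;

	val = intel_de_read(i915, hdmi->hdmi_reg);
	val &= ~clear;		/* e.g. HDMI_AUDIO_ENABLE on disable */
	val |= set;		/* e.g. HDMI_AUDIO_ENABLE on enable */
	intel_de_write(i915, hdmi->hdmi_reg, val);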
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 7dc38ac02..611a7d6ef 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -193,7 +193,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!i915->params.enable_ips)
+ if (!i915->display.params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@@ -329,7 +329,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(i915->params.enable_ips));
+ str_yes_no(i915->display.params.enable_ips));
if (DISPLAY_VER(i915) >= 8) {
seq_puts(m, "Currently: unknown\n");
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index af0c79a4c..11ca9572e 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -608,7 +608,7 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return crtc && crtc->active && crtc->base.primary->state->fb &&
+ return crtc->active && crtc->base.primary->state->fb &&
crtc->config->hw.adjusted_mode.crtc_clock;
}
@@ -2477,7 +2477,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) <= 6)
+ if (DISPLAY_VER(dev_priv) < 7)
fifo_size /= 2;
}
@@ -2818,7 +2818,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = dev_priv->display.wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2961,7 +2961,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
@@ -2993,7 +2993,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
+ dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3060,7 +3060,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
+ if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 67143a0f5..ac456a227 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -330,7 +330,7 @@ static int afe_clk(struct intel_encoder *encoder,
int bpp;
if (crtc_state->dsc.compression_enable)
- bpp = crtc_state->dsc.compressed_bpp;
+ bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -860,7 +860,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* compressed and non-compressed bpp.
*/
if (crtc_state->dsc.compression_enable) {
- mul = crtc_state->dsc.compressed_bpp;
+ mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
}
@@ -884,7 +884,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
int bpp, line_time_us, byte_clk_period_ns;
if (crtc_state->dsc.compression_enable)
- bpp = crtc_state->dsc.compressed_bpp;
+ bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1458,8 +1458,8 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- if (pipe_config->dsc.compressed_bpp) {
- int div = pipe_config->dsc.compressed_bpp;
+ if (pipe_config->dsc.compressed_bpp_x16) {
+ int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
adjusted_mode->crtc_htotal =
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 5d18145da..ec0d5168b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state)
drm_atomic_state_default_release(&state->base);
kfree(state->global_objs);
-
- i915_sw_fence_fini(&state->commit_ready);
-
kfree(state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index b10743506..06c2455bd 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -31,7 +31,10 @@
* prepare/check/commit/cleanup steps.
*/
+#include <linux/dma-fence-chain.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
@@ -1012,6 +1015,41 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+static int add_dma_resv_fences(struct dma_resv *resv,
+ struct drm_plane_state *new_plane_state)
+{
+ struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
+ struct dma_fence *new;
+ int ret;
+
+ ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
+ if (ret)
+ goto error;
+
+ if (new && fence) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dma_fence_chain_init(chain, fence, new, 1);
+ fence = &chain->base;
+
+ } else if (new) {
+ fence = new;
+ }
+
+ dma_fence_put(new_plane_state->fence);
+ new_plane_state->fence = fence;
+ return 0;
+
+error:
+ dma_fence_put(fence);
+ return ret;
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -1035,7 +1073,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
+ struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
@@ -1058,55 +1096,28 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* can safely continue.
*/
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv,
- false, 0,
- GFP_KERNEL);
+ ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
+ &new_plane_state->uapi);
if (ret < 0)
return ret;
}
}
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
if (!obj)
return 0;
-
ret = intel_plane_pin_fb(new_plane_state);
if (ret)
return ret;
- i915_gem_object_wait_priority(obj, 0, &attr);
+ ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
+ if (ret < 0)
+ goto unpin_fb;
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
+ if (new_plane_state->uapi.fence) {
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
- dma_resv_iter_begin(&cursor, obj->base.resv,
- DMA_RESV_USAGE_WRITE);
- dma_resv_for_each_fence_unlocked(&cursor, fence) {
- intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- }
- dma_resv_iter_end(&cursor);
- } else {
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
}
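add_dma_resv_fences() above folds the reservation object's fences into the plane state's explicit fence with a dma_fence_chain node, which signals only once both the previous link and the fence stored in the node have signaled. A self-contained sketch of that combining step (illustrative; error handling reduced to returning NULL):

#include <linux/dma-fence-chain.h>

static struct dma_fence *combine_two_fences(struct dma_fence *a,
					    struct dma_fence *b)
{
	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return NULL;

	/* the chain node consumes references to both fences */
	dma_fence_chain_init(chain, a, b, 1);
	return &chain->base;
}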
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 19605264a..07e0c7320 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/i915_component.h>
#include "i915_drv.h"
@@ -521,25 +522,25 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
- unsigned int fec_coeff, cdclk, vdsc_bpp;
+ unsigned int fec_coeff, cdclk, vdsc_bppx16;
unsigned int link_clk, lanes;
unsigned int hblank_rise;
h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
- vdsc_bpp = crtc_state->dsc.compressed_bpp;
+ vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;
- drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
- "lanes = %u vdsc_bpp = %u cdclk = %u\n",
- h_active, link_clk, lanes, vdsc_bpp, cdclk);
+ drm_dbg_kms(&i915->drm,
+ "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n",
+ h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk);
- if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+ if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk))
return 0;
link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
@@ -551,8 +552,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
mul_u32_u32(link_clk, cdclk));
- tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
- mul_u32_u32(link_clk * lanes, fec_coeff));
+ tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bppx16 * 8, 1000000),
+ mul_u32_u32(link_clk * lanes * 16, fec_coeff));
tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
mul_u32_u32(64 * pixel_clk, 1000000));
link_clks_active = (tu_line - 1) * 64 + tu_data;
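The vdsc_bpp changes above belong to a series-wide switch from the integer dsc.compressed_bpp to compressed_bpp_x16, a fixed-point value in units of 1/16 bit per pixel; note the extra "* 16" in the tu_data divisor that cancels the scaling. Assumed semantics of the helpers these hunks reference by name (their actual definitions live elsewhere in the series):

#define to_bpp_int(bpp_x16)		((bpp_x16) >> 4)
#define to_bpp_int_roundup(bpp_x16)	(((bpp_x16) + 0xf) >> 4)
#define to_bpp_x16(bpp)			((bpp) << 4)

/*
 * e.g. 12.5 bpp is stored as 200:
 * to_bpp_int(200) = 12, to_bpp_int_roundup(200) = 13
 */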
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 2e8f17c04..3f3cd944a 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -88,10 +88,10 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
- if (i915->params.invert_brightness < 0)
+ if (i915->display.params.invert_brightness < 0)
return val;
- if (i915->params.invert_brightness > 0 ||
+ if (i915->display.params.invert_brightness > 0 ||
intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -132,8 +132,9 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (i915->params.invert_brightness > 0 ||
- (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
+ if (i915->display.params.invert_brightness > 0 ||
+ (i915->display.params.invert_brightness == 0 &&
+ intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -274,7 +275,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state,
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void
@@ -427,7 +428,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
intel_backlight_set_pwm_level(old_conn_state, level);
panel->backlight.pwm_state.enabled = false;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
@@ -749,7 +750,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e7ea28784..eb9835d48 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1116,7 +1116,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915->params.vbt_sdvo_panel_type;
+ index = i915->display.params.vbt_sdvo_panel_type;
if (index == -2) {
drm_dbg_kms(&i915->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
@@ -1514,9 +1514,9 @@ parse_edp(struct drm_i915_private *i915,
u8 vswing;
/* Don't read from VBT if the module parameter has a valid value */
- if (i915->params.edp_vswing) {
+ if (i915->display.params.edp_vswing) {
panel->vbt.edp.low_vswing =
- i915->params.edp_vswing == 1;
+ i915->display.params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
panel->vbt.edp.low_vswing = vswing == 0;
@@ -2232,6 +2232,9 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
const u8 *ddc_pin_map;
int i, n_entries;
+ if (IS_DGFX(i915))
+ return vbt_pin;
+
if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) ||
IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
@@ -2239,8 +2242,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
} else if (IS_ALDERLAKE_S(i915)) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
- return vbt_pin;
} else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
@@ -2504,6 +2505,27 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
}
+static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+
+ if (!intel_bios_encoder_supports_dvi(devdata))
+ return;
+
+ /*
+ * Some BDW machines (e.g. HP Pavilion 15-ab) shipped
+ * with a HSW VBT where the level shifter value goes
+ * up to 11, whereas the BDW max is 9.
+ */
+ if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
+ drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
+ port_name(port), devdata->child.hdmi_level_shifter_value, 9);
+
+ devdata->child.hdmi_level_shifter_value = 9;
+ }
+}
+
static bool
intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata)
{
@@ -2683,6 +2705,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
}
sanitize_device_type(devdata, port);
+ sanitize_hdmi_level_shift(devdata, port);
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -3426,8 +3449,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->pipe_bpp = bpc * 3;
- crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
- VBT_DSC_MAX_BPP(dsc->max_bpp));
+ crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp)));
/*
* FIXME: This is ugly, and slice count should take DSC engine
@@ -3486,8 +3509,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!devdata->dsc)
return false;
- if (crtc_state)
- fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bef96db62..7f2a50b4f 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -87,7 +87,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ 1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -480,7 +481,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
+ if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -897,7 +898,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
unsigned int idx;
unsigned int max_data_rate;
- if (DISPLAY_VER(i915) > 11)
+ if (DISPLAY_VER(i915) >= 12)
idx = tgl_max_bw_index(i915, num_active_planes, i);
else
idx = icl_max_bw_index(i915, num_active_planes, i);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c4839c67c..7ba30d26f 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1180,7 +1180,7 @@ sanitize:
/* force cdclk programming */
dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = ~0;
}
static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1446,50 +1446,77 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
+static u8 calc_voltage_level(int cdclk, int num_voltage_levels,
+ const int voltage_level_max_cdclk[])
+{
+ int voltage_level;
+
+ for (voltage_level = 0; voltage_level < num_voltage_levels; voltage_level++) {
+ if (cdclk <= voltage_level_max_cdclk[voltage_level])
+ return voltage_level;
+ }
+
+ MISSING_CASE(cdclk);
+ return num_voltage_levels - 1;
+}
+
static u8 icl_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int icl_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 556800,
+ [2] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(icl_voltage_level_max_cdclk),
+ icl_voltage_level_max_cdclk);
}
static u8 ehl_calc_voltage_level(int cdclk)
{
- if (cdclk > 326400)
- return 3;
- else if (cdclk > 312000)
- return 2;
- else if (cdclk > 180000)
- return 1;
- else
- return 0;
+ static const int ehl_voltage_level_max_cdclk[] = {
+ [0] = 180000,
+ [1] = 312000,
+ [2] = 326400,
+ /*
+ * Bspec lists the limit as 556.8 MHz, but some JSL
+ * development boards (at least) boot with 652.8 MHz
+ */
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(ehl_voltage_level_max_cdclk),
+ ehl_voltage_level_max_cdclk);
}
static u8 tgl_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 3;
- else if (cdclk > 326400)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int tgl_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 326400,
+ [2] = 556800,
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(tgl_voltage_level_max_cdclk),
+ tgl_voltage_level_max_cdclk);
}
static u8 rplu_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 3;
- else if (cdclk > 480000)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int rplu_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 480000,
+ [2] = 556800,
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(rplu_voltage_level_max_cdclk),
+ rplu_voltage_level_max_cdclk);
}
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
@@ -1800,6 +1827,8 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
+static const int cdclk_squash_len = 16;
+
static int cdclk_squash_divider(u16 waveform)
{
return hweight16(waveform ?: 0xffff);
@@ -1811,7 +1840,6 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
struct intel_cdclk_config *mid_cdclk_config)
{
u16 old_waveform, new_waveform, mid_waveform;
- int size = 16;
int div = 2;
/* Return if PLL is in an unknown state, force a complete disable and re-enable. */
@@ -1850,7 +1878,8 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
}
mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
- mid_cdclk_config->vco, size * div);
+ mid_cdclk_config->vco,
+ cdclk_squash_len * div);
/* make sure the mid clock came out sane */
@@ -1878,9 +1907,9 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u32 val;
+ int unsquashed_cdclk;
u16 waveform;
- int clock;
+ u32 val;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1897,15 +1926,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
waveform = cdclk_squash_waveform(dev_priv, cdclk);
- if (waveform)
- clock = vco / 2;
- else
- clock = cdclk;
+ unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
+ cdclk_squash_divider(waveform));
if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
- val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
+ val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
@@ -2075,7 +2102,7 @@ sanitize:
dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = ~0;
}
static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -2485,7 +2512,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2494,12 +2522,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@@ -2517,7 +2558,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2526,12 +2567,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2597,9 +2641,10 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
* Since PPC = 2 with bigjoiner
* => CDCLK >= compressed_bpp * Pixel clock / (2 * Bigjoiner Interface bits)
*/
- int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24;
- int min_cdclk_bj = (crtc_state->dsc.compressed_bpp * pixel_clock) /
- (2 * bigjoiner_interface_bits);
+ int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
+ int min_cdclk_bj =
+ (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ pixel_clock) / (2 * bigjoiner_interface_bits);
min_cdclk = max(min_cdclk, min_cdclk_bj);
}
@@ -3008,6 +3053,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3186,6 +3232,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
@@ -3488,7 +3536,7 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
- .calc_voltage_level = tgl_calc_voltage_level,
+ .calc_voltage_level = rplu_calc_voltage_level,
};
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
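A worked example for the table-driven voltage levels introduced above (illustrative): with tgl_voltage_level_max_cdclk = {312000, 326400, 556800, 652800},

/*
 *   tgl_calc_voltage_level(307200) -> 0  (307200 <= 312000)
 *   tgl_calc_voltage_level(480000) -> 2  (first bound >= 480000 is 556800)
 *   tgl_calc_voltage_level(652800) -> 3
 *
 * matching the old if/else ladders, except that an out-of-range cdclk
 * is now reported via MISSING_CASE() instead of silently mapping to
 * the top level.
 */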
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e..71bc032bf 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 1d26be54d..c5092b7e8 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -785,14 +785,12 @@ static void chv_assign_csc(struct intel_crtc_state *crtc_state)
/* convert hw value with given bit_precision to lut property val */
static u32 intel_color_lut_pack(u32 val, int bit_precision)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1),
+ (1 << bit_precision) - 1);
+ else
+ return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1),
+ (1 << bit_precision) - 1);
}
static u32 i9xx_lut_8(const struct drm_color_lut *color)
@@ -911,7 +909,7 @@ static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static u16 i965_lut_11p6_max_pack(u32 val)
{
/* PIPEGCMAX is 11.6, clamp to 10.6 */
- return clamp_val(val, 0, 0xffff);
+ return min(val, 0xffffu);
}
static u32 ilk_lut_10(const struct drm_color_lut *color)
@@ -1528,14 +1526,27 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
return 35;
}
-/*
- * change_lut_val_precision: helper function to upscale or downscale lut values.
- * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient
- * as currently there are no lut values exceeding 32 bit.
- */
-static u32 change_lut_val_precision(u32 lut_val, int to, int from)
+static u32 glk_degamma_lut(const struct drm_color_lut *color)
+{
+ return color->green;
+}
+
+static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
+{
+ /* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */
+ entry->red = entry->green = entry->blue = min(val, 0xffffu);
+}
+
+static u32 mtl_degamma_lut(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 24);
+}
+
+static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
{
- return mul_u32_u32(lut_val, (1 << to)) / (1 << from);
+ /* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */
+ entry->red = entry->green = entry->blue =
+ intel_color_lut_pack(min(val, 0xffffffu), 24);
}
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
@@ -1572,20 +1583,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32-bit input values,
* as compared to just 16-bit, to achieve this.
*/
- u32 lut_val;
-
- if (DISPLAY_VER(i915) >= 14)
- lut_val = change_lut_val_precision(lut[i].green, 24, 16);
- else
- lut_val = lut[i].green;
-
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- lut_val);
+ DISPLAY_VER(i915) >= 14 ?
+ mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(i915))
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(i915) >= 14 ?
+ 1 << 24 : 1 << 16);
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@@ -3572,17 +3579,10 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
- /*
- * For MTL and beyond, convert back the 24 bit lut values
- * read from HW to 16 bit values to maintain parity with
- * userspace values
- */
if (DISPLAY_VER(dev_priv) >= 14)
- val = change_lut_val_precision(val, 16, 24);
-
- lut[i].red = val;
- lut[i].green = val;
- lut[i].blue = val;
+ mtl_degamma_lut_pack(&lut[i], val);
+ else
+ glk_degamma_lut_pack(&lut[i], val);
}
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
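A worked example for the reworked intel_color_lut_pack() above (illustrative): the old code left-shifted, so a 10-bit hardware value of 0x3ff became 0xffc0; the rounding division now scales the full range:

/*
 *   intel_color_lut_pack(0x3ff, 10) = round(1023 * 65535 / 1023) = 0xffff
 *   intel_color_lut_pack(0x200, 10) = round( 512 * 65535 / 1023) = 32800
 *     (vs. 0x200 << 6 = 32768 with the old shift)
 */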
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 6f6b348b8..abaacea5c 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -846,7 +846,7 @@ intel_crt_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
- if (dev_priv->params.load_detect_test) {
+ if (dev_priv->display.params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
@@ -906,7 +906,7 @@ load_detect:
else if (DISPLAY_VER(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (dev_priv->params.load_detect_test)
+ else if (dev_priv->display.params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 1fd068e6e..8a84a31c7 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -553,8 +553,15 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
intel_psr_lock(new_crtc_state);
- if (new_crtc_state->do_async_flip)
+ if (new_crtc_state->do_async_flip) {
+ spin_lock_irq(&crtc->base.dev->event_lock);
+ /* arm the event for the flip done irq handler */
+ crtc->flip_done_event = new_crtc_state->uapi.event;
+ spin_unlock_irq(&crtc->base.dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
return;
+ }
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
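The intel_crtc.c hunk above stashes the page-flip completion event in crtc->flip_done_event under event_lock so that the flip-done interrupt can complete it. A sketch of the consuming side, which is not part of this hunk (the handler name is an assumption; drm_crtc_send_vblank_event() must be called with event_lock held):

static void intel_crtc_handle_flip_done(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	spin_lock(&dev->event_lock);
	if (crtc->flip_done_event) {
		drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
		crtc->flip_done_event = NULL;
	}
	spin_unlock(&dev->event_lock);
}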
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 66fe880af..49fd100ec 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
@@ -261,6 +262,15 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
str_enabled_disabled(pipe_config->fec_enable),
str_enabled_disabled(pipe_config->enhanced_framing));
+
+ drm_dbg_kms(&i915->drm, "sdp split: %s\n",
+ str_enabled_disabled(pipe_config->sdp_split_enable));
+
+ drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
+ str_enabled_disabled(pipe_config->has_psr),
+ str_enabled_disabled(pipe_config->has_psr2),
+ str_enabled_disabled(pipe_config->has_panel_replay),
+ str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}
drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index b342fad18..2dde7ac88 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -21,8 +21,11 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "skl_watermark.h"
+#include "gem/i915_gem_object.h"
+
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -32,12 +35,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
+ base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -484,6 +485,35 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
+static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
+static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+}
+
/* TODO: split into noarm+arm pair */
static void i9xx_cursor_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -531,10 +561,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
- plane_state);
+ i9xx_cursor_update_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index ccf225afe..6b25e1952 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -31,7 +31,7 @@
bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
{
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C)
+ if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
return false;
@@ -206,6 +206,13 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
intel_clear_response_ready_flag(i915, port, lane);
+ /*
+ * FIXME: Workaround to let the HW settle
+ * down and let the message bus end up
+ * in a known state
+ */
+ intel_cx0_bus_reset(i915, port, lane);
+
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
@@ -285,6 +292,13 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
intel_clear_response_ready_flag(i915, port, lane);
+ /*
+ * FIXME: Workaround to let the HW settle
+ * down and let the message bus end up
+ * in a known state
+ */
+ intel_cx0_bus_reset(i915, port, lane);
+
return 0;
}
@@ -401,9 +415,15 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(i915, encoder->port);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -725,7 +745,6 @@ static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
/* C20 basic DP 1.4 tables */
static const struct intel_c20pll_state mtl_c20_dp_rbr = {
- .link_bit_rate = 162000,
.clock = 162000,
.tx = { 0xbe88, /* tx cfg0 */
0x5800, /* tx cfg1 */
@@ -751,7 +770,6 @@ static const struct intel_c20pll_state mtl_c20_dp_rbr = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
- .link_bit_rate = 270000,
.clock = 270000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -777,7 +795,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
- .link_bit_rate = 540000,
.clock = 540000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -803,7 +820,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
- .link_bit_rate = 810000,
.clock = 810000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -830,8 +846,7 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
/* C20 basic DP 2.0 tables */
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
- .link_bit_rate = 1000000, /* 10 Gbps */
- .clock = 312500,
+ .clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -855,8 +870,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
- .link_bit_rate = 1350000, /* 13.5 Gbps */
- .clock = 421875,
+ .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -881,8 +895,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
- .link_bit_rate = 2000000, /* 20 Gbps */
- .clock = 625000,
+ .clock = 2000000, /* 20 Gbps */
.tx = { 0xbe20, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1501,7 +1514,6 @@ static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
- .link_bit_rate = 25175,
.clock = 25175,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1527,7 +1539,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
- .link_bit_rate = 27000,
.clock = 27000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1553,7 +1564,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
- .link_bit_rate = 74250,
.clock = 74250,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1579,7 +1589,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
- .link_bit_rate = 148500,
.clock = 148500,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1605,7 +1614,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
- .link_bit_rate = 594000,
.clock = 594000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1631,8 +1639,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
- .link_bit_rate = 3000000,
- .clock = 166670,
+ .clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1657,8 +1664,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
- .link_bit_rate = 6000000,
- .clock = 333330,
+ .clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1683,8 +1689,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
- .link_bit_rate = 8000000,
- .clock = 444440,
+ .clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1709,8 +1714,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
- .link_bit_rate = 10000000,
- .clock = 555560,
+ .clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1735,8 +1739,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
- .link_bit_rate = 12000000,
- .clock = 666670,
+ .clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1850,8 +1853,8 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
-void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c10pll_state *pll_state)
+static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c10pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
@@ -1985,7 +1988,6 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_
else
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
- pll_state->link_bit_rate = pixel_clock;
pll_state->clock = pixel_clock;
pll_state->tx[0] = 0xbe88;
pll_state->tx[1] = 0x9800;
@@ -2022,7 +2024,7 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int i;
for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->link_bit_rate)
+ if (clock == tables[i]->clock)
return MODE_OK;
}
@@ -2074,7 +2076,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->link_bit_rate) {
+ if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->cx0pll_state.c20 = *tables[i];
return 0;
}
@@ -2097,14 +2099,14 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
static bool intel_c20_use_mplla(u32 clock)
{
/* 10G and 20G rates use MPLLA */
- if (clock == 312500 || clock == 625000)
+ if (clock == 1000000 || clock == 2000000)
return true;
return false;
}
-void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c20pll_state *pll_state)
+static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c20pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
@@ -2200,11 +2202,11 @@ static u8 intel_c20_get_dp_rate(u32 clock)
return 6;
case 432000: /* 4.32 Gbps eDP */
return 7;
- case 312500: /* 10 Gbps DP2.0 */
+ case 1000000: /* 10 Gbps DP2.0 */
return 8;
- case 421875: /* 13.5 Gbps DP2.0 */
+ case 1350000: /* 13.5 Gbps DP2.0 */
return 9;
- case 625000: /* 20 Gbps DP2.0*/
+ case 2000000: /* 20 Gbps DP2.0 */
return 10;
case 648000: /* 6.48 Gbps eDP */
return 11;
@@ -2222,13 +2224,13 @@ static u8 intel_c20_get_hdmi_rate(u32 clock)
return 0;
switch (clock) {
- case 166670: /* 3 Gbps */
- case 333330: /* 6 Gbps */
- case 666670: /* 12 Gbps */
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 1200000: /* 12 Gbps */
return 1;
- case 444440: /* 8 Gbps */
+ case 800000: /* 8 Gbps */
return 2;
- case 555560: /* 10 Gbps */
+ case 1000000: /* 10 Gbps */
return 3;
default:
MISSING_CASE(clock);
@@ -2239,7 +2241,7 @@ static u8 intel_c20_get_hdmi_rate(u32 clock)
static bool is_dp2(u32 clock)
{
/* DP2.0 clock rates */
- if (clock == 312500 || clock == 421875 || clock == 625000)
+ if (clock == 1000000 || clock == 1350000 || clock == 2000000)
return true;
return false;
@@ -2248,11 +2250,11 @@ static bool is_dp2(u32 clock)
static bool is_hdmi_frl(u32 clock)
{
switch (clock) {
- case 166670: /* 3 Gbps */
- case 333330: /* 6 Gbps */
- case 444440: /* 8 Gbps */
- case 555560: /* 10 Gbps */
- case 666670: /* 12 Gbps */
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 800000: /* 8 Gbps */
+ case 1000000: /* 10 Gbps */
+ case 1200000: /* 12 Gbps */
return true;
default:
return false;
@@ -2285,6 +2287,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
bool dp = false;
int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
+ u32 clock = crtc_state->port_clock;
bool cntx;
int i;
@@ -2323,7 +2326,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
}
/* 3.3 mpllb or mplla configuration */
- if (intel_c20_use_mplla(pll_state->clock)) {
+ if (intel_c20_use_mplla(clock)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
@@ -2350,23 +2353,23 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 4. Program custom width to match the link protocol */
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
- PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)),
+ PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
if (dp) {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)),
+ BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(pll_state->clock) ? BIT(7) : 0,
+ is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(pll_state->clock),
+ intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2378,8 +2381,8 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}
-int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state)
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state)
{
unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
@@ -2405,8 +2408,8 @@ int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
return tmpclk;
}
-int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
+static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state)
{
unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
unsigned int multiplier, refclk = 38400;
@@ -3004,17 +3007,115 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
return ICL_PORT_DPLL_DEFAULT;
}
-void intel_c10pll_state_verify(struct intel_atomic_state *state,
+static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder,
+ struct intel_c10pll_state *mpllb_hw_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
+ u8 expected = mpllb_sw_state->pll[i];
+
+ I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name, i,
+ expected, mpllb_hw_state->pll[i]);
+ }
+
+ I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->tx, mpllb_hw_state->tx);
+
+ I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->cmn, mpllb_hw_state->cmn);
+}
+
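+/* Read out the PLL HW state from whichever PHY (C10 or C20) the port uses. */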
+void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
+ else
+ intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
+}
+
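+/* Calculate the port clock from the C10 or C20 PLL state, as appropriate. */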
+int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
+
+ return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
+}
+
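+/* Compare the committed SW C20 PLL state (MPLLA/MPLLB, TX, CMN) with HW. */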
+static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder,
+ struct intel_c20pll_state *mpll_hw_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
+ bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
+ bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
+ int i;
+
+ I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
+ "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ sw_use_mpllb, hw_use_mpllb);
+
+ if (hw_use_mpllb) {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
+ "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
+ "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
+ }
+}
+
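+/* Read back the PLL HW state for the CRTC's encoder and verify it per PHY type. */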
+void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_c10pll_state mpllb_hw_state = {};
- const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
struct intel_encoder *encoder;
+ struct intel_cx0pll_state mpll_hw_state = {};
enum phy phy;
- int i;
if (DISPLAY_VER(i915) < 14)
return;
@@ -3030,27 +3131,13 @@ void intel_c10pll_state_verify(struct intel_atomic_state *state,
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
phy = intel_port_to_phy(i915, encoder->port);
- if (!intel_is_c10phy(i915, phy))
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
- intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state);
+ intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
- for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
- u8 expected = mpllb_sw_state->pll[i];
-
- I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name, i,
- expected, mpllb_hw_state.pll[i]);
- }
-
- I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->tx, mpllb_hw_state.tx);
-
- I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->cmn, mpllb_hw_state.cmn);
+ if (intel_is_c10phy(i915, phy))
+ intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
+ else
+ intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 0e0a38dac..c66826772 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -16,6 +16,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
struct intel_c20pll_state;
+struct intel_cx0pll_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -28,20 +29,19 @@ void intel_mtl_pll_disable(struct intel_encoder *encoder);
enum icl_port_dpll_id
intel_mtl_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state);
+
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder);
+void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state);
+int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state);
+
void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_c10pll_state *hw_state);
-int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state);
-void intel_c10pll_state_verify(struct intel_atomic_state *state,
+void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c20pll_state *pll_state);
void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
const struct intel_c20pll_state *hw_state);
-int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9151d5add..31aa5d54f 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -25,6 +25,7 @@
*
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
#include <drm/display/drm_scdc_helper.h>
@@ -2210,16 +2211,87 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!crtc_state->fec_enable)
return;
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
- drm_dbg_kms(&i915->drm,
- "Failed to set FEC_READY in the sink\n");
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
+ enable ? DP_FEC_READY : 0) <= 0)
+ drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
+ enable ? "enabled" : "disabled");
+
+ if (enable &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
+ DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
+ drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
+}
+
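+/* Return the DP_FEC_STATUS byte, or a negative error code on AUX failure. */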
+static int read_fec_detected_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
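+/*
+ * Poll DP_FEC_STATUS until the sink reports FEC decoding as enabled or
+ * disabled, giving up after a 200 ms timeout.
+ */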
+static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
+{
+ struct drm_i915_private *i915 = to_i915(aux->drm_dev);
+ int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
+ int status;
+ int err;
+
+ err = readx_poll_timeout(read_fec_detected_status, aux, status,
+ status & mask || status < 0,
+ 10000, 200000);
+
+ if (!err && status >= 0)
+ return;
+
+ if (err == -ETIMEDOUT)
+ drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n",
+ str_enabled_disabled(enabled));
+ else
+ drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status);
+}
+
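+/*
+ * Wait for the FEC live state in DP_TP_STATUS to match @enabled; when
+ * enabling, also wait for the sink to report that it detected FEC
+ * decoding.
+ */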
+void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (enabled)
+ ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ else
+ ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+
+ if (ret)
+ drm_err(&i915->drm,
+ "Timeout waiting for FEC live state to get %s\n",
+ str_enabled_disabled(enabled));
+
+ /*
+ * At least the Synaptics MST hub doesn't set the detected flag when
+ * FEC decoding is disabled, so skip waiting for it.
+ */
+ if (enabled)
+ wait_for_fec_detected(&intel_dp->aux, enabled);
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2234,8 +2306,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
0, DP_TP_CTL_FEC_ENABLE);
}
-static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_disable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2466,13 +2538,17 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
+
/*
* DDI FEC: a source that anticipates enabling FEC encoding sets the
* FEC_READY bit in the FEC_CONFIGURATION register to 1 before
* initiating link training
*/
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
@@ -2505,7 +2581,8 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ if (!is_mst)
+ intel_dsc_dp_pps_write(encoder, crtc_state);
}
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2616,13 +2693,16 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
/*
* DDI FEC: a source that anticipates enabling FEC encoding sets the
* FEC_READY bit in the FEC_CONFIGURATION register to 1 before
* initiating link training
*/
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
@@ -2643,7 +2723,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ if (!is_mst)
+ intel_dsc_dp_pps_write(encoder, crtc_state);
}
static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2695,9 +2776,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
- true);
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(intel_dp, crtc_state);
if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
@@ -2705,10 +2788,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
- if (!is_mst)
+ if (!is_mst) {
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
-
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+ }
}
static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2717,10 +2800,15 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (HAS_DP20(dev_priv))
+ if (HAS_DP20(dev_priv)) {
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
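+ /* Enable Panel Replay in the sink when the crtc state uses it. */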
+ if (crtc_state->has_panel_replay)
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
+ DP_PANEL_REPLAY_ENABLE);
+ }
if (DISPLAY_VER(dev_priv) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2866,8 +2954,7 @@ static void disable_ddi_buf(struct intel_encoder *encoder,
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
- /* Disable FEC in DP Sink */
- intel_ddi_disable_fec_state(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -2882,10 +2969,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
mtl_disable_ddi_buf(encoder, crtc_state);
/* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
- intel_ddi_disable_fec_state(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
} else {
disable_ddi_buf(encoder, crtc_state);
}
+
+ intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
}
static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
@@ -2925,6 +3014,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
+
/*
* From TGL spec: "If single stream or multi-stream master transcoder:
* Configure Transcoder Clock select to direct no clock to the
@@ -3110,11 +3201,18 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
-
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
+/* FIXME bad home for this function */
+i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ return DISPLAY_VER(i915) >= 14 ?
+ MTL_CHICKEN_TRANS(cpu_transcoder) :
+ CHICKEN_TRANS(cpu_transcoder);
+}
+
static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
@@ -3233,8 +3331,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_wait_ddi_buf_active(dev_priv, port);
-
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -3252,6 +3348,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_enable_transcoder(crtc_state);
+ intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
+
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -3259,10 +3357,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
else
intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
- /* Enable hdcp if it's desired */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+ intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
static void intel_disable_ddi_dp(struct intel_atomic_state *state,
@@ -3271,16 +3367,16 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
intel_dp->link_trained = false;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
- intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
- false);
+ intel_dp_sink_disable_decompression(state,
+ connector, old_crtc_state);
/* Disable Ignore_MSA bit in DP Sink */
intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
false);
@@ -3294,8 +3390,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
drm_dbg_kms(&i915->drm,
@@ -3578,16 +3672,42 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state)
+static int tgl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
{
- if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 2;
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 3;
- else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 1;
+ if (crtc_state->port_clock > 594000)
+ return 2;
+ else
+ return 0;
+}
+
+static int jsl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->port_clock > 594000)
+ return 3;
+ else
+ return 0;
+}
+
+static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->port_clock > 594000)
+ return 1;
+ else
+ return 0;
+}
+
+void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
@@ -3801,7 +3921,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
@@ -3854,18 +3974,13 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
- } else if (intel_is_c10phy(i915, phy)) {
- intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10);
- crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
} else {
- intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20);
- crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
}
intel_ddi_get_config(encoder, crtc_state);
@@ -4086,7 +4201,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
return 0;
}
@@ -4114,7 +4229,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
@@ -4844,6 +4964,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->audio_enable = intel_audio_codec_enable;
+ encoder->audio_disable = intel_audio_codec_disable;
encoder->get_hw_state = intel_ddi_get_hw_state;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 4999c0ee2..434de7196 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -27,6 +27,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -60,13 +62,15 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state);
+void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state);
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index df582ff81..b10aad15a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -48,6 +48,7 @@
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
+#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -72,10 +73,10 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
-#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
+#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
@@ -193,12 +194,9 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
- if (enable)
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
- 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
- else
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
- DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS,
+ enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}
/* Wa_2006604312:icl,ehl */
@@ -206,10 +204,9 @@ static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
- if (enable)
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
- else
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DPFR_GATING_DIS,
+ enable ? DPFR_GATING_DIS : 0);
}
/* Wa_1604331009:icl,jsl,ehl */
@@ -217,7 +214,8 @@ static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ CURSOR_GATING_DIS,
enable ? CURSOR_GATING_DIS : 0);
}
@@ -397,7 +395,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
@@ -430,16 +427,16 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = TRANSCONF(cpu_transcoder);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ val | TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -458,7 +455,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
@@ -469,8 +465,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = TRANSCONF(cpu_transcoder);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if ((val & TRANSCONF_ENABLE) == 0)
return;
@@ -485,14 +480,12 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~TRANSCONF_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
- FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
- else if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
- intel_de_write(dev_priv, reg, val);
if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -896,6 +889,48 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
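+/* Call the audio_enable() hook of each encoder connected to this CRTC. */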
+static void intel_encoders_audio_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->audio_enable)
+ encoder->audio_enable(encoder, crtc_state, conn_state);
+ }
+}
+
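+/* Call the audio_disable() hook of each encoder that was on this CRTC. */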
+static void intel_encoders_audio_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->audio_disable)
+ encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
+ }
+}
+
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
(new_crtc_state)->feature)
@@ -955,6 +990,28 @@ static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
vrr_params_changed(old_crtc_state, new_crtc_state)));
}
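+/*
+ * Audio must be (re)enabled when it is being switched on, or when the
+ * ELD changes while audio stays enabled.
+ */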
+static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
+ (new_crtc_state->has_audio &&
+ memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
+}
+
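+/* Mirror of audio_enabling() for the disable/ELD-change case. */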
+static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
+ (old_crtc_state->has_audio &&
+ memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
+}
+
#undef is_disabling
#undef is_enabling
@@ -995,6 +1052,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
+
+ if (audio_enabling(old_crtc_state, new_crtc_state))
+ intel_encoders_audio_enable(state, crtc);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1078,6 +1138,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_crtc_update_active_timings(old_crtc_state, false);
}
+ if (audio_disabling(old_crtc_state, new_crtc_state))
+ intel_encoders_audio_disable(state, crtc);
+
intel_drrs_deactivate(old_crtc_state);
intel_psr_pre_plane_update(state, crtc);
@@ -1513,12 +1576,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder transcoder = crtc_state->cpu_transcoder;
- i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
- CHICKEN_TRANS(transcoder);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_de_rmw(dev_priv, reg,
+ intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
HSW_FRAME_START_DELAY_MASK,
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
@@ -1796,31 +1856,31 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
+ /*
+ * DG2's "TC1", although a TC-capable output, doesn't share the same
+ * flow as other platforms on the display engine side and instead
+ * relies on the SNPS PHY, which is programmed separately.
+ */
if (IS_DG2(dev_priv))
- /* DG2's "TC1" output uses a SNPS PHY */
return false;
- else if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0))
+
+ if (DISPLAY_VER(dev_priv) >= 13)
return phy >= PHY_F && phy <= PHY_I;
else if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
- else
- return false;
+
+ return false;
}
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (phy == PHY_NONE)
- return false;
- else if (IS_DG2(dev_priv))
- /*
- * All four "combo" ports and the TC1 port (PHY E) use
- * Synopsis PHYs.
- */
- return phy <= PHY_E;
-
- return false;
+ /*
+ * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
+ * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
+ */
+ return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
@@ -2409,15 +2469,15 @@ static void compute_m_n(u32 *ret_m, u32 *ret_n,
}
void
-intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool fec_enable)
+ int bw_overhead,
+ struct intel_link_m_n *m_n)
{
- u32 data_clock = bits_per_pixel * pixel_clock;
-
- if (fec_enable)
- data_clock = intel_dp_mode_to_fec_clock(data_clock);
+ u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
+ u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
+ bw_overhead);
+ u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
/*
* Windows/BIOS uses fixed M/N values always. Follow suit.
@@ -2428,11 +2488,11 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
*/
m_n->tu = 64;
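+ /*
+ * Data M/N: effective payload data rate (including bw_overhead)
+ * vs. the link's maximum data rate.
+ */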
compute_m_n(&m_n->data_m, &m_n->data_n,
- data_clock, link_clock * nlanes * 8,
+ data_m, data_n,
0x8000000);
compute_m_n(&m_n->link_m, &m_n->link_n,
- pixel_clock, link_clock,
+ pixel_clock, link_symbol_clock,
0x80000);
}
@@ -2567,7 +2627,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
crtc_vblank_start = 1;
}
- if (DISPLAY_VER(dev_priv) > 3)
+ if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
vsyncshift);
@@ -2850,67 +2910,6 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
-static void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- struct dpll clock;
- u32 mdiv;
- int refclk = 100000;
-
- /* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
- mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
- vlv_dpio_put(dev_priv);
-
- clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
- clock.m2 = mdiv & DPIO_M2DIV_MASK;
- clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
- clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
- clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
-
- pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
-}
-
-static void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- struct dpll clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
- int refclk = 100000;
-
- /* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
- pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
- pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
- pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
- pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- vlv_dpio_put(dev_priv);
-
- clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = (pll_dw0 & 0xff) << 22;
- if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
- clock.m2 |= pll_dw2 & 0x3fffff;
- clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
- clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
- clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
-
- pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
-}
-
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
@@ -3168,7 +3167,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
- if (DISPLAY_VER(dev_priv) > 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
@@ -3225,7 +3224,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
* MIPI DSI HW readout.
*/
case PIPE_MISC_BPC_12_ADLP:
- if (DISPLAY_VER(dev_priv) > 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
return 36;
fallthrough;
default:
@@ -3802,9 +3801,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
- MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
- CHICKEN_TRANS(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -3833,133 +3830,27 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
-
- if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev_priv))
- return 120000;
- else if (DISPLAY_VER(dev_priv) != 2)
- return 96000;
- else
- return 48000;
-}
-
-/* Returns the clock of the currently programmed mode of the given pipe. */
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
- u32 fp;
- struct dpll clock;
- int port_clock;
- int refclk = i9xx_pll_refclk(dev, pipe_config);
-
- if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = pipe_config->dpll_hw_state.fp0;
- else
- fp = pipe_config->dpll_hw_state.fp1;
-
- clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
- clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
- clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
- } else {
- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
- }
-
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
- else
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT);
-
- switch (dpll & DPLL_MODE_MASK) {
- case DPLLB_MODE_DAC_SERIAL:
- clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
- 5 : 10;
- break;
- case DPLLB_MODE_LVDS:
- clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
- 7 : 14;
- break;
- default:
- drm_dbg_kms(&dev_priv->drm,
- "Unknown DPLL mode %08x in programmed "
- "mode\n", (int)(dpll & DPLL_MODE_MASK));
- return;
- }
-
- if (IS_PINEVIEW(dev_priv))
- port_clock = pnv_calc_dpll_params(refclk, &clock);
- else
- port_clock = i9xx_calc_dpll_params(refclk, &clock);
- } else {
- enum pipe lvds_pipe;
-
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
- lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
-
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT);
-
- if (lvds & LVDS_CLKB_POWER_UP)
- clock.p2 = 7;
- else
- clock.p2 = 14;
- } else {
- if (dpll & PLL_P1_DIVIDE_BY_TWO)
- clock.p1 = 2;
- else {
- clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
- }
- if (dpll & PLL_P2_DIVIDE_BY_4)
- clock.p2 = 4;
- else
- clock.p2 = 2;
- }
-
- port_clock = i9xx_calc_dpll_params(refclk, &clock);
- }
-
- /*
- * This value includes pixel_multiplier. We will use
- * port_clock to compute adjusted_mode.crtc_clock in the
- * encoder's get_config() function.
- */
- pipe_config->port_clock = port_clock;
-}
-
int intel_dotclock_calculate(int link_freq,
const struct intel_link_m_n *m_n)
{
/*
- * The calculation for the data clock is:
+ * The calculation for the data clock -> pixel clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
* But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
- * and the link clock is simpler:
- * link_clock = (m * link_clock) / n
+ * and for link freq (10 kbit/s units) -> pixel clock it is:
+ * link_symbol_clock = link_freq * 10 / link_symbol_size
+ * pixel_clock = (m * link_symbol_clock) / n
+ * or for more precision:
+ * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
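+ * e.g. for 8b/10b the symbol size is 10 bits, so a 2.7 Gbps link
+ * (link_freq 270000) gives a 270000 kHz link symbol clock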
*/
if (!m_n->link_n)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
- m_n->link_n);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
+ m_n->link_n * intel_dp_link_symbol_size(link_freq));
}
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
@@ -4691,6 +4582,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
+ crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
@@ -5031,6 +4923,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name, \
@@ -5041,6 +4935,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name & (mask), \
@@ -5051,6 +4947,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %i, found %i)", \
current_config->name, \
@@ -5061,6 +4959,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
+ __stringify(name) " is not bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
@@ -5069,23 +4969,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-/*
- * Checks state where we only read out the enabling, but not the entire
- * state itself (like full infoframes or ELD for audio). These states
- * require a full modeset on bootup to fix up.
- */
-#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
- if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
- PIPE_CONF_CHECK_BOOL(name); \
- } else { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
- str_yes_no(current_config->name), \
- str_yes_no(pipe_config->name)); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
@@ -5216,8 +5099,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
- PIPE_CONF_CHECK_I(hw.enable);
- PIPE_CONF_CHECK_I(hw.active);
+ PIPE_CONF_CHECK_BOOL(hw.enable);
+ PIPE_CONF_CHECK_BOOL(hw.active);
PIPE_CONF_CHECK_I(cpu_transcoder);
PIPE_CONF_CHECK_I(mst_master_transcoder);
@@ -5273,8 +5156,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(enhanced_framing);
PIPE_CONF_CHECK_BOOL(fec_enable);
- PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
- PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
+ if (!fastset) {
+ PIPE_CONF_CHECK_BOOL(has_audio);
+ PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
+ }
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -5424,9 +5309,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
- PIPE_CONF_CHECK_I(dsc.compression_enable);
- PIPE_CONF_CHECK_I(dsc.dsc_split);
- PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+ PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
+ PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
PIPE_CONF_CHECK_I(splitter.link_count);
@@ -5444,7 +5329,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
-#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
@@ -5535,6 +5419,16 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
return 0;
}
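+/*
+ * Force a full modeset; clear the fastset-style update flags
+ * (update_pipe, update_m_n, update_lrr) that it supersedes.
+ */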
+static void
+intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->uapi.mode_changed = true;
+
+ crtc_state->update_pipe = false;
+ crtc_state->update_m_n = false;
+ crtc_state->update_lrr = false;
+}
+
/**
* intel_modeset_all_pipes_late - force a full modeset on all pipes
* @state: intel atomic state
@@ -5568,9 +5462,8 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->update_pipe = false;
- crtc_state->update_m_n = false;
- crtc_state->update_lrr = false;
+ intel_crtc_flag_modeset(crtc_state);
+
crtc_state->update_planes |= crtc_state->active_planes;
crtc_state->async_flip_planes = 0;
crtc_state->do_async_flip = false;
@@ -5683,17 +5576,17 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
else
new_crtc_state->uapi.mode_changed = false;
- if (intel_crtc_needs_modeset(new_crtc_state) ||
- intel_compare_link_m_n(&old_crtc_state->dp_m_n,
+ if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if (intel_crtc_needs_modeset(new_crtc_state) ||
- (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
+ if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
new_crtc_state->update_lrr = false;
- if (!intel_crtc_needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ intel_crtc_flag_modeset(new_crtc_state);
+ else
new_crtc_state->update_pipe = true;
}
@@ -6476,15 +6369,14 @@ int intel_atomic_check(struct drm_device *dev,
if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
+ if (intel_dp_mst_crtc_needs_modeset(state, crtc))
+ intel_crtc_flag_modeset(new_crtc_state);
+
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
enum transcoder master = new_crtc_state->mst_master_transcoder;
- if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
+ intel_crtc_flag_modeset(new_crtc_state);
}
if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -6493,21 +6385,13 @@ int intel_atomic_check(struct drm_device *dev,
if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
trans |= BIT(new_crtc_state->master_transcoder);
- if (intel_cpu_transcoders_need_modeset(state, trans)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_cpu_transcoders_need_modeset(state, trans))
+ intel_crtc_flag_modeset(new_crtc_state);
}
if (new_crtc_state->bigjoiner_pipes) {
- if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes))
+ intel_crtc_flag_modeset(new_crtc_state);
}
}
@@ -6528,10 +6412,6 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = drm_dp_mst_atomic_check(&state->base);
- if (ret)
- goto fail;
-
ret = intel_atomic_check_planes(state);
if (ret)
goto fail;
@@ -6767,8 +6647,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_enable_pipe_crc(crtc);
}
-static void intel_update_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
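+/*
+ * First half of a CRTC update: the "noarm" programming that can happen
+ * before the vblank evasion critical section.
+ */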
+static void intel_pre_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
@@ -6810,6 +6690,15 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_color_commit_noarm(new_crtc_state);
intel_crtc_planes_update_noarm(state, crtc);
+}
+
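+/* Second half of a CRTC update: arm it under vblank evasion. */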
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
@@ -6838,7 +6727,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* valid pipe configuration from the BIOS we need to take care
* of enabling them on the CRTC's first fastset.
*/
- if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
+ if (intel_crtc_needs_fastset(new_crtc_state) &&
old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -6934,6 +6823,13 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
continue;
intel_enable_crtc(state, crtc);
+ intel_pre_update_crtc(state, crtc);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
+
intel_update_crtc(state, crtc);
}
}
@@ -6971,6 +6867,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
* So first let's enable all pipes that do not need a full modeset as
* those don't have any external dependency.
*/
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
+ intel_pre_update_crtc(state, crtc);
+ }
+
while (update_pipes) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -7047,6 +6952,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((update_pipes & BIT(pipe)) == 0)
continue;
+ intel_pre_update_crtc(state, crtc);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries, I915_MAX_PIPES, pipe));
@@ -7060,49 +6974,24 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
drm_WARN_ON(&dev_priv->drm, update_pipes);
}
-static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
-{
- struct intel_atomic_state *state, *next;
- struct llist_node *freed;
-
- freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
- llist_for_each_entry_safe(state, next, freed, freed)
- drm_atomic_state_put(&state->base);
-}
-
-void intel_atomic_helper_free_state_worker(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
-
- intel_atomic_helper_free_state(dev_priv);
-}
-
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
- struct wait_queue_entry wait_fence, wait_reset;
- struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
-
- init_wait_entry(&wait_fence, 0);
- init_wait_entry(&wait_reset, 0);
- for (;;) {
- prepare_to_wait(&intel_state->commit_ready.wait,
- &wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
- I915_RESET_MODESET),
- &wait_reset, TASK_UNINTERRUPTIBLE);
-
+ struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ int ret, i;
- if (i915_sw_fence_done(&intel_state->commit_ready) ||
- test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
- break;
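+ /* Wait for every new plane's fence, bounded by the i915 fence timeout. */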
+ for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
+ if (new_plane_state->fence) {
+ ret = dma_fence_wait_timeout(new_plane_state->fence, false,
+ i915_fence_timeout(i915));
+ if (ret <= 0)
+ break;
- schedule();
+ dma_fence_put(new_plane_state->fence);
+ new_plane_state->fence = NULL;
+ }
}
- finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
- I915_RESET_MODESET),
- &wait_reset);
}
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -7120,8 +7009,6 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
-
- intel_atomic_helper_free_state(i915);
}
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
@@ -7394,32 +7281,6 @@ static void intel_atomic_commit_work(struct work_struct *work)
intel_atomic_commit_tail(state);
}
-static int
-intel_atomic_commit_ready(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify notify)
-{
- struct intel_atomic_state *state =
- container_of(fence, struct intel_atomic_state, commit_ready);
-
- switch (notify) {
- case FENCE_COMPLETE:
- /* we do blocking waits in the worker, nothing to do here */
- break;
- case FENCE_FREE:
- {
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_atomic_helper *helper =
- &i915->display.atomic_helper;
-
- if (llist_add(&state->freed, &helper->free_list))
- queue_work(i915->unordered_wq, &helper->free_work);
- break;
- }
- }
-
- return NOTIFY_DONE;
-}
-
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
struct intel_plane_state *old_plane_state, *new_plane_state;
@@ -7442,10 +7303,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_atomic_state_get(&state->base);
- i915_sw_fence_init(&state->commit_ready,
- intel_atomic_commit_ready);
-
/*
* The intel_legacy_cursor_update() fast path takes care
* of avoiding the vblank waits for simple cursor
@@ -7478,7 +7335,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(&dev_priv->drm,
"Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&state->commit_ready);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
@@ -7494,8 +7350,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
struct intel_crtc *crtc;
int i;
- i915_sw_fence_commit(&state->commit_ready);
-
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
@@ -7509,7 +7363,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
- i915_sw_fence_commit(&state->commit_ready);
if (nonblock && state->modeset) {
queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
} else if (nonblock) {
@@ -7909,7 +7762,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
mode->hsync_start == mode->hdisplay)
return MODE_H_ILLEGAL;
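
The rewritten intel_atomic_commit_fence_wait() above drops the i915_sw_fence/wait-queue machinery and instead waits on each plane's pending dma_fence directly. Below is a minimal sketch of that wait-loop pattern; it assumes a generic drm_atomic_state pointer and a caller-chosen timeout in jiffies (the function name and the timeout parameter are placeholders, not the exact i915 helpers):

#include <drm/drm_atomic.h>
#include <linux/dma-fence.h>

/*
 * Sketch only: wait for every pending plane fence, dropping each
 * reference once it has signalled. Mirrors the loop added above.
 */
static void wait_for_plane_fences(struct drm_atomic_state *state,
				  long timeout)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		long ret;

		if (!new_plane_state->fence)
			continue;

		/* second argument false: wait uninterruptibly */
		ret = dma_fence_wait_timeout(new_plane_state->fence, false,
					     timeout);
		if (ret <= 0)
			break;	/* timed out (0) or error (<0) */

		dma_fence_put(new_plane_state->fence);
		new_plane_state->fence = NULL;
	}
}

Waiting per plane keeps the commit path simple: no notify callback, no free list, and the fence references are released as soon as each fence signals.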
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index a05c7e2b7..f4a0773f0 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -105,7 +105,6 @@ enum i9xx_plane_id {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
#define for_each_plane_id_on_crtc(__crtc, __p) \
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
@@ -395,8 +394,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool fec_enable);
+ int bw_overhead,
+ struct intel_link_m_n *m_n);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
@@ -485,8 +484,6 @@ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
enum transcoder cpu_transcoder,
struct intel_link_m_n *m_n);
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
@@ -555,7 +552,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
struct drm_device *drm = &(__i915)->drm; \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \
+ if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \
drm_err(drm, format); \
unlikely(__ret_warn_on); \
})
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index ccfe27630..47297ed85 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -19,6 +19,7 @@
#include "intel_cdclk.h"
#include "intel_display_device.h"
#include "intel_display_limits.h"
+#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
@@ -297,12 +298,6 @@ struct intel_display {
const struct intel_audio_funcs *audio;
} funcs;
- /* Grouping using anonymous structs. Keep sorted. */
- struct intel_atomic_helper {
- struct llist_head free_list;
- struct work_struct free_work;
- } atomic_helper;
-
struct {
/* backlight registers and fields in struct intel_panel */
struct mutex lock;
@@ -348,15 +343,6 @@ struct intel_display {
} dbuf;
struct {
- wait_queue_head_t waitqueue;
-
- /* mutex to protect pmdemand programming sequence */
- struct mutex lock;
-
- struct intel_global_obj obj;
- } pmdemand;
-
- struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -444,6 +430,15 @@ struct intel_display {
} ips;
struct {
+ wait_queue_head_t waitqueue;
+
+ /* mutex to protect pmdemand programming sequence */
+ struct mutex lock;
+
+ struct intel_global_obj obj;
+ } pmdemand;
+
+ struct {
struct i915_power_domains domains;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -520,6 +515,7 @@ struct intel_display {
struct intel_hotplug hotplug;
struct intel_opregion opregion;
struct intel_overlay *overlay;
+ struct intel_display_params params;
struct intel_vbt_data vbt;
struct intel_wm wm;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 2836826f8..d951edb36 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -17,6 +17,7 @@
#include "intel_de.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_debugfs.h"
+#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
@@ -641,6 +642,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_display_capabilities(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_display_device_info_print(DISPLAY_INFO(i915),
+ DISPLAY_RUNTIME_INFO(i915), &p);
+
+ return 0;
+}
+
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1059,6 +1071,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -1082,7 +1095,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
debugfs_create_file(intel_display_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
+ 0644,
minor->debugfs_root,
to_i915(minor->dev),
intel_display_debugfs_files[i].fops);
@@ -1098,15 +1111,15 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_hpd_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
+ intel_display_debugfs_params(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
@@ -1124,23 +1137,23 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- if (!connector->encoder || connector->status != connector_status_connected) {
+ if (!connector->base.encoder ||
+ connector->base.status != connector_status_connected) {
ret = -ENODEV;
goto out;
}
- seq_printf(m, "%s:%d HDCP version: ", connector->name,
- connector->base.id);
- intel_hdcp_info(m, intel_connector);
+ seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
+ connector->base.base.id);
+ intel_hdcp_info(m, connector);
out:
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -1151,16 +1164,16 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_encoder *encoder;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ int connector_type = connector->base.connector_type;
bool lpsp_capable = false;
- encoder = intel_attached_encoder(to_intel_connector(connector));
if (!encoder)
return -ENODEV;
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return -ENODEV;
if (DISPLAY_VER(i915) >= 13)
@@ -1173,15 +1186,15 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
*/
lpsp_capable = encoder->port <= PORT_B;
else if (DISPLAY_VER(i915) == 11)
- lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ lpsp_capable = (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP);
else if (IS_DISPLAY_VER(i915, 9, 10))
lpsp_capable = (encoder->port == PORT_A &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort));
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
+ lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP;
seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
@@ -1191,7 +1204,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
- struct intel_connector *connector = to_intel_connector(m->private);
+ struct intel_connector *connector = m->private;
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
@@ -1242,6 +1255,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr420_Native)),
str_yes_no(drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd,
DP_DSC_YCbCr444)));
+ seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
+ drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1259,13 +1274,13 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_enable = false;
int ret;
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (len == 0)
return 0;
@@ -1303,22 +1318,22 @@ static const struct file_operations i915_dsc_fec_support_fops = {
static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
@@ -1326,7 +1341,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
-out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1335,9 +1350,9 @@ static ssize_t i915_dsc_bpc_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int dsc_bpc = 0;
int ret;
@@ -1369,22 +1384,22 @@ static const struct file_operations i915_dsc_bpc_fops = {
static int i915_dsc_output_format_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
@@ -1393,7 +1408,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
seq_printf(m, "DSC_Output_Format: %s\n",
intel_output_format_name(crtc_state->output_format));
-out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1402,9 +1417,9 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int dsc_output_format = 0;
int ret;
@@ -1434,6 +1449,84 @@ static const struct file_operations i915_dsc_output_format_fops = {
.write = i915_dsc_output_format_write
};
+static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ int ret;
+
+ if (!encoder)
+ return -ENODEV;
+
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ intel_dp = intel_attached_dp(connector);
+ seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n",
+ str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
+
+out:
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ bool dsc_fractional_bpp_enable = false;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force fractional bpp for DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
+ (dsc_fractional_bpp_enable) ? "true" : "false");
+ intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
+
+ *offp += len;
+
+ return len;
+}
+
+static int i915_dsc_fractional_bpp_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fractional_bpp_show, inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fractional_bpp_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fractional_bpp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fractional_bpp_write
+};
+
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1470,39 +1563,38 @@ DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
- * @intel_connector: pointer to a registered drm_connector
+ * @connector: pointer to a registered intel_connector
*
* Cleanup will be done by drm_connector_unregister() through a call to
* drm_debugfs_connector_remove().
*/
-void intel_connector_debugfs_add(struct intel_connector *intel_connector)
+void intel_connector_debugfs_add(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct dentry *root = connector->debugfs_entry;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
/* The connector must have been registered beforehands. */
if (!root)
return;
- intel_drrs_connector_debugfs_add(intel_connector);
- intel_psr_connector_debugfs_add(intel_connector);
+ intel_drrs_connector_debugfs_add(connector);
+ intel_psr_connector_debugfs_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", 0444, root,
connector, &i915_panel_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
connector, &i915_hdcp_sink_capability_fops);
}
- if (DISPLAY_VER(dev_priv) >= 11 &&
- ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- !to_intel_connector(connector)->mst_port) ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ if (DISPLAY_VER(i915) >= 11 &&
+ ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
@@ -1511,13 +1603,16 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_output_format", 0644, root,
connector, &i915_dsc_output_format_fops);
+
+ debugfs_create_file("i915_dsc_fractional_bpp", 0644, root,
+ connector, &i915_dsc_fractional_bpp_fops);
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ if (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
debugfs_create_file("i915_lpsp_capability", 0444, root,
connector, &i915_lpsp_capability_fops);
}
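
The new i915_dsc_fractional_bpp file above follows the standard debugfs seq_file pattern: single_open() routes reads through a show() callback, while the write path parses the user buffer with kstrtobool_from_user(). A stripped-down sketch of that pattern, with demo_flag and the demo_* names as invented stand-ins for the real knob:

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static bool demo_flag;	/* hypothetical knob backing the file */

static int demo_show(struct seq_file *m, void *data)
{
	seq_printf(m, "Demo_Flag: %s\n", demo_flag ? "yes" : "no");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is whatever was passed to debugfs_create_file() */
	return single_open(file, demo_show, inode->i_private);
}

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t len, loff_t *offp)
{
	int ret = kstrtobool_from_user(ubuf, len, &demo_flag);

	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = demo_write,
};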
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
new file mode 100644
index 000000000..b7e68eb62
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_drv.h>
+
+#include "intel_display_debugfs_params.h"
+#include "i915_drv.h"
+#include "intel_display_params.h"
+
+/* int param */
+static int intel_display_param_int_show(struct seq_file *m, void *data)
+{
+ int *value = m->private;
+
+ seq_printf(m, "%d\n", *value);
+
+ return 0;
+}
+
+static int intel_display_param_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intel_display_param_int_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_int_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int *value = m->private;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_int_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_int_open,
+ .read = seq_read,
+ .write = intel_display_param_int_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations intel_display_param_int_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_int_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* unsigned int param */
+static int intel_display_param_uint_show(struct seq_file *m, void *data)
+{
+ unsigned int *value = m->private;
+
+ seq_printf(m, "%u\n", *value);
+
+ return 0;
+}
+
+static int intel_display_param_uint_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intel_display_param_uint_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_uint_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ unsigned int *value = m->private;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_uint_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_uint_open,
+ .read = seq_read,
+ .write = intel_display_param_uint_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations intel_display_param_uint_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_uint_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+#define RO(mode) (((mode) & 0222) == 0)
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &intel_display_param_int_fops_ro :
+ &intel_display_param_int_fops);
+}
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_uint(const char *name, umode_t mode,
+ struct dentry *parent, unsigned int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &intel_display_param_uint_fops_ro :
+ &intel_display_param_uint_fops);
+}
+
+#define _intel_display_param_create_file(parent, name, mode, valp) \
+ do { \
+ if (mode) \
+ _Generic(valp, \
+ bool * : debugfs_create_bool, \
+ int * : intel_display_debugfs_create_int, \
+ unsigned int * : intel_display_debugfs_create_uint, \
+ unsigned long * : debugfs_create_ulong, \
+ char ** : debugfs_create_str) \
+ (name, mode, parent, valp); \
+ } while (0)
+
+/* add a subdirectory with files for each intel display param */
+void intel_display_debugfs_params(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct dentry *dir;
+ char dirname[16];
+
+ snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
+ dir = debugfs_lookup(dirname, minor->debugfs_root);
+ if (!dir)
+ dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ if (IS_ERR(dir))
+ return;
+
+ /*
+ * Note: We could create files for params needing special handling
+ * here. Set mode in params to 0 to skip the generic create file, or
+ * just let the generic create file fail silently with -EEXIST.
+ */
+
+#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
+ dir, #x, mode, &i915->display.params.x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+}
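
Both _intel_display_param_create_file() above and _param_print() later in intel_display_params.c lean on C11 _Generic to pick a handler from the pointer's static type at compile time. A small, compilable userspace illustration of the same dispatch technique (all names here are invented for the demo):

#include <stdio.h>

static void show_int(const char *name, int *v)
{
	printf("%s=%d\n", name, *v);
}

static void show_bool(const char *name, _Bool *v)
{
	printf("%s=%s\n", name, *v ? "yes" : "no");
}

/* Selects the handler from the pointer's type, like the kernel macro. */
#define show(name, valp) \
	_Generic((valp), \
		 int *  : show_int, \
		 _Bool *: show_bool)(name, valp)

int main(void)
{
	int fbc = -1;
	_Bool sagv = 1;

	show("enable_fbc", &fbc);	/* dispatches to show_int */
	show("enable_sagv", &sagv);	/* dispatches to show_bool */
	return 0;
}

Because selection happens at compile time, adding a new parameter type only requires a new association in the _Generic list; a pointer type with no match is a compile error rather than a runtime bug.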
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
new file mode 100644
index 000000000..1e9945a40
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
+#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
+
+struct drm_i915_private;
+
+void intel_display_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 2b1ec23ba..0b522c6a8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -12,6 +12,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_device.h"
+#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
@@ -937,6 +938,13 @@ void intel_display_device_probe(struct drm_i915_private *i915)
DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
}
+
+ intel_display_params_copy(&i915->display.params);
+}
+
+void intel_display_device_remove(struct drm_i915_private *i915)
+{
+ intel_display_params_free(&i915->display.params);
}
static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
@@ -1105,7 +1113,7 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
}
/* Disable nuclear pageflip by default on pre-g4x */
- if (!i915->params.nuclear_pageflip &&
+ if (!i915->display.params.nuclear_pageflip &&
DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
i915->drm.driver_features &= ~DRIVER_ATOMIC;
}
@@ -1145,5 +1153,6 @@ bool intel_display_device_enabled(struct drm_i915_private *i915)
/* Only valid when HAS_DISPLAY() is true */
drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
- return !i915->params.disable_display && !intel_opregion_headless_sku(i915);
+ return !i915->display.params.disable_display &&
+ !intel_opregion_headless_sku(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 5b5c0e533..9b1bce262 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -36,7 +36,7 @@ struct drm_printer;
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7)
+#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi)
#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
@@ -47,9 +47,10 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)
+#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
@@ -161,6 +162,7 @@ struct intel_display_device_info {
bool intel_display_device_enabled(struct drm_i915_private *i915);
void intel_display_device_probe(struct drm_i915_private *i915);
+void intel_display_device_remove(struct drm_i915_private *i915);
void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
void intel_display_device_info_print(const struct intel_display_device_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 44b59ac30..9df9097a0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -181,6 +181,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ spin_lock_init(&i915->display.fb_tracking.lock);
+ mutex_init(&i915->display.backlight.lock);
+ mutex_init(&i915->display.audio.mutex);
+ mutex_init(&i915->display.wm.wm_mutex);
+ mutex_init(&i915->display.pps.mutex);
+ mutex_init(&i915->display.hdcp.hdcp_mutex);
+
intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
intel_color_init_hooks(i915);
@@ -252,10 +259,6 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- init_llist_head(&i915->display.atomic_helper.free_list);
- INIT_WORK(&i915->display.atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
-
intel_init_quirks(i915);
intel_fbc_init(i915);
@@ -423,9 +426,6 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
flush_workqueue(i915->display.wq.flip);
flush_workqueue(i915->display.wq.modeset);
- flush_work(&i915->display.atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
-
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index bff4a7631..a7d8f3fc9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -340,18 +340,15 @@ static void flip_done_handler(struct drm_i915_private *i915,
enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
- struct drm_crtc_state *crtc_state = crtc->base.state;
- struct drm_pending_vblank_event *e = crtc_state->event;
- struct drm_device *dev = &i915->drm;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->event_lock, irqflags);
- crtc_state->event = NULL;
+ spin_lock(&i915->drm.event_lock);
- drm_crtc_send_vblank_event(&crtc->base, e);
+ if (crtc->flip_done_event) {
+ drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
+ crtc->flip_done_event = NULL;
+ }
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock(&i915->drm.event_lock);
}
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -896,7 +893,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
@@ -1653,7 +1650,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
- if (DISPLAY_VER(dev_priv) <= 10)
+ if (DISPLAY_VER(dev_priv) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
new file mode 100644
index 000000000..11e03cfb7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_display_params.h"
+#include "i915_drv.h"
+
+#define intel_display_param_named(name, T, perm, desc) \
+ module_param_named(name, intel_display_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+#define intel_display_param_named_unsafe(name, T, perm, desc) \
+ module_param_named_unsafe(name, intel_display_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+
+static struct intel_display_params intel_display_modparams __read_mostly = {
+#define MEMBER(T, member, value, ...) .member = (value),
+ INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER)
+#undef MEMBER
+};
+/*
+ * Note: As a rule, keep module parameter sysfs permissions read-only
+ * 0400. Runtime changes are only supported through i915 debugfs.
+ *
+ * For any exceptions requiring write access and runtime changes through module
+ * parameter sysfs, prevent debugfs file creation by setting the parameter's
+ * debugfs mode to 0.
+ */
+
+intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
+ "Load VBT from specified file under /lib/firmware");
+
+intel_display_param_named_unsafe(lvds_channel_mode, int, 0400,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+intel_display_param_named_unsafe(panel_use_ssc, int, 0400,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+intel_display_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+intel_display_param_named_unsafe(enable_dc, int, 0400,
+ "Enable power-saving display C-states. "
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
+
+intel_display_param_named_unsafe(enable_dpt, bool, 0400,
+ "Enable display page table (DPT) (default: true)");
+
+intel_display_param_named_unsafe(enable_sagv, bool, 0400,
+ "Enable system agent voltage/frequency scaling (SAGV) (default: true)");
+
+intel_display_param_named_unsafe(disable_power_well, int, 0400,
+ "Disable display power wells when possible "
+ "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
+
+intel_display_param_named_unsafe(enable_ips, bool, 0400, "Enable IPS (default: true)");
+
+intel_display_param_named_unsafe(invert_brightness, int, 0400,
+ "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed */
+intel_display_param_named_unsafe(edp_vswing, int, 0400,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+ "(0=use value from vbt [default], 1=low power swing(200mV),"
+ "2=default swing(400mV))");
+
+intel_display_param_named(enable_dpcd_backlight, int, 0400,
+ "Enable support for DPCD backlight control"
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
+
+intel_display_param_named_unsafe(load_detect_test, bool, 0400,
+ "Force-enable the VGA load detect code for testing (default:false). "
+ "For developers only.");
+
+intel_display_param_named_unsafe(force_reset_modeset_test, bool, 0400,
+ "Force a modeset during gpu reset for testing (default:false). "
+ "For developers only.");
+
+intel_display_param_named(disable_display, bool, 0400,
+ "Disable display (default: false)");
+
+intel_display_param_named(verbose_state_checks, bool, 0400,
+ "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
+
+intel_display_param_named_unsafe(nuclear_pageflip, bool, 0400,
+ "Force enable atomic functionality on platforms that don't have full support yet.");
+
+intel_display_param_named_unsafe(enable_dp_mst, bool, 0400,
+ "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+
+intel_display_param_named_unsafe(enable_fbc, int, 0400,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+intel_display_param_named_unsafe(enable_psr, int, 0400,
+ "Enable PSR "
+ "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
+ "Default: -1 (use per-chip default)");
+
+intel_display_param_named(psr_safest_params, bool, 0400,
+ "Replace PSR VBT parameters by the safest and not optimal ones. This "
+ "is helpful to detect if PSR issues are related to bad values set in "
+ " VBT. (0=use VBT parameters, 1=use safest parameters)"
+ "Default: 0");
+
+intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
+ "Enable PSR2 selective fetch "
+ "(0=disabled, 1=enabled) "
+ "Default: 1");
+
+__maybe_unused
+static void _param_print_bool(struct drm_printer *p, const char *driver_name,
+ const char *name, bool val)
+{
+ drm_printf(p, "%s.%s=%s\n", driver_name, name, str_yes_no(val));
+}
+
+__maybe_unused
+static void _param_print_int(struct drm_printer *p, const char *driver_name,
+ const char *name, int val)
+{
+ drm_printf(p, "%s.%s=%d\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_uint(struct drm_printer *p, const char *driver_name,
+ const char *name, unsigned int val)
+{
+ drm_printf(p, "%s.%s=%u\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_ulong(struct drm_printer *p, const char *driver_name,
+ const char *name, unsigned long val)
+{
+ drm_printf(p, "%s.%s=%lu\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_charp(struct drm_printer *p, const char *driver_name,
+ const char *name, const char *val)
+{
+ drm_printf(p, "%s.%s=%s\n", driver_name, name, val);
+}
+
+#define _param_print(p, driver_name, name, val) \
+ _Generic(val, \
+ bool : _param_print_bool, \
+ int : _param_print_int, \
+ unsigned int : _param_print_uint, \
+ unsigned long : _param_print_ulong, \
+ char * : _param_print_charp)(p, driver_name, name, val)
+
+/**
+ * intel_display_params_dump - dump intel display modparams
+ * @i915: i915 device
+ * @p: the &drm_printer
+ *
+ * Pretty printer for i915 modparams.
+ */
+void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
+
+__maybe_unused static void _param_dup_charp(char **valp)
+{
+ *valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC);
+}
+
+__maybe_unused static void _param_nop(void *valp)
+{
+}
+
+#define _param_dup(valp) \
+ _Generic(valp, \
+ char ** : _param_dup_charp, \
+ default : _param_nop) \
+ (valp)
+
+void intel_display_params_copy(struct intel_display_params *dest)
+{
+ *dest = intel_display_modparams;
+#define DUP(T, x, ...) _param_dup(&dest->x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+__maybe_unused static void _param_free_charp(char **valp)
+{
+ kfree(*valp);
+ *valp = NULL;
+}
+
+#define _param_free(valp) \
+ _Generic(valp, \
+ char ** : _param_free_charp, \
+ default : _param_nop) \
+ (valp)
+
+/* free the allocated members, *not* the passed in params itself */
+void intel_display_params_free(struct intel_display_params *params)
+{
+#define FREE(T, x, ...) _param_free(&params->x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
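
intel_display_params_copy() above does a shallow struct assignment and then re-duplicates every char * member with kstrdup(), so the per-device copy owns its strings; intel_display_params_free() later releases only those duplicated members, never the struct itself. A userspace sketch of the same own-your-strings pattern (the struct and field names are invented):

#include <stdlib.h>
#include <string.h>

struct params {
	int enable_fbc;
	char *vbt_firmware;	/* heap-owned after params_copy() */
};

static void params_copy(struct params *dst, const struct params *src)
{
	*dst = *src;	/* shallow copy of all members */
	/* re-duplicate pointer members so dst owns them independently */
	dst->vbt_firmware = strdup(src->vbt_firmware ? src->vbt_firmware : "");
}

static void params_free(struct params *p)
{
	free(p->vbt_firmware);	/* free owned members, not *p itself */
	p->vbt_firmware = NULL;
}

int main(void)
{
	struct params defaults = { .enable_fbc = -1, .vbt_firmware = "vbt.bin" };
	struct params dev;

	params_copy(&dev, &defaults);	/* dev now owns its own string */
	params_free(&dev);
	return 0;
}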
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
new file mode 100644
index 000000000..6206cc51d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DISPLAY_PARAMS_H_
+#define _INTEL_DISPLAY_PARAMS_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct drm_i915_private;
+
+/*
+ * Invoke param, a function-like macro, for each intel display param, with
+ * arguments:
+ *
+ * param(type, name, value, mode)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ * debugfs file
+ */
+#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+ param(char *, vbt_firmware, NULL, 0400) \
+ param(int, lvds_channel_mode, 0, 0400) \
+ param(int, panel_use_ssc, -1, 0600) \
+ param(int, vbt_sdvo_panel_type, -1, 0400) \
+ param(int, enable_dc, -1, 0400) \
+ param(bool, enable_dpt, true, 0400) \
+ param(bool, enable_sagv, true, 0600) \
+ param(int, disable_power_well, -1, 0400) \
+ param(bool, enable_ips, true, 0600) \
+ param(int, invert_brightness, 0, 0600) \
+ param(int, edp_vswing, 0, 0400) \
+ param(int, enable_dpcd_backlight, -1, 0600) \
+ param(bool, load_detect_test, false, 0600) \
+ param(bool, force_reset_modeset_test, false, 0600) \
+ param(bool, disable_display, false, 0400) \
+ param(bool, verbose_state_checks, true, 0400) \
+ param(bool, nuclear_pageflip, false, 0400) \
+ param(bool, enable_dp_mst, true, 0600) \
+ param(int, enable_fbc, -1, 0600) \
+ param(int, enable_psr, -1, 0600) \
+ param(bool, psr_safest_params, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, true, 0400) \
+
+#define MEMBER(T, member, ...) T member;
+struct intel_display_params {
+ INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER);
+};
+#undef MEMBER
+
+void intel_display_params_dump(struct drm_i915_private *i915,
+ struct drm_printer *p);
+void intel_display_params_copy(struct intel_display_params *dest);
+void intel_display_params_free(struct intel_display_params *params);
+
+#endif
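
The INTEL_DISPLAY_PARAMS_FOR_EACH table above is a classic X-macro: each consumer defines param() (named MEMBER, REGISTER, PRINT, DUP, FREE across these patches) and lets the list expand it once per entry, so the struct layout, defaults, debugfs files and printers all stay in sync from one table. A compact, compilable illustration of the idiom (the two-entry table is invented for the demo):

#include <stdio.h>

/* One row per parameter: param(type, name, default) */
#define DEMO_PARAMS_FOR_EACH(param) \
	param(int,   enable_fbc,  -1) \
	param(_Bool, enable_sagv,  1)

/* Expansion 1: generate the struct members */
#define MEMBER(T, name, value) T name;
struct demo_params { DEMO_PARAMS_FOR_EACH(MEMBER) };
#undef MEMBER

/* Expansion 2: generate the defaults initialiser */
#define INIT(T, name, value) .name = (value),
static struct demo_params demo = { DEMO_PARAMS_FOR_EACH(INIT) };
#undef INIT

int main(void)
{
	printf("enable_fbc=%d enable_sagv=%d\n",
	       demo.enable_fbc, (int)demo.enable_sagv);
	return 0;
}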
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e25785ae1..6fd4fa522 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
struct drm_i915_private,
display.power.domains);
- drm_dbg(&i915->drm, "async_put_wakeref %u\n",
- power_domains->async_put_wakeref);
+ drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
+ str_yes_no(power_domains->async_put_wakeref));
print_power_domains(power_domains, "async_put_domains[0]",
&power_domains->async_put_domains[0]);
@@ -967,7 +967,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
DISPLAY_VER(dev_priv) >= 11 ?
DC_STATE_EN_DC9 : 0;
- if (!dev_priv->params.disable_power_well)
+ if (!dev_priv->display.params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -1016,11 +1016,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
- dev_priv->params.disable_power_well =
+ dev_priv->display.params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
- dev_priv->params.disable_power_well);
+ dev_priv->display.params.disable_power_well);
power_domains->allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
+ get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@@ -1697,14 +1697,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (resume)
intel_dmc_load_program(dev_priv);
- /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12)
+ /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
+ if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(dev_priv) == 13)
intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
@@ -1950,7 +1950,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well) {
+ if (!i915->display.params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
POWER_DOMAIN_INIT);
@@ -1977,7 +1977,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915->params.disable_power_well)
+ if (!i915->display.params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
@@ -2096,7 +2096,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915->params.disable_power_well)
+ if (!i915->display.params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 82dd5be72..06900ff30 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1411,20 +1411,16 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
- enum pipe pipe;
u32 tmp;
drm_WARN_ON_ONCE(&dev_priv->drm,
id != VLV_DISP_PW_DPIO_CMN_BC &&
id != CHV_DISP_PW_DPIO_CMN_D);
- if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
+ if (id == VLV_DISP_PW_DPIO_CMN_BC)
phy = DPIO_PHY0;
- } else {
- pipe = PIPE_C;
+ else
phy = DPIO_PHY1;
- }
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1439,24 +1435,24 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_get(dev_priv);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
}
vlv_dpio_put(dev_priv);
@@ -1510,7 +1506,6 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
u32 reg, val, expected, actual;
/*
@@ -1529,7 +1524,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
reg = _CHV_CMN_DW6_CH1;
vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, pipe, reg);
+ val = vlv_dpio_read(dev_priv, phy, reg);
vlv_dpio_put(dev_priv);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 17178d5d7..c2c347b22 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -29,7 +29,7 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
return;
/* reset doesn't touch the display */
- if (!dev_priv->params.force_reset_modeset_test &&
+ if (!dev_priv->display.params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 65ea37fe8..9529f77b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -198,6 +198,12 @@ struct intel_encoder {
struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*audio_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also sets the pipe
* it is connected to in the pipe parameter. */
@@ -603,6 +609,13 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+ /*
+ * Optional hook called during init/resume to sync any state
+ * stored in the connector (eg. DSC state) wrt. the HW state.
+ */
+ void (*sync_state)(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
+
/* Panel info for eDP and LVDS */
struct intel_panel panel;
@@ -624,6 +637,9 @@ struct intel_connector {
struct drm_dp_aux *dsc_decompression_aux;
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capability;
+
+ u8 dsc_hblank_expansion_quirk:1;
+ u8 dsc_decompression_enabled:1;
} dp;
/* Work struct to schedule a uevent on link train failure */
@@ -675,10 +691,6 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
bool rps_interactive;
-
- struct i915_sw_fence commit_ready;
-
- struct llist_node freed;
};
struct intel_plane_state {
@@ -707,6 +719,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
+ u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
@@ -1015,7 +1028,6 @@ struct intel_c10pll_state {
};
struct intel_c20pll_state {
- u32 link_bit_rate;
u32 clock; /* in kHz */
u16 tx[3];
u16 cmn[4];
@@ -1210,6 +1222,7 @@ struct intel_crtc_state {
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
+ bool has_panel_replay;
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
@@ -1361,7 +1374,8 @@ struct intel_crtc_state {
struct {
bool compression_enable;
bool dsc_split;
- u16 compressed_bpp;
+ /* Compressed bpp in U6.4 fixed point: the low 4 bits hold the fractional part */
+ u16 compressed_bpp_x16;
u8 slice_count;
struct drm_dsc_config config;
} dsc;
@@ -1467,6 +1481,9 @@ struct intel_crtc {
struct intel_crtc_state *config;
+ /* armed event for async flip */
+ struct drm_pending_vblank_event *flip_done_event;
+
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1707,9 +1724,13 @@ struct intel_psr {
bool irq_aux_error;
u16 su_w_granularity;
u16 su_y_granularity;
+ bool source_panel_replay_support;
+ bool sink_panel_replay_support;
+ bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
+ u8 entry_setup_frames;
};
struct intel_dp {
@@ -1808,6 +1829,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
int force_dsc_output_format;
+ bool force_dsc_fractional_bpp_en;
int force_dsc_bpc;
bool hobl_failed;
@@ -1992,17 +2014,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
- (intel_dp)->psr.source_support)
-
-static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
-{
- if (!intel_encoder_is_dp(encoder))
- return false;
-
- return CAN_PSR(enc_to_intel_dp(encoder));
-}
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 073b85b57..b70502586 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -335,77 +335,6 @@ static void disable_event_handler(struct drm_i915_private *i915,
intel_de_write(i915, htp_reg, 0);
}
-static void
-disable_flip_queue_event(struct drm_i915_private *i915,
- i915_reg_t ctl_reg, i915_reg_t htp_reg)
-{
- u32 event_ctl;
- u32 event_htp;
-
- event_ctl = intel_de_read(i915, ctl_reg);
- event_htp = intel_de_read(i915, htp_reg);
- if (event_ctl != (DMC_EVT_CTL_ENABLE |
- DMC_EVT_CTL_RECURRING |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
- !event_htp) {
- drm_dbg_kms(&i915->drm,
- "Unexpected DMC event configuration (control %08x htp %08x)\n",
- event_ctl, event_htp);
- return;
- }
-
- disable_event_handler(i915, ctl_reg, htp_reg);
-}
-
-static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
- i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
-{
- if (dmc_id == DMC_FW_MAIN) {
- if (DISPLAY_VER(i915) == 12) {
- *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
- *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
-
- return true;
- }
- } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
- if (IS_DG2(i915)) {
- *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
- *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
-
- return true;
- }
- }
-
- return false;
-}
-
-static void
-disable_all_flip_queue_events(struct drm_i915_private *i915)
-{
- enum intel_dmc_id dmc_id;
-
- /* TODO: check if the following applies to all D13+ platforms. */
- if (!IS_TIGERLAKE(i915))
- return;
-
- for_each_dmc_id(dmc_id) {
- i915_reg_t ctl_reg;
- i915_reg_t htp_reg;
-
- if (!has_dmc_id_fw(i915, dmc_id))
- continue;
-
- if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
- continue;
-
- disable_flip_queue_event(i915, ctl_reg, htp_reg);
- }
-}
-
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
enum intel_dmc_id dmc_id;
@@ -503,6 +432,16 @@ static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
return offset >= start && offset < end;
}
+static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id, i915_reg_t reg)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+ u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+
+ return offset >= start && offset < end;
+}
+
static bool disable_dmc_evt(struct drm_i915_private *i915,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -514,6 +453,16 @@ static bool disable_dmc_evt(struct drm_i915_private *i915,
if (dmc_id != DMC_FW_MAIN)
return true;
+ /* also disable the flip queue event on the main DMC on TGL */
+ if (IS_TIGERLAKE(i915) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+ return true;
+
+ /* also disable the HRR event on the main DMC on TGL/ADLS */
+ if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+ return true;
+
return false;
}
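
For illustration, a TGL firmware MMIO entry that the first check above now
rejects would carry a payload like the following (field values taken from the
removed disable_flip_queue_event() code):

    u32 data = DMC_EVT_CTL_ENABLE | DMC_EVT_CTL_RECURRING |
               REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, DMC_EVT_CTL_TYPE_EDGE_0_1) |
               REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, DMC_EVT_CTL_EVENT_ID_CLK_MSEC);

REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) then yields
DMC_EVT_CTL_EVENT_ID_CLK_MSEC, so disable_dmc_evt() returns true and the
event handler is loaded in a disabled state instead of as programmed by the
firmware.
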
@@ -579,13 +528,6 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
gen9_set_dc_state_debugmask(i915);
- /*
- * Flip queue events need to be disabled before enabling DC5/6.
- * i915 doesn't use the flip queue feature, so disable it already
- * here.
- */
- disable_all_flip_queue_events(i915);
-
pipedmc_clock_gating_wa(i915, false);
}
@@ -781,9 +723,17 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id);
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+
+ drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
+ i, mmioaddr[i], mmiodata[i],
+ is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
+ is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
+ disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i],
+ dmc_info->mmiodata[i]) ? " (disabling)" : "");
}
dmc_info->mmio_count = mmio_count;
dmc_info->start_mmioaddr = start_mmioaddr;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index cf10094ac..90d0dbb41 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -60,6 +60,7 @@
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
+#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */
/* An event handler scheduled to run at a 1 kHz frequency. */
#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ef09356bc..4e8545126 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -85,8 +85,8 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-/* DP DSC FEC Overhead factor = 1/(0.972261) */
-#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
+/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -124,7 +124,31 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->port_clock >= 1000000;
+ return drm_dp_is_uhbr_rate(crtc_state->port_clock);
+}
+
+/**
+ * intel_dp_link_symbol_size - get the link symbol size for a given link rate
+ * @rate: link rate in 10kbit/s units
+ *
+ * Returns the link symbol size in bits/symbol units depending on the link
+ * rate -> channel coding.
+ */
+int intel_dp_link_symbol_size(int rate)
+{
+ return drm_dp_is_uhbr_rate(rate) ? 32 : 10;
+}
+
+/**
+ * intel_dp_link_symbol_clock - convert link rate to link symbol clock
+ * @rate: link rate in 10kbit/s units
+ *
+ * Returns the link symbol clock frequency in kHz units depending on the
+ * link rate and channel coding.
+ */
+int intel_dp_link_symbol_clock(int rate)
+{
+ return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
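
A couple of worked examples for the two helpers above (rates in 10 kbit/s
units, as used throughout this file):

    /* HBR3: 8.1 Gbps -> rate = 810000, 8b/10b, 10-bit symbols */
    intel_dp_link_symbol_clock(810000);   /* 810000 * 10 / 10 = 810000 kHz */
    /* UHBR10: 10 Gbps -> rate = 1000000, 128b/132b, 32-bit symbols */
    intel_dp_link_symbol_clock(1000000);  /* 10000000 / 32 = 312500 kHz */
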
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
@@ -331,6 +355,9 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
/*
* The required data bandwidth for a mode with given pixel clock and bpp. This
* is the required net bandwidth independent of the data bandwidth efficiency.
+ *
+ * TODO: check if callers of this function should use
+ * intel_dp_effective_data_rate() instead.
*/
int
intel_dp_link_required(int pixel_clock, int bpp)
@@ -339,6 +366,22 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
+/**
+ * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
+ * @pixel_clock: pixel clock in kHz
+ * @bpp_x16: bits per pixel .4 fixed point format
+ * @bw_overhead: BW allocation overhead in 1ppm units
+ *
+ * Return the effective pixel data rate in kB/sec units taking into account
+ * the provided SSC, FEC, DSC BW allocation overhead.
+ */
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+ int bw_overhead)
+{
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
+ 1000000 * 16 * 8);
+}
+
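A worked example for the new helper, with assumed values: a 148.5 MHz pixel
clock at 24 bpp (bpp_x16 = 384) and the 1028530 ppm FEC+DSC overhead factor
defined above gives

    DIV_ROUND_UP_ULL(148500ULL * 384 * 1028530, 1000000 * 16 * 8) == 458211 /* kB/s */

versus 148500 * 24 / 8 = 445500 kB/s with no overhead (bw_overhead = 1000000).
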
/*
* Given a link rate and lanes, get the data bandwidth.
*
@@ -362,29 +405,27 @@ intel_dp_link_required(int pixel_clock, int bpp)
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
- if (max_link_rate >= 1000000) {
- /*
- * UHBR rates always use 128b/132b channel encoding, and have
- * 97.71% data bandwidth efficiency. Consider max_link_rate the
- * link bit rate in units of 10000 bps.
- */
- int max_link_rate_kbps = max_link_rate * 10;
-
- max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
- max_link_rate = max_link_rate_kbps / 8;
- }
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+ int max_link_rate_kbps = max_link_rate * 10;
/*
+ * UHBR rates always use 128b/132b channel encoding, and have
+ * 97.71% data bandwidth efficiency. Consider max_link_rate the
+ * link bit rate in units of 10000 bps.
+ */
+ /*
* Lower than UHBR rates always use 8b/10b channel encoding, and have
* 80% data bandwidth efficiency for SST non-FEC. However, this turns
- * out to be a nop by coincidence, and can be skipped:
+ * out to be a nop by coincidence:
*
* int max_link_rate_kbps = max_link_rate * 10;
- * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+ * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
* max_link_rate = max_link_rate_kbps / 8;
*/
-
- return max_link_rate * max_lanes;
+ return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
+ ch_coding_efficiency),
+ 1000000 * 8);
}
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -680,8 +721,22 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
- return div_u64(mul_u32_u32(mode_clock, 1000000U),
- DP_DSC_FEC_OVERHEAD_FACTOR);
+ return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
+ 1000000U);
+}
+
+int intel_dp_bw_fec_overhead(bool fec_enabled)
+{
+ /*
+ * TODO: Calculate the actual overhead for a given mode.
+ * The hard-coded 1/0.972261=2.853% overhead factor
+ * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
+ * 0.453% DSC overhead. This is enough for a 3840 width mode,
+ * which has a DSC overhead of up to ~0.2%, but may not be
+ * enough for a 1024 width mode where this is ~0.8% (on a 4
+ * lane DP link, with 2 DSC slices and 8 bpp color depth).
+ */
+ return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
}
static int
@@ -1367,15 +1422,16 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
}
-static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- const struct intel_crtc_state *pipe_config)
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config)
{
return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
drm_dp_sink_supports_fec(connector->dp.fec_capability);
@@ -1388,6 +1444,7 @@ static bool intel_dp_supports_dsc(const struct intel_connector *connector,
return false;
return intel_dsc_source_support(crtc_state) &&
+ connector->dp.dsc_decompression_aux &&
drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
}
@@ -1721,15 +1778,15 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
-static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
+static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
{
u32 available_bw, required_bw;
- available_bw = (link_clock * lane_count * timeslots) / 8;
- required_bw = compressed_bpp * (intel_dp_mode_to_fec_clock(mode_clock));
+ available_bw = (link_clock * lane_count * timeslots * 16) / 8;
+ required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
return available_bw > required_bw;
}
@@ -1737,7 +1794,7 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
static int dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits,
- u16 compressed_bpp,
+ u16 compressed_bppx16,
int timeslots)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1752,8 +1809,8 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- if (!is_bw_sufficient_for_dsc_config(compressed_bpp, link_rate, lane_count,
- adjusted_mode->clock,
+ if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
+ lane_count, adjusted_mode->clock,
pipe_config->output_format,
timeslots))
continue;
@@ -1795,7 +1852,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
return 0;
}
-static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
{
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
switch (pipe_config->output_format) {
@@ -1812,9 +1869,9 @@ static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
return 0;
}
-static int dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
- int bpc)
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int bpc)
{
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
pipe_config, bpc) >> 4;
@@ -1834,7 +1891,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
* Max Compressed bpp for Gen 13+ is 27bpp.
	 * For earlier platforms it is 23bpp. (Bspec:49259).
*/
- if (DISPLAY_VER(i915) <= 12)
+ if (DISPLAY_VER(i915) < 13)
return 23;
else
return 27;
@@ -1859,17 +1916,19 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
+ if (valid_dsc_bpp[i] < dsc_min_bpp)
+ continue;
+ if (valid_dsc_bpp[i] > dsc_max_bpp)
break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
limits,
- valid_dsc_bpp[i],
+ valid_dsc_bpp[i] << 4,
timeslots);
if (ret == 0) {
- pipe_config->dsc.compressed_bpp = valid_dsc_bpp[i];
+ pipe_config->dsc.compressed_bpp_x16 =
+ to_bpp_x16(valid_dsc_bpp[i]);
return 0;
}
}
@@ -1885,6 +1944,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
*/
static int
xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits,
int dsc_max_bpp,
@@ -1892,22 +1952,38 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
- u16 compressed_bpp;
+ u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u16 compressed_bppx16;
+ u8 bppx16_step;
int ret;
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+ if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+ bppx16_step = 16;
+ else
+ bppx16_step = 16 / bppx16_incr;
- for (compressed_bpp = dsc_max_bpp;
- compressed_bpp >= dsc_min_bpp;
- compressed_bpp--) {
+ /* Compressed BPP should be less than the Input DSC bpp */
+ dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
+ dsc_min_bpp = dsc_min_bpp << 4;
+
+ for (compressed_bppx16 = dsc_max_bpp;
+ compressed_bppx16 >= dsc_min_bpp;
+ compressed_bppx16 -= bppx16_step) {
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ !to_bpp_frac(compressed_bppx16))
+ continue;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
limits,
- compressed_bpp,
+ compressed_bppx16,
timeslots);
if (ret == 0) {
- pipe_config->dsc.compressed_bpp = compressed_bpp;
+ pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ to_bpp_frac(compressed_bppx16))
+ drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+
return 0;
}
}
@@ -1928,12 +2004,14 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
int dsc_joiner_max_bpp;
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ pipe_config,
+ pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
@@ -1943,7 +2021,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
if (DISPLAY_VER(i915) >= 13)
- return xelpd_dsc_compute_link_config(intel_dp, pipe_config, limits,
+ return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
@@ -2088,19 +2166,22 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ pipe_config,
+ pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- pipe_config->dsc.compressed_bpp = max(dsc_min_bpp, dsc_max_bpp);
+ pipe_config->dsc.compressed_bpp_x16 =
+ to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
pipe_config->pipe_bpp = pipe_bpp;
@@ -2122,8 +2203,9 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
&pipe_config->hw.adjusted_mode;
int ret;
- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, connector, pipe_config);
+ pipe_config->fec_enable = pipe_config->fec_enable ||
+ (!intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, connector, pipe_config));
if (!intel_dp_supports_dsc(connector, pipe_config))
return -EINVAL;
@@ -2188,18 +2270,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm,
- "Cannot compute valid DSC parameters for Input Bpp = %d "
- "Compressed BPP = %d\n",
+				    "Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = " BPP_X16_FMT "\n",
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
return ret;
}
pipe_config->dsc.compression_enable = true;
drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = %d Slice Count = %d\n",
+ "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp,
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
pipe_config->dsc.slice_count);
return 0;
@@ -2314,6 +2396,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2322,6 +2406,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
bool dsc_needed;
int ret = 0;
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
@@ -2369,15 +2457,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (pipe_config->dsc.compression_enable) {
drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
drm_dbg_kms(&i915->drm,
"DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc.compressed_bpp),
+ to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2446,12 +2534,22 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- /*
- * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
- * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
- * Colorimetry Format indication.
- */
- vsc->revision = 0x5;
+ if (crtc_state->has_panel_replay) {
+ /*
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
+ * Encoding/Colorimetry Format indication.
+ */
+ vsc->revision = 0x7;
+ } else {
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ }
+
vsc->length = 0x13;
/* DP 1.4a spec, Table 2-120 */
@@ -2560,6 +2658,21 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
vsc->revision = 0x4;
vsc->length = 0xe;
}
+ } else if (crtc_state->has_panel_replay) {
+ if (intel_dp->psr.colorimetry_support &&
+ intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ /* [Panel Replay with colorimetry info] */
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else {
+ /*
+ * [Panel Replay without colorimetry info]
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo + Panel Replay.
+ */
+ vsc->revision = 0x6;
+ vsc->length = 0x10;
+ }
} else {
/*
* [PSR1]
@@ -2636,14 +2749,18 @@ static bool can_enable_drrs(struct intel_connector *connector,
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- int link_bpp)
+ int link_bpp_x16)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
@@ -2661,9 +2778,10 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
if (pipe_config->splitter.enable)
pixel_clock /= pipe_config->splitter.link_count;
- intel_link_compute_m_n(link_bpp, pipe_config->lane_count, pixel_clock,
- pipe_config->port_clock, &pipe_config->dp_m2_n2,
- pipe_config->fec_enable);
+ intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock,
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m2_n2);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2739,19 +2857,12 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct drm_connector *connector = conn_state->connector;
-
pipe_config->has_audio =
intel_dp_has_audio(encoder, pipe_config, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
pipe_config->sdp_split_enable = pipe_config->has_audio &&
intel_dp_is_uhbr(pipe_config);
-
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
- connector->base.id, connector->name,
- str_yes_no(pipe_config->sdp_split_enable));
}
int
@@ -2764,7 +2875,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
struct intel_connector *connector = intel_dp->attached_connector;
- int ret = 0, link_bpp;
+ int ret = 0, link_bpp_x16;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2813,10 +2924,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
drm_dp_enhanced_frame_cap(intel_dp->dpcd);
if (pipe_config->dsc.compression_enable)
- link_bpp = pipe_config->dsc.compressed_bpp;
+ link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
else
- link_bpp = intel_dp_output_bpp(pipe_config->output_format,
- pipe_config->pipe_bpp);
+ link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
+ pipe_config->pipe_bpp));
if (intel_dp->mso_link_count) {
int n = intel_dp->mso_link_count;
@@ -2840,12 +2951,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_link_compute_m_n(link_bpp,
+ intel_link_compute_m_n(link_bpp_x16,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
- &pipe_config->dp_m_n,
- pipe_config->fec_enable);
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m_n);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2856,7 +2967,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_dp_drrs_compute_config(connector, pipe_config, link_bpp);
+ intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -2924,25 +3035,180 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable)
+static int
+write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int ret;
+ int err;
+ u8 val;
- if (!crtc_state->dsc.compression_enable)
- return;
+ err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
+ if (err < 0)
+ return err;
- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
- enable ? DP_DECOMPRESSION_EN : 0);
- if (ret < 0)
+ if (set)
+ val |= flag;
+ else
+ val &= ~flag;
+
+ return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
+}
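
Usage sketch for the read-modify-write helper above, with an assumed aux
pointer; each call flips one flag in DP_DSC_ENABLE while preserving the
other:

    write_dsc_decompression_flag(aux, DP_DSC_PASSTHROUGH_EN, true);
    write_dsc_decompression_flag(aux, DP_DECOMPRESSION_EN, true);
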
+
+static void
+intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
+ DP_DECOMPRESSION_EN, enable) < 0)
drm_dbg_kms(&i915->drm,
"Failed to %s sink decompression state\n",
str_enable_disable(enable));
}
static void
+intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = connector->port ?
+ connector->port->passthrough_aux : NULL;
+
+ if (!aux)
+ return;
+
+ if (write_dsc_decompression_flag(aux,
+ DP_DSC_PASSTHROUGH_EN, enable) < 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink compression passthrough state\n",
+ str_enable_disable(enable));
+}
+
+static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
+ const struct intel_connector *connector,
+ bool for_get_ref)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct drm_connector *_connector_iter;
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ int ref_count = 0;
+ int i;
+
+ /*
+	 * On SST the decompression AUX device won't be shared: each connector
+	 * uses its own AUX targeting the sink device for this.
+ */
+ if (!connector->mst_port)
+ return connector->dp.dsc_decompression_enabled ? 1 : 0;
+
+ for_each_oldnew_connector_in_state(&state->base, _connector_iter,
+ old_conn_state, new_conn_state, i) {
+ const struct intel_connector *
+ connector_iter = to_intel_connector(_connector_iter);
+
+ if (connector_iter->mst_port != connector->mst_port)
+ continue;
+
+ if (!connector_iter->dp.dsc_decompression_enabled)
+ continue;
+
+ drm_WARN_ON(&i915->drm,
+ (for_get_ref && !new_conn_state->crtc) ||
+ (!for_get_ref && !old_conn_state->crtc));
+
+ if (connector_iter->dp.dsc_decompression_aux ==
+ connector->dp.dsc_decompression_aux)
+ ref_count++;
+ }
+
+ return ref_count;
+}
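
To illustrate the refcounting (an assumed MST topology): two connectors
behind one branch device share the same dsc_decompression_aux. Enabling the
first connector sees a reference count of 0, so the DPCD gets programmed;
enabling the second is a no-op. Symmetrically, only disabling the last
enabled connector clears the DPCD, so the shared branch device is touched
only on the 0 <-> 1 transitions.
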
+
+static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
+
+ connector->dp.dsc_decompression_enabled = true;
+
+ return ret;
+}
+
+static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ connector->dp.dsc_decompression_enabled = false;
+
+ return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
+}
+
+/**
+ * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
+ * @state: atomic state
+ * @connector: connector to enable the decompression for
+ * @new_crtc_state: new state for the CRTC driving @connector
+ *
+ * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
+ * register of the appropriate sink/branch device. On SST this is always the
+ * sink device, whereas on MST, depending on each device's DSC capabilities, it's
+ * either the last branch device (enabling decompression in it) or both the
+ * last branch device (enabling passthrough in it) and the sink device
+ * (enabling decompression in it).
+ */
+void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!new_crtc_state->dsc.compression_enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm,
+ !connector->dp.dsc_decompression_aux ||
+ connector->dp.dsc_decompression_enabled))
+ return;
+
+ if (!intel_dp_dsc_aux_get_ref(state, connector))
+ return;
+
+ intel_dp_sink_set_dsc_passthrough(connector, true);
+ intel_dp_sink_set_dsc_decompression(connector, true);
+}
+
+/**
+ * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
+ * @state: atomic state
+ * @connector: connector to disable the decompression for
+ * @old_crtc_state: old state for the CRTC driving @connector
+ *
+ * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
+ * register of the appropriate sink/branch device, corresponding to the
+ * sequence in intel_dp_sink_enable_decompression().
+ */
+void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!old_crtc_state->dsc.compression_enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm,
+ !connector->dp.dsc_decompression_aux ||
+ !connector->dp.dsc_decompression_enabled))
+ return;
+
+ if (!intel_dp_dsc_aux_put_ref(state, connector))
+ return;
+
+ intel_dp_sink_set_dsc_decompression(connector, false);
+ intel_dp_sink_set_dsc_passthrough(connector, false);
+}
+
+static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -3778,7 +4044,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return i915->params.enable_dp_mst &&
+ return i915->display.params.enable_dp_mst &&
intel_dp_mst_source_support(intel_dp) &&
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
@@ -3796,13 +4062,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
str_yes_no(sink_can_mst),
- str_yes_no(i915->params.enable_dp_mst));
+ str_yes_no(i915->display.params.enable_dp_mst));
if (!intel_dp_mst_source_support(intel_dp))
return;
intel_dp->is_mst = sink_can_mst &&
- i915->params.enable_dp_mst;
+ i915->display.params.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -3872,11 +4138,16 @@ static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+ if (vsc->revision == 0x6) {
+ sdp->db[0] = 1;
+ sdp->db[3] = 1;
+ }
+
/*
- * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
- * per DP 1.4a spec.
+	 * Revisions 0x5 and 0x7 support Pixel Encoding/Colorimetry
+	 * Format, as per the DP 1.4a and DP 2.0 specs respectively.
*/
- if (vsc->revision != 0x5)
+ if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
goto out;
/* VSC SDP Payload for DB16 through DB18 */
@@ -4056,7 +4327,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
- /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
+ if (!enable && HAS_DSC(dev_priv))
+ val &= ~VDIP_ENABLE_PPS;
+
/* When PSR is enabled, this routine doesn't disable VSC DIP */
if (!crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
@@ -5416,6 +5690,7 @@ intel_dp_detect(struct drm_connector *connector,
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+ intel_dp->psr.sink_panel_replay_support = false;
if (intel_dp->is_mst) {
drm_dbg_kms(&dev_priv->drm,
@@ -5430,6 +5705,9 @@ intel_dp_detect(struct drm_connector *connector,
goto out;
}
+ if (!intel_dp_is_edp(intel_dp))
+ intel_psr_init_dpcd(intel_dp);
+
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
intel_dp_configure_mst(intel_dp);
@@ -5590,6 +5868,19 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (crtc_state && crtc_state->dsc.compression_enable) {
+ drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ connector->dp.dsc_decompression_enabled = true;
+ } else {
+ connector->dp.dsc_decompression_enabled = false;
+ }
+}
+
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
@@ -6238,6 +6529,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
@@ -6261,16 +6553,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- * 0xd. Failure to do so will result in spurious interrupts being
- * generated on the port when a cable is not attached.
- */
- if (IS_G45(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
- intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
- (temp & ~0xf) | 0xd);
- }
-
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 484aea215..375d0677c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -45,6 +45,8 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
int intel_dp_min_bpp(enum intel_output_format output_format);
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
@@ -57,9 +59,12 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable);
+void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *new_crtc_state);
+void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *old_crtc_state);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
@@ -78,6 +83,8 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
+int intel_dp_link_symbol_size(int rate);
+int intel_dp_link_symbol_clock(int rate);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
@@ -98,6 +105,8 @@ bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+ int bw_overhead);
int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
@@ -125,6 +134,10 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots);
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int bpc);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
bool bigjoiner);
@@ -136,7 +149,16 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config);
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+int intel_dp_bw_fec_overhead(bool fec_enabled);
+
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 4431b6290..2e2af71bc 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -74,7 +74,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (index)
return 0;
@@ -83,12 +83,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 freq;
@@ -101,18 +101,18 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->display.cdclk.hw.cdclk;
+ freq = i915->display.cdclk.hw.cdclk;
else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ freq = RUNTIME_INFO(i915)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -165,12 +165,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 aux_clock_divider)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(i915))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -229,8 +228,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
@@ -531,9 +529,40 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return VLV_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return VLV_DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return VLV_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -550,7 +579,6 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -567,7 +595,6 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -586,7 +613,6 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -605,7 +631,6 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -625,7 +650,6 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -645,7 +669,6 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -668,7 +691,6 @@ static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -691,7 +713,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -702,16 +724,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A);
+ return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
}
}
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -722,10 +744,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index);
+ return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index);
+ return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
}
}
@@ -739,49 +761,52 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
char buf[AUX_CH_NAME_BUFSIZE];
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(i915) >= 14) {
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(i915) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(i915) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(i915)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(i915))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_dp->aux.drm_dev = &dev_priv->drm;
+ intel_dp->aux.drm_dev = &i915->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
- aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch),
+ aux_ch_name(i915, buf, sizeof(buf), aux_ch),
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 26ea7e9f1..4f58efdc6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -146,7 +146,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* HDR static metadata we need to start maintaining table of
* ranges for such panels.
*/
- if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
@@ -489,7 +489,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
/* Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
- switch (i915->params.enable_dpcd_backlight) {
+ switch (i915->display.params.enable_dpcd_backlight) {
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
index 34f6e0a48..e64244536 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
@@ -21,13 +21,14 @@
#define __xe2lpd_aux_ch_idx(aux_ch) \
(aux_ch >= AUX_CH_USBC1 ? aux_ch : AUX_CH_USBC4 + 1 + (aux_ch) - AUX_CH_A)
-/* TODO: Remove implicit dev_priv */
-#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
-#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPA_AUX_CH_CTL 0x64010
+#define _DPB_AUX_CH_CTL 0x64110
#define _XELPDP_USBC1_AUX_CH_CTL 0x16f210
#define _XELPDP_USBC2_AUX_CH_CTL 0x16f410
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, \
_DPB_AUX_CH_CTL)
+#define VLV_DP_AUX_CH_CTL(aux_ch) _MMIO(VLV_DISPLAY_BASE + \
+ _PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL))
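
For example, VLV_DP_AUX_CH_CTL(AUX_CH_B) resolves to
VLV_DISPLAY_BASE + 0x64110, i.e. the G4x register offset rebased onto the
VLV display MMIO window.
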
#define _XELPDP_DP_AUX_CH_CTL(aux_ch) \
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
_DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL, \
@@ -69,13 +70,14 @@
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1)
-/* TODO: Remove implicit dev_priv */
-#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPA_AUX_CH_DATA1 0x64014
+#define _DPB_AUX_CH_DATA1 0x64114
#define _XELPDP_USBC1_AUX_CH_DATA1 0x16f214
#define _XELPDP_USBC2_AUX_CH_DATA1 0x16f414
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, \
_DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define VLV_DP_AUX_CH_DATA(aux_ch, i) _MMIO(VLV_DISPLAY_BASE + _PORT(aux_ch, _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
#define _XELPDP_DP_AUX_CH_DATA(aux_ch, i) \
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
_DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1, \
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 3a595cd43..8538d1ce2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -330,23 +330,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
-static struct drm_dp_aux *
-intel_dp_hdcp_get_aux(struct intel_connector *connector)
-{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
-
- if (intel_encoder_is_mst(connector->encoder))
- return &connector->port->aux;
- else
- return &dig_port->dp.aux;
-}
-
static int
intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
ret = drm_dp_dpcd_read(aux,
@@ -399,7 +389,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
bool msg_ready = false;
@@ -454,8 +446,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
- struct drm_dp_aux *aux;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -463,8 +456,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
/* No msg_id in DP HDCP2.2 msgs */
bytes_to_write = size - 1;
byte++;
@@ -490,7 +481,8 @@ static
ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
u32 *dev_cnt, u8 *byte)
{
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
u8 *rx_info = byte;
@@ -515,8 +507,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_dp_aux *aux;
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -530,8 +523,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
return -EINVAL;
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
if (ret < 0)
return ret;
@@ -561,13 +552,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
/* Entire msg read timeout since initiate of msg read */
if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
- if (intel_encoder_is_mst(connector->encoder))
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout *
- connector->port->parent->num_ports);
- else
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout);
+ msg_end = ktime_add_ms(ktime_get_raw(),
+ hdcp2_msg_data->msg_read_timeout);
}
ret = drm_dp_dpcd_read(aux, offset,
@@ -651,12 +637,11 @@ static
int intel_dp_hdcp2_capable(struct intel_connector *connector,
bool *capable)
{
- struct drm_dp_aux *aux;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
u8 rx_caps[3];
int ret;
- aux = intel_dp_hdcp_get_aux(connector);
-
*capable = false;
ret = drm_dp_dpcd_read(aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03ac28176..2151e916c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
@@ -43,6 +44,9 @@
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
+#include "intel_psr.h"
+#include "intel_vdsc.h"
#include "skl_scaler.h"
static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
@@ -50,7 +54,7 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
struct intel_crtc_state *crtc_state,
bool dsc)
{
- if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) {
+ if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
int output_bpp = bpp;
/* DisplayPort 2 128b/132b, bits per lane is always 32 */
int symbol_clock = crtc_state->port_clock / 32;
@@ -66,6 +70,73 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
return 0;
}
+static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
+ const struct intel_connector *connector,
+ bool ssc, bool dsc, int bpp_x16)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
+ int dsc_slice_count = 0;
+ int overhead;
+
+ flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
+ flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
+ flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
+
+ if (dsc) {
+ flags |= DRM_DP_BW_OVERHEAD_DSC;
+ /* TODO: add support for bigjoiner */
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->clock,
+ adjusted_mode->hdisplay,
+ false);
+ }
+
+ overhead = drm_dp_bw_overhead(crtc_state->lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slice_count,
+ bpp_x16,
+ flags);
+
+ /*
+ * TODO: clarify whether the fixed FEC overhead in the bspec audio
+ * programming sequence mandates a minimum here.
+ */
+ return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
+}
+
+static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_connector *connector,
+ int overhead,
+ int bpp_x16,
+ struct intel_link_m_n *m_n)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
+ intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ overhead,
+ m_n);
+
+ m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
+}
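
The TU value above is the M/N link ratio scaled to the 64-slot MTP granularity and rounded up. A standalone sketch of that rounding, with a local helper standing in for the kernel's DIV_ROUND_UP_ULL()/mul_u32_u32() and invented sample M/N values:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's DIV_ROUND_UP_ULL(). */
static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint32_t data_m = 123456;   /* invented link M value */
	uint32_t data_n = 1048576;  /* invented link N value */

	/* An MTP carries 64 time slots, so scale M/N by 64 and round
	 * up: a partially used slot still occupies a whole slot. */
	uint64_t tu = div_round_up_u64((uint64_t)data_m * 64, data_n);

	printf("tu = %llu time slots\n", (unsigned long long)tu);
	return 0;
}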
+
+static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
+{
+ int effective_data_rate =
+ intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);
+
+ /*
+ * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
+ * to calculate PBN with the BW overhead passed to it.
+ */
+ return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
+}
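
A worked sketch of the PBN arithmetic above. It assumes, as a reading of the code, that the effective data rate is in kB/s and that the BW overhead is a multiplier in 1e6 units; the divisor 54*1000/64 is then just the MST PBN unit of 54/64 MB/s expressed in kB/s. All sample numbers are invented:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_up_u64(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	int pixel_clock = 594000;  /* kHz, invented 4k-style mode */
	int bpp_x16 = 24 << 4;     /* 24 bpp in .4 fixed point */
	int bw_overhead = 1030000; /* assumed ~3% overhead in 1e6 units */

	/* Assumed model of intel_dp_effective_data_rate(): kB/s with
	 * the overhead multiplier folded in. */
	uint64_t rate = div_round_up_u64((uint64_t)pixel_clock * bpp_x16 *
					 bw_overhead, 1000000ULL * 16 * 8);

	/* One PBN is 54/64 MB/s, i.e. 54000/64 kB/s. */
	uint64_t pbn = div_round_up_u64(rate * 64, 54 * 1000);

	/* ~1835460 kB/s -> 2176 PBN with these numbers. */
	printf("rate = %llu kB/s, pbn = %llu\n",
	       (unsigned long long)rate, (unsigned long long)pbn);
	return 0;
}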
+
static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int max_bpp,
@@ -94,19 +165,67 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
+ if (dsc) {
+ if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
+
+ crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
+ }
+
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
crtc_state->port_clock,
crtc_state->lane_count);
+ drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+ min_bpp, max_bpp);
+
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+ int local_bw_overhead;
+ int remote_bw_overhead;
+ int link_bpp_x16;
+ int remote_tu;
+
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
if (ret)
continue;
- crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- bpp << 4);
+ link_bpp_x16 = to_bpp_x16(dsc ? bpp :
+ intel_dp_output_bpp(crtc_state->output_format, bpp));
+
+ local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ false, dsc, link_bpp_x16);
+ remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ true, dsc, link_bpp_x16);
+
+ intel_dp_mst_compute_m_n(crtc_state, connector,
+ local_bw_overhead,
+ link_bpp_x16,
+ &crtc_state->dp_m_n);
+
+ /*
+ * The TU size programmed to the HW determines which slots in
+ * an MTP frame are used for this stream, which needs to match
+ * the payload size programmed to the first downstream branch
+ * device's payload table.
+ *
+ * Note that atm the payload's PBN value DRM core sends via
+ * the ALLOCATE_PAYLOAD side-band message matches the payload
+ * size (which it calculates from the PBN value) it programs
+ * to the first branch device's payload table. The allocation
+ * in the payload table could be reduced though (to
+ * crtc_state->dp_m_n.tu), provided that the driver doesn't
+ * enable SSC on the corresponding link.
+ */
+ crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+ link_bpp_x16,
+ remote_bw_overhead);
+
+ remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full);
+
+ drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
+ crtc_state->dp_m_n.tu = remote_tu;
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
connector->port,
@@ -115,13 +234,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
return slots;
if (slots >= 0) {
- ret = drm_dp_mst_atomic_check(state);
- /*
- * If we got slots >= 0 and we can fit those based on check
- * then we can exit the loop. Otherwise keep trying.
- */
- if (!ret)
- break;
+ drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
+
+ break;
}
}
@@ -136,7 +251,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
- crtc_state->dsc.compressed_bpp = bpp;
+ crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
@@ -148,10 +263,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
- int link_bpp;
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
@@ -166,16 +278,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
if (slots < 0)
return slots;
- link_bpp = intel_dp_output_bpp(crtc_state->output_format, crtc_state->pipe_bpp);
-
- intel_link_compute_m_n(link_bpp,
- crtc_state->lane_count,
- adjusted_mode->crtc_clock,
- crtc_state->port_clock,
- &crtc_state->dp_m_n,
- crtc_state->fec_enable);
- crtc_state->dp_m_n.tu = slots;
-
return 0;
}
@@ -187,15 +289,12 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
u8 dsc_max_bpc;
- bool need_timeslot_recalc = false;
- u32 last_compressed_bpp;
+ int min_compressed_bpp, max_compressed_bpp;
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
if (DISPLAY_VER(i915) >= 12)
@@ -231,45 +330,31 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
if (max_bpp > sink_max_bpp)
max_bpp = sink_max_bpp;
- min_bpp = max(min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
- max_bpp = min(max_bpp, to_bpp_int(limits->link.max_bpp_x16));
+ max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ crtc_state,
+ max_bpp / 3);
+ max_compressed_bpp = min(max_compressed_bpp,
+ to_bpp_int(limits->link.max_bpp_x16));
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp,
- min_bpp, limits,
- conn_state, 2 * 3, true);
+ min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
+ min_compressed_bpp = max(min_compressed_bpp,
+ to_bpp_int_roundup(limits->link.min_bpp_x16));
- if (slots < 0)
- return slots;
+ drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
+ min_compressed_bpp, max_compressed_bpp);
- last_compressed_bpp = crtc_state->dsc.compressed_bpp;
+ /* Align compressed bpps according to our own constraints */
+ max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
+ crtc_state->pipe_bpp);
+ min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
+ crtc_state->pipe_bpp);
- crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915,
- last_compressed_bpp,
- crtc_state->pipe_bpp);
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
+ min_compressed_bpp, limits,
+ conn_state, 1, true);
- if (crtc_state->dsc.compressed_bpp != last_compressed_bpp)
- need_timeslot_recalc = true;
-
- /*
- * Apparently some MST hubs dislike if vcpi slots are not matching precisely
- * the actual compressed bpp we use.
- */
- if (need_timeslot_recalc) {
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- crtc_state->dsc.compressed_bpp,
- crtc_state->dsc.compressed_bpp,
- limits, conn_state, 2 * 3, true);
- if (slots < 0)
- return slots;
- }
-
- intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
- crtc_state->lane_count,
- adjusted_mode->crtc_clock,
- crtc_state->port_clock,
- &crtc_state->dp_m_n,
- crtc_state->fec_enable);
- crtc_state->dp_m_n.tu = slots;
+ if (slots < 0)
+ return slots;
return 0;
}
@@ -297,7 +382,102 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
}
static bool
+intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * FIXME: Enabling DSC on ICL results in a blank screen and FIFO pipe /
+ * transcoder underruns; re-enable DSC after fixing this issue.
+ */
+ return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state);
+}
+
+static int mode_hblank_period_ns(const struct drm_display_mode *mode)
+{
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
+ NSEC_PER_SEC / 1000),
+ mode->crtc_clock);
+}
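
A standalone re-derivation of mode_hblank_period_ns() with invented CTA-style 4k timings; NSEC_PER_SEC/1000 works out to 1e6 because crtc_clock is in kHz:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* Stand-in for the kernel's DIV_ROUND_CLOSEST_ULL(). */
static long long div_round_closest_ll(long long n, long long d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Invented 3840x2160@60-style timing. */
	int hdisplay = 3840, htotal = 4400;
	int crtc_clock = 594000; /* kHz */

	long long hblank_ns =
		div_round_closest_ll((long long)(htotal - hdisplay) *
				     (NSEC_PER_SEC / 1000), crtc_clock);

	/* ~943 ns here, so the <= 300 ns quirk check would not fire. */
	printf("hblank period: %lld ns\n", hblank_ns);
	return 0;
}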
+
+static bool
+hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->dp.dsc_hblank_expansion_quirk)
+ return false;
+
+ if (mode_hblank_period_ns(adjusted_mode) > 300)
+ return false;
+
+ return true;
+}
+
+static bool
+adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state,
+ struct link_config_limits *limits,
+ bool dsc)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int min_bpp_x16 = limits->link.min_bpp_x16;
+
+ if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
+ return true;
+
+ if (!dsc) {
+ if (intel_dp_mst_dsc_source_support(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name);
+ return false;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name);
+
+ if (limits->link.max_bpp_x16 < to_bpp_x16(24))
+ return false;
+
+ limits->link.min_bpp_x16 = to_bpp_x16(24);
+
+ return true;
+ }
+
+ drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
+
+ if (limits->max_rate < 540000)
+ min_bpp_x16 = to_bpp_x16(13);
+ else if (limits->max_rate < 810000)
+ min_bpp_x16 = to_bpp_x16(10);
+
+ if (limits->link.min_bpp_x16 >= min_bpp_x16)
+ return true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name,
+ BPP_X16_ARGS(min_bpp_x16));
+
+ if (limits->link.max_bpp_x16 < min_bpp_x16)
+ return false;
+
+ limits->link.min_bpp_x16 = min_bpp_x16;
+
+ return true;
+}
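
The DSC branch above raises the minimum link bpp by link rate. A compact sketch of just that floor selection (thresholds copied from the code; the <<4 conversion mirrors the driver's .4 fixed-point to_bpp_x16(), and a return of 0 means no extra floor):

#include <stdio.h>

/* Sketch of the quirk's DSC min-bpp floor by link rate (kHz units,
 * matching the driver's port_clock/max_rate convention). */
static int quirk_min_bpp_x16(int max_rate)
{
	if (max_rate < 540000)      /* below HBR2 */
		return 13 << 4;
	if (max_rate < 810000)      /* below HBR3 */
		return 10 << 4;
	return 0;                   /* no extra floor */
}

int main(void)
{
	int rates[] = { 270000, 540000, 810000 };

	for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("rate %d kHz -> min bpp floor %d\n",
		       rates[i], quirk_min_bpp_x16(rates[i]) >> 4);
	return 0;
}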
+
+static bool
intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
@@ -325,10 +505,16 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
- return intel_dp_compute_config_link_bpp_limits(intel_dp,
- crtc_state,
- dsc,
- limits);
+ if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
+ crtc_state,
+ dsc,
+ limits))
+ return false;
+
+ return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
+ crtc_state,
+ limits,
+ dsc);
}
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
@@ -338,12 +524,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ const struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
bool dsc_needed;
int ret = 0;
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -353,6 +545,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
dsc_needed = intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
+ connector,
pipe_config,
false,
&limits);
@@ -374,7 +567,11 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
str_yes_no(ret),
str_yes_no(intel_dp->force_dsc_en));
+ if (!intel_dp_mst_dsc_source_support(pipe_config))
+ return -EINVAL;
+
if (!intel_dp_mst_compute_config_limits(intel_dp,
+ connector,
pipe_config,
true,
&limits))
@@ -417,7 +614,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
+
+ intel_psr_compute_config(intel_dp, pipe_config, conn_state);
return 0;
}
@@ -458,6 +657,130 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
return transcoders;
}
+static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct drm_dp_mst_port *parent_port)
+{
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 mask = 0;
+ int i;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (!conn_state->base.crtc)
+ continue;
+
+ if (&connector->mst_port->mst_mgr != mst_mgr)
+ continue;
+
+ if (connector->port != parent_port &&
+ !drm_dp_mst_port_downstream_of_parent(mst_mgr,
+ connector->port,
+ parent_port))
+ continue;
+
+ mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
+ }
+
+ return mask;
+}
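
The mask above is one bit per pipe driving a stream downstream of the port. A toy illustration of the accumulation (pipe numbering A=0, B=1, ... matching the driver's enum pipe; the pipe list is invented):

#include <stdio.h>

int main(void)
{
	/* Invented example: pipes A and C drive streams in the topology. */
	int pipes[] = { 0, 2 };
	unsigned char mask = 0;

	for (unsigned i = 0; i < sizeof(pipes) / sizeof(pipes[0]); i++)
		mask |= 1u << pipes[i];   /* BIT(pipe) in kernel terms */

	printf("downstream pipe mask: 0x%02x\n", (unsigned)mask); /* 0x05 */
	return 0;
}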
+
+static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ u8 mst_pipe_mask;
+ u8 fec_pipe_mask = 0;
+ int ret;
+
+ mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* Atomic connector check should've added all the MST CRTCs. */
+ if (drm_WARN_ON(&i915->drm, !crtc_state))
+ return -EINVAL;
+
+ if (crtc_state->fec_enable)
+ fec_pipe_mask |= BIT(crtc->pipe);
+ }
+
+ if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
+ return 0;
+
+ limits->force_fec_pipes |= mst_pipe_mask;
+
+ ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
+ mst_pipe_mask);
+
+ return ret ? : -EAGAIN;
+}
+
+static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_dp_mst_port *mst_port;
+ u8 mst_port_pipes;
+ int ret;
+
+ ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
+ if (ret != -ENOSPC)
+ return ret;
+
+ mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);
+
+ ret = intel_link_bw_reduce_bpp(state, limits,
+ mst_port_pipes, "MST link BW");
+
+ return ret ? : -EAGAIN;
+}
+
+/**
+ * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
+ * @state: intel atomic state
+ * @limits: link BW limits
+ *
+ * Check the link configuration for all modeset MST outputs. If the
+ * configuration is invalid @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ * - 0 if the configuration is valid
+ * - %-EAGAIN, if the configuration is invalid and @limits got updated
+ * with fallback values with which the configuration of all CRTCs in
+ * @state must be recomputed
+ * - Other negative error, if the configuration is invalid without a
+ * fallback possibility, or the check failed for another reason
+ */
+int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ int ret;
+ int i;
+
+ for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
+ ret = intel_dp_mst_check_fec_change(state, mgr, limits);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_check_bw(state, mgr, mst_state,
+ limits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
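
A toy, self-contained model of the -EAGAIN retry contract documented above: the caller keeps recomputing with the reduced limits until the check settles. Everything here (the struct, the threshold, the fallback step) is invented for illustration:

#include <errno.h>
#include <stdio.h>

struct toy_limits { int max_bpp_x16; };

static int toy_check_link(struct toy_limits *limits)
{
	/* Pretend anything above 18 bpp overflows the MST link and
	 * fall back by one bpp step, as an update of @limits would. */
	if (limits->max_bpp_x16 > (18 << 4)) {
		limits->max_bpp_x16 -= 1 << 4;
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	struct toy_limits limits = { .max_bpp_x16 = 24 << 4 };
	int ret;

	do {
		/* a real caller would recompute all CRTC states here */
		ret = toy_check_link(&limits);
	} while (ret == -EAGAIN);

	printf("settled at %d bpp (ret=%d)\n", limits.max_bpp_x16 >> 4, ret);
	return ret;
}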
+
static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -478,19 +801,23 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
* that shares the same MST stream as mode changed,
* intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
* a fastset when possible.
+ *
+ * On TGL+ this is required since each stream goes through a master transcoder,
+ * so if the master transcoder needs modeset, all other streams in the
+ * topology need a modeset. All platforms need to add the atomic state
+ * for all streams in the topology, since a modeset on one may require
+ * changing the MST link BW usage of the others, which in turn needs a
+ * recomputation of the corresponding CRTC states.
*/
static int
-intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
- struct intel_atomic_state *state)
+intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
- if (DISPLAY_VER(dev_priv) < 12)
- return 0;
-
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
@@ -544,7 +871,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
+ ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
@@ -586,10 +913,6 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_dp_mst_topology_state *new_mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
- struct drm_dp_mst_atomic_payload *new_payload =
- drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -597,9 +920,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
- drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
-
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+ intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@@ -633,6 +954,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
+
clear_act_sent(encoder, old_crtc_state);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
@@ -645,6 +968,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (DISPLAY_VER(dev_priv) >= 9)
skl_scaler_disable(old_crtc_state);
else
@@ -661,9 +986,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
- if (last_mst_stream)
- intel_dp_set_infoframes(&dig_port->base, false,
- old_crtc_state, NULL);
+ intel_dp_set_infoframes(&dig_port->base, false,
+ old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
@@ -753,6 +1077,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+ intel_dp_sink_enable_decompression(state, connector, pipe_config);
+
if (first_mst_stream)
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
@@ -775,6 +1101,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
+ intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -791,11 +1118,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
+ bool first_mst_stream = intel_dp->active_mst_links == 1;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
- clear_act_sent(encoder, pipe_config);
-
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
@@ -809,6 +1135,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ clear_act_sent(encoder, pipe_config);
+
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -817,15 +1145,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, pipe_config);
+ if (first_mst_stream)
+ intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
+
drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
- if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
- FECSTALL_DIS_DPTSTREAM_DPTTG);
- else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
- FECSTALL_DIS_DPTSTREAM_DPTTG);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
+ FECSTALL_DIS_DPTSTREAM_DPTTG,
+ pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
intel_audio_sdp_split_update(pipe_config);
@@ -833,12 +1162,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_config);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
-
- /* Enable hdcp if it's desired */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(state, encoder, pipe_config, conn_state);
+ intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -977,6 +1301,18 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (ret)
return ret;
+ /*
+ * TODO:
+ * - Also check if compression would allow for the mode
+ * - Calculate the overhead using drm_dp_bw_overhead() /
+ * drm_dp_bw_channel_coding_efficiency(), similarly to the
+ * compute config code, as drm_dp_calc_pbn_mode() doesn't
+ * account for all the overheads.
+ * - Check here and during compute config the BW reported by
+ * DFP_Link_Available_Payload_Bandwidth_Number (or the
+ * corresponding link capabilities of the sink) in case the
+ * stream is uncompressed for it by the last branch device.
+ */
if (mode_rate > max_rate || mode->clock > max_dotclk ||
drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
@@ -1002,7 +1338,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1150,6 +1486,36 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
}
+static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_desc desc;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ if (!connector->dp.dsc_decompression_aux)
+ return false;
+
+ if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
+ &desc, true) < 0)
+ return false;
+
+ if (!drm_dp_has_quirk(&desc,
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
+ return false;
+
+ if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
+ return false;
+
+ if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
+ return false;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
+ connector->base.base.id, connector->base.name);
+
+ return true;
+}
+
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
@@ -1168,17 +1534,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
return NULL;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
- /*
- * TODO: set the AUX for the actual MST port decompressing the stream.
- * At the moment the driver only supports enabling this globally in the
- * first downstream MST branch, via intel_dp's (root port) AUX.
- */
- intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
+ intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
+ intel_connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(intel_connector);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
@@ -1271,6 +1635,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->audio_enable = intel_audio_codec_enable;
+ intel_encoder->audio_disable = intel_audio_codec_disable;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
@@ -1418,3 +1784,91 @@ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
return 0;
}
+
+static struct intel_connector *
+get_connector_in_state_for_crtc(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *_connector;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, _connector,
+ old_conn_state, new_conn_state, i) {
+ struct intel_connector *connector =
+ to_intel_connector(_connector);
+
+ if (old_conn_state->crtc == &crtc->base ||
+ new_conn_state->crtc == &crtc->base)
+ return connector;
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
+ * @state: atomic state
+ * @crtc: CRTC for which to check the modeset requirement
+ *
+ * Check if any change in an MST topology requires a forced modeset on @crtc in
+ * this topology. One such change is enabling/disabling the DSC decompression
+ * state in the first branch device's UFP DPCD, as required by another CRTC's
+ * stream, while @crtc is still active in the same topology; that requires a
+ * full modeset on @crtc.
+ */
+bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_connector *crtc_connector;
+ const struct drm_connector_state *conn_state;
+ const struct drm_connector *_connector;
+ int i;
+
+ if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
+ INTEL_OUTPUT_DP_MST))
+ return false;
+
+ crtc_connector = get_connector_in_state_for_crtc(state, crtc);
+
+ if (!crtc_connector)
+ /* None of the connectors in the topology needs modeset */
+ return false;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ const struct intel_connector *connector =
+ to_intel_connector(_connector);
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc_iter;
+
+ if (connector->mst_port != crtc_connector->mst_port ||
+ !conn_state->crtc)
+ continue;
+
+ crtc_iter = to_intel_crtc(conn_state->crtc);
+
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
+
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (old_crtc_state->dsc.compression_enable ==
+ new_crtc_state->dsc.compression_enable)
+ continue;
+ /*
+ * Toggling the decompression flag because of this stream in
+ * the first downstream branch device's UFP DPCD may reset the
+ * whole branch device. To avoid the reset while other streams
+ * are also active, modeset the whole MST topology in this
+ * case.
+ */
+ if (connector->dp.dsc_decompression_aux ==
+ &connector->mst_port->aux)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f1815bb72..8ca1d5990 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -13,6 +13,7 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
+struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
@@ -22,5 +23,9 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits);
+bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 62b93d097..4ca910874 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -666,6 +666,20 @@ enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
}
}
+enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
+{
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
+ case PIPE_A:
+ case PIPE_B:
+ return DPIO_PHY0;
+ case PIPE_C:
+ return DPIO_PHY1;
+ }
+}
+
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
@@ -689,50 +703,50 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 val;
int i;
vlv_dpio_get(dev_priv);
/* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
@@ -745,7 +759,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
}
/*
@@ -755,23 +769,23 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* 27 for ch0 and ch1.
*/
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
}
vlv_dpio_put(dev_priv);
@@ -782,43 +796,43 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 val;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
}
}
@@ -829,6 +843,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -851,40 +866,40 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
}
/*
@@ -892,12 +907,12 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
vlv_dpio_put(dev_priv);
}
@@ -910,21 +925,21 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
int data, i, stagger;
u32 val;
vlv_dpio_get(dev_priv);
/* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting*/
@@ -934,7 +949,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
@@ -950,17 +965,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
else
stagger = 0x2;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
}
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -968,7 +983,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -998,19 +1013,20 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
}
vlv_dpio_put(dev_priv);
@@ -1036,22 +1052,22 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+ vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
vlv_dpio_put(dev_priv);
}
@@ -1063,24 +1079,24 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
/* Program Tx lane resets to default */
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
vlv_dpio_put(dev_priv);
}
@@ -1094,23 +1110,24 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
vlv_dpio_put(dev_priv);
}
@@ -1122,10 +1139,10 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 4d43dbbdf..9adc4e8c1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -44,6 +44,7 @@ u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
+enum dpio_phy vlv_pipe_to_phy(enum pipe pipe);
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
@@ -116,6 +117,10 @@ static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_p
{
return DPIO_PHY0;
}
+static inline enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
+{
+ return DPIO_PHY0;
+}
static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
return DPIO_CH0;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d41c1dc9f..303865537 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -16,6 +16,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
@@ -311,7 +312,7 @@ static const struct intel_limit intel_limits_bxt = {
* divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -342,7 +343,7 @@ int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2 * 5;
@@ -368,6 +369,176 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ return 120000;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ return 96000;
+ else
+ return 48000;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+ u32 fp;
+ struct dpll clock;
+ int port_clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = pipe_config->dpll_hw_state.fp0;
+ else
+ fp = pipe_config->dpll_hw_state.fp1;
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev_priv)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (DISPLAY_VER(dev_priv) != 2) {
+ if (IS_PINEVIEW(dev_priv))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return;
+ }
+
+ if (IS_PINEVIEW(dev_priv))
+ port_clock = pnv_calc_dpll_params(refclk, &clock);
+ else
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ } else {
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
+
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+ }
+
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ }
+
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = port_clock;
+}
+
+void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct dpll clock;
+ u32 mdiv;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+}
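
A hedged re-derivation of vlv_calc_dpll_params() (visible in part earlier in this diff: m = m1*m2, p = p1*p2*5, then vco = refclk*m/n and dot = vco/p with round-to-closest division) for one invented divider set that lands on the DP HBR port clock:

#include <stdio.h>

static int div_round_closest(long long n, long long d)
{
	return (int)((n + d / 2) / d);
}

int main(void)
{
	int refclk = 100000;                         /* kHz */
	int m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 2;  /* invented dividers */

	int m = m1 * m2;       /* 54 */
	int p = p1 * p2 * 5;   /* 20 */
	int vco = div_round_closest((long long)refclk * m, n); /* 5400000 kHz */
	int dot = div_round_closest(vco, p);                   /* 270000 kHz */

	printf("vco = %d kHz, port_clock = %d kHz (DP HBR)\n", vco, dot);
	return 0;
}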
+
+void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct dpll clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+}
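
The CHV readback reassembles M2 as a 10.22 fixed-point value: the integer part from PLL_DW0 shifted up 22 bits, plus the 22-bit fraction from PLL_DW2 when DPIO_CHV_FRAC_DIV_EN is set. A small sketch of the effective multiplier under that assumption:

	/* Illustrative only: effective M2, scaled by 1000 to stay in integer math. */
	static unsigned long long sketch_chv_m2_x1000(u32 m2_fixed)
	{
		return ((unsigned long long)m2_fixed * 1000) >> 22;
	}

For example, m2_fixed = (2 << 22) | 0x200000 yields 2500, i.e. M2 = 2.5.
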
+
/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
@@ -1003,12 +1174,10 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
int ret;
ret = intel_cx0pll_calc_state(crtc_state, encoder);
@@ -1016,10 +1185,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
- if (intel_is_c10phy(i915, phy))
- crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
- else
- crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -1645,7 +1811,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
}
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+ enum dpio_phy phy)
{
u32 reg_val;
@@ -1653,30 +1819,31 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
* PLLB opamp always calibrates to max value of 0x3f, force enable it
* and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
}
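
Every hunk in this function is the same sideband read-modify-write, just with the pipe argument swapped for a phy. A hypothetical helper makes the shape explicit; vlv_dpio_rmw() does not exist in the driver and is shown only for illustration:

	/* Hypothetical helper, not part of the patch. */
	static void vlv_dpio_rmw(struct drm_i915_private *i915, enum dpio_phy phy,
				 int reg, u32 clear, u32 set)
	{
		u32 val = vlv_dpio_read(i915, phy, reg);

		val &= ~clear;
		val |= set;
		vlv_dpio_write(i915, phy, reg, val);
	}

The first hunk above would then read vlv_dpio_rmw(dev_priv, phy, VLV_PLL_DW9(1), 0xff, 0x30).
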
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -1694,18 +1861,18 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, pipe);
+ vlv_pllb_recal_opamp(dev_priv, phy);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+ vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -1719,46 +1886,46 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
vlv_dpio_put(dev_priv);
}
@@ -1809,6 +1976,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
@@ -1825,39 +1993,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
5 << DPIO_CHV_S1_DIV_SHIFT |
bestp1 << DPIO_CHV_P1_DIV_SHIFT |
bestp2 << DPIO_CHV_P2_DIV_SHIFT |
1 << DPIO_CHV_K_DIV_SHIFT);
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
DPIO_CHV_M1_DIV_BY_2 |
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
if (bestm2_frac)
dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
/* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
if (!bestm2_frac)
dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
if (vco == 5400000) {
@@ -1882,16 +2050,16 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
/* AFC Recal */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
vlv_dpio_put(dev_priv);
@@ -1903,14 +2071,15 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 tmp;
vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
vlv_dpio_put(dev_priv);
@@ -2031,6 +2200,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2047,9 +2217,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index bbc30542f..ac01bb19c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -20,8 +20,6 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int vlv_calc_dpll_params(int refclk, struct dpll *clock);
-int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
@@ -41,6 +39,13 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index fb694ff9d..87deb135c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -219,6 +219,26 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915,
return MG_PLL_ENABLE(tc_port);
}
+static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (pll->info->power_domain)
+ pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
+
+ pll->info->funcs->enable(i915, pll);
+ pll->on = true;
+}
+
+static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+
+ if (pll->info->power_domain)
+ intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
+}
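
These helpers centralize what the removed EHL/JSL special cases further down did by hand: hold the DPLL's extra display power domain exactly while the PLL is on. The ordering they enforce, sketched for a dpll_info with .power_domain set:

	/*
	 * enable:   wakeref = intel_display_power_get(i915, domain);
	 *           funcs->enable(i915, pll);    pll->on = true;
	 * disable:  funcs->disable(i915, pll);   pll->on = false;
	 *           intel_display_power_put(i915, domain, wakeref);
	 *
	 * The get precedes enable and the put follows disable, so the
	 * domain (e.g. POWER_DOMAIN_DC_OFF for EHL DPLL4) can never be
	 * released while the PLL is running.
	 */
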
+
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
@@ -258,8 +278,8 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&i915->drm, pll->on);
drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
- pll->info->funcs->enable(i915, pll);
- pll->on = true;
+
+ _intel_enable_shared_dpll(i915, pll);
out:
mutex_unlock(&i915->display.dpll.lock);
@@ -304,8 +324,8 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
goto out;
drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
- pll->info->funcs->disable(i915, pll);
- pll->on = false;
+
+ _intel_disable_shared_dpll(i915, pll);
out:
mutex_unlock(&i915->display.dpll.lock);
@@ -3844,18 +3864,6 @@ static void combo_pll_enable(struct drm_i915_private *i915,
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
-
- /*
- * We need to disable DC states when this DPLL is enabled.
- * This can be done by taking a reference on DPLL4 power
- * domain.
- */
- pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DC_OFF);
- }
-
icl_pll_power_enable(i915, pll, enable_reg);
icl_dpll_write(i915, pll);
@@ -3951,11 +3959,6 @@ static void combo_pll_disable(struct drm_i915_private *i915,
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
icl_pll_disable(i915, pll, enable_reg);
-
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->info->id == DPLL_ID_EHL_DPLL4)
- intel_display_power_put(i915, POWER_DOMAIN_DC_OFF,
- pll->wakeref);
}
static void tbt_pll_disable(struct drm_i915_private *i915,
@@ -4048,7 +4051,8 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
static const struct dpll_info ehl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
+ { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
+ .power_domain = POWER_DOMAIN_DC_OFF, },
{}
};
@@ -4378,12 +4382,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DC_OFF);
- }
+ if (pll->on && pll->info->power_domain)
+ pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
pll->state.pipe_mask = 0;
for_each_intel_crtc(&i915->drm, crtc) {
@@ -4430,8 +4430,7 @@ static void sanitize_dpll_state(struct drm_i915_private *i915,
"%s enabled but not in use, disabling\n",
pll->info->name);
- pll->info->funcs->disable(i915, pll);
- pll->on = false;
+ _intel_disable_shared_dpll(i915, pll);
}
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 1a88ccc98..5cdec77cf 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
+#include "intel_display_power.h"
#include "intel_wakeref.h"
#define for_each_shared_dpll(__i915, __pll, __i) \
@@ -270,6 +271,11 @@ struct dpll_info {
*/
enum intel_dpll_id id;
+ /**
+ * @power_domain: extra power domain required by the DPLL
+ */
+ enum intel_display_power_domain power_domain;
+
#define INTEL_DPLL_ALWAYS_ON (1 << 0)
#define INTEL_DPLL_IS_ALT_PORT_DPLL (1 << 1)
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 48582b31b..b29bceff7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,8 +9,6 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -318,25 +316,3 @@ void intel_dpt_destroy(struct i915_address_space *vm)
i915_vm_put(&dpt->vm);
}
-void intel_dpt_configure(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) == 14) {
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
- PLANE_CHICKEN_DISABLE_DPT,
- i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT);
- }
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
- CHICKEN_MISC_DISABLE_DPT,
- i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
- }
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index d9a166550..e18a9f767 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -10,7 +10,6 @@ struct drm_i915_private;
struct i915_address_space;
struct i915_vma;
-struct intel_crtc;
struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
@@ -20,6 +19,5 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
-void intel_dpt_configure(struct intel_crtc *crtc);
#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
new file mode 100644
index 000000000..cdba47165
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpt_common.h"
+
+void intel_dpt_configure(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) == 14) {
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ PLANE_CHICKEN_DISABLE_DPT,
+ i915->display.params.enable_dpt ? 0 :
+ PLANE_CHICKEN_DISABLE_DPT);
+ }
+ } else if (DISPLAY_VER(i915) == 13) {
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ CHICKEN_MISC_DISABLE_DPT,
+ i915->display.params.enable_dpt ? 0 :
+ CHICKEN_MISC_DISABLE_DPT);
+ }
+}
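
Both branches rely on intel_de_rmw(), which, as an assumption for this sketch, reads the register, clears the mask bits, sets the new bits, and returns the old value:

	/* Sketch of intel_de_rmw(i915, reg, clear, set) semantics: */
	u32 old = intel_de_read(i915, reg);

	intel_de_write(i915, reg, (old & ~clear) | set);
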
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.h b/drivers/gpu/drm/i915/display/intel_dpt_common.h
new file mode 100644
index 000000000..6d7de4051
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_COMMON_H__
+#define __INTEL_DPT_COMMON_H__
+
+struct intel_crtc;
+
+void intel_dpt_configure(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 15c3dd5d3..54d9abeb2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,9 +4,6 @@
*
*/
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_lmem.h"
-
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -14,12 +11,13 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
+#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"
-struct i915_vma;
+#define CACHELINE_BYTES 64
enum dsb_id {
INVALID_DSB = -1,
@@ -32,8 +30,7 @@ enum dsb_id {
struct intel_dsb {
enum dsb_id id;
- u32 *cmd_buf;
- struct i915_vma *vma;
+ struct intel_dsb_buffer dsb_buf;
struct intel_crtc *crtc;
/*
@@ -109,15 +106,17 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const u32 *buf = dsb->cmd_buf;
int i;
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n",
crtc->base.base.id, crtc->base.name, dsb->id);
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(&i915->drm,
- " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4, buf[i], buf[i+1], buf[i+2], buf[i+3]);
+ " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
+ intel_dsb_buffer_read(&dsb->dsb_buf, i),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
drm_dbg_kms(&i915->drm, "}\n");
}
@@ -129,8 +128,6 @@ static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
- u32 *buf = dsb->cmd_buf;
-
if (!assert_dsb_has_room(dsb))
return;
@@ -139,14 +136,13 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->ins_start_offset = dsb->free_pos;
- buf[dsb->free_pos++] = ldw;
- buf[dsb->free_pos++] = udw;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
u32 opcode, i915_reg_t reg)
{
- const u32 *buf = dsb->cmd_buf;
u32 prev_opcode, prev_reg;
/*
@@ -157,8 +153,10 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
if (dsb->free_pos == 0)
return false;
- prev_opcode = buf[dsb->ins_start_offset + 1] & ~DSB_REG_VALUE_MASK;
- prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
+ prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}
@@ -191,6 +189,8 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val)
{
+ u32 old_val;
+
/*
* For example, the buffer will look like below for 3 dwords of an
* auto-increment register:
@@ -214,31 +214,32 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
(DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
i915_mmio_reg_offset(reg));
} else {
- u32 *buf = dsb->cmd_buf;
-
if (!assert_dsb_has_room(dsb))
return;
/* convert to indexed write? */
if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
- u32 prev_val = buf[dsb->ins_start_offset + 0];
+ u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 0);
- buf[dsb->ins_start_offset + 0] = 1; /* count */
- buf[dsb->ins_start_offset + 1] =
- (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
- buf[dsb->ins_start_offset + 2] = prev_val;
+ intel_dsb_buffer_write(&dsb->dsb_buf,
+ dsb->ins_start_offset + 0, 1); /* count */
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg));
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
dsb->free_pos++;
}
- buf[dsb->free_pos++] = val;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
/* Update the count */
- buf[dsb->ins_start_offset]++;
+ old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
/* If the number of data words is odd, the last dword should be 0. */
if (dsb->free_pos & 0x1)
- buf[dsb->free_pos] = 0;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
}
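
For clarity, the transformation the buffered path performs when a second write targets the same register (dword offsets relative to ins_start_offset; opcode names abbreviated):

	/*
	 * before (plain MMIO write):    after (indexed write):
	 *   [+0] val0                     [+0] 2                   count
	 *   [+1] MMIO_WRITE | reg         [+1] INDEXED_WRITE | reg
	 *                                 [+2] val0
	 *                                 [+3] val1
	 *
	 * Each further write to the same register appends one dword and
	 * bumps the count; an odd number of data dwords gets a zero pad.
	 */
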
@@ -297,8 +298,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
- memset(&dsb->cmd_buf[dsb->free_pos], 0,
- aligned_tail - tail);
+ intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
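
A worked instance of the alignment, assuming CACHELINE_BYTES == 64 and free_pos counted in dwords:

	/*
	 * free_pos = 27 dwords  ->  tail = 27 * 4 = 108 bytes
	 * aligned_tail = ALIGN(108, 64) = 128
	 * the 20-byte gap is zeroed and free_pos becomes 128 / 4 = 32
	 */
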
@@ -317,7 +318,7 @@ void intel_dsb_finish(struct intel_dsb *dsb)
intel_dsb_align_tail(dsb);
- i915_gem_object_flush_map(dsb->vma->obj);
+ intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}
static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
@@ -375,7 +376,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
dsb_chicken(crtc));
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma));
+ intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
if (dewake_scanline >= 0) {
int diff, hw_dewake_scanline;
@@ -397,7 +398,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
}
intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma) + tail);
+ intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}
/**
@@ -422,7 +423,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
- u32 offset = i915_ggtt_offset(dsb->vma);
+ u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
@@ -459,12 +460,9 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
struct intel_dsb *dsb;
- struct i915_vma *vma;
unsigned int size;
- u32 *buf;
if (!HAS_DSB(i915))
return NULL;
@@ -478,37 +476,13 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
- if (HAS_LMEM(i915)) {
- obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
- I915_BO_ALLOC_CONTIGUOUS);
- if (IS_ERR(obj))
- goto out_put_rpm;
- } else {
- obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
- if (IS_ERR(obj))
- goto out_put_rpm;
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- }
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- goto out_put_rpm;
- }
-
- buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
- if (IS_ERR(buf)) {
- i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- }
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
dsb->id = DSB1;
- dsb->vma = vma;
dsb->crtc = crtc;
- dsb->cmd_buf = buf;
dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
@@ -536,6 +510,6 @@ out:
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ intel_dsb_buffer_cleanup(&dsb->dsb_buf);
kfree(dsb);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
new file mode 100644
index 000000000..c77d48bda
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_display_types.h"
+#include "intel_dsb_buffer.h"
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+{
+ return i915_ggtt_offset(dsb_buf->vma);
+}
+
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+{
+ dsb_buf->cmd_buf[idx] = val;
+}
+
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+{
+ return dsb_buf->cmd_buf[idx];
+}
+
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+{
+ WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
+
+ memset(&dsb_buf->cmd_buf[idx], val, size);
+}
+
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return false;
+ } else {
+ obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return false;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return false;
+ }
+
+ buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ return false;
+ }
+
+ dsb_buf->vma = vma;
+ dsb_buf->cmd_buf = buf;
+ dsb_buf->buf_size = size;
+
+ return true;
+}
+
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+{
+ i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
+}
+
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+{
+ i915_gem_object_flush_map(dsb_buf->vma->obj);
+}
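
Taken together, the new file leaves intel_dsb.c with a small, GEM-free surface. A minimal lifecycle sketch (the crtc, size, ldw and udw parameters are illustrative; error handling elided):

	/* Illustrative call sequence, not driver code. */
	static bool sketch_dsb_buffer_roundtrip(struct intel_crtc *crtc,
						size_t size, u32 ldw, u32 udw)
	{
		struct intel_dsb_buffer buf;
		u32 hw_head;

		if (!intel_dsb_buffer_create(crtc, &buf, size))
			return false;

		intel_dsb_buffer_write(&buf, 0, ldw);		/* emit dwords */
		intel_dsb_buffer_write(&buf, 1, udw);
		intel_dsb_buffer_flush_map(&buf);		/* flush the WC mapping */

		hw_head = intel_dsb_buffer_ggtt_offset(&buf);	/* value for DSB_HEAD */
		(void)hw_head;

		intel_dsb_buffer_cleanup(&buf);
		return true;
	}
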
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
new file mode 100644
index 000000000..425acd393
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_BUFFER_H
+#define _INTEL_DSB_BUFFER_H
+
+#include <linux/types.h>
+
+struct intel_crtc;
+struct i915_vma;
+
+struct intel_dsb_buffer {
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+ size_t buf_size;
+};
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf,
+ size_t size);
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 24b2cbcfc..a5d7fc841 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -55,43 +55,6 @@
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
-/* base offsets for gpio pads */
-#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
-#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
-#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
-#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
-#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
-#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
-#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
-#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
-#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
-#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
-#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
-#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
-
-#define VLV_GPIO_PCONF0(base_offset) (base_offset)
-#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
-
-struct gpio_map {
- u16 base_offset;
- bool init;
-};
-
-static struct gpio_map vlv_gpio_table[] = {
- { VLV_GPIO_NC_0_HV_DDI0_HPD },
- { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
- { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
- { VLV_GPIO_NC_3_PANEL0_VDDEN },
- { VLV_GPIO_NC_4_PANEL0_BKLTEN },
- { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
- { VLV_GPIO_NC_6_HV_DDI1_HPD },
- { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
- { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
- { VLV_GPIO_NC_9_PANEL1_VDDEN },
- { VLV_GPIO_NC_10_PANEL1_BKLTEN },
- { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
-};
-
struct i2c_adapter_lookup {
u16 slave_addr;
struct intel_dsi *intel_dsi;
@@ -103,19 +66,6 @@ struct i2c_adapter_lookup {
#define CHV_GPIO_IDX_START_SW 100
#define CHV_GPIO_IDX_START_SE 198
-#define CHV_VBT_MAX_PINS_PER_FMLY 15
-
-#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
-#define CHV_GPIO_GPIOEN (1 << 15)
-#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
-#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
-#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
-#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
-#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
-
-#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
-#define CHV_GPIO_CFGLOCK (1 << 31)
-
/* ICL DSI Display GPIO Pins */
#define ICL_GPIO_DDSP_HPD_A 0
#define ICL_GPIO_L_VDDEN_1 1
@@ -142,7 +92,7 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
if (seq_port) {
if (intel_dsi->ports & BIT(PORT_B))
return PORT_B;
- else if (intel_dsi->ports & BIT(PORT_C))
+ if (intel_dsi->ports & BIT(PORT_C))
return PORT_C;
}
@@ -243,75 +193,93 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static void vlv_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
+ const char *con_id, u8 idx, bool value)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct gpio_map *map;
- u16 pconf0, padval;
- u32 tmp;
- u8 port;
-
- if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
- drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
- gpio_index);
- return;
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
+
+ if (gpio_desc) {
+ gpiod_set_value(gpio_desc, value);
+ } else {
+ gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_desc)) {
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%pe)\n",
+ gpio_index, gpio_desc);
+ return;
+ }
+
+ soc_gpio_table[gpio_index] = gpio_desc;
}
+}
- map = &vlv_gpio_table[gpio_index];
+static void soc_opaque_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_index, const char *chip,
+ const char *con_id, u8 idx, bool value)
+{
+ struct gpiod_lookup_table *lookup;
- if (connector->panel.vbt.dsi.seq_version >= 3) {
- /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
- port = IOSF_PORT_GPIO_NC;
- } else {
- if (gpio_source == 0) {
- port = IOSF_PORT_GPIO_NC;
- } else if (gpio_source == 1) {
+ lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+ if (!lookup)
+ return;
+
+ lookup->dev_id = "0000:00:02.0";
+ lookup->table[0] =
+ GPIO_LOOKUP_IDX(chip, idx, con_id, idx, GPIO_ACTIVE_HIGH);
+
+ gpiod_add_lookup_table(lookup);
+
+ soc_gpio_set_value(connector, gpio_index, con_id, idx, value);
+
+ gpiod_remove_lookup_table(lookup);
+ kfree(lookup);
+}
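
The transient lookup table bridges a named gpiochip to the i915 PCI device just long enough for soc_gpio_set_value() to resolve the line, after which the descriptor stays cached. With illustrative values:

	/*
	 * chip "INT33FC:01", con_id "Panel N", idx 3 registers one entry:
	 *
	 *   dev_id "0000:00:02.0", function "Panel N", index 3
	 *       -> gpiochip "INT33FC:01", line 3, GPIO_ACTIVE_HIGH
	 *
	 * devm_gpiod_get_index(dev, "Panel N", 3, ...) matches it while
	 * the table is registered; the table is then removed again.
	 */
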
+
+static void vlv_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ if (connector->panel.vbt.dsi.seq_version < 3) {
+ if (gpio_source == 1) {
drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
return;
- } else {
+ }
+ if (gpio_source > 1) {
drm_dbg_kms(&dev_priv->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
}
- pconf0 = VLV_GPIO_PCONF0(map->base_offset);
- padval = VLV_GPIO_PAD_VAL(map->base_offset);
-
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
- if (!map->init) {
- /* FIXME: remove constant below */
- vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
- map->init = true;
- }
-
- tmp = 0x4 | value;
- vlv_iosf_sb_write(dev_priv, port, padval, tmp);
- vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ soc_opaque_gpio_set_value(connector, gpio_index,
+ "INT33FC:01", "Panel N", gpio_index, value);
}
-static void chv_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void chv_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u16 cfg0, cfg1;
- u16 family_num;
- u8 port;
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
/* XXX: it's unclear whether 255->57 is part of SE. */
- gpio_index -= CHV_GPIO_IDX_START_SE;
- port = CHV_IOSF_PORT_GPIO_SE;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:03", "Panel SE",
+ gpio_index - CHV_GPIO_IDX_START_SE, value);
} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
- gpio_index -= CHV_GPIO_IDX_START_SW;
- port = CHV_IOSF_PORT_GPIO_SW;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:00", "Panel SW",
+ gpio_index - CHV_GPIO_IDX_START_SW, value);
} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
- gpio_index -= CHV_GPIO_IDX_START_E;
- port = CHV_IOSF_PORT_GPIO_E;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:02", "Panel E",
+ gpio_index - CHV_GPIO_IDX_START_E, value);
} else {
- port = CHV_IOSF_PORT_GPIO_N;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+ gpio_index - CHV_GPIO_IDX_START_N, value);
}
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
@@ -328,56 +296,15 @@ static void chv_exec_gpio(struct intel_connector *connector,
return;
}
- port = CHV_IOSF_PORT_GPIO_N;
- }
-
- family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
- gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
-
- cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
- cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
-
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
- vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
- vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
- CHV_GPIO_GPIOTXSTATE(value));
- vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-}
-
-static void bxt_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- /* XXX: this table is a quick ugly hack. */
- static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
- struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
-
- if (!gpio_desc) {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
- NULL, gpio_index,
- value ? GPIOD_OUT_LOW :
- GPIOD_OUT_HIGH);
-
- if (IS_ERR_OR_NULL(gpio_desc)) {
- drm_err(&dev_priv->drm,
- "GPIO index %u request failed (%ld)\n",
- gpio_index, PTR_ERR(gpio_desc));
- return;
- }
-
- bxt_gpio_table[gpio_index] = gpio_desc;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+ gpio_index - CHV_GPIO_IDX_START_N, value);
}
-
- gpiod_set_value(gpio_desc, value);
}
-static void icl_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void bxt_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+ soc_gpio_set_value(connector, gpio_index, NULL, gpio_index, value);
}
enum {
@@ -462,44 +389,45 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct intel_connector *connector = intel_dsi->attached_connector;
- u8 gpio_source, gpio_index = 0, gpio_number;
+ u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
- bool native = DISPLAY_VER(dev_priv) >= 11;
+ int size;
+ bool native = DISPLAY_VER(i915) >= 11;
- if (connector->panel.vbt.dsi.seq_version >= 3)
- gpio_index = *data++;
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ size = 3;
- gpio_number = *data++;
+ gpio_index = data[0];
+ gpio_number = data[1];
+ value = data[2] & BIT(0);
- /* gpio source in sequence v2 only */
- if (connector->panel.vbt.dsi.seq_version == 2)
- gpio_source = (*data >> 1) & 3;
- else
- gpio_source = 0;
+ if (connector->panel.vbt.dsi.seq_version >= 4 && data[2] & BIT(1))
+ native = false;
+ } else {
+ size = 2;
- if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
- native = false;
+ gpio_number = data[0];
+ value = data[1] & BIT(0);
- /* pull up/down */
- value = *data++ & 1;
+ if (connector->panel.vbt.dsi.seq_version == 2)
+ gpio_source = (data[1] >> 1) & 3;
+ }
- drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(dev_priv, gpio_number, value);
- else if (DISPLAY_VER(dev_priv) >= 11)
- icl_exec_gpio(connector, gpio_source, gpio_index, value);
- else if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(connector, gpio_source, gpio_number, value);
- else
- bxt_exec_gpio(connector, gpio_source, gpio_index, value);
-
- return data;
+ icl_native_gpio_set_value(i915, gpio_number, value);
+ else if (DISPLAY_VER(i915) >= 9)
+ bxt_gpio_set_value(connector, gpio_index, value);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
+ else if (IS_CHERRYVIEW(i915))
+ chv_gpio_set_value(connector, gpio_source, gpio_number, value);
+
+ return data + size;
}
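
The rewritten parser consumes a fixed-size element per sequence version; the byte layout it assumes:

	/*
	 * seq v3+, 3 bytes:   [0] gpio_index   [1] gpio_number
	 *                     [2] flags: bit 0 = value,
	 *                                bit 1 = forbid native GPIO (v4+)
	 * seq v1/v2, 2 bytes: [0] gpio_number
	 *                     [1] flags: bit 0 = value,
	 *                                bits 2:1 = gpio_source (v2 only)
	 */
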
#ifdef CONFIG_ACPI
@@ -658,6 +586,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
+ [MIPI_SEQ_END] = "MIPI_SEQ_END",
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
@@ -673,10 +602,10 @@ static const char * const seq_name[] = {
static const char *sequence_name(enum mipi_seq seq_id)
{
- if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+ if (seq_id < ARRAY_SIZE(seq_name))
return seq_name[seq_id];
- else
- return "(unknown)";
+
+ return "(unknown)";
}
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
@@ -707,13 +636,10 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (connector->panel.vbt.dsi.seq_version >= 3)
data += 4;
- while (1) {
+ while (*data != MIPI_SEQ_ELEM_END) {
u8 operation_byte = *data++;
u8 operation_size = 0;
- if (operation_byte == MIPI_SEQ_ELEM_END)
- break;
-
if (operation_byte < ARRAY_SIZE(exec_elem))
mipi_elem_exec = exec_elem[operation_byte];
else
@@ -873,36 +799,34 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
* multiply by 100 to preserve remainder
*/
if (intel_dsi->video_mode == BURST_MODE) {
- if (mipi_config->target_burst_mode_freq) {
- u32 bitrate = intel_dsi_bitrate(intel_dsi);
-
- /*
- * Sometimes the VBT contains a slightly lower clock,
- * then the bitrate we have calculated, in this case
- * just replace it with the calculated bitrate.
- */
- if (mipi_config->target_burst_mode_freq < bitrate &&
- intel_fuzzy_clock_check(
- mipi_config->target_burst_mode_freq,
- bitrate))
- mipi_config->target_burst_mode_freq = bitrate;
-
- if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm,
- "Burst mode freq is less than computed\n");
- return false;
- }
+ u32 bitrate;
- burst_mode_ratio = DIV_ROUND_UP(
- mipi_config->target_burst_mode_freq * 100,
- bitrate);
+ if (mipi_config->target_burst_mode_freq == 0) {
+ drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ return false;
+ }
- intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
- } else {
- drm_err(&dev_priv->drm,
- "Burst mode target is not set\n");
+ bitrate = intel_dsi_bitrate(intel_dsi);
+
+ /*
+ * Sometimes the VBT contains a slightly lower clock than
+ * the bitrate we have calculated; in this case just replace it
+ * with the calculated bitrate.
+ */
+ if (mipi_config->target_burst_mode_freq < bitrate &&
+ intel_fuzzy_clock_check(mipi_config->target_burst_mode_freq,
+ bitrate))
+ mipi_config->target_burst_mode_freq = bitrate;
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
return false;
}
+
+ burst_mode_ratio =
+ DIV_ROUND_UP(mipi_config->target_burst_mode_freq * 100, bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else
burst_mode_ratio = 100;
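
A worked example of the restructured ratio math, with invented numbers:

	/*
	 * target_burst_mode_freq = 540000 kHz, bitrate = 500000 kHz
	 * burst_mode_ratio = DIV_ROUND_UP(540000 * 100, 500000) = 108
	 * pclk = DIV_ROUND_UP(pclk * 108, 100), i.e. pclk grows by 8%
	 *
	 * The x100 scaling keeps two digits of the remainder, per the
	 * comment above the block.
	 */
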
@@ -964,6 +888,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ struct gpiod_lookup_table *gpiod_lookup_table = NULL;
bool want_backlight_gpio = false;
bool want_panel_gpio = false;
struct pinctrl *pinctrl;
@@ -971,12 +896,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
- gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
- gpiod_add_lookup_table(&soc_panel_gpio_table);
+ gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -993,8 +918,11 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
"Failed to set pinmux to PWM\n");
}
+ if (gpiod_lookup_table)
+ gpiod_add_lookup_table(gpiod_lookup_table);
+
if (want_panel_gpio) {
- intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
drm_err(&dev_priv->drm,
"Failed to own gpio for panel control\n");
@@ -1004,38 +932,14 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(dev->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
drm_err(&dev_priv->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
}
-}
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
-{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_connector *connector = intel_dsi->attached_connector;
- struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
-
- if (intel_dsi->gpio_panel) {
- gpiod_put(intel_dsi->gpio_panel);
- intel_dsi->gpio_panel = NULL;
- }
-
- if (intel_dsi->gpio_backlight) {
- gpiod_put(intel_dsi->gpio_backlight);
- intel_dsi->gpio_backlight = NULL;
- }
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- mipi_config->pwm_blc == PPS_BLC_PMIC)
- gpiod_remove_lookup_table(&pmic_panel_gpio_table);
-
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
- pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
- gpiod_remove_lookup_table(&soc_panel_gpio_table);
- }
+ if (gpiod_lookup_table)
+ gpiod_remove_lookup_table(gpiod_lookup_table);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
index 468d873fa..3462fcc76 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -13,7 +13,6 @@ struct intel_dsi;
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 646f367a1..0c0144eaa 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -4,7 +4,6 @@
*/
#include <drm/drm_blend.h>
-#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
#include <linux/dma-fence.h>
@@ -15,6 +14,7 @@
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
+#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
@@ -301,6 +301,33 @@ lookup_format_info(const struct drm_format_info formats[],
return NULL;
}
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+ const struct intel_modifier_desc *md;
+ u8 tiling_caps;
+
+ md = lookup_modifier_or_null(fb_modifier);
+ if (!md)
+ return I915_TILING_NONE;
+
+ tiling_caps = md->plane_caps & INTEL_PLANE_CAP_TILING_MASK;
+
+ switch (tiling_caps) {
+ case INTEL_PLANE_CAP_TILING_Y:
+ return I915_TILING_Y;
+ case INTEL_PLANE_CAP_TILING_X:
+ return I915_TILING_X;
+ case INTEL_PLANE_CAP_TILING_4:
+ case INTEL_PLANE_CAP_TILING_Yf:
+ case INTEL_PLANE_CAP_TILING_NONE:
+ return I915_TILING_NONE;
+ default:
+ MISSING_CASE(tiling_caps);
+ return I915_TILING_NONE;
+ }
+}
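
With the helper now exported and tolerant of unknown modifiers via lookup_modifier_or_null(), the mapping it implements is, in effect:

	/*
	 * X-tiled modifier       -> I915_TILING_X
	 * Y-tiled modifier       -> I915_TILING_Y
	 * 4-tile / Yf / linear   -> I915_TILING_NONE
	 * unknown modifier       -> I915_TILING_NONE (no fence tiling)
	 */
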
+
/**
* intel_fb_get_format_info: Get a modifier specific format information
* @cmd: FB add command structure
@@ -737,26 +764,6 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
return ALIGN(height, tile_height);
}
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
- u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
- INTEL_PLANE_CAP_TILING_MASK;
-
- switch (tiling_caps) {
- case INTEL_PLANE_CAP_TILING_Y:
- return I915_TILING_Y;
- case INTEL_PLANE_CAP_TILING_X:
- return I915_TILING_X;
- case INTEL_PLANE_CAP_TILING_4:
- case INTEL_PLANE_CAP_TILING_Yf:
- case INTEL_PLANE_CAP_TILING_NONE:
- return I915_TILING_NONE;
- default:
- MISSING_CASE(tiling_caps);
- return I915_TILING_NONE;
- }
-}
-
bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
{
return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR;
@@ -764,7 +771,7 @@ bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
{
- return fb && to_i915(fb->dev)->params.enable_dpt &&
+ return to_i915(fb->dev)->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
}
@@ -1670,10 +1677,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
max_size = max(max_size, offset + size);
}
- if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+ if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) {
drm_dbg_kms(&i915->drm,
"fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
+ mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size);
return -EINVAL;
}
@@ -1894,6 +1901,8 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_fb_bo_framebuffer_fini(intel_fb_obj(fb));
+
kfree(intel_fb);
}
@@ -1902,7 +1911,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev);
if (i915_gem_object_is_userptr(obj)) {
drm_dbg(&i915->drm,
@@ -1910,7 +1919,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
return -EINVAL;
}
- return drm_gem_handle_create(file, &obj->base, handle);
+ return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle);
}
struct frontbuffer_fence_cb {
@@ -1943,10 +1952,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (!atomic_read(&front->bits))
return 0;
- if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false)))
+ if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false)))
goto flush;
- ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false),
+ ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false),
&fence);
if (ret || !fence)
goto flush;
@@ -1988,61 +1997,30 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- unsigned int tiling, stride;
int ret = -EINVAL;
int i;
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
+ ret = intel_fb_bo_framebuffer_init(intel_fb, obj, mode_cmd);
+ if (ret)
+ return ret;
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode doesn't match fb modifier\n");
- goto err;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&dev_priv->drm,
- "No Y tiling for legacy addfb\n");
- goto err;
- }
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err;
}
+ ret = -EINVAL;
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(&dev_priv->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err;
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(dev_priv) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- goto err;
+ goto err_frontbuffer_put;
}
max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
@@ -2053,18 +2031,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], max_stride);
- goto err;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&dev_priv->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- goto err;
+ goto err_frontbuffer_put;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
@@ -2072,7 +2039,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(&dev_priv->drm,
"plane 0 offset (0x%08x) must be 0\n",
mode_cmd->offsets[0]);
- goto err;
+ goto err_frontbuffer_put;
}
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -2083,7 +2050,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
i);
- goto err;
+ goto err_frontbuffer_put;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
@@ -2091,7 +2058,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(&dev_priv->drm,
"plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
- goto err;
+ goto err_frontbuffer_put;
}
if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
@@ -2102,16 +2069,16 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
"ccs aux plane %d pitch (%d) must be %d\n",
i,
fb->pitches[i], ccs_aux_stride);
- goto err;
+ goto err_frontbuffer_put;
}
}
- fb->obj[i] = &obj->base;
+ fb->obj[i] = intel_bo_to_drm_bo(obj);
}
ret = intel_fill_fb_info(dev_priv, intel_fb);
if (ret)
- goto err;
+ goto err_frontbuffer_put;
if (intel_fb_uses_dpt(fb)) {
struct i915_address_space *vm;
@@ -2120,7 +2087,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (IS_ERR(vm)) {
drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
- goto err;
+ goto err_frontbuffer_put;
}
intel_fb->dpt_vm = vm;
@@ -2137,8 +2104,10 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
err_free_dpt:
if (intel_fb_uses_dpt(fb))
intel_dpt_destroy(intel_fb->dpt_vm);
-err:
+err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err:
+ intel_fb_bo_framebuffer_fini(obj);
return ret;
}
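
The relabeling from a single err: to the err_frontbuffer_put/err pair above follows the standard kernel unwind idiom: each label releases exactly what was acquired before the failing step, in reverse acquisition order. A compilable sketch with hypothetical acquire/release helpers:

#include <stdbool.h>

static bool acquire_a(void) { return true; }
static bool acquire_b(void) { return true; }
static void release_a(void) { }

static int setup(void)
{
	int ret = -1;

	if (!acquire_a())
		goto err;		/* nothing acquired yet */

	if (!acquire_b())
		goto err_release_a;	/* only A is held at this point */

	return 0;

err_release_a:
	release_a();
err:
	return ret;
}
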
@@ -2150,23 +2119,14 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915;
-
- obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
- return ERR_PTR(-EREMOTE);
- }
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ obj = intel_fb_bo_lookup_valid_bo(i915, filp, &mode_cmd);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
+ drm_gem_object_put(intel_bo_to_drm_bo(obj));
return fb;
}
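
intel_user_framebuffer_create() can now propagate errors with ERR_CAST() because intel_fb_bo_lookup_valid_bo() returns an ERR_PTR-encoded pointer rather than NULL. A userspace re-implementation of that encoding, assuming the usual trick of reserving the top 4095 addresses for negative errnos:

#include <errno.h>
#include <stdio.h>

static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	/* errno values -1..-4095 map into the top of the address space */
	return (unsigned long)p >= (unsigned long)-4095;
}

static int object;

static void *lookup(int handle)
{
	if (handle == 0)
		return err_ptr(-ENOENT);	/* no such handle */
	return &object;
}

int main(void)
{
	void *obj = lookup(0);

	if (is_err(obj))
		printf("lookup failed: %ld\n", ptr_err(obj));
	return 0;
}
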
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index e85167d6b..23db6628f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -95,4 +95,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier);
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier);
+
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
new file mode 100644
index 000000000..4be09541e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_framebuffer.h>
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_fb.h"
+#include "intel_fb_bo.h"
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj)
+{
+ /* Nothing to do for i915 */
+}
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tiling, stride;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&i915->drm,
+ "No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (DISPLAY_VER(i915) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&i915->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ /* object is backed with LMEM for discrete */
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
+ return ERR_PTR(-EREMOTE);
+ }
+
+ return obj;
+}
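
intel_fb_bo_framebuffer_init() reads tiling and stride under a single i915_gem_object_lock()/unlock() pair so the two values form one consistent snapshot before validation. The same snapshot pattern sketched with a plain pthread mutex (the real lock is a ww-mutex, so this is only an approximation):

#include <pthread.h>

struct bo {
	pthread_mutex_t lock;
	unsigned int tiling;
	unsigned int stride;
};

/*
 * Read both fields inside one critical section: the caller must never
 * see a tiling value paired with a stride from a different update.
 */
static void snapshot(struct bo *bo, unsigned int *tiling, unsigned int *stride)
{
	pthread_mutex_lock(&bo->lock);
	*tiling = bo->tiling;
	*stride = bo->stride;
	pthread_mutex_unlock(&bo->lock);
}
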
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
new file mode 100644
index 000000000..232bf898b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_BO_H__
+#define __INTEL_FB_BO_H__
+
+struct drm_file;
+struct drm_mode_fb_cmd2;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_framebuffer;
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj);
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37..b6df9baf4 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (phys_cursor)
+ plane_state->phys_dma_addr =
+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
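
The intel_fb_pin.c hunk caches the cursor's DMA address in plane_state ahead of time because i915_gem_object_get_dma_address() triggers might_sleep(), and the vblank-evade section it would otherwise run in must not sleep. The shape of that fix, sketched with a spinlock stand-in (helper names invented):

#include <pthread.h>

/* initialised elsewhere with pthread_spin_init(); no sleeping while held */
static pthread_spinlock_t critical;

static unsigned long long query_addr_may_sleep(void)
{
	/* stands in for a helper that is allowed to sleep */
	return 0x1000;
}

static void commit(void)
{
	/* Resolve everything that may sleep *before* the atomic section. */
	unsigned long long addr = query_addr_may_sleep();

	pthread_spin_lock(&critical);
	(void)addr;		/* ...program the hardware with addr... */
	pthread_spin_unlock(&critical);
}
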
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 4820d21cc..f17a1afb4 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -608,6 +608,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
if (DISPLAY_VER(i915) >= 10)
glk_fbc_program_cfb_stride(fbc);
@@ -617,8 +618,13 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
if (intel_gt_support_legacy_fencing(to_gt(i915)))
snb_fbc_program_fence(fbc);
+	/* Wa_14019417088 Alternative WA */
+ dpfc_ctl = ivb_dpfc_ctl(fbc);
+ if (DISPLAY_VER(i915) >= 20)
+ intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
- DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
+ DPFC_CTL_EN | dpfc_ctl);
}
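
For the Wa_14019417088 alternative workaround, display version 20+ first writes the DPFC control value without the enable bit and then rewrites it with DPFC_CTL_EN set, latching the configuration before compression turns on. A minimal sketch of the two-step enable against a fake register (layout invented for illustration):

#include <stdint.h>

#define CTL_EN	(1u << 31)

static volatile uint32_t fake_dpfc_ctl;	/* stand-in MMIO register */

static void activate(uint32_t cfg, int needs_wa)
{
	/* Workaround: latch the configuration before enabling. */
	if (needs_wa)
		fake_dpfc_ctl = cfg;

	fake_dpfc_ctl = CTL_EN | cfg;
}
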
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
@@ -1022,10 +1028,13 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int effective_w, effective_h, max_w, max_h;
- if (DISPLAY_VER(i915) >= 10) {
+ if (DISPLAY_VER(i915) >= 11) {
+ max_w = 8192;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 10) {
max_w = 5120;
max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ } else if (DISPLAY_VER(i915) >= 7) {
max_w = 4096;
max_h = 4096;
} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
@@ -1044,6 +1053,31 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
return effective_w <= max_w && effective_h <= max_h;
}
+static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int w, h, max_w, max_h;
+
+ if (DISPLAY_VER(i915) >= 10) {
+ max_w = 5120;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ max_w = 4096;
+ max_h = 4096;
+ } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
+ max_w = 4096;
+ max_h = 2048;
+ } else {
+ max_w = 2048;
+ max_h = 1536;
+ }
+
+ w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ return w <= max_w && h <= max_h;
+}
+
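
The new intel_fbc_plane_size_valid() compares against plane_state->uapi.src, which drm stores in 16.16 fixed point; hence the right-shift by 16 before checking the pixel limits. A standalone illustration of that representation:

#include <assert.h>
#include <stdint.h>

/*
 * src rectangles carry subpixel precision: integer part in the high
 * 16 bits, fractional part in the low 16 bits.
 */
static uint32_t fp1616_to_int(uint32_t v) { return v >> 16; }

int main(void)
{
	uint32_t src_w = 5120u << 16;			/* 5120.0 pixels */

	assert(fp1616_to_int(src_w) == 5120u);		/* fits a 5120 px limit */
	assert(fp1616_to_int((1920u << 16) | 0x8000) == 1920u); /* 1920.5 truncates */
	return 0;
}
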
static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1174,7 +1208,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!i915->params.enable_fbc) {
+ if (!i915->display.params.enable_fbc) {
plane_state->no_fbc_reason = "disabled per module param or by default";
return 0;
}
@@ -1201,7 +1235,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*/
- if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+ if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_psr2) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
@@ -1241,11 +1275,16 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ if (!intel_fbc_plane_size_valid(plane_state)) {
plane_state->no_fbc_reason = "plane size too big";
return 0;
}
+ if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ plane_state->no_fbc_reason = "surface size too big";
+ return 0;
+ }
+
/*
* Work around a problem on GEN9+ HW, where enabling FBC on a plane
* having a Y offset that isn't divisible by 4 causes FIFO underrun
@@ -1751,8 +1790,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
{
- if (i915->params.enable_fbc >= 0)
- return !!i915->params.enable_fbc;
+ if (i915->display.params.enable_fbc >= 0)
+ return !!i915->display.params.enable_fbc;
if (!HAS_FBC(i915))
return 0;
@@ -1824,9 +1863,9 @@ void intel_fbc_init(struct drm_i915_private *i915)
if (need_fbc_vtd_wa(i915))
DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
- i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
+ i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
- i915->params.enable_fbc);
+ i915->display.params.enable_fbc);
for_each_fbc_id(i915, fbc_id)
i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 31d0d695d..99894a855 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,7 +43,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_mman.h"
#include "i915_drv.h"
@@ -51,6 +50,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
+#include "intel_fbdev_fb.h"
#include "intel_frontbuffer.h"
struct intel_fbdev {
@@ -146,65 +146,6 @@ static const struct fb_ops intelfb_ops = {
.fb_mmap = intel_fbdev_mmap,
};
-static int intelfb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
- struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
-
- obj = ERR_PTR(-ENODEV);
- if (HAS_LMEM(dev_priv)) {
- obj = i915_gem_object_create_lmem(dev_priv, size,
- I915_BO_ALLOC_CONTIGUOUS |
- I915_BO_ALLOC_USER);
- } else {
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- *
- * Also skip stolen on MTL as Wa_22018444074 mitigation.
- */
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
- }
-
- if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
- return PTR_ERR(obj);
- }
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
- if (IS_ERR(fb))
- return PTR_ERR(fb);
-
- ifbdev->fb = to_intel_framebuffer(fb);
- return 0;
-}
-
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -213,7 +154,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
const struct i915_gtt_view view = {
.type = I915_GTT_VIEW_NORMAL,
};
@@ -222,9 +162,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
- void __iomem *vaddr;
struct drm_i915_gem_object *obj;
- struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -245,12 +183,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
+ struct drm_framebuffer *fb;
drm_dbg_kms(&dev_priv->drm,
"no BIOS fb, allocating a new one\n");
- ret = intelfb_alloc(helper, sizes);
- if (ret)
- return ret;
- intel_fb = ifbdev->fb;
+ fb = intel_fbdev_fb_alloc(helper, sizes);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+ intel_fb = ifbdev->fb = to_intel_framebuffer(fb);
} else {
drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
prealloc = true;
@@ -283,49 +222,18 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
obj = intel_fb_obj(&intel_fb->base);
- if (i915_gem_object_is_lmem(obj)) {
- struct intel_memory_region *mem = obj->mm.region;
-
- /* Use fbdev's framebuffer from lmem for discrete */
- info->fix.smem_start =
- (unsigned long)(mem->io_start +
- i915_gem_object_get_dma_address(obj, 0));
- info->fix.smem_len = obj->base.size;
- } else {
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
- info->fix.smem_len = vma->size;
- }
-
- for_i915_gem_ww(&ww, ret, false) {
- ret = i915_gem_object_lock(vma->obj, &ww);
-
- if (ret)
- continue;
-
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- continue;
- }
- }
+ ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
if (ret)
goto out_unpin;
- info->screen_base = vaddr;
- info->screen_size = vma->size;
-
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
+ if (!i915_gem_object_is_shmem(obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -424,12 +332,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
continue;
}
- if (obj->base.size > max_size) {
+ if (intel_bo_to_drm_bo(obj)->size > max_size) {
drm_dbg_kms(&i915->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
- max_size = obj->base.size;
+ max_size = intel_bo_to_drm_bo(obj)->size;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
new file mode 100644
index 000000000..717c3a323
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_fb_helper.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_fbdev_fb.h"
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
+ obj = ERR_PTR(-ENODEV);
+ if (HAS_LMEM(dev_priv)) {
+ obj = i915_gem_object_create_lmem(dev_priv, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_USER);
+ } else {
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ *
+ * Also skip stolen on MTL as Wa_22018444074 mitigation.
+ */
+ if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
+ obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(dev_priv, size);
+ }
+
+ if (IS_ERR(obj)) {
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+
+ return fb;
+}
+
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma)
+{
+ struct i915_gem_ww_ctx ww;
+ void __iomem *vaddr;
+ int ret;
+
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_memory_region *mem = obj->mm.region;
+
+ /* Use fbdev's framebuffer from lmem for discrete */
+ info->fix.smem_start =
+ (unsigned long)(mem->io_start +
+ i915_gem_object_get_dma_address(obj, 0));
+ info->fix.smem_len = obj->base.size;
+ } else {
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
+ info->fix.smem_len = vma->size;
+ }
+
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&i915->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ info->screen_base = vaddr;
+ info->screen_size = intel_bo_to_drm_bo(obj)->size;
+
+ return 0;
+}
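
intel_fbdev_fb_alloc() derives the allocation size from the surface parameters: the pitch is the byte width rounded up to 64, and the object size is pitch * height rounded up to a page. The same arithmetic as a standalone program (a 4096-byte page is assumed):

#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int width = 1920, height = 1080, bpp = 32;
	unsigned int pitch = ALIGN_UP(width * DIV_ROUND_UP(bpp, 8), 64);
	unsigned int size = ALIGN_UP(pitch * height, 4096);

	/* pitch=7680 size=8294400 */
	printf("pitch=%u size=%u\n", pitch, size);
	return 0;
}
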
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
new file mode 100644
index 000000000..a395b2c65
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_FB_H__
+#define __INTEL_FBDEV_FB_H__
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct fb_info;
+struct i915_vma;
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index e6429dfeb..295a0f24e 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -10,6 +10,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -338,8 +339,11 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = lane;
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false);
+ intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
+ lane, fdi_dotclock,
+ link_bw,
+ intel_dp_bw_fec_overhead(false),
+ &pipe_config->fdi_m_n);
return 0;
}
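
ilk_fdi_compute_config() now passes the link bpp to intel_link_compute_m_n() in 1/16-bpp units so the same helper can handle DSC's fractional compressed bpp. Assuming to_bpp_x16() is a plain shift by four, the conversion behaves like this:

#include <assert.h>

/* bpp expressed in 1/16-bpp units: 24 bpp -> 384 */
static int to_bpp_x16(int bpp) { return bpp << 4; }
static int to_bpp_int_roundup(int bpp_x16) { return (bpp_x16 + 15) >> 4; }

int main(void)
{
	assert(to_bpp_x16(24) == 384);
	/* a DSC rate of 8.5 bpp is exactly representable as 136 in x16 units */
	assert(to_bpp_int_roundup(136) == 9);
	return 0;
}
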
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 40d7b6f3f..e9e4dcf34 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -899,7 +899,6 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
}
bus->adapter.owner = THIS_MODULE;
- bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s", gmbus_pin->name);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index c89da3568..39b3f7c0c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -923,7 +923,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return 0;
}
-static int _intel_hdcp_enable(struct intel_connector *connector)
+static int intel_hdcp1_enable(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1058,7 +1058,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- ret = _intel_hdcp_enable(connector);
+ ret = intel_hdcp1_enable(connector);
if (ret) {
drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
intel_hdcp_update_value(connector,
@@ -2324,10 +2324,10 @@ intel_hdcp_set_streams(struct intel_digital_port *dig_port,
return 0;
}
-int intel_hdcp_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static int _intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector =
@@ -2388,7 +2388,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
*/
if (ret && intel_hdcp_capable(connector) &&
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
- ret = _intel_hdcp_enable(connector);
+ ret = intel_hdcp1_enable(connector);
}
if (!ret) {
@@ -2404,6 +2404,27 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
return ret;
}
+void intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ /*
+	 * Enable HDCP if it's desired, or if userspace enabled it and
+	 * the driver had set its state to undesired
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+ _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+}
+
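
The new intel_hdcp_enable() wrapper only starts enabling for two of the content-protection states: DESIRED, or ENABLED when the driver had internally downgraded its own state to UNDESIRED. The predicate isolated as a sketch:

enum cp { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

/* uapi_state comes from the connector state, driver_state from intel_hdcp */
static int should_enable_hdcp(enum cp uapi_state, enum cp driver_state)
{
	return uapi_state == CP_DESIRED ||
	       (uapi_state == CP_ENABLED && driver_state == CP_UNDESIRED);
}
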
int intel_hdcp_disable(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2491,7 +2512,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
}
if (desired_and_not_enabled || content_protection_type_changed)
- intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+ _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
void intel_hdcp_component_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 5997c52a0..a9c784fd9 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -28,10 +28,10 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
int intel_hdcp_init(struct intel_connector *connector,
struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state);
+void intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index bfa456fa7..39e4f5f7c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -3034,16 +3034,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- * 0xd. Failure to do so will result in spurious interrupts being
- * generated on the port when a cable is not attached.
- */
- if (IS_G45(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
- intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
- (temp & ~0xf) | 0xd);
- }
-
cec_fill_conn_info_from_drm(&conn_info, connector);
intel_hdmi->cec_notifier =
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index f07047e9c..04f62f27a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1361,11 +1361,24 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
bxt_hpd_detection_setup(dev_priv);
}
+static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+{
+ /*
+ * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+}
+
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
+ if (IS_G45(i915))
+ g45_hpd_peg_band_gap_wa(i915);
+
/* HPD sense and interrupt enable are one and the same */
i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
}
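
g45_hpd_peg_band_gap_wa() condenses the read/mask/write sequence removed from intel_hdmi_init_connector() into one intel_de_rmw(reg, clear, set) call. Its read-modify-write semantics, modeled on a fake register:

#include <assert.h>
#include <stdint.h>

static uint32_t reg;	/* stand-in for the MMIO register */

static void rmw(uint32_t clear, uint32_t set)
{
	reg = (reg & ~clear) | set;
}

int main(void)
{
	reg = 0xabc7;
	rmw(0xf, 0xd);				/* PEG_BAND_GAP_DATA 3:0 := 0xd */
	assert((reg & 0xf) == 0xd);
	assert((reg & ~0xfu) == 0xabc0);	/* other bits untouched */
	return 0;
}
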
@@ -1389,6 +1402,9 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ if (IS_G45(dev_priv))
+ g45_hpd_peg_band_gap_wa(dev_priv);
+
/* Ignore TV since it's buggy */
i915_hotplug_interrupt_update_locked(dev_priv,
HOTPLUG_INT_EN_MASK |
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index c5eb5f242..9c6d35a40 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -7,6 +7,7 @@
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
@@ -21,6 +22,7 @@ void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_
{
enum pipe pipe;
+ limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(i915, pipe)
limits->max_bpp_x16[pipe] = INT_MAX;
@@ -53,11 +55,11 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe max_bpp_pipe = INVALID_PIPE;
struct intel_crtc *crtc;
- int max_bpp = 0;
+ int max_bpp_x16 = 0;
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
- int link_bpp;
+ int link_bpp_x16;
if (limits->bpp_limit_reached_pipes & BIT(crtc->pipe))
continue;
@@ -68,7 +70,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
return PTR_ERR(crtc_state);
if (crtc_state->dsc.compression_enable)
- link_bpp = crtc_state->dsc.compressed_bpp;
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
else
/*
* TODO: for YUV420 the actual link bpp is only half
@@ -76,10 +78,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
* is based on the pipe bpp value, set the actual link bpp
* limit here once the MST BW allocation is fixed.
*/
- link_bpp = crtc_state->pipe_bpp;
+ link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp);
- if (link_bpp > max_bpp) {
- max_bpp = link_bpp;
+ if (link_bpp_x16 > max_bpp_x16) {
+ max_bpp_x16 = link_bpp_x16;
max_bpp_pipe = crtc->pipe;
}
}
@@ -87,7 +89,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
if (max_bpp_pipe == INVALID_PIPE)
return -ENOSPC;
- limits->max_bpp_x16[max_bpp_pipe] = to_bpp_x16(max_bpp) - 1;
+ limits->max_bpp_x16[max_bpp_pipe] = max_bpp_x16 - 1;
return intel_modeset_pipes_in_mask_early(state, reason,
BIT(max_bpp_pipe));
@@ -143,6 +145,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
/* TODO: Check additional shared display link configurations like MST */
int ret;
+ ret = intel_dp_mst_atomic_check_link(state, limits);
+ if (ret)
+ return ret;
+
ret = intel_fdi_atomic_check_link(state, limits);
if (ret)
return ret;
@@ -158,6 +164,12 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
bool bpps_changed = false;
enum pipe pipe;
+ /* FEC can't be forced off after it was forced on. */
+ if (drm_WARN_ON(&i915->drm,
+ (old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
+ old_limits->force_fec_pipes))
+ return false;
+
for_each_pipe(i915, pipe) {
/* The bpp limit can only decrease. */
if (drm_WARN_ON(&i915->drm,
@@ -172,7 +184,9 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
/* At least one limit must change. */
if (drm_WARN_ON(&i915->drm,
- !bpps_changed))
+ !bpps_changed &&
+ new_limits->force_fec_pipes ==
+ old_limits->force_fec_pipes))
return false;
return true;
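
assert_link_limit_change_valid() guarantees the link-bw atomic-check retry loop converges: FEC pipes may only be added, bpp limits may only decrease, and at least one limit must change per retry. The bitmask half of that invariant as a sketch:

#include <assert.h>
#include <stdint.h>

/* "old is a subset of new": no previously forced pipe was un-forced */
static int superset_of(uint8_t new_pipes, uint8_t old_pipes)
{
	return (old_pipes & new_pipes) == old_pipes;
}

int main(void)
{
	assert(superset_of(0x3, 0x1));	/* pipe A stayed forced, B was added */
	assert(!superset_of(0x2, 0x1));	/* pipe A was dropped: invalid */
	return 0;
}
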
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index e07df22a7..2cf57307c 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -16,6 +16,7 @@ struct intel_atomic_state;
struct intel_crtc_state;
struct intel_link_bw_limits {
+ u8 force_fec_pipes;
u8 bpp_limit_reached_pipes;
/* in 1/16 bpp units */
int max_bpp_x16[I915_MAX_PIPES];
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index bcbdd1984..221f5c6c8 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -185,7 +185,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->t4 = val * 1000;
- if (DISPLAY_VER(dev_priv) <= 4 &&
+ if (DISPLAY_VER(dev_priv) < 5 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
@@ -799,8 +799,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
unsigned int val;
/* use the module option value if specified */
- if (i915->params.lvds_channel_mode > 0)
- return i915->params.lvds_channel_mode == 2;
+ if (i915->display.params.lvds_channel_mode > 0)
+ return i915->display.params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index b8f43efb0..caeca3a84 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -769,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
+ struct intel_crtc_state *crtc_state = NULL;
+
if (connector->get_hw_state(connector)) {
- struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
connector->base.dpms = DRM_MODE_DPMS_ON;
@@ -796,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ if (connector->sync_state)
+ connector->sync_state(connector, crtc_state);
+
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 5e1c2c780..076298a8d 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -244,7 +244,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
verify_crtc_state(state, crtc);
intel_shared_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
- intel_c10pll_state_verify(state, crtc);
+ intel_cx0pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 84078fb82..1ce785db6 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -841,7 +841,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
const struct firmware *fw = NULL;
- const char *name = dev_priv->params.vbt_firmware;
+ const char *name = dev_priv->display.params.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 483beedac..0d8e5320a 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -46,8 +46,8 @@
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
- if (i915->params.panel_use_ssc >= 0)
- return i915->params.panel_use_ssc != 0;
+ if (i915->display.params.panel_use_ssc >= 0)
+ return i915->display.params.panel_use_ssc != 0;
return i915->display.vbt.lvds_use_ssc &&
!intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 866786e6b..baf679759 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -8,6 +8,7 @@
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_lvds.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 73f0f1714..a8fa3a209 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -90,7 +90,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.pps_pipe;
bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 614b4534e..925776ba1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -29,6 +29,7 @@
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -172,6 +173,15 @@
* irrelevant for normal operation.
*/
+bool intel_encoder_can_psr(struct intel_encoder *encoder)
+{
+ if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
+ return CAN_PSR(enc_to_intel_dp(encoder)) ||
+ CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
+ else
+ return false;
+}
+
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
@@ -179,9 +189,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (i915->params.enable_psr == -1)
+ if (i915->display.params.enable_psr == -1)
return connector->panel.vbt.psr.enable;
- return i915->params.enable_psr;
+ return i915->display.params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -198,7 +208,7 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (i915->params.enable_psr == 1)
+ if (i915->display.params.enable_psr == 1)
return false;
return true;
}
@@ -474,27 +484,41 @@ exit:
intel_dp->psr.su_y_granularity = y;
}
-void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 pr_dpcd = 0;
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ intel_dp->psr.sink_panel_replay_support = false;
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
- if (!intel_dp->psr_dpcd[0])
+ if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
+ drm_dbg_kms(&i915->drm,
+ "Panel replay is not supported by panel\n");
return;
- drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Panel replay is supported by panel\n");
+ intel_dp->psr.sink_panel_replay_support = true;
+}
+
+static void _psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -503,8 +527,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ if (DISPLAY_VER(i915) >= 9 &&
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
bool alpm = intel_dp_get_alpm_status(intel_dp);
@@ -521,14 +545,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* GTC first.
*/
intel_dp->psr.sink_psr2_support = y_req && alpm;
- drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
+ }
+}
- if (intel_dp->psr.sink_psr2_support) {
- intel_dp->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
- intel_dp_get_su_granularity(intel_dp);
- }
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ _panel_replay_init_dpcd(intel_dp);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (intel_dp->psr_dpcd[0])
+ _psr_init_dpcd(intel_dp);
+
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ intel_dp_get_su_granularity(intel_dp);
}
}
@@ -574,8 +609,11 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
- /* Enable ALPM at sink for psr2 */
+ if (intel_dp->psr.panel_replay_enabled)
+ return;
+
if (intel_dp->psr.psr2_enabled) {
+ /* Enable ALPM at sink for psr2 */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
@@ -592,6 +630,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
+ if (intel_dp->psr.entry_setup_frames > 0)
+ dpcd_val |= DP_PSR_FRAME_CAPTURE;
+
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -606,7 +647,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0us;
- if (dev_priv->params.psr_safest_params) {
+ if (dev_priv->display.params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
@@ -690,6 +731,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
+
intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
@@ -700,7 +744,7 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (dev_priv->params.psr_safest_params)
+ if (dev_priv->display.params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
@@ -727,21 +771,49 @@ static int psr2_block_count(struct intel_dp *intel_dp)
return psr2_block_count_lines(intel_dp) / 4;
}
+static u8 frames_before_su_entry(struct intel_dp *intel_dp)
+{
+ u8 frames_before_su_entry;
+
+ frames_before_su_entry = max_t(u8,
+ intel_dp->psr.sink_sync_latency + 1,
+ 2);
+
+ /* Entry setup frames must be at least 1 less than frames before SU entry */
+ if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
+ frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
+
+ return frames_before_su_entry;
+}
+
+static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
+
+ intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
+ TRANS_DP2_PANEL_REPLAY_ENABLE);
+}
+
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
+ u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
+ if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
+
val |= intel_psr2_get_tp_time(intel_dp);
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -785,6 +857,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
+
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
@@ -798,7 +873,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
+ intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}
@@ -816,13 +891,13 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans
return false;
}
-static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
- if (!cstate || !cstate->hw.active)
+ if (!crtc_state->hw.active)
return 0;
return DIV_ROUND_UP(1000 * 1000,
- drm_mode_vrefresh(&cstate->hw.adjusted_mode));
+ drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
@@ -943,7 +1018,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!dev_priv->params.enable_psr2_sel_fetch &&
+ if (!dev_priv->display.params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
@@ -1019,7 +1094,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
/* Not supported <13 / Wa_22012279113:adl-p */
- if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
@@ -1056,7 +1131,7 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
fast_wake_lines > max_wake_lines)
return false;
- if (i915->params.psr_safest_params)
+ if (i915->display.params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
@@ -1066,6 +1141,39 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
return true;
}
+static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+ int entry_setup_frames = 0;
+
+ if (psr_setup_time < 0) {
+ drm_dbg_kms(&i915->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
+ return -ETIME;
+ }
+
+ if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+ if (DISPLAY_VER(i915) >= 20) {
+ /* setup entry frames can be up to 3 frames */
+ entry_setup_frames = 1;
+ drm_dbg_kms(&i915->drm,
+ "PSR setup entry frames %d\n",
+ entry_setup_frames);
+ } else {
+ drm_dbg_kms(&i915->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
+ return -ETIME;
+ }
+ }
+
+ return entry_setup_frames;
+}
+
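
intel_psr_entry_setup_frames() compares the panel's DPCD PSR setup time against the vblank length in scanlines, and on display 20+ an overrun is absorbed via entry-setup frames instead of rejecting PSR outright. Assuming intel_usecs_to_scanlines() is DIV_ROUND_UP(usecs * crtc_clock_khz, 1000 * htotal), a worked check:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* 1920x1080@60-ish CEA timings */
	int clock_khz = 148500, htotal = 2200;
	int vtotal = 1125, vdisplay = 1080;
	int setup_us = 330;	/* panel's DPCD PSR setup time */

	int setup_lines = DIV_ROUND_UP(setup_us * clock_khz, 1000 * htotal);	/* 23 */
	int vblank_lines = vtotal - vdisplay - 1;				/* 44 */

	printf("need %d lines, have %d -> %s\n", setup_lines, vblank_lines,
	       setup_lines > vblank_lines ? "needs entry frames" : "ok");
	return 0;
}
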
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -1113,7 +1221,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1206,24 +1314,42 @@ unsupported:
return false;
}
-void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static bool _psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int psr_setup_time;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int entry_setup_frames;
/*
* Current PSR panels don't work reliably with VRR enabled
* So if VRR is enabled, do not enable PSR.
*/
if (crtc_state->vrr.enable)
- return;
+ return false;
if (!CAN_PSR(intel_dp))
- return;
+ return false;
+
+ entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
+
+ if (entry_setup_frames >= 0) {
+ intel_dp->psr.entry_setup_frames = entry_setup_frames;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup timing not met\n");
+ return false;
+ }
+
+ return true;
+}
+
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
@@ -1242,23 +1368,25 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
- if (psr_setup_time < 0) {
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
- intel_dp->psr_dpcd[1]);
+ "PSR disabled due to bigjoiner\n");
return;
}
- if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
- adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: PSR setup time (%d us) too long\n",
- psr_setup_time);
+ if (CAN_PANEL_REPLAY(intel_dp))
+ crtc_state->has_panel_replay = true;
+ else
+ crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
+
+ if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
return;
- }
- crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
@@ -1279,18 +1407,23 @@ void intel_psr_get_config(struct intel_encoder *encoder,
return;
intel_dp = &dig_port->dp;
- if (!CAN_PSR(intel_dp))
+ if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
if (!intel_dp->psr.enabled)
goto unlock;
- /*
- * Not possible to read EDP_PSR/PSR2_CTL registers as it is
- * enabled/disabled because of frontbuffer tracking and others.
- */
- pipe_config->has_psr = true;
+ if (intel_dp->psr.panel_replay_enabled) {
+ pipe_config->has_panel_replay = true;
+ } else {
+ /*
+		 * Not possible to read the EDP_PSR/PSR2_CTL registers here,
+		 * as they are toggled by frontbuffer tracking and other
+		 * mechanisms.
+ */
+ pipe_config->has_psr = true;
+ }
+
pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
@@ -1327,8 +1460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&intel_dp->psr.lock);
- /* psr1 and psr2 are mutually exclusive.*/
- if (intel_dp->psr.psr2_enabled)
+	/* psr1, psr2 and panel-replay are mutually exclusive. */
+ if (intel_dp->psr.panel_replay_enabled)
+ dg2_activate_panel_replay(intel_dp);
+ else if (intel_dp->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
@@ -1462,12 +1597,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* All supported adlp panels have 1-based X granularity, this may
* cause issues if non-supported panels are used.
*/
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
- else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
+ if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ 0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
@@ -1518,6 +1651,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
intel_dp->psr.busy_frontbuffer_bits = 0;
intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
@@ -1533,8 +1667,12 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ if (intel_dp->psr.panel_replay_enabled)
+ drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
+ else
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
@@ -1563,7 +1701,10 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
return;
}
- if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.panel_replay_enabled) {
+ intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
+ TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
+ } else if (intel_dp->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
@@ -1612,8 +1753,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (!intel_dp->psr.enabled)
return;
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ if (intel_dp->psr.panel_replay_enabled)
+ drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -1646,6 +1790,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
+ intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.psr2_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
@@ -1793,81 +1938,6 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
-}
-
-void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- if (plane->id == PLANE_CURSOR)
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
- else
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
-}
-
-void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- const struct drm_rect *clip;
- u32 val;
- int x, y;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- if (plane->id == PLANE_CURSOR)
- return;
-
- clip = &plane_state->psr2_sel_fetch_area;
-
- val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
- val |= plane_state->uapi.dst.x1;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
-
- x = plane_state->view.color_plane[color_plane].x;
-
- /*
- * From Bspec: UV surface Start Y Position = half of Y plane Y
- * start position.
- */
- if (!color_plane)
- y = plane_state->view.color_plane[color_plane].y + clip->y1;
- else
- y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
-
- val = y << 16 | x;
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
- val);
-
- /* Sizes are 0 based */
- val = (drm_rect_height(clip) - 1) << 16;
- val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-}
-
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -2127,8 +2197,19 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
continue;
inter = pipe_clip;
- if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
+ sel_fetch_area->y1 = -1;
+ sel_fetch_area->y2 = -1;
+ /*
+			 * If plane sel fetch was previously enabled,
+			 * disable it.
+ */
+ if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
+ crtc_state->update_planes |= BIT(plane->id);
+
continue;
+ }
if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
full_update = true;
@@ -2217,7 +2298,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!crtc_state->has_psr)
+ if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2703,7 +2784,7 @@ void intel_psr_init(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!HAS_PSR(dev_priv))
+ if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
return;
/*
@@ -2721,7 +2802,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
- intel_dp->psr.source_support = true;
+ if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
+ intel_dp->psr.source_panel_replay_support = true;
+ else
+ intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
@@ -2738,12 +2822,19 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
{
struct drm_dp_aux *aux = &intel_dp->aux;
int ret;
+ unsigned int offset;
+
+ offset = intel_dp->psr.panel_replay_enabled ?
+ DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
- ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ ret = drm_dp_dpcd_readb(aux, offset, status);
if (ret != 1)
return ret;
- ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ offset = intel_dp->psr.panel_replay_enabled ?
+ DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
+
+ ret = drm_dp_dpcd_readb(aux, offset, error_status);
if (ret != 1)
return ret;
@@ -2964,7 +3055,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
status = live_status[status_val];
}
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+ seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
@@ -2977,18 +3068,22 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
bool enabled;
u32 val;
- seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ seq_printf(m, "Sink support: PSR = %s",
+ str_yes_no(psr->sink_support));
+
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_puts(m, "\n");
+ seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
- if (!psr->sink_support)
+ if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
- if (psr->enabled)
+ if (psr->panel_replay_enabled)
+ status = "Panel Replay Enabled";
+ else if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
status = "disabled";
@@ -3001,14 +3096,17 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
goto unlock;
}
- if (psr->psr2_enabled) {
+ if (psr->panel_replay_enabled) {
+ val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+ enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
+ } else if (psr->psr2_enabled) {
val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
@@ -3146,6 +3244,16 @@ void intel_psr_debugfs_register(struct drm_i915_private *i915)
i915, &i915_edp_psr_status_fops);
}
+static const char *psr_mode_str(struct intel_dp *intel_dp)
+{
+ if (intel_dp->psr.panel_replay_enabled)
+ return "PANEL-REPLAY";
+ else if (intel_dp->psr.enabled)
+ return "PSR";
+
+ return "unknown";
+}
+
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
@@ -3160,12 +3268,19 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"reserved",
"sink internal error",
};
+ static const char * const panel_replay_status[] = {
+ "Sink device frame is locked to the Source device",
+ "Sink device is coasting, using the VTotal target",
+ "Sink device is governing the frame rate (frame rate unlock is granted)",
+ "Sink device in the process of re-locking with the Source device",
+ };
const char *str;
int ret;
u8 status, error_status;
+ u32 idx;
- if (!CAN_PSR(intel_dp)) {
- seq_puts(m, "PSR Unsupported\n");
+ if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
+ seq_puts(m, "PSR/Panel-Replay Unsupported\n");
return -ENODEV;
}
@@ -3176,15 +3291,20 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
if (ret)
return ret;
- status &= DP_PSR_SINK_STATE_MASK;
- if (status < ARRAY_SIZE(sink_status))
- str = sink_status[status];
- else
- str = "unknown";
+ str = "unknown";
+ if (intel_dp->psr.panel_replay_enabled) {
+ idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
+ if (idx < ARRAY_SIZE(panel_replay_status))
+ str = panel_replay_status[idx];
+ } else if (intel_dp->psr.enabled) {
+ idx = status & DP_PSR_SINK_STATE_MASK;
+ if (idx < ARRAY_SIZE(sink_status))
+ str = sink_status[idx];
+ }
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
+ seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
- seq_printf(m, "Sink PSR error status: 0x%x", error_status);
+ seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
@@ -3193,11 +3313,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
else
seq_puts(m, "\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- seq_puts(m, "\tPSR RFB storage error\n");
+ seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
+ seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_LINK_CRC_ERROR)
- seq_puts(m, "\tPSR Link CRC error\n");
+ seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
return ret;
}
@@ -3217,13 +3337,16 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ /* TODO: Add support for MST connectors as well. */
+ if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ connector->mst_port)
return;
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(i915))
+ if (HAS_PSR(i915) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
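The psr_get_status_and_error_status() change above makes the DPCD readback mode-dependent: a Panel Replay sink reports through its own status and error registers instead of the classic PSR pair. A minimal standalone sketch of that selection; the enum members reuse the register names from the hunk but are stand-in values, not the real DPCD addresses from <drm/display/drm_dp.h>:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the DPCD register defines used in the hunk above. */
enum dpcd_reg {
	DP_PSR_STATUS,
	DP_PSR_ERROR_STATUS,
	DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS,
	DP_PANEL_REPLAY_ERROR_STATUS,
};

/* Mirrors the offset selection in psr_get_status_and_error_status(). */
static void pick_offsets(bool panel_replay_enabled,
			 enum dpcd_reg *status, enum dpcd_reg *error)
{
	*status = panel_replay_enabled ?
		  DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
	*error = panel_replay_enabled ?
		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
}

int main(void)
{
	enum dpcd_reg status, error;

	pick_offsets(true, &status, &error);
	printf("panel replay: status=%d error=%d\n", status, error);
	pick_offsets(false, &status, &error);
	printf("psr:          status=%d error=%d\n", status, error);
	return 0;
}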
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index bf35f42df..143e0595c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,6 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
+bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -48,16 +55,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-
-void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
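CAN_PSR() and CAN_PANEL_REPLAY() above each require both sink- and source-side support, and the two capabilities are tracked independently. A toy model (the struct layout is illustrative, not the driver's intel_dp) showing how a DP 2.0 external sink can qualify for Panel Replay while classic PSR stays off, matching the intel_psr_init() split in the intel_psr.c hunk:

#include <stdbool.h>
#include <stdio.h>

/* Toy state carrying the four fields the two macros test. */
struct psr_state {
	bool sink_support, source_support;
	bool sink_panel_replay_support, source_panel_replay_support;
};

#define CAN_PSR(s)          ((s)->sink_support && (s)->source_support)
#define CAN_PANEL_REPLAY(s) ((s)->sink_panel_replay_support && \
			     (s)->source_panel_replay_support)

int main(void)
{
	/* DP 2.0 external sink: source sets panel replay support only. */
	struct psr_state st = {
		.sink_panel_replay_support = true,
		.source_panel_replay_support = true,
	};

	printf("CAN_PSR=%d CAN_PANEL_REPLAY=%d\n",
	       CAN_PSR(&st), CAN_PANEL_REPLAY(&st));
	return 0;
}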
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index d39951383..efe4306b3 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -35,6 +35,8 @@
#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3)
#define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20)
#define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x))
+#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK REG_GENMASK(17, 16)
+#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) REG_FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x))
#define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12)
#define EDP_PSR_TP_MASK REG_BIT(11)
#define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0)
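The new LNL field builds on the kernel's REG_GENMASK()/REG_FIELD_PREP() helpers: the mask names bits 17:16 of the register and the prep macro shifts a value into that range. A self-contained approximation, with GENMASK/FIELD_PREP re-implemented locally for illustration (__builtin_ctz is a GCC/Clang builtin):

#include <stdio.h>

/* Standalone stand-ins for the kernel's REG_GENMASK()/REG_FIELD_PREP(). */
#define GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((v) << __builtin_ctz(mask)) & (mask))

#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK GENMASK(17, 16)
#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) \
	FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x))

int main(void)
{
	/* 2 setup frames land in bits 17:16 -> 0x00020000. */
	printf("mask  = 0x%08x\n", LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK); /* 0x00030000 */
	printf("field = 0x%08x\n", LNL_EDP_PSR_ENTRY_SETUP_FRAMES(2));   /* 0x00020000 */
	return 0;
}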
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 543cdc46a..600c815e3 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -34,9 +34,6 @@
* These qp tables are as per the C model
* and it has the rows pointing to bpps which increment
* in steps of 0.5
- * We do not support fractional bpps as of today,
- * hence we would skip the fractional bpps during
- * our references for qp calculations.
*/
static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 312f88d90..cc978ee6d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -1787,17 +1788,28 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
-static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_disable_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
intel_sdvo_set_audio_state(intel_sdvo, 0);
}
-static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_enable_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
const u8 *eld = crtc_state->eld;
+ if (!crtc_state->has_audio)
+ return;
+
intel_sdvo_set_audio_state(intel_sdvo, 0);
intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
@@ -1818,9 +1830,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- if (old_crtc_state->has_audio)
- intel_sdvo_disable_audio(intel_sdvo);
-
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1912,9 +1921,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
- if (pipe_config->has_audio)
- intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -3317,7 +3323,6 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
ddc->ddc_bus = ddc_bus;
ddc->ddc.owner = THIS_MODULE;
- ddc->ddc.class = I2C_CLASS_DDC;
snprintf(ddc->ddc.name, I2C_NAME_SIZE, "SDVO %c DDC%d",
port_name(sdvo->base.port), ddc_bus);
ddc->ddc.dev.parent = &pdev->dev;
@@ -3396,6 +3401,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
}
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->audio_enable = intel_sdvo_enable_audio;
+ intel_encoder->audio_disable = intel_sdvo_disable_audio;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index ce5a73a4c..bc61e736f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -3,7 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/util_macros.h>
+#include <linux/math.h>
#include "i915_reg.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1fb16510f..d7b440c8c 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,6 +48,11 @@
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
+static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
+{
+ return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A';
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1636,7 +1641,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(pipe, sprite));
+ "sprite %c", sprite_name(dev_priv, pipe, sprite));
kfree(modifiers);
if (ret)
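The new sprite_name() letters sprites consecutively across pipes, so it needs the per-pipe sprite count from DISPLAY_RUNTIME_INFO() rather than a compile-time constant. A toy version showing just the arithmetic:

#include <stdio.h>

/* Toy model of sprite_name(): with N sprites per pipe, pipe 0 gets the
 * first N letters, pipe 1 the next N, and so on. */
static char sprite_name(int pipe, int num_sprites_per_pipe, int sprite)
{
	return pipe * num_sprites_per_pipe + sprite + 'A';
}

int main(void)
{
	/* With 2 sprites per pipe: pipe A -> 'A','B'; pipe B -> 'C','D'. */
	printf("%c %c %c %c\n",
	       sprite_name(0, 2, 0), sprite_name(0, 2, 1),
	       sprite_name(1, 2, 0), sprite_name(1, 2, 1));
	return 0;
}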
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index f64d348a9..dcf05e00e 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -1030,18 +1030,25 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
- if ((!tc_phy_wait_for_ready(tc) ||
- !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- if (enable) {
- __xelpdp_tc_phy_enable_tcss_power(tc, false);
- xelpdp_tc_phy_wait_for_tcss_power(tc, false);
- }
+ if (enable && !tc_phy_wait_for_ready(tc))
+ goto out_disable;
- return false;
- }
+ if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
+ goto out_disable;
return true;
+
+out_disable:
+ if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ return false;
+
+ if (!enable)
+ return false;
+
+ __xelpdp_tc_phy_enable_tcss_power(tc, false);
+ xelpdp_tc_phy_wait_for_tcss_power(tc, false);
+
+ return false;
}
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index f790fd10b..992a725de 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1417,9 +1417,6 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
static void set_color_conversion(struct drm_i915_private *dev_priv,
const struct color_conversion *color_conversion)
{
- if (!color_conversion)
- return;
-
intel_de_write(dev_priv, TV_CSC_Y,
(color_conversion->ry << 16) | color_conversion->gy);
intel_de_write(dev_priv, TV_CSC_Y2,
@@ -1454,9 +1451,6 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
int xpos, ypos;
unsigned int xsize, ysize;
- if (!tv_mode)
- return; /* can't happen (mode_prepare prevents this) */
-
tv_ctl = intel_de_read(dev_priv, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 2cec2abf9..fe256bf7b 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -265,6 +265,32 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
return (scanline + vtotal - crtc->scanline_offset) % vtotal;
}
+/*
+ * The uncore version of the spin lock functions is used to decide
+ * whether we need to lock the uncore lock or not. This is only
+ * needed in i915, not in Xe.
+ *
+ * This lock in i915 is needed because some old platforms (at least
+ * IVB and possibly HSW as well), which are not supported in Xe, need
+ * all register accesses to the same cacheline to be serialized;
+ * otherwise they may hang.
+ */
+static void intel_vblank_section_enter(struct drm_i915_private *i915)
+ __acquires(i915->uncore.lock)
+{
+#ifdef I915
+ spin_lock(&i915->uncore.lock);
+#endif
+}
+
+static void intel_vblank_section_exit(struct drm_i915_private *i915)
+ __releases(i915->uncore.lock)
+{
+#ifdef I915
+ spin_unlock(&i915->uncore.lock);
+#endif
+}
+
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
@@ -302,11 +328,12 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
}
/*
- * Lock uncore.lock, as we will do multiple timing critical raw
- * register reads, potentially with preemption disabled, so the
- * following code must not block on uncore.lock.
+ * Enter vblank critical section, as we will do multiple
+ * timing critical raw register reads, potentially with
+ * preemption disabled, so the following code must not block.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ local_irq_save(irqflags);
+ intel_vblank_section_enter(dev_priv);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -374,7 +401,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ intel_vblank_section_exit(dev_priv);
+ local_irq_restore(irqflags);
/*
* While in vblank, position will be negative
@@ -412,9 +440,13 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
unsigned long irqflags;
int position;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ local_irq_save(irqflags);
+ intel_vblank_section_enter(dev_priv);
+
position = __intel_get_crtc_scanline(crtc);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_vblank_section_exit(dev_priv);
+ local_irq_restore(irqflags);
return position;
}
@@ -537,7 +569,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
* Need to audit everything to make sure it's safe.
*/
spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
- spin_lock(&i915->uncore.lock);
+ intel_vblank_section_enter(i915);
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
@@ -546,7 +578,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
crtc->mode_flags = mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
-
- spin_unlock(&i915->uncore.lock);
+ intel_vblank_section_exit(i915);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
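intel_vblank_section_enter()/exit() compile to the uncore spinlock only in the i915 build; the Xe build gets empty bodies, while the IRQ disable stays common in the callers. A userspace analogue of that compile-gated pattern, with a pthread mutex standing in for uncore.lock (build with -lpthread; define or undefine I915 to mimic the two builds):

#include <pthread.h>
#include <stdio.h>

#define I915 /* comment out to mimic the Xe build */

static pthread_mutex_t uncore_lock = PTHREAD_MUTEX_INITIALIZER;

static void vblank_section_enter(void)
{
#ifdef I915
	pthread_mutex_lock(&uncore_lock);
#endif
}

static void vblank_section_exit(void)
{
#ifdef I915
	pthread_mutex_unlock(&uncore_lock);
#endif
}

int main(void)
{
	vblank_section_enter();
	puts("timing-critical register reads happen here");
	vblank_section_exit();
	return 0;
}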
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 6757dbae9..17d6572f9 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -77,8 +77,8 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
+ int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel);
int bpc = vdsc_cfg->bits_per_component;
- int bpp = vdsc_cfg->bits_per_pixel >> 4;
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
int first_line_bpg_offset;
@@ -148,7 +148,13 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
static const s8 ofs_und8[] = {
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
-
+ /*
+ * For 420 format since bits_per_pixel (bpp) is set to target bpp * 2,
+ * QP table values for target bpp 4.0 to 4.4375 (rounded to 4.0) are
+	 * actually for bpp 8 to 8.875 (rounded to 4.0 * 2, i.e. 8).
+	 * Similarly, values for target bpp 4.5 to 4.8375 (rounded to 4.5)
+	 * are for bpp 9 to 9.875 (rounded to 4.5 * 2, i.e. 9), and so on.
+ */
bpp_i = bpp - 8;
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
@@ -178,6 +184,9 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
}
} else {
+ /* fractional bpp part * 10000 (for precision up to 4 decimal places) */
+ int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel);
+
static const s8 ofs_und6[] = {
0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
@@ -191,7 +200,14 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
- bpp_i = (2 * (bpp - 6));
+ /*
+	 * QP table rows have values in increments of 0.5.
+	 * So 6.0 bpp to 6.4375 will have index 0, 6.5 to 6.9375 will have index 1,
+	 * and so on.
+	 * A 0.5 fractional part with 4 decimal precision becomes 5000.
+ */
+ bpp_i = ((bpp - 6) + (fractional_bits < 5000 ? 0 : 1));
+
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
@@ -248,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
- u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int err;
int ret;
@@ -279,8 +295,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
- /* Gen 11 only supports integral values of bpp */
- vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_pixel = pipe_config->dsc.compressed_bpp_x16;
/*
* According to DSC 1.2 specs in Section 4.1 if native_420 is set
@@ -797,13 +812,13 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
}
static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
- bool *check_equal)
+ bool *all_equal)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
i915_reg_t dsc_reg[2];
int i, vdsc_per_pipe, dsc_reg_num;
- u32 val = 0;
+ u32 val;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
@@ -812,20 +827,13 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
- if (check_equal)
- *check_equal = true;
-
- for (i = 0; i < dsc_reg_num; i++) {
- u32 tmp;
+ *all_equal = true;
- tmp = intel_de_read(i915, dsc_reg[i]);
+ val = intel_de_read(i915, dsc_reg[0]);
- if (i == 0) {
- val = tmp;
- } else if (check_equal && tmp != val) {
- *check_equal = false;
- break;
- } else if (!check_equal) {
+ for (i = 1; i < dsc_reg_num; i++) {
+ if (intel_de_read(i915, dsc_reg[i]) != val) {
+ *all_equal = false;
break;
}
}
@@ -874,7 +882,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
if (vdsc_cfg->native_420)
vdsc_cfg->bits_per_pixel >>= 1;
- crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+ crtc_state->dsc.compressed_bpp_x16 = vdsc_cfg->bits_per_pixel;
/* PPS 2 */
pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 2);
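compressed_bpp_x16 stores bpp as fixed point with sixteenths in the low 4 bits, which is what lets the QP lookup above distinguish fractional steps. A standalone sketch of the arithmetic; the local helpers mirror the role of the driver's to_bpp_int()/to_bpp_frac() but are assumptions for illustration, not copies of them:

#include <stdio.h>

static int bpp_int(int bpp_x16)  { return bpp_x16 >> 4; }
/* Fractional part scaled to 4 decimal places, e.g. 0.5 -> 5000. */
static int bpp_frac(int bpp_x16) { return (bpp_x16 & 0xf) * 10000 / 16; }

int main(void)
{
	int bpp_x16 = (6 << 4) | 8; /* 6.5 bpp in x16 fixed point */
	int bpp = bpp_int(bpp_x16);
	int frac = bpp_frac(bpp_x16);
	/* QP rows step by 0.5 bpp: 6.0..6.4375 -> row 0, 6.5..6.9375 -> row 1. */
	int bpp_i = (bpp - 6) + (frac < 5000 ? 0 : 1);

	printf("int=%d frac=%d row=%d\n", bpp, frac, bpp_i); /* int=6 frac=5000 row=1 */
	return 0;
}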
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index eb5bd0743..f542ee1db 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 245a64332..8bba6c2e5 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -18,10 +18,10 @@
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
-#include "gt/intel_gt.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
@@ -630,6 +630,18 @@ skl_plane_disable_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
+static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
static void
icl_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
@@ -643,7 +655,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
+ icl_plane_disable_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1007,7 +1019,8 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* The DPT object contains only one vma, so the VMA's offset
* within the DPT is always 0.
*/
- drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start);
+ drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
+ plane_state->dpt_vma->node.start);
drm_WARN_ON(&i915->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1197,6 +1210,48 @@ skl_plane_update_arm(struct intel_plane *plane,
skl_plane_surf(plane_state, 0));
}
+static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_rect *clip;
+ u32 val;
+ int x, y;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
+
+ x = plane_state->view.color_plane[color_plane].x;
+
+ /*
+ * From Bspec: UV surface Start Y Position = half of Y plane Y
+ * start position.
+ */
+ if (!color_plane)
+ y = plane_state->view.color_plane[color_plane].y + clip->y1;
+ else
+ y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
+
+ val = y << 16 | x;
+
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
+ val);
+
+ /* Sizes are 0 based */
+ val = (drm_rect_height(clip) - 1) << 16;
+ val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
+}
+
static void
icl_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1269,7 +1324,24 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+ icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+}
+
+static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+ else
+ icl_plane_disable_sel_fetch_arm(plane, crtc_state);
}
static void
@@ -1296,7 +1368,7 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
- intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+ icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state);
/*
* The control register self-arms if the plane was previously
@@ -1855,16 +1927,19 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
}
}
-static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+static void check_protection(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- return intel_pxp_key_check(i915->pxp, obj, false) == 0;
-}
+ if (DISPLAY_VER(i915) < 11)
+ return;
-static bool pxp_is_borked(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+ plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
+ plane_state->force_black = i915_gem_object_is_protected(obj) &&
+ !plane_state->decrypt;
}
static int skl_plane_check(struct intel_crtc_state *crtc_state,
@@ -1911,10 +1986,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (DISPLAY_VER(dev_priv) >= 11) {
- plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
- plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
- }
+ check_protection(plane_state);
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
@@ -2218,6 +2290,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+ return caps;
+
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
@@ -2489,7 +2564,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
- if (!dev_priv->params.enable_dpt &&
+ if (!dev_priv->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) {
drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n");
goto error;
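The selective-fetch plumbing moved from intel_psr.c into the plane code: intel_psr2_sel_fetch_update() now marks a fully-clipped plane with y1 = y2 = -1, and icl_plane_update_sel_fetch_arm() keys enable/disable off drm_rect_height() > 0. A standalone illustration of that marker convention, with a minimal struct standing in for struct drm_rect:

#include <stdio.h>

struct rect { int y1, y2; };

static int rect_height(const struct rect *r) { return r->y2 - r->y1; }

int main(void)
{
	struct rect off = { .y1 = -1, .y2 = -1 };  /* plane outside the clip */
	struct rect on  = { .y1 = 64, .y2 = 256 }; /* dirty area to fetch */

	printf("off: height=%d -> disable sel fetch\n", rect_height(&off)); /* 0 */
	printf("on:  height=%d -> enable sel fetch\n",  rect_height(&on));  /* 192 */
	return 0;
}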
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 99b8ccdc3..56588d6e2 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -412,7 +412,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->params.enable_sagv)
+ if (!i915->display.params.enable_sagv)
return false;
if (DISPLAY_VER(i915) >= 12)
@@ -3702,7 +3702,8 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
};
seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
- seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv));
+ seq_printf(m, "SAGV modparam: %s\n",
+ str_enabled_disabled(i915->display.params.enable_sagv));
seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f488394d3..9b33b8a74 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -561,6 +561,12 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
+static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+{
+ return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+}
+
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -570,7 +576,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -589,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
@@ -627,8 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
temp = intel_de_read(dev_priv, port_ctrl);
@@ -664,8 +669,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
@@ -955,9 +959,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -1529,16 +1532,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
}
}
-static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
-
- intel_dsi_vbt_gpio_cleanup(intel_dsi);
- intel_encoder_destroy(encoder);
-}
-
static const struct drm_encoder_funcs intel_dsi_funcs = {
- .destroy = intel_dsi_encoder_destroy,
+ .destroy = intel_encoder_destroy,
};
static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e38f06a6e..dcbfe32fd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -279,7 +279,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
}
static struct i915_gem_proto_context *
-proto_context_create(struct drm_i915_private *i915, unsigned int flags)
+proto_context_create(struct drm_i915_file_private *fpriv,
+ struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_proto_context *pc, *err;
@@ -287,6 +288,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
if (!pc)
return ERR_PTR(-ENOMEM);
+ pc->fpriv = fpriv;
pc->num_user_engines = -1;
pc->user_engines = NULL;
pc->user_flags = BIT(UCONTEXT_BANNABLE) |
@@ -1622,6 +1624,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
err = PTR_ERR(ppgtt);
goto err_ctx;
}
+ ppgtt->vm.fpriv = pc->fpriv;
vm = &ppgtt->vm;
}
if (vm)
@@ -1741,7 +1744,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
/* 0 reserved for invalid/unassigned ppgtt */
xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(file_priv, i915, 0);
if (IS_ERR(pc)) {
err = PTR_ERR(pc);
goto err;
@@ -1823,6 +1826,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->vm_id = id;
+ ppgtt->vm.fpriv = file_priv;
return 0;
err_put:
@@ -2285,7 +2289,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ext_data.pc = proto_context_create(i915, args->flags);
+ ext_data.pc = proto_context_create(file->driver_priv, i915,
+ args->flags);
if (IS_ERR(ext_data.pc))
return PTR_ERR(ext_data.pc);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cb78214a7..03bc7f9d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -188,6 +188,9 @@ struct i915_gem_proto_engine {
* CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE.
*/
struct i915_gem_proto_context {
+ /** @fpriv: Client which creates the context */
+ struct drm_i915_file_private *fpriv;
+
/** @vm: See &i915_gem_context.vm */
struct i915_address_space *vm;
@@ -409,9 +412,9 @@ struct i915_gem_context {
/** @stale: tracks stale engines to be destroyed */
struct {
- /** @lock: guards engines */
+ /** @stale.lock: guards engines */
spinlock_t lock;
- /** @engines: list of stale engines */
+ /** @stale.engines: list of stale engines */
struct list_head engines;
} stale;
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 683fd8d31..555022c06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
#include "display/intel_frontbuffer.h"
@@ -253,6 +254,8 @@ struct i915_execbuffer {
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
+ intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -1156,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP)
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
else
io_mapping_unmap_atomic((void __iomem *)vaddr);
}
@@ -1172,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
if (cache->vaddr & KMAP) {
struct page *page = i915_gem_object_get_page(obj, cache->page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) |
(unsigned long)vaddr;
} else {
@@ -1202,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
if (cache->vaddr & CLFLUSH_AFTER)
mb();
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
i915_gem_object_finish_access(obj);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1234,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct page *page;
if (cache->vaddr) {
- kunmap_atomic(unmask_page(cache->vaddr));
+ kunmap_local(unmask_page(cache->vaddr));
} else {
unsigned int flushes;
int err;
@@ -1256,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
if (!obj->mm.dirty)
set_page_dirty(page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
cache->page = pageno;
@@ -1678,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
size = nreloc * sizeof(*relocs);
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ relocs = kvmalloc_array(1, size, GFP_KERNEL);
if (!relocs) {
err = -ENOMEM;
goto err;
@@ -2719,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb)
for_each_child(ce, child)
intel_context_get(child);
- intel_gt_pm_get(gt);
+ eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
- intel_gt_pm_get(to_gt(gt->i915));
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2765,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915));
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@@ -2785,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb)
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915));
- intel_gt_pm_put(eb->gt);
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
+ intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
intel_context_put(eb->context);
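intel_gt_pm_get() now returns an intel_wakeref_t that must be handed back to intel_gt_pm_put(), so every release pairs with a specific acquisition, which helps leak tracking. A toy model of that cookie-passing contract (counter and cookie scheme are invented for the sketch):

#include <stdio.h>

typedef unsigned long wakeref_t;

static int pm_count;
static wakeref_t next_cookie;

/* get() hands out a cookie, put() takes it back; a tracking build can
 * then pair each release with the acquisition that produced it. */
static wakeref_t gt_pm_get(void)
{
	pm_count++;
	return ++next_cookie;
}

static void gt_pm_put(wakeref_t ref)
{
	(void)ref; /* a tracking build would validate/unregister the cookie */
	pm_count--;
}

int main(void)
{
	wakeref_t wakeref = gt_pm_get();

	printf("held: %d\n", pm_count); /* 1 */
	gt_pm_put(wakeref);
	printf("held: %d\n", pm_count); /* 0 */
	return 0;
}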
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 6bc26b4b0..ea7561ae6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct sg_table *st;
struct scatterlist *sg;
unsigned int npages; /* restricted by sg_alloc_table */
- int max_order = MAX_ORDER;
+ int max_order = MAX_PAGE_ORDER;
unsigned int max_segment;
gfp_t gfp;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c26d87555..58e6c680f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -106,6 +106,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
+#ifdef CONFIG_PROC_FS
+ INIT_LIST_HEAD(&obj->client_link);
+#endif
+
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
@@ -293,6 +297,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ /* We need to keep this alive for RCU read access from fdinfo. */
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
+
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -389,9 +397,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->ops->release)
obj->ops->release(obj);
- if (obj->mm.n_placements > 1)
- kfree(obj->mm.placements);
-
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
@@ -442,6 +447,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+ i915_drm_client_remove_object(obj);
+
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
@@ -493,17 +500,15 @@ static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
pgoff_t idx = offset >> PAGE_SHIFT;
- void *src_map;
void *src_ptr;
- src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
-
- src_ptr = src_map + offset_in_page(offset);
+ src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
+ + offset_in_page(offset);
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_virt_range(src_ptr, size);
memcpy(dst, src_ptr, size);
- kunmap_atomic(src_map);
+ kunmap_local(src_ptr);
}
static void
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 229240400..0c5cdab27 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -302,6 +302,18 @@ struct drm_i915_gem_object {
*/
struct i915_address_space *shares_resv_from;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @client: @i915_drm_client which created the object
+ */
+ struct i915_drm_client *client;
+
+ /**
+ * @client_link: Link into @i915_drm_client.objects_list
+ */
+ struct list_head client_link;
+#endif
+
union {
struct rcu_head rcu;
struct llist_node freed;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 5df128e2f..ef85c6dc9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
dst = vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- void *src;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_st;
- src = kmap_atomic(page);
- memcpy(dst, src, PAGE_SIZE);
+ memcpy_from_page(dst, page, 0, PAGE_SIZE);
drm_clflush_virt_range(dst, PAGE_SIZE);
- kunmap_atomic(src);
put_page(page);
dst += PAGE_SIZE;
@@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- char *dst;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
continue;
- dst = kmap_atomic(page);
drm_clflush_virt_range(src, PAGE_SIZE);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ memcpy_to_page(page, 0, src, PAGE_SIZE);
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
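memcpy_from_page()/memcpy_to_page() wrap the map-copy-unmap dance the old code spelled out by hand. Roughly what the from-page variant expands to, simplified from include/linux/highmem.h (the real helper adds debug checks):

/* Kernel-context sketch; assumes <linux/highmem.h> and <linux/string.h>. */
static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}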
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 73a4a4eb2..38b72d865 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
+ pagefault_disable();
unwritten = __copy_from_user_inatomic(vaddr + pg,
user_data,
len);
- kunmap_atomic(vaddr);
+ pagefault_enable();
+ kunmap_local(vaddr);
err = aops->write_end(obj->base.filp, mapping, offset, len,
len - unwritten, page, data);
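kmap_atomic() disabled page faults as a side effect; kmap_local_page() does not, which is why the shmem_pwrite() conversion adds an explicit pagefault_disable()/enable() pair around __copy_from_user_inatomic(). The resulting recipe, sketched as a kernel-context helper (assumes <linux/highmem.h> and <linux/uaccess.h>; the helper itself is illustrative, not driver code):

static unsigned long copy_user_inatomic_to_page(struct page *page,
						unsigned int pg,
						const void __user *user_data,
						int len)
{
	void *vaddr = kmap_local_page(page);
	unsigned long unwritten;

	/* _inatomic user copies require page faults to be off explicitly. */
	pagefault_disable();
	unwritten = __copy_from_user_inatomic(vaddr + pg, user_data, len);
	pagefault_enable();
	kunmap_local(vaddr);

	return unwritten;
}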
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 1a766d8e7..8c88075ee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -386,6 +386,27 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ /* Wa_14019821291 */
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ /*
+ * This workaround is primarily implemented by the BIOS. We
+ * just need to figure out whether the BIOS has applied the
+ * workaround (meaning the programmed address falls within
+ * the DSM) and, if so, reserve that part of the DSM to
+ * prevent accidental reuse. The DSM location should be just
+ * below the WOPCM.
+ */
+ u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
+ MTL_GSCPSMI_BASEADDR_LSB,
+ MTL_GSCPSMI_BASEADDR_MSB);
+ if (gscpsmi_base >= i915->dsm.stolen.start &&
+ gscpsmi_base < i915->dsm.stolen.end) {
+ *base = gscpsmi_base;
+ *size = i915->dsm.stolen.end - gscpsmi_base;
+ return;
+ }
+ }
+
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
case GEN8_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6b9f6cf50..3ff3d8889 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
- GEM_BUG_ON(order > MAX_ORDER);
+ GEM_BUG_ON(order > MAX_PAGE_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;
@@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
goto err_unlock;
for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+ u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(ptr, PAGE_SIZE);
@@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (ptr[dword] != val) {
pr_err("n=%lu ptr[%u]=%u, val=%u\n",
n, dword, ptr[dword], val);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
err = -EINVAL;
break;
}
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
}
i915_gem_object_finish_access(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3bef1beec..2a0c0634d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
- void *map;
u32 *cpu;
int err;
@@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
+ cpu = kmap_local_page(page) + offset_in_page(offset);
if (needs_clflush & CLFLUSH_BEFORE)
drm_clflush_virt_range(cpu, sizeof(*cpu));
@@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
if (needs_clflush & CLFLUSH_AFTER)
drm_clflush_virt_range(cpu, sizeof(*cpu));
- kunmap_atomic(map);
+ kunmap_local(cpu);
i915_gem_object_finish_access(ctx->obj);
out:
@@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
- void *map;
u32 *cpu;
int err;
@@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
+ cpu = kmap_local_page(page) + offset_in_page(offset);
if (needs_clflush & CLFLUSH_BEFORE)
drm_clflush_virt_range(cpu, sizeof(*cpu));
*v = *cpu;
- kunmap_atomic(map);
+ kunmap_local(cpu);
i915_gem_object_finish_access(ctx->obj);
out:
@@ -85,6 +81,7 @@ out:
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -99,7 +96,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -112,12 +109,13 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -132,7 +130,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -145,7 +143,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7021b6e9b..89d4dc8b6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ map = kmap_local_page(i915_gem_object_get_page(obj, n));
for (m = 0; m < DW_PER_PAGE; m++)
map[m] = value;
if (!has_llc)
drm_clflush_virt_range(map, PAGE_SIZE);
- kunmap_atomic(map);
+ kunmap_local(map);
}
i915_gem_object_finish_access(obj);
@@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (n = 0; n < real_page_count(obj); n++) {
u32 *map, m;
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ map = kmap_local_page(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(map, PAGE_SIZE);
@@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
}
out_unmap:
- kunmap_atomic(map);
+ kunmap_local(map);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e57f93900..d684a70f2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg)
}
if (memchr_inv(ptr, 0, dmabuf->size)) {
- pr_err("Exported object not initialiased to zero!\n");
+ pr_err("Exported object not initialised to zero!\n");
err = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 72957a36a..2c51a2c45 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -630,14 +630,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(to_gt(i915));
+ intel_gt_pm_get_untracked(to_gt(i915));
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
- intel_gt_pm_put(to_gt(i915));
+ intel_gt_pm_put_untracked(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@@ -778,6 +778,7 @@ err_obj:
static int gtt_set(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -786,7 +787,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -798,12 +799,13 @@ static int gtt_set(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_check(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -812,7 +814,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -828,7 +830,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index e199d7dbb..2b0327cc4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -83,7 +83,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
int err;
u32 id;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(fpriv, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
@@ -152,7 +152,7 @@ kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx;
struct i915_gem_proto_context *pc;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(NULL, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 86a04afff..e1bf13e3d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) ||
+ if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
IS_DG2(rq->i915)) {
u32 *cs;
@@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
flags |= PIPE_CONTROL_FLUSH_L3;
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
/* dummy PIPE_CONTROL + depth flush */
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 9895e18df..81bf22163 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -5,6 +5,7 @@
#include <linux/log2.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gen8_ppgtt.h"
@@ -222,6 +223,9 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ if (vm->rsvd.obj)
+ i915_gem_object_put(vm->rsvd.obj);
+
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -950,6 +954,44 @@ err_pd:
return ERR_PTR(err);
}
+static int gen8_init_rsvd(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ if (!intel_gt_needs_wa_16018031267(vm->gt))
+ return 0;
+
+ /* The memory will be used only by the GPU. */
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
+ I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (ret)
+ goto unref;
+
+ vm->rsvd.vma = i915_vma_make_unshrinkable(vma);
+ vm->rsvd.obj = obj;
+ vm->total -= vma->node.size;
+ return 0;
+unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1031,6 +1073,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
+ err = gen8_init_rsvd(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
return ppgtt;
err_put:
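gen8_init_rsvd() above allocates the Wa_16018031267 scratch page with a preferred-then-fallback strategy: try device-local memory first, fall back to internal (shmem-backed) memory if the platform has none, and fail only when both attempts fail; the reserved VMA is then subtracted from vm->total so userspace can never be handed that range. A hedged sketch of the allocation idiom in isolation:

/*
 * Sketch of the allocate-with-fallback idiom from gen8_init_rsvd();
 * the caller still checks IS_ERR() on the result.
 */
static struct drm_i915_gem_object *
alloc_rsvd_scratch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj))	/* no lmem on this platform, or it failed */
		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

	return obj;		/* ERR_PTR-encoded only if both failed */
}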
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index ecc990ec1..d650beb8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,11 +28,14 @@ static void irq_disable(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref;
+
/*
* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference.
*/
- if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
+ wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt);
+ if (GEM_WARN_ON(!wakeref))
return;
/*
@@ -41,7 +44,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- WRITE_ONCE(b->irq_armed, true);
+ WRITE_ONCE(b->irq_armed, wakeref);
/* Requests may have completed before we could enable the interrupt. */
if (!b->irq_enabled++ && b->irq_enable(b))
@@ -61,12 +64,14 @@ static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref = b->irq_armed;
+
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
b->irq_disable(b);
- WRITE_ONCE(b->irq_armed, false);
- intel_gt_pm_put_async(b->irq_engine->gt);
+ WRITE_ONCE(b->irq_armed, 0);
+ intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 72dfd3748..bdf09fd67 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include "intel_engine_types.h"
+#include "intel_wakeref.h"
/*
* Rather than have every client wait upon all user interrupts,
@@ -43,7 +44,7 @@ struct intel_breadcrumbs {
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
- bool irq_armed;
+ intel_wakeref_t irq_armed;
/* Not all breadcrumbs are attached to physical HW */
intel_engine_mask_t engine_mask;
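With irq_armed widened from bool to intel_wakeref_t, a non-zero value now serves double duty: it still answers "is the irq armed?" and it is also the tracking cookie that the reworked intel_gt_pm_put_async(gt, wakeref) requires. A sketch of the idea, assuming the tracked PM API introduced later in this series:

/*
 * Sketch only: zero means disarmed, non-zero is the live cookie
 * that must be returned on release.
 */
static bool arm_irq(struct intel_breadcrumbs *b)
{
	intel_wakeref_t wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt);

	if (!wakeref)
		return false;		/* GT parked, nothing to arm */

	WRITE_ONCE(b->irq_armed, wakeref);
	return true;
}

static void disarm_irq(struct intel_breadcrumbs *b)
{
	intel_wakeref_t wakeref = b->irq_armed;

	WRITE_ONCE(b->irq_armed, 0);
	intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}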
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index a53b26178..a2f124574 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
+#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -50,6 +51,7 @@ intel_context_create(struct intel_engine_cs *engine)
int intel_context_alloc_state(struct intel_context *ce)
{
+ struct i915_gem_context *ctx;
int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
@@ -66,6 +68,18 @@ int intel_context_alloc_state(struct intel_context *ce)
goto unlock;
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (ctx) {
+ if (ctx->client)
+ i915_drm_client_add_context_objects(ctx->client,
+ ce);
+ i915_gem_context_put(ctx);
+ }
}
unlock:
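intel_context_alloc_state() above takes a temporary reference on the GEM context with the standard RCU lookup pattern: dereference under rcu_read_lock(), and only use the object if kref_get_unless_zero() succeeds, which closes the race with a concurrent final put. A generic sketch of that pattern, independent of the i915 types:

#include <linux/kref.h>
#include <linux/rcupdate.h>

struct tracked { struct kref ref; };

/* Returns a referenced object or NULL; caller must kref_put() it. */
static struct tracked *tracked_get_rcu(struct tracked __rcu **slot)
{
	struct tracked *t;

	rcu_read_lock();
	t = rcu_dereference(*slot);
	if (t && !kref_get_unless_zero(&t->ref))
		t = NULL;	/* lost the race with the last put */
	rcu_read_unlock();

	return t;
}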
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a80e3b7c2..25564c015 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce)
return;
ce->ops->enter(ce);
- intel_gt_pm_get(ce->vm->gt);
+ ce->wakeref = intel_gt_pm_get(ce->vm->gt);
}
static inline void intel_context_mark_active(struct intel_context *ce)
@@ -229,7 +229,7 @@ static inline void intel_context_exit(struct intel_context *ce)
if (--ce->active_count)
return;
- intel_gt_pm_put_async(ce->vm->gt);
+ intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
ce->ops->exit(ce);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index aceaac28a..7eccbd70d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#include "intel_wakeref.h"
#include "uc/intel_guc_fwif.h"
@@ -112,6 +113,7 @@ struct intel_context {
u32 ring_size;
struct intel_ring *ring;
struct intel_timeline *timeline;
+ intel_wakeref_t wakeref;
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4a11219e5..84be97f95 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -47,7 +47,7 @@
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3
struct engine_info {
@@ -908,6 +908,23 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask &= ~BIT(GSC0);
}
+ /*
+ * Do not create the command streamer for CCS slices beyond the first.
+ * All workloads submitted to the first engine will be shared among
+ * all the slices.
+ *
+ * Once the user is allowed to customize the CCS mode, this check
+ * will need to be removed.
+ */
+ if (IS_DG2(gt->i915)) {
+ u8 first_ccs = __ffs(CCS_MASK(gt));
+
+ /* Mask off all the CCS engines */
+ info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ /* Put back in the first CCS engine */
+ info->engine_mask |= BIT(_CCS(first_ccs));
+ }
+
return info->engine_mask;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9a527e1f5..1a8e2b7db 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -188,7 +188,7 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
- attr.priority = 0;
+ attr.priority = I915_PRIORITY_NORMAL;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 5a3a5b29d..fb7bff27b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -63,7 +63,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
ENGINE_TRACE(engine, "\n");
- intel_gt_pm_get(engine->gt);
+ engine->wakeref_track = intel_gt_pm_get(engine->gt);
/* Discard stale context state from across idling */
ce = engine->kernel_context;
@@ -122,6 +122,7 @@ __queue_and_release_pm(struct i915_request *rq,
*/
GEM_BUG_ON(rq->context->active_count != 1);
__intel_gt_pm_get(engine->gt);
+ rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
/*
* We have to serialise all potential retirement paths with our
@@ -282,7 +283,7 @@ static int __engine_park(struct intel_wakeref *wf)
engine->park(engine);
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async(engine->gt, engine->wakeref_track);
return 0;
}
@@ -293,7 +294,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
+ intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index d68675925..1d97c435a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -10,6 +10,7 @@
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index fdd4ddd3a..a8eac59e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -118,9 +118,15 @@
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define PER_CTX_BB_FORCE BIT(2)
+#define PER_CTX_BB_VALID BIT(0)
+
#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define ECOSKPD(base) _MMIO((base) + 0x1d0)
+#define XEHP_BLITTER_SCHEDULING_MODE_MASK REG_GENMASK(12, 11)
+#define XEHP_BLITTER_ROUND_ROBIN_MODE \
+ REG_FIELD_PREP(XEHP_BLITTER_SCHEDULING_MODE_MASK, 1)
#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY REG_BIT(3)
#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3)
@@ -257,5 +263,7 @@
#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+#define VDBOX_CGCTL3F1C(base) _MMIO((base) + 0x3f1c)
+#define MFXPIPE_CLKGATE_DIS REG_BIT(3)
#endif /* __INTEL_ENGINE_REGS__ */
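The new ECOSKPD bits use i915's typed register-field helpers: REG_GENMASK(12, 11) declares the two-bit scheduling-mode field and REG_FIELD_PREP() shifts a value into it with compile-time width checking. A worked example using the generic <linux/bitfield.h> equivalents, which these helpers mirror:

#include <linux/bits.h>
#include <linux/bitfield.h>

#define SCHED_MODE_MASK		GENMASK(12, 11)			/* == 0x1800 */
#define ROUND_ROBIN_MODE	FIELD_PREP(SCHED_MODE_MASK, 1)	/* == 0x0800 */

/* i.e. XEHP_BLITTER_ROUND_ROBIN_MODE places the value 1 at bit 11 */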
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 876976025..960e6be20 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -446,7 +446,9 @@ struct intel_engine_cs {
unsigned long serial;
unsigned long wakeref_serial;
+ intel_wakeref_t wakeref_track;
struct intel_wakeref wakeref;
+
struct file *default_state;
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 42e09f158..b061a0a0d 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -630,7 +630,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async_untracked(engine->gt);
/*
* If this is part of a virtual engine, its next request may
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15fc8e470..21a7e3191 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -245,16 +245,15 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc)) {
+ if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
guc_ggtt_ct_invalidate(gt);
- } else if (GRAPHICS_VER(i915) >= 12) {
+ else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
- } else {
+ else
intel_uncore_write_fw(gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
}
}
@@ -297,7 +296,7 @@ static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
return intel_gt_is_bind_context_ready(gt);
}
-static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
+static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
{
struct intel_context *ce;
struct intel_gt *gt = ggtt->vm.gt;
@@ -314,7 +313,8 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
* would conflict with fs_reclaim trying to allocate memory while
* doing rpm_resume().
*/
- if (!intel_gt_pm_get_if_awake(gt))
+ *wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!*wakeref)
return NULL;
intel_engine_pm_get(ce->engine);
@@ -322,10 +322,10 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
return ce;
}
-static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
+static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
{
intel_engine_pm_put(ce->engine);
- intel_gt_pm_put(ce->engine->gt);
+ intel_gt_pm_put(ce->engine->gt, wakeref);
}
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
@@ -338,12 +338,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
struct sgt_iter iter;
struct i915_request *rq;
struct intel_context *ce;
+ intel_wakeref_t wakeref;
u32 *cs;
if (!num_entries)
return true;
- ce = gen8_ggtt_bind_get_ce(ggtt);
+ ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
if (!ce)
return false;
@@ -419,13 +420,13 @@ queue_err_rq:
offset += n_ptes;
}
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return true;
err_rq:
i915_request_put(rq);
put_ce:
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return false;
}
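Because gen8_ggtt_bind_get_ce() both conditionally acquires the GT wakeref and returns the bind context, the cookie is threaded back through an out-parameter so the caller can hand it to the matching put. The shape of that contract, sketched with hypothetical names:

/* On success *wf holds a live cookie; on NULL return nothing is held. */
static struct intel_context *
bind_ce_get(struct intel_gt *gt, struct intel_context *ce, intel_wakeref_t *wf)
{
	*wf = intel_gt_pm_get_if_awake(gt);
	if (!*wf)
		return NULL;		/* GT parked: caller falls back */

	intel_engine_pm_get(ce->engine);
	return ce;
}

static void bind_ce_put(struct intel_context *ce, intel_wakeref_t wf)
{
	intel_engine_pm_put(ce->engine);
	intel_gt_pm_put(ce->engine->gt, wf);
}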
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index 7ab3ca0f9..013c64251 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -21,8 +21,11 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
*
- * @gem_obj: scratch memory GSC operations
- * @intf : gsc interface
+ * @intf: gsc interface
+ * @intf.adev: MEI aux. device for this @intf
+ * @intf.gem_obj: scratch memory for GSC operations
+ * @intf.irq: IRQ for this device (%-1 for no IRQ)
+ * @intf.id: this interface's id number/index
*/
struct intel_gsc {
struct intel_gsc_intf {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ba1186fc5..6a2c2718b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -451,7 +451,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
+ RING_TAIL(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
return I915_MAP_WC;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 970bedf6b..003eb93b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -87,8 +87,13 @@ static inline bool gt_is_root(struct intel_gt *gt)
return !gt->info.id;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ intel_gt_needs_wa_16018031267(engine->gt) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -114,6 +119,11 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+ return guc_to_gt(guc)->i915;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
@@ -167,6 +177,20 @@ void intel_gt_release_all(struct drm_i915_private *i915);
(id__)++) \
for_each_if(((gt__) = (i915__)->gt[(id__)]))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, gt__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (gt__)->engine[(id__)])
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
+ (tmp__) ? \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ 0;)
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
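Moving for_each_engine() and for_each_engine_masked() into intel_gt.h lets GT code iterate engines without pulling in all of i915_drv.h (the debugfs hunk below drops exactly that include). Typical usage, sketched; note that the masked variant destructively consumes its tmp copy of the mask:

struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_engine_mask_t tmp;

for_each_engine(engine, gt, id)
	pr_info("%s present\n", engine->name);

for_each_engine_masked(engine, gt, gt->info.engine_mask, tmp)
	pr_info("%s selected\n", engine->name);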
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
new file mode 100644
index 000000000..044219c59
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
+#include "intel_gt_regs.h"
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt)
+{
+ int cslice;
+ u32 mode = 0;
+ int first_ccs = __ffs(CCS_MASK(gt));
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /* Build the value for the fixed CCS load balancing */
+ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+ if (CCS_MASK(gt) & BIT(cslice))
+ /*
+ * If available, assign the cslice
+ * to the first available engine...
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
+
+ else
+ /*
+ * ... otherwise, mark the cslice as
+ * unavailable since no CCS dispatches here
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice,
+ XEHP_CCS_MODE_CSLICE_MASK);
+ }
+
+ intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
+}
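XEHP_CCS_MODE packs one 3-bit selector per compute slice: a present cslice is routed to the first CCS engine, an absent one gets the all-ones reserved value. A standalone worked example, with fixed constants standing in for the i915 macros: with ccs_mask = 0b0101, first_ccs = 0, the register value comes out as 0x0e38.

#include <stdio.h>

int main(void)
{
	const unsigned int ccs_mask = 0x5;	/* CCS0 and CCS2 present */
	const unsigned int width = 3;		/* bits per cslice field */
	const unsigned int rsvd = 0x7;		/* "no CCS here" selector */
	unsigned int first = __builtin_ctz(ccs_mask);	/* == 0 */
	unsigned int mode = 0;

	for (int cslice = 0; cslice < 4; cslice++)
		mode |= (ccs_mask & (1u << cslice) ? first : rsvd)
			<< (cslice * width);

	printf("XEHP_CCS_MODE = %#06x\n", mode);	/* prints 0x0e38 */
	return 0;
}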
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
new file mode 100644
index 000000000..9e5549cae
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CCS_MODE_H__
+#define __INTEL_GT_CCS_MODE_H__
+
+struct intel_gt;
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt);
+
+#endif /* __INTEL_GT_CCS_MODE_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
index 8f9b874fd..3aa1d014c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
@@ -6,8 +6,8 @@
#include <drm/drm_print.h>
-#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
+#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 34913912d..e253750a5 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -388,8 +388,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* registers. This wakeref will be released in the unlock
* routine.
*
- * This is expected to become a formally documented/numbered
- * workaround soon.
+ * Wa_22018931422
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index f5899d503..220ac4f92 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -28,19 +28,20 @@
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
+ intel_wakeref_t wakeref;
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
}
static void runtime_begin(struct intel_gt *gt)
@@ -138,7 +139,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
- intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -167,7 +168,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GT_TRACE(gt, "force:%s", str_yes_no(force));
+ GT_TRACE(gt, "force:%s\n", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -236,6 +237,7 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
err = intel_gt_has_unrecoverable_error(gt);
@@ -252,7 +254,7 @@ int intel_gt_resume(struct intel_gt *gt)
*/
gt_sanitize(gt, true);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
@@ -295,7 +297,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
intel_gt_bind_context_set_ready(gt);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b1eeb5b33..911fd0160 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -16,19 +16,28 @@ static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
return intel_wakeref_is_active(&gt->wakeref);
}
-static inline void intel_gt_pm_get(struct intel_gt *gt)
+static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
{
intel_wakeref_get(&gt->wakeref);
}
+static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_gt_pm_get_untracked(gt);
+ return intel_wakeref_track(&gt->wakeref);
+}
+
static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
__intel_wakeref_get(&gt->wakeref);
}
-static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
- return intel_wakeref_get_if_active(&gt->wakeref);
+ if (!intel_wakeref_get_if_active(&gt->wakeref))
+ return 0;
+
+ return intel_wakeref_track(&gt->wakeref);
}
static inline void intel_gt_pm_might_get(struct intel_gt *gt)
@@ -36,12 +45,18 @@ static inline void intel_gt_pm_might_get(struct intel_gt *gt)
intel_wakeref_might_get(&gt->wakeref);
}
-static inline void intel_gt_pm_put(struct intel_gt *gt)
+static inline void intel_gt_pm_put_untracked(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
}
-static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_untracked(gt);
+}
+
+static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt)
{
intel_wakeref_put_async(&gt->wakeref);
}
@@ -51,9 +66,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
intel_wakeref_might_put(&gt->wakeref);
}
-#define with_intel_gt_pm(gt, tmp) \
- for (tmp = 1, intel_gt_pm_get(gt); tmp; \
- intel_gt_pm_put(gt), tmp = 0)
+static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_async_untracked(gt);
+}
+
+#define with_intel_gt_pm(gt, wf) \
+ for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@@ -64,7 +84,7 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
- for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
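This header is the core of the series: intel_gt_pm_get() now returns an intel_wakeref_t cookie that must be handed back to intel_gt_pm_put(), letting the wakeref tracker pair every acquire with its release, while the _untracked variants keep the old fire-and-forget behaviour for callers that cannot carry the cookie. Typical usage after this change, sketched:

static void touch_hw(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	wakeref = intel_gt_pm_get(gt);
	/* ... access hardware ... */
	intel_gt_pm_put(gt, wakeref);

	/* or the scoped form: the put runs automatically on exit */
	with_intel_gt_pm(gt, wakeref) {
		/* ... access hardware ... */
	}
}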
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index f900cc68d..7114c116e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -27,7 +27,7 @@
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
- intel_gt_pm_get(gt);
+ intel_gt_pm_get_untracked(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
}
@@ -36,7 +36,7 @@ void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put_untracked(gt);
atomic_dec(&gt->user_wakeref);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index eecd0a87a..743fe3566 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -469,6 +469,9 @@
#define XEHP_PSS_MODE2 MCR_REG(0x703c)
#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
+#define XEHP_PSS_CHICKEN MCR_REG(0x7044)
+#define FD_END_COLLECT REG_BIT(5)
+
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
@@ -537,6 +540,9 @@
#define XEHP_SQCM MCR_REG(0x8724)
#define EN_32B_ACCESS REG_BIT(30)
+#define MTL_GSCPSMI_BASEADDR_LSB _MMIO(0x880c)
+#define MTL_GSCPSMI_BASEADDR_MSB _MMIO(0x8810)
+
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
@@ -1471,8 +1477,14 @@
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GEN12_RCU_MODE _MMIO(0x14800)
+#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+#define XEHP_CCS_MODE _MMIO(0x14804)
+#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
+#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
+#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
+
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 4fbed27ef..86f73fe55 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -63,6 +63,9 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -84,6 +87,9 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -95,6 +101,16 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: It is suspected that some Address Translation Service (ATS)
+ * issue on IOMMU is causing CAT errors to occur on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -109,6 +125,16 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: It is suspected that some Address Translation Service (ATS)
+ * issue on IOMMU is causing CAT errors to occur on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b471edac2..6b85222ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -249,8 +249,13 @@ struct i915_address_space {
struct work_struct release_work;
struct drm_mm mm;
+ struct {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ } rsvd;
struct intel_gt *gt;
struct drm_i915_private *i915;
+ struct drm_i915_file_private *fpriv;
struct device *dma;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eaf66d903..7c367ba8d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -829,6 +829,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
}
static void
+lrc_setup_bb_per_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr)
+{
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ ctx_bb_ggtt_addr |
+ PER_CTX_BB_FORCE |
+ PER_CTX_BB_VALID;
+}
+
+static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
u32 ctx_bb_ggtt_addr,
@@ -1020,7 +1032,13 @@ static u32 context_wa_bb_offset(const struct intel_context *ce)
return PAGE_SIZE * ce->wa_bb_page;
}
-static u32 *context_indirect_bb(const struct intel_context *ce)
+/*
+ * per_ctx below determines which WABB section is used.
+ * When true, the function returns the location of the
+ * PER_CTX_BB. When false, the function returns the
+ * location of the INDIRECT_CTX.
+ */
+static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
void *ptr;
@@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce)
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
+ ptr += per_ctx ? PAGE_SIZE : 0;
return ptr;
}
@@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
+ /* INDIRECT_CTX and PER_CTX_BB need separate pages. */
+ context_size += PAGE_SIZE * 2;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
@@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return gen12_emit_aux_table_inv(ce->engine, cs);
}
+static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
+{
+ struct intel_gt *gt = ce->engine->gt;
+ int mocs = gt->mocs.uc_index << 1;
+
+ /*
+ * Wa_16018031267 / Wa_16018063123 requires that SW forces the
+ * main copy engine arbitration into round robin mode. We
+ * additionally need to submit the following WABB blt command
+ * to produce 4 subblits, each generating 0-byte write
+ * requests, as the WABB:
+ *
+ * XY_FASTCOLOR_BLT
+ * BG0 -> 5100000E
+ * BG1 -> 0000003F (Dest pitch)
+ * BG2 -> 00000000 (X1, Y1) = (0, 0)
+ * BG3 -> 00040001 (X2, Y2) = (1, 4)
+ * BG4 -> scratch
+ * BG5 -> scratch
+ * BG6-12 -> 00000000
+ * BG13 -> 20004004 (Surf. Width = 2, Surf. Height = 5)
+ * BG14 -> 00000010 (Qpitch = 4)
+ * BG15 -> 00000000
+ */
+ *cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
+ *cs++ = 0;
+ *cs++ = 4 << 16 | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0x20004004;
+ *cs++ = 0x10;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
+ cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
+
+ return cs;
+}
+
+static void
+setup_per_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ /* Place PER_CTX_BB on next page after INDIRECT_CTX */
+ u32 * const start = context_wabb(ce, true);
+ u32 *cs;
+
+ cs = emit(ce, start);
+
+ /* PER_CTX_BB must manually terminate */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
+ lrc_indirect_bb(ce) + PAGE_SIZE);
+}
+
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
- u32 * const start = context_indirect_bb(ce);
+ u32 * const start = context_wabb(ce, false);
u32 *cs;
cs = emit(ce, start);
@@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce,
/* Mutually exclusive wrt to global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
+ setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
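Each context image now carries two workaround-batch pages: the INDIRECT_CTX batch in the first and the PER_CTX_BB in the page after it, selected via context_wabb(ce, per_ctx); unlike the indirect batch, the per-context batch must terminate itself with MI_BATCH_BUFFER_END, which setup_per_ctx_bb() appends. A layout sketch of the selection, under the two-page arrangement introduced above:

/*
 * context_wa_bb_offset(ce) + 0         -> INDIRECT_CTX batch
 * context_wa_bb_offset(ce) + PAGE_SIZE -> PER_CTX_BB batch
 */
static u32 *select_wabb(void *ctx_image, u32 wa_bb_offset, bool per_ctx)
{
	return ctx_image + wa_bb_offset + (per_ctx ? PAGE_SIZE : 0);
}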
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index f602895f6..6a3246240 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -849,13 +849,12 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
- if (sseu->max_slices == 0) {
+ if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
sseu_print_xehp_topology(sseu, p);
- } else {
+ else
sseu_print_hsw_topology(sseu, p);
- }
}
void intel_sseu_print_ss_info(const char *type,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 192ac0e59..59816dd6f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -10,6 +10,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
@@ -51,7 +52,8 @@
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
* engine's MMIO range but that are part of the common RCS/CCS reset domain
- * should be implemented in general_render_compute_wa_init().
+ * should be implemented in general_render_compute_wa_init(). Settings
+ * for CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -777,6 +779,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_18019271663:dg2 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+ /* Wa_14019877138:dg2 */
+ wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -786,8 +791,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
dg2_ctx_gt_tuning_init(engine, wal);
- if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ /*
+ * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
+ * gen12_emit_indirect_ctx_rcs() rather than here on some early
+ * steppings.
+ */
+ if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}
@@ -905,7 +915,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
@@ -1640,7 +1650,8 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* Wa_14018778641 / Wa_18018781329 */
+ /* Wa_14018575942 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1663,8 +1674,22 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
+ for_each_engine(engine, gt, id)
+ if (engine->class == VIDEO_DECODE_CLASS)
+ wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
+ MFXPIPE_CLKGATE_DIS);
+}
+
+static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ wa_16021867713(gt, wal);
+
/*
* Wa_14018778641
* Wa_18018781329
@@ -1674,6 +1699,9 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ /* Wa_22016670082 */
+ wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
+
debug_dump_steering(gt);
}
@@ -1690,7 +1718,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
@@ -1723,7 +1751,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
return;
}
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
else if (IS_PONTEVECCHIO(i915))
pvc_gt_workarounds_init(gt, wal);
@@ -2196,7 +2224,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
if (engine->gt->type == GT_MEDIA)
; /* none yet */
- else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
@@ -2340,14 +2368,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, true);
}
- if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
- /* Wa_22014600077:dg2 */
- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
- 0 /* Wa_14012342262 write-only reg, so skip verification */,
- true);
- }
-
if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
@@ -2782,6 +2802,11 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(engine))
+ wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
+ XEHP_BLITTER_SCHEDULING_MODE_MASK,
+ XEHP_BLITTER_ROUND_ROBIN_MODE);
}
static void
@@ -2811,7 +2836,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
/*
@@ -2827,6 +2852,28 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
+static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /*
+ * Wa_14019159160: This workaround, along with others, leads to
+ * significant challenges in utilizing load balancing among the
+ * CCS slices. Consequently, an architectural decision has been
+ * made to completely disable automatic CCS load balancing.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
+
+ /*
+ * After having disabled automatic load balancing, we need to
+ * assign all slices to a single CCS. We will call it CCS mode 1.
+ */
+ intel_gt_apply_ccs_mode(gt);
+}
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2864,7 +2911,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
}
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
@@ -2915,6 +2963,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+
+ /* Wa_18028616096 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
}
if (IS_DG2_G11(i915)) {
@@ -2943,11 +2994,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
true);
}
- if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) {
- /* Wa_18028616096 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
@@ -2978,8 +3024,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
general_render_compute_wa_init(engine, wal);
+ ccs_engine_wa_mode(engine, wal);
+ }
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
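Several of the new entries (FD_END_COLLECT, the fixed-slice RCU_MODE bit, the ECOSKPD scheduling field) go through wa_masked_en()/wa_masked_field_set(), which target "masked" registers: the upper 16 bits of the write select which of the lower 16 bits actually change, so individual bits can be flipped without a read-modify-write. A worked example, assuming the conventional masked-bit encoding:

#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

/*
 * Enabling bit 5 (e.g. FD_END_COLLECT) writes 0x00200020: the mask
 * half selects bit 5, the value half sets it.
 */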
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 86cecf7a1..5ffa5e30f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -21,20 +21,22 @@ static int cmp_u32(const void *A, const void *B)
return *a - *b;
}
-static void perf_begin(struct intel_gt *gt)
+static intel_wakeref_t perf_begin(struct intel_gt *gt)
{
- intel_gt_pm_get(gt);
+ intel_wakeref_t wakeref = intel_gt_pm_get(gt);
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
+
+ return wakeref;
}
-static int perf_end(struct intel_gt *gt)
+static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref)
{
atomic_dec(&gt->rps.num_waiters);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return igt_flush_test(gt->i915);
}
@@ -133,12 +135,13 @@ static int perf_mi_bb_start(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *batch;
@@ -207,7 +210,7 @@ out:
pr_info("%s: MI_BB_START cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
@@ -260,12 +263,13 @@ static int perf_mi_noop(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *base, *nop;
@@ -364,7 +368,7 @@ out:
pr_info("%s: 16K MI_NOOP cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 273d440a5..bc441ce7b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -84,7 +84,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
- i915_active_unlock_wait(&p->active);
+ wait_var_event_timeout(&p->active, i915_active_is_idle(&p->active), HZ);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 097124170..33351deee 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -81,6 +81,7 @@ static int live_gt_clocks(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (!gt->clock_frequency) { /* unknown */
@@ -91,7 +92,7 @@ static int live_gt_clocks(void *arg)
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for_each_engine(engine, gt, id) {
@@ -128,7 +129,7 @@ static int live_gt_clocks(void *arg)
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 5f826b6dc..e17b8777d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1555,7 +1555,7 @@ static int live_lrc_isolation(void *arg)
return err;
}
-static int indirect_ctx_submit_req(struct intel_context *ce)
+static int wabb_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
@@ -1579,7 +1579,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
static u32 *
-emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+emit_wabb_ctx_canary(const struct intel_context *ce,
+ u32 *cs, bool per_ctx)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
@@ -1587,26 +1588,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
- CTX_BB_CANARY_OFFSET;
+ CTX_BB_CANARY_OFFSET +
+ (per_ctx ? PAGE_SIZE : 0);
*cs++ = 0;
return cs;
}
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, false);
+}
+
+static u32 *
+emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, true);
+}
+
static void
-indirect_ctx_bb_setup(struct intel_context *ce)
+wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
{
- u32 *cs = context_indirect_bb(ce);
+ u32 *cs = context_wabb(ce, per_ctx);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
- setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+ if (per_ctx)
+ setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
+ else
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
-static bool check_ring_start(struct intel_context *ce)
+static bool check_ring_start(struct intel_context *ce, bool per_ctx)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
- LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
+ (per_ctx ? PAGE_SIZE : 0);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
@@ -1618,21 +1636,21 @@ static bool check_ring_start(struct intel_context *ce)
return false;
}
-static int indirect_ctx_bb_check(struct intel_context *ce)
+static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
{
int err;
- err = indirect_ctx_submit_req(ce);
+ err = wabb_ctx_submit_req(ce);
if (err)
return err;
- if (!check_ring_start(ce))
+ if (!check_ring_start(ce, per_ctx))
return -EINVAL;
return 0;
}
-static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx)
{
struct intel_context *a, *b;
int err;
@@ -1667,14 +1685,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
* As the ring start is restored prior to starting the indirect ctx bb and
* as it will be different for each context, it fits this purpose.
*/
- indirect_ctx_bb_setup(a);
- indirect_ctx_bb_setup(b);
+ wabb_ctx_setup(a, per_ctx);
+ wabb_ctx_setup(b, per_ctx);
- err = indirect_ctx_bb_check(a);
+ err = wabb_ctx_check(a, per_ctx);
if (err)
goto unpin_b;
- err = indirect_ctx_bb_check(b);
+ err = wabb_ctx_check(b, per_ctx);
unpin_b:
intel_context_unpin(b);
@@ -1688,7 +1706,7 @@ put_a:
return err;
}
-static int live_lrc_indirect_ctx_bb(void *arg)
+static int lrc_wabb_ctx(void *arg, bool per_ctx)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -1697,7 +1715,7 @@ static int live_lrc_indirect_ctx_bb(void *arg)
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
- err = __live_lrc_indirect_ctx_bb(engine);
+ err = __lrc_wabb_ctx(engine, per_ctx);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
@@ -1710,6 +1728,16 @@ static int live_lrc_indirect_ctx_bb(void *arg)
return err;
}
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, false);
+}
+
+static int live_lrc_per_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, true);
+}
+
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@@ -1947,6 +1975,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
+ SUBTEST(live_lrc_per_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 79aa6ac66..f40de408c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -261,11 +261,12 @@ static int igt_atomic_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -296,7 +297,7 @@ static int igt_atomic_reset(void *arg)
unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
@@ -307,6 +308,7 @@ static int igt_atomic_engine_reset(void *arg)
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
@@ -317,7 +319,7 @@ static int igt_atomic_engine_reset(void *arg)
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -365,7 +367,7 @@ static int igt_atomic_engine_reset(void *arg)
out_unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
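These hunks convert the reset selftests from the untracked GT PM interface to the tracked one: intel_gt_pm_get() now returns an intel_wakeref_t cookie that must be handed back to intel_gt_pm_put(), letting the wakeref tracker pair every acquire with its release. A minimal usage sketch under that assumption, with do_work() as a hypothetical stand-in for the test body:

    static int example_under_gt_pm(struct intel_gt *gt)
    {
            intel_wakeref_t wakeref;
            int err;

            wakeref = intel_gt_pm_get(gt);  /* cookie identifying this acquire */
            err = do_work(gt);              /* hypothetical test body */
            intel_gt_pm_put(gt, wakeref);   /* release paired via the cookie */

            return err;
    }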
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index fb30f733b..dcef8d498 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -224,6 +224,7 @@ int live_rps_clock_interval(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
@@ -236,7 +237,7 @@ int live_rps_clock_interval(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_rps_disable(&gt->rps);
intel_gt_check_clock_frequency(gt);
@@ -355,7 +356,7 @@ int live_rps_clock_interval(void *arg)
}
intel_rps_enable(&gt->rps);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -376,6 +377,7 @@ int live_rps_control(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
/*
@@ -398,7 +400,7 @@ int live_rps_control(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t min_dt, max_dt;
@@ -488,7 +490,7 @@ int live_rps_control(void *arg)
break;
}
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -1023,6 +1025,7 @@ int live_rps_interrupt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
u32 pm_events;
int err = 0;
@@ -1033,9 +1036,9 @@ int live_rps_interrupt(void *arg)
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
- intel_gt_pm_get(gt);
- pm_events = rps->pm_events;
- intel_gt_pm_put(gt);
+ pm_events = 0;
+ with_intel_gt_pm(gt, wakeref)
+ pm_events = rps->pm_events;
if (!pm_events) {
pr_err("No RPS PM events registered, but RPS is enabled?\n");
return -ENODEV;
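The pm_events hunk switches to the scoped with_intel_gt_pm() helper rather than an open-coded get/put pair. Given the tracked API above, the macro plausibly expands along these lines (a sketch, not the verbatim definition):

    #define with_intel_gt_pm(gt, wf) \
            for ((wf) = intel_gt_pm_get(gt); (wf); \
                 intel_gt_pm_put((gt), (wf)), (wf) = 0)

so the body executes exactly once with the GT held awake and the wakeref is released when control leaves the statement.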
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 952c8d52d..302d05402 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -266,6 +266,7 @@ static int run_test(struct intel_gt *gt, int test_type)
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
struct igt_spinner spin;
u32 slpc_min_freq, slpc_max_freq;
int err = 0;
@@ -311,7 +312,7 @@ static int run_test(struct intel_gt *gt, int test_type)
}
intel_gt_pm_wait_for_idle(gt);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
u32 max_act_freq;
@@ -397,7 +398,7 @@ static int run_test(struct intel_gt *gt, int test_type)
if (igt_flush_test(gt->i915))
err = -EIO;
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 5f138de3c..40817ebcc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -322,6 +322,7 @@ static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
gsc->proxy.component = data;
gsc->proxy.component->mei_dev = mei_kdev;
mutex_unlock(&gsc->proxy.mutex);
+ gt_dbg(gt, "GSC proxy mei component bound\n");
return 0;
}
@@ -342,6 +343,7 @@ static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
+ gt_dbg(gt, "GSC proxy mei component unbound\n");
}
static const struct component_ops i915_gsc_proxy_component_ops = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3f3df1166..2b450c43b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -330,7 +330,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
static u32 guc_ctl_devid(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}
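This and several later hunks replace the guc_to_gt(guc)->i915 chain with guc_to_i915(). The helper is presumably a thin inline in intel_guc.h along these lines:

    static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
    {
            return guc_to_gt(guc)->i915;
    }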
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b6dfe62c..813cc888e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -105,61 +105,67 @@ struct intel_guc {
*/
struct {
/**
- * @lock: protects everything in submission_state,
- * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
- * out of zero
+ * @submission_state.lock: protects everything in
+ * submission_state, ce->guc_id.id, and ce->guc_id.ref
+ * when transitioning in and out of zero
*/
spinlock_t lock;
/**
- * @guc_ids: used to allocate new guc_ids, single-lrc
+ * @submission_state.guc_ids: used to allocate new
+ * guc_ids, single-lrc
*/
struct ida guc_ids;
/**
- * @num_guc_ids: Number of guc_ids, selftest feature to be able
- * to reduce this number while testing.
+ * @submission_state.num_guc_ids: Number of guc_ids; a selftest
+ * feature allows reducing this number while testing.
*/
int num_guc_ids;
/**
- * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ * @submission_state.guc_ids_bitmap: used to allocate
+ * new guc_ids, multi-lrc
*/
unsigned long *guc_ids_bitmap;
/**
- * @guc_id_list: list of intel_context with valid guc_ids but no
- * refs
+ * @submission_state.guc_id_list: list of intel_context
+ * with valid guc_ids but no refs
*/
struct list_head guc_id_list;
/**
- * @guc_ids_in_use: Number single-lrc guc_ids in use
+ * @submission_state.guc_ids_in_use: Number of single-lrc
+ * guc_ids in use
*/
unsigned int guc_ids_in_use;
/**
- * @destroyed_contexts: list of contexts waiting to be destroyed
- * (deregistered with the GuC)
+ * @submission_state.destroyed_contexts: list of contexts
+ * waiting to be destroyed (deregistered with the GuC)
*/
struct list_head destroyed_contexts;
/**
- * @destroyed_worker: worker to deregister contexts, need as we
- * need to take a GT PM reference and can't from destroy
- * function as it might be in an atomic context (no sleeping)
+ * @submission_state.destroyed_worker: worker to deregister
+ * contexts; needed because we must take a GT PM reference,
+ * which can't be done from the destroy function as it might
+ * run in an atomic context (no sleeping)
*/
struct work_struct destroyed_worker;
/**
- * @reset_fail_worker: worker to trigger a GT reset after an
- * engine reset fails
+ * @submission_state.reset_fail_worker: worker to trigger
+ * a GT reset after an engine reset fails
*/
struct work_struct reset_fail_worker;
/**
- * @reset_fail_mask: mask of engines that failed to reset
+ * @submission_state.reset_fail_mask: mask of engines that
+ * failed to reset
*/
intel_engine_mask_t reset_fail_mask;
/**
- * @sched_disable_delay_ms: schedule disable delay, in ms, for
- * contexts
+ * @submission_state.sched_disable_delay_ms: schedule
+ * disable delay, in ms, for contexts
*/
unsigned int sched_disable_delay_ms;
/**
- * @sched_disable_gucid_threshold: threshold of min remaining available
- * guc_ids before we start bypassing the schedule disable delay
+ * @submission_state.sched_disable_gucid_threshold:
+ * threshold of min remaining available guc_ids before
+ * we start bypassing the schedule disable delay
*/
unsigned int sched_disable_gucid_threshold;
} submission_state;
@@ -243,37 +249,40 @@ struct intel_guc {
*/
struct {
/**
- * @lock: Lock protecting the below fields and the engine stats.
+ * @timestamp.lock: Lock protecting the below fields and
+ * the engine stats.
*/
spinlock_t lock;
/**
- * @gt_stamp: 64 bit extended value of the GT timestamp.
+ * @timestamp.gt_stamp: 64-bit extended value of the GT
+ * timestamp.
*/
u64 gt_stamp;
/**
- * @ping_delay: Period for polling the GT timestamp for
- * overflow.
+ * @timestamp.ping_delay: Period for polling the GT
+ * timestamp for overflow.
*/
unsigned long ping_delay;
/**
- * @work: Periodic work to adjust GT timestamp, engine and
- * context usage for overflows.
+ * @timestamp.work: Periodic work to adjust GT timestamp,
+ * engine and context usage for overflows.
*/
struct delayed_work work;
/**
- * @shift: Right shift value for the gpm timestamp
+ * @timestamp.shift: Right shift value for the gpm timestamp
*/
u32 shift;
/**
- * @last_stat_jiffies: jiffies at last actual stats collection time
- * We use this timestamp to ensure we don't oversample the
- * stats because runtime power management events can trigger
- * stats collection at much higher rates than required.
+ * @timestamp.last_stat_jiffies: jiffies at last actual
+ * stats collection time. We use this timestamp to ensure
+ * we don't oversample the stats because runtime power
+ * management events can trigger stats collection at much
+ * higher rates than required.
*/
unsigned long last_stat_jiffies;
} timestamp;
@@ -297,6 +306,10 @@ struct intel_guc {
* @number_guc_id_stolen: The number of guc_ids that have been stolen
*/
int number_guc_id_stolen;
+ /**
+ * @fast_response_selftest: Backdoor to CT handler for fast response selftest
+ */
+ u32 fast_response_selftest;
#endif
};
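The kernel-doc churn above applies the convention that members of an anonymous nested struct are documented with the @outer.member form, so scripts/kernel-doc can attribute each comment to the right field. A minimal illustration of the pattern (an invented example, not driver code):

    /**
     * struct example - nested-member kernel-doc illustration
     */
    struct example {
            /** @stats: sampling state */
            struct {
                    /** @stats.lock: protects @stats.count */
                    spinlock_t lock;
                    /** @stats.count: samples collected so far */
                    u64 count;
            } stats;
    };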
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a4da0208c..a1cd40d80 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -355,7 +355,7 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
const struct __guc_mmio_reg_descr_group *lists;
if (GRAPHICS_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 89e314b37..0d5197c08 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -265,7 +265,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
u32 *cmds;
int err;
- err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
+ err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO);
if (err)
return err;
@@ -1076,6 +1076,15 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
found = true;
break;
}
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (!found && ct_to_guc(ct)->fast_response_selftest) {
+ CT_DEBUG(ct, "Assuming unsolicited response due to FAST_REQUEST selftest\n");
+ ct_to_guc(ct)->fast_response_selftest++;
+ found = true;
+ }
+#endif
+
if (!found) {
CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
len, hxg[0], fence, ct->requests.last_fence);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 55bc8b55f..bf16351c9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -573,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -589,7 +589,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
/* A negative value means "use platform/config default" */
if (i915->params.guc_log_level < 0) {
@@ -664,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -796,7 +796,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!log->relay.started)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index 1adec6de2..9df792730 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -14,7 +14,7 @@ static bool __guc_rc_supported(struct intel_guc *guc)
{
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 2dfb07cc4..3e681ab6f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -34,7 +34,7 @@ static bool __detect_slpc_supported(struct intel_guc *guc)
{
/* GuC SLPC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_slpc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 17df71117..a259f1118 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1107,7 +1107,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -1303,6 +1303,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
unsigned long flags;
u32 reset_count;
bool in_reset;
+ intel_wakeref_t wakeref;
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1325,7 +1326,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
@@ -1334,7 +1336,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
@@ -3385,9 +3387,9 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.destroyed_worker);
struct intel_gt *gt = guc_to_gt(guc);
- int tmp;
+ intel_wakeref_t wakeref;
- with_intel_gt_pm(gt, tmp)
+ with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
@@ -4624,12 +4626,12 @@ static bool __guc_submission_supported(struct intel_guc *guc)
{
/* GuC submission is unavailable for pre-Gen11 */
return intel_guc_is_supported(guc) &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 11;
}
static bool __guc_submission_selected(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!intel_guc_submission_is_supported(guc))
return false;
@@ -4894,7 +4896,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 27f6561dd..3872d309e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -106,11 +106,6 @@ static void __confirm_options(struct intel_uc *uc)
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
- if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
- !intel_uc_supports_huc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
-
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 362639162..756093eaf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
u32 len = min_t(u32, size, PAGE_SIZE - offset);
- void *vaddr;
if (idx > 0) {
idx--;
continue;
}
- vaddr = kmap_atomic(page);
- memcpy(dst, vaddr + offset, len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(dst, page, offset, len);
offset = 0;
dst += len;
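The kmap_atomic()/memcpy()/kunmap_atomic() triplet collapses into memcpy_from_page(), which in recent kernels is roughly the following highmem helper (a sketch; the real one also bounds-checks the copy):

    static inline void memcpy_from_page(char *to, struct page *page,
                                        size_t offset, size_t len)
    {
            char *from = kmap_local_page(page);

            memcpy(to, from + offset, len);
            kunmap_local(from);
    }

Besides being shorter, the local-mapping variant drops the pagefault and preemption disabling that kmap_atomic() implied.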
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bfb721435..c900aac85 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -286,11 +286,126 @@ err_wakeref:
return ret;
}
+/*
+ * Send a context schedule H2G message with an invalid context id.
+ * This should generate a GUC_RESULT_INVALID_CONTEXT response.
+ */
+static int bad_h2g(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT,
+ 0x12345678,
+ };
+
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
+}
+
+/*
+ * Set a spinner running to make sure the system is alive and active,
+ * then send a bad but asynchronous H2G command and wait to see if an
+ * error response is returned. If no response is received or if the
+ * spinner dies then the test will fail.
+ */
+#define FAST_RESPONSE_TIMEOUT_MS 1000
+static int intel_guc_fast_request(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
+ bool spinning = false;
+ int ret = 0;
+
+ if (!engine)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ gt_err(gt, "Failed to create spinner request: %pe\n", ce);
+ goto err_pm;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
+ goto err_pm;
+ }
+ spinning = true;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ gt->uc.guc.fast_response_selftest = 1;
+
+ ret = bad_h2g(&gt->uc.guc);
+ if (ret) {
+ gt_err(gt, "Failed to send H2G: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq),
+ FAST_RESPONSE_TIMEOUT_MS);
+ if (ret) {
+ gt_err(gt, "Request wait failed: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ if (i915_request_completed(rq)) {
+ gt_err(gt, "Spinner died waiting for fast request error!\n");
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ if (gt->uc.guc.fast_response_selftest != 2) {
+ gt_err(gt, "Unexpected fast response count: %d\n",
+ gt->uc.guc.fast_response_selftest);
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ igt_spinner_end(&spin);
+ spinning = false;
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret) {
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+err_rq:
+ i915_request_put(rq);
+
+err_spin:
+ if (spinning)
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+err_pm:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return ret;
+}
+
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
SUBTEST(intel_guc_steal_guc_ids),
+ SUBTEST(intel_guc_fast_request),
};
struct intel_gt *gt = to_gt(i915);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 34b5d952e..26fdc392f 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -74,7 +74,7 @@ static int intel_hang_guc(void *arg)
goto err;
}
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 05f9348b7..d4a3f3e09 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -3047,7 +3047,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- u32 per_ctx_start[CACHELINE_DWORDS] = {0};
+ u32 per_ctx_start[CACHELINE_DWORDS] = {};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 835c3fde8..313efdabe 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -56,7 +56,7 @@ static const struct pixel_format bdw_pixel_formats[] = {
{DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
/* non-supported format has bpp default to 0 */
- {0, 0, NULL},
+ {}
};
static const struct pixel_format skl_pixel_formats[] = {
@@ -76,7 +76,7 @@ static const struct pixel_format skl_pixel_formats[] = {
{DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
/* non-supported format has bpp default to 0 */
- {0, 0, NULL},
+ {}
};
static int bdw_format_to_drm(int format)
@@ -293,7 +293,7 @@ static const struct cursor_mode_format cursor_pixel_formats[] = {
{DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
/* non-supported format has bpp default to 0 */
- {0, 0, 0, 0, NULL},
+ {}
};
static int cursor_mode_to_drm(int mode)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index d30f8814d..efcb00472 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -538,7 +538,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
- struct dpll clock = {0};
+ struct dpll clock = {};
u32 temp;
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
@@ -2576,7 +2576,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ddf49c2db..2905df83e 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
- src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ src = kmap_local_page(i915_gem_object_get_page(src_obj, n));
if (src_needs_clflush)
drm_clflush_virt_range(src + x, len);
memcpy(ptr, src + x, len);
- kunmap_atomic(src);
+ kunmap_local(src);
ptr += len;
remain -= len;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e9b79c2c3..db99c2ef6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
+#include "display/intel_display_params.h"
+
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
@@ -49,6 +51,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_driver.h"
+#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
@@ -67,13 +70,13 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
- intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
+ intel_display_params_dump(i915, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -297,107 +300,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *pos)
-{
- struct i915_gpu_coredump *error;
- ssize_t ret;
- void *buf;
-
- error = file->private_data;
- if (!error)
- return 0;
-
- /* Bounce buffer required because of kernfs __user API convenience. */
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
- if (ret <= 0)
- goto out;
-
- if (!copy_to_user(ubuf, buf, ret))
- *pos += ret;
- else
- ret = -EFAULT;
-
-out:
- kfree(buf);
- return ret;
-}
-
-static int gpu_state_release(struct inode *inode, struct file *file)
-{
- i915_gpu_coredump_put(file->private_data);
- return 0;
-}
-
-static int i915_gpu_info_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_coredump *gpu;
- intel_wakeref_t wakeref;
-
- gpu = NULL;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
-
- if (IS_ERR(gpu))
- return PTR_ERR(gpu);
-
- file->private_data = gpu;
- return 0;
-}
-
-static const struct file_operations i915_gpu_info_fops = {
- .owner = THIS_MODULE,
- .open = i915_gpu_info_open,
- .read = gpu_state_read,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-
-static ssize_t
-i915_error_state_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct i915_gpu_coredump *error = filp->private_data;
-
- if (!error)
- return 0;
-
- drm_dbg(&error->i915->drm, "Resetting error state\n");
- i915_reset_error_state(error->i915);
-
- return cnt;
-}
-
-static int i915_error_state_open(struct inode *inode, struct file *file)
-{
- struct i915_gpu_coredump *error;
-
- error = i915_first_error_state(inode->i_private);
- if (IS_ERR(error))
- return PTR_ERR(error);
-
- file->private_data = error;
- return 0;
-}
-
-static const struct file_operations i915_error_state_fops = {
- .owner = THIS_MODULE,
- .open = i915_error_state_open,
- .read = gpu_state_read,
- .write = i915_error_state_write,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-#endif
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -837,10 +739,6 @@ static const struct i915_debugfs_files {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- {"i915_error_state", &i915_error_state_fops},
- {"i915_gpu_info", &i915_gpu_info_fops},
-#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -863,4 +761,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
+
+ i915_gpu_error_debugfs_register(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 802de2c6d..9967148ae 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -231,16 +231,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->display.audio.mutex);
- mutex_init(&dev_priv->display.wm.wm_mutex);
- mutex_init(&dev_priv->display.pps.mutex);
- mutex_init(&dev_priv->display.hdcp.hdcp_mutex);
-
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -804,7 +798,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
- intel_pxp_init(i915);
+ ret = intel_pxp_init(i915);
+ if (ret && ret != -ENODEV)
+ drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
if (ret)
@@ -907,6 +903,8 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
+
+ intel_display_device_remove(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1037,7 +1035,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
- intel_runtime_pm_driver_release(&i915->runtime_pm);
+ intel_runtime_pm_driver_last_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 2a44b3876..fa6852713 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
kref_init(&client->kref);
spin_lock_init(&client->ctx_lock);
INIT_LIST_HEAD(&client->ctx_list);
+#ifdef CONFIG_PROC_FS
+ spin_lock_init(&client->objects_lock);
+ INIT_LIST_HEAD(&client->objects_list);
+#endif
return client;
}
@@ -41,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
}
#ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+ const enum intel_region_id id = obj->mm.region ?
+ obj->mm.region->id : INTEL_REGION_SMEM;
+ const u64 sz = obj->base.size;
+
+ if (obj->base.handle_count > 1)
+ stats[id].shared += sz;
+ else
+ stats[id].private += sz;
+
+ if (i915_gem_object_has_pages(obj)) {
+ stats[id].resident += sz;
+
+ if (!dma_resv_test_signaled(obj->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ stats[id].active += sz;
+ else if (i915_gem_object_is_shrinkable(obj) &&
+ obj->mm.madv == I915_MADV_DONTNEED)
+ stats[id].purgeable += sz;
+ }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_drm_client *client = fpriv->client;
+ struct drm_i915_private *i915 = fpriv->i915;
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
+ struct list_head __rcu *pos;
+ unsigned int id;
+
+ /* Public objects. */
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, obj, id)
+ obj_meminfo(obj, stats);
+ spin_unlock(&file->table_lock);
+
+ /* Internal objects. */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &client->objects_list) {
+ obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+ client_link));
+ if (!obj)
+ continue;
+ obj_meminfo(obj, stats);
+ i915_gem_object_put(obj);
+ }
+ rcu_read_unlock();
+
+ for_each_memory_region(mr, i915, id)
+ drm_print_memory_stats(p,
+ &stats[id],
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ mr->uabi_name);
+}
+
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -102,10 +168,52 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
* ******************************************************************
*/
+ show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
show_client_class(p, i915, file_priv->client, i);
}
+
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned long flags;
+
+ GEM_WARN_ON(obj->client);
+ GEM_WARN_ON(!list_empty(&obj->client_link));
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ obj->client = i915_drm_client_get(client);
+ list_add_tail_rcu(&obj->client_link, &client->objects_list);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+}
+
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+ struct i915_drm_client *client = fetch_and_zero(&obj->client);
+ unsigned long flags;
+
+ /* Object may not be associated with a client. */
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ list_del_rcu(&obj->client_link);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+
+ i915_drm_client_put(client);
+}
+
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+ if (ce->state)
+ i915_drm_client_add_object(client, ce->state->obj);
+
+ if (ce->ring != ce->engine->legacy.ring && ce->ring->vma)
+ i915_drm_client_add_object(client, ce->ring->vma->obj);
+}
#endif
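With show_meminfo() wired into the fdinfo callback, a client's /proc/<pid>/fdinfo/<fd> gains the standard DRM per-region memory keys emitted by drm_print_memory_stats(). Roughly, with illustrative sizes and assuming the smem region's uabi name is system0:

    drm-total-system0:      8192 KiB
    drm-shared-system0:     2048 KiB
    drm-active-system0:     4096 KiB
    drm-resident-system0:   8192 KiB
    drm-purgeable-system0:  0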
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 67816c912..a439dd789 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -12,6 +12,10 @@
#include <uapi/drm/i915_drm.h>
+#include "i915_file_private.h"
+#include "gem/i915_gem_object_types.h"
+#include "gt/intel_context_types.h"
+
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_file;
@@ -25,6 +29,20 @@ struct i915_drm_client {
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
+#ifdef CONFIG_PROC_FS
+ /**
+ * @objects_lock: lock protecting @objects_list
+ */
+ spinlock_t objects_lock;
+
+ /**
+ * @objects_list: list of objects created by this client
+ *
+ * Protected by @objects_lock.
+ */
+ struct list_head objects_list;
+#endif
+
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
@@ -49,4 +67,28 @@ struct i915_drm_client *i915_drm_client_alloc(void);
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
+#ifdef CONFIG_PROC_FS
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj);
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce);
+#else
+static inline void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+}
+#endif
+
#endif /* !__I915_DRM_CLIENT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dd452c220..861567362 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -396,20 +396,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
-/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, gt__, id__) \
- for ((id__) = 0; \
- (id__) < I915_NUM_ENGINES; \
- (id__)++) \
- for_each_if ((engine__) = (gt__)->engine[(id__)])
-
-/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
- for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
- (tmp__) ? \
- ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
- 0;)
-
#define rb_to_uabi_engine(rb) \
rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
@@ -418,11 +404,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
(engine__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define for_each_uabi_class_engine(engine__, class__, i915__) \
- for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
- (engine__) && (engine__)->uabi_class == (class__); \
- (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)
@@ -575,6 +556,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
+#define IS_LUNARLAKE(i915) 0
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c166ad5e1..92758b6b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1306,8 +1306,6 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
-
- spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b4e31e59c..d04660b60 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -57,6 +57,7 @@
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
+#include "i915_sysfs.h"
#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -520,7 +521,7 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
-struct i915_vma_coredump *
+static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
@@ -609,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma)
+static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@@ -660,6 +661,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
i915_params_dump(params, &p);
+ intel_display_params_dump(m->i915, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -1027,6 +1029,7 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
+ intel_display_params_free(&error->display_params);
}
static void cleanup_uc(struct intel_uc_coredump *uc)
@@ -1988,6 +1991,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->suspend_count = i915->suspend_count;
i915_params_copy(&error->params, &i915->params);
+ intel_display_params_copy(&error->display_params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -2137,7 +2141,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
return error;
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
@@ -2174,7 +2178,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
+ pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
@@ -2208,7 +2212,7 @@ void i915_capture_error_state(struct intel_gt *gt,
i915_gpu_coredump_put(error);
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
struct i915_gpu_coredump *error;
@@ -2375,3 +2379,184 @@ void intel_klog_error_capture(struct intel_gt *gt,
drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
}
#endif
+
+static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct i915_gpu_coredump *error;
+ ssize_t ret;
+ void *buf;
+
+ error = file->private_data;
+ if (!error)
+ return 0;
+
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
+ goto out;
+
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int gpu_state_release(struct inode *inode, struct file *file)
+{
+ i915_gpu_coredump_put(file->private_data);
+ return 0;
+}
+
+static int i915_gpu_info_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+ struct i915_gpu_coredump *gpu;
+ intel_wakeref_t wakeref;
+
+ gpu = NULL;
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
+
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
+
+ file->private_data = gpu;
+ return 0;
+}
+
+static const struct file_operations i915_gpu_info_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_gpu_info_open,
+ .read = gpu_state_read,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct i915_gpu_coredump *error = filp->private_data;
+
+ if (!error)
+ return 0;
+
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
+ i915_reset_error_state(error->i915);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
+ return 0;
+}
+
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = gpu_state_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ &i915_error_state_fops);
+ debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ &i915_gpu_info_fops);
+}
+
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct i915_gpu_coredump *gpu;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
+
+ gpu = i915_first_error_state(i915);
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
+
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
+ i915_reset_error_state(dev_priv);
+
+ return count;
+}
+
+static const struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ drm_err(&i915->drm, "error_state sysfs setup failed\n");
+}
+
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 48f6c0040..7c255bb1c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -15,6 +15,7 @@
#include <drm/drm_mm.h>
#include "display/intel_display_device.h"
+#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
@@ -215,6 +216,7 @@ struct i915_gpu_coredump {
struct intel_display_runtime_info display_runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
+ struct intel_display_params display_params;
struct intel_overlay_error_state *overlay;
@@ -283,14 +285,7 @@ static inline void intel_klog_error_capture(struct intel_gt *gt,
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma);
-struct i915_vma_coredump *
-intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
-
-struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
- intel_engine_mask_t engine_mask, u32 dump_flags);
+
void i915_capture_error_state(struct intel_gt *gt,
intel_engine_mask_t engine_mask, u32 dump_flags);
@@ -338,10 +333,13 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
-struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915);
+
#else
__printf(2, 3)
@@ -409,12 +407,6 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
-static inline struct i915_gpu_coredump *
-i915_first_error_state(struct drm_i915_private *i915)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
@@ -424,6 +416,18 @@ static inline void i915_disable_error_state(struct drm_i915_private *i915,
{
}
+static inline void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 1b021a490..ba8227725 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -23,6 +23,8 @@
*/
#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/cpufeature.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 036c4c3ed..de4304854 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -67,33 +67,9 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(enable_dc, int, 0400,
- "Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
- "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
-
-i915_param_named_unsafe(enable_fbc, int, 0400,
- "Enable frame buffer compression for power savings "
- "(default: -1 (use per-chip default))");
-
-i915_param_named_unsafe(lvds_channel_mode, int, 0400,
- "Specify LVDS channel mode "
- "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-i915_param_named_unsafe(panel_use_ssc, int, 0400,
- "Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: auto from VBT)");
-
-i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
- "Override/Ignore selection of SDVO panel mode in the VBT "
- "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
-i915_param_named_unsafe(vbt_firmware, charp, 0400,
- "Load VBT from specified file under /lib/firmware");
-
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
i915_param_named(error_capture, bool, 0400,
"Record the GPU state following a hang. "
@@ -106,55 +82,10 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0400,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_psr, int, 0400,
- "Enable PSR "
- "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
- "Default: -1 (use per-chip default)");
-
-i915_param_named(psr_safest_params, bool, 0400,
- "Replace PSR VBT parameters by the safest and not optimal ones. This "
- "is helpful to detect if PSR issues are related to bad values set in "
- " VBT. (0=use VBT parameters, 1=use safest parameters)");
-
-i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
- "Enable PSR2 selective fetch "
- "(0=disabled, 1=enabled) "
- "Default: 0");
-
-i915_param_named_unsafe(enable_sagv, bool, 0600,
- "Enable system agent voltage/frequency scaling (SAGV) (default: true)");
-
i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe options for specified supported devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
-i915_param_named_unsafe(disable_power_well, int, 0400,
- "Disable display power wells when possible "
- "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-
-i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)");
-
-i915_param_named_unsafe(enable_dpt, bool, 0400,
- "Enable display page table (DPT) (default: true)");
-
-i915_param_named_unsafe(load_detect_test, bool, 0400,
- "Force-enable the VGA load detect code for testing (default:false). "
- "For developers only.");
-
-i915_param_named_unsafe(force_reset_modeset_test, bool, 0400,
- "Force a modeset during gpu reset for testing (default:false). "
- "For developers only.");
-
-i915_param_named_unsafe(invert_brightness, int, 0400,
- "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-
-i915_param_named(disable_display, bool, 0400,
- "Disable display (default: false)");
-
i915_param_named(memtest, bool, 0400,
"Perform a read/write test of all device memory on module load (default: off)");
@@ -162,19 +93,6 @@ i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
-/* Special case writable file */
-i915_param_named(verbose_state_checks, bool, 0600,
- "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-
-i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
- "Force enable atomic functionality on platforms that don't have full support yet.");
-
-/* WA to get away with the default setting in VBT for early platforms.Will be removed */
-i915_param_named_unsafe(edp_vswing, int, 0400,
- "Ignore/Override vswing pre-emph table selection from VBT "
- "(0=use value from vbt [default], 1=low power swing(200mV),"
- "2=default swing(400mV))");
-
i915_param_named_unsafe(enable_guc, int, 0400,
"Enable GuC load for GuC submission and/or HuC load. "
"Required functionality can be selected using bitmask values. "
@@ -196,18 +114,11 @@ i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
-i915_param_named_unsafe(enable_dp_mst, bool, 0400,
- "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, int, 0400,
- "Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
-
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d5194b039..1315d7fac 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -46,21 +46,7 @@ struct drm_printer;
* debugfs file
*/
#define I915_PARAMS_FOR_EACH(param) \
- param(char *, vbt_firmware, NULL, 0400) \
param(int, modeset, -1, 0400) \
- param(int, lvds_channel_mode, 0, 0400) \
- param(int, panel_use_ssc, -1, 0600) \
- param(int, vbt_sdvo_panel_type, -1, 0400) \
- param(int, enable_dc, -1, 0400) \
- param(int, enable_fbc, -1, 0600) \
- param(int, enable_psr, -1, 0600) \
- param(bool, enable_dpt, true, 0400) \
- param(bool, psr_safest_params, false, 0400) \
- param(bool, enable_psr2_sel_fetch, true, 0400) \
- param(bool, enable_sagv, true, 0600) \
- param(int, disable_power_well, -1, 0400) \
- param(int, enable_ips, 1, 0600) \
- param(int, invert_brightness, 0, 0600) \
param(int, enable_guc, -1, 0400) \
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
@@ -69,23 +55,15 @@ struct drm_printer;
param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
- param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
param(unsigned int, inject_probe_failure, 0, 0) \
- param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
param(unsigned int, lmem_size, 0, 0400) \
param(unsigned int, lmem_bar_size, 0, 0400) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
- param(bool, load_detect_test, false, 0600) \
- param(bool, force_reset_modeset_test, false, 0600) \
param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 0600 : 0) \
- param(bool, disable_display, false, 0400) \
- param(bool, verbose_state_checks, true, 0) \
- param(bool, nuclear_pageflip, false, 0400) \
- param(bool, enable_dp_mst, true, 0600) \
param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
#define MEMBER(T, member, ...) T member;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2d695818f..bd9d812b1 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
struct intel_gt *gt = to_gt(i915);
/* Wa_18013179988 */
- if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
intel_wakeref_t wakeref;
u32 reg, shift;
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 13b1ae9b9..46445248d 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -291,7 +291,8 @@ struct i915_perf_stream {
int size_exponent;
/**
- * @ptr_lock: Locks reads and writes to all head/tail state
+ * @oa_buffer.ptr_lock: Locks reads and writes to all
+ * head/tail state
*
* Consider: the head and tail pointer state needs to be read
* consistently from a hrtimer callback (atomic context) and
@@ -313,7 +314,8 @@ struct i915_perf_stream {
spinlock_t ptr_lock;
/**
- * @head: Although we can always read back the head pointer register,
+ * @oa_buffer.head: Although we can always read back
+ * the head pointer register,
* we prefer to avoid trusting the HW state, just to avoid any
* risk that some hardware condition could *somehow* bump the
* head pointer unpredictably and cause us to forward the wrong
@@ -322,7 +324,8 @@ struct i915_perf_stream {
u32 head;
/**
- * @tail: The last verified tail that can be read by userspace.
+ * @oa_buffer.tail: The last verified tail that can be
+ * read by userspace.
*/
u32 tail;
} oa_buffer;
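The reworded kernel-doc above keeps the locking rationale intact: head and tail are read from a hrtimer callback (atomic context) and from the read() fop (user context), so both values live under ptr_lock and are only ever accessed as a consistent pair. A minimal userspace analogue of that snapshot discipline, with a pthread mutex standing in for the driver's spinlock (toy names, not the i915 structures):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int head, tail;

/* Writer side: only ever update the pointers under the lock. */
static void advance_tail(unsigned int new_tail)
{
	pthread_mutex_lock(&ptr_lock);
	tail = new_tail;
	pthread_mutex_unlock(&ptr_lock);
}

/* Reader side: take both values in one critical section so the pair
 * is always mutually consistent. */
static void snapshot(unsigned int *h, unsigned int *t)
{
	pthread_mutex_lock(&ptr_lock);
	*h = head;
	*t = tail;
	pthread_mutex_unlock(&ptr_lock);
}

int main(void)
{
	unsigned int h, t;

	advance_tail(64);
	snapshot(&h, &t);
	printf("head=%u tail=%u\n", h, t);
	return 0;
}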
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f861863eb..21eb0c5b3 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -31,6 +31,16 @@
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+ return container_of(pmu, struct drm_i915_private, pmu);
+}
+
static u8 engine_config_sample(u64 config)
{
return config & I915_PMU_SAMPLE_MASK;
@@ -141,7 +151,7 @@ static u32 frequency_enabled_mask(void)
static bool pmu_needs_timer(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u32 enable;
/*
@@ -213,19 +223,19 @@ static u64 get_rc6(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
- bool awake = false;
u64 val;
- if (intel_gt_pm_get_if_awake(gt)) {
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
val = __get_rc6(gt);
- intel_gt_pm_put_async(gt);
- awake = true;
+ intel_gt_pm_put_async(gt, wakeref);
}
spin_lock_irqsave(&pmu->lock, flags);
- if (awake) {
+ if (wakeref) {
store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
@@ -251,7 +261,7 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_gt *gt;
unsigned int i;
@@ -429,12 +439,14 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
- if (!intel_gt_pm_get_if_awake(gt))
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!wakeref)
return;
if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
@@ -463,14 +475,13 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
period_ns / 1000);
}
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
- struct drm_i915_private *i915 =
- container_of(hrtimer, struct drm_i915_private, pmu.timer);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
unsigned int period_ns;
struct intel_gt *gt;
unsigned int i;
@@ -505,8 +516,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
static void i915_pmu_event_destroy(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
drm_WARN_ON(&i915->drm, event->parent);
@@ -572,8 +583,8 @@ config_status(struct drm_i915_private *i915, u64 config)
static int engine_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_engine_cs *engine;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
@@ -586,9 +597,8 @@ static int engine_event_init(struct perf_event *event)
static int i915_pmu_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
if (pmu->closed)
@@ -628,9 +638,8 @@ static int i915_pmu_event_init(struct perf_event *event)
static u64 __i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u64 val = 0;
if (is_engine_event(event)) {
@@ -686,10 +695,8 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
struct hw_perf_event *hwc = &event->hw;
- struct i915_pmu *pmu = &i915->pmu;
u64 prev, new;
if (pmu->closed) {
@@ -707,10 +714,9 @@ static void i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_enable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -771,10 +777,9 @@ update:
static void i915_pmu_disable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -818,9 +823,7 @@ static void i915_pmu_disable(struct perf_event *event)
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return;
@@ -848,9 +851,7 @@ out:
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return -ENODEV;
@@ -982,7 +983,7 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
static const struct {
unsigned int counter;
const char *name;
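The event_to_pmu()/pmu_to_i915() helpers introduced at the top of this i915_pmu.c diff replace the repeated open-coded container_of() chains in every PMU callback. A self-contained sketch of the same pattern, with made-up struct names standing in for the driver's:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: recover the enclosing struct from a
 * pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu_base { int dummy; };
struct my_pmu { struct pmu_base base; };
struct my_device { int id; struct my_pmu pmu; };

static struct my_pmu *base_to_pmu(struct pmu_base *b)
{
	return container_of(b, struct my_pmu, base);
}

static struct my_device *pmu_to_dev(struct my_pmu *pmu)
{
	return container_of(pmu, struct my_device, pmu);
}

int main(void)
{
	struct my_device dev = { .id = 42 };

	/* Walk back from the innermost member to the owning device. */
	printf("%d\n", pmu_to_dev(base_to_pmu(&dev.pmu.base))->id);
	return 0;
}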
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b89cf2041..6f1c4e2a9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -195,8 +195,6 @@
#define DPIO_SFR_BYPASS (1 << 1)
#define DPIO_CMNRST (1 << 0)
-#define DPIO_PHY(pipe) ((pipe) >> 1)
-
/*
* Per pipe/PLL DPIO regs
*/
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e88bb4f04..613decd47 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -155,81 +155,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
-
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_coredump *gpu;
- ssize_t ret = 0;
-
- /*
- * FIXME: Concurrent clients triggering resets and reading + clearing
- * dumps can cause inconsistent sysfs reads when a user calls in with a
- * non-zero offset to complete a prior partial read but the
- * gpu_coredump has been cleared or replaced.
- */
-
- gpu = i915_first_error_state(i915);
- if (IS_ERR(gpu)) {
- ret = PTR_ERR(gpu);
- } else if (gpu) {
- ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_coredump_put(gpu);
- } else {
- const char *str = "No error state collected\n";
- size_t len = strlen(str);
-
- if (off < len) {
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
- }
- }
-
- return ret;
-}
-
-static ssize_t error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-
- drm_dbg(&dev_priv->drm, "Resetting error state\n");
- i915_reset_error_state(dev_priv);
-
- return count;
-}
-
-static const struct bin_attribute error_state_attr = {
- .attr.name = "error",
- .attr.mode = S_IRUSR | S_IWUSR,
- .size = 0,
- .read = error_state_read,
- .write = error_state_write,
-};
-
-static void i915_setup_error_capture(struct device *kdev)
-{
- if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
- drm_err(&kdev_minor_to_i915(kdev)->drm,
- "error_state sysfs setup failed\n");
-}
-
-static void i915_teardown_error_capture(struct device *kdev)
-{
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
-}
-#else
-static void i915_setup_error_capture(struct device *kdev) {}
-static void i915_teardown_error_capture(struct device *kdev) {}
-#endif
-
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -255,7 +180,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_warn(&dev_priv->drm,
"failed to register GT sysfs directory\n");
- i915_setup_error_capture(kdev);
+ i915_gpu_error_sysfs_setup(dev_priv);
intel_engines_add_sysfs(dev_priv);
}
@@ -264,7 +189,7 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- i915_teardown_error_capture(kdev);
+ i915_gpu_error_sysfs_teardown(dev_priv);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c61066498..f98577967 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,7 +40,7 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d09aad34b..b70715b14 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise the GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we and our _retire() counterpart can be
+ * called asynchronously, storing a wakeref tracking
+ * handle inside struct i915_vma is not safe, and
+ * there is no other good place for that. Hence,
+ * use untracked variants of intel_gt_pm_get/put().
+ */
+ intel_gt_pm_get_untracked(vma->vm->gt);
+ }
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async_untracked(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e98b6d69a..9b6d87c8b 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -41,7 +41,7 @@
* To virtualize GPU resources GVT-g driver depends on hypervisor technology
* e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
* and be virtualized within GVT-g device module. More architectural design
- * doc is available on https://01.org/group/2230/documentation-list.
+ * doc is available on https://github.com/intel/gvt-linux/wiki.
*/
static LIST_HEAD(intel_gvt_devices);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 3d1fdea98..60a03340b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -216,6 +216,22 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
+static const char *region_type_str(u16 type)
+{
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ return "system";
+ case INTEL_MEMORY_LOCAL:
+ return "local";
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return "stolen-local";
+ case INTEL_MEMORY_STOLEN_SYSTEM:
+ return "stolen-system";
+ default:
+ return "unknown";
+ }
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -244,6 +260,9 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->type = type;
mem->instance = instance;
+ snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ region_type_str(type), instance);
+
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 2953ed5c3..9ba36454e 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -80,6 +80,7 @@ struct intel_memory_region {
u16 instance;
enum intel_region_id id;
char name[16];
+ char uabi_name[16];
bool private; /* not for userspace */
struct {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8743153fa..860b51b56 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -50,184 +50,44 @@
* present for a given platform.
*/
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-#include <linux/sort.h>
-
-#define STACKDEPTH 8
-
-static noinline depot_stack_handle_t __save_depot_stack(void)
+static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
- unsigned long entries[STACKDEPTH];
- unsigned int n;
-
- n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
- return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
+ return container_of(rpm, struct drm_i915_private, runtime_pm);
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- spin_lock_init(&rpm->debug.lock);
- stack_depot_init();
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
}
-static noinline depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- depot_stack_handle_t stack, *stacks;
- unsigned long flags;
-
- if (rpm->no_wakeref_tracking)
+ if (!rpm->available || rpm->no_wakeref_tracking)
return -1;
- stack = __save_depot_stack();
- if (!stack)
- return -1;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
-
- if (!rpm->debug.count)
- rpm->debug.last_acquire = stack;
-
- stacks = krealloc(rpm->debug.owners,
- (rpm->debug.count + 1) * sizeof(*stacks),
- GFP_NOWAIT | __GFP_NOWARN);
- if (stacks) {
- stacks[rpm->debug.count++] = stack;
- rpm->debug.owners = stacks;
- } else {
- stack = -1;
- }
-
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- return stack;
+ return intel_ref_tracker_alloc(&rpm->debug);
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- depot_stack_handle_t stack)
+ intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
- unsigned long flags, n;
- bool found = false;
-
- if (unlikely(stack == -1))
+ if (!rpm->available || rpm->no_wakeref_tracking)
return;
- spin_lock_irqsave(&rpm->debug.lock, flags);
- for (n = rpm->debug.count; n--; ) {
- if (rpm->debug.owners[n] == stack) {
- memmove(rpm->debug.owners + n,
- rpm->debug.owners + n + 1,
- (--rpm->debug.count - n) * sizeof(stack));
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- if (drm_WARN(&i915->drm, !found,
- "Unmatched wakeref (tracking %lu), count %u\n",
- rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
-
- stack = READ_ONCE(rpm->debug.last_release);
- if (stack) {
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
- }
-
- kfree(buf);
- }
+ intel_ref_tracker_free(&rpm->debug, wakeref);
}
-static int cmphandle(const void *_a, const void *_b)
+static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
- const depot_stack_handle_t * const a = _a, * const b = _b;
-
- if (*a < *b)
- return -1;
- else if (*a > *b)
- return 1;
- else
- return 0;
-}
-
-static void
-__print_intel_runtime_pm_wakeref(struct drm_printer *p,
- const struct intel_runtime_pm_debug *dbg)
-{
- unsigned long i;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- if (dbg->last_acquire) {
- stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last acquired:\n%s", buf);
- }
-
- if (dbg->last_release) {
- stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last released:\n%s", buf);
- }
-
- drm_printf(p, "Wakeref count: %lu\n", dbg->count);
-
- sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
-
- for (i = 0; i < dbg->count; i++) {
- depot_stack_handle_t stack = dbg->owners[i];
- unsigned long rep;
-
- rep = 1;
- while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
- rep++, i++;
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
- }
-
- kfree(buf);
-}
-
-static noinline void
-__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
- struct intel_runtime_pm_debug *saved)
-{
- *saved = *debug;
-
- debug->owners = NULL;
- debug->count = 0;
- debug->last_release = __save_depot_stack();
-}
-
-static void
-dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
-{
- if (debug->count) {
- struct drm_printer p = drm_debug_printer("i915");
-
- __print_intel_runtime_pm_wakeref(&p, debug);
- }
-
- kfree(debug->owners);
+ ref_tracker_dir_exit(&rpm->debug);
}
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@@ -235,60 +95,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
flags))
return;
- __untrack_all_wakerefs(&rpm->debug, &dbg);
+ ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
-}
-
-static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
-{
- struct intel_runtime_pm_debug dbg = {};
- unsigned long flags;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
- __untrack_all_wakerefs(&rpm->debug, &dbg);
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
- struct intel_runtime_pm_debug dbg = {};
-
- do {
- unsigned long alloc = dbg.count;
- depot_stack_handle_t *s;
-
- spin_lock_irq(&rpm->debug.lock);
- dbg.count = rpm->debug.count;
- if (dbg.count <= alloc) {
- memcpy(dbg.owners,
- rpm->debug.owners,
- dbg.count * sizeof(*s));
- }
- dbg.last_acquire = rpm->debug.last_acquire;
- dbg.last_release = rpm->debug.last_release;
- spin_unlock_irq(&rpm->debug.lock);
- if (dbg.count <= alloc)
- break;
-
- s = krealloc(dbg.owners,
- dbg.count * sizeof(*s),
- GFP_NOWAIT | __GFP_NOWARN);
- if (!s)
- goto out;
-
- dbg.owners = s;
- } while (1);
-
- __print_intel_runtime_pm_wakeref(p, &dbg);
-
-out:
- kfree(dbg.owners);
+ intel_ref_tracker_show(&rpm->debug, p);
}
#else
@@ -297,14 +111,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
-static depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- intel_wakeref_t wref)
+ intel_wakeref_t wakeref)
{
}
@@ -349,9 +163,7 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
bool wakelock)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
int ret;
ret = pm_runtime_get_sync(rpm->kdev);
@@ -556,9 +368,7 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
*/
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct device *kdev = rpm->kdev;
/*
@@ -611,9 +421,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
@@ -628,9 +436,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
int count = atomic_read(&rpm->wakeref_count);
intel_wakeref_auto_fini(&rpm->userfault_wakeref);
@@ -639,14 +445,17 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
"i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
intel_rpm_raw_wakeref_count(count),
intel_rpm_wakelock_count(count));
+}
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
+{
+ intel_runtime_pm_driver_release(rpm);
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 =
- container_of(rpm, struct drm_i915_private, runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct device *kdev = &pdev->dev;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index f79cda7a2..de3579d39 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -11,8 +11,6 @@
#include "intel_wakeref.h"
-#include "i915_utils.h"
-
struct device;
struct drm_i915_private;
struct drm_printer;
@@ -77,15 +75,7 @@ struct intel_runtime_pm {
* paired rpm_put) we can remove corresponding pairs of and keep
* the array trimmed to active wakerefs.
*/
- struct intel_runtime_pm_debug {
- spinlock_t lock;
-
- depot_stack_handle_t last_acquire;
- depot_stack_handle_t last_release;
-
- depot_stack_handle_t *owners;
- unsigned long count;
- } debug;
+ struct ref_tracker_dir debug;
#endif
};
@@ -189,6 +179,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 623a69089..dea2f6318 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -99,7 +99,8 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key)
+ struct intel_wakeref_lockclass *key,
+ const char *name)
{
wf->i915 = i915;
wf->ops = ops;
@@ -111,6 +112,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
"wakeref.work", &key->work, 0);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+#endif
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
@@ -191,3 +196,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
intel_wakeref_auto(wf, 0);
INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p)
+{
+ const size_t buf_size = PAGE_SIZE;
+ char *buf, *sb, *se;
+ size_t count;
+
+ buf = kmalloc(buf_size, GFP_NOWAIT);
+ if (!buf)
+ return;
+
+ count = ref_tracker_dir_snprint(dir, buf, buf_size);
+ if (!count)
+ goto free;
+ /* printk does not like big buffers, so we split it */
+ for (sb = buf; *sb; sb = se + 1) {
+ se = strchrnul(sb, '\n');
+ drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
+ if (!*se)
+ break;
+ }
+ if (count >= buf_size)
+ drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
+ count + 1 - buf_size);
+free:
+ kfree(buf);
+}
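intel_ref_tracker_show() feeds the snprint'ed leak report to drm_printf() one line at a time because, as the comment notes, printk truncates very large strings. A standalone sketch of the same splitting loop, with printf() standing in for the drm printer (strchrnul() is the GNU extension the driver also uses):

#define _GNU_SOURCE	/* for strchrnul() */
#include <stdio.h>
#include <string.h>

static void print_by_line(const char *buf)
{
	const char *sb, *se;

	for (sb = buf; *sb; sb = se + 1) {
		/* strchrnul() points at the '\n', or at the final NUL. */
		se = strchrnul(sb, '\n');
		printf("%.*s", (int)(se - sb + 1), sb);
		if (!*se)
			break;
	}
}

int main(void)
{
	print_by_line("leak A\nleak B\nleak C\n");
	return 0;
}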
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index ec881b097..68aa3be48 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,16 +7,25 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
+#include <drm/drm_print.h>
+
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+typedef unsigned long intel_wakeref_t;
+
+#define INTEL_REFTRACK_DEAD_COUNT 16
+#define INTEL_REFTRACK_PRINT_LIMIT 16
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
@@ -26,8 +35,6 @@
struct intel_runtime_pm;
struct intel_wakeref;
-typedef depot_stack_handle_t intel_wakeref_t;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
@@ -43,6 +50,10 @@ struct intel_wakeref {
const struct intel_wakeref_ops *ops;
struct delayed_work work;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ struct ref_tracker_dir debug;
+#endif
};
struct intel_wakeref_lockclass {
@@ -53,11 +64,12 @@ struct intel_wakeref_lockclass {
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key);
-#define intel_wakeref_init(wf, i915, ops) do { \
+ struct intel_wakeref_lockclass *key,
+ const char *name);
+#define intel_wakeref_init(wf, i915, ops, name) do { \
static struct intel_wakeref_lockclass __key; \
\
- __intel_wakeref_init((wf), (i915), (ops), &__key); \
+ __intel_wakeref_init((wf), (i915), (ops), &__key, name); \
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
@@ -261,6 +273,57 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
*/
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
+
+static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
+{
+ struct ref_tracker *user = NULL;
+
+ ref_tracker_alloc(dir, &user, GFP_NOWAIT);
+
+ return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
+}
+
+static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
+ intel_wakeref_t handle)
+{
+ struct ref_tracker *user;
+
+ user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
+
+ ref_tracker_free(dir, &user);
+}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return intel_ref_tracker_alloc(&wf->debug);
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+ intel_ref_tracker_free(&wf->debug, handle);
+}
+
+#else
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return -1;
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+}
+
+#endif
+
struct intel_wakeref_auto {
struct drm_i915_private *i915;
struct timer_list timer;
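With intel_wakeref_t widened to unsigned long, a tracked wakeref is simply the struct ref_tracker pointer cast to an integer, and (intel_wakeref_t)-1 doubles as the "no tracking" sentinel. A userspace sketch of that encoding, using a toy tracker type rather than the kernel's ref_tracker API (the ?: shorthand is the same GNU C idiom the header uses):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long wakeref_t;
#define WAKEREF_DEF ((wakeref_t)-1)	/* sentinel: nothing to untrack */

struct toy_tracker { int slot; };

static wakeref_t tracker_alloc(void)
{
	struct toy_tracker *t = malloc(sizeof(*t));

	/* Allocation failure (NULL, i.e. 0) maps to the sentinel. */
	return (wakeref_t)t ?: WAKEREF_DEF;
}

static void tracker_free(wakeref_t handle)
{
	struct toy_tracker *t =
		(handle == WAKEREF_DEF) ? NULL : (void *)handle;

	free(t);	/* free(NULL) is a no-op */
}

int main(void)
{
	wakeref_t w = tracker_alloc();

	printf("handle %#lx\n", w);
	tracker_free(w);
	return 0;
}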
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index dc327cf40..75278e78c 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -199,6 +199,9 @@ int intel_pxp_init(struct drm_i915_private *i915)
struct intel_gt *gt;
bool is_full_feature = false;
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return -ENOTCONN;
+
/*
* NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
* we still need it if PXP's backend tee transport is needed.
@@ -303,6 +306,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
if (!pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
/*
* To ensure synchronous and coherent session teardown completion
* in response to suspend or shutdown triggers, don't use a worker.
@@ -324,6 +329,8 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
if (pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
/*
* The arb-session is currently inactive and we are doing a reset and restart
* due to a runtime event. Use the worker that was designed for this.
@@ -332,8 +339,11 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
timeout = intel_pxp_get_backend_timeout_ms(pxp);
- if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
+ timeout);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -414,10 +424,12 @@ int intel_pxp_start(struct intel_pxp *pxp)
int ret = 0;
ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
- if (ret < 0)
+ if (ret < 0) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
return ret;
- else if (ret > 1)
+ } else if (ret > 1) {
return -EIO; /* per UAPI spec, user may retry later */
+ }
mutex_lock(&pxp->arb_mutex);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 91e9622c0..d81750b9b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -40,11 +40,12 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
/* immediately mark PXP as inactive on termination */
intel_pxp_mark_termination_in_progress(pxp);
- pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED |
+ PXP_EVENT_TYPE_IRQ;
}
if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
- pxp->session_events |= PXP_TERMINATION_COMPLETE;
+ pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
queue_work(system_unbound_wq, &pxp->session_work);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 0a3e66b02..091c86e03 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -137,8 +137,10 @@ void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_res
static void pxp_terminate_complete(struct intel_pxp *pxp)
{
/* Re-create the arb session after teardown handle complete */
- if (fetch_and_zero(&pxp->hw_state_invalidated))
+ if (fetch_and_zero(&pxp->hw_state_invalidated)) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: creating arb_session after invalidation");
pxp_create_arb_session(pxp);
+ }
complete_all(&pxp->termination);
}
@@ -157,6 +159,8 @@ static void pxp_session_work(struct work_struct *work)
if (!events)
return;
+ drm_dbg(&gt->i915->drm, "PXP: processing event-flags 0x%08x", events);
+
if (events & PXP_INVAL_REQUIRED)
intel_pxp_invalidate(pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7e11fa803..07864b584 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -124,6 +124,7 @@ struct intel_pxp {
#define PXP_TERMINATION_REQUEST BIT(0)
#define PXP_TERMINATION_COMPLETE BIT(1)
#define PXP_INVAL_REQUIRED BIT(2)
+#define PXP_EVENT_TYPE_IRQ BIT(3)
};
#endif /* __INTEL_PXP_TYPES_H__ */
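session_events is a plain bitmask, so the new PXP_EVENT_TYPE_IRQ bit travels alongside the termination flags and lets the session worker tell interrupt-origin events apart. A minimal sketch of that flag handling, with local macro copies purely for illustration:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define TERMINATION_REQUEST	BIT(0)
#define TERMINATION_COMPLETE	BIT(1)
#define INVAL_REQUIRED		BIT(2)
#define EVENT_TYPE_IRQ		BIT(3)

int main(void)
{
	/* As in the IRQ handler: termination flags ORed with the origin bit. */
	unsigned int events = TERMINATION_REQUEST | INVAL_REQUIRED |
			      EVENT_TYPE_IRQ;

	if (events & EVENT_TYPE_IRQ)
		printf("events 0x%08x came from the interrupt path\n", events);
	return 0;
}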
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
index 47f4ae18a..88fa845e9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -77,7 +77,7 @@ __sync_print(struct i915_syncmap *p,
for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
buf = __sync_print(__sync_child(p)[i], buf, sz,
depth + 1,
- last << 1 | !!(p->bitmap >> (i + 1)),
+ last << 1 | ((p->bitmap >> (i + 1)) ? 1 : 0),
i);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 2990dd4d4..e14ac0ab1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/jiffies.h>
+
//#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
@@ -12,7 +14,7 @@
#define REDUCED_TIMESLICE 5
#define REDUCED_PREEMPT 10
-#define WAIT_FOR_RESET_TIME 10000
+#define WAIT_FOR_RESET_TIME_MS 10000
struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
{
@@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq)
{
long ret;
- ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+ ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS));
if (ret < 0)
return ret;
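The fix matters because i915_request_wait() takes its timeout in jiffies; the old code passed a raw millisecond count, so the effective wait depended on CONFIG_HZ. A simplified standalone illustration of the conversion (the kernel's msecs_to_jiffies() is more careful; HZ here is just an example tick rate):

#include <stdio.h>

#define HZ 250	/* example; CONFIG_HZ in a real kernel */

/* Simplified msecs_to_jiffies(): round up to whole ticks. */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	/* 10000 ms is 2500 jiffies at HZ=250, not 10000. */
	printf("%lu\n", msecs_to_jiffies(10000));
	return 0;
}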
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 03ea75cd8..4f98aa8a8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -24,6 +24,8 @@
#include "../i915_selftest.h"
+#include "gt/intel_gt.h"
+
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
unsigned int num_ranges,
bool is_watertight)
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index f32e9f787..40874ebfb 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -33,18 +33,22 @@ int intel_gmch_bridge_setup(struct drm_i915_private *i915)
i915->gmch.pdev);
}
+static int mchbar_reg(struct drm_i915_private *i915)
+{
+ return GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+}
+
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
- int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
if (GRAPHICS_VER(i915) >= 4)
- pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
- pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, &temp_hi);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
@@ -68,10 +72,10 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
}
if (GRAPHICS_VER(i915) >= 4)
- pci_write_config_dword(i915->gmch.pdev, reg + 4,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4,
upper_32_bits(i915->gmch.mch_res.start));
- pci_write_config_dword(i915->gmch.pdev, reg,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
lower_32_bits(i915->gmch.mch_res.start));
return 0;
}
@@ -79,7 +83,6 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
/* Setup MCHBAR if possible, return true if we should disable it again */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
- int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -92,7 +95,7 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915)
pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
enabled = temp & 1;
}
@@ -110,15 +113,13 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915)
pci_write_config_dword(i915->gmch.pdev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
- pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), temp | 1);
}
}
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
- int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
if (i915->gmch.mchbar_need_disable) {
if (IS_I915G(i915) || IS_I915GM(i915)) {
u32 deven_val;
@@ -131,10 +132,10 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915)
} else {
u32 mchbar_val;
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915),
&mchbar_val);
mchbar_val &= ~1;
- pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
mchbar_val);
}
}
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index b98dec3ad..ffa195560 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -166,23 +166,6 @@ u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
return val;
}
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRWRDA_NP, reg, &val);
-}
-
u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
@@ -227,9 +210,9 @@ static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy p
return IOSF_PORT_DPIO;
}
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 port = vlv_dpio_phy_iosf_port(i915, phy);
u32 val = 0;
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
@@ -239,16 +222,16 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
* so ideally we should check the register offset instead...
*/
drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
+ "DPIO PHY%d read reg 0x%x == 0x%x\n",
+ phy, reg, val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val)
+ enum dpio_phy phy, int reg, u32 val)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 port = vlv_dpio_phy_iosf_port(i915, phy);
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index 9ce283d96..c20cf41b2 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -11,7 +11,7 @@
#include "vlv_sideband_reg.h"
-enum pipe;
+enum dpio_phy;
struct drm_i915_private;
enum {
@@ -26,9 +26,6 @@ enum {
};
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg);
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val);
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
static inline void vlv_bunit_get(struct drm_i915_private *i915)
@@ -75,9 +72,9 @@ static inline void vlv_dpio_get(struct drm_i915_private *i915)
vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
}
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg);
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg);
void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val);
+ enum dpio_phy phy, int reg, u32 val);
static inline void vlv_dpio_put(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
new file mode 100644
index 000000000..3bfa2ac21
--- /dev/null
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+# Copyright (c) 2023 Imagination Technologies Ltd.
+
+config DRM_POWERVR
+ tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics"
+ depends on ARM64
+ depends on DRM
+ depends on PM
+ select DRM_EXEC
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_SCHED
+ select DRM_GPUVM
+ select FW_LOADER
+ help
+ Choose this option if you have a system that has an Imagination
+ Technologies PowerVR (Series 6 or later) or IMG GPU.
+
+ If "M" is selected, the module will be called powervr.
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
new file mode 100644
index 000000000..ec6db8e9b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR MIT
+# Copyright (c) 2023 Imagination Technologies Ltd.
+
+subdir-ccflags-y := -I$(srctree)/$(src)
+
+powervr-y := \
+ pvr_ccb.o \
+ pvr_cccb.o \
+ pvr_context.o \
+ pvr_device.o \
+ pvr_device_info.o \
+ pvr_drv.o \
+ pvr_free_list.o \
+ pvr_fw.o \
+ pvr_fw_meta.o \
+ pvr_fw_mips.o \
+ pvr_fw_startstop.o \
+ pvr_fw_trace.o \
+ pvr_gem.o \
+ pvr_hwrt.o \
+ pvr_job.o \
+ pvr_mmu.o \
+ pvr_params.o \
+ pvr_power.o \
+ pvr_queue.o \
+ pvr_stream.o \
+ pvr_stream_defs.o \
+ pvr_sync.o \
+ pvr_vm.o \
+ pvr_vm_mips.o
+
+powervr-$(CONFIG_DEBUG_FS) += \
+ pvr_debugfs.o
+
+obj-$(CONFIG_DRM_POWERVR) += powervr.o
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c
new file mode 100644
index 000000000..4deeac7ed
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_ccb.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_free_list.h"
+#include "pvr_fw.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+
+#include <drm/drm_managed.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */
+#define RESERVE_SLOT_MIN_RETRIES 10
+
+static void
+ccb_ctrl_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr;
+ struct pvr_ccb *pvr_ccb = priv;
+
+ ctrl->write_offset = 0;
+ ctrl->read_offset = 0;
+ ctrl->wrap_mask = pvr_ccb->num_cmds - 1;
+ ctrl->cmd_size = pvr_ccb->cmd_size;
+}
+
+/**
+ * pvr_ccb_init() - Initialise a CCB
+ * @pvr_dev: Device pointer.
+ * @pvr_ccb: Pointer to CCB structure to initialise.
+ * @num_cmds_log2: Log2 of number of commands in this CCB.
+ * @cmd_size: Command size for this CCB.
+ *
+ * Return:
+ * * Zero on success, or
+ * * Any error code returned by pvr_fw_object_create_and_map().
+ */
+static int
+pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
+ u32 num_cmds_log2, size_t cmd_size)
+{
+ u32 num_cmds = 1 << num_cmds_log2;
+ u32 ccb_size = num_cmds * cmd_size;
+ int err;
+
+ pvr_ccb->num_cmds = num_cmds;
+ pvr_ccb->cmd_size = cmd_size;
+
+ err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock);
+ if (err)
+ return err;
+
+ /*
+ * Map CCB and control structure as uncached, so we don't have to flush
+ * CPU cache repeatedly when polling for space.
+ */
+ pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj);
+ if (IS_ERR(pvr_ccb->ctrl))
+ return PTR_ERR(pvr_ccb->ctrl);
+
+ pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &pvr_ccb->ccb_obj);
+ if (IS_ERR(pvr_ccb->ccb)) {
+ err = PTR_ERR(pvr_ccb->ccb);
+ goto err_free_ctrl;
+ }
+
+ pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
+ pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);
+
+ WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0);
+ WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0);
+ WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1);
+ WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size);
+
+ return 0;
+
+err_free_ctrl:
+ pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
+
+ return err;
+}
+
+/**
+ * pvr_ccb_fini() - Release CCB structure
+ * @pvr_ccb: CCB to release.
+ */
+void
+pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
+{
+ pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj);
+ pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj);
+}
+
+/**
+ * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
+ * @pvr_ccb: CCB to test.
+ * @write_offset: Address to store number of next available slot. May be %NULL.
+ *
+ * Caller must hold @pvr_ccb->lock.
+ *
+ * Return:
+ * * %true if a slot is available, or
+ * * %false if no slot is available.
+ */
+static __always_inline bool
+pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
+{
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+ u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask);
+
+ lockdep_assert_held(&pvr_ccb->lock);
+
+ if (READ_ONCE(ctrl->read_offset) != next_write_offset) {
+ if (write_offset)
+ *write_offset = next_write_offset;
+ return true;
+ }
+
+ return false;
+}
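Because the CCB is sized to a power of two, wrapping the write pointer is a single AND with wrap_mask, and the ring counts as full when the advanced write offset would collide with the read offset. A standalone sketch of the same check (toy function, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static bool slot_available(unsigned int rd, unsigned int wr,
			   unsigned int wrap_mask, unsigned int *next_wr)
{
	/* Advance-and-wrap in one step; works because size is 2^n. */
	unsigned int next = (wr + 1) & wrap_mask;

	if (rd == next)
		return false;	/* full: writing would overrun the reader */
	if (next_wr)
		*next_wr = next;
	return true;
}

int main(void)
{
	unsigned int next;

	/* 8-slot ring (mask 7): a write at offset 7 wraps to 0. */
	printf("%d %u\n", slot_available(3, 7, 7, &next), next);
	return 0;
}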
+
+static void
+process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
+{
+ switch (cmd->cmd_type) {
+ case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+ pvr_power_reset(pvr_dev, false);
+ break;
+
+ case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+ pvr_free_list_process_reconstruct_req(pvr_dev,
+ &cmd->cmd_data.cmd_freelists_reconstruction);
+ break;
+
+ case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW:
+ pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs);
+ break;
+
+ default:
+ drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n",
+ cmd->cmd_type);
+ break;
+ }
+}
+
+/**
+ * pvr_fwccb_process() - Process any pending FWCCB commands
+ * @pvr_dev: Target PowerVR device
+ */
+void pvr_fwccb_process(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
+ u32 read_offset;
+
+ mutex_lock(&pvr_dev->fwccb.lock);
+
+ while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) {
+ struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset];
+
+ WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask));
+
+ /* Drop FWCCB lock while we process command. */
+ mutex_unlock(&pvr_dev->fwccb.lock);
+
+ process_fwccb_command(pvr_dev, &cmd);
+
+ mutex_lock(&pvr_dev->fwccb.lock);
+ }
+
+ mutex_unlock(&pvr_dev->fwccb.lock);
+}
+
+/**
+ * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots.
+ * @pvr_dev: Target PowerVR device
+ *
+ * Return:
+ * * The maximum number of active slots.
+ */
+static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev)
+{
+ /* Capacity is the number of slots minus one, to cope with the wrapping
+ * mechanism. If we were to use all slots, we might end up with
+ * read_offset == write_offset, which the FW considers a KCCB-is-empty
+ * condition.
+ */
+ return pvr_dev->kccb.slot_count - 1;
+}
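A quick worked example of the invariant described in the comment: with read_offset == write_offset defined as empty, a ring of N slots can hold at most N - 1 commands, since filling all N would make full and empty indistinguishable. So a 16-slot KCCB has a usable capacity of 15:

#include <assert.h>

static unsigned int kccb_capacity(unsigned int slot_count)
{
	/* One slot sacrificed to disambiguate full from empty. */
	return slot_count - 1;
}

int main(void)
{
	assert(kccb_capacity(16) == 15);
	return 0;
}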
+
+/**
+ * pvr_kccb_used_slot_count_locked() - Get the number of used slots
+ * @pvr_dev: Device pointer.
+ *
+ * KCCB lock must be held.
+ *
+ * Return:
+ * * The number of slots currently used.
+ */
+static u32
+pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev)
+{
+ struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+ u32 wr_offset = READ_ONCE(ctrl->write_offset);
+ u32 rd_offset = READ_ONCE(ctrl->read_offset);
+ u32 used_count;
+
+ lockdep_assert_held(&pvr_ccb->lock);
+
+ if (wr_offset >= rd_offset)
+ used_count = wr_offset - rd_offset;
+ else
+ used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset;
+
+ return used_count;
+}
+
+/**
+ * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref
+ * held and a slot pre-reserved
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ */
+void
+pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot)
+{
+ struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb;
+ struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+ u32 old_write_offset;
+ u32 new_write_offset;
+
+ WARN_ON(pvr_dev->lost);
+
+ mutex_lock(&pvr_ccb->lock);
+
+ if (WARN_ON(!pvr_dev->kccb.reserved_count))
+ goto out_unlock;
+
+ old_write_offset = READ_ONCE(ctrl->write_offset);
+
+ /* We reserved a slot, so one should be available. */
+ if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset)))
+ goto out_unlock;
+
+ memcpy(&kccb[old_write_offset], cmd,
+ sizeof(struct rogue_fwif_kccb_cmd));
+ if (kccb_slot) {
+ *kccb_slot = old_write_offset;
+ /* Clear return status for this slot. */
+ WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset],
+ ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
+ }
+ mb(); /* make the command visible before publishing the new write offset */
+ WRITE_ONCE(ctrl->write_offset, new_write_offset);
+ pvr_dev->kccb.reserved_count--;
+
+ /* Kick MTS */
+ pvr_fw_mts_schedule(pvr_dev,
+ PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);
+
+out_unlock:
+ mutex_unlock(&pvr_ccb->lock);
+}
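The barrier before updating write_offset orders the command copy against the publish: a consumer that sees the new offset must also see the command data. A userspace analogue of that producer-side ordering using a C11 release store (the driver uses a full mb() since its consumer is firmware):

#include <stdatomic.h>
#include <string.h>

struct cmd { int payload; };

struct ring {
	struct cmd slots[8];
	_Atomic unsigned int write_offset;
};

static void publish(struct ring *r, unsigned int wr, unsigned int next,
		    const struct cmd *c)
{
	/* Fill the slot first... */
	memcpy(&r->slots[wr], c, sizeof(*c));
	/* ...then release-store the offset so the data is visible to any
	 * consumer that acquire-loads the new write_offset. */
	atomic_store_explicit(&r->write_offset, next, memory_order_release);
}

int main(void)
{
	struct ring r = { .write_offset = 0 };
	struct cmd c = { .payload = 1 };

	publish(&r, 0, 1, &c);
	return 0;
}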
+
+/**
+ * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot
+ * @pvr_dev: Device pointer.
+ *
+ * Return:
+ * * true if a KCCB slot was reserved, or
+ * * false otherwise.
+ */
+static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev)
+{
+ bool reserved = false;
+ u32 used_count;
+
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+
+ used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
+ if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) {
+ pvr_dev->kccb.reserved_count++;
+ reserved = true;
+ }
+
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+
+ return reserved;
+}
+
+/**
+ * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously
+ * @pvr_dev: Device pointer.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of
+ * %RESERVE_SLOT_MIN_RETRIES retries.
+ */
+static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
+{
+ unsigned long start_timestamp = jiffies;
+ bool reserved = false;
+ u32 retries = 0;
+
+ while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT ||
+ retries < RESERVE_SLOT_MIN_RETRIES) {
+ reserved = pvr_kccb_try_reserve_slot(pvr_dev);
+ if (reserved)
+ break;
+
+ usleep_range(1, 50);
+
+ if (retries < U32_MAX)
+ retries++;
+ }
+
+ return reserved ? 0 : -EBUSY;
+}
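Note the loop's shape: it polls under a deadline but also guarantees RESERVE_SLOT_MIN_RETRIES attempts, so a stall that consumes the whole budget before the first try cannot produce a spurious -EBUSY. A userspace sketch of that retry pattern, with a fake clock and a try function that succeeds on the third attempt (placeholders, not driver API):

#include <stdbool.h>
#include <stdio.h>

static unsigned long fake_time;
static unsigned long now(void) { return fake_time++; }

static int attempts;
static bool try_reserve(void) { return ++attempts == 3; }

/* Poll until the budget expires, but always make at least min_tries
 * attempts before giving up. */
static int poll_with_floor(unsigned long budget, unsigned int min_tries)
{
	unsigned long start = now();
	unsigned int tries = 0;

	while (now() - start < budget || tries < min_tries) {
		if (try_reserve())
			return 0;
		tries++;
	}
	return -1;	/* -EBUSY in the driver */
}

int main(void)
{
	int ret = poll_with_floor(100, 10);

	printf("%d after %d attempts\n", ret, attempts);
	return 0;
}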
+
+/**
+ * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ *
+ * Returns:
+ * * Zero on success, or
+ * * -EBUSY if timeout while waiting for a free KCCB slot.
+ */
+int
+pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot)
+{
+ int err;
+
+ err = pvr_kccb_reserve_slot_sync(pvr_dev);
+ if (err)
+ return err;
+
+ pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot);
+ return 0;
+}
+
+/**
+ * pvr_kccb_send_cmd() - Send command to the KCCB
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ *
+ * Returns:
+ * * Zero on success, or
+ * * -EBUSY if timeout while waiting for a free KCCB slot.
+ */
+int
+pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot)
+{
+ int err;
+
+ err = pvr_power_get(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot);
+
+ pvr_power_put(pvr_dev);
+
+ return err;
+}
+
+/**
+ * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
+ * @pvr_dev: Device pointer.
+ * @slot_nr: KCCB slot to wait on.
+ * @timeout: Timeout length (in jiffies).
+ * @rtn_out: Location to store KCCB command result. May be %NULL.
+ *
+ * Returns:
+ * * Zero on success, or
+ * * -ETIMEDOUT on timeout.
+ */
+int
+pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
+ u32 timeout, u32 *rtn_out)
+{
+ int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) &
+ ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);
+
+ if (ret && rtn_out)
+ *rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]);
+
+ return ret ? 0 : -ETIMEDOUT;
+}
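+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): send a command
+ * and wait up to one second for the FW to process it. "cmd" is assumed to be
+ * a fully initialised struct rogue_fwif_kccb_cmd.
+ *
+ *   u32 slot, rtn;
+ *   int err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot);
+ *
+ *   if (!err)
+ *           err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, &rtn);
+ */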
+
+/**
+ * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle
+ * @pvr_dev: Device pointer
+ *
+ * Returns:
+ * * %true if the KCCB is idle (contains no commands), or
+ * * %false if the KCCB contains pending commands.
+ */
+bool
+pvr_kccb_is_idle(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl;
+ bool idle;
+
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+
+ idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset));
+
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+
+ return idle;
+}
+
+static const char *
+pvr_kccb_fence_get_driver_name(struct dma_fence *f)
+{
+ return PVR_DRIVER_NAME;
+}
+
+static const char *
+pvr_kccb_fence_get_timeline_name(struct dma_fence *f)
+{
+ return "kccb";
+}
+
+static const struct dma_fence_ops pvr_kccb_fence_ops = {
+ .get_driver_name = pvr_kccb_fence_get_driver_name,
+ .get_timeline_name = pvr_kccb_fence_get_timeline_name,
+};
+
+/**
+ * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot
+ */
+struct pvr_kccb_fence {
+ /** @base: Base dma_fence object. */
+ struct dma_fence base;
+
+ /** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */
+ struct list_head node;
+};
+
+/**
+ * pvr_kccb_wake_up_waiters() - Check the KCCB waiters
+ * @pvr_dev: Target PowerVR device
+ *
+ * Signal as many KCCB fences as we have slots available.
+ */
+void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev)
+{
+ struct pvr_kccb_fence *fence, *tmp_fence;
+ u32 used_count, available_count;
+
+ /* Wake up those waiting for KCCB slot execution. */
+ wake_up_all(&pvr_dev->kccb.rtn_q);
+
+ /* Then iterate over all KCCB fences and signal as many as we can. */
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+ used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
+
+ if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev)))
+ goto out_unlock;
+
+ available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count;
+ list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) {
+ if (!available_count)
+ break;
+
+ list_del(&fence->node);
+ pvr_dev->kccb.reserved_count++;
+ available_count--;
+ dma_fence_signal(&fence->base);
+ dma_fence_put(&fence->base);
+ }
+
+out_unlock:
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+}
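+
+/*
+ * Illustrative invariant (commentary, not part of this patch): at all times
+ *
+ *   used_count + reserved_count <= pvr_kccb_capacity(pvr_dev)
+ *
+ * pvr_kccb_send_cmd_reserved_powered() moves one unit from reserved_count to
+ * used_count, the FW consuming a command shrinks used_count, and the function
+ * above hands any freed capacity to the oldest waiters in FIFO order.
+ */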
+
+/**
+ * pvr_kccb_fini() - Cleanup device KCCB
+ * @pvr_dev: Target PowerVR device
+ */
+void pvr_kccb_fini(struct pvr_device *pvr_dev)
+{
+ pvr_ccb_fini(&pvr_dev->kccb.ccb);
+ WARN_ON(!list_empty(&pvr_dev->kccb.waiters));
+ WARN_ON(pvr_dev->kccb.reserved_count);
+}
+
+/**
+ * pvr_kccb_init() - Initialise device KCCB
+ * @pvr_dev: Target PowerVR device
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_ccb_init().
+ */
+int
+pvr_kccb_init(struct pvr_device *pvr_dev)
+{
+ pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
+ INIT_LIST_HEAD(&pvr_dev->kccb.waiters);
+ pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1);
+ spin_lock_init(&pvr_dev->kccb.fence_ctx.lock);
+
+ return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb,
+ ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
+ sizeof(struct rogue_fwif_kccb_cmd));
+}
+
+/**
+ * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object
+ *
+ * Return:
+ * * NULL if the allocation fails, or
+ * * A valid dma_fence pointer otherwise.
+ */
+struct dma_fence *pvr_kccb_fence_alloc(void)
+{
+ struct pvr_kccb_fence *kccb_fence;
+
+ kccb_fence = kzalloc(sizeof(*kccb_fence), GFP_KERNEL);
+ if (!kccb_fence)
+ return NULL;
+
+ return &kccb_fence->base;
+}
+
+/**
+ * pvr_kccb_fence_put() - Drop a KCCB fence reference
+ * @fence: The fence to drop the reference on.
+ *
+ * If the fence hasn't been initialized yet, dma_fence_free() is called. This
+ * way we have a single function taking care of both cases.
+ */
+void pvr_kccb_fence_put(struct dma_fence *fence)
+{
+ if (!fence)
+ return;
+
+ if (!fence->ops) {
+ dma_fence_free(fence);
+ } else {
+ WARN_ON(fence->ops != &pvr_kccb_fence_ops);
+ dma_fence_put(fence);
+ }
+}
+
+/**
+ * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use
+ * @pvr_dev: Target PowerVR device
+ * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc()
+ *
+ * Tries to reserve a KCCB slot. If no slot is available, the fence object
+ * is initialized and queued to the waiters list.
+ *
+ * If NULL is returned, a slot was reserved directly. In that case @f is
+ * released and must not be accessed after this call.
+ *
+ * Return:
+ * * NULL if a slot was available directly, or
+ * * A valid dma_fence object to wait on if no slot was available.
+ */
+struct dma_fence *
+pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f)
+{
+ struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base);
+ struct dma_fence *out_fence = NULL;
+ u32 used_count;
+
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+
+ used_count = pvr_kccb_used_slot_count_locked(pvr_dev);
+ if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) {
+ dma_fence_init(&fence->base, &pvr_kccb_fence_ops,
+ &pvr_dev->kccb.fence_ctx.lock,
+ pvr_dev->kccb.fence_ctx.id,
+ atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno));
+ out_fence = dma_fence_get(&fence->base);
+ list_add_tail(&fence->node, &pvr_dev->kccb.waiters);
+ } else {
+ pvr_kccb_fence_put(f);
+ pvr_dev->kccb.reserved_count++;
+ }
+
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+
+ return out_fence;
+}
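+
+/*
+ * Illustrative flow (hypothetical caller, not part of this patch): the
+ * fence-based reservation dance. If no slot is free, the returned fence
+ * signals once pvr_kccb_wake_up_waiters() has reserved a slot on our behalf.
+ *
+ *   struct dma_fence *f = pvr_kccb_fence_alloc();
+ *
+ *   if (!f)
+ *           return -ENOMEM;
+ *
+ *   f = pvr_kccb_reserve_slot(pvr_dev, f);
+ *   if (f) {
+ *           dma_fence_wait(f, false);
+ *           dma_fence_put(f);
+ *   }
+ *
+ *   pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd, NULL);
+ */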
+
+/**
+ * pvr_kccb_release_slot() - Release a KCCB slot reserved with
+ * pvr_kccb_reserve_slot()
+ * @pvr_dev: Target PowerVR device
+ *
+ * Should only be called if something failed after the
+ * pvr_kccb_reserve_slot() call and you know you won't call
+ * pvr_kccb_send_cmd_reserved_powered().
+ */
+void pvr_kccb_release_slot(struct pvr_device *pvr_dev)
+{
+ mutex_lock(&pvr_dev->kccb.ccb.lock);
+ if (!WARN_ON(!pvr_dev->kccb.reserved_count))
+ pvr_dev->kccb.reserved_count--;
+ mutex_unlock(&pvr_dev->kccb.ccb.lock);
+}
+
+/**
+ * pvr_fwccb_init() - Initialise device FWCCB
+ * @pvr_dev: Target PowerVR device
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_ccb_init().
+ */
+int
+pvr_fwccb_init(struct pvr_device *pvr_dev)
+{
+ return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
+ ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
+ sizeof(struct rogue_fwif_fwccb_cmd));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.h b/drivers/gpu/drm/imagination/pvr_ccb.h
new file mode 100644
index 000000000..4c8aef31e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_ccb.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CCB_H
+#define PVR_CCB_H
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+struct pvr_ccb {
+ /** @ctrl_obj: FW object representing CCB control structure. */
+ struct pvr_fw_object *ctrl_obj;
+ /** @ccb_obj: FW object representing CCB. */
+ struct pvr_fw_object *ccb_obj;
+
+ /** @ctrl_fw_addr: FW virtual address of CCB control structure. */
+ u32 ctrl_fw_addr;
+ /** @ccb_fw_addr: FW virtual address of CCB. */
+ u32 ccb_fw_addr;
+
+ /** @num_cmds: Number of commands in this CCB. */
+ u32 num_cmds;
+
+ /** @cmd_size: Size of each command in this CCB, in bytes. */
+ u32 cmd_size;
+
+ /** @lock: Mutex protecting @ctrl and @ccb. */
+ struct mutex lock;
+ /**
+ * @ctrl: Kernel mapping of CCB control structure. @lock must be held
+ * when accessing.
+ */
+ struct rogue_fwif_ccb_ctl *ctrl;
+ /** @ccb: Kernel mapping of CCB. @lock must be held when accessing. */
+ void *ccb;
+};
+
+int pvr_kccb_init(struct pvr_device *pvr_dev);
+void pvr_kccb_fini(struct pvr_device *pvr_dev);
+int pvr_fwccb_init(struct pvr_device *pvr_dev);
+void pvr_ccb_fini(struct pvr_ccb *ccb);
+
+void pvr_fwccb_process(struct pvr_device *pvr_dev);
+
+struct dma_fence *pvr_kccb_fence_alloc(void);
+void pvr_kccb_fence_put(struct dma_fence *fence);
+struct dma_fence *
+pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f);
+void pvr_kccb_release_slot(struct pvr_device *pvr_dev);
+int pvr_kccb_send_cmd(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd, u32 *kccb_slot);
+int pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot);
+void pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev,
+ struct rogue_fwif_kccb_cmd *cmd,
+ u32 *kccb_slot);
+int pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr, u32 timeout,
+ u32 *rtn_out);
+bool pvr_kccb_is_idle(struct pvr_device *pvr_dev);
+void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev);
+
+#endif /* PVR_CCB_H */
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.c b/drivers/gpu/drm/imagination/pvr_cccb.c
new file mode 100644
index 000000000..4fabab41b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_cccb.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+static __always_inline u32
+get_ccb_space(u32 w_off, u32 r_off, u32 ccb_size)
+{
+ return (((r_off) - (w_off)) + ((ccb_size) - 1)) & ((ccb_size) - 1);
+}
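+
+/*
+ * Worked example (commentary, not part of this patch): with ccb_size = 16 (a
+ * power of two), get_ccb_space() returns the free space minus one slot, so
+ * the write offset can never catch up with the read offset:
+ *
+ *   w_off = 4, r_off = 4  ->  (0 + 15) & 15 = 15   (empty ring, 15 usable)
+ *   w_off = 6, r_off = 4  ->  (-2 + 15) & 15 = 13
+ */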
+
+static void
+cccb_ctrl_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_cccb_ctl *ctrl = cpu_ptr;
+ struct pvr_cccb *pvr_cccb = priv;
+
+ WRITE_ONCE(ctrl->write_offset, 0);
+ WRITE_ONCE(ctrl->read_offset, 0);
+ WRITE_ONCE(ctrl->dep_offset, 0);
+ WRITE_ONCE(ctrl->wrap_mask, pvr_cccb->wrap_mask);
+}
+
+/**
+ * pvr_cccb_init() - Initialise a Client CCB
+ * @pvr_dev: Device pointer.
+ * @pvr_cccb: Pointer to Client CCB structure to initialise.
+ * @size_log2: Log2 size of Client CCB in bytes.
+ * @name: Name of owner of Client CCB. Used for fence context.
+ *
+ * Return:
+ * * Zero on success, or
+ * * Any error code returned by pvr_fw_object_create_and_map().
+ */
+int
+pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *pvr_cccb,
+ u32 size_log2, const char *name)
+{
+ size_t size = 1 << size_log2;
+ int err;
+
+ pvr_cccb->size = size;
+ pvr_cccb->write_offset = 0;
+ pvr_cccb->wrap_mask = size - 1;
+
+ /*
+ * Map CCCB and control structure as uncached, so we don't have to flush
+ * CPU cache repeatedly when polling for space.
+ */
+ pvr_cccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_cccb->ctrl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ cccb_ctrl_init, pvr_cccb,
+ &pvr_cccb->ctrl_obj);
+ if (IS_ERR(pvr_cccb->ctrl))
+ return PTR_ERR(pvr_cccb->ctrl);
+
+ pvr_cccb->cccb = pvr_fw_object_create_and_map(pvr_dev, size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &pvr_cccb->cccb_obj);
+ if (IS_ERR(pvr_cccb->cccb)) {
+ err = PTR_ERR(pvr_cccb->cccb);
+ goto err_free_ctrl;
+ }
+
+ pvr_fw_object_get_fw_addr(pvr_cccb->ctrl_obj, &pvr_cccb->ctrl_fw_addr);
+ pvr_fw_object_get_fw_addr(pvr_cccb->cccb_obj, &pvr_cccb->cccb_fw_addr);
+
+ return 0;
+
+err_free_ctrl:
+ pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj);
+
+ return err;
+}
+
+/**
+ * pvr_cccb_fini() - Release Client CCB structure
+ * @pvr_cccb: Client CCB to release.
+ */
+void
+pvr_cccb_fini(struct pvr_cccb *pvr_cccb)
+{
+ pvr_fw_object_unmap_and_destroy(pvr_cccb->cccb_obj);
+ pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj);
+}
+
+/**
+ * pvr_cccb_cmdseq_fits() - Check if a command sequence fits in the CCCB
+ * @pvr_cccb: Target Client CCB.
+ * @size: Size of the command sequence.
+ *
+ * Check if a command sequence fits in the CCCB we have at hand.
+ *
+ * Return:
+ * * true if the command sequence fits in the CCCB, or
+ * * false otherwise.
+ */
+bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size)
+{
+ struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+ u32 read_offset, remaining;
+ bool fits = false;
+
+ read_offset = READ_ONCE(ctrl->read_offset);
+ remaining = pvr_cccb->size - pvr_cccb->write_offset;
+
+ /* Always ensure we have enough room for a padding command at the end of the CCCB.
+ * If our command sequence does not fit, reserve the remaining space for a padding
+ * command.
+ */
+ if (size + PADDING_COMMAND_SIZE > remaining)
+ size += remaining;
+
+ if (get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size) >= size)
+ fits = true;
+
+ return fits;
+}
+
+/**
+ * pvr_cccb_write_command_with_header() - Write a command + command header to a
+ * Client CCB
+ * @pvr_cccb: Target Client CCB.
+ * @cmd_type: Client CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
+ * @cmd_size: Size of command in bytes.
+ * @cmd_data: Pointer to command to write.
+ * @ext_job_ref: External job reference.
+ * @int_job_ref: Internal job reference.
+ *
+ * Caller must make sure there's enough space in CCCB to queue this command. This
+ * can be done by calling pvr_cccb_cmdseq_fits().
+ *
+ * This function is not protected by any lock. The caller must ensure there's
+ * no concurrent caller, which should be guaranteed by the drm_sched model (job
+ * submission is serialized in drm_sched_main()).
+ */
+void
+pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, u32 cmd_type, u32 cmd_size,
+ void *cmd_data, u32 ext_job_ref, u32 int_job_ref)
+{
+ u32 sz_with_hdr = pvr_cccb_get_size_of_cmd_with_hdr(cmd_size);
+ struct rogue_fwif_ccb_cmd_header cmd_header = {
+ .cmd_type = cmd_type,
+ .cmd_size = ALIGN(cmd_size, 8),
+ .ext_job_ref = ext_job_ref,
+ .int_job_ref = int_job_ref,
+ };
+ struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+ u32 remaining = pvr_cccb->size - pvr_cccb->write_offset;
+ u32 required_size, cccb_space, read_offset;
+
+ /*
+ * Always ensure we have enough room for a padding command at the end of
+ * the CCCB.
+ */
+ if (remaining < sz_with_hdr + PADDING_COMMAND_SIZE) {
+ /*
+ * Command would need to wrap, so we need to pad the remainder
+ * of the CCCB.
+ */
+ required_size = sz_with_hdr + remaining;
+ } else {
+ required_size = sz_with_hdr;
+ }
+
+ read_offset = READ_ONCE(ctrl->read_offset);
+ cccb_space = get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size);
+ if (WARN_ON(cccb_space < required_size))
+ return;
+
+ if (required_size != sz_with_hdr) {
+ /* Add padding command */
+ struct rogue_fwif_ccb_cmd_header pad_cmd = {
+ .cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_PADDING,
+ .cmd_size = remaining - sizeof(pad_cmd),
+ };
+
+ memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &pad_cmd, sizeof(pad_cmd));
+ pvr_cccb->write_offset = 0;
+ }
+
+ memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &cmd_header, sizeof(cmd_header));
+ memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset + sizeof(cmd_header)], cmd_data, cmd_size);
+ pvr_cccb->write_offset += sz_with_hdr;
+}
+
+static void fill_cmd_kick_data(struct pvr_cccb *cccb, u32 ctx_fw_addr,
+ struct pvr_hwrt_data *hwrt,
+ struct rogue_fwif_kccb_cmd_kick_data *k)
+{
+ k->context_fw_addr = ctx_fw_addr;
+ k->client_woff_update = cccb->write_offset;
+ k->client_wrap_mask_update = cccb->wrap_mask;
+
+ if (hwrt) {
+ u32 cleanup_state_offset = offsetof(struct rogue_fwif_hwrtdata, cleanup_state);
+
+ pvr_fw_object_get_fw_addr_offset(hwrt->fw_obj, cleanup_state_offset,
+ &k->cleanup_ctl_fw_addr[k->num_cleanup_ctl++]);
+ }
+}
+
+/**
+ * pvr_cccb_send_kccb_kick() - Send KCCB kick to trigger command processing
+ * @pvr_dev: Device pointer.
+ * @pvr_cccb: Pointer to CCCB to process.
+ * @cctx_fw_addr: FW virtual address for context owning this Client CCB.
+ * @hwrt: HWRT data set associated with this kick. May be %NULL.
+ *
+ * The caller must have called pvr_kccb_reserve_slot() and, if it returned a
+ * fence rather than %NULL, waited for that fence to signal before calling
+ * pvr_cccb_send_kccb_kick().
+ */
+void
+pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
+ struct pvr_hwrt_data *hwrt)
+{
+ struct rogue_fwif_kccb_cmd cmd_kick = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_KICK,
+ };
+
+ fill_cmd_kick_data(pvr_cccb, cctx_fw_addr, hwrt, &cmd_kick.cmd_data.cmd_kick_data);
+
+ /* Make sure the writes to the CCCB are flushed before sending the KICK. */
+ wmb();
+
+ pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL);
+}
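+
+/*
+ * Usage sketch (hypothetical job-submission path, not part of this patch):
+ * check for space, write the command, then kick the FW. A KCCB slot is
+ * assumed to have been reserved beforehand, as described above.
+ *
+ *   if (!pvr_cccb_cmdseq_fits(pvr_cccb, pvr_cccb_get_size_of_cmd_with_hdr(cmd_size)))
+ *           return -ENOSPC;
+ *
+ *   pvr_cccb_write_command_with_header(pvr_cccb, cmd_type, cmd_size, cmd_data,
+ *                                      ext_job_ref, int_job_ref);
+ *   pvr_cccb_send_kccb_kick(pvr_dev, pvr_cccb, cctx_fw_addr, hwrt);
+ */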
+
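+/**
+ * pvr_cccb_send_kccb_combined_kick() - Send KCCB kick for a combined geometry
+ * and fragment job
+ * @pvr_dev: Device pointer.
+ * @geom_cccb: Client CCB of the geometry context.
+ * @frag_cccb: Client CCB of the fragment context.
+ * @geom_ctx_fw_addr: FW virtual address of the geometry context.
+ * @frag_ctx_fw_addr: FW virtual address of the fragment context.
+ * @hwrt: HWRT data set associated with this kick. May be %NULL.
+ * @frag_is_pr: True if the fragment job is a partial render.
+ *
+ * The same KCCB slot reservation rules as for pvr_cccb_send_kccb_kick() apply.
+ */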
+void
+pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *geom_cccb,
+ struct pvr_cccb *frag_cccb,
+ u32 geom_ctx_fw_addr,
+ u32 frag_ctx_fw_addr,
+ struct pvr_hwrt_data *hwrt,
+ bool frag_is_pr)
+{
+ struct rogue_fwif_kccb_cmd cmd_kick = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK,
+ };
+
+ fill_cmd_kick_data(geom_cccb, geom_ctx_fw_addr, hwrt,
+ &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.geom_cmd_kick_data);
+
+ /* If this is a partial-render job, we don't attach resources to the
+ * cleanup-ctl array, because the resources are already retained by the
+ * geometry job.
+ */
+ fill_cmd_kick_data(frag_cccb, frag_ctx_fw_addr, frag_is_pr ? NULL : hwrt,
+ &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.frag_cmd_kick_data);
+
+ /* Make sure the writes to the CCCB are flushed before sending the KICK. */
+ wmb();
+
+ pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h
new file mode 100644
index 000000000..943fe8f2c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_cccb.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CCCB_H
+#define PVR_CCCB_H
+
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_shared.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define PADDING_COMMAND_SIZE sizeof(struct rogue_fwif_ccb_cmd_header)
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declaration from pvr_hwrt.h. */
+struct pvr_hwrt_data;
+
+struct pvr_cccb {
+ /** @ctrl_obj: FW object representing CCCB control structure. */
+ struct pvr_fw_object *ctrl_obj;
+
+ /** @cccb_obj: FW object representing CCCB. */
+ struct pvr_fw_object *cccb_obj;
+
+ /**
+ * @ctrl: Kernel mapping of CCCB control structure. Access is serialized
+ * by the job submission path (see pvr_cccb_write_command_with_header()).
+ */
+ struct rogue_fwif_cccb_ctl *ctrl;
+
+ /** @cccb: Kernel mapping of CCCB. Access is serialized as for @ctrl. */
+ u8 *cccb;
+
+ /** @ctrl_fw_addr: FW virtual address of CCCB control structure. */
+ u32 ctrl_fw_addr;
+ /** @cccb_fw_addr: FW virtual address of CCCB. */
+ u32 cccb_fw_addr;
+
+ /** @size: Size of CCCB in bytes. */
+ size_t size;
+
+ /** @write_offset: CCCB write offset. */
+ u32 write_offset;
+
+ /** @wrap_mask: CCCB wrap mask. */
+ u32 wrap_mask;
+};
+
+int pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *cccb,
+ u32 size_log2, const char *name);
+void pvr_cccb_fini(struct pvr_cccb *cccb);
+
+void pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb,
+ u32 cmd_type, u32 cmd_size, void *cmd_data,
+ u32 ext_job_ref, u32 int_job_ref);
+void pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
+ struct pvr_hwrt_data *hwrt);
+void pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev,
+ struct pvr_cccb *geom_cccb,
+ struct pvr_cccb *frag_cccb,
+ u32 geom_ctx_fw_addr,
+ u32 frag_ctx_fw_addr,
+ struct pvr_hwrt_data *hwrt,
+ bool frag_is_pr);
+bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size);
+
+/**
+ * pvr_cccb_get_size_of_cmd_with_hdr() - Get the size of a command and its header.
+ * @cmd_size: Command size.
+ *
+ * Returns the size of the command and its header.
+ */
+static __always_inline u32
+pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size)
+{
+ WARN_ON(!IS_ALIGNED(cmd_size, 8));
+ return sizeof(struct rogue_fwif_ccb_cmd_header) + ALIGN(cmd_size, 8);
+}
+
+/**
+ * pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB.
+ * @pvr_cccb: Target Client CCB.
+ * @size: Command sequence size.
+ *
+ * Returns:
+ * * true if the CCCB is big enough to contain the command sequence, or
+ * * false otherwise.
+ */
+static __always_inline bool
+pvr_cccb_cmdseq_can_fit(struct pvr_cccb *pvr_cccb, size_t size)
+{
+ /* We divide the capacity by two to simplify our CCCB fencing logic:
+ * we want to be sure that, no matter what we had queued before, we
+ * are able to either queue our command sequence at the end or add a
+ * padding command and queue the command sequence at the beginning
+ * of the CCCB. If the command sequence size is bigger than half the
+ * CCCB capacity, we'd have to queue the padding command and make sure
+ * the FW is done processing it before queueing our command sequence.
+ */
+ return size + PADDING_COMMAND_SIZE <= pvr_cccb->size / 2;
+}
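+
+/*
+ * Worked example (commentary, not part of this patch; the header size is
+ * assumed for illustration): with a 4KiB CCCB and a 16-byte command header,
+ *
+ *   size + 16 <= 4096 / 2  ->  size <= 2032 bytes
+ *
+ * is the largest command sequence that qualifies for the simplified fencing
+ * logic described above.
+ */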
+
+#endif /* PVR_CCCB_H */
diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c
new file mode 100644
index 000000000..eded5e955
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_cccb.h"
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_job.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_resetframework.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_auth.h>
+#include <drm/drm_managed.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+static int
+remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
+ enum pvr_context_priority *priority_out)
+{
+ switch (uapi_priority) {
+ case DRM_PVR_CTX_PRIORITY_LOW:
+ *priority_out = PVR_CTX_PRIORITY_LOW;
+ break;
+ case DRM_PVR_CTX_PRIORITY_NORMAL:
+ *priority_out = PVR_CTX_PRIORITY_MEDIUM;
+ break;
+ case DRM_PVR_CTX_PRIORITY_HIGH:
+ if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
+ return -EACCES;
+ *priority_out = PVR_CTX_PRIORITY_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_fw_obj_size(enum drm_pvr_ctx_type type)
+{
+ switch (type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ return sizeof(struct rogue_fwif_fwrendercontext);
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ return sizeof(struct rogue_fwif_fwcomputecontext);
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ return sizeof(struct rogue_fwif_fwtransfercontext);
+ }
+
+ return -EINVAL;
+}
+
+static int
+process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ u64 stream_user_ptr, u32 stream_size, void *dest)
+{
+ void *stream;
+ int err;
+
+ stream = kzalloc(stream_size, GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
+ if (err)
+ goto err_free;
+
+ kfree(stream);
+
+ return 0;
+
+err_free:
+ kfree(stream);
+
+ return err;
+}
+
+static int init_render_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
+ struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map;
+
+ if (!args->static_context_state_len)
+ return -EINVAL;
+
+ static_rendercontext_state = &fw_render_context->static_render_context_state;
+
+ /* Copy static render context state from userspace. */
+ return process_static_context_state(ctx->pvr_dev,
+ &pvr_static_render_context_state_stream,
+ args->static_context_state,
+ args->static_context_state_len,
+ &static_rendercontext_state->ctxswitch_regs[0]);
+}
+
+static int init_compute_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map;
+ struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;
+
+ if (!args->static_context_state_len)
+ return -EINVAL;
+
+ ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs;
+
+ /* Copy static compute context state from userspace. */
+ return process_static_context_state(ctx->pvr_dev,
+ &pvr_static_compute_context_state_stream,
+ args->static_context_state,
+ args->static_context_state_len,
+ ctxswitch_regs);
+}
+
+static int init_transfer_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ if (args->static_context_state_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int init_fw_objs(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ return init_render_fw_objs(ctx, args, fw_ctx_map);
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ return init_compute_fw_objs(ctx, args, fw_ctx_map);
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ return init_transfer_fw_objs(ctx, args, fw_ctx_map);
+ }
+
+ return -EINVAL;
+}
+
+static void
+ctx_fw_data_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_context *ctx = priv;
+
+ memcpy(cpu_ptr, ctx->data, ctx->data_size);
+}
+
+/**
+ * pvr_context_destroy_queues() - Destroy all queues attached to a context.
+ * @ctx: Context to destroy queues on.
+ *
+ * Should be called when the last reference to a context object is dropped.
+ * It releases all resources attached to the queues bound to this context.
+ */
+static void pvr_context_destroy_queues(struct pvr_context *ctx)
+{
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ pvr_queue_destroy(ctx->queues.fragment);
+ pvr_queue_destroy(ctx->queues.geometry);
+ break;
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ pvr_queue_destroy(ctx->queues.compute);
+ break;
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ pvr_queue_destroy(ctx->queues.transfer);
+ break;
+ }
+}
+
+/**
+ * pvr_context_create_queues() - Create all queues attached to a context.
+ * @ctx: Context to create queues on.
+ * @args: Context creation arguments passed by userspace.
+ * @fw_ctx_map: CPU mapping of the FW context object.
+ *
+ * Return:
+ * * 0 on success, or
+ * * A negative error code otherwise.
+ */
+static int pvr_context_create_queues(struct pvr_context *ctx,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ int err;
+
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.geometry)) {
+ err = PTR_ERR(ctx->queues.geometry);
+ ctx->queues.geometry = NULL;
+ goto err_destroy_queues;
+ }
+
+ ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.fragment)) {
+ err = PTR_ERR(ctx->queues.fragment);
+ ctx->queues.fragment = NULL;
+ goto err_destroy_queues;
+ }
+ return 0;
+
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.compute)) {
+ err = PTR_ERR(ctx->queues.compute);
+ ctx->queues.compute = NULL;
+ goto err_destroy_queues;
+ }
+ return 0;
+
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+ args, fw_ctx_map);
+ if (IS_ERR(ctx->queues.transfer)) {
+ err = PTR_ERR(ctx->queues.transfer);
+ ctx->queues.transfer = NULL;
+ goto err_destroy_queues;
+ }
+ return 0;
+ }
+
+ return -EINVAL;
+
+err_destroy_queues:
+ pvr_context_destroy_queues(ctx);
+ return err;
+}
+
+/**
+ * pvr_context_kill_queues() - Kill queues attached to context.
+ * @ctx: Context to kill queues on.
+ *
+ * Killing the queues makes them unusable for future jobs, while still
+ * giving currently submitted jobs a chance to finish. Queue resources will
+ * stay around until pvr_context_destroy_queues() is called.
+ */
+static void pvr_context_kill_queues(struct pvr_context *ctx)
+{
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ pvr_queue_kill(ctx->queues.fragment);
+ pvr_queue_kill(ctx->queues.geometry);
+ break;
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ pvr_queue_kill(ctx->queues.compute);
+ break;
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ pvr_queue_kill(ctx->queues.transfer);
+ break;
+ }
+}
+
+/**
+ * pvr_context_create() - Create a context.
+ * @pvr_file: File to attach the created context to.
+ * @args: Context creation arguments.
+ *
+ * Return:
+ * * 0 on success, or
+ * * A negative error code on failure.
+ */
+int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args)
+{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ struct pvr_context *ctx;
+ int ctx_size;
+ int err;
+
+ /* Context creation flags are currently unused and must be zero. */
+ if (args->flags)
+ return -EINVAL;
+
+ ctx_size = get_fw_obj_size(args->type);
+ if (ctx_size < 0)
+ return ctx_size;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->data_size = ctx_size;
+ ctx->type = args->type;
+ ctx->flags = args->flags;
+ ctx->pvr_dev = pvr_dev;
+ kref_init(&ctx->ref_count);
+
+ err = remap_priority(pvr_file, args->priority, &ctx->priority);
+ if (err)
+ goto err_free_ctx;
+
+ ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (IS_ERR(ctx->vm_ctx)) {
+ err = PTR_ERR(ctx->vm_ctx);
+ goto err_free_ctx;
+ }
+
+ ctx->data = kzalloc(ctx_size, GFP_KERNEL);
+ if (!ctx->data) {
+ err = -ENOMEM;
+ goto err_put_vm;
+ }
+
+ err = pvr_context_create_queues(ctx, args, ctx->data);
+ if (err)
+ goto err_free_ctx_data;
+
+ err = init_fw_objs(ctx, args, ctx->data);
+ if (err)
+ goto err_destroy_queues;
+
+ err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ ctx_fw_data_init, ctx, &ctx->fw_obj);
+ if (err)
+ goto err_destroy_queues;
+
+ err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_destroy_fw_obj;
+
+ err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err) {
+ /*
+ * Another thread could have taken a reference on the context by now, as
+ * it is already visible in the ctx_ids xarray. Instead of destroying the
+ * context directly, drop a reference.
+ */
+ pvr_context_put(ctx);
+ return err;
+ }
+
+ return 0;
+
+err_destroy_fw_obj:
+ pvr_fw_object_destroy(ctx->fw_obj);
+
+err_destroy_queues:
+ pvr_context_destroy_queues(ctx);
+
+err_free_ctx_data:
+ kfree(ctx->data);
+
+err_put_vm:
+ pvr_vm_context_put(ctx->vm_ctx);
+
+err_free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+static void
+pvr_context_release(struct kref *ref_count)
+{
+ struct pvr_context *ctx =
+ container_of(ref_count, struct pvr_context, ref_count);
+ struct pvr_device *pvr_dev = ctx->pvr_dev;
+
+ xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
+ pvr_context_destroy_queues(ctx);
+ pvr_fw_object_destroy(ctx->fw_obj);
+ kfree(ctx->data);
+ pvr_vm_context_put(ctx->vm_ctx);
+ kfree(ctx);
+}
+
+/**
+ * pvr_context_put() - Release reference on context
+ * @ctx: Target context.
+ */
+void
+pvr_context_put(struct pvr_context *ctx)
+{
+ if (ctx)
+ kref_put(&ctx->ref_count, pvr_context_release);
+}
+
+/**
+ * pvr_context_destroy() - Destroy context
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Userspace context handle.
+ *
+ * Removes context from context list and drops initial reference. Context will
+ * then be destroyed once all outstanding references are dropped.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if context not in context list.
+ */
+int
+pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);
+
+ if (!ctx)
+ return -EINVAL;
+
+ /* Make sure nothing can be queued to the queues after that point. */
+ pvr_context_kill_queues(ctx);
+
+ /* Release the reference held by the handle set. */
+ pvr_context_put(ctx);
+
+ return 0;
+}
+
+/**
+ * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all contexts associated with @pvr_file from the device context list and drops initial
+ * references. Contexts will then be destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_context *ctx;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->ctx_handles, handle, ctx)
+ pvr_context_destroy(pvr_file, handle);
+}
+
+/**
+ * pvr_context_device_init() - Device level initialization for queue related resources.
+ * @pvr_dev: The device to initialize.
+ */
+void pvr_context_device_init(struct pvr_device *pvr_dev)
+{
+ xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+}
+
+/**
+ * pvr_context_device_fini() - Device level cleanup for queue related resources.
+ * @pvr_dev: The device to cleanup.
+ */
+void pvr_context_device_fini(struct pvr_device *pvr_dev)
+{
+ WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
+ xa_destroy(&pvr_dev->ctx_ids);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
new file mode 100644
index 000000000..0c7b97dfa
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CONTEXT_H
+#define PVR_CONTEXT_H
+
+#include <drm/gpu_scheduler.h>
+
+#include <linux/compiler_attributes.h>
+#include <linux/dma-fence.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+#include "pvr_queue.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+enum pvr_context_priority {
+ PVR_CTX_PRIORITY_LOW = 0,
+ PVR_CTX_PRIORITY_MEDIUM,
+ PVR_CTX_PRIORITY_HIGH,
+};
+
+/**
+ * struct pvr_context - Context data
+ */
+struct pvr_context {
+ /** @ref_count: Refcount for context. */
+ struct kref ref_count;
+
+ /** @pvr_dev: Pointer to owning device. */
+ struct pvr_device *pvr_dev;
+
+ /** @vm_ctx: Pointer to associated VM context. */
+ struct pvr_vm_context *vm_ctx;
+
+ /** @type: Type of context. */
+ enum drm_pvr_ctx_type type;
+
+ /** @flags: Context flags. */
+ u32 flags;
+
+ /** @priority: Context priority. */
+ enum pvr_context_priority priority;
+
+ /** @fw_obj: FW object representing FW-side context data. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @data: Pointer to local copy of FW context data. */
+ void *data;
+
+ /** @data_size: Size of FW context data, in bytes. */
+ u32 data_size;
+
+ /** @ctx_id: FW context ID. */
+ u32 ctx_id;
+
+ /**
+ * @faulty: Set to 1 when a GPU reset happened while the context queues
+ * still had unfinished jobs.
+ *
+ * In that case, the context is in an inconsistent state and can't be
+ * used anymore.
+ */
+ atomic_t faulty;
+
+ /** @queues: Union containing all kind of queues. */
+ union {
+ struct {
+ /** @geometry: Geometry queue. */
+ struct pvr_queue *geometry;
+
+ /** @fragment: Fragment queue. */
+ struct pvr_queue *fragment;
+ };
+
+ /** @compute: Compute queue. */
+ struct pvr_queue *compute;
+
+ /** @transfer: Transfer queue. */
+ struct pvr_queue *transfer;
+ } queues;
+};
+
+static __always_inline struct pvr_queue *
+pvr_context_get_queue_for_job(struct pvr_context *ctx, enum drm_pvr_job_type type)
+{
+ switch (type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL;
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL;
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL;
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * pvr_context_get() - Take additional reference on context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ * * The requested context on success, or
+ * * %NULL if no context pointer passed.
+ */
+static __always_inline struct pvr_context *
+pvr_context_get(struct pvr_context *ctx)
+{
+ if (ctx)
+ kref_get(&ctx->ref_count);
+
+ return ctx;
+}
+
+/**
+ * pvr_context_lookup() - Lookup context pointer from handle and file.
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Context handle.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ * * The requested context on success, or
+ * * %NULL on failure (context does not exist, or does not belong to @pvr_file).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_context *ctx;
+
+ /* Take the array lock to protect against context removal. */
+ xa_lock(&pvr_file->ctx_handles);
+ ctx = pvr_context_get(xa_load(&pvr_file->ctx_handles, handle));
+ xa_unlock(&pvr_file->ctx_handles);
+
+ return ctx;
+}
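+
+/*
+ * Usage sketch (hypothetical ioctl handler, not part of this patch):
+ *
+ *   struct pvr_context *ctx = pvr_context_lookup(pvr_file, handle);
+ *
+ *   if (!ctx)
+ *           return -EINVAL;
+ *
+ *   ... use ctx ...
+ *
+ *   pvr_context_put(ctx);
+ */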
+
+/**
+ * pvr_context_lookup_id() - Lookup context pointer from ID.
+ * @pvr_dev: Device pointer.
+ * @id: FW context ID.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ * * The requested context on success, or
+ * * %NULL on failure (context does not exist).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+ struct pvr_context *ctx;
+
+ /* Take the array lock to protect against context removal. */
+ xa_lock(&pvr_dev->ctx_ids);
+
+ /* Contexts are removed from the ctx_ids set in the context release path,
+ * meaning the ref_count reached zero before they get removed. We need
+ * to make sure we're not trying to acquire a context that's being
+ * destroyed.
+ */
+ ctx = xa_load(&pvr_dev->ctx_ids, id);
+ if (ctx && !kref_get_unless_zero(&ctx->ref_count))
+ ctx = NULL;
+
+ xa_unlock(&pvr_dev->ctx_ids);
+
+ return ctx;
+}
+
+static __always_inline u32
+pvr_context_get_fw_addr(struct pvr_context *ctx)
+{
+ u32 ctx_fw_addr = 0;
+
+ pvr_fw_object_get_fw_addr(ctx->fw_obj, &ctx_fw_addr);
+
+ return ctx_fw_addr;
+}
+
+void pvr_context_put(struct pvr_context *ctx);
+
+int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args);
+
+int pvr_context_destroy(struct pvr_file *pvr_file, u32 handle);
+
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file);
+
+void pvr_context_device_init(struct pvr_device *pvr_dev);
+
+void pvr_context_device_fini(struct pvr_device *pvr_dev);
+
+#endif /* PVR_CONTEXT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
new file mode 100644
index 000000000..6b77c9b4b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_debugfs.h"
+
+#include "pvr_device.h"
+#include "pvr_fw_trace.h"
+#include "pvr_params.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+static const struct pvr_debugfs_entry pvr_debugfs_entries[] = {
+ {"pvr_params", pvr_params_debugfs_init},
+ {"pvr_fw", pvr_fw_trace_debugfs_init},
+};
+
+void
+pvr_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm_dev = minor->dev;
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct dentry *root = minor->debugfs_root;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+ const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(entry->name, root);
+ if (IS_ERR(dir)) {
+ drm_warn(drm_dev,
+ "failed to create debugfs dir '%s' (err=%d)",
+ entry->name, (int)PTR_ERR(dir));
+ continue;
+ }
+
+ entry->init(pvr_dev, dir);
+ }
+}
+
+/*
+ * Since all entries are created under &drm_minor->debugfs_root, there's no
+ * need for a pvr_debugfs_fini() as DRM will clean up everything under its root
+ * automatically.
+ */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.h b/drivers/gpu/drm/imagination/pvr_debugfs.h
new file mode 100644
index 000000000..ebacbd13b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEBUGFS_H
+#define PVR_DEBUGFS_H
+
+/* Forward declaration from <drm/drm_drv.h>. */
+struct drm_minor;
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+struct pvr_debugfs_entry {
+ const char *name;
+ void (*init)(struct pvr_device *pvr_dev, struct dentry *dir);
+};
+
+void pvr_debugfs_init(struct drm_minor *minor);
+#else /* defined(CONFIG_DEBUG_FS) */
+#include <linux/compiler_attributes.h>
+
+static __always_inline void pvr_debugfs_init(struct drm_minor *minor) {}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_DEBUGFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
new file mode 100644
index 000000000..1704c0268
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+
+#include "pvr_fw.h"
+#include "pvr_params.h"
+#include "pvr_power.h"
+#include "pvr_queue.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_stream.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_print.h>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/compiler_attributes.h>
+#include <linux/compiler_types.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Major number for the supported version of the firmware. */
+#define PVR_FW_VERSION_MAJOR 1
+
+/**
+ * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
+ * control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->regs.
+ *
+ * This method of mapping the device control registers into memory ensures that
+ * they are unmapped when the driver is detached (i.e. no explicit cleanup is
+ * required).
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by devm_platform_ioremap_resource().
+ */
+static int
+pvr_device_reg_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+ struct resource *regs_resource;
+ void __iomem *regs;
+
+ pvr_dev->regs_resource = NULL;
+ pvr_dev->regs = NULL;
+
+ regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
+ if (IS_ERR(regs))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(regs),
+ "failed to ioremap gpu registers\n");
+
+ pvr_dev->regs = regs;
+ pvr_dev->regs_resource = regs_resource;
+
+ return 0;
+}
+
+/**
+ * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
+ * struct pvr_device->mem_clk.
+ *
+ * Three clocks are used by the PowerVR device: core (required) plus sys and
+ * mem (both optional). On return, this function guarantees that the clocks
+ * are in one of the following
+ * states:
+ *
+ * * All successfully initialized,
+ * * Core errored, sys and mem uninitialized,
+ * * Core deinitialized, sys errored, mem uninitialized, or
+ * * Core and sys deinitialized, mem errored.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by devm_clk_get(), or
+ * * Any error returned by devm_clk_get_optional().
+ */
+static int pvr_device_clk_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct clk *core_clk;
+ struct clk *sys_clk;
+ struct clk *mem_clk;
+
+ core_clk = devm_clk_get(drm_dev->dev, "core");
+ if (IS_ERR(core_clk))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
+ "failed to get core clock\n");
+
+ sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
+ if (IS_ERR(sys_clk))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
+ "failed to get sys clock\n");
+
+ mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
+ if (IS_ERR(mem_clk))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
+ "failed to get mem clock\n");
+
+ pvr_dev->core_clk = core_clk;
+ pvr_dev->sys_clk = sys_clk;
+ pvr_dev->mem_clk = mem_clk;
+
+ return 0;
+}
+
+/**
+ * pvr_device_process_active_queues() - Process all queue related events.
+ * @pvr_dev: PowerVR device to check
+ *
+ * This is called any time we receive a FW event. It iterates over all
+ * active queues and calls pvr_queue_process() on them.
+ */
+static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
+{
+ struct pvr_queue *queue, *tmp_queue;
+ LIST_HEAD(active_queues);
+
+ mutex_lock(&pvr_dev->queues.lock);
+
+ /* Move all active queues to a temporary list. Queues that remain
+ * active after we're done processing them are re-inserted to
+ * the queues.active list by pvr_queue_process().
+ */
+ list_splice_init(&pvr_dev->queues.active, &active_queues);
+
+ list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
+ pvr_queue_process(queue);
+
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
+{
+ struct pvr_device *pvr_dev = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ /* We are in the threaded handler, so we can keep dequeuing events until we
+ * don't see any. This reduces the number of interrupts when the GPU is
+ * processing a large number of short jobs.
+ */
+ while (pvr_fw_irq_pending(pvr_dev)) {
+ pvr_fw_irq_clear(pvr_dev);
+
+ if (pvr_dev->fw_dev.booted) {
+ pvr_fwccb_process(pvr_dev);
+ pvr_kccb_wake_up_waiters(pvr_dev);
+ pvr_device_process_active_queues(pvr_dev);
+ }
+
+ pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* Unmask FW irqs before returning, so new interrupts can be received. */
+ pvr_fw_irq_enable(pvr_dev);
+ return ret;
+}
+
+static irqreturn_t pvr_device_irq_handler(int irq, void *data)
+{
+ struct pvr_device *pvr_dev = data;
+
+ if (!pvr_fw_irq_pending(pvr_dev))
+ return IRQ_NONE; /* Spurious IRQ - ignore. */
+
+ /* Mask the FW interrupts before waking up the thread. Will be unmasked
+ * when the thread handler is done processing events.
+ */
+ pvr_fw_irq_disable(pvr_dev);
+ return IRQ_WAKE_THREAD;
+}
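+
+/*
+ * Sketch of the hard-IRQ/threaded-IRQ handshake implemented above
+ * (commentary, not part of this patch):
+ *
+ *   hard IRQ:  pending? -> mask FW IRQs -> IRQ_WAKE_THREAD
+ *   thread:    while (pending) { clear; process events; }
+ *              unmask FW IRQs -> return
+ *
+ * Masking in the hard handler keeps the line quiet while the thread drains
+ * events; the re-check loop in the thread catches events that arrive between
+ * the final clear and the unmask.
+ */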
+
+/**
+ * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error returned by platform_get_irq(), or
+ * * Any error returned by request_threaded_irq().
+ */
+static int
+pvr_device_irq_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+
+ init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+
+ pvr_dev->irq = platform_get_irq(plat_dev, 0);
+ if (pvr_dev->irq < 0)
+ return pvr_dev->irq;
+
+ /* Clear any pending events before requesting the IRQ line. */
+ pvr_fw_irq_clear(pvr_dev);
+ pvr_fw_irq_enable(pvr_dev);
+
+ return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
+ pvr_device_irq_thread_handler,
+ IRQF_SHARED, "gpu", pvr_dev);
+}
+
+/**
+ * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_irq_fini(struct pvr_device *pvr_dev)
+{
+ free_irq(pvr_dev->irq, pvr_dev);
+}
+
+/**
+ * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
+ * @pvr_dev: Target PowerVR device.
+ * @base: First part of the filename.
+ * @major: Major version number.
+ *
+ * A PowerVR firmware filename consists of three parts separated by underscores
+ * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
+ * of @base, the second part is the hardware version string derived from @pvr_dev,
+ * and the final part is the firmware version number constructed from @major with
+ * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
+ *
+ * The returned string will have been slab allocated and must be freed with
+ * kfree().
+ *
+ * Return:
+ * * The constructed filename on success, or
+ * * Any error returned by kasprintf().
+ */
+static char *
+pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
+ u8 major)
+{
+ struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+
+ return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
+ gpu_id->v, gpu_id->n, gpu_id->c, major);
+}
+
+static void
+pvr_release_firmware(void *data)
+{
+ struct pvr_device *pvr_dev = data;
+
+ release_firmware(pvr_dev->fw_dev.firmware);
+}
+
+/**
+ * pvr_request_firmware() - Load firmware for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * See pvr_build_firmware_filename() for details on firmware file naming.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_build_firmware_filename(), or
+ * * Any error returned by request_firmware().
+ */
+static int
+pvr_request_firmware(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = &pvr_dev->base;
+ char *filename;
+ const struct firmware *fw;
+ int err;
+
+ filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
+ PVR_FW_VERSION_MAJOR);
+ if (!filename)
+ return -ENOMEM;
+
+ /*
+ * This function takes a copy of &filename, meaning we can free our
+ * instance before returning.
+ */
+ err = request_firmware(&fw, filename, pvr_dev->base.dev);
+ if (err) {
+ drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
+ filename, err);
+ goto err_free_filename;
+ }
+
+ drm_info(drm_dev, "loaded firmware %s\n", filename);
+ kfree(filename);
+
+ pvr_dev->fw_dev.firmware = fw;
+
+ return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev);
+
+err_free_filename:
+ kfree(filename);
+
+ return err;
+}
+
+/**
+ * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device.gpu_id.
+ */
+static void
+pvr_load_gpu_id(struct pvr_device *pvr_dev)
+{
+ struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+ u64 bvnc;
+
+ /*
+ * Try reading the BVNC using the newer (cleaner) method first. If the
+ * B value is zero, fall back to the older method.
+ */
+ bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC);
+
+ gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
+ if (gpu_id->b != 0) {
+ gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
+ gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
+ gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
+ } else {
+ u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
+ u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID);
+ u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);
+
+ gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
+ gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
+ gpu_id->n = FIELD_GET(0xFF00, core_id_config);
+ gpu_id->c = FIELD_GET(0x00FF, core_id_config);
+ }
+}
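+
+/*
+ * Example (commentary, not part of this patch): a GPU reporting BVNC
+ * 4.40.2.51 yields gpu_id->b = 4, ->v = 40, ->n = 2, ->c = 51, which in turn
+ * selects the firmware file powervr/rogue_4.40.2.51_v1.fw (see
+ * pvr_build_firmware_filename()).
+ */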
+
+/**
+ * pvr_set_dma_info() - Set PowerVR device DMA information
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets the DMA mask and max segment size for the PowerVR device.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by PVR_FEATURE_VALUE(), or
+ * * Any error returned by dma_set_mask().
+ */
+static int
+pvr_set_dma_info(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u16 phys_bus_width;
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+ if (err) {
+ drm_err(drm_dev, "Failed to get device physical bus width\n");
+ return err;
+ }
+
+ err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
+ if (err) {
+ drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
+ return err;
+ }
+
+ dma_set_max_seg_size(drm_dev->dev, UINT_MAX);
+
+ return 0;
+}
+
+/**
+ * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * The following steps are taken to ensure the device is ready:
+ *
+ * 1. Read the hardware version information from control registers,
+ * 2. Initialise the hardware feature information,
+ * 3. Setup the device DMA information,
+ * 4. Setup the device-scoped memory context, and
+ * 5. Load firmware into the device.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%ENODEV if the GPU is not supported,
+ * * Any error returned by pvr_set_dma_info(),
+ * * Any error returned by pvr_memory_context_init(), or
+ * * Any error returned by pvr_request_firmware().
+ */
+static int
+pvr_device_gpu_init(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ pvr_load_gpu_id(pvr_dev);
+
+ err = pvr_request_firmware(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_fw_validate_init_device_info(pvr_dev);
+ if (err)
+ return err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, meta))
+ pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
+ else if (PVR_HAS_FEATURE(pvr_dev, mips))
+ pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
+ else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor))
+ pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
+ else
+ return -EINVAL;
+
+ pvr_stream_create_musthave_masks(pvr_dev);
+
+ err = pvr_set_dma_info(pvr_dev);
+ if (err)
+ return err;
+
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
+ if (IS_ERR(pvr_dev->kernel_vm_ctx))
+ return PTR_ERR(pvr_dev->kernel_vm_ctx);
+ }
+
+ err = pvr_fw_init(pvr_dev);
+ if (err)
+ goto err_vm_ctx_put;
+
+ return 0;
+
+err_vm_ctx_put:
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
+ pvr_dev->kernel_vm_ctx = NULL;
+ }
+
+ return err;
+}
+
+/**
+ * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_gpu_fini(struct pvr_device *pvr_dev)
+{
+ pvr_fw_fini(pvr_dev);
+
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
+ pvr_dev->kernel_vm_ctx = NULL;
+ }
+}
+
+/**
+ * pvr_device_init() - Initialize a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * If this function returns successfully, the device will have been fully
+ * initialized. Otherwise, any parts of the device initialized before an error
+ * occurs will be de-initialized before returning.
+ *
+ * NOTE: The initialization steps currently taken are the bare minimum required
+ * to read from the control registers. The device is unlikely to function
+ * until further initialization steps are added. [This note should be
+ * removed when that happens.]
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_device_reg_init(),
+ * * Any error returned by pvr_device_clk_init(), or
+ * * Any error returned by pvr_device_gpu_init().
+ */
+int
+pvr_device_init(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct device *dev = drm_dev->dev;
+ int err;
+
+ /*
+ * Setup device parameters. We do this first in case other steps
+ * depend on them.
+ */
+ err = pvr_device_params_init(&pvr_dev->params);
+ if (err)
+ return err;
+
+ /* Enable and initialize clocks required for the device to operate. */
+ err = pvr_device_clk_init(pvr_dev);
+ if (err)
+ return err;
+
+ /* Explicitly power the GPU so we can access control registers before the FW is booted. */
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ /* Map the control registers into memory. */
+ err = pvr_device_reg_init(pvr_dev);
+ if (err)
+ goto err_pm_runtime_put;
+
+ /* Perform GPU-specific initialization steps. */
+ err = pvr_device_gpu_init(pvr_dev);
+ if (err)
+ goto err_pm_runtime_put;
+
+ err = pvr_device_irq_init(pvr_dev);
+ if (err)
+ goto err_device_gpu_fini;
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_device_gpu_fini:
+ pvr_device_gpu_fini(pvr_dev);
+
+err_pm_runtime_put:
+ pm_runtime_put_sync_suspend(dev);
+
+ return err;
+}
+
+/**
+ * pvr_device_fini() - Deinitialize a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_device_fini(struct pvr_device *pvr_dev)
+{
+ /*
+ * Deinitialization stages are performed in reverse order compared to
+ * the initialization stages in pvr_device_init().
+ */
+ pvr_device_irq_fini(pvr_dev);
+ pvr_device_gpu_fini(pvr_dev);
+}
+
+bool
+pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
+{
+ switch (quirk) {
+ case 47217:
+ return PVR_HAS_QUIRK(pvr_dev, 47217);
+ case 48545:
+ return PVR_HAS_QUIRK(pvr_dev, 48545);
+ case 49927:
+ return PVR_HAS_QUIRK(pvr_dev, 49927);
+ case 51764:
+ return PVR_HAS_QUIRK(pvr_dev, 51764);
+ case 62269:
+ return PVR_HAS_QUIRK(pvr_dev, 62269);
+ default:
+ return false;
+ }
+}
+
+bool
+pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
+{
+ switch (enhancement) {
+ case 35421:
+ return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
+ case 42064:
+ return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
+ default:
+ return false;
+ }
+}
+
+/**
+ * pvr_device_has_feature() - Look up device feature based on feature definition
+ * @pvr_dev: Device pointer.
+ * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
+ *
+ * Returns:
+ * * %true if feature is present on device, or
+ * * %false if feature is not present on device.
+ */
+bool
+pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
+{
+ switch (feature) {
+ case PVR_FEATURE_CLUSTER_GROUPING:
+ return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);
+
+ case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
+ return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);
+
+ case PVR_FEATURE_FB_CDC_V4:
+ return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);
+
+ case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
+ return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);
+
+ case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
+ return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);
+
+ case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
+ return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);
+
+ case PVR_FEATURE_TESSELLATION:
+ return PVR_HAS_FEATURE(pvr_dev, tessellation);
+
+ case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
+ return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);
+
+ case PVR_FEATURE_VDM_DRAWINDIRECT:
+ return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);
+
+ case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
+ return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);
+
+ case PVR_FEATURE_ZLS_SUBTILE:
+ return PVR_HAS_FEATURE(pvr_dev, zls_subtile);
+
+ /* Derived features. */
+ case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
+ u8 cdm_control_stream_format = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
+ return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
+ }
+
+ case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
+ if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
+ u8 fbcdc_algorithm = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
+ return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
+ }
+ return false;
+
+ default:
+ WARN(true, "Looking up undefined feature %u\n", feature);
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
new file mode 100644
index 000000000..ecdd5767d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEVICE_H
+#define PVR_DEVICE_H
+
+#include "pvr_ccb.h"
+#include "pvr_device_info.h"
+#include "pvr_fw.h"
+#include "pvr_params.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mm.h>
+
+#include <linux/bits.h>
+#include <linux/compiler_attributes.h>
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+
+/* Forward declaration from <linux/clk.h>. */
+struct clk;
+
+/* Forward declaration from <linux/firmware.h>. */
+struct firmware;
+
+/**
+ * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
+ * @b: Branch ID.
+ * @v: Version ID.
+ * @n: Number of scalable units.
+ * @c: Config ID.
+ */
+struct pvr_gpu_id {
+ u16 b, v, n, c;
+};
+
+/**
+ * struct pvr_fw_version - Firmware version information
+ * @major: Major version number.
+ * @minor: Minor version number.
+ */
+struct pvr_fw_version {
+ u16 major, minor;
+};
+
+/**
+ * struct pvr_device - powervr-specific wrapper for &struct drm_device
+ */
+struct pvr_device {
+ /**
+ * @base: The underlying &struct drm_device.
+ *
+ * Do not access this member directly, instead call
+ * from_pvr_device().
+ */
+ struct drm_device base;
+
+ /** @gpu_id: GPU ID detected at runtime. */
+ struct pvr_gpu_id gpu_id;
+
+ /**
+ * @features: Hardware feature information.
+ *
+ * Do not access this member directly, instead use PVR_HAS_FEATURE()
+ * or PVR_FEATURE_VALUE() macros.
+ */
+ struct pvr_device_features features;
+
+ /**
+ * @quirks: Hardware quirk information.
+ *
+ * Do not access this member directly, instead use PVR_HAS_QUIRK().
+ */
+ struct pvr_device_quirks quirks;
+
+ /**
+ * @enhancements: Hardware enhancement information.
+ *
+ * Do not access this member directly, instead use
+ * PVR_HAS_ENHANCEMENT().
+ */
+ struct pvr_device_enhancements enhancements;
+
+ /** @fw_version: Firmware version detected at runtime. */
+ struct pvr_fw_version fw_version;
+
+ /** @regs_resource: Resource representing device control registers. */
+ struct resource *regs_resource;
+
+ /**
+ * @regs: Device control registers.
+ *
+ * These are mapped into memory when the device is initialized; that
+ * location is where this pointer points.
+ */
+ void __iomem *regs;
+
+ /**
+ * @core_clk: General core clock.
+ *
+ * This is the primary clock used by the entire GPU core.
+ */
+ struct clk *core_clk;
+
+ /**
+ * @sys_clk: Optional system bus clock.
+ *
+ * This may be used on some platforms to provide an independent clock to the SoC Interface
+ * (SOCIF). If present, this needs to be enabled/disabled together with @core_clk.
+ */
+ struct clk *sys_clk;
+
+ /**
+ * @mem_clk: Optional memory clock.
+ *
+ * This may be used on some platforms to provide an independent clock to the Memory
+ * Interface (MEMIF). If present, this needs to be enabled/disabled together with @core_clk.
+ */
+ struct clk *mem_clk;
+
+ /** @irq: IRQ number. */
+ int irq;
+
+ /** @fwccb: Firmware CCB. */
+ struct pvr_ccb fwccb;
+
+ /**
+ * @kernel_vm_ctx: Virtual memory context used for kernel mappings.
+ *
+ * This is used for mappings in the firmware address region when a META firmware processor
+ * is in use.
+ *
+ * When a MIPS firmware processor is in use, this will be %NULL.
+ */
+ struct pvr_vm_context *kernel_vm_ctx;
+
+ /** @fw_dev: Firmware related data. */
+ struct pvr_fw_device fw_dev;
+
+ /**
+ * @params: Device-specific parameters.
+ *
+ * The values of these parameters are initialized from the
+ * defaults specified as module parameters. They may be
+ * modified at runtime via debugfs (if enabled).
+ */
+ struct pvr_device_params params;
+
+ /** @stream_musthave_quirks: Bit array of "must-have" quirks for stream commands. */
+ u32 stream_musthave_quirks[PVR_STREAM_TYPE_MAX][PVR_STREAM_EXTHDR_TYPE_MAX];
+
+ /**
+ * @mmu_flush_cache_flags: Records which MMU caches require flushing
+ * before submitting the next job.
+ */
+ atomic_t mmu_flush_cache_flags;
+
+ /**
+ * @ctx_ids: Array of contexts belonging to this device. Array members
+ * are of type "struct pvr_context *".
+ *
+ * This array is used to allocate IDs used by the firmware.
+ */
+ struct xarray ctx_ids;
+
+ /**
+ * @free_list_ids: Array of free lists belonging to this device. Array members
+ * are of type "struct pvr_free_list *".
+ *
+ * This array is used to allocate IDs used by the firmware.
+ */
+ struct xarray free_list_ids;
+
+ /**
+ * @job_ids: Array of jobs belonging to this device. Array members
+ * are of type "struct pvr_job *".
+ */
+ struct xarray job_ids;
+
+ /**
+ * @queues: Queue-related fields.
+ */
+ struct {
+ /** @queues.active: Active queue list. */
+ struct list_head active;
+
+ /** @queues.idle: Idle queue list. */
+ struct list_head idle;
+
+ /** @queues.lock: Lock protecting access to the active/idle
+ * lists. */
+ struct mutex lock;
+ } queues;
+
+ /**
+ * @watchdog: Watchdog for communications with firmware.
+ */
+ struct {
+ /** @watchdog.work: Work item for watchdog callback. */
+ struct delayed_work work;
+
+ /**
+ * @watchdog.old_kccb_cmds_executed: KCCB command execution
+ * count at last watchdog poll.
+ */
+ u32 old_kccb_cmds_executed;
+
+ /**
+ * @watchdog.kccb_stall_count: Number of watchdog polls
+ * KCCB has been stalled for.
+ */
+ u32 kccb_stall_count;
+ } watchdog;
+
+ /**
+ * @kccb: Circular buffer for communications with firmware.
+ */
+ struct {
+ /** @kccb.ccb: Kernel CCB. */
+ struct pvr_ccb ccb;
+
+ /** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */
+ wait_queue_head_t rtn_q;
+
+ /** @kccb.rtn_obj: Object representing KCCB return slots. */
+ struct pvr_fw_object *rtn_obj;
+
+ /**
+ * @kccb.rtn: Pointer to CPU mapping of KCCB return slots.
+ * Must be accessed by READ_ONCE()/WRITE_ONCE().
+ */
+ u32 *rtn;
+
+ /** @kccb.slot_count: Total number of KCCB slots available. */
+ u32 slot_count;
+
+ /** @kccb.reserved_count: Number of KCCB slots reserved for
+ * future use. */
+ u32 reserved_count;
+
+ /**
+ * @kccb.waiters: List of KCCB slot waiters.
+ */
+ struct list_head waiters;
+
+ /** @kccb.fence_ctx: KCCB fence context. */
+ struct {
+ /** @kccb.fence_ctx.id: KCCB fence context ID
+ * allocated with dma_fence_context_alloc(). */
+ u64 id;
+
+ /** @kccb.fence_ctx.seqno: Sequence number incremented
+ * each time a fence is created. */
+ atomic_t seqno;
+
+ /**
+ * @kccb.fence_ctx.lock: Lock used to synchronize
+ * access to fences allocated by this context.
+ */
+ spinlock_t lock;
+ } fence_ctx;
+ } kccb;
+
+ /**
+ * @lost: %true if the device has been lost.
+ *
+ * This variable is set if the device has become irretrievably unavailable, e.g. if the
+ * firmware processor has stopped responding and cannot be revived via a hard reset.
+ */
+ bool lost;
+
+ /**
+ * @reset_sem: Reset semaphore.
+ *
+ * GPU reset code will lock this for writing. Any code that submits commands to the firmware
+ * that isn't in an IRQ handler or on the scheduler workqueue must lock this for reading.
+ * Once this has been successfully locked, &pvr_dev->lost _must_ be checked, and -%EIO must
+ * be returned if it is set.
+ */
+ struct rw_semaphore reset_sem;
+
+ /** @sched_wq: Workqueue for schedulers. */
+ struct workqueue_struct *sched_wq;
+};
+
+/**
+ * struct pvr_file - powervr-specific data to be assigned to &struct
+ * drm_file.driver_priv
+ */
+struct pvr_file {
+ /**
+ * @file: A reference to the parent &struct drm_file.
+ *
+ * Do not access this member directly, instead call from_pvr_file().
+ */
+ struct drm_file *file;
+
+ /**
+ * @pvr_dev: A reference to the powervr-specific wrapper for the
+ * associated device. Saves on repeated calls to to_pvr_device().
+ */
+ struct pvr_device *pvr_dev;
+
+ /**
+ * @ctx_handles: Array of contexts belonging to this file. Array members
+ * are of type "struct pvr_context *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray ctx_handles;
+
+ /**
+ * @free_list_handles: Array of free lists belonging to this file. Array
+ * members are of type "struct pvr_free_list *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray free_list_handles;
+
+ /**
+ * @hwrt_handles: Array of HWRT datasets belonging to this file. Array
+ * members are of type "struct pvr_hwrt_dataset *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray hwrt_handles;
+
+ /**
+ * @vm_ctx_handles: Array of VM contexts belonging to this file. Array
+ * members are of type "struct pvr_vm_context *".
+ *
+ * This array is used to allocate handles returned to userspace.
+ */
+ struct xarray vm_ctx_handles;
+};
+
+/**
+ * PVR_HAS_FEATURE() - Tests whether a PowerVR device has a given feature
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @feature: [IN] Hardware feature name.
+ *
+ * Feature names are derived from those found in &struct pvr_device_features by
+ * dropping the 'has_' prefix, which is applied by this macro.
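+ *
+ * Example (as used in pvr_device_gpu_init() in ``pvr_device.c``)::
+ *
+ *	if (PVR_HAS_FEATURE(pvr_dev, meta))
+ *		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;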
+ *
+ * Return:
+ * * true if the named feature is present in the hardware
+ * * false if the named feature is not present in the hardware
+ */
+#define PVR_HAS_FEATURE(pvr_dev, feature) ((pvr_dev)->features.has_##feature)
+
+/**
+ * PVR_FEATURE_VALUE() - Gets a PowerVR device feature value
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @feature: [IN] Feature name.
+ * @value_out: [OUT] Feature value.
+ *
+ * This macro will get a feature value for those features that have values.
+ * If the feature is not present, nothing will be stored to @value_out.
+ *
+ * Feature names are derived from those found in &struct pvr_device_features by
+ * dropping the 'has_' prefix.
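+ *
+ * Example (a minimal sketch mirroring pvr_set_dma_info() in
+ * ``pvr_device.c``)::
+ *
+ *	u16 phys_bus_width;
+ *	int err;
+ *
+ *	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+ *	if (err)
+ *		return err;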
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if the named feature is not present in the hardware
+ */
+#define PVR_FEATURE_VALUE(pvr_dev, feature, value_out) \
+ ({ \
+ struct pvr_device *_pvr_dev = pvr_dev; \
+ int _ret = -EINVAL; \
+ if (_pvr_dev->features.has_##feature) { \
+ *(value_out) = _pvr_dev->features.feature; \
+ _ret = 0; \
+ } \
+ _ret; \
+ })
+
+/**
+ * PVR_HAS_QUIRK() - Tests whether a physical device has a given quirk
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @quirk: [IN] Hardware quirk name.
+ *
+ * Quirk numbers are derived from those found in #pvr_device_quirks by
+ * dropping the 'has_brn' prefix, which is applied by this macro.
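+ *
+ * Example (a sketch adapted from rogue_get_max_coeffs() in ``pvr_drv.c``)::
+ *
+ *	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
+ *		pending_allocation_coeff_regs = 2U * 1024U;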
+ *
+ * Returns:
+ * * true if the quirk is present in the hardware, or
+ * * false if the quirk is not present in the hardware.
+ */
+#define PVR_HAS_QUIRK(pvr_dev, quirk) ((pvr_dev)->quirks.has_brn##quirk)
+
+/**
+ * PVR_HAS_ENHANCEMENT() - Tests whether a physical device has a given
+ * enhancement
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @enhancement: [IN] Hardware enhancement name.
+ *
+ * Enhancement numbers are derived from those found in #pvr_device_enhancements
+ * by dropping the 'has_ern' prefix, which is applied by this macro.
+ *
+ * Returns:
+ * * true if the enhancement is present in the hardware, or
+ * * false if the enhancement is not present in the hardware.
+ */
+#define PVR_HAS_ENHANCEMENT(pvr_dev, enhancement) ((pvr_dev)->enhancements.has_ern##enhancement)
+
+#define from_pvr_device(pvr_dev) (&(pvr_dev)->base)
+
+#define to_pvr_device(drm_dev) container_of_const(drm_dev, struct pvr_device, base)
+
+#define from_pvr_file(pvr_file) ((pvr_file)->file)
+
+#define to_pvr_file(file) ((file)->driver_priv)
+
+/**
+ * PVR_PACKED_BVNC() - Packs B, V, N and C values into a 64-bit unsigned integer
+ * @b: Branch ID.
+ * @v: Version ID.
+ * @n: Number of scalable units.
+ * @c: Config ID.
+ *
+ * The packed layout is as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ *
+ * pvr_gpu_id_to_packed_bvnc() should be used instead of this macro when a
+ * &struct pvr_gpu_id is available in order to ensure proper type checking.
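+ *
+ * Example (values chosen purely for illustration)::
+ *
+ *	u64 packed = PVR_PACKED_BVNC(1, 2, 3, 4);
+ *
+ * yields ``packed == 0x0001000200030004``.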
+ *
+ * Return: Packed BVNC.
+ */
+/* clang-format off */
+#define PVR_PACKED_BVNC(b, v, n, c) \
+ ((((u64)(b) & GENMASK_ULL(15, 0)) << 48) | \
+ (((u64)(v) & GENMASK_ULL(15, 0)) << 32) | \
+ (((u64)(n) & GENMASK_ULL(15, 0)) << 16) | \
+ (((u64)(c) & GENMASK_ULL(15, 0)) << 0))
+/* clang-format on */
+
+/**
+ * pvr_gpu_id_to_packed_bvnc() - Packs B, V, N and C values into a 64-bit
+ * unsigned integer
+ * @gpu_id: GPU ID.
+ *
+ * The packed layout is as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ *
+ * This should be used in preference to PVR_PACKED_BVNC() when a &struct
+ * pvr_gpu_id is available in order to ensure proper type checking.
+ *
+ * Return: Packed BVNC.
+ */
+static __always_inline u64
+pvr_gpu_id_to_packed_bvnc(struct pvr_gpu_id *gpu_id)
+{
+ return PVR_PACKED_BVNC(gpu_id->b, gpu_id->v, gpu_id->n, gpu_id->c);
+}
+
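+/**
+ * packed_bvnc_to_pvr_gpu_id() - Unpack a 64-bit BVNC into a &struct pvr_gpu_id
+ * @bvnc: Packed BVNC, laid out as described for PVR_PACKED_BVNC().
+ * @gpu_id: Target GPU ID structure.
+ *
+ * This is the inverse of pvr_gpu_id_to_packed_bvnc().
+ */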
+static __always_inline void
+packed_bvnc_to_pvr_gpu_id(u64 bvnc, struct pvr_gpu_id *gpu_id)
+{
+ gpu_id->b = (bvnc & GENMASK_ULL(63, 48)) >> 48;
+ gpu_id->v = (bvnc & GENMASK_ULL(47, 32)) >> 32;
+ gpu_id->n = (bvnc & GENMASK_ULL(31, 16)) >> 16;
+ gpu_id->c = bvnc & GENMASK_ULL(15, 0);
+}
+
+int pvr_device_init(struct pvr_device *pvr_dev);
+void pvr_device_fini(struct pvr_device *pvr_dev);
+void pvr_device_reset(struct pvr_device *pvr_dev);
+
+bool
+pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk);
+bool
+pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement);
+bool
+pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature);
+
+/**
+ * PVR_CR_FIELD_GET() - Extract a single field from a PowerVR control register
+ * @val: Value of the target register.
+ * @field: Field specifier, as defined in "pvr_rogue_cr_defs.h".
+ *
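+ * Example (a sketch based on pvr_load_gpu_id(); the register name is
+ * assumed to come from "pvr_rogue_cr_defs.h")::
+ *
+ *	u64 core_rev = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_REVISION);
+ *	u16 major = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
+ *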
+ * Return: The extracted field.
+ */
+#define PVR_CR_FIELD_GET(val, field) FIELD_GET(~ROGUE_CR_##field##_CLRMSK, val)
+
+/**
+ * pvr_cr_read32() - Read a 32-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * Return: The value of the requested register.
+ */
+static __always_inline u32
+pvr_cr_read32(struct pvr_device *pvr_dev, u32 reg)
+{
+ return ioread32(pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_read64() - Read a 64-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * Return: The value of the requested register.
+ */
+static __always_inline u64
+pvr_cr_read64(struct pvr_device *pvr_dev, u32 reg)
+{
+ return ioread64(pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_write32() - Write to a 32-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ */
+static __always_inline void
+pvr_cr_write32(struct pvr_device *pvr_dev, u32 reg, u32 val)
+{
+ iowrite32(val, pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_write64() - Write to a 64-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ */
+static __always_inline void
+pvr_cr_write64(struct pvr_device *pvr_dev, u32 reg, u64 val)
+{
+ iowrite64(val, pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_poll_reg32() - Wait for a 32-bit register to match a given value by
+ * polling
+ * @pvr_dev: Target PowerVR device.
+ * @reg_addr: Address of register.
+ * @reg_value: Expected register value (after masking).
+ * @reg_mask: Mask of bits valid for comparison with @reg_value.
+ * @timeout_usec: Timeout length, in us.
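+ *
+ * Example (an illustrative sketch; the register and field names are
+ * hypothetical)::
+ *
+ *	int err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_FOO, ROGUE_CR_FOO_IDLE_EN,
+ *				    ROGUE_CR_FOO_IDLE_EN, 1000);
+ *	if (err)
+ *		return err;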
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ETIMEDOUT on timeout.
+ */
+static __always_inline int
+pvr_cr_poll_reg32(struct pvr_device *pvr_dev, u32 reg_addr, u32 reg_value,
+ u32 reg_mask, u64 timeout_usec)
+{
+ u32 value;
+
+ return readl_poll_timeout(pvr_dev->regs + reg_addr, value,
+ (value & reg_mask) == reg_value, 0, timeout_usec);
+}
+
+/**
+ * pvr_cr_poll_reg64() - Wait for a 64-bit register to match a given value by
+ * polling
+ * @pvr_dev: Target PowerVR device.
+ * @reg_addr: Address of register.
+ * @reg_value: Expected register value (after masking).
+ * @reg_mask: Mask of bits valid for comparison with @reg_value.
+ * @timeout_usec: Timeout length, in us.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ETIMEDOUT on timeout.
+ */
+static __always_inline int
+pvr_cr_poll_reg64(struct pvr_device *pvr_dev, u32 reg_addr, u64 reg_value,
+ u64 reg_mask, u64 timeout_usec)
+{
+ u64 value;
+
+ return readq_poll_timeout(pvr_dev->regs + reg_addr, value,
+ (value & reg_mask) == reg_value, 0, timeout_usec);
+}
+
+/**
+ * pvr_round_up_to_cacheline_size() - Round up a provided size to be cacheline
+ * aligned
+ * @pvr_dev: Target PowerVR device.
+ * @size: Initial size, in bytes.
+ *
+ * Returns:
+ * * Size aligned to cacheline size.
+ */
+static __always_inline size_t
+pvr_round_up_to_cacheline_size(struct pvr_device *pvr_dev, size_t size)
+{
+ u16 slc_cacheline_size_bits = 0;
+ u16 slc_cacheline_size_bytes;
+
+ WARN_ON(!PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_bits));
+ PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits,
+ &slc_cacheline_size_bits);
+ slc_cacheline_size_bytes = slc_cacheline_size_bits / 8;
+
+ return round_up(size, slc_cacheline_size_bytes);
+}
+
+/**
+ * DOC: IOCTL validation helpers
+ *
+ * To validate the constraints imposed on IOCTL argument structs, a collection
+ * of macros and helper functions exist in ``pvr_device.h``.
+ *
+ * Of the current helpers, it should only be necessary to call
+ * PVR_IOCTL_UNION_PADDING_CHECK() directly. This macro should be used once in
+ * every code path which extracts a union member from a struct passed from
+ * userspace.
+ */
+
+/**
+ * pvr_ioctl_union_padding_check() - Validate that the implicit padding between
+ * the end of a union member and the end of the union itself is zeroed.
+ * @instance: Pointer to the instance of the struct to validate.
+ * @union_offset: Offset into the type of @instance of the target union. Must
+ * be 64-bit aligned.
+ * @union_size: Size of the target union in the type of @instance. Must be
+ * 64-bit aligned.
+ * @member_size: Size of the target member in the target union specified by
+ * @union_offset and @union_size. It is assumed that the offset of the target
+ * member is zero relative to @union_offset. Must be 64-bit aligned.
+ *
+ * You probably want to use PVR_IOCTL_UNION_PADDING_CHECK() instead of calling
+ * this function directly, since that macro abstracts away much of the setup,
+ * and also provides some static validation. See its docs for details.
+ *
+ * Return:
+ * * %true if every byte between the end of the used member of the union and
+ * the end of that union is zeroed, or
+ * * %false otherwise.
+ */
+static __always_inline bool
+pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
+ size_t union_size, size_t member_size)
+{
+ /*
+ * void pointer arithmetic is technically illegal - cast to a byte
+ * pointer so this addition works safely.
+ */
+ void *padding_start = ((u8 *)instance) + union_offset + member_size;
+ size_t padding_size = union_size - member_size;
+
+ return !memchr_inv(padding_start, 0, padding_size);
+}
+
+/**
+ * PVR_STATIC_ASSERT_64BIT_ALIGNED() - Inline assertion for 64-bit alignment.
+ * @static_expr_: Target expression to evaluate.
+ *
+ * If @static_expr_ does not evaluate to a constant integer which would be a
+ * 64-bit aligned address (i.e. a multiple of 8), compilation will fail.
+ *
+ * Return:
+ * The value of @static_expr_.
+ */
+#define PVR_STATIC_ASSERT_64BIT_ALIGNED(static_expr_) \
+ ({ \
+ static_assert(((static_expr_) & (sizeof(u64) - 1)) == 0); \
+ (static_expr_); \
+ })
+
+/**
+ * PVR_IOCTL_UNION_PADDING_CHECK() - Validate that the implicit padding between
+ * the end of a union member and the end of the union itself is zeroed.
+ * @struct_instance_: An expression which evaluates to a pointer to a UAPI data
+ * struct.
+ * @union_: The name of the union member of @struct_instance_ to check. If the
+ * union member is nested within the type of @struct_instance_, this may
+ * contain the member access operator (".").
+ * @member_: The name of the member of @union_ to assess.
+ *
+ * This is a wrapper around pvr_ioctl_union_padding_check() which performs
+ * alignment checks and simplifies things for the caller.
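+ *
+ * Example (an illustrative sketch; the args struct, union and member names
+ * are hypothetical)::
+ *
+ *	struct drm_pvr_ioctl_foo_args *args = raw_args;
+ *
+ *	if (!PVR_IOCTL_UNION_PADDING_CHECK(args, data, bar))
+ *		return -EINVAL;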
+ *
+ * Return:
+ * * %true if every byte in @struct_instance_ between the end of @member_ and
+ * the end of @union_ is zeroed, or
+ * * %false otherwise.
+ */
+#define PVR_IOCTL_UNION_PADDING_CHECK(struct_instance_, union_, member_) \
+ ({ \
+ typeof(struct_instance_) __instance = (struct_instance_); \
+ size_t __union_offset = PVR_STATIC_ASSERT_64BIT_ALIGNED( \
+ offsetof(typeof(*__instance), union_)); \
+ size_t __union_size = PVR_STATIC_ASSERT_64BIT_ALIGNED( \
+ sizeof(__instance->union_)); \
+ size_t __member_size = PVR_STATIC_ASSERT_64BIT_ALIGNED( \
+ sizeof(__instance->union_.member_)); \
+ pvr_ioctl_union_padding_check(__instance, __union_offset, \
+ __union_size, __member_size); \
+ })
+
+#define PVR_FW_PROCESSOR_TYPE_META 0
+#define PVR_FW_PROCESSOR_TYPE_MIPS 1
+#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+
+#endif /* PVR_DEVICE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c
new file mode 100644
index 000000000..d3301cde7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+#include "pvr_rogue_fwif_dev_info.h"
+
+#include <drm/drm_print.h>
+
+#include <linux/bits.h>
+#include <linux/minmax.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#define QUIRK_MAPPING(quirk) \
+ [PVR_FW_HAS_BRN_##quirk] = offsetof(struct pvr_device, quirks.has_brn##quirk)
+
+static const uintptr_t quirks_mapping[] = {
+ QUIRK_MAPPING(44079),
+ QUIRK_MAPPING(47217),
+ QUIRK_MAPPING(48492),
+ QUIRK_MAPPING(48545),
+ QUIRK_MAPPING(49927),
+ QUIRK_MAPPING(50767),
+ QUIRK_MAPPING(51764),
+ QUIRK_MAPPING(62269),
+ QUIRK_MAPPING(63142),
+ QUIRK_MAPPING(63553),
+ QUIRK_MAPPING(66011),
+ QUIRK_MAPPING(71242),
+};
+
+#undef QUIRK_MAPPING
+
+#define ENHANCEMENT_MAPPING(enhancement) \
+ [PVR_FW_HAS_ERN_##enhancement] = offsetof(struct pvr_device, \
+ enhancements.has_ern##enhancement)
+
+static const uintptr_t enhancements_mapping[] = {
+ ENHANCEMENT_MAPPING(35421),
+ ENHANCEMENT_MAPPING(38020),
+ ENHANCEMENT_MAPPING(38748),
+ ENHANCEMENT_MAPPING(42064),
+ ENHANCEMENT_MAPPING(42290),
+ ENHANCEMENT_MAPPING(42606),
+ ENHANCEMENT_MAPPING(47025),
+ ENHANCEMENT_MAPPING(57596),
+};
+
+#undef ENHANCEMENT_MAPPING
+
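+/**
+ * pvr_device_info_set_common() - Set device flags from a firmware bitmask
+ * @pvr_dev: Device pointer.
+ * @bitmask: Pointer to bitmask in firmware device information.
+ * @bitmask_size: Size of @bitmask, in u64s.
+ * @mapping: Table mapping bit positions to flag offsets within &struct
+ * pvr_device.
+ * @mapping_max: Number of entries in @mapping.
+ *
+ * For every bit set in @bitmask that has an entry in @mapping, the bool at
+ * the mapped offset within @pvr_dev is set to %true. Bits beyond the range
+ * covered by @mapping trigger a warning, as they describe quirks or
+ * enhancements unknown to this driver.
+ */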
+static void pvr_device_info_set_common(struct pvr_device *pvr_dev, const u64 *bitmask,
+ u32 bitmask_size, const uintptr_t *mapping, u32 mapping_max)
+{
+ const u32 mapping_max_size = (mapping_max + 63) >> 6;
+ const u32 nr_bits = min(bitmask_size * 64, mapping_max);
+
+ /* Warn if any unsupported values in the bitmask. */
+ if (bitmask_size > mapping_max_size) {
+ if (mapping == quirks_mapping)
+ drm_warn(from_pvr_device(pvr_dev), "Unsupported quirks in firmware image");
+ else
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported enhancements in firmware image");
+ } else if (bitmask_size == mapping_max_size && (mapping_max & 63)) {
+ u64 invalid_mask = ~0ull << (mapping_max & 63);
+
+ if (bitmask[bitmask_size - 1] & invalid_mask) {
+ if (mapping == quirks_mapping)
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported quirks in firmware image");
+ else
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported enhancements in firmware image");
+ }
+ }
+
+ for (u32 i = 0; i < nr_bits; i++) {
+ if (bitmask[i >> 6] & BIT_ULL(i & 63))
+ *(bool *)((u8 *)pvr_dev + mapping[i]) = true;
+ }
+}
+
+/**
+ * pvr_device_info_set_quirks() - Set device quirks from device information in firmware
+ * @pvr_dev: Device pointer.
+ * @quirks: Pointer to quirks mask in device information.
+ * @quirks_size: Size of quirks mask, in u64s.
+ */
+void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *quirks, u32 quirks_size)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(quirks_mapping) != PVR_FW_HAS_BRN_MAX);
+
+ pvr_device_info_set_common(pvr_dev, quirks, quirks_size, quirks_mapping,
+ ARRAY_SIZE(quirks_mapping));
+}
+
+/**
+ * pvr_device_info_set_enhancements() - Set device enhancements from device information in firmware
+ * @pvr_dev: Device pointer.
+ * @enhancements: Pointer to enhancements mask in device information.
+ * @enhancements_size: Size of enhancements mask, in u64s.
+ */
+void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *enhancements,
+ u32 enhancements_size)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(enhancements_mapping) != PVR_FW_HAS_ERN_MAX);
+
+ pvr_device_info_set_common(pvr_dev, enhancements, enhancements_size,
+ enhancements_mapping, ARRAY_SIZE(enhancements_mapping));
+}
+
+#define FEATURE_MAPPING(fw_feature, feature) \
+ [PVR_FW_HAS_FEATURE_##fw_feature] = { \
+ .flag_offset = offsetof(struct pvr_device, features.has_##feature), \
+ .value_offset = 0 \
+ }
+
+#define FEATURE_MAPPING_VALUE(fw_feature, feature) \
+ [PVR_FW_HAS_FEATURE_##fw_feature] = { \
+ .flag_offset = offsetof(struct pvr_device, features.has_##feature), \
+ .value_offset = offsetof(struct pvr_device, features.feature) \
+ }
+
+static const struct {
+ uintptr_t flag_offset;
+ uintptr_t value_offset;
+} features_mapping[] = {
+ FEATURE_MAPPING(AXI_ACELITE, axi_acelite),
+ FEATURE_MAPPING_VALUE(CDM_CONTROL_STREAM_FORMAT, cdm_control_stream_format),
+ FEATURE_MAPPING(CLUSTER_GROUPING, cluster_grouping),
+ FEATURE_MAPPING_VALUE(COMMON_STORE_SIZE_IN_DWORDS, common_store_size_in_dwords),
+ FEATURE_MAPPING(COMPUTE, compute),
+ FEATURE_MAPPING(COMPUTE_MORTON_CAPABLE, compute_morton_capable),
+ FEATURE_MAPPING(COMPUTE_OVERLAP, compute_overlap),
+ FEATURE_MAPPING(COREID_PER_OS, coreid_per_os),
+ FEATURE_MAPPING(DYNAMIC_DUST_POWER, dynamic_dust_power),
+ FEATURE_MAPPING_VALUE(ECC_RAMS, ecc_rams),
+ FEATURE_MAPPING_VALUE(FBCDC, fbcdc),
+ FEATURE_MAPPING_VALUE(FBCDC_ALGORITHM, fbcdc_algorithm),
+ FEATURE_MAPPING_VALUE(FBCDC_ARCHITECTURE, fbcdc_architecture),
+ FEATURE_MAPPING_VALUE(FBC_MAX_DEFAULT_DESCRIPTORS, fbc_max_default_descriptors),
+ FEATURE_MAPPING_VALUE(FBC_MAX_LARGE_DESCRIPTORS, fbc_max_large_descriptors),
+ FEATURE_MAPPING(FB_CDC_V4, fb_cdc_v4),
+ FEATURE_MAPPING(GPU_MULTICORE_SUPPORT, gpu_multicore_support),
+ FEATURE_MAPPING(GPU_VIRTUALISATION, gpu_virtualisation),
+ FEATURE_MAPPING(GS_RTA_SUPPORT, gs_rta_support),
+ FEATURE_MAPPING(IRQ_PER_OS, irq_per_os),
+ FEATURE_MAPPING_VALUE(ISP_MAX_TILES_IN_FLIGHT, isp_max_tiles_in_flight),
+ FEATURE_MAPPING_VALUE(ISP_SAMPLES_PER_PIXEL, isp_samples_per_pixel),
+ FEATURE_MAPPING(ISP_ZLS_D24_S8_PACKING_OGL_MODE, isp_zls_d24_s8_packing_ogl_mode),
+ FEATURE_MAPPING_VALUE(LAYOUT_MARS, layout_mars),
+ FEATURE_MAPPING_VALUE(MAX_PARTITIONS, max_partitions),
+ FEATURE_MAPPING_VALUE(META, meta),
+ FEATURE_MAPPING_VALUE(META_COREMEM_SIZE, meta_coremem_size),
+ FEATURE_MAPPING(MIPS, mips),
+ FEATURE_MAPPING_VALUE(NUM_CLUSTERS, num_clusters),
+ FEATURE_MAPPING_VALUE(NUM_ISP_IPP_PIPES, num_isp_ipp_pipes),
+ FEATURE_MAPPING_VALUE(NUM_OSIDS, num_osids),
+ FEATURE_MAPPING_VALUE(NUM_RASTER_PIPES, num_raster_pipes),
+ FEATURE_MAPPING(PBE2_IN_XE, pbe2_in_xe),
+ FEATURE_MAPPING(PBVNC_COREID_REG, pbvnc_coreid_reg),
+ FEATURE_MAPPING(PERFBUS, perfbus),
+ FEATURE_MAPPING(PERF_COUNTER_BATCH, perf_counter_batch),
+ FEATURE_MAPPING_VALUE(PHYS_BUS_WIDTH, phys_bus_width),
+ FEATURE_MAPPING(RISCV_FW_PROCESSOR, riscv_fw_processor),
+ FEATURE_MAPPING(ROGUEXE, roguexe),
+ FEATURE_MAPPING(S7_TOP_INFRASTRUCTURE, s7_top_infrastructure),
+ FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT, simple_internal_parameter_format),
+ FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT_V2, simple_internal_parameter_format_v2),
+ FEATURE_MAPPING_VALUE(SIMPLE_PARAMETER_FORMAT_VERSION, simple_parameter_format_version),
+ FEATURE_MAPPING_VALUE(SLC_BANKS, slc_banks),
+ FEATURE_MAPPING_VALUE(SLC_CACHE_LINE_SIZE_BITS, slc_cache_line_size_bits),
+ FEATURE_MAPPING(SLC_SIZE_CONFIGURABLE, slc_size_configurable),
+ FEATURE_MAPPING_VALUE(SLC_SIZE_IN_KILOBYTES, slc_size_in_kilobytes),
+ FEATURE_MAPPING(SOC_TIMER, soc_timer),
+ FEATURE_MAPPING(SYS_BUS_SECURE_RESET, sys_bus_secure_reset),
+ FEATURE_MAPPING(TESSELLATION, tessellation),
+ FEATURE_MAPPING(TILE_REGION_PROTECTION, tile_region_protection),
+ FEATURE_MAPPING_VALUE(TILE_SIZE_X, tile_size_x),
+ FEATURE_MAPPING_VALUE(TILE_SIZE_Y, tile_size_y),
+ FEATURE_MAPPING(TLA, tla),
+ FEATURE_MAPPING(TPU_CEM_DATAMASTER_GLOBAL_REGISTERS, tpu_cem_datamaster_global_registers),
+ FEATURE_MAPPING(TPU_DM_GLOBAL_REGISTERS, tpu_dm_global_registers),
+ FEATURE_MAPPING(TPU_FILTERING_MODE_CONTROL, tpu_filtering_mode_control),
+ FEATURE_MAPPING_VALUE(USC_MIN_OUTPUT_REGISTERS_PER_PIX, usc_min_output_registers_per_pix),
+ FEATURE_MAPPING(VDM_DRAWINDIRECT, vdm_drawindirect),
+ FEATURE_MAPPING(VDM_OBJECT_LEVEL_LLS, vdm_object_level_lls),
+ FEATURE_MAPPING_VALUE(VIRTUAL_ADDRESS_SPACE_BITS, virtual_address_space_bits),
+ FEATURE_MAPPING(WATCHDOG_TIMER, watchdog_timer),
+ FEATURE_MAPPING(WORKGROUP_PROTECTION, workgroup_protection),
+ FEATURE_MAPPING_VALUE(XE_ARCHITECTURE, xe_architecture),
+ FEATURE_MAPPING(XE_MEMORY_HIERARCHY, xe_memory_hierarchy),
+ FEATURE_MAPPING(XE_TPU2, xe_tpu2),
+ FEATURE_MAPPING_VALUE(XPU_MAX_REGBANKS_ADDR_WIDTH, xpu_max_regbanks_addr_width),
+ FEATURE_MAPPING_VALUE(XPU_MAX_SLAVES, xpu_max_slaves),
+ FEATURE_MAPPING_VALUE(XPU_REGISTER_BROADCAST, xpu_register_broadcast),
+ FEATURE_MAPPING(XT_TOP_INFRASTRUCTURE, xt_top_infrastructure),
+ FEATURE_MAPPING(ZLS_SUBTILE, zls_subtile),
+};
+
+#undef FEATURE_MAPPING_VALUE
+#undef FEATURE_MAPPING
+
+/**
+ * pvr_device_info_set_features() - Set device features from device information in firmware
+ * @pvr_dev: Device pointer.
+ * @features: Pointer to features mask in device information.
+ * @features_size: Size of features mask, in u64s.
+ * @feature_param_size: Size of feature parameters, in u64s.
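+ *
+ * Feature parameter values immediately follow the feature bitmask in the
+ * firmware stream: one u64 per feature that is both set in the bitmask and
+ * carries a value, in ascending bit order.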
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL on malformed stream.
+ */
+int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size,
+ u32 feature_param_size)
+{
+ const u32 mapping_max = ARRAY_SIZE(features_mapping);
+ const u32 mapping_max_size = (mapping_max + 63) >> 6;
+ const u32 nr_bits = min(features_size * 64, mapping_max);
+ const u64 *feature_params = features + features_size;
+ u32 param_idx = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(features_mapping) != PVR_FW_HAS_FEATURE_MAX);
+
+ /* Verify no unsupported values in the bitmask. */
+ if (features_size > mapping_max_size) {
+ drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image");
+ } else if (features_size == mapping_max_size &&
+ ((mapping_max & 63) != 0)) {
+ u64 invalid_mask = ~0ull << (mapping_max & 63);
+
+ if (features[features_size - 1] & invalid_mask)
+ drm_warn(from_pvr_device(pvr_dev),
+ "Unsupported features in firmware image");
+ }
+
+ for (u32 i = 0; i < nr_bits; i++) {
+ if (features[i >> 6] & BIT_ULL(i & 63)) {
+ *(bool *)((u8 *)pvr_dev + features_mapping[i].flag_offset) = true;
+
+ if (features_mapping[i].value_offset) {
+ if (param_idx >= feature_param_size)
+ return -EINVAL;
+
+ *(u64 *)((u8 *)pvr_dev + features_mapping[i].value_offset) =
+ feature_params[param_idx];
+ param_idx++;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.h b/drivers/gpu/drm/imagination/pvr_device_info.h
new file mode 100644
index 000000000..f61fb988b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEVICE_INFO_H
+#define PVR_DEVICE_INFO_H
+
+#include <linux/types.h>
+
+struct pvr_device;
+
+/*
+ * struct pvr_device_features - Hardware feature information
+ */
+struct pvr_device_features {
+ bool has_axi_acelite;
+ bool has_cdm_control_stream_format;
+ bool has_cluster_grouping;
+ bool has_common_store_size_in_dwords;
+ bool has_compute;
+ bool has_compute_morton_capable;
+ bool has_compute_overlap;
+ bool has_coreid_per_os;
+ bool has_dynamic_dust_power;
+ bool has_ecc_rams;
+ bool has_fb_cdc_v4;
+ bool has_fbc_max_default_descriptors;
+ bool has_fbc_max_large_descriptors;
+ bool has_fbcdc;
+ bool has_fbcdc_algorithm;
+ bool has_fbcdc_architecture;
+ bool has_gpu_multicore_support;
+ bool has_gpu_virtualisation;
+ bool has_gs_rta_support;
+ bool has_irq_per_os;
+ bool has_isp_max_tiles_in_flight;
+ bool has_isp_samples_per_pixel;
+ bool has_isp_zls_d24_s8_packing_ogl_mode;
+ bool has_layout_mars;
+ bool has_max_partitions;
+ bool has_meta;
+ bool has_meta_coremem_size;
+ bool has_mips;
+ bool has_num_clusters;
+ bool has_num_isp_ipp_pipes;
+ bool has_num_osids;
+ bool has_num_raster_pipes;
+ bool has_pbe2_in_xe;
+ bool has_pbvnc_coreid_reg;
+ bool has_perfbus;
+ bool has_perf_counter_batch;
+ bool has_phys_bus_width;
+ bool has_riscv_fw_processor;
+ bool has_roguexe;
+ bool has_s7_top_infrastructure;
+ bool has_simple_internal_parameter_format;
+ bool has_simple_internal_parameter_format_v2;
+ bool has_simple_parameter_format_version;
+ bool has_slc_banks;
+ bool has_slc_cache_line_size_bits;
+ bool has_slc_size_configurable;
+ bool has_slc_size_in_kilobytes;
+ bool has_soc_timer;
+ bool has_sys_bus_secure_reset;
+ bool has_tessellation;
+ bool has_tile_region_protection;
+ bool has_tile_size_x;
+ bool has_tile_size_y;
+ bool has_tla;
+ bool has_tpu_cem_datamaster_global_registers;
+ bool has_tpu_dm_global_registers;
+ bool has_tpu_filtering_mode_control;
+ bool has_usc_min_output_registers_per_pix;
+ bool has_vdm_drawindirect;
+ bool has_vdm_object_level_lls;
+ bool has_virtual_address_space_bits;
+ bool has_watchdog_timer;
+ bool has_workgroup_protection;
+ bool has_xe_architecture;
+ bool has_xe_memory_hierarchy;
+ bool has_xe_tpu2;
+ bool has_xpu_max_regbanks_addr_width;
+ bool has_xpu_max_slaves;
+ bool has_xpu_register_broadcast;
+ bool has_xt_top_infrastructure;
+ bool has_zls_subtile;
+
+ u64 cdm_control_stream_format;
+ u64 common_store_size_in_dwords;
+ u64 ecc_rams;
+ u64 fbc_max_default_descriptors;
+ u64 fbc_max_large_descriptors;
+ u64 fbcdc;
+ u64 fbcdc_algorithm;
+ u64 fbcdc_architecture;
+ u64 isp_max_tiles_in_flight;
+ u64 isp_samples_per_pixel;
+ u64 layout_mars;
+ u64 max_partitions;
+ u64 meta;
+ u64 meta_coremem_size;
+ u64 num_clusters;
+ u64 num_isp_ipp_pipes;
+ u64 num_osids;
+ u64 num_raster_pipes;
+ u64 phys_bus_width;
+ u64 simple_parameter_format_version;
+ u64 slc_banks;
+ u64 slc_cache_line_size_bits;
+ u64 slc_size_in_kilobytes;
+ u64 tile_size_x;
+ u64 tile_size_y;
+ u64 usc_min_output_registers_per_pix;
+ u64 virtual_address_space_bits;
+ u64 xe_architecture;
+ u64 xpu_max_regbanks_addr_width;
+ u64 xpu_max_slaves;
+ u64 xpu_register_broadcast;
+};
+
+/*
+ * struct pvr_device_quirks - Hardware quirk information
+ */
+struct pvr_device_quirks {
+ bool has_brn44079;
+ bool has_brn47217;
+ bool has_brn48492;
+ bool has_brn48545;
+ bool has_brn49927;
+ bool has_brn50767;
+ bool has_brn51764;
+ bool has_brn62269;
+ bool has_brn63142;
+ bool has_brn63553;
+ bool has_brn66011;
+ bool has_brn71242;
+};
+
+/*
+ * struct pvr_device_enhancements - Hardware enhancement information
+ */
+struct pvr_device_enhancements {
+ bool has_ern35421;
+ bool has_ern38020;
+ bool has_ern38748;
+ bool has_ern42064;
+ bool has_ern42290;
+ bool has_ern42606;
+ bool has_ern47025;
+ bool has_ern57596;
+};
+
+void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *bitmask,
+ u32 bitmask_len);
+void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *bitmask,
+ u32 bitmask_len);
+int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size,
+ u32 feature_param_size);
+
+/*
+ * Meta cores
+ *
+ * These are the values for the 'meta' feature when the feature is present
+ * (as per &struct pvr_device_features).
+ */
+#define PVR_META_MTP218 (1)
+#define PVR_META_MTP219 (2)
+#define PVR_META_LTP218 (3)
+#define PVR_META_LTP217 (4)
+
+enum {
+ PVR_FEATURE_CDM_USER_MODE_QUEUE,
+ PVR_FEATURE_CLUSTER_GROUPING,
+ PVR_FEATURE_COMPUTE_MORTON_CAPABLE,
+ PVR_FEATURE_FB_CDC_V4,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT,
+ PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
+ PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP,
+ PVR_FEATURE_S7_TOP_INFRASTRUCTURE,
+ PVR_FEATURE_TESSELLATION,
+ PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS,
+ PVR_FEATURE_VDM_DRAWINDIRECT,
+ PVR_FEATURE_VDM_OBJECT_LEVEL_LLS,
+ PVR_FEATURE_ZLS_SUBTILE,
+};
+
+#endif /* PVR_DEVICE_INFO_H */
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
new file mode 100644
index 000000000..5c3b2d58d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -0,0 +1,1501 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_debugfs.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_mmu.h"
+#include "pvr_power.h"
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_shared.h"
+#include "pvr_vm.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/xarray.h>
+
+/**
+ * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
+ *
+ * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
+ *
+ * * AXE-1-16M (found in Texas Instruments AM62)
+ */
+
+/**
+ * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
+ * @drm_dev: [IN] Target DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_bo_args.
+ * @file: [IN] DRM file-private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
+ * or wider than &typedef size_t,
+ * * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
+ * reserved or undefined are set,
+ * * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
+ * zero,
+ * * Any error encountered while creating the object (see
+ * pvr_gem_object_create()), or
+ * * Any error encountered while transferring ownership of the object into a
+ * userspace-accessible handle (see pvr_gem_object_into_handle()).
+ */
+static int
+pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_bo_args *args = raw_args;
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct pvr_file *pvr_file = to_pvr_file(file);
+
+ struct pvr_gem_object *pvr_obj;
+ size_t sanitized_size;
+
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ /* All padding fields must be zeroed. */
+ if (args->_padding_c != 0) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ /*
+ * On 64-bit platforms (our primary target), size_t is a u64. However,
+ * on other architectures we have to check for overflow when casting
+ * down to size_t from u64.
+ *
+ * We also disallow zero-sized allocations, and reserved (kernel-only)
+ * flags.
+ */
+ if (args->size > SIZE_MAX || args->size == 0 || args->flags &
+ ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ sanitized_size = (size_t)args->size;
+
+ /*
+ * Create a buffer object and transfer ownership to a userspace-
+ * accessible handle.
+ */
+ pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
+ if (IS_ERR(pvr_obj)) {
+ err = PTR_ERR(pvr_obj);
+ goto err_drm_dev_exit;
+ }
+
+ /* This function will not modify &args->handle unless it succeeds. */
+ err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
+ if (err)
+ goto err_destroy_obj;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_destroy_obj:
+ /*
+ * GEM objects are refcounted, so there is no explicit destructor
+ * function. Instead, we release the singular reference we currently
+ * hold on the object and let GEM take care of the rest.
+ */
+ pvr_gem_object_put(pvr_obj);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
+ * used when calling mmap() from userspace to map the given GEM buffer object
+ * @drm_dev: [IN] DRM device (unused).
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
+ *
+ * This IOCTL does *not* perform an mmap. See the docs on
+ * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%ENOENT if the handle does not reference a valid GEM buffer object,
+ * * -%EINVAL if any padding fields in &struct
+ * drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
+ * * Any error returned by drm_gem_create_mmap_offset().
+ */
+static int
+pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gem_object *gem_obj;
+ int idx;
+ int ret;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ /* All padding fields must be zeroed. */
+ if (args->_padding_4 != 0) {
+ ret = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ /*
+ * Obtain a kernel reference to the buffer object. This reference is
+ * counted and must be manually dropped before returning. If a buffer
+ * object cannot be found for the specified handle, return -%ENOENT (No
+ * such file or directory).
+ */
+ pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+ if (!pvr_obj) {
+ ret = -ENOENT;
+ goto err_drm_dev_exit;
+ }
+
+ gem_obj = gem_from_pvr_gem(pvr_obj);
+
+ /*
+ * Allocate a fake offset which can be used in userspace calls to mmap
+ * on the DRM device file. If this fails, return the error code. This
+ * operation is idempotent.
+ */
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret != 0) {
+ /* Drop our reference to the buffer object. */
+ drm_gem_object_put(gem_obj);
+ goto err_drm_dev_exit;
+ }
+
+ /*
+ * Read out the fake offset allocated by the earlier call to
+ * drm_gem_create_mmap_offset.
+ */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+ /* Drop our reference to the buffer object. */
+ pvr_gem_object_put(pvr_obj);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
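+/**
+ * pvr_fw_version_packed() - Pack firmware version components into a u64
+ * @major: Major version number.
+ * @minor: Minor version number.
+ *
+ * Return: @major in the upper 32 bits, @minor in the lower 32 bits.
+ */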
+static __always_inline u64
+pvr_fw_version_packed(u32 major, u32 minor)
+{
+ return ((u64)major << 32) | minor;
+}
+
+static u32
+rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
+{
+ u32 max_partitions = 0;
+ u32 tile_size_x = 0;
+ u32 tile_size_y = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
+ PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
+ PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);
+
+ if (tile_size_x == 16 && tile_size_y == 16) {
+ u32 usc_min_output_registers_per_pix = 0;
+
+ PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
+ &usc_min_output_registers_per_pix);
+
+ return tile_size_x * tile_size_y * max_partitions *
+ usc_min_output_registers_per_pix;
+ }
+
+ return max_partitions * 1024;
+}
+
+static u32
+rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
+{
+ u32 common_store_size_in_dwords = 512 * 4 * 4;
+ u32 alloc_region_size;
+
+ PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);
+
+ alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
+ rogue_get_common_store_partition_space_size(pvr_dev);
+
+ if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
+ u32 common_store_split_point = (768U * 4U * 4U);
+
+ return min(common_store_split_point - (256U * 4U), alloc_region_size);
+ }
+
+ return alloc_region_size;
+}
+
+static inline u32
+rogue_get_num_phantoms(struct pvr_device *pvr_dev)
+{
+ u32 num_clusters = 1;
+
+ PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);
+
+ return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
+}
+
+static inline u32
+rogue_get_max_coeffs(struct pvr_device *pvr_dev)
+{
+ u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
+ u32 pending_allocation_shared_regs = 2U * 1024U;
+ u32 pending_allocation_coeff_regs = 0U;
+ u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
+ u32 tiles_in_flight = 0;
+ u32 max_coeff_pixel_portion;
+
+ PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
+ max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
+ max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;
+
+ /*
+ * Compute tasks on cores with BRN48492 and without compute overlap may lock
+ * up without two additional lines of coeffs.
+ */
+ if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
+ pending_allocation_coeff_regs = 2U * 1024U;
+
+ if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
+ pending_allocation_shared_regs = 0;
+
+ if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
+ max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;
+
+ return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
+ (max_coeff_pixel_portion + max_coeff_additional_portion +
+ pending_allocation_shared_regs);
+}
+
+static inline u32
+rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
+{
+ u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);
+
+ if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
+ !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
+ /* Driver must not use the 2 reserved lines. */
+ available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
+ }
+
+ /*
+ * The maximum amount of local memory available to a kernel is the minimum
+ * of the total number of coefficient registers available and the max common
+ * store allocation size which can be made by the CDM.
+ *
+ * If any coeff lines are reserved for tessellation or pixel then we need to
+ * subtract those too.
+ */
+ return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
+}
+
+/**
+ * pvr_dev_query_gpu_info_get() - Copy GPU information to userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_gpu_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ * * 0 on success, or if size is requested using a NULL pointer, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_gpu_info gpu_info = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
+ return 0;
+ }
+
+ gpu_info.gpu_id =
+ pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
+ gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);
+
+ err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
+ if (err < 0)
+ return err;
+
+ if (args->size > sizeof(gpu_info))
+ args->size = sizeof(gpu_info);
+ return 0;
+}
+
+/**
+ * pvr_dev_query_runtime_info_get() - Copy runtime information to userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_runtime_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ * * 0 on success, or if size is requested using a NULL pointer, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_runtime_info runtime_info = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
+ return 0;
+ }
+
+ runtime_info.free_list_min_pages =
+ pvr_get_free_list_min_pages(pvr_dev);
+ runtime_info.free_list_max_pages =
+ ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
+ runtime_info.common_store_alloc_region_size =
+ rogue_get_common_store_alloc_region_size(pvr_dev);
+ runtime_info.common_store_partition_space_size =
+ rogue_get_common_store_partition_space_size(pvr_dev);
+ runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
+ runtime_info.cdm_max_local_mem_size_regs =
+ rogue_get_cdm_max_local_mem_size_regs(pvr_dev);
+
+ err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
+ if (err < 0)
+ return err;
+
+ if (args->size > sizeof(runtime_info))
+ args->size = sizeof(runtime_info);
+ return 0;
+}
+
+/**
+ * pvr_dev_query_quirks_get() - Unpack array of quirks at the address given
+ * in a struct drm_pvr_dev_query_quirks, or gets the amount of space required
+ * for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_query_quirks.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of entries copied, or, if
+ * either pointer is NULL, to the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ * * 0 on success, or if size/count is requested using a NULL pointer, or
+ * * -%EINVAL if args contained non-zero reserved fields, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ /*
+ * @FIXME - hardcoding of numbers here is intended as an
+ * intermediate step so the UAPI can be fixed, but requires a
+ * refactor in the future to store them in a more appropriate
+ * location.
+ */
+ static const u32 umd_quirks_musthave[] = {
+ 47217,
+ 49927,
+ 62269,
+ };
+ static const u32 umd_quirks[] = {
+ 48545,
+ 51764,
+ };
+ struct drm_pvr_dev_query_quirks query;
+ u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
+ size_t out_musthave_count = 0;
+ size_t out_count = 0;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_quirks);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+ if (err < 0)
+ return err;
+ if (query._padding_c)
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
+ if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
+ out[out_count++] = umd_quirks_musthave[i];
+ out_musthave_count++;
+ }
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
+ if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
+ out[out_count++] = umd_quirks[i];
+ }
+
+ if (!query.quirks)
+ goto copy_out;
+ if (query.count < out_count)
+ return -E2BIG;
+
+ if (copy_to_user(u64_to_user_ptr(query.quirks), out,
+ out_count * sizeof(u32))) {
+ return -EFAULT;
+ }
+
+ query.musthave_count = out_musthave_count;
+
+copy_out:
+ query.count = out_count;
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+/**
+ * pvr_dev_query_enhancements_get() - Unpack array of enhancements at the
+ * address given in a struct drm_pvr_dev_query_enhancements, or get the
+ * amount of space required for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_enhancements.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of enhancements copied or,
+ * if either pointer is NULL, to the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ * * 0 on success, or if size/count is requested using a NULL pointer, or
+ * * -%EINVAL if args contained non-zero reserved fields, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ /*
+ * @FIXME - hardcoding of numbers here is intended as an
+ * intermediate step so the UAPI can be fixed, but requires a
+	 * refactor in the future to store them in a more appropriate
+	 * location.
+ */
+	static const u32 umd_enhancements[] = {
+ 35421,
+ 42064,
+ };
+ struct drm_pvr_dev_query_enhancements query;
+ u32 out[ARRAY_SIZE(umd_enhancements)];
+ size_t out_idx = 0;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_enhancements);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+ if (err < 0)
+ return err;
+ if (query._padding_a)
+ return -EINVAL;
+ if (query._padding_c)
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
+ if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
+ out[out_idx++] = umd_enhancements[i];
+ }
+
+ if (!query.enhancements)
+ goto copy_out;
+ if (query.count < out_idx)
+ return -E2BIG;
+
+ if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
+ out_idx * sizeof(u32))) {
+ return -EFAULT;
+ }
+
+copy_out:
+ query.count = out_idx;
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_dev_query() - IOCTL to copy information about a device
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_dev_query_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
+ * If the given receiving struct pointer is NULL, or the indicated size is too
+ * small, the expected size of the struct type will be returned in the size
+ * argument field.
+ *
+ * Return:
+ * * 0 on success or when fetching the size with args->pointer == NULL, or
+ * * -%E2BIG if the indicated size of the receiving struct is less than is
+ * required to contain the copied data, or
+ * * -%EINVAL if the indicated struct type is unknown, or
+ * * -%ENOMEM if local memory could not be allocated, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct drm_pvr_ioctl_dev_query_args *args = raw_args;
+ int idx;
+ int ret = -EINVAL;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ switch ((enum drm_pvr_dev_query)args->type) {
+ case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
+ ret = pvr_dev_query_gpu_info_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
+ ret = pvr_dev_query_runtime_info_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_QUIRKS_GET:
+ ret = pvr_dev_query_quirks_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
+ ret = pvr_dev_query_enhancements_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
+ ret = pvr_heap_info_get(pvr_dev, args);
+ break;
+
+ case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
+ ret = pvr_static_data_areas_get(pvr_dev, args);
+ break;
+ }
+
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
+/**
+ * pvr_ioctl_create_context() - IOCTL to create a context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if provided arguments are invalid, or
+ * * -%EFAULT if arguments can't be copied from userspace, or
+ * * Any error returned by pvr_create_render_context().
+ */
+static int
+pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_context_args *args = raw_args;
+ struct pvr_file *pvr_file = file->driver_priv;
+ int idx;
+ int ret;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ ret = pvr_context_create(pvr_file, args);
+
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
+/**
+ * pvr_ioctl_destroy_context() - IOCTL to destroy a context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if context not in context list.
+ */
+static int
+pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
+ struct pvr_file *pvr_file = file->driver_priv;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ return pvr_context_destroy(pvr_file, args->handle);
+}
+
+/**
+ * pvr_ioctl_create_free_list() - IOCTL to create a free list
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_free_list_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_free_list_create().
+ */
+static int
+pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_free_list *free_list;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ free_list = pvr_free_list_create(pvr_file, args);
+ if (IS_ERR(free_list)) {
+ err = PTR_ERR(free_list);
+ goto err_drm_dev_exit;
+ }
+
+ /* Allocate object handle for userspace. */
+ err = xa_alloc(&pvr_file->free_list_handles,
+ &args->handle,
+ free_list,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_cleanup;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_cleanup:
+ pvr_free_list_put(free_list);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_free_list_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if free list not in object list.
+ */
+static int
+pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_free_list *free_list;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
+ if (!free_list)
+ return -EINVAL;
+
+ pvr_free_list_put(free_list);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_hwrt_dataset_create().
+ */
+static int
+pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_hwrt_dataset *hwrt;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ hwrt = pvr_hwrt_dataset_create(pvr_file, args);
+ if (IS_ERR(hwrt)) {
+ err = PTR_ERR(hwrt);
+ goto err_drm_dev_exit;
+ }
+
+ /* Allocate object handle for userspace. */
+ err = xa_alloc(&pvr_file->hwrt_handles,
+ &args->handle,
+ hwrt,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_cleanup;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_cleanup:
+ pvr_hwrt_dataset_put(hwrt);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if HWRT dataset not in object list.
+ */
+static int
+pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_hwrt_dataset *hwrt;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
+ if (!hwrt)
+ return -EINVAL;
+
+ pvr_hwrt_dataset_put(hwrt);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_vm_create_context().
+ */
+static int
+pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ if (args->_padding_4) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
+ if (IS_ERR(vm_ctx)) {
+ err = PTR_ERR(vm_ctx);
+ goto err_drm_dev_exit;
+ }
+
+ /* Allocate object handle for userspace. */
+ err = xa_alloc(&pvr_file->vm_ctx_handles,
+ &args->handle,
+ vm_ctx,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_cleanup;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_cleanup:
+ pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if object not in object list.
+ */
+static int
+pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+
+ if (args->_padding_4)
+ return -EINVAL;
+
+ vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
+ if (!vm_ctx)
+ return -EINVAL;
+
+ pvr_vm_context_put(vm_ctx);
+ return 0;
+}
+
+/**
+ * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_vm_map_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if &drm_pvr_ioctl_vm_map_args.flags is not zero,
+ * * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_map_args.offset
+ * and &drm_pvr_ioctl_vm_map_args.size are not valid or do not fall
+ * within the buffer object specified by
+ * &drm_pvr_ioctl_vm_map_args.handle,
+ * * -%EINVAL if the bounds specified by
+ * &drm_pvr_ioctl_vm_map_args.device_addr and
+ * &drm_pvr_ioctl_vm_map_args.size do not form a valid device-virtual
+ * address range which falls entirely within a single heap, or
+ * * -%ENOENT if &drm_pvr_ioctl_vm_map_args.handle does not refer to a
+ * valid PowerVR buffer object.
+ */
+static int
+pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct drm_pvr_ioctl_vm_map_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+
+ struct pvr_gem_object *pvr_obj;
+ size_t pvr_obj_size;
+
+ u64 offset_plus_size;
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ /* Initial validation of args. */
+ if (args->_padding_14) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ if (args->flags != 0 ||
+ check_add_overflow(args->offset, args->size, &offset_plus_size) ||
+ !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (!vm_ctx) {
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+ if (!pvr_obj) {
+ err = -ENOENT;
+ goto err_put_vm_context;
+ }
+
+ pvr_obj_size = pvr_gem_object_size(pvr_obj);
+
+ /*
+ * Validate offset and size args. The alignment of these will be
+ * checked when mapping; for now just check that they're within valid
+	 * bounds.
+ */
+ if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
+ err = -EINVAL;
+ goto err_put_pvr_object;
+ }
+
+ err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
+ args->device_addr, args->size);
+ if (err)
+ goto err_put_pvr_object;
+
+ /*
+ * In order to set up the mapping, we needed a reference to &pvr_obj.
+ * However, pvr_vm_map() obtains and stores its own reference, so we
+ * must release ours before returning.
+ */
+
+err_put_pvr_object:
+ pvr_gem_object_put(pvr_obj);
+
+err_put_vm_context:
+ pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_vm_unmap_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if &drm_pvr_ioctl_vm_unmap_args.device_addr is not a valid
+ * device page-aligned device-virtual address, or
+ * * -%ENOENT if there is currently no PowerVR buffer object mapped at
+ * &drm_pvr_ioctl_vm_unmap_args.device_addr.
+ */
+static int
+pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ struct pvr_vm_context *vm_ctx;
+ int err;
+
+ /* Initial validation of args. */
+ if (args->_padding_4)
+ return -EINVAL;
+
+ vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (!vm_ctx)
+ return -EINVAL;
+
+ err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);
+
+ pvr_vm_context_put(vm_ctx);
+
+ return err;
+}
+
+/**
+ * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_submit_jobs_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if arguments are invalid.
+ */
+static int
+pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
+ struct drm_file *file)
+{
+ struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct pvr_file *pvr_file = to_pvr_file(file);
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ err = pvr_submit_jobs(pvr_dev, pvr_file, args);
+
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+int
+pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
+{
+ if (usr_stride < min_stride)
+ return -EINVAL;
+
+ return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
+}
+
+int
+pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
+{
+ if (usr_stride < min_stride)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
+ return -EFAULT;
+
+ if (usr_stride > obj_size &&
+ clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
+{
+ int ret = 0;
+ void *out_alloc;
+
+ if (in->stride < min_stride)
+ return -EINVAL;
+
+ if (!in->count)
+ return 0;
+
+ out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
+ if (!out_alloc)
+ return -ENOMEM;
+
+ if (obj_size == in->stride) {
+ if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
+ (unsigned long)obj_size * in->count))
+ ret = -EFAULT;
+ } else {
+ void __user *in_ptr = u64_to_user_ptr(in->array);
+ void *out_ptr = out_alloc;
+
+ for (u32 i = 0; i < in->count; i++) {
+ ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
+ if (ret)
+ break;
+
+ out_ptr += obj_size;
+ in_ptr += in->stride;
+ }
+ }
+
+ if (ret) {
+ kvfree(out_alloc);
+ return ret;
+ }
+
+ *out = out_alloc;
+ return 0;
+}
+
+int
+pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+ const void *in)
+{
+ if (out->stride < min_stride)
+ return -EINVAL;
+
+ if (!out->count)
+ return 0;
+
+ if (obj_size == out->stride) {
+ if (copy_to_user(u64_to_user_ptr(out->array), in,
+ (unsigned long)obj_size * out->count))
+ return -EFAULT;
+ } else {
+ u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
+ void __user *out_ptr = u64_to_user_ptr(out->array);
+ const void *in_ptr = in;
+
+ for (u32 i = 0; i < out->count; i++) {
+ if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
+ return -EFAULT;
+
+			out_ptr += out->stride;
+			in_ptr += obj_size;
+ }
+
+ if (out->stride > obj_size &&
+ clear_user(u64_to_user_ptr(out->array + obj_size),
+ out->stride - obj_size)) {
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+#define DRM_PVR_IOCTL(_name, _func, _flags) \
+ DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)
+
+/* clang-format off */
+
+static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
+ DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
+ DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
+};
+
+/* clang-format on */
+
+#undef DRM_PVR_IOCTL
+
+/**
+ * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
+ * @drm_dev: [IN] DRM device.
+ * @file: [IN] DRM file private data.
+ *
+ * Allocates powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%ENOMEM if the allocation of a &struct pvr_file fails, or
+ * * Any error returned by pvr_memory_context_init().
+ */
+static int
+pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ struct pvr_file *pvr_file;
+
+ pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
+ if (!pvr_file)
+ return -ENOMEM;
+
+ /*
+ * Store reference to base DRM file private data for use by
+ * from_pvr_file.
+ */
+ pvr_file->file = file;
+
+ /*
+ * Store reference to powervr-specific outer device struct in file
+ * private data for convenient access.
+ */
+ pvr_file->pvr_dev = pvr_dev;
+
+ xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);
+
+ /*
+ * Store reference to powervr-specific file private data in DRM file
+ * private data.
+ */
+ file->driver_priv = pvr_file;
+
+ return 0;
+}
+
+/**
+ * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
+ * drm_file is closed.
+ * @drm_dev: [IN] DRM device (unused).
+ * @file: [IN] DRM file private data.
+ *
+ * Frees powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ */
+static void
+pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
+ struct drm_file *file)
+{
+ struct pvr_file *pvr_file = to_pvr_file(file);
+
+ /* Kill remaining contexts. */
+ pvr_destroy_contexts_for_file(pvr_file);
+
+ /* Drop references on any remaining objects. */
+ pvr_destroy_free_lists_for_file(pvr_file);
+ pvr_destroy_hwrt_datasets_for_file(pvr_file);
+ pvr_destroy_vm_contexts_for_file(pvr_file);
+
+ kfree(pvr_file);
+ file->driver_priv = NULL;
+}
+
+DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);
+
+static struct drm_driver pvr_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .open = pvr_drm_driver_open,
+ .postclose = pvr_drm_driver_postclose,
+ .ioctls = pvr_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
+ .fops = &pvr_drm_driver_fops,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = pvr_debugfs_init,
+#endif
+
+ .name = PVR_DRIVER_NAME,
+ .desc = PVR_DRIVER_DESC,
+ .date = PVR_DRIVER_DATE,
+ .major = PVR_DRIVER_MAJOR,
+ .minor = PVR_DRIVER_MINOR,
+ .patchlevel = PVR_DRIVER_PATCHLEVEL,
+
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+ .gem_create_object = pvr_gem_create_object,
+};
+
+static int
+pvr_probe(struct platform_device *plat_dev)
+{
+ struct pvr_device *pvr_dev;
+ struct drm_device *drm_dev;
+ int err;
+
+ pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
+ struct pvr_device, base);
+ if (IS_ERR(pvr_dev))
+ return PTR_ERR(pvr_dev);
+
+ drm_dev = &pvr_dev->base;
+
+ platform_set_drvdata(plat_dev, drm_dev);
+
+ init_rwsem(&pvr_dev->reset_sem);
+
+ pvr_context_device_init(pvr_dev);
+
+ err = pvr_queue_device_init(pvr_dev);
+ if (err)
+ goto err_context_fini;
+
+ devm_pm_runtime_enable(&plat_dev->dev);
+ pm_runtime_mark_last_busy(&plat_dev->dev);
+
+ pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
+ pm_runtime_use_autosuspend(&plat_dev->dev);
+ pvr_watchdog_init(pvr_dev);
+
+ err = pvr_device_init(pvr_dev);
+ if (err)
+ goto err_watchdog_fini;
+
+ err = drm_dev_register(drm_dev, 0);
+ if (err)
+ goto err_device_fini;
+
+ xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
+ xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);
+
+ return 0;
+
+err_device_fini:
+ pvr_device_fini(pvr_dev);
+
+err_watchdog_fini:
+ pvr_watchdog_fini(pvr_dev);
+
+ pvr_queue_device_fini(pvr_dev);
+
+err_context_fini:
+ pvr_context_device_fini(pvr_dev);
+
+ return err;
+}
+
+static int
+pvr_remove(struct platform_device *plat_dev)
+{
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+
+ WARN_ON(!xa_empty(&pvr_dev->job_ids));
+ WARN_ON(!xa_empty(&pvr_dev->free_list_ids));
+
+ xa_destroy(&pvr_dev->job_ids);
+ xa_destroy(&pvr_dev->free_list_ids);
+
+ pm_runtime_suspend(drm_dev->dev);
+ pvr_device_fini(pvr_dev);
+ drm_dev_unplug(drm_dev);
+ pvr_watchdog_fini(pvr_dev);
+ pvr_queue_device_fini(pvr_dev);
+ pvr_context_device_fini(pvr_dev);
+
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "img,img-axe", .data = NULL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static const struct dev_pm_ops pvr_pm_ops = {
+ RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)
+};
+
+static struct platform_driver pvr_driver = {
+ .probe = pvr_probe,
+ .remove = pvr_remove,
+ .driver = {
+ .name = PVR_DRIVER_NAME,
+ .pm = &pvr_pm_ops,
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(pvr_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION(PVR_DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h
new file mode 100644
index 000000000..378fe477b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRV_H
+#define PVR_DRV_H
+
+#include <linux/compiler_attributes.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_DRIVER_NAME "powervr"
+#define PVR_DRIVER_DESC "Imagination PowerVR (Series 6 and later) & IMG Graphics"
+#define PVR_DRIVER_DATE "20230904"
+
+/*
+ * Driver interface version:
+ * - 1.0: Initial interface
+ */
+#define PVR_DRIVER_MAJOR 1
+#define PVR_DRIVER_MINOR 0
+#define PVR_DRIVER_PATCHLEVEL 0
+
+int pvr_get_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, void *out);
+int pvr_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, const void *in);
+int pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size,
+ void **out);
+int pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+ const void *in);
+
+#define PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
+ (offsetof(_typename, _last_mandatory_field) + \
+ sizeof(((_typename *)NULL)->_last_mandatory_field))
+
+/* NOLINTBEGIN(bugprone-macro-parentheses) */
+#define PVR_UOBJ_DECL(_typename, _last_mandatory_field) \
+ , _typename : PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
+/* NOLINTEND(bugprone-macro-parentheses) */
+
+/**
+ * DOC: PVR user objects.
+ *
+ * Macros used to aid copying structured and array data to and from
+ * userspace. Objects can differ in size, provided the minimum size
+ * allowed is specified (using the last mandatory field in the struct).
+ * All types used with PVR_UOBJ_GET/SET macros must be listed here under
+ * PVR_UOBJ_MIN_SIZE, with the last mandatory struct field specified.
+ */
+
+/**
+ * PVR_UOBJ_MIN_SIZE() - Fetch the minimum copy size of a compatible type object.
+ * @_obj_name: The name of the object. Cannot be a typename - this is deduced.
+ *
+ * This cannot fail. Using the macro with an incompatible type will result in a
+ * compiler error.
+ *
+ * To add compatibility for a type, list it within the macro in an orderly
+ * fashion. The second argument is the name of the last mandatory field of the
+ * struct type, which is used to calculate the size. See also PVR_UOBJ_DECL().
+ *
+ * Return: The minimum copy size.
+ */
+#define PVR_UOBJ_MIN_SIZE(_obj_name) _Generic(_obj_name \
+ PVR_UOBJ_DECL(struct drm_pvr_job, hwrt) \
+ PVR_UOBJ_DECL(struct drm_pvr_sync_op, value) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_gpu_info, num_phantoms) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_runtime_info, cdm_max_local_mem_size_regs) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_quirks, _padding_c) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_enhancements, _padding_c) \
+ PVR_UOBJ_DECL(struct drm_pvr_heap, page_size_log2) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_heap_info, heaps) \
+ PVR_UOBJ_DECL(struct drm_pvr_static_data_area, offset) \
+ PVR_UOBJ_DECL(struct drm_pvr_dev_query_static_data_areas, static_data_areas) \
+ )
+
+/**
+ * PVR_UOBJ_GET() - Copies from _src_usr_ptr to &_dest_obj.
+ * @_dest_obj: The destination container object in kernel space.
+ * @_usr_size: The size of the source container in user space.
+ * @_src_usr_ptr: __u64 raw pointer to the source container in user space.
+ *
+ * Return: Error code. See pvr_get_uobj().
+ */
+#define PVR_UOBJ_GET(_dest_obj, _usr_size, _src_usr_ptr) \
+ pvr_get_uobj(_src_usr_ptr, _usr_size, \
+ PVR_UOBJ_MIN_SIZE(_dest_obj), \
+ sizeof(_dest_obj), &(_dest_obj))
+
+/**
+ * PVR_UOBJ_SET() - Copies from &_src_obj to _dest_usr_ptr.
+ * @_dest_usr_ptr: __u64 raw pointer to the destination container in user space.
+ * @_usr_size: The size of the destination container in user space.
+ * @_src_obj: The source container object in kernel space.
+ *
+ * Return: Error code. See pvr_set_uobj().
+ */
+#define PVR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
+ pvr_set_uobj(_dest_usr_ptr, _usr_size, \
+ PVR_UOBJ_MIN_SIZE(_src_obj), \
+ sizeof(_src_obj), &(_src_obj))
+
+/**
+ * PVR_UOBJ_GET_ARRAY() - Copies from @_src_drm_pvr_obj_array.array to
+ * allocated memory and returns a pointer in _dest_array.
+ * @_dest_array: The destination C array object in kernel space.
+ * @_src_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw
+ * pointer to the source C array in user space and the size of each array
+ * element in user space (the 'stride').
+ *
+ * Return: Error code. See pvr_get_uobj_array().
+ */
+#define PVR_UOBJ_GET_ARRAY(_dest_array, _src_drm_pvr_obj_array) \
+ pvr_get_uobj_array(_src_drm_pvr_obj_array, \
+ PVR_UOBJ_MIN_SIZE((_dest_array)[0]), \
+ sizeof((_dest_array)[0]), (void **)&(_dest_array))
+
+/**
+ * PVR_UOBJ_SET_ARRAY() - Copies from _src_array to @_dest_drm_pvr_obj_array.array.
+ * @_dest_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw
+ * pointer to the destination C array in user space and the size of each array
+ * element in user space (the 'stride').
+ * @_src_array: The source C array object in kernel space.
+ *
+ * Return: Error code. See pvr_set_uobj_array().
+ */
+#define PVR_UOBJ_SET_ARRAY(_dest_drm_pvr_obj_array, _src_array) \
+ pvr_set_uobj_array(_dest_drm_pvr_obj_array, \
+ PVR_UOBJ_MIN_SIZE((_src_array)[0]), \
+ sizeof((_src_array)[0]), _src_array)
+
+#endif /* PVR_DRV_H */
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
new file mode 100644
index 000000000..5e51bc980
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_gem.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define FREE_LIST_ENTRY_SIZE sizeof(u32)
+
+#define FREE_LIST_ALIGNMENT \
+ ((ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE / FREE_LIST_ENTRY_SIZE) - 1)
+
+#define FREE_LIST_MIN_PAGES 50
+#define FREE_LIST_MIN_PAGES_BRN66011 40
+#define FREE_LIST_MIN_PAGES_ROGUEXE 25
+
+/**
+ * pvr_get_free_list_min_pages() - Get minimum free list size for this device
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * Minimum free list size, in PM physical pages.
+ */
+u32
+pvr_get_free_list_min_pages(struct pvr_device *pvr_dev)
+{
+ u32 value;
+
+ if (PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+ if (PVR_HAS_QUIRK(pvr_dev, 66011))
+ value = FREE_LIST_MIN_PAGES_BRN66011;
+ else
+ value = FREE_LIST_MIN_PAGES_ROGUEXE;
+ } else {
+ value = FREE_LIST_MIN_PAGES;
+ }
+
+ return value;
+}
+
+static int
+free_list_create_kernel_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args,
+ struct pvr_free_list *free_list)
+{
+ struct pvr_gem_object *free_list_obj;
+ struct pvr_vm_context *vm_ctx;
+ u64 free_list_size;
+ int err;
+
+ if (args->grow_threshold > 100 ||
+ args->initial_num_pages > args->max_num_pages ||
+ args->grow_num_pages > args->max_num_pages ||
+ args->max_num_pages == 0 ||
+ (args->initial_num_pages < args->max_num_pages && !args->grow_num_pages) ||
+ (args->initial_num_pages == args->max_num_pages && args->grow_num_pages))
+ return -EINVAL;
+
+ if ((args->initial_num_pages & FREE_LIST_ALIGNMENT) ||
+ (args->max_num_pages & FREE_LIST_ALIGNMENT) ||
+ (args->grow_num_pages & FREE_LIST_ALIGNMENT))
+ return -EINVAL;
+
+ vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ if (!vm_ctx)
+ return -EINVAL;
+
+ free_list_obj = pvr_vm_find_gem_object(vm_ctx, args->free_list_gpu_addr,
+ NULL, &free_list_size);
+ if (!free_list_obj) {
+ err = -EINVAL;
+ goto err_put_vm_context;
+ }
+
+ if ((free_list_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS) ||
+ !(free_list_obj->flags & DRM_PVR_BO_PM_FW_PROTECT) ||
+ free_list_size < (args->max_num_pages * FREE_LIST_ENTRY_SIZE)) {
+ err = -EINVAL;
+ goto err_put_free_list_obj;
+ }
+
+ free_list->pvr_dev = pvr_file->pvr_dev;
+ free_list->current_pages = 0;
+ free_list->max_pages = args->max_num_pages;
+ free_list->grow_pages = args->grow_num_pages;
+ free_list->grow_threshold = args->grow_threshold;
+ free_list->obj = free_list_obj;
+ free_list->free_list_gpu_addr = args->free_list_gpu_addr;
+ free_list->initial_num_pages = args->initial_num_pages;
+
+ pvr_vm_context_put(vm_ctx);
+
+ return 0;
+
+err_put_free_list_obj:
+ pvr_gem_object_put(free_list_obj);
+
+err_put_vm_context:
+ pvr_vm_context_put(vm_ctx);
+
+ return err;
+}
+
+static void
+free_list_destroy_kernel_structure(struct pvr_free_list *free_list)
+{
+ WARN_ON(!list_empty(&free_list->hwrt_list));
+
+ pvr_gem_object_put(free_list->obj);
+}
+
+/**
+ * calculate_free_list_ready_pages_locked() - Work out the number of free
+ * list pages to reserve for growing within the FW without having to wait
+ * for the host to progress a grow request
+ * @free_list: Pointer to free list.
+ * @pages: Total pages currently in free list.
+ *
+ * If the threshold or grow size works out at less than the alignment size
+ * (4 pages on Rogue), the feature is not used.
+ *
+ * Caller must hold &free_list->lock.
+ *
+ * Return: number of pages to reserve.
+ */
+static u32
+calculate_free_list_ready_pages_locked(struct pvr_free_list *free_list, u32 pages)
+{
+ u32 ready_pages;
+
+ lockdep_assert_held(&free_list->lock);
+
+ ready_pages = ((pages * free_list->grow_threshold) / 100);
+
+	/* The number of pages must not exceed the grow size. */
+ ready_pages = min(ready_pages, free_list->grow_pages);
+
+ /*
+ * The number of pages must be a multiple of the free list align size.
+ */
+ ready_pages &= ~FREE_LIST_ALIGNMENT;
+
+ return ready_pages;
+}
+
+static u32
+calculate_free_list_ready_pages(struct pvr_free_list *free_list, u32 pages)
+{
+ u32 ret;
+
+ mutex_lock(&free_list->lock);
+
+ ret = calculate_free_list_ready_pages_locked(free_list, pages);
+
+ mutex_unlock(&free_list->lock);
+
+ return ret;
+}
+
+static void
+free_list_fw_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_freelist *fw_data = cpu_ptr;
+ struct pvr_free_list *free_list = priv;
+ u32 ready_pages;
+
+ /* Fill out FW structure */
+ ready_pages = calculate_free_list_ready_pages(free_list,
+ free_list->initial_num_pages);
+
+ fw_data->max_pages = free_list->max_pages;
+ fw_data->current_pages = free_list->initial_num_pages - ready_pages;
+ fw_data->grow_pages = free_list->grow_pages;
+ fw_data->ready_pages = ready_pages;
+ fw_data->freelist_id = free_list->fw_id;
+ fw_data->grow_pending = false;
+ fw_data->current_stack_top = fw_data->current_pages - 1;
+ fw_data->freelist_dev_addr = free_list->free_list_gpu_addr;
+ fw_data->current_dev_addr = (fw_data->freelist_dev_addr +
+ ((fw_data->max_pages - fw_data->current_pages) *
+ FREE_LIST_ENTRY_SIZE)) &
+ ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
+}
+
+static int
+free_list_create_fw_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args,
+ struct pvr_free_list *free_list)
+{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+
+ /*
+ * Create and map the FW structure so we can initialise it. This is not
+ * accessed on the CPU side post-initialisation so the mapping lifetime
+ * is only for this function.
+ */
+ free_list->fw_data = pvr_fw_object_create_and_map(pvr_dev, sizeof(*free_list->fw_data),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ free_list_fw_init, free_list,
+ &free_list->fw_obj);
+ if (IS_ERR(free_list->fw_data))
+ return PTR_ERR(free_list->fw_data);
+
+ return 0;
+}
+
+static void
+free_list_destroy_fw_structure(struct pvr_free_list *free_list)
+{
+ pvr_fw_object_unmap_and_destroy(free_list->fw_obj);
+}
+
+static int
+pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
+ struct sg_table *sgt, u32 offset, u32 num_pages)
+{
+ struct sg_dma_page_iter dma_iter;
+ u32 *page_list;
+
+ lockdep_assert_held(&free_list->lock);
+
+ page_list = pvr_gem_object_vmap(free_list->obj);
+ if (IS_ERR(page_list))
+ return PTR_ERR(page_list);
+
+ offset /= FREE_LIST_ENTRY_SIZE;
+ /* clang-format off */
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
+ u64 dma_pfn = dma_addr >>
+ ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+ u32 dma_addr_offset;
+
+ BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
+
+ for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+ dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
+ WARN_ON_ONCE(dma_pfn >> 32);
+
+ page_list[offset++] = (u32)dma_pfn;
+ dma_pfn++;
+
+ num_pages--;
+ if (!num_pages)
+ break;
+ }
+
+ if (!num_pages)
+ break;
+ }
+ /* clang-format on */
+
+ /* Make sure our free_list update is flushed. */
+ wmb();
+
+ pvr_gem_object_vunmap(free_list->obj);
+
+ return 0;
+}
+
+static int
+pvr_free_list_insert_node_locked(struct pvr_free_list_node *free_list_node)
+{
+ struct pvr_free_list *free_list = free_list_node->free_list;
+ struct sg_table *sgt;
+ u32 start_page;
+ u32 offset;
+ int err;
+
+ lockdep_assert_held(&free_list->lock);
+
+ start_page = free_list->max_pages - free_list->current_pages -
+ free_list_node->num_pages;
+ offset = (start_page * FREE_LIST_ENTRY_SIZE) &
+ ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
+
+ sgt = drm_gem_shmem_get_pages_sgt(&free_list_node->mem_obj->base);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ err = pvr_free_list_insert_pages_locked(free_list, sgt,
+ offset, free_list_node->num_pages);
+ if (!err)
+ free_list->current_pages += free_list_node->num_pages;
+
+ return err;
+}
+
+static int
+pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages)
+{
+ struct pvr_device *pvr_dev = free_list->pvr_dev;
+ struct pvr_free_list_node *free_list_node;
+ int err;
+
+ mutex_lock(&free_list->lock);
+
+ if (num_pages & FREE_LIST_ALIGNMENT) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ free_list_node = kzalloc(sizeof(*free_list_node), GFP_KERNEL);
+ if (!free_list_node) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ free_list_node->num_pages = num_pages;
+ free_list_node->free_list = free_list;
+
+ free_list_node->mem_obj = pvr_gem_object_create(pvr_dev,
+ num_pages <<
+ ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ PVR_BO_FW_FLAGS_DEVICE_CACHED);
+ if (IS_ERR(free_list_node->mem_obj)) {
+ err = PTR_ERR(free_list_node->mem_obj);
+ goto err_free;
+ }
+
+ err = pvr_free_list_insert_node_locked(free_list_node);
+ if (err)
+ goto err_destroy_gem_object;
+
+ list_add_tail(&free_list_node->node, &free_list->mem_block_list);
+
+ /*
+	 * Reserve a number of ready pages to allow the FW to process OOM
+	 * quickly and asynchronously request a grow.
+ */
+ free_list->ready_pages =
+ calculate_free_list_ready_pages_locked(free_list,
+ free_list->current_pages);
+ free_list->current_pages -= free_list->ready_pages;
+
+ mutex_unlock(&free_list->lock);
+
+ return 0;
+
+err_destroy_gem_object:
+ pvr_gem_object_put(free_list_node->mem_obj);
+
+err_free:
+ kfree(free_list_node);
+
+err_unlock:
+ mutex_unlock(&free_list->lock);
+
+ return err;
+}
+
+void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelist_gs_data *req)
+{
+ struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, req->freelist_id);
+ struct rogue_fwif_kccb_cmd resp_cmd = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE,
+ };
+ struct rogue_fwif_freelist_gs_data *resp = &resp_cmd.cmd_data.free_list_gs_data;
+ u32 grow_pages = 0;
+
+ /* If we don't have a freelist registered for this ID, we can't do much. */
+ if (WARN_ON(!free_list))
+ return;
+
+	/*
+	 * Since the FW made the request, it has already consumed the ready
+	 * pages, so update the host struct accordingly.
+	 */
+ free_list->current_pages += free_list->ready_pages;
+ free_list->ready_pages = 0;
+
+ /* If the grow succeeds, update the grow_pages argument. */
+ if (!pvr_free_list_grow(free_list, free_list->grow_pages))
+ grow_pages = free_list->grow_pages;
+
+ /* Now prepare the response and send it back to the FW. */
+ pvr_fw_object_get_fw_addr(free_list->fw_obj, &resp->freelist_fw_addr);
+ resp->delta_pages = grow_pages;
+ resp->new_pages = free_list->current_pages + free_list->ready_pages;
+ resp->ready_pages = free_list->ready_pages;
+ pvr_free_list_put(free_list);
+
+ WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
+}
+
+static void
+pvr_free_list_free_node(struct pvr_free_list_node *free_list_node)
+{
+ pvr_gem_object_put(free_list_node->mem_obj);
+
+ kfree(free_list_node);
+}
+
+/**
+ * pvr_free_list_create() - Create a new free list and return an object pointer
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ *
+ * Return:
+ * * Pointer to new free_list, or
+ * * ERR_PTR(-%ENOMEM) on out of memory.
+ */
+struct pvr_free_list *
+pvr_free_list_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args)
+{
+ struct pvr_free_list *free_list;
+ int err;
+
+ /* Create and fill out the kernel structure */
+ free_list = kzalloc(sizeof(*free_list), GFP_KERNEL);
+
+ if (!free_list)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&free_list->ref_count);
+ INIT_LIST_HEAD(&free_list->mem_block_list);
+ INIT_LIST_HEAD(&free_list->hwrt_list);
+ mutex_init(&free_list->lock);
+
+ err = free_list_create_kernel_structure(pvr_file, args, free_list);
+ if (err < 0)
+ goto err_free;
+
+ /* Allocate global object ID for firmware. */
+ err = xa_alloc(&pvr_file->pvr_dev->free_list_ids,
+ &free_list->fw_id,
+ free_list,
+ xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ goto err_destroy_kernel_structure;
+
+ err = free_list_create_fw_structure(pvr_file, args, free_list);
+ if (err < 0)
+ goto err_free_fw_id;
+
+ err = pvr_free_list_grow(free_list, args->initial_num_pages);
+ if (err < 0)
+ goto err_fw_struct_cleanup;
+
+ return free_list;
+
+err_fw_struct_cleanup:
+ WARN_ON(pvr_fw_structure_cleanup(free_list->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ free_list->fw_obj, 0));
+
+err_free_fw_id:
+ xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);
+
+err_destroy_kernel_structure:
+ free_list_destroy_kernel_structure(free_list);
+
+err_free:
+ mutex_destroy(&free_list->lock);
+ kfree(free_list);
+
+ return ERR_PTR(err);
+}
+
+static void
+pvr_free_list_release(struct kref *ref_count)
+{
+ struct pvr_free_list *free_list =
+ container_of(ref_count, struct pvr_free_list, ref_count);
+ struct list_head *pos, *n;
+ int err;
+
+ xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);
+
+ err = pvr_fw_structure_cleanup(free_list->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ free_list->fw_obj, 0);
+ if (err == -EBUSY) {
+ /* Flush the FWCCB to process any HWR or freelist reconstruction
+ * request that might keep the freelist busy, and try again.
+ */
+ pvr_fwccb_process(free_list->pvr_dev);
+ err = pvr_fw_structure_cleanup(free_list->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ free_list->fw_obj, 0);
+ }
+
+ WARN_ON(err);
+
+ /* clang-format off */
+ list_for_each_safe(pos, n, &free_list->mem_block_list) {
+ struct pvr_free_list_node *free_list_node =
+ container_of(pos, struct pvr_free_list_node, node);
+
+ list_del(pos);
+ pvr_free_list_free_node(free_list_node);
+ }
+ /* clang-format on */
+
+ free_list_destroy_kernel_structure(free_list);
+ free_list_destroy_fw_structure(free_list);
+ mutex_destroy(&free_list->lock);
+ kfree(free_list);
+}
+
+/**
+ * pvr_destroy_free_lists_for_file() - Destroy any free lists associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all free lists associated with @pvr_file from the device free_list
+ * list and drops initial references. Free lists will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_free_list *free_list;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->free_list_handles, handle, free_list) {
+ (void)free_list;
+ pvr_free_list_put(xa_erase(&pvr_file->free_list_handles, handle));
+ }
+}
+
+/**
+ * pvr_free_list_put() - Release reference on free list
+ * @free_list: Pointer to list to release reference on
+ */
+void
+pvr_free_list_put(struct pvr_free_list *free_list)
+{
+ if (free_list)
+ kref_put(&free_list->ref_count, pvr_free_list_release);
+}
+
+void pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
+{
+ mutex_lock(&free_list->lock);
+
+ list_add_tail(&hwrt_data->freelist_node, &free_list->hwrt_list);
+
+ mutex_unlock(&free_list->lock);
+}
+
+void pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
+{
+ mutex_lock(&free_list->lock);
+
+ list_del(&hwrt_data->freelist_node);
+
+ mutex_unlock(&free_list->lock);
+}
+
+static void
+pvr_free_list_reconstruct(struct pvr_device *pvr_dev, u32 freelist_id)
+{
+ struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, freelist_id);
+ struct pvr_free_list_node *free_list_node;
+ struct rogue_fwif_freelist *fw_data;
+ struct pvr_hwrt_data *hwrt_data;
+
+ if (!free_list)
+ return;
+
+ mutex_lock(&free_list->lock);
+
+ /* Rebuild the free list based on the memory block list. */
+ free_list->current_pages = 0;
+
+ list_for_each_entry(free_list_node, &free_list->mem_block_list, node)
+ WARN_ON(pvr_free_list_insert_node_locked(free_list_node));
+
+ /*
+ * Remove the ready pages, which are reserved to allow the FW to process OOM quickly and
+ * asynchronously request a grow.
+ */
+ free_list->current_pages -= free_list->ready_pages;
+
+ fw_data = free_list->fw_data;
+ fw_data->current_stack_top = fw_data->current_pages - 1;
+ fw_data->allocated_page_count = 0;
+ fw_data->allocated_mmu_page_count = 0;
+
+ /* Reset the state of any associated HWRTs. */
+ list_for_each_entry(hwrt_data, &free_list->hwrt_list, freelist_node) {
+ struct rogue_fwif_hwrtdata *hwrt_fw_data = pvr_fw_object_vmap(hwrt_data->fw_obj);
+
+		/* Only unmap if the vmap above actually succeeded. */
+		if (!WARN_ON(IS_ERR(hwrt_fw_data))) {
+			hwrt_fw_data->state = ROGUE_FWIF_RTDATA_STATE_HWR;
+			hwrt_fw_data->hwrt_data_flags &= ~HWRTDATA_HAS_LAST_GEOM;
+			pvr_fw_object_vunmap(hwrt_data->fw_obj);
+		}
+ }
+
+ mutex_unlock(&free_list->lock);
+
+ pvr_free_list_put(free_list);
+}
+
+void
+pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req)
+{
+ struct rogue_fwif_kccb_cmd resp_cmd = {
+ .cmd_type = ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE,
+ };
+ struct rogue_fwif_freelists_reconstruction_data *resp =
+ &resp_cmd.cmd_data.free_lists_reconstruction_data;
+
+ for (u32 i = 0; i < req->freelist_count; i++)
+ pvr_free_list_reconstruct(pvr_dev, req->freelist_ids[i]);
+
+ resp->freelist_count = req->freelist_count;
+ memcpy(resp->freelist_ids, req->freelist_ids,
+ req->freelist_count * sizeof(resp->freelist_ids[0]));
+
+ WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.h b/drivers/gpu/drm/imagination/pvr_free_list.h
new file mode 100644
index 000000000..bfb4f5fc6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FREE_LIST_H
+#define PVR_FREE_LIST_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_device.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+/* Forward declaration from pvr_hwrt.h. */
+struct pvr_hwrt_data;
+
+/**
+ * struct pvr_free_list_node - structure representing an allocation in the free
+ * list
+ */
+struct pvr_free_list_node {
+ /** @node: List node for &pvr_free_list.mem_block_list. */
+ struct list_head node;
+
+ /** @free_list: Pointer to owning free list. */
+ struct pvr_free_list *free_list;
+
+ /** @num_pages: Number of pages in this node. */
+ u32 num_pages;
+
+ /** @mem_obj: GEM object representing the pages in this node. */
+ struct pvr_gem_object *mem_obj;
+};
+
+/**
+ * struct pvr_free_list - structure representing a free list
+ */
+struct pvr_free_list {
+ /** @ref_count: Reference count of object. */
+ struct kref ref_count;
+
+ /** @pvr_dev: Pointer to device that owns this object. */
+ struct pvr_device *pvr_dev;
+
+ /** @obj: GEM object representing the free list. */
+ struct pvr_gem_object *obj;
+
+ /** @fw_obj: FW object representing the FW-side structure. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @fw_data: Pointer to CPU mapping of the FW-side structure. */
+ struct rogue_fwif_freelist *fw_data;
+
+ /**
+ * @lock: Mutex protecting modification of the free list. Must be held when accessing any
+ * of the members below.
+ */
+ struct mutex lock;
+
+ /** @fw_id: Firmware ID for this object. */
+ u32 fw_id;
+
+ /** @current_pages: Current number of pages in free list. */
+ u32 current_pages;
+
+ /** @max_pages: Maximum number of pages in free list. */
+ u32 max_pages;
+
+ /** @grow_pages: Pages to grow free list by per request. */
+ u32 grow_pages;
+
+ /**
+ * @grow_threshold: Percentage of FL memory used that should trigger a
+ * new grow request.
+ */
+ u32 grow_threshold;
+
+ /**
+ * @ready_pages: Number of pages reserved for FW to use while a grow
+ * request is being processed.
+ */
+ u32 ready_pages;
+
+ /** @mem_block_list: List of memory blocks in this free list. */
+ struct list_head mem_block_list;
+
+ /** @hwrt_list: List of HWRTs using this free list. */
+ struct list_head hwrt_list;
+
+ /** @initial_num_pages: Initial number of pages in free list. */
+ u32 initial_num_pages;
+
+ /** @free_list_gpu_addr: Address of free list in GPU address space. */
+ u64 free_list_gpu_addr;
+};
+
+struct pvr_free_list *
+pvr_free_list_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_free_list_args *args);
+
+void
+pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file);
+
+u32
+pvr_get_free_list_min_pages(struct pvr_device *pvr_dev);
+
+static __always_inline struct pvr_free_list *
+pvr_free_list_get(struct pvr_free_list *free_list)
+{
+ if (free_list)
+ kref_get(&free_list->ref_count);
+
+ return free_list;
+}
+
+/**
+ * pvr_free_list_lookup() - Lookup free list pointer from handle and file
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on free list object. Call pvr_free_list_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, is not a free list, or
+ * does not belong to @pvr_file)
+ */
+static __always_inline struct pvr_free_list *
+pvr_free_list_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_free_list *free_list;
+
+ xa_lock(&pvr_file->free_list_handles);
+ free_list = pvr_free_list_get(xa_load(&pvr_file->free_list_handles, handle));
+ xa_unlock(&pvr_file->free_list_handles);
+
+ return free_list;
+}
+
+/**
+ * pvr_free_list_lookup_id() - Lookup free list pointer from FW ID
+ * @pvr_dev: Device pointer.
+ * @id: FW object ID.
+ *
+ * Takes reference on free list object. Call pvr_free_list_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a free list)
+ */
+static __always_inline struct pvr_free_list *
+pvr_free_list_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+ struct pvr_free_list *free_list;
+
+ xa_lock(&pvr_dev->free_list_ids);
+
+	/* Free lists are removed from the free_list_ids set in the release
+	 * path, meaning the ref_count reached zero before they get removed.
+	 * We need to make sure we're not trying to acquire a free list
+	 * that's being destroyed.
+	 */
+ free_list = xa_load(&pvr_dev->free_list_ids, id);
+ if (free_list && !kref_get_unless_zero(&free_list->ref_count))
+ free_list = NULL;
+ xa_unlock(&pvr_dev->free_list_ids);
+
+ return free_list;
+}
+
+void
+pvr_free_list_put(struct pvr_free_list *free_list);
+
+void
+pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data);
+void
+pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data);
+
+void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelist_gs_data *req);
+
+void
+pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev,
+ struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req);
+
+#endif /* PVR_FREE_LIST_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
new file mode 100644
index 000000000..3debc9870
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -0,0 +1,1489 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_fw_trace.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif_dev_info.h"
+#include "pvr_rogue_heap_config.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_mm.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+
+#define FW_MAX_SUPPORTED_MAJOR_VERSION 1
+
+#define FW_BOOT_TIMEOUT_USEC 5000000
+
+/* Config heap occupies top 192k of the firmware heap. */
+#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
+#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+
+/* Main firmware allocations should come from the remainder of the heap. */
+#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE
+
+/* Offsets from start of configuration area of FW heap. */
+#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0
+#define PVR_ROGUE_FWIF_OSINIT_OFFSET \
+ (PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \
+ (PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
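+
+/*
+ * For reference, the offsets above give the config area the following layout
+ * (a restatement of the macros, not new definitions):
+ *
+ *	0x00000: FWIF connection control structure
+ *	0x10000: FWIF OSINIT structure
+ *	0x20000: FWIF SYSINIT structure
+ *
+ * i.e. one 64k granule per structure within the 192k config heap.
+ */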
+
+#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K
+
+#define PVR_SYNC_OBJ_SIZE sizeof(u32)
+
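+/**
+ * pvr_fw_find_layout_entry() - Look up an entry in the firmware layout table
+ * @pvr_dev: Device pointer.
+ * @id: Section ID to search for.
+ *
+ * Returns:
+ * * Pointer to the matching layout entry, or
+ * * %NULL if the layout table contains no entry with @id.
+ */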
+const struct pvr_fw_layout_entry *
+pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u32 entry;
+
+ for (entry = 0; entry < num_layout_entries; entry++) {
+ if (layout_entries[entry].id == id)
+ return &layout_entries[entry];
+ }
+
+ return NULL;
+}
+
+static const struct pvr_fw_layout_entry *
+pvr_fw_find_private_data(struct pvr_device *pvr_dev)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u32 entry;
+
+ for (entry = 0; entry < num_layout_entries; entry++) {
+ if (layout_entries[entry].id == META_PRIVATE_DATA ||
+ layout_entries[entry].id == MIPS_PRIVATE_DATA ||
+ layout_entries[entry].id == RISCV_PRIVATE_DATA)
+ return &layout_entries[entry];
+ }
+
+ return NULL;
+}
+
+#define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64)
+
+/**
+ * pvr_fw_validate() - Parse firmware header and check compatibility
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL if firmware is incompatible.
+ */
+static int
+pvr_fw_validate(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ const struct firmware *firmware = pvr_dev->fw_dev.firmware;
+ const struct pvr_fw_layout_entry *layout_entries;
+ const struct pvr_fw_info_header *header;
+ const u8 *fw = firmware->data;
+ u32 fw_offset = firmware->size - SZ_4K;
+ u32 layout_table_size;
+ u32 entry;
+
+ if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE))
+ return -EINVAL;
+
+ header = (const struct pvr_fw_info_header *)&fw[fw_offset];
+
+ if (header->info_version != PVR_FW_INFO_VERSION) {
+ drm_err(drm_dev, "Unsupported fw info version %u\n",
+ header->info_version);
+ return -EINVAL;
+ }
+
+ if (header->header_len != sizeof(struct pvr_fw_info_header) ||
+ header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) ||
+ header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) {
+ drm_err(drm_dev, "FW info format mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ||
+ header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION ||
+ header->fw_version_major == 0) {
+ drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n",
+ header->fw_version_major, header->fw_version_minor,
+ header->fw_version_build,
+ (header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : "");
+ return -EINVAL;
+ }
+
+ if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) {
+ struct pvr_gpu_id fw_gpu_id;
+
+ packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id);
+ drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n",
+ fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c,
+ pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c);
+ return -EINVAL;
+ }
+
+ fw_offset += header->header_len;
+ layout_table_size =
+ header->layout_entry_size * header->layout_entry_num;
+ if ((fw_offset + layout_table_size) > firmware->size)
+ return -EINVAL;
+
+ layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
+ for (entry = 0; entry < header->layout_entry_num; entry++) {
+ u32 start_addr = layout_entries[entry].base_addr;
+ u32 end_addr = start_addr + layout_entries[entry].alloc_size;
+
+ if (start_addr >= end_addr)
+ return -EINVAL;
+ }
+
+ drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major,
+ header->fw_version_minor, header->fw_version_build);
+
+ pvr_dev->fw_version.major = header->fw_version_major;
+ pvr_dev->fw_version.minor = header->fw_version_minor;
+
+ pvr_dev->fw_dev.header = header;
+ pvr_dev->fw_dev.layout_entries = layout_entries;
+
+ return 0;
+}
+
+static int
+pvr_fw_get_device_info(struct pvr_device *pvr_dev)
+{
+ const struct firmware *firmware = pvr_dev->fw_dev.firmware;
+ struct pvr_fw_device_info_header *header;
+ const u8 *fw = firmware->data;
+ const u64 *dev_info;
+ u32 fw_offset;
+
+ fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size;
+
+ header = (struct pvr_fw_device_info_header *)&fw[fw_offset];
+ dev_info = (u64 *)(header + 1);
+
+ pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size);
+ dev_info += header->brn_mask_size;
+
+ pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size);
+ dev_info += header->ern_mask_size;
+
+ return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size,
+ header->feature_param_size);
+}
+
+static void
+layout_get_sizes(struct pvr_device *pvr_dev)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+ fw_mem->code_alloc_size = 0;
+ fw_mem->data_alloc_size = 0;
+ fw_mem->core_code_alloc_size = 0;
+ fw_mem->core_data_alloc_size = 0;
+
+ /* Extract section sizes from FW layout table. */
+ for (u32 entry = 0; entry < num_layout_entries; entry++) {
+ switch (layout_entries[entry].type) {
+ case FW_CODE:
+ fw_mem->code_alloc_size += layout_entries[entry].alloc_size;
+ break;
+ case FW_DATA:
+ fw_mem->data_alloc_size += layout_entries[entry].alloc_size;
+ break;
+ case FW_COREMEM_CODE:
+ fw_mem->core_code_alloc_size +=
+ layout_entries[entry].alloc_size;
+ break;
+ case FW_COREMEM_DATA:
+ fw_mem->core_data_alloc_size +=
+ layout_entries[entry].alloc_size;
+ break;
+ case NONE:
+ break;
+ }
+ }
+}
+
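+/**
+ * pvr_fw_find_mmu_segment() - Translate a FW address range to a host pointer
+ * @pvr_dev: Device pointer.
+ * @addr: Start of range, in FW address space.
+ * @size: Size of range, in bytes.
+ * @fw_code_ptr: Host pointer to FW code section.
+ * @fw_data_ptr: Host pointer to FW data section.
+ * @fw_core_code_ptr: Host pointer to FW core code section.
+ * @fw_core_data_ptr: Host pointer to FW core data section.
+ * @host_addr_out: Location to store the translated host pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if the range is empty, overflows, or does not fall entirely
+ *   within a single layout entry of a known type.
+ */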
+int
+pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
+ void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
+ void **host_addr_out)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u32 end_addr = addr + size;
+ int entry = 0;
+
+	/* Reject empty ranges, and ranges where addr + size overflows. */
+ if (end_addr <= addr)
+ return -EINVAL;
+
+ for (entry = 0; entry < num_layout_entries; entry++) {
+ u32 entry_start_addr = layout_entries[entry].base_addr;
+ u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
+
+ if (addr >= entry_start_addr && addr < entry_end_addr &&
+ end_addr > entry_start_addr && end_addr <= entry_end_addr) {
+ switch (layout_entries[entry].type) {
+ case FW_CODE:
+ *host_addr_out = fw_code_ptr;
+ break;
+
+ case FW_DATA:
+ *host_addr_out = fw_data_ptr;
+ break;
+
+ case FW_COREMEM_CODE:
+ *host_addr_out = fw_core_code_ptr;
+ break;
+
+ case FW_COREMEM_DATA:
+ *host_addr_out = fw_core_data_ptr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+			/*
+			 * Convert the FW address to an offset within the
+			 * section, then apply it to the host-side pointer for
+			 * that section so the caller can write directly to
+			 * the mapped memory.
+			 */
+			addr -= layout_entries[entry].base_addr;
+			addr += layout_entries[entry].alloc_offset;
+
+			*(u8 **)host_addr_out += addr;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->fwif_connection_ctl =
+ pvr_fw_object_create_and_map_offset(pvr_dev,
+ fw_dev->fw_heap_info.config_offset +
+ PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET,
+ sizeof(*fw_dev->fwif_connection_ctl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL,
+ &fw_dev->mem.fwif_connection_ctl_obj);
+ if (IS_ERR(fw_dev->fwif_connection_ctl)) {
+ drm_err(drm_dev,
+ "Unable to allocate FWIF connection control memory\n");
+ return PTR_ERR(fw_dev->fwif_connection_ctl);
+ }
+
+ return 0;
+}
+
+static void
+pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj);
+}
+
+static void
+fw_osinit_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_osinit *fwif_osinit = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+
+ fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr;
+ fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr;
+ pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj,
+ &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);
+
+ fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
+ fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;
+
+ fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
+ fwif_osinit->work_est_firmware_ccb_fw_addr = 0;
+
+ pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj,
+ &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);
+
+ fwif_osinit->hwr_debug_dump_limit = 0;
+
+ rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc);
+ rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc);
+}
+
+static void
+fw_osdata_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_osdata *fwif_osdata = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+ pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr);
+}
+
+static void
+fw_fault_page_init(void *cpu_ptr, void *priv)
+{
+ u32 *fault_page = cpu_ptr;
+
+ for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
+ fault_page[i] = 0xdeadbee0;
+}
+
+static void
+fw_sysinit_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ dma_addr_t fault_dma_addr = 0;
+ u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);
+
+ WARN_ON(!clock_speed_hz);
+
+ WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr));
+ fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;
+
+ fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
+ fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;
+
+ pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
+ &fwif_sysinit->trace_buf_ctl_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
+ pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj,
+ &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
+ if (fw_mem->core_data_obj) {
+ pvr_fw_object_get_fw_addr(fw_mem->core_data_obj,
+ &fwif_sysinit->coremem_data_store.fw_addr);
+ }
+
+ /* Currently unsupported. */
+ fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
+ fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;
+
+ /* Skip alignment checks. */
+ fwif_sysinit->align_checks = 0;
+
+ fwif_sysinit->filter_flags = 0;
+ fwif_sysinit->hw_perf_filter = 0;
+ fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
+ fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
+ fwif_sysinit->active_pm_latency_ms = 0;
+ fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
+ fwif_sysinit->firmware_started = false;
+ fwif_sysinit->marker_val = 1;
+
+ memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
+ sizeof(fwif_sysinit->bvnc_km_feature_flags));
+}
+
+#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4
+
+static void
+fw_sysdata_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ u32 slc_size_in_kilobytes = 0;
+ u32 config_flags = 0;
+
+ WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes));
+
+ if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB)
+ config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP;
+
+ fwif_sysdata->config_flags = config_flags;
+}
+
+static void
+fw_runtime_cfg_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr;
+ struct pvr_device *pvr_dev = priv;
+ u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk);
+
+ WARN_ON(!clock_speed_hz);
+
+ runtime_cfg->core_clock_speed = clock_speed_hz;
+ runtime_cfg->active_pm_latency_ms = 0;
+ runtime_cfg->active_pm_latency_persistant = true;
+ WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
+ &runtime_cfg->default_dusts_num_init) != 0);
+}
+
+static void
+fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr;
+
+ gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
+}
+
+static int
+pvr_fw_create_structures(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+ int err;
+
+ fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->power_sync_obj);
+ if (IS_ERR(fw_dev->power_sync)) {
+ drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
+ return PTR_ERR(fw_dev->power_sync);
+ }
+
+ fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->hwrinfobuf_obj);
+ if (IS_ERR(fw_dev->hwrinfobuf)) {
+ drm_err(drm_dev,
+ "Unable to allocate FW hwrinfobuf structure\n");
+ err = PTR_ERR(fw_dev->hwrinfobuf);
+ goto err_release_power_sync;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->mmucache_sync_obj);
+ if (err) {
+ drm_err(drm_dev,
+ "Unable to allocate MMU cache sync object\n");
+ goto err_release_hwrinfobuf;
+ }
+
+ fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev,
+ sizeof(*fw_dev->fwif_sysdata),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_sysdata_init, pvr_dev,
+ &fw_mem->sysdata_obj);
+ if (IS_ERR(fw_dev->fwif_sysdata)) {
+ drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
+ err = PTR_ERR(fw_dev->fwif_sysdata);
+ goto err_release_mmucache_sync_obj;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_fault_page_init, NULL, &fw_mem->fault_page_obj);
+ if (err) {
+ drm_err(drm_dev, "Unable to allocate FW fault page\n");
+ goto err_release_sysdata;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_gpu_util_fwcb),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj);
+ if (err) {
+ drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
+ goto err_release_fault_page;
+ }
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj);
+ if (err) {
+ drm_err(drm_dev, "Unable to allocate FW runtime config\n");
+ goto err_release_gpu_util_fwcb;
+ }
+
+ err = pvr_fw_trace_init(pvr_dev);
+ if (err)
+ goto err_release_runtime_cfg;
+
+ fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev,
+ sizeof(*fw_dev->fwif_osdata),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_osdata_init, pvr_dev,
+ &fw_mem->osdata_obj);
+ if (IS_ERR(fw_dev->fwif_osdata)) {
+ drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
+ err = PTR_ERR(fw_dev->fwif_osdata);
+ goto err_fw_trace_fini;
+ }
+
+ fw_dev->fwif_osinit =
+ pvr_fw_object_create_and_map_offset(pvr_dev,
+ fw_dev->fw_heap_info.config_offset +
+ PVR_ROGUE_FWIF_OSINIT_OFFSET,
+ sizeof(*fw_dev->fwif_osinit),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_osinit_init, pvr_dev, &fw_mem->osinit_obj);
+ if (IS_ERR(fw_dev->fwif_osinit)) {
+ drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
+ err = PTR_ERR(fw_dev->fwif_osinit);
+ goto err_release_osdata;
+ }
+
+ fw_dev->fwif_sysinit =
+ pvr_fw_object_create_and_map_offset(pvr_dev,
+ fw_dev->fw_heap_info.config_offset +
+ PVR_ROGUE_FWIF_SYSINIT_OFFSET,
+ sizeof(*fw_dev->fwif_sysinit),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj);
+ if (IS_ERR(fw_dev->fwif_sysinit)) {
+ drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
+ err = PTR_ERR(fw_dev->fwif_sysinit);
+ goto err_release_osinit;
+ }
+
+ return 0;
+
+err_release_osinit:
+ pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
+
+err_release_osdata:
+ pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
+
+err_fw_trace_fini:
+ pvr_fw_trace_fini(pvr_dev);
+
+err_release_runtime_cfg:
+ pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
+
+err_release_gpu_util_fwcb:
+ pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
+
+err_release_fault_page:
+ pvr_fw_object_destroy(fw_mem->fault_page_obj);
+
+err_release_sysdata:
+ pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
+
+err_release_mmucache_sync_obj:
+ pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
+
+err_release_hwrinfobuf:
+ pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
+
+err_release_power_sync:
+ pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
+
+ return err;
+}
+
+static void
+pvr_fw_destroy_structures(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+
+ pvr_fw_trace_fini(pvr_dev);
+ pvr_fw_object_destroy(fw_mem->runtime_cfg_obj);
+ pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj);
+ pvr_fw_object_destroy(fw_mem->fault_page_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj);
+
+ pvr_fw_object_destroy(fw_mem->mmucache_sync_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj);
+ pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj);
+}
+
+/**
+ * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
+ * arguments
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL if the firmware layout table has no private data section,
+ * * -%ENOMEM if allocating the driver-side copies of the FW sections fails, or
+ * * Any error returned by pvr_fw_object_create_and_map_offset() or
+ *   pvr_fw_object_create_and_map().
+ */
+static int
+pvr_fw_process(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+ const u8 *fw = pvr_dev->fw_dev.firmware->data;
+ const struct pvr_fw_layout_entry *private_data;
+ u8 *fw_code_ptr;
+ u8 *fw_data_ptr;
+ u8 *fw_core_code_ptr;
+ u8 *fw_core_data_ptr;
+ int err;
+
+ layout_get_sizes(pvr_dev);
+
+ private_data = pvr_fw_find_private_data(pvr_dev);
+ if (!private_data)
+ return -EINVAL;
+
+ /* Allocate and map memory for firmware sections. */
+
+ /*
+ * Code allocation must be at the start of the firmware heap, otherwise
+ * firmware processor will be unable to boot.
+ *
+ * This has the useful side-effect that for every other object in the
+ * driver, a firmware address of 0 is invalid.
+ */
+ fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->code_obj);
+ if (IS_ERR(fw_code_ptr)) {
+ drm_err(drm_dev, "Unable to allocate FW code memory\n");
+ return PTR_ERR(fw_code_ptr);
+ }
+
+ if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) {
+ u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
+
+ fw_data_ptr =
+ pvr_fw_object_create_and_map_offset(pvr_dev, base_addr,
+ fw_mem->data_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->data_obj);
+ } else {
+ fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->data_obj);
+ }
+ if (IS_ERR(fw_data_ptr)) {
+ drm_err(drm_dev, "Unable to allocate FW data memory\n");
+ err = PTR_ERR(fw_data_ptr);
+ goto err_free_fw_code_obj;
+ }
+
+ /* Core code and data sections are optional. */
+ if (fw_mem->core_code_alloc_size) {
+ fw_core_code_ptr =
+ pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->core_code_obj);
+ if (IS_ERR(fw_core_code_ptr)) {
+ drm_err(drm_dev,
+ "Unable to allocate FW core code memory\n");
+ err = PTR_ERR(fw_core_code_ptr);
+ goto err_free_fw_data_obj;
+ }
+ } else {
+ fw_core_code_ptr = NULL;
+ }
+
+ if (fw_mem->core_data_alloc_size) {
+ fw_core_data_ptr =
+ pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &fw_mem->core_data_obj);
+ if (IS_ERR(fw_core_data_ptr)) {
+ drm_err(drm_dev,
+ "Unable to allocate FW core data memory\n");
+ err = PTR_ERR(fw_core_data_ptr);
+ goto err_free_fw_core_code_obj;
+ }
+ } else {
+ fw_core_data_ptr = NULL;
+ }
+
+ fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL);
+ fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL);
+ if (fw_mem->core_code_alloc_size)
+ fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL);
+ if (fw_mem->core_data_alloc_size)
+ fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL);
+
+ if (!fw_mem->code || !fw_mem->data ||
+ (!fw_mem->core_code && fw_mem->core_code_alloc_size) ||
+ (!fw_mem->core_data && fw_mem->core_data_alloc_size)) {
+ err = -ENOMEM;
+ goto err_free_kdata;
+ }
+
+	err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw,
+					       fw_mem->code, fw_mem->data, fw_mem->core_code,
+					       fw_mem->core_data, fw_mem->core_code_alloc_size);
+	if (err)
+		goto err_free_kdata;
+
+	memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size);
+	memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size);
+	if (fw_mem->core_code)
+		memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size);
+	if (fw_mem->core_data)
+		memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size);
+
+	/*
+	 * Create the FWIF connection control object while the CPU mappings of
+	 * the section objects are still live, so a failure here can reuse the
+	 * common error path below to unmap and destroy them.
+	 */
+	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
+	if (err)
+		goto err_free_kdata;
+
+	/* We're finished with the firmware section memory on the CPU, unmap. */
+	if (fw_core_data_ptr)
+		pvr_fw_object_vunmap(fw_mem->core_data_obj);
+	if (fw_core_code_ptr)
+		pvr_fw_object_vunmap(fw_mem->core_code_obj);
+	pvr_fw_object_vunmap(fw_mem->data_obj);
+	pvr_fw_object_vunmap(fw_mem->code_obj);
+
+	return 0;
+
+err_free_kdata:
+	kfree(fw_mem->core_data);
+	kfree(fw_mem->core_code);
+	kfree(fw_mem->data);
+	kfree(fw_mem->code);
+
+ if (fw_core_data_ptr)
+ pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj);
+
+err_free_fw_core_code_obj:
+ if (fw_core_code_ptr)
+ pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj);
+
+err_free_fw_data_obj:
+ if (fw_data_ptr)
+ pvr_fw_object_vunmap(fw_mem->data_obj);
+ pvr_fw_object_destroy(fw_mem->data_obj);
+
+err_free_fw_code_obj:
+ if (fw_code_ptr)
+ pvr_fw_object_vunmap(fw_mem->code_obj);
+ pvr_fw_object_destroy(fw_mem->code_obj);
+
+ return err;
+}
+
+static int
+pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size)
+{
+ u8 *dest_ptr = pvr_fw_object_vmap(dest_obj);
+
+ if (IS_ERR(dest_ptr))
+ return PTR_ERR(dest_ptr);
+
+ memcpy(dest_ptr, src_ptr, size);
+
+ pvr_fw_object_vunmap(dest_obj);
+
+ return 0;
+}
+
+static int
+pvr_fw_reinit_code_data(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+ int err;
+
+ err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size);
+ if (err)
+ return err;
+
+ err = pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size);
+ if (err)
+ return err;
+
+ if (fw_mem->core_code) {
+ err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code,
+ fw_mem->core_code_alloc_size);
+ if (err)
+ return err;
+ }
+
+ if (fw_mem->core_data) {
+ err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data,
+ fw_mem->core_data_alloc_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+pvr_fw_cleanup(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+	pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+
+	kfree(fw_mem->core_data);
+	kfree(fw_mem->core_code);
+	kfree(fw_mem->data);
+	kfree(fw_mem->code);
+
+ if (fw_mem->core_code_obj)
+ pvr_fw_object_destroy(fw_mem->core_code_obj);
+ if (fw_mem->core_data_obj)
+ pvr_fw_object_destroy(fw_mem->core_data_obj);
+ pvr_fw_object_destroy(fw_mem->code_obj);
+ pvr_fw_object_destroy(fw_mem->data_obj);
+}
+
+/**
+ * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%ETIMEDOUT if firmware fails to boot within timeout.
+ */
+int
+pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
+{
+ ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
+ if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
+ * @pvr_dev: Target PowerVR device.
+ * @log2_size: Log2 of raw heap size.
+ * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
+ */
+void
+pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
+ fw_dev->fw_heap_info.log2_size = log2_size;
+ fw_dev->fw_heap_info.reserved_size = reserved_size;
+ fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
+ fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
+ fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
+ PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
+ fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
+ (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
+}
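+
+/*
+ * Worked example (illustrative numbers only): with log2_size = 25 and no
+ * reserved area, raw_size is 32M, offset_mask is 0x1ffffff, the config area
+ * begins at raw_size - 192k, and the allocatable main heap covers the
+ * remaining 32M - 192k starting at PVR_ROGUE_FW_MAIN_HEAP_BASE.
+ */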
+
+/**
+ * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must be called before querying device information.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if firmware validation fails.
+ */
+int
+pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_fw_validate(pvr_dev);
+ if (err)
+ return err;
+
+ return pvr_fw_get_device_info(pvr_dev);
+}
+
+/**
+ * pvr_fw_init() - Initialise and boot firmware
+ * @pvr_dev: Target PowerVR device
+ *
+ * On successful completion of the function the PowerVR device will be
+ * initialised and ready to use.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL on invalid firmware image,
+ * * -%ENOMEM on out of memory, or
+ * * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
+ */
+int
+pvr_fw_init(struct pvr_device *pvr_dev)
+{
+ u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
+ u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ int err;
+
+ if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META)
+ fw_dev->defs = &pvr_fw_defs_meta;
+ else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS)
+ fw_dev->defs = &pvr_fw_defs_mips;
+ else
+ return -EINVAL;
+
+ err = fw_dev->defs->init(pvr_dev);
+ if (err)
+ return err;
+
+ drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
+ fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
+ spin_lock_init(&fw_dev->fw_mm_lock);
+
+ INIT_LIST_HEAD(&fw_dev->fw_objs.list);
+ err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock);
+ if (err)
+ goto err_mm_takedown;
+
+ err = pvr_fw_process(pvr_dev);
+ if (err)
+ goto err_mm_takedown;
+
+ /* Initialise KCCB and FWCCB. */
+ err = pvr_kccb_init(pvr_dev);
+ if (err)
+ goto err_fw_cleanup;
+
+ err = pvr_fwccb_init(pvr_dev);
+ if (err)
+ goto err_kccb_fini;
+
+ /* Allocate memory for KCCB return slots. */
+ pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &pvr_dev->kccb.rtn_obj);
+ if (IS_ERR(pvr_dev->kccb.rtn)) {
+ err = PTR_ERR(pvr_dev->kccb.rtn);
+ goto err_fwccb_fini;
+ }
+
+ err = pvr_fw_create_structures(pvr_dev);
+ if (err)
+ goto err_kccb_rtn_release;
+
+ err = pvr_fw_start(pvr_dev);
+ if (err)
+ goto err_destroy_structures;
+
+ err = pvr_wait_for_fw_boot(pvr_dev);
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
+ goto err_fw_stop;
+ }
+
+ fw_dev->booted = true;
+
+ return 0;
+
+err_fw_stop:
+ pvr_fw_stop(pvr_dev);
+
+err_destroy_structures:
+ pvr_fw_destroy_structures(pvr_dev);
+
+err_kccb_rtn_release:
+ pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);
+
+err_fwccb_fini:
+ pvr_ccb_fini(&pvr_dev->fwccb);
+
+err_kccb_fini:
+ pvr_kccb_fini(pvr_dev);
+
+err_fw_cleanup:
+ pvr_fw_cleanup(pvr_dev);
+
+err_mm_takedown:
+ drm_mm_takedown(&fw_dev->fw_mm);
+
+ if (fw_dev->defs->fini)
+ fw_dev->defs->fini(pvr_dev);
+
+ return err;
+}
+
+/**
+ * pvr_fw_fini() - Shutdown firmware processor and free associated memory
+ * @pvr_dev: Target PowerVR device
+ */
+void
+pvr_fw_fini(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->booted = false;
+
+ pvr_fw_destroy_structures(pvr_dev);
+ pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj);
+
+ /*
+	 * Ensure the FWCCB worker has finished executing before destroying the FWCCB. The IRQ
+	 * handler has been unregistered at this point, so no new work can be submitted.
+ */
+ pvr_ccb_fini(&pvr_dev->fwccb);
+ pvr_kccb_fini(pvr_dev);
+ pvr_fw_cleanup(pvr_dev);
+
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+ WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list));
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ drm_mm_takedown(&fw_dev->fw_mm);
+
+ if (fw_dev->defs->fini)
+ fw_dev->defs->fini(pvr_dev);
+}
+
+/**
+ * pvr_fw_mts_schedule() - Schedule work via an MTS kick
+ * @pvr_dev: Target PowerVR device
+ * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
+ */
+void
+pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
+{
+ /* Ensure memory is flushed before kicking MTS. */
+ wmb();
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val);
+
+ /* Ensure the MTS kick goes through before continuing. */
+ mb();
+}
+
+/**
+ * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
+ * @pvr_dev: Target PowerVR device.
+ * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
+ * @fw_obj: Pointer to FW object containing object to cleanup.
+ * @offset: Offset within FW object of object to cleanup.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL if @type is not a supported cleanup type,
+ * * -%EBUSY if the object is busy,
+ * * -%ETIMEDOUT on timeout, or
+ * * -%EIO if the device is lost.
+ */
+int
+pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
+ u32 offset)
+{
+ struct rogue_fwif_kccb_cmd cmd;
+ int slot_nr;
+ int idx;
+ int err;
+ u32 rtn;
+
+ struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;
+
+ down_read(&pvr_dev->reset_sem);
+
+ if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
+ err = -EIO;
+ goto err_up_read;
+ }
+
+ cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
+ cmd.kccb_flags = 0;
+ cleanup_req->cleanup_type = type;
+
+ switch (type) {
+ case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
+ pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
+ &cleanup_req->cleanup_data.context_fw_addr);
+ break;
+ case ROGUE_FWIF_CLEANUP_HWRTDATA:
+ pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
+ &cleanup_req->cleanup_data.hwrt_data_fw_addr);
+ break;
+ case ROGUE_FWIF_CLEANUP_FREELIST:
+ pvr_fw_object_get_fw_addr_offset(fw_obj, offset,
+ &cleanup_req->cleanup_data.freelist_fw_addr);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_drm_dev_exit;
+ }
+
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
+ if (err)
+ goto err_drm_dev_exit;
+
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
+ if (err)
+ goto err_drm_dev_exit;
+
+ if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
+ err = -EBUSY;
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+err_up_read:
+ up_read(&pvr_dev->reset_sem);
+
+ return err;
+}
+
+/**
+ * pvr_fw_object_fw_map() - Map a FW object in firmware address space
+ * @pvr_dev: Device pointer.
+ * @fw_obj: FW object to map.
+ * @dev_addr: Desired address in device space, if a specific address is
+ * required. 0 otherwise.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if @fw_obj is already mapped but has no references, or
+ * * Any error returned by DRM.
+ */
+static int
+pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ int err;
+
+ spin_lock(&fw_dev->fw_mm_lock);
+
+ if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (!dev_addr) {
+ /*
+ * Allocate from the main heap only (firmware heap minus
+ * config space).
+ */
+ err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
+ gem_obj->size, 0, 0,
+ fw_dev->fw_heap_info.gpu_addr,
+ fw_dev->fw_heap_info.gpu_addr +
+ fw_dev->fw_heap_info.size, 0);
+ if (err)
+ goto err_unlock;
+ } else {
+ fw_obj->fw_mm_node.start = dev_addr;
+ fw_obj->fw_mm_node.size = gem_obj->size;
+ err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
+ if (err)
+ goto err_unlock;
+ }
+
+ spin_unlock(&fw_dev->fw_mm_lock);
+
+ /* Map object on GPU. */
+ err = fw_dev->defs->vm_map(pvr_dev, fw_obj);
+ if (err)
+ goto err_remove_node;
+
+ fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);
+
+ return 0;
+
+err_remove_node:
+ spin_lock(&fw_dev->fw_mm_lock);
+ drm_mm_remove_node(&fw_obj->fw_mm_node);
+
+err_unlock:
+ spin_unlock(&fw_dev->fw_mm_lock);
+
+ return err;
+}
+
+/**
+ * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object
+ * @fw_obj: FW object to unmap.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if object is not currently mapped.
+ */
+static int
+pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ fw_dev->defs->vm_unmap(pvr_dev, fw_obj);
+
+ spin_lock(&fw_dev->fw_mm_lock);
+
+ if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+ spin_unlock(&fw_dev->fw_mm_lock);
+ return -EINVAL;
+ }
+
+ drm_mm_remove_node(&fw_obj->fw_mm_node);
+
+ spin_unlock(&fw_dev->fw_mm_lock);
+
+ return 0;
+}
+
+static void *
+pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size,
+ u64 flags, u64 dev_addr,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **fw_obj_out)
+{
+ struct pvr_fw_object *fw_obj;
+ void *cpu_ptr;
+ int err;
+
+ /* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects. */
+ flags |= DRM_PVR_BO_PM_FW_PROTECT;
+
+ fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
+ if (!fw_obj)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fw_obj->node);
+ fw_obj->init = init;
+ fw_obj->init_priv = init_priv;
+
+ fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags);
+ if (IS_ERR(fw_obj->gem)) {
+ err = PTR_ERR(fw_obj->gem);
+ fw_obj->gem = NULL;
+ goto err_put_object;
+ }
+
+ err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr);
+ if (err)
+ goto err_put_object;
+
+ cpu_ptr = pvr_fw_object_vmap(fw_obj);
+ if (IS_ERR(cpu_ptr)) {
+ err = PTR_ERR(cpu_ptr);
+ goto err_put_object;
+ }
+
+ *fw_obj_out = fw_obj;
+
+ if (fw_obj->init)
+ fw_obj->init(cpu_ptr, fw_obj->init_priv);
+
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+ list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list);
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ return cpu_ptr;
+
+err_put_object:
+ pvr_fw_object_destroy(fw_obj);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_fw_object_create() - Create a FW object and map to firmware
+ * @pvr_dev: PowerVR device pointer.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @init: Initialisation callback.
+ * @init_priv: Private pointer to pass to initialisation callback.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS
+ * set.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_fw_object_create_and_map_common().
+ */
+int
+pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv), void *init_priv,
+ struct pvr_fw_object **fw_obj_out)
+{
+ void *cpu_ptr;
+
+ cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
+ fw_obj_out);
+ if (IS_ERR(cpu_ptr))
+ return PTR_ERR(cpu_ptr);
+
+ pvr_fw_object_vunmap(*fw_obj_out);
+
+ return 0;
+}
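+
+/*
+ * A minimal usage sketch, mirroring the runtime config allocation in
+ * pvr_fw_create_structures() above (for illustration only):
+ *
+ *	struct pvr_fw_object *obj;
+ *	int err;
+ *
+ *	err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg),
+ *				   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ *				   fw_runtime_cfg_init, pvr_dev, &obj);
+ *
+ * The object is mapped into firmware address space, but no CPU mapping is
+ * retained; use pvr_fw_object_create_and_map() when one is needed.
+ */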
+
+/**
+ * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU
+ * @pvr_dev: PowerVR device pointer.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @init: Initialisation callback.
+ * @init_priv: Private pointer to pass to initialisation callback.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS
+ * set.
+ *
+ * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
+ * mapping.
+ *
+ * Returns:
+ * * Pointer to CPU mapping of newly created object, or
+ * * Any error returned by pvr_fw_object_create(), or
+ * * Any error returned by pvr_fw_object_vmap().
+ */
+void *
+pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **fw_obj_out)
+{
+ return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv,
+ fw_obj_out);
+}
+
+/**
+ * pvr_fw_object_create_and_map_offset() - Create a FW object and map to
+ * firmware at the provided offset and to the CPU.
+ * @pvr_dev: PowerVR device pointer.
+ * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @init: Initialisation callback.
+ * @init_priv: Private pointer to pass to initialisation callback.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS
+ * set.
+ *
+ * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
+ * mapping.
+ *
+ * Returns:
+ * * Pointer to CPU mapping of newly created object, or
+ * * Any error returned by pvr_fw_object_create(), or
+ * * Any error returned by pvr_fw_object_vmap().
+ */
+void *
+pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev,
+ u32 dev_offset, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **fw_obj_out)
+{
+ u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;
+
+ return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv,
+ fw_obj_out);
+}
+
+/**
+ * pvr_fw_object_destroy() - Destroy a pvr_fw_object
+ * @fw_obj: Pointer to object to destroy.
+ */
+void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
+
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+ list_del(&fw_obj->node);
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+ /* If we can't unmap, leak the memory. */
+ if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj)))
+ return;
+ }
+
+ if (fw_obj->gem)
+ pvr_gem_object_put(fw_obj->gem);
+
+ kfree(fw_obj);
+}
+
+/**
+ * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with
+ * given offset.
+ * @fw_obj: Pointer to object.
+ * @offset: Desired offset from start of object.
+ * @fw_addr_out: Location to store address to.
+ */
+void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);
+
+ *fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
+}
+
+/**
+ * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
+ * structures
+ * @pvr_dev: Device pointer
+ *
+ * If this function returns an error then the caller must regard the device as lost.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_fw_reinit_code_data().
+ */
+int
+pvr_fw_hard_reset(struct pvr_device *pvr_dev)
+{
+ struct list_head *pos;
+ int err;
+
+ /* Reset all FW objects */
+ mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) {
+ struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
+ void *cpu_ptr = pvr_fw_object_vmap(fw_obj);
+
+ WARN_ON(IS_ERR(cpu_ptr));
+
+ if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) {
+ memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem));
+
+ if (fw_obj->init)
+ fw_obj->init(cpu_ptr, fw_obj->init_priv);
+ }
+
+ pvr_fw_object_vunmap(fw_obj);
+ }
+
+ mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+ err = pvr_fw_reinit_code_data(pvr_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
new file mode 100644
index 000000000..b7966bd57
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_H
+#define PVR_FW_H
+
+#include "pvr_fw_info.h"
+#include "pvr_fw_trace.h"
+#include "pvr_gem.h"
+
+#include <drm/drm_mm.h>
+
+#include <linux/types.h>
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_vm.h". */
+struct pvr_vm_context;
+
+#define ROGUE_FWIF_FWCCB_NUMCMDS_LOG2 5
+
+#define ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT 7
+
+/**
+ * struct pvr_fw_object - container for firmware memory allocations
+ */
+struct pvr_fw_object {
+ /** @ref_count: FW object reference counter. */
+ struct kref ref_count;
+
+ /** @gem: GEM object backing the FW object. */
+ struct pvr_gem_object *gem;
+
+ /**
+	 * @fw_mm_node: Node representing mapping in FW address space.
+	 *              &struct pvr_fw_device->fw_mm_lock must be held when writing.
+ */
+ struct drm_mm_node fw_mm_node;
+
+ /**
+	 * @fw_addr_offset: Virtual address offset of firmware mapping. Only
+	 *                  valid while the object is mapped in firmware
+	 *                  address space (i.e. @fw_mm_node is allocated).
+ */
+ u32 fw_addr_offset;
+
+ /**
+ * @init: Initialisation callback. Will be called on object creation and FW hard reset.
+ * Object will have been zeroed before this is called.
+ */
+ void (*init)(void *cpu_ptr, void *priv);
+
+ /** @init_priv: Private data for initialisation callback. */
+ void *init_priv;
+
+ /** @node: Node for firmware object list. */
+ struct list_head node;
+};
+
+/**
+ * struct pvr_fw_defs - FW processor function table and static definitions
+ */
+struct pvr_fw_defs {
+ /**
+ * @init:
+ *
+ * FW processor specific initialisation.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must call pvr_fw_heap_calculate() to initialise the firmware heap for this
+ * FW processor.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any appropriate error on failure.
+ */
+ int (*init)(struct pvr_device *pvr_dev);
+
+ /**
+ * @fini:
+ *
+ * FW processor specific finalisation.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is optional.
+ */
+ void (*fini)(struct pvr_device *pvr_dev);
+
+ /**
+ * @fw_process:
+ *
+ * Load and process firmware image.
+ * @pvr_dev: Target PowerVR device.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to firmware code section.
+ * @fw_data_ptr: Pointer to firmware data section.
+ * @fw_core_code_ptr: Pointer to firmware core code section. May be %NULL.
+ * @fw_core_data_ptr: Pointer to firmware core data section. May be %NULL.
+ * @core_code_alloc_size: Total allocation size of core code section.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any appropriate error on failure.
+ */
+ int (*fw_process)(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+ u8 *fw_core_data_ptr, u32 core_code_alloc_size);
+
+ /**
+ * @vm_map:
+ *
+ * Map FW object into FW processor address space.
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any appropriate error on failure.
+ */
+ int (*vm_map)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+ /**
+ * @vm_unmap:
+ *
+ * Unmap FW object from FW processor address space.
+ * @pvr_dev: Target PowerVR device.
+	 * @fw_obj: FW object to unmap.
+ *
+ * This function is mandatory.
+ */
+ void (*vm_unmap)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+ /**
+ * @get_fw_addr_with_offset:
+ *
+ * Called to get address of object in firmware address space, with offset.
+ * @fw_obj: Pointer to object.
+ * @offset: Desired offset from start of object.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * Address in firmware address space.
+ */
+ u32 (*get_fw_addr_with_offset)(struct pvr_fw_object *fw_obj, u32 offset);
+
+ /**
+ * @wrapper_init:
+ *
+ * Called to initialise FW wrapper.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * 0 on success.
+ * * Any appropriate error on failure.
+ */
+ int (*wrapper_init)(struct pvr_device *pvr_dev);
+
+ /**
+ * @has_fixed_data_addr:
+ *
+ * Called to check if firmware fixed data must be loaded at the address given by the
+ * firmware layout table.
+ *
+ * This function is mandatory.
+ *
+ * Returns:
+ * * %true if firmware fixed data must be loaded at the address given by the firmware
+ * layout table.
+ * * %false otherwise.
+ */
+ bool (*has_fixed_data_addr)(void);
+
+ /**
+ * @irq: FW Interrupt information.
+ *
+	 * These are processor dependent, and should be initialised by the
+	 * processor backend in its &struct pvr_fw_defs->init() callback.
+ */
+ struct {
+ /** @enable_reg: FW interrupt enable register. */
+ u32 enable_reg;
+
+ /** @status_reg: FW interrupt status register. */
+ u32 status_reg;
+
+ /**
+ * @clear_reg: FW interrupt clear register.
+ *
+		 * If @status_reg == @clear_reg, we clear by writing a bit to
+		 * zero, otherwise we clear by writing a bit to one.
+ */
+ u32 clear_reg;
+
+ /** @event_mask: Bitmask of events to listen for. */
+ u32 event_mask;
+
+ /** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
+ u32 clear_mask;
+ } irq;
+};
+
+/**
+ * struct pvr_fw_mem - FW memory allocations
+ */
+struct pvr_fw_mem {
+ /** @code_obj: Object representing firmware code. */
+ struct pvr_fw_object *code_obj;
+
+ /** @data_obj: Object representing firmware data. */
+ struct pvr_fw_object *data_obj;
+
+ /**
+ * @core_code_obj: Object representing firmware core code. May be
+ * %NULL if firmware does not contain this section.
+ */
+ struct pvr_fw_object *core_code_obj;
+
+ /**
+ * @core_data_obj: Object representing firmware core data. May be
+ * %NULL if firmware does not contain this section.
+ */
+ struct pvr_fw_object *core_data_obj;
+
+ /** @code: Driver-side copy of firmware code. */
+ u8 *code;
+
+ /** @data: Driver-side copy of firmware data. */
+ u8 *data;
+
+ /**
+ * @core_code: Driver-side copy of firmware core code. May be %NULL if firmware does not
+ * contain this section.
+ */
+ u8 *core_code;
+
+ /**
+ * @core_data: Driver-side copy of firmware core data. May be %NULL if firmware does not
+ * contain this section.
+ */
+ u8 *core_data;
+
+ /** @code_alloc_size: Allocation size of firmware code section. */
+ u32 code_alloc_size;
+
+ /** @data_alloc_size: Allocation size of firmware data section. */
+ u32 data_alloc_size;
+
+ /** @core_code_alloc_size: Allocation size of firmware core code section. */
+ u32 core_code_alloc_size;
+
+ /** @core_data_alloc_size: Allocation size of firmware core data section. */
+ u32 core_data_alloc_size;
+
+ /**
+ * @fwif_connection_ctl_obj: Object representing FWIF connection control
+ * structure.
+ */
+ struct pvr_fw_object *fwif_connection_ctl_obj;
+
+ /** @osinit_obj: Object representing FW OSINIT structure. */
+ struct pvr_fw_object *osinit_obj;
+
+ /** @sysinit_obj: Object representing FW SYSINIT structure. */
+ struct pvr_fw_object *sysinit_obj;
+
+ /** @osdata_obj: Object representing FW OSDATA structure. */
+ struct pvr_fw_object *osdata_obj;
+
+ /** @hwrinfobuf_obj: Object representing FW hwrinfobuf structure. */
+ struct pvr_fw_object *hwrinfobuf_obj;
+
+ /** @sysdata_obj: Object representing FW SYSDATA structure. */
+ struct pvr_fw_object *sysdata_obj;
+
+ /** @power_sync_obj: Object representing power sync state. */
+ struct pvr_fw_object *power_sync_obj;
+
+ /** @fault_page_obj: Object representing FW fault page. */
+ struct pvr_fw_object *fault_page_obj;
+
+ /** @gpu_util_fwcb_obj: Object representing FW GPU utilisation control structure. */
+ struct pvr_fw_object *gpu_util_fwcb_obj;
+
+ /** @runtime_cfg_obj: Object representing FW runtime config structure. */
+ struct pvr_fw_object *runtime_cfg_obj;
+
+ /** @mmucache_sync_obj: Object used as the sync parameter in an MMU cache operation. */
+ struct pvr_fw_object *mmucache_sync_obj;
+};
+
+struct pvr_fw_device {
+ /** @firmware: Handle to the firmware loaded into the device. */
+ const struct firmware *firmware;
+
+ /** @header: Pointer to firmware header. */
+ const struct pvr_fw_info_header *header;
+
+ /** @layout_entries: Pointer to firmware layout. */
+ const struct pvr_fw_layout_entry *layout_entries;
+
+ /** @mem: Structure containing objects representing firmware memory allocations. */
+ struct pvr_fw_mem mem;
+
+ /** @booted: %true if the firmware has been booted, %false otherwise. */
+ bool booted;
+
+ /**
+ * @processor_type: FW processor type for this device. Must be one of
+ * %PVR_FW_PROCESSOR_TYPE_*.
+ */
+ u16 processor_type;
+
+	/** @defs: Function table for the FW processor used by this device. */
+ const struct pvr_fw_defs *defs;
+
+ /** @processor_data: Pointer to data specific to FW processor. */
+ union {
+ /** @mips_data: Pointer to MIPS-specific data. */
+ struct pvr_fw_mips_data *mips_data;
+ } processor_data;
+
+ /** @fw_heap_info: Firmware heap information. */
+ struct {
+ /** @gpu_addr: Base address of firmware heap in GPU address space. */
+ u64 gpu_addr;
+
+ /** @size: Size of main area of heap. */
+ u32 size;
+
+ /** @offset_mask: Mask for offsets within FW heap. */
+ u32 offset_mask;
+
+ /** @raw_size: Raw size of heap, including reserved areas. */
+ u32 raw_size;
+
+ /** @log2_size: Log2 of raw size of heap. */
+ u32 log2_size;
+
+ /** @config_offset: Offset of config area within heap. */
+ u32 config_offset;
+
+ /** @reserved_size: Size of reserved area in heap. */
+ u32 reserved_size;
+ } fw_heap_info;
+
+ /** @fw_mm: Firmware address space allocator. */
+ struct drm_mm fw_mm;
+
+ /** @fw_mm_lock: Lock protecting access to &fw_mm. */
+ spinlock_t fw_mm_lock;
+
+ /** @fw_mm_base: Base address of address space managed by @fw_mm. */
+ u64 fw_mm_base;
+
+ /**
+ * @fwif_connection_ctl: Pointer to CPU mapping of FWIF connection
+ * control structure.
+ */
+ struct rogue_fwif_connection_ctl *fwif_connection_ctl;
+
+ /** @fwif_sysinit: Pointer to CPU mapping of FW SYSINIT structure. */
+ struct rogue_fwif_sysinit *fwif_sysinit;
+
+ /** @fwif_sysdata: Pointer to CPU mapping of FW SYSDATA structure. */
+ struct rogue_fwif_sysdata *fwif_sysdata;
+
+ /** @fwif_osinit: Pointer to CPU mapping of FW OSINIT structure. */
+ struct rogue_fwif_osinit *fwif_osinit;
+
+ /** @fwif_osdata: Pointer to CPU mapping of FW OSDATA structure. */
+ struct rogue_fwif_osdata *fwif_osdata;
+
+ /** @power_sync: Pointer to CPU mapping of power sync state. */
+ u32 *power_sync;
+
+ /** @hwrinfobuf: Pointer to CPU mapping of FW HWR info buffer. */
+ struct rogue_fwif_hwrinfobuf *hwrinfobuf;
+
+ /** @fw_trace: Device firmware trace buffer state. */
+ struct pvr_fw_trace fw_trace;
+
+ /** @fw_objs: Structure tracking FW objects. */
+ struct {
+ /** @list: Head of FW object list. */
+ struct list_head list;
+
+ /** @lock: Lock protecting access to FW object list. */
+ struct mutex lock;
+ } fw_objs;
+};
+
+#define pvr_fw_irq_read_reg(pvr_dev, name) \
+ pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg)
+
+#define pvr_fw_irq_write_reg(pvr_dev, name, value) \
+ pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value)
+
+#define pvr_fw_irq_pending(pvr_dev) \
+ (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask)
+
+#define pvr_fw_irq_clear(pvr_dev) \
+ pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask)
+
+#define pvr_fw_irq_enable(pvr_dev) \
+ pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask)
+
+#define pvr_fw_irq_disable(pvr_dev) \
+ pvr_fw_irq_write_reg(pvr_dev, enable, 0)
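+
+/*
+ * For illustration, pvr_fw_irq_pending(pvr_dev) effectively expands to:
+ *
+ *	pvr_cr_read32(pvr_dev, pvr_dev->fw_dev.defs->irq.status_reg) &
+ *		pvr_dev->fw_dev.defs->irq.event_mask
+ */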
+
+extern const struct pvr_fw_defs pvr_fw_defs_meta;
+extern const struct pvr_fw_defs pvr_fw_defs_mips;
+
+int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);
+int pvr_fw_init(struct pvr_device *pvr_dev);
+void pvr_fw_fini(struct pvr_device *pvr_dev);
+
+int pvr_wait_for_fw_boot(struct pvr_device *pvr_dev);
+
+int
+pvr_fw_hard_reset(struct pvr_device *pvr_dev);
+
+void pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val);
+
+void
+pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size);
+
+const struct pvr_fw_layout_entry *
+pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id);
+int
+pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr,
+ void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr,
+ void **host_addr_out);
+
+int
+pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
+ u32 offset);
+
+int pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv), void *init_priv,
+ struct pvr_fw_object **pvr_obj_out);
+
+void *pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags,
+ void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **pvr_obj_out);
+
+void *
+pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev, u32 dev_offset, size_t size,
+ u64 flags, void (*init)(void *cpu_ptr, void *priv),
+ void *init_priv, struct pvr_fw_object **pvr_obj_out);
+
+static __always_inline void *
+pvr_fw_object_vmap(struct pvr_fw_object *fw_obj)
+{
+ return pvr_gem_object_vmap(fw_obj->gem);
+}
+
+static __always_inline void
+pvr_fw_object_vunmap(struct pvr_fw_object *fw_obj)
+{
+ pvr_gem_object_vunmap(fw_obj->gem);
+}
+
+void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj);
+
+static __always_inline void
+pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj)
+{
+ pvr_fw_object_vunmap(fw_obj);
+ pvr_fw_object_destroy(fw_obj);
+}
+
+/**
+ * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware
+ * object.
+ * @fw_obj: Pointer to object to lookup address in.
+ * @offset: Offset within object to lookup address at.
+ * @dma_addr_out: Pointer to location to store DMA address.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if object is not currently backed, or if @offset is out of valid
+ * range for this object.
+ */
+static __always_inline int
+pvr_fw_object_get_dma_addr(struct pvr_fw_object *fw_obj, u32 offset, dma_addr_t *dma_addr_out)
+{
+ return pvr_gem_get_dma_addr(fw_obj->gem, offset, dma_addr_out);
+}
+
+void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out);
+
+static __always_inline void
+pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
+{
+ pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
+}
+
+#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h
new file mode 100644
index 000000000..c36394406
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_info.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_INFO_H
+#define PVR_FW_INFO_H
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned to this size.
+ */
+#define FW_BLOCK_SIZE SZ_4K
+
+/* Maximum number of entries in firmware layout table. */
+#define PVR_FW_INFO_MAX_NUM_ENTRIES 8
+
+enum pvr_fw_section_id {
+ META_CODE = 0,
+ META_PRIVATE_DATA,
+ META_COREMEM_CODE,
+ META_COREMEM_DATA,
+ MIPS_CODE,
+ MIPS_EXCEPTIONS_CODE,
+ MIPS_BOOT_CODE,
+ MIPS_PRIVATE_DATA,
+ MIPS_BOOT_DATA,
+ MIPS_STACK,
+ RISCV_UNCACHED_CODE,
+ RISCV_CACHED_CODE,
+ RISCV_PRIVATE_DATA,
+ RISCV_COREMEM_CODE,
+ RISCV_COREMEM_DATA,
+};
+
+enum pvr_fw_section_type {
+ NONE = 0,
+ FW_CODE,
+ FW_DATA,
+ FW_COREMEM_CODE,
+ FW_COREMEM_DATA,
+};
+
+/*
+ * FW binary format with FW info attached:
+ *
+ * Contents Offset
+ * +-----------------+
+ * | | 0
+ * | |
+ * | Original binary |
+ * | file |
+ * | (.ldr/.elf) |
+ * | |
+ * | |
+ * +-----------------+
+ * | Device info | FILE_SIZE - 4K - device_info_size
+ * +-----------------+
+ * | FW info header | FILE_SIZE - 4K
+ * +-----------------+
+ * | |
+ * | FW layout table |
+ * | |
+ * +-----------------+
+ * FILE_SIZE
+ */
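+
+/*
+ * Per the layout above, a loader can locate the metadata from the file size
+ * alone. Illustrative sketch (it assumes, as the diagram suggests, that the
+ * layout table immediately follows the info header):
+ *
+ *	const struct pvr_fw_info_header *hdr =
+ *		(const struct pvr_fw_info_header *)(fw + fw_size - SZ_4K);
+ *	const u8 *device_info = (const u8 *)hdr - hdr->device_info_size;
+ *	const struct pvr_fw_layout_entry *layout =
+ *		(const struct pvr_fw_layout_entry *)((const u8 *)hdr +
+ *						     hdr->header_len);
+ */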
+
+#define PVR_FW_INFO_VERSION 3
+
+#define PVR_FW_FLAGS_OPEN_SOURCE BIT(0)
+
+/**
+ * struct pvr_fw_info_header - Firmware header
+ */
+struct pvr_fw_info_header {
+ /** @info_version: FW info header version. */
+ u32 info_version;
+ /** @header_len: Header length. */
+ u32 header_len;
+ /** @layout_entry_num: Number of entries in the layout table. */
+ u32 layout_entry_num;
+ /** @layout_entry_size: Size of an entry in the layout table. */
+ u32 layout_entry_size;
+ /** @bvnc: GPU ID supported by firmware. */
+ aligned_u64 bvnc;
+ /** @fw_page_size: Page size of processor on which firmware executes. */
+ u32 fw_page_size;
+ /** @flags: Compatibility flags. */
+ u32 flags;
+ /** @fw_version_major: Firmware major version number. */
+ u16 fw_version_major;
+ /** @fw_version_minor: Firmware minor version number. */
+ u16 fw_version_minor;
+ /** @fw_version_build: Firmware build number. */
+ u32 fw_version_build;
+ /** @device_info_size: Size of device info structure. */
+ u32 device_info_size;
+ /** @padding: Padding. */
+ u32 padding;
+};
+
+/**
+ * struct pvr_fw_layout_entry - Entry in firmware layout table, describing a
+ * section of the firmware image
+ */
+struct pvr_fw_layout_entry {
+ /** @id: Section ID. */
+ enum pvr_fw_section_id id;
+ /** @type: Section type. */
+ enum pvr_fw_section_type type;
+ /** @base_addr: Base address of section in FW address space. */
+ u32 base_addr;
+ /** @max_size: Maximum size of section, in bytes. */
+ u32 max_size;
+ /** @alloc_size: Allocation size of section, in bytes. */
+ u32 alloc_size;
+ /** @alloc_offset: Allocation offset of section. */
+ u32 alloc_offset;
+};
+
+/**
+ * struct pvr_fw_device_info_header - Device information header.
+ */
+struct pvr_fw_device_info_header {
+ /** @brn_mask_size: BRN mask size (in u64s). */
+ u64 brn_mask_size;
+ /** @ern_mask_size: ERN mask size (in u64s). */
+ u64 ern_mask_size;
+ /** @feature_mask_size: Feature mask size (in u64s). */
+ u64 feature_mask_size;
+ /** @feature_param_size: Feature parameter size (in u64s). */
+ u64 feature_param_size;
+};
+
+#endif /* PVR_FW_INFO_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
new file mode 100644
index 000000000..c39beb70c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_meta.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */
+
+#define POLL_TIMEOUT_USEC 1000000
+
+/**
+ * pvr_meta_cr_read32() - Read a META register via the Slave Port
+ * @pvr_dev: Device pointer.
+ * @reg_addr: Address of register to read.
+ * @reg_value_out: Pointer to location to store register value.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out)
+{
+ int err;
+
+ /* Wait for Slave Port to be Ready. */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /* Issue a Read. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0,
+ reg_addr | ROGUE_CR_META_SP_MSLVCTRL0_RD_EN);
+ (void)pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0); /* Fence write. */
+
+ /* Wait for Slave Port to be Ready. */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+ ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ *reg_value_out = pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVDATAX);
+
+ return 0;
+}
+
+static int
+pvr_meta_wrapper_init(struct pvr_device *pvr_dev)
+{
+ u64 garten_config;
+
+ /* Configure META to Master boot. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_META_BOOT, ROGUE_CR_META_BOOT_MODE_EN);
+
+ /* Set Garten IDLE to META idle and set the Garten Wrapper BIF Fence address. */
+
+ /* Garten IDLE bit controlled by META. */
+ garten_config = ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+ /* The fence addr is set during the fw init sequence. */
+
+ /* Set PC = 0 for fences. */
+ garten_config &=
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+ garten_config |=
+ (u64)MMU_CONTEXT_MAPPING_FWPRIV
+ << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+ /* Set SLC DM=META. */
+ garten_config |= ((u64)ROGUE_FW_SEGMMU_META_BIFDM_ID)
+ << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, garten_config);
+
+ return 0;
+}
+
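+/*
+ * Boot config arguments are emitted as (parameter, data) dword pairs;
+ * pvr_meta_fw_process() terminates the register-write list with a (0, 0)
+ * pair.
+ */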
+static __always_inline void
+add_boot_arg(u32 **boot_conf, u32 param, u32 data)
+{
+ *(*boot_conf)++ = param;
+ *(*boot_conf)++ = data;
+}
+
+static int
+meta_ldr_cmd_loadmem(struct drm_device *drm_dev, const u8 *fw,
+ struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, const u32 fw_size)
+{
+ struct rogue_meta_ldr_l2_data_blk *l2_block =
+ (struct rogue_meta_ldr_l2_data_blk *)(fw +
+ l1_data->cmd_data[1]);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ u32 offset = l1_data->cmd_data[0];
+ u32 data_size;
+ void *write_addr;
+ int err;
+
+ /* Verify header is within bounds. */
+ if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
+ return -EINVAL;
+
+ data_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+
+ /* Verify data is within bounds. */
+ if (((u8 *)l2_block->block_data - fw) >= fw_size ||
+ ((((u8 *)l2_block->block_data) + data_size) - fw) >= fw_size)
+ return -EINVAL;
+
+ if (!ROGUE_META_IS_COREMEM_CODE(offset, coremem_size) &&
+ !ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
+ /* Global range is aliased to local range */
+ offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+ }
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, offset, data_size, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ offset, data_size);
+ return err;
+ }
+
+ memcpy(write_addr, l2_block->block_data, data_size);
+
+ return 0;
+}
+
+static int
+meta_ldr_cmd_zeromem(struct drm_device *drm_dev,
+ struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ u32 offset = l1_data->cmd_data[0];
+ u32 byte_count = l1_data->cmd_data[1];
+ void *write_addr;
+ int err;
+
+ if (ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
+ /* Cannot zero coremem directly. */
+ return 0;
+ }
+
+ /* Global range is aliased to local range */
+ offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, offset, byte_count, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ offset, byte_count);
+ return err;
+ }
+
+ memset(write_addr, 0, byte_count);
+
+ return 0;
+}
+
+static int
+meta_ldr_cmd_config(struct drm_device *drm_dev, const u8 *fw,
+ struct rogue_meta_ldr_l1_data_blk *l1_data,
+ const u32 fw_size, u32 **boot_conf_ptr)
+{
+ struct rogue_meta_ldr_l2_data_blk *l2_block =
+ (struct rogue_meta_ldr_l2_data_blk *)(fw +
+ l1_data->cmd_data[0]);
+ struct rogue_meta_ldr_cfg_blk *config_command;
+ u32 l2_block_size;
+ u32 curr_block_size = 0;
+ u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+
+ /* Verify block header is within bounds. */
+ if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
+ return -EINVAL;
+
+ l2_block_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+ config_command = (struct rogue_meta_ldr_cfg_blk *)l2_block->block_data;
+
+ if (((u8 *)config_command - fw) >= fw_size ||
+ ((((u8 *)config_command) + l2_block_size) - fw) >= fw_size)
+ return -EINVAL;
+
+ while (l2_block_size >= 12) {
+ if (config_command->type != ROGUE_META_LDR_CFG_WRITE)
+ return -EINVAL;
+
+ /*
+ * Only write to bootloader if we got a valid pointer to the FW
+ * code allocation.
+ */
+ if (boot_conf) {
+ u32 register_offset = config_command->block_data[0];
+ u32 register_value = config_command->block_data[1];
+
+ /* Do register write */
+ add_boot_arg(&boot_conf, register_offset,
+ register_value);
+ }
+
+ curr_block_size = 12;
+ l2_block_size -= curr_block_size;
+ config_command = (struct rogue_meta_ldr_cfg_blk
+ *)((uintptr_t)config_command +
+ curr_block_size);
+ }
+
+ if (boot_conf_ptr)
+ *boot_conf_ptr = boot_conf;
+
+ return 0;
+}
+
+/**
+ * process_ldr_command_stream() - Process LDR firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ * @boot_conf_ptr: Pointer to boot config argument pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in LDR command stream.
+ */
+static int
+process_ldr_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+ u8 *fw_core_data_ptr, u32 **boot_conf_ptr)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct rogue_meta_ldr_block_hdr *ldr_header =
+ (struct rogue_meta_ldr_block_hdr *)fw;
+ struct rogue_meta_ldr_l1_data_blk *l1_data =
+ (struct rogue_meta_ldr_l1_data_blk *)(fw + ldr_header->sl_data);
+ const u32 fw_size = pvr_dev->fw_dev.firmware->size;
+ int err;
+
+ u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+ u32 coremem_size;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, meta_coremem_size, &coremem_size);
+ if (err)
+ return err;
+
+ coremem_size *= SZ_1K;
+
+ while (l1_data) {
+ /* Verify block header is within bounds. */
+ if (((u8 *)l1_data - fw) >= fw_size || ((u8 *)(l1_data + 1) - fw) >= fw_size)
+ return -EINVAL;
+
+ if (ROGUE_META_LDR_BLK_IS_COMMENT(l1_data->cmd)) {
+ /* Don't process comment blocks */
+ goto next_block;
+ }
+
+ switch (l1_data->cmd & ROGUE_META_LDR_CMD_MASK) {
+ case ROGUE_META_LDR_CMD_LOADMEM:
+ err = meta_ldr_cmd_loadmem(drm_dev, fw, l1_data,
+ coremem_size,
+ fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr,
+ fw_core_data_ptr, fw_size);
+ if (err)
+ return err;
+ break;
+
+ case ROGUE_META_LDR_CMD_START_THREADS:
+ /* Don't process this block */
+ break;
+
+ case ROGUE_META_LDR_CMD_ZEROMEM:
+ err = meta_ldr_cmd_zeromem(drm_dev, l1_data,
+ coremem_size,
+ fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr,
+ fw_core_data_ptr);
+ if (err)
+ return err;
+ break;
+
+ case ROGUE_META_LDR_CMD_CONFIG:
+ err = meta_ldr_cmd_config(drm_dev, fw, l1_data, fw_size,
+ &boot_conf);
+ if (err)
+ return err;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+next_block:
+ if (l1_data->next == 0xFFFFFFFF)
+ break;
+
+ l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw +
+ l1_data->next);
+ }
+
+ if (boot_conf_ptr)
+ *boot_conf_ptr = boot_conf;
+
+ return 0;
+}
+
+static void
+configure_seg_id(u64 seg_out_addr, u32 seg_base, u32 seg_limit, u32 seg_id,
+ u32 **boot_conf_ptr)
+{
+ u32 seg_out_addr0 = seg_out_addr & 0x00000000FFFFFFFFUL;
+ u32 seg_out_addr1 = (seg_out_addr >> 32) & 0x00000000FFFFFFFFUL;
+ u32 *boot_conf = *boot_conf_ptr;
+
+ /* META segments have a minimum size. */
+ u32 limit_off = max(seg_limit, ROGUE_FW_SEGMMU_ALIGN);
+
+ /* The limit is an offset, therefore off = size - 1. */
+ limit_off -= 1;
+
+ seg_base |= ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE;
+
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_BASE(seg_id), seg_base);
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_LIMIT(seg_id), limit_off);
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA0(seg_id), seg_out_addr0);
+ add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA1(seg_id), seg_out_addr1);
+
+ *boot_conf_ptr = boot_conf;
+}
+
+static u64 get_fw_obj_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+ return fw_obj->fw_addr_offset + fw_dev->fw_heap_info.gpu_addr;
+}
+
+static void
+configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
+{
+ const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+ u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+ u64 seg_out_addr_top;
+ u32 i;
+
+ seg_out_addr_top =
+ ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
+ ROGUE_FW_SEGMMU_META_BIFDM_ID);
+
+ for (i = 0; i < num_layout_entries; i++) {
+ /*
+ * FW code is using the bootloader segment which is already
+ * configured on boot. FW coremem code and data don't use the
+ * segment MMU. Only the FW data segment needs to be configured.
+ */
+ if (layout_entries[i].type == FW_DATA) {
+ u32 seg_id = ROGUE_FW_SEGMMU_DATA_ID;
+ u64 seg_out_addr = get_fw_obj_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+ seg_out_addr += layout_entries[i].alloc_offset;
+ seg_out_addr |= seg_out_addr_top;
+
+ /* Write the sequence to the bootldr. */
+ configure_seg_id(seg_out_addr,
+ layout_entries[i].base_addr,
+ layout_entries[i].alloc_size, seg_id,
+ boot_conf_ptr);
+
+ break;
+ }
+ }
+}
+
+static void
+configure_meta_caches(u32 **boot_conf_ptr)
+{
+ u32 *boot_conf = *boot_conf_ptr;
+ u32 d_cache_t0, i_cache_t0;
+ u32 d_cache_t1, i_cache_t1;
+ u32 d_cache_t2, i_cache_t2;
+ u32 d_cache_t3, i_cache_t3;
+
+ /* Initialise I/Dcache settings */
+ d_cache_t0 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ d_cache_t1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ d_cache_t2 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ d_cache_t3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ i_cache_t0 = 0;
+ i_cache_t1 = 0;
+ i_cache_t2 = 0;
+ i_cache_t3 = 0;
+
+ d_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ i_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+
+ /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+ add_boot_arg(&boot_conf, META_CR_MMCU_LOCAL_EBCTRL,
+ META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+ META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+ /* Data cache partitioning thread 0 to 3 */
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(0), d_cache_t0);
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(1), d_cache_t1);
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(2), d_cache_t2);
+ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(3), d_cache_t3);
+
+ /* Enable data cache hits */
+ add_boot_arg(&boot_conf, META_CR_MMCU_DCACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ /* Instruction cache partitioning thread 0 to 3 */
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(0), i_cache_t0);
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(1), i_cache_t1);
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(2), i_cache_t2);
+ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(3), i_cache_t3);
+
+ /* Enable instruction cache hits */
+ add_boot_arg(&boot_conf, META_CR_MMCU_ICACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ add_boot_arg(&boot_conf, 0x040000C0, 0);
+
+ *boot_conf_ptr = boot_conf;
+}
+
+static int
+pvr_meta_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ u32 *boot_conf;
+ int err;
+
+ boot_conf = ((u32 *)fw_code_ptr) + ROGUE_FW_BOOTLDR_CONF_OFFSET;
+
+ /* Slave port and JTAG accesses are privileged. */
+ add_boot_arg(&boot_conf, META_CR_SYSC_JTAG_THREAD,
+ META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+ configure_seg_mmu(pvr_dev, &boot_conf);
+
+ /* Populate FW sections from LDR image. */
+ err = process_ldr_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+ fw_core_data_ptr, &boot_conf);
+ if (err)
+ return err;
+
+ configure_meta_caches(&boot_conf);
+
+ /* End argument list. */
+ add_boot_arg(&boot_conf, 0, 0);
+
+ if (fw_dev->mem.core_code_obj) {
+ u32 core_code_fw_addr;
+
+ pvr_fw_object_get_fw_addr(fw_dev->mem.core_code_obj, &core_code_fw_addr);
+ add_boot_arg(&boot_conf, core_code_fw_addr, core_code_alloc_size);
+ } else {
+ add_boot_arg(&boot_conf, 0, 0);
+ }
+ /* None of the cores supported by this driver have META DMA. */
+ add_boot_arg(&boot_conf, 0, 0);
+
+ return 0;
+}
+
+static int
+pvr_meta_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_META_SHIFT, 0);
+
+ return 0;
+}
+
+static u32
+pvr_meta_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ u32 fw_addr = fw_obj->fw_addr_offset + offset + ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS;
+
+ /* META cacheability is determined by address. */
+ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ fw_addr |= ROGUE_FW_SEGMMU_DATA_META_UNCACHED |
+ ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+
+ return fw_addr;
+}
+
+static int
+pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+ pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
+ fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_meta_has_fixed_data_addr(void)
+{
+ return false;
+}
+
+const struct pvr_fw_defs pvr_fw_defs_meta = {
+ .init = pvr_meta_init,
+ .fw_process = pvr_meta_fw_process,
+ .vm_map = pvr_meta_vm_map,
+ .vm_unmap = pvr_meta_vm_unmap,
+ .get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
+ .wrapper_init = pvr_meta_wrapper_init,
+ .has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
+ .irq = {
+ .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
+ .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
+ .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
+ .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
+ },
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.h b/drivers/gpu/drm/imagination/pvr_fw_meta.h
new file mode 100644
index 000000000..911ad700c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_META_H
+#define PVR_FW_META_H
+
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h */
+struct pvr_device;
+
+int pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out);
+
+#endif /* PVR_FW_META_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
new file mode 100644
index 000000000..0bed0257e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm_mips.h"
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_MIPS_BASE 0xC0000000
+#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
+#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
+
+/**
+ * process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in ELF command stream.
+ */
+static int
+process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+ u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+ struct elf32_hdr *header = (struct elf32_hdr *)fw;
+ struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 entry;
+ int err;
+
+ for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
+ void *write_addr;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (program_header->p_type != PT_LOAD)
+ continue;
+
+ err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr,
+ program_header->p_memsz, fw_code_ptr, fw_data_ptr,
+ fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+ if (err) {
+ drm_err(drm_dev,
+ "Addr 0x%x (size: %d) not found in any firmware segment",
+ program_header->p_vaddr, program_header->p_memsz);
+ return err;
+ }
+
+ /* Write to FW allocation only if available */
+ if (write_addr) {
+ memcpy(write_addr, fw + program_header->p_offset,
+ program_header->p_filesz);
+
+ memset((u8 *)write_addr + program_header->p_filesz, 0,
+ program_header->p_memsz - program_header->p_filesz);
+ }
+ }
+
+ return 0;
+}
+
+static int
+pvr_mips_init(struct pvr_device *pvr_dev)
+{
+ pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_MIPS_SHIFT, ROGUE_FW_HEAP_MIPS_RESERVED_SIZE);
+
+ return pvr_vm_mips_init(pvr_dev);
+}
+
+static void
+pvr_mips_fini(struct pvr_device *pvr_dev)
+{
+ pvr_vm_mips_fini(pvr_dev);
+}
+
+static int
+pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+ u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+ u32 core_code_alloc_size)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ const struct pvr_fw_layout_entry *boot_code_entry;
+ const struct pvr_fw_layout_entry *boot_data_entry;
+ const struct pvr_fw_layout_entry *exception_code_entry;
+ const struct pvr_fw_layout_entry *stack_entry;
+ struct rogue_mipsfw_boot_data *boot_data;
+ dma_addr_t dma_addr;
+ u32 page_nr;
+ int err;
+
+ err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+ fw_core_data_ptr);
+ if (err)
+ return err;
+
+ boot_code_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_BOOT_CODE);
+ boot_data_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_BOOT_DATA);
+ exception_code_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_EXCEPTIONS_CODE);
+ if (!boot_code_entry || !boot_data_entry || !exception_code_entry)
+ return -EINVAL;
+
+ WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.code_obj->gem, boot_code_entry->alloc_offset,
+ &mips_data->boot_code_dma_addr));
+ WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.data_obj->gem, boot_data_entry->alloc_offset,
+ &mips_data->boot_data_dma_addr));
+ WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.code_obj->gem,
+ exception_code_entry->alloc_offset,
+ &mips_data->exception_code_dma_addr));
+
+ stack_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_STACK);
+ if (!stack_entry)
+ return -EINVAL;
+
+ boot_data = (struct rogue_mipsfw_boot_data *)(fw_data_ptr + boot_data_entry->alloc_offset +
+ ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET);
+
+ WARN_ON(pvr_fw_object_get_dma_addr(fw_dev->mem.data_obj, stack_entry->alloc_offset,
+ &dma_addr));
+ boot_data->stack_phys_addr = dma_addr;
+
+ boot_data->reg_base = pvr_dev->regs_resource->start;
+
+ for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+ /* Firmware expects 4k pages, but host page size might be different. */
+ u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT;
+ u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK;
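+ /*
+  * e.g. with 16K host pages (PAGE_SHIFT == 14), FW page 5 (byte
+  * offset 0x5000) resolves to src_page_nr = 0x5000 >> 14 = 1 and
+  * page_offset = 0x5000 & 0x3fff = 0x1000.
+  */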
+
+ boot_data->pt_phys_addr[page_nr] = mips_data->pt_dma_addr[src_page_nr] +
+ page_offset;
+ }
+
+ boot_data->pt_log2_page_size = ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+ boot_data->pt_num_pages = ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES;
+ boot_data->reserved1 = 0;
+ boot_data->reserved2 = 0;
+
+ return 0;
+}
+
+static int
+pvr_mips_wrapper_init(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_mips_data *mips_data = pvr_dev->fw_dev.processor_data.mips_data;
+ const u64 remap_settings = ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE;
+ u32 phys_bus_width;
+
+ int err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+
+ if (WARN_ON(err))
+ return err;
+
+ /* Currently, MIPS FW is only supported with a physical bus width > 32 bits. */
+ if (WARN_ON(phys_bus_width <= 32))
+ return -EINVAL;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_CONFIG,
+ (ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE >>
+ ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+ ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+
+ /* Configure remap for boot code, boot data and exceptions code areas. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1,
+ ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN |
+ ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2,
+ (mips_data->boot_code_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+ if (PVR_HAS_QUIRK(pvr_dev, 63553)) {
+ /*
+ * This workaround is always required on 36-bit cores, to avoid continuous
+ * unmapped memory accesses to address 0x0.
+ */
+ WARN_ON(phys_bus_width != 36);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1,
+ ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2,
+ (mips_data->boot_code_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK) |
+ remap_settings);
+ }
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1,
+ ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN |
+ ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2,
+ (mips_data->boot_data_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1,
+ ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN |
+ ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN);
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2,
+ (mips_data->exception_code_dma_addr &
+ ~ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+ /* Garten IDLE bit controlled by MIPS. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG,
+ ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ /* Turn on the EJTAG probe. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_DEBUG_CONFIG, 0);
+
+ return 0;
+}
+
+static u32
+pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+ struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+
+ /* MIPS cacheability is determined by page table. */
+ return ((fw_obj->fw_addr_offset + offset) & pvr_dev->fw_dev.fw_heap_info.offset_mask) |
+ ROGUE_FW_HEAP_MIPS_BASE;
+}
+
+static bool
+pvr_mips_has_fixed_data_addr(void)
+{
+ return true;
+}
+
+const struct pvr_fw_defs pvr_fw_defs_mips = {
+ .init = pvr_mips_init,
+ .fini = pvr_mips_fini,
+ .fw_process = pvr_mips_fw_process,
+ .vm_map = pvr_vm_mips_map,
+ .vm_unmap = pvr_vm_mips_unmap,
+ .get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
+ .wrapper_init = pvr_mips_wrapper_init,
+ .has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
+ .irq = {
+ .enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
+ .status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
+ .clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+ .event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
+ .clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
+ },
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h
new file mode 100644
index 000000000..408dbe63a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_MIPS_H
+#define PVR_FW_MIPS_H
+
+#include "pvr_rogue_mips.h"
+
+#include <asm/page.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
+ >> PAGE_SHIFT)
+/**
+ * struct pvr_fw_mips_data - MIPS-specific data
+ */
+struct pvr_fw_mips_data {
+ /**
+ * @pt_pages: Pages containing MIPS pagetable.
+ */
+ struct page *pt_pages[PVR_MIPS_PT_PAGE_COUNT];
+
+ /** @pt: Pointer to CPU mapping of MIPS pagetable. */
+ u32 *pt;
+
+ /** @pt_dma_addr: DMA mappings of MIPS pagetable. */
+ dma_addr_t pt_dma_addr[PVR_MIPS_PT_PAGE_COUNT];
+
+ /** @boot_code_dma_addr: DMA address of MIPS boot code. */
+ dma_addr_t boot_code_dma_addr;
+
+ /** @boot_data_dma_addr: DMA address of MIPS boot data. */
+ dma_addr_t boot_data_dma_addr;
+
+ /** @exception_code_dma_addr: DMA address of MIPS exception code. */
+ dma_addr_t exception_code_dma_addr;
+
+ /** @cache_policy: Cache policy for this processor. */
+ u32 cache_policy;
+
+ /** @pfn_mask: PFN mask for MIPS pagetable. */
+ u32 pfn_mask;
+};
+
+#endif /* PVR_FW_MIPS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
new file mode 100644
index 000000000..36cec227c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_meta.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define POLL_TIMEOUT_USEC 1000000
+
+static void
+rogue_axi_ace_list_init(struct pvr_device *pvr_dev)
+{
+ /* Set up AXI-ACE config. Set everything to outer cache. */
+ u64 reg_val =
+ (3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+ (3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+ (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_AXI_ACE_LITE_CONFIGURATION, reg_val);
+}
+
+static void
+rogue_bif_init(struct pvr_device *pvr_dev)
+{
+ dma_addr_t pc_dma_addr;
+ u64 pc_addr;
+
+ /* Acquire the address of the Kernel Page Catalogue. */
+ pc_dma_addr = pvr_vm_get_page_table_root_addr(pvr_dev->kernel_vm_ctx);
+
+ /* Write the kernel catalogue base. */
+ pc_addr = ((((u64)pc_dma_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT) &
+ ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+
+ pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
+ pc_addr);
+}
+
+static int
+rogue_slc_init(struct pvr_device *pvr_dev)
+{
+ u16 slc_cache_line_size_bits;
+ u32 reg_val;
+ int err;
+
+ /*
+ * SLC Misc control.
+ *
+ * Note: This is a 64-bit register and we set only the lower 32 bits,
+ * leaving the top 32 bits (ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS)
+ * unchanged from the HW default.
+ */
+ reg_val = (pvr_cr_read32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC) &
+ ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+ ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits, &slc_cache_line_size_bits);
+ if (err)
+ return err;
+
+ /* Bypass burst combiner if SLC line size is smaller than 1024 bits. */
+ if (slc_cache_line_size_bits < 1024)
+ reg_val |= ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+
+ if (PVR_HAS_QUIRK(pvr_dev, 71242) && !PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support))
+ reg_val |= ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN;
+
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC, reg_val);
+
+ return 0;
+}
+
+/**
+ * pvr_fw_start() - Start FW processor and boot firmware
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by rogue_slc_init().
+ */
+int
+pvr_fw_start(struct pvr_device *pvr_dev)
+{
+ bool has_reset2 = PVR_HAS_FEATURE(pvr_dev, xe_tpu2);
+ u64 soft_reset_mask;
+ int err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
+ soft_reset_mask = ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+ else
+ soft_reset_mask = ROGUE_CR_SOFT_RESET_MASKFULL;
+
+ if (PVR_HAS_FEATURE(pvr_dev, sys_bus_secure_reset)) {
+ /*
+ * Disable the default sys_bus_secure protection to perform
+ * minimal setup.
+ */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE, 0);
+ (void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */
+ }
+
+ /* Set Rogue in soft-reset. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
+ if (has_reset2)
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, ROGUE_CR_SOFT_RESET2_MASKFULL);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline. */
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+ if (has_reset2)
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);
+
+ /* Take Rascal and Dust out of reset. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
+ soft_reset_mask ^ ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN);
+ if (has_reset2)
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0);
+
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+ if (has_reset2)
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);
+
+ /* Take everything out of reset but the FW processor. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_GARTEN_EN);
+ if (has_reset2)
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0);
+
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+ if (has_reset2)
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2);
+
+ err = rogue_slc_init(pvr_dev);
+ if (err)
+ goto err_reset;
+
+ /* Initialise Firmware wrapper. */
+ pvr_dev->fw_dev.defs->wrapper_init(pvr_dev);
+
+ /* We must initialise the AXI-ACE interface before the first BIF transaction. */
+ rogue_axi_ace_list_init(pvr_dev);
+
+ if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+ /* Initialise BIF. */
+ rogue_bif_init(pvr_dev);
+ }
+
+ /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */
+ udelay(3);
+
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, 0x0);
+ (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET);
+
+ /* ... and afterwards. */
+ udelay(3);
+
+ return 0;
+
+err_reset:
+ /* Put everything back into soft-reset. */
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask);
+
+ return err;
+}
+
+/**
+ * pvr_fw_stop() - Stop FW processor
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_fw_stop(struct pvr_device *pvr_dev)
+{
+ const u32 sidekick_idle_mask = ROGUE_CR_SIDEKICK_IDLE_MASKFULL &
+ ~(ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN |
+ ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN |
+ ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN);
+ bool skip_garten_idle = false;
+ u32 reg_value;
+ int err;
+
+ /*
+ * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+ * For cores with the LAYOUT_MARS feature, SIDEKICK would have been
+ * powered down by the FW.
+ */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+ sidekick_idle_mask, POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /* Unset MTS DM association with threads. */
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+ ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+ ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+ ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
+ pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+ ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL &
+ ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
+
+ /* Extra Idle checks. */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_STATUS_MMU, 0,
+ ROGUE_CR_BIF_STATUS_MMU_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_STATUS_MMU, 0,
+ ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ if (!PVR_HAS_FEATURE(pvr_dev, xt_top_infrastructure)) {
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_READS_EXT_STATUS, 0,
+ ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+ }
+
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_READS_EXT_STATUS, 0,
+ ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ err = pvr_cr_poll_reg64(pvr_dev, ROGUE_CR_SLC_STATUS1, 0,
+ ROGUE_CR_SLC_STATUS1_MASKFULL,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /*
+ * Wait for SLC to signal IDLE.
+ * For cores with the LAYOUT_MARS feature, SLC would have been powered
+ * down by the FW.
+ */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SLC_IDLE,
+ ROGUE_CR_SLC_IDLE_MASKFULL,
+ ROGUE_CR_SLC_IDLE_MASKFULL, POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ /*
+ * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+ * For cores with the LAYOUT_MARS feature, SIDEKICK would have been powered
+ * down by the FW.
+ */
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+ sidekick_idle_mask, POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+
+ if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_META) {
+ err = pvr_meta_cr_read32(pvr_dev, META_CR_TxVECINT_BHALT, &reg_value);
+ if (err)
+ return err;
+
+ /*
+ * Wait for Sidekick/Jones to signal IDLE including the Garten
+ * Wrapper if there is no debugger attached (TxVECINT_BHALT =
+ * 0x0).
+ */
+ if (reg_value)
+ skip_garten_idle = true;
+ }
+
+ if (!skip_garten_idle) {
+ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE,
+ ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+ ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+ POLL_TIMEOUT_USEC);
+ if (err)
+ return err;
+ }
+
+ if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
+ ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL);
+ else
+ pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_MASKFULL);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.h b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
new file mode 100644
index 000000000..a3cef061b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_STARTSTOP_H
+#define PVR_FW_STARTSTOP_H
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+int pvr_fw_start(struct pvr_device *pvr_dev);
+int pvr_fw_stop(struct pvr_device *pvr_dev);
+
+#endif /* PVR_FW_STARTSTOP_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
new file mode 100644
index 000000000..31199e45b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_sf.h"
+#include "pvr_fw_trace.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include <linux/build_bug.h>
+#include <linux/dcache.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+static void
+tracebuf_ctrl_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
+ struct pvr_fw_trace *fw_trace = priv;
+ u32 thread_nr;
+
+ tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+ tracebuf_ctrl->tracebuf_flags = 0;
+
+ if (fw_trace->group_mask)
+ tracebuf_ctrl->log_type = fw_trace->group_mask | ROGUE_FWIF_LOG_TYPE_TRACE;
+ else
+ tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct rogue_fwif_tracebuf_space *tracebuf_space =
+ &tracebuf_ctrl->tracebuf[thread_nr];
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ pvr_fw_object_get_fw_addr(trace_buffer->buf_obj,
+ &tracebuf_space->trace_buffer_fw_addr);
+
+ tracebuf_space->trace_buffer = trace_buffer->buf;
+ tracebuf_space->trace_pointer = 0;
+ }
+}
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ u32 thread_nr;
+ int err;
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ trace_buffer->buf =
+ pvr_fw_object_create_and_map(pvr_dev,
+ ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
+ sizeof(*trace_buffer->buf),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+ PVR_BO_FW_NO_CLEAR_ON_RESET,
+ NULL, NULL, &trace_buffer->buf_obj);
+ if (IS_ERR(trace_buffer->buf)) {
+ drm_err(drm_dev, "Unable to allocate trace buffer\n");
+ err = PTR_ERR(trace_buffer->buf);
+ trace_buffer->buf = NULL;
+ goto err_free_buf;
+ }
+ }
+
+ /* TODO: Provide control of group mask. */
+ fw_trace->group_mask = 0;
+
+ fw_trace->tracebuf_ctrl =
+ pvr_fw_object_create_and_map(pvr_dev,
+ sizeof(*fw_trace->tracebuf_ctrl),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+ PVR_BO_FW_NO_CLEAR_ON_RESET,
+ tracebuf_ctrl_init, fw_trace,
+ &fw_trace->tracebuf_ctrl_obj);
+ if (IS_ERR(fw_trace->tracebuf_ctrl)) {
+ drm_err(drm_dev, "Unable to allocate trace buffer control structure\n");
+ err = PTR_ERR(fw_trace->tracebuf_ctrl);
+ goto err_free_buf;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
+ ARRAY_SIZE(fw_trace->buffers));
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct rogue_fwif_tracebuf_space *tracebuf_space =
+ &fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ trace_buffer->tracebuf_space = tracebuf_space;
+ }
+
+ return 0;
+
+err_free_buf:
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ if (trace_buffer->buf)
+ pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
+ }
+
+ return err;
+}
+
+void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ u32 thread_nr;
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+ struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+ pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj);
+ }
+ pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * update_logtype() - Send KCCB command to trigger FW to update logtype
+ * @pvr_dev: Target PowerVR device
+ * @group_mask: New log group mask.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error returned by pvr_kccb_send_cmd(), or
+ * * -%EIO if the device is lost.
+ */
+static int
+update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ struct rogue_fwif_kccb_cmd cmd;
+ int idx;
+ int err;
+
+ if (group_mask)
+ fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
+ else
+ fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+ fw_trace->group_mask = group_mask;
+
+ down_read(&pvr_dev->reset_sem);
+ if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) {
+ err = -EIO;
+ goto err_up_read;
+ }
+
+ cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
+ cmd.kccb_flags = 0;
+
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);
+
+ drm_dev_exit(idx);
+
+err_up_read:
+ up_read(&pvr_dev->reset_sem);
+
+ return err;
+}
+
+struct pvr_fw_trace_seq_data {
+ /** @buffer: Pointer to copy of trace data. */
+ u32 *buffer;
+
+ /** @start_offset: Starting offset in trace data, as reported by FW. */
+ u32 start_offset;
+
+ /** @idx: Current index into trace data. */
+ u32 idx;
+
+ /** @assert_buf: Trace assert buffer, as reported by FW. */
+ struct rogue_fwif_file_info_buf assert_buf;
+};
+
+static u32 find_sfid(u32 id)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+ if (stid_fmts[i].id == id)
+ return i;
+ }
+
+ return ROGUE_FW_SF_LAST;
+}
+
+static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset)
+{
+ u32 idx;
+
+ idx = trace_seq_data->idx + offset;
+ if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
+ return 0;
+
+ idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+ return trace_seq_data->buffer[idx];
+}
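+
+/*
+ * Each trace entry, as consumed by the readers below (layout inferred from
+ * this code rather than from a normative FW spec), is: one dword SF id, two
+ * dwords holding a 64-bit timestamp (low word first), then
+ * ROGUE_FW_SF_PARAMNUM(id) parameter dwords.
+ */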
+
+/**
+ * fw_trace_get_next() - Advance trace index to next entry
+ * @trace_seq_data: Trace sequence data.
+ *
+ * Returns:
+ * * %true if trace index is now pointing to a valid entry, or
+ * * %false if trace index is pointing to an invalid entry, or has hit the end
+ * of the trace.
+ */
+static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
+{
+ u32 id, sf_id;
+
+ while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+ id = read_fw_trace(trace_seq_data, 0);
+ trace_seq_data->idx++;
+ if (!ROGUE_FW_LOG_VALIDID(id))
+ continue;
+ if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
+ /* Assertion failure marks the end of the trace. */
+ return false;
+ }
+
+ sf_id = find_sfid(id);
+ if (sf_id == ROGUE_FW_SF_FIRST)
+ continue;
+ if (sf_id == ROGUE_FW_SF_LAST) {
+ /*
+ * Could not match an ID in the SF table; the trace is
+ * most likely corrupt from this point.
+ */
+ return false;
+ }
+
+ /* Skip over the timestamp, and any parameters. */
+ trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id);
+
+ /* Ensure index is now pointing to a valid trace entry. */
+ id = read_fw_trace(trace_seq_data, 0);
+ if (!ROGUE_FW_LOG_VALIDID(id))
+ continue;
+
+ return true;
+ }
+
+ /* Hit end of trace data. */
+ return false;
+}
+
+/**
+ * fw_trace_get_first() - Find first valid entry in trace
+ * @trace_seq_data: Trace sequence data.
+ *
+ * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries.
+ *
+ * If the trace has no valid entries, this function will exit with the trace
+ * index pointing to the end of the trace. fw_trace_seq_show() will return an
+ * error in this state.
+ */
+static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
+{
+ trace_seq_data->idx = 0;
+
+ while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+ u32 id = read_fw_trace(trace_seq_data, 0);
+
+ if (ROGUE_FW_LOG_VALIDID(id)) {
+ u32 sf_id = find_sfid(id);
+
+ if (sf_id != ROGUE_FW_SF_FIRST)
+ break;
+ }
+ trace_seq_data->idx++;
+ }
+}
+
+static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+ u32 i;
+
+ /* Reset trace index, then advance to *pos. */
+ fw_trace_get_first(trace_seq_data);
+
+ for (i = 0; i < *pos; i++) {
+ if (!fw_trace_get_next(trace_seq_data))
+ return NULL;
+ }
+
+ return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
+}
+
+static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+
+ (*pos)++;
+ if (!fw_trace_get_next(trace_seq_data))
+ return NULL;
+
+ return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
+}
+
+static void fw_trace_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int fw_trace_seq_show(struct seq_file *s, void *v)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+ u64 timestamp;
+ u32 id;
+ u32 sf_id;
+
+ if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
+ return -EINVAL;
+
+ id = read_fw_trace(trace_seq_data, 0);
+ /* Index is not pointing at a valid entry. */
+ if (!ROGUE_FW_LOG_VALIDID(id))
+ return -EINVAL;
+
+ sf_id = find_sfid(id);
+ /* Index is not pointing at a valid entry. */
+ if (sf_id == ROGUE_FW_SF_LAST)
+ return -EINVAL;
+
+ timestamp = read_fw_trace(trace_seq_data, 1) |
+ ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
+ ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
+
+ seq_printf(s, "[%llu] : ", timestamp);
+ if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
+ seq_printf(s, "ASSERTION %s failed at %s:%u",
+ trace_seq_data->assert_buf.info,
+ trace_seq_data->assert_buf.path,
+ trace_seq_data->assert_buf.line_num);
+ } else {
+ seq_printf(s, stid_fmts[sf_id].name,
+ read_fw_trace(trace_seq_data, 3),
+ read_fw_trace(trace_seq_data, 4),
+ read_fw_trace(trace_seq_data, 5),
+ read_fw_trace(trace_seq_data, 6),
+ read_fw_trace(trace_seq_data, 7),
+ read_fw_trace(trace_seq_data, 8),
+ read_fw_trace(trace_seq_data, 9),
+ read_fw_trace(trace_seq_data, 10),
+ read_fw_trace(trace_seq_data, 11),
+ read_fw_trace(trace_seq_data, 12),
+ read_fw_trace(trace_seq_data, 13),
+ read_fw_trace(trace_seq_data, 14),
+ read_fw_trace(trace_seq_data, 15),
+ read_fw_trace(trace_seq_data, 16),
+ read_fw_trace(trace_seq_data, 17),
+ read_fw_trace(trace_seq_data, 18),
+ read_fw_trace(trace_seq_data, 19),
+ read_fw_trace(trace_seq_data, 20),
+ read_fw_trace(trace_seq_data, 21),
+ read_fw_trace(trace_seq_data, 22));
+ }
+ seq_puts(s, "\n");
+ return 0;
+}
+
+static const struct seq_operations pvr_fw_trace_seq_ops = {
+ .start = fw_trace_seq_start,
+ .next = fw_trace_seq_next,
+ .stop = fw_trace_seq_stop,
+ .show = fw_trace_seq_show
+};
+
+static int fw_trace_open(struct inode *inode, struct file *file)
+{
+ struct pvr_fw_trace_buffer *trace_buffer = inode->i_private;
+ struct rogue_fwif_tracebuf_space *tracebuf_space =
+ trace_buffer->tracebuf_space;
+ struct pvr_fw_trace_seq_data *trace_seq_data;
+ int err;
+
+ trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL);
+ if (!trace_seq_data)
+ return -ENOMEM;
+
+ trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,
+ sizeof(*trace_seq_data->buffer), GFP_KERNEL);
+ if (!trace_seq_data->buffer) {
+ err = -ENOMEM;
+ goto err_free_data;
+ }
+
+ /*
+ * Take a local copy of the trace buffer, as firmware may still be
+ * writing to it. This will exist as long as this file is open.
+ */
+ memcpy(trace_seq_data->buffer, trace_buffer->buf,
+ ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32));
+ trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer);
+ trace_seq_data->assert_buf = tracebuf_space->assert_buf;
+ fw_trace_get_first(trace_seq_data);
+
+ err = seq_open(file, &pvr_fw_trace_seq_ops);
+ if (err)
+ goto err_free_buffer;
+
+ ((struct seq_file *)file->private_data)->private = trace_seq_data;
+
+ return 0;
+
+err_free_buffer:
+ kfree(trace_seq_data->buffer);
+
+err_free_data:
+ kfree(trace_seq_data);
+
+ return err;
+}
+
+static int fw_trace_release(struct inode *inode, struct file *file)
+{
+ struct pvr_fw_trace_seq_data *trace_seq_data =
+ ((struct seq_file *)file->private_data)->private;
+
+ seq_release(inode, file);
+ kfree(trace_seq_data->buffer);
+ kfree(trace_seq_data);
+
+ return 0;
+}
+
+static const struct file_operations pvr_fw_trace_fops = {
+ .owner = THIS_MODULE,
+ .open = fw_trace_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = fw_trace_release,
+};
+
+void
+pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
+{
+ if (old_mask != new_mask)
+ update_logtype(pvr_dev, new_mask);
+}
+
+void
+pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+ struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+ u32 thread_nr;
+
+ static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
+ "The filename buffer is only large enough for a single-digit thread count");
+
+ for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+ char filename[8];
+
+ snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
+ debugfs_create_file(filename, 0400, dir,
+ &fw_trace->buffers[thread_nr],
+ &pvr_fw_trace_fops);
+ }
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
new file mode 100644
index 000000000..0074d2b18
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_TRACE_H
+#define PVR_FW_TRACE_H
+
+#include <drm/drm_file.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif.h"
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declarations from pvr_rogue_fwif.h */
+struct rogue_fwif_tracebuf;
+struct rogue_fwif_tracebuf_space;
+
+/**
+ * struct pvr_fw_trace_buffer - Structure representing a trace buffer
+ */
+struct pvr_fw_trace_buffer {
+ /** @buf_obj: FW buffer object representing trace buffer. */
+ struct pvr_fw_object *buf_obj;
+
+ /** @buf: Pointer to CPU mapping of trace buffer. */
+ u32 *buf;
+
+ /**
+ * @tracebuf_space: Pointer to FW tracebuf_space structure for this
+ * trace buffer.
+ */
+ struct rogue_fwif_tracebuf_space *tracebuf_space;
+};
+
+/**
+ * struct pvr_fw_trace - Device firmware trace data
+ */
+struct pvr_fw_trace {
+ /**
+ * @tracebuf_ctrl_obj: Object representing FW trace buffer control
+ * structure.
+ */
+ struct pvr_fw_object *tracebuf_ctrl_obj;
+
+ /**
+ * @tracebuf_ctrl: Pointer to CPU mapping of FW trace buffer control
+ * structure.
+ */
+ struct rogue_fwif_tracebuf *tracebuf_ctrl;
+
+ /**
+ * @buffers: Array representing the actual trace buffers owned by this
+ * device.
+ */
+ struct pvr_fw_trace_buffer buffers[ROGUE_FW_THREAD_MAX];
+
+ /** @group_mask: Mask of enabled trace groups. */
+ u32 group_mask;
+};
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev);
+void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
+ u32 new_mask);
+
+void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_FW_TRACE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
new file mode 100644
index 000000000..6a8c81fe8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
+#include <linux/compiler.h>
+#include <linux/compiler_attributes.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iosys-map.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+
+static void pvr_gem_object_free(struct drm_gem_object *obj)
+{
+ drm_gem_shmem_object_free(obj);
+}
+
+static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
+{
+ struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+
+ if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS))
+ return -EINVAL;
+
+ return drm_gem_shmem_mmap(shmem_obj, vma);
+}
+
+static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
+ .free = pvr_gem_object_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = pvr_gem_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * pvr_gem_object_flags_validate() - Verify that a collection of PowerVR GEM
+ * mapping and/or creation flags form a valid combination.
+ * @flags: PowerVR GEM mapping/creation flags to validate.
+ *
+ * This function explicitly allows kernel-only flags. All ioctl entrypoints
+ * should do their own validation in addition to relying on this function.
+ *
+ * Return:
+ * * %true if @flags contains valid mapping and/or creation flags, or
+ * * %false otherwise.
+ */
+static bool
+pvr_gem_object_flags_validate(u64 flags)
+{
+ static const u64 invalid_combinations[] = {
+ /*
+ * Memory flagged as PM/FW-protected cannot be mapped to
+ * userspace. To make this explicit, we require that the two
+ * flags allowing each of these respective features are never
+ * specified together.
+ */
+ (DRM_PVR_BO_PM_FW_PROTECT |
+ DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
+ };
+
+ int i;
+
+ /*
+ * Check for bits set in undefined regions. Reserved regions refer to
+ * options that can only be set by the kernel. These are explicitly
+ * allowed in most cases, and must be checked specifically in IOCTL
+ * callback code.
+ */
+ if ((flags & PVR_BO_UNDEFINED_MASK) != 0)
+ return false;
+
+ /*
+ * Check for all combinations of flags marked as invalid in the array
+ * above.
+ */
+ for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+ u64 combo = invalid_combinations[i];
+
+ if ((flags & combo) == combo)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * pvr_gem_object_into_handle() - Convert a reference to an object into a
+ * userspace-accessible handle.
+ * @pvr_obj: [IN] Target PowerVR-specific object.
+ * @pvr_file: [IN] File to associate the handle with.
+ * @handle: [OUT] Pointer to store the created handle in. Remains unmodified if
+ * an error is encountered.
+ *
+ * If an error is encountered, ownership of @pvr_obj will not have been
+ * transferred. If this function succeeds, however, further use of @pvr_obj is
+ * considered undefined behaviour unless another reference to it is explicitly
+ * held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while attempting to allocate a handle on @pvr_file.
+ */
+int
+pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+ struct pvr_file *pvr_file, u32 *handle)
+{
+ struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
+ struct drm_file *file = from_pvr_file(pvr_file);
+
+ u32 new_handle;
+ int err;
+
+ err = drm_gem_handle_create(file, gem_obj, &new_handle);
+ if (err)
+ return err;
+
+ /*
+ * Release our reference to @pvr_obj, effectively transferring
+ * ownership to the handle.
+ */
+ pvr_gem_object_put(pvr_obj);
+
+ /*
+ * Do not store the new handle in @handle until no more errors can
+ * occur.
+ */
+ *handle = new_handle;
+
+ return 0;
+}
+
+/**
+ * pvr_gem_object_from_handle() - Obtain a reference to an object from a
+ * userspace handle.
+ * @pvr_file: PowerVR-specific file to which @handle is associated.
+ * @handle: Userspace handle referencing the target object.
+ *
+ * On return, @handle always maintains its reference to the requested object
+ * (if it had one in the first place). If this function succeeds, the returned
+ * object will hold an additional reference. When the caller is finished with
+ * the returned object, they should call pvr_gem_object_put() on it to release
+ * this reference.
+ *
+ * Return:
+ * * A pointer to the requested PowerVR-specific object on success, or
+ * * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
+{
+ struct drm_file *file = from_pvr_file(pvr_file);
+ struct drm_gem_object *gem_obj;
+
+ gem_obj = drm_gem_object_lookup(file, handle);
+ if (!gem_obj)
+ return NULL;
+
+ return gem_to_pvr_gem(gem_obj);
+}
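Taken together, the two helpers above implement the usual DRM handle dance: a successful pvr_gem_object_into_handle() consumes the caller's reference, while pvr_gem_object_from_handle() returns a new one. A minimal sketch of a hypothetical caller (names and error handling are illustrative only):

static int example_wrap_and_reuse(struct pvr_file *pvr_file,
				  struct pvr_gem_object *pvr_obj)
{
	struct pvr_gem_object *again;
	u32 handle;
	int err;

	/* On success, ownership of our reference moves to the handle. */
	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &handle);
	if (err) {
		/* On failure the reference is still ours to drop. */
		pvr_gem_object_put(pvr_obj);
		return err;
	}

	/* The lookup takes an additional reference we must release. */
	again = pvr_gem_object_from_handle(pvr_file, handle);
	if (!again)
		return -ENOENT;

	pvr_gem_object_put(again);
	return 0;
}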
+
+/**
+ * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address
+ * space.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * Once the caller is finished with the CPU mapping, they must call
+ * pvr_gem_object_vunmap() on @pvr_obj.
+ *
+ * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_cpu() is called to make
+ * sure the CPU mapping is consistent.
+ *
+ * Return:
+ * * A pointer to the CPU mapping on success,
+ * * -%ENOMEM if the mapping fails, or
+ * * Any error encountered while attempting to acquire a reference to the
+ * backing pages for @pvr_obj.
+ */
+void *
+pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
+{
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+ struct iosys_map map;
+ int err;
+
+ dma_resv_lock(obj->resv, NULL);
+
+ err = drm_gem_shmem_vmap(shmem_obj, &map);
+ if (err)
+ goto err_unlock;
+
+ if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
+ struct device *dev = shmem_obj->base.dev->dev;
+
+ /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+ * in GPU space yet.
+ */
+ if (shmem_obj->sgt)
+ dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+ }
+
+ dma_resv_unlock(obj->resv);
+
+ return map.vaddr;
+
+err_unlock:
+ dma_resv_unlock(obj->resv);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_gem_object_vunmap() - Unmap a PowerVR memory object from CPU virtual
+ * address space.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_device() is called to make
+ * sure the GPU mapping is consistent.
+ */
+void
+pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
+{
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+
+ if (WARN_ON(!map.vaddr))
+ return;
+
+ dma_resv_lock(obj->resv, NULL);
+
+ if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
+ struct device *dev = shmem_obj->base.dev->dev;
+
+ /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+ * in GPU space yet.
+ */
+ if (shmem_obj->sgt)
+ dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+ }
+
+ drm_gem_shmem_vunmap(shmem_obj, &map);
+
+ dma_resv_unlock(obj->resv);
+}
+
+/**
+ * pvr_gem_object_zero() - Zeroes the physical memory behind an object.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while attempting to map @pvr_obj to the CPU (see
+ * pvr_gem_object_vmap()).
+ */
+static int
+pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
+{
+ void *cpu_ptr;
+
+ cpu_ptr = pvr_gem_object_vmap(pvr_obj);
+ if (IS_ERR(cpu_ptr))
+ return PTR_ERR(cpu_ptr);
+
+ memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));
+
+	/* Make sure the zeroing is done before vunmap-ing the object. */
+ wmb();
+
+ pvr_gem_object_vunmap(pvr_obj);
+
+ return 0;
+}
+
+/**
+ * pvr_gem_create_object() - Allocate and pre-initialize a pvr_gem_object
+ * @drm_dev: DRM device creating this object.
+ * @size: Size of the object to allocate in bytes.
+ *
+ * Return:
+ * * The new pre-initialized GEM object on success,
+ * * -%ENOMEM if the allocation failed.
+ */
+struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
+{
+ struct drm_gem_object *gem_obj;
+ struct pvr_gem_object *pvr_obj;
+
+ pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
+ if (!pvr_obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_obj = gem_from_pvr_gem(pvr_obj);
+ gem_obj->funcs = &pvr_gem_object_funcs;
+
+ return gem_obj;
+}
+
+/**
+ * pvr_gem_object_create() - Creates a PowerVR-specific buffer object.
+ * @pvr_dev: Target PowerVR device.
+ * @size: Size of the object to allocate in bytes. Must be greater than zero.
+ * Any value which is not an exact multiple of the system page size will be
+ * rounded up to satisfy this condition.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ *
+ * The created object may be larger than @size, but can never be smaller. To
+ * get the exact size, call pvr_gem_object_size() on the returned pointer.
+ *
+ * Return:
+ * * The newly-minted PowerVR-specific buffer object on success,
+ * * -%EINVAL if @size is zero or @flags is not valid,
+ * * -%ENOMEM if sufficient physical memory cannot be allocated, or
+ * * Any other error returned by drm_gem_create_mmap_offset().
+ */
+struct pvr_gem_object *
+pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
+{
+ struct drm_gem_shmem_object *shmem_obj;
+ struct pvr_gem_object *pvr_obj;
+ struct sg_table *sgt;
+ int err;
+
+ /* Verify @size and @flags before continuing. */
+ if (size == 0 || !pvr_gem_object_flags_validate(flags))
+ return ERR_PTR(-EINVAL);
+
+ shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
+ if (IS_ERR(shmem_obj))
+ return ERR_CAST(shmem_obj);
+
+ shmem_obj->pages_mark_dirty_on_put = true;
+ shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
+ pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
+ pvr_obj->flags = flags;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto err_shmem_object_free;
+ }
+
+ dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
+ DMA_BIDIRECTIONAL);
+
+ /*
+ * Do this last because pvr_gem_object_zero() requires a fully
+ * configured instance of struct pvr_gem_object.
+ */
+ pvr_gem_object_zero(pvr_obj);
+
+ return pvr_obj;
+
+err_shmem_object_free:
+ drm_gem_shmem_free(shmem_obj);
+
+ return ERR_PTR(err);
+}
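A minimal sketch of how a hypothetical in-kernel caller might use the allocator above; the requested size is rounded up to a page multiple, so the exact size should be queried with pvr_gem_object_size():

	struct pvr_gem_object *pvr_obj;
	size_t real_size;

	pvr_obj = pvr_gem_object_create(pvr_dev, 4000,
					DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS);
	if (IS_ERR(pvr_obj))
		return PTR_ERR(pvr_obj);

	/* 4000 bytes rounds up to one page, e.g. 4096 with 4 KiB pages. */
	real_size = pvr_gem_object_size(pvr_obj);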
+
+/**
+ * pvr_gem_get_dma_addr() - Get DMA address for given offset in object
+ * @pvr_obj: Pointer to object to lookup address in.
+ * @offset: Offset within object to lookup address at.
+ * @dma_addr_out: Pointer to location to store DMA address.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if object is not currently backed, or if @offset is out of valid
+ * range for this object.
+ */
+int
+pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+ dma_addr_t *dma_addr_out)
+{
+ struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+ u32 accumulated_offset = 0;
+ struct scatterlist *sgl;
+ unsigned int sgt_idx;
+
+ WARN_ON(!shmem_obj->sgt);
+ for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, sgt_idx) {
+ u32 new_offset = accumulated_offset + sg_dma_len(sgl);
+
+ if (offset >= accumulated_offset && offset < new_offset) {
+ *dma_addr_out = sg_dma_address(sgl) +
+ (offset - accumulated_offset);
+ return 0;
+ }
+
+ accumulated_offset = new_offset;
+ }
+
+ return -EINVAL;
+}
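For illustration, a hypothetical caller resolving the DMA address of the second page of an object (the lookup walks the sg-table until the accumulated length covers @offset):

	dma_addr_t dma_addr;
	int err;

	err = pvr_gem_get_dma_addr(pvr_obj, PAGE_SIZE, &dma_addr);
	if (err)
		return err; /* Unbacked object, or offset past the end. */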
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
new file mode 100644
index 000000000..e0e5ea509
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_GEM_H
+#define PVR_GEM_H
+
+#include "pvr_rogue_heap_config.h"
+#include "pvr_rogue_meta.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/compiler_attributes.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/**
+ * DOC: Flags for DRM_IOCTL_PVR_CREATE_BO (kernel-only)
+ *
+ * Kernel-only values allowed in &pvr_gem_object->flags. The majority of options
+ * for this field are specified in the UAPI header "pvr_drm.h" with a
+ * DRM_PVR_BO_ prefix. To distinguish these internal options (which must exist
+ * in ranges marked as "reserved" in the UAPI header), we drop the DRM prefix.
+ * The public options should be used directly, DRM prefix and all.
+ *
+ * To avoid potentially confusing gaps in the UAPI options, these kernel-only
+ * options are specified "in reverse", starting at bit 63.
+ *
+ * We use "reserved" to refer to bits defined here and not exposed in the UAPI.
+ * Bits not defined anywhere are "undefined".
+ *
+ * CPU mapping options
+ * :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined on the CPU. Set this
+ * flag to override this behaviour and map the object cached.
+ *
+ * Firmware options
+ * :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and reinitialised on hard
+ * reset. Set this flag to override this behaviour and preserve buffer contents on reset.
+ */
+#define PVR_BO_CPU_CACHED BIT_ULL(63)
+
+#define PVR_BO_FW_NO_CLEAR_ON_RESET BIT_ULL(62)
+
+#define PVR_BO_KERNEL_FLAGS_MASK (PVR_BO_CPU_CACHED | PVR_BO_FW_NO_CLEAR_ON_RESET)
+
+/* Bits 61..3 are undefined. */
+/* Bits 2..0 are defined in the UAPI. */
+
+/* Other utilities. */
+#define PVR_BO_UNDEFINED_MASK ~(PVR_BO_KERNEL_FLAGS_MASK | DRM_PVR_BO_FLAGS_MASK)
+
+/*
+ * All firmware-mapped memory uses (mostly) the same flags. Specifically,
+ * firmware-mapped memory should be:
+ * * Read/write on the device,
+ * * Read/write on the CPU, and
+ * * Write-combined on the CPU.
+ *
+ * The only variation is in caching on the device.
+ */
+#define PVR_BO_FW_FLAGS_DEVICE_CACHED (ULL(0))
+#define PVR_BO_FW_FLAGS_DEVICE_UNCACHED DRM_PVR_BO_BYPASS_DEVICE_CACHE
+
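To make the mask arithmetic above concrete, here is a sketch of the resulting values, assuming the two kernel-only flags sit at bits 63..62 and DRM_PVR_BO_FLAGS_MASK covers bits 2..0 as noted above:

	/*
	 * PVR_BO_KERNEL_FLAGS_MASK == 0xc000000000000000
	 * DRM_PVR_BO_FLAGS_MASK    == 0x0000000000000007
	 * PVR_BO_UNDEFINED_MASK    == 0x3ffffffffffffff8
	 *
	 * A flags word with any bit set in PVR_BO_UNDEFINED_MASK is rejected
	 * by pvr_gem_object_flags_validate(), e.g. (hypothetical):
	 */
	u64 flags = PVR_BO_CPU_CACHED | BIT_ULL(40);

	if (flags & PVR_BO_UNDEFINED_MASK)	/* True: bit 40 is undefined. */
		return false;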
+/**
+ * struct pvr_gem_object - PowerVR-specific wrapper for &struct drm_gem_object
+ */
+struct pvr_gem_object {
+ /**
+ * @base: The underlying &struct drm_gem_shmem_object.
+ *
+ * Do not access this member directly, instead call
+	 * shmem_gem_from_pvr_gem().
+ */
+ struct drm_gem_shmem_object base;
+
+ /**
+ * @flags: Options set at creation-time. Some of these options apply to
+ * the creation operation itself (which are stored here for reference)
+ * with the remainder used for mapping options to both the device and
+	 * CPU. These are used every time this object is mapped, and must not
+	 * be changed after creation.
+ *
+ * Must be a combination of DRM_PVR_BO_* and/or PVR_BO_* flags.
+ *
+ * .. note::
+ *
+	 *    None of these options may change or be changed throughout the
+	 *    object's lifetime.
+ */
+ u64 flags;
+};
+
+static_assert(offsetof(struct pvr_gem_object, base) == 0,
+ "offsetof(struct pvr_gem_object, base) not zero");
+
+#define shmem_gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base)
+
+#define shmem_gem_to_pvr_gem(shmem_obj) container_of_const(shmem_obj, struct pvr_gem_object, base)
+
+#define gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base.base)
+
+#define gem_to_pvr_gem(gem_obj) container_of_const(gem_obj, struct pvr_gem_object, base.base)
+
+/* Functions defined in pvr_gem.c */
+
+struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size);
+
+struct pvr_gem_object *pvr_gem_object_create(struct pvr_device *pvr_dev,
+ size_t size, u64 flags);
+
+int pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+ struct pvr_file *pvr_file, u32 *handle);
+struct pvr_gem_object *pvr_gem_object_from_handle(struct pvr_file *pvr_file,
+ u32 handle);
+
+static __always_inline struct sg_table *
+pvr_gem_object_get_pages_sgt(struct pvr_gem_object *pvr_obj)
+{
+ return drm_gem_shmem_get_pages_sgt(shmem_gem_from_pvr_gem(pvr_obj));
+}
+
+void *pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj);
+void pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj);
+
+int pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+ dma_addr_t *dma_addr_out);
+
+/**
+ * pvr_gem_object_get() - Acquire reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to acquire reference on.
+ */
+static __always_inline void
+pvr_gem_object_get(struct pvr_gem_object *pvr_obj)
+{
+ drm_gem_object_get(gem_from_pvr_gem(pvr_obj));
+}
+
+/**
+ * pvr_gem_object_put() - Release reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to release reference on.
+ */
+static __always_inline void
+pvr_gem_object_put(struct pvr_gem_object *pvr_obj)
+{
+ drm_gem_object_put(gem_from_pvr_gem(pvr_obj));
+}
+
+static __always_inline size_t
+pvr_gem_object_size(struct pvr_gem_object *pvr_obj)
+{
+ return gem_from_pvr_gem(pvr_obj)->size;
+}
+
+#endif /* PVR_GEM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
new file mode 100644
index 000000000..54f88d6c0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_hwrt.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs_client.h"
+#include "pvr_rogue_fwif.h"
+
+#include <drm/drm_gem.h>
+#include <linux/bitops.h>
+#include <linux/math.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+static_assert(ROGUE_FWIF_NUM_RTDATAS == 2);
+static_assert(ROGUE_FWIF_NUM_GEOMDATAS == 1);
+static_assert(ROGUE_FWIF_NUM_RTDATA_FREELISTS == 2);
+
+/*
+ * struct pvr_rt_mtile_info - Render target macrotile information
+ */
+struct pvr_rt_mtile_info {
+ u32 mtile_x[3];
+ u32 mtile_y[3];
+ u32 tile_max_x;
+ u32 tile_max_y;
+ u32 tile_size_x;
+ u32 tile_size_y;
+ u32 num_tiles_x;
+ u32 num_tiles_y;
+};
+
+/* Size of Shadow Render Target Cache entry */
+#define SRTC_ENTRY_SIZE sizeof(u32)
+/* Size of Renders Accumulation Array entry */
+#define RAA_ENTRY_SIZE sizeof(u32)
+
+static int
+hwrt_init_kernel_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+ struct pvr_hwrt_dataset *hwrt)
+{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ int err;
+ int i;
+
+ hwrt->pvr_dev = pvr_dev;
+ hwrt->max_rts = args->layers;
+
+ /* Get pointers to the free lists */
+ for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
+ if (!hwrt->free_lists[i]) {
+ err = -EINVAL;
+ goto err_put_free_lists;
+ }
+ }
+
+ if (hwrt->free_lists[ROGUE_FW_LOCAL_FREELIST]->current_pages <
+ pvr_get_free_list_min_pages(pvr_dev)) {
+ err = -EINVAL;
+ goto err_put_free_lists;
+ }
+
+ return 0;
+
+err_put_free_lists:
+ for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ pvr_free_list_put(hwrt->free_lists[i]);
+ hwrt->free_lists[i] = NULL;
+ }
+
+ return err;
+}
+
+static void
+hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+ pvr_free_list_put(hwrt->free_lists[i]);
+ hwrt->free_lists[i] = NULL;
+ }
+}
+
+static void
+hwrt_fini_common_fw_structure(struct pvr_hwrt_dataset *hwrt)
+{
+ pvr_fw_object_destroy(hwrt->common_fw_obj);
+}
+
+static int
+get_cr_isp_mtile_size_val(struct pvr_device *pvr_dev, u32 samples,
+ struct pvr_rt_mtile_info *info, u32 *value_out)
+{
+ u32 x = info->mtile_x[0];
+ u32 y = info->mtile_y[0];
+ u32 samples_per_pixel;
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel);
+ if (err)
+ return err;
+
+ if (samples_per_pixel == 1) {
+ if (samples >= 4)
+ x <<= 1;
+ if (samples >= 2)
+ y <<= 1;
+ } else if (samples_per_pixel == 2) {
+ if (samples >= 8)
+ x <<= 1;
+ if (samples >= 4)
+ y <<= 1;
+ } else if (samples_per_pixel == 4) {
+ if (samples >= 8)
+ y <<= 1;
+ } else {
+ WARN(true, "Unsupported ISP samples per pixel value");
+ return -EINVAL;
+ }
+
+ *value_out = ((x << ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK) |
+ ((y << ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK);
+
+ return 0;
+}
+
+static int
+get_cr_multisamplectl_val(u32 samples, bool y_flip, u64 *value_out)
+{
+ static const struct {
+ u8 x[8];
+ u8 y[8];
+ } sample_positions[4] = {
+ /* 1 sample */
+ {
+ .x = { 8 },
+ .y = { 8 },
+ },
+ /* 2 samples */
+ {
+ .x = { 12, 4 },
+ .y = { 12, 4 },
+ },
+ /* 4 samples */
+ {
+ .x = { 6, 14, 2, 10 },
+ .y = { 2, 6, 10, 14 },
+ },
+ /* 8 samples */
+ {
+ .x = { 9, 7, 13, 5, 3, 1, 11, 15 },
+ .y = { 5, 11, 9, 3, 13, 7, 15, 1 },
+ },
+ };
+ const int idx = fls(samples) - 1;
+ u64 value = 0;
+
+ if (idx < 0 || idx > 3)
+ return -EINVAL;
+
+ for (u32 i = 0; i < 8; i++) {
+ value |= ((u64)sample_positions[idx].x[i]) << (i * 8);
+ if (y_flip)
+ value |= (((u64)(16 - sample_positions[idx].y[i]) & 0xf)) << (i * 8 + 4);
+ else
+ value |= ((u64)sample_positions[idx].y[i]) << (i * 8 + 4);
+ }
+
+ *value_out = value;
+
+ return 0;
+}
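The loop above packs one sample position per byte, X in the low nibble and Y in the high nibble. A worked example for samples == 4 (idx == 2):

	/*
	 * i:     0      1      2      3
	 * x, y: (6,2) (14,6) (2,10) (10,14)
	 * byte: 0x26   0x6e   0xa2   0xea
	 *
	 * *value_out == 0x00000000eaa26e26. With y_flip, Y becomes
	 * (16 - y) & 0xf, so the bytes are 0xe6, 0xae, 0x62, 0x2a and
	 * *value_out == 0x000000002a62aee6.
	 */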
+
+static int
+get_cr_te_aa_val(struct pvr_device *pvr_dev, u32 samples, u32 *value_out)
+{
+ u32 samples_per_pixel;
+ u32 value = 0;
+ int err = 0;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel);
+ if (err)
+ return err;
+
+ switch (samples_per_pixel) {
+ case 1:
+ if (samples >= 2)
+ value |= ROGUE_CR_TE_AA_Y_EN;
+ if (samples >= 4)
+ value |= ROGUE_CR_TE_AA_X_EN;
+ break;
+ case 2:
+ if (samples >= 2)
+ value |= ROGUE_CR_TE_AA_X2_EN;
+ if (samples >= 4)
+ value |= ROGUE_CR_TE_AA_Y_EN;
+ if (samples >= 8)
+ value |= ROGUE_CR_TE_AA_X_EN;
+ break;
+ case 4:
+ if (samples >= 2)
+ value |= ROGUE_CR_TE_AA_X2_EN;
+ if (samples >= 4)
+ value |= ROGUE_CR_TE_AA_Y2_EN;
+ if (samples >= 8)
+ value |= ROGUE_CR_TE_AA_Y_EN;
+ break;
+ default:
+ WARN(true, "Unsupported ISP samples per pixel value");
+ return -EINVAL;
+ }
+
+ *value_out = value;
+
+ return 0;
+}
+
+static void
+hwrtdata_common_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_hwrt_dataset *hwrt = priv;
+
+ memcpy(cpu_ptr, &hwrt->common, sizeof(hwrt->common));
+}
+
+static int
+hwrt_init_common_fw_structure(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+ struct pvr_hwrt_dataset *hwrt)
+{
+ struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ struct pvr_rt_mtile_info info;
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &info.tile_size_x);
+ if (WARN_ON(err))
+ return err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &info.tile_size_y);
+ if (WARN_ON(err))
+ return err;
+
+ info.num_tiles_x = DIV_ROUND_UP(args->width, info.tile_size_x);
+ info.num_tiles_y = DIV_ROUND_UP(args->height, info.tile_size_y);
+
+ if (PVR_HAS_FEATURE(pvr_dev, simple_parameter_format_version)) {
+ u32 parameter_format;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, simple_parameter_format_version,
+ &parameter_format);
+ if (WARN_ON(err))
+ return err;
+
+ WARN_ON(parameter_format != 2);
+
+ /*
+ * Set up 16 macrotiles with a multiple of 2x2 tiles per macrotile, which is
+ * aligned to a tile group.
+ */
+ info.mtile_x[0] = DIV_ROUND_UP(info.num_tiles_x, 8) * 2;
+ info.mtile_y[0] = DIV_ROUND_UP(info.num_tiles_y, 8) * 2;
+ info.mtile_x[1] = 0;
+ info.mtile_y[1] = 0;
+ info.mtile_x[2] = 0;
+ info.mtile_y[2] = 0;
+ info.tile_max_x = round_up(info.num_tiles_x, 2) - 1;
+ info.tile_max_y = round_up(info.num_tiles_y, 2) - 1;
+ } else {
+ /* Set up 16 macrotiles with a multiple of 4x4 tiles per macrotile. */
+ info.mtile_x[0] = round_up(DIV_ROUND_UP(info.num_tiles_x, 4), 4);
+ info.mtile_y[0] = round_up(DIV_ROUND_UP(info.num_tiles_y, 4), 4);
+ info.mtile_x[1] = info.mtile_x[0] * 2;
+ info.mtile_y[1] = info.mtile_y[0] * 2;
+ info.mtile_x[2] = info.mtile_x[0] * 3;
+ info.mtile_y[2] = info.mtile_y[0] * 3;
+ info.tile_max_x = info.num_tiles_x - 1;
+ info.tile_max_y = info.num_tiles_y - 1;
+ }
+
+ hwrt->common.geom_caches_need_zeroing = false;
+
+ hwrt->common.isp_merge_lower_x = args->isp_merge_lower_x;
+ hwrt->common.isp_merge_lower_y = args->isp_merge_lower_y;
+ hwrt->common.isp_merge_upper_x = args->isp_merge_upper_x;
+ hwrt->common.isp_merge_upper_y = args->isp_merge_upper_y;
+ hwrt->common.isp_merge_scale_x = args->isp_merge_scale_x;
+ hwrt->common.isp_merge_scale_y = args->isp_merge_scale_y;
+
+ err = get_cr_multisamplectl_val(args->samples, false,
+ &hwrt->common.multi_sample_ctl);
+ if (err)
+ return err;
+
+ err = get_cr_multisamplectl_val(args->samples, true,
+ &hwrt->common.flipped_multi_sample_ctl);
+ if (err)
+ return err;
+
+ hwrt->common.mtile_stride = info.mtile_x[0] * info.mtile_y[0];
+
+ err = get_cr_te_aa_val(pvr_dev, args->samples, &hwrt->common.teaa);
+ if (err)
+ return err;
+
+ hwrt->common.screen_pixel_max =
+ (((args->width - 1) << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT) &
+ ~ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK) |
+ (((args->height - 1) << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT) &
+ ~ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK);
+
+ hwrt->common.te_screen =
+ ((info.tile_max_x << ROGUE_CR_TE_SCREEN_XMAX_SHIFT) &
+ ~ROGUE_CR_TE_SCREEN_XMAX_CLRMSK) |
+ ((info.tile_max_y << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) &
+ ~ROGUE_CR_TE_SCREEN_YMAX_CLRMSK);
+ hwrt->common.te_mtile1 =
+ ((info.mtile_x[0] << ROGUE_CR_TE_MTILE1_X1_SHIFT) & ~ROGUE_CR_TE_MTILE1_X1_CLRMSK) |
+ ((info.mtile_x[1] << ROGUE_CR_TE_MTILE1_X2_SHIFT) & ~ROGUE_CR_TE_MTILE1_X2_CLRMSK) |
+ ((info.mtile_x[2] << ROGUE_CR_TE_MTILE1_X3_SHIFT) & ~ROGUE_CR_TE_MTILE1_X3_CLRMSK);
+ hwrt->common.te_mtile2 =
+ ((info.mtile_y[0] << ROGUE_CR_TE_MTILE2_Y1_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y1_CLRMSK) |
+ ((info.mtile_y[1] << ROGUE_CR_TE_MTILE2_Y2_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y2_CLRMSK) |
+ ((info.mtile_y[2] << ROGUE_CR_TE_MTILE2_Y3_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y3_CLRMSK);
+
+ err = get_cr_isp_mtile_size_val(pvr_dev, args->samples, &info,
+ &hwrt->common.isp_mtile_size);
+ if (err)
+ return err;
+
+ hwrt->common.tpc_stride = geom_data_args->tpc_stride;
+ hwrt->common.tpc_size = geom_data_args->tpc_size;
+
+ hwrt->common.rgn_header_size = args->region_header_size;
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_hwrtdata_common),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED, hwrtdata_common_init, hwrt,
+ &hwrt->common_fw_obj);
+
+ return err;
+}
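A worked example of the macrotile arithmetic above for a 1920x1080 render target, assuming (hypothetically) 32x32 tiles and a GPU without the simple_parameter_format_version feature:

	/*
	 * num_tiles_x   = DIV_ROUND_UP(1920, 32)           = 60
	 * num_tiles_y   = DIV_ROUND_UP(1080, 32)           = 34
	 * mtile_x[0]    = round_up(DIV_ROUND_UP(60, 4), 4) = 16
	 * mtile_y[0]    = round_up(DIV_ROUND_UP(34, 4), 4) = 12
	 * mtile_x[1..2] = 32, 48; mtile_y[1..2] = 24, 36
	 * tile_max_x    = 59, tile_max_y = 33
	 * mtile_stride  = 16 * 12                          = 192
	 */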
+
+static void
+hwrt_fw_data_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_hwrt_data *hwrt_data = priv;
+
+ memcpy(cpu_ptr, &hwrt_data->data, sizeof(hwrt_data->data));
+}
+
+static int
+hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
+ struct pvr_hwrt_dataset *hwrt,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+ struct drm_pvr_create_hwrt_rt_data_args *rt_data_args,
+ struct pvr_hwrt_data *hwrt_data)
+{
+ struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+ struct rogue_fwif_rta_ctl *rta_ctl;
+ int free_list_i;
+ int err;
+
+ pvr_fw_object_get_fw_addr(hwrt->common_fw_obj,
+ &hwrt_data->data.hwrt_data_common_fw_addr);
+
+ for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+ pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
+ &hwrt_data->data.freelists_fw_addr[free_list_i]);
+ }
+
+ hwrt_data->data.tail_ptrs_dev_addr = geom_data_args->tpc_dev_addr;
+ hwrt_data->data.vheap_table_dev_addr = geom_data_args->vheap_table_dev_addr;
+ hwrt_data->data.rtc_dev_addr = geom_data_args->rtc_dev_addr;
+
+ hwrt_data->data.pm_mlist_dev_addr = rt_data_args->pm_mlist_dev_addr;
+ hwrt_data->data.macrotile_array_dev_addr = rt_data_args->macrotile_array_dev_addr;
+ hwrt_data->data.rgn_header_dev_addr = rt_data_args->region_header_dev_addr;
+
+ rta_ctl = &hwrt_data->data.rta_ctl;
+
+ rta_ctl->render_target_index = 0;
+ rta_ctl->active_render_targets = 0;
+ rta_ctl->valid_render_targets_fw_addr = 0;
+ rta_ctl->rta_num_partial_renders_fw_addr = 0;
+ rta_ctl->max_rts = args->layers;
+
+ if (args->layers > 1) {
+ err = pvr_fw_object_create(pvr_dev, args->layers * SRTC_ENTRY_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &hwrt_data->srtc_obj);
+ if (err)
+ return err;
+ pvr_fw_object_get_fw_addr(hwrt_data->srtc_obj,
+ &rta_ctl->valid_render_targets_fw_addr);
+
+ err = pvr_fw_object_create(pvr_dev, args->layers * RAA_ENTRY_SIZE,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &hwrt_data->raa_obj);
+ if (err)
+ goto err_put_shadow_rt_cache;
+ pvr_fw_object_get_fw_addr(hwrt_data->raa_obj,
+ &rta_ctl->rta_num_partial_renders_fw_addr);
+ }
+
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_hwrtdata),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ hwrt_fw_data_init, hwrt_data, &hwrt_data->fw_obj);
+ if (err)
+ goto err_put_raa_obj;
+
+ pvr_free_list_add_hwrt(hwrt->free_lists[0], hwrt_data);
+
+ return 0;
+
+err_put_raa_obj:
+ if (args->layers > 1)
+ pvr_fw_object_destroy(hwrt_data->raa_obj);
+
+err_put_shadow_rt_cache:
+ if (args->layers > 1)
+ pvr_fw_object_destroy(hwrt_data->srtc_obj);
+
+ return err;
+}
+
+static void
+hwrt_data_fini_fw_structure(struct pvr_hwrt_dataset *hwrt, int hwrt_nr)
+{
+ struct pvr_hwrt_data *hwrt_data = &hwrt->data[hwrt_nr];
+
+ pvr_free_list_remove_hwrt(hwrt->free_lists[0], hwrt_data);
+
+ if (hwrt->max_rts > 1) {
+ pvr_fw_object_destroy(hwrt_data->raa_obj);
+ pvr_fw_object_destroy(hwrt_data->srtc_obj);
+ }
+
+ pvr_fw_object_destroy(hwrt_data->fw_obj);
+}
+
+/**
+ * pvr_hwrt_dataset_create() - Create a new HWRT dataset
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ *
+ * Return:
+ * * Pointer to new HWRT, or
+ * * ERR_PTR(-%ENOMEM) on out of memory, or
+ * * ERR_PTR(-%EINVAL) on invalid free list handles or insufficient free
+ *   list pages.
+ */
+struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args)
+{
+ struct pvr_hwrt_dataset *hwrt;
+ int err, i = 0;
+
+ /* Create and fill out the kernel structure */
+ hwrt = kzalloc(sizeof(*hwrt), GFP_KERNEL);
+
+ if (!hwrt)
+ return ERR_PTR(-ENOMEM);
+
+ err = hwrt_init_kernel_structure(pvr_file, args, hwrt);
+ if (err < 0)
+ goto err_free;
+
+ err = hwrt_init_common_fw_structure(pvr_file, args, hwrt);
+ if (err < 0)
+ goto err_fini_kernel_structure;
+
+ for (; i < ARRAY_SIZE(hwrt->data); i++) {
+ err = hwrt_data_init_fw_structure(pvr_file, hwrt, args,
+ &args->rt_data_args[i],
+ &hwrt->data[i]);
+ if (err < 0)
+ goto err_fini_data_structures;
+
+ hwrt->data[i].hwrt_dataset = hwrt;
+ }
+
+ kref_init(&hwrt->ref_count);
+ return hwrt;
+
+err_fini_data_structures:
+ while (--i >= 0)
+ hwrt_data_fini_fw_structure(hwrt, i);
+
+err_fini_kernel_structure:
+ hwrt_fini_kernel_structure(hwrt);
+
+err_free:
+ kfree(hwrt);
+
+ return ERR_PTR(err);
+}
+
+static void
+pvr_hwrt_dataset_release(struct kref *ref_count)
+{
+ struct pvr_hwrt_dataset *hwrt =
+ container_of(ref_count, struct pvr_hwrt_dataset, ref_count);
+
+ for (int i = ARRAY_SIZE(hwrt->data) - 1; i >= 0; i--) {
+ WARN_ON(pvr_fw_structure_cleanup(hwrt->pvr_dev, ROGUE_FWIF_CLEANUP_HWRTDATA,
+ hwrt->data[i].fw_obj, 0));
+ hwrt_data_fini_fw_structure(hwrt, i);
+ }
+
+ hwrt_fini_common_fw_structure(hwrt);
+ hwrt_fini_kernel_structure(hwrt);
+
+ kfree(hwrt);
+}
+
+/**
+ * pvr_destroy_hwrt_datasets_for_file() - Destroy any HWRT datasets associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all HWRT datasets associated with @pvr_file from the file's HWRT
+ * handle list and drops the references held by those handles. Each HWRT
+ * dataset is then destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_hwrt_dataset *hwrt;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->hwrt_handles, handle, hwrt) {
+ (void)hwrt;
+ pvr_hwrt_dataset_put(xa_erase(&pvr_file->hwrt_handles, handle));
+ }
+}
+
+/**
+ * pvr_hwrt_dataset_put() - Release reference on HWRT dataset
+ * @hwrt: Pointer to HWRT dataset to release reference on
+ */
+void
+pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt)
+{
+ if (hwrt)
+ kref_put(&hwrt->ref_count, pvr_hwrt_dataset_release);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h
new file mode 100644
index 000000000..676070b20
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_HWRT_H
+#define PVR_HWRT_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_device.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/* Forward declaration from pvr_free_list.h. */
+struct pvr_free_list;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/**
+ * struct pvr_hwrt_data - structure representing HWRT data
+ */
+struct pvr_hwrt_data {
+ /** @fw_obj: FW object representing the FW-side structure. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @data: Local copy of FW-side structure. */
+ struct rogue_fwif_hwrtdata data;
+
+ /** @freelist_node: List node connecting this HWRT to the local freelist. */
+ struct list_head freelist_node;
+
+ /**
+ * @srtc_obj: FW object representing shadow render target cache.
+ *
+ * Only valid if @max_rts > 1.
+ */
+ struct pvr_fw_object *srtc_obj;
+
+ /**
+ * @raa_obj: FW object representing renders accumulation array.
+ *
+ * Only valid if @max_rts > 1.
+ */
+ struct pvr_fw_object *raa_obj;
+
+ /** @hwrt_dataset: Back pointer to owning HWRT dataset. */
+ struct pvr_hwrt_dataset *hwrt_dataset;
+};
+
+/**
+ * struct pvr_hwrt_dataset - structure representing a HWRT data set.
+ */
+struct pvr_hwrt_dataset {
+ /** @ref_count: Reference count of object. */
+ struct kref ref_count;
+
+ /** @pvr_dev: Pointer to device that owns this object. */
+ struct pvr_device *pvr_dev;
+
+ /** @common_fw_obj: FW object representing common FW-side structure. */
+ struct pvr_fw_object *common_fw_obj;
+
+ /** @common: Common HWRT data. */
+ struct rogue_fwif_hwrtdata_common common;
+
+ /** @data: HWRT data structures belonging to this set. */
+ struct pvr_hwrt_data data[ROGUE_FWIF_NUM_RTDATAS];
+
+ /** @free_lists: Free lists used by HWRT data set. */
+ struct pvr_free_list *free_lists[ROGUE_FWIF_NUM_RTDATA_FREELISTS];
+
+ /** @max_rts: Maximum render targets for this HWRT data set. */
+ u16 max_rts;
+};
+
+struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_create_hwrt_dataset_args *args);
+
+void
+pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file);
+
+/**
+ * pvr_hwrt_dataset_lookup() - Lookup HWRT dataset pointer from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on dataset object. Call pvr_hwrt_dataset_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a HWRT
+ * dataset)
+ */
+static __always_inline struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_hwrt_dataset *hwrt;
+
+ xa_lock(&pvr_file->hwrt_handles);
+ hwrt = xa_load(&pvr_file->hwrt_handles, handle);
+
+ if (hwrt)
+ kref_get(&hwrt->ref_count);
+
+ xa_unlock(&pvr_file->hwrt_handles);
+
+ return hwrt;
+}
+
+void
+pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt);
+
+/**
+ * pvr_hwrt_data_lookup() - Lookup HWRT data pointer from handle and index
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ * @index: Index of RT data within dataset.
+ *
+ * Takes reference on dataset object. Call pvr_hwrt_data_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a HWRT
+ * dataset, or index is out of range)
+ */
+static __always_inline struct pvr_hwrt_data *
+pvr_hwrt_data_lookup(struct pvr_file *pvr_file, u32 handle, u32 index)
+{
+ struct pvr_hwrt_dataset *hwrt_dataset = pvr_hwrt_dataset_lookup(pvr_file, handle);
+
+ if (hwrt_dataset) {
+ if (index < ARRAY_SIZE(hwrt_dataset->data))
+ return &hwrt_dataset->data[index];
+
+ pvr_hwrt_dataset_put(hwrt_dataset);
+ }
+
+ return NULL;
+}
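A minimal sketch of a hypothetical caller pairing the lookup above with pvr_hwrt_data_put():

	struct pvr_hwrt_data *hwrt = pvr_hwrt_data_lookup(pvr_file, handle, 0);

	if (!hwrt)
		return -EINVAL;

	/* ... use hwrt, e.g. hwrt->fw_obj ... */

	pvr_hwrt_data_put(hwrt);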
+
+/**
+ * pvr_hwrt_data_put() - Release reference on HWRT data
+ * @hwrt: Pointer to HWRT data to release reference on
+ */
+static __always_inline void
+pvr_hwrt_data_put(struct pvr_hwrt_data *hwrt)
+{
+ if (hwrt)
+ pvr_hwrt_dataset_put(hwrt->hwrt_dataset);
+}
+
+static __always_inline struct pvr_hwrt_data *
+pvr_hwrt_data_get(struct pvr_hwrt_data *hwrt)
+{
+ if (hwrt)
+ kref_get(&hwrt->hwrt_dataset->ref_count);
+
+ return hwrt;
+}
+
+#endif /* PVR_HWRT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
new file mode 100644
index 000000000..78c2f3c6d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_mmu.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+#include "pvr_sync.h"
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <linux/types.h>
+#include <uapi/drm/pvr_drm.h>
+
+static void pvr_job_release(struct kref *kref)
+{
+ struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);
+
+ xa_erase(&job->pvr_dev->job_ids, job->id);
+
+ pvr_hwrt_data_put(job->hwrt);
+ pvr_context_put(job->ctx);
+
+ WARN_ON(job->paired_job);
+
+ pvr_queue_job_cleanup(job);
+ pvr_job_release_pm_ref(job);
+
+ kfree(job->cmd);
+ kfree(job);
+}
+
+/**
+ * pvr_job_put() - Release reference on job
+ * @job: Target job.
+ */
+void
+pvr_job_put(struct pvr_job *job)
+{
+ if (job)
+ kref_put(&job->ref_count, pvr_job_release);
+}
+
+/**
+ * pvr_job_process_stream() - Build job FW structure from stream
+ * @pvr_dev: Device pointer.
+ * @cmd_defs: Stream definition.
+ * @stream: Pointer to command stream.
+ * @stream_size: Size of command stream, in bytes.
+ * @job: Pointer to job.
+ *
+ * Caller is responsible for freeing the output structure.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%ENOMEM on out of memory, or
+ * * -%EINVAL on malformed stream.
+ */
+static int
+pvr_job_process_stream(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ void *stream, u32 stream_size, struct pvr_job *job)
+{
+ int err;
+
+ job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL);
+ if (!job->cmd)
+ return -ENOMEM;
+
+ job->cmd_len = cmd_defs->dest_size;
+
+ err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd);
+ if (err)
+ kfree(job->cmd);
+
+ return err;
+}
+
+static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job,
+ const struct pvr_stream_cmd_defs *stream_def,
+ u64 stream_userptr, u32 stream_len)
+{
+ void *stream;
+ int err;
+
+ stream = kzalloc(stream_len, GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) {
+ err = -EFAULT;
+ goto err_free_stream;
+ }
+
+ err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job);
+
+err_free_stream:
+ kfree(stream);
+
+ return err;
+}
+
+static u32
+convert_geom_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST)
+ out_flags |= ROGUE_GEOM_FLAGS_FIRSTKICK;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST)
+ out_flags |= ROGUE_GEOM_FLAGS_LASTKICK;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE;
+
+ return out_flags;
+}
+
+static u32
+convert_frag_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_FRAG_FLAGS_SINGLE_CORE;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER)
+ out_flags |= ROGUE_FRAG_FLAGS_DEPTHBUFFER;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER)
+ out_flags |= ROGUE_FRAG_FLAGS_STENCILBUFFER;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP)
+ out_flags |= ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER)
+ out_flags |= ROGUE_FRAG_FLAGS_SCRATCHBUFFER;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS)
+ out_flags |= ROGUE_FRAG_FLAGS_GET_VIS_RESULTS;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+ out_flags |= ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE;
+
+ return out_flags;
+}
+
+static int
+pvr_geom_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_geom *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
+ return -EINVAL;
+
+ if (!job->hwrt)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->cmd_shared.cmn.frame_num = 0;
+ cmd->flags = convert_geom_flags(args->flags);
+ pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
+ return 0;
+}
+
+static int
+pvr_frag_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_frag *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
+ return -EINVAL;
+
+ if (!job->hwrt)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = (args->flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER) ?
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR :
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->cmd_shared.cmn.frame_num = 0;
+ cmd->flags = convert_frag_flags(args->flags);
+ pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
+ return 0;
+}
+
+static u32
+convert_compute_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP)
+ out_flags |= ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
+ if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_COMPUTE_FLAG_SINGLE_CORE;
+
+ return out_flags;
+}
+
+static int
+pvr_compute_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_compute *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_COMPUTE)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->common.frame_num = 0;
+ cmd->flags = convert_compute_flags(args->flags);
+ return 0;
+}
+
+static u32
+convert_transfer_flags(u32 in_flags)
+{
+ u32 out_flags = 0;
+
+ if (in_flags & DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE)
+ out_flags |= ROGUE_TRANSFER_FLAGS_SINGLE_CORE;
+
+ return out_flags;
+}
+
+static int
+pvr_transfer_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ struct rogue_fwif_cmd_transfer *cmd;
+ int err;
+
+ if (args->flags & ~DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (job->ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG)
+ return -EINVAL;
+
+ job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D;
+ err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream,
+ args->cmd_stream, args->cmd_stream_len);
+ if (err)
+ return err;
+
+ cmd = job->cmd;
+ cmd->common.frame_num = 0;
+ cmd->flags = convert_transfer_flags(args->flags);
+ return 0;
+}
+
+static int
+pvr_job_fw_cmd_init(struct pvr_job *job,
+ struct drm_pvr_job *args)
+{
+ switch (args->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return pvr_geom_job_fw_cmd_init(job, args);
+
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return pvr_frag_job_fw_cmd_init(job, args);
+
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return pvr_compute_job_fw_cmd_init(job, args);
+
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return pvr_transfer_job_fw_cmd_init(job, args);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * struct pvr_job_data - Helper container for pairing jobs with the
+ * sync_ops supplied for them by the user.
+ */
+struct pvr_job_data {
+ /** @job: Pointer to the job. */
+ struct pvr_job *job;
+
+ /** @sync_ops: Pointer to the sync_ops associated with @job. */
+ struct drm_pvr_sync_op *sync_ops;
+
+ /** @sync_op_count: Number of members of @sync_ops. */
+ u32 sync_op_count;
+};
+
+/**
+ * prepare_job_syncs() - Prepare all sync objects for a single job.
+ * @pvr_file: PowerVR file.
+ * @job_data: Precreated job and sync_ops array.
+ * @signal_array: xarray to receive signal sync objects.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_sync_signal_array_collect_ops(),
+ * pvr_sync_add_deps_to_job(), drm_sched_job_add_resv_dependencies() or
+ * pvr_sync_signal_array_update_fences().
+ */
+static int
+prepare_job_syncs(struct pvr_file *pvr_file,
+ struct pvr_job_data *job_data,
+ struct xarray *signal_array)
+{
+ struct dma_fence *done_fence;
+ int err = pvr_sync_signal_array_collect_ops(signal_array,
+ from_pvr_file(pvr_file),
+ job_data->sync_op_count,
+ job_data->sync_ops);
+
+ if (err)
+ return err;
+
+ err = pvr_sync_add_deps_to_job(pvr_file, &job_data->job->base,
+ job_data->sync_op_count,
+ job_data->sync_ops, signal_array);
+ if (err)
+ return err;
+
+ if (job_data->job->hwrt) {
+ /* The geometry job writes the HWRT region headers, which are
+ * then read by the fragment job.
+ */
+ struct drm_gem_object *obj =
+ gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem);
+ enum dma_resv_usage usage =
+ dma_resv_usage_rw(job_data->job->type ==
+ DRM_PVR_JOB_TYPE_GEOMETRY);
+
+ dma_resv_lock(obj->resv, NULL);
+ err = drm_sched_job_add_resv_dependencies(&job_data->job->base,
+ obj->resv, usage);
+ dma_resv_unlock(obj->resv);
+ if (err)
+ return err;
+ }
+
+ /* We need to arm the job to get the job done fence. */
+ done_fence = pvr_queue_job_arm(job_data->job);
+
+ err = pvr_sync_signal_array_update_fences(signal_array,
+ job_data->sync_op_count,
+ job_data->sync_ops,
+ done_fence);
+ return err;
+}
+
+/**
+ * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs.
+ * @pvr_file: PowerVR file.
+ * @job_data: Array of precreated jobs and their sync_ops.
+ * @job_count: Number of jobs.
+ * @signal_array: xarray to receive signal sync objects.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by prepare_job_syncs().
+ */
+static int
+prepare_job_syncs_for_each(struct pvr_file *pvr_file,
+ struct pvr_job_data *job_data,
+ u32 *job_count,
+ struct xarray *signal_array)
+{
+ for (u32 i = 0; i < *job_count; i++) {
+ int err = prepare_job_syncs(pvr_file, &job_data[i],
+ signal_array);
+
+ if (err) {
+ *job_count = i;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct pvr_job *
+create_job(struct pvr_device *pvr_dev,
+ struct pvr_file *pvr_file,
+ struct drm_pvr_job *args)
+{
+ struct pvr_job *job = NULL;
+ int err;
+
+ if (!args->cmd_stream || !args->cmd_stream_len)
+ return ERR_PTR(-EINVAL);
+
+ if (args->type != DRM_PVR_JOB_TYPE_GEOMETRY &&
+ args->type != DRM_PVR_JOB_TYPE_FRAGMENT &&
+ (args->hwrt.set_handle || args->hwrt.data_index))
+ return ERR_PTR(-EINVAL);
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&job->ref_count);
+ job->type = args->type;
+ job->pvr_dev = pvr_dev;
+
+ err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_put_job;
+
+ job->ctx = pvr_context_lookup(pvr_file, args->context_handle);
+ if (!job->ctx) {
+ err = -EINVAL;
+ goto err_put_job;
+ }
+
+ if (args->hwrt.set_handle) {
+ job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle,
+ args->hwrt.data_index);
+ if (!job->hwrt) {
+ err = -EINVAL;
+ goto err_put_job;
+ }
+ }
+
+ err = pvr_job_fw_cmd_init(job, args);
+ if (err)
+ goto err_put_job;
+
+ err = pvr_queue_job_init(job);
+ if (err)
+ goto err_put_job;
+
+ return job;
+
+err_put_job:
+ pvr_job_put(job);
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_job_data_fini() - Cleanup all allocs used to set up job submission.
+ * @job_data: Job data array.
+ * @job_count: Number of members of @job_data.
+ */
+static void
+pvr_job_data_fini(struct pvr_job_data *job_data, u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++) {
+ pvr_job_put(job_data[i].job);
+ kvfree(job_data[i].sync_ops);
+ }
+}
+
+/**
+ * pvr_job_data_init() - Init an array of created jobs, associating them with
+ * the appropriate sync_ops args, which will be copied in.
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_file: Pointer to PowerVR file structure.
+ * @job_args: Job args array copied from user.
+ * @job_count: Number of members of @job_args.
+ * @job_data_out: Job data array.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by create_job() or PVR_UOBJ_GET_ARRAY().
+ */
+static int pvr_job_data_init(struct pvr_device *pvr_dev,
+ struct pvr_file *pvr_file,
+ struct drm_pvr_job *job_args,
+ u32 *job_count,
+ struct pvr_job_data *job_data_out)
+{
+ int err = 0, i = 0;
+
+ for (; i < *job_count; i++) {
+ job_data_out[i].job =
+ create_job(pvr_dev, pvr_file, &job_args[i]);
+ err = PTR_ERR_OR_ZERO(job_data_out[i].job);
+
+ if (err) {
+ *job_count = i;
+ job_data_out[i].job = NULL;
+ goto err_cleanup;
+ }
+
+ err = PVR_UOBJ_GET_ARRAY(job_data_out[i].sync_ops,
+ &job_args[i].sync_ops);
+ if (err) {
+ *job_count = i;
+
+ /* Ensure the job created above is also cleaned up. */
+ i++;
+ goto err_cleanup;
+ }
+
+ job_data_out[i].sync_op_count = job_args[i].sync_ops.count;
+ }
+
+ return 0;
+
+err_cleanup:
+ pvr_job_data_fini(job_data_out, i);
+
+ return err;
+}
+
+static void
+push_jobs(struct pvr_job_data *job_data, u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++)
+ pvr_queue_job_push(job_data[i].job);
+}
+
+static int
+prepare_fw_obj_resv(struct drm_exec *exec, struct pvr_fw_object *fw_obj)
+{
+ return drm_exec_prepare_obj(exec, gem_from_pvr_gem(fw_obj->gem), 1);
+}
+
+static int
+jobs_lock_all_objs(struct drm_exec *exec, struct pvr_job_data *job_data,
+ u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++) {
+ struct pvr_job *job = job_data[i].job;
+
+		/* Grab a lock on the context, to guard against
+ * concurrent submission to the same queue.
+ */
+ int err = drm_exec_lock_obj(exec,
+ gem_from_pvr_gem(job->ctx->fw_obj->gem));
+
+ if (err)
+ return err;
+
+ if (job->hwrt) {
+ err = prepare_fw_obj_resv(exec,
+ job->hwrt->fw_obj);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+prepare_job_resvs_for_each(struct drm_exec *exec, struct pvr_job_data *job_data,
+ u32 job_count)
+{
+ drm_exec_until_all_locked(exec) {
+ int err = jobs_lock_all_objs(exec, job_data, job_count);
+
+ drm_exec_retry_on_contention(exec);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+update_job_resvs(struct pvr_job *job)
+{
+ if (job->hwrt) {
+ enum dma_resv_usage usage = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
+ struct drm_gem_object *obj = gem_from_pvr_gem(job->hwrt->fw_obj->gem);
+
+ dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage);
+ }
+}
+
+static void
+update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count)
+{
+ for (u32 i = 0; i < job_count; i++)
+ update_job_resvs(job_data[i].job);
+}
+
+static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
+{
+ struct pvr_job *geom_job = a, *frag_job = b;
+ struct dma_fence *fence;
+ unsigned long index;
+
+ /* Geometry and fragment jobs can be combined if they are queued to the
+ * same context and targeting the same HWRT.
+ */
+ if (a->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
+ b->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
+ a->ctx != b->ctx ||
+ a->hwrt != b->hwrt)
+ return false;
+
+ xa_for_each(&frag_job->base.dependencies, index, fence) {
+ /* We combine when we see an explicit geom -> frag dep. */
+ if (&geom_job->base.s_fence->scheduled == fence)
+ return true;
+ }
+
+ return false;
+}
+
+static struct dma_fence *
+get_last_queued_job_scheduled_fence(struct pvr_queue *queue,
+ struct pvr_job_data *job_data,
+ u32 cur_job_pos)
+{
+ /* We iterate over the current job array in reverse order to grab the
+ * last to-be-queued job targeting the same queue.
+ */
+ for (u32 i = cur_job_pos; i > 0; i--) {
+ struct pvr_job *job = job_data[i - 1].job;
+
+ if (job->ctx == queue->ctx && job->type == queue->type)
+ return dma_fence_get(&job->base.s_fence->scheduled);
+ }
+
+ /* If we didn't find any, we just return the last queued job scheduled
+ * fence attached to the queue.
+ */
+ return dma_fence_get(queue->last_queued_job_scheduled_fence);
+}
+
+static int
+pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
+{
+ for (u32 i = 0; i < *job_count - 1; i++) {
+ struct pvr_job *geom_job = job_data[i].job;
+ struct pvr_job *frag_job = job_data[i + 1].job;
+ struct pvr_queue *frag_queue;
+ struct dma_fence *f;
+
+ if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job))
+ continue;
+
+ /* The fragment job will be submitted by the geometry queue. We
+ * need to make sure it comes after all the other fragment jobs
+ * queued before it.
+ */
+ frag_queue = pvr_context_get_queue_for_job(frag_job->ctx,
+ frag_job->type);
+ f = get_last_queued_job_scheduled_fence(frag_queue, job_data,
+ i);
+ if (f) {
+ int err = drm_sched_job_add_dependency(&geom_job->base,
+ f);
+ if (err) {
+ *job_count = i;
+ return err;
+ }
+ }
+
+ /* The KCCB slot will be reserved by the geometry job, so we can
+ * drop the KCCB fence on the fragment job.
+ */
+ pvr_kccb_fence_put(frag_job->kccb_fence);
+ frag_job->kccb_fence = NULL;
+
+ geom_job->paired_job = frag_job;
+ frag_job->paired_job = geom_job;
+
+ /* Skip the fragment job we just paired to the geometry job. */
+ i++;
+ }
+
+ return 0;
+}
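For pairing to happen, a submission must place the fragment job directly after its geometry job in the jobs array, on the same context and HWRT, with an explicit geometry-to-fragment dependency expressed through the jobs' sync_ops. A hypothetical userspace-side array (handle values, command streams and sync_ops elided):

	struct drm_pvr_job jobs[2] = {
		{
			.type = DRM_PVR_JOB_TYPE_GEOMETRY,
			.context_handle = ctx_handle,
			.hwrt = { .set_handle = hwrt_handle, .data_index = 0 },
			/* .cmd_stream, .cmd_stream_len, .sync_ops elided */
		},
		{
			.type = DRM_PVR_JOB_TYPE_FRAGMENT,
			.context_handle = ctx_handle,
			.hwrt = { .set_handle = hwrt_handle, .data_index = 0 },
		},
	};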
+
+/**
+ * pvr_submit_jobs() - Submit jobs to the GPU
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_file: Pointer to PowerVR file structure.
+ * @args: Ioctl args.
+ *
+ * Jobs are pushed to their queues and executed asynchronously; the GPU is not
+ * guaranteed to be idle on return. Completion is reported through the signal
+ * sync operations supplied with each job.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EFAULT if arguments cannot be copied from user space, or
+ * * -%EINVAL on invalid arguments, or
+ * * Any other error.
+ */
+int
+pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_submit_jobs_args *args)
+{
+ struct pvr_job_data *job_data = NULL;
+ struct drm_pvr_job *job_args;
+ struct xarray signal_array;
+ u32 jobs_alloced = 0;
+ struct drm_exec exec;
+ int err;
+
+ if (!args->jobs.count)
+ return -EINVAL;
+
+ err = PVR_UOBJ_GET_ARRAY(job_args, &args->jobs);
+ if (err)
+ return err;
+
+ job_data = kvmalloc_array(args->jobs.count, sizeof(*job_data),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job_data) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = pvr_job_data_init(pvr_dev, pvr_file, job_args, &args->jobs.count,
+ job_data);
+ if (err)
+ goto out_free;
+
+ jobs_alloced = args->jobs.count;
+
+ /*
+ * Flush MMU if needed - this has been deferred until now to avoid
+ * overuse of this expensive operation.
+ */
+ err = pvr_mmu_flush_exec(pvr_dev, false);
+ if (err)
+ goto out_job_data_cleanup;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0);
+
+ xa_init_flags(&signal_array, XA_FLAGS_ALLOC);
+
+ err = prepare_job_syncs_for_each(pvr_file, job_data, &args->jobs.count,
+ &signal_array);
+ if (err)
+ goto out_exec_fini;
+
+ err = prepare_job_resvs_for_each(&exec, job_data, args->jobs.count);
+ if (err)
+ goto out_exec_fini;
+
+ err = pvr_jobs_link_geom_frag(job_data, &args->jobs.count);
+ if (err)
+ goto out_exec_fini;
+
+	/* Anything after this point must succeed, because we start exposing
+	 * the job finished fences to the outside world.
+ */
+ update_job_resvs_for_each(job_data, args->jobs.count);
+ push_jobs(job_data, args->jobs.count);
+ pvr_sync_signal_array_push_fences(&signal_array);
+ err = 0;
+
+out_exec_fini:
+ drm_exec_fini(&exec);
+ pvr_sync_signal_array_cleanup(&signal_array);
+
+out_job_data_cleanup:
+ pvr_job_data_fini(job_data, jobs_alloced);
+
+out_free:
+ kvfree(job_data);
+ kvfree(job_args);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_job.h b/drivers/gpu/drm/imagination/pvr_job.h
new file mode 100644
index 000000000..0ca003c5c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_job.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_JOB_H
+#define PVR_JOB_H
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/kref.h>
+#include <linux/types.h>
+
+#include <drm/drm_gem.h>
+#include <drm/gpu_scheduler.h>
+
+#include "pvr_power.h"
+
+/* Forward declaration from "pvr_context.h". */
+struct pvr_context;
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declarations from "pvr_hwrt.h". */
+struct pvr_hwrt_data;
+
+/* Forward declaration from "pvr_queue.h". */
+struct pvr_queue;
+
+struct pvr_job {
+ /** @base: drm_sched_job object. */
+ struct drm_sched_job base;
+
+ /** @ref_count: Refcount for job. */
+ struct kref ref_count;
+
+ /** @type: Type of job. */
+ enum drm_pvr_job_type type;
+
+ /** @id: Job ID number. */
+ u32 id;
+
+ /**
+ * @paired_job: Job paired to this job.
+ *
+ * This field is only meaningful for geometry and fragment jobs.
+ *
+ * Paired jobs are executed on the same context, and need to be submitted
+ * atomically to the FW, to make sure the partial render logic has a
+ * fragment job to execute when the Parameter Manager runs out of memory.
+ *
+ * The geometry job should point to the fragment job it's paired with,
+ * and the fragment job should point to the geometry job it's paired with.
+ */
+ struct pvr_job *paired_job;
+
+ /** @cccb_fence: Fence used to wait for CCCB space. */
+ struct dma_fence *cccb_fence;
+
+ /** @kccb_fence: Fence used to wait for KCCB space. */
+ struct dma_fence *kccb_fence;
+
+ /** @done_fence: Fence to signal when the job is done. */
+ struct dma_fence *done_fence;
+
+ /** @pvr_dev: Device pointer. */
+ struct pvr_device *pvr_dev;
+
+ /** @ctx: Pointer to owning context. */
+ struct pvr_context *ctx;
+
+ /** @cmd: Command data. Format depends on @type. */
+ void *cmd;
+
+ /** @cmd_len: Length of command data, in bytes. */
+ u32 cmd_len;
+
+ /**
+ * @fw_ccb_cmd_type: Firmware CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
+ */
+ u32 fw_ccb_cmd_type;
+
+ /** @hwrt: HWRT object. Will be NULL for compute and transfer jobs. */
+ struct pvr_hwrt_data *hwrt;
+
+ /**
+ * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until
+ * the job is done.
+ */
+ bool has_pm_ref;
+};
+
+/**
+ * pvr_job_get() - Take additional reference on job.
+ * @job: Job pointer.
+ *
+ * Call pvr_job_put() to release.
+ *
+ * Returns:
+ * * The requested job on success, or
+ * * %NULL if no job pointer passed.
+ */
+static __always_inline struct pvr_job *
+pvr_job_get(struct pvr_job *job)
+{
+ if (job)
+ kref_get(&job->ref_count);
+
+ return job;
+}
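+
+/*
+ * Illustrative sketch only (not driver code): a caller that stores a job
+ * pointer beyond its current scope takes a reference and balances it with
+ * pvr_job_put() once the pointer is dropped:
+ *
+ *   holder->job = pvr_job_get(job);
+ *   ...
+ *   pvr_job_put(holder->job);
+ *
+ * "holder" is a hypothetical owning structure named purely for illustration.
+ */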
+
+void pvr_job_put(struct pvr_job *job);
+
+/**
+ * pvr_job_release_pm_ref() - Release the PM ref if the job acquired it.
+ * @job: The job to release the PM ref on.
+ */
+static __always_inline void
+pvr_job_release_pm_ref(struct pvr_job *job)
+{
+ if (job->has_pm_ref) {
+ pvr_power_put(job->pvr_dev);
+ job->has_pm_ref = false;
+ }
+}
+
+/**
+ * pvr_job_get_pm_ref() - Get a PM ref and attach it to the job.
+ * @job: The job to attach the PM ref to.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_power_get() otherwise.
+ */
+static __always_inline int
+pvr_job_get_pm_ref(struct pvr_job *job)
+{
+ int err;
+
+ if (job->has_pm_ref)
+ return 0;
+
+ err = pvr_power_get(job->pvr_dev);
+ if (!err)
+ job->has_pm_ref = true;
+
+ return err;
+}
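+
+/*
+ * Illustrative pairing of the two helpers above (a sketch, not driver code):
+ *
+ *   err = pvr_job_get_pm_ref(job);
+ *   if (err)
+ *           return err;
+ *   ... hand the job to the firmware ...
+ *   ... once the job is done: pvr_job_release_pm_ref(job); ...
+ *
+ * Both helpers check @has_pm_ref first, so they are idempotent and error
+ * paths may call pvr_job_release_pm_ref() unconditionally.
+ */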
+
+int pvr_job_wait_first_non_signaled_native_dep(struct pvr_job *job);
+
+bool pvr_job_non_native_deps_done(struct pvr_job *job);
+
+int pvr_job_fits_in_cccb(struct pvr_job *job, unsigned long native_dep_count);
+
+void pvr_job_submit(struct pvr_job *job);
+
+int pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
+ struct drm_pvr_ioctl_submit_jobs_args *args);
+
+#endif /* PVR_JOB_H */
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
new file mode 100644
index 000000000..4fe70610e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -0,0 +1,2640 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_mmu.h"
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_mmu_defs.h"
+
+#include <drm/drm_drv.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/kmemleak.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+
+#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
+#define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
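+
+/*
+ * Worked example (illustrative): PVR_SHIFT_FROM_SIZE(SZ_4K) evaluates to 12,
+ * since __builtin_ctzll(0x1000) counts the twelve trailing zero bits, and
+ * PVR_MASK_FROM_SIZE(SZ_4K) evaluates to 0xfffffffffffff000, masking off the
+ * offset bits within a 4KiB page.
+ */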
+
+/*
+ * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently
+ * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both
+ * ensures that the selected host page size corresponds to a valid device page
+ * size and sets up values needed by the MMU code below.
+ */
+#if (PVR_DEVICE_PAGE_SIZE == SZ_4K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_4KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_16K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_16KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_64K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_64KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_256K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_256KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_1M)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_1MB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_2M)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_2MB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK
+#else
+# error Unsupported device page size PVR_DEVICE_PAGE_SIZE
+#endif
+
+#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X \
+ (ROGUE_MMUCTRL_ENTRIES_PT_VALUE >> \
+ (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
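+
+/*
+ * Worked example (illustrative, assuming ROGUE_MMUCTRL_ENTRIES_PT_VALUE is
+ * 512 as per the 4KiB row of the table under struct
+ * pvr_page_table_l1_entry_raw below): with 4KiB device pages the shift is
+ * zero and all 512 entries remain, while with 2MiB device pages the count
+ * becomes 512 >> (21 - 12) == 1, matching the documented L0 table sizes.
+ */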
+
+enum pvr_mmu_sync_level {
+ PVR_MMU_SYNC_LEVEL_NONE = -1,
+ PVR_MMU_SYNC_LEVEL_0 = 0,
+ PVR_MMU_SYNC_LEVEL_1 = 1,
+ PVR_MMU_SYNC_LEVEL_2 = 2,
+};
+
+#define PVR_MMU_SYNC_LEVEL_0_FLAGS (ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT | \
+ ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT | \
+ ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB)
+#define PVR_MMU_SYNC_LEVEL_1_FLAGS (PVR_MMU_SYNC_LEVEL_0_FLAGS | ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD)
+#define PVR_MMU_SYNC_LEVEL_2_FLAGS (PVR_MMU_SYNC_LEVEL_1_FLAGS | ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC)
+
+/**
+ * pvr_mmu_set_flush_flags() - Set MMU cache flush flags for next call to
+ * pvr_mmu_flush_exec().
+ * @pvr_dev: Target PowerVR device.
+ * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
+ *
+ * This function must be called following any possible change to the MMU page
+ * tables.
+ */
+static void pvr_mmu_set_flush_flags(struct pvr_device *pvr_dev, u32 flags)
+{
+ atomic_fetch_or(flags, &pvr_dev->mmu_flush_cache_flags);
+}
+
+/**
+ * pvr_mmu_flush_request_all() - Request flush of all MMU caches when
+ * subsequently calling pvr_mmu_flush_exec().
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must be called following any possible change to the MMU page
+ * tables.
+ */
+void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev)
+{
+ pvr_mmu_set_flush_flags(pvr_dev, PVR_MMU_SYNC_LEVEL_2_FLAGS);
+}
+
+/**
+ * pvr_mmu_flush_exec() - Execute a flush of all MMU caches previously
+ * requested.
+ * @pvr_dev: Target PowerVR device.
+ * @wait: Do not return until the flush is completed.
+ *
+ * This function must be called prior to submitting any new GPU job. The flush
+ * will complete before the jobs are scheduled, so this can be called once after
+ * a series of maps. However, a single unmap should always be immediately
+ * followed by a flush, and that flush should be explicitly waited on by
+ * setting @wait.
+ *
+ * Since a failure to flush the MMU caches risks memory corruption, the GPU
+ * device is marked as lost if the flush fails (implying the firmware is not
+ * responding).
+ *
+ * Returns:
+ * * 0 on success when @wait is true, or
+ * * -%EIO if the device is unavailable, or
+ * * Any error encountered while submitting the flush command via the KCCB.
+ */
+int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait)
+{
+ struct rogue_fwif_kccb_cmd cmd_mmu_cache = {};
+ struct rogue_fwif_mmucachedata *cmd_mmu_cache_data =
+ &cmd_mmu_cache.cmd_data.mmu_cache_data;
+ int err = 0;
+ u32 slot;
+ int idx;
+
+ if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx))
+ return -EIO;
+
+ /* Can't flush MMU if the firmware hasn't booted yet. */
+ if (!pvr_dev->fw_dev.booted)
+ goto err_drm_dev_exit;
+
+ cmd_mmu_cache_data->cache_flags =
+ atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0);
+
+ if (!cmd_mmu_cache_data->cache_flags)
+ goto err_drm_dev_exit;
+
+ cmd_mmu_cache.cmd_type = ROGUE_FWIF_KCCB_CMD_MMUCACHE;
+
+ pvr_fw_object_get_fw_addr(pvr_dev->fw_dev.mem.mmucache_sync_obj,
+ &cmd_mmu_cache_data->mmu_cache_sync_fw_addr);
+ cmd_mmu_cache_data->mmu_cache_sync_update_value = 0;
+
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot);
+ if (err)
+ goto err_reset_and_retry;
+
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+ if (err)
+ goto err_reset_and_retry;
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_reset_and_retry:
+ /*
+ * Flush command failure is most likely the result of a firmware lockup. Hard
+ * reset the GPU and retry.
+ */
+ err = pvr_power_reset(pvr_dev, true);
+ if (err)
+ goto err_drm_dev_exit; /* Device is lost. */
+
+ /* Retry sending flush request. */
+ err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot);
+ if (err) {
+ pvr_device_lost(pvr_dev);
+ goto err_drm_dev_exit;
+ }
+
+ if (wait) {
+ err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+ if (err)
+ pvr_device_lost(pvr_dev);
+ }
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
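+
+/*
+ * Typical call pattern (a sketch mirroring the use in pvr_submit_jobs()):
+ * page table updates merely accumulate flush flags, and a single deferred
+ * flush is executed before new work reaches the GPU:
+ *
+ *   ... perform a series of map operations ...
+ *   err = pvr_mmu_flush_exec(pvr_dev, false);
+ *   if (err)
+ *           goto out_cleanup;
+ *   ... submit jobs ...
+ *
+ * "out_cleanup" is a hypothetical error label, named for illustration.
+ */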
+
+/**
+ * DOC: PowerVR Virtual Memory Handling
+ */
+/**
+ * DOC: PowerVR Virtual Memory Handling (constants)
+ *
+ * .. c:macro:: PVR_IDX_INVALID
+ *
+ * Default value for a u16-based index.
+ *
+ * This value cannot be zero, since zero is a valid index value.
+ */
+#define PVR_IDX_INVALID ((u16)(-1))
+
+/**
+ * DOC: MMU backing pages
+ */
+/**
+ * DOC: MMU backing pages (constants)
+ *
+ * .. c:macro:: PVR_MMU_BACKING_PAGE_SIZE
+ *
+ * Page size of a PowerVR device's integrated MMU. The CPU page size must be
+ * at least as large as this value for the current implementation; this is
+ * checked at compile-time.
+ */
+#define PVR_MMU_BACKING_PAGE_SIZE SZ_4K
+static_assert(PAGE_SIZE >= PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_mmu_backing_page - Represents a single page used to back a page
+ * table of any level.
+ * @dma_addr: DMA address of this page.
+ * @host_ptr: CPU address of this page.
+ * @pvr_dev: The PowerVR device to which this page is associated. **For
+ * internal use only.**
+ */
+struct pvr_mmu_backing_page {
+ dma_addr_t dma_addr;
+ void *host_ptr;
+/* private: internal use only */
+ struct page *raw_page;
+ struct pvr_device *pvr_dev;
+};
+
+/**
+ * pvr_mmu_backing_page_init() - Initialize an MMU backing page.
+ * @page: Target backing page.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function performs three distinct operations:
+ *
+ * 1. Allocate a single page,
+ * 2. Map the page to the CPU, and
+ * 3. Map the page to DMA-space.
+ *
+ * It is expected that @page be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%ENOMEM if allocation of the backing page or mapping of the backing
+ * page to DMA fails.
+ */
+static int
+pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page,
+ struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ struct page *raw_page;
+ int err;
+
+ dma_addr_t dma_addr;
+ void *host_ptr;
+
+ raw_page = alloc_page(__GFP_ZERO | GFP_KERNEL);
+ if (!raw_page)
+ return -ENOMEM;
+
+ host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!host_ptr) {
+ err = -ENOMEM;
+ goto err_free_page;
+ }
+
+ dma_addr = dma_map_page(dev, raw_page, 0, PVR_MMU_BACKING_PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ err = -ENOMEM;
+ goto err_unmap_page;
+ }
+
+ page->dma_addr = dma_addr;
+ page->host_ptr = host_ptr;
+ page->pvr_dev = pvr_dev;
+ page->raw_page = raw_page;
+ kmemleak_alloc(page->host_ptr, PAGE_SIZE, 1, GFP_KERNEL);
+
+ return 0;
+
+err_unmap_page:
+ vunmap(host_ptr);
+
+err_free_page:
+ __free_page(raw_page);
+
+ return err;
+}
+
+/**
+ * pvr_mmu_backing_page_fini() - Teardown an MMU backing page.
+ * @page: Target backing page.
+ *
+ * This function performs the mirror operations to pvr_mmu_backing_page_init(),
+ * in reverse order:
+ *
+ * 1. Unmap the page from DMA-space,
+ * 2. Unmap the page from the CPU, and
+ * 3. Free the page.
+ *
+ * It also zeros @page.
+ *
+ * It is a no-op to call this function a second (or further) time on any @page.
+ */
+static void
+pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page)
+{
+ struct device *dev;
+
+ /* Do nothing if no allocation is present. */
+ if (!page->pvr_dev)
+ return;
+
+ dev = from_pvr_device(page->pvr_dev)->dev;
+
+ dma_unmap_page(dev, page->dma_addr, PVR_MMU_BACKING_PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ kmemleak_free(page->host_ptr);
+ vunmap(page->host_ptr);
+
+ __free_page(page->raw_page);
+
+ memset(page, 0, sizeof(*page));
+}
+
+/**
+ * pvr_mmu_backing_page_sync() - Flush an MMU backing page from the CPU to the
+ * device.
+ * @page: Target backing page.
+ * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
+ *
+ * .. caution::
+ *
+ * **This is potentially an expensive function call.** Only call
+ * pvr_mmu_backing_page_sync() once you're sure you have no more changes to
+ * make to the backing page in the immediate future.
+ */
+static void
+pvr_mmu_backing_page_sync(struct pvr_mmu_backing_page *page, u32 flags)
+{
+ struct pvr_device *pvr_dev = page->pvr_dev;
+ struct device *dev;
+
+ /*
+ * Do nothing if no allocation is present. This may be the case if
+ * we are unmapping pages.
+ */
+ if (!pvr_dev)
+ return;
+
+ dev = from_pvr_device(pvr_dev)->dev;
+
+ dma_sync_single_for_device(dev, page->dma_addr,
+ PVR_MMU_BACKING_PAGE_SIZE, DMA_TO_DEVICE);
+
+ pvr_mmu_set_flush_flags(pvr_dev, flags);
+}
+
+/**
+ * DOC: Raw page tables
+ */
+
+#define PVR_PAGE_TABLE_TYPEOF_ENTRY(level_) \
+ typeof_member(struct pvr_page_table_l##level_##_entry_raw, val)
+
+#define PVR_PAGE_TABLE_FIELD_GET(level_, name_, field_, entry_) \
+ (((entry_).val & \
+ ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK) >> \
+ ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT)
+
+#define PVR_PAGE_TABLE_FIELD_PREP(level_, name_, field_, val_) \
+ ((((PVR_PAGE_TABLE_TYPEOF_ENTRY(level_))(val_)) \
+ << ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT) & \
+ ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK)
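+
+/*
+ * As an illustration of the token pasting above (not additional driver
+ * code), PVR_PAGE_TABLE_FIELD_GET(2, PC, VALID, entry) expands to:
+ *
+ *   ((entry.val & ~ROGUE_MMUCTRL_PC_DATA_VALID_CLRMSK) >>
+ *    ROGUE_MMUCTRL_PC_DATA_VALID_SHIFT)
+ *
+ * i.e. the CLRMSK/SHIFT register field pairs from pvr_rogue_mmu_defs.h are
+ * selected by splicing together the table level's name and the field name.
+ */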
+
+/**
+ * struct pvr_page_table_l2_entry_raw - A single entry in a level 2 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ * :widths: 1 5
+ * :stub-columns: 1
+ *
+ * * - 31..4
+ * - **Level 1 Page Table Base Address:** Bits 39..12 of the L1
+ * page table base address, which is 4KiB aligned.
+ *
+ * * - 3..2
+ * - *(reserved)*
+ *
+ * * - 1
+ * - **Pending:** When the valid bit is not set, indicates that a valid
+ * entry is pending and the MMU should wait for the driver to map
+ * the entry. This is used to support page demand mapping of
+ * memory.
+ *
+ * * - 0
+ * - **Valid:** Indicates that the entry contains a valid L1 page
+ * table. If the valid bit is not set, then an attempted use of
+ * the page would result in a page fault.
+ */
+struct pvr_page_table_l2_entry_raw {
+ u32 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l2_entry_raw) * 8 ==
+ ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE);
+
+static bool
+pvr_page_table_l2_entry_raw_is_valid(struct pvr_page_table_l2_entry_raw entry)
+{
+ return PVR_PAGE_TABLE_FIELD_GET(2, PC, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l2_entry_raw_set() - Write a valid entry into a raw level 2
+ * page table.
+ * @entry: Target raw level 2 page table entry.
+ * @child_table_dma_addr: DMA address of the level 1 page table to be
+ * associated with @entry.
+ *
+ * When calling this function, @child_table_dma_addr must be a valid DMA
+ * address and a multiple of %ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE.
+ */
+static void
+pvr_page_table_l2_entry_raw_set(struct pvr_page_table_l2_entry_raw *entry,
+ dma_addr_t child_table_dma_addr)
+{
+ child_table_dma_addr >>= ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+
+ WRITE_ONCE(entry->val,
+ PVR_PAGE_TABLE_FIELD_PREP(2, PC, VALID, true) |
+ PVR_PAGE_TABLE_FIELD_PREP(2, PC, ENTRY_PENDING, false) |
+ PVR_PAGE_TABLE_FIELD_PREP(2, PC, PD_BASE, child_table_dma_addr));
+}
+
+static void
+pvr_page_table_l2_entry_raw_clear(struct pvr_page_table_l2_entry_raw *entry)
+{
+ WRITE_ONCE(entry->val, 0);
+}
+
+/**
+ * struct pvr_page_table_l1_entry_raw - A single entry in a level 1 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ * :widths: 1 5
+ * :stub-columns: 1
+ *
+ * * - 63..41
+ * - *(reserved)*
+ *
+ * * - 40
+ * - **Pending:** When the valid bit is not set, indicates that a valid entry
+ * is pending and the MMU should wait for the driver to map the entry.
+ * This is used to support page demand mapping of memory.
+ *
+ * * - 39..5
+ * - **Level 0 Page Table Base Address:** The way this value is
+ * interpreted depends on the page size. Bits not specified in the
+ * table below (e.g. bits 11..5 for page size 4KiB) should be
+ * considered reserved.
+ *
+ * This table shows the bits used in an L1 page table entry to
+ * represent the Physical Table Base Address for a given Page Size.
+ * Since each L1 page table entry covers 2MiB of address space, the
+ * maximum page size is 2MiB.
+ *
+ * .. flat-table::
+ * :widths: 1 1 1 1
+ * :header-rows: 1
+ * :stub-columns: 1
+ *
+ * * - Page size
+ * - L0 page table base address bits
+ * - Number of L0 page table entries
+ * - Size of L0 page table
+ *
+ * * - 4KiB
+ * - 39..12
+ * - 512
+ * - 4KiB
+ *
+ * * - 16KiB
+ * - 39..10
+ * - 128
+ * - 1KiB
+ *
+ * * - 64KiB
+ * - 39..8
+ * - 32
+ * - 256B
+ *
+ * * - 256KiB
+ * - 39..6
+ * - 8
+ * - 64B
+ *
+ * * - 1MiB
+ * - 39..5 (4 = '0')
+ * - 2
+ * - 16B
+ *
+ * * - 2MiB
+ * - 39..5 (4..3 = '00')
+ * - 1
+ * - 8B
+ *
+ * * - 4
+ * - *(reserved)*
+ *
+ * * - 3..1
+ * - **Page Size:** Sets the page size, from 4KiB to 2MiB.
+ *
+ * * - 0
+ * - **Valid:** Indicates that the entry contains a valid L0 page table.
+ * If the valid bit is not set, then an attempted use of the page would
+ * result in a page fault.
+ */
+struct pvr_page_table_l1_entry_raw {
+ u64 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l1_entry_raw) * 8 ==
+ ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE);
+
+static bool
+pvr_page_table_l1_entry_raw_is_valid(struct pvr_page_table_l1_entry_raw entry)
+{
+ return PVR_PAGE_TABLE_FIELD_GET(1, PD, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l1_entry_raw_set() - Write a valid entry into a raw level 1
+ * page table.
+ * @entry: Target raw level 1 page table entry.
+ * @child_table_dma_addr: DMA address of the level 0 page table to be
+ * associated with @entry.
+ *
+ * When calling this function, @child_table_dma_addr must be a valid DMA
+ * address and a multiple of 4 KiB.
+ */
+static void
+pvr_page_table_l1_entry_raw_set(struct pvr_page_table_l1_entry_raw *entry,
+ dma_addr_t child_table_dma_addr)
+{
+ WRITE_ONCE(entry->val,
+ PVR_PAGE_TABLE_FIELD_PREP(1, PD, VALID, true) |
+ PVR_PAGE_TABLE_FIELD_PREP(1, PD, ENTRY_PENDING, false) |
+ PVR_PAGE_TABLE_FIELD_PREP(1, PD, PAGE_SIZE, ROGUE_MMUCTRL_PAGE_SIZE_X) |
+ /*
+ * The use of a 4K-specific macro here is correct. It is
+ * a future optimization to allocate sub-host-page-sized
+ * blocks for individual tables, so the condition that any
+ * page table address is aligned to the size of the
+ * largest (a 4KB) table currently holds.
+ */
+ (child_table_dma_addr & ~ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK));
+}
+
+static void
+pvr_page_table_l1_entry_raw_clear(struct pvr_page_table_l1_entry_raw *entry)
+{
+ WRITE_ONCE(entry->val, 0);
+}
+
+/**
+ * struct pvr_page_table_l0_entry_raw - A single entry in a level 0 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ * :widths: 1 5
+ * :stub-columns: 1
+ *
+ * * - 63
+ * - *(reserved)*
+ *
+ * * - 62
+ * - **PM/FW Protect:** Indicates a protected region which only the
+ * Parameter Manager (PM) or firmware processor can write to.
+ *
+ * * - 61..40
+ * - **VP Page (High):** Virtual-physical page used for Parameter Manager
+ * (PM) memory. This field is only used if the additional level of PB
+ * virtualization is enabled. The VP Page field is needed by the PM in
+ * order to correctly reconstitute the free lists after render
+ * completion. This (High) field holds bits 39..18 of the value; the
+ * Low field holds bits 17..12. Bits 11..0 are always zero because the
+ * value is always aligned to the 4KiB page size.
+ *
+ * * - 39..12
+ * - **Physical Page Address:** The way this value is interpreted depends
+ * on the page size. Bits not specified in the table below (e.g. bits
+ * 20..12 for page size 2MiB) should be considered reserved.
+ *
+ * This table shows the bits used in an L0 page table entry to represent
+ * the Physical Page Address for a given page size (as defined in the
+ * associated L1 page table entry).
+ *
+ * .. flat-table::
+ * :widths: 1 1
+ * :header-rows: 1
+ * :stub-columns: 1
+ *
+ * * - Page size
+ * - Physical address bits
+ *
+ * * - 4KiB
+ * - 39..12
+ *
+ * * - 16KiB
+ * - 39..14
+ *
+ * * - 64KiB
+ * - 39..16
+ *
+ * * - 256KiB
+ * - 39..18
+ *
+ * * - 1MiB
+ * - 39..20
+ *
+ * * - 2MiB
+ * - 39..21
+ *
+ * * - 11..6
+ * - **VP Page (Low):** Continuation of VP Page (High).
+ *
+ * * - 5
+ * - **Pending:** When the valid bit is not set, indicates that a valid entry
+ * is pending and the MMU should wait for the driver to map the entry.
+ * This is used to support page demand mapping of memory.
+ *
+ * * - 4
+ * - **PM Src:** Set on Parameter Manager (PM) allocated page table
+ * entries when indicated by the PM. Note that this bit will only be set
+ * by the PM, not by the device driver.
+ *
+ * * - 3
+ * - **SLC Bypass Control:** Specifies that requests to this page should bypass
+ * the System Level Cache (SLC), if enabled in SLC configuration.
+ *
+ * * - 2
+ * - **Cache Coherency:** Indicates that the page is coherent (i.e. it
+ * does not require a cache flush between operations on the CPU and the
+ * device).
+ *
+ * * - 1
+ * - **Read Only:** If set, this bit indicates that the page is read only.
+ * An attempted write to this page would result in a write-protection
+ * fault.
+ *
+ * * - 0
+ * - **Valid:** Indicates that the entry contains a valid page. If the
+ * valid bit is not set, then an attempted use of the page would result
+ * in a page fault.
+ */
+struct pvr_page_table_l0_entry_raw {
+ u64 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l0_entry_raw) * 8 ==
+ ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE);
+
+/**
+ * struct pvr_page_flags_raw - The configurable flags from a single entry in a
+ * level 0 page table.
+ * @val: The raw value of these flags. Since these are a strict subset of
+ * &struct pvr_page_table_l0_entry_raw, we use that type for our member here.
+ *
+ * The flags stored in this type are: PM/FW Protect; SLC Bypass Control; Cache
+ * Coherency, and Read Only (bits 62, 3, 2 and 1 respectively).
+ *
+ * This type should never be instantiated directly; instead use
+ * pvr_page_flags_raw_create() to ensure only valid bits of @val are set.
+ */
+struct pvr_page_flags_raw {
+ struct pvr_page_table_l0_entry_raw val;
+} __packed;
+static_assert(sizeof(struct pvr_page_flags_raw) ==
+ sizeof(struct pvr_page_table_l0_entry_raw));
+
+static bool
+pvr_page_table_l0_entry_raw_is_valid(struct pvr_page_table_l0_entry_raw entry)
+{
+ return PVR_PAGE_TABLE_FIELD_GET(0, PT, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l0_entry_raw_set() - Write a valid entry into a raw level 0
+ * page table.
+ * @entry: Target raw level 0 page table entry.
+ * @dma_addr: DMA address of the physical page to be associated with @entry.
+ * @flags: Options to be set on @entry.
+ *
+ * When calling this function, @dma_addr must be a valid DMA
+ * address and a multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ * The @flags parameter is directly assigned into @entry. It is the caller's
+ * responsibility to ensure that only bits specified in
+ * &struct pvr_page_flags_raw are set in @flags.
+ */
+static void
+pvr_page_table_l0_entry_raw_set(struct pvr_page_table_l0_entry_raw *entry,
+ dma_addr_t dma_addr,
+ struct pvr_page_flags_raw flags)
+{
+ WRITE_ONCE(entry->val, PVR_PAGE_TABLE_FIELD_PREP(0, PT, VALID, true) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, ENTRY_PENDING, false) |
+ (dma_addr & ~ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK) |
+ flags.val.val);
+}
+
+static void
+pvr_page_table_l0_entry_raw_clear(struct pvr_page_table_l0_entry_raw *entry)
+{
+ WRITE_ONCE(entry->val, 0);
+}
+
+/**
+ * pvr_page_flags_raw_create() - Initialize the flag bits of a raw level 0 page
+ * table entry.
+ * @read_only: This page is read-only (see: Read Only).
+ * @cache_coherent: This page does not require cache flushes (see: Cache
+ * Coherency).
+ * @slc_bypass: This page bypasses the device cache (see: SLC Bypass Control).
+ * @pm_fw_protect: This page is only for use by the firmware or Parameter
+ * Manager (see PM/FW Protect).
+ *
+ * For more details on the use of these four options, see their respective
+ * entries in the table under &struct pvr_page_table_l0_entry_raw.
+ *
+ * Return:
+ * A new &struct pvr_page_flags_raw instance which can be passed directly to
+ * pvr_page_table_l0_entry_raw_set() or pvr_page_table_l0_insert().
+ */
+static struct pvr_page_flags_raw
+pvr_page_flags_raw_create(bool read_only, bool cache_coherent, bool slc_bypass,
+ bool pm_fw_protect)
+{
+ struct pvr_page_flags_raw flags;
+
+ flags.val.val =
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, READ_ONLY, read_only) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, CC, cache_coherent) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, SLC_BYPASS_CTRL, slc_bypass) |
+ PVR_PAGE_TABLE_FIELD_PREP(0, PT, PM_META_PROTECT, pm_fw_protect);
+
+ return flags;
+}
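+
+/*
+ * Minimal usage sketch: an ordinary writable, cached, non-protected page
+ * takes all four options disabled, e.g.
+ *
+ *   struct pvr_page_flags_raw flags =
+ *           pvr_page_flags_raw_create(false, false, false, false);
+ *
+ * which leaves every flag bit clear; pvr_page_table_l0_entry_raw_set() then
+ * contributes the valid bit and the physical page address on top.
+ */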
+
+/**
+ * struct pvr_page_table_l2_raw - The raw data of a level 2 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_MMU_BACKING_PAGE_SIZE.
+ */
+struct pvr_page_table_l2_raw {
+ /** @entries: The raw values of this table. */
+ struct pvr_page_table_l2_entry_raw
+ entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l2_raw) == PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_page_table_l1_raw - The raw data of a level 1 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_MMU_BACKING_PAGE_SIZE.
+ */
+struct pvr_page_table_l1_raw {
+ /** @entries: The raw values of this table. */
+ struct pvr_page_table_l1_entry_raw
+ entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l1_raw) == PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_page_table_l0_raw - The raw data of a level 0 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_MMU_BACKING_PAGE_SIZE.
+ *
+ * .. caution::
+ *
+ * The size of level 0 page tables is variable depending on the page size
+ * specified in the associated level 1 page table entry. Since the device
+ * page size in use is pegged to the host page size, it cannot vary at
+ * runtime. This structure is therefore only defined to contain the required
+ * number of entries for the current device page size. **You should never
+ * read or write beyond the last supported entry.**
+ */
+struct pvr_page_table_l0_raw {
+ /** @entries: The raw values of this table. */
+ struct pvr_page_table_l0_entry_raw
+ entries[ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l0_raw) <= PVR_MMU_BACKING_PAGE_SIZE);
+
+/**
+ * DOC: Mirror page tables
+ */
+
+/*
+ * We pre-declare these types because they cross-depend on pointers to each
+ * other.
+ */
+struct pvr_page_table_l1;
+struct pvr_page_table_l0;
+
+/**
+ * struct pvr_page_table_l2 - A wrapped level 2 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l2_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l2_get_entry_raw().
+ *
+ * A level 2 page table forms the root of the page table tree structure, so
+ * this type has no &parent or &parent_idx members.
+ */
+struct pvr_page_table_l2 {
+ /**
+ * @entries: The children of this node in the page table tree
+ * structure. These are also mirror tables. The indexing of this array
+ * is identical to that of the raw equivalent
+ * (&pvr_page_table_l2_raw.entries).
+ */
+ struct pvr_page_table_l1 *entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE];
+
+ /**
+ * @backing_page: A handle to the memory which holds the raw
+ * equivalent of this table. **For internal use only.**
+ */
+ struct pvr_mmu_backing_page backing_page;
+
+ /**
+ * @entry_count: The current number of valid entries (that we know of)
+ * in this table. This value is essentially a refcount - the table is
+ * destroyed when this value is decremented to zero by
+ * pvr_page_table_l2_remove().
+ */
+ u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l2_init() - Initialize a level 2 page table.
+ * @table: Target level 2 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l2_init(struct pvr_page_table_l2 *table,
+ struct pvr_device *pvr_dev)
+{
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l2_fini() - Teardown a level 2 page table.
+ * @table: Target level 2 page table.
+ *
+ * It is an error to attempt to use @table after calling this function.
+ */
+static void
+pvr_page_table_l2_fini(struct pvr_page_table_l2 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l2_sync() - Flush a level 2 page table from the CPU to the
+ * device.
+ * @table: Target level 2 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l2_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child level 1 page tables of @table also need to be flushed, this should
+ * be done first using pvr_page_table_l1_sync() *before* calling this function.
+ */
+static void
+pvr_page_table_l2_sync(struct pvr_page_table_l2 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_2_FLAGS);
+}
+
+/**
+ * pvr_page_table_l2_get_raw() - Access the raw equivalent of a mirror level 2
+ * page table.
+ * @table: Target level 2 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l2_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l2_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l2_raw *
+pvr_page_table_l2_get_raw(struct pvr_page_table_l2 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l2_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 2 page table.
+ * @table: Target level 2 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 2 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l2_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer.
+ *
+ * Return:
+ * A pointer to the requested raw level 2 page table entry.
+ */
+static struct pvr_page_table_l2_entry_raw *
+pvr_page_table_l2_get_entry_raw(struct pvr_page_table_l2 *table, u16 idx)
+{
+ return &pvr_page_table_l2_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l2_entry_is_valid() - Check if a level 2 page table entry is
+ * marked as valid.
+ * @table: Target level 2 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l2_entry_is_valid(struct pvr_page_table_l2 *table, u16 idx)
+{
+ struct pvr_page_table_l2_entry_raw entry_raw =
+ *pvr_page_table_l2_get_entry_raw(table, idx);
+
+ return pvr_page_table_l2_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_page_table_l1 - A wrapped level 1 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l1_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l1_get_entry_raw().
+ */
+struct pvr_page_table_l1 {
+ /**
+ * @entries: The children of this node in the page table tree
+ * structure. These are also mirror tables. The indexing of this array
+ * is identical to that of the raw equivalent
+ * (&pvr_page_table_l1_raw.entries).
+ */
+ struct pvr_page_table_l0 *entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE];
+
+ /**
+ * @backing_page: A handle to the memory which holds the raw
+ * equivalent of this table. **For internal use only.**
+ */
+ struct pvr_mmu_backing_page backing_page;
+
+ union {
+ /**
+ * @parent: The parent of this node in the page table tree structure.
+ *
+ * This is also a mirror table.
+ *
+ * Only valid when the L1 page table is active. When the L1 page table
+ * has been removed and queued for destruction, the next_free field
+ * should be used instead.
+ */
+ struct pvr_page_table_l2 *parent;
+
+ /**
+ * @next_free: Pointer to the next L1 page table to take/free.
+ *
+ * Used to form a linked list of L1 page tables. This is used
+ * when preallocating tables and when the page table has been
+ * removed and queued for destruction.
+ */
+ struct pvr_page_table_l1 *next_free;
+ };
+
+ /**
+ * @parent_idx: The index of the entry in the parent table (see
+ * @parent) which corresponds to this table.
+ */
+ u16 parent_idx;
+
+ /**
+ * @entry_count: The current number of valid entries (that we know of)
+ * in this table. This value is essentially a refcount - the table is
+ * destroyed when this value is decremented to zero by
+ * pvr_page_table_l1_remove().
+ */
+ u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l1_init() - Initialize a level 1 page table.
+ * @table: Target level 1 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l2_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l1_init(struct pvr_page_table_l1 *table,
+ struct pvr_device *pvr_dev)
+{
+ table->parent_idx = PVR_IDX_INVALID;
+
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l1_free() - Teardown a level 1 page table.
+ * @table: Target level 1 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l2_remove(), which must
+ * be called *before* pvr_page_table_l1_free().
+ */
+static void
+pvr_page_table_l1_free(struct pvr_page_table_l1 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+ kfree(table);
+}
+
+/**
+ * pvr_page_table_l1_sync() - Flush a level 1 page table from the CPU to the
+ * device.
+ * @table: Target level 1 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l1_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child level 0 page tables of @table also need to be flushed, this should
+ * be done first using pvr_page_table_l0_sync() *before* calling this function.
+ */
+static void
+pvr_page_table_l1_sync(struct pvr_page_table_l1 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_1_FLAGS);
+}
+
+/**
+ * pvr_page_table_l1_get_raw() - Access the raw equivalent of a mirror level 1
+ * page table.
+ * @table: Target level 1 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l1_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l1_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l1_raw *
+pvr_page_table_l1_get_raw(struct pvr_page_table_l1 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l1_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 1 page table.
+ * @table: Target level 1 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 1 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l1_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer.
+ *
+ * Return:
+ * A pointer to the requested raw level 1 page table entry.
+ */
+static struct pvr_page_table_l1_entry_raw *
+pvr_page_table_l1_get_entry_raw(struct pvr_page_table_l1 *table, u16 idx)
+{
+ return &pvr_page_table_l1_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l1_entry_is_valid() - Check if a level 1 page table entry is
+ * marked as valid.
+ * @table: Target level 1 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l1_entry_is_valid(struct pvr_page_table_l1 *table, u16 idx)
+{
+ struct pvr_page_table_l1_entry_raw entry_raw =
+ *pvr_page_table_l1_get_entry_raw(table, idx);
+
+ return pvr_page_table_l1_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_page_table_l0 - A wrapped level 0 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l0_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l0_get_entry_raw().
+ *
+ * There is no mirror representation of an individual page, so this type has no
+ * &entries member.
+ */
+struct pvr_page_table_l0 {
+ /**
+ * @backing_page: A handle to the memory which holds the raw
+ * equivalent of this table. **For internal use only.**
+ */
+ struct pvr_mmu_backing_page backing_page;
+
+ union {
+ /**
+ * @parent: The parent of this node in the page table tree structure.
+ *
+ * This is also a mirror table.
+ *
+ * Only valid when the L0 page table is active. When the L0 page table
+ * has been removed and queued for destruction, the next_free field
+ * should be used instead.
+ */
+ struct pvr_page_table_l1 *parent;
+
+ /**
+ * @next_free: Pointer to the next L0 page table to take/free.
+ *
+ * Used to form a linked list of L0 page tables. This is used
+ * when preallocating tables and when the page table has been
+ * removed and queued for destruction.
+ */
+ struct pvr_page_table_l0 *next_free;
+ };
+
+ /**
+ * @parent_idx: The index of the entry in the parent table (see
+ * @parent) which corresponds to this table.
+ */
+ u16 parent_idx;
+
+ /**
+ * @entry_count: The current number of valid entries (that we know of)
+ * in this table. This value is essentially a refcount - the table is
+ * destroyed when this value is decremented to zero by
+ * pvr_page_table_l0_remove().
+ */
+ u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l0_init() - Initialize a level 0 page table.
+ * @table: Target level 0 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l1_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l0_init(struct pvr_page_table_l0 *table,
+ struct pvr_device *pvr_dev)
+{
+ table->parent_idx = PVR_IDX_INVALID;
+
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l0_free() - Teardown a level 0 page table.
+ * @table: Target level 0 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l1_remove(), which must
+ * be called *before* pvr_page_table_l0_free().
+ */
+static void
+pvr_page_table_l0_free(struct pvr_page_table_l0 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+ kfree(table);
+}
+
+/**
+ * pvr_page_table_l0_sync() - Flush a level 0 page table from the CPU to the
+ * device.
+ * @table: Target level 0 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l0_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child pages of @table also need to be flushed, this should be done first
+ * using a DMA sync function (e.g. dma_sync_sg_for_device()) *before* calling
+ * this function.
+ */
+static void
+pvr_page_table_l0_sync(struct pvr_page_table_l0 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_0_FLAGS);
+}
+
+/**
+ * pvr_page_table_l0_get_raw() - Access the raw equivalent of a mirror level 0
+ * page table.
+ * @table: Target level 0 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l0_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l0_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l0_raw *
+pvr_page_table_l0_get_raw(struct pvr_page_table_l0 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l0_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 0 page table.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 0 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l0_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer. This is especially important for level 0 page tables, which
+ * can have a variable number of entries.
+ *
+ * Return:
+ * A pointer to the requested raw level 0 page table entry.
+ */
+static struct pvr_page_table_l0_entry_raw *
+pvr_page_table_l0_get_entry_raw(struct pvr_page_table_l0 *table, u16 idx)
+{
+ return &pvr_page_table_l0_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l0_entry_is_valid() - Check if a level 0 page table entry is
+ * marked as valid.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l0_entry_is_valid(struct pvr_page_table_l0 *table, u16 idx)
+{
+ struct pvr_page_table_l0_entry_raw entry_raw =
+ *pvr_page_table_l0_get_entry_raw(table, idx);
+
+ return pvr_page_table_l0_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_mmu_context - context holding data for operations at page
+ * catalogue level, intended for use with a VM context.
+ */
+struct pvr_mmu_context {
+ /** @pvr_dev: The PVR device associated with the owning VM context. */
+ struct pvr_device *pvr_dev;
+
+ /** @page_table_l2: The MMU table root. */
+ struct pvr_page_table_l2 page_table_l2;
+};
+
+/**
+ * struct pvr_page_table_ptr - A reference to a single physical page as indexed
+ * by the page table structure.
+ *
+ * Intended for embedding in a &struct pvr_mmu_op_context.
+ */
+struct pvr_page_table_ptr {
+ /**
+ * @l1_table: A cached handle to the level 1 page table the
+ * context is currently traversing.
+ */
+ struct pvr_page_table_l1 *l1_table;
+
+ /**
+ * @l0_table: A cached handle to the level 0 page table the
+ * context is currently traversing.
+ */
+ struct pvr_page_table_l0 *l0_table;
+
+ /**
+ * @l2_idx: Index into the level 2 page table the context is
+ * currently referencing.
+ */
+ u16 l2_idx;
+
+ /**
+ * @l1_idx: Index into the level 1 page table the context is
+ * currently referencing.
+ */
+ u16 l1_idx;
+
+ /**
+ * @l0_idx: Index into the level 0 page table the context is
+ * currently referencing.
+ */
+ u16 l0_idx;
+};
+
+/**
+ * struct pvr_mmu_op_context - context holding data for individual
+ * device-virtual mapping operations. Intended for use with a VM bind operation.
+ */
+struct pvr_mmu_op_context {
+ /** @mmu_ctx: The MMU context associated with the owning VM context. */
+ struct pvr_mmu_context *mmu_ctx;
+
+ /** @map: Data specifically for map operations. */
+ struct {
+ /**
+ * @sgt: Scatter gather table containing pages pinned for use by
+ * this context - these are currently pinned when initialising
+ * the VM bind operation.
+ */
+ struct sg_table *sgt;
+
+ /** @sgt_offset: Start address of the device-virtual mapping. */
+ u64 sgt_offset;
+
+ /**
+ * @l1_prealloc_tables: Preallocated l1 page table objects
+ * use by this context when creating a page mapping. Linked list
+ * fully created during initialisation.
+ */
+ struct pvr_page_table_l1 *l1_prealloc_tables;
+
+ /**
+ * @l0_prealloc_tables: Preallocated l0 page table objects
+ * used by this context when creating a page mapping. Linked list
+ * fully created during initialisation.
+ */
+ struct pvr_page_table_l0 *l0_prealloc_tables;
+ } map;
+
+ /** @unmap: Data specifically for unmap operations. */
+ struct {
+ /**
+ * @l1_free_tables: Collects page table objects freed by unmap
+ * ops. Linked list empty at creation.
+ */
+ struct pvr_page_table_l1 *l1_free_tables;
+
+ /**
+ * @l0_free_tables: Collects page table objects freed by unmap
+ * ops. Linked list empty at creation.
+ */
+ struct pvr_page_table_l0 *l0_free_tables;
+ } unmap;
+
+ /**
+ * @curr_page: A reference to a single physical page as indexed by the
+ * page table structure.
+ */
+ struct pvr_page_table_ptr curr_page;
+
+ /**
+ * @sync_level_required: The maximum level of the page table tree
+ * structure which has (possibly) been modified since it was last
+ * flushed to the device.
+ *
+ * This field should only be set with pvr_mmu_op_context_require_sync()
+ * or indirectly by pvr_mmu_op_context_sync_partial().
+ */
+ enum pvr_mmu_sync_level sync_level_required;
+};
+
+/**
+ * pvr_page_table_l2_insert() - Insert an entry referring to a level 1 page
+ * table into a level 2 page table.
+ * @op_ctx: Target MMU op context pointing at the entry to insert the L1 page
+ * table into.
+ * @child_table: Target level 1 page table to be referenced by the new entry.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L2 entry.
+ *
+ * It is the caller's responsibility to execute any memory barriers to ensure
+ * that the creation of @child_table is ordered before the L2 entry is inserted.
+ */
+static void
+pvr_page_table_l2_insert(struct pvr_mmu_op_context *op_ctx,
+ struct pvr_page_table_l1 *child_table)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l2_entry_raw *entry_raw =
+ pvr_page_table_l2_get_entry_raw(l2_table,
+ op_ctx->curr_page.l2_idx);
+
+ pvr_page_table_l2_entry_raw_set(entry_raw,
+ child_table->backing_page.dma_addr);
+
+ child_table->parent = l2_table;
+ child_table->parent_idx = op_ctx->curr_page.l2_idx;
+ l2_table->entries[op_ctx->curr_page.l2_idx] = child_table;
+ ++l2_table->entry_count;
+ op_ctx->curr_page.l1_table = child_table;
+}
+
+/**
+ * pvr_page_table_l2_remove() - Remove a level 1 page table from a level 2 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L2 entry to remove.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L2 entry.
+ */
+static void
+pvr_page_table_l2_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l2_entry_raw *entry_raw =
+ pvr_page_table_l2_get_entry_raw(l2_table,
+ op_ctx->curr_page.l1_table->parent_idx);
+
+ WARN_ON(op_ctx->curr_page.l1_table->parent != l2_table);
+
+ pvr_page_table_l2_entry_raw_clear(entry_raw);
+
+ l2_table->entries[op_ctx->curr_page.l1_table->parent_idx] = NULL;
+ op_ctx->curr_page.l1_table->parent_idx = PVR_IDX_INVALID;
+ op_ctx->curr_page.l1_table->next_free = op_ctx->unmap.l1_free_tables;
+ op_ctx->unmap.l1_free_tables = op_ctx->curr_page.l1_table;
+ op_ctx->curr_page.l1_table = NULL;
+
+ --l2_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l1_insert() - Insert an entry referring to a level 0 page
+ * table into a level 1 page table.
+ * @op_ctx: Target MMU op context pointing at the entry to insert the L0 page
+ * table into.
+ * @child_table: L0 page table to insert.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L1 entry.
+ *
+ * It is the caller's responsibility to execute any memory barriers to ensure
+ * that the creation of @child_table is ordered before the L1 entry is inserted.
+ */
+static void
+pvr_page_table_l1_insert(struct pvr_mmu_op_context *op_ctx,
+ struct pvr_page_table_l0 *child_table)
+{
+ struct pvr_page_table_l1_entry_raw *entry_raw =
+ pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l1_table,
+ op_ctx->curr_page.l1_idx);
+
+ pvr_page_table_l1_entry_raw_set(entry_raw,
+ child_table->backing_page.dma_addr);
+
+ child_table->parent = op_ctx->curr_page.l1_table;
+ child_table->parent_idx = op_ctx->curr_page.l1_idx;
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx] = child_table;
+ ++op_ctx->curr_page.l1_table->entry_count;
+ op_ctx->curr_page.l0_table = child_table;
+}
+
+/**
+ * pvr_page_table_l1_remove() - Remove a level 0 page table from a level 1 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L1 entry to remove.
+ *
+ * If this function results in the L1 table becoming empty, it will be removed
+ * from its parent level 2 page table and destroyed.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L1 entry.
+ */
+static void
+pvr_page_table_l1_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l1_entry_raw *entry_raw =
+ pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l0_table->parent,
+ op_ctx->curr_page.l0_table->parent_idx);
+
+ WARN_ON(op_ctx->curr_page.l0_table->parent !=
+ op_ctx->curr_page.l1_table);
+
+ pvr_page_table_l1_entry_raw_clear(entry_raw);
+
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l0_table->parent_idx] = NULL;
+ op_ctx->curr_page.l0_table->parent_idx = PVR_IDX_INVALID;
+ op_ctx->curr_page.l0_table->next_free = op_ctx->unmap.l0_free_tables;
+ op_ctx->unmap.l0_free_tables = op_ctx->curr_page.l0_table;
+ op_ctx->curr_page.l0_table = NULL;
+
+ if (--op_ctx->curr_page.l1_table->entry_count == 0) {
+ /* Clear the parent L2 page table entry. */
+ if (op_ctx->curr_page.l1_table->parent_idx != PVR_IDX_INVALID)
+ pvr_page_table_l2_remove(op_ctx);
+ }
+}
+
+/**
+ * pvr_page_table_l0_insert() - Insert an entry referring to a physical page
+ * into a level 0 page table.
+ * @op_ctx: Target MMU op context pointing at the L0 entry to insert.
+ * @dma_addr: Target DMA address to be referenced by the new entry.
+ * @flags: Page options to be stored in the new entry.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L0 entry.
+ */
+static void
+pvr_page_table_l0_insert(struct pvr_mmu_op_context *op_ctx,
+ dma_addr_t dma_addr, struct pvr_page_flags_raw flags)
+{
+ struct pvr_page_table_l0_entry_raw *entry_raw =
+ pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx);
+
+ pvr_page_table_l0_entry_raw_set(entry_raw, dma_addr, flags);
+
+ /*
+ * There is no entry to set here - we don't keep a mirror of
+ * individual pages.
+ */
+
+ ++op_ctx->curr_page.l0_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l0_remove() - Remove a physical page from a level 0 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L0 entry to remove.
+ *
+ * If this function results in the L0 table becoming empty, it will be removed
+ * from its parent L1 page table and destroyed.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L0 entry.
+ */
+static void
+pvr_page_table_l0_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l0_entry_raw *entry_raw =
+ pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx);
+
+ pvr_page_table_l0_entry_raw_clear(entry_raw);
+
+ /*
+ * There is no entry to clear here - we don't keep a mirror of
+ * individual pages.
+ */
+
+ if (--op_ctx->curr_page.l0_table->entry_count == 0) {
+ /* Clear the parent L1 page table entry. */
+ if (op_ctx->curr_page.l0_table->parent_idx != PVR_IDX_INVALID)
+ pvr_page_table_l1_remove(op_ctx);
+ }
+}
+
+/**
+ * DOC: Page table index utilities
+ */
+
+/**
+ * pvr_page_table_l2_idx() - Calculate the level 2 page table index for a
+ * device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 2 page table corresponding to @device_addr.
+ */
+static u16
+pvr_page_table_l2_idx(u64 device_addr)
+{
+ return (device_addr & ~ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >>
+ ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+}
+
+/**
+ * pvr_page_table_l1_idx() - Calculate the level 1 page table index for a
+ * device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 1 page table corresponding to @device_addr.
+ */
+static u16
+pvr_page_table_l1_idx(u64 device_addr)
+{
+ return (device_addr & ~ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK) >>
+ ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+}
+
+/**
+ * pvr_page_table_l0_idx() - Calculate the level 0 page table index for a
+ * device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 0 page table corresponding to @device_addr.
+ */
+static u16
+pvr_page_table_l0_idx(u64 device_addr)
+{
+ return (device_addr & ~ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK) >>
+ ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT;
+}
+
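+/*
+ * A minimal sketch (illustrative only) of how the three helpers above
+ * decompose a device-virtual address, assuming the 4KiB device page layout
+ * in which an L2 entry spans 1GiB, an L1 entry spans 2MiB and an L0 entry
+ * spans a single 4KiB page:
+ *
+ *   u64 device_addr = SZ_2M + SZ_4K;
+ *   u16 l2_idx = pvr_page_table_l2_idx(device_addr);  // 0: first 1GiB
+ *   u16 l1_idx = pvr_page_table_l1_idx(device_addr);  // 1: second 2MiB
+ *   u16 l0_idx = pvr_page_table_l0_idx(device_addr);  // 1: second 4KiB page
+ *
+ * Walking consecutive pages increments l0_idx first; when it wraps, l1_idx is
+ * incremented, and so on - the same order implemented by
+ * pvr_mmu_op_context_next_page() further down.
+ */
+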
+/**
+ * DOC: High-level page table operations
+ */
+
+/**
+ * pvr_page_table_l1_get_or_insert() - Retrieves (optionally inserting if
+ * necessary) a level 1 page table from the specified level 2 page table entry.
+ * @op_ctx: Target MMU op context.
+ * @should_insert: [IN] Specifies whether new page tables should be inserted
+ * when empty page table entries are encountered during traversal.
+ *
+ * Return:
+ * * 0 on success, or
+ *
+ * If @should_insert is %false:
+ * * -%ENXIO if a level 1 page table would have been inserted.
+ *
+ * If @should_insert is %true:
+ * * Any error encountered while inserting the level 1 page table.
+ */
+static int
+pvr_page_table_l1_get_or_insert(struct pvr_mmu_op_context *op_ctx,
+ bool should_insert)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l1 *table;
+
+ if (pvr_page_table_l2_entry_is_valid(l2_table,
+ op_ctx->curr_page.l2_idx)) {
+ op_ctx->curr_page.l1_table =
+ l2_table->entries[op_ctx->curr_page.l2_idx];
+ return 0;
+ }
+
+ if (!should_insert)
+ return -ENXIO;
+
+ /* Take a prealloced table. */
+ table = op_ctx->map.l1_prealloc_tables;
+ if (!table)
+ return -ENOMEM;
+
+ /* Pop */
+ op_ctx->map.l1_prealloc_tables = table->next_free;
+ table->next_free = NULL;
+
+ /* Ensure new table is fully written out before adding to L2 page table. */
+ wmb();
+
+ pvr_page_table_l2_insert(op_ctx, table);
+
+ return 0;
+}
+
+/**
+ * pvr_page_table_l0_get_or_insert() - Retrieves (optionally inserting if
+ * necessary) a level 0 page table from the specified level 1 page table entry.
+ * @op_ctx: Target MMU op context.
+ * @should_insert: [IN] Specifies whether new page tables should be inserted
+ * when empty page table entries are encountered during traversal.
+ *
+ * Return:
+ * * 0 on success,
+ *
+ * If @should_insert is %false:
+ * * -%ENXIO if a level 0 page table would have been inserted.
+ *
+ * If @should_insert is %true:
+ * * Any error encountered while inserting the level 0 page table.
+ */
+static int
+pvr_page_table_l0_get_or_insert(struct pvr_mmu_op_context *op_ctx,
+ bool should_insert)
+{
+ struct pvr_page_table_l0 *table;
+
+ if (pvr_page_table_l1_entry_is_valid(op_ctx->curr_page.l1_table,
+ op_ctx->curr_page.l1_idx)) {
+ op_ctx->curr_page.l0_table =
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx];
+ return 0;
+ }
+
+ if (!should_insert)
+ return -ENXIO;
+
+ /* Take a prealloced table. */
+ table = op_ctx->map.l0_prealloc_tables;
+ if (!table)
+ return -ENOMEM;
+
+ /* Pop */
+ op_ctx->map.l0_prealloc_tables = table->next_free;
+ table->next_free = NULL;
+
+ /* Ensure new table is fully written out before adding to L1 page table. */
+ wmb();
+
+ pvr_page_table_l1_insert(op_ctx, table);
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_context_create() - Create an MMU context.
+ * @pvr_dev: PVR device associated with owning VM context.
+ *
+ * Returns:
+ * * Newly created MMU context object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l2_init().
+ */
+struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev)
+{
+ struct pvr_mmu_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ int err;
+
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l2_init(&ctx->page_table_l2, pvr_dev);
+ if (err)
+ return ERR_PTR(err);
+
+ ctx->pvr_dev = pvr_dev;
+
+ return ctx;
+}
+
+/**
+ * pvr_mmu_context_destroy() - Destroy an MMU context.
+ * @ctx: Target MMU context.
+ */
+void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx)
+{
+ pvr_page_table_l2_fini(&ctx->page_table_l2);
+ kfree(ctx);
+}
+
+/**
+ * pvr_mmu_get_root_table_dma_addr() - Get the DMA address of the root of the
+ * page table structure behind a VM context.
+ * @ctx: Target MMU context.
+ */
+dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx)
+{
+ return ctx->page_table_l2.backing_page.dma_addr;
+}
+
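+/*
+ * A sketch of the expected lifecycle, as a caller such as the VM context code
+ * might drive it (error handling abbreviated; not part of the driver):
+ *
+ *   struct pvr_mmu_context *mmu_ctx = pvr_mmu_context_create(pvr_dev);
+ *
+ *   if (IS_ERR(mmu_ctx))
+ *           return PTR_ERR(mmu_ctx);
+ *
+ *   // The root (L2) table address is what the firmware is given when the
+ *   // owning VM context is bound to the GPU.
+ *   dma_addr_t root = pvr_mmu_get_root_table_dma_addr(mmu_ctx);
+ *
+ *   // ... map/unmap via MMU op contexts ...
+ *
+ *   pvr_mmu_context_destroy(mmu_ctx);
+ */
+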
+/**
+ * pvr_page_table_l1_alloc() - Allocate an L1 page table object.
+ * @ctx: MMU context of owning VM context.
+ *
+ * Returns:
+ * * Newly created page table object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l1_init().
+ */
+static struct pvr_page_table_l1 *
+pvr_page_table_l1_alloc(struct pvr_mmu_context *ctx)
+{
+ int err;
+
+ struct pvr_page_table_l1 *table =
+ kzalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l1_init(table, ctx->pvr_dev);
+ if (err) {
+ kfree(table);
+ return ERR_PTR(err);
+ }
+
+ return table;
+}
+
+/**
+ * pvr_page_table_l0_alloc() - Allocate an L0 page table object.
+ * @ctx: MMU context of owning VM context.
+ *
+ * Returns:
+ * * Newly created page table object on success, or
+ * * -%ENOMEM if no memory is available,
+ * * Any error code returned by pvr_page_table_l0_init().
+ */
+static struct pvr_page_table_l0 *
+pvr_page_table_l0_alloc(struct pvr_mmu_context *ctx)
+{
+ int err;
+
+ struct pvr_page_table_l0 *table =
+ kzalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l0_init(table, ctx->pvr_dev);
+ if (err) {
+ kfree(table);
+ return ERR_PTR(err);
+ }
+
+ return table;
+}
+
+/**
+ * pvr_mmu_op_context_require_sync() - Mark an MMU op context as requiring a
+ * sync operation for the referenced page tables up to a specified level.
+ * @op_ctx: Target MMU op context.
+ * @level: Maximum page table level for which a sync is required.
+ */
+static void
+pvr_mmu_op_context_require_sync(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ if (op_ctx->sync_level_required < level)
+ op_ctx->sync_level_required = level;
+}
+
+/**
+ * pvr_mmu_op_context_sync_manual() - Trigger a sync of some or all of the
+ * page tables referenced by an MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @level: Maximum page table level to sync.
+ *
+ * Do not call this function directly. Instead use
+ * pvr_mmu_op_context_sync_partial() which is checked against the current
+ * value of &op_ctx->sync_level_required as set by
+ * pvr_mmu_op_context_require_sync().
+ */
+static void
+pvr_mmu_op_context_sync_manual(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ /*
+ * We sync the page table levels in ascending order (starting from the
+ * leaf node) to ensure consistency.
+ */
+
+ WARN_ON(level < PVR_MMU_SYNC_LEVEL_NONE);
+
+ if (level <= PVR_MMU_SYNC_LEVEL_NONE)
+ return;
+
+ if (op_ctx->curr_page.l0_table)
+ pvr_page_table_l0_sync(op_ctx->curr_page.l0_table);
+
+ if (level < PVR_MMU_SYNC_LEVEL_1)
+ return;
+
+ if (op_ctx->curr_page.l1_table)
+ pvr_page_table_l1_sync(op_ctx->curr_page.l1_table);
+
+ if (level < PVR_MMU_SYNC_LEVEL_2)
+ return;
+
+ pvr_page_table_l2_sync(&op_ctx->mmu_ctx->page_table_l2);
+}
+
+/**
+ * pvr_mmu_op_context_sync_partial() - Trigger a sync of some or all of the
+ * page tables referenced by an MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @level: Requested page table level to sync up to (inclusive).
+ *
+ * If @level is greater than or equal to the maximum level recorded by @op_ctx
+ * as requiring a sync operation, the sync is clamped to that recorded maximum
+ * and the recorded maximum is reset, since everything previously marked as
+ * requiring a sync will have been synced. This makes the call equivalent to
+ * pvr_mmu_op_context_sync().
+ */
+static void
+pvr_mmu_op_context_sync_partial(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ /*
+ * If the requested sync level is greater than or equal to the
+ * currently required sync level, we do two things:
+ * * Don't waste time syncing levels we haven't previously marked as
+ * requiring a sync, and
+ * * Reset the required sync level since we are about to sync
+ * everything that was previously marked as requiring a sync.
+ */
+ if (level >= op_ctx->sync_level_required) {
+ level = op_ctx->sync_level_required;
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+ }
+
+ pvr_mmu_op_context_sync_manual(op_ctx, level);
+}
+
+/**
+ * pvr_mmu_op_context_sync() - Trigger a sync of every page table referenced by
+ * an MMU op context.
+ * @op_ctx: Target MMU op context.
+ *
+ * The maximum level marked internally as requiring a sync will be reset so
+ * that subsequent calls to this function will be no-ops unless @op_ctx is
+ * otherwise updated.
+ */
+static void
+pvr_mmu_op_context_sync(struct pvr_mmu_op_context *op_ctx)
+{
+ pvr_mmu_op_context_sync_manual(op_ctx, op_ctx->sync_level_required);
+
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+}
+
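+/*
+ * Example (illustrative only): the required sync level only ever grows until
+ * a sync consumes it, so a sequence of marks collapses to the highest level
+ * requested:
+ *
+ *   pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+ *   pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ *   pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_1);
+ *   // sync_level_required is now PVR_MMU_SYNC_LEVEL_2.
+ *   pvr_mmu_op_context_sync(op_ctx);  // syncs L0, L1 and L2, then resets
+ *                                     // the level to PVR_MMU_SYNC_LEVEL_NONE.
+ */
+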
+/**
+ * pvr_mmu_op_context_load_tables() - Load pointers to tables in each level of
+ * the page table tree structure needed to reference the physical page
+ * referenced by an MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @should_create: Specifies whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ * @load_level_required: Maximum page table level to load.
+ *
+ * If @should_create is %true, this function may modify the stored required
+ * sync level of @op_ctx as new page tables are created and inserted into their
+ * respective parents.
+ *
+ * Since there is only one root page table, it is technically incorrect to call
+ * this function with a value of @load_level_required greater than or equal to
+ * the root level number. However, this is not explicitly disallowed here.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_page_table_l1_get_or_insert() if
+ * @load_level_required >= 1 except -%ENXIO, or
+ * * Any error returned by pvr_page_table_l0_get_or_insert() if
+ * @load_level_required >= 0 except -%ENXIO.
+ */
+static int
+pvr_mmu_op_context_load_tables(struct pvr_mmu_op_context *op_ctx,
+ bool should_create,
+ enum pvr_mmu_sync_level load_level_required)
+{
+ const struct pvr_page_table_l1 *l1_head_before =
+ op_ctx->map.l1_prealloc_tables;
+ const struct pvr_page_table_l0 *l0_head_before =
+ op_ctx->map.l0_prealloc_tables;
+ int err;
+
+ /* Clear tables we're about to fetch in case of error states. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_1)
+ op_ctx->curr_page.l1_table = NULL;
+
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_0)
+ op_ctx->curr_page.l0_table = NULL;
+
+ /* Get or create L1 page table. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_1) {
+ err = pvr_page_table_l1_get_or_insert(op_ctx, should_create);
+ if (err) {
+ /*
+ * If @should_create is %false and no L1 page table was
+ * found, return early but without an error. Since
+ * pvr_page_table_l1_get_or_insert() can only return
+ * -%ENXIO if @should_create is %false, there is no
+ * need to check it here.
+ */
+ if (err == -ENXIO)
+ err = 0;
+
+ return err;
+ }
+ }
+
+ /* Get or create L0 page table. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_0) {
+ err = pvr_page_table_l0_get_or_insert(op_ctx, should_create);
+ if (err) {
+ /*
+ * If @should_create is %false and no L0 page table was
+ * found, return early but without an error. Since
+ * pvr_page_table_l0_get_or_insert() can only return
+ * -%ENXIO if @should_create is %false, there is no
+ * need to check it here.
+ */
+ if (err == -ENXIO)
+ err = 0;
+
+ /*
+ * At this point, an L1 page table could have been
+ * inserted but is now empty due to the failed attempt
+ * at inserting an L0 page table. In this instance, we
+ * must remove the empty L1 page table ourselves as
+ * pvr_page_table_l1_remove() is never called as part
+ * of the error path in
+ * pvr_page_table_l0_get_or_insert().
+ */
+ if (l1_head_before != op_ctx->map.l1_prealloc_tables) {
+ pvr_page_table_l2_remove(op_ctx);
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ }
+
+ return err;
+ }
+ }
+
+ /*
+ * A sync is only needed if table objects were inserted. This can be
+ * inferred by checking if the pointer at the head of the linked list
+ * has changed.
+ */
+ if (l1_head_before != op_ctx->map.l1_prealloc_tables)
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ else if (l0_head_before != op_ctx->map.l0_prealloc_tables)
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_1);
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_op_context_set_curr_page() - Reassign the current page of an MMU op
+ * context, syncing any page tables previously assigned to it which are no
+ * longer relevant.
+ * @op_ctx: Target MMU op context.
+ * @device_addr: New pointer target.
+ * @should_create: Specify whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ *
+ * This function performs a full sync on the pointer, regardless of which
+ * levels are modified.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_op_context_load_tables().
+ */
+static int
+pvr_mmu_op_context_set_curr_page(struct pvr_mmu_op_context *op_ctx,
+ u64 device_addr, bool should_create)
+{
+ pvr_mmu_op_context_sync(op_ctx);
+
+ op_ctx->curr_page.l2_idx = pvr_page_table_l2_idx(device_addr);
+ op_ctx->curr_page.l1_idx = pvr_page_table_l1_idx(device_addr);
+ op_ctx->curr_page.l0_idx = pvr_page_table_l0_idx(device_addr);
+ op_ctx->curr_page.l1_table = NULL;
+ op_ctx->curr_page.l0_table = NULL;
+
+ return pvr_mmu_op_context_load_tables(op_ctx, should_create,
+ PVR_MMU_SYNC_LEVEL_1);
+}
+
+/**
+ * pvr_mmu_op_context_next_page() - Advance the current page of an MMU op
+ * context.
+ * @op_ctx: Target MMU op context.
+ * @should_create: Specify whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ *
+ * If @should_create is %false, it is the caller's responsibility to verify that
+ * the state of the table references in @op_ctx is valid on return. If -%ENXIO
+ * is returned, at least one of the table references is invalid. It should be
+ * noted that @op_ctx as a whole will be left in a valid state if -%ENXIO is
+ * returned, unlike other error codes. The caller should check which references
+ * are invalid by comparing them to %NULL. Only the L2 page table is guaranteed
+ * to be valid, since it is the root of the page table tree structure.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EPERM if the operation would wrap at the top of the page table
+ * hierarchy,
+ * * -%ENXIO if @should_create is %false and a page table of any level would
+ * have otherwise been created, or
+ * * Any error returned while attempting to create missing page tables if
+ * @should_create is %true.
+ */
+static int
+pvr_mmu_op_context_next_page(struct pvr_mmu_op_context *op_ctx,
+ bool should_create)
+{
+ s8 load_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+
+ if (++op_ctx->curr_page.l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X)
+ goto load_tables;
+
+ op_ctx->curr_page.l0_idx = 0;
+ load_level_required = PVR_MMU_SYNC_LEVEL_0;
+
+ if (++op_ctx->curr_page.l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE)
+ goto load_tables;
+
+ op_ctx->curr_page.l1_idx = 0;
+ load_level_required = PVR_MMU_SYNC_LEVEL_1;
+
+ if (++op_ctx->curr_page.l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE)
+ goto load_tables;
+
+ /*
+ * If the pattern continued, we would set &op_ctx->curr_page.l2_idx to
+ * zero here. However, that would wrap the top layer of the page table
+ * hierarchy which is not a valid operation. Instead, we warn and return
+ * an error.
+ */
+ WARN(true,
+ "%s(%p) attempted to loop the top of the page table hierarchy",
+ __func__, op_ctx);
+ return -EPERM;
+
+ /* If indices have wrapped, we need to load new tables. */
+load_tables:
+ /* First, flush tables which will be unloaded. */
+ pvr_mmu_op_context_sync_partial(op_ctx, load_level_required);
+
+ /* Then load tables from the required level down. */
+ return pvr_mmu_op_context_load_tables(op_ctx, should_create,
+ load_level_required);
+}
+
+/**
+ * DOC: Single page operations
+ */
+
+/**
+ * pvr_page_create() - Create a device-virtual memory page and insert it into
+ * a level 0 page table.
+ * @op_ctx: Target MMU op context pointing at the device-virtual address of the
+ * target page.
+ * @dma_addr: DMA address of the physical page backing the created page.
+ * @flags: Page options saved on the level 0 page table entry for reading by
+ * the device.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EEXIST if the requested page already exists.
+ */
+static int
+pvr_page_create(struct pvr_mmu_op_context *op_ctx, dma_addr_t dma_addr,
+ struct pvr_page_flags_raw flags)
+{
+ /* Do not create a new page if one already exists. */
+ if (pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx)) {
+ return -EEXIST;
+ }
+
+ pvr_page_table_l0_insert(op_ctx, dma_addr, flags);
+
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+
+ return 0;
+}
+
+/**
+ * pvr_page_destroy() - Destroy a device page after removing it from its
+ * parent level 0 page table.
+ * @op_ctx: Target MMU op context.
+ */
+static void
+pvr_page_destroy(struct pvr_mmu_op_context *op_ctx)
+{
+ /* Do nothing if the page does not exist. */
+ if (!pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table,
+ op_ctx->curr_page.l0_idx)) {
+ return;
+ }
+
+ /* Clear the parent L0 page table entry. */
+ pvr_page_table_l0_remove(op_ctx);
+
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+}
+
+/**
+ * pvr_mmu_op_context_destroy() - Destroy an MMU op context.
+ * @op_ctx: Target MMU op context.
+ */
+void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx)
+{
+ const bool flush_caches =
+ op_ctx->sync_level_required != PVR_MMU_SYNC_LEVEL_NONE;
+
+ pvr_mmu_op_context_sync(op_ctx);
+
+ /* Unmaps should be flushed immediately. Map flushes can be deferred. */
+ if (flush_caches && !op_ctx->map.sgt)
+ pvr_mmu_flush_exec(op_ctx->mmu_ctx->pvr_dev, true);
+
+ while (op_ctx->map.l0_prealloc_tables) {
+ struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables;
+
+ op_ctx->map.l0_prealloc_tables =
+ op_ctx->map.l0_prealloc_tables->next_free;
+ pvr_page_table_l0_free(tmp);
+ }
+
+ while (op_ctx->map.l1_prealloc_tables) {
+ struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables;
+
+ op_ctx->map.l1_prealloc_tables =
+ op_ctx->map.l1_prealloc_tables->next_free;
+ pvr_page_table_l1_free(tmp);
+ }
+
+ while (op_ctx->unmap.l0_free_tables) {
+ struct pvr_page_table_l0 *tmp = op_ctx->unmap.l0_free_tables;
+
+ op_ctx->unmap.l0_free_tables =
+ op_ctx->unmap.l0_free_tables->next_free;
+ pvr_page_table_l0_free(tmp);
+ }
+
+ while (op_ctx->unmap.l1_free_tables) {
+ struct pvr_page_table_l1 *tmp = op_ctx->unmap.l1_free_tables;
+
+ op_ctx->unmap.l1_free_tables =
+ op_ctx->unmap.l1_free_tables->next_free;
+ pvr_page_table_l1_free(tmp);
+ }
+
+ kfree(op_ctx);
+}
+
+/**
+ * pvr_mmu_op_context_create() - Create an MMU op context.
+ * @ctx: MMU context associated with owning VM context.
+ * @sgt: Scatter gather table containing pages pinned for use by this context.
+ * @sgt_offset: Start offset of the requested device-virtual memory mapping.
+ * @size: Size in bytes of the requested device-virtual memory mapping. For an
+ * unmapping, this should be zero so that no page tables are allocated.
+ *
+ * Returns:
+ * * Newly created MMU op context object on success, or
+ * * -%ENOMEM if no memory is available, or
+ * * Any error code returned by pvr_page_table_l1_alloc() or
+ * pvr_page_table_l0_alloc().
+ */
+struct pvr_mmu_op_context *
+pvr_mmu_op_context_create(struct pvr_mmu_context *ctx, struct sg_table *sgt,
+ u64 sgt_offset, u64 size)
+{
+ int err;
+
+ struct pvr_mmu_op_context *op_ctx =
+ kzalloc(sizeof(*op_ctx), GFP_KERNEL);
+
+ if (!op_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ op_ctx->mmu_ctx = ctx;
+ op_ctx->map.sgt = sgt;
+ op_ctx->map.sgt_offset = sgt_offset;
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+
+ if (size) {
+ /*
+ * The number of page table objects we need to prealloc is
+ * indicated by the mapping size, start offset and the sizes
+ * of the areas mapped per PT or PD. The range calculation is
+ * identical to that for the index into a table for a device
+ * address, so we reuse those functions here.
+ */
+ const u32 l1_start_idx = pvr_page_table_l2_idx(sgt_offset);
+ const u32 l1_end_idx = pvr_page_table_l2_idx(sgt_offset + size);
+ const u32 l1_count = l1_end_idx - l1_start_idx + 1;
+ const u32 l0_start_idx = pvr_page_table_l1_idx(sgt_offset);
+ const u32 l0_end_idx = pvr_page_table_l1_idx(sgt_offset + size);
+ const u32 l0_count = l0_end_idx - l0_start_idx + 1;
+
+ /*
+ * Alloc and push page table entries until we have enough of
+ * each type, ending with linked lists of l0 and l1 entries in
+ * reverse order.
+ */
+ for (int i = 0; i < l1_count; i++) {
+ struct pvr_page_table_l1 *l1_tmp =
+ pvr_page_table_l1_alloc(ctx);
+
+ err = PTR_ERR_OR_ZERO(l1_tmp);
+ if (err)
+ goto err_cleanup;
+
+ l1_tmp->next_free = op_ctx->map.l1_prealloc_tables;
+ op_ctx->map.l1_prealloc_tables = l1_tmp;
+ }
+
+ for (int i = 0; i < l0_count; i++) {
+ struct pvr_page_table_l0 *l0_tmp =
+ pvr_page_table_l0_alloc(ctx);
+
+ err = PTR_ERR_OR_ZERO(l0_tmp);
+ if (err)
+ goto err_cleanup;
+
+ l0_tmp->next_free = op_ctx->map.l0_prealloc_tables;
+ op_ctx->map.l0_prealloc_tables = l0_tmp;
+ }
+ }
+
+ return op_ctx;
+
+err_cleanup:
+ pvr_mmu_op_context_destroy(op_ctx);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_mmu_op_context_unmap_curr_page() - Unmap pages from a memory context
+ * starting from the current page of an MMU op context.
+ * @op_ctx: Target MMU op context pointing at the first page to unmap.
+ * @nr_pages: Number of pages to unmap.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while advancing @op_ctx.curr_page with
+ * pvr_mmu_op_context_next_page() (except -%ENXIO).
+ */
+static int
+pvr_mmu_op_context_unmap_curr_page(struct pvr_mmu_op_context *op_ctx,
+ u64 nr_pages)
+{
+ int err;
+
+ if (nr_pages == 0)
+ return 0;
+
+ /*
+ * Destroy first page outside loop, as it doesn't require a page
+ * advance beforehand. If the L0 page table reference in
+ * @op_ctx.curr_page is %NULL, there cannot be a mapped page at
+ * @op_ctx.curr_page (so skip ahead).
+ */
+ if (op_ctx->curr_page.l0_table)
+ pvr_page_destroy(op_ctx);
+
+ for (u64 page = 1; page < nr_pages; ++page) {
+ err = pvr_mmu_op_context_next_page(op_ctx, false);
+ /*
+ * If the page table tree structure at @op_ctx.curr_page is
+ * incomplete, skip ahead. We don't care about unmapping pages
+ * that cannot exist.
+ *
+ * FIXME: This could be made more efficient by jumping ahead
+ * using pvr_mmu_op_context_set_curr_page().
+ */
+ if (err == -ENXIO)
+ continue;
+ else if (err)
+ return err;
+
+ pvr_page_destroy(op_ctx);
+ }
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_unmap() - Unmap pages from a memory context.
+ * @op_ctx: Target MMU op context.
+ * @device_addr: First device-virtual address to unmap.
+ * @size: Size in bytes to unmap.
+ *
+ * The total number of device-virtual pages unmapped is
+ * @size >> %PVR_DEVICE_PAGE_SHIFT.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_mmu_op_context_set_curr_page(), or
+ * * Any error code returned by pvr_mmu_op_context_unmap_curr_page().
+ */
+int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size)
+{
+ int err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, false);
+
+ if (err)
+ return err;
+
+ return pvr_mmu_op_context_unmap_curr_page(op_ctx,
+ size >> PVR_DEVICE_PAGE_SHIFT);
+}
+
+/**
+ * pvr_mmu_map_sgl() - Map part of a scatter-gather table entry to
+ * device-virtual memory.
+ * @op_ctx: Target MMU op context pointing to the first page that should be
+ * mapped.
+ * @sgl: Target scatter-gather table entry.
+ * @offset: Offset into @sgl to map from. Must result in a starting address
+ * from @sgl which is CPU page-aligned.
+ * @size: Size of the memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @page_flags: Page options to be applied to every device-virtual memory page
+ * in the created mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the range specified by @offset and @size is not completely
+ * within @sgl, or
+ * * Any error encountered while creating a page with pvr_page_create(), or
+ * * Any error encountered while advancing @op_ctx.curr_page with
+ * pvr_mmu_op_context_next_page().
+ */
+static int
+pvr_mmu_map_sgl(struct pvr_mmu_op_context *op_ctx, struct scatterlist *sgl,
+ u64 offset, u64 size, struct pvr_page_flags_raw page_flags)
+{
+ const unsigned int pages = size >> PVR_DEVICE_PAGE_SHIFT;
+ dma_addr_t dma_addr = sg_dma_address(sgl) + offset;
+ const unsigned int dma_len = sg_dma_len(sgl);
+ struct pvr_page_table_ptr ptr_copy;
+ unsigned int page;
+ int err;
+
+ if (size > dma_len || offset > dma_len - size)
+ return -EINVAL;
+
+ /*
+ * Before progressing, save a copy of the start pointer so we can use
+ * it again if we enter an error state and have to destroy pages.
+ */
+ memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
+
+ /*
+ * Create first page outside loop, as it doesn't require a page advance
+ * beforehand.
+ */
+ err = pvr_page_create(op_ctx, dma_addr, page_flags);
+ if (err)
+ return err;
+
+ for (page = 1; page < pages; ++page) {
+ err = pvr_mmu_op_context_next_page(op_ctx, true);
+ if (err)
+ goto err_destroy_pages;
+
+ dma_addr += PVR_DEVICE_PAGE_SIZE;
+
+ err = pvr_page_create(op_ctx, dma_addr, page_flags);
+ if (err)
+ goto err_destroy_pages;
+ }
+
+ return 0;
+
+err_destroy_pages:
+ memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
+ err = pvr_mmu_op_context_unmap_curr_page(op_ctx, page);
+
+ return err;
+}
+
+/**
+ * pvr_mmu_map() - Map an object's virtual memory to physical memory.
+ * @op_ctx: Target MMU op context.
+ * @size: Size of memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @flags: Flags from pvr_gem_object associated with the mapping.
+ * @device_addr: Virtual device address to map to. Must be device page-aligned.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL if the mapping is misaligned or the page tables for
+ * @device_addr could not be loaded or created, or
+ * * Any error code returned by pvr_mmu_map_sgl(), or
+ * * Any error code returned by pvr_mmu_op_context_next_page().
+ */
+int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
+ u64 device_addr)
+{
+ struct pvr_page_table_ptr ptr_copy;
+ struct pvr_page_flags_raw flags_raw;
+ struct scatterlist *sgl;
+ u64 mapped_size = 0;
+ unsigned int count;
+ int err;
+
+ if (!size)
+ return 0;
+
+ if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK)
+ return -EINVAL;
+
+ err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, true);
+ if (err)
+ return -EINVAL;
+
+ memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
+
+ flags_raw = pvr_page_flags_raw_create(false, false,
+ flags & DRM_PVR_BO_BYPASS_DEVICE_CACHE,
+ flags & DRM_PVR_BO_PM_FW_PROTECT);
+
+ /* Map scatter gather table */
+ for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) {
+ const size_t sgl_len = sg_dma_len(sgl);
+ u64 sgl_offset, map_sgl_len;
+
+ if (sgl_len <= op_ctx->map.sgt_offset) {
+ op_ctx->map.sgt_offset -= sgl_len;
+ continue;
+ }
+
+ sgl_offset = op_ctx->map.sgt_offset;
+ map_sgl_len = min_t(u64, sgl_len - sgl_offset, size - mapped_size);
+
+ err = pvr_mmu_map_sgl(op_ctx, sgl, sgl_offset, map_sgl_len,
+ flags_raw);
+ if (err)
+ break;
+
+ /*
+ * Flag the L0 page table as requiring a flush when the MMU op
+ * context is destroyed.
+ */
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+
+ op_ctx->map.sgt_offset = 0;
+ mapped_size += map_sgl_len;
+
+ if (mapped_size >= size)
+ break;
+
+ err = pvr_mmu_op_context_next_page(op_ctx, true);
+ if (err)
+ break;
+ }
+
+ if (err && mapped_size) {
+ memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
+ pvr_mmu_op_context_unmap_curr_page(op_ctx,
+ mapped_size >> PVR_DEVICE_PAGE_SHIFT);
+ }
+
+ return err;
+}
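+
+/*
+ * A sketch of a complete map operation as a caller (e.g. the VM layer) might
+ * drive it; error handling is abbreviated and the surrounding variable names
+ * are assumptions, not driver code:
+ *
+ *   struct pvr_mmu_op_context *op_ctx =
+ *           pvr_mmu_op_context_create(mmu_ctx, sgt, 0, size);
+ *
+ *   if (IS_ERR(op_ctx))
+ *           return PTR_ERR(op_ctx);
+ *
+ *   err = pvr_mmu_map(op_ctx, size, flags, device_addr);
+ *
+ *   // Destroying the op context performs the final sync, flushes the MMU
+ *   // caches where required, and releases unused preallocated tables.
+ *   pvr_mmu_op_context_destroy(op_ctx);
+ *
+ * Unmapping follows the same pattern, but with an op context created with
+ * @size == 0 so that no page tables are preallocated.
+ */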
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.h b/drivers/gpu/drm/imagination/pvr_mmu.h
new file mode 100644
index 000000000..a8ecd4601
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_mmu.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_MMU_H
+#define PVR_MMU_H
+
+#include <linux/memory.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+
+/* Forward declaration from "pvr_mmu.c" */
+struct pvr_mmu_context;
+struct pvr_mmu_op_context;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <linux/scatterlist.h> */
+struct sg_table;
+
+/**
+ * DOC: Public API (constants)
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SIZE
+ *
+ * Fixed page size referenced by leaf nodes in the page table tree
+ * structure. In the current implementation, this value is pegged to the
+ * CPU page size (%PAGE_SIZE). It is therefore an error to specify a CPU
+ * page size which is not also a supported device page size. The supported
+ * device page sizes are: 4KiB, 16KiB, 64KiB, 256KiB, 1MiB and 2MiB.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SHIFT
+ *
+ * Shift value used to efficiently multiply or divide by
+ * %PVR_DEVICE_PAGE_SIZE.
+ *
+ * This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_MASK
+ *
+ * Mask used to round a value down to the nearest multiple of
+ * %PVR_DEVICE_PAGE_SIZE. When bitwise negated, it can be used to test whether
+ * a value is already a multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ * This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ */
+
+/* PVR_DEVICE_PAGE_SIZE is currently pegged to the CPU page size; see above. */
+#define PVR_DEVICE_PAGE_SIZE (PAGE_SIZE)
+#define PVR_DEVICE_PAGE_SHIFT (PAGE_SHIFT)
+#define PVR_DEVICE_PAGE_MASK (PAGE_MASK)
+
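+/*
+ * Example (illustrative only) of the intended use of these constants; with
+ * 4KiB pages, an address of 0x12345 rounds down to 0x12000:
+ *
+ *   u64 base = addr & PVR_DEVICE_PAGE_MASK;          // round down to a page
+ *   bool aligned = !(addr & ~PVR_DEVICE_PAGE_MASK);  // alignment test
+ *   u64 page_idx = addr >> PVR_DEVICE_PAGE_SHIFT;    // divide by page size
+ */
+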
+/**
+ * DOC: Page table index utilities (constants)
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_SPACE_SIZE
+ *
+ * Size of device-virtual address space which can be represented in the page
+ * table structure.
+ *
+ * This value is checked at runtime against
+ * &pvr_device_features.virtual_address_space_bits by
+ * pvr_vm_create_context(), which will return an error if the feature value
+ * does not match this constant.
+ *
+ * .. admonition:: Future work
+ *
+ * It should be possible to support other values of
+ * &pvr_device_features.virtual_address_space_bits, but so far no
+ * hardware has been created which advertises an unsupported value.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_BITS
+ *
+ * Number of bits needed to represent any value less than
+ * %PVR_PAGE_TABLE_ADDR_SPACE_SIZE exactly.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_MASK
+ *
+ * Bitmask of device-virtual addresses which are valid in the page table
+ * structure.
+ *
+ * This value is derived from %PVR_PAGE_TABLE_ADDR_SPACE_SIZE, so the same
+ * notes on that constant apply here.
+ */
+#define PVR_PAGE_TABLE_ADDR_SPACE_SIZE SZ_1T
+#define PVR_PAGE_TABLE_ADDR_BITS __ffs(PVR_PAGE_TABLE_ADDR_SPACE_SIZE)
+#define PVR_PAGE_TABLE_ADDR_MASK (PVR_PAGE_TABLE_ADDR_SPACE_SIZE - 1)
+
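+/*
+ * With the current 1TiB address space these evaluate to 40 address bits and a
+ * mask of 0xFFFFFFFFFF, so a bounds check reduces to (illustrative only):
+ *
+ *   bool valid = !(device_addr & ~PVR_PAGE_TABLE_ADDR_MASK);
+ */
+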
+void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev);
+int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait);
+
+struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev);
+void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx);
+
+dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx);
+
+void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx);
+struct pvr_mmu_op_context *
+pvr_mmu_op_context_create(struct pvr_mmu_context *ctx,
+ struct sg_table *sgt, u64 sgt_offset, u64 size);
+
+int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
+ u64 device_addr);
+int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size);
+
+#endif /* PVR_MMU_H */
diff --git a/drivers/gpu/drm/imagination/pvr_params.c b/drivers/gpu/drm/imagination/pvr_params.c
new file mode 100644
index 000000000..b91759f36
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_params.h"
+
+#include <linux/cache.h>
+#include <linux/moduleparam.h>
+
+static struct pvr_device_params pvr_device_param_defaults __read_mostly = {
+#define X(type_, name_, value_, desc_, ...) .name_ = (value_),
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+#define PVR_DEVICE_PARAM_NAMED(name_, type_, desc_) \
+ module_param_named(name_, pvr_device_param_defaults.name_, type_, \
+ 0400); \
+ MODULE_PARM_DESC(name_, desc_);
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_MODPARAM uint
+
+#define X(type_, name_, value_, desc_, ...) \
+ PVR_DEVICE_PARAM_NAMED(name_, PVR_PARAM_TYPE_##type_##_MODPARAM, desc_);
+PVR_DEVICE_PARAMS
+#undef X
+
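+/*
+ * For illustration, with the single parameter currently in PVR_DEVICE_PARAMS
+ * the X macro above expands (roughly) to:
+ *
+ *   module_param_named(fw_trace_mask,
+ *                      pvr_device_param_defaults.fw_trace_mask,
+ *                      uint, 0400);
+ *   MODULE_PARM_DESC(fw_trace_mask,
+ *                    "Enable FW trace for the specified groups. "
+ *                    "Specifying 0 disables all FW tracing.");
+ */
+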
+int
+pvr_device_params_init(struct pvr_device_params *params)
+{
+ /*
+ * If heap-allocated parameters are added in the future (e.g.
+ * modparam's charp type), they must be handled specially here (via
+ * kstrdup() in the case of charp). Since that's not necessary yet,
+ * a straight copy will do for now. This change will also require a
+ * pvr_device_params_fini() function to free any heap-allocated copies.
+ */
+
+ *params = pvr_device_param_defaults;
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#include "pvr_device.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/stddef.h>
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_FMT "0x%08llx"
+
+#define X_SET(name_, mode_) X_SET_##mode_(name_)
+#define X_SET_DEF(name_, update_, mode_) X_SET_DEF_##mode_(name_, update_)
+
+#define X_SET_RO(name_) NULL
+#define X_SET_RW(name_) __pvr_device_param_##name_##_set
+
+#define X_SET_DEF_RO(name_, update_)
+#define X_SET_DEF_RW(name_, update_) \
+ static int \
+ X_SET_RW(name_)(void *data, u64 val) \
+ { \
+ struct pvr_device *pvr_dev = data; \
+ /* This is not just (update_) to suppress -Waddress. */ \
+ if ((void *)(update_) != NULL) \
+ (update_)(pvr_dev, pvr_dev->params.name_, val); \
+ pvr_dev->params.name_ = val; \
+ return 0; \
+ }
+
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ static int \
+ __pvr_device_param_##name_##_get(void *data, u64 *val) \
+ { \
+ struct pvr_device *pvr_dev = data; \
+ *val = pvr_dev->params.name_; \
+ return 0; \
+ } \
+ X_SET_DEF(name_, update_, mode_) \
+ static int \
+ __pvr_device_param_##name_##_open(struct inode *inode, \
+ struct file *file) \
+ { \
+ __simple_attr_check_format(PVR_PARAM_TYPE_##type_##_FMT, \
+ 0ull); \
+ return simple_attr_open(inode, file, \
+ __pvr_device_param_##name_##_get, \
+ X_SET(name_, mode_), \
+ PVR_PARAM_TYPE_##type_##_FMT); \
+ }
+PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_SET
+#undef X_SET_RO
+#undef X_SET_RW
+#undef X_SET_DEF
+#undef X_SET_DEF_RO
+#undef X_SET_DEF_RW
+
+static struct {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ const struct file_operations name_;
+ PVR_DEVICE_PARAMS
+#undef X
+} pvr_device_param_debugfs_fops = {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ .name_ = { \
+ .owner = THIS_MODULE, \
+ .open = __pvr_device_param_##name_##_open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+ .write = simple_attr_write, \
+ .llseek = generic_file_llseek, \
+ },
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+void
+pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+#define X_MODE(mode_) X_MODE_##mode_
+#define X_MODE_RO 0400
+#define X_MODE_RW 0600
+
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ debugfs_create_file(#name_, X_MODE(mode_), dir, pvr_dev, \
+ &pvr_device_param_debugfs_fops.name_);
+ PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_MODE
+#undef X_MODE_RO
+#undef X_MODE_RW
+}
+#endif
diff --git a/drivers/gpu/drm/imagination/pvr_params.h b/drivers/gpu/drm/imagination/pvr_params.h
new file mode 100644
index 000000000..5807915b4
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_PARAMS_H
+#define PVR_PARAMS_H
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/cache.h>
+#include <linux/compiler_attributes.h>
+
+/*
+ * This is the definitive list of types allowed in the definition of
+ * %PVR_DEVICE_PARAMS.
+ */
+#define PVR_PARAM_TYPE_X32_C u32
+
+/*
+ * This macro defines all device-specific parameters; that is parameters which
+ * are set independently per device.
+ *
+ * The X-macro accepts the following arguments. Arguments marked with [debugfs]
+ * are ignored when debugfs is disabled; values used for these arguments may
+ * safely be gated behind CONFIG_DEBUG_FS.
+ *
+ * @type_: The definitive list of allowed values is PVR_PARAM_TYPE_*_C.
+ * @name_: Name of the parameter. This is used both as the field name in C and
+ * stringified as the parameter name.
+ * @value_: Initial/default value.
+ * @desc_: String literal used as help text to describe the usage of this
+ * parameter.
+ * @mode_: [debugfs] One of {RO,RW}. The access mode of the debugfs entry for
+ * this parameter.
+ * @update_: [debugfs] When debugfs support is enabled, parameters may be
+ * updated at runtime. When this happens, this function will be
+ * called to allow changes to propagate. The signature of this
+ * function is:
+ *
+ * void (*)(struct pvr_device *pvr_dev, T old_val, T new_val)
+ *
+ * Where T is the C type associated with @type_.
+ *
+ * If @mode_ does not allow write access, this function will never be
+ * called. In this case, or if no update callback is required, you
+ * should specify NULL for this argument.
+ */
+#define PVR_DEVICE_PARAMS \
+ X(X32, fw_trace_mask, ROGUE_FWIF_LOG_TYPE_NONE, \
+ "Enable FW trace for the specified groups. Specifying 0 disables " \
+ "all FW tracing.", \
+ RW, pvr_fw_trace_mask_update)
+
+struct pvr_device_params {
+#define X(type_, name_, value_, desc_, ...) \
+ PVR_PARAM_TYPE_##type_##_C name_;
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
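+/*
+ * With the current PVR_DEVICE_PARAMS list, this expands to:
+ *
+ *   struct pvr_device_params {
+ *           u32 fw_trace_mask;
+ *   };
+ */
+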
+int pvr_device_params_init(struct pvr_device_params *params);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_PARAMS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
new file mode 100644
index 000000000..ba7816fd2
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_power.h"
+#include "pvr_queue.h"
+#include "pvr_rogue_fwif.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */
+
+#define WATCHDOG_TIME_MS (500)
+
+/**
+ * pvr_device_lost() - Mark GPU device as lost
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This will cause the DRM device to be unplugged.
+ */
+void
+pvr_device_lost(struct pvr_device *pvr_dev)
+{
+ if (!pvr_dev->lost) {
+ pvr_dev->lost = true;
+ drm_dev_unplug(from_pvr_device(pvr_dev));
+ }
+}
+
+static int
+pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ u32 slot_nr;
+ u32 value;
+ int err;
+
+ WRITE_ONCE(*fw_dev->power_sync, 0);
+
+ err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
+ if (err)
+ return err;
+
+ /* Wait for FW to acknowledge. */
+ return readl_poll_timeout(pvr_dev->fw_dev.power_sync, value, value != 0, 100,
+ POWER_SYNC_TIMEOUT_US);
+}
+
+static int
+pvr_power_request_idle(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_kccb_cmd pow_cmd;
+
+ /* Send FORCED_IDLE request to FW. */
+ pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+ pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
+ pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;
+
+ return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_kccb_cmd pow_cmd;
+
+ /* Send POW_OFF request to firmware. */
+ pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+ pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
+ pow_cmd.cmd_data.pow_data.power_req_data.forced = true;
+
+ return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
+{
+ if (!hard_reset) {
+ int err;
+
+ cancel_delayed_work_sync(&pvr_dev->watchdog.work);
+
+ err = pvr_power_request_idle(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_power_request_pwr_off(pvr_dev);
+ if (err)
+ return err;
+ }
+
+ return pvr_fw_stop(pvr_dev);
+}
+
+static int
+pvr_power_fw_enable(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_fw_start(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_wait_for_fw_boot(pvr_dev);
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
+ pvr_fw_stop(pvr_dev);
+ return err;
+ }
+
+ queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
+ msecs_to_jiffies(WATCHDOG_TIME_MS));
+
+ return 0;
+}
+
+bool
+pvr_power_is_idle(struct pvr_device *pvr_dev)
+{
+ /*
+ * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
+ * started processing it yet. So also check the KCCB status.
+ */
+ enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
+ bool kccb_idle = pvr_kccb_is_idle(pvr_dev);
+
+ return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
+}
+
+static bool
+pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
+{
+ /* Check KCCB commands are progressing. */
+ u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
+ bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);
+
+ if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
+ pvr_dev->watchdog.kccb_stall_count++;
+
+ /*
+ * If we have commands pending with no progress for 2 consecutive polls then
+ * consider KCCB command processing stalled.
+ */
+ if (pvr_dev->watchdog.kccb_stall_count == 2) {
+ pvr_dev->watchdog.kccb_stall_count = 0;
+ return true;
+ }
+ } else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
+ bool has_active_contexts;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ has_active_contexts = !list_empty(&pvr_dev->queues.active);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ if (has_active_contexts) {
+ /* Send a HEALTH_CHECK command so we can verify FW is still alive. */
+ struct rogue_fwif_kccb_cmd health_check_cmd;
+
+ health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;
+
+ pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
+ }
+ } else {
+ pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
+ pvr_dev->watchdog.kccb_stall_count = 0;
+ }
+
+ return false;
+}
+
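+/*
+ * Stall detection timeline, for illustration (polls are WATCHDOG_TIME_MS
+ * apart):
+ *
+ *   poll N:   cmds_executed unchanged, KCCB busy -> kccb_stall_count = 1
+ *   poll N+1: cmds_executed unchanged, KCCB busy -> kccb_stall_count = 2,
+ *             reported as stalled (the watchdog then hard-resets the GPU)
+ *   any poll: cmds_executed advanced -> kccb_stall_count reset to 0
+ */
+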
+static void
+pvr_watchdog_worker(struct work_struct *work)
+{
+ struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
+ watchdog.work.work);
+ bool stalled;
+
+ if (pvr_dev->lost)
+ return;
+
+ if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
+ goto out_requeue;
+
+ if (!pvr_dev->fw_dev.booted)
+ goto out_pm_runtime_put;
+
+ stalled = pvr_watchdog_kccb_stalled(pvr_dev);
+
+ if (stalled) {
+ drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
+
+ pvr_power_reset(pvr_dev, true);
+ /* Device may be lost at this point. */
+ }
+
+out_pm_runtime_put:
+ pm_runtime_put(from_pvr_device(pvr_dev)->dev);
+
+out_requeue:
+ if (!pvr_dev->lost) {
+ queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
+ msecs_to_jiffies(WATCHDOG_TIME_MS));
+ }
+}
+
+/**
+ * pvr_watchdog_init() - Initialise watchdog for device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success.
+ */
+int
+pvr_watchdog_init(struct pvr_device *pvr_dev)
+{
+ INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker);
+
+ return 0;
+}
+
+int
+pvr_power_device_suspend(struct device *dev)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ int err = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ if (pvr_dev->fw_dev.booted) {
+ err = pvr_power_fw_disable(pvr_dev, false);
+ if (err)
+ goto err_drm_dev_exit;
+ }
+
+ clk_disable_unprepare(pvr_dev->mem_clk);
+ clk_disable_unprepare(pvr_dev->sys_clk);
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+int
+pvr_power_device_resume(struct device *dev)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+ int idx;
+ int err;
+
+ if (!drm_dev_enter(drm_dev, &idx))
+ return -EIO;
+
+ err = clk_prepare_enable(pvr_dev->core_clk);
+ if (err)
+ goto err_drm_dev_exit;
+
+ err = clk_prepare_enable(pvr_dev->sys_clk);
+ if (err)
+ goto err_core_clk_disable;
+
+ err = clk_prepare_enable(pvr_dev->mem_clk);
+ if (err)
+ goto err_sys_clk_disable;
+
+ if (pvr_dev->fw_dev.booted) {
+ err = pvr_power_fw_enable(pvr_dev);
+ if (err)
+ goto err_mem_clk_disable;
+ }
+
+ drm_dev_exit(idx);
+
+ return 0;
+
+err_mem_clk_disable:
+ clk_disable_unprepare(pvr_dev->mem_clk);
+
+err_sys_clk_disable:
+ clk_disable_unprepare(pvr_dev->sys_clk);
+
+err_core_clk_disable:
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+err_drm_dev_exit:
+ drm_dev_exit(idx);
+
+ return err;
+}
+
+int
+pvr_power_device_idle(struct device *dev)
+{
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+ struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+
+ return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY;
+}
+
+/**
+ * pvr_power_reset() - Reset the GPU
+ * @pvr_dev: Device pointer
+ * @hard_reset: %true for hard reset, %false for soft reset
+ *
+ * If @hard_reset is %false and the FW processor fails to respond during the reset process, this
+ * function will attempt a hard reset.
+ *
+ * If a hard reset fails then the GPU device is reported as lost.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_power_get(), pvr_power_fw_disable() or
+ * pvr_power_fw_enable().
+ */
+int
+pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
+{
+ bool queues_disabled = false;
+ int err;
+
+ /*
+ * Take a power reference during the reset. This should prevent any interference with the
+ * power state during reset.
+ */
+ WARN_ON(pvr_power_get(pvr_dev));
+
+ down_write(&pvr_dev->reset_sem);
+
+ if (pvr_dev->lost) {
+ err = -EIO;
+ goto err_up_write;
+ }
+
+ /* Disable IRQs for the duration of the reset. */
+ disable_irq(pvr_dev->irq);
+
+ do {
+ if (hard_reset) {
+ pvr_queue_device_pre_reset(pvr_dev);
+ queues_disabled = true;
+ }
+
+ err = pvr_power_fw_disable(pvr_dev, hard_reset);
+ if (!err) {
+ if (hard_reset) {
+ pvr_dev->fw_dev.booted = false;
+ WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev));
+
+ err = pvr_fw_hard_reset(pvr_dev);
+ if (err)
+ goto err_device_lost;
+
+ err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev);
+ pvr_dev->fw_dev.booted = true;
+ if (err)
+ goto err_device_lost;
+ } else {
+ /* Clear the FW faulted flags. */
+ pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &=
+ ~(ROGUE_FWIF_HWR_FW_FAULT |
+ ROGUE_FWIF_HWR_RESTART_REQUESTED);
+ }
+
+ pvr_fw_irq_clear(pvr_dev);
+
+ err = pvr_power_fw_enable(pvr_dev);
+ }
+
+ if (err && hard_reset)
+ goto err_device_lost;
+
+ if (err && !hard_reset) {
+ drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
+ hard_reset = true;
+ }
+ } while (err);
+
+ if (queues_disabled)
+ pvr_queue_device_post_reset(pvr_dev);
+
+ enable_irq(pvr_dev->irq);
+
+ up_write(&pvr_dev->reset_sem);
+
+ pvr_power_put(pvr_dev);
+
+ return 0;
+
+err_device_lost:
+ drm_err(from_pvr_device(pvr_dev), "GPU device lost");
+ pvr_device_lost(pvr_dev);
+
+ /* Leave IRQs disabled if the device is lost. */
+
+ if (queues_disabled)
+ pvr_queue_device_post_reset(pvr_dev);
+
+err_up_write:
+ up_write(&pvr_dev->reset_sem);
+
+ pvr_power_put(pvr_dev);
+
+ return err;
+}
+
+/**
+ * pvr_watchdog_fini() - Shutdown watchdog for device
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_watchdog_fini(struct pvr_device *pvr_dev)
+{
+ cancel_delayed_work_sync(&pvr_dev->watchdog.work);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
new file mode 100644
index 000000000..9a9312dcb
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_POWER_H
+#define PVR_POWER_H
+
+#include "pvr_device.h"
+
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+int pvr_watchdog_init(struct pvr_device *pvr_dev);
+void pvr_watchdog_fini(struct pvr_device *pvr_dev);
+
+void pvr_device_lost(struct pvr_device *pvr_dev);
+
+bool pvr_power_is_idle(struct pvr_device *pvr_dev);
+
+int pvr_power_device_suspend(struct device *dev);
+int pvr_power_device_resume(struct device *dev);
+int pvr_power_device_idle(struct device *dev);
+
+int pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset);
+
+static __always_inline int
+pvr_power_get(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ return pm_runtime_resume_and_get(drm_dev->dev);
+}
+
+static __always_inline int
+pvr_power_put(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ return pm_runtime_put(drm_dev->dev);
+}
+
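+/*
+ * A sketch of the usual power reference pattern around GPU access (mirroring
+ * what the watchdog and reset paths do; not driver code):
+ *
+ *   int err = pvr_power_get(pvr_dev);
+ *
+ *   if (err)
+ *           return err;
+ *
+ *   // ... access the GPU ...
+ *
+ *   pvr_power_put(pvr_dev);
+ */
+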
+#endif /* PVR_POWER_H */
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
new file mode 100644
index 000000000..5ed9c98fb
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -0,0 +1,1432 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+
+#include "pvr_cccb.h"
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_job.h"
+#include "pvr_queue.h"
+#include "pvr_vm.h"
+
+#include "pvr_rogue_fwif_client.h"
+
+#define MAX_DEADLINE_MS 30000
+
+#define CTX_COMPUTE_CCCB_SIZE_LOG2 15
+#define CTX_FRAG_CCCB_SIZE_LOG2 15
+#define CTX_GEOM_CCCB_SIZE_LOG2 15
+#define CTX_TRANSFER_CCCB_SIZE_LOG2 15
+
+static int get_xfer_ctx_state_size(struct pvr_device *pvr_dev)
+{
+ u32 num_isp_store_registers;
+
+ if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) {
+ num_isp_store_registers = 1;
+ } else {
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers);
+ if (WARN_ON(err))
+ return err;
+ }
+
+ return sizeof(struct rogue_fwif_frag_ctx_state) +
+ (num_isp_store_registers *
+ sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0]));
+}
+
+static int get_frag_ctx_state_size(struct pvr_device *pvr_dev)
+{
+ u32 num_isp_store_registers;
+ int err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) {
+ err = PVR_FEATURE_VALUE(pvr_dev, num_raster_pipes, &num_isp_store_registers);
+ if (WARN_ON(err))
+ return err;
+
+ if (PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support)) {
+ u32 xpu_max_slaves;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, xpu_max_slaves, &xpu_max_slaves);
+ if (WARN_ON(err))
+ return err;
+
+ num_isp_store_registers *= (1 + xpu_max_slaves);
+ }
+ } else {
+ err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers);
+ if (WARN_ON(err))
+ return err;
+ }
+
+ return sizeof(struct rogue_fwif_frag_ctx_state) +
+ (num_isp_store_registers *
+ sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0]));
+}
+
+static int get_ctx_state_size(struct pvr_device *pvr_dev, enum drm_pvr_job_type type)
+{
+ switch (type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return sizeof(struct rogue_fwif_geom_ctx_state);
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return get_frag_ctx_state_size(pvr_dev);
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return sizeof(struct rogue_fwif_compute_ctx_state);
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return get_xfer_ctx_state_size(pvr_dev);
+ }
+
+ WARN(1, "Invalid queue type");
+ return -EINVAL;
+}
+
+static u32 get_ctx_offset(enum drm_pvr_job_type type)
+{
+ switch (type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return offsetof(struct rogue_fwif_fwrendercontext, geom_context);
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return offsetof(struct rogue_fwif_fwrendercontext, frag_context);
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return offsetof(struct rogue_fwif_fwcomputecontext, cdm_context);
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return offsetof(struct rogue_fwif_fwtransfercontext, tq_context);
+ }
+
+ return 0;
+}
+
+static const char *
+pvr_queue_fence_get_driver_name(struct dma_fence *f)
+{
+ return PVR_DRIVER_NAME;
+}
+
+static void pvr_queue_fence_release(struct dma_fence *f)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(f);
+}
+
+static const char *
+pvr_queue_job_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ switch (fence->queue->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return "geometry";
+
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return "fragment";
+
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return "compute";
+
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return "transfer";
+ }
+
+ WARN(1, "Invalid queue type");
+ return "invalid";
+}
+
+static const char *
+pvr_queue_cccb_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ switch (fence->queue->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return "geometry-cccb";
+
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return "fragment-cccb";
+
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return "compute-cccb";
+
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ return "transfer-cccb";
+ }
+
+ WARN(1, "Invalid queue type");
+ return "invalid";
+}
+
+static const struct dma_fence_ops pvr_queue_job_fence_ops = {
+ .get_driver_name = pvr_queue_fence_get_driver_name,
+ .get_timeline_name = pvr_queue_job_fence_get_timeline_name,
+ .release = pvr_queue_fence_release,
+};
+
+/**
+ * to_pvr_queue_job_fence() - Return a pvr_queue_fence object if the fence is
+ * backed by a UFO.
+ * @f: The dma_fence to turn into a pvr_queue_fence.
+ *
+ * Return:
+ * * A non-NULL pvr_queue_fence object if the dma_fence is backed by a UFO, or
+ * * NULL otherwise.
+ */
+static struct pvr_queue_fence *
+to_pvr_queue_job_fence(struct dma_fence *f)
+{
+ struct drm_sched_fence *sched_fence = to_drm_sched_fence(f);
+
+ if (sched_fence)
+ f = sched_fence->parent;
+
+ if (f && f->ops == &pvr_queue_job_fence_ops)
+ return container_of(f, struct pvr_queue_fence, base);
+
+ return NULL;
+}
+
+static const struct dma_fence_ops pvr_queue_cccb_fence_ops = {
+ .get_driver_name = pvr_queue_fence_get_driver_name,
+ .get_timeline_name = pvr_queue_cccb_fence_get_timeline_name,
+ .release = pvr_queue_fence_release,
+};
+
+/**
+ * pvr_queue_fence_put() - Put wrapper for pvr_queue_fence objects.
+ * @f: The dma_fence object to put.
+ *
+ * If the pvr_queue_fence has been initialized, we call dma_fence_put(),
+ * otherwise we free the object with dma_fence_free(). This allows us
+ * to do the right thing whether pvr_queue_fence_init() has been called
+ * yet or not.
+ */
+static void pvr_queue_fence_put(struct dma_fence *f)
+{
+ if (!f)
+ return;
+
+ if (WARN_ON(f->ops &&
+ f->ops != &pvr_queue_cccb_fence_ops &&
+ f->ops != &pvr_queue_job_fence_ops))
+ return;
+
+ /* If the fence hasn't been initialized yet, free the object directly. */
+ if (f->ops)
+ dma_fence_put(f);
+ else
+ dma_fence_free(f);
+}
+
+/**
+ * pvr_queue_fence_alloc() - Allocate a pvr_queue_fence fence object
+ *
+ * Call this function to allocate job CCCB and done fences. This only
+ * allocates the objects. Initialization happens when the underlying
+ * dma_fence object is to be returned to drm_sched (in prepare_job() or
+ * run_job()).
+ *
+ * Return:
+ * * A valid pointer if the allocation succeeds, or
+ * * NULL if the allocation fails.
+ */
+static struct dma_fence *
+pvr_queue_fence_alloc(void)
+{
+ struct pvr_queue_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ return &fence->base;
+}
+
+/**
+ * pvr_queue_fence_init() - Initializes a pvr_queue_fence object.
+ * @f: The fence to initialize
+ * @queue: The queue this fence belongs to.
+ * @fence_ops: The fence operations.
+ * @fence_ctx: The fence context.
+ *
+ * Wrapper around dma_fence_init() that takes care of initializing the
+ * pvr_queue_fence::queue field too.
+ */
+static void
+pvr_queue_fence_init(struct dma_fence *f,
+ struct pvr_queue *queue,
+ const struct dma_fence_ops *fence_ops,
+ struct pvr_queue_fence_ctx *fence_ctx)
+{
+ struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+
+ pvr_context_get(queue->ctx);
+ fence->queue = queue;
+ dma_fence_init(&fence->base, fence_ops,
+ &fence_ctx->lock, fence_ctx->id,
+ atomic_inc_return(&fence_ctx->seqno));
+}
+
+/**
+ * pvr_queue_cccb_fence_init() - Initializes a CCCB fence object.
+ * @fence: The fence to initialize.
+ * @queue: The queue this fence belongs to.
+ *
+ * Initializes a fence that can be used to wait for CCCB space.
+ *
+ * Should be called in the ::prepare_job() path, so the fence returned to
+ * drm_sched is valid.
+ */
+static void
+pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+{
+ pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops,
+ &queue->cccb_fence_ctx.base);
+}
+
+/**
+ * pvr_queue_job_fence_init() - Initializes a job done fence object.
+ * @fence: The fence to initialize.
+ * @queue: The queue this fence belongs to.
+ *
+ * Initializes a fence that will be signaled when the GPU is done executing
+ * a job.
+ *
+ * Should be called *before* the ::run_job() path, so the fence is initialised
+ * before being placed in the pending_list.
+ */
+static void
+pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
+{
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
+}
+
+/**
+ * pvr_queue_fence_ctx_init() - Queue fence context initialization.
+ * @fence_ctx: The context to initialize
+ */
+static void
+pvr_queue_fence_ctx_init(struct pvr_queue_fence_ctx *fence_ctx)
+{
+ spin_lock_init(&fence_ctx->lock);
+ fence_ctx->id = dma_fence_context_alloc(1);
+ atomic_set(&fence_ctx->seqno, 0);
+}
+
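+/* Worked example: if ROGUE_FWIF_CCB_CMD_MAX_UFOS were 20, a call with
+ * elem_count == 45 would be sized as two full 20-UFO commands plus one
+ * 5-UFO command, each carrying its own command header.
+ */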
+static u32 ufo_cmds_size(u32 elem_count)
+{
+ /* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS per UFO-related command. */
+ u32 full_cmd_count = elem_count / ROGUE_FWIF_CCB_CMD_MAX_UFOS;
+ u32 remaining_elems = elem_count % ROGUE_FWIF_CCB_CMD_MAX_UFOS;
+ u32 size = full_cmd_count *
+ pvr_cccb_get_size_of_cmd_with_hdr(ROGUE_FWIF_CCB_CMD_MAX_UFOS *
+ sizeof(struct rogue_fwif_ufo));
+
+ if (remaining_elems) {
+ size += pvr_cccb_get_size_of_cmd_with_hdr(remaining_elems *
+ sizeof(struct rogue_fwif_ufo));
+ }
+
+ return size;
+}
+
+static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
+{
+ /* One UFO cmd for the fence signaling, one UFO cmd per native fence
+ * wait, and a command for the job itself.
+ */
+ return ufo_cmds_size(1) + ufo_cmds_size(ufo_wait_count) +
+ pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
+}
+
+/**
+ * job_count_remaining_native_deps() - Count the number of non-signaled native dependencies.
+ * @job: Job to operate on.
+ *
+ * Returns: Number of non-signaled native deps remaining.
+ */
+static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
+{
+ unsigned long remaining_count = 0;
+ struct dma_fence *fence = NULL;
+ unsigned long index;
+
+ xa_for_each(&job->base.dependencies, index, fence) {
+ struct pvr_queue_fence *jfence;
+
+ jfence = to_pvr_queue_job_fence(fence);
+ if (!jfence)
+ continue;
+
+ if (!dma_fence_is_signaled(&jfence->base))
+ remaining_count++;
+ }
+
+ return remaining_count;
+}
+
+/**
+ * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
+ * @queue: The queue this job will be submitted to.
+ * @job: The job to get the CCCB fence on.
+ *
+ * The CCCB fence is a synchronization primitive allowing us to delay job
+ * submission until there's enough space in the CCCB to submit the job.
+ *
+ * Return:
+ * * NULL if there's enough space in the CCCB to submit this job, or
+ * * A valid dma_fence object otherwise.
+ */
+static struct dma_fence *
+pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
+{
+ struct pvr_queue_fence *cccb_fence;
+ unsigned int native_deps_remaining;
+
+ /* If the fence is NULL, that means we already checked that we had
+ * enough space in the CCCB for our job.
+ */
+ if (!job->cccb_fence)
+ return NULL;
+
+ mutex_lock(&queue->cccb_fence_ctx.job_lock);
+
+ /* Count remaining native dependencies and check if the job fits in the CCCB. */
+ native_deps_remaining = job_count_remaining_native_deps(job);
+ if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
+ pvr_queue_fence_put(job->cccb_fence);
+ job->cccb_fence = NULL;
+ goto out_unlock;
+ }
+
+ /* There should be no job attached to the CCCB fence context:
+ * drm_sched_entity guarantees that jobs are submitted one at a time.
+ */
+ if (WARN_ON(queue->cccb_fence_ctx.job))
+ pvr_job_put(queue->cccb_fence_ctx.job);
+
+ queue->cccb_fence_ctx.job = pvr_job_get(job);
+
+ /* Initialize the fence before returning it. */
+ cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
+ if (!WARN_ON(cccb_fence->queue))
+ pvr_queue_cccb_fence_init(job->cccb_fence, queue);
+
+out_unlock:
+ mutex_unlock(&queue->cccb_fence_ctx.job_lock);
+
+ return dma_fence_get(job->cccb_fence);
+}
+
+/**
+ * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job.
+ * @queue: The queue this job will be submitted to.
+ * @job: The job to get the KCCB fence on.
+ *
+ * The KCCB fence is a synchronization primitive allowing us to delay job
+ * submission until there's enough space in the KCCB to submit the job.
+ *
+ * Return:
+ * * NULL if there's enough space in the KCCB to submit this job, or
+ * * A valid dma_fence object otherwise.
+ */
+static struct dma_fence *
+pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job)
+{
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+ struct dma_fence *kccb_fence = NULL;
+
+ /* If the fence is NULL, that means we already checked that we had
+ * enough space in the KCCB for our job.
+ */
+ if (!job->kccb_fence)
+ return NULL;
+
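+ /* The fence is expected to be uninitialized (ops == NULL) at this
+ * point; pvr_kccb_reserve_slot() returns NULL when a KCCB slot is
+ * directly available, and a fence to wait on otherwise.
+ */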
+ if (!WARN_ON(job->kccb_fence->ops)) {
+ kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence);
+ job->kccb_fence = NULL;
+ }
+
+ return kccb_fence;
+}
+
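+/* For a geometry job that is part of a combined geom+frag submission, return
+ * the next unsignaled dependency of the paired fragment job, skipping the
+ * geometry job's own scheduled fence, then the fragment job's internal
+ * dependencies, so the combined kick is only sent once the fragment job is
+ * ready to run too.
+ */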
+static struct dma_fence *
+pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job)
+{
+ struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
+ job->paired_job : NULL;
+ struct dma_fence *f;
+ unsigned long index;
+
+ if (!frag_job)
+ return NULL;
+
+ xa_for_each(&frag_job->base.dependencies, index, f) {
+ /* Skip already signaled fences. */
+ if (dma_fence_is_signaled(f))
+ continue;
+
+ /* Skip our own fence. */
+ if (f == &job->base.s_fence->scheduled)
+ continue;
+
+ return dma_fence_get(f);
+ }
+
+ return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity);
+}
+
+/**
+ * pvr_queue_prepare_job() - Return the next internal dependency expressed as a dma_fence.
+ * @sched_job: The job to query the next internal dependency on
+ * @s_entity: The entity this job is queued on.
+ *
+ * After iterating over drm_sched_job::dependencies, drm_sched lets the driver
+ * return its own internal dependencies. We use this function to return ours.
+ */
+static struct dma_fence *
+pvr_queue_prepare_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+ struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity);
+ struct dma_fence *internal_dep = NULL;
+
+ /*
+ * Initialize the done_fence, so we can signal it. This must be done
+ * here because otherwise by the time of run_job() the job will end up
+ * in the pending list without a valid fence.
+ */
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) {
+ /*
+ * This will be called on a paired fragment job after it has been
+ * submitted to firmware. We can tell whether this is the case by
+ * checking if run_job() has been called on the geometry job, which
+ * would have issued a pm ref.
+ */
+ if (job->paired_job->has_pm_ref)
+ return NULL;
+
+ /*
+ * In this case we need to use the job's own ctx to initialise
+ * the done_fence. The other steps are done in the ctx of the
+ * paired geometry job.
+ */
+ pvr_queue_job_fence_init(job->done_fence,
+ job->ctx->queues.fragment);
+ } else {
+ pvr_queue_job_fence_init(job->done_fence, queue);
+ }
+
+ /* CCCB fence is used to make sure we have enough space in the CCCB to
+ * submit our commands.
+ */
+ internal_dep = pvr_queue_get_job_cccb_fence(queue, job);
+
+ /* KCCB fence is used to make sure we have a KCCB slot to queue our
+ * CMD_KICK.
+ */
+ if (!internal_dep)
+ internal_dep = pvr_queue_get_job_kccb_fence(queue, job);
+
+ /* Any extra internal dependency should be added here, using the following
+ * pattern:
+ *
+ * if (!internal_dep)
+ * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job);
+ */
+
+ /* The paired job fence should come last, when everything else is ready. */
+ if (!internal_dep)
+ internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job);
+
+ return internal_dep;
+}
+
+/**
+ * pvr_queue_update_active_state_locked() - Update the queue active state.
+ * @queue: Queue to update the state on.
+ *
+ * Locked version of pvr_queue_update_active_state(). Must be called with
+ * pvr_device::queues::lock held.
+ */
+static void pvr_queue_update_active_state_locked(struct pvr_queue *queue)
+{
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+
+ lockdep_assert_held(&pvr_dev->queues.lock);
+
+ /* The queue is temporarily taken out of all lists while it's being
+ * reset; we don't want a call to pvr_queue_update_active_state_locked()
+ * to re-insert it behind our back.
+ */
+ if (list_empty(&queue->node))
+ return;
+
+ if (!atomic_read(&queue->in_flight_job_count))
+ list_move_tail(&queue->node, &pvr_dev->queues.idle);
+ else
+ list_move_tail(&queue->node, &pvr_dev->queues.active);
+}
+
+/**
+ * pvr_queue_update_active_state() - Update the queue active state.
+ * @queue: Queue to update the state on.
+ *
+ * Active state is based on the in_flight_job_count value.
+ *
+ * Updating the active state implies moving the queue in or out of the
+ * active queue list, which also defines whether the queue is checked
+ * or not when a FW event is received.
+ *
+ * This function should be called any time a job is submitted or its done
+ * fence is signaled.
+ */
+static void pvr_queue_update_active_state(struct pvr_queue *queue)
+{
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ pvr_queue_update_active_state_locked(queue);
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+static void pvr_queue_submit_job_to_cccb(struct pvr_job *job)
+{
+ struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
+ struct rogue_fwif_ufo ufos[ROGUE_FWIF_CCB_CMD_MAX_UFOS];
+ struct pvr_cccb *cccb = &queue->cccb;
+ struct pvr_queue_fence *jfence;
+ struct dma_fence *fence;
+ unsigned long index;
+ u32 ufo_count = 0;
+
+ /* We need to add the queue to the active list before updating the CCCB,
+ * otherwise we might miss the FW event informing us that something
+ * happened on this queue.
+ */
+ atomic_inc(&queue->in_flight_job_count);
+ pvr_queue_update_active_state(queue);
+
+ xa_for_each(&job->base.dependencies, index, fence) {
+ jfence = to_pvr_queue_job_fence(fence);
+ if (!jfence)
+ continue;
+
+ /* Skip the partial render fence, we will place it at the end. */
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job &&
+ &job->paired_job->base.s_fence->scheduled == fence)
+ continue;
+
+ if (dma_fence_is_signaled(&jfence->base))
+ continue;
+
+ pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj,
+ &ufos[ufo_count].addr);
+ ufos[ufo_count++].value = jfence->base.seqno;
+
+ if (ufo_count == ARRAY_SIZE(ufos)) {
+ pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR,
+ sizeof(ufos), ufos, 0, 0);
+ ufo_count = 0;
+ }
+ }
+
+ /* Partial render fence goes last. */
+ if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) {
+ jfence = to_pvr_queue_job_fence(job->paired_job->done_fence);
+ if (!WARN_ON(!jfence)) {
+ pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj,
+ &ufos[ufo_count].addr);
+ ufos[ufo_count++].value = job->paired_job->done_fence->seqno;
+ }
+ }
+
+ if (ufo_count) {
+ pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR,
+ sizeof(ufos[0]) * ufo_count, ufos, 0, 0);
+ }
+
+ if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) {
+ struct rogue_fwif_cmd_geom *cmd = job->cmd;
+
+ /* Reference value for the partial render test is the current queue fence
+ * seqno minus one.
+ */
+ pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj,
+ &cmd->partial_render_geom_frag_fence.addr);
+ cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1;
+ }
+
+ /* Submit job to FW */
+ pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd,
+ job->id, job->id);
+
+ /* Signal the job fence. */
+ pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr);
+ ufos[0].value = job->done_fence->seqno;
+ pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_UPDATE,
+ sizeof(ufos[0]), ufos, 0, 0);
+}
+
+/**
+ * pvr_queue_run_job() - Submit a job to the FW.
+ * @sched_job: The job to submit.
+ *
+ * This function is called when all non-native dependencies have been met and
+ * when the commands resulting from this job are guaranteed to fit in the CCCB.
+ */
+static struct dma_fence *pvr_queue_run_job(struct drm_sched_job *sched_job)
+{
+ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+ struct pvr_device *pvr_dev = job->pvr_dev;
+ int err;
+
+ /* The fragment job is issued along the geometry job when we use combined
+ * geom+frag kicks. When we get there, we should simply return the
+ * done_fence that's been initialized earlier.
+ */
+ if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT &&
+ job->done_fence->ops) {
+ return dma_fence_get(job->done_fence);
+ }
+
+ /* The only kind of jobs that can be paired are geometry and fragment, and
+ * we bail out early if we see a fragment job that's paired with a geometry
+ * job.
+ * Paired jobs must also target the same context and point to the same
+ * HWRT.
+ */
+ if (WARN_ON(job->paired_job &&
+ (job->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
+ job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
+ job->hwrt != job->paired_job->hwrt ||
+ job->ctx != job->paired_job->ctx)))
+ return ERR_PTR(-EINVAL);
+
+ err = pvr_job_get_pm_ref(job);
+ if (WARN_ON(err))
+ return ERR_PTR(err);
+
+ if (job->paired_job) {
+ err = pvr_job_get_pm_ref(job->paired_job);
+ if (WARN_ON(err))
+ return ERR_PTR(err);
+ }
+
+ /* Submit our job to the CCCB */
+ pvr_queue_submit_job_to_cccb(job);
+
+ if (job->paired_job) {
+ struct pvr_job *geom_job = job;
+ struct pvr_job *frag_job = job->paired_job;
+ struct pvr_queue *geom_queue = job->ctx->queues.geometry;
+ struct pvr_queue *frag_queue = job->ctx->queues.fragment;
+
+ /* Submit the fragment job along the geometry job and send a combined kick. */
+ pvr_queue_submit_job_to_cccb(frag_job);
+ pvr_cccb_send_kccb_combined_kick(pvr_dev,
+ &geom_queue->cccb, &frag_queue->cccb,
+ pvr_context_get_fw_addr(geom_job->ctx) +
+ geom_queue->ctx_offset,
+ pvr_context_get_fw_addr(frag_job->ctx) +
+ frag_queue->ctx_offset,
+ job->hwrt,
+ frag_job->fw_ccb_cmd_type ==
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR);
+ } else {
+ struct pvr_queue *queue = container_of(job->base.sched,
+ struct pvr_queue, scheduler);
+
+ pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb,
+ pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset,
+ job->hwrt);
+ }
+
+ return dma_fence_get(job->done_fence);
+}
+
+static void pvr_queue_stop(struct pvr_queue *queue, struct pvr_job *bad_job)
+{
+ drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
+}
+
+static void pvr_queue_start(struct pvr_queue *queue)
+{
+ struct pvr_job *job;
+
+ /* Make sure we CPU-signal the UFO object, so other queues don't get
+ * blocked waiting on it.
+ */
+ *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno);
+
+ list_for_each_entry(job, &queue->scheduler.pending_list, base.list) {
+ if (dma_fence_is_signaled(job->done_fence)) {
+ /* Jobs might have completed after drm_sched_stop() was called.
+ * In that case, re-assign the parent field to the done_fence.
+ */
+ WARN_ON(job->base.s_fence->parent);
+ job->base.s_fence->parent = dma_fence_get(job->done_fence);
+ } else {
+ /* If we had unfinished jobs, flag the entity as guilty so no
+ * new job can be submitted.
+ */
+ atomic_set(&queue->ctx->faulty, 1);
+ }
+ }
+
+ drm_sched_start(&queue->scheduler, true);
+}
+
+/**
+ * pvr_queue_timedout_job() - Handle a job timeout event.
+ * @s_job: The job this timeout occurred on.
+ *
+ * FIXME: We don't do anything here to unblock the situation, we just stop+start
+ * the scheduler, and re-assign parent fences in the middle.
+ *
+ * Return:
+ * * DRM_GPU_SCHED_STAT_NOMINAL.
+ */
+static enum drm_gpu_sched_stat
+pvr_queue_timedout_job(struct drm_sched_job *s_job)
+{
+ struct drm_gpu_scheduler *sched = s_job->sched;
+ struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler);
+ struct pvr_device *pvr_dev = queue->ctx->pvr_dev;
+ struct pvr_job *job;
+ u32 job_count = 0;
+
+ dev_err(sched->dev, "Job timeout\n");
+
+ /* Before we stop the scheduler, make sure the queue is out of any list, so
+ * any call to pvr_queue_update_active_state_locked() that might happen
+ * until the scheduler is really stopped doesn't end up re-inserting the
+ * queue in the active list. This would cause
+ * pvr_queue_signal_done_fences() and drm_sched_stop() to race with each
+ * other when accessing the pending_list, since drm_sched_stop() doesn't
+ * grab the job_list_lock when modifying the list (it's assuming the
+ * only other accessor is the scheduler, and it's safe to not grab the
+ * lock since it's stopped).
+ */
+ mutex_lock(&pvr_dev->queues.lock);
+ list_del_init(&queue->node);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ drm_sched_stop(sched, s_job);
+
+ /* Re-assign job parent fences. */
+ list_for_each_entry(job, &sched->pending_list, base.list) {
+ job->base.s_fence->parent = dma_fence_get(job->done_fence);
+ job_count++;
+ }
+ WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count);
+
+ /* Re-insert the queue in the proper list, and kick a queue processing
+ * operation if there were jobs pending.
+ */
+ mutex_lock(&pvr_dev->queues.lock);
+ if (!job_count) {
+ list_move_tail(&queue->node, &pvr_dev->queues.idle);
+ } else {
+ atomic_set(&queue->in_flight_job_count, job_count);
+ list_move_tail(&queue->node, &pvr_dev->queues.active);
+ pvr_queue_process(queue);
+ }
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ drm_sched_start(sched, true);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+/**
+ * pvr_queue_free_job() - Release the reference the scheduler had on a job object.
+ * @sched_job: Job object to free.
+ */
+static void pvr_queue_free_job(struct drm_sched_job *sched_job)
+{
+ struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
+
+ drm_sched_job_cleanup(sched_job);
+ job->paired_job = NULL;
+ pvr_job_put(job);
+}
+
+static const struct drm_sched_backend_ops pvr_queue_sched_ops = {
+ .prepare_job = pvr_queue_prepare_job,
+ .run_job = pvr_queue_run_job,
+ .timedout_job = pvr_queue_timedout_job,
+ .free_job = pvr_queue_free_job,
+};
+
+/**
+ * pvr_queue_fence_is_ufo_backed() - Check if a dma_fence is backed by a UFO object
+ * @f: Fence to test.
+ *
+ * A UFO-backed fence is a fence that can be signaled or waited upon FW-side.
+ * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue
+ * they are pushed to, but those fences are not directly exposed to the outside
+ * world, so we also need to check if the fence we're being passed is a
+ * drm_sched_fence that came from our driver.
+ */
+bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f)
+{
+ struct drm_sched_fence *sched_fence = f ? to_drm_sched_fence(f) : NULL;
+
+ if (sched_fence &&
+ sched_fence->sched->ops == &pvr_queue_sched_ops)
+ return true;
+
+ if (f && f->ops == &pvr_queue_job_fence_ops)
+ return true;
+
+ return false;
+}
+
+/**
+ * pvr_queue_signal_done_fences() - Signal done fences.
+ * @queue: Queue to check.
+ *
+ * Signal done fences of jobs whose seqno is less than or equal to the
+ * current value of the UFO object attached to the queue.
+ */
+static void
+pvr_queue_signal_done_fences(struct pvr_queue *queue)
+{
+ struct pvr_job *job, *tmp_job;
+ u32 cur_seqno;
+
+ spin_lock(&queue->scheduler.job_list_lock);
+ cur_seqno = *queue->timeline_ufo.value;
+ list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) {
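+ /* Wrap-safe comparison: with cur_seqno == 2 and a job seqno of
+ * 0xfffffffe, the u32 subtraction yields 4, which is positive as an
+ * int, so the job is considered completed despite the seqno
+ * wrap-around.
+ */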
+ if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0)
+ break;
+
+ if (!dma_fence_is_signaled(job->done_fence)) {
+ dma_fence_signal(job->done_fence);
+ pvr_job_release_pm_ref(job);
+ atomic_dec(&queue->in_flight_job_count);
+ }
+ }
+ spin_unlock(&queue->scheduler.job_list_lock);
+}
+
+/**
+ * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for
+ * CCCB space can be unblocked and pushed to the CCCB
+ * @queue: Queue to check
+ *
+ * If we have a job waiting for CCCB, and this job now fits in the CCCB, we signal
+ * its CCCB fence, which should kick drm_sched.
+ */
+static void
+pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue)
+{
+ struct pvr_queue_fence *cccb_fence;
+ u32 native_deps_remaining;
+ struct pvr_job *job;
+
+ mutex_lock(&queue->cccb_fence_ctx.job_lock);
+ job = queue->cccb_fence_ctx.job;
+ if (!job)
+ goto out_unlock;
+
+ /* If we have a job attached to the CCCB fence context, its CCCB fence
+ * shouldn't be NULL.
+ */
+ if (WARN_ON(!job->cccb_fence)) {
+ job = NULL;
+ goto out_unlock;
+ }
+
+ /* If we get here, the CCCB fence has to be initialized already. */
+ cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
+ if (WARN_ON(!cccb_fence->queue)) {
+ job = NULL;
+ goto out_unlock;
+ }
+
+ /* Discount signaled dependencies before checking for CCCB space.
+ * If the job fits, signal the CCCB fence, which should unblock
+ * the drm_sched_entity.
+ */
+ native_deps_remaining = job_count_remaining_native_deps(job);
+ if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
+ job = NULL;
+ goto out_unlock;
+ }
+
+ dma_fence_signal(job->cccb_fence);
+ pvr_queue_fence_put(job->cccb_fence);
+ job->cccb_fence = NULL;
+ queue->cccb_fence_ctx.job = NULL;
+
+out_unlock:
+ mutex_unlock(&queue->cccb_fence_ctx.job_lock);
+
+ pvr_job_put(job);
+}
+
+/**
+ * pvr_queue_process() - Process events that happened on a queue.
+ * @queue: Queue to check
+ *
+ * Signal job fences and check if jobs waiting for CCCB space can be unblocked.
+ * Must be called with pvr_device::queues::lock held.
+ */
+void pvr_queue_process(struct pvr_queue *queue)
+{
+ lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock);
+
+ pvr_queue_check_job_waiting_for_cccb_space(queue);
+ pvr_queue_signal_done_fences(queue);
+ pvr_queue_update_active_state_locked(queue);
+}
+
+static u32 get_dm_type(struct pvr_queue *queue)
+{
+ switch (queue->type) {
+ case DRM_PVR_JOB_TYPE_GEOMETRY:
+ return PVR_FWIF_DM_GEOM;
+ case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+ case DRM_PVR_JOB_TYPE_FRAGMENT:
+ return PVR_FWIF_DM_FRAG;
+ case DRM_PVR_JOB_TYPE_COMPUTE:
+ return PVR_FWIF_DM_CDM;
+ }
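+ /* All queue types are handled above; ~0 flags an invalid type. */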
+
+ return ~0;
+}
+
+/**
+ * init_fw_context() - Initializes the queue part of a FW context.
+ * @queue: Queue object to initialize the FW context for.
+ * @fw_ctx_map: The FW context CPU mapping.
+ *
+ * FW contexts contain various state, one piece being a per-queue state that
+ * needs to be initialized for each queue exposed by a context. This function
+ * takes care of that.
+ */
+static void init_fw_context(struct pvr_queue *queue, void *fw_ctx_map)
+{
+ struct pvr_context *ctx = queue->ctx;
+ struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx);
+ struct rogue_fwif_fwcommoncontext *cctx_fw;
+ struct pvr_cccb *cccb = &queue->cccb;
+
+ cctx_fw = fw_ctx_map + queue->ctx_offset;
+ cctx_fw->ccbctl_fw_addr = cccb->ctrl_fw_addr;
+ cctx_fw->ccb_fw_addr = cccb->cccb_fw_addr;
+
+ cctx_fw->dm = get_dm_type(queue);
+ cctx_fw->priority = ctx->priority;
+ cctx_fw->priority_seq_num = 0;
+ cctx_fw->max_deadline_ms = MAX_DEADLINE_MS;
+ cctx_fw->pid = task_tgid_nr(current);
+ cctx_fw->server_common_context_id = ctx->ctx_id;
+
+ pvr_fw_object_get_fw_addr(fw_mem_ctx_obj, &cctx_fw->fw_mem_context_fw_addr);
+
+ pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr);
+}
+
+/**
+ * pvr_queue_cleanup_fw_context() - Wait for the FW context to be idle and clean it up.
+ * @queue: Queue on FW context to clean up.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_fw_structure_cleanup() otherwise.
+ */
+static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
+{
+ if (!queue->ctx->fw_obj)
+ return 0;
+
+ return pvr_fw_structure_cleanup(queue->ctx->pvr_dev,
+ ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
+ queue->ctx->fw_obj, queue->ctx_offset);
+}
+
+/**
+ * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
+ * @job: The job to initialize.
+ *
+ * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
+ * and pvr_queue_job_push() can't fail. We also make sure the context type is
+ * valid and the job can fit in the CCCB.
+ *
+ * Return:
+ * * 0 on success, or
+ * * An error code if something failed.
+ */
+int pvr_queue_job_init(struct pvr_job *job)
+{
+ /* Fragment jobs need at least one native fence wait on the geometry job fence. */
+ u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
+ struct pvr_queue *queue;
+ int err;
+
+ if (atomic_read(&job->ctx->faulty))
+ return -EIO;
+
+ queue = pvr_context_get_queue_for_job(job->ctx, job->type);
+ if (!queue)
+ return -EINVAL;
+
+ if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
+ return -E2BIG;
+
+ err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
+ if (err)
+ return err;
+
+ job->cccb_fence = pvr_queue_fence_alloc();
+ job->kccb_fence = pvr_kccb_fence_alloc();
+ job->done_fence = pvr_queue_fence_alloc();
+ if (!job->cccb_fence || !job->kccb_fence || !job->done_fence)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * pvr_queue_job_arm() - Arm a job object.
+ * @job: The job to arm.
+ *
+ * Initializes fences and returns the drm_sched finished fence so it can
+ * be exposed to the outside world. Once this function is called, you should
+ * make sure the job is pushed using pvr_queue_job_push(), or guarantee that
+ * no one grabbed a reference to the returned fence. The latter can happen if
+ * we do multi-job submission, and something failed when creating/initializing
+ * a job. In that case, we know the fence didn't leave the driver, and we
+ * can thus guarantee nobody will wait on a dead fence object.
+ *
+ * Return:
+ * * A dma_fence object.
+ */
+struct dma_fence *pvr_queue_job_arm(struct pvr_job *job)
+{
+ drm_sched_job_arm(&job->base);
+
+ return &job->base.s_fence->finished;
+}
+
+/**
+ * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
+ * @job: The job to cleanup.
+ *
+ * Should be called in the job release path.
+ */
+void pvr_queue_job_cleanup(struct pvr_job *job)
+{
+ pvr_queue_fence_put(job->done_fence);
+ pvr_queue_fence_put(job->cccb_fence);
+ pvr_kccb_fence_put(job->kccb_fence);
+
+ if (job->base.s_fence)
+ drm_sched_job_cleanup(&job->base);
+}
+
+/**
+ * pvr_queue_job_push() - Push a job to its queue.
+ * @job: The job to push.
+ *
+ * Must be called after pvr_queue_job_init() and after all dependencies
+ * have been added to the job. This will effectively queue the job to
+ * the drm_sched_entity attached to the queue. We grab a reference on
+ * the job object, so the caller is free to drop its reference when it's
+ * done accessing the job object.
+ */
+void pvr_queue_job_push(struct pvr_job *job)
+{
+ struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler);
+
+ /* Keep track of the last queued job scheduled fence for combined submit. */
+ dma_fence_put(queue->last_queued_job_scheduled_fence);
+ queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled);
+
+ pvr_job_get(job);
+ drm_sched_entity_push_job(&job->base);
+}
+
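+/* Initialization callback passed to pvr_fw_object_create() for the register
+ * state object: geometry queues get their initial VDM call stack pointer
+ * seeded here; nothing is done for the other queue types.
+ */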
+static void reg_state_init(void *cpu_ptr, void *priv)
+{
+ struct pvr_queue *queue = priv;
+
+ if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) {
+ struct rogue_fwif_geom_ctx_state *geom_ctx_state_fw = cpu_ptr;
+
+ geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init =
+ queue->callstack_addr;
+ }
+}
+
+/**
+ * pvr_queue_create() - Create a queue object.
+ * @ctx: The context this queue will be attached to.
+ * @type: The type of jobs being pushed to this queue.
+ * @args: The arguments passed to the context creation function.
+ * @fw_ctx_map: CPU mapping of the FW context object.
+ *
+ * Create a queue object that will be used to queue and track jobs.
+ *
+ * Return:
+ * * A valid pointer to a pvr_queue object, or
+ * * An error pointer if the creation/initialization failed.
+ */
+struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
+ enum drm_pvr_job_type type,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map)
+{
+ static const struct {
+ u32 cccb_size;
+ const char *name;
+ } props[] = {
+ [DRM_PVR_JOB_TYPE_GEOMETRY] = {
+ .cccb_size = CTX_GEOM_CCCB_SIZE_LOG2,
+ .name = "geometry",
+ },
+ [DRM_PVR_JOB_TYPE_FRAGMENT] = {
+ .cccb_size = CTX_FRAG_CCCB_SIZE_LOG2,
+ .name = "fragment"
+ },
+ [DRM_PVR_JOB_TYPE_COMPUTE] = {
+ .cccb_size = CTX_COMPUTE_CCCB_SIZE_LOG2,
+ .name = "compute"
+ },
+ [DRM_PVR_JOB_TYPE_TRANSFER_FRAG] = {
+ .cccb_size = CTX_TRANSFER_CCCB_SIZE_LOG2,
+ .name = "transfer_frag"
+ },
+ };
+ struct pvr_device *pvr_dev = ctx->pvr_dev;
+ struct drm_gpu_scheduler *sched;
+ struct pvr_queue *queue;
+ int ctx_state_size, err;
+ void *cpu_map;
+
+ if (WARN_ON(type >= ARRAY_SIZE(props)))
+ return ERR_PTR(-EINVAL);
+
+ switch (ctx->type) {
+ case DRM_PVR_CTX_TYPE_RENDER:
+ if (type != DRM_PVR_JOB_TYPE_GEOMETRY &&
+ type != DRM_PVR_JOB_TYPE_FRAGMENT)
+ return ERR_PTR(-EINVAL);
+ break;
+ case DRM_PVR_CTX_TYPE_COMPUTE:
+ if (type != DRM_PVR_JOB_TYPE_COMPUTE)
+ return ERR_PTR(-EINVAL);
+ break;
+ case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+ if (type != DRM_PVR_JOB_TYPE_TRANSFER_FRAG)
+ return ERR_PTR(-EINVAL);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx_state_size = get_ctx_state_size(pvr_dev, type);
+ if (ctx_state_size < 0)
+ return ERR_PTR(ctx_state_size);
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return ERR_PTR(-ENOMEM);
+
+ queue->type = type;
+ queue->ctx_offset = get_ctx_offset(type);
+ queue->ctx = ctx;
+ queue->callstack_addr = args->callstack_addr;
+ sched = &queue->scheduler;
+ INIT_LIST_HEAD(&queue->node);
+ mutex_init(&queue->cccb_fence_ctx.job_lock);
+ pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base);
+ pvr_queue_fence_ctx_init(&queue->job_fence_ctx);
+
+ err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name);
+ if (err)
+ goto err_free_queue;
+
+ err = pvr_fw_object_create(pvr_dev, ctx_state_size,
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ reg_state_init, queue, &queue->reg_state_obj);
+ if (err)
+ goto err_cccb_fini;
+
+ init_fw_context(queue, fw_ctx_map);
+
+ if (type != DRM_PVR_JOB_TYPE_GEOMETRY && type != DRM_PVR_JOB_TYPE_FRAGMENT &&
+ args->callstack_addr) {
+ err = -EINVAL;
+ goto err_release_reg_state;
+ }
+
+ cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ NULL, NULL, &queue->timeline_ufo.fw_obj);
+ if (IS_ERR(cpu_map)) {
+ err = PTR_ERR(cpu_map);
+ goto err_release_reg_state;
+ }
+
+ queue->timeline_ufo.value = cpu_map;
+
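+ /* One run queue, a 64k credit limit, a hang limit of 1 and a 500ms
+ * job timeout; both job submission and timeout handling are run on
+ * the device-level sched workqueue.
+ */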
+ err = drm_sched_init(&queue->scheduler,
+ &pvr_queue_sched_ops,
+ pvr_dev->sched_wq, 1, 64 * 1024, 1,
+ msecs_to_jiffies(500),
+ pvr_dev->sched_wq, NULL, "pvr-queue",
+ pvr_dev->base.dev);
+ if (err)
+ goto err_release_ufo;
+
+ err = drm_sched_entity_init(&queue->entity,
+ DRM_SCHED_PRIORITY_KERNEL,
+ &sched, 1, &ctx->faulty);
+ if (err)
+ goto err_sched_fini;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ list_add_tail(&queue->node, &pvr_dev->queues.idle);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ return queue;
+
+err_sched_fini:
+ drm_sched_fini(&queue->scheduler);
+
+err_release_ufo:
+ pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj);
+
+err_release_reg_state:
+ pvr_fw_object_destroy(queue->reg_state_obj);
+
+err_cccb_fini:
+ pvr_cccb_fini(&queue->cccb);
+
+err_free_queue:
+ mutex_destroy(&queue->cccb_fence_ctx.job_lock);
+ kfree(queue);
+
+ return ERR_PTR(err);
+}
+
+void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev)
+{
+ struct pvr_queue *queue;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ list_for_each_entry(queue, &pvr_dev->queues.idle, node)
+ pvr_queue_stop(queue, NULL);
+ list_for_each_entry(queue, &pvr_dev->queues.active, node)
+ pvr_queue_stop(queue, NULL);
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+void pvr_queue_device_post_reset(struct pvr_device *pvr_dev)
+{
+ struct pvr_queue *queue;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ list_for_each_entry(queue, &pvr_dev->queues.active, node)
+ pvr_queue_start(queue);
+ list_for_each_entry(queue, &pvr_dev->queues.idle, node)
+ pvr_queue_start(queue);
+ mutex_unlock(&pvr_dev->queues.lock);
+}
+
+/**
+ * pvr_queue_kill() - Kill a queue.
+ * @queue: The queue to kill.
+ *
+ * Kill the queue so no new jobs can be pushed. Should be called when the
+ * context handle is destroyed. The queue object might last longer if jobs
+ * are still in flight and holding a reference to the context this queue
+ * belongs to.
+ */
+void pvr_queue_kill(struct pvr_queue *queue)
+{
+ drm_sched_entity_destroy(&queue->entity);
+ dma_fence_put(queue->last_queued_job_scheduled_fence);
+ queue->last_queued_job_scheduled_fence = NULL;
+}
+
+/**
+ * pvr_queue_destroy() - Destroy a queue.
+ * @queue: The queue to destroy.
+ *
+ * Cleanup the queue and free the resources attached to it. Should be
+ * called from the context release function.
+ */
+void pvr_queue_destroy(struct pvr_queue *queue)
+{
+ if (!queue)
+ return;
+
+ mutex_lock(&queue->ctx->pvr_dev->queues.lock);
+ list_del_init(&queue->node);
+ mutex_unlock(&queue->ctx->pvr_dev->queues.lock);
+
+ drm_sched_fini(&queue->scheduler);
+ drm_sched_entity_fini(&queue->entity);
+
+ if (WARN_ON(queue->last_queued_job_scheduled_fence))
+ dma_fence_put(queue->last_queued_job_scheduled_fence);
+
+ pvr_queue_cleanup_fw_context(queue);
+
+ pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj);
+ pvr_fw_object_destroy(queue->reg_state_obj);
+ pvr_cccb_fini(&queue->cccb);
+ mutex_destroy(&queue->cccb_fence_ctx.job_lock);
+ kfree(queue);
+}
+
+/**
+ * pvr_queue_device_init() - Device-level initialization of queue related fields.
+ * @pvr_dev: The device to initialize.
+ *
+ * Initializes all fields related to queue management in pvr_device.
+ *
+ * Return:
+ * * 0 on success, or
+ * * An error code on failure.
+ */
+int pvr_queue_device_init(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ INIT_LIST_HEAD(&pvr_dev->queues.active);
+ INIT_LIST_HEAD(&pvr_dev->queues.idle);
+ err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock);
+ if (err)
+ return err;
+
+ pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0);
+ if (!pvr_dev->sched_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
+ * @pvr_dev: The device to cleanup.
+ *
+ * Cleanup/free all queue-related resources attached to a pvr_device object.
+ */
+void pvr_queue_device_fini(struct pvr_device *pvr_dev)
+{
+ destroy_workqueue(pvr_dev->sched_wq);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
new file mode 100644
index 000000000..e06ced693
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_QUEUE_H
+#define PVR_QUEUE_H
+
+#include <drm/gpu_scheduler.h>
+
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+
+struct pvr_context;
+struct pvr_queue;
+
+/**
+ * struct pvr_queue_fence_ctx - Queue fence context
+ *
+ * Used to implement dma_fence_ops for pvr_job::{done,cccb}_fence.
+ */
+struct pvr_queue_fence_ctx {
+ /** @id: Fence context ID allocated with dma_fence_context_alloc(). */
+ u64 id;
+
+ /** @seqno: Sequence number incremented each time a fence is created. */
+ atomic_t seqno;
+
+ /** @lock: Lock used to synchronize access to fences allocated by this context. */
+ spinlock_t lock;
+};
+
+/**
+ * struct pvr_queue_cccb_fence_ctx - CCCB fence context
+ *
+ * Context used to manage fences controlling access to the CCCB. No fences are
+ * issued if there's enough space in the CCCB to push job commands.
+ */
+struct pvr_queue_cccb_fence_ctx {
+ /** @base: Base queue fence context. */
+ struct pvr_queue_fence_ctx base;
+
+ /**
+ * @job: Job waiting for CCCB space.
+ *
+ * Thanks to the serialization done at the drm_sched_entity level,
+ * there's no more than one job waiting for CCCB at a given time.
+ *
+ * This field is NULL if no jobs are currently waiting for CCCB space.
+ *
+ * Must be accessed with @job_lock held.
+ */
+ struct pvr_job *job;
+
+ /** @job_lock: Lock protecting access to the job object. */
+ struct mutex job_lock;
+};
+
+/**
+ * struct pvr_queue_fence - Queue fence object
+ */
+struct pvr_queue_fence {
+ /** @base: Base dma_fence. */
+ struct dma_fence base;
+
+ /** @queue: Queue that created this fence. */
+ struct pvr_queue *queue;
+};
+
+/**
+ * struct pvr_queue - Job queue
+ *
+ * Used to queue and track execution of pvr_job objects.
+ */
+struct pvr_queue {
+ /** @scheduler: Single entity scheduler used to push jobs to this queue. */
+ struct drm_gpu_scheduler scheduler;
+
+ /** @entity: Scheduling entity backing this queue. */
+ struct drm_sched_entity entity;
+
+ /** @type: Type of jobs queued to this queue. */
+ enum drm_pvr_job_type type;
+
+ /** @ctx: Context object this queue is bound to. */
+ struct pvr_context *ctx;
+
+ /** @node: Used to add the queue to the active/idle queue list. */
+ struct list_head node;
+
+ /**
+ * @in_flight_job_count: Number of jobs submitted to the CCCB that
+ * have not been processed yet.
+ */
+ atomic_t in_flight_job_count;
+
+ /**
+ * @cccb_fence_ctx: CCCB fence context.
+ *
+ * Used to control access to the CCCB when it is full, such that we don't
+ * end up trying to push commands to the CCCB if there's not enough
+ * space to receive all commands needed for a job to complete.
+ */
+ struct pvr_queue_cccb_fence_ctx cccb_fence_ctx;
+
+ /** @job_fence_ctx: Job fence context object. */
+ struct pvr_queue_fence_ctx job_fence_ctx;
+
+ /** @timeline_ufo: Timeline UFO for the context queue. */
+ struct {
+ /** @fw_obj: FW object representing the UFO value. */
+ struct pvr_fw_object *fw_obj;
+
+ /** @value: CPU mapping of the UFO value. */
+ u32 *value;
+ } timeline_ufo;
+
+ /**
+ * @last_queued_job_scheduled_fence: The scheduled fence of the last
+ * job queued to this queue.
+ *
+ * We use it to insert frag -> geom dependencies when issuing combined
+ * geom+frag jobs, to guarantee that the fragment job that's part of
+ * the combined operation comes after all fragment jobs that were queued
+ * before it.
+ */
+ struct dma_fence *last_queued_job_scheduled_fence;
+
+ /** @cccb: Client Circular Command Buffer. */
+ struct pvr_cccb cccb;
+
+ /** @reg_state_obj: FW object representing the register state of this queue. */
+ struct pvr_fw_object *reg_state_obj;
+
+ /** @ctx_offset: Offset of the queue context in the FW context object. */
+ u32 ctx_offset;
+
+ /** @callstack_addr: Initial call stack address for register state object. */
+ u64 callstack_addr;
+};
+
+bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);
+
+int pvr_queue_job_init(struct pvr_job *job);
+
+void pvr_queue_job_cleanup(struct pvr_job *job);
+
+void pvr_queue_job_push(struct pvr_job *job);
+
+struct dma_fence *pvr_queue_job_arm(struct pvr_job *job);
+
+struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
+ enum drm_pvr_job_type type,
+ struct drm_pvr_ioctl_create_context_args *args,
+ void *fw_ctx_map);
+
+void pvr_queue_kill(struct pvr_queue *queue);
+
+void pvr_queue_destroy(struct pvr_queue *queue);
+
+void pvr_queue_process(struct pvr_queue *queue);
+
+void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev);
+
+void pvr_queue_device_post_reset(struct pvr_device *pvr_dev);
+
+int pvr_queue_device_init(struct pvr_device *pvr_dev);
+
+void pvr_queue_device_fini(struct pvr_device *pvr_dev);
+
+#endif /* PVR_QUEUE_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
new file mode 100644
index 000000000..2a90d0279
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -0,0 +1,6193 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+/* *** Autogenerated C -- do not edit *** */
+
+#ifndef PVR_ROGUE_CR_DEFS_H
+#define PVR_ROGUE_CR_DEFS_H
+
+/* clang-format off */
+
+#define ROGUE_CR_DEFS_REVISION 1
+
+/* Register ROGUE_CR_RASTERISATION_INDIRECT */
+#define ROGUE_CR_RASTERISATION_INDIRECT 0x8238U
+#define ROGUE_CR_RASTERISATION_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_PBE_INDIRECT */
+#define ROGUE_CR_PBE_INDIRECT 0x83E0U
+#define ROGUE_CR_PBE_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_PBE_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_PBE_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_PBE_PERF_INDIRECT */
+#define ROGUE_CR_PBE_PERF_INDIRECT 0x83D8U
+#define ROGUE_CR_PBE_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_TPU_PERF_INDIRECT */
+#define ROGUE_CR_TPU_PERF_INDIRECT 0x83F0U
+#define ROGUE_CR_TPU_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_INDIRECT */
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT 0x8318U
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT */
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT 0x8028U
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_USC_PERF_INDIRECT */
+#define ROGUE_CR_USC_PERF_INDIRECT 0x8030U
+#define ROGUE_CR_USC_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_BLACKPEARL_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_INDIRECT 0x8388U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT 0x83F8U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_TEXAS3_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT 0x83D0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_TEXAS_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS_PERF_INDIRECT 0x8288U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BX_TU_PERF_INDIRECT */
+#define ROGUE_CR_BX_TU_PERF_INDIRECT 0xC900U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_CLK_CTRL */
+#define ROGUE_CR_CLK_CTRL 0x0000U
+#define ROGUE_CR_CLK_CTRL__PBE2_XE__MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL__S7_TOP__MASKFULL 0xCFCF03000F3F3F0FULL
+#define ROGUE_CR_CLK_CTRL_MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_SHIFT 62U
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_CLRMSK 0x3FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_ON 0x4000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_AUTO 0x8000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_SHIFT 60U
+#define ROGUE_CR_CLK_CTRL_IPP_CLRMSK 0xCFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_IPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_ON 0x1000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_AUTO 0x2000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_SHIFT 58U
+#define ROGUE_CR_CLK_CTRL_FBC_CLRMSK 0xF3FFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_ON 0x0400000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_AUTO 0x0800000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_SHIFT 56U
+#define ROGUE_CR_CLK_CTRL_FBDC_CLRMSK 0xFCFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBDC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_ON 0x0100000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_AUTO 0x0200000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_SHIFT 54U
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_CLRMSK 0xFF3FFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_ON 0x0040000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_AUTO 0x0080000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_SHIFT 52U
+#define ROGUE_CR_CLK_CTRL_USCS_CLRMSK 0xFFCFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USCS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_ON 0x0010000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_AUTO 0x0020000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_SHIFT 50U
+#define ROGUE_CR_CLK_CTRL_PBE_CLRMSK 0xFFF3FFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_PBE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_ON 0x0004000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_AUTO 0x0008000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_SHIFT 48U
+#define ROGUE_CR_CLK_CTRL_MCU_L1_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_ON 0x0001000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_AUTO 0x0002000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_SHIFT 46U
+#define ROGUE_CR_CLK_CTRL_CDM_CLRMSK 0xFFFF3FFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_CDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_ON 0x0000400000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_AUTO 0x0000800000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_SHIFT 44U
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_CLRMSK 0xFFFFCFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_ON 0x0000100000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_AUTO 0x0000200000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT 42U
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK 0xFFFFF3FFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_ON 0x0000040000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_AUTO 0x0000080000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SHIFT 40U
+#define ROGUE_CR_CLK_CTRL_BIF_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_ON 0x0000010000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_AUTO 0x0000020000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT 28U
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_SHIFT 26U
+#define ROGUE_CR_CLK_CTRL_MCU_L0_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_SHIFT 24U
+#define ROGUE_CR_CLK_CTRL_TPU_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_SHIFT 20U
+#define ROGUE_CR_CLK_CTRL_USC_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_CTRL_USC_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_SHIFT 18U
+#define ROGUE_CR_CLK_CTRL_TLA_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_CTRL_TLA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_SHIFT 16U
+#define ROGUE_CR_CLK_CTRL_SLC_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_CTRL_SLC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_AUTO 0x0000000000020000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_SHIFT 14U
+#define ROGUE_CR_CLK_CTRL_UVS_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_CLK_CTRL_UVS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_ON 0x0000000000004000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_AUTO 0x0000000000008000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_SHIFT 12U
+#define ROGUE_CR_CLK_CTRL_PDS_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_CLK_CTRL_PDS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_ON 0x0000000000001000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_AUTO 0x0000000000002000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL_VDM_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL_VDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL_VDM_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL_PM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL_PM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL_PM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL_PM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL_GPP_SHIFT 6U
+#define ROGUE_CR_CLK_CTRL_GPP_CLRMSK 0xFFFFFFFFFFFFFF3FULL
+#define ROGUE_CR_CLK_CTRL_GPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_GPP_ON 0x0000000000000040ULL
+#define ROGUE_CR_CLK_CTRL_GPP_AUTO 0x0000000000000080ULL
+#define ROGUE_CR_CLK_CTRL_TE_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL_TE_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL_TE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TE_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL_TE_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL_TSP_SHIFT 2U
+#define ROGUE_CR_CLK_CTRL_TSP_CLRMSK 0xFFFFFFFFFFFFFFF3ULL
+#define ROGUE_CR_CLK_CTRL_TSP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TSP_ON 0x0000000000000004ULL
+#define ROGUE_CR_CLK_CTRL_TSP_AUTO 0x0000000000000008ULL
+#define ROGUE_CR_CLK_CTRL_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL_ISP_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL_ISP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_ISP_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL_ISP_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS */
+#define ROGUE_CR_CLK_STATUS 0x0008U
+#define ROGUE_CR_CLK_STATUS__PBE2_XE__MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS__S7_TOP__MASKFULL 0x00000001B3101773ULL
+#define ROGUE_CR_CLK_STATUS_MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_SHIFT 32U
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_RUNNING 0x0000000100000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_SHIFT 31U
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_RUNNING 0x0000000080000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_SHIFT 30U
+#define ROGUE_CR_CLK_STATUS_IPP_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_IPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_RUNNING 0x0000000040000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_SHIFT 29U
+#define ROGUE_CR_CLK_STATUS_FBC_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_RUNNING 0x0000000020000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_SHIFT 28U
+#define ROGUE_CR_CLK_STATUS_FBDC_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBDC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_RUNNING 0x0000000010000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_SHIFT 27U
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_RUNNING 0x0000000008000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_SHIFT 26U
+#define ROGUE_CR_CLK_STATUS_USCS_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_USCS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_RUNNING 0x0000000004000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_SHIFT 25U
+#define ROGUE_CR_CLK_STATUS_PBE_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_PBE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_RUNNING 0x0000000002000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_SHIFT 24U
+#define ROGUE_CR_CLK_STATUS_MCU_L1_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_RUNNING 0x0000000001000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_SHIFT 23U
+#define ROGUE_CR_CLK_STATUS_CDM_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_CLK_STATUS_CDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_RUNNING 0x0000000000800000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_SHIFT 22U
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_RUNNING 0x0000000000400000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT 21U
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING 0x0000000000200000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SHIFT 20U
+#define ROGUE_CR_CLK_STATUS_BIF_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_RUNNING 0x0000000000100000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT 14U
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING 0x0000000000004000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_SHIFT 13U
+#define ROGUE_CR_CLK_STATUS_MCU_L0_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_RUNNING 0x0000000000002000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_SHIFT 12U
+#define ROGUE_CR_CLK_STATUS_TPU_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_RUNNING 0x0000000000001000ULL
+#define ROGUE_CR_CLK_STATUS_USC_SHIFT 10U
+#define ROGUE_CR_CLK_STATUS_USC_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_STATUS_USC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USC_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_STATUS_TLA_SHIFT 9U
+#define ROGUE_CR_CLK_STATUS_TLA_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_STATUS_TLA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TLA_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_STATUS_SLC_SHIFT 8U
+#define ROGUE_CR_CLK_STATUS_SLC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_STATUS_SLC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SLC_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_STATUS_UVS_SHIFT 7U
+#define ROGUE_CR_CLK_STATUS_UVS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_STATUS_UVS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_UVS_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_STATUS_PDS_SHIFT 6U
+#define ROGUE_CR_CLK_STATUS_PDS_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_STATUS_PDS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PDS_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_STATUS_VDM_SHIFT 5U
+#define ROGUE_CR_CLK_STATUS_VDM_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_STATUS_VDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_VDM_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_STATUS_PM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS_PM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS_PM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS_GPP_SHIFT 3U
+#define ROGUE_CR_CLK_STATUS_GPP_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_STATUS_GPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_GPP_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_STATUS_TE_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS_TE_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS_TE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TE_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS_TSP_SHIFT 1U
+#define ROGUE_CR_CLK_STATUS_TSP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_STATUS_TSP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TSP_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_STATUS_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS_ISP_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS_ISP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_ISP_RUNNING 0x0000000000000001ULL
+
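+/*
+ * Usage sketch (illustrative, not part of the register map): each _CLRMSK
+ * value is the register contents with that field's bits cleared, so a field
+ * is isolated by ANDing with the inverted clear mask. Assuming a
+ * hypothetical rogue_read64() register accessor, checking whether the SLC
+ * clock domain is running could look like:
+ *
+ *	u64 status = rogue_read64(ROGUE_CR_CLK_STATUS);
+ *
+ *	if ((status & ~ROGUE_CR_CLK_STATUS_SLC_CLRMSK) ==
+ *	    ROGUE_CR_CLK_STATUS_SLC_RUNNING)
+ *		slc_is_running = true;
+ */
+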
+/* Register ROGUE_CR_CORE_ID (packed-BVNC layout) */
+#define ROGUE_CR_CORE_ID__PBVNC 0x0020U
+#define ROGUE_CR_CORE_ID__PBVNC__MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT 48U
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT 32U
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT 16U
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT 0U
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_CORE_ID (legacy ID/CONFIG layout) */
+#define ROGUE_CR_CORE_ID 0x0018U
+#define ROGUE_CR_CORE_ID_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID_ID_SHIFT 16U
+#define ROGUE_CR_CORE_ID_ID_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_CORE_ID_CONFIG_SHIFT 0U
+#define ROGUE_CR_CORE_ID_CONFIG_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CORE_REVISION */
+#define ROGUE_CR_CORE_REVISION 0x0020U
+#define ROGUE_CR_CORE_REVISION_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_REVISION_DESIGNER_SHIFT 24U
+#define ROGUE_CR_CORE_REVISION_DESIGNER_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CORE_REVISION_MAJOR_SHIFT 16U
+#define ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CORE_REVISION_MINOR_SHIFT 8U
+#define ROGUE_CR_CORE_REVISION_MINOR_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_SHIFT 0U
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_CLRMSK 0xFFFFFF00U
+
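+/*
+ * Usage sketch (illustrative): a field is recovered by masking with the
+ * inverted _CLRMSK and shifting down by _SHIFT. Splitting a
+ * ROGUE_CR_CORE_REVISION read into its parts, with rogue_read32() again a
+ * hypothetical accessor:
+ *
+ *	u32 rev = rogue_read32(ROGUE_CR_CORE_REVISION);
+ *	u32 major = (rev & ~ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+ *		    ROGUE_CR_CORE_REVISION_MAJOR_SHIFT;
+ *	u32 minor = (rev & ~ROGUE_CR_CORE_REVISION_MINOR_CLRMSK) >>
+ *		    ROGUE_CR_CORE_REVISION_MINOR_SHIFT;
+ */
+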
+/* Register ROGUE_CR_DESIGNER_REV_FIELD1 */
+#define ROGUE_CR_DESIGNER_REV_FIELD1 0x0028U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_DESIGNER_REV_FIELD2 */
+#define ROGUE_CR_DESIGNER_REV_FIELD2 0x0030U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CHANGESET_NUMBER */
+#define ROGUE_CR_CHANGESET_NUMBER 0x0040U
+#define ROGUE_CR_CHANGESET_NUMBER_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT 0U
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_CTRL */
+#define ROGUE_CR_CLK_XTPLUS_CTRL 0x0080U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_MASKFULL 0x0000003FFFFF0000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_SHIFT 36U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK 0xFFFFFFCFFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_ON 0x0000001000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_AUTO 0x0000002000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT 34U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK 0xFFFFFFF3FFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_ON 0x0000000400000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_AUTO 0x0000000800000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_SHIFT 32U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK 0xFFFFFFFCFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_ON 0x0000000100000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_AUTO 0x0000000200000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT 30U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK 0xFFFFFFFF3FFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_ON 0x0000000040000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO 0x0000000080000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT 28U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT 26U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT 24U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT 22U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK 0xFFFFFFFFFF3FFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON 0x0000000000400000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO 0x0000000000800000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT 20U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT 16U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO 0x0000000000020000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_STATUS */
+#define ROGUE_CR_CLK_XTPLUS_STATUS 0x0088U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_SHIFT 10U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_SHIFT 9U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT 8U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT 7U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT 6U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT 5U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT 4U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT 3U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT 2U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT 1U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT 0U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SOFT_RESET */
+#define ROGUE_CR_SOFT_RESET 0x0100U
+#define ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL 0xFFEFFFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_MASKFULL 0x00E7FFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT 63U
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_EN 0x8000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT 62U
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_EN 0x4000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_SHIFT 61U
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_EN 0x2000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_SHIFT 60U
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_EN 0x1000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_SHIFT 59U
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_EN 0x0800000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TE3_SHIFT 58U
+#define ROGUE_CR_SOFT_RESET_TE3_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE3_EN 0x0400000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VCE_SHIFT 57U
+#define ROGUE_CR_SOFT_RESET_VCE_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VCE_EN 0x0200000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VBS_SHIFT 56U
+#define ROGUE_CR_SOFT_RESET_VBS_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VBS_EN 0x0100000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_SHIFT 55U
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_EN 0x0080000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_SHIFT 54U
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FBA_SHIFT 53U
+#define ROGUE_CR_SOFT_RESET_FBA_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBA_EN 0x0020000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_SHIFT 51U
+#define ROGUE_CR_SOFT_RESET_FB_CDC_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_EN 0x0008000000000000ULL
+#define ROGUE_CR_SOFT_RESET_SH_SHIFT 50U
+#define ROGUE_CR_SOFT_RESET_SH_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SH_EN 0x0004000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VRDM_SHIFT 49U
+#define ROGUE_CR_SOFT_RESET_VRDM_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VRDM_EN 0x0002000000000000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_SHIFT 48U
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_EN 0x0001000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT 47U
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_EN 0x0000800000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT 46U
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_EN 0x0000400000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_SHIFT 45U
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_EN 0x0000200000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_SHIFT 44U
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SOFT_RESET_IPP_SHIFT 43U
+#define ROGUE_CR_SOFT_RESET_IPP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_IPP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_SHIFT 42U
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_EN 0x0000040000000000ULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_SHIFT 41U
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_EN 0x0000020000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_SHIFT 40U
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN 0x0000010000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_SHIFT 39U
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_SHIFT 38U
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN 0x0000004000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_SHIFT 37U
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN 0x0000002000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_SHIFT 36U
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN 0x0000001000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_SHIFT 35U
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN 0x0000000800000000ULL
+#define ROGUE_CR_SOFT_RESET_MMU_SHIFT 34U
+#define ROGUE_CR_SOFT_RESET_MMU_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MMU_EN 0x0000000400000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF1_SHIFT 33U
+#define ROGUE_CR_SOFT_RESET_BIF1_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF1_EN 0x0000000200000000ULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_GARTEN_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_CPU_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_CPU_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_CPU_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_SHIFT 31U
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN 0x0000000080000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_SHIFT 30U
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN 0x0000000040000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_SHIFT 29U
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_SHIFT 28U
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SOFT_RESET_SLC_SHIFT 27U
+#define ROGUE_CR_SOFT_RESET_SLC_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SLC_EN 0x0000000008000000ULL
+#define ROGUE_CR_SOFT_RESET_TLA_SHIFT 26U
+#define ROGUE_CR_SOFT_RESET_TLA_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TLA_EN 0x0000000004000000ULL
+#define ROGUE_CR_SOFT_RESET_UVS_SHIFT 25U
+#define ROGUE_CR_SOFT_RESET_UVS_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_UVS_EN 0x0000000002000000ULL
+#define ROGUE_CR_SOFT_RESET_TE_SHIFT 24U
+#define ROGUE_CR_SOFT_RESET_TE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SOFT_RESET_GPP_SHIFT 23U
+#define ROGUE_CR_SOFT_RESET_GPP_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SOFT_RESET_GPP_EN 0x0000000000800000ULL
+#define ROGUE_CR_SOFT_RESET_FBDC_SHIFT 22U
+#define ROGUE_CR_SOFT_RESET_FBDC_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBDC_EN 0x0000000000400000ULL
+#define ROGUE_CR_SOFT_RESET_FBC_SHIFT 21U
+#define ROGUE_CR_SOFT_RESET_FBC_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBC_EN 0x0000000000200000ULL
+#define ROGUE_CR_SOFT_RESET_PM_SHIFT 20U
+#define ROGUE_CR_SOFT_RESET_PM_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PM_EN 0x0000000000100000ULL
+#define ROGUE_CR_SOFT_RESET_PBE_SHIFT 19U
+#define ROGUE_CR_SOFT_RESET_PBE_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SOFT_RESET_PBE_EN 0x0000000000080000ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_EN 0x0000000000040000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_SHIFT 17U
+#define ROGUE_CR_SOFT_RESET_MCU_L1_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_EN 0x0000000000020000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_SHIFT 16U
+#define ROGUE_CR_SOFT_RESET_BIF_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_EN 0x0000000000010000ULL
+#define ROGUE_CR_SOFT_RESET_CDM_SHIFT 15U
+#define ROGUE_CR_SOFT_RESET_CDM_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SOFT_RESET_CDM_EN 0x0000000000008000ULL
+#define ROGUE_CR_SOFT_RESET_VDM_SHIFT 14U
+#define ROGUE_CR_SOFT_RESET_VDM_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SOFT_RESET_VDM_EN 0x0000000000004000ULL
+#define ROGUE_CR_SOFT_RESET_TESS_SHIFT 13U
+#define ROGUE_CR_SOFT_RESET_TESS_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SOFT_RESET_TESS_EN 0x0000000000002000ULL
+#define ROGUE_CR_SOFT_RESET_PDS_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET_PDS_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SOFT_RESET_PDS_EN 0x0000000000001000ULL
+#define ROGUE_CR_SOFT_RESET_ISP_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET_ISP_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SOFT_RESET_ISP_EN 0x0000000000000800ULL
+#define ROGUE_CR_SOFT_RESET_TSP_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET_TSP_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SOFT_RESET_TSP_EN 0x0000000000000400ULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET_SYSARB_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_EN 0x0000000000000020ULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_EN 0x0000000000000010ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET_MCU_L0_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_EN 0x0000000000000008ULL
+#define ROGUE_CR_SOFT_RESET_TPU_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET_TPU_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SOFT_RESET_TPU_EN 0x0000000000000004ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET_USC_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SOFT_RESET_USC_EN 0x0000000000000001ULL
+
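+/*
+ * Usage sketch (illustrative): the _EN constants are single-bit masks, so
+ * several units can be reset in one write by ORing them together; clipping
+ * against _MASKFULL keeps the value within the bits this core implements.
+ * rogue_write64() is a hypothetical accessor:
+ *
+ *	u64 reset = (ROGUE_CR_SOFT_RESET_VDM_EN |
+ *		     ROGUE_CR_SOFT_RESET_CDM_EN |
+ *		     ROGUE_CR_SOFT_RESET_PDS_EN) &
+ *		    ROGUE_CR_SOFT_RESET_MASKFULL;
+ *	rogue_write64(ROGUE_CR_SOFT_RESET, reset);
+ */
+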
+/* Register ROGUE_CR_SOFT_RESET2 */
+#define ROGUE_CR_SOFT_RESET2 0x0108U
+#define ROGUE_CR_SOFT_RESET2_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_SOFT_RESET2_TDM_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET2_TDM_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SOFT_RESET2_TDM_EN 0x00000800U
+#define ROGUE_CR_SOFT_RESET2_ASTC_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET2_ASTC_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SOFT_RESET2_ASTC_EN 0x00000400U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_SHIFT 9U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN 0x00000200U
+#define ROGUE_CR_SOFT_RESET2_USCPS_SHIFT 8U
+#define ROGUE_CR_SOFT_RESET2_USCPS_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SOFT_RESET2_USCPS_EN 0x00000100U
+#define ROGUE_CR_SOFT_RESET2_IPF_SHIFT 7U
+#define ROGUE_CR_SOFT_RESET2_IPF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SOFT_RESET2_IPF_EN 0x00000080U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_SHIFT 6U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_EN 0x00000040U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_EN 0x00000020U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_EN 0x00000010U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_EN 0x00000008U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SOFT_RESET2_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SOFT_RESET2_CDM_SHIFT 1U
+#define ROGUE_CR_SOFT_RESET2_CDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SOFT_RESET2_CDM_EN 0x00000002U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SOFT_RESET2_VERTEX_EN 0x00000001U
+
+/* Register ROGUE_CR_EVENT_STATUS */
+#define ROGUE_CR_EVENT_STATUS 0x0130U
+#define ROGUE_CR_EVENT_STATUS__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_STATUS__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+
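+/*
+ * Note: several EVENT_STATUS names share a bit position (for example
+ * RDM_FC2_FINISHED and SAFETY at bit 20, RDM_FC1_FINISHED and SLAVE_REQ at
+ * bit 19); which meaning applies depends on the core variant, as reflected
+ * by the per-variant _MASKFULL values above.
+ */
+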
+/* Register ROGUE_CR_TIMER */
+#define ROGUE_CR_TIMER 0x0160U
+#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_SHIFT 63U
+#define ROGUE_CR_TIMER_BIT31_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_EN 0x8000000000000000ULL
+#define ROGUE_CR_TIMER_VALUE_SHIFT 0U
+#define ROGUE_CR_TIMER_VALUE_CLRMSK 0xFFFF000000000000ULL
+
+/* Register ROGUE_CR_TLA_STATUS */
+#define ROGUE_CR_TLA_STATUS 0x0178U
+#define ROGUE_CR_TLA_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_SHIFT 39U
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_CLRMSK 0x0000007FFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_REQUEST_SHIFT 7U
+#define ROGUE_CR_TLA_STATUS_REQUEST_CLRMSK 0xFFFFFF800000007FULL
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT 1U
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK 0xFFFFFFFFFFFFFF81ULL
+#define ROGUE_CR_TLA_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_TLA_STATUS_BUSY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_TLA_STATUS_BUSY_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_PM_PARTIAL_RENDER_ENABLE */
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE 0x0338U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT 0U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN 0x00000001U
+
+/* Register ROGUE_CR_SIDEKICK_IDLE */
+#define ROGUE_CR_SIDEKICK_IDLE 0x03C8U
+#define ROGUE_CR_SIDEKICK_IDLE_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_SHIFT 6U
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_EN 0x00000040U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_SHIFT 5U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_EN 0x00000020U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_SHIFT 4U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_EN 0x00000010U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_SHIFT 3U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_EN 0x00000008U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_SHIFT 2U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN 0x00000004U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_SHIFT 1U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN 0x00000002U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN 0x00000001U
+
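+/*
+ * Usage sketch (illustrative): the _IDLE registers expose one bit per unit,
+ * so "fully idle" is the value matching the register's _MASKFULL. With a
+ * hypothetical rogue_read32() accessor:
+ *
+ *	bool idle = (rogue_read32(ROGUE_CR_SIDEKICK_IDLE) &
+ *		     ROGUE_CR_SIDEKICK_IDLE_MASKFULL) ==
+ *		    ROGUE_CR_SIDEKICK_IDLE_MASKFULL;
+ */
+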
+/* Register ROGUE_CR_MARS_IDLE */
+#define ROGUE_CR_MARS_IDLE 0x08F8U
+#define ROGUE_CR_MARS_IDLE_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_SHIFT 2U
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_EN 0x00000004U
+#define ROGUE_CR_MARS_IDLE_CPU_SHIFT 1U
+#define ROGUE_CR_MARS_IDLE_CPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MARS_IDLE_CPU_EN 0x00000002U
+#define ROGUE_CR_MARS_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_MARS_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MARS_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS 0x0430U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL 0x00000000000000F3ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT 4U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK 0xFFFFFF0FU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0 0x0438U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1 0x0440U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2 0x0448U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 0x0450U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 0x0458U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 0x0460U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_CDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS 0x04A0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_PDS0 0x04A8U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
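+/*
+ * Usage sketch (illustrative): _ALIGNSHIFT/_ALIGNSIZE mark address fields
+ * whose low alignment bits are not stored. Packing a 16-byte-aligned PDS
+ * data address would look like (the shift by _ALIGNSHIFT drops the
+ * alignment bits before the field is positioned):
+ *
+ *	u64 field = ((u64)(data_addr >> ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT)
+ *		     << ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT) &
+ *		    ~ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK;
+ */
+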
+/* Register ROGUE_CR_CDM_CONTEXT_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_PDS1 0x04B0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS */
+#define ROGUE_CR_CDM_TERMINATE_PDS 0x04B8U
+#define ROGUE_CR_CDM_TERMINATE_PDS_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS1 */
+#define ROGUE_CR_CDM_TERMINATE_PDS1 0x04C0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 0x04D8U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 0x04E0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_CONFIG */
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG 0x0810U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_MASKFULL 0x000001030F01FFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT 40U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN 0x0000010000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT 33U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN 0x0000000200000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT 32U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT 25U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK 0xFFFFFFFFF1FFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT 24U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN 0x0000000001000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT 16U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS 0x0000000000010000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 0x0818U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 0x0820U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
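+/*
+ * Usage sketch (illustrative): each remap window is a CONFIG1/CONFIG2 pair;
+ * CONFIG1 carries the incoming base address and the enable bit, CONFIG2 the
+ * outgoing address, OS_ID/TRUSTED attributes and the region size as a power
+ * of two. With page-aligned addresses and a hypothetical rogue_write64():
+ *
+ *	rogue_write64(ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2,
+ *		      (out_addr & ~ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) |
+ *		      region_size_pow2);
+ *	rogue_write64(ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1,
+ *		      (base_in & ~ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK) |
+ *		      ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+ */
+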
+/* Register ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1 0x0828U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2 0x0830U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1 0x0838U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 0x0840U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 0x0848U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 0x0850U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 0x0858U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 0x0860U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS */
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS 0x0868U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL 0x00000001FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR */
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR 0x0870U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG 0x0878U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL 0xFFFFFFF7FFFFFFBFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT 36U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT 11U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN 0x0000000000000800ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT 7U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB 0x0000000000000080ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB 0x0000000000000100ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB 0x0000000000000180ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB 0x0000000000000200ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB 0x0000000000000280ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB 0x0000000000000300ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB 0x0000000000000380ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB 0x0000000000000400ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT 1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK 0xFFFFFFFFFFFFFFC1ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN 0x0000000000000001ULL
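+
+/*
+ * Illustrative sketch (not part of the upstream header): the REGION_SIZE
+ * encoding above steps in powers of four from 4KB (0) to 256MB (8), i.e.
+ * for a region of 2^order bytes the field value is (order - 12) / 2. The
+ * helper name is hypothetical.
+ */
+static inline u64 rogue_cr_remap_region_size(unsigned int order)
+{
+	/* order 12 (4KB) -> 0, order 14 (16KB) -> 1, ..., order 28 (256MB) -> 8 */
+	return (u64)((order - 12) / 2)
+	       << ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT;
+}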
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ 0x0880U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL 0x000000000000003FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT 1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK 0xFFFFFFC1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA 0x0888U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL 0xFFFFFFF7FFFFFF81ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT 36U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT 11U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN 0x0000000000000800ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT 7U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE 0x08A0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS 0x08A8U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR 0x08B0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE */
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE 0x08B8U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_NMI_EVENT */
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT 0x08C0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_DEBUG_CONFIG */
+#define ROGUE_CR_MIPS_DEBUG_CONFIG 0x08C8U
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT 0U
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_EXCEPTION_STATUS */
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS 0x08D0U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_MASKFULL 0x000000000000003FULL
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT 5U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN 0x00000020U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT 4U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN 0x00000010U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT 3U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN 0x00000008U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT 2U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN 0x00000004U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT 1U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN 0x00000002U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT 0U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_STATUS */
+#define ROGUE_CR_MIPS_WRAPPER_STATUS 0x08E8U
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_XPU_BROADCAST */
+#define ROGUE_CR_XPU_BROADCAST 0x0890U
+#define ROGUE_CR_XPU_BROADCAST_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_XPU_BROADCAST_MASK_SHIFT 0U
+#define ROGUE_CR_XPU_BROADCAST_MASK_CLRMSK 0xFFFFFE00U
+
+/* Register ROGUE_CR_META_SP_MSLVDATAX */
+#define ROGUE_CR_META_SP_MSLVDATAX 0x0A00U
+#define ROGUE_CR_META_SP_MSLVDATAX_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_META_SP_MSLVDATAT */
+#define ROGUE_CR_META_SP_MSLVDATAT 0x0A08U
+#define ROGUE_CR_META_SP_MSLVDATAT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_META_SP_MSLVCTRL0 */
+#define ROGUE_CR_META_SP_MSLVCTRL0 0x0A10U
+#define ROGUE_CR_META_SP_MSLVCTRL0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK 0x00000003U
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT 1U
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_EN 0x00000002U
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_EN 0x00000001U
+
+/* Register ROGUE_CR_META_SP_MSLVCTRL1 */
+#define ROGUE_CR_META_SP_MSLVCTRL1 0x0A18U
+#define ROGUE_CR_META_SP_MSLVCTRL1_MASKFULL 0x00000000F7F4003FULL
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT 30U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT 29U
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN 0x20000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT 28U
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN 0x10000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT 26U
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN 0x04000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT 25U
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN 0x02000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_SHIFT 24U
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_EN 0x01000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT 21U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK 0xFF1FFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT 20U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_EN 0x00100000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT 18U
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN 0x00040000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_SHIFT 4U
+#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_META_SP_MSLVHANDSHKE */
+#define ROGUE_CR_META_SP_MSLVHANDSHKE 0x0A50U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_META_SP_MSLVT0KICK */
+#define ROGUE_CR_META_SP_MSLVT0KICK 0x0A80U
+#define ROGUE_CR_META_SP_MSLVT0KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT0KICKI */
+#define ROGUE_CR_META_SP_MSLVT0KICKI 0x0A88U
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT1KICK */
+#define ROGUE_CR_META_SP_MSLVT1KICK 0x0A90U
+#define ROGUE_CR_META_SP_MSLVT1KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT1KICKI */
+#define ROGUE_CR_META_SP_MSLVT1KICKI 0x0A98U
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT2KICK */
+#define ROGUE_CR_META_SP_MSLVT2KICK 0x0AA0U
+#define ROGUE_CR_META_SP_MSLVT2KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT2KICKI */
+#define ROGUE_CR_META_SP_MSLVT2KICKI 0x0AA8U
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT3KICK */
+#define ROGUE_CR_META_SP_MSLVT3KICK 0x0AB0U
+#define ROGUE_CR_META_SP_MSLVT3KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT3KICKI */
+#define ROGUE_CR_META_SP_MSLVT3KICKI 0x0AB8U
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVRST */
+#define ROGUE_CR_META_SP_MSLVRST 0x0AC0U
+#define ROGUE_CR_META_SP_MSLVRST_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_EN 0x00000001U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQSTATUS */
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS 0x0AC8U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_MASKFULL 0x000000000000000CULL
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT 3U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN 0x00000008U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN 0x00000004U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQENABLE */
+#define ROGUE_CR_META_SP_MSLVIRQENABLE 0x0AD0U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_MASKFULL 0x000000000000000CULL
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT 3U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_EN 0x00000008U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_EN 0x00000004U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQLEVEL */
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL 0x0AD8U
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_MTS_SCHEDULE */
+#define ROGUE_CR_MTS_SCHEDULE 0x0B00U
+#define ROGUE_CR_MTS_SCHEDULE_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM_ALL 0x0000000FU
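+
+/*
+ * Illustrative sketch (not part of the upstream header): a schedule kick
+ * is composed by ORing one value from each field group, e.g. a
+ * host-requested, counted task on the background context of DM0:
+ *
+ *	u32 kick = ROGUE_CR_MTS_SCHEDULE_HOST_HOST |
+ *		   ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT0 |
+ *		   ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX |
+ *		   ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED |
+ *		   ROGUE_CR_MTS_SCHEDULE_DM_DM0;
+ */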
+
+/* Register ROGUE_CR_MTS_SCHEDULE1 */
+#define ROGUE_CR_MTS_SCHEDULE1 0x10B00U
+#define ROGUE_CR_MTS_SCHEDULE1_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE2 */
+#define ROGUE_CR_MTS_SCHEDULE2 0x20B00U
+#define ROGUE_CR_MTS_SCHEDULE2_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE3 */
+#define ROGUE_CR_MTS_SCHEDULE3 0x30B00U
+#define ROGUE_CR_MTS_SCHEDULE3_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE4 */
+#define ROGUE_CR_MTS_SCHEDULE4 0x40B00U
+#define ROGUE_CR_MTS_SCHEDULE4_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE5 */
+#define ROGUE_CR_MTS_SCHEDULE5 0x50B00U
+#define ROGUE_CR_MTS_SCHEDULE5_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE6 */
+#define ROGUE_CR_MTS_SCHEDULE6 0x60B00U
+#define ROGUE_CR_MTS_SCHEDULE6_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE7 */
+#define ROGUE_CR_MTS_SCHEDULE7 0x70B00U
+#define ROGUE_CR_MTS_SCHEDULE7_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC 0x0B30U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC 0x0B38U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC 0x0B40U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC 0x0B48U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG */
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG 0x0B50U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL 0x000FF0FFFFFFF701ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL 0x0000FFFFFFFFF001ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK 0xFFFF0FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK 0xFFF00FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT 12U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT 9U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK 0xFFFFFFFFFFFFF9FFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT 8U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN 0x0000000000000100ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT 0U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META 0x0000000000000000ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE 0x0B58U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE 0x0B60U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE 0x0B68U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE 0x0B70U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE 0x0B78U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE 0x0B80U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_INTCTX */
+#define ROGUE_CR_MTS_INTCTX 0x0B98U
+#define ROGUE_CR_MTS_INTCTX_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT 22U
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK 0xC03FFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_SHIFT 18U
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_CLRMSK 0xFFC3FFFFU
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT 16U
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK 0xFFFCFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT 8U
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX */
+#define ROGUE_CR_MTS_BGCTX 0x0BA0U
+#define ROGUE_CR_MTS_BGCTX_MASKFULL 0x0000000000003FFFULL
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_SHIFT 10U
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_CLRMSK 0xFFFFC3FFU
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE */
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE 0x0BA8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT 56U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK 0x00FFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT 48U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK 0xFF00FFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT 40U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT 32U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT 24U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT 16U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MTS_GPU_INT_STATUS */
+#define ROGUE_CR_MTS_GPU_INT_STATUS 0x0BB0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT 0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_SCHEDULE_ENABLE */
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE 0x0BC8U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS 0x0BD8U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR 0x0BE8U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS 0x10BD8U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR 0x10BE8U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS 0x20BD8U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR 0x20BE8U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS 0x30BD8U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR 0x30BE8U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS 0x40BD8U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR 0x40BE8U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS 0x50BD8U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR 0x50BE8U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS 0x60BD8U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR 0x60BE8U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS 0x70BD8U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR 0x70BE8U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_META_BOOT */
+#define ROGUE_CR_META_BOOT 0x0BF8U
+#define ROGUE_CR_META_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_BOOT_MODE_SHIFT 0U
+#define ROGUE_CR_META_BOOT_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_BOOT_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_GARTEN_SLC */
+#define ROGUE_CR_GARTEN_SLC 0x0BB8U
+#define ROGUE_CR_GARTEN_SLC_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT 0U
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_EN 0x00000001U
+
+/* Register ROGUE_CR_PPP */
+#define ROGUE_CR_PPP 0x0CD0U
+#define ROGUE_CR_PPP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CHECKSUM_SHIFT 0U
+#define ROGUE_CR_PPP_CHECKSUM_CLRMSK 0x00000000U
+
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_MASK 0x00000003U
+/* Top-left to bottom-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TL2BR 0x00000000U
+/* Top-right to bottom-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TR2BL 0x00000001U
+/* Bottom-left to top-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BL2TR 0x00000002U
+/* Bottom-right to top-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BR2TL 0x00000003U
+
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_MASK 0x00000003U
+/* Normal render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_NORM 0x00000000U
+/* Fast 2D render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_2D 0x00000002U
+/* Fast scale render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE 0x00000003U
+
+/* Register ROGUE_CR_ISP_RENDER */
+#define ROGUE_CR_ISP_RENDER 0x0F08U
+#define ROGUE_CR_ISP_RENDER_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT 8U
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN 0x00000100U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT 7U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN 0x00000080U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT 6U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN 0x00000040U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_SHIFT 5U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_EN 0x00000020U
+#define ROGUE_CR_ISP_RENDER_RESUME_SHIFT 4U
+#define ROGUE_CR_ISP_RENDER_RESUME_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ISP_RENDER_RESUME_EN 0x00000010U
+#define ROGUE_CR_ISP_RENDER_DIR_SHIFT 2U
+#define ROGUE_CR_ISP_RENDER_DIR_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_ISP_RENDER_DIR_TL2BR 0x00000000U
+#define ROGUE_CR_ISP_RENDER_DIR_TR2BL 0x00000004U
+#define ROGUE_CR_ISP_RENDER_DIR_BL2TR 0x00000008U
+#define ROGUE_CR_ISP_RENDER_DIR_BR2TL 0x0000000CU
+#define ROGUE_CR_ISP_RENDER_MODE_SHIFT 0U
+#define ROGUE_CR_ISP_RENDER_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_ISP_RENDER_MODE_NORM 0x00000000U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_2D 0x00000002U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_SCALE 0x00000003U
+
+/* Register ROGUE_CR_ISP_CTL */
+#define ROGUE_CR_ISP_CTL 0x0F38U
+#define ROGUE_CR_ISP_CTL_MASKFULL 0x00000000FFFFF3FFULL
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT 31U
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_EN 0x80000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_SHIFT 30U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_EN 0x40000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT 29U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_EN 0x20000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT 28U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_EN 0x10000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_SHIFT 27U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_EN 0x08000000U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_SHIFT 26U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_EN 0x04000000U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_SHIFT 25U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_EN 0x02000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT 23U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK 0xFE7FFFFFU
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 0x00000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 0x00800000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL 0x01000000U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT 21U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK 0xFF9FFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_SHIFT 20U
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_EN 0x00100000U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT 19U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN 0x00080000U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT 18U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN 0x00040000U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT 17U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN 0x00020000U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_SHIFT 16U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_EN 0x00010000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_SHIFT 12U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE 0x00000000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO 0x00001000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE 0x00002000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR 0x00003000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE 0x00004000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX 0x00005000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN 0x00006000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT 0x00007000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE 0x00008000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN 0x00009000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN 0x0000A000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE 0x0000B000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN 0x0000C000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN 0x0000D000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN 0x0000E000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN 0x0000F000U
+#define ROGUE_CR_ISP_CTL_VALID_ID_SHIFT 4U
+#define ROGUE_CR_ISP_CTL_VALID_ID_CLRMSK 0xFFFFFC0FU
+#define ROGUE_CR_ISP_CTL_UPASS_START_SHIFT 0U
+#define ROGUE_CR_ISP_CTL_UPASS_START_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_ISP_STATUS */
+#define ROGUE_CR_ISP_STATUS 0x1038U
+#define ROGUE_CR_ISP_STATUS_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_SHIFT 2U
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_EN 0x00000004U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_SHIFT 1U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ISP_STATUS_ACTIVE_EN 0x00000002U
+#define ROGUE_CR_ISP_STATUS_EOR_SHIFT 0U
+#define ROGUE_CR_ISP_STATUS_EOR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ISP_STATUS_EOR_EN 0x00000001U
+
+/* Register group: ROGUE_CR_ISP_XTP_RESUME, with 64 repeats */
+#define ROGUE_CR_ISP_XTP_RESUME_REPEATCOUNT 64U
+/* Register ROGUE_CR_ISP_XTP_RESUME0 */
+#define ROGUE_CR_ISP_XTP_RESUME0 0x3A00U
+#define ROGUE_CR_ISP_XTP_RESUME0_MASKFULL 0x00000000003FF3FFULL
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK 0xFFFFFC00U
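+
+/*
+ * Illustrative note (not part of the upstream header): entries of a repeat
+ * group appear to be spaced 8 bytes apart, so entry n would live at
+ * BASE0 + n * 8 (ROGUE_CR_ISP_XTP_RESUME0 + 63 * 8 = 0x3BF8, immediately
+ * below ROGUE_CR_ISP_XTP_STORE0 at 0x3C00). The stride is inferred from
+ * the surrounding offsets, not a documented constant.
+ */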
+
+/* Register group: ROGUE_CR_ISP_XTP_STORE, with 32 repeats */
+#define ROGUE_CR_ISP_XTP_STORE_REPEATCOUNT 32U
+/* Register ROGUE_CR_ISP_XTP_STORE0 */
+#define ROGUE_CR_ISP_XTP_STORE0 0x3C00U
+#define ROGUE_CR_ISP_XTP_STORE0_MASKFULL 0x000000007F3FF3FFULL
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_SHIFT 30U
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_EN 0x40000000U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_SHIFT 29U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_EN 0x20000000U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT 28U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_EN 0x10000000U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_SHIFT 24U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_CLRMSK 0xF0FFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK 0xFFFFFC00U
+
+/* Register group: ROGUE_CR_BIF_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_BIF_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_CAT_BASE0 */
+#define ROGUE_CR_BIF_CAT_BASE0 0x1200U
+#define ROGUE_CR_BIF_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE 4096U
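+
+/*
+ * Illustrative sketch (not part of the upstream header): ADDR_ALIGNSHIFT
+ * equals ADDR_SHIFT (12) here, so a 4096-byte-aligned page-catalogue base
+ * can be programmed by masking it straight into the field:
+ *
+ *	u64 val = base_addr & ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK;
+ */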
+
+/* Register ROGUE_CR_BIF_CAT_BASE1 */
+#define ROGUE_CR_BIF_CAT_BASE1 0x1208U
+#define ROGUE_CR_BIF_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE2 */
+#define ROGUE_CR_BIF_CAT_BASE2 0x1210U
+#define ROGUE_CR_BIF_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE3 */
+#define ROGUE_CR_BIF_CAT_BASE3 0x1218U
+#define ROGUE_CR_BIF_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE4 */
+#define ROGUE_CR_BIF_CAT_BASE4 0x1220U
+#define ROGUE_CR_BIF_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE5 */
+#define ROGUE_CR_BIF_CAT_BASE5 0x1228U
+#define ROGUE_CR_BIF_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE6 */
+#define ROGUE_CR_BIF_CAT_BASE6 0x1230U
+#define ROGUE_CR_BIF_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE7 */
+#define ROGUE_CR_BIF_CAT_BASE7 0x1238U
+#define ROGUE_CR_BIF_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE_INDEX */
+#define ROGUE_CR_BIF_CAT_BASE_INDEX 0x1240U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_MASKFULL 0x00070707073F0707ULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT 48U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK 0xFFF8FFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT 40U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT 32U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT 24U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK 0xFFFFFFFFF8FFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT 19U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK 0xFFFFFFFFFFC7FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT 16U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK 0xFFFFFFFFFFF8FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT 8U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK 0xFFFFFFFFFFFFF8FFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_SHIFT 0U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK 0xFFFFFFFFFFFFFFF8ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0 0x1248U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0 0x1250U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 0x1260U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1 0x1268U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1 0x1270U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 0x1280U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_MMU_ENTRY_STATUS */
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS 0x1288U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_MASKFULL 0x000000FFFFFFF0F3ULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT 12U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT 4U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
+/* Register ROGUE_CR_BIF_MMU_ENTRY */
+#define ROGUE_CR_BIF_MMU_ENTRY 0x1290U
+#define ROGUE_CR_BIF_MMU_ENTRY_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_SHIFT 1U
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_EN 0x00000002U
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_CTRL_INVAL */
+#define ROGUE_CR_BIF_CTRL_INVAL 0x12A0U
+#define ROGUE_CR_BIF_CTRL_INVAL_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_SHIFT 3U
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_EN 0x00000008U
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_SHIFT 2U
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_EN 0x00000004U
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_SHIFT 1U
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_EN 0x00000002U
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_SHIFT 0U
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_EN 0x00000001U
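+
+/*
+ * Usage sketch (illustrative): a full invalidation of the BIF MMU caches
+ * can be requested by OR-ing the individual _EN bits before writing the
+ * register; together they equal the register's MASKFULL (0xF):
+ *
+ *	u32 inval = ROGUE_CR_BIF_CTRL_INVAL_TLB1_EN |
+ *		    ROGUE_CR_BIF_CTRL_INVAL_PC_EN |
+ *		    ROGUE_CR_BIF_CTRL_INVAL_PD_EN |
+ *		    ROGUE_CR_BIF_CTRL_INVAL_PT_EN;
+ */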
+
+/* Register ROGUE_CR_BIF_CTRL */
+#define ROGUE_CR_BIF_CTRL 0x12A8U
+#define ROGUE_CR_BIF_CTRL__XE_MEM__MASKFULL 0x000000000000033FULL
+#define ROGUE_CR_BIF_CTRL_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT 9U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_EN 0x00000200U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT 8U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN 0x00000100U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT 7U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN 0x00000080U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT 6U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN 0x00000040U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT 5U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN 0x00000020U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT 4U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN 0x00000010U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_SHIFT 3U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_EN 0x00000008U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT 2U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_EN 0x00000004U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT 1U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN 0x00000002U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT 0U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS 0x12B0U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U
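+
+/*
+ * Usage sketch (illustrative): multi-bit fields are read back by masking
+ * with the inverse of _CLRMSK and shifting down by _SHIFT. Decoding the
+ * faulting catalogue base from a raw read in "status" (a hypothetical
+ * local):
+ *
+ *	u32 cat_base = (status &
+ *			~ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+ *		       ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+ */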
+
+/* Register ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS 0x12B8U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL 0x001FFFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT 52U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN 0x0010000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT 46U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS 0x12C0U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS 0x12C8U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_BIF_MMU_STATUS */
+#define ROGUE_CR_BIF_MMU_STATUS 0x12D0U
+#define ROGUE_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL 0x000000001FFFFFF7ULL
+#define ROGUE_CR_BIF_MMU_STATUS_MASKFULL 0x000000001FFFFFF7ULL
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT 28U
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_EN 0x10000000U
+#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register group: ROGUE_CR_BIF_TILING_CFG, with 8 repeats */
+#define ROGUE_CR_BIF_TILING_CFG_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_TILING_CFG0 */
+#define ROGUE_CR_BIF_TILING_CFG0 0x12D8U
+#define ROGUE_CR_BIF_TILING_CFG0_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG1 */
+#define ROGUE_CR_BIF_TILING_CFG1 0x12E0U
+#define ROGUE_CR_BIF_TILING_CFG1_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG2 */
+#define ROGUE_CR_BIF_TILING_CFG2 0x12E8U
+#define ROGUE_CR_BIF_TILING_CFG2_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG3 */
+#define ROGUE_CR_BIF_TILING_CFG3 0x12F0U
+#define ROGUE_CR_BIF_TILING_CFG3_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG4 */
+#define ROGUE_CR_BIF_TILING_CFG4 0x12F8U
+#define ROGUE_CR_BIF_TILING_CFG4_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG5 */
+#define ROGUE_CR_BIF_TILING_CFG5 0x1300U
+#define ROGUE_CR_BIF_TILING_CFG5_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG6 */
+#define ROGUE_CR_BIF_TILING_CFG6 0x1308U
+#define ROGUE_CR_BIF_TILING_CFG6_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG7 */
+#define ROGUE_CR_BIF_TILING_CFG7 0x1310U
+#define ROGUE_CR_BIF_TILING_CFG7_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_READS_EXT_STATUS */
+#define ROGUE_CR_BIF_READS_EXT_STATUS 0x1320U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_SHIFT 16U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK 0xF000FFFFU
+#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT 0U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_READS_INT_STATUS */
+#define ROGUE_CR_BIF_READS_INT_STATUS 0x1328U
+#define ROGUE_CR_BIF_READS_INT_STATUS_MASKFULL 0x0000000007FFFFFFULL
+#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_SHIFT 16U
+#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_CLRMSK 0xF800FFFFU
+#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_SHIFT 0U
+#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_READS_INT_STATUS */
+#define ROGUE_CR_BIFPM_READS_INT_STATUS 0x1330U
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT 0U
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_READS_EXT_STATUS */
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS 0x1338U
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT 0U
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_STATUS_MMU */
+#define ROGUE_CR_BIFPM_STATUS_MMU 0x1350U
+#define ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT 0U
+#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_STATUS_MMU */
+#define ROGUE_CR_BIF_STATUS_MMU 0x1358U
+#define ROGUE_CR_BIF_STATUS_MMU_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_SHIFT 0U
+#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_FAULT_READ */
+#define ROGUE_CR_BIF_FAULT_READ 0x13E0U
+#define ROGUE_CR_BIF_FAULT_READ_MASKFULL 0x000000FFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS */
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS 0x1430U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS */
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS 0x1438U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_MCU_FENCE */
+#define ROGUE_CR_MCU_FENCE 0x1740U
+#define ROGUE_CR_MCU_FENCE_MASKFULL 0x000007FFFFFFFFE0ULL
+#define ROGUE_CR_MCU_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MCU_FENCE_DM_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_MCU_FENCE_DM_VERTEX 0x0000000000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_PIXEL 0x0000010000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_COMPUTE 0x0000020000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_RAY_VERTEX 0x0000030000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_RAY 0x0000040000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_FASTRENDER 0x0000050000000000ULL
+#define ROGUE_CR_MCU_FENCE_ADDR_SHIFT 5U
+#define ROGUE_CR_MCU_FENCE_ADDR_CLRMSK 0xFFFFFF000000001FULL
+#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSHIFT 5U
+#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSIZE 32U
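+
+/*
+ * Usage sketch (illustrative): enumerated fields such as DM above come
+ * pre-shifted and can be OR-ed straight into the register value. A
+ * compute-DM fence on a 32-byte-aligned address "fence_addr" (a
+ * hypothetical local) might be encoded as:
+ *
+ *	u64 fence = ROGUE_CR_MCU_FENCE_DM_COMPUTE |
+ *		    ((fence_addr >> ROGUE_CR_MCU_FENCE_ADDR_ALIGNSHIFT)
+ *		     << ROGUE_CR_MCU_FENCE_ADDR_SHIFT);
+ */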
+
+/* Register group: ROGUE_CR_SCRATCH, with 16 repeats */
+#define ROGUE_CR_SCRATCH_REPEATCOUNT 16U
+/* Register ROGUE_CR_SCRATCH0 */
+#define ROGUE_CR_SCRATCH0 0x1A00U
+#define ROGUE_CR_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH1 */
+#define ROGUE_CR_SCRATCH1 0x1A08U
+#define ROGUE_CR_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH2 */
+#define ROGUE_CR_SCRATCH2 0x1A10U
+#define ROGUE_CR_SCRATCH2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH2_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH3 */
+#define ROGUE_CR_SCRATCH3 0x1A18U
+#define ROGUE_CR_SCRATCH3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH3_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH4 */
+#define ROGUE_CR_SCRATCH4 0x1A20U
+#define ROGUE_CR_SCRATCH4_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH4_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH4_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH5 */
+#define ROGUE_CR_SCRATCH5 0x1A28U
+#define ROGUE_CR_SCRATCH5_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH5_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH5_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH6 */
+#define ROGUE_CR_SCRATCH6 0x1A30U
+#define ROGUE_CR_SCRATCH6_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH6_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH6_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH7 */
+#define ROGUE_CR_SCRATCH7 0x1A38U
+#define ROGUE_CR_SCRATCH7_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH7_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH7_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH8 */
+#define ROGUE_CR_SCRATCH8 0x1A40U
+#define ROGUE_CR_SCRATCH8_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH8_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH8_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH9 */
+#define ROGUE_CR_SCRATCH9 0x1A48U
+#define ROGUE_CR_SCRATCH9_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH9_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH9_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH10 */
+#define ROGUE_CR_SCRATCH10 0x1A50U
+#define ROGUE_CR_SCRATCH10_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH10_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH10_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH11 */
+#define ROGUE_CR_SCRATCH11 0x1A58U
+#define ROGUE_CR_SCRATCH11_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH11_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH11_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH12 */
+#define ROGUE_CR_SCRATCH12 0x1A60U
+#define ROGUE_CR_SCRATCH12_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH12_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH12_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH13 */
+#define ROGUE_CR_SCRATCH13 0x1A68U
+#define ROGUE_CR_SCRATCH13_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH13_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH13_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH14 */
+#define ROGUE_CR_SCRATCH14 0x1A70U
+#define ROGUE_CR_SCRATCH14_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH14_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH14_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH15 */
+#define ROGUE_CR_SCRATCH15 0x1A78U
+#define ROGUE_CR_SCRATCH15_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH15_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH15_DATA_CLRMSK 0x00000000U
+
+/* Register group: ROGUE_CR_OS0_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS0_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS0_SCRATCH0 */
+#define ROGUE_CR_OS0_SCRATCH0 0x1A80U
+#define ROGUE_CR_OS0_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS0_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS0_SCRATCH1 */
+#define ROGUE_CR_OS0_SCRATCH1 0x1A88U
+#define ROGUE_CR_OS0_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS0_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS0_SCRATCH2 */
+#define ROGUE_CR_OS0_SCRATCH2 0x1A90U
+#define ROGUE_CR_OS0_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS0_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS0_SCRATCH3 */
+#define ROGUE_CR_OS0_SCRATCH3 0x1A98U
+#define ROGUE_CR_OS0_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS0_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS1_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS1_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS1_SCRATCH0 */
+#define ROGUE_CR_OS1_SCRATCH0 0x11A80U
+#define ROGUE_CR_OS1_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS1_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS1_SCRATCH1 */
+#define ROGUE_CR_OS1_SCRATCH1 0x11A88U
+#define ROGUE_CR_OS1_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS1_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS1_SCRATCH2 */
+#define ROGUE_CR_OS1_SCRATCH2 0x11A90U
+#define ROGUE_CR_OS1_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS1_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS1_SCRATCH3 */
+#define ROGUE_CR_OS1_SCRATCH3 0x11A98U
+#define ROGUE_CR_OS1_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS1_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS2_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS2_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS2_SCRATCH0 */
+#define ROGUE_CR_OS2_SCRATCH0 0x21A80U
+#define ROGUE_CR_OS2_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS2_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS2_SCRATCH1 */
+#define ROGUE_CR_OS2_SCRATCH1 0x21A88U
+#define ROGUE_CR_OS2_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS2_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS2_SCRATCH2 */
+#define ROGUE_CR_OS2_SCRATCH2 0x21A90U
+#define ROGUE_CR_OS2_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS2_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS2_SCRATCH3 */
+#define ROGUE_CR_OS2_SCRATCH3 0x21A98U
+#define ROGUE_CR_OS2_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS2_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS3_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS3_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS3_SCRATCH0 */
+#define ROGUE_CR_OS3_SCRATCH0 0x31A80U
+#define ROGUE_CR_OS3_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS3_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS3_SCRATCH1 */
+#define ROGUE_CR_OS3_SCRATCH1 0x31A88U
+#define ROGUE_CR_OS3_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS3_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS3_SCRATCH2 */
+#define ROGUE_CR_OS3_SCRATCH2 0x31A90U
+#define ROGUE_CR_OS3_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS3_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS3_SCRATCH3 */
+#define ROGUE_CR_OS3_SCRATCH3 0x31A98U
+#define ROGUE_CR_OS3_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS3_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS4_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS4_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS4_SCRATCH0 */
+#define ROGUE_CR_OS4_SCRATCH0 0x41A80U
+#define ROGUE_CR_OS4_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS4_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS4_SCRATCH1 */
+#define ROGUE_CR_OS4_SCRATCH1 0x41A88U
+#define ROGUE_CR_OS4_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS4_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS4_SCRATCH2 */
+#define ROGUE_CR_OS4_SCRATCH2 0x41A90U
+#define ROGUE_CR_OS4_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS4_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS4_SCRATCH3 */
+#define ROGUE_CR_OS4_SCRATCH3 0x41A98U
+#define ROGUE_CR_OS4_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS4_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS5_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS5_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS5_SCRATCH0 */
+#define ROGUE_CR_OS5_SCRATCH0 0x51A80U
+#define ROGUE_CR_OS5_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS5_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS5_SCRATCH1 */
+#define ROGUE_CR_OS5_SCRATCH1 0x51A88U
+#define ROGUE_CR_OS5_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS5_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS5_SCRATCH2 */
+#define ROGUE_CR_OS5_SCRATCH2 0x51A90U
+#define ROGUE_CR_OS5_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS5_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS5_SCRATCH3 */
+#define ROGUE_CR_OS5_SCRATCH3 0x51A98U
+#define ROGUE_CR_OS5_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS5_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS6_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS6_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS6_SCRATCH0 */
+#define ROGUE_CR_OS6_SCRATCH0 0x61A80U
+#define ROGUE_CR_OS6_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS6_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS6_SCRATCH1 */
+#define ROGUE_CR_OS6_SCRATCH1 0x61A88U
+#define ROGUE_CR_OS6_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS6_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS6_SCRATCH2 */
+#define ROGUE_CR_OS6_SCRATCH2 0x61A90U
+#define ROGUE_CR_OS6_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS6_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS6_SCRATCH3 */
+#define ROGUE_CR_OS6_SCRATCH3 0x61A98U
+#define ROGUE_CR_OS6_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS6_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS7_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS7_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS7_SCRATCH0 */
+#define ROGUE_CR_OS7_SCRATCH0 0x71A80U
+#define ROGUE_CR_OS7_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS7_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS7_SCRATCH1 */
+#define ROGUE_CR_OS7_SCRATCH1 0x71A88U
+#define ROGUE_CR_OS7_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS7_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS7_SCRATCH2 */
+#define ROGUE_CR_OS7_SCRATCH2 0x71A90U
+#define ROGUE_CR_OS7_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS7_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS7_SCRATCH3 */
+#define ROGUE_CR_OS7_SCRATCH3 0x71A98U
+#define ROGUE_CR_OS7_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS7_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR */
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR 0x2700U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT 0U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK 0xFFFF0000U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN */
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN 0x2708U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL 0x000000FFFFFFFFF0ULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE 16U
+
+/* Register group: ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT 16U
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 0x3000U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE 4096U
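+
+/*
+ * Usage sketch (illustrative): a remap window packs several independent
+ * fields into one 64-bit value. Assuming a 4096-byte-aligned device
+ * virtual address "devvaddr" and a SIZE field counted in 4 KiB pages
+ * ("npages") - neither assumption is spelled out by this header - a
+ * trusted, fetch-only window could be composed as:
+ *
+ *	u64 cfg = ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN |
+ *		  ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN |
+ *		  ((u64)npages << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
+ *		  ((devvaddr >> ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT)
+ *		   << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT);
+ */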
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 0x3008U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 0x3010U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 0x3018U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 0x3020U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 0x3028U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 0x3030U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 0x3038U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 0x3040U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 0x3048U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 0x3050U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 0x3058U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 0x3060U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 0x3068U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 0x3070U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 0x3078U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE 4096U
+
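+/*
+ * Illustrative sketch only, not part of the generated register list: the
+ * _SHIFT/_CLRMSK/_ALIGNSHIFT macros above combine as below to pack a 4 KiB
+ * aligned device virtual address into a remap window and mark it fetchable.
+ * Assumes <linux/io.h> and <linux/types.h> are available; the real driver
+ * goes through its own register accessors rather than raw writeq().
+ */
+static inline void
+rogue_cr_fwcore_remap15_map(void __iomem *regs, u64 devvaddr)
+{
+	u64 val = ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN;
+
+	/* The field stores the address pre-shifted by ALIGNSHIFT (4 KiB). */
+	val |= ((devvaddr >> ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT)
+		<< ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT) &
+	       ~ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK;
+	writeq(val, regs + ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15);
+}
+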
+/* Register ROGUE_CR_FWCORE_BOOT */
+#define ROGUE_CR_FWCORE_BOOT 0x3090U
+#define ROGUE_CR_FWCORE_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_SHIFT 0U
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_RESET_ADDR */
+#define ROGUE_CR_FWCORE_RESET_ADDR 0x3098U
+#define ROGUE_CR_FWCORE_RESET_ADDR_MASKFULL 0x00000000FFFFFFFEULL
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_SHIFT 1U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK 0x00000001U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT 1U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE 2U
+
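+/*
+ * Illustrative sketch only: one plausible start sequence for the firmware
+ * core is to program the (2-byte aligned) reset vector and then set the boot
+ * enable bit. This is a guess at ordering for illustration, not the driver's
+ * actual boot code.
+ */
+static inline void rogue_cr_fwcore_start(void __iomem *regs, u32 reset_addr)
+{
+	/* ALIGNSHIFT == SHIFT here, so the aligned address is written as-is. */
+	writel(reset_addr & ~ROGUE_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK,
+	       regs + ROGUE_CR_FWCORE_RESET_ADDR);
+	writel(ROGUE_CR_FWCORE_BOOT_ENABLE_EN, regs + ROGUE_CR_FWCORE_BOOT);
+}
+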
+/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR */
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR 0x30A0U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL 0x00000000FFFFFFFEULL
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT 1U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK 0x00000001U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT 1U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE 2U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT */
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT 0x30A8U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT 0U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS */
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS 0x30B0U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL 0x000000000000F771ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN 0x00000001U
+
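+/*
+ * Illustrative sketch only: reading a field back out follows the inverse of
+ * the packing pattern, AND with ~_CLRMSK to isolate the field, then shift
+ * down by _SHIFT. Shown here for the fault page size, but the same idiom
+ * applies to any multi-bit field in this file.
+ */
+static inline u32 rogue_cr_fwcore_fault_page_size(void __iomem *regs)
+{
+	u32 status = readl(regs + ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS);
+
+	return (status & ~ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+	       ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT;
+}
+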
+/* Register ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS */
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS 0x30B8U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL 0x001FFFFFFFFFFFF0ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT 52U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN 0x0010000000000000ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT 46U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_FWCORE_MEM_CTRL_INVAL */
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL 0x30C0U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT 3U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN 0x00000008U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT 2U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_EN 0x00000004U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT 1U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_EN 0x00000002U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_EN 0x00000001U
+
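+/*
+ * Illustrative sketch only: the single-bit _EN values can be ORed together
+ * directly, e.g. to request invalidation of the FW core TLB and all page
+ * catalogue/directory/table caches in one write (this covers every bit in
+ * the register's MASKFULL).
+ */
+static inline void rogue_cr_fwcore_mem_inval_all(void __iomem *regs)
+{
+	writel(ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN |
+	       ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_EN |
+	       ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_EN |
+	       ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_EN,
+	       regs + ROGUE_CR_FWCORE_MEM_CTRL_INVAL);
+}
+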
+/* Register ROGUE_CR_FWCORE_MEM_MMU_STATUS */
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS 0x30C8U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT 20U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT 2U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN 0x00000004U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT 1U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN 0x00000002U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS */
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS 0x30D8U
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK 0xFFFFF000U
+
+/* Register ROGUE_CR_FWCORE_MEM_READS_INT_STATUS */
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS 0x30E0U
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK 0xFFFFF800U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_FENCE */
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE 0x30E8U
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT 0U
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_EN 0x00000001U
+
+/* Register group: ROGUE_CR_FWCORE_MEM_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE0 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0 0x30F0U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE1 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1 0x30F8U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE2 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2 0x3100U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE3 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3 0x3108U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE4 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4 0x3110U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE5 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5 0x3118U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE6 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6 0x3120U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE7 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7 0x3128U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
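+/*
+ * Illustrative sketch only: registers in a "repeat" group sit at a fixed
+ * 8-byte stride from the first instance (BASE1 = 0x30F8 = BASE0 + 8, and so
+ * on), so instance n can be addressed generically instead of via the
+ * per-index macros above.
+ */
+static inline u32 rogue_cr_fwcore_mem_cat_base_reg(u32 n)
+{
+	/* Only meaningful for n < ROGUE_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT. */
+	return ROGUE_CR_FWCORE_MEM_CAT_BASE0 + n * 8;
+}
+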
+/* Register ROGUE_CR_FWCORE_WDT_RESET */
+#define ROGUE_CR_FWCORE_WDT_RESET 0x3130U
+#define ROGUE_CR_FWCORE_WDT_RESET_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_WDT_CTRL */
+#define ROGUE_CR_FWCORE_WDT_CTRL 0x3138U
+#define ROGUE_CR_FWCORE_WDT_CTRL_MASKFULL 0x00000000FFFF1F01ULL
+#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_SHIFT 16U
+#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT 8U
+#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK 0xFFFFE0FFU
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_WDT_COUNT */
+#define ROGUE_CR_FWCORE_WDT_COUNT 0x3140U
+#define ROGUE_CR_FWCORE_WDT_COUNT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK 0x00000000U
+
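+/*
+ * Illustrative sketch only: enabling the FW core watchdog with a bounded
+ * threshold. Masking the shifted value with ~_CLRMSK clamps it to the
+ * 5-bit field width. The PROT field is ignored here for brevity; consult
+ * the hardware documentation for whether a protection key is required.
+ */
+static inline void rogue_cr_fwcore_wdt_enable(void __iomem *regs, u32 threshold)
+{
+	u32 ctrl = ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_EN;
+
+	ctrl |= (threshold << ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT) &
+		~ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK;
+	writel(ctrl, regs + ROGUE_CR_FWCORE_WDT_CTRL);
+}
+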
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED0, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED00 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED00 0x3400U
+#define ROGUE_CR_FWCORE_DMI_RESERVED00_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED01 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED01 0x3408U
+#define ROGUE_CR_FWCORE_DMI_RESERVED01_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED02 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED02 0x3410U
+#define ROGUE_CR_FWCORE_DMI_RESERVED02_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED03 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED03 0x3418U
+#define ROGUE_CR_FWCORE_DMI_RESERVED03_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DATA0 */
+#define ROGUE_CR_FWCORE_DMI_DATA0 0x3420U
+#define ROGUE_CR_FWCORE_DMI_DATA0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DATA1 */
+#define ROGUE_CR_FWCORE_DMI_DATA1 0x3428U
+#define ROGUE_CR_FWCORE_DMI_DATA1_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED1, with 5 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT 5U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED10 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED10 0x3430U
+#define ROGUE_CR_FWCORE_DMI_RESERVED10_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED11 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED11 0x3438U
+#define ROGUE_CR_FWCORE_DMI_RESERVED11_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED12 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED12 0x3440U
+#define ROGUE_CR_FWCORE_DMI_RESERVED12_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED13 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED13 0x3448U
+#define ROGUE_CR_FWCORE_DMI_RESERVED13_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED14 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED14 0x3450U
+#define ROGUE_CR_FWCORE_DMI_RESERVED14_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DMCONTROL */
+#define ROGUE_CR_FWCORE_DMI_DMCONTROL 0x3480U
+#define ROGUE_CR_FWCORE_DMI_DMCONTROL_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DMSTATUS */
+#define ROGUE_CR_FWCORE_DMI_DMSTATUS 0x3488U
+#define ROGUE_CR_FWCORE_DMI_DMSTATUS_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED2, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED20 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED20 0x3490U
+#define ROGUE_CR_FWCORE_DMI_RESERVED20_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED21 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED21 0x3498U
+#define ROGUE_CR_FWCORE_DMI_RESERVED21_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED22 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED22 0x34A0U
+#define ROGUE_CR_FWCORE_DMI_RESERVED22_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED23 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED23 0x34A8U
+#define ROGUE_CR_FWCORE_DMI_RESERVED23_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_ABSTRACTCS */
+#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS 0x34B0U
+#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_COMMAND */
+#define ROGUE_CR_FWCORE_DMI_COMMAND 0x34B8U
+#define ROGUE_CR_FWCORE_DMI_COMMAND_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBCS */
+#define ROGUE_CR_FWCORE_DMI_SBCS 0x35C0U
+#define ROGUE_CR_FWCORE_DMI_SBCS_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBADDRESS0 */
+#define ROGUE_CR_FWCORE_DMI_SBADDRESS0 0x35C8U
+#define ROGUE_CR_FWCORE_DMI_SBADDRESS0_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED3, with 2 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT 2U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED30 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED30 0x34D0U
+#define ROGUE_CR_FWCORE_DMI_RESERVED30_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED31 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED31 0x34D8U
+#define ROGUE_CR_FWCORE_DMI_RESERVED31_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_SBDATA, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_SBDATA_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA0 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA0 0x35E0U
+#define ROGUE_CR_FWCORE_DMI_SBDATA0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA1 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA1 0x35E8U
+#define ROGUE_CR_FWCORE_DMI_SBDATA1_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA2 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA2 0x35F0U
+#define ROGUE_CR_FWCORE_DMI_SBDATA2_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA3 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA3 0x35F8U
+#define ROGUE_CR_FWCORE_DMI_SBDATA3_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_HALTSUM0 */
+#define ROGUE_CR_FWCORE_DMI_HALTSUM0 0x3600U
+#define ROGUE_CR_FWCORE_DMI_HALTSUM0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC_CTRL_MISC */
+#define ROGUE_CR_SLC_CTRL_MISC 0x3800U
+#define ROGUE_CR_SLC_CTRL_MISC_MASKFULL 0xFFFFFFFF01FF010FULL
+#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT 32U
+#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT 24U
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 16U
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE 0x0000000000000000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE 0x0000000000010000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 0x0000000000100000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 0x0000000000110000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 0x0000000000200000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE 0x0000000000210000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_EN 0x0000000000000100ULL
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN 0x0000000000000008ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN 0x0000000000000004ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN 0x0000000000000002ULL
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC_CTRL_FLUSH_INVAL */
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL 0x3818U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL 0x0000000080000FFFULL
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT 31U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN 0x80000000U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT 11U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN 0x00000800U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT 10U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN 0x00000400U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT 9U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN 0x00000200U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN 0x00000100U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT 7U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN 0x00000080U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT 6U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN 0x00000040U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT 5U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN 0x00000020U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT 4U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN 0x00000010U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN 0x00000008U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN 0x00000002U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_STATUS0 */
+#define ROGUE_CR_SLC_STATUS0 0x3820U
+#define ROGUE_CR_SLC_STATUS0_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT 2U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN 0x00000004U
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_SHIFT 1U
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_EN 0x00000002U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_EN 0x00000001U
+
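+/*
+ * Illustrative sketch only: a typical use of the two registers above is to
+ * kick a full SLC flush+invalidate and poll the pending bits until the
+ * operation drains. Timeout handling is omitted for brevity; real driver
+ * code bounds loops like this rather than spinning forever.
+ */
+static inline void rogue_cr_slc_flush_inval_all(void __iomem *regs)
+{
+	u32 pending = ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN |
+		      ROGUE_CR_SLC_STATUS0_INVAL_PENDING_EN |
+		      ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_EN;
+
+	writel(ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN,
+	       regs + ROGUE_CR_SLC_CTRL_FLUSH_INVAL);
+	while (readl(regs + ROGUE_CR_SLC_STATUS0) & pending)
+		cpu_relax();
+}
+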
+/* Register ROGUE_CR_SLC_CTRL_BYPASS */
+#define ROGUE_CR_SLC_CTRL_BYPASS 0x3828U
+#define ROGUE_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL 0x0FFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT 59U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN 0x0800000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT 58U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN 0x0400000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT 57U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN 0x0200000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT 56U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN 0x0100000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT 55U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN 0x0080000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT 54U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT 53U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN 0x0020000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT 52U
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN 0x0010000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT 51U
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN 0x0008000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT 50U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_EN 0x0004000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT 49U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN 0x0002000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT 48U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN 0x0001000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT 47U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN 0x0000800000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT 46U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN 0x0000400000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT 45U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_EN 0x0000200000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT 44U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT 43U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT 42U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_EN 0x0000040000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT 41U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_EN 0x0000020000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT 40U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_EN 0x0000010000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT 39U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT 38U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN 0x0000004000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT 37U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN 0x0000002000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT 36U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_EN 0x0000001000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT 35U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN 0x0000000800000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT 34U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN 0x0000000400000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT 33U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN 0x0000000200000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT 32U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN 0x0000000100000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT 31U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN 0x0000000080000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT 30U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN 0x0000000040000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT 29U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT 28U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT 27U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN 0x0000000008000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT 26U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_EN 0x0000000004000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT 25U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN 0x0000000002000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT 24U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_EN 0x0000000001000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT 23U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN 0x0000000000800000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT 22U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_EN 0x0000000000400000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT 21U
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN 0x0000000000200000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT 20U
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_EN 0x0000000000100000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT 19U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_EN 0x0000000000080000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT 18U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_EN 0x0000000000040000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT 17U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_EN 0x0000000000020000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT 16U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN 0x0000000000010000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT 15U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN 0x0000000000008000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT 14U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_EN 0x0000000000004000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT 13U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_EN 0x0000000000002000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT 12U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_EN 0x0000000000001000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT 11U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN 0x0000000000000800ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT 10U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN 0x0000000000000400ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT 9U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN 0x0000000000000200ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_EN 0x0000000000000100ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT 7U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_EN 0x0000000000000080ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT 6U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_EN 0x0000000000000040ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT 5U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN 0x0000000000000020ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT 4U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_EN 0x0000000000000010ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN 0x0000000000000008ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN 0x0000000000000004ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_EN 0x0000000000000002ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC_STATUS1 */
+#define ROGUE_CR_SLC_STATUS1 0x3870U
+#define ROGUE_CR_SLC_STATUS1_MASKFULL 0x800003FF03FFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_PAUSED_SHIFT 63U
+#define ROGUE_CR_SLC_STATUS1_PAUSED_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_PAUSED_EN 0x8000000000000000ULL
+#define ROGUE_CR_SLC_STATUS1_READS1_SHIFT 32U
+#define ROGUE_CR_SLC_STATUS1_READS1_CLRMSK 0xFFFFFC00FFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_READS0_SHIFT 16U
+#define ROGUE_CR_SLC_STATUS1_READS0_CLRMSK 0xFFFFFFFFFC00FFFFULL
+#define ROGUE_CR_SLC_STATUS1_READS1_EXT_SHIFT 8U
+#define ROGUE_CR_SLC_STATUS1_READS1_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_SLC_STATUS1_READS0_EXT_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS1_READS0_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_SLC_IDLE */
+#define ROGUE_CR_SLC_IDLE 0x3898U
+#define ROGUE_CR_SLC_IDLE__XE_MEM__MASKFULL 0x00000000000003FFULL
+#define ROGUE_CR_SLC_IDLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_SHIFT 9U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_EN 0x00000200U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_SHIFT 8U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_EN 0x00000100U
+#define ROGUE_CR_SLC_IDLE_IMGBV4_SHIFT 7U
+#define ROGUE_CR_SLC_IDLE_IMGBV4_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SLC_IDLE_IMGBV4_EN 0x00000080U
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_SHIFT 6U
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_EN 0x00000040U
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_SHIFT 5U
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_EN 0x00000020U
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_SHIFT 4U
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_EN 0x00000010U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_SHIFT 3U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_EN 0x00000008U
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_SHIFT 2U
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_EN 0x00000004U
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_SHIFT 1U
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_EN 0x00000002U
+#define ROGUE_CR_SLC_IDLE_CBAR_SHIFT 0U
+#define ROGUE_CR_SLC_IDLE_CBAR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_IDLE_CBAR_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_STATUS2 */
+#define ROGUE_CR_SLC_STATUS2 0x3908U
+#define ROGUE_CR_SLC_STATUS2_MASKFULL 0x000003FF03FFFFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS3_SHIFT 32U
+#define ROGUE_CR_SLC_STATUS2_READS3_CLRMSK 0xFFFFFC00FFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS2_SHIFT 16U
+#define ROGUE_CR_SLC_STATUS2_READS2_CLRMSK 0xFFFFFFFFFC00FFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS3_EXT_SHIFT 8U
+#define ROGUE_CR_SLC_STATUS2_READS3_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_SLC_STATUS2_READS2_EXT_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS2_READS2_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_SLC_CTRL_MISC2 */
+#define ROGUE_CR_SLC_CTRL_MISC2 0x3930U
+#define ROGUE_CR_SLC_CTRL_MISC2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE */
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE 0x3938U
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT 0U
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_UVS0_CHECKSUM */
+#define ROGUE_CR_USC_UVS0_CHECKSUM 0x5000U
+#define ROGUE_CR_USC_UVS0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS1_CHECKSUM */
+#define ROGUE_CR_USC_UVS1_CHECKSUM 0x5008U
+#define ROGUE_CR_USC_UVS1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS2_CHECKSUM */
+#define ROGUE_CR_USC_UVS2_CHECKSUM 0x5010U
+#define ROGUE_CR_USC_UVS2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS3_CHECKSUM */
+#define ROGUE_CR_USC_UVS3_CHECKSUM 0x5018U
+#define ROGUE_CR_USC_UVS3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PPP_SIGNATURE */
+#define ROGUE_CR_PPP_SIGNATURE 0x5020U
+#define ROGUE_CR_PPP_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_PPP_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TE_SIGNATURE */
+#define ROGUE_CR_TE_SIGNATURE 0x5028U
+#define ROGUE_CR_TE_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TE_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_TE_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TE_CHECKSUM */
+#define ROGUE_CR_TE_CHECKSUM 0x5110U
+#define ROGUE_CR_TE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVB_CHECKSUM */
+#define ROGUE_CR_USC_UVB_CHECKSUM 0x5118U
+#define ROGUE_CR_USC_UVB_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VCE_CHECKSUM */
+#define ROGUE_CR_VCE_CHECKSUM 0x5030U
+#define ROGUE_CR_VCE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VCE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_VCE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_ISP_PDS_CHECKSUM */
+#define ROGUE_CR_ISP_PDS_CHECKSUM 0x5038U
+#define ROGUE_CR_ISP_PDS_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_ISP_TPF_CHECKSUM */
+#define ROGUE_CR_ISP_TPF_CHECKSUM 0x5040U
+#define ROGUE_CR_ISP_TPF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TFPU_PLANE0_CHECKSUM */
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM 0x5048U
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TFPU_PLANE1_CHECKSUM */
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM 0x5050U
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_CHECKSUM */
+#define ROGUE_CR_PBE_CHECKSUM 0x5058U
+#define ROGUE_CR_PBE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_PBE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PDS_DOUTM_STM_SIGNATURE */
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE 0x5060U
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_IFPU_ISP_CHECKSUM */
+#define ROGUE_CR_IFPU_ISP_CHECKSUM 0x5068U
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS4_CHECKSUM */
+#define ROGUE_CR_USC_UVS4_CHECKSUM 0x5100U
+#define ROGUE_CR_USC_UVS4_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS5_CHECKSUM */
+#define ROGUE_CR_USC_UVS5_CHECKSUM 0x5108U
+#define ROGUE_CR_USC_UVS5_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PPP_CLIP_CHECKSUM */
+#define ROGUE_CR_PPP_CLIP_CHECKSUM 0x5120U
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_PHASE */
+#define ROGUE_CR_PERF_TA_PHASE 0x6008U
+#define ROGUE_CR_PERF_TA_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_PHASE */
+#define ROGUE_CR_PERF_3D_PHASE 0x6010U
+#define ROGUE_CR_PERF_3D_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_3D_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_COMPUTE_PHASE */
+#define ROGUE_CR_PERF_COMPUTE_PHASE 0x6018U
+#define ROGUE_CR_PERF_COMPUTE_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_CYCLE */
+#define ROGUE_CR_PERF_TA_CYCLE 0x6020U
+#define ROGUE_CR_PERF_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_CYCLE */
+#define ROGUE_CR_PERF_3D_CYCLE 0x6028U
+#define ROGUE_CR_PERF_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_3D_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_COMPUTE_CYCLE */
+#define ROGUE_CR_PERF_COMPUTE_CYCLE 0x6030U
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_OR_3D_CYCLE */
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE 0x6038U
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_INITIAL_TA_CYCLE */
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE 0x6040U
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_READ_STALL */
+#define ROGUE_CR_PERF_SLC0_READ_STALL 0x60B8U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL 0x60C0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_READ_STALL */
+#define ROGUE_CR_PERF_SLC1_READ_STALL 0x60E0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL 0x60E8U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_READ_STALL */
+#define ROGUE_CR_PERF_SLC2_READ_STALL 0x6158U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL 0x6160U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_READ_STALL */
+#define ROGUE_CR_PERF_SLC3_READ_STALL 0x6180U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL 0x6188U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_SPINUP */
+#define ROGUE_CR_PERF_3D_SPINUP 0x6220U
+#define ROGUE_CR_PERF_3D_SPINUP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_SHIFT 0U
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_AXI_ACE_LITE_CONFIGURATION */
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION 0x38C0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL 0x00003FFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT 45U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN 0x0000200000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT 37U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK 0xFFFFE01FFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT 36U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK \
+ 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN \
+ 0x0000001000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT 35U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN 0x0000000800000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT 34U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN 0x0000000400000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT 30U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFC3FFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT 26U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK 0xFFFFFFFFC3FFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT 22U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK 0xFFFFFFFFFC3FFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT 20U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT 18U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT 16U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT 14U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT 12U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT 10U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT 8U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT 4U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT 0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFFF0ULL
+
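+/*
+ * Usage sketch (editorial aside): every field above follows one encoding:
+ * <FIELD>_CLRMSK clears the field, a new value is positioned with
+ * <FIELD>_SHIFT, and single-bit fields provide a pre-shifted <FIELD>_EN.
+ * A hypothetical read-modify-write of this register would look like:
+ */
+static inline u64 rogue_cr_axi_ace_cfg_example(u64 reg_val, u64 osid)
+{
+	/* Replace the 8-bit OSID_SECURITY field (bits [44:37]). */
+	reg_val &= ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK;
+	reg_val |= osid << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
+	/* Single-bit fields can be OR-ed in directly from their _EN value. */
+	reg_val |= ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN;
+	return reg_val;
+}
+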
+/* Register ROGUE_CR_POWER_ESTIMATE_RESULT */
+#define ROGUE_CR_POWER_ESTIMATE_RESULT 0x6328U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT 0U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF */
+#define ROGUE_CR_TA_PERF 0x7600U
+#define ROGUE_CR_TA_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TA_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TA_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TA_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TA_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TA_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TA_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TA_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TA_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TA_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TA_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TA_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TA_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TA_PERF_SELECT0 */
+#define ROGUE_CR_TA_PERF_SELECT0 0x7608U
+#define ROGUE_CR_TA_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
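+/*
+ * Usage sketch (editorial aside): a PERF_SELECT register is naturally built
+ * from zero rather than read-modify-written; each CLRMSK implies the field
+ * width (BIT_SELECT 16 bits, GROUP_SELECT 5 bits, BATCH_MIN/MAX 14 bits),
+ * and MASKFULL bounds the final value.
+ */
+static inline u64 rogue_cr_ta_perf_select0_example(u64 group, u64 bit,
+						   u64 bmin, u64 bmax)
+{
+	u64 val = 0;
+
+	val |= bit << ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT;
+	val |= group << ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT;
+	val |= bmin << ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT;
+	val |= bmax << ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT;
+
+	/* Discard anything outside the implemented field map. */
+	return val & ROGUE_CR_TA_PERF_SELECT0_MASKFULL;
+}
+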
+/* Register ROGUE_CR_TA_PERF_SELECT1 */
+#define ROGUE_CR_TA_PERF_SELECT1 0x7610U
+#define ROGUE_CR_TA_PERF_SELECT1_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT2 */
+#define ROGUE_CR_TA_PERF_SELECT2 0x7618U
+#define ROGUE_CR_TA_PERF_SELECT2_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT3 */
+#define ROGUE_CR_TA_PERF_SELECT3 0x7620U
+#define ROGUE_CR_TA_PERF_SELECT3_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECTED_BITS */
+#define ROGUE_CR_TA_PERF_SELECTED_BITS 0x7648U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_0 */
+#define ROGUE_CR_TA_PERF_COUNTER_0 0x7650U
+#define ROGUE_CR_TA_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_1 */
+#define ROGUE_CR_TA_PERF_COUNTER_1 0x7658U
+#define ROGUE_CR_TA_PERF_COUNTER_1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_2 */
+#define ROGUE_CR_TA_PERF_COUNTER_2 0x7660U
+#define ROGUE_CR_TA_PERF_COUNTER_2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_3 */
+#define ROGUE_CR_TA_PERF_COUNTER_3 0x7668U
+#define ROGUE_CR_TA_PERF_COUNTER_3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RASTERISATION_PERF */
+#define ROGUE_CR_RASTERISATION_PERF 0x7700U
+#define ROGUE_CR_RASTERISATION_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_SELECT0 */
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0 0x7708U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_RASTERISATION_PERF_COUNTER_0 */
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0 0x7750U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF 0x7800U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 0x7808U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 0x7850U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF */
+#define ROGUE_CR_TPU_MCU_L0_PERF 0x7900U
+#define ROGUE_CR_TPU_MCU_L0_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 0x7908U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 0x7950U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_PERF */
+#define ROGUE_CR_USC_PERF 0x8100U
+#define ROGUE_CR_USC_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_USC_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_USC_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_USC_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_USC_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_USC_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_USC_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_USC_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_USC_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_USC_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_USC_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_USC_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_USC_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_PERF_SELECT0 */
+#define ROGUE_CR_USC_PERF_SELECT0 0x8108U
+#define ROGUE_CR_USC_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_USC_PERF_COUNTER_0 */
+#define ROGUE_CR_USC_PERF_COUNTER_0 0x8150U
+#define ROGUE_CR_USC_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_IDLE */
+#define ROGUE_CR_JONES_IDLE 0x8328U
+#define ROGUE_CR_JONES_IDLE_MASKFULL 0x0000000000007FFFULL
+#define ROGUE_CR_JONES_IDLE_TDM_SHIFT 14U
+#define ROGUE_CR_JONES_IDLE_TDM_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_JONES_IDLE_TDM_EN 0x00004000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_SHIFT 13U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_EN 0x00002000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_SHIFT 12U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_EN 0x00001000U
+#define ROGUE_CR_JONES_IDLE_MMU_SHIFT 11U
+#define ROGUE_CR_JONES_IDLE_MMU_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_JONES_IDLE_MMU_EN 0x00000800U
+#define ROGUE_CR_JONES_IDLE_TLA_SHIFT 10U
+#define ROGUE_CR_JONES_IDLE_TLA_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_JONES_IDLE_TLA_EN 0x00000400U
+#define ROGUE_CR_JONES_IDLE_GARTEN_SHIFT 9U
+#define ROGUE_CR_JONES_IDLE_GARTEN_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_JONES_IDLE_GARTEN_EN 0x00000200U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_SHIFT 8U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_JONES_IDLE_HOSTIF_EN 0x00000100U
+#define ROGUE_CR_JONES_IDLE_SOCIF_SHIFT 7U
+#define ROGUE_CR_JONES_IDLE_SOCIF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_JONES_IDLE_SOCIF_EN 0x00000080U
+#define ROGUE_CR_JONES_IDLE_TILING_SHIFT 6U
+#define ROGUE_CR_JONES_IDLE_TILING_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_JONES_IDLE_TILING_EN 0x00000040U
+#define ROGUE_CR_JONES_IDLE_IPP_SHIFT 5U
+#define ROGUE_CR_JONES_IDLE_IPP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_JONES_IDLE_IPP_EN 0x00000020U
+#define ROGUE_CR_JONES_IDLE_USCS_SHIFT 4U
+#define ROGUE_CR_JONES_IDLE_USCS_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_IDLE_USCS_EN 0x00000010U
+#define ROGUE_CR_JONES_IDLE_PM_SHIFT 3U
+#define ROGUE_CR_JONES_IDLE_PM_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_IDLE_PM_EN 0x00000008U
+#define ROGUE_CR_JONES_IDLE_CDM_SHIFT 2U
+#define ROGUE_CR_JONES_IDLE_CDM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_IDLE_CDM_EN 0x00000004U
+#define ROGUE_CR_JONES_IDLE_VDM_SHIFT 1U
+#define ROGUE_CR_JONES_IDLE_VDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_IDLE_VDM_EN 0x00000002U
+#define ROGUE_CR_JONES_IDLE_BIF_SHIFT 0U
+#define ROGUE_CR_JONES_IDLE_BIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_IDLE_BIF_EN 0x00000001U
+
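+/*
+ * Usage sketch (editorial aside): JONES_IDLE reads as one status bit per
+ * unit. Assuming, as the _IDLE naming suggests, that a set bit means the
+ * unit is idle, the whole block is idle when the read value matches
+ * MASKFULL; a single unit can be tested against its _EN constant.
+ */
+static inline bool rogue_cr_jones_all_idle_example(u64 reg_val)
+{
+	return (reg_val & ROGUE_CR_JONES_IDLE_MASKFULL) ==
+	       ROGUE_CR_JONES_IDLE_MASKFULL;
+}
+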
+/* Register ROGUE_CR_TORNADO_PERF */
+#define ROGUE_CR_TORNADO_PERF 0x8228U
+#define ROGUE_CR_TORNADO_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TORNADO_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TORNADO_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TORNADO_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TORNADO_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TORNADO_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TORNADO_PERF_SELECT0 */
+#define ROGUE_CR_TORNADO_PERF_SELECT0 0x8230U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TORNADO_PERF_COUNTER_0 */
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0 0x8268U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TEXAS_PERF */
+#define ROGUE_CR_TEXAS_PERF 0x8290U
+#define ROGUE_CR_TEXAS_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_TEXAS_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_TEXAS_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_TEXAS_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_TEXAS_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TEXAS_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_PERF_SELECT0 */
+#define ROGUE_CR_TEXAS_PERF_SELECT0 0x8298U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TEXAS_PERF_COUNTER_0 */
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0 0x82D8U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_PERF */
+#define ROGUE_CR_JONES_PERF 0x8330U
+#define ROGUE_CR_JONES_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_JONES_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_JONES_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_JONES_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_JONES_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_JONES_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_JONES_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_JONES_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_JONES_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_JONES_PERF_SELECT0 */
+#define ROGUE_CR_JONES_PERF_SELECT0 0x8338U
+#define ROGUE_CR_JONES_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_JONES_PERF_COUNTER_0 */
+#define ROGUE_CR_JONES_PERF_COUNTER_0 0x8368U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF */
+#define ROGUE_CR_BLACKPEARL_PERF 0x8400U
+#define ROGUE_CR_BLACKPEARL_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_SELECT0 */
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0 0x8408U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 */
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 0x8448U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_PERF */
+#define ROGUE_CR_PBE_PERF 0x8478U
+#define ROGUE_CR_PBE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_PBE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_PBE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_PBE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_PBE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_PBE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_PBE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_PBE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_PBE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_PBE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_PBE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_PBE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_PBE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_PBE_PERF_SELECT0 */
+#define ROGUE_CR_PBE_PERF_SELECT0 0x8480U
+#define ROGUE_CR_PBE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_PBE_PERF_COUNTER_0 */
+#define ROGUE_CR_PBE_PERF_COUNTER_0 0x84B0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OCP_REVINFO */
+#define ROGUE_CR_OCP_REVINFO 0x9000U
+#define ROGUE_CR_OCP_REVINFO_MASKFULL 0x00000007FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT 33U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK 0xFFFFFFF9FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT 32U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_REVINFO_REVISION_SHIFT 0U
+#define ROGUE_CR_OCP_REVINFO_REVISION_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_OCP_SYSCONFIG */
+#define ROGUE_CR_OCP_SYSCONFIG 0x9010U
+#define ROGUE_CR_OCP_SYSCONFIG_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT 10U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK 0xFFFFF3FFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT 8U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT 6U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT 4U
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 2U
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 0U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0 0x9020U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1 0x9028U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2 0x9030U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_0 0x9038U
+#define ROGUE_CR_OCP_IRQSTATUS_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_1 0x9040U
+#define ROGUE_CR_OCP_IRQSTATUS_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_2 0x9048U
+#define ROGUE_CR_OCP_IRQSTATUS_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_0 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_0 0x9050U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_1 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_1 0x9058U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_2 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_2 0x9060U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_0 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0 0x9068U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_1 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1 0x9070U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_2 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2 0x9078U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN 0x00000001U
+
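+/*
+ * Usage sketch (editorial aside): the IRQENABLE_SET_n/IRQENABLE_CLR_n pairs
+ * above look like the usual OCP write-one idiom; assuming those semantics
+ * (writing the _EN bit to SET unmasks the interrupt, writing it to CLR
+ * masks it again), no read-modify-write is needed. writel() and the iomem
+ * mapping are assumed here; the driver's real accessors may differ.
+ */
+static inline void rogue_cr_rgx_irq_mask_example(void __iomem *regs,
+						 bool unmask)
+{
+	if (unmask)
+		writel(ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN,
+		       regs + ROGUE_CR_OCP_IRQENABLE_SET_2);
+	else
+		writel(ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN,
+		       regs + ROGUE_CR_OCP_IRQENABLE_CLR_2);
+}
+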
+/* Register ROGUE_CR_OCP_IRQ_EVENT */
+#define ROGUE_CR_OCP_IRQ_EVENT 0x9080U
+#define ROGUE_CR_OCP_IRQ_EVENT_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT 19U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN 0x0000000000080000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT 18U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT 17U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN 0x0000000000020000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT 16U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000010000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT 15U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT 14U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT 13U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT 12U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN 0x0000000000001000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT 11U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000800ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT 10U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT 9U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN 0x0000000000000200ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT 8U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN 0x0000000000000100ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT 7U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT 6U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT 5U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT 4U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN 0x0000000000000010ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT 3U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000008ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT 2U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT 1U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN 0x0000000000000002ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT 0U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_OCP_DEBUG_CONFIG */
+#define ROGUE_CR_OCP_DEBUG_CONFIG 0x9088U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_DEBUG_STATUS */
+#define ROGUE_CR_OCP_DEBUG_STATUS 0x9090U
+#define ROGUE_CR_OCP_DEBUG_STATUS_MASKFULL 0x001F1F77FFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT 51U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK 0xFFE7FFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT 50U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN 0x0004000000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT 48U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT 43U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK 0xFFFFE7FFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT 42U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN 0x0000040000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT 40U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT 38U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN 0x0000004000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT 37U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN 0x0000002000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT 36U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN 0x0000001000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT 34U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN 0x0000000400000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT 33U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN 0x0000000200000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT 32U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT 31U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN 0x0000000080000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT 30U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN 0x0000000040000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT 29U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN 0x0000000020000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT 27U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK 0xFFFFFFFFE7FFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT 26U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN 0x0000000004000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT 24U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT 23U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN 0x0000000000800000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT 22U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN 0x0000000000400000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT 21U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN 0x0000000000200000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT 19U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK 0xFFFFFFFFFFE7FFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT 18U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT 16U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT 15U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT 14U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT 13U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT 11U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK 0xFFFFFFFFFFFFE7FFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT 10U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT 8U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT 7U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT 6U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT 5U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT 3U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK 0xFFFFFFFFFFFFFFE7ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT 2U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_EN 0x00000001U
+
+#define ROGUE_CR_BIF_TRUST_DM_MASK 0x0000007FU
+
+/* Register ROGUE_CR_BIF_TRUST */
+#define ROGUE_CR_BIF_TRUST 0xA000U
+#define ROGUE_CR_BIF_TRUST_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT 20U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN 0x00100000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT 19U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN 0x00080000U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT 18U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN 0x00040000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT 17U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN 0x00020000U
+#define ROGUE_CR_BIF_TRUST_ENABLE_SHIFT 16U
+#define ROGUE_CR_BIF_TRUST_ENABLE_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_BIF_TRUST_ENABLE_EN 0x00010000U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_SHIFT 9U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_CLRMSK 0xFFFF01FFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT 8U
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN 0x00000100U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT 7U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN 0x00000080U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN 0x00000001U
+
+/* Register ROGUE_CR_SYS_BUS_SECURE */
+#define ROGUE_CR_SYS_BUS_SECURE 0xA100U
+#define ROGUE_CR_SYS_BUS_SECURE__SECR__MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_SHIFT 0U
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FBA_FC0_CHECKSUM */
+#define ROGUE_CR_FBA_FC0_CHECKSUM 0xD170U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC1_CHECKSUM */
+#define ROGUE_CR_FBA_FC1_CHECKSUM 0xD178U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC2_CHECKSUM */
+#define ROGUE_CR_FBA_FC2_CHECKSUM 0xD180U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC3_CHECKSUM */
+#define ROGUE_CR_FBA_FC3_CHECKSUM 0xD188U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CLK_CTRL2 */
+#define ROGUE_CR_CLK_CTRL2 0xD200U
+#define ROGUE_CR_CLK_CTRL2_MASKFULL 0x0000000000000F33ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL2_VRDM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL2_SH_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL2_SH_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL2_SH_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_SH_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL2_SH_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL2_FBA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS2 */
+#define ROGUE_CR_CLK_STATUS2 0xD208U
+#define ROGUE_CR_CLK_STATUS2_MASKFULL 0x0000000000000015ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS2_VRDM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS2_SH_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS2_SH_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS2_SH_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_SH_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS2_FBA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_RPM_SHF_FPL */
+#define ROGUE_CR_RPM_SHF_FPL 0xD520U
+#define ROGUE_CR_RPM_SHF_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_READ */
+#define ROGUE_CR_RPM_SHF_FPL_READ 0xD528U
+#define ROGUE_CR_RPM_SHF_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_WRITE */
+#define ROGUE_CR_RPM_SHF_FPL_WRITE 0xD530U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL */
+#define ROGUE_CR_RPM_SHG_FPL 0xD538U
+#define ROGUE_CR_RPM_SHG_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_READ */
+#define ROGUE_CR_RPM_SHG_FPL_READ 0xD540U
+#define ROGUE_CR_RPM_SHG_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_WRITE */
+#define ROGUE_CR_RPM_SHG_FPL_WRITE 0xD548U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_SH_PERF */
+#define ROGUE_CR_SH_PERF 0xD5F8U
+#define ROGUE_CR_SH_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_SH_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_SH_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SH_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_SH_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_SH_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SH_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_SH_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_SH_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SH_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_SH_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_SH_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SH_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_SH_PERF_SELECT0 */
+#define ROGUE_CR_SH_PERF_SELECT0 0xD600U
+#define ROGUE_CR_SH_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SH_PERF_COUNTER_0 */
+#define ROGUE_CR_SH_PERF_COUNTER_0 0xD628U
+#define ROGUE_CR_SH_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_SHG_CHECKSUM */
+#define ROGUE_CR_SHF_SHG_CHECKSUM 0xD1C0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM 0xD1C8U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VARY_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM 0xD1D0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RPM_BIF_CHECKSUM */
+#define ROGUE_CR_RPM_BIF_CHECKSUM 0xD1D8U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_BIF_CHECKSUM */
+#define ROGUE_CR_SHG_BIF_CHECKSUM 0xD1E0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_FE_BE_CHECKSUM */
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM 0xD1E8U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BF_PERF */
+#define DPX_CR_BF_PERF 0xC458U
+#define DPX_CR_BF_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BF_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BF_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BF_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BF_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BF_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BF_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BF_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BF_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BF_PERF_SELECT0 */
+#define DPX_CR_BF_PERF_SELECT0 0xC460U
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BF_PERF_COUNTER_0 */
+#define DPX_CR_BF_PERF_COUNTER_0 0xC488U
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BT_PERF */
+#define DPX_CR_BT_PERF 0xC3D0U
+#define DPX_CR_BT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BT_PERF_SELECT0 */
+#define DPX_CR_BT_PERF_SELECT0 0xC3D8U
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BT_PERF_COUNTER_0 */
+#define DPX_CR_BT_PERF_COUNTER_0 0xC420U
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RQ_USC_DEBUG */
+#define DPX_CR_RQ_USC_DEBUG 0xC110U
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT 0U
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS 0xC5C8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT 0U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS 0xC5D0U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL 0x03FFFFFFFFFFFFF0ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT 57U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN 0x0200000000000000ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT 44U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK 0xFE000FFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT 40U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register DPX_CR_BIF_MMU_STATUS */
+#define DPX_CR_BIF_MMU_STATUS 0xC5D8U
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF */
+#define DPX_CR_RT_PERF 0xC700U
+#define DPX_CR_RT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_RT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_RT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_RT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_RT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_RT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_RT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_RT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_RT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF_SELECT0 */
+#define DPX_CR_RT_PERF_SELECT0 0xC708U
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_RT_PERF_COUNTER_0 */
+#define DPX_CR_RT_PERF_COUNTER_0 0xC730U
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BX_TU_PERF */
+#define DPX_CR_BX_TU_PERF 0xC908U
+#define DPX_CR_BX_TU_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BX_TU_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BX_TU_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BX_TU_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BX_TU_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BX_TU_PERF_SELECT0 */
+#define DPX_CR_BX_TU_PERF_SELECT0 0xC910U
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BX_TU_PERF_COUNTER_0 */
+#define DPX_CR_BX_TU_PERF_COUNTER_0 0xC938U
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RS_PDS_RR_CHECKSUM */
+#define DPX_CR_RS_PDS_RR_CHECKSUM 0xC0F0U
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT 0U
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT */
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT 0xE140U
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT 0U
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MMU_CBASE_MAPPING */
+#define ROGUE_CR_MMU_CBASE_MAPPING 0xE148U
+#define ROGUE_CR_MMU_CBASE_MAPPING_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK 0xF0000000U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE 4096U
+
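+/*
+ * The ALIGNSHIFT/ALIGNSIZE pair above indicates the base address field holds
+ * a 4096-byte-aligned address right-shifted by 12 bits. A minimal usage
+ * sketch (assuming an ioremapped register base "regs"; this is not the
+ * driver's actual helper):
+ *
+ *	u32 val = (pc_phys_addr >> ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+ *		  << ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT;
+ *	val &= ~ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK;
+ *	writel(val, regs + ROGUE_CR_MMU_CBASE_MAPPING);
+ */
+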
+/* Register ROGUE_CR_MMU_FAULT_STATUS */
+#define ROGUE_CR_MMU_FAULT_STATUS 0xE150U
+#define ROGUE_CR_MMU_FAULT_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT 28U
+#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT 20U
+#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT 12U
+#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT 6U
+#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL
+#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_SHIFT 4U
+#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_SHIFT 3U
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_EN 0x0000000000000008ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_SHIFT 1U
+#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_EN 0x0000000000000001ULL
+
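+/*
+ * Fields in this register are extracted as (status & ~CLRMSK) >> SHIFT. A
+ * sketch of decoding a fault (local names are illustrative only):
+ *
+ *	u64 status = readq(regs + ROGUE_CR_MMU_FAULT_STATUS);
+ *	u64 addr_field = (status & ~ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+ *			 ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
+ *	u32 ctx = (status & ~ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+ *		  ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+ *	bool is_read = status & ROGUE_CR_MMU_FAULT_STATUS_RNW_EN;
+ */
+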
+/* Register ROGUE_CR_MMU_FAULT_STATUS_META */
+#define ROGUE_CR_MMU_FAULT_STATUS_META 0xE158U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT 28U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT 20U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT 12U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT 6U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT 4U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_SHIFT 3U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_EN 0x0000000000000008ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT 1U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT 0U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC3_CTRL_MISC */
+#define ROGUE_CR_SLC3_CTRL_MISC 0xE200U
+#define ROGUE_CR_SLC3_CTRL_MISC_MASKFULL 0x0000000000000107ULL
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT 8U
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN 0x00000100U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 0U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFF8U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR 0x00000000U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH 0x00000001U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH 0x00000002U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH 0x00000003U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH 0x00000004U
+
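+/*
+ * CLRMSK values are AND-masks that zero a field while preserving the rest of
+ * the register. For example, selecting the in-page hash address decode mode
+ * might look like this (read-modify-write sketch, "regs" assumed):
+ *
+ *	u32 val = readl(regs + ROGUE_CR_SLC3_CTRL_MISC);
+ *	val &= ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK;
+ *	val |= ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH;
+ *	writel(val, regs + ROGUE_CR_SLC3_CTRL_MISC);
+ */
+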
+/* Register ROGUE_CR_SLC3_SCRAMBLE */
+#define ROGUE_CR_SLC3_SCRAMBLE 0xE208U
+#define ROGUE_CR_SLC3_SCRAMBLE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE2 */
+#define ROGUE_CR_SLC3_SCRAMBLE2 0xE210U
+#define ROGUE_CR_SLC3_SCRAMBLE2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE3 */
+#define ROGUE_CR_SLC3_SCRAMBLE3 0xE218U
+#define ROGUE_CR_SLC3_SCRAMBLE3_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE4 */
+#define ROGUE_CR_SLC3_SCRAMBLE4 0xE260U
+#define ROGUE_CR_SLC3_SCRAMBLE4_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_STATUS */
+#define ROGUE_CR_SLC3_STATUS 0xE220U
+#define ROGUE_CR_SLC3_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_WRITES1_SHIFT 48U
+#define ROGUE_CR_SLC3_STATUS_WRITES1_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_WRITES0_SHIFT 32U
+#define ROGUE_CR_SLC3_STATUS_WRITES0_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_READS1_SHIFT 16U
+#define ROGUE_CR_SLC3_STATUS_READS1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_SLC3_STATUS_READS0_SHIFT 0U
+#define ROGUE_CR_SLC3_STATUS_READS0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SLC3_IDLE */
+#define ROGUE_CR_SLC3_IDLE 0xE228U
+#define ROGUE_CR_SLC3_IDLE_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT 18U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK 0xFFF3FFFFU
+#define ROGUE_CR_SLC3_IDLE_MMU_SHIFT 17U
+#define ROGUE_CR_SLC3_IDLE_MMU_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_SLC3_IDLE_MMU_EN 0x00020000U
+#define ROGUE_CR_SLC3_IDLE_RDI_SHIFT 16U
+#define ROGUE_CR_SLC3_IDLE_RDI_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_SLC3_IDLE_RDI_EN 0x00010000U
+#define ROGUE_CR_SLC3_IDLE_IMGBV4_SHIFT 12U
+#define ROGUE_CR_SLC3_IDLE_IMGBV4_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_SHIFT 4U
+#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT 2U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT 1U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_EN 0x00000002U
+#define ROGUE_CR_SLC3_IDLE_XBAR_SHIFT 0U
+#define ROGUE_CR_SLC3_IDLE_XBAR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC3_IDLE_XBAR_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC3_FAULT_STOP_STATUS */
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS 0xE248U
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_MASKFULL 0x0000000000001FFFULL
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT 0U
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK 0xFFFFE000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_MODE */
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE 0xF048U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX 0x00000000U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE 0x00000001U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST 0x00000002U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING0 */
+#define ROGUE_CR_CONTEXT_MAPPING0 0xF078U
+#define ROGUE_CR_CONTEXT_MAPPING0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING0_2D_SHIFT 24U
+#define ROGUE_CR_CONTEXT_MAPPING0_2D_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CONTEXT_MAPPING0_CDM_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING0_CDM_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING0_3D_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING0_3D_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING0_TA_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING0_TA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING1 */
+#define ROGUE_CR_CONTEXT_MAPPING1 0xF080U
+#define ROGUE_CR_CONTEXT_MAPPING1_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING1_HOST_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING1_HOST_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING1_TLA_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING1_TLA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING2 */
+#define ROGUE_CR_CONTEXT_MAPPING2 0xF088U
+#define ROGUE_CR_CONTEXT_MAPPING2_MASKFULL 0x0000000000FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING2_TE0_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING2_TE0_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING3 */
+#define ROGUE_CR_CONTEXT_MAPPING3 0xF090U
+#define ROGUE_CR_CONTEXT_MAPPING3_MASKFULL 0x0000000000FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING3_TE1_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING3_TE1_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_JONES_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ 0xF098U
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ 0xF0A0U
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_DUST_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ 0xF0A8U
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING4 */
+#define ROGUE_CR_CONTEXT_MAPPING4 0xF210U
+#define ROGUE_CR_CONTEXT_MAPPING4_MASKFULL 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT 40U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT 32U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT 24U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MULTICORE_GPU */
+#define ROGUE_CR_MULTICORE_GPU 0xF300U
+#define ROGUE_CR_MULTICORE_GPU_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT 6U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN 0x00000040U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT 5U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN 0x00000020U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT 4U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN 0x00000010U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT 3U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN 0x00000008U
+#define ROGUE_CR_MULTICORE_GPU_ID_SHIFT 0U
+#define ROGUE_CR_MULTICORE_GPU_ID_CLRMSK 0xFFFFFFF8U
+
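+/*
+ * Reading this register yields the core's ID and capability flags, e.g.
+ * (illustrative sketch only):
+ *
+ *	u32 gpu = readl(regs + ROGUE_CR_MULTICORE_GPU);
+ *	u32 id = (gpu & ~ROGUE_CR_MULTICORE_GPU_ID_CLRMSK) >>
+ *		 ROGUE_CR_MULTICORE_GPU_ID_SHIFT;
+ *	bool primary = gpu & ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN;
+ */
+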
+/* Register ROGUE_CR_MULTICORE_SYSTEM */
+#define ROGUE_CR_MULTICORE_SYSTEM 0xF308U
+#define ROGUE_CR_MULTICORE_SYSTEM_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT 0U
+#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON 0xF310U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON 0xF320U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON 0xF330U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
+#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
+#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_ECC_RAM_INIT_DONE */
+#define ROGUE_CR_ECC_RAM_INIT_DONE 0xF350U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_EN 0x00000001U
+
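+/*
+ * The KICK/DONE pairing suggests the usual pattern: write the per-unit kick
+ * bits, then poll the DONE register until the same bits are set. A sketch
+ * (a real driver would bound the poll with a timeout):
+ *
+ *	writel((u32)ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL,
+ *	       regs + ROGUE_CR_ECC_RAM_INIT_KICK);
+ *	while ((readl(regs + ROGUE_CR_ECC_RAM_INIT_DONE) &
+ *		(u32)ROGUE_CR_ECC_RAM_INIT_DONE_MASKFULL) !=
+ *	       (u32)ROGUE_CR_ECC_RAM_INIT_DONE_MASKFULL)
+ *		cpu_relax();
+ */
+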
+/* Register ROGUE_CR_SAFETY_EVENT_ENABLE */
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE 0xF390U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_STATUS */
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE 0xF398U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_CLEAR */
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE 0xF3A0U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
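+/*
+ * The ENABLE/STATUS/CLEAR triplet follows a common interrupt scheme: unmask
+ * events once, then in the handler read the latched status and write it back
+ * to the CLEAR register (write-to-clear is presumed from the naming):
+ *
+ *	u32 events = readl(regs + ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE);
+ *	if (events & ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN)
+ *		handle_fw_fault();	(hypothetical handler)
+ *	writel(events, regs + ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE);
+ */
+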
+/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* clang-format on */
+
+#endif /* PVR_ROGUE_CR_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
new file mode 100644
index 000000000..46186b56e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_CR_DEFS_CLIENT_H
+#define PVR_ROGUE_CR_DEFS_CLIENT_H
+
+/* clang-format off */
+
+/*
+ * This register controls the anti-aliasing mode of the Tiling Co-Processor; independent control is
+ * provided in both the X and Y axes.
+ * This register must be set according to the number of ISP Samples Per Pixel the core supports.
+ *
+ * When ISP Samples Per Pixel = 1:
+ * 2xmsaa is achieved by enabling Y - TE does AA on Y plane only
+ * 4xmsaa is achieved by enabling Y and X - TE does AA on X and Y plane
+ * 8xmsaa not supported by XE cores
+ *
+ * When ISP Samples Per Pixel = 2:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y and X2 - TE does AA on Y plane only
+ * 8xmsaa is achieved by enabling Y, X and X2 - TE does AA on X and Y plane
+ * (8xmsaa is not supported by XE cores)
+ *
+ * When ISP Samples Per Pixel = 4:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y2 and X2 - TE does AA on Y plane only
+ * 8xmsaa not supported by XE cores
+ */
+/* Register ROGUE_CR_TE_AA */
+#define ROGUE_CR_TE_AA 0x0C00U
+#define ROGUE_CR_TE_AA_MASKFULL 0x000000000000000Full
+/* Y2
+ * Indicates 4xmsaa when X2 and Y2 are set to 1. This does not affect TE and is only used within
+ * TPW.
+ */
+#define ROGUE_CR_TE_AA_Y2_SHIFT 3U
+#define ROGUE_CR_TE_AA_Y2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TE_AA_Y2_EN 0x00000008U
+/* Y
+ * Anti-Aliasing in Y Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_Y_SHIFT 2U
+#define ROGUE_CR_TE_AA_Y_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TE_AA_Y_EN 0x00000004U
+/* X
+ * Anti-Aliasing in X Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_X_SHIFT 1U
+#define ROGUE_CR_TE_AA_X_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TE_AA_X_EN 0x00000002U
+/* X2
+ * 2x Anti-Aliasing Enabled, affects PPP only
+ */
+#define ROGUE_CR_TE_AA_X2_SHIFT 0U
+#define ROGUE_CR_TE_AA_X2_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TE_AA_X2_EN 0x00000001U
+
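+/*
+ * Following the table above, 4x MSAA could, for example, be requested as
+ * (sketch; "regs" is an assumed ioremapped register base):
+ *
+ *	with ISP Samples Per Pixel = 1:
+ *		writel(ROGUE_CR_TE_AA_Y_EN | ROGUE_CR_TE_AA_X_EN,
+ *		       regs + ROGUE_CR_TE_AA);
+ *	with ISP Samples Per Pixel = 2:
+ *		writel(ROGUE_CR_TE_AA_Y_EN | ROGUE_CR_TE_AA_X2_EN,
+ *		       regs + ROGUE_CR_TE_AA);
+ */
+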
+/* MacroTile Boundaries X Plane */
+/* Register ROGUE_CR_TE_MTILE1 */
+#define ROGUE_CR_TE_MTILE1 0x0C08U
+#define ROGUE_CR_TE_MTILE1_MASKFULL 0x0000000007FFFFFFull
+/* X1 default: 0x00000004
+ * X1 MacroTile boundary, left tile X for second column of macrotiles (16MT mode) - 32 pixels across
+ * tile
+ */
+#define ROGUE_CR_TE_MTILE1_X1_SHIFT 18U
+#define ROGUE_CR_TE_MTILE1_X1_CLRMSK 0xF803FFFFU
+/* X2 default: 0x00000008
+ * X2 MacroTile boundary, left tile X for third (16MT) column of macrotiles - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X2_SHIFT 9U
+#define ROGUE_CR_TE_MTILE1_X2_CLRMSK 0xFFFC01FFU
+/* X3 default: 0x0000000c
+ * X3 MacroTile boundary, left tile X for fourth column of macrotiles (16MT) - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X3_SHIFT 0U
+#define ROGUE_CR_TE_MTILE1_X3_CLRMSK 0xFFFFFE00U
+
+/* MacroTile Boundaries Y Plane. */
+/* Register ROGUE_CR_TE_MTILE2 */
+#define ROGUE_CR_TE_MTILE2 0x0C10U
+#define ROGUE_CR_TE_MTILE2_MASKFULL 0x0000000007FFFFFFull
+/* Y1 default: 0x00000004
+ * Y1 MacroTile boundary, top tile Y for second row of macrotiles (16MT mode) - 32 pixels tile
+ * height
+ */
+#define ROGUE_CR_TE_MTILE2_Y1_SHIFT 18U
+#define ROGUE_CR_TE_MTILE2_Y1_CLRMSK 0xF803FFFFU
+/* Y2 default: 0x00000008
+ * Y2 MacroTile boundary, top tile Y for third (16MT) row of macrotiles - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y2_SHIFT 9U
+#define ROGUE_CR_TE_MTILE2_Y2_CLRMSK 0xFFFC01FFU
+/* Y3 default: 0x0000000c
+ * Y3 MacroTile boundary, top tile Y for fourth row of macrotiles (16MT) - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y3_SHIFT 0U
+#define ROGUE_CR_TE_MTILE2_Y3_CLRMSK 0xFFFFFE00U
+
+/*
+ * In order to perform the tiling operation and generate the display list the maximum screen size
+ * must be configured in terms of the number of tiles in X & Y axis.
+ */
+
+/* Register ROGUE_CR_TE_SCREEN */
+#define ROGUE_CR_TE_SCREEN 0x0C18U
+#define ROGUE_CR_TE_SCREEN_MASKFULL 0x00000000001FF1FFull
+/* YMAX default: 0x00000010
+ * Maximum Y tile address visible on screen, 32 pixel tile height, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_YMAX_SHIFT 12U
+#define ROGUE_CR_TE_SCREEN_YMAX_CLRMSK 0xFFE00FFFU
+/* XMAX default: 0x00000010
+ * Maximum X tile address visible on screen, 32 pixel tile width, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_XMAX_SHIFT 0U
+#define ROGUE_CR_TE_SCREEN_XMAX_CLRMSK 0xFFFFFE00U
+
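+/*
+ * Reading the field descriptions above as "index of the last visible 32-pixel
+ * tile" (an assumption), a framebuffer of width x height pixels might be
+ * configured as:
+ *
+ *	u32 xmax = DIV_ROUND_UP(width, 32) - 1;
+ *	u32 ymax = DIV_ROUND_UP(height, 32) - 1;
+ *	writel((ymax << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) |
+ *	       (xmax << ROGUE_CR_TE_SCREEN_XMAX_SHIFT),
+ *	       regs + ROGUE_CR_TE_SCREEN);
+ */
+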
+/*
+ * In order to perform the tiling operation and generate the display list the maximum screen size
+ * must be configured in terms of the number of pixels in X & Y axis since this may not be the same
+ * as the number of tiles defined in the ROGUE_CR_TE_SCREEN register.
+ */
+/* Register ROGUE_CR_PPP_SCREEN */
+#define ROGUE_CR_PPP_SCREEN 0x0C98U
+#define ROGUE_CR_PPP_SCREEN_MASKFULL 0x000000007FFF7FFFull
+/* PIXYMAX
+ * Screen height in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT 16U
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK 0x8000FFFFU
+/* PIXXMAX
+ * Screen width in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT 0U
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK 0xFFFF8000U
+
+/* Register ROGUE_CR_ISP_MTILE_SIZE */
+#define ROGUE_CR_ISP_MTILE_SIZE 0x0F18U
+#define ROGUE_CR_ISP_MTILE_SIZE_MASKFULL 0x0000000003FF03FFull
+/* X
+ * Macrotile width, in tiles. A value of zero corresponds to the maximum size.
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT 16U
+#define ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK 0xFC00FFFFU
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSHIFT 0U
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSIZE 1U
+/* Y
+ * Macrotile height, in tiles. A value of zero corresponds to the maximum size.
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK 0xFFFFFC00U
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSHIFT 0U
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSIZE 1U
+
+/* clang-format on */
+
+#endif /* PVR_ROGUE_CR_DEFS_CLIENT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
new file mode 100644
index 000000000..932b01686
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_DEFS_H
+#define PVR_ROGUE_DEFS_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bits.h>
+
+/*
+ ******************************************************************************
+ * ROGUE Defines
+ ******************************************************************************
+ */
+
+#define ROGUE_FW_MAX_NUM_OS (8U)
+#define ROGUE_FW_HOST_OS (0U)
+#define ROGUE_FW_GUEST_OSID_START (1U)
+
+#define ROGUE_FW_THREAD_0 (0U)
+#define ROGUE_FW_THREAD_1 (1U)
+
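+/*
+ * Convert a cache line size value to bytes; the divide-by-8 suggests the
+ * input is expressed in bits. Non-positive inputs yield 0.
+ */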
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((s32)(x)) > 0) ? ((x) / 8) : (0))
+
+#define MAX_HW_GEOM_FRAG_CONTEXTS 2U
+
+#define ROGUE_CR_CLK_CTRL_ALL_ON \
+ (0x5555555555555555ull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL_ALL_AUTO \
+ (0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_ON \
+ (0x5555555555555555ull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_AUTO \
+ (0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+
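+/*
+ * Per the CLK_CTRL2 encodings above, each clock domain is a 2-bit field with
+ * ON = 01b and AUTO = 10b, so the repeating 0x5555.../0xaaaa... patterns
+ * ANDed with MASKFULL set every defined field to the same state. For example:
+ *
+ *	ROGUE_CR_CLK_CTRL2_ALL_AUTO == (ROGUE_CR_CLK_CTRL2_MCU_FBTC_AUTO |
+ *					ROGUE_CR_CLK_CTRL2_VRDM_AUTO |
+ *					ROGUE_CR_CLK_CTRL2_SH_AUTO |
+ *					ROGUE_CR_CLK_CTRL2_FBA_AUTO)
+ */
+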
+#define ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN \
+ (ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN \
+ (ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN | \
+ ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+/* SOFT_RESET steps as defined in the TRM */
+#define ROGUE_S7_SOFT_RESET_DUSTS (ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES \
+ (ROGUE_CR_SOFT_RESET_PM_EN | ROGUE_CR_SOFT_RESET_VDM_EN | \
+ ROGUE_CR_SOFT_RESET_ISP_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES_ALL \
+ (ROGUE_S7_SOFT_RESET_JONES | ROGUE_CR_SOFT_RESET_BIF_EN | \
+ ROGUE_CR_SOFT_RESET_SLC_EN | ROGUE_CR_SOFT_RESET_GARTEN_EN)
+
+#define ROGUE_S7_SOFT_RESET2 \
+ (ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN | ROGUE_CR_SOFT_RESET2_PIXEL_EN | \
+ ROGUE_CR_SOFT_RESET2_CDM_EN | ROGUE_CR_SOFT_RESET2_VERTEX_EN)
+
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U)
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE \
+ BIT(ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U)
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_SIZE BIT(ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U)
+
+/*
+ * To get the number of required Dusts, divide the number of
+ * clusters by 2 and round up.
+ */
+#define ROGUE_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U)
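+/* e.g. ROGUE_REQ_NUM_DUSTS(5) == 3 and ROGUE_REQ_NUM_DUSTS(6) == 3 */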
+
+/*
+ * To get the number of required Bernados/Phantoms, divide
+ * the number of clusters by 4 and round up.
+ */
+#define ROGUE_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+
+/*
+ * FW MMU contexts
+ */
+#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) /* FW code/private data */
+#define MMU_CONTEXT_MAPPING_FWIF (0x0) /* Host/FW data */
+
+/*
+ * Utility macros to calculate CAT_BASE register addresses
+ */
+#define BIF_CAT_BASEX(n) \
+ (ROGUE_CR_BIF_CAT_BASE0 + \
+ (n) * (ROGUE_CR_BIF_CAT_BASE1 - ROGUE_CR_BIF_CAT_BASE0))
+
+#define FWCORE_MEM_CAT_BASEX(n) \
+ (ROGUE_CR_FWCORE_MEM_CAT_BASE0 + \
+ (n) * (ROGUE_CR_FWCORE_MEM_CAT_BASE1 - \
+ ROGUE_CR_FWCORE_MEM_CAT_BASE0))
+
+/*
+ * FWCORE wrapper register defines
+ */
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT \
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK \
+ ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK
+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U)
+
+#define ROGUE_MAX_COMPUTE_SHARED_REGISTERS (2 * 1024)
+#define ROGUE_MAX_VERTEX_SHARED_REGISTERS 1024
+#define ROGUE_MAX_PIXEL_SHARED_REGISTERS 1024
+#define ROGUE_CSRM_LINE_SIZE_IN_DWORDS (64 * 4 * 4)
+
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE 64
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER 256
+
+/*
+ * The maximum amount of local memory which can be allocated by a single kernel
+ * (in dwords/32-bit registers).
+ *
+ * ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE is in bytes so we divide by four.
+ */
+#define ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS ((ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE * \
+ ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER) >> 2)
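+/* With the values above this evaluates to (64 * 256) / 4 = 4096 dwords. */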
+
+/*
+ ******************************************************************************
+ * WA HWBRNs
+ ******************************************************************************
+ */
+
+/* GPU CR timer tick in GPU cycles */
+#define ROGUE_CRTIME_TICK_IN_CYCLES (256U)
+
+/* For no-HW (nohw) multicore configurations, report the maximum possible core count to the client. */
+#define ROGUE_MULTICORE_MAX_NOHW_CORES (4U)
+
+/*
+ * If the size of the SLC is less than this value then the TPU bypasses the SLC.
+ */
+#define ROGUE_TPU_CACHED_SLC_SIZE_THRESHOLD (128U * 1024U)
+
+/*
+ * If the size of the SLC is bigger than this value then the TCU must not be
+ * bypassed in the SLC.
+ * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default.
+ */
+#define ROGUE_TCU_CACHED_SLC_SIZE_THRESHOLD (32U * 1024U)
+
+/*
+ * Register used by the FW to track the current boot stage (not used in MIPS)
+ */
+#define ROGUE_FW_BOOT_STAGE_REGISTER (ROGUE_CR_POWER_ESTIMATE_RESULT)
+
+/*
+ * Virtualisation definitions
+ */
+#define ROGUE_VIRTUALISATION_REG_SIZE_PER_OS \
+ (ROGUE_CR_MTS_SCHEDULE1 - ROGUE_CR_MTS_SCHEDULE)
+
+/*
+ * Macro used to indicate which version of HWPerf is active
+ */
+#define ROGUE_FEATURE_HWPERF_ROGUE
+
+/*
+ * Maximum number of cores supported by TRP
+ */
+#define ROGUE_TRP_MAX_NUM_CORES (4U)
+
+#endif /* PVR_ROGUE_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
new file mode 100644
index 000000000..172886be4
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
@@ -0,0 +1,2188 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_H
+#define PVR_ROGUE_FWIF_H
+
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ ****************************************************************************
+ * Logging type
+ ****************************************************************************
+ */
+#define ROGUE_FWIF_LOG_TYPE_NONE 0x00000000U
+#define ROGUE_FWIF_LOG_TYPE_TRACE 0x00000001U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MAIN 0x00000002U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MTS 0x00000004U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CSW 0x00000010U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_BIF 0x00000020U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_PM 0x00000040U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RTD 0x00000080U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_SPM 0x00000100U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_POW 0x00000200U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWR 0x00000400U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWP 0x00000800U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RPM 0x00001000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DMA 0x00002000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MISC 0x00004000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU
+#define ROGUE_FWIF_LOG_TYPE_MASK 0x80007FFFU
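+
+/*
+ * For illustration: a log type enabling trace output for the MAIN and POW
+ * groups would be (ROGUE_FWIF_LOG_TYPE_TRACE |
+ * ROGUE_FWIF_LOG_TYPE_GROUP_MAIN | ROGUE_FWIF_LOG_TYPE_GROUP_POW),
+ * i.e. 0x00000203U.
+ */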
+
+/* String used in pvrdebug -h output */
+#define ROGUE_FWIF_LOG_GROUPS_STRING_LIST \
+ "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+struct rogue_fwif_log_group_map_entry {
+ const char *log_group_name;
+ u32 log_group_type;
+};
+
+/*
+ ****************************************************************************
+ * ROGUE FW signature checks
+ ****************************************************************************
+ */
+#define ROGUE_FW_SIG_BUFFER_SIZE_MIN (8192)
+
+#define ROGUE_FWIF_TIMEDIFF_ID ((0x1UL << 28) | ROGUE_CR_TIMER)
+
+/*
+ ****************************************************************************
+ * Trace Buffer
+ ****************************************************************************
+ */
+
+/* Default size of ROGUE_FWIF_TRACEBUF_SPACE in DWords */
+#define ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+#define ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE 200U
+#define ROGUE_FW_THREAD_NUM 1U
+#define ROGUE_FW_THREAD_MAX 2U
+
+#define ROGUE_FW_POLL_TYPE_SET 0x80000000U
+
+struct rogue_fwif_file_info_buf {
+ char path[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+ char info[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+ u32 line_num;
+ u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_tracebuf_space {
+ u32 trace_pointer;
+
+ u32 trace_buffer_fw_addr;
+
+ /* To be used by host when reading from trace buffer */
+ u32 *trace_buffer;
+
+ struct rogue_fwif_file_info_buf assert_buf;
+} __aligned(8);
+
+/* Total number of FW fault logs stored */
+#define ROGUE_FWIF_FWFAULTINFO_MAX (8U)
+
+struct rogue_fw_fault_info {
+ aligned_u64 cr_timer;
+ aligned_u64 os_timer;
+
+ u32 data __aligned(8);
+ u32 reserved;
+ struct rogue_fwif_file_info_buf fault_buf;
+} __aligned(8);
+
+enum rogue_fwif_pow_state {
+ ROGUE_FWIF_POW_OFF, /* idle and ready for full power down */
+ ROGUE_FWIF_POW_ON, /* running HW commands */
+ ROGUE_FWIF_POW_FORCED_IDLE, /* forced idle */
+ ROGUE_FWIF_POW_IDLE, /* idle waiting for host handshake */
+};
+
+/* Firmware HWR states */
+/* The HW state is ok or locked up */
+#define ROGUE_FWIF_HWR_HARDWARE_OK BIT(0)
+/* Tells if a HWR reset is in progress */
+#define ROGUE_FWIF_HWR_RESET_IN_PROGRESS BIT(1)
+/* A DM unrelated lockup has been detected */
+#define ROGUE_FWIF_HWR_GENERAL_LOCKUP BIT(3)
+/* At least one DM is running without being close to a lockup */
+#define ROGUE_FWIF_HWR_DM_RUNNING_OK BIT(4)
+/* At least one DM is close to lockup */
+#define ROGUE_FWIF_HWR_DM_STALLING BIT(5)
+/* The FW has faulted and needs to restart */
+#define ROGUE_FWIF_HWR_FW_FAULT BIT(6)
+/* The FW has requested the host to restart it */
+#define ROGUE_FWIF_HWR_RESTART_REQUESTED BIT(7)
+
+#define ROGUE_FWIF_PHR_STATE_SHIFT (8U)
+/* The FW has requested the host to restart it, per PHR configuration */
+#define ROGUE_FWIF_PHR_RESTART_REQUESTED ((1) << ROGUE_FWIF_PHR_STATE_SHIFT)
+/* A PHR triggered GPU reset has just finished */
+#define ROGUE_FWIF_PHR_RESTART_FINISHED ((2) << ROGUE_FWIF_PHR_STATE_SHIFT)
+#define ROGUE_FWIF_PHR_RESTART_MASK \
+ (ROGUE_FWIF_PHR_RESTART_REQUESTED | ROGUE_FWIF_PHR_RESTART_FINISHED)
+
+#define ROGUE_FWIF_PHR_MODE_OFF (0UL)
+#define ROGUE_FWIF_PHR_MODE_RD_RESET (1UL)
+#define ROGUE_FWIF_PHR_MODE_FULL_RESET (2UL)
+
+/* Firmware per-DM HWR states */
+/* DM is working if all flags are cleared */
+#define ROGUE_FWIF_DM_STATE_WORKING (0)
+/* DM is idle and ready for HWR */
+#define ROGUE_FWIF_DM_STATE_READY_FOR_HWR BIT(0)
+/* DM needs to skip to the next cmd before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_SKIP BIT(2)
+/* DM needs partial render cleanup before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_PR_CLEANUP BIT(3)
+/* DM needs to increment the Recovery Count once fully recovered */
+#define ROGUE_FWIF_DM_STATE_NEEDS_TRACE_CLEAR BIT(4)
+/* DM was identified as locking up and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_LOCKUP BIT(5)
+/* DM was innocently affected by another lockup which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_LOCKUP BIT(6)
+/* DM was identified as over-running and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_OVERRUNING BIT(7)
+/* DM was innocently affected by another DM over-running which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_OVERRUNING BIT(8)
+/* DM was forced into HWR as it delayed more important workloads */
+#define ROGUE_FWIF_DM_STATE_HARD_CONTEXT_SWITCH BIT(9)
+/* DM was forced into HWR due to an uncorrected GPU ECC error */
+#define ROGUE_FWIF_DM_STATE_GPU_ECC_HWR BIT(10)
+
+/* Firmware's connection state */
+enum rogue_fwif_connection_fw_state {
+ /* Firmware is offline */
+ ROGUE_FW_CONNECTION_FW_OFFLINE = 0,
+ /* Firmware is initialised */
+ ROGUE_FW_CONNECTION_FW_READY,
+ /* Firmware connection is fully established */
+ ROGUE_FW_CONNECTION_FW_ACTIVE,
+ /* Firmware is cleaning up connection data */
+ ROGUE_FW_CONNECTION_FW_OFFLOADING,
+ ROGUE_FW_CONNECTION_FW_STATE_COUNT
+};
+
+/* OS' connection state */
+enum rogue_fwif_connection_os_state {
+ /* OS is offline */
+ ROGUE_FW_CONNECTION_OS_OFFLINE = 0,
+ /* OS's KM driver is setup and waiting */
+ ROGUE_FW_CONNECTION_OS_READY,
+ /* OS connection is fully established */
+ ROGUE_FW_CONNECTION_OS_ACTIVE,
+ ROGUE_FW_CONNECTION_OS_STATE_COUNT
+};
+
+struct rogue_fwif_os_runtime_flags {
+ unsigned int os_state : 3;
+ unsigned int fl_ok : 1;
+ unsigned int fl_grow_pending : 1;
+ unsigned int isolated_os : 1;
+ unsigned int reserved : 26;
+};
+
+#define PVR_SLR_LOG_ENTRIES 10
+/* MAX_CLIENT_CCB_NAME not visible to this header */
+#define PVR_SLR_LOG_STRLEN 30
+
+struct rogue_fwif_slr_entry {
+ aligned_u64 timestamp;
+ u32 fw_ctx_addr;
+ u32 num_ufos;
+ char ccb_name[PVR_SLR_LOG_STRLEN];
+ char padding[2];
+} __aligned(8);
+
+#define MAX_THREAD_NUM 2
+
+/* firmware trace control data */
+struct rogue_fwif_tracebuf {
+ u32 log_type;
+ struct rogue_fwif_tracebuf_space tracebuf[MAX_THREAD_NUM];
+ /*
+ * Member initialised only when sTraceBuf is actually allocated (in
+ * ROGUETraceBufferInitOnDemandResources)
+ */
+ u32 tracebuf_size_in_dwords;
+ /* Compatibility and other flags */
+ u32 tracebuf_flags;
+} __aligned(8);
+
+/* firmware system data shared with the Host driver */
+struct rogue_fwif_sysdata {
+ /* Configuration flags from host */
+ u32 config_flags;
+ /* Extended configuration flags from host */
+ u32 config_flags_ext;
+ enum rogue_fwif_pow_state pow_state;
+ u32 hw_perf_ridx;
+ u32 hw_perf_widx;
+ u32 hw_perf_wrap_count;
+ /* Constant after setup, needed in FW */
+ u32 hw_perf_size;
+ /* The number of times the FW drops a packet due to buffer full */
+ u32 hw_perf_drop_count;
+
+ /*
+ * ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid
+ * when FW is built with ROGUE_HWPERF_UTILIZATION &
+ * ROGUE_HWPERF_DROP_TRACKING defined in rogue_fw_hwperf.c
+ */
+ /* Buffer utilisation, high watermark of bytes in use */
+ u32 hw_perf_ut;
+ /* The ordinal of the first packet the FW dropped */
+ u32 first_drop_ordinal;
+ /* The ordinal of the last packet the FW dropped */
+ u32 last_drop_ordinal;
+ /* State flags for each Operating System mirrored from Fw coremem */
+ struct rogue_fwif_os_runtime_flags
+ os_runtime_flags_mirror[ROGUE_FW_MAX_NUM_OS];
+
+ struct rogue_fw_fault_info fault_info[ROGUE_FWIF_FWFAULTINFO_MAX];
+ u32 fw_faults;
+ u32 cr_poll_addr[MAX_THREAD_NUM];
+ u32 cr_poll_mask[MAX_THREAD_NUM];
+ u32 cr_poll_count[MAX_THREAD_NUM];
+ aligned_u64 start_idle_time;
+
+#if defined(SUPPORT_ROGUE_FW_STATS_FRAMEWORK)
+# define ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE (8)
+# define ROGUE_FWIF_STATS_FRAMEWORK_MAX \
+ (2048 * ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE)
+ u32 fw_stats_buf[ROGUE_FWIF_STATS_FRAMEWORK_MAX] __aligned(8);
+#endif
+ u32 hwr_state_flags;
+ u32 hwr_recovery_flags[PVR_FWIF_DM_MAX];
+ /* Compatibility and other flags */
+ u32 fw_sys_data_flags;
+ /* Identify whether MC config is P-P or P-S */
+ u32 mc_config;
+} __aligned(8);
+
+/* per-os firmware shared data */
+struct rogue_fwif_osdata {
+ /* Configuration flags from an OS */
+ u32 fw_os_config_flags;
+ /* Markers to signal that the host should perform a full sync check */
+ u32 fw_sync_check_mark;
+ u32 host_sync_check_mark;
+
+ u32 forced_updates_requested;
+ u8 slr_log_wp;
+ struct rogue_fwif_slr_entry slr_log_first;
+ struct rogue_fwif_slr_entry slr_log[PVR_SLR_LOG_ENTRIES];
+ aligned_u64 last_forced_update_time;
+
+ /* Interrupt count from Threads */
+ u32 interrupt_count[MAX_THREAD_NUM];
+ u32 kccb_cmds_executed;
+ u32 power_sync_fw_addr;
+ /* Compatibility and other flags */
+ u32 fw_os_data_flags;
+ u32 padding;
+} __aligned(8);
+
+/* Firmware trace time-stamp field breakup */
+
+/* ROGUE_CR_TIMER register read (48 bits) value */
+#define ROGUE_FWT_TIMESTAMP_TIME_SHIFT (0U)
+#define ROGUE_FWT_TIMESTAMP_TIME_CLRMSK (0xFFFF000000000000ull)
+
+/* Extra debug-info (16 bits) */
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U)
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK
+
+/* Debug-info sub-fields */
+/*
+ * Bit 0: ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from ROGUE_CR_EVENT_STATUS
+ * register
+ */
+#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U)
+#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SET \
+ BIT(ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT)
+
+/* Bit 1: ROGUE_CR_BIF_MMU_ENTRY_PENDING bit from ROGUE_CR_BIF_MMU_ENTRY register */
+#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U)
+#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET \
+ BIT(ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT)
+
+/* Bit 2: ROGUE_CR_SLAVE_EVENT register is non-zero */
+#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U)
+#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SET \
+ BIT(ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT)
+
+/* Bit 3-15: Unused bits */
+
+#define ROGUE_FWT_DEBUG_INFO_STR_MAXLEN 64
+#define ROGUE_FWT_DEBUG_INFO_STR_PREPEND " (debug info: "
+#define ROGUE_FWT_DEBUG_INFO_STR_APPEND ")"
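+
+/*
+ * For illustration, a host-side decode of the packed timestamp (helper names
+ * are hypothetical, not part of this interface):
+ *
+ *   static inline u64 fwt_timestamp_time(u64 ts)
+ *   {
+ *           return ts & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK;
+ *   }
+ *
+ *   static inline u16 fwt_timestamp_debug_info(u64 ts)
+ *   {
+ *           return (u16)(ts >> ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT);
+ *   }
+ */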
+
+/*
+ ******************************************************************************
+ * HWR Data
+ ******************************************************************************
+ */
+enum rogue_hwrtype {
+ ROGUE_HWRTYPE_UNKNOWNFAILURE = 0,
+ ROGUE_HWRTYPE_OVERRUN = 1,
+ ROGUE_HWRTYPE_POLLFAILURE = 2,
+ ROGUE_HWRTYPE_BIF0FAULT = 3,
+ ROGUE_HWRTYPE_BIF1FAULT = 4,
+ ROGUE_HWRTYPE_TEXASBIF0FAULT = 5,
+ ROGUE_HWRTYPE_MMUFAULT = 6,
+ ROGUE_HWRTYPE_MMUMETAFAULT = 7,
+ ROGUE_HWRTYPE_MIPSTLBFAULT = 8,
+ ROGUE_HWRTYPE_ECCFAULT = 9,
+ ROGUE_HWRTYPE_MMURISCVFAULT = 10,
+};
+
+#define ROGUE_FWIF_HWRTYPE_BIF_BANK_GET(hwr_type) \
+ (((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+#define ROGUE_FWIF_HWRTYPE_PAGE_FAULT_GET(hwr_type) \
+ ((((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_BIF1FAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_TEXASBIF0FAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MMUFAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MMUMETAFAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MIPSTLBFAULT) || \
+ ((hwr_type) == ROGUE_HWRTYPE_MMURISCVFAULT)) \
+ ? true \
+ : false)
+
+struct rogue_bifinfo {
+ aligned_u64 bif_req_status;
+ aligned_u64 bif_mmu_status;
+ aligned_u64 pc_address; /* phys address of the page catalogue */
+ aligned_u64 reserved;
+};
+
+struct rogue_eccinfo {
+ u32 fault_gpu;
+};
+
+struct rogue_mmuinfo {
+ aligned_u64 mmu_status[2];
+ aligned_u64 pc_address; /* phys address of the page catalogue */
+ aligned_u64 reserved;
+};
+
+struct rogue_pollinfo {
+ u32 thread_num;
+ u32 cr_poll_addr;
+ u32 cr_poll_mask;
+ u32 cr_poll_last_value;
+ aligned_u64 reserved;
+} __aligned(8);
+
+struct rogue_tlbinfo {
+ u32 bad_addr;
+ u32 entry_lo;
+};
+
+struct rogue_hwrinfo {
+ union {
+ struct rogue_bifinfo bif_info;
+ struct rogue_mmuinfo mmu_info;
+ struct rogue_pollinfo poll_info;
+ struct rogue_tlbinfo tlb_info;
+ struct rogue_eccinfo ecc_info;
+ } hwr_data;
+
+ aligned_u64 cr_timer;
+ aligned_u64 os_timer;
+ u32 frame_num;
+ u32 pid;
+ u32 active_hwrt_data;
+ u32 hwr_number;
+ u32 event_status;
+ u32 hwr_recovery_flags;
+ enum rogue_hwrtype hwr_type;
+ u32 dm;
+ u32 core_id;
+ aligned_u64 cr_time_of_kick;
+ aligned_u64 cr_time_hw_reset_start;
+ aligned_u64 cr_time_hw_reset_finish;
+ aligned_u64 cr_time_freelist_ready;
+ aligned_u64 reserved[2];
+} __aligned(8);
+
+/* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define ROGUE_FWIF_HWINFO_MAX_FIRST 8U
+/* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define ROGUE_FWIF_HWINFO_MAX_LAST 8U
+/* Total number of HWR logs stored in a buffer */
+#define ROGUE_FWIF_HWINFO_MAX \
+ (ROGUE_FWIF_HWINFO_MAX_FIRST + ROGUE_FWIF_HWINFO_MAX_LAST)
+/* Index of the last log in the HWR log buffer */
+#define ROGUE_FWIF_HWINFO_LAST_INDEX (ROGUE_FWIF_HWINFO_MAX - 1U)
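+
+/*
+ * A plausible consumer-side reading of this layout (an assumption, not
+ * stated by this interface): entries 0..ROGUE_FWIF_HWINFO_MAX_FIRST - 1
+ * record the first HWR events and are then left untouched, while the write
+ * position keeps wrapping within the remaining ROGUE_FWIF_HWINFO_MAX_LAST
+ * slots, overwriting the oldest of the "latest" logs, e.g.:
+ *
+ *   next = (idx + 1 < ROGUE_FWIF_HWINFO_MAX) ?
+ *           idx + 1 : ROGUE_FWIF_HWINFO_MAX_FIRST;
+ */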
+
+struct rogue_fwif_hwrinfobuf {
+ struct rogue_hwrinfo hwr_info[ROGUE_FWIF_HWINFO_MAX];
+ u32 hwr_counter;
+ u32 write_index;
+ u32 dd_req_count;
+ u32 hwr_info_buf_flags; /* Compatibility and other flags */
+ u32 hwr_dm_locked_up_count[PVR_FWIF_DM_MAX];
+ u32 hwr_dm_overran_count[PVR_FWIF_DM_MAX];
+ u32 hwr_dm_recovered_count[PVR_FWIF_DM_MAX];
+ u32 hwr_dm_false_detect_count[PVR_FWIF_DM_MAX];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN (1)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN (3)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN (4)
+
+#define ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN (1)
+#define ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (2)
+
+#define ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP (1)
+#define ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP (2)
+/*
+ ******************************************************************************
+ * ROGUE firmware Init Config Data
+ ******************************************************************************
+ */
+
+/* Flag definitions affecting the firmware globally */
+#define ROGUE_FWIF_INICFG_CTXSWITCH_MODE_RAND BIT(0)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_SRESET_EN BIT(1)
+#define ROGUE_FWIF_INICFG_HWPERF_EN BIT(2)
+#define ROGUE_FWIF_INICFG_DM_KILL_MODE_RAND_EN BIT(3)
+#define ROGUE_FWIF_INICFG_POW_RASCALDUST BIT(4)
+/* Bit 5 is reserved. */
+#define ROGUE_FWIF_INICFG_FBCDC_V3_1_EN BIT(6)
+#define ROGUE_FWIF_INICFG_CHECK_MLIST_EN BIT(7)
+#define ROGUE_FWIF_INICFG_DISABLE_CLKGATING_EN BIT(8)
+/* Bit 9 is reserved. */
+/* Bit 10 is reserved. */
+/* Bit 11 is reserved. */
+#define ROGUE_FWIF_INICFG_REGCONFIG_EN BIT(12)
+#define ROGUE_FWIF_INICFG_ASSERT_ON_OUTOFMEMORY BIT(13)
+#define ROGUE_FWIF_INICFG_HWP_DISABLE_FILTER BIT(14)
+/* Bit 15 is reserved. */
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_FAST \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SLOW \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_NODELAY \
+ (ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN \
+ << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK \
+ (7 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
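+/*
+ * For illustration: the active profile can be recovered with
+ * ((config_flags & ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK) >>
+ *  ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT), yielding one of the
+ * ROGUE_FWIF_CTXSWITCH_PROFILE_*_EN values above.
+ */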
+#define ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP BIT(19)
+#define ROGUE_FWIF_INICFG_ASSERT_ON_HWR_TRIGGER BIT(20)
+#define ROGUE_FWIF_INICFG_FABRIC_COHERENCY_ENABLED BIT(21)
+#define ROGUE_FWIF_INICFG_VALIDATE_IRQ BIT(22)
+#define ROGUE_FWIF_INICFG_DISABLE_PDP_EN BIT(23)
+#define ROGUE_FWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN BIT(24)
+#define ROGUE_FWIF_INICFG_WORKEST BIT(25)
+#define ROGUE_FWIF_INICFG_PDVFS BIT(26)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT (27)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND \
+ (ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN \
+ << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN \
+ (ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN \
+ << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_MASK \
+ (3 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT (29)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_NONE (0)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP \
+ (ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP \
+ << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP \
+ (ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP \
+ << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_MASK \
+ (ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP | \
+ ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP)
+#define ROGUE_FWIF_INICFG_VALIDATE_SOCUSC_TIMER BIT(31)
+
+#define ROGUE_FWIF_INICFG_ALL (0xFFFFFFFFU)
+
+/* Extended Flag definitions affecting the firmware globally */
+#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0)
+/* [7] YUV10 override
+ * [6:4] Quality
+ * [3] Quality enable
+ * [2:1] Compression scheme
+ * [0] Lossy group
+ */
+#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK (0xFF)
+#define ROGUE_FWIF_INICFG_EXT_ALL (ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK)
+
+/* Flag definitions affecting only workloads submitted by a particular OS */
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN BIT(0)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN BIT(1)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN BIT(2)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN BIT(3)
+
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_TDM BIT(4)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_GEOM BIT(5)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_FRAG BIT(6)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_CDM BIT(7)
+
+#define ROGUE_FWIF_INICFG_OS_ALL (0xFF)
+
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL \
+ (ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN | \
+ ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \
+ ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN | \
+ ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN)
+
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CLRMSK \
+ ~(ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL)
+
+#define ROGUE_FWIF_FILTCFG_TRUNCATE_HALF BIT(3)
+#define ROGUE_FWIF_FILTCFG_TRUNCATE_INT BIT(2)
+#define ROGUE_FWIF_FILTCFG_NEW_FILTER_MODE BIT(1)
+
+enum rogue_activepm_conf {
+ ROGUE_ACTIVEPM_FORCE_OFF = 0,
+ ROGUE_ACTIVEPM_FORCE_ON = 1,
+ ROGUE_ACTIVEPM_DEFAULT = 2
+};
+
+enum rogue_rd_power_island_conf {
+ ROGUE_RD_POWER_ISLAND_FORCE_OFF = 0,
+ ROGUE_RD_POWER_ISLAND_FORCE_ON = 1,
+ ROGUE_RD_POWER_ISLAND_DEFAULT = 2
+};
+
+struct rogue_fw_register_list {
+ /* Register number */
+ u16 reg_num;
+ /* Indirect register number (or 0 if not used) */
+ u16 indirect_reg_num;
+ /* Start value for indirect register */
+ u16 indirect_start_val;
+ /* End value for indirect register */
+ u16 indirect_end_val;
+};
+
+struct rogue_fwif_dllist_node {
+ u32 p;
+ u32 n;
+};
+
+/*
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define ROGUE_FWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/* This number is used to represent unallocated page catalog base register */
+#define ROGUE_FW_BIF_INVALID_PCSET 0xFFFFFFFFU
+
+/* Firmware memory context. */
+struct rogue_fwif_fwmemcontext {
+ /* device physical address of context's page catalogue */
+ aligned_u64 pc_dev_paddr;
+ /*
+ * associated page catalog base register (ROGUE_FW_BIF_INVALID_PCSET ==
+ * unallocated)
+ */
+ u32 page_cat_base_reg_set;
+ /* breakpoint address */
+ u32 breakpoint_addr;
+ /* breakpoint handler address */
+ u32 bp_handler_addr;
+ /* DM and enable control for BP */
+ u32 breakpoint_ctl;
+ /* Compatibility and other flags */
+ u32 fw_mem_ctx_flags;
+ u32 padding;
+} __aligned(8);
+
+/*
+ * FW context state flags
+ */
+#define ROGUE_FWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
+#define ROGUE_FWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
+
+#define ROGUE_NUM_GEOM_CORES_MAX 4
+
+/*
+ * FW-accessible TA state which must be written out to memory on context store
+ */
+struct rogue_fwif_geom_ctx_state_per_geom {
+ /* To store in mid-TA */
+ aligned_u64 geom_reg_vdm_call_stack_pointer;
+ /* Initial value (in case it is 'lost' due to a lock-up) */
+ aligned_u64 geom_reg_vdm_call_stack_pointer_init;
+ u32 geom_reg_vbs_so_prim[4];
+ u16 geom_current_idx;
+ u16 padding[3];
+} __aligned(8);
+
+struct rogue_fwif_geom_ctx_state {
+ /* FW-accessible TA state which must be written out to memory on context store */
+ struct rogue_fwif_geom_ctx_state_per_geom geom_core[ROGUE_NUM_GEOM_CORES_MAX];
+} __aligned(8);
+
+/*
+ * FW-accessible ISP state which must be written out to memory on context store
+ */
+struct rogue_fwif_frag_ctx_state {
+ u32 frag_reg_pm_deallocated_mask_status;
+ u32 frag_reg_dm_pds_mtilefree_status;
+ /* Compatibility and other flags */
+ u32 ctx_state_flags;
+ /*
+ * frag_reg_isp_store should be the last element of the structure as this
+ * is an array whose size is determined at runtime after detecting the
+ * ROGUE core
+ */
+ u32 frag_reg_isp_store[];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTX_USING_BUFFER_A (0)
+#define ROGUE_FWIF_CTX_USING_BUFFER_B (1U)
+
+struct rogue_fwif_compute_ctx_state {
+ u32 ctx_state_flags; /* Target buffer and other flags */
+};
+
+struct rogue_fwif_fwcommoncontext {
+ /* CCB details for this firmware context */
+ u32 ccbctl_fw_addr; /* CCB control */
+ u32 ccb_fw_addr; /* CCB base */
+ struct rogue_fwif_dma_addr ccb_meta_dma_addr;
+
+ /* Context suspend state */
+ /* geom/frag context suspend state, read/written by FW */
+ u32 context_state_addr __aligned(8);
+
+ /* Flags e.g. for context switching */
+ u32 fw_com_ctx_flags;
+ u32 priority;
+ u32 priority_seq_num;
+
+ /* Framework state */
+ /* Register updates for Framework */
+ u32 rf_cmd_addr __aligned(8);
+
+ /* Statistic updates waiting to be passed back to the host... */
+ /* True when some stats are pending */
+ bool stats_pending __aligned(4);
+ /* Number of stores on this context since last update */
+ s32 stats_num_stores;
+ /* Number of OOMs on this context since last update */
+ s32 stats_num_out_of_memory;
+ /* Number of PRs on this context since last update */
+ s32 stats_num_partial_renders;
+ /* Data Master type */
+ u32 dm;
+ /* Device Virtual Address of the signal the context is waiting on */
+ aligned_u64 wait_signal_address;
+ /* List entry for the wait-signal list */
+ struct rogue_fwif_dllist_node wait_signal_node __aligned(8);
+ /* List entry for the buffer stalled list */
+ struct rogue_fwif_dllist_node buf_stalled_node __aligned(8);
+ /* Address of the circular buffer queue pointers */
+ aligned_u64 cbuf_queue_ctrl_addr;
+
+ aligned_u64 robustness_address;
+ /* Max HWR deadline limit in ms */
+ u32 max_deadline_ms;
+ /* Following HWR circular buffer read-offset needs resetting */
+ bool read_offset_needs_reset;
+
+ /* List entry for the waiting list */
+ struct rogue_fwif_dllist_node waiting_node __aligned(8);
+ /* List entry for the run list */
+ struct rogue_fwif_dllist_node run_node __aligned(8);
+ /* UFO that last failed (or NULL) */
+ struct rogue_fwif_ufo last_failed_ufo;
+
+ /* Memory context */
+ u32 fw_mem_context_fw_addr;
+
+ /* References to the host side originators */
+ /* the Server Common Context */
+ u32 server_common_context_id;
+ /* associated process ID */
+ u32 pid;
+
+ /* True when Geom DM OOM is not allowed */
+ bool geom_oom_disabled __aligned(4);
+} __aligned(8);
+
+/* Firmware render context. */
+struct rogue_fwif_fwrendercontext {
+ /* Geometry firmware context. */
+ struct rogue_fwif_fwcommoncontext geom_context;
+ /* Fragment firmware context. */
+ struct rogue_fwif_fwcommoncontext frag_context;
+
+ struct rogue_fwif_static_rendercontext_state static_render_context_state;
+
+ /* Number of commands submitted to the WorkEst FW CCB */
+ u32 work_est_ccb_submitted;
+
+ /* Compatibility and other flags */
+ u32 fw_render_ctx_flags;
+} __aligned(8);
+
+/* Firmware compute context. */
+struct rogue_fwif_fwcomputecontext {
+ /* Firmware context for the CDM */
+ struct rogue_fwif_fwcommoncontext cdm_context;
+
+ struct rogue_fwif_static_computecontext_state
+ static_compute_context_state;
+
+ /* Number of commands submitted to the WorkEst FW CCB */
+ u32 work_est_ccb_submitted;
+
+ /* Compatibility and other flags */
+ u32 compute_ctx_flags;
+
+ u32 wgp_state;
+ u32 wgp_checksum;
+ u32 core_mask_a;
+ u32 core_mask_b;
+} __aligned(8);
+
+/* Firmware TDM context. */
+struct rogue_fwif_fwtdmcontext {
+ /* Firmware context for the TDM */
+ struct rogue_fwif_fwcommoncontext tdm_context;
+
+ /* Number of commands submitted to the WorkEst FW CCB */
+ u32 work_est_ccb_submitted;
+} __aligned(8);
+
+/* Firmware TQ3D context. */
+struct rogue_fwif_fwtransfercontext {
+ /* Firmware context for TQ3D. */
+ struct rogue_fwif_fwcommoncontext tq_context;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ ******************************************************************************
+ */
+
+/*
+ * CMD_TYPE 32bit contains:
+ * 31:16 Reserved for magic value to detect corruption (16 bits)
+ * 15 Reserved for ROGUE_CCB_TYPE_TASK (1 bit)
+ * 14:0 Bits available for CMD_TYPEs (15 bits)
+ */
+
+/* Magic value to detect corruption */
+#define ROGUE_CMD_MAGIC_DWORD (0x2ABC)
+#define ROGUE_CMD_MAGIC_DWORD_MASK (0xFFFF0000U)
+#define ROGUE_CMD_MAGIC_DWORD_SHIFT (16U)
+#define ROGUE_CMD_MAGIC_DWORD_SHIFTED \
+ (ROGUE_CMD_MAGIC_DWORD << ROGUE_CMD_MAGIC_DWORD_SHIFT)
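+
+/*
+ * For illustration: ROGUE_CMD_MAGIC_DWORD_SHIFTED is 0x2ABC0000, so a command
+ * type such as (101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) encodes as 0x2ABC0065.
+ * A consumer can check for corruption with:
+ *
+ *   ((cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) >>
+ *    ROGUE_CMD_MAGIC_DWORD_SHIFT) == ROGUE_CMD_MAGIC_DWORD
+ */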
+
+/* Kernel CCB control for ROGUE */
+struct rogue_fwif_ccb_ctl {
+ /* write offset into array of commands (MUST be aligned to 16 bytes!) */
+ u32 write_offset;
+ /* Padding to ensure read and write offsets are in separate cache lines. */
+ u8 padding[128 - sizeof(u32)];
+ /* read offset into array of commands */
+ u32 read_offset;
+ /* Offset wrapping mask (Total capacity of the CCB - 1) */
+ u32 wrap_mask;
+ /* size of each command in bytes */
+ u32 cmd_size;
+ u32 padding2;
+} __aligned(8);
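+
+/*
+ * A minimal host-side space check, assuming the host advances write_offset
+ * and the FW advances read_offset (helper name is hypothetical):
+ *
+ *   static bool kccb_has_space(const struct rogue_fwif_ccb_ctl *ctl)
+ *   {
+ *           u32 next = (ctl->write_offset + 1) & ctl->wrap_mask;
+ *
+ *           return next != ctl->read_offset;
+ *   }
+ *
+ * The 128-byte padding keeps the two offsets in separate cache lines so
+ * producer and consumer updates do not false-share.
+ */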
+
+/* Kernel CCB command structure for ROGUE */
+
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */
+
+/*
+ * can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT
+ * bit from BIF_CTRL reg
+ */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10)
+/* BIF_CTRL_INVAL_TLB1_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB \
+ (ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8)
+/* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800)
+
+/* indicates FW should interrupt the host */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U)
+
+struct rogue_fwif_mmucachedata {
+ u32 cache_flags;
+ u32 mmu_cache_sync_fw_addr;
+ u32 mmu_cache_sync_update_value;
+};
+
+#define ROGUE_FWIF_BPDATA_FLAGS_ENABLE BIT(0)
+#define ROGUE_FWIF_BPDATA_FLAGS_WRITE BIT(1)
+#define ROGUE_FWIF_BPDATA_FLAGS_CTL BIT(2)
+#define ROGUE_FWIF_BPDATA_FLAGS_REGS BIT(3)
+
+struct rogue_fwif_bpdata {
+ /* Memory context */
+ u32 fw_mem_context_fw_addr;
+ /* Breakpoint address */
+ u32 bp_addr;
+ /* Breakpoint handler */
+ u32 bp_handler_addr;
+ /* Breakpoint control */
+ u32 bp_dm;
+ u32 bp_data_flags;
+ /* Number of temporary registers to overallocate */
+ u32 temp_regs;
+ /* Number of shared registers to overallocate */
+ u32 shared_regs;
+ /* DM associated with the breakpoint */
+ u32 dm;
+};
+
+#define ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS \
+ (ROGUE_FWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */
+
+struct rogue_fwif_kccb_cmd_kick_data {
+ /* address of the firmware context */
+ u32 context_fw_addr;
+ /* Client CCB woff update */
+ u32 client_woff_update;
+ /* Client CCB wrap mask update after CCCB growth */
+ u32 client_wrap_mask_update;
+ /* number of CleanupCtl pointers attached */
+ u32 num_cleanup_ctl;
+ /* CleanupCtl structures associated with command */
+ u32 cleanup_ctl_fw_addr
+ [ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS];
+ /*
+ * offset to the CmdHeader which houses the workload estimation kick
+ * data.
+ */
+ u32 work_est_cmd_header_offset;
+};
+
+struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data {
+ struct rogue_fwif_kccb_cmd_kick_data geom_cmd_kick_data;
+ struct rogue_fwif_kccb_cmd_kick_data frag_cmd_kick_data;
+};
+
+struct rogue_fwif_kccb_cmd_force_update_data {
+ /* address of the firmware context */
+ u32 context_fw_addr;
+ /* Client CCB fence offset */
+ u32 ccb_fence_offset;
+};
+
+enum rogue_fwif_cleanup_type {
+ /* FW common context cleanup */
+ ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
+ /* FW HW RT data cleanup */
+ ROGUE_FWIF_CLEANUP_HWRTDATA,
+ /* FW freelist cleanup */
+ ROGUE_FWIF_CLEANUP_FREELIST,
+ /* FW ZS Buffer cleanup */
+ ROGUE_FWIF_CLEANUP_ZSBUFFER,
+};
+
+struct rogue_fwif_cleanup_request {
+ /* Cleanup type */
+ enum rogue_fwif_cleanup_type cleanup_type;
+ union {
+ /* FW common context to cleanup */
+ u32 context_fw_addr;
+ /* HW RT to cleanup */
+ u32 hwrt_data_fw_addr;
+ /* Freelist to cleanup */
+ u32 freelist_fw_addr;
+ /* ZS Buffer to cleanup */
+ u32 zs_buffer_fw_addr;
+ } cleanup_data;
+};
+
+enum rogue_fwif_power_type {
+ ROGUE_FWIF_POW_OFF_REQ = 1,
+ ROGUE_FWIF_POW_FORCED_IDLE_REQ,
+ ROGUE_FWIF_POW_NUM_UNITS_CHANGE,
+ ROGUE_FWIF_POW_APM_LATENCY_CHANGE
+};
+
+enum rogue_fwif_power_force_idle_type {
+ ROGUE_FWIF_POWER_FORCE_IDLE = 1,
+ ROGUE_FWIF_POWER_CANCEL_FORCED_IDLE,
+ ROGUE_FWIF_POWER_HOST_TIMEOUT,
+};
+
+struct rogue_fwif_power_request {
+ /* Type of power request */
+ enum rogue_fwif_power_type pow_type;
+ union {
+ /* Number of active Dusts */
+ u32 num_of_dusts;
+ /* If the operation is mandatory */
+ bool forced __aligned(4);
+ /*
+ * Type of request, consolidating Force Idle, Cancel Forced
+ * Idle and Host Timeout
+ */
+ enum rogue_fwif_power_force_idle_type pow_request_type;
+ } power_req_data;
+};
+
+struct rogue_fwif_slcflushinvaldata {
+ /* Context to fence on (only used when dm_context is true) */
+ u32 context_fw_addr;
+ /* Invalidate the cache as well as flushing */
+ bool inval __aligned(4);
+ /* The data to flush/invalidate belongs to a specific DM context */
+ bool dm_context __aligned(4);
+ /* Optional address of range (only used when dm_context is false) */
+ aligned_u64 address;
+ /* Optional size of range (only used when dm_context is false) */
+ aligned_u64 size;
+};
+
+enum rogue_fwif_hwperf_update_config {
+ ROGUE_FWIF_HWPERF_CTRL_TOGGLE = 0,
+ ROGUE_FWIF_HWPERF_CTRL_SET = 1,
+ ROGUE_FWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2
+};
+
+struct rogue_fwif_hwperf_ctrl {
+ enum rogue_fwif_hwperf_update_config opcode; /* Control operation code */
+ aligned_u64 mask; /* Mask of events to toggle */
+};
+
+struct rogue_fwif_hwperf_config_enable_blks {
+ /* Number of ROGUE_HWPERF_CONFIG_MUX_CNTBLK in the array */
+ u32 num_blocks;
+ /* Address of the ROGUE_HWPERF_CONFIG_MUX_CNTBLK array */
+ u32 block_configs_fw_addr;
+};
+
+struct rogue_fwif_hwperf_config_da_blks {
+ /* Number of ROGUE_HWPERF_CONFIG_CNTBLK in the array */
+ u32 num_blocks;
+ /* Address of the ROGUE_HWPERF_CONFIG_CNTBLK array */
+ u32 block_configs_fw_addr;
+};
+
+struct rogue_fwif_coreclkspeedchange_data {
+ u32 new_clock_speed; /* New clock speed */
+};
+
+#define ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX 16
+
+struct rogue_fwif_hwperf_ctrl_blks {
+ bool enable;
+ /* Number of block IDs in the array */
+ u32 num_blocks;
+ /* Array of ROGUE_HWPERF_CNTBLK_ID values */
+ u16 block_ids[ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX];
+};
+
+struct rogue_fwif_hwperf_select_custom_cntrs {
+ u16 custom_block;
+ u16 num_counters;
+ u32 custom_counter_ids_fw_addr;
+};
+
+struct rogue_fwif_zsbuffer_backing_data {
+ u32 zs_buffer_fw_addr; /* ZS-Buffer FW address */
+
+ bool done __aligned(4); /* action backing/unbacking succeeded */
+};
+
+struct rogue_fwif_freelist_gs_data {
+ /* Freelist FW address */
+ u32 freelist_fw_addr;
+ /* Amount of the Freelist change */
+ u32 delta_pages;
+ /* New amount of pages on the freelist (including ready pages) */
+ u32 new_pages;
+ /* Number of ready pages to be held in reserve until OOM */
+ u32 ready_pages;
+};
+
+#define MAX_FREELISTS_SIZE 3
+#define MAX_HW_GEOM_FRAG_CONTEXTS_SIZE 3
+
+#define ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT \
+ (MAX_HW_GEOM_FRAG_CONTEXTS_SIZE * MAX_FREELISTS_SIZE * 2U)
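+/* For illustration: 3 contexts * 3 freelists * 2 == 18 entries. */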
+#define ROGUE_FWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+struct rogue_fwif_freelists_reconstruction_data {
+ u32 freelist_count;
+ u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+struct rogue_fwif_write_offset_update_data {
+ /*
+ * Context that may need to be resumed following a write offset update
+ */
+ u32 context_fw_addr;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Proactive DVFS Structures
+ ******************************************************************************
+ */
+#define NUM_OPP_VALUES 16
+
+struct pdvfs_opp {
+ u32 volt; /* V */
+ u32 freq; /* Hz */
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_opp {
+ struct pdvfs_opp opp_values[NUM_OPP_VALUES];
+ u32 min_opp_point;
+ u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_max_freq_data {
+ u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_min_freq_data {
+ u32 min_opp_point;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Register configuration structures
+ ******************************************************************************
+ */
+
+#define ROGUE_FWIF_REG_CFG_MAX_SIZE 512
+
+enum rogue_fwif_regdata_cmd_type {
+ ROGUE_FWIF_REGCFG_CMD_ADD = 101,
+ ROGUE_FWIF_REGCFG_CMD_CLEAR = 102,
+ ROGUE_FWIF_REGCFG_CMD_ENABLE = 103,
+ ROGUE_FWIF_REGCFG_CMD_DISABLE = 104
+};
+
+enum rogue_fwif_reg_cfg_type {
+ /* Sidekick power event */
+ ROGUE_FWIF_REG_CFG_TYPE_PWR_ON = 0,
+ /* Rascal / dust power event */
+ ROGUE_FWIF_REG_CFG_TYPE_DUST_CHANGE,
+ /* Geometry kick */
+ ROGUE_FWIF_REG_CFG_TYPE_GEOM,
+ /* Fragment kick */
+ ROGUE_FWIF_REG_CFG_TYPE_FRAG,
+ /* Compute kick */
+ ROGUE_FWIF_REG_CFG_TYPE_CDM,
+ /* TLA kick */
+ ROGUE_FWIF_REG_CFG_TYPE_TLA,
+ /* TDM kick */
+ ROGUE_FWIF_REG_CFG_TYPE_TDM,
+ /* Applies to all types. Keep as last element */
+ ROGUE_FWIF_REG_CFG_TYPE_ALL
+};
+
+struct rogue_fwif_reg_cfg_rec {
+ u64 sddr;
+ u64 mask;
+ u64 value;
+};
+
+struct rogue_fwif_regconfig_data {
+ enum rogue_fwif_regdata_cmd_type cmd_type;
+ enum rogue_fwif_reg_cfg_type reg_config_type;
+ struct rogue_fwif_reg_cfg_rec reg_config __aligned(8);
+};
+
+struct rogue_fwif_reg_cfg {
+ /*
+ * PDump WRW command write granularity is 32 bits.
+ * Add padding to ensure array size is 32 bit granular.
+ */
+ u8 num_regs_type[ALIGN((u32)ROGUE_FWIF_REG_CFG_TYPE_ALL,
+ sizeof(u32))] __aligned(8);
+ struct rogue_fwif_reg_cfg_rec
+ reg_configs[ROGUE_FWIF_REG_CFG_MAX_SIZE] __aligned(8);
+} __aligned(8);
+
+enum rogue_fwif_os_state_change {
+ ROGUE_FWIF_OS_ONLINE = 1,
+ ROGUE_FWIF_OS_OFFLINE
+};
+
+struct rogue_fwif_os_state_change_data {
+ u32 osid;
+ enum rogue_fwif_os_state_change new_os_state;
+} __aligned(8);
+
+enum rogue_fwif_counter_dump_request {
+ ROGUE_FWIF_PWR_COUNTER_DUMP_START = 1,
+ ROGUE_FWIF_PWR_COUNTER_DUMP_STOP,
+ ROGUE_FWIF_PWR_COUNTER_DUMP_SAMPLE,
+};
+
+struct rogue_fwif_counter_dump_data {
+ enum rogue_fwif_counter_dump_request counter_dump_request;
+} __aligned(8);
+
+enum rogue_fwif_kccb_cmd_type {
+ /* Common commands */
+ ROGUE_FWIF_KCCB_CMD_KICK = 101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ ROGUE_FWIF_KCCB_CMD_MMUCACHE = 102U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ ROGUE_FWIF_KCCB_CMD_BP = 103U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* SLC flush and invalidation request */
+ ROGUE_FWIF_KCCB_CMD_SLCFLUSHINVAL = 105U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /*
+ * Requests cleanup of a FW resource (type specified in the command
+ * data)
+ */
+ ROGUE_FWIF_KCCB_CMD_CLEANUP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Power request */
+ ROGUE_FWIF_KCCB_CMD_POW = 107U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Backing for on-demand ZS-Buffer done */
+ ROGUE_FWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE =
+ 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Unbacking for on-demand ZS-Buffer done */
+ ROGUE_FWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE =
+ 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Freelist Grow done */
+ ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE =
+ 110U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Freelists Reconstruction done */
+ ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE =
+ 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /*
+ * Informs the firmware that the host has added more data to a CDM2
+ * Circular Buffer
+ */
+ ROGUE_FWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE =
+ 114U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Health check request */
+ ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK = 115U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Forcing signalling of all unmet UFOs for a given CCB offset */
+ ROGUE_FWIF_KCCB_CMD_FORCE_UPDATE = 116U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* There is a geometry and a fragment command in this single kick */
+ ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK = 117U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Informs the FW that a Guest OS has come online / offline. */
+ ROGUE_FWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Commands only permitted to the native or host OS */
+ ROGUE_FWIF_KCCB_CMD_REGCONFIG = 200U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Core clock speed change event */
+ ROGUE_FWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /*
+ * Ask the firmware to update its cached ui32LogType value from the (shared)
+ * tracebuf control structure
+ */
+ ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Set a maximum frequency/OPP point */
+ ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /*
+ * Changes the relative scheduling priority for a particular OSid. It can
+ * only be serviced for the Host DDK
+ */
+ ROGUE_FWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Set or clear firmware state flags */
+ ROGUE_FWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Set a minimum frequency/OPP point */
+ ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Configure Periodic Hardware Reset behaviour */
+ ROGUE_FWIF_KCCB_CMD_PHR_CFG = 213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Configure Safety Firmware Watchdog */
+ ROGUE_FWIF_KCCB_CMD_WDG_CFG = 215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Controls counter dumping in the FW */
+ ROGUE_FWIF_KCCB_CMD_COUNTER_DUMP = 216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Configure, clear and enable multiple HWPerf blocks */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Configure the custom counters for HWPerf */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Configure directly addressable counters for HWPerf */
+ ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+};
+
+#define ROGUE_FWIF_LAST_ALLOWED_GUEST_KCCB_CMD \
+ (ROGUE_FWIF_KCCB_CMD_REGCONFIG - 1)
+
+/* Kernel CCB command packet */
+struct rogue_fwif_kccb_cmd {
+ /* Command type */
+ enum rogue_fwif_kccb_cmd_type cmd_type;
+ /* Compatibility and other flags */
+ u32 kccb_flags;
+
+ /*
+ * NOTE: Make sure that cmd_data is the last member of this struct;
+ * it is used to calculate the actual command size for the device
+ * memory copy. (Refer to ROGUEGetCmdMemCopySize())
+ */
+ union {
+ /* Data for Kick command */
+ struct rogue_fwif_kccb_cmd_kick_data cmd_kick_data;
+ /* Data for combined geom/frag Kick command */
+ struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data
+ combined_geom_frag_cmd_kick_data;
+ /* Data for MMU cache command */
+ struct rogue_fwif_mmucachedata mmu_cache_data;
+ /* Data for Breakpoint Commands */
+ struct rogue_fwif_bpdata bp_data;
+ /* Data for SLC Flush/Inval commands */
+ struct rogue_fwif_slcflushinvaldata slc_flush_inval_data;
+ /* Data for cleanup commands */
+ struct rogue_fwif_cleanup_request cleanup_data;
+ /* Data for power request commands */
+ struct rogue_fwif_power_request pow_data;
+ /* Data for HWPerf control command */
+ struct rogue_fwif_hwperf_ctrl hw_perf_ctrl;
+ /*
+ * Data for HWPerf configure, clear and enable performance
+ * counter block command
+ */
+ struct rogue_fwif_hwperf_config_enable_blks
+ hw_perf_cfg_enable_blks;
+ /*
+ * Data for HWPerf enable or disable performance counter block
+ * commands
+ */
+ struct rogue_fwif_hwperf_ctrl_blks hw_perf_ctrl_blks;
+ /* Data for HWPerf configure the custom counters to read */
+ struct rogue_fwif_hwperf_select_custom_cntrs
+ hw_perf_select_cstm_cntrs;
+ /* Data for HWPerf configure Directly Addressable blocks */
+ struct rogue_fwif_hwperf_config_da_blks hw_perf_cfg_da_blks;
+ /* Data for core clock speed change */
+ struct rogue_fwif_coreclkspeedchange_data
+ core_clk_speed_change_data;
+ /* Feedback for Z/S Buffer backing/unbacking */
+ struct rogue_fwif_zsbuffer_backing_data zs_buffer_backing_data;
+ /* Feedback for Freelist grow/shrink */
+ struct rogue_fwif_freelist_gs_data free_list_gs_data;
+ /* Feedback for Freelists reconstruction */
+ struct rogue_fwif_freelists_reconstruction_data
+ free_lists_reconstruction_data;
+ /* Data for custom register configuration */
+ struct rogue_fwif_regconfig_data reg_config_data;
+ /* Data for informing the FW about the write offset update */
+ struct rogue_fwif_write_offset_update_data
+ write_offset_update_data;
+ /* Data for setting the max frequency/OPP */
+ struct rogue_fwif_pdvfs_max_freq_data pdvfs_max_freq_data;
+ /* Data for setting the min frequency/OPP */
+ struct rogue_fwif_pdvfs_min_freq_data pdvfs_min_freq_data;
+ /* Data for updating the Guest Online states */
+ struct rogue_fwif_os_state_change_data cmd_os_online_state_data;
+ /* Dev address for TBI buffer allocated on demand */
+ u32 tbi_buffer_fw_addr;
+ /* Data for dumping of register ranges */
+ struct rogue_fwif_counter_dump_data counter_dump_config_data;
+ /* Data for signalling all unmet fences for a given CCB */
+ struct rogue_fwif_kccb_cmd_force_update_data force_update_data;
+ } cmd_data __aligned(8);
+} __aligned(8);
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_kccb_cmd);
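+
+/*
+ * A minimal sketch of building a cleanup command on the host (variable names
+ * are illustrative):
+ *
+ *   struct rogue_fwif_kccb_cmd cmd = { 0 };
+ *
+ *   cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
+ *   cmd.cmd_data.cleanup_data.cleanup_type =
+ *           ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT;
+ *   cmd.cmd_data.cleanup_data.cleanup_data.context_fw_addr = ctx_fw_addr;
+ */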
+
+/*
+ ******************************************************************************
+ * Firmware CCB command structure for ROGUE
+ ******************************************************************************
+ */
+
+struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data {
+ u32 zs_buffer_id;
+};
+
+struct rogue_fwif_fwccb_cmd_freelist_gs_data {
+ u32 freelist_id;
+};
+
+struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data {
+ u32 freelist_count;
+ u32 hwr_counter;
+ u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+/* 1 if a page fault happened */
+#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF BIT(0)
+/* 1 if applicable to all contexts */
+#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS BIT(1)
+
+struct rogue_fwif_fwccb_cmd_context_reset_data {
+ /* Context affected by the reset */
+ u32 server_common_context_id;
+ /* Reason for reset */
+ enum rogue_context_reset_reason reset_reason;
+ /* Data Master affected by the reset */
+ u32 dm;
+ /* Job ref running at the time of reset */
+ u32 reset_job_ref;
+ /* ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */
+ u32 flags;
+ /* At what page catalog address */
+ aligned_u64 pc_address;
+ /* Page fault address (only when applicable) */
+ aligned_u64 fault_address;
+};
+
+struct rogue_fwif_fwccb_cmd_fw_pagefault_data {
+ /* Page fault address */
+ u64 fw_fault_addr;
+};
+
+enum rogue_fwif_fwccb_cmd_type {
+ /* Requests ZSBuffer to be backed with physical pages */
+ ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests ZSBuffer to be unbacked */
+ ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests an on-demand freelist grow/shrink */
+ ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW = 103U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests freelists reconstruction */
+ ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION =
+ 104U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Notifies host of a HWR event on a context */
+ ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION =
+ 105U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests an on-demand debug dump */
+ ROGUE_FWIF_FWCCB_CMD_DEBUG_DUMP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ /* Requests an on-demand update on process stats */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS = 107U |
+ ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ ROGUE_FWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE =
+ 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+ ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART =
+ 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+ /* Notifies host of a FW pagefault */
+ ROGUE_FWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION =
+ 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+};
+
+enum rogue_fwif_fwccb_cmd_update_stats_type {
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32TotalNumPartialRenders stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS = 1,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32TotalNumOutOfMemory stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumGeomStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_GEOM_STORES,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumFragStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_FRAG_STORES,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumCDMStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES,
+ /*
+ * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+ * ui32NumTDMStores stat
+ */
+ ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES
+};
+
+struct rogue_fwif_fwccb_cmd_update_stats_data {
+ /* Element to update */
+ enum rogue_fwif_fwccb_cmd_update_stats_type element_to_update;
+ /* The pid of the process whose stats are being updated */
+ u32 pid_owner;
+ /* Adjustment to be made to the statistic */
+ s32 adjustment_value;
+};
+
+struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data {
+ u32 core_clk_rate;
+} __aligned(8);
+
+struct rogue_fwif_fwccb_cmd {
+ /* Command type */
+ enum rogue_fwif_fwccb_cmd_type cmd_type;
+ /* Compatibility and other flags */
+ u32 fwccb_flags;
+
+ union {
+ /* Data for Z/S-Buffer on-demand (un)backing */
+ struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data
+ cmd_zs_buffer_backing;
+ /* Data for on-demand freelist grow/shrink */
+ struct rogue_fwif_fwccb_cmd_freelist_gs_data cmd_free_list_gs;
+ /* Data for freelists reconstruction */
+ struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data
+ cmd_freelists_reconstruction;
+ /* Data for context reset notification */
+ struct rogue_fwif_fwccb_cmd_context_reset_data
+ cmd_context_reset_notification;
+ /* Data for updating process stats */
+ struct rogue_fwif_fwccb_cmd_update_stats_data
+ cmd_update_stats_data;
+ struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data
+ cmd_core_clk_rate_change;
+ struct rogue_fwif_fwccb_cmd_fw_pagefault_data cmd_fw_pagefault;
+ } cmd_data __aligned(8);
+} __aligned(8);
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_fwccb_cmd);
+
+/*
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for ROGUE
+ ******************************************************************************
+ */
+struct rogue_fwif_workest_fwccb_cmd {
+ /* Index for return data array */
+ u16 return_data_index;
+ /* The cycles the workload took on the hardware */
+ u32 cycles_taken;
+};
+
+/*
+ ******************************************************************************
+ * Client CCB commands for ROGUE
+ ******************************************************************************
+ */
+
+/*
+ * Required memory alignment for 64-bit variables accessible by Meta
+ * (The Meta gcc compiler aligns 64-bit variables on 64-bit boundaries;
+ * therefore, memory shared between the host and Meta that contains 64-bit
+ * variables has to maintain this alignment)
+ */
+#define ROGUE_FWIF_FWALLOC_ALIGN sizeof(u64)
+
+#define ROGUE_CCB_TYPE_TASK BIT(15)
+#define ROGUE_CCB_FWALLOC_ALIGN(size) \
+ (((size) + (ROGUE_FWIF_FWALLOC_ALIGN - 1)) & \
+ ~(ROGUE_FWIF_FWALLOC_ALIGN - 1))
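+
+/* For illustration: ROGUE_CCB_FWALLOC_ALIGN(13) == 16 with 8-byte alignment. */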
+
+#define ROGUE_FWIF_CCB_CMD_TYPE_GEOM \
+ (201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D \
+ (202U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG \
+ (203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR \
+ (204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_CDM \
+ (205U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_TDM \
+ (206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FBSC_INVALIDATE \
+ (207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_2D \
+ (208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_PRE_TIMESTAMP \
+ (209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_NULL \
+ (210U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_ABORT \
+ (211U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+
+/* Leave a gap between CCB specific commands and generic commands */
+#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE (212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UPDATE (213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_RMW_UPDATE \
+ (214U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR (215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_PRIORITY (216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+/*
+ * Pre- and post-timestamp commands are supposed to sandwich the DM cmd. The
+ * padding code used on CCB wrap upsets the FW unless the task type bit is
+ * cleared for POST_TIMESTAMPs. That is why there are two different cmd types.
+ */
+#define ROGUE_FWIF_CCB_CMD_TYPE_POST_TIMESTAMP \
+ (217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_UPDATE \
+ (218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE \
+ (219U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+
+#define ROGUE_FWIF_CCB_CMD_TYPE_PADDING (221U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+
+struct rogue_fwif_workest_kick_data {
+ /* Index for the KM Workload estimation return data array */
+ u16 return_data_index __aligned(8);
+ /* Predicted time taken to do the work in cycles */
+ u32 cycles_prediction __aligned(8);
+ /* Deadline for the workload */
+ aligned_u64 deadline;
+};
+
+struct rogue_fwif_ccb_cmd_header {
+ u32 cmd_type;
+ u32 cmd_size;
+ /*
+ * external job reference - provided by client and used in debug for
+ * tracking submitted work
+ */
+ u32 ext_job_ref;
+ /*
+ * internal job reference - generated by services and used in debug for
+ * tracking submitted work
+ */
+ u32 int_job_ref;
+ /* Workload Estimation - Workload Estimation Data */
+ struct rogue_fwif_workest_kick_data work_est_kick_data __aligned(8);
+};
+
+/*
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ ******************************************************************************
+ */
+struct rogue_fwif_cmd_priority {
+ s32 priority;
+};
+
+/*
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ ******************************************************************************
+ */
+struct rogue_fwif_sigbuf_ctl {
+ /* Ptr to Signature Buffer memory */
+ u32 buffer_fw_addr;
+ /* Amount of space left for storing regs in the buffer */
+ u32 left_size_in_regs;
+} __aligned(8);
+
+struct rogue_fwif_counter_dump_ctl {
+ /* Ptr to counter dump buffer */
+ u32 buffer_fw_addr;
+ /* Amount of space for storing in the buffer */
+ u32 size_in_dwords;
+} __aligned(8);
+
+struct rogue_fwif_firmware_gcov_ctl {
+ /* Ptr to firmware gcov buffer */
+ u32 buffer_fw_addr;
+ /* Amount of space for storing in the buffer */
+ u32 size;
+} __aligned(8);
+
+/*
+ *****************************************************************************
+ * ROGUE Compatibility checks
+ *****************************************************************************
+ */
+
+/*
+ * WARNING: Whenever the layout of ROGUE_FWIF_COMPCHECKS_BVNC changes, the
+ * following define should be increased by 1 to indicate to the compatibility
+ * logic that the layout has changed.
+ */
+#define ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION 3
+
+struct rogue_fwif_compchecks_bvnc {
+ /* WARNING: This field must be defined as the first one in this structure */
+ u32 layout_version;
+ aligned_u64 bvnc;
+} __aligned(8);
+
+struct rogue_fwif_init_options {
+ u8 os_count_support;
+ u8 padding[7];
+} __aligned(8);
+
+#define ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+ struct rogue_fwif_compchecks_bvnc(name) = { \
+ ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION, \
+ 0, \
+ }
+
+static inline void rogue_fwif_compchecks_bvnc_init(struct rogue_fwif_compchecks_bvnc *compchecks)
+{
+ compchecks->layout_version = ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION;
+ compchecks->bvnc = 0;
+}
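+
+/*
+ * A minimal usage sketch (variable names are illustrative): either declare
+ * and initialise in one step, or initialise an existing instance:
+ *
+ *   ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(hw_bvnc);
+ *
+ *   struct rogue_fwif_compchecks_bvnc fw_bvnc;
+ *   rogue_fwif_compchecks_bvnc_init(&fw_bvnc);
+ */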
+
+struct rogue_fwif_compchecks {
+ /* hardware BVNC (from the ROGUE registers) */
+ struct rogue_fwif_compchecks_bvnc hw_bvnc;
+ /* firmware BVNC */
+ struct rogue_fwif_compchecks_bvnc fw_bvnc;
+ /* identifier of the FW processor version */
+ u32 fw_processor_version;
+ /* software DDK version */
+ u32 ddk_version;
+ /* software DDK build no. */
+ u32 ddk_build;
+ /* build options bit-field */
+ u32 build_options;
+ /* initialisation options bit-field */
+ struct rogue_fwif_init_options init_options;
+ /* Information is valid */
+ bool updated __aligned(4);
+ u32 padding;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ ******************************************************************************
+ */
+struct rogue_fwif_runtime_cfg {
+ /* APM latency in ms before signalling IDLE to the host */
+ u32 active_pm_latency_ms;
+ /* Compatibility and other flags */
+ u32 runtime_cfg_flags;
+ /*
+ * If set, APM latency does not reset to system default each GPU power
+ * transition
+ */
+ bool active_pm_latency_persistant __aligned(4);
+ /* Core clock speed, currently only used to calculate timer ticks */
+ u32 core_clock_speed;
+ /* Last number-of-dusts change requested by the host */
+ u32 default_dusts_num_init;
+ /* Periodic Hardware Reset configuration values */
+ u32 phr_mode;
+ /* New number of milliseconds C/S is allowed to last */
+ u32 hcs_deadline_ms;
+ /* The watchdog period in microseconds */
+ u32 wdg_period_us;
+ /* Array of priorities per OS */
+ u32 osid_priority[ROGUE_FW_MAX_NUM_OS];
+ /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+ u32 hwperf_buf_fw_addr;
+
+ bool padding __aligned(4);
+};
+
+/*
+ *****************************************************************************
+ * Control data for ROGUE
+ *****************************************************************************
+ */
+
+#define ROGUE_FWIF_HWR_DEBUG_DUMP_ALL (99999U)
+
+enum rogue_fwif_tpu_dm {
+ ROGUE_FWIF_TPU_DM_PDM = 0,
+ ROGUE_FWIF_TPU_DM_VDM = 1,
+ ROGUE_FWIF_TPU_DM_CDM = 2,
+ ROGUE_FWIF_TPU_DM_TDM = 3,
+ ROGUE_FWIF_TPU_DM_LAST
+};
+
+enum rogue_fwif_gpio_val_mode {
+ /* No GPIO validation */
+ ROGUE_FWIF_GPIO_VAL_OFF = 0,
+ /*
+ * Simple test case that starts by sending data via the GPIO and then
+ * sends back any data received over the GPIO.
+ */
+ ROGUE_FWIF_GPIO_VAL_GENERAL = 1,
+ /*
+ * More complex test case that writes and reads data across the entire
+ * GPIO AP address range.
+ */
+ ROGUE_FWIF_GPIO_VAL_AP = 2,
+ /* Validates the GPIO Testbench. */
+ ROGUE_FWIF_GPIO_VAL_TESTBENCH = 5,
+ /* Send and then receive each byte in the range 0-255. */
+ ROGUE_FWIF_GPIO_VAL_LOOPBACK = 6,
+ /* Send and then receive each power-of-2 byte in the range 0-255. */
+ ROGUE_FWIF_GPIO_VAL_LOOPBACK_LITE = 7,
+ ROGUE_FWIF_GPIO_VAL_LAST
+};
+
+enum fw_perf_conf {
+ FW_PERF_CONF_NONE = 0,
+ FW_PERF_CONF_ICACHE = 1,
+ FW_PERF_CONF_DCACHE = 2,
+ FW_PERF_CONF_JTLB_INSTR = 5,
+ FW_PERF_CONF_INSTRUCTIONS = 6
+};
+
+enum fw_boot_stage {
+ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2,
+ FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+ FW_BOOT_NOT_STARTED = 0,
+ FW_BOOT_BLDR_STARTED = 1,
+ FW_BOOT_CACHE_DONE,
+ FW_BOOT_TLB_DONE,
+ FW_BOOT_MAIN_STARTED,
+ FW_BOOT_ALIGNCHECKS_DONE,
+ FW_BOOT_INIT_DONE,
+};
+
+/*
+ * Kernel CCB return slot responses. Using bit-fields instead of bare
+ * integers allows the FW to potentially pack several responses into the
+ * return slot of a single kCCB command.
+ */
+/* Command executed (return status from FW) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED BIT(0)
+/* A cleanup was requested but resource busy */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY BIT(1)
+/* Poll failed in FW for a HW operation to complete */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_POLL_FAILURE BIT(2)
+/* Reset value of a kCCB return slot (set by host) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U
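+
+/*
+ * Illustrative sketch, not part of the FW interface: since a return slot is a
+ * bit-field, the host tests individual response bits rather than comparing
+ * the whole word. The helper name is hypothetical.
+ */
+static inline bool pvr_example_kccb_cmd_executed(u32 rtn_slot)
+{
+ /* No response from the FW yet for this slot. */
+ if (rtn_slot == ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE)
+  return false;
+
+ /* CLEANUP_BUSY may be packed in alongside other response bits. */
+ if (rtn_slot & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
+  return false;
+
+ return rtn_slot & ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED;
+}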
+
+struct rogue_fwif_connection_ctl {
+ /* Fw-Os connection states */
+ enum rogue_fwif_connection_fw_state connection_fw_state;
+ enum rogue_fwif_connection_os_state connection_os_state;
+ u32 alive_fw_token;
+ u32 alive_os_token;
+} __aligned(8);
+
+struct rogue_fwif_osinit {
+ /* Kernel CCB */
+ u32 kernel_ccbctl_fw_addr;
+ u32 kernel_ccb_fw_addr;
+ u32 kernel_ccb_rtn_slots_fw_addr;
+
+ /* Firmware CCB */
+ u32 firmware_ccbctl_fw_addr;
+ u32 firmware_ccb_fw_addr;
+
+ /* Workload Estimation Firmware CCB */
+ u32 work_est_firmware_ccbctl_fw_addr;
+ u32 work_est_firmware_ccb_fw_addr;
+
+ u32 rogue_fwif_hwr_info_buf_ctl_fw_addr;
+
+ u32 hwr_debug_dump_limit;
+
+ u32 fw_os_data_fw_addr;
+
+ /* Compatibility checks to be populated by the Firmware */
+ struct rogue_fwif_compchecks rogue_comp_checks;
+} __aligned(8);
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc_block {
+ /* Counter block ID, see ROGUE_HWPERF_CNTBLK_ID */
+ u16 block_id;
+
+ /* Number of counters in this block type */
+ u16 num_counters;
+
+ /* Number of blocks of this type */
+ u16 num_blocks;
+
+ u16 reserved;
+};
+
+#define ROGUE_HWPERF_MAX_BVNC_LEN (24)
+
+#define ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc {
+ /* BVNC string */
+ char bvnc_string[ROGUE_HWPERF_MAX_BVNC_LEN];
+ /* See ROGUE_HWPERF_FEATURE_FLAGS */
+ u32 bvnc_km_feature_flags;
+ /* Number of blocks described in bvnc_blocks */
+ u16 num_bvnc_blocks;
+ /* Number of GPU cores present */
+ u16 bvnc_gpu_cores;
+ /* Supported Performance Blocks for BVNC */
+ struct rogue_hwperf_bvnc_block
+ bvnc_blocks[ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN];
+};
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_hwperf_bvnc);
+
+struct rogue_fwif_sysinit {
+ /* Fault read address */
+ aligned_u64 fault_phys_addr;
+
+ /* PDS execution base */
+ aligned_u64 pds_exec_base;
+ /* USC execution base */
+ aligned_u64 usc_exec_base;
+ /* FBCDC bindless texture state table base */
+ aligned_u64 fbcdc_state_table_base;
+ aligned_u64 fbcdc_large_state_table_base;
+ /* Texture state base */
+ aligned_u64 texture_heap_base;
+
+ /* Event filter for Firmware events */
+ u64 hw_perf_filter;
+
+ aligned_u64 slc3_fence_dev_addr;
+
+ u32 tpu_trilinear_frac_mask[ROGUE_FWIF_TPU_DM_LAST] __aligned(8);
+
+ /* Signature and Checksum Buffers for DMs */
+ struct rogue_fwif_sigbuf_ctl sigbuf_ctl[PVR_FWIF_DM_MAX];
+
+ struct rogue_fwif_pdvfs_opp pdvfs_opp_info;
+
+ struct rogue_fwif_dma_addr coremem_data_store;
+
+ struct rogue_fwif_counter_dump_ctl counter_dump_ctl;
+
+ u32 filter_flags;
+
+ u32 runtime_cfg_fw_addr;
+
+ u32 trace_buf_ctl_fw_addr;
+ u32 fw_sys_data_fw_addr;
+
+ u32 gpu_util_fw_cb_ctl_fw_addr;
+ u32 reg_cfg_fw_addr;
+ u32 hwperf_ctl_fw_addr;
+
+ u32 align_checks;
+
+ /* Core clock speed at FW boot time */
+ u32 initial_core_clock_speed;
+
+ /* APM latency in ms before signalling IDLE to the host */
+ u32 active_pm_latency_ms;
+
+ /* Flag to be set by the Firmware after successful start */
+ bool firmware_started __aligned(4);
+
+ /* Host/FW Trace synchronisation Partition Marker */
+ u32 marker_val;
+
+ /* Firmware initialization complete time */
+ u32 firmware_started_timestamp;
+
+ u32 jones_disable_mask;
+
+ /* Firmware performance counter config */
+ enum fw_perf_conf firmware_perf;
+
+ /*
+ * FW Pointer to memory containing core clock rate in Hz.
+ * Firmware (PDVFS) updates the memory when running on non primary FW
+ * thread to communicate to host driver.
+ */
+ u32 core_clock_rate_fw_addr;
+
+ enum rogue_fwif_gpio_val_mode gpio_validation_mode;
+
+ /* Used in HWPerf for decoding BVNC Features */
+ struct rogue_hwperf_bvnc bvnc_km_feature_flags;
+
+ /* Value to write into ROGUE_CR_TFBC_COMPRESSION_CONTROL */
+ u32 tfbc_compression_control;
+} __aligned(8);
+
+/*
+ *****************************************************************************
+ * Timer correlation shared data and defines
+ *****************************************************************************
+ */
+
+struct rogue_fwif_time_corr {
+ aligned_u64 os_timestamp;
+ aligned_u64 os_mono_timestamp;
+ aligned_u64 cr_timestamp;
+
+ /*
+ * Utility variable used to convert CR timer deltas to OS timer deltas
+ * (nS), where the deltas are relative to the timestamps above:
+ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below
+ */
+ aligned_u64 cr_delta_to_os_delta_kns;
+
+ u32 core_clock_speed;
+ u32 reserved;
+} __aligned(8);
+
+/*
+ * The following macros are used to help converting FW timestamps to the Host
+ * time domain. On the FW the ROGUE_CR_TIMER counter is used to keep track of
+ * time; it increments by 1 every 256 GPU clock ticks, so the general
+ * formula to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ * otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ * deltaCR * 256 256 * scale
+ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ]
+ * GPUclockspeed GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to the base
+ * OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and periodic
+ * frequency calibration (executed every few seconds if the FW is doing
+ * some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+#define ROGUE_FWIF_GET_DELTA_OSTIME_NS(delta_cr, k) \
+ (((delta_cr) * (k)) >> ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
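+
+/*
+ * Illustrative sketch, not part of the FW interface: computing K as per the
+ * explanation above, with scale = 10^9 (nanoseconds) and the 2^20 accuracy
+ * shift folded in. Assumes div_u64() from <linux/math64.h>; the helper name
+ * is hypothetical.
+ */
+static inline u64 pvr_example_cr_delta_to_os_delta_kns(u32 gpu_clock_speed_hz)
+{
+ /* K = (256 * 10^9 << 20) / GPUclockspeed, kept as fixed point. */
+ u64 k = 256ULL * 1000000000ULL << ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT;
+
+ return div_u64(k, gpu_clock_speed_hz);
+}
+
+/*
+ * For example, at 500 MHz a CR delta of 1000 ticks converts to
+ * ROGUE_FWIF_GET_DELTA_OSTIME_NS(1000, k) = 1000 * 256 * 10^9 / (500 * 10^6)
+ * = 512000 ns.
+ */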
+
+/*
+ ******************************************************************************
+ * GPU Utilisation
+ ******************************************************************************
+ */
+
+/* See rogue_common.h for a list of GPU states */
+#define ROGUE_FWIF_GPU_UTIL_TIME_MASK \
+ (0xFFFFFFFFFFFFFFFFull & ~ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+#define ROGUE_FWIF_GPU_UTIL_GET_TIME(word) \
+ ((word) & ROGUE_FWIF_GPU_UTIL_TIME_MASK)
+#define ROGUE_FWIF_GPU_UTIL_GET_STATE(word) \
+ ((word) & ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+/*
+ * The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead the real timer on the
+ * Host. In some cases we can perform subtractions between FW approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if for instance the FW one is a bit ahead of time.
+ */
+#define ROGUE_FWIF_GPU_UTIL_GET_PERIOD(newtime, oldtime) \
+ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
+
+#define ROGUE_FWIF_GPU_UTIL_MAKE_WORD(time, state) \
+ (ROGUE_FWIF_GPU_UTIL_GET_TIME(time) | \
+ ROGUE_FWIF_GPU_UTIL_GET_STATE(state))
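+
+/*
+ * Illustrative sketch, not part of the FW interface: decoding a packed
+ * time/state word (such as last_word in struct rogue_fwif_gpu_util_fwcb
+ * below) with the helpers above. The function name is hypothetical.
+ */
+static inline void pvr_example_decode_util_word(u64 word, u64 *time_out,
+      u64 *state_out)
+{
+ *time_out = ROGUE_FWIF_GPU_UTIL_GET_TIME(word);
+ *state_out = ROGUE_FWIF_GPU_UTIL_GET_STATE(word);
+}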
+
+/*
+ * The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are
+ * processed by the MISR. The update frequency of this array depends on how fast
+ * the system can change state (basically how small the APM latency is) and
+ * perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries in the worst case the
+ * FW will read old data, which is still quite ok if the Host is updating the
+ * timer correlation at that time.
+ */
+#define ROGUE_FWIF_TIME_CORR_ARRAY_SIZE 256U
+#define ROGUE_FWIF_TIME_CORR_CURR_INDEX(seqcount) \
+ ((seqcount) % ROGUE_FWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((ROGUE_FWIF_TIME_CORR_ARRAY_SIZE &
+ (ROGUE_FWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
+ "ROGUE_FWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
+struct rogue_fwif_gpu_util_fwcb {
+ struct rogue_fwif_time_corr time_corr[ROGUE_FWIF_TIME_CORR_ARRAY_SIZE];
+ u32 time_corr_seq_count;
+
+ /* Compatibility and other flags */
+ u32 gpu_util_flags;
+
+ /* Last GPU state + OS time of the last state update */
+ aligned_u64 last_word;
+
+ /* Counters for the amount of time the GPU was active/idle/blocked */
+ aligned_u64 stats_counters[PVR_FWIF_GPU_UTIL_STATE_NUM];
+} __aligned(8);
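+
+/*
+ * Illustrative sketch, not part of the FW interface: a host-side ring update
+ * following the indexing scheme above. The next entry is written in full
+ * before the sequence count is bumped, so the FW only ever reads a fully
+ * published entry. The function name is hypothetical.
+ */
+static inline void
+pvr_example_time_corr_update(struct rogue_fwif_gpu_util_fwcb *fwcb,
+        const struct rogue_fwif_time_corr *corr)
+{
+ u32 next = ROGUE_FWIF_TIME_CORR_CURR_INDEX(fwcb->time_corr_seq_count + 1);
+
+ fwcb->time_corr[next] = *corr;
+ /* Publish: the FW now treats the new entry as current. */
+ fwcb->time_corr_seq_count++;
+}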
+
+struct rogue_fwif_rta_ctl {
+ /* Render number */
+ u32 render_target_index;
+ /* index in RTA */
+ u32 current_render_target;
+ /* total active RTs */
+ u32 active_render_targets;
+ /* total active RTs from the first TA kick, for OOM */
+ u32 cumul_active_render_targets;
+ /* Array of valid RT indices */
+ u32 valid_render_targets_fw_addr;
+ /* Array of number of occurred partial renders per render target */
+ u32 rta_num_partial_renders_fw_addr;
+ /* Number of render targets in the array */
+ u32 max_rts;
+ /* Compatibility and other flags */
+ u32 rta_ctl_flags;
+} __aligned(8);
+
+struct rogue_fwif_freelist {
+ aligned_u64 freelist_dev_addr;
+ aligned_u64 current_dev_addr;
+ u32 current_stack_top;
+ u32 max_pages;
+ u32 grow_pages;
+ /* HW pages */
+ u32 current_pages;
+ u32 allocated_page_count;
+ u32 allocated_mmu_page_count;
+ u32 freelist_id;
+
+ bool grow_pending __aligned(4);
+ /* Pages that should be used only when OOM is reached */
+ u32 ready_pages;
+ /* Compatibility and other flags */
+ u32 freelist_flags;
+ /* PM Global PB on which Freelist is loaded */
+ u32 pm_global_pb;
+ u32 padding;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * HWRTData
+ ******************************************************************************
+ */
+
+/* HWRTData flags */
+/* Deprecated flags 1:0 */
+#define HWRTDATA_HAS_LAST_GEOM BIT(2)
+#define HWRTDATA_PARTIAL_RENDERED BIT(3)
+#define HWRTDATA_DISABLE_TILE_REORDERING BIT(4)
+#define HWRTDATA_NEED_BRN65101_BLIT BIT(5)
+#define HWRTDATA_FIRST_BRN65101_STRIP BIT(6)
+#define HWRTDATA_NEED_BRN67182_2ND_RENDER BIT(7)
+
+enum rogue_fwif_rtdata_state {
+ ROGUE_FWIF_RTDATA_STATE_NONE = 0,
+ ROGUE_FWIF_RTDATA_STATE_KICK_GEOM,
+ ROGUE_FWIF_RTDATA_STATE_KICK_GEOM_FIRST,
+ ROGUE_FWIF_RTDATA_STATE_GEOM_FINISHED,
+ ROGUE_FWIF_RTDATA_STATE_KICK_FRAG,
+ ROGUE_FWIF_RTDATA_STATE_FRAG_FINISHED,
+ ROGUE_FWIF_RTDATA_STATE_FRAG_CONTEXT_STORED,
+ ROGUE_FWIF_RTDATA_STATE_GEOM_OUTOFMEM,
+ ROGUE_FWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+ /*
+ * In case of HWR, we can't set the RTDATA state to NONE, as this would
+ * cause any TA to become a first TA. To ensure all related TAs are
+ * skipped, we use the HWR state.
+ */
+ ROGUE_FWIF_RTDATA_STATE_HWR,
+ ROGUE_FWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
+};
+
+struct rogue_fwif_hwrtdata_common {
+ bool geom_caches_need_zeroing __aligned(4);
+
+ u32 screen_pixel_max;
+ aligned_u64 multi_sample_ctl;
+ u64 flipped_multi_sample_ctl;
+ u32 tpc_stride;
+ u32 tpc_size;
+ u32 te_screen;
+ u32 mtile_stride;
+ u32 teaa;
+ u32 te_mtile1;
+ u32 te_mtile2;
+ u32 isp_merge_lower_x;
+ u32 isp_merge_lower_y;
+ u32 isp_merge_upper_x;
+ u32 isp_merge_upper_y;
+ u32 isp_merge_scale_x;
+ u32 isp_merge_scale_y;
+ u32 rgn_header_size;
+ u32 isp_mtile_size;
+ u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_hwrtdata {
+ /* MList Data Store */
+ aligned_u64 pm_mlist_dev_addr;
+
+ aligned_u64 vce_cat_base[4];
+ aligned_u64 vce_last_cat_base[4];
+ aligned_u64 te_cat_base[4];
+ aligned_u64 te_last_cat_base[4];
+ aligned_u64 alist_cat_base;
+ aligned_u64 alist_last_cat_base;
+
+ aligned_u64 pm_alist_stack_pointer;
+ u32 pm_mlist_stack_pointer;
+
+ u32 hwrt_data_common_fw_addr;
+
+ u32 hwrt_data_flags;
+ enum rogue_fwif_rtdata_state state;
+
+ u32 freelists_fw_addr[MAX_FREELISTS_SIZE] __aligned(8);
+ u32 freelist_hwr_snapshot[MAX_FREELISTS_SIZE];
+
+ aligned_u64 vheap_table_dev_addr;
+
+ struct rogue_fwif_rta_ctl rta_ctl;
+
+ aligned_u64 tail_ptrs_dev_addr;
+ aligned_u64 macrotile_array_dev_addr;
+ aligned_u64 rgn_header_dev_addr;
+ aligned_u64 rtc_dev_addr;
+
+ u32 owner_geom_not_used_by_host __aligned(8);
+
+ bool geom_caches_need_zeroing __aligned(4);
+
+ struct rogue_fwif_cleanup_ctl cleanup_state __aligned(64);
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Sync checkpoints
+ ******************************************************************************
+ */
+
+#define PVR_SYNC_CHECKPOINT_UNDEF 0x000
+#define PVR_SYNC_CHECKPOINT_ACTIVE 0xac1 /* Checkpoint has not signaled. */
+#define PVR_SYNC_CHECKPOINT_SIGNALED 0x519 /* Checkpoint has signaled. */
+#define PVR_SYNC_CHECKPOINT_ERRORED 0xeff /* Checkpoint has been errored. */
+
+#include "pvr_rogue_fwif_check.h"
+
+#endif /* PVR_ROGUE_FWIF_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
new file mode 100644
index 000000000..51dc37e78
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CHECK_H
+#define PVR_ROGUE_FWIF_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+ static_assert(offsetof(type, member) == (offset), \
+ "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+ static_assert(sizeof(type) == (size), #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, path, 0);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, info, 200);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, line_num, 400);
+SIZE_CHECK(struct rogue_fwif_file_info_buf, 408);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, assert_buf, 16);
+SIZE_CHECK(struct rogue_fwif_tracebuf_space, 424);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf, log_type, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_size_in_dwords, 856);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_flags, 860);
+SIZE_CHECK(struct rogue_fwif_tracebuf, 864);
+
+OFFSET_CHECK(struct rogue_fw_fault_info, cr_timer, 0);
+OFFSET_CHECK(struct rogue_fw_fault_info, os_timer, 8);
+OFFSET_CHECK(struct rogue_fw_fault_info, data, 16);
+OFFSET_CHECK(struct rogue_fw_fault_info, reserved, 20);
+OFFSET_CHECK(struct rogue_fw_fault_info, fault_buf, 24);
+SIZE_CHECK(struct rogue_fw_fault_info, 432);
+
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags_ext, 4);
+OFFSET_CHECK(struct rogue_fwif_sysdata, pow_state, 8);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ridx, 12);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_widx, 16);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_wrap_count, 20);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_size, 24);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_drop_count, 28);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ut, 32);
+OFFSET_CHECK(struct rogue_fwif_sysdata, first_drop_ordinal, 36);
+OFFSET_CHECK(struct rogue_fwif_sysdata, last_drop_ordinal, 40);
+OFFSET_CHECK(struct rogue_fwif_sysdata, os_runtime_flags_mirror, 44);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fault_info, 80);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_faults, 3536);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_addr, 3540);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_mask, 3548);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_count, 3556);
+OFFSET_CHECK(struct rogue_fwif_sysdata, start_idle_time, 3568);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_state_flags, 3576);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_recovery_flags, 3580);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_sys_data_flags, 3616);
+OFFSET_CHECK(struct rogue_fwif_sysdata, mc_config, 3620);
+SIZE_CHECK(struct rogue_fwif_sysdata, 3624);
+
+OFFSET_CHECK(struct rogue_fwif_slr_entry, timestamp, 0);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, fw_ctx_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, num_ufos, 12);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, ccb_name, 16);
+SIZE_CHECK(struct rogue_fwif_slr_entry, 48);
+
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_sync_check_mark, 4);
+OFFSET_CHECK(struct rogue_fwif_osdata, host_sync_check_mark, 8);
+OFFSET_CHECK(struct rogue_fwif_osdata, forced_updates_requested, 12);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_wp, 16);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_first, 24);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log, 72);
+OFFSET_CHECK(struct rogue_fwif_osdata, last_forced_update_time, 552);
+OFFSET_CHECK(struct rogue_fwif_osdata, interrupt_count, 560);
+OFFSET_CHECK(struct rogue_fwif_osdata, kccb_cmds_executed, 568);
+OFFSET_CHECK(struct rogue_fwif_osdata, power_sync_fw_addr, 572);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_data_flags, 576);
+SIZE_CHECK(struct rogue_fwif_osdata, 584);
+
+OFFSET_CHECK(struct rogue_bifinfo, bif_req_status, 0);
+OFFSET_CHECK(struct rogue_bifinfo, bif_mmu_status, 8);
+OFFSET_CHECK(struct rogue_bifinfo, pc_address, 16);
+OFFSET_CHECK(struct rogue_bifinfo, reserved, 24);
+SIZE_CHECK(struct rogue_bifinfo, 32);
+
+OFFSET_CHECK(struct rogue_eccinfo, fault_gpu, 0);
+SIZE_CHECK(struct rogue_eccinfo, 4);
+
+OFFSET_CHECK(struct rogue_mmuinfo, mmu_status, 0);
+OFFSET_CHECK(struct rogue_mmuinfo, pc_address, 16);
+OFFSET_CHECK(struct rogue_mmuinfo, reserved, 24);
+SIZE_CHECK(struct rogue_mmuinfo, 32);
+
+OFFSET_CHECK(struct rogue_pollinfo, thread_num, 0);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_addr, 4);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_mask, 8);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_last_value, 12);
+OFFSET_CHECK(struct rogue_pollinfo, reserved, 16);
+SIZE_CHECK(struct rogue_pollinfo, 24);
+
+OFFSET_CHECK(struct rogue_tlbinfo, bad_addr, 0);
+OFFSET_CHECK(struct rogue_tlbinfo, entry_lo, 4);
+SIZE_CHECK(struct rogue_tlbinfo, 8);
+
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_data, 0);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_timer, 32);
+OFFSET_CHECK(struct rogue_hwrinfo, os_timer, 40);
+OFFSET_CHECK(struct rogue_hwrinfo, frame_num, 48);
+OFFSET_CHECK(struct rogue_hwrinfo, pid, 52);
+OFFSET_CHECK(struct rogue_hwrinfo, active_hwrt_data, 56);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_number, 60);
+OFFSET_CHECK(struct rogue_hwrinfo, event_status, 64);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_recovery_flags, 68);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_type, 72);
+OFFSET_CHECK(struct rogue_hwrinfo, dm, 76);
+OFFSET_CHECK(struct rogue_hwrinfo, core_id, 80);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_of_kick, 88);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_start, 96);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_finish, 104);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_freelist_ready, 112);
+OFFSET_CHECK(struct rogue_hwrinfo, reserved, 120);
+SIZE_CHECK(struct rogue_hwrinfo, 136);
+
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_counter, 2176);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, write_index, 2180);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, dd_req_count, 2184);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info_buf_flags, 2188);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_locked_up_count, 2192);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_overran_count, 2228);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_recovered_count, 2264);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_false_detect_count, 2300);
+SIZE_CHECK(struct rogue_fwif_hwrinfobuf, 2336);
+
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, pc_dev_paddr, 0);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, page_cat_base_reg_set, 8);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_addr, 12);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, bp_handler_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_ctl, 20);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, fw_mem_ctx_flags, 24);
+SIZE_CHECK(struct rogue_fwif_fwmemcontext, 32);
+
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer_init, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vbs_so_prim, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_current_idx, 32);
+SIZE_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, 40);
+
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state, geom_core, 0);
+SIZE_CHECK(struct rogue_fwif_geom_ctx_state, 160);
+
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_pm_deallocated_mask_status, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_dm_pds_mtilefree_status, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, ctx_state_flags, 8);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_isp_store, 12);
+SIZE_CHECK(struct rogue_fwif_frag_ctx_state, 16);
+
+OFFSET_CHECK(struct rogue_fwif_compute_ctx_state, ctx_state_flags, 0);
+SIZE_CHECK(struct rogue_fwif_compute_ctx_state, 4);
+
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccbctl_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_meta_dma_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, context_state_addr, 24);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_com_ctx_flags, 28);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority, 32);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority_seq_num, 36);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, rf_cmd_addr, 40);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_pending, 44);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_stores, 48);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_out_of_memory, 52);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_partial_renders, 56);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, dm, 60);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_address, 64);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_node, 72);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, buf_stalled_node, 80);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, cbuf_queue_ctrl_addr, 88);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, robustness_address, 96);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, max_deadline_ms, 104);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, read_offset_needs_reset, 108);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, waiting_node, 112);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, run_node, 120);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, last_failed_ufo, 128);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_mem_context_fw_addr, 136);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, server_common_context_id, 140);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, pid, 144);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, geom_oom_disabled, 148);
+SIZE_CHECK(struct rogue_fwif_fwcommoncontext, 152);
+
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, write_offset, 0);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding, 4);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, read_offset, 128);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, wrap_mask, 132);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, cmd_size, 136);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding2, 140);
+SIZE_CHECK(struct rogue_fwif_ccb_ctl, 144);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_woff_update, 4);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_wrap_mask_update, 8);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, num_cleanup_ctl, 12);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, cleanup_ctl_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, work_est_cmd_header_offset, 28);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_kick_data, 32);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, geom_cmd_kick_data, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, frag_cmd_kick_data, 32);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, 64);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, ccb_fence_offset, 4);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_type, 0);
+OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_data, 4);
+SIZE_CHECK(struct rogue_fwif_cleanup_request, 8);
+
+OFFSET_CHECK(struct rogue_fwif_power_request, pow_type, 0);
+OFFSET_CHECK(struct rogue_fwif_power_request, power_req_data, 4);
+SIZE_CHECK(struct rogue_fwif_power_request, 8);
+
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, inval, 4);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, dm_context, 8);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, address, 16);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, size, 24);
+SIZE_CHECK(struct rogue_fwif_slcflushinvaldata, 32);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, opcode, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, mask, 8);
+SIZE_CHECK(struct rogue_fwif_hwperf_ctrl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, num_blocks, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, block_configs_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_config_enable_blks, 8);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, num_blocks, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, block_configs_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_config_da_blks, 8);
+
+OFFSET_CHECK(struct rogue_fwif_coreclkspeedchange_data, new_clock_speed, 0);
+SIZE_CHECK(struct rogue_fwif_coreclkspeedchange_data, 4);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, enable, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, num_blocks, 4);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, block_ids, 8);
+SIZE_CHECK(struct rogue_fwif_hwperf_ctrl_blks, 40);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_block, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, num_counters, 2);
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_counter_ids_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, zs_buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, done, 4);
+SIZE_CHECK(struct rogue_fwif_zsbuffer_backing_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, freelist_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, delta_pages, 4);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, new_pages, 8);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, ready_pages, 12);
+SIZE_CHECK(struct rogue_fwif_freelist_gs_data, 16);
+
+OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_count, 0);
+OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_ids, 4);
+SIZE_CHECK(struct rogue_fwif_freelists_reconstruction_data, 76);
+
+OFFSET_CHECK(struct rogue_fwif_write_offset_update_data, context_fw_addr, 0);
+SIZE_CHECK(struct rogue_fwif_write_offset_update_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, kccb_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_data, 8);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd, 88);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, server_common_context_id, 0);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_reason, 4);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, dm, 8);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_job_ref, 12);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, flags, 16);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, pc_address, 24);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, fault_address, 32);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, 40);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, fw_fault_addr, 0);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, fwccb_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_data, 8);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd, 88);
+
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_size, 4);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, ext_job_ref, 8);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, int_job_ref, 12);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, work_est_kick_data, 16);
+SIZE_CHECK(struct rogue_fwif_ccb_cmd_header, 40);
+
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_ms, 0);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, runtime_cfg_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_persistant, 8);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, core_clock_speed, 12);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, default_dusts_num_init, 16);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, phr_mode, 20);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hcs_deadline_ms, 24);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, wdg_period_us, 28);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, osid_priority, 32);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hwperf_buf_fw_addr, 64);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, padding, 68);
+SIZE_CHECK(struct rogue_fwif_runtime_cfg, 72);
+
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_fw_state, 0);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_os_state, 4);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_fw_token, 8);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_os_token, 12);
+SIZE_CHECK(struct rogue_fwif_connection_ctl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, layout_version, 0);
+OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, bvnc, 8);
+SIZE_CHECK(struct rogue_fwif_compchecks_bvnc, 16);
+
+OFFSET_CHECK(struct rogue_fwif_init_options, os_count_support, 0);
+SIZE_CHECK(struct rogue_fwif_init_options, 8);
+
+OFFSET_CHECK(struct rogue_fwif_compchecks, hw_bvnc, 0);
+OFFSET_CHECK(struct rogue_fwif_compchecks, fw_bvnc, 16);
+OFFSET_CHECK(struct rogue_fwif_compchecks, fw_processor_version, 32);
+OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_version, 36);
+OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_build, 40);
+OFFSET_CHECK(struct rogue_fwif_compchecks, build_options, 44);
+OFFSET_CHECK(struct rogue_fwif_compchecks, init_options, 48);
+OFFSET_CHECK(struct rogue_fwif_compchecks, updated, 56);
+SIZE_CHECK(struct rogue_fwif_compchecks, 64);
+
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccbctl_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_rtn_slots_fw_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccbctl_fw_addr, 12);
+OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccb_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccbctl_fw_addr, 20);
+OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccb_fw_addr, 24);
+OFFSET_CHECK(struct rogue_fwif_osinit, rogue_fwif_hwr_info_buf_ctl_fw_addr, 28);
+OFFSET_CHECK(struct rogue_fwif_osinit, hwr_debug_dump_limit, 32);
+OFFSET_CHECK(struct rogue_fwif_osinit, fw_os_data_fw_addr, 36);
+OFFSET_CHECK(struct rogue_fwif_osinit, rogue_comp_checks, 40);
+SIZE_CHECK(struct rogue_fwif_osinit, 104);
+
+OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, left_size_in_regs, 4);
+SIZE_CHECK(struct rogue_fwif_sigbuf_ctl, 8);
+
+OFFSET_CHECK(struct pdvfs_opp, volt, 0);
+OFFSET_CHECK(struct pdvfs_opp, freq, 4);
+SIZE_CHECK(struct pdvfs_opp, 8);
+
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, opp_values, 0);
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, min_opp_point, 128);
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, max_opp_point, 132);
+SIZE_CHECK(struct rogue_fwif_pdvfs_opp, 136);
+
+OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, size_in_dwords, 4);
+SIZE_CHECK(struct rogue_fwif_counter_dump_ctl, 8);
+
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_string, 0);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_km_feature_flags, 24);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, num_bvnc_blocks, 28);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_gpu_cores, 30);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_blocks, 32);
+SIZE_CHECK(struct rogue_hwperf_bvnc, 160);
+
+OFFSET_CHECK(struct rogue_fwif_sysinit, fault_phys_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_sysinit, pds_exec_base, 8);
+OFFSET_CHECK(struct rogue_fwif_sysinit, usc_exec_base, 16);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_state_table_base, 24);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_large_state_table_base, 32);
+OFFSET_CHECK(struct rogue_fwif_sysinit, texture_heap_base, 40);
+OFFSET_CHECK(struct rogue_fwif_sysinit, hw_perf_filter, 48);
+OFFSET_CHECK(struct rogue_fwif_sysinit, slc3_fence_dev_addr, 56);
+OFFSET_CHECK(struct rogue_fwif_sysinit, tpu_trilinear_frac_mask, 64);
+OFFSET_CHECK(struct rogue_fwif_sysinit, sigbuf_ctl, 80);
+OFFSET_CHECK(struct rogue_fwif_sysinit, pdvfs_opp_info, 152);
+OFFSET_CHECK(struct rogue_fwif_sysinit, coremem_data_store, 288);
+OFFSET_CHECK(struct rogue_fwif_sysinit, counter_dump_ctl, 304);
+OFFSET_CHECK(struct rogue_fwif_sysinit, filter_flags, 312);
+OFFSET_CHECK(struct rogue_fwif_sysinit, runtime_cfg_fw_addr, 316);
+OFFSET_CHECK(struct rogue_fwif_sysinit, trace_buf_ctl_fw_addr, 320);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fw_sys_data_fw_addr, 324);
+OFFSET_CHECK(struct rogue_fwif_sysinit, gpu_util_fw_cb_ctl_fw_addr, 328);
+OFFSET_CHECK(struct rogue_fwif_sysinit, reg_cfg_fw_addr, 332);
+OFFSET_CHECK(struct rogue_fwif_sysinit, hwperf_ctl_fw_addr, 336);
+OFFSET_CHECK(struct rogue_fwif_sysinit, align_checks, 340);
+OFFSET_CHECK(struct rogue_fwif_sysinit, initial_core_clock_speed, 344);
+OFFSET_CHECK(struct rogue_fwif_sysinit, active_pm_latency_ms, 348);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started, 352);
+OFFSET_CHECK(struct rogue_fwif_sysinit, marker_val, 356);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started_timestamp, 360);
+OFFSET_CHECK(struct rogue_fwif_sysinit, jones_disable_mask, 364);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_perf, 368);
+OFFSET_CHECK(struct rogue_fwif_sysinit, core_clock_rate_fw_addr, 372);
+OFFSET_CHECK(struct rogue_fwif_sysinit, gpio_validation_mode, 376);
+OFFSET_CHECK(struct rogue_fwif_sysinit, bvnc_km_feature_flags, 380);
+OFFSET_CHECK(struct rogue_fwif_sysinit, tfbc_compression_control, 540);
+SIZE_CHECK(struct rogue_fwif_sysinit, 544);
+
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr, 0);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr_seq_count, 10240);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, gpu_util_flags, 10244);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, last_word, 10248);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, stats_counters, 10256);
+SIZE_CHECK(struct rogue_fwif_gpu_util_fwcb, 10280);
+
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, render_target_index, 0);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, current_render_target, 4);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, active_render_targets, 8);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, cumul_active_render_targets, 12);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, valid_render_targets_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_num_partial_renders_fw_addr, 20);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, max_rts, 24);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_ctl_flags, 28);
+SIZE_CHECK(struct rogue_fwif_rta_ctl, 32);
+
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_dev_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_stack_top, 16);
+OFFSET_CHECK(struct rogue_fwif_freelist, max_pages, 20);
+OFFSET_CHECK(struct rogue_fwif_freelist, grow_pages, 24);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_pages, 28);
+OFFSET_CHECK(struct rogue_fwif_freelist, allocated_page_count, 32);
+OFFSET_CHECK(struct rogue_fwif_freelist, allocated_mmu_page_count, 36);
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_id, 40);
+OFFSET_CHECK(struct rogue_fwif_freelist, grow_pending, 44);
+OFFSET_CHECK(struct rogue_fwif_freelist, ready_pages, 48);
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_flags, 52);
+OFFSET_CHECK(struct rogue_fwif_freelist, pm_global_pb, 56);
+SIZE_CHECK(struct rogue_fwif_freelist, 64);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, geom_caches_need_zeroing, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, screen_pixel_max, 4);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, multi_sample_ctl, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, flipped_multi_sample_ctl, 16);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_stride, 24);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_size, 28);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_screen, 32);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, mtile_stride, 36);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, teaa, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile1, 44);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile2, 48);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_x, 52);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_y, 56);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_x, 60);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_y, 64);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_x, 68);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_y, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, rgn_header_size, 76);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_mtile_size, 80);
+SIZE_CHECK(struct rogue_fwif_hwrtdata_common, 88);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_cat_base, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_last_cat_base, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_cat_base, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_last_cat_base, 104);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_cat_base, 136);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_last_cat_base, 144);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_alist_stack_pointer, 152);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_stack_pointer, 160);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_common_fw_addr, 164);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_flags, 168);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, state, 172);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelists_fw_addr, 176);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelist_hwr_snapshot, 188);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vheap_table_dev_addr, 200);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rta_ctl, 208);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, tail_ptrs_dev_addr, 240);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, macrotile_array_dev_addr, 248);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rgn_header_dev_addr, 256);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rtc_dev_addr, 264);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, owner_geom_not_used_by_host, 272);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, geom_caches_need_zeroing, 276);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, cleanup_state, 320);
+SIZE_CHECK(struct rogue_fwif_hwrtdata, 384);
+
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, state, 0);
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, fw_ref_count, 4);
+SIZE_CHECK(struct rogue_fwif_sync_checkpoint, 8);
+
+#endif /* PVR_ROGUE_FWIF_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
new file mode 100644
index 000000000..6e2244000
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CLIENT_H
+#define PVR_ROGUE_FWIF_CLIENT_H
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ * Page size used for Parameter Management.
+ */
+#define ROGUE_PM_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum/Maximum PB size.
+ *
+ * Base page size is dependent on core:
+ * S6/S6XT/S7 = 50 pages
+ * S8XE = 40 pages
+ * S8XE with BRN66011 fixed = 25 pages
+ *
+ * Minimum PB = Base Pages + (NUM_TE_PIPES-1)*16K + (NUM_VCE_PIPES-1)*64K +
+ * IF_PM_PREALLOC(NUM_TE_PIPES*16K + NUM_VCE_PIPES*16K)
+ *
+ * Maximum PB size must ensure that no PM address space can be fully used,
+ * because if the full address space was used it would wrap and corrupt itself.
+ * Since there are two freelists (local is always minimum sized) this can be
+ * described as following three conditions being met:
+ *
+ * (Minimum PB + Maximum PB) < ALIST PM address space size (16GB)
+ * (Minimum PB + Maximum PB) < TE PM address space size (16GB) / NUM_TE_PIPES
+ * (Minimum PB + Maximum PB) < VCE PM address space size (16GB) / NUM_VCE_PIPES
+ *
+ * Since the max of NUM_TE_PIPES and NUM_VCE_PIPES is 4, we have a hard limit
+ * of 4GB minus the Minimum PB. For convenience we take the smaller power-of-2
+ * value of 2GB. This is far more than any current applications use.
+ */
+#define ROGUE_PM_MAX_FREELIST_SIZE SZ_2G
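+
+/*
+ * Illustrative sketch, not part of the FW interface: a bounds check a driver
+ * might apply when validating a requested freelist size against the limits
+ * derived above. min_pb_pages stands for the core-dependent base page count;
+ * all names here are hypothetical.
+ */
+static inline bool pvr_example_pb_size_valid(u64 size_bytes, u32 min_pb_pages)
+{
+ u64 min_bytes = (u64)min_pb_pages * ROGUE_PM_PAGE_SIZE;
+
+ return size_bytes >= min_bytes && size_bytes <= ROGUE_PM_MAX_FREELIST_SIZE;
+}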
+
+/*
+ * Flags supported by the geometry DM command i.e. &struct rogue_fwif_cmd_geom.
+ */
+
+#define ROGUE_GEOM_FLAGS_FIRSTKICK BIT_MASK(0)
+#define ROGUE_GEOM_FLAGS_LASTKICK BIT_MASK(1)
+/* Use single core in a multi core setup. */
+#define ROGUE_GEOM_FLAGS_SINGLE_CORE BIT_MASK(3)
+
+/*
+ * Flags supported by the fragment DM command i.e. &struct rogue_fwif_cmd_frag.
+ */
+
+/* Use single core in a multi core setup. */
+#define ROGUE_FRAG_FLAGS_SINGLE_CORE BIT_MASK(3)
+/* Indicates whether this render produces visibility results. */
+#define ROGUE_FRAG_FLAGS_GET_VIS_RESULTS BIT_MASK(5)
+/* Indicates whether a depth buffer is present. */
+#define ROGUE_FRAG_FLAGS_DEPTHBUFFER BIT_MASK(7)
+/* Indicates whether a stencil buffer is present. */
+#define ROGUE_FRAG_FLAGS_STENCILBUFFER BIT_MASK(8)
+/* Disable pixel merging for this render. */
+#define ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE BIT_MASK(15)
+/* Indicates whether a scratch buffer is present. */
+#define ROGUE_FRAG_FLAGS_SCRATCHBUFFER BIT_MASK(19)
+/* Disallow compute overlapped with this render. */
+#define ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP BIT_MASK(26)
+
+/*
+ * Flags supported by the compute DM command i.e. &struct rogue_fwif_cmd_compute.
+ */
+
+#define ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP BIT_MASK(2)
+/* Use single core in a multi core setup. */
+#define ROGUE_COMPUTE_FLAG_SINGLE_CORE BIT_MASK(5)
+
+/*
+ * Flags supported by the transfer DM command i.e. &struct rogue_fwif_cmd_transfer.
+ */
+
+/* Use single core in a multi core setup. */
+#define ROGUE_TRANSFER_FLAGS_SINGLE_CORE BIT_MASK(1)
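+
+/*
+ * Illustrative sketch, not part of the FW interface: a geometry job that both
+ * opens and closes a render combines the geometry kick flags defined above
+ * like so. The function name is hypothetical.
+ */
+static inline u32 pvr_example_geom_flags(bool first, bool last, bool single_core)
+{
+ u32 flags = 0;
+
+ if (first)
+  flags |= ROGUE_GEOM_FLAGS_FIRSTKICK;
+ if (last)
+  flags |= ROGUE_GEOM_FLAGS_LASTKICK;
+ if (single_core)
+  flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE;
+
+ return flags;
+}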
+
+/*
+ ************************************************
+ * Parameter/HWRTData control structures.
+ ************************************************
+ */
+
+/*
+ * Configuration registers which need to be loaded by the firmware before a geometry
+ * job can be started.
+ */
+struct rogue_fwif_geom_regs {
+ u64 vdm_ctrl_stream_base;
+ u64 tpu_border_colour_table;
+
+ /* Only used when feature VDM_DRAWINDIRECT present. */
+ u64 vdm_draw_indirect0;
+ /* Only used when feature VDM_DRAWINDIRECT present. */
+ u32 vdm_draw_indirect1;
+
+ u32 ppp_ctrl;
+ u32 te_psg;
+ /* Only used when BRN 49927 present. */
+ u32 tpu;
+
+ u32 vdm_context_resume_task0_size;
+ /* Only used when feature VDM_OBJECT_LEVEL_LLS present. */
+ u32 vdm_context_resume_task3_size;
+
+ /* Only used when BRN 56279 or BRN 67381 present. */
+ u32 pds_ctrl;
+
+ u32 view_idx;
+
+ /* Only used when feature TESSELLATION present */
+ u32 pds_coeff_free_prog;
+
+ u32 padding;
+};
+
+/* Only used when BRN 44455 or BRN 63027 present. */
+struct rogue_fwif_dummy_rgnhdr_init_geom_regs {
+ u64 te_psgregion_addr;
+};
+
+/*
+ * Represents a geometry command that can be used to tile a whole scene's objects as
+ * per TA behavior.
+ */
+struct rogue_fwif_cmd_geom {
+ /*
+ * rogue_fwif_cmd_geom_frag_shared field must always be at the beginning of the
+ * struct.
+ *
+ * The command struct (rogue_fwif_cmd_geom) is shared between Client and
+ * Firmware. The kernel is unable to perform read/write operations on the
+ * command struct; the SHARED region is the only exception to this rule.
+ * This region must be the first member so that Kernel can easily access it.
+ * For more info, see rogue_fwif_cmd_geom_frag_shared definition.
+ */
+ struct rogue_fwif_cmd_geom_frag_shared cmd_shared;
+
+ struct rogue_fwif_geom_regs regs __aligned(8);
+ u32 flags __aligned(8);
+
+ /*
+ * Holds the geometry/fragment fence value to allow the fragment partial render command
+ * to go through.
+ */
+ struct rogue_fwif_ufo partial_render_geom_frag_fence;
+
+ /* Only used when BRN 44455 or BRN 63027 present. */
+ struct rogue_fwif_dummy_rgnhdr_init_geom_regs dummy_rgnhdr_init_geom_regs __aligned(8);
+
+ /* Only used when BRN 61484 or BRN 66333 present. */
+ u32 brn61484_66333_live_rt;
+
+ u32 padding;
+};
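+
+/*
+ * Illustrative sketch, not part of the FW interface: because cmd_shared is
+ * guaranteed to be the first member, kernel code can view a client command
+ * through the shared header alone, without knowing the full command layout.
+ * The helper name is hypothetical.
+ */
+static inline struct rogue_fwif_cmd_geom_frag_shared *
+pvr_example_cmd_shared(void *cmd)
+{
+ /* Valid for struct rogue_fwif_cmd_geom and struct rogue_fwif_cmd_frag. */
+ return (struct rogue_fwif_cmd_geom_frag_shared *)cmd;
+}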
+
+/*
+ * Configuration registers which need to be loaded by the firmware before ISP
+ * can be started.
+ */
+struct rogue_fwif_frag_regs {
+ u32 usc_pixel_output_ctrl;
+
+#define ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL 8U
+ u32 usc_clear_register[ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL];
+
+ u32 isp_bgobjdepth;
+ u32 isp_bgobjvals;
+ u32 isp_aa;
+ /* Only used when feature S7_TOP_INFRASTRUCTURE present. */
+ u32 isp_xtp_pipe_enable;
+
+ u32 isp_ctl;
+
+ /* Only used when BRN 49927 present. */
+ u32 tpu;
+
+ u32 event_pixel_pds_info;
+
+ /* Only used when feature CLUSTER_GROUPING present. */
+ u32 pixel_phantom;
+
+ u32 view_idx;
+
+ u32 event_pixel_pds_data;
+
+ /* Only used when BRN 65101 present. */
+ u32 brn65101_event_pixel_pds_data;
+
+ /* Only used when feature GPU_MULTICORE_SUPPORT or BRN 47217 present. */
+ u32 isp_oclqry_stride;
+
+ /* Only used when feature ZLS_SUBTILE present. */
+ u32 isp_zls_pixels;
+
+ /* Only used when feature ISP_ZLS_D24_S8_PACKING_OGL_MODE present. */
+ u32 rgx_cr_blackpearl_fix;
+
+ /* All values below the aligned_u64 must be 64 bit. */
+ aligned_u64 isp_scissor_base;
+ u64 isp_dbias_base;
+ u64 isp_oclqry_base;
+ u64 isp_zlsctl;
+ u64 isp_zload_store_base;
+ u64 isp_stencil_load_store_base;
+
+ /*
+ * Only used when feature FBCDC_ALGORITHM present and value < 3 or feature
+ * FB_CDC_V4 present. Additionally, BRNs 48754, 60227, 72310 and 72311 must
+ * not be present.
+ */
+ u64 fb_cdc_zls;
+
+#define ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS 3U
+ u64 pbe_word[8U][ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS];
+ u64 tpu_border_colour_table;
+ u64 pds_bgnd[3U];
+
+ /* Only used when BRN 65101 present. */
+ u64 pds_bgnd_brn65101[3U];
+
+ u64 pds_pr_bgnd[3U];
+
+ /* Only used when BRN 62850 or 62865 present. */
+ u64 isp_dummy_stencil_store_base;
+
+ /* Only used when BRN 66193 present. */
+ u64 isp_dummy_depth_store_base;
+
+ /* Only used when BRN 67182 present. */
+ u32 rgnhdr_single_rt_size;
+ /* Only used when BRN 67182 present. */
+ u32 rgnhdr_scratch_offset;
+};
+
+struct rogue_fwif_cmd_frag {
+ struct rogue_fwif_cmd_geom_frag_shared cmd_shared __aligned(8);
+
+ struct rogue_fwif_frag_regs regs __aligned(8);
+ /* command control flags. */
+ u32 flags;
+ /* Stride IN BYTES for Z-Buffer in case of RTAs. */
+ u32 zls_stride;
+ /* Stride IN BYTES for S-Buffer in case of RTAs. */
+ u32 sls_stride;
+
+ /* Only used if feature GPU_MULTICORE_SUPPORT present. */
+ u32 execute_count;
+};
+
+/*
+ * Configuration registers which need to be loaded by the firmware before CDM
+ * can be started.
+ */
+struct rogue_fwif_compute_regs {
+ u64 tpu_border_colour_table;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u64 cdm_cb_queue;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u64 cdm_cb_base;
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u64 cdm_cb;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE is not present. */
+ u64 cdm_ctrl_stream_base;
+
+ u64 cdm_context_state_base_addr;
+
+ /* Only used when BRN 49927 is present. */
+ u32 tpu;
+ u32 cdm_resume_pds1;
+
+ /* Only used when feature COMPUTE_MORTON_CAPABLE present. */
+ u32 cdm_item;
+
+ /* Only used when feature CLUSTER_GROUPING present. */
+ u32 compute_cluster;
+
+ /* Only used when feature TPU_DM_GLOBAL_REGISTERS present. */
+ u32 tpu_tag_cdm_ctrl;
+
+ u32 padding;
+};
+
+struct rogue_fwif_cmd_compute {
+ /* Common command attributes */
+ struct rogue_fwif_cmd_common common __aligned(8);
+
+ /* CDM registers */
+ struct rogue_fwif_compute_regs regs;
+
+ /* Control flags */
+ u32 flags __aligned(8);
+
+ /* Only used when feature UNIFIED_STORE_VIRTUAL_PARTITIONING present. */
+ u32 num_temp_regions;
+
+ /* Only used when feature CDM_USER_MODE_QUEUE present. */
+ u32 stream_start_offset;
+
+ /* Only used when feature GPU_MULTICORE_SUPPORT present. */
+ u32 execute_count;
+};
+
+struct rogue_fwif_transfer_regs {
+ /*
+ * All 32 bit values should be added in the top section. This then requires only a
+ * single aligned_u64 to align all the 64 bit values in the second section.
+ */
+ u32 isp_bgobjvals;
+
+ u32 usc_pixel_output_ctrl;
+ u32 usc_clear_register0;
+ u32 usc_clear_register1;
+ u32 usc_clear_register2;
+ u32 usc_clear_register3;
+
+ u32 isp_mtile_size;
+ u32 isp_render_origin;
+ u32 isp_ctl;
+
+ /* Only used when feature S7_TOP_INFRASTRUCTURE present. */
+ u32 isp_xtp_pipe_enable;
+ u32 isp_aa;
+
+ u32 event_pixel_pds_info;
+
+ u32 event_pixel_pds_code;
+ u32 event_pixel_pds_data;
+
+ u32 isp_render;
+ u32 isp_rgn;
+
+ /* Only used when feature GPU_MULTICORE_SUPPORT present. */
+ u32 frag_screen;
+
+ /* All values below the aligned_u64 must be 64 bit. */
+ aligned_u64 pds_bgnd0_base;
+ u64 pds_bgnd1_base;
+ u64 pds_bgnd3_sizeinfo;
+
+ u64 isp_mtile_base;
+#define ROGUE_PBE_WORDS_REQUIRED_FOR_TQS 3
+ /* TQ_MAX_RENDER_TARGETS * PBE_STATE_SIZE */
+ u64 pbe_wordx_mrty[3U * ROGUE_PBE_WORDS_REQUIRED_FOR_TQS];
+};
+
+struct rogue_fwif_cmd_transfer {
+ /* Common command attributes */
+ struct rogue_fwif_cmd_common common __aligned(8);
+
+ struct rogue_fwif_transfer_regs regs __aligned(8);
+
+ u32 flags;
+
+ u32 padding;
+};
+
+#include "pvr_rogue_fwif_client_check.h"
+
+#endif /* PVR_ROGUE_FWIF_CLIENT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
new file mode 100644
index 000000000..54aa44741
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CLIENT_CHECK_H
+#define PVR_ROGUE_FWIF_CLIENT_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+ static_assert(offsetof(type, member) == (offset), \
+ "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+ static_assert(sizeof(type) == (size), #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu_border_colour_table, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect0, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect1, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, ppp_ctrl, 28);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, te_psg, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu, 36);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task0_size, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task3_size, 44);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_ctrl, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, view_idx, 52);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_coeff_free_prog, 56);
+SIZE_CHECK(struct rogue_fwif_geom_regs, 64);
+
+OFFSET_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, te_psgregion_addr, 0);
+SIZE_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, partial_render_geom_frag_fence, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, dummy_rgnhdr_init_geom_regs, 96);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, brn61484_66333_live_rt, 104);
+SIZE_CHECK(struct rogue_fwif_cmd_geom, 112);
+
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_pixel_output_ctrl, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_clear_register, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjdepth, 36);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjvals, 40);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_aa, 44);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_xtp_pipe_enable, 48);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_ctl, 52);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu, 56);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_info, 60);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pixel_phantom, 64);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, view_idx, 68);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_data, 72);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, brn65101_event_pixel_pds_data, 76);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_stride, 80);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zls_pixels, 84);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgx_cr_blackpearl_fix, 88);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_scissor_base, 96);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dbias_base, 104);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_base, 112);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zlsctl, 120);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zload_store_base, 128);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_stencil_load_store_base, 136);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, fb_cdc_zls, 144);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pbe_word, 152);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu_border_colour_table, 344);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd, 352);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd_brn65101, 376);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_pr_bgnd, 400);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_stencil_store_base, 424);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_depth_store_base, 432);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_single_rt_size, 440);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_scratch_offset, 444);
+SIZE_CHECK(struct rogue_fwif_frag_regs, 448);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, flags, 464);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, zls_stride, 468);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, sls_stride, 472);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, execute_count, 476);
+SIZE_CHECK(struct rogue_fwif_cmd_frag, 480);
+
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_border_colour_table, 0);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_queue, 8);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_base, 16);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb, 24);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_ctrl_stream_base, 32);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_context_state_base_addr, 40);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu, 48);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_resume_pds1, 52);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_item, 56);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, compute_cluster, 60);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_tag_cdm_ctrl, 64);
+SIZE_CHECK(struct rogue_fwif_compute_regs, 72);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, num_temp_regions, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, stream_start_offset, 88);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, execute_count, 92);
+SIZE_CHECK(struct rogue_fwif_cmd_compute, 96);
+
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_bgobjvals, 0);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_pixel_output_ctrl, 4);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register0, 8);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register1, 12);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register2, 16);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register3, 20);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_size, 24);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render_origin, 28);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_ctl, 32);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_xtp_pipe_enable, 36);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_aa, 40);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_info, 44);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_code, 48);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_data, 52);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render, 56);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_rgn, 60);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, frag_screen, 64);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd0_base, 72);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd1_base, 80);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd3_sizeinfo, 88);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_base, 96);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pbe_wordx_mrty, 104);
+SIZE_CHECK(struct rogue_fwif_transfer_regs, 176);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, flags, 184);
+SIZE_CHECK(struct rogue_fwif_cmd_transfer, 192);
+
+#endif /* PVR_ROGUE_FWIF_CLIENT_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
new file mode 100644
index 000000000..6ebb95ba9
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_COMMON_H
+#define PVR_ROGUE_FWIF_COMMON_H
+
+#include <linux/build_bug.h>
+
+/*
+ * This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver.
+ */
+#define PVR_FW_ALIGNMENT_LSB 7U
+
+/* Macro to test structure size alignment. */
+#define PVR_FW_STRUCT_SIZE_ASSERT(_a) \
+ static_assert((sizeof(_a) & PVR_FW_ALIGNMENT_LSB) == 0U, \
+ "Size of " #_a " is not properly aligned")
+
+/* The master definition for data masters known to the firmware. */
+
+#define PVR_FWIF_DM_GP (0)
+/* Either TDM or 2D DM is present. */
+/* When the 'tla' feature is present in the hw (as per @pvr_device_features). */
+#define PVR_FWIF_DM_2D (1)
+/*
+ * When the 'fastrender_dm' feature is present in the hw (as per
+ * @pvr_device_features).
+ */
+#define PVR_FWIF_DM_TDM (1)
+
+#define PVR_FWIF_DM_GEOM (2)
+#define PVR_FWIF_DM_FRAG (3)
+#define PVR_FWIF_DM_CDM (4)
+#define PVR_FWIF_DM_RAY (5)
+#define PVR_FWIF_DM_GEOM2 (6)
+#define PVR_FWIF_DM_GEOM3 (7)
+#define PVR_FWIF_DM_GEOM4 (8)
+
+#define PVR_FWIF_DM_LAST PVR_FWIF_DM_GEOM4
+
+/* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RAY, GEOM2, GEOM3, GEOM4 */
+#define PVR_FWIF_DM_MAX (PVR_FWIF_DM_LAST + 1U)
+
+/* GPU Utilisation states */
+#define PVR_FWIF_GPU_UTIL_STATE_IDLE 0U
+#define PVR_FWIF_GPU_UTIL_STATE_ACTIVE 1U
+#define PVR_FWIF_GPU_UTIL_STATE_BLOCKED 2U
+#define PVR_FWIF_GPU_UTIL_STATE_NUM 3U
+#define PVR_FWIF_GPU_UTIL_STATE_MASK 0x3ULL
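+
+/*
+ * The mask implies that a utilisation state occupies the two least
+ * significant bits of a packed 64-bit word; a consumer would then recover
+ * it with, e.g. (packed_word being a hypothetical value):
+ *
+ *   state = packed_word & PVR_FWIF_GPU_UTIL_STATE_MASK;
+ */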
+
+/*
+ * Maximum amount of register writes that can be done by the register
+ * programmer (FW or META DMA). This is not a HW limitation, it is only
+ * a protection against malformed inputs to the register programmer.
+ */
+#define PVR_MAX_NUM_REGISTER_PROGRAMMER_WRITES 128U
+
+#endif /* PVR_ROGUE_FWIF_COMMON_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
new file mode 100644
index 000000000..168277bce
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_DEV_INFO_H__
+#define __PVR_ROGUE_FWIF_DEV_INFO_H__
+
+enum {
+ PVR_FW_HAS_BRN_44079 = 0,
+ PVR_FW_HAS_BRN_47217,
+ PVR_FW_HAS_BRN_48492,
+ PVR_FW_HAS_BRN_48545,
+ PVR_FW_HAS_BRN_49927,
+ PVR_FW_HAS_BRN_50767,
+ PVR_FW_HAS_BRN_51764,
+ PVR_FW_HAS_BRN_62269,
+ PVR_FW_HAS_BRN_63142,
+ PVR_FW_HAS_BRN_63553,
+ PVR_FW_HAS_BRN_66011,
+ PVR_FW_HAS_BRN_71242,
+
+ PVR_FW_HAS_BRN_MAX
+};
+
+enum {
+ PVR_FW_HAS_ERN_35421 = 0,
+ PVR_FW_HAS_ERN_38020,
+ PVR_FW_HAS_ERN_38748,
+ PVR_FW_HAS_ERN_42064,
+ PVR_FW_HAS_ERN_42290,
+ PVR_FW_HAS_ERN_42606,
+ PVR_FW_HAS_ERN_47025,
+ PVR_FW_HAS_ERN_57596,
+
+ PVR_FW_HAS_ERN_MAX
+};
+
+enum {
+ PVR_FW_HAS_FEATURE_AXI_ACELITE = 0,
+ PVR_FW_HAS_FEATURE_CDM_CONTROL_STREAM_FORMAT,
+ PVR_FW_HAS_FEATURE_CLUSTER_GROUPING,
+ PVR_FW_HAS_FEATURE_COMMON_STORE_SIZE_IN_DWORDS,
+ PVR_FW_HAS_FEATURE_COMPUTE,
+ PVR_FW_HAS_FEATURE_COMPUTE_MORTON_CAPABLE,
+ PVR_FW_HAS_FEATURE_COMPUTE_OVERLAP,
+ PVR_FW_HAS_FEATURE_COREID_PER_OS,
+ PVR_FW_HAS_FEATURE_DYNAMIC_DUST_POWER,
+ PVR_FW_HAS_FEATURE_ECC_RAMS,
+ PVR_FW_HAS_FEATURE_FBCDC,
+ PVR_FW_HAS_FEATURE_FBCDC_ALGORITHM,
+ PVR_FW_HAS_FEATURE_FBCDC_ARCHITECTURE,
+ PVR_FW_HAS_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS,
+ PVR_FW_HAS_FEATURE_FBC_MAX_LARGE_DESCRIPTORS,
+ PVR_FW_HAS_FEATURE_FB_CDC_V4,
+ PVR_FW_HAS_FEATURE_GPU_MULTICORE_SUPPORT,
+ PVR_FW_HAS_FEATURE_GPU_VIRTUALISATION,
+ PVR_FW_HAS_FEATURE_GS_RTA_SUPPORT,
+ PVR_FW_HAS_FEATURE_IRQ_PER_OS,
+ PVR_FW_HAS_FEATURE_ISP_MAX_TILES_IN_FLIGHT,
+ PVR_FW_HAS_FEATURE_ISP_SAMPLES_PER_PIXEL,
+ PVR_FW_HAS_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
+ PVR_FW_HAS_FEATURE_LAYOUT_MARS,
+ PVR_FW_HAS_FEATURE_MAX_PARTITIONS,
+ PVR_FW_HAS_FEATURE_META,
+ PVR_FW_HAS_FEATURE_META_COREMEM_SIZE,
+ PVR_FW_HAS_FEATURE_MIPS,
+ PVR_FW_HAS_FEATURE_NUM_CLUSTERS,
+ PVR_FW_HAS_FEATURE_NUM_ISP_IPP_PIPES,
+ PVR_FW_HAS_FEATURE_NUM_OSIDS,
+ PVR_FW_HAS_FEATURE_NUM_RASTER_PIPES,
+ PVR_FW_HAS_FEATURE_PBE2_IN_XE,
+ PVR_FW_HAS_FEATURE_PBVNC_COREID_REG,
+ PVR_FW_HAS_FEATURE_PERFBUS,
+ PVR_FW_HAS_FEATURE_PERF_COUNTER_BATCH,
+ PVR_FW_HAS_FEATURE_PHYS_BUS_WIDTH,
+ PVR_FW_HAS_FEATURE_RISCV_FW_PROCESSOR,
+ PVR_FW_HAS_FEATURE_ROGUEXE,
+ PVR_FW_HAS_FEATURE_S7_TOP_INFRASTRUCTURE,
+ PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT,
+ PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2,
+ PVR_FW_HAS_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION,
+ PVR_FW_HAS_FEATURE_SLC_BANKS,
+ PVR_FW_HAS_FEATURE_SLC_CACHE_LINE_SIZE_BITS,
+ PVR_FW_HAS_FEATURE_SLC_SIZE_CONFIGURABLE,
+ PVR_FW_HAS_FEATURE_SLC_SIZE_IN_KILOBYTES,
+ PVR_FW_HAS_FEATURE_SOC_TIMER,
+ PVR_FW_HAS_FEATURE_SYS_BUS_SECURE_RESET,
+ PVR_FW_HAS_FEATURE_TESSELLATION,
+ PVR_FW_HAS_FEATURE_TILE_REGION_PROTECTION,
+ PVR_FW_HAS_FEATURE_TILE_SIZE_X,
+ PVR_FW_HAS_FEATURE_TILE_SIZE_Y,
+ PVR_FW_HAS_FEATURE_TLA,
+ PVR_FW_HAS_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS,
+ PVR_FW_HAS_FEATURE_TPU_DM_GLOBAL_REGISTERS,
+ PVR_FW_HAS_FEATURE_TPU_FILTERING_MODE_CONTROL,
+ PVR_FW_HAS_FEATURE_USC_MIN_OUTPUT_REGISTERS_PER_PIX,
+ PVR_FW_HAS_FEATURE_VDM_DRAWINDIRECT,
+ PVR_FW_HAS_FEATURE_VDM_OBJECT_LEVEL_LLS,
+ PVR_FW_HAS_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS,
+ PVR_FW_HAS_FEATURE_WATCHDOG_TIMER,
+ PVR_FW_HAS_FEATURE_WORKGROUP_PROTECTION,
+ PVR_FW_HAS_FEATURE_XE_ARCHITECTURE,
+ PVR_FW_HAS_FEATURE_XE_MEMORY_HIERARCHY,
+ PVR_FW_HAS_FEATURE_XE_TPU2,
+ PVR_FW_HAS_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH,
+ PVR_FW_HAS_FEATURE_XPU_MAX_SLAVES,
+ PVR_FW_HAS_FEATURE_XPU_REGISTER_BROADCAST,
+ PVR_FW_HAS_FEATURE_XT_TOP_INFRASTRUCTURE,
+ PVR_FW_HAS_FEATURE_ZLS_SUBTILE,
+
+ PVR_FW_HAS_FEATURE_MAX
+};
+
+#endif /* __PVR_ROGUE_FWIF_DEV_INFO_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
new file mode 100644
index 000000000..1db1f4c53
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_RESETFRAMEWORK_H
+#define PVR_ROGUE_FWIF_RESETFRAMEWORK_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+struct rogue_fwif_rf_registers {
+ union {
+ u64 cdmreg_cdm_cb_base;
+ u64 cdmreg_cdm_ctrl_stream_base;
+ };
+ u64 cdmreg_cdm_cb_queue;
+ u64 cdmreg_cdm_cb;
+};
+
+struct rogue_fwif_rf_cmd {
+ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+ struct rogue_fwif_rf_registers fw_registers __aligned(8);
+};
+
+#define ROGUE_FWIF_RF_CMD_SIZE sizeof(struct rogue_fwif_rf_cmd)
+
+#endif /* PVR_ROGUE_FWIF_RESETFRAMEWORK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
new file mode 100644
index 000000000..56e11009e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
@@ -0,0 +1,1648 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SF_H
+#define PVR_ROGUE_FWIF_SF_H
+
+/*
+ ******************************************************************************
+ * *DO*NOT* rearrange or delete lines in rogue_fw_log_sfgroups or stid_fmts;
+ * doing so WILL BREAK fw tracing message compatibility with previous
+ * fw versions. Only add new entries, if so required.
+ ******************************************************************************
+ */
+
+/* Available log groups. */
+enum rogue_fw_log_sfgroups {
+ ROGUE_FW_GROUP_NULL,
+ ROGUE_FW_GROUP_MAIN,
+ ROGUE_FW_GROUP_CLEANUP,
+ ROGUE_FW_GROUP_CSW,
+ ROGUE_FW_GROUP_PM,
+ ROGUE_FW_GROUP_RTD,
+ ROGUE_FW_GROUP_SPM,
+ ROGUE_FW_GROUP_MTS,
+ ROGUE_FW_GROUP_BIF,
+ ROGUE_FW_GROUP_MISC,
+ ROGUE_FW_GROUP_POW,
+ ROGUE_FW_GROUP_HWR,
+ ROGUE_FW_GROUP_HWP,
+ ROGUE_FW_GROUP_RPM,
+ ROGUE_FW_GROUP_DMA,
+ ROGUE_FW_GROUP_DBG,
+};
+
+#define PVR_SF_STRING_MAX_SIZE 256U
+
+/* Pair of string format id and string format. */
+struct rogue_fw_stid_fmt {
+ u32 id;
+ char name[PVR_SF_STRING_MAX_SIZE];
+};
+
+/*
+ * The symbolic names found in the table below are assigned a u32 value with
+ * the following format:
+ *
+ *   31   30..28   27..20   19..16   15..12   11..0    bits
+ *   --   ------   ------   ------   ------   -----
+ *  0-11: id number
+ * 12-15: group id number
+ * 16-19: number of parameters
+ * 20-27: unused
+ * 28-30: active: identifies an SF packet rather than a regular int32
+ *    31: reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum generated SF ids list.
+ */
+#define ROGUE_FW_LOG_IDMARKER (0x70000000U)
+#define ROGUE_FW_LOG_CREATESFID(a, b, e) ((u32)(a) | ((u32)(b) << 12) | ((u32)(e) << 16) | \
+ ROGUE_FW_LOG_IDMARKER)
+
+#define ROGUE_FW_LOG_IDMASK (0xFFF00000)
+#define ROGUE_FW_LOG_VALIDID(I) (((I) & ROGUE_FW_LOG_IDMASK) == ROGUE_FW_LOG_IDMARKER)
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define ROGUE_FW_SF_GID(x) (((u32)(x) >> 12) & 0xfU)
+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */
+#define ROGUE_FW_SF_PARAMNUM(x) (((u32)(x) >> 16) & 0xfU)
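+
+/*
+ * For example, ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MAIN, 6) evaluates
+ * to 0x70061001, and the helpers above recover its fields:
+ *
+ *   ROGUE_FW_LOG_VALIDID(0x70061001)  -> true (top bits match the IDMARKER)
+ *   ROGUE_FW_SF_GID(0x70061001)      == 1    (ROGUE_FW_GROUP_MAIN)
+ *   ROGUE_FW_SF_PARAMNUM(0x70061001) == 6    (six format arguments)
+ */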
+
+/* Pair of string format id and string format. */
+struct rogue_km_stid_fmt {
+ u32 id;
+ const char *name;
+};
+
+static const struct rogue_km_stid_fmt stid_fmts[] = {
+ { ROGUE_FW_LOG_CREATESFID(0, ROGUE_FW_GROUP_NULL, 0),
+ "You should not use this string" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MAIN, 6),
+ "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MAIN, 2),
+ "3D finished, HWRTData0State=%x, HWRTData1State=%x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MAIN, 4),
+ "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MAIN, 0),
+ "3D Transfer finished" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MAIN, 0),
+ "Compute finished" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MAIN, 0),
+ "TA finished" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MAIN, 0),
+ "Restart TA after partial render" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MAIN, 0),
+ "Resume TA without partial render" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MAIN, 2),
+ "Out of memory! Context 0x%08x, HWRTData 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MAIN, 0),
+ "TLA finished" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MAIN, 3),
+ "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Checks for FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MAIN, 0),
+ "UFO Checks succeeded" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MAIN, 1),
+ "UFO SPM PR-Checks for FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MAIN, 4),
+ "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Updates for FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Update: [0x%08.8x] = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MAIN, 1),
+ "ASSERT Failed: line %d of:" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_MAIN, 2),
+ "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_MAIN, 3),
+ "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR: Reset HW" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR: Lockup recovered." },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_MAIN, 1),
+ "HWR: False lockup detected for DM%u" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_MAIN, 3),
+ "Alignment check %d failed: host = 0x%x, fw = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_MAIN, 0),
+ "GP USC triggered" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_MAIN, 2),
+ "Overallocating %u temporary registers and %u shared registers for breakpoint handler" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_MAIN, 1),
+ "Setting breakpoint: Addr 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_MAIN, 0),
+ "Store breakpoint state" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_MAIN, 0),
+ "Unsetting BP Registers" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_MAIN, 1),
+ "Active RTs expected to be zero, actually %u" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_MAIN, 1),
+ "RTC present, %u active render targets" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_MAIN, 1),
+ "Estimated Power 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_MAIN, 1),
+ "RTA render target %u" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_MAIN, 2),
+ "Kick RTA render %u of %u" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_MAIN, 3),
+ "HWR sizes check %d failed: addresses = %d, sizes = %d" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_MAIN, 1),
+ "Pow: DUSTS_ENABLE = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_MAIN, 2),
+ "Pow: On(1)/Off(0): %d, Units: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_MAIN, 2),
+ "Pow: Changing number of dusts from %d to %d" },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_MAIN, 0),
+ "Pow: Sidekick ready to be powered down" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_MAIN, 2),
+ "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_MAIN, 0),
+ "No ZS Buffer used for partial render (store)" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_MAIN, 0),
+ "No Depth/Stencil Buffer used for partial render (load)" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_MAIN, 2),
+ "HWR: Lock-up DM%d FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_MAIN, 7),
+ "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_MAIN, 3),
+ "MLIST%d checker: MList[%d] = 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_MAIN, 1),
+ "MLIST%d OK" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_MAIN, 1),
+ "MLIST%d is empty" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_MAIN, 8),
+ "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d" },
+ { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_MAIN, 0),
+ "3D OQ flush kick" },
+ { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_MAIN, 1),
+ "HWPerf block ID (0x%x) unsupported by device" },
+ { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_MAIN, 2),
+ "Setting breakpoint: Addr 0x%08.8x DM%u" },
+ { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_MAIN, 1),
+ "RDM finished on context %u" },
+ { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d" },
+ { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_MAIN, 0),
+ "SHG finished" },
+ { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_MAIN, 1),
+ "FBA finished on context %u" },
+ { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_MAIN, 0),
+ "UFO Checks failed" },
+ { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_MAIN, 1),
+ "Kill DM%d start" },
+ { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_MAIN, 1),
+ "Kill DM%d complete" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_MAIN, 2),
+ "FC%u cCCB Woff update = %u" },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_MAIN, 4),
+ "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d" },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU init" },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_MAIN, 1),
+ "GPU Units init (# mask: 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_MAIN, 3),
+ "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d" },
+ { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_MAIN, 3),
+ "Register configuration added. Address: 0x%x Value: 0x%x%x" },
+ { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_MAIN, 1),
+ "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)" },
+ { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_MAIN, 0),
+ "Perform TPC flush." },
+ { ROGUE_FW_LOG_CREATESFID(74, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU has locked up (see HWR logs for more info)" },
+ { ROGUE_FW_LOG_CREATESFID(75, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR has been triggered - GPU has overrun its deadline (see HWR logs)" },
+ { ROGUE_FW_LOG_CREATESFID(76, ROGUE_FW_GROUP_MAIN, 0),
+ "HWR has been triggered - GPU has failed a poll (see HWR logs)" },
+ { ROGUE_FW_LOG_CREATESFID(77, ROGUE_FW_GROUP_MAIN, 1),
+ "Doppler out of memory event for FC %u" },
+ { ROGUE_FW_LOG_CREATESFID(78, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(79, ROGUE_FW_GROUP_MAIN, 3),
+ "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(80, ROGUE_FW_GROUP_MAIN, 1),
+ "TIMESTAMP -> [0x%08.8x]" },
+ { ROGUE_FW_LOG_CREATESFID(81, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO RMW Updates for FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(82, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO Update: [0x%08.8x] = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(83, ROGUE_FW_GROUP_MAIN, 2),
+ "Kick Null cmd: FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(84, ROGUE_FW_GROUP_MAIN, 2),
+ "RPM Out of memory! Context 0x%08x, SH requestor %d" },
+ { ROGUE_FW_LOG_CREATESFID(85, ROGUE_FW_GROUP_MAIN, 4),
+ "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d" },
+ { ROGUE_FW_LOG_CREATESFID(86, ROGUE_FW_GROUP_MAIN, 4),
+ "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(87, ROGUE_FW_GROUP_MAIN, 4),
+ "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(88, ROGUE_FW_GROUP_MAIN, 4),
+ "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(89, ROGUE_FW_GROUP_MAIN, 3),
+ "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(90, ROGUE_FW_GROUP_MAIN, 3),
+ "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(91, ROGUE_FW_GROUP_MAIN, 1),
+ "Host Sync Partition marker: %d" },
+ { ROGUE_FW_LOG_CREATESFID(92, ROGUE_FW_GROUP_MAIN, 1),
+ "Host Sync Partition repeat: %d" },
+ { ROGUE_FW_LOG_CREATESFID(93, ROGUE_FW_GROUP_MAIN, 1),
+ "Core clock set to %d Hz" },
+ { ROGUE_FW_LOG_CREATESFID(94, ROGUE_FW_GROUP_MAIN, 7),
+ "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(95, ROGUE_FW_GROUP_MAIN, 3),
+ "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(96, ROGUE_FW_GROUP_MAIN, 5),
+ "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(97, ROGUE_FW_GROUP_MAIN, 4),
+ "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(98, ROGUE_FW_GROUP_MAIN, 0),
+ "Compute stalled" },
+ { ROGUE_FW_LOG_CREATESFID(99, ROGUE_FW_GROUP_MAIN, 3),
+ "Compute stalled (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(100, ROGUE_FW_GROUP_MAIN, 3),
+ "Compute resumed (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(101, ROGUE_FW_GROUP_MAIN, 4),
+ "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(102, ROGUE_FW_GROUP_MAIN, 4),
+ "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(103, ROGUE_FW_GROUP_MAIN, 1),
+ "DM: %u signal check failed" },
+ { ROGUE_FW_LOG_CREATESFID(104, ROGUE_FW_GROUP_MAIN, 3),
+ "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(105, ROGUE_FW_GROUP_MAIN, 0),
+ "TDM finished" },
+ { ROGUE_FW_LOG_CREATESFID(106, ROGUE_FW_GROUP_MAIN, 4),
+ "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(107, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 HIT" },
+ { ROGUE_FW_LOG_CREATESFID(108, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 Dummy TA kicked" },
+ { ROGUE_FW_LOG_CREATESFID(109, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 resume TA" },
+ { ROGUE_FW_LOG_CREATESFID(110, ROGUE_FW_GROUP_MAIN, 0),
+ "BRN 54141 double hit after applying WA" },
+ { ROGUE_FW_LOG_CREATESFID(111, ROGUE_FW_GROUP_MAIN, 2),
+ "BRN 54141 Dummy TA VDM base address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(112, ROGUE_FW_GROUP_MAIN, 4),
+ "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(113, ROGUE_FW_GROUP_MAIN, 2),
+ "TDM stalled (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(114, ROGUE_FW_GROUP_MAIN, 1),
+ "Write Offset update notification for stalled FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(115, ROGUE_FW_GROUP_MAIN, 3),
+ "Changing OSid %d's priority from %u to %u" },
+ { ROGUE_FW_LOG_CREATESFID(116, ROGUE_FW_GROUP_MAIN, 0),
+ "Compute resumed" },
+ { ROGUE_FW_LOG_CREATESFID(117, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(118, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(119, ROGUE_FW_GROUP_MAIN, 11),
+ "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(120, ROGUE_FW_GROUP_MAIN, 10),
+ "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(121, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(122, ROGUE_FW_GROUP_MAIN, 6),
+ "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(123, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(124, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(125, ROGUE_FW_GROUP_MAIN, 1),
+ "Reconfigure CSRM: special coeff support enable %d." },
+ { ROGUE_FW_LOG_CREATESFID(127, ROGUE_FW_GROUP_MAIN, 1),
+ "TA requires max coeff mode, deferring: %d." },
+ { ROGUE_FW_LOG_CREATESFID(128, ROGUE_FW_GROUP_MAIN, 1),
+ "3D requires max coeff mode, deferring: %d." },
+ { ROGUE_FW_LOG_CREATESFID(129, ROGUE_FW_GROUP_MAIN, 1),
+ "Kill DM%d failed" },
+ { ROGUE_FW_LOG_CREATESFID(130, ROGUE_FW_GROUP_MAIN, 2),
+ "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(131, ROGUE_FW_GROUP_MAIN, 3),
+ "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(132, ROGUE_FW_GROUP_MAIN, 1),
+ "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs)." },
+ { ROGUE_FW_LOG_CREATESFID(133, ROGUE_FW_GROUP_MAIN, 1),
+ "HCS changed to %d ms" },
+ { ROGUE_FW_LOG_CREATESFID(134, ROGUE_FW_GROUP_MAIN, 4),
+ "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(135, ROGUE_FW_GROUP_MAIN, 2),
+ " Phantom %d: USCTiles=%d" },
+ { ROGUE_FW_LOG_CREATESFID(136, ROGUE_FW_GROUP_MAIN, 0),
+ "Isolation grouping is disabled" },
+ { ROGUE_FW_LOG_CREATESFID(137, ROGUE_FW_GROUP_MAIN, 1),
+ "Isolation group configured with a priority threshold of %d" },
+ { ROGUE_FW_LOG_CREATESFID(138, ROGUE_FW_GROUP_MAIN, 1),
+ "OS %d has come online" },
+ { ROGUE_FW_LOG_CREATESFID(139, ROGUE_FW_GROUP_MAIN, 1),
+ "OS %d has gone offline" },
+ { ROGUE_FW_LOG_CREATESFID(140, ROGUE_FW_GROUP_MAIN, 4),
+ "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(141, ROGUE_FW_GROUP_MAIN, 7),
+ "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(142, ROGUE_FW_GROUP_MAIN, 6),
+ "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(143, ROGUE_FW_GROUP_MAIN, 5),
+ "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(144, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU deinit" },
+ { ROGUE_FW_LOG_CREATESFID(145, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU units deinit" },
+ { ROGUE_FW_LOG_CREATESFID(146, ROGUE_FW_GROUP_MAIN, 2),
+ "Initialised OS %d with config flags 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(147, ROGUE_FW_GROUP_MAIN, 2),
+ "UFO limit exceeded %d/%d" },
+ { ROGUE_FW_LOG_CREATESFID(148, ROGUE_FW_GROUP_MAIN, 0),
+ "3D Dummy stencil store" },
+ { ROGUE_FW_LOG_CREATESFID(149, ROGUE_FW_GROUP_MAIN, 3),
+ "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(150, ROGUE_FW_GROUP_MAIN, 1),
+ "Unknown Command (eCmdType=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(151, ROGUE_FW_GROUP_MAIN, 4),
+ "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(152, ROGUE_FW_GROUP_MAIN, 5),
+ "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d" },
+ { ROGUE_FW_LOG_CREATESFID(153, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM context switch check: Roff %u points to 0x%08x, Match=%u" },
+ { ROGUE_FW_LOG_CREATESFID(154, ROGUE_FW_GROUP_MAIN, 6),
+ "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(155, ROGUE_FW_GROUP_MAIN, 2),
+ "FW IRQ # %u @ %u" },
+ { ROGUE_FW_LOG_CREATESFID(156, ROGUE_FW_GROUP_MAIN, 3),
+ "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u" },
+ { ROGUE_FW_LOG_CREATESFID(157, ROGUE_FW_GROUP_MAIN, 3),
+ "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(158, ROGUE_FW_GROUP_MAIN, 3),
+ "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(159, ROGUE_FW_GROUP_MAIN, 4),
+ "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(160, ROGUE_FW_GROUP_MAIN, 4),
+ "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u" },
+ { ROGUE_FW_LOG_CREATESFID(161, ROGUE_FW_GROUP_MAIN, 3),
+ "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(162, ROGUE_FW_GROUP_MAIN, 4),
+ "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(163, ROGUE_FW_GROUP_MAIN, 4),
+ "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u" },
+ { ROGUE_FW_LOG_CREATESFID(164, ROGUE_FW_GROUP_MAIN, 3),
+ "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(165, ROGUE_FW_GROUP_MAIN, 8),
+ "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(166, ROGUE_FW_GROUP_MAIN, 4),
+ "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)" },
+ { ROGUE_FW_LOG_CREATESFID(167, ROGUE_FW_GROUP_MAIN, 2),
+ "OSid %u has %u stale commands in its KCCB" },
+ { ROGUE_FW_LOG_CREATESFID(168, ROGUE_FW_GROUP_MAIN, 0),
+ "Applying VCE pause" },
+ { ROGUE_FW_LOG_CREATESFID(169, ROGUE_FW_GROUP_MAIN, 3),
+ "OSid %u KCCB slot %u value updated to %u" },
+ { ROGUE_FW_LOG_CREATESFID(170, ROGUE_FW_GROUP_MAIN, 7),
+ "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(171, ROGUE_FW_GROUP_MAIN, 10),
+ "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u" },
+ { ROGUE_FW_LOG_CREATESFID(172, ROGUE_FW_GROUP_MAIN, 10),
+ "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u" },
+ { ROGUE_FW_LOG_CREATESFID(173, ROGUE_FW_GROUP_MAIN, 2),
+ "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u" },
+ { ROGUE_FW_LOG_CREATESFID(174, ROGUE_FW_GROUP_MAIN, 2),
+ "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(175, ROGUE_FW_GROUP_MAIN, 3),
+ "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(176, ROGUE_FW_GROUP_MAIN, 2),
+ "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(177, ROGUE_FW_GROUP_MAIN, 1),
+ "Set Periodic Hardware Reset Mode: %d" },
+ { ROGUE_FW_LOG_CREATESFID(179, ROGUE_FW_GROUP_MAIN, 3),
+ "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(180, ROGUE_FW_GROUP_MAIN, 1),
+ "PHR mode %d triggered a reset" },
+ { ROGUE_FW_LOG_CREATESFID(181, ROGUE_FW_GROUP_MAIN, 2),
+ "Signal update, Snoop Filter: %u, Signal Id: %u" },
+ { ROGUE_FW_LOG_CREATESFID(182, ROGUE_FW_GROUP_MAIN, 1),
+ "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8." },
+ { ROGUE_FW_LOG_CREATESFID(183, ROGUE_FW_GROUP_MAIN, 4),
+ "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u" },
+ { ROGUE_FW_LOG_CREATESFID(184, ROGUE_FW_GROUP_MAIN, 5),
+ "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d" },
+ { ROGUE_FW_LOG_CREATESFID(185, ROGUE_FW_GROUP_MAIN, 3),
+ "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x." },
+ { ROGUE_FW_LOG_CREATESFID(186, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u" },
+ { ROGUE_FW_LOG_CREATESFID(187, ROGUE_FW_GROUP_MAIN, 2),
+ "Signal updates: FIFO: %u, Signals: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(188, ROGUE_FW_GROUP_MAIN, 2),
+ "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(189, ROGUE_FW_GROUP_MAIN, 0),
+ "Insert BRN68497 WA blit after TDM Context store." },
+ { ROGUE_FW_LOG_CREATESFID(190, ROGUE_FW_GROUP_MAIN, 1),
+ "UFO Updates for previously finished FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(191, ROGUE_FW_GROUP_MAIN, 1),
+ "RTC with RTA present, %u active render targets" },
+ { ROGUE_FW_LOG_CREATESFID(192, ROGUE_FW_GROUP_MAIN, 0),
+ "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!" },
+ { ROGUE_FW_LOG_CREATESFID(193, ROGUE_FW_GROUP_MAIN, 2),
+ "Block 0x%x / Counter 0x%x INVALID and ignored" },
+ { ROGUE_FW_LOG_CREATESFID(194, ROGUE_FW_GROUP_MAIN, 2),
+ "ECC fault GPU=0x%08x FW=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(195, ROGUE_FW_GROUP_MAIN, 1),
+ "Processing XPU event on DM = %d" },
+ { ROGUE_FW_LOG_CREATESFID(196, ROGUE_FW_GROUP_MAIN, 2),
+ "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u" },
+ { ROGUE_FW_LOG_CREATESFID(197, ROGUE_FW_GROUP_MAIN, 1),
+ "GPU-%u has locked up (see HWR logs for more info)" },
+ { ROGUE_FW_LOG_CREATESFID(198, ROGUE_FW_GROUP_MAIN, 3),
+ "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(199, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU has locked up (see HWR logs for more info)" },
+ { ROGUE_FW_LOG_CREATESFID(200, ROGUE_FW_GROUP_MAIN, 1),
+ "Reprocessing outstanding XPU events from cores 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(201, ROGUE_FW_GROUP_MAIN, 3),
+ "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(202, ROGUE_FW_GROUP_MAIN, 8),
+ "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(203, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM stalled Core %u (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(204, ROGUE_FW_GROUP_MAIN, 8),
+ "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(205, ROGUE_FW_GROUP_MAIN, 4),
+ "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(206, ROGUE_FW_GROUP_MAIN, 6),
+ "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(207, ROGUE_FW_GROUP_MAIN, 3),
+ "TDM resumed core %u (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(208, ROGUE_FW_GROUP_MAIN, 4),
+ "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(209, ROGUE_FW_GROUP_MAIN, 2),
+ " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)" },
+ { ROGUE_FW_LOG_CREATESFID(210, ROGUE_FW_GROUP_MAIN, 2),
+ "Mask = 0x%X, mask2 = 0x%X" },
+ { ROGUE_FW_LOG_CREATESFID(211, ROGUE_FW_GROUP_MAIN, 3),
+ " core %u, reg = %u, mask = 0x%X)" },
+ { ROGUE_FW_LOG_CREATESFID(212, ROGUE_FW_GROUP_MAIN, 1),
+ "ECC fault received from safety bus: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1),
+ "Safety Watchdog threshold period set to 0x%x clock cycles" },
+ { ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0),
+ "MTS Safety Event triggered by the safety watchdog." },
+ { ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3),
+ "DM%d USC tasks range limit 0 - %d, stride %d" },
+ { ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1),
+ "ECC fault GPU=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(217, ROGUE_FW_GROUP_MAIN, 0),
+ "GPU Hardware units reset to prevent transient faults." },
+ { ROGUE_FW_LOG_CREATESFID(218, ROGUE_FW_GROUP_MAIN, 2),
+ "Kick Abort cmd: FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(219, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(220, ROGUE_FW_GROUP_MAIN, 0),
+ "Ray finished" },
+ { ROGUE_FW_LOG_CREATESFID(221, ROGUE_FW_GROUP_MAIN, 2),
+ "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X" },
+ { ROGUE_FW_LOG_CREATESFID(222, ROGUE_FW_GROUP_MAIN, 2),
+ "CFI Timeout detected (%d increasing to %d)" },
+ { ROGUE_FW_LOG_CREATESFID(223, ROGUE_FW_GROUP_MAIN, 2),
+ "CFI Timeout detected for FBM (%d increasing to %d)" },
+ { ROGUE_FW_LOG_CREATESFID(224, ROGUE_FW_GROUP_MAIN, 0),
+ "Geom OOM event not allowed" },
+ { ROGUE_FW_LOG_CREATESFID(225, ROGUE_FW_GROUP_MAIN, 4),
+ "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)" },
+ { ROGUE_FW_LOG_CREATESFID(226, ROGUE_FW_GROUP_MAIN, 2),
+ "Skipping already executed TA FWCtx 0x%08.8x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(227, ROGUE_FW_GROUP_MAIN, 2),
+ "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM" },
+ { ROGUE_FW_LOG_CREATESFID(228, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(229, ROGUE_FW_GROUP_MAIN, 12),
+ "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(230, ROGUE_FW_GROUP_MAIN, 11),
+ "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(231, ROGUE_FW_GROUP_MAIN, 7),
+ "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(232, ROGUE_FW_GROUP_MAIN, 1),
+ "TDM finished: Kick ID %u " },
+ { ROGUE_FW_LOG_CREATESFID(233, ROGUE_FW_GROUP_MAIN, 1),
+ "TA finished: Kick ID %u " },
+ { ROGUE_FW_LOG_CREATESFID(234, ROGUE_FW_GROUP_MAIN, 3),
+ "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x" },
+ { ROGUE_FW_LOG_CREATESFID(235, ROGUE_FW_GROUP_MAIN, 1),
+ "Compute finished: Kick ID %u " },
+ { ROGUE_FW_LOG_CREATESFID(236, ROGUE_FW_GROUP_MAIN, 10),
+ "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(237, ROGUE_FW_GROUP_MAIN, 8),
+ "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(238, ROGUE_FW_GROUP_MAIN, 1),
+ "Ray finished: Kick ID %u " },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MTS, 2),
+ "Bg Task DM = %u, counted = %d" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MTS, 1),
+ "Bg Task complete DM = %u" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MTS, 3),
+ "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MTS, 1),
+ "Irq Task complete DM = %u" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MTS, 0),
+ "Kick MTS Bg task DM=All" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MTS, 1),
+ "Kick MTS Irq task DM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MTS, 2),
+ "Ready queue debug DM = %u, celltype = %d" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MTS, 2),
+ "Ready-to-run debug DM = %u, item = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MTS, 3),
+ "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MTS, 3),
+ "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MTS, 3),
+ "Ready queue debug DM = %u, celltype = %d, OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MTS, 3),
+ "Bg Task DM = %u, counted = %d, OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MTS, 1),
+ "Bg Task complete DM Bitfield: %u" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MTS, 0),
+ "Irq Task complete." },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_MTS, 7),
+ "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d." },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MTS, 4),
+ "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MTS, 2),
+ "KCCB Slot %u: Return value %u" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MTS, 1),
+ "Bg Task OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MTS, 3),
+ "KCCB Slot %u: Cmd=0x%08x, OSid=%u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MTS, 1),
+ "Irq Task (EVENT_STATUS=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MTS, 2),
+ "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_CLEANUP, 1),
+ "FwCommonContext [0x%08x] cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_CLEANUP, 3),
+ "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HWRTData [0x%08x] for DM=%d, received cleanup request" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HWRTData [0x%08x] HW Context for DM%u is busy" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HWRTData [0x%08x] HW Context %u cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_CLEANUP, 1),
+ "Freelist [0x%08x] cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_CLEANUP, 1),
+ "ZSBuffer [0x%08x] cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_CLEANUP, 3),
+ "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_CLEANUP, 4),
+ "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_CLEANUP, 4),
+ "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_CLEANUP, 2),
+ "HW Ray Frame Data [0x%08x] HW Context %u cleaned" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_CLEANUP, 1),
+ "Discarding invalid cleanup request of type 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_CLEANUP, 1),
+ "Received cleanup request for HWRTData [0x%08x]" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_CLEANUP, 3),
+ "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_CSW, 1),
+ "CDM FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_CSW, 3),
+ "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_CSW, 1),
+ "CDM FWCtx shared alloc size load 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_CSW, 0),
+ "*** CDM FWCtx store complete" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_CSW, 0),
+ "*** CDM FWCtx store start" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_CSW, 0),
+ "CDM Soft Reset" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_CSW, 1),
+ "3D FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_CSW, 1),
+ "*** 3D FWCtx 0x%08.8x resume" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_CSW, 0),
+ "*** 3D context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_CSW, 3),
+ "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_CSW, 0),
+ "*** 3D context store start" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_CSW, 1),
+ "*** 3D TQ FWCtx 0x%08.8x resume" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_CSW, 1),
+ "TA FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_CSW, 3),
+ "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_CSW, 2),
+ "TA context shared alloc size store 0x%x, load 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_CSW, 0),
+ "*** TA context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_CSW, 0),
+ "*** TA context store start" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_CSW, 3),
+ "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_CSW, 2),
+ "Set FWCtx 0x%x priority to %u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_CSW, 2),
+ "3D context store pipe%d state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_CSW, 2),
+ "3D context resume pipe%d state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_CSW, 1),
+ "SHG FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_CSW, 3),
+ "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_CSW, 2),
+ "SHG context shared alloc size store 0x%x, load 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_CSW, 0),
+ "*** SHG context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_CSW, 0),
+ "*** SHG context store start" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_CSW, 1),
+ "Performing TA indirection, last used pipe %d" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_CSW, 0),
+ "CDM context store hit ctrl stream terminate. Skip resume." },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_CSW, 4),
+ "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_CSW, 2),
+ "TA PDS/USC state buffer flip (%d->%d)" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_CSW, 0),
+ "TA context store hit BRN 52563: vertex store tasks outstanding" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_CSW, 1),
+ "TA USC poll failed (USC vertex task count: %d)" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_CSW, 0),
+ "TA context store deferred due to BRN 54141." },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_CSW, 7),
+ "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_CSW, 0),
+ "*** TDM context store start" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_CSW, 0),
+ "*** TDM context store complete" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_CSW, 2),
+ "TDM context needs resume, header [0x%08.8x, 0x%08.8x]" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_CSW, 8),
+ "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_CSW, 3),
+ "3D context store pipe %2d (%2d) state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_CSW, 3),
+ "3D context resume pipe %2d (%2d) state: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_CSW, 1),
+ "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_CSW, 3),
+ "3D context store pipe%d state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_CSW, 3),
+ "3D context resume pipe%d state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_CSW, 2),
+ "3D context resume IPP state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_CSW, 1),
+ "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_CSW, 3),
+ "TDM context resume pipe%d state: 0x%08.8x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_CSW, 0),
+ "*** 3D context store start version 4" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_CSW, 2),
+ "Multicore context resume on DM%d active core mask 0x%04.4x" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_CSW, 2),
+ "Multicore context store on DM%d active core mask 0x%04.4x" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_CSW, 5),
+ "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_CSW, 0),
+ "*** RDM FWCtx store complete" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_CSW, 0),
+ "*** RDM FWCtx store start" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_CSW, 1),
+ "RDM FWCtx 0x%08.8x needs resume" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_CSW, 1),
+ "RDM FWCtx 0x%08.8x resume" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_BIF, 3),
+ "Activate MemCtx=0x%08x BIFreq=%d secure=%d" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_BIF, 1),
+ "Deactivate MemCtx=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_BIF, 1),
+ "Alloc PC reg %d" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_BIF, 2),
+ "Grab reg set %d refcount now %d" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_BIF, 2),
+ "Ungrab reg set %d refcount now %d" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_BIF, 6),
+ "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_BIF, 2),
+ "Trust enabled:%d, for BIFreq=%d" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_BIF, 9),
+ "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_BIF, 4),
+ "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_BIF, 3),
+ "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_BIF, 7),
+ "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x" }, \
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_BIF, 5),
+ "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_BIF, 1),
+ "Unmap GPU memory (event status 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_BIF, 3),
+ "Activate MemCtx=0x%08x DM=%d secure=%d" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_BIF, 6),
+ "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_BIF, 4),
+ "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_BIF, 2),
+ "Trust enabled:%d, for DM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_BIF, 5),
+ "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_BIF, 6),
+ "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_BIF, 3),
+ "Alloc PC set %d as register range [%u - %u]" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO write 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO read 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MISC, 0),
+ "GPIO enabled" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MISC, 0),
+ "GPIO disabled" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO status=%d (0=OK, 1=Disabled)" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MISC, 2),
+ "GPIO_AP: Read address=0x%02x (%d byte(s))" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MISC, 2),
+ "GPIO_AP: Write address=0x%02x (%d byte(s))" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MISC, 0),
+ "GPIO_AP timeout!" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MISC, 1),
+ "GPIO already read 0x%02x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MISC, 2),
+ "SR: Check buffer %d available returned %d" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Waiting for buffer %d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MISC, 2),
+ "SR: Timeout waiting for buffer %d (after %d ticks)" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MISC, 2),
+ "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Skip remaining strip %d in frame" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Inform HW that strip %d is a new frame" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip mode is %d" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip Render start (strip %d)" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip Render complete (buffer %d)" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MISC, 1),
+ "SR: Strip Render fault (buffer %d)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_MISC, 1),
+ "TRP state: %d" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_MISC, 1),
+ "TRP failure: %d" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MISC, 1),
+ "SW TRP State: %d" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_MISC, 1),
+ "SW TRP failure: %d" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_MISC, 1),
+ "HW kick event (%u)" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_MISC, 4),
+ "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_MISC, 6),
+ "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_MISC, 6),
+ "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_MISC, 4),
+ "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_MISC, 6),
+ "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_PM, 10),
+ "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_PM, 8),
+ "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_PM, 14),
+ "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_PM, 14),
+ "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_PM, 5),
+ "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_PM, 1),
+ "Grow for freelist ID=0x%08x denied by host" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_PM, 5),
+ "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_PM, 1),
+ "Reconstruction of freelist ID=0x%08x failed" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_PM, 2),
+ "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_PM, 2),
+ "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_PM, 1),
+ "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_PM, 1),
+ "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_PM, 1),
+ "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_PM, 1),
+ "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_PM, 1),
+ "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_PM, 7),
+ "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_PM, 10),
+ "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_PM, 10),
+ "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_PM, 7),
+ "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_PM, 7),
+ "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_PM, 10),
+ "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_PM, 10),
+ "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_PM, 5),
+ "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_PM, 5),
+ "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_PM, 5),
+ "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_PM, 5),
+ "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_PM, 4),
+ "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_PM, 1),
+ "3D Timeout Now for FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_PM, 1),
+ "GEOM PM Recycle for FWCtx 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_PM, 1),
+ "PM running primary config (Core %d)" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_PM, 1),
+ "PM running secondary config (Core %d)" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_PM, 1),
+ "PM running tertiary config (Core %d)" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_PM, 1),
+ "PM running quaternary config (Core %d)" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_RPM, 3),
+ "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_RPM, 3),
+ "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_RPM, 0),
+ "RPM request failed. Waiting for freelist grow." },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_RPM, 0),
+ "RPM request failed. Aborting the current frame." },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_RPM, 1),
+ "RPM waiting for pending grow on freelist 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_RPM, 3),
+ "Request freelist grow [0x%08x] current pages %d, grow size %d" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_RPM, 2),
+ "Freelist load: SHF = 0x%08x, SHG = 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_RPM, 2),
+ "SHF FPL register: 0x%08x.0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_RPM, 2),
+ "SHG FPL register: 0x%08x.0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_RPM, 5),
+ "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_RPM, 0),
+ "Restarting SHG" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_RPM, 0),
+ "Grow failed, aborting the current frame." },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_RPM, 1),
+ "RPM abort complete on HWFrameData [0x%08x]." },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_RPM, 1),
+ "RPM freelist cleanup [0x%08x] requires abort to proceed." },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_RPM, 2),
+ "RPM page table base register: 0x%08x.0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_RPM, 0),
+ "Issuing RPM abort." },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_RPM, 0),
+ "RPM OOM received but toggle bits indicate free pages available" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_RPM, 0),
+ "RPM hardware timeout. Unable to process OOM event." },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_RPM, 5),
+ "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_RPM, 5),
+ "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_RPM, 3),
+ "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_RPM, 3),
+ "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_RTD, 2),
+ "3D RTData 0x%08x finished on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_RTD, 2),
+ "3D RTData 0x%08x ready on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_RTD, 4),
+ "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_RTD, 2),
+ "Loading VFP table 0x%08x%08x for 3D" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_RTD, 2),
+ "Loading VFP table 0x%08x%08x for TA" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_RTD, 10),
+ "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_RTD, 0),
+ "Perform VHEAP table store" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_RTD, 2),
+ "RTData 0x%08x: found match in Context=%d: Load=No, Store=No" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_RTD, 2),
+ "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_RTD, 3),
+ "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_RTD, 3),
+ "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_RTD, 5),
+ "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_RTD, 10),
+ "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_RTD, 2),
+ "TA RTData 0x%08x finished on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_RTD, 2),
+ "TA RTData 0x%08x loaded on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_RTD, 12),
+ "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_RTD, 12),
+ "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_RTD, 1),
+ "Freelist 0x%x RESET!!!!!!!!" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_RTD, 5),
+ "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_RTD, 3),
+ "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_RTD, 1),
+ "Freelist reconstruction ACK from host (HWR state :%u)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_RTD, 0),
+ "Freelist reconstruction completed" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_RTD, 3),
+ "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_RTD, 3),
+ "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_RTD, 8),
+ "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_RTD, 2),
+ "3D RTData 0x%08x loaded on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_RTD, 4),
+ "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_RTD, 2),
+ "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_RTD, 3),
+ "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_RTD, 12),
+ "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_RTD, 12),
+ "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_RTD, 5),
+ "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_RTD, 7),
+ "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_RTD, 4),
+ "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_RTD, 6),
+ "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_RTD, 1),
+ "TA RTData 0x%08x marked as killed." },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_RTD, 1),
+ "3D RTData 0x%08x marked as killed." },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_RTD, 1),
+ "RTData 0x%08x will be killed after TA restart." },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_RTD, 3),
+ "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset." },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_RTD, 3),
+ "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x." },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_RTD, 3),
+ "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x." },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_SPM, 0),
+ "Force Z-Load for partial render" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_SPM, 0),
+ "Force Z-Store for partial render" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_SPM, 1),
+ "3D MemFree: Local FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_SPM, 1),
+ "3D MemFree: MMU FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_SPM, 1),
+ "3D MemFree: Global FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_SPM, 6),
+ "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_SPM, 3),
+ "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_SPM, 5),
+ "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_SPM, 0),
+ "Partial render avoided" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_SPM, 0),
+ "Partial render discarded" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_SPM, 0),
+ "Partial Render finished" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = 3D-BG" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = 3D-IRQ" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = NONE" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = TA-BG" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_SPM, 0),
+ "SPM Owner = TA-IRQ" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_SPM, 2),
+ "ZStore address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_SPM, 2),
+ "SStore address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_SPM, 2),
+ "ZLoad address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_SPM, 2),
+ "SLoad address 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_SPM, 0),
+ "No deferred ZS Buffer provided" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_SPM, 1),
+ "ZS Buffer successfully populated (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_SPM, 1),
+ "No need to populate ZS Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_SPM, 1),
+ "ZS Buffer successfully unpopulated (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_SPM, 1),
+ "No need to unpopulate ZS Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_SPM, 1),
+ "Send ZS-Buffer backing request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_SPM, 1),
+ "Send ZS-Buffer unbacking request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_SPM, 1),
+ "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_SPM, 1),
+ "Partial Render waiting for SBuffer to be backed (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = none" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR blocked" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = wait for grow" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = wait for HW" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR running" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR avoided" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR executed" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2),
+ "3DMemFree matches freelist 0x%08x (FL type = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0),
+ "Raise the 3DMemFreeDetected flag" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1),
+ "Wait for pending grow on Freelist 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1),
+ "ZS Buffer failed to be populated (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_SPM, 5),
+ "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_SPM, 4),
+ "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u" },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_SPM, 5),
+ "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_SPM, 1),
+ "No deferred partial render FW (Type=%d) Buffer provided" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_SPM, 1),
+ "No need to populate PR Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_SPM, 1),
+ "No need to unpopulate PR Buffer (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_SPM, 1),
+ "Send PR Buffer backing request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_SPM, 1),
+ "Send PR Buffer unbacking request to host (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_SPM, 1),
+ "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_SPM, 2),
+ "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_SPM, 4),
+ "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_SPM, 3),
+ "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_SPM, 3),
+ "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u" },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_SPM, 3),
+ "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_SPM, 0),
+ "SPM State = PR force free" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_POW, 4),
+ "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_POW, 3),
+ "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_POW, 3),
+ "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_POW, 4),
+ "Initiate powoff query. Inactive DMs: %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_POW, 2),
+ "Any RD-DM pending? %d, Any RD-DM Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_POW, 3),
+ "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_POW, 2),
+ "HW Request On(1)/Off(0): %d, Units: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_POW, 2),
+ "Request to change num of dusts to %d (Power flags=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_POW, 2),
+ "Changing number of dusts from %d to %d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_POW, 0),
+ "Sidekick init" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_POW, 1),
+ "Rascal+Dusts init (# dusts mask: 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_POW, 0),
+ "Initiate powoff query for RD-DMs." },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_POW, 0),
+ "Initiate powoff query for TLA-DM." },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_POW, 2),
+ "Any RD-DM pending? %d, Any RD-DM Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_POW, 2),
+ "TLA-DM pending? %d, TLA-DM Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_POW, 1),
+ "Request power up due to BRN37270. Pow stat int: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_POW, 3),
+ "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_POW, 1),
+ "OS requested forced IDLE, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_POW, 1),
+ "OS cancelled forced IDLE, pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_POW, 3),
+ "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_POW, 3),
+ "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_POW, 2),
+ "Active PM latency set to %dms. Core clock: %d Hz" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_POW, 2),
+ "Compute cluster mask change to 0x%x, %d dusts powered." },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_POW, 0),
+ "Null command executed, repeating initiate powoff query for RD-DMs." },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_POW, 1),
+ "Power monitor: Estimate of dynamic energy %u" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_POW, 3),
+ "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: New deadline, time = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: New workload, cycles = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Proactive frequency calculated = %u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Reactive utilisation = %u percent" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Reactive frequency calculated = %u.%u" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: OPP Point Sent = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Deadline removed = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Workload removed = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Throttle to a maximum = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: Failed to pass OPP point via GPIO." },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: Invalid node passed to function." },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: Disabled: Not enabled by host." },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_POW, 2),
+ "HW Request Completed(1)/Aborted(0): %d, Ticks: %d" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_POW, 1),
+ "Allowed number of dusts is %d due to BRN59042." },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_POW, 3),
+ "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_POW, 5),
+ "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: GPU transitioned to idle" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_POW, 0),
+ "Proactive DVFS: GPU transitioned to active" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_POW, 1),
+ "Power counter dumping: Data truncated writing register %u. Buffer too small." },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_POW, 0),
+ "Power controller returned ABORT for last request so retrying." },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_POW, 2),
+ "Discarding invalid power request: type 0x%x, DM %u" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_POW, 2),
+ "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_POW, 2),
+ "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_POW, 1),
+ "Detected attempt to change dust count while not forced idle (pow state 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_POW, 3),
+ "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_POW, 2),
+ "Conflicting clock frequency range: OPP min = %u, max = %u" },
+ { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_POW, 1),
+ "Proactive DVFS: Set floor to a minimum = 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_POW, 2),
+ "OS requested pow off (forced = %d), pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_POW, 1),
+ "Discarding invalid power request: type 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_POW, 3),
+ "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_POW, 2),
+ "Changing SPU power state mask from 0x%x to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_POW, 1),
+ "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)" },
+ { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_POW, 1),
+ "Invalid SPU power mask 0x%x! Changing to 1" },
+ { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_POW, 2),
+ "Proactive DVFS: Send OPP %u with clock divider value %u" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_POW, 0),
+ "PPA block started in perf validation mode." },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_POW, 1),
+ "Reset PPA block state %u (1=reset, 0=recalculate)." },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_POW, 1),
+ "Power controller returned ABORT for Core-%d last request so retrying." },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_POW, 3),
+ "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_POW, 5),
+ "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_POW, 4),
+ "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_POW, 2),
+ "RAC pending? %d, RAC Active? %d" },
+ { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_POW, 0),
+ "Initiate powoff query for RAC." },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_HWR, 2),
+ "Lockup detected on DM%d, FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_HWR, 3),
+ "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_HWR, 0),
+ "Reset HW" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_HWR, 0),
+ "Lockup recovered." },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_HWR, 2),
+ "Lock-up DM%d FWCtx: 0x%08.8x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_HWR, 4),
+ "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_HWR, 3),
+ "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_HWR, 3),
+ "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_HWR, 4),
+ "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_HWR, 4),
+ "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_HWR, 3),
+ "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_HWR, 4),
+ "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_HWR, 3),
+ "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_HWR, 2),
+ "Discarded cmd on DM%u FWCtx=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_HWR, 6),
+ "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_HWR, 2),
+ "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_HWR, 5),
+ "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_HWR, 8),
+ "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_HWR, 2),
+ "FL reconstruction 0x%08.8x c%d" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_HWR, 3),
+ "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x." },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_HWR, 2),
+ "Reset HW (mmu:%d, extmem: %d)" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_HWR, 4),
+ "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_HWR, 2),
+ "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_HWR, 5),
+ "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_HWR, 1),
+ "Recovery DM%u: DM fully recovered" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_HWR, 2),
+ "DM%u: Hold scheduling due to R-Flag = 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_HWR, 0),
+ "Analysis: Need freelist reconstruction" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_HWR, 2),
+ "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_HWR, 2),
+ "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_HWR, 2),
+ "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_HWR, 0),
+ "GPU has locked up" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_HWR, 1),
+ "DM%u ready for HWR" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_HWR, 2),
+ "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_HWR, 1),
+ "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_HWR, 1),
+ "DM%u timed out" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_HWR, 1),
+ "RGX_CR_EVENT_STATUS=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_HWR, 2),
+ "DM%u lockup falsely detected, R-Flags=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_HWR, 0),
+ "GPU has overrun its deadline" },
+ { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_HWR, 0),
+ "GPU has failed a poll" },
+ { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_HWR, 2),
+ "RGX DM%u phase count=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_HWR, 2),
+ "Reset HW (loop:%d, poll failures: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_HWR, 1),
+ "MMU fault event: 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_HWR, 1),
+ "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_HWR, 1),
+ "Fast CRC Failed. Proceeding to full register checking (DM: %u)." },
+ { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_HWR, 2),
+ "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_HWR, 2),
+ "Fast CRC Check result for DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_HWR, 2),
+ "Full Signature Check result for DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_HWR, 3),
+ "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u" },
+ { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_HWR, 3),
+ "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_HWR, 2),
+ "Deadline counter for DM%u is HWRDeadline=%u" },
+ { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_HWR, 1),
+ "Holding Scheduling on OSid %u due to pending freelist reconstruction" },
+ { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_HWR, 2),
+ "Requesting reconstruction for freelist 0x%x (ID=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_HWR, 1),
+ "Reconstruction of freelist ID=%d complete" },
+ { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_HWR, 4),
+ "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_HWR, 1),
+ "Reconstruction of freelist ID=%d failed" },
+ { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_HWR, 4),
+ "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_HWR, 4),
+ "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_HWR, 2),
+ "USC slots: %u used by DM%u" },
+ { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_HWR, 1),
+ "USC slots: %u empty" },
+ { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_HWR, 5),
+ "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_HWR, 1),
+ "Begin hardware reset (HWR Counter=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_HWR, 1),
+ "Finished hardware reset (HWR Counter=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_HWR, 2),
+ "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction" },
+ { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_HWR, 5),
+ "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_HWR, 4),
+ "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_HWR, 3),
+ "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_HWR, 1),
+ "At least one other DM is running okay so DM%u will get another chance" },
+ { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_HWR, 2),
+ "Reconstructing in FW, FL: 0x%x (ID=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_HWR, 4),
+ "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)" },
+ { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_HWR, 5),
+ "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_HWR, 3),
+ "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_HWR, 1),
+ "End long HW poll (result=%d)" },
+ { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_HWR, 3),
+ "DM%u has taken %d ticks and deadline is %d ticks" },
+ { ROGUE_FW_LOG_CREATESFID(74, ROGUE_FW_GROUP_HWR, 5),
+ "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u" },
+ { ROGUE_FW_LOG_CREATESFID(75, ROGUE_FW_GROUP_HWR, 6),
+ "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u" },
+ { ROGUE_FW_LOG_CREATESFID(76, ROGUE_FW_GROUP_HWR, 1),
+ "GPU-%u has locked up" },
+ { ROGUE_FW_LOG_CREATESFID(77, ROGUE_FW_GROUP_HWR, 1),
+ "DM%u has locked up" },
+ { ROGUE_FW_LOG_CREATESFID(78, ROGUE_FW_GROUP_HWR, 2),
+ "Core %d RGX_CR_EVENT_STATUS=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(79, ROGUE_FW_GROUP_HWR, 2),
+ "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(80, ROGUE_FW_GROUP_HWR, 5),
+ "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(81, ROGUE_FW_GROUP_HWR, 3),
+ "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(82, ROGUE_FW_GROUP_HWR, 4),
+ "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(83, ROGUE_FW_GROUP_HWR, 4),
+ "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(84, ROGUE_FW_GROUP_HWR, 3),
+ "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(85, ROGUE_FW_GROUP_HWR, 3),
+ "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(86, ROGUE_FW_GROUP_HWR, 4),
+ "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d" },
+ { ROGUE_FW_LOG_CREATESFID(87, ROGUE_FW_GROUP_HWR, 6),
+ "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u" },
+ { ROGUE_FW_LOG_CREATESFID(88, ROGUE_FW_GROUP_HWR, 3),
+ "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)" },
+ { ROGUE_FW_LOG_CREATESFID(89, ROGUE_FW_GROUP_HWR, 2),
+ "TEXAS1_PFS poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(90, ROGUE_FW_GROUP_HWR, 2),
+ "BIF_PFS poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(91, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(92, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(93, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(94, ROGUE_FW_GROUP_HWR, 2),
+ "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(95, ROGUE_FW_GROUP_HWR, 3),
+ "TEXAS%d_PFS poll failed on core %d with value 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(96, ROGUE_FW_GROUP_HWR, 3),
+ "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u" },
+ { ROGUE_FW_LOG_CREATESFID(97, ROGUE_FW_GROUP_HWR, 1),
+ "FW attempted to write to read-only GPU address 0x%08x" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_HWP, 2),
+ "Block 0x%x mapped to Config Idx %u" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x omitted from event - not enabled in HW" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x included in event - enabled in HW" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_HWP, 2),
+ "Select register state hi_0x%x lo_0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_HWP, 1),
+ "Counter stream block header word 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_HWP, 1),
+ "Counter register offset 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x config unset, skipping" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_HWP, 1),
+ "Accessing Indirect block 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_HWP, 1),
+ "Accessing Direct block 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_HWP, 1),
+ "Programmed counter select register at offset 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_HWP, 2),
+ "Block register offset 0x%x and value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_HWP, 1),
+ "Reading config block from driver 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_HWP, 2),
+ "Reading block range 0x%x to 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_HWP, 1),
+ "Recording block 0x%x config from driver" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_HWP, 0),
+ "Finished reading config block from driver" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_HWP, 2),
+ "Custom Counter offset: 0x%x value: 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_HWP, 2),
+ "Select counter n:%u ID:0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_HWP, 3),
+ "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_HWP, 1),
+ "Custom Counters filter status %d" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_HWP, 2),
+ "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_HWP, 2),
+ "The package will be discarded because it contains %d counters IDs while the upper limit is %d" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_HWP, 2),
+ "Check Filter 0x%x is 0x%x ?" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_HWP, 1),
+ "The custom block %u is reset" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_HWP, 1),
+ "Encountered an invalid command (%d)" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_HWP, 2),
+ "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_HWP, 3),
+ "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)" },
+ { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_HWP, 1),
+ "Custom Counter block: %d" },
+ { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x ENABLED" },
+ { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_HWP, 1),
+ "Block 0x%x DISABLED" },
+ { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_HWP, 2),
+ "Accessing Indirect block 0x%x, instance %u" },
+ { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_HWP, 2),
+ "Counter register 0x%x, Value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_HWP, 1),
+ "Counters filter status %d" },
+ { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_HWP, 2),
+ "Block 0x%x mapped to Ctl Idx %u" },
+ { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_HWP, 0),
+ "Block(s) in use for workload estimation." },
+ { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_HWP, 3),
+ "GPU %u Cycle counter 0x%x, Value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_HWP, 3),
+ "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x" },
+ { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_HWP, 1),
+ "Blocks IGNORED for GPU %u" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_DMA, 5),
+ "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_DMA, 4),
+ "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_DMA, 1),
+ "DMA Interrupt register 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_DMA, 1),
+ "Waiting for transfer of type 0x%02x completion..." },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_DMA, 3),
+ "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_DMA, 3),
+ "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_DMA, 1),
+ "Transfer 0x%02x request poll failure" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_DMA, 2),
+ "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_DMA, 7),
+ "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u" },
+
+ { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_DBG, 2),
+ "0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_DBG, 1),
+ "0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_DBG, 2),
+ "0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_DBG, 3),
+ "0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_DBG, 4),
+ "0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_DBG, 5),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_DBG, 6),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_DBG, 7),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_DBG, 8),
+ "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" },
+ { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_DBG, 1),
+ "%d" },
+ { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_DBG, 2),
+ "%d %d" },
+ { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_DBG, 3),
+ "%d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_DBG, 4),
+ "%d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_DBG, 5),
+ "%d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_DBG, 6),
+ "%d %d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_DBG, 7),
+ "%d %d %d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_DBG, 8),
+ "%d %d %d %d %d %d %d %d" },
+ { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_DBG, 1),
+ "%u" },
+ { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_DBG, 2),
+ "%u %u" },
+ { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_DBG, 3),
+ "%u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_DBG, 4),
+ "%u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_DBG, 5),
+ "%u %u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_DBG, 6),
+ "%u %u %u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_DBG, 7),
+ "%u %u %u %u %u %u %u" },
+ { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_DBG, 8),
+ "%u %u %u %u %u %u %u %u" },
+
+ { ROGUE_FW_LOG_CREATESFID(65535, ROGUE_FW_GROUP_NULL, 15),
+ "You should not use this string" },
+};
+
+#define ROGUE_FW_SF_FIRST ROGUE_FW_LOG_CREATESFID(0, ROGUE_FW_GROUP_NULL, 0)
+#define ROGUE_FW_SF_MAIN_ASSERT_FAILED ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MAIN, 1)
+#define ROGUE_FW_SF_LAST ROGUE_FW_LOG_CREATESFID(65535, ROGUE_FW_GROUP_NULL, 15)
+
+#endif /* PVR_ROGUE_FWIF_SF_H */
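The table that closes above is what lets the host decode the firmware trace stream: the firmware emits only a packed SFID (built by ROGUE_FW_LOG_CREATESFID, whose third argument records the number of format arguments) followed by the raw argument words, and the host recovers the message text by looking the SFID up. A minimal host-side sketch of that lookup follows; the stid_fmts[] array name and its { u32 id; const char *name; } layout are assumptions for illustration, not taken from this header, and ARRAY_SIZE() is the usual kernel helper.

/*
 * Hypothetical host-side lookup; stid_fmts[] and its field names are
 * assumed, not part of this header.
 */
static const char *rogue_trace_fmt_lookup(u32 packed_sfid)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
		if (stid_fmts[i].id == packed_sfid)
			return stid_fmts[i].name;
	}

	/* Unknown SFID: caller falls back to dumping the raw words. */
	return NULL;
}

Because the argument count is encoded alongside the ID, a decoder that finds a match also knows how many argument words to consume from the stream before formatting the message.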
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
new file mode 100644
index 000000000..6c09c15bf
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SHARED_H
+#define PVR_ROGUE_FWIF_SHARED_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define ROGUE_FWIF_NUM_RTDATAS 2U
+#define ROGUE_FWIF_NUM_GEOMDATAS 1U
+#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U
+#define ROGUE_NUM_GEOM_CORES 1U
+
+#define ROGUE_NUM_GEOM_CORES_SIZE 2U
+
+/*
+ * Maximum number of UFOs in a CCB command.
+ * The number is based on having 32 sync prims (as originally), plus 32 sync
+ * checkpoints.
+ * Once the use of sync prims is no longer supported, the same total (64)
+ * will be retained: the number of sync checkpoints which may be backing a
+ * fence is not visible to the client driver, so the limit has to allow for
+ * the number of different timelines involved in fence merges.
+ */
+#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U)
+
+/*
+ * This is a generic limit imposed on any DM (GEOMETRY, FRAGMENT, CDM, TDM,
+ * 2D, TRANSFER) command passed through the bridge.
+ * Just across the bridge in the server, any incoming kick command size is
+ * checked against this maximum limit.
+ * If the incoming command size is larger than the specified limit,
+ * the bridge call is rejected with an error.
+ */
+#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)
+
+#define ROGUE_FWIF_PRBUFFER_START (0)
+#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0)
+#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1)
+#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2)
+
+struct rogue_fwif_dma_addr {
+ aligned_u64 dev_addr;
+ u32 fw_addr;
+ u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_ufo {
+ u32 addr;
+ u32 value;
+};
+
+#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1)
+
+struct rogue_fwif_sync_checkpoint {
+ u32 state;
+ u32 fw_ref_count;
+};
+
+struct rogue_fwif_cleanup_ctl {
+ /* Number of commands received by the FW */
+ u32 submitted_commands;
+ /* Number of commands executed by the FW */
+ u32 executed_commands;
+} __aligned(8);
+
+/*
+ * Used to share frame numbers across UM-KM-FW. The frame number is set in
+ * UM and is required both in KM (for HTB) and in FW (for the FW trace).
+ *
+ * May be used to house Kick flags in the future.
+ */
+struct rogue_fwif_cmd_common {
+ /* associated frame number */
+ u32 frame_num;
+};
+
+/*
+ * Geometry and fragment commands require a set of firmware addresses that are stored in the Kernel.
+ * Client has handle(s) to Kernel containers storing these addresses, instead of raw addresses. We
+ * have to patch/write these addresses in KM to prevent UM from controlling FW addresses directly.
+ * Typedefs for geometry and fragment commands are shared between Client and Firmware (both
+ * single-BVNC). The Kernel is implemented in a multi-BVNC manner, so it can't use geometry|fragment
+ * CMD type definitions directly. Therefore we have a SHARED block that is shared between UM-KM-FW
+ * across all BVNC configurations.
+ */
+struct rogue_fwif_cmd_geom_frag_shared {
+ /* Common command attributes */
+ struct rogue_fwif_cmd_common cmn;
+
+ /*
+ * RTData associated with this command. This is used for context
+ * selection and for storing out the HW context when the TA is
+ * switched out, so that it can be continued later.
+ */
+ u32 hwrt_data_fw_addr;
+
+ /* Supported PR Buffers like Z/S/MSAA Scratch */
+ u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED];
+};
+
+/*
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ * Roff Doff Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ * < runnable commands >< !ready to run >
+ *
+ * Cmd 1 : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+struct rogue_fwif_cccb_ctl {
+ /* Host write offset into CCB. This must be aligned to 16 bytes. */
+ u32 write_offset;
+ /*
+ * Firmware read offset into CCB. Points to the command that is runnable
+ * on GPU, if R!=W
+ */
+ u32 read_offset;
+ /*
+ * Firmware fence dependency offset. Points to commands not ready, i.e.
+ * fence dependencies are not met.
+ */
+ u32 dep_offset;
+ /* Offset wrapping mask: total capacity of the CCB in bytes, minus 1 */
+ u32 wrap_mask;
+
+ /* Only used if SUPPORT_AGP is present. */
+ u32 read_offset2;
+
+ /* Only used if SUPPORT_AGP4 is present. */
+ u32 read_offset3;
+ /* Only used if SUPPORT_AGP4 is present. */
+ u32 read_offset4;
+
+ u32 padding;
+} __aligned(8);
+
+#define ROGUE_FW_LOCAL_FREELIST (0)
+#define ROGUE_FW_GLOBAL_FREELIST (1)
+#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST
+#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U)
+
+struct rogue_fwif_geom_registers_caswitch {
+ u64 geom_reg_vdm_context_state_base_addr;
+ u64 geom_reg_vdm_context_state_resume_addr;
+ u64 geom_reg_ta_context_state_base_addr;
+
+ struct {
+ u64 geom_reg_vdm_context_store_task0;
+ u64 geom_reg_vdm_context_store_task1;
+ u64 geom_reg_vdm_context_store_task2;
+
+ /* VDM resume state update controls */
+ u64 geom_reg_vdm_context_resume_task0;
+ u64 geom_reg_vdm_context_resume_task1;
+ u64 geom_reg_vdm_context_resume_task2;
+
+ u64 geom_reg_vdm_context_store_task3;
+ u64 geom_reg_vdm_context_store_task4;
+
+ u64 geom_reg_vdm_context_resume_task3;
+ u64 geom_reg_vdm_context_resume_task4;
+ } geom_state[2];
+};
+
+#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \
+ sizeof(struct rogue_fwif_geom_registers_caswitch)
+
+struct rogue_fwif_cdm_registers_cswitch {
+ u64 cdmreg_cdm_context_pds0;
+ u64 cdmreg_cdm_context_pds1;
+ u64 cdmreg_cdm_terminate_pds;
+ u64 cdmreg_cdm_terminate_pds1;
+
+ /* CDM resume controls */
+ u64 cdmreg_cdm_resume_pds0;
+ u64 cdmreg_cdm_context_pds0_b;
+ u64 cdmreg_cdm_resume_pds0_b;
+};
+
+struct rogue_fwif_static_rendercontext_state {
+ /* Geom registers for ctx switch */
+ struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE]
+ __aligned(8);
+};
+
+#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \
+ sizeof(struct rogue_fwif_static_rendercontext_state)
+
+struct rogue_fwif_static_computecontext_state {
+ /* CDM registers for ctx switch */
+ struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8);
+};
+
+#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \
+ sizeof(struct rogue_fwif_static_computecontext_state)
+
+enum rogue_fwif_prbuffer_state {
+ ROGUE_FWIF_PRBUFFER_UNBACKED = 0,
+ ROGUE_FWIF_PRBUFFER_BACKED,
+ ROGUE_FWIF_PRBUFFER_BACKING_PENDING,
+ ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING,
+};
+
+struct rogue_fwif_prbuffer {
+ /* Buffer ID */
+ u32 buffer_id;
+ /* Needs On-demand Z/S/MSAA Buffer allocation */
+ bool on_demand __aligned(4);
+ /* Z/S/MSAA buffer state */
+ enum rogue_fwif_prbuffer_state state;
+ /* Cleanup state */
+ struct rogue_fwif_cleanup_ctl cleanup_sate;
+ /* Compatibility and other flags */
+ u32 prbuffer_flags;
+} __aligned(8);
+
+/* Last reset reason for a context. */
+enum rogue_context_reset_reason {
+ /* No reset reason recorded */
+ ROGUE_CONTEXT_RESET_REASON_NONE = 0,
+ /* Caused a reset due to locking up */
+ ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1,
+ /* Affected by another context locking up */
+ ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2,
+ /* Overran the global deadline */
+ ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3,
+ /* Affected by another context overrunning */
+ ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,
+ /* Forced reset to ensure scheduling requirements */
+ ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,
+ /* FW Safety watchdog triggered */
+ ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12,
+ /* FW page fault (no HWR) */
+ ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13,
+ /* FW execution error (GPU reset requested) */
+ ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14,
+ /* Host watchdog detected FW error */
+ ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15,
+ /* Geometry DM OOM event is not allowed */
+ ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16,
+};
+
+struct rogue_context_reset_reason_data {
+ enum rogue_context_reset_reason reset_reason;
+ u32 reset_ext_job_ref;
+};
+
+#include "pvr_rogue_fwif_shared_check.h"
+
+#endif /* PVR_ROGUE_FWIF_SHARED_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
new file mode 100644
index 000000000..597ed54bb
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SHARED_CHECK_H
+#define PVR_ROGUE_FWIF_SHARED_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+ static_assert(offsetof(type, member) == (offset), \
+ "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+ static_assert(sizeof(type) == (size), #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_dma_addr, dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_dma_addr, fw_addr, 8);
+SIZE_CHECK(struct rogue_fwif_dma_addr, 16);
+
+OFFSET_CHECK(struct rogue_fwif_ufo, addr, 0);
+OFFSET_CHECK(struct rogue_fwif_ufo, value, 4);
+SIZE_CHECK(struct rogue_fwif_ufo, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, submitted_commands, 0);
+OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, executed_commands, 4);
+SIZE_CHECK(struct rogue_fwif_cleanup_ctl, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, write_offset, 0);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset, 4);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, dep_offset, 8);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, wrap_mask, 12);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset2, 16);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset3, 20);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset4, 24);
+SIZE_CHECK(struct rogue_fwif_cccb_ctl, 32);
+
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_base_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_resume_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_reg_ta_context_state_base_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task0, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task1, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task2, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task0, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task1, 56);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task2, 64);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task3, 72);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task4, 80);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task3, 88);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task4, 96);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task0, 104);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task1, 112);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task2, 120);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task0, 128);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task1, 136);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task2, 144);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task3, 152);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task4, 160);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task3, 168);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task4, 176);
+SIZE_CHECK(struct rogue_fwif_geom_registers_caswitch, 184);
+
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 0);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 8);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 16);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 24);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 32);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 40);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 48);
+SIZE_CHECK(struct rogue_fwif_cdm_registers_cswitch, 56);
+
+OFFSET_CHECK(struct rogue_fwif_static_rendercontext_state, ctxswitch_regs, 0);
+SIZE_CHECK(struct rogue_fwif_static_rendercontext_state, 368);
+
+OFFSET_CHECK(struct rogue_fwif_static_computecontext_state, ctxswitch_regs, 0);
+SIZE_CHECK(struct rogue_fwif_static_computecontext_state, 56);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_common, frame_num, 0);
+SIZE_CHECK(struct rogue_fwif_cmd_common, 4);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, cmn, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, hwrt_data_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, pr_buffer_fw_addr, 8);
+SIZE_CHECK(struct rogue_fwif_cmd_geom_frag_shared, 16);
+
+#endif /* PVR_ROGUE_FWIF_SHARED_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
new file mode 100644
index 000000000..1c2c4ebed
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_STREAM_H
+#define PVR_ROGUE_FWIF_STREAM_H
+
+/**
+ * DOC: Streams
+ *
+ * Commands are submitted to the kernel driver in the form of streams.
+ *
+ * A command stream has the following layout:
+ * - A 64-bit header containing:
+ * * A u32 containing the length of the main stream inclusive of the length of the header.
+ * * A u32 for padding.
+ * - The main stream data.
+ * - The extension stream (optional), which is composed of:
+ * * One or more headers.
+ * * The extension stream data, corresponding to the extension headers.
+ *
+ * The main stream provides the base command data. This has a fixed layout based on the features
+ * supported by a given GPU.
+ *
+ * The extension stream provides the command parameters that are required for BRNs & ERNs for the
+ * current GPU. This stream is comprised of one or more headers, followed by data for each given
+ * BRN/ERN.
+ *
+ * Each header is a u32 containing a bitmask of quirks & enhancements in the extension stream, a
+ * "type" field determining the set of quirks & enhancements the bitmask represents, and a
+ * continuation bit determining whether any more headers are present. The headers are then followed
+ * by command data; this is specific to each quirk/enhancement. All unused / reserved bits in the
+ * header must be set to 0.
+ *
+ * All parameters and headers in the main and extension streams must be naturally aligned.
+ *
+ * If a parameter appears in both the main and extension streams, then the extension parameter is
+ * used.
+ */
+
+/*
+ * Stream extension header definition
+ */
+#define PVR_STREAM_EXTHDR_TYPE_SHIFT 29U
+#define PVR_STREAM_EXTHDR_TYPE_MASK (7U << PVR_STREAM_EXTHDR_TYPE_SHIFT)
+#define PVR_STREAM_EXTHDR_TYPE_MAX 8U
+#define PVR_STREAM_EXTHDR_CONTINUATION BIT(28U)
+
+#define PVR_STREAM_EXTHDR_DATA_MASK ~(PVR_STREAM_EXTHDR_TYPE_MASK | PVR_STREAM_EXTHDR_CONTINUATION)
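+
+/*
+ * Decoding sketch (illustrative only): a header word read from the extension
+ * stream unpacks with the definitions above as:
+ *
+ *	u32 type = (hdr & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT;
+ *	u32 data = hdr & PVR_STREAM_EXTHDR_DATA_MASK;
+ *	bool more = hdr & PVR_STREAM_EXTHDR_CONTINUATION;
+ */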
+
+/*
+ * Stream extension header - Geometry 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_GEOM0 0U
+
+#define PVR_STREAM_EXTHDR_GEOM0_BRN49927 BIT(0U)
+
+#define PVR_STREAM_EXTHDR_GEOM0_VALID PVR_STREAM_EXTHDR_GEOM0_BRN49927
+
+/*
+ * Stream extension header - Fragment 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_FRAG0 0U
+
+#define PVR_STREAM_EXTHDR_FRAG0_BRN47217 BIT(0U)
+#define PVR_STREAM_EXTHDR_FRAG0_BRN49927 BIT(1U)
+
+#define PVR_STREAM_EXTHDR_FRAG0_VALID PVR_STREAM_EXTHDR_FRAG0_BRN49927
+
+/*
+ * Stream extension header - Compute 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_COMPUTE0 0U
+
+#define PVR_STREAM_EXTHDR_COMPUTE0_BRN49927 BIT(0U)
+
+#define PVR_STREAM_EXTHDR_COMPUTE0_VALID PVR_STREAM_EXTHDR_COMPUTE0_BRN49927
+
+#endif /* PVR_ROGUE_FWIF_STREAM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
new file mode 100644
index 000000000..684766006
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_HEAP_CONFIG_H
+#define PVR_ROGUE_HEAP_CONFIG_H
+
+#include <linux/sizes.h>
+
+/*
+ * ROGUE Device Virtual Address Space Definitions
+ *
+ * This file defines the ROGUE virtual address heaps that are used in
+ * application memory contexts. It also shows where the Firmware memory heap
+ * fits into this, but the firmware heap is only ever created in the
+ * kernel driver and never exposed to userspace.
+ *
+ * ROGUE_PDSCODEDATA_HEAP_BASE and ROGUE_USCCODE_HEAP_BASE will be programmed,
+ * on a global basis, into ROGUE_CR_PDS_EXEC_BASE and ROGUE_CR_USC_CODE_BASE_*
+ * respectively. Therefore, if client drivers use multiple configs, their
+ * definitions for these heaps must remain consistent across them.
+ *
+ * Base addresses have to be a multiple of 4MiB.
+ * Heaps must not start at 0x0000000000, as this is reserved for internal
+ * use within the driver.
+ * The range comments below that start in column 0 act as section headings
+ * and sit above the heaps in that range. Often they state the reserved
+ * size of the heap within the range.
+ */
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0000_0000 - 0x00_0040_0000 */
+/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED */
+
+/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
+/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : RESERVED **/
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
+/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
+#define ROGUE_GENERAL_HEAP_BASE 0x8000000000ull
+#define ROGUE_GENERAL_HEAP_SIZE SZ_128G
+
+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF */
+/* 640 GiB to 704 GiB, size of 64 GiB : FREE */
+
+/* 0xB0_0000_0000 - 0xB7_FFFF_FFFF */
+/* 704 GiB to 736 GiB, size of 32 GiB : FREE */
+
+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF */
+/* 736 GiB to 768 GiB, size of 32 GiB : RESERVED */
+
+/* 0xC0_0000_0000 ************************************************************/
+
+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF */
+/* 768 GiB to 872 GiB, size of 104 GiB : FREE */
+
+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF */
+/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP */
+#define ROGUE_PDSCODEDATA_HEAP_BASE 0xDA00000000ull
+#define ROGUE_PDSCODEDATA_HEAP_SIZE SZ_4G
+
+/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF */
+/* 876 GiB to 880 GiB, size of 256 MiB (4 GiB reserved) : BRN **/
+/*
+ * The BRN63142 quirk workaround requires Region Header memory to be at the top
+ * of a 16GiB aligned range. This is so when masked with 0x03FFFFFFFF the
+ * address will avoid aliasing PB addresses. Start at 879.75GiB. Size of 256MiB.
+ */
+#define ROGUE_RGNHDR_HEAP_BASE 0xDBF0000000ull
+#define ROGUE_RGNHDR_HEAP_SIZE SZ_256M
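+
+/*
+ * Worked check (illustrative): 0xDBF0000000 + SZ_256M = 0xDC00000000, so the
+ * region header heap occupies the top 256 MiB of the 16 GiB-aligned range
+ * 0xD800000000..0xDBFFFFFFFF, as the workaround above requires.
+ */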
+
+/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF */
+/* 880 GiB to 896 GiB, size of 16 GiB : FREE */
+
+/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF */
+/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP */
+#define ROGUE_USCCODE_HEAP_BASE 0xE000000000ull
+#define ROGUE_USCCODE_HEAP_SIZE SZ_4G
+
+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF */
+/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED */
+
+/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF */
+/* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP */
+#define ROGUE_FW_HEAP_BASE 0xE1C0000000ull
+
+/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF */
+/* 904 GiB to 912 GiB, size of 8 GiB : FREE */
+
+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF */
+/* 912 GiB to 928 GiB, size of 16 GiB : TRANSFER_FRAG */
+#define ROGUE_TRANSFER_FRAG_HEAP_BASE 0xE400000000ull
+#define ROGUE_TRANSFER_FRAG_HEAP_SIZE SZ_16G
+
+/* 0xE8_0000_0000 - 0xF1_FFFF_FFFF */
+/* 928 GiB to 968 GiB, size of 40 GiB : RESERVED */
+
+/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/
+/* 968 GiB to 969 GiB, size of 2 MiB : VISTEST_HEAP */
+#define ROGUE_VISTEST_HEAP_BASE 0xF200000000ull
+#define ROGUE_VISTEST_HEAP_SIZE SZ_2M
+
+/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF */
+/* 969 GiB to 972 GiB, size of 3 GiB : FREE */
+
+/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF */
+/* 972 GiB to 1024 GiB, size of 52 GiB : FREE */
+
+/* 0xFF_FFFF_FFFF ************************************************************/
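+
+/*
+ * Compile-time sanity sketch (illustrative, not part of this header): the
+ * 4 MiB base-alignment rule stated above can be verified with static_assert
+ * from <linux/build_bug.h>, e.g.:
+ *
+ *	static_assert(!(ROGUE_GENERAL_HEAP_BASE % SZ_4M));
+ *	static_assert(!(ROGUE_PDSCODEDATA_HEAP_BASE % SZ_4M));
+ *	static_assert(!(ROGUE_USCCODE_HEAP_BASE % SZ_4M));
+ */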
+
+#endif /* PVR_ROGUE_HEAP_CONFIG_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_meta.h b/drivers/gpu/drm/imagination/pvr_rogue_meta.h
new file mode 100644
index 000000000..3020e6582
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_meta.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_META_H
+#define PVR_ROGUE_META_H
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ ******************************************************************************
+ * META registers and MACROS
+ *****************************************************************************
+ */
+#define META_CR_CTRLREG_BASE(t) (0x04800000U + (0x1000U * (t)))
+
+#define META_CR_TXPRIVEXT (0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN BIT(7)
+
+#define META_CR_SYSC_JTAG_THREAD (0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004)
+
+#define META_CR_PERF_COUNT0 (0x0480FFE0)
+#define META_CR_PERF_COUNT1 (0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT (28)
+#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS \
+ (0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE (0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT (24)
+#define META_CR_PERF_COUNT_THR_MASK (0x0F000000)
+#define META_CR_PERF_COUNT_THR_0 (0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1 (0x2 << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT (0x04820500)
+#define META_CR_PERF_ICORE0 (0x0480FFD0)
+#define META_CR_PERF_ICORE1 (0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS (0x8)
+
+#define META_CR_PERF_COUNT(ctrl, thr) \
+ ((META_CR_PERF_COUNT_CTRL_##ctrl << META_CR_PERF_COUNT_CTRL_SHIFT) | \
+ ((thr) << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U)
+#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U)
+
+/* Poll for done. */
+#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U)
+/* Set for read. */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U)
+#define META_CR_TXUXXRXRQ_TX_S (12)
+#define META_CR_TXUXXRXRQ_RX_S (4)
+#define META_CR_TXUXXRXRQ_UXX_S (0)
+
+/* Internal ctrl regs. */
+#define META_CR_TXUIN_ID (0x0)
+/* Data unit regs. */
+#define META_CR_TXUD0_ID (0x1)
+/* Data unit regs. */
+#define META_CR_TXUD1_ID (0x2)
+/* Address unit regs. */
+#define META_CR_TXUA0_ID (0x3)
+/* Address unit regs. */
+#define META_CR_TXUA1_ID (0x4)
+/* PC registers. */
+#define META_CR_TXUPC_ID (0x5)
+
+/* Macros to calculate register access values. */
+#define META_CR_CORE_REG(thr, reg_num, unit) \
+ (((u32)(thr) << META_CR_TXUXXRXRQ_TX_S) | \
+ ((u32)(reg_num) << META_CR_TXUXXRXRQ_RX_S) | \
+ ((u32)(unit) << META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUPC_ID)
+
+#define META_CR_COREREG_ENABLE (0x0000000U)
+#define META_CR_COREREG_STATUS (0x0000010U)
+#define META_CR_COREREG_DEFR (0x00000A0U)
+#define META_CR_COREREG_PRIVEXT (0x00000E8U)
+
+#define META_CR_T0ENABLE_OFFSET \
+ (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE)
+#define META_CR_T0STATUS_OFFSET \
+ (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS)
+#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR)
+#define META_CR_T0PRIVEXT_OFFSET \
+ (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_T1ENABLE_OFFSET \
+ (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE)
+#define META_CR_T1STATUS_OFFSET \
+ (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS)
+#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR)
+#define META_CR_T1PRIVEXT_OFFSET \
+ (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */
+#define META_CR_TXSTATUS_PRIV (0x00020000U)
+#define META_CR_TXPRIVEXT_MINIM (0x00000080U)
+
+#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U)
+
+#define META_CR_TXCLKCTRL (0x048000B0)
+#define META_CR_TXCLKCTRL_ALL_ON (0x55111111)
+#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222)
+
+#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6)
+#define META_CR_SYSC_DCPART(n) (0x04830200 + (n) * 0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31)
+#define META_CR_SYSC_ICPART(n) (0x04830220 + (n) * 0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7)
+#define META_CR_MMCU_DCACHE_CTRL (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1)
+
+/*
+ ******************************************************************************
+ * META LDR Format
+ ******************************************************************************
+ */
+/* Block header structure. */
+struct rogue_meta_ldr_block_hdr {
+ u32 dev_id;
+ u32 sl_code;
+ u32 sl_data;
+ u16 pc_ctrl;
+ u16 crc;
+};
+
+/* High level data stream block structure. */
+struct rogue_meta_ldr_l1_data_blk {
+ u16 cmd;
+ u16 length;
+ u32 next;
+ u32 cmd_data[4];
+};
+
+/* High level data stream block structure. */
+struct rogue_meta_ldr_l2_data_blk {
+ u16 tag;
+ u16 length;
+ u32 block_data[4];
+};
+
+/* Config command structure. */
+struct rogue_meta_ldr_cfg_blk {
+ u32 type;
+ u32 block_data[4];
+};
+
+/* Block type definitions */
+#define ROGUE_META_LDR_COMMENT_TYPE_MASK (0x0010U)
+#define ROGUE_META_LDR_BLK_IS_COMMENT(x) (((x) & ROGUE_META_LDR_COMMENT_TYPE_MASK) != 0U)
+
+/*
+ * Command definitions
+ * Value Name Description
+ * 0 LoadMem Load memory with binary data.
+ * 1 LoadCore Load a set of core registers.
+ * 2 LoadMMReg Load a set of memory mapped registers.
+ * 3 StartThreads Set each thread PC and SP, then enable threads.
+ * 4 ZeroMem Zeros a memory region.
+ * 5 Config Perform a configuration command.
+ */
+#define ROGUE_META_LDR_CMD_MASK (0x000FU)
+
+#define ROGUE_META_LDR_CMD_LOADMEM (0x0000U)
+#define ROGUE_META_LDR_CMD_LOADCORE (0x0001U)
+#define ROGUE_META_LDR_CMD_LOADMMREG (0x0002U)
+#define ROGUE_META_LDR_CMD_START_THREADS (0x0003U)
+#define ROGUE_META_LDR_CMD_ZEROMEM (0x0004U)
+#define ROGUE_META_LDR_CMD_CONFIG (0x0005U)
+
+/*
+ * Config Command definitions
+ * Value Name Description
+ * 0 Pause Pause for x times 100 instructions.
+ * 1 Read Read a value from a register - no value return needed.
+ * Utilises the side effects of issuing reads to certain registers.
+ * 2 Write Write to a memory location.
+ * 3 MemSet Set memory to a value.
+ * 4 MemCheck Check memory for a specific value.
+ */
+#define ROGUE_META_LDR_CFG_PAUSE (0x0000)
+#define ROGUE_META_LDR_CFG_READ (0x0001)
+#define ROGUE_META_LDR_CFG_WRITE (0x0002)
+#define ROGUE_META_LDR_CFG_MEMSET (0x0003)
+#define ROGUE_META_LDR_CFG_MEMCHECK (0x0004)
+
+/*
+ ******************************************************************************
+ * ROGUE FW segmented MMU definitions
+ ******************************************************************************
+ */
+/* All threads can access the segment. */
+#define ROGUE_FW_SEGMMU_ALLTHRS (0xf << 8U)
+/* Writable. */
+#define ROGUE_FW_SEGMMU_WRITEABLE (0x1U << 1U)
+/* All threads can access and writable. */
+#define ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE \
+ (ROGUE_FW_SEGMMU_ALLTHRS | ROGUE_FW_SEGMMU_WRITEABLE)
+
+/* Direct map region 10 used for mapping GPU memory - max 8MB. */
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ID (10U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U)
+
+/* Segment IDs. */
+#define ROGUE_FW_SEGMMU_DATA_ID (1U)
+#define ROGUE_FW_SEGMMU_BOOTLDR_ID (2U)
+#define ROGUE_FW_SEGMMU_TEXT_ID (ROGUE_FW_SEGMMU_BOOTLDR_ID)
+
+/*
+ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU.
+ * All the segments configured through the macro ROGUE_FW_SEGMMU_OUTADDR_TOP are
+ * CACHED in the SLC.
+ * The interface has been kept the same to simplify the code changes.
+ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) \
+ ((((u64)((pers) & 0x3)) << 52) | (((u64)((mmu_ctx) & 0xFF)) << 44) | \
+ (((u64)((slc_policy) & 0x1)) << 40))
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) \
+ ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3, 0x0, mmu_ctx)
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) \
+ ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0, 0x1, mmu_ctx)
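+
+/*
+ * Worked expansion (illustrative): with mmu_ctx == 3,
+ * ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(3) evaluates to
+ * (0x3ULL << 52) | (3ULL << 44) | (0x0ULL << 40), i.e. persistence 0x3,
+ * MMU context 3 and SLC policy 0 (cached).
+ */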
+
+/*
+ * To configure the Page Catalog and BIF-DM fed into the BIF for Garten
+ * accesses through this segment.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) \
+ (((u64)((u64)(pc) & 0xFU) << 44U) | ((u64)((u64)(bifdm) & 0xFU) << 40U))
+
+#define ROGUE_FW_SEGMMU_META_BIFDM_ID (0x7U)
+
+/* META segments have 4kB minimum size. */
+#define ROGUE_FW_SEGMMU_ALIGN (0x1000U)
+
+/* Segmented MMU registers (n = segment id). */
+#define META_CR_MMCU_SEGMENT_N_BASE(n) (0x04850000U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_LIMIT(n) (0x04850004U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_OUTA0(n) (0x04850008U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_OUTA1(n) (0x0485000CU + ((n) * 0x10U))
+
+/*
+ * The following defines must be recalculated if the Meta MMU segments used
+ * to access Host-FW data are changed
+ * Current combinations are:
+ * - SLC uncached, META cached, FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached, META cached, FW base address 0x10000000
+ * - SLC cached, META uncached, FW base address 0x90000000
+ */
+#define ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U)
+#define ROGUE_FW_SEGMMU_DATA_META_CACHED (0x0U)
+#define ROGUE_FW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT)
+#define ROGUE_FW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT)
+/*
+ * For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in
+ * the PTEs for the FW data, not in the Meta Segment MMU, which means these
+ * defines have no real effect in those cases.
+ */
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U)
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U)
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U)
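+
+/*
+ * Derivation sketch (illustrative): the four combinations listed above are
+ * ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS with the META and VIVT SLC cache bits
+ * ORed in. For example, SLC cached + META uncached gives
+ * 0x10000000 | META_MEM_GLOBAL_RANGE_BIT = 0x90000000, and SLC uncached +
+ * META cached gives 0x10000000 | 0x60000000 = 0x70000000.
+ */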
+
+/*
+ ******************************************************************************
+ * ROGUE FW Bootloader defaults
+ ******************************************************************************
+ */
+#define ROGUE_FW_BOOTLDR_META_ADDR (0x40000000U)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR_1 (0x000000E1)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR \
+ ((((u64)ROGUE_FW_BOOTLDR_DEVV_ADDR_1) << 32) | \
+ ROGUE_FW_BOOTLDR_DEVV_ADDR_0)
+#define ROGUE_FW_BOOTLDR_LIMIT (0x1FFFF000)
+#define ROGUE_FW_MAX_BOOTLDR_OFFSET (0x1000)
+
+/* Bootloader configuration offset, in dwords (0x80 dwords = 512 bytes). */
+#define ROGUE_FW_BOOTLDR_CONF_OFFSET (0x80)
+
+/*
+ ******************************************************************************
+ * ROGUE META Stack
+ ******************************************************************************
+ */
+#define ROGUE_META_STACK_SIZE (0x1000U)
+
+/*
+ ******************************************************************************
+ * ROGUE META Core memory
+ ******************************************************************************
+ */
+/* Code and data both map to the same physical memory. */
+#define ROGUE_META_COREMEM_CODE_ADDR (0x80000000U)
+#define ROGUE_META_COREMEM_DATA_ADDR (0x82000000U)
+#define ROGUE_META_COREMEM_OFFSET_MASK (0x01ffffffU)
+
+#define ROGUE_META_IS_COREMEM_CODE(a, b) \
+ ({ \
+ u32 _a = (a), _b = (b); \
+ ((_a) >= ROGUE_META_COREMEM_CODE_ADDR) && \
+ ((_a) < (ROGUE_META_COREMEM_CODE_ADDR + (_b))); \
+ })
+#define ROGUE_META_IS_COREMEM_DATA(a, b) \
+ ({ \
+ u32 _a = (a), _b = (b); \
+ ((_a) >= ROGUE_META_COREMEM_DATA_ADDR) && \
+ ((_a) < (ROGUE_META_COREMEM_DATA_ADDR + (_b))); \
+ })
+/*
+ ******************************************************************************
+ * 2nd thread
+ ******************************************************************************
+ */
+#define ROGUE_FW_THR1_PC (0x18930000)
+#define ROGUE_FW_THR1_SP (0x78890000)
+
+/*
+ ******************************************************************************
+ * META compatibility
+ ******************************************************************************
+ */
+
+#define META_CR_CORE_ID (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT (16U)
+#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU)
+
+#define ROGUE_CR_META_MTP218_CORE_ID_VALUE 0x19
+#define ROGUE_CR_META_MTP219_CORE_ID_VALUE 0x1E
+#define ROGUE_CR_META_LTP218_CORE_ID_VALUE 0x1C
+#define ROGUE_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#define ROGUE_FW_PROCESSOR_META "META"
+
+#endif /* PVR_ROGUE_META_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips.h b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
new file mode 100644
index 000000000..41ed618fd
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_MIPS_H
+#define PVR_ROGUE_MIPS_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Utility defines for memory management. */
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K (12)
+#define ROGUE_MIPSFW_PAGE_SIZE_4K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_PAGE_MASK_4K (ROGUE_MIPSFW_PAGE_SIZE_4K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K (16)
+#define ROGUE_MIPSFW_PAGE_SIZE_64K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K)
+#define ROGUE_MIPSFW_PAGE_MASK_64K (ROGUE_MIPSFW_PAGE_SIZE_64K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K (18)
+#define ROGUE_MIPSFW_PAGE_SIZE_256K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K)
+#define ROGUE_MIPSFW_PAGE_MASK_256K (ROGUE_MIPSFW_PAGE_SIZE_256K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB (20)
+#define ROGUE_MIPSFW_PAGE_SIZE_1MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB)
+#define ROGUE_MIPSFW_PAGE_MASK_1MB (ROGUE_MIPSFW_PAGE_SIZE_1MB - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB (22)
+#define ROGUE_MIPSFW_PAGE_SIZE_4MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB)
+#define ROGUE_MIPSFW_PAGE_MASK_4MB (ROGUE_MIPSFW_PAGE_SIZE_4MB - 1)
+#define ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE (2)
+/* log2 page table sizes dependent on FW heap size and page size (for each OS). */
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K + \
+ ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_64K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K + \
+ ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
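+
+/*
+ * Worked example (illustrative, assuming a hypothetical 32 MiB FW heap,
+ * i.e. log2_size == 25): with 4 KiB pages the page table spans
+ * 25 - 12 + 2 = 15 bits, i.e. 32 KiB of PTEs (8192 entries of 4 bytes each).
+ */
+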
+/* Maximum number of page table pages (both Host and MIPS pages). */
+#define ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES (4)
+/* Total number of TLB entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES (16)
+/* "Uncached" caching policy. */
+#define ROGUE_MIPSFW_UNCACHED_CACHE_POLICY (2)
+/* "Write-back write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY (3)
+/* "Write-through no write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY (1)
+/* Cache policy used by the MIPS core when the physical bus is 32 bits wide. */
+#define ROGUE_MIPSFW_CACHED_POLICY (ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cache policy used by the MIPS core when the physical bus is wider than 32 bits. */
+#define ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT (ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES)
+
+/* MIPS EntryLo/PTE format. */
+
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000)
+
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000)
+
+/* Page Frame Number */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT (6)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12)
+/* Mask used for the MIPS page table when the physical bus is 32 bits wide. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE (20)
+/* Mask used for the MIPS page table when the physical bus is wider than 32 bits. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24)
+#define ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+ ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U)
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7)
+
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_SHIFT (2U)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_EN (0X00000004)
+
+#define ROGUE_MIPSFW_ENTRYLO_VALID_SHIFT (1U)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_EN (0X00000002)
+
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_SHIFT (0U)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN (0X00000001)
+
+#define ROGUE_MIPSFW_ENTRYLO_DVG (ROGUE_MIPSFW_ENTRYLO_DIRTY_EN | \
+ ROGUE_MIPSFW_ENTRYLO_VALID_EN | \
+ ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN)
+#define ROGUE_MIPSFW_ENTRYLO_UNCACHED (ROGUE_MIPSFW_UNCACHED_CACHE_POLICY << \
+ ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED (ROGUE_MIPSFW_ENTRYLO_DVG | \
+ ROGUE_MIPSFW_ENTRYLO_UNCACHED)
+
+/* Remap Range Config Addr Out. */
+/* These defines refer to the upper half of the Remap Range Config register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0)
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define ROGUE_MIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+ ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ * - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ * - ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ * - ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ * - (benign trampoline) : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper.
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2)
+#define ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES BIT(ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define ROGUE_MIPSFW_TRAMPOLINE_SIZE (ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES << \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES + \
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+
+#define ROGUE_MIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define ROGUE_MIPSFW_TRAMPOLINE_OFFSET(a) ((a) - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define ROGUE_MIPSFW_SENSITIVE_ADDR(a) (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN == \
+ (~((1 << ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE) - 1) \
+ & (a)))
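+
+/*
+ * Worked example (illustrative): the trampoline segment is
+ * 2^ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE = 16 KiB, so
+ * ROGUE_MIPSFW_SENSITIVE_ADDR(a) is true for any address in
+ * 0x1FC00000..0x1FC03FFF, i.e. exactly the four trampolined pages listed
+ * above.
+ */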
+
+/* Firmware virtual layout and remap configuration. */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address correspondent to the virtual base address,
+ * used as input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ * the bottom of the base input address that survive onto the output address
+ * (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region.
+ */
+
+/* Boot remap setup. */
+#define ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000)
+#define ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000)
+#define ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE)
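+
+/*
+ * Note (illustrative): 0xBFC00000 is the KSEG1 (unmapped, uncached) alias of
+ * physical address 0x1FC00000, the MIPS reset exception vector, which is why
+ * the boot remap uses these particular addresses.
+ */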
+
+/* Data remap setup. */
+#define ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000)
+#define ROGUE_MIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000)
+#define ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000)
+#define ROGUE_MIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup. */
+#define ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000)
+#define ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000)
+#define ROGUE_MIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_EXCEPTIONS_VIRTUAL_BASE (ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Permanent mappings setup. */
+#define ROGUE_MIPSFW_PT_VIRTUAL_BASE (0xCF000000)
+#define ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000)
+#define ROGUE_MIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
+
+/* Bootloader configuration data. */
+/*
+ * Bootloader configuration offset (where ROGUE_MIPSFW_BOOT_DATA lives)
+ * within the bootloader/NMI data page.
+ */
+#define ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET (0x0)
+
+/* NMI shared data. */
+/* Base address of the shared data within the bootloader/NMI data page. */
+#define ROGUE_MIPSFW_NMI_SHARED_DATA_BASE (0x100)
+/* Size used by Debug dump data. */
+#define ROGUE_MIPSFW_NMI_SHARED_SIZE (0x2B0)
+/* Offsets in the NMI shared area in 32-bit words. */
+#define ROGUE_MIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
+#define ROGUE_MIPSFW_NMI_STATE_OFFSET (0x1)
+#define ROGUE_MIPSFW_NMI_ERROR_STATE_SET (0x1)
+
+/* MIPS boot stage. */
+#define ROGUE_MIPSFW_BOOT_STAGE_OFFSET (0x400)
+
+/*
+ * MIPS private data in the bootloader data page.
+ * Memory below this offset is used by the FW only, no interface data allowed.
+ */
+#define ROGUE_MIPSFW_PRIVATE_DATA_OFFSET (0x800)
+
+struct rogue_mipsfw_boot_data {
+ u64 stack_phys_addr;
+ u64 reg_base;
+ u64 pt_phys_addr[ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES];
+ u32 pt_log2_page_size;
+ u32 pt_num_pages;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+#define ROGUE_MIPSFW_GET_OFFSET_IN_DWORDS(offset) ((offset) / sizeof(u32))
+#define ROGUE_MIPSFW_GET_OFFSET_IN_QWORDS(offset) ((offset) / sizeof(u64))
+
+/* Used for compatibility checks. */
+#define ROGUE_MIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU)
+#define ROGUE_MIPSFW_ARCHTYPE_VER_SHIFT (10U)
+#define ROGUE_MIPSFW_CORE_ID_VALUE (0x001U)
+#define ROGUE_FW_PROCESSOR_MIPS "MIPS"
+
+/* microAptivAP cache line size. */
+#define ROGUE_MIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U)
+
+/*
+ * The SOCIF transactions are identified with the top 16 bits of the physical address emitted by
+ * the MIPS.
+ */
+#define ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U)
+
+/* Values to put in the MIPS selectors for performance counters. */
+/* Icache accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U)
+/* Icache misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U)
+
+/* Dcache accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U)
+/* Dcache misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U)
+
+/* ITLB instruction accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U)
+/* JTLB instruction accesses misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U)
+
+/* Instructions completed in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U)
+/* JTLB data misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U)
+
+/* Shift for the Event field in the MIPS perf ctrl registers. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U)
+
+/* Additional flags for performance counters. See MIPS manual for further reference. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U)
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U)
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U)
+
+#define ROGUE_MIPSFW_C0_NBHWIRQ 8
+
+/* Macros to decode C0_Cause register. */
+#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE(cause) (((cause) & 0x7c) >> 2)
+#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE_FWERROR 9
+/* Use only when Coprocessor Unusable exception. */
+#define ROGUE_MIPSFW_C0_CAUSE_UNUSABLE_UNIT(cause) (((cause) >> 28) & 0x3)
+#define ROGUE_MIPSFW_C0_CAUSE_PENDING_HWIRQ(cause) (((cause) & 0x3fc00) >> 10)
+#define ROGUE_MIPSFW_C0_CAUSE_FDCIPENDING BIT(21)
+#define ROGUE_MIPSFW_C0_CAUSE_IV BIT(23)
+#define ROGUE_MIPSFW_C0_CAUSE_IC BIT(25)
+#define ROGUE_MIPSFW_C0_CAUSE_PCIPENDING BIT(26)
+#define ROGUE_MIPSFW_C0_CAUSE_TIPENDING BIT(30)
+#define ROGUE_MIPSFW_C0_CAUSE_BRANCH_DELAY BIT(31)
+
+/* Macros to decode C0_Debug register. */
+#define ROGUE_MIPSFW_C0_DEBUG_EXCCODE(debug) (((debug) >> 10) & 0x1f)
+#define ROGUE_MIPSFW_C0_DEBUG_DSS BIT(0)
+#define ROGUE_MIPSFW_C0_DEBUG_DBP BIT(1)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBL BIT(2)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBS BIT(3)
+#define ROGUE_MIPSFW_C0_DEBUG_DIB BIT(4)
+#define ROGUE_MIPSFW_C0_DEBUG_DINT BIT(5)
+#define ROGUE_MIPSFW_C0_DEBUG_DIBIMPR BIT(6)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBLIMPR BIT(18)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBSIMPR BIT(19)
+#define ROGUE_MIPSFW_C0_DEBUG_IEXI BIT(20)
+#define ROGUE_MIPSFW_C0_DEBUG_DBUSEP BIT(21)
+#define ROGUE_MIPSFW_C0_DEBUG_CACHEEP BIT(22)
+#define ROGUE_MIPSFW_C0_DEBUG_MCHECKP BIT(23)
+#define ROGUE_MIPSFW_C0_DEBUG_IBUSEP BIT(24)
+#define ROGUE_MIPSFW_C0_DEBUG_DM BIT(30)
+#define ROGUE_MIPSFW_C0_DEBUG_DBD BIT(31)
+
+/* Macros to decode TLB entries. */
+#define ROGUE_MIPSFW_TLB_GET_MASK(page_mask) (((page_mask) >> 13) & 0XFFFFU)
+/* Page size in KB. */
+#define ROGUE_MIPSFW_TLB_GET_PAGE_SIZE(page_mask) ((((page_mask) | 0x1FFF) + 1) >> 11)
+/* Page size in KB. */
+#define ROGUE_MIPSFW_TLB_GET_PAGE_MASK(page_size) ((((page_size) << 11) - 1) & ~0x7FF)
+#define ROGUE_MIPSFW_TLB_GET_VPN2(entry_hi) ((entry_hi) >> 13)
+#define ROGUE_MIPSFW_TLB_GET_COHERENCY(entry_lo) (((entry_lo) >> 3) & 0x7U)
+#define ROGUE_MIPSFW_TLB_GET_PFN(entry_lo) (((entry_lo) >> 6) & 0XFFFFFU)
+/* GET_PA uses a non-standard PFN mask for 36 bit addresses. */
+#define ROGUE_MIPSFW_TLB_GET_PA(entry_lo) (((u64)(entry_lo) & \
+ ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6)
+#define ROGUE_MIPSFW_TLB_GET_INHIBIT(entry_lo) (((entry_lo) >> 30) & 0x3U)
+#define ROGUE_MIPSFW_TLB_GET_DGV(entry_lo) ((entry_lo) & 0x7U)
+#define ROGUE_MIPSFW_TLB_GLOBAL BIT(0)
+#define ROGUE_MIPSFW_TLB_VALID BIT(1)
+#define ROGUE_MIPSFW_TLB_DIRTY BIT(2)
+#define ROGUE_MIPSFW_TLB_XI BIT(30)
+#define ROGUE_MIPSFW_TLB_RI BIT(31)
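+
+/*
+ * Worked example (illustrative): a page_mask of 0 (4 KiB pages) gives
+ * ROGUE_MIPSFW_TLB_GET_PAGE_SIZE(0) = ((0 | 0x1FFF) + 1) >> 11 = 4 (KB),
+ * and a 16 KiB page_mask of 0x6000 gives (0x7FFF + 1) >> 11 = 16.
+ */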
+
+#define ROGUE_MIPSFW_REMAP_GET_REGION_SIZE(region_size_encoding) (1 << (((region_size_encoding) \
+ + 1) << 1))
+
+struct rogue_mips_tlb_entry {
+ u32 tlb_page_mask;
+ u32 tlb_hi;
+ u32 tlb_lo0;
+ u32 tlb_lo1;
+};
+
+struct rogue_mips_remap_entry {
+ u32 remap_addr_in; /* Always 4k aligned. */
+ u32 remap_addr_out; /* Always 4k aligned. */
+ u32 remap_region_size;
+};
+
+struct rogue_mips_state {
+ u32 error_state; /* This must come first in the structure. */
+ u32 error_epc;
+ u32 status_register;
+ u32 cause_register;
+ u32 bad_register;
+ u32 epc;
+ u32 sp;
+ u32 debug;
+ u32 depc;
+ u32 bad_instr;
+ u32 unmapped_address;
+ struct rogue_mips_tlb_entry tlb[ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES];
+ struct rogue_mips_remap_entry remap[ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES];
+};
+
+#include "pvr_rogue_mips_check.h"
+
+#endif /* PVR_ROGUE_MIPS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
new file mode 100644
index 000000000..824b4bf33
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_MIPS_CHECK_H
+#define PVR_ROGUE_MIPS_CHECK_H
+
+#include <linux/build_bug.h>
+
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) == 0,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_hi) == 4,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_hi) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo0) == 8,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_lo0) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo1) == 12,
+ "offsetof(struct rogue_mips_tlb_entry, tlb_lo1) incorrect");
+static_assert(sizeof(struct rogue_mips_tlb_entry) == 16,
+ "struct rogue_mips_tlb_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_in) == 0,
+ "offsetof(struct rogue_mips_remap_entry, remap_addr_in) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_out) == 4,
+ "offsetof(struct rogue_mips_remap_entry, remap_addr_out) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_region_size) == 8,
+ "offsetof(struct rogue_mips_remap_entry, remap_region_size) incorrect");
+static_assert(sizeof(struct rogue_mips_remap_entry) == 12,
+ "struct rogue_mips_remap_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_state, error_state) == 0,
+ "offsetof(struct rogue_mips_state, error_state) incorrect");
+static_assert(offsetof(struct rogue_mips_state, error_epc) == 4,
+ "offsetof(struct rogue_mips_state, error_epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, status_register) == 8,
+ "offsetof(struct rogue_mips_state, status_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, cause_register) == 12,
+ "offsetof(struct rogue_mips_state, cause_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_register) == 16,
+ "offsetof(struct rogue_mips_state, bad_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, epc) == 20,
+ "offsetof(struct rogue_mips_state, epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, sp) == 24,
+ "offsetof(struct rogue_mips_state, sp) incorrect");
+static_assert(offsetof(struct rogue_mips_state, debug) == 28,
+ "offsetof(struct rogue_mips_state, debug) incorrect");
+static_assert(offsetof(struct rogue_mips_state, depc) == 32,
+ "offsetof(struct rogue_mips_state, depc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_instr) == 36,
+ "offsetof(struct rogue_mips_state, bad_instr) incorrect");
+static_assert(offsetof(struct rogue_mips_state, unmapped_address) == 40,
+ "offsetof(struct rogue_mips_state, unmapped_address) incorrect");
+static_assert(offsetof(struct rogue_mips_state, tlb) == 44,
+ "offsetof(struct rogue_mips_state, tlb) incorrect");
+static_assert(offsetof(struct rogue_mips_state, remap) == 300,
+ "offsetof(struct rogue_mips_state, remap) incorrect");
+static_assert(sizeof(struct rogue_mips_state) == 684,
+ "struct rogue_mips_state is incorrect size");
+
+#endif /* PVR_ROGUE_MIPS_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
new file mode 100644
index 000000000..f361ccdd5
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+/* *** Autogenerated C -- do not edit *** */
+
+#ifndef PVR_ROGUE_MMU_DEFS_H
+#define PVR_ROGUE_MMU_DEFS_H
+
+#define ROGUE_MMU_DEFS_REVISION 0
+
+#define ROGUE_BIF_DM_ENCODING_VERTEX (0x00000000U)
+#define ROGUE_BIF_DM_ENCODING_PIXEL (0x00000001U)
+#define ROGUE_BIF_DM_ENCODING_COMPUTE (0x00000002U)
+#define ROGUE_BIF_DM_ENCODING_TLA (0x00000003U)
+#define ROGUE_BIF_DM_ENCODING_PB_VCE (0x00000004U)
+#define ROGUE_BIF_DM_ENCODING_PB_TE (0x00000005U)
+#define ROGUE_BIF_DM_ENCODING_META (0x00000007U)
+#define ROGUE_BIF_DM_ENCODING_HOST (0x00000008U)
+#define ROGUE_BIF_DM_ENCODING_PM_ALIST (0x00000009U)
+
+#define ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U)
+#define ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK (0xFFFFFF003FFFFFFFULL)
+#define ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U)
+#define ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK (0xFFFFFFFFC01FFFFFULL)
+#define ROGUE_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK (0xFFFFFFFFFFE00FFFULL)
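+
+/*
+ * Illustrative decomposition (not part of the register definitions): with
+ * 4 KiB pages, a 40-bit device virtual address splits into
+ * PC index = bits 39..30, PD index = bits 29..21, PT index = bits 20..12,
+ * with bits 11..0 being the byte offset within the page, e.g.:
+ *
+ *	pc_idx = (vaddr & ~ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >>
+ *		 ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ */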
+
+#define ROGUE_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U)
+#define ROGUE_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U)
+#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U)
+
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U)
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U)
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U)
+
+#define ROGUE_MMUCTRL_PAGE_SIZE_MASK (0x00000007U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_4KB (0x00000000U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_16KB (0x00000001U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_64KB (0x00000002U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_256KB (0x00000003U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_1MB (0x00000004U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_2MB (0x00000005U)
+
+#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U)
+#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (0xFFFFFF0000003FFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U)
+#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (0xFFFFFF000000FFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U)
+#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (0xFFFFFF000003FFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U)
+#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (0xFFFFFF00000FFFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U)
+#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (0xFFFFFF00001FFFFFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U)
+#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (0xFFFFFF00000003FFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U)
+#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (0xFFFFFF00000000FFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U)
+#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (0xFFFFFF000000003FULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (0xFFFFFF000000001FULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (0xFFFFFF000000001FULL)
+
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U)
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (0xBFFFFFFFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (0x4000000000000000ULL)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (0xC00000FFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PAGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PT_DATA_PAGE_CLRMSK (0xFFFFFF0000000FFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (0xFFFFFFFFFFFFF03FULL)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFFFFFFFFDFULL)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (0x0000000000000020ULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (0xFFFFFFFFFFFFFFEFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_EN (0x0000000000000010ULL)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (0xFFFFFFFFFFFFFFF7ULL)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (0x0000000000000008ULL)
+#define ROGUE_MMUCTRL_PT_DATA_CC_SHIFT (2U)
+#define ROGUE_MMUCTRL_PT_DATA_CC_CLRMSK (0xFFFFFFFFFFFFFFFBULL)
+#define ROGUE_MMUCTRL_PT_DATA_CC_EN (0x0000000000000004ULL)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (0xFFFFFFFFFFFFFFFDULL)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_EN (0x0000000000000002ULL)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_EN (0x0000000000000001ULL)
+
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U)
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFEFFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (0x0000010000000000ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (0xFFFFFF000000001FULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (0xFFFFFFFFFFFFFFF1ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (0x0000000000000000ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (0x0000000000000002ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (0x0000000000000004ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (0x0000000000000006ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (0x0000000000000008ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (0x000000000000000aULL)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_EN (0x0000000000000001ULL)
+
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_EN (0x00000001U)
+
+#endif /* PVR_ROGUE_MMU_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
new file mode 100644
index 000000000..975336a4f
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+
+#include <linux/align.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <uapi/drm/pvr_drm.h>
+
+static __always_inline bool
+stream_def_is_supported(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def)
+{
+ if (stream_def->feature == PVR_FEATURE_NONE)
+ return true;
+
+ if (!(stream_def->feature & PVR_FEATURE_NOT) &&
+ pvr_device_has_feature(pvr_dev, stream_def->feature)) {
+ return true;
+ }
+
+ if ((stream_def->feature & PVR_FEATURE_NOT) &&
+ !pvr_device_has_feature(pvr_dev, stream_def->feature & ~PVR_FEATURE_NOT)) {
+ return true;
+ }
+
+ return false;
+}
+
+static int
+pvr_stream_get_data(u8 *stream, u32 *stream_offset, u32 stream_size, u32 data_size, u32 align_size,
+ void *dest)
+{
+ *stream_offset = ALIGN(*stream_offset, align_size);
+
+ if ((*stream_offset + data_size) > stream_size)
+ return -EINVAL;
+
+ memcpy(dest, stream + *stream_offset, data_size);
+
+ (*stream_offset) += data_size;
+
+ return 0;
+}
+
+/**
+ * pvr_stream_process_1() - Process a single stream and fill destination structure
+ * @pvr_dev: Device pointer.
+ * @stream_def: Stream definition.
+ * @nr_entries: Number of entries in &stream_def.
+ * @stream: Pointer to stream.
+ * @stream_offset: Starting offset within stream.
+ * @stream_size: Size of input stream, in bytes.
+ * @dest: Pointer to destination structure.
+ * @dest_size: Size of destination structure.
+ * @stream_offset_out: Pointer to variable to write updated stream offset to. May be NULL.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -%EINVAL on malformed stream.
+ */
+static int
+pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def,
+ u32 nr_entries, u8 *stream, u32 stream_offset, u32 stream_size,
+ u8 *dest, u32 dest_size, u32 *stream_offset_out)
+{
+ int err = 0;
+ u32 i;
+
+ for (i = 0; i < nr_entries; i++) {
+ if (stream_def[i].offset >= dest_size) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (!stream_def_is_supported(pvr_dev, &stream_def[i]))
+ continue;
+
+ switch (stream_def[i].size) {
+ case PVR_STREAM_SIZE_8:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u8),
+ sizeof(u8), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_16:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u16),
+ sizeof(u16), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_32:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+ sizeof(u32), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_64:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u64),
+ sizeof(u64), dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+
+ case PVR_STREAM_SIZE_ARRAY:
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size,
+ stream_def[i].array_size, sizeof(u64),
+ dest + stream_def[i].offset);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (stream_offset_out)
+ *stream_offset_out = stream_offset;
+
+ return err;
+}
+
+static int
+pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
+ const struct pvr_stream_cmd_defs *cmd_defs, void *ext_stream,
+ u32 stream_offset, u32 ext_stream_size, void *dest)
+{
+ u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
+ u32 ext_header;
+ int err = 0;
+ u32 i;
+
+ /* Copy "must have" mask from device. We clear this as we process the stream. */
+ memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
+ sizeof(musthave_masks));
+
+ do {
+ const struct pvr_stream_ext_header *header;
+ u32 type;
+ u32 data;
+
+ err = pvr_stream_get_data(ext_stream, &stream_offset, ext_stream_size, sizeof(u32),
+ sizeof(ext_header), &ext_header);
+ if (err)
+ return err;
+
+ type = (ext_header & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT;
+ data = ext_header & PVR_STREAM_EXTHDR_DATA_MASK;
+
+ if (type >= cmd_defs->ext_nr_headers)
+ return -EINVAL;
+
+ header = &cmd_defs->ext_headers[type];
+ if (data & ~header->valid_mask)
+ return -EINVAL;
+
+ musthave_masks[type] &= ~data;
+
+ for (i = 0; i < header->ext_streams_num; i++) {
+ const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
+
+ if (!(ext_header & ext_def->header_mask))
+ continue;
+
+ if (!pvr_device_has_uapi_quirk(pvr_dev, ext_def->quirk))
+ return -EINVAL;
+
+ err = pvr_stream_process_1(pvr_dev, ext_def->stream, ext_def->stream_len,
+ ext_stream, stream_offset,
+ ext_stream_size, dest,
+ cmd_defs->dest_size, &stream_offset);
+ if (err)
+ return err;
+ }
+ } while (ext_header & PVR_STREAM_EXTHDR_CONTINUATION);
+
+ /*
+ * Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
+ * for this command was not present.
+ */
+ for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ if (musthave_masks[i])
+ return -EINVAL;
+ }
+
+ return 0;
+}
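+/*
+ * Illustrative note on the extension header layout (the exact mask and
+ * shift values live in "pvr_rogue_fwif_stream.h" and are not reproduced
+ * here): the type field selects an entry in cmd_defs->ext_headers, the
+ * remaining data bits flag which quirk sub-streams follow, and
+ * PVR_STREAM_EXTHDR_CONTINUATION marks that another header word follows
+ * those sub-streams. For example, a geometry stream on a device with quirk
+ * 49927 carries a header with PVR_STREAM_EXTHDR_GEOM0_BRN49927 set,
+ * followed by the fields listed in rogue_fwif_cmd_geom_stream_brn49927.
+ */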
+
+/**
+ * pvr_stream_process() - Build FW structure from stream
+ * @pvr_dev: Device pointer.
+ * @cmd_defs: Stream definition.
+ * @stream: Pointer to command stream.
+ * @stream_size: Size of command stream, in bytes.
+ * @dest_out: Pointer to destination buffer.
+ *
+ * Caller is responsible for freeing the output structure.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%ENOMEM on out of memory, or
+ * * -%EINVAL on malformed stream.
+ */
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ void *stream, u32 stream_size, void *dest_out)
+{
+ u32 stream_offset = 0;
+ u32 main_stream_len;
+ u32 padding;
+ int err;
+
+ if (!stream || !stream_size)
+ return -EINVAL;
+
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+ sizeof(u32), &main_stream_len);
+ if (err)
+ return err;
+
+ /*
+ * The u32 after the stream length is padding to ensure u64 alignment, but it may be used for
+ * expansion in the future. Verify that it is zero.
+ */
+ err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+ sizeof(u32), &padding);
+ if (err)
+ return err;
+
+ if (main_stream_len < stream_offset || main_stream_len > stream_size || padding)
+ return -EINVAL;
+
+ err = pvr_stream_process_1(pvr_dev, cmd_defs->main_stream, cmd_defs->main_stream_len,
+ stream, stream_offset, main_stream_len, dest_out,
+ cmd_defs->dest_size, &stream_offset);
+ if (err)
+ return err;
+
+ if (stream_offset < stream_size) {
+ err = pvr_stream_process_ext_stream(pvr_dev, cmd_defs, stream, stream_offset,
+ stream_size, dest_out);
+ if (err)
+ return err;
+ } else {
+ u32 i;
+
+ /*
+ * If we don't have an extension stream then there must not be any "must have"
+ * quirks for this command.
+ */
+ for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+ if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i])
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
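+/*
+ * Minimal usage sketch (hypothetical caller; `stream_buf' and `stream_len'
+ * are assumptions, not part of this file):
+ *
+ *	struct rogue_fwif_cmd_geom fw_cmd = { 0 };
+ *	int err;
+ *
+ *	err = pvr_stream_process(pvr_dev, &pvr_cmd_geom_stream,
+ *				 stream_buf, stream_len, &fw_cmd);
+ *	if (err)
+ *		return err;
+ *
+ * On success, every field named in the stream definition and supported by
+ * the device has been copied from the stream into fw_cmd.
+ */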
+
+/**
+ * pvr_stream_create_musthave_masks() - Create "must have" masks for streams based on current device
+ * quirks
+ * @pvr_dev: Device pointer.
+ */
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev)
+{
+ memset(pvr_dev->stream_musthave_quirks, 0, sizeof(pvr_dev->stream_musthave_quirks));
+
+ if (pvr_device_has_uapi_quirk(pvr_dev, 47217))
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+ PVR_STREAM_EXTHDR_FRAG0_BRN47217;
+
+ if (pvr_device_has_uapi_quirk(pvr_dev, 49927)) {
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_GEOM][0] |=
+ PVR_STREAM_EXTHDR_GEOM0_BRN49927;
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+ PVR_STREAM_EXTHDR_FRAG0_BRN49927;
+ pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_COMPUTE][0] |=
+ PVR_STREAM_EXTHDR_COMPUTE0_BRN49927;
+ }
+}
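+/*
+ * Example: on a device with UAPI quirk 49927 the geometry mask has
+ * PVR_STREAM_EXTHDR_GEOM0_BRN49927 set, so pvr_stream_process() will reject
+ * any geometry command stream whose extension headers do not carry that bit
+ * (or which has no extension stream at all).
+ */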
diff --git a/drivers/gpu/drm/imagination/pvr_stream.h b/drivers/gpu/drm/imagination/pvr_stream.h
new file mode 100644
index 000000000..d92acb3a6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_STREAM_H
+#define PVR_STREAM_H
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct pvr_device;
+
+struct pvr_job;
+
+enum pvr_stream_type {
+ PVR_STREAM_TYPE_GEOM = 0,
+ PVR_STREAM_TYPE_FRAG,
+ PVR_STREAM_TYPE_COMPUTE,
+ PVR_STREAM_TYPE_TRANSFER,
+ PVR_STREAM_TYPE_STATIC_RENDER_CONTEXT,
+ PVR_STREAM_TYPE_STATIC_COMPUTE_CONTEXT,
+
+ PVR_STREAM_TYPE_MAX
+};
+
+enum pvr_stream_size {
+ PVR_STREAM_SIZE_8 = 0,
+ PVR_STREAM_SIZE_16,
+ PVR_STREAM_SIZE_32,
+ PVR_STREAM_SIZE_64,
+ PVR_STREAM_SIZE_ARRAY,
+};
+
+#define PVR_FEATURE_NOT BIT(31)
+#define PVR_FEATURE_NONE U32_MAX
+
+struct pvr_stream_def {
+ u32 offset;
+ enum pvr_stream_size size;
+ u32 array_size;
+ u32 feature;
+};
+
+struct pvr_stream_ext_def {
+ const struct pvr_stream_def *stream;
+ u32 stream_len;
+ u32 header_mask;
+ u32 quirk;
+};
+
+struct pvr_stream_ext_header {
+ const struct pvr_stream_ext_def *ext_streams;
+ u32 ext_streams_num;
+ u32 valid_mask;
+};
+
+struct pvr_stream_cmd_defs {
+ enum pvr_stream_type type;
+
+ const struct pvr_stream_def *main_stream;
+ u32 main_stream_len;
+
+ u32 ext_nr_headers;
+ const struct pvr_stream_ext_header *ext_headers;
+
+ size_t dest_size;
+};
+
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+ void *stream, u32 stream_size, void *dest_out);
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev);
+
+#endif /* PVR_STREAM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.c b/drivers/gpu/drm/imagination/pvr_stream_defs.c
new file mode 100644
index 000000000..f8bd1a8c0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device_info.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+
+#include <linux/stddef.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_STREAM_DEF_SET(owner, member, _size, _array_size, _feature) \
+ { .offset = offsetof(struct owner, member), \
+ .size = (_size), \
+ .array_size = (_array_size), \
+ .feature = (_feature) }
+
+#define PVR_STREAM_DEF(owner, member, member_size) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_FEATURE(owner, member, member_size, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, feature)
+
+#define PVR_STREAM_DEF_NOT_FEATURE(owner, member, member_size, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, \
+ (feature) | PVR_FEATURE_NOT)
+
+#define PVR_STREAM_DEF_ARRAY(owner, member) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+ sizeof(((struct owner *)0)->member), PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_ARRAY_FEATURE(owner, member, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+ sizeof(((struct owner *)0)->member), feature)
+
+#define PVR_STREAM_DEF_ARRAY_NOT_FEATURE(owner, member, feature) \
+ PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+ sizeof(((struct owner *)0)->member), (feature) | PVR_FEATURE_NOT)
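+/*
+ * For illustration, PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.ppp_ctrl, 32)
+ * expands to roughly:
+ *
+ *	{ .offset = offsetof(struct rogue_fwif_cmd_geom, regs.ppp_ctrl),
+ *	  .size = PVR_STREAM_SIZE_32,
+ *	  .array_size = 0,
+ *	  .feature = PVR_FEATURE_NONE }
+ */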
+
+/*
+ * When adding new parameters to the stream definition, the new parameters must go after the
+ * existing parameters to preserve ordering. As parameters are naturally aligned, care must be
+ * taken with respect to implicit padding in the stream; padding should be kept to a minimum.
+ */
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_ctrl_stream_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu_border_colour_table, 64),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect0, 64,
+ PVR_FEATURE_VDM_DRAWINDIRECT),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect1, 32,
+ PVR_FEATURE_VDM_DRAWINDIRECT),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.ppp_ctrl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.te_psg, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_context_resume_task0_size, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_context_resume_task3_size, 32,
+ PVR_FEATURE_VDM_OBJECT_LEVEL_LLS),
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.view_idx, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.pds_coeff_free_prog, 32,
+ PVR_FEATURE_TESSELLATION),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream_brn49927[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_geom_ext_streams_0[] = {
+ {
+ .stream = rogue_fwif_cmd_geom_stream_brn49927,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream_brn49927),
+ .header_mask = PVR_STREAM_EXTHDR_GEOM0_BRN49927,
+ .quirk = 49927,
+ },
+};
+
+static const struct pvr_stream_ext_header cmd_geom_ext_headers[] = {
+ {
+ .ext_streams = cmd_geom_ext_streams_0,
+ .ext_streams_num = ARRAY_SIZE(cmd_geom_ext_streams_0),
+ .valid_mask = PVR_STREAM_EXTHDR_GEOM0_VALID,
+ },
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_geom_stream = {
+ .type = PVR_STREAM_TYPE_GEOM,
+
+ .main_stream = rogue_fwif_cmd_geom_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream),
+
+ .ext_nr_headers = ARRAY_SIZE(cmd_geom_ext_headers),
+ .ext_headers = cmd_geom_ext_headers,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_geom),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_scissor_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_dbias_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zlsctl, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zload_store_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_stencil_load_store_base, 64),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.fb_cdc_zls, 64,
+ PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pbe_word),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu_border_colour_table, 64),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_bgnd),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_pr_bgnd),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.usc_clear_register),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.usc_pixel_output_ctrl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjdepth, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjvals, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_aa, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_xtp_pipe_enable, 32,
+ PVR_FEATURE_S7_TOP_INFRASTRUCTURE),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_ctl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_info, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.pixel_phantom, 32,
+ PVR_FEATURE_CLUSTER_GROUPING),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.view_idx, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_data, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_zls_pixels, 32,
+ PVR_FEATURE_ZLS_SUBTILE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.rgx_cr_blackpearl_fix, 32,
+ PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, zls_stride, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, sls_stride, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, execute_count, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn47217[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn49927[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_frag_ext_streams_0[] = {
+ {
+ .stream = rogue_fwif_cmd_frag_stream_brn47217,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn47217),
+ .header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN47217,
+ .quirk = 47217,
+ },
+ {
+ .stream = rogue_fwif_cmd_frag_stream_brn49927,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn49927),
+ .header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN49927,
+ .quirk = 49927,
+ },
+};
+
+static const struct pvr_stream_ext_header cmd_frag_ext_headers[] = {
+ {
+ .ext_streams = cmd_frag_ext_streams_0,
+ .ext_streams_num = ARRAY_SIZE(cmd_frag_ext_streams_0),
+ .valid_mask = PVR_STREAM_EXTHDR_FRAG0_VALID,
+ },
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_frag_stream = {
+ .type = PVR_STREAM_TYPE_FRAG,
+
+ .main_stream = rogue_fwif_cmd_frag_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream),
+
+ .ext_nr_headers = ARRAY_SIZE(cmd_frag_ext_headers),
+ .ext_headers = cmd_frag_ext_headers,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_frag),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_compute_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu_border_colour_table, 64),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_queue, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_base, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_NOT_FEATURE(rogue_fwif_cmd_compute, regs.cdm_ctrl_stream_base, 64,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_context_state_base_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_resume_pds1, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_item, 32,
+ PVR_FEATURE_COMPUTE_MORTON_CAPABLE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.compute_cluster, 32,
+ PVR_FEATURE_CLUSTER_GROUPING),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.tpu_tag_cdm_ctrl, 32,
+ PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, stream_start_offset, 32,
+ PVR_FEATURE_CDM_USER_MODE_QUEUE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, execute_count, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_compute_stream_brn49927[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_compute_ext_streams_0[] = {
+ {
+ .stream = rogue_fwif_cmd_compute_stream_brn49927,
+ .stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream_brn49927),
+ .header_mask = PVR_STREAM_EXTHDR_COMPUTE0_BRN49927,
+ .quirk = 49927,
+ },
+};
+
+static const struct pvr_stream_ext_header cmd_compute_ext_headers[] = {
+ {
+ .ext_streams = cmd_compute_ext_streams_0,
+ .ext_streams_num = ARRAY_SIZE(cmd_compute_ext_streams_0),
+ .valid_mask = PVR_STREAM_EXTHDR_COMPUTE0_VALID,
+ },
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_compute_stream = {
+ .type = PVR_STREAM_TYPE_COMPUTE,
+
+ .main_stream = rogue_fwif_cmd_compute_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream),
+
+ .ext_nr_headers = ARRAY_SIZE(cmd_compute_ext_headers),
+ .ext_headers = cmd_compute_ext_headers,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_compute),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_transfer_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd0_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd1_base, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd3_sizeinfo, 64),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_base, 64),
+ PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_transfer, regs.pbe_wordx_mrty),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_bgobjvals, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_pixel_output_ctrl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register0, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register1, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register2, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register3, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_size, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render_origin, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_ctl, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_aa, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_info, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_code, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_data, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render, 32),
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_rgn, 32),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.isp_xtp_pipe_enable, 32,
+ PVR_FEATURE_S7_TOP_INFRASTRUCTURE),
+ PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.frag_screen, 32,
+ PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream = {
+ .type = PVR_STREAM_TYPE_TRANSFER,
+
+ .main_stream = rogue_fwif_cmd_transfer_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_transfer_stream),
+
+ .ext_nr_headers = 0,
+
+ .dest_size = sizeof(struct rogue_fwif_cmd_transfer),
+};
+
+static const struct pvr_stream_def rogue_fwif_static_render_context_state_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_base_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_reg_vdm_context_state_resume_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_reg_ta_context_state_base_addr, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_store_task4, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[0].geom_reg_vdm_context_resume_task4, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_store_task4, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task0, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task1, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task2, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task3, 64),
+ PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch,
+ geom_state[1].geom_reg_vdm_context_resume_task4, 64),
+};
+
+const struct pvr_stream_cmd_defs pvr_static_render_context_state_stream = {
+ .type = PVR_STREAM_TYPE_STATIC_RENDER_CONTEXT,
+
+ .main_stream = rogue_fwif_static_render_context_state_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_static_render_context_state_stream),
+
+ .ext_nr_headers = 0,
+
+ .dest_size = sizeof(struct rogue_fwif_geom_registers_caswitch),
+};
+
+static const struct pvr_stream_def rogue_fwif_static_compute_context_state_stream[] = {
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 64),
+ PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 64),
+};
+
+const struct pvr_stream_cmd_defs pvr_static_compute_context_state_stream = {
+ .type = PVR_STREAM_TYPE_STATIC_COMPUTE_CONTEXT,
+
+ .main_stream = rogue_fwif_static_compute_context_state_stream,
+ .main_stream_len = ARRAY_SIZE(rogue_fwif_static_compute_context_state_stream),
+
+ .ext_nr_headers = 0,
+
+ .dest_size = sizeof(struct rogue_fwif_cdm_registers_cswitch),
+};
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.h b/drivers/gpu/drm/imagination/pvr_stream_defs.h
new file mode 100644
index 000000000..f33b82165
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_STREAM_DEFS_H
+#define PVR_STREAM_DEFS_H
+
+#include "pvr_stream.h"
+
+extern const struct pvr_stream_cmd_defs pvr_cmd_geom_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_frag_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_compute_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream;
+extern const struct pvr_stream_cmd_defs pvr_static_render_context_state_stream;
+extern const struct pvr_stream_cmd_defs pvr_static_compute_context_state_stream;
+
+#endif /* PVR_STREAM_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_sync.c b/drivers/gpu/drm/imagination/pvr_sync.c
new file mode 100644
index 000000000..129f646d1
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_sync.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_syncobj.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include "pvr_device.h"
+#include "pvr_queue.h"
+#include "pvr_sync.h"
+
+static int
+pvr_check_sync_op(const struct drm_pvr_sync_op *sync_op)
+{
+ u8 handle_type;
+
+ if (sync_op->flags & ~DRM_PVR_SYNC_OP_FLAGS_MASK)
+ return -EINVAL;
+
+ handle_type = sync_op->flags & DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK;
+ if (handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
+ handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ)
+ return -EINVAL;
+
+ if (handle_type == DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
+ sync_op->value != 0)
+ return -EINVAL;
+
+ return 0;
+}
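+/*
+ * For reference, a valid signal operation on a timeline syncobj looks like
+ * this (values are hypothetical):
+ *
+ *	struct drm_pvr_sync_op op = {
+ *		.handle = syncobj_handle,
+ *		.flags = DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ |
+ *			 DRM_PVR_SYNC_OP_FLAG_SIGNAL,
+ *		.value = point,
+ *	};
+ *
+ * A binary syncobj must instead pass .value == 0.
+ */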
+
+static void
+pvr_sync_signal_free(struct pvr_sync_signal *sig_sync)
+{
+ if (!sig_sync)
+ return;
+
+ drm_syncobj_put(sig_sync->syncobj);
+ dma_fence_chain_free(sig_sync->chain);
+ dma_fence_put(sig_sync->fence);
+ kfree(sig_sync);
+}
+
+void
+pvr_sync_signal_array_cleanup(struct xarray *array)
+{
+ struct pvr_sync_signal *sig_sync;
+ unsigned long i;
+
+ xa_for_each(array, i, sig_sync)
+ pvr_sync_signal_free(sig_sync);
+
+ xa_destroy(array);
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_add(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
+{
+ struct pvr_sync_signal *sig_sync;
+ struct dma_fence *cur_fence;
+ int err;
+ u32 id;
+
+ sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
+ if (!sig_sync)
+ return ERR_PTR(-ENOMEM);
+
+ sig_sync->handle = handle;
+ sig_sync->point = point;
+
+ if (point > 0) {
+ sig_sync->chain = dma_fence_chain_alloc();
+ if (!sig_sync->chain) {
+ err = -ENOMEM;
+ goto err_free_sig_sync;
+ }
+ }
+
+ sig_sync->syncobj = drm_syncobj_find(file, handle);
+ if (!sig_sync->syncobj) {
+ err = -EINVAL;
+ goto err_free_sig_sync;
+ }
+
+ /* Retrieve the current fence attached to that point. It's
+ * perfectly fine to get a NULL fence here, it just means there's
+ * no fence attached to that point yet.
+ */
+ if (!drm_syncobj_find_fence(file, handle, point, 0, &cur_fence))
+ sig_sync->fence = cur_fence;
+
+ err = xa_alloc(array, &id, sig_sync, xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_free_sig_sync;
+
+ return sig_sync;
+
+err_free_sig_sync:
+ pvr_sync_signal_free(sig_sync);
+ return ERR_PTR(err);
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_search(struct xarray *array, u32 handle, u64 point)
+{
+ struct pvr_sync_signal *sig_sync;
+ unsigned long i;
+
+ xa_for_each(array, i, sig_sync) {
+ if (handle == sig_sync->handle && point == sig_sync->point)
+ return sig_sync;
+ }
+
+ return NULL;
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_get(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
+{
+ struct pvr_sync_signal *sig_sync;
+
+ sig_sync = pvr_sync_signal_array_search(array, handle, point);
+ if (sig_sync)
+ return sig_sync;
+
+ return pvr_sync_signal_array_add(array, file, handle, point);
+}
+
+int
+pvr_sync_signal_array_collect_ops(struct xarray *array,
+ struct drm_file *file,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops)
+{
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct pvr_sync_signal *sig_sync;
+ int ret;
+
+ if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
+ continue;
+
+ ret = pvr_check_sync_op(&sync_ops[i]);
+ if (ret)
+ return ret;
+
+ sig_sync = pvr_sync_signal_array_get(array, file,
+ sync_ops[i].handle,
+ sync_ops[i].value);
+ if (IS_ERR(sig_sync))
+ return PTR_ERR(sig_sync);
+ }
+
+ return 0;
+}
+
+int
+pvr_sync_signal_array_update_fences(struct xarray *array,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct dma_fence *done_fence)
+{
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct dma_fence *old_fence;
+ struct pvr_sync_signal *sig_sync;
+
+ if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
+ continue;
+
+ sig_sync = pvr_sync_signal_array_search(array, sync_ops[i].handle,
+ sync_ops[i].value);
+ if (WARN_ON(!sig_sync))
+ return -EINVAL;
+
+ old_fence = sig_sync->fence;
+ sig_sync->fence = dma_fence_get(done_fence);
+ dma_fence_put(old_fence);
+
+ if (WARN_ON(!sig_sync->fence))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+pvr_sync_signal_array_push_fences(struct xarray *array)
+{
+ struct pvr_sync_signal *sig_sync;
+ unsigned long i;
+
+ xa_for_each(array, i, sig_sync) {
+ if (sig_sync->chain) {
+ drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
+ sig_sync->fence, sig_sync->point);
+ sig_sync->chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
+ }
+ }
+}
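+/*
+ * Typical submission flow across the helpers above (sketch; the xarray is
+ * owned by the caller and must be allocated with XA_FLAGS_ALLOC for
+ * xa_alloc() to work):
+ *
+ *	struct xarray sig_array;
+ *
+ *	xa_init_flags(&sig_array, XA_FLAGS_ALLOC);
+ *	err = pvr_sync_signal_array_collect_ops(&sig_array, file,
+ *						op_count, ops);
+ *	...
+ *	err = pvr_sync_signal_array_update_fences(&sig_array, op_count, ops,
+ *						  done_fence);
+ *	...
+ *	pvr_sync_signal_array_push_fences(&sig_array);
+ *	pvr_sync_signal_array_cleanup(&sig_array);
+ */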
+
+static int
+pvr_sync_add_dep_to_job(struct drm_sched_job *job, struct dma_fence *f)
+{
+ struct dma_fence_unwrap iter;
+ u32 native_fence_count = 0;
+ struct dma_fence *uf;
+ int err = 0;
+
+ dma_fence_unwrap_for_each(uf, &iter, f) {
+ if (pvr_queue_fence_is_ufo_backed(uf))
+ native_fence_count++;
+ }
+
+ /* No need to unwrap the fence if it's fully non-native. */
+ if (!native_fence_count)
+ return drm_sched_job_add_dependency(job, f);
+
+ dma_fence_unwrap_for_each(uf, &iter, f) {
+ /* There's no dma_fence_unwrap_stop() helper to clean up the refs
+ * owned by dma_fence_unwrap(), so keep iterating over all entries
+ * and simply skip the work once something has failed.
+ */
+ if (err)
+ continue;
+
+ if (pvr_queue_fence_is_ufo_backed(uf)) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(uf);
+
+ /* If this is a native dependency, we wait for the scheduled fence,
+ * and we will let pvr_queue_run_job() issue FW waits.
+ */
+ err = drm_sched_job_add_dependency(job,
+ dma_fence_get(&s_fence->scheduled));
+ } else {
+ err = drm_sched_job_add_dependency(job, dma_fence_get(uf));
+ }
+ }
+
+ dma_fence_put(f);
+ return err;
+}
+
+int
+pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct xarray *signal_array)
+{
+ int err = 0;
+
+ if (!sync_op_count)
+ return 0;
+
+ for (u32 i = 0; i < sync_op_count; i++) {
+ struct pvr_sync_signal *sig_sync;
+ struct dma_fence *fence;
+
+ if (sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+ continue;
+
+ err = pvr_check_sync_op(&sync_ops[i]);
+ if (err)
+ return err;
+
+ sig_sync = pvr_sync_signal_array_search(signal_array, sync_ops[i].handle,
+ sync_ops[i].value);
+ if (sig_sync) {
+ if (WARN_ON(!sig_sync->fence))
+ return -EINVAL;
+
+ fence = dma_fence_get(sig_sync->fence);
+ } else {
+ err = drm_syncobj_find_fence(from_pvr_file(pvr_file), sync_ops[i].handle,
+ sync_ops[i].value, 0, &fence);
+ if (err)
+ return err;
+ }
+
+ err = pvr_sync_add_dep_to_job(job, fence);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_sync.h b/drivers/gpu/drm/imagination/pvr_sync.h
new file mode 100644
index 000000000..db6ccfda1
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_sync.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_SYNC_H
+#define PVR_SYNC_H
+
+#include <uapi/drm/pvr_drm.h>
+
+/* Forward declaration from <linux/xarray.h>. */
+struct xarray;
+
+/* Forward declaration from <drm/drm_file.h>. */
+struct drm_file;
+
+/* Forward declaration from <drm/gpu_scheduler.h>. */
+struct drm_sched_job;
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_file;
+
+/**
+ * struct pvr_sync_signal - Object encoding a syncobj signal operation
+ *
+ * The job submission logic collects all signal operations in an array of
+ * pvr_sync_signal objects. This array also serves as a cache to get the
+ * latest dma_fence when multiple jobs are submitted at once, and one job
+ * signals a syncobj point that's later waited on by a subsequent job.
+ */
+struct pvr_sync_signal {
+ /** @handle: Handle of the syncobj to signal. */
+ u32 handle;
+
+ /**
+ * @point: Point to signal in the syncobj.
+ *
+ * Only relevant for timeline syncobjs.
+ */
+ u64 point;
+
+ /** @syncobj: Syncobj retrieved from the handle. */
+ struct drm_syncobj *syncobj;
+
+ /**
+ * @chain: Chain object used to link the new fence with the
+ * existing timeline syncobj.
+ *
+ * Should be zero when manipulating a regular syncobj.
+ */
+ struct dma_fence_chain *chain;
+
+ /**
+ * @fence: New fence object to attach to the syncobj.
+ *
+ * This pointer starts with the current fence bound to
+ * the <handle,point> pair.
+ */
+ struct dma_fence *fence;
+};
+
+void
+pvr_sync_signal_array_cleanup(struct xarray *array);
+
+int
+pvr_sync_signal_array_collect_ops(struct xarray *array,
+ struct drm_file *file,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops);
+
+int
+pvr_sync_signal_array_update_fences(struct xarray *array,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct dma_fence *done_fence);
+
+void
+pvr_sync_signal_array_push_fences(struct xarray *array);
+
+int
+pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
+ u32 sync_op_count,
+ const struct drm_pvr_sync_op *sync_ops,
+ struct xarray *signal_array);
+
+#endif /* PVR_SYNC_H */
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
new file mode 100644
index 000000000..e59517ba0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_vm.h"
+
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_mmu.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_heap_config.h"
+
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
+
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/stddef.h>
+
+/**
+ * DOC: Memory context
+ *
+ * This is the "top level" datatype in the VM code. It's exposed in the public
+ * API as an opaque handle.
+ */
+
+/**
+ * struct pvr_vm_context - Context type used to represent a single VM.
+ */
+struct pvr_vm_context {
+ /**
+ * @pvr_dev: The PowerVR device to which this context is bound.
+ * This binding is immutable for the life of the context.
+ */
+ struct pvr_device *pvr_dev;
+
+ /** @mmu_ctx: The context for binding to physical memory. */
+ struct pvr_mmu_context *mmu_ctx;
+
+ /** @gpuvm_mgr: GPUVM object associated with this context. */
+ struct drm_gpuvm gpuvm_mgr;
+
+ /** @lock: Global lock on this VM. */
+ struct mutex lock;
+
+ /**
+ * @fw_mem_ctx_obj: Firmware object representing firmware memory
+ * context.
+ */
+ struct pvr_fw_object *fw_mem_ctx_obj;
+
+ /** @ref_count: Reference count of object. */
+ struct kref ref_count;
+
+ /**
+ * @dummy_gem: GEM object to enable VM reservation. All private BOs
+ * should use the @dummy_gem.resv and not their own _resv field.
+ */
+ struct drm_gem_object dummy_gem;
+};
+
+static inline
+struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
+{
+ return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
+}
+
+struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
+{
+ if (vm_ctx)
+ kref_get(&vm_ctx->ref_count);
+
+ return vm_ctx;
+}
+
+/**
+ * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
+ * page table structure behind a VM context.
+ * @vm_ctx: Target VM context.
+ */
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
+{
+ return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
+}
+
+/**
+ * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This is used to allow private BOs to share a dma_resv for faster fence
+ * updates.
+ *
+ * Returns: The dma_resv pointer.
+ */
+struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
+{
+ return vm_ctx->dummy_gem.resv;
+}
+
+/**
+ * DOC: Memory mappings
+ */
+
+/**
+ * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
+ */
+struct pvr_vm_gpuva {
+ /** @base: The wrapped drm_gpuva object. */
+ struct drm_gpuva base;
+};
+
+enum pvr_vm_bind_type {
+ PVR_VM_BIND_TYPE_MAP,
+ PVR_VM_BIND_TYPE_UNMAP,
+};
+
+/**
+ * struct pvr_vm_bind_op - Context of a map/unmap operation.
+ */
+struct pvr_vm_bind_op {
+ /** @type: Map or unmap. */
+ enum pvr_vm_bind_type type;
+
+ /** @pvr_obj: Object associated with mapping (map only). */
+ struct pvr_gem_object *pvr_obj;
+
+ /**
+ * @vm_ctx: VM context where the mapping will be created or destroyed.
+ */
+ struct pvr_vm_context *vm_ctx;
+
+ /** @mmu_op_ctx: MMU op context. */
+ struct pvr_mmu_op_context *mmu_op_ctx;
+
+ /** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
+ struct drm_gpuvm_bo *gpuvm_bo;
+
+ /**
+ * @new_va: Prealloced VA mapping object (init in callback).
+ * Used when creating a mapping.
+ */
+ struct pvr_vm_gpuva *new_va;
+
+ /**
+ * @prev_va: Prealloced VA mapping object (init in callback).
+ * Used when a mapping or unmapping operation overlaps an existing
+ * mapping and splits away the beginning into a new mapping.
+ */
+ struct pvr_vm_gpuva *prev_va;
+
+ /**
+ * @next_va: Prealloced VA mapping object (init in callback).
+ * Used when a mapping or unmapping operation overlaps an existing
+ * mapping and splits away the end into a new mapping.
+ */
+ struct pvr_vm_gpuva *next_va;
+
+ /** @offset: Offset into @pvr_obj to begin mapping from. */
+ u64 offset;
+
+ /** @device_addr: Device-virtual address at the start of the mapping. */
+ u64 device_addr;
+
+ /** @size: Size of the desired mapping. */
+ u64 size;
+};
+
+/**
+ * pvr_vm_bind_op_exec() - Execute a single bind op.
+ * @bind_op: Bind op context.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or
+ * a callback function.
+ */
+static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
+{
+ switch (bind_op->type) {
+ case PVR_VM_BIND_TYPE_MAP:
+ return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
+ bind_op, bind_op->device_addr,
+ bind_op->size,
+ gem_from_pvr_gem(bind_op->pvr_obj),
+ bind_op->offset);
+
+ case PVR_VM_BIND_TYPE_UNMAP:
+ return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
+ bind_op, bind_op->device_addr,
+ bind_op->size);
+ }
+
+ /*
+ * This shouldn't happen unless something went wrong
+ * in drm_sched.
+ */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
+{
+ drm_gpuvm_bo_put(bind_op->gpuvm_bo);
+
+ kfree(bind_op->new_va);
+ kfree(bind_op->prev_va);
+ kfree(bind_op->next_va);
+
+ if (bind_op->pvr_obj)
+ pvr_gem_object_put(bind_op->pvr_obj);
+
+ if (bind_op->mmu_op_ctx)
+ pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
+}
+
+static int
+pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 offset,
+ u64 device_addr, u64 size)
+{
+ struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+ const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
+ const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
+ struct sg_table *sgt;
+ u64 offset_plus_size;
+ int err;
+
+ if (check_add_overflow(offset, size, &offset_plus_size))
+ return -EINVAL;
+
+ if (is_user &&
+ !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
+ return -EINVAL;
+ }
+
+ if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
+ offset & ~PAGE_MASK || size & ~PAGE_MASK ||
+ offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
+ return -EINVAL;
+
+ bind_op->type = PVR_VM_BIND_TYPE_MAP;
+
+ dma_resv_lock(obj->resv, NULL);
+ bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(bind_op->gpuvm_bo))
+ return PTR_ERR(bind_op->gpuvm_bo);
+
+ bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
+ bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
+ bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
+ if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
+ err = -ENOMEM;
+ goto err_bind_op_fini;
+ }
+
+ /* Pin pages so they're ready for use. */
+ sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
+ err = PTR_ERR_OR_ZERO(sgt);
+ if (err)
+ goto err_bind_op_fini;
+
+ bind_op->mmu_op_ctx =
+ pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
+ err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
+ if (err) {
+ bind_op->mmu_op_ctx = NULL;
+ goto err_bind_op_fini;
+ }
+
+ bind_op->pvr_obj = pvr_obj;
+ bind_op->vm_ctx = vm_ctx;
+ bind_op->device_addr = device_addr;
+ bind_op->size = size;
+ bind_op->offset = offset;
+
+ return 0;
+
+err_bind_op_fini:
+ pvr_vm_bind_op_fini(bind_op);
+
+ return err;
+}
+
+static int
+pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
+ struct pvr_vm_context *vm_ctx, u64 device_addr,
+ u64 size)
+{
+ int err;
+
+ if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
+ return -EINVAL;
+
+ bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
+
+ bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
+ bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
+ if (!bind_op->prev_va || !bind_op->next_va) {
+ err = -ENOMEM;
+ goto err_bind_op_fini;
+ }
+
+ bind_op->mmu_op_ctx =
+ pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
+ err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
+ if (err) {
+ bind_op->mmu_op_ctx = NULL;
+ goto err_bind_op_fini;
+ }
+
+ bind_op->vm_ctx = vm_ctx;
+ bind_op->device_addr = device_addr;
+ bind_op->size = size;
+
+ return 0;
+
+err_bind_op_fini:
+ pvr_vm_bind_op_fini(bind_op);
+
+ return err;
+}
+
+/**
+ * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
+ * @op: gpuva op containing the map details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by drm_gpuvm_sm_map following a successful mapping while
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_map().
+ */
+static int
+pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
+{
+ struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
+ struct pvr_vm_bind_op *ctx = op_ctx;
+ int err;
+
+ if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
+ return -EINVAL;
+
+ err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
+ op->map.va.addr);
+ if (err)
+ return err;
+
+ drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
+ drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
+ ctx->new_va = NULL;
+
+ return 0;
+}
+
+/**
+ * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
+ * @op: gpuva op containing the unmap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_unmap().
+ */
+static int
+pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
+{
+ struct pvr_vm_bind_op *ctx = op_ctx;
+
+ int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
+ op->unmap.va->va.range);
+
+ if (err)
+ return err;
+
+ drm_gpuva_unmap(&op->unmap);
+ drm_gpuva_unlink(op->unmap.va);
+
+ return 0;
+}
+
+/**
+ * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
+ * @op: gpuva op containing the remap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
+ * mapping or unmapping operation causes a region to be split. The
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_unmap().
+ */
+static int
+pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
+{
+ struct pvr_vm_bind_op *ctx = op_ctx;
+ u64 va_start = 0, va_range = 0;
+ int err;
+
+ drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
+ err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
+ if (err)
+ return err;
+
+ /* No actual remap required: the page table tree depth is fixed to 3,
+ * and we use 4k page table entries only for now.
+ */
+ drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
+
+ if (op->remap.prev) {
+ pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
+ drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
+ ctx->prev_va = NULL;
+ }
+
+ if (op->remap.next) {
+ pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
+ drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
+ ctx->next_va = NULL;
+ }
+
+ drm_gpuva_unlink(op->remap.unmap->va);
+
+ return 0;
+}
+
+/*
+ * Public API
+ *
+ * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
+ */
+
+/**
+ * pvr_device_addr_is_valid() - Tests whether a device-virtual address
+ * is valid.
+ * @device_addr: Virtual device address to test.
+ *
+ * Return:
+ * * %true if @device_addr is within the valid range for a device page
+ * table and is aligned to the device page size, or
+ * * %false otherwise.
+ */
+bool
+pvr_device_addr_is_valid(u64 device_addr)
+{
+ return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
+ (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
+}
+
+/**
+ * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
+ * address and associated size are both valid.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address to test.
+ * @size: Size of the range based at @device_addr to test.
+ *
+ * Calling pvr_device_addr_is_valid() twice (once on @size, and again on
+ * @device_addr + @size) to verify a device-virtual address range initially
+ * seems intuitive, but it produces a false-negative when the address range
+ * is right at the end of device-virtual address space.
+ *
+ * This function catches that corner case, as well as checking that
+ * @size is non-zero.
+ *
+ * Return:
+ * * %true if @device_addr is device page aligned; @size is device page
+ * aligned; the range specified by @device_addr and @size is within the
+ * bounds of the device-virtual address space, and @size is non-zero, or
+ * * %false otherwise.
+ */
+bool
+pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+ u64 device_addr, u64 size)
+{
+ return pvr_device_addr_is_valid(device_addr) &&
+ drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
+ size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
+ (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
+}
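+/*
+ * Boundary example: a range whose end coincides exactly with
+ * PVR_PAGE_TABLE_ADDR_SPACE_SIZE is legal, yet passing @device_addr + @size
+ * to pvr_device_addr_is_valid() would return %false because the
+ * one-past-the-end address is not itself mappable; the explicit "<="
+ * comparison above accepts it.
+ */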
+
+static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
+{
+ kfree(to_pvr_vm_context(gpuvm));
+}
+
+static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
+ .vm_free = pvr_gpuvm_free,
+ .sm_step_map = pvr_vm_gpuva_map,
+ .sm_step_remap = pvr_vm_gpuva_remap,
+ .sm_step_unmap = pvr_vm_gpuva_unmap,
+};
+
+static void
+fw_mem_context_init(void *cpu_ptr, void *priv)
+{
+ struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
+ struct pvr_vm_context *vm_ctx = priv;
+
+ fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
+ fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
+}
+
+/**
+ * pvr_vm_create_context() - Create a new VM context.
+ * @pvr_dev: Target PowerVR device.
+ * @is_userspace_context: %true if this context is for userspace. This will
+ * create a firmware memory context for the VM context
+ * and disable warnings when tearing down mappings.
+ *
+ * Return:
+ * * A handle to the newly-minted VM context on success,
+ * * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
+ * missing or has an unsupported value,
+ * * -%ENOMEM if allocation of the structure behind the opaque handle fails,
+ * or
+ * * Any error encountered while setting up internal structures.
+ */
+struct pvr_vm_context *
+pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ struct pvr_vm_context *vm_ctx;
+ u16 device_addr_bits;
+
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
+ &device_addr_bits);
+ if (err) {
+ drm_err(drm_dev,
+ "Failed to get device virtual address space bits\n");
+ return ERR_PTR(err);
+ }
+
+ if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
+ drm_err(drm_dev,
+ "Device has unsupported virtual address space size\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
+ if (!vm_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ vm_ctx->pvr_dev = pvr_dev;
+
+ vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
+ err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
+ if (err)
+ goto err_free;
+
+ if (is_userspace_context) {
+ err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
+ PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+ fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
+
+ if (err)
+ goto err_page_table_destroy;
+ }
+
+ drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
+ drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
+ is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
+ 0, &pvr_dev->base, &vm_ctx->dummy_gem,
+ 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
+
+ mutex_init(&vm_ctx->lock);
+ kref_init(&vm_ctx->ref_count);
+
+ return vm_ctx;
+
+err_page_table_destroy:
+ pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
+
+err_free:
+ kfree(vm_ctx);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+static void
+pvr_vm_context_release(struct kref *ref_count)
+{
+ struct pvr_vm_context *vm_ctx =
+ container_of(ref_count, struct pvr_vm_context, ref_count);
+
+ if (vm_ctx->fw_mem_ctx_obj)
+ pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
+
+ WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range));
+
+ pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
+ drm_gem_private_object_fini(&vm_ctx->dummy_gem);
+ mutex_destroy(&vm_ctx->lock);
+
+ drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
+}
+
+/**
+ * pvr_vm_context_lookup() - Look up VM context from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on VM context object. Call pvr_vm_context_put() to release.
+ *
+ * Returns:
+ * * The requested object on success, or
+ * * %NULL on failure (object does not exist in list, or is not a VM context)
+ */
+struct pvr_vm_context *
+pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_vm_context *vm_ctx;
+
+ xa_lock(&pvr_file->vm_ctx_handles);
+ vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
+ if (vm_ctx)
+ kref_get(&vm_ctx->ref_count);
+
+ xa_unlock(&pvr_file->vm_ctx_handles);
+
+ return vm_ctx;
+}
+
+/**
+ * pvr_vm_context_put() - Release a reference on a VM context
+ * @vm_ctx: Target VM context.
+ *
+ * Returns:
+ * * %true if the VM context was destroyed, or
+ * * %false if there are any references still remaining.
+ */
+bool
+pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
+{
+ if (vm_ctx)
+ return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
+
+ return true;
+}
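+/*
+ * A typical caller pairs lookup and put around the handle's use (sketch;
+ * `handle' comes from userspace):
+ *
+ *	struct pvr_vm_context *vm_ctx = pvr_vm_context_lookup(pvr_file, handle);
+ *
+ *	if (!vm_ctx)
+ *		return -EINVAL;
+ *	...
+ *	pvr_vm_context_put(vm_ctx);
+ */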
+
+/**
+ * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all vm_contexts associated with @pvr_file from the device VM context
+ * list and drops initial references. vm_contexts will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_vm_context *vm_ctx;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
+ /* vm_ctx is not used here because that would create a race with xa_erase */
+ pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
+ }
+}
+
+static int
+pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
+{
+ struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
+ struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
+
+ /* Unmap operations don't have an object to lock. */
+ if (!pvr_obj)
+ return 0;
+
+ /* Acquire lock on the GEM being mapped. */
+ return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
+}
+
+/**
+ * pvr_vm_map() - Map a section of physical memory into a section of
+ * device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @pvr_obj_offset: Offset into @pvr_obj to map from.
+ * @device_addr: Virtual device address at the start of the requested mapping.
+ * @size: Size of the requested mapping.
+ *
+ * No handle is returned to represent the mapping. Instead, callers should
+ * remember @device_addr and use that as a handle.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ * address; the region specified by @pvr_obj_offset and @size does not fall
+ * entirely within @pvr_obj, or any part of the specified region of @pvr_obj
+ * is not device-virtual page-aligned,
+ * * Any error encountered while performing internal operations required to
+ * create the mapping (returned from pvr_vm_gpuva_map or
+ * pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 pvr_obj_offset, u64 device_addr, u64 size)
+{
+ struct pvr_vm_bind_op bind_op = {0};
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm_ctx->gpuvm_mgr,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES,
+ .extra = {
+ .fn = pvr_vm_lock_extra,
+ .priv = &bind_op,
+ },
+ };
+
+ int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
+ pvr_obj_offset, device_addr,
+ size);
+
+ if (err)
+ return err;
+
+ pvr_gem_object_get(pvr_obj);
+
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err)
+ goto err_cleanup;
+
+ err = pvr_vm_bind_op_exec(&bind_op);
+
+ drm_gpuvm_exec_unlock(&vm_exec);
+
+err_cleanup:
+ pvr_vm_bind_op_fini(&bind_op);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ * address,
+ * * Any error encountered while performing internal operations required to
+ * destroy the mapping (returned from pvr_vm_gpuva_unmap or
+ * pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_vm_bind_op bind_op = {0};
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm_ctx->gpuvm_mgr,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES,
+ .extra = {
+ .fn = pvr_vm_lock_extra,
+ .priv = &bind_op,
+ },
+ };
+
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
+ size);
+ if (err)
+ return err;
+
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err)
+ goto err_cleanup;
+
+ err = pvr_vm_bind_op_exec(&bind_op);
+
+ drm_gpuvm_exec_unlock(&vm_exec);
+
+err_cleanup:
+ pvr_vm_bind_op_fini(&bind_op);
+
+ return err;
+}
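+/*
+ * Usage sketch (hypothetical caller; `obj', `offset', `addr' and `size' are
+ * assumptions): because no handle is returned for a mapping, an unmap simply
+ * mirrors the original map call using the same device-virtual address.
+ *
+ *	err = pvr_vm_map(vm_ctx, obj, offset, addr, size);
+ *	...
+ *	err = pvr_vm_unmap(vm_ctx, addr, size);
+ */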
+
+/* Static data areas are determined by firmware. */
+static const struct drm_pvr_static_data_area static_data_areas[] = {
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
+ .location_heap_id = DRM_PVR_HEAP_GENERAL,
+ .offset = 0,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+ .location_heap_id = DRM_PVR_HEAP_GENERAL,
+ .offset = 128,
+ .size = 1024,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+ .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+ .offset = 0,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
+ .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+ .offset = 128,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+ .location_heap_id = DRM_PVR_HEAP_USC_CODE,
+ .offset = 0,
+ .size = 128,
+ },
+};
+
+#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
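+/*
+ * e.g. for the general heap, the last static data area above ends at offset
+ * 128 + 1024 = 1152, so GET_RESERVED_SIZE(128, 1024) reserves
+ * round_up(1152, PAGE_SIZE) bytes, i.e. one page on a 4 KiB-page kernel.
+ */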
+
+/*
+ * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
+ * static data area for each heap.
+ */
+static const struct drm_pvr_heap pvr_heaps[] = {
+ [DRM_PVR_HEAP_GENERAL] = {
+ .base = ROGUE_GENERAL_HEAP_BASE,
+ .size = ROGUE_GENERAL_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_PDS_CODE_DATA] = {
+ .base = ROGUE_PDSCODEDATA_HEAP_BASE,
+ .size = ROGUE_PDSCODEDATA_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_USC_CODE] = {
+ .base = ROGUE_USCCODE_HEAP_BASE,
+ .size = ROGUE_USCCODE_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_RGNHDR] = {
+ .base = ROGUE_RGNHDR_HEAP_BASE,
+ .size = ROGUE_RGNHDR_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_VIS_TEST] = {
+ .base = ROGUE_VISTEST_HEAP_BASE,
+ .size = ROGUE_VISTEST_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_TRANSFER_FRAG] = {
+ .base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
+ .size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+};
+
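+/*
+ * Both query helpers below follow the same multi-step pattern: a call with
+ * args->pointer == 0 only reports the expected struct size in args->size; a
+ * call with the embedded array pointer left at zero returns the element
+ * count and stride so userspace can size its buffer; and a final call with
+ * the array populated copies the entries out.
+ */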
+int
+pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_static_data_areas query = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ if (err < 0)
+ return err;
+
+ if (!query.static_data_areas.array) {
+ query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+ query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
+ goto copy_out;
+ }
+
+ if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
+ query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+
+ err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
+ if (err < 0)
+ return err;
+
+copy_out:
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+int
+pvr_heap_info_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_heap_info query = {0};
+ u64 dest;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_heap_info);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ if (err < 0)
+ return err;
+
+ if (!query.heaps.array) {
+ query.heaps.count = ARRAY_SIZE(pvr_heaps);
+ query.heaps.stride = sizeof(struct drm_pvr_heap);
+ goto copy_out;
+ }
+
+ if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
+ query.heaps.count = ARRAY_SIZE(pvr_heaps);
+
+ /* Region header heap is only present if BRN63142 is present. */
+ dest = query.heaps.array;
+ for (size_t i = 0; i < query.heaps.count; i++) {
+ struct drm_pvr_heap heap = pvr_heaps[i];
+
+ if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
+ heap.size = 0;
+
+ err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
+ if (err < 0)
+ return err;
+
+ dest += query.heaps.stride;
+ }
+
+copy_out:
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+/**
+ * pvr_heap_contains_range() - Determine if a given heap contains the specified
+ * device-virtual address range.
+ * @pvr_heap: Target heap.
+ * @start: Inclusive start of the target range.
+ * @end: Inclusive end of the target range.
+ *
+ * It is an error to call this function with values of @start and @end that do
+ * not satisfy the condition @start <= @end.
+ */
+static __always_inline bool
+pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
+{
+ return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
+}
+
+/**
+ * pvr_find_heap_containing() - Find a heap which contains the specified
+ * device-virtual address range.
+ * @pvr_dev: Target PowerVR device.
+ * @start: Start of the target range.
+ * @size: Size of the target range.
+ *
+ * Return:
+ * * A pointer to a constant instance of struct drm_pvr_heap representing the
+ * heap containing the entire range specified by @start and @size on
+ * success, or
+ * * %NULL if no such heap exists.
+ */
+const struct drm_pvr_heap *
+pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
+{
+ u64 end;
+
+ if (check_add_overflow(start, size - 1, &end))
+ return NULL;
+
+ /*
+ * There are no guarantees about the order of address ranges in
+ * &pvr_heaps, so iterate over the entire array for a heap whose
+ * range completely encompasses the given range.
+ */
+ for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
+ /* Skip heaps that are only present when an associated quirk exists. */
+ if (heap_id == DRM_PVR_HEAP_RGNHDR &&
+ !PVR_HAS_QUIRK(pvr_dev, 63142)) {
+ continue;
+ }
+
+ if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
+ return &pvr_heaps[heap_id];
+ }
+
+ return NULL;
+}
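
The containment test above works on an inclusive end so that a range touching the top of the 64-bit space cannot overflow past it. A standalone model of the two checks, using the compiler builtin that the kernel's check_add_overflow() wraps:

#include <stdbool.h>
#include <stdint.h>

/* Standalone model of pvr_heap_contains_range() plus the overflow guard
 * from pvr_find_heap_containing(). The size == 0 rejection is added here
 * for self-containment; in the driver, sizes are validated separately.
 * Heap ranges are assumed not to touch the very top of the address
 * space, matching the entries in pvr_heaps[]. */
static bool range_contains(uint64_t heap_base, uint64_t heap_size,
			   uint64_t start, uint64_t size)
{
	uint64_t end;

	/* __builtin_add_overflow() is what check_add_overflow() wraps. */
	if (size == 0 || __builtin_add_overflow(start, size - 1, &end))
		return false;

	return heap_base <= start && end < heap_base + heap_size;
}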
+
+/**
+ * pvr_vm_find_gem_object() - Look up a buffer object from a given
+ * device-virtual address.
+ * @vm_ctx: [IN] Target VM context.
+ * @device_addr: [IN] Virtual device address at the start of the required
+ * object.
+ * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
+ * of the mapped region within the buffer object. May be
+ * %NULL if this information is not required.
+ * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
+ * region. May be %NULL if this information is not required.
+ *
+ * If successful, a reference will be taken on the buffer object. The caller
+ * must drop the reference with pvr_gem_object_put().
+ *
+ * Return:
+ * * The PowerVR buffer object mapped at @device_addr if one exists, or
+ * * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
+ u64 *mapped_offset_out, u64 *mapped_size_out)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
+ if (!va)
+ goto err_unlock;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ pvr_gem_object_get(pvr_obj);
+
+ if (mapped_offset_out)
+ *mapped_offset_out = va->gem.offset;
+ if (mapped_size_out)
+ *mapped_size_out = va->va.range;
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return pvr_obj;
+
+err_unlock:
+ mutex_unlock(&vm_ctx->lock);
+
+ return NULL;
+}
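
A hypothetical caller, included only to make the reference contract above concrete (inspect_mapping() is not part of this patch):

/* Illustrative only: drops the reference taken by the lookup. */
static int inspect_mapping(struct pvr_vm_context *vm_ctx, u64 device_addr)
{
	u64 offset, size;
	struct pvr_gem_object *obj;

	obj = pvr_vm_find_gem_object(vm_ctx, device_addr, &offset, &size);
	if (!obj)
		return -ENOENT;

	/* ... use obj, offset and size ... */

	pvr_gem_object_put(obj);	/* drop the reference taken above */
	return 0;
}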
+
+/**
+ * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
+ * @vm_ctx: Target VM context.
+ *
+ * Return:
+ * * FW object representing firmware memory context, or
+ * * %NULL if this VM context does not have a firmware memory context.
+ */
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
+{
+ return vm_ctx->fw_mem_ctx_obj;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
new file mode 100644
index 000000000..f2a6463f2
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_VM_H
+#define PVR_VM_H
+
+#include "pvr_rogue_mmu_defs.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/types.h>
+
+/* Forward declarations from "pvr_device.h" */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_gem.h" */
+struct pvr_gem_object;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <uapi/drm/pvr_drm.h> */
+struct drm_pvr_ioctl_dev_query_args;
+
+/* Forward declaration from <drm/drm_exec.h> */
+struct drm_exec;
+
+/* Functions defined in pvr_vm.c */
+
+bool pvr_device_addr_is_valid(u64 device_addr);
+bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+ u64 device_addr, u64 size);
+
+struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
+ bool is_userspace_context);
+
+int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+ u64 device_addr, u64 size);
+int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
+struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
+
+int pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args);
+int pvr_heap_info_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args);
+const struct drm_pvr_heap *pvr_find_heap_containing(struct pvr_device *pvr_dev,
+ u64 addr, u64 size);
+
+struct pvr_gem_object *pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx,
+ u64 device_addr,
+ u64 *mapped_offset_out,
+ u64 *mapped_size_out);
+
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx);
+
+struct pvr_vm_context *pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle);
+struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx);
+bool pvr_vm_context_put(struct pvr_vm_context *vm_ctx);
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file);
+
+#endif /* PVR_VM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
new file mode 100644
index 000000000..b7fef3c79
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_mmu.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm.h"
+#include "pvr_vm_mips.h"
+
+#include <drm/drm_managed.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/**
+ * pvr_vm_mips_init() - Initialise MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success,
+ *  * -%EINVAL if the page table is too large or the physical bus width
+ *    cannot be determined, or
+ *  * -%ENOMEM on allocation or DMA-mapping failure.
+ */
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev)
+{
+ u32 pt_size = 1 << ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev);
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+ struct pvr_fw_mips_data *mips_data;
+ u32 phys_bus_width;
+ int page_nr;
+ int err;
+
+ /* The page table must fit within ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES 4K pages. */
+ if (pt_size > ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * SZ_4K)
+ return -EINVAL;
+
+ if (PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width))
+ return -EINVAL;
+
+ mips_data = drmm_kzalloc(from_pvr_device(pvr_dev), sizeof(*mips_data), GFP_KERNEL);
+ if (!mips_data)
+ return -ENOMEM;
+
+ for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
+ mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!mips_data->pt_pages[page_nr]) {
+ err = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ mips_data->pt_dma_addr[page_nr] = dma_map_page(dev, mips_data->pt_pages[page_nr], 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) {
+ err = -ENOMEM;
+ __free_page(mips_data->pt_pages[page_nr]);
+ goto err_free_pages;
+ }
+ }
+
+ mips_data->pt = vmap(mips_data->pt_pages, pt_size >> PAGE_SHIFT, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!mips_data->pt) {
+ err = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ mips_data->pfn_mask = (phys_bus_width > 32) ? ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT :
+ ROGUE_MIPSFW_ENTRYLO_PFN_MASK;
+
+ mips_data->cache_policy = (phys_bus_width > 32) ? ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT :
+ ROGUE_MIPSFW_CACHED_POLICY;
+
+ pvr_dev->fw_dev.processor_data.mips_data = mips_data;
+
+ return 0;
+
+err_free_pages:
+ while (--page_nr >= 0) {
+ dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+ mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+
+ __free_page(mips_data->pt_pages[page_nr]);
+ }
+
+ return err;
+}
+
+/**
+ * pvr_vm_mips_fini() - Release MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ int page_nr;
+
+ vunmap(mips_data->pt);
+ for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
+ dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+ mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+
+ __free_page(mips_data->pt_pages[page_nr]);
+ }
+
+ fw_dev->processor_data.mips_data = NULL;
+}
+
+static u32
+get_mips_pte_flags(bool read, bool write, u32 cache_policy)
+{
+ u32 flags = 0;
+
+ if (read && write) /* Read/write. */
+ flags |= ROGUE_MIPSFW_ENTRYLO_DIRTY_EN;
+ else if (write) /* Write only. */
+ flags |= ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN;
+ else
+ WARN_ON(!read);
+
+ flags |= cache_policy << ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+
+ flags |= ROGUE_MIPSFW_ENTRYLO_VALID_EN | ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN;
+
+ return flags;
+}
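
To make the bit layout concrete, here is a standalone model of the PTE packing performed in pvr_vm_mips_map() below; the shift and mask constants are illustrative placeholders, the real values live in pvr_rogue_mips.h.

#include <stdint.h>

#define LOG2_PAGE_4K		12		/* illustrative */
#define ENTRYLO_PFN_SHIFT	6		/* illustrative */
#define ENTRYLO_PFN_MASK	0x03ffffc0u	/* illustrative */

static uint32_t pack_pte(uint64_t dma_addr, uint32_t flags)
{
	uint32_t pte;

	/* Physical frame number, shifted into the EntryLo PFN field. */
	pte = ((dma_addr >> LOG2_PAGE_4K) << ENTRYLO_PFN_SHIFT) &
	      ENTRYLO_PFN_MASK;

	/* Valid/global/dirty/cache-policy bits from get_mips_pte_flags(). */
	return pte | flags;
}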
+
+/**
+ * pvr_vm_mips_map() - Map a FW object into the MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * Returns:
+ * * 0 on success,
+ * * -%EINVAL if object does not reside within FW address space, or
+ * * Any error returned by pvr_fw_object_get_dma_addr().
+ */
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+ const u64 start = fw_obj->fw_mm_node.start;
+ const u64 size = fw_obj->fw_mm_node.size;
+ u64 end;
+ u32 cache_policy;
+ u32 pte_flags;
+ s32 start_pfn;
+ s32 end_pfn;
+ s32 pfn;
+ int err;
+
+ if (check_add_overflow(start, size - 1, &end))
+ return -EINVAL;
+
+ if (start < ROGUE_FW_HEAP_BASE ||
+ start >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+ end < ROGUE_FW_HEAP_BASE ||
+ end >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+ (start & ROGUE_MIPSFW_PAGE_MASK_4K) ||
+ ((end + 1) & ROGUE_MIPSFW_PAGE_MASK_4K))
+ return -EINVAL;
+
+ start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+ end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+
+ if (pvr_obj->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+ cache_policy = ROGUE_MIPSFW_UNCACHED_CACHE_POLICY;
+ else
+ cache_policy = mips_data->cache_policy;
+
+ pte_flags = get_mips_pte_flags(true, true, cache_policy);
+
+ for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+ dma_addr_t dma_addr;
+ u32 pte;
+
+ err = pvr_fw_object_get_dma_addr(fw_obj,
+ (pfn - start_pfn) <<
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K,
+ &dma_addr);
+ if (err)
+ goto err_unmap_pages;
+
+ pte = ((dma_addr >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+ << ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT) & mips_data->pfn_mask;
+ pte |= pte_flags;
+
+ WRITE_ONCE(mips_data->pt[pfn], pte);
+ }
+
+ pvr_mmu_flush_request_all(pvr_dev);
+
+ return 0;
+
+err_unmap_pages:
+ while (--pfn >= start_pfn)
+ WRITE_ONCE(mips_data->pt[pfn], 0);
+
+ pvr_mmu_flush_request_all(pvr_dev);
+ WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));
+
+ return err;
+}
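
Two details of the loop above are worth calling out: each PTE is published with WRITE_ONCE() so the table walker can never observe a torn entry, and the relatively expensive MMU flush is requested once per batch rather than once per page. A thin standalone model of the publish step, with a volatile store standing in for WRITE_ONCE():

#include <stdint.h>

/* A single aligned 32-bit store ensures the walker sees either the old
 * or the new entry, never a half-written one; the flush that makes the
 * batch visible is then requested separately, once. */
static void publish_pte(volatile uint32_t *slot, uint32_t pte)
{
	*slot = pte;
}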
+
+/**
+ * pvr_vm_mips_unmap() - Unmap a FW object from the MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to unmap.
+ */
+void
+pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+ const u64 start = fw_obj->fw_mm_node.start;
+ const u64 size = fw_obj->fw_mm_node.size;
+ const u64 end = start + size;
+
+ const u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >>
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+ const u32 end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >>
+ ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+
+ for (u32 pfn = start_pfn; pfn < end_pfn; pfn++)
+ WRITE_ONCE(mips_data->pt[pfn], 0);
+
+ pvr_mmu_flush_request_all(pvr_dev);
+ WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.h b/drivers/gpu/drm/imagination/pvr_vm_mips.h
new file mode 100644
index 000000000..0fd59f68f
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_VM_MIPS_H
+#define PVR_VM_MIPS_H
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev);
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev);
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+void
+pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+#endif /* PVR_VM_MIPS_H */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index b61cec0cc..ad5f29ea8 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -80,7 +80,7 @@ err:
return err;
}
-static int dcss_drv_platform_remove(struct platform_device *pdev)
+static void dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
@@ -88,8 +88,6 @@ static int dcss_drv_platform_remove(struct platform_device *pdev)
dcss_dev_destroy(mdrv->dcss);
kfree(mdrv);
-
- return 0;
}
static void dcss_drv_platform_shutdown(struct platform_device *pdev)
@@ -120,7 +118,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match);
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
- .remove = dcss_drv_platform_remove,
+ .remove_new = dcss_drv_platform_remove,
.shutdown = dcss_drv_platform_shutdown,
.driver = {
.name = "imx-dcss",
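
This is the first of many hunks in this patch converting platform drivers from the int-returning .remove callback to the void-returning .remove_new; the returned int was never usefully consumed by the driver core, so the new callback type removes the dead "return 0". A minimal sketch of the converted shape (the demo_* names are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

/* Void remove callback: tear-down only, nothing to return. */
static void demo_remove(struct platform_device *pdev)
{
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");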
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 989eca32d..53840ab05 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -12,8 +12,10 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
@@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
struct imx_ldb *imx_ldb;
int dual;
@@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
imx_ldb->dev = dev;
-
- if (of_id)
- imx_ldb->lvds_mux = of_id->data;
+ imx_ldb->lvds_mux = device_get_match_data(dev);
dual = of_property_read_bool(np, "fsl,dual-channel");
if (dual)
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 4beb3b4bd..43ddf3a98 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -506,14 +506,12 @@ static int imx_lcdc_probe(struct platform_device *pdev)
return 0;
}
-static int imx_lcdc_remove(struct platform_device *pdev)
+static void imx_lcdc_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
-
- return 0;
}
static void imx_lcdc_shutdown(struct platform_device *pdev)
@@ -527,7 +525,7 @@ static struct platform_driver imx_lcdc_driver = {
.of_match_table = imx_lcdc_of_dev_id,
},
.probe = imx_lcdc_probe,
- .remove = imx_lcdc_remove,
+ .remove_new = imx_lcdc_remove,
.shutdown = imx_lcdc_shutdown,
};
module_platform_driver(imx_lcdc_driver);
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 24035b534..169b83987 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -448,7 +448,7 @@ static const struct drm_driver kmb_driver = {
.minor = DRIVER_MINOR,
};
-static int kmb_remove(struct platform_device *pdev)
+static void kmb_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct drm_device *drm = dev_get_drvdata(dev);
@@ -473,7 +473,6 @@ static int kmb_remove(struct platform_device *pdev)
/* Unregister DSI host */
kmb_dsi_host_unregister(kmb->kmb_dsi);
drm_atomic_helper_shutdown(drm);
- return 0;
}
static int kmb_probe(struct platform_device *pdev)
@@ -621,7 +620,7 @@ static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume);
static struct platform_driver kmb_platform_driver = {
.probe = kmb_probe,
- .remove = kmb_remove,
+ .remove_new = kmb_remove,
.driver = {
.name = "kmb-drm",
.pm = &kmb_pm_ops,
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 891d5cd50..8389f2d7d 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+#include <linux/pid.h>
#include <linux/slab.h>
#include "lima_device.h"
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 02cef0cea..0bf7105c8 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -514,7 +514,7 @@ int lima_device_suspend(struct device *dev)
/* check any task running */
for (i = 0; i < lima_pipe_num; i++) {
- if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+ if (atomic_read(&ldev->pipe[i].base.credit_count))
return -EBUSY;
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 295f0353a..c3bf8cda8 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -123,7 +123,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->base.base);
- err = drm_sched_job_init(&task->base, &context->base, vm);
+ err = drm_sched_job_init(&task->base, &context->base, 1, vm);
if (err) {
kfree(task->bos);
return err;
@@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
- return drm_sched_init(&pipe->base, &lima_sched_ops,
+ return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
1,
lima_job_hang_limit,
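
Both lima hunks above track the v6.8 DRM scheduler rework: drm_sched_job_init() gains a per-job credit count, drm_sched_init() gains an optional submission workqueue (NULL lets the scheduler provide one) and a credit limit, and the old hw_rq_count readout becomes credit_count, as seen in lima_device_suspend() earlier. A toy model of the flow control, not the scheduler API itself:

#include <stdbool.h>
#include <stdint.h>

/* Toy credit accounting: a ring advertises a credit_limit, each job
 * declares the credits it consumes (1 for every lima job), and new
 * jobs are admitted only while enough credits remain in flight. */
struct toy_sched {
	uint32_t credit_limit;
	uint32_t credit_count;	/* credits currently in flight */
};

static bool toy_can_queue(const struct toy_sched *s, uint32_t job_credits)
{
	return s->credit_count + job_credits <= s->credit_limit;
}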
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index df6946d50..8e59753e5 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -3,6 +3,7 @@
config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
depends on DRM && PCI && MMU
+ depends on LOONGARCH || MIPS || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_TTM
select I2C
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c
index 9625d0b1d..ce90c2553 100644
--- a/drivers/gpu/drm/loongson/lsdc_i2c.c
+++ b/drivers/gpu/drm/loongson/lsdc_i2c.c
@@ -154,7 +154,6 @@ int lsdc_create_i2c_chan(struct drm_device *ddev,
adapter = &li2c->adapter;
adapter->algo_data = &li2c->bit;
adapter->owner = THIS_MODULE;
- adapter->class = I2C_CLASS_DDC;
adapter->dev.parent = ddev->dev;
adapter->nr = -1;
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index 0d5094633..d227a2c1d 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "lsdc_drv.h"
#include "lsdc_regs.h"
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index d4d193f60..5e4436403 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -16,7 +16,8 @@ mediatek-drm-y := mtk_disp_aal.o \
mtk_dsi.o \
mtk_dpi.o \
mtk_ethdr.o \
- mtk_mdp_rdma.o
+ mtk_mdp_rdma.o \
+ mtk_padding.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index f47f417d8..8519e9bad 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -185,7 +185,6 @@ static int mtk_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_cec *cec;
- struct resource *res;
int ret;
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
@@ -195,8 +194,7 @@ static int mtk_cec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cec);
spin_lock_init(&cec->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->regs = devm_ioremap_resource(dev, res);
+ cec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->regs)) {
ret = PTR_ERR(cec->regs);
dev_err(dev, "Failed to ioremap cec: %d\n", ret);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 2209159d8..40fe40308 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -168,7 +168,6 @@ static int mtk_disp_aal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_aal *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -181,8 +180,7 @@ static int mtk_disp_aal_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap aal\n");
return PTR_ERR(priv->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 4234ff748..465cddce0 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -153,7 +153,6 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_disp_ccorr *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -166,8 +165,7 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs)) {
dev_err(dev, "failed to ioremap ccorr\n");
return PTR_ERR(priv->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 1311562d2..74fa56339 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -110,6 +110,8 @@ void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev,
unsigned int next);
void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev,
unsigned int next);
+int mtk_ovl_adaptor_power_on(struct device *dev);
+void mtk_ovl_adaptor_power_off(struct device *dev);
int mtk_ovl_adaptor_clk_enable(struct device *dev);
void mtk_ovl_adaptor_clk_disable(struct device *dev);
void mtk_ovl_adaptor_config(struct device *dev, unsigned int w,
@@ -151,6 +153,8 @@ void mtk_rdma_disable_vblank(struct device *dev);
const u32 *mtk_rdma_get_formats(struct device *dev);
size_t mtk_rdma_get_num_formats(struct device *dev);
+int mtk_mdp_rdma_power_on(struct device *dev);
+void mtk_mdp_rdma_power_off(struct device *dev);
int mtk_mdp_rdma_clk_enable(struct device *dev);
void mtk_mdp_rdma_clk_disable(struct device *dev);
void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt);
@@ -160,4 +164,8 @@ void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg,
const u32 *mtk_mdp_rdma_get_formats(struct device *dev);
size_t mtk_mdp_rdma_get_num_formats(struct device *dev);
+int mtk_padding_clk_enable(struct device *dev);
+void mtk_padding_clk_disable(struct device *dev);
+void mtk_padding_start(struct device *dev);
+void mtk_padding_stop(struct device *dev);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 6bf636785..12a37f740 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -27,13 +27,14 @@
#define MTK_OVL_ADAPTOR_LAYER_NUM 4
enum mtk_ovl_adaptor_comp_type {
- OVL_ADAPTOR_TYPE_RDMA = 0,
- OVL_ADAPTOR_TYPE_MERGE,
OVL_ADAPTOR_TYPE_ETHDR,
+ OVL_ADAPTOR_TYPE_MDP_RDMA,
+ OVL_ADAPTOR_TYPE_MERGE,
OVL_ADAPTOR_TYPE_NUM,
};
enum mtk_ovl_adaptor_comp_id {
+ OVL_ADAPTOR_ETHDR0,
OVL_ADAPTOR_MDP_RDMA0,
OVL_ADAPTOR_MDP_RDMA1,
OVL_ADAPTOR_MDP_RDMA2,
@@ -46,13 +47,14 @@ enum mtk_ovl_adaptor_comp_id {
OVL_ADAPTOR_MERGE1,
OVL_ADAPTOR_MERGE2,
OVL_ADAPTOR_MERGE3,
- OVL_ADAPTOR_ETHDR0,
OVL_ADAPTOR_ID_MAX
};
struct ovl_adaptor_comp_match {
enum mtk_ovl_adaptor_comp_type type;
+ enum mtk_ddp_comp_id comp_id;
int alias_id;
+ const struct mtk_ddp_comp_funcs *funcs;
};
struct mtk_disp_ovl_adaptor {
@@ -62,25 +64,44 @@ struct mtk_disp_ovl_adaptor {
};
static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = {
- [OVL_ADAPTOR_TYPE_RDMA] = "vdo1-rdma",
- [OVL_ADAPTOR_TYPE_MERGE] = "merge",
[OVL_ADAPTOR_TYPE_ETHDR] = "ethdr",
+ [OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma",
+ [OVL_ADAPTOR_TYPE_MERGE] = "merge",
+};
+
+static const struct mtk_ddp_comp_funcs ethdr = {
+ .clk_enable = mtk_ethdr_clk_enable,
+ .clk_disable = mtk_ethdr_clk_disable,
+ .start = mtk_ethdr_start,
+ .stop = mtk_ethdr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs merge = {
+ .clk_enable = mtk_merge_clk_enable,
+ .clk_disable = mtk_merge_clk_disable,
+};
+
+static const struct mtk_ddp_comp_funcs rdma = {
+ .power_on = mtk_mdp_rdma_power_on,
+ .power_off = mtk_mdp_rdma_power_off,
+ .clk_enable = mtk_mdp_rdma_clk_enable,
+ .clk_disable = mtk_mdp_rdma_clk_disable,
};
static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = {
- [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_RDMA, 0 },
- [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_RDMA, 1 },
- [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_RDMA, 2 },
- [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_RDMA, 3 },
- [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_RDMA, 4 },
- [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_RDMA, 5 },
- [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_RDMA, 6 },
- [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_RDMA, 7 },
- [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, 1 },
- [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, 2 },
- [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, 3 },
- [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, 4 },
- [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, 0 },
+ [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, DDP_COMPONENT_ETHDR_MIXER, 0, &ethdr },
+ [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA0, 0, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA1, 1, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA2, 2, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA3, 3, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA4, 4, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA5, 5, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA6, 6, &rdma },
+ [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA7, 7, &rdma },
+ [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE1, 1, &merge },
+ [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge },
+ [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge },
+ [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge },
};
void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx,
@@ -172,68 +193,112 @@ void mtk_ovl_adaptor_config(struct device *dev, unsigned int w,
void mtk_ovl_adaptor_start(struct device *dev)
{
+ int i;
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
- mtk_ethdr_start(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->start)
+ continue;
+
+ comp_matches[i].funcs->start(ovl_adaptor->ovl_adaptor_comp[i]);
+ }
}
void mtk_ovl_adaptor_stop(struct device *dev)
{
+ int i;
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
- mtk_ethdr_stop(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->stop)
+ continue;
+
+ comp_matches[i].funcs->stop(ovl_adaptor->ovl_adaptor_comp[i]);
+ }
}
-int mtk_ovl_adaptor_clk_enable(struct device *dev)
+/**
+ * power_off - Power off the devices in OVL adaptor
+ * @dev: Device to be powered off
+ * @num: Number of components to power off, counted from the start of the list
+ *
+ * Calls the .power_off() callback of each OVL adaptor component, where present.
+ */
+static inline void power_off(struct device *dev, int num)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
- struct device *comp;
- int ret;
int i;
- for (i = 0; i < OVL_ADAPTOR_MERGE0; i++) {
- comp = ovl_adaptor->ovl_adaptor_comp[i];
- ret = pm_runtime_get_sync(comp);
+ if (num > OVL_ADAPTOR_ID_MAX)
+ num = OVL_ADAPTOR_ID_MAX;
+
+ for (i = num - 1; i >= 0; i--) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->power_off)
+ continue;
+
+ comp_matches[i].funcs->power_off(ovl_adaptor->ovl_adaptor_comp[i]);
+ }
+}
+
+/**
+ * mtk_ovl_adaptor_power_on - Power on the devices in OVL adaptor
+ * @dev: Device to be powered on
+ *
+ * Unlike OVL, the OVL adaptor is a pseudo device, so it is not defined
+ * in the device tree. The pm_runtime_resume_and_get() issued by
+ * .atomic_enable() therefore powers on none of the devices inside the
+ * OVL adaptor, so we have to implement a function to do the job
+ * instead.
+ *
+ * Return: Zero for success or negative number for failure.
+ */
+int mtk_ovl_adaptor_power_on(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+ int i, ret;
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i] ||
+ !comp_matches[i].funcs->power_on)
+ continue;
+
+ ret = comp_matches[i].funcs->power_on(ovl_adaptor->ovl_adaptor_comp[i]);
if (ret < 0) {
dev_err(dev, "Failed to enable power domain %d, err %d\n", i, ret);
- goto pwr_err;
+ power_off(dev, i);
+ return ret;
}
}
+ return 0;
+}
+
+void mtk_ovl_adaptor_power_off(struct device *dev)
+{
+ power_off(dev, OVL_ADAPTOR_ID_MAX);
+}
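
The power_on/power_off pair above follows the standard partial-unwind idiom: if component i fails to power on, exactly components i-1..0 are powered back off, in reverse order, leaving the system as it was found. A standalone model of the idiom (struct part and its hooks are invented for the sketch):

#include <stddef.h>

struct part {
	int (*power_on)(struct part *p);
	void (*power_off)(struct part *p);
};

/* On failure at index i, undo only indices i-1..0, in reverse order. */
static int power_on_all(struct part *parts, size_t n)
{
	size_t i;
	int ret;

	for (i = 0; i < n; i++) {
		if (!parts[i].power_on)
			continue;

		ret = parts[i].power_on(&parts[i]);
		if (ret) {
			while (i-- > 0)
				if (parts[i].power_off)
					parts[i].power_off(&parts[i]);
			return ret;
		}
	}

	return 0;
}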
+
+int mtk_ovl_adaptor_clk_enable(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+ struct device *comp;
+ int ret;
+ int i;
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
-
- if (i < OVL_ADAPTOR_MERGE0)
- ret = mtk_mdp_rdma_clk_enable(comp);
- else if (i < OVL_ADAPTOR_ETHDR0)
- ret = mtk_merge_clk_enable(comp);
- else
- ret = mtk_ethdr_clk_enable(comp);
+ if (!comp || !comp_matches[i].funcs->clk_enable)
+ continue;
+ ret = comp_matches[i].funcs->clk_enable(comp);
if (ret) {
dev_err(dev, "Failed to enable clock %d, err %d\n", i, ret);
- goto clk_err;
+ while (--i >= 0)
+ comp_matches[i].funcs->clk_disable(comp);
+ return ret;
}
}
-
- return ret;
-
-clk_err:
- while (--i >= 0) {
- comp = ovl_adaptor->ovl_adaptor_comp[i];
- if (i < OVL_ADAPTOR_MERGE0)
- mtk_mdp_rdma_clk_disable(comp);
- else if (i < OVL_ADAPTOR_ETHDR0)
- mtk_merge_clk_disable(comp);
- else
- mtk_ethdr_clk_disable(comp);
- }
- i = OVL_ADAPTOR_MERGE0;
-
-pwr_err:
- while (--i >= 0)
- pm_runtime_put(ovl_adaptor->ovl_adaptor_comp[i]);
-
- return ret;
+ return 0;
}
void mtk_ovl_adaptor_clk_disable(struct device *dev)
@@ -244,15 +309,11 @@ void mtk_ovl_adaptor_clk_disable(struct device *dev)
for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
comp = ovl_adaptor->ovl_adaptor_comp[i];
-
- if (i < OVL_ADAPTOR_MERGE0) {
- mtk_mdp_rdma_clk_disable(comp);
+ if (!comp || !comp_matches[i].funcs->clk_disable)
+ continue;
+ comp_matches[i].funcs->clk_disable(comp);
+ if (i < OVL_ADAPTOR_MERGE0)
pm_runtime_put(comp);
- } else if (i < OVL_ADAPTOR_ETHDR0) {
- mtk_merge_clk_disable(comp);
- } else {
- mtk_ethdr_clk_disable(comp);
- }
}
}
@@ -314,40 +375,31 @@ size_t mtk_ovl_adaptor_get_num_formats(struct device *dev)
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex)
{
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA0);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA1);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA2);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA3);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA4);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA5);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA6);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA7);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE1);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE2);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE3);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE4);
- mtk_mutex_add_comp(mutex, DDP_COMPONENT_ETHDR_MIXER);
+ int i;
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i])
+ continue;
+ mtk_mutex_add_comp(mutex, comp_matches[i].comp_id);
+ }
}
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex)
{
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA0);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA1);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA2);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA3);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA4);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA5);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA6);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA7);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE1);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE2);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE3);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE4);
- mtk_mutex_remove_comp(mutex, DDP_COMPONENT_ETHDR_MIXER);
+ int i;
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) {
+ if (!ovl_adaptor->ovl_adaptor_comp[i])
+ continue;
+ mtk_mutex_remove_comp(mutex, comp_matches[i].comp_id);
+ }
}
void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next)
{
+ mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2);
@@ -355,11 +407,11 @@ void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsig
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER);
- mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
}
void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, unsigned int next)
{
+ mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2);
@@ -367,7 +419,6 @@ void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, un
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER);
mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER);
- mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next);
}
static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
@@ -386,17 +437,10 @@ static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node,
}
static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = {
- {
- .compatible = "mediatek,mt8195-vdo1-rdma",
- .data = (void *)OVL_ADAPTOR_TYPE_RDMA,
- }, {
- .compatible = "mediatek,mt8195-disp-merge",
- .data = (void *)OVL_ADAPTOR_TYPE_MERGE,
- }, {
- .compatible = "mediatek,mt8195-disp-ethdr",
- .data = (void *)OVL_ADAPTOR_TYPE_ETHDR,
- },
- {},
+ { .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR },
+ { .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE },
+ { .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA },
+ { /* sentinel */ }
};
static int compare_of(struct device *dev, void *data)
@@ -531,16 +575,15 @@ static int mtk_disp_ovl_adaptor_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_disp_ovl_adaptor_remove(struct platform_device *pdev)
+static void mtk_disp_ovl_adaptor_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &mtk_disp_ovl_adaptor_master_ops);
pm_runtime_disable(&pdev->dev);
- return 0;
}
struct platform_driver mtk_disp_ovl_adaptor_driver = {
.probe = mtk_disp_ovl_adaptor_probe,
- .remove = mtk_disp_ovl_adaptor_remove,
+ .remove_new = mtk_disp_ovl_adaptor_remove,
.driver = {
.name = "mediatek-disp-ovl-adaptor",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index d645b85f9..3d17de34d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -723,7 +723,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = pm_runtime_resume_and_get(comp->dev);
+ ret = mtk_ddp_comp_power_on(comp);
if (ret < 0) {
DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
return;
@@ -733,7 +733,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- pm_runtime_put(comp->dev);
+ mtk_ddp_comp_power_off(comp);
return;
}
@@ -746,7 +746,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- int i, ret;
+ int i;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
if (!mtk_crtc->enabled)
@@ -776,9 +776,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- ret = pm_runtime_put(comp->dev);
- if (ret < 0)
- DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);
+ mtk_ddp_comp_power_off(comp);
mtk_crtc->enabled = false;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3046c0409..a9b5a21cd 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -398,6 +398,8 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
};
static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
+ .power_on = mtk_ovl_adaptor_power_on,
+ .power_off = mtk_ovl_adaptor_power_off,
.clk_enable = mtk_ovl_adaptor_clk_enable,
.clk_disable = mtk_ovl_adaptor_clk_disable,
.config = mtk_ovl_adaptor_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 4bae55bdb..15b2eafff 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
@@ -46,6 +47,8 @@ enum mtk_ddp_comp_type {
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
+ int (*power_on)(struct device *dev);
+ void (*power_off)(struct device *dev);
int (*clk_enable)(struct device *dev);
void (*clk_disable)(struct device *dev);
void (*config)(struct device *dev, unsigned int w,
@@ -92,6 +95,23 @@ struct mtk_ddp_comp {
const struct mtk_ddp_comp_funcs *funcs;
};
+static inline int mtk_ddp_comp_power_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->power_on)
+ return comp->funcs->power_on(comp->dev);
+
+ return pm_runtime_resume_and_get(comp->dev);
+}
+
+static inline void mtk_ddp_comp_power_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->power_off)
+ comp->funcs->power_off(comp->dev);
+ else
+ pm_runtime_put(comp->dev);
+}
+
static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->clk_enable)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2b0c35cac..14a1e0157 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -5,7 +5,6 @@
*/
#include <linux/component.h>
-#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -611,9 +610,6 @@ static int mtk_drm_bind(struct device *dev)
struct drm_device *drm;
int ret, i;
- if (!iommu_present(&platform_bus_type))
- return -EPROBE_DEFER;
-
pdev = of_find_device_by_node(private->mutex_node);
if (!pdev) {
dev_err(dev, "Waiting for disp-mutex device %pOF\n",
@@ -1003,6 +999,7 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_dsi_driver,
&mtk_ethdr_driver,
&mtk_mdp_rdma_driver,
+ &mtk_padding_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 6f98fff4f..33fadb08d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -77,5 +77,5 @@ extern struct platform_driver mtk_dpi_driver;
extern struct platform_driver mtk_dsi_driver;
extern struct platform_driver mtk_ethdr_driver;
extern struct platform_driver mtk_mdp_rdma_driver;
-
+extern struct platform_driver mtk_padding_driver;
#endif /* MTK_DRM_DRV_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index db7ac666e..6a5d0c345 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -346,10 +346,9 @@ static int mtk_ethdr_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_ethdr_remove(struct platform_device *pdev)
+static void mtk_ethdr_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &mtk_ethdr_component_ops);
- return 0;
}
static const struct of_device_id mtk_ethdr_driver_dt_match[] = {
@@ -361,7 +360,7 @@ MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match);
struct platform_driver mtk_ethdr_driver = {
.probe = mtk_ethdr_probe,
- .remove = mtk_ethdr_remove,
+ .remove_new = mtk_ethdr_remove,
.driver = {
.name = "mediatek-disp-ethdr",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index d675c954b..54e46e440 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -297,7 +297,6 @@ static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
ddc->adap.owner = THIS_MODULE;
- ddc->adap.class = I2C_CLASS_DDC;
ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
ddc->adap.retries = 3;
ddc->adap.dev.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index c7233d0ac..ee9ce9b6d 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -242,6 +242,22 @@ size_t mtk_mdp_rdma_get_num_formats(struct device *dev)
return ARRAY_SIZE(formats);
}
+int mtk_mdp_rdma_power_on(struct device *dev)
+{
+ int ret = pm_runtime_resume_and_get(dev);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+void mtk_mdp_rdma_power_off(struct device *dev)
+{
+ pm_runtime_put(dev);
+}
+
int mtk_mdp_rdma_clk_enable(struct device *dev)
{
struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c
new file mode 100644
index 000000000..0d6451c14
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_padding.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
+#include "mtk_disp_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define PADDING_CONTROL_REG 0x00
+#define PADDING_BYPASS BIT(0)
+#define PADDING_ENABLE BIT(1)
+#define PADDING_PIC_SIZE_REG 0x04
+#define PADDING_H_REG 0x08 /* horizontal */
+#define PADDING_V_REG 0x0c /* vertical */
+#define PADDING_COLOR_REG 0x10
+
+/**
+ * struct mtk_padding - Basic information of the Padding
+ * @clk: Clock of the module
+ * @reg: Virtual address of the Padding for CPU to access
+ * @cmdq_reg: CMDQ setting of the Padding
+ *
+ * Every Padding has its own clock source, register base, and CMDQ
+ * settings, so we store these per-instance differences together here.
+ */
+struct mtk_padding {
+ struct clk *clk;
+ void __iomem *reg;
+ struct cmdq_client_reg cmdq_reg;
+};
+
+int mtk_padding_clk_enable(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(padding->clk);
+}
+
+void mtk_padding_clk_disable(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(padding->clk);
+}
+
+void mtk_padding_start(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ writel(PADDING_ENABLE | PADDING_BYPASS,
+ padding->reg + PADDING_CONTROL_REG);
+
+ /*
+ * Note that even when the padding is in bypass mode, all the
+ * settings must be cleared to 0, or undefined behavior could
+ * occur.
+ */
+ writel(0, padding->reg + PADDING_PIC_SIZE_REG);
+ writel(0, padding->reg + PADDING_H_REG);
+ writel(0, padding->reg + PADDING_V_REG);
+ writel(0, padding->reg + PADDING_COLOR_REG);
+}
+
+void mtk_padding_stop(struct device *dev)
+{
+ struct mtk_padding *padding = dev_get_drvdata(dev);
+
+ writel(0, padding->reg + PADDING_CONTROL_REG);
+}
+
+static int mtk_padding_bind(struct device *dev, struct device *master, void *data)
+{
+ return 0;
+}
+
+static void mtk_padding_unbind(struct device *dev, struct device *master, void *data)
+{
+}
+
+static const struct component_ops mtk_padding_component_ops = {
+ .bind = mtk_padding_bind,
+ .unbind = mtk_padding_unbind,
+};
+
+static int mtk_padding_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_padding *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clk\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ priv->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->reg)) {
+ dev_err(dev, "failed to do ioremap\n");
+ return PTR_ERR(priv->reg);
+ }
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+ if (ret) {
+ dev_err(dev, "failed to get gce client reg\n");
+ return ret;
+ }
+#endif
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = component_add(dev, &mtk_padding_component_ops);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add component\n");
+
+ return 0;
+}
+
+static void mtk_padding_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_padding_component_ops);
+}
+
+static const struct of_device_id mtk_padding_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8188-disp-padding" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_padding_driver_dt_match);
+
+struct platform_driver mtk_padding_driver = {
+ .probe = mtk_padding_probe,
+ .remove_new = mtk_padding_remove,
+ .driver = {
+ .name = "mediatek-disp-padding",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_padding_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index e5fe4e994..a6bc1bdb3 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -323,13 +323,11 @@ static int meson_dw_mipi_dsi_probe(struct platform_device *pdev)
return 0;
}
-static int meson_dw_mipi_dsi_remove(struct platform_device *pdev)
+static void meson_dw_mipi_dsi_remove(struct platform_device *pdev)
{
struct meson_dw_mipi_dsi *mipi_dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(mipi_dsi->dmd);
-
- return 0;
}
static const struct of_device_id meson_dw_mipi_dsi_of_table[] = {
@@ -340,7 +338,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table);
static struct platform_driver meson_dw_mipi_dsi_platform_driver = {
.probe = meson_dw_mipi_dsi_probe,
- .remove = meson_dw_mipi_dsi_remove,
+ .remove_new = meson_dw_mipi_dsi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_mipi_dsi_of_table,
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 0c48bdf3e..423eb302b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -106,7 +106,6 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
i2c->data = BIT(info->i2c.data_bit);
i2c->clock = BIT(info->i2c.clock_bit);
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index ad70b611b..f202f26ad 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -17,6 +17,7 @@ config DRM_MSM
select DRM_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_EXEC
select DRM_KMS_HELPER
select DRM_PANEL
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 49671364f..b1173128b 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -63,6 +63,7 @@ msm-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_encoder_phys_wb.o \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_dsc_1_2.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e5916c106..c003f9701 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -684,7 +684,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- u32 regbit;
+ u32 hbb;
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@@ -820,18 +820,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
- /* Set the highest bank bit */
- if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
- regbit = 2;
- else
- regbit = 1;
+ BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
+ hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
- gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
- gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1);
if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
adreno_is_a540(adreno_gpu))
- gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, hbb);
/* Disable All flat shading optimization (ALLFLATOPTDIS) */
gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
@@ -1785,5 +1782,11 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
+ /* Set the highest bank bit */
+ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
+ adreno_gpu->ubwc_config.highest_bank_bit = 15;
+ else
+ adreno_gpu->ubwc_config.highest_bank_bit = 14;
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index edc1eeef8..792a4c60a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1270,81 +1270,95 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
}
-static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
/* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
- u32 rgb565_predicator = 0;
+ gpu->ubwc_config.rgb565_predicator = 0;
/* Unknown, introduced with A650 family */
- u32 uavflagprd_inv = 0;
+ gpu->ubwc_config.uavflagprd_inv = 0;
/* Whether the minimum access length is 64 bits */
- u32 min_acc_len = 0;
+ gpu->ubwc_config.min_acc_len = 0;
/* Entirely magic, per-GPU-gen value */
- u32 ubwc_mode = 0;
+ gpu->ubwc_config.ubwc_mode = 0;
/*
* The Highest Bank Bit value represents the bit of the highest DDR bank.
- * We then subtract 13 from it (13 is the minimum value allowed by hw) and
- * write the lowest two bits of the remaining value as hbb_lo and the
- * one above it as hbb_hi to the hardware. This should ideally use DRAM
- * type detection.
+ * This should ideally use DRAM type detection.
*/
- u32 hbb_hi = 0;
- u32 hbb_lo = 2;
- /* Unknown, introduced with A640/680 */
- u32 amsbc = 0;
+ gpu->ubwc_config.highest_bank_bit = 15;
- if (adreno_is_a610(adreno_gpu)) {
- /* HBB = 14 */
- hbb_lo = 1;
- min_acc_len = 1;
- ubwc_mode = 1;
+ if (adreno_is_a610(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.min_acc_len = 1;
+ gpu->ubwc_config.ubwc_mode = 1;
}
- /* a618 is using the hw default values */
- if (adreno_is_a618(adreno_gpu))
- return;
+ if (adreno_is_a618(gpu))
+ gpu->ubwc_config.highest_bank_bit = 14;
- if (adreno_is_a619_holi(adreno_gpu))
- hbb_lo = 0;
+ if (adreno_is_a619(gpu))
+ /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
+ gpu->ubwc_config.highest_bank_bit = 13;
+
+ if (adreno_is_a619_holi(gpu))
+ gpu->ubwc_config.highest_bank_bit = 13;
- if (adreno_is_a640_family(adreno_gpu))
- amsbc = 1;
+ if (adreno_is_a640_family(gpu))
+ gpu->ubwc_config.amsbc = 1;
- if (adreno_is_a650(adreno_gpu) ||
- adreno_is_a660(adreno_gpu) ||
- adreno_is_a690(adreno_gpu) ||
- adreno_is_a730(adreno_gpu) ||
- adreno_is_a740_family(adreno_gpu)) {
+ if (adreno_is_a650(gpu) ||
+ adreno_is_a660(gpu) ||
+ adreno_is_a690(gpu) ||
+ adreno_is_a730(gpu) ||
+ adreno_is_a740_family(gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
- hbb_lo = 3;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
+ gpu->ubwc_config.highest_bank_bit = 16;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
}
- if (adreno_is_7c3(adreno_gpu)) {
- hbb_lo = 1;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
+ if (adreno_is_7c3(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
}
+}
+
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ /*
+ * We subtract 13 from the highest bank bit (13 is the minimum value
+ * allowed by hw) and write the lowest two bits of the remaining value
+ * as hbb_lo and the one above it as hbb_hi to the hardware.
+ */
+ BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13);
+ u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
+ u32 hbb_hi = hbb >> 2;
+ u32 hbb_lo = hbb & 3;
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
- rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
- min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+ adreno_gpu->ubwc_config.rgb565_predicator << 11 |
+ hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
+ adreno_gpu->ubwc_config.min_acc_len << 3 |
+ hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
- min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+ adreno_gpu->ubwc_config.min_acc_len << 3 |
+ hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
- uavflagprd_inv << 4 | min_acc_len << 3 |
- hbb_lo << 1 | ubwc_mode);
+ adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
+ adreno_gpu->ubwc_config.min_acc_len << 3 |
+ hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
FIELD_PREP(GENMASK(8, 5), hbb_lo));
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
+ adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
@@ -1780,7 +1794,7 @@ static int hw_init(struct msm_gpu *gpu)
else
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
- gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1);
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu)) {
@@ -2911,5 +2925,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);
+ a6xx_calc_ubwc_config(adreno_gpu);
+
return gpu;
}
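The hbb_hi/hbb_lo split in a6xx_set_ubwc_config() above likewise keeps the old register encodings. A worked example (illustration only, not part of the patch): for the a650/a660/a7xx family the new highest_bank_bit of 16 decomposes back into the previously hardcoded field values.

u32 hbb = 16 - 13;     /* a650 family: 3 */
u32 hbb_hi = hbb >> 2; /* 0, bit 10 of RB_NC_MODE_CNTL stays clear */
u32 hbb_lo = hbb & 3;  /* 3, bits 2:1, same as the old hbb_lo = 3 */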
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 3ee14646a..2ce7d7b16 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -841,7 +841,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
- kthread_park(sched->thread);
+
+ drm_sched_wqueue_stop(sched);
}
}
@@ -851,7 +852,8 @@ static void resume_scheduler(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
- kthread_unpark(sched->thread);
+
+ drm_sched_wqueue_start(sched);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 3fe9fd240..074fb4987 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -373,6 +373,9 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return -EINVAL;
*value = ctx->aspace->va_size;
return 0;
+ case MSM_PARAM_HIGHEST_BANK_BIT:
+ *value = adreno_gpu->ubwc_config.highest_bank_bit;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
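With the hunk above, userspace can query the programmed highest bank bit through the existing get_param path. A hypothetical probe (assumes the matching MSM_PARAM_HIGHEST_BANK_BIT define that this series adds to include/uapi/drm/msm_drm.h; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Hypothetical userspace sketch, not part of the patch. */
static int msm_get_hbb(int drm_fd, uint64_t *hbb)
{
	struct drm_msm_param req = {
		.pipe = MSM_PIPE_3D0,
		.param = MSM_PARAM_HIGHEST_BANK_BIT,
	};

	if (ioctl(drm_fd, DRM_IOCTL_MSM_GET_PARAM, &req))
		return -1;

	*hbb = req.value;	/* 13..16 on the parts configured above */
	return 0;
}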
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 80b3f6312..bc14df96f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -165,6 +165,15 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *fw[ADRENO_FW_MAX];
+ struct {
+ u32 rgb565_predicator;
+ u32 uavflagprd_inv;
+ u32 min_acc_len;
+ u32 ubwc_mode;
+ u32 highest_bank_bit;
+ u32 amsbc;
+ } ubwc_config;
+
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
new file mode 100644
index 000000000..eb5dfff2e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_10_0_SM8650_H
+#define _DPU_10_0_SM8650_H
+
+static const struct dpu_caps sm8650_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8650_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8650_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8650_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sm8650_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8650_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8650_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_8", .id = PINGPONG_8,
+ .base = 0x7e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_9", .id = PINGPONG_9,
+ .base = 0x7e400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8650_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ }, {
+ .name = "merge_3d_4", .id = MERGE_3D_4,
+ .base = 0x7e700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but have their
+ * own distinct sub-block addresses.
+ */
+static const struct dpu_dsc_cfg sm8650_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8650_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sm8650_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm8650_perf_data = {
+ .max_bw_low = 17000000,
+ .max_bw_high = 27000000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8650_mdss_ver = {
+ .core_major_ver = 10,
+ .core_minor_ver = 0,
+};
+
+const struct dpu_mdss_cfg dpu_sm8650_cfg = {
+ .mdss_ver = &sm8650_mdss_ver,
+ .caps = &sm8650_dpu_caps,
+ .mdp = &sm8650_mdp,
+ .ctl_count = ARRAY_SIZE(sm8650_ctl),
+ .ctl = sm8650_ctl,
+ .sspp_count = ARRAY_SIZE(sm8650_sspp),
+ .sspp = sm8650_sspp,
+ .mixer_count = ARRAY_SIZE(sm8650_lm),
+ .mixer = sm8650_lm,
+ .dspp_count = ARRAY_SIZE(sm8650_dspp),
+ .dspp = sm8650_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8650_pp),
+ .pingpong = sm8650_pp,
+ .dsc_count = ARRAY_SIZE(sm8650_dsc),
+ .dsc = sm8650_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8650_merge_3d),
+ .merge_3d = sm8650_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8650_wb),
+ .wb = sm8650_wb,
+ .intf_count = ARRAY_SIZE(sm8650_intf),
+ .intf = sm8650_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &sm8650_perf_data,
+};
+
+#endif
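For orientation (the actual table entry lives outside this file): new catalogs such as dpu_sm8650_cfg are typically hooked up by an of_device_id match on the SoC's DPU compatible, along these lines.

/* Sketch only; the real match table is in dpu_kms.c elsewhere in the series. */
static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg },
	{ /* sentinel */ }
};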
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index aa1867943..1d3e9666c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -10,7 +10,6 @@
static const struct dpu_caps msm8998_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -70,7 +69,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -78,7 +77,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -86,7 +85,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -94,7 +93,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -102,7 +101,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1ac,
.features = DMA_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -110,7 +109,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1ac,
.features = DMA_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -118,7 +117,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1ac,
.features = DMA_CURSOR_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -126,7 +125,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1ac,
.features = DMA_CURSOR_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 38ac0c1a1..7a23389a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -68,7 +67,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1c8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1c8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1c8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1c8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
new file mode 100644
index 000000000..cbbdaebe3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Richard Acayan. All rights reserved.
+ */
+
+#ifndef _DPU_4_1_SDM670_H
+#define _DPU_4_1_SDM670_H
+
+static const struct dpu_mdp_cfg sdm670_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_sspp_cfg sdm670_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_dsc_cfg sdm670_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_mdss_version sdm670_mdss_ver = {
+ .core_major_ver = 4,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sdm670_cfg = {
+ .mdss_ver = &sdm670_mdss_ver,
+ .caps = &sdm845_dpu_caps,
+ .mdp = &sdm670_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm670_sspp),
+ .sspp = sdm670_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm670_dsc),
+ .dsc = sdm670_dsc,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sdm845_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index c022e5786..145f3d595 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8150_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -77,7 +76,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -85,7 +84,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -93,7 +92,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -101,7 +100,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -109,7 +108,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -117,7 +116,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -125,7 +124,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -133,7 +132,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index cb0758f08..9e3bec8bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc8180x_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sm8150_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index cec7af666..76b2ec0d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -68,8 +68,8 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SM6125_MASK,
- .sblk = &sm6125_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 9f8068fa0..a57d50b1f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -75,32 +74,32 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -337,8 +336,8 @@ static const struct dpu_wb_cfg sm8250_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -385,6 +384,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
.mdss_ver = &sm8250_mdss_ver,
.caps = &sm8250_dpu_caps,
.mdp = &sm8250_mdp,
+ .cdm = &sc7280_cdm,
.ctl_count = ARRAY_SIZE(sm8250_ctl),
.ctl = sm8250_ctl,
.sspp_count = ARRAY_SIZE(sm8250_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 9bfa15e4e..7382ebb6e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc7180_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x9,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
@@ -52,8 +51,8 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sc7180_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -61,7 +60,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -69,7 +68,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -77,7 +76,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -158,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 57ce14c18..43f64a005 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm6115_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -39,8 +38,8 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm6115_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -48,7 +47,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 62db84bd1..e17a30be7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -11,7 +11,6 @@
static const struct dpu_caps sm6350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -59,8 +58,8 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sc7180_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -68,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index fb36fba51..3cbb2fe8a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
.features = VIG_QCM2290_MASK,
- .sblk = &qcm2290_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_noscale,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &qcm2290_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index 5a3aad364..a06c8634d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -11,7 +11,6 @@
static const struct dpu_caps sm6375_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -40,8 +39,8 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm6115_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -49,7 +48,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 022b0408c..aced16e35 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -75,64 +74,64 @@ static const struct dpu_sspp_cfg sm8350_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -304,8 +303,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index b9c296e51..2f153e0b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc7280_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2400,
@@ -58,7 +57,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
.features = VIG_SC7280_MASK_SDMA,
- .sblk = &sc7280_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_3_0_rot_v2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -66,7 +65,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -170,8 +169,8 @@ static const struct dpu_wb_cfg sc7280_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
@@ -249,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
.mdss_ver = &sc7280_mdss_ver,
.caps = &sc7280_dpu_caps,
.mdp = &sc7280_mdp,
+ .cdm = &sc7280_cdm,
.ctl_count = ARRAY_SIZE(sc7280_ctl),
.ctl = sc7280_ctl,
.sspp_count = ARRAY_SIZE(sc7280_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 4c0528794..0d143e390 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sc8280xp_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 11,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -75,32 +74,32 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 7adc42257..a1779c559 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8450_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -76,32 +75,32 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x32c,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8450_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x32c,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8450_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x32c,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8450_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x32c,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8450_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -109,7 +108,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x32c,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -117,7 +116,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x32c,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -125,7 +124,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x32c,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -133,7 +132,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x32c,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -322,8 +321,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 69b80af65..ad48defa1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -10,7 +10,6 @@
static const struct dpu_caps sm8550_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
@@ -67,71 +66,71 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_1,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_2,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_3,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sm8550_dma_sblk_4,
+ .sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sm8550_dma_sblk_5,
+ .sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
},
@@ -316,8 +315,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
- .format_list = wb2_formats,
- .num_formats = ARRAY_SIZE(wb2_formats),
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index ef871239a..68fae048a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
&perf->core_clk_rate);
debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, entry,
+ debugfs_create_u32("threshold_low", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_low);
- debugfs_create_u32("threshold_high", 0600, entry,
+ debugfs_create_u32("threshold_high", 0400, entry,
(u32 *)&perf->perf_cfg->max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, entry,
+ debugfs_create_u32("min_core_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, entry,
+ debugfs_create_u32("min_llcc_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, entry,
+ debugfs_create_u32("min_dram_ib", 0400, entry,
(u32 *)&perf->perf_cfg->min_dram_ib);
debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
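
The permission flip above is worth spelling out: the five threshold values now point into the const perf_cfg catalog data, so exposing them writable (0600) invited silent corruption of shared configuration, while the genuinely tunable entries stay read-write. A minimal sketch of the resulting pattern, using a hypothetical struct rather than the driver's actual types:

	#include <linux/debugfs.h>

	struct my_perf {
		u32 core_clk_rate;	/* runtime tunable */
		const u32 max_bw_low;	/* fixed catalog value */
	};

	static void my_perf_debugfs_init(struct my_perf *perf, struct dentry *parent)
	{
		/* 0600: root may both read and write the tunable */
		debugfs_create_u32("core_clk_rate", 0600, parent,
				   &perf->core_clk_rate);
		/* 0400: catalog data is exposed read-only; a writable node
		 * would let userspace scribble over const configuration,
		 * which is exactly what the hunk above prevents */
		debugfs_create_u32("max_bw_low", 0400, parent,
				   (u32 *)&perf->max_bw_low);
	}
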
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index db501ce1d..88c2e51ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -51,17 +51,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static void dpu_crtc_destroy(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- if (!crtc)
- return;
-
- drm_crtc_cleanup(crtc);
- kfree(dpu_crtc);
-}
-
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -1435,7 +1424,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = dpu_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = dpu_crtc_reset,
.atomic_duplicate_state = dpu_crtc_duplicate_state,
@@ -1469,9 +1457,13 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct dpu_crtc *dpu_crtc;
int i, ret;
- dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
- if (!dpu_crtc)
- return ERR_PTR(-ENOMEM);
+ dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
+ plane, cursor,
+ &dpu_crtc_funcs,
+ NULL);
+
+ if (IS_ERR(dpu_crtc))
+ return ERR_CAST(dpu_crtc);
crtc = &dpu_crtc->base;
crtc->dev = dev;
@@ -1491,9 +1483,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
dpu_crtc_frame_event_work);
}
- drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
- NULL);
-
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
if (dpu_kms->catalog->dspp_count)
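
The conversion above follows the standard DRM-managed allocation pattern: drmm_crtc_alloc_with_planes() allocates the wrapper struct, initializes the embedded drm_crtc, and registers a release action that performs the cleanup and kfree that the removed dpu_crtc_destroy() used to do by hand, which is why the .destroy callback could be dropped. A self-contained sketch with a hypothetical wrapper type:

	#include <linux/err.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_managed.h>

	struct my_crtc {
		struct drm_crtc base;	/* must be embedded, not a pointer */
		int some_state;
	};

	static struct drm_crtc *my_crtc_create(struct drm_device *dev,
					       struct drm_plane *primary,
					       struct drm_plane *cursor,
					       const struct drm_crtc_funcs *funcs)
	{
		struct my_crtc *mc;

		/* lifetime is now tied to the drm_device: no explicit
		 * drm_crtc_cleanup()/kfree() on any error or teardown path */
		mc = drmm_crtc_alloc_with_planes(dev, struct my_crtc, base,
						 primary, cursor, funcs, NULL);
		if (IS_ERR(mc))
			return ERR_CAST(mc);

		return &mc->base;
	}
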
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index b26d23b2a..aca689314 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_framebuffer.h>
#include "msm_drv.h"
#include "dpu_kms.h"
@@ -26,6 +27,7 @@
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
+#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -142,10 +144,6 @@ enum dpu_enc_rc_states {
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @connector: If a mode is set, cached pointer to the active connector
- * @crtc_kickoff_cb: Callback into CRTC that will flush & start
- * all CTL paths
- * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
- * @debugfs_root: Debug file system root file node
* @enc_lock: Lock around physical encoder
* create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
@@ -154,6 +152,8 @@ enum dpu_enc_rc_states {
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
* @frame_done_timeout_ms: frame done timeout in ms
+ * @frame_done_timeout_cnt: atomic counter tracking the number of frame
+ * done timeouts
* @frame_done_timer: watchdog timer for frame done event
* @disp_info: local copy of msm_display_info struct
* @idle_pc_supported: indicate if idle power collapse is supported
@@ -187,13 +187,13 @@ struct dpu_encoder_virt {
struct drm_crtc *crtc;
struct drm_connector *connector;
- struct dentry *debugfs_root;
struct mutex enc_lock;
DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
atomic_t frame_done_timeout_ms;
+ atomic_t frame_done_timeout_cnt;
struct timer_list frame_done_timer;
struct msm_display_info disp_info;
@@ -449,41 +449,6 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
return linecount;
}
-static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i = 0;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- mutex_lock(&dpu_enc->enc_lock);
-
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys->ops.destroy) {
- phys->ops.destroy(phys);
- --dpu_enc->num_phys_encs;
- dpu_enc->phys_encs[i] = NULL;
- }
- }
-
- if (dpu_enc->num_phys_encs)
- DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
- dpu_enc->num_phys_encs);
- dpu_enc->num_phys_encs = 0;
- mutex_unlock(&dpu_enc->enc_lock);
-
- drm_encoder_cleanup(drm_enc);
- mutex_destroy(&dpu_enc->enc_lock);
-}
-
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
@@ -624,6 +589,7 @@ static int dpu_encoder_virt_atomic_check(
struct drm_display_mode *adj_mode;
struct msm_display_topology topology;
struct dpu_global_state *global_state;
+ struct drm_framebuffer *fb;
struct drm_dsc_config *dsc;
int i = 0;
int ret = 0;
@@ -665,6 +631,22 @@ static int dpu_encoder_virt_atomic_check(
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
/*
+ * Use CDM only for writeback at the moment as other interfaces cannot handle it.
+ * If writeback itself cannot handle CDM for some reason, it will fail in its
+ * atomic_check() earlier.
+ */
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ fb = conn_state->writeback_job->fb;
+
+ if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
+ topology.needs_cdm = true;
+ if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
+ crtc_state->mode_changed = true;
+ else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
+ crtc_state->mode_changed = true;
+ }
+
+ /*
* Release and allocate resources on every modeset
* Don't allocate when active is false.
*/
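
The checks just added reduce to one rule: a CDM block is required exactly when the writeback framebuffer is YUV, and any mismatch between "required" and "currently assigned" must force a full modeset so the resource manager can reallocate the block. A distilled sketch of that rule as a hypothetical helper, not the driver's code:

	static void wb_cdm_check_modeset(struct drm_crtc_state *crtc_state,
					 bool needs_cdm, bool has_cdm)
	{
		/* acquiring or releasing a CDM changes the hardware topology,
		 * which can only happen across a full modeset */
		if (needs_cdm != has_cdm)
			crtc_state->mode_changed = true;
	}
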
@@ -1104,6 +1086,15 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->dsc_mask = dsc_mask;
+ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+ struct dpu_hw_blk *hw_cdm = NULL;
+
+ dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CDM,
+ &hw_cdm, 1);
+ dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
+ }
+
cstate = to_dpu_crtc_state(crtc_state);
for (i = 0; i < num_lm; i++) {
@@ -1214,6 +1205,8 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+ atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
+
if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
@@ -2092,6 +2085,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_pp->merge_3d->idx);
}
+ if (phys_enc->hw_cdm) {
+ if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
+ phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
+ PINGPONG_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
+ phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
+ phys_enc->hw_cdm->idx);
+ }
+
if (dpu_enc->dsc) {
dpu_encoder_unprep_dsc(dpu_enc);
dpu_enc->dsc = NULL;
@@ -2120,18 +2122,20 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
- struct dpu_encoder_virt *dpu_enc = s->private;
+ struct drm_encoder *drm_enc = s->private;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
int i;
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d ",
phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
atomic_read(&phys->vsync_cnt),
- atomic_read(&phys->underrun_cnt));
+ atomic_read(&phys->underrun_cnt),
+ atomic_read(&dpu_enc->frame_done_timeout_cnt));
seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
}
@@ -2142,49 +2146,18 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
-static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
{
- struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
-
- char name[12];
-
- if (!drm_enc->dev) {
- DPU_ERROR("invalid encoder or kms\n");
- return -EINVAL;
- }
-
- snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
-
- /* create overall sub-directory for the encoder */
- dpu_enc->debugfs_root = debugfs_create_dir(name,
- drm_enc->dev->primary->debugfs_root);
-
/* don't error check these */
debugfs_create_file("status", 0600,
- dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
-
- return 0;
+ root, drm_enc, &_dpu_encoder_status_fops);
}
#else
-static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
-{
- return 0;
-}
+#define dpu_encoder_debugfs_init NULL
#endif
-static int dpu_encoder_late_register(struct drm_encoder *encoder)
-{
- return _dpu_encoder_init_debugfs(encoder);
-}
-
-static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
-{
- struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
-
- debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
-
static int dpu_encoder_virt_add_phys_encs(
+ struct drm_device *dev,
struct msm_display_info *disp_info,
struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params *params)
@@ -2206,7 +2179,7 @@ static int dpu_encoder_virt_add_phys_encs(
if (disp_info->intf_type == INTF_WB) {
- enc = dpu_encoder_phys_wb_init(params);
+ enc = dpu_encoder_phys_wb_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
@@ -2217,7 +2190,7 @@ static int dpu_encoder_virt_add_phys_encs(
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else if (disp_info->is_cmd_mode) {
- enc = dpu_encoder_phys_cmd_init(params);
+ enc = dpu_encoder_phys_cmd_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
@@ -2228,7 +2201,7 @@ static int dpu_encoder_virt_add_phys_encs(
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else {
- enc = dpu_encoder_phys_vid_init(params);
+ enc = dpu_encoder_phys_vid_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
@@ -2317,7 +2290,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
break;
}
- ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
dpu_enc, &phys_params);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
@@ -2353,6 +2326,9 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
+ if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
+ msm_disp_snapshot_state(drm_enc->dev);
+
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
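
The atomic_inc_return(...) == 1 test above is a common one-shot idiom: the counter keeps tallying every timeout for the debugfs status output, while the expensive state snapshot fires only on the first occurrence since the last enable (the counter is reset to 0 in atomic_enable). Generic sketch with a hypothetical capture hook:

	#include <linux/atomic.h>

	static void capture_expensive_debug_state(void);	/* hypothetical */
	static atomic_t err_cnt = ATOMIC_INIT(0);

	static void on_error(void)
	{
		/* every hit is counted; only the first triggers the capture */
		if (atomic_inc_return(&err_cnt) == 1)
			capture_expensive_debug_state();
	}
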
@@ -2366,9 +2342,7 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
};
static const struct drm_encoder_funcs dpu_encoder_funcs = {
- .destroy = dpu_encoder_destroy,
- .late_register = dpu_encoder_late_register,
- .early_unregister = dpu_encoder_early_unregister,
+ .debugfs_init = dpu_encoder_debugfs_init,
};
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
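
Replacing late_register/early_unregister with drm_encoder_funcs.debugfs_init moves directory lifetime management into the DRM core: the core creates the per-encoder debugfs directory, hands it in as @root, and removes it recursively on teardown, so the driver no longer names, caches, or frees debugfs_root itself. A sketch of the callback contract, with a hypothetical fops:

	#include <linux/debugfs.h>
	#include <drm/drm_encoder.h>

	static const struct file_operations my_status_fops;	/* hypothetical */

	static void my_encoder_debugfs_init(struct drm_encoder *encoder,
					    struct dentry *root)
	{
		/* @root already exists and is cleaned up by the DRM core */
		debugfs_create_file("status", 0600, root, encoder,
				    &my_status_fops);
	}

	static const struct drm_encoder_funcs my_encoder_funcs = {
		.debugfs_init = my_encoder_debugfs_init,
	};
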
@@ -2377,20 +2351,13 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
- struct drm_encoder *drm_enc = NULL;
- struct dpu_encoder_virt *dpu_enc = NULL;
- int ret = 0;
-
- dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
- if (!dpu_enc)
- return ERR_PTR(-ENOMEM);
+ struct dpu_encoder_virt *dpu_enc;
+ int ret;
- ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
- drm_enc_mode, NULL);
- if (ret) {
- devm_kfree(dev->dev, dpu_enc);
- return ERR_PTR(ret);
- }
+ dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
+ &dpu_encoder_funcs, drm_enc_mode, NULL);
+ if (IS_ERR(dpu_enc))
+ return ERR_CAST(dpu_enc);
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
@@ -2400,10 +2367,13 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
mutex_init(&dpu_enc->rc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
- if (ret)
- goto fail;
+ if (ret) {
+ DPU_ERROR("failed to setup encoder\n");
+ return ERR_PTR(ret);
+ }
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
@@ -2416,13 +2386,6 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
DPU_DEBUG_ENC(dpu_enc, "created\n");
return &dpu_enc->base;
-
-fail:
- DPU_ERROR("failed to create encoder\n");
- if (drm_enc)
- dpu_encoder_destroy(drm_enc);
-
- return ERR_PTR(ret);
}
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
@@ -2449,9 +2412,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
case MSM_ENC_TX_COMPLETE:
fn_wait = phys->ops.wait_for_tx_complete;
break;
- case MSM_ENC_VBLANK:
- fn_wait = phys->ops.wait_for_vblank;
- break;
default:
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
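
A companion sketch to the CRTC conversion: drmm_encoder_alloc() ties the encoder's lifetime to the drm_device, so the old fail: label and dpu_encoder_destroy() unwind disappear and error paths simply propagate the error. Hypothetical types, minimal form:

	#include <linux/err.h>
	#include <drm/drm_encoder.h>
	#include <drm/drm_managed.h>

	struct my_encoder {
		struct drm_encoder base;
	};

	static struct drm_encoder *my_encoder_create(struct drm_device *dev,
						     const struct drm_encoder_funcs *funcs,
						     int encoder_type)
	{
		struct my_encoder *enc;

		enc = drmm_encoder_alloc(dev, struct my_encoder, base, funcs,
					 encoder_type, NULL);
		if (IS_ERR(enc))
			return ERR_CAST(enc);

		/* no cleanup needed on later failures: the drmm action
		 * releases the encoder with the device */
		return &enc->base;
	}
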
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 96bda57b6..993f26343 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -14,8 +14,10 @@
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_pingpong.h"
+#include "dpu_hw_cdm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
#include "dpu_encoder.h"
#include "dpu_crtc.h"
@@ -72,7 +74,6 @@ struct dpu_encoder_phys;
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
* @atomic_check: DRM Call. Atomic check new DRM state.
- * @destroy: DRM Call. Destroy and release resources.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
@@ -102,11 +103,9 @@ struct dpu_encoder_phys_ops {
int (*atomic_check)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
- void (*destroy)(struct dpu_encoder_phys *encoder);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
- int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
@@ -153,6 +152,7 @@ enum dpu_intr_idx {
* @hw_pp: Hardware interface to the ping pong registers
* @hw_intf: Hardware interface to the intf registers
* @hw_wb: Hardware interface to the wb registers
+ * @hw_cdm: Hardware interface to the CDM registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @vblank_ctl_lock: Vblank ctl mutex lock to protect vblank_refcount
@@ -182,6 +182,7 @@ struct dpu_encoder_phys {
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_intf *hw_intf;
struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_cdm *hw_cdm;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
struct mutex vblank_ctl_lock;
@@ -212,6 +213,7 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
* @wbirq_refcount: Reference count of writeback interrupt
* @wb_done_timeout_cnt: number of wb done irq timeout errors
* @wb_cfg: writeback block config to store fb related details
+ * @cdm_cfg: CDM block configuration for the writeback block
* @wb_conn: backpointer to writeback connector
* @wb_job: backpointer to current writeback job
* @dest: dpu buffer layout for current writeback output buffer
@@ -221,6 +223,7 @@ struct dpu_encoder_phys_wb {
atomic_t wbirq_refcount;
int wb_done_timeout_cnt;
struct dpu_hw_wb_cfg wb_cfg;
+ struct dpu_hw_cdm_cfg cdm_cfg;
struct drm_writeback_connector *wb_conn;
struct drm_writeback_job *wb_job;
struct dpu_hw_fmt_layout dest;
@@ -283,22 +286,24 @@ struct dpu_encoder_wait_info {
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev: Corresponding device for devres management
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev: Corresponding device for devres management
* @init: Pointer to init info structure with initialization params
*/
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 2d788c5e2..a301e2833 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -13,6 +13,8 @@
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
+#include <drm/drm_managed.h>
+
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
@@ -567,14 +569,6 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
phys_enc->enable_state = DPU_ENC_DISABLED;
}
-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_phys_cmd *cmd_enc =
- to_dpu_encoder_phys_cmd(phys_enc);
-
- kfree(cmd_enc);
-}
-
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
@@ -690,33 +684,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
-static int dpu_encoder_phys_cmd_wait_for_vblank(
- struct dpu_encoder_phys *phys_enc)
-{
- int rc = 0;
- struct dpu_encoder_phys_cmd *cmd_enc;
- struct dpu_encoder_wait_info wait_info;
-
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
- /* only required for master controller */
- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- return rc;
-
- wait_info.wq = &cmd_enc->pending_vblank_wq;
- wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
- atomic_inc(&cmd_enc->pending_vblank_cnt);
-
- rc = dpu_encoder_helper_wait_for_irq(phys_enc,
- phys_enc->irq[INTR_IDX_RDPTR],
- dpu_encoder_phys_cmd_te_rd_ptr_irq,
- &wait_info);
-
- return rc;
-}
-
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
@@ -740,12 +707,10 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
- ops->destroy = dpu_encoder_phys_cmd_destroy;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
- ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
@@ -755,7 +720,7 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
@@ -763,7 +728,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
DPU_DEBUG("intf\n");
- cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
if (!cmd_enc) {
DPU_ERROR("failed to allocate\n");
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index f66e0f125..f02411b06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -11,6 +11,8 @@
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
+#include <drm/drm_managed.h>
+
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->parent ? \
(e)->parent->base.id : -1, \
@@ -450,13 +452,7 @@ skip_flush:
phys_enc->enable_state = DPU_ENC_ENABLING;
}
-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
-{
- DPU_DEBUG_VIDENC(phys_enc, "\n");
- kfree(phys_enc);
-}
-
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_wait_info wait_info;
@@ -570,7 +566,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
* scanout buffer) don't latch properly..
*/
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -590,7 +586,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -693,11 +689,9 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
- ops->destroy = dpu_encoder_phys_vid_destroy;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
- ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
- ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
@@ -706,7 +700,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
@@ -716,7 +710,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
return ERR_PTR(-EINVAL);
}
- phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
if (!phys_enc) {
DPU_ERROR("failed to create encoder due to memory allocation error\n");
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 0b6a761d6..4cd2d9e31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include "dpu_encoder_phys.h"
#include "dpu_formats.h"
@@ -206,13 +207,14 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
}
/**
- * dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
+ * dpu_encoder_phys_wb_setup_ctl - setup wb pipeline for ctl path
* @phys_enc: Pointer to physical encoder
*/
-static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *ctl;
+ struct dpu_hw_cdm *hw_cdm;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
@@ -221,6 +223,7 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
hw_wb = phys_enc->hw_wb;
ctl = phys_enc->hw_ctl;
+ hw_cdm = phys_enc->hw_cdm;
if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
(phys_enc->hw_ctl &&
@@ -237,6 +240,9 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
if (mode_3d && hw_pp && hw_pp->merge_3d)
intf_cfg.merge_3d = hw_pp->merge_3d->idx;
+ if (hw_cdm)
+ intf_cfg.cdm = hw_cdm->idx;
+
if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
mode_3d);
@@ -259,6 +265,96 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
}
/**
+ * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
+ * This API does not handle DPU_CHROMA_H1V2.
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg *cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_encoder_phys_wb *wb_enc;
+ const struct msm_format *format;
+ const struct dpu_format *dpu_fmt;
+ struct drm_writeback_job *wb_job;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ cdm_cfg = &wb_enc->cdm_cfg;
+ hw_pp = phys_enc->hw_pp;
+ hw_cdm = phys_enc->hw_cdm;
+ wb_job = wb_enc->wb_job;
+
+ format = msm_framebuffer_format(wb_enc->wb_job->fb);
+ dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier);
+
+ if (!hw_cdm)
+ return;
+
+ if (!DPU_FORMAT_IS_YUV(dpu_fmt)) {
+ DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent),
+ dpu_fmt->base.pixel_format);
+ if (hw_cdm->ops.bind_pingpong_blk)
+ hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE);
+
+ return;
+ }
+
+ memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg));
+
+ cdm_cfg->output_width = wb_job->fb->width;
+ cdm_cfg->output_height = wb_job->fb->height;
+ cdm_cfg->output_fmt = dpu_fmt;
+ cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
+ cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ?
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+ cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l;
+
+ /* enable 10 bit logic */
+ switch (cdm_cfg->output_fmt->chroma_sample) {
+ case DPU_CHROMA_RGB:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case DPU_CHROMA_H2V1:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case DPU_CHROMA_420:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+ break;
+ case DPU_CHROMA_H1V2:
+ default:
+ DPU_ERROR("[enc:%d] unsupported chroma sampling type\n",
+ DRMID(phys_enc->parent));
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ }
+
+ DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+ DRMID(phys_enc->parent), cdm_cfg->output_width,
+ cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format,
+ cdm_cfg->output_type, cdm_cfg->output_bit_depth,
+ cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type);
+
+ if (hw_cdm->ops.enable) {
+ cdm_cfg->pp_id = hw_pp->idx;
+ ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n",
+ DRMID(phys_enc->parent), ret);
+ return;
+ }
+ }
+}
+
+/**
* dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
* @phys_enc: Pointer to physical encoder
* @crtc_state: Pointer to CRTC atomic state
@@ -307,7 +403,7 @@ static int dpu_encoder_phys_wb_atomic_check(
return -EINVAL;
}
- return 0;
+ return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
}
@@ -320,6 +416,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_wb *hw_wb;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_cdm *hw_cdm;
u32 pending_flush = 0;
if (!phys_enc)
@@ -328,6 +425,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
hw_wb = phys_enc->hw_wb;
hw_pp = phys_enc->hw_pp;
hw_ctl = phys_enc->hw_ctl;
+ hw_cdm = phys_enc->hw_cdm;
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -343,6 +441,9 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
hw_pp->merge_3d->idx);
+ if (hw_cdm && hw_ctl->ops.update_pending_flush_cdm)
+ hw_ctl->ops.update_pending_flush_cdm(hw_ctl, hw_cdm->idx);
+
if (hw_ctl->ops.get_pending_flush)
pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
@@ -374,8 +475,9 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
- dpu_encoder_phys_wb_setup_cdp(phys_enc);
+ dpu_encoder_helper_phys_setup_cdm(phys_enc);
+ dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
/**
@@ -580,20 +682,6 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
phys_enc->enable_state = DPU_ENC_DISABLED;
}
-/**
- * dpu_encoder_phys_wb_destroy - destroy writeback encoder
- * @phys_enc: Pointer to physical encoder
- */
-static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
-{
- if (!phys_enc)
- return;
-
- DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
-
- kfree(phys_enc);
-}
-
static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
@@ -689,7 +777,6 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
- ops->destroy = dpu_encoder_phys_wb_destroy;
ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
@@ -705,9 +792,10 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev: Corresponding device for devres management
* @p: Pointer to init info structure with initialization params
*/
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
@@ -720,7 +808,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
return ERR_PTR(-EINVAL);
}
- wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL);
if (!wb_enc) {
DPU_ERROR("failed to allocate wb phys_enc enc\n");
return ERR_PTR(-ENOMEM);
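
For reference, the chroma-siting switch in dpu_encoder_helper_phys_setup_cdm() above boils down to a small mapping; here it is restated as a hypothetical helper (DPU_CHROMA_H1V2 deliberately falls through to the unsupported default, matching the function above):

	struct cdwn_map { int h; int v; };

	static struct cdwn_map chroma_to_cdwn(u32 chroma_sample)
	{
		switch (chroma_sample) {
		case DPU_CHROMA_H2V1:	/* 4:2:2 output: halve chroma horizontally */
			return (struct cdwn_map){ CDM_CDWN_COSITE, CDM_CDWN_DISABLE };
		case DPU_CHROMA_420:	/* 4:2:0 output (e.g. NV12): halve both axes */
			return (struct cdwn_map){ CDM_CDWN_COSITE, CDM_CDWN_OFFSITE };
		default:		/* RGB or unsupported siting: no downsampling */
			return (struct cdwn_map){ CDM_CDWN_DISABLE, CDM_CDWN_DISABLE };
		}
	}
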
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 7056c08b9..54e871740 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -22,23 +22,14 @@
BIT(DPU_SSPP_CSC_10BIT))
#define VIG_MSM8998_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
(VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define VIG_SC7180_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
-
-#define VIG_SM6125_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
-
-#define VIG_SC7180_MASK_SDMA \
- (VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
#define DMA_MSM8998_MASK \
@@ -47,7 +38,7 @@
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
@@ -211,7 +202,41 @@ static const u32 rotation_v2_formats[] = {
/* TODO add formats after validation */
};
-static const uint32_t wb2_formats[] = {
+static const u32 wb2_formats_rgb[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_XBGR4444,
+};
+
+static const u32 wb2_formats_rgb_yuv[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888,
@@ -243,6 +268,7 @@ static const uint32_t wb2_formats[] = {
DRM_FORMAT_BGRA4444,
DRM_FORMAT_BGRX4444,
DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_NV12,
};
/*************************************************************
@@ -252,17 +278,14 @@ static const uint32_t wb2_formats[] = {
#define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min))
/* SSPP common configuration */
-#define _VIG_SBLK(sdma_pri, qseed_ver, scaler_ver) \
+#define _VIG_SBLK(scaler_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
- .smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
- .id = qseed_ver, \
.version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
- .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
@@ -271,17 +294,14 @@ static const uint32_t wb2_formats[] = {
.rotation_cfg = NULL, \
}
-#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, scaler_ver, rot_cfg) \
+#define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
- .smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
- .id = qseed_ver, \
.version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
- .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
@@ -290,138 +310,64 @@ static const uint32_t wb2_formats[] = {
.rotation_cfg = rot_cfg, \
}
-#define _DMA_SBLK(sdma_pri) \
+#define _VIG_SBLK_NOSCALE() \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+#define _DMA_SBLK() \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
- .smart_dma_priority = sdma_pri, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 2));
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 2));
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 2));
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 2));
-
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_maxheight = 1088,
.rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
.rot_format_list = rotation_v2_formats,
};
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 3));
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 3));
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 3));
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 3));
-
-static const struct dpu_sspp_sub_blks sm8150_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 4));
-static const struct dpu_sspp_sub_blks sm8150_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 4));
-static const struct dpu_sspp_sub_blks sm8150_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 4));
-static const struct dpu_sspp_sub_blks sm8150_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3,
- SSPP_SCALER_VER(1, 4));
-
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
-
-static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
- _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0));
-
-static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
- _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0),
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale =
+ _VIG_SBLK_NOSCALE();
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_2 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 2));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_3 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 3));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 4));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_2_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(2, 4));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 0));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0_rot_v2 =
+ _VIG_SBLK_ROT(SSPP_SCALER_VER(3, 0),
&dpu_rot_sc7280_cfg_v2);
-static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
- _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0));
-
-static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
- _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE,
- SSPP_SCALER_VER(2, 4));
-
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0));
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0));
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0));
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 0));
-
-static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 1));
-static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 1));
-static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 1));
-static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 1));
-
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 2));
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 2));
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
- _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 2));
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
- _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4,
- SSPP_SCALER_VER(3, 2));
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
-
-#define _VIG_SBLK_NOSCALE(sdma_pri) \
- { \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
- .smart_dma_priority = sdma_pri, \
- .format_list = plane_formats_yuv, \
- .num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
- }
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_1 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 1));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 2));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 3));
-static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2);
-static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1);
+static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
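
The net effect of this rework: instead of one sub-block instance per pipe per SoC (sdm845_vig_sblk_0..3, sm8550_dma_sblk_4, and so on), there is now a single VIG instance per scaler revision plus one shared DMA sblk, and each catalog entry simply names the revision its pipe implements. A hypothetical excerpt mirroring the sm8450 hunk earlier in this patch:

	static const struct dpu_sspp_cfg example_sspp[] = {
		{
			.name = "sspp_0", .id = SSPP_VIG0,
			.features = VIG_SDM845_MASK,
			.sblk = &dpu_vig_sblk_qseed3_3_1,	/* QSEED3 v3.1 */
			.type = SSPP_TYPE_VIG,
		}, {
			.name = "sspp_8", .id = SSPP_DMA0,
			.features = DMA_SDM845_MASK,
			.sblk = &dpu_dma_sblk,			/* one shared DMA sblk */
			.type = SSPP_TYPE_DMA,
		},
	};
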
/*************************************************************
* MIXER sub blocks config
@@ -473,12 +419,12 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
* DSPP sub blocks config
*************************************************************/
static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
- .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .base = 0x1700,
.len = 0x90, .version = 0x10007},
};
static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
- .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .base = 0x1700,
.len = 0x90, .version = 0x40000},
};
@@ -486,19 +432,19 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
* PINGPONG sub blocks config
*************************************************************/
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
.version = 0x1},
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ .dither = {.name = "dither", .base = 0xe0,
.len = 0x20, .version = 0x20000},
};
@@ -516,6 +462,16 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = {
};
/*************************************************************
+ * CDM block config
+ *************************************************************/
+static const struct dpu_cdm_cfg sc7280_cdm = {
+ .name = "cdm_0",
+ .id = CDM_0,
+ .len = 0x228,
+ .base = 0x79200,
+};
+
+/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -523,6 +479,7 @@ static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2};
static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1};
static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sm8650_rt_pri_lvl[] = {4, 4, 5, 5, 5, 5, 5, 6};
static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
{
@@ -609,6 +566,26 @@ static const struct dpu_vbif_cfg sm8550_vbif[] = {
},
};
+static const struct dpu_vbif_cfg sm8650_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1074,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sm8650_rt_pri_lvl),
+ .priority_lvl = sm8650_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 16,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
/*************************************************************
* PERF data config
*************************************************************/
@@ -705,6 +682,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_3_0_msm8998.h"
#include "catalog/dpu_4_0_sdm845.h"
+#include "catalog/dpu_4_1_sdm670.h"
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
@@ -724,3 +702,5 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_1_sm8450.h"
#include "catalog/dpu_9_0_sm8550.h"
+
+#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 624450753..ba82ef456 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -51,9 +51,7 @@ enum {
/**
* SSPP sub-blocks/features
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
- * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support
- * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support
- * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4)
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of color space conversion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
@@ -71,9 +69,7 @@ enum {
*/
enum {
DPU_SSPP_SCALER_QSEED2 = 0x1,
- DPU_SSPP_SCALER_QSEED3,
- DPU_SSPP_SCALER_QSEED3LITE,
- DPU_SSPP_SCALER_QSEED4,
+ DPU_SSPP_SCALER_QSEED3_COMPATIBLE,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
@@ -249,50 +245,50 @@ enum {
unsigned long features
/**
- * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
- * @name: string name for debug purposes
- * @id: enum identifying this sub-block
- * @base: offset of this sub-block relative to the block
- * offset
- * @len register block length of this sub-block
- */
-#define DPU_HW_SUBBLK_INFO \
- char name[DPU_HW_BLK_NAME_LEN]; \
- u32 id; \
- u32 base; \
- u32 len
-
-/**
* struct dpu_scaler_blk: Scaler information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
* @version: qseed block revision, on QSEED3+ platforms this is the value of
* scaler_blk.base + QSEED3_HW_VERSION registers.
*/
struct dpu_scaler_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
u32 version;
};
struct dpu_csc_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
};
/**
* struct dpu_pp_blk : Pixel processing sub-blk information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
* @version: HW Algorithm version
*/
struct dpu_pp_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
u32 version;
};
/**
* struct dpu_dsc_blk - DSC Encoder sub-blk information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
*/
struct dpu_dsc_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
};
/**
@@ -342,7 +338,6 @@ struct dpu_rotation_cfg {
* @max_mixer_width max layer mixer line width support.
* @max_mixer_blendstages max layer mixer blend stages or
* supported z order
- * @qseed_type qseed2 or qseed3 support.
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
@@ -355,7 +350,6 @@ struct dpu_rotation_cfg {
struct dpu_caps {
u32 max_mixer_width;
u32 max_mixer_blendstages;
- u32 qseed_type;
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
@@ -372,7 +366,6 @@ struct dpu_caps {
* common: Pointer to common configurations shared by sub blocks
* @maxdwnscale: max downscale ratio supported (without DECIMATION)
* @maxupscale: max upscale ratio supported
- * @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @qseed_ver: qseed version
* @scaler_blk:
@@ -386,7 +379,6 @@ struct dpu_caps {
struct dpu_sspp_sub_blks {
u32 maxdwnscale;
u32 maxupscale;
- u32 smart_dma_priority;
u32 max_per_pipe_bw;
u32 qseed_ver;
struct dpu_scaler_blk scaler_blk;
@@ -691,6 +683,17 @@ struct dpu_vbif_cfg {
};
/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @name string name for debug purposes
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
* Define CDP use cases
* @DPU_PERF_CDP_USAGE_RT: real-time use cases
* @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
@@ -813,6 +816,8 @@ struct dpu_mdss_cfg {
u32 wb_count;
const struct dpu_wb_cfg *wb;
+ const struct dpu_cdm_cfg *cdm;
+
u32 ad_count;
u32 dspp_count;
@@ -828,6 +833,7 @@ struct dpu_mdss_cfg {
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm670_cfg;
extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
@@ -842,5 +848,6 @@ extern const struct dpu_mdss_cfg dpu_sc7280_cfg;
extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000..9016b3ade
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+
+#include <drm/drm_managed.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+#define CDM_MUX 0x224
+
+/* CDM CDWN2 sub-block bit definitions */
+#define CDM_CDWN2_OP_MODE_EN BIT(0)
+#define CDM_CDWN2_OP_MODE_ENABLE_H BIT(1)
+#define CDM_CDWN2_OP_MODE_ENABLE_V BIT(2)
+#define CDM_CDWN2_OP_MODE_BITS_OUT_8BIT BIT(7)
+#define CDM_CDWN2_V_PIXEL_METHOD_MASK GENMASK(6, 5)
+#define CDM_CDWN2_H_PIXEL_METHOD_MASK GENMASK(4, 3)
+
+/* CDM CSC10 sub-block bit definitions */
+#define CDM_CSC10_OP_MODE_EN BIT(0)
+#define CDM_CSC10_OP_MODE_SRC_FMT_YUV BIT(1)
+#define CDM_CSC10_OP_MODE_DST_FMT_YUV BIT(2)
+
+/* CDM HDMI pack sub-block bit definitions */
+#define CDM_HDMI_PACK_OP_MODE_EN BIT(0)
+
+/*
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/*
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/*
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/*
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode;
+ u32 out_size;
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ opmode = 0;
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_PIXEL_DROP);
+ break;
+ case CDM_CDWN_AVG:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_AVG);
+ break;
+ case CDM_CDWN_COSITE:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_COSITE);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ opmode = CDM_CDWN2_OP_MODE_ENABLE_H |
+ FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, CDM_CDWN2_METHOD_OFFSITE);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ DPU_ERROR("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* if it's only horizontal downsample, we don't need to do anything here */
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_PIXEL_DROP);
+ break;
+ case CDM_CDWN_AVG:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_AVG);
+ break;
+ case CDM_CDWN_COSITE:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_COSITE);
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ opmode |= CDM_CDWN2_OP_MODE_ENABLE_V |
+ FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK,
+ CDM_CDWN2_METHOD_OFFSITE);
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->output_bit_depth != CDM_CDWN_OUTPUT_10BIT)
+ opmode |= CDM_CDWN2_OP_MODE_BITS_OUT_8BIT;
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= CDM_CDWN2_OP_MODE_EN; /* EN CDWN module */
+ else
+ opmode &= ~CDM_CDWN2_OP_MODE_EN;
+
+ out_size = (cfg->output_width & 0xFFFF) | ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT, ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt;
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!ctx || !cdm)
+ return -EINVAL;
+
+ fmt = cdm->output_fmt;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, cdm->csc_cfg, true);
+ dpu_hw_cdm_setup_cdwn(ctx, cdm);
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample == DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = CDM_HDMI_PACK_OP_MODE_EN;
+ opmode |= (fmt->chroma_sample << 1);
+ }
+
+ csc |= CDM_CSC10_OP_MODE_DST_FMT_YUV;
+ csc &= ~CDM_CSC10_OP_MODE_SRC_FMT_YUV;
+ csc |= CDM_CSC10_OP_MODE_EN;
+
+ if (ctx && ctx->ops.bind_pingpong_blk)
+ ctx->ops.bind_pingpong_blk(ctx, cdm->pp_id);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int mux_cfg;
+
+ c = &ctx->hw;
+
+ mux_cfg = DPU_REG_READ(c, CDM_MUX);
+ mux_cfg &= ~0xf;
+
+ if (pp)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, CDM_MUX, mux_cfg);
+}
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
+ const struct dpu_cdm_cfg *cfg, void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
+{
+ struct dpu_hw_cdm *c;
+
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_CDM;
+
+ /* Assign ops */
+ c->idx = cfg->id;
+ c->caps = cfg;
+
+ c->ops.enable = dpu_hw_cdm_enable;
+ if (mdss_rev->core_major_ver >= 5)
+ c->ops.bind_pingpong_blk = dpu_hw_cdm_bind_pingpong_blk;
+
+ return c;
+}
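
For reference, here is a minimal standalone sketch of what the FIELD_PREP() packing in dpu_hw_cdm_setup_cdwn() produces for the CDWN2 op-mode word. The mask values mirror the #defines in this file; the program itself is illustrative only and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* mirrors of the CDM_CDWN2_* defines above; GENMASK(4, 3) is the H-method field */
#define OP_MODE_EN        (1u << 0)
#define OP_MODE_ENABLE_H  (1u << 1)
#define H_METHOD_SHIFT    3
#define H_METHOD_MASK     (0x3u << H_METHOD_SHIFT)

enum { METHOD_PIXEL_DROP, METHOD_AVG, METHOD_COSITE, METHOD_OFFSITE };

int main(void)
{
	/* FIELD_PREP(mask, val) expands to ((val) << shift-of-mask) & mask */
	uint32_t opmode = OP_MODE_ENABLE_H |
			  (((uint32_t)METHOD_COSITE << H_METHOD_SHIFT) & H_METHOD_MASK);

	opmode |= OP_MODE_EN;	/* CDWN is enabled once any method is chosen */
	printf("opmode = 0x%02x\n", opmode);	/* prints 0x13 */
	return 0;
}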
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000..348424df8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+
+struct dpu_hw_cdm;
+
+/**
+ * struct dpu_hw_cdm_cfg : current configuration of CDM block
+ *
+ * @output_width: output ROI width of CDM block
+ * @output_height: output ROI height of CDM block
+ * @output_bit_depth: output bit-depth of CDM block
+ * @h_cdwn_type: downsample type used for horizontal pixels
+ * @v_cdwn_type: downsample type used for vertical pixels
+ * @output_fmt: handle to dpu_format of CDM block
+ * @csc_cfg: handle to CSC matrix programmed for CDM block
+ * @output_type: interface to which CDM is paired (HDMI/WB)
+ * @pp_id: ping-pong block to which the CDM is bound
+ */
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ const struct dpu_csc_cfg *csc_cfg;
+ u32 output_type;
+ int pp_id;
+};
+
+/*
+ * These values are used to indicate which type of downsample is used
+ * in the horizontal/vertical direction for the CDM block.
+ */
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+/*
+ * CDM block can be paired with a WB or HDMI block. These values
+ * indicate the block with which the CDM is paired.
+ */
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+/*
+ * CDM block can give an 8-bit or 10-bit output. These values
+ * are used to indicate the output bit depth of the CDM block.
+ */
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/*
+ * CDM block can downsample using different methods. These values
+ * are used to indicate the downsample method which can be used
+ * either in the horizontal or vertical direction.
+ */
+enum dpu_hw_cdwn_op_mode_method_h_v {
+ CDM_CDWN2_METHOD_PIXEL_DROP,
+ CDM_CDWN2_METHOD_AVG,
+ CDM_CDWN2_METHOD_COSITE,
+ CDM_CDWN2_METHOD_OFFSITE
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down HW driver functions.
+ * These functions are assumed to be called only after
+ * clocks are enabled.
+ * @enable: Enables the output to the interface and programs the
+ * output packer
+ * @bind_pingpong_blk: enable/disable the connection with the pingpong block
+ * that will feed pixels to this CDM
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Enable the CDM module
+ * @cdm: Pointer to chroma down context
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable/disable the connection with pingpong
+ * @cdm: Pointer to chroma down context
+ * @pp: pingpong block id.
+ */
+ void (*bind_pingpong_blk)(struct dpu_hw_cdm *cdm, const enum dpu_pingpong pp);
+};
+
+/**
+ * struct dpu_hw_cdm - cdm description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: CDM index
+ * @caps: Pointer to cdm_cfg
+ * @ops: handle to operations possible for this CDM
+ */
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * dpu_hw_cdm_init - initializes the CDM HW driver object.
+ * Should be called once per CDM instance before it is accessed.
+ * @dev: DRM device handle
+ * @cdm: CDM catalog entry for which driver object is required
+ * @addr: mapped register io address of MDSS
+ * @mdss_rev: mdss hw core revision
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
+ const struct dpu_cdm_cfg *cdm, void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
+
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+#endif /* _DPU_HW_CDM_H */
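
To tie the declarations above together, here is a hedged, caller-side sketch of a CDM bring-up for an HDMI path. Only the dpu_hw_cdm_* names, the enums and the dpu_csc10_rgb2yuv_601l table come from this patch; the 1080p geometry, the chosen downsample types and the wrapper itself are illustrative assumptions.

/* Illustrative sketch, not part of the patch. */
static int example_cdm_bringup(struct drm_device *drm,
			       const struct dpu_cdm_cfg *cat,
			       void __iomem *mmio,
			       const struct dpu_mdss_version *rev,
			       const struct dpu_format *yuv_fmt)
{
	struct dpu_hw_cdm *cdm;
	struct dpu_hw_cdm_cfg cfg = {
		.output_width	  = 1920,
		.output_height	  = 1080,
		.output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
		.h_cdwn_type	  = CDM_CDWN_COSITE,	/* cosite filter horizontally */
		.v_cdwn_type	  = CDM_CDWN_OFFSITE,	/* offsite filter vertically */
		.output_fmt	  = yuv_fmt,		/* must be a YUV format */
		.csc_cfg	  = &dpu_csc10_rgb2yuv_601l,
		.output_type	  = CDM_CDWN_OUTPUT_HDMI,
		.pp_id		  = PINGPONG_0,
	};

	cdm = dpu_hw_cdm_init(drm, cat, mmio, rev);
	if (IS_ERR(cdm))
		return PTR_ERR(cdm);

	/* programs the CSC, the CDWN2 downscaler and the HDMI output packer */
	return cdm->ops.enable(cdm, &cfg);
}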
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index e7b680a15..e76565c3e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -32,11 +32,13 @@
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_CDM_ACTIVE 0x0F8
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_FLUSH 0x104
#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
+#define CTL_CDM_FLUSH 0x114
#define CTL_INTF_MASTER 0x134
#define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4))
@@ -46,6 +48,7 @@
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define DSC_IDX 22
+#define CDM_IDX 26
#define INTF_IDX 31
#define WB_IDX 16
#define DSPP_IDX 29 /* From DPU hw rev 7.x.x */
@@ -107,6 +110,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
ctx->pending_wb_flush_mask = 0;
ctx->pending_merge_3d_flush_mask = 0;
ctx->pending_dsc_flush_mask = 0;
+ ctx->pending_cdm_flush_mask = 0;
memset(ctx->pending_dspp_flush_mask, 0,
sizeof(ctx->pending_dspp_flush_mask));
@@ -151,6 +155,10 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
ctx->pending_dsc_flush_mask);
+ if (ctx->pending_flush_mask & BIT(CDM_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
+ ctx->pending_cdm_flush_mask);
+
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
@@ -282,6 +290,13 @@ static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
}
}
+static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
+{
+ /* update pending flush only if CDM_0 is flushed */
+ if (cdm_num == CDM_0)
+ ctx->pending_flush_mask |= BIT(CDM_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
enum dpu_wb wb)
{
@@ -310,6 +325,12 @@ static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(DSC_IDX);
}
+static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
+{
+ ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
+ ctx->pending_flush_mask |= BIT(CDM_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp, u32 dspp_sub_blk)
{
@@ -543,6 +564,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+
+ if (cfg->cdm)
+ DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
@@ -586,6 +610,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
+ u32 cdm_active;
/*
* This API resets each portion of the CTL path namely,
@@ -621,6 +646,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
dsc_active &= ~cfg->dsc;
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
}
+
+ if (cfg->cdm) {
+ cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
+ cdm_active &= ~cfg->cdm;
+ DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
+ }
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
@@ -654,12 +685,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
+ ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
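
The CDM flush wired up above follows the same two-step contract as the other CTL clients: the caller first accumulates the block into the cached pending masks, and a single trigger_flush() later commits everything to hardware. A condensed, hedged sketch of the caller side (the wrapper is invented; the ops come from this patch):

/* Illustrative sketch, not part of the patch. */
static void example_flush_cdm(struct dpu_hw_ctl *ctl)
{
	/* only updates the cached masks; no register access happens yet */
	if (ctl->ops.update_pending_flush_cdm)
		ctl->ops.update_pending_flush_cdm(ctl, CDM_0);

	/* commits the masks: the _v1 path above writes CTL_CDM_FLUSH, then CTL_FLUSH */
	ctl->ops.trigger_flush(ctl);
}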
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 279ebd8df..ff85b5ee0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -39,6 +39,7 @@ struct dpu_hw_stage_cfg {
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
+ * @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
*/
@@ -48,6 +49,7 @@ struct dpu_hw_intf_cfg {
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
+ enum dpu_cdm cdm;
int stream_sel;
unsigned int dsc;
};
@@ -167,6 +169,14 @@ struct dpu_hw_ctl_ops {
enum dpu_dsc blk);
/**
+ * OR in the given flushbits to the cached pending_(cdm_)flush_mask
+ * No effect on hardware
+ * @ctx: ctl path ctx pointer
+ * @cdm_num: idx of cdm to be flushed
+ */
+ void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num);
+
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -239,6 +249,7 @@ struct dpu_hw_ctl_ops {
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
* @pending_dsc_flush_mask: pending DSC flush
+ * @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -256,6 +267,7 @@ struct dpu_hw_ctl {
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
u32 pending_dsc_flush_mask;
+ u32 pending_cdm_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 088807db2..6a0a74832 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -6,6 +6,8 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <drm/drm_managed.h>
+
#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
@@ -472,8 +474,9 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
return intr_status;
}
-struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
unsigned int i;
@@ -481,7 +484,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
@@ -512,11 +515,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return intr;
}
-void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
-{
- kfree(intr);
-}
-
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
@@ -527,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
int ret;
if (!irq_cb) {
- DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+ DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
return -EINVAL;
}
if (!dpu_core_irq_is_valid(irq_idx)) {
- DPU_ERROR("invalid IRQ=[%d, %d]\n",
- DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+ DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 53a21ebc5..564b750a2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -70,15 +70,12 @@ struct dpu_hw_intr {
/**
* dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @dev: Corresponding device for devres management
* @addr: mapped register io address of MDP
* @m: pointer to MDSS catalog data
*/
-struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
-/**
- * dpu_hw_intr_destroy(): Cleanup interrutps hw object
- * @intr: pointer to interrupts hw object
- */
-void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
#endif
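
dpu_hw_intr_destroy() can be dropped because drmm_kzalloc() ties the allocation to the drm_device lifetime: the memory is released automatically when the last reference to the device goes away. The same conversion is applied to the VBIF, CDM and plane objects elsewhere in this patch. Reduced to a generic sketch (the struct and function names are invented for illustration):

#include <drm/drm_managed.h>

struct example_blk {
	void __iomem *base;
};

static struct example_blk *example_blk_init(struct drm_device *dev,
					    void __iomem *addr)
{
	/* freed automatically on final drm_dev_put(); no kfree()/destroy() pair */
	struct example_blk *blk = drmm_kzalloc(dev, sizeof(*blk), GFP_KERNEL);

	if (!blk)
		return ERR_PTR(-ENOMEM);

	blk->base = addr;
	return blk;
}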
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index d85157acf..5df545904 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -98,6 +98,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
DPU_HW_BLK_DSC,
+ DPU_HW_BLK_CDM,
DPU_HW_BLK_MAX,
};
@@ -185,6 +186,11 @@ enum dpu_dsc {
DSC_MAX
};
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_MAX
+};
+
enum dpu_pingpong {
PINGPONG_NONE,
PINGPONG_0,
@@ -195,6 +201,8 @@ enum dpu_pingpong {
PINGPONG_5,
PINGPONG_6,
PINGPONG_7,
+ PINGPONG_8,
+ PINGPONG_9,
PINGPONG_S0,
PINGPONG_MAX
};
@@ -204,6 +212,7 @@ enum dpu_merge_3d {
MERGE_3D_1,
MERGE_3D_2,
MERGE_3D_3,
+ MERGE_3D_4,
MERGE_3D_MAX
};
@@ -458,6 +467,7 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_ROT (1 << 9)
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
+#define DPU_DBG_MASK_CDM (1 << 12)
/**
* struct dpu_hw_tear_check - Struct contains parameters to configure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 069bf429e..0bf8a83e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -396,15 +396,6 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx,
format);
}
-static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx)
-{
- if (!ctx)
- return 0;
-
- return dpu_hw_get_scaler3_ver(&ctx->hw,
- ctx->cap->sblk->scaler_blk.base);
-}
-
/*
* dpu_hw_sspp_setup_rects()
*/
@@ -615,12 +606,8 @@ static void _setup_layer_ops(struct dpu_hw_sspp *c,
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
- if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
- test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
- test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
+ if (test_bit(DPU_SSPP_SCALER_QSEED3_COMPATIBLE, &features))
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
- c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
- }
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
@@ -655,10 +642,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
cfg->len,
kms);
- if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ if (sblk->scaler_blk.len)
dpu_debugfs_create_regset32("scaler_blk", 0400,
debugfs_root,
sblk->scaler_blk.base + cfg->base,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 3641ef6be..b7dc52312 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -22,21 +22,6 @@ struct dpu_hw_sspp;
#define DPU_SSPP_SOLID_FILL BIT(4)
/**
- * Define all scaler feature bits in catalog
- */
-#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
- BIT(DPU_SSPP_SCALER_QSEED2) | \
- BIT(DPU_SSPP_SCALER_QSEED3) | \
- BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
- BIT(DPU_SSPP_SCALER_QSEED4))
-
-/*
- * Define all CSC feature bits in catalog
- */
-#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
- BIT(DPU_SSPP_CSC_10BIT))
-
-/**
* Component indices
*/
enum {
@@ -297,12 +282,6 @@ struct dpu_hw_sspp_ops {
const struct dpu_format *format);
/**
- * get_scaler_ver - get scaler h/w version
- * @ctx: Pointer to pipe context
- */
- u32 (*get_scaler_ver)(struct dpu_hw_sspp *ctx);
-
- /**
* setup_cdp - setup client driven prefetch
* @pipe: Pointer to software pipe context
* @fmt: format used by the sw pipe
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 1d4f0b97c..dd4758273 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -381,12 +381,6 @@ end:
DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
}
-u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
- u32 scaler_offset)
-{
- return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
-}
-
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10)
@@ -563,3 +557,47 @@ bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c,
return clk_forced_on;
}
+
+#define TO_S15D16(_x_) ((_x_) << 7)
+
+const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
+
+const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l = {
+ {
+ TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
+ TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc)
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
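
A note on the fixed-point encoding above, inferred from the numeric values rather than stated anywhere in the source: the TO_S15D16() inputs appear to carry 9 fractional bits, so the 7-bit left shift yields the S15.16 layout the CSC block consumes, and negative coefficients such as 0x1fb5 wrap in the s13 representation mentioned earlier in this patch. A quick standalone check of the luma row:

#include <stdio.h>

#define TO_S15D16(x) ((x) << 7)

int main(void)
{
	/* first row of dpu_csc10_rgb2yuv_601l: Y = 0.257 R + 0.504 G + 0.098 B */
	printf("%.4f %.4f %.4f\n",
	       TO_S15D16(0x0083) / 65536.0,	/* 0.2559 */
	       TO_S15D16(0x0102) / 65536.0,	/* 0.5039 */
	       TO_S15D16(0x0032) / 65536.0);	/* 0.0977 */
	return 0;
}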
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index ec09fc386..64ded69fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -19,6 +19,12 @@
#define MISR_CTRL_STATUS_CLEAR BIT(10)
#define MISR_CTRL_FREE_RUN_MASK BIT(31)
+#define TO_S15D16(_x_) ((_x_) << 7)
+
+extern const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L;
+extern const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L;
+extern const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l;
+
/*
* This is the common struct maintained by each sub block
* for mapping the register offsets in this block to the
@@ -340,9 +346,6 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
u32 scaler_offset, u32 scaler_version,
const struct dpu_format *format);
-u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
- u32 scaler_offset);
-
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index a5121a50b..98e34afde 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
@@ -211,12 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
-struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
+ const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_vbif *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -234,8 +237,3 @@ struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
return c;
}
-
-void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
-{
- kfree(vbif);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index 7e10d2a17..e2b430750 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -108,12 +108,12 @@ struct dpu_hw_vbif {
/**
* dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
* VBIF catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: VBIF catalog entry for which driver object is required
* @addr: Mapped register io address of MDSS
*/
-struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
- void __iomem *addr);
-
-void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
+ const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 81acbebac..2330eb2a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -274,9 +274,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
- struct drm_device *dev;
- struct msm_drm_private *priv;
- int i;
if (!p)
return -EINVAL;
@@ -285,9 +282,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
if (minor->type != DRM_MINOR_PRIMARY)
return 0;
- dev = dpu_kms->dev;
- priv = dev->dev_private;
-
entry = debugfs_create_dir("debug", minor->debugfs_root);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
@@ -297,11 +291,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_core_irq_init(dpu_kms, entry);
dpu_debugfs_sspp_init(dpu_kms, entry);
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (priv->dp[i])
- msm_dp_debugfs_init(priv->dp[i], minor);
- }
-
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@@ -603,7 +592,6 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
return rc;
}
}
@@ -636,7 +624,6 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
return rc;
}
@@ -668,7 +655,6 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev,
n_formats);
if (rc) {
DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
return rc;
}
@@ -812,20 +798,13 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
int i;
- if (dpu_kms->hw_intr)
- dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
/* safe to call these more than once during shutdown */
_dpu_kms_mmu_destroy(dpu_kms);
- if (dpu_kms->catalog) {
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
- dpu_kms->hw_vbif[i] = NULL;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ dpu_kms->hw_vbif[i] = NULL;
}
dpu_kms_global_obj_fini(dpu_kms);
@@ -858,7 +837,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int i;
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
@@ -867,9 +845,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
if (!priv)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
- msm_dp_irq_postinstall(priv->dp[i]);
-
return 0;
}
@@ -977,6 +952,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
}
}
+ if (cat->cdm)
+ msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
+ dpu_kms->mmio + cat->cdm->base, cat->cdm->name);
+
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -1080,7 +1059,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
if (!dpu_kms->catalog) {
DPU_ERROR("device config not known!\n");
rc = -EINVAL;
- goto power_error;
+ goto err_pm_put;
}
/*
@@ -1090,26 +1069,26 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = _dpu_kms_mmu_init(dpu_kms);
if (rc) {
DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
DPU_ERROR("failed to get MDSS data: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
if (!dpu_kms->mdss) {
rc = -EINVAL;
DPU_ERROR("NULL MDSS data\n");
- goto power_error;
+ goto err_pm_put;
}
rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
@@ -1120,18 +1099,18 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
- goto power_error;
+ goto err_pm_put;
}
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_hw_vbif *hw;
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
- hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
+ hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->hw_vbif[vbif->id] = hw;
@@ -1147,15 +1126,15 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
- goto perf_err;
+ goto err_pm_put;
}
- dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
- if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
DPU_ERROR("hw_intr init failed: %d\n", rc);
dpu_kms->hw_intr = NULL;
- goto hw_intr_init_err;
+ goto err_pm_put;
}
dev->mode_config.min_width = 0;
@@ -1180,7 +1159,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
rc = _dpu_kms_drm_obj_init(dpu_kms);
if (rc) {
DPU_ERROR("modeset init failed: %d\n", rc);
- goto drm_obj_init_err;
+ goto err_pm_put;
}
dpu_vbif_init_memtypes(dpu_kms);
@@ -1189,10 +1168,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
return 0;
-drm_obj_init_err:
-hw_intr_init_err:
-perf_err:
-power_error:
+err_pm_put:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
@@ -1350,6 +1326,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
@@ -1364,6 +1341,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
+ { .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 9112a702a..d1207f4ec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -136,6 +136,7 @@ struct dpu_global_state {
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
+ uint32_t cdm_to_enc_id;
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 3eef5e025..ff975ad51 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -21,6 +21,7 @@
#include "dpu_kms.h"
#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
+#include "dpu_hw_util.h"
#include "dpu_trace.h"
#include "dpu_crtc.h"
#include "dpu_vbif.h"
@@ -78,8 +79,6 @@ static const uint32_t qcom_compressed_supported_formats[] = {
struct dpu_plane {
struct drm_plane base;
- struct mutex lock;
-
enum dpu_sspp pipe;
uint32_t color_fill;
@@ -470,8 +469,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw,
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
- if (pipe_hw->cap->features &
- BIT(DPU_SSPP_SCALER_QSEED4)) {
+ if (pipe_hw->cap->sblk->scaler_blk.version >= 0x3000) {
scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
} else {
@@ -511,36 +509,6 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
}
}
-static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xfff0, 0xff80, 0xff80,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
- { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
-};
-
-static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xffc0, 0xfe00, 0xfe00,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
- { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
-};
-
static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
const struct dpu_format *fmt)
{
@@ -774,8 +742,8 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pipe->sspp->cap->features & DPU_SSPP_SCALER) ||
- !(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) {
+ (!pipe->sspp->cap->sblk->scaler_blk.len ||
+ !pipe->sspp->cap->sblk->csc_blk.len)) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
@@ -824,6 +792,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
plane);
int ret = 0, min_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
@@ -892,14 +862,16 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
max_linewidth = pdpu->catalog->caps->max_linewidth;
- if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
+ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
/*
* In parallel multirect case only the half of the usual width
* is supported for tiled formats. If we are here, we know that
* full width is more than max_linewidth, thus each rect is
* wider than allowed.
*/
- if (DPU_FORMAT_IS_UBWC(fmt)) {
+ if (DPU_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
@@ -1213,29 +1185,6 @@ static void dpu_plane_atomic_update(struct drm_plane *plane,
}
}
-static void dpu_plane_destroy(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
- struct dpu_plane_state *pstate;
-
- DPU_DEBUG_PLANE(pdpu, "\n");
-
- if (pdpu) {
- pstate = to_dpu_plane_state(plane->state);
- _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false);
-
- if (pstate->r_pipe.sspp)
- _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false);
-
- mutex_destroy(&pdpu->lock);
-
- /* this will destroy the states as well */
- drm_plane_cleanup(plane);
-
- kfree(pdpu);
- }
-}
-
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -1405,7 +1354,6 @@ static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = dpu_plane_destroy,
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_plane_duplicate_state,
.atomic_destroy_state = dpu_plane_destroy_state,
@@ -1433,35 +1381,28 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
struct dpu_hw_sspp *pipe_hw;
uint32_t num_formats;
uint32_t supported_rotations;
- int ret = -EINVAL;
-
- /* create and zero local structure */
- pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
- if (!pdpu) {
- DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
- ret = -ENOMEM;
- return ERR_PTR(ret);
- }
-
- /* cache local stuff for later */
- plane = &pdpu->base;
- pdpu->pipe = pipe;
+ int ret;
/* initialize underlying h/w driver */
pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
DPU_ERROR("[%u]SSPP is invalid\n", pipe);
- goto clean_plane;
+ return ERR_PTR(-EINVAL);
}
format_list = pipe_hw->cap->sblk->format_list;
num_formats = pipe_hw->cap->sblk->num_formats;
- ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
+ 0xff, &dpu_plane_funcs,
format_list, num_formats,
supported_format_modifiers, type, NULL);
- if (ret)
- goto clean_plane;
+ if (IS_ERR(pdpu))
+ return ERR_CAST(pdpu);
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
pdpu->catalog = kms->catalog;
@@ -1488,13 +1429,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
- mutex_init(&pdpu->lock);
-
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
-
-clean_plane:
- kfree(pdpu);
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 0bb28cf4a..724537ab7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -8,6 +8,7 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
@@ -28,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @topology: selected topology for the display
- * @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
struct msm_display_topology topology;
@@ -176,6 +176,18 @@ int dpu_rm_init(struct drm_device *dev,
rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
}
+ if (cat->cdm) {
+ struct dpu_hw_cdm *hw;
+
+ hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed cdm object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->cdm_blk = &hw->base;
+ }
+
return 0;
fail:
@@ -191,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
+ *
+ * Returns: lm peer mixer id on success or %-EINVAL on error
*/
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
@@ -422,6 +436,26 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
return 0;
}
+static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
+{
+ /* try allocating only one CDM block */
+ if (!rm->cdm_blk) {
+ DPU_ERROR("CDM block does not exist\n");
+ return -EIO;
+ }
+
+ if (global_state->cdm_to_enc_id) {
+ DPU_ERROR("CDM_0 is already allocated\n");
+ return -EIO;
+ }
+
+ global_state->cdm_to_enc_id = enc->base.id;
+
+ return 0;
+}
+
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -447,6 +481,14 @@ static int _dpu_rm_make_reservation(
if (ret)
return ret;
+ if (reqs->topology.needs_cdm) {
+ ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
+ if (ret) {
+ DPU_ERROR("unable to find CDM blk\n");
+ return ret;
+ }
+ }
+
return ret;
}
@@ -457,9 +499,9 @@ static int _dpu_rm_populate_requirements(
{
reqs->topology = req_topology;
- DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
reqs->topology.num_lm, reqs->topology.num_dsc,
- reqs->topology.num_intf);
+ reqs->topology.num_intf, reqs->topology.needs_cdm);
return 0;
}
@@ -488,6 +530,7 @@ void dpu_rm_release(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}
int dpu_rm_reserve(
@@ -561,6 +604,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->dsc_to_enc_id;
max_blks = ARRAY_SIZE(rm->dsc_blks);
break;
+ case DPU_HW_BLK_CDM:
+ hw_blks = &rm->cdm_blk;
+ hw_to_enc_id = &global_state->cdm_to_enc_id;
+ max_blks = 1;
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 36752d837..e3f83ebc6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -22,6 +22,7 @@ struct dpu_global_state;
* @hw_wb: array of wb hardware resources
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
+ * @cdm_blk: cdm hardware resource
*/
struct dpu_rm {
struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
@@ -33,6 +34,7 @@ struct dpu_rm {
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE];
+ struct dpu_hw_blk *cdm_blk;
};
/**
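
With @cdm_blk wired into the resource manager, an encoder that reserved a topology with needs_cdm set can fetch its block through the generic query. A hedged sketch of that lookup follows; the wrapper is invented, and the dpu_rm_get_assigned_resources() signature is assumed from its use in dpu_rm.c above.

/* Illustrative sketch, not part of the patch. */
static struct dpu_hw_cdm *example_get_cdm(struct dpu_rm *rm,
					  struct dpu_global_state *state,
					  uint32_t enc_id)
{
	struct dpu_hw_blk *blk = NULL;
	int num;

	/* at most one CDM exists, so a one-element array suffices */
	num = dpu_rm_get_assigned_resources(rm, state, enc_id,
					    DPU_HW_BLK_CDM, &blk, 1);

	return (num == 1) ? to_dpu_hw_cdm(blk) : NULL;
}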
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 310095722..75f93e346 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -6,6 +6,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -123,16 +124,6 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
drm_gem_object_put(val);
}
-static void mdp4_crtc_destroy(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
-
- kfree(mdp4_crtc);
-}
-
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
@@ -484,7 +475,6 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp4_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
@@ -625,6 +615,13 @@ static const char *dma_names[] = {
"DMA_P", "DMA_S", "DMA_E",
};
+static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
+{
+ struct mdp4_crtc *mdp4_crtc = ptr;
+
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+}
+
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
@@ -632,10 +629,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp4_crtc *mdp4_crtc;
+ int ret;
- mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
- if (!mdp4_crtc)
- return ERR_PTR(-ENOMEM);
+ mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base,
+ plane, NULL,
+ &mdp4_crtc_funcs, NULL);
+ if (IS_ERR(mdp4_crtc))
+ return ERR_CAST(mdp4_crtc);
crtc = &mdp4_crtc->base;
@@ -657,9 +657,10 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
+ ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
+ if (ret)
+ return ERR_PTR(ret);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
- NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
return crtc;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 39b8fe53c..74dafe710 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -26,18 +26,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(mdp4_dsi_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
- .destroy = mdp4_dsi_encoder_destroy,
-};
-
static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -148,28 +136,18 @@ static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_dsi_encoder *mdp4_dsi_encoder;
- int ret;
- mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
- if (!mdp4_dsi_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_dsi_encoder = drmm_encoder_alloc(dev, struct mdp4_dsi_encoder, base,
+ NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(mdp4_dsi_encoder))
+ return ERR_CAST(mdp4_dsi_encoder);
encoder = &mdp4_dsi_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
return encoder;
-
-fail:
- if (encoder)
- mdp4_dsi_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index 88645dbc3..3b70764b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -25,17 +25,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp4_dtv_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
- .destroy = mdp4_dtv_encoder_destroy,
-};
-
static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -173,41 +162,29 @@ long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
/* initialize encoder */
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_dtv_encoder *mdp4_dtv_encoder;
- int ret;
- mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
- if (!mdp4_dtv_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_dtv_encoder = drmm_encoder_alloc(dev, struct mdp4_dtv_encoder, base,
+ NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (IS_ERR(mdp4_dtv_encoder))
+ return ERR_CAST(mdp4_dtv_encoder);
encoder = &mdp4_dtv_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
- goto fail;
+ return ERR_CAST(mdp4_dtv_encoder->hdmi_clk);
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
- goto fail;
+ return ERR_CAST(mdp4_dtv_encoder->mdp_clk);
}
return encoder;
-
-fail:
- if (encoder)
- mdp4_dtv_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 10eb3e5b2..576995ddc 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -18,7 +18,7 @@ struct mdp4_lcdc_encoder {
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
- struct regulator *regs[3];
+ struct regulator_bulk_data regs[3];
bool enabled;
uint32_t bsc;
};
@@ -30,18 +30,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp4_lcdc_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
- .destroy = mdp4_lcdc_encoder_destroy,
-};
-
/* this should probably be a helper: */
static struct drm_connector *get_connector(struct drm_encoder *encoder)
{
@@ -271,12 +259,10 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
- int i, ret;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
@@ -301,11 +287,8 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
- }
+ regulator_bulk_disable(ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
mdp4_lcdc_encoder->enabled = false;
}
@@ -319,7 +302,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
uint32_t config;
- int i, ret;
+ int ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
@@ -339,11 +322,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
- }
+ ret = regulator_bulk_enable(ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulators: %d\n", ret);
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
@@ -383,63 +365,38 @@ long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
struct device_node *panel_node)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
- struct regulator *reg;
int ret;
- mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
- if (!mdp4_lcdc_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_lcdc_encoder = drmm_encoder_alloc(dev, struct mdp4_lcdc_encoder, base,
+ NULL, DRM_MODE_ENCODER_LVDS, NULL);
+ if (IS_ERR(mdp4_lcdc_encoder))
+ return ERR_CAST(mdp4_lcdc_encoder);
mdp4_lcdc_encoder->panel_node = panel_node;
encoder = &mdp4_lcdc_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
- ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
- goto fail;
+ return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
}
/* TODO: different regulators in other cases? */
- reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[0] = reg;
-
- reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[1] = reg;
+ mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
+ mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
+ mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
- reg = devm_regulator_get(dev->dev, "lvds-vdda");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[2] = reg;
+ ret = devm_regulator_bulk_get(dev->dev,
+ ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
+ if (ret)
+ return ERR_PTR(ret);
return encoder;
-
-fail:
- if (encoder)
- mdp4_lcdc_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 694d54341..c5179e4c3 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -1350,23 +1350,17 @@ int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
return cfg_handler->revision;
}
-void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
-{
- kfree(cfg_handler);
-}
-
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
struct mdp5_cfg_handler *cfg_handler;
const struct mdp5_cfg_handler *cfg_handlers;
- int i, ret = 0, num_handlers;
+ int i, num_handlers;
- cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ cfg_handler = devm_kzalloc(dev->dev, sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
- ret = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
switch (major) {
@@ -1381,8 +1375,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
default:
DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
- ret = -ENXIO;
- goto fail;
+ return ERR_PTR(-ENXIO);
}
/* only after mdp5_cfg global pointer's init can we access the hw */
@@ -1396,8 +1389,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
if (unlikely(!mdp5_cfg)) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
- ret = -ENXIO;
- goto fail;
+ return ERR_PTR(-ENXIO);
}
cfg_handler->revision = minor;
@@ -1406,10 +1398,4 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
-
-fail:
- if (cfg_handler)
- mdp5_cfg_destroy(cfg_handler);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index c2502cc33..26c5d8b4a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -121,6 +121,5 @@ int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
-void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 86036dd4e..4a3db2ea1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -172,14 +173,11 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
drm_gem_object_put(val);
}
-static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+static void mdp5_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc *mdp5_crtc = ptr;
- drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
-
- kfree(mdp5_crtc);
}
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
@@ -1147,7 +1145,6 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
@@ -1161,7 +1158,6 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
@@ -1327,10 +1323,16 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
+ int ret;
- mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
- if (!mdp5_crtc)
- return ERR_PTR(-ENOMEM);
+ mdp5_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp5_crtc, base,
+ plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
+ &mdp5_crtc_funcs,
+ NULL);
+ if (IS_ERR(mdp5_crtc))
+ return ERR_CAST(mdp5_crtc);
crtc = &mdp5_crtc->base;
@@ -1346,13 +1348,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
- drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
- cursor_plane ?
- &mdp5_crtc_no_lm_cursor_funcs :
- &mdp5_crtc_funcs, NULL);
-
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
+ ret = drmm_add_action_or_reset(dev, mdp5_crtc_flip_cleanup, mdp5_crtc);
+ if (ret)
+ return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
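
The flip-work teardown that previously lived in the .destroy callback is now a DRM-managed action: drmm_add_action_or_reset() runs the callback when the drm_device is released, or immediately if registration fails. A minimal sketch of that pattern, with hypothetical names:

    #include <drm/drm_managed.h>
    #include <linux/slab.h>

    struct my_state {
        void *scratch;
    };

    /* Runs at drm_device release (or at once if registration failed). */
    static void my_cleanup(struct drm_device *drm, void *ptr)
    {
        struct my_state *state = ptr;

        kfree(state->scratch);
    }

    static int my_init(struct drm_device *drm, struct my_state *state)
    {
        state->scratch = kzalloc(4096, GFP_KERNEL);
        if (!state->scratch)
            return -ENOMEM;

        return drmm_add_action_or_reset(drm, my_cleanup, state);
    }
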
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 1220f2b20..666de99a4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -681,11 +681,6 @@ void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
}
}
-void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
-{
- kfree(ctl_mgr);
-}
-
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
@@ -697,18 +692,16 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
unsigned long flags;
int c, ret;
- ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
- ret = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
- ret = -ENOSPC;
- goto fail;
+ return ERR_PTR(-ENOSPC);
}
/* initialize the CTL manager: */
@@ -727,7 +720,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
- goto fail;
+ return ERR_PTR(ret);
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
@@ -755,10 +748,4 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
return ctl_mgr;
-
-fail:
- if (ctl_mgr)
- mdp5_ctlm_destroy(ctl_mgr);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index c2af68aa7..9020e8efc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -17,7 +17,6 @@ struct mdp5_ctl_manager;
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
-void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 79d67c495..8db97083e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -16,17 +16,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static void mdp5_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp5_encoder);
-}
-
-static const struct drm_encoder_funcs mdp5_encoder_funcs = {
- .destroy = mdp5_encoder_destroy,
-};
-
static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -342,13 +331,11 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
struct mdp5_encoder *mdp5_encoder;
int enc_type = (intf->type == INTF_DSI) ?
DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
- int ret;
- mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
- if (!mdp5_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp5_encoder = drmm_encoder_alloc(dev, struct mdp5_encoder, base,
+ NULL, enc_type, NULL);
+ if (IS_ERR(mdp5_encoder))
+ return ERR_CAST(mdp5_encoder);
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
@@ -356,15 +343,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
-
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
return encoder;
-
-fail:
- if (encoder)
- mdp5_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
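
Both encoder conversions in this series rely on drmm_encoder_alloc(): it allocates the wrapper structure, initializes the drm_encoder embedded at the named member, and registers managed cleanup, which is why drm_encoder_init(), the .destroy callback and the funcs table can all be dropped (a NULL funcs pointer is valid for managed encoders). A short sketch with a hypothetical wrapper struct:

    #include <drm/drm_encoder.h>
    #include <drm/drm_managed.h>
    #include <linux/err.h>

    struct my_encoder {
        struct drm_encoder base;   /* the embedded member named below */
        int id;
    };

    static struct drm_encoder *my_encoder_create(struct drm_device *drm)
    {
        struct my_encoder *enc;

        /* funcs may be NULL: cleanup is handled by the drmm layer */
        enc = drmm_encoder_alloc(drm, struct my_encoder, base,
                                 NULL, DRM_MODE_ENCODER_TMDS, NULL);
        if (IS_ERR(enc))
            return ERR_CAST(enc);

        return &enc->base;
    }
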
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index ec933d597..082763466 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -209,13 +209,6 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_gem_address_space *aspace = kms->aspace;
- int i;
-
- for (i = 0; i < mdp5_kms->num_hwmixers; i++)
- mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
-
- for (i = 0; i < mdp5_kms->num_hwpipes; i++)
- mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu);
@@ -623,18 +616,6 @@ fail:
static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
- int i;
-
- if (mdp5_kms->ctlm)
- mdp5_ctlm_destroy(mdp5_kms->ctlm);
- if (mdp5_kms->smp)
- mdp5_smp_destroy(mdp5_kms->smp);
- if (mdp5_kms->cfg)
- mdp5_cfg_destroy(mdp5_kms->cfg);
-
- for (i = 0; i < mdp5_kms->num_intfs; i++)
- kfree(mdp5_kms->intfs[i]);
-
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&mdp5_kms->pdev->dev);
@@ -652,7 +633,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
for (i = 0; i < cnt; i++) {
struct mdp5_hw_pipe *hwpipe;
- hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
@@ -724,7 +705,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
for (i = 0; i < hw_cfg->lm.count; i++) {
struct mdp5_hw_mixer *mixer;
- mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
@@ -755,7 +736,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
if (intf_types[i] == INTF_DISABLED)
continue;
- intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL);
if (!intf) {
DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 2536def2a..2822b533f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -140,20 +140,16 @@ int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
return 0;
}
-void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
-{
- kfree(mixer);
-}
-
static const char * const mixer_names[] = {
"LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
};
-struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev,
+ const struct mdp5_lm_instance *lm)
{
struct mdp5_hw_mixer *mixer;
- mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ mixer = devm_kzalloc(dev->dev, sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 545ee223b..2bedd7583 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -25,8 +25,8 @@ struct mdp5_hw_mixer_state {
struct drm_crtc *hwmixer_to_crtc[8];
};
-struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
-void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev,
+ const struct mdp5_lm_instance *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index e4b8a7898..99b2c30b1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -151,17 +151,13 @@ int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
return 0;
}
-void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
-{
- kfree(hwpipe);
-}
-
-struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev,
+ enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps)
{
struct mdp5_hw_pipe *hwpipe;
- hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ hwpipe = devm_kzalloc(dev->dev, sizeof(*hwpipe), GFP_KERNEL);
if (!hwpipe)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index cca67938c..452138821 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -39,8 +39,8 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
struct mdp5_hw_pipe **r_hwpipe);
int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
-struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev,
+ enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
-void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
#endif /* __MDP5_PIPE_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index b68682c1b..8b59562e2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -370,23 +370,17 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}
-void mdp5_smp_destroy(struct mdp5_smp *smp)
-{
- kfree(smp);
-}
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
+ struct drm_device *dev = mdp5_kms->dev;
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
struct mdp5_smp *smp;
- int ret;
- smp = kzalloc(sizeof(*smp), GFP_KERNEL);
- if (unlikely(!smp)) {
- ret = -ENOMEM;
- goto fail;
- }
+ smp = devm_kzalloc(dev->dev, sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp))
+ return ERR_PTR(-ENOMEM);
smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
@@ -400,9 +394,4 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
return smp;
-fail:
- if (smp)
- mdp5_smp_destroy(smp);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index ba5618e13..d8b6a1141 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -68,7 +68,6 @@ struct mdp5_smp;
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
const struct mdp5_smp_block *cfg);
-void mdp5_smp_destroy(struct mdp5_smp *smp);
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 8e3b677f3..03f4951c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -291,6 +291,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(dp_aux->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&aux->mutex);
if (!aux->initted) {
ret = -EIO;
@@ -364,6 +368,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
exit:
mutex_unlock(&aux->mutex);
+ pm_runtime_put_sync(dp_aux->dev);
return ret;
}
@@ -474,7 +479,6 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
int dp_aux_register(struct drm_dp_aux *dp_aux)
{
- struct dp_aux_private *aux;
int ret;
if (!dp_aux) {
@@ -482,12 +486,7 @@ int dp_aux_register(struct drm_dp_aux *dp_aux)
return -EINVAL;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- aux->dp_aux.name = "dpu_dp_aux";
- aux->dp_aux.dev = aux->dev;
- aux->dp_aux.transfer = dp_aux_transfer;
- ret = drm_dp_aux_register(&aux->dp_aux);
+ ret = drm_dp_aux_register(dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
@@ -502,6 +501,21 @@ void dp_aux_unregister(struct drm_dp_aux *dp_aux)
drm_dp_aux_unregister(dp_aux);
}
+static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
+ unsigned long wait_us)
+{
+ int ret;
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ pm_runtime_get_sync(aux->dev);
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ pm_runtime_put_sync(aux->dev);
+
+ return ret;
+}
+
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
bool is_edp)
{
@@ -525,6 +539,17 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
aux->catalog = catalog;
aux->retry_cnt = 0;
+ /*
+ * Use drm_dp_aux_init() to set up the AUX adapter before
+ * registering AUX with the DRM device, so that the msm eDP
+ * panel can be detected by generic_edp_panel_probe().
+ */
+ aux->dp_aux.name = "dpu_dp_aux";
+ aux->dp_aux.dev = dev;
+ aux->dp_aux.transfer = dp_aux_transfer;
+ aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
+ drm_dp_aux_init(&aux->dp_aux);
+
return &aux->dp_aux;
}
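
With drm_dp_aux_init() moved into dp_aux_get(), the AUX channel is usable (for the aux-bus panel probe done at probe time) before dp_aux_register() later hands it to the DRM device; the register step is reduced to drm_dp_aux_register(). A minimal sketch of the two-phase pattern, with hypothetical names:

    #include <drm/display/drm_dp_helper.h>

    /* Hypothetical driver state. */
    struct my_dp {
        struct device *dev;
        struct drm_dp_aux aux;
    };

    static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
                                   struct drm_dp_aux_msg *msg)
    {
        return -EIO;   /* stub: real hardware access goes here */
    }

    static void my_aux_probe(struct my_dp *dp)
    {
        dp->aux.name = "my-aux";
        dp->aux.dev = dp->dev;
        dp->aux.transfer = my_aux_transfer;
        drm_dp_aux_init(&dp->aux);   /* usable before DRM registration */
    }

    static int my_aux_bind(struct my_dp *dp, struct drm_device *drm)
    {
        dp->aux.drm_dev = drm;
        return drm_dp_aux_register(&dp->aux);   /* exposes i2c adapter */
    }
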
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 3bba901af..6c281dc09 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -19,13 +19,9 @@
#define DEBUG_NAME "msm_dp"
struct dp_debug_private {
- struct dentry *root;
-
struct dp_link *link;
struct dp_panel *panel;
struct drm_connector *connector;
- struct device *dev;
- struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
@@ -204,35 +200,33 @@ static const struct file_operations test_active_fops = {
.write = dp_test_active_write
};
-static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp)
{
- char path[64];
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
- snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);
-
- debug->root = debugfs_create_dir(path, minor->debugfs_root);
-
- debugfs_create_file("dp_debug", 0444, debug->root,
+ debugfs_create_file("dp_debug", 0444, root,
debug, &dp_debug_fops);
- debugfs_create_file("msm_dp_test_active", 0444,
- debug->root,
- debug, &test_active_fops);
+ if (!is_edp) {
+ debugfs_create_file("msm_dp_test_active", 0444,
+ root,
+ debug, &test_active_fops);
- debugfs_create_file("msm_dp_test_data", 0444,
- debug->root,
- debug, &dp_test_data_fops);
+ debugfs_create_file("msm_dp_test_data", 0444,
+ root,
+ debug, &dp_test_data_fops);
- debugfs_create_file("msm_dp_test_type", 0444,
- debug->root,
- debug, &dp_test_type_fops);
+ debugfs_create_file("msm_dp_test_type", 0444,
+ root,
+ debug, &dp_test_type_fops);
+ }
}
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
- struct drm_connector *connector, struct drm_minor *minor)
+ struct drm_connector *connector,
+ struct dentry *root, bool is_edp)
{
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
@@ -253,46 +247,15 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
debug->dp_debug.debug_en = false;
debug->link = link;
debug->panel = panel;
- debug->dev = dev;
- debug->drm_dev = minor->dev;
- debug->connector = connector;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
- dp_debug_init(dp_debug, minor);
+ dp_debug_init(dp_debug, root, is_edp);
return dp_debug;
error:
return ERR_PTR(rc);
}
-
-static int dp_debug_deinit(struct dp_debug *dp_debug)
-{
- struct dp_debug_private *debug;
-
- if (!dp_debug)
- return -EINVAL;
-
- debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
- debugfs_remove_recursive(debug->root);
-
- return 0;
-}
-
-void dp_debug_put(struct dp_debug *dp_debug)
-{
- struct dp_debug_private *debug;
-
- if (!dp_debug)
- return;
-
- debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
- dp_debug_deinit(dp_debug);
-
- devm_kfree(debug->dev, debug);
-}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 124227873..9b3b2e702 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -34,7 +34,8 @@ struct dp_debug {
* @panel: instance of panel module
* @link: instance of link module
* @connector: double pointer to display connector
- * @minor: pointer to drm minor number after device registration
+ * @root: connector's debugfs root
+ * @is_edp: set for eDP connectors / panels
* return: pointer to allocated debug module data
*
* This function sets up the debug module and provides a way
@@ -43,31 +44,21 @@ struct dp_debug {
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
struct drm_connector *connector,
- struct drm_minor *minor);
-
-/**
- * dp_debug_put()
- *
- * Cleans up dp_debug instance
- *
- * @dp_debug: instance of dp_debug
- */
-void dp_debug_put(struct dp_debug *dp_debug);
+ struct dentry *root,
+ bool is_edp);
#else
static inline
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
- struct drm_connector *connector, struct drm_minor *minor)
+ struct drm_connector *connector,
+ struct dentry *root,
+ bool is_edp)
{
return ERR_PTR(-EINVAL);
}
-static inline void dp_debug_put(struct dp_debug *dp_debug)
-{
-}
-
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 4f89c9939..78464c395 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -49,13 +49,11 @@ enum {
ST_CONNECTED,
ST_DISCONNECT_PENDING,
ST_DISPLAY_OFF,
- ST_SUSPENDED,
};
enum {
EV_NO_EVENT,
/* hpd events */
- EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT,
EV_HPD_UNPLUG_INT,
@@ -281,11 +279,6 @@ static int dp_display_bind(struct device *dev, struct device *master,
dp->dp_display.drm_dev = drm;
priv->dp[dp->id] = &dp->dp_display;
- rc = dp->parser->parse(dp->parser);
- if (rc) {
- DRM_ERROR("device tree parsing failed\n");
- goto end;
- }
dp->drm_dev = drm;
@@ -296,11 +289,6 @@ static int dp_display_bind(struct device *dev, struct device *master,
goto end;
}
- rc = dp_power_client_init(dp->power);
- if (rc) {
- DRM_ERROR("Power client create failed\n");
- goto end;
- }
rc = dp_register_audio_driver(dev, dp->audio);
if (rc) {
@@ -325,15 +313,10 @@ static void dp_display_unbind(struct device *dev, struct device *master,
struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
- /* disable all HPD interrupts */
- if (dp->core_initialized)
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
-
kthread_stop(dp->ev_tsk);
of_dp_aux_depopulate_bus(dp->aux);
- dp_power_client_deinit(dp->power);
dp_unregister_audio_driver(dev, dp->audio);
dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
@@ -357,12 +340,11 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
drm_helper_hpd_irq_event(connector->dev);
}
-
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.is_connected) ||
- (!hpd && !dp->dp_display.is_connected)) {
+ if ((hpd && dp->dp_display.link_ready) ||
+ (!hpd && !dp->dp_display.link_ready)) {
drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
(hpd ? "on" : "off"));
return 0;
@@ -378,7 +360,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
dp->panel->downstream_ports);
}
- dp->dp_display.is_connected = hpd;
+ dp->dp_display.link_ready = hpd;
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
@@ -581,6 +563,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
int ret;
+ struct platform_device *pdev = dp->dp_display.pdev;
mutex_lock(&dp->event_mutex);
@@ -588,7 +571,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -605,9 +588,17 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret) {
+ DRM_ERROR("failed to pm_runtime_resume\n");
+ mutex_unlock(&dp->event_mutex);
+ return ret;
+ }
+
+ ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
+ pm_runtime_put_sync(&pdev->dev);
} else {
dp->hpd_state = ST_MAINLINK_READY;
}
@@ -637,6 +628,7 @@ static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
+ struct platform_device *pdev = dp->dp_display.pdev;
mutex_lock(&dp->event_mutex);
@@ -664,6 +656,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -687,6 +680,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
/* uevent will complete disconnection part */
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -702,7 +696,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -726,7 +720,6 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
- dp_debug_put(dp->debug);
dp_audio_put(dp->audio);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
@@ -924,7 +917,7 @@ int dp_display_set_plugged_cb(struct msm_dp *dp_display,
dp_display->plugged_cb = fn;
dp_display->codec_dev = codec_dev;
- plugged = dp_display->is_connected;
+ plugged = dp_display->link_ready;
dp_display_handle_plugged_change(dp_display, plugged);
return 0;
@@ -1114,9 +1107,6 @@ static int hpd_event_thread(void *data)
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
switch (todo->event_id) {
- case EV_HPD_INIT_SETUP:
- dp_display_host_init(dp_priv);
- break;
case EV_HPD_PLUG_INT:
dp_hpd_plug_handle(dp_priv, todo->data);
break;
@@ -1195,27 +1185,21 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
return ret;
}
-int dp_display_request_irq(struct msm_dp *dp_display)
+static int dp_display_request_irq(struct dp_display_private *dp)
{
int rc = 0;
- struct dp_display_private *dp;
+ struct platform_device *pdev = dp->dp_display.pdev;
- if (!dp_display) {
- DRM_ERROR("invalid input\n");
- return -EINVAL;
- }
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- dp->irq = irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0);
- if (!dp->irq) {
+ dp->irq = platform_get_irq(pdev, 0);
+ if (dp->irq < 0) {
DRM_ERROR("failed to get irq\n");
- return -EINVAL;
+ return dp->irq;
}
- rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
- dp_display_irq_handler,
- IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+ rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler,
+ IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN,
+ "dp_display_isr", dp);
+
if (rc < 0) {
DRM_ERROR("failed to request IRQ%u: %d\n",
dp->irq, rc);
@@ -1244,6 +1228,29 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde
return NULL;
}
+static int dp_display_get_next_bridge(struct msm_dp *dp);
+
+static int dp_display_probe_tail(struct device *dev)
+{
+ struct msm_dp *dp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dp_display_get_next_bridge(dp);
+ if (ret)
+ return ret;
+
+ ret = component_add(dev, &dp_display_comp_ops);
+ if (ret)
+ DRM_ERROR("component add failed, rc=%d\n", ret);
+
+ return ret;
+}
+
+static int dp_auxbus_done_probe(struct drm_dp_aux *aux)
+{
+ return dp_display_probe_tail(aux->dev);
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -1277,6 +1284,18 @@ static int dp_display_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ rc = dp->parser->parse(dp->parser);
+ if (rc) {
+ DRM_ERROR("device tree parsing failed\n");
+ goto err;
+ }
+
+ rc = dp_power_client_init(dp->power);
+ if (rc) {
+ DRM_ERROR("Power client create failed\n");
+ goto err;
+ }
+
/* setup event q */
mutex_init(&dp->event_mutex);
init_waitqueue_head(&dp->event_q);
@@ -1289,13 +1308,31 @@ static int dp_display_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, &dp->dp_display);
- rc = component_add(&pdev->dev, &dp_display_comp_ops);
- if (rc) {
- DRM_ERROR("component add failed, rc=%d\n", rc);
- dp_display_deinit_sub_modules(dp);
+ rc = devm_pm_runtime_enable(&pdev->dev);
+ if (rc)
+ goto err;
+
+ rc = dp_display_request_irq(dp);
+ if (rc)
+ goto err;
+
+ if (dp->dp_display.is_edp) {
+ rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe);
+ if (rc) {
+ DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc);
+ goto err;
+ }
+ } else {
+ rc = dp_display_probe_tail(&pdev->dev);
+ if (rc)
+ goto err;
}
return rc;
+
+err:
+ dp_display_deinit_sub_modules(dp);
+ return rc;
}
static void dp_display_remove(struct platform_device *pdev)
@@ -1304,113 +1341,50 @@ static void dp_display_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dp_display_comp_ops);
dp_display_deinit_sub_modules(dp);
-
platform_set_drvdata(pdev, NULL);
}
-static int dp_pm_resume(struct device *dev)
+static int dp_pm_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dp *dp_display = platform_get_drvdata(pdev);
- struct dp_display_private *dp;
- int sink_count = 0;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- mutex_lock(&dp->event_mutex);
-
- drm_dbg_dp(dp->drm_dev,
- "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- /* start from disconnected state */
- dp->hpd_state = ST_DISCONNECTED;
-
- /* turn on dp ctrl/phy */
- dp_display_host_init(dp);
-
- if (dp_display->is_edp)
- dp_catalog_ctrl_hpd_enable(dp->catalog);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
- if (dp_catalog_link_is_connected(dp->catalog)) {
- /*
- * set sink to normal operation mode -- D0
- * before dpcd read
- */
- dp_display_host_phy_init(dp);
- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
- sink_count = drm_dp_read_sink_count(dp->aux);
- if (sink_count < 0)
- sink_count = 0;
+ disable_irq(dp->irq);
+ if (dp->dp_display.is_edp) {
dp_display_host_phy_exit(dp);
+ dp_catalog_ctrl_hpd_disable(dp->catalog);
}
-
- dp->link->sink_count = sink_count;
- /*
- * can not declared display is connected unless
- * HDMI cable is plugged in and sink_count of
- * dongle become 1
- * also only signal audio when disconnected
- */
- if (dp->link->sink_count) {
- dp->dp_display.is_connected = true;
- } else {
- dp->dp_display.is_connected = false;
- dp_display_handle_plugged_change(dp_display, false);
- }
-
- drm_dbg_dp(dp->drm_dev,
- "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
- dp->dp_display.connector_type, dp->link->sink_count,
- dp->dp_display.is_connected, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- mutex_unlock(&dp->event_mutex);
+ dp_display_host_deinit(dp);
return 0;
}
-static int dp_pm_suspend(struct device *dev)
+static int dp_pm_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dp *dp_display = platform_get_drvdata(pdev);
- struct dp_display_private *dp;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- mutex_lock(&dp->event_mutex);
-
- drm_dbg_dp(dp->drm_dev,
- "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- /* mainlink enabled */
- if (dp_power_clk_status(dp->power, DP_CTRL_PM))
- dp_ctrl_off_link_stream(dp->ctrl);
-
- dp_display_host_phy_exit(dp);
-
- /* host_init will be called at pm_resume */
- dp_display_host_deinit(dp);
-
- dp->hpd_state = ST_SUSPENDED;
-
- drm_dbg_dp(dp->drm_dev,
- "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
- mutex_unlock(&dp->event_mutex);
+ /*
+ * for eDP, the host controller, HPD block and PHY are enabled
+ * here, but with the HPD irq disabled
+ *
+ * for DP, only the host controller is enabled here.
+ * The HPD block is enabled at dp_bridge_hpd_enable() and the
+ * PHY will be enabled at the plugin handler later
+ */
+ dp_display_host_init(dp);
+ if (dp->dp_display.is_edp) {
+ dp_catalog_ctrl_hpd_enable(dp->catalog);
+ dp_display_host_phy_init(dp);
+ }
+ enable_irq(dp->irq);
return 0;
}
static const struct dev_pm_ops dp_pm_ops = {
- .suspend = dp_pm_suspend,
- .resume = dp_pm_resume,
+ SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
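
Under SET_RUNTIME_PM_OPS() the controller is powered purely by reference counting: the first successful pm_runtime_resume_and_get() runs dp_pm_runtime_suspend()'s counterpart dp_pm_runtime_resume(), and the matching pm_runtime_put_sync() lets dp_pm_runtime_suspend() run once the count drops to zero, which is what the plug/unplug and bridge handlers above rely on. A minimal sketch of a consumer, assuming runtime PM is already enabled on the device:

    #include <linux/pm_runtime.h>

    /* Sketch: bracket hardware access with runtime PM references. */
    static int example_touch_hw(struct device *dev)
    {
        int ret;

        ret = pm_runtime_resume_and_get(dev);  /* may run ->runtime_resume */
        if (ret)
            return ret;

        /* ... hardware is guaranteed powered in here ... */

        pm_runtime_put_sync(dev);              /* may run ->runtime_suspend */
        return 0;
    }
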
static struct platform_driver dp_display_driver = {
@@ -1440,19 +1414,6 @@ void __exit msm_dp_unregister(void)
platform_driver_unregister(&dp_display_driver);
}
-void msm_dp_irq_postinstall(struct msm_dp *dp_display)
-{
- struct dp_display_private *dp;
-
- if (!dp_display)
- return;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- if (!dp_display->is_edp)
- dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0);
-}
-
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@@ -1462,7 +1423,7 @@ bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
return dp->wide_bus_en;
}
-void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp)
{
struct dp_display_private *dp;
struct device *dev;
@@ -1473,7 +1434,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
dp->debug = dp_debug_get(dev, dp->panel,
dp->link, dp->dp_display.connector,
- minor);
+ root, is_edp);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
@@ -1485,33 +1446,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
{
int rc;
struct dp_display_private *dp_priv;
- struct device_node *aux_bus;
- struct device *dev;
dp_priv = container_of(dp, struct dp_display_private, dp_display);
- dev = &dp_priv->dp_display.pdev->dev;
- aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
-
- if (aux_bus && dp->is_edp) {
- dp_display_host_init(dp_priv);
- dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
- dp_display_host_phy_init(dp_priv);
-
- /*
- * The code below assumes that the panel will finish probing
- * by the time devm_of_dp_aux_populate_ep_devices() returns.
- * This isn't a great assumption since it will fail if the
- * panel driver is probed asynchronously but is the best we
- * can do without a bigger driver reorganization.
- */
- rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
- of_node_put(aux_bus);
- if (rc)
- goto error;
- } else if (dp->is_edp) {
- DRM_ERROR("eDP aux_bus not found\n");
- return -ENODEV;
- }
/*
* External bridges are mandatory for eDP interfaces: one has to
@@ -1520,21 +1456,13 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
* For DisplayPort interfaces external bridges are optional, so
* silently ignore an error if one is not present (-ENODEV).
*/
- rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser);
+ rc = devm_dp_parser_find_next_bridge(&dp->pdev->dev, dp_priv->parser);
if (!dp->is_edp && rc == -ENODEV)
return 0;
- if (!rc) {
+ if (!rc)
dp->next_bridge = dp_priv->parser->next_bridge;
- return 0;
- }
-error:
- if (dp->is_edp) {
- of_dp_aux_depopulate_bus(dp_priv->aux);
- dp_display_host_phy_exit(dp_priv);
- dp_display_host_deinit(dp_priv);
- }
return rc;
}
@@ -1548,16 +1476,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
- ret = dp_display_request_irq(dp_display);
- if (ret) {
- DRM_ERROR("request_irq failed, ret=%d\n", ret);
- return ret;
- }
-
- ret = dp_display_get_next_bridge(dp_display);
- if (ret)
- return ret;
-
ret = dp_bridge_init(dp_display, dev, encoder);
if (ret) {
DRM_DEV_ERROR(dev->dev,
@@ -1599,6 +1517,11 @@ void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
dp_hpd_plug_handle(dp_display, 0);
mutex_lock(&dp_display->event_mutex);
+ if (pm_runtime_resume_and_get(&dp->pdev->dev)) {
+ DRM_ERROR("failed to pm_runtime_resume\n");
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
state = dp_display->hpd_state;
if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
@@ -1663,10 +1586,9 @@ void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
mutex_lock(&dp_display->event_mutex);
state = dp_display->hpd_state;
- if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) {
- mutex_unlock(&dp_display->event_mutex);
- return;
- }
+ if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
+ drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
+ dp->connector_type, state);
dp_display_disable(dp_display);
@@ -1679,6 +1601,8 @@ void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
}
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
+
+ pm_runtime_put_sync(&dp->pdev->dev);
mutex_unlock(&dp_display->event_mutex);
}
@@ -1717,7 +1641,21 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge)
struct msm_dp *dp_display = dp_bridge->dp_display;
struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ /*
+ * This is for the external DP with HPD irq enabled case:
+ * step-1: dp_pm_runtime_resume() enables the DP host only
+ * step-2: enable the HPD block and unmask the HPD irq here
+ * step-3: wait for the plugin irq while the PHY is not initialized
+ * step-4: the DP PHY is initialized at the plugin handler before
+ * link training
+ */
mutex_lock(&dp->event_mutex);
+ if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) {
+ DRM_ERROR("failed to resume power\n");
+ mutex_unlock(&dp->event_mutex);
+ return;
+ }
+
dp_catalog_ctrl_hpd_enable(dp->catalog);
/* enable HPD interrupts */
@@ -1739,6 +1677,8 @@ void dp_bridge_hpd_disable(struct drm_bridge *bridge)
dp_catalog_ctrl_hpd_disable(dp->catalog);
dp_display->internal_hpd = false;
+
+ pm_runtime_put_sync(&dp_display->pdev->dev);
mutex_unlock(&dp->event_mutex);
}
@@ -1753,13 +1693,8 @@ void dp_bridge_hpd_notify(struct drm_bridge *bridge,
if (dp_display->internal_hpd)
return;
- if (!dp->core_initialized) {
- drm_dbg_dp(dp->drm_dev, "not initialized\n");
- return;
- }
-
- if (!dp_display->is_connected && status == connector_status_connected)
+ if (!dp_display->link_ready && status == connector_status_connected)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- else if (dp_display->is_connected && status == connector_status_disconnected)
+ else if (dp_display->link_ready && status == connector_status_disconnected)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index f66cdbc35..102f3507d 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -17,7 +17,7 @@ struct msm_dp {
struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
- bool is_connected;
+ bool link_ready;
bool audio_enabled;
bool power_on;
unsigned int connector_type;
@@ -36,11 +36,11 @@ struct msm_dp {
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
int dp_display_get_modes(struct msm_dp *dp_display);
-int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
void dp_display_set_psr(struct msm_dp *dp, bool enter);
+void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp);
#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index e3bdd7dd4..46e688903 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -24,10 +24,10 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
dp = to_dp_bridge(bridge)->dp_display;
- drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
- (dp->is_connected) ? "true" : "false");
+ drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
+ (dp->link_ready) ? "true" : "false");
- return (dp->is_connected) ? connector_status_connected :
+ return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
}
@@ -40,8 +40,8 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge,
dp = to_dp_bridge(bridge)->dp_display;
- drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
- (dp->is_connected) ? "true" : "false");
+ drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
+ (dp->link_ready) ? "true" : "false");
/*
* There is no protection in the DRM framework to check if the display
@@ -55,7 +55,7 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge,
* After that this piece of code can be removed.
*/
if (bridge->ops & DRM_BRIDGE_OP_HPD)
- return (dp->is_connected) ? 0 : -ENOTCONN;
+ return (dp->link_ready) ? 0 : -ENOTCONN;
return 0;
}
@@ -78,7 +78,7 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
dp = to_dp_bridge(bridge)->dp_display;
/* pluggable case assumes EDID is read when HPD */
- if (dp->is_connected) {
+ if (dp->link_ready) {
rc = dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
@@ -90,6 +90,13 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
return rc;
}
+static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+
+ dp_display_debugfs_init(dp, root, false);
+}
+
static const struct drm_bridge_funcs dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -105,6 +112,7 @@ static const struct drm_bridge_funcs dp_bridge_ops = {
.hpd_enable = dp_bridge_hpd_enable,
.hpd_disable = dp_bridge_hpd_disable,
.hpd_notify = dp_bridge_hpd_notify,
+ .debugfs_init = dp_bridge_debugfs_init,
};
static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
@@ -260,6 +268,13 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
+static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+
+ dp_display_debugfs_init(dp, root, true);
+}
+
static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_enable = edp_bridge_atomic_enable,
.atomic_disable = edp_bridge_atomic_disable,
@@ -270,6 +285,7 @@ static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_check = edp_bridge_atomic_check,
+ .debugfs_init = edp_bridge_debugfs_init,
};
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 5cb84ca40..c4843dd69 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -152,45 +152,17 @@ int dp_power_client_init(struct dp_power *dp_power)
power = container_of(dp_power, struct dp_power_private, dp_power);
- pm_runtime_enable(power->dev);
-
return dp_power_clk_init(power);
}
-void dp_power_client_deinit(struct dp_power *dp_power)
-{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- pm_runtime_disable(power->dev);
-}
-
int dp_power_init(struct dp_power *dp_power)
{
- int rc = 0;
- struct dp_power_private *power = NULL;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- pm_runtime_get_sync(power->dev);
-
- rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
- if (rc)
- pm_runtime_put_sync(power->dev);
-
- return rc;
+ return dp_power_clk_enable(dp_power, DP_CORE_PM, true);
}
int dp_power_deinit(struct dp_power *dp_power)
{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- dp_power_clk_enable(dp_power, DP_CORE_PM, false);
- pm_runtime_put_sync(power->dev);
- return 0;
+ return dp_power_clk_enable(dp_power, DP_CORE_PM, false);
}
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
index a3dec2007..55ada51ed 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -81,17 +81,6 @@ int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
int dp_power_client_init(struct dp_power *power);
/**
- * dp_power_clinet_deinit() - de-initialize clock and regulator modules
- *
- * @power: instance of power module
- * return: 0 for success, error for failure.
- *
- * This API will de-initialize the DisplayPort's clocks and regulator
- * modules.
- */
-void dp_power_client_deinit(struct dp_power *power);
-
-/**
* dp_power_get() - configure and get the DisplayPort power module data
*
* @parser: instance of parser module
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 1f98ff74c..10ba7d153 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -190,6 +190,21 @@ static const struct msm_dsi_config sm8550_dsi_cfg = {
},
};
+static const struct regulator_bulk_data sm8650_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 16600 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sm8650_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sm8650_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sm8650_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
{ .supply = "refgen" },
@@ -281,6 +296,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0,
+ &sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
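
Adding the SM8650 row above is sufficient because msm_dsi_cfg_get() resolves the (major, minor) version read from the controller against dsi_cfg_handlers. A sketch of that lookup, assuming a plain scan of the table (the in-tree helper may differ in detail):

    /* Sketch: match a controller revision against the handler table. */
    const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
    {
        int i;

        for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
            if (dsi_cfg_handlers[i].major == major &&
                dsi_cfg_handlers[i].minor == minor)
                return &dsi_cfg_handlers[i];
        }

        return NULL;
    }
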
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 43f0dd74e..4c9b4b376 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -28,6 +28,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
+#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index e49ebd9f6..24a347fe2 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -587,6 +587,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_5nm_8450_cfgs },
{ .compatible = "qcom,sm8550-dsi-phy-4nm",
.data = &dsi_phy_4nm_8550_cfgs },
+ { .compatible = "qcom,sm8650-dsi-phy-4nm",
+ .data = &dsi_phy_4nm_8650_cfgs },
#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 8b640d174..e4275d3ad 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -62,6 +62,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 89a6344bc..82d015aa2 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -1121,6 +1121,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 98000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 97800 },
};
@@ -1281,3 +1285,26 @@ const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
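
The CONFIG_64BIT guard on max_pll_rate exists because the field is an unsigned long: 5000000000 does not fit in 32 bits, so 32-bit builds fall back to ULONG_MAX, effectively leaving the rate unclamped. A one-line check of the arithmetic, for illustration only:

    #include <linux/build_bug.h>

    /* 5 GHz exceeds the 32-bit unsigned long range (max 4294967295). */
    static_assert(5000000000ULL > 0xFFFFFFFFULL);
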
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index de182c004..7aa500d24 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -249,7 +249,6 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
i2c->owner = THIS_MODULE;
- i2c->class = I2C_CLASS_DDC;
snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
i2c->dev.parent = &hdmi->pdev->dev;
i2c->algo = &msm_hdmi_i2c_algorithm;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 04d304eed..4494f6d1c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -304,36 +304,21 @@ int msm_debugfs_late_init(struct drm_device *dev)
return ret;
}
-void msm_debugfs_init(struct drm_minor *minor)
+static void msm_debugfs_gpu_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dentry *gpu_devfreq;
- drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
-
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms) {
- drm_debugfs_create_files(msm_kms_debugfs_list,
- ARRAY_SIZE(msm_kms_debugfs_list),
- minor->debugfs_root, minor);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
- }
-
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
- debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
- dev, &shrink_fops);
-
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
@@ -344,6 +329,30 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_u32("downdifferential",0600, gpu_devfreq,
&priv->gpu_devfreq_config.downdifferential);
+}
+
+void msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (priv->gpu_pdev)
+ msm_debugfs_gpu_init(minor);
+
+ if (priv->kms) {
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+ }
+
+ debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
+ dev, &shrink_fops);
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 3f217b578..50b65ffc2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,9 +37,10 @@
* - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
+ * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 10
+#define MSM_VERSION_MINOR 12
#define MSM_VERSION_PATCHLEVEL 0
static void msm_deinit_vram(struct drm_device *ddev);
@@ -544,6 +545,85 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
+static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
+ __user void *metadata,
+ u32 metadata_size)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *buf;
+ int ret;
+
+ /* Impose a moderate upper bound on metadata size: */
+ if (metadata_size > 128) {
+ return -EOVERFLOW;
+ }
+
+ /* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
+ buf = memdup_user(metadata, metadata_size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = msm_gem_lock_interruptible(obj);
+ if (ret)
+ goto out;
+
+ msm_obj->metadata =
+ krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+ msm_obj->metadata_size = metadata_size;
+ memcpy(msm_obj->metadata, buf, metadata_size);
+
+ msm_gem_unlock(obj);
+
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
+ __user void *metadata,
+ u32 *metadata_size)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *buf;
+ int ret, len;
+
+ if (!metadata) {
+ /*
+ * Querying the size is inherently racy, but
+ * EXT_external_objects expects the app to confirm
+ * via device and driver UUIDs that the exporter and
+ * importer versions match. All we can do from the
+ * kernel side is check the length under the obj lock
+ * when userspace tries to retrieve the metadata.
+ */
+ *metadata_size = msm_obj->metadata_size;
+ return 0;
+ }
+
+ ret = msm_gem_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ /* Avoid copy_to_user() under gem obj lock: */
+ len = msm_obj->metadata_size;
+ buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);
+
+ msm_gem_unlock(obj);
+
+ if (*metadata_size < len) {
+ ret = -ETOOSMALL;
+ } else if (copy_to_user(metadata, buf, len)) {
+ ret = -EFAULT;
+ } else {
+ *metadata_size = len;
+ }
+
+ kfree(buf);
+
+ return ret;
+}
+
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -566,6 +646,8 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
break;
case MSM_INFO_SET_NAME:
case MSM_INFO_GET_NAME:
+ case MSM_INFO_SET_METADATA:
+ case MSM_INFO_GET_METADATA:
break;
default:
return -EINVAL;
@@ -618,7 +700,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
break;
case MSM_INFO_GET_NAME:
if (args->value && (args->len < strlen(msm_obj->name))) {
- ret = -EINVAL;
+ ret = -ETOOSMALL;
break;
}
args->len = strlen(msm_obj->name);
@@ -628,6 +710,14 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = -EFAULT;
}
break;
+ case MSM_INFO_SET_METADATA:
+ ret = msm_ioctl_gem_info_set_metadata(
+ obj, u64_to_user_ptr(args->value), args->len);
+ break;
+ case MSM_INFO_GET_METADATA:
+ ret = msm_ioctl_gem_info_get_metadata(
+ obj, u64_to_user_ptr(args->value), &args->len);
+ break;
}
drm_gem_object_put(obj);
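
From userspace, the new metadata pair rides the existing GEM_INFO ioctl: the user pointer goes in .value and the size in .len, with SET capped at 128 bytes as enforced above. A hedged sketch of a SET call, assuming the matching <drm/msm_drm.h> uAPI header from this kernel:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/msm_drm.h>

    /* Sketch: attach metadata to a GEM handle via DRM_IOCTL_MSM_GEM_INFO. */
    static int example_set_metadata(int drm_fd, uint32_t handle,
                                    const void *meta, uint32_t size)
    {
        struct drm_msm_gem_info req;

        memset(&req, 0, sizeof(req));
        req.handle = handle;
        req.info = MSM_INFO_SET_METADATA;
        req.value = (uintptr_t)meta;   /* user pointer passed as u64 */
        req.len = size;                /* kernel rejects sizes over 128 */

        return ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &req);
    }
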
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index cd5bf658d..16a7cbc0b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -78,12 +78,10 @@ enum msm_dsi_controller {
* enum msm_event_wait - type of HW events to wait for
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
- * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
*/
enum msm_event_wait {
MSM_ENC_COMMIT_DONE = 0,
MSM_ENC_TX_COMPLETE,
- MSM_ENC_VBLANK,
};
/**
@@ -92,12 +90,14 @@ enum msm_event_wait {
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
+ * @needs_cdm: indicates whether cdm block is needed for this display topology
*/
struct msm_display_topology {
u32 num_lm;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
+ bool needs_cdm;
};
/* Commit/Event thread specific structure */
@@ -386,10 +386,8 @@ int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
-void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
-void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
#else
@@ -407,19 +405,10 @@ static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
return -EINVAL;
}
-static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
-{
-}
-
static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}
-static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
- struct drm_minor *minor)
-{
-}
-
static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
return false;
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index e3f61c39d..80166f702 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
- drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
@@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct msm_format *format;
int ret, i, n;
- drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
+ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
@@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
refcount_set(&msm_fb->dirtyfb, 1);
- drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
+ drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index db1e748da..175ee4ab8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -226,9 +226,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(msm_obj->madv > madv)) {
- DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
+ if (msm_obj->madv > madv) {
+ DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
@@ -1058,6 +1058,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
drm_gem_object_release(obj);
+ kfree(msm_obj->metadata);
kfree(msm_obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8ddef5443..8d414b072 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -9,6 +9,7 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include "drm/drm_exec.h"
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"
@@ -108,6 +109,10 @@ struct msm_gem_object {
char name[32]; /* Identifier to print for the debugfs files */
+ /* userspace metadata backchannel */
+ void *metadata;
+ u32 metadata_size;
+
/**
* pin_count: Number of times the pages are pinned
*
@@ -254,7 +259,7 @@ struct msm_gem_submit {
struct msm_gpu *gpu;
struct msm_gem_address_space *aspace;
struct list_head node; /* node in ring submit list */
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
uint32_t seqno; /* Sequence number of the submit on the ring */
/* Hw fence, which is created when the scheduler executes the job, and
@@ -270,9 +275,9 @@ struct msm_gem_submit {
int fence_id; /* key into queue->fence_idr */
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
- bool fault_dumped; /* Limit devcoredump dumping to one per submit */
- bool valid; /* true if no cmdstream patching needed */
- bool in_rb; /* "sudo" mode, copy cmds into RB */
+ bool bos_pinned : 1;
+ bool fault_dumped:1;/* Limit devcoredump dumping to one per submit */
+ bool in_rb : 1; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
@@ -287,10 +292,6 @@ struct msm_gem_submit {
struct drm_msm_gem_submit_reloc *relocs;
} *cmd; /* array of size nr_cmds */
struct {
-/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_PINNED 0x2000 /* obj (pages) is pinned and on active list */
uint32_t flags;
union {
struct drm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 5a7d48c02..07ca4ddfe 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -75,7 +75,7 @@ static bool
wait_for_idle(struct drm_gem_object *obj)
{
enum dma_resv_usage usage = dma_resv_usage_rw(true);
- return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+ return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}
static bool
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 99744de6c..fba781931 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -17,6 +17,12 @@
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
+ * error msgs for debugging, but we don't spam dmesg by default
+ */
+#define SUBMIT_ERROR(submit, fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
+
/*
* Cmdstream submission:
*/
@@ -37,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
- submit = kzalloc(sz, GFP_KERNEL);
+ submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit)
return ERR_PTR(-ENOMEM);
@@ -48,7 +54,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- ret = drm_sched_job_init(&submit->base, queue->entity, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
@@ -136,7 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
+ SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
i = 0;
goto out;
@@ -144,8 +150,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
- /* in validate_objects() we figure out if this is true: */
- submit->bos[i].iova = submit_bo.presumed;
}
spin_lock(&file->table_lock);
@@ -158,7 +162,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
+ SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -202,13 +206,13 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
return -EINVAL;
}
if (submit_cmd.size % 4) {
- DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
+ SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
ret = -EINVAL;
goto out;
}
@@ -228,7 +232,7 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
ret = -ENOMEM;
goto out;
}
- submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit->cmd[i].relocs) {
ret = -ENOMEM;
goto out;
@@ -244,101 +248,30 @@ out:
return ret;
}
-/* Unwind bo state, according to cleanup_flags. In the success case, only
- * the lock is dropped at the end of the submit (and active/pin ref is dropped
- * later when the submit is retired).
- */
-static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
- unsigned cleanup_flags)
-{
- struct drm_gem_object *obj = submit->bos[i].obj;
- unsigned flags = submit->bos[i].flags & cleanup_flags;
-
- /*
- * Clear flags bit before dropping lock, so that the msm_job_run()
- * path isn't racing with submit_cleanup() (ie. the read/modify/
- * write is protected by the obj lock in all paths)
- */
- submit->bos[i].flags &= ~cleanup_flags;
-
- if (flags & BO_PINNED)
- msm_gem_unpin_locked(obj);
-
- if (flags & BO_LOCKED)
- dma_resv_unlock(obj->resv);
-}
-
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
-{
- unsigned cleanup_flags = BO_PINNED | BO_LOCKED;
- submit_cleanup_bo(submit, i, cleanup_flags);
-
- if (!(submit->bos[i].flags & BO_VALID))
- submit->bos[i].iova = 0;
-}
-
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
- int contended, slow_locked = -1, i, ret = 0;
-
-retry:
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;
-
- if (slow_locked == i)
- slow_locked = -1;
+ int ret;
- contended = i;
+ drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos);
- if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = dma_resv_lock_interruptible(obj->resv,
- &submit->ticket);
+ drm_exec_until_all_locked (&submit->exec) {
+ for (unsigned i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+ ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
+ drm_exec_retry_on_contention(&submit->exec);
if (ret)
- goto fail;
- submit->bos[i].flags |= BO_LOCKED;
+ goto error;
}
}
- ww_acquire_done(&submit->ticket);
-
return 0;
-fail:
- if (ret == -EALREADY) {
- DRM_ERROR("handle %u at index %u already on submit list\n",
- submit->bos[i].handle, i);
- ret = -EINVAL;
- }
-
- for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i);
-
- if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked);
-
- if (ret == -EDEADLK) {
- struct drm_gem_object *obj = submit->bos[contended].obj;
- /* we lost out in a seqno race, lock and retry.. */
- ret = dma_resv_lock_slow_interruptible(obj->resv,
- &submit->ticket);
- if (!ret) {
- submit->bos[contended].flags |= BO_LOCKED;
- slow_locked = contended;
- goto retry;
- }
-
- /* Not expecting -EALREADY here, if the bo was already
- * locked, we should have gotten -EALREADY already from
- * the dma_resv_lock_interruptable() call.
- */
- WARN_ON_ONCE(ret == -EALREADY);
- }
-
+error:
return ret;
}
-static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
+static int submit_fence_sync(struct msm_gem_submit *submit)
{
int i, ret = 0;
@@ -346,22 +279,6 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
struct drm_gem_object *obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
- /* NOTE: _reserve_shared() must happen before
- * _add_shared_fence(), which makes this a slightly
- * strange place to call it. OTOH this is a
- * convenient can-fail point to hook it in.
- */
- ret = dma_resv_reserve_fences(obj->resv, 1);
- if (ret)
- return ret;
-
- /* If userspace has determined that explicit fencing is
- * used, it can disable implicit sync on the entire
- * submit:
- */
- if (no_implicit)
- continue;
-
/* Otherwise userspace can ask for implicit sync to be
* disabled on specific buffers. This is useful for internal
* usermode driver managed buffers, suballocation, etc.
@@ -384,8 +301,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
struct msm_drm_private *priv = submit->dev->dev_private;
int i, ret = 0;
- submit->valid = true;
-
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
struct msm_gem_vma *vma;
@@ -401,14 +316,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
if (ret)
break;
- if (vma->iova == submit->bos[i].iova) {
- submit->bos[i].flags |= BO_VALID;
- } else {
- submit->bos[i].iova = vma->iova;
- /* iova changed, so address in cmdstream is not valid: */
- submit->bos[i].flags &= ~BO_VALID;
- submit->valid = false;
- }
+ submit->bos[i].iova = vma->iova;
}
/*
@@ -421,13 +329,28 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
msm_gem_pin_obj_locked(submit->bos[i].obj);
- submit->bos[i].flags |= BO_PINNED;
}
mutex_unlock(&priv->lru.lock);
+ submit->bos_pinned = true;
+
return ret;
}
+static void submit_unpin_objects(struct msm_gem_submit *submit)
+{
+ if (!submit->bos_pinned)
+ return;
+
+ for (int i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = submit->bos[i].obj;
+
+ msm_gem_unpin_locked(obj);
+ }
+
+ submit->bos_pinned = false;
+}
+
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
int i;
@@ -445,11 +368,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct drm_gem_object **obj, uint64_t *iova, bool *valid)
+ struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
- DRM_ERROR("invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
+ SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
return -EINVAL;
}
@@ -457,8 +380,6 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
*obj = submit->bos[idx].obj;
if (iova)
*iova = submit->bos[idx].iova;
- if (valid)
- *valid = !!(submit->bos[idx].flags & BO_VALID);
return 0;
}
@@ -471,11 +392,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint32_t *ptr;
int ret = 0;
- if (!nr_relocs)
- return 0;
-
if (offset % 4) {
- DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
+ SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
@@ -494,11 +412,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
uint32_t off;
uint64_t iova;
- bool valid;
if (submit_reloc.submit_offset % 4) {
- DRM_ERROR("non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
+ SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
ret = -EINVAL;
goto out;
}
@@ -508,18 +425,15 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
ret = -EINVAL;
goto out;
}
- ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
if (ret)
goto out;
- if (valid)
- continue;
-
iova += submit_reloc.reloc_offset;
if (submit_reloc.shift < 0)
@@ -544,18 +458,14 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
- unsigned cleanup_flags = BO_LOCKED;
- unsigned i;
-
- if (error)
- cleanup_flags |= BO_PINNED;
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;
- submit_cleanup_bo(submit, i, cleanup_flags);
- if (error)
- drm_gem_object_put(obj);
+ if (error) {
+ submit_unpin_objects(submit);
+ /* job wasn't enqueued to scheduler, so early retirement: */
+ msm_submit_retire(submit);
}
+
+ if (submit->exec.objects)
+ drm_exec_fini(&submit->exec);
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -749,7 +659,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
- bool has_ww_ticket = false;
unsigned i;
int ret;
@@ -855,15 +764,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
/* copy_*_user while holding a ww ticket upsets lockdep */
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
- has_ww_ticket = true;
ret = submit_lock_objects(submit);
if (ret)
goto out;
- ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
- if (ret)
- goto out;
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto out;
+ }
ret = submit_pin_objects(submit);
if (ret)
@@ -873,32 +782,27 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
uint64_t iova;
- ret = submit_bo(submit, submit->cmd[i].idx,
- &obj, &iova, NULL);
+ ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova);
if (ret)
goto out;
if (!submit->cmd[i].size ||
((submit->cmd[i].size + submit->cmd[i].offset) >
obj->size / 4)) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
+ SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
goto out;
}
submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
- if (submit->valid)
+ if (likely(!submit->cmd[i].nr_relocs))
continue;
if (!gpu->allow_relocs) {
- if (submit->cmd[i].nr_relocs) {
- DRM_ERROR("relocs not allowed\n");
- ret = -EINVAL;
- goto out;
- }
-
- continue;
+ SUBMIT_ERROR(submit, "relocs not allowed\n");
+ ret = -EINVAL;
+ goto out;
}
ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
@@ -974,6 +878,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
+ if (ret)
+ goto out;
+
submit_attach_object_fences(submit);
/* The scheduler owns a ref now: */
@@ -993,8 +900,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
submit_cleanup(submit, !!ret);
- if (has_ww_ticket)
- ww_acquire_fini(&submit->ticket);
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
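
The conversion above leans on drm_exec to replace the hand-rolled ww-mutex backoff: on contention the helper drops every reservation already taken, sleeps on the contended one, and restarts the loop body, which is why the old -EDEADLK slow path and the BO_LOCKED bookkeeping disappear. A condensed sketch of the pattern, with placeholder names apart from the drm_exec_*() helpers themselves:

#include <drm/drm_exec.h>

static int lock_bo_array(struct drm_exec *exec,
			 struct drm_gem_object **bos, unsigned int nr)
{
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr);

	drm_exec_until_all_locked(exec) {
		for (unsigned int i = 0; i < nr; i++) {
			/* Locks bos[i]->resv and reserves one fence slot. */
			ret = drm_exec_prepare_obj(exec, bos[i], 1);
			/* On -EDEADLK: unlock everything, wait, restart. */
			drm_exec_retry_on_contention(exec);
			if (ret)
				return ret; /* caller must drm_exec_fini() */
		}
	}

	return 0;
}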
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5c10b559a..655002b21 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -292,8 +292,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Set the active crash state to be dumped on failure */
gpu->crashstate = state;
- /* FIXME: Release the crashstate if this errors out? */
- dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
@@ -366,29 +365,31 @@ static void recover_worker(struct kthread_work *work)
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
- if (submit) {
- /* Increment the fault counts */
- submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
- get_comm_cmdline(submit, &comm, &cmd);
+ /*
+ * If the submit retired while we were waiting for the worker to run,
+ * or waiting to acquire the gpu lock, then nothing more to do.
+ */
+ if (!submit)
+ goto out_unlock;
- if (comm && cmd) {
- DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, comm, cmd);
+ /* Increment the fault counts */
+ submit->queue->faults++;
+ if (submit->aspace)
+ submit->aspace->faults++;
- msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", comm, cmd);
- } else {
- msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
+
+ if (comm && cmd) {
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", comm, cmd);
} else {
- /*
- * We couldn't attribute this fault to any particular context,
- * so increment the global fault count instead.
- */
- gpu->global_faults++;
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);
+
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
/* Record the crash state */
@@ -441,6 +442,7 @@ static void recover_worker(struct kthread_work *work)
pm_runtime_put(&gpu->pdev->dev);
+out_unlock:
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4252e3839..2bfcb222e 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
* DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
* cases, so we don't use it (no need for kernel generated jobs).
*/
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
* struct msm_file_private - per-drm_file context
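
The NR_SCHED_PRIORITIES change tracks the 6.8 reordering of the scheduler priority enum, where a lower value now means a higher priority and the kernel level comes first. For reference, the enum order this arithmetic assumes (from the 6.8 gpu_scheduler.h); 1 + LOW - HIGH still evaluates to the same three userspace-visible levels:

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,	/* 0: reserved for the kernel */
	DRM_SCHED_PRIORITY_HIGH,	/* 1 */
	DRM_SCHED_PRIORITY_NORMAL,	/* 2 */
	DRM_SCHED_PRIORITY_LOW,		/* 3 */

	DRM_SCHED_PRIORITY_COUNT
};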
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 84c21ec2c..af6a6fcb1 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
@@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+ drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 29bb38f0b..35423d10a 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -28,6 +28,8 @@
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
+
struct msm_mdss {
struct device *dev;
@@ -40,8 +42,9 @@ struct msm_mdss {
struct irq_domain *domain;
} irq_controller;
const struct msm_mdss_data *mdss_data;
- struct icc_path *path[2];
- u32 num_paths;
+ struct icc_path *mdp_path[2];
+ u32 num_mdp_paths;
+ struct icc_path *reg_bus_path;
};
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
@@ -49,38 +52,26 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
{
struct icc_path *path0;
struct icc_path *path1;
+ struct icc_path *reg_bus_path;
- path0 = of_icc_get(dev, "mdp0-mem");
+ path0 = devm_of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
- msm_mdss->path[0] = path0;
- msm_mdss->num_paths = 1;
+ msm_mdss->mdp_path[0] = path0;
+ msm_mdss->num_mdp_paths = 1;
- path1 = of_icc_get(dev, "mdp1-mem");
+ path1 = devm_of_icc_get(dev, "mdp1-mem");
if (!IS_ERR_OR_NULL(path1)) {
- msm_mdss->path[1] = path1;
- msm_mdss->num_paths++;
+ msm_mdss->mdp_path[1] = path1;
+ msm_mdss->num_mdp_paths++;
}
- return 0;
-}
-
-static void msm_mdss_put_icc_path(void *data)
-{
- struct msm_mdss *msm_mdss = data;
- int i;
-
- for (i = 0; i < msm_mdss->num_paths; i++)
- icc_put(msm_mdss->path[i]);
-}
+ reg_bus_path = of_icc_get(dev, "cpu-cfg");
+ if (!IS_ERR_OR_NULL(reg_bus_path))
+ msm_mdss->reg_bus_path = reg_bus_path;
-static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
-{
- int i;
-
- for (i = 0; i < msm_mdss->num_paths; i++)
- icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+ return 0;
}
static void msm_mdss_irq(struct irq_desc *desc)
@@ -236,14 +227,22 @@ const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
- int ret;
+ int ret, i;
/*
* Several components have AXI clocks that can only be turned on if
* the interconnect is enabled (non-zero bandwidth). Let's make sure
* that the interconnects are at least at a minimum amount.
*/
- msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+ icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
+
+ if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ msm_mdss->mdss_data->reg_bus_bw);
+ else
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ DEFAULT_REG_BW);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
@@ -295,8 +294,15 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
+ int i;
+
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
- msm_mdss_icc_request_bw(msm_mdss, 0);
+
+ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+ icc_set_bw(msm_mdss->mdp_path[i], 0, 0);
+
+ if (msm_mdss->reg_bus_path)
+ icc_set_bw(msm_mdss->reg_bus_path, 0, 0);
return 0;
}
@@ -384,6 +390,8 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
+ msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
@@ -393,9 +401,6 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
- if (ret)
- return ERR_PTR(ret);
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
@@ -477,8 +482,6 @@ static int mdss_probe(struct platform_device *pdev)
if (IS_ERR(mdss))
return PTR_ERR(mdss);
- mdss->mdss_data = of_device_get_match_data(&pdev->dev);
-
platform_set_drvdata(pdev, mdss);
/*
@@ -510,11 +513,13 @@ static const struct msm_mdss_data msm8998_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data qcm2290_data = {
/* no UBWC */
.highest_bank_bit = 0x2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc7180_data = {
@@ -522,6 +527,7 @@ static const struct msm_mdss_data sc7180_data = {
.ubwc_dec_version = UBWC_2_0,
.ubwc_static = 0x1e,
.highest_bank_bit = 0x3,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc7280_data = {
@@ -531,6 +537,7 @@ static const struct msm_mdss_data sc7280_data = {
.ubwc_static = 1,
.highest_bank_bit = 1,
.macrotile_mode = 1,
+ .reg_bus_bw = 74000,
};
static const struct msm_mdss_data sc8180x_data = {
@@ -538,6 +545,7 @@ static const struct msm_mdss_data sc8180x_data = {
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc8280xp_data = {
@@ -547,12 +555,21 @@ static const struct msm_mdss_data sc8280xp_data = {
.ubwc_static = 1,
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sdm670_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sdm845_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6350_data = {
@@ -561,12 +578,14 @@ static const struct msm_mdss_data sm6350_data = {
.ubwc_swizzle = 6,
.ubwc_static = 0x1e,
.highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm8150_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6115_data = {
@@ -575,6 +594,7 @@ static const struct msm_mdss_data sm6115_data = {
.ubwc_swizzle = 7,
.ubwc_static = 0x11f,
.highest_bank_bit = 0x1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6125_data = {
@@ -592,6 +612,18 @@ static const struct msm_mdss_data sm8250_data = {
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sm8350_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = 6,
+ .ubwc_static = 1,
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
};
static const struct msm_mdss_data sm8550_data = {
@@ -602,11 +634,13 @@ static const struct msm_mdss_data sm8550_data = {
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 57000,
};
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
+ { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
@@ -618,9 +652,10 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
+ { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
+ { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
+ { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
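
The interconnect rework above splits the MDP data-bus votes from the new register-bus vote and lets devm manage the path lifetime, which is why msm_mdss_put_icc_path() could go. A minimal sketch of the per-path voting idiom; "cpu-cfg" is taken from the patch, the bandwidth value is a placeholder:

#include <linux/err.h>
#include <linux/interconnect.h>

static int vote_reg_bus(struct device *dev, struct icc_path **path, u32 bw)
{
	/* Lifetime is tied to the device; no explicit icc_put() needed. */
	*path = devm_of_icc_get(dev, "cpu-cfg");
	if (IS_ERR(*path))
		return PTR_ERR(*path);

	/* icc_set_bw(path, avg, peak); a NULL path is a harmless no-op. */
	return icc_set_bw(*path, 0, bw);
}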
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
index 02bbab42a..3afef4b17 100644
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ b/drivers/gpu/drm/msm/msm_mdss.h
@@ -14,6 +14,7 @@ struct msm_mdss_data {
u32 ubwc_static;
u32 highest_bank_bit;
u32 macrotile_mode;
+ u32 reg_bus_bw;
};
#define UBWC_1_0 0x10000000
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 5adc51f7a..ca44fd291 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -270,6 +270,9 @@ int msm_rd_debugfs_init(struct drm_minor *minor)
struct msm_rd_state *rd;
int ret;
+ if (!priv->gpu_pdev)
+ return 0;
+
/* only create on first minor: */
if (priv->rd)
return 0;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index a7e152f65..9d6655f96 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -27,9 +27,10 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
struct drm_gem_object *obj = submit->bos[i].obj;
msm_gem_unpin_active(obj);
- submit->bos[i].flags &= ~BO_PINNED;
}
+ submit->bos_pinned = false;
+
mutex_unlock(&priv->lru.lock);
/* TODO move submit path over to using a per-ring lock.. */
@@ -97,7 +98,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
/* currently managing hangcheck ourselves: */
sched_timeout = MAX_SCHEDULE_TIMEOUT;
- ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+ ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
num_hw_submissions, 0, sched_timeout,
NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
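
The extra NULL argument and DRM_SCHED_PRIORITY_COUNT reflect the 6.8 drm_sched_init() signature, which gained an optional submit workqueue (the scheduler allocates its own ordered workqueue when it is NULL) and renamed hw_submission to a credit limit; for reference:

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   struct workqueue_struct *submit_wq,
		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);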
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 625c1bfc4..b483ef482 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -11,9 +11,10 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
@@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
static int mxsfb_probe(struct platform_device *pdev)
{
struct drm_device *drm;
- const struct of_device_id *of_id =
- of_match_device(mxsfb_dt_ids, &pdev->dev);
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = mxsfb_load(drm, of_id->data);
+ ret = mxsfb_load(drm, device_get_match_data(&pdev->dev));
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1e6aaf95f..ceef470c9 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
help
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
+
+config DRM_NOUVEAU_GSP_DEFAULT
+ bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to use the GSP codepaths by default on
+ Turing and Ampere GPUs.
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index d093549f6..8d37a694b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -38,7 +38,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -945,7 +947,8 @@ nv50_msto_prepare(struct drm_atomic_state *state,
if (ret == 0) {
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
payload->vc_start_slot, payload->time_slots,
- payload->pbn, payload->time_slots * mst_state->pbn_div);
+ payload->pbn,
+ payload->time_slots * dfixed_trunc(mst_state->pbn_div));
} else {
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
}
@@ -989,7 +992,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- if (!mst_state->pbn_div) {
+ if (!mst_state->pbn_div.full) {
struct nouveau_encoder *outp = mstc->mstm->outp;
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
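
mst_state->pbn_div is a binary fixed-point value as of 6.8 (fixed20_12 from drm_fixed.h), hence the .full test and the dfixed_trunc() when multiplying by time slots. A small sketch of the conversion; names other than the drm_fixed helpers are placeholders:

#include <drm/drm_fixed.h>

/* Effective bandwidth for a VC payload: slots * trunc(pbn_div). */
static inline u32 payload_bw(fixed20_12 pbn_div, u32 time_slots)
{
	/* dfixed_trunc() drops the 12 fractional bits of .full. */
	return time_slots * dfixed_trunc(pbn_div);
}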
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2edd7bb13..80f74ee0f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -127,21 +127,16 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
- /* When a client exits without waiting for it's queued up jobs to
- * finish it might happen that we fault the channel. This is due to
- * drm_file_free() calling drm_gem_release() before the postclose()
- * callback. Hence, we can't tear down this scheduler entity before
- * uvmm mappings are unmapped. Currently, we can't detect this case.
- *
- * However, this should be rare and harmless, since the channel isn't
- * needed anymore.
- */
- nouveau_sched_entity_fini(&chan->sched_entity);
+ /* Cancel all jobs from the entity's queue. */
+ if (chan->sched)
+ drm_sched_entity_fini(&chan->sched->entity);
- /* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
+ if (chan->sched)
+ nouveau_sched_destroy(&chan->sched);
+
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -204,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
+ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -268,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
+ case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ break;
+ case NOUVEAU_GETPARAM_VRAM_USED: {
+ struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+ break;
+ }
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
@@ -344,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
- drm->sched_wq);
- if (ret)
- goto done;
+ /* If we're not using the VM_BIND uAPI, we don't need a scheduler.
+ *
+ * The client lock is already acquired by nouveau_abi16_get().
+ */
+ if (nouveau_cli_uvmm(cli)) {
+ ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
+ chan->chan->dma.ib_max);
+ if (ret)
+ goto done;
+ }
init->channel = chan->chan->chid;
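
NOUVEAU_GETPARAM_VRAM_BAR_SIZE and NOUVEAU_GETPARAM_VRAM_USED are plain 64-bit reads through the existing getparam ioctl. A hedged userspace sketch, assuming the 6.8 <drm/nouveau_drm.h> uapi, libdrm's drmCommandWriteRead(), and a hypothetical open device fd:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>

static uint64_t nouveau_getparam(int fd, uint64_t param)
{
	struct drm_nouveau_getparam gp = { .param = param };

	/* Returns 0 on success; gp.value holds the result. */
	if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)))
		return 0;

	return gp.value;
}

/* e.g.: uint64_t used = nouveau_getparam(fd, NOUVEAU_GETPARAM_VRAM_USED); */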
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 9f538486c..11c8c4a80 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched *sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 479effcf6..79cfab53f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 254d6c9ef..5d8ee1729 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
*/
- if (bo->base.dev)
+ if (bo->base.dev) {
+ /* Gem objects not being shared with other VMs get their
+ * dma_resv from a root GEM object.
+ */
+ if (nvbo->no_share)
+ drm_gem_object_put(nvbo->r_obj);
+
drm_gem_object_release(&bo->base);
- else
+ } else {
dma_resv_fini(&bo->base._resv);
+ }
kfree(nvbo);
}
@@ -1055,17 +1062,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_gem_object *obj = &bo->base;
struct ttm_resource *old_reg = bo->resource;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
-
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
if (ret)
return ret;
}
+ drm_gpuvm_bo_gem_evict(obj, evict);
nouveau_bo_move_ntfy(bo, new_reg);
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
@@ -1130,6 +1138,7 @@ out:
out_ntfy:
if (ret) {
nouveau_bo_move_ntfy(bo, bo->resource);
+ drm_gpuvm_bo_gem_evict(obj, !evict);
}
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 07f671cf8..70c551921 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,11 @@ struct nouveau_bo {
struct list_head entry;
int pbbo_index;
bool validate_mapped;
+
+ /* Root GEM object we derive the dma_resv of in case this BO is not
+ * shared between VMs.
+ */
+ struct drm_gem_object *r_obj;
bool no_share;
/* GPU address space is independent of CPU word size */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 75545da9d..a947e1d5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -190,6 +190,8 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
+ struct nouveau_uvmm *uvmm = nouveau_cli_uvmm_locked(cli);
+
/* All our channels are dead now, which means all the fences they
* own are signalled, and all callback functions have been called.
*
@@ -199,8 +201,10 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
- nouveau_uvmm_fini(&cli->uvmm);
- nouveau_sched_entity_fini(&cli->sched_entity);
+ if (cli->sched)
+ nouveau_sched_destroy(&cli->sched);
+ if (uvmm)
+ nouveau_uvmm_fini(uvmm);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@@ -307,8 +311,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
- ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
- drm->sched_wq);
+ /* Don't pass in the (shared) sched_wq in order to let
+ * nouveau_sched_create() create a dedicated one for VM_BIND jobs.
+ *
+ * This is required to ensure that for VM_BIND jobs free_job() work and
+ * run_job() work can always run concurrently and hence, free_job() work
+ * can never stall run_job() work. For EXEC jobs we don't have this
+ * requirement, since an EXEC job's free_job() does not need to take any
+ * locks that are directly or indirectly held for allocations
+ * elsewhere.
+ */
+ ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);
if (ret)
goto done;
@@ -579,13 +592,16 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
- ret = nouveau_sched_init(drm);
- if (ret)
+ drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+ WQ_MAX_ACTIVE);
+ if (!drm->sched_wq) {
+ ret = -ENOMEM;
goto fail_alloc;
+ }
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
- goto fail_sched;
+ goto fail_wq;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
@@ -655,8 +671,8 @@ fail_ttm:
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
-fail_sched:
- nouveau_sched_fini(drm);
+fail_wq:
+ destroy_workqueue(drm->sched_wq);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@@ -710,9 +726,7 @@ nouveau_drm_device_fini(struct drm_device *dev)
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
-
- nouveau_sched_fini(drm);
-
+ destroy_workqueue(drm->sched_wq);
nvif_parent_dtor(&drm->parent);
mutex_destroy(&drm->clients_lock);
kfree(drm);
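
The comment in nouveau_cli_init() is the crux of the workqueue split: VM_BIND schedulers must never have free_job() work stall run_job() work, so they get a dedicated queue, while per-channel EXEC schedulers can share drm->sched_wq. The two call sites above, side by side:

/* Per-client VM_BIND scheduler: NULL asks nouveau_sched_create() to
 * allocate a dedicated workqueue; a single credit serializes jobs. */
ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);

/* Per-channel EXEC scheduler: shares the driver-wide workqueue and
 * sizes its credit limit to the channel's IB ring. */
ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
			   chan->chan->dma.ib_max);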
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e73a233c6..e239c6bf4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -93,9 +93,12 @@ struct nouveau_cli {
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
struct nouveau_vmm svm;
- struct nouveau_uvmm uvmm;
+ struct {
+ struct nouveau_uvmm *ptr;
+ bool disabled;
+ } uvmm;
- struct nouveau_sched_entity sched_entity;
+ struct nouveau_sched *sched;
const struct nvif_mclass *mem;
@@ -121,10 +124,7 @@ struct nouveau_cli_work {
static inline struct nouveau_uvmm *
nouveau_cli_uvmm(struct nouveau_cli *cli)
{
- if (!cli || !cli->uvmm.vmm.cli)
- return NULL;
-
- return &cli->uvmm;
+ return cli ? cli->uvmm.ptr : NULL;
}
static inline struct nouveau_uvmm *
@@ -258,6 +258,9 @@ struct nouveau_drm {
u64 context_base;
} *runl;
+ /* Workqueue used for channel schedulers. */
+ struct workqueue_struct *sched_wq;
+
/* context for accelerated drm-internal operations */
struct nouveau_channel *cechan;
struct nouveau_channel *channel;
@@ -298,10 +301,6 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
-
- struct drm_gpu_scheduler sched;
- struct workqueue_struct *sched_wq;
-
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 9a5ef5747..e65c0ef23 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_exec.h>
-
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
@@ -86,14 +84,12 @@
*/
static int
-nouveau_exec_job_submit(struct nouveau_job *job)
+nouveau_exec_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_cli *cli = job->cli;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
int ret;
/* Create a new fence, but do not emit yet. */
@@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job)
return ret;
nouveau_uvmm_lock(uvmm);
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
- drm_exec_until_all_locked(exec) {
- struct drm_gpuva *va;
-
- drm_gpuvm_for_each_va(va, &uvmm->base) {
- if (unlikely(va == &uvmm->base.kernel_alloc_node))
- continue;
-
- ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- goto err_uvmm_unlock;
- }
+ ret = drm_gpuvm_exec_lock(vme);
+ if (ret) {
+ nouveau_uvmm_unlock(uvmm);
+ return ret;
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_for_each_locked_object(exec, index, obj) {
- struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
- ret = nouveau_bo_validate(nvbo, true, false);
- if (ret)
- goto err_exec_fini;
+ ret = drm_gpuvm_exec_validate(vme);
+ if (ret) {
+ drm_gpuvm_exec_unlock(vme);
+ return ret;
}
return 0;
-
-err_uvmm_unlock:
- nouveau_uvmm_unlock(uvmm);
-err_exec_fini:
- drm_exec_fini(exec);
- return ret;
-
}
static void
-nouveau_exec_job_armed_submit(struct nouveau_job *job)
+nouveau_exec_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -192,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
+ nouveau_job_done(job);
nouveau_job_free(job);
kfree(exec_job->fence);
@@ -211,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
- nouveau_sched_entity_fini(job->entity);
-
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -259,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
}
}
+ args.file_priv = __args->file_priv;
job->chan = __args->chan;
- args.sched_entity = __args->sched_entity;
- args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ /* Plus one to account for the HW fence. */
+ args.credits = job->push.count + 1;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -415,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched_entity = &chan16->sched_entity;
+ args.sched = chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
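
The submission path now delegates locking and validation to drm_gpuvm. A condensed sketch of the lock/validate/fence sequence used above, assuming a zero-initialized drm_gpuvm_exec; error handling and the done-fence plumbing are simplified:

#include <linux/dma-resv.h>
#include <drm/drm_gpuvm.h>

static int submit_prepare(struct drm_gpuvm *vm, struct drm_gpuvm_exec *vme,
			  struct dma_fence *done_fence)
{
	int ret;

	vme->vm = vm;
	vme->flags = DRM_EXEC_IGNORE_DUPLICATES;
	vme->num_fences = 1;

	/* Locks the VM resv plus every object with a mapping in the VM. */
	ret = drm_gpuvm_exec_lock(vme);
	if (ret)
		return ret;

	/* Re-validates evicted BOs via the gpuvm bo_validate() hook. */
	ret = drm_gpuvm_exec_validate(vme);
	if (ret) {
		drm_gpuvm_exec_unlock(vme);
		return ret;
	}

	/* ...arm and push the job; once armed, publish the fence: */
	drm_gpuvm_exec_resv_add_fence(vme, done_fence, DMA_RESV_USAGE_WRITE,
				      DMA_RESV_USAGE_WRITE);
	drm_gpuvm_exec_unlock(vme);

	return 0;
}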
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
index 5488d337b..9b3b151fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.h
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -3,16 +3,12 @@
#ifndef __NOUVEAU_EXEC_H__
#define __NOUVEAU_EXEC_H__
-#include <drm/drm_exec.h>
-
#include "nouveau_drv.h"
#include "nouveau_sched.h"
struct nouveau_exec_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
-
- struct drm_exec exec;
+ struct nouveau_sched *sched;
struct nouveau_channel *chan;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7b69e6df5..5a887d67d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
- if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+ if (nvbo->no_share && uvmm &&
+ drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
return -EPERM;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (unlikely(!uvmm))
return -EINVAL;
- resv = &uvmm->resv;
+ resv = drm_gpuvm_resv(&uvmm->base);
}
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
@@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
+ if (nvbo->no_share) {
+ nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
+ drm_gem_object_get(nvbo->r_obj);
+ }
+
*pnvbo = nvbo;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 23cd43a7f..bf2dc7567 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -43,11 +43,10 @@ static int nouveau_platform_probe(struct platform_device *pdev)
return 0;
}
-static int nouveau_platform_remove(struct platform_device *pdev)
+static void nouveau_platform_remove(struct platform_device *pdev)
{
struct drm_device *dev = platform_get_drvdata(pdev);
nouveau_drm_device_remove(dev);
- return 0;
}
#if IS_ENABLED(CONFIG_OF)
@@ -93,5 +92,5 @@ struct platform_driver nouveau_platform_driver = {
.of_match_table = of_match_ptr(nouveau_platform_match),
},
.probe = nouveau_platform_probe,
- .remove = nouveau_platform_remove,
+ .remove_new = nouveau_platform_remove,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 7c376c4cc..32fa2e273 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -12,30 +12,28 @@
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
-/* FIXME
- *
- * We want to make sure that jobs currently executing can't be deferred by
- * other jobs competing for the hardware. Otherwise we might end up with job
- * timeouts just because of too many clients submitting too many jobs. We don't
- * want jobs to time out because of system load, but because of the job being
- * too bulky.
- *
- * For now allow for up to 16 concurrent jobs in flight until we know how many
- * rings the hardware can process in parallel.
- */
-#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
+/* Starts at 0, since the DRM scheduler interprets those parameters as (initial)
+ * index to the run-queue array.
+ */
+enum nouveau_sched_priority {
+ NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+};
+
int
nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args)
{
- struct nouveau_sched_entity *entity = args->sched_entity;
+ struct nouveau_sched *sched = args->sched;
int ret;
+ INIT_LIST_HEAD(&job->entry);
+
job->file_priv = args->file_priv;
job->cli = nouveau_cli(args->file_priv);
- job->entity = entity;
+ job->sched = sched;
job->sync = args->sync;
job->resv_usage = args->resv_usage;
@@ -86,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job,
ret = -ENOMEM;
goto err_free_objs;
}
-
}
- ret = drm_sched_job_init(&job->base, &entity->base, NULL);
+ ret = drm_sched_job_init(&job->base, &sched->entity,
+ args->credits, NULL);
if (ret)
goto err_free_chains;
@@ -109,6 +107,27 @@
return ret;
}
void
+nouveau_job_fini(struct nouveau_job *job)
+{
+ dma_fence_put(job->done_fence);
+ drm_sched_job_cleanup(&job->base);
+
+ job->ops->free(job);
+}
+
+void
+nouveau_job_done(struct nouveau_job *job)
+{
+ struct nouveau_sched *sched = job->sched;
+
+ spin_lock(&sched->job.list.lock);
+ list_del(&job->entry);
+ spin_unlock(&sched->job.list.lock);
+
+ wake_up(&sched->job.wq);
+}
+
+void
nouveau_job_free(struct nouveau_job *job)
{
kfree(job->in_sync.data);
@@ -117,13 +136,6 @@ nouveau_job_free(struct nouveau_job *job)
kfree(job->out_sync.chains);
}
-void nouveau_job_fini(struct nouveau_job *job)
-{
- dma_fence_put(job->done_fence);
- drm_sched_job_cleanup(&job->base);
- job->ops->free(job);
-}
-
static int
sync_find_fence(struct nouveau_job *job,
struct drm_nouveau_sync *sync,
@@ -261,8 +273,13 @@ nouveau_job_fence_attach(struct nouveau_job *job)
int
nouveau_job_submit(struct nouveau_job *job)
{
- struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+ struct nouveau_sched *sched = job->sched;
struct dma_fence *done_fence = NULL;
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &nouveau_cli_uvmm(job->cli)->base,
+ .flags = DRM_EXEC_IGNORE_DUPLICATES,
+ .num_fences = 1,
+ };
int ret;
ret = nouveau_job_add_deps(job);
@@ -276,46 +293,29 @@ nouveau_job_submit(struct nouveau_job *job)
/* Make sure the job appears on the sched_entity's queue in the same
* order as it was submitted.
*/
- mutex_lock(&entity->mutex);
+ mutex_lock(&sched->mutex);
/* Guarantee we won't fail after the submit() callback returned
* successfully.
*/
if (job->ops->submit) {
- ret = job->ops->submit(job);
+ ret = job->ops->submit(job, &vm_exec);
if (ret)
goto err_cleanup;
}
+ /* Submit was successful; add the job to the schedulers job list. */
+ spin_lock(&sched->job.list.lock);
+ list_add(&job->entry, &sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
- /* If a sched job depends on a dma-fence from a job from the same GPU
- * scheduler instance, but a different scheduler entity, the GPU
- * scheduler does only wait for the particular job to be scheduled,
- * rather than for the job to fully complete. This is due to the GPU
- * scheduler assuming that there is a scheduler instance per ring.
- * However, the current implementation, in order to avoid arbitrary
- * amounts of kthreads, has a single scheduler instance while scheduler
- * entities represent rings.
- *
- * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
- * out-fences in order to force the scheduler to wait for full job
- * completion for dependent jobs from different entities and same
- * scheduler instance.
- *
- * There is some work in progress [1] to address the issues of firmware
- * schedulers; once it is in-tree the scheduler topology in Nouveau
- * should be re-worked accordingly.
- *
- * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
- */
- set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
-
if (job->ops->armed_submit)
- job->ops->armed_submit(job);
+ job->ops->armed_submit(job, &vm_exec);
nouveau_job_fence_attach(job);
@@ -326,7 +326,7 @@ nouveau_job_submit(struct nouveau_job *job)
drm_sched_entity_push_job(&job->base);
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
if (done_fence) {
dma_fence_wait(done_fence, true);
@@ -336,20 +336,13 @@ nouveau_job_submit(struct nouveau_job *job)
return 0;
err_cleanup:
- mutex_unlock(&entity->mutex);
+ mutex_unlock(&sched->mutex);
nouveau_job_fence_attach_cleanup(job);
err:
job->state = NOUVEAU_JOB_SUBMIT_FAILED;
return ret;
}
-bool
-nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work)
-{
- return queue_work(entity->sched_wq, work);
-}
-
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
@@ -399,50 +392,116 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq)
-{
- mutex_init(&entity->mutex);
- spin_lock_init(&entity->job.list.lock);
- INIT_LIST_HEAD(&entity->job.list.head);
- init_waitqueue_head(&entity->job.wq);
-
- entity->sched_wq = sched_wq;
- return drm_sched_entity_init(&entity->base,
- DRM_SCHED_PRIORITY_NORMAL,
- &sched, 1, NULL);
-}
-
-void
-nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
-{
- drm_sched_entity_destroy(&entity->base);
-}
-
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
};
-int nouveau_sched_init(struct nouveau_drm *drm)
+static int
+nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
{
- struct drm_gpu_scheduler *sched = &drm->sched;
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+ int ret;
- drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
- if (!drm->sched_wq)
+ if (!wq) {
+ wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
+ current->pid);
+ if (!wq)
+ return -ENOMEM;
+
+ sched->wq = wq;
+ }
+
+ ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
+ NOUVEAU_SCHED_PRIORITY_COUNT,
+ credit_limit, 0, job_hang_limit,
+ NULL, NULL, "nouveau_sched", drm->dev->dev);
+ if (ret)
+ goto fail_wq;
+
+ /* Use DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to
+ * use when we want a single run-queue only.
+ *
+ * It's not documented, but using any other priority runs into faults,
+ * because the scheduler uses the priority as an array index.
+ *
+ * NOUVEAU_SCHED_PRIORITY_SINGLE can't be used either, because it
+ * doesn't match the enum type used in drm_sched_entity_init().
+ */
+ ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL,
+ &drm_sched, 1, NULL);
+ if (ret)
+ goto fail_sched;
+
+ mutex_init(&sched->mutex);
+ spin_lock_init(&sched->job.list.lock);
+ INIT_LIST_HEAD(&sched->job.list.head);
+ init_waitqueue_head(&sched->job.wq);
+
+ return 0;
+
+fail_sched:
+ drm_sched_fini(drm_sched);
+fail_wq:
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
+ return ret;
+}
+
+int
+nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
+{
+ struct nouveau_sched *sched;
+ int ret;
+
+ sched = kzalloc(sizeof(*sched), GFP_KERNEL);
+ if (!sched)
return -ENOMEM;
- return drm_sched_init(sched, &nouveau_sched_ops,
- DRM_SCHED_PRIORITY_COUNT,
- NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
- NULL, NULL, "nouveau_sched", drm->dev->dev);
+ ret = nouveau_sched_init(sched, drm, wq, credit_limit);
+ if (ret) {
+ kfree(sched);
+ return ret;
+ }
+
+ *psched = sched;
+
+ return 0;
+}
+
+
+static void
+nouveau_sched_fini(struct nouveau_sched *sched)
+{
+ struct drm_gpu_scheduler *drm_sched = &sched->base;
+ struct drm_sched_entity *entity = &sched->entity;
+
+ rmb(); /* so list_empty() works without taking the lock */
+ wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+
+ drm_sched_entity_fini(entity);
+ drm_sched_fini(drm_sched);
+
+ /* Destroy the workqueue after scheduler teardown, otherwise it might
+ * still be in use.
+ */
+ if (sched->wq)
+ destroy_workqueue(sched->wq);
}
-void nouveau_sched_fini(struct nouveau_drm *drm)
+void
+nouveau_sched_destroy(struct nouveau_sched **psched)
{
- destroy_workqueue(drm->sched_wq);
- drm_sched_fini(&drm->sched);
+ struct nouveau_sched *sched = *psched;
+
+ nouveau_sched_fini(sched);
+ kfree(sched);
+
+ *psched = NULL;
}
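For context, a hedged sketch of how the new create/destroy API would be driven; the real call sites live outside this hunk, and the credit limit below is a placeholder:

/* Hypothetical caller of the new API (real call sites are not part of
 * this hunk). Passing wq == NULL makes the scheduler allocate its own
 * workqueue.
 */
static int sched_lifecycle_example(struct nouveau_drm *drm)
{
	struct nouveau_sched *sched;
	int ret;

	ret = nouveau_sched_create(&sched, drm, NULL, 1 /* placeholder credit_limit */);
	if (ret)
		return ret;

	/* ... init and submit jobs against sched ... */

	nouveau_sched_destroy(&sched);	/* drains the job list, then frees */
	return 0;
}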
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 27ac19792..e1f01a23e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -5,7 +5,7 @@
#include <linux/types.h>
-#include <drm/drm_exec.h>
+#include <drm/drm_gpuvm.h>
#include <drm/gpu_scheduler.h>
#include "nouveau_drv.h"
@@ -26,7 +26,8 @@ enum nouveau_job_state {
struct nouveau_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
+ u32 credits;
enum dma_resv_usage resv_usage;
bool sync;
@@ -49,12 +50,12 @@ struct nouveau_job {
enum nouveau_job_state state;
- struct nouveau_sched_entity *entity;
+ struct nouveau_sched *sched;
+ struct list_head entry;
struct drm_file *file_priv;
struct nouveau_cli *cli;
- struct drm_exec exec;
enum dma_resv_usage resv_usage;
struct dma_fence *done_fence;
@@ -76,8 +77,8 @@ struct nouveau_job {
/* If .submit() returns without any error, it is guaranteed that
* armed_submit() is called.
*/
- int (*submit)(struct nouveau_job *);
- void (*armed_submit)(struct nouveau_job *);
+ int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
+ void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
struct dma_fence *(*run)(struct nouveau_job *);
void (*free)(struct nouveau_job *);
enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
@@ -90,20 +91,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
int nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args);
-void nouveau_job_free(struct nouveau_job *job);
-
-int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_done(struct nouveau_job *job);
+void nouveau_job_free(struct nouveau_job *job);
-#define to_nouveau_sched_entity(entity) \
- container_of((entity), struct nouveau_sched_entity, base)
-
-struct nouveau_sched_entity {
- struct drm_sched_entity base;
+struct nouveau_sched {
+ struct drm_gpu_scheduler base;
+ struct drm_sched_entity entity;
+ struct workqueue_struct *wq;
struct mutex mutex;
- struct workqueue_struct *sched_wq;
-
struct {
struct {
struct list_head head;
@@ -113,15 +111,8 @@ struct nouveau_sched_entity {
} job;
};
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
- struct drm_gpu_scheduler *sched,
- struct workqueue_struct *sched_wq);
-void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
-
-bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
- struct work_struct *work);
-
-int nouveau_sched_init(struct nouveau_drm *drm);
-void nouveau_sched_fini(struct nouveau_drm *drm);
+int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_destroy(struct nouveau_sched **psched);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 5cf892c50..ee02cd833 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -62,6 +62,8 @@ struct bind_job_op {
enum vm_bind_op op;
u32 flags;
+ struct drm_gpuvm_bo *vm_bo;
+
struct {
u64 addr;
u64 range;
@@ -436,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
static void
op_map_prepare_unwind(struct nouveau_uvma *uvma)
{
+ struct drm_gpuva *va = &uvma->va;
nouveau_uvma_gem_put(uvma);
- drm_gpuva_remove(&uvma->va);
+ drm_gpuva_remove(va);
nouveau_uvma_free(uvma);
}
@@ -466,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
break;
case DRM_GPUVA_OP_REMAP: {
struct drm_gpuva_op_remap *r = &op->remap;
+ struct drm_gpuva *va = r->unmap->va;
if (r->next)
op_map_prepare_unwind(new->next);
@@ -473,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (r->prev)
op_map_prepare_unwind(new->prev);
- op_unmap_prepare_unwind(r->unmap->va);
+ op_unmap_prepare_unwind(va);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -604,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u)
drm_gpuva_unmap(u);
}
+/*
+ * Note: @args should not be NULL when calling for a map operation.
+ */
static int
nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
@@ -624,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
if (ret)
goto unwind;
- if (args && vmm_get_range) {
+ if (vmm_get_range) {
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
if (ret) {
@@ -632,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
goto unwind;
}
}
+
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -804,15 +812,15 @@ op_remap(struct drm_gpuva_op_remap *r,
struct drm_gpuva_op_unmap *u = r->unmap;
struct nouveau_uvma *uvma = uvma_from_va(u->va);
u64 addr = uvma->va.va.addr;
- u64 range = uvma->va.va.range;
+ u64 end = uvma->va.va.addr + uvma->va.va.range;
if (r->prev)
addr = r->prev->va.addr + r->prev->va.range;
if (r->next)
- range = r->next->va.addr - addr;
+ end = r->next->va.addr;
- op_unmap_range(u, addr, range);
+ op_unmap_range(u, addr, end - addr);
}
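The switch from a range to an end coordinate makes the unmap window explicit. A worked example with hypothetical addresses:

/* Worked example (hypothetical numbers): original mapping
 * [0x1000, 0x5000); the remap keeps [0x1000, 0x2000) as prev and
 * [0x4000, 0x5000) as next.
 *
 *   addr = prev->va.addr + prev->va.range = 0x2000
 *   end  = next->va.addr                  = 0x4000
 *
 * op_unmap_range(u, addr, end - addr) thus unmaps exactly the
 * 0x2000-byte hole [0x2000, 0x4000) between the surviving pieces.
 */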
static int
@@ -929,25 +937,13 @@ nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
static int
nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
- u64 end = addr + range;
- u64 kernel_managed_end = uvmm->kernel_managed_addr +
- uvmm->kernel_managed_size;
-
if (addr & ~PAGE_MASK)
return -EINVAL;
if (range & ~PAGE_MASK)
return -EINVAL;
- if (end <= addr)
- return -EINVAL;
-
- if (addr < NOUVEAU_VA_SPACE_START ||
- end > NOUVEAU_VA_SPACE_END)
- return -EINVAL;
-
- if (addr < kernel_managed_end &&
- end > uvmm->kernel_managed_addr)
+ if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
return -EINVAL;
return 0;
@@ -970,6 +966,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref)
{
struct nouveau_uvmm_bind_job *job =
container_of(kref, struct nouveau_uvmm_bind_job, kref);
+ struct bind_job_op *op, *next;
+
+ list_for_each_op_safe(op, next, &job->ops) {
+ list_del(&op->entry);
+ kfree(op);
+ }
nouveau_job_free(&job->base);
kfree(job);
@@ -1011,14 +1013,16 @@ bind_validate_op(struct nouveau_job *job,
static void
bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
{
- struct nouveau_uvmm_bind_job *bind_job;
- struct nouveau_sched_entity *entity = job->entity;
+ struct nouveau_sched *sched = job->sched;
+ struct nouveau_job *__job;
struct bind_job_op *op;
u64 end = addr + range;
again:
- spin_lock(&entity->job.list.lock);
- list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+ spin_lock(&sched->job.list.lock);
+ list_for_each_entry(__job, &sched->job.list.head, entry) {
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
+
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_UNMAP) {
u64 op_addr = op->va.addr;
@@ -1026,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1034,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&entity->job.list.lock);
+ spin_unlock(&sched->job.list.lock);
}
static int
@@ -1113,22 +1117,28 @@ bind_validate_region(struct nouveau_job *job)
}
static void
-bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
+bind_link_gpuvas(struct bind_job_op *bop)
{
+ struct nouveau_uvma_prealloc *new = &bop->new;
+ struct drm_gpuvm_bo *vm_bo = bop->vm_bo;
+ struct drm_gpuva_ops *ops = bop->ops;
struct drm_gpuva_op *op;
drm_gpuva_for_each_op(op, ops) {
switch (op->op) {
case DRM_GPUVA_OP_MAP:
- drm_gpuva_link(&new->map->va);
+ drm_gpuva_link(&new->map->va, vm_bo);
break;
- case DRM_GPUVA_OP_REMAP:
+ case DRM_GPUVA_OP_REMAP: {
+ struct drm_gpuva *va = op->remap.unmap->va;
+
if (op->remap.prev)
- drm_gpuva_link(&new->prev->va);
+ drm_gpuva_link(&new->prev->va, va->vm_bo);
if (op->remap.next)
- drm_gpuva_link(&new->next->va);
- drm_gpuva_unlink(op->remap.unmap->va);
+ drm_gpuva_link(&new->next->va, va->vm_bo);
+ drm_gpuva_unlink(va);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
drm_gpuva_unlink(op->unmap.va);
break;
@@ -1139,21 +1149,70 @@ bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new)
}
static int
-nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+ struct bind_job_op *op;
+ int ret;
+
+ list_for_each_op(op, &bind_job->ops) {
+ struct drm_gpuva_op *va_op;
+
+ if (!op->ops)
+ continue;
+
+ drm_gpuva_for_each_op(va_op, op->ops) {
+ struct drm_gem_object *obj = op_gem_obj(va_op);
+
+ if (unlikely(!obj))
+ continue;
+
+ ret = drm_exec_prepare_obj(exec, obj, num_fences);
+ if (ret)
+ return ret;
+
+ /* Don't validate GEMs backing mappings we're about to
+ * unmap; it's not worth the effort.
+ */
+ if (va_op->op == DRM_GPUVA_OP_UNMAP)
+ continue;
+
+ ret = nouveau_bo_validate(nouveau_gem_object(obj),
+ true, false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
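bind_lock_validate() is written to be restartable from drm_exec_until_all_locked(). A minimal sketch of that locking pattern, assuming a single GEM object; in the real code bind_lock_validate() plays the role of the loop body:

/* Minimal sketch, assuming one object; illustrative only. */
static int lock_single_obj(struct drm_exec *exec, struct drm_gem_object *obj)
{
	int ret = 0;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(exec) {
		ret = drm_exec_prepare_obj(exec, obj, 1);
		/* On contention, all locks are dropped and the loop restarts. */
		drm_exec_retry_on_contention(exec);
	}
	if (ret)
		drm_exec_fini(exec);	/* on success the caller unlocks later */

	return ret;
}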
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
- struct drm_exec *exec = &job->exec;
+ struct drm_exec *exec = &vme->exec;
struct bind_job_op *op;
int ret;
list_for_each_op(op, &bind_job->ops) {
if (op->op == OP_MAP) {
- op->gem.obj = drm_gem_object_lookup(job->file_priv,
- op->gem.handle);
- if (!op->gem.obj)
+ struct drm_gem_object *obj = op->gem.obj =
+ drm_gem_object_lookup(job->file_priv,
+ op->gem.handle);
+ if (!obj)
return -ENOENT;
+
+ dma_resv_lock(obj->resv, NULL);
+ op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
+ dma_resv_unlock(obj->resv);
+ if (IS_ERR(op->vm_bo))
+ return PTR_ERR(op->vm_bo);
+
+ drm_gpuvm_bo_extobj_add(op->vm_bo);
}
ret = bind_validate_op(job, op);
@@ -1176,6 +1235,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
* unwind all GPU VA space changes on failure.
*/
nouveau_uvmm_lock(uvmm);
+
list_for_each_op(op, &bind_job->ops) {
switch (op->op) {
case OP_MAP_SPARSE:
@@ -1287,55 +1347,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
}
- drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(exec, vme->flags, 0);
drm_exec_until_all_locked(exec) {
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- ret = drm_exec_prepare_obj(exec, obj, 1);
- drm_exec_retry_on_contention(exec);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
- }
- }
- }
-
- list_for_each_op(op, &bind_job->ops) {
- struct drm_gpuva_op *va_op;
-
- if (IS_ERR_OR_NULL(op->ops))
- continue;
-
- drm_gpuva_for_each_op(va_op, op->ops) {
- struct drm_gem_object *obj = op_gem_obj(va_op);
-
- if (unlikely(!obj))
- continue;
-
- /* Don't validate GEMs backing mappings we're about to
- * unmap, it's not worth the effort.
- */
- if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
- continue;
-
- ret = nouveau_bo_validate(nouveau_gem_object(obj),
- true, false);
- if (ret) {
- op = list_last_op(&bind_job->ops);
- goto unwind;
- }
+ ret = bind_lock_validate(job, exec, vme->num_fences);
+ drm_exec_retry_on_contention(exec);
+ if (ret) {
+ op = list_last_op(&bind_job->ops);
+ goto unwind;
}
}
@@ -1364,7 +1382,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
case OP_UNMAP_SPARSE:
case OP_MAP:
case OP_UNMAP:
- bind_link_gpuvas(op->ops, &op->new);
+ bind_link_gpuvas(op);
break;
default:
break;
@@ -1372,10 +1390,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
nouveau_uvmm_unlock(uvmm);
- spin_lock(&entity->job.list.lock);
- list_add(&bind_job->entry, &entity->job.list.head);
- spin_unlock(&entity->job.list.lock);
-
return 0;
unwind_continue:
@@ -1410,21 +1424,17 @@ unwind:
}
nouveau_uvmm_unlock(uvmm);
- drm_exec_fini(exec);
+ drm_gpuvm_exec_unlock(vme);
return ret;
}
static void
-nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
+ struct drm_gpuvm_exec *vme)
{
- struct drm_exec *exec = &job->exec;
- struct drm_gem_object *obj;
- unsigned long index;
-
- drm_exec_for_each_locked_object(exec, index, obj)
- dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
- drm_exec_fini(exec);
+ drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+ job->resv_usage, job->resv_usage);
+ drm_gpuvm_exec_unlock(vme);
}
static struct dma_fence *
@@ -1462,14 +1472,11 @@ out:
}
static void
-nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
{
- struct nouveau_uvmm_bind_job *bind_job =
- container_of(work, struct nouveau_uvmm_bind_job, work);
- struct nouveau_job *job = &bind_job->base;
+ struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
- struct nouveau_sched_entity *entity = job->entity;
- struct bind_job_op *op, *next;
+ struct bind_job_op *op;
list_for_each_op(op, &bind_job->ops) {
struct drm_gem_object *obj = op->gem.obj;
@@ -1511,42 +1518,27 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
if (!IS_ERR_OR_NULL(op->ops))
drm_gpuva_ops_free(&uvmm->base, op->ops);
+ if (!IS_ERR_OR_NULL(op->vm_bo)) {
+ dma_resv_lock(obj->resv, NULL);
+ drm_gpuvm_bo_put(op->vm_bo);
+ dma_resv_unlock(obj->resv);
+ }
+
if (obj)
drm_gem_object_put(obj);
}
- spin_lock(&entity->job.list.lock);
- list_del(&bind_job->entry);
- spin_unlock(&entity->job.list.lock);
-
+ nouveau_job_done(job);
complete_all(&bind_job->complete);
- wake_up(&entity->job.wq);
-
- /* Remove and free ops after removing the bind job from the job list to
- * avoid races against bind_validate_map_sparse().
- */
- list_for_each_op_safe(op, next, &bind_job->ops) {
- list_del(&op->entry);
- kfree(op);
- }
nouveau_uvmm_bind_job_put(bind_job);
}
-static void
-nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
-{
- struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
- struct nouveau_sched_entity *entity = job->entity;
-
- nouveau_sched_entity_qwork(entity, &bind_job->work);
-}
-
static struct nouveau_job_ops nouveau_bind_job_ops = {
.submit = nouveau_uvmm_bind_job_submit,
.armed_submit = nouveau_uvmm_bind_job_armed_submit,
.run = nouveau_uvmm_bind_job_run,
- .free = nouveau_uvmm_bind_job_free_qwork,
+ .free = nouveau_uvmm_bind_job_cleanup,
};
static int
@@ -1607,7 +1599,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
return ret;
INIT_LIST_HEAD(&job->ops);
- INIT_LIST_HEAD(&job->entry);
for (i = 0; i < __args->op.count; i++) {
ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
@@ -1618,11 +1609,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
}
init_completion(&job->complete);
- INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
- args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
+ args.sched = __args->sched;
+ args.credits = 1;
+
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
@@ -1648,18 +1640,6 @@ err_free:
return ret;
}
-int
-nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
- void *data,
- struct drm_file *file_priv)
-{
- struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct drm_nouveau_vm_init *init = data;
-
- return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
- init->kernel_managed_size);
-}
-
static int
nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args)
{
@@ -1760,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched_entity = &cli->sched_entity;
+ args.sched = cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
@@ -1776,15 +1756,18 @@ void
nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_map(uvma, mem);
- drm_gpuva_invalidate(va, false);
+ nouveau_uvma_map(uvma, mem);
+ drm_gpuva_invalidate(va, false);
+ }
}
}
@@ -1792,29 +1775,62 @@ void
nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo)
{
struct drm_gem_object *obj = &nvbo->bo.base;
+ struct drm_gpuvm_bo *vm_bo;
struct drm_gpuva *va;
dma_resv_assert_held(obj->resv);
- drm_gem_for_each_gpuva(va, obj) {
- struct nouveau_uvma *uvma = uvma_from_va(va);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gpuvm_bo_for_each_va(va, vm_bo) {
+ struct nouveau_uvma *uvma = uvma_from_va(va);
- nouveau_uvma_unmap(uvma);
- drm_gpuva_invalidate(va, true);
+ nouveau_uvma_unmap(uvma);
+ drm_gpuva_invalidate(va, true);
+ }
}
}
+static void
+nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
+{
+ struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
+
+ kfree(uvmm);
+}
+
+static int
+nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
+ return nouveau_bo_validate(nvbo, true, false);
+}
+
+static const struct drm_gpuvm_ops gpuvm_ops = {
+ .vm_free = nouveau_uvmm_free,
+ .vm_bo_validate = nouveau_uvmm_bo_validate,
+};
+
int
-nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
- u64 kernel_managed_addr, u64 kernel_managed_size)
+nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
{
+ struct nouveau_uvmm *uvmm;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct drm_device *drm = cli->drm->dev;
+ struct drm_gem_object *r_obj;
+ struct drm_nouveau_vm_init *init = data;
+ u64 kernel_managed_end;
int ret;
- u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size;
- mutex_init(&uvmm->mutex);
- dma_resv_init(&uvmm->resv);
- mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
- mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
+ if (check_add_overflow(init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &kernel_managed_end))
+ return -EINVAL;
+
+ if (kernel_managed_end > NOUVEAU_VA_SPACE_END)
+ return -EINVAL;
mutex_lock(&cli->mutex);
@@ -1823,39 +1839,48 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
goto out_unlock;
}
- if (kernel_managed_end <= kernel_managed_addr) {
- ret = -EINVAL;
+ uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
+ if (!uvmm) {
+ ret = -ENOMEM;
goto out_unlock;
}
- if (kernel_managed_end > NOUVEAU_VA_SPACE_END) {
- ret = -EINVAL;
+ r_obj = drm_gpuvm_resv_object_alloc(drm);
+ if (!r_obj) {
+ kfree(uvmm);
+ ret = -ENOMEM;
goto out_unlock;
}
- uvmm->kernel_managed_addr = kernel_managed_addr;
- uvmm->kernel_managed_size = kernel_managed_size;
+ mutex_init(&uvmm->mutex);
+ mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
- drm_gpuvm_init(&uvmm->base, cli->name,
+ drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
NOUVEAU_VA_SPACE_START,
NOUVEAU_VA_SPACE_END,
- kernel_managed_addr, kernel_managed_size,
- NULL);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ &gpuvm_ops);
+ /* GPUVM holds its own reference from here on; drop ours. */
+ drm_gem_object_put(r_obj);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
- kernel_managed_addr, kernel_managed_size,
- NULL, 0, &cli->uvmm.vmm.vmm);
+ init->kernel_managed_addr,
+ init->kernel_managed_size,
+ NULL, 0, &uvmm->vmm.vmm);
if (ret)
- goto out_free_gpuva_mgr;
+ goto out_gpuvm_fini;
- cli->uvmm.vmm.cli = cli;
+ uvmm->vmm.cli = cli;
+ cli->uvmm.ptr = uvmm;
mutex_unlock(&cli->mutex);
return 0;
-out_free_gpuva_mgr:
- drm_gpuvm_destroy(&uvmm->base);
+out_gpuvm_fini:
+ drm_gpuvm_put(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
@@ -1867,15 +1892,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
struct nouveau_cli *cli = uvmm->vmm.cli;
- struct nouveau_sched_entity *entity = &cli->sched_entity;
struct drm_gpuva *va, *next;
- if (!cli)
- return;
-
- rmb(); /* for list_empty to work without lock */
- wait_event(entity->job.wq, list_empty(&entity->job.list.head));
-
nouveau_uvmm_lock(uvmm);
drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
struct nouveau_uvma *uvma = uvma_from_va(va);
@@ -1910,8 +1928,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuvm_destroy(&uvmm->base);
+ drm_gpuvm_put(&uvmm->base);
mutex_unlock(&cli->mutex);
-
- dma_resv_fini(&uvmm->resv);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index a308c5976..9d3c34858 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -12,12 +12,6 @@ struct nouveau_uvmm {
struct nouveau_vmm vmm;
struct maple_tree region_mt;
struct mutex mutex;
- struct dma_resv resv;
-
- u64 kernel_managed_addr;
- u64 kernel_managed_size;
-
- bool disabled;
};
struct nouveau_uvma_region {
@@ -50,8 +44,6 @@ struct nouveau_uvmm_bind_job {
struct nouveau_job base;
struct kref kref;
- struct list_head entry;
- struct work_struct work;
struct completion complete;
/* struct bind_job_op */
@@ -60,7 +52,7 @@ struct nouveau_uvmm_bind_job {
struct nouveau_uvmm_bind_job_args {
struct drm_file *file_priv;
- struct nouveau_sched_entity *sched_entity;
+ struct nouveau_sched *sched;
unsigned int flags;
@@ -82,8 +74,6 @@ struct nouveau_uvmm_bind_job_args {
#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
-int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
- u64 kernel_managed_addr, u64 kernel_managed_size);
void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 87a62d4ff..7d4716dcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -24,7 +24,6 @@
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
-#include "chid.h"
#include "runl.h"
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4bf486b57..cb05f7f48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
+static void of_fini(void *p)
+{
+ kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
- .fini = (void(*)(void *))kfree,
+ .fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,
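Calling kfree() through a function pointer cast to a different prototype is undefined behaviour and trips cast-function-type warnings as well as kCFI-enabled builds; the wrapper above gives the callback its exact expected signature. An illustration of the before/after (hypothetical struct):

/* Why the cast was a problem:
 *
 *   .fini = (void (*)(void *))kfree,  // kfree() is void (*)(const void *);
 *                                     // an indirect call via the cast type
 *                                     // is UB and fails kCFI checks
 *   .fini = of_fini,                  // adapter has the exact prototype
 */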
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 7bcbc4895..271bfa038 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -25,6 +25,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gsp.h>
void
gm107_devinit_disable(struct nvkm_devinit *init)
@@ -33,10 +34,13 @@ gm107_devinit_disable(struct nvkm_devinit *init)
u32 r021c00 = nvkm_rd32(device, 0x021c00);
u32 r021c04 = nvkm_rd32(device, 0x021c04);
- if (r021c00 & 0x00000001)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
- if (r021c00 & 0x00000004)
- nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ /* With GSP-RM, only display gets enabled/disabled here */
+ if (!nvkm_gsp_rm(device->gsp)) {
+ if (r021c00 & 0x00000001)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
+ if (r021c00 & 0x00000004)
+ nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
+ }
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 11b4c9c27..666eb93b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,6 +41,7 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
+ rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index d66fc3570..a73a5b589 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -2312,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
int ret;
+ bool enable_gsp = fwif->enable;
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
return -EINVAL;
if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index a7f3fc342..dd5b5a17e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* read barrier to match the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
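The new barriers form a classic publish/consume pairing. A self-contained sketch of the pattern, using placeholder types rather than the driver's own:

/* Illustrative reduction of the pairing above; struct and field names
 * are placeholders, not the driver's types.
 */
struct published {
	void *ptrs;
	refcount_t maps;
};

static void publisher(struct published *p, void *data)
{
	p->ptrs = data;
	smp_wmb();			/* publish ptrs before the refcount */
	refcount_set(&p->maps, 1);
}

static void *consumer(struct published *p)
{
	if (!refcount_inc_not_zero(&p->maps))
		return NULL;
	smp_rmb();			/* pairs with smp_wmb() in publisher() */
	return p->ptrs;			/* guaranteed to see the published value */
}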
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
index e7e8fdf3a..29682722b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c
@@ -28,19 +28,14 @@ static void
gp10b_ltc_init(struct nvkm_ltc *ltc)
{
struct nvkm_device *device = ltc->subdev.device;
- struct iommu_fwspec *spec;
+ u32 sid;
nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
nvkm_wr32(device, 0x100800, ltc->ltc_nr);
- spec = dev_iommu_fwspec_get(device->dev);
- if (spec) {
- u32 sid = spec->ids[0] & 0xffff;
-
- /* stream ID */
+ if (tegra_dev_iommu_get_stream_id(device->dev, &sid))
nvkm_wr32(device, 0x160000, sid << 2);
- }
}
static const struct nvkm_ltc_func
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c26aab493..993691b3c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -22,11 +22,11 @@
#include <linux/hardirq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/component.h>
#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
@@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
if (soc)
dispc->feat = soc->data;
else
- dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
+ dispc->feat = device_get_match_data(&pdev->dev);
r = dispc_errata_i734_wa_init(dispc);
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 02955f976..988888e16 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -22,12 +22,13 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
@@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev)
if (soc)
dss->feat = soc->data;
else
- dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
+ dss->feat = device_get_match_data(&pdev->dev);
/* Map I/O registers, get and setup clocks. */
dss->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index c48fa531c..3421e8389 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -48,7 +48,7 @@ struct omap_gem_object {
* OMAP_BO_MEM_DMA_API flag set)
*
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
- * if they are physically contiguous (when sgt->orig_nents == 1)
+ * if they are physically contiguous
*
* - buffers mapped through the TILER when pin_cnt is not zero, in which
* case the DMA address points to the TILER aperture
@@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
return drm_vma_node_offset_addr(&obj->vma_node);
}
+static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
+{
+ return !(drm_prime_get_contiguous_size(sgt) < size);
+}
+
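drm_prime_get_contiguous_size() returns the length of the physically contiguous run starting at the first entry, so comparing it against the buffer size is a full-contiguity test. A worked example with a hypothetical sg_table:

/* Example (hypothetical sg_table): three 4 KiB pages where only the
 * first two are physically adjacent, buffer size 12 KiB:
 *
 *   drm_prime_get_contiguous_size(sgt) == 8 KiB < 12 KiB -> false
 *
 * A single contiguous 12 KiB run would return 12 KiB -> true.
 */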
static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
{
if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
return true;
- if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+ if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
+ omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
return true;
return false;
@@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
union omap_gem_size gsize;
/* Without a DMM only physically contiguous buffers can be supported. */
- if (sgt->orig_nents != 1 && !priv->has_dmm)
+ if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
return ERR_PTR(-EINVAL);
gsize.bytes = PAGE_ALIGN(size);
@@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj->sgt = sgt;
- if (sgt->orig_nents == 1) {
+ if (omap_gem_sgt_is_contiguous(sgt, size)) {
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a4ac4b477..8f3783742 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -194,6 +194,15 @@ config DRM_PANEL_ILITEK_ILI9341
QVGA (240x320) RGB panels. support serial & parallel rgb
interface.
+config DRM_PANEL_ILITEK_ILI9805
+ tristate "Ilitek ILI9805-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Ilitek ILI9805 controller.
+
config DRM_PANEL_ILITEK_ILI9881C
tristate "Ilitek ILI9881C-based panels"
depends on OF
@@ -737,6 +746,15 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_SYNAPTICS_R63353
+ tristate "Synaptics R63353-based panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Synaptics R63353 controller.
+
config DRM_PANEL_SONY_ACX565AKM
tristate "Sony ACX565AKM panel"
depends on GPIOLIB && OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d10c3de51..d94a644d0 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9882T) += panel-ilitek-ili9882t.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index bc0881495..0ffe8f8c0 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1768,11 +1768,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
- .clock = 162850,
+ .clock = 162680,
.hdisplay = 1200,
- .hsync_start = 1200 + 50,
- .hsync_end = 1200 + 50 + 20,
- .htotal = 1200 + 50 + 20 + 50,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 20,
+ .htotal = 1200 + 60 + 20 + 40,
.vdisplay = 1920,
.vsync_start = 1920 + 116,
.vsync_end = 1920 + 116 + 8,
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 70feee787..e5e3f0b9c 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -588,6 +588,7 @@ static int panel_edp_get_modes(struct drm_panel *panel,
{
struct panel_edp *p = to_panel_edp(panel);
int num = 0;
+ bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes;
bool has_override_edid_mode = p->detected_panel &&
p->detected_panel != ERR_PTR(-EINVAL) &&
p->detected_panel->override_edid_mode;
@@ -598,7 +599,11 @@ static int panel_edp_get_modes(struct drm_panel *panel,
if (!p->edid)
p->edid = drm_get_edid(connector, p->ddc);
- if (p->edid) {
+ /*
+ * If both EDID and hard-coded modes exist, skip the EDID modes to
+ * avoid multiple preferred modes.
+ */
+ if (p->edid && !has_hard_coded_modes) {
if (has_override_edid_mode) {
/*
* override_edid_mode is specified. Use
@@ -615,12 +620,7 @@ static int panel_edp_get_modes(struct drm_panel *panel,
pm_runtime_put_autosuspend(panel->dev);
}
- /*
- * Add hard-coded panel modes. Don't call this if there are no timings
- * and no modes (the generic edp-panel case) because it will clobber
- * the display_info that was already set by drm_add_edid_modes().
- */
- if (p->desc->num_timings || p->desc->num_modes)
+ if (has_hard_coded_modes)
num += panel_edp_get_non_edid_modes(p, connector);
else if (!num)
dev_warn(p->base.dev, "No display modes\n");
@@ -982,6 +982,19 @@ static const struct panel_desc auo_b101ean01 = {
},
};
+static const struct drm_display_mode auo_b116xa3_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
static const struct drm_display_mode auo_b116xak01_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -1835,6 +1848,12 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 80,
+};
+
static const struct panel_delay delay_200_500_e80_d50 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -1854,6 +1873,19 @@ static const struct panel_delay delay_200_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200_d10 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 10,
+};
+
+static const struct panel_delay delay_200_150_e200 = {
+ .hpd_absent = 200,
+ .unprepare = 150,
+ .enable = 200,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1883,39 +1915,76 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
+ &auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
- EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+ EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1",
+ &auo_b116xa3_mode),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0715, &delay_200_150_e200, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0731, &delay_200_500_e80, "NT116WHM-N42"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0741, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f6, &delay_200_500_e200, "NT140FHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x08b2, &delay_200_500_e200, "NT140WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1138, &innolux_n116bca_ea1.delay, "N116BCA-EA1-RC4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1145, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
+
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+
+ EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
+
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index 6de117232..00791ea81 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Elida kd35t133 5.5" MIPI-DSI panel driver
+ * Elida kd35t133 3.5" MIPI-DSI panel driver
* Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
*
* based on
@@ -43,7 +43,6 @@ struct kd35t133 {
struct regulator *vdd;
struct regulator *iovcc;
enum drm_panel_orientation orientation;
- bool prepared;
};
static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
@@ -91,9 +90,6 @@ static int kd35t133_unprepare(struct drm_panel *panel)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0)
dev_err(ctx->dev, "failed to set display off: %d\n", ret);
@@ -109,8 +105,6 @@ static int kd35t133_unprepare(struct drm_panel *panel)
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vdd);
- ctx->prepared = false;
-
return 0;
}
@@ -120,9 +114,6 @@ static int kd35t133_prepare(struct drm_panel *panel)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (ctx->prepared)
- return 0;
-
dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vdd);
if (ret < 0) {
@@ -166,8 +157,6 @@ static int kd35t133_prepare(struct drm_panel *panel)
msleep(50);
- ctx->prepared = true;
-
return 0;
disable_iovcc:
@@ -211,11 +200,6 @@ static int kd35t133_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
drm_mode_probed_add(connector, mode);
- /*
- * TODO: Remove once all drm drivers call
- * drm_connector_set_orientation_from_panel()
- */
- drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
@@ -301,27 +285,11 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
-{
- struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
-
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
-}
-
static void kd35t133_remove(struct mipi_dsi_device *dsi)
{
struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- kd35t133_shutdown(dsi);
-
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
@@ -342,7 +310,6 @@ static struct mipi_dsi_driver kd35t133_driver = {
},
.probe = kd35t133_probe,
.remove = kd35t133_remove,
- .shutdown = kd35t133_shutdown,
};
module_mipi_dsi_driver(kd35t133_driver);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index c73243d85..ff0dc08b9 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -38,6 +38,7 @@
#define HX8394_CMD_SETMIPI 0xba
#define HX8394_CMD_SETOTP 0xbb
#define HX8394_CMD_SETREGBANK 0xbd
+#define HX8394_CMD_UNKNOWN5 0xbf
#define HX8394_CMD_UNKNOWN1 0xc0
#define HX8394_CMD_SETDGCLUT 0xc1
#define HX8394_CMD_SETID 0xc3
@@ -52,6 +53,7 @@
#define HX8394_CMD_SETGIP1 0xd5
#define HX8394_CMD_SETGIP2 0xd6
#define HX8394_CMD_SETGPO 0xd6
+#define HX8394_CMD_UNKNOWN4 0xd8
#define HX8394_CMD_SETSCALING 0xdd
#define HX8394_CMD_SETIDLE 0xdf
#define HX8394_CMD_SETGAMMA 0xe0
@@ -68,7 +70,7 @@ struct hx8394 {
struct gpio_desc *reset_gpio;
struct regulator *vcc;
struct regulator *iovcc;
- bool prepared;
+ enum drm_panel_orientation orientation;
const struct hx8394_panel_desc *desc;
};
@@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
.init_sequence = hsd060bhw4_init_sequence,
};
+static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+ 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+ 0x86);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x6e, 0x6e);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+ 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+ 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+ 0x07, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+ 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+ 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+ 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+ 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+ 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+ 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+ 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+ 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+ 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
+ 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode powkiddy_x55_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 44,
+ .hsync_end = 720 + 44 + 20,
+ .htotal = 720 + 44 + 20 + 20,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 10,
+ .vtotal = 1280 + 12 + 10 + 10,
+ .clock = 63290,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 67,
+ .height_mm = 121,
+};
+
+static const struct hx8394_panel_desc powkiddy_x55_desc = {
+ .mode = &powkiddy_x55_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = powkiddy_x55_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -262,16 +398,11 @@ static int hx8394_unprepare(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
- if (!ctx->prepared)
- return 0;
-
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vcc);
- ctx->prepared = false;
-
return 0;
}
@@ -280,9 +411,6 @@ static int hx8394_prepare(struct drm_panel *panel)
struct hx8394 *ctx = panel_to_hx8394(panel);
int ret;
- if (ctx->prepared)
- return 0;
-
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
ret = regulator_enable(ctx->vcc);
@@ -301,8 +429,6 @@ static int hx8394_prepare(struct drm_panel *panel)
msleep(180);
- ctx->prepared = true;
-
return 0;
disable_vcc:
@@ -335,12 +461,20 @@ static int hx8394_get_modes(struct drm_panel *panel,
return 1;
}
+static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+
+ return ctx->orientation;
+}
+
static const struct drm_panel_funcs hx8394_drm_funcs = {
.disable = hx8394_disable,
.unprepare = hx8394_unprepare,
.prepare = hx8394_prepare,
.enable = hx8394_enable,
.get_modes = hx8394_get_modes,
+ .get_orientation = hx8394_get_orientation,
};
static int hx8394_probe(struct mipi_dsi_device *dsi)
@@ -358,6 +492,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"Failed to get reset gpio\n");
+ ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
@@ -401,27 +541,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static void hx8394_shutdown(struct mipi_dsi_device *dsi)
-{
- struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = drm_panel_disable(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
-
- ret = drm_panel_unprepare(&ctx->panel);
- if (ret < 0)
- dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
-}
-
static void hx8394_remove(struct mipi_dsi_device *dsi)
{
struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- hx8394_shutdown(dsi);
-
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
@@ -431,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, hx8394_of_match);
@@ -438,7 +563,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match);
static struct mipi_dsi_driver hx8394_driver = {
.probe = hx8394_probe,
.remove = hx8394_remove,
- .shutdown = hx8394_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = hx8394_of_match,
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
new file mode 100644
index 000000000..1cbc25758
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BSH Hausgerate GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define ILI9805_EXTCMD_CMD_SET_ENABLE_REG (0xff)
+#define ILI9805_SETEXTC_PARAMETER1 (0xff)
+#define ILI9805_SETEXTC_PARAMETER2 (0x98)
+#define ILI9805_SETEXTC_PARAMETER3 (0x05)
+
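+/*
+ * Each init entry stores its payload as a compound literal:
+ * sizeof((u8[]){...}) yields the number of bytes listed, so the length
+ * field never has to be kept in sync by hand.
+ */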
+#define ILI9805_INSTR(_delay, ...) { \
+ .delay = (_delay), \
+ .len = sizeof((u8[]) {__VA_ARGS__}), \
+ .data = (u8[]){__VA_ARGS__} \
+ }
+
+struct ili9805_instr {
+ size_t len;
+ const u8 *data;
+ u32 delay;
+};
+
+struct ili9805_desc {
+ const char *name;
+ const struct ili9805_instr *init;
+ const size_t init_length;
+ const struct drm_display_mode *mode;
+ u32 width_mm;
+ u32 height_mm;
+};
+
+struct ili9805 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ const struct ili9805_desc *desc;
+
+ struct regulator *dvdd;
+ struct regulator *avdd;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct ili9805_instr gpm1780a0_init[] = {
+ ILI9805_INSTR(100, ILI9805_EXTCMD_CMD_SET_ENABLE_REG, ILI9805_SETEXTC_PARAMETER1,
+ ILI9805_SETEXTC_PARAMETER2, ILI9805_SETEXTC_PARAMETER3),
+ ILI9805_INSTR(100, 0xFD, 0x0F, 0x10, 0x44, 0x00),
+ ILI9805_INSTR(0, 0xf8, 0x18, 0x02, 0x02, 0x18, 0x02, 0x02, 0x30, 0x00,
+ 0x00, 0x30, 0x00, 0x00, 0x30, 0x00, 0x00),
+ ILI9805_INSTR(0, 0xB8, 0x62),
+ ILI9805_INSTR(0, 0xF1, 0x00),
+ ILI9805_INSTR(0, 0xF2, 0x00, 0x58, 0x40),
+ ILI9805_INSTR(0, 0xF3, 0x60, 0x83, 0x04),
+ ILI9805_INSTR(0, 0xFC, 0x04, 0x0F, 0x01),
+ ILI9805_INSTR(0, 0xEB, 0x08, 0x0F),
+ ILI9805_INSTR(0, 0xe0, 0x00, 0x08, 0x0d, 0x0e, 0x0e, 0x0d, 0x0a, 0x08, 0x04,
+ 0x08, 0x0d, 0x0f, 0x0b, 0x1c, 0x14, 0x0a),
+ ILI9805_INSTR(0, 0xe1, 0x00, 0x08, 0x0d, 0x0e, 0x0e, 0x0d, 0x0a, 0x08, 0x04,
+ 0x08, 0x0d, 0x0f, 0x0b, 0x1c, 0x14, 0x0a),
+ ILI9805_INSTR(10, 0xc1, 0x13, 0x39, 0x19, 0x06),
+ ILI9805_INSTR(10, 0xc7, 0xe5),
+ ILI9805_INSTR(10, 0xB1, 0x00, 0x12, 0x14),
+ ILI9805_INSTR(10, 0xB4, 0x02),
+ ILI9805_INSTR(0, 0xBB, 0x14, 0x55),
+ ILI9805_INSTR(0, MIPI_DCS_SET_ADDRESS_MODE, 0x08),
+ ILI9805_INSTR(0, MIPI_DCS_SET_PIXEL_FORMAT, 0x77),
+ ILI9805_INSTR(0, 0x20),
+ ILI9805_INSTR(0, 0xB0, 0x01),
+ ILI9805_INSTR(0, 0xB6, 0x31, 0x00, 0xef),
+ ILI9805_INSTR(0, 0xDF, 0x23),
+ ILI9805_INSTR(0, 0xB9, 0x02, 0x00),
+};
+
+static const struct ili9805_instr tm041xdhg01_init[] = {
+ ILI9805_INSTR(100, ILI9805_EXTCMD_CMD_SET_ENABLE_REG, ILI9805_SETEXTC_PARAMETER1,
+ ILI9805_SETEXTC_PARAMETER2, ILI9805_SETEXTC_PARAMETER3),
+ ILI9805_INSTR(100, 0xFD, 0x0F, 0x13, 0x44, 0x00),
+ ILI9805_INSTR(0, 0xf8, 0x18, 0x02, 0x02, 0x18, 0x02, 0x02, 0x30, 0x01,
+ 0x01, 0x30, 0x01, 0x01, 0x30, 0x01, 0x01),
+ ILI9805_INSTR(0, 0xB8, 0x74),
+ ILI9805_INSTR(0, 0xF1, 0x00),
+ ILI9805_INSTR(0, 0xF2, 0x00, 0x58, 0x40),
+ ILI9805_INSTR(0, 0xFC, 0x04, 0x0F, 0x01),
+ ILI9805_INSTR(0, 0xEB, 0x08, 0x0F),
+ ILI9805_INSTR(0, 0xe0, 0x01, 0x0d, 0x15, 0x0e, 0x0f, 0x0f, 0x0b, 0x08, 0x04,
+ 0x07, 0x0a, 0x0d, 0x0c, 0x15, 0x0f, 0x08),
+ ILI9805_INSTR(0, 0xe1, 0x01, 0x0d, 0x15, 0x0e, 0x0f, 0x0f, 0x0b, 0x08, 0x04,
+ 0x07, 0x0a, 0x0d, 0x0c, 0x15, 0x0f, 0x08),
+ ILI9805_INSTR(10, 0xc1, 0x15, 0x03, 0x03, 0x31),
+ ILI9805_INSTR(10, 0xB1, 0x00, 0x12, 0x14),
+ ILI9805_INSTR(10, 0xB4, 0x02),
+ ILI9805_INSTR(0, 0xBB, 0x14, 0x55),
+ ILI9805_INSTR(0, MIPI_DCS_SET_ADDRESS_MODE, 0x0a),
+ ILI9805_INSTR(0, MIPI_DCS_SET_PIXEL_FORMAT, 0x77),
+ ILI9805_INSTR(0, 0x20),
+ ILI9805_INSTR(0, 0xB0, 0x00),
+ ILI9805_INSTR(0, 0xB6, 0x01),
+ ILI9805_INSTR(0, 0xc2, 0x11),
+ ILI9805_INSTR(0, 0x51, 0xFF),
+ ILI9805_INSTR(0, 0x53, 0x24),
+ ILI9805_INSTR(0, 0x55, 0x00),
+};
+
+static inline struct ili9805 *panel_to_ili9805(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9805, panel);
+}
+
+static int ili9805_power_on(struct ili9805 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = regulator_enable(ctx->avdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable avdd regulator (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->dvdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable dvdd regulator (%d)\n", ret);
+ regulator_disable(ctx->avdd);
+ return ret;
+ }
+
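+	/*
+	 * Toggle the reset line and give the controller time to finish its
+	 * internal reset; the 120 ms wait matches the usual DCS
+	 * reset-complete delay (assumed, no public datasheet).
+	 */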
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ msleep(120);
+
+ return 0;
+}
+
+static int ili9805_power_off(struct ili9805 *ctx)
+{
+ gpiod_set_value(ctx->reset_gpio, 0);
+ regulator_disable(ctx->dvdd);
+ regulator_disable(ctx->avdd);
+
+ return 0;
+}
+
+static int ili9805_activate(struct ili9805 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int i, ret;
+
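+	/* Send the per-panel init table, honouring each entry's post-write delay. */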
+ for (i = 0; i < ctx->desc->init_length; i++) {
+ const struct ili9805_instr *instr = &ctx->desc->init[i];
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, instr->data, instr->len);
+ if (ret < 0)
+ return ret;
+
+ if (instr->delay > 0)
+ msleep(instr->delay);
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret) {
+ dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 6000);
+
+ ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+ if (ret) {
+ dev_err(dev, "Failed to set display ON (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9805_prepare(struct drm_panel *panel)
+{
+ struct ili9805 *ctx = panel_to_ili9805(panel);
+ int ret;
+
+ ret = ili9805_power_on(ctx);
+ if (ret)
+ return ret;
+
+ ret = ili9805_activate(ctx);
+ if (ret) {
+ ili9805_power_off(ctx);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9805_deactivate(struct ili9805 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display OFF (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9805_unprepare(struct drm_panel *panel)
+{
+ struct ili9805 *ctx = panel_to_ili9805(panel);
+
+ ili9805_deactivate(ctx);
+ ili9805_power_off(ctx);
+
+ return 0;
+}
+
+static const struct drm_display_mode gpm1780a0_timing = {
+ .clock = 26227,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 2,
+ .htotal = 480 + 10 + 2 + 36,
+
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+	.vsync_end = 480 + 2 + 4,
+ .vtotal = 480 + 2 + 4 + 10,
+};
+
+static const struct drm_display_mode tm041xdhg01_timing = {
+ .clock = 26227,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 10,
+ .hsync_end = 480 + 10 + 2,
+ .htotal = 480 + 10 + 2 + 36,
+
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+	.vsync_end = 768 + 2 + 4,
+ .vtotal = 768 + 2 + 4 + 10,
+};
+
+static int ili9805_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ili9805 *ctx = panel_to_ili9805(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
+ if (!mode) {
+ dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
+ ctx->desc->mode->hdisplay,
+ ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ili9805_funcs = {
+ .prepare = ili9805_prepare,
+ .unprepare = ili9805_unprepare,
+ .get_modes = ili9805_get_modes,
+};
+
+static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct ili9805 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+ ctx->desc = of_device_get_match_data(&dsi->dev);
+
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
+ dsi->lanes = 2;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9805_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
+ if (IS_ERR(ctx->dvdd))
+ return PTR_ERR(ctx->dvdd);
+ ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
+ if (IS_ERR(ctx->avdd))
+ return PTR_ERR(ctx->avdd);
+
+ ctx->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->panel.prepare_prev_first = true;
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(&dsi->dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ili9805_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct ili9805 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct ili9805_desc gpm1780a0_desc = {
+ .init = gpm1780a0_init,
+ .init_length = ARRAY_SIZE(gpm1780a0_init),
+ .mode = &gpm1780a0_timing,
+ .width_mm = 65,
+ .height_mm = 65,
+};
+
+static const struct ili9805_desc tm041xdhg01_desc = {
+ .init = tm041xdhg01_init,
+ .init_length = ARRAY_SIZE(tm041xdhg01_init),
+ .mode = &tm041xdhg01_timing,
+ .width_mm = 42,
+ .height_mm = 96,
+};
+
+static const struct of_device_id ili9805_of_match[] = {
+ { .compatible = "giantplus,gpm1790a0", .data = &gpm1780a0_desc },
+ { .compatible = "tianma,tm041xdhg01", .data = &tm041xdhg01_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9805_of_match);
+
+static struct mipi_dsi_driver ili9805_dsi_driver = {
+ .probe = ili9805_dsi_probe,
+ .remove = ili9805_dsi_remove,
+ .driver = {
+ .name = "ili9805-dsi",
+ .of_match_table = ili9805_of_match,
+ },
+};
+module_mipi_dsi_driver(ili9805_dsi_driver);
+
+MODULE_AUTHOR("Matthias Proske <Matthias.Proske@bshg.com>");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_DESCRIPTION("Ilitek ILI9805 Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 7838947a1..2ffe5f68a 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -830,6 +830,203 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(0),
};
+static const struct ili9881c_instr am8001280g_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0xD3),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x07, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x10, 0x01),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x03),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x03),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x00),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+
+ ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x00),
+ ILI9881C_COMMAND_INSTR(0x61, 0x01),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x66, 0x06),
+ ILI9881C_COMMAND_INSTR(0x67, 0x07),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x08),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x00),
+ ILI9881C_COMMAND_INSTR(0x77, 0x01),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x08),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x30),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x15),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0B),
+ ILI9881C_COMMAND_INSTR(0x50, 0xa5),
+ ILI9881C_COMMAND_INSTR(0x51, 0xa0),
+ ILI9881C_COMMAND_INSTR(0x53, 0x70),
+ ILI9881C_COMMAND_INSTR(0x55, 0x7A),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+
+ ILI9881C_COMMAND_INSTR(0xA0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xA8, 0xD4),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x2B),
+ ILI9881C_COMMAND_INSTR(0xAB, 0xB5),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x19),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x53),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x6A),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x31),
+
+ ILI9881C_COMMAND_INSTR(0xC0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xC8, 0xD4),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x2B),
+ ILI9881C_COMMAND_INSTR(0xCB, 0xB5),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x19),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x53),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x1A),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x6A),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x31),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
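+	/* Back on page 0: 0x2c sets the BCTRL, display-dimming and backlight bits. */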
+ ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c),
+ ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1014,6 +1211,23 @@ static const struct drm_display_mode w552946aba_default_mode = {
.height_mm = 121,
};
+static const struct drm_display_mode am8001280g_default_mode = {
+ .clock = 67911,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 32,
+ .htotal = 800 + 20 + 32 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 8,
+ .vtotal = 1280 + 6 + 8 + 4,
+
+ .width_mm = 94,
+ .height_mm = 151,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1094,6 +1308,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ ctx->panel.prepare_prev_first = true;
+
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -1145,11 +1361,20 @@ static const struct ili9881c_desc w552946aba_desc = {
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
+static const struct ili9881c_desc am8001280g_desc = {
+ .init = am8001280g_init,
+ .init_length = ARRAY_SIZE(am8001280g_init),
+ .mode = &am8001280g_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
+ { .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index c44c69456..94d89ffd5 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -28,6 +28,7 @@ struct nv3051d_panel_info {
unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_flags;
+ u32 mode_flags;
};
struct panel_nv3051d {
@@ -387,15 +388,7 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
-
- /*
- * The panel in the RG351V is identical to the 353P, except it
- * requires MIPI_DSI_CLOCK_NON_CONTINUOUS to operate correctly.
- */
- if (of_device_is_compatible(dev->of_node, "anbernic,rg351v-panel"))
- dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->mode_flags = ctx->panel_info->mode_flags;
drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -483,16 +476,56 @@ static const struct drm_display_mode nv3051d_rgxx3_modes[] = {
},
};
-static const struct nv3051d_panel_info nv3051d_rgxx3_info = {
+static const struct drm_display_mode nv3051d_rk2023_modes[] = {
+ {
+ .hdisplay = 640,
+ .hsync_start = 640 + 40,
+ .hsync_end = 640 + 40 + 2,
+ .htotal = 640 + 40 + 2 + 80,
+ .vdisplay = 480,
+ .vsync_start = 480 + 18,
+ .vsync_end = 480 + 18 + 2,
+ .vtotal = 480 + 18 + 2 + 4,
+ .clock = 24150,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
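+/*
+ * The RG351V panel is identical to the RG353P one, except that it
+ * requires MIPI_DSI_CLOCK_NON_CONTINUOUS to operate correctly.
+ */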
+static const struct nv3051d_panel_info nv3051d_rg351v_info = {
.display_modes = nv3051d_rgxx3_modes,
.num_modes = ARRAY_SIZE(nv3051d_rgxx3_modes),
.width_mm = 70,
.height_mm = 57,
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS,
+};
+
+static const struct nv3051d_panel_info nv3051d_rg353p_info = {
+ .display_modes = nv3051d_rgxx3_modes,
+ .num_modes = ARRAY_SIZE(nv3051d_rgxx3_modes),
+ .width_mm = 70,
+ .height_mm = 57,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+};
+
+static const struct nv3051d_panel_info nv3051d_rk2023_info = {
+ .display_modes = nv3051d_rk2023_modes,
+ .num_modes = ARRAY_SIZE(nv3051d_rk2023_modes),
+ .width_mm = 70,
+ .height_mm = 57,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
static const struct of_device_id newvision_nv3051d_of_match[] = {
- { .compatible = "newvision,nv3051d", .data = &nv3051d_rgxx3_info },
+ { .compatible = "anbernic,rg351v-panel", .data = &nv3051d_rg351v_info },
+ { .compatible = "anbernic,rg353p-panel", .data = &nv3051d_rg353p_info },
+ { .compatible = "powkiddy,rk2023-panel", .data = &nv3051d_rk2023_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, newvision_nv3051d_of_match);
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 71e57de6d..1aab0c9ae 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -20,11 +20,18 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct nv3052c_reg {
+ u8 cmd;
+ u8 val;
+};
+
struct nv3052c_panel_info {
const struct drm_display_mode *display_modes;
unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_format, bus_flags;
+ const struct nv3052c_reg *panel_regs;
+ unsigned int panel_regs_len;
};
struct nv3052c {
@@ -36,15 +43,10 @@ struct nv3052c {
struct gpio_desc *reset_gpio;
};
-struct nv3052c_reg {
- u8 cmd;
- u8 val;
-};
-
-static const struct nv3052c_reg nv3052c_panel_regs[] = {
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x01 },
+static const struct nv3052c_reg ltk035c5444t_panel_regs[] = {
+ // EXTC Command set enable, select page 1
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 },
+ // Mostly unknown registers
{ 0xe3, 0x00 },
{ 0x40, 0x00 },
{ 0x03, 0x40 },
@@ -62,15 +64,15 @@ static const struct nv3052c_reg nv3052c_panel_regs[] = {
{ 0x25, 0x06 },
{ 0x26, 0x14 },
{ 0x27, 0x14 },
- { 0x38, 0xcc },
- { 0x39, 0xd7 },
- { 0x3a, 0x4a },
+ { 0x38, 0xcc }, // VCOM_ADJ1
+ { 0x39, 0xd7 }, // VCOM_ADJ2
+ { 0x3a, 0x4a }, // VCOM_ADJ3
{ 0x28, 0x40 },
{ 0x29, 0x01 },
{ 0x2a, 0xdf },
{ 0x49, 0x3c },
- { 0x91, 0x77 },
- { 0x92, 0x77 },
+ { 0x91, 0x77 }, // EXTPW_CTRL2
+ { 0x92, 0x77 }, // EXTPW_CTRL3
{ 0xa0, 0x55 },
{ 0xa1, 0x50 },
{ 0xa4, 0x9c },
@@ -94,123 +96,321 @@ static const struct nv3052c_reg nv3052c_panel_regs[] = {
{ 0xb8, 0x26 },
{ 0xf0, 0x00 },
{ 0xf6, 0xc0 },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x02 },
- { 0xb0, 0x0b },
- { 0xb1, 0x16 },
- { 0xb2, 0x17 },
- { 0xb3, 0x2c },
- { 0xb4, 0x32 },
- { 0xb5, 0x3b },
- { 0xb6, 0x29 },
- { 0xb7, 0x40 },
- { 0xb8, 0x0d },
- { 0xb9, 0x05 },
- { 0xba, 0x12 },
- { 0xbb, 0x10 },
- { 0xbc, 0x12 },
- { 0xbd, 0x15 },
- { 0xbe, 0x19 },
- { 0xbf, 0x0e },
- { 0xc0, 0x16 },
- { 0xc1, 0x0a },
- { 0xd0, 0x0c },
- { 0xd1, 0x17 },
- { 0xd2, 0x14 },
- { 0xd3, 0x2e },
- { 0xd4, 0x32 },
- { 0xd5, 0x3c },
- { 0xd6, 0x22 },
- { 0xd7, 0x3d },
- { 0xd8, 0x0d },
- { 0xd9, 0x07 },
- { 0xda, 0x13 },
- { 0xdb, 0x13 },
- { 0xdc, 0x11 },
- { 0xdd, 0x15 },
- { 0xde, 0x19 },
- { 0xdf, 0x10 },
- { 0xe0, 0x17 },
- { 0xe1, 0x0a },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x03 },
- { 0x00, 0x2a },
- { 0x01, 0x2a },
- { 0x02, 0x2a },
- { 0x03, 0x2a },
- { 0x04, 0x61 },
- { 0x05, 0x80 },
- { 0x06, 0xc7 },
- { 0x07, 0x01 },
- { 0x08, 0x03 },
- { 0x09, 0x04 },
- { 0x70, 0x22 },
- { 0x71, 0x80 },
- { 0x30, 0x2a },
- { 0x31, 0x2a },
- { 0x32, 0x2a },
- { 0x33, 0x2a },
- { 0x34, 0x61 },
- { 0x35, 0xc5 },
- { 0x36, 0x80 },
- { 0x37, 0x23 },
- { 0x40, 0x03 },
- { 0x41, 0x04 },
- { 0x42, 0x05 },
- { 0x43, 0x06 },
- { 0x44, 0x11 },
- { 0x45, 0xe8 },
- { 0x46, 0xe9 },
- { 0x47, 0x11 },
- { 0x48, 0xea },
- { 0x49, 0xeb },
- { 0x50, 0x07 },
- { 0x51, 0x08 },
- { 0x52, 0x09 },
- { 0x53, 0x0a },
- { 0x54, 0x11 },
- { 0x55, 0xec },
- { 0x56, 0xed },
- { 0x57, 0x11 },
- { 0x58, 0xef },
- { 0x59, 0xf0 },
- { 0xb1, 0x01 },
- { 0xb4, 0x15 },
- { 0xb5, 0x16 },
- { 0xb6, 0x09 },
- { 0xb7, 0x0f },
- { 0xb8, 0x0d },
- { 0xb9, 0x0b },
- { 0xba, 0x00 },
- { 0xc7, 0x02 },
- { 0xca, 0x17 },
- { 0xcb, 0x18 },
- { 0xcc, 0x0a },
- { 0xcd, 0x10 },
- { 0xce, 0x0e },
- { 0xcf, 0x0c },
- { 0xd0, 0x00 },
- { 0x81, 0x00 },
- { 0x84, 0x15 },
- { 0x85, 0x16 },
- { 0x86, 0x10 },
- { 0x87, 0x0a },
- { 0x88, 0x0c },
- { 0x89, 0x0e },
- { 0x8a, 0x02 },
- { 0x97, 0x00 },
- { 0x9a, 0x17 },
- { 0x9b, 0x18 },
- { 0x9c, 0x0f },
- { 0x9d, 0x09 },
- { 0x9e, 0x0b },
- { 0x9f, 0x0d },
- { 0xa0, 0x01 },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x02 },
+ // EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Set gray scale voltage to adjust gamma
+ { 0xb0, 0x0b }, // PGAMVR0
+ { 0xb1, 0x16 }, // PGAMVR1
+ { 0xb2, 0x17 }, // PGAMVR2
+ { 0xb3, 0x2c }, // PGAMVR3
+ { 0xb4, 0x32 }, // PGAMVR4
+ { 0xb5, 0x3b }, // PGAMVR5
+ { 0xb6, 0x29 }, // PGAMPR0
+ { 0xb7, 0x40 }, // PGAMPR1
+ { 0xb8, 0x0d }, // PGAMPK0
+ { 0xb9, 0x05 }, // PGAMPK1
+ { 0xba, 0x12 }, // PGAMPK2
+ { 0xbb, 0x10 }, // PGAMPK3
+ { 0xbc, 0x12 }, // PGAMPK4
+ { 0xbd, 0x15 }, // PGAMPK5
+ { 0xbe, 0x19 }, // PGAMPK6
+ { 0xbf, 0x0e }, // PGAMPK7
+ { 0xc0, 0x16 }, // PGAMPK8
+ { 0xc1, 0x0a }, // PGAMPK9
+ // Set gray scale voltage to adjust gamma
+ { 0xd0, 0x0c }, // NGAMVR0
+	{ 0xd1, 0x17 }, // NGAMVR1
+	{ 0xd2, 0x14 }, // NGAMVR2
+	{ 0xd3, 0x2e }, // NGAMVR3
+	{ 0xd4, 0x32 }, // NGAMVR4
+	{ 0xd5, 0x3c }, // NGAMVR5
+ { 0xd6, 0x22 }, // NGAMPR0
+ { 0xd7, 0x3d }, // NGAMPR1
+ { 0xd8, 0x0d }, // NGAMPK0
+ { 0xd9, 0x07 }, // NGAMPK1
+ { 0xda, 0x13 }, // NGAMPK2
+ { 0xdb, 0x13 }, // NGAMPK3
+ { 0xdc, 0x11 }, // NGAMPK4
+ { 0xdd, 0x15 }, // NGAMPK5
+ { 0xde, 0x19 }, // NGAMPK6
+ { 0xdf, 0x10 }, // NGAMPK7
+ { 0xe0, 0x17 }, // NGAMPK8
+ { 0xe1, 0x0a }, // NGAMPK9
+ // EXTC Command set enable, select page 3
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 },
+ // Set various timing settings
+ { 0x00, 0x2a }, // GIP_VST_1
+ { 0x01, 0x2a }, // GIP_VST_2
+ { 0x02, 0x2a }, // GIP_VST_3
+ { 0x03, 0x2a }, // GIP_VST_4
+ { 0x04, 0x61 }, // GIP_VST_5
+ { 0x05, 0x80 }, // GIP_VST_6
+ { 0x06, 0xc7 }, // GIP_VST_7
+ { 0x07, 0x01 }, // GIP_VST_8
+ { 0x08, 0x03 }, // GIP_VST_9
+ { 0x09, 0x04 }, // GIP_VST_10
+ { 0x70, 0x22 }, // GIP_ECLK1
+ { 0x71, 0x80 }, // GIP_ECLK2
+ { 0x30, 0x2a }, // GIP_CLK_1
+ { 0x31, 0x2a }, // GIP_CLK_2
+ { 0x32, 0x2a }, // GIP_CLK_3
+ { 0x33, 0x2a }, // GIP_CLK_4
+ { 0x34, 0x61 }, // GIP_CLK_5
+ { 0x35, 0xc5 }, // GIP_CLK_6
+ { 0x36, 0x80 }, // GIP_CLK_7
+ { 0x37, 0x23 }, // GIP_CLK_8
+ { 0x40, 0x03 }, // GIP_CLKA_1
+ { 0x41, 0x04 }, // GIP_CLKA_2
+ { 0x42, 0x05 }, // GIP_CLKA_3
+ { 0x43, 0x06 }, // GIP_CLKA_4
+ { 0x44, 0x11 }, // GIP_CLKA_5
+ { 0x45, 0xe8 }, // GIP_CLKA_6
+ { 0x46, 0xe9 }, // GIP_CLKA_7
+ { 0x47, 0x11 }, // GIP_CLKA_8
+ { 0x48, 0xea }, // GIP_CLKA_9
+ { 0x49, 0xeb }, // GIP_CLKA_10
+ { 0x50, 0x07 }, // GIP_CLKB_1
+ { 0x51, 0x08 }, // GIP_CLKB_2
+ { 0x52, 0x09 }, // GIP_CLKB_3
+ { 0x53, 0x0a }, // GIP_CLKB_4
+ { 0x54, 0x11 }, // GIP_CLKB_5
+ { 0x55, 0xec }, // GIP_CLKB_6
+ { 0x56, 0xed }, // GIP_CLKB_7
+ { 0x57, 0x11 }, // GIP_CLKB_8
+ { 0x58, 0xef }, // GIP_CLKB_9
+ { 0x59, 0xf0 }, // GIP_CLKB_10
+ // Map internal GOA signals to GOA output pad
+ { 0xb1, 0x01 }, // PANELD2U2
+ { 0xb4, 0x15 }, // PANELD2U5
+ { 0xb5, 0x16 }, // PANELD2U6
+ { 0xb6, 0x09 }, // PANELD2U7
+ { 0xb7, 0x0f }, // PANELD2U8
+ { 0xb8, 0x0d }, // PANELD2U9
+ { 0xb9, 0x0b }, // PANELD2U10
+ { 0xba, 0x00 }, // PANELD2U11
+ { 0xc7, 0x02 }, // PANELD2U24
+ { 0xca, 0x17 }, // PANELD2U27
+ { 0xcb, 0x18 }, // PANELD2U28
+ { 0xcc, 0x0a }, // PANELD2U29
+ { 0xcd, 0x10 }, // PANELD2U30
+ { 0xce, 0x0e }, // PANELD2U31
+ { 0xcf, 0x0c }, // PANELD2U32
+ { 0xd0, 0x00 }, // PANELD2U33
+ // Map internal GOA signals to GOA output pad
+ { 0x81, 0x00 }, // PANELU2D2
+ { 0x84, 0x15 }, // PANELU2D5
+ { 0x85, 0x16 }, // PANELU2D6
+ { 0x86, 0x10 }, // PANELU2D7
+ { 0x87, 0x0a }, // PANELU2D8
+ { 0x88, 0x0c }, // PANELU2D9
+ { 0x89, 0x0e }, // PANELU2D10
+ { 0x8a, 0x02 }, // PANELU2D11
+ { 0x97, 0x00 }, // PANELU2D24
+ { 0x9a, 0x17 }, // PANELU2D27
+ { 0x9b, 0x18 }, // PANELU2D28
+ { 0x9c, 0x0f }, // PANELU2D29
+ { 0x9d, 0x09 }, // PANELU2D30
+ { 0x9e, 0x0b }, // PANELU2D31
+ { 0x9f, 0x0d }, // PANELU2D32
+ { 0xa0, 0x01 }, // PANELU2D33
+ // EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Unknown registers
+ { 0x01, 0x01 },
+ { 0x02, 0xda },
+ { 0x03, 0xba },
+ { 0x04, 0xa8 },
+ { 0x05, 0x9a },
+ { 0x06, 0x70 },
+ { 0x07, 0xff },
+ { 0x08, 0x91 },
+ { 0x09, 0x90 },
+ { 0x0a, 0xff },
+ { 0x0b, 0x8f },
+ { 0x0c, 0x60 },
+ { 0x0d, 0x58 },
+ { 0x0e, 0x48 },
+ { 0x0f, 0x38 },
+ { 0x10, 0x2b },
+ // EXTC Command set enable, select page 0
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
+ // Display Access Control
+ { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
+};
+
+static const struct nv3052c_reg fs035vg158_panel_regs[] = {
+ // EXTC Command set enable, select page 1
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 },
+ // Mostly unknown registers
+ { 0xe3, 0x00 },
+ { 0x40, 0x00 },
+ { 0x03, 0x40 },
+ { 0x04, 0x00 },
+ { 0x05, 0x03 },
+ { 0x08, 0x00 },
+ { 0x09, 0x07 },
+ { 0x0a, 0x01 },
+ { 0x0b, 0x32 },
+ { 0x0c, 0x32 },
+ { 0x0d, 0x0b },
+ { 0x0e, 0x00 },
+ { 0x23, 0x20 }, // RGB interface control: DE MODE PCLK-N
+ { 0x24, 0x0c },
+ { 0x25, 0x06 },
+ { 0x26, 0x14 },
+ { 0x27, 0x14 },
+	{ 0x38, 0x9c }, // VCOM_ADJ1, different to ltk035c5444t
+	{ 0x39, 0xa7 }, // VCOM_ADJ2, different to ltk035c5444t
+	{ 0x3a, 0x50 }, // VCOM_ADJ3, different to ltk035c5444t
+ { 0x28, 0x40 },
+ { 0x29, 0x01 },
+ { 0x2a, 0xdf },
+ { 0x49, 0x3c },
+	{ 0x91, 0x57 }, // EXTPW_CTRL2, different to ltk035c5444t
+	{ 0x92, 0x57 }, // EXTPW_CTRL3, different to ltk035c5444t
+ { 0xa0, 0x55 },
+ { 0xa1, 0x50 },
+ { 0xa4, 0x9c },
+ { 0xa7, 0x02 },
+ { 0xa8, 0x01 },
+ { 0xa9, 0x01 },
+ { 0xaa, 0xfc },
+ { 0xab, 0x28 },
+ { 0xac, 0x06 },
+ { 0xad, 0x06 },
+ { 0xae, 0x06 },
+ { 0xaf, 0x03 },
+ { 0xb0, 0x08 },
+ { 0xb1, 0x26 },
+ { 0xb2, 0x28 },
+ { 0xb3, 0x28 },
+	{ 0xb4, 0x03 }, // Unknown, different to ltk035c5444t
+ { 0xb5, 0x08 },
+ { 0xb6, 0x26 },
+ { 0xb7, 0x08 },
+ { 0xb8, 0x26 },
+ { 0xf0, 0x00 },
+ { 0xf6, 0xc0 },
+	// EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Set gray scale voltage to adjust gamma
+ { 0xb0, 0x0b }, // PGAMVR0
+ { 0xb1, 0x16 }, // PGAMVR1
+ { 0xb2, 0x17 }, // PGAMVR2
+ { 0xb3, 0x2c }, // PGAMVR3
+ { 0xb4, 0x32 }, // PGAMVR4
+ { 0xb5, 0x3b }, // PGAMVR5
+ { 0xb6, 0x29 }, // PGAMPR0
+ { 0xb7, 0x40 }, // PGAMPR1
+ { 0xb8, 0x0d }, // PGAMPK0
+ { 0xb9, 0x05 }, // PGAMPK1
+ { 0xba, 0x12 }, // PGAMPK2
+ { 0xbb, 0x10 }, // PGAMPK3
+ { 0xbc, 0x12 }, // PGAMPK4
+ { 0xbd, 0x15 }, // PGAMPK5
+ { 0xbe, 0x19 }, // PGAMPK6
+ { 0xbf, 0x0e }, // PGAMPK7
+ { 0xc0, 0x16 }, // PGAMPK8
+ { 0xc1, 0x0a }, // PGAMPK9
+ // Set gray scale voltage to adjust gamma
+ { 0xd0, 0x0c }, // NGAMVR0
+	{ 0xd1, 0x17 }, // NGAMVR1
+	{ 0xd2, 0x14 }, // NGAMVR2
+	{ 0xd3, 0x2e }, // NGAMVR3
+	{ 0xd4, 0x32 }, // NGAMVR4
+	{ 0xd5, 0x3c }, // NGAMVR5
+ { 0xd6, 0x22 }, // NGAMPR0
+ { 0xd7, 0x3d }, // NGAMPR1
+ { 0xd8, 0x0d }, // NGAMPK0
+ { 0xd9, 0x07 }, // NGAMPK1
+ { 0xda, 0x13 }, // NGAMPK2
+ { 0xdb, 0x13 }, // NGAMPK3
+ { 0xdc, 0x11 }, // NGAMPK4
+ { 0xdd, 0x15 }, // NGAMPK5
+ { 0xde, 0x19 }, // NGAMPK6
+ { 0xdf, 0x10 }, // NGAMPK7
+ { 0xe0, 0x17 }, // NGAMPK8
+ { 0xe1, 0x0a }, // NGAMPK9
+ // EXTC Command set enable, select page 3
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 },
+ // Set various timing settings
+ { 0x00, 0x2a }, // GIP_VST_1
+ { 0x01, 0x2a }, // GIP_VST_2
+ { 0x02, 0x2a }, // GIP_VST_3
+ { 0x03, 0x2a }, // GIP_VST_4
+ { 0x04, 0x61 }, // GIP_VST_5
+ { 0x05, 0x80 }, // GIP_VST_6
+ { 0x06, 0xc7 }, // GIP_VST_7
+ { 0x07, 0x01 }, // GIP_VST_8
+ { 0x08, 0x03 }, // GIP_VST_9
+ { 0x09, 0x04 }, // GIP_VST_10
+ { 0x70, 0x22 }, // GIP_ECLK1
+ { 0x71, 0x80 }, // GIP_ECLK2
+ { 0x30, 0x2a }, // GIP_CLK_1
+ { 0x31, 0x2a }, // GIP_CLK_2
+ { 0x32, 0x2a }, // GIP_CLK_3
+ { 0x33, 0x2a }, // GIP_CLK_4
+ { 0x34, 0x61 }, // GIP_CLK_5
+ { 0x35, 0xc5 }, // GIP_CLK_6
+ { 0x36, 0x80 }, // GIP_CLK_7
+ { 0x37, 0x23 }, // GIP_CLK_8
+ { 0x40, 0x03 }, // GIP_CLKA_1
+ { 0x41, 0x04 }, // GIP_CLKA_2
+ { 0x42, 0x05 }, // GIP_CLKA_3
+ { 0x43, 0x06 }, // GIP_CLKA_4
+ { 0x44, 0x11 }, // GIP_CLKA_5
+ { 0x45, 0xe8 }, // GIP_CLKA_6
+ { 0x46, 0xe9 }, // GIP_CLKA_7
+ { 0x47, 0x11 }, // GIP_CLKA_8
+ { 0x48, 0xea }, // GIP_CLKA_9
+ { 0x49, 0xeb }, // GIP_CLKA_10
+ { 0x50, 0x07 }, // GIP_CLKB_1
+ { 0x51, 0x08 }, // GIP_CLKB_2
+ { 0x52, 0x09 }, // GIP_CLKB_3
+ { 0x53, 0x0a }, // GIP_CLKB_4
+ { 0x54, 0x11 }, // GIP_CLKB_5
+ { 0x55, 0xec }, // GIP_CLKB_6
+ { 0x56, 0xed }, // GIP_CLKB_7
+ { 0x57, 0x11 }, // GIP_CLKB_8
+ { 0x58, 0xef }, // GIP_CLKB_9
+ { 0x59, 0xf0 }, // GIP_CLKB_10
+ // Map internal GOA signals to GOA output pad
+ { 0xb1, 0x01 }, // PANELD2U2
+ { 0xb4, 0x15 }, // PANELD2U5
+ { 0xb5, 0x16 }, // PANELD2U6
+ { 0xb6, 0x09 }, // PANELD2U7
+ { 0xb7, 0x0f }, // PANELD2U8
+ { 0xb8, 0x0d }, // PANELD2U9
+ { 0xb9, 0x0b }, // PANELD2U10
+ { 0xba, 0x00 }, // PANELD2U11
+ { 0xc7, 0x02 }, // PANELD2U24
+ { 0xca, 0x17 }, // PANELD2U27
+ { 0xcb, 0x18 }, // PANELD2U28
+ { 0xcc, 0x0a }, // PANELD2U29
+ { 0xcd, 0x10 }, // PANELD2U30
+ { 0xce, 0x0e }, // PANELD2U31
+ { 0xcf, 0x0c }, // PANELD2U32
+ { 0xd0, 0x00 }, // PANELD2U33
+ // Map internal GOA signals to GOA output pad
+ { 0x81, 0x00 }, // PANELU2D2
+ { 0x84, 0x15 }, // PANELU2D5
+ { 0x85, 0x16 }, // PANELU2D6
+ { 0x86, 0x10 }, // PANELU2D7
+ { 0x87, 0x0a }, // PANELU2D8
+ { 0x88, 0x0c }, // PANELU2D9
+ { 0x89, 0x0e }, // PANELU2D10
+ { 0x8a, 0x02 }, // PANELU2D11
+ { 0x97, 0x00 }, // PANELU2D24
+ { 0x9a, 0x17 }, // PANELU2D27
+ { 0x9b, 0x18 }, // PANELU2D28
+ { 0x9c, 0x0f }, // PANELU2D29
+ { 0x9d, 0x09 }, // PANELU2D30
+ { 0x9e, 0x0b }, // PANELU2D31
+ { 0x9f, 0x0d }, // PANELU2D32
+ { 0xa0, 0x01 }, // PANELU2D33
+ // EXTC Command set enable, select page 2
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
+ // Unknown registers
{ 0x01, 0x01 },
{ 0x02, 0xda },
{ 0x03, 0xba },
@@ -227,10 +427,10 @@ static const struct nv3052c_reg nv3052c_panel_regs[] = {
{ 0x0e, 0x48 },
{ 0x0f, 0x38 },
{ 0x10, 0x2b },
- { 0xff, 0x30 },
- { 0xff, 0x52 },
- { 0xff, 0x00 },
- { 0x36, 0x0a },
+ // EXTC Command set enable, select page 0
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
+ // Display Access Control
+ { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
};
static inline struct nv3052c *to_nv3052c(struct drm_panel *panel)
@@ -241,6 +441,8 @@ static inline struct nv3052c *to_nv3052c(struct drm_panel *panel)
static int nv3052c_prepare(struct drm_panel *panel)
{
struct nv3052c *priv = to_nv3052c(panel);
+ const struct nv3052c_reg *panel_regs = priv->panel_info->panel_regs;
+ unsigned int panel_regs_len = priv->panel_info->panel_regs_len;
struct mipi_dbi *dbi = &priv->dbi;
unsigned int i;
int err;
@@ -257,9 +459,9 @@ static int nv3052c_prepare(struct drm_panel *panel)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
usleep_range(5000, 20000);
- for (i = 0; i < ARRAY_SIZE(nv3052c_panel_regs); i++) {
- err = mipi_dbi_command(dbi, nv3052c_panel_regs[i].cmd,
- nv3052c_panel_regs[i].val);
+ for (i = 0; i < panel_regs_len; i++) {
+ err = mipi_dbi_command(dbi, panel_regs[i].cmd,
+ panel_regs[i].val);
if (err) {
dev_err(priv->dev, "Unable to set register: %d\n", err);
@@ -453,6 +655,21 @@ static const struct drm_display_mode ltk035c5444t_modes[] = {
},
};
+static const struct drm_display_mode fs035vg158_modes[] = {
+ { /* 60 Hz */
+ .clock = 21000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 34,
+ .hsync_end = 640 + 34 + 4,
+ .htotal = 640 + 34 + 4 + 20,
+ .vdisplay = 480,
+ .vsync_start = 480 + 12,
+ .vsync_end = 480 + 12 + 4,
+ .vtotal = 480 + 12 + 4 + 6,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
static const struct nv3052c_panel_info ltk035c5444t_panel_info = {
.display_modes = ltk035c5444t_modes,
.num_modes = ARRAY_SIZE(ltk035c5444t_modes),
@@ -460,10 +677,31 @@ static const struct nv3052c_panel_info ltk035c5444t_panel_info = {
.height_mm = 64,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .panel_regs = ltk035c5444t_panel_regs,
+ .panel_regs_len = ARRAY_SIZE(ltk035c5444t_panel_regs),
+};
+
+static const struct nv3052c_panel_info fs035vg158_panel_info = {
+ .display_modes = fs035vg158_modes,
+ .num_modes = ARRAY_SIZE(fs035vg158_modes),
+ .width_mm = 70,
+ .height_mm = 53,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .panel_regs = fs035vg158_panel_regs,
+ .panel_regs_len = ARRAY_SIZE(fs035vg158_panel_regs),
+};
+
+static const struct spi_device_id nv3052c_ids[] = {
+ { "ltk035c5444t", },
+ { "fs035vg158", },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(spi, nv3052c_ids);
static const struct of_device_id nv3052c_of_match[] = {
{ .compatible = "leadtek,ltk035c5444t", .data = &ltk035c5444t_panel_info },
+ { .compatible = "fascontek,fs035vg158", .data = &fs035vg158_panel_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nv3052c_of_match);
@@ -473,6 +711,7 @@ static struct spi_driver nv3052c_driver = {
.name = "nv3052c",
.of_match_table = nv3052c_of_match,
},
+ .id_table = nv3052c_ids,
.probe = nv3052c_probe,
.remove = nv3052c_remove,
};
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index d6dceb858..83a9cf53d 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1023,7 +1023,7 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.hdisplay = 480,
.hsync_start = 480 + 2, /* HFP = 2 */
.hsync_end = 480 + 2 + 0, /* HSync = 0 */
- .htotal = 480 + 2 + 0 + 5, /* HFP = 5 */
+ .htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
.vdisplay = 800,
.vsync_start = 800 + 2, /* VFP = 2 */
.vsync_end = 800 + 2 + 0, /* VSync = 0 */
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 1ba633fd5..72fdab8ad 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1134,6 +1134,37 @@ static const struct panel_desc auo_g133han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_g156han04_timings = {
+ .pixelclock = { 137000000, 141000000, 146000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 60, 60, 60 },
+ .hback_porch = { 90, 92, 111 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 12, 12, 12 },
+ .vback_porch = { 24, 36, 56 },
+ .vsync_len = { 8, 8, 8 },
+};
+
+static const struct panel_desc auo_g156han04 = {
+ .timings = &auo_g156han04_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .delay = {
+ .prepare = 50, /* T2 */
+ .enable = 200, /* T3 */
+ .disable = 110, /* T10 */
+ .unprepare = 1000, /* T13 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode auo_g156xtn01_mode = {
.clock = 76000,
.hdisplay = 1366,
@@ -1324,6 +1355,52 @@ static const struct panel_desc bananapi_s070wv20_ct16 = {
},
};
+static const struct drm_display_mode boe_bp101wx1_100_mode = {
+ .clock = 78945,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 0,
+ .hsync_end = 1280 + 0 + 2,
+	.htotal = 1280 + 0 + 2 + 62,
+ .vdisplay = 800,
+ .vsync_start = 800 + 8,
+ .vsync_end = 800 + 8 + 2,
+	.vtotal = 800 + 8 + 2 + 6,
+};
+
+static const struct panel_desc boe_bp082wx1_100 = {
+ .modes = &boe_bp101wx1_100_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 177,
+ .height = 110,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
+static const struct panel_desc boe_bp101wx1_100 = {
+ .modes = &boe_bp101wx1_100_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing boe_ev121wxm_n10_1850_timing = {
.pixelclock = { 69922000, 71000000, 72293000 },
.hactive = { 1280, 1280, 1280 },
@@ -1973,6 +2050,33 @@ static const struct panel_desc eink_vb3300_kca = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing evervision_vgg644804_timing = {
+ .pixelclock = { 25175000, 25175000, 25175000 },
+ .hactive = { 640, 640, 640 },
+ .hfront_porch = { 16, 16, 16 },
+ .hback_porch = { 82, 114, 170 },
+ .hsync_len = { 5, 30, 30 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 30, 32, 34 },
+ .vsync_len = { 1, 3, 5 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc evervision_vgg644804 = {
+ .timings = &evervision_vgg644804_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 115,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
static const struct display_timing evervision_vgg804821_timing = {
.pixelclock = { 27600000, 33300000, 50000000 },
.hactive = { 800, 800, 800 },
@@ -4235,6 +4339,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
+ .compatible = "auo,g156han04",
+ .data = &auo_g156han04,
+ }, {
.compatible = "auo,g156xtn01",
.data = &auo_g156xtn01,
}, {
@@ -4256,6 +4363,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "bananapi,s070wv20-ct16",
.data = &bananapi_s070wv20_ct16,
}, {
+ .compatible = "boe,bp082wx1-100",
+ .data = &boe_bp082wx1_100,
+ }, {
+ .compatible = "boe,bp101wx1-100",
+ .data = &boe_bp101wx1_100,
+ }, {
.compatible = "boe,ev121wxm-n10-1850",
.data = &boe_ev121wxm_n10_1850,
}, {
@@ -4337,6 +4450,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "eink,vb3300-kca",
.data = &eink_vb3300_kca,
}, {
+ .compatible = "evervision,vgg644804",
+ .data = &evervision_vgg644804,
+ }, {
.compatible = "evervision,vgg804821",
.data = &evervision_vgg804821,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 036ac403e..421eb4592 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -423,6 +423,42 @@ static void kd50t048a_gip_sequence(struct st7701 *st7701)
0xFF, 0xFF, 0xFF, 0xFF, 0x10, 0x45, 0x67, 0x98, 0xBA);
}
+static void rg_arc_gip_sequence(struct st7701 *st7701)
+{
+ st7701_switch_cmd_bkx(st7701, true, 3);
+ ST7701_DSI(st7701, 0xEF, 0x08);
+ st7701_switch_cmd_bkx(st7701, true, 0);
+ ST7701_DSI(st7701, 0xC7, 0x04);
+ ST7701_DSI(st7701, 0xCC, 0x38);
+ st7701_switch_cmd_bkx(st7701, true, 1);
+ ST7701_DSI(st7701, 0xB9, 0x10);
+ ST7701_DSI(st7701, 0xBC, 0x03);
+ ST7701_DSI(st7701, 0xC0, 0x89);
+ ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+ ST7701_DSI(st7701, 0xE1, 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x20);
+ ST7701_DSI(st7701, 0xE2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE3, 0x00, 0x00, 0x33, 0x00);
+ ST7701_DSI(st7701, 0xE4, 0x22, 0x00);
+ ST7701_DSI(st7701, 0xE5, 0x04, 0x5C, 0xA0, 0xA0, 0x06, 0x5C, 0xA0,
+ 0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE6, 0x00, 0x00, 0x33, 0x00);
+ ST7701_DSI(st7701, 0xE7, 0x22, 0x00);
+ ST7701_DSI(st7701, 0xE8, 0x05, 0x5C, 0xA0, 0xA0, 0x07, 0x5C, 0xA0,
+ 0xA0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xEB, 0x02, 0x00, 0x40, 0x40, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xED, 0xFA, 0x45, 0x0B, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xB0, 0x54, 0xAF);
+ ST7701_DSI(st7701, 0xEF, 0x08, 0x08, 0x08, 0x45, 0x3F, 0x54);
+ st7701_switch_cmd_bkx(st7701, false, 0);
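+	/* Leave the Command2 banks before issuing the standard DCS setup. */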
+ ST7701_DSI(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0x17);
+ ST7701_DSI(st7701, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
+ ST7701_DSI(st7701, MIPI_DCS_EXIT_SLEEP_MODE, 0x00);
+ msleep(120);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -839,6 +875,105 @@ static const struct st7701_panel_desc kd50t048a_desc = {
.gip_sequence = kd50t048a_gip_sequence,
};
+static const struct drm_display_mode rg_arc_mode = {
+ .clock = 25600,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 60,
+ .hsync_end = 480 + 60 + 42,
+ .htotal = 480 + 60 + 42 + 60,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 10,
+ .vsync_end = 640 + 10 + 4,
+ .vtotal = 640 + 10 + 4 + 16,
+
+ .width_mm = 63,
+ .height_mm = 84,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc rg_arc_desc = {
+ .mode = &rg_arc_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 80,
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1d),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x12),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0a),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x25),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x03),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x01) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x16),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1e),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0e),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x06),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x0c),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x09),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x26),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x15),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x00),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1c)
+ },
+ .nlinv = 0,
+ .vop_uv = 4500000,
+ .vcom_uv = 762500,
+ .vgh_mv = 15000,
+ .vgl_mv = -9510,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = rg_arc_gip_sequence,
+};
+
static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct st7701_panel_desc *desc;
@@ -917,6 +1052,7 @@ static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id st7701_of_match[] = {
+ { .compatible = "anbernic,rg-arc-panel", .data = &rg_arc_desc },
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
new file mode 100644
index 000000000..169c62974
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics R63353 Controller driver
+ *
+ * Copyright (C) 2020 BSH Hausgerate GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/media-bus-format.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define R63353_INSTR(...) { \
+ .len = sizeof((u8[]) {__VA_ARGS__}), \
+ .data = (u8[]){__VA_ARGS__} \
+ }
+
+struct r63353_instr {
+ size_t len;
+ const u8 *data;
+};
+
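+/*
+ * 0x51/0x53/0x55 are the standard DCS brightness, control-display and
+ * power-save registers; 0x29 is MIPI_DCS_SET_DISPLAY_ON. 0x84 appears
+ * to be a vendor-specific register.
+ */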
+static const struct r63353_instr sharp_ls068b3sx02_init[] = {
+ R63353_INSTR(0x51, 0xff),
+ R63353_INSTR(0x53, 0x0c),
+ R63353_INSTR(0x55, 0x00),
+ R63353_INSTR(0x84, 0x00),
+ R63353_INSTR(0x29),
+};
+
+struct r63353_desc {
+ const char *name;
+ const struct r63353_instr *init;
+ const size_t init_length;
+ const struct drm_display_mode *mode;
+ u32 width_mm;
+ u32 height_mm;
+};
+
+struct r63353_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+
+	const struct r63353_desc *pdata;
+};
+
+static inline struct r63353_panel *to_r63353_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct r63353_panel, base);
+}
+
+static int r63353_panel_power_on(struct r63353_panel *rpanel)
+{
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = regulator_enable(rpanel->avdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable avdd regulator (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(15000, 25000);
+
+ ret = regulator_enable(rpanel->dvdd);
+ if (ret) {
+ dev_err(dev, "Failed to enable dvdd regulator (%d)\n", ret);
+ regulator_disable(rpanel->avdd);
+ return ret;
+ }
+
+ usleep_range(300000, 350000);
+ gpiod_set_value(rpanel->reset_gpio, 1);
+ usleep_range(15000, 25000);
+
+ return 0;
+}
+
+static int r63353_panel_power_off(struct r63353_panel *rpanel)
+{
+ gpiod_set_value(rpanel->reset_gpio, 0);
+ regulator_disable(rpanel->dvdd);
+ regulator_disable(rpanel->avdd);
+
+ return 0;
+}
+
+static int r63353_panel_activate(struct r63353_panel *rpanel)
+{
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int i, ret;
+
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(15000, 17000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
+ goto fail;
+ }
+
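+	/*
+	 * The manufacturer init sequence is written while the panel is still
+	 * in sleep mode; sleep-out follows once every entry has been sent.
+	 */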
+ for (i = 0; i < rpanel->pdata->init_length; i++) {
+ const struct r63353_instr *instr = &rpanel->pdata->init[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len);
+ if (ret < 0)
+ goto fail;
+ }
+
+ msleep(120);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display ON (%d)\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ gpiod_set_value(rpanel->reset_gpio, 0);
+
+ return ret;
+}
+
+static int r63353_panel_prepare(struct drm_panel *panel)
+{
+ struct r63353_panel *rpanel = to_r63353_panel(panel);
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dev_dbg(dev, "Preparing\n");
+
+ ret = r63353_panel_power_on(rpanel);
+ if (ret)
+ return ret;
+
+ ret = r63353_panel_activate(rpanel);
+ if (ret) {
+ r63353_panel_power_off(rpanel);
+ return ret;
+ }
+
+ dev_dbg(dev, "Prepared\n");
+ return 0;
+}
+
+static int r63353_panel_deactivate(struct r63353_panel *rpanel)
+{
+ struct mipi_dsi_device *dsi = rpanel->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display OFF (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int r63353_panel_unprepare(struct drm_panel *panel)
+{
+ struct r63353_panel *rpanel = to_r63353_panel(panel);
+
+ r63353_panel_deactivate(rpanel);
+ r63353_panel_power_off(rpanel);
+
+ return 0;
+}
+
+static const struct drm_display_mode sharp_ls068b3sx02_timing = {
+ .clock = 70000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 35,
+ .hsync_end = 640 + 35 + 2,
+ .htotal = 640 + 35 + 2 + 150,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 2,
+ .vsync_end = 1280 + 2 + 4,
+ .vtotal = 1280 + 2 + 4 + 0,
+};
+
+static int r63353_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct r63353_panel *rpanel = to_r63353_panel(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(connector->dev, rpanel->pdata->mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = rpanel->pdata->width_mm;
+ connector->display_info.height_mm = rpanel->pdata->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs r63353_panel_funcs = {
+ .prepare = r63353_panel_prepare,
+ .unprepare = r63353_panel_unprepare,
+ .get_modes = r63353_panel_get_modes,
+};
+
+static int r63353_panel_probe(struct mipi_dsi_device *dsi)
+{
+ int ret = 0;
+ struct device *dev = &dsi->dev;
+ struct r63353_panel *panel;
+
+ panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, panel);
+ panel->dsi = dsi;
+ panel->pdata = (struct r63353_desc *)of_device_get_match_data(dev);
+
+ dev_info(dev, "Panel %s\n", panel->pdata->name);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
+
+ panel->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(panel->dvdd))
+ return PTR_ERR(panel->dvdd);
+ panel->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(panel->avdd))
+ return PTR_ERR(panel->avdd);
+
+ panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(panel->reset_gpio)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(panel->reset_gpio);
+ }
+
+ drm_panel_init(&panel->base, dev, &r63353_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ panel->base.prepare_prev_first = true;
+ ret = drm_panel_of_backlight(&panel->base);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&panel->base);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&panel->base);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void r63353_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(dev, "Failed to detach from host (%d)\n", ret);
+
+ drm_panel_remove(&rpanel->base);
+}
+
+static void r63353_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
+
+ r63353_panel_unprepare(&rpanel->base);
+}
+
+static const struct r63353_desc sharp_ls068b3sx02_data = {
+ .name = "Sharp LS068B3SX02",
+ .mode = &sharp_ls068b3sx02_timing,
+ .init = sharp_ls068b3sx02_init,
+ .init_length = ARRAY_SIZE(sharp_ls068b3sx02_init),
+ .width_mm = 68,
+ .height_mm = 159,
+};
+
+static const struct of_device_id r63353_of_match[] = {
+ { .compatible = "sharp,ls068b3sx02", .data = &sharp_ls068b3sx02_data },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, r63353_of_match);
+
+static struct mipi_dsi_driver r63353_panel_driver = {
+ .driver = {
+ .name = "r63353-dsi",
+ .of_match_table = r63353_of_match,
+ },
+ .probe = r63353_panel_probe,
+ .remove = r63353_panel_remove,
+ .shutdown = r63353_panel_shutdown,
+};
+
+module_mipi_dsi_driver(r63353_panel_driver);
+
+MODULE_AUTHOR("Matthias Proske <Matthias.Proske@bshg.com>");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_DESCRIPTION("Synaptics R63353 Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 775144695..b15ca56a0 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -253,8 +253,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 28f7046e1..a45e4addc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -403,7 +403,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
panfrost_job_enable_interrupts(pfdev);
}
-static int panfrost_device_resume(struct device *dev)
+static int panfrost_device_runtime_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -413,7 +413,7 @@ static int panfrost_device_resume(struct device *dev)
return 0;
}
-static int panfrost_device_suspend(struct device *dev)
+static int panfrost_device_runtime_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -421,10 +421,83 @@ static int panfrost_device_suspend(struct device *dev)
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
+ panfrost_job_suspend_irq(pfdev);
+ panfrost_mmu_suspend_irq(pfdev);
+ panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
return 0;
}
-EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend,
- panfrost_device_resume, NULL);
+static int panfrost_device_resume(struct device *dev)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) {
+ unsigned long freq = pfdev->pfdevfreq.fast_rate;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_set_opp(dev, opp);
+ dev_pm_opp_put(opp);
+ }
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
+ ret = clk_enable(pfdev->clock);
+ if (ret)
+ goto err_clk;
+
+ if (pfdev->bus_clock) {
+ ret = clk_enable(pfdev->bus_clock);
+ if (ret)
+ goto err_bus_clk;
+ }
+ }
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto err_resume;
+
+ return 0;
+
+err_resume:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS) && pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+err_bus_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS))
+ clk_disable(pfdev->clock);
+err_clk:
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF))
+ dev_pm_opp_set_opp(dev, NULL);
+ return ret;
+}
+
+static int panfrost_device_suspend(struct device *dev)
+{
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
+ if (pfdev->bus_clock)
+ clk_disable(pfdev->bus_clock);
+
+ clk_disable(pfdev->clock);
+ }
+
+ if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF))
+ dev_pm_opp_set_opp(dev, NULL);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(panfrost_pm_ops) = {
+ RUNTIME_PM_OPS(panfrost_device_runtime_suspend, panfrost_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(panfrost_device_suspend, panfrost_device_resume)
+};
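
(Editorial note: EXPORT_GPL_DEV_PM_OPS declares and exports a struct dev_pm_ops. With the CONFIG_PM/CONFIG_PM_SLEEP conditionals stripped, the ops above correspond roughly to the open-coded form below; SYSTEM_SLEEP_PM_OPS also routes the same pair into the freeze/thaw/poweroff/restore hibernation hooks. This is a sketch, not the macro's literal expansion:

    static const struct dev_pm_ops panfrost_pm_ops_sketch = {
    	/* runtime PM, driven by autosuspend when the GPU idles */
    	.runtime_suspend = panfrost_device_runtime_suspend,
    	.runtime_resume  = panfrost_device_runtime_resume,
    	/* system-wide sleep (suspend-to-idle / suspend-to-RAM) */
    	.suspend = panfrost_device_suspend,
    	.resume  = panfrost_device_resume,
    };
)
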
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 1ef38f60d..62f7e3527 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -25,6 +25,23 @@ struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
+enum panfrost_drv_comp_bits {
+ PANFROST_COMP_BIT_GPU,
+ PANFROST_COMP_BIT_JOB,
+ PANFROST_COMP_BIT_MMU,
+ PANFROST_COMP_BIT_MAX
+};
+
+/**
+ * enum panfrost_gpu_pm - Supported kernel power management features
+ * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend
+ * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend
+ */
+enum panfrost_gpu_pm {
+ GPU_PM_CLK_DIS,
+ GPU_PM_VREG_OFF,
+};
+
struct panfrost_features {
u16 id;
u16 revision;
@@ -75,12 +92,17 @@ struct panfrost_compatible {
/* Vendor implementation quirks callback */
void (*vendor_quirk)(struct panfrost_device *pfdev);
+
+ /* Allowed PM features */
+ u8 pm_features;
};
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
+ int gpu_irq;
+ int mmu_irq;
void __iomem *iomem;
struct clk *clock;
@@ -94,6 +116,7 @@ struct panfrost_device {
struct panfrost_features features;
const struct panfrost_compatible *comp;
+ DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);
spinlock_t as_lock;
unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 7cabf4e3d..a926d71e8 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -274,7 +274,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- NULL);
+ 1, NULL);
if (ret)
goto out_put_job;
@@ -734,6 +734,7 @@ static const struct panfrost_compatible mediatek_mt8183_b_data = {
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
.pm_domain_names = mediatek_mt8183_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" };
@@ -742,6 +743,7 @@ static const struct panfrost_compatible mediatek_mt8186_data = {
.supply_names = mediatek_mt8183_b_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains),
.pm_domain_names = mediatek_mt8186_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const char * const mediatek_mt8192_supplies[] = { "mali", NULL };
@@ -752,6 +754,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = {
.supply_names = mediatek_mt8192_supplies,
.num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains),
.pm_domain_names = mediatek_mt8192_pm_domains,
+ .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF),
};
static const struct of_device_id dt_match[] = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index e7942ac44..47751302f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -220,16 +220,8 @@ void panfrost_core_dump(struct panfrost_job *job)
iter.hdr->bomap.data[0] = bomap - bomap_start;
- for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
- struct page *page = sg_page_iter_page(&page_iter);
-
- if (!IS_ERR(page)) {
- *bomap++ = page_to_phys(page);
- } else {
- dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
- *bomap++ = 0;
- }
- }
+ for_each_sgtable_page(bo->base.sgt, &page_iter, 0)
+ *bomap++ = page_to_phys(sg_page_iter_page(&page_iter));
iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 311cf4525..fd8e44992 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -22,9 +22,13 @@
static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 state = gpu_read(pfdev, GPU_INT_STAT);
- u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+ u32 fault_status, state;
+ if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended))
+ return IRQ_NONE;
+
+ fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+ state = gpu_read(pfdev, GPU_INT_STAT);
if (!state)
return IRQ_NONE;
@@ -60,14 +64,23 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
gpu_write(pfdev, GPU_INT_MASK, 0);
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
- gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
+ clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
- val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+ val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu soft reset timed out\n");
- return ret;
+ dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");
+
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
+ val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+ if (ret) {
+ dev_err(pfdev->dev, "gpu hard reset timed out\n");
+ return ret;
+ }
}
gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
@@ -402,7 +415,7 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == (pfdev->features.l2_present & core_mask),
- 100, 20000);
+ 10, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu L2");
@@ -410,13 +423,13 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
pfdev->features.shader_present & core_mask);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == (pfdev->features.shader_present & core_mask),
- 100, 20000);
+ 10, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
- val, val == pfdev->features.tiler_present, 100, 1000);
+ val, val == pfdev->features.tiler_present, 10, 1000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu tiler");
}
@@ -428,26 +441,34 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "shader power transition timeout");
gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
- val, !val, 1, 1000);
+ val, !val, 1, 2000);
if (ret)
dev_err(pfdev->dev, "tiler power transition timeout");
gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
- val, !val, 0, 1000);
+ val, !val, 0, 2000);
if (ret)
dev_err(pfdev->dev, "l2 power transition timeout");
}
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ synchronize_irq(pfdev->gpu_irq);
+}
+
int panfrost_gpu_init(struct panfrost_device *pfdev)
{
- int err, irq;
+ int err;
err = panfrost_gpu_soft_reset(pfdev);
if (err)
@@ -462,11 +483,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
dma_set_max_seg_size(pfdev->dev, UINT_MAX);
- irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
- if (irq < 0)
- return irq;
+ pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ if (pfdev->gpu_irq < 0)
+ return pfdev->gpu_irq;
- err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+ err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index 876fdad9f..d841b8650 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -15,6 +15,7 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev);
void panfrost_cycle_counter_get(struct panfrost_device *pfdev);
void panfrost_cycle_counter_put(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index ecd2e0351..0c2dbf6ef 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -405,6 +405,8 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
int j;
u32 irq_mask = 0;
+ clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
for (j = 0; j < NUM_JOB_SLOTS; j++) {
irq_mask |= MK_JS_MASK(j);
}
@@ -413,6 +415,14 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
job_write(pfdev, JOB_INT_MASK, irq_mask);
}
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
+ job_write(pfdev, JOB_INT_MASK, 0);
+ synchronize_irq(pfdev->js->irq);
+}
+
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
struct panfrost_job *job,
unsigned int js)
@@ -792,17 +802,25 @@ static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
struct panfrost_device *pfdev = data;
panfrost_job_handle_irqs(pfdev);
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+
+ /* Enable interrupts only if we're not about to get suspended */
+ if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+ job_write(pfdev, JOB_INT_MASK,
+ GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
+ GENMASK(NUM_JOB_SLOTS - 1, 0));
+
return IRQ_HANDLED;
}
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 status = job_read(pfdev, JOB_INT_STAT);
+ u32 status;
+
+ if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+ return IRQ_NONE;
+ status = job_read(pfdev, JOB_INT_STAT);
if (!status)
return IRQ_NONE;
@@ -852,7 +870,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
js->queue[j].fence_context = dma_fence_context_alloc(1);
ret = drm_sched_init(&js->queue[j].sched,
- &panfrost_sched_ops,
+ &panfrost_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
nentries, 0,
msecs_to_jiffies(JOB_TIMEOUT_MS),
@@ -963,7 +981,7 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&js->queue[i].sched.hw_rq_count))
+ if (atomic_read(&js->queue[i].sched.credit_count))
return false;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 17ff808db..ec581b978 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -47,6 +47,7 @@ int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
int panfrost_job_is_idle(struct panfrost_device *pfdev);
#endif
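
(Editorial note: the panfrost_{gpu,job,mmu}_suspend_irq helpers added in this series share one quiescing pattern: set the per-component is_suspended bit so handlers bail out early, mask the interrupt source, then synchronize_irq() so any in-flight handler drains before the GPU is powered off. A generic sketch of the pattern, with hypothetical names (example_dev, EXAMPLE_INT_MASK):

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    #define EXAMPLE_SUSPENDED	0
    #define EXAMPLE_INT_MASK	0x24	/* hypothetical register offset */

    struct example_dev {
    	void __iomem *regs;
    	int irq;
    	unsigned long flags[1];
    };

    static void example_suspend_irq(struct example_dev *edev)
    {
    	/* 1. New handler invocations bail out immediately. */
    	set_bit(EXAMPLE_SUSPENDED, edev->flags);
    	/* 2. Mask the source so the line stops asserting. */
    	writel(0, edev->regs + EXAMPLE_INT_MASK);
    	/* 3. Drain any handler still running on another CPU. */
    	synchronize_irq(edev->irq);
    }

    static irqreturn_t example_irq_handler(int irq, void *data)
    {
    	struct example_dev *edev = data;

    	/* Registers may be unclocked once suspended; do not touch them. */
    	if (test_bit(EXAMPLE_SUSPENDED, edev->flags))
    		return IRQ_NONE;

    	/* ... normal interrupt handling ... */
    	return IRQ_HANDLED;
    }
)
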
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 846dd697c..b91019cd5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -231,6 +231,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
struct panfrost_mmu *mmu, *mmu_tmp;
+ clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
spin_lock(&pfdev->as_lock);
pfdev->as_alloc_mask = 0;
@@ -500,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
mapping_set_unevictable(mapping);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ /* Can happen if the last fault only partially filled this
+ * section of the pages array before failing. In that case
+ * we skip already filled pages.
+ */
+ if (pages[i])
+ continue;
+
pages[i] = shmem_read_mapping_page(mapping, i);
if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL;
- goto err_pages;
+ goto err_unlock;
}
}
@@ -512,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
- goto err_pages;
+ goto err_unlock;
ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@@ -535,8 +544,6 @@ out:
err_map:
sg_free_table(sgt);
-err_pages:
- drm_gem_shmem_put_pages(&bo->base);
err_unlock:
dma_resv_unlock(obj->resv);
err_bo:
@@ -670,6 +677,9 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
+ if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
+ return IRQ_NONE;
+
if (!mmu_read(pfdev, MMU_INT_STAT))
return IRQ_NONE;
@@ -744,22 +754,25 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
}
- spin_lock(&pfdev->as_lock);
- mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
- spin_unlock(&pfdev->as_lock);
+ /* Enable interrupts only if we're not about to get suspended */
+ if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
+ spin_lock(&pfdev->as_lock);
+ mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
+ spin_unlock(&pfdev->as_lock);
+ }
return IRQ_HANDLED;
};
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
- int err, irq;
+ int err;
- irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
- if (irq < 0)
- return irq;
+ pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ if (pfdev->mmu_irq < 0)
+ return pfdev->mmu_irq;
- err = devm_request_threaded_irq(pfdev->dev, irq,
+ err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-mmu",
@@ -777,3 +790,11 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
}
+
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
+{
+ set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ synchronize_irq(pfdev->mmu_irq);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index cc2a0d307..022a9a74a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -14,6 +14,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
void panfrost_mmu_reset(struct panfrost_device *pfdev);
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev);
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index 55ec80755..c25743b05 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -44,6 +44,7 @@
GPU_IRQ_MULTIPLE_FAULT)
#define GPU_CMD 0x30
#define GPU_CMD_SOFT_RESET 0x01
+#define GPU_CMD_HARD_RESET 0x02
#define GPU_CMD_PERFCNT_CLEAR 0x03
#define GPU_CMD_PERFCNT_SAMPLE 0x04
#define GPU_CMD_CYCLE_COUNT_START 0x05
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 404b0483b..c6d35c33d 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -485,7 +485,6 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
static int qxl_primary_apply_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
- struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
@@ -510,8 +509,8 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
- cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
+ cmd->u.set.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+ cmd->u.set.position.y = plane_state->crtc_y + plane_state->hotspot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
@@ -531,7 +530,6 @@ out_free_release:
static int qxl_primary_move_cursor(struct qxl_device *qdev,
struct drm_plane_state *plane_state)
{
- struct drm_framebuffer *fb = plane_state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
@@ -554,8 +552,8 @@ static int qxl_primary_move_cursor(struct qxl_device *qdev,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
- cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
+ cmd->u.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+ cmd->u.position.y = plane_state->crtc_y + plane_state->hotspot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_release_fence_buffer_objects(release);
@@ -851,8 +849,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
- new_state->fb->hot_x,
- new_state->fb->hot_y);
+ new_state->hotspot_x,
+ new_state->hotspot_y);
qxl_free_cursor(old_cursor_bo);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 307a890fd..32069acd9 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -119,7 +119,6 @@ struct qxl_output {
#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
-#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
struct ttm_device bdev;
@@ -256,8 +255,6 @@ struct qxl_device {
#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
-int qxl_debugfs_fence_init(struct qxl_device *rdev);
-
int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
@@ -344,8 +341,6 @@ qxl_image_alloc_objects(struct qxl_device *qdev,
int height, int stride);
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
-void qxl_update_screen(struct qxl_device *qxl);
-
/* qxl io operations (qxl_cmd.c) */
void qxl_io_create_primary(struct qxl_device *qdev,
@@ -445,8 +440,6 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
-struct qxl_drv_surface *
-qxl_surface_lookup(struct drm_device *dev, int surface_id);
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
/* qxl_ioctl.c */
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0..9febc8b73 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
+ struct qxl_release *release;
+ int count = 0, sc = 0;
+ bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
+ release = container_of(fence, struct qxl_release, base);
+ have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
- if (!wait_event_timeout(qdev->release_event,
- (dma_fence_is_signaled(fence) ||
- (qxl_io_notify_oom(qdev), 0)),
- timeout))
- return 0;
+retry:
+ sc++;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ qxl_io_notify_oom(qdev);
+
+ for (count = 0; count < 11; count++) {
+ if (!qxl_queue_garbage_collect(qdev, true))
+ break;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+ }
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ if (have_drawable_releases || sc < 4) {
+ if (sc > 2)
+ /* back off */
+ usleep_range(500, 1000);
+
+ if (time_after(jiffies, end))
+ return 0;
+
+ if (have_drawable_releases && sc > 300) {
+ DMA_FENCE_WARN(fence,
+ "failed to wait on release %llu after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
+ goto signaled;
+ }
+ goto retry;
+ }
+ /*
+ * The original sync_obj_wait gave up after three spins when
+ * have_drawable_releases is not set.
+ */
+signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 4aca09cab..6e537c5bd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
index 63a1ffbb3..3b645558f 100644
--- a/drivers/gpu/drm/radeon/clearstate_evergreen.h
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -1049,7 +1049,7 @@ static const struct cs_extent_def SECT_CONTEXT_defs[] =
{SECT_CONTEXT_def_5, 0x0000a29e, 5 },
{SECT_CONTEXT_def_6, 0x0000a2a5, 56 },
{SECT_CONTEXT_def_7, 0x0000a2de, 290 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const u32 SECT_CLEAR_def_1[] =
{
@@ -1060,7 +1060,7 @@ static const u32 SECT_CLEAR_def_1[] =
static const struct cs_extent_def SECT_CLEAR_defs[] =
{
{SECT_CLEAR_def_1, 0x0000ffc0, 3 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const u32 SECT_CTRLCONST_def_1[] =
{
@@ -1070,11 +1070,11 @@ static const u32 SECT_CTRLCONST_def_1[] =
static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
- { 0, 0, 0 }
+ { NULL, 0, 0 }
};
static const struct cs_section_def evergreen_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
- { 0, SECT_NONE }
+ { NULL, SECT_NONE }
};
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index e8fe239b9..324e9b765 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -21,6 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
#include "radeon.h"
#include "radeon_asic.h"
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 4a1d5447e..4c06f4745 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -21,6 +21,7 @@
*
*/
#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
#include "dce6_afmt.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f0ae087be..a424b8600 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 5f3078f8a..681119c91 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -26,6 +26,7 @@
*/
#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "evergreen_hdmi.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 85c4bb186..fc7b0e8f1 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
@@ -922,8 +923,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
- ATOM_CONNECTOR_INFO_I2C ci =
- supported_devices->info.asConnInfo[i];
+ ATOM_CONNECTOR_INFO_I2C ci;
+
+ if (frev > 1)
+ ci = supported_devices->info_2d1.asConnInfo[i];
+ else
+ ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index d6ccaf24e..91b58fbc2 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -26,6 +26,8 @@
#include <linux/component.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_eld.h>
+#include <drm/drm_edid.h>
#include "dce6_afmt.h"
#include "evergreen_hdmi.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 05e678674..dacaaa007 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -27,7 +27,9 @@
#include <linux/types.h>
-#define RREG32_ENDPOINT(block, reg) \
+struct cea_sad;
+
+#define RREG32_ENDPOINT(block, reg) \
radeon_audio_endpoint_rreg(rdev, (block), (reg))
#define WREG32_ENDPOINT(block, reg, v) \
radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2620efc7c..6952b1273 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 34a1c73d3..02a65971d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -33,7 +33,6 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include <drm/drm_legacy.h>
#include "radeon_family.h"
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 9cb6401fe..3de3dce9e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
+#include <drm/drm_edid.h>
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 314d066e6..3d174390a 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -918,7 +918,6 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 1decdcec0..59c4db13d 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -32,13 +32,13 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+struct edid;
struct radeon_bo;
struct radeon_device;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e6534fa9f..38048593b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -413,6 +413,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
+ radeon_debugfs_ring_init(rdev, ring);
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
@@ -421,7 +422,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
}
- radeon_debugfs_ring_init(rdev, ring);
radeon_ring_lockup_update(rdev, ring);
return 0;
}
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
index 8f9a728af..07ad17d24 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
@@ -14,7 +14,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
-#include <drm/drm_plane_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 84aa811ca..bd08d5748 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -30,7 +30,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define RK3288_GRF_SOC_CON6 0x25c
#define RK3288_EDP_LCDC_SEL BIT(5)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 21254e4e1..a855c45ae 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -24,7 +24,6 @@
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
-#include "rockchip_drm_vop.h"
static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 6396f9324..4cc8ed8f4 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -26,7 +26,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define DSI_PHY_RSTZ 0xa0
#define PHY_DISFORCEPLL 0
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 341550199..fe33092ab 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -18,7 +18,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define RK3228_GRF_SOC_CON2 0x0408
#define RK3228_HDMI_SDAIN_MSK BIT(14)
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 345253e03..b66c1ef58 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -23,7 +23,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#include "inno_hdmi.h"
@@ -793,7 +792,6 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
init_completion(&i2c->cmp);
adap = &i2c->adap;
- adap->class = I2C_CLASS_DDC;
adap->owner = THIS_MODULE;
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index fa6e592e0..95cd1b49e 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -4,6 +4,7 @@
* Zheng Yang <zhengyang@rock-chips.com>
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -17,7 +18,6 @@
#include "rk3066_hdmi.h"
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#define DEFAULT_PLLA_RATE 30000000
@@ -55,7 +55,6 @@ struct rk3066_hdmi {
unsigned int tmdsclk;
struct hdmi_data_info hdmi_data;
- struct drm_display_mode previous_mode;
};
static struct rk3066_hdmi *encoder_to_rk3066_hdmi(struct drm_encoder *encoder)
@@ -387,21 +386,21 @@ static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
return 0;
}
-static void
-rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ int mux, val;
- /* Store the display mode for plugin/DPMS poweron events. */
- drm_mode_copy(&hdmi->previous_mode, adj_mode);
-}
+ conn_state = drm_atomic_get_new_connector_state(state, &hdmi->connector);
+ if (WARN_ON(!conn_state))
+ return;
-static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
- int mux, val;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
if (mux)
@@ -414,10 +413,11 @@ static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
(mux) ? "1" : "0");
- rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
+ rk3066_hdmi_setup(hdmi, &crtc_state->adjusted_mode);
}
-static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder);
@@ -434,14 +434,6 @@ static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
}
-static bool
-rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- return true;
-}
-
static int
rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -457,11 +449,9 @@ rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
static const
struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
- .enable = rk3066_hdmi_encoder_enable,
- .disable = rk3066_hdmi_encoder_disable,
- .mode_fixup = rk3066_hdmi_encoder_mode_fixup,
- .mode_set = rk3066_hdmi_encoder_mode_set,
- .atomic_check = rk3066_hdmi_encoder_atomic_check,
+ .atomic_check = rk3066_hdmi_encoder_atomic_check,
+ .atomic_enable = rk3066_hdmi_encoder_enable,
+ .atomic_disable = rk3066_hdmi_encoder_disable,
};
static enum drm_connector_status
@@ -725,7 +715,6 @@ static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
init_completion(&i2c->cmpltn);
adap = &i2c->adap;
- adap->class = I2C_CLASS_DDC;
adap->owner = THIS_MODULE;
adap->dev.parent = hdmi->dev;
adap->dev.of_node = hdmi->dev->of_node;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index aeb03a572..bbb9e0bf6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -20,6 +20,23 @@
#define ROCKCHIP_MAX_CONNECTOR 2
#define ROCKCHIP_MAX_CRTC 4
+/*
+ * display output interfaces supported by the rockchip lcdc
+ */
+#define ROCKCHIP_OUT_MODE_P888 0
+#define ROCKCHIP_OUT_MODE_BT1120 0
+#define ROCKCHIP_OUT_MODE_P666 1
+#define ROCKCHIP_OUT_MODE_P565 2
+#define ROCKCHIP_OUT_MODE_BT656 5
+#define ROCKCHIP_OUT_MODE_S888 8
+#define ROCKCHIP_OUT_MODE_S888_DUMMY 12
+#define ROCKCHIP_OUT_MODE_YUV420 14
+/* for use of a special output interface */
+#define ROCKCHIP_OUT_MODE_AAAA 15
+
+/* output flags */
+#define ROCKCHIP_OUTPUT_DSI_DUAL BIT(0)
+
struct drm_device;
struct drm_connector;
struct iommu_domain;
@@ -31,6 +48,7 @@ struct rockchip_crtc_state {
int output_bpc;
int output_flags;
bool enable_afbc;
+ bool yuv_overlay;
u32 bus_format;
u32 bus_flags;
int color_space;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 4b2daefeb..b33e5bdc2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -277,18 +277,6 @@ struct vop_data {
/* dst alpha ctrl define */
#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
-/*
- * display output interface supported by rockchip lcdc
- */
-#define ROCKCHIP_OUT_MODE_P888 0
-#define ROCKCHIP_OUT_MODE_P666 1
-#define ROCKCHIP_OUT_MODE_P565 2
-/* for use special outface */
-#define ROCKCHIP_OUT_MODE_AAAA 15
-
-/* output flags */
-#define ROCKCHIP_OUTPUT_DSI_DUAL BIT(0)
-
enum alpha_mode {
ALPHA_STRAIGHT,
ALPHA_INVERSE,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 6862fb146..fdd768bbd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -35,7 +35,6 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
-#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
@@ -190,7 +189,10 @@ struct vop2 {
void __iomem *regs;
struct regmap *map;
- struct regmap *grf;
+ struct regmap *sys_grf;
+ struct regmap *vop_grf;
+ struct regmap *vo1_grf;
+ struct regmap *sys_pmu;
/* physical map length of vop2 register */
u32 len;
@@ -209,6 +211,7 @@ struct vop2 {
unsigned int enable_count;
struct clk *hclk;
struct clk *aclk;
+ struct clk *pclk;
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
@@ -217,6 +220,25 @@ struct vop2 {
struct vop2_win win[];
};
+#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
+ (x) == ROCKCHIP_VOP2_EP_HDMI1)
+
+#define vop2_output_if_is_dp(x) ((x) == ROCKCHIP_VOP2_EP_DP0 || \
+ (x) == ROCKCHIP_VOP2_EP_DP1)
+
+#define vop2_output_if_is_edp(x) ((x) == ROCKCHIP_VOP2_EP_EDP0 || \
+ (x) == ROCKCHIP_VOP2_EP_EDP1)
+
+#define vop2_output_if_is_mipi(x) ((x) == ROCKCHIP_VOP2_EP_MIPI0 || \
+ (x) == ROCKCHIP_VOP2_EP_MIPI1)
+
+#define vop2_output_if_is_lvds(x) ((x) == ROCKCHIP_VOP2_EP_LVDS0 || \
+ (x) == ROCKCHIP_VOP2_EP_LVDS1)
+
+#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+
+static const struct regmap_config vop2_regmap_config;
+
static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
{
return container_of(crtc, struct vop2_video_port, crtc);
@@ -266,12 +288,23 @@ static bool vop2_cluster_window(const struct vop2_win *win)
return win->data->feature & WIN_FEATURE_CLUSTER;
}
+/*
+ * Note:
+ * The write-mask bits are documented for rk3566/8 but not wired up:
+ * writes to them have no effect there. On newer SoCs (rk3588 and
+ * following) the write mask is required for register writes to take
+ * effect.
+ *
+ * GLB_CFG_DONE_EN has no write-mask bit.
+ */
static void vop2_cfg_done(struct vop2_video_port *vp)
{
struct vop2 *vop2 = vp->vop2;
+ u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN;
+
+ val |= BIT(vp->id) | (BIT(vp->id) << 16);
- regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE,
- BIT(vp->id) | RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
+ regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val);
}
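
(Editorial note: a worked example of the value written above: for vp->id == 1, val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN | BIT(1) | BIT(17), i.e. the config-done request for video port 1 plus its write-mask bit shifted into the upper halfword, which the newer SoCs require per the note above.)
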
static void vop2_win_disable(struct vop2_win *win)
@@ -325,11 +358,14 @@ static enum vop2_data_format vop2_convert_format(u32 format)
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
return VOP2_FMT_YUV422SP;
+ case DRM_FORMAT_NV20:
case DRM_FORMAT_Y210:
return VOP2_FMT_YUV422SP_10;
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return VOP2_FMT_YUV444SP;
+ case DRM_FORMAT_NV30:
+ return VOP2_FMT_YUV444SP_10;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
return VOP2_FMT_VYUY422;
@@ -414,6 +450,8 @@ static bool vop2_win_uv_swap(u32 format)
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV15:
+ case DRM_FORMAT_NV20:
+ case DRM_FORMAT_NV30:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
return true;
@@ -457,6 +495,17 @@ static bool vop2_output_uv_swap(u32 bus_format, u32 output_mode)
return false;
}
+static bool vop2_output_rg_swap(struct vop2 *vop2, u32 bus_format)
+{
+ if (vop2->data->soc_id == 3588) {
+ if (bus_format == MEDIA_BUS_FMT_YUV8_1X24 ||
+ bus_format == MEDIA_BUS_FMT_YUV10_1X30)
+ return true;
+ }
+
+ return false;
+}
+
static bool is_yuv_output(u32 bus_format)
{
switch (bus_format) {
@@ -514,6 +563,18 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
return vop2_convert_afbc_format(format) >= 0;
}
+/*
+ * AFBC block height selection:
+ * 0: full block mode, 16 lines per block
+ * 1: half block mode, 8 lines per block
+ */
+static bool vop2_half_block_enable(struct drm_plane_state *pstate)
+{
+ if (pstate->rotation & (DRM_MODE_ROTATE_270 | DRM_MODE_ROTATE_90))
+ return false;
+ else
+ return true;
+}
+
static u32 vop2_afbc_transform_offset(struct drm_plane_state *pstate,
bool afbc_half_block_en)
{
@@ -849,13 +910,32 @@ static int vop2_core_clks_prepare_enable(struct vop2 *vop2)
goto err;
}
+ ret = clk_prepare_enable(vop2->pclk);
+ if (ret < 0) {
+ drm_err(vop2->drm, "failed to enable pclk - %d\n", ret);
+ goto err1;
+ }
+
return 0;
+err1:
+ clk_disable_unprepare(vop2->aclk);
err:
clk_disable_unprepare(vop2->hclk);
return ret;
}
+static void rk3588_vop2_power_domain_enable_all(struct vop2 *vop2)
+{
+ u32 pd;
+
+ pd = vop2_readl(vop2, RK3588_SYS_PD_CTRL);
+ pd &= ~(VOP2_PD_CLUSTER0 | VOP2_PD_CLUSTER1 | VOP2_PD_CLUSTER2 |
+ VOP2_PD_CLUSTER3 | VOP2_PD_ESMART);
+
+ vop2_writel(vop2, RK3588_SYS_PD_CTRL, pd);
+}
+
static void vop2_enable(struct vop2 *vop2)
{
int ret;
@@ -878,11 +958,12 @@ static void vop2_enable(struct vop2 *vop2)
return;
}
- regcache_sync(vop2->map);
-
if (vop2->data->soc_id == 3566)
vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
+ if (vop2->data->soc_id == 3588)
+ rk3588_vop2_power_domain_enable_all(vop2);
+
vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN);
/*
@@ -908,8 +989,9 @@ static void vop2_disable(struct vop2 *vop2)
pm_runtime_put_sync(vop2->dev);
- regcache_mark_dirty(vop2->map);
+ regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+ clk_disable_unprepare(vop2->pclk);
clk_disable_unprepare(vop2->aclk);
clk_disable_unprepare(vop2->hclk);
}
@@ -1135,6 +1217,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
bool rotate_90 = pstate->rotation & DRM_MODE_ROTATE_90;
struct rockchip_gem_object *rk_obj;
unsigned long offset;
+ bool half_block_en;
bool afbc_en;
dma_addr_t yrgb_mst;
dma_addr_t uv_mst;
@@ -1227,6 +1310,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff);
format = vop2_convert_format(fb->format->format);
+ half_block_en = vop2_half_block_enable(pstate);
drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n",
vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h,
@@ -1234,6 +1318,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
+ if (vop2_cluster_window(win))
+ vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
+
if (afbc_en) {
u32 stride;
@@ -1272,15 +1359,21 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 1);
vop2_win_write(win, VOP2_WIN_AFBC_FORMAT, afbc_format);
vop2_win_write(win, VOP2_WIN_AFBC_UV_SWAP, uv_swap);
- vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
+ /*
+ * On rk3566/8 this bit enables auto gating, but the
+ * feature does not work well, so disable it on these
+ * two platforms.
+ * On rk3588 and the following new SoCs (rk3528/rk3576)
+ * the bit means "gating disable", so write 1 to disable
+ * gating whenever AFBC is enabled.
+ */
+ if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
+ vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 0);
+ else
+ vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
+
vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
- if (pstate->rotation & (DRM_MODE_ROTATE_270 | DRM_MODE_ROTATE_90)) {
- vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, 0);
- transform_offset = vop2_afbc_transform_offset(pstate, false);
- } else {
- vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, 1);
- transform_offset = vop2_afbc_transform_offset(pstate, true);
- }
+ transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, transform_offset);
@@ -1292,6 +1385,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_270, rotate_270);
vop2_win_write(win, VOP2_WIN_AFBC_ROTATE_90, rotate_90);
} else {
+ if (vop2_cluster_window(win)) {
+ vop2_win_write(win, VOP2_WIN_AFBC_ENABLE, 0);
+ vop2_win_write(win, VOP2_WIN_AFBC_TRANSFORM_OFFSET, 0);
+ }
+
vop2_win_write(win, VOP2_WIN_YRGB_VIR, DIV_ROUND_UP(fb->pitches[0], 4));
}
@@ -1424,8 +1522,18 @@ static void vop2_post_config(struct drm_crtc *crtc)
u32 top_margin = 100, bottom_margin = 100;
u16 hsize = hdisplay * (left_margin + right_margin) / 200;
u16 vsize = vdisplay * (top_margin + bottom_margin) / 200;
+ u16 hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
u16 hact_end, vact_end;
u32 val;
+ u32 bg_dly;
+ u32 pre_scan_dly;
+
+ bg_dly = vp->data->pre_scan_max_dly[3];
+ vop2_writel(vp->vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
+ FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
+
+ pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
+ vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
vsize = rounddown(vsize, 2);
hsize = rounddown(hsize, 2);
@@ -1461,10 +1569,10 @@ static void vop2_post_config(struct drm_crtc *crtc)
vop2_vp_write(vp, RK3568_VP_DSP_BG, 0);
}
-static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
- u32 polflags)
+static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
{
struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
u32 die, dip;
die = vop2_readl(vop2, RK3568_DSP_IF_EN);
@@ -1478,9 +1586,9 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
dip &= ~RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL;
dip |= FIELD_PREP(RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL, polflags);
if (polflags & POLFLAG_DCLK_INV)
- regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16) | BIT(3));
else
- regmap_write(vop2->grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
+ regmap_write(vop2->sys_grf, RK3568_GRF_VO_CON1, BIT(3 + 16));
break;
case ROCKCHIP_VOP2_EP_HDMI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
@@ -1526,13 +1634,280 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
break;
default:
drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
- return;
+ return 0;
}
dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
vop2_writel(vop2, RK3568_DSP_IF_EN, die);
vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return crtc->state->adjusted_mode.crtc_clock * 1000LL;
+}
+
+/*
+ * Calculate the dclk on rk3588.
+ * The available dclk divisors are 1, 2 and 4.
+ */
+static unsigned long rk3588_calc_dclk(unsigned long child_clk, unsigned long max_dclk)
+{
+ if (child_clk * 4 <= max_dclk)
+ return child_clk * 4;
+ else if (child_clk * 2 <= max_dclk)
+ return child_clk * 2;
+ else if (child_clk <= max_dclk)
+ return child_clk;
+ else
+ return 0;
+}
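
(Editorial note: worked examples of the divider choice above: rk3588_calc_dclk(100000, 600000) returns 400000 (x4 fits under the cap), rk3588_calc_dclk(200000, 600000) returns 400000 (x4 overshoots, x2 fits), and any child clock above 600000 yields 0, which the callers treat as out of range.)
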
+
+/*
+ * 4 pixclk/cycle on rk3588
+ * RGB/eDP/HDMI: if_pixclk >= dclk_core
+ * DP: dp_pixclk = dclk_out <= dclk_core
+ * DSI: mipi_pixclk <= dclk_out <= dclk_core
+ */
+static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
+ int *dclk_core_div, int *dclk_out_div,
+ int *if_pixclk_div, int *if_dclk_div)
+{
+ struct vop2 *vop2 = vp->vop2;
+ struct drm_crtc *crtc = &vp->crtc;
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
+ int output_mode = vcstate->output_mode;
+ unsigned long v_pixclk = adjusted_mode->crtc_clock * 1000LL; /* video timing pixclk */
+ unsigned long dclk_core_rate = v_pixclk >> 2;
+ unsigned long dclk_rate = v_pixclk;
+ unsigned long dclk_out_rate;
+ unsigned long if_pixclk_rate;
+ int K = 1;
+
+ if (vop2_output_if_is_hdmi(id)) {
+ /*
+ * K = 2: dclk_core = if_pixclk_rate > if_dclk_rate
+ * K = 1: dclk_core = hdmie_edp_dclk > if_pixclk_rate
+ */
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420) {
+ dclk_rate = dclk_rate >> 1;
+ K = 2;
+ }
+
+ if_pixclk_rate = (dclk_core_rate << 1) / K;
+ /*
+ * if_dclk_rate = dclk_core_rate / K;
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = dclk_rate / if_dclk_rate;
+ */
+ *if_pixclk_div = 2;
+ *if_dclk_div = 4;
+ } else if (vop2_output_if_is_edp(id)) {
+ /*
+ * edp_pixclk = edp_dclk > dclk_core
+ */
+ if_pixclk_rate = v_pixclk / K;
+ dclk_rate = if_pixclk_rate * K;
+ /*
+ * *if_pixclk_div = dclk_rate / if_pixclk_rate;
+ * *if_dclk_div = *if_pixclk_div;
+ */
+ *if_pixclk_div = K;
+ *if_dclk_div = K;
+ } else if (vop2_output_if_is_dp(id)) {
+ if (output_mode == ROCKCHIP_OUT_MODE_YUV420)
+ dclk_out_rate = v_pixclk >> 3;
+ else
+ dclk_out_rate = v_pixclk >> 2;
+
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ } else if (vop2_output_if_is_mipi(id)) {
+ if_pixclk_rate = dclk_core_rate / K;
+ /*
+ * dclk_core = dclk_out * K = if_pixclk * K = v_pixclk / 4
+ */
+ dclk_out_rate = if_pixclk_rate;
+ /*
+ * dclk_rate = N * dclk_core_rate, with N in {1, 2, 4};
+ * rk3588_calc_dclk() picks the factor.
+ */
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000); /* 600 MHz cap */
+ if (!dclk_rate) {
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %lu Hz\n",
+ dclk_out_rate);
+ return 0;
+ }
+ *dclk_out_div = dclk_rate / dclk_out_rate;
+ /*
+ * mipi pixclk == dclk_out
+ */
+ *if_pixclk_div = 1;
+ } else if (vop2_output_if_is_dpi(id)) {
+ dclk_rate = v_pixclk;
+ }
+
+ *dclk_core_div = dclk_rate / dclk_core_rate;
+ *if_pixclk_div = ilog2(*if_pixclk_div);
+ *if_dclk_div = ilog2(*if_dclk_div);
+ *dclk_core_div = ilog2(*dclk_core_div);
+ *dclk_out_div = ilog2(*dclk_out_div);
+
+ drm_dbg(vop2->drm, "dclk: %lu, pixclk_div: %d, dclk_div: %d\n",
+ dclk_rate, *if_pixclk_div, *if_dclk_div);
+
+ return dclk_rate;
+}
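
Worked numbers help here. For 4K@60 over HDMI (crtc_clock = 594000 kHz) with YUV420 output, the HDMI branch above halves dclk_rate to 297 MHz, sets K = 2 and leaves if_pixclk at 148.5 MHz; the divider fields written to the registers are log2-encoded. A minimal sketch of that arithmetic, derived from the code above rather than from vendor documentation (ilog2u() is a stand-in for the kernel's ilog2()):

    #include <stdio.h>

    static int ilog2u(unsigned long v) /* minimal stand-in for kernel ilog2() */
    {
        int r = -1;
        while (v) { v >>= 1; r++; }
        return r;
    }

    int main(void)
    {
        /* 4K@60 HDMI, YUV420 output: crtc_clock = 594000 kHz */
        unsigned long v_pixclk  = 594000UL * 1000; /* 594 MHz timing clock */
        unsigned long dclk_core = v_pixclk >> 2;   /* 148.5 MHz, 4 px/cycle */
        unsigned long dclk      = v_pixclk >> 1;   /* 297 MHz, halved for YUV420 */

        /* register fields hold log2-encoded dividers */
        printf("dclk_core_div=%d\n", ilog2u(dclk / dclk_core)); /* 1 */
        printf("if_pixclk_div=%d\n", ilog2u(2));                /* 1 */
        printf("if_dclk_div=%d\n",   ilog2u(4));                /* 2 */
        return 0;
    }
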
+
+/*
+ * MIPI port mux on rk3588:
+ * 0: Video Port 2
+ * 1: Video Port 3
+ * 3: Video Port 1 (MIPI1 only)
+ */
+static u32 rk3588_get_mipi_port_mux(int vp_id)
+{
+ if (vp_id == 1)
+ return 3;
+ else if (vp_id == 3)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 rk3588_get_hdmi_pol(u32 flags)
+{
+ u32 val;
+
+ val = (flags & DRM_MODE_FLAG_NHSYNC) ? BIT(HSYNC_POSITIVE) : 0;
+ val |= (flags & DRM_MODE_FLAG_NVSYNC) ? BIT(VSYNC_POSITIVE) : 0;
+
+ return val;
+}
+
+static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+ int dclk_core_div, dclk_out_div, if_pixclk_div, if_dclk_div;
+ unsigned long clock;
+ u32 die, dip, div, vp_clk_div, val;
+
+ clock = rk3588_calc_cru_cfg(vp, id, &dclk_core_div, &dclk_out_div,
+ &if_pixclk_div, &if_dclk_div);
+ if (!clock)
+ return 0;
+
+ vp_clk_div = FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_CORE_DIV, dclk_core_div);
+ vp_clk_div |= FIELD_PREP(RK3588_VP_CLK_CTRL__DCLK_OUT_DIV, dclk_out_div);
+
+ die = vop2_readl(vop2, RK3568_DSP_IF_EN);
+ dip = vop2_readl(vop2, RK3568_DSP_IF_POL);
+ div = vop2_readl(vop2, RK3568_DSP_IF_CTRL);
+
+ switch (id) {
+ case ROCKCHIP_VOP2_EP_HDMI0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ break;
+ case ROCKCHIP_VOP2_EP_HDMI1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ val = rk3588_get_hdmi_pol(polflags);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP0:
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ break;
+ case ROCKCHIP_VOP2_EP_EDP1:
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
+ div &= ~RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV, if_dclk_div);
+ div |= FIELD_PREP(RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI0:
+ div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI0_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX, !!val);
+ break;
+ case ROCKCHIP_VOP2_EP_MIPI1:
+ div &= ~RK3588_DSP_IF_MIPI1_PCLK_DIV;
+ div |= FIELD_PREP(RK3588_DSP_IF_MIPI1_PCLK_DIV, if_pixclk_div);
+ die &= ~RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX;
+ val = rk3588_get_mipi_port_mux(vp->id);
+ die |= RK3588_SYS_DSP_INFACE_EN_MIPI1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX, val);
+ break;
+ case ROCKCHIP_VOP2_EP_DP0:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP0_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP0 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP0_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP0_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP0_PIN_POL, polflags);
+ break;
+ case ROCKCHIP_VOP2_EP_DP1:
+ die &= ~RK3588_SYS_DSP_INFACE_EN_DP1_MUX;
+ die |= RK3588_SYS_DSP_INFACE_EN_DP1 |
+ FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_DP1_MUX, vp->id);
+ dip &= ~RK3588_DSP_IF_POL__DP1_PIN_POL;
+ dip |= FIELD_PREP(RK3588_DSP_IF_POL__DP1_PIN_POL, polflags);
+ break;
+ default:
+ drm_err(vop2->drm, "Invalid interface id %d on vp%d\n", id, vp->id);
+ return 0;
+ }
+
+ dip |= RK3568_DSP_IF_POL__CFG_DONE_IMD;
+
+ vop2_vp_write(vp, RK3588_VP_CLK_CTRL, vp_clk_div);
+ vop2_writel(vop2, RK3568_DSP_IF_EN, die);
+ vop2_writel(vop2, RK3568_DSP_IF_CTRL, div);
+ vop2_writel(vop2, RK3568_DSP_IF_POL, dip);
+
+ return clock;
+}
+
+static unsigned long vop2_set_intf_mux(struct vop2_video_port *vp, int ep_id, u32 polflags)
+{
+ struct vop2 *vop2 = vp->vop2;
+
+ if (vop2->data->soc_id == 3566 || vop2->data->soc_id == 3568)
+ return rk3568_set_intf_mux(vp, ep_id, polflags);
+ else if (vop2->data->soc_id == 3588)
+ return rk3588_set_intf_mux(vp, ep_id, polflags);
+ else
+ return 0;
}
static int us_to_vertical_line(struct drm_display_mode *mode, int us)
@@ -1587,6 +1962,8 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2->enable_count++;
+ vcstate->yuv_overlay = is_yuv_output(vcstate->bus_format);
+
vop2_crtc_enable_irq(vp, VP_INT_POST_BUF_EMPTY);
polflags = 0;
@@ -1600,11 +1977,21 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
- rk3568_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
+ /*
+ * To drive a high resolution (4K@120, 8K), the VOP on rk3588/rk3576
+ * needs to process multiple (1/2/4/8) pixels per cycle, so the dclk
+ * fed by the system CRU may be 1/2 or 1/4 of mode->clock.
+ */
+ clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
+ }
+
+ if (!clock) {
+ vop2_unlock(vop2);
+ return;
}
if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
- !(vp_data->feature & VOP_FEATURE_OUTPUT_10BIT))
+ !(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT))
out_mode = ROCKCHIP_OUT_MODE_P888;
else
out_mode = vcstate->output_mode;
@@ -1613,8 +2000,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
if (vop2_output_uv_swap(vcstate->bus_format, vcstate->output_mode))
dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_RB_SWAP;
+ if (vop2_output_rg_swap(vop2, vcstate->bus_format))
+ dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_RG_SWAP;
- if (is_yuv_output(vcstate->bus_format))
+ if (vcstate->yuv_overlay)
dsp_ctrl |= RK3568_VP_DSP_CTRL__POST_DSP_OUT_R2Y;
vop2_dither_setup(crtc, &dsp_ctrl);
@@ -1918,28 +2307,22 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
u32 layer_sel = 0;
u32 port_sel;
unsigned int nlayer, ofs;
- struct drm_display_mode *adjusted_mode;
- u16 hsync_len;
- u16 hdisplay;
- u32 bg_dly;
- u32 pre_scan_dly;
+ u32 ovl_ctrl;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
struct vop2_video_port *vp2 = &vop2->vps[2];
+ struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state);
- adjusted_mode = &vp->crtc.state->adjusted_mode;
- hsync_len = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
- hdisplay = adjusted_mode->crtc_hdisplay;
-
- bg_dly = vp->data->pre_scan_max_dly[3];
- vop2_writel(vop2, RK3568_VP_BG_MIX_CTRL(vp->id),
- FIELD_PREP(RK3568_VP_BG_MIX_CTRL__BG_DLY, bg_dly));
+ ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL);
+ ovl_ctrl |= RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD;
+ if (vcstate->yuv_overlay)
+ ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id);
+ else
+ ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id);
- pre_scan_dly = ((bg_dly + (hdisplay >> 1) - 1) << 16) | hsync_len;
- vop2_vp_write(vp, RK3568_VP_PRE_SCAN_HTIMING, pre_scan_dly);
+ vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl);
- vop2_writel(vop2, RK3568_OVL_CTRL, 0);
port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL);
port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT;
@@ -1980,6 +2363,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
port_sel &= ~RK3568_OVL_PORT_SEL__CLUSTER1;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__CLUSTER1, vp->id);
break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__CLUSTER3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__CLUSTER3, vp->id);
+ break;
case ROCKCHIP_VOP2_ESMART0:
port_sel &= ~RK3568_OVL_PORT_SEL__ESMART0;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART0, vp->id);
@@ -1988,6 +2379,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
port_sel &= ~RK3568_OVL_PORT_SEL__ESMART1;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__ESMART1, vp->id);
break;
+ case ROCKCHIP_VOP2_ESMART2:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART2;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART2, vp->id);
+ break;
+ case ROCKCHIP_VOP2_ESMART3:
+ port_sel &= ~RK3588_OVL_PORT_SEL__ESMART3;
+ port_sel |= FIELD_PREP(RK3588_OVL_PORT_SEL__ESMART3, vp->id);
+ break;
case ROCKCHIP_VOP2_SMART0:
port_sel &= ~RK3568_OVL_PORT_SEL__SMART0;
port_sel |= FIELD_PREP(RK3568_OVL_PORT_SEL__SMART0, vp->id);
@@ -2013,7 +2412,6 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel);
- vop2_writel(vop2, RK3568_OVL_CTRL, RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD);
}
static void vop2_setup_dly_for_windows(struct vop2 *vop2)
@@ -2725,8 +3123,29 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop2->lut_regs))
return PTR_ERR(vop2->lut_regs);
}
+ if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_GRF) {
+ vop2->sys_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(vop2->sys_grf))
+ return dev_err_probe(dev, PTR_ERR(vop2->sys_grf), "cannot get sys_grf");
+ }
- vop2->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (vop2_data->feature & VOP2_FEATURE_HAS_VOP_GRF) {
+ vop2->vop_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vop-grf");
+ if (IS_ERR(vop2->vop_grf))
+ return dev_err_probe(dev, PTR_ERR(vop2->vop_grf), "cannot get vop_grf");
+ }
+
+ if (vop2_data->feature & VOP2_FEATURE_HAS_VO1_GRF) {
+ vop2->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,vo1-grf");
+ if (IS_ERR(vop2->vo1_grf))
+ return dev_err_probe(dev, PTR_ERR(vop2->vo1_grf), "cannot get vo1_grf");
+ }
+
+ if (vop2_data->feature & VOP2_FEATURE_HAS_SYS_PMU) {
+ vop2->sys_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,pmu");
+ if (IS_ERR(vop2->sys_pmu))
+ return dev_err_probe(dev, PTR_ERR(vop2->sys_pmu), "cannot get sys_pmu");
+ }
vop2->hclk = devm_clk_get(vop2->dev, "hclk");
if (IS_ERR(vop2->hclk)) {
@@ -2740,6 +3159,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(vop2->aclk);
}
+ vop2->pclk = devm_clk_get_optional(vop2->dev, "pclk_vop");
+ if (IS_ERR(vop2->pclk)) {
+ drm_err(vop2->drm, "failed to get pclk source\n");
+ return PTR_ERR(vop2->pclk);
+ }
+
vop2->irq = platform_get_irq(pdev, 0);
if (vop2->irq < 0) {
drm_err(vop2->drm, "cannot find irq for vop2\n");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 56fd31e05..615a16196 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -7,16 +7,22 @@
#ifndef _ROCKCHIP_DRM_VOP2_H
#define _ROCKCHIP_DRM_VOP2_H
-#include "rockchip_drm_vop.h"
-
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include "rockchip_drm_vop.h"
-#define VOP_FEATURE_OUTPUT_10BIT BIT(0)
+#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
+
+#define VOP2_FEATURE_HAS_SYS_GRF BIT(0)
+#define VOP2_FEATURE_HAS_VO0_GRF BIT(1)
+#define VOP2_FEATURE_HAS_VO1_GRF BIT(2)
+#define VOP2_FEATURE_HAS_VOP_GRF BIT(3)
+#define VOP2_FEATURE_HAS_SYS_PMU BIT(4)
#define WIN_FEATURE_AFBDC BIT(0)
#define WIN_FEATURE_CLUSTER BIT(1)
+#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
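
HIWORD_UPDATE() builds the value/write-enable pairs that Rockchip GRF registers expect: the high 16 bits flag which bits to change, the low 16 bits carry the new field value, and bits outside the mask are left untouched by the hardware. A quick self-contained expansion (GENMASK() is redefined here only so the sketch builds outside the kernel):

    #include <stdio.h>

    #define GENMASK(h, l)  (((~0U) << (l)) & (~0U >> (31 - (h))))
    #define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))

    int main(void)
    {
        /* enable bit 1, as in the RK3588_GRF_VOP_CON2 HDMI0 write above */
        printf("0x%08x\n", HIWORD_UPDATE(1, 1, 1)); /* 0x00020002 */
        /* program the 2-bit field at bits 6:5, e.g. the HDMI0 sync polarity */
        printf("0x%08x\n", HIWORD_UPDATE(2, 6, 5)); /* 0x00600040 */
        return 0;
    }
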
/*
* the delay number of a window in different mode.
*/
@@ -39,6 +45,18 @@ enum vop2_scale_down_mode {
VOP2_SCALE_DOWN_AVG,
};
+/*
+ * vop2 internal power domain ids;
+ * they must all be non-zero, as 0 is treated as invalid
+ */
+#define VOP2_PD_CLUSTER0 BIT(0)
+#define VOP2_PD_CLUSTER1 BIT(1)
+#define VOP2_PD_CLUSTER2 BIT(2)
+#define VOP2_PD_CLUSTER3 BIT(3)
+#define VOP2_PD_DSC_8K BIT(5)
+#define VOP2_PD_DSC_4K BIT(6)
+#define VOP2_PD_ESMART BIT(7)
+
enum vop2_win_regs {
VOP2_WIN_ENABLE,
VOP2_WIN_FORMAT,
@@ -139,6 +157,7 @@ struct vop2_video_port_data {
struct vop2_data {
u8 nr_vps;
+ u64 feature;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
struct vop_rect max_input;
@@ -166,19 +185,6 @@ struct vop2_data {
#define WB_YRGB_FIFO_FULL_INTR BIT(18)
#define WB_COMPLETE_INTR BIT(19)
-/*
- * display output interface supported by rockchip lcdc
- */
-#define ROCKCHIP_OUT_MODE_P888 0
-#define ROCKCHIP_OUT_MODE_BT1120 0
-#define ROCKCHIP_OUT_MODE_P666 1
-#define ROCKCHIP_OUT_MODE_P565 2
-#define ROCKCHIP_OUT_MODE_BT656 5
-#define ROCKCHIP_OUT_MODE_S888 8
-#define ROCKCHIP_OUT_MODE_S888_DUMMY 12
-#define ROCKCHIP_OUT_MODE_YUV420 14
-/* for use special outface */
-#define ROCKCHIP_OUT_MODE_AAAA 15
enum vop_csc_format {
CSC_BT601L,
@@ -206,6 +212,11 @@ enum dst_factor_mode {
};
#define RK3568_GRF_VO_CON1 0x0364
+
+#define RK3588_GRF_SOC_CON1 0x0304
+#define RK3588_GRF_VOP_CON2 0x08
+#define RK3588_GRF_VO1_CON0 0x00
+
/* System registers definition */
#define RK3568_REG_CFG_DONE 0x000
#define RK3568_VERSION_INFO 0x004
@@ -214,6 +225,7 @@ enum dst_factor_mode {
#define RK3568_DSP_IF_EN 0x028
#define RK3568_DSP_IF_CTRL 0x02c
#define RK3568_DSP_IF_POL 0x030
+#define RK3588_SYS_PD_CTRL 0x034
#define RK3568_WB_CTRL 0x40
#define RK3568_WB_XSCAL_FACTOR 0x44
#define RK3568_WB_YRGB_MST 0x48
@@ -234,9 +246,14 @@ enum dst_factor_mode {
#define RK3568_VP_INT_RAW_STATUS(vp) (0xAC + (vp) * 0x10)
/* Video Port registers definition */
+#define RK3568_VP0_CTRL_BASE 0x0C00
+#define RK3568_VP1_CTRL_BASE 0x0D00
+#define RK3568_VP2_CTRL_BASE 0x0E00
+#define RK3588_VP3_CTRL_BASE 0x0F00
#define RK3568_VP_DSP_CTRL 0x00
#define RK3568_VP_MIPI_CTRL 0x04
#define RK3568_VP_COLOR_BAR_CTRL 0x08
+#define RK3588_VP_CLK_CTRL 0x0C
#define RK3568_VP_3D_LUT_CTRL 0x10
#define RK3568_VP_3D_LUT_MST 0x20
#define RK3568_VP_DSP_BG 0x2C
@@ -278,6 +295,17 @@ enum dst_factor_mode {
#define RK3568_SMART_DLY_NUM 0x6F8
/* Cluster register definition, offset relative to window base */
+#define RK3568_CLUSTER0_CTRL_BASE 0x1000
+#define RK3568_CLUSTER1_CTRL_BASE 0x1200
+#define RK3588_CLUSTER2_CTRL_BASE 0x1400
+#define RK3588_CLUSTER3_CTRL_BASE 0x1600
+#define RK3568_ESMART0_CTRL_BASE 0x1800
+#define RK3568_ESMART1_CTRL_BASE 0x1A00
+#define RK3568_SMART0_CTRL_BASE 0x1C00
+#define RK3568_SMART1_CTRL_BASE 0x1E00
+#define RK3588_ESMART2_CTRL_BASE 0x1C00
+#define RK3588_ESMART3_CTRL_BASE 0x1E00
+
#define RK3568_CLUSTER_WIN_CTRL0 0x00
#define RK3568_CLUSTER_WIN_CTRL1 0x04
#define RK3568_CLUSTER_WIN_YRGB_MST 0x10
@@ -371,13 +399,18 @@ enum dst_factor_mode {
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_EN BIT(17)
#define RK3568_VP_DSP_CTRL__PRE_DITHER_DOWN_EN BIT(16)
#define RK3568_VP_DSP_CTRL__POST_DSP_OUT_R2Y BIT(15)
+#define RK3568_VP_DSP_CTRL__DSP_RG_SWAP BIT(10)
#define RK3568_VP_DSP_CTRL__DSP_RB_SWAP BIT(9)
+#define RK3568_VP_DSP_CTRL__DSP_BG_SWAP BIT(8)
#define RK3568_VP_DSP_CTRL__DSP_INTERLACE BIT(7)
#define RK3568_VP_DSP_CTRL__DSP_FILED_POL BIT(6)
#define RK3568_VP_DSP_CTRL__P2I_EN BIT(5)
#define RK3568_VP_DSP_CTRL__CORE_DCLK_DIV BIT(4)
#define RK3568_VP_DSP_CTRL__OUT_MODE GENMASK(3, 0)
+#define RK3588_VP_CLK_CTRL__DCLK_OUT_DIV GENMASK(3, 2)
+#define RK3588_VP_CLK_CTRL__DCLK_CORE_DIV GENMASK(1, 0)
+
#define RK3568_VP_POST_SCL_CTRL__VSCALEDOWN BIT(1)
#define RK3568_VP_POST_SCL_CTRL__HSCALEDOWN BIT(0)
@@ -396,11 +429,37 @@ enum dst_factor_mode {
#define RK3568_SYS_DSP_INFACE_EN_HDMI BIT(1)
#define RK3568_SYS_DSP_INFACE_EN_RGB BIT(0)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI1_MUX GENMASK(22, 21)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI0_MUX GENMASK(20, 20)
+#define RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX GENMASK(19, 18)
+#define RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX GENMASK(17, 16)
+#define RK3588_SYS_DSP_INFACE_EN_DP1_MUX GENMASK(15, 14)
+#define RK3588_SYS_DSP_INFACE_EN_DP0_MUX GENMASK(13, 12)
+#define RK3588_SYS_DSP_INFACE_EN_DPI GENMASK(9, 8)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI1 BIT(7)
+#define RK3588_SYS_DSP_INFACE_EN_MIPI0 BIT(6)
+#define RK3588_SYS_DSP_INFACE_EN_HDMI1 BIT(5)
+#define RK3588_SYS_DSP_INFACE_EN_EDP1 BIT(4)
+#define RK3588_SYS_DSP_INFACE_EN_HDMI0 BIT(3)
+#define RK3588_SYS_DSP_INFACE_EN_EDP0 BIT(2)
+#define RK3588_SYS_DSP_INFACE_EN_DP1 BIT(1)
+#define RK3588_SYS_DSP_INFACE_EN_DP0 BIT(0)
+
+#define RK3588_DSP_IF_MIPI1_PCLK_DIV GENMASK(27, 26)
+#define RK3588_DSP_IF_MIPI0_PCLK_DIV GENMASK(25, 24)
+#define RK3588_DSP_IF_EDP_HDMI1_PCLK_DIV GENMASK(22, 22)
+#define RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV GENMASK(21, 20)
+#define RK3588_DSP_IF_EDP_HDMI0_PCLK_DIV GENMASK(18, 18)
+#define RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV GENMASK(17, 16)
+
#define RK3568_DSP_IF_POL__MIPI_PIN_POL GENMASK(19, 16)
#define RK3568_DSP_IF_POL__EDP_PIN_POL GENMASK(15, 12)
#define RK3568_DSP_IF_POL__HDMI_PIN_POL GENMASK(7, 4)
#define RK3568_DSP_IF_POL__RGB_LVDS_PIN_POL GENMASK(3, 0)
+#define RK3588_DSP_IF_POL__DP1_PIN_POL GENMASK(14, 12)
+#define RK3588_DSP_IF_POL__DP0_PIN_POL GENMASK(10, 8)
+
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2_PHASE_LOCK BIT(5)
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2 BIT(4)
@@ -415,14 +474,19 @@ enum dst_factor_mode {
#define VOP2_COLOR_KEY_MASK BIT(31)
#define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28)
+#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp)
#define RK3568_VP_BG_MIX_CTRL__BG_DLY GENMASK(31, 24)
#define RK3568_OVL_PORT_SEL__SEL_PORT GENMASK(31, 16)
#define RK3568_OVL_PORT_SEL__SMART1 GENMASK(31, 30)
#define RK3568_OVL_PORT_SEL__SMART0 GENMASK(29, 28)
+#define RK3588_OVL_PORT_SEL__ESMART3 GENMASK(31, 30)
+#define RK3588_OVL_PORT_SEL__ESMART2 GENMASK(29, 28)
#define RK3568_OVL_PORT_SEL__ESMART1 GENMASK(27, 26)
#define RK3568_OVL_PORT_SEL__ESMART0 GENMASK(25, 24)
+#define RK3588_OVL_PORT_SEL__CLUSTER3 GENMASK(23, 22)
+#define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20)
#define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18)
#define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16)
#define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8)
@@ -435,6 +499,10 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_DLY_NUM__CLUSTER0_1 GENMASK(15, 8)
#define RK3568_CLUSTER_DLY_NUM__CLUSTER0_0 GENMASK(7, 0)
+#define RK3568_CLUSTER_WIN_CTRL0__WIN0_EN BIT(0)
+
+#define RK3568_SMART_REGION0_CTRL__WIN0_EN BIT(0)
+
#define RK3568_SMART_DLY_NUM__SMART1 GENMASK(31, 24)
#define RK3568_SMART_DLY_NUM__SMART0 GENMASK(23, 16)
#define RK3568_SMART_DLY_NUM__ESMART1 GENMASK(15, 8)
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f2831d304..77b76cff1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -27,7 +27,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#include "rockchip_lvds.h"
#define DISPLAY_OUTPUT_RGB 0
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index c677b71ae..dbfbde246 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -19,7 +19,6 @@
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
struct rockchip_rgb {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 22288ad7f..18efb3fe1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -17,9 +17,7 @@
static const uint32_t formats_cluster[] = {
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
@@ -34,6 +32,30 @@ static const uint32_t formats_cluster[] = {
DRM_FORMAT_Y210, /* yuv422_10bit non-Linear mode only */
};
+static const uint32_t formats_esmart[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_NV12, /* yuv420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV21, /* yvu420_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV61, /* yvu422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV42, /* yvu444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
+ DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */
+ DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
+ DRM_FORMAT_YUYV, /* yuv422_8bit[YUYV] linear mode */
+ DRM_FORMAT_UYVY, /* yuv422_8bit[UYVY] linear mode */
+};
+
static const uint32_t formats_rk356x_esmart[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -48,8 +70,10 @@ static const uint32_t formats_rk356x_esmart[] = {
DRM_FORMAT_NV15, /* yuv420_10bit linear mode, 2 plane, no padding */
DRM_FORMAT_NV16, /* yuv422_8bit linear mode, 2 plane */
DRM_FORMAT_NV61, /* yuv422_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV20, /* yuv422_10bit linear mode, 2 plane, no padding */
DRM_FORMAT_NV24, /* yuv444_8bit linear mode, 2 plane */
DRM_FORMAT_NV42, /* yuv444_8bit linear mode, 2 plane */
+ DRM_FORMAT_NV30, /* yuv444_10bit linear mode, 2 plane, no padding */
DRM_FORMAT_YVYU, /* yuv422_8bit[YVYU] linear mode */
DRM_FORMAT_VYUY, /* yuv422_8bit[VYUY] linear mode */
};
@@ -110,7 +134,7 @@ static const uint64_t format_modifiers_afbc[] = {
static const struct vop2_video_port_data rk3568_vop_video_ports[] = {
{
.id = 0,
- .feature = VOP_FEATURE_OUTPUT_10BIT,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
.gamma_lut_len = 1024,
.cubic_lut_len = 9 * 9 * 9,
.max_output = { 4096, 2304 },
@@ -234,7 +258,188 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
},
};
+static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
+ {
+ .id = 0,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 9 * 9 * 9, /* 9x9x9 */
+ .max_output = { 4096, 2304 },
+ /* hdr2sdr sdr2hdr hdr2hdr sdr2sdr */
+ .pre_scan_max_dly = { 76, 65, 65, 54 },
+ .offset = 0xc00,
+ }, {
+ .id = 1,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 729, /* 9x9x9 */
+ .max_output = { 4096, 2304 },
+ .pre_scan_max_dly = { 76, 65, 65, 54 },
+ .offset = 0xd00,
+ }, {
+ .id = 2,
+ .feature = VOP2_VP_FEATURE_OUTPUT_10BIT,
+ .gamma_lut_len = 1024,
+ .cubic_lut_len = 17 * 17 * 17, /* 17x17x17 */
+ .max_output = { 4096, 2304 },
+ .pre_scan_max_dly = { 52, 52, 52, 52 },
+ .offset = 0xe00,
+ }, {
+ .id = 3,
+ .gamma_lut_len = 1024,
+ .max_output = { 2048, 1536 },
+ .pre_scan_max_dly = { 52, 52, 52, 52 },
+ .offset = 0xf00,
+ },
+};
+
+/*
+ * The rk3588 VOP has 4 Cluster windows and 4 Esmart windows.
+ * Every Cluster can work as one 4K window or be split into two windows.
+ * All windows in a Cluster support AFBCD.
+ *
+ * Every Esmart and Smart window supports 4 multi-regions.
+ *
+ * Scale filter modes:
+ *
+ * * Cluster: bicubic for horizontal scale up, bilinear for everything else
+ * * ESmart:
+ * * nearest-neighbor/bilinear/bicubic for scale up
+ * * nearest-neighbor/bilinear/average for scale down
+ *
+ * AXI read ID assignment:
+ * There are two AXI buses:
+ * AXI0 is a read/write bus with higher performance.
+ * AXI1 is a read-only bus.
+ *
+ * Every window on an AXI bus must be assigned two unique
+ * read IDs (yrgb_id/uv_id; valid IDs are 0x1~0xe).
+ *
+ * AXI0:
+ * Cluster0/1, Esmart0/1, WriteBack
+ *
+ * AXI 1:
+ * Cluster2/3, Esmart2/3
+ *
+ */
+static const struct vop2_win_data rk3588_vop_win_data[] = {
+ {
+ .name = "Cluster0-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER0,
+ .base = 0x1000,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 0,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster1-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER1,
+ .base = 0x1200,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 1,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster2-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER2,
+ .base = 0x1400,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 4,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Cluster3-win0",
+ .phys_id = ROCKCHIP_VOP2_CLUSTER3,
+ .base = 0x1600,
+ .formats = formats_cluster,
+ .nformats = ARRAY_SIZE(formats_cluster),
+ .format_modifiers = format_modifiers_afbc,
+ .layer_sel_id = 5,
+ .supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .max_upscale_factor = 4,
+ .max_downscale_factor = 4,
+ .dly = { 4, 26, 29 },
+ .feature = WIN_FEATURE_AFBDC | WIN_FEATURE_CLUSTER,
+ }, {
+ .name = "Esmart0-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART0,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .base = 0x1800,
+ .layer_sel_id = 2,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ }, {
+ .name = "Esmart1-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART1,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .base = 0x1a00,
+ .layer_sel_id = 3,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ }, {
+ .name = "Esmart2-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART2,
+ .base = 0x1c00,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .layer_sel_id = 6,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ }, {
+ .name = "Esmart3-win0",
+ .phys_id = ROCKCHIP_VOP2_ESMART3,
+ .formats = formats_esmart,
+ .nformats = ARRAY_SIZE(formats_esmart),
+ .format_modifiers = format_modifiers,
+ .base = 0x1e00,
+ .layer_sel_id = 7,
+ .supported_rotations = DRM_MODE_REFLECT_Y,
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .max_upscale_factor = 8,
+ .max_downscale_factor = 8,
+ .dly = { 23, 45, 48 },
+ },
+};
+
static const struct vop2_data rk3566_vop = {
+ .feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
.max_output = { 4096, 2304 },
@@ -245,6 +450,7 @@ static const struct vop2_data rk3566_vop = {
};
static const struct vop2_data rk3568_vop = {
+ .feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
.max_input = { 4096, 2304 },
.max_output = { 4096, 2304 },
@@ -254,6 +460,18 @@ static const struct vop2_data rk3568_vop = {
.soc_id = 3568,
};
+static const struct vop2_data rk3588_vop = {
+ .feature = VOP2_FEATURE_HAS_SYS_GRF | VOP2_FEATURE_HAS_VO1_GRF |
+ VOP2_FEATURE_HAS_VOP_GRF | VOP2_FEATURE_HAS_SYS_PMU,
+ .nr_vps = 4,
+ .max_input = { 4096, 4320 },
+ .max_output = { 4096, 4320 },
+ .vp = rk3588_vop_video_ports,
+ .win = rk3588_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .soc_id = 3588,
+};
+
static const struct of_device_id vop2_dt_match[] = {
{
.compatible = "rockchip,rk3566-vop",
@@ -262,6 +480,9 @@ static const struct of_device_id vop2_dt_match[] = {
.compatible = "rockchip,rk3568-vop",
.data = &rk3568_vop,
}, {
+ .compatible = "rockchip,rk3588-vop",
+ .data = &rk3588_vop
+ }, {
},
};
MODULE_DEVICE_TABLE(of, vop2_dt_match);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 3143ecaaf..f8ed093b7 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(drm_sched_job,
__assign_str(name, sched_job->sched->name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
- &sched_job->sched->hw_rq_count);
+ &sched_job->sched->credit_count);
),
TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->id,
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 9ee928982..58c816128 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -88,13 +88,14 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
/* The "priority" of an entity cannot exceed the number of run-queues of a
- * scheduler. Protect against num_rqs being 0, by converting to signed.
+ * scheduler. Protect against num_rqs being 0, by converting to signed. Choose
+ * the lowest priority available.
*/
if (entity->priority >= sched_list[0]->num_rqs) {
drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
entity->priority, sched_list[0]->num_rqs);
entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
- (s32) DRM_SCHED_PRIORITY_MIN);
+ (s32) DRM_SCHED_PRIORITY_KERNEL);
}
entity->rq = sched_list[0]->sched_rq[entity->priority];
}
@@ -379,7 +380,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
container_of(cb, struct drm_sched_entity, cb);
drm_sched_entity_clear_dep(f, cb);
- drm_sched_wakeup_if_can_queue(entity->rq->sched);
+ drm_sched_wakeup(entity->rq->sched, entity);
}
/**
@@ -611,7 +612,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
drm_sched_rq_update_fifo(entity, submit_ts);
- drm_sched_wakeup_if_can_queue(entity->rq->sched);
+ drm_sched_wakeup(entity->rq->sched, entity);
}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 99797a8c8..d442b8932 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -48,7 +48,30 @@
* through the jobs entity pointer.
*/
-#include <linux/kthread.h>
+/**
+ * DOC: Flow Control
+ *
+ * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
+ * in which the jobs fetched from scheduler entities are executed.
+ *
+ * In this context the &drm_gpu_scheduler keeps track of a driver-specified
+ * credit limit representing the capacity of this scheduler and a credit count;
+ * every &drm_sched_job carries a driver-specified number of credits.
+ *
+ * Once a job is executed (but not yet finished), the job's credits contribute
+ * to the scheduler's credit count until the job is finished. If by executing
+ * one more job the scheduler's credit count would exceed the scheduler's
+ * credit limit, the job won't be executed. Instead, the scheduler will wait
+ * until the credit count has decreased enough to not overflow its credit limit.
+ * This implies waiting for previously executed jobs.
+ *
+ * Optionally, drivers may register a callback (update_job_credits) provided by
+ * struct drm_sched_backend_ops to update the job's credits dynamically. The
+ * scheduler executes this callback every time the scheduler considers a job for
+ * execution and subsequently checks whether the job fits the scheduler's credit
+ * limit.
+ */
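
A minimal sketch of a driver wiring up the credit scheme described above with the reworked API; every my_* identifier, the ring size of 64, and the IB-count cost model are illustrative assumptions, not part of this patch:

    /* Optional: re-derive a job's cost each time it is considered to run. */
    static u32 my_update_job_credits(struct drm_sched_job *sched_job)
    {
        return to_my_job(sched_job)->num_ibs; /* hypothetical helper */
    }

    static const struct drm_sched_backend_ops my_sched_ops = {
        .run_job            = my_run_job,
        .timedout_job       = my_timedout_job,
        .free_job           = my_free_job,
        .update_job_credits = my_update_job_credits, /* may be NULL */
    };

    /* credit_limit takes the place of the old hw_submission limit */
    ret = drm_sched_init(&mydev->sched, &my_sched_ops,
                         NULL, /* let the scheduler allocate an ordered wq */
                         DRM_SCHED_PRIORITY_COUNT,
                         64,   /* credit_limit: ring capacity in credits */
                         my_hang_limit, my_timeout, NULL, NULL,
                         "my-ring", mydev->dev);

    /* every job now declares a non-zero credit cost at init time */
    ret = drm_sched_job_init(&myjob->base, &myctx->entity,
                             myjob->num_ibs, myjob);
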
+
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -76,6 +99,51 @@ int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
+static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
+{
+ u32 credits;
+
+ drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
+ atomic_read(&sched->credit_count),
+ &credits));
+
+ return credits;
+}
+
+/**
+ * drm_sched_can_queue -- Can we queue more to the hardware?
+ * @sched: scheduler instance
+ * @entity: the scheduler entity
+ *
+ * Return true if we can push at least one more job from @entity, false
+ * otherwise.
+ */
+static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+{
+ struct drm_sched_job *s_job;
+
+ s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+ if (!s_job)
+ return false;
+
+ if (sched->ops->update_job_credits) {
+ s_job->credits = sched->ops->update_job_credits(s_job);
+
+ drm_WARN(sched, !s_job->credits,
+ "Jobs with zero credits bypass job-flow control.\n");
+ }
+
+ /* If a job exceeds the credit limit, truncate it to the credit limit
+ * itself to guarantee forward progress.
+ */
+ if (drm_WARN(sched, s_job->credits > sched->credit_limit,
+ "Jobs may not exceed the credit limit, truncate.\n"))
+ s_job->credits = sched->credit_limit;
+
+ return drm_sched_available_credits(sched) >= s_job->credits;
+}
+
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
const struct rb_node *b)
{
@@ -187,12 +255,18 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
/**
* drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
*
+ * @sched: the gpu scheduler
* @rq: scheduler run queue to check.
*
- * Try to find a ready entity, returns NULL if none found.
+ * Try to find the next ready entity.
+ *
+ * Return an entity if one is found; return an error-pointer (!NULL) if an
+ * entity was ready but the scheduler had insufficient credits to accommodate
+ * its job; return NULL if no ready entity was found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
struct drm_sched_entity *entity;
@@ -202,6 +276,14 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
if (drm_sched_entity_is_ready(entity)) {
+ /* If we can't queue yet, preserve the current
+ * entity in terms of fairness.
+ */
+ if (!drm_sched_can_queue(sched, entity)) {
+ spin_unlock(&rq->lock);
+ return ERR_PTR(-ENOSPC);
+ }
+
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
@@ -211,8 +293,15 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
}
list_for_each_entry(entity, &rq->entities, list) {
-
if (drm_sched_entity_is_ready(entity)) {
+ /* If we can't queue yet, preserve the current entity in
+ * terms of fairness.
+ */
+ if (!drm_sched_can_queue(sched, entity)) {
+ spin_unlock(&rq->lock);
+ return ERR_PTR(-ENOSPC);
+ }
+
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
@@ -231,12 +320,18 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
/**
* drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
*
+ * @sched: the gpu scheduler
* @rq: scheduler run queue to check.
*
- * Find oldest waiting ready entity, returns NULL if none found.
+ * Find oldest waiting ready entity.
+ *
+ * Return an entity if one is found; return an error-pointer (!NULL) if an
+ * entity was ready but the scheduler had insufficient credits to accommodate
+ * its job; return NULL if no ready entity was found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
+ struct drm_sched_rq *rq)
{
struct rb_node *rb;
@@ -246,6 +341,14 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
if (drm_sched_entity_is_ready(entity)) {
+ /* If we can't queue yet, preserve the current entity in
+ * terms of fairness.
+ */
+ if (!drm_sched_can_queue(sched, entity)) {
+ spin_unlock(&rq->lock);
+ return ERR_PTR(-ENOSPC);
+ }
+
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
break;
@@ -257,6 +360,42 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_run_job_queue - enqueue run-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
+{
+ if (!READ_ONCE(sched->pause_submit))
+ queue_work(sched->submit_wq, &sched->work_run_job);
+}
+
+/**
+ * __drm_sched_run_free_queue - enqueue free-job work
+ * @sched: scheduler instance
+ */
+static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+{
+ if (!READ_ONCE(sched->pause_submit))
+ queue_work(sched->submit_wq, &sched->work_free_job);
+}
+
+/**
+ * drm_sched_run_free_queue - enqueue free-job work if ready
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *job;
+
+ spin_lock(&sched->job_list_lock);
+ job = list_first_entry_or_null(&sched->pending_list,
+ struct drm_sched_job, list);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished))
+ __drm_sched_run_free_queue(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
+/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -267,7 +406,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
struct drm_sched_fence *s_fence = s_job->s_fence;
struct drm_gpu_scheduler *sched = s_fence->sched;
- atomic_dec(&sched->hw_rq_count);
+ atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
trace_drm_sched_process_job(s_fence);
@@ -275,7 +414,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
dma_fence_put(&s_fence->finished);
- wake_up_interruptible(&sched->wake_up_worker);
+ __drm_sched_run_free_queue(sched);
}
/**
@@ -299,10 +438,35 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
*/
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
+ lockdep_assert_held(&sched->job_list_lock);
+
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!list_empty(&sched->pending_list))
- queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
+ mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
+}
+
+static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
+}
+
+/**
+ * drm_sched_tdr_queue_imm - immediately start job timeout handler
+ *
+ * @sched: scheduler for which the timeout handling should be started.
+ *
+ * Start timeout handling immediately for the named scheduler.
+ */
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
+{
+ spin_lock(&sched->job_list_lock);
+ sched->timeout = 0;
+ drm_sched_start_timeout(sched);
+ spin_unlock(&sched->job_list_lock);
}
+EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
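
Unlike drm_sched_fault(), which only kicks the timeout work once, this helper also zeroes the scheduler's timeout, so every subsequently armed timer fires immediately. A sketch of an assumed use from a driver's teardown path (my_* names are hypothetical):

    /* expire all outstanding jobs at once before tearing the device down */
    static void my_begin_device_removal(struct my_device *mydev)
    {
        drm_sched_tdr_queue_imm(&mydev->sched);
    }
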
/**
* drm_sched_fault - immediately start timeout handler
@@ -388,7 +552,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
- /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
+ /* Protects against concurrent deletion in drm_sched_get_finished_job */
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
@@ -416,11 +580,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
spin_unlock(&sched->job_list_lock);
}
- if (status != DRM_GPU_SCHED_STAT_ENODEV) {
- spin_lock(&sched->job_list_lock);
- drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
- }
+ if (status != DRM_GPU_SCHED_STAT_ENODEV)
+ drm_sched_start_timeout_unlocked(sched);
}
/**
@@ -439,13 +600,13 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- kthread_park(sched->thread);
+ drm_sched_wqueue_stop(sched);
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_cleanup_job cannot race against us and release the
+ * drm_sched_get_finished_job cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
+ * (earlier) cleanups and drm_sched_get_finished_job will not be called
* now until the scheduler thread is unparked.
*/
if (bad && bad->sched == sched)
@@ -468,7 +629,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
&s_job->cb)) {
dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
- atomic_dec(&sched->hw_rq_count);
+ atomic_sub(s_job->credits, &sched->credit_count);
} else {
/*
* remove job from pending_list.
@@ -529,7 +690,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
- atomic_inc(&sched->hw_rq_count);
+ atomic_add(s_job->credits, &sched->credit_count);
if (!full_recovery)
continue;
@@ -546,13 +707,10 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
drm_sched_job_done(s_job, -ECANCELED);
}
- if (full_recovery) {
- spin_lock(&sched->job_list_lock);
- drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
- }
+ if (full_recovery)
+ drm_sched_start_timeout_unlocked(sched);
- kthread_unpark(sched->thread);
+ drm_sched_wqueue_start(sched);
}
EXPORT_SYMBOL(drm_sched_start);
@@ -613,6 +771,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* drm_sched_job_init - init a scheduler job
* @job: scheduler job to init
* @entity: scheduler entity to use
+ * @credits: the number of credits this job contributes to the scheduler's
+ * credit limit
* @owner: job owner for debugging
*
* Refer to drm_sched_entity_push_job() documentation
@@ -630,7 +790,7 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- void *owner)
+ u32 credits, void *owner)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -641,7 +801,13 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -ENOENT;
}
+ if (unlikely(!credits)) {
+ pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
+ return -EINVAL;
+ }
+
job->entity = entity;
+ job->credits = credits;
job->s_fence = drm_sched_fence_alloc(entity, owner);
if (!job->s_fence)
return -ENOMEM;
@@ -854,27 +1020,17 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
EXPORT_SYMBOL(drm_sched_job_cleanup);
/**
- * drm_sched_can_queue -- Can we queue more to the hardware?
- * @sched: scheduler instance
- *
- * Return true if we can push more jobs to the hw, otherwise false.
- */
-static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
-{
- return atomic_read(&sched->hw_rq_count) <
- sched->hw_submission_limit;
-}
-
-/**
- * drm_sched_wakeup_if_can_queue - Wake up the scheduler
+ * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
* @sched: scheduler instance
+ * @entity: the scheduler entity
*
* Wake up the scheduler if we can queue jobs.
*/
-void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
- if (drm_sched_can_queue(sched))
- wake_up_interruptible(&sched->wake_up_worker);
+ if (drm_sched_can_queue(sched, entity))
+ drm_sched_run_job_queue(sched);
}
/**
@@ -882,7 +1038,11 @@ void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
*
* @sched: scheduler instance
*
- * Returns the entity to process or NULL if none are found.
+ * Return an entity to process or NULL if none are found.
+ *
+ * Note that we break out of the for-loop when "entity" is non-NULL, which can
+ * also be an error pointer; this ensures we don't process lower-priority
+ * run-queues. See comments in the respective called functions.
*/
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
@@ -890,23 +1050,21 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
struct drm_sched_entity *entity;
int i;
- if (!drm_sched_can_queue(sched))
- return NULL;
-
- /* Kernel run queue has higher priority than normal run queue*/
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ /* Start with the highest priority.
+ */
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
- drm_sched_rq_select_entity_fifo(sched->sched_rq[i]) :
- drm_sched_rq_select_entity_rr(sched->sched_rq[i]);
+ drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
+ drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
if (entity)
break;
}
- return entity;
+ return IS_ERR(entity) ? NULL : entity;
}
/**
- * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
+ * drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
@@ -914,7 +1072,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
@@ -934,8 +1092,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
typeof(*next), list);
if (next) {
- next->s_fence->scheduled.timestamp =
- dma_fence_timestamp(&job->s_fence->finished);
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
+ &next->s_fence->scheduled.flags))
+ next->s_fence->scheduled.timestamp =
+ dma_fence_timestamp(&job->s_fence->finished);
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
@@ -985,91 +1145,84 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
EXPORT_SYMBOL(drm_sched_pick_best);
/**
- * drm_sched_blocked - check if the scheduler is blocked
- *
- * @sched: scheduler instance
+ * drm_sched_free_job_work - worker to call free_job
*
- * Returns true if blocked, otherwise false.
+ * @w: free job work
*/
-static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+static void drm_sched_free_job_work(struct work_struct *w)
{
- if (kthread_should_park()) {
- kthread_parkme();
- return true;
- }
+ struct drm_gpu_scheduler *sched =
+ container_of(w, struct drm_gpu_scheduler, work_free_job);
+ struct drm_sched_job *job;
+
+ if (READ_ONCE(sched->pause_submit))
+ return;
- return false;
+ job = drm_sched_get_finished_job(sched);
+ if (job)
+ sched->ops->free_job(job);
+
+ drm_sched_run_free_queue(sched);
+ drm_sched_run_job_queue(sched);
}
/**
- * drm_sched_main - main scheduler thread
- *
- * @param: scheduler instance
+ * drm_sched_run_job_work - worker to call run_job
*
- * Returns 0.
+ * @w: run job work
*/
-static int drm_sched_main(void *param)
+static void drm_sched_run_job_work(struct work_struct *w)
{
- struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+ struct drm_gpu_scheduler *sched =
+ container_of(w, struct drm_gpu_scheduler, work_run_job);
+ struct drm_sched_entity *entity;
+ struct dma_fence *fence;
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
int r;
- sched_set_fifo_low(current);
-
- while (!kthread_should_stop()) {
- struct drm_sched_entity *entity = NULL;
- struct drm_sched_fence *s_fence;
- struct drm_sched_job *sched_job;
- struct dma_fence *fence;
- struct drm_sched_job *cleanup_job = NULL;
-
- wait_event_interruptible(sched->wake_up_worker,
- (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
- (!drm_sched_blocked(sched) &&
- (entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop());
-
- if (cleanup_job)
- sched->ops->free_job(cleanup_job);
-
- if (!entity)
- continue;
-
- sched_job = drm_sched_entity_pop_job(entity);
+ if (READ_ONCE(sched->pause_submit))
+ return;
- if (!sched_job) {
- complete_all(&entity->entity_idle);
- continue;
- }
+ /* Find entity with a ready job */
+ entity = drm_sched_select_entity(sched);
+ if (!entity)
+ return; /* No more work */
- s_fence = sched_job->s_fence;
+ sched_job = drm_sched_entity_pop_job(entity);
+ if (!sched_job) {
+ complete_all(&entity->entity_idle);
+ drm_sched_run_job_queue(sched);
+ return;
+ }
- atomic_inc(&sched->hw_rq_count);
- drm_sched_job_begin(sched_job);
+ s_fence = sched_job->s_fence;
- trace_drm_run_job(sched_job, entity);
- fence = sched->ops->run_job(sched_job);
- complete_all(&entity->entity_idle);
- drm_sched_fence_scheduled(s_fence, fence);
+ atomic_add(sched_job->credits, &sched->credit_count);
+ drm_sched_job_begin(sched_job);
- if (!IS_ERR_OR_NULL(fence)) {
- /* Drop for original kref_init of the fence */
- dma_fence_put(fence);
+ trace_drm_run_job(sched_job, entity);
+ fence = sched->ops->run_job(sched_job);
+ complete_all(&entity->entity_idle);
+ drm_sched_fence_scheduled(s_fence, fence);
- r = dma_fence_add_callback(fence, &sched_job->cb,
- drm_sched_job_done_cb);
- if (r == -ENOENT)
- drm_sched_job_done(sched_job, fence->error);
- else if (r)
- DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
- r);
- } else {
- drm_sched_job_done(sched_job, IS_ERR(fence) ?
- PTR_ERR(fence) : 0);
- }
+ if (!IS_ERR_OR_NULL(fence)) {
+ /* Drop for original kref_init of the fence */
+ dma_fence_put(fence);
- wake_up(&sched->job_scheduled);
+ r = dma_fence_add_callback(fence, &sched_job->cb,
+ drm_sched_job_done_cb);
+ if (r == -ENOENT)
+ drm_sched_job_done(sched_job, fence->error);
+ else if (r)
+ DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+ } else {
+ drm_sched_job_done(sched_job, IS_ERR(fence) ?
+ PTR_ERR(fence) : 0);
}
- return 0;
+
+ wake_up(&sched->job_scheduled);
+ drm_sched_run_job_queue(sched);
}
/**
@@ -1077,8 +1230,10 @@ static int drm_sched_main(void *param)
*
* @sched: scheduler instance
* @ops: backend operations for this scheduler
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used
* @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
- * @hw_submission: number of hw submissions that can be in flight
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
* @hang_limit: number of times to allow a job to hang before dropping it
* @timeout: timeout value in jiffies for the scheduler
* @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
@@ -1091,14 +1246,15 @@ static int drm_sched_main(void *param)
*/
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
+ struct workqueue_struct *submit_wq,
+ u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name, struct device *dev)
{
int i, ret;
sched->ops = ops;
- sched->hw_submission_limit = hw_submission;
+ sched->credit_limit = credit_limit;
sched->name = name;
sched->timeout = timeout;
sched->timeout_wq = timeout_wq ? : system_wq;
@@ -1121,46 +1277,50 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
return 0;
}
+ if (submit_wq) {
+ sched->submit_wq = submit_wq;
+ sched->own_submit_wq = false;
+ } else {
+ sched->submit_wq = alloc_ordered_workqueue(name, 0);
+ if (!sched->submit_wq)
+ return -ENOMEM;
+
+ sched->own_submit_wq = true;
+ }
+ ret = -ENOMEM;
sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
GFP_KERNEL | __GFP_ZERO);
- if (!sched->sched_rq) {
- drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
- return -ENOMEM;
- }
+ if (!sched->sched_rq)
+ goto Out_free;
sched->num_rqs = num_rqs;
- ret = -ENOMEM;
- for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
goto Out_unroll;
drm_sched_rq_init(sched, sched->sched_rq[i]);
}
- init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
INIT_LIST_HEAD(&sched->pending_list);
spin_lock_init(&sched->job_list_lock);
- atomic_set(&sched->hw_rq_count, 0);
+ atomic_set(&sched->credit_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
+ INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
+ INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
atomic_set(&sched->_score, 0);
atomic64_set(&sched->job_id_count, 0);
-
- /* Each scheduler will run on a seperate kernel thread */
- sched->thread = kthread_run(drm_sched_main, sched, sched->name);
- if (IS_ERR(sched->thread)) {
- ret = PTR_ERR(sched->thread);
- sched->thread = NULL;
- DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
- goto Out_unroll;
- }
+ sched->pause_submit = false;
sched->ready = true;
return 0;
Out_unroll:
- for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
+ for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
+Out_free:
kfree(sched->sched_rq);
sched->sched_rq = NULL;
+ if (sched->own_submit_wq)
+ destroy_workqueue(sched->submit_wq);
drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
return ret;
}
@@ -1178,10 +1338,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
struct drm_sched_entity *s_entity;
int i;
- if (sched->thread)
- kthread_stop(sched->thread);
+ drm_sched_wqueue_stop(sched);
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
@@ -1202,6 +1361,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);
+ if (sched->own_submit_wq)
+ destroy_workqueue(sched->submit_wq);
sched->ready = false;
kfree(sched->sched_rq);
sched->sched_rq = NULL;
@@ -1231,9 +1392,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
- for (i = DRM_SCHED_PRIORITY_MIN;
- i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
- i++) {
+ for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
@@ -1252,3 +1411,42 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
}
}
EXPORT_SYMBOL(drm_sched_increase_karma);
+
+/**
+ * drm_sched_wqueue_ready - Is the scheduler ready for submission
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if submission is ready
+ */
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
+{
+ return sched->ready;
+}
+EXPORT_SYMBOL(drm_sched_wqueue_ready);
+
+/**
+ * drm_sched_wqueue_stop - stop scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
+{
+ WRITE_ONCE(sched->pause_submit, true);
+ cancel_work_sync(&sched->work_run_job);
+ cancel_work_sync(&sched->work_free_job);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_stop);
+
+/**
+ * drm_sched_wqueue_start - start scheduler submission
+ *
+ * @sched: scheduler instance
+ */
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
+{
+ WRITE_ONCE(sched->pause_submit, false);
+ queue_work(sched->submit_wq, &sched->work_run_job);
+ queue_work(sched->submit_wq, &sched->work_free_job);
+}
+EXPORT_SYMBOL(drm_sched_wqueue_start);
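
With the kthread gone, drivers pause and resume submission through the helpers added above instead of stopping a thread. A hedged sketch of a reset path built on them; my_gpu and my_hw_reset() are placeholders, not kernel API:

```c
static void my_gpu_reset(struct my_gpu *gpu)
{
	/* Park work_run_job and work_free_job before touching hardware */
	if (drm_sched_wqueue_ready(&gpu->sched))
		drm_sched_wqueue_stop(&gpu->sched);

	my_hw_reset(gpu);		/* device-specific recovery */

	/* Clear pause_submit and requeue both work items */
	drm_sched_wqueue_start(&gpu->sched);
}
```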
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index e0174f82e..3d0e093a7 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -319,7 +319,7 @@ static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
pwm_init_state(ssd130x->pwm, &pwmstate);
pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
- pwm_apply_state(ssd130x->pwm, &pwmstate);
+ pwm_apply_might_sleep(ssd130x->pwm, &pwmstate);
/* Enable the PWM */
pwm_enable(ssd130x->pwm);
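
pwm_apply_state() was renamed tree-wide to pwm_apply_might_sleep(); the semantics are unchanged, the new name only spells out that the call may sleep. A minimal sketch of the sequence, assuming an already-acquired pwm handle:

```c
struct pwm_state state;

pwm_init_state(pwm, &state);
pwm_set_relative_duty_cycle(&state, 50, 100);	/* 50% duty cycle */
pwm_apply_might_sleep(pwm, &state);		/* must not run in atomic context */
```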
@@ -808,7 +808,8 @@ static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect,
- u8 *buf, u8 *data_array)
+ u8 *buf, u8 *data_array,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
struct iosys_map dst;
@@ -826,7 +827,7 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
return ret;
iosys_map_set_vaddr(&dst, buf);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -838,7 +839,8 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
struct drm_rect *rect, u8 *buf,
- u8 *data_array)
+ u8 *data_array,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
unsigned int dst_pitch = drm_rect_width(rect);
@@ -855,7 +857,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
return ret;
iosys_map_set_vaddr(&dst, buf);
- drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -871,6 +873,7 @@ static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = &ssd130x_state->base;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state = NULL;
const struct drm_format_info *fi;
@@ -895,6 +898,16 @@ static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+ if (plane_state->fb->format != fi) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state,
+ pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->buffer)
return -ENOMEM;
@@ -909,6 +922,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = &ssd130x_state->base;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state = NULL;
const struct drm_format_info *fi;
@@ -933,6 +947,16 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
+ if (plane_state->fb->format != fi) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state,
+ pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->buffer)
return -ENOMEM;
@@ -968,7 +992,8 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
ssd130x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
ssd130x_plane_state->buffer,
- ssd130x_crtc_state->data_array);
+ ssd130x_crtc_state->data_array,
+ &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
@@ -1002,7 +1027,8 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
ssd132x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
ssd130x_plane_state->buffer,
- ssd130x_crtc_state->data_array);
+ ssd130x_crtc_state->data_array,
+ &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
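
The atomic_check hunks above reserve the format-conversion scratch buffer ahead of time, so the commit path can no longer fail on allocation. A hedged sketch of the two halves of that pattern; min_pitch stands in for the driver's computed pitch:

```c
/* In .atomic_check: reserve (or grow) the per-plane scratch buffer */
buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state,
				    min_pitch, GFP_KERNEL);
if (!buf)
	return -ENOMEM;

/* In .atomic_update: the conversion helpers then draw through it */
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect,
			&shadow_plane_state->fmtcnv_state);
```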
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index acf7cedf0..075c5c3ee 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -17,7 +17,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_plane_helper.h>
#include <linux/regmap.h>
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 48183bbd0..deb3bb96e 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -859,16 +859,14 @@ static int sprd_dpu_probe(struct platform_device *pdev)
return component_add(&pdev->dev, &dpu_component_ops);
}
-static int sprd_dpu_remove(struct platform_device *pdev)
+static void sprd_dpu_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &dpu_component_ops);
-
- return 0;
}
struct platform_driver sprd_dpu_driver = {
.probe = sprd_dpu_probe,
- .remove = sprd_dpu_remove,
+ .remove_new = sprd_dpu_remove,
.driver = {
.name = "sprd-dpu-drv",
.of_match_table = dpu_match_table,
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index 0aa39156f..a74cd0caf 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -138,10 +138,9 @@ static int sprd_drm_probe(struct platform_device *pdev)
return drm_of_component_probe(&pdev->dev, component_compare_of, &drm_component_ops);
}
-static int sprd_drm_remove(struct platform_device *pdev)
+static void sprd_drm_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &drm_component_ops);
- return 0;
}
static void sprd_drm_shutdown(struct platform_device *pdev)
@@ -164,7 +163,7 @@ MODULE_DEVICE_TABLE(of, drm_match_table);
static struct platform_driver sprd_drm_driver = {
.probe = sprd_drm_probe,
- .remove = sprd_drm_remove,
+ .remove_new = sprd_drm_remove,
.shutdown = sprd_drm_shutdown,
.driver = {
.name = "sprd-drm-drv",
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index d7b143a75..0b69c140e 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -1051,18 +1051,16 @@ static int sprd_dsi_probe(struct platform_device *pdev)
return mipi_dsi_host_register(&dsi->host);
}
-static int sprd_dsi_remove(struct platform_device *pdev)
+static void sprd_dsi_remove(struct platform_device *pdev)
{
struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev);
mipi_dsi_host_unregister(&dsi->host);
-
- return 0;
}
struct platform_driver sprd_dsi_driver = {
.probe = sprd_dsi_probe,
- .remove = sprd_dsi_remove,
+ .remove_new = sprd_dsi_remove,
.driver = {
.name = "sprd-dsi-drv",
.of_match_table = dsi_match_table,
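
All three sprd drivers switch to .remove_new, the platform-bus remove callback that returns void; the bus core always ignored the int result, so the dead return 0 goes away. A generic sketch with hypothetical my_* names:

```c
static void my_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &my_component_ops);
	/* nothing to return */
}

static struct platform_driver my_driver = {
	.probe      = my_probe,
	.remove_new = my_remove,
};
```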
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
index d1a65a921..f5f62eb0e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -302,7 +302,6 @@ int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
return -ENOMEM;
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
adap->algo = &sun4i_hdmi_i2c_algorithm;
strscpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
i2c_set_adapdata(adap, hdmi);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 373bcd792..03d1c76ae 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
- if (domain && domain != tegra->domain)
+ if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
+ domain != tegra->domain)
return 0;
if (tegra->domain) {
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 4cfa4df21..417fb8842 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -24,6 +24,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index d5a3d3f4f..833415766 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -20,6 +20,7 @@
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index ba7baa622..d6183b3d7 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -9,15 +9,16 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_connector_test.o \
drm_damage_helper_test.o \
drm_dp_mst_helper_test.o \
+ drm_exec_test.o \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
+ drm_gem_shmem_test.o \
drm_managed_test.o \
drm_mm_test.o \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o \
- drm_exec_test.o
+ drm_rect_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 09ee6f6af..e48863a44 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -8,14 +8,12 @@
#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
+#include <linux/sizes.h>
#include <drm/drm_buddy.h>
#include "../lib/drm_random.h"
-#define TIMEOUT(name__) \
- unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
-
static unsigned int random_seed;
static inline u64 get_size(int order, u64 chunk_size)
@@ -23,303 +21,293 @@ static inline u64 get_size(int order, u64 chunk_size)
return (1 << order) * chunk_size;
}
-__printf(2, 3)
-static bool __timeout(unsigned long timeout, const char *fmt, ...)
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
- va_list va;
-
- if (!signal_pending(current)) {
- cond_resched();
- if (time_before(jiffies, timeout))
- return false;
- }
-
- if (fmt) {
- va_start(va, fmt);
- vprintk(fmt, va);
- va_end(va);
- }
-
- return true;
-}
-
-static void __dump_block(struct kunit *test, struct drm_buddy *mm,
- struct drm_buddy_block *block, bool buddy)
-{
- kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
- block->header, drm_buddy_block_state(block),
- drm_buddy_block_order(block), drm_buddy_block_offset(block),
- drm_buddy_block_size(mm, block), !block->parent, buddy);
-}
-
-static void dump_block(struct kunit *test, struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
-
- __dump_block(test, mm, block, false);
-
- buddy = drm_get_buddy(block);
- if (buddy)
- __dump_block(test, mm, buddy, true);
-}
-
-static int check_block(struct kunit *test, struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
- unsigned int block_state;
- u64 block_size;
- u64 offset;
- int err = 0;
-
- block_state = drm_buddy_block_state(block);
-
- if (block_state != DRM_BUDDY_ALLOCATED &&
- block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
- kunit_err(test, "block state mismatch\n");
- err = -EINVAL;
- }
+ u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ DRM_RND_STATE(prng, random_seed);
+ unsigned int i, count, *order;
+ struct drm_buddy mm;
+ LIST_HEAD(allocated);
- block_size = drm_buddy_block_size(mm, block);
- offset = drm_buddy_block_offset(block);
+ bias_size = SZ_1M;
+ ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+ ps = max(SZ_4K, ps);
+ mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
- if (block_size < mm->chunk_size) {
- kunit_err(test, "block size smaller than min size\n");
- err = -EINVAL;
- }
+ kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
- /* We can't use is_power_of_2() for a u64 on 32-bit systems. */
- if (block_size & (block_size - 1)) {
- kunit_err(test, "block size not power of two\n");
- err = -EINVAL;
- }
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
- if (!IS_ALIGNED(block_size, mm->chunk_size)) {
- kunit_err(test, "block size not aligned to min size\n");
- err = -EINVAL;
- }
+ count = mm_size / bias_size;
+ order = drm_random_order(count, &prng);
+ KUNIT_EXPECT_TRUE(test, order);
- if (!IS_ALIGNED(offset, mm->chunk_size)) {
- kunit_err(test, "block offset not aligned to min size\n");
- err = -EINVAL;
- }
+ /*
+ * Idea is to split the address space into uniform bias ranges, and then
+ * in some random order allocate within each bias, using various
+ * patterns within. This should detect if allocations leak out from a
+ * given bias, for example.
+ */
- if (!IS_ALIGNED(offset, block_size)) {
- kunit_err(test, "block offset not aligned to block size\n");
- err = -EINVAL;
+ for (i = 0; i < count; i++) {
+ LIST_HEAD(tmp);
+ u32 size;
+
+ bias_start = order[i] * bias_size;
+ bias_end = bias_start + bias_size;
+ bias_rem = bias_size;
+
+ /* internal round_up too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, bias_size,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+
+ /* size too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size + ps, ps);
+
+ /* bias range too small for size */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end, bias_size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end, bias_size, ps);
+
+ /* bias misaligned */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end - ps,
+ bias_size >> 1, bias_size >> 1,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+ /* single big page */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* single page with internal round_up */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, ps, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, ps, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* random size within */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ if (size)
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ bias_rem -= size;
+ /* too big for current avail */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_rem + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_rem + ps, ps);
+
+ if (bias_rem) {
+ /* random fill of the remainder */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ size = max(size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ /*
+ * Intentionally allow some space to be left
+ * unallocated, and ideally not always on the bias
+ * boundaries.
+ */
+ drm_buddy_free_list(&mm, &tmp);
+ } else {
+ list_splice_tail(&tmp, &allocated);
+ }
}
- buddy = drm_get_buddy(block);
-
- if (!buddy && block->parent) {
- kunit_err(test, "buddy has gone fishing\n");
- err = -EINVAL;
- }
+ kfree(order);
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
- if (buddy) {
- if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
- kunit_err(test, "buddy has wrong offset\n");
- err = -EINVAL;
- }
+ /*
+ * Something more free-form. Idea is to pick a random starting bias
+ * range within the address space and then start filling it up. Also
+ * randomly grow the bias range in both directions as we go along. This
+ * should give us bias start/end which is not always uniform like above,
+ * and in some cases will require the allocator to jump over already
+ * allocated nodes in the middle of the address space.
+ */
- if (drm_buddy_block_size(mm, buddy) != block_size) {
- kunit_err(test, "buddy size mismatch\n");
- err = -EINVAL;
- }
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
- if (drm_buddy_block_state(buddy) == block_state &&
- block_state == DRM_BUDDY_FREE) {
- kunit_err(test, "block and its buddy are free\n");
- err = -EINVAL;
- }
- }
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ do {
+ u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ bias_rem -= size;
+
+ /*
+ * Try to randomly grow the bias range in both directions, or
+ * only one, or perhaps don't grow at all.
+ */
+ do {
+ u32 old_bias_start = bias_start;
+ u32 old_bias_end = bias_end;
+
+ if (bias_start)
+ bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+ if (bias_end != mm_size)
+ bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+ bias_rem += old_bias_start - bias_start;
+ bias_rem += bias_end - old_bias_end;
+ } while (!bias_rem && (bias_start || bias_end != mm_size));
+ } while (bias_rem);
+
+ KUNIT_ASSERT_EQ(test, bias_start, 0);
+ KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+ ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc passed with bias(%x-%x), size=%u\n",
+ bias_start, bias_end, ps);
- return err;
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
}
-static int check_blocks(struct kunit *test, struct drm_buddy *mm,
- struct list_head *blocks, u64 expected_size, bool is_contiguous)
+static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
+ const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
+ unsigned long i, n_pages, total;
struct drm_buddy_block *block;
- struct drm_buddy_block *prev;
- u64 total;
- int err = 0;
-
- block = NULL;
- prev = NULL;
- total = 0;
-
- list_for_each_entry(block, blocks, link) {
- err = check_block(test, mm, block);
-
- if (!drm_buddy_block_is_allocated(block)) {
- kunit_err(test, "block not allocated\n");
- err = -EINVAL;
- }
-
- if (is_contiguous && prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(block);
-
- if (offset != (prev_offset + prev_block_size)) {
- kunit_err(test, "block offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- if (err)
- break;
-
- total += drm_buddy_block_size(mm, block);
- prev = block;
- }
-
- if (!err) {
- if (total != expected_size) {
- kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
- expected_size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- kunit_err(test, "prev block, dump:\n");
- dump_block(test, mm, prev);
- }
+ struct drm_buddy mm;
+ LIST_HEAD(left);
+ LIST_HEAD(middle);
+ LIST_HEAD(right);
+ LIST_HEAD(allocated);
- kunit_err(test, "bad block, dump:\n");
- dump_block(test, mm, block);
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
- return err;
-}
+ /*
+ * Idea is to fragment the address space by alternating block
+ * allocations between three lists: one each for left, middle and
+ * right. We can then free a list to simulate fragmentation. In
+ * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
+ * including the try_harder path.
+ */
-static int check_mm(struct kunit *test, struct drm_buddy *mm)
-{
- struct drm_buddy_block *root;
- struct drm_buddy_block *prev;
- unsigned int i;
- u64 total;
- int err = 0;
-
- if (!mm->n_roots) {
- kunit_err(test, "n_roots is zero\n");
- return -EINVAL;
- }
+ i = 0;
+ n_pages = mm_size / ps;
+ do {
+ struct list_head *list;
+ int slot = i % 3;
+
+ if (slot == 0)
+ list = &left;
+ else if (slot == 1)
+ list = &middle;
+ else
+ list = &right;
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list, 0),
+ "buddy_alloc hit an error size=%lu\n",
+ ps);
+ } while (++i < n_pages);
+
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
+
+ drm_buddy_free_list(&mm, &middle);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &right);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
+ /*
+ * At this point we should have enough contiguous space for 2 blocks,
+ * however they are never buddies (since we freed middle and right) so
+ * will require the try_harder logic to find them.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 2 * ps);
- if (mm->n_roots != hweight64(mm->size)) {
- kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
- mm->n_roots, hweight64(mm->size));
- return -EINVAL;
- }
+ drm_buddy_free_list(&mm, &left);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 3 * ps);
- root = NULL;
- prev = NULL;
total = 0;
+ list_for_each_entry(block, &allocated, link)
+ total += drm_buddy_block_size(&mm, block);
- for (i = 0; i < mm->n_roots; ++i) {
- struct drm_buddy_block *block;
- unsigned int order;
-
- root = mm->roots[i];
- if (!root) {
- kunit_err(test, "root(%u) is NULL\n", i);
- err = -EINVAL;
- break;
- }
-
- err = check_block(test, mm, root);
-
- if (!drm_buddy_block_is_free(root)) {
- kunit_err(test, "root not free\n");
- err = -EINVAL;
- }
-
- order = drm_buddy_block_order(root);
-
- if (!i) {
- if (order != mm->max_order) {
- kunit_err(test, "max order root missing\n");
- err = -EINVAL;
- }
- }
-
- if (prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(root);
-
- if (offset != (prev_offset + prev_block_size)) {
- kunit_err(test, "root offset mismatch\n");
- err = -EINVAL;
- }
- }
+ KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
- block = list_first_entry_or_null(&mm->free_list[order],
- struct drm_buddy_block, link);
- if (block != root) {
- kunit_err(test, "root mismatch at order=%u\n", order);
- err = -EINVAL;
- }
-
- if (err)
- break;
-
- prev = root;
- total += drm_buddy_block_size(mm, root);
- }
-
- if (!err) {
- if (total != mm->size) {
- kunit_err(test, "expected mm size=%llx, found=%llx\n",
- mm->size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- kunit_err(test, "prev root(%u), dump:\n", i - 1);
- dump_block(test, mm, prev);
- }
-
- if (root) {
- kunit_err(test, "bad root(%u), dump:\n", i);
- dump_block(test, mm, root);
- }
-
- return err;
-}
-
-static void mm_config(u64 *size, u64 *chunk_size)
-{
- DRM_RND_STATE(prng, random_seed);
- u32 s, ms;
-
- /* Nothing fancy, just try to get an interesting bit pattern */
-
- prandom_seed_state(&prng, random_seed);
-
- /* Let size be a random number of pages up to 8 GB (2M pages) */
- s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
- /* Let the chunk size be a random power of 2 less than size */
- ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
- /* Round size down to the chunk size */
- s &= -ms;
-
- /* Convert from pages to bytes */
- *chunk_size = (u64)ms << 12;
- *size = (u64)s << 12;
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
}
static void drm_test_buddy_alloc_pathological(struct kunit *test)
@@ -403,96 +391,6 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
drm_buddy_fini(&mm);
}
-static void drm_test_buddy_alloc_smoke(struct kunit *test)
-{
- u64 mm_size, chunk_size, start = 0;
- unsigned long flags = 0;
- struct drm_buddy mm;
- int *order;
- int i;
-
- DRM_RND_STATE(prng, random_seed);
- TIMEOUT(end_time);
-
- mm_config(&mm_size, &chunk_size);
-
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
- "buddy_init failed\n");
-
- order = drm_random_order(mm.max_order + 1, &prng);
- KUNIT_ASSERT_TRUE(test, order);
-
- for (i = 0; i <= mm.max_order; ++i) {
- struct drm_buddy_block *block;
- int max_order = order[i];
- bool timeout = false;
- LIST_HEAD(blocks);
- u64 total, size;
- LIST_HEAD(tmp);
- int order, err;
-
- KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
- "pre-mm check failed, abort\n");
-
- order = max_order;
- total = 0;
-
- do {
-retry:
- size = get_size(order, chunk_size);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
- if (err) {
- if (err == -ENOMEM) {
- KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- } else {
- if (order--) {
- err = 0;
- goto retry;
- }
-
- KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
- order);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
- KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
-
- list_move_tail(&block->link, &blocks);
- KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
- "buddy_alloc order mismatch\n");
-
- total += drm_buddy_block_size(&mm, block);
-
- if (__timeout(end_time, NULL)) {
- timeout = true;
- break;
- }
- } while (total < mm.size);
-
- if (!err)
- err = check_blocks(test, &mm, &blocks, total, false);
-
- drm_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
- "post-mm check failed\n");
- }
-
- if (err || timeout)
- break;
-
- cond_resched();
- }
-
- kfree(order);
- drm_buddy_fini(&mm);
-}
-
static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
u64 mm_size, size, start = 0;
@@ -634,64 +532,6 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
drm_buddy_fini(&mm);
}
-static void drm_test_buddy_alloc_range(struct kunit *test)
-{
- unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
- u64 offset, size, rem, chunk_size, end;
- unsigned long page_num;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
-
- mm_config(&size, &chunk_size);
-
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
- "buddy_init failed");
-
- KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
- "pre-mm check failed, abort!");
-
- rem = mm.size;
- offset = 0;
-
- for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
- struct drm_buddy_block *block;
- LIST_HEAD(tmp);
-
- size = min(page_num * mm.chunk_size, rem);
- end = offset + size;
-
- KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
- size, mm.chunk_size,
- &tmp, flags),
- "alloc_range with offset=%llx, size=%llx failed\n", offset, size);
-
- block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
- KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");
-
- KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
- "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
- drm_buddy_block_offset(block), offset);
-
- KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));
-
- list_splice_tail(&tmp, &blocks);
-
- offset += size;
-
- rem -= size;
- if (!rem)
- break;
-
- cond_resched();
- }
-
- drm_buddy_free_list(&mm, &blocks);
-
- KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");
-
- drm_buddy_fini(&mm);
-}
-
static void drm_test_buddy_alloc_limit(struct kunit *test)
{
u64 size = U64_MAX, start = 0;
@@ -732,18 +572,19 @@ static int drm_buddy_suite_init(struct kunit_suite *suite)
while (!random_seed)
random_seed = get_random_u32();
- kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed);
+ kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+ random_seed);
return 0;
}
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
- KUNIT_CASE(drm_test_buddy_alloc_range),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
- KUNIT_CASE(drm_test_buddy_alloc_smoke),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
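
For readers unfamiliar with the allocator under test, a minimal hedged sketch of the drm_buddy calls the new cases exercise; the sizes are illustrative only:

```c
struct drm_buddy mm;
LIST_HEAD(blocks);
int err;

err = drm_buddy_init(&mm, SZ_16M, SZ_4K);	/* pool size, min chunk */
if (err)
	return err;

/* Allocate 1 MiB, constrained to the [4 MiB, 8 MiB) window */
err = drm_buddy_alloc_blocks(&mm, SZ_4M, SZ_8M, SZ_1M, SZ_4K,
			     &blocks, DRM_BUDDY_RANGE_ALLOCATION);

drm_buddy_free_list(&mm, &blocks);
drm_buddy_fini(&mm);
```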
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index e3c818dfc..d916e548f 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -68,6 +68,152 @@ static void dp_mst_calc_pbn_mode_desc(const struct drm_dp_mst_calc_pbn_mode_test
KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_cases,
dp_mst_calc_pbn_mode_desc);
+struct drm_dp_mst_calc_pbn_div_test {
+ int link_rate;
+ int lane_count;
+ fixed20_12 expected;
+};
+
+#define fp_init(__int, __frac) { \
+ .full = (__int) * (1 << 12) + \
+ (__frac) * (1 << 12) / 100000 \
+}
+
+static const struct drm_dp_mst_calc_pbn_div_test drm_dp_mst_calc_pbn_div_dp1_4_cases[] = {
+ /*
+ * UHBR rates (DP Standard v2.1 2.7.6.3, which specifies the value
+ * rounded to the closest 2 decimal places):
+ * .expected = .link_rate * .lane_count * 0.9671 / 8 / 54 / 100
+ * DP1.4 rates (DP Standard v2.1 2.6.4.2):
+ * .expected = .link_rate * .lane_count * 0.8000 / 8 / 54 / 100
+ *
+ * truncated to 5 decimal places.
+ */
+ {
+ .link_rate = 2000000,
+ .lane_count = 4,
+ .expected = fp_init(179, 9259), /* 179.09259 */
+ },
+ {
+ .link_rate = 2000000,
+ .lane_count = 2,
+ .expected = fp_init(89, 54629),
+ },
+ {
+ .link_rate = 2000000,
+ .lane_count = 1,
+ .expected = fp_init(44, 77314),
+ },
+ {
+ .link_rate = 1350000,
+ .lane_count = 4,
+ .expected = fp_init(120, 88750),
+ },
+ {
+ .link_rate = 1350000,
+ .lane_count = 2,
+ .expected = fp_init(60, 44375),
+ },
+ {
+ .link_rate = 1350000,
+ .lane_count = 1,
+ .expected = fp_init(30, 22187),
+ },
+ {
+ .link_rate = 1000000,
+ .lane_count = 4,
+ .expected = fp_init(89, 54629),
+ },
+ {
+ .link_rate = 1000000,
+ .lane_count = 2,
+ .expected = fp_init(44, 77314),
+ },
+ {
+ .link_rate = 1000000,
+ .lane_count = 1,
+ .expected = fp_init(22, 38657),
+ },
+ {
+ .link_rate = 810000,
+ .lane_count = 4,
+ .expected = fp_init(60, 0),
+ },
+ {
+ .link_rate = 810000,
+ .lane_count = 2,
+ .expected = fp_init(30, 0),
+ },
+ {
+ .link_rate = 810000,
+ .lane_count = 1,
+ .expected = fp_init(15, 0),
+ },
+ {
+ .link_rate = 540000,
+ .lane_count = 4,
+ .expected = fp_init(40, 0),
+ },
+ {
+ .link_rate = 540000,
+ .lane_count = 2,
+ .expected = fp_init(20, 0),
+ },
+ {
+ .link_rate = 540000,
+ .lane_count = 1,
+ .expected = fp_init(10, 0),
+ },
+ {
+ .link_rate = 270000,
+ .lane_count = 4,
+ .expected = fp_init(20, 0),
+ },
+ {
+ .link_rate = 270000,
+ .lane_count = 2,
+ .expected = fp_init(10, 0),
+ },
+ {
+ .link_rate = 270000,
+ .lane_count = 1,
+ .expected = fp_init(5, 0),
+ },
+ {
+ .link_rate = 162000,
+ .lane_count = 4,
+ .expected = fp_init(12, 0),
+ },
+ {
+ .link_rate = 162000,
+ .lane_count = 2,
+ .expected = fp_init(6, 0),
+ },
+ {
+ .link_rate = 162000,
+ .lane_count = 1,
+ .expected = fp_init(3, 0),
+ },
+};
+
+static void drm_test_dp_mst_calc_pbn_div(struct kunit *test)
+{
+ const struct drm_dp_mst_calc_pbn_div_test *params = test->param_value;
+ /* mgr->dev is only needed by drm_dbg_kms(), but it's not called for the test cases. */
+ struct drm_dp_mst_topology_mgr *mgr = test->priv;
+
+ KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(mgr, params->link_rate, params->lane_count).full,
+ params->expected.full);
+}
+
+static void dp_mst_calc_pbn_div_desc(const struct drm_dp_mst_calc_pbn_div_test *t, char *desc)
+{
+ sprintf(desc, "Link rate %d lane count %d", t->link_rate, t->lane_count);
+}
+
+KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_div, drm_dp_mst_calc_pbn_div_dp1_4_cases,
+ dp_mst_calc_pbn_div_desc);
+
static u8 data[] = { 0xff, 0x00, 0xdd };
struct drm_dp_mst_sideband_msg_req_test {
@@ -416,13 +562,27 @@ KUNIT_ARRAY_PARAM(drm_dp_mst_sideband_msg_req, drm_dp_mst_sideband_msg_req_cases
static struct kunit_case drm_dp_mst_helper_tests[] = {
KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_gen_params),
+ KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_div, drm_dp_mst_calc_pbn_div_gen_params),
KUNIT_CASE_PARAM(drm_test_dp_mst_sideband_msg_req_decode,
drm_dp_mst_sideband_msg_req_gen_params),
{ }
};
+static int drm_dp_mst_helper_tests_init(struct kunit *test)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ mgr = kunit_kzalloc(test, sizeof(*mgr), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mgr);
+
+ test->priv = mgr;
+
+ return 0;
+}
+
static struct kunit_suite drm_dp_mst_helper_test_suite = {
.name = "drm_dp_mst_helper",
+ .init = drm_dp_mst_helper_tests_init,
.test_cases = drm_dp_mst_helper_tests,
};
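
To make the fixed-point encoding concrete: fp_init(i, f) stores i + f/100000 in 20.12 format, so the first UHBR case evaluates as 2000000 * 4 * 0.9671 / 8 / 54 / 100 = 179.09259. A hedged arithmetic check of that entry:

```c
/* fp_init(179, 9259): 179 * 4096 + (9259 * 4096) / 100000
 *                   = 733184 + 379
 *                   = 733563, i.e. ~179.0925 in 20.12 fixed point,
 * the expected PBN divider for UHBR20 (link_rate 2000000) at 4 lanes. */
fixed20_12 val = fp_init(179, 9259);
```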
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
index 563949d77..81f928a42 100644
--- a/drivers/gpu/drm/tests/drm_exec_test.c
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -46,7 +46,7 @@ static void sanitycheck(struct kunit *test)
{
struct drm_exec exec;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_fini(&exec);
KUNIT_SUCCEED(test);
}
@@ -60,7 +60,7 @@ static void test_lock(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_lock_obj(&exec, &gobj);
drm_exec_retry_on_contention(&exec);
@@ -80,7 +80,7 @@ static void test_lock_unlock(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_lock_obj(&exec, &gobj);
drm_exec_retry_on_contention(&exec);
@@ -107,7 +107,7 @@ static void test_duplicates(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_lock_obj(&exec, &gobj);
drm_exec_retry_on_contention(&exec);
@@ -134,7 +134,7 @@ static void test_prepare(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
ret = drm_exec_prepare_obj(&exec, &gobj, 1);
drm_exec_retry_on_contention(&exec);
@@ -159,7 +159,7 @@ static void test_prepare_array(struct kunit *test)
drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
1);
@@ -174,14 +174,14 @@ static void test_multiple_loops(struct kunit *test)
{
struct drm_exec exec;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
{
break;
}
drm_exec_fini(&exec);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
{
break;
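
Every drm_exec_init() call gains a third argument: a hint for the initial size of the tracked-object array, with 0 preserving the old grow-on-demand behaviour. A sketch for a caller that knows its object count up front; num_bos and bo are hypothetical:

```c
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_bos);
drm_exec_until_all_locked(&exec) {
	ret = drm_exec_lock_obj(&exec, &bo->base);
	drm_exec_retry_on_contention(&exec);
	if (ret)
		break;
}
/* ... work with the locked objects ... */
drm_exec_fini(&exec);
```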
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index f6408e56f..08992636e 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -20,6 +20,10 @@
#define TEST_USE_DEFAULT_PITCH 0
+static unsigned char fmtcnv_state_mem[PAGE_SIZE];
+static struct drm_format_conv_state fmtcnv_state =
+ DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(fmtcnv_state_mem, sizeof(fmtcnv_state_mem));
+
struct convert_to_gray8_result {
unsigned int dst_pitch;
const u8 expected[TEST_BUF_SIZE];
@@ -630,8 +634,7 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_gray8(&dst, dst_pitch, &src, &fb, &params->clip);
-
+ drm_fb_xrgb8888_to_gray8(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -664,7 +667,7 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgb332(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_rgb332(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -697,12 +700,14 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip, false);
+ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state, false);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
- drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state, true);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
@@ -711,7 +716,8 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -748,7 +754,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -757,7 +763,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -794,7 +801,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -803,7 +810,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -840,7 +848,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -849,7 +857,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
@@ -890,7 +899,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
@@ -898,7 +907,8 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
+ &fmtcnv_state);
KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -933,7 +943,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -942,7 +952,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -979,7 +990,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -989,7 +1000,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
int blit_result = 0;
blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
- &params->clip);
+ &params->clip, &fmtcnv_state);
KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -1024,7 +1035,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -1034,7 +1045,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
int blit_result = 0;
blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
- &params->clip);
+ &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -1071,7 +1082,7 @@ static void drm_test_fb_xrgb8888_to_mono(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_xrgb8888_to_mono(&dst, dst_pitch, &src, &fb, &params->clip);
+ drm_fb_xrgb8888_to_mono(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1104,7 +1115,7 @@ static void drm_test_fb_swab(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
@@ -1114,7 +1125,7 @@ static void drm_test_fb_swab(struct kunit *test)
int blit_result;
blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
- &src, &fb, &params->clip);
+ &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_FALSE(test, blit_result);
@@ -1123,7 +1134,8 @@ static void drm_test_fb_swab(struct kunit *test)
buf = dst.vaddr;
memset(buf, 0, dst_size);
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_FALSE(test, blit_result);
@@ -1137,7 +1149,8 @@ static void drm_test_fb_swab(struct kunit *test)
mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
fb.format = &mock_format;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_FALSE(test, blit_result);
@@ -1175,7 +1188,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -1214,7 +1228,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
int blit_result = 0;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip);
+ blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
+ &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
@@ -1817,7 +1832,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
int blit_result;
- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip);
+ blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
+ &fmtcnv_state);
KUNIT_EXPECT_FALSE(test, blit_result);
for (size_t i = 0; i < fb.format->num_planes; i++) {
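
The format helpers now take an explicit drm_format_conv_state instead of allocating scratch memory internally; the tests satisfy it with the static preallocation declared near the top of the file. A condensed sketch of that pattern:

```c
static u8 scratch[PAGE_SIZE];
static struct drm_format_conv_state conv =
	DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(scratch, sizeof(scratch));

/* The conversion draws through the caller-owned scratch buffer */
drm_fb_xrgb8888_to_rgb565(&dst, NULL, &src, fb, &clip, &conv, false);
```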
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
new file mode 100644
index 000000000..91202e40c
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test suite for GEM objects backed by shmem buffers
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+#include <linux/sizes.h>
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#define TEST_SIZE SZ_1M
+#define TEST_BYTE 0xae
+
+/*
+ * Wrappers to avoid an explicit type cast when passing action
+ * functions to kunit_add_action().
+ */
+static void kfree_wrapper(void *ptr)
+{
+ const void *obj = ptr;
+
+ kfree(obj);
+}
+
+static void sg_free_table_wrapper(void *ptr)
+{
+ struct sg_table *sgt = ptr;
+
+ sg_free_table(sgt);
+}
+
+static void drm_gem_shmem_free_wrapper(void *ptr)
+{
+ struct drm_gem_shmem_object *shmem = ptr;
+
+ drm_gem_shmem_free(shmem);
+}
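+
+/*
+ * Editorial note, not part of the upstream file: the wrappers above are
+ * needed because kunit_add_action() expects a void (*)(void *) callback.
+ * Registration then looks like
+ *
+ *	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ *
+ * and the action runs automatically on test exit, even after a failed
+ * assertion.
+ */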
+
+/*
+ * Test creating a shmem GEM object backed by a shmem buffer. The test
+ * case succeeds if the GEM object is successfully allocated with the
+ * shmem file node and object functions attributes set, and the size
+ * attribute matches the requested size.
+ */
+static void drm_gem_shmem_test_obj_create(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
+ KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object from a scatter/gather table exported
+ * via a DMA-BUF. The test case succeeds if the GEM object is successfully
+ * created with the shmem file node attribute equal to NULL and the sgt
+ * attribute pointing to the scatter/gather table that has been imported.
+ */
+static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *gem_obj;
+ struct dma_buf buf_mock;
+ struct dma_buf_attachment attach_mock;
+ struct sg_table *sgt;
+ char *buf;
+ int ret;
+
+ /* Create a mock scatter/gather table */
+ buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, buf);
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, sgt);
+
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sg_init_one(sgt->sgl, buf, TEST_SIZE);
+
+ /* Init a mock DMA-BUF */
+ buf_mock.size = TEST_SIZE;
+ attach_mock.dmabuf = &buf_mock;
+
+ gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
+ KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
+ KUNIT_EXPECT_NULL(test, gem_obj->filp);
+ KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ kunit_remove_action(test, sg_free_table_wrapper, sgt);
+ kunit_remove_action(test, kfree_wrapper, sgt);
+
+ shmem = to_drm_gem_shmem_obj(gem_obj);
+ KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test pinning backing pages for a shmem GEM object. The test case
+ * succeeds if a suitable number of backing pages are allocated, and
+ * the pages table counter attribute is increased by one.
+ */
+static void drm_gem_shmem_test_pin_pages(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int i, ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+
+ for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
+
+ drm_gem_shmem_unpin(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+}
+
+/*
+ * Test creating a virtual mapping for a shmem GEM object. The test
+ * case succeeds if the backing memory is mapped and the reference
+ * counter for virtual mapping is increased by one. Moreover, the test
+ * case writes and then reads a test pattern over the mapped memory.
+ */
+static void drm_gem_shmem_test_vmap(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct iosys_map map;
+ int ret, i;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_vmap(shmem, &map);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
+ KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+
+ iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
+ for (i = 0; i < TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
+
+ drm_gem_shmem_vunmap(shmem, &map);
+ KUNIT_EXPECT_NULL(test, shmem->vaddr);
+ KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+}
+
+/*
+ * Test exporting a scatter/gather table of pinned pages suitable for
+ * PRIME usage from a shmem GEM object. The test case succeeds if a
+ * scatter/gather table large enough to accommodate the backing memory
+ * is successfully exported.
+ */
+static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, len = 0;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_pin(shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ sgt = drm_gem_shmem_get_sg_table(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+
+ ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
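/*
 * Sketch of the importer side: a table obtained from
 * drm_gem_shmem_get_sg_table() is not yet DMA-mapped (unlike one from
 * drm_gem_shmem_get_pages_sgt()), so the importing device maps it
 * against itself. The helper name is illustrative.
 */
#include <linux/dma-mapping.h>

static int example_map_for_device(struct device *dev, struct sg_table *sgt)
{
        return dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
}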
+
+/*
+ * Test pinning pages and exporting a scatter/gather table suitable for
+ * driver usage from a shmem GEM object. The test case succeeds if the
+ * backing pages are pinned and a scatter/gather table large enough to
+ * accommodate the backing memory is successfully exported.
+ */
+static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int si, len = 0;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+ KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
+
+ for_each_sgtable_sg(sgt, sg, si) {
+ KUNIT_EXPECT_NOT_NULL(test, sg);
+ len += sg->length;
+ }
+
+ KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test updating the madvise state of a shmem GEM object. The test
+ * case checks that the function for setting madv updates it only if
+ * its current value is greater than or equal to zero, and that it
+ * returns false once the value has gone negative.
+ */
+static void drm_gem_shmem_test_madvise(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 0);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, 1);
+
+ /* Set madv to a negative value */
+ ret = drm_gem_shmem_madvise(shmem, -1);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+
+ /* Check that madv cannot be set back to a positive value */
+ ret = drm_gem_shmem_madvise(shmem, 0);
+ KUNIT_EXPECT_FALSE(test, ret);
+ KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+}
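/*
 * The update rule verified above, restated as a sketch: a negative
 * madv state is sticky, otherwise the new hint is stored.
 */
static int example_next_madv(int cur, int hint)
{
        return cur < 0 ? cur : hint;
}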
+
+/*
+ * Test purging a shmem GEM object. First, assert that a newly created
+ * shmem GEM object is not purgeable. Then, set madvise to a positive
+ * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
+ * backing pages. Finally, assert that the shmem GEM object is now
+ * purgeable and purge it.
+ */
+static void drm_gem_shmem_test_purge(struct kunit *test)
+{
+ struct drm_device *drm_dev = test->priv;
+ struct drm_gem_shmem_object *shmem;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+ ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_FALSE(test, ret);
+
+ ret = drm_gem_shmem_madvise(shmem, 1);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ /* The scatter/gather table will be freed by drm_gem_shmem_free */
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+
+ ret = drm_gem_shmem_is_purgeable(shmem);
+ KUNIT_EXPECT_TRUE(test, ret);
+
+ drm_gem_shmem_purge(shmem);
+ KUNIT_EXPECT_NULL(test, shmem->pages);
+ KUNIT_EXPECT_NULL(test, shmem->sgt);
+ KUNIT_EXPECT_EQ(test, shmem->madv, -1);
+}
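/*
 * A shrinker-style reclaim sketch built on the contract verified
 * above; locking is elided and the helper name is illustrative.
 */
static void example_try_purge(struct drm_gem_shmem_object *shmem)
{
        if (!drm_gem_shmem_is_purgeable(shmem))
                return;

        /* Drops the backing pages and sg_table; madv is left at -1. */
        drm_gem_shmem_purge(shmem);
}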
+
+static int drm_gem_shmem_test_init(struct kunit *test)
+{
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ /* Allocate a parent device */
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /*
+ * The DRM core will automatically initialize the GEM core and create
+ * a DRM Memory Manager object, which provides an address-space pool
+ * for GEM object allocations.
+ */
+ drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
+ 0, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);
+
+ test->priv = drm_dev;
+
+ return 0;
+}
+
+static struct kunit_case drm_gem_shmem_test_cases[] = {
+ KUNIT_CASE(drm_gem_shmem_test_obj_create),
+ KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
+ KUNIT_CASE(drm_gem_shmem_test_pin_pages),
+ KUNIT_CASE(drm_gem_shmem_test_vmap),
+ KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
+ KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
+ KUNIT_CASE(drm_gem_shmem_test_madvise),
+ KUNIT_CASE(drm_gem_shmem_test_purge),
+ {}
+};
+
+static struct kunit_suite drm_gem_shmem_suite = {
+ .name = "drm_gem_shmem",
+ .init = drm_gem_shmem_test_init,
+ .test_cases = drm_gem_shmem_test_cases
+};
+
+kunit_test_suite(drm_gem_shmem_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index bccb33b90..ca4f8e4c5 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -5,6 +5,7 @@
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
+#include <kunit/device.h>
#include <kunit/resource.h>
#include <linux/device.h>
@@ -15,40 +16,6 @@
static const struct drm_mode_config_funcs drm_mode_config_funcs = {
};
-static int fake_probe(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver fake_platform_driver = {
- .probe = fake_probe,
- .driver = {
- .name = KUNIT_DEVICE_NAME,
- },
-};
-
-static void kunit_action_platform_driver_unregister(void *ptr)
-{
- struct platform_driver *drv = ptr;
-
- platform_driver_unregister(drv);
-
-}
-
-static void kunit_action_platform_device_put(void *ptr)
-{
- struct platform_device *pdev = ptr;
-
- platform_device_put(pdev);
-}
-
-static void kunit_action_platform_device_del(void *ptr)
-{
- struct platform_device *pdev = ptr;
-
- platform_device_del(pdev);
-}
-
/**
* drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
* @test: The test context object
@@ -66,34 +33,7 @@ static void kunit_action_platform_device_del(void *ptr)
*/
struct device *drm_kunit_helper_alloc_device(struct kunit *test)
{
- struct platform_device *pdev;
- int ret;
-
- ret = platform_driver_register(&fake_platform_driver);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ret = kunit_add_action_or_reset(test,
- kunit_action_platform_driver_unregister,
- &fake_platform_driver);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
-
- ret = kunit_add_action_or_reset(test,
- kunit_action_platform_device_put,
- pdev);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ret = platform_device_add(pdev);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- ret = kunit_add_action_or_reset(test,
- kunit_action_platform_device_del,
- pdev);
- KUNIT_ASSERT_EQ(test, ret, 0);
-
- return &pdev->dev;
+ return kunit_device_register(test, KUNIT_DEVICE_NAME);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
@@ -106,19 +46,7 @@ EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
*/
void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
-
- kunit_release_action(test,
- kunit_action_platform_device_del,
- pdev);
-
- kunit_release_action(test,
- kunit_action_platform_device_put,
- pdev);
-
- kunit_release_action(test,
- kunit_action_platform_driver_unregister,
- &fake_platform_driver);
+ kunit_device_unregister(test, dev);
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
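/*
 * Both helpers now defer to the generic KUnit device API. A minimal
 * stand-alone sketch of that API, with an illustrative device name;
 * explicit unregistration is optional, since KUnit-managed devices
 * are released automatically when the test exits.
 */
#include <kunit/device.h>

static void example_kunit_device(struct kunit *test)
{
        struct device *dev = kunit_device_register(test, "mock-dev");

        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

        kunit_device_unregister(test, dev);
}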
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 05d5e7af6..f37c0d765 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -17,10 +17,6 @@
#include "../lib/drm_random.h"
-static unsigned int random_seed;
-static unsigned int max_iterations = 8192;
-static unsigned int max_prime = 128;
-
enum {
BEST,
BOTTOMUP,
@@ -37,10 +33,6 @@ static const struct insert_mode {
[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
{}
-}, evict_modes[] = {
- { "bottom-up", DRM_MM_INSERT_LOW },
- { "top-down", DRM_MM_INSERT_HIGH },
- {}
};
static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
@@ -97,57 +89,6 @@ static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 sta
return ok;
}
-static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
-{
- struct drm_mm_node *node, *check, *found;
- unsigned long n;
- u64 addr;
-
- if (!assert_no_holes(test, mm))
- return false;
-
- n = 0;
- addr = 0;
- drm_mm_for_each_node(node, mm) {
- if (node->start != addr) {
- KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
- n, addr, node->start);
- return false;
- }
-
- if (node->size != size) {
- KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
- n, size, node->size);
- return false;
- }
-
- if (drm_mm_hole_follows(node)) {
- KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
- return false;
- }
-
- found = NULL;
- drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
- if (node != check) {
- KUNIT_FAIL(test,
- "lookup return wrong node, expected start %llx, found %llx\n",
- node->start, check->start);
- return false;
- }
- found = check;
- }
- if (!found) {
- KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
- return false;
- }
-
- addr += size;
- n++;
- }
-
- return true;
-}
-
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
u64 rem;
@@ -216,7 +157,7 @@ static void drm_test_mm_init(struct kunit *test)
/* After creation, it should all be one massive hole */
if (!assert_one_hole(test, &mm, 0, size)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm not one hole on creation");
goto out;
}
@@ -230,14 +171,14 @@ static void drm_test_mm_init(struct kunit *test)
/* After filling the range entirely, there should be no holes */
if (!assert_no_holes(test, &mm)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm has holes when filled");
goto out;
}
/* And then after emptying it again, the massive hole should be back */
drm_mm_remove_node(&tmp);
if (!assert_one_hole(test, &mm, 0, size)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm does not have single hole after emptying");
goto out;
}
@@ -247,13 +188,13 @@ out:
static void drm_test_mm_debug(struct kunit *test)
{
+ struct drm_printer p = drm_debug_printer(test->name);
struct drm_mm mm;
struct drm_mm_node nodes[2];
/* Create a small drm_mm with a couple of nodes and a few holes, and
* check that the debug iterator doesn't explode over a trivial drm_mm.
*/
-
drm_mm_init(&mm, 0, 4096);
memset(nodes, 0, sizeof(nodes));
@@ -268,215 +209,9 @@ static void drm_test_mm_debug(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
"failed to reserve node[0] {start=%lld, size=%lld)\n",
nodes[0].start, nodes[0].size);
-}
-
-static struct drm_mm_node *set_node(struct drm_mm_node *node,
- u64 start, u64 size)
-{
- node->start = start;
- node->size = size;
- return node;
-}
-
-static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
-{
- int err;
-
- err = drm_mm_reserve_node(mm, node);
- if (likely(err == -ENOSPC))
- return true;
-
- if (!err) {
- KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
- node->start, node->size);
- drm_mm_remove_node(node);
- } else {
- KUNIT_FAIL(test,
- "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
- err, -ENOSPC, node->start, node->size);
- }
- return false;
-}
-
-static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
- unsigned int count,
- u64 size)
-{
- const struct boundary {
- u64 start, size;
- const char *name;
- } boundaries[] = {
-#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
- B(0, 0),
- B(-size, 0),
- B(size, 0),
- B(size * count, 0),
- B(-size, size),
- B(-size, -size),
- B(-size, 2 * size),
- B(0, -size),
- B(size, -size),
- B(count * size, size),
- B(count * size, -size),
- B(count * size, count * size),
- B(count * size, -count * size),
- B(count * size, -(count + 1) * size),
- B((count + 1) * size, size),
- B((count + 1) * size, -size),
- B((count + 1) * size, -2 * size),
-#undef B
- };
- struct drm_mm_node tmp = {};
- int n;
-
- for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
- if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
- boundaries[n].size))) {
- KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
- n, boundaries[n].name, count, size);
- return false;
- }
- }
-
- return true;
-}
-
-static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
-{
- DRM_RND_STATE(prng, random_seed);
- struct drm_mm mm;
- struct drm_mm_node tmp, *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
- int ret, err;
-
- /* For exercising drm_mm_reserve_node(), we want to check that
- * reservations outside of the drm_mm range are rejected, as are
- * overlapping and otherwise already-occupied ranges. Afterwards,
- * the tree and nodes should be intact.
- */
-
- DRM_MM_BUG_ON(!count);
- DRM_MM_BUG_ON(!size);
-
- ret = -ENOMEM;
- order = drm_random_order(count, &prng);
- if (!order)
- goto err;
-
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- ret = -EINVAL;
- drm_mm_init(&mm, 0, count * size);
-
- if (!check_reserve_boundaries(test, &mm, count, size))
- goto out;
-
- for (n = 0; n < count; n++) {
- nodes[n].start = order[n] * size;
- nodes[n].size = size;
-
- err = drm_mm_reserve_node(&mm, &nodes[n]);
- if (err) {
- KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
- ret = err;
- goto out;
- }
-
- if (!drm_mm_node_allocated(&nodes[n])) {
- KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
- n, nodes[n].start);
- goto out;
- }
-
- if (!expect_reserve_fail(test, &mm, &nodes[n]))
- goto out;
- }
-
- /* After random insertion the nodes should be in order */
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- /* Repeated use should then fail */
- drm_random_reorder(order, count, &prng);
- for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
- goto out;
-
- /* Remove and reinsert should work */
- drm_mm_remove_node(&nodes[order[n]]);
- err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
- if (err) {
- KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
- ret = err;
- goto out;
- }
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- /* Overlapping use should then fail */
- for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
- goto out;
- }
- for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
- goto out;
- }
-
- /* Remove several, reinsert, check full */
- for_each_prime_number(n, min(max_prime, count)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- }
-
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- err = drm_mm_reserve_node(&mm, node);
- if (err) {
- KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
- m, n, node->start);
- ret = err;
- goto out;
- }
- }
-
- o += n;
-
- if (!assert_continuous(test, &mm, size))
- goto out;
- }
-
- ret = 0;
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
- kfree(order);
-err:
- return ret;
-}
-static void drm_test_mm_reserve(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- int n;
-
- for_each_prime_number_from(n, 1, 54) {
- u64 size = BIT_ULL(n);
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
-
- cond_resched();
- }
+ drm_mm_print(&mm, &p);
+ KUNIT_SUCCEED(test);
}
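/*
 * Sketch of the drm_printer pattern the reworked debug test relies
 * on: a printer that prefixes each line and routes the dump to the
 * kernel debug log. The helper name is illustrative.
 */
#include <drm/drm_print.h>

static void example_dump_mm(struct drm_mm *mm, const char *prefix)
{
        struct drm_printer p = drm_debug_printer(prefix);

        drm_mm_print(mm, &p);
}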
static bool expect_insert(struct kunit *test, struct drm_mm *mm,
@@ -503,600 +238,6 @@ static bool expect_insert(struct kunit *test, struct drm_mm *mm,
return true;
}
-static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
-{
- struct drm_mm_node tmp = {};
- int err;
-
- err = drm_mm_insert_node(mm, &tmp, size);
- if (likely(err == -ENOSPC))
- return true;
-
- if (!err) {
- KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
- tmp.start, tmp.size);
- drm_mm_remove_node(&tmp);
- } else {
- KUNIT_FAIL(test,
- "impossible insert failed with wrong error %d [expected %d], size %llu\n",
- err, -ENOSPC, size);
- }
- return false;
-}
-
-static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
-{
- DRM_RND_STATE(prng, random_seed);
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
- int ret;
-
- /* Fill a range with lots of nodes, check it doesn't fail too early */
-
- DRM_MM_BUG_ON(!count);
- DRM_MM_BUG_ON(!size);
-
- ret = -ENOMEM;
- nodes = vmalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(count, &prng);
- if (!order)
- goto err_nodes;
-
- ret = -EINVAL;
- drm_mm_init(&mm, 0, count * size);
-
- for (mode = insert_modes; mode->name; mode++) {
- for (n = 0; n < count; n++) {
- struct drm_mm_node tmp;
-
- node = replace ? &tmp : &nodes[n];
- memset(node, 0, sizeof(*node));
- if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
- KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
- mode->name, size, n);
- goto out;
- }
-
- if (replace) {
- drm_mm_replace_node(&tmp, &nodes[n]);
- if (drm_mm_node_allocated(&tmp)) {
- KUNIT_FAIL(test,
- "replaced old-node still allocated! step %d\n",
- n);
- goto out;
- }
-
- if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
- KUNIT_FAIL(test,
- "replaced node did not inherit parameters, size %llu step %d\n",
- size, n);
- goto out;
- }
-
- if (tmp.start != nodes[n].start) {
- KUNIT_FAIL(test,
- "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
- tmp.start, size, nodes[n].start, nodes[n].size);
- goto out;
- }
- }
- }
-
- /* After random insertion the nodes should be in order */
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- /* Repeated use should then fail */
- if (!expect_insert_fail(test, &mm, size))
- goto out;
-
- /* Remove one and reinsert, as the only hole it should refill itself */
- for (n = 0; n < count; n++) {
- u64 addr = nodes[n].start;
-
- drm_mm_remove_node(&nodes[n]);
- if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
- KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
- mode->name, size, n);
- goto out;
- }
-
- if (nodes[n].start != addr) {
- KUNIT_FAIL(test,
- "%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
- goto out;
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
- }
-
- /* Remove several, reinsert, check full */
- for_each_prime_number(n, min(max_prime, count)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- }
-
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
- KUNIT_FAIL(test,
- "%s multiple reinsert failed, size %llu step %d\n",
- mode->name, size, n);
- goto out;
- }
- }
-
- o += n;
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- if (!expect_insert_fail(test, &mm, size))
- goto out;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
- }
-
- ret = 0;
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
- return ret;
-}
-
-static void drm_test_mm_insert(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- unsigned int n;
-
- for_each_prime_number_from(n, 1, 54) {
- u64 size = BIT_ULL(n);
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
-
- cond_resched();
- }
-}
-
-static void drm_test_mm_replace(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- unsigned int n;
-
- /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
- * then replacing it with the intended node. We want to check that
- * the tree is intact and all the information we need is carried
- * across to the target node.
- */
-
- for_each_prime_number_from(n, 1, 54) {
- u64 size = BIT_ULL(n);
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
-
- cond_resched();
- }
-}
-
-static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
- u64 size, u64 alignment, unsigned long color,
- u64 range_start, u64 range_end, const struct insert_mode *mode)
-{
- int err;
-
- err = drm_mm_insert_node_in_range(mm, node,
- size, alignment, color,
- range_start, range_end,
- mode->mode);
- if (err) {
- KUNIT_FAIL(test,
- "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
- size, alignment, color, mode->name,
- range_start, range_end, err);
- return false;
- }
-
- if (!assert_node(test, node, mm, size, alignment, color)) {
- drm_mm_remove_node(node);
- return false;
- }
-
- return true;
-}
-
-static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
- u64 size, u64 range_start, u64 range_end)
-{
- struct drm_mm_node tmp = {};
- int err;
-
- err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
- 0);
- if (likely(err == -ENOSPC))
- return true;
-
- if (!err) {
- KUNIT_FAIL(test,
- "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
- tmp.start, tmp.size, range_start, range_end);
- drm_mm_remove_node(&tmp);
- } else {
- KUNIT_FAIL(test,
- "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
- err, -ENOSPC, size, range_start, range_end);
- }
-
- return false;
-}
-
-static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
- u64 size, u64 start, u64 end)
-{
- struct drm_mm_node *node;
- unsigned int n;
-
- if (!expect_insert_in_range_fail(test, mm, size, start, end))
- return false;
-
- n = div64_u64(start + size - 1, size);
- drm_mm_for_each_node(node, mm) {
- if (node->start < start || node->start + node->size > end) {
- KUNIT_FAIL(test,
- "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
- n, node->start, node->start + node->size, start, end);
- return false;
- }
-
- if (node->start != n * size) {
- KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
- n, n * size, node->start);
- return false;
- }
-
- if (node->size != size) {
- KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
- n, size, node->size);
- return false;
- }
-
- if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
- KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
- return false;
- }
-
- n++;
- }
-
- if (start > 0) {
- node = __drm_mm_interval_first(mm, 0, start - 1);
- if (drm_mm_node_allocated(node)) {
- KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
- node->start, node->size, start);
- return false;
- }
- }
-
- if (end < U64_MAX) {
- node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (drm_mm_node_allocated(node)) {
- KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
- node->start, node->size, end);
- return false;
- }
- }
-
- return true;
-}
-
-static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
- u64 start, u64 end)
-{
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int n, start_n, end_n;
- int ret;
-
- DRM_MM_BUG_ON(!count);
- DRM_MM_BUG_ON(!size);
- DRM_MM_BUG_ON(end <= start);
-
- /* Very similar to __drm_test_mm_insert(), but now instead of populating the
- * full range of the drm_mm, we try to fill a small portion of it.
- */
-
- ret = -ENOMEM;
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- ret = -EINVAL;
- drm_mm_init(&mm, 0, count * size);
-
- start_n = div64_u64(start + size - 1, size);
- end_n = div64_u64(end - size, size);
-
- for (mode = insert_modes; mode->name; mode++) {
- for (n = start_n; n <= end_n; n++) {
- if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
- start, end, mode)) {
- KUNIT_FAIL(test,
- "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
- mode->name, size, n, start_n, end_n, start, end);
- goto out;
- }
- }
-
- if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
- KUNIT_FAIL(test,
- "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
- mode->name, start, end, size);
- goto out;
- }
-
- /* Remove one and reinsert, it should refill itself */
- for (n = start_n; n <= end_n; n++) {
- u64 addr = nodes[n].start;
-
- drm_mm_remove_node(&nodes[n]);
- if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
- start, end, mode)) {
- KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
- goto out;
- }
-
- if (nodes[n].start != addr) {
- KUNIT_FAIL(test,
- "%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
- goto out;
- }
- }
-
- if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
- KUNIT_FAIL(test,
- "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
- mode->name, start, end, size);
- goto out;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
- }
-
- ret = 0;
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
- return ret;
-}
-
-static int insert_outside_range(struct kunit *test)
-{
- struct drm_mm mm;
- const unsigned int start = 1024;
- const unsigned int end = 2048;
- const unsigned int size = end - start;
-
- drm_mm_init(&mm, start, size);
-
- if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
- return -EINVAL;
-
- if (!expect_insert_in_range_fail(test, &mm, size,
- start - size / 2, start + (size + 1) / 2))
- return -EINVAL;
-
- if (!expect_insert_in_range_fail(test, &mm, size,
- end - (size + 1) / 2, end + size / 2))
- return -EINVAL;
-
- if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
- return -EINVAL;
-
- drm_mm_takedown(&mm);
- return 0;
-}
-
-static void drm_test_mm_insert_range(struct kunit *test)
-{
- const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
- unsigned int n;
-
- /* Check that requests outside the bounds of drm_mm are rejected. */
- KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
-
- for_each_prime_number_from(n, 1, 50) {
- const u64 size = BIT_ULL(n);
- const u64 max = count * size;
-
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 2, max));
- KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 4 + 1, 3 * max / 4 - 1));
-
- cond_resched();
- }
-}
-
-static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
- unsigned int num_insert, const struct insert_mode *mode)
-{
- unsigned int size = 4096;
- unsigned int i;
-
- for (i = 0; i < num_insert; i++) {
- if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
- KUNIT_FAIL(test, "%s insert failed\n", mode->name);
- return -EINVAL;
- }
- }
-
- /* introduce fragmentation by freeing every other node */
- for (i = 0; i < num_insert; i++) {
- if (i % 2 == 0)
- drm_mm_remove_node(&nodes[i]);
- }
-
- return 0;
-}
-
-static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
- unsigned int num_insert, struct drm_mm_node *nodes,
- const struct insert_mode *mode)
-{
- unsigned int size = 8192;
- ktime_t start;
- unsigned int i;
-
- start = ktime_get();
- for (i = 0; i < num_insert; i++) {
- if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
- KUNIT_FAIL(test, "%s insert failed\n", mode->name);
- return 0;
- }
- }
-
- return ktime_to_ns(ktime_sub(ktime_get(), start));
-}
-
-static void drm_test_mm_frag(struct kunit *test)
-{
- struct drm_mm mm;
- const struct insert_mode *mode;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int insert_size = 10000;
- unsigned int scale_factor = 4;
-
- /* We need 4 * insert_size nodes to hold intermediate allocated
- * drm_mm nodes.
- * once for prepare_frag()
- * once for the first get_insert_time() pass
- * twice for the second get_insert_time() pass
- */
- nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- /* For BOTTOMUP and TOPDOWN, we first fragment the
- * address space using prepare_frag() and then try to verify
- * that insertions scale quadratically from 10k to 20k insertions
- */
- drm_mm_init(&mm, 1, U64_MAX - 2);
- for (mode = insert_modes; mode->name; mode++) {
- u64 insert_time1, insert_time2;
-
- if (mode->mode != DRM_MM_INSERT_LOW &&
- mode->mode != DRM_MM_INSERT_HIGH)
- continue;
-
- if (prepare_frag(test, &mm, nodes, insert_size, mode))
- goto err;
-
- insert_time1 = get_insert_time(test, &mm, insert_size,
- nodes + insert_size, mode);
- if (insert_time1 == 0)
- goto err;
-
- insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
- nodes + insert_size * 2, mode);
- if (insert_time2 == 0)
- goto err;
-
- kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
- mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);
-
- if (insert_time2 > (scale_factor * insert_time1)) {
- KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
- mode->name, insert_time2 - (scale_factor * insert_time1));
- goto err;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- }
-
-err:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
-}
-
-static void drm_test_mm_align(struct kunit *test)
-{
- const struct insert_mode *mode;
- const unsigned int max_count = min(8192u, max_prime);
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int prime;
-
- /* For each of the possible insertion modes, we pick a few
- * arbitrary alignments and check that the inserted node
- * meets our requirements.
- */
-
- nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- drm_mm_init(&mm, 1, U64_MAX - 2);
-
- for (mode = insert_modes; mode->name; mode++) {
- unsigned int i = 0;
-
- for_each_prime_number_from(prime, 1, max_count) {
- u64 size = next_prime_number(prime);
-
- if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
- KUNIT_FAIL(test, "%s insert failed with alignment=%d",
- mode->name, prime);
- goto out;
- }
-
- i++;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- vfree(nodes);
-}
-
static void drm_test_mm_align_pot(struct kunit *test, int max)
{
struct drm_mm mm;
@@ -1144,626 +285,6 @@ static void drm_test_mm_align64(struct kunit *test)
drm_test_mm_align_pot(test, 64);
}
-static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
-{
- kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
- scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
-}
-
-static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
-{
- u64 hole_start, hole_end;
- struct drm_mm_node *hole;
-
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- struct drm_mm_node *next = list_next_entry(hole, node_list);
- const char *node1 = NULL, *node2 = NULL;
-
- if (drm_mm_node_allocated(hole))
- node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
- hole->start, hole->size, hole->color);
-
- if (drm_mm_node_allocated(next))
- node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
- next->start, next->size, next->color);
-
- kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
- hole_start, hole_end, hole_end - hole_start, node2);
-
- kfree(node2);
- kfree(node1);
-
- if (!--count)
- break;
- }
-}
-
-struct evict_node {
- struct drm_mm_node node;
- struct list_head link;
-};
-
-static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
- struct evict_node *nodes, unsigned int *order, unsigned int count,
- bool use_color, struct list_head *evict_list)
-{
- struct evict_node *e, *en;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- e = &nodes[order ? order[i] : i];
- list_add(&e->link, evict_list);
- if (drm_mm_scan_add_block(scan, &e->node))
- break;
- }
- list_for_each_entry_safe(e, en, evict_list, link) {
- if (!drm_mm_scan_remove_block(scan, &e->node))
- list_del(&e->link);
- }
- if (list_empty(evict_list)) {
- KUNIT_FAIL(test,
- "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
- scan->size, count, scan->alignment, scan->color);
- return false;
- }
-
- list_for_each_entry(e, evict_list, link)
- drm_mm_remove_node(&e->node);
-
- if (use_color) {
- struct drm_mm_node *node;
-
- while ((node = drm_mm_scan_color_evict(scan))) {
- e = container_of(node, typeof(*e), node);
- drm_mm_remove_node(&e->node);
- list_add(&e->link, evict_list);
- }
- } else {
- if (drm_mm_scan_color_evict(scan)) {
- KUNIT_FAIL(test,
- "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
- return false;
- }
- }
-
- return true;
-}
-
-static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
- unsigned int total_size, struct evict_node *nodes)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- struct drm_mm_node *node;
- unsigned int n;
-
- drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
- list_add(&e->link, &evict_list);
- drm_mm_scan_add_block(&scan, &e->node);
- }
- list_for_each_entry(e, &evict_list, link)
- drm_mm_scan_remove_block(&scan, &e->node);
-
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
-
- if (!drm_mm_node_allocated(&e->node)) {
- KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
- return false;
- }
-
- e->link.next = NULL;
- }
-
- drm_mm_for_each_node(node, mm) {
- e = container_of(node, typeof(*e), node);
- e->link.next = &e->link;
- }
-
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
-
- if (!e->link.next) {
- KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
- return false;
- }
- }
-
- return assert_continuous(test, mm, nodes[0].node.size);
-}
-
-static bool evict_everything(struct kunit *test, struct drm_mm *mm,
- unsigned int total_size, struct evict_node *nodes)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- unsigned int n;
- int err;
-
- drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
- for (n = 0; n < total_size; n++) {
- e = &nodes[n];
- list_add(&e->link, &evict_list);
- if (drm_mm_scan_add_block(&scan, &e->node))
- break;
- }
-
- err = 0;
- list_for_each_entry(e, &evict_list, link) {
- if (!drm_mm_scan_remove_block(&scan, &e->node)) {
- if (!err) {
- KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
- e->node.start);
- err = -EINVAL;
- }
- }
- }
- if (err)
- return false;
-
- list_for_each_entry(e, &evict_list, link)
- drm_mm_remove_node(&e->node);
-
- if (!assert_one_hole(test, mm, 0, total_size))
- return false;
-
- list_for_each_entry(e, &evict_list, link) {
- err = drm_mm_reserve_node(mm, &e->node);
- if (err) {
- KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
- return false;
- }
- }
-
- return assert_continuous(test, mm, nodes[0].node.size);
-}
-
-static int evict_something(struct kunit *test, struct drm_mm *mm,
- u64 range_start, u64 range_end, struct evict_node *nodes,
- unsigned int *order, unsigned int count, unsigned int size,
- unsigned int alignment, const struct insert_mode *mode)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- struct drm_mm_node tmp;
- int err;
-
- drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
- range_end, mode->mode);
- if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
- return -EINVAL;
-
- memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
- DRM_MM_INSERT_EVICT);
- if (err) {
- KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
- size, alignment);
- show_scan(test, &scan);
- show_holes(test, mm, 3);
- return err;
- }
-
- if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- KUNIT_FAIL(test,
- "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
- err = -EINVAL;
- }
-
- if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
- drm_mm_hole_follows(&tmp)) {
- KUNIT_FAIL(test,
- "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
- tmp.size, size, alignment, misalignment(&tmp, alignment),
- tmp.start, drm_mm_hole_follows(&tmp));
- err = -EINVAL;
- }
-
- drm_mm_remove_node(&tmp);
- if (err)
- return err;
-
- list_for_each_entry(e, &evict_list, link) {
- err = drm_mm_reserve_node(mm, &e->node);
- if (err) {
- KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
- return err;
- }
- }
-
- if (!assert_continuous(test, mm, nodes[0].node.size)) {
- KUNIT_FAIL(test, "range is no longer continuous\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void drm_test_mm_evict(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int size = 8192;
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Here we populate a full drm_mm and then try and insert a new node
- * by evicting other nodes in a random order. The drm_mm_scan should
- * pick the first matching hole it finds from the random list. We
- * repeat that for different allocation strategies, alignments and
- * sizes to try and stress the hole finder.
- */
-
- nodes = vzalloc(array_size(size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, size);
- for (n = 0; n < size; n++) {
- if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- /* First check that using the scanner doesn't break the mm */
- if (!evict_nothing(test, &mm, size, nodes)) {
- KUNIT_FAIL(test, "evict_nothing() failed\n");
- goto out;
- }
- if (!evict_everything(test, &mm, size, nodes)) {
- KUNIT_FAIL(test, "evict_everything() failed\n");
- goto out;
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
- mode)) {
- KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
- mode->name, n);
- goto out;
- }
- }
-
- for (n = 1; n < size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
- size / 2, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, size / 2, n);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(size, max_prime)) {
- unsigned int nsize = (size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
- nsize, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static void drm_test_mm_evict_range(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int size = 8192;
- const unsigned int range_size = size / 2;
- const unsigned int range_start = size / 4;
- const unsigned int range_end = range_start + range_size;
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Like drm_test_mm_evict() but now we are limiting the search to a
- * small portion of the full drm_mm.
- */
-
- nodes = vzalloc(array_size(size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, size);
- for (n = 0; n < size; n++) {
- if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= range_size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, range_start, range_end, nodes,
- order, size, n, 1, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u) failed with range [%u, %u]\n",
- mode->name, n, range_start, range_end);
- goto out;
- }
- }
-
- for (n = 1; n <= range_size; n <<= 1) {
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, range_start, range_end, nodes,
- order, size, range_size / 2, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, range_size / 2, n, range_start, range_end);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
- unsigned int nsize = (range_size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, size, &prng);
- if (evict_something(test, &mm, range_start, range_end, nodes,
- order, size, nsize, n, mode)) {
- KUNIT_FAIL(test,
- "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, nsize, n, range_start, range_end);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static unsigned int node_index(const struct drm_mm_node *node)
-{
- return div64_u64(node->start, node->size);
-}
-
-static void drm_test_mm_topdown(struct kunit *test)
-{
- const struct insert_mode *topdown = &insert_modes[TOPDOWN];
-
- DRM_RND_STATE(prng, random_seed);
- const unsigned int count = 8192;
- unsigned int size;
- unsigned long *bitmap;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
-
- /* When allocating top-down, we expect to be returned a node
- * from a suitable hole at the top of the drm_mm. We check that
- * the returned node does match the highest available slot.
- */
-
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- bitmap = bitmap_zalloc(count, GFP_KERNEL);
- if (!bitmap)
- goto err_nodes;
-
- order = drm_random_order(count, &prng);
- if (!order)
- goto err_bitmap;
-
- for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size * count);
- for (n = 0; n < count; n++) {
- if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
- KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
- goto out;
- }
-
- if (drm_mm_hole_follows(&nodes[n])) {
- KUNIT_FAIL(test,
- "hole after topdown insert %d, start=%llx\n, size=%u",
- n, nodes[n].start, size);
- goto out;
- }
-
- if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
- goto out;
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- drm_random_reorder(order, count, &prng);
- for_each_prime_number_from(n, 1, min(count, max_prime)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- __set_bit(node_index(node), bitmap);
- }
-
- for (m = 0; m < n; m++) {
- unsigned int last;
-
- node = &nodes[order[(o + m) % count]];
- if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
- KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
- goto out;
- }
-
- if (drm_mm_hole_follows(node)) {
- KUNIT_FAIL(test,
- "hole after topdown insert %d/%d, start=%llx\n",
- m, n, node->start);
- goto out;
- }
-
- last = find_last_bit(bitmap, count);
- if (node_index(node) != last) {
- KUNIT_FAIL(test,
- "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
- m, n, size, last, node_index(node));
- goto out;
- }
-
- __clear_bit(last, bitmap);
- }
-
- DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
-
- o += n;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_bitmap:
- bitmap_free(bitmap);
-err_nodes:
- vfree(nodes);
-}
-
-static void drm_test_mm_bottomup(struct kunit *test)
-{
- const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
-
- DRM_RND_STATE(prng, random_seed);
- const unsigned int count = 8192;
- unsigned int size;
- unsigned long *bitmap;
- struct drm_mm mm;
- struct drm_mm_node *nodes, *node, *next;
- unsigned int *order, n, m, o = 0;
-
- /* Like drm_test_mm_topdown, but instead of searching for the last hole,
- * we search for the first.
- */
-
- nodes = vzalloc(array_size(count, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- bitmap = bitmap_zalloc(count, GFP_KERNEL);
- if (!bitmap)
- goto err_nodes;
-
- order = drm_random_order(count, &prng);
- if (!order)
- goto err_bitmap;
-
- for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size * count);
- for (n = 0; n < count; n++) {
- if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
- KUNIT_FAIL(test,
- "bottomup insert failed, size %u step %d\n", size, n);
- goto out;
- }
-
- if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
- goto out;
- }
-
- if (!assert_continuous(test, &mm, size))
- goto out;
-
- drm_random_reorder(order, count, &prng);
- for_each_prime_number_from(n, 1, min(count, max_prime)) {
- for (m = 0; m < n; m++) {
- node = &nodes[order[(o + m) % count]];
- drm_mm_remove_node(node);
- __set_bit(node_index(node), bitmap);
- }
-
- for (m = 0; m < n; m++) {
- unsigned int first;
-
- node = &nodes[order[(o + m) % count]];
- if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
- KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
- goto out;
- }
-
- first = find_first_bit(bitmap, count);
- if (node_index(node) != first) {
- KUNIT_FAIL(test,
- "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
- m, n, first, node_index(node));
- goto out;
- }
- __clear_bit(first, bitmap);
- }
-
- DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
-
- o += n;
- }
-
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- DRM_MM_BUG_ON(!drm_mm_clean(&mm));
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_bitmap:
- bitmap_free(bitmap);
-err_nodes:
- vfree(nodes);
-}
-
static void drm_test_mm_once(struct kunit *test, unsigned int mode)
{
struct drm_mm mm;
@@ -1817,440 +338,18 @@ static void drm_test_mm_highest(struct kunit *test)
drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
}
-static void separate_adjacent_colors(const struct drm_mm_node *node,
- unsigned long color, u64 *start, u64 *end)
-{
- if (drm_mm_node_allocated(node) && node->color != color)
- ++*start;
-
- node = list_next_entry(node, node_list);
- if (drm_mm_node_allocated(node) && node->color != color)
- --*end;
-}
-
-static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
-{
- if (!drm_mm_hole_follows(node) &&
- drm_mm_node_allocated(list_next_entry(node, node_list))) {
- KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
- node->color, node->start, node->size,
- list_next_entry(node, node_list)->color,
- list_next_entry(node, node_list)->start,
- list_next_entry(node, node_list)->size);
- return true;
- }
-
- return false;
-}
-
-static void drm_test_mm_color(struct kunit *test)
-{
- const unsigned int count = min(4096u, max_iterations);
- const struct insert_mode *mode;
- struct drm_mm mm;
- struct drm_mm_node *node, *nn;
- unsigned int n;
-
- /* Color adjustment complicates everything. First we just check
- * that when we insert a node we apply any color_adjustment callback.
- * The callback we use should ensure that there is a gap between
- * any two nodes, and so after each insertion we check that those
- * holes are inserted and that they are preserved.
- */
-
- drm_mm_init(&mm, 0, U64_MAX);
-
- for (n = 1; n <= count; n++) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- kfree(node);
- goto out;
- }
- }
-
- drm_mm_for_each_node_safe(node, nn, &mm) {
- if (node->color != node->size) {
- KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
- node->size, node->color);
-
- goto out;
- }
-
- drm_mm_remove_node(node);
- kfree(node);
- }
-
- /* Now, let's start experimenting with applying a color callback */
- mm.color_adjust = separate_adjacent_colors;
- for (mode = insert_modes; mode->name; mode++) {
- u64 last;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- node->size = 1 + 2 * count;
- node->color = node->size;
-
- if (drm_mm_reserve_node(&mm, node)) {
- KUNIT_FAIL(test, "initial reserve failed!\n");
- goto out;
- }
-
- last = node->start + node->size;
-
- for (n = 1; n <= count; n++) {
- int rem;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- node->start = last;
- node->size = n + count;
- node->color = node->size;
-
- if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
- KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
- goto out;
- }
-
- node->start += n + 1;
- rem = misalignment(node, n + count);
- node->start += n + count - rem;
-
- if (drm_mm_reserve_node(&mm, node)) {
- KUNIT_FAIL(test, "reserve %d failed", n);
- goto out;
- }
-
- last = node->start + node->size;
- }
-
- for (n = 1; n <= count; n++) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- goto out;
-
- if (!expect_insert(test, &mm, node, n, n, n, mode)) {
- KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
- kfree(node);
- goto out;
- }
- }
-
- drm_mm_for_each_node_safe(node, nn, &mm) {
- u64 rem;
-
- if (node->color != node->size) {
- KUNIT_FAIL(test,
- "%s invalid color stored: expected %lld, found %ld\n",
- mode->name, node->size, node->color);
-
- goto out;
- }
-
- if (colors_abutt(test, node))
- goto out;
-
- div64_u64_rem(node->start, node->size, &rem);
- if (rem) {
- KUNIT_FAIL(test,
- "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
- mode->name, node->start, node->size, rem);
- goto out;
- }
-
- drm_mm_remove_node(node);
- kfree(node);
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, nn, &mm) {
- drm_mm_remove_node(node);
- kfree(node);
- }
- drm_mm_takedown(&mm);
-}
-
-static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
- u64 range_end, struct evict_node *nodes, unsigned int *order,
- unsigned int count, unsigned int size, unsigned int alignment,
- unsigned long color, const struct insert_mode *mode)
-{
- struct drm_mm_scan scan;
- LIST_HEAD(evict_list);
- struct evict_node *e;
- struct drm_mm_node tmp;
- int err;
-
- drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
- range_end, mode->mode);
- if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
- return -EINVAL;
-
- memset(&tmp, 0, sizeof(tmp));
- err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
- DRM_MM_INSERT_EVICT);
- if (err) {
- KUNIT_FAIL(test,
- "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
- size, alignment, color, err);
- show_scan(test, &scan);
- show_holes(test, mm, 3);
- return err;
- }
-
- if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- KUNIT_FAIL(test,
- "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
- err = -EINVAL;
- }
-
- if (colors_abutt(test, &tmp))
- err = -EINVAL;
-
- if (!assert_node(test, &tmp, mm, size, alignment, color)) {
- KUNIT_FAIL(test,
- "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
- tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
- err = -EINVAL;
- }
-
- drm_mm_remove_node(&tmp);
- if (err)
- return err;
-
- list_for_each_entry(e, &evict_list, link) {
- err = drm_mm_reserve_node(mm, &e->node);
- if (err) {
- KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
- return err;
- }
- }
-
- cond_resched();
- return 0;
-}
-
-static void drm_test_mm_color_evict(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int total_size = min(8192u, max_iterations);
- const struct insert_mode *mode;
- unsigned long color = 0;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Check that the drm_mm_scan also honours color adjustment when
- * choosing its victims to create a hole. Our color_adjust does not
- * allow two nodes to be placed together without an intervening hole
- * enlarging the set of victims that must be evicted.
- */
-
- nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(total_size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, 2 * total_size - 1);
- mm.color_adjust = separate_adjacent_colors;
- for (n = 0; n < total_size; n++) {
- if (!expect_insert(test, &mm, &nodes[n].node,
- 1, 0, color++,
- &insert_modes[0])) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= total_size; n <<= 1) {
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
- n, 1, color++, mode)) {
- KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
- goto out;
- }
- }
-
- for (n = 1; n < total_size; n <<= 1) {
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
- total_size / 2, n, color++, mode)) {
- KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, total_size / 2, n);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
- unsigned int nsize = (total_size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
- nsize, n, color++, mode)) {
- KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static void drm_test_mm_color_evict_range(struct kunit *test)
-{
- DRM_RND_STATE(prng, random_seed);
- const unsigned int total_size = 8192;
- const unsigned int range_size = total_size / 2;
- const unsigned int range_start = total_size / 4;
- const unsigned int range_end = range_start + range_size;
- const struct insert_mode *mode;
- unsigned long color = 0;
- struct drm_mm mm;
- struct evict_node *nodes;
- struct drm_mm_node *node, *next;
- unsigned int *order, n;
-
- /* Like drm_test_mm_color_evict(), but limited to small portion of the full
- * drm_mm range.
- */
-
- nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- KUNIT_ASSERT_TRUE(test, nodes);
-
- order = drm_random_order(total_size, &prng);
- if (!order)
- goto err_nodes;
-
- drm_mm_init(&mm, 0, 2 * total_size - 1);
- mm.color_adjust = separate_adjacent_colors;
- for (n = 0; n < total_size; n++) {
- if (!expect_insert(test, &mm, &nodes[n].node,
- 1, 0, color++,
- &insert_modes[0])) {
- KUNIT_FAIL(test, "insert failed, step %d\n", n);
- goto out;
- }
- }
-
- for (mode = evict_modes; mode->name; mode++) {
- for (n = 1; n <= range_size; n <<= 1) {
- drm_random_reorder(order, range_size, &prng);
- if (evict_color(test, &mm, range_start, range_end, nodes, order,
- total_size, n, 1, color++, mode)) {
- KUNIT_FAIL(test,
- "%s evict_color(size=%u) failed for range [%x, %x]\n",
- mode->name, n, range_start, range_end);
- goto out;
- }
- }
-
- for (n = 1; n < range_size; n <<= 1) {
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, range_start, range_end, nodes, order,
- total_size, range_size / 2, n, color++, mode)) {
- KUNIT_FAIL(test,
- "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, total_size / 2, n, range_start, range_end);
- goto out;
- }
- }
-
- for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
- unsigned int nsize = (range_size - n + 1) / 2;
-
- DRM_MM_BUG_ON(!nsize);
-
- drm_random_reorder(order, total_size, &prng);
- if (evict_color(test, &mm, range_start, range_end, nodes, order,
- total_size, nsize, n, color++, mode)) {
- KUNIT_FAIL(test,
- "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, nsize, n, range_start, range_end);
- goto out;
- }
- }
-
- cond_resched();
- }
-
-out:
- drm_mm_for_each_node_safe(node, next, &mm)
- drm_mm_remove_node(node);
- drm_mm_takedown(&mm);
- kfree(order);
-err_nodes:
- vfree(nodes);
-}
-
-static int drm_mm_suite_init(struct kunit_suite *suite)
-{
- while (!random_seed)
- random_seed = get_random_u32();
-
- kunit_info(suite,
- "Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n",
- random_seed, max_iterations, max_prime);
-
- return 0;
-}
-
-module_param(random_seed, uint, 0400);
-module_param(max_iterations, uint, 0400);
-module_param(max_prime, uint, 0400);
-
static struct kunit_case drm_mm_tests[] = {
KUNIT_CASE(drm_test_mm_init),
KUNIT_CASE(drm_test_mm_debug),
- KUNIT_CASE(drm_test_mm_reserve),
- KUNIT_CASE(drm_test_mm_insert),
- KUNIT_CASE(drm_test_mm_replace),
- KUNIT_CASE(drm_test_mm_insert_range),
- KUNIT_CASE(drm_test_mm_frag),
- KUNIT_CASE(drm_test_mm_align),
KUNIT_CASE(drm_test_mm_align32),
KUNIT_CASE(drm_test_mm_align64),
- KUNIT_CASE(drm_test_mm_evict),
- KUNIT_CASE(drm_test_mm_evict_range),
- KUNIT_CASE(drm_test_mm_topdown),
- KUNIT_CASE(drm_test_mm_bottomup),
KUNIT_CASE(drm_test_mm_lowest),
KUNIT_CASE(drm_test_mm_highest),
- KUNIT_CASE(drm_test_mm_color),
- KUNIT_CASE(drm_test_mm_color_evict),
- KUNIT_CASE(drm_test_mm_color_evict_range),
{}
};
static struct kunit_suite drm_mm_test_suite = {
.name = "drm_mm",
- .suite_init = drm_mm_suite_init,
.test_cases = drm_mm_tests,
};
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 1baa4ace1..94f8e3178 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -174,10 +174,6 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
crtc->state->event);
- /* There is nothing to do if CRTC is not going to be enabled. */
- if (!crtc->state->active)
- return;
-
/*
* Flush CRTC changes with go bit only if new modeset is not
* coming, so CRTC is enabled throughout the commit.
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 98efbaf3b..1ad711f8d 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -322,6 +322,60 @@ const struct dispc_features dispc_am625_feats = {
.vid_order = { 1, 0 },
};
+const struct dispc_features dispc_am62a7_feats = {
+ /*
+	 * If the code reaches dispc_mode_valid() with VP1,
+	 * it should return MODE_BAD.
+ */
+ .max_pclk_khz = {
+ [DISPC_VP_TIED_OFF] = 0,
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+	 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM62A7,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+	/* VP1 of the DSS in the AM62A7 SoC is tied off internally */
+ .vp_bus_type = { DISPC_VP_TIED_OFF, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+};
+
static const u16 *dispc_common_regmap;
struct dss_vp_data {
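
The xinc_max bound above can be sanity-checked against the pixel-increment formula from the comment; a minimal standalone sketch (illustrative only, not part of the patch — MAX_PIXEL_INC and MAX_BPP are assumed names for the 255 and 8 limits described there):

	#include <assert.h>

	#define MAX_PIXEL_INC	255	/* hardware limit on the pixel inc value */
	#define MAX_BPP		8	/* max bytes per pixel across supported formats */

	/* pixel inc is computed as 1 + (xinc - 1) * bpp */
	static int pixel_inc(int xinc, int bpp)
	{
		return 1 + (xinc - 1) * bpp;
	}

	int main(void)
	{
		assert(pixel_inc(32, MAX_BPP) <= MAX_PIXEL_INC);	/* 249 fits */
		assert(pixel_inc(33, MAX_BPP) > MAX_PIXEL_INC);	/* 257 does not */
		return 0;
	}
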
@@ -824,6 +878,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
case DISPC_K2G:
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -840,6 +895,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_k2g_set_irqenable(dispc, mask);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -1331,6 +1387,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
x, y, layer);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2250,6 +2307,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
dispc_k2g_plane_init(dispc);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2357,6 +2415,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
break;
case DISPC_AM625:
+ case DISPC_AM62A7:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2702,14 +2761,28 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}
+/*
+ * K2G display controller does not support soft reset, so we do a basic manual
+ * reset here: make sure the IRQs are masked and VPs are disabled.
+ */
+static void dispc_softreset_k2g(struct dispc_device *dispc)
+{
+ dispc_set_irqenable(dispc, 0);
+ dispc_read_and_clear_irqstatus(dispc);
+
+ for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
static int dispc_softreset(struct dispc_device *dispc)
{
u32 val;
- int ret = 0;
+ int ret;
- /* K2G display controller does not support soft reset */
- if (dispc->feat->subrev == DISPC_K2G)
+ if (dispc->feat->subrev == DISPC_K2G) {
+ dispc_softreset_k2g(dispc);
return 0;
+ }
/* Soft reset */
REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 33ac5ad7a..086327d51 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -54,12 +54,14 @@ enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_TIED_OFF, /* Tied off / Unavailable */
DISPC_VP_MAX_BUS_TYPE,
};
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
};
@@ -88,6 +90,7 @@ struct dispc_features {
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
+extern const struct dispc_features dispc_am62a7_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 4d063eb9c..d15f836dc 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -32,9 +32,9 @@ int tidss_runtime_get(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- r = pm_runtime_get_sync(tidss->dev);
+ r = pm_runtime_resume_and_get(tidss->dev);
WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ return r;
}
void tidss_runtime_put(struct tidss_device *tidss)
@@ -43,7 +43,9 @@ void tidss_runtime_put(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- r = pm_runtime_put_sync(tidss->dev);
+ pm_runtime_mark_last_busy(tidss->dev);
+
+ r = pm_runtime_put_autosuspend(tidss->dev);
WARN_ON(r < 0);
}
@@ -136,6 +138,8 @@ static int tidss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tidss);
+ spin_lock_init(&tidss->wait_lock);
+
ret = dispc_init(tidss);
if (ret) {
dev_err(dev, "failed to initialize dispc: %d\n", ret);
@@ -144,6 +148,9 @@ static int tidss_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
#ifndef CONFIG_PM
/* If we don't have PM, we need to call resume manually */
dispc_runtime_resume(tidss->dispc);
@@ -192,6 +199,7 @@ err_runtime_suspend:
#ifndef CONFIG_PM
dispc_runtime_suspend(tidss->dispc);
#endif
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
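
The probe/remove changes above are the standard runtime-PM autosuspend recipe; a condensed sketch for a generic platform driver (hedged, the foo_* names are hypothetical):

	#include <linux/pm_runtime.h>

	static void foo_setup_pm(struct device *dev)
	{
		pm_runtime_enable(dev);
		pm_runtime_set_autosuspend_delay(dev, 1000);	/* suspend after 1 s idle */
		pm_runtime_use_autosuspend(dev);
	}

	static int foo_touch_hw(struct device *dev)
	{
		int ret = pm_runtime_resume_and_get(dev);	/* holds no ref on failure */

		if (ret < 0)
			return ret;
		/* ... hardware access ... */
		pm_runtime_mark_last_busy(dev);
		return pm_runtime_put_autosuspend(dev);
	}

	static void foo_teardown_pm(struct device *dev)
	{
		/* mirror the setup, as the patch does on remove and error paths */
		pm_runtime_dont_use_autosuspend(dev);
		pm_runtime_disable(dev);
	}
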
@@ -215,6 +223,7 @@ static void tidss_remove(struct platform_device *pdev)
/* If we don't have PM, we need to call suspend manually */
dispc_runtime_suspend(tidss->dispc);
#endif
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
/* devm allocated dispc goes away with the dev so mark it NULL */
@@ -231,6 +240,7 @@ static void tidss_shutdown(struct platform_device *pdev)
static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
+ { .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 0c681c760..604334ef5 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -93,33 +93,21 @@ void tidss_irq_resume(struct tidss_device *tidss)
spin_unlock_irqrestore(&tidss->wait_lock, flags);
}
-static void tidss_irq_preinstall(struct drm_device *ddev)
-{
- struct tidss_device *tidss = to_tidss(ddev);
-
- spin_lock_init(&tidss->wait_lock);
-
- tidss_runtime_get(tidss);
-
- dispc_set_irqenable(tidss->dispc, 0);
- dispc_read_and_clear_irqstatus(tidss->dispc);
-
- tidss_runtime_put(tidss);
-}
-
-static void tidss_irq_postinstall(struct drm_device *ddev)
+int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
{
struct tidss_device *tidss = to_tidss(ddev);
- unsigned long flags;
- unsigned int i;
+ int ret;
- tidss_runtime_get(tidss);
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
+ if (ret)
+ return ret;
tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
- for (i = 0; i < tidss->num_crtcs; ++i) {
+ for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
@@ -127,28 +115,6 @@ static void tidss_irq_postinstall(struct drm_device *ddev)
tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
}
- tidss_irq_update(tidss);
-
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
-
- tidss_runtime_put(tidss);
-}
-
-int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
-{
- int ret;
-
- if (irq == IRQ_NOTCONNECTED)
- return -ENOTCONN;
-
- tidss_irq_preinstall(ddev);
-
- ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
- if (ret)
- return ret;
-
- tidss_irq_postinstall(ddev);
-
return 0;
}
@@ -156,9 +122,5 @@ void tidss_irq_uninstall(struct drm_device *ddev)
{
struct tidss_device *tidss = to_tidss(ddev);
- tidss_runtime_get(tidss);
- dispc_set_irqenable(tidss->dispc, 0);
- tidss_runtime_put(tidss);
-
free_irq(tidss->irq, ddev);
}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index d096d8d2b..a0e494c80 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -29,7 +29,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
- drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2f6eaac7f..23bf16f59 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -570,19 +570,18 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
match);
}
-static int tilcdc_pdev_remove(struct platform_device *pdev)
+static void tilcdc_pdev_remove(struct platform_device *pdev)
{
int ret;
ret = tilcdc_get_external_components(&pdev->dev, NULL);
if (ret < 0)
- return ret;
+ dev_err(&pdev->dev, "tilcdc_get_external_components() failed (%pe)\n",
+ ERR_PTR(ret));
else if (ret == 0)
tilcdc_fini(platform_get_drvdata(pdev));
else
component_master_del(&pdev->dev, &tilcdc_comp_ops);
-
- return 0;
}
static void tilcdc_pdev_shutdown(struct platform_device *pdev)
@@ -599,7 +598,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
static struct platform_driver tilcdc_platform_driver = {
.probe = tilcdc_pdev_probe,
- .remove = tilcdc_pdev_remove,
+ .remove_new = tilcdc_pdev_remove,
.shutdown = tilcdc_pdev_shutdown,
.driver = {
.name = "tilcdc",
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index e5b10e415..4f8f31723 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -404,14 +404,12 @@ err_unload:
return ret;
}
-static int arcpgu_remove(struct platform_device *pdev)
+static void arcpgu_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
arcpgu_unload(drm);
-
- return 0;
}
static const struct of_device_id arcpgu_of_table[] = {
@@ -423,7 +421,7 @@ MODULE_DEVICE_TABLE(of, arcpgu_of_table);
static struct platform_driver arcpgu_platform_driver = {
.probe = arcpgu_probe,
- .remove = arcpgu_remove,
+ .remove_new = arcpgu_remove,
.driver = {
.name = "arcpgu",
.of_match_table = arcpgu_of_table,
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index c5c34cd2e..4e3a152f8 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -411,7 +411,8 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
- drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, &damage);
+ drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb,
+ &damage, &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 4ceb68ffa..dd8b0a181 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -78,7 +78,7 @@ static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
}
static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
@@ -98,7 +98,7 @@ static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state);
if (ret)
goto err_msg;
} else {
@@ -171,7 +171,8 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
drm_dev_exit(idx);
}
@@ -281,7 +282,8 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
- ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
out_exit:
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 2d999a0fa..ab89b7fc7 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -19,7 +19,6 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -758,7 +757,11 @@ static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
+ struct drm_device *dev = plane->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
struct drm_framebuffer *new_fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
@@ -777,6 +780,16 @@ static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
+ if (new_fb->format != odev->format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ odev->pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
@@ -817,7 +830,7 @@ static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage);
+ &damage, &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 73dd4f428..8fd6758f5 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -509,7 +509,8 @@ static void repaper_get_temperature(struct repaper_epd *epd)
epd->factored_stage_time = epd->stage_time * factor10x / 10;
}
-static int repaper_fb_dirty(struct drm_framebuffer *fb)
+static int repaper_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_format_conv_state *fmtcnv_state)
{
struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
@@ -545,7 +546,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
iosys_map_set_vaddr(&dst, buf);
iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -830,13 +831,16 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
struct drm_rect rect;
if (!pipe->crtc.state->active)
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- repaper_fb_dirty(state->fb);
+ repaper_fb_dirty(state->fb, &fmtcnv_state);
+
+ drm_format_conv_state_release(&fmtcnv_state);
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
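
Taken together, the repaper hunks show the whole drm_format_conv_state lifecycle for a driver without shadow-plane state; condensed (fragment, surrounding declarations elided):

	struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;

	/* the format helpers reuse fmtcnv_state's scratch buffer between calls */
	drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, &fmtcnv_state);

	/* frees whatever conversion buffer the helpers allocated */
	drm_format_conv_state_release(&fmtcnv_state);

Shadow-plane drivers (ofdrm, simpledrm, and the MIPI-DBI panels above) instead use the fmtcnv_state embedded in struct drm_shadow_plane_state, optionally preallocating its buffer in atomic_check via drm_format_conv_state_reserve().
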
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 5fefc895b..7ce1c4617 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -19,12 +19,12 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "simpledrm"
@@ -579,6 +579,44 @@ static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
+static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *new_shadow_plane_state =
+ to_drm_shadow_plane_state(new_plane_state);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct drm_device *dev = plane->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (new_fb->format != sdev->format) {
+ void *buf;
+
+ /* format conversion necessary; reserve buffer */
+ buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
+ sdev->pitch, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -609,7 +647,7 @@ static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane
iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
- fb, &damage);
+ fb, &damage, &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
@@ -635,7 +673,7 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_check = simpledrm_primary_plane_helper_atomic_check,
.atomic_update = simpledrm_primary_plane_helper_atomic_update,
.atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
};
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 3cf4eec16..7336fa1dd 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -64,7 +64,8 @@ static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
struct drm_framebuffer *fb,
- struct drm_rect *clip)
+ struct drm_rect *clip,
+ struct drm_format_conv_state *fmtcnv_state)
{
size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
unsigned int x, y;
@@ -77,7 +78,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
iosys_map_set_vaddr(&dst_map, buf);
iosys_map_set_vaddr(&vmap, vaddr);
- drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip, fmtcnv_state);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -93,7 +94,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
}
static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+ struct drm_rect *clip, struct drm_format_conv_state *fmtcnv_state)
{
int ret;
@@ -101,7 +102,7 @@ static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuf
if (ret)
return ret;
- st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip);
+ st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -109,7 +110,7 @@ static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuf
}
static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *rect)
+ struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
struct mipi_dbi *dbi = &dbidev->dbi;
@@ -121,7 +122,7 @@ static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect);
+ ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect, fmtcnv_state);
if (ret)
goto err_msg;
@@ -160,7 +161,8 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
drm_dev_exit(idx);
}
@@ -238,7 +240,8 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(100);
- st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect,
+ &shadow_plane_state->fmtcnv_state);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
index b1b423b68..19eaff22e 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- for (int j = 0; j <= MAX_ORDER; ++j) {
+ for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
pt = pool->caching[i].orders[j];
KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
KUNIT_EXPECT_EQ(test, pt.caching, i);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 2d9cae8cd..cceaa18d4 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -109,7 +109,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
},
{
.description = "Above the allocation limit",
- .order = MAX_ORDER + 1,
+ .order = MAX_PAGE_ORDER + 1,
},
{
.description = "One page, with coherent DMA mappings enabled",
@@ -118,7 +118,7 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
- .order = MAX_ORDER + 1,
+ .order = MAX_PAGE_ORDER + 1,
.use_dma_alloc = true,
},
};
@@ -165,7 +165,7 @@ static void ttm_pool_alloc_basic(struct kunit *test)
fst_page = tt->pages[0];
last_page = tt->pages[tt->num_pages - 1];
- if (params->order <= MAX_ORDER) {
+ if (params->order <= MAX_PAGE_ORDER) {
if (params->use_dma_alloc) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
@@ -182,7 +182,7 @@ static void ttm_pool_alloc_basic(struct kunit *test)
* order 0 blocks
*/
KUNIT_ASSERT_EQ(test, fst_page->private,
- min_t(unsigned int, MAX_ORDER,
+ min_t(unsigned int, MAX_PAGE_ORDER,
params->order));
KUNIT_ASSERT_EQ(test, last_page->private, 0);
}
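
All of the MAX_ORDER conversions in these TTM hunks rely on the same identity from the 6.8 mm rename: NR_PAGE_ORDERS == MAX_PAGE_ORDER + 1, so a `< NR_PAGE_ORDERS` bound visits exactly the orders the old `<= MAX_ORDER` bound did:

	/* both loop forms visit orders 0 .. MAX_PAGE_ORDER inclusive */
	for (unsigned int order = 0; order < NR_PAGE_ORDERS; ++order)
		visit(order);	/* hypothetical per-order work */
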
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e58b7e249..f95b0406c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -370,7 +370,13 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&bo->bdev->lru_lock);
INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
- queue_work(bdev->wq, &bo->delayed_delete);
+
+ /* Schedule the worker on the closest NUMA node. This
+ * improves performance since system memory might be
+ * cleared on free and that is best done on a CPU core
+ * close to it.
+ */
+ queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
return;
}
@@ -764,7 +770,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
* This function may sleep while waiting for space to become available.
* Returns:
* -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * -ENOSPC: Could not allocate space for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
@@ -824,7 +830,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
goto error;
}
- ret = -ENOMEM;
+ ret = -ENOSPC;
if (!type_found) {
pr_err(TTM_PFX "No compatible memory type found\n");
ret = -EINVAL;
@@ -910,6 +916,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
return -EINVAL;
ret = ttm_bo_move_buffer(bo, placement, ctx);
+ /* For backward compatibility with userspace */
+ if (ret == -ENOSPC)
+ return -ENOMEM;
if (ret)
return ret;
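
With this split, ttm_bo_mem_space() can report "placements full" (-ENOSPC) separately from a genuine allocation failure (-ENOMEM), while ttm_bo_validate() folds the former back to -ENOMEM for userspace. A hedged fragment showing how an in-kernel caller might use the distinction (hypothetical caller, not from this patch):

	ret = ttm_bo_mem_space(bo, placement, &mem, &ctx);
	if (ret == -ENOSPC) {
		/* placements are full: try eviction or a fallback placement */
	} else if (ret) {
		return ret;	/* -ENOMEM, -ERESTARTSYS, ... */
	}
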
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index d48b39132..760279600 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -95,11 +95,17 @@ static int ttm_global_init(void)
ttm_pool_mgr_init(num_pages);
ttm_tt_mgr_init(num_pages, num_dma32);
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+ __GFP_NOWARN);
+	/* Retry without GFP_DMA32 for platforms where DMA32 is not available */
if (unlikely(glob->dummy_read_page == NULL)) {
- ret = -ENOMEM;
- goto out;
+ glob->dummy_read_page = alloc_page(__GFP_ZERO);
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
}
INIT_LIST_HEAD(&glob->device_list);
@@ -195,7 +201,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bool use_dma_alloc, bool use_dma32)
{
struct ttm_global *glob = &ttm_glob;
- int ret;
+ int ret, nid;
if (WARN_ON(vma_manager == NULL))
return -EINVAL;
@@ -204,7 +210,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
if (ret)
return ret;
- bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+ bdev->wq = alloc_workqueue("ttm",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
if (!bdev->wq) {
ttm_global_release();
return -ENOMEM;
@@ -213,7 +220,13 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
+
+ if (dev)
+ nid = dev_to_node(dev);
+ else
+ nid = NUMA_NO_NODE;
+
+ ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index dbc96984d..6e1fd6985 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -288,17 +288,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
+ if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->nid != NUMA_NO_NODE)
+ return &pool->caching[caching].orders[order];
+
if (pool->use_dma32)
return &global_dma32_uncached[order];
@@ -447,7 +453,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
else
gfp_flags |= GFP_HIGHUSER;
- for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
+ for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages));
num_pages;
order = min_t(unsigned int, order, __fls(num_pages))) {
struct ttm_pool_type *pt;
@@ -566,11 +572,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- if (use_dma_alloc || nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j <= MAX_ORDER; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ /* Initialize only pool types which are actually used */
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_init(pt, pool, i, j);
+ }
}
}
EXPORT_SYMBOL(ttm_pool_init);
@@ -599,10 +611,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j <= MAX_ORDER; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, i, j);
+ if (pt != &pool->caching[i].orders[j])
+ continue;
+
+ ttm_pool_type_fini(pt);
+ }
}
/* We removed the pool types from the LRU, but we need to also make sure
@@ -656,7 +674,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
- for (i = 0; i <= MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@@ -667,7 +685,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
- for (i = 0; i <= MAX_ORDER; ++i)
+ for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@@ -776,7 +794,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
- for (i = 0; i <= MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -816,7 +834,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
- for (i = 0; i <= MAX_ORDER; ++i) {
+ for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 40876bcdd..7702359c9 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -21,7 +21,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -261,6 +260,22 @@ static const uint64_t udl_primary_plane_fmtmods[] = {
DRM_FORMAT_MOD_INVALID
};
+static int udl_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -296,7 +311,7 @@ out_drm_gem_fb_end_cpu_access:
static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_check = udl_primary_plane_helper_atomic_check,
.atomic_update = udl_primary_plane_helper_atomic_update,
};
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
index e8b314137..b7d673f11 100644
--- a/drivers/gpu/drm/v3d/Makefile
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -11,7 +11,9 @@ v3d-y := \
v3d_mmu.o \
v3d_perfmon.o \
v3d_trace_points.o \
- v3d_sched.o
+ v3d_sched.o \
+ v3d_sysfs.o \
+ v3d_submit.o
v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b3229a37..1bdfac8be 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -33,6 +33,9 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ if (bo->vaddr)
+ v3d_put_bo_vaddr(bo);
+
v3d_mmu_remove_ptes(bo);
mutex_lock(&v3d->bo_lock);
@@ -134,6 +137,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
bo = to_v3d_bo(&shmem_obj->base);
+ bo->vaddr = NULL;
ret = v3d_bo_create_finish(&shmem_obj->base);
if (ret)
@@ -167,6 +171,20 @@ v3d_prime_import_sg_table(struct drm_device *dev,
return obj;
}
+void v3d_get_bo_vaddr(struct v3d_bo *bo)
+{
+ struct drm_gem_shmem_object *obj = &bo->base;
+
+ bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+}
+
+void v3d_put_bo_vaddr(struct v3d_bo *bo)
+{
+ vunmap(bo->vaddr);
+ bo->vaddr = NULL;
+}
+
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
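
v3d_get_bo_vaddr() returns nothing and leaves bo->vaddr NULL when vmap() fails, so callers must check the pointer themselves. A hedged usage fragment (hypothetical caller; offset, data and len assumed):

	v3d_get_bo_vaddr(bo);
	if (!bo->vaddr)
		return -ENOMEM;	/* vmap() failed */

	memcpy(bo->vaddr + offset, data, len);	/* CPU access via the WC mapping */

	v3d_put_bo_vaddr(bo);
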
@@ -233,3 +251,36 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
drm_gem_object_put(gem_obj);
return 0;
}
+
+int
+v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_v3d_wait_bo *args = data;
+ ktime_t start = ktime_get();
+ u64 delta_ns;
+ unsigned long timeout_jiffies =
+ nsecs_to_jiffies_timeout(args->timeout_ns);
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+ true, timeout_jiffies);
+
+ /* Decrement the user's timeout, in case we got interrupted
+ * such that the ioctl will be restarted.
+ */
+ delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (delta_ns < args->timeout_ns)
+ args->timeout_ns -= delta_ns;
+ else
+ args->timeout_ns = 0;
+
+ /* Asked to wait beyond the jiffie/scheduler precision? */
+ if (ret == -ETIME && args->timeout_ns)
+ ret = -EAGAIN;
+
+ return ret;
+}
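
The timeout write-back above matters because an interrupted ioctl is typically retried with the same argument struct; since the kernel has already subtracted the elapsed time, a retry loop keeps the overall budget intact. A sketch of the userspace side (hedged; header install path may vary):

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/v3d_drm.h>	/* uapi header; install path may vary */

	static int wait_bo(int fd, uint32_t handle, uint64_t budget_ns)
	{
		struct drm_v3d_wait_bo wait = {
			.handle = handle,
			.timeout_ns = budget_ns,
		};
		int ret;

		/* on EINTR/EAGAIN the kernel already wrote the remaining time
		 * back into wait.timeout_ns, so retrying preserves the budget
		 */
		do {
			ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

		return ret;
	}
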
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 330669f51..94eafcecc 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -12,69 +12,83 @@
#include "v3d_drv.h"
#include "v3d_regs.h"
-#define REGDEF(reg) { reg, #reg }
+#define REGDEF(min_ver, max_ver, reg) { min_ver, max_ver, reg, #reg }
struct v3d_reg_def {
+ u32 min_ver;
+ u32 max_ver;
u32 reg;
const char *name;
};
static const struct v3d_reg_def v3d_hub_reg_defs[] = {
- REGDEF(V3D_HUB_AXICFG),
- REGDEF(V3D_HUB_UIFCFG),
- REGDEF(V3D_HUB_IDENT0),
- REGDEF(V3D_HUB_IDENT1),
- REGDEF(V3D_HUB_IDENT2),
- REGDEF(V3D_HUB_IDENT3),
- REGDEF(V3D_HUB_INT_STS),
- REGDEF(V3D_HUB_INT_MSK_STS),
-
- REGDEF(V3D_MMU_CTL),
- REGDEF(V3D_MMU_VIO_ADDR),
- REGDEF(V3D_MMU_VIO_ID),
- REGDEF(V3D_MMU_DEBUG_INFO),
+ REGDEF(33, 42, V3D_HUB_AXICFG),
+ REGDEF(33, 71, V3D_HUB_UIFCFG),
+ REGDEF(33, 71, V3D_HUB_IDENT0),
+ REGDEF(33, 71, V3D_HUB_IDENT1),
+ REGDEF(33, 71, V3D_HUB_IDENT2),
+ REGDEF(33, 71, V3D_HUB_IDENT3),
+ REGDEF(33, 71, V3D_HUB_INT_STS),
+ REGDEF(33, 71, V3D_HUB_INT_MSK_STS),
+
+ REGDEF(33, 71, V3D_MMU_CTL),
+ REGDEF(33, 71, V3D_MMU_VIO_ADDR),
+ REGDEF(33, 71, V3D_MMU_VIO_ID),
+ REGDEF(33, 71, V3D_MMU_DEBUG_INFO),
+
+ REGDEF(71, 71, V3D_GMP_STATUS(71)),
+ REGDEF(71, 71, V3D_GMP_CFG(71)),
+ REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)),
};
static const struct v3d_reg_def v3d_gca_reg_defs[] = {
- REGDEF(V3D_GCA_SAFE_SHUTDOWN),
- REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK),
+ REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN),
+ REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK),
};
static const struct v3d_reg_def v3d_core_reg_defs[] = {
- REGDEF(V3D_CTL_IDENT0),
- REGDEF(V3D_CTL_IDENT1),
- REGDEF(V3D_CTL_IDENT2),
- REGDEF(V3D_CTL_MISCCFG),
- REGDEF(V3D_CTL_INT_STS),
- REGDEF(V3D_CTL_INT_MSK_STS),
- REGDEF(V3D_CLE_CT0CS),
- REGDEF(V3D_CLE_CT0CA),
- REGDEF(V3D_CLE_CT0EA),
- REGDEF(V3D_CLE_CT1CS),
- REGDEF(V3D_CLE_CT1CA),
- REGDEF(V3D_CLE_CT1EA),
-
- REGDEF(V3D_PTB_BPCA),
- REGDEF(V3D_PTB_BPCS),
-
- REGDEF(V3D_GMP_STATUS),
- REGDEF(V3D_GMP_CFG),
- REGDEF(V3D_GMP_VIO_ADDR),
-
- REGDEF(V3D_ERR_FDBGO),
- REGDEF(V3D_ERR_FDBGB),
- REGDEF(V3D_ERR_FDBGS),
- REGDEF(V3D_ERR_STAT),
+ REGDEF(33, 71, V3D_CTL_IDENT0),
+ REGDEF(33, 71, V3D_CTL_IDENT1),
+ REGDEF(33, 71, V3D_CTL_IDENT2),
+ REGDEF(33, 71, V3D_CTL_MISCCFG),
+ REGDEF(33, 71, V3D_CTL_INT_STS),
+ REGDEF(33, 71, V3D_CTL_INT_MSK_STS),
+ REGDEF(33, 71, V3D_CLE_CT0CS),
+ REGDEF(33, 71, V3D_CLE_CT0CA),
+ REGDEF(33, 71, V3D_CLE_CT0EA),
+ REGDEF(33, 71, V3D_CLE_CT1CS),
+ REGDEF(33, 71, V3D_CLE_CT1CA),
+ REGDEF(33, 71, V3D_CLE_CT1EA),
+
+ REGDEF(33, 71, V3D_PTB_BPCA),
+ REGDEF(33, 71, V3D_PTB_BPCS),
+
+ REGDEF(33, 42, V3D_GMP_STATUS(33)),
+ REGDEF(33, 42, V3D_GMP_CFG(33)),
+ REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)),
+
+ REGDEF(33, 71, V3D_ERR_FDBGO),
+ REGDEF(33, 71, V3D_ERR_FDBGB),
+ REGDEF(33, 71, V3D_ERR_FDBGS),
+ REGDEF(33, 71, V3D_ERR_STAT),
};
static const struct v3d_reg_def v3d_csd_reg_defs[] = {
- REGDEF(V3D_CSD_STATUS),
- REGDEF(V3D_CSD_CURRENT_CFG0),
- REGDEF(V3D_CSD_CURRENT_CFG1),
- REGDEF(V3D_CSD_CURRENT_CFG2),
- REGDEF(V3D_CSD_CURRENT_CFG3),
- REGDEF(V3D_CSD_CURRENT_CFG4),
- REGDEF(V3D_CSD_CURRENT_CFG5),
- REGDEF(V3D_CSD_CURRENT_CFG6),
+ REGDEF(41, 71, V3D_CSD_STATUS),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)),
+ REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)),
+ REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)),
+ REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7),
};
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
@@ -85,38 +99,41 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
int i, core;
for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg,
- V3D_READ(v3d_hub_reg_defs[i].reg));
+ const struct v3d_reg_def *def = &v3d_hub_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ def->name, def->reg, V3D_READ(def->reg));
+ }
}
- if (v3d->ver < 41) {
- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ const struct v3d_reg_def *def = &v3d_gca_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_gca_reg_defs[i].name,
- v3d_gca_reg_defs[i].reg,
- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ def->name, def->reg, V3D_GCA_READ(def->reg));
}
}
for (core = 0; core < v3d->cores; core++) {
for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) {
- seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
- core,
- v3d_core_reg_defs[i].name,
- v3d_core_reg_defs[i].reg,
- V3D_CORE_READ(core,
- v3d_core_reg_defs[i].reg));
+ const struct v3d_reg_def *def = &v3d_core_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
+ seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
+ core, def->name, def->reg,
+ V3D_CORE_READ(core, def->reg));
+ }
}
- if (v3d_has_csd(v3d)) {
- for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) {
+ for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) {
+ const struct v3d_reg_def *def = &v3d_csd_reg_defs[i];
+
+ if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) {
seq_printf(m, "core %d %s (0x%04x): 0x%08x\n",
- core,
- v3d_csd_reg_defs[i].name,
- v3d_csd_reg_defs[i].reg,
- V3D_CORE_READ(core,
- v3d_csd_reg_defs[i].reg));
+ core, def->name, def->reg,
+ V3D_CORE_READ(core, def->reg));
}
}
}
@@ -147,8 +164,10 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU));
seq_printf(m, "TFU: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU));
- seq_printf(m, "TSY: %s\n",
- str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
+ if (v3d->ver <= 42) {
+ seq_printf(m, "TSY: %s\n",
+ str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY));
+ }
seq_printf(m, "MSO: %s\n",
str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO));
seq_printf(m, "L3C: %s (%dkb)\n",
@@ -177,10 +196,14 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
seq_printf(m, " QPUs: %d\n", nslc * qups);
seq_printf(m, " Semaphores: %d\n",
V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM));
- seq_printf(m, " BCG int: %d\n",
- (ident2 & V3D_IDENT2_BCG_INT) != 0);
- seq_printf(m, " Override TMU: %d\n",
- (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
+ if (v3d->ver <= 42) {
+ seq_printf(m, " BCG int: %d\n",
+ (ident2 & V3D_IDENT2_BCG_INT) != 0);
+ }
+ if (v3d->ver < 40) {
+ seq_printf(m, " Override TMU: %d\n",
+ (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
+ }
}
return 0;
@@ -212,14 +235,15 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int measure_ms = 1000;
if (v3d->ver >= 40) {
+ int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
- V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT,
+ V3D_SET_FIELD(cycle_count_reg,
V3D_PCTR_S0));
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
} else {
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0,
- V3D_PCTR_CYCLE_COUNT);
+ V3D_PCTR_CYCLE_COUNT(v3d->ver));
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN,
V3D_V3_PCTR_0_EN_ENABLE |
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index ffbbe9d52..3debf37e7 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/sched/clock.h>
#include <linux/reset.h>
#include <drm/drm_drv.h>
@@ -90,6 +91,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
args->value = 1;
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -111,6 +115,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
v3d_priv->v3d = v3d;
for (i = 0; i < V3D_MAX_QUEUES; i++) {
+ v3d_priv->enabled_ns[i] = 0;
+ v3d_priv->start_ns[i] = 0;
+ v3d_priv->jobs_sent[i] = 0;
+
sched = &v3d->queue[i].sched;
drm_sched_entity_init(&v3d_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -136,7 +144,35 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
+static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct v3d_file_priv *file_priv = file->driver_priv;
+ u64 timestamp = local_clock();
+ enum v3d_queue queue;
+
+ for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
+		/* Note that, in case of a GPU reset, the time spent in an
+		 * aborted attempt to execute the job is not included in the runtime.
+ */
+ drm_printf(p, "drm-engine-%s: \t%llu ns\n",
+ v3d_queue_to_string(queue),
+ file_priv->start_ns[queue] ? file_priv->enabled_ns[queue]
+ + timestamp - file_priv->start_ns[queue]
+ : file_priv->enabled_ns[queue]);
+
+		/* Note that we only count jobs that completed. Therefore, jobs
+		 * that were resubmitted due to a GPU reset are not counted.
+ */
+ drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n",
+ v3d_queue_to_string(queue), file_priv->jobs_sent[queue]);
+ }
+}
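
The ternary in the drm-engine line computes per-queue runtime as the accumulated total plus the slice of any job still in flight; restated more plainly (names from the patch, `timestamp` being the local_clock() sample above):

	u64 runtime = file_priv->enabled_ns[queue];

	/* a nonzero start_ns means a job is currently running on this queue */
	if (file_priv->start_ns[queue])
		runtime += timestamp - file_priv->start_ns[queue];
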
+
+static const struct file_operations v3d_drm_fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = drm_show_fdinfo,
+};
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
@@ -156,6 +192,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static const struct drm_driver v3d_drm_driver = {
@@ -176,6 +213,7 @@ static const struct drm_driver v3d_drm_driver = {
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
.fops = &v3d_drm_fops,
+ .show_fdinfo = v3d_show_fdinfo,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -187,6 +225,7 @@ static const struct drm_driver v3d_drm_driver = {
static const struct of_device_id v3d_of_match[] = {
{ .compatible = "brcm,2711-v3d" },
+ { .compatible = "brcm,2712-v3d" },
{ .compatible = "brcm,7268-v3d" },
{ .compatible = "brcm,7278-v3d" },
{},
@@ -281,8 +320,14 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto irq_disable;
+ ret = v3d_sysfs_init(dev);
+ if (ret)
+ goto drm_unregister;
+
return 0;
+drm_unregister:
+ drm_dev_unregister(drm);
irq_disable:
v3d_irq_disable(v3d);
gem_destroy:
@@ -296,6 +341,9 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct v3d_dev *v3d = to_v3d_dev(drm);
+ struct device *dev = &pdev->dev;
+
+ v3d_sysfs_destroy(dev);
drm_dev_unregister(drm);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 106454f28..3c7d58866 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,13 +19,30 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
-#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
+#define V3D_MAX_QUEUES (V3D_CPU + 1)
+
+static inline char *v3d_queue_to_string(enum v3d_queue queue)
+{
+ switch (queue) {
+ case V3D_BIN: return "bin";
+ case V3D_RENDER: return "render";
+ case V3D_TFU: return "tfu";
+ case V3D_CSD: return "csd";
+ case V3D_CACHE_CLEAN: return "cache_clean";
+ case V3D_CPU: return "cpu";
+ }
+ return "UNKNOWN";
+}
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
u64 emit_seqno;
+
+ u64 start_ns;
+ u64 enabled_ns;
+ u64 jobs_sent;
};
/* Performance monitor object. The perform lifetime is controlled by userspace
@@ -106,6 +123,7 @@ struct v3d_dev {
struct v3d_render_job *render_job;
struct v3d_tfu_job *tfu_job;
struct v3d_csd_job *csd_job;
+ struct v3d_cpu_job *cpu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -167,6 +185,12 @@ struct v3d_file_priv {
} perfmon;
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
+
+ u64 start_ns[V3D_MAX_QUEUES];
+
+ u64 enabled_ns[V3D_MAX_QUEUES];
+
+ u64 jobs_sent[V3D_MAX_QUEUES];
};
struct v3d_bo {
@@ -178,6 +202,8 @@ struct v3d_bo {
* v3d_render_job->unref_list
*/
struct list_head unref_head;
+
+ void *vaddr;
};
static inline struct v3d_bo *
@@ -238,6 +264,11 @@ struct v3d_job {
*/
struct v3d_perfmon *perfmon;
+	/* File descriptor of the process that submitted the job, which can be
+	 * used to collect per-process GPU usage stats.
+ */
+ struct drm_file *file;
+
/* Callback for the freeing of the job on refcount going to 0. */
void (*free)(struct kref *ref);
};
@@ -285,6 +316,112 @@ struct v3d_csd_job {
struct drm_v3d_submit_csd args;
};
+enum v3d_cpu_job_type {
+ V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1,
+ V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY,
+ V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY,
+ V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY,
+};
+
+struct v3d_timestamp_query {
+	/* Offset into the timestamp BO at which this query's value is stored. */
+ u32 offset;
+
+ /* Syncobj that indicates the timestamp availability */
+ struct drm_syncobj *syncobj;
+};
+
+/* Number of perfmons required to handle all supported performance counters */
+#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \
+ DRM_V3D_MAX_PERF_COUNTERS)
+
+struct v3d_performance_query {
+ /* Performance monitor IDs for this query */
+ u32 kperfmon_ids[V3D_MAX_PERFMONS];
+
+ /* Syncobj that indicates the query availability */
+ struct drm_syncobj *syncobj;
+};
+
+struct v3d_indirect_csd_info {
+ /* Indirect CSD */
+ struct v3d_csd_job *job;
+
+ /* Clean cache job associated to the Indirect CSD job */
+ struct v3d_job *clean_job;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ u32 offset;
+
+	/* Workgroup size */
+ u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream.
+ */
+ u32 wg_uniform_offsets[3];
+
+ /* Indirect BO */
+ struct drm_gem_object *indirect;
+
+ /* Context of the Indirect CSD job */
+ struct ww_acquire_ctx acquire_ctx;
+};
+
+struct v3d_timestamp_query_info {
+ struct v3d_timestamp_query *queries;
+
+ u32 count;
+};
+
+struct v3d_performance_query_info {
+ struct v3d_performance_query *queries;
+
+ /* Number of performance queries */
+ u32 count;
+
+ /* Number of performance monitors related to that query pool */
+ u32 nperfmons;
+
+ /* Number of performance counters related to that query pool */
+ u32 ncounters;
+};
+
+struct v3d_copy_query_results_info {
+	/* Defines whether to write the results using 64-bit or 32-bit values */
+ bool do_64bit;
+
+	/* Defines whether the buffer may be written even if the query is not available */
+ bool do_partial;
+
+	/* Defines whether the availability bit should be written to the buffer */
+ bool availability_bit;
+
+ /* Offset of the copy buffer in the BO */
+ u32 offset;
+
+ /* Stride of the copy buffer in the BO */
+ u32 stride;
+};
+
+struct v3d_cpu_job {
+ struct v3d_job base;
+
+ enum v3d_cpu_job_type job_type;
+
+ struct v3d_indirect_csd_info indirect_csd;
+
+ struct v3d_timestamp_query_info timestamp_query;
+
+ struct v3d_copy_query_results_info copy;
+
+ struct v3d_performance_query_info performance_query;
+};
+
+typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *);
+
struct v3d_submit_outsync {
struct drm_syncobj *syncobj;
};
@@ -352,12 +489,16 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
+void v3d_get_bo_vaddr(struct v3d_bo *bo);
+void v3d_put_bo_vaddr(struct v3d_bo *bo);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -372,19 +513,21 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset(struct v3d_dev *v3d);
+void v3d_invalidate_caches(struct v3d_dev *v3d);
+void v3d_clean_caches(struct v3d_dev *v3d);
+
+/* v3d_submit.c */
+void v3d_job_cleanup(struct v3d_job *job);
+void v3d_job_put(struct v3d_job *job);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void v3d_job_cleanup(struct v3d_job *job);
-void v3d_job_put(struct v3d_job *job);
-void v3d_reset(struct v3d_dev *v3d);
-void v3d_invalidate_caches(struct v3d_dev *v3d);
-void v3d_clean_caches(struct v3d_dev *v3d);
+int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
@@ -393,8 +536,6 @@ void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
/* v3d_mmu.c */
-int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
- u32 *offset);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
@@ -418,3 +559,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+/* v3d_sysfs.c */
+int v3d_sysfs_init(struct device *dev);
+void v3d_sysfs_destroy(struct device *dev);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 2e94ce788..afc565078 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -11,8 +11,6 @@
#include <linux/uaccess.h>
#include <drm/drm_managed.h>
-#include <drm/drm_syncobj.h>
-#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
@@ -47,9 +45,9 @@ v3d_init_hw_state(struct v3d_dev *v3d)
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
- V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);
+ V3D_CORE_WRITE(core, V3D_GMP_CFG(v3d->ver), V3D_GMP_CFG_STOP_REQ);
- if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
+ if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS(v3d->ver)) &
(V3D_GMP_STATUS_RD_COUNT_MASK |
V3D_GMP_STATUS_WR_COUNT_MASK |
V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
@@ -241,771 +239,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
v3d_invalidate_slices(v3d, 0);
}
-/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
- *
- * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
- * to v3d, so we don't attach dma-buf fences to them.
- */
-static int
-v3d_lock_bo_reservations(struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx)
-{
- int i, ret;
-
- ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
- if (ret)
- return ret;
-
- for (i = 0; i < job->bo_count; i++) {
- ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_implicit_dependencies(&job->base,
- job->bo[i], true);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
- return ret;
-}
-
-/**
- * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @job: V3D job being set up
- * @bo_handles: GEM handles
- * @bo_count: Number of GEM handles passed in
- *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
- */
-static int
-v3d_lookup_bos(struct drm_device *dev,
- struct drm_file *file_priv,
- struct v3d_job *job,
- u64 bo_handles,
- u32 bo_count)
-{
- job->bo_count = bo_count;
-
- if (!job->bo_count) {
- /* See comment on bo_index for why we have to check
- * this.
- */
- DRM_DEBUG("Rendering requires BOs\n");
- return -EINVAL;
- }
-
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count, &job->bo);
-}
-
-static void
-v3d_job_free(struct kref *ref)
-{
- struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- int i;
-
- if (job->bo) {
- for (i = 0; i < job->bo_count; i++)
- drm_gem_object_put(job->bo[i]);
- kvfree(job->bo);
- }
-
- dma_fence_put(job->irq_fence);
- dma_fence_put(job->done_fence);
-
- if (job->perfmon)
- v3d_perfmon_put(job->perfmon);
-
- kfree(job);
-}
-
-static void
-v3d_render_job_free(struct kref *ref)
-{
- struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
- base.refcount);
- struct v3d_bo *bo, *save;
-
- list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
- drm_gem_object_put(&bo->base.base);
- }
-
- v3d_job_free(ref);
-}
-
-void v3d_job_cleanup(struct v3d_job *job)
-{
- if (!job)
- return;
-
- drm_sched_job_cleanup(&job->base);
- v3d_job_put(job);
-}
-
-void v3d_job_put(struct v3d_job *job)
-{
- kref_put(&job->refcount, job->free);
-}
-
-int
-v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
- struct drm_v3d_wait_bo *args = data;
- ktime_t start = ktime_get();
- u64 delta_ns;
- unsigned long timeout_jiffies =
- nsecs_to_jiffies_timeout(args->timeout_ns);
-
- if (args->pad != 0)
- return -EINVAL;
-
- ret = drm_gem_dma_resv_wait(file_priv, args->handle,
- true, timeout_jiffies);
-
- /* Decrement the user's timeout, in case we got interrupted
- * such that the ioctl will be restarted.
- */
- delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
- if (delta_ns < args->timeout_ns)
- args->timeout_ns -= delta_ns;
- else
- args->timeout_ns = 0;
-
- /* Asked to wait beyond the jiffie/scheduler precision? */
- if (ret == -ETIME && args->timeout_ns)
- ret = -EAGAIN;
-
- return ret;
-}
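The timeout bookkeeping above (carried over unchanged to the function's new home) exists so userspace can simply retry; a minimal sketch of the corresponding userspace loop, assuming a valid v3d DRM fd and BO handle:

#include <errno.h>
#include <sys/ioctl.h>
#include "drm/v3d_drm.h"

/* Sketch: wait up to 1s for all fences on a BO. The kernel decrements
 * args.timeout_ns across -EINTR/-EAGAIN restarts, so the loop converges.
 */
static int wait_bo_example(int fd, __u32 handle)
{
	struct drm_v3d_wait_bo args = {
		.handle = handle,
		.timeout_ns = 1000000000ull,
	};
	int ret;

	do {
		ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &args);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}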
-
-static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- void **container, size_t size, void (*free)(struct kref *ref),
- u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
-{
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct v3d_job *job;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int ret, i;
-
- *container = kcalloc(1, size, GFP_KERNEL);
- if (!*container) {
- DRM_ERROR("Cannot allocate memory for v3d job.");
- return -ENOMEM;
- }
-
- job = *container;
- job->v3d = v3d;
- job->free = free;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- v3d_priv);
- if (ret)
- goto fail;
-
- if (has_multisync) {
- if (se->in_sync_count && se->wait_stage == queue) {
- struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
-
- for (i = 0; i < se->in_sync_count; i++) {
- struct drm_v3d_sem in;
-
- if (copy_from_user(&in, handle++, sizeof(in))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy wait dep handle.\n");
- goto fail_deps;
- }
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
- }
- } else {
- ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
-
- // TODO: Investigate why this was filtered out for the IOCTL.
- if (ret && ret != -ENOENT)
- goto fail_deps;
- }
-
- kref_init(&job->refcount);
-
- return 0;
-
-fail_deps:
- drm_sched_job_cleanup(&job->base);
-fail:
- kfree(*container);
- *container = NULL;
-
- return ret;
-}
-
-static void
-v3d_push_job(struct v3d_job *job)
-{
- drm_sched_job_arm(&job->base);
-
- job->done_fence = dma_fence_get(&job->base.s_fence->finished);
-
- /* put by scheduler job completion */
- kref_get(&job->refcount);
-
- drm_sched_entity_push_job(&job->base);
-}
-
-static void
-v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
- struct v3d_job *job,
- struct ww_acquire_ctx *acquire_ctx,
- u32 out_sync,
- struct v3d_submit_ext *se,
- struct dma_fence *done_fence)
-{
- struct drm_syncobj *sync_out;
- bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
- int i;
-
- for (i = 0; i < job->bo_count; i++) {
- /* XXX: Use shared fences for read-only objects. */
- dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
- DMA_RESV_USAGE_WRITE);
- }
-
- drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-
- /* Update the return sync object for the job */
- /* If it only supports a single signal semaphore */
- if (!has_multisync) {
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
- }
- return;
- }
-
- /* If multiple semaphores extension is supported */
- if (se->out_sync_count) {
- for (i = 0; i < se->out_sync_count; i++) {
- drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
- done_fence);
- drm_syncobj_put(se->out_syncs[i].syncobj);
- }
- kvfree(se->out_syncs);
- }
-}
-
-static void
-v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
-{
- unsigned int i;
-
- if (!(se && se->out_sync_count))
- return;
-
- for (i = 0; i < se->out_sync_count; i++)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-}
-
-static int
-v3d_get_multisync_post_deps(struct drm_file *file_priv,
- struct v3d_submit_ext *se,
- u32 count, u64 handles)
-{
- struct drm_v3d_sem __user *post_deps;
- int i, ret;
-
- if (!count)
- return 0;
-
- se->out_syncs = (struct v3d_submit_outsync *)
- kvmalloc_array(count,
- sizeof(struct v3d_submit_outsync),
- GFP_KERNEL);
- if (!se->out_syncs)
- return -ENOMEM;
-
- post_deps = u64_to_user_ptr(handles);
-
- for (i = 0; i < count; i++) {
- struct drm_v3d_sem out;
-
- if (copy_from_user(&out, post_deps++, sizeof(out))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy post dep handles\n");
- goto fail;
- }
-
- se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
- out.handle);
- if (!se->out_syncs[i].syncobj) {
- ret = -EINVAL;
- goto fail;
- }
- }
- se->out_sync_count = count;
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- drm_syncobj_put(se->out_syncs[i].syncobj);
- kvfree(se->out_syncs);
-
- return ret;
-}
-
-/* Get data for multiple binary semaphores synchronization. Parse syncobj
- * to be signaled when job completes (out_sync).
- */
-static int
-v3d_get_multisync_submit_deps(struct drm_file *file_priv,
- struct drm_v3d_extension __user *ext,
- void *data)
-{
- struct drm_v3d_multi_sync multisync;
- struct v3d_submit_ext *se = data;
- int ret;
-
- if (copy_from_user(&multisync, ext, sizeof(multisync)))
- return -EFAULT;
-
- if (multisync.pad)
- return -EINVAL;
-
- ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
- multisync.out_syncs);
- if (ret)
- return ret;
-
- se->in_sync_count = multisync.in_sync_count;
- se->in_syncs = multisync.in_syncs;
- se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
- se->wait_stage = multisync.wait_stage;
-
- return 0;
-}
-
-/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
- * according to the extension id (name).
- */
-static int
-v3d_get_extensions(struct drm_file *file_priv,
- u64 ext_handles,
- void *data)
-{
- struct drm_v3d_extension __user *user_ext;
- int ret;
-
- user_ext = u64_to_user_ptr(ext_handles);
- while (user_ext) {
- struct drm_v3d_extension ext;
-
- if (copy_from_user(&ext, user_ext, sizeof(ext))) {
- DRM_DEBUG("Failed to copy submit extension\n");
- return -EFAULT;
- }
-
- switch (ext.id) {
- case DRM_V3D_EXT_ID_MULTI_SYNC:
- ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
- if (ret)
- return ret;
- break;
- default:
- DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
- return -EINVAL;
- }
-
- user_ext = u64_to_user_ptr(ext.next);
- }
-
- return 0;
-}
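For context, v3d_get_extensions() (moved to v3d_submit.c by this patch) consumes a user-space chain like the following. A hedged sketch using the uapi structs; the handle values are illustrative, the caller is assumed to zero-initialize the structs, and the numeric wait_stage is assumed to match the kernel's enum v3d_queue:

#include <stdint.h>
#include "drm/v3d_drm.h"

/* Sketch: attach one signal syncobj to a CL submit via multi-sync. */
static void fill_multisync(struct drm_v3d_submit_cl *submit,
			   struct drm_v3d_multi_sync *ms,
			   struct drm_v3d_sem *out_sem, __u32 syncobj)
{
	out_sem->handle = syncobj;

	ms->base.id = DRM_V3D_EXT_ID_MULTI_SYNC;
	ms->base.next = 0;		/* end of the extension chain */
	ms->out_syncs = (uintptr_t)out_sem;
	ms->out_sync_count = 1;
	ms->wait_stage = 1;		/* V3D_RENDER (assumed value) */

	submit->flags |= DRM_V3D_SUBMIT_EXTENSION;
	submit->extensions = (uintptr_t)ms;
}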
-
-/**
- * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * This is the main entrypoint for userspace to submit a 3D frame to
- * the GPU. Userspace provides the binner command list (if
- * applicable), and the kernel sets up the render command list to draw
- * to the framebuffer described in the ioctl, using the command lists
- * that the 3D engine's binner will produce.
- */
-int
-v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_cl *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render = NULL;
- struct v3d_job *clean_job = NULL;
- struct v3d_job *last_job;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
-
- if (args->pad)
- return -EINVAL;
-
- if (args->flags &&
- args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
- DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
- v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret)
- goto fail;
-
- render->start = args->rcl_start;
- render->end = args->rcl_end;
- INIT_LIST_HEAD(&render->unref_list);
-
- if (args->bcl_start != args->bcl_end) {
- ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
- v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret)
- goto fail;
-
- bin->start = args->bcl_start;
- bin->end = args->bcl_end;
- bin->qma = args->qma;
- bin->qms = args->qms;
- bin->qts = args->qts;
- bin->render = render;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
- goto fail;
-
- last_job = clean_job;
- } else {
- last_job = &render->base;
- }
-
- ret = v3d_lookup_bos(dev, file_priv, last_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- render->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
-
- if (!render->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- if (bin) {
- bin->base.perfmon = render->base.perfmon;
- v3d_perfmon_get(bin->base.perfmon);
- v3d_push_job(&bin->base);
-
- ret = drm_sched_job_add_dependency(&render->base.base,
- dma_fence_get(bin->base.done_fence));
- if (ret)
- goto fail_unreserve;
- }
-
- v3d_push_job(&render->base);
-
- if (clean_job) {
- struct dma_fence *render_fence =
- dma_fence_get(render->base.done_fence);
- ret = drm_sched_job_add_dependency(&clean_job->base,
- render_fence);
- if (ret)
- goto fail_unreserve;
- clean_job->perfmon = render->base.perfmon;
- v3d_perfmon_get(clean_job->perfmon);
- v3d_push_job(clean_job);
- }
-
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- last_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- last_job->done_fence);
-
- if (bin)
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- if (clean_job)
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(last_job->bo,
- last_job->bo_count, &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)bin);
- v3d_job_cleanup((void *)render);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the TFU, which we don't
- * need to validate since the TFU is behind the MMU.
- */
-int
-v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_v3d_submit_tfu *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_tfu_job *job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret = 0;
-
- trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_DEBUG("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
- v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret)
- goto fail;
-
- job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
- sizeof(*job->base.bo), GFP_KERNEL);
- if (!job->base.bo) {
- ret = -ENOMEM;
- goto fail;
- }
-
- job->args = *args;
-
- for (job->base.bo_count = 0;
- job->base.bo_count < ARRAY_SIZE(args->bo_handles);
- job->base.bo_count++) {
- struct drm_gem_object *bo;
-
- if (!args->bo_handles[job->base.bo_count])
- break;
-
- bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- job->base.bo_count,
- args->bo_handles[job->base.bo_count]);
- ret = -ENOENT;
- goto fail;
- }
- job->base.bo[job->base.bo_count] = bo;
- }
-
- ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
- if (ret)
- goto fail;
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- &job->base, &acquire_ctx,
- args->out_sync,
- &se,
- job->base.done_fence);
-
- v3d_job_put(&job->base);
-
- return 0;
-
-fail:
- v3d_job_cleanup((void *)job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
-/**
- * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the CSD, which we don't
- * need to validate since the CSD is behind the MMU.
- */
-int
-v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
- struct drm_v3d_submit_csd *args = data;
- struct v3d_submit_ext se = {0};
- struct v3d_csd_job *job = NULL;
- struct v3d_job *clean_job = NULL;
- struct ww_acquire_ctx acquire_ctx;
- int ret;
-
- trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
-
- if (args->pad)
- return -EINVAL;
-
- if (!v3d_has_csd(v3d)) {
- DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
- return -EINVAL;
- }
-
- if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
- DRM_INFO("invalid flags: %d\n", args->flags);
- return -EINVAL;
- }
-
- if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
- ret = v3d_get_extensions(file_priv, args->extensions, &se);
- if (ret) {
- DRM_DEBUG("Failed to get extensions.\n");
- return ret;
- }
- }
-
- ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
- v3d_job_free, args->in_sync, &se, V3D_CSD);
- if (ret)
- goto fail;
-
- ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
- v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
- goto fail;
-
- job->args = *args;
-
- ret = v3d_lookup_bos(dev, file_priv, clean_job,
- args->bo_handles, args->bo_handle_count);
- if (ret)
- goto fail;
-
- ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
- if (ret)
- goto fail;
-
- if (args->perfmon_id) {
- job->base.perfmon = v3d_perfmon_find(v3d_priv,
- args->perfmon_id);
- if (!job->base.perfmon) {
- ret = -ENOENT;
- goto fail_perfmon;
- }
- }
-
- mutex_lock(&v3d->sched_lock);
- v3d_push_job(&job->base);
-
- ret = drm_sched_job_add_dependency(&clean_job->base,
- dma_fence_get(job->base.done_fence));
- if (ret)
- goto fail_unreserve;
-
- v3d_push_job(clean_job);
- mutex_unlock(&v3d->sched_lock);
-
- v3d_attach_fences_and_unlock_reservation(file_priv,
- clean_job,
- &acquire_ctx,
- args->out_sync,
- &se,
- clean_job->done_fence);
-
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
-
- return 0;
-
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
- drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
- &acquire_ctx);
-fail:
- v3d_job_cleanup((void *)job);
- v3d_job_cleanup(clean_job);
- v3d_put_multisync_post_deps(&se);
-
- return ret;
-}
-
int
v3d_gem_init(struct drm_device *dev)
{
@@ -1013,8 +246,12 @@ v3d_gem_init(struct drm_device *dev)
u32 pt_size = 4096 * 1024;
int ret, i;
- for (i = 0; i < V3D_MAX_QUEUES; i++)
+ for (i = 0; i < V3D_MAX_QUEUES; i++) {
v3d->queue[i].fence_context = dma_fence_context_alloc(1);
+ v3d->queue[i].start_ns = 0;
+ v3d->queue[i].enabled_ns = 0;
+ v3d->queue[i].jobs_sent = 0;
+ }
spin_lock_init(&v3d->mm_lock);
spin_lock_init(&v3d->job_lock);
@@ -1072,6 +309,8 @@ v3d_gem_destroy(struct drm_device *dev)
*/
WARN_ON(v3d->bin_job);
WARN_ON(v3d->render_job);
+ WARN_ON(v3d->tfu_job);
+ WARN_ON(v3d->csd_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e714d5318..b8f9f2a3d 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -14,21 +14,23 @@
*/
#include <linux/platform_device.h>
+#include <linux/sched/clock.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
-#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
- V3D_INT_FLDONE | \
- V3D_INT_FRDONE | \
- V3D_INT_CSDDONE | \
- V3D_INT_GMPV))
+#define V3D_CORE_IRQS(ver) ((u32)(V3D_INT_OUTOMEM | \
+ V3D_INT_FLDONE | \
+ V3D_INT_FRDONE | \
+ V3D_INT_CSDDONE(ver) | \
+ (ver < 71 ? V3D_INT_GMPV : 0)))
-#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
- V3D_HUB_INT_MMU_PTI | \
- V3D_HUB_INT_MMU_CAP | \
- V3D_HUB_INT_TFUC))
+#define V3D_HUB_IRQS(ver) ((u32)(V3D_HUB_INT_MMU_WRV | \
+ V3D_HUB_INT_MMU_PTI | \
+ V3D_HUB_INT_MMU_CAP | \
+ V3D_HUB_INT_TFUC | \
+ (ver >= 71 ? V3D_V7_HUB_INT_GMPV : 0)))
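A quick way to sanity-check the version split encoded above: all three masks are compile-time constants. The BUILD_BUG_ON sketch below is illustrative rather than part of the driver, and assumes the v3d_regs.h definitions from this patch are in scope:

/* On 4.x (ver 42) the GMP violation IRQ lives on the core; on 7.x
 * (ver 71) it moves to the hub as V3D_V7_HUB_INT_GMPV.
 */
static inline void v3d_irq_mask_sanity(void)
{
	BUILD_BUG_ON(!(V3D_CORE_IRQS(42) & V3D_INT_GMPV));
	BUILD_BUG_ON(V3D_HUB_IRQS(42) & V3D_V7_HUB_INT_GMPV);
	BUILD_BUG_ON(!(V3D_HUB_IRQS(71) & V3D_V7_HUB_INT_GMPV));
}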
static irqreturn_t
v3d_hub_irq(int irq, void *arg);
@@ -100,6 +102,17 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->bin_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_BIN];
+
+ file->jobs_sent[V3D_BIN]++;
+ v3d->queue[V3D_BIN].jobs_sent++;
+
+ file->start_ns[V3D_BIN] = 0;
+ v3d->queue[V3D_BIN].start_ns = 0;
+
+ file->enabled_ns[V3D_BIN] += runtime;
+ v3d->queue[V3D_BIN].enabled_ns += runtime;
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -109,15 +122,37 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
to_v3d_fence(v3d->render_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_RENDER];
+
+ file->jobs_sent[V3D_RENDER]++;
+ v3d->queue[V3D_RENDER].jobs_sent++;
+
+ file->start_ns[V3D_RENDER] = 0;
+ v3d->queue[V3D_RENDER].start_ns = 0;
+
+ file->enabled_ns[V3D_RENDER] += runtime;
+ v3d->queue[V3D_RENDER].enabled_ns += runtime;
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
- if (intsts & V3D_INT_CSDDONE) {
+ if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
struct v3d_fence *fence =
to_v3d_fence(v3d->csd_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_CSD];
+
+ file->jobs_sent[V3D_CSD]++;
+ v3d->queue[V3D_CSD].jobs_sent++;
+
+ file->start_ns[V3D_CSD] = 0;
+ v3d->queue[V3D_CSD].start_ns = 0;
+
+ file->enabled_ns[V3D_CSD] += runtime;
+ v3d->queue[V3D_CSD].enabled_ns += runtime;
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -127,7 +162,7 @@ v3d_irq(int irq, void *arg)
/* We shouldn't be triggering these if we have GMP in
* always-allowed mode.
*/
- if (intsts & V3D_INT_GMPV)
+ if (v3d->ver < 71 && (intsts & V3D_INT_GMPV))
dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
@@ -154,6 +189,17 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
to_v3d_fence(v3d->tfu_job->base.irq_fence);
+ struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv;
+ u64 runtime = local_clock() - file->start_ns[V3D_TFU];
+
+ file->jobs_sent[V3D_TFU]++;
+ v3d->queue[V3D_TFU].jobs_sent++;
+
+ file->start_ns[V3D_TFU] = 0;
+ v3d->queue[V3D_TFU].start_ns = 0;
+
+ file->enabled_ns[V3D_TFU] += runtime;
+ v3d->queue[V3D_TFU].enabled_ns += runtime;
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -197,6 +243,11 @@ v3d_hub_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
+ if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) {
+ dev_err(v3d->drm.dev, "GMP violation\n");
+ status = IRQ_HANDLED;
+ }
+
return status;
}
@@ -211,8 +262,8 @@ v3d_irq_init(struct v3d_dev *v3d)
* for us.
*/
for (core = 0; core < v3d->cores; core++)
- V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
- V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+ V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
+ V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
@@ -256,12 +307,12 @@ v3d_irq_enable(struct v3d_dev *v3d)
/* Enable our set of interrupts, masking out any others. */
for (core = 0; core < v3d->cores; core++) {
- V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
- V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
+ V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS(v3d->ver));
+ V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS(v3d->ver));
}
- V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
- V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
+ V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS(v3d->ver));
+ V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS(v3d->ver));
}
void
@@ -276,8 +327,8 @@ v3d_irq_disable(struct v3d_dev *v3d)
/* Clear any pending interrupts we might have left. */
for (core = 0; core < v3d->cores; core++)
- V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
- V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
+ V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver));
+ V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver));
cancel_work_sync(&v3d->overflow_mem_work);
}
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 3663e0d6b..1b1a62ad9 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -57,6 +57,7 @@
#define V3D_HUB_INT_MSK_STS 0x0005c
#define V3D_HUB_INT_MSK_SET 0x00060
#define V3D_HUB_INT_MSK_CLR 0x00064
+# define V3D_V7_HUB_INT_GMPV BIT(6)
# define V3D_HUB_INT_MMU_WRV BIT(5)
# define V3D_HUB_INT_MMU_PTI BIT(4)
# define V3D_HUB_INT_MMU_CAP BIT(3)
@@ -64,6 +65,7 @@
# define V3D_HUB_INT_TFUC BIT(1)
# define V3D_HUB_INT_TFUF BIT(0)
+/* GCA registers only exist in V3D < 41 */
#define V3D_GCA_CACHE_CTRL 0x0000c
# define V3D_GCA_CACHE_CTRL_FLUSH BIT(0)
@@ -86,7 +88,8 @@
# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c
# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
-#define V3D_TFU_CS 0x00400
+#define V3D_TFU_CS(ver) ((ver >= 71) ? 0x00700 : 0x00400)
+
/* Stops current job, empties input fifo. */
# define V3D_TFU_CS_TFURST BIT(31)
# define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16)
@@ -95,7 +98,7 @@
# define V3D_TFU_CS_NFREE_SHIFT 8
# define V3D_TFU_CS_BUSY BIT(0)
-#define V3D_TFU_SU 0x00404
+#define V3D_TFU_SU(ver) ((ver >= 71) ? 0x00704 : 0x00404)
/* Interrupt when FINTTHR input slots are free (0 = disabled) */
# define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8)
# define V3D_TFU_SU_FINTTHR_SHIFT 8
@@ -106,39 +109,42 @@
# define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0)
# define V3D_TFU_SU_THROTTLE_SHIFT 0
-#define V3D_TFU_ICFG 0x00408
+#define V3D_TFU_ICFG(ver) ((ver >= 71) ? 0x00708 : 0x00408)
/* Interrupt when the conversion is complete. */
# define V3D_TFU_ICFG_IOC BIT(0)
/* Input Image Address */
-#define V3D_TFU_IIA 0x0040c
+#define V3D_TFU_IIA(ver) ((ver >= 71) ? 0x0070c : 0x0040c)
/* Input Chroma Address */
-#define V3D_TFU_ICA 0x00410
+#define V3D_TFU_ICA(ver) ((ver >= 71) ? 0x00710 : 0x00410)
/* Input Image Stride */
-#define V3D_TFU_IIS 0x00414
+#define V3D_TFU_IIS(ver) ((ver >= 71) ? 0x00714 : 0x00414)
/* Input Image U-Plane Address */
-#define V3D_TFU_IUA 0x00418
+#define V3D_TFU_IUA(ver) ((ver >= 71) ? 0x00718 : 0x00418)
+/* Image output config (V3D 7.x only) */
+#define V3D_V7_TFU_IOC 0x0071c
/* Output Image Address */
-#define V3D_TFU_IOA 0x0041c
+#define V3D_TFU_IOA(ver) ((ver >= 71) ? 0x00720 : 0x0041c)
/* Image Output Size */
-#define V3D_TFU_IOS 0x00420
+#define V3D_TFU_IOS(ver) ((ver >= 71) ? 0x00724 : 0x00420)
/* TFU YUV Coefficient 0 */
-#define V3D_TFU_COEF0 0x00424
-/* Use these regs instead of the defaults. */
+#define V3D_TFU_COEF0(ver) ((ver >= 71) ? 0x00728 : 0x00424)
+/* Use these regs instead of the defaults (V3D 4.x only) */
# define V3D_TFU_COEF0_USECOEF BIT(31)
/* TFU YUV Coefficient 1 */
-#define V3D_TFU_COEF1 0x00428
+#define V3D_TFU_COEF1(ver) ((ver >= 71) ? 0x0072c : 0x00428)
/* TFU YUV Coefficient 2 */
-#define V3D_TFU_COEF2 0x0042c
+#define V3D_TFU_COEF2(ver) ((ver >= 71) ? 0x00730 : 0x0042c)
/* TFU YUV Coefficient 3 */
-#define V3D_TFU_COEF3 0x00430
+#define V3D_TFU_COEF3(ver) ((ver >= 71) ? 0x00734 : 0x00430)
+/* V3D 4.x only */
#define V3D_TFU_CRC 0x00434
/* Per-MMU registers. */
#define V3D_MMUC_CONTROL 0x01000
-# define V3D_MMUC_CONTROL_CLEAR BIT(3)
+#define V3D_MMUC_CONTROL_CLEAR(ver) ((ver >= 71) ? BIT(11) : BIT(3))
# define V3D_MMUC_CONTROL_FLUSHING BIT(2)
# define V3D_MMUC_CONTROL_FLUSH BIT(1)
# define V3D_MMUC_CONTROL_ENABLE BIT(0)
@@ -246,7 +252,6 @@
#define V3D_CTL_L2TCACTL 0x00030
# define V3D_L2TCACTL_TMUWCF BIT(8)
-# define V3D_L2TCACTL_L2T_NO_WM BIT(4)
/* Invalidates cache lines. */
# define V3D_L2TCACTL_FLM_FLUSH 0
/* Removes cachelines without writing dirty lines back. */
@@ -267,8 +272,8 @@
#define V3D_CTL_INT_MSK_CLR 0x00064
# define V3D_INT_QPU_MASK V3D_MASK(27, 16)
# define V3D_INT_QPU_SHIFT 16
-# define V3D_INT_CSDDONE BIT(7)
-# define V3D_INT_PCTR BIT(6)
+#define V3D_INT_CSDDONE(ver) ((ver >= 71) ? BIT(6) : BIT(7))
+#define V3D_INT_PCTR(ver) ((ver >= 71) ? BIT(5) : BIT(6))
# define V3D_INT_GMPV BIT(5)
# define V3D_INT_TRFB BIT(4)
# define V3D_INT_SPILLUSE BIT(3)
@@ -350,21 +355,25 @@
#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \
4 * (x))
# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
+# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0)
# define V3D_PCTR_S0_SHIFT 0
# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
+# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8)
# define V3D_PCTR_S1_SHIFT 8
# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
+# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16)
# define V3D_PCTR_S2_SHIFT 16
# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
+# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24)
# define V3D_PCTR_S3_SHIFT 24
-# define V3D_PCTR_CYCLE_COUNT 32
+#define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
/* Output values of the counters. */
#define V3D_PCTR_0_PCTR0 0x00680
#define V3D_PCTR_0_PCTR31 0x006fc
#define V3D_PCTR_0_PCTRX(x) (V3D_PCTR_0_PCTR0 + \
4 * (x))
-#define V3D_GMP_STATUS 0x00800
+#define V3D_GMP_STATUS(ver) ((ver >= 71) ? 0x00600 : 0x00800)
# define V3D_GMP_STATUS_GMPRST BIT(31)
# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24)
# define V3D_GMP_STATUS_WR_COUNT_SHIFT 24
@@ -377,13 +386,13 @@
# define V3D_GMP_STATUS_INVPROT BIT(1)
# define V3D_GMP_STATUS_VIO BIT(0)
-#define V3D_GMP_CFG 0x00804
+#define V3D_GMP_CFG(ver) ((ver >= 71) ? 0x00604 : 0x00804)
# define V3D_GMP_CFG_LBURSTEN BIT(3)
# define V3D_GMP_CFG_PGCRSEN BIT(2)
# define V3D_GMP_CFG_STOP_REQ BIT(1)
# define V3D_GMP_CFG_PROT_ENABLE BIT(0)
-#define V3D_GMP_VIO_ADDR 0x00808
+#define V3D_GMP_VIO_ADDR(ver) ((ver >= 71) ? 0x00608 : 0x00808)
#define V3D_GMP_VIO_TYPE 0x0080c
#define V3D_GMP_TABLE_ADDR 0x00810
#define V3D_GMP_CLEAR_LOAD 0x00814
@@ -398,25 +407,25 @@
# define V3D_CSD_STATUS_HAVE_CURRENT_DISPATCH BIT(1)
# define V3D_CSD_STATUS_HAVE_QUEUED_DISPATCH BIT(0)
-#define V3D_CSD_QUEUED_CFG0 0x00904
+#define V3D_CSD_QUEUED_CFG0(ver) ((ver >= 71) ? 0x00930 : 0x00904)
# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_MASK V3D_MASK(31, 16)
# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_SHIFT 16
# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_MASK V3D_MASK(15, 0)
# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_SHIFT 0
-#define V3D_CSD_QUEUED_CFG1 0x00908
+#define V3D_CSD_QUEUED_CFG1(ver) ((ver >= 71) ? 0x00934 : 0x00908)
# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_MASK V3D_MASK(31, 16)
# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_SHIFT 16
# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_MASK V3D_MASK(15, 0)
# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_SHIFT 0
-#define V3D_CSD_QUEUED_CFG2 0x0090c
+#define V3D_CSD_QUEUED_CFG2(ver) ((ver >= 71) ? 0x00938 : 0x0090c)
# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_MASK V3D_MASK(31, 16)
# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_SHIFT 16
# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_MASK V3D_MASK(15, 0)
# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_SHIFT 0
-#define V3D_CSD_QUEUED_CFG3 0x00910
+#define V3D_CSD_QUEUED_CFG3(ver) ((ver >= 71) ? 0x0093c : 0x00910)
# define V3D_CSD_QUEUED_CFG3_OVERLAP_WITH_PREV BIT(26)
# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_MASK V3D_MASK(25, 20)
# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_SHIFT 20
@@ -428,23 +437,28 @@
# define V3D_CSD_QUEUED_CFG3_WG_SIZE_SHIFT 0
/* Number of batches, minus 1 */
-#define V3D_CSD_QUEUED_CFG4 0x00914
+#define V3D_CSD_QUEUED_CFG4(ver) ((ver >= 71) ? 0x00940 : 0x00914)
/* Shader address, pnan, singleseg, threading, like a shader record. */
-#define V3D_CSD_QUEUED_CFG5 0x00918
+#define V3D_CSD_QUEUED_CFG5(ver) ((ver >= 71) ? 0x00944 : 0x00918)
/* Uniforms address (4 byte aligned) */
-#define V3D_CSD_QUEUED_CFG6 0x0091c
-
-#define V3D_CSD_CURRENT_CFG0 0x00920
-#define V3D_CSD_CURRENT_CFG1 0x00924
-#define V3D_CSD_CURRENT_CFG2 0x00928
-#define V3D_CSD_CURRENT_CFG3 0x0092c
-#define V3D_CSD_CURRENT_CFG4 0x00930
-#define V3D_CSD_CURRENT_CFG5 0x00934
-#define V3D_CSD_CURRENT_CFG6 0x00938
-
-#define V3D_CSD_CURRENT_ID0 0x0093c
+#define V3D_CSD_QUEUED_CFG6(ver) ((ver >= 71) ? 0x00948 : 0x0091c)
+
+/* V3D 7.x+ only */
+#define V3D_V7_CSD_QUEUED_CFG7 0x0094c
+
+#define V3D_CSD_CURRENT_CFG0(ver) ((ver >= 71) ? 0x00958 : 0x00920)
+#define V3D_CSD_CURRENT_CFG1(ver) ((ver >= 71) ? 0x0095c : 0x00924)
+#define V3D_CSD_CURRENT_CFG2(ver) ((ver >= 71) ? 0x00960 : 0x00928)
+#define V3D_CSD_CURRENT_CFG3(ver) ((ver >= 71) ? 0x00964 : 0x0092c)
+#define V3D_CSD_CURRENT_CFG4(ver) ((ver >= 71) ? 0x00968 : 0x00930)
+#define V3D_CSD_CURRENT_CFG5(ver) ((ver >= 71) ? 0x0096c : 0x00934)
+#define V3D_CSD_CURRENT_CFG6(ver) ((ver >= 71) ? 0x00970 : 0x00938)
+/* V3D 7.x+ only */
+#define V3D_V7_CSD_CURRENT_CFG7 0x00974
+
+#define V3D_CSD_CURRENT_ID0(ver) ((ver >= 71) ? 0x00978 : 0x0093c)
# define V3D_CSD_CURRENT_ID0_WG_X_MASK V3D_MASK(31, 16)
# define V3D_CSD_CURRENT_ID0_WG_X_SHIFT 16
# define V3D_CSD_CURRENT_ID0_WG_IN_SG_MASK V3D_MASK(11, 8)
@@ -452,7 +466,7 @@
# define V3D_CSD_CURRENT_ID0_L_IDX_MASK V3D_MASK(7, 0)
# define V3D_CSD_CURRENT_ID0_L_IDX_SHIFT 0
-#define V3D_CSD_CURRENT_ID1 0x00940
+#define V3D_CSD_CURRENT_ID1(ver) ((ver >= 71) ? 0x0097c : 0x00940)
# define V3D_CSD_CURRENT_ID0_WG_Z_MASK V3D_MASK(31, 16)
# define V3D_CSD_CURRENT_ID0_WG_Z_SHIFT 16
# define V3D_CSD_CURRENT_ID0_WG_Y_MASK V3D_MASK(15, 0)
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 038e1ae58..54015ad76 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -18,12 +18,17 @@
* semaphores to interlock between them.
*/
+#include <linux/sched/clock.h>
#include <linux/kthread.h>
+#include <drm/drm_syncobj.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
+#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
+
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
@@ -54,6 +59,12 @@ to_csd_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_csd_job, base.base);
}
+static struct v3d_cpu_job *
+to_cpu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_cpu_job, base.base);
+}
+
static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
@@ -63,6 +74,28 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
}
static void
+v3d_cpu_job_free(struct drm_sched_job *sched_job)
+{
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+
+ if (timestamp_query->queries) {
+ for (int i = 0; i < timestamp_query->count; i++)
+ drm_syncobj_put(timestamp_query->queries[i].syncobj);
+ kvfree(timestamp_query->queries);
+ }
+
+ if (performance_query->queries) {
+ for (int i = 0; i < performance_query->count; i++)
+ drm_syncobj_put(performance_query->queries[i].syncobj);
+ kvfree(performance_query->queries);
+ }
+
+ v3d_job_cleanup(&job->base);
+}
+
+static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
if (job->perfmon != v3d->active_perfmon)
@@ -76,6 +109,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
@@ -107,6 +141,9 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
job->start, job->end);
+ file->start_ns[V3D_BIN] = local_clock();
+ v3d->queue[V3D_BIN].start_ns = file->start_ns[V3D_BIN];
+
v3d_switch_perfmon(v3d, &job->base);
/* Set the current and end address of the control list.
@@ -131,6 +168,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -158,6 +196,9 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
job->start, job->end);
+ file->start_ns[V3D_RENDER] = local_clock();
+ v3d->queue[V3D_RENDER].start_ns = file->start_ns[V3D_RENDER];
+
v3d_switch_perfmon(v3d, &job->base);
/* XXX: Set the QCFG */
@@ -176,6 +217,7 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
struct v3d_tfu_job *job = to_tfu_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
@@ -190,20 +232,25 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
- V3D_WRITE(V3D_TFU_IIA, job->args.iia);
- V3D_WRITE(V3D_TFU_IIS, job->args.iis);
- V3D_WRITE(V3D_TFU_ICA, job->args.ica);
- V3D_WRITE(V3D_TFU_IUA, job->args.iua);
- V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
- V3D_WRITE(V3D_TFU_IOS, job->args.ios);
- V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
- if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
- V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
- V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
- V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
+ file->start_ns[V3D_TFU] = local_clock();
+ v3d->queue[V3D_TFU].start_ns = file->start_ns[V3D_TFU];
+
+ V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
+ V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
+ V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
+ V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
+ V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
+ if (v3d->ver >= 71)
+ V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
+ V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
+ V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
+ if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
+ V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
+ V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
+ V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
}
/* ICFG kicks off the job. */
- V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+ V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);
return fence;
}
@@ -213,9 +260,10 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- int i;
+ int i, csd_cfg0_reg, csd_cfg_reg_count;
v3d->csd_job = job;
@@ -231,24 +279,314 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);
+ file->start_ns[V3D_CSD] = local_clock();
+ v3d->queue[V3D_CSD].start_ns = file->start_ns[V3D_CSD];
+
v3d_switch_perfmon(v3d, &job->base);
- for (i = 1; i <= 6; i++)
- V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
+ csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
+ csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
+ for (i = 1; i <= csd_cfg_reg_count; i++)
+ V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
/* CFG0 write kicks off the job. */
- V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
+ V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);
return fence;
}
+static void
+v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
+{
+ struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
+ struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
+ u32 *wg_counts;
+
+ v3d_get_bo_vaddr(bo);
+ v3d_get_bo_vaddr(indirect);
+
+ wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);
+
+ /* A zero workgroup count skips the dispatch, but the BOs still need
+ * to be unmapped below.
+ */
+ if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
+ goto out;
+
+ args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+ args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+ for (int i = 0; i < 3; i++) {
+ /* 0xffffffff indicates that the uniform rewrite is not needed */
+ if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
+ u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
+ ((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
+ }
+ }
+
+out:
+ v3d_put_bo_vaddr(indirect);
+ v3d_put_bo_vaddr(bo);
+}
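A worked example of the rewrite above, with purely illustrative values: wg_counts = {4, 2, 1} read from the BO, and an indirect job with wg_size = 64:

/* Illustrative values, not from this patch:
 *   cfg[0] = 4 << V3D_CSD_CFG012_WG_COUNT_SHIFT;  // 4 workgroups in X
 *   cfg[1] = 2 << V3D_CSD_CFG012_WG_COUNT_SHIFT;  // 2 in Y
 *   cfg[2] = 1 << V3D_CSD_CFG012_WG_COUNT_SHIFT;  // 1 in Z
 *   cfg[4] = DIV_ROUND_UP(64, 16) * (4 * 2 * 1) - 1 == 31  // batches - 1
 */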
+
+static void
+v3d_timestamp_query(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ u8 *value_addr;
+
+ v3d_get_bo_vaddr(bo);
+
+ for (int i = 0; i < timestamp_query->count; i++) {
+ value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
+ *((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;
+
+ drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
+ job->base.done_fence);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_timestamp_query *queries = timestamp_query->queries;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ u8 *value_addr;
+
+ v3d_get_bo_vaddr(bo);
+
+ for (int i = 0; i < timestamp_query->count; i++) {
+ value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
+ *((u64 *)value_addr) = 0;
+
+ drm_syncobj_replace_fence(queries[i].syncobj, NULL);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static void
+write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+{
+ if (do_64bit) {
+ u64 *dst64 = (u64 *)dst;
+
+ dst64[idx] = value;
+ } else {
+ u32 *dst32 = (u32 *)dst;
+
+ dst32[idx] = (u32)value;
+ }
+}
+
+static void
+v3d_copy_query_results(struct v3d_cpu_job *job)
+{
+ struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+ struct v3d_timestamp_query *queries = timestamp_query->queries;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct dma_fence *fence;
+ u8 *query_addr;
+ bool available, write_result;
+ u8 *data;
+ int i;
+
+ v3d_get_bo_vaddr(bo);
+ v3d_get_bo_vaddr(timestamp);
+
+ data = ((u8 *)bo->vaddr) + copy->offset;
+
+ for (i = 0; i < timestamp_query->count; i++) {
+ fence = drm_syncobj_fence_get(queries[i].syncobj);
+ available = fence ? dma_fence_is_signaled(fence) : false;
+
+ write_result = available || copy->do_partial;
+ if (write_result) {
+ query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
+ write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
+ }
+
+ if (copy->availability_bit)
+ write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);
+
+ data += copy->stride;
+
+ dma_fence_put(fence);
+ }
+
+ v3d_put_bo_vaddr(timestamp);
+ v3d_put_bo_vaddr(bo);
+}
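For reference, the destination layout the loop above produces, with illustrative parameters count = 2, stride = 16, and both do_64bit and availability_bit set:

/* Illustrative layout, relative to copy->offset in the destination BO:
 *   query 0: bytes  0..7  timestamp, bytes  8..15 availability (0 or 1)
 *   query 1: bytes 16..23 timestamp, bytes 24..31 availability (0 or 1)
 * write_to_buffer() indexes in elements of the selected width, so
 * index 1 lands at byte offset 8 within each stride here.
 */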
+
+static void
+v3d_reset_performance_queries(struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_perfmon *perfmon;
+
+ for (int i = 0; i < performance_query->count; i++) {
+ for (int j = 0; j < performance_query->nperfmons; j++) {
+ perfmon = v3d_perfmon_find(v3d_priv,
+ performance_query->queries[i].kperfmon_ids[j]);
+ if (!perfmon) {
+ DRM_DEBUG("Failed to find perfmon.");
+ continue;
+ }
+
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));
+
+ v3d_perfmon_put(perfmon);
+ }
+
+ drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
+ }
+}
+
+static void
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_perfmon *perfmon;
+ u64 counter_values[V3D_PERFCNT_NUM];
+
+ for (int i = 0; i < performance_query->nperfmons; i++) {
+ perfmon = v3d_perfmon_find(v3d_priv,
+ performance_query->queries[query].kperfmon_ids[i]);
+ if (!perfmon) {
+ DRM_DEBUG("Failed to find perfmon.");
+ continue;
+ }
+
+ v3d_perfmon_stop(v3d, perfmon, true);
+
+ memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
+ perfmon->ncounters * sizeof(u64));
+
+ v3d_perfmon_put(perfmon);
+ }
+
+ for (int i = 0; i < performance_query->ncounters; i++)
+ write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
+}
+
+static void
+v3d_copy_performance_query(struct v3d_cpu_job *job)
+{
+ struct v3d_performance_query_info *performance_query = &job->performance_query;
+ struct v3d_copy_query_results_info *copy = &job->copy;
+ struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+ struct dma_fence *fence;
+ bool available, write_result;
+ u8 *data;
+
+ v3d_get_bo_vaddr(bo);
+
+ data = ((u8 *)bo->vaddr) + copy->offset;
+
+ for (int i = 0; i < performance_query->count; i++) {
+ fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
+ available = fence ? dma_fence_is_signaled(fence) : false;
+
+ write_result = available || copy->do_partial;
+ if (write_result)
+ v3d_write_performance_query_result(job, data, i);
+
+ if (copy->availability_bit)
+ write_to_buffer(data, performance_query->ncounters,
+ copy->do_64bit, available ? 1u : 0u);
+
+ data += copy->stride;
+
+ dma_fence_put(fence);
+ }
+
+ v3d_put_bo_vaddr(bo);
+}
+
+static const v3d_cpu_job_fn cpu_job_function[] = {
+ [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
+ [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
+ [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
+ [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
+ [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
+ [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
+};
+
+static struct dma_fence *
+v3d_cpu_job_run(struct drm_sched_job *sched_job)
+{
+ struct v3d_cpu_job *job = to_cpu_job(sched_job);
+ struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_file_priv *file = job->base.file->driver_priv;
+ u64 runtime;
+
+ v3d->cpu_job = job;
+
+ if (job->job_type >= ARRAY_SIZE(cpu_job_function) ||
+ !cpu_job_function[job->job_type]) {
+ DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
+ return NULL;
+ }
+
+ file->start_ns[V3D_CPU] = local_clock();
+ v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU];
+
+ trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);
+
+ cpu_job_function[job->job_type](job);
+
+ trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
+
+ runtime = local_clock() - file->start_ns[V3D_CPU];
+
+ file->enabled_ns[V3D_CPU] += runtime;
+ v3d->queue[V3D_CPU].enabled_ns += runtime;
+
+ file->jobs_sent[V3D_CPU]++;
+ v3d->queue[V3D_CPU].jobs_sent++;
+
+ file->start_ns[V3D_CPU] = 0;
+ v3d->queue[V3D_CPU].start_ns = 0;
+
+ return NULL;
+}
+
static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
struct v3d_dev *v3d = job->v3d;
+ struct v3d_file_priv *file = job->file->driver_priv;
+ u64 runtime;
+
+ file->start_ns[V3D_CACHE_CLEAN] = local_clock();
+ v3d->queue[V3D_CACHE_CLEAN].start_ns = file->start_ns[V3D_CACHE_CLEAN];
v3d_clean_caches(v3d);
+ runtime = local_clock() - file->start_ns[V3D_CACHE_CLEAN];
+
+ file->enabled_ns[V3D_CACHE_CLEAN] += runtime;
+ v3d->queue[V3D_CACHE_CLEAN].enabled_ns += runtime;
+
+ file->jobs_sent[V3D_CACHE_CLEAN]++;
+ v3d->queue[V3D_CACHE_CLEAN].jobs_sent++;
+
+ file->start_ns[V3D_CACHE_CLEAN] = 0;
+ v3d->queue[V3D_CACHE_CLEAN].start_ns = 0;
+
return NULL;
}
@@ -336,7 +674,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
- u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);
+ u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
/* If we've made progress, skip reset and let the timer get
* rearmed.
@@ -379,6 +717,12 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.free_job = v3d_sched_job_free
};
+static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
+ .run_job = v3d_cpu_job_run,
+ .timedout_job = v3d_generic_job_timedout,
+ .free_job = v3d_cpu_job_free
+};
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
@@ -388,7 +732,7 @@ v3d_sched_init(struct v3d_dev *v3d)
int ret;
ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
- &v3d_bin_sched_ops,
+ &v3d_bin_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -397,7 +741,7 @@ v3d_sched_init(struct v3d_dev *v3d)
return ret;
ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
- &v3d_render_sched_ops,
+ &v3d_render_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -406,7 +750,7 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
- &v3d_tfu_sched_ops,
+ &v3d_tfu_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -416,7 +760,7 @@ v3d_sched_init(struct v3d_dev *v3d)
if (v3d_has_csd(v3d)) {
ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
- &v3d_csd_sched_ops,
+ &v3d_csd_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -425,7 +769,7 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
- &v3d_cache_clean_sched_ops,
+ &v3d_cache_clean_sched_ops, NULL,
DRM_SCHED_PRIORITY_COUNT,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms), NULL,
@@ -434,6 +778,15 @@ v3d_sched_init(struct v3d_dev *v3d)
goto fail;
}
+ ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
+ &v3d_cpu_sched_ops, NULL,
+ DRM_SCHED_PRIORITY_COUNT,
+ 1, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms), NULL,
+ NULL, "v3d_cpu", v3d->drm.dev);
+ if (ret)
+ goto fail;
+
return 0;
fail:
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
new file mode 100644
index 000000000..88f63d526
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -0,0 +1,1341 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014-2018 Broadcom
+ * Copyright (C) 2023 Raspberry Pi
+ */
+
+#include <drm/drm_syncobj.h>
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+#include "v3d_trace.h"
+
+/* Takes the reservation lock on all the BOs being referenced, so that
+ * at queue submit time we can update the reservations.
+ *
+ * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
+ * (all of which are on exec->unref_list). They're entirely private
+ * to v3d, so we don't attach dma-buf fences to them.
+ */
+static int
+v3d_lock_bo_reservations(struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i, ret;
+
+ ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < job->bo_count; i++) {
+ ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+ if (ret)
+ goto fail;
+
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+ return ret;
+}
+
+/**
+ * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @job: V3D job being set up
+ * @bo_handles: GEM handles
+ * @bo_count: Number of GEM handles passed in
+ *
+ * The command validator needs to reference BOs by their index within
+ * the submitted job's BO list. This does the validation of the job's
+ * BO list and reference counting for the lifetime of the job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at v3d_exec_cleanup() time.
+ */
+static int
+v3d_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct v3d_job *job,
+ u64 bo_handles,
+ u32 bo_count)
+{
+ job->bo_count = bo_count;
+
+ if (!job->bo_count) {
+ /* See comment on bo_index for why we have to check
+ * this.
+ */
+ DRM_DEBUG("Rendering requires BOs\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)bo_handles,
+ job->bo_count, &job->bo);
+}
+
+static void
+v3d_job_free(struct kref *ref)
+{
+ struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+ int i;
+
+ if (job->bo) {
+ for (i = 0; i < job->bo_count; i++)
+ drm_gem_object_put(job->bo[i]);
+ kvfree(job->bo);
+ }
+
+ dma_fence_put(job->irq_fence);
+ dma_fence_put(job->done_fence);
+
+ if (job->perfmon)
+ v3d_perfmon_put(job->perfmon);
+
+ kfree(job);
+}
+
+static void
+v3d_render_job_free(struct kref *ref)
+{
+ struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
+ base.refcount);
+ struct v3d_bo *bo, *save;
+
+ list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
+ drm_gem_object_put(&bo->base.base);
+ }
+
+ v3d_job_free(ref);
+}
+
+void v3d_job_cleanup(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ drm_sched_job_cleanup(&job->base);
+ v3d_job_put(job);
+}
+
+void v3d_job_put(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ kref_put(&job->refcount, job->free);
+}
+
+static int
+v3d_job_allocate(void **container, size_t size)
+{
+ *container = kzalloc(size, GFP_KERNEL);
+ if (!*container) {
+ DRM_ERROR("Cannot allocate memory for V3D job.\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+v3d_job_deallocate(void **container)
+{
+ kfree(*container);
+ *container = NULL;
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ struct v3d_job *job, void (*free)(struct kref *ref),
+ u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int ret, i;
+
+ job->v3d = v3d;
+ job->free = free;
+ job->file = file_priv;
+
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ 1, v3d_priv);
+ if (ret)
+ return ret;
+
+ if (has_multisync) {
+ if (se->in_sync_count && se->wait_stage == queue) {
+ struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+ for (i = 0; i < se->in_sync_count; i++) {
+ struct drm_v3d_sem in;
+
+ if (copy_from_user(&in, handle++, sizeof(in))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy wait dep handle.\n");
+ goto fail_deps;
+ }
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
+
+ /* TODO: Investigate why this was filtered out for the IOCTL. */
+ if (ret && ret != -ENOENT)
+ goto fail_deps;
+ }
+ }
+ } else {
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
+
+ /* TODO: Investigate why this was filtered out for the IOCTL. */
+ if (ret && ret != -ENOENT)
+ goto fail_deps;
+ }
+
+ kref_init(&job->refcount);
+
+ return 0;
+
+fail_deps:
+ drm_sched_job_cleanup(&job->base);
+ return ret;
+}
+
+static void
+v3d_push_job(struct v3d_job *job)
+{
+ drm_sched_job_arm(&job->base);
+
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ /* put by scheduler job completion */
+ kref_get(&job->refcount);
+
+ drm_sched_entity_push_job(&job->base);
+}
+
+static void
+v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
+ struct v3d_job *job,
+ struct ww_acquire_ctx *acquire_ctx,
+ u32 out_sync,
+ struct v3d_submit_ext *se,
+ struct dma_fence *done_fence)
+{
+ struct drm_syncobj *sync_out;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int i;
+
+ for (i = 0; i < job->bo_count; i++) {
+ /* XXX: Use shared fences for read-only objects. */
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+
+ /* Update the return sync object for the job */
+ /* If the submission only supports a single signal semaphore */
+ if (!has_multisync) {
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ return;
+ }
+
+ /* If the multiple semaphores extension is in use */
+ if (se->out_sync_count) {
+ for (i = 0; i < se->out_sync_count; i++) {
+ drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+ done_fence);
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ }
+ kvfree(se->out_syncs);
+ }
+}
+
+static int
+v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
+ struct v3d_dev *v3d,
+ struct drm_v3d_submit_csd *args,
+ struct v3d_csd_job **job,
+ struct v3d_job **clean_job,
+ struct v3d_submit_ext *se,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int ret;
+
+ ret = v3d_job_allocate((void *)job, sizeof(**job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &(*job)->base,
+ v3d_job_free, args->in_sync, se, V3D_CSD);
+ if (ret) {
+ v3d_job_deallocate((void *)job);
+ return ret;
+ }
+
+ ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, *clean_job,
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+ if (ret) {
+ v3d_job_deallocate((void *)clean_job);
+ return ret;
+ }
+
+ (*job)->args = *args;
+
+ ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ return ret;
+
+ return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
+}
+
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+ unsigned int i;
+
+ if (!(se && se->out_sync_count))
+ return;
+
+ for (i = 0; i < se->out_sync_count; i++)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+ struct v3d_submit_ext *se,
+ u32 count, u64 handles)
+{
+ struct drm_v3d_sem __user *post_deps;
+ int i, ret;
+
+ if (!count)
+ return 0;
+
+ se->out_syncs = (struct v3d_submit_outsync *)
+ kvmalloc_array(count,
+ sizeof(struct v3d_submit_outsync),
+ GFP_KERNEL);
+ if (!se->out_syncs)
+ return -ENOMEM;
+
+ post_deps = u64_to_user_ptr(handles);
+
+ for (i = 0; i < count; i++) {
+ struct drm_v3d_sem out;
+
+ if (copy_from_user(&out, post_deps++, sizeof(out))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy post dep handles\n");
+ goto fail;
+ }
+
+ se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+ out.handle);
+ if (!se->out_syncs[i].syncobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+ se->out_sync_count = count;
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+
+ return ret;
+}
+
+/* Get data for multiple binary semaphore synchronization. Parse the
+ * syncobjs to be signaled when the job completes (out_syncs) and record
+ * the wait dependencies (in_syncs).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_submit_ext *se)
+{
+ struct drm_v3d_multi_sync multisync;
+ int ret;
+
+ if (se->in_sync_count || se->out_sync_count) {
+ DRM_DEBUG("Two multisync extensions were added to the same job.");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&multisync, ext, sizeof(multisync)))
+ return -EFAULT;
+
+ if (multisync.pad)
+ return -EINVAL;
+
+ ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
+ multisync.out_syncs);
+ if (ret)
+ return ret;
+
+ se->in_sync_count = multisync.in_sync_count;
+ se->in_syncs = multisync.in_syncs;
+ se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+ se->wait_stage = multisync.wait_stage;
+
+ return 0;
+}
+
+/* Get data for the indirect CSD job submission. */
+static int
+v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct v3d_dev *v3d = v3d_priv->v3d;
+ struct drm_v3d_indirect_csd indirect_csd;
+ struct v3d_indirect_csd_info *info;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ info = &job->indirect_csd;
+
+ if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
+ return -EFAULT;
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
+ return -EINVAL;
+ }
+
+ job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
+ info->offset = indirect_csd.offset;
+ info->wg_size = indirect_csd.wg_size;
+ memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
+ sizeof(indirect_csd.wg_uniform_offsets));
+
+ info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
+ if (!info->indirect) {
+ DRM_DEBUG("Failed to look up the indirect CSD BO.\n");
+ return -ENOENT;
+ }
+
+ return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
+ &info->job, &info->clean_job,
+ NULL, &info->acquire_ctx);
+}
+
+/* Get data for the query timestamp job submission. */
+static int
+v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_timestamp_query timestamp;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
+ return -EFAULT;
+
+ if (timestamp.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(timestamp.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ offsets = u64_to_user_ptr(timestamp.offsets);
+ syncs = u64_to_user_ptr(timestamp.syncs);
+
+ for (int i = 0; i < timestamp.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = timestamp.count;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ struct drm_v3d_reset_timestamp_query reset;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(reset.syncs);
+
+ for (int i = 0; i < reset.count; i++) {
+ u32 sync;
+
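+ /* Slots appear to be consecutive 64-bit timestamps, hence the 8-byte stride. */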
+ job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = reset.count;
+
+ return 0;
+}
+
+/* Get data for the copy timestamp query results job submission. */
+static int
+v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *offsets, *syncs;
+ struct drm_v3d_copy_timestamp_query copy;
+ int i;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&copy, ext, sizeof(copy)))
+ return -EFAULT;
+
+ if (copy.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
+
+ job->timestamp_query.queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_timestamp_query),
+ GFP_KERNEL);
+ if (!job->timestamp_query.queries)
+ return -ENOMEM;
+
+ offsets = u64_to_user_ptr(copy.offsets);
+ syncs = u64_to_user_ptr(copy.syncs);
+
+ for (i = 0; i < copy.count; i++) {
+ u32 offset, sync;
+
+ if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].offset = offset;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->timestamp_query.queries);
+ return -EFAULT;
+ }
+
+ job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ }
+ job->timestamp_query.count = copy.count;
+
+ job->copy.do_64bit = copy.do_64bit;
+ job->copy.do_partial = copy.do_partial;
+ job->copy.availability_bit = copy.availability_bit;
+ job->copy.offset = copy.offset;
+ job->copy.stride = copy.stride;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_reset_performance_query reset;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&reset, ext, sizeof(reset)))
+ return -EFAULT;
+
+ job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(reset.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!job->performance_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(reset.syncs);
+ kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
+
+ for (int i = 0; i < reset.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (int j = 0; j < reset.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
+ }
+ job->performance_query.count = reset.count;
+ job->performance_query.nperfmons = reset.nperfmons;
+
+ return 0;
+}
+
+static int
+v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ struct v3d_cpu_job *job)
+{
+ u32 __user *syncs;
+ u64 __user *kperfmon_ids;
+ struct drm_v3d_copy_performance_query copy;
+
+ if (!job) {
+ DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+ return -EINVAL;
+ }
+
+ if (job->job_type) {
+ DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&copy, ext, sizeof(copy)))
+ return -EFAULT;
+
+ if (copy.pad)
+ return -EINVAL;
+
+ job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
+
+ job->performance_query.queries = kvmalloc_array(copy.count,
+ sizeof(struct v3d_performance_query),
+ GFP_KERNEL);
+ if (!job->performance_query.queries)
+ return -ENOMEM;
+
+ syncs = u64_to_user_ptr(copy.syncs);
+ kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+
+ for (int i = 0; i < copy.count; i++) {
+ u32 sync;
+ u64 ids;
+ u32 __user *ids_pointer;
+ u32 id;
+
+ if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+ if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ ids_pointer = u64_to_user_ptr(ids);
+
+ for (int j = 0; j < copy.nperfmons; j++) {
+ if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+ kvfree(job->performance_query.queries);
+ return -EFAULT;
+ }
+
+ job->performance_query.queries[i].kperfmon_ids[j] = id;
+ }
+ }
+ job->performance_query.count = copy.count;
+ job->performance_query.nperfmons = copy.nperfmons;
+ job->performance_query.ncounters = copy.ncounters;
+
+ job->copy.do_64bit = copy.do_64bit;
+ job->copy.do_partial = copy.do_partial;
+ job->copy.availability_bit = copy.availability_bit;
+ job->copy.offset = copy.offset;
+ job->copy.stride = copy.stride;
+
+ return 0;
+}
+
+/* Whenever userspace attaches extensions to an ioctl, v3d_get_extensions
+ * walks the chain and parses each one's data according to its id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+ u64 ext_handles,
+ struct v3d_submit_ext *se,
+ struct v3d_cpu_job *job)
+{
+ struct drm_v3d_extension __user *user_ext;
+ int ret;
+
+ user_ext = u64_to_user_ptr(ext_handles);
+ while (user_ext) {
+ struct drm_v3d_extension ext;
+
+ if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+ DRM_DEBUG("Failed to copy submit extension\n");
+ return -EFAULT;
+ }
+
+ switch (ext.id) {
+ case DRM_V3D_EXT_ID_MULTI_SYNC:
+ ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
+ break;
+ case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
+ ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
+ ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
+ ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
+ break;
+ case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
+ ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ user_ext = u64_to_user_ptr(ext.next);
+ }
+
+ return 0;
+}
+
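
The extension chain parsed here is a linked list in userspace memory: every node begins with a struct drm_v3d_extension header whose next field holds the user pointer of the following node, and a zero next terminates the chain. As a rough userspace sketch (assuming the v3d uAPI header and libdrm; the helper name, include path, and the choice of V3D_BIN as the wait stage are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <drm/v3d_drm.h>        /* v3d uAPI; include path may vary */

    /* Point a CL submission at a single multi-sync extension. */
    static void attach_multisync(struct drm_v3d_submit_cl *submit,
                                 struct drm_v3d_multi_sync *ms,
                                 struct drm_v3d_sem *in, uint32_t in_count,
                                 struct drm_v3d_sem *out, uint32_t out_count)
    {
            memset(ms, 0, sizeof(*ms));
            ms->base.id = DRM_V3D_EXT_ID_MULTI_SYNC;
            ms->base.next = 0;                 /* end of the chain */
            ms->in_syncs = (uintptr_t)in;
            ms->in_sync_count = in_count;
            ms->out_syncs = (uintptr_t)out;
            ms->out_sync_count = out_count;
            ms->wait_stage = V3D_BIN;          /* queue that waits on in_syncs */

            submit->flags |= DRM_V3D_SUBMIT_EXTENSION;
            submit->extensions = (uintptr_t)ms;
    }
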
+/**
+ * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * This is the main entrypoint for userspace to submit a 3D frame to
+ * the GPU. Userspace provides the binner command list (if applicable)
+ * and the render command list, both built by the userspace driver; the
+ * kernel queues them so that the render job only runs once the frame's
+ * bin job has completed.
+ */
+int
+v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_cl *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_bin_job *bin = NULL;
+ struct v3d_render_job *render = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags &&
+ args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+ DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_allocate((void *)&render, sizeof(*render));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &render->base,
+ v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+ if (ret) {
+ v3d_job_deallocate((void *)&render);
+ goto fail;
+ }
+
+ render->start = args->rcl_start;
+ render->end = args->rcl_end;
+ INIT_LIST_HEAD(&render->unref_list);
+
+ if (args->bcl_start != args->bcl_end) {
+ ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, &bin->base,
+ v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+ if (ret) {
+ v3d_job_deallocate((void *)&bin);
+ goto fail;
+ }
+
+ bin->start = args->bcl_start;
+ bin->end = args->bcl_end;
+ bin->qma = args->qma;
+ bin->qms = args->qms;
+ bin->qts = args->qts;
+ bin->render = render;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, clean_job,
+ v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+ if (ret) {
+ v3d_job_deallocate((void *)&clean_job);
+ goto fail;
+ }
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ if (args->perfmon_id) {
+ render->base.perfmon = v3d_perfmon_find(v3d_priv,
+ args->perfmon_id);
+
+ if (!render->base.perfmon) {
+ ret = -ENOENT;
+ goto fail_perfmon;
+ }
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ if (bin) {
+ bin->base.perfmon = render->base.perfmon;
+ v3d_perfmon_get(bin->base.perfmon);
+ v3d_push_job(&bin->base);
+
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+ }
+
+ v3d_push_job(&render->base);
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
+ if (ret)
+ goto fail_unreserve;
+ clean_job->perfmon = render->base.perfmon;
+ v3d_perfmon_get(clean_job->perfmon);
+ v3d_push_job(clean_job);
+ }
+
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ last_job,
+ &acquire_ctx,
+ args->out_sync,
+ &se,
+ last_job->done_fence);
+
+ v3d_job_put(&bin->base);
+ v3d_job_put(&render->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
+fail:
+ v3d_job_cleanup((void *)bin);
+ v3d_job_cleanup((void *)render);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
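
A hypothetical caller of this ioctl could then look as follows (a sketch assuming libdrm's drmIoctl() wrapper; the command-list addresses and BO handles come from the userspace driver's earlier command-list construction):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/v3d_drm.h>

    int submit_frame(int fd, uint32_t *bo_handles, uint32_t bo_count,
                     uint32_t bcl_start, uint32_t bcl_end,
                     uint32_t rcl_start, uint32_t rcl_end, uint32_t out_sync)
    {
            struct drm_v3d_submit_cl submit = {
                    .bcl_start = bcl_start,         /* binner command list */
                    .bcl_end = bcl_end,
                    .rcl_start = rcl_start,         /* render command list */
                    .rcl_end = rcl_end,
                    .bo_handles = (uintptr_t)bo_handles,
                    .bo_handle_count = bo_count,
                    .out_sync = out_sync,   /* signaled when the frame is done */
            };

            return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
    }
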
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct drm_v3d_submit_tfu *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_tfu_job *job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_DEBUG("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_allocate((void *)&job, sizeof(*job));
+ if (ret)
+ return ret;
+
+ ret = v3d_job_init(v3d, file_priv, &job->base,
+ v3d_job_free, args->in_sync, &se, V3D_TFU);
+ if (ret) {
+ v3d_job_deallocate((void *)&job);
+ goto fail;
+ }
+
+ job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+ sizeof(*job->base.bo), GFP_KERNEL);
+ if (!job->base.bo) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ job->args = *args;
+
+ for (job->base.bo_count = 0;
+ job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+ job->base.bo_count++) {
+ struct drm_gem_object *bo;
+
+ if (!args->bo_handles[job->base.bo_count])
+ break;
+
+ bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
+ if (!bo) {
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+ job->base.bo_count,
+ args->bo_handles[job->base.bo_count]);
+ ret = -ENOENT;
+ goto fail;
+ }
+ job->base.bo[job->base.bo_count] = bo;
+ }
+
+ ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&job->base);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &job->base, &acquire_ctx,
+ args->out_sync,
+ &se,
+ job->base.done_fence);
+
+ v3d_job_put(&job->base);
+
+ return 0;
+
+fail:
+ v3d_job_cleanup((void *)job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
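
For symmetry, a TFU submission from userspace might be sketched like this (assuming libdrm; the raw TFU register setup is hardware-specific and left out):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/v3d_drm.h>

    int submit_tfu_conversion(int fd, uint32_t src_bo, uint32_t dst_bo,
                              uint32_t out_sync)
    {
            struct drm_v3d_submit_tfu tfu = {
                    .bo_handles = { src_bo, dst_bo },  /* unused slots stay 0 */
                    .out_sync = out_sync,
            };

            /* The remaining fields (iia and the rest of the register
             * setup) describe the actual conversion and must be filled
             * in before submitting.
             */
            return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &tfu);
    }
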
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_csd *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_csd_job *job = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!v3d_has_csd(v3d)) {
+ DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+ return -EINVAL;
+ }
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
+ &job, &clean_job, &se,
+ &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ if (args->perfmon_id) {
+ job->base.perfmon = v3d_perfmon_find(v3d_priv,
+ args->perfmon_id);
+ if (!job->base.perfmon) {
+ ret = -ENOENT;
+ goto fail_perfmon;
+ }
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&job->base);
+
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(clean_job);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &acquire_ctx,
+ args->out_sync,
+ &se,
+ clean_job->done_fence);
+
+ v3d_job_put(&job->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &acquire_ctx);
+fail:
+ v3d_job_cleanup((void *)job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
+
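
And a corresponding sketch for a compute dispatch (again assuming libdrm; the cfg[] register values are produced by the userspace compiler and elided):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/v3d_drm.h>

    int submit_compute(int fd, uint32_t *bos, uint32_t nbos, uint32_t out_sync)
    {
            struct drm_v3d_submit_csd csd = {
                    .bo_handles = (uintptr_t)bos,
                    .bo_handle_count = nbos,
                    .out_sync = out_sync,
            };

            /* csd.cfg[] carries the dispatch configuration registers
             * (cfg[5] and cfg[6] are the ones traced above) and must be
             * filled in by the userspace driver before submitting.
             */
            return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CSD, &csd);
    }
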
+static const unsigned int cpu_job_bo_handle_count[] = {
+ [V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
+ [V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
+ [V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
+ [V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
+ [V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
+ [V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
+};
+
+/**
+ * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace specifies the CPU job type and data required to perform its
+ * operations through the drm_v3d_extension struct.
+ */
+int
+v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct drm_v3d_submit_cpu *args = data;
+ struct v3d_submit_ext se = {0};
+ struct v3d_submit_ext *out_se = NULL;
+ struct v3d_cpu_job *cpu_job = NULL;
+ struct v3d_csd_job *csd_job = NULL;
+ struct v3d_job *clean_job = NULL;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("Invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
+ if (ret)
+ return ret;
+
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ goto fail;
+ }
+ }
+
+ /* Every CPU job must have a CPU job user extension */
+ if (!cpu_job->job_type) {
+ DRM_DEBUG("CPU job must have a CPU job user extension.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
+ DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
+
+ ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
+ v3d_job_free, 0, &se, V3D_CPU);
+ if (ret) {
+ v3d_job_deallocate((void *)&cpu_job);
+ goto fail;
+ }
+
+ clean_job = cpu_job->indirect_csd.clean_job;
+ csd_job = cpu_job->indirect_csd.job;
+
+ if (args->bo_handle_count) {
+ ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
+ args->bo_handles, args->bo_handle_count);
+ if (ret)
+ goto fail;
+
+ ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_lock(&v3d->sched_lock);
+ v3d_push_job(&cpu_job->base);
+
+ switch (cpu_job->job_type) {
+ case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+ ret = drm_sched_job_add_dependency(&csd_job->base.base,
+ dma_fence_get(cpu_job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(&csd_job->base);
+
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(csd_job->base.done_fence));
+ if (ret)
+ goto fail_unreserve;
+
+ v3d_push_job(clean_job);
+
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&v3d->sched_lock);
+
+ out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
+
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ &cpu_job->base,
+ &acquire_ctx, 0,
+ out_se, cpu_job->base.done_fence);
+
+ switch (cpu_job->job_type) {
+ case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+ v3d_attach_fences_and_unlock_reservation(file_priv,
+ clean_job,
+ &cpu_job->indirect_csd.acquire_ctx,
+ 0, &se, clean_job->done_fence);
+ break;
+ default:
+ break;
+ }
+
+ v3d_job_put(&cpu_job->base);
+ v3d_job_put(&csd_job->base);
+ v3d_job_put(clean_job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+
+ drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
+ &acquire_ctx);
+
+ drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+ &cpu_job->indirect_csd.acquire_ctx);
+
+fail:
+ /* Free the query arrays before dropping the job references: the
+ * final put may free cpu_job itself, and cpu_job may already be
+ * NULL if its initialization failed.
+ */
+ if (cpu_job) {
+ kvfree(cpu_job->timestamp_query.queries);
+ kvfree(cpu_job->performance_query.queries);
+ }
+ v3d_job_cleanup((void *)cpu_job);
+ v3d_job_cleanup((void *)csd_job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
+
+ return ret;
+}
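
Putting the pieces together, a hedged sketch of submitting a timestamp-query CPU job (assuming libdrm and the v3d uAPI header; per cpu_job_bo_handle_count[] above, this job type takes exactly one BO):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/v3d_drm.h>

    int queue_timestamp_query(int fd, uint32_t bo, uint32_t offset,
                              uint32_t syncobj)
    {
            uint32_t offsets[] = { offset };        /* one query slot */
            uint32_t syncs[] = { syncobj };
            struct drm_v3d_timestamp_query ext = {
                    .base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
                    .offsets = (uintptr_t)offsets,
                    .syncs = (uintptr_t)syncs,
                    .count = 1,
            };
            struct drm_v3d_submit_cpu submit = {
                    .bo_handles = (uintptr_t)&bo,
                    .bo_handle_count = 1,
                    .flags = DRM_V3D_SUBMIT_EXTENSION,
                    .extensions = (uintptr_t)&ext,
            };

            return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
    }
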
diff --git a/drivers/gpu/drm/v3d/v3d_sysfs.c b/drivers/gpu/drm/v3d/v3d_sysfs.c
new file mode 100644
index 000000000..d106845ba
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_sysfs.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Igalia S.L.
+ */
+
+#include <linux/sched/clock.h>
+#include <linux/sysfs.h>
+
+#include "v3d_drv.h"
+
+static ssize_t
+gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct v3d_dev *v3d = to_v3d_dev(drm);
+ enum v3d_queue queue;
+ u64 timestamp = local_clock();
+ u64 active_runtime;
+ ssize_t len = 0;
+
+ len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n");
+
+ for (queue = 0; queue < V3D_MAX_QUEUES; queue++) {
+ if (v3d->queue[queue].start_ns)
+ active_runtime = timestamp - v3d->queue[queue].start_ns;
+ else
+ active_runtime = 0;
+
+ /* Each line will display the queue name, timestamp, the number
+ * of jobs sent to that queue and the runtime, as can be seen here:
+ *
+ * queue timestamp jobs runtime
+ * bin 239043069420 22620 17438164056
+ * render 239043069420 22619 27284814161
+ * tfu 239043069420 8763 394592566
+ * csd 239043069420 3168 10787905530
+ * cache_clean 239043069420 6127 237375940
+ */
+ len += sysfs_emit_at(buf, len, "%s\t%llu\t%llu\t%llu\n",
+ v3d_queue_to_string(queue),
+ timestamp,
+ v3d->queue[queue].jobs_sent,
+ v3d->queue[queue].enabled_ns + active_runtime);
+ }
+
+ return len;
+}
+static DEVICE_ATTR_RO(gpu_stats);
+
+static struct attribute *v3d_sysfs_entries[] = {
+ &dev_attr_gpu_stats.attr,
+ NULL,
+};
+
+static struct attribute_group v3d_sysfs_attr_group = {
+ .attrs = v3d_sysfs_entries,
+};
+
+int
+v3d_sysfs_init(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &v3d_sysfs_attr_group);
+}
+
+void
+v3d_sysfs_destroy(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &v3d_sysfs_attr_group);
+}
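
Since the group is created on the V3D device's kobject, the attribute shows up under the device node with the usual DRM sysfs layout; a minimal reader (the path is an assumption and the card index is system-dependent):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical path; the DRM card index varies per system. */
            FILE *f = fopen("/sys/class/drm/card0/device/gpu_stats", "r");
            char line[128];

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }
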
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 7aa8dc356..5917b9414 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -225,6 +225,63 @@ TRACE_EVENT(v3d_submit_csd,
__entry->seqno)
);
+TRACE_EVENT(v3d_submit_cpu_ioctl,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_begin,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_end,
+ TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+ TP_ARGS(dev, job_type),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(enum v3d_cpu_job_type, job_type)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->job_type = job_type;
+ ),
+
+ TP_printk("dev=%u, job_type=%d",
+ __entry->dev,
+ __entry->job_type)
+);
+
TRACE_EVENT(v3d_cache_clean_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 341edd982..9ff3bade9 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -429,8 +429,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
VBOX_MOUSE_POINTER_ALPHA;
hgsmi_update_pointer_shape(vbox->guest_pool, flags,
- min_t(u32, max(fb->hot_x, 0), width),
- min_t(u32, max(fb->hot_y, 0), height),
+ min_t(u32, max(new_state->hotspot_x, 0), width),
+ min_t(u32, max(new_state->hotspot_y, 0), height),
width, height, vbox->cursor_data, data_size);
mutex_unlock(&vbox->hw_mutex);
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
index 63ca46f4c..becb3dbaa 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -153,12 +153,9 @@ static int __build_mock(struct kunit *test, struct drm_device *drm,
return 0;
}
-static void kunit_action_drm_dev_unregister(void *ptr)
-{
- struct drm_device *drm = ptr;
-
- drm_dev_unregister(drm);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister,
+ drm_dev_unregister,
+ struct drm_device *);
static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
{
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4626fe9aa..f827f2654 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -672,11 +672,21 @@ vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
return &new_state->base;
}
+static void vc4_hdmi_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct vc4_hdmi_connector_state *vc4_state =
+ conn_state_to_vc4_hdmi_conn_state(state);
+
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(vc4_state);
+}
+
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_destroy_state = vc4_hdmi_connector_destroy_state,
.atomic_get_property = vc4_hdmi_connector_get_property,
.atomic_set_property = vc4_hdmi_connector_set_property,
};
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 00e713fae..5948e34f7 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1505,9 +1505,6 @@ static int vc4_prepare_fb(struct drm_plane *plane,
drm_gem_plane_helper_prepare_fb(plane, state);
- if (plane->state->fb == state->fb)
- return 0;
-
return vc4_bo_inc_usecnt(bo);
}
@@ -1516,7 +1513,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 96365a772..bb7d86a0c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -58,6 +58,9 @@
#define MAX_CAPSET_ID 63
#define MAX_RINGS 64
+/* See virtio_gpu_ctx_create. One additional character for NULL terminator. */
+#define DEBUG_NAME_MAX_LEN 65
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -274,6 +277,8 @@ struct virtio_gpu_fpriv {
uint64_t base_fence_ctx;
uint64_t ring_idx_mask;
struct mutex context_lock;
+ char debug_name[DEBUG_NAME_MAX_LEN];
+ bool explicit_debug_name;
};
/* virtgpu_ioctl.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b24b11f25..e4f76f315 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -42,12 +42,19 @@
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fpriv *vfpriv)
{
- char dbgname[TASK_COMM_LEN];
+ if (vfpriv->explicit_debug_name) {
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init,
+ strlen(vfpriv->debug_name),
+ vfpriv->debug_name);
+ } else {
+ char dbgname[TASK_COMM_LEN];
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- vfpriv->context_init, strlen(dbgname),
- dbgname);
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+ }
vfpriv->context_created = true;
}
@@ -107,6 +114,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
value = vgdev->capset_id_mask;
break;
+ case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -565,8 +575,8 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
int ret = 0;
- uint32_t num_params, i, param, value;
- uint64_t valid_ring_mask;
+ uint32_t num_params, i;
+ uint64_t valid_ring_mask, param, value;
size_t len;
struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -580,7 +590,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
return -EINVAL;
/* Number of unique parameters supported at this time. */
- if (num_params > 3)
+ if (num_params > 4)
return -EINVAL;
ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
@@ -642,6 +652,21 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
vfpriv->ring_idx_mask = value;
break;
+ case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME:
+ if (vfpriv->explicit_debug_name) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = strncpy_from_user(vfpriv->debug_name,
+ u64_to_user_ptr(value),
+ DEBUG_NAME_MAX_LEN - 1);
+ if (ret < 0)
+ goto out_unlock;
+
+ vfpriv->explicit_debug_name = true;
+ ret = 0;
+ break;
default:
ret = -EINVAL;
goto out_unlock;
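
Userspace opts in by passing the new context parameter before the context is created; a sketch assuming libdrm and the virtgpu uAPI header (the name string only needs to stay valid for the duration of the call, since the kernel copies it):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/virtgpu_drm.h>

    int create_named_context(int fd, const char *name)
    {
            struct drm_virtgpu_context_set_param param = {
                    .param = VIRTGPU_CONTEXT_PARAM_DEBUG_NAME,
                    .value = (uintptr_t)name,  /* copied via strncpy_from_user */
            };
            struct drm_virtgpu_context_init init = {
                    .num_params = 1,
                    .ctx_set_params = (uintptr_t)&param,
            };

            return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
    }
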
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a1ef657eb..a72a2dbda 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -333,16 +333,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
plane->state->crtc_x,
plane->state->crtc_y,
- plane->state->fb ? plane->state->fb->hot_x : 0,
- plane->state->fb ? plane->state->fb->hot_y : 0);
+ plane->state->hotspot_x,
+ plane->state->hotspot_y);
output->cursor.hdr.type =
cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
output->cursor.resource_id = cpu_to_le32(handle);
if (plane->state->fb) {
output->cursor.hot_x =
- cpu_to_le32(plane->state->fb->hot_x);
+ cpu_to_le32(plane->state->hotspot_x);
output->cursor.hot_y =
- cpu_to_le32(plane->state->fb->hot_y);
+ cpu_to_le32(plane->state->hotspot_y);
} else {
output->cursor.hot_x = cpu_to_le32(0);
output->cursor.hot_y = cpu_to_le32(0);
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index d7e63aa14..bc724cbd5 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -30,17 +30,25 @@ static const struct drm_connector_funcs vkms_wb_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int vkms_wb_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
- const struct drm_display_mode *mode = &crtc_state->mode;
+ const struct drm_display_mode *mode;
int ret;
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
+ if (!conn_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ mode = &crtc_state->mode;
+
fb = conn_state->writeback_job->fb;
if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
@@ -48,17 +56,13 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
- ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
+ ret = drm_atomic_helper_check_wb_connector_state(connector, state);
if (ret < 0)
return ret;
return 0;
}
-static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
- .atomic_check = vkms_wb_encoder_atomic_check,
-};
-
static int vkms_wb_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -161,6 +165,7 @@ static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
.prepare_writeback_job = vkms_wb_prepare_job,
.cleanup_writeback_job = vkms_wb_cleanup_job,
.atomic_commit = vkms_wb_atomic_commit,
+ .atomic_check = vkms_wb_atomic_check,
};
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
@@ -171,7 +176,7 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
return drm_writeback_connector_init(&vkmsdev->drm, wb,
&vkms_wb_connector_funcs,
- &vkms_wb_encoder_helper_funcs,
+ NULL,
vkms_wb_formats,
ARRAY_SIZE(vkms_wb_formats),
1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index c52c7bf14..717d624e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -456,8 +456,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
.no_wait_gpu = false
};
u32 j, initial_line = dst_offset / dst_stride;
- struct vmw_bo_blit_line_data d;
+ struct vmw_bo_blit_line_data d = {0};
int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -477,12 +479,35 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!src_pages)
+ return -ENOMEM;
+ ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
+ src->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+ if (!dst->ttm->pages && dst->ttm->sg) {
+ dst_pages = kvmalloc_array(dst->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ if (!dst_pages) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
+ dst->ttm->num_pages);
+ if (ret)
+ goto out;
+ }
+
d.mapped_dst = 0;
d.mapped_src = 0;
d.dst_addr = NULL;
d.src_addr = NULL;
- d.dst_pages = dst->ttm->pages;
- d.src_pages = src->ttm->pages;
+ d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
+ d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
@@ -504,6 +529,10 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
+ kvfree(src_pages);
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 2bfac3aad..4aac88cc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -377,7 +377,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
{
struct ttm_operation_ctx ctx = {
.interruptible = params->bo_type != ttm_bo_type_kernel,
- .no_wait_gpu = false
+ .no_wait_gpu = false,
+ .resv = params->resv,
};
struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
@@ -394,8 +395,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
- &vmw_bo->placement, 0, &ctx, NULL,
- NULL, destroy);
+ &vmw_bo->placement, 0, &ctx,
+ params->sg, params->resv, destroy);
if (unlikely(ret))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 0d496dc9c..f349642e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -55,6 +55,8 @@ struct vmw_bo_params {
enum ttm_bo_type bo_type;
size_t size;
bool pin;
+ struct dma_resv *resv;
+ struct sg_table *sg;
};
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c7d90f96d..58fb40c93 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
- /* TTM currently doesn't fully support SEV encryption. */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -EINVAL;
-
- if (vmw_force_coherent)
+ /*
+ * When running with SEV we always want coherent DMA mappings, because
+ * otherwise ttm tt pool pages will bounce through swiotlb and exhaust
+ * its available space.
+ */
+ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
@@ -1627,6 +1628,7 @@ static const struct drm_driver driver = {
.prime_fd_to_handle = vmw_prime_fd_to_handle,
.prime_handle_to_fd = vmw_prime_handle_to_fd,
+ .gem_prime_import_sg_table = vmw_prime_import_sg_table,
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3cd5090de..6acc7ad0e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1131,6 +1131,9 @@ extern int vmw_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t handle, uint32_t flags,
int *prime_fd);
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table);
/*
* MemoryOBject management - vmwgfx_mob.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 12787bb9c..d6bcaf078 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -149,6 +149,38 @@ out_no_bo:
return ret;
}
+struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ int ret;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_gem_object *gem = NULL;
+ struct vmw_bo *vbo;
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_sg,
+ .size = attach->dmabuf->size,
+ .pin = false,
+ .resv = attach->dmabuf->resv,
+ .sg = table,
+ };
+
+ dma_resv_lock(params.resv, NULL);
+
+ ret = vmw_bo_create(dev_priv, &params, &vbo);
+ if (ret != 0)
+ goto out_no_bo;
+
+ vbo->tbo.base.funcs = &vmw_gem_object_funcs;
+
+ gem = &vbo->tbo.base;
+out_no_bo:
+ dma_resv_unlock(params.resv);
+ return gem;
+}
int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 5681a1b42..f27afc6e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -770,13 +770,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
- hotspot_x = du->hotspot_x;
- hotspot_y = du->hotspot_y;
-
- if (new_state->fb) {
- hotspot_x += new_state->fb->hot_x;
- hotspot_y += new_state->fb->hot_y;
- }
+ hotspot_x = du->hotspot_x + new_state->hotspot_x;
+ hotspot_y = du->hotspot_y + new_state->hotspot_y;
du->cursor_surface = vps->surf;
du->cursor_bo = vps->bo;
@@ -839,10 +834,21 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state = NULL;
struct drm_framebuffer *new_fb = new_state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
int ret;
+ /*
+ * Ignore damage clips if the framebuffer attached to the plane's state
+ * has changed since the last plane update (page-flip). In this case, a
+ * full plane update should happen because uploads are done per-buffer.
+ */
+ if (old_fb != new_fb)
+ new_state->ignore_damage_clips = true;
+
if (new_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_state->crtc);
@@ -926,6 +932,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -933,9 +940,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+ * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index db81e635d..9fda4f4ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -243,10 +243,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 2d72a5ee7..c99cad444 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -75,8 +75,12 @@ int vmw_prime_fd_to_handle(struct drm_device *dev,
int fd, u32 *handle)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = ttm_prime_fd_to_handle(tfile, fd, handle);
- return ttm_prime_fd_to_handle(tfile, fd, handle);
+ if (ret)
+ ret = drm_gem_prime_fd_to_handle(dev, file_priv, fd, handle);
+
+ return ret;
}
int vmw_prime_handle_to_fd(struct drm_device *dev,
@@ -85,5 +89,12 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
int *prime_fd)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ int ret;
+
+ if (handle > VMWGFX_NUM_MOB)
+ ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+
+ return ret;
}
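
With this fallback in place, a dma-buf exported by another driver can now be imported through the generic PRIME path (ending up in vmw_prime_import_sg_table() above); from userspace this is just the standard libdrm helper:

    #include <stdint.h>
    #include <xf86drm.h>

    /* Import a dma-buf fd into vmwgfx; wraps DRM_IOCTL_PRIME_FD_TO_HANDLE. */
    int import_dmabuf(int drm_fd, int dmabuf_fd, uint32_t *handle)
    {
            return drmPrimeFDToHandle(drm_fd, dmabuf_fd, handle);
    }
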
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index af8562c95..fcb87d837 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -220,13 +220,18 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
switch (dev_priv->map_mode) {
case vmw_dma_map_bind:
case vmw_dma_map_populate:
- vsgt->sgt = &vmw_tt->sgt;
- ret = sg_alloc_table_from_pages_segment(
- &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long)vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
- if (ret)
- goto out_sg_alloc_fail;
+ if (vmw_tt->dma_ttm.page_flags & TTM_TT_FLAG_EXTERNAL) {
+ vsgt->sgt = vmw_tt->dma_ttm.sg;
+ } else {
+ vsgt->sgt = &vmw_tt->sgt;
+ ret = sg_alloc_table_from_pages_segment(&vmw_tt->sgt,
+ vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev),
+ GFP_KERNEL);
+ if (ret)
+ goto out_sg_alloc_fail;
+ }
ret = vmw_ttm_map_for_dma(vmw_tt);
if (unlikely(ret != 0))
@@ -241,8 +246,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
out_map_fail:
- sg_free_table(vmw_tt->vsgt.sgt);
- vmw_tt->vsgt.sgt = NULL;
+ drm_warn(&dev_priv->drm, "VSG table map failed!");
+ sg_free_table(vsgt->sgt);
+ vsgt->sgt = NULL;
out_sg_alloc_fail:
return ret;
}
@@ -388,15 +394,17 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- int ret;
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
- /* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (external && ttm->sg)
+ return drm_prime_sg_to_dma_addr_array(ttm->sg,
+ ttm->dma_address,
+ ttm->num_pages);
- return ret;
+ return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_device *bdev,
@@ -404,6 +412,10 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ bool external = (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (external)
+ return;
vmw_ttm_unbind(bdev, ttm);
@@ -422,6 +434,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct vmw_ttm_tt *vmw_be;
int ret;
+ bool external = bo->type == ttm_bo_type_sg;
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
@@ -430,7 +443,10 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+ if (external)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent || external)
ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
ttm_cached);
else
diff --git a/drivers/gpu/drm/xe/.gitignore b/drivers/gpu/drm/xe/.gitignore
new file mode 100644
index 000000000..8778bf132
--- /dev/null
+++ b/drivers/gpu/drm/xe/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+*.hdrtest
+/generated
+/xe_gen_wa_oob
diff --git a/drivers/gpu/drm/xe/.kunitconfig b/drivers/gpu/drm/xe/.kunitconfig
new file mode 100644
index 000000000..9590eac91
--- /dev/null
+++ b/drivers/gpu/drm/xe/.kunitconfig
@@ -0,0 +1,13 @@
+# xe dependencies
+CONFIG_KUNIT=y
+CONFIG_PCI=y
+CONFIG_PCI_IOV=y
+CONFIG_DEBUG_FS=y
+CONFIG_DRM=y
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_KMS_HELPER=y
+CONFIG_DRM_XE=y
+CONFIG_DRM_XE_DISPLAY=n
+CONFIG_EXPERT=y
+CONFIG_FB=y
+CONFIG_DRM_XE_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
new file mode 100644
index 000000000..e36ae1f0d
--- /dev/null
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_XE
+ tristate "Intel Xe Graphics"
+ depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) && 64BIT
+ select INTERVAL_TREE
+ # we need shmfs for the swappable backing store, and in particular
+ # the shmem_readpage() which depends upon tmpfs
+ select SHMEM
+ select TMPFS
+ select DRM_BUDDY
+ select DRM_EXEC
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_SUBALLOC_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HDCP_HELPER
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_MIPI_DSI
+ select RELAY
+ select IRQ_WORK
+ # xe depends on ACPI_VIDEO when ACPI is enabled
+ # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+ select BACKLIGHT_CLASS_DEVICE if ACPI
+ select INPUT if ACPI
+ select ACPI_VIDEO if X86 && ACPI
+ select ACPI_BUTTON if ACPI
+ select ACPI_WMI if X86 && ACPI
+ select SYNC_FILE
+ select IOSF_MBI
+ select CRC32
+ select SND_HDA_I915 if SND_HDA_CORE
+ select CEC_CORE if CEC_NOTIFIER
+ select VMAP_PFN
+ select DRM_TTM
+ select DRM_TTM_HELPER
+ select DRM_EXEC
+ select DRM_GPUVM
+ select DRM_SCHED
+ select MMU_NOTIFIER
+ select WANT_DEV_COREDUMP
+ select AUXILIARY_BUS
+ help
+ Experimental driver for Intel Xe series GPUs
+
+ If "M" is selected, the module will be called xe.
+
+config DRM_XE_DISPLAY
+ bool "Enable display support"
+ depends on DRM_XE && DRM_XE=m
+ select FB_IOMEM_HELPERS
+ select I2C
+ select I2C_ALGOBIT
+ default y
+ help
+ Disable this option only if you want to compile out display support.
+
+config DRM_XE_FORCE_PROBE
+ string "Force probe xe for selected Intel hardware IDs"
+ depends on DRM_XE
+ help
+ This is the default value for the xe.force_probe module
+ parameter. Using the module parameter overrides this option.
+
+	  Force probe the xe driver for Intel graphics devices that are
+ recognized but not properly supported by this kernel version. It is
+ recommended to upgrade to a kernel version with proper support as soon
+ as it is available.
+
+ It can also be used to block the probe of recognized and fully
+ supported devices.
+
+ Use "" to disable force probe. If in doubt, use this.
+
+	  Use "<pci-id>[,<pci-id>,...]" to force probe the xe driver for listed
+ devices. For example, "4500" or "4500,4571".
+
+ Use "*" to force probe the driver for all known devices.
+
+ Use "!" right before the ID to block the probe of the device. For
+ example, "4500,!4571" forces the probe of 4500 and blocks the probe of
+ 4571.
+
+ Use "!*" to block the probe of the driver for all known devices.
+
+menu "drm/Xe Debugging"
+depends on DRM_XE
+depends on EXPERT
+source "drivers/gpu/drm/xe/Kconfig.debug"
+endmenu
+
+menu "drm/xe Profile Guided Optimisation"
+ visible if EXPERT
+ depends on DRM_XE
+ source "drivers/gpu/drm/xe/Kconfig.profile"
+endmenu
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
new file mode 100644
index 000000000..549065f57
--- /dev/null
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_XE_WERROR
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for (and only for) xe.ko.
+ Do not enable this unless you are writing code for the xe.ko module.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_DEBUG
+ bool "Enable additional driver debugging"
+ depends on DRM_XE
+ depends on EXPERT
+ depends on !COMPILE_TEST
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_DEBUG_VM
+ bool "Enable extra VM debugging info"
+ default n
+ help
+ Enable extra VM debugging info
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_DEBUG_SRIOV
+ bool "Enable extra SR-IOV debugging"
+ default n
+ help
+ Enable extra SR-IOV debugging info.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_DEBUG_MEM
+ bool "Enable passing SYS/VRAM addresses to user space"
+ default n
+ help
+	  Pass object location through uapi. Intended for extended
+ testing and development only.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_SIMPLE_ERROR_CAPTURE
+ bool "Enable simple error capture to dmesg on job timeout"
+ default n
+ help
+ Choose this option when debugging an unexpected job timeout
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_KUNIT_TEST
+ tristate "KUnit tests for the drm xe driver" if !KUNIT_ALL_TESTS
+ depends on DRM_XE && KUNIT && DEBUG_FS
+ default KUNIT_ALL_TESTS
+ select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
+ help
+ Choose this option to allow the driver to perform selftests under
+ the kunit framework
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_LARGE_GUC_BUFFER
+ bool "Enable larger guc log buffer"
+ default n
+ help
+ Choose this option when debugging guc issues.
+ Buffer should be large enough for complex issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_XE_USERPTR_INVAL_INJECT
+ bool "Inject userptr invalidation -EINVAL errors"
+ default n
+ help
+ Choose this option when debugging error paths that
+ are hit during checks for userptr invalidations.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
diff --git a/drivers/gpu/drm/xe/Kconfig.profile b/drivers/gpu/drm/xe/Kconfig.profile
new file mode 100644
index 000000000..ba17a25e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/Kconfig.profile
@@ -0,0 +1,54 @@
+config DRM_XE_JOB_TIMEOUT_MAX
+ int "Default max job timeout (ms)"
+ default 10000 # milliseconds
+ help
+	  Configures the default max job timeout after which a job will
+	  be forcefully taken away from the scheduler.
+config DRM_XE_JOB_TIMEOUT_MIN
+ int "Default min job timeout (ms)"
+ default 1 # milliseconds
+ help
+	  Configures the default min job timeout after which a job will
+	  be forcefully taken away from the scheduler.
+config DRM_XE_TIMESLICE_MAX
+ int "Default max timeslice duration (us)"
+ default 10000000 # microseconds
+ help
+ Configures the default max timeslice duration between multiple
+	  contexts scheduled by the GuC.
+config DRM_XE_TIMESLICE_MIN
+ int "Default min timeslice duration (us)"
+ default 1 # microseconds
+ help
+ Configures the default min timeslice duration between multiple
+	  contexts scheduled by the GuC.
+config DRM_XE_PREEMPT_TIMEOUT
+ int "Preempt timeout (us, jiffy granularity)"
+ default 640000 # microseconds
+ help
+ How long to wait (in microseconds) for a preemption event to occur
+ when submitting a new context. If the current context does not hit
+ an arbitration point and yield to HW before the timer expires, the
+ HW will be reset to allow the more important context to execute.
+config DRM_XE_PREEMPT_TIMEOUT_MAX
+ int "Default max preempt timeout (us)"
+ default 10000000 # microseconds
+ help
+	  Configures the default max preempt timeout after which a context
+	  will be forcefully taken away and a higher-priority context will
+	  run.
+config DRM_XE_PREEMPT_TIMEOUT_MIN
+ int "Default min preempt timeout (us)"
+ default 1 # microseconds
+ help
+	  Configures the default min preempt timeout after which a context
+	  will be forcefully taken away and a higher-priority context will
+	  run.
+config DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT
+ bool "Default configuration of limitation on scheduler timeout"
+ default y
+ help
+	  Configures whether the scheduler timeout limits are enforced.
+	  When enabled, the MIN and MAX values above apply even to
+	  elevated (privileged) users. The limits are enforced by default.
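
These Kconfig values act as bounds rather than fixed settings: a user-supplied timeout ends up clamped between the MIN and MAX defaults. A minimal sketch of that bounding (hypothetical helper name, not xe's actual code):

#include <linux/minmax.h>
#include <linux/types.h>

/*
 * Hypothetical helper (editor's sketch, not xe's actual code): bound
 * a user-requested job timeout (in ms) by the Kconfig defaults above.
 */
static u32 sketch_bound_job_timeout(u32 requested_ms)
{
	return clamp_t(u32, requested_ms,
		       CONFIG_DRM_XE_JOB_TIMEOUT_MIN,
		       CONFIG_DRM_XE_JOB_TIMEOUT_MAX);
}
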
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
new file mode 100644
index 000000000..1c9e6906b
--- /dev/null
+++ b/drivers/gpu/drm/xe/Makefile
@@ -0,0 +1,305 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+# Unconditionally enable W=1 warnings locally
+# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
+subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
+subdir-ccflags-y += -Wmissing-declarations
+subdir-ccflags-y += $(call cc-option, -Wrestrict)
+subdir-ccflags-y += -Wmissing-format-attribute
+subdir-ccflags-y += -Wmissing-prototypes
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
+subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
+subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
+subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
+subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
+subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+# The following turn off the warnings enabled by -Wextra
+ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-shift-negative-value
+endif
+ifeq ($(findstring 3, $(KBUILD_EXTRA_WARN)),)
+subdir-ccflags-y += -Wno-sign-compare
+endif
+# --- end copy-paste
+
+# Enable -Werror in CI and development
+subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror
+
+subdir-ccflags-y += -I$(obj) -I$(srctree)/$(src)
+
+# generated sources
+hostprogs := xe_gen_wa_oob
+
+generated_oob := $(obj)/generated/xe_wa_oob.c $(obj)/generated/xe_wa_oob.h
+
+quiet_cmd_wa_oob = GEN $(notdir $(generated_oob))
+ cmd_wa_oob = mkdir -p $(@D); $^ $(generated_oob)
+
+$(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
+ $(srctree)/$(src)/xe_wa_oob.rules
+ $(call cmd,wa_oob)
+
+uses_generated_oob := \
+ $(obj)/xe_gsc.o \
+ $(obj)/xe_guc.o \
+ $(obj)/xe_migrate.o \
+ $(obj)/xe_ring_ops.o \
+ $(obj)/xe_vm.o \
+ $(obj)/xe_wa.o \
+ $(obj)/xe_ttm_stolen_mgr.o
+
+$(uses_generated_oob): $(generated_oob)
+
+# Please keep these build lists sorted!
+
+# core driver code
+
+xe-y += xe_bb.o \
+ xe_bo.o \
+ xe_bo_evict.o \
+ xe_debugfs.o \
+ xe_devcoredump.o \
+ xe_device.o \
+ xe_device_sysfs.o \
+ xe_dma_buf.o \
+ xe_drm_client.o \
+ xe_exec.o \
+ xe_execlist.o \
+ xe_exec_queue.o \
+ xe_force_wake.o \
+ xe_ggtt.o \
+ xe_gpu_scheduler.o \
+ xe_gsc.o \
+ xe_gsc_submit.o \
+ xe_gt.o \
+ xe_gt_ccs_mode.o \
+ xe_gt_clock.o \
+ xe_gt_debugfs.o \
+ xe_gt_freq.o \
+ xe_gt_idle.o \
+ xe_gt_mcr.o \
+ xe_gt_pagefault.o \
+ xe_gt_sysfs.o \
+ xe_gt_throttle_sysfs.o \
+ xe_gt_tlb_invalidation.o \
+ xe_gt_topology.o \
+ xe_guc.o \
+ xe_guc_ads.o \
+ xe_guc_ct.o \
+ xe_guc_debugfs.o \
+ xe_guc_hwconfig.o \
+ xe_guc_log.o \
+ xe_guc_pc.o \
+ xe_guc_submit.o \
+ xe_heci_gsc.o \
+ xe_hw_engine.o \
+ xe_hw_engine_class_sysfs.o \
+ xe_hw_fence.o \
+ xe_huc.o \
+ xe_huc_debugfs.o \
+ xe_irq.o \
+ xe_lrc.o \
+ xe_migrate.o \
+ xe_mmio.o \
+ xe_mocs.o \
+ xe_module.o \
+ xe_pat.o \
+ xe_pci.o \
+ xe_pcode.o \
+ xe_pm.o \
+ xe_preempt_fence.o \
+ xe_pt.o \
+ xe_pt_walk.o \
+ xe_query.o \
+ xe_range_fence.o \
+ xe_reg_sr.o \
+ xe_reg_whitelist.o \
+ xe_rtp.o \
+ xe_ring_ops.o \
+ xe_sa.o \
+ xe_sched_job.o \
+ xe_step.o \
+ xe_sync.o \
+ xe_tile.o \
+ xe_tile_sysfs.o \
+ xe_trace.o \
+ xe_ttm_sys_mgr.o \
+ xe_ttm_stolen_mgr.o \
+ xe_ttm_vram_mgr.o \
+ xe_tuning.o \
+ xe_uc.o \
+ xe_uc_debugfs.o \
+ xe_uc_fw.o \
+ xe_vm.o \
+ xe_wait_user_fence.o \
+ xe_wa.o \
+ xe_wopcm.o
+
+# graphics hardware monitoring (HWMON) support
+xe-$(CONFIG_HWMON) += xe_hwmon.o
+
+# graphics virtualization (SR-IOV) support
+xe-y += xe_sriov.o
+
+xe-$(CONFIG_PCI_IOV) += \
+ xe_lmtt.o \
+ xe_lmtt_2l.o \
+ xe_lmtt_ml.o
+
+# i915 Display compat #defines and #includes
+subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
+ -I$(srctree)/$(src)/display/ext \
+ -I$(srctree)/$(src)/compat-i915-headers \
+ -I$(srctree)/drivers/gpu/drm/xe/display/ \
+ -I$(srctree)/drivers/gpu/drm/i915/display/ \
+ -Ddrm_i915_gem_object=xe_bo \
+ -Ddrm_i915_private=xe_device
+
+CFLAGS_i915-display/intel_fbdev.o = -Wno-override-init
+CFLAGS_i915-display/intel_display_device.o = -Wno-override-init
+
+# Rule to build SOC code shared with i915
+$(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+# Rule to build display code shared with i915
+$(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
+ $(call cmd,force_checksrc)
+ $(call if_changed_rule,cc_o_c)
+
+# Display code specific to xe
+xe-$(CONFIG_DRM_XE_DISPLAY) += \
+ xe_display.o \
+ display/xe_fb_pin.o \
+ display/xe_hdcp_gsc.o \
+ display/xe_plane_initial.o \
+ display/xe_display_rps.o \
+ display/xe_display_misc.o \
+ display/xe_dsb_buffer.o \
+ display/intel_fbdev_fb.o \
+ display/intel_fb_bo.o \
+ display/ext/i915_irq.o \
+ display/ext/i915_utils.o
+
+# SOC code shared with i915
+xe-$(CONFIG_DRM_XE_DISPLAY) += \
+ i915-soc/intel_dram.o \
+ i915-soc/intel_pch.o
+
+# Display code shared with i915
+xe-$(CONFIG_DRM_XE_DISPLAY) += \
+ i915-display/icl_dsi.o \
+ i915-display/intel_atomic.o \
+ i915-display/intel_atomic_plane.o \
+ i915-display/intel_audio.o \
+ i915-display/intel_backlight.o \
+ i915-display/intel_bios.o \
+ i915-display/intel_bw.o \
+ i915-display/intel_cdclk.o \
+ i915-display/intel_color.o \
+ i915-display/intel_combo_phy.o \
+ i915-display/intel_connector.o \
+ i915-display/intel_crtc.o \
+ i915-display/intel_crtc_state_dump.o \
+ i915-display/intel_cursor.o \
+ i915-display/intel_cx0_phy.o \
+ i915-display/intel_ddi.o \
+ i915-display/intel_ddi_buf_trans.o \
+ i915-display/intel_display.o \
+ i915-display/intel_display_debugfs.o \
+ i915-display/intel_display_debugfs_params.o \
+ i915-display/intel_display_device.o \
+ i915-display/intel_display_driver.o \
+ i915-display/intel_display_irq.o \
+ i915-display/intel_display_params.o \
+ i915-display/intel_display_power.o \
+ i915-display/intel_display_power_map.o \
+ i915-display/intel_display_power_well.o \
+ i915-display/intel_display_trace.o \
+ i915-display/intel_display_wa.o \
+ i915-display/intel_dkl_phy.o \
+ i915-display/intel_dmc.o \
+ i915-display/intel_dp.o \
+ i915-display/intel_dp_aux.o \
+ i915-display/intel_dp_aux_backlight.o \
+ i915-display/intel_dp_hdcp.o \
+ i915-display/intel_dp_link_training.o \
+ i915-display/intel_dp_mst.o \
+ i915-display/intel_dpll.o \
+ i915-display/intel_dpll_mgr.o \
+ i915-display/intel_dpt_common.o \
+ i915-display/intel_drrs.o \
+ i915-display/intel_dsb.o \
+ i915-display/intel_dsi.o \
+ i915-display/intel_dsi_dcs_backlight.o \
+ i915-display/intel_dsi_vbt.o \
+ i915-display/intel_fb.o \
+ i915-display/intel_fbc.o \
+ i915-display/intel_fdi.o \
+ i915-display/intel_fifo_underrun.o \
+ i915-display/intel_frontbuffer.o \
+ i915-display/intel_global_state.o \
+ i915-display/intel_gmbus.o \
+ i915-display/intel_hdcp.o \
+ i915-display/intel_hdmi.o \
+ i915-display/intel_hotplug.o \
+ i915-display/intel_hotplug_irq.o \
+ i915-display/intel_hti.o \
+ i915-display/intel_link_bw.o \
+ i915-display/intel_lspcon.o \
+ i915-display/intel_modeset_lock.o \
+ i915-display/intel_modeset_setup.o \
+ i915-display/intel_modeset_verify.o \
+ i915-display/intel_panel.o \
+ i915-display/intel_pipe_crc.o \
+ i915-display/intel_pmdemand.o \
+ i915-display/intel_pps.o \
+ i915-display/intel_psr.o \
+ i915-display/intel_qp_tables.o \
+ i915-display/intel_quirks.o \
+ i915-display/intel_snps_phy.o \
+ i915-display/intel_tc.o \
+ i915-display/intel_vblank.o \
+ i915-display/intel_vdsc.o \
+ i915-display/intel_vga.o \
+ i915-display/intel_vrr.o \
+ i915-display/intel_wm.o \
+ i915-display/skl_scaler.o \
+ i915-display/skl_universal_plane.o \
+ i915-display/skl_watermark.o
+
+ifeq ($(CONFIG_ACPI),y)
+ xe-$(CONFIG_DRM_XE_DISPLAY) += \
+ i915-display/intel_acpi.o \
+ i915-display/intel_opregion.o
+endif
+
+ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y)
+ xe-$(CONFIG_DRM_XE_DISPLAY) += i915-display/intel_fbdev.o
+endif
+
+obj-$(CONFIG_DRM_XE) += xe.o
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
+
+# header test
+hdrtest_find_args := -not -path xe_rtp_helpers.h
+ifneq ($(CONFIG_DRM_XE_DISPLAY),y)
+ hdrtest_find_args += -not -path display/\* -not -path compat-i915-headers/\* -not -path xe_display.h
+endif
+
+always-$(CONFIG_DRM_XE_WERROR) += \
+ $(patsubst %.h,%.hdrtest, $(shell cd $(srctree)/$(src) && find * -name '*.h' $(hdrtest_find_args)))
+
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = $(CC) -DHDRTEST $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/xe/abi/gsc_command_header_abi.h b/drivers/gpu/drm/xe/abi/gsc_command_header_abi.h
new file mode 100644
index 000000000..a4c264680
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/gsc_command_header_abi.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GSC_COMMAND_HEADER_ABI_H
+#define _ABI_GSC_COMMAND_HEADER_ABI_H
+
+#include <linux/types.h>
+
+struct intel_gsc_mtl_header {
+ u32 validity_marker;
+#define GSC_HECI_VALIDITY_MARKER 0xA578875A
+
+ u8 heci_client_id;
+
+ u8 reserved1;
+
+ u16 header_version;
+#define MTL_GSC_HEADER_VERSION 1
+
+ /* FW allows host to decide host_session handle as it sees fit. */
+ u64 host_session_handle;
+
+ /* handle generated by FW for messages that need to be re-submitted */
+ u64 gsc_message_handle;
+
+ u32 message_size; /* lower 20 bits only, upper 12 are reserved */
+
+ /*
+ * Flags mask:
+ * Bit 0: Pending
+	 * Bit 1: Session Cleanup
+	 * Bits 2-15: Flags
+	 * Bits 16-31: Extension Size
+	 * According to the internal spec, flags are either input or output;
+	 * we distinguish them using the OUTFLAG or INFLAG prefix
+ */
+ u32 flags;
+#define GSC_OUTFLAG_MSG_PENDING BIT(0)
+#define GSC_INFLAG_MSG_CLEANUP BIT(1)
+
+ u32 status;
+} __packed;
+
+#endif
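
To make the header layout concrete, here is a hedged editor's sketch (hypothetical helper names; it assumes this header is included and that message_size covers header plus payload, as the 20-bit field comment suggests):

#include <linux/bits.h>
#include <linux/types.h>

/*
 * Illustrative sketch only: fill the common fields of a single GSC
 * message. heci_client_id and host_session_handle are caller-chosen;
 * message_size is assumed to cover header + payload.
 */
static void sketch_init_gsc_header(struct intel_gsc_mtl_header *hdr,
				   u8 heci_client_id, u64 host_session,
				   u32 payload_size)
{
	hdr->validity_marker = GSC_HECI_VALIDITY_MARKER;
	hdr->heci_client_id = heci_client_id;
	hdr->header_version = MTL_GSC_HEADER_VERSION;
	hdr->host_session_handle = host_session;
	hdr->message_size = sizeof(*hdr) + payload_size;
}

/* A reply that must be re-submitted is signalled via the output flags. */
static bool sketch_gsc_msg_pending(const struct intel_gsc_mtl_header *hdr)
{
	return hdr->flags & GSC_OUTFLAG_MSG_PENDING;
}
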
diff --git a/drivers/gpu/drm/xe/abi/gsc_mkhi_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_mkhi_commands_abi.h
new file mode 100644
index 000000000..ad4d04187
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/gsc_mkhi_commands_abi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GSC_MKHI_COMMANDS_ABI_H
+#define _ABI_GSC_MKHI_COMMANDS_ABI_H
+
+#include <linux/types.h>
+
+/* Heci client ID for MKHI commands */
+#define HECI_MEADDRESS_MKHI 7
+
+/* Generic MKHI header */
+struct gsc_mkhi_header {
+ u8 group_id;
+ u8 command;
+ u8 reserved;
+ u8 result;
+} __packed;
+
+/* GFX_SRV commands */
+#define MKHI_GROUP_ID_GFX_SRV 0x30
+
+#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)
+
+struct gsc_get_compatibility_version_in {
+ struct gsc_mkhi_header header;
+} __packed;
+
+struct gsc_get_compatibility_version_out {
+ struct gsc_mkhi_header header;
+ u16 proj_major;
+ u16 compat_major;
+ u16 compat_minor;
+ u16 reserved[5];
+} __packed;
+
+#endif
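
As a usage illustration (an editor's sketch, not driver code), a host-compatibility-version query is just the MKHI header with the GFX_SRV group and command filled in; the firmware replies with the _out structure, where header.result reports success:

#include <linux/string.h>

/* Illustrative sketch: build the compatibility-version request. */
static void sketch_build_compat_query(struct gsc_get_compatibility_version_in *in)
{
	memset(in, 0, sizeof(*in));
	in->header.group_id = MKHI_GROUP_ID_GFX_SRV;
	in->header.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;
}
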
diff --git a/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
new file mode 100644
index 000000000..57520809e
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _ABI_GSC_PXP_COMMANDS_ABI_H
+#define _ABI_GSC_PXP_COMMANDS_ABI_H
+
+#include <linux/types.h>
+
+/* Heci client ID for PXP commands */
+#define HECI_MEADDRESS_PXP 17
+
+#define PXP_APIVER(x, y) (((x) & 0xFFFF) << 16 | ((y) & 0xFFFF))
+
+/*
+ * There are a lot of status codes for PXP, but we only define the cross-API
+ * common ones that we can actually handle in the kernel driver. Other failure
+ * codes should be logged as errors for debugging.
+ */
+enum pxp_status {
+ PXP_STATUS_SUCCESS = 0x0,
+ PXP_STATUS_ERROR_API_VERSION = 0x1002,
+ PXP_STATUS_NOT_READY = 0x100e,
+ PXP_STATUS_PLATFCONFIG_KF1_NOVERIF = 0x101a,
+ PXP_STATUS_PLATFCONFIG_KF1_BAD = 0x101f,
+ PXP_STATUS_OP_NOT_PERMITTED = 0x4013
+};
+
+/* Common PXP FW message header */
+struct pxp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ union {
+ u32 status; /* out */
+ u32 stream_id; /* in */
+#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0)
+#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1)
+#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2)
+ };
+ /* Length of the message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+#define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */
+
+/* PXP-Input-Packet: HUC Auth-only */
+struct pxp43_new_huc_auth_in {
+ struct pxp_cmd_header header;
+ u64 huc_base_address;
+ u32 huc_size;
+} __packed;
+
+/* PXP-Output-Packet: HUC Load and Authentication or Auth-only */
+struct pxp43_huc_auth_out {
+ struct pxp_cmd_header header;
+} __packed;
+
+#endif
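
For illustration, a hedged editor's sketch of filling the HuC authentication request defined above (the 4.3 API version is an assumption implied by the PXP43 prefix; buffer_len excludes the header per its comment):

#include <linux/string.h>

/* Illustrative sketch: build a PXP43 HuC authentication request. */
static void sketch_build_huc_auth(struct pxp43_new_huc_auth_in *msg,
				  u64 huc_base_address, u32 huc_size)
{
	memset(msg, 0, sizeof(*msg));
	msg->header.api_version = PXP_APIVER(4, 3);	/* assumed 4.3 */
	msg->header.command_id = PXP43_CMDID_NEW_HUC_AUTH;
	msg->header.buffer_len = sizeof(*msg) - sizeof(msg->header);
	msg->huc_base_address = huc_base_address;
	msg->huc_size = huc_size;
}
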
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
new file mode 100644
index 000000000..79ba98a16
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ACTIONS_ABI_H
+#define _ABI_GUC_ACTIONS_ABI_H
+
+/**
+ * DOC: HOST2GUC_SELF_CFG
+ *
+ * This message is used by the Host KMD to set up the `GuC Self Config KLVs`_.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_SELF_CFG` = 0x0508 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:16 | **KLV_KEY** - KLV key, see `GuC Self Config KLVs`_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **KLV_LEN** - KLV length |
+ * | | | |
+ * | | | - 32 bit KLV = 1 |
+ * | | | - 64 bit KLV = 2 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **VALUE32** - Bits 31-0 of the KLV value |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **VALUE64** - Bits 63-32 of the KLV value (**KLV_LEN** = 2) |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = **NUM** - 1 if KLV was parsed, 0 if not recognized |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_HOST2GUC_SELF_CFG 0x0508
+
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_SELF_CFG_RESPONSE_MSG_0_NUM GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
+ * DOC: HOST2GUC_CONTROL_CTB
+ *
+ * This H2G action allows the VF host to enable or disable the H2G and G2H `CT Buffer`_.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_CONTROL_CTB` = 0x4509 |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **CONTROL** - control `CTB based communication`_ |
+ * | | | |
+ * | | | - _`GUC_CTB_CONTROL_DISABLE` = 0 |
+ * | | | - _`GUC_CTB_CONTROL_ENABLE` = 1 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_HOST2GUC_CONTROL_CTB 0x4509
+
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL GUC_HXG_REQUEST_MSG_n_DATAn
+#define GUC_CTB_CONTROL_DISABLE 0u
+#define GUC_CTB_CONTROL_ENABLE 1u
+
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_CONTROL_CTB_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/* legacy definitions */
+
+enum xe_guc_action {
+ XE_GUC_ACTION_DEFAULT = 0x0,
+ XE_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
+ XE_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
+ XE_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ XE_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ XE_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ XE_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
+ XE_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+ XE_GUC_ACTION_ENTER_S_STATE = 0x501,
+ XE_GUC_ACTION_EXIT_S_STATE = 0x502,
+ XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
+ XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
+ XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
+ XE_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
+ XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+ XE_GUC_ACTION_SET_CONTEXT_PRIORITY = 0x1005,
+ XE_GUC_ACTION_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+ XE_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
+ XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
+ XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
+ XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
+ XE_GUC_ACTION_SETUP_PC_GUCRC = 0x3004,
+ XE_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ XE_GUC_ACTION_GET_HWCONFIG = 0x4100,
+ XE_GUC_ACTION_REGISTER_CONTEXT = 0x4502,
+ XE_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
+ XE_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
+ XE_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+ XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+ XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
+ XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
+ XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
+ XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
+ XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
+ XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY = 0x6004,
+ XE_GUC_ACTION_TLB_INVALIDATION = 0x7000,
+ XE_GUC_ACTION_TLB_INVALIDATION_DONE = 0x7001,
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL = 0x7002,
+ XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION = 0x8002,
+ XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
+ XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
+ XE_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
+ XE_GUC_ACTION_LIMIT
+};
+
+enum xe_guc_rc_options {
+ XE_GUCRC_HOST_CONTROL,
+ XE_GUCRC_FIRMWARE_CONTROL,
+};
+
+enum xe_guc_preempt_options {
+ XE_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
+ XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
+};
+
+enum xe_guc_report_status {
+ XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
+ XE_GUC_REPORT_STATUS_ACKED = 0x1,
+ XE_GUC_REPORT_STATUS_ERROR = 0x2,
+ XE_GUC_REPORT_STATUS_COMPLETE = 0x4,
+};
+
+enum xe_guc_sleep_state_status {
+ XE_GUC_SLEEP_STATE_SUCCESS = 0x1,
+ XE_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
+ XE_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
+#define XE_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
+#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
+#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
+#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
+#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+
+#define XE_GUC_TLB_INVAL_TYPE_SHIFT 0
+#define XE_GUC_TLB_INVAL_MODE_SHIFT 8
+/* Flush PPC or SMRO caches along with TLB invalidation request */
+#define XE_GUC_TLB_INVAL_FLUSH_CACHE (1 << 31)
+
+enum xe_guc_tlb_invalidation_type {
+ XE_GUC_TLB_INVAL_FULL = 0x0,
+ XE_GUC_TLB_INVAL_PAGE_SELECTIVE = 0x1,
+ XE_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX = 0x2,
+ XE_GUC_TLB_INVAL_GUC = 0x3,
+};
+
+/*
+ * 0: Heavy mode of Invalidation:
+ * The pipeline of the engine(s) for which the invalidation is targeted to is
+ * blocked, and all the in-flight transactions are guaranteed to be Globally
+ * Observed before completing the TLB invalidation
+ * 1: Lite mode of Invalidation:
+ * TLBs of the targeted engine(s) are immediately invalidated.
+ * In-flight transactions are NOT guaranteed to be Globally Observed before
+ * completing TLB invalidation.
+ * Lite Invalidation Mode is to be used only when
+ * it can be guaranteed (by SW) that the address translations remain invariant
+ * for the in-flight transactions across the TLB invalidation. In other words,
+ * this mode can be used when the TLB invalidation is intended to clear out
+ * stale cached translations that are no longer in use. Lite Invalidation Mode
+ * is much faster than Heavy Invalidation Mode, as it does not wait for the
+ * in-flight transactions to be Globally Observed.
+ */
+enum xe_guc_tlb_inval_mode {
+ XE_GUC_TLB_INVAL_MODE_HEAVY = 0x0,
+ XE_GUC_TLB_INVAL_MODE_LITE = 0x1,
+};
+
+#endif
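
To make the TLB invalidation encoding concrete, here is a minimal editor's sketch (not the driver's actual helper) composing the first action dword from the type/mode fields and the optional cache-flush bit defined above:

#include <linux/types.h>

/*
 * Illustrative sketch: pack type, mode and the optional cache flush
 * into the first dword of a XE_GUC_ACTION_TLB_INVALIDATION request.
 */
static u32 sketch_tlb_inval_dword(enum xe_guc_tlb_invalidation_type type,
				  enum xe_guc_tlb_inval_mode mode,
				  bool flush_cache)
{
	u32 dw = (type << XE_GUC_TLB_INVAL_TYPE_SHIFT) |
		 (mode << XE_GUC_TLB_INVAL_MODE_SHIFT);

	if (flush_cache)
		dw |= XE_GUC_TLB_INVAL_FLUSH_CACHE;

	return dw;
}
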
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
new file mode 100644
index 000000000..c165e26c0
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _GUC_ACTIONS_SLPC_ABI_H_
+#define _GUC_ACTIONS_SLPC_ABI_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: SLPC SHARED DATA STRUCTURE
+ *
+ * +----+------+--------------------------------------------------------------+
+ * | CL | Bytes| Description |
+ * +====+======+==============================================================+
+ * | 1 | 0-3 | SHARED DATA SIZE |
+ * | +------+--------------------------------------------------------------+
+ * | | 4-7 | GLOBAL STATE |
+ * | +------+--------------------------------------------------------------+
+ * | | 8-11 | DISPLAY DATA ADDRESS |
+ * | +------+--------------------------------------------------------------+
+ * | | 12:63| PADDING |
+ * +----+------+--------------------------------------------------------------+
+ * | | 0:63 | PADDING(PLATFORM INFO) |
+ * +----+------+--------------------------------------------------------------+
+ * | 3 | 0-3 | TASK STATE DATA |
+ * + +------+--------------------------------------------------------------+
+ * | | 4:63 | PADDING |
+ * +----+------+--------------------------------------------------------------+
+ * |4-21|0:1087| OVERRIDE PARAMS AND BIT FIELDS |
+ * +----+------+--------------------------------------------------------------+
+ * | | | PADDING + EXTRA RESERVED PAGE |
+ * +----+------+--------------------------------------------------------------+
+ */
+
+/*
+ * SLPC exposes certain parameters for global configuration by the host.
+ * These are referred to as override parameters, because in most cases
+ * the host will not need to modify the default values used by SLPC.
+ * SLPC remembers the default values which allows the host to easily restore
+ * them by simply unsetting the override. The host can set or unset override
+ * parameters during SLPC (re-)initialization using the SLPC Reset event.
+ * The host can also set or unset override parameters on the fly using the
+ * Parameter Set and Parameter Unset events
+ */
+
+#define SLPC_MAX_OVERRIDE_PARAMETERS 256
+#define SLPC_OVERRIDE_BITFIELD_SIZE \
+ (SLPC_MAX_OVERRIDE_PARAMETERS / 32)
+
+#define SLPC_PAGE_SIZE_BYTES 4096
+#define SLPC_CACHELINE_SIZE_BYTES 64
+#define SLPC_SHARED_DATA_SIZE_BYTE_HEADER SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE SLPC_PAGE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_MAX (2 * SLPC_PAGE_SIZE_BYTES)
+
+/*
+ * Cacheline size aligned (Total size needed for
+ * SLPC_MAX_OVERRIDE_PARAMETERS=256 is 1088 bytes)
+ */
+#define SLPC_OVERRIDE_PARAMS_TOTAL_BYTES (((((SLPC_MAX_OVERRIDE_PARAMETERS * 4) \
+ + ((SLPC_MAX_OVERRIDE_PARAMETERS / 32) * 4)) \
+ + (SLPC_CACHELINE_SIZE_BYTES - 1)) / SLPC_CACHELINE_SIZE_BYTES) * \
+ SLPC_CACHELINE_SIZE_BYTES)
+
+#define SLPC_SHARED_DATA_SIZE_BYTE_OTHER (SLPC_SHARED_DATA_SIZE_BYTE_MAX - \
+ (SLPC_SHARED_DATA_SIZE_BYTE_HEADER \
+ + SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO \
+ + SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE \
+ + SLPC_OVERRIDE_PARAMS_TOTAL_BYTES \
+ + SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE))
+
+enum slpc_task_enable {
+ SLPC_PARAM_TASK_DEFAULT = 0,
+ SLPC_PARAM_TASK_ENABLED,
+ SLPC_PARAM_TASK_DISABLED,
+ SLPC_PARAM_TASK_UNKNOWN
+};
+
+enum slpc_global_state {
+ SLPC_GLOBAL_STATE_NOT_RUNNING = 0,
+ SLPC_GLOBAL_STATE_INITIALIZING = 1,
+ SLPC_GLOBAL_STATE_RESETTING = 2,
+ SLPC_GLOBAL_STATE_RUNNING = 3,
+ SLPC_GLOBAL_STATE_SHUTTING_DOWN = 4,
+ SLPC_GLOBAL_STATE_ERROR = 5
+};
+
+enum slpc_param_id {
+ SLPC_PARAM_TASK_ENABLE_GTPERF = 0,
+ SLPC_PARAM_TASK_DISABLE_GTPERF = 1,
+ SLPC_PARAM_TASK_ENABLE_BALANCER = 2,
+ SLPC_PARAM_TASK_DISABLE_BALANCER = 3,
+ SLPC_PARAM_TASK_ENABLE_DCC = 4,
+ SLPC_PARAM_TASK_DISABLE_DCC = 5,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ = 6,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ = 7,
+ SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ = 8,
+ SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ = 9,
+ SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS = 10,
+ SLPC_PARAM_GLOBAL_DISABLE_GT_FREQ_MANAGEMENT = 11,
+ SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING = 12,
+ SLPC_PARAM_GLOBAL_DISABLE_RC6_MODE_CHANGE = 13,
+ SLPC_PARAM_GLOBAL_OC_UNSLICE_FREQ_MHZ = 14,
+ SLPC_PARAM_GLOBAL_OC_SLICE_FREQ_MHZ = 15,
+ SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING = 16,
+ SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO = 17,
+ SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE = 18,
+ SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE = 19,
+ SLPC_PARAM_GLOBAL_RT_MODE_TURBO_FREQ_DELTA_MHZ = 20,
+ SLPC_PARAM_PWRGATE_RC_MODE = 21,
+ SLPC_PARAM_EDR_MODE_COMPUTE_TIMEOUT_MS = 22,
+ SLPC_PARAM_EDR_QOS_FREQ_MHZ = 23,
+ SLPC_PARAM_MEDIA_FF_RATIO_MODE = 24,
+ SLPC_PARAM_ENABLE_IA_FREQ_LIMITING = 25,
+ SLPC_PARAM_STRATEGIES = 26,
+ SLPC_PARAM_POWER_PROFILE = 27,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY = 28,
+ SLPC_MAX_PARAM = 32,
+};
+
+enum slpc_media_ratio_mode {
+ SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL = 0,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE = 1,
+ SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO = 2,
+};
+
+enum slpc_gucrc_mode {
+ SLPC_GUCRC_MODE_HW = 0,
+ SLPC_GUCRC_MODE_GUCRC_NO_RC6 = 1,
+ SLPC_GUCRC_MODE_GUCRC_STATIC_TIMEOUT = 2,
+ SLPC_GUCRC_MODE_GUCRC_DYNAMIC_HYSTERESIS = 3,
+
+ SLPC_GUCRC_MODE_MAX,
+};
+
+enum slpc_event_id {
+ SLPC_EVENT_RESET = 0,
+ SLPC_EVENT_SHUTDOWN = 1,
+ SLPC_EVENT_PLATFORM_INFO_CHANGE = 2,
+ SLPC_EVENT_DISPLAY_MODE_CHANGE = 3,
+ SLPC_EVENT_FLIP_COMPLETE = 4,
+ SLPC_EVENT_QUERY_TASK_STATE = 5,
+ SLPC_EVENT_PARAMETER_SET = 6,
+ SLPC_EVENT_PARAMETER_UNSET = 7,
+};
+
+struct slpc_task_state_data {
+ union {
+ u32 task_status_padding;
+ struct {
+ u32 status;
+#define SLPC_GTPERF_TASK_ENABLED REG_BIT(0)
+#define SLPC_DCC_TASK_ENABLED REG_BIT(11)
+#define SLPC_IN_DCC REG_BIT(12)
+#define SLPC_BALANCER_ENABLED REG_BIT(15)
+#define SLPC_IBC_TASK_ENABLED REG_BIT(16)
+#define SLPC_BALANCER_IA_LMT_ENABLED REG_BIT(17)
+#define SLPC_BALANCER_IA_LMT_ACTIVE REG_BIT(18)
+ };
+ };
+ union {
+ u32 freq_padding;
+ struct {
+#define SLPC_MAX_UNSLICE_FREQ_MASK REG_GENMASK(7, 0)
+#define SLPC_MIN_UNSLICE_FREQ_MASK REG_GENMASK(15, 8)
+#define SLPC_MAX_SLICE_FREQ_MASK REG_GENMASK(23, 16)
+#define SLPC_MIN_SLICE_FREQ_MASK REG_GENMASK(31, 24)
+ u32 freq;
+ };
+ };
+} __packed;
+
+struct slpc_shared_data_header {
+ /* Total size in bytes of this shared buffer. */
+ u32 size;
+ u32 global_state;
+ u32 display_data_addr;
+} __packed;
+
+struct slpc_override_params {
+ u32 bits[SLPC_OVERRIDE_BITFIELD_SIZE];
+ u32 values[SLPC_MAX_OVERRIDE_PARAMETERS];
+} __packed;
+
+struct slpc_shared_data {
+ struct slpc_shared_data_header header;
+ u8 shared_data_header_pad[SLPC_SHARED_DATA_SIZE_BYTE_HEADER -
+ sizeof(struct slpc_shared_data_header)];
+
+ u8 platform_info_pad[SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO];
+
+ struct slpc_task_state_data task_state_data;
+ u8 task_state_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE -
+ sizeof(struct slpc_task_state_data)];
+
+ struct slpc_override_params override_params;
+ u8 override_params_pad[SLPC_OVERRIDE_PARAMS_TOTAL_BYTES -
+ sizeof(struct slpc_override_params)];
+
+ u8 shared_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_OTHER];
+
+ /* PAGE 2 (4096 bytes), mode based parameter will be removed soon */
+ u8 reserved_mode_definition[4096];
+} __packed;
+
+/**
+ * DOC: SLPC H2G MESSAGE FORMAT
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST` = 0x3003    |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:8 | **EVENT_ID** |
+ * + +-------+--------------------------------------------------------------+
+ * | | 7:0 | **EVENT_ARGC** - number of data arguments |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **EVENT_DATA1** |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| 31:0 | ... |
+ * +---+-------+--------------------------------------------------------------+
+ * |2+n| 31:0 | **EVENT_DATAn** |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST 0x3003
+
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN \
+ (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS 9
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_MAX_LEN \
+	(HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN + \
+ HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
+
+#endif
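
A short editor's sketch of packing dword 1 of the H2G request described above, using the masks this header defines:

#include <linux/bitfield.h>
#include <linux/types.h>

/*
 * Illustrative sketch: pack EVENT_ID and EVENT_ARGC into dword 1 of a
 * HOST2GUC_PC_SLPC_REQUEST message.
 */
static u32 sketch_slpc_request_msg1(enum slpc_event_id id, u32 argc)
{
	return FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) |
	       FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, argc);
}

For example, sketch_slpc_request_msg1(SLPC_EVENT_QUERY_TASK_STATE, 2) yields 0x00000502.
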
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
new file mode 100644
index 000000000..0b1146d0c
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_CTB_ABI_H
+#define _ABI_GUC_COMMUNICATION_CTB_ABI_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+#include "guc_messages_abi.h"
+
+/**
+ * DOC: CT Buffer
+ *
+ * Circular buffer used to send `CTB Message`_
+ */
+
+/**
+ * DOC: CTB Descriptor
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:0 | **HEAD** - offset (in dwords) to the last dword that was |
+ * | | | read from the `CT Buffer`_. |
+ * | | | It can only be updated by the receiver. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **TAIL** - offset (in dwords) to the last dword that was |
+ * | | | written to the `CT Buffer`_. |
+ * | | | It can only be updated by the sender. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **STATUS** - status of the CTB |
+ * | | | |
+ * | | | - _`GUC_CTB_STATUS_NO_ERROR` = 0 (normal operation) |
+ * | | | - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large) |
+ * | | | - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message) |
+ * | | | - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified) |
+ * +---+-------+--------------------------------------------------------------+
+ * |...| | RESERVED = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ * | 15| 31:0 | RESERVED = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+struct guc_ct_buffer_desc {
+ u32 head;
+ u32 tail;
+ u32 status;
+#define GUC_CTB_STATUS_NO_ERROR 0
+#define GUC_CTB_STATUS_OVERFLOW (1 << 0)
+#define GUC_CTB_STATUS_UNDERFLOW (1 << 1)
+#define GUC_CTB_STATUS_MISMATCH (1 << 2)
+ u32 reserved[13];
+} __packed;
+static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
+
+/**
+ * DOC: CTB Message
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **FENCE** - message identifier |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:12 | **FORMAT** - format of the CTB message |
+ * | | | - _`GUC_CTB_FORMAT_HXG` = 0 - see `CTB HXG Message`_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 11:8 | **RESERVED** |
+ * | +-------+--------------------------------------------------------------+
+ * | | 7:0 | **NUM_DWORDS** - length of the CTB message (w/o header) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | optional (depends on FORMAT) |
+ * +---+-------+ |
+ * |...| | |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_CTB_HDR_LEN 1u
+#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
+#define GUC_CTB_MSG_MAX_LEN 256u
+#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
+#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
+#define GUC_CTB_FORMAT_HXG 0u
+#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
+
+/**
+ * DOC: CTB HXG Message
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | FENCE |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:12 | FORMAT = GUC_CTB_FORMAT_HXG_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 11:8 | RESERVED = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_CTB_HXG_MSG_MIN_LEN (GUC_CTB_MSG_MIN_LEN + GUC_HXG_MSG_MIN_LEN)
+#define GUC_CTB_HXG_MSG_MAX_LEN GUC_CTB_MSG_MAX_LEN
+
+/**
+ * DOC: CTB based communication
+ *
+ * The CTB (command transport buffer) communication between Host and GuC
+ * is based on a u32 data stream written to a shared buffer. One buffer can
+ * be used to transmit data in only one direction (a one-directional channel).
+ *
+ * The current status of each buffer is maintained in the `CTB Descriptor`_.
+ * Each message in the data stream is encoded as a `CTB HXG Message`_.
+ */
+
+#endif
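
Because HEAD is advanced only by the receiver and TAIL only by the sender, each side can compute buffer occupancy without locking. A minimal editor's sketch (real code also validates HEAD/TAIL against the buffer size and raises GUC_CTB_STATUS_OVERFLOW):

#include <linux/types.h>

/*
 * Illustrative sketch: dwords used and free in the circular CT buffer,
 * from the HEAD/TAIL fields of the CTB Descriptor. One dword is kept
 * free so TAIL never catches up with HEAD.
 */
static u32 sketch_ctb_used(u32 head, u32 tail, u32 size)
{
	return tail >= head ? tail - head : size - head + tail;
}

static u32 sketch_ctb_space(u32 head, u32 tail, u32 size)
{
	return size - sketch_ctb_used(head, tail, size) - 1;
}
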
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_mmio_abi.h
new file mode 100644
index 000000000..ef538e34f
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_communication_mmio_abi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+#define _ABI_GUC_COMMUNICATION_MMIO_ABI_H
+
+/**
+ * DOC: GuC MMIO based communication
+ *
+ * The MMIO based communication between Host and GuC relies on special
+ * hardware registers whose format can be defined by the software
+ * (so-called scratch registers).
+ *
+ * Each MMIO based message, whether Host to GuC (H2G) or GuC to Host
+ * (G2H), is written directly into those scratch registers; the maximum
+ * message length depends on the number of available registers.
+ *
+ * For Gen9+, there are 16 software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 4 parameters and the GuC firmware
+ * itself uses a 4-element array to store the H2G message.
+ *
+ * For Gen11+, there are 4 additional registers 0x190240-0x19024C which,
+ * despite the lower count, are preferred over the legacy ones.
+ *
+ * The MMIO based communication is mainly used during the driver
+ * initialization phase, to set up the `CTB based communication`_ that
+ * will be used afterwards.
+ */
+
+#define GUC_MAX_MMIO_MSG_LEN 4
+
+/**
+ * DOC: MMIO HXG Message
+ *
+ * Format of the MMIO messages follows definitions of `HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#endif
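
As a rough illustration of the flow (editor's sketch; write_scratch is a hypothetical callback, not a real xe API), an H2G MMIO send copies the HXG dwords into consecutive scratch registers, after which the driver notifies the GuC and waits for the registers to be overwritten with the response:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Illustrative sketch with a hypothetical write_scratch(index, value)
 * helper: copy an HXG message into the scratch registers.
 */
static int sketch_mmio_h2g_send(const u32 *msg, u32 len,
				void (*write_scratch)(u32 index, u32 value))
{
	u32 i;

	if (!len || len > GUC_MAX_MMIO_MSG_LEN)
		return -EINVAL;

	for (i = 0; i < len; i++)
		write_scratch(i, msg[i]);

	/*
	 * The real driver then rings the GuC and polls register 0 for
	 * a reply with GUC_HXG_ORIGIN_GUC set.
	 */
	return 0;
}
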
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
new file mode 100644
index 000000000..ec83551bf
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_ERRORS_ABI_H
+#define _ABI_GUC_ERRORS_ABI_H
+
+enum xe_guc_response_status {
+ XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+ XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+};
+
+enum xe_guc_load_status {
+ XE_GUC_LOAD_STATUS_DEFAULT = 0x00,
+ XE_GUC_LOAD_STATUS_START = 0x01,
+ XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH = 0x02,
+ XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH = 0x03,
+ XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE = 0x04,
+ XE_GUC_LOAD_STATUS_GDT_DONE = 0x10,
+ XE_GUC_LOAD_STATUS_IDT_DONE = 0x20,
+ XE_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
+ XE_GUC_LOAD_STATUS_GUCINT_DONE = 0x40,
+ XE_GUC_LOAD_STATUS_DPC_READY = 0x50,
+ XE_GUC_LOAD_STATUS_DPC_ERROR = 0x60,
+ XE_GUC_LOAD_STATUS_EXCEPTION = 0x70,
+ XE_GUC_LOAD_STATUS_INIT_DATA_INVALID = 0x71,
+ XE_GUC_LOAD_STATUS_PXP_TEARDOWN_CTRL_ENABLED = 0x72,
+ XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
+ XE_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
+ XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
+
+ XE_GUC_LOAD_STATUS_READY = 0xF0,
+};
+
+#endif
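
The unnumbered RANGE_START/RANGE_END entries bracket the invalid-init-data statuses (0x73-0x74) so callers can range-check rather than enumerate each code; a small editor's sketch:

#include <linux/types.h>

/*
 * Illustrative sketch: RANGE_END is exclusive, since it takes the enum
 * value following the last bracketed status.
 */
static bool sketch_init_data_invalid(enum xe_guc_load_status status)
{
	return status >= XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START &&
	       status < XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END;
}
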
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
new file mode 100644
index 000000000..0400bc0fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_KLVS_ABI_H
+#define _ABI_GUC_KLVS_ABI_H
+
+#include <linux/types.h>
+
+/**
+ * DOC: GuC KLV
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31:16 | **KEY** - KLV key identifier |
+ * | | | - `GuC Self Config KLVs`_ |
+ * | | | - `GuC VGT Policy KLVs`_ |
+ * | | | - `GuC VF Configuration KLVs`_ |
+ * | | | |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **LEN** - length of VALUE (in 32bit dwords) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VALUE** - actual value of the KLV (format depends on KEY) |
+ * +---+-------+ |
+ * |...| | |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_KLV_LEN_MIN 1u
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
+
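
For illustration, a hedged editor's sketch (not driver code) of encoding one 64-bit KLV into the u32 stream described above, using the generic key/len masks:

#include <linux/bitfield.h>
#include <linux/kernel.h>

/*
 * Illustrative sketch: emit a KLV with a 64-bit value (LEN = 2), e.g.
 * GUC_KLV_VF_CFG_GGTT_START. 'dst' needs room for 3 dwords; returns
 * the next free slot.
 */
static u32 *sketch_encode_klv64(u32 *dst, u16 key, u64 value)
{
	*dst++ = FIELD_PREP(GUC_KLV_0_KEY, key) |
		 FIELD_PREP(GUC_KLV_0_LEN, 2);
	*dst++ = lower_32_bits(value);
	*dst++ = upper_32_bits(value);

	return dst;
}
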
+/**
+ * DOC: GuC Self Config KLVs
+ *
+ * `GuC KLV`_ keys available for use with HOST2GUC_SELF_CFG_.
+ *
+ * _`GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR` : 0x0900
+ * Refers to 64 bit Global Gfx address (in bytes) of memory based interrupts
+ * status vector for use by the GuC.
+ *
+ * _`GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR` : 0x0901
+ * Refers to 64 bit Global Gfx address (in bytes) of memory based interrupts
+ * source vector for use by the GuC.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_ADDR` : 0x0902
+ * Refers to 64 bit Global Gfx address of H2G `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR` : 0x0903
+ * Refers to 64 bit Global Gfx address of H2G `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_H2G_CTB_SIZE` : 0x0904
+ * Refers to size of H2G `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_ADDR` : 0x0905
+ * Refers to 64 bit Global Gfx address of G2H `CT Buffer`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR` : 0x0906
+ * Refers to 64 bit Global Gfx address of G2H `CTB Descriptor`_.
+ * Should be above WOPCM address but below APIC base address for native mode.
+ *
+ * _`GUC_KLV_SELF_CFG_G2H_CTB_SIZE` : 0x0907
+ * Refers to size of G2H `CT Buffer`_ in bytes.
+ * Should be a multiple of 4K.
+ */
+
+#define GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY 0x0900
+#define GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY 0x0901
+#define GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY 0x0902
+#define GUC_KLV_SELF_CFG_H2G_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY 0x0903
+#define GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY 0x0904
+#define GUC_KLV_SELF_CFG_H2G_CTB_SIZE_LEN 1u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY 0x0905
+#define GUC_KLV_SELF_CFG_G2H_CTB_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY 0x0906
+#define GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_LEN 2u
+
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
+#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
+
+/*
+ * Per context scheduling policy update keys.
+ */
+enum {
+ GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002,
+ GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY = 0x2004,
+ GUC_CONTEXT_POLICIES_KLV_ID_SLPM_GT_FREQUENCY = 0x2005,
+
+ GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
+};
+
+/**
+ * DOC: GuC VGT Policy KLVs
+ *
+ * `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
+ *
+ * _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
+ * This config sets whether strict scheduling is enabled whereby any VF
+ * that doesn’t have work to submit is still allocated a fixed execution
+ *      time-slice to ensure active VFs' execution is always consistent even
+ *      during other VF reprovisioning / rebooting events. Changing this KLV
+ * impacts all VFs and takes effect on the next VF-Switch event.
+ *
+ * :0: don't schedule idle (default)
+ * :1: schedule if idle
+ *
+ * _`GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD` : 0x8002
+ * This config sets the sample period for tracking adverse event counters.
+ *      A sample period is the period in milliseconds during which events are counted.
+ * This is applicable for all the VFs.
+ *
+ * :0: adverse events are not counted (default)
+ * :n: sample period in milliseconds
+ *
+ * _`GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH` : 0x8D00
+ *      This config resets the utilized HW engines after a VF switch (i.e.
+ *      to clean up stale HW registers left behind by the previous VF).
+ *
+ * :0: don't reset (default)
+ * :1: reset
+ */
+
+#define GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY 0x8001
+#define GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_LEN 1u
+
+#define GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY 0x8002
+#define GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_LEN 1u
+
+#define GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY 0x8D00
+#define GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_LEN 1u
+
+/**
+ * DOC: GuC VF Configuration KLVs
+ *
+ * `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VF_CFG.
+ *
+ * _`GUC_KLV_VF_CFG_GGTT_START` : 0x0001
+ * A 4K aligned start GTT address/offset assigned to VF.
+ * Value is 64 bits.
+ *
+ * _`GUC_KLV_VF_CFG_GGTT_SIZE` : 0x0002
+ * A 4K aligned size of GGTT assigned to VF.
+ * Value is 64 bits.
+ *
+ * _`GUC_KLV_VF_CFG_LMEM_SIZE` : 0x0003
+ * A 2M aligned size of local memory assigned to VF.
+ * Value is 64 bits.
+ *
+ * _`GUC_KLV_VF_CFG_NUM_CONTEXTS` : 0x0004
+ * Refers to the number of contexts allocated to this VF.
+ *
+ * :0: no contexts (default)
+ * :1-65535: number of contexts (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_TILE_MASK` : 0x0005
+ * For multi-tiled products, this field contains the bitwise-OR of tiles
+ *      assigned to the VF. Bit 0 set means the VF has access to Tile 0,
+ *      bit 31 set means access to Tile 31, and so on.
+ *      At least one tile will always be allocated.
+ *      If all bits are zero, the VF KMD should treat this as a fatal error.
+ *      For single-tile products, this KLV config is ignored.
+ *
+ * _`GUC_KLV_VF_CFG_NUM_DOORBELLS` : 0x0006
+ * Refers to the number of doorbells allocated to this VF.
+ *
+ * :0: no doorbells (default)
+ * :1-255: number of doorbells (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_EXEC_QUANTUM` : 0x8A01
+ *      This config sets the VF's execution quantum in milliseconds.
+ *      GUC will attempt to obey the maximum values as much as HW is capable
+ *      of and this will never be perfectly exact (accumulated nano-second
+ *      granularity) since the GPU's clock runs off a different crystal
+ *      from the CPU's clock. Changing this KLV on a VF that is currently
+ *      running a context won't take effect until a new context is scheduled in.
+ *      That said, when the PF is changing this value from 0xFFFFFFFF to
+ *      something else, it might never take effect if the VF is running an
+ *      infinitely long compute or shader kernel. In such a scenario, the
+ * PF would need to trigger a VM PAUSE and then change the KLV to force
+ * it to take effect. Such cases might typically happen on a 1PF+1VF
+ * Virtualization config enabled for heavier workloads like AI/ML.
+ *
+ * :0: infinite exec quantum (default)
+ *
+ * _`GUC_KLV_VF_CFG_PREEMPT_TIMEOUT` : 0x8A02
+ * This config sets the VF preemption timeout in microseconds.
+ * GuC will attempt to obey the minimum and maximum values as much as
+ * the HW is capable of, and this will never be perfectly exact
+ * (accumulated nanosecond granularity) since the GPU's clock runs off
+ * a different crystal from the CPU's clock. Changing this KLV on a VF
+ * that is currently running a context won't take effect until a new
+ * context is scheduled in.
+ * That said, when the PF is changing this value from 0xFFFFFFFF to
+ * something else, it might never take effect if the VF is running an
+ * infinitely long compute or shader kernel.
+ * In this case, the PF would need to trigger a VM PAUSE and then change
+ * the KLV to force it to take effect. Such cases might typically happen
+ * on a 1PF+1VF virtualization config enabled for heavier workloads like
+ * AI/ML.
+ *
+ * :0: no preemption timeout (default)
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR` : 0x8A03
+ * This config sets the threshold for CAT errors caused by the VF.
+ *
+ * :0: adverse events or errors will not be reported (default)
+ * :n: event occurrence count per sampling interval
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_ENGINE_RESET` : 0x8A04
+ * This config sets the threshold for engine resets caused by the VF.
+ *
+ * :0: adverse events or errors will not be reported (default)
+ * :n: event occurrence count per sampling interval
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_PAGE_FAULT` : 0x8A05
+ * This config sets the threshold for page fault errors caused by the VF.
+ *
+ * :0: adverse events or errors will not be reported (default)
+ * :n: event occurrence count per sampling interval
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_H2G_STORM` : 0x8A06
+ * This config sets the threshold for H2G interrupts triggered by the VF.
+ *
+ * :0: adverse events or errors will not be reported (default)
+ * :n: time (us) per sampling interval
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_IRQ_STORM` : 0x8A07
+ * This config sets the threshold for GT interrupts triggered by the VF's
+ * workloads.
+ *
+ * :0: adverse events or errors will not be reported (default)
+ * :n: time (us) per sampling interval
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_DOORBELL_STORM` : 0x8A08
+ * This config sets the threshold for doorbell rings triggered by the VF.
+ *
+ * :0: adverse events or errors will not be reported (default)
+ * :n: time (us) per sampling interval
+ *
+ * _`GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID` : 0x8A0A
+ * Refers to the start index of the doorbell range assigned to this VF.
+ *
+ * :0: (default)
+ * :1-255: number of doorbells (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID` : 0x8A0B
+ * Refers to the start index in the context array allocated to this VF's use.
+ *
+ * :0: (default)
+ * :1-65535: number of contexts (Gen12)
+ */
+
+#define GUC_KLV_VF_CFG_GGTT_START_KEY 0x0001
+#define GUC_KLV_VF_CFG_GGTT_START_LEN 2u
+
+#define GUC_KLV_VF_CFG_GGTT_SIZE_KEY 0x0002
+#define GUC_KLV_VF_CFG_GGTT_SIZE_LEN 2u
+
+#define GUC_KLV_VF_CFG_LMEM_SIZE_KEY 0x0003
+#define GUC_KLV_VF_CFG_LMEM_SIZE_LEN 2u
+
+#define GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY 0x0004
+#define GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN 1u
+
+#define GUC_KLV_VF_CFG_TILE_MASK_KEY 0x0005
+#define GUC_KLV_VF_CFG_TILE_MASK_LEN 1u
+
+#define GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY 0x0006
+#define GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN 1u
+
+#define GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY 0x8a01
+#define GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN 1u
+
+#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY 0x8a02
+#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN 1u
+
+#define GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR_KEY 0x8a03
+#define GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR_LEN 1u
+
+#define GUC_KLV_VF_CFG_THRESHOLD_ENGINE_RESET_KEY 0x8a04
+#define GUC_KLV_VF_CFG_THRESHOLD_ENGINE_RESET_LEN 1u
+
+#define GUC_KLV_VF_CFG_THRESHOLD_PAGE_FAULT_KEY 0x8a05
+#define GUC_KLV_VF_CFG_THRESHOLD_PAGE_FAULT_LEN 1u
+
+#define GUC_KLV_VF_CFG_THRESHOLD_H2G_STORM_KEY 0x8a06
+#define GUC_KLV_VF_CFG_THRESHOLD_H2G_STORM_LEN 1u
+
+#define GUC_KLV_VF_CFG_THRESHOLD_IRQ_STORM_KEY 0x8a07
+#define GUC_KLV_VF_CFG_THRESHOLD_IRQ_STORM_LEN 1u
+
+#define GUC_KLV_VF_CFG_THRESHOLD_DOORBELL_STORM_KEY 0x8a08
+#define GUC_KLV_VF_CFG_THRESHOLD_DOORBELL_STORM_LEN 1u
+
+#define GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_KEY 0x8a0a
+#define GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_LEN 1u
+
+#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
+#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+
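+/*
+ * Illustrative sketch only, not part of the upstream header: a 64-bit VF
+ * config KLV such as GGTT_START spans two value dwords (LEN is 2u). The
+ * low-dword-first ordering and the helper name are assumptions; reuses
+ * <linux/bitfield.h> from the sketch above plus lower/upper_32_bits()
+ * from <linux/kernel.h>.
+ */
+static inline u32 *guc_klv_emit_ggtt_start(u32 *buf, u64 ggtt_start)
+{
+	*buf++ = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_GGTT_START_KEY) |
+		 FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_GGTT_START_LEN);
+	*buf++ = lower_32_bits(ggtt_start);	/* value dword 0 */
+	*buf++ = upper_32_bits(ggtt_start);	/* value dword 1 */
+	return buf;
+}
+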
+#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
new file mode 100644
index 000000000..29e414c82
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2021 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_MESSAGES_ABI_H
+#define _ABI_GUC_MESSAGES_ABI_H
+
+/**
+ * DOC: HXG Message
+ *
+ * All messages exchanged with GuC are defined using 32-bit dwords.
+ * First dword is treated as a message header. Remaining dwords are optional.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | | | |
+ * | 0 | 31 | **ORIGIN** - originator of the message |
+ * | | | - _`GUC_HXG_ORIGIN_HOST` = 0 |
+ * | | | - _`GUC_HXG_ORIGIN_GUC` = 1 |
+ * | | | |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | **TYPE** - message type |
+ * | | | - _`GUC_HXG_TYPE_REQUEST` = 0 |
+ * | | | - _`GUC_HXG_TYPE_EVENT` = 1 |
+ * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3 |
+ * | | | - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5 |
+ * | | | - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6 |
+ * | | | - _`GUC_HXG_TYPE_RESPONSE_SUCCESS` = 7 |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **AUX** - auxiliary data (depends on TYPE) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **PAYLOAD** - optional payload (depends on TYPE) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_MSG_MIN_LEN 1u
+#define GUC_HXG_MSG_0_ORIGIN (0x1u << 31)
+#define GUC_HXG_ORIGIN_HOST 0u
+#define GUC_HXG_ORIGIN_GUC 1u
+#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
+#define GUC_HXG_TYPE_REQUEST 0u
+#define GUC_HXG_TYPE_EVENT 1u
+#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
+#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
+#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
+#define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u
+#define GUC_HXG_MSG_0_AUX (0xfffffffu << 0)
+#define GUC_HXG_MSG_n_PAYLOAD (0xffffffffu << 0)
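+
+/*
+ * Illustrative sketch only, not part of the upstream header: splitting a
+ * received HXG header dword into its fields; the helper name is
+ * hypothetical.
+ */
+#include <linux/bitfield.h>
+
+static inline void guc_hxg_msg_0_decode(u32 msg_0, u32 *origin, u32 *type, u32 *aux)
+{
+	*origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg_0);	/* bit 31 */
+	*type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg_0);		/* bits 30:28 */
+	*aux = FIELD_GET(GUC_HXG_MSG_0_AUX, msg_0);		/* bits 27:0 */
+}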
+
+/**
+ * DOC: HXG Request
+ *
+ * The `HXG Request`_ message should be used to initiate synchronous activity
+ * for which confirmation or return data is expected.
+ *
+ * The recipient of this message shall use `HXG Response`_, `HXG Failure`_
+ * or `HXG Retry`_ message as a definite reply, and may use `HXG Busy`_
+ * message as an intermediate reply.
+ *
+ * Format of @DATA0 and all @DATAn fields depends on the @ACTION code.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **DATA0** - request data (depends on ACTION) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **ACTION** - requested action code |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **DATAn** - optional data (depends on ACTION) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0)
+#define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
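+
+/*
+ * Illustrative sketch only, not part of the upstream header: composing a
+ * request header dword from the masks above (ACTION must fit in 16 bits,
+ * DATA0 in 12 bits); the helper name is hypothetical.
+ */
+static inline u32 guc_hxg_request_msg_0(u32 action, u32 data0)
+{
+	return FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+	       FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+	       FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, data0) |
+	       FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action);
+}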
+
+/**
+ * DOC: HXG Event
+ *
+ * The `HXG Event`_ message should be used to initiate asynchronous activity
+ * that does not involve immediate confirmation or data.
+ *
+ * Format of @DATA0 and all @DATAn fields depends on the @ACTION code.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **DATA0** - event data (depends on ACTION) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **ACTION** - event action code |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **DATAn** - optional event data (depends on ACTION) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION (0xffffu << 0)
+#define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
+
+/**
+ * DOC: HXG Busy
+ *
+ * The `HXG Busy`_ message may be used to acknowledge reception of the `HXG Request`_
+ * message if the recipient expects that its processing will take longer
+ * than the default timeout.
+ *
+ * The @COUNTER field may be used as a progress indicator.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_NO_RESPONSE_BUSY_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **COUNTER** - progress indicator |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_BUSY_MSG_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_BUSY_MSG_0_COUNTER GUC_HXG_MSG_0_AUX
+
+/**
+ * DOC: HXG Retry
+ *
+ * The `HXG Retry`_ message should be used by the recipient to indicate
+ * that the `HXG Request`_ message was dropped and should be resent.
+ *
+ * The @REASON field may be used to provide additional information.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_NO_RESPONSE_RETRY_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **REASON** - reason for retry |
+ * | | | - _`GUC_HXG_RETRY_REASON_UNSPECIFIED` = 0 |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_RETRY_MSG_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_RETRY_MSG_0_REASON GUC_HXG_MSG_0_AUX
+#define GUC_HXG_RETRY_REASON_UNSPECIFIED 0u
+
+/**
+ * DOC: HXG Failure
+ *
+ * The `HXG Failure`_ message shall be used as a reply to the `HXG Request`_
+ * message that could not be processed due to an error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_FAILURE_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | **HINT** - additional error hint |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | **ERROR** - error/result code |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_FAILURE_MSG_0_HINT (0xfffu << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffffu << 0)
+
+/**
+ * DOC: HXG Response
+ *
+ * The `HXG Response`_ message shall be used as a reply to the `HXG Request`_
+ * message that was successfully processed without an error.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | **DATA0** - data (depends on ACTION from `HXG Request`_) |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | **DATAn** - data (depends on ACTION from `HXG Request`_) |
+ * +---+-------+ |
+ * | n | 31:0 | |
+ * +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_RESPONSE_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_RESPONSE_MSG_0_DATA0 GUC_HXG_MSG_0_AUX
+#define GUC_HXG_RESPONSE_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
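+
+/*
+ * Illustrative sketch only, not part of the upstream header: mapping a
+ * reply header dword from GuC to an errno, tying the reply types above
+ * together. The errno choices are assumptions for illustration, not the
+ * xe driver's actual policy; assumes <linux/errno.h>.
+ */
+static inline int guc_hxg_reply_to_errno(u32 msg_0)
+{
+	if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg_0) != GUC_HXG_ORIGIN_GUC)
+		return -EPROTO;
+
+	switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg_0)) {
+	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+		return 0;		/* DATA0/DATAn carry the result */
+	case GUC_HXG_TYPE_NO_RESPONSE_BUSY:
+		return -EBUSY;		/* COUNTER may indicate progress */
+	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+		return -EAGAIN;		/* resend the request */
+	case GUC_HXG_TYPE_RESPONSE_FAILURE:
+		return -EIO;		/* ERROR/HINT identify the failure */
+	default:
+		return -EPROTO;
+	}
+}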
+
+/* deprecated */
+#define INTEL_GUC_MSG_TYPE_SHIFT 28
+#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
+#define INTEL_GUC_MSG_DATA_SHIFT 16
+#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT)
+#define INTEL_GUC_MSG_CODE_SHIFT 0
+#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT)
+
+enum intel_guc_msg_type {
+ INTEL_GUC_MSG_TYPE_REQUEST = 0x0,
+ INTEL_GUC_MSG_TYPE_RESPONSE = 0xF,
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
new file mode 100644
index 000000000..710cecca9
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
@@ -0,0 +1 @@
+/* Empty */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
new file mode 100644
index 000000000..650ea2803
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _I915_GEM_MMAN_H_
+#define _I915_GEM_MMAN_H_
+
+#include "xe_bo_types.h"
+#include <drm/drm_prime.h>
+
+static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma)
+{
+ return drm_gem_prime_mmap(&bo->ttm.base, vma);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
new file mode 100644
index 000000000..777c20cea
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _I915_GEM_OBJECT_H_
+#define _I915_GEM_OBJECT_H_
+
+#include <linux/types.h>
+
+#include "xe_bo.h"
+
+#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */
+
+static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
+{
+ /* Should never be called */
+ WARN_ON(1);
+ return n;
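+	/*
+	 * Read upper, then lower, then upper again: if the upper dword
+	 * changed in between, the 64-bit value rolled over mid-read, so
+	 * retry the pair (at most twice) for a consistent snapshot.
+	 */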
+}
+
+static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo)
+{
+ /* legacy tiling is unused */
+ return false;
+}
+
+static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo)
+{
+ /* userptr gem objects are unused by xe */
+ return false;
+}
+
+static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
+ u32 ofs, u64 *ptr, u32 size)
+{
+ struct ttm_bo_kmap_obj map;
+ void *src;
+ bool is_iomem;
+ int ret;
+
+ ret = xe_bo_lock(bo, true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map);
+ if (ret)
+ goto out_unlock;
+
+ ofs &= ~PAGE_MASK;
+ src = ttm_kmap_obj_virtual(&map, &is_iomem);
+ src += ofs;
+ if (is_iomem)
+ memcpy_fromio(ptr, (void __iomem *)src, size);
+ else
+ memcpy(ptr, src, size);
+
+ ttm_bo_kunmap(&map);
+out_unlock:
+ xe_bo_unlock(bo);
+ return ret;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h
new file mode 100644
index 000000000..2a3f12d29
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _I915_GEM_OBJECT_FRONTBUFFER_H_
+#define _I915_GEM_OBJECT_FRONTBUFFER_H_
+
+#define i915_gem_object_get_frontbuffer(obj) NULL
+#define i915_gem_object_set_frontbuffer(obj, front) (front)
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
new file mode 100644
index 000000000..21fec9cc8
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_RPS_H__
+#define __INTEL_RPS_H__
+
+#define gen5_rps_irq_handler(x) ({})
+
+#endif /* __INTEL_RPS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_active.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_active.h
new file mode 100644
index 000000000..6f0ab3753
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_active.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _I915_ACTIVE_H_
+#define _I915_ACTIVE_H_
+
+#include "i915_active_types.h"
+
+static inline void i915_active_init(struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ unsigned long flags)
+{
+ (void) active;
+ (void) retire;
+}
+
+#define i915_active_fini(active) do { } while (0)
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_active_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_active_types.h
new file mode 100644
index 000000000..8c31f9a8b
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_active_types.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_ACTIVE_TYPES_H_
+#define _I915_ACTIVE_TYPES_H_
+
+struct i915_active {};
+#define I915_ACTIVE_RETIRE_SLEEPS 0
+
+#endif /* _I915_ACTIVE_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_config.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_config.h
new file mode 100644
index 000000000..e835bea08
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_config.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_CONFIG_H__
+#define __I915_CONFIG_H__
+
+#include <linux/sched.h>
+
+struct drm_i915_private;
+
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+ return MAX_SCHEDULE_TIMEOUT;
+}
+
+#endif /* __I915_CONFIG_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
new file mode 100644
index 000000000..b4c47617b
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_H__
+#define __I915_DEBUGFS_H__
+
+struct drm_i915_gem_object;
+struct seq_file;
+
+static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
+
+#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
new file mode 100644
index 000000000..5d2a77b52
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef _XE_I915_DRV_H_
+#define _XE_I915_DRV_H_
+
+/*
+ * "Adaptation header" to allow i915 display to also build for xe driver.
+ * TODO: refactor i915 and xe so this can cease to exist
+ */
+
+#include <drm/drm_drv.h>
+
+#include "gem/i915_gem_object.h"
+
+#include "soc/intel_pch.h"
+#include "xe_device.h"
+#include "xe_bo.h"
+#include "xe_pm.h"
+#include "xe_step.h"
+#include "i915_gem.h"
+#include "i915_gem_stolen.h"
+#include "i915_gpu_error.h"
+#include "i915_reg_defs.h"
+#include "i915_utils.h"
+#include "intel_gt_types.h"
+#include "intel_step.h"
+#include "intel_uc_fw.h"
+#include "intel_uncore.h"
+#include "intel_runtime_pm.h"
+#include <linux/pm_runtime.h>
+
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+ return container_of(dev, struct drm_i915_private, drm);
+}
+
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
+{
+ return dev_get_drvdata(kdev);
+}
+
+
+#define INTEL_JASPERLAKE 0
+#define INTEL_ELKHARTLAKE 0
+#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
+#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
+#define INTEL_DEVID(dev_priv) ((dev_priv)->info.devid)
+#define IS_I830(dev_priv) (dev_priv && 0)
+#define IS_I845G(dev_priv) (dev_priv && 0)
+#define IS_I85X(dev_priv) (dev_priv && 0)
+#define IS_I865G(dev_priv) (dev_priv && 0)
+#define IS_I915G(dev_priv) (dev_priv && 0)
+#define IS_I915GM(dev_priv) (dev_priv && 0)
+#define IS_I945G(dev_priv) (dev_priv && 0)
+#define IS_I945GM(dev_priv) (dev_priv && 0)
+#define IS_I965G(dev_priv) (dev_priv && 0)
+#define IS_I965GM(dev_priv) (dev_priv && 0)
+#define IS_G45(dev_priv) (dev_priv && 0)
+#define IS_GM45(dev_priv) (dev_priv && 0)
+#define IS_G4X(dev_priv) (dev_priv && 0)
+#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
+#define IS_G33(dev_priv) (dev_priv && 0)
+#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
+#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
+#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
+#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
+#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
+#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
+#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
+#define IS_HASWELL(dev_priv) (dev_priv && 0)
+#define IS_BROADWELL(dev_priv) (dev_priv && 0)
+#define IS_SKYLAKE(dev_priv) (dev_priv && 0)
+#define IS_BROXTON(dev_priv) (dev_priv && 0)
+#define IS_KABYLAKE(dev_priv) (dev_priv && 0)
+#define IS_GEMINILAKE(dev_priv) (dev_priv && 0)
+#define IS_COFFEELAKE(dev_priv) (dev_priv && 0)
+#define IS_COMETLAKE(dev_priv) (dev_priv && 0)
+#define IS_ICELAKE(dev_priv) (dev_priv && 0)
+#define IS_JASPERLAKE(dev_priv) (dev_priv && 0)
+#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0)
+#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE)
+#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
+#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
+#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
+#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
+#define IS_XEHPSDV(dev_priv) (dev_priv && 0)
+#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
+#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
+#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
+#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
+
+#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
+#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
+#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
+
+#define IP_VER(ver, rel) ((ver) << 8 | (rel))
+
+#define INTEL_DISPLAY_ENABLED(xe) (HAS_DISPLAY((xe)) && !intel_opregion_headless_sku((xe)))
+
+#define IS_GRAPHICS_VER(xe, first, last) \
+ ((xe)->info.graphics_verx100 >= first * 100 && \
+ (xe)->info.graphics_verx100 <= (last*100 + 99))
+#define IS_MOBILE(xe) (xe && 0)
+#define HAS_LLC(xe) (!IS_DGFX((xe)))
+
+#define HAS_GMD_ID(xe) GRAPHICS_VERx100(xe) >= 1270
+
+/* Workarounds not handled yet */
+#define IS_DISPLAY_STEP(xe, first, last) ({u8 __step = (xe)->info.step.display; first <= __step && __step <= last; })
+#define IS_GRAPHICS_STEP(xe, first, last) ({u8 __step = (xe)->info.step.graphics; first <= __step && __step <= last; })
+
+#define IS_LP(xe) (0)
+#define IS_GEN9_LP(xe) (0)
+#define IS_GEN9_BC(xe) (0)
+
+#define IS_TIGERLAKE_UY(xe) (xe && 0)
+#define IS_COMETLAKE_ULX(xe) (xe && 0)
+#define IS_COFFEELAKE_ULX(xe) (xe && 0)
+#define IS_KABYLAKE_ULX(xe) (xe && 0)
+#define IS_SKYLAKE_ULX(xe) (xe && 0)
+#define IS_HASWELL_ULX(xe) (xe && 0)
+#define IS_COMETLAKE_ULT(xe) (xe && 0)
+#define IS_COFFEELAKE_ULT(xe) (xe && 0)
+#define IS_KABYLAKE_ULT(xe) (xe && 0)
+#define IS_SKYLAKE_ULT(xe) (xe && 0)
+
+#define IS_DG1_GRAPHICS_STEP(xe, first, last) (IS_DG1(xe) && IS_GRAPHICS_STEP(xe, first, last))
+#define IS_DG2_GRAPHICS_STEP(xe, variant, first, last) \
+ ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_ ## variant && \
+ IS_GRAPHICS_STEP(xe, first, last))
+#define IS_XEHPSDV_GRAPHICS_STEP(xe, first, last) (IS_XEHPSDV(xe) && IS_GRAPHICS_STEP(xe, first, last))
+
+/* XXX: No basedie stepping support yet */
+#define IS_PVC_BD_STEP(xe, first, last) (!WARN_ON(1) && IS_PONTEVECCHIO(xe))
+
+#define IS_TIGERLAKE_DISPLAY_STEP(xe, first, last) (IS_TIGERLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_ROCKETLAKE_DISPLAY_STEP(xe, first, last) (IS_ROCKETLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_DG1_DISPLAY_STEP(xe, first, last) (IS_DG1(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_DG2_DISPLAY_STEP(xe, first, last) (IS_DG2(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_ADLP_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_P(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_ADLS_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_S(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_JSL_EHL_DISPLAY_STEP(xe, first, last) (IS_JSL_EHL(xe) && IS_DISPLAY_STEP(xe, first, last))
+#define IS_MTL_DISPLAY_STEP(xe, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
+
+/* FIXME: Add subplatform here */
+#define IS_MTL_GRAPHICS_STEP(xe, sub, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
+
+#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
+#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
+#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
+#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
+#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
+#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
+#define to_intel_bo(x) gem_to_xe_bo((x))
+#define mkwrite_device_info(xe) (INTEL_INFO(xe))
+
+#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
+
+#define intel_has_gpu_reset(a) (a && 0)
+
+#include "intel_wakeref.h"
+
+static inline bool intel_runtime_pm_get(struct xe_runtime_pm *pm)
+{
+ struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
+
+ if (xe_pm_runtime_get(xe) < 0) {
+ xe_pm_runtime_put(xe);
+ return false;
+ }
+ return true;
+}
+
+static inline bool intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
+{
+ struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
+
+ return xe_pm_runtime_get_if_active(xe);
+}
+
+static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
+{
+ struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
+
+ xe_pm_runtime_put(xe);
+}
+
+static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, bool wakeref)
+{
+ if (wakeref)
+ intel_runtime_pm_put_unchecked(pm);
+}
+
+#define intel_runtime_pm_get_raw intel_runtime_pm_get
+#define intel_runtime_pm_put_raw intel_runtime_pm_put
+#define assert_rpm_wakelock_held(x) do { } while (0)
+#define assert_rpm_raw_wakeref_held(x) do { } while (0)
+
+#define intel_uncore_forcewake_get(x, y) do { } while (0)
+#define intel_uncore_forcewake_put(x, y) do { } while (0)
+
+#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)
+
+#define I915_PRIORITY_DISPLAY 0
+struct i915_sched_attr {
+ int priority;
+};
+#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
+
+#define with_intel_runtime_pm(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
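+
+/*
+ * Illustrative usage, not from the original header: the body runs with a
+ * wakeref held and the wakeref is released when the scope exits.
+ *
+ *	intel_wakeref_t wf;
+ *
+ *	with_intel_runtime_pm(&xe->runtime_pm, wf)
+ *		intel_uncore_write(uncore, reg, val);
+ */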
+
+#define pdev_to_i915 pdev_to_xe_device
+#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)
+
+#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
+#define HPD_STORM_DEFAULT_THRESHOLD 50
+
+#ifdef CONFIG_ARM64
+/*
+ * arm64 indirectly includes linux/rtc.h,
+ * which defines a irq_lock, so include it
+ * here before #define-ing it
+ */
+#include <linux/rtc.h>
+#endif
+
+#define irq_lock irq.lock
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
new file mode 100644
index 000000000..12c671fd5
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/i915_fixed.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h
new file mode 100644
index 000000000..06b723a47
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_GEM_H__
+#define __I915_GEM_H__
+#define GEM_BUG_ON
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
new file mode 100644
index 000000000..888e7a87a
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h
@@ -0,0 +1,79 @@
+#ifndef _I915_GEM_STOLEN_H_
+#define _I915_GEM_STOLEN_H_
+
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_res_cursor.h"
+
+struct xe_bo;
+
+struct i915_stolen_fb {
+ struct xe_bo *bo;
+};
+
+static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
+ struct i915_stolen_fb *fb,
+ u32 size, u32 align,
+ u32 start, u32 end)
+{
+ struct xe_bo *bo;
+ int err;
+ u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+
+ bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
+ NULL, size, start, end,
+ ttm_bo_type_kernel, flags);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ bo = NULL;
+ return err;
+ }
+ err = xe_bo_pin(bo);
+ xe_bo_unlock_vm_held(bo);
+
+ if (err) {
+ xe_bo_put(bo); /* drop the bo we just created; fb->bo is set below */
+ bo = NULL;
+ }
+
+ fb->bo = bo;
+
+ return err;
+}
+
+static inline int i915_gem_stolen_insert_node(struct xe_device *xe,
+ struct i915_stolen_fb *fb,
+ u32 size, u32 align)
+{
+ /* Not used on xe */
+ BUG_ON(1);
+ return -ENODEV;
+}
+
+static inline void i915_gem_stolen_remove_node(struct xe_device *xe,
+ struct i915_stolen_fb *fb)
+{
+ xe_bo_unpin_map_no_vm(fb->bo);
+ fb->bo = NULL;
+}
+
+#define i915_gem_stolen_initialized(xe) (!!ttm_manager_type(&(xe)->ttm, XE_PL_STOLEN))
+#define i915_gem_stolen_node_allocated(fb) (!!((fb)->bo))
+
+static inline u32 i915_gem_stolen_node_offset(struct i915_stolen_fb *fb)
+{
+ struct xe_res_cursor res;
+
+ xe_res_first(fb->bo->ttm.resource, 0, 4096, &res);
+ return res.start;
+}
+
+/* Used for < gen4. These are not supported by Xe */
+#define i915_gem_stolen_area_address(xe) (!WARN_ON(1))
+/* Used for gen9 specific WA. Gen9 is not supported by Xe */
+#define i915_gem_stolen_area_size(xe) (!WARN_ON(1))
+
+#define i915_gem_stolen_node_address(xe, fb) (xe_ttm_stolen_gpu_offset(xe) + \
+ i915_gem_stolen_node_offset(fb))
+#define i915_gem_stolen_node_size(fb) ((u64)((fb)->bo->ttm.base.size))
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
new file mode 100644
index 000000000..98e9dd78f
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _I915_GPU_ERROR_H_
+#define _I915_GPU_ERROR_H_
+
+struct drm_i915_error_state_buf;
+
+__printf(2, 3)
+static inline void
+i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h
new file mode 100644
index 000000000..61707a07f
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/i915_irq.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
new file mode 100644
index 000000000..8619ec015
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/i915_reg.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_reg_defs.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_reg_defs.h
new file mode 100644
index 000000000..723279c97
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_reg_defs.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/i915_reg_defs.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
new file mode 100644
index 000000000..d429d421a
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#define trace_i915_reg_rw(a...) do { } while (0)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
new file mode 100644
index 000000000..1d7c4360e
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/i915_utils.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
new file mode 100644
index 000000000..80b024d43
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _I915_VGPU_H_
+#define _I915_VGPU_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct i915_ggtt;
+
+static inline void intel_vgpu_detect(struct drm_i915_private *i915)
+{
+}
+static inline bool intel_vgpu_active(struct drm_i915_private *i915)
+{
+ return false;
+}
+static inline void intel_vgpu_register(struct drm_i915_private *i915)
+{
+}
+static inline bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915)
+{
+ return false;
+}
+static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915)
+{
+ return false;
+}
+static inline bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915)
+{
+ return false;
+}
+static inline int intel_vgt_balloon(struct i915_ggtt *ggtt)
+{
+ return 0;
+}
+static inline void intel_vgt_deballoon(struct i915_ggtt *ggtt)
+{
+}
+
+#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
new file mode 100644
index 000000000..a20d2638e
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef I915_VMA_H
+#define I915_VMA_H
+
+#include <uapi/drm/i915_drm.h>
+#include <drm/drm_mm.h>
+
+/* We don't want these from i915_drm.h in case of Xe */
+#undef I915_TILING_X
+#undef I915_TILING_Y
+#define I915_TILING_X 0
+#define I915_TILING_Y 0
+
+struct xe_bo;
+
+struct i915_vma {
+ struct xe_bo *bo, *dpt;
+ struct drm_mm_node node;
+};
+
+#define i915_ggtt_clear_scanout(bo) do { } while (0)
+
+#define i915_vma_fence_id(vma) -1
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ return vma->node.start;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
new file mode 100644
index 000000000..e7aaf50f5
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/* XX: Figure out how to handle this vma mapping in xe */
+struct intel_remapped_plane_info {
+ /* in gtt pages */
+ u32 offset:31;
+ u32 linear:1;
+ union {
+ /* in gtt pages for !linear */
+ struct {
+ u16 width;
+ u16 height;
+ u16 src_stride;
+ u16 dst_stride;
+ };
+
+ /* in gtt pages for linear */
+ u32 size;
+ };
+} __packed;
+
+struct intel_remapped_info {
+ struct intel_remapped_plane_info plane[4];
+ /* in gtt pages */
+ u32 plane_alignment;
+} __packed;
+
+struct intel_rotation_info {
+ struct intel_remapped_plane_info plane[2];
+} __packed;
+
+enum i915_gtt_view_type {
+ I915_GTT_VIEW_NORMAL = 0,
+ I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+};
+
+static inline void assert_i915_gem_gtt_types(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
+
+ /* Check that rotation/remapped shares offsets for simplicity */
+ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
+ offsetof(struct intel_rotation_info, plane[0]));
+ BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
+ offsetofend(struct intel_rotation_info, plane[1]));
+
+ /* As we encode the size of each branch inside the union into its type,
+ * we have to be careful that each branch has a unique size.
+ */
+ switch ((enum i915_gtt_view_type)0) {
+ case I915_GTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_REMAPPED:
+ /* gcc complains if these are identical cases */
+ break;
+ }
+}
+
+struct i915_gtt_view {
+ enum i915_gtt_view_type type;
+ union {
+ /* Members need to contain no holes/padding */
+ struct intel_rotation_info rotated;
+ struct intel_remapped_info remapped;
+ };
+};
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_clock_gating.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_clock_gating.h
new file mode 100644
index 000000000..ce986f0e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_clock_gating.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/intel_clock_gating.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_gt_types.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_gt_types.h
new file mode 100644
index 000000000..c15806d6c
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_gt_types.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_TYPES__
+#define __INTEL_GT_TYPES__
+
+#define intel_gt_support_legacy_fencing(gt) 0
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_mchbar_regs.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_mchbar_regs.h
new file mode 100644
index 000000000..55b316985
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_mchbar_regs.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/intel_mchbar_regs.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pci_config.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pci_config.h
new file mode 100644
index 000000000..8c15867fd
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pci_config.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/intel_pci_config.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
new file mode 100644
index 000000000..0c47661bd
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_PCODE_H__
+#define __INTEL_PCODE_H__
+
+#include "intel_uncore.h"
+#include "xe_pcode.h"
+
+static inline int
+snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
+ int fast_timeout_us, int slow_timeout_ms)
+{
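+	/* xe_pcode only takes a ms timeout; use 1 ms when slow_timeout_ms is 0 */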
+ return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
+ slow_timeout_ms ?: 1);
+}
+
+static inline int
+snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
+{
+ return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
+}
+
+static inline int
+snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
+{
+ return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
+}
+
+static inline int
+skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ int timeout_base_ms)
+{
+ return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
+ timeout_base_ms);
+}
+
+#endif /* __INTEL_PCODE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
new file mode 100644
index 000000000..89da3cc62
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_wakeref.h"
+
+#define intel_runtime_pm xe_runtime_pm
+
+static inline void disable_rpm_wakeref_asserts(void *rpm)
+{
+}
+
+static inline void enable_rpm_wakeref_asserts(void *rpm)
+{
+}
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
new file mode 100644
index 000000000..0006ef812
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_STEP_H__
+#define __INTEL_STEP_H__
+
+#include "xe_device_types.h"
+#include "xe_step.h"
+
+#define intel_display_step_name xe_display_step_name
+
+static inline
+const char *xe_display_step_name(struct xe_device *xe)
+{
+ return xe_step_name(xe->info.step.display);
+}
+
+#endif /* __INTEL_STEP_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
new file mode 100644
index 000000000..009745328
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
new file mode 100644
index 000000000..cd26ddc0f
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_UNCORE_H__
+#define __INTEL_UNCORE_H__
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_mmio.h"
+
+static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
+{
+ struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
+
+ return xe_root_mmio_gt(xe);
+}
+
+static inline u32 intel_uncore_read(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+}
+
+static inline u32 intel_uncore_read8(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
+}
+
+static inline u32 intel_uncore_read16(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
+}
+
+static inline u64
+intel_uncore_read64_2x32(struct intel_uncore *uncore,
+ i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
+{
+ struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
+ struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg));
+ u32 upper, lower, old_upper;
+ int loop = 0;
+
+ upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ do {
+ old_upper = upper;
+ lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ } while (upper != old_upper && loop++ < 2);
+
+ return (u64)upper << 32 | lower;
+}
+
+static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+}
+
+static inline void intel_uncore_write(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u32 val)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+}
+
+static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u32 clear, u32 set)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
+}
+
+static inline int intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u32 mask,
+ u32 value, unsigned int timeout)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ timeout * USEC_PER_MSEC, NULL, false);
+}
+
+static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u32 mask,
+ u32 value, unsigned int timeout)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ timeout * USEC_PER_MSEC, NULL, false);
+}
+
+static inline int
+__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
+ u32 mask, u32 value, unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ fast_timeout_us + 1000 * slow_timeout_ms,
+ out_value, false);
+}
+
+static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+}
+
+static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u32 val)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+}
+
+static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
+ i915_reg_t i915_reg)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+}
+
+static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u32 val)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+}
+
+static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
+{
+ struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
+
+ return xe_device_get_root_tile(xe)->mmio.regs;
+}
+
+/*
+ * The raw_reg_{read,write} macros are intended as a micro-optimization for
+ * interrupt handlers so that the pointer indirection on uncore->regs can
+ * be computed once (and presumably cached in a register) instead of generating
+ * extra load instructions for each MMIO access.
+ *
+ * Given that these macros are only intended for non-GSI interrupt registers
+ * (and the goal is to avoid extra instructions generated by the compiler),
+ * these macros do not account for uncore->gsi_offset. Any caller that needs
+ * to use these macros on a GSI register is responsible for adding the
+ * appropriate GSI offset to the 'base' parameter.
+ */
+#define raw_reg_read(base, reg) \
+ readl(base + i915_mmio_reg_offset(reg))
+#define raw_reg_write(base, reg, value) \
+ writel(value, base + i915_mmio_reg_offset(reg))
+
+#endif /* __INTEL_UNCORE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
new file mode 100644
index 000000000..ecb1c0707
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+typedef unsigned long intel_wakeref_t;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
new file mode 100644
index 000000000..c2c30ece8
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_PXP_H__
+#define __INTEL_PXP_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct intel_pxp;
+
+static inline int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ return -ENODEV;
+}
+
+static inline bool
+i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
+{
+ return false;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_dram.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_dram.h
new file mode 100644
index 000000000..65707e20c
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_dram.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../../i915/soc/intel_dram.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_gmch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_gmch.h
new file mode 100644
index 000000000..33c5257b3
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_gmch.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../../i915/soc/intel_gmch.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
new file mode 100644
index 000000000..9c46556d3
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../../i915/soc/intel_pch.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
new file mode 100644
index 000000000..ec6f12de5
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _VLV_SIDEBAND_H_
+#define _VLV_SIDEBAND_H_
+
+#include <linux/types.h>
+
+#include "vlv_sideband_reg.h"
+
+enum pipe;
+struct drm_i915_private;
+
+enum {
+ VLV_IOSF_SB_BUNIT,
+ VLV_IOSF_SB_CCK,
+ VLV_IOSF_SB_CCU,
+ VLV_IOSF_SB_DPIO,
+ VLV_IOSF_SB_FLISDSI,
+ VLV_IOSF_SB_GPIO,
+ VLV_IOSF_SB_NC,
+ VLV_IOSF_SB_PUNIT,
+};
+
+static inline void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+{
+}
+static inline u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
+{
+ return 0;
+}
+static inline void vlv_iosf_sb_write(struct drm_i915_private *i915,
+ u8 port, u32 reg, u32 val)
+{
+}
+static inline void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+{
+}
+static inline void vlv_bunit_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
+{
+ return 0;
+}
+static inline void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+}
+static inline void vlv_bunit_put(struct drm_i915_private *i915)
+{
+}
+static inline void vlv_cck_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
+{
+ return 0;
+}
+static inline void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+}
+static inline void vlv_cck_put(struct drm_i915_private *i915)
+{
+}
+static inline void vlv_ccu_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
+{
+ return 0;
+}
+static inline void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+}
+static inline void vlv_ccu_put(struct drm_i915_private *i915)
+{
+}
+static inline void vlv_dpio_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_dpio_read(struct drm_i915_private *i915, int pipe, int reg)
+{
+ return 0;
+}
+static inline void vlv_dpio_write(struct drm_i915_private *i915,
+ int pipe, int reg, u32 val)
+{
+}
+static inline void vlv_dpio_put(struct drm_i915_private *i915)
+{
+}
+static inline void vlv_flisdsi_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
+{
+ return 0;
+}
+static inline void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+}
+static inline void vlv_flisdsi_put(struct drm_i915_private *i915)
+{
+}
+static inline void vlv_nc_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
+{
+ return 0;
+}
+static inline void vlv_nc_put(struct drm_i915_private *i915)
+{
+}
+static inline void vlv_punit_get(struct drm_i915_private *i915)
+{
+}
+static inline u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
+{
+ return 0;
+}
+static inline int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
+{
+ return 0;
+}
+static inline void vlv_punit_put(struct drm_i915_private *i915)
+{
+}
+
+#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h
new file mode 100644
index 000000000..949f134ce
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "../../i915/vlv_sideband_reg.h"
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
new file mode 100644
index 000000000..bee191a4a
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "intel_uncore.h"
+
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
+{
+ intel_uncore_write(uncore, imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, imr);
+
+ intel_uncore_write(uncore, ier, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+}
+
+/*
+ * We should clear IMR at preinstall/uninstall, and just check at postinstall.
+ */
+void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
+ u32 val = intel_uncore_read(uncore, reg);
+
+ if (val == 0)
+ return;
+
+ drm_WARN(&xe->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(reg), val);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
+}
+
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
+{
+ gen3_assert_iir_is_zero(uncore, iir);
+
+ intel_uncore_write(uncore, ier, ier_val);
+ intel_uncore_write(uncore, imr, imr_val);
+ intel_uncore_posting_read(uncore, imr);
+}
+
+bool intel_irqs_enabled(struct xe_device *xe)
+{
+ /*
+ * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
+ * lock its transitions. Because of that, the irq.enabled sometimes
+ * is not read with the irq.lock in place.
+ * However, the most critical cases like vblank and page flips are
+ * properly using the locks.
+ * We cannot take the lock in here or run any kind of assert because
+ * of i915 inconsistency.
+ * But at this point the xe irq is better protected against races,
+ * although the full solution would be protecting the i915 side.
+ */
+ return xe->irq.enabled;
+}
+
+void intel_synchronize_irq(struct xe_device *xe)
+{
+ synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
+}
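Editor's note: a minimal sketch of how the helpers above compose, assuming a caller and a register triplet that are not part of this patch. gen3_irq_reset() masks and double-clears a bank; gen3_irq_init() then verifies IIR is clean before enabling and unmasking.

/* Hypothetical postinstall for one IMR/IER/IIR triplet (illustrative only). */
static void example_irq_bank_enable(struct intel_uncore *uncore,
				    i915_reg_t imr, i915_reg_t ier,
				    i915_reg_t iir, u32 events)
{
	gen3_irq_reset(uncore, imr, iir, ier);	/* quiesce the bank first */
	gen3_irq_init(uncore, imr, ~events, ier, events, iir);
}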
diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c
new file mode 100644
index 000000000..43b10a2cc
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/ext/i915_utils.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+bool i915_vtd_active(struct drm_i915_private *i915)
+{
+ if (device_iommu_mapped(i915->drm.dev))
+ return true;
+
+ /* Running as a guest, we assume the host is enforcing VT'd */
+ return i915_run_as_guest();
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
+/* i915 specific, just stubbed out here to silence it */
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
new file mode 100644
index 000000000..a9c1f9885
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_modeset_helper.h>
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_fb_bo.h"
+
+void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
+{
+ if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+ /* Unpin our kernel fb first */
+ xe_bo_lock(bo, false);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+ }
+ xe_bo_put(bo);
+}
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct xe_bo *bo,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *i915 = to_i915(bo->ttm.base.dev);
+ int ret;
+
+ xe_bo_get(bo);
+
+ ret = ttm_bo_reserve(&bo->ttm, true, false, NULL);
+ if (ret)
+ goto err;
+
+ if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+ /*
+ * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+ * automatically set when creating FB. We cannot change caching
+ * mode when the object is VM_BINDed, so we can only set
+ * coherency with display when unbound.
+ */
+ if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) {
+ ttm_bo_unreserve(&bo->ttm);
+ ret = -EINVAL;
+ goto err;
+ }
+ bo->flags |= XE_BO_SCANOUT_BIT;
+ }
+ ttm_bo_unreserve(&bo->ttm);
+ return 0;
+
+err:
+ xe_bo_put(bo);
+ return ret;
+}
+
+struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *bo;
+ struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+
+ if (!gem)
+ return ERR_PTR(-ENOENT);
+
+ bo = gem_to_xe_bo(gem);
+ /* Require vram placement or dma-buf import */
+ if (IS_DGFX(i915) &&
+ !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) &&
+ bo->ttm.type != ttm_bo_type_sg) {
+ drm_gem_object_put(gem);
+ return ERR_PTR(-EREMOTE);
+ }
+
+ return bo;
+}
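Editor's note: a sketch of the expected call flow, with the fb_create caller assumed rather than taken from this patch. The lookup returns a GEM reference and intel_fb_bo_framebuffer_init() takes its own, so the caller drops the lookup reference once init has either adopted or rejected the BO.

static int example_fb_create(struct drm_i915_private *i915,
			     struct drm_file *filp,
			     struct intel_framebuffer *intel_fb,
			     struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct xe_bo *bo = intel_fb_bo_lookup_valid_bo(i915, filp, mode_cmd);
	int ret;

	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = intel_fb_bo_framebuffer_init(intel_fb, bo, mode_cmd);
	xe_bo_put(bo);	/* drop the lookup reference; init holds its own */
	return ret;
}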
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h
new file mode 100644
index 000000000..5d365b925
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_BO_H__
+#define __INTEL_FB_BO_H__
+
+struct drm_file;
+struct drm_mode_fb_cmd2;
+struct drm_i915_private;
+struct intel_framebuffer;
+struct xe_bo;
+
+void intel_fb_bo_framebuffer_fini(struct xe_bo *bo);
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct xe_bo *bo,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
new file mode 100644
index 000000000..51ae3561f
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_fbdev_fb.h"
+
+#include <drm/drm_fb_helper.h>
+
+#include "xe_gt.h"
+#include "xe_ttm_stolen_mgr.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+ obj = ERR_PTR(-ENODEV);
+
+ if (!IS_DGFX(dev_priv)) {
+ obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
+ NULL, size,
+ ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
+ XE_BO_CREATE_STOLEN_BIT |
+ XE_BO_CREATE_PINNED_BIT);
+ if (!IS_ERR(obj))
+ drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
+ else
+ drm_info(&dev_priv->drm, "Allocating fbdev into stolen failed: %li\n", PTR_ERR(obj));
+ }
+ if (IS_ERR(obj)) {
+ obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
+ ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
+ XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+ XE_BO_CREATE_PINNED_BIT);
+ }
+
+ if (IS_ERR(obj)) {
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ fb = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ if (IS_ERR(fb)) {
+ xe_bo_unpin_map_no_vm(obj);
+ goto err;
+ }
+
+ drm_gem_object_put(intel_bo_to_drm_bo(obj));
+ return fb;
+
+err:
+ return fb;
+}
+
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+
+ if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
+ if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+ info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
+ else
+ info->fix.smem_start =
+ pci_resource_start(pdev, 2) +
+ xe_bo_addr(obj, 0, XE_PAGE_SIZE);
+
+ info->fix.smem_len = obj->ttm.base.size;
+ } else {
+ /* XXX: Pure fiction, as the BO may not be physically accessible.. */
+ info->fix.smem_start = 0;
+ info->fix.smem_len = obj->ttm.base.size;
+ }
+ XE_WARN_ON(iosys_map_is_null(&obj->vmap));
+
+ info->screen_base = obj->vmap.vaddr_iomem;
+ info->screen_size = intel_bo_to_drm_bo(obj)->size;
+
+ return 0;
+}
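Editor's note, a worked example of the sizing above (surface parameters assumed): for a 1920x1080, 32 bpp console with a 4 KiB XE_PAGE_SIZE, pitches[0] = ALIGN(1920 * 4, 4096) = 8192 bytes and size = PAGE_ALIGN(8192 * 1080) = 8847360 bytes, i.e. roughly 8.4 MiB of stolen memory or VRAM.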
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.h b/drivers/gpu/drm/xe/display/intel_fbdev_fb.h
new file mode 100644
index 000000000..ea186772e
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_FB_H__
+#define __INTEL_FBDEV_FB_H__
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct fb_info;
+struct i915_vma;
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma);
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_display_misc.c b/drivers/gpu/drm/xe/display/xe_display_misc.c
new file mode 100644
index 000000000..242c2ef4c
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_misc.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_display_types.h"
+
+struct pci_dev;
+
+unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode);
+
+unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
+{
+ /* ToDo: Implement the actual handling of vga decode */
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
new file mode 100644
index 000000000..ab21c581c
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rps.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_display_rps.h"
+
+void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+}
+
+void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
+ struct intel_atomic_state *state,
+ bool interactive)
+{
+}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
new file mode 100644
index 000000000..27c2fb1c0
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_display_types.h"
+#include "intel_dsb_buffer.h"
+#include "xe_bo.h"
+#include "xe_gt.h"
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+{
+ return xe_bo_ggtt_addr(dsb_buf->vma->bo);
+}
+
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+{
+ iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
+}
+
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+{
+ return iosys_map_rd(&dsb_buf->vma->bo->vmap, idx * 4, u32);
+}
+
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+{
+ WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
+
+ iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
+}
+
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!vma)
+ return false;
+
+ obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
+ NULL, PAGE_ALIGN(size),
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(obj)) {
+ kfree(vma);
+ return false;
+ }
+
+ vma->bo = obj;
+ dsb_buf->vma = vma;
+ dsb_buf->buf_size = size;
+
+ return true;
+}
+
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+{
+ xe_bo_unpin_map_no_vm(dsb_buf->vma->bo);
+ kfree(dsb_buf->vma);
+}
+
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+{
+ /* TODO: add xe specific flush_map() for dsb buffer object. */
+}
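Editor's note: a minimal usage sketch with an assumed caller shape. The buffer is dword-indexed, so index i addresses byte offset i * 4 of the pinned BO's vmap.

static bool example_dsb_prepare(struct intel_crtc *crtc,
				struct intel_dsb_buffer *buf)
{
	if (!intel_dsb_buffer_create(crtc, buf, SZ_4K))
		return false;

	intel_dsb_buffer_write(buf, 0, 0);	/* first command dword */
	intel_dsb_buffer_flush_map(buf);	/* currently a no-op on xe */
	return true;
}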
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
new file mode 100644
index 000000000..722c84a56
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dpt.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+
+#include <drm/ttm/ttm_bo.h>
+
+static void
+write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
+ u32 width, u32 height, u32 src_stride, u32 dst_stride)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ u32 column, row;
+
+ /* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
+ * by writing dpt/ggtt in a different order?
+ */
+
+ for (column = 0; column < width; column++) {
+ u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
+
+ for (row = 0; row < height; row++) {
+ u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
+ xe->pat.idx[XE_CACHE_WB]);
+
+ iosys_map_wr(map, *dpt_ofs, u64, pte);
+ *dpt_ofs += 8;
+ src_idx -= src_stride;
+ }
+
+ /* The DE ignores the PTEs for the padding tiles */
+ *dpt_ofs += (dst_stride - height) * 8;
+ }
+
+ /* Align to next page */
+ *dpt_ofs = ALIGN(*dpt_ofs, 4096);
+}
+
+static void
+write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
+ u32 bo_ofs, u32 width, u32 height, u32 src_stride,
+ u32 dst_stride)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
+ = ggtt->pt_ops->pte_encode_bo;
+ u32 column, row;
+
+ for (row = 0; row < height; row++) {
+ u32 src_idx = src_stride * row + bo_ofs;
+
+ for (column = 0; column < width; column++) {
+ iosys_map_wr(map, *dpt_ofs, u64,
+ pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
+ xe->pat.idx[XE_CACHE_WB]));
+
+ *dpt_ofs += 8;
+ src_idx++;
+ }
+
+ /* The DE ignores the PTEs for the padding tiles */
+ *dpt_ofs += (dst_stride - width) * 8;
+ }
+
+ /* Align to next page */
+ *dpt_ofs = ALIGN(*dpt_ofs, 4096);
+}
+
+static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
+ const struct i915_gtt_view *view,
+ struct i915_vma *vma)
+{
+ struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct xe_tile *tile0 = xe_device_get_root_tile(xe);
+ struct xe_ggtt *ggtt = tile0->mem.ggtt;
+ struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt;
+ u32 dpt_size, size = bo->ttm.base.size;
+
+ if (view->type == I915_GTT_VIEW_NORMAL)
+ dpt_size = ALIGN(size / XE_PAGE_SIZE * 8, XE_PAGE_SIZE);
+ else if (view->type == I915_GTT_VIEW_REMAPPED)
+ dpt_size = ALIGN(intel_remapped_info_size(&fb->remapped_view.gtt.remapped) * 8,
+ XE_PAGE_SIZE);
+ else
+ /* display uses 4K tiles instead of bytes here, convert to entries.. */
+ dpt_size = ALIGN(intel_rotation_info_size(&view->rotated) * 8,
+ XE_PAGE_SIZE);
+
+ if (IS_DGFX(xe))
+ dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM0_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ else
+ dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_STOLEN_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(dpt))
+ dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(dpt))
+ return PTR_ERR(dpt);
+
+ if (view->type == I915_GTT_VIEW_NORMAL) {
+ u32 x;
+
+ for (x = 0; x < size / XE_PAGE_SIZE; x++) {
+ u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
+ xe->pat.idx[XE_CACHE_WB]);
+
+ iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
+ }
+ } else if (view->type == I915_GTT_VIEW_REMAPPED) {
+ const struct intel_remapped_info *remap_info = &view->remapped;
+ u32 i, dpt_ofs = 0;
+
+ for (i = 0; i < ARRAY_SIZE(remap_info->plane); i++)
+ write_dpt_remapped(bo, &dpt->vmap, &dpt_ofs,
+ remap_info->plane[i].offset,
+ remap_info->plane[i].width,
+ remap_info->plane[i].height,
+ remap_info->plane[i].src_stride,
+ remap_info->plane[i].dst_stride);
+
+ } else {
+ const struct intel_rotation_info *rot_info = &view->rotated;
+ u32 i, dpt_ofs = 0;
+
+ for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
+ write_dpt_rotated(bo, &dpt->vmap, &dpt_ofs,
+ rot_info->plane[i].offset,
+ rot_info->plane[i].width,
+ rot_info->plane[i].height,
+ rot_info->plane[i].src_stride,
+ rot_info->plane[i].dst_stride);
+ }
+
+ vma->dpt = dpt;
+ vma->node = dpt->ggtt_node;
+ return 0;
+}
+
+static void
+write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo_ofs,
+ u32 width, u32 height, u32 src_stride, u32 dst_stride)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u32 column, row;
+
+ for (column = 0; column < width; column++) {
+ u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
+
+ for (row = 0; row < height; row++) {
+ u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
+ xe->pat.idx[XE_CACHE_WB]);
+
+ xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
+ *ggtt_ofs += XE_PAGE_SIZE;
+ src_idx -= src_stride;
+ }
+
+ /* The DE ignores the PTEs for the padding tiles */
+ *ggtt_ofs += (dst_stride - height) * XE_PAGE_SIZE;
+ }
+}
+
+static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
+ const struct i915_gtt_view *view,
+ struct i915_vma *vma)
+{
+ struct xe_bo *bo = intel_fb_obj(&fb->base);
+ struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ u32 align;
+ int ret;
+
+ /* TODO: Consider sharing framebuffer mapping?
+ * embed i915_vma inside intel_framebuffer
+ */
+ xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ ret = mutex_lock_interruptible(&ggtt->lock);
+ if (ret)
+ goto out;
+
+ align = XE_PAGE_SIZE;
+ if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
+ align = max_t(u32, align, SZ_64K);
+
+ if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node;
+ } else if (view->type == I915_GTT_VIEW_NORMAL) {
+ u32 x, size = bo->ttm.base.size;
+
+ ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
+ align, 0);
+ if (ret)
+ goto out_unlock;
+
+ for (x = 0; x < size; x += XE_PAGE_SIZE) {
+ u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
+ xe->pat.idx[XE_CACHE_WB]);
+
+ xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
+ }
+ } else {
+ u32 i, ggtt_ofs;
+ const struct intel_rotation_info *rot_info = &view->rotated;
+
+ /* display seems to use tiles instead of bytes here, so convert it back.. */
+ u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE;
+
+ ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size,
+ align, 0);
+ if (ret)
+ goto out_unlock;
+
+ ggtt_ofs = vma->node.start;
+
+ for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
+ write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
+ rot_info->plane[i].offset,
+ rot_info->plane[i].width,
+ rot_info->plane[i].height,
+ rot_info->plane[i].src_stride,
+ rot_info->plane[i].dst_stride);
+ }
+
+ xe_ggtt_invalidate(ggtt);
+out_unlock:
+ mutex_unlock(&ggtt->lock);
+out:
+ xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+ return ret;
+}
+
+static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
+ const struct i915_gtt_view *view)
+{
+ struct drm_device *dev = fb->base.dev;
+ struct xe_device *xe = to_xe_device(dev);
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct xe_bo *bo = intel_fb_obj(&fb->base);
+ int ret;
+
+ if (!vma)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
+ intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
+ !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ /*
+ * If we need to be able to access the clear-color value stored in
+ * the buffer, then we require that such buffers are also CPU
+ * accessible. This is important on small-bar systems where
+ * only some subset of VRAM is CPU accessible.
+ */
+ if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ /*
+ * Pin the framebuffer; we can't use the xe_bo_(un)pin functions, as
+ * their assumptions are incorrect for framebuffers
+ */
+ ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);
+ if (ret)
+ goto err;
+
+ if (IS_DGFX(xe))
+ ret = xe_bo_migrate(bo, XE_PL_VRAM0);
+ else
+ ret = xe_bo_validate(bo, NULL, true);
+ if (!ret)
+ ttm_bo_pin(&bo->ttm);
+ ttm_bo_unreserve(&bo->ttm);
+ if (ret)
+ goto err;
+
+ vma->bo = bo;
+ if (intel_fb_uses_dpt(&fb->base))
+ ret = __xe_pin_fb_vma_dpt(fb, view, vma);
+ else
+ ret = __xe_pin_fb_vma_ggtt(fb, view, vma);
+ if (ret)
+ goto err_unpin;
+
+ return vma;
+
+err_unpin:
+ ttm_bo_reserve(&bo->ttm, false, false, NULL);
+ ttm_bo_unpin(&bo->ttm);
+ ttm_bo_unreserve(&bo->ttm);
+err:
+ kfree(vma);
+ return ERR_PTR(ret);
+}
+
+static void __xe_unpin_fb_vma(struct i915_vma *vma)
+{
+ struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev);
+ struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+
+ if (vma->dpt)
+ xe_bo_unpin_map_no_vm(vma->dpt);
+ else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
+ vma->bo->ggtt_node.start != vma->node.start)
+ xe_ggtt_remove_node(ggtt, &vma->node);
+
+ ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
+ ttm_bo_unpin(&vma->bo->ttm);
+ ttm_bo_unreserve(&vma->bo->ttm);
+ kfree(vma);
+}
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_gtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags)
+{
+ *out_flags = 0;
+
+ return __xe_pin_fb_vma(to_intel_framebuffer(fb), view);
+}
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+{
+ __xe_unpin_fb_vma(vma);
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct xe_bo *bo = intel_fb_obj(fb);
+ struct i915_vma *vma;
+
+ /* We reject creating !SCANOUT fb's, so this is weird.. */
+ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+
+ vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+ return 0;
+}
+
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ __xe_unpin_fb_vma(old_plane_state->ggtt_vma);
+ old_plane_state->ggtt_vma = NULL;
+}
+
+/*
+ * For Xe, introduce a dummy intel_dpt_create() which just returns NULL and
+ * an intel_dpt_destroy() which does nothing.
+ */
+struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
+{
+ return NULL;
+}
+
+void intel_dpt_destroy(struct i915_address_space *vm)
+{
+ return;
+}
\ No newline at end of file
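Editor's note, to make the rotated walk in write_dpt_rotated() concrete (values assumed): with width = 2, height = 3, src_stride = 2 and bo_ofs = 0, the first DPT column receives PTEs for source pages 4, 2, 0 and the second for 5, 3, 1. Each display column thus reads a source row bottom-up, which is what yields the 90 degree rotation.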
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
new file mode 100644
index 000000000..0f11a3933
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include "i915_drv.h"
+#include "intel_hdcp_gsc.h"
+
+bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+{
+ return true;
+}
+
+bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915)
+{
+ return false;
+}
+
+int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+{
+ drm_info(&i915->drm, "HDCP support not yet implemented\n");
+ return -ENODEV;
+}
+
+void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+{
+}
+
+ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+ size_t msg_in_len, u8 *msg_out,
+ size_t msg_out_len)
+{
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
new file mode 100644
index 000000000..ccf83c12b
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+/* for ioread64 */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "xe_ggtt.h"
+
+#include "i915_drv.h"
+#include "intel_atomic_plane.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
+#include "intel_frontbuffer.h"
+#include "intel_plane_initial.h"
+
+static bool
+intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+ const struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer **fb)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ if (!plane_state->ggtt_vma)
+ continue;
+
+ if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ *fb = plane_state->hw.fb;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct xe_bo *
+initial_plane_bo(struct xe_device *xe,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct xe_tile *tile0 = xe_device_get_root_tile(xe);
+ struct xe_bo *bo;
+ resource_size_t phys_base;
+ u32 base, size, flags;
+ u64 page_size = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+
+ base = round_down(plane_config->base, page_size);
+ if (IS_DGFX(xe)) {
+ u64 __iomem *gte = tile0->mem.ggtt->gsm;
+ u64 pte;
+
+ gte += base / XE_PAGE_SIZE;
+
+ pte = ioread64(gte);
+ if (!(pte & XE_GGTT_PTE_DM)) {
+ drm_err(&xe->drm,
+ "Initial plane programming missing DM bit\n");
+ return NULL;
+ }
+
+ phys_base = pte & ~(page_size - 1);
+ flags |= XE_BO_CREATE_VRAM0_BIT;
+
+ /*
+ * We don't currently expect this to ever be placed in the
+ * stolen portion.
+ */
+ if (phys_base >= tile0->mem.vram.usable_size) {
+ drm_err(&xe->drm,
+ "Initial plane programming using invalid range, phys_base=%pa\n",
+ &phys_base);
+ return NULL;
+ }
+
+ drm_dbg(&xe->drm,
+ "Using phys_base=%pa, based on initial plane programming\n",
+ &phys_base);
+ } else {
+ struct ttm_resource_manager *stolen = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+
+ if (!stolen)
+ return NULL;
+ phys_base = base;
+ flags |= XE_BO_CREATE_STOLEN_BIT;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ plane_config->size * 2 >> PAGE_SHIFT >= stolen->size)
+ return NULL;
+ }
+
+ size = round_up(plane_config->base + plane_config->size,
+ page_size);
+ size -= base;
+
+ bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base,
+ ttm_bo_type_kernel, flags);
+ if (IS_ERR(bo)) {
+ drm_dbg(&xe->drm,
+ "Failed to create bo phys_base=%pa size %u with flags %x: %li\n",
+ &phys_base, size, flags, PTR_ERR(bo));
+ return NULL;
+ }
+
+ return bo;
+}
+
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+ struct xe_bo *bo;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_4_TILED:
+ break;
+ default:
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
+ return false;
+ }
+
+ mode_cmd.pixel_format = fb->format->format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier;
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+ bo = initial_plane_bo(dev_priv, plane_config);
+ if (!bo)
+ return false;
+
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ bo, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_bo;
+ }
+ /* Reference handed over to fb */
+ xe_bo_put(bo);
+
+ return true;
+
+err_bo:
+ xe_bo_unpin_map_no_vm(bo);
+ return false;
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct drm_framebuffer *fb;
+ struct i915_vma *vma;
+
+ /*
+ * TODO:
+ * Disable planes if get_initial_plane_config() failed.
+ * Make sure things work if the surface base is not page aligned.
+ */
+ if (!plane_config->fb)
+ return;
+
+ if (intel_alloc_initial_plane_obj(crtc, plane_config))
+ fb = &plane_config->fb->base;
+ else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb))
+ goto nofb;
+
+ plane_state->uapi.rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb),
+ plane_state->uapi.rotation, &plane_state->view);
+
+ vma = intel_pin_and_fence_fb_obj(fb, false, &plane_state->view.gtt,
+ false, &plane_state->flags);
+ if (IS_ERR(vma))
+ goto nofb;
+
+ plane_state->ggtt_vma = vma;
+ plane_state->uapi.src_x = 0;
+ plane_state->uapi.src_y = 0;
+ plane_state->uapi.src_w = fb->width << 16;
+ plane_state->uapi.src_h = fb->height << 16;
+
+ plane_state->uapi.crtc_x = 0;
+ plane_state->uapi.crtc_y = 0;
+ plane_state->uapi.crtc_w = fb->width;
+ plane_state->uapi.crtc_h = fb->height;
+
+ plane_state->uapi.fb = fb;
+ drm_framebuffer_get(fb);
+
+ plane_state->uapi.crtc = &crtc->base;
+ intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
+
+ atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
+
+ plane_config->vma = vma;
+
+ /*
+ * Flip to the newly created mapping ASAP, so we can re-use the
+ * first part of GGTT for WOPCM, prevent flickering, and prevent
+ * the lookup of sysmem scratch pages.
+ */
+ plane->check_plane(crtc_state, plane_state);
+ plane->async_flip(plane, crtc_state, plane_state, true);
+ return;
+
+nofb:
+ /*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+ intel_plane_disable_noatomic(crtc, plane);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+}
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+{
+ struct xe_device *xe = to_xe_device(crtc->base.dev);
+ struct intel_initial_plane_config plane_config = {};
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
+}
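Editor's note, a worked example of the base/size rounding in initial_plane_bo() (values assumed): with 64 KiB pages, plane_config->base = 0x7f8000 and plane_config->size = 0x10000 give base = round_down(0x7f8000, SZ_64K) = 0x7f0000 and size = round_up(0x808000, SZ_64K) - 0x7f0000 = 0x20000, so a 64 KiB plane straddling page boundaries is pinned as a 128 KiB BO.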
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
new file mode 100644
index 000000000..8e6dd061f
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GFXPIPE_COMMANDS_H_
+#define _XE_GFXPIPE_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+#define GFXPIPE_PIPELINE REG_GENMASK(28, 27)
+#define PIPELINE_COMMON REG_FIELD_PREP(GFXPIPE_PIPELINE, 0x0)
+#define PIPELINE_SINGLE_DW REG_FIELD_PREP(GFXPIPE_PIPELINE, 0x1)
+#define PIPELINE_COMPUTE REG_FIELD_PREP(GFXPIPE_PIPELINE, 0x2)
+#define PIPELINE_3D REG_FIELD_PREP(GFXPIPE_PIPELINE, 0x3)
+
+#define GFXPIPE_OPCODE REG_GENMASK(26, 24)
+#define GFXPIPE_SUBOPCODE REG_GENMASK(23, 16)
+
+#define GFXPIPE_MATCH_MASK (XE_INSTR_CMD_TYPE | \
+ GFXPIPE_PIPELINE | \
+ GFXPIPE_OPCODE | \
+ GFXPIPE_SUBOPCODE)
+
+#define GFXPIPE_COMMON_CMD(opcode, subopcode) \
+ (XE_INSTR_GFXPIPE | PIPELINE_COMMON | \
+ REG_FIELD_PREP(GFXPIPE_OPCODE, opcode) | \
+ REG_FIELD_PREP(GFXPIPE_SUBOPCODE, subopcode))
+
+#define GFXPIPE_SINGLE_DW_CMD(opcode, subopcode) \
+ (XE_INSTR_GFXPIPE | PIPELINE_SINGLE_DW | \
+ REG_FIELD_PREP(GFXPIPE_OPCODE, opcode) | \
+ REG_FIELD_PREP(GFXPIPE_SUBOPCODE, subopcode))
+
+#define GFXPIPE_3D_CMD(opcode, subopcode) \
+ (XE_INSTR_GFXPIPE | PIPELINE_3D | \
+ REG_FIELD_PREP(GFXPIPE_OPCODE, opcode) | \
+ REG_FIELD_PREP(GFXPIPE_SUBOPCODE, subopcode))
+
+#define GFXPIPE_COMPUTE_CMD(opcode, subopcode) \
+ (XE_INSTR_GFXPIPE | PIPELINE_COMPUTE | \
+ REG_FIELD_PREP(GFXPIPE_OPCODE, opcode) | \
+ REG_FIELD_PREP(GFXPIPE_SUBOPCODE, subopcode))
+
+#define STATE_BASE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0x1)
+#define STATE_SIP GFXPIPE_COMMON_CMD(0x1, 0x2)
+#define GPGPU_CSR_BASE_ADDRESS GFXPIPE_COMMON_CMD(0x1, 0x4)
+#define STATE_COMPUTE_MODE GFXPIPE_COMMON_CMD(0x1, 0x5)
+#define CMD_3DSTATE_BTD GFXPIPE_COMMON_CMD(0x1, 0x6)
+
+#define CMD_3DSTATE_VF_STATISTICS GFXPIPE_SINGLE_DW_CMD(0x0, 0xB)
+
+#define PIPELINE_SELECT GFXPIPE_SINGLE_DW_CMD(0x1, 0x4)
+
+#define CMD_3DSTATE_DRAWING_RECTANGLE_FAST GFXPIPE_3D_CMD(0x0, 0x0)
+#define CMD_3DSTATE_CLEAR_PARAMS GFXPIPE_3D_CMD(0x0, 0x4)
+#define CMD_3DSTATE_DEPTH_BUFFER GFXPIPE_3D_CMD(0x0, 0x5)
+#define CMD_3DSTATE_STENCIL_BUFFER GFXPIPE_3D_CMD(0x0, 0x6)
+#define CMD_3DSTATE_HIER_DEPTH_BUFFER GFXPIPE_3D_CMD(0x0, 0x7)
+#define CMD_3DSTATE_VERTEX_BUFFERS GFXPIPE_3D_CMD(0x0, 0x8)
+#define CMD_3DSTATE_VERTEX_ELEMENTS GFXPIPE_3D_CMD(0x0, 0x9)
+#define CMD_3DSTATE_INDEX_BUFFER GFXPIPE_3D_CMD(0x0, 0xA)
+#define CMD_3DSTATE_VF GFXPIPE_3D_CMD(0x0, 0xC)
+#define CMD_3DSTATE_MULTISAMPLE GFXPIPE_3D_CMD(0x0, 0xD)
+#define CMD_3DSTATE_CC_STATE_POINTERS GFXPIPE_3D_CMD(0x0, 0xE)
+#define CMD_3DSTATE_SCISSOR_STATE_POINTERS GFXPIPE_3D_CMD(0x0, 0xF)
+#define CMD_3DSTATE_VS GFXPIPE_3D_CMD(0x0, 0x10)
+#define CMD_3DSTATE_GS GFXPIPE_3D_CMD(0x0, 0x11)
+#define CMD_3DSTATE_CLIP GFXPIPE_3D_CMD(0x0, 0x12)
+#define CMD_3DSTATE_SF GFXPIPE_3D_CMD(0x0, 0x13)
+#define CMD_3DSTATE_WM GFXPIPE_3D_CMD(0x0, 0x14)
+#define CMD_3DSTATE_CONSTANT_VS GFXPIPE_3D_CMD(0x0, 0x15)
+#define CMD_3DSTATE_CONSTANT_GS GFXPIPE_3D_CMD(0x0, 0x16)
+#define CMD_3DSTATE_SAMPLE_MASK GFXPIPE_3D_CMD(0x0, 0x18)
+#define CMD_3DSTATE_CONSTANT_HS GFXPIPE_3D_CMD(0x0, 0x19)
+#define CMD_3DSTATE_CONSTANT_DS GFXPIPE_3D_CMD(0x0, 0x1A)
+#define CMD_3DSTATE_HS GFXPIPE_3D_CMD(0x0, 0x1B)
+#define CMD_3DSTATE_TE GFXPIPE_3D_CMD(0x0, 0x1C)
+#define CMD_3DSTATE_DS GFXPIPE_3D_CMD(0x0, 0x1D)
+#define CMD_3DSTATE_STREAMOUT GFXPIPE_3D_CMD(0x0, 0x1E)
+#define CMD_3DSTATE_SBE GFXPIPE_3D_CMD(0x0, 0x1F)
+#define CMD_3DSTATE_PS GFXPIPE_3D_CMD(0x0, 0x20)
+#define CMD_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP GFXPIPE_3D_CMD(0x0, 0x21)
+#define CMD_3DSTATE_CPS_POINTERS GFXPIPE_3D_CMD(0x0, 0x22)
+#define CMD_3DSTATE_VIEWPORT_STATE_POINTERS_CC GFXPIPE_3D_CMD(0x0, 0x23)
+#define CMD_3DSTATE_BLEND_STATE_POINTERS GFXPIPE_3D_CMD(0x0, 0x24)
+#define CMD_3DSTATE_BINDING_TABLE_POINTERS_VS GFXPIPE_3D_CMD(0x0, 0x26)
+#define CMD_3DSTATE_BINDING_TABLE_POINTERS_HS GFXPIPE_3D_CMD(0x0, 0x27)
+#define CMD_3DSTATE_BINDING_TABLE_POINTERS_DS GFXPIPE_3D_CMD(0x0, 0x28)
+#define CMD_3DSTATE_BINDING_TABLE_POINTERS_GS GFXPIPE_3D_CMD(0x0, 0x29)
+#define CMD_3DSTATE_BINDING_TABLE_POINTERS_PS GFXPIPE_3D_CMD(0x0, 0x2A)
+#define CMD_3DSTATE_SAMPLER_STATE_POINTERS_VS GFXPIPE_3D_CMD(0x0, 0x2B)
+#define CMD_3DSTATE_SAMPLER_STATE_POINTERS_HS GFXPIPE_3D_CMD(0x0, 0x2C)
+#define CMD_3DSTATE_SAMPLER_STATE_POINTERS_DS GFXPIPE_3D_CMD(0x0, 0x2D)
+#define CMD_3DSTATE_SAMPLER_STATE_POINTERS_GS GFXPIPE_3D_CMD(0x0, 0x2E)
+#define CMD_3DSTATE_SAMPLER_STATE_POINTERS_PS GFXPIPE_3D_CMD(0x0, 0x2F)
+#define CMD_3DSTATE_VF_INSTANCING GFXPIPE_3D_CMD(0x0, 0x49)
+#define CMD_3DSTATE_VF_SGVS GFXPIPE_3D_CMD(0x0, 0x4A)
+#define CMD_3DSTATE_VF_TOPOLOGY GFXPIPE_3D_CMD(0x0, 0x4B)
+#define CMD_3DSTATE_WM_CHROMAKEY GFXPIPE_3D_CMD(0x0, 0x4C)
+#define CMD_3DSTATE_PS_BLEND GFXPIPE_3D_CMD(0x0, 0x4D)
+#define CMD_3DSTATE_WM_DEPTH_STENCIL GFXPIPE_3D_CMD(0x0, 0x4E)
+#define CMD_3DSTATE_PS_EXTRA GFXPIPE_3D_CMD(0x0, 0x4F)
+#define CMD_3DSTATE_RASTER GFXPIPE_3D_CMD(0x0, 0x50)
+#define CMD_3DSTATE_SBE_SWIZ GFXPIPE_3D_CMD(0x0, 0x51)
+#define CMD_3DSTATE_WM_HZ_OP GFXPIPE_3D_CMD(0x0, 0x52)
+#define CMD_3DSTATE_VF_COMPONENT_PACKING GFXPIPE_3D_CMD(0x0, 0x55)
+#define CMD_3DSTATE_VF_SGVS_2 GFXPIPE_3D_CMD(0x0, 0x56)
+#define CMD_3DSTATE_VFG GFXPIPE_3D_CMD(0x0, 0x57)
+#define CMD_3DSTATE_URB_ALLOC_VS GFXPIPE_3D_CMD(0x0, 0x58)
+#define CMD_3DSTATE_URB_ALLOC_HS GFXPIPE_3D_CMD(0x0, 0x59)
+#define CMD_3DSTATE_URB_ALLOC_DS GFXPIPE_3D_CMD(0x0, 0x5A)
+#define CMD_3DSTATE_URB_ALLOC_GS GFXPIPE_3D_CMD(0x0, 0x5B)
+#define CMD_3DSTATE_SO_BUFFER_INDEX_0 GFXPIPE_3D_CMD(0x0, 0x60)
+#define CMD_3DSTATE_SO_BUFFER_INDEX_1 GFXPIPE_3D_CMD(0x0, 0x61)
+#define CMD_3DSTATE_SO_BUFFER_INDEX_2 GFXPIPE_3D_CMD(0x0, 0x62)
+#define CMD_3DSTATE_SO_BUFFER_INDEX_3 GFXPIPE_3D_CMD(0x0, 0x63)
+#define CMD_3DSTATE_PRIMITIVE_REPLICATION GFXPIPE_3D_CMD(0x0, 0x6C)
+#define CMD_3DSTATE_TBIMR_TILE_PASS_INFO GFXPIPE_3D_CMD(0x0, 0x6E)
+#define CMD_3DSTATE_AMFS GFXPIPE_3D_CMD(0x0, 0x6F)
+#define CMD_3DSTATE_DEPTH_BOUNDS GFXPIPE_3D_CMD(0x0, 0x71)
+#define CMD_3DSTATE_AMFS_TEXTURE_POINTERS GFXPIPE_3D_CMD(0x0, 0x72)
+#define CMD_3DSTATE_CONSTANT_TS_POINTER GFXPIPE_3D_CMD(0x0, 0x73)
+#define CMD_3DSTATE_MESH_CONTROL GFXPIPE_3D_CMD(0x0, 0x77)
+#define CMD_3DSTATE_MESH_DISTRIB GFXPIPE_3D_CMD(0x0, 0x78)
+#define CMD_3DSTATE_TASK_REDISTRIB GFXPIPE_3D_CMD(0x0, 0x79)
+#define CMD_3DSTATE_MESH_SHADER GFXPIPE_3D_CMD(0x0, 0x7A)
+#define CMD_3DSTATE_MESH_SHADER_DATA GFXPIPE_3D_CMD(0x0, 0x7B)
+#define CMD_3DSTATE_TASK_CONTROL GFXPIPE_3D_CMD(0x0, 0x7C)
+#define CMD_3DSTATE_TASK_SHADER GFXPIPE_3D_CMD(0x0, 0x7D)
+#define CMD_3DSTATE_TASK_SHADER_DATA GFXPIPE_3D_CMD(0x0, 0x7E)
+#define CMD_3DSTATE_URB_ALLOC_MESH GFXPIPE_3D_CMD(0x0, 0x7F)
+#define CMD_3DSTATE_URB_ALLOC_TASK GFXPIPE_3D_CMD(0x0, 0x80)
+#define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81)
+#define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82)
+#define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83)
+
+#define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0)
+#define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4)
+#define CMD_3DSTATE_POLY_STIPPLE_OFFSET GFXPIPE_3D_CMD(0x1, 0x6)
+#define CMD_3DSTATE_POLY_STIPPLE_PATTERN GFXPIPE_3D_CMD(0x1, 0x7)
+#define CMD_3DSTATE_LINE_STIPPLE GFXPIPE_3D_CMD(0x1, 0x8)
+#define CMD_3DSTATE_AA_LINE_PARAMETERS GFXPIPE_3D_CMD(0x1, 0xA)
+#define CMD_3DSTATE_MONOFILTER_SIZE GFXPIPE_3D_CMD(0x1, 0x11)
+#define CMD_3DSTATE_PUSH_CONSTANT_ALLOC_VS GFXPIPE_3D_CMD(0x1, 0x12)
+#define CMD_3DSTATE_PUSH_CONSTANT_ALLOC_HS GFXPIPE_3D_CMD(0x1, 0x13)
+#define CMD_3DSTATE_PUSH_CONSTANT_ALLOC_DS GFXPIPE_3D_CMD(0x1, 0x14)
+#define CMD_3DSTATE_PUSH_CONSTANT_ALLOC_GS GFXPIPE_3D_CMD(0x1, 0x15)
+#define CMD_3DSTATE_PUSH_CONSTANT_ALLOC_PS GFXPIPE_3D_CMD(0x1, 0x16)
+#define CMD_3DSTATE_SO_DECL_LIST GFXPIPE_3D_CMD(0x1, 0x17)
+#define CMD_3DSTATE_SO_DECL_LIST_DW_LEN REG_GENMASK(8, 0)
+#define CMD_3DSTATE_SO_BUFFER GFXPIPE_3D_CMD(0x1, 0x18)
+#define CMD_3DSTATE_BINDING_TABLE_POOL_ALLOC GFXPIPE_3D_CMD(0x1, 0x19)
+#define CMD_3DSTATE_SAMPLE_PATTERN GFXPIPE_3D_CMD(0x1, 0x1C)
+#define CMD_3DSTATE_3D_MODE GFXPIPE_3D_CMD(0x1, 0x1E)
+#define CMD_3DSTATE_SUBSLICE_HASH_TABLE GFXPIPE_3D_CMD(0x1, 0x1F)
+#define CMD_3DSTATE_SLICE_TABLE_STATE_POINTERS GFXPIPE_3D_CMD(0x1, 0x20)
+#define CMD_3DSTATE_PTBR_TILE_PASS_INFO GFXPIPE_3D_CMD(0x1, 0x22)
+
+#endif
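Editor's note, a worked expansion as a sanity check on the encoding helpers above:

/*
 * STATE_BASE_ADDRESS = GFXPIPE_COMMON_CMD(0x1, 0x1)
 *	= (0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16)
 *	= 0x61010000
 */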
diff --git a/drivers/gpu/drm/xe/instructions/xe_gsc_commands.h b/drivers/gpu/drm/xe/instructions/xe_gsc_commands.h
new file mode 100644
index 000000000..f8949cad9
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_gsc_commands.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_COMMANDS_H_
+#define _XE_GSC_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/*
+ * All GSCCS-specific commands have fixed length, so we can include it in the
+ * defines. Note that the generic GSC command header structure includes an
+ * optional data field in bits 9-21, but there are no commands that actually use
+ * it; some of the commands are instead defined as having an extended length
+ * field spanning bits 0-15, even if the extra bits are not required because the
+ * longest GSCCS command is only 8 dwords. To handle this, the defines below use
+ * a single field for both data and len. If we ever get a command that does
+ * actually have data and this approach doesn't work for it, we can re-work it
+ * at that point.
+ */
+
+#define GSC_OPCODE REG_GENMASK(28, 22)
+#define GSC_CMD_DATA_AND_LEN REG_GENMASK(21, 0)
+
+#define __GSC_INSTR(op, dl) \
+ (XE_INSTR_GSC | \
+ REG_FIELD_PREP(GSC_OPCODE, op) | \
+ REG_FIELD_PREP(GSC_CMD_DATA_AND_LEN, dl))
+
+#define GSC_HECI_CMD_PKT __GSC_INSTR(0, 6)
+
+#define GSC_FW_LOAD __GSC_INSTR(1, 2)
+#define GSC_FW_LOAD_LIMIT_VALID REG_BIT(31)
+
+#endif
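Editor's note, worked expansions of the two commands: GSC_HECI_CMD_PKT = (0x2 << 29) | (0x0 << 22) | 6 = 0x40000006 and GSC_FW_LOAD = (0x2 << 29) | (0x1 << 22) | 2 = 0x40400002, the low field being the fixed data-and-length value described in the comment above.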
diff --git a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
new file mode 100644
index 000000000..04179b2a4
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_INSTR_DEFS_H_
+#define _XE_INSTR_DEFS_H_
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * The first dword of any GPU instruction is the "instruction header." Bits
+ * 31:29 identify the general type of the command and determine how exact
+ * opcodes and sub-opcodes will be encoded in the remaining bits.
+ */
+#define XE_INSTR_CMD_TYPE GENMASK(31, 29)
+#define XE_INSTR_MI REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x0)
+#define XE_INSTR_GSC REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x2)
+#define XE_INSTR_GFXPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
+
+/*
+ * Most (but not all) instructions have a "length" field in the instruction
+ * header. The value expected is the total number of dwords for the
+ * instruction, minus two.
+ *
+ * Some instructions have length fields longer or shorter than 8 bits, but
+ * those are rare. This definition can be used for the common case where
+ * the length field is from 7:0.
+ */
+#define XE_INSTR_LEN_MASK GENMASK(7, 0)
+#define XE_INSTR_NUM_DW(x) REG_FIELD_PREP(XE_INSTR_LEN_MASK, (x) - 2)
+
+#endif
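Editor's note, one worked case: a 4-dword instruction programs XE_INSTR_NUM_DW(4) = REG_FIELD_PREP(XE_INSTR_LEN_MASK, 2), i.e. the length field carries the total dword count minus two.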
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
new file mode 100644
index 000000000..1cfa96167
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MI_COMMANDS_H_
+#define _XE_MI_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/*
+ * MI (Memory Interface) commands are supported by all GT engines. They
+ * provide general memory operations and command streamer control. MI commands
+ * have a command type of 0x0 (MI_COMMAND) in bits 31:29 of the instruction
+ * header dword and a specific MI opcode in bits 28:23.
+ */
+
+#define MI_OPCODE REG_GENMASK(28, 23)
+#define MI_SUBOPCODE REG_GENMASK(22, 17) /* used with MI_EXPANSION */
+
+#define __MI_INSTR(opcode) \
+ (XE_INSTR_MI | REG_FIELD_PREP(MI_OPCODE, opcode))
+
+#define MI_NOOP __MI_INSTR(0x0)
+#define MI_USER_INTERRUPT __MI_INSTR(0x2)
+#define MI_ARB_CHECK __MI_INSTR(0x5)
+
+#define MI_ARB_ON_OFF __MI_INSTR(0x8)
+#define MI_ARB_ENABLE REG_BIT(0)
+#define MI_ARB_DISABLE 0x0
+
+#define MI_BATCH_BUFFER_END __MI_INSTR(0xA)
+#define MI_TOPOLOGY_FILTER __MI_INSTR(0xD)
+#define MI_FORCE_WAKEUP __MI_INSTR(0x1D)
+
+#define MI_STORE_DATA_IMM __MI_INSTR(0x20)
+#define MI_SDI_GGTT REG_BIT(22)
+#define MI_SDI_LEN_DW GENMASK(9, 0)
+#define MI_SDI_NUM_DW(x) REG_FIELD_PREP(MI_SDI_LEN_DW, (x) + 3 - 2)
+#define MI_SDI_NUM_QW(x) (REG_FIELD_PREP(MI_SDI_LEN_DW, 2 * (x) + 3 - 2) | \
+ REG_BIT(21))
+
+#define MI_LOAD_REGISTER_IMM __MI_INSTR(0x22)
+#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
+#define MI_LRI_MMIO_REMAP_EN REG_BIT(17)
+#define MI_LRI_NUM_REGS(x) XE_INSTR_NUM_DW(2 * (x) + 1)
+#define MI_LRI_FORCE_POSTED REG_BIT(12)
+
+#define MI_FLUSH_DW __MI_INSTR(0x26)
+#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
+#define MI_INVALIDATE_TLB REG_BIT(18)
+#define MI_FLUSH_DW_CCS REG_BIT(16)
+#define MI_FLUSH_DW_OP_STOREDW REG_BIT(14)
+#define MI_FLUSH_DW_LEN_DW REG_GENMASK(5, 0)
+#define MI_FLUSH_IMM_DW REG_FIELD_PREP(MI_FLUSH_DW_LEN_DW, 4 - 2)
+#define MI_FLUSH_IMM_QW REG_FIELD_PREP(MI_FLUSH_DW_LEN_DW, 5 - 2)
+#define MI_FLUSH_DW_USE_GTT REG_BIT(2)
+
+#define MI_BATCH_BUFFER_START __MI_INSTR(0x31)
+
+#endif
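Editor's note: a hedged sketch of how these compose; the emitting context is assumed. A single-qword store to a GGTT address would begin with:

u32 dw0 = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_QW(1);
/* = 0x10000000 | BIT(22) | BIT(21) | 0x3 = 0x10600003 */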
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
new file mode 100644
index 000000000..5592774fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_ENGINE_REGS_H_
+#define _XE_ENGINE_REGS_H_
+
+#include <asm/page.h>
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * These *_BASE values represent the MMIO offset where each hardware engine's
+ * registers start. The other definitions in this header are parameterized
+ * macros that will take one of these values as a parameter.
+ */
+#define RENDER_RING_BASE 0x02000
+#define BSD_RING_BASE 0x1c0000
+#define BSD2_RING_BASE 0x1c4000
+#define BSD3_RING_BASE 0x1d0000
+#define BSD4_RING_BASE 0x1d4000
+#define XEHP_BSD5_RING_BASE 0x1e0000
+#define XEHP_BSD6_RING_BASE 0x1e4000
+#define XEHP_BSD7_RING_BASE 0x1f0000
+#define XEHP_BSD8_RING_BASE 0x1f4000
+#define VEBOX_RING_BASE 0x1c8000
+#define VEBOX2_RING_BASE 0x1d8000
+#define XEHP_VEBOX3_RING_BASE 0x1e8000
+#define XEHP_VEBOX4_RING_BASE 0x1f8000
+#define COMPUTE0_RING_BASE 0x1a000
+#define COMPUTE1_RING_BASE 0x1c000
+#define COMPUTE2_RING_BASE 0x1e000
+#define COMPUTE3_RING_BASE 0x26000
+#define BLT_RING_BASE 0x22000
+#define XEHPC_BCS1_RING_BASE 0x3e0000
+#define XEHPC_BCS2_RING_BASE 0x3e2000
+#define XEHPC_BCS3_RING_BASE 0x3e4000
+#define XEHPC_BCS4_RING_BASE 0x3e6000
+#define XEHPC_BCS5_RING_BASE 0x3e8000
+#define XEHPC_BCS6_RING_BASE 0x3ea000
+#define XEHPC_BCS7_RING_BASE 0x3ec000
+#define XEHPC_BCS8_RING_BASE 0x3ee000
+#define GSCCS_RING_BASE 0x11a000
+
+#define RING_TAIL(base) XE_REG((base) + 0x30)
+
+#define RING_HEAD(base) XE_REG((base) + 0x34)
+#define HEAD_ADDR 0x001FFFFC
+
+#define RING_START(base) XE_REG((base) + 0x38)
+
+#define RING_CTL(base) XE_REG((base) + 0x3c)
+#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
+
+#define RING_PSMI_CTL(base) XE_REG((base) + 0x50, XE_REG_OPTION_MASKED)
+#define RC_SEMA_IDLE_MSG_DISABLE REG_BIT(12)
+#define WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7)
+#define IDLE_MSG_DISABLE REG_BIT(0)
+
+#define RING_PWRCTX_MAXCNT(base) XE_REG((base) + 0x54)
+#define IDLE_WAIT_TIME REG_GENMASK(19, 0)
+
+#define RING_ACTHD_UDW(base) XE_REG((base) + 0x5c)
+#define RING_DMA_FADD_UDW(base) XE_REG((base) + 0x60)
+#define RING_IPEHR(base) XE_REG((base) + 0x68)
+#define RING_ACTHD(base) XE_REG((base) + 0x74)
+#define RING_DMA_FADD(base) XE_REG((base) + 0x78)
+#define RING_HWS_PGA(base) XE_REG((base) + 0x80)
+#define RING_HWSTAM(base) XE_REG((base) + 0x98)
+#define RING_MI_MODE(base) XE_REG((base) + 0x9c)
+#define RING_NOPID(base) XE_REG((base) + 0x94)
+
+#define FF_THREAD_MODE(base) XE_REG((base) + 0xa0)
+#define FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
+
+#define RING_IMR(base) XE_REG((base) + 0xa8)
+
+#define RING_EIR(base) XE_REG((base) + 0xb0)
+#define RING_EMR(base) XE_REG((base) + 0xb4)
+#define RING_ESR(base) XE_REG((base) + 0xb8)
+
+#define RING_CMD_CCTL(base) XE_REG((base) + 0xc4, XE_REG_OPTION_MASKED)
+/*
+ * CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
+ * The lsb of each can be considered a separate enabling bit for encryption.
+ * 6:0 == default MOCS value for reads => 6:1 == table index for reads.
+ * 13:7 == default MOCS value for writes => 13:8 == table index for writes.
+ * 15:14 == Reserved => 31:30 are set to 0.
+ */
+#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 8)
+#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 1)
+
+#define CSFE_CHICKEN1(base) XE_REG((base) + 0xd4, XE_REG_OPTION_MASKED)
+#define GHWSP_CSB_REPORT_DIS REG_BIT(15)
+#define PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS REG_BIT(14)
+
+#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
+#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
+
+#define FF_SLICE_CS_CHICKEN2(base) XE_REG((base) + 0xe4, XE_REG_OPTION_MASKED)
+#define PERF_FIX_BALANCING_CFE_DISABLE REG_BIT(15)
+
+#define CS_DEBUG_MODE1(base) XE_REG((base) + 0xec, XE_REG_OPTION_MASKED)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
+#define REPLAY_MODE_GRANULARITY REG_BIT(0)
+
+#define RING_BBADDR(base) XE_REG((base) + 0x140)
+#define RING_BBADDR_UDW(base) XE_REG((base) + 0x168)
+
+#define BCS_SWCTRL(base) XE_REG((base) + 0x200, XE_REG_OPTION_MASKED)
+#define BCS_SWCTRL_DISABLE_256B REG_BIT(2)
+
+/* Handle the MOCS value in BLIT_CCTL the same way as in CMD_CCTL */
+#define BLIT_CCTL(base) XE_REG((base) + 0x204)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 9)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 1)
+
+#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
+
+#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+
+#define RING_MODE(base) XE_REG((base) + 0x29c)
+#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
+
+#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
+
+#define RING_TIMESTAMP_UDW(base) XE_REG((base) + 0x358 + 4)
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define STOP_RING REG_BIT(8)
+#define TAIL_ADDR 0x001FFFF8
+
+#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+
+#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_DENY REG_BIT(30)
+#define RING_FORCE_TO_NONPRIV_ACCESS_MASK REG_GENMASK(29, 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_RW REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_ACCESS_MASK, 0)
+#define RING_FORCE_TO_NONPRIV_ACCESS_RD REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_ACCESS_MASK, 1)
+#define RING_FORCE_TO_NONPRIV_ACCESS_WR REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_ACCESS_MASK, 2)
+#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_ACCESS_MASK, 3)
+#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
+#define RING_FORCE_TO_NONPRIV_RANGE_MASK REG_GENMASK(1, 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_1 REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_RANGE_MASK, 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_4 REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_RANGE_MASK, 1)
+#define RING_FORCE_TO_NONPRIV_RANGE_16 REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_RANGE_MASK, 2)
+#define RING_FORCE_TO_NONPRIV_RANGE_64 REG_FIELD_PREP(RING_FORCE_TO_NONPRIV_RANGE_MASK, 3)
+#define RING_FORCE_TO_NONPRIV_MASK_VALID (RING_FORCE_TO_NONPRIV_RANGE_MASK | \
+ RING_FORCE_TO_NONPRIV_ACCESS_MASK | \
+ RING_FORCE_TO_NONPRIV_DENY)
+#define RING_MAX_NONPRIV_SLOTS 12
+
+#define RING_EXECLIST_SQ_CONTENTS_LO(base) XE_REG((base) + 0x510)
+#define RING_EXECLIST_SQ_CONTENTS_HI(base) XE_REG((base) + 0x510 + 4)
+
+#define RING_EXECLIST_CONTROL(base) XE_REG((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+#define CS_CHICKEN1(base) XE_REG((base) + 0x580, XE_REG_OPTION_MASKED)
+#define PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
+#define PREEMPT_GPGPU_MID_THREAD_LEVEL PREEMPT_GPGPU_LEVEL(0, 0)
+#define PREEMPT_GPGPU_THREAD_GROUP_LEVEL PREEMPT_GPGPU_LEVEL(0, 1)
+#define PREEMPT_GPGPU_COMMAND_LEVEL PREEMPT_GPGPU_LEVEL(1, 0)
+#define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1)
+#define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0)
+
+#define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08)
+#define CG3DDISHRS_CLKGATE_DIS REG_BIT(5)
+
+#define VDBOX_CGCTL3F10(base) XE_REG((base) + 0x3f10)
+#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+
+#define VDBOX_CGCTL3F18(base) XE_REG((base) + 0x3f18)
+#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+
+#define VDBOX_CGCTL3F1C(base) XE_REG((base) + 0x3f1c)
+#define MFXPIPE_CLKGATE_DIS REG_BIT(3)
+
+#endif
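Editor's note, illustrating the CMD_CCTL comment above (the indices are assumptions for the example): selecting MOCS table entry 3 for reads and entry 5 for writes would be encoded as

u32 cmd_cctl = REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, 3) |
	       REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, 5);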
diff --git a/drivers/gpu/drm/xe/regs/xe_gpu_commands.h b/drivers/gpu/drm/xe/regs/xe_gpu_commands.h
new file mode 100644
index 000000000..a255946b6
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_gpu_commands.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GPU_COMMANDS_H_
+#define _XE_GPU_COMMANDS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define XY_CTRL_SURF_COPY_BLT ((2 << 29) | (0x48 << 22) | 3)
+#define SRC_ACCESS_TYPE_SHIFT 21
+#define DST_ACCESS_TYPE_SHIFT 20
+#define CCS_SIZE_MASK GENMASK(17, 8)
+#define XE2_CCS_SIZE_MASK GENMASK(18, 9)
+#define XY_CTRL_SURF_MOCS_MASK GENMASK(31, 26)
+#define XE2_XY_CTRL_SURF_MOCS_INDEX_MASK GENMASK(31, 28)
+#define NUM_CCS_BYTES_PER_BLOCK 256
+#define NUM_BYTES_PER_CCS_BYTE(_xe) (GRAPHICS_VER(_xe) >= 20 ? 512 : 256)
+
+#define XY_FAST_COLOR_BLT_CMD (2 << 29 | 0x44 << 22)
+#define XY_FAST_COLOR_BLT_DEPTH_32 (2 << 19)
+#define XY_FAST_COLOR_BLT_DW 16
+#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 22)
+#define XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK GENMASK(27, 24)
+#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31
+
+#define XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
+#define XY_FAST_COPY_BLT_DEPTH_32 (3<<24)
+#define XY_FAST_COPY_BLT_D1_SRC_TILE4 REG_BIT(31)
+#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
+#define XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK GENMASK(23, 20)
+
+#define PVC_MEM_SET_CMD (2 << 29 | 0x5b << 22)
+#define PVC_MEM_SET_CMD_LEN_DW 7
+#define PVC_MEM_SET_MATRIX REG_BIT(17)
+#define PVC_MEM_SET_DATA_FIELD GENMASK(31, 24)
+/* Bspec lists field as [6:0], but index alone is from [6:1] */
+#define PVC_MEM_SET_MOCS_INDEX_MASK GENMASK(6, 1)
+#define XE2_MEM_SET_MOCS_INDEX_MASK GENMASK(6, 3)
+
+#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+
+#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
+
+#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)
+#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28)
+#define PIPE_CONTROL_AMFS_FLUSH (1<<25)
+#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24)
+#define PIPE_CONTROL_LRI_POST_SYNC BIT(23)
+#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
+#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET (1<<19)
+#define PIPE_CONTROL_TLB_INVALIDATE BIT(18)
+#define PIPE_CONTROL_PSD_SYNC (1<<17)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12)
+#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11)
+#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10)
+#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7)
+#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
+#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
+#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
+#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
+#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
+#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
+
+#endif
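Editor's note, a worked expansion: GFX_OP_PIPE_CONTROL(6) = (0x3 << 29) | (0x3 << 27) | (0x2 << 24) | 4 = 0x7a000004; the length field again carries total dwords minus two, matching the convention in xe_instr_defs.h.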
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
new file mode 100644
index 000000000..9886ec9cb
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_REGS_H_
+#define _XE_GSC_REGS_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "regs/xe_reg_defs.h"
+
+/* Definitions of GSC H/W registers, bits, etc */
+
+#define MTL_GSC_HECI1_BASE 0x00116000
+#define MTL_GSC_HECI2_BASE 0x00117000
+
+#define HECI_H_CSR(base) XE_REG((base) + 0x4)
+#define HECI_H_CSR_IE REG_BIT(0)
+#define HECI_H_CSR_IS REG_BIT(1)
+#define HECI_H_CSR_IG REG_BIT(2)
+#define HECI_H_CSR_RDY REG_BIT(3)
+#define HECI_H_CSR_RST REG_BIT(4)
+
+/*
+ * The FWSTS register values are FW defined and can be different between
+ * HECI1 and HECI2
+ */
+#define HECI_FWSTS1(base) XE_REG((base) + 0xc40)
+#define HECI1_FWSTS1_CURRENT_STATE REG_GENMASK(3, 0)
+#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
+#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
+#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
+#define HECI_FWSTS5(base) XE_REG((base) + 0xc68)
+#define HECI1_FWSTS5_HUC_AUTH_DONE REG_BIT(19)
+
+#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
+#define HECI_H_GS1_ER_PREP REG_BIT(0)
+
+#endif
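
A hedged sketch of consuming these definitions: polling HECI1 FWSTS1 for the proxy state. xe_mmio_read32() and its (gt, reg) signature are assumed from the surrounding driver, not defined in this hunk; REG_FIELD_GET comes from the compat register helpers.

	/* Sketch: has the GSC firmware on HECI1 reached the normal proxy state? */
	static bool gsc_proxy_ready_sketch(struct xe_gt *gt)
	{
		u32 fwsts = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));

		return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts) ==
		       HECI1_FWSTS1_PROXY_STATE_NORMAL;
	}
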
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
new file mode 100644
index 000000000..1dd361046
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_REGS_H_
+#define _XE_GT_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * The GSI register range [0x0 - 0x40000) is replicated at a higher offset
+ * for the media GT. xe_mmio and xe_gt_mcr functions will automatically
+ * translate offsets by MEDIA_GT_GSI_OFFSET when operating on the media GT.
+ */
+#define MEDIA_GT_GSI_OFFSET 0x380000
+#define MEDIA_GT_GSI_LENGTH 0x40000
+
+/* MTL workpoint reg to get core C state and actual freq of 3D, SAMedia */
+#define MTL_MIRROR_TARGET_WP1 XE_REG(0xc60)
+#define MTL_CAGF_MASK REG_GENMASK(8, 0)
+#define MTL_CC_MASK REG_GENMASK(12, 9)
+
+/* RPM unit config (Gen8+) */
+#define RPM_CONFIG0 XE_REG(0xd00)
+#define RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK REG_GENMASK(5, 3)
+#define RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0
+#define RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1
+#define RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2
+#define RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3
+#define RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
+
+#define FORCEWAKE_ACK_MEDIA_VDBOX(n) XE_REG(0xd50 + (n) * 4)
+#define FORCEWAKE_ACK_MEDIA_VEBOX(n) XE_REG(0xd70 + (n) * 4)
+#define FORCEWAKE_ACK_RENDER XE_REG(0xd84)
+
+#define GMD_ID XE_REG(0xd8c)
+#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
+#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
+#define GMD_ID_REVID REG_GENMASK(5, 0)
+
+#define FORCEWAKE_ACK_GSC XE_REG(0xdf8)
+#define FORCEWAKE_ACK_GT_MTL XE_REG(0xdfc)
+
+#define MCFG_MCR_SELECTOR XE_REG(0xfd0)
+#define MTL_MCR_SELECTOR XE_REG(0xfd4)
+#define SF_MCR_SELECTOR XE_REG(0xfd8)
+#define MCR_SELECTOR XE_REG(0xfdc)
+#define GAM_MCR_SELECTOR XE_REG(0xfe0)
+#define MCR_MULTICAST REG_BIT(31)
+#define MCR_SLICE_MASK REG_GENMASK(30, 27)
+#define MCR_SLICE(slice) REG_FIELD_PREP(MCR_SLICE_MASK, slice)
+#define MCR_SUBSLICE_MASK REG_GENMASK(26, 24)
+#define MCR_SUBSLICE(subslice) REG_FIELD_PREP(MCR_SUBSLICE_MASK, subslice)
+#define MTL_MCR_GROUPID REG_GENMASK(11, 8)
+#define MTL_MCR_INSTANCEID REG_GENMASK(3, 0)
+
+#define PS_INVOCATION_COUNT XE_REG(0x2348)
+
+#define XELP_GLOBAL_MOCS(i) XE_REG(0x4000 + (i) * 4)
+#define XEHP_GLOBAL_MOCS(i) XE_REG_MCR(0x4000 + (i) * 4)
+#define CCS_AUX_INV XE_REG(0x4208)
+
+#define VD0_AUX_INV XE_REG(0x4218)
+#define VE0_AUX_INV XE_REG(0x4238)
+
+#define VE1_AUX_INV XE_REG(0x42b8)
+#define AUX_INV REG_BIT(0)
+
+#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
+#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
+
+#define WM_CHICKEN3 XE_REG_MCR(0x5588, XE_REG_OPTION_MASKED)
+#define HIZ_PLANE_COMPRESSION_DIS REG_BIT(10)
+
+#define CHICKEN_RASTER_2 XE_REG_MCR(0x6208, XE_REG_OPTION_MASKED)
+#define TBIMR_FAST_CLIP REG_BIT(5)
+
+#define FF_MODE XE_REG_MCR(0x6210)
+#define DIS_TE_AUTOSTRIP REG_BIT(31)
+#define DIS_MESH_PARTIAL_AUTOSTRIP REG_BIT(16)
+#define DIS_MESH_AUTOSTRIP REG_BIT(15)
+
+#define VFLSKPD XE_REG_MCR(0x62a8, XE_REG_OPTION_MASKED)
+#define DIS_PARTIAL_AUTOSTRIP REG_BIT(9)
+#define DIS_AUTOSTRIP REG_BIT(6)
+#define DIS_OVER_FETCH_CACHE REG_BIT(1)
+#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
+
+#define FF_MODE2 XE_REG(0x6604)
+#define XEHP_FF_MODE2 XE_REG_MCR(0x6604)
+#define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24)
+#define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+
+#define CACHE_MODE_1 XE_REG(0x7004, XE_REG_OPTION_MASKED)
+#define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11)
+
+#define COMMON_SLICE_CHICKEN1 XE_REG(0x7010)
+
+#define HIZ_CHICKEN XE_REG(0x7018, XE_REG_OPTION_MASKED)
+#define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE REG_BIT(14)
+#define HZ_DEPTH_TEST_LE_GE_OPT_DISABLE REG_BIT(13)
+
+#define XEHP_PSS_MODE2 XE_REG_MCR(0x703c, XE_REG_OPTION_MASKED)
+#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
+
+#define XEHP_PSS_CHICKEN XE_REG_MCR(0x7044, XE_REG_OPTION_MASKED)
+#define FLSH_IGNORES_PSD REG_BIT(10)
+#define FD_END_COLLECT REG_BIT(5)
+
+#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
+#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
+
+#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)
+#define XEHP_COMMON_SLICE_CHICKEN3 XE_REG_MCR(0x7304, XE_REG_OPTION_MASKED)
+#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12)
+#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12)
+#define BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11)
+#define DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9)
+
+#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
+#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+
+#define VF_PREEMPTION XE_REG(0x83a4, XE_REG_OPTION_MASKED)
+#define PREEMPTION_VERTEX_COUNT REG_GENMASK(15, 0)
+
+#define VF_SCRATCHPAD XE_REG(0x83a8, XE_REG_OPTION_MASKED)
+#define XE2_VFG_TED_CREDIT_INTERFACE_DISABLE REG_BIT(13)
+
+#define VFG_PREEMPTION_CHICKEN XE_REG(0x83b4, XE_REG_OPTION_MASKED)
+#define POLYGON_TRIFAN_LINELOOP_DISABLE REG_BIT(4)
+
+#define SQCNT1 XE_REG_MCR(0x8718)
+#define XELPMP_SQCNT1 XE_REG(0x8718)
+#define ENFORCE_RAR REG_BIT(23)
+
+#define XEHP_SQCM XE_REG_MCR(0x8724)
+#define EN_32B_ACCESS REG_BIT(30)
+
+#define XE2_FLAT_CCS_BASE_RANGE_LOWER XE_REG_MCR(0x8800)
+#define XE2_FLAT_CCS_ENABLE REG_BIT(0)
+
+#define GSCPSMI_BASE XE_REG(0x880c)
+
+/* Fuse readout registers for GT */
+#define XEHP_FUSE4 XE_REG(0x9114)
+#define CCS_EN_MASK REG_GENMASK(19, 16)
+#define GT_L3_EXC_MASK REG_GENMASK(6, 4)
+
+#define MIRROR_FUSE3 XE_REG(0x9118)
+#define XE2_NODE_ENABLE_MASK REG_GENMASK(31, 16)
+#define L3BANK_PAIR_COUNT 4
+#define L3BANK_MASK REG_GENMASK(3, 0)
+/* on Xe_HP the same fuses indicate mslices instead of L3 banks */
+#define MAX_MSLICES 4
+#define MEML3_EN_MASK REG_GENMASK(3, 0)
+
+#define XELP_EU_ENABLE XE_REG(0x9134) /* "_DISABLE" on Xe_LP */
+#define XELP_EU_MASK REG_GENMASK(7, 0)
+#define XELP_GT_GEOMETRY_DSS_ENABLE XE_REG(0x913c)
+
+#define GT_VEBOX_VDBOX_DISABLE XE_REG(0x9140)
+#define GT_VEBOX_DISABLE_MASK REG_GENMASK(19, 16)
+#define GT_VDBOX_DISABLE_MASK REG_GENMASK(7, 0)
+
+#define XEHP_GT_COMPUTE_DSS_ENABLE XE_REG(0x9144)
+#define XEHPC_GT_COMPUTE_DSS_ENABLE_EXT XE_REG(0x9148)
+#define XE2_GT_COMPUTE_DSS_2 XE_REG(0x914c)
+#define XE2_GT_GEOMETRY_DSS_1 XE_REG(0x9150)
+#define XE2_GT_GEOMETRY_DSS_2 XE_REG(0x9154)
+
+#define GDRST XE_REG(0x941c)
+#define GRDOM_GUC REG_BIT(3)
+#define GRDOM_FULL REG_BIT(0)
+
+#define MISCCPCTL XE_REG(0x9424)
+#define DOP_CLOCK_GATE_RENDER_ENABLE REG_BIT(1)
+
+#define UNSLCGCTL9430 XE_REG(0x9430)
+#define MSQDUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE XE_REG(0x9434)
+#define VFUNIT_CLKGATE_DIS REG_BIT(20)
+#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */
+#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */
+#define GAMEDIA_CLKGATE_DIS REG_BIT(11)
+#define HSUNIT_CLKGATE_DIS REG_BIT(8)
+#define VSUNIT_CLKGATE_DIS REG_BIT(3)
+
+#define UNSLCGCTL9440 XE_REG(0x9440)
+#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16)
+#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15)
+#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14)
+#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6)
+
+#define UNSLCGCTL9444 XE_REG(0x9444)
+#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30)
+#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29)
+#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28)
+#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27)
+#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26)
+#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25)
+#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24)
+#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23)
+#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22)
+#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21)
+#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20)
+#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19)
+#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18)
+#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17)
+#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
+#define LTCDD_CLKGATE_DIS REG_BIT(10)
+
+#define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define NODEDSS_CLKGATE_DIS REG_BIT(12)
+#define MSCUNIT_CLKGATE_DIS REG_BIT(10)
+#define RCCUNIT_CLKGATE_DIS REG_BIT(7)
+#define SARBUNIT_CLKGATE_DIS REG_BIT(5)
+#define SBEUNIT_CLKGATE_DIS REG_BIT(4)
+
+#define UNSLICE_UNIT_LEVEL_CLKGATE2 XE_REG(0x94e4)
+#define VSUNIT_CLKGATE2_DIS REG_BIT(19)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x9524)
+#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28)
+#define GWUNIT_CLKGATE_DIS REG_BIT(16)
+
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 XE_REG_MCR(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
+#define SSMCGCTL9530 XE_REG_MCR(0x9530)
+#define RTFUNIT_CLKGATE_DIS REG_BIT(18)
+
+#define DFR_RATIO_EN_AND_CHICKEN XE_REG_MCR(0x9550)
+#define DFR_DISABLE REG_BIT(9)
+
+#define RPNSWREQ XE_REG(0xa008)
+#define REQ_RATIO_MASK REG_GENMASK(31, 23)
+
+#define RP_CONTROL XE_REG(0xa024)
+#define RPSWCTL_MASK REG_GENMASK(10, 9)
+#define RPSWCTL_ENABLE REG_FIELD_PREP(RPSWCTL_MASK, 2)
+#define RPSWCTL_DISABLE REG_FIELD_PREP(RPSWCTL_MASK, 0)
+#define RC_CONTROL XE_REG(0xa090)
+#define RC_CTL_HW_ENABLE REG_BIT(31)
+#define RC_CTL_TO_MODE REG_BIT(28)
+#define RC_CTL_RC6_ENABLE REG_BIT(18)
+#define RC_STATE XE_REG(0xa094)
+#define RC_IDLE_HYSTERSIS XE_REG(0xa0ac)
+
+#define PMINTRMSK XE_REG(0xa168)
+#define PMINTR_DISABLE_REDIRECT_TO_GUC REG_BIT(31)
+#define ARAT_EXPIRED_INTRMSK REG_BIT(9)
+
+#define FORCEWAKE_GT XE_REG(0xa188)
+
+#define PG_ENABLE XE_REG(0xa210)
+
+#define CTC_MODE XE_REG(0xa26c)
+#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
+#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0)
+
+#define FORCEWAKE_RENDER XE_REG(0xa278)
+#define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4)
+#define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4)
+#define FORCEWAKE_GSC XE_REG(0xa618)
+
+#define XEHPC_LNCFMISCCFGREG0 XE_REG_MCR(0xb01c, XE_REG_OPTION_MASKED)
+#define XEHPC_OVRLSCCC REG_BIT(0)
+
+/* L3 Cache Control */
+#define XELP_LNCFCMOCS(i) XE_REG(0xb020 + (i) * 4)
+#define XEHP_LNCFCMOCS(i) XE_REG_MCR(0xb020 + (i) * 4)
+#define LNCFCMOCS_REG_COUNT 32
+
+#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
+#define XEHP_LNESPARE REG_BIT(19)
+
+#define XEHP_L3SQCREG5 XE_REG_MCR(0xb158)
+#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0)
+
+#define XEHP_L3SCQREG7 XE_REG_MCR(0xb188)
+#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
+
+#define XEHPC_L3CLOS_MASK(i) XE_REG_MCR(0xb194 + (i) * 8)
+
+#define XE2LPM_L3SQCREG5 XE_REG_MCR(0xb658)
+
+#define XEHP_MERT_MOD_CTRL XE_REG_MCR(0xcf28)
+#define RENDER_MOD_CTRL XE_REG_MCR(0xcf2c)
+#define COMP_MOD_CTRL XE_REG_MCR(0xcf30)
+#define XEHP_VDBX_MOD_CTRL XE_REG_MCR(0xcf34)
+#define XELPMP_VDBX_MOD_CTRL XE_REG(0xcf34)
+#define XEHP_VEBX_MOD_CTRL XE_REG_MCR(0xcf38)
+#define XELPMP_VEBX_MOD_CTRL XE_REG(0xcf38)
+#define FORCE_MISS_FTLB REG_BIT(3)
+
+#define XEHP_GAMSTLB_CTRL XE_REG_MCR(0xcf4c)
+#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
+#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
+#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
+
+#define XEHP_GAMCNTRL_CTRL XE_REG_MCR(0xcf54)
+#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
+
+#define HALF_SLICE_CHICKEN5 XE_REG_MCR(0xe188, XE_REG_OPTION_MASKED)
+#define DISABLE_SAMPLE_G_PERFORMANCE REG_BIT(0)
+
+#define SAMPLER_MODE XE_REG_MCR(0xe18c, XE_REG_OPTION_MASKED)
+#define ENABLE_SMALLPL REG_BIT(15)
+#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
+#define SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
+#define INDIRECT_STATE_BASE_ADDR_OVERRIDE REG_BIT(0)
+
+#define HALF_SLICE_CHICKEN7 XE_REG_MCR(0xe194, XE_REG_OPTION_MASKED)
+#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15)
+
+#define CACHE_MODE_SS XE_REG_MCR(0xe420, XE_REG_OPTION_MASKED)
+#define DISABLE_ECC REG_BIT(5)
+#define ENABLE_PREFETCH_INTO_IC REG_BIT(3)
+
+#define ROW_CHICKEN4 XE_REG_MCR(0xe48c, XE_REG_OPTION_MASKED)
+#define DISABLE_GRF_CLEAR REG_BIT(13)
+#define XEHP_DIS_BBL_SYSPIPE REG_BIT(11)
+#define DISABLE_TDL_PUSH REG_BIT(9)
+#define DIS_PICK_2ND_EU REG_BIT(7)
+#define DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define THREAD_EX_ARB_MODE REG_GENMASK(3, 2)
+#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
+
+#define ROW_CHICKEN3 XE_REG_MCR(0xe49c, XE_REG_OPTION_MASKED)
+#define DIS_FIX_EOT1_FLUSH REG_BIT(9)
+
+#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
+#define UGM_BACKUP_MODE REG_BIT(13)
+#define MDQ_ARBITRATION_MODE REG_BIT(12)
+#define EARLY_EOT_DIS REG_BIT(1)
+
+#define ROW_CHICKEN2 XE_REG_MCR(0xe4f4, XE_REG_OPTION_MASKED)
+#define DISABLE_READ_SUPPRESSION REG_BIT(15)
+#define DISABLE_EARLY_READ REG_BIT(14)
+#define ENABLE_LARGE_GRF_MODE REG_BIT(12)
+#define PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+#define DISABLE_DOP_GATING REG_BIT(0)
+
+#define RT_CTRL XE_REG_MCR(0xe530)
+#define DIS_NULL_QUERY REG_BIT(10)
+
+#define XEHP_HDC_CHICKEN0 XE_REG_MCR(0xe5f0, XE_REG_OPTION_MASKED)
+#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
+#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
+
+#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
+#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
+#define TGM_WRITE_EOM_FORCE REG_BIT(17)
+#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
+#define SEQUENTIAL_ACCESS_UPGRADE_DISABLE REG_BIT(13)
+
+#define LSC_CHICKEN_BIT_0_UDW XE_REG_MCR(0xe7c8 + 4)
+#define UGM_FRAGMENT_THRESHOLD_TO_3 REG_BIT(58 - 32)
+#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32)
+#define XE2_ALLOC_DPA_STARVE_FIX_DIS REG_BIT(47 - 32)
+#define ENABLE_SMP_LD_RENDER_SURFACE_CONTROL REG_BIT(44 - 32)
+#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32)
+#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32)
+#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32)
+#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32)
+
+#define SARB_CHICKEN1 XE_REG_MCR(0xe90c)
+#define COMP_CKN_IN REG_GENMASK(30, 29)
+
+#define RCU_MODE XE_REG(0x14800, XE_REG_OPTION_MASKED)
+#define RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
+#define RCU_MODE_CCS_ENABLE REG_BIT(0)
+
+/*
+ * Total of 4 cslices, where each cslice is in the form:
+ * [0-3] CCS ID
+ * [4-6] RSVD
+ * [7] Disabled
+ */
+#define CCS_MODE XE_REG(0x14804)
+#define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */
+#define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */
+#define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1)
+#define CCS_MODE_CSLICE(cslice, ccs) \
+ ((ccs) << ((cslice) * CCS_MODE_CSLICE_WIDTH))
+
+#define FORCEWAKE_ACK_GT XE_REG(0x130044)
+#define FORCEWAKE_KERNEL BIT(0)
+#define FORCEWAKE_USER BIT(1)
+#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
+
+#define MTL_MEDIA_PERF_LIMIT_REASONS XE_REG(0x138030)
+#define MTL_MEDIA_MC6 XE_REG(0x138048)
+
+#define GT_CORE_STATUS XE_REG(0x138060)
+#define RCN_MASK REG_GENMASK(2, 0)
+#define GT_C0 0
+#define GT_C6 3
+
+#define GT_GFX_RC6_LOCKED XE_REG(0x138104)
+#define GT_GFX_RC6 XE_REG(0x138108)
+
+#define GT0_PERF_LIMIT_REASONS XE_REG(0x1381a8)
+#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
+
+#define GT_PERF_STATUS XE_REG(0x1381b4)
+#define VOLTAGE_MASK REG_GENMASK(10, 0)
+
+#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4))
+
+#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030)
+#define VCS_VECS_INTR_ENABLE XE_REG(0x190034)
+#define GUC_SG_INTR_ENABLE XE_REG(0x190038)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c)
+#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044)
+#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048)
+
+#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4))
+#define INTR_DATA_VALID REG_BIT(31)
+#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
+#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
+#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GSC_INSTANCE 6
+
+#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4))
+#define RCS0_RSVD_INTR_MASK XE_REG(0x190090)
+#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0)
+#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8)
+#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac)
+#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0)
+#define GUC_SG_INTR_MASK XE_REG(0x1900e8)
+#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec)
+#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4)
+#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
+#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
+#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
+#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114)
+#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118)
+#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
+#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
+#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
+#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
+#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+
+#define PVC_GT0_PACKAGE_ENERGY_STATUS XE_REG(0x281004)
+#define PVC_GT0_PACKAGE_RAPL_LIMIT XE_REG(0x281008)
+#define PVC_GT0_PACKAGE_POWER_SKU_UNIT XE_REG(0x281068)
+#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
+#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
+
+#endif
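
To illustrate the CCS_MODE encoding described above (a sketch only; the driver's actual fixed-slice configuration lives elsewhere), routing all four cslices to compute engine 0 composes like this:

	/* Sketch: route every cslice to CCS0; each cslice field is 3 bits wide. */
	static u32 ccs_mode_all_to_ccs0_sketch(void)
	{
		u32 mode = 0;
		int cslice;

		for (cslice = 0; cslice < 4; cslice++)
			mode |= CCS_MODE_CSLICE(cslice, 0);

		/* The result is bounded by CCS_MODE_CSLICE_0_3_MASK on write. */
		return mode;
	}
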
diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
new file mode 100644
index 000000000..92320bbc9
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_REGS_H_
+#define _XE_GUC_REGS_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "regs/xe_reg_defs.h"
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define DIST_DBS_POPULATED XE_REG(0xd08)
+#define DOORBELLS_PER_SQIDI_MASK REG_GENMASK(23, 16)
+#define SQIDIS_DOORBELL_EXIST_MASK REG_GENMASK(15, 0)
+
+#define DRBREGL(x) XE_REG(0x1000 + (x) * 8)
+#define DRB_VALID REG_BIT(0)
+#define DRBREGU(x) XE_REG(0x1000 + (x) * 8 + 4)
+
+#define GTCR XE_REG(0x4274)
+#define GTCR_INVALIDATE REG_BIT(0)
+
+#define GUC_ARAT_C6DIS XE_REG(0xa178)
+
+#define GUC_STATUS XE_REG(0xc000)
+#define GS_AUTH_STATUS_MASK REG_GENMASK(31, 30)
+#define GS_AUTH_STATUS_BAD REG_FIELD_PREP(GS_AUTH_STATUS_MASK, 0x1)
+#define GS_AUTH_STATUS_GOOD REG_FIELD_PREP(GS_AUTH_STATUS_MASK, 0x2)
+#define GS_MIA_MASK REG_GENMASK(18, 16)
+#define GS_MIA_CORE_STATE REG_FIELD_PREP(GS_MIA_MASK, 0x1)
+#define GS_MIA_HALT_REQUESTED REG_FIELD_PREP(GS_MIA_MASK, 0x2)
+#define GS_MIA_ISR_ENTRY REG_FIELD_PREP(GS_MIA_MASK, 0x4)
+#define GS_UKERNEL_MASK REG_GENMASK(15, 8)
+#define GS_BOOTROM_MASK REG_GENMASK(7, 1)
+#define GS_BOOTROM_RSA_FAILED REG_FIELD_PREP(GS_BOOTROM_MASK, 0x50)
+#define GS_BOOTROM_JUMP_PASSED REG_FIELD_PREP(GS_BOOTROM_MASK, 0x76)
+#define GS_MIA_IN_RESET REG_BIT(0)
+
+#define GUC_WOPCM_SIZE XE_REG(0xc050)
+#define GUC_WOPCM_SIZE_MASK REG_GENMASK(31, 12)
+#define GUC_WOPCM_SIZE_LOCKED REG_BIT(0)
+
+#define GUC_SHIM_CONTROL XE_REG(0xc064)
+#define GUC_MOCS_INDEX_MASK REG_GENMASK(27, 24)
+#define GUC_SHIM_WC_ENABLE REG_BIT(21)
+#define GUC_ENABLE_MIA_CLOCK_GATING REG_BIT(15)
+#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA REG_BIT(10)
+#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA REG_BIT(9)
+#define GUC_MSGCH_ENABLE REG_BIT(4)
+#define GUC_ENABLE_MIA_CACHING REG_BIT(2)
+#define GUC_ENABLE_READ_CACHE_LOGIC REG_BIT(1)
+#define GUC_DISABLE_SRAM_INIT_TO_ZEROES REG_BIT(0)
+
+#define SOFT_SCRATCH(n) XE_REG(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT 16
+
+#define HUC_KERNEL_LOAD_INFO XE_REG(0xc1dc)
+#define HUC_LOAD_SUCCESSFUL REG_BIT(0)
+
+#define UOS_RSA_SCRATCH(i) XE_REG(0xc200 + (i) * 4)
+#define UOS_RSA_SCRATCH_COUNT 64
+
+#define DMA_ADDR_0_LOW XE_REG(0xc300)
+#define DMA_ADDR_0_HIGH XE_REG(0xc304)
+#define DMA_ADDR_1_LOW XE_REG(0xc308)
+#define DMA_ADDR_1_HIGH XE_REG(0xc30c)
+#define DMA_ADDR_SPACE_MASK REG_GENMASK(20, 16)
+#define DMA_ADDRESS_SPACE_WOPCM REG_FIELD_PREP(DMA_ADDR_SPACE_MASK, 7)
+#define DMA_ADDRESS_SPACE_GGTT REG_FIELD_PREP(DMA_ADDR_SPACE_MASK, 8)
+#define DMA_COPY_SIZE XE_REG(0xc310)
+#define DMA_CTRL XE_REG(0xc314)
+#define HUC_UKERNEL REG_BIT(9)
+#define UOS_MOVE REG_BIT(4)
+#define START_DMA REG_BIT(0)
+#define DMA_GUC_WOPCM_OFFSET XE_REG(0xc340)
+#define GUC_WOPCM_OFFSET_SHIFT 14
+#define GUC_WOPCM_OFFSET_MASK REG_GENMASK(31, GUC_WOPCM_OFFSET_SHIFT)
+#define HUC_LOADING_AGENT_GUC REG_BIT(1)
+#define GUC_WOPCM_OFFSET_VALID REG_BIT(0)
+#define GUC_MAX_IDLE_COUNT XE_REG(0xc3e4)
+
+#define GUC_SEND_INTERRUPT XE_REG(0xc4c8)
+#define GUC_SEND_TRIGGER REG_BIT(0)
+
+#define GUC_BCS_RCS_IER XE_REG(0xc550)
+#define GUC_VCS2_VCS1_IER XE_REG(0xc554)
+#define GUC_WD_VECS_IER XE_REG(0xc558)
+#define GUC_PM_P24C_IER XE_REG(0xc55c)
+
+#define GUC_TLB_INV_CR XE_REG(0xcee8)
+#define GUC_TLB_INV_CR_INVALIDATE REG_BIT(0)
+
+#define HUC_STATUS2 XE_REG(0xd3b0)
+#define HUC_FW_VERIFIED REG_BIT(7)
+
+#define GT_PM_CONFIG XE_REG(0x13816c)
+#define GT_DOORBELL_ENABLE REG_BIT(0)
+
+#define GUC_HOST_INTERRUPT XE_REG(0x1901f0)
+
+#define VF_SW_FLAG(n) XE_REG(0x190240 + (n) * 4)
+#define VF_SW_FLAG_COUNT 4
+
+#define MED_GUC_HOST_INTERRUPT XE_REG(0x190304)
+
+#define MED_VF_SW_FLAG(n) XE_REG(0x190310 + (n) * 4)
+#define MED_VF_SW_FLAG_COUNT 4
+
+/* GuC Interrupt Vector */
+#define GUC_INTR_GUC2HOST REG_BIT(15)
+#define GUC_INTR_EXEC_ERROR REG_BIT(14)
+#define GUC_INTR_DISPLAY_EVENT REG_BIT(13)
+#define GUC_INTR_SEM_SIG REG_BIT(12)
+#define GUC_INTR_IOMMU2GUC REG_BIT(11)
+#define GUC_INTR_DOORBELL_RANG REG_BIT(10)
+#define GUC_INTR_DMA_DONE REG_BIT(9)
+#define GUC_INTR_FATAL_ERROR REG_BIT(8)
+#define GUC_INTR_NOTIF_ERROR REG_BIT(7)
+#define GUC_INTR_SW_INT_6 REG_BIT(6)
+#define GUC_INTR_SW_INT_5 REG_BIT(5)
+#define GUC_INTR_SW_INT_4 REG_BIT(4)
+#define GUC_INTR_SW_INT_3 REG_BIT(3)
+#define GUC_INTR_SW_INT_2 REG_BIT(2)
+#define GUC_INTR_SW_INT_1 REG_BIT(1)
+#define GUC_INTR_SW_INT_0 REG_BIT(0)
+
+#define GUC_NUM_DOORBELLS 256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+ u32 db_status;
+#define GUC_DOORBELL_DISABLED 0
+#define GUC_DOORBELL_ENABLED 1
+
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
+#endif
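
As a hedged example of decoding GUC_STATUS after a firmware load attempt: the exact ukernel value that means "ready" is firmware-defined and not part of this header, so the non-zero check below is a simplification.

	/* Sketch: coarse GuC load-state decode from a GUC_STATUS readout. */
	static bool guc_load_done_sketch(u32 status)
	{
		if (status & GS_MIA_IN_RESET)
			return false;

		return (status & GS_AUTH_STATUS_MASK) == GS_AUTH_STATUS_GOOD &&
		       REG_FIELD_GET(GS_UKERNEL_MASK, status) != 0;
	}
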
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
new file mode 100644
index 000000000..4be81abc8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_LRC_LAYOUT_H_
+#define _XE_LRC_LAYOUT_H_
+
+#define CTX_CONTEXT_CONTROL (0x02 + 1)
+#define CTX_RING_HEAD (0x04 + 1)
+#define CTX_RING_TAIL (0x06 + 1)
+#define CTX_RING_START (0x08 + 1)
+#define CTX_RING_CTL (0x0a + 1)
+#define CTX_PDP0_UDW (0x30 + 1)
+#define CTX_PDP0_LDW (0x32 + 1)
+
+#endif
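
Each CTX_* offset indexes the value dword that follows the corresponding register offset in the LRC's MI_LOAD_REGISTER_IMM sequence, hence the "+ 1" in every definition. A short sketch of how such an offset is used, assuming regs points at a CPU mapping of the context register state:

	/* Sketch: update the ring tail in a mapped LRC register-state page. */
	static void lrc_set_tail_sketch(u32 *regs, u32 tail)
	{
		regs[CTX_RING_TAIL] = tail;
	}
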
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
new file mode 100644
index 000000000..519dd1067
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MCHBAR_REGS_H_
+#define _XE_MCHBAR_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's PCI config register 0x44 or 0x48 and matches it in
+ * every way.
+ */
+
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+#define PCU_CR_PACKAGE_POWER_SKU XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5930)
+#define PKG_TDP GENMASK_ULL(14, 0)
+#define PKG_MIN_PWR GENMASK_ULL(30, 16)
+#define PKG_MAX_PWR GENMASK_ULL(46, 32)
+#define PKG_MAX_WIN GENMASK_ULL(54, 48)
+#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
+#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
+
+
+#define PCU_CR_PACKAGE_POWER_SKU_UNIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5938)
+#define PKG_PWR_UNIT REG_GENMASK(3, 0)
+#define PKG_ENERGY_UNIT REG_GENMASK(12, 8)
+#define PKG_TIME_UNIT REG_GENMASK(19, 16)
+
+#define PCU_CR_PACKAGE_ENERGY_STATUS XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
+
+#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
+#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
+#define PKG_PWR_LIM_1_EN REG_BIT(15)
+#define PKG_PWR_LIM_1_TIME REG_GENMASK(23, 17)
+#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23, 22)
+#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21, 17)
+
+#endif /* _XE_MCHBAR_REGS_H_ */
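
The SKU unit register encodes scale factors as powers of two, following the usual Intel RAPL convention (an assumption carried over from the RAPL documentation, not stated in this hunk). A sketch of converting the raw package TDP field to microwatts, assuming REG_FIELD_GET64 is available from the compat register helpers:

	/* Sketch: raw TDP is in units of 1/2^PKG_PWR_UNIT watts. */
	static u64 pkg_tdp_uw_sketch(u64 power_sku, u32 power_sku_unit)
	{
		u64 raw = REG_FIELD_GET64(PKG_TDP, power_sku);
		u32 unit = REG_FIELD_GET(PKG_PWR_UNIT, power_sku_unit);

		return (raw * 1000000ULL) >> unit;
	}
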
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
new file mode 100644
index 000000000..c50e7650c
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_REG_DEFS_H_
+#define _XE_REG_DEFS_H_
+
+#include "compat-i915-headers/i915_reg_defs.h"
+
+/**
+ * struct xe_reg - Register definition
+ *
+ * Register definition to be used by the individual register. Although the same
+ * definition is used for xe_reg and xe_reg_mcr, they use different internal
+ * APIs for accesses.
+ */
+struct xe_reg {
+ union {
+ struct {
+ /** @addr: address */
+ u32 addr:28;
+ /**
+			 * @masked: register is "masked", with upper 16 bits used
+ * to identify the bits that are updated on the lower
+ * bits
+ */
+ u32 masked:1;
+ /**
+ * @mcr: register is multicast/replicated in the
+ * hardware and needs special handling. Any register
+ * with this set should also use a type of xe_reg_mcr_t.
+ * It's only here so the few places that deal with MCR
+ * registers specially (xe_sr.c) and tests using the raw
+ * value can inspect it.
+ */
+ u32 mcr:1;
+ /**
+ * @ext: access MMIO extension space for current register.
+ */
+ u32 ext:1;
+ };
+ /** @raw: Raw value with both address and options */
+ u32 raw;
+ };
+};
+
+/**
+ * struct xe_reg_mcr - MCR register definition
+ *
+ * MCR register is the same as a regular register, but uses another type since
+ * the internal API used for accessing them is different: it's never correct to
+ * use regular MMIO access.
+ */
+struct xe_reg_mcr {
+ /** @__reg: The register */
+ struct xe_reg __reg;
+};
+
+
+/**
+ * XE_REG_OPTION_MASKED - Register is "masked", with upper 16 bits marking the
+ * written bits on the lower 16 bits.
+ *
+ * It only applies to registers explicitly marked in bspec with
+ * "Access: Masked". Registers with this option can have write operations to
+ * specific lower bits by setting the corresponding upper bits. Other bits will
+ * not be affected. This allows register writes without needing a RMW cycle and
+ * without caching in software the register value.
+ *
+ * Example: a write with value 0x00010001 will set bit 0 and all other bits
+ * retain their previous values.
+ *
+ * To be used with XE_REG(), XE_REG_MCR() and XE_REG_INITIALIZER().
+ */
+#define XE_REG_OPTION_MASKED .masked = 1
+
+/**
+ * XE_REG_INITIALIZER - Initializer for struct xe_reg.
+ * @r_: Register offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ *
+ * Register field is mandatory, and additional options may be passed as
+ * arguments. Usually ``XE_REG()`` should be preferred since it creates an
+ * object of the right type. However when initializing static const storage,
+ * where a compound literal is not allowed, this can be used instead.
+ */
+#define XE_REG_INITIALIZER(r_, ...) { .addr = r_, __VA_ARGS__ }
+
+
+/**
+ * XE_REG - Create a struct xe_reg from offset and additional flags
+ * @r_: Register offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ */
+#define XE_REG(r_, ...) ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__))
+
+/**
+ * XE_REG_EXT - Create a struct xe_reg from extension offset and additional
+ * flags
+ * @r_: Register extension offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ */
+#define XE_REG_EXT(r_, ...) \
+ ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .ext = 1))
+
+/**
+ * XE_REG_MCR - Create a struct xe_reg_mcr from offset and additional flags
+ * @r_: Register offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ */
+#define XE_REG_MCR(r_, ...) ((const struct xe_reg_mcr){ \
+ .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
+ })
+
+#endif
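
A usage sketch for the initializers above; the register names and offsets are invented for illustration. It also shows the masked-write encoding that XE_REG_OPTION_MASKED documents: the upper 16 bits select which of the lower 16 bits a write actually updates.

	/* Sketch: plain, masked and MCR register definitions. */
	#define EXAMPLE_REG		XE_REG(0x1234)
	#define EXAMPLE_MASKED_REG	XE_REG(0x5678, XE_REG_OPTION_MASKED)
	#define EXAMPLE_MCR_REG		XE_REG_MCR(0x9abc)

	/* In static const storage, where the compound literal form is rejected: */
	static const struct xe_reg example_table[] = {
		XE_REG_INITIALIZER(0x1234),
		XE_REG_INITIALIZER(0x5678, XE_REG_OPTION_MASKED),
	};

	/* Masked write that sets bit 5 only: write-enable mirrored in bits 31:16. */
	static u32 masked_set_bit5_sketch(void)
	{
		return REG_BIT(5) | (REG_BIT(5) << 16);
	}
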
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
new file mode 100644
index 000000000..2c214bb9b
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef _XE_REGS_H_
+#define _XE_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define TIMESTAMP_OVERRIDE XE_REG(0x44074)
+#define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12)
+#define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0)
+
+#define PCU_IRQ_OFFSET 0x444e0
+#define GU_MISC_IRQ_OFFSET 0x444f0
+#define GU_MISC_GSE REG_BIT(27)
+
+#define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084)
+
+#define GU_CNTL_PROTECTED XE_REG(0x10100C)
+#define DRIVERINT_FLR_DIS REG_BIT(31)
+
+#define GU_CNTL XE_REG(0x101010)
+#define LMEM_INIT REG_BIT(7)
+#define DRIVERFLR REG_BIT(31)
+
+#define GU_DEBUG XE_REG(0x101018)
+#define DRIVERFLR_STATUS REG_BIT(31)
+
+#define XEHP_CLOCK_GATE_DIS XE_REG(0x101014)
+#define SGSI_SIDECLK_DIS REG_BIT(17)
+
+#define GGC XE_REG(0x108040)
+#define GMS_MASK REG_GENMASK(15, 8)
+#define GGMS_MASK REG_GENMASK(7, 6)
+
+#define DSMBASE XE_REG(0x1080C0)
+#define BDSM_MASK REG_GENMASK64(63, 20)
+
+#define GSMBASE XE_REG(0x108100)
+
+#define STOLEN_RESERVED XE_REG(0x1082c0)
+#define WOPCM_SIZE_MASK REG_GENMASK64(9, 7)
+
+#define MTL_RP_STATE_CAP XE_REG(0x138000)
+
+#define MTL_GT_RPE_FREQUENCY XE_REG(0x13800c)
+
+#define MTL_MEDIAP_STATE_CAP XE_REG(0x138020)
+#define MTL_RPN_CAP_MASK REG_GENMASK(24, 16)
+#define MTL_RP0_CAP_MASK REG_GENMASK(8, 0)
+
+#define MTL_MPE_FREQUENCY XE_REG(0x13802c)
+#define MTL_RPE_MASK REG_GENMASK(8, 0)
+
+#define DG1_MSTR_TILE_INTR XE_REG(0x190008)
+#define DG1_MSTR_IRQ REG_BIT(31)
+#define DG1_MSTR_TILE(t) REG_BIT(t)
+
+#define GFX_MSTR_IRQ XE_REG(0x190010)
+#define MASTER_IRQ REG_BIT(31)
+#define GU_MISC_IRQ REG_BIT(29)
+#define DISPLAY_IRQ REG_BIT(16)
+#define GT_DW_IRQ(x) REG_BIT(x)
+
+#define PVC_RP_STATE_CAP XE_REG(0x281014)
+
+#endif
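
A hedged sketch of a top-level interrupt fan-out using the GFX_MSTR_IRQ bits above; how the register is read and acked, and the per-unit handlers, are outside this header and omitted here.

	/* Sketch: decode the master IRQ register into pending sources. */
	static void mstr_irq_dispatch_sketch(u32 master_ctl)
	{
		if (!(master_ctl & MASTER_IRQ))
			return;			/* nothing pending */

		if (master_ctl & GT_DW_IRQ(0))
			;	/* service GT0 via GT_INTR_DW()/INTR_IDENTITY_REG() */
		if (master_ctl & DISPLAY_IRQ)
			;	/* service display interrupts */
		if (master_ctl & GU_MISC_IRQ)
			;	/* service GU misc events, e.g. GU_MISC_GSE */
	}
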
diff --git a/drivers/gpu/drm/xe/regs/xe_sriov_regs.h b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
new file mode 100644
index 000000000..58a4e0fad
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_sriov_regs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _REGS_XE_SRIOV_REGS_H_
+#define _REGS_XE_SRIOV_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define XE2_LMEM_CFG XE_REG(0x48b0)
+
+#define LMEM_CFG XE_REG(0xcf58)
+#define LMEM_EN REG_BIT(31)
+#define LMTT_DIR_PTR REG_GENMASK(30, 0) /* in multiples of 64KB */
+
+#endif
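
Since LMTT_DIR_PTR is stored in 64KB granules, programming LMEM_CFG could look roughly like the following (a sketch only; the real PF provisioning code is not part of this hunk, and SZ_64K is assumed from linux/sizes.h):

	/* Sketch: encode an LMTT directory offset (in bytes) plus the enable bit. */
	static u32 lmem_cfg_value_sketch(u64 lmtt_dir_offset)
	{
		return LMEM_EN |
		       REG_FIELD_PREP(LMTT_DIR_PTR, lmtt_dir_offset / SZ_64K);
	}
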
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
new file mode 100644
index 000000000..39d8a0892
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \
+ xe_bo_test.o \
+ xe_dma_buf_test.o \
+ xe_migrate_test.o \
+ xe_mocs_test.o \
+ xe_pci_test.o \
+ xe_rtp_test.o \
+ xe_wa_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
new file mode 100644
index 000000000..3436fd9cf
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_bo_test.h"
+#include "tests/xe_pci_test.h"
+#include "tests/xe_test.h"
+
+#include "xe_bo_evict.h"
+#include "xe_pci.h"
+#include "xe_pm.h"
+
+static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
+ bool clear, u64 get_val, u64 assign_val,
+ struct kunit *test)
+{
+ struct dma_fence *fence;
+ struct ttm_tt *ttm;
+ struct page *page;
+ pgoff_t ccs_page;
+ long timeout;
+ u64 *cpu_map;
+ int ret;
+ u32 offset;
+
+ /* Move bo to VRAM if not already there. */
+ ret = xe_bo_validate(bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate bo.\n");
+ return ret;
+ }
+
+ /* Optionally clear bo *and* CCS data in VRAM. */
+ if (clear) {
+ fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource);
+ if (IS_ERR(fence)) {
+ KUNIT_FAIL(test, "Failed to submit bo clear.\n");
+ return PTR_ERR(fence);
+ }
+ dma_fence_put(fence);
+ }
+
+ /* Evict to system. CCS data should be copied. */
+ ret = xe_bo_evict(bo, true);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to evict bo.\n");
+ return ret;
+ }
+
+ /* Sync all migration blits */
+ timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ true,
+ 5 * HZ);
+ if (timeout <= 0) {
+ KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
+ return -ETIME;
+ }
+
+ /*
+ * Bo with CCS data is now in system memory. Verify backing store
+ * and data integrity. Then assign for the next testing round while
+ * we still have a CPU map.
+ */
+ ttm = bo->ttm.ttm;
+ if (!ttm || !ttm_tt_is_populated(ttm)) {
+ KUNIT_FAIL(test, "Bo was not in expected placement.\n");
+ return -EINVAL;
+ }
+
+ ccs_page = xe_bo_ccs_pages_start(bo) >> PAGE_SHIFT;
+ if (ccs_page >= ttm->num_pages) {
+ KUNIT_FAIL(test, "No TTM CCS pages present.\n");
+ return -EINVAL;
+ }
+
+ page = ttm->pages[ccs_page];
+ cpu_map = kmap_local_page(page);
+
+ /* Check first CCS value */
+ if (cpu_map[0] != get_val) {
+ KUNIT_FAIL(test,
+ "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
+ (unsigned long long)get_val,
+ (unsigned long long)cpu_map[0]);
+ ret = -EINVAL;
+ }
+
+ /* Check last CCS value, or at least last value in page. */
+ offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size);
+ offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
+ if (cpu_map[offset] != get_val) {
+ KUNIT_FAIL(test,
+ "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
+ (unsigned long long)get_val,
+ (unsigned long long)cpu_map[offset]);
+ ret = -EINVAL;
+ }
+
+ cpu_map[0] = assign_val;
+ cpu_map[offset] = assign_val;
+ kunmap_local(cpu_map);
+
+ return ret;
+}
+
+static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
+ struct kunit *test)
+{
+ struct xe_bo *bo;
+
+ int ret;
+
+ /* TODO: Sanity check */
+ unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+
+ if (IS_DGFX(xe))
+ kunit_info(test, "Testing vram id %u\n", tile->id);
+ else
+ kunit_info(test, "Testing system memory\n");
+
+ bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device, bo_flags);
+ if (IS_ERR(bo)) {
+ KUNIT_FAIL(test, "Failed to create bo.\n");
+ return;
+ }
+
+ xe_bo_lock(bo, false);
+
+ kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
+ ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
+ test);
+ if (ret)
+ goto out_unlock;
+
+ kunit_info(test, "Verifying that CCS data survives migration.\n");
+ ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
+ 0xdeadbeefdeadbeefULL, test);
+ if (ret)
+ goto out_unlock;
+
+ kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
+ ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test);
+
+out_unlock:
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+}
+
+static int ccs_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct xe_tile *tile;
+ int id;
+
+ if (!xe_device_has_flat_ccs(xe)) {
+ kunit_info(test, "Skipping non-flat-ccs device.\n");
+ return 0;
+ }
+
+ xe_device_mem_access_get(xe);
+
+ for_each_tile(tile, xe, id) {
+ /* For igfx run only for primary tile */
+ if (!IS_DGFX(xe) && id > 0)
+ continue;
+ ccs_test_run_tile(xe, tile, test);
+ }
+
+ xe_device_mem_access_put(xe);
+
+ return 0;
+}
+
+void xe_ccs_migrate_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(ccs_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
+
+static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
+{
+ struct xe_bo *bo, *external;
+ unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+ struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
+ struct xe_gt *__gt;
+ int err, i, id;
+
+ kunit_info(test, "Testing device %s vram id %u\n",
+ dev_name(xe->drm.dev), tile->id);
+
+ for (i = 0; i < 2; ++i) {
+ xe_vm_lock(vm, false);
+ bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device,
+ bo_flags);
+ xe_vm_unlock(vm);
+ if (IS_ERR(bo)) {
+ KUNIT_FAIL(test, "bo create err=%pe\n", bo);
+ break;
+ }
+
+ external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device, bo_flags);
+ if (IS_ERR(external)) {
+ KUNIT_FAIL(test, "external bo create err=%pe\n", external);
+ goto cleanup_bo;
+ }
+
+ xe_bo_lock(external, false);
+ err = xe_bo_pin_external(external);
+ xe_bo_unlock(external);
+ if (err) {
+ KUNIT_FAIL(test, "external bo pin err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_external;
+ }
+
+ err = xe_bo_evict_all(xe);
+ if (err) {
+ KUNIT_FAIL(test, "evict err=%pe\n", ERR_PTR(err));
+ goto cleanup_all;
+ }
+
+ for_each_gt(__gt, xe, id)
+ xe_gt_sanitize(__gt);
+ err = xe_bo_restore_kernel(xe);
+ /*
+ * Snapshotting the CTB and copying back a potentially old
+ * version seems risky, depending on what might have been
+ * inflight. Also it seems snapshotting the ADS object and
+ * copying back results in serious breakage. Normally when
+ * calling xe_bo_restore_kernel() we always fully restart the
+		 * GT, which re-initializes such things. We could potentially
+		 * skip saving and restoring such objects in xe_bo_evict_all();
+		 * however, it seems quite fragile not to also restart the GT. Try
+ * to do that here by triggering a GT reset.
+ */
+ for_each_gt(__gt, xe, id) {
+ xe_gt_reset_async(__gt);
+ flush_work(&__gt->reset.worker);
+ }
+ if (err) {
+ KUNIT_FAIL(test, "restore kernel err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_all;
+ }
+
+ err = xe_bo_restore_user(xe);
+ if (err) {
+ KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
+ goto cleanup_all;
+ }
+
+ if (!xe_bo_is_vram(external)) {
+ KUNIT_FAIL(test, "external bo is not vram\n");
+ err = -EPROTO;
+ goto cleanup_all;
+ }
+
+ if (xe_bo_is_vram(bo)) {
+ KUNIT_FAIL(test, "bo is vram\n");
+ err = -EPROTO;
+ goto cleanup_all;
+ }
+
+ if (i) {
+ down_read(&vm->lock);
+ xe_vm_lock(vm, false);
+ err = xe_bo_validate(bo, bo->vm, false);
+ xe_vm_unlock(vm);
+ up_read(&vm->lock);
+ if (err) {
+ KUNIT_FAIL(test, "bo valid err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_all;
+ }
+ xe_bo_lock(external, false);
+ err = xe_bo_validate(external, NULL, false);
+ xe_bo_unlock(external);
+ if (err) {
+ KUNIT_FAIL(test, "external bo valid err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_all;
+ }
+ }
+
+ xe_bo_lock(external, false);
+ xe_bo_unpin_external(external);
+ xe_bo_unlock(external);
+
+ xe_bo_put(external);
+
+ xe_bo_lock(bo, false);
+ __xe_bo_unset_bulk_move(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ continue;
+
+cleanup_all:
+ xe_bo_lock(external, false);
+ xe_bo_unpin_external(external);
+ xe_bo_unlock(external);
+cleanup_external:
+ xe_bo_put(external);
+cleanup_bo:
+ xe_bo_lock(bo, false);
+ __xe_bo_unset_bulk_move(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ break;
+ }
+
+ xe_vm_put(vm);
+
+ return 0;
+}
+
+static int evict_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct xe_tile *tile;
+ int id;
+
+ if (!IS_DGFX(xe)) {
+ kunit_info(test, "Skipping non-discrete device %s.\n",
+ dev_name(xe->drm.dev));
+ return 0;
+ }
+
+ xe_device_mem_access_get(xe);
+
+ for_each_tile(tile, xe, id)
+ evict_test_run_tile(xe, tile, test);
+
+ xe_device_mem_access_put(xe);
+
+ return 0;
+}
+
+void xe_bo_evict_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(evict_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
new file mode 100644
index 000000000..f408f17f2
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_bo_test.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_bo_test.h"
+
+#include <kunit/test.h>
+
+static struct kunit_case xe_bo_tests[] = {
+ KUNIT_CASE(xe_ccs_migrate_kunit),
+ KUNIT_CASE(xe_bo_evict_kunit),
+ {}
+};
+
+static struct kunit_suite xe_bo_test_suite = {
+ .name = "xe_bo",
+ .test_cases = xe_bo_tests,
+};
+
+kunit_test_suite(xe_bo_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_bo kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h
new file mode 100644
index 000000000..0113ab450
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_bo_test.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_BO_TEST_H_
+#define _XE_BO_TEST_H_
+
+struct kunit;
+
+void xe_ccs_migrate_kunit(struct kunit *test);
+void xe_bo_evict_kunit(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
new file mode 100644
index 000000000..9f6d571d7
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/xe_drm.h>
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_dma_buf_test.h"
+#include "tests/xe_pci_test.h"
+
+#include "xe_pci.h"
+
+static bool p2p_enabled(struct dma_buf_test_params *params)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
+ params->attach_ops->allow_peer2peer;
+}
+
+static bool is_dynamic(struct dma_buf_test_params *params)
+{
+ return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
+ params->attach_ops->move_notify;
+}
+
+static void check_residency(struct kunit *test, struct xe_bo *exported,
+ struct xe_bo *imported, struct dma_buf *dmabuf)
+{
+ struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ u32 mem_type;
+ int ret;
+
+ xe_bo_assert_held(exported);
+ xe_bo_assert_held(imported);
+
+ mem_type = XE_PL_VRAM0;
+ if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ /* No VRAM allowed */
+ mem_type = XE_PL_TT;
+ else if (params->force_different_devices && !p2p_enabled(params))
+ /* No P2P */
+ mem_type = XE_PL_TT;
+ else if (params->force_different_devices && !is_dynamic(params) &&
+ (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ /* Pin migrated to TT */
+ mem_type = XE_PL_TT;
+
+ if (!xe_bo_is_mem_type(exported, mem_type)) {
+ KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
+ return;
+ }
+
+ if (xe_bo_is_pinned(exported))
+ return;
+
+ /*
+ * Evict exporter. Note that the gem object dma_buf member isn't
+ * set from xe_gem_prime_export(), and it's needed for the move_notify()
+	 * functionality, so hack that up here. Evicting the exported bo will
+	 * also evict the imported bo through the move_notify() functionality if
+	 * the importer is on a different device. If they're on the same device,
+ * the exporter and the importer should be the same bo.
+ */
+ swap(exported->ttm.base.dma_buf, dmabuf);
+ ret = xe_bo_evict(exported, true);
+ swap(exported->ttm.base.dma_buf, dmabuf);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
+ ret);
+ return;
+ }
+
+	/* Verify that the importer has also been evicted to SYSTEM */
+ if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
+ KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
+ return;
+ }
+
+	/* Re-validate the importer. This should also move the exporter in. */
+ ret = xe_bo_validate(imported, NULL, false);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
+ ret);
+ return;
+ }
+
+ /*
+ * If on different devices, the exporter is kept in system if
+	 * possible, saving a migration step as the transfer is likely
+	 * just as fast from system memory.
+ */
+ if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
+ else
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+
+ if (params->force_different_devices)
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
+ else
+ KUNIT_EXPECT_TRUE(test, exported == imported);
+}
+
+static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ struct drm_gem_object *import;
+ struct dma_buf *dmabuf;
+ struct xe_bo *bo;
+ size_t size;
+
+ /* No VRAM on this device? */
+ if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
+ (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ return;
+
+ size = PAGE_SIZE;
+ if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+ xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ size = SZ_64K;
+
+ kunit_info(test, "running %s\n", __func__);
+ bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
+ if (IS_ERR(bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(bo));
+ return;
+ }
+
+ dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
+ if (IS_ERR(dmabuf)) {
+ KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
+ PTR_ERR(dmabuf));
+ goto out;
+ }
+
+ import = xe_gem_prime_import(&xe->drm, dmabuf);
+ if (!IS_ERR(import)) {
+ struct xe_bo *import_bo = gem_to_xe_bo(import);
+
+ /*
+ * Did import succeed when it shouldn't due to lack of p2p support?
+ */
+ if (params->force_different_devices &&
+ !p2p_enabled(params) &&
+ !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ KUNIT_FAIL(test,
+ "xe_gem_prime_import() succeeded when it shouldn't have\n");
+ } else {
+ int err;
+
+ /* Is everything where we expect it to be? */
+ xe_bo_lock(import_bo, false);
+ err = xe_bo_validate(import_bo, NULL, false);
+
+ /* Pinning in VRAM is not allowed. */
+ if (!is_dynamic(params) &&
+ params->force_different_devices &&
+ !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+ /* Otherwise only expect interrupts or success. */
+ else if (err && err != -EINTR && err != -ERESTARTSYS)
+ KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
+ err == -ERESTARTSYS);
+
+ if (!err)
+ check_residency(test, bo, import_bo, dmabuf);
+ xe_bo_unlock(import_bo);
+ }
+ drm_gem_object_put(import);
+ } else if (PTR_ERR(import) != -EOPNOTSUPP) {
+ /* Unexpected error code. */
+ KUNIT_FAIL(test,
+ "xe_gem_prime_import failed with the wrong err=%ld\n",
+ PTR_ERR(import));
+ } else if (!params->force_different_devices ||
+ p2p_enabled(params) ||
+ (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ /* Shouldn't fail if we can reuse same bo, use p2p or use system */
+ KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
+ PTR_ERR(import));
+ }
+ dma_buf_put(dmabuf);
+out:
+ drm_gem_object_put(&bo->ttm.base);
+}
+
+static const struct dma_buf_attach_ops nop2p_attach_ops = {
+ .allow_peer2peer = false,
+ .move_notify = xe_dma_buf_move_notify
+};
+
+/*
+ * We test the implementation with bos of different residency and with
+ * importers with different capabilities; some lacking p2p support and some
+ * lacking dynamic capabilities (attach_ops == NULL). We also fake
+ * different devices avoiding the import shortcut that just reuses the same
+ * gem object.
+ */
+static const struct dma_buf_test_params test_params[] = {
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops},
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops},
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT},
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &nop2p_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &nop2p_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .force_different_devices = true},
+
+ {}
+};
+
+static int dma_buf_run_device(struct xe_device *xe)
+{
+ const struct dma_buf_test_params *params;
+ struct kunit *test = xe_cur_kunit();
+
+ for (params = test_params; params->mem_mask; ++params) {
+ struct dma_buf_test_params p = *params;
+
+ p.base.id = XE_TEST_LIVE_DMA_BUF;
+ test->priv = &p;
+ xe_test_dmabuf_import_same_driver(xe);
+ }
+
+ /* A non-zero return would halt iteration over driver devices */
+ return 0;
+}
+
+void xe_dma_buf_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(dma_buf_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
new file mode 100644
index 000000000..9f5a9cda8
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_dma_buf_test.h"
+
+#include <kunit/test.h>
+
+static struct kunit_case xe_dma_buf_tests[] = {
+ KUNIT_CASE(xe_dma_buf_kunit),
+ {}
+};
+
+static struct kunit_suite xe_dma_buf_test_suite = {
+ .name = "xe_dma_buf",
+ .test_cases = xe_dma_buf_tests,
+};
+
+kunit_test_suite(xe_dma_buf_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_dma_buf kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
new file mode 100644
index 000000000..e6b464ddd
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_DMA_BUF_TEST_H_
+#define _XE_DMA_BUF_TEST_H_
+
+struct kunit;
+
+void xe_dma_buf_kunit(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_lmtt_test.c b/drivers/gpu/drm/xe/tests/xe_lmtt_test.c
new file mode 100644
index 000000000..1f1557c45
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_lmtt_test.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+static const struct lmtt_ops_param {
+ const char *desc;
+ const struct xe_lmtt_ops *ops;
+} lmtt_ops_params[] = {
+ { "2-level", &lmtt_2l_ops, },
+ { "multi-level", &lmtt_ml_ops, },
+};
+
+static void lmtt_ops_param_get_desc(const struct lmtt_ops_param *p, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", p->desc);
+}
+
+KUNIT_ARRAY_PARAM(lmtt_ops, lmtt_ops_params, lmtt_ops_param_get_desc);
+
+static void test_ops(struct kunit *test)
+{
+ const struct lmtt_ops_param *p = test->param_value;
+ const struct xe_lmtt_ops *ops = p->ops;
+ unsigned int n;
+
+ KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_root_pd_level);
+ KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_num);
+ KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_size);
+ KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_shift);
+ KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_index);
+ KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_encode);
+
+ KUNIT_EXPECT_NE(test, ops->lmtt_root_pd_level(), 0);
+
+ for (n = 0; n <= ops->lmtt_root_pd_level(); n++) {
+ KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_num(n), 0,
+ "level=%u", n);
+ KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_size(n), 0,
+ "level=%u", n);
+ KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_encode(0, n), LMTT_PTE_INVALID,
+ "level=%u", n);
+ }
+
+ for (n = 0; n < ops->lmtt_root_pd_level(); n++) {
+ u64 addr = BIT_ULL(ops->lmtt_pte_shift(n));
+
+ KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_shift(n), 0,
+ "level=%u", n);
+ KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr - 1, n), 0,
+ "addr=%#llx level=%u", addr, n);
+ KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr + 1, n), 1,
+ "addr=%#llx level=%u", addr, n);
+ KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr * 2 - 1, n), 1,
+ "addr=%#llx level=%u", addr, n);
+ KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr * 2, n), 2,
+ "addr=%#llx level=%u", addr, n);
+ }
+}
+
+static struct kunit_case lmtt_test_cases[] = {
+ KUNIT_CASE_PARAM(test_ops, lmtt_ops_gen_params),
+ {}
+};
+
+static struct kunit_suite lmtt_suite = {
+ .name = "lmtt",
+ .test_cases = lmtt_test_cases,
+};
+
+kunit_test_suites(&lmtt_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
new file mode 100644
index 000000000..c347e2c29
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_migrate_test.h"
+#include "tests/xe_pci_test.h"
+
+#include "xe_pci.h"
+
+static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
+ const char *str, struct kunit *test)
+{
+ long ret;
+
+ if (IS_ERR(fence)) {
+ KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
+ PTR_ERR(fence));
+ return true;
+ }
+ if (!fence)
+ return true;
+
+ ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
+ if (ret <= 0) {
+ KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
+ return true;
+ }
+
+ return false;
+}
+
+static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
+ struct xe_bb *bb, u32 second_idx, const char *str,
+ struct kunit *test)
+{
+ u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm);
+ struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
+ batch_base,
+ second_idx);
+ struct dma_fence *fence;
+
+ if (IS_ERR(job)) {
+		KUNIT_FAIL(test, "Failed to create sanity job: %li\n",
+ PTR_ERR(job));
+ return PTR_ERR(job);
+ }
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+	if (sanity_fence_failed(xe, fence, str, test)) {
+		/* Drop the job fence reference on timeout to avoid leaking it. */
+		if (!IS_ERR_OR_NULL(fence))
+			dma_fence_put(fence);
+		return -ETIMEDOUT;
+	}
+
+ dma_fence_put(fence);
+ kunit_info(test, "%s: Job completed\n", str);
+ return 0;
+}
+
+static void
+sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
+ struct xe_tile *tile, struct iosys_map *map, void *dst,
+ u32 qword_ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update)
+{
+ struct migrate_test_params *p =
+ to_migrate_test_params(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));
+ int i;
+ u64 *ptr = dst;
+ u64 value;
+
+ for (i = 0; i < num_qwords; i++) {
+ value = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
+ if (map)
+ xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
+ sizeof(u64), u64, value);
+ else
+ ptr[i] = value;
+ }
+
+ kunit_info(xe_cur_kunit(), "Used %s.\n", map ? "CPU" : "GPU");
+ if (p->force_gpu && map)
+ KUNIT_FAIL(xe_cur_kunit(), "GPU pagetable update used CPU.\n");
+}
+
+static const struct xe_migrate_pt_update_ops sanity_ops = {
+ .populate = sanity_populate_cb,
+};
+
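+/*
+ * check() is a lighter-weight alternative to KUNIT_EXPECT_EQ_MSG() used
+ * throughout this file: it reports a failure via KUNIT_FAIL() but lets the
+ * test keep running, e.g.
+ *
+ *	check(retval, expected, "remote first offset should be cleared", test);
+ */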
+#define check(_retval, _expected, str, _test) \
+ do { if ((_retval) != (_expected)) { \
+ KUNIT_FAIL(_test, "Sanity check failed: " str \
+ " expected %llx, got %llx\n", \
+ (u64)(_expected), (u64)(_retval)); \
+ } } while (0)
+
+static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
+ struct kunit *test, u32 region)
+{
+ struct xe_device *xe = tile_to_xe(m->tile);
+ u64 retval, expected = 0;
+ bool big = bo->size >= SZ_2M;
+ struct dma_fence *fence;
+ const char *str = big ? "Copying big bo" : "Copying small bo";
+ int err;
+
+ struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL,
+ bo->size,
+ ttm_bo_type_kernel,
+ region |
+ XE_BO_NEEDS_CPU_ACCESS);
+ if (IS_ERR(remote)) {
+ KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
+ str, remote);
+ return;
+ }
+
+ err = xe_bo_validate(remote, NULL, false);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
+ str, err);
+ goto out_unlock;
+ }
+
+ err = xe_bo_vmap(remote);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n",
+ str, err);
+ goto out_unlock;
+ }
+
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
+ fence = xe_migrate_clear(m, remote, remote->ttm.resource);
+ if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" :
+ "Clearing remote small bo", test)) {
+ retval = xe_map_rd(xe, &remote->vmap, 0, u64);
+ check(retval, expected, "remote first offset should be cleared",
+ test);
+ retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64);
+ check(retval, expected, "remote last offset should be cleared",
+ test);
+ }
+ dma_fence_put(fence);
+
+	/* Try to copy 0xc0 from remote to vram with 2MiB or 64KiB/4KiB pages */
+ xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
+
+ expected = 0xc0c0c0c0c0c0c0c0;
+ fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource,
+ bo->ttm.resource, false);
+ if (!sanity_fence_failed(xe, fence, big ? "Copying big bo remote -> vram" :
+ "Copying small bo remote -> vram", test)) {
+ retval = xe_map_rd(xe, &bo->vmap, 0, u64);
+ check(retval, expected,
+ "remote -> vram bo first offset should be copied", test);
+ retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
+ check(retval, expected,
+		      "remote -> vram bo last offset should be copied", test);
+ }
+ dma_fence_put(fence);
+
+ /* And other way around.. slightly hacky.. */
+ xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);
+
+ fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource,
+ remote->ttm.resource, false);
+ if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" :
+ "Copying small bo vram -> remote", test)) {
+ retval = xe_map_rd(xe, &remote->vmap, 0, u64);
+ check(retval, expected,
+ "vram -> remote bo first offset should be copied", test);
+ retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64);
+ check(retval, expected,
+ "vram -> remote bo last offset should be copied", test);
+ }
+ dma_fence_put(fence);
+
+ xe_bo_vunmap(remote);
+out_unlock:
+ xe_bo_unlock(remote);
+ xe_bo_put(remote);
+}
+
+static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
+ struct kunit *test)
+{
+ test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+}
+
+static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
+ struct kunit *test)
+{
+ u32 region;
+
+ if (bo->ttm.resource->mem_type == XE_PL_SYSTEM)
+ return;
+
+ if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
+ region = XE_BO_CREATE_VRAM1_BIT;
+ else
+ region = XE_BO_CREATE_VRAM0_BIT;
+ test_copy(m, bo, test, region);
+}
+
+static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
+ struct kunit *test, bool force_gpu)
+{
+ struct xe_device *xe = tile_to_xe(m->tile);
+ struct dma_fence *fence;
+ u64 retval, expected;
+ ktime_t then, now;
+ int i;
+
+ struct xe_vm_pgtable_update update = {
+ .ofs = 1,
+ .qwords = 0x10,
+ .pt_bo = pt,
+ };
+ struct xe_migrate_pt_update pt_update = {
+ .ops = &sanity_ops,
+ };
+ struct migrate_test_params p = {
+ .base.id = XE_TEST_LIVE_MIGRATE,
+ .force_gpu = force_gpu,
+ };
+
+ test->priv = &p;
+ /* Test xe_migrate_update_pgtables() updates the pagetable as expected */
+ expected = 0xf0f0f0f0f0f0f0f0ULL;
+ xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
+
+ then = ktime_get();
+ fence = xe_migrate_update_pgtables(m, m->q->vm, NULL, m->q, &update, 1,
+ NULL, 0, &pt_update);
+ now = ktime_get();
+ if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
+ return;
+
+	kunit_info(test, "Updating without syncing took %llu us.\n",
+ (unsigned long long)ktime_to_us(ktime_sub(now, then)));
+
+ dma_fence_put(fence);
+ retval = xe_map_rd(xe, &pt->vmap, 0, u64);
+ check(retval, expected, "PTE[0] must stay untouched", test);
+
+ for (i = 0; i < update.qwords; i++) {
+ retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64);
+ check(retval, i * 0x1111111111111111ULL, "PTE update", test);
+ }
+
+ retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords),
+ u64);
+ check(retval, expected, "PTE[0x11] must stay untouched", test);
+}
+
+static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
+{
+ struct xe_tile *tile = m->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
+ struct xe_res_cursor src_it;
+ struct dma_fence *fence;
+ u64 retval, expected;
+ struct xe_bb *bb;
+ int err;
+ u8 id = tile->id;
+
+ err = xe_bo_vmap(bo);
+ if (err) {
+		KUNIT_FAIL(test, "Failed to vmap our pagetables: %i\n", err);
+ return;
+ }
+
+ big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(big)) {
+ KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
+ goto vunmap;
+ }
+
+ pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(pt)) {
+ KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
+ PTR_ERR(pt));
+ goto free_big;
+ }
+
+ tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
+ 2 * SZ_4K,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(tiny)) {
+		KUNIT_FAIL(test, "Failed to allocate tiny bo: %li\n",
+			   PTR_ERR(tiny));
+ goto free_pt;
+ }
+
+ bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm);
+ if (IS_ERR(bb)) {
+ KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
+ PTR_ERR(bb));
+ goto free_tiny;
+ }
+
+ kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
+ (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
+ (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));
+
+ /* First part of the test, are we updating our pagetable bo with a new entry? */
+ xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
+ 0xdeaddeadbeefbeef);
+ expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
+ if (m->q->vm->flags & XE_VM_FLAG_64K)
+ expected |= XE_PTE_PS64;
+ if (xe_bo_is_vram(pt))
+ xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
+ else
+ xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
+
+ emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
+ &src_it, XE_PAGE_SIZE, pt->ttm.resource);
+
+ run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
+
+ retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
+ u64);
+ check(retval, expected, "PTE entry write", test);
+
+ /* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
+ bb->len = 0;
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
+ expected = 0;
+
+ emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
+ IS_DGFX(xe));
+ run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
+ test);
+
+ retval = xe_map_rd(xe, &pt->vmap, 0, u32);
+ check(retval, expected, "Write to PT after adding PTE", test);
+
+ /* Sanity checks passed, try the full ones! */
+
+ /* Clear a small bo */
+ kunit_info(test, "Clearing small buffer object\n");
+ xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
+ expected = 0;
+ fence = xe_migrate_clear(m, tiny, tiny->ttm.resource);
+ if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
+ goto out;
+
+ dma_fence_put(fence);
+ retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
+ check(retval, expected, "Command clear small first value", test);
+ retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
+ check(retval, expected, "Command clear small last value", test);
+
+ kunit_info(test, "Copying small buffer object to system\n");
+ test_copy_sysmem(m, tiny, test);
+ if (xe->info.tile_count > 1) {
+ kunit_info(test, "Copying small buffer object to other vram\n");
+ test_copy_vram(m, tiny, test);
+ }
+
+ /* Clear a big bo */
+ kunit_info(test, "Clearing big buffer object\n");
+ xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
+ expected = 0;
+ fence = xe_migrate_clear(m, big, big->ttm.resource);
+ if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
+ goto out;
+
+ dma_fence_put(fence);
+ retval = xe_map_rd(xe, &big->vmap, 0, u32);
+ check(retval, expected, "Command clear big first value", test);
+ retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
+ check(retval, expected, "Command clear big last value", test);
+
+ kunit_info(test, "Copying big buffer object to system\n");
+ test_copy_sysmem(m, big, test);
+ if (xe->info.tile_count > 1) {
+ kunit_info(test, "Copying big buffer object to other vram\n");
+ test_copy_vram(m, big, test);
+ }
+
+ kunit_info(test, "Testing page table update using CPU if GPU idle.\n");
+ test_pt_update(m, pt, test, false);
+ kunit_info(test, "Testing page table update using GPU\n");
+ test_pt_update(m, pt, test, true);
+
+out:
+ xe_bb_free(bb, NULL);
+free_tiny:
+ xe_bo_unpin(tiny);
+ xe_bo_put(tiny);
+free_pt:
+ xe_bo_unpin(pt);
+ xe_bo_put(pt);
+free_big:
+ xe_bo_unpin(big);
+ xe_bo_put(big);
+vunmap:
+ xe_bo_vunmap(m->pt_bo);
+}
+
+static int migrate_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct xe_tile *tile;
+ int id;
+
+ for_each_tile(tile, xe, id) {
+ struct xe_migrate *m = tile->migrate;
+
+ kunit_info(test, "Testing tile id %d.\n", id);
+ xe_vm_lock(m->q->vm, true);
+ xe_device_mem_access_get(xe);
+ xe_migrate_sanity_test(m, test);
+ xe_device_mem_access_put(xe);
+ xe_vm_unlock(m->q->vm);
+ }
+
+ return 0;
+}
+
+void xe_migrate_sanity_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(migrate_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
new file mode 100644
index 000000000..cf0c173b9
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_migrate_test.h"
+
+#include <kunit/test.h>
+
+static struct kunit_case xe_migrate_tests[] = {
+ KUNIT_CASE(xe_migrate_sanity_kunit),
+ {}
+};
+
+static struct kunit_suite xe_migrate_test_suite = {
+ .name = "xe_migrate",
+ .test_cases = xe_migrate_tests,
+};
+
+kunit_test_suite(xe_migrate_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_migrate kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h
new file mode 100644
index 000000000..7c645c668
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_migrate_test.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MIGRATE_TEST_H_
+#define _XE_MIGRATE_TEST_H_
+
+struct kunit;
+
+void xe_migrate_sanity_kunit(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
new file mode 100644
index 000000000..7dd34f94e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_mocs_test.h"
+#include "tests/xe_pci_test.h"
+#include "tests/xe_test.h"
+
+#include "xe_pci.h"
+#include "xe_gt.h"
+#include "xe_mocs.h"
+#include "xe_device.h"
+
+struct live_mocs {
+ struct xe_mocs_info table;
+};
+
+static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt)
+{
+ unsigned int flags;
+ struct kunit *test = xe_cur_kunit();
+
+ memset(arg, 0, sizeof(*arg));
+
+ flags = get_mocs_settings(gt_to_xe(gt), &arg->table);
+
+ kunit_info(test, "table size %d", arg->table.size);
+ kunit_info(test, "table uc_index %d", arg->table.uc_index);
+ kunit_info(test, "table n_entries %d", arg->table.n_entries);
+
+ return flags;
+}
+
+static void read_l3cc_table(struct xe_gt *gt,
+ const struct xe_mocs_info *info)
+{
+ unsigned int i;
+ u32 l3cc;
+ u32 reg_val;
+ u32 ret;
+
+ struct kunit *test = xe_cur_kunit();
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ mocs_dbg(&gt_to_xe(gt)->drm, "L3CC entries:%d\n", info->n_entries);
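+	/*
+	 * The loop condition below doubles as per-iteration setup: via the
+	 * comma operator it combines two adjacent L3CC entries into the
+	 * expected 32-bit register value before each pass over the body.
+	 */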
+ for (i = 0;
+ i < (info->n_entries + 1) / 2 ?
+ (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
+ get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
+ i++) {
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
+ else
+ reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
+ XELP_LNCFCMOCS(i).addr, reg_val, l3cc);
+ if (reg_val != l3cc)
+ KUNIT_FAIL(test, "l3cc reg 0x%x has incorrect val.\n",
+ XELP_LNCFCMOCS(i).addr);
+ }
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_device_mem_access_put(gt_to_xe(gt));
+}
+
+static void read_mocs_table(struct xe_gt *gt,
+ const struct xe_mocs_info *info)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ unsigned int i;
+ u32 mocs;
+ u32 reg_val;
+ u32 ret;
+
+ struct kunit *test = xe_cur_kunit();
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ mocs_dbg(&gt_to_xe(gt)->drm, "Global MOCS entries:%d\n", info->n_entries);
+ drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
+ "Unused entries index should have been defined\n");
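+	/* As in read_l3cc_table(), the loop condition latches the expected value */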
+ for (i = 0;
+ i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
+ i++) {
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
+ else
+ reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i,
+ XELP_GLOBAL_MOCS(i).addr, reg_val, mocs);
+ if (reg_val != mocs)
+ KUNIT_FAIL(test, "mocs reg 0x%x has incorrect val.\n",
+ XELP_GLOBAL_MOCS(i).addr);
+ }
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_device_mem_access_put(gt_to_xe(gt));
+}
+
+static int mocs_kernel_test_run_device(struct xe_device *xe)
+{
+ /* Basic check the system is configured with the expected mocs table */
+
+ struct live_mocs mocs;
+ struct xe_gt *gt;
+
+ unsigned int flags;
+ int id;
+
+ for_each_gt(gt, xe, id) {
+ flags = live_mocs_init(&mocs, gt);
+ if (flags & HAS_GLOBAL_MOCS)
+ read_mocs_table(gt, &mocs.table);
+ if (flags & HAS_LNCF_MOCS)
+ read_l3cc_table(gt, &mocs.table);
+ }
+ return 0;
+}
+
+void xe_live_mocs_kernel_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(mocs_kernel_test_run_device);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
new file mode 100644
index 000000000..421b819fd
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_mocs_test.h"
+
+#include <kunit/test.h>
+
+static struct kunit_case xe_mocs_tests[] = {
+ KUNIT_CASE(xe_live_mocs_kernel_kunit),
+ {}
+};
+
+static struct kunit_suite xe_mocs_test_suite = {
+ .name = "xe_mocs",
+ .test_cases = xe_mocs_tests,
+};
+
+kunit_test_suite(xe_mocs_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
new file mode 100644
index 000000000..7faa3575e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MOCS_TEST_H_
+#define _XE_MOCS_TEST_H_
+
+struct kunit;
+
+void xe_live_mocs_kernel_kunit(struct kunit *test);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
new file mode 100644
index 000000000..602793644
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "tests/xe_pci_test.h"
+
+#include "tests/xe_test.h"
+
+#include <kunit/test-bug.h>
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+struct kunit_test_data {
+ int ndevs;
+ xe_device_fn xe_fn;
+};
+
+static int dev_to_xe_device_fn(struct device *dev, void *__data)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct kunit_test_data *data = __data;
+ int ret = 0;
+ int idx;
+
+ data->ndevs++;
+
+ if (drm_dev_enter(drm, &idx))
+ ret = data->xe_fn(to_xe_device(dev_get_drvdata(dev)));
+ drm_dev_exit(idx);
+
+ return ret;
+}
+
+/**
+ * xe_call_for_each_device - Iterate over all devices this driver binds to
+ * @xe_fn: Function to call for each device.
+ *
+ * This function iterates over all devices this driver binds to, and calls
+ * @xe_fn for each one of them. If the called function returns anything else
+ * than 0, iteration is stopped and the return value is returned by this
+ * function. Across each function call, drm_dev_enter() / drm_dev_exit() is
+ * called for the corresponding drm device.
+ *
+ * Return: Number of devices iterated or
+ * the error code of a call to @xe_fn returning an error code.
+ */
+int xe_call_for_each_device(xe_device_fn xe_fn)
+{
+ int ret;
+ struct kunit_test_data data = {
+ .xe_fn = xe_fn,
+ .ndevs = 0,
+ };
+
+ ret = driver_for_each_device(&xe_pci_driver.driver, NULL,
+ &data, dev_to_xe_device_fn);
+
+ if (!data.ndevs)
+ kunit_skip(current->kunit_test, "test runs only on hardware\n");
+
+ return ret ?: data.ndevs;
+}
+
+/**
+ * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
+ * @xe_fn: Function to call for each graphics IP.
+ *
+ * This function iterates over the descriptors for all graphics IPs recognized
+ * by the driver and calls @xe_fn for each one of them.
+ */
+void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
+{
+ const struct xe_graphics_desc *ip, *last = NULL;
+
+ for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
+ ip = graphics_ip_map[i].ip;
+ if (ip == last)
+ continue;
+
+ xe_fn(ip);
+ last = ip;
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
+
+/**
+ * xe_call_for_each_media_ip - Iterate over all recognized media IPs
+ * @xe_fn: Function to call for each media IP.
+ *
+ * This function iterates over the descriptors for all media IPs recognized
+ * by the driver and calls @xe_fn for each one of them.
+ */
+void xe_call_for_each_media_ip(xe_media_fn xe_fn)
+{
+ const struct xe_media_desc *ip, *last = NULL;
+
+ for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
+ ip = media_ip_map[i].ip;
+ if (ip == last)
+ continue;
+
+ xe_fn(ip);
+ last = ip;
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
+
+static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
+ u32 *ver, u32 *revid)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_pci_fake_data *data = test->priv;
+
+ if (type == GMDID_MEDIA) {
+ *ver = data->media_verx100;
+ *revid = xe_step_to_gmdid(data->media_step);
+ } else {
+ *ver = data->graphics_verx100;
+ *revid = xe_step_to_gmdid(data->graphics_step);
+ }
+}
+
+int xe_pci_fake_device_init(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_pci_fake_data *data = test->priv;
+ const struct pci_device_id *ent = pciidlist;
+ const struct xe_device_desc *desc;
+ const struct xe_subplatform_desc *subplatform_desc;
+
+ if (!data) {
+ desc = (const void *)ent->driver_data;
+ subplatform_desc = NULL;
+ goto done;
+ }
+
+ for (ent = pciidlist; ent->device; ent++) {
+ desc = (const void *)ent->driver_data;
+ if (desc->platform == data->platform)
+ break;
+ }
+
+ if (!ent->device)
+ return -ENODEV;
+
+ for (subplatform_desc = desc->subplatforms;
+ subplatform_desc && subplatform_desc->subplatform;
+ subplatform_desc++)
+ if (subplatform_desc->subplatform == data->subplatform)
+ break;
+
+ if (data->subplatform != XE_SUBPLATFORM_NONE && !subplatform_desc)
+ return -ENODEV;
+
+done:
+ kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
+
+ xe_info_init_early(xe, desc, subplatform_desc);
+ xe_info_init(xe, desc->graphics, desc->media);
+
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
new file mode 100644
index 000000000..171e4180f
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+#include "tests/xe_test.h"
+
+#include "xe_device.h"
+#include "xe_pci_test.h"
+#include "xe_pci_types.h"
+
+static void check_graphics_ip(const struct xe_graphics_desc *graphics)
+{
+ struct kunit *test = xe_cur_kunit();
+ u64 mask = graphics->hw_engine_mask;
+
+ /* RCS, CCS, and BCS engines are allowed on the graphics IP */
+ mask &= ~(XE_HW_ENGINE_RCS_MASK |
+ XE_HW_ENGINE_CCS_MASK |
+ XE_HW_ENGINE_BCS_MASK);
+
+ /* Any remaining engines are an error */
+ KUNIT_ASSERT_EQ(test, mask, 0);
+}
+
+static void check_media_ip(const struct xe_media_desc *media)
+{
+ struct kunit *test = xe_cur_kunit();
+ u64 mask = media->hw_engine_mask;
+
+ /* VCS, VECS and GSCCS engines are allowed on the media IP */
+ mask &= ~(XE_HW_ENGINE_VCS_MASK |
+ XE_HW_ENGINE_VECS_MASK |
+ XE_HW_ENGINE_GSCCS_MASK);
+
+ /* Any remaining engines are an error */
+ KUNIT_ASSERT_EQ(test, mask, 0);
+}
+
+static void xe_gmdid_graphics_ip(struct kunit *test)
+{
+ xe_call_for_each_graphics_ip(check_graphics_ip);
+}
+
+static void xe_gmdid_media_ip(struct kunit *test)
+{
+ xe_call_for_each_media_ip(check_media_ip);
+}
+
+static struct kunit_case xe_pci_tests[] = {
+ KUNIT_CASE(xe_gmdid_graphics_ip),
+ KUNIT_CASE(xe_gmdid_media_ip),
+ {}
+};
+
+static struct kunit_suite xe_pci_test_suite = {
+ .name = "xe_pci",
+ .test_cases = xe_pci_tests,
+};
+
+kunit_test_suite(xe_pci_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_pci kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
new file mode 100644
index 000000000..811ffe5bd
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_PCI_TEST_H_
+#define _XE_PCI_TEST_H_
+
+#include <linux/types.h>
+
+#include "xe_platform_types.h"
+
+struct xe_device;
+struct xe_graphics_desc;
+struct xe_media_desc;
+
+typedef int (*xe_device_fn)(struct xe_device *);
+typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
+typedef void (*xe_media_fn)(const struct xe_media_desc *);
+
+int xe_call_for_each_device(xe_device_fn xe_fn);
+void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
+void xe_call_for_each_media_ip(xe_media_fn xe_fn);
+
+struct xe_pci_fake_data {
+ enum xe_platform platform;
+ enum xe_subplatform subplatform;
+ u32 graphics_verx100;
+ u32 media_verx100;
+ u32 graphics_step;
+ u32 media_step;
+};
+
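+/*
+ * Usage sketch (XE_TIGERLAKE is just an example platform): a test fills this
+ * in and stores it in the kunit priv pointer before calling
+ * xe_pci_fake_device_init():
+ *
+ *	struct xe_pci_fake_data data = { .platform = XE_TIGERLAKE };
+ *
+ *	test->priv = &data;
+ *	ret = xe_pci_fake_device_init(xe);
+ */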
+int xe_pci_fake_device_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
new file mode 100644
index 000000000..4a6972897
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/string.h>
+#include <linux/xarray.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_reg_defs.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_pci_test.h"
+#include "xe_reg_sr.h"
+#include "xe_rtp.h"
+
+#define REGULAR_REG1 XE_REG(1)
+#define REGULAR_REG2 XE_REG(2)
+#define REGULAR_REG3 XE_REG(3)
+#define MCR_REG1 XE_REG_MCR(1)
+#define MCR_REG2 XE_REG_MCR(2)
+#define MCR_REG3 XE_REG_MCR(3)
+#define MASKED_REG1 XE_REG(1, XE_REG_OPTION_MASKED)
+
+#undef XE_REG_MCR
+#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
+
+struct rtp_test_case {
+ const char *name;
+ struct xe_reg expected_reg;
+ u32 expected_set_bits;
+ u32 expected_clr_bits;
+ unsigned long expected_count;
+ unsigned int expected_sr_errors;
+ const struct xe_rtp_entry_sr *entries;
+};
+
+static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+{
+ return true;
+}
+
+static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+{
+ return false;
+}
+
+static const struct rtp_test_case cases[] = {
+ {
+ .name = "coalesce-same-reg",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0) | REG_BIT(1),
+ .expected_clr_bits = REG_BIT(0) | REG_BIT(1),
+ .expected_count = 1,
+ /* Different bits on the same register: create a single entry */
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "no-match-no-add",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(0),
+ .expected_count = 1,
+ /* Don't coalesce second entry since rules don't match */
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_no)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "no-match-no-add-multiple-rules",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(0),
+ .expected_count = 1,
+ /* Don't coalesce second entry due to one of the rules */
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes), FUNC(match_no)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "two-regs-two-entries",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(0),
+ .expected_count = 2,
+ /* Same bits on different registers are not coalesced */
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG2, REG_BIT(0)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "clr-one-set-other",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(1) | REG_BIT(0),
+ .expected_count = 1,
+ /* Check clr vs set actions on different bits */
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(CLR(REGULAR_REG1, REG_BIT(1)))
+ },
+ {}
+ },
+ },
+ {
+#define TEMP_MASK REG_GENMASK(10, 8)
+#define TEMP_FIELD REG_FIELD_PREP(TEMP_MASK, 2)
+ .name = "set-field",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = TEMP_FIELD,
+ .expected_clr_bits = TEMP_MASK,
+ .expected_count = 1,
+ /* Check FIELD_SET works */
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(FIELD_SET(REGULAR_REG1,
+ TEMP_MASK, TEMP_FIELD))
+ },
+ {}
+ },
+#undef TEMP_MASK
+#undef TEMP_FIELD
+ },
+ {
+ .name = "conflict-duplicate",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(0),
+ .expected_count = 1,
+ .expected_sr_errors = 1,
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ /* drop: setting same values twice */
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "conflict-not-disjoint",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(0),
+ .expected_count = 1,
+ .expected_sr_errors = 1,
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ /* drop: bits are not disjoint with previous entries */
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(CLR(REGULAR_REG1, REG_GENMASK(1, 0)))
+ },
+ {}
+ },
+ },
+ {
+ .name = "conflict-reg-type",
+ .expected_reg = REGULAR_REG1,
+ .expected_set_bits = REG_BIT(0),
+ .expected_clr_bits = REG_BIT(0),
+ .expected_count = 1,
+ .expected_sr_errors = 2,
+ .entries = (const struct xe_rtp_entry_sr[]) {
+ { XE_RTP_NAME("basic-1"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0)))
+ },
+ /* drop: regular vs MCR */
+ { XE_RTP_NAME("basic-2"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(MCR_REG1, REG_BIT(1)))
+ },
+ /* drop: regular vs masked */
+ { XE_RTP_NAME("basic-3"),
+ XE_RTP_RULES(FUNC(match_yes)),
+ XE_RTP_ACTIONS(SET(MASKED_REG1, REG_BIT(0)))
+ },
+ {}
+ },
+ },
+};
+
+static void xe_rtp_process_tests(struct kunit *test)
+{
+ const struct rtp_test_case *param = test->param_value;
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
+ struct xe_reg_sr *reg_sr = &gt->reg_sr;
+ const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ unsigned long idx, count = 0;
+
+ xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
+ xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+
+ xa_for_each(&reg_sr->xa, idx, sre) {
+ if (idx == param->expected_reg.addr)
+ sr_entry = sre;
+
+ count++;
+ }
+
+	KUNIT_EXPECT_EQ(test, count, param->expected_count);
+
+	/* All cases expect the register to be present in the xarray */
+	KUNIT_ASSERT_NOT_NULL(test, sr_entry);
+	KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
+ KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
+ KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
+ KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
+}
+
+static void rtp_desc(const struct rtp_test_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc);
+
+static int xe_rtp_test_init(struct kunit *test)
+{
+ struct xe_device *xe;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ xe = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct xe_device,
+ drm, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+ /* Initialize an empty device */
+ test->priv = NULL;
+ ret = xe_pci_fake_device_init(xe);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ xe->drm.dev = dev;
+ test->priv = xe;
+
+ return 0;
+}
+
+static void xe_rtp_test_exit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ drm_kunit_helper_free_device(test, xe->drm.dev);
+}
+
+static struct kunit_case xe_rtp_tests[] = {
+ KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params),
+ {}
+};
+
+static struct kunit_suite xe_rtp_test_suite = {
+ .name = "xe_rtp",
+ .init = xe_rtp_test_init,
+ .exit = xe_rtp_test_exit,
+ .test_cases = xe_rtp_tests,
+};
+
+kunit_test_suite(xe_rtp_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_rtp kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
new file mode 100644
index 000000000..7a1ae213e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_TEST_H_
+#define _XE_TEST_H_
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include <linux/sched.h>
+#include <kunit/test.h>
+
+/*
+ * For each test that provides a kunit private test structure, place a test
+ * id here and point kunit->priv to an embedded struct xe_test_priv.
+ */
+enum xe_test_priv_id {
+ XE_TEST_LIVE_DMA_BUF,
+ XE_TEST_LIVE_MIGRATE,
+};
+
+/**
+ * struct xe_test_priv - Base class for test private info
+ * @id: enum xe_test_priv_id to identify the subclass.
+ */
+struct xe_test_priv {
+ enum xe_test_priv_id id;
+};
+
+#define XE_TEST_DECLARE(x) x
+#define XE_TEST_ONLY(x) unlikely(x)
+#define XE_TEST_EXPORT
+#define xe_cur_kunit() current->kunit_test
+
+/**
+ * xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by
+ * current->kunit_test->priv if it exists and is embedded in the expected
+ * subclass.
+ * @id: Id of the expected subclass.
+ *
+ * Return: NULL if the process is not a kunit test, or if the current
+ * kunit->priv pointer does not point to an object of the expected subclass.
+ * A pointer to the embedded struct xe_test_priv otherwise.
+ */
+static inline struct xe_test_priv *
+xe_cur_kunit_priv(enum xe_test_priv_id id)
+{
+ struct xe_test_priv *priv;
+
+ if (!xe_cur_kunit())
+ return NULL;
+
+ priv = xe_cur_kunit()->priv;
+ return priv->id == id ? priv : NULL;
+}
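+
+/*
+ * Example subclass (as used by the live migrate test): the test embeds
+ * struct xe_test_priv and recovers the wrapper via the id:
+ *
+ *	struct migrate_test_params {
+ *		struct xe_test_priv base;
+ *		bool force_gpu;
+ *	};
+ *
+ *	p = to_migrate_test_params(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));
+ */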
+
+#else /* if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) */
+
+#define XE_TEST_DECLARE(x)
+#define XE_TEST_ONLY(x) 0
+#define XE_TEST_EXPORT static
+#define xe_cur_kunit() NULL
+#define xe_cur_kunit_priv(_id) NULL
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
new file mode 100644
index 000000000..b4715b78e
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_pci_test.h"
+#include "xe_reg_sr.h"
+#include "xe_tuning.h"
+#include "xe_wa.h"
+
+struct platform_test_case {
+ const char *name;
+ enum xe_platform platform;
+ enum xe_subplatform subplatform;
+ u32 graphics_verx100;
+ u32 media_verx100;
+ struct xe_step_info step;
+};
+
+#define PLATFORM_CASE(platform__, graphics_step__) \
+ { \
+ .name = #platform__ " (" #graphics_step__ ")", \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
+ { \
+ .name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")", \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
+ media_verx100__, media_step__) \
+ { \
+ .name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .graphics_verx100 = graphics_verx100__, \
+ .media_verx100 = media_verx100__, \
+ .step = { .graphics = STEP_ ## graphics_step__, \
+ .media = STEP_ ## media_step__ } \
+ }
+
+static const struct platform_test_case cases[] = {
+ PLATFORM_CASE(TIGERLAKE, B0),
+ PLATFORM_CASE(DG1, A0),
+ PLATFORM_CASE(DG1, B0),
+ PLATFORM_CASE(ALDERLAKE_S, A0),
+ PLATFORM_CASE(ALDERLAKE_S, B0),
+ PLATFORM_CASE(ALDERLAKE_S, C0),
+ PLATFORM_CASE(ALDERLAKE_S, D0),
+ PLATFORM_CASE(ALDERLAKE_P, A0),
+ PLATFORM_CASE(ALDERLAKE_P, B0),
+ PLATFORM_CASE(ALDERLAKE_P, C0),
+ SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
+ SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
+ SUBPLATFORM_CASE(DG2, G10, A0),
+ SUBPLATFORM_CASE(DG2, G10, A1),
+ SUBPLATFORM_CASE(DG2, G10, B0),
+ SUBPLATFORM_CASE(DG2, G10, C0),
+ SUBPLATFORM_CASE(DG2, G11, A0),
+ SUBPLATFORM_CASE(DG2, G11, B0),
+ SUBPLATFORM_CASE(DG2, G11, B1),
+ SUBPLATFORM_CASE(DG2, G12, A0),
+ SUBPLATFORM_CASE(DG2, G12, A1),
+ GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
+ GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+};
+
+static void platform_desc(const struct platform_test_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(platform, cases, platform_desc);
+
+static int xe_wa_test_init(struct kunit *test)
+{
+ const struct platform_test_case *param = test->param_value;
+ struct xe_pci_fake_data data = {
+ .platform = param->platform,
+ .subplatform = param->subplatform,
+ .graphics_verx100 = param->graphics_verx100,
+ .media_verx100 = param->media_verx100,
+ .graphics_step = param->step.graphics,
+ .media_step = param->step.media,
+ };
+ struct xe_device *xe;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ xe = drm_kunit_helper_alloc_drm_device(test, dev,
+ struct xe_device,
+ drm, DRIVER_GEM);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+ test->priv = &data;
+ ret = xe_pci_fake_device_init(xe);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ if (!param->graphics_verx100)
+ xe->info.step = param->step;
+
+ /* TODO: init hw engines for engine/LRC WAs */
+ xe->drm.dev = dev;
+ test->priv = xe;
+
+ return 0;
+}
+
+static void xe_wa_test_exit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ drm_kunit_helper_free_device(test, xe->drm.dev);
+}
+
+static void xe_wa_gt(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt;
+ int id;
+
+ for_each_gt(gt, xe, id) {
+ xe_reg_sr_init(&gt->reg_sr, "GT", xe);
+
+ xe_wa_process_gt(gt);
+ xe_tuning_process_gt(gt);
+
+ KUNIT_ASSERT_EQ(test, gt->reg_sr.errors, 0);
+ }
+}
+
+static struct kunit_case xe_wa_tests[] = {
+ KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params),
+ {}
+};
+
+static struct kunit_suite xe_wa_test_suite = {
+ .name = "xe_wa",
+ .init = xe_wa_test_init,
+ .exit = xe_wa_test_exit,
+ .test_cases = xe_wa_tests,
+};
+
+kunit_test_suite(xe_wa_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_wa kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
new file mode 100644
index 000000000..34c142e6c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_ASSERT_H_
+#define _XE_ASSERT_H_
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+#include "xe_step.h"
+
+/**
+ * DOC: Xe ASSERTs
+ *
+ * While the Xe driver aims to be simpler than the legacy i915 driver, it is
+ * still complex enough that changes introduced while adding new functionality
+ * could break the existing code.
+ *
+ * Adding &drm_WARN or &drm_err to catch unwanted programming usage could lead
+ * to an undesired increase in driver footprint and may impact production
+ * driver performance, as this additional code would always be present.
+ *
+ * To allow annotating functions with additional detailed debug checks that
+ * assert all prerequisites are satisfied, without worrying about footprint or
+ * performance penalties on production builds (where all potential misuses
+ * introduced during code integration have already been fixed), we introduce a
+ * family of Xe assert macros that follow the classic assert() utility:
+ *
+ * * xe_assert()
+ * * xe_tile_assert()
+ * * xe_gt_assert()
+ *
+ * These macros are implemented on top of &drm_WARN but, unlike the original,
+ * trigger the warning when the provided condition is false. Additionally,
+ * none of the above assert macros can be used in expressions or as a
+ * condition, since the underlying code is compiled out on non-debug builds.
+ *
+ * Note that these macros are not intended to cover known gaps in the
+ * implementation; for such cases use regular &drm_WARN or &drm_err and
+ * provide a valid safe fallback.
+ *
+ * Also in cases where performance or footprint is not an issue, developers
+ * should continue to use the regular &drm_WARN or &drm_err to ensure that bug
+ * reports from production builds will contain meaningful diagnostics data.
+ *
+ * The code below shows how asserts can help debug builds catch unplanned use::
+ *
+ * static void one_igfx(struct xe_device *xe)
+ * {
+ * xe_assert(xe, xe->info.is_dgfx == false);
+ * xe_assert(xe, xe->info.tile_count == 1);
+ * }
+ *
+ * static void two_dgfx(struct xe_device *xe)
+ * {
+ * xe_assert(xe, xe->info.is_dgfx);
+ * xe_assert(xe, xe->info.tile_count == 2);
+ * }
+ *
+ * void foo(struct xe_device *xe)
+ * {
+ *		if (xe->info.is_dgfx)
+ * return two_dgfx(xe);
+ * return one_igfx(xe);
+ * }
+ *
+ * void bar(struct xe_device *xe)
+ * {
+ * if (drm_WARN_ON(xe->drm, xe->info.tile_count > 2))
+ * return;
+ *
+ * if (xe->info.tile_count == 2)
+ * return two_dgfx(xe);
+ * return one_igfx(xe);
+ * }
+ */
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define __xe_assert_msg(xe, condition, msg, arg...) ({ \
+ (void)drm_WARN(&(xe)->drm, !(condition), "[" DRM_NAME "] Assertion `%s` failed!\n" msg, \
+ __stringify(condition), ## arg); \
+})
+#else
+#define __xe_assert_msg(xe, condition, msg, arg...) ({ \
+ typecheck(const struct xe_device *, xe); \
+ BUILD_BUG_ON_INVALID(condition); \
+})
+#endif
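+
+/*
+ * On non-debug builds the condition is only type-checked and then compiled
+ * out via BUILD_BUG_ON_INVALID(), so asserted expressions must be free of
+ * side effects.
+ */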
+
+/**
+ * xe_assert - warn if condition is false when debugging.
+ * @xe: the &struct xe_device pointer to which &condition applies
+ * @condition: condition to check
+ *
+ * xe_assert() uses &drm_WARN to emit a warning and print additional information
+ * that could be read from the &xe pointer if provided &condition is false.
+ *
+ * Contrary to &drm_WARN, xe_assert() is effective only on debug builds
+ * (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
+ * or as a condition.
+ *
+ * See `Xe ASSERTs`_ for general usage guidelines.
+ */
+#define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")
+#define xe_assert_msg(xe, condition, msg, arg...) ({ \
+ const struct xe_device *__xe = (xe); \
+ __xe_assert_msg(__xe, condition, \
+ "platform: %d subplatform: %d\n" \
+ "graphics: %s %u.%02u step %s\n" \
+ "media: %s %u.%02u step %s\n" \
+ msg, \
+ __xe->info.platform, __xe->info.subplatform, \
+ __xe->info.graphics_name, \
+ __xe->info.graphics_verx100 / 100, \
+ __xe->info.graphics_verx100 % 100, \
+ xe_step_name(__xe->info.step.graphics), \
+ __xe->info.media_name, \
+ __xe->info.media_verx100 / 100, \
+ __xe->info.media_verx100 % 100, \
+ xe_step_name(__xe->info.step.media), \
+ ## arg); \
+})
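+
+/*
+ * Minimal usage sketch (count and max below are placeholders): the device
+ * details from the format string above are appended automatically, while
+ * callers can add their own context:
+ *
+ *	xe_assert(xe, xe->info.tile_count == 1);
+ *	xe_assert_msg(xe, count < max, "count=%u max=%u", count, max);
+ */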
+
+/**
+ * xe_tile_assert - warn if condition is false when debugging.
+ * @tile: the &struct xe_tile pointer to which &condition applies
+ * @condition: condition to check
+ *
+ * xe_tile_assert() uses &drm_WARN to emit a warning and print additional
+ * information that could be read from the &tile pointer if provided &condition
+ * is false.
+ *
+ * Contrary to &drm_WARN, xe_tile_assert() is effective only on debug builds
+ * (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
+ * or as a condition.
+ *
+ * See `Xe ASSERTs`_ for general usage guidelines.
+ */
+#define xe_tile_assert(tile, condition) xe_tile_assert_msg((tile), condition, "")
+#define xe_tile_assert_msg(tile, condition, msg, arg...) ({ \
+ const struct xe_tile *__tile = (tile); \
+ char __buf[10] __maybe_unused; \
+ xe_assert_msg(tile_to_xe(__tile), condition, "tile: %u VRAM %s\n" msg, \
+ __tile->id, ({ string_get_size(__tile->mem.vram.actual_physical_size, 1, \
+ STRING_UNITS_2, __buf, sizeof(__buf)); __buf; }), ## arg); \
+})
+
+/**
+ * xe_gt_assert - warn if condition is false when debugging.
+ * @gt: the &struct xe_gt pointer to which &condition applies
+ * @condition: condition to check
+ *
+ * xe_gt_assert() uses &drm_WARN to emit a warning and print additional
+ * information that could be safely read from the &gt pointer if provided
+ * &condition is false.
+ *
+ * Contrary to &drm_WARN, xe_gt_assert() is effective only on debug builds
+ * (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
+ * or as a condition.
+ *
+ * See `Xe ASSERTs`_ for general usage guidelines.
+ */
+#define xe_gt_assert(gt, condition) xe_gt_assert_msg((gt), condition, "")
+#define xe_gt_assert_msg(gt, condition, msg, arg...) ({ \
+ const struct xe_gt *__gt = (gt); \
+ xe_tile_assert_msg(gt_to_tile(__gt), condition, "GT: %u type %d\n" msg, \
+ __gt->info.id, __gt->info.type, ## arg); \
+})
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
new file mode 100644
index 000000000..7c124475c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_bb.h"
+
+#include "instructions/xe_mi_commands.h"
+#include "regs/xe_gpu_commands.h"
+#include "xe_device.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt.h"
+#include "xe_hw_fence.h"
+#include "xe_sa.h"
+#include "xe_sched_job.h"
+#include "xe_vm_types.h"
+
+static int bb_prefetch(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
+ /*
+ * RCS and CCS require 1K, although other engines would be
+ * okay with 512.
+ */
+ return SZ_1K;
+ else
+ return SZ_512;
+}
+
+struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
+ int err;
+
+ if (!bb)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * We need to allocate space for the requested number of dwords,
+ * one additional MI_BATCH_BUFFER_END dword, and additional buffer
+	 * space to accommodate the platform-specific hardware prefetch
+ * requirements.
+ */
+ bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
+ 4 * (dwords + 1) + bb_prefetch(gt));
+ if (IS_ERR(bb->bo)) {
+ err = PTR_ERR(bb->bo);
+ goto err;
+ }
+
+ bb->cs = xe_sa_bo_cpu_addr(bb->bo);
+ bb->len = 0;
+
+ return bb;
+err:
+ kfree(bb);
+ return ERR_PTR(err);
+}
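+
+/*
+ * Typical flow (sketch, mirroring the migrate kunit test): allocate a bb,
+ * emit commands into bb->cs while bumping bb->len, then turn it into a job;
+ * __xe_bb_create_job() appends the final MI_BATCH_BUFFER_END itself:
+ *
+ *	bb = xe_bb_new(gt, 32, false);
+ *	bb->cs[bb->len++] = ...;
+ *	job = xe_bb_create_job(q, bb);
+ */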
+
+static struct xe_sched_job *
+__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
+{
+ u32 size = drm_suballoc_size(bb->bo);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+
+ xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size);
+
+ xe_sa_bo_flush_write(bb->bo);
+
+ return xe_sched_job_create(q, addr);
+}
+
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
+ struct xe_bb *bb,
+ u64 batch_base_ofs,
+ u32 second_idx)
+{
+ u64 addr[2] = {
+ batch_base_ofs + drm_suballoc_soffset(bb->bo),
+ batch_base_ofs + drm_suballoc_soffset(bb->bo) +
+ 4 * second_idx,
+ };
+
+ xe_gt_assert(q->gt, second_idx <= bb->len);
+ xe_gt_assert(q->gt, q->vm->flags & XE_VM_FLAG_MIGRATION);
+
+ return __xe_bb_create_job(q, bb, addr);
+}
+
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
+ struct xe_bb *bb)
+{
+ u64 addr = xe_sa_bo_gpu_addr(bb->bo);
+
+ xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
+ return __xe_bb_create_job(q, bb, &addr);
+}
+
+void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)
+{
+ if (!bb)
+ return;
+
+ xe_sa_bo_free(bb->bo, fence);
+ kfree(bb);
+}
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
new file mode 100644
index 000000000..fafacd73d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_BB_H_
+#define _XE_BB_H_
+
+#include "xe_bb_types.h"
+
+struct dma_fence;
+
+struct xe_gt;
+struct xe_exec_queue;
+struct xe_sched_job;
+
+struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
+struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
+ struct xe_bb *bb);
+struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
+ struct xe_bb *bb, u64 batch_ofs,
+ u32 second_idx);
+void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bb_types.h b/drivers/gpu/drm/xe/xe_bb_types.h
new file mode 100644
index 000000000..b7d30308c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bb_types.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_BB_TYPES_H_
+#define _XE_BB_TYPES_H_
+
+#include <linux/types.h>
+
+struct drm_suballoc;
+
+struct xe_bb {
+ struct drm_suballoc *bo;
+
+ u32 *cs;
+ u32 len; /* in dwords */
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
new file mode 100644
index 000000000..eb2c44a32
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -0,0 +1,2235 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_bo.h"
+
+#include <linux/dma-buf.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/xe_drm.h>
+
+#include "xe_device.h"
+#include "xe_dma_buf.h"
+#include "xe_drm_client.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_map.h"
+#include "xe_migrate.h"
+#include "xe_preempt_fence.h"
+#include "xe_res_cursor.h"
+#include "xe_trace.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_vm.h"
+
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+ [XE_PL_SYSTEM] = "system",
+ [XE_PL_TT] = "gtt",
+ [XE_PL_VRAM0] = "vram0",
+ [XE_PL_VRAM1] = "vram1",
+ [XE_PL_STOLEN] = "stolen"
+};
+
+static const struct ttm_place sys_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = XE_PL_SYSTEM,
+ .flags = 0,
+};
+
+static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags,
+};
+
+static const struct ttm_place tt_placement_flags = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = XE_PL_TT,
+ .flags = 0,
+};
+
+static struct ttm_placement tt_placement = {
+ .num_placement = 1,
+ .placement = &tt_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags,
+};
+
+bool mem_type_is_vram(u32 mem_type)
+{
+ return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
+}
+
+static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
+{
+ return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
+}
+
+static bool resource_is_vram(struct ttm_resource *res)
+{
+ return mem_type_is_vram(res->mem_type);
+}
+
+bool xe_bo_is_vram(struct xe_bo *bo)
+{
+ return resource_is_vram(bo->ttm.resource) ||
+ resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
+}
+
+bool xe_bo_is_stolen(struct xe_bo *bo)
+{
+ return bo->ttm.resource->mem_type == XE_PL_STOLEN;
+}
+
+/**
+ * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
+ * @bo: The BO
+ *
+ * The stolen memory is accessed through the PCI BAR for both DGFX and some
+ * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
+ *
+ * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
+ */
+bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
+{
+ return xe_bo_is_stolen(bo) &&
+ GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
+}
+
+static bool xe_bo_is_user(struct xe_bo *bo)
+{
+ return bo->flags & XE_BO_CREATE_USER_BIT;
+}
+
+static struct xe_migrate *
+mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
+{
+ struct xe_tile *tile;
+
+ xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
+ tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
+ return tile->migrate;
+}
+
+static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
+{
+ struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
+ struct ttm_resource_manager *mgr;
+
+ xe_assert(xe, resource_is_vram(res));
+ mgr = ttm_manager_type(&xe->ttm, res->mem_type);
+ return to_xe_ttm_vram_mgr(mgr)->vram;
+}
+
+static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
+ u32 bo_flags, u32 *c)
+{
+ if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+ xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
+ bo->placements[*c] = (struct ttm_place) {
+ .mem_type = XE_PL_TT,
+ };
+ *c += 1;
+ }
+}
+
+static void add_vram(struct xe_device *xe, struct xe_bo *bo,
+ struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
+{
+ struct ttm_place place = { .mem_type = mem_type };
+ struct xe_mem_region *vram;
+ u64 io_size;
+
+ xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
+ vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
+ xe_assert(xe, vram && vram->usable_size);
+ io_size = vram->io_size;
+
+ /*
+ * For eviction / restore on suspend / resume objects
+ * pinned in VRAM must be contiguous
+ */
+ if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
+ XE_BO_CREATE_GGTT_BIT))
+ place.flags |= TTM_PL_FLAG_CONTIGUOUS;
+
+ if (io_size < vram->usable_size) {
+ if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
+ place.fpfn = 0;
+ place.lpfn = io_size >> PAGE_SHIFT;
+ } else {
+ place.flags |= TTM_PL_FLAG_TOPDOWN;
+ }
+ }
+ places[*c] = place;
+ *c += 1;
+}
+
+static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
+ u32 bo_flags, u32 *c)
+{
+ if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
+ if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
+}
+
+static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
+ u32 bo_flags, u32 *c)
+{
+ if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+ xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
+ bo->placements[*c] = (struct ttm_place) {
+ .mem_type = XE_PL_STOLEN,
+ .flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
+ XE_BO_CREATE_GGTT_BIT) ?
+ TTM_PL_FLAG_CONTIGUOUS : 0,
+ };
+ *c += 1;
+ }
+}
+
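+/*
+ * The order in which the helpers above are called defines placement
+ * priority: VRAM (when requested) is preferred, then system memory, with
+ * stolen memory as the last resort.
+ */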
+static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
+ u32 bo_flags)
+{
+ u32 c = 0;
+
+ try_add_vram(xe, bo, bo_flags, &c);
+ try_add_system(xe, bo, bo_flags, &c);
+ try_add_stolen(xe, bo, bo_flags, &c);
+
+ if (!c)
+ return -EINVAL;
+
+ bo->placement = (struct ttm_placement) {
+ .num_placement = c,
+ .placement = bo->placements,
+ .num_busy_placement = c,
+ .busy_placement = bo->placements,
+ };
+
+ return 0;
+}
+
+int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
+ u32 bo_flags)
+{
+ xe_bo_assert_held(bo);
+ return __xe_bo_placement_for_flags(xe, bo, bo_flags);
+}
+
+static void xe_evict_flags(struct ttm_buffer_object *tbo,
+ struct ttm_placement *placement)
+{
+ if (!xe_bo_is_xe_bo(tbo)) {
+ /* Don't handle scatter gather BOs */
+ if (tbo->type == ttm_bo_type_sg) {
+ placement->num_placement = 0;
+ placement->num_busy_placement = 0;
+ return;
+ }
+
+ *placement = sys_placement;
+ return;
+ }
+
+ /*
+	 * For xe, sg bos that are evicted to system just trigger a
+ * rebind of the sg list upon subsequent validation to XE_PL_TT.
+ */
+ switch (tbo->resource->mem_type) {
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1:
+ case XE_PL_STOLEN:
+ *placement = tt_placement;
+ break;
+ case XE_PL_TT:
+ default:
+ *placement = sys_placement;
+ break;
+ }
+}
+
+struct xe_ttm_tt {
+ struct ttm_tt ttm;
+ struct device *dev;
+ struct sg_table sgt;
+ struct sg_table *sg;
+};
+
+static int xe_tt_map_sg(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+ unsigned long num_pages = tt->num_pages;
+ int ret;
+
+ XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+ if (xe_tt->sg)
+ return 0;
+
+ ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
+ num_pages, 0,
+ (u64)num_pages << PAGE_SHIFT,
+ xe_sg_segment_size(xe_tt->dev),
+ GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ xe_tt->sg = &xe_tt->sgt;
+ ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret) {
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+struct sg_table *xe_bo_sg(struct xe_bo *bo)
+{
+ struct ttm_tt *tt = bo->ttm.ttm;
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ return xe_tt->sg;
+}
+
+static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
+ u32 page_flags)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_ttm_tt *tt;
+ unsigned long extra_pages;
+ enum ttm_caching caching;
+ int err;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->dev = xe->drm.dev;
+
+ extra_pages = 0;
+ if (xe_bo_needs_ccs_pages(bo))
+ extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
+ PAGE_SIZE);
+
+ switch (bo->cpu_caching) {
+ case DRM_XE_GEM_CPU_CACHING_WC:
+ caching = ttm_write_combined;
+ break;
+ default:
+ caching = ttm_cached;
+ break;
+ }
+
+ WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
+
+ /*
+ * Display scanout is always non-coherent with the CPU cache.
+ *
+ * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
+ * require a CPU:WC mapping.
+ */
+ if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
+ (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+ caching = ttm_write_combined;
+
+ err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
+ if (err) {
+ kfree(tt);
+ return NULL;
+ }
+
+ return &tt->ttm;
+}
+
+static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ int err;
+
+ /*
+ * dma-bufs are not populated with pages, and the dma-
+ * addresses are set up when moved to XE_PL_TT.
+ */
+ if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ return 0;
+
+ err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ if (err)
+ return err;
+
+	/* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
+ err = xe_tt_map_sg(tt);
+ if (err)
+ ttm_pool_free(&ttm_dev->pool, tt);
+
+ return err;
+}
+
+static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ return;
+
+ if (xe_tt->sg) {
+ dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ }
+
+ return ttm_pool_free(&ttm_dev->pool, tt);
+}
+
+static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
+{
+ struct xe_device *xe = ttm_to_xe_device(bdev);
+
+ switch (mem->mem_type) {
+ case XE_PL_SYSTEM:
+ case XE_PL_TT:
+ return 0;
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1: {
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(mem);
+ struct xe_mem_region *vram = res_to_mem_region(mem);
+
+ if (vres->used_visible_size < mem->size)
+ return -EINVAL;
+
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+
+ if (vram->mapping &&
+ mem->placement & TTM_PL_FLAG_CONTIGUOUS)
+ mem->bus.addr = (u8 __force *)vram->mapping +
+ mem->bus.offset;
+
+ mem->bus.offset += vram->io_start;
+ mem->bus.is_iomem = true;
+
+#if !defined(CONFIG_X86)
+ mem->bus.caching = ttm_write_combined;
+#endif
+ return 0;
+	}
+	case XE_PL_STOLEN:
+ return xe_ttm_stolen_io_mem_reserve(xe, mem);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ struct drm_gem_object *obj = &bo->ttm.base;
+ struct drm_gpuvm_bo *vm_bo;
+ bool idle = false;
+ int ret = 0;
+
+ dma_resv_assert_held(bo->ttm.base.resv);
+
+ if (!list_empty(&bo->ttm.base.gpuva.list)) {
+ dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+ }
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
+ struct drm_gpuva *gpuva;
+
+ if (!xe_vm_in_fault_mode(vm)) {
+ drm_gpuvm_bo_evict(vm_bo, true);
+ continue;
+ }
+
+ if (!idle) {
+ long timeout;
+
+ if (ctx->no_wait_gpu &&
+ !dma_resv_test_signaled(bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ return -EBUSY;
+
+ timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+ ctx->interruptible,
+ MAX_SCHEDULE_TIMEOUT);
+ if (!timeout)
+ return -ETIME;
+ if (timeout < 0)
+ return timeout;
+
+ idle = true;
+ }
+
+ drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ trace_xe_vma_evict(vma);
+ ret = xe_vm_invalidate_vma(vma);
+ if (XE_WARN_ON(ret))
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
+ * Note that unmapping the attachment is deferred to the next
+ * map_attachment time, or to bo destroy (after idling), whichever comes first.
+ * This is to avoid syncing before unmap_attachment(), assuming that the
+ * caller relies on idling the reservation object before moving the
+ * backing store out. Should that assumption not hold, then we will be able
+ * to unconditionally call unmap_attachment() when moving out to system.
+ */
+static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
+ struct ttm_resource *new_res)
+{
+ struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
+ struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
+ ttm);
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct sg_table *sg;
+
+ xe_assert(xe, attach);
+ xe_assert(xe, ttm_bo->ttm);
+
+ if (new_res->mem_type == XE_PL_SYSTEM)
+ goto out;
+
+ if (ttm_bo->sg) {
+ dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ }
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ ttm_bo->sg = sg;
+ xe_tt->sg = sg;
+
+out:
+ ttm_bo_move_null(ttm_bo, new_res);
+
+ return 0;
+}
+
+/**
+ * xe_bo_move_notify - Notify subsystems of a pending move
+ * @bo: The buffer object
+ * @ctx: The struct ttm_operation_ctx controlling locking and waits.
+ *
+ * This function notifies subsystems of an upcoming buffer move.
+ * Upon receiving such a notification, subsystems should schedule
+ * halting access to the underlying pages and optionally add a fence
+ * to the buffer object's dma_resv object, that signals when access is
+ * stopped. The caller will wait on all dma_resv fences before
+ * starting the move.
+ *
+ * A subsystem may commence access to the object after obtaining
+ * bindings to the new backing memory under the object lock.
+ *
+ * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
+ * negative error code on error.
+ */
+static int xe_bo_move_notify(struct xe_bo *bo,
+ const struct ttm_operation_ctx *ctx)
+{
+ struct ttm_buffer_object *ttm_bo = &bo->ttm;
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ int ret;
+
+ /*
+ * If this starts to call into many components, consider
+ * using a notification chain here.
+ */
+
+ if (xe_bo_is_pinned(bo))
+ return -EINVAL;
+
+ xe_bo_vunmap(bo);
+ ret = xe_bo_trigger_rebind(xe, bo, ctx);
+ if (ret)
+ return ret;
+
+ /* Don't call move_notify() for imported dma-bufs. */
+ if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
+ dma_buf_move_notify(ttm_bo->base.dma_buf);
+
+ return 0;
+}
+
+static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
+{
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct ttm_resource *old_mem = ttm_bo->resource;
+ u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
+ struct ttm_tt *ttm = ttm_bo->ttm;
+ struct xe_migrate *migrate = NULL;
+ struct dma_fence *fence;
+ bool move_lacks_source;
+ bool tt_has_data;
+ bool needs_clear;
+	bool handle_system_ccs = !IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
+				 ttm && ttm_tt_is_populated(ttm);
+	int ret = 0;
+
+	/* Bo creation path, moving to system or TT. */
+ if ((!old_mem && ttm) && !handle_system_ccs) {
+ ttm_bo_move_null(ttm_bo, new_mem);
+ return 0;
+ }
+
+ if (ttm_bo->type == ttm_bo_type_sg) {
+ ret = xe_bo_move_notify(bo, ctx);
+ if (!ret)
+ ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
+ goto out;
+ }
+
+ tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
+ (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+ move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) :
+ (!mem_type_is_vram(old_mem_type) && !tt_has_data);
+
+ needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
+ (!ttm && ttm_bo->type == ttm_bo_type_device);
+
+	if (move_lacks_source && !needs_clear) {
+ ttm_bo_move_null(ttm_bo, new_mem);
+ goto out;
+ }
+
+ if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
+ ttm_bo_move_null(ttm_bo, new_mem);
+ goto out;
+ }
+
+ /*
+ * Failed multi-hop where the old_mem is still marked as
+ * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
+ */
+ if (old_mem_type == XE_PL_TT &&
+ new_mem->mem_type == XE_PL_TT) {
+ ttm_bo_move_null(ttm_bo, new_mem);
+ goto out;
+ }
+
+ if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
+ ret = xe_bo_move_notify(bo, ctx);
+ if (ret)
+ goto out;
+ }
+
+ if (old_mem_type == XE_PL_TT &&
+ new_mem->mem_type == XE_PL_SYSTEM) {
+ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+ true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ ret = timeout;
+ goto out;
+ }
+
+ if (!handle_system_ccs) {
+ ttm_bo_move_null(ttm_bo, new_mem);
+ goto out;
+ }
+ }
+
+ if (!move_lacks_source &&
+ ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
+ (mem_type_is_vram(old_mem_type) &&
+ new_mem->mem_type == XE_PL_SYSTEM))) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = XE_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ ret = -EMULTIHOP;
+ goto out;
+ }
+
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else if (resource_is_vram(new_mem))
+ migrate = mem_type_to_migrate(xe, new_mem->mem_type);
+ else if (mem_type_is_vram(old_mem_type))
+ migrate = mem_type_to_migrate(xe, old_mem_type);
+ else
+ migrate = xe->tiles[0].migrate;
+
+ xe_assert(xe, migrate);
+ trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
+ xe_device_mem_access_get(xe);
+
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
+ /*
+ * Kernel memory that is pinned should only be moved on suspend
+ * / resume, some of the pinned memory is required for the
+ * device to resume / use the GPU to move other evicted memory
+		 * (user memory) around. This could likely be optimized a bit
+		 * further by finding the minimum set of pinned memory
+		 * required for resume, but for simplicity we do a memcpy
+		 * for all pinned memory.
+ */
+ ret = xe_bo_vmap(bo);
+ if (!ret) {
+ ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
+
+ /* Create a new VMAP once kernel BO back in VRAM */
+ if (!ret && resource_is_vram(new_mem)) {
+ struct xe_mem_region *vram = res_to_mem_region(new_mem);
+ void __iomem *new_addr = vram->mapping +
+ (new_mem->start << PAGE_SHIFT);
+
+ if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
+ ret = -EINVAL;
+ xe_device_mem_access_put(xe);
+ goto out;
+ }
+
+ xe_assert(xe, new_mem->start ==
+ bo->placements->fpfn);
+
+ iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
+ }
+ }
+ } else {
+ if (move_lacks_source)
+ fence = xe_migrate_clear(migrate, bo, new_mem);
+ else
+ fence = xe_migrate_copy(migrate, bo, bo, old_mem,
+ new_mem, handle_system_ccs);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ xe_device_mem_access_put(xe);
+ goto out;
+ }
+ if (!move_lacks_source) {
+ ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
+ true, new_mem);
+ if (ret) {
+ dma_fence_wait(fence, false);
+ ttm_bo_move_null(ttm_bo, new_mem);
+ ret = 0;
+ }
+ } else {
+ /*
+ * ttm_bo_move_accel_cleanup() may blow up if
+ * bo->resource == NULL, so just attach the
+ * fence and set the new resource.
+ */
+ dma_resv_add_fence(ttm_bo->base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ ttm_bo_move_null(ttm_bo, new_mem);
+ }
+
+ dma_fence_put(fence);
+ }
+
+ xe_device_mem_access_put(xe);
+
+out:
+	return ret;
+}
+
+/**
+ * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved to system memory.
+ * This function blocks until the object has been fully moved.
+ *
+ * This is needed for special handling of pinned VRAM objects during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_evict_pinned(struct xe_bo *bo)
+{
+ struct ttm_place place = {
+ .mem_type = XE_PL_TT,
+ };
+ struct ttm_placement placement = {
+ .placement = &place,
+ .num_placement = 1,
+ };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ };
+ struct ttm_resource *new_mem;
+ int ret;
+
+ xe_bo_assert_held(bo);
+
+ if (WARN_ON(!bo->ttm.resource))
+ return -EINVAL;
+
+ if (WARN_ON(!xe_bo_is_pinned(bo)))
+ return -EINVAL;
+
+ if (WARN_ON(!xe_bo_is_vram(bo)))
+ return -EINVAL;
+
+ ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
+ if (ret)
+ return ret;
+
+ if (!bo->ttm.ttm) {
+ bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
+ if (!bo->ttm.ttm) {
+ ret = -ENOMEM;
+ goto err_res_free;
+ }
+ }
+
+ ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ if (ret)
+ goto err_res_free;
+
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto err_res_free;
+
+ ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
+ if (ret)
+ goto err_res_free;
+
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+
+err_res_free:
+ ttm_resource_free(&bo->ttm, &new_mem);
+ return ret;
+}
+
+/**
+ * xe_bo_restore_pinned() - Restore a pinned VRAM object
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved back to VRAM.
+ * This function blocks until the object has been fully moved.
+ *
+ * This is needed for special handling of pinned VRAM objects during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_restore_pinned(struct xe_bo *bo)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ };
+ struct ttm_resource *new_mem;
+ int ret;
+
+ xe_bo_assert_held(bo);
+
+ if (WARN_ON(!bo->ttm.resource))
+ return -EINVAL;
+
+ if (WARN_ON(!xe_bo_is_pinned(bo)))
+ return -EINVAL;
+
+ if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+ return -EINVAL;
+
+ ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
+ if (ret)
+ return ret;
+
+ ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ if (ret)
+ goto err_res_free;
+
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto err_res_free;
+
+ ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
+ if (ret)
+ goto err_res_free;
+
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+
+err_res_free:
+ ttm_resource_free(&bo->ttm, &new_mem);
+ return ret;
+}
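+
+/*
+ * Illustrative sketch, not part of the original patch: how a suspend /
+ * resume path could pair xe_bo_evict_pinned() with xe_bo_restore_pinned()
+ * under the bo lock. The example_suspend_resume_cycle() name is
+ * hypothetical.
+ */
+static int __maybe_unused example_suspend_resume_cycle(struct xe_bo *bo)
+{
+	int ret;
+
+	xe_bo_lock(bo, false);
+	ret = xe_bo_evict_pinned(bo);	/* Blocking move: VRAM -> system */
+	xe_bo_unlock(bo);
+	if (ret)
+		return ret;
+
+	/* ... device suspend and resume happen here ... */
+
+	xe_bo_lock(bo, false);
+	ret = xe_bo_restore_pinned(bo);	/* Blocking move: system -> VRAM */
+	xe_bo_unlock(bo);
+	return ret;
+}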
+
+static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
+ unsigned long page_offset)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_res_cursor cursor;
+ struct xe_mem_region *vram;
+
+ if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
+ return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
+
+ vram = res_to_mem_region(ttm_bo->resource);
+ xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ return (vram->io_start + cursor.start) >> PAGE_SHIFT;
+}
+
+static void __xe_bo_vunmap(struct xe_bo *bo);
+
+/*
+ * TODO: Move this function to TTM so we don't rely on how TTM does its
+ * locking, thereby abusing TTM internals.
+ */
+static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ bool locked;
+
+ xe_assert(xe, !kref_read(&ttm_bo->kref));
+
+ /*
+ * We can typically only race with TTM trylocking under the
+ * lru_lock, which will immediately be unlocked again since
+ * the ttm_bo refcount is zero at this point. So trylocking *should*
+ * always succeed here, as long as we hold the lru lock.
+ */
+ spin_lock(&ttm_bo->bdev->lru_lock);
+ locked = dma_resv_trylock(ttm_bo->base.resv);
+ spin_unlock(&ttm_bo->bdev->lru_lock);
+ xe_assert(xe, locked);
+
+ return locked;
+}
+
+static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ struct dma_fence *replacement = NULL;
+ struct xe_bo *bo;
+
+ if (!xe_bo_is_xe_bo(ttm_bo))
+ return;
+
+ bo = ttm_to_xe_bo(ttm_bo);
+ xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
+
+ /*
+	 * Corner case where TTM fails to allocate memory and this BO's resv
+	 * still points to the VM's resv.
+ */
+ if (ttm_bo->base.resv != &ttm_bo->base._resv)
+ return;
+
+ if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
+ return;
+
+ /*
+ * Scrub the preempt fences if any. The unbind fence is already
+ * attached to the resv.
+ * TODO: Don't do this for external bos once we scrub them after
+ * unbind.
+ */
+ dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (xe_fence_is_xe_preempt(fence) &&
+ !dma_fence_is_signaled(fence)) {
+ if (!replacement)
+ replacement = dma_fence_get_stub();
+
+ dma_resv_replace_fences(ttm_bo->base.resv,
+ fence->context,
+ replacement,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ }
+ dma_fence_put(replacement);
+
+ dma_resv_unlock(ttm_bo->base.resv);
+}
+
+static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
+{
+ if (!xe_bo_is_xe_bo(ttm_bo))
+ return;
+
+ /*
+ * Object is idle and about to be destroyed. Release the
+ * dma-buf attachment.
+ */
+ if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
+ struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
+ struct xe_ttm_tt, ttm);
+
+ dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
+ DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ xe_tt->sg = NULL;
+ }
+}
+
+struct ttm_device_funcs xe_ttm_funcs = {
+ .ttm_tt_create = xe_ttm_tt_create,
+ .ttm_tt_populate = xe_ttm_tt_populate,
+ .ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
+ .ttm_tt_destroy = xe_ttm_tt_destroy,
+ .evict_flags = xe_evict_flags,
+ .move = xe_bo_move,
+ .io_mem_reserve = xe_ttm_io_mem_reserve,
+ .io_mem_pfn = xe_ttm_io_mem_pfn,
+ .release_notify = xe_ttm_bo_release_notify,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .delete_mem_notify = xe_ttm_bo_delete_mem_notify,
+};
+
+static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+
+ if (bo->ttm.base.import_attach)
+ drm_prime_gem_destroy(&bo->ttm.base, NULL);
+ drm_gem_object_release(&bo->ttm.base);
+
+ xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
+
+ if (bo->ggtt_node.size)
+ xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
+
+#ifdef CONFIG_PROC_FS
+ if (bo->client)
+ xe_drm_client_remove_bo(bo);
+#endif
+
+ if (bo->vm && xe_bo_is_user(bo))
+ xe_vm_put(bo->vm);
+
+ kfree(bo);
+}
+
+static void xe_gem_object_free(struct drm_gem_object *obj)
+{
+ /* Our BO reference counting scheme works as follows:
+ *
+ * The gem object kref is typically used throughout the driver,
+ * and the gem object holds a ttm_buffer_object refcount, so
+ * that when the last gem object reference is put, which is when
+ * we end up in this function, we put also that ttm_buffer_object
+ * refcount. Anything using gem interfaces is then no longer
+ * allowed to access the object in a way that requires a gem
+ * refcount, including locking the object.
+ *
+	 * Driver TTM callbacks are allowed to use the ttm_buffer_object
+	 * refcount directly if needed.
+ */
+ __xe_bo_vunmap(gem_to_xe_bo(obj));
+ ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
+}
+
+static void xe_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
+ xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));
+
+ xe_bo_lock(bo, false);
+ ttm_bo_set_bulk_move(&bo->ttm, NULL);
+ xe_bo_unlock(bo);
+ }
+}
+
+static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
+ struct drm_device *ddev = tbo->base.dev;
+ vm_fault_t ret;
+ int idx;
+
+ ret = ttm_bo_vm_reserve(tbo, vmf);
+ if (ret)
+ return ret;
+
+ if (drm_dev_enter(ddev, &idx)) {
+ struct xe_bo *bo = ttm_to_xe_bo(tbo);
+
+ trace_xe_bo_cpu_fault(bo);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ }
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ dma_resv_unlock(tbo->base.resv);
+ return ret;
+}
+
+static const struct vm_operations_struct xe_gem_vm_ops = {
+ .fault = xe_gem_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
+static const struct drm_gem_object_funcs xe_gem_object_funcs = {
+ .free = xe_gem_object_free,
+ .close = xe_gem_object_close,
+ .mmap = drm_gem_ttm_mmap,
+ .export = xe_gem_prime_export,
+ .vm_ops = &xe_gem_vm_ops,
+};
+
+/**
+ * xe_bo_alloc - Allocate storage for a struct xe_bo
+ *
+ * This function is intended to allocate storage to be used as input
+ * to __xe_bo_create_locked(), in the case a pointer to the bo to be
+ * created is needed before the call to __xe_bo_create_locked().
+ * If __xe_bo_create_locked() never ends up being called, then the
+ * storage allocated with this function needs to be freed using
+ * xe_bo_free().
+ *
+ * Return: A pointer to an uninitialized struct xe_bo on success,
+ * ERR_PTR(-ENOMEM) on error.
+ */
+struct xe_bo *xe_bo_alloc(void)
+{
+ struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ return bo;
+}
+
+/**
+ * xe_bo_free - Free storage allocated using xe_bo_alloc()
+ * @bo: The buffer object storage.
+ *
+ * Refer to xe_bo_alloc() documentation for valid use-cases.
+ */
+void xe_bo_free(struct xe_bo *bo)
+{
+ kfree(bo);
+}
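+
+/*
+ * Illustrative sketch, not part of the original patch: the two-phase
+ * pattern xe_bo_alloc() is documented for. The storage must be released
+ * with xe_bo_free() if ___xe_bo_create_locked() never ends up being
+ * called. The example_precreate() name and the abort_early condition are
+ * hypothetical.
+ */
+static struct xe_bo * __maybe_unused
+example_precreate(struct xe_device *xe, size_t size, bool abort_early)
+{
+	struct xe_bo *bo = xe_bo_alloc();
+
+	if (IS_ERR(bo))
+		return bo;
+
+	/* ... the bo pointer may be published somewhere before creation ... */
+
+	if (abort_early) {
+		/* Creation never happens, so free the storage ourselves. */
+		xe_bo_free(bo);
+		return ERR_PTR(-ECANCELED);
+	}
+
+	/* On success the bo is returned with its reservation held. */
+	return ___xe_bo_create_locked(xe, bo, NULL, NULL, NULL, size, 0,
+				      ttm_bo_type_kernel,
+				      XE_BO_CREATE_SYSTEM_BIT);
+}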
+
+struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+ struct ttm_placement *placement;
+ uint32_t alignment;
+ size_t aligned_size;
+ int err;
+
+ /* Only kernel objects should set GT */
+ xe_assert(xe, !tile || type == ttm_bo_type_kernel);
+
+ if (XE_WARN_ON(!size)) {
+ xe_bo_free(bo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
+ !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
+ xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
+ aligned_size = ALIGN(size, SZ_64K);
+ if (type != ttm_bo_type_device)
+ size = ALIGN(size, SZ_64K);
+ flags |= XE_BO_INTERNAL_64K;
+		alignment = SZ_64K >> PAGE_SHIFT;
+	} else {
+ aligned_size = ALIGN(size, SZ_4K);
+ flags &= ~XE_BO_INTERNAL_64K;
+ alignment = SZ_4K >> PAGE_SHIFT;
+ }
+
+ if (type == ttm_bo_type_device && aligned_size != size)
+ return ERR_PTR(-EINVAL);
+
+ if (!bo) {
+ bo = xe_bo_alloc();
+ if (IS_ERR(bo))
+ return bo;
+ }
+
+ bo->ccs_cleared = false;
+ bo->tile = tile;
+ bo->size = size;
+ bo->flags = flags;
+ bo->cpu_caching = cpu_caching;
+ bo->ttm.base.funcs = &xe_gem_object_funcs;
+ bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
+ INIT_LIST_HEAD(&bo->pinned_link);
+#ifdef CONFIG_PROC_FS
+ INIT_LIST_HEAD(&bo->client_link);
+#endif
+
+ drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
+
+ if (resv) {
+ ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
+ ctx.resv = resv;
+ }
+
+ if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
+ err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
+ if (WARN_ON(err)) {
+ xe_ttm_bo_destroy(&bo->ttm);
+ return ERR_PTR(err);
+ }
+ }
+
+ /* Defer populating type_sg bos */
+ placement = (type == ttm_bo_type_sg ||
+ bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
+ &bo->placement;
+ err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
+ placement, alignment,
+ &ctx, NULL, resv, xe_ttm_bo_destroy);
+ if (err)
+ return ERR_PTR(err);
+
+ /*
+ * The VRAM pages underneath are potentially still being accessed by the
+ * GPU, as per async GPU clearing and async evictions. However TTM makes
+	 * sure to add any corresponding move/clear fences into the object's
+ * dma-resv using the DMA_RESV_USAGE_KERNEL slot.
+ *
+ * For KMD internal buffers we don't care about GPU clearing, however we
+ * still need to handle async evictions, where the VRAM is still being
+ * accessed by the GPU. Most internal callers are not expecting this,
+ * since they are missing the required synchronisation before accessing
+ * the memory. To keep things simple just sync wait any kernel fences
+ * here, if the buffer is designated KMD internal.
+ *
+ * For normal userspace objects we should already have the required
+ * pipelining or sync waiting elsewhere, since we already have to deal
+ * with things like async GPU clearing.
+ */
+ if (type == ttm_bo_type_kernel) {
+ long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ ctx.interruptible,
+ MAX_SCHEDULE_TIMEOUT);
+
+ if (timeout < 0) {
+ if (!resv)
+ dma_resv_unlock(bo->ttm.base.resv);
+ xe_bo_put(bo);
+ return ERR_PTR(timeout);
+ }
+ }
+
+ bo->created = true;
+ if (bulk)
+ ttm_bo_set_bulk_move(&bo->ttm, bulk);
+ else
+ ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
+
+ return bo;
+}
+
+static int __xe_bo_fixed_placement(struct xe_device *xe,
+ struct xe_bo *bo,
+ u32 flags,
+ u64 start, u64 end, u64 size)
+{
+ struct ttm_place *place = bo->placements;
+
+	if (flags & (XE_BO_CREATE_USER_BIT | XE_BO_CREATE_SYSTEM_BIT))
+ return -EINVAL;
+
+ place->flags = TTM_PL_FLAG_CONTIGUOUS;
+ place->fpfn = start >> PAGE_SHIFT;
+ place->lpfn = end >> PAGE_SHIFT;
+
+ switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
+ case XE_BO_CREATE_VRAM0_BIT:
+ place->mem_type = XE_PL_VRAM0;
+ break;
+ case XE_BO_CREATE_VRAM1_BIT:
+ place->mem_type = XE_PL_VRAM1;
+ break;
+ case XE_BO_CREATE_STOLEN_BIT:
+ place->mem_type = XE_PL_STOLEN;
+ break;
+
+ default:
+ /* 0 or multiple of the above set */
+ return -EINVAL;
+ }
+
+ bo->placement = (struct ttm_placement) {
+ .num_placement = 1,
+ .placement = place,
+ .num_busy_placement = 1,
+ .busy_placement = place,
+ };
+
+ return 0;
+}
+
+static struct xe_bo *
+__xe_bo_create_locked(struct xe_device *xe,
+ struct xe_tile *tile, struct xe_vm *vm,
+ size_t size, u64 start, u64 end,
+ u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+{
+ struct xe_bo *bo = NULL;
+ int err;
+
+ if (vm)
+ xe_vm_assert_held(vm);
+
+ if (start || end != ~0ULL) {
+ bo = xe_bo_alloc();
+ if (IS_ERR(bo))
+ return bo;
+
+ flags |= XE_BO_FIXED_PLACEMENT_BIT;
+ err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
+ if (err) {
+ xe_bo_free(bo);
+ return ERR_PTR(err);
+ }
+ }
+
+ bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
+ vm && !xe_vm_in_fault_mode(vm) &&
+ flags & XE_BO_CREATE_USER_BIT ?
+ &vm->lru_bulk_move : NULL, size,
+ cpu_caching, type, flags);
+ if (IS_ERR(bo))
+ return bo;
+
+ /*
+	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
+	 * to ensure the shared resv doesn't disappear under the bo, the bo
+	 * will keep a reference to the vm, and avoid circular references
+	 * by having all the vm's bo references released at vm close
+	 * time.
+ */
+ if (vm && xe_bo_is_user(bo))
+ xe_vm_get(vm);
+ bo->vm = vm;
+
+ if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+ if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
+ tile = xe_device_get_root_tile(xe);
+
+ xe_assert(xe, tile);
+
+ if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
+ err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
+ start + bo->size, U64_MAX);
+ } else {
+ err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
+ }
+ if (err)
+ goto err_unlock_put_bo;
+ }
+
+ return bo;
+
+err_unlock_put_bo:
+ __xe_bo_unset_bulk_move(bo);
+ xe_bo_unlock_vm_held(bo);
+ xe_bo_put(bo);
+ return ERR_PTR(err);
+}
+
+struct xe_bo *
+xe_bo_create_locked_range(struct xe_device *xe,
+ struct xe_tile *tile, struct xe_vm *vm,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags)
+{
+ return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
+}
+
+struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ enum ttm_bo_type type, u32 flags)
+{
+ return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
+}
+
+struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ u16 cpu_caching,
+ enum ttm_bo_type type,
+ u32 flags)
+{
+ struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
+ cpu_caching, type,
+ flags | XE_BO_CREATE_USER_BIT);
+ if (!IS_ERR(bo))
+ xe_bo_unlock_vm_held(bo);
+
+ return bo;
+}
+
+struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ enum ttm_bo_type type, u32 flags)
+{
+ struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
+
+ if (!IS_ERR(bo))
+ xe_bo_unlock_vm_held(bo);
+
+ return bo;
+}
+
+struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags)
+{
+ struct xe_bo *bo;
+ int err;
+ u64 start = offset == ~0ull ? 0 : offset;
+ u64 end = offset == ~0ull ? offset : start + size;
+
+ if (flags & XE_BO_CREATE_STOLEN_BIT &&
+ xe_ttm_stolen_cpu_access_needs_ggtt(xe))
+ flags |= XE_BO_CREATE_GGTT_BIT;
+
+ bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
+ flags | XE_BO_NEEDS_CPU_ACCESS);
+ if (IS_ERR(bo))
+ return bo;
+
+ err = xe_bo_pin(bo);
+ if (err)
+ goto err_put;
+
+ err = xe_bo_vmap(bo);
+ if (err)
+ goto err_unpin;
+
+ xe_bo_unlock_vm_held(bo);
+
+ return bo;
+
+err_unpin:
+ xe_bo_unpin(bo);
+err_put:
+ xe_bo_unlock_vm_held(bo);
+ xe_bo_put(bo);
+ return ERR_PTR(err);
+}
+
+struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ enum ttm_bo_type type, u32 flags)
+{
+ return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
+}
+
+struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
+ const void *data, size_t size,
+ enum ttm_bo_type type, u32 flags)
+{
+ struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ type, flags);
+ if (IS_ERR(bo))
+ return bo;
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
+
+ return bo;
+}
+
+static void __xe_bo_unpin_map_no_vm(struct drm_device *drm, void *arg)
+{
+ xe_bo_unpin_map_no_vm(arg);
+}
+
+struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u32 flags)
+{
+ struct xe_bo *bo;
+ int ret;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+ if (IS_ERR(bo))
+ return bo;
+
+ ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return bo;
+}
+
+struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
+ const void *data, size_t size, u32 flags)
+{
+ struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
+
+ return bo;
+}
+
+/*
+ * XXX: This is in the VM bind data path; this likely should be calculated
+ * once and stored, with a recalculation if the BO is moved.
+ */
+uint64_t vram_region_gpu_offset(struct ttm_resource *res)
+{
+ struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
+
+ if (res->mem_type == XE_PL_STOLEN)
+ return xe_ttm_stolen_gpu_offset(xe);
+
+ return res_to_mem_region(res)->dpa_base;
+}
+
+/**
+ * xe_bo_pin_external - pin an external BO
+ * @bo: buffer object to be pinned
+ *
+ * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
+ * BO. Unique call compared to xe_bo_pin as this function has its own set of
+ * asserts and code to ensure evict / restore on suspend / resume.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_bo_pin_external(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int err;
+
+ xe_assert(xe, !bo->vm);
+ xe_assert(xe, xe_bo_is_user(bo));
+
+ if (!xe_bo_is_pinned(bo)) {
+ err = xe_bo_validate(bo, NULL, false);
+ if (err)
+ return err;
+
+ if (xe_bo_is_vram(bo)) {
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link,
+ &xe->pinned.external_vram);
+ spin_unlock(&xe->pinned.lock);
+ }
+ }
+
+ ttm_bo_pin(&bo->ttm);
+
+ /*
+ * FIXME: If we always use the reserve / unreserve functions for locking
+ * we do not need this.
+ */
+ ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
+
+ return 0;
+}
+
+int xe_bo_pin(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int err;
+
+ /* We currently don't expect user BO to be pinned */
+ xe_assert(xe, !xe_bo_is_user(bo));
+
+ /* Pinned object must be in GGTT or have pinned flag */
+ xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
+ XE_BO_CREATE_GGTT_BIT));
+
+ /*
+ * No reason we can't support pinning imported dma-bufs we just don't
+ * expect to pin an imported dma-buf.
+ */
+ xe_assert(xe, !bo->ttm.base.import_attach);
+
+ /* We only expect at most 1 pin */
+ xe_assert(xe, !xe_bo_is_pinned(bo));
+
+ err = xe_bo_validate(bo, NULL, false);
+ if (err)
+ return err;
+
+ /*
+	 * For pinned objects on DGFX, which are also in VRAM, we expect
+	 * these to be in contiguous VRAM memory. This is required for
+	 * eviction / restore during suspend / resume (we force restore to
+	 * the same physical address).
+ */
+ if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
+ bo->flags & XE_BO_INTERNAL_TEST)) {
+ struct ttm_place *place = &(bo->placements[0]);
+
+ if (mem_type_is_vram(place->mem_type)) {
+ xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
+
+ place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
+ vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
+ place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
+
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
+ }
+ }
+
+ ttm_bo_pin(&bo->ttm);
+
+ /*
+ * FIXME: If we always use the reserve / unreserve functions for locking
+ * we do not need this.
+ */
+ ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
+
+ return 0;
+}
+
+/**
+ * xe_bo_unpin_external - unpin an external BO
+ * @bo: buffer object to be unpinned
+ *
+ * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
+ * BO. Unique call compared to xe_bo_unpin as this function has its own set of
+ * asserts and code to ensure evict / restore on suspend / resume.
+ */
+void xe_bo_unpin_external(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+
+ xe_assert(xe, !bo->vm);
+ xe_assert(xe, xe_bo_is_pinned(bo));
+ xe_assert(xe, xe_bo_is_user(bo));
+
+ if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
+ spin_lock(&xe->pinned.lock);
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
+ }
+
+ ttm_bo_unpin(&bo->ttm);
+
+ /*
+ * FIXME: If we always use the reserve / unreserve functions for locking
+ * we do not need this.
+ */
+ ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
+}
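+
+/*
+ * Illustrative sketch, not part of the original patch: bracketing a
+ * dma-buf access window with xe_bo_pin_external() / xe_bo_unpin_external()
+ * on a VM-less user bo. The example_pin_for_export() name is hypothetical.
+ */
+static int __maybe_unused example_pin_for_export(struct xe_bo *bo)
+{
+	int ret;
+
+	ret = xe_bo_lock(bo, true);
+	if (ret)
+		return ret;
+
+	ret = xe_bo_pin_external(bo);
+	xe_bo_unlock(bo);
+	if (ret)
+		return ret;
+
+	/* ... the importing device accesses the buffer here ... */
+
+	xe_bo_lock(bo, false);
+	xe_bo_unpin_external(bo);
+	xe_bo_unlock(bo);
+	return 0;
+}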
+
+void xe_bo_unpin(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+
+ xe_assert(xe, !bo->ttm.base.import_attach);
+ xe_assert(xe, xe_bo_is_pinned(bo));
+
+ if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
+ bo->flags & XE_BO_INTERNAL_TEST)) {
+ struct ttm_place *place = &(bo->placements[0]);
+
+ if (mem_type_is_vram(place->mem_type)) {
+ xe_assert(xe, !list_empty(&bo->pinned_link));
+
+ spin_lock(&xe->pinned.lock);
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
+ }
+ }
+
+ ttm_bo_unpin(&bo->ttm);
+}
+
+/**
+ * xe_bo_validate() - Make sure the bo is in an allowed placement
+ * @bo: The bo.
+ * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
+ * NULL. Used together with @allow_res_evict.
+ * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
+ * reservation object.
+ *
+ * Make sure the bo is in allowed placement, migrating it if necessary. If
+ * needed, other bos will be evicted. If bos selected for eviction share
+ * the @vm's reservation object, they can be evicted iff @allow_res_evict is
+ * set to true, otherwise they will be bypassed.
+ *
+ * Return: 0 on success, negative error code on failure. May return
+ * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
+ */
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+
+ if (vm) {
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+
+ ctx.allow_res_evict = allow_res_evict;
+ ctx.resv = xe_vm_resv(vm);
+ }
+
+ return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+}
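+
+/*
+ * Illustrative sketch, not part of the original patch: validating a bo
+ * that shares the vm's reservation object while allowing eviction of
+ * sibling bos. Assumes the caller already holds vm->lock, which
+ * xe_bo_validate() asserts. The example_validate_vm_bo() name is
+ * hypothetical.
+ */
+static int __maybe_unused example_validate_vm_bo(struct xe_vm *vm,
+						 struct xe_bo *bo)
+{
+	int ret;
+
+	ret = xe_vm_lock(vm, true);
+	if (ret)
+		return ret;
+
+	/* Bos sharing the vm's resv may be evicted to make room. */
+	ret = xe_bo_validate(bo, vm, true);
+	xe_vm_unlock(vm);
+	return ret;
+}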
+
+bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &xe_ttm_bo_destroy)
+ return true;
+
+ return false;
+}
+
+/*
+ * Resolve a BO address. There is no assert to check if the proper lock is held
+ * so it should only be used in cases where it is not fatal to get the wrong
+ * address, such as printing debug information, but not in cases where memory is
+ * written based on this result.
+ */
+dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_res_cursor cur;
+ u64 page;
+
+ xe_assert(xe, page_size <= PAGE_SIZE);
+ page = offset >> PAGE_SHIFT;
+ offset &= (PAGE_SIZE - 1);
+
+ if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
+ xe_assert(xe, bo->ttm.ttm);
+
+ xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
+ page_size, &cur);
+ return xe_res_dma(&cur) + offset;
+ } else {
+ struct xe_res_cursor cur;
+
+ xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
+ page_size, &cur);
+ return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
+ }
+}
+
+dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
+{
+ if (!READ_ONCE(bo->ttm.pin_count))
+ xe_bo_assert_held(bo);
+ return __xe_bo_addr(bo, offset, page_size);
+}
+
+int xe_bo_vmap(struct xe_bo *bo)
+{
+ void *virtual;
+ bool is_iomem;
+ int ret;
+
+ xe_bo_assert_held(bo);
+
+ if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
+ return -EINVAL;
+
+ if (!iosys_map_is_null(&bo->vmap))
+ return 0;
+
+ /*
+ * We use this more or less deprecated interface for now since
+ * ttm_bo_vmap() doesn't offer the optimization of kmapping
+ * single page bos, which is done here.
+ * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
+ * to use struct iosys_map.
+ */
+ ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
+ if (ret)
+ return ret;
+
+ virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+ if (is_iomem)
+ iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
+ else
+ iosys_map_set_vaddr(&bo->vmap, virtual);
+
+ return 0;
+}
+
+static void __xe_bo_vunmap(struct xe_bo *bo)
+{
+ if (!iosys_map_is_null(&bo->vmap)) {
+ iosys_map_clear(&bo->vmap);
+ ttm_bo_kunmap(&bo->kmap);
+ }
+}
+
+void xe_bo_vunmap(struct xe_bo *bo)
+{
+ xe_bo_assert_held(bo);
+ __xe_bo_vunmap(bo);
+}
+
+int xe_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_gem_create *args = data;
+ struct xe_vm *vm = NULL;
+ struct xe_bo *bo;
+ unsigned int bo_flags;
+ u32 handle;
+ int err;
+
+ if (XE_IOCTL_DBG(xe, args->extensions) ||
+ XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ /* at least one valid memory placement must be specified */
+ if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
+ !args->placement))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags &
+ ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
+ DRM_XE_GEM_CREATE_FLAG_SCANOUT |
+ DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->handle))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, !args->size))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
+ return -EINVAL;
+
+ bo_flags = 0;
+ if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
+ bo_flags |= XE_BO_DEFER_BACKING;
+
+ if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
+ bo_flags |= XE_BO_SCANOUT_BIT;
+
+ bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
+
+ if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
+ if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
+ return -EINVAL;
+
+ bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
+ }
+
+ if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
+ args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
+ args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
+ args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
+ return -EINVAL;
+
+ if (args->vm_id) {
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -ENOENT;
+ err = xe_vm_lock(vm, true);
+ if (err)
+ goto out_vm;
+ }
+
+ bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
+ ttm_bo_type_device, bo_flags);
+
+ if (vm)
+ xe_vm_unlock(vm);
+
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto out_vm;
+ }
+
+ err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
+ if (err)
+ goto out_bulk;
+
+ args->handle = handle;
+ goto out_put;
+
+out_bulk:
+ if (vm && !xe_vm_in_fault_mode(vm)) {
+ xe_vm_lock(vm, false);
+ __xe_bo_unset_bulk_move(bo);
+ xe_vm_unlock(vm);
+ }
+out_put:
+ xe_bo_put(bo);
+out_vm:
+ if (vm)
+ xe_vm_put(vm);
+
+ return err;
+}
+
+int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct drm_xe_gem_mmap_offset *args = data;
+ struct drm_gem_object *gem_obj;
+
+ if (XE_IOCTL_DBG(xe, args->extensions) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags))
+ return -EINVAL;
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (XE_IOCTL_DBG(xe, !gem_obj))
+ return -ENOENT;
+
+ /* The mmap offset was set up at BO allocation time. */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+ xe_bo_put(gem_to_xe_bo(gem_obj));
+ return 0;
+}
+
+/**
+ * xe_bo_lock() - Lock the buffer object's dma_resv object
+ * @bo: The struct xe_bo whose lock is to be taken
+ * @intr: Whether to perform any wait interruptible
+ *
+ * Locks the buffer object's dma_resv object. If the buffer object is
+ * pointing to a shared dma_resv object, that shared lock is locked.
+ *
+ * Return: 0 on success, -EINTR if @intr is true and the wait for a
+ * contended lock was interrupted. If @intr is set to false, the
+ * function always returns 0.
+ */
+int xe_bo_lock(struct xe_bo *bo, bool intr)
+{
+ if (intr)
+ return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);
+
+ dma_resv_lock(bo->ttm.base.resv, NULL);
+
+ return 0;
+}
+
+/**
+ * xe_bo_unlock() - Unlock the buffer object's dma_resv object
+ * @bo: The struct xe_bo whose lock is to be released.
+ *
+ * Unlock a buffer object lock that was locked by xe_bo_lock().
+ */
+void xe_bo_unlock(struct xe_bo *bo)
+{
+ dma_resv_unlock(bo->ttm.base.resv);
+}
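+
+/*
+ * Illustrative sketch, not part of the original patch: interruptible
+ * locking around xe_bo_vmap(), propagating -EINTR to the caller. The
+ * example_vmap_locked() name is hypothetical.
+ */
+static int __maybe_unused example_vmap_locked(struct xe_bo *bo)
+{
+	int ret;
+
+	ret = xe_bo_lock(bo, true);
+	if (ret)
+		return ret;	/* -EINTR: signal while waiting for a contended lock */
+
+	ret = xe_bo_vmap(bo);
+	xe_bo_unlock(bo);
+	return ret;
+}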
+
+/**
+ * xe_bo_can_migrate - Whether a buffer object likely can be migrated
+ * @bo: The buffer object to migrate
+ * @mem_type: The TTM memory type intended to migrate to
+ *
+ * Check whether the buffer object supports migration to the
+ * given memory type. Note that pinning may affect the ability to migrate as
+ * returned by this function.
+ *
+ * This function is primarily intended as a helper for checking the
+ * possibility to migrate buffer objects and can be called without
+ * the object lock held.
+ *
+ * Return: true if migration is possible, false otherwise.
+ */
+bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
+{
+ unsigned int cur_place;
+
+ if (bo->ttm.type == ttm_bo_type_kernel)
+ return true;
+
+ if (bo->ttm.type == ttm_bo_type_sg)
+ return false;
+
+ for (cur_place = 0; cur_place < bo->placement.num_placement;
+ cur_place++) {
+ if (bo->placements[cur_place].mem_type == mem_type)
+ return true;
+ }
+
+ return false;
+}
+
+static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
+{
+ memset(place, 0, sizeof(*place));
+ place->mem_type = mem_type;
+}
+
+/**
+ * xe_bo_migrate - Migrate an object to the desired region id
+ * @bo: The buffer object to migrate.
+ * @mem_type: The TTM region type to migrate to.
+ *
+ * Attempt to migrate the buffer object to the desired memory region. The
+ * buffer object may not be pinned, and must be locked.
+ * On successful completion, the object memory type will be updated,
+ * but an async migration task may not have completed yet, and to
+ * accomplish that, the object's kernel fences must be signaled with
+ * the object lock held.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -EINTR or -ERESTARTSYS if signal pending.
+ */
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ };
+ struct ttm_placement placement;
+ struct ttm_place requested;
+
+ xe_bo_assert_held(bo);
+
+ if (bo->ttm.resource->mem_type == mem_type)
+ return 0;
+
+ if (xe_bo_is_pinned(bo))
+ return -EBUSY;
+
+ if (!xe_bo_can_migrate(bo, mem_type))
+ return -EINVAL;
+
+ xe_place_from_ttm_type(mem_type, &requested);
+ placement.num_placement = 1;
+ placement.num_busy_placement = 1;
+ placement.placement = &requested;
+ placement.busy_placement = &requested;
+
+ /*
+ * Stolen needs to be handled like below VRAM handling if we ever need
+ * to support it.
+ */
+ drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
+
+ if (mem_type_is_vram(mem_type)) {
+ u32 c = 0;
+
+ add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
+ }
+
+ return ttm_bo_validate(&bo->ttm, &placement, &ctx);
+}
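+
+/*
+ * Illustrative sketch, not part of the original patch: migrating a bo to
+ * XE_PL_TT and then idling the kernel fences under the object lock, as
+ * described above. The example_migrate_and_idle() name is hypothetical.
+ */
+static int __maybe_unused example_migrate_and_idle(struct xe_bo *bo)
+{
+	int ret;
+
+	ret = xe_bo_lock(bo, true);
+	if (ret)
+		return ret;
+
+	ret = xe_bo_migrate(bo, XE_PL_TT);
+	if (!ret)
+		dma_resv_wait_timeout(bo->ttm.base.resv,
+				      DMA_RESV_USAGE_KERNEL, false,
+				      MAX_SCHEDULE_TIMEOUT);
+	xe_bo_unlock(bo);
+	return ret;
+}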
+
+/**
+ * xe_bo_evict - Evict an object to evict placement
+ * @bo: The buffer object to migrate.
+ * @force_alloc: Set force_alloc in ttm_operation_ctx
+ *
+ * On successful completion, the object memory will be moved to evict
+ * placement. This function blocks until the object has been fully moved.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ .force_alloc = force_alloc,
+ };
+ struct ttm_placement placement;
+ int ret;
+
+ xe_evict_flags(&bo->ttm, &placement);
+ ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
+ if (ret)
+ return ret;
+
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ return 0;
+}
+
+/**
+ * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
+ * placed in system memory.
+ * @bo: The xe_bo
+ *
+ * Return: true if extra pages need to be allocated, false otherwise.
+ */
+bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+
+ if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
+ return false;
+
+	/*
+	 * On discrete GPUs, if the GPU can access this buffer from
+	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
+	 * can't be used since there's no CCS storage associated with
+	 * non-VRAM addresses.
+	 */
+ if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
+ return false;
+
+ return true;
+}
+
+/**
+ * __xe_bo_release_dummy() - Dummy kref release function
+ * @kref: The embedded struct kref.
+ *
+ * Dummy release function for xe_bo_put_deferred(). Keep off.
+ */
+void __xe_bo_release_dummy(struct kref *kref)
+{
+}
+
+/**
+ * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
+ * @deferred: The lockless list used for the call to xe_bo_put_deferred().
+ *
+ * Puts all bos whose put was deferred by xe_bo_put_deferred().
+ * The @deferred list can be either an onstack local list or a global
+ * shared list used by a workqueue.
+ */
+void xe_bo_put_commit(struct llist_head *deferred)
+{
+ struct llist_node *freed;
+ struct xe_bo *bo, *next;
+
+ if (!deferred)
+ return;
+
+ freed = llist_del_all(deferred);
+ if (!freed)
+ return;
+
+ llist_for_each_entry_safe(bo, next, freed, freed)
+ drm_gem_object_free(&bo->ttm.base.refcount);
+}
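+
+/*
+ * Illustrative sketch, not part of the original patch: the deferred put
+ * pattern xe_bo_put_commit() is designed for, using xe_bo_put_deferred()
+ * as documented in xe_bo.h. Refcounts are dropped in the restricted
+ * (e.g. atomic) context; the final frees happen afterwards in
+ * xe_bo_put_commit(). The example_put_many() name is hypothetical.
+ */
+static void __maybe_unused example_put_many(struct xe_bo **bos, int count)
+{
+	LLIST_HEAD(deferred);
+	int i;
+
+	/* Safe in the restricted context: only refcounts are dropped here. */
+	for (i = 0; i < count; i++)
+		xe_bo_put_deferred(bos[i], &deferred);
+
+	/* After exiting the restricted context: perform the actual frees. */
+	xe_bo_put_commit(&deferred);
+}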
+
+/**
+ * xe_bo_dumb_create - Create a dumb bo as backing for a fb
+ * @file_priv: The drm file-private of the caller.
+ * @dev: The drm device.
+ * @args: The arguments describing the dumb buffer to create.
+ *
+ * See dumb_create() hook in include/drm/drm_drv.h
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_bo_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_bo *bo;
+ uint32_t handle;
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+ int err;
+ u32 page_size = max_t(u32, PAGE_SIZE,
+ xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
+
+ args->pitch = ALIGN(args->width * cpp, 64);
+ args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
+ page_size);
+
+ bo = xe_bo_create_user(xe, NULL, NULL, args->size,
+ DRM_XE_GEM_CPU_CACHING_WC,
+ ttm_bo_type_device,
+ XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT |
+ XE_BO_NEEDS_CPU_ACCESS);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&bo->ttm.base);
+ if (!err)
+ args->handle = handle;
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_bo.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
new file mode 100644
index 000000000..8be42ac6c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_BO_H_
+#define _XE_BO_H_
+
+#include <drm/ttm/ttm_tt.h>
+
+#include "xe_bo_types.h"
+#include "xe_macros.h"
+#include "xe_vm_types.h"
+#include "xe_vm.h"
+
+/**
+ * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
+ * @vm: The vm
+ */
+#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
+
+
+#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
+
+#define XE_BO_CREATE_USER_BIT BIT(0)
+/* The bits below need to be contiguous, or things break */
+#define XE_BO_CREATE_SYSTEM_BIT BIT(1)
+#define XE_BO_CREATE_VRAM0_BIT BIT(2)
+#define XE_BO_CREATE_VRAM1_BIT BIT(3)
+#define XE_BO_CREATE_VRAM_MASK (XE_BO_CREATE_VRAM0_BIT | \
+ XE_BO_CREATE_VRAM1_BIT)
+/* -- */
+#define XE_BO_CREATE_STOLEN_BIT BIT(4)
+#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
+ (IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
+ XE_BO_CREATE_SYSTEM_BIT)
+#define XE_BO_CREATE_GGTT_BIT BIT(5)
+#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
+#define XE_BO_CREATE_PINNED_BIT BIT(7)
+#define XE_BO_CREATE_NO_RESV_EVICT BIT(8)
+#define XE_BO_DEFER_BACKING BIT(9)
+#define XE_BO_SCANOUT_BIT BIT(10)
+#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
+#define XE_BO_PAGETABLE BIT(12)
+#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
+/* this one is trigger internally only */
+#define XE_BO_INTERNAL_TEST BIT(30)
+#define XE_BO_INTERNAL_64K BIT(31)
+
+#define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
+#define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)
+#define XE_PPGTT_PDE_PDPE_PAT2 BIT_ULL(12)
+#define XE_PPGTT_PTE_PAT2 BIT_ULL(7)
+#define XE_PPGTT_PTE_PAT1 BIT_ULL(4)
+#define XE_PPGTT_PTE_PAT0 BIT_ULL(3)
+
+#define XE_PTE_SHIFT 12
+#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
+#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
+#define XE_PDE_SHIFT (XE_PTE_SHIFT - 3)
+#define XE_PDES (1 << XE_PDE_SHIFT)
+#define XE_PDE_MASK (XE_PDES - 1)
+
+#define XE_64K_PTE_SHIFT 16
+#define XE_64K_PAGE_SIZE (1 << XE_64K_PTE_SHIFT)
+#define XE_64K_PTE_MASK (XE_64K_PAGE_SIZE - 1)
+#define XE_64K_PDE_MASK (XE_PDE_MASK >> 4)
+
+#define XE_PDE_PS_2M BIT_ULL(7)
+#define XE_PDPE_PS_1G BIT_ULL(7)
+#define XE_PDE_IPS_64K BIT_ULL(11)
+
+#define XE_GGTT_PTE_DM BIT_ULL(1)
+#define XE_USM_PPGTT_PTE_AE BIT_ULL(10)
+#define XE_PPGTT_PTE_DM BIT_ULL(11)
+#define XE_PDE_64K BIT_ULL(6)
+#define XE_PTE_PS64 BIT_ULL(8)
+#define XE_PTE_NULL BIT_ULL(9)
+
+#define XE_PAGE_PRESENT BIT_ULL(0)
+#define XE_PAGE_RW BIT_ULL(1)
+
+#define XE_PL_SYSTEM TTM_PL_SYSTEM
+#define XE_PL_TT TTM_PL_TT
+#define XE_PL_VRAM0 TTM_PL_VRAM
+#define XE_PL_VRAM1 (XE_PL_VRAM0 + 1)
+#define XE_PL_STOLEN (TTM_NUM_MEM_TYPES - 1)
+
+#define XE_BO_PROPS_INVALID (-1)
+
+struct sg_table;
+
+struct xe_bo *xe_bo_alloc(void);
+void xe_bo_free(struct xe_bo *bo);
+
+struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags);
+struct xe_bo *
+xe_bo_create_locked_range(struct xe_device *xe,
+ struct xe_tile *tile, struct xe_vm *vm,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ u16 cpu_caching,
+ enum ttm_bo_type type,
+ u32 flags);
+struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm, size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
+ const void *data, size_t size,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u32 flags);
+struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
+ const void *data, size_t size, u32 flags);
+
+int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
+ u32 bo_flags);
+
+static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct xe_bo, ttm);
+}
+
+static inline struct xe_bo *gem_to_xe_bo(const struct drm_gem_object *obj)
+{
+ return container_of(obj, struct xe_bo, ttm.base);
+}
+
+#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)
+
+static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
+{
+ if (bo)
+ drm_gem_object_get(&bo->ttm.base);
+
+ return bo;
+}
+
+static inline void xe_bo_put(struct xe_bo *bo)
+{
+ if (bo)
+ drm_gem_object_put(&bo->ttm.base);
+}
+
+static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
+{
+ if (bo)
+ ttm_bo_set_bulk_move(&bo->ttm, NULL);
+}
+
+static inline void xe_bo_assert_held(struct xe_bo *bo)
+{
+ if (bo)
+ dma_resv_assert_held((bo)->ttm.base.resv);
+}
+
+int xe_bo_lock(struct xe_bo *bo, bool intr);
+
+void xe_bo_unlock(struct xe_bo *bo);
+
+static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
+{
+ if (bo) {
+ XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
+ if (bo->vm)
+ xe_vm_assert_held(bo->vm);
+ else
+ dma_resv_unlock(bo->ttm.base.resv);
+ }
+}
+
+int xe_bo_pin_external(struct xe_bo *bo);
+int xe_bo_pin(struct xe_bo *bo);
+void xe_bo_unpin_external(struct xe_bo *bo);
+void xe_bo_unpin(struct xe_bo *bo);
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
+
+static inline bool xe_bo_is_pinned(struct xe_bo *bo)
+{
+ return bo->ttm.pin_count;
+}
+
+static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
+{
+ if (likely(bo)) {
+ xe_bo_lock(bo, false);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+
+ xe_bo_put(bo);
+ }
+}
+
+bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
+dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
+dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
+
+static inline dma_addr_t
+xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
+{
+ return xe_bo_addr(bo, 0, page_size);
+}
+
+static inline u32
+xe_bo_ggtt_addr(struct xe_bo *bo)
+{
+ XE_WARN_ON(bo->ggtt_node.size > bo->size);
+ XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
+ return bo->ggtt_node.start;
+}
+
+int xe_bo_vmap(struct xe_bo *bo);
+void xe_bo_vunmap(struct xe_bo *bo);
+
+bool mem_type_is_vram(u32 mem_type);
+bool xe_bo_is_vram(struct xe_bo *bo);
+bool xe_bo_is_stolen(struct xe_bo *bo);
+bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
+uint64_t vram_region_gpu_offset(struct ttm_resource *res);
+
+bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
+
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
+int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+
+int xe_bo_evict_pinned(struct xe_bo *bo);
+int xe_bo_restore_pinned(struct xe_bo *bo);
+
+extern struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
+
+int xe_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int xe_bo_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+bool xe_bo_needs_ccs_pages(struct xe_bo *bo);
+
+static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
+{
+ return PAGE_ALIGN(bo->ttm.base.size);
+}
+
+static inline bool xe_bo_has_pages(struct xe_bo *bo)
+{
+ if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
+ xe_bo_is_vram(bo))
+ return true;
+
+ return false;
+}
+
+void __xe_bo_release_dummy(struct kref *kref);
+
+/**
+ * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
+ * @bo: The bo to put.
+ * @deferred: List to which to add the buffer object if we cannot put, or
+ * NULL if the function is to put unconditionally.
+ *
+ * Since the final freeing of an object includes both sleeping and (!)
+ * memory allocation in the dma_resv individualization, it's not ok
+ * to put an object from atomic context nor from within a held lock
+ * tainted by reclaim. In such situations we want to defer the final
+ * freeing until we've exited the restricting context, or in the worst
+ * case to a workqueue.
+ * This function either puts the object if possible without the refcount
+ * reaching zero, or adds it to the @deferred list if that was not possible.
+ * The caller needs to follow up with a call to xe_bo_put_commit() to actually
+ * put the bo iff this function returns true. It's safe to always
+ * follow up with a call to xe_bo_put_commit().
+ * TODO: It's TTM that is the villain here. Perhaps TTM should add an
+ * interface like this.
+ *
+ * Return: true if @bo was the first object put on the @deferred list,
+ * false otherwise.
+ */
+static inline bool
+xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
+{
+ if (!deferred) {
+ xe_bo_put(bo);
+ return false;
+ }
+
+ if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
+ return false;
+
+ return llist_add(&bo->freed, deferred);
+}
+
+void xe_bo_put_commit(struct llist_head *deferred);
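+
+/*
+ * An illustrative (hypothetical) use of the deferred-put pair above; the
+ * helper name and locking scheme are invented for this sketch only:
+ *
+ *	static void example_put_bos(struct xe_bo **bos, int n, spinlock_t *lock)
+ *	{
+ *		LLIST_HEAD(deferred);
+ *		int i;
+ *
+ *		spin_lock(lock);
+ *		for (i = 0; i < n; i++)
+ *			xe_bo_put_deferred(bos[i], &deferred);
+ *		spin_unlock(lock);
+ *
+ *		xe_bo_put_commit(&deferred);
+ *	}
+ */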
+
+struct sg_table *xe_bo_sg(struct xe_bo *bo);
+
+/**
+ * xe_sg_segment_size() - Provide an upper limit for the sg segment size.
+ * @dev: device pointer
+ *
+ * Return: the maximum segment size for the 'struct scatterlist'
+ * elements.
+ */
+static inline unsigned int xe_sg_segment_size(struct device *dev)
+{
+ struct scatterlist __maybe_unused sg;
+ size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;
+
+ max = min_t(size_t, max, dma_max_mapping_size(dev));
+
+ /*
+ * The iommu_dma_map_sg() function ensures iova allocation doesn't
+ * cross dma segment boundary. It does so by padding some sg elements.
+ * This can cause overflow, ending up with sg->length being set to 0.
+ * Avoid this by ensuring maximum segment size is half of 'max'
+ * rounded down to PAGE_SIZE.
+ */
+ return round_down(max / 2, PAGE_SIZE);
+}
+
+#define i915_gem_object_flush_if_display(obj) ((void)(obj))
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+/**
+ * xe_bo_is_mem_type - Whether the bo currently resides in the given
+ * TTM memory type
+ * @bo: The bo to check.
+ * @mem_type: The TTM memory type.
+ *
+ * Return: true iff the bo resides in @mem_type, false otherwise.
+ */
+static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
+{
+ xe_bo_assert_held(bo);
+ return bo->ttm.resource->mem_type == mem_type;
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
new file mode 100644
index 000000000..f57d440cc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_BO_DOC_H_
+#define _XE_BO_DOC_H_
+
+/**
+ * DOC: Buffer Objects (BO)
+ *
+ * BO management
+ * =============
+ *
+ * TTM manages (placement, eviction, etc...) all BOs in XE.
+ *
+ * BO creation
+ * ===========
+ *
+ * Create a chunk of memory which can be used by the GPU. Placement rules
+ * (sysmem or vram region) are passed in at creation. TTM handles placement of
+ * the BO and can trigger eviction of other BOs to make space for the new BO.
+ *
+ * Kernel BOs
+ * ----------
+ *
+ * A kernel BO is created as part of driver load (e.g. uC firmware images, GuC
+ * ADS, etc...) or a BO created as part of a user operation which requires
+ * a kernel BO (e.g. engine state, memory for page tables, etc...). These BOs
+ * are typically mapped in the GGTT (any kernel BO aside from memory for page
+ * tables is in the GGTT), are pinned (can't move or be evicted at runtime),
+ * have a vmap (XE can access the memory via the xe_map layer) and have
+ * contiguous physical memory.
+ *
+ * More detail on why kernel BOs are pinned and contiguous is given below.
+ *
+ * User BOs
+ * --------
+ *
+ * A user BO is created via the DRM_IOCTL_XE_GEM_CREATE IOCTL. Once it is
+ * created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
+ * access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
+ * user BOs are evictable and user BOs are never pinned by XE. The allocation of
+ * the backing store can be deferred from creation time until first use, which
+ * is either mmap, bind, or pagefault.
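+ *
+ * An illustrative userspace sketch of the create flow (not authoritative:
+ * field names follow the drm_xe_gem_create layout in xe_drm.h as understood
+ * here, the placement mask would come from the query uAPI, and error
+ * handling is omitted):
+ *
+ *	struct drm_xe_gem_create create = {
+ *		.size = 64 * 1024,
+ *		.placement = vram_region_mask,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
+ *	(create.handle can then be mmap'd or bound via DRM_IOCTL_XE_VM_BIND)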
+ *
+ * Private BOs
+ * ~~~~~~~~~~~
+ *
+ * A private BO is a user BO created with a valid VM argument passed into the
+ * create IOCTL. If a BO is private it cannot be exported via prime FD and
+ * mappings can only be created for the BO within the VM it is tied to. Lastly,
+ * the BO dma-resv slots / lock point to the VM's dma-resv slots / lock (all
+ * private BOs to a VM share common dma-resv slots / lock).
+ *
+ * External BOs
+ * ~~~~~~~~~~~~
+ *
+ * An external BO is a user BO created with a NULL VM argument passed into the
+ * create IOCTL. An external BO can be shared with different UMDs / devices via
+ * prime FD and the BO can be mapped into multiple VMs. An external BO has its
+ * own unique dma-resv slots / lock. An external BO is tracked in an array in
+ * every VM which has a mapping of the BO. This allows a VM to look up and lock
+ * all external BOs mapped in that VM as needed.
+ *
+ * BO placement
+ * ~~~~~~~~~~~~
+ *
+ * When a user BO is created, a mask of valid placements is passed indicating
+ * which memory regions are considered valid.
+ *
+ * The memory region information is available via query uAPI (TODO: add link).
+ *
+ * BO validation
+ * =============
+ *
+ * BO validation (ttm_bo_validate) refers to ensuring a BO has a valid
+ * placement. If a BO was swapped to temporary storage, a validation call will
+ * trigger a move back to a valid placement (a location where the GPU can
+ * access the BO).
+ * Validation of a BO may evict other BOs to make room for the BO being
+ * validated.
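+ *
+ * A hypothetical in-kernel sketch of validating a BO (illustration only;
+ * the NULL VM and non-interruptible lock are arbitrary choices and error
+ * handling is reduced to the bare minimum):
+ *
+ *	xe_bo_lock(bo, false);
+ *	err = xe_bo_validate(bo, NULL, false);
+ *	xe_bo_unlock(bo);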
+ *
+ * BO eviction / moving
+ * ====================
+ *
+ * All eviction (or in other words, moving a BO from one memory location to
+ * another) is routed through TTM with a callback into XE.
+ *
+ * Runtime eviction
+ * ----------------
+ *
+ * Runtime eviction refers to normal operation, where TTM decides it needs to
+ * move a BO. Typically this is because TTM needs to make room for another BO
+ * and the evicted BO is the first BO on the LRU list that is not locked.
+ *
+ * An example of this is a new BO which can only be placed in VRAM but there is
+ * no space left in VRAM. There could be multiple BOs which have sysmem and VRAM
+ * placement rules and which currently reside in VRAM; TTM will trigger a move
+ * of one (or multiple) of these BO(s) until there is room in VRAM to place the
+ * new BO. The evicted BO(s) are valid but still need new bindings before they
+ * are used again (exec or compute mode rebind worker).
+ *
+ * Another example would be, TTM can't find a BO to evict which has another
+ * valid placement. In this case TTM will evict one (or multiple) unlocked BO(s)
+ * to a temporary unreachable (invalid) placement. The evicted BO(s) are invalid
+ * and before next use need to be moved to a valid placement and rebound.
+ *
+ * In both cases, moves of these BOs are scheduled behind the fences in the BO's
+ * dma-resv slots.
+ *
+ * WW locking tries to ensure that, even if two VMs each want 51% of the
+ * memory, forward progress is made on both VMs.
+ *
+ * Runtime eviction uses a per-GT migration engine (TODO: link to migration
+ * engine doc) to do a GPU memcpy from one location to another.
+ *
+ * Rebinds after runtime eviction
+ * ------------------------------
+ *
+ * When BOs are moved, every mapping (VMA) of the BO needs to be rebound before
+ * the BO is used again. Every VMA is added to the evicted list of its VM when
+ * the BO is moved. This is safe because of the VM locking structure (TODO: link
+ * to VM locking doc). On the next use of a VM (exec or compute mode rebind
+ * worker) the evicted VMA list is checked and rebinds are triggered. In the
+ * case of a faulting VM, the rebind is done in the page fault handler.
+ *
+ * Suspend / resume eviction of VRAM
+ * ---------------------------------
+ *
+ * During device suspend / resume VRAM may lose power, which means the contents
+ * of VRAM are blown away. Thus BOs present in VRAM at the time of suspend must
+ * be moved to sysmem in order for their contents to be saved.
+ *
+ * A simple TTM call (ttm_resource_manager_evict_all) can move all non-pinned
+ * (user) BOs to sysmem. External BOs that are pinned need to be manually
+ * evicted with a simple loop + xe_bo_evict call. It gets a little trickier
+ * with kernel BOs.
+ *
+ * Some kernel BOs are used by the GT migration engine to do moves, thus we
+ * can't move all of the BOs via the GT migration engine. For simplicity, use a
+ * TTM memcpy (CPU) to move any kernel (pinned) BO on either suspend or resume.
+ *
+ * Some kernel BOs need to be restored to the exact same physical location. TTM
+ * makes this rather easy but the caveat is the memory must be contiguous. Again
+ * for simplicity, we enforce that all kernel (pinned) BOs are contiguous and
+ * restored to the same physical location.
+ *
+ * Pinned external BOs in VRAM are restored on resume via the GPU.
+ *
+ * Rebinds after suspend / resume
+ * ------------------------------
+ *
+ * Most kernel BOs have GGTT mappings which must be restored during the resume
+ * process. All user BOs are rebound after validation on their next use.
+ *
+ * Future work
+ * ===========
+ *
+ * Trim the list of BOs which are saved / restored via TTM memcpy on suspend /
+ * resume. All we really need to save / restore via TTM memcpy is the memory
+ * required for the GuC to load and the memory for the GT migrate engine to
+ * operate.
+ *
+ * Do not require kernel BOs to be contiguous in physical memory / restored to
+ * the same physical address on resume. In all likelihood the only memory that
+ * needs to be restored to the same physical address is memory used for page
+ * tables. All of that memory is allocated one page at a time, so the
+ * contiguous requirement isn't really needed. Some work on the vmap code would
+ * also be needed if kernel BOs are not contiguous.
+ *
+ * Make some kernel BOs evictable rather than pinned. An example of this would
+ * be engine state; in all likelihood, if the dma-resv slots of these BOs were
+ * properly used rather than pinning, we could safely evict + rebind these BOs
+ * as needed.
+ *
+ * Some kernel BOs do not need to be restored on resume (e.g. GuC ADS, as that
+ * is repopulated on resume); add a flag to mark such objects as no save /
+ * restore.
+ */
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
new file mode 100644
index 000000000..7a264a9ca
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_bo_evict.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_tile.h"
+
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ *
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All of the eviction magic is done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+ struct ttm_device *bdev = &xe->ttm;
+ struct xe_bo *bo;
+ struct xe_tile *tile;
+ struct list_head still_in_list;
+ u32 mem_type;
+ u8 id;
+ int ret;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ /* User memory */
+ for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bdev, mem_type);
+
+ if (man) {
+ ret = ttm_resource_manager_evict_all(bdev, man);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Pinned user memory in VRAM */
+ INIT_LIST_HEAD(&still_in_list);
+ spin_lock(&xe->pinned.lock);
+ for (;;) {
+ bo = list_first_entry_or_null(&xe->pinned.external_vram,
+ typeof(*bo), pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ spin_unlock(&xe->pinned.lock);
+
+ xe_bo_lock(bo, false);
+ ret = xe_bo_evict_pinned(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ if (ret) {
+ spin_lock(&xe->pinned.lock);
+ list_splice_tail(&still_in_list,
+ &xe->pinned.external_vram);
+ spin_unlock(&xe->pinned.lock);
+ return ret;
+ }
+
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, &xe->pinned.external_vram);
+ spin_unlock(&xe->pinned.lock);
+
+ /*
+	 * Wait for all user BOs to be evicted, as those evictions depend on the
+	 * memory moved below.
+ */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
+
+ spin_lock(&xe->pinned.lock);
+ for (;;) {
+ bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
+ typeof(*bo), pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
+ spin_unlock(&xe->pinned.lock);
+
+ xe_bo_lock(bo, false);
+ ret = xe_bo_evict_pinned(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ if (ret)
+ return ret;
+
+ spin_lock(&xe->pinned.lock);
+ }
+ spin_unlock(&xe->pinned.lock);
+
+ return 0;
+}
+
+/**
+ * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ *
+ * @xe: xe device
+ *
+ * Move kernel BOs from temporary (typically system) memory to VRAM via CPU.
+ * All moves are done via TTM calls.
+ *
+ * This function should be called early, before trying to init the GT, on device
+ * resume.
+ */
+int xe_bo_restore_kernel(struct xe_device *xe)
+{
+ struct xe_bo *bo;
+ int ret;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ spin_lock(&xe->pinned.lock);
+ for (;;) {
+ bo = list_first_entry_or_null(&xe->pinned.evicted,
+ typeof(*bo), pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
+
+ xe_bo_lock(bo, false);
+ ret = xe_bo_restore_pinned(bo);
+ xe_bo_unlock(bo);
+ if (ret) {
+ xe_bo_put(bo);
+ return ret;
+ }
+
+ if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+ struct xe_tile *tile = bo->tile;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
+
+ /*
+		 * We expect validate to trigger a move to VRAM and our move code
+		 * should set up the iosys map.
+ */
+ xe_assert(xe, !iosys_map_is_null(&bo->vmap));
+ xe_assert(xe, xe_bo_is_vram(bo));
+
+ xe_bo_put(bo);
+
+ spin_lock(&xe->pinned.lock);
+ }
+ spin_unlock(&xe->pinned.lock);
+
+ return 0;
+}
+
+/**
+ * xe_bo_restore_user - restore pinned user BOs to VRAM
+ *
+ * @xe: xe device
+ *
+ * Move pinned user BOs from temporary (typically system) memory to VRAM via
+ * CPU. All moves are done via TTM calls.
+ *
+ * This function should be called late, after GT init, on device resume.
+ */
+int xe_bo_restore_user(struct xe_device *xe)
+{
+ struct xe_bo *bo;
+ struct xe_tile *tile;
+ struct list_head still_in_list;
+ u8 id;
+ int ret;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ /* Pinned user memory in VRAM should be validated on resume */
+ INIT_LIST_HEAD(&still_in_list);
+ spin_lock(&xe->pinned.lock);
+ for (;;) {
+ bo = list_first_entry_or_null(&xe->pinned.external_vram,
+ typeof(*bo), pinned_link);
+ if (!bo)
+ break;
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ xe_bo_get(bo);
+ spin_unlock(&xe->pinned.lock);
+
+ xe_bo_lock(bo, false);
+ ret = xe_bo_restore_pinned(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ if (ret) {
+ spin_lock(&xe->pinned.lock);
+ list_splice_tail(&still_in_list,
+ &xe->pinned.external_vram);
+ spin_unlock(&xe->pinned.lock);
+ return ret;
+ }
+
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, &xe->pinned.external_vram);
+ spin_unlock(&xe->pinned.lock);
+
+ /* Wait for validate to complete */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
+
+ return 0;
+}
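+
+/*
+ * Illustrative ordering of the three entry points in this file across a
+ * suspend / resume cycle (a sketch; the real call sites live in the xe PM
+ * code):
+ *
+ *	suspend:                 xe_bo_evict_all(xe);
+ *	resume, before GT init:  xe_bo_restore_kernel(xe);
+ *	resume, after GT init:   xe_bo_restore_user(xe);
+ */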
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
new file mode 100644
index 000000000..746894798
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_BO_EVICT_H_
+#define _XE_BO_EVICT_H_
+
+struct xe_device;
+
+int xe_bo_evict_all(struct xe_device *xe);
+int xe_bo_restore_kernel(struct xe_device *xe);
+int xe_bo_restore_user(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
new file mode 100644
index 000000000..81dca1531
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_BO_TYPES_H_
+#define _XE_BO_TYPES_H_
+
+#include <linux/iosys-map.h>
+
+#include <drm/drm_mm.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_placement.h>
+
+struct xe_device;
+struct xe_vm;
+
+#define XE_BO_MAX_PLACEMENTS 3
+
+/* TODO: To be selected with VM_MADVISE */
+#define XE_BO_PRIORITY_NORMAL 1
+
+/** struct xe_bo - XE buffer object */
+struct xe_bo {
+ /** @ttm: TTM base buffer object */
+ struct ttm_buffer_object ttm;
+ /** @size: Size of this buffer object */
+ size_t size;
+ /** @flags: flags for this buffer object */
+ u32 flags;
+ /** @vm: VM this BO is attached to, for extobj this will be NULL */
+ struct xe_vm *vm;
+ /** @tile: Tile this BO is attached to (kernel BO only) */
+ struct xe_tile *tile;
+ /** @placements: valid placements for this BO */
+ struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
+ /** @placement: current placement for this BO */
+ struct ttm_placement placement;
+ /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
+ struct drm_mm_node ggtt_node;
+ /** @vmap: iosys map of this buffer */
+ struct iosys_map vmap;
+ /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
+ struct ttm_bo_kmap_obj kmap;
+ /** @pinned_link: link to present / evicted list of pinned BO */
+ struct list_head pinned_link;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @client: @xe_drm_client which created the bo
+ */
+ struct xe_drm_client *client;
+ /**
+ * @client_link: Link into @xe_drm_client.objects_list
+ */
+ struct list_head client_link;
+#endif
+ /** @freed: List node for delayed put. */
+ struct llist_node freed;
+ /** @created: Whether the bo has passed initial creation */
+ bool created;
+
+	/** @ccs_cleared: true means that the CCS region of the BO is cleared */
+ bool ccs_cleared;
+
+ /**
+ * @cpu_caching: CPU caching mode. Currently only used for userspace
+ * objects.
+ */
+ u16 cpu_caching;
+};
+
+#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
+#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
new file mode 100644
index 000000000..c56fd7d59
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_debugfs.h"
+
+#include <linux/string_helpers.h>
+
+#include <drm/drm_debugfs.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt_debugfs.h"
+#include "xe_step.h"
+
+#ifdef CONFIG_DRM_XE_DEBUG
+#include "xe_bo_evict.h"
+#include "xe_migrate.h"
+#include "xe_vm.h"
+#endif
+
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */
+DECLARE_FAULT_ATTR(gt_reset_failure);
+#endif
+
+static struct xe_device *node_to_xe(struct drm_info_node *node)
+{
+ return to_xe_device(node->minor->dev);
+}
+
+static int info(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = node_to_xe(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct xe_gt *gt;
+ u8 id;
+
+ drm_printf(&p, "graphics_verx100 %d\n", xe->info.graphics_verx100);
+ drm_printf(&p, "media_verx100 %d\n", xe->info.media_verx100);
+ drm_printf(&p, "stepping G:%s M:%s D:%s B:%s\n",
+ xe_step_name(xe->info.step.graphics),
+ xe_step_name(xe->info.step.media),
+ xe_step_name(xe->info.step.display),
+ xe_step_name(xe->info.step.basedie));
+ drm_printf(&p, "is_dgfx %s\n", str_yes_no(xe->info.is_dgfx));
+ drm_printf(&p, "platform %d\n", xe->info.platform);
+ drm_printf(&p, "subplatform %d\n",
+ xe->info.subplatform > XE_SUBPLATFORM_NONE ? xe->info.subplatform : 0);
+ drm_printf(&p, "devid 0x%x\n", xe->info.devid);
+ drm_printf(&p, "revid %d\n", xe->info.revid);
+ drm_printf(&p, "tile_count %d\n", xe->info.tile_count);
+ drm_printf(&p, "vm_max_level %d\n", xe->info.vm_max_level);
+ drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist));
+ drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs));
+ drm_printf(&p, "has_usm %s\n", str_yes_no(xe->info.has_usm));
+ for_each_gt(gt, xe, id) {
+ drm_printf(&p, "gt%d force wake %d\n", id,
+ xe_force_wake_ref(gt_to_fw(gt), XE_FW_GT));
+ drm_printf(&p, "gt%d engine_mask 0x%llx\n", id,
+ gt->info.engine_mask);
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"info", info, 0},
+};
+
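+/*
+ * While the "forcewake_all" debugfs file is held open, every GT is kept awake:
+ * forcewake is taken for all domains on open and released again on close.
+ */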
+static int forcewake_open(struct inode *inode, struct file *file)
+{
+ struct xe_device *xe = inode->i_private;
+ struct xe_gt *gt;
+ u8 id;
+
+ xe_device_mem_access_get(xe);
+
+ for_each_gt(gt, xe, id)
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+
+ return 0;
+}
+
+static int forcewake_release(struct inode *inode, struct file *file)
+{
+ struct xe_device *xe = inode->i_private;
+ struct xe_gt *gt;
+ u8 id;
+
+ for_each_gt(gt, xe, id)
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+
+ xe_device_mem_access_put(xe);
+
+ return 0;
+}
+
+static const struct file_operations forcewake_all_fops = {
+ .owner = THIS_MODULE,
+ .open = forcewake_open,
+ .release = forcewake_release,
+};
+
+void xe_debugfs_register(struct xe_device *xe)
+{
+ struct ttm_device *bdev = &xe->ttm;
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+ struct ttm_resource_manager *man;
+ struct xe_gt *gt;
+ u32 mem_type;
+ u8 id;
+
+ drm_debugfs_create_files(debugfs_list,
+ ARRAY_SIZE(debugfs_list),
+ root, minor);
+
+ debugfs_create_file("forcewake_all", 0400, root, xe,
+ &forcewake_all_fops);
+
+ for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+ man = ttm_manager_type(bdev, mem_type);
+
+ if (man) {
+ char name[16];
+
+ sprintf(name, "vram%d_mm", mem_type - XE_PL_VRAM0);
+ ttm_resource_manager_create_debugfs(man, root, name);
+ }
+ }
+
+ man = ttm_manager_type(bdev, XE_PL_TT);
+ ttm_resource_manager_create_debugfs(man, root, "gtt_mm");
+
+ man = ttm_manager_type(bdev, XE_PL_STOLEN);
+ if (man)
+ ttm_resource_manager_create_debugfs(man, root, "stolen_mm");
+
+ for_each_gt(gt, xe, id)
+ xe_gt_debugfs_register(gt);
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
+#endif
+}
diff --git a/drivers/gpu/drm/xe/xe_debugfs.h b/drivers/gpu/drm/xe/xe_debugfs.h
new file mode 100644
index 000000000..715b8e2e0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_DEBUGFS_H_
+#define _XE_DEBUGFS_H_
+
+struct xe_device;
+
+void xe_debugfs_register(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
new file mode 100644
index 000000000..68abc0b19
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_devcoredump.h"
+#include "xe_devcoredump_types.h"
+
+#include <linux/devcoredump.h>
+#include <generated/utsrelease.h>
+
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_submit.h"
+#include "xe_hw_engine.h"
+
+/**
+ * DOC: Xe device coredump
+ *
+ * Devices overview:
+ * Xe uses dev_coredump infrastructure for exposing the crash errors in a
+ * standardized way.
+ * devcoredump exposes a temporary device under /sys/class/devcoredump/
+ * which is linked with our card device directly.
+ * The core dump can be accessed either from
+ * /sys/class/drm/card<n>/device/devcoredump/ or from
+ * /sys/class/devcoredump/devcd<m> where
+ * /sys/class/devcoredump/devcd<m>/failing_device is a link to
+ * /sys/class/drm/card<n>/device/.
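+ *
+ * A minimal userspace sketch for dumping the data (illustration only; the
+ * card index is an assumption and error handling is omitted):
+ *
+ *	FILE *f = fopen("/sys/class/drm/card0/device/devcoredump/data", "r");
+ *	char buf[4096];
+ *	size_t n;
+ *
+ *	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
+ *		fwrite(buf, 1, n, stdout);
+ *	fclose(f);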
+ *
+ * Snapshot at hang:
+ * The 'data' file is printed with a drm_printer pointer at devcoredump read
+ * time. For this reason, we need to take the snapshots at the time the hang
+ * happens, and not only when the user is reading the file. Otherwise the
+ * information would be outdated, since resets might have happened in between.
+ *
+ * 'First' failure snapshot:
+ * In general, the first hang is the most critical one since the following hangs
+ * can be a consequence of the initial hang. For this reason we only take the
+ * snapshot of the 'first' failure and ignore subsequent calls of this function,
+ * at least while the coredump device is alive. Dev_coredump has a delayed work
+ * queue that will eventually delete the device and free all the dump
+ * information.
+ */
+
+#ifdef CONFIG_DEV_COREDUMP
+
+static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
+{
+ return container_of(coredump, struct xe_device, devcoredump);
+}
+
+static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
+{
+ return &q->gt->uc.guc;
+}
+
+static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct xe_devcoredump *coredump = data;
+ struct xe_devcoredump_snapshot *ss;
+ struct drm_printer p;
+ struct drm_print_iterator iter;
+ struct timespec64 ts;
+ int i;
+
+ /* Our device is gone already... */
+ if (!data || !coredump_to_xe(coredump))
+ return -ENODEV;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ ss = &coredump->snapshot;
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "**** Xe Device Coredump ****\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+
+ ts = ktime_to_timespec64(ss->snapshot_time);
+ drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+ ts = ktime_to_timespec64(ss->boot_time);
+ drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+
+ drm_printf(&p, "\n**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
+ xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
+
+ drm_printf(&p, "\n**** HW Engines ****\n");
+ for (i = 0; i < XE_NUM_HW_ENGINES; i++)
+ if (coredump->snapshot.hwe[i])
+ xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
+ &p);
+
+ return count - iter.remain;
+}
+
+static void xe_devcoredump_free(void *data)
+{
+ struct xe_devcoredump *coredump = data;
+ int i;
+
+ /* Our device is gone. Nothing to do... */
+ if (!data || !coredump_to_xe(coredump))
+ return;
+
+ xe_guc_ct_snapshot_free(coredump->snapshot.ct);
+ xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
+ for (i = 0; i < XE_NUM_HW_ENGINES; i++)
+ if (coredump->snapshot.hwe[i])
+ xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
+
+ coredump->captured = false;
+ drm_info(&coredump_to_xe(coredump)->drm,
+ "Xe device coredump has been deleted.\n");
+}
+
+static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ struct xe_exec_queue *q)
+{
+ struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 adj_logical_mask = q->logical_mask;
+ u32 width_mask = (0x1 << q->width) - 1;
+ int i;
+ bool cookie;
+
+ ss->snapshot_time = ktime_get_real();
+ ss->boot_time = ktime_get_boottime();
+
+ cookie = dma_fence_begin_signalling();
+ for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
+ if (adj_logical_mask & BIT(i)) {
+ adj_logical_mask |= width_mask << i;
+ i += q->width;
+ } else {
+ ++i;
+ }
+ }
+
+ xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+
+ coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
+ coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
+
+ for_each_hw_engine(hwe, q->gt, id) {
+ if (hwe->class != q->hwe->class ||
+ !(BIT(hwe->logical_instance) & adj_logical_mask)) {
+ coredump->snapshot.hwe[id] = NULL;
+ continue;
+ }
+ coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
+ }
+
+ xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ dma_fence_end_signalling(cookie);
+}
+
+/**
+ * xe_devcoredump - Take the required snapshots and initialize coredump device.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
+ *
+ * This function should be called at the crash time within the serialized
+ * gt_reset. It is skipped if we still have the core dump device available
+ * with the information of the 'first' snapshot.
+ */
+void xe_devcoredump(struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_devcoredump *coredump = &xe->devcoredump;
+
+ if (coredump->captured) {
+ drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
+ return;
+ }
+
+ coredump->captured = true;
+ devcoredump_snapshot(coredump, q);
+
+ drm_info(&xe->drm, "Xe device coredump has been created\n");
+ drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ xe->drm.primary->index);
+
+ dev_coredumpm(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
+ xe_devcoredump_read, xe_devcoredump_free);
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
new file mode 100644
index 000000000..6ac218a5c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_DEVCOREDUMP_H_
+#define _XE_DEVCOREDUMP_H_
+
+struct xe_device;
+struct xe_exec_queue;
+
+#ifdef CONFIG_DEV_COREDUMP
+void xe_devcoredump(struct xe_exec_queue *q);
+#else
+static inline void xe_devcoredump(struct xe_exec_queue *q)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
new file mode 100644
index 000000000..7fdad9c3d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_DEVCOREDUMP_TYPES_H_
+#define _XE_DEVCOREDUMP_TYPES_H_
+
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+
+#include "xe_hw_engine_types.h"
+
+struct xe_device;
+
+/**
+ * struct xe_devcoredump_snapshot - Crash snapshot
+ *
+ * This struct contains all the useful information quickly captured at the time
+ * of the crash, so any subsequent read of the coredump points to data that
+ * shows the state of the GPU at the time the issue happened.
+ */
+struct xe_devcoredump_snapshot {
+ /** @snapshot_time: Time of this capture. */
+ ktime_t snapshot_time;
+ /** @boot_time: Relative boot time so the uptime can be calculated. */
+ ktime_t boot_time;
+
+ /* GuC snapshots */
+ /** @ct: GuC CT snapshot */
+ struct xe_guc_ct_snapshot *ct;
+ /** @ge: Guc Engine snapshot */
+ struct xe_guc_submit_exec_queue_snapshot *ge;
+ /** @hwe: HW Engine snapshot array */
+ struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
+};
+
+/**
+ * struct xe_devcoredump - Xe devcoredump main structure
+ *
+ * This struct represents the live and active dev_coredump node.
+ * It is created/populated at the time of a crash/error and is read
+ * later when the user accesses the device coredump data file.
+ */
+struct xe_devcoredump {
+ /** @xe: Xe device. */
+ struct xe_device *xe;
+ /** @captured: The snapshot of the first hang has already been taken. */
+ bool captured;
+ /** @snapshot: Snapshot is captured at time of the first crash */
+ struct xe_devcoredump_snapshot snapshot;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
new file mode 100644
index 000000000..5176c27e4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_device.h"
+
+#include <linux/units.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/xe_drm.h>
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_bo.h"
+#include "xe_debugfs.h"
+#include "xe_display.h"
+#include "xe_dma_buf.h"
+#include "xe_drm_client.h"
+#include "xe_drv.h"
+#include "xe_exec_queue.h"
+#include "xe_exec.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_irq.h"
+#include "xe_mmio.h"
+#include "xe_module.h"
+#include "xe_pat.h"
+#include "xe_pcode.h"
+#include "xe_pm.h"
+#include "xe_query.h"
+#include "xe_tile.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_ttm_sys_mgr.h"
+#include "xe_vm.h"
+#include "xe_wait_user_fence.h"
+#include "xe_hwmon.h"
+
+#ifdef CONFIG_LOCKDEP
+struct lockdep_map xe_device_mem_access_lockdep_map = {
+ .name = "xe_device_mem_access_lockdep_map"
+};
+#endif
+
+static int xe_file_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_drm_client *client;
+ struct xe_file *xef;
+ int ret = -ENOMEM;
+
+ xef = kzalloc(sizeof(*xef), GFP_KERNEL);
+ if (!xef)
+ return ret;
+
+ client = xe_drm_client_alloc();
+ if (!client) {
+ kfree(xef);
+ return ret;
+ }
+
+ xef->drm = file;
+ xef->client = client;
+ xef->xe = xe;
+
+ mutex_init(&xef->vm.lock);
+ xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
+
+ mutex_init(&xef->exec_queue.lock);
+ xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
+
+ spin_lock(&xe->clients.lock);
+ xe->clients.count++;
+ spin_unlock(&xe->clients.lock);
+
+ file->driver_priv = xef;
+ return 0;
+}
+
+static void xe_file_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = file->driver_priv;
+ struct xe_vm *vm;
+ struct xe_exec_queue *q;
+ unsigned long idx;
+
+ mutex_lock(&xef->exec_queue.lock);
+ xa_for_each(&xef->exec_queue.xa, idx, q) {
+ xe_exec_queue_kill(q);
+ xe_exec_queue_put(q);
+ }
+ mutex_unlock(&xef->exec_queue.lock);
+ xa_destroy(&xef->exec_queue.xa);
+ mutex_destroy(&xef->exec_queue.lock);
+ mutex_lock(&xef->vm.lock);
+ xa_for_each(&xef->vm.xa, idx, vm)
+ xe_vm_close_and_put(vm);
+ mutex_unlock(&xef->vm.lock);
+ xa_destroy(&xef->vm.xa);
+ mutex_destroy(&xef->vm.lock);
+
+ spin_lock(&xe->clients.lock);
+ xe->clients.count--;
+ spin_unlock(&xe->clients.lock);
+
+ xe_drm_client_put(xef->client);
+ kfree(xef);
+}
+
+static const struct drm_ioctl_desc xe_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
+ DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations xe_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release_noglobal,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .compat_ioctl = drm_compat_ioctl,
+ .llseek = noop_llseek,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_fdinfo,
+#endif
+};
+
+static void xe_driver_release(struct drm_device *dev)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
+}
+
+static struct drm_driver driver = {
+ /* Don't use MTRRs here; the Xserver or userspace app should
+ * deal with them for Intel hardware.
+ */
+ .driver_features =
+ DRIVER_GEM |
+ DRIVER_RENDER | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
+ .open = xe_file_open,
+ .postclose = xe_file_close,
+
+ .gem_prime_import = xe_gem_prime_import,
+
+ .dumb_create = xe_bo_dumb_create,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = xe_drm_client_fdinfo,
+#endif
+ .release = &xe_driver_release,
+
+ .ioctls = xe_ioctls,
+ .num_ioctls = ARRAY_SIZE(xe_ioctls),
+ .fops = &xe_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static void xe_device_destroy(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (xe->ordered_wq)
+ destroy_workqueue(xe->ordered_wq);
+
+ if (xe->unordered_wq)
+ destroy_workqueue(xe->unordered_wq);
+
+ ttm_device_fini(&xe->ttm);
+}
+
+struct xe_device *xe_device_create(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct xe_device *xe;
+ int err;
+
+ xe_display_driver_set_hooks(&driver);
+
+ err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ if (err)
+ return ERR_PTR(err);
+
+ xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
+ if (IS_ERR(xe))
+ return xe;
+
+ err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
+ xe->drm.anon_inode->i_mapping,
+ xe->drm.vma_offset_manager, false, false);
+ if (WARN_ON(err))
+ goto err;
+
+ err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
+ if (err)
+ goto err;
+
+ xe->info.devid = pdev->device;
+ xe->info.revid = pdev->revision;
+ xe->info.force_execlist = xe_modparam.force_execlist;
+
+ spin_lock_init(&xe->irq.lock);
+ spin_lock_init(&xe->clients.lock);
+
+ init_waitqueue_head(&xe->ufence_wq);
+
+ drmm_mutex_init(&xe->drm, &xe->usm.lock);
+ xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ /* Trigger a large asid and an early asid wrap. */
+ u32 asid;
+
+ BUILD_BUG_ON(XE_MAX_ASID < 2);
+ err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
+ XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
+ &xe->usm.next_asid, GFP_KERNEL);
+ drm_WARN_ON(&xe->drm, err);
+ if (err >= 0)
+ xa_erase(&xe->usm.asid_to_vm, asid);
+ }
+
+ spin_lock_init(&xe->pinned.lock);
+ INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.external_vram);
+ INIT_LIST_HEAD(&xe->pinned.evicted);
+
+ xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
+ xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
+ if (!xe->ordered_wq || !xe->unordered_wq) {
+ drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ err = xe_display_create(xe);
+ if (WARN_ON(err))
+ goto err;
+
+ return xe;
+
+err:
+ return ERR_PTR(err);
+}
+
+/*
+ * The driver-initiated FLR is the highest level of reset that we can trigger
+ * from within the driver. It is different from the PCI FLR in that it doesn't
+ * fully reset the SGUnit and doesn't modify the PCI config space and therefore
+ * it doesn't require a re-enumeration of the PCI BARs. However, the
+ * driver-initiated FLR does still cause a reset of both GT and display and a
+ * memory wipe of local and stolen memory, so recovery would require a full HW
+ * re-init and saving/restoring (or re-populating) the wiped memory. Since we
+ * perform the FLR as the very last action before releasing access to the HW
+ * during the driver release flow, we don't attempt recovery at all, because
+ * if/when a new instance of the driver is bound to the device it will do a
+ * full re-init anyway.
+ */
+static void xe_driver_flr(struct xe_device *xe)
+{
+ const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ int ret;
+
+ if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+ drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ return;
+ }
+
+ drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
+
+ /*
+ * Make sure any pending FLR requests have cleared by waiting for the
+ * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
+ * to make sure it's not still set from a prior attempt (it's a write to
+ * clear bit).
+ * Note that we should never be in a situation where a previous attempt
+ * is still pending (unless the HW is totally dead), but better to be
+ * safe in case something unexpected happens
+ */
+ ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ if (ret) {
+ drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
+ return;
+ }
+ xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+
+ /* Trigger the actual Driver-FLR */
+ xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);
+
+ /* Wait for hardware teardown to complete */
+ ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ if (ret) {
+ drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
+ return;
+ }
+
+ /* Wait for hardware/firmware re-init to complete */
+ ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
+ flr_timeout, NULL, false);
+ if (ret) {
+ drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
+ return;
+ }
+
+ /* Clear sticky completion status */
+ xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+}
+
+static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (xe->needs_flr_on_fini)
+ xe_driver_flr(xe);
+}
+
+static void xe_device_sanitize(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_gt *gt;
+ u8 id;
+
+ for_each_gt(gt, xe, id)
+ xe_gt_sanitize(gt);
+}
+
+static int xe_set_dma_info(struct xe_device *xe)
+{
+ unsigned int mask_size = xe->info.dma_mask_size;
+ int err;
+
+ dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
+
+ err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
+ if (err)
+ goto mask_err;
+
+ err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
+ if (err)
+ goto mask_err;
+
+ return 0;
+
+mask_err:
+ drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
+ return err;
+}
+
+/*
+ * Initialize MMIO resources that don't require any knowledge about tile count.
+ */
+int xe_device_probe_early(struct xe_device *xe)
+{
+ int err;
+
+ err = xe_mmio_init(xe);
+ if (err)
+ return err;
+
+ err = xe_mmio_root_tile_init(xe);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int xe_device_set_has_flat_ccs(struct xe_device *xe)
+{
+	struct xe_gt *gt = xe_root_mmio_gt(xe);
+	u32 reg;
+	int err;
+
+	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
+		return 0;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return err;
+
+ reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
+ xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
+
+ if (!xe->info.has_flat_ccs)
+ drm_dbg(&xe->drm,
+			"Flat CCS has been disabled in BIOS, may lead to performance impact");
+
+ return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
+int xe_device_probe(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ int err;
+ u8 id;
+
+ xe_pat_init_early(xe);
+
+ xe->info.mem_region_mask = 1;
+ err = xe_display_init_nommio(xe);
+ if (err)
+ return err;
+
+ err = xe_set_dma_info(xe);
+ if (err)
+ return err;
+
+ xe_mmio_probe_tiles(xe);
+
+ xe_ttm_sys_mgr_init(xe);
+
+ for_each_gt(gt, xe, id)
+ xe_force_wake_init_gt(gt, gt_to_fw(gt));
+
+ for_each_tile(tile, xe, id) {
+ err = xe_ggtt_init_early(tile->mem.ggtt);
+ if (err)
+ return err;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
+ if (err)
+ return err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_pcode_probe(gt);
+ if (err)
+ return err;
+ }
+
+ err = xe_display_init_noirq(xe);
+ if (err)
+ return err;
+
+ err = xe_irq_install(xe);
+ if (err)
+ goto err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_init_early(gt);
+ if (err)
+ goto err_irq_shutdown;
+ }
+
+ err = xe_device_set_has_flat_ccs(xe);
+ if (err)
+ goto err_irq_shutdown;
+
+ err = xe_mmio_probe_vram(xe);
+ if (err)
+ goto err_irq_shutdown;
+
+ for_each_tile(tile, xe, id) {
+ err = xe_tile_init_noalloc(tile);
+ if (err)
+ goto err_irq_shutdown;
+ }
+
+ /* Allocate and map stolen after potential VRAM resize */
+ xe_ttm_stolen_mgr_init(xe);
+
+ /*
+ * Now that GT is initialized (TTM in particular),
+ * we can try to init display, and inherit the initial fb.
+ * This is the reason the first allocation needs to be done
+ * inside display.
+ */
+ err = xe_display_init_noaccel(xe);
+ if (err)
+ goto err_irq_shutdown;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_init(gt);
+ if (err)
+ goto err_irq_shutdown;
+ }
+
+ xe_heci_gsc_init(xe);
+
+ err = xe_display_init(xe);
+ if (err)
+ goto err_irq_shutdown;
+
+ err = drm_dev_register(&xe->drm, 0);
+ if (err)
+ goto err_fini_display;
+
+ xe_display_register(xe);
+
+ xe_debugfs_register(xe);
+
+ xe_hwmon_register(xe);
+
+ err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
+ if (err)
+ return err;
+
+ return 0;
+
+err_fini_display:
+ xe_display_driver_remove(xe);
+
+err_irq_shutdown:
+ xe_irq_shutdown(xe);
+err:
+ xe_display_fini(xe);
+ return err;
+}
+
+static void xe_device_remove_display(struct xe_device *xe)
+{
+ xe_display_unregister(xe);
+
+ drm_dev_unplug(&xe->drm);
+ xe_display_driver_remove(xe);
+}
+
+void xe_device_remove(struct xe_device *xe)
+{
+ xe_device_remove_display(xe);
+
+ xe_display_fini(xe);
+
+ xe_heci_gsc_fini(xe);
+
+ xe_irq_shutdown(xe);
+}
+
+void xe_device_shutdown(struct xe_device *xe)
+{
+}
+
+void xe_device_wmb(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
+ wmb();
+ if (IS_DGFX(xe))
+ xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
+}
+
+u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
+{
+ return xe_device_has_flat_ccs(xe) ?
+ DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
+}
+
+bool xe_device_mem_access_ongoing(struct xe_device *xe)
+{
+ if (xe_pm_read_callback_task(xe) != NULL)
+ return true;
+
+ return atomic_read(&xe->mem_access.ref);
+}
+
+void xe_device_assert_mem_access(struct xe_device *xe)
+{
+ XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
+}
+
+bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
+{
+ bool active;
+
+ if (xe_pm_read_callback_task(xe) == current)
+ return true;
+
+ active = xe_pm_runtime_get_if_active(xe);
+ if (active) {
+ int ref = atomic_inc_return(&xe->mem_access.ref);
+
+ xe_assert(xe, ref != S32_MAX);
+ }
+
+ return active;
+}
+
+void xe_device_mem_access_get(struct xe_device *xe)
+{
+ int ref;
+
+ /*
+ * This looks racy, but should be fine since the pm_callback_task only
+ * transitions from NULL -> current (and back to NULL again), during the
+ * runtime_resume() or runtime_suspend() callbacks, for which there can
+ * only be a single one running for our device. We only need to prevent
+ * recursively calling the runtime_get or runtime_put from those
+ * callbacks, as well as preventing triggering any access_ongoing
+ * asserts.
+ */
+ if (xe_pm_read_callback_task(xe) == current)
+ return;
+
+ /*
+ * Since the resume here is synchronous it can be quite easy to deadlock
+ * if we are not careful. Also in practice it might be quite timing
+ * sensitive to ever see the 0 -> 1 transition with the callers locks
+ * held, so deadlocks might exist but are hard for lockdep to ever see.
+ * With this in mind, help lockdep learn about the potentially scary
+ * stuff that can happen inside the runtime_resume callback by acquiring
+ * a dummy lock (it doesn't protect anything and gets compiled out on
+ * non-debug builds). Lockdep then only needs to see the
+ * mem_access_lockdep_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> mem_access_lockdep_map.
+ * For example if the (callers_locks) are ever grabbed in the
+ * runtime_resume callback, lockdep should give us a nice splat.
+ */
+ lock_map_acquire(&xe_device_mem_access_lockdep_map);
+ lock_map_release(&xe_device_mem_access_lockdep_map);
+
+ xe_pm_runtime_get(xe);
+ ref = atomic_inc_return(&xe->mem_access.ref);
+
+ xe_assert(xe, ref != S32_MAX);
+}
+
+void xe_device_mem_access_put(struct xe_device *xe)
+{
+ int ref;
+
+ if (xe_pm_read_callback_task(xe) == current)
+ return;
+
+ ref = atomic_dec_return(&xe->mem_access.ref);
+ xe_pm_runtime_put(xe);
+
+ xe_assert(xe, ref >= 0);
+}
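+
+/*
+ * Typical (illustrative) pairing of the helpers above around hardware access;
+ * the access in the middle is a placeholder:
+ *
+ *	xe_device_mem_access_get(xe);
+ *	... MMIO / VRAM access ...
+ *	xe_device_mem_access_put(xe);
+ */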
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
new file mode 100644
index 000000000..bf8efb44e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_DEVICE_H_
+#define _XE_DEVICE_H_
+
+struct xe_exec_queue;
+struct xe_file;
+
+#include <drm/drm_util.h>
+
+#include "regs/xe_gpu_commands.h"
+#include "xe_device_types.h"
+#include "xe_force_wake.h"
+#include "xe_macros.h"
+
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map xe_device_mem_access_lockdep_map;
+#endif
+
+static inline struct xe_device *to_xe_device(const struct drm_device *dev)
+{
+ return container_of(dev, struct xe_device, drm);
+}
+
+static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
+{
+ return container_of(ttm, struct xe_device, ttm);
+}
+
+struct xe_device *xe_device_create(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+int xe_device_probe_early(struct xe_device *xe);
+int xe_device_probe(struct xe_device *xe);
+void xe_device_remove(struct xe_device *xe);
+void xe_device_shutdown(struct xe_device *xe);
+
+void xe_device_wmb(struct xe_device *xe);
+
+static inline struct xe_file *to_xe_file(const struct drm_file *file)
+{
+ return file->driver_priv;
+}
+
+static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
+{
+ return &xe->tiles[0];
+}
+
+#define XE_MAX_GT_PER_TILE 2
+
+static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id)
+{
+ if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE))
+ gt_id = 0;
+
+ return gt_id ? tile->media_gt : tile->primary_gt;
+}
+
+static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ struct xe_gt *gt;
+
+ /*
+ * FIXME: This only works for now because multi-tile and standalone
+ * media are mutually exclusive on the platforms we have today.
+ *
+ * id => GT mapping may change once we settle on how we want to handle
+ * our UAPI.
+ */
+ if (MEDIA_VER(xe) >= 13) {
+ gt = xe_tile_get_gt(root_tile, gt_id);
+ } else {
+ if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE))
+ gt_id = 0;
+
+ gt = xe->tiles[gt_id].primary_gt;
+ }
+
+ if (!gt)
+ return NULL;
+
+ drm_WARN_ON(&xe->drm, gt->info.id != gt_id);
+ drm_WARN_ON(&xe->drm, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
+
+ return gt;
+}
+
+/*
+ * Provide a GT structure suitable for performing non-GT MMIO operations against
+ * the primary tile. Primarily intended for early tile initialization, display
+ * handling, top-most interrupt enable/disable, etc. Since anything using the
+ * MMIO handle returned by this function doesn't need GSI offset translation,
+ * we'll return the primary GT from the root tile.
+ *
+ * FIXME: Fix the driver design so that 'gt' isn't the target of all MMIO
+ * operations.
+ *
+ * Returns the primary gt of the root tile.
+ */
+static inline struct xe_gt *xe_root_mmio_gt(struct xe_device *xe)
+{
+ return xe_device_get_root_tile(xe)->primary_gt;
+}
+
+static inline bool xe_device_uc_enabled(struct xe_device *xe)
+{
+ return !xe->info.force_execlist;
+}
+
+#define for_each_tile(tile__, xe__, id__) \
+ for ((id__) = 0; (id__) < (xe__)->info.tile_count; (id__)++) \
+ for_each_if((tile__) = &(xe__)->tiles[(id__)])
+
+#define for_each_remote_tile(tile__, xe__, id__) \
+ for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \
+ for_each_if((tile__) = &(xe__)->tiles[(id__)])
+
+/*
+ * FIXME: This only works for now since multi-tile and standalone media
+ * happen to be mutually exclusive. Future platforms may change this...
+ */
+#define for_each_gt(gt__, xe__, id__) \
+ for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \
+ for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))
+
+static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
+{
+ return &gt->mmio.fw;
+}
+
+void xe_device_mem_access_get(struct xe_device *xe);
+bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe);
+void xe_device_mem_access_put(struct xe_device *xe);
+
+void xe_device_assert_mem_access(struct xe_device *xe);
+bool xe_device_mem_access_ongoing(struct xe_device *xe);
+
+static inline bool xe_device_in_fault_mode(struct xe_device *xe)
+{
+ return xe->usm.num_vm_in_fault_mode != 0;
+}
+
+static inline bool xe_device_in_non_fault_mode(struct xe_device *xe)
+{
+ return xe->usm.num_vm_in_non_fault_mode != 0;
+}
+
+static inline bool xe_device_has_flat_ccs(struct xe_device *xe)
+{
+ return xe->info.has_flat_ccs;
+}
+
+static inline bool xe_device_has_sriov(struct xe_device *xe)
+{
+ return xe->info.has_sriov;
+}
+
+u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
new file mode 100644
index 000000000..99113a5a2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_device_sysfs.h"
+#include "xe_pm.h"
+
+/**
+ * DOC: Xe device sysfs
+ * The Xe driver needs to expose certain tunable knobs, controlled by user
+ * space, for each graphics device, so sysfs attributes are added at
+ * device-level granularity. These attributes are available under the pci
+ * device kobj directory.
+ *
+ * vram_d3cold_threshold - Report/change the VRAM usage threshold (in MB) below
+ * which VRAM save/restore is permissible during runtime D3cold entry/exit.
+ */
+
+static ssize_t
+vram_d3cold_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ int ret;
+
+ if (!xe)
+ return -EINVAL;
+
+ ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
+
+ return ret;
+}
+
+static ssize_t
+vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ u32 vram_d3cold_threshold;
+ int ret;
+
+ if (!xe)
+ return -EINVAL;
+
+ ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
+ if (ret)
+ return ret;
+
+ drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);
+
+ ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
+
+ return ret ?: count;
+}
+
+static DEVICE_ATTR_RW(vram_d3cold_threshold);
+
+static void xe_device_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+}
+
+void xe_device_sysfs_init(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+ int ret;
+
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (ret) {
+ drm_warn(&xe->drm, "Failed to create sysfs file\n");
+ return;
+ }
+
+ ret = drmm_add_action_or_reset(&xe->drm, xe_device_sysfs_fini, xe);
+ if (ret)
+ drm_warn(&xe->drm, "Failed to add sysfs fini drm action\n");
+}
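
For illustration, the attribute registered above can be exercised through the regular sysfs file on the PCI device directory. A minimal user-space sketch (the PCI address 0000:03:00.0 is a placeholder for whichever device xe has bound to; writing requires appropriate privileges):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:03:00.0/vram_d3cold_threshold";
	unsigned int mb;
	FILE *f;

	/* Read the current threshold, printed by vram_d3cold_threshold_show() */
	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &mb) == 1)
		printf("current d3cold VRAM threshold: %u MB\n", mb);
	fclose(f);

	/* Update it; the value is parsed by kstrtou32() in the store hook */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%u\n", 512u);
	fclose(f);
	return 0;
}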
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.h b/drivers/gpu/drm/xe/xe_device_sysfs.h
new file mode 100644
index 000000000..38b240684
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_DEVICE_SYSFS_H_
+#define _XE_DEVICE_SYSFS_H_
+
+struct xe_device;
+
+void xe_device_sysfs_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
new file mode 100644
index 000000000..e8491979a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -0,0 +1,537 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#ifndef _XE_DEVICE_TYPES_H_
+#define _XE_DEVICE_TYPES_H_
+
+#include <linux/pci.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/ttm/ttm_device.h>
+
+#include "xe_devcoredump_types.h"
+#include "xe_heci_gsc.h"
+#include "xe_gt_types.h"
+#include "xe_lmtt_types.h"
+#include "xe_platform_types.h"
+#include "xe_pt_types.h"
+#include "xe_sriov_types.h"
+#include "xe_step_types.h"
+
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+#include "soc/intel_pch.h"
+#include "intel_display_core.h"
+#include "intel_display_device.h"
+#endif
+
+struct xe_ggtt;
+struct xe_pat_ops;
+
+#define XE_BO_INVALID_OFFSET LONG_MAX
+
+#define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
+#define MEDIA_VER(xe) ((xe)->info.media_verx100 / 100)
+#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
+#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
+#define IS_DGFX(xe) ((xe)->info.is_dgfx)
+#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
+
+#define XE_VRAM_FLAGS_NEED64K BIT(0)
+
+#define XE_GT0 0
+#define XE_GT1 1
+#define XE_MAX_TILES_PER_DEVICE (XE_GT1 + 1)
+
+#define XE_MAX_ASID (BIT(20))
+
+#define IS_PLATFORM_STEP(_xe, _platform, min_step, max_step) \
+ ((_xe)->info.platform == (_platform) && \
+ (_xe)->info.step.graphics >= (min_step) && \
+ (_xe)->info.step.graphics < (max_step))
+#define IS_SUBPLATFORM_STEP(_xe, _platform, sub, min_step, max_step) \
+ ((_xe)->info.platform == (_platform) && \
+ (_xe)->info.subplatform == (sub) && \
+ (_xe)->info.step.graphics >= (min_step) && \
+ (_xe)->info.step.graphics < (max_step))
+
+#define tile_to_xe(tile__) \
+ _Generic(tile__, \
+ const struct xe_tile * : (const struct xe_device *)((tile__)->xe), \
+ struct xe_tile * : (tile__)->xe)
+
+/**
+ * struct xe_mem_region - memory region structure
+ * This is used to describe a memory region in an xe
+ * device, such as HBM memory or CXL extension memory.
+ */
+struct xe_mem_region {
+ /** @io_start: IO start address of this VRAM instance */
+ resource_size_t io_start;
+ /**
+ * @io_size: IO size of this VRAM instance
+ *
+ * This represents how much of this VRAM we can access
+ * via the CPU through the VRAM BAR. This can be smaller
+ * than @usable_size, in which case only part of VRAM is CPU
+ * accessible (typically the first 256M). This
+ * configuration is known as small-bar.
+ */
+ resource_size_t io_size;
+ /** @dpa_base: This memory region's DPA (device physical address) base */
+ resource_size_t dpa_base;
+ /**
+ * @usable_size: usable size of VRAM
+ *
+ * Usable size of VRAM excluding reserved portions
+ * (e.g stolen mem)
+ */
+ resource_size_t usable_size;
+ /**
+ * @actual_physical_size: Actual VRAM size
+ *
+ * Actual VRAM size including reserved portions
+ * (e.g stolen mem)
+ */
+ resource_size_t actual_physical_size;
+ /** @mapping: pointer to VRAM mappable space */
+ void __iomem *mapping;
+};
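
Given the @io_size / @usable_size relationship documented above, a small-bar configuration is simply the case where the CPU-visible window is smaller than usable VRAM. A minimal, hypothetical predicate (not part of the driver):

/* Hypothetical helper: true when only part of VRAM is CPU-accessible. */
static inline bool xe_mem_region_is_small_bar(const struct xe_mem_region *mr)
{
	return mr->io_size && mr->io_size < mr->usable_size;
}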
+
+/**
+ * struct xe_tile - hardware tile structure
+ *
+ * From a driver perspective, a "tile" is effectively a complete GPU, containing
+ * an SGunit, 1-2 GTs, and (for discrete platforms) VRAM.
+ *
+ * Multi-tile platforms effectively bundle multiple GPUs behind a single PCI
+ * device and designate one "root" tile as being responsible for external PCI
+ * communication. PCI BAR0 exposes the GGTT and MMIO register space for each
+ * tile in a stacked layout, and PCI BAR2 exposes the local memory associated
+ * with each tile similarly. Device-wide interrupts can be enabled/disabled
+ * at the root tile, and the MSTR_TILE_INTR register will report which tiles
+ * have interrupts that need servicing.
+ */
+struct xe_tile {
+ /** @xe: Backpointer to tile's PCI device */
+ struct xe_device *xe;
+
+ /** @id: ID of the tile */
+ u8 id;
+
+ /**
+ * @primary_gt: Primary GT
+ */
+ struct xe_gt *primary_gt;
+
+ /**
+ * @media_gt: Media GT
+ *
+ * Only present on devices with media version >= 13.
+ */
+ struct xe_gt *media_gt;
+
+ /**
+ * @mmio: MMIO info for a tile.
+ *
+ * Each tile has its own 16MB space in BAR0, laid out as:
+ * * 0-4MB: registers
+ * * 4MB-8MB: reserved
+ * * 8MB-16MB: global GTT
+ */
+ struct {
+ /** @size: size of tile's MMIO space */
+ size_t size;
+
+ /** @regs: pointer to tile's MMIO space (starting with registers) */
+ void __iomem *regs;
+ } mmio;
+
+ /**
+ * @mmio_ext: MMIO-extension info for a tile.
+ *
+ * Each tile has its own additional 256MB (28-bit) MMIO-extension space.
+ */
+ struct {
+ /** @size: size of tile's additional MMIO-extension space */
+ size_t size;
+
+ /** @regs: pointer to tile's additional MMIO-extension space */
+ void __iomem *regs;
+ } mmio_ext;
+
+ /** @mem: memory management info for tile */
+ struct {
+ /**
+ * @vram: VRAM info for tile.
+ *
+ * Although VRAM is associated with a specific tile, it can
+ * still be accessed by all tiles' GTs.
+ */
+ struct xe_mem_region vram;
+
+ /** @vram_mgr: VRAM TTM manager */
+ struct xe_ttm_vram_mgr *vram_mgr;
+
+ /** @ggtt: Global graphics translation table */
+ struct xe_ggtt *ggtt;
+
+ /**
+ * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+ *
+ * Media GT shares a pool with its primary GT.
+ */
+ struct xe_sa_manager *kernel_bb_pool;
+ } mem;
+
+ /** @sriov: tile level virtualization data */
+ union {
+ struct {
+ /** @sriov.pf.lmtt: Local Memory Translation Table. */
+ struct xe_lmtt lmtt;
+ } pf;
+ } sriov;
+
+ /** @migrate: Migration helper for vram blits and clearing */
+ struct xe_migrate *migrate;
+
+ /** @sysfs: sysfs' kobj used by xe_tile_sysfs */
+ struct kobject *sysfs;
+};
+
+/**
+ * struct xe_device - Top level struct of XE device
+ */
+struct xe_device {
+ /** @drm: drm device */
+ struct drm_device drm;
+
+ /** @devcoredump: device coredump */
+ struct xe_devcoredump devcoredump;
+
+ /** @info: device info */
+ struct intel_device_info {
+ /** @graphics_name: graphics IP name */
+ const char *graphics_name;
+ /** @media_name: media IP name */
+ const char *media_name;
+ /** @tile_mmio_ext_size: size of MMIO extension space, per-tile */
+ u32 tile_mmio_ext_size;
+ /** @graphics_verx100: graphics IP version */
+ u32 graphics_verx100;
+ /** @media_verx100: media IP version */
+ u32 media_verx100;
+ /** @mem_region_mask: mask of valid memory regions */
+ u32 mem_region_mask;
+ /** @platform: XE platform enum */
+ enum xe_platform platform;
+ /** @subplatform: XE subplatform enum */
+ enum xe_subplatform subplatform;
+ /** @devid: device ID */
+ u16 devid;
+ /** @revid: device revision */
+ u8 revid;
+ /** @step: stepping information for each IP */
+ struct xe_step_info step;
+ /** @dma_mask_size: DMA address bits */
+ u8 dma_mask_size;
+ /** @vram_flags: Vram flags */
+ u8 vram_flags;
+ /** @tile_count: Number of tiles */
+ u8 tile_count;
+ /** @gt_count: Total number of GTs for entire device */
+ u8 gt_count;
+ /** @vm_max_level: Max VM level */
+ u8 vm_max_level;
+ /** @va_bits: Maximum bits of a virtual address */
+ u8 va_bits;
+
+ /** @is_dgfx: is discrete device */
+ u8 is_dgfx:1;
+ /** @has_asid: Has address space ID */
+ u8 has_asid:1;
+ /** @force_execlist: Forced execlist submission */
+ u8 force_execlist:1;
+ /** @has_flat_ccs: Whether flat CCS metadata is used */
+ u8 has_flat_ccs:1;
+ /** @has_llc: Device has a shared CPU+GPU last level cache */
+ u8 has_llc:1;
+ /** @has_mmio_ext: Device has extra MMIO address range */
+ u8 has_mmio_ext:1;
+ /** @has_range_tlb_invalidation: Has range based TLB invalidations */
+ u8 has_range_tlb_invalidation:1;
+ /** @has_sriov: Supports SR-IOV */
+ u8 has_sriov:1;
+ /** @has_usm: Device has unified shared memory support */
+ u8 has_usm:1;
+ /** @enable_display: display enabled */
+ u8 enable_display:1;
+ /** @skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
+ u8 skip_mtcfg:1;
+ /** @skip_pcode: skip access to PCODE uC */
+ u8 skip_pcode:1;
+ /** @has_heci_gscfi: device has heci gscfi */
+ u8 has_heci_gscfi:1;
+ /** @skip_guc_pc: Skip GuC based PM feature init */
+ u8 skip_guc_pc:1;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+ struct {
+ u32 rawclk_freq;
+ } i915_runtime;
+#endif
+ } info;
+
+ /** @irq: device interrupt state */
+ struct {
+ /** @lock: lock for processing irq's on this device */
+ spinlock_t lock;
+
+ /** @enabled: interrupts enabled on this device */
+ bool enabled;
+ } irq;
+
+ /** @ttm: ttm device */
+ struct ttm_device ttm;
+
+ /** @mmio: mmio info for device */
+ struct {
+ /** @size: size of MMIO space for device */
+ size_t size;
+ /** @regs: pointer to MMIO space for device */
+ void __iomem *regs;
+ } mmio;
+
+ /** @mem: memory info for device */
+ struct {
+ /** @vram: VRAM info for device */
+ struct xe_mem_region vram;
+ /** @sys_mgr: system TTM manager */
+ struct ttm_resource_manager sys_mgr;
+ } mem;
+
+ /** @sriov: device level virtualization data */
+ struct {
+ /** @sriov.__mode: SR-IOV mode (Don't access directly!) */
+ enum xe_sriov_mode __mode;
+ } sriov;
+
+ /** @clients: drm clients info */
+ struct {
+ /** @lock: Protects drm clients info */
+ spinlock_t lock;
+
+ /** @count: number of drm clients */
+ u64 count;
+ } clients;
+
+ /** @usm: unified memory state */
+ struct {
+ /** @asid_to_vm: map an ASID to its VM */
+ struct xarray asid_to_vm;
+ /** @next_asid: next ASID, used for cyclical ASID allocation */
+ u32 next_asid;
+ /** @num_vm_in_fault_mode: number of VM in fault mode */
+ u32 num_vm_in_fault_mode;
+ /** @num_vm_in_non_fault_mode: number of VM in non-fault mode */
+ u32 num_vm_in_non_fault_mode;
+ /** @lock: protects USM state */
+ struct mutex lock;
+ } usm;
+
+ /** @pinned: pinned BO state */
+ struct {
+ /** @lock: protects pinned BO list state */
+ spinlock_t lock;
+ /** @kernel_bo_present: pinned kernel BOs that are present */
+ struct list_head kernel_bo_present;
+ /** @evicted: pinned BOs that have been evicted */
+ struct list_head evicted;
+ /** @external_vram: pinned external BOs in VRAM */
+ struct list_head external_vram;
+ } pinned;
+
+ /** @ufence_wq: user fence wait queue */
+ wait_queue_head_t ufence_wq;
+
+ /** @ordered_wq: used to serialize compute mode resume */
+ struct workqueue_struct *ordered_wq;
+
+ /** @unordered_wq: used to serialize unordered work, mostly display */
+ struct workqueue_struct *unordered_wq;
+
+ /** @tiles: device tiles */
+ struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE];
+
+ /**
+ * @mem_access: keep track of memory access in the device, possibly
+ * triggering additional actions when they occur.
+ */
+ struct {
+ /** @ref: ref count of memory accesses */
+ atomic_t ref;
+ } mem_access;
+
+ /**
+ * @pat: Encapsulate PAT related stuff
+ */
+ struct {
+ /** Internal operations to abstract platforms */
+ const struct xe_pat_ops *ops;
+ /** PAT table to program in the HW */
+ const struct xe_pat_table_entry *table;
+ /** Number of PAT entries */
+ int n_entries;
+ u32 idx[__XE_CACHE_LEVEL_COUNT];
+ } pat;
+
+ /** @d3cold: Encapsulate d3cold related stuff */
+ struct {
+ /** @capable: Indicates if root port is d3cold capable */
+ bool capable;
+
+ /** @allowed: Indicates if d3cold is a valid device state */
+ bool allowed;
+
+ /** @power_lost: Indicates if card has really lost power. */
+ bool power_lost;
+
+ /**
+ * @vram_threshold:
+ *
+ * The permissible threshold (in megabytes) for VRAM save/restore.
+ * d3cold will be disallowed when VRAM usage is at or above this
+ * threshold, to avoid the VRAM save/restore latency.
+ * The default threshold is 300 MB.
+ */
+ u32 vram_threshold;
+ /** @lock: protect vram_threshold */
+ struct mutex lock;
+ } d3cold;
+
+ /**
+ * @pm_callback_task: Track the active task that is running in either
+ * the runtime_suspend or runtime_resume callbacks.
+ */
+ struct task_struct *pm_callback_task;
+
+ /** @hwmon: hwmon subsystem integration */
+ struct xe_hwmon *hwmon;
+
+ /** @heci_gsc: graphics security controller */
+ struct xe_heci_gsc heci_gsc;
+
+ /** @needs_flr_on_fini: requests function-reset on fini */
+ bool needs_flr_on_fini;
+
+ /* private: */
+
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+ /*
+ * Any fields below this point are the ones used by display.
+ * They are temporarily added here so xe_device can be disguised as
+ * drm_i915_private during build. After cleanup these should go away,
+ * migrating to the right sub-structs.
+ */
+ struct intel_display display;
+ enum intel_pch pch_type;
+ u16 pch_id;
+
+ struct dram_info {
+ bool wm_lv_0_adjust_needed;
+ u8 num_channels;
+ bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4,
+ INTEL_DRAM_DDR5,
+ INTEL_DRAM_LPDDR5,
+ } type;
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+ } dram_info;
+
+ /*
+ * edram size in MB.
+ * Cannot be determined by PCIID. You must always read a register.
+ */
+ u32 edram_size_mb;
+
+ /* To shut up runtime pm macros.. */
+ struct xe_runtime_pm {} runtime_pm;
+
+ /* For pcode */
+ struct mutex sb_lock;
+
+ /* Should be in struct intel_display */
+ u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state;
+ u8 snps_phy_failed_calibration;
+ struct drm_atomic_state *modeset_restore_state;
+ struct list_head global_obj_list;
+
+ union {
+ /* only to allow build, not used functionally */
+ u32 irq_mask;
+ u32 de_irq_mask[I915_MAX_PIPES];
+ };
+ u32 pipestat_irq_mask[I915_MAX_PIPES];
+
+ bool display_irqs_enabled;
+ u32 enabled_irq_mask;
+
+ struct intel_uncore {
+ spinlock_t lock;
+ } uncore;
+
+ /* only to allow build, not used functionally */
+ struct {
+ unsigned int hpll_freq;
+ unsigned int czclk_freq;
+ unsigned int fsb_freq, mem_freq, is_ddr3;
+ u8 vblank_enabled;
+ };
+ struct {
+ const char *dmc_firmware_path;
+ } params;
+
+ void *pxp;
+#endif
+};
+
+/**
+ * struct xe_file - file handle for XE driver
+ */
+struct xe_file {
+ /** @xe: xe device */
+ struct xe_device *xe;
+
+ /** @drm: base DRM file */
+ struct drm_file *drm;
+
+ /** @vm: VM state for file */
+ struct {
+ /** @xa: xarray to store VMs */
+ struct xarray xa;
+ /** @lock: protects file VM state */
+ struct mutex lock;
+ } vm;
+
+ /** @exec_queue: Submission exec queue state for file */
+ struct {
+ /** @xa: xarray to store engines */
+ struct xarray xa;
+ /** @lock: protects file engine state */
+ struct mutex lock;
+ } exec_queue;
+
+ /** @client: drm client */
+ struct xe_drm_client *client;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c
new file mode 100644
index 000000000..6ec375c1c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_display.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_display.h"
+#include "regs/xe_regs.h"
+
+#include <linux/fb.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/xe_drm.h>
+
+#include "soc/intel_dram.h"
+#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
+#include "intel_acpi.h"
+#include "intel_audio.h"
+#include "intel_bw.h"
+#include "intel_display.h"
+#include "intel_display_driver.h"
+#include "intel_display_irq.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dp.h"
+#include "intel_fbdev.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "intel_opregion.h"
+#include "xe_module.h"
+
+/* Xe device functions */
+
+static bool has_display(struct xe_device *xe)
+{
+ return HAS_DISPLAY(xe);
+}
+
+/**
+ * xe_display_driver_probe_defer - Detect if we need to wait for other drivers
+ * early on
+ * @pdev: PCI device
+ *
+ * Returns: true if probe needs to be deferred, false otherwise
+ */
+bool xe_display_driver_probe_defer(struct pci_dev *pdev)
+{
+ if (!xe_modparam.enable_display)
+ return false;
+
+ return intel_display_driver_probe_defer(pdev);
+}
+
+static void xe_display_last_close(struct drm_device *dev)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (xe->info.enable_display)
+ intel_fbdev_restore_mode(to_xe_device(dev));
+}
+
+/**
+ * xe_display_driver_set_hooks - Add driver flags and hooks for display
+ * @driver: DRM device driver
+ *
+ * Set features and function hooks in @driver that are needed for driving the
+ * display IP. This sets the driver's capability of driving display, regardless
+ * of whether the device has it enabled.
+ */
+void xe_display_driver_set_hooks(struct drm_driver *driver)
+{
+ if (!xe_modparam.enable_display)
+ return;
+
+ driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
+ driver->lastclose = xe_display_last_close;
+}
+
+static void unset_display_features(struct xe_device *xe)
+{
+ xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+}
+
+static void display_destroy(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ destroy_workqueue(xe->display.hotplug.dp_wq);
+}
+
+/**
+ * xe_display_create - create display struct
+ * @xe: XE device instance
+ *
+ * Initialize all fields used by the display part.
+ *
+ * TODO: once everything can be inside a single struct, make the struct opaque
+ * to the rest of xe and return it to be xe->display.
+ *
+ * Returns: 0 on success
+ */
+int xe_display_create(struct xe_device *xe)
+{
+ int err;
+
+ spin_lock_init(&xe->display.fb_tracking.lock);
+
+ xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+
+ drmm_mutex_init(&xe->drm, &xe->sb_lock);
+ xe->enabled_irq_mask = ~0;
+
+ err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!xe->info.enable_display)
+ return;
+
+ intel_power_domains_cleanup(xe);
+}
+
+int xe_display_init_nommio(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return 0;
+
+ /* Fake uncore lock */
+ spin_lock_init(&xe->uncore.lock);
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(xe);
+
+ return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
+}
+
+static void xe_display_fini_noirq(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!xe->info.enable_display)
+ return;
+
+ intel_display_driver_remove_noirq(xe);
+ intel_power_domains_driver_remove(xe);
+}
+
+int xe_display_init_noirq(struct xe_device *xe)
+{
+ int err;
+
+ if (!xe->info.enable_display)
+ return 0;
+
+ intel_display_driver_early_probe(xe);
+
+ /* Early display init.. */
+ intel_opregion_setup(xe);
+
+ /*
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
+ */
+ intel_dram_detect(xe);
+
+ intel_bw_init_hw(xe);
+
+ intel_display_device_info_runtime_init(xe);
+
+ err = intel_display_driver_probe_noirq(xe);
+ if (err)
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noirq, NULL);
+}
+
+static void xe_display_fini_noaccel(struct drm_device *dev, void *dummy)
+{
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!xe->info.enable_display)
+ return;
+
+ intel_display_driver_remove_nogem(xe);
+}
+
+int xe_display_init_noaccel(struct xe_device *xe)
+{
+ int err;
+
+ if (!xe->info.enable_display)
+ return 0;
+
+ err = intel_display_driver_probe_nogem(xe);
+ if (err)
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noaccel, NULL);
+}
+
+int xe_display_init(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return 0;
+
+ return intel_display_driver_probe(xe);
+}
+
+void xe_display_fini(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ /* poll work can call into fbdev, hence clean that up afterwards */
+ intel_hpd_poll_fini(xe);
+ intel_fbdev_fini(xe);
+
+ intel_hdcp_component_fini(xe);
+ intel_audio_deinit(xe);
+}
+
+void xe_display_register(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ intel_display_driver_register(xe);
+ intel_register_dsm_handler();
+ intel_power_domains_enable(xe);
+}
+
+void xe_display_unregister(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ intel_unregister_dsm_handler();
+ intel_power_domains_disable(xe);
+ intel_display_driver_unregister(xe);
+}
+
+void xe_display_driver_remove(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ intel_display_driver_remove(xe);
+
+ intel_display_device_remove(xe);
+}
+
+/* IRQ-related functions */
+
+void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ if (master_ctl & DISPLAY_IRQ)
+ gen11_display_irq_handler(xe);
+}
+
+void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ if (gu_misc_iir & GU_MISC_GSE)
+ intel_opregion_asle_intr(xe);
+}
+
+void xe_display_irq_reset(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ gen11_display_irq_reset(xe);
+}
+
+void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ if (gt->info.id == XE_GT0)
+ gen11_de_irq_postinstall(xe);
+}
+
+static void intel_suspend_encoders(struct xe_device *xe)
+{
+ struct drm_device *dev = &xe->drm;
+ struct intel_encoder *encoder;
+
+ if (!has_display(xe))
+ return;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_encoder(dev, encoder)
+ if (encoder->suspend)
+ encoder->suspend(encoder);
+ drm_modeset_unlock_all(dev);
+}
+
+static bool suspend_to_idle(void)
+{
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ return true;
+#endif
+ return false;
+}
+
+void xe_display_pm_suspend(struct xe_device *xe)
+{
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.enable_display)
+ return;
+
+ /*
+ * We do a lot of poking in a lot of registers; make sure they work
+ * properly.
+ */
+ intel_power_domains_disable(xe);
+ if (has_display(xe))
+ drm_kms_helper_poll_disable(&xe->drm);
+
+ intel_display_driver_suspend(xe);
+
+ intel_dp_mst_suspend(xe);
+
+ intel_hpd_cancel_work(xe);
+
+ intel_suspend_encoders(xe);
+
+ intel_opregion_suspend(xe, s2idle ? PCI_D1 : PCI_D3cold);
+
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+
+ intel_dmc_suspend(xe);
+}
+
+void xe_display_pm_suspend_late(struct xe_device *xe)
+{
+ bool s2idle = suspend_to_idle();
+ if (!xe->info.enable_display)
+ return;
+
+ intel_power_domains_suspend(xe, s2idle);
+
+ intel_display_power_suspend_late(xe);
+}
+
+void xe_display_pm_resume_early(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ intel_display_power_resume_early(xe);
+
+ intel_power_domains_resume(xe);
+}
+
+void xe_display_pm_resume(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ return;
+
+ intel_dmc_resume(xe);
+
+ if (has_display(xe))
+ drm_mode_config_reset(&xe->drm);
+
+ intel_display_driver_init_hw(xe);
+ intel_hpd_init(xe);
+
+ /* MST sideband requires HPD interrupts enabled */
+ intel_dp_mst_resume(xe);
+ intel_display_driver_resume(xe);
+
+ intel_hpd_poll_disable(xe);
+ if (has_display(xe))
+ drm_kms_helper_poll_enable(&xe->drm);
+
+ intel_opregion_resume(xe);
+
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
+
+ intel_power_domains_enable(xe);
+}
+
+void xe_display_probe(struct xe_device *xe)
+{
+ if (!xe->info.enable_display)
+ goto no_display;
+
+ intel_display_device_probe(xe);
+
+ if (has_display(xe))
+ return;
+
+no_display:
+ xe->info.enable_display = false;
+ unset_display_features(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_display.h b/drivers/gpu/drm/xe/xe_display.h
new file mode 100644
index 000000000..710e56180
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_display.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_DISPLAY_H_
+#define _XE_DISPLAY_H_
+
+#include "xe_device.h"
+
+struct drm_driver;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+
+bool xe_display_driver_probe_defer(struct pci_dev *pdev);
+void xe_display_driver_set_hooks(struct drm_driver *driver);
+void xe_display_driver_remove(struct xe_device *xe);
+
+int xe_display_create(struct xe_device *xe);
+
+void xe_display_probe(struct xe_device *xe);
+
+int xe_display_init_nommio(struct xe_device *xe);
+int xe_display_init_noirq(struct xe_device *xe);
+int xe_display_init_noaccel(struct xe_device *xe);
+int xe_display_init(struct xe_device *xe);
+void xe_display_fini(struct xe_device *xe);
+
+void xe_display_register(struct xe_device *xe);
+void xe_display_unregister(struct xe_device *xe);
+
+void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl);
+void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
+void xe_display_irq_reset(struct xe_device *xe);
+void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
+
+void xe_display_pm_suspend(struct xe_device *xe);
+void xe_display_pm_suspend_late(struct xe_device *xe);
+void xe_display_pm_resume_early(struct xe_device *xe);
+void xe_display_pm_resume(struct xe_device *xe);
+
+#else
+
+static inline bool xe_display_driver_probe_defer(struct pci_dev *pdev) { return false; }
+static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { }
+static inline void xe_display_driver_remove(struct xe_device *xe) {}
+
+static inline int xe_display_create(struct xe_device *xe) { return 0; }
+
+static inline void xe_display_probe(struct xe_device *xe) { }
+
+static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; }
+static inline int xe_display_init_noirq(struct xe_device *xe) { return 0; }
+static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; }
+static inline int xe_display_init(struct xe_device *xe) { return 0; }
+static inline void xe_display_fini(struct xe_device *xe) {}
+
+static inline void xe_display_register(struct xe_device *xe) {}
+static inline void xe_display_unregister(struct xe_device *xe) {}
+
+static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {}
+static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {}
+static inline void xe_display_irq_reset(struct xe_device *xe) {}
+static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
+
+static inline void xe_display_pm_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
+static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
+static inline void xe_display_pm_resume(struct xe_device *xe) {}
+
+#endif /* CONFIG_DRM_XE_DISPLAY */
+#endif /* _XE_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
new file mode 100644
index 000000000..da2627ed6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_dma_buf.h"
+
+#include <kunit/test.h>
+#include <linux/dma-buf.h>
+#include <linux/pci-p2pdma.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_prime.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "tests/xe_test.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_vm.h"
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+static int xe_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+
+ if (attach->peer2peer &&
+ pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
+ attach->peer2peer = false;
+
+ if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
+ return -EOPNOTSUPP;
+
+ xe_device_mem_access_get(to_xe_device(obj->dev));
+ return 0;
+}
+
+static void xe_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+
+ xe_device_mem_access_put(to_xe_device(obj->dev));
+}
+
+static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ /*
+ * For now only support pinning in TT memory, for two reasons:
+ * 1) Avoid pinning in a placement not accessible to some importers.
+ * 2) Pinning in VRAM requires PIN accounting which is a to-do.
+ */
+ if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
+ return -EINVAL;
+ }
+
+ ret = xe_bo_migrate(bo, XE_PL_TT);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ drm_dbg(&xe->drm,
+ "Failed migrating dma-buf to TT memory: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = xe_bo_pin_external(bo);
+ xe_assert(xe, !ret);
+
+ return 0;
+}
+
+static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ xe_bo_unpin_external(bo);
+}
+
+static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct dma_buf *dma_buf = attach->dmabuf;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct sg_table *sgt;
+ int r = 0;
+
+ if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!xe_bo_is_pinned(bo)) {
+ if (!attach->peer2peer)
+ r = xe_bo_migrate(bo, XE_PL_TT);
+ else
+ r = xe_bo_validate(bo, NULL, false);
+ if (r)
+ return ERR_PTR(r);
+ }
+
+ switch (bo->ttm.resource->mem_type) {
+ case XE_PL_TT:
+ sgt = drm_prime_pages_to_sg(obj->dev,
+ bo->ttm.ttm->pages,
+ bo->ttm.ttm->num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
+
+ if (dma_map_sgtable(attach->dev, sgt, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
+ break;
+
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1:
+ r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
+ bo->ttm.resource, 0,
+ bo->ttm.base.size, attach->dev,
+ dir, &sgt);
+ if (r)
+ return ERR_PTR(r);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-EBUSY);
+}
+
+static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct dma_buf *dma_buf = attach->dmabuf;
+ struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
+
+ if (!xe_bo_is_vram(bo)) {
+ dma_unmap_sgtable(attach->dev, sgt, dir, 0);
+ sg_free_table(sgt);
+ kfree(sgt);
+ } else {
+ xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
+ }
+}
+
+static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ bool reads = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_FROM_DEVICE);
+
+ if (!reads)
+ return 0;
+
+ /* Can we do interruptible lock here? */
+ xe_bo_lock(bo, false);
+ (void)xe_bo_migrate(bo, XE_PL_TT);
+ xe_bo_unlock(bo);
+
+ return 0;
+}
+
+static const struct dma_buf_ops xe_dmabuf_ops = {
+ .attach = xe_dma_buf_attach,
+ .detach = xe_dma_buf_detach,
+ .pin = xe_dma_buf_pin,
+ .unpin = xe_dma_buf_unpin,
+ .map_dma_buf = xe_dma_buf_map,
+ .unmap_dma_buf = xe_dma_buf_unmap,
+ .release = drm_gem_dmabuf_release,
+ .begin_cpu_access = xe_dma_buf_begin_cpu_access,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct dma_buf *buf;
+
+ if (bo->vm)
+ return ERR_PTR(-EPERM);
+
+ buf = drm_gem_prime_export(obj, flags);
+ if (!IS_ERR(buf))
+ buf->ops = &xe_dmabuf_ops;
+
+ return buf;
+}
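
User space reaches xe_gem_prime_export() through the generic DRM PRIME ioctl rather than calling it directly. A hedged sketch of that path (drm_fd and gem_handle are assumed to come from earlier open()/GEM-create calls):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a GEM handle as a dma-buf fd; returns the fd or -1 on error. */
static int export_bo(int drm_fd, uint32_t gem_handle)
{
	struct drm_prime_handle args = {
		.handle = gem_handle,
		.flags = DRM_CLOEXEC | DRM_RDWR,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;
}

Note that VM-private BOs (bo->vm set) are refused with -EPERM above, so only BOs created without a VM can be exported this way.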
+
+static struct drm_gem_object *
+xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
+ struct dma_buf *dma_buf)
+{
+ struct dma_resv *resv = dma_buf->resv;
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_bo *bo;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+ bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
+ 0, /* Will require 1way or 2way for vm_bind */
+ ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ goto error;
+ }
+ dma_resv_unlock(resv);
+
+ return &bo->ttm.base;
+
+error:
+ dma_resv_unlock(resv);
+ return ERR_PTR(ret);
+}
+
+static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ XE_WARN_ON(xe_bo_evict(bo, false));
+}
+
+static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = xe_dma_buf_move_notify
+};
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+
+struct dma_buf_test_params {
+ struct xe_test_priv base;
+ const struct dma_buf_attach_ops *attach_ops;
+ bool force_different_devices;
+ u32 mem_mask;
+};
+
+#define to_dma_buf_test_params(_priv) \
+ container_of(_priv, struct dma_buf_test_params, base)
+#endif
+
+struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ XE_TEST_DECLARE(struct dma_buf_test_params *test =
+ to_dma_buf_test_params
+ (xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
+ const struct dma_buf_attach_ops *attach_ops;
+ struct dma_buf_attachment *attach;
+ struct drm_gem_object *obj;
+ struct xe_bo *bo;
+
+ if (dma_buf->ops == &xe_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev &&
+ !XE_TEST_ONLY(test && test->force_different_devices)) {
+ /*
+ * Importing a dmabuf exported from our own GEM increases the
+ * refcount on the GEM itself instead of the dmabuf's f_count.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ /*
+ * Don't publish the bo until we have a valid attachment, and a
+ * valid attachment needs the bo address. So pre-create a bo before
+ * creating the attachment and publish it afterwards.
+ */
+ bo = xe_bo_alloc();
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ attach_ops = &xe_dma_buf_attach_ops;
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ if (test)
+ attach_ops = test->attach_ops;
+#endif
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
+ if (IS_ERR(attach)) {
+ obj = ERR_CAST(attach);
+ goto out_err;
+ }
+
+ /* Errors here will take care of freeing the bo. */
+ obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
+
+out_err:
+ xe_bo_free(bo);
+
+ return obj;
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_dma_buf.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.h b/drivers/gpu/drm/xe/xe_dma_buf.h
new file mode 100644
index 000000000..861dd28a8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dma_buf.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_DMA_BUF_H_
+#define _XE_DMA_BUF_H_
+
+#include <drm/drm_gem.h>
+
+struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags);
+struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
new file mode 100644
index 000000000..6040e4d22
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+#include <drm/xe_drm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "xe_bo.h"
+#include "xe_bo_types.h"
+#include "xe_device_types.h"
+#include "xe_drm_client.h"
+#include "xe_trace.h"
+
+/**
+ * xe_drm_client_alloc() - Allocate drm client
+ * @void: No arg
+ *
+ * Allocate a drm client struct to track the client's memory usage
+ * for the lifetime of the client. Call this API whenever a new
+ * client opens the xe device.
+ *
+ * Return: pointer to client struct or NULL if can't allocate
+ */
+struct xe_drm_client *xe_drm_client_alloc(void)
+{
+ struct xe_drm_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ kref_init(&client->kref);
+
+#ifdef CONFIG_PROC_FS
+ spin_lock_init(&client->bos_lock);
+ INIT_LIST_HEAD(&client->bos_list);
+#endif
+ return client;
+}
+
+/**
+ * __xe_drm_client_free() - Free client struct
+ * @kref: The reference
+ *
+ * This frees the client struct once the last reference is dropped,
+ * i.e. when the drm client has closed the xe device.
+ *
+ * Return: void
+ */
+void __xe_drm_client_free(struct kref *kref)
+{
+ struct xe_drm_client *client =
+ container_of(kref, typeof(*client), kref);
+
+ kfree(client);
+}
+
+#ifdef CONFIG_PROC_FS
+/**
+ * xe_drm_client_add_bo() - Add BO for tracking client mem usage
+ * @client: The drm client ptr
+ * @bo: The xe BO ptr
+ *
+ * Call this for every BO created by a drm client so that the
+ * client's memory usage can be tracked.
+ *
+ * Return: void
+ */
+void xe_drm_client_add_bo(struct xe_drm_client *client,
+ struct xe_bo *bo)
+{
+ XE_WARN_ON(bo->client);
+ XE_WARN_ON(!list_empty(&bo->client_link));
+
+ spin_lock(&client->bos_lock);
+ bo->client = xe_drm_client_get(client);
+ list_add_tail_rcu(&bo->client_link, &client->bos_list);
+ spin_unlock(&client->bos_lock);
+}
+
+/**
+ * xe_drm_client_remove_bo() - Remove BO for tracking client mem usage
+ * @bo: The xe BO ptr
+ *
+ * Call this when a BO is destroyed so that it no longer counts
+ * towards the client's memory usage.
+ *
+ * Return: void
+ */
+void xe_drm_client_remove_bo(struct xe_bo *bo)
+{
+ struct xe_drm_client *client = bo->client;
+
+ spin_lock(&client->bos_lock);
+ list_del_rcu(&bo->client_link);
+ spin_unlock(&client->bos_lock);
+
+ xe_drm_client_put(client);
+}
+
+static void bo_meminfo(struct xe_bo *bo,
+ struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
+{
+ u64 sz = bo->size;
+ u32 mem_type;
+
+ if (bo->placement.placement)
+ mem_type = bo->placement.placement->mem_type;
+ else
+ mem_type = XE_PL_TT;
+
+ if (bo->ttm.base.handle_count > 1)
+ stats[mem_type].shared += sz;
+ else
+ stats[mem_type].private += sz;
+
+ if (xe_bo_has_pages(bo)) {
+ stats[mem_type].resident += sz;
+
+ if (!dma_resv_test_signaled(bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ stats[mem_type].active += sz;
+ else if (mem_type == XE_PL_SYSTEM)
+ stats[mem_type].purgeable += sz;
+ }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
+ struct xe_file *xef = file->driver_priv;
+ struct ttm_device *bdev = &xef->xe->ttm;
+ struct ttm_resource_manager *man;
+ struct xe_drm_client *client;
+ struct drm_gem_object *obj;
+ struct xe_bo *bo;
+ unsigned int id;
+ u32 mem_type;
+
+ client = xef->client;
+
+ /* Public objects. */
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, obj, id) {
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ bo_meminfo(bo, stats);
+ }
+ spin_unlock(&file->table_lock);
+
+ /* Internal objects. */
+ spin_lock(&client->bos_lock);
+ list_for_each_entry_rcu(bo, &client->bos_list, client_link) {
+ if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
+ continue;
+ bo_meminfo(bo, stats);
+ xe_bo_put(bo);
+ }
+ spin_unlock(&client->bos_lock);
+
+ for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
+ if (!xe_mem_type_to_name[mem_type])
+ continue;
+
+ man = ttm_manager_type(bdev, mem_type);
+
+ if (man) {
+ drm_print_memory_stats(p,
+ &stats[mem_type],
+ DRM_GEM_OBJECT_RESIDENT |
+ (mem_type != XE_PL_SYSTEM ? 0 :
+ DRM_GEM_OBJECT_PURGEABLE),
+ xe_mem_type_to_name[mem_type]);
+ }
+ }
+}
+
+/**
+ * xe_drm_client_fdinfo() - Callback for fdinfo interface
+ * @p: The drm_printer ptr
+ * @file: The drm_file ptr
+ *
+ * This is the callback for the drm fdinfo interface. Register it
+ * as show_fdinfo in the drm driver ops.
+ *
+ * Return: void
+ */
+void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ show_meminfo(p, file);
+}
+#endif
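
For context, a hedged sketch of how this callback is typically wired up (mirroring, not quoting, the driver registration elsewhere in xe; drm_show_fdinfo() is the DRM core helper that forwards /proc/<pid>/fdinfo reads to the driver hook):

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	/* DRM core helper that ends up calling the driver's show_fdinfo */
	.show_fdinfo = drm_show_fdinfo,
};

static struct drm_driver xe_drm_driver = {
	/* ... other ops ... */
	.show_fdinfo = xe_drm_client_fdinfo,
};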
diff --git a/drivers/gpu/drm/xe/xe_drm_client.h b/drivers/gpu/drm/xe/xe_drm_client.h
new file mode 100644
index 000000000..a9649aa36
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_drm_client.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_DRM_CLIENT_H_
+#define _XE_DRM_CLIENT_H_
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/pid.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+struct drm_file;
+struct drm_printer;
+struct xe_bo;
+
+struct xe_drm_client {
+ struct kref kref;
+ unsigned int id;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @bos_lock: lock protecting @bos_list
+ */
+ spinlock_t bos_lock;
+ /**
+ * @bos_list: list of bos created by this client
+ *
+ * Protected by @bos_lock.
+ */
+ struct list_head bos_list;
+#endif
+};
+
+static inline struct xe_drm_client *
+xe_drm_client_get(struct xe_drm_client *client)
+{
+ kref_get(&client->kref);
+ return client;
+}
+
+void __xe_drm_client_free(struct kref *kref);
+
+static inline void xe_drm_client_put(struct xe_drm_client *client)
+{
+ kref_put(&client->kref, __xe_drm_client_free);
+}
+
+struct xe_drm_client *xe_drm_client_alloc(void);
+#ifdef CONFIG_PROC_FS
+void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
+void xe_drm_client_add_bo(struct xe_drm_client *client,
+ struct xe_bo *bo);
+void xe_drm_client_remove_bo(struct xe_bo *bo);
+#else
+static inline void xe_drm_client_add_bo(struct xe_drm_client *client,
+ struct xe_bo *bo)
+{
+}
+
+static inline void xe_drm_client_remove_bo(struct xe_bo *bo)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
new file mode 100644
index 000000000..d45b71426
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_DRV_H_
+#define _XE_DRV_H_
+
+#include <drm/drm_drv.h>
+
+#define DRIVER_NAME "xe"
+#define DRIVER_DESC "Intel Xe Graphics"
+#define DRIVER_DATE "20201103"
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ */
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
new file mode 100644
index 000000000..2cd4ad2fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_exec.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_file.h>
+#include <drm/xe_drm.h>
+#include <linux/delay.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_macros.h"
+#include "xe_ring_ops_types.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
+#include "xe_vm.h"
+
+/**
+ * DOC: Execbuf (User GPU command submission)
+ *
+ * Execs have historically been rather complicated in DRM drivers (at least in
+ * the i915) because of a few things:
+ *
+ * - Passing in a list of BOs which are read / written to, creating implicit syncs
+ * - Binding at exec time
+ * - Flow controlling the ring at exec time
+ *
+ * In XE we avoid all of this complication by not allowing a BO list to be
+ * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
+ * separate operations, and using the DRM scheduler to flow control the ring.
+ * Let's deep dive into each of these.
+ *
+ * We can get away from a BO list by forcing the user to use in / out fences on
+ * every exec rather than the kernel tracking dependencies of BO (e.g. if the
+ * user knows an exec writes to a BO and reads from the BO in the next exec, it
+ * is the user's responsibility to pass in / out fence between the two execs).
+ *
+ * Implicit dependencies for external BOs are handled by using the dma-buf
+ * implicit dependency uAPI (TODO: add link). To make this work, each exec must
+ * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
+ * BO mapped in the VM.
+ *
+ * We do not allow a user to trigger a bind at exec time; rather, we have a VM
+ * bind IOCTL which uses the same in / out fence interface as exec. In that
+ * sense, a VM bind is basically the same operation as an exec from the user
+ * perspective. e.g. If an exec depends on a VM bind use the in / out fence
+ * interface (struct drm_xe_sync) to synchronize like syncing between two
+ * dependent execs.
+ *
+ * Although a user cannot trigger a bind, we still have to rebind userptrs in
+ * the VM that have been invalidated since the last exec, likewise we also have
+ * to rebind BOs that have been evicted by the kernel. We schedule these rebinds
+ * behind any pending kernel operations on any external BOs in VM or any BOs
+ * private to the VM. This is accomplished by the rebinds waiting on BOs
+ * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs
+ * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and
+ * in DMA_RESV_USAGE_WRITE for external BOs).
+ *
+ * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute
+ * mode VMs we use preempt fences and a rebind worker (TODO: add link).
+ *
+ * There is no need to flow control the ring in the exec as we write the ring at
+ * submission time and set the DRM scheduler max job limit SIZE_OF_RING /
+ * MAX_JOB_SIZE. The DRM scheduler will then hold all jobs until space in the
+ * ring is available.
+ *
+ * All of this results in a rather simple exec implementation.
+ *
+ * Flow
+ * ~~~~
+ *
+ * .. code-block::
+ *
+ * Parse input arguments
+ * Wait for any async VM bind passed as in-fences to start
+ * <----------------------------------------------------------------------|
+ * Lock global VM lock in read mode |
+ * Pin userptrs (also finds userptr invalidated since last exec) |
+ * Lock exec (VM dma-resv lock, external BOs dma-resv locks) |
+ * Validate BOs that have been evicted |
+ * Create job |
+ * Rebind invalidated userptrs + evicted BOs (non-compute-mode) |
+ * Add rebind fence dependency to job |
+ * Add job VM dma-resv bookkeeping slot (non-compute mode) |
+ * Add job to external BOs dma-resv write slots (non-compute mode) |
+ * Check if any userptrs invalidated since pin ------ Drop locks ---------|
+ * Install in / out fences for job
+ * Submit job
+ * Unlock all
+ */
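
A hedged user-space sketch of the flow above: one batch buffer submitted with a single out-fence and no BO list. The handles and the batch address are assumed to come from earlier exec-queue-create, syncobj-create and VM-bind calls; the fields match the drm_xe_exec / drm_xe_sync uAPI validated by the ioctl below:

struct drm_xe_sync sync = {
	.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
	.flags = DRM_XE_SYNC_FLAG_SIGNAL,	/* out-fence */
	.handle = syncobj_handle,
};
struct drm_xe_exec exec = {
	.exec_queue_id = exec_queue_id,
	.num_syncs = 1,
	.syncs = (uintptr_t)&sync,
	.address = batch_addr,			/* VM address of the batch */
	.num_batch_buffer = 1,
};

if (ioctl(drm_fd, DRM_IOCTL_XE_EXEC, &exec))
	perror("DRM_IOCTL_XE_EXEC");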
+
+static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
+{
+ return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+}
+
+int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_exec *args = data;
+ struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
+ u64 __user *addresses_user = u64_to_user_ptr(args->address);
+ struct xe_exec_queue *q;
+ struct xe_sync_entry *syncs = NULL;
+ u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
+ struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
+ struct drm_exec *exec = &vm_exec.exec;
+ u32 i, num_syncs = 0, num_ufence = 0;
+ struct xe_sched_job *job;
+ struct xe_vm *vm;
+ bool write_locked, skip_retry = false;
+ ktime_t end = 0;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe, args->extensions) ||
+ XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+ if (XE_IOCTL_DBG(xe, !q))
+ return -ENOENT;
+
+ if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
+ q->width != args->num_batch_buffer))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
+ err = -ECANCELED;
+ goto err_exec_queue;
+ }
+
+ if (args->num_syncs) {
+ syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
+ if (!syncs) {
+ err = -ENOMEM;
+ goto err_exec_queue;
+ }
+ }
+
+ vm = q->vm;
+
+ for (i = 0; i < args->num_syncs; i++) {
+ err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
+ &syncs_user[i], SYNC_PARSE_FLAG_EXEC |
+ (xe_vm_in_lr_mode(vm) ?
+ SYNC_PARSE_FLAG_LR_MODE : 0));
+ if (err)
+ goto err_syncs;
+
+ if (xe_sync_is_ufence(&syncs[i]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+ err = -EINVAL;
+ goto err_syncs;
+ }
+
+ if (xe_exec_queue_is_parallel(q)) {
+ err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
+ q->width);
+ if (err) {
+ err = -EFAULT;
+ goto err_syncs;
+ }
+ }
+
+retry:
+ if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
+ err = down_write_killable(&vm->lock);
+ write_locked = true;
+ } else {
+ /* We don't allow execs while the VM is in error state */
+ err = down_read_interruptible(&vm->lock);
+ write_locked = false;
+ }
+ if (err)
+ goto err_syncs;
+
+ if (write_locked) {
+ err = xe_vm_userptr_pin(vm);
+ downgrade_write(&vm->lock);
+ write_locked = false;
+ if (err)
+ goto err_unlock_list;
+ }
+
+ if (!args->num_batch_buffer) {
+ err = xe_vm_lock(vm, true);
+ if (err)
+ goto err_unlock_list;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ struct dma_fence *fence;
+
+ fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_unlock_list;
+ }
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_exec_queue_last_fence_set(q, vm, fence);
+ dma_fence_put(fence);
+ }
+
+ xe_vm_unlock(vm);
+ goto err_unlock_list;
+ }
+
+ vm_exec.vm = &vm->gpuvm;
+ vm_exec.num_fences = 1 + vm->xe->info.tile_count;
+ vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ if (xe_vm_in_lr_mode(vm)) {
+ drm_exec_init(exec, vm_exec.flags, 0);
+ } else {
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err) {
+ if (xe_vm_validate_should_retry(exec, err, &end))
+ err = -EAGAIN;
+ goto err_unlock_list;
+ }
+ }
+
+ if (xe_vm_is_closed_or_banned(q->vm)) {
+ drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
+ err = -ECANCELED;
+ goto err_exec;
+ }
+
+ if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
+ err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
+ skip_retry = true;
+ goto err_exec;
+ }
+
+ job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
+ addresses : &args->address);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err_exec;
+ }
+
+ /*
+ * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
+ * VM mode only.
+ */
+ err = xe_vm_rebind(vm, false);
+ if (err)
+ goto err_put_job;
+
+ /* Wait behind rebinds */
+ if (!xe_vm_in_lr_mode(vm)) {
+ err = drm_sched_job_add_resv_dependencies(&job->drm,
+ xe_vm_resv(vm),
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ goto err_put_job;
+ }
+
+ for (i = 0; i < num_syncs && !err; i++)
+ err = xe_sync_entry_add_deps(&syncs[i], job);
+ if (err)
+ goto err_put_job;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ err = xe_sched_job_last_fence_add_dep(job, vm);
+ if (err)
+ goto err_put_job;
+
+ err = down_read_interruptible(&vm->userptr.notifier_lock);
+ if (err)
+ goto err_put_job;
+
+ err = __xe_vm_userptr_needs_repin(vm);
+ if (err)
+ goto err_repin;
+ }
+
+ /*
+ * Point of no return, if we error after this point just set an error on
+ * the job and let the DRM scheduler / backend clean up the job.
+ */
+ xe_sched_job_arm(job);
+ if (!xe_vm_in_lr_mode(vm))
+ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], job,
+ &job->drm.s_fence->finished);
+
+ if (xe_exec_queue_is_lr(q))
+ q->ring_ops->emit_job(job);
+ if (!xe_vm_in_lr_mode(vm))
+ xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+ xe_vm_reactivate_rebind(vm);
+
+ if (!err && !xe_vm_in_lr_mode(vm)) {
+ spin_lock(&xe->ttm.lru_lock);
+ ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
+ spin_unlock(&xe->ttm.lru_lock);
+ }
+
+err_repin:
+ if (!xe_vm_in_lr_mode(vm))
+ up_read(&vm->userptr.notifier_lock);
+err_put_job:
+ if (err)
+ xe_sched_job_put(job);
+err_exec:
+ drm_exec_fini(exec);
+err_unlock_list:
+ if (write_locked)
+ up_write(&vm->lock);
+ else
+ up_read(&vm->lock);
+ if (err == -EAGAIN && !skip_retry)
+ goto retry;
+err_syncs:
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_cleanup(&syncs[i]);
+ kfree(syncs);
+err_exec_queue:
+ xe_exec_queue_put(q);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_exec.h b/drivers/gpu/drm/xe/xe_exec.h
new file mode 100644
index 000000000..e4932494c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_exec.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_EXEC_H_
+#define _XE_EXEC_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
new file mode 100644
index 000000000..5093c56d6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -0,0 +1,862 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_exec_queue.h"
+
+#include <linux/nospec.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/xe_drm.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_hw_engine_class_sysfs.h"
+#include "xe_hw_fence.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+#include "xe_ring_ops_types.h"
+#include "xe_trace.h"
+#include "xe_vm.h"
+
+enum xe_exec_queue_sched_prop {
+ XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
+ XE_EXEC_QUEUE_TIMESLICE = 1,
+ XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
+ XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
+};
+
+static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
+ struct xe_vm *vm,
+ u32 logical_mask,
+ u16 width, struct xe_hw_engine *hwe,
+ u32 flags)
+{
+ struct xe_exec_queue *q;
+ struct xe_gt *gt = hwe->gt;
+ int err;
+ int i;
+
+ /* only kernel queues can be permanent */
+ XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
+
+ q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
+ if (!q)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&q->refcount);
+ q->flags = flags;
+ q->hwe = hwe;
+ q->gt = gt;
+ if (vm)
+ q->vm = xe_vm_get(vm);
+ q->class = hwe->class;
+ q->width = width;
+ q->logical_mask = logical_mask;
+ q->fence_irq = &gt->fence_irq[hwe->class];
+ q->ring_ops = gt->ring_ops[hwe->class];
+ q->ops = gt->exec_queue_ops;
+ INIT_LIST_HEAD(&q->compute.link);
+ INIT_LIST_HEAD(&q->multi_gt_link);
+
+ q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
+ q->sched_props.preempt_timeout_us =
+ hwe->eclass->sched_props.preempt_timeout_us;
+ q->sched_props.job_timeout_ms =
+ hwe->eclass->sched_props.job_timeout_ms;
+ if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
+ q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
+ q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
+ else
+ q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+
+ if (xe_exec_queue_is_parallel(q)) {
+ q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
+ q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
+ }
+ if (q->flags & EXEC_QUEUE_FLAG_VM) {
+ q->bind.fence_ctx = dma_fence_context_alloc(1);
+ q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
+ }
+
+ for (i = 0; i < width; ++i) {
+ err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
+ if (err)
+ goto err_lrc;
+ }
+
+ err = q->ops->init(q);
+ if (err)
+ goto err_lrc;
+
+	/*
+	 * Normally the user vm holds an rpm ref to keep the device
+	 * awake, and the context holds a ref for the vm. However, for some
+	 * engines we use the kernel's migrate vm underneath, which offers no
+	 * such rpm ref, or we lack a vm. Make sure we keep a ref here, so we
+	 * can perform GuC CT actions when needed. The caller is expected to
+	 * have already grabbed the rpm ref outside any sensitive locks.
+	 */
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
+ drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
+
+ return q;
+
+err_lrc:
+ for (i = i - 1; i >= 0; --i)
+ xe_lrc_finish(q->lrc + i);
+ kfree(q);
+ return ERR_PTR(err);
+}
+
+struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
+ u32 logical_mask, u16 width,
+ struct xe_hw_engine *hwe, u32 flags)
+{
+ struct xe_exec_queue *q;
+ int err;
+
+ if (vm) {
+ err = xe_vm_lock(vm, true);
+ if (err)
+ return ERR_PTR(err);
+ }
+ q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
+ if (vm)
+ xe_vm_unlock(vm);
+
+ return q;
+}
+
+struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
+ struct xe_vm *vm,
+ enum xe_engine_class class, u32 flags)
+{
+ struct xe_hw_engine *hwe, *hwe0 = NULL;
+ enum xe_hw_engine_id id;
+ u32 logical_mask = 0;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (xe_hw_engine_is_reserved(hwe))
+ continue;
+
+ if (hwe->class == class) {
+ logical_mask |= BIT(hwe->logical_instance);
+ if (!hwe0)
+ hwe0 = hwe;
+ }
+ }
+
+ if (!logical_mask)
+ return ERR_PTR(-ENODEV);
+
+ return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
+}
+
+void xe_exec_queue_destroy(struct kref *ref)
+{
+ struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
+ struct xe_exec_queue *eq, *next;
+
+ xe_exec_queue_last_fence_put_unlocked(q);
+ if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
+ list_for_each_entry_safe(eq, next, &q->multi_gt_list,
+ multi_gt_link)
+ xe_exec_queue_put(eq);
+ }
+
+ q->ops->fini(q);
+}
+
+void xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+ int i;
+
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_finish(q->lrc + i);
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
+ xe_device_mem_access_put(gt_to_xe(q->gt));
+ if (q->vm)
+ xe_vm_put(q->vm);
+
+ kfree(q);
+}
+
+void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
+{
+ switch (q->class) {
+ case XE_ENGINE_CLASS_RENDER:
+ sprintf(q->name, "rcs%d", instance);
+ break;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ sprintf(q->name, "vcs%d", instance);
+ break;
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ sprintf(q->name, "vecs%d", instance);
+ break;
+ case XE_ENGINE_CLASS_COPY:
+ sprintf(q->name, "bcs%d", instance);
+ break;
+ case XE_ENGINE_CLASS_COMPUTE:
+ sprintf(q->name, "ccs%d", instance);
+ break;
+ case XE_ENGINE_CLASS_OTHER:
+ sprintf(q->name, "gsccs%d", instance);
+ break;
+ default:
+ XE_WARN_ON(q->class);
+ }
+}
+
+struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
+{
+ struct xe_exec_queue *q;
+
+ mutex_lock(&xef->exec_queue.lock);
+ q = xa_load(&xef->exec_queue.xa, id);
+ if (q)
+ xe_exec_queue_get(q);
+ mutex_unlock(&xef->exec_queue.lock);
+
+ return q;
+}
+
+enum xe_exec_queue_priority
+xe_exec_queue_device_get_max_priority(struct xe_device *xe)
+{
+ return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
+ XE_EXEC_QUEUE_PRIORITY_NORMAL;
+}
+
+static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
+ u64 value, bool create)
+{
+ if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
+ return -EPERM;
+
+ return q->ops->set_priority(q, value);
+}
+
+static bool xe_exec_queue_enforce_schedule_limit(void)
+{
+#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
+ return true;
+#else
+ return !capable(CAP_SYS_NICE);
+#endif
+}
+
+static void
+xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
+ enum xe_exec_queue_sched_prop prop,
+ u32 *min, u32 *max)
+{
+ switch (prop) {
+ case XE_EXEC_QUEUE_JOB_TIMEOUT:
+ *min = eclass->sched_props.job_timeout_min;
+ *max = eclass->sched_props.job_timeout_max;
+ break;
+ case XE_EXEC_QUEUE_TIMESLICE:
+ *min = eclass->sched_props.timeslice_min;
+ *max = eclass->sched_props.timeslice_max;
+ break;
+ case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
+ *min = eclass->sched_props.preempt_timeout_min;
+ *max = eclass->sched_props.preempt_timeout_max;
+ break;
+ default:
+ break;
+ }
+#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
+ if (capable(CAP_SYS_NICE)) {
+ switch (prop) {
+ case XE_EXEC_QUEUE_JOB_TIMEOUT:
+ *min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
+ *max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
+ break;
+ case XE_EXEC_QUEUE_TIMESLICE:
+ *min = XE_HW_ENGINE_TIMESLICE_MIN;
+ *max = XE_HW_ENGINE_TIMESLICE_MAX;
+ break;
+ case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
+ *min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
+ *max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
+ break;
+ default:
+ break;
+ }
+ }
+#endif
+}
+
+static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
+ u64 value, bool create)
+{
+ u32 min = 0, max = 0;
+
+ xe_exec_queue_get_prop_minmax(q->hwe->eclass,
+ XE_EXEC_QUEUE_TIMESLICE, &min, &max);
+
+ if (xe_exec_queue_enforce_schedule_limit() &&
+ !xe_hw_engine_timeout_in_range(value, min, max))
+ return -EINVAL;
+
+ return q->ops->set_timeslice(q, value);
+}
+
+typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
+ struct xe_exec_queue *q,
+ u64 value, bool create);
+
+static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
+ [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
+ [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+};
+
+static int exec_queue_user_ext_set_property(struct xe_device *xe,
+ struct xe_exec_queue *q,
+ u64 extension,
+ bool create)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >=
+ ARRAY_SIZE(exec_queue_set_property_funcs)) ||
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+ if (!exec_queue_set_property_funcs[idx])
+ return -EINVAL;
+
+ return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
+}
+
+typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
+ struct xe_exec_queue *q,
+ u64 extension,
+ bool create);
+
+static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
+ [DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 16
+static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
+ u64 extensions, int ext_number, bool create)
+{
+ u64 __user *address = u64_to_user_ptr(extensions);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.name >=
+ ARRAY_SIZE(exec_queue_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name,
+ ARRAY_SIZE(exec_queue_user_extension_funcs));
+ err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
+ if (XE_IOCTL_DBG(xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return exec_queue_user_extensions(xe, q, ext.next_extension,
+ ++ext_number, create);
+
+ return 0;
+}
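
exec_queue_user_extensions() walks a user-space linked list: every node
begins with a struct drm_xe_user_extension whose next_extension member is a
user pointer to the next node, and the walk recurses at most
MAX_USER_EXTENSIONS (16) deep. A hedged userspace sketch of building such a
chain with two set-property nodes (struct layouts per the xe uAPI as
understood here; verify against xe_drm.h):

#include <stdint.h>
#include <string.h>
#include <drm/xe_drm.h>

/* Chain two set-property extensions; pass (uintptr_t)&ext[0] as the
 * ioctl's 'extensions' field. */
static void build_ext_chain(struct drm_xe_ext_set_property ext[2],
			    uint64_t timeslice_us)
{
	memset(ext, 0, 2 * sizeof(*ext));

	ext[0].base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY;
	ext[0].base.next_extension = (uintptr_t)&ext[1];  /* link to node 1 */
	ext[0].property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY;
	ext[0].value = 0;				  /* low priority */

	ext[1].base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY;
	ext[1].base.next_extension = 0;			  /* end of chain */
	ext[1].property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE;
	ext[1].value = timeslice_us;
}
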
+
+static const enum xe_engine_class user_to_xe_engine_class[] = {
+ [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
+ [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
+ [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
+ [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
+};
+
+static struct xe_hw_engine *
+find_hw_engine(struct xe_device *xe,
+ struct drm_xe_engine_class_instance eci)
+{
+ u32 idx;
+
+ if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
+ return NULL;
+
+ if (eci.gt_id >= xe->info.gt_count)
+ return NULL;
+
+ idx = array_index_nospec(eci.engine_class,
+ ARRAY_SIZE(user_to_xe_engine_class));
+
+ return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
+ user_to_xe_engine_class[idx],
+ eci.engine_instance, true);
+}
+
+static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
+ struct drm_xe_engine_class_instance *eci,
+ u16 width, u16 num_placements)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 logical_mask = 0;
+
+ if (XE_IOCTL_DBG(xe, width != 1))
+ return 0;
+ if (XE_IOCTL_DBG(xe, num_placements != 1))
+ return 0;
+ if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
+ return 0;
+
+ eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (xe_hw_engine_is_reserved(hwe))
+ continue;
+
+ if (hwe->class ==
+ user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
+ logical_mask |= BIT(hwe->logical_instance);
+ }
+
+ return logical_mask;
+}
+
+static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
+ struct drm_xe_engine_class_instance *eci,
+ u16 width, u16 num_placements)
+{
+ int len = width * num_placements;
+ int i, j, n;
+ u16 class;
+ u16 gt_id;
+ u32 return_mask = 0, prev_mask;
+
+ if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
+ len > 1))
+ return 0;
+
+ for (i = 0; i < width; ++i) {
+ u32 current_mask = 0;
+
+ for (j = 0; j < num_placements; ++j) {
+ struct xe_hw_engine *hwe;
+
+ n = j * width + i;
+
+ hwe = find_hw_engine(xe, eci[n]);
+ if (XE_IOCTL_DBG(xe, !hwe))
+ return 0;
+
+ if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
+ return 0;
+
+ if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
+ XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
+ return 0;
+
+ class = eci[n].engine_class;
+ gt_id = eci[n].gt_id;
+
+ if (width == 1 || !i)
+ return_mask |= BIT(eci[n].engine_instance);
+ current_mask |= BIT(eci[n].engine_instance);
+ }
+
+ /* Parallel submissions must be logically contiguous */
+ if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
+ return 0;
+
+ prev_mask = current_mask;
+ }
+
+ return return_mask;
+}
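
The instances array is indexed as n = j * width + i, i.e. placement-major:
each placement's width engines are contiguous, and the final check requires
every width slot to advance to the next logical instance (current_mask ==
prev_mask << 1). A worked example for width = 2, num_placements = 2:

/*
 * eci[0] = (i = 0, j = 0)  slot 0 of placement 0
 * eci[1] = (i = 1, j = 0)  slot 1 of placement 0
 * eci[2] = (i = 0, j = 1)  slot 0 of placement 1
 * eci[3] = (i = 1, j = 1)  slot 1 of placement 1
 *
 * Valid: placement 0 on instances {0, 1}, placement 1 on {2, 3}:
 *   i = 0 gathers instances 0 and 2 -> mask 0b0101
 *   i = 1 gathers instances 1 and 3 -> mask 0b1010 == 0b0101 << 1, OK
 * Invalid: a placement on instances {0, 2}: 0b0100 != 0b0001 << 1.
 */
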
+
+int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_exec_queue_create *args = data;
+ struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
+ struct drm_xe_engine_class_instance __user *user_eci =
+ u64_to_user_ptr(args->instances);
+ struct xe_hw_engine *hwe;
+ struct xe_vm *vm, *migrate_vm;
+ struct xe_gt *gt;
+ struct xe_exec_queue *q = NULL;
+ u32 logical_mask;
+ u32 id;
+ u32 len;
+ int err;
+
+ if (XE_IOCTL_DBG(xe, args->flags) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ len = args->width * args->num_placements;
+ if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
+ return -EINVAL;
+
+ err = __copy_from_user(eci, user_eci,
+ sizeof(struct drm_xe_engine_class_instance) *
+ len);
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
+ return -EINVAL;
+
+ if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
+ for_each_gt(gt, xe, id) {
+ struct xe_exec_queue *new;
+
+ if (xe_gt_is_media_type(gt))
+ continue;
+
+ eci[0].gt_id = gt->info.id;
+ logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
+ args->width,
+ args->num_placements);
+ if (XE_IOCTL_DBG(xe, !logical_mask))
+ return -EINVAL;
+
+ hwe = find_hw_engine(xe, eci[0]);
+ if (XE_IOCTL_DBG(xe, !hwe))
+ return -EINVAL;
+
+			/* The migration vm doesn't hold an rpm ref */
+ xe_device_mem_access_get(xe);
+
+ migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
+ new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
+ args->width, hwe,
+ EXEC_QUEUE_FLAG_PERSISTENT |
+ EXEC_QUEUE_FLAG_VM |
+ (id ?
+ EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
+ 0));
+
+ xe_device_mem_access_put(xe); /* now held by engine */
+
+ xe_vm_put(migrate_vm);
+ if (IS_ERR(new)) {
+ err = PTR_ERR(new);
+ if (q)
+ goto put_exec_queue;
+ return err;
+ }
+ if (id == 0)
+ q = new;
+ else
+ list_add_tail(&new->multi_gt_list,
+ &q->multi_gt_link);
+ }
+ } else {
+ gt = xe_device_get_gt(xe, eci[0].gt_id);
+ logical_mask = calc_validate_logical_mask(xe, gt, eci,
+ args->width,
+ args->num_placements);
+ if (XE_IOCTL_DBG(xe, !logical_mask))
+ return -EINVAL;
+
+ hwe = find_hw_engine(xe, eci[0]);
+ if (XE_IOCTL_DBG(xe, !hwe))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -ENOENT;
+
+ err = down_read_interruptible(&vm->lock);
+ if (err) {
+ xe_vm_put(vm);
+ return err;
+ }
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ up_read(&vm->lock);
+ xe_vm_put(vm);
+ return -ENOENT;
+ }
+
+ q = xe_exec_queue_create(xe, vm, logical_mask,
+ args->width, hwe, 0);
+ up_read(&vm->lock);
+ xe_vm_put(vm);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ q->compute.context = dma_fence_context_alloc(1);
+ spin_lock_init(&q->compute.lock);
+
+ err = xe_vm_add_compute_exec_queue(vm, q);
+ if (XE_IOCTL_DBG(xe, err))
+ goto put_exec_queue;
+ }
+ }
+
+ if (args->extensions) {
+ err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
+ if (XE_IOCTL_DBG(xe, err))
+ goto kill_exec_queue;
+ }
+
+ mutex_lock(&xef->exec_queue.lock);
+ err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
+ mutex_unlock(&xef->exec_queue.lock);
+ if (err)
+ goto kill_exec_queue;
+
+ args->exec_queue_id = id;
+
+ return 0;
+
+kill_exec_queue:
+ xe_exec_queue_kill(q);
+put_exec_queue:
+ xe_exec_queue_put(q);
+ return err;
+}
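
A hedged userspace sketch of the common (non-bind) path through this ioctl:
one render engine, one placement, on an existing VM. Field names follow the
xe uAPI as understood here; verify against xe_drm.h:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int create_render_queue(int fd, uint32_t vm_id, uint32_t *queue_id)
{
	struct drm_xe_engine_class_instance eci = {
		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create create;
	int err;

	memset(&create, 0, sizeof(create));
	create.width = 1;		/* one BB per exec */
	create.num_placements = 1;	/* no virtual-engine balancing */
	create.vm_id = vm_id;
	create.instances = (uintptr_t)&eci;

	err = drmIoctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
	if (!err)
		*queue_id = create.exec_queue_id;
	return err;
}
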
+
+int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_exec_queue_get_property *args = data;
+ struct xe_exec_queue *q;
+ int ret;
+
+ if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+ if (XE_IOCTL_DBG(xe, !q))
+ return -ENOENT;
+
+ switch (args->property) {
+ case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
+ args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ xe_exec_queue_put(q);
+
+ return ret;
+}
+
+/**
+ * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
+ * @q: The exec_queue
+ *
+ * Return: True if the exec_queue is long-running, false otherwise.
+ */
+bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
+{
+ return q->vm && xe_vm_in_lr_mode(q->vm) &&
+ !(q->flags & EXEC_QUEUE_FLAG_VM);
+}
+
+static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
+{
+ return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
+}
+
+/**
+ * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
+ * @q: The exec_queue
+ *
+ * Return: True if the exec_queue's ring is full, false otherwise.
+ */
+bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
+{
+ struct xe_lrc *lrc = q->lrc;
+ s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
+
+ return xe_exec_queue_num_job_inflight(q) >= max_job;
+}
+
+/**
+ * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
+ * @q: The exec_queue
+ *
+ * FIXME: Need to determine what to use as the short-lived
+ * timeline lock for the exec_queues, so that the return value
+ * of this function becomes more than just an advisory
+ * snapshot in time. The timeline lock must protect the
+ * seqno from racing submissions on the same exec_queue.
+ * Typically vm->resv, but user-created timeline locks use the migrate vm
+ * and never grab the migrate vm->resv, so we have a race there.
+ *
+ * Return: True if the exec_queue is idle, false otherwise.
+ */
+bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
+{
+ if (xe_exec_queue_is_parallel(q)) {
+ int i;
+
+ for (i = 0; i < q->width; ++i) {
+ if (xe_lrc_seqno(&q->lrc[i]) !=
+ q->lrc[i].fence_ctx.next_seqno - 1)
+ return false;
+ }
+
+ return true;
+ }
+
+ return xe_lrc_seqno(&q->lrc[0]) ==
+ q->lrc[0].fence_ctx.next_seqno - 1;
+}
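
Both helpers above rely on simple seqno arithmetic: next_seqno is bumped on
each emit, the HW writes a job's seqno back on completion, and an LRC starts
out with its readback equal to next_seqno - 1 so an empty ring reads as idle.
A worked example under those assumptions, with initial seqno S:

/*
 * After emitting 3 jobs: next_seqno = S + 3.
 *   readback = S - 1  ->  inflight = (S + 3) - (S - 1) - 1 = 3, busy
 *   readback = S + 1  ->  inflight = 1, busy
 *   readback = S + 2  ->  inflight = 0; is_idle() is true, since
 *                         readback == next_seqno - 1
 */
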
+
+void xe_exec_queue_kill(struct xe_exec_queue *q)
+{
+ struct xe_exec_queue *eq = q, *next;
+
+ list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
+ multi_gt_link) {
+ q->ops->kill(eq);
+ xe_vm_remove_compute_exec_queue(q->vm, eq);
+ }
+
+ q->ops->kill(q);
+ xe_vm_remove_compute_exec_queue(q->vm, q);
+}
+
+int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_exec_queue_destroy *args = data;
+ struct xe_exec_queue *q;
+
+ if (XE_IOCTL_DBG(xe, args->pad) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ mutex_lock(&xef->exec_queue.lock);
+ q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
+ mutex_unlock(&xef->exec_queue.lock);
+ if (XE_IOCTL_DBG(xe, !q))
+ return -ENOENT;
+
+ xe_exec_queue_kill(q);
+
+ trace_xe_exec_queue_close(q);
+ xe_exec_queue_put(q);
+
+ return 0;
+}
+
+static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
+ struct xe_vm *vm)
+{
+ if (q->flags & EXEC_QUEUE_FLAG_VM)
+ lockdep_assert_held(&vm->lock);
+ else
+ xe_vm_assert_held(vm);
+}
+
+/**
+ * xe_exec_queue_last_fence_put() - Drop ref to last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ */
+void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
+{
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+
+ if (q->last_fence) {
+ dma_fence_put(q->last_fence);
+ q->last_fence = NULL;
+ }
+}
+
+/**
+ * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
+ * @q: The exec queue
+ *
+ * Only safe to be called from xe_exec_queue_destroy().
+ */
+void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
+{
+ if (q->last_fence) {
+ dma_fence_put(q->last_fence);
+ q->last_fence = NULL;
+ }
+}
+
+/**
+ * xe_exec_queue_last_fence_get() - Get last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ *
+ * Get last fence, takes a ref
+ *
+ * Returns: last fence if not signaled, dma fence stub if signaled
+ */
+struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
+ struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+
+ if (q->last_fence &&
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
+ xe_exec_queue_last_fence_put(q, vm);
+
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
+}
+
+/**
+ * xe_exec_queue_last_fence_set() - Set last fence
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind or exec for
+ * @fence: The fence
+ *
+ * Set the last fence for the engine. Takes a reference on the fence; when
+ * closing the engine, xe_exec_queue_last_fence_put() should be called.
+ */
+void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
+ struct dma_fence *fence)
+{
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+
+ xe_exec_queue_last_fence_put(q, vm);
+ q->last_fence = dma_fence_get(fence);
+}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
new file mode 100644
index 000000000..d959cc4a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_EXEC_QUEUE_H_
+#define _XE_EXEC_QUEUE_H_
+
+#include "xe_exec_queue_types.h"
+#include "xe_vm_types.h"
+
+struct drm_device;
+struct drm_file;
+struct xe_device;
+struct xe_file;
+
+struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
+ u32 logical_mask, u16 width,
+ struct xe_hw_engine *hw_engine, u32 flags);
+struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
+ struct xe_vm *vm,
+ enum xe_engine_class class, u32 flags);
+
+void xe_exec_queue_fini(struct xe_exec_queue *q);
+void xe_exec_queue_destroy(struct kref *ref);
+void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);
+
+struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
+
+static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
+{
+ kref_get(&q->refcount);
+ return q;
+}
+
+static inline void xe_exec_queue_put(struct xe_exec_queue *q)
+{
+ kref_put(&q->refcount, xe_exec_queue_destroy);
+}
+
+static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
+{
+ return q->width > 1;
+}
+
+bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
+
+bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
+
+bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
+
+void xe_exec_queue_kill(struct xe_exec_queue *q);
+
+int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
+
+void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
+void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
+struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
+ struct xe_vm *vm);
+void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
+ struct dma_fence *fence);
+
+#endif
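
One discipline worth calling out: xe_exec_queue_lookup() returns with a
reference already taken, so every successful lookup must be paired with
xe_exec_queue_put(). A kernel-side sketch mirroring the get-property ioctl
above (the pattern, not a new API):

static int demo_query_ban(struct xe_file *xef, u32 id, bool *banned)
{
	struct xe_exec_queue *q;

	q = xe_exec_queue_lookup(xef, id);	/* takes a reference */
	if (!q)
		return -ENOENT;

	*banned = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);

	xe_exec_queue_put(q);			/* drop our reference */
	return 0;
}
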
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
new file mode 100644
index 000000000..22e1dffd0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_EXEC_QUEUE_TYPES_H_
+#define _XE_EXEC_QUEUE_TYPES_H_
+
+#include <linux/kref.h>
+
+#include <drm/gpu_scheduler.h>
+
+#include "xe_gpu_scheduler_types.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_fence_types.h"
+#include "xe_lrc_types.h"
+
+struct xe_execlist_exec_queue;
+struct xe_gt;
+struct xe_guc_exec_queue;
+struct xe_hw_engine;
+struct xe_vm;
+
+enum xe_exec_queue_priority {
+ XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
+ XE_EXEC_QUEUE_PRIORITY_LOW = 0,
+ XE_EXEC_QUEUE_PRIORITY_NORMAL,
+ XE_EXEC_QUEUE_PRIORITY_HIGH,
+ XE_EXEC_QUEUE_PRIORITY_KERNEL,
+
+ XE_EXEC_QUEUE_PRIORITY_COUNT
+};
+
+/**
+ * struct xe_exec_queue - Execution queue
+ *
+ * Contains all state necessary for submissions. Can either be a user object or
+ * a kernel object.
+ */
+struct xe_exec_queue {
+ /** @gt: graphics tile this exec queue can submit to */
+ struct xe_gt *gt;
+ /**
+	 * @hwe: A hardware engine of the same class. May (physical engine) or
+	 * may not (virtual engine) be where jobs actually end up running.
+	 * Should never really be used for submissions.
+ */
+ struct xe_hw_engine *hwe;
+ /** @refcount: ref count of this exec queue */
+ struct kref refcount;
+ /** @vm: VM (address space) for this exec queue */
+ struct xe_vm *vm;
+ /** @class: class of this exec queue */
+ enum xe_engine_class class;
+ /**
+	 * @logical_mask: logical mask of where jobs submitted to this exec queue can run
+ */
+ u32 logical_mask;
+ /** @name: name of this exec queue */
+ char name[MAX_FENCE_NAME_LEN];
+	/** @width: width (number of BBs submitted per exec) of this exec queue */
+ u16 width;
+ /** @fence_irq: fence IRQ used to signal job completion */
+ struct xe_hw_fence_irq *fence_irq;
+
+ /**
+	 * @last_fence: last fence on the exec queue, protected by vm->lock in
+	 * write mode for bind exec queues, by the dma-resv lock for non-bind
+	 * exec queues
+ */
+ struct dma_fence *last_fence;
+
+/* queue no longer allowed to submit */
+#define EXEC_QUEUE_FLAG_BANNED BIT(0)
+/* queue used for kernel submission only */
+#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
+/* kernel engine only destroyed at driver unload */
+#define EXEC_QUEUE_FLAG_PERMANENT BIT(2)
+/* queue keeps running pending jobs after destroy ioctl */
+#define EXEC_QUEUE_FLAG_PERSISTENT BIT(3)
+/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
+#define EXEC_QUEUE_FLAG_VM BIT(4)
+/* child of VM queue for multi-tile VM jobs */
+#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
+/* kernel exec_queue only, set priority to highest level */
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6)
+
+ /**
+	 * @flags: flags for this exec queue, should be statically set up aside
+	 * from the ban bit
+ */
+ unsigned long flags;
+
+ union {
+ /** @multi_gt_list: list head for VM bind engines if multi-GT */
+ struct list_head multi_gt_list;
+ /** @multi_gt_link: link for VM bind engines if multi-GT */
+ struct list_head multi_gt_link;
+ };
+
+ union {
+ /** @execlist: execlist backend specific state for exec queue */
+ struct xe_execlist_exec_queue *execlist;
+ /** @guc: GuC backend specific state for exec queue */
+ struct xe_guc_exec_queue *guc;
+ };
+
+ union {
+ /**
+ * @parallel: parallel submission state
+ */
+ struct {
+ /** @composite_fence_ctx: context composite fence */
+ u64 composite_fence_ctx;
+ /** @composite_fence_seqno: seqno for composite fence */
+ u32 composite_fence_seqno;
+ } parallel;
+ /**
+ * @bind: bind submission state
+ */
+ struct {
+ /** @fence_ctx: context bind fence */
+ u64 fence_ctx;
+ /** @fence_seqno: seqno for bind fence */
+ u32 fence_seqno;
+ } bind;
+ };
+
+ /** @sched_props: scheduling properties */
+ struct {
+ /** @timeslice_us: timeslice period in micro-seconds */
+ u32 timeslice_us;
+ /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ u32 preempt_timeout_us;
+ /** @job_timeout_ms: job timeout in milliseconds */
+ u32 job_timeout_ms;
+ /** @priority: priority of this exec queue */
+ enum xe_exec_queue_priority priority;
+ } sched_props;
+
+ /** @compute: compute exec queue state */
+ struct {
+ /** @pfence: preemption fence */
+ struct dma_fence *pfence;
+ /** @context: preemption fence context */
+ u64 context;
+ /** @seqno: preemption fence seqno */
+ u32 seqno;
+ /** @link: link into VM's list of exec queues */
+ struct list_head link;
+ /** @lock: preemption fences lock */
+ spinlock_t lock;
+ } compute;
+
+ /** @ops: submission backend exec queue operations */
+ const struct xe_exec_queue_ops *ops;
+
+ /** @ring_ops: ring operations for this exec queue */
+ const struct xe_ring_ops *ring_ops;
+ /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
+ struct drm_sched_entity *entity;
+ /**
+	 * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed.
+ * Protected by @vm's resv. Unused if @vm == NULL.
+ */
+ u64 tlb_flush_seqno;
+ /** @lrc: logical ring context for this exec queue */
+ struct xe_lrc lrc[];
+};
+
+/**
+ * struct xe_exec_queue_ops - Submission backend exec queue operations
+ */
+struct xe_exec_queue_ops {
+ /** @init: Initialize exec queue for submission backend */
+ int (*init)(struct xe_exec_queue *q);
+ /** @kill: Kill inflight submissions for backend */
+ void (*kill)(struct xe_exec_queue *q);
+ /** @fini: Fini exec queue for submission backend */
+ void (*fini)(struct xe_exec_queue *q);
+ /** @set_priority: Set priority for exec queue */
+ int (*set_priority)(struct xe_exec_queue *q,
+ enum xe_exec_queue_priority priority);
+ /** @set_timeslice: Set timeslice for exec queue */
+ int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
+ /** @set_preempt_timeout: Set preemption timeout for exec queue */
+ int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
+ /** @set_job_timeout: Set job timeout for exec queue */
+ int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
+ /**
+	 * @suspend: Suspend exec queue from executing, may be called multiple
+	 * times in a row before resume with the caveat that suspend_wait must
+	 * return before suspend is called again.
+ */
+ int (*suspend)(struct xe_exec_queue *q);
+ /**
+	 * @suspend_wait: Wait for an exec queue to suspend executing, should be
+	 * called after suspend.
+ */
+ void (*suspend_wait)(struct xe_exec_queue *q);
+ /**
+	 * @resume: Resume exec queue execution; the exec queue must be in a
+	 * suspended state, and the dma fence returned from the most recent
+	 * suspend call must be signalled when this function is called.
+ */
+ void (*resume)(struct xe_exec_queue *q);
+ /** @reset_status: check exec queue reset status */
+ bool (*reset_status)(struct xe_exec_queue *q);
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
new file mode 100644
index 000000000..acb4d9f38
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_execlist.h"
+
+#include <drm/drm_managed.h>
+
+#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gpu_commands.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_lrc_layout.h"
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_gt.h"
+#include "xe_hw_fence.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_mmio.h"
+#include "xe_mocs.h"
+#include "xe_ring_ops_types.h"
+#include "xe_sched_job.h"
+
+#define XE_EXECLIST_HANG_LIMIT 1
+
+#define SW_CTX_ID_SHIFT 37
+#define SW_CTX_ID_WIDTH 11
+#define XEHP_SW_CTX_ID_SHIFT 39
+#define XEHP_SW_CTX_ID_WIDTH 16
+
+#define SW_CTX_ID \
+ GENMASK_ULL(SW_CTX_ID_WIDTH + SW_CTX_ID_SHIFT - 1, \
+ SW_CTX_ID_SHIFT)
+
+#define XEHP_SW_CTX_ID \
+ GENMASK_ULL(XEHP_SW_CTX_ID_WIDTH + XEHP_SW_CTX_ID_SHIFT - 1, \
+ XEHP_SW_CTX_ID_SHIFT)
+
+
+static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
+ u32 ctx_id)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 lrc_desc;
+
+ lrc_desc = xe_lrc_descriptor(lrc);
+
+ if (GRAPHICS_VERx100(xe) >= 1250) {
+ xe_gt_assert(hwe->gt, FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
+ lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
+ } else {
+ xe_gt_assert(hwe->gt, FIELD_FIT(SW_CTX_ID, ctx_id));
+ lrc_desc |= FIELD_PREP(SW_CTX_ID, ctx_id);
+ }
+
+ if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
+ xe_mmio_write32(hwe->gt, RCU_MODE,
+ _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
+ lrc->ring.old_tail = lrc->ring.tail;
+
+ /*
+ * Make sure the context image is complete before we submit it to HW.
+ *
+	 * Ostensibly, writes (including the WCB) should be flushed prior to
+	 * an uncached write such as our mmio register access. However, the
+	 * empirical evidence (esp. on Braswell) suggests that the WC write
+	 * into memory may not be visible to the HW prior to the completion
+	 * of the UC register write, and that we may begin execution from the
+	 * context before its image is complete, leading to invalid PD chasing.
+ */
+ wmb();
+
+ xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
+ xe_bo_ggtt_addr(hwe->hwsp));
+ xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
+ xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
+ _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
+ lower_32_bits(lrc_desc));
+ xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
+ upper_32_bits(lrc_desc));
+ xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
+ EL_CTRL_LOAD);
+}
+
+static void __xe_execlist_port_start(struct xe_execlist_port *port,
+ struct xe_execlist_exec_queue *exl)
+{
+ struct xe_device *xe = gt_to_xe(port->hwe->gt);
+ int max_ctx = FIELD_MAX(SW_CTX_ID);
+
+ if (GRAPHICS_VERx100(xe) >= 1250)
+ max_ctx = FIELD_MAX(XEHP_SW_CTX_ID);
+
+ xe_execlist_port_assert_held(port);
+
+ if (port->running_exl != exl || !exl->has_run) {
+ port->last_ctx_id++;
+
+ /* 0 is reserved for the kernel context */
+ if (port->last_ctx_id > max_ctx)
+ port->last_ctx_id = 1;
+ }
+
+ __start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
+ port->running_exl = exl;
+ exl->has_run = true;
+}
+
+static void __xe_execlist_port_idle(struct xe_execlist_port *port)
+{
+ u32 noop[2] = { MI_NOOP, MI_NOOP };
+
+ xe_execlist_port_assert_held(port);
+
+ if (!port->running_exl)
+ return;
+
+ xe_lrc_write_ring(&port->hwe->kernel_lrc, noop, sizeof(noop));
+ __start_lrc(port->hwe, &port->hwe->kernel_lrc, 0);
+ port->running_exl = NULL;
+}
+
+static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
+{
+ struct xe_lrc *lrc = exl->q->lrc;
+
+ return lrc->ring.tail == lrc->ring.old_tail;
+}
+
+static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
+{
+ struct xe_execlist_exec_queue *exl = NULL;
+ int i;
+
+ xe_execlist_port_assert_held(port);
+
+ for (i = ARRAY_SIZE(port->active) - 1; i >= 0; i--) {
+ while (!list_empty(&port->active[i])) {
+ exl = list_first_entry(&port->active[i],
+ struct xe_execlist_exec_queue,
+ active_link);
+ list_del(&exl->active_link);
+
+ if (xe_execlist_is_idle(exl)) {
+ exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
+ continue;
+ }
+
+ list_add_tail(&exl->active_link, &port->active[i]);
+ __xe_execlist_port_start(port, exl);
+ return;
+ }
+ }
+
+ __xe_execlist_port_idle(port);
+}
+
+static u64 read_execlist_status(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ u32 hi, lo;
+
+ lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
+ hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
+
+ return lo | (u64)hi << 32;
+}
+
+static void xe_execlist_port_irq_handler_locked(struct xe_execlist_port *port)
+{
+ u64 status;
+
+ xe_execlist_port_assert_held(port);
+
+ status = read_execlist_status(port->hwe);
+ if (status & BIT(7))
+ return;
+
+ __xe_execlist_port_start_next_active(port);
+}
+
+static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
+ u16 intr_vec)
+{
+ struct xe_execlist_port *port = hwe->exl_port;
+
+ spin_lock(&port->lock);
+ xe_execlist_port_irq_handler_locked(port);
+ spin_unlock(&port->lock);
+}
+
+static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
+ enum xe_exec_queue_priority priority)
+{
+ xe_execlist_port_assert_held(port);
+
+ if (port->running_exl && port->running_exl->active_priority >= priority)
+ return;
+
+ __xe_execlist_port_start_next_active(port);
+}
+
+static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
+{
+ struct xe_execlist_port *port = exl->port;
+ enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
+
+ XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
+ XE_WARN_ON(priority < 0);
+ XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active));
+
+ spin_lock_irq(&port->lock);
+
+ if (exl->active_priority != priority &&
+ exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET) {
+ /* Priority changed, move it to the right list */
+ list_del(&exl->active_link);
+ exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
+ }
+
+ if (exl->active_priority == XE_EXEC_QUEUE_PRIORITY_UNSET) {
+ exl->active_priority = priority;
+ list_add_tail(&exl->active_link, &port->active[priority]);
+ }
+
+ xe_execlist_port_wake_locked(exl->port, priority);
+
+ spin_unlock_irq(&port->lock);
+}
+
+static void xe_execlist_port_irq_fail_timer(struct timer_list *timer)
+{
+ struct xe_execlist_port *port =
+ container_of(timer, struct xe_execlist_port, irq_fail);
+
+ spin_lock_irq(&port->lock);
+ xe_execlist_port_irq_handler_locked(port);
+ spin_unlock_irq(&port->lock);
+
+ port->irq_fail.expires = jiffies + msecs_to_jiffies(1000);
+ add_timer(&port->irq_fail);
+}
+
+struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
+ struct xe_hw_engine *hwe)
+{
+ struct drm_device *drm = &xe->drm;
+ struct xe_execlist_port *port;
+ int i;
+
+ port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->hwe = hwe;
+
+ spin_lock_init(&port->lock);
+ for (i = 0; i < ARRAY_SIZE(port->active); i++)
+ INIT_LIST_HEAD(&port->active[i]);
+
+ port->last_ctx_id = 1;
+ port->running_exl = NULL;
+
+ hwe->irq_handler = xe_execlist_port_irq_handler;
+
+ /* TODO: Fix the interrupt code so it doesn't race like mad */
+ timer_setup(&port->irq_fail, xe_execlist_port_irq_fail_timer, 0);
+ port->irq_fail.expires = jiffies + msecs_to_jiffies(1000);
+ add_timer(&port->irq_fail);
+
+ return port;
+}
+
+void xe_execlist_port_destroy(struct xe_execlist_port *port)
+{
+ del_timer(&port->irq_fail);
+
+ /* Prevent an interrupt while we're destroying */
+ spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+ port->hwe->irq_handler = NULL;
+ spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+}
+
+static struct dma_fence *
+execlist_run_job(struct drm_sched_job *drm_job)
+{
+ struct xe_sched_job *job = to_xe_sched_job(drm_job);
+ struct xe_exec_queue *q = job->q;
+ struct xe_execlist_exec_queue *exl = job->q->execlist;
+
+ q->ring_ops->emit_job(job);
+ xe_execlist_make_active(exl);
+
+ return dma_fence_get(job->fence);
+}
+
+static void execlist_job_free(struct drm_sched_job *drm_job)
+{
+ struct xe_sched_job *job = to_xe_sched_job(drm_job);
+
+ xe_sched_job_put(job);
+}
+
+static const struct drm_sched_backend_ops drm_sched_ops = {
+ .run_job = execlist_run_job,
+ .free_job = execlist_job_free,
+};
+
+static int execlist_exec_queue_init(struct xe_exec_queue *q)
+{
+ struct drm_gpu_scheduler *sched;
+ struct xe_execlist_exec_queue *exl;
+ struct xe_device *xe = gt_to_xe(q->gt);
+ int err;
+
+ xe_assert(xe, !xe_device_uc_enabled(xe));
+
+ drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n");
+
+ exl = kzalloc(sizeof(*exl), GFP_KERNEL);
+ if (!exl)
+ return -ENOMEM;
+
+ exl->q = q;
+
+ err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
+ q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
+ XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
+ NULL, NULL, q->hwe->name,
+ gt_to_xe(q->gt)->drm.dev);
+ if (err)
+ goto err_free;
+
+ sched = &exl->sched;
+ err = drm_sched_entity_init(&exl->entity, 0, &sched, 1, NULL);
+ if (err)
+ goto err_sched;
+
+ exl->port = q->hwe->exl_port;
+ exl->has_run = false;
+ exl->active_priority = XE_EXEC_QUEUE_PRIORITY_UNSET;
+ q->execlist = exl;
+ q->entity = &exl->entity;
+
+ xe_exec_queue_assign_name(q, ffs(q->logical_mask) - 1);
+
+ return 0;
+
+err_sched:
+ drm_sched_fini(&exl->sched);
+err_free:
+ kfree(exl);
+ return err;
+}
+
+static void execlist_exec_queue_fini_async(struct work_struct *w)
+{
+ struct xe_execlist_exec_queue *ee =
+ container_of(w, struct xe_execlist_exec_queue, fini_async);
+ struct xe_exec_queue *q = ee->q;
+ struct xe_execlist_exec_queue *exl = q->execlist;
+ struct xe_device *xe = gt_to_xe(q->gt);
+ unsigned long flags;
+
+ xe_assert(xe, !xe_device_uc_enabled(xe));
+
+ spin_lock_irqsave(&exl->port->lock, flags);
+ if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET))
+ list_del(&exl->active_link);
+ spin_unlock_irqrestore(&exl->port->lock, flags);
+
+ drm_sched_entity_fini(&exl->entity);
+ drm_sched_fini(&exl->sched);
+ kfree(exl);
+
+ xe_exec_queue_fini(q);
+}
+
+static void execlist_exec_queue_kill(struct xe_exec_queue *q)
+{
+ /* NIY */
+}
+
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+ INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
+ queue_work(system_unbound_wq, &q->execlist->fini_async);
+}
+
+static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
+ enum xe_exec_queue_priority priority)
+{
+ /* NIY */
+ return 0;
+}
+
+static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
+{
+ /* NIY */
+ return 0;
+}
+
+static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
+ u32 preempt_timeout_us)
+{
+ /* NIY */
+ return 0;
+}
+
+static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q,
+ u32 job_timeout_ms)
+{
+ /* NIY */
+ return 0;
+}
+
+static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
+{
+ /* NIY */
+ return 0;
+}
+
+static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
+{
+ /* NIY */
+}
+
+static void execlist_exec_queue_resume(struct xe_exec_queue *q)
+{
+ /* NIY */
+}
+
+static bool execlist_exec_queue_reset_status(struct xe_exec_queue *q)
+{
+ /* NIY */
+ return false;
+}
+
+static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
+ .init = execlist_exec_queue_init,
+ .kill = execlist_exec_queue_kill,
+ .fini = execlist_exec_queue_fini,
+ .set_priority = execlist_exec_queue_set_priority,
+ .set_timeslice = execlist_exec_queue_set_timeslice,
+ .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
+ .set_job_timeout = execlist_exec_queue_set_job_timeout,
+ .suspend = execlist_exec_queue_suspend,
+ .suspend_wait = execlist_exec_queue_suspend_wait,
+ .resume = execlist_exec_queue_resume,
+ .reset_status = execlist_exec_queue_reset_status,
+};
+
+int xe_execlist_init(struct xe_gt *gt)
+{
+ /* GuC submission enabled, nothing to do */
+ if (xe_device_uc_enabled(gt_to_xe(gt)))
+ return 0;
+
+ gt->exec_queue_ops = &execlist_exec_queue_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_execlist.h b/drivers/gpu/drm/xe/xe_execlist.h
new file mode 100644
index 000000000..26f600ac8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_execlist.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_EXECLIST_H_
+#define _XE_EXECLIST_H_
+
+#include "xe_execlist_types.h"
+
+struct xe_device;
+struct xe_gt;
+
+#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock)
+
+int xe_execlist_init(struct xe_gt *gt);
+struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
+ struct xe_hw_engine *hwe);
+void xe_execlist_port_destroy(struct xe_execlist_port *port);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
new file mode 100644
index 000000000..f94bbf4c5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_EXECLIST_TYPES_H_
+#define _XE_EXECLIST_TYPES_H_
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "xe_exec_queue_types.h"
+
+struct xe_hw_engine;
+struct xe_execlist_exec_queue;
+
+struct xe_execlist_port {
+ struct xe_hw_engine *hwe;
+
+ spinlock_t lock;
+
+ struct list_head active[XE_EXEC_QUEUE_PRIORITY_COUNT];
+
+ u32 last_ctx_id;
+
+ struct xe_execlist_exec_queue *running_exl;
+
+ struct timer_list irq_fail;
+};
+
+struct xe_execlist_exec_queue {
+ struct xe_exec_queue *q;
+
+ struct drm_gpu_scheduler sched;
+
+ struct drm_sched_entity entity;
+
+ struct xe_execlist_port *port;
+
+ bool has_run;
+
+ struct work_struct fini_async;
+
+ enum xe_exec_queue_priority active_priority;
+ struct list_head active_link;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
new file mode 100644
index 000000000..9bbe8a504
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_force_wake.h"
+
+#include <drm/drm_util.h>
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_reg_defs.h"
+#include "xe_gt.h"
+#include "xe_mmio.h"
+
+#define XE_FORCE_WAKE_ACK_TIMEOUT_MS 50
+
+static struct xe_gt *
+fw_to_gt(struct xe_force_wake *fw)
+{
+ return fw->gt;
+}
+
+static struct xe_device *
+fw_to_xe(struct xe_force_wake *fw)
+{
+ return gt_to_xe(fw_to_gt(fw));
+}
+
+static void domain_init(struct xe_force_wake_domain *domain,
+ enum xe_force_wake_domain_id id,
+ struct xe_reg reg, struct xe_reg ack, u32 val, u32 mask)
+{
+ domain->id = id;
+ domain->reg_ctl = reg;
+ domain->reg_ack = ack;
+ domain->val = val;
+ domain->mask = mask;
+}
+
+void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ fw->gt = gt;
+ spin_lock_init(&fw->lock);
+
+ /* Assuming gen11+ so assert this assumption is correct */
+ xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
+
+ if (xe->info.graphics_verx100 >= 1270) {
+ domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
+ XE_FW_DOMAIN_ID_GT,
+ FORCEWAKE_GT,
+ FORCEWAKE_ACK_GT_MTL,
+ BIT(0), BIT(16));
+ } else {
+ domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
+ XE_FW_DOMAIN_ID_GT,
+ FORCEWAKE_GT,
+ FORCEWAKE_ACK_GT,
+ BIT(0), BIT(16));
+ }
+}
+
+void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
+{
+ int i, j;
+
+ /* Assuming gen11+ so assert this assumption is correct */
+ xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
+
+ if (!xe_gt_is_media_type(gt))
+ domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
+ XE_FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_RENDER,
+ FORCEWAKE_ACK_RENDER,
+ BIT(0), BIT(16));
+
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
+ FORCEWAKE_MEDIA_VDBOX(j),
+ FORCEWAKE_ACK_MEDIA_VDBOX(j),
+ BIT(0), BIT(16));
+ }
+
+ for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
+ XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
+ FORCEWAKE_MEDIA_VEBOX(j),
+ FORCEWAKE_ACK_MEDIA_VEBOX(j),
+ BIT(0), BIT(16));
+ }
+
+ if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
+ domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
+ XE_FW_DOMAIN_ID_GSC,
+ FORCEWAKE_GSC,
+ FORCEWAKE_ACK_GSC,
+ BIT(0), BIT(16));
+}
+
+static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
+{
+ xe_mmio_write32(gt, domain->reg_ctl, domain->mask | domain->val);
+}
+
+static int domain_wake_wait(struct xe_gt *gt,
+ struct xe_force_wake_domain *domain)
+{
+ return xe_mmio_wait32(gt, domain->reg_ack, domain->val, domain->val,
+ XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
+ NULL, true);
+}
+
+static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
+{
+ xe_mmio_write32(gt, domain->reg_ctl, domain->mask);
+}
+
+static int domain_sleep_wait(struct xe_gt *gt,
+ struct xe_force_wake_domain *domain)
+{
+ return xe_mmio_wait32(gt, domain->reg_ack, domain->val, 0,
+ XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
+ NULL, true);
+}
+
+#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
+ for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
+ for_each_if((domain__ = ((fw__)->domains + \
+ (ffs(tmp__) - 1))) && \
+ domain__->reg_ctl.addr)
+
+int xe_force_wake_get(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains)
+{
+ struct xe_device *xe = fw_to_xe(fw);
+ struct xe_gt *gt = fw_to_gt(fw);
+ struct xe_force_wake_domain *domain;
+ enum xe_force_wake_domains tmp, woken = 0;
+ unsigned long flags;
+ int ret, ret2 = 0;
+
+ spin_lock_irqsave(&fw->lock, flags);
+ for_each_fw_domain_masked(domain, domains, fw, tmp) {
+ if (!domain->ref++) {
+ woken |= BIT(domain->id);
+ domain_wake(gt, domain);
+ }
+ }
+ for_each_fw_domain_masked(domain, woken, fw, tmp) {
+ ret = domain_wake_wait(gt, domain);
+ ret2 |= ret;
+ if (ret)
+ drm_notice(&xe->drm, "Force wake domain (%d) failed to ack wake, ret=%d\n",
+ domain->id, ret);
+ }
+ fw->awake_domains |= woken;
+ spin_unlock_irqrestore(&fw->lock, flags);
+
+ return ret2;
+}
+
+int xe_force_wake_put(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains)
+{
+ struct xe_device *xe = fw_to_xe(fw);
+ struct xe_gt *gt = fw_to_gt(fw);
+ struct xe_force_wake_domain *domain;
+ enum xe_force_wake_domains tmp, sleep = 0;
+ unsigned long flags;
+ int ret, ret2 = 0;
+
+ spin_lock_irqsave(&fw->lock, flags);
+ for_each_fw_domain_masked(domain, domains, fw, tmp) {
+ if (!--domain->ref) {
+ sleep |= BIT(domain->id);
+ domain_sleep(gt, domain);
+ }
+ }
+ for_each_fw_domain_masked(domain, sleep, fw, tmp) {
+ ret = domain_sleep_wait(gt, domain);
+ ret2 |= ret;
+ if (ret)
+ drm_notice(&xe->drm, "Force wake domain (%d) failed to ack sleep, ret=%d\n",
+ domain->id, ret);
+ }
+ fw->awake_domains &= ~sleep;
+ spin_unlock_irqrestore(&fw->lock, flags);
+
+ return ret2;
+}
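
Callers bracket register access with a get/put pair; both calls can report
ack timeouts, so the get must be checked before touching hardware. A hedged
kernel-side sketch of the discipline (gt_to_fw() is assumed to be the
driver's gt-to-force-wake accessor; the register argument is left abstract):

static int demo_read_reg(struct xe_gt *gt, struct xe_reg reg, u32 *val)
{
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;		/* domain never acked the wake */

	*val = xe_mmio_read32(gt, reg);	/* safe: GT domain is awake */

	return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}
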
diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
new file mode 100644
index 000000000..83cb157da
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_force_wake.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_FORCE_WAKE_H_
+#define _XE_FORCE_WAKE_H_
+
+#include "xe_assert.h"
+#include "xe_force_wake_types.h"
+
+struct xe_gt;
+
+void xe_force_wake_init_gt(struct xe_gt *gt,
+ struct xe_force_wake *fw);
+void xe_force_wake_init_engines(struct xe_gt *gt,
+ struct xe_force_wake *fw);
+int xe_force_wake_get(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains);
+int xe_force_wake_put(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains);
+
+static inline int
+xe_force_wake_ref(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domain)
+{
+ xe_gt_assert(fw->gt, domain);
+ return fw->domains[ffs(domain) - 1].ref;
+}
+
+static inline void
+xe_force_wake_assert_held(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domain)
+{
+ xe_gt_assert(fw->gt, fw->awake_domains & domain);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h
new file mode 100644
index 000000000..ed0edc2cd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_force_wake_types.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_FORCE_WAKE_TYPES_H_
+#define _XE_FORCE_WAKE_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "regs/xe_reg_defs.h"
+
+enum xe_force_wake_domain_id {
+ XE_FW_DOMAIN_ID_GT = 0,
+ XE_FW_DOMAIN_ID_RENDER,
+ XE_FW_DOMAIN_ID_MEDIA,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX0,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX1,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX2,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX3,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX4,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX5,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX6,
+ XE_FW_DOMAIN_ID_MEDIA_VDBOX7,
+ XE_FW_DOMAIN_ID_MEDIA_VEBOX0,
+ XE_FW_DOMAIN_ID_MEDIA_VEBOX1,
+ XE_FW_DOMAIN_ID_MEDIA_VEBOX2,
+ XE_FW_DOMAIN_ID_MEDIA_VEBOX3,
+ XE_FW_DOMAIN_ID_GSC,
+ XE_FW_DOMAIN_ID_COUNT
+};
+
+enum xe_force_wake_domains {
+ XE_FW_GT = BIT(XE_FW_DOMAIN_ID_GT),
+ XE_FW_RENDER = BIT(XE_FW_DOMAIN_ID_RENDER),
+ XE_FW_MEDIA = BIT(XE_FW_DOMAIN_ID_MEDIA),
+ XE_FW_MEDIA_VDBOX0 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX0),
+ XE_FW_MEDIA_VDBOX1 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX1),
+ XE_FW_MEDIA_VDBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX2),
+ XE_FW_MEDIA_VDBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX3),
+ XE_FW_MEDIA_VDBOX4 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX4),
+ XE_FW_MEDIA_VDBOX5 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX5),
+ XE_FW_MEDIA_VDBOX6 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX6),
+ XE_FW_MEDIA_VDBOX7 = BIT(XE_FW_DOMAIN_ID_MEDIA_VDBOX7),
+ XE_FW_MEDIA_VEBOX0 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX0),
+ XE_FW_MEDIA_VEBOX1 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX1),
+ XE_FW_MEDIA_VEBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX2),
+ XE_FW_MEDIA_VEBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX3),
+ XE_FW_GSC = BIT(XE_FW_DOMAIN_ID_GSC),
+ XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) - 1
+};
+
+/**
+ * struct xe_force_wake_domain - XE force wake domain
+ */
+struct xe_force_wake_domain {
+ /** @id: domain force wake id */
+ enum xe_force_wake_domain_id id;
+ /** @reg_ctl: domain wake control register address */
+ struct xe_reg reg_ctl;
+ /** @reg_ack: domain ack register address */
+ struct xe_reg reg_ack;
+ /** @val: domain wake write value */
+ u32 val;
+ /** @mask: domain mask */
+ u32 mask;
+ /** @ref: domain reference */
+ u32 ref;
+};
+
+/**
+ * struct xe_force_wake - XE force wake
+ */
+struct xe_force_wake {
+	/** @gt: back pointer to GT */
+ struct xe_gt *gt;
+	/** @lock: protects everything in the force wake struct */
+ spinlock_t lock;
+ /** @awake_domains: mask of all domains awake */
+ enum xe_force_wake_domains awake_domains;
+ /** @domains: force wake domains */
+ struct xe_force_wake_domain domains[XE_FW_DOMAIN_ID_COUNT];
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
new file mode 100644
index 000000000..106ee2b02
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#define HEADER \
+ "// SPDX-License-Identifier: MIT\n" \
+ "\n" \
+ "/*\n" \
+ " * DO NOT MODIFY.\n" \
+ " *\n" \
+ " * This file was generated from rules: %s\n" \
+ " */\n" \
+ "#ifndef _GENERATED_XE_WA_OOB_\n" \
+ "#define _GENERATED_XE_WA_OOB_\n" \
+ "\n" \
+ "enum {\n"
+
+#define FOOTER \
+ "};\n" \
+ "\n" \
+ "#endif\n"
+
+static void print_usage(FILE *f)
+{
+ fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n",
+ program_invocation_short_name);
+}
+
+static void print_parse_error(const char *err_msg, const char *line,
+ unsigned int lineno)
+{
+ fprintf(stderr, "ERROR: %s\nERROR: %u: %.60s\n",
+ err_msg, lineno, line);
+}
+
+static char *strip(char *line, size_t linelen)
+{
+ while (isspace(*(line + linelen)))
+ linelen--;
+
+ line[linelen - 1] = '\0';
+
+ return line + strspn(line, " \f\n\r\t\v");
+}
+
+#define MAX_LINE_LEN 4096
+static int parse(FILE *input, FILE *csource, FILE *cheader)
+{
+ char line[MAX_LINE_LEN + 1];
+ char *name, *prev_name = NULL, *rules;
+ unsigned int lineno = 0, idx = 0;
+
+ while (fgets(line, sizeof(line), input)) {
+ size_t linelen;
+ bool is_continuation;
+
+ if (line[0] == '\0' || line[0] == '#' || line[0] == '\n') {
+ lineno++;
+ continue;
+ }
+
+ linelen = strlen(line);
+ if (linelen == MAX_LINE_LEN) {
+ print_parse_error("line too long", line, lineno);
+ return -EINVAL;
+ }
+
+ is_continuation = isspace(line[0]);
+ name = strip(line, linelen);
+
+ if (!is_continuation) {
+ name = strtok(name, " \t");
+ rules = strtok(NULL, "");
+ } else {
+ if (!prev_name) {
+ print_parse_error("invalid rule continuation",
+ line, lineno);
+ return -EINVAL;
+ }
+
+ rules = name;
+ name = NULL;
+ }
+
+ if (rules[0] == '\0') {
+ print_parse_error("invalid empty rule\n", line, lineno);
+ return -EINVAL;
+ }
+
+ if (name) {
+ fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx);
+ fprintf(csource, "{ XE_RTP_NAME(\"%s\"), XE_RTP_RULES(%s) },\n",
+ name, rules);
+ } else {
+ fprintf(csource, "{ XE_RTP_NAME(NULL), XE_RTP_RULES(%s) },\n",
+ rules);
+ }
+
+ idx++;
+ lineno++;
+ if (!is_continuation)
+ prev_name = name;
+ }
+
+ fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx);
+
+ return 0;
+}
+
+int main(int argc, const char *argv[])
+{
+ enum {
+ ARGS_INPUT,
+ ARGS_CSOURCE,
+ ARGS_CHEADER,
+ _ARGS_COUNT
+ };
+ struct {
+ const char *fn;
+ const char *mode;
+ FILE *f;
+ } args[] = {
+ [ARGS_INPUT] = { .fn = argv[1], .mode = "r" },
+ [ARGS_CSOURCE] = { .fn = argv[2], .mode = "w" },
+ [ARGS_CHEADER] = { .fn = argv[3], .mode = "w" },
+ };
+ int ret = 1;
+
+ if (argc < 3) {
+ fprintf(stderr, "ERROR: wrong arguments\n");
+ print_usage(stderr);
+ return 1;
+ }
+
+ for (int i = 0; i < _ARGS_COUNT; i++) {
+ args[i].f = fopen(args[i].fn, args[i].mode);
+ if (!args[i].f) {
+ fprintf(stderr, "ERROR: Can't open %s: %m\n",
+ args[i].fn);
+ goto err;
+ }
+ }
+
+ fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn);
+ ret = parse(args[ARGS_INPUT].f, args[ARGS_CSOURCE].f,
+ args[ARGS_CHEADER].f);
+ if (!ret)
+ fprintf(args[ARGS_CHEADER].f, FOOTER);
+
+err:
+ for (int i = 0; i < _ARGS_COUNT; i++) {
+ if (args[i].f)
+ fclose(args[i].f);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
new file mode 100644
index 000000000..3efd2d066
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_ggtt.h"
+
+#include <linux/sizes.h>
+
+#include <drm/drm_managed.h>
+#include <drm/i915_drm.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_wopcm.h"
+
+#define XELPG_GGTT_PTE_PAT0 BIT_ULL(52)
+#define XELPG_GGTT_PTE_PAT1 BIT_ULL(53)
+
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+
+static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ u16 pat_index)
+{
+ u64 pte;
+
+ pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pte |= XE_PAGE_PRESENT;
+
+ if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
+ pte |= XE_GGTT_PTE_DM;
+
+ return pte;
+}
+
+static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ u16 pat_index)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u64 pte;
+
+ pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
+
+ xe_assert(xe, pat_index <= 3);
+
+ if (pat_index & BIT(0))
+ pte |= XELPG_GGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= XELPG_GGTT_PTE_PAT1;
+
+ return pte;
+}
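+
+/*
+ * Example of the encoding above: pat_index 3 (0b11) sets both bits, so the
+ * PTE gains XELPG_GGTT_PTE_PAT0 | XELPG_GGTT_PTE_PAT1 (bits 52 and 53) on
+ * top of the base xelp encoding; only pat_index values 0-3 are representable
+ * here.
+ */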
+
+static unsigned int probe_gsm_size(struct pci_dev *pdev)
+{
+ u16 gmch_ctl, ggms;
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &gmch_ctl);
+ ggms = (gmch_ctl >> BDW_GMCH_GGMS_SHIFT) & BDW_GMCH_GGMS_MASK;
+ return ggms ? SZ_1M << ggms : 0;
+}
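+
+/*
+ * Worked example (illustrative numbers): a GGMS field value of 3 yields
+ * SZ_1M << 3 = 8MB of GSM; at 8 bytes per PTE that is 1M entries, i.e. a
+ * 4GB GGTT with 4KB pages, matching the size computation done in
+ * xe_ggtt_init_early().
+ */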
+
+void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
+{
+ xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
+ xe_tile_assert(ggtt->tile, addr < ggtt->size);
+
+ writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
+}
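+
+/*
+ * Sketch of the indexing above, assuming 4KB GGTT pages: the page offset is
+ * dropped, so e.g. addr = 0x3000 selects gsm[0x3000 >> XE_PTE_SHIFT] =
+ * gsm[3], one 8-byte entry per GGTT page.
+ */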
+
+static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
+{
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
+ u64 end = start + size - 1;
+ u64 scratch_pte;
+
+ xe_tile_assert(ggtt->tile, start < end);
+
+ if (ggtt->scratch)
+ scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
+ pat_index);
+ else
+ scratch_pte = 0;
+
+ while (start < end) {
+ xe_ggtt_set_pte(ggtt, start, scratch_pte);
+ start += XE_PAGE_SIZE;
+ }
+}
+
+static void ggtt_fini_early(struct drm_device *drm, void *arg)
+{
+ struct xe_ggtt *ggtt = arg;
+
+ mutex_destroy(&ggtt->lock);
+ drm_mm_takedown(&ggtt->mm);
+}
+
+static void ggtt_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_ggtt *ggtt = arg;
+
+ ggtt->scratch = NULL;
+}
+
+static void primelockdep(struct xe_ggtt *ggtt)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&ggtt->lock);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static const struct xe_ggtt_pt_ops xelp_pt_ops = {
+ .pte_encode_bo = xelp_ggtt_pte_encode_bo,
+};
+
+static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
+ .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+};
+
+/*
+ * Early GGTT initialization, which allows creating new mappings usable by
+ * the GuC.
+ * Mappings are not usable by the HW engines yet, as the scratch / initial
+ * clear has not been done on them. That will happen in the regular,
+ * non-early GGTT init.
+ */
+int xe_ggtt_init_early(struct xe_ggtt *ggtt)
+{
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned int gsm_size;
+
+ gsm_size = probe_gsm_size(pdev);
+ if (gsm_size == 0) {
+ drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
+ return -ENOMEM;
+ }
+
+ ggtt->gsm = ggtt->tile->mmio.regs + SZ_8M;
+ ggtt->size = (gsm_size / 8) * (u64) XE_PAGE_SIZE;
+
+ if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ ggtt->flags |= XE_GGTT_FLAGS_64K;
+
+ /*
+ * 8B per entry, each points to a 4KB page.
+ *
+ * The GuC address space is limited on both ends of the GGTT, because
+ * the GuC shim HW redirects accesses to those addresses to other HW
+ * areas instead of going through the GGTT. On the bottom end, the GuC
+ * can't access offsets below the WOPCM size, while on the top side the
+ * limit is fixed at GUC_GGTT_TOP. To keep things simple, instead of
+ * checking each object to see if they are accessed by GuC or not, we
+ * just exclude those areas from the allocator. Additionally, to
+ * simplify the driver load, we use the maximum WOPCM size in this logic
+ * instead of the programmed one, so we don't need to wait until the
+ * actual size to be programmed is determined (which requires FW fetch)
+ * before initializing the GGTT. These simplifications might waste space
+ * in the GGTT (about 20-25 MBs depending on the platform) but we can
+ * live with this.
+ *
+ * Another benefit of this is that the GuC bootrom can't access anything
+ * below the WOPCM max size, so anything the bootrom needs to access (e.g.
+ * an RSA key) needs to be placed in the GGTT above the WOPCM max size.
+ * Starting the GGTT allocations above the WOPCM max gives us the correct
+ * placement for free.
+ */
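+ /*
+ * Worked example with illustrative numbers: an 8MB GSM gives a raw 4GB
+ * GGTT span, which is then capped to GUC_GGTT_TOP (0xFEE00000, just
+ * below 4GB); with the allocator below starting at the maximum WOPCM
+ * size, the usable range ends up as [max WOPCM, GUC_GGTT_TOP).
+ */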
+ if (ggtt->size > GUC_GGTT_TOP)
+ ggtt->size = GUC_GGTT_TOP;
+
+ if (GRAPHICS_VERx100(xe) >= 1270)
+ ggtt->pt_ops = &xelpg_pt_ops;
+ else
+ ggtt->pt_ops = &xelp_pt_ops;
+
+ drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
+ ggtt->size - xe_wopcm_size(xe));
+ mutex_init(&ggtt->lock);
+ primelockdep(ggtt);
+
+ return drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
+}
+
+static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
+{
+ struct drm_mm_node *hole;
+ u64 start, end;
+
+ /* Display may have allocated inside ggtt, so be careful with clearing here */
+ xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ mutex_lock(&ggtt->lock);
+ drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
+ xe_ggtt_clear(ggtt, start, end - start);
+
+ xe_ggtt_invalidate(ggtt);
+ mutex_unlock(&ggtt->lock);
+ xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+}
+
+int xe_ggtt_init(struct xe_ggtt *ggtt)
+{
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+ unsigned int flags;
+ int err;
+
+ /*
+ * So that we don't need to worry about the 64K GGTT layout when dealing
+ * with scratch entries, keep the scratch page in system memory on
+ * platforms where 64K pages are needed for VRAM.
+ */
+ flags = XE_BO_CREATE_PINNED_BIT;
+ if (ggtt->flags & XE_GGTT_FLAGS_64K)
+ flags |= XE_BO_CREATE_SYSTEM_BIT;
+ else
+ flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+
+ ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
+ if (IS_ERR(ggtt->scratch)) {
+ err = PTR_ERR(ggtt->scratch);
+ goto err;
+ }
+
+ xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size);
+
+ xe_ggtt_initial_clear(ggtt);
+
+ return drmm_add_action_or_reset(&xe->drm, ggtt_fini, ggtt);
+err:
+ ggtt->scratch = NULL;
+ return err;
+}
+
+#define GUC_TLB_INV_CR XE_REG(0xcee8)
+#define GUC_TLB_INV_CR_INVALIDATE REG_BIT(0)
+#define PVC_GUC_TLB_INV_DESC0 XE_REG(0xcf7c)
+#define PVC_GUC_TLB_INV_DESC0_VALID REG_BIT(0)
+#define PVC_GUC_TLB_INV_DESC1 XE_REG(0xcf80)
+#define PVC_GUC_TLB_INV_DESC1_INVALIDATE REG_BIT(6)
+
+static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
+{
+ if (!gt)
+ return;
+
+ /*
+ * Invalidation can happen when there's no in-flight work keeping the
+ * GT awake. We need to explicitly grab forcewake to ensure the GT
+ * and GuC are accessible.
+ */
+ xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+
+ /* TODO: vfunc for GuC vs. non-GuC */
+
+ if (gt->uc.guc.submission_state.enabled) {
+ int seqno;
+
+ seqno = xe_gt_tlb_invalidation_guc(gt);
+ xe_gt_assert(gt, seqno > 0);
+ if (seqno > 0)
+ xe_gt_tlb_invalidation_wait(gt, seqno);
+ } else if (xe_device_uc_enabled(gt_to_xe(gt))) {
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
+ xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+ PVC_GUC_TLB_INV_DESC1_INVALIDATE);
+ xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+ PVC_GUC_TLB_INV_DESC0_VALID);
+ } else {
+ xe_mmio_write32(gt, GUC_TLB_INV_CR,
+ GUC_TLB_INV_CR_INVALIDATE);
+ }
+ }
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
+void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
+{
+ /* Each GT in a tile has its own TLB to cache GGTT lookups */
+ ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
+ ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
+}
+
+void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
+{
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
+ u64 addr, scratch_pte;
+
+ scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, pat_index);
+
+ printk("%sGlobal GTT:", prefix);
+ for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
+ unsigned int i = addr / XE_PAGE_SIZE;
+
+ xe_tile_assert(ggtt->tile, addr <= U32_MAX);
+ if (ggtt->gsm[i] == scratch_pte)
+ continue;
+
+ printk("%s ggtt[0x%08x] = 0x%016llx",
+ prefix, (u32)addr, ggtt->gsm[i]);
+ }
+}
+
+int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ u32 size, u32 align, u32 mm_flags)
+{
+ return drm_mm_insert_node_generic(&ggtt->mm, node, size, align, 0,
+ mm_flags);
+}
+
+int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ u32 size, u32 align)
+{
+ int ret;
+
+ mutex_lock(&ggtt->lock);
+ ret = xe_ggtt_insert_special_node_locked(ggtt, node, size,
+ align, DRM_MM_INSERT_HIGH);
+ mutex_unlock(&ggtt->lock);
+
+ return ret;
+}
+
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
+ u64 start = bo->ggtt_node.start;
+ u64 offset, pte;
+
+ for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
+ pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
+ xe_ggtt_set_pte(ggtt, start + offset, pte);
+ }
+
+ xe_ggtt_invalidate(ggtt);
+}
+
+static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ u64 start, u64 end)
+{
+ int err;
+ u64 alignment = XE_PAGE_SIZE;
+
+ if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
+ alignment = SZ_64K;
+
+ if (XE_WARN_ON(bo->ggtt_node.size)) {
+ /* Someone's already inserted this BO in the GGTT */
+ xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
+ return 0;
+ }
+
+ err = xe_bo_validate(bo, NULL, false);
+ if (err)
+ return err;
+
+ xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ mutex_lock(&ggtt->lock);
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
+ alignment, 0, start, end, 0);
+ if (!err)
+ xe_ggtt_map_bo(ggtt, bo);
+ mutex_unlock(&ggtt->lock);
+ xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+
+ return err;
+}
+
+int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ u64 start, u64 end)
+{
+ return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
+}
+
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
+}
+
+void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+{
+ xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+ mutex_lock(&ggtt->lock);
+
+ xe_ggtt_clear(ggtt, node->start, node->size);
+ drm_mm_remove_node(node);
+ node->size = 0;
+
+ xe_ggtt_invalidate(ggtt);
+
+ mutex_unlock(&ggtt->lock);
+ xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+}
+
+void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ if (XE_WARN_ON(!bo->ggtt_node.size))
+ return;
+
+ /* This BO is not currently in the GGTT */
+ xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
+
+ xe_ggtt_remove_node(ggtt, &bo->ggtt_node);
+}
+
+int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ggtt->lock);
+ if (err)
+ return err;
+
+ drm_mm_print(&ggtt->mm, p);
+ mutex_unlock(&ggtt->lock);
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
new file mode 100644
index 000000000..a09c166df
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_GGTT_H_
+#define _XE_GGTT_H_
+
+#include "xe_ggtt_types.h"
+
+struct drm_printer;
+
+void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte);
+void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
+int xe_ggtt_init_early(struct xe_ggtt *ggtt);
+int xe_ggtt_init(struct xe_ggtt *ggtt);
+void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix);
+
+int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
+ u32 size, u32 align);
+int xe_ggtt_insert_special_node_locked(struct xe_ggtt *ggtt,
+ struct drm_mm_node *node,
+ u32 size, u32 align, u32 mm_flags);
+void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node);
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ u64 start, u64 end);
+void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+
+int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
new file mode 100644
index 000000000..d8c584d9a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GGTT_TYPES_H_
+#define _XE_GGTT_TYPES_H_
+
+#include <drm/drm_mm.h>
+
+#include "xe_pt_types.h"
+
+struct xe_bo;
+struct xe_gt;
+
+struct xe_ggtt_pt_ops {
+ u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+};
+
+struct xe_ggtt {
+ struct xe_tile *tile;
+
+ u64 size;
+
+#define XE_GGTT_FLAGS_64K BIT(0)
+ unsigned int flags;
+
+ struct xe_bo *scratch;
+
+ struct mutex lock;
+
+ u64 __iomem *gsm;
+
+ const struct xe_ggtt_pt_ops *pt_ops;
+
+ struct drm_mm mm;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
new file mode 100644
index 000000000..e4ad1d6ce
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gpu_scheduler.h"
+
+static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
+{
+ if (!READ_ONCE(sched->base.pause_submit))
+ queue_work(sched->base.submit_wq, &sched->work_process_msg);
+}
+
+static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
+{
+ struct xe_sched_msg *msg;
+
+ spin_lock(&sched->base.job_list_lock);
+ msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
+ if (msg)
+ xe_sched_process_msg_queue(sched);
+ spin_unlock(&sched->base.job_list_lock);
+}
+
+static struct xe_sched_msg *
+xe_sched_get_msg(struct xe_gpu_scheduler *sched)
+{
+ struct xe_sched_msg *msg;
+
+ spin_lock(&sched->base.job_list_lock);
+ msg = list_first_entry_or_null(&sched->msgs,
+ struct xe_sched_msg, link);
+ if (msg)
+ list_del(&msg->link);
+ spin_unlock(&sched->base.job_list_lock);
+
+ return msg;
+}
+
+static void xe_sched_process_msg_work(struct work_struct *w)
+{
+ struct xe_gpu_scheduler *sched =
+ container_of(w, struct xe_gpu_scheduler, work_process_msg);
+ struct xe_sched_msg *msg;
+
+ if (READ_ONCE(sched->base.pause_submit))
+ return;
+
+ msg = xe_sched_get_msg(sched);
+ if (msg) {
+ sched->ops->process_msg(msg);
+
+ xe_sched_process_msg_queue_if_ready(sched);
+ }
+}
+
+int xe_sched_init(struct xe_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ const struct xe_sched_backend_ops *xe_ops,
+ struct workqueue_struct *submit_wq,
+ uint32_t hw_submission, unsigned hang_limit,
+ long timeout, struct workqueue_struct *timeout_wq,
+ atomic_t *score, const char *name,
+ struct device *dev)
+{
+ sched->ops = xe_ops;
+ INIT_LIST_HEAD(&sched->msgs);
+ INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
+
+ return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
+ hang_limit, timeout, timeout_wq, score, name,
+ dev);
+}
+
+void xe_sched_fini(struct xe_gpu_scheduler *sched)
+{
+ xe_sched_submission_stop(sched);
+ drm_sched_fini(&sched->base);
+}
+
+void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_wqueue_start(&sched->base);
+ queue_work(sched->base.submit_wq, &sched->work_process_msg);
+}
+
+void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_wqueue_stop(&sched->base);
+ cancel_work_sync(&sched->work_process_msg);
+}
+
+void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg)
+{
+ spin_lock(&sched->base.job_list_lock);
+ list_add_tail(&msg->link, &sched->msgs);
+ spin_unlock(&sched->base.job_list_lock);
+
+ xe_sched_process_msg_queue(sched);
+}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
new file mode 100644
index 000000000..10c6bb9c9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GPU_SCHEDULER_H_
+#define _XE_GPU_SCHEDULER_H_
+
+#include "xe_gpu_scheduler_types.h"
+#include "xe_sched_job_types.h"
+
+int xe_sched_init(struct xe_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
+ const struct xe_sched_backend_ops *xe_ops,
+ struct workqueue_struct *submit_wq,
+ uint32_t hw_submission, unsigned hang_limit,
+ long timeout, struct workqueue_struct *timeout_wq,
+ atomic_t *score, const char *name,
+ struct device *dev);
+void xe_sched_fini(struct xe_gpu_scheduler *sched);
+
+void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
+void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
+
+void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg);
+
+static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_stop(&sched->base, NULL);
+}
+
+static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_tdr_queue_imm(&sched->base);
+}
+
+static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_resubmit_jobs(&sched->base);
+}
+
+static inline bool
+xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
+{
+ return drm_sched_invalidate_job(&job->drm, threshold);
+}
+
+static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
+ struct xe_sched_job *job)
+{
+ list_add(&job->drm.list, &sched->base.pending_list);
+}
+
+static inline
+struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
+{
+ return list_first_entry_or_null(&sched->base.pending_list,
+ struct xe_sched_job, drm.list);
+}
+
+static inline int
+xe_sched_entity_init(struct xe_sched_entity *entity,
+ struct xe_gpu_scheduler *sched)
+{
+ return drm_sched_entity_init(entity, 0,
+ (struct drm_gpu_scheduler **)&sched,
+ 1, NULL);
+}
+
+#define xe_sched_entity_fini drm_sched_entity_fini
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
new file mode 100644
index 000000000..6731b13da
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler_types.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GPU_SCHEDULER_TYPES_H_
+#define _XE_GPU_SCHEDULER_TYPES_H_
+
+#include <drm/gpu_scheduler.h>
+
+/**
+ * struct xe_sched_msg - an in-band (relative to GPU scheduler run queue)
+ * message
+ *
+ * Generic enough for backend defined messages, backend can expand if needed.
+ */
+struct xe_sched_msg {
+ /** @link: list link into the gpu scheduler list of messages */
+ struct list_head link;
+ /**
+ * @private_data: opaque pointer to message private data (backend defined)
+ */
+ void *private_data;
+ /** @opcode: opcode of message (backend defined) */
+ unsigned int opcode;
+};
+
+/**
+ * struct xe_sched_backend_ops - Define the backend operations called by the
+ * scheduler
+ */
+struct xe_sched_backend_ops {
+ /**
+ * @process_msg: Process a message. Allowed to block; it is this
+ * function's responsibility to free the message if dynamically allocated.
+ */
+ void (*process_msg)(struct xe_sched_msg *msg);
+};
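+
+/*
+ * Illustrative backend usage (the opcode below is hypothetical, not defined
+ * anywhere in this file): a backend typically defines its own opcodes, e.g.
+ *
+ *	#define MY_BACKEND_OP_CLEANUP	0
+ *
+ * fills in a struct xe_sched_msg with that opcode plus private data, hands
+ * it to xe_sched_add_msg(), and decodes msg->opcode again in its
+ * process_msg() implementation, freeing the message there if it was
+ * dynamically allocated.
+ */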
+
+/**
+ * struct xe_gpu_scheduler - Xe GPU scheduler
+ */
+struct xe_gpu_scheduler {
+ /** @base: DRM GPU scheduler */
+ struct drm_gpu_scheduler base;
+ /** @ops: Xe scheduler ops */
+ const struct xe_sched_backend_ops *ops;
+ /** @msgs: list of messages to be processed in @work_process_msg */
+ struct list_head msgs;
+ /** @work_process_msg: processes messages */
+ struct work_struct work_process_msg;
+};
+
+#define xe_sched_entity drm_sched_entity
+#define xe_sched_policy drm_sched_policy
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
new file mode 100644
index 000000000..a8a895cf4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gsc.h"
+
+#include <drm/drm_managed.h>
+
+#include "abi/gsc_mkhi_commands_abi.h"
+#include "generated/xe_wa_oob.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_huc.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_sched_job.h"
+#include "xe_uc_fw.h"
+#include "xe_wa.h"
+#include "instructions/xe_gsc_commands.h"
+#include "regs/xe_gsc_regs.h"
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+static int memcpy_fw(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 fw_size = gsc->fw.size;
+ void *storage;
+
+ /*
+ * FIXME: xe_migrate_copy does not work with stolen mem yet, so we use
+ * a memcpy for now.
+ */
+ storage = kmalloc(fw_size, GFP_KERNEL);
+ if (!storage)
+ return -ENOMEM;
+
+ xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size);
+ xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size);
+ xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size);
+
+ kfree(storage);
+
+ return 0;
+}
+
+static int emit_gsc_upload(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ u64 offset = xe_bo_ggtt_addr(gsc->private);
+ struct xe_bb *bb;
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ long timeout;
+
+ bb = xe_bb_new(gt, 4, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ bb->cs[bb->len++] = GSC_FW_LOAD;
+ bb->cs[bb->len++] = lower_32_bits(offset);
+ bb->cs[bb->len++] = upper_32_bits(offset);
+ bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID;
+
+ job = xe_bb_create_job(gsc->q, bb);
+ if (IS_ERR(job)) {
+ xe_bb_free(bb, NULL);
+ return PTR_ERR(job);
+ }
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ xe_bb_free(bb, NULL);
+ if (timeout < 0)
+ return timeout;
+ else if (!timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+#define version_query_wr(xe_, map_, offset_, field_, val_) \
+ xe_map_wr_field(xe_, map_, offset_, struct gsc_get_compatibility_version_in, field_, val_)
+#define version_query_rd(xe_, map_, offset_, field_) \
+ xe_map_rd_field(xe_, map_, offset_, struct gsc_get_compatibility_version_out, field_)
+
+static u32 emit_version_query_msg(struct xe_device *xe, struct iosys_map *map, u32 wr_offset)
+{
+ xe_map_memset(xe, map, wr_offset, 0, sizeof(struct gsc_get_compatibility_version_in));
+
+ version_query_wr(xe, map, wr_offset, header.group_id, MKHI_GROUP_ID_GFX_SRV);
+ version_query_wr(xe, map, wr_offset, header.command,
+ MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION);
+
+ return wr_offset + sizeof(struct gsc_get_compatibility_version_in);
+}
+
+#define GSC_VER_PKT_SZ SZ_4K /* 4K each for input and output */
+static int query_compatibility_version(struct xe_gsc *gsc)
+{
+ struct xe_uc_fw_version *compat = &gsc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *bo;
+ u32 wr_offset;
+ u32 rd_offset;
+ u64 ggtt_offset;
+ int err;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo)) {
+ xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
+ return PTR_ERR(bo);
+ }
+
+ ggtt_offset = xe_bo_ggtt_addr(bo);
+
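+ /*
+ * Buffer layout used below: the single BO holds both directions of the
+ * transfer, with the request built at [0, GSC_VER_PKT_SZ) and the reply
+ * written by the GSC at [GSC_VER_PKT_SZ, 2 * GSC_VER_PKT_SZ).
+ */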
+ wr_offset = xe_gsc_emit_header(xe, &bo->vmap, 0, HECI_MEADDRESS_MKHI, 0,
+ sizeof(struct gsc_get_compatibility_version_in));
+ wr_offset = emit_version_query_msg(xe, &bo->vmap, wr_offset);
+
+ err = xe_gsc_pkt_submit_kernel(gsc, ggtt_offset, wr_offset,
+ ggtt_offset + GSC_VER_PKT_SZ,
+ GSC_VER_PKT_SZ);
+ if (err) {
+ xe_gt_err(gt,
+ "failed to submit GSC request for compatibility version: %d\n",
+ err);
+ goto out_bo;
+ }
+
+ err = xe_gsc_read_out_header(xe, &bo->vmap, GSC_VER_PKT_SZ,
+ sizeof(struct gsc_get_compatibility_version_out),
+ &rd_offset);
+ if (err) {
+ xe_gt_err(gt, "HuC: invalid GSC reply for version query (err=%d)\n", err);
+ goto out_bo;
+ }
+
+ compat->major = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
+ compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
+
+ xe_gt_info(gt, "found GSC cv%u.%u\n", compat->major, compat->minor);
+
+out_bo:
+ xe_bo_unpin_map_no_vm(bo);
+ return err;
+}
+
+static int gsc_fw_is_loaded(struct xe_gt *gt)
+{
+ return xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
+ HECI1_FWSTS1_INIT_COMPLETE;
+}
+
+static int gsc_fw_wait(struct xe_gt *gt)
+{
+ /*
+ * GSC load can take up to 250ms from the moment the instruction is
+ * executed by the GSCCS. To account for possible submission delays or
+ * other issues, we use a 500ms timeout in the wait here.
+ */
+ return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS1_INIT_COMPLETE,
+ HECI1_FWSTS1_INIT_COMPLETE,
+ 500 * USEC_PER_MSEC, NULL, false);
+}
+
+static int gsc_upload(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ /* we should only be here if the init steps were successful */
+ xe_assert(xe, xe_uc_fw_is_loadable(&gsc->fw) && gsc->q);
+
+ if (gsc_fw_is_loaded(gt)) {
+ xe_gt_err(gt, "GSC already loaded at upload time\n");
+ return -EEXIST;
+ }
+
+ err = memcpy_fw(gsc);
+ if (err) {
+ xe_gt_err(gt, "Failed to memcpy GSC FW\n");
+ return err;
+ }
+
+ /*
+ * GSC is only killed by an FLR, so we need to trigger one on unload to
+ * make sure we stop it. This is because we assign a chunk of memory to
+ * the GSC as part of the FW load, so we need to make sure it stops
+ * using it when we release it to the system on driver unload. Note that
+ * this is not a problem of the unload per se, because the GSC will not
+ * touch that memory unless there are requests for it coming from the
+ * driver; therefore, no accesses will happen while Xe is not loaded,
+ * but if we re-load the driver then the GSC might wake up and try to
+ * access that old memory location again.
+ * Given that an FLR is a very disruptive action (see the FLR function
+ * for details), we want to do it as the last action before releasing
+ * the access to the MMIO bar, which means we need to do it as part of
+ * mmio cleanup.
+ */
+ xe->needs_flr_on_fini = true;
+
+ err = emit_gsc_upload(gsc);
+ if (err) {
+ xe_gt_err(gt, "Failed to emit GSC FW upload (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ err = gsc_fw_wait(gt);
+ if (err) {
+ xe_gt_err(gt, "Failed to wait for GSC load (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ err = query_compatibility_version(gsc);
+ if (err)
+ return err;
+
+ err = xe_uc_fw_check_version_requirements(&gsc->fw);
+ if (err)
+ return err;
+
+ xe_gt_dbg(gt, "GSC FW async load completed\n");
+
+ return 0;
+}
+
+static void gsc_work(struct work_struct *work)
+{
+ struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ xe_device_mem_access_get(xe);
+ xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+
+ ret = gsc_upload(gsc);
+ if (ret && ret != -EEXIST) {
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
+ goto out;
+ }
+
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
+
+ /* HuC auth failure is not fatal */
+ if (xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GUC))
+ xe_huc_auth(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC);
+
+out:
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_device_mem_access_put(xe);
+}
+
+int xe_gsc_init(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ int ret;
+
+ gsc->fw.type = XE_UC_FW_TYPE_GSC;
+ INIT_WORK(&gsc->work, gsc_work);
+
+ /* The GSC uC is only available on the media GT */
+ if (tile->media_gt && (gt != tile->media_gt)) {
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
+ return 0;
+ }
+
+ /*
+ * Some platforms can have GuC but not GSC. That would cause
+ * xe_uc_fw_init(gsc) to return a "not supported" failure code and abort
+ * all firmware loading. So check for GSC being enabled before
+ * propagating the failure back up. That way the higher level will keep
+ * going and load GuC as appropriate.
+ */
+ ret = xe_uc_fw_init(&gsc->fw);
+ if (!xe_uc_fw_is_enabled(&gsc->fw))
+ return 0;
+ else if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ xe_gt_err(gt, "GSC init failed with %d", ret);
+ return ret;
+}
+
+static void free_resources(struct drm_device *drm, void *arg)
+{
+ struct xe_gsc *gsc = arg;
+
+ if (gsc->wq) {
+ destroy_workqueue(gsc->wq);
+ gsc->wq = NULL;
+ }
+
+ if (gsc->q) {
+ xe_exec_queue_put(gsc->q);
+ gsc->q = NULL;
+ }
+
+ if (gsc->private) {
+ xe_bo_unpin_map_no_vm(gsc->private);
+ gsc->private = NULL;
+ }
+}
+
+int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_hw_engine *hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);
+ struct xe_exec_queue *q;
+ struct workqueue_struct *wq;
+ struct xe_bo *bo;
+ int err;
+
+ if (!xe_uc_fw_is_available(&gsc->fw))
+ return 0;
+
+ if (!hwe)
+ return -ENODEV;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_STOLEN_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ q = xe_exec_queue_create(xe, NULL,
+ BIT(hwe->logical_instance), 1, hwe,
+ EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT);
+ if (IS_ERR(q)) {
+ xe_gt_err(gt, "Failed to create queue for GSC submission\n");
+ err = PTR_ERR(q);
+ goto out_bo;
+ }
+
+ wq = alloc_ordered_workqueue("gsc-ordered-wq", 0);
+ if (!wq) {
+ err = -ENOMEM;
+ goto out_q;
+ }
+
+ gsc->private = bo;
+ gsc->q = q;
+ gsc->wq = wq;
+
+ err = drmm_add_action_or_reset(&xe->drm, free_resources, gsc);
+ if (err)
+ return err;
+
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+out_q:
+ xe_exec_queue_put(q);
+out_bo:
+ xe_bo_unpin_map_no_vm(bo);
+ return err;
+}
+
+void xe_gsc_load_start(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
+ return;
+
+ /* GSC FW survives GT reset and D3Hot */
+ if (gsc_fw_is_loaded(gt)) {
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
+ return;
+ }
+
+ queue_work(gsc->wq, &gsc->work);
+}
+
+void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
+{
+ if (xe_uc_fw_is_loadable(&gsc->fw) && gsc->wq)
+ flush_work(&gsc->work);
+}
+
+/*
+ * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
+ * GSC engine reset by writing a notification bit in the GS1 register and then
+ * triggering an interrupt to GSC; from the interrupt it will take up to 200ms
+ * for the FW to get prepared for the reset, so we need to wait for that amount
+ * of time.
+ * After the reset is complete we need to then clear the GS1 register.
+ */
+void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
+{
+ u32 gs1_set = prep ? HECI_H_GS1_ER_PREP : 0;
+ u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;
+
+ /* WA only applies if the GSC is loaded */
+ if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
+ return;
+
+ xe_mmio_rmw32(gt, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
+
+ if (prep) {
+ /* make sure the reset bit is clear when writing the CSR reg */
+ xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ HECI_H_CSR_RST, HECI_H_CSR_IG);
+ msleep(200);
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
new file mode 100644
index 000000000..bc1ef7f31
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_H_
+#define _XE_GSC_H_
+
+#include "xe_gsc_types.h"
+
+struct xe_gt;
+
+int xe_gsc_init(struct xe_gsc *gsc);
+int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
+void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_load_start(struct xe_gsc *gsc);
+
+void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.c b/drivers/gpu/drm/xe/xe_gsc_submit.c
new file mode 100644
index 000000000..8c5381e59
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gsc_submit.h"
+
+#include "abi/gsc_command_header_abi.h"
+#include "xe_bb.h"
+#include "xe_exec_queue.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_types.h"
+#include "xe_map.h"
+#include "xe_sched_job.h"
+#include "instructions/xe_gsc_commands.h"
+#include "regs/xe_gsc_regs.h"
+
+#define GSC_HDR_SIZE (sizeof(struct intel_gsc_mtl_header)) /* shorthand define */
+
+#define mtl_gsc_header_wr(xe_, map_, offset_, field_, val_) \
+ xe_map_wr_field(xe_, map_, offset_, struct intel_gsc_mtl_header, field_, val_)
+
+#define mtl_gsc_header_rd(xe_, map_, offset_, field_) \
+ xe_map_rd_field(xe_, map_, offset_, struct intel_gsc_mtl_header, field_)
+
+/*
+ * GSC FW allows us to define the host_session_handle as we see fit, as long
+ * as we use a unique identifier for each user, with handle 0 being reserved for
+ * kernel usage.
+ * To be able to differentiate which client subsystem owns the given session, we
+ * include the client id in the top 8 bits of the handle.
+ */
+#define HOST_SESSION_CLIENT_MASK GENMASK_ULL(63, 56)
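+
+/*
+ * Sketch of the resulting handle layout (client and session values are
+ * illustrative): a caller using heci_client_id 0x5 and host_session_id
+ * 0x1234 ends up with
+ *
+ *	FIELD_PREP(HOST_SESSION_CLIENT_MASK, 0x5) | 0x1234
+ *
+ * i.e. the client id in bits 63:56 and the session id in the low bits, while
+ * host_session_id == 0 is passed through untouched as the reserved kernel
+ * handle.
+ */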
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+/**
+ * xe_gsc_emit_header - write the MTL GSC header in memory
+ * @xe: the Xe device
+ * @map: the iosys map to write to
+ * @offset: offset from the start of the map at which to write the header
+ * @heci_client_id: client id identifying the type of command (see abi for values)
+ * @host_session_id: host session ID of the caller
+ * @payload_size: size of the payload that follows the header
+ *
+ * Returns: offset of the memory location following the header
+ */
+u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
+ u8 heci_client_id, u64 host_session_id, u32 payload_size)
+{
+ xe_assert(xe, !(host_session_id & HOST_SESSION_CLIENT_MASK));
+
+ if (host_session_id)
+ host_session_id |= FIELD_PREP(HOST_SESSION_CLIENT_MASK, heci_client_id);
+
+ xe_map_memset(xe, map, offset, 0, GSC_HDR_SIZE);
+
+ mtl_gsc_header_wr(xe, map, offset, validity_marker, GSC_HECI_VALIDITY_MARKER);
+ mtl_gsc_header_wr(xe, map, offset, heci_client_id, heci_client_id);
+ mtl_gsc_header_wr(xe, map, offset, host_session_handle, host_session_id);
+ mtl_gsc_header_wr(xe, map, offset, header_version, MTL_GSC_HEADER_VERSION);
+ mtl_gsc_header_wr(xe, map, offset, message_size, payload_size + GSC_HDR_SIZE);
+
+ return offset + GSC_HDR_SIZE;
+};
+
+/**
+ * xe_gsc_check_and_update_pending - check the pending bit and update the input
+ * header with the retry handle from the output header
+ * @xe: the Xe device
+ * @in: the iosys map containing the input buffer
+ * @offset_in: offset within the iosys at which the input buffer is located
+ * @out: the iosys map containing the output buffer
+ * @offset_out: offset within the iosys at which the output buffer is located
+ *
+ * Returns: true if the pending bit was set, false otherwise
+ */
+bool xe_gsc_check_and_update_pending(struct xe_device *xe,
+ struct iosys_map *in, u32 offset_in,
+ struct iosys_map *out, u32 offset_out)
+{
+ if (mtl_gsc_header_rd(xe, out, offset_out, flags) & GSC_OUTFLAG_MSG_PENDING) {
+ u64 handle = mtl_gsc_header_rd(xe, out, offset_out, gsc_message_handle);
+
+ mtl_gsc_header_wr(xe, in, offset_in, gsc_message_handle, handle);
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * xe_gsc_read_out_header - reads and validates the output header and returns
+ * the offset of the reply following the header
+ * @xe: the Xe device
+ * @map: the iosys map containing the output buffer
+ * @offset: offset within the iosys at which the output buffer is located
+ * @min_payload_size: minimum size of the message excluding the gsc header
+ * @payload_offset: optional pointer to be set to the payload offset
+ *
+ * Returns: -errno value on failure, 0 otherwise
+ */
+int xe_gsc_read_out_header(struct xe_device *xe,
+ struct iosys_map *map, u32 offset,
+ u32 min_payload_size,
+ u32 *payload_offset)
+{
+ u32 marker = mtl_gsc_header_rd(xe, map, offset, validity_marker);
+ u32 size = mtl_gsc_header_rd(xe, map, offset, message_size);
+ u32 payload_size = size - GSC_HDR_SIZE;
+
+ if (marker != GSC_HECI_VALIDITY_MARKER)
+ return -EPROTO;
+
+ if (size < GSC_HDR_SIZE || payload_size < min_payload_size)
+ return -ENODATA;
+
+ if (payload_offset)
+ *payload_offset = offset + GSC_HDR_SIZE;
+
+ return 0;
+}
+
+/**
+ * xe_gsc_pkt_submit_kernel - submit a kernel heci pkt to the GSC
+ * @gsc: the GSC uC
+ * @addr_in: GGTT address of the message to send to the GSC
+ * @size_in: size of the message to send to the GSC
+ * @addr_out: GGTT address for the GSC to write the reply to
+ * @size_out: size of the memory reserved for the reply
+ */
+int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
+ u64 addr_out, u32 size_out)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_bb *bb;
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ long timeout;
+
+ if (size_in < GSC_HDR_SIZE)
+ return -ENODATA;
+
+ if (size_out < GSC_HDR_SIZE)
+ return -ENOMEM;
+
+ bb = xe_bb_new(gt, 8, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ bb->cs[bb->len++] = GSC_HECI_CMD_PKT;
+ bb->cs[bb->len++] = lower_32_bits(addr_in);
+ bb->cs[bb->len++] = upper_32_bits(addr_in);
+ bb->cs[bb->len++] = size_in;
+ bb->cs[bb->len++] = lower_32_bits(addr_out);
+ bb->cs[bb->len++] = upper_32_bits(addr_out);
+ bb->cs[bb->len++] = size_out;
+ bb->cs[bb->len++] = 0;
+
+ job = xe_bb_create_job(gsc->q, bb);
+ if (IS_ERR(job)) {
+ xe_bb_free(bb, NULL);
+ return PTR_ERR(job);
+ }
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ xe_bb_free(bb, NULL);
+ if (timeout < 0)
+ return timeout;
+ else if (!timeout)
+ return -ETIME;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc_submit.h b/drivers/gpu/drm/xe/xe_gsc_submit.h
new file mode 100644
index 000000000..0801da5d4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_submit.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_SUBMIT_H_
+#define _XE_GSC_SUBMIT_H_
+
+#include <linux/types.h>
+
+struct iosys_map;
+struct xe_device;
+struct xe_gsc;
+
+u32 xe_gsc_emit_header(struct xe_device *xe, struct iosys_map *map, u32 offset,
+ u8 heci_client_id, u64 host_session_id, u32 payload_size);
+
+bool xe_gsc_check_and_update_pending(struct xe_device *xe,
+ struct iosys_map *in, u32 offset_in,
+ struct iosys_map *out, u32 offset_out);
+
+int xe_gsc_read_out_header(struct xe_device *xe,
+ struct iosys_map *map, u32 offset,
+ u32 min_payload_size,
+ u32 *payload_offset);
+
+int xe_gsc_pkt_submit_kernel(struct xe_gsc *gsc, u64 addr_in, u32 size_in,
+ u64 addr_out, u32 size_out);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
new file mode 100644
index 000000000..57fefd66a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GSC_TYPES_H_
+#define _XE_GSC_TYPES_H_
+
+#include <linux/workqueue.h>
+
+#include "xe_uc_fw_types.h"
+
+struct xe_bo;
+struct xe_exec_queue;
+
+/**
+ * struct xe_gsc - GSC
+ */
+struct xe_gsc {
+ /** @fw: Generic uC firmware management */
+ struct xe_uc_fw fw;
+
+ /** @security_version: SVN found in the fetched blob */
+ u32 security_version;
+
+ /** @private: Private data for use by the GSC FW */
+ struct xe_bo *private;
+
+ /** @q: Default queue used for submissions to GSC FW */
+ struct xe_exec_queue *q;
+
+ /** @wq: workqueue to handle jobs for delayed load and proxy handling */
+ struct workqueue_struct *wq;
+
+ /** @work: delayed load and proxy handling work */
+ struct work_struct work;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
new file mode 100644
index 000000000..d23ee1397
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt.h"
+
+#include <linux/minmax.h>
+
+#include <drm/drm_managed.h>
+#include <drm/xe_drm.h>
+
+#include "instructions/xe_gfxpipe_commands.h"
+#include "instructions/xe_mi_commands.h"
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_execlist.h"
+#include "xe_force_wake.h"
+#include "xe_ggtt.h"
+#include "xe_gsc.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_clock.h"
+#include "xe_gt_freq.h"
+#include "xe_gt_idle.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sysfs.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_gt_topology.h"
+#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_pc.h"
+#include "xe_hw_fence.h"
+#include "xe_hw_engine_class_sysfs.h"
+#include "xe_irq.h"
+#include "xe_lmtt.h"
+#include "xe_lrc.h"
+#include "xe_map.h"
+#include "xe_migrate.h"
+#include "xe_mmio.h"
+#include "xe_pat.h"
+#include "xe_mocs.h"
+#include "xe_reg_sr.h"
+#include "xe_ring_ops.h"
+#include "xe_sa.h"
+#include "xe_sched_job.h"
+#include "xe_sriov.h"
+#include "xe_tuning.h"
+#include "xe_uc.h"
+#include "xe_vm.h"
+#include "xe_wa.h"
+#include "xe_wopcm.h"
+
+struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
+{
+ struct xe_gt *gt;
+
+ gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt)
+ return ERR_PTR(-ENOMEM);
+
+ gt->tile = tile;
+ gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
+
+ return gt;
+}
+
+void xe_gt_sanitize(struct xe_gt *gt)
+{
+ /*
+ * FIXME: if xe_uc_sanitize is called here, the driver will not reload
+ * on TGL
+ */
+ gt->uc.guc.submission_state.enabled = false;
+}
+
+static void gt_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_gt *gt = arg;
+ int i;
+
+ destroy_workqueue(gt->ordered_wq);
+
+ for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+}
+
+static void gt_reset_worker(struct work_struct *w);
+
+static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
+{
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ struct dma_fence *fence;
+ long timeout;
+
+ bb = xe_bb_new(gt, 4, false);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ job = xe_bb_create_job(q, bb);
+ if (IS_ERR(job)) {
+ xe_bb_free(bb, NULL);
+ return PTR_ERR(job);
+ }
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ xe_bb_free(bb, NULL);
+ if (timeout < 0)
+ return timeout;
+ else if (!timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+/*
+ * Convert back from the encoded value to the type-safe variant; only to be
+ * used when reg.mcr is true
+ */
+static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
+{
+ return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
+}
+
+static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
+{
+ struct xe_reg_sr *sr = &q->hwe->reg_lrc;
+ struct xe_reg_sr_entry *entry;
+ unsigned long idx;
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ struct dma_fence *fence;
+ long timeout;
+ int count = 0;
+
+ if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
+ /* Big enough to emit all of the context's 3DSTATE */
+ bb = xe_bb_new(gt, xe_lrc_size(gt_to_xe(gt), q->hwe->class), false);
+ else
+ /* Just pick a large BB size */
+ bb = xe_bb_new(gt, SZ_4K, false);
+
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ xa_for_each(&sr->xa, idx, entry)
+ ++count;
+
+ if (count) {
+ xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+
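+ /*
+ * The batch below is a single MI_LOAD_REGISTER_IMM: one header dword
+ * carrying the register count, followed by one (address, value) pair
+ * per save-restore entry.
+ */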
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
+
+ xa_for_each(&sr->xa, idx, entry) {
+ struct xe_reg reg = entry->reg;
+ struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
+ u32 val;
+
+ /*
+ * Skip reading the register if it's not really needed
+ */
+ if (reg.masked)
+ val = entry->clr_bits << 16;
+ else if (entry->clr_bits + 1)
+ val = (reg.mcr ?
+ xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
+ xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
+ else
+ val = 0;
+
+ val |= entry->set_bits;
+
+ bb->cs[bb->len++] = reg.addr;
+ bb->cs[bb->len++] = val;
+ xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
+ }
+ }
+
+ xe_lrc_emit_hwe_state_instructions(q, bb);
+
+ job = xe_bb_create_job(q, bb);
+ if (IS_ERR(job)) {
+ xe_bb_free(bb, NULL);
+ return PTR_ERR(job);
+ }
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ xe_bb_free(bb, NULL);
+ if (timeout < 0)
+ return timeout;
+ else if (!timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+int xe_gt_record_default_lrcs(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int err = 0;
+
+ for_each_hw_engine(hwe, gt, id) {
+ struct xe_exec_queue *q, *nop_q;
+ void *default_lrc;
+
+ if (gt->default_lrc[hwe->class])
+ continue;
+
+ xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
+ xe_wa_process_lrc(hwe);
+ xe_hw_engine_setup_default_lrc_state(hwe);
+ xe_tuning_process_lrc(hwe);
+
+ default_lrc = drmm_kzalloc(&xe->drm,
+ xe_lrc_size(xe, hwe->class),
+ GFP_KERNEL);
+ if (!default_lrc)
+ return -ENOMEM;
+
+ q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
+ hwe, EXEC_QUEUE_FLAG_KERNEL);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
+ hwe->name, q);
+ return err;
+ }
+
+ /* Prime golden LRC with known good state */
+ err = emit_wa_job(gt, q);
+ if (err) {
+ xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
+ hwe->name, ERR_PTR(err), q->guc->id);
+ goto put_exec_queue;
+ }
+
+ nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
+ 1, hwe, EXEC_QUEUE_FLAG_KERNEL);
+ if (IS_ERR(nop_q)) {
+ err = PTR_ERR(nop_q);
+ xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
+ hwe->name, nop_q);
+ goto put_exec_queue;
+ }
+
+ /* Switch to different LRC */
+ err = emit_nop_job(gt, nop_q);
+ if (err) {
+ xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
+ hwe->name, ERR_PTR(err), nop_q->guc->id);
+ goto put_nop_q;
+ }
+
+ /* Reload golden LRC to record the effect of any indirect W/A */
+ err = emit_nop_job(gt, q);
+ if (err) {
+ xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
+ hwe->name, ERR_PTR(err), q->guc->id);
+ goto put_nop_q;
+ }
+
+ xe_map_memcpy_from(xe, default_lrc,
+ &q->lrc[0].bo->vmap,
+ xe_lrc_pphwsp_offset(&q->lrc[0]),
+ xe_lrc_size(xe, hwe->class));
+
+ gt->default_lrc[hwe->class] = default_lrc;
+put_nop_q:
+ xe_exec_queue_put(nop_q);
+put_exec_queue:
+ xe_exec_queue_put(q);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int xe_gt_init_early(struct xe_gt *gt)
+{
+ int err;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return err;
+
+ xe_gt_topology_init(gt);
+ xe_gt_mcr_init(gt);
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return err;
+
+ xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
+
+ err = xe_wa_init(gt);
+ if (err)
+ return err;
+
+ xe_wa_process_gt(gt);
+ xe_wa_process_oob(gt);
+ xe_tuning_process_gt(gt);
+
+ return 0;
+}
+
+static void dump_pat_on_error(struct xe_gt *gt)
+{
+ struct drm_printer p;
+ char prefix[32];
+
+ snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
+ p = drm_debug_printer(prefix);
+
+ xe_pat_dump(gt, &p);
+}
+
+static int gt_fw_domain_init(struct xe_gt *gt)
+{
+ int err, i;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto err_hw_fence_irq;
+
+ xe_pat_init(gt);
+
+ if (!xe_gt_is_media_type(gt)) {
+ err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
+ if (err)
+ goto err_force_wake;
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
+ xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
+ }
+
+ err = xe_uc_init(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ /* Raise GT freq to speed up HuC/GuC load */
+ xe_guc_pc_init_early(&gt->uc.guc.pc);
+
+ err = xe_uc_init_hwconfig(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ xe_gt_idle_sysfs_init(&gt->gtidle);
+
+ /* XXX: Fake that we pull the engine mask from hwconfig blob */
+ gt->info.engine_mask = gt->info.__engine_mask;
+
+ /* Enable per hw engine IRQs */
+ xe_irq_enable_hwe(gt);
+
+ /* Rerun MCR init as we now have hw engine list */
+ xe_gt_mcr_init(gt);
+
+ err = xe_hw_engines_init_early(gt);
+ if (err)
+ goto err_force_wake;
+
+ err = xe_hw_engine_class_sysfs_init(gt);
+ if (err)
+ drm_warn(&gt_to_xe(gt)->drm,
+ "failed to register engines sysfs directory, err: %d\n",
+ err);
+
+ /* Initialize CCS mode sysfs after early initialization of HW engines */
+ err = xe_gt_ccs_mode_sysfs_init(gt);
+ if (err)
+ goto err_force_wake;
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ XE_WARN_ON(err);
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return 0;
+
+err_force_wake:
+ dump_pat_on_error(gt);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+err_hw_fence_irq:
+ for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return err;
+}
+
+static int all_fw_domain_init(struct xe_gt *gt)
+{
+ int err, i;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (err)
+ goto err_hw_fence_irq;
+
+ xe_gt_mcr_set_implicit_defaults(gt);
+ xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
+
+ err = xe_gt_clock_init(gt);
+ if (err)
+ goto err_force_wake;
+
+ xe_mocs_init(gt);
+ err = xe_execlist_init(gt);
+ if (err)
+ goto err_force_wake;
+
+ err = xe_hw_engines_init(gt);
+ if (err)
+ goto err_force_wake;
+
+ err = xe_uc_init_post_hwconfig(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ if (!xe_gt_is_media_type(gt)) {
+ /*
+ * USM has its own SA pool so that it does not block behind user operations
+ */
+ if (gt_to_xe(gt)->info.has_usm) {
+ struct xe_device *xe = gt_to_xe(gt);
+
+ gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+ IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
+ if (IS_ERR(gt->usm.bb_pool)) {
+ err = PTR_ERR(gt->usm.bb_pool);
+ goto err_force_wake;
+ }
+ }
+ }
+
+ if (!xe_gt_is_media_type(gt)) {
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ tile->migrate = xe_migrate_init(tile);
+ if (IS_ERR(tile->migrate)) {
+ err = PTR_ERR(tile->migrate);
+ goto err_force_wake;
+ }
+ }
+
+ err = xe_uc_init_hw(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ /* Configure default CCS mode of 1 engine with all resources */
+ if (xe_gt_ccs_mode_enabled(gt)) {
+ gt->ccs_mode = 1;
+ xe_gt_apply_ccs_mode(gt);
+ }
+
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ XE_WARN_ON(err);
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return 0;
+
+err_force_wake:
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+err_hw_fence_irq:
+ for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return err;
+}
+
+int xe_gt_init(struct xe_gt *gt)
+{
+ int err;
+ int i;
+
+ INIT_WORK(&gt->reset.worker, gt_reset_worker);
+
+ for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
+ gt->ring_ops[i] = xe_ring_ops_get(gt, i);
+ xe_hw_fence_irq_init(&gt->fence_irq[i]);
+ }
+
+ err = xe_gt_tlb_invalidation_init(gt);
+ if (err)
+ return err;
+
+ err = xe_gt_pagefault_init(gt);
+ if (err)
+ return err;
+
+ xe_mocs_init_early(gt);
+
+ xe_gt_sysfs_init(gt);
+
+ err = gt_fw_domain_init(gt);
+ if (err)
+ return err;
+
+ xe_gt_freq_init(gt);
+
+ xe_force_wake_init_engines(gt, gt_to_fw(gt));
+
+ err = all_fw_domain_init(gt);
+ if (err)
+ return err;
+
+ err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int do_gt_reset(struct xe_gt *gt)
+{
+ int err;
+
+ xe_gsc_wa_14015076503(gt, true);
+
+ xe_mmio_write32(gt, GDRST, GRDOM_FULL);
+ err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
+ if (err)
+ xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
+ ERR_PTR(err));
+
+ xe_gsc_wa_14015076503(gt, false);
+
+ return err;
+}
+
+static int do_gt_restart(struct xe_gt *gt)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int err;
+
+ xe_pat_init(gt);
+
+ xe_gt_mcr_set_implicit_defaults(gt);
+ xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
+
+ err = xe_wopcm_init(&gt->uc.wopcm);
+ if (err)
+ return err;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_hw_engine_enable_ring(hwe);
+
+ err = xe_uc_sanitize_reset(&gt->uc);
+ if (err)
+ return err;
+
+ err = xe_uc_init_hw(&gt->uc);
+ if (err)
+ return err;
+
+ if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
+ xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
+
+ xe_mocs_init(gt);
+ err = xe_uc_start(&gt->uc);
+ if (err)
+ return err;
+
+ for_each_hw_engine(hwe, gt, id) {
+ xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
+ xe_reg_sr_apply_whitelist(hwe);
+ }
+
+ /* Get CCS mode in sync between sw/hw */
+ xe_gt_apply_ccs_mode(gt);
+
+ return 0;
+}
+
+static int gt_reset(struct xe_gt *gt)
+{
+ int err;
+
+ /* We only support GT resets with GuC submission */
+ if (!xe_device_uc_enabled(gt_to_xe(gt)))
+ return -ENODEV;
+
+ xe_gt_info(gt, "reset started\n");
+
+ if (xe_fault_inject_gt_reset()) {
+ err = -ECANCELED;
+ goto err_fail;
+ }
+
+ xe_gt_sanitize(gt);
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (err)
+ goto err_msg;
+
+ xe_uc_gucrc_disable(&gt->uc);
+ xe_uc_stop_prepare(&gt->uc);
+ xe_gt_pagefault_reset(gt);
+
+ err = xe_uc_stop(&gt->uc);
+ if (err)
+ goto err_out;
+
+ err = do_gt_reset(gt);
+ if (err)
+ goto err_out;
+
+ xe_gt_tlb_invalidation_reset(gt);
+
+ err = do_gt_restart(gt);
+ if (err)
+ goto err_out;
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_device_mem_access_put(gt_to_xe(gt));
+ XE_WARN_ON(err);
+
+ xe_gt_info(gt, "reset done\n");
+
+ return 0;
+
+err_out:
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+err_msg:
+ XE_WARN_ON(xe_uc_start(&gt->uc));
+ xe_device_mem_access_put(gt_to_xe(gt));
+err_fail:
+ xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
+
+ gt_to_xe(gt)->needs_flr_on_fini = true;
+
+ return err;
+}
+
+static void gt_reset_worker(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
+
+ gt_reset(gt);
+}
+
+void xe_gt_reset_async(struct xe_gt *gt)
+{
+ xe_gt_info(gt, "trying reset\n");
+
+ /* Don't do a reset while one is already in flight */
+ if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
+ return;
+
+ xe_gt_info(gt, "reset queued\n");
+ queue_work(gt->ordered_wq, &gt->reset.worker);
+}
+
+void xe_gt_suspend_prepare(struct xe_gt *gt)
+{
+ xe_device_mem_access_get(gt_to_xe(gt));
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+
+ xe_uc_stop_prepare(&gt->uc);
+
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_device_mem_access_put(gt_to_xe(gt));
+}
+
+int xe_gt_suspend(struct xe_gt *gt)
+{
+ int err;
+
+ xe_gt_sanitize(gt);
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (err)
+ goto err_msg;
+
+ err = xe_uc_suspend(&gt->uc);
+ if (err)
+ goto err_force_wake;
+
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_device_mem_access_put(gt_to_xe(gt));
+ xe_gt_info(gt, "suspended\n");
+
+ return 0;
+
+err_force_wake:
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+err_msg:
+ xe_device_mem_access_put(gt_to_xe(gt));
+ xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
+int xe_gt_resume(struct xe_gt *gt)
+{
+ int err;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (err)
+ goto err_msg;
+
+ err = do_gt_restart(gt);
+ if (err)
+ goto err_force_wake;
+
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_device_mem_access_put(gt_to_xe(gt));
+ xe_gt_info(gt, "resumed\n");
+
+ return 0;
+
+err_force_wake:
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+err_msg:
+ xe_device_mem_access_put(gt_to_xe(gt));
+ xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
+struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
+ enum xe_engine_class class,
+ u16 instance, bool logical)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id)
+ if (hwe->class == class &&
+ ((!logical && hwe->instance == instance) ||
+ (logical && hwe->logical_instance == instance)))
+ return hwe;
+
+ return NULL;
+}
+
+struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
+ enum xe_engine_class class)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ switch (class) {
+ case XE_ENGINE_CLASS_RENDER:
+ case XE_ENGINE_CLASS_COMPUTE:
+ if (hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE)
+ return hwe;
+ break;
+ default:
+ if (hwe->class == class)
+ return hwe;
+ }
+ }
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
new file mode 100644
index 000000000..4486e083f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_H_
+#define _XE_GT_H_
+
+#include <drm/drm_util.h>
+
+#include "xe_device_types.h"
+#include "xe_hw_engine.h"
+
+#define for_each_hw_engine(hwe__, gt__, id__) \
+ for ((id__) = 0; (id__) < ARRAY_SIZE((gt__)->hw_engines); (id__)++) \
+ for_each_if(((hwe__) = (gt__)->hw_engines + (id__)) && \
+ xe_hw_engine_is_valid((hwe__)))
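+
+/*
+ * Typical iteration, as used throughout the driver:
+ *
+ *	struct xe_hw_engine *hwe;
+ *	enum xe_hw_engine_id id;
+ *
+ *	for_each_hw_engine(hwe, gt, id)
+ *		xe_hw_engine_enable_ring(hwe);
+ */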
+
+#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
+
+#ifdef CONFIG_FAULT_INJECTION
+#include <linux/fault-inject.h> /* XXX: fault-inject.h is broken */
+extern struct fault_attr gt_reset_failure;
+static inline bool xe_fault_inject_gt_reset(void)
+{
+ return should_fail(&gt_reset_failure, 1);
+}
+#else
+static inline bool xe_fault_inject_gt_reset(void)
+{
+ return false;
+}
+#endif
+
+struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
+int xe_gt_init_early(struct xe_gt *gt);
+int xe_gt_init(struct xe_gt *gt);
+int xe_gt_record_default_lrcs(struct xe_gt *gt);
+void xe_gt_suspend_prepare(struct xe_gt *gt);
+int xe_gt_suspend(struct xe_gt *gt);
+int xe_gt_resume(struct xe_gt *gt);
+void xe_gt_reset_async(struct xe_gt *gt);
+void xe_gt_sanitize(struct xe_gt *gt);
+
+/**
+ * xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
+ * first that matches the same reset domain as @class
+ * @gt: GT structure
+ * @class: hw engine class to lookup
+ */
+struct xe_hw_engine *
+xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt, enum xe_engine_class class);
+
+struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
+ enum xe_engine_class class,
+ u16 instance,
+ bool logical);
+
+static inline bool xe_gt_is_media_type(struct xe_gt *gt)
+{
+ return gt->info.type == XE_GT_TYPE_MEDIA;
+}
+
+static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
+ hwe->instance == gt->usm.reserved_bcs_instance;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
new file mode 100644
index 000000000..396aeb5b9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_sysfs.h"
+#include "xe_mmio.h"
+
+static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
+{
+ u32 mode = CCS_MODE_CSLICE_0_3_MASK; /* disable all by default */
+ int num_slices = hweight32(CCS_MASK(gt));
+ struct xe_device *xe = gt_to_xe(gt);
+ int width, cslice = 0;
+ u32 config = 0;
+
+ xe_assert(xe, xe_gt_ccs_mode_enabled(gt));
+
+ xe_assert(xe, num_engines && num_engines <= num_slices);
+ xe_assert(xe, !(num_slices % num_engines));
+
+ /*
+ * Loop over all available slices and assign each a user engine.
+ * For example, if there are four compute slices available, the
+ * assignment of compute slices to compute engines would be,
+ *
+ * With 1 engine (ccs0):
+ * slice 0, 1, 2, 3: ccs0
+ *
+ * With 2 engines (ccs0, ccs1):
+ * slice 0, 2: ccs0
+ * slice 1, 3: ccs1
+ *
+ * With 4 engines (ccs0, ccs1, ccs2, ccs3):
+ * slice 0: ccs0
+ * slice 1: ccs1
+ * slice 2: ccs2
+ * slice 3: ccs3
+ */
+ for (width = num_slices / num_engines; width; width--) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COMPUTE)
+ continue;
+
+ if (hwe->logical_instance >= num_engines)
+ break;
+
+ config |= BIT(hwe->instance) << XE_HW_ENGINE_CCS0;
+
+ /* If a slice is fused off, leave disabled */
+ while ((CCS_MASK(gt) & BIT(cslice)) == 0)
+ cslice++;
+
+ mode &= ~CCS_MODE_CSLICE(cslice, CCS_MODE_CSLICE_MASK);
+ mode |= CCS_MODE_CSLICE(cslice, hwe->instance);
+ cslice++;
+ }
+ }
+
+ xe_mmio_write32(gt, CCS_MODE, mode);
+
+ xe_gt_info(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
+ mode, config, num_engines, num_slices);
+}
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt)
+{
+ if (!gt->ccs_mode)
+ return;
+
+ __xe_gt_apply_ccs_mode(gt, gt->ccs_mode);
+}
+
+static ssize_t
+num_cslices_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", hweight32(CCS_MASK(gt)));
+}
+
+static DEVICE_ATTR_RO(num_cslices);
+
+static ssize_t
+ccs_mode_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+
+ return sysfs_emit(buf, "%u\n", gt->ccs_mode);
+}
+
+static ssize_t
+ccs_mode_store(struct device *kdev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_gt *gt = kobj_to_gt(&kdev->kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 num_engines, num_slices;
+ int ret;
+
+ ret = kstrtou32(buff, 0, &num_engines);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure the number of engines specified is valid and that the
+ * slice count is an exact multiple of the engine count.
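+ * For example, with four compute slices the accepted values are 1, 2 and 4.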
+ */
+ num_slices = hweight32(CCS_MASK(gt));
+ if (!num_engines || num_engines > num_slices || num_slices % num_engines) {
+ xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n",
+ num_engines, num_slices);
+ return -EINVAL;
+ }
+
+ /* CCS mode can only be updated when there are no drm clients */
+ spin_lock(&xe->clients.lock);
+ if (xe->clients.count) {
+ spin_unlock(&xe->clients.lock);
+ return -EBUSY;
+ }
+
+ if (gt->ccs_mode != num_engines) {
+ xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
+ gt->ccs_mode = num_engines;
+ xe_gt_reset_async(gt);
+ }
+
+ spin_unlock(&xe->clients.lock);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(ccs_mode);
+
+static const struct attribute *gt_ccs_mode_attrs[] = {
+ &dev_attr_ccs_mode.attr,
+ &dev_attr_num_cslices.attr,
+ NULL,
+};
+
+static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
+}
+
+/**
+ * xe_gt_ccs_mode_sysfs_init - Initialize CCS mode sysfs interfaces
+ * @gt: GT structure
+ *
+ * Through a per-gt 'ccs_mode' sysfs interface, the user can enable a fixed
+ * number of compute hardware engines to which the available compute slices
+ * are to be allocated. This user configuration change triggers a gt reset
+ * and it is expected that there are no open drm clients while doing so.
+ * The number of available compute slices is exposed to user through a per-gt
+ * 'num_cslices' sysfs interface.
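+ *
+ * An illustrative interaction (the path below is hypothetical; the files
+ * live in the per-gt sysfs directory):
+ *
+ *   $ cat .../tile0/gt0/num_cslices
+ *   4
+ *   $ echo 2 > .../tile0/gt0/ccs_mode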
+ *
+ * Returns: Returns error value for failure and 0 for success.
+ */
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (!xe_gt_ccs_mode_enabled(gt))
+ return 0;
+
+ err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+ if (err)
+ return err;
+
+ return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.h b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
new file mode 100644
index 000000000..f8779852c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_CCS_MODE_H_
+#define _XE_GT_CCS_MODE_H_
+
+#include "xe_device_types.h"
+#include "xe_gt.h"
+#include "xe_gt_types.h"
+#include "xe_platform_types.h"
+
+void xe_gt_apply_ccs_mode(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+
+static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
+{
+ /* Check whether more than one compute engine is available */
+ return hweight32(CCS_MASK(gt)) > 1;
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
new file mode 100644
index 000000000..937054e31
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt_clock.h"
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_macros.h"
+#include "xe_mmio.h"
+
+static u32 read_reference_ts_freq(struct xe_gt *gt)
+{
+ u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE);
+ u32 base_freq, frac_freq;
+
+ base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
+ ts_override) + 1;
+ base_freq *= 1000000;
+
+ frac_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK,
+ ts_override);
+ frac_freq = 1000000 / (frac_freq + 1);
+
+ return base_freq + frac_freq;
+}
+
+static u32 get_crystal_clock_freq(u32 rpm_config_reg)
+{
+ const u32 f19_2_mhz = 19200000;
+ const u32 f24_mhz = 24000000;
+ const u32 f25_mhz = 25000000;
+ const u32 f38_4_mhz = 38400000;
+ u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK,
+ rpm_config_reg);
+
+ switch (crystal_clock) {
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
+ return f24_mhz;
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
+ return f19_2_mhz;
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
+ return f38_4_mhz;
+ case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
+ return f25_mhz;
+ default:
+ XE_WARN_ON("NOT_POSSIBLE");
+ return 0;
+ }
+}
+
+int xe_gt_clock_init(struct xe_gt *gt)
+{
+ u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE);
+ u32 freq = 0;
+
+ /* Assuming gen11+ so assert this assumption is correct */
+ xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
+
+ if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(gt);
+ } else {
+ u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0);
+
+ freq = get_crystal_clock_freq(c0);
+
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
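+ * For example, a shift parameter of 1 yields freq >>= 2, i.e. the
+ * timestamp counter ticks once every four crystal clocks.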
+ */
+ freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
+ }
+
+ gt->info.reference_clock = freq;
+ return 0;
+}
+
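+/*
+ * Worked example: with a 19.2 MHz reference clock, 96 cycles convert to
+ * 96 * NSEC_PER_SEC / 19,200,000 = 5000 ns.
+ */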
+u64 xe_gt_clock_cycles_to_ns(const struct xe_gt *gt, u64 count)
+{
+ return DIV_ROUND_CLOSEST_ULL(count * NSEC_PER_SEC, gt->info.reference_clock);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.h b/drivers/gpu/drm/xe/xe_gt_clock.h
new file mode 100644
index 000000000..aa162722f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_clock.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_CLOCK_H_
+#define _XE_GT_CLOCK_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_clock_init(struct xe_gt *gt);
+u64 xe_gt_clock_cycles_to_ns(const struct xe_gt *gt, u64 count);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
new file mode 100644
index 000000000..c4b67cf09
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt_debugfs.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_topology.h"
+#include "xe_hw_engine.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_pat.h"
+#include "xe_reg_sr.h"
+#include "xe_reg_whitelist.h"
+#include "xe_uc_debugfs.h"
+#include "xe_wa.h"
+
+static struct xe_gt *node_to_gt(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static int hw_engines(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int err;
+
+ xe_device_mem_access_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (err) {
+ xe_device_mem_access_put(xe);
+ return err;
+ }
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_hw_engine_print(hwe, &p);
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_device_mem_access_put(xe);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int force_reset(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+
+ xe_gt_reset_async(gt);
+
+ return 0;
+}
+
+static int sa_info(struct seq_file *m, void *data)
+{
+ struct xe_tile *tile = gt_to_tile(node_to_gt(m->private));
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, &p,
+ tile->mem.kernel_bb_pool->gpu_addr);
+
+ return 0;
+}
+
+static int topology(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_gt_topology_dump(gt, &p);
+
+ return 0;
+}
+
+static int steering(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_gt_mcr_steering_dump(gt, &p);
+
+ return 0;
+}
+
+static int ggtt(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ return xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, &p);
+}
+
+static int register_save_restore(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ xe_reg_sr_dump(&gt->reg_sr, &p);
+ drm_printf(&p, "\n");
+
+ drm_printf(&p, "Engine\n");
+ for_each_hw_engine(hwe, gt, id)
+ xe_reg_sr_dump(&hwe->reg_sr, &p);
+ drm_printf(&p, "\n");
+
+ drm_printf(&p, "LRC\n");
+ for_each_hw_engine(hwe, gt, id)
+ xe_reg_sr_dump(&hwe->reg_lrc, &p);
+ drm_printf(&p, "\n");
+
+ drm_printf(&p, "Whitelist\n");
+ for_each_hw_engine(hwe, gt, id)
+ xe_reg_whitelist_dump(&hwe->reg_whitelist, &p);
+
+ return 0;
+}
+
+static int workarounds(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_wa_dump(gt, &p);
+
+ return 0;
+}
+
+static int pat(struct seq_file *m, void *data)
+{
+ struct xe_gt *gt = node_to_gt(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_pat_dump(gt, &p);
+
+ return 0;
+}
+
+static int rcs_default_lrc(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_RENDER);
+ return 0;
+}
+
+static int ccs_default_lrc(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COMPUTE);
+ return 0;
+}
+
+static int bcs_default_lrc(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_COPY);
+ return 0;
+}
+
+static int vcs_default_lrc(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_DECODE);
+ return 0;
+}
+
+static int vecs_default_lrc(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_lrc_dump_default(&p, node_to_gt(m->private), XE_ENGINE_CLASS_VIDEO_ENHANCE);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"hw_engines", hw_engines, 0},
+ {"force_reset", force_reset, 0},
+ {"sa_info", sa_info, 0},
+ {"topology", topology, 0},
+ {"steering", steering, 0},
+ {"ggtt", ggtt, 0},
+ {"register-save-restore", register_save_restore, 0},
+ {"workarounds", workarounds, 0},
+ {"pat", pat, 0},
+ {"default_lrc_rcs", rcs_default_lrc},
+ {"default_lrc_ccs", ccs_default_lrc},
+ {"default_lrc_bcs", bcs_default_lrc},
+ {"default_lrc_vcs", vcs_default_lrc},
+ {"default_lrc_vecs", vecs_default_lrc},
+};
+
+void xe_gt_debugfs_register(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
+ struct dentry *root;
+ struct drm_info_list *local;
+ char name[8];
+ int i;
+
+ xe_gt_assert(gt, minor->debugfs_root);
+
+ sprintf(name, "gt%d", gt->info.id);
+ root = debugfs_create_dir(name, minor->debugfs_root);
+ if (IS_ERR(root)) {
+ drm_warn(&xe->drm, "Create GT directory failed");
+ return;
+ }
+
+ /*
+ * Allocate local copy as we need to pass in the GT to the debugfs
+ * entry and drm_debugfs_create_files just references the drm_info_list
+ * passed in (i.e., it can't be defined on the stack).
+ */
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = gt;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ root, minor);
+
+ xe_uc_debugfs_register(&gt->uc, root);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.h b/drivers/gpu/drm/xe/xe_gt_debugfs.h
new file mode 100644
index 000000000..5a329f118
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_DEBUGFS_H_
+#define _XE_GT_DEBUGFS_H_
+
+struct xe_gt;
+
+void xe_gt_debugfs_register(struct xe_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
new file mode 100644
index 000000000..e5b0f4ecd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gt_freq.h"
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+#include "xe_gt_sysfs.h"
+#include "xe_gt_throttle_sysfs.h"
+#include "xe_guc_pc.h"
+
+/**
+ * DOC: Xe GT Frequency Management
+ *
+ * This component is responsible for the raw GT frequency management, including
+ * the sysfs API.
+ *
+ * Underneath, Xe enables GuC SLPC automated frequency management. GuC is then
+ * allowed to request from PCODE any frequency between the minimum and the
+ * maximum selected by this component. Note, however, that PCODE remains the
+ * ultimate decision maker for the actual running frequency, based on thermal
+ * and other running conditions.
+ *
+ * Xe's Freq provides a sysfs API for frequency management:
+ *
+ * device/tile#/gt#/freq0/<item>_freq *read-only* files:
+ * - act_freq: The actual resolved frequency decided by PCODE.
+ * - cur_freq: The current one requested by GuC PC to the PCODE.
+ * - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
+ * - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
+ * - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one.
+ *
+ * device/tile#/gt#/freq0/<item>_freq *read-write* files:
+ * - min_freq: Min frequency request.
+ * - max_freq: Max frequency request.
+ * If max <= min, then freq_min becomes a fixed frequency request.
+ */
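+
+/*
+ * Illustrative shell interaction (paths are hypothetical and depend on the
+ * actual card/tile/gt layout):
+ *
+ *   $ cat .../device/tile0/gt0/freq0/act_freq
+ *   $ echo 500 > .../device/tile0/gt0/freq0/min_freq
+ */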
+
+static struct xe_guc_pc *
+dev_to_pc(struct device *dev)
+{
+ return &kobj_to_gt(dev->kobj.parent)->uc.guc.pc;
+}
+
+static ssize_t act_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+
+ return sysfs_emit(buf, "%d\n", xe_guc_pc_get_act_freq(pc));
+}
+static DEVICE_ATTR_RO(act_freq);
+
+static ssize_t cur_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+ ssize_t ret;
+
+ ret = xe_guc_pc_get_cur_freq(pc, &freq);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+static DEVICE_ATTR_RO(cur_freq);
+
+static ssize_t rp0_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+
+ return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc));
+}
+static DEVICE_ATTR_RO(rp0_freq);
+
+static ssize_t rpe_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+
+ return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpe_freq(pc));
+}
+static DEVICE_ATTR_RO(rpe_freq);
+
+static ssize_t rpn_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+
+ return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
+}
+static DEVICE_ATTR_RO(rpn_freq);
+
+static ssize_t min_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+ ssize_t ret;
+
+ ret = xe_guc_pc_get_min_freq(pc, &freq);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+
+static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+ ssize_t ret;
+
+ ret = kstrtou32(buff, 0, &freq);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_pc_set_min_freq(pc, freq);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(min_freq);
+
+static ssize_t max_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+ ssize_t ret;
+
+ ret = xe_guc_pc_get_max_freq(pc, &freq);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+
+static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+ ssize_t ret;
+
+ ret = kstrtou32(buff, 0, &freq);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_pc_set_max_freq(pc, freq);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(max_freq);
+
+static const struct attribute *freq_attrs[] = {
+ &dev_attr_act_freq.attr,
+ &dev_attr_cur_freq.attr,
+ &dev_attr_rp0_freq.attr,
+ &dev_attr_rpe_freq.attr,
+ &dev_attr_rpn_freq.attr,
+ &dev_attr_min_freq.attr,
+ &dev_attr_max_freq.attr,
+ NULL
+};
+
+static void freq_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_files(kobj, freq_attrs);
+ kobject_put(kobj);
+}
+
+/**
+ * xe_gt_freq_init - Initialize Xe Freq component
+ * @gt: Xe GT object
+ *
+ * It needs to be initialized after GT Sysfs and GuC PC components are ready.
+ */
+void xe_gt_freq_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ if (xe->info.skip_guc_pc)
+ return;
+
+ gt->freq = kobject_create_and_add("freq0", gt->sysfs);
+ if (!gt->freq) {
+ drm_warn(&xe->drm, "failed to add freq0 directory to %s\n",
+ kobject_name(gt->sysfs));
+ return;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, freq_fini, gt->freq);
+ if (err) {
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+ return;
+ }
+
+ err = sysfs_create_files(gt->freq, freq_attrs);
+ if (err)
+ drm_warn(&xe->drm, "failed to add freq attrs to %s, err: %d\n",
+ kobject_name(gt->freq), err);
+
+ xe_gt_throttle_sysfs_init(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.h b/drivers/gpu/drm/xe/xe_gt_freq.h
new file mode 100644
index 000000000..f3fe3c904
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_freq.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_FREQ_H_
+#define _XE_GT_FREQ_H_
+
+struct xe_gt;
+
+void xe_gt_freq_init(struct xe_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
new file mode 100644
index 000000000..9fcae65b6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_idle.h"
+#include "xe_gt_sysfs.h"
+#include "xe_guc_pc.h"
+#include "regs/xe_gt_regs.h"
+#include "xe_mmio.h"
+
+/**
+ * DOC: Xe GT Idle
+ *
+ * Contains functions that init GT idle features like C6
+ *
+ * device/gt#/gtidle/name - name of the state
+ * device/gt#/gtidle/idle_residency_ms - Provides residency of the idle state in ms
+ * device/gt#/gtidle/idle_status - Provides current idle state
+ */
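+
+/*
+ * Illustrative reads (paths are hypothetical):
+ *
+ *   $ cat .../gt0/gtidle/name
+ *   gt0-rc
+ *   $ cat .../gt0/gtidle/idle_status
+ *   gt-c6
+ */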
+
+static struct xe_gt_idle *dev_to_gtidle(struct device *dev)
+{
+ struct kobject *kobj = &dev->kobj;
+
+ return &kobj_to_gt(kobj->parent)->gtidle;
+}
+
+static struct xe_gt *gtidle_to_gt(struct xe_gt_idle *gtidle)
+{
+ return container_of(gtidle, struct xe_gt, gtidle);
+}
+
+static struct xe_guc_pc *gtidle_to_pc(struct xe_gt_idle *gtidle)
+{
+ return &gtidle_to_gt(gtidle)->uc.guc.pc;
+}
+
+static const char *gt_idle_state_to_string(enum xe_gt_idle_state state)
+{
+ switch (state) {
+ case GT_IDLE_C0:
+ return "gt-c0";
+ case GT_IDLE_C6:
+ return "gt-c6";
+ default:
+ return "unknown";
+ }
+}
+
+static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
+{
+ u64 delta, overflow_residency, prev_residency;
+
+ overflow_residency = BIT_ULL(32);
+
+ /*
+ * Counter wrap handling
+ * Store previous hw counter values for counter wrap-around handling
+ * This relies on sufficiently frequent queries; otherwise the counter can still wrap.
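+ * Example: prev = 0xFFFFFF00, cur = 0x100 =>
+ * delta = 0x100 + (2^32 - 0xFFFFFF00) = 0x200.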
+ */
+ prev_residency = gtidle->prev_residency;
+ gtidle->prev_residency = cur_residency;
+
+ /* delta */
+ if (cur_residency >= prev_residency)
+ delta = cur_residency - prev_residency;
+ else
+ delta = cur_residency + (overflow_residency - prev_residency);
+
+ /* Add delta to extended raw driver copy of idle residency */
+ cur_residency = gtidle->cur_residency + delta;
+ gtidle->cur_residency = cur_residency;
+
+ /* residency multiplier in ns, convert to ms */
+ cur_residency = mul_u64_u32_div(cur_residency, gtidle->residency_multiplier, 1e6);
+
+ return cur_residency;
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
+
+ return sysfs_emit(buff, "%s\n", gtidle->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t idle_status_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ enum xe_gt_idle_state state;
+
+ state = gtidle->idle_status(pc);
+
+ return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
+}
+static DEVICE_ATTR_RO(idle_status);
+
+static ssize_t idle_residency_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ u64 residency;
+
+ residency = gtidle->idle_residency(pc);
+ return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
+}
+static DEVICE_ATTR_RO(idle_residency_ms);
+
+static const struct attribute *gt_idle_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_idle_status.attr,
+ &dev_attr_idle_residency_ms.attr,
+ NULL,
+};
+
+static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_files(kobj, gt_idle_attrs);
+ kobject_put(kobj);
+}
+
+void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
+{
+ struct xe_gt *gt = gtidle_to_gt(gtidle);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct kobject *kobj;
+ int err;
+
+ kobj = kobject_create_and_add("gtidle", gt->sysfs);
+ if (!kobj) {
+ drm_warn(&xe->drm, "%s failed, err: %d\n", __func__, -ENOMEM);
+ return;
+ }
+
+ if (xe_gt_is_media_type(gt)) {
+ sprintf(gtidle->name, "gt%d-mc", gt->info.id);
+ gtidle->idle_residency = xe_guc_pc_mc6_residency;
+ } else {
+ sprintf(gtidle->name, "gt%d-rc", gt->info.id);
+ gtidle->idle_residency = xe_guc_pc_rc6_residency;
+ }
+
+ /* Multiplier for Residency counter in units of 1.28us */
+ gtidle->residency_multiplier = 1280;
+ gtidle->idle_status = xe_guc_pc_c_status;
+
+ err = sysfs_create_files(kobj, gt_idle_attrs);
+ if (err) {
+ kobject_put(kobj);
+ drm_warn(&xe->drm, "failed to register gtidle sysfs, err: %d\n", err);
+ return;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, gt_idle_sysfs_fini, kobj);
+ if (err)
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+}
+
+void xe_gt_idle_enable_c6(struct xe_gt *gt)
+{
+ xe_device_assert_mem_access(gt_to_xe(gt));
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ /* Units of 1280 ns for a total of 5s */
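+ /* 0x3B9ACA = 3,906,250; 3,906,250 * 1280 ns = 5 s */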
+ xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA);
+ /* Enable RC6 */
+ xe_mmio_write32(gt, RC_CONTROL,
+ RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
+}
+
+void xe_gt_idle_disable_c6(struct xe_gt *gt)
+{
+ xe_device_assert_mem_access(gt_to_xe(gt));
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+
+ xe_mmio_write32(gt, PG_ENABLE, 0);
+ xe_mmio_write32(gt, RC_CONTROL, 0);
+ xe_mmio_write32(gt, RC_STATE, 0);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
new file mode 100644
index 000000000..69280fd16
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_IDLE_H_
+#define _XE_GT_IDLE_H_
+
+#include "xe_gt_idle_types.h"
+
+struct xe_gt;
+
+void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle);
+void xe_gt_idle_enable_c6(struct xe_gt *gt);
+void xe_gt_idle_disable_c6(struct xe_gt *gt);
+
+#endif /* _XE_GT_IDLE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h
new file mode 100644
index 000000000..f99b44753
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_IDLE_SYSFS_TYPES_H_
+#define _XE_GT_IDLE_SYSFS_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_guc_pc;
+
+/* States of GT Idle */
+enum xe_gt_idle_state {
+ GT_IDLE_C0,
+ GT_IDLE_C6,
+ GT_IDLE_UNKNOWN,
+};
+
+/**
+ * struct xe_gt_idle - A struct that contains idle properties based on the GT
+ */
+struct xe_gt_idle {
+ /** @name: name of the GT idle state, e.g. "gt0-rc" or "gt0-mc" */
+ char name[16];
+ /** @residency_multiplier: residency multiplier in ns */
+ u32 residency_multiplier;
+ /** @cur_residency: raw driver copy of idle residency */
+ u64 cur_residency;
+ /** @prev_residency: previous residency counter */
+ u64 prev_residency;
+ /** @idle_status: get the current idle state */
+ enum xe_gt_idle_state (*idle_status)(struct xe_guc_pc *pc);
+ /** @idle_residency: get idle residency counter */
+ u64 (*idle_residency)(struct xe_guc_pc *pc);
+};
+
+#endif /* _XE_GT_IDLE_SYSFS_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
new file mode 100644
index 000000000..8546cd3cc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt_mcr.h"
+
+#include "regs/xe_gt_regs.h"
+#include "xe_gt.h"
+#include "xe_gt_topology.h"
+#include "xe_gt_types.h"
+#include "xe_mmio.h"
+
+/**
+ * DOC: GT Multicast/Replicated (MCR) Register Support
+ *
+ * Some GT registers are designed as "multicast" or "replicated" registers:
+ * multiple instances of the same register share a single MMIO offset. MCR
+ * registers are generally used when the hardware needs to potentially track
+ * independent values of a register per hardware unit (e.g., per-subslice,
+ * per-L3bank, etc.). The specific types of replication that exist vary
+ * per-platform.
+ *
+ * MMIO accesses to MCR registers are controlled according to the settings
+ * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
+ * registers can be done in either multicast (a single write updates all
+ * instances of the register to the same value) or unicast (a write updates only
+ * one specific instance) form. Reads of MCR registers always operate in a
+ * unicast manner regardless of how the multicast/unicast bit is set in
+ * MCR_SELECTOR. Selection of a specific MCR instance for unicast operations is
+ * referred to as "steering."
+ *
+ * If MCR register operations are steered toward a hardware unit that is
+ * fused off or currently powered down due to power gating, the MMIO operation
+ * is "terminated" by the hardware. Terminated read operations will return a
+ * value of zero and terminated unicast write operations will be silently
+ * ignored. During device initialization, the goal of the various
+ * ``init_steering_*()`` functions is to apply the platform-specific rules for
+ * each MCR register type to identify a steering target that will select a
+ * non-terminated instance.
+ */
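+
+/*
+ * A minimal usage sketch of the helpers below (register names are
+ * hypothetical; real definitions live in regs/xe_gt_regs.h):
+ *
+ *	u32 val = xe_gt_mcr_unicast_read_any(gt, SOME_MCR_REG);
+ *	xe_gt_mcr_multicast_write(gt, SOME_MCR_REG, val | SOME_BIT);
+ */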
+
+#define STEER_SEMAPHORE XE_REG(0xFD0)
+
+static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
+{
+ return reg_mcr.__reg;
+}
+
+enum {
+ MCR_OP_READ,
+ MCR_OP_WRITE
+};
+
+static const struct xe_mmio_range xelp_l3bank_steering_table[] = {
+ { 0x00B100, 0x00B3FF },
+ {},
+};
+
+static const struct xe_mmio_range xehp_l3bank_steering_table[] = {
+ { 0x008C80, 0x008CFF },
+ { 0x00B100, 0x00B3FF },
+ {},
+};
+
+/*
+ * Although the bspec lists more "MSLICE" ranges than shown here, some of those
+ * are of a "GAM" subclass that has special rules and doesn't need to be
+ * included here.
+ */
+static const struct xe_mmio_range xehp_mslice_steering_table[] = {
+ { 0x00DD00, 0x00DDFF },
+ { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
+ {},
+};
+
+static const struct xe_mmio_range xehp_lncf_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D880, 0x00D8FF },
+ {},
+};
+
+/*
+ * We have several types of MCR registers where steering to (0,0) will always
+ * provide us with a non-terminated value. We'll stick them all in the same
+ * table for simplicity.
+ */
+static const struct xe_mmio_range xehpc_instance0_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* HALF-BSLICE */
+ { 0x008800, 0x00887F }, /* CC */
+ { 0x008A80, 0x008AFF }, /* TILEPSMI */
+ { 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
+ { 0x00B100, 0x00B3FF }, /* L3BANK */
+ { 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
+ { 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
+ { 0x00DD00, 0x00DDFF }, /* BSLICE */
+ { 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
+ { 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
+ { 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
+ { 0x024180, 0x0241FF }, /* HALF-BSLICE */
+ {},
+};
+
+static const struct xe_mmio_range xelpg_instance0_steering_table[] = {
+ { 0x000B00, 0x000BFF }, /* SQIDI */
+ { 0x001000, 0x001FFF }, /* SQIDI */
+ { 0x004000, 0x0048FF }, /* GAM */
+ { 0x008700, 0x0087FF }, /* SQIDI */
+ { 0x00B000, 0x00B0FF }, /* NODE */
+ { 0x00C800, 0x00CFFF }, /* GAM */
+ { 0x00D880, 0x00D8FF }, /* NODE */
+ { 0x00DD00, 0x00DDFF }, /* OAAL2 */
+ {},
+};
+
+static const struct xe_mmio_range xelpg_l3bank_steering_table[] = {
+ { 0x00B100, 0x00B3FF },
+ {},
+};
+
+static const struct xe_mmio_range xelp_dss_steering_table[] = {
+ { 0x008150, 0x00815F },
+ { 0x009520, 0x00955F },
+ { 0x00DE80, 0x00E8FF },
+ { 0x024A00, 0x024A7F },
+ {},
+};
+
+/* DSS steering is used for GSLICE ranges as well */
+static const struct xe_mmio_range xehp_dss_steering_table[] = {
+ { 0x005200, 0x0052FF }, /* GSLICE */
+ { 0x005400, 0x007FFF }, /* GSLICE */
+ { 0x008140, 0x00815F }, /* GSLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
+ { 0x008D00, 0x008DFF }, /* DSS */
+ { 0x0094D0, 0x00955F }, /* GSLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
+ { 0x009680, 0x0096FF }, /* DSS */
+ { 0x00D800, 0x00D87F }, /* GSLICE */
+ { 0x00DC00, 0x00DCFF }, /* GSLICE */
+ { 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
+ { 0x017000, 0x017FFF }, /* GSLICE */
+ { 0x024A00, 0x024A7F }, /* DSS */
+ {},
+};
+
+/* DSS steering is used for COMPUTE ranges as well */
+static const struct xe_mmio_range xehpc_dss_steering_table[] = {
+ { 0x008140, 0x00817F }, /* COMPUTE (0x8140-0x814F & 0x8160-0x817F), DSS (0x8150-0x815F) */
+ { 0x0094D0, 0x00955F }, /* COMPUTE (0x94D0-0x951F), DSS (0x9520-0x955F) */
+ { 0x009680, 0x0096FF }, /* DSS */
+ { 0x00DC00, 0x00DCFF }, /* COMPUTE */
+ { 0x00DE80, 0x00E7FF }, /* DSS (0xDF00-0xE1FF reserved) */
+ {},
+};
+
+/* DSS steering is used for SLICE ranges as well */
+static const struct xe_mmio_range xelpg_dss_steering_table[] = {
+ { 0x005200, 0x0052FF }, /* SLICE */
+ { 0x005500, 0x007FFF }, /* SLICE */
+ { 0x008140, 0x00815F }, /* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
+ { 0x0094D0, 0x00955F }, /* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
+ { 0x009680, 0x0096FF }, /* DSS */
+ { 0x00D800, 0x00D87F }, /* SLICE */
+ { 0x00DC00, 0x00DCFF }, /* SLICE */
+ { 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
+ {},
+};
+
+static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
+ { 0x393200, 0x39323F },
+ { 0x393400, 0x3934FF },
+ {},
+};
+
+static const struct xe_mmio_range dg2_implicit_steering_table[] = {
+ { 0x000B00, 0x000BFF }, /* SF (SQIDI replication) */
+ { 0x001000, 0x001FFF }, /* SF (SQIDI replication) */
+ { 0x004000, 0x004AFF }, /* GAM (MSLICE replication) */
+ { 0x008700, 0x0087FF }, /* MCFG (SQIDI replication) */
+ { 0x00C800, 0x00CFFF }, /* GAM (MSLICE replication) */
+ { 0x00F000, 0x00FFFF }, /* GAM (MSLICE replication) */
+ {},
+};
+
+static const struct xe_mmio_range xe2lpg_dss_steering_table[] = {
+ { 0x005200, 0x0052FF }, /* SLICE */
+ { 0x005500, 0x007FFF }, /* SLICE */
+ { 0x008140, 0x00815F }, /* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
+ { 0x0094D0, 0x00955F }, /* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
+ { 0x009680, 0x0096FF }, /* DSS */
+ { 0x00D800, 0x00D87F }, /* SLICE */
+ { 0x00DC00, 0x00DCFF }, /* SLICE */
+ { 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
+ { 0x00E980, 0x00E9FF }, /* SLICE */
+ { 0x013000, 0x0133FF }, /* DSS (0x13000-0x131FF), SLICE (0x13200-0x133FF) */
+ {},
+};
+
+static const struct xe_mmio_range xe2lpg_sqidi_psmi_steering_table[] = {
+ { 0x000B00, 0x000BFF },
+ { 0x001000, 0x001FFF },
+ {},
+};
+
+static const struct xe_mmio_range xe2lpg_instance0_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* GAM, rsvd, GAMWKR */
+ { 0x008700, 0x00887F }, /* SQIDI, MEMPIPE */
+ { 0x00B000, 0x00B3FF }, /* NODE, L3BANK */
+ { 0x00C800, 0x00CFFF }, /* GAM */
+ { 0x00D880, 0x00D8FF }, /* NODE */
+ { 0x00DD00, 0x00DDFF }, /* MEMPIPE */
+ { 0x00E900, 0x00E97F }, /* MEMPIPE */
+ { 0x00F000, 0x00FFFF }, /* GAM, GAMWKR */
+ { 0x013400, 0x0135FF }, /* MEMPIPE */
+ {},
+};
+
+static const struct xe_mmio_range xe2lpm_gpmxmt_steering_table[] = {
+ { 0x388160, 0x38817F },
+ { 0x389480, 0x3894CF },
+ {},
+};
+
+static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
+ { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */
+ { 0x384900, 0x384AFF }, /* GAM */
+ { 0x389560, 0x3895FF }, /* MEDIAINF */
+ { 0x38B600, 0x38B8FF }, /* L3BANK */
+ { 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */
+ { 0x38F000, 0x38F0FF }, /* GAM */
+ { 0x393C00, 0x393C7F }, /* MEDIAINF */
+ {},
+};
+
+static void init_steering_l3bank(struct xe_gt *gt)
+{
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
+ u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
+ xe_mmio_read32(gt, MIRROR_FUSE3));
+ u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
+ xe_mmio_read32(gt, XEHP_FUSE4));
+
+ /*
+ * Group selects mslice, instance selects bank within mslice.
+ * Bank 0 is always valid _except_ when the bank mask is 010b.
+ */
+ gt->steering[L3BANK].group_target = __ffs(mslice_mask);
+ gt->steering[L3BANK].instance_target =
+ bank_mask & BIT(0) ? 0 : 2;
+ } else if (gt_to_xe(gt)->info.platform == XE_DG2) {
+ u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
+ xe_mmio_read32(gt, MIRROR_FUSE3));
+ u32 bank = __ffs(mslice_mask) * 8;
+
+ /*
+ * Like mslice registers, look for a valid mslice and steer to
+ * the first L3BANK of that quad. Access to the Nth L3 bank is
+ * split between the first bits of group and instance
+ */
+ gt->steering[L3BANK].group_target = (bank >> 2) & 0x7;
+ gt->steering[L3BANK].instance_target = bank & 0x3;
+ } else {
+ u32 fuse = REG_FIELD_GET(L3BANK_MASK,
+ ~xe_mmio_read32(gt, MIRROR_FUSE3));
+
+ gt->steering[L3BANK].group_target = 0; /* unused */
+ gt->steering[L3BANK].instance_target = __ffs(fuse);
+ }
+}
+
+static void init_steering_mslice(struct xe_gt *gt)
+{
+ u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
+ xe_mmio_read32(gt, MIRROR_FUSE3));
+
+ /*
+ * mslice registers are valid (not terminated) if either the meml3
+ * associated with the mslice is present, or at least one DSS associated
+ * with the mslice is present. There will always be at least one meml3
+ * so we can just use that to find a non-terminated mslice and ignore
+ * the DSS fusing.
+ */
+ gt->steering[MSLICE].group_target = __ffs(mask);
+ gt->steering[MSLICE].instance_target = 0; /* unused */
+
+ /*
+ * LNCF termination is also based on mslice presence, so we'll set
+ * it up here. Either LNCF within a non-terminated mslice will work,
+ * so we just always pick LNCF 0 here.
+ */
+ gt->steering[LNCF].group_target = __ffs(mask) << 1;
+ gt->steering[LNCF].instance_target = 0; /* unused */
+}
+
+static void init_steering_dss(struct xe_gt *gt)
+{
+ unsigned int dss = min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
+ xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0));
+ unsigned int dss_per_grp = gt_to_xe(gt)->info.platform == XE_PVC ? 8 : 4;
+
+ gt->steering[DSS].group_target = dss / dss_per_grp;
+ gt->steering[DSS].instance_target = dss % dss_per_grp;
+}
+
+static void init_steering_oaddrm(struct xe_gt *gt)
+{
+ /*
+ * First instance is only terminated if the entire first media slice
+ * is absent (i.e., no VCS0 or VECS0).
+ */
+ if (gt->info.engine_mask & (XE_HW_ENGINE_VCS0 | XE_HW_ENGINE_VECS0))
+ gt->steering[OADDRM].group_target = 0;
+ else
+ gt->steering[OADDRM].group_target = 1;
+
+ gt->steering[OADDRM].instance_target = 0; /* unused */
+}
+
+static void init_steering_sqidi_psmi(struct xe_gt *gt)
+{
+ u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
+ xe_mmio_read32(gt, MIRROR_FUSE3));
+ u32 select = __ffs(mask);
+
+ gt->steering[SQIDI_PSMI].group_target = select >> 1;
+ gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
+}
+
+static void init_steering_inst0(struct xe_gt *gt)
+{
+ gt->steering[INSTANCE0].group_target = 0; /* unused */
+ gt->steering[INSTANCE0].instance_target = 0; /* unused */
+}
+
+static const struct {
+ const char *name;
+ void (*init)(struct xe_gt *gt);
+} xe_steering_types[] = {
+ [L3BANK] = { "L3BANK", init_steering_l3bank },
+ [MSLICE] = { "MSLICE", init_steering_mslice },
+ [LNCF] = { "LNCF", NULL }, /* initialized by mslice init */
+ [DSS] = { "DSS", init_steering_dss },
+ [OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
+ [SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
+ [INSTANCE0] = { "INSTANCE 0", init_steering_inst0 },
+ [IMPLICIT_STEERING] = { "IMPLICIT", NULL },
+};
+
+void xe_gt_mcr_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
+ BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);
+
+ spin_lock_init(&gt->mcr_lock);
+
+ if (gt->info.type == XE_GT_TYPE_MEDIA) {
+ drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);
+
+ if (MEDIA_VER(xe) >= 20) {
+ gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
+ gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
+ } else {
+ gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table;
+ }
+ } else {
+ if (GRAPHICS_VER(xe) >= 20) {
+ gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
+ gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
+ gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table;
+ } else if (GRAPHICS_VERx100(xe) >= 1270) {
+ gt->steering[INSTANCE0].ranges = xelpg_instance0_steering_table;
+ gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table;
+ gt->steering[DSS].ranges = xelpg_dss_steering_table;
+ } else if (xe->info.platform == XE_PVC) {
+ gt->steering[INSTANCE0].ranges = xehpc_instance0_steering_table;
+ gt->steering[DSS].ranges = xehpc_dss_steering_table;
+ } else if (xe->info.platform == XE_DG2) {
+ gt->steering[L3BANK].ranges = xehp_l3bank_steering_table;
+ gt->steering[MSLICE].ranges = xehp_mslice_steering_table;
+ gt->steering[LNCF].ranges = xehp_lncf_steering_table;
+ gt->steering[DSS].ranges = xehp_dss_steering_table;
+ gt->steering[IMPLICIT_STEERING].ranges = dg2_implicit_steering_table;
+ } else {
+ gt->steering[L3BANK].ranges = xelp_l3bank_steering_table;
+ gt->steering[DSS].ranges = xelp_dss_steering_table;
+ }
+ }
+
+ /* Select non-terminated steering target for each type */
+ for (int i = 0; i < NUM_STEERING_TYPES; i++)
+ if (gt->steering[i].ranges && xe_steering_types[i].init)
+ xe_steering_types[i].init(gt);
+}
+
+/**
+ * xe_gt_mcr_set_implicit_defaults - Initialize steer control registers
+ * @gt: GT structure
+ *
+ * Some register ranges don't need to have their steering control registers
+ * changed on each access - it's sufficient to set them once on initialization.
+ * This function sets those registers for each platform.
+ */
+void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (xe->info.platform == XE_DG2) {
+ u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
+ REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
+
+ xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
+ /*
+ * For GAM registers, all reads should be directed to instance 1
+ * (unicast reads against other instances are not allowed),
+ * and instance 1 is already the hardware's default steering
+ * target, which we never change
+ */
+ }
+}
+
+/*
+ * xe_gt_mcr_get_nonterminated_steering - find group/instance values that
+ * will steer a register to a non-terminated instance
+ * @gt: GT structure
+ * @reg: register for which the steering is required
+ * @group: return variable for group steering
+ * @instance: return variable for instance steering
+ *
+ * This function returns a group/instance pair that is guaranteed to work for
+ * read steering of the given register. Note that a value will be returned even
+ * if the register is not replicated and therefore does not actually require
+ * steering.
+ *
+ * Returns true if the caller should steer to the @group/@instance values
+ * returned. Returns false if the caller need not perform any steering
+ */
+static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
+{
+ const struct xe_reg reg = to_xe_reg(reg_mcr);
+ const struct xe_mmio_range *implicit_ranges;
+
+ for (int type = 0; type < IMPLICIT_STEERING; type++) {
+ if (!gt->steering[type].ranges)
+ continue;
+
+ for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
+ if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) {
+ *group = gt->steering[type].group_target;
+ *instance = gt->steering[type].instance_target;
+ return true;
+ }
+ }
+ }
+
+ implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
+ if (implicit_ranges)
+ for (int i = 0; implicit_ranges[i].end > 0; i++)
+ if (xe_mmio_in_range(gt, &implicit_ranges[i], reg))
+ return false;
+
+ /*
+ * Not found in a steering table and not a register with implicit
+ * steering. Just steer to 0/0 as a guess and raise a warning.
+ */
+ drm_WARN(&gt_to_xe(gt)->drm, true,
+ "Did not find MCR register %#x in any MCR steering table\n",
+ reg.addr);
+ *group = 0;
+ *instance = 0;
+
+ return true;
+}
+
+/*
+ * Obtain exclusive access to MCR steering. On MTL and beyond we also need
+ * to synchronize with external clients (e.g., firmware), so a semaphore
+ * register will also need to be taken.
+ */
+static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int ret = 0;
+
+ spin_lock(&gt->mcr_lock);
+
+ /*
+ * Starting with MTL we also need to grab a semaphore register
+ * to synchronize with external agents (e.g., firmware) that now
+ * share the same steering control register. The semaphore is obtained
+ * when a read to the relevant register returns 1.
+ */
+ if (GRAPHICS_VERx100(xe) >= 1270)
+ ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
+ true);
+
+ drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
+}
+
+static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
+{
+ /* Release hardware semaphore - this is done by writing 1 to the register */
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
+ xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1);
+
+ spin_unlock(&gt->mcr_lock);
+}
+
+/*
+ * Access a register with specific MCR steering
+ *
+ * Caller needs to make sure the relevant forcewake wells are up.
+ */
+static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
+ u8 rw_flag, int group, int instance, u32 value)
+{
+ const struct xe_reg reg = to_xe_reg(reg_mcr);
+ struct xe_reg steer_reg;
+ u32 steer_val, val = 0;
+
+ lockdep_assert_held(&gt->mcr_lock);
+
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
+ steer_reg = MTL_MCR_SELECTOR;
+ steer_val = REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
+ REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance);
+ } else {
+ steer_reg = MCR_SELECTOR;
+ steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, group) |
+ REG_FIELD_PREP(MCR_SUBSLICE_MASK, instance);
+ }
+
+ /*
+ * Always leave the hardware in multicast mode when doing reads and only
+ * change it to unicast mode when doing writes of a specific instance.
+ *
+ * The setting of the multicast/unicast bit usually wouldn't matter for
+ * read operations (which always return the value from a single register
+ * instance regardless of how that bit is set), but some platforms may
+ * have workarounds requiring us to remain in multicast mode for reads,
+ * e.g. Wa_22013088509 on PVC. There's no real downside to this, so
+ * we'll just go ahead and do so on all platforms; we'll only clear the
+ * multicast bit from the mask when explicitly doing a write operation.
+ *
+ * No need to save old steering reg value.
+ */
+ if (rw_flag == MCR_OP_READ)
+ steer_val |= MCR_MULTICAST;
+
+ xe_mmio_write32(gt, steer_reg, steer_val);
+
+ if (rw_flag == MCR_OP_READ)
+ val = xe_mmio_read32(gt, reg);
+ else
+ xe_mmio_write32(gt, reg, value);
+
+ /*
+ * If we turned off the multicast bit (during a write) we're required
+ * to turn it back on before finishing. The group and instance values
+ * don't matter since they'll be re-programmed on the next MCR
+ * operation.
+ */
+ if (rw_flag == MCR_OP_WRITE)
+ xe_mmio_write32(gt, steer_reg, MCR_MULTICAST);
+
+ return val;
+}
+
+/**
+ * xe_gt_mcr_unicast_read_any - reads a non-terminated instance of an MCR register
+ * @gt: GT structure
+ * @reg_mcr: register to read
+ *
+ * Reads a GT MCR register. The read will be steered to a non-terminated
+ * instance (i.e., one that isn't fused off or powered down by power gating).
+ * This function assumes the caller is already holding any necessary forcewake
+ * domains.
+ *
+ * Returns the value from a non-terminated instance of @reg.
+ */
+u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
+{
+ const struct xe_reg reg = to_xe_reg(reg_mcr);
+ u8 group, instance;
+ u32 val;
+ bool steer;
+
+ steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
+ &group, &instance);
+
+ if (steer) {
+ mcr_lock(gt);
+ val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ,
+ group, instance, 0);
+ mcr_unlock(gt);
+ } else {
+ val = xe_mmio_read32(gt, reg);
+ }
+
+ return val;
+}
+
+/**
+ * xe_gt_mcr_unicast_read - read a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg_mcr: the MCR register to read
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Returns the value read from an MCR register after steering toward a specific
+ * group/instance.
+ */
+u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ int group, int instance)
+{
+ u32 val;
+
+ mcr_lock(gt);
+ val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
+ mcr_unlock(gt);
+
+ return val;
+}
+
+/**
+ * xe_gt_mcr_unicast_write - write a specific instance of an MCR register
+ * @gt: GT structure
+ * @reg_mcr: the MCR register to write
+ * @value: value to write
+ * @group: the MCR group
+ * @instance: the MCR instance
+ *
+ * Write an MCR register in unicast mode after steering toward a specific
+ * group/instance.
+ */
+void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
+ u32 value, int group, int instance)
+{
+ mcr_lock(gt);
+ rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
+ mcr_unlock(gt);
+}
+
+/**
+ * xe_gt_mcr_multicast_write - write a value to all instances of an MCR register
+ * @gt: GT structure
+ * @reg_mcr: the MCR register to write
+ * @value: value to write
+ *
+ * Write an MCR register in multicast mode to update all instances.
+ */
+void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
+ u32 value)
+{
+ struct xe_reg reg = to_xe_reg(reg_mcr);
+
+ /*
+ * Synchronize with any unicast operations. Once we have exclusive
+ * access, the MULTICAST bit should already be set, so there's no need
+ * to touch the steering register.
+ */
+ mcr_lock(gt);
+ xe_mmio_write32(gt, reg, value);
+ mcr_unlock(gt);
+}
+
+void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ for (int i = 0; i < NUM_STEERING_TYPES; i++) {
+ if (gt->steering[i].ranges) {
+ drm_printf(p, "%s steering: group=%#x, instance=%#x\n",
+ xe_steering_types[i].name,
+ gt->steering[i].group_target,
+ gt->steering[i].instance_target);
+ for (int j = 0; gt->steering[i].ranges[j].end; j++)
+ drm_printf(p, "\t0x%06x - 0x%06x\n",
+ gt->steering[i].ranges[j].start,
+ gt->steering[i].ranges[j].end);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
new file mode 100644
index 000000000..27ca1bc88
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_MCR_H_
+#define _XE_GT_MCR_H_
+
+#include "regs/xe_reg_defs.h"
+
+struct drm_printer;
+struct xe_gt;
+
+void xe_gt_mcr_init(struct xe_gt *gt);
+
+void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt);
+
+u32 xe_gt_mcr_unicast_read(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
+ int group, int instance);
+u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr mcr_reg);
+
+void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
+ u32 value, int group, int instance);
+void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
+ u32 value);
+
+void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
+
+#endif /* _XE_GT_MCR_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
new file mode 100644
index 000000000..ab8536c4f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt_pagefault.h"
+
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+#include "abi/guc_actions_abi.h"
+#include "xe_bo.h"
+#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_migrate.h"
+#include "xe_pt.h"
+#include "xe_trace.h"
+#include "xe_vm.h"
+
+struct pagefault {
+ u64 page_addr;
+ u32 asid;
+ u16 pdata;
+ u8 vfid;
+ u8 access_type;
+ u8 fault_type;
+ u8 fault_level;
+ u8 engine_class;
+ u8 engine_instance;
+ u8 fault_unsuccessful;
+ bool trva_fault;
+};
+
+enum access_type {
+ ACCESS_TYPE_READ = 0,
+ ACCESS_TYPE_WRITE = 1,
+ ACCESS_TYPE_ATOMIC = 2,
+ ACCESS_TYPE_RESERVED = 3,
+};
+
+enum fault_type {
+ NOT_PRESENT = 0,
+ WRITE_ACCESS_VIOLATION = 1,
+ ATOMIC_ACCESS_VIOLATION = 2,
+};
+
+struct acc {
+ u64 va_range_base;
+ u32 asid;
+ u32 sub_granularity;
+ u8 granularity;
+ u8 vfid;
+ u8 access_type;
+ u8 engine_class;
+ u8 engine_instance;
+};
+
+static bool access_is_atomic(enum access_type access_type)
+{
+ return access_type == ACCESS_TYPE_ATOMIC;
+}
+
+static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
+{
+ return BIT(tile->id) & vma->tile_present &&
+ !(BIT(tile->id) & vma->tile_invalidated);
+}
+
+static bool vma_matches(struct xe_vma *vma, u64 page_addr)
+{
+ if (page_addr > xe_vma_end(vma) - 1 ||
+ page_addr + SZ_4K - 1 < xe_vma_start(vma))
+ return false;
+
+ return true;
+}
+
+static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
+{
+ struct xe_vma *vma = NULL;
+
+ if (vm->usm.last_fault_vma) { /* Fast lookup */
+ if (vma_matches(vm->usm.last_fault_vma, page_addr))
+ vma = vm->usm.last_fault_vma;
+ }
+ if (!vma)
+ vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
+
+ return vma;
+}
+
+static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
+ bool atomic, unsigned int id)
+{
+ struct xe_bo *bo = xe_vma_bo(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ unsigned int num_shared = 2; /* slots for bind + move */
+ int err;
+
+ err = xe_vm_prepare_vma(exec, vma, num_shared);
+ if (err)
+ return err;
+
+ if (atomic && IS_DGFX(vm->xe)) {
+ if (xe_vma_is_userptr(vma)) {
+ err = -EACCES;
+ return err;
+ }
+
+ /* Migrate to VRAM, move should invalidate the VMA first */
+ err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
+ if (err)
+ return err;
+ } else if (bo) {
+ /* Create backing store if needed */
+ err = xe_bo_validate(bo, vm, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct drm_exec exec;
+ struct xe_vm *vm;
+ struct xe_vma *vma = NULL;
+ struct dma_fence *fence;
+ bool write_locked;
+ int ret = 0;
+ bool atomic;
+
+ /* SW isn't expected to handle TRTT faults */
+ if (pf->trva_fault)
+ return -EFAULT;
+
+ /* ASID to VM */
+ mutex_lock(&xe->usm.lock);
+ vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
+ if (vm && xe_vm_in_fault_mode(vm))
+ xe_vm_get(vm);
+ else
+ vm = NULL;
+ mutex_unlock(&xe->usm.lock);
+ if (!vm)
+ return -EINVAL;
+
+retry_userptr:
+ /*
+ * TODO: Avoid exclusive lock if VM doesn't have userptrs, or
+ * start out read-locked?
+ */
+ down_write(&vm->lock);
+ write_locked = true;
+ vma = lookup_vma(vm, pf->page_addr);
+ if (!vma) {
+ ret = -EINVAL;
+ goto unlock_vm;
+ }
+
+ if (!xe_vma_is_userptr(vma) ||
+ !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
+ downgrade_write(&vm->lock);
+ write_locked = false;
+ }
+
+ trace_xe_vma_pagefault(vma);
+
+ atomic = access_is_atomic(pf->access_type);
+
+ /* Check if VMA is valid */
+ if (vma_is_valid(tile, vma) && !atomic)
+ goto unlock_vm;
+
+ /* TODO: Validate fault */
+
+ if (xe_vma_is_userptr(vma) && write_locked) {
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del_init(&uvma->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+
+ ret = xe_vma_userptr_pin_pages(uvma);
+ if (ret)
+ goto unlock_vm;
+
+ downgrade_write(&vm->lock);
+ write_locked = false;
+ }
+
+ /* Lock VM and BOs dma-resv */
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = xe_pf_begin(&exec, vma, atomic, tile->id);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ goto unlock_dma_resv;
+ }
+
+ /* Bind VMA only to the GT that has faulted */
+ trace_xe_vma_pf_bind(vma);
+ fence = __xe_pt_bind_vma(tile, vma, xe_tile_migrate_engine(tile), NULL, 0,
+ vma->tile_present & BIT(tile->id));
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto unlock_dma_resv;
+ }
+
+ /*
+ * XXX: Should we drop the lock before waiting? This only helps if doing
+ * GPU binds which is currently only done if we have to wait for more
+ * than 10ms on a move.
+ */
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ if (xe_vma_is_userptr(vma))
+ ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
+ vma->tile_invalidated &= ~BIT(tile->id);
+
+unlock_dma_resv:
+ drm_exec_fini(&exec);
+unlock_vm:
+ if (!ret)
+ vm->usm.last_fault_vma = vma;
+ if (write_locked)
+ up_write(&vm->lock);
+ else
+ up_read(&vm->lock);
+ if (ret == -EAGAIN)
+ goto retry_userptr;
+
+ if (!ret) {
+ ret = xe_gt_tlb_invalidation_vma(gt, NULL, vma);
+ if (ret >= 0)
+ ret = 0;
+ }
+ xe_vm_put(vm);
+
+ return ret;
+}
+
+static int send_pagefault_reply(struct xe_guc *guc,
+ struct xe_guc_pagefault_reply *reply)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
+ reply->dw0,
+ reply->dw1,
+ };
+
+ return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
+}
+
+static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
+{
+ drm_dbg(&xe->drm, "\n\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tPDATA: 0x%04x\n"
+ "\tFaulted Address: 0x%08x%08x\n"
+ "\tFaultType: %d\n"
+ "\tAccessType: %d\n"
+ "\tFaultLevel: %d\n"
+ "\tEngineClass: %d\n"
+ "\tEngineInstance: %d\n",
+ pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
+ lower_32_bits(pf->page_addr),
+ pf->fault_type, pf->access_type, pf->fault_level,
+ pf->engine_class, pf->engine_instance);
+}
+
+#define PF_MSG_LEN_DW 4
+
+static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
+{
+ const struct xe_guc_pagefault_desc *desc;
+ bool ret = false;
+
+ spin_lock_irq(&pf_queue->lock);
+ if (pf_queue->head != pf_queue->tail) {
+ desc = (const struct xe_guc_pagefault_desc *)
+ (pf_queue->data + pf_queue->head);
+
+ pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
+ pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
+ pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
+ pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
+ pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
+ PFD_PDATA_HI_SHIFT;
+ pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
+ pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
+ pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
+ pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
+ pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
+ pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
+ PFD_VIRTUAL_ADDR_HI_SHIFT;
+ pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
+ PFD_VIRTUAL_ADDR_LO_SHIFT;
+
+ pf_queue->head = (pf_queue->head + PF_MSG_LEN_DW) %
+ PF_QUEUE_NUM_DW;
+ ret = true;
+ }
+ spin_unlock_irq(&pf_queue->lock);
+
+ return ret;
+}
+
+static bool pf_queue_full(struct pf_queue *pf_queue)
+{
+ lockdep_assert_held(&pf_queue->lock);
+
+ return CIRC_SPACE(pf_queue->tail, pf_queue->head, PF_QUEUE_NUM_DW) <=
+ PF_MSG_LEN_DW;
+}
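+
+/*
+ * Worked example (illustrative): with PF_QUEUE_NUM_DW = 128 and
+ * PF_MSG_LEN_DW = 4, CIRC_SPACE() reports how many DWs can be written without
+ * overtaking the consumer. E.g. head = 0 and tail = 124 gives CIRC_SPACE = 3
+ * (one slot is always kept empty to distinguish full from empty), which is
+ * <= PF_MSG_LEN_DW, so the queue reports full and the G2H message is dropped
+ * with -ENOSPC.
+ */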
+
+int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct pf_queue *pf_queue;
+ unsigned long flags;
+ u32 asid;
+ bool full;
+
+ if (unlikely(len != PF_MSG_LEN_DW))
+ return -EPROTO;
+
+ asid = FIELD_GET(PFD_ASID, msg[1]);
+ pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
+
+ spin_lock_irqsave(&pf_queue->lock, flags);
+ full = pf_queue_full(pf_queue);
+ if (!full) {
+ memcpy(pf_queue->data + pf_queue->tail, msg, len * sizeof(u32));
+ pf_queue->tail = (pf_queue->tail + len) % PF_QUEUE_NUM_DW;
+ queue_work(gt->usm.pf_wq, &pf_queue->worker);
+ } else {
+ drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
+ }
+ spin_unlock_irqrestore(&pf_queue->lock, flags);
+
+ return full ? -ENOSPC : 0;
+}
+
+#define USM_QUEUE_MAX_RUNTIME_MS 20
+
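+/*
+ * The queue workers below use a cooperative time-slice: after
+ * USM_QUEUE_MAX_RUNTIME_MS of processing they re-queue themselves if entries
+ * remain, so one busy queue cannot monopolize the workqueue. Sketch of the
+ * pattern (illustrative, dequeue()/process()/queue_empty() are placeholders):
+ *
+ *	threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
+ *	while (dequeue(&entry)) {
+ *		process(&entry);
+ *		if (time_after(jiffies, threshold) && !queue_empty()) {
+ *			queue_work(wq, w);
+ *			break;
+ *		}
+ *	}
+ */
+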
+static void pf_queue_work_func(struct work_struct *w)
+{
+ struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
+ struct xe_gt *gt = pf_queue->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc_pagefault_reply reply = {};
+ struct pagefault pf = {};
+ unsigned long threshold;
+ int ret;
+
+ threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
+
+ while (get_pagefault(pf_queue, &pf)) {
+ ret = handle_pagefault(gt, &pf);
+ if (unlikely(ret)) {
+ print_pagefault(xe, &pf);
+ pf.fault_unsuccessful = 1;
+ drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
+ }
+
+ reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
+ FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
+ FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
+ FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
+ FIELD_PREP(PFR_ASID, pf.asid);
+
+ reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
+ FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
+ FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
+ FIELD_PREP(PFR_PDATA, pf.pdata);
+
+ send_pagefault_reply(&gt->uc.guc, &reply);
+
+ if (time_after(jiffies, threshold) &&
+ pf_queue->head != pf_queue->tail) {
+ queue_work(gt->usm.pf_wq, w);
+ break;
+ }
+ }
+}
+
+static void acc_queue_work_func(struct work_struct *w);
+
+int xe_gt_pagefault_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i;
+
+ if (!xe->info.has_usm)
+ return 0;
+
+ for (i = 0; i < NUM_PF_QUEUE; ++i) {
+ gt->usm.pf_queue[i].gt = gt;
+ spin_lock_init(&gt->usm.pf_queue[i].lock);
+ INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
+ }
+ for (i = 0; i < NUM_ACC_QUEUE; ++i) {
+ gt->usm.acc_queue[i].gt = gt;
+ spin_lock_init(&gt->usm.acc_queue[i].lock);
+ INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
+ }
+
+ gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
+ WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
+ if (!gt->usm.pf_wq)
+ return -ENOMEM;
+
+ gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ NUM_ACC_QUEUE);
+ if (!gt->usm.acc_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void xe_gt_pagefault_reset(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i;
+
+ if (!xe->info.has_usm)
+ return;
+
+ for (i = 0; i < NUM_PF_QUEUE; ++i) {
+ spin_lock_irq(&gt->usm.pf_queue[i].lock);
+ gt->usm.pf_queue[i].head = 0;
+ gt->usm.pf_queue[i].tail = 0;
+ spin_unlock_irq(&gt->usm.pf_queue[i].lock);
+ }
+
+ for (i = 0; i < NUM_ACC_QUEUE; ++i) {
+ spin_lock(&gt->usm.acc_queue[i].lock);
+ gt->usm.acc_queue[i].head = 0;
+ gt->usm.acc_queue[i].tail = 0;
+ spin_unlock(&gt->usm.acc_queue[i].lock);
+ }
+}
+
+static int granularity_in_byte(int val)
+{
+ switch (val) {
+ case 0:
+ return SZ_128K;
+ case 1:
+ return SZ_2M;
+ case 2:
+ return SZ_16M;
+ case 3:
+ return SZ_64M;
+ default:
+ return 0;
+ }
+}
+
+static int sub_granularity_in_byte(int val)
+{
+ return (granularity_in_byte(val) / 32);
+}
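+
+/*
+ * Worked example (illustrative): an encoded granularity of 1 maps to a 2M
+ * region, so sub_granularity_in_byte(1) = SZ_2M / 32 = 64K; each bit of the
+ * 32-bit sub-granularity vector then covers one 64K chunk of the region.
+ */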
+
+static void print_acc(struct xe_device *xe, struct acc *acc)
+{
+ drm_warn(&xe->drm, "Access counter request:\n"
+ "\tType: %s\n"
+ "\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tEngine: %d:%d\n"
+ "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
+ "\tSub_Granularity Vector: 0x%08x\n"
+ "\tVA Range base: 0x%016llx\n",
+ acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
+ acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
+ granularity_in_byte(acc->granularity) / SZ_1K,
+ sub_granularity_in_byte(acc->granularity) / SZ_1K,
+ acc->sub_granularity, acc->va_range_base);
+}
+
+static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
+{
+ u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
+ sub_granularity_in_byte(acc->granularity);
+
+ return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
+}
+
+static int handle_acc(struct xe_gt *gt, struct acc *acc)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct drm_exec exec;
+ struct xe_vm *vm;
+ struct xe_vma *vma;
+ int ret = 0;
+
+ /* We only support ACC_TRIGGER at the moment */
+ if (acc->access_type != ACC_TRIGGER)
+ return -EINVAL;
+
+ /* ASID to VM */
+ mutex_lock(&xe->usm.lock);
+ vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
+ if (vm)
+ xe_vm_get(vm);
+ mutex_unlock(&xe->usm.lock);
+ if (!vm || !xe_vm_in_fault_mode(vm))
+ return -EINVAL;
+
+ down_read(&vm->lock);
+
+ /* Lookup VMA */
+ vma = get_acc_vma(vm, acc);
+ if (!vma) {
+ ret = -EINVAL;
+ goto unlock_vm;
+ }
+
+ trace_xe_vma_acc(vma);
+
+ /* Userptr or null can't be migrated, nothing to do */
+ if (xe_vma_has_no_bo(vma))
+ goto unlock_vm;
+
+ /* Lock VM and BOs dma-resv */
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = xe_pf_begin(&exec, vma, true, tile->id);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+ }
+
+ drm_exec_fini(&exec);
+unlock_vm:
+ up_read(&vm->lock);
+ xe_vm_put(vm);
+
+ return ret;
+}
+
+#define make_u64(hi__, low__) ((u64)(hi__) << 32 | (u64)(low__))
+
+#define ACC_MSG_LEN_DW 4
+
+static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
+{
+ const struct xe_guc_acc_desc *desc;
+ bool ret = false;
+
+ spin_lock(&acc_queue->lock);
+ if (acc_queue->head != acc_queue->tail) {
+ desc = (const struct xe_guc_acc_desc *)
+ (acc_queue->data + acc_queue->head);
+
+ acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
+ acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
+ FIELD_GET(ACC_SUBG_LO, desc->dw0);
+ acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
+ acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
+ acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
+ acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
+ acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
+ acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
+ desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);
+
+ acc_queue->head = (acc_queue->head + ACC_MSG_LEN_DW) %
+ ACC_QUEUE_NUM_DW;
+ ret = true;
+ }
+ spin_unlock(&acc_queue->lock);
+
+ return ret;
+}
+
+static void acc_queue_work_func(struct work_struct *w)
+{
+ struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
+ struct xe_gt *gt = acc_queue->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct acc acc = {};
+ unsigned long threshold;
+ int ret;
+
+ threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
+
+ while (get_acc(acc_queue, &acc)) {
+ ret = handle_acc(gt, &acc);
+ if (unlikely(ret)) {
+ print_acc(xe, &acc);
+ drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
+ }
+
+ if (time_after(jiffies, threshold) &&
+ acc_queue->head != acc_queue->tail) {
+ queue_work(gt->usm.acc_wq, w);
+ break;
+ }
+ }
+}
+
+static bool acc_queue_full(struct acc_queue *acc_queue)
+{
+ lockdep_assert_held(&acc_queue->lock);
+
+ return CIRC_SPACE(acc_queue->tail, acc_queue->head, ACC_QUEUE_NUM_DW) <=
+ ACC_MSG_LEN_DW;
+}
+
+int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct acc_queue *acc_queue;
+ u32 asid;
+ bool full;
+
+ if (unlikely(len != ACC_MSG_LEN_DW))
+ return -EPROTO;
+
+ asid = FIELD_GET(ACC_ASID, msg[1]);
+ acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];
+
+ spin_lock(&acc_queue->lock);
+ full = acc_queue_full(acc_queue);
+ if (!full) {
+ memcpy(acc_queue->data + acc_queue->tail, msg,
+ len * sizeof(u32));
+ acc_queue->tail = (acc_queue->tail + len) % ACC_QUEUE_NUM_DW;
+ queue_work(gt->usm.acc_wq, &acc_queue->worker);
+ } else {
+ drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
+ }
+ spin_unlock(&acc_queue->lock);
+
+ return full ? -ENOSPC : 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.h b/drivers/gpu/drm/xe/xe_gt_pagefault.h
new file mode 100644
index 000000000..839c065a5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_PAGEFAULT_H_
+#define _XE_GT_PAGEFAULT_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+struct xe_guc;
+
+int xe_gt_pagefault_init(struct xe_gt *gt);
+void xe_gt_pagefault_reset(struct xe_gt *gt);
+int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif /* _XE_GT_PAGEFAULT_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
new file mode 100644
index 000000000..5991bcadd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_PRINTK_H_
+#define _XE_GT_PRINTK_H_
+
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+
+#define xe_gt_printk(_gt, _level, _fmt, ...) \
+ drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define xe_gt_err(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_warn(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_notice(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_info(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), info, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_dbg(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_gt_WARN(_gt, _condition, _fmt, ...) \
+ drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
+ drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define xe_gt_WARN_ON(_gt, _condition) \
+ xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition))
+
+#define xe_gt_WARN_ON_ONCE(_gt, _condition) \
+ xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition))
+
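+/*
+ * Illustrative usage: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err))
+ * expands to drm_err() on the owning device with a "GT0: ", "GT1: ", ...
+ * prefix derived from gt->info.id.
+ */
+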
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.c b/drivers/gpu/drm/xe/xe_gt_sysfs.c
new file mode 100644
index 000000000..c69d2e8a0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt_sysfs.h"
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_gt.h"
+
+static void xe_gt_sysfs_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_type xe_gt_sysfs_kobj_type = {
+ .release = xe_gt_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void gt_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ kobject_put(gt->sysfs);
+}
+
+void xe_gt_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct kobj_gt *kg;
+ int err;
+
+ kg = kzalloc(sizeof(*kg), GFP_KERNEL);
+ if (!kg)
+ return;
+
+ kobject_init(&kg->base, &xe_gt_sysfs_kobj_type);
+ kg->gt = gt;
+
+ err = kobject_add(&kg->base, tile->sysfs, "gt%d", gt->info.id);
+ if (err) {
+ drm_warn(&xe->drm, "failed to add GT sysfs directory, err: %d\n", err);
+ kobject_put(&kg->base);
+ return;
+ }
+
+ gt->sysfs = &kg->base;
+
+ err = drmm_add_action_or_reset(&xe->drm, gt_sysfs_fini, gt);
+ if (err) {
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+ return;
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs.h b/drivers/gpu/drm/xe/xe_gt_sysfs.h
new file mode 100644
index 000000000..e3ec278ca
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_SYSFS_H_
+#define _XE_GT_SYSFS_H_
+
+#include "xe_gt_sysfs_types.h"
+
+void xe_gt_sysfs_init(struct xe_gt *gt);
+
+static inline struct xe_gt *
+kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_gt, base)->gt;
+}
+
+#endif /* _XE_GT_SYSFS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_sysfs_types.h b/drivers/gpu/drm/xe/xe_gt_sysfs_types.h
new file mode 100644
index 000000000..d3bc6b833
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sysfs_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_SYSFS_TYPES_H_
+#define _XE_GT_SYSFS_TYPES_H_
+
+#include <linux/kobject.h>
+
+struct xe_gt;
+
+/**
+ * struct kobj_gt - A GT's kobject struct that connects the kobject and the GT
+ *
+ * When dealing with multiple GTs, this struct helps to understand which GT
+ * needs to be addressed on a given sysfs call.
+ */
+struct kobj_gt {
+ /** @base: The actual kobject */
+ struct kobject base;
+ /** @gt: A pointer to the GT itself */
+ struct xe_gt *gt;
+};
+
+#endif /* _XE_GT_SYSFS_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
new file mode 100644
index 000000000..63d640591
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include <regs/xe_gt_regs.h>
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_sysfs.h"
+#include "xe_gt_throttle_sysfs.h"
+#include "xe_mmio.h"
+
+/**
+ * DOC: Xe GT Throttle
+ *
+ * Provides sysfs entries for frequency throttle reasons in GT
+ *
+ * device/gt#/freq0/throttle/status - Overall status
+ * device/gt#/freq0/throttle/reason_pl1 - Frequency throttle due to PL1
+ * device/gt#/freq0/throttle/reason_pl2 - Frequency throttle due to PL2
+ * device/gt#/freq0/throttle/reason_pl4 - Frequency throttle due to PL4, Iccmax etc.
+ * device/gt#/freq0/throttle/reason_thermal - Frequency throttle due to thermal
+ * device/gt#/freq0/throttle/reason_prochot - Frequency throttle due to prochot
+ * device/gt#/freq0/throttle/reason_ratl - Frequency throttle due to RATL
+ * device/gt#/freq0/throttle/reason_vr_thermalert - Frequency throttle due to VR THERMALERT
+ * device/gt#/freq0/throttle/reason_vr_tdc - Frequency throttle due to VR TDC
+ */
+
+static struct xe_gt *
+dev_to_gt(struct device *dev)
+{
+ return kobj_to_gt(dev->kobj.parent);
+}
+
+static u32 read_perf_limit_reasons(struct xe_gt *gt)
+{
+ u32 reg;
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
+ else
+ reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+
+ return reg;
+}
+
+static u32 read_status(struct xe_gt *gt)
+{
+ u32 status = read_perf_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;
+
+ return status;
+}
+
+static u32 read_reason_pl1(struct xe_gt *gt)
+{
+ u32 pl1 = read_perf_limit_reasons(gt) & POWER_LIMIT_1_MASK;
+
+ return pl1;
+}
+
+static u32 read_reason_pl2(struct xe_gt *gt)
+{
+ u32 pl2 = read_perf_limit_reasons(gt) & POWER_LIMIT_2_MASK;
+
+ return pl2;
+}
+
+static u32 read_reason_pl4(struct xe_gt *gt)
+{
+ u32 pl4 = read_perf_limit_reasons(gt) & POWER_LIMIT_4_MASK;
+
+ return pl4;
+}
+
+static u32 read_reason_thermal(struct xe_gt *gt)
+{
+ u32 thermal = read_perf_limit_reasons(gt) & THERMAL_LIMIT_MASK;
+
+ return thermal;
+}
+
+static u32 read_reason_prochot(struct xe_gt *gt)
+{
+ u32 prochot = read_perf_limit_reasons(gt) & PROCHOT_MASK;
+
+ return prochot;
+}
+
+static u32 read_reason_ratl(struct xe_gt *gt)
+{
+ u32 ratl = read_perf_limit_reasons(gt) & RATL_MASK;
+
+ return ratl;
+}
+
+static u32 read_reason_vr_thermalert(struct xe_gt *gt)
+{
+ u32 thermalert = read_perf_limit_reasons(gt) & VR_THERMALERT_MASK;
+
+ return thermalert;
+}
+
+static u32 read_reason_vr_tdc(struct xe_gt *gt)
+{
+ u32 tdc = read_perf_limit_reasons(gt) & VR_TDC_MASK;
+
+ return tdc;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool status = !!read_status(gt);
+
+ return sysfs_emit(buff, "%u\n", status);
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t reason_pl1_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool pl1 = !!read_reason_pl1(gt);
+
+ return sysfs_emit(buff, "%u\n", pl1);
+}
+static DEVICE_ATTR_RO(reason_pl1);
+
+static ssize_t reason_pl2_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool pl2 = !!read_reason_pl2(gt);
+
+ return sysfs_emit(buff, "%u\n", pl2);
+}
+static DEVICE_ATTR_RO(reason_pl2);
+
+static ssize_t reason_pl4_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool pl4 = !!read_reason_pl4(gt);
+
+ return sysfs_emit(buff, "%u\n", pl4);
+}
+static DEVICE_ATTR_RO(reason_pl4);
+
+static ssize_t reason_thermal_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool thermal = !!read_reason_thermal(gt);
+
+ return sysfs_emit(buff, "%u\n", thermal);
+}
+static DEVICE_ATTR_RO(reason_thermal);
+
+static ssize_t reason_prochot_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool prochot = !!read_reason_prochot(gt);
+
+ return sysfs_emit(buff, "%u\n", prochot);
+}
+static DEVICE_ATTR_RO(reason_prochot);
+
+static ssize_t reason_ratl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool ratl = !!read_reason_ratl(gt);
+
+ return sysfs_emit(buff, "%u\n", ratl);
+}
+static DEVICE_ATTR_RO(reason_ratl);
+
+static ssize_t reason_vr_thermalert_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool thermalert = !!read_reason_vr_thermalert(gt);
+
+ return sysfs_emit(buff, "%u\n", thermalert);
+}
+static DEVICE_ATTR_RO(reason_vr_thermalert);
+
+static ssize_t reason_vr_tdc_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct xe_gt *gt = dev_to_gt(dev);
+ bool tdc = !!read_reason_vr_tdc(gt);
+
+ return sysfs_emit(buff, "%u\n", tdc);
+}
+static DEVICE_ATTR_RO(reason_vr_tdc);
+
+static struct attribute *throttle_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_reason_pl1.attr,
+ &dev_attr_reason_pl2.attr,
+ &dev_attr_reason_pl4.attr,
+ &dev_attr_reason_thermal.attr,
+ &dev_attr_reason_prochot.attr,
+ &dev_attr_reason_ratl.attr,
+ &dev_attr_reason_vr_thermalert.attr,
+ &dev_attr_reason_vr_tdc.attr,
+ NULL
+};
+
+static const struct attribute_group throttle_group_attrs = {
+ .name = "throttle",
+ .attrs = throttle_attrs,
+};
+
+static void gt_throttle_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ sysfs_remove_group(gt->freq, &throttle_group_attrs);
+}
+
+void xe_gt_throttle_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ err = sysfs_create_group(gt->freq, &throttle_group_attrs);
+ if (err) {
+ drm_warn(&xe->drm, "failed to register throttle sysfs, err: %d\n", err);
+ return;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, gt_throttle_sysfs_fini, gt);
+ if (err)
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
new file mode 100644
index 000000000..3ecfd4bef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_THROTTLE_SYSFS_H_
+#define _XE_GT_THROTTLE_SYSFS_H_
+
+#include <drm/drm_managed.h>
+
+struct xe_gt;
+
+void xe_gt_throttle_sysfs_init(struct xe_gt *gt);
+
+#endif /* _XE_GT_THROTTLE_SYSFS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
new file mode 100644
index 000000000..f4c485289
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_gt_tlb_invalidation.h"
+
+#include "abi/guc_actions_abi.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_trace.h"
+
+#define TLB_TIMEOUT (HZ / 4)
+
+static void xe_gt_tlb_fence_timeout(struct work_struct *work)
+{
+ struct xe_gt *gt = container_of(work, struct xe_gt,
+ tlb_invalidation.fence_tdr.work);
+ struct xe_gt_tlb_invalidation_fence *fence, *next;
+
+ spin_lock_irq(&gt->tlb_invalidation.pending_lock);
+ list_for_each_entry_safe(fence, next,
+ &gt->tlb_invalidation.pending_fences, link) {
+ s64 since_inval_ms = ktime_ms_delta(ktime_get(),
+ fence->invalidation_time);
+
+ if (msecs_to_jiffies(since_inval_ms) < TLB_TIMEOUT)
+ break;
+
+ trace_xe_gt_tlb_invalidation_fence_timeout(fence);
+ drm_err(&gt_to_xe(gt)->drm, "gt%d: TLB invalidation fence timeout, seqno=%d recv=%d",
+ gt->info.id, fence->seqno, gt->tlb_invalidation.seqno_recv);
+
+ list_del(&fence->link);
+ fence->base.error = -ETIME;
+ dma_fence_signal(&fence->base);
+ dma_fence_put(&fence->base);
+ }
+ if (!list_empty(&gt->tlb_invalidation.pending_fences))
+ queue_delayed_work(system_wq,
+ &gt->tlb_invalidation.fence_tdr,
+ TLB_TIMEOUT);
+ spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
+}
+
+/**
+ * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
+ * @gt: GT structure
+ *
+ * Initialize GT TLB invalidation state. This is purely software
+ * initialization and should be called once during driver load.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+{
+ gt->tlb_invalidation.seqno = 1;
+ INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
+ spin_lock_init(&gt->tlb_invalidation.pending_lock);
+ spin_lock_init(&gt->tlb_invalidation.lock);
+ gt->tlb_invalidation.fence_context = dma_fence_context_alloc(1);
+ INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
+ xe_gt_tlb_fence_timeout);
+
+ return 0;
+}
+
+static void
+__invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ trace_xe_gt_tlb_invalidation_fence_signal(fence);
+ dma_fence_signal(&fence->base);
+ dma_fence_put(&fence->base);
+}
+
+static void
+invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ list_del(&fence->link);
+ __invalidation_fence_signal(fence);
+}
+
+/**
+ * xe_gt_tlb_invalidation_reset - Reset GT TLB invalidation state
+ * @gt: GT structure
+ *
+ * Signal any pending invalidation fences; should be called during a GT reset.
+ */
+void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
+{
+ struct xe_gt_tlb_invalidation_fence *fence, *next;
+ struct xe_guc *guc = &gt->uc.guc;
+ int pending_seqno;
+
+ /*
+ * CT channel is already disabled at this point. No new TLB requests can
+ * appear.
+ */
+
+ mutex_lock(&gt->uc.guc.ct.lock);
+ spin_lock_irq(&gt->tlb_invalidation.pending_lock);
+ cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
+ /*
+ * We might have various kworkers waiting for TLB flushes to complete
+ * which are not tracked with an explicit TLB fence, however at this
+ * stage that will never happen since the CT is already disabled, so
+ * make sure we signal them here under the assumption that we have
+ * completed a full GT reset.
+ */
+ if (gt->tlb_invalidation.seqno == 1)
+ pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
+ else
+ pending_seqno = gt->tlb_invalidation.seqno - 1;
+ WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
+ wake_up_all(&guc->ct.wq);
+
+ list_for_each_entry_safe(fence, next,
+ &gt->tlb_invalidation.pending_fences, link)
+ invalidation_fence_signal(fence);
+ spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
+ mutex_unlock(&gt->uc.guc.ct.lock);
+}
+
+static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
+{
+ int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);
+
+ if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
+ return false;
+
+ if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
+ return true;
+
+ return seqno_recv >= seqno;
+}
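+
+/*
+ * Worked example (illustrative): with TLB_INVALIDATION_SEQNO_MAX = 0x100000,
+ * seqno = 2 and seqno_recv = 0xffffe gives a delta below -(MAX / 2), so the
+ * seqno is treated as wrapped and not yet past. Conversely, seqno = 0xffffe
+ * with seqno_recv = 2 gives a delta above MAX / 2 and is treated as past,
+ * since seqno_recv must itself have wrapped.
+ */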
+
+static int send_tlb_invalidation(struct xe_guc *guc,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ u32 *action, int len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ int seqno;
+ int ret;
+
+ /*
+ * XXX: The seqno algorithm relies on TLB invalidations being processed
+ * in the order they are issued, which they currently are; if that
+ * changes, the algorithm will need to be updated.
+ */
+
+ mutex_lock(&guc->ct.lock);
+ seqno = gt->tlb_invalidation.seqno;
+ if (fence) {
+ fence->seqno = seqno;
+ trace_xe_gt_tlb_invalidation_fence_send(fence);
+ }
+ action[1] = seqno;
+ ret = xe_guc_ct_send_locked(&guc->ct, action, len,
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
+ if (!ret && fence) {
+ spin_lock_irq(&gt->tlb_invalidation.pending_lock);
+ /*
+ * We haven't actually published the TLB fence as per
+ * pending_fences, but in theory our seqno could have already
+ * been written as we acquired the pending_lock. In such a case
+ * we can just go ahead and signal the fence here.
+ */
+ if (tlb_invalidation_seqno_past(gt, seqno)) {
+ __invalidation_fence_signal(fence);
+ } else {
+ fence->invalidation_time = ktime_get();
+ list_add_tail(&fence->link,
+ &gt->tlb_invalidation.pending_fences);
+
+ if (list_is_singular(&gt->tlb_invalidation.pending_fences))
+ queue_delayed_work(system_wq,
+ &gt->tlb_invalidation.fence_tdr,
+ TLB_TIMEOUT);
+ }
+ spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
+ } else if (ret < 0 && fence) {
+ __invalidation_fence_signal(fence);
+ }
+ if (!ret) {
+ gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!gt->tlb_invalidation.seqno)
+ gt->tlb_invalidation.seqno = 1;
+ ret = seqno;
+ }
+ mutex_unlock(&guc->ct.lock);
+
+ return ret;
+}
+
+#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+ XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
+/**
+ * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
+ * @gt: GT structure
+ *
+ * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
+ * asynchronous and the caller can use the returned seqno with
+ * xe_gt_tlb_invalidation_wait() to wait for completion.
+ *
+ * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
+ * negative error code on error.
+ */
+int xe_gt_tlb_invalidation_guc(struct xe_gt *gt)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ 0, /* seqno, replaced in send_tlb_invalidation */
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+ };
+
+ return send_tlb_invalidation(&gt->uc.guc, NULL, action,
+ ARRAY_SIZE(action));
+}
+
+/**
+ * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * @gt: GT structure
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion, can be NULL
+ * @vma: VMA to invalidate
+ *
+ * Issue a range based TLB invalidation if supported; if not, fall back to a
+ * full TLB invalidation. Completion of the TLB invalidation is asynchronous
+ * and the caller can either use the invalidation fence or the returned seqno
+ * with xe_gt_tlb_invalidation_wait() to wait for completion.
+ *
+ * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait on success,
+ * negative error code on error.
+ */
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+#define MAX_TLB_INVALIDATION_LEN 7
+ u32 action[MAX_TLB_INVALIDATION_LEN];
+ int len = 0;
+
+ xe_gt_assert(gt, vma);
+
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist) {
+ if (fence)
+ __invalidation_fence_signal(fence);
+
+ return 0;
+ }
+
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
+ if (!xe->info.has_range_tlb_invalidation) {
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+ } else {
+ u64 start = xe_vma_start(vma);
+ u64 length = xe_vma_size(vma);
+ u64 align, end;
+
+ if (length < SZ_4K)
+ length = SZ_4K;
+
+ /*
+ * We need to invalidate a higher granularity if start address
+ * is not aligned to length. When start is not aligned with
+ * length we need to find the length large enough to create an
+ * address mask covering the required range.
+ */
+ align = roundup_pow_of_two(length);
+ start = ALIGN_DOWN(xe_vma_start(vma), align);
+ end = ALIGN(xe_vma_end(vma), align);
+ length = align;
+ while (start + length < end) {
+ length <<= 1;
+ start = ALIGN_DOWN(xe_vma_start(vma), length);
+ }
+
+ /*
+ * Minimum invalidation size for a 2MB page that the hardware
+ * expects is 16MB
+ */
+ if (length >= SZ_2M) {
+ length = max_t(u64, SZ_16M, length);
+ start = ALIGN_DOWN(xe_vma_start(vma), length);
+ }
+
+ xe_gt_assert(gt, length >= SZ_4K);
+ xe_gt_assert(gt, is_power_of_2(length));
+ xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)));
+ xe_gt_assert(gt, IS_ALIGNED(start, length));
+
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
+ action[len++] = xe_vma_vm(vma)->usm.asid;
+ action[len++] = lower_32_bits(start);
+ action[len++] = upper_32_bits(start);
+ action[len++] = ilog2(length) - ilog2(SZ_4K);
+ }
+
+ xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
+
+ return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
+}
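+
+/*
+ * Worked example (illustrative) of the range selection above: a VMA spanning
+ * [0x3000, 0x8000) has length 0x5000, rounded up to align = 0x8000; start is
+ * aligned down to 0x0 and end up to 0x8000, so a single 0x8000-sized range
+ * already covers the VMA and no doubling iterations are needed. The GuC is
+ * then sent start = 0x0 with a length field of ilog2(0x8000) - ilog2(SZ_4K) = 3.
+ */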
+
+/**
+ * xe_gt_tlb_invalidation_wait - Wait for a TLB invalidation to complete
+ * @gt: GT structure
+ * @seqno: seqno to wait on, as returned by one of the xe_gt_tlb_invalidation_*
+ * calls
+ *
+ * Wait up to TLB_TIMEOUT (HZ / 4, i.e. 250ms) for a TLB invalidation to
+ * complete; in practice the invalidation should always arrive well within
+ * that window.
+ *
+ * Return: 0 on success, -ETIME on TLB invalidation timeout
+ */
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct drm_printer p = drm_err_printer(__func__);
+ int ret;
+
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist)
+ return 0;
+
+ /*
+ * XXX: See above, this algorithm only works if seqnos always arrive
+ * in order
+ */
+ ret = wait_event_timeout(guc->ct.wq,
+ tlb_invalidation_seqno_past(gt, seqno),
+ TLB_TIMEOUT);
+ if (!ret) {
+ drm_err(&xe->drm, "gt%d: TLB invalidation time'd out, seqno=%d, recv=%d\n",
+ gt->info.id, seqno, gt->tlb_invalidation.seqno_recv);
+ xe_guc_ct_print(&guc->ct, &p, true);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
+ * @guc: guc
+ * @msg: message indicating TLB invalidation done
+ * @len: length of message
+ *
+ * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
+ * invalidation fences for seqno. Algorithm for this depends on seqno being
+ * received in-order and asserts this assumption.
+ *
+ * Return: 0 on success, -EPROTO for malformed messages.
+ */
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_gt_tlb_invalidation_fence *fence, *next;
+ unsigned long flags;
+
+ if (unlikely(len != 1))
+ return -EPROTO;
+
+ /*
+ * This can be run both directly from the IRQ handler and from
+ * process_g2h_msg(). Only one of them may process any individual CT
+ * message; however, the order in which they are processed here could
+ * result in skipping a seqno. To handle that we just process all the
+ * seqnos from the last seqno_recv up to and including the one in
+ * msg[0]. The delta should be very small, so there shouldn't be many
+ * pending_fences to iterate over here.
+ *
+ * From the GuC's POV we expect the seqnos to always appear in-order,
+ * so if we see something later in the timeline we can be sure that
+ * anything appearing earlier has already signalled; we just have yet
+ * to officially process the CT message, as when racing against
+ * process_g2h_msg().
+ */
+ spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
+ if (tlb_invalidation_seqno_past(gt, msg[0])) {
+ spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
+ return 0;
+ }
+
+ /*
+ * wake_up_all() and wait_event_timeout() already have the correct
+ * barriers.
+ */
+ WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
+ wake_up_all(&guc->ct.wq);
+
+ list_for_each_entry_safe(fence, next,
+ &gt->tlb_invalidation.pending_fences, link) {
+ trace_xe_gt_tlb_invalidation_fence_recv(fence);
+
+ if (!tlb_invalidation_seqno_past(gt, fence->seqno))
+ break;
+
+ invalidation_fence_signal(fence);
+ }
+
+ if (!list_empty(&gt->tlb_invalidation.pending_fences))
+ mod_delayed_work(system_wq,
+ &gt->tlb_invalidation.fence_tdr,
+ TLB_TIMEOUT);
+ else
+ cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
+
+ spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
new file mode 100644
index 000000000..b333c1709
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TLB_INVALIDATION_H_
+#define _XE_GT_TLB_INVALIDATION_H_
+
+#include <linux/types.h>
+
+#include "xe_gt_tlb_invalidation_types.h"
+
+struct xe_gt;
+struct xe_guc;
+struct xe_vma;
+
+int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_guc(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
+ struct xe_gt_tlb_invalidation_fence *fence,
+ struct xe_vma *vma);
+int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno);
+int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif /* _XE_GT_TLB_INVALIDATION_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
new file mode 100644
index 000000000..934c828ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TLB_INVALIDATION_TYPES_H_
+#define _XE_GT_TLB_INVALIDATION_TYPES_H_
+
+#include <linux/dma-fence.h>
+
+/**
+ * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
+ *
+ * Optionally passed to xe_gt_tlb_invalidation and will be signaled upon TLB
+ * invalidation completion.
+ */
+struct xe_gt_tlb_invalidation_fence {
+ /** @base: dma fence base */
+ struct dma_fence base;
+ /** @link: link into list of pending tlb fences */
+ struct list_head link;
+ /** @seqno: seqno of TLB invalidation to signal fence on */
+ int seqno;
+ /** @invalidation_time: time of TLB invalidation */
+ ktime_t invalidation_time;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
new file mode 100644
index 000000000..a8d7f272c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gt_topology.h"
+
+#include <linux/bitmap.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_gt.h"
+#include "xe_mmio.h"
+
+#define XE_MAX_DSS_FUSE_BITS (32 * XE_MAX_DSS_FUSE_REGS)
+#define XE_MAX_EU_FUSE_BITS (32 * XE_MAX_EU_FUSE_REGS)
+
+static void
+load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
+{
+ va_list argp;
+ u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
+ int i;
+
+ if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
+ numregs = XE_MAX_DSS_FUSE_REGS;
+
+ va_start(argp, numregs);
+ for (i = 0; i < numregs; i++)
+ fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
+ va_end(argp);
+
+ bitmap_from_arr32(mask, fuse_val, numregs * 32);
+}
+
+static void
+load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
+ u32 val = 0;
+ int i;
+
+ BUILD_BUG_ON(XE_MAX_EU_FUSE_REGS > 1);
+
+ /*
+ * Pre-Xe_HP platforms inverted the bit meaning (disable instead
+ * of enable).
+ */
+ if (GRAPHICS_VERx100(xe) < 1250)
+ reg_val = ~reg_val & XELP_EU_MASK;
+
+ /* On PVC, one bit = one EU */
+ if (GRAPHICS_VERx100(xe) == 1260) {
+ val = reg_val;
+ } else {
+ /* All other platforms, one bit = 2 EU */
+ for (i = 0; i < fls(reg_val); i++)
+ if (reg_val & BIT(i))
+ val |= 0x3 << 2 * i;
+ }
+
+ bitmap_from_arr32(mask, &val, XE_MAX_EU_FUSE_BITS);
+}
+
+static void
+get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
+{
+ if (GRAPHICS_VER(xe) > 20) {
+ *geometry_regs = 3;
+ *compute_regs = 3;
+ } else if (GRAPHICS_VERx100(xe) == 1260) {
+ *geometry_regs = 0;
+ *compute_regs = 2;
+ } else if (GRAPHICS_VERx100(xe) >= 1250) {
+ *geometry_regs = 1;
+ *compute_regs = 1;
+ } else {
+ *geometry_regs = 1;
+ *compute_regs = 0;
+ }
+}
+
+void
+xe_gt_topology_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct drm_printer p = drm_debug_printer("GT topology");
+ int num_geometry_regs, num_compute_regs;
+
+ get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
+
+ /*
+ * Register counts returned shouldn't exceed the number of registers
+ * passed as parameters below.
+ */
+ drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
+ drm_WARN_ON(&xe->drm, num_compute_regs > 3);
+
+ load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
+ num_geometry_regs,
+ XELP_GT_GEOMETRY_DSS_ENABLE,
+ XE2_GT_GEOMETRY_DSS_1,
+ XE2_GT_GEOMETRY_DSS_2);
+ load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
+ XEHP_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
+ XE2_GT_COMPUTE_DSS_2);
+ load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
+
+ xe_gt_topology_dump(gt, &p);
+}
+
+void
+xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ drm_printf(p, "dss mask (geometry): %*pb\n", XE_MAX_DSS_FUSE_BITS,
+ gt->fuse_topo.g_dss_mask);
+ drm_printf(p, "dss mask (compute): %*pb\n", XE_MAX_DSS_FUSE_BITS,
+ gt->fuse_topo.c_dss_mask);
+
+ drm_printf(p, "EU mask per DSS: %*pb\n", XE_MAX_EU_FUSE_BITS,
+ gt->fuse_topo.eu_mask_per_dss);
+}
+
+/*
+ * Used to obtain the index of the first DSS. Can start searching from the
+ * beginning of a specific dss group (e.g., gslice, cslice, etc.) if
+ * groupsize and groupnum are non-zero.
+ */
+unsigned int
+xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
+{
+ return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
+}
+
+bool xe_dss_mask_empty(const xe_dss_mask_t mask)
+{
+ return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS);
+}
+
+/**
+ * xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
+ * @gt: GT to check
+ * @quad: Which quadrant of the DSS space to check
+ *
+ * Since Xe_HP platforms can have up to four CCS engines, those engines
+ * are each logically associated with a quarter of the possible DSS. If there
+ * are no DSS present in one of the four quadrants of the DSS space, the
+ * corresponding CCS engine is also not available for use.
+ *
+ * Returns false if all DSS in a quadrant of the GT are fused off, else true.
+ */
+bool xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ xe_dss_mask_t all_dss;
+ int g_dss_regs, c_dss_regs, dss_per_quad, quad_first;
+
+ bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+
+ get_num_dss_regs(xe, &g_dss_regs, &c_dss_regs);
+ dss_per_quad = 32 * max(g_dss_regs, c_dss_regs) / 4;
+
+ quad_first = xe_dss_mask_group_ffs(all_dss, dss_per_quad, quad);
+
+ return quad_first < (quad + 1) * dss_per_quad;
+}
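+
+/*
+ * Worked example (illustrative): with two 32-bit compute DSS fuse registers
+ * (as on PVC) and no geometry registers, dss_per_quad = 32 * 2 / 4 = 16, so
+ * quadrant 1 covers DSS 16-31. If the first set bit at or above index 16 is
+ * 40, quad_first (40) is not below (1 + 1) * 16 = 32 and the quadrant is
+ * reported as fused off.
+ */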
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
new file mode 100644
index 000000000..d1b54fb52
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GT_TOPOLOGY_H_
+#define _XE_GT_TOPOLOGY_H_
+
+#include "xe_gt_types.h"
+
+struct drm_printer;
+
+void xe_gt_topology_init(struct xe_gt *gt);
+
+void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
+
+unsigned int
+xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
+
+bool xe_dss_mask_empty(const xe_dss_mask_t mask);
+
+bool
+xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
+
+#endif /* _XE_GT_TOPOLOGY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
new file mode 100644
index 000000000..f74684660
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#ifndef _XE_GT_TYPES_H_
+#define _XE_GT_TYPES_H_
+
+#include "xe_force_wake_types.h"
+#include "xe_gt_idle_types.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_fence_types.h"
+#include "xe_reg_sr_types.h"
+#include "xe_sa_types.h"
+#include "xe_uc_types.h"
+
+struct xe_exec_queue_ops;
+struct xe_migrate;
+struct xe_ring_ops;
+
+enum xe_gt_type {
+ XE_GT_TYPE_UNINITIALIZED,
+ XE_GT_TYPE_MAIN,
+ XE_GT_TYPE_MEDIA,
+};
+
+#define XE_MAX_DSS_FUSE_REGS 3
+#define XE_MAX_EU_FUSE_REGS 1
+
+typedef unsigned long xe_dss_mask_t[BITS_TO_LONGS(32 * XE_MAX_DSS_FUSE_REGS)];
+typedef unsigned long xe_eu_mask_t[BITS_TO_LONGS(32 * XE_MAX_EU_FUSE_REGS)];
+
+struct xe_mmio_range {
+ u32 start;
+ u32 end;
+};
+
+/*
+ * The hardware has multiple kinds of multicast register ranges that need
+ * special register steering (and future platforms are expected to add
+ * additional types).
+ *
+ * During driver startup, we initialize the steering control register to
+ * direct reads to a slice/subslice that are valid for the 'subslice' class
+ * of multicast registers. If another type of steering does not have any
+ * overlap in valid steering targets with 'subslice' style registers, we will
+ * need to explicitly re-steer reads of registers of the other type.
+ *
+ * Only the replication types that may need additional non-default steering
+ * are listed here.
+ */
+enum xe_steering_type {
+ L3BANK,
+ MSLICE,
+ LNCF,
+ DSS,
+ OADDRM,
+ SQIDI_PSMI,
+
+ /*
+ * On some platforms there are multiple types of MCR registers that
+ * will always return a non-terminated value at instance (0, 0). We'll
+ * lump those all into a single category to keep things simple.
+ */
+ INSTANCE0,
+
+ /*
+ * Register ranges that don't need special steering for each register:
+ * it's sufficient to keep the HW-default for the selector, or only
+ * change it once, on GT initialization. This needs to be the last
+ * steering type.
+ */
+ IMPLICIT_STEERING,
+ NUM_STEERING_TYPES
+};
+
+#define gt_to_tile(gt__) \
+ _Generic(gt__, \
+ const struct xe_gt * : (const struct xe_tile *)((gt__)->tile), \
+ struct xe_gt * : (gt__)->tile)
+
+#define gt_to_xe(gt__) \
+ _Generic(gt__, \
+ const struct xe_gt * : (const struct xe_device *)(gt_to_tile(gt__)->xe), \
+ struct xe_gt * : gt_to_tile(gt__)->xe)
+
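+/*
+ * These helpers use _Generic so that passing a const struct xe_gt * yields
+ * const tile/device pointers, preserving const-correctness. Illustrative use:
+ *
+ *	static void dump(const struct xe_gt *gt)
+ *	{
+ *		const struct xe_device *xe = gt_to_xe(gt);
+ *		...
+ *	}
+ */
+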
+/**
+ * struct xe_gt - A "Graphics Technology" unit of the GPU
+ *
+ * A GT ("Graphics Technology") is the subset of a GPU primarily responsible
+ * for implementing the graphics, compute, and/or media IP. It encapsulates
+ * the hardware engines, programmable execution units, and GuC. Each GT has
+ * its own handling of power management (RC6+forcewake) and multicast register
+ * steering.
+ *
+ * A GPU/tile may have a single GT that supplies all graphics, compute, and
+ * media functionality, or the graphics/compute and media may be split into
+ * separate GTs within a tile.
+ */
+struct xe_gt {
+ /** @tile: Backpointer to GT's tile */
+ struct xe_tile *tile;
+
+ /** @info: GT info */
+ struct {
+ /** @type: type of GT */
+ enum xe_gt_type type;
+ /** @id: Unique ID of this GT within the PCI Device */
+ u8 id;
+ /** @reference_clock: clock frequency */
+ u32 reference_clock;
+ /** @engine_mask: mask of engines present on GT */
+ u64 engine_mask;
+ /**
+ * @__engine_mask: mask of engines present on GT read from
+ * xe_pci.c, used to fake reading the engine_mask from the
+ * hwconfig blob.
+ */
+ u64 __engine_mask;
+ } info;
+
+ /**
+ * @mmio: mmio info for GT. All GTs within a tile share the same
+ * register space, but have their own copy of GSI registers at a
+ * specific offset, as well as their own forcewake handling.
+ */
+ struct {
+ /** @fw: force wake for GT */
+ struct xe_force_wake fw;
+ /**
+ * @adj_limit: adjust MMIO address if address is below this
+ * value
+ */
+ u32 adj_limit;
+ /** @adj_offset: offset to add to MMIO address when adjusting */
+ u32 adj_offset;
+ } mmio;
+
+ /**
+ * @reg_sr: table with registers to be restored on GT init/resume/reset
+ */
+ struct xe_reg_sr reg_sr;
+
+ /** @reset: state for GT resets */
+ struct {
+ /**
+ * @worker: work so GT resets can be done asynchronously, allowing
+ * the reset code to safely flush all code paths
+ */
+ struct work_struct worker;
+ } reset;
+
+ /** @tlb_invalidation: TLB invalidation state */
+ struct {
+ /** @seqno: TLB invalidation seqno, protected by CT lock */
+#define TLB_INVALIDATION_SEQNO_MAX 0x100000
+ int seqno;
+ /**
+ * @seqno_recv: last received TLB invalidation seqno, protected by CT lock
+ */
+ int seqno_recv;
+ /**
+ * @pending_fences: list of pending fences waiting TLB
+ * invalidations, protected by CT lock
+ */
+ struct list_head pending_fences;
+ /**
+ * @pending_lock: protects @pending_fences and updating
+ * @seqno_recv.
+ */
+ spinlock_t pending_lock;
+ /**
+ * @fence_tdr: schedules a delayed call to
+ * xe_gt_tlb_fence_timeout after the timeout interval is over.
+ */
+ struct delayed_work fence_tdr;
+ /** @fence_context: context for TLB invalidation fences */
+ u64 fence_context;
+ /**
+ * @fence_seqno: seqno for TLB invalidation fences, protected by
+ * tlb_invalidation.lock
+ */
+ u32 fence_seqno;
+ /** @lock: protects TLB invalidation fences */
+ spinlock_t lock;
+ } tlb_invalidation;
+
+ /**
+ * @ccs_mode: Number of compute engines enabled.
+ * Allows fixed mapping of available compute slices to compute engines.
+ * By default only the first available compute engine is enabled and all
+ * available compute slices are allocated to it.
+ */
+ u32 ccs_mode;
+
+ /** @usm: unified shared memory state */
+ struct {
+ /**
+ * @bb_pool: Pool from which batchbuffers, for USM operations
+ * (e.g. migrations, fixing page tables), are allocated.
+ * Dedicated pool needed so USM operations do not get blocked
+ * behind any user operations which may have resulted in a
+ * fault.
+ */
+ struct xe_sa_manager *bb_pool;
+ /**
+ * @reserved_bcs_instance: reserved BCS instance used for USM
+ * operations (e.g. migrations, fixing page tables)
+ */
+ u16 reserved_bcs_instance;
+ /** @pf_wq: page fault work queue, unbound, high priority */
+ struct workqueue_struct *pf_wq;
+ /** @acc_wq: access counter work queue, unbound, high priority */
+ struct workqueue_struct *acc_wq;
+ /**
+ * @pf_queue: Page fault queue used to buffer faults so they can
+ * be processed outside the GuC CT lock. The queue is sized so
+ * it can hold all possible faults (1 per physical engine).
+ * Multiple queues exist so page faults from different VMs can
+ * be processed in parallel.
+ */
+ struct pf_queue {
+ /** @gt: back pointer to GT */
+ struct xe_gt *gt;
+#define PF_QUEUE_NUM_DW 128
+ /** @data: data in the page fault queue */
+ u32 data[PF_QUEUE_NUM_DW];
+ /**
+ * @head: head pointer in DWs for page fault queue,
+ * moved by worker which processes faults.
+ */
+ u16 head;
+ /**
+ * @tail: tail pointer in DWs for page fault queue,
+ * moved by G2H handler.
+ */
+ u16 tail;
+ /** @lock: protects page fault queue */
+ spinlock_t lock;
+ /** @worker: to process page faults */
+ struct work_struct worker;
+#define NUM_PF_QUEUE 4
+ } pf_queue[NUM_PF_QUEUE];
+ /**
+ * @acc_queue: Same idea as the page fault queue; access
+ * counters cannot be processed under the CT lock either.
+ */
+ struct acc_queue {
+ /** @gt: back pointer to GT */
+ struct xe_gt *gt;
+#define ACC_QUEUE_NUM_DW 128
+ /** @data: data in the access counter queue */
+ u32 data[ACC_QUEUE_NUM_DW];
+ /**
+ * @head: head pointer in DWs for access counter queue,
+ * moved by worker which processes access counters.
+ */
+ u16 head;
+ /**
+ * @tail: tail pointer in DWs for access counter queue,
+ * moved by G2H handler.
+ */
+ u16 tail;
+ /** @lock: protects access counter queue */
+ spinlock_t lock;
+ /** @worker: to process access counters */
+ struct work_struct worker;
+#define NUM_ACC_QUEUE 4
+ } acc_queue[NUM_ACC_QUEUE];
+ } usm;
+
+ /** @ordered_wq: used to serialize GT resets and TDRs */
+ struct workqueue_struct *ordered_wq;
+
+ /** @uc: micro controllers on the GT */
+ struct xe_uc uc;
+
+ /** @gtidle: idle properties of GT */
+ struct xe_gt_idle gtidle;
+
+ /** @exec_queue_ops: submission backend exec queue operations */
+ const struct xe_exec_queue_ops *exec_queue_ops;
+
+ /**
+ * @ring_ops: ring operations for this hw engine (1 per engine class)
+ */
+ const struct xe_ring_ops *ring_ops[XE_ENGINE_CLASS_MAX];
+
+ /** @fence_irq: fence IRQs (1 per engine class) */
+ struct xe_hw_fence_irq fence_irq[XE_ENGINE_CLASS_MAX];
+
+ /** @default_lrc: default LRC state */
+ void *default_lrc[XE_ENGINE_CLASS_MAX];
+
+ /** @hw_engines: hardware engines on the GT */
+ struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];
+
+ /** @eclass: per hardware engine class interface on the GT */
+ struct xe_hw_engine_class_intf eclass[XE_ENGINE_CLASS_MAX];
+
+ /** @pcode: GT's PCODE */
+ struct {
+ /** @lock: protecting GT's PCODE mailbox data */
+ struct mutex lock;
+ } pcode;
+
+ /** @sysfs: sysfs' kobj used by xe_gt_sysfs */
+ struct kobject *sysfs;
+
+ /** @freq: Main GT freq sysfs control */
+ struct kobject *freq;
+
+	/** @mocs: MOCS (memory object control state) info */
+ struct {
+ /** @uc_index: UC index */
+ u8 uc_index;
+ /** @wb_index: WB index, only used on L3_CCS platforms */
+ u8 wb_index;
+ } mocs;
+
+ /** @fuse_topo: GT topology reported by fuse registers */
+ struct {
+ /** @g_dss_mask: dual-subslices usable by geometry */
+ xe_dss_mask_t g_dss_mask;
+
+ /** @c_dss_mask: dual-subslices usable by compute */
+ xe_dss_mask_t c_dss_mask;
+
+		/** @eu_mask_per_dss: EU mask per DSS */
+ xe_eu_mask_t eu_mask_per_dss;
+ } fuse_topo;
+
+ /** @steering: register steering for individual HW units */
+ struct {
+		/** @ranges: register ranges used for this steering type */
+ const struct xe_mmio_range *ranges;
+
+ /** @group_target: target to steer accesses to */
+ u16 group_target;
+ /** @instance_target: instance to steer accesses to */
+ u16 instance_target;
+ } steering[NUM_STEERING_TYPES];
+
+ /**
+ * @mcr_lock: protects the MCR_SELECTOR register for the duration
+ * of a steered operation
+ */
+ spinlock_t mcr_lock;
+
+ /** @wa_active: keep track of active workarounds */
+ struct {
+ /** @gt: bitmap with active GT workarounds */
+ unsigned long *gt;
+ /** @engine: bitmap with active engine workarounds */
+ unsigned long *engine;
+ /** @lrc: bitmap with active LRC workarounds */
+ unsigned long *lrc;
+		/** @oob: bitmap with active OOB workarounds */
+ unsigned long *oob;
+ } wa_active;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
new file mode 100644
index 000000000..0a61390c6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc.h"
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_errors_abi.h"
+#include "generated/xe_wa_oob.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_guc_ads.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_hwconfig.h"
+#include "xe_guc_log.h"
+#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_uc.h"
+#include "xe_uc_fw.h"
+#include "xe_wa.h"
+#include "xe_wopcm.h"
+
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
+ struct xe_bo *bo)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 addr = xe_bo_ggtt_addr(bo);
+
+ xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
+ xe_assert(xe, addr < GUC_GGTT_TOP);
+ xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
+
+ return addr;
+}
+
+static u32 guc_ctl_debug_flags(struct xe_guc *guc)
+{
+ u32 level = xe_guc_log_get_level(&guc->log);
+ u32 flags = 0;
+
+ if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
+ flags |= GUC_LOG_DISABLED;
+ else
+ flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
+ GUC_LOG_VERBOSITY_SHIFT;
+
+ return flags;
+}
+
+static u32 guc_ctl_feature_flags(struct xe_guc *guc)
+{
+ u32 flags = 0;
+
+ if (!guc_to_xe(guc)->info.skip_guc_pc)
+ flags |= GUC_CTL_ENABLE_SLPC;
+
+ return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
+{
+ u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
+ u32 flags;
+
+ #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
+ #define LOG_UNIT SZ_1M
+ #define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
+ #else
+ #define LOG_UNIT SZ_4K
+ #define LOG_FLAG 0
+ #endif
+
+ #if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
+ #define CAPTURE_UNIT SZ_1M
+ #define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
+ #else
+ #define CAPTURE_UNIT SZ_4K
+ #define CAPTURE_FLAG 0
+ #endif
+
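+	/*
+	 * Buffer sizes are encoded as (size / unit - 1), with a flag bit
+	 * selecting the unit. For a hypothetical 8K crash buffer, which is
+	 * not a 1M multiple, LOG_UNIT would be SZ_4K and the encoded value
+	 * 8K / 4K - 1 = 1, with LOG_FLAG left clear (4K units).
+	 */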
+ BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
+ BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
+ BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
+ BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
+
+ BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
+ (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
+ BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
+ (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
+ BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
+ (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
+
+ flags = GUC_LOG_VALID |
+ GUC_LOG_NOTIFY_ON_HALF_FULL |
+ CAPTURE_FLAG |
+ LOG_FLAG |
+ ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+ ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
+ ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
+ GUC_LOG_CAPTURE_SHIFT) |
+ (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+ #undef LOG_UNIT
+ #undef LOG_FLAG
+ #undef CAPTURE_UNIT
+ #undef CAPTURE_FLAG
+
+ return flags;
+}
+
+static u32 guc_ctl_ads_flags(struct xe_guc *guc)
+{
+ u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
+ u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+
+ return flags;
+}
+
+static u32 guc_ctl_wa_flags(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 flags = 0;
+
+ if (XE_WA(gt, 22012773006))
+ flags |= GUC_WA_POLLCS;
+
+ if (XE_WA(gt, 16011759253))
+ flags |= GUC_WA_GAM_CREDITS;
+
+ if (XE_WA(gt, 14014475959))
+ flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+
+ if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797))
+ flags |= GUC_WA_DUAL_QUEUE;
+
+ /*
+ * Wa_22011802037: FIXME - there's more to be done than simply setting
+ * this flag: make sure each CS is stopped when preparing for GT reset
+ * and wait for pending MI_FW.
+ */
+ if (GRAPHICS_VERx100(xe) < 1270)
+ flags |= GUC_WA_PRE_PARSER;
+
+ if (XE_WA(gt, 16011777198))
+ flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
+
+ if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
+ flags |= GUC_WA_CONTEXT_ISOLATION;
+
+ if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
+ !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
+ flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
+
+ if (XE_WA(gt, 1509372804))
+ flags |= GUC_WA_RENDER_RST_RC6_EXIT;
+
+ return flags;
+}
+
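+/*
+ * Pack the PCI device and revision IDs into a single dword: devid in the
+ * upper 16 bits, revid in the lower 16. For instance, a hypothetical devid
+ * of 0x56a0 with revid 0x3 encodes as 0x56a00003.
+ */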
+static u32 guc_ctl_devid(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+
+ return (((u32)xe->info.devid) << 16) | xe->info.revid;
+}
+
+static void guc_init_params(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 *params = guc->params;
+ int i;
+
+ BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
+ BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
+
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+ params[GUC_CTL_FEATURE] = 0;
+ params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+ params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
+ params[GUC_CTL_WA] = 0;
+ params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+}
+
+static void guc_init_params_post_hwconfig(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 *params = guc->params;
+ int i;
+
+ BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
+ BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);
+
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+ params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+ params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+ params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
+ params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
+ params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
+}
+
+/*
+ * Initialize the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
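+ *
+ * Layout: SOFT_SCRATCH(0) is zeroed and each param dword i lands in
+ * SOFT_SCRATCH(1 + i). The remaining scratch register is reserved for
+ * GuC-to-host MMIO messages (see guc_handle_mmio_msg()), matching the
+ * GUC_CTL_MAX_DWORDS + 2 == SOFT_SCRATCH_COUNT invariant checked at init.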
+ */
+static void guc_write_params(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ int i;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
+}
+
+static void guc_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_guc *guc = arg;
+
+ xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ xe_guc_pc_fini(&guc->pc);
+ xe_uc_fini_hw(&guc_to_gt(guc)->uc);
+ xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+}
+
+int xe_guc_init(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ guc->fw.type = XE_UC_FW_TYPE_GUC;
+ ret = xe_uc_fw_init(&guc->fw);
+ if (ret)
+ goto out;
+
+ if (!xe_uc_fw_is_enabled(&guc->fw))
+ return 0;
+
+ ret = xe_guc_log_init(&guc->log);
+ if (ret)
+ goto out;
+
+ ret = xe_guc_ads_init(&guc->ads);
+ if (ret)
+ goto out;
+
+ ret = xe_guc_ct_init(&guc->ct);
+ if (ret)
+ goto out;
+
+ ret = xe_guc_pc_init(&guc->pc);
+ if (ret)
+ goto out;
+
+ ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
+ if (ret)
+ goto out;
+
+ guc_init_params(guc);
+
+ if (xe_gt_is_media_type(gt))
+ guc->notify_reg = MED_GUC_HOST_INTERRUPT;
+ else
+ guc->notify_reg = GUC_HOST_INTERRUPT;
+
+ xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+out:
+ drm_err(&xe->drm, "GuC init failed with %d", ret);
+ return ret;
+}
+
+/**
+ * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
+ * @guc: The GuC object
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_guc_init_post_hwconfig(struct xe_guc *guc)
+{
+ guc_init_params_post_hwconfig(guc);
+
+ return xe_guc_ads_init_post_hwconfig(&guc->ads);
+}
+
+int xe_guc_post_load_init(struct xe_guc *guc)
+{
+ xe_guc_ads_populate_post_load(&guc->ads);
+ guc->submission_state.enabled = true;
+
+ return 0;
+}
+
+int xe_guc_reset(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 guc_status, gdrst;
+ int ret;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ xe_mmio_write32(gt, GDRST, GRDOM_GUC);
+
+ ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
+ if (ret) {
+ drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%8x\n",
+ gdrst);
+ goto err_out;
+ }
+
+ guc_status = xe_mmio_read32(gt, GUC_STATUS);
+ if (!(guc_status & GS_MIA_IN_RESET)) {
+ drm_err(&xe->drm,
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
+ ret = -EIO;
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+
+ return ret;
+}
+
+static void guc_prepare_xfer(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING;
+
+ if (GRAPHICS_VERx100(xe) < 1250)
+ shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_MIA_CACHING;
+
+ if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
+ shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);
+
+ /* Must program this register before loading the ucode with DMA */
+ xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
+
+ xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+}
+
+/*
+ * The RSA signature is transferred either via MMIO scratch registers
+ * (key size <= 256 bytes) or via a buffer in memory (larger keys).
+ */
+static int guc_xfer_rsa(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ size_t copied;
+ int i;
+
+ if (guc->fw.rsa_size > 256) {
+ u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
+ xe_uc_fw_rsa_offset(&guc->fw);
+ xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
+ return 0;
+ }
+
+ copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
+ if (copied < sizeof(rsa))
+ return -ENOMEM;
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
+}
+
+static int guc_wait_ucode(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 status;
+ int ret;
+
+ /*
+ * Wait for the GuC to start up.
+ * NB: Docs recommend not using the interrupt for completion.
+ * Measurements indicate this should take no more than 20ms
+ * (assuming the GT clock is at maximum frequency). So, a
+ * timeout here indicates that the GuC has failed and is unusable.
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
+ *
+ * FIXME: There is a known (but exceedingly unlikely) race condition
+ * where the asynchronous frequency management code could reduce
+ * the GT clock while a GuC reload is in progress (during a full
+ * GT reset). A fix is in progress but there are complex locking
+ * issues to be resolved. In the meantime bump the timeout to
+ * 200ms. Even at slowest clock, this should be sufficient. And
+ * in the working case, a larger timeout makes no difference.
+ */
+ ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
+ FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
+ 200000, &status, false);
+
+ if (ret) {
+ struct drm_device *drm = &xe->drm;
+ struct drm_printer p = drm_info_printer(drm->dev);
+
+ drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
+ drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ REG_FIELD_GET(GS_BOOTROM_MASK, status),
+ REG_FIELD_GET(GS_UKERNEL_MASK, status),
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
+
+ if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+ drm_info(drm, "GuC firmware signature verification failed\n");
+ ret = -ENOEXEC;
+ }
+
+ if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
+ XE_GUC_LOAD_STATUS_EXCEPTION) {
+ drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+ xe_mmio_read32(guc_to_gt(guc),
+ SOFT_SCRATCH(13)));
+ ret = -ENXIO;
+ }
+
+ xe_guc_log_print(&guc->log, &p);
+ } else {
+ drm_dbg(&xe->drm, "GuC successfully loaded");
+ }
+
+ return ret;
+}
+
+static int __xe_guc_upload(struct xe_guc *guc)
+{
+ int ret;
+
+ guc_write_params(guc);
+ guc_prepare_xfer(guc);
+
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded separately, either by copying it to the UOS_RSA_SCRATCH
+ * register (if key size <= 256) or through a ggtt-pinned vma (if key
+ * size > 256). The RSA size and therefore the way we provide it to the
+ * HW is fixed for each platform and hard-coded in the bootrom.
+ */
+ ret = guc_xfer_rsa(guc);
+ if (ret)
+ goto out;
+ /*
+ * Current uCode expects the code to be loaded at 8k; locations below
+ * this are used for the stack.
+ */
+ ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
+ if (ret)
+ goto out;
+
+ /* Wait for authentication */
+ ret = guc_wait_ucode(guc);
+ if (ret)
+ goto out;
+
+ xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
+ return 0;
+
+out:
+ xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
+ return 0 /* FIXME: ret, don't want to stop load currently */;
+}
+
+/**
+ * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
+ * @guc: The GuC object
+ *
+ * This function uploads a minimal GuC that does not support submissions but
+ * is in a state where the hwconfig table can be read. Next, it reads and parses
+ * the hwconfig table so it can be used for subsequent steps in the driver load.
+ * Lastly, it enables CT communication (XXX: this is needed for PFs/VFs only).
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
+{
+ int ret;
+
+ xe_guc_ads_populate_minimal(&guc->ads);
+
+ ret = __xe_guc_upload(guc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_hwconfig_init(guc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_enable_communication(guc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int xe_guc_upload(struct xe_guc *guc)
+{
+ xe_guc_ads_populate(&guc->ads);
+
+ return __xe_guc_upload(guc);
+}
+
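+/*
+ * The GuC can post early notifications (crash dump posted, exception) via
+ * SOFT_SCRATCH(15) before CT communication is up; read and clear any such
+ * message when enabling communication.
+ */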
+static void guc_handle_mmio_msg(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 msg;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
+ msg &= XE_GUC_RECV_MSG_EXCEPTION |
+ XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
+ xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
+
+ if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
+ drm_err(&guc_to_xe(guc)->drm,
+ "Received early GuC crash dump notification!\n");
+
+ if (msg & XE_GUC_RECV_MSG_EXCEPTION)
+ drm_err(&guc_to_xe(guc)->drm,
+ "Received early GuC exception notification!\n");
+}
+
+static void guc_enable_irq(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 events = xe_gt_is_media_type(gt) ?
+ REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
+ REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ /* Primary GuC and media GuC share a single enable bit */
+ xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
+
+ /*
+ * There are separate mask bits for primary and media GuCs, so use
+ * a RMW operation to avoid clobbering the other GuC's setting.
+ */
+ xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
+}
+
+int xe_guc_enable_communication(struct xe_guc *guc)
+{
+ int err;
+
+ guc_enable_irq(guc);
+
+ xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
+ ARAT_EXPIRED_INTRMSK, 0);
+
+ err = xe_guc_ct_enable(&guc->ct);
+ if (err)
+ return err;
+
+ guc_handle_mmio_msg(guc);
+
+ return 0;
+}
+
+int xe_guc_suspend(struct xe_guc *guc)
+{
+ int ret;
+ u32 action[] = {
+ XE_GUC_ACTION_CLIENT_SOFT_RESET,
+ };
+
+ ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
+ if (ret) {
+ drm_err(&guc_to_xe(guc)->drm,
+ "GuC suspend: CLIENT_SOFT_RESET fail: %d!\n", ret);
+ return ret;
+ }
+
+ xe_guc_sanitize(guc);
+ return 0;
+}
+
+void xe_guc_notify(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ const u32 default_notify_data = 0;
+
+ /*
+ * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
+ * additional payload data to the GuC but this capability is not
+ * used by the firmware yet. Use default value in the meantime.
+ */
+ xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
+}
+
+int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_AUTHENTICATE_HUC,
+ rsa_addr
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
+ u32 len, u32 *response_buf)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 header, reply;
+ struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
+ MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
+ const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
+ int ret;
+ int i;
+
+ BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
+
+ xe_assert(xe, !guc->ct.enabled);
+ xe_assert(xe, len);
+ xe_assert(xe, len <= VF_SW_FLAG_COUNT);
+ xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
+ xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
+ GUC_HXG_ORIGIN_HOST);
+ xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
+ GUC_HXG_TYPE_REQUEST);
+
+retry:
+	/* Not in the critical data-path, just do if-else for GT type */
+ if (xe_gt_is_media_type(gt)) {
+ for (i = 0; i < len; ++i)
+ xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
+ request[i]);
+ xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
+ } else {
+ for (i = 0; i < len; ++i)
+ xe_mmio_write32(gt, VF_SW_FLAG(i),
+ request[i]);
+ xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
+ }
+
+ xe_guc_notify(guc);
+
+ ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
+ 50000, &reply, false);
+ if (ret) {
+timeout:
+ drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
+ request[0], reply);
+ return ret;
+ }
+
+ header = xe_mmio_read32(gt, reply_reg);
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
+ GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
+ /*
+		 * Once we get a BUSY reply we must wait again for the final
+		 * response, but this time we can't use the ORIGIN mask
+		 * anymore. To spot the right change in the reply, we take
+		 * advantage of the fact that the SUCCESS and FAILURE responses
+		 * differ only by a single bit while all other bits are set,
+		 * so those bits can be used as a new mask.
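+		 *
+		 * For example: the BUILD_BUG_ONs below guarantee that, with
+		 * the 3-bit TYPE field, SUCCESS is the all-ones value 0b111
+		 * and FAILURE differs from it only in bit 0 (0b110), so
+		 * resp_bits = 0b111 & 0b110 = 0b110 matches exactly those two
+		 * response types and nothing else.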
+ */
+ u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
+ u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);
+
+ BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
+ BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
+
+ ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
+ 1000000, &header, false);
+
+ if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
+ GUC_HXG_ORIGIN_GUC))
+ goto proto;
+ if (unlikely(ret))
+ goto timeout;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
+ GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
+
+ drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
+ request[0], reason);
+ goto retry;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
+ GUC_HXG_TYPE_RESPONSE_FAILURE) {
+ u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
+ u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
+
+ drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
+ request[0], error, hint);
+ return -ENXIO;
+ }
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
+ GUC_HXG_TYPE_RESPONSE_SUCCESS) {
+proto:
+ drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
+ request[0], header);
+ return -EPROTO;
+ }
+
+ /* Just copy entire possible message response */
+ if (response_buf) {
+ response_buf[0] = header;
+
+ for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
+ reply_reg.addr += sizeof(u32);
+ response_buf[i] = xe_mmio_read32(gt, reply_reg);
+ }
+ }
+
+ /* Use data from the GuC response as our return value */
+ return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
+}
+
+int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
+{
+ return xe_guc_mmio_send_recv(guc, request, len, NULL);
+}
+
+static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ GUC_ACTION_HOST2GUC_SELF_CFG),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
+ lower_32_bits(val)),
+ FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
+ upper_32_bits(val)),
+ };
+ int ret;
+
+ xe_assert(xe, len <= 2);
+ xe_assert(xe, len != 1 || !upper_32_bits(val));
+
+ /* Self config must go over MMIO */
+ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
+
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret > 1))
+ return -EPROTO;
+ if (unlikely(!ret))
+ return -ENOKEY;
+
+ return 0;
+}
+
+int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
+{
+ return guc_self_cfg(guc, key, 1, val);
+}
+
+int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
+{
+ return guc_self_cfg(guc, key, 2, val);
+}
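+
+/*
+ * Self-config is used early, e.g. to register CTB locations before CT
+ * communication is up (see guc_ct_ctb_h2g_register()):
+ *
+ *	xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
+ *			  desc_addr);
+ */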
+
+void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
+{
+ if (iir & GUC_INTR_GUC2HOST)
+ xe_guc_ct_irq_handler(&guc->ct);
+}
+
+void xe_guc_sanitize(struct xe_guc *guc)
+{
+ xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
+ xe_guc_ct_disable(&guc->ct);
+ guc->submission_state.enabled = false;
+}
+
+int xe_guc_reset_prepare(struct xe_guc *guc)
+{
+ return xe_guc_submit_reset_prepare(guc);
+}
+
+void xe_guc_reset_wait(struct xe_guc *guc)
+{
+ xe_guc_submit_reset_wait(guc);
+}
+
+void xe_guc_stop_prepare(struct xe_guc *guc)
+{
+ XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
+}
+
+int xe_guc_stop(struct xe_guc *guc)
+{
+ int ret;
+
+ xe_guc_ct_disable(&guc->ct);
+
+ ret = xe_guc_submit_stop(guc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int xe_guc_start(struct xe_guc *guc)
+{
+ int ret;
+
+ ret = xe_guc_pc_start(&guc->pc);
+ XE_WARN_ON(ret);
+
+ return xe_guc_submit_start(guc);
+}
+
+void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 status;
+ int err;
+ int i;
+
+ xe_uc_fw_print(&guc->fw, p);
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return;
+
+ status = xe_mmio_read32(gt, GUC_STATUS);
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ REG_FIELD_GET(GS_BOOTROM_MASK, status));
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ REG_FIELD_GET(GS_MIA_MASK, status));
+ drm_printf(p, "\tLog level = %d\n",
+ xe_guc_log_get_level(&guc->log));
+
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
+ }
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+
+ xe_guc_ct_print(&guc->ct, p, false);
+ xe_guc_submit_print(guc, p);
+}
+
+/**
+ * xe_guc_in_reset() - Detect if GuC MIA is in reset.
+ * @guc: The GuC object
+ *
+ * This function detects a runtime resume from d3cold by leveraging
+ * GUC_STATUS; the GuC does not get reset during d3hot. It is strictly
+ * to be called from the RPM resume handler.
+ *
+ * Return: true if failed to get forcewake or GuC MIA is in Reset,
+ * otherwise false.
+ */
+bool xe_guc_in_reset(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 status;
+ int err;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return true;
+
+ status = xe_mmio_read32(gt, GUC_STATUS);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+
+ return status & GS_MIA_IN_RESET;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
new file mode 100644
index 000000000..d3e49e7fd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_H_
+#define _XE_GUC_H_
+
+#include "xe_gt.h"
+#include "xe_guc_types.h"
+#include "xe_hw_engine_types.h"
+#include "xe_macros.h"
+
+struct drm_printer;
+
+int xe_guc_init(struct xe_guc *guc);
+int xe_guc_init_post_hwconfig(struct xe_guc *guc);
+int xe_guc_post_load_init(struct xe_guc *guc);
+int xe_guc_reset(struct xe_guc *guc);
+int xe_guc_upload(struct xe_guc *guc);
+int xe_guc_min_load_for_hwconfig(struct xe_guc *guc);
+int xe_guc_enable_communication(struct xe_guc *guc);
+int xe_guc_suspend(struct xe_guc *guc);
+void xe_guc_notify(struct xe_guc *guc);
+int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr);
+int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len);
+int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, u32 len,
+ u32 *response_buf);
+int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val);
+int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val);
+void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir);
+void xe_guc_sanitize(struct xe_guc *guc);
+void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p);
+int xe_guc_reset_prepare(struct xe_guc *guc);
+void xe_guc_reset_wait(struct xe_guc *guc);
+void xe_guc_stop_prepare(struct xe_guc *guc);
+int xe_guc_stop(struct xe_guc *guc);
+int xe_guc_start(struct xe_guc *guc);
+bool xe_guc_in_reset(struct xe_guc *guc);
+
+static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class)
+{
+ switch (class) {
+ case XE_ENGINE_CLASS_RENDER:
+ return GUC_RENDER_CLASS;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ return GUC_VIDEO_CLASS;
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ return GUC_VIDEOENHANCE_CLASS;
+ case XE_ENGINE_CLASS_COPY:
+ return GUC_BLITTER_CLASS;
+ case XE_ENGINE_CLASS_COMPUTE:
+ return GUC_COMPUTE_CLASS;
+ case XE_ENGINE_CLASS_OTHER:
+ return GUC_GSC_OTHER_CLASS;
+ default:
+ XE_WARN_ON(class);
+ return -1;
+ }
+}
+
+static inline struct xe_gt *guc_to_gt(struct xe_guc *guc)
+{
+ return container_of(guc, struct xe_gt, uc.guc);
+}
+
+static inline struct xe_device *guc_to_xe(struct xe_guc *guc)
+{
+ return gt_to_xe(guc_to_gt(guc));
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
new file mode 100644
index 000000000..390e6f1bf
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_ads.h"
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "xe_bo.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_guc.h"
+#include "xe_hw_engine.h"
+#include "xe_lrc.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+
+/* Slack of a few additional entries per engine */
+#define ADS_REGSET_EXTRA_MAX 8
+
+static struct xe_guc *
+ads_to_guc(struct xe_guc_ads *ads)
+{
+ return container_of(ads, struct xe_guc, ads);
+}
+
+static struct xe_gt *
+ads_to_gt(struct xe_guc_ads *ads)
+{
+ return container_of(ads, struct xe_gt, uc.guc.ads);
+}
+
+static struct xe_device *
+ads_to_xe(struct xe_guc_ads *ads)
+{
+ return gt_to_xe(ads_to_gt(ads));
+}
+
+static struct iosys_map *
+ads_to_map(struct xe_guc_ads *ads)
+{
+ return &ads->bo->vmap;
+}
+
+/* UM Queue parameters: */
+#define GUC_UM_QUEUE_SIZE (SZ_64K)
+#define GUC_PAGE_RES_TIMEOUT_US (-1)
+
+/*
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. A single GEM object contains the ADS struct itself (guc_ads) and
+ * all the extra buffers indirectly linked via the ADS struct's entries.
+ *
+ * Layout of the ADS blob allocated for the GuC:
+ *
+ * +---------------------------------------+ <== base
+ * | guc_ads |
+ * +---------------------------------------+
+ * | guc_policies |
+ * +---------------------------------------+
+ * | guc_gt_system_info |
+ * +---------------------------------------+
+ * | guc_engine_usage |
+ * +---------------------------------------+
+ * | guc_um_init_params |
+ * +---------------------------------------+ <== static
+ * | guc_mmio_reg[countA] (engine 0.0) |
+ * | guc_mmio_reg[countB] (engine 0.1) |
+ * | guc_mmio_reg[countC] (engine 1.0) |
+ * | ... |
+ * +---------------------------------------+ <== dynamic
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | golden contexts |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | capture lists |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | UM queues |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ * | private data |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
+ */
+struct __guc_ads_blob {
+ struct guc_ads ads;
+ struct guc_policies policies;
+ struct guc_gt_system_info system_info;
+ struct guc_engine_usage engine_usage;
+ struct guc_um_init_params um_init_params;
+ /* From here on, location is dynamic! Refer to above diagram. */
+	struct guc_mmio_reg regset[];
+} __packed;
+
+#define ads_blob_read(ads_, field_) \
+ xe_map_rd_field(ads_to_xe(ads_), ads_to_map(ads_), 0, \
+ struct __guc_ads_blob, field_)
+
+#define ads_blob_write(ads_, field_, val_) \
+ xe_map_wr_field(ads_to_xe(ads_), ads_to_map(ads_), 0, \
+ struct __guc_ads_blob, field_, val_)
+
+#define info_map_write(xe_, map_, field_, val_) \
+ xe_map_wr_field(xe_, map_, 0, struct guc_gt_system_info, field_, val_)
+
+#define info_map_read(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_gt_system_info, field_)
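+
+/*
+ * The ads_blob_read/write() helpers hide the iosys_map indirection so a
+ * field update works the same whether the ADS BO lives in system memory
+ * or VRAM, e.g.:
+ *
+ *	ads_blob_write(ads, policies.is_valid, 1);
+ */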
+
+static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+
+ xe_assert(xe, ads->regset_size);
+
+ return ads->regset_size;
+}
+
+static size_t guc_ads_golden_lrc_size(struct xe_guc_ads *ads)
+{
+ return PAGE_ALIGN(ads->golden_lrc_size);
+}
+
+static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
+{
+ /* FIXME: Allocate a proper capture list */
+ return PAGE_ALIGN(PAGE_SIZE);
+}
+
+static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+
+ if (!xe->info.has_usm)
+ return 0;
+
+ return GUC_UM_QUEUE_SIZE * GUC_UM_HW_QUEUE_MAX;
+}
+
+static size_t guc_ads_private_data_size(struct xe_guc_ads *ads)
+{
+ return PAGE_ALIGN(ads_to_guc(ads)->fw.private_data_size);
+}
+
+static size_t guc_ads_regset_offset(struct xe_guc_ads *ads)
+{
+ return offsetof(struct __guc_ads_blob, regset);
+}
+
+static size_t guc_ads_golden_lrc_offset(struct xe_guc_ads *ads)
+{
+ size_t offset;
+
+ offset = guc_ads_regset_offset(ads) +
+ guc_ads_regset_size(ads);
+
+ return PAGE_ALIGN(offset);
+}
+
+static size_t guc_ads_capture_offset(struct xe_guc_ads *ads)
+{
+ size_t offset;
+
+ offset = guc_ads_golden_lrc_offset(ads) +
+ guc_ads_golden_lrc_size(ads);
+
+ return PAGE_ALIGN(offset);
+}
+
+static size_t guc_ads_um_queues_offset(struct xe_guc_ads *ads)
+{
+	size_t offset;
+
+ offset = guc_ads_capture_offset(ads) +
+ guc_ads_capture_size(ads);
+
+ return PAGE_ALIGN(offset);
+}
+
+static size_t guc_ads_private_data_offset(struct xe_guc_ads *ads)
+{
+ size_t offset;
+
+ offset = guc_ads_um_queues_offset(ads) +
+ guc_ads_um_queues_size(ads);
+
+ return PAGE_ALIGN(offset);
+}
+
+static size_t guc_ads_size(struct xe_guc_ads *ads)
+{
+ return guc_ads_private_data_offset(ads) +
+ guc_ads_private_data_size(ads);
+}
+
+static bool needs_wa_1607983814(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) < 1250;
+}
+
+static size_t calculate_regset_size(struct xe_gt *gt)
+{
+ struct xe_reg_sr_entry *sr_entry;
+ unsigned long sr_idx;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ unsigned int count = 0;
+
+ for_each_hw_engine(hwe, gt, id)
+ xa_for_each(&hwe->reg_sr.xa, sr_idx, sr_entry)
+ count++;
+
+ count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
+
+ if (needs_wa_1607983814(gt_to_xe(gt)))
+ count += LNCFCMOCS_REG_COUNT;
+
+ return count * sizeof(struct guc_mmio_reg);
+}
+
+static u32 engine_enable_mask(struct xe_gt *gt, enum xe_engine_class class)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 mask = 0;
+
+ for_each_hw_engine(hwe, gt, id)
+ if (hwe->class == class)
+ mask |= BIT(hwe->instance);
+
+ return mask;
+}
+
+static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ size_t total_size = 0, alloc_size, real_size;
+ int class;
+
+ for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ if (!engine_enable_mask(gt, class))
+ continue;
+
+ real_size = xe_lrc_size(xe, class);
+ alloc_size = PAGE_ALIGN(real_size);
+ total_size += alloc_size;
+ }
+
+ return total_size;
+}
+
+#define MAX_GOLDEN_LRC_SIZE (SZ_4K * 64)
+
+int xe_guc_ads_init(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+
+ ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+ ads->regset_size = calculate_regset_size(gt);
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ads->bo = bo;
+
+ return 0;
+}
+
+/**
+ * xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
+ * @ads: Additional data structures object
+ *
+ * Recalculate golden_lrc_size & regset_size as the number of hardware engines may
+ * have changed after the hwconfig was loaded. Also verify the new sizes fit in
+ * the already allocated ADS buffer object.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
+{
+ struct xe_gt *gt = ads_to_gt(ads);
+ u32 prev_regset_size = ads->regset_size;
+
+ xe_gt_assert(gt, ads->bo);
+
+ ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+ ads->regset_size = calculate_regset_size(gt);
+
+ xe_gt_assert(gt, ads->golden_lrc_size +
+ (ads->regset_size - prev_regset_size) <=
+ MAX_GOLDEN_LRC_SIZE);
+
+ return 0;
+}
+
+static void guc_policies_init(struct xe_guc_ads *ads)
+{
+ ads_blob_write(ads, policies.dpc_promote_time,
+ GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
+ ads_blob_write(ads, policies.max_num_work_items,
+ GLOBAL_POLICY_MAX_NUM_WI);
+ ads_blob_write(ads, policies.global_flags, 0);
+ ads_blob_write(ads, policies.is_valid, 1);
+}
+
+static void fill_engine_enable_masks(struct xe_gt *gt,
+ struct iosys_map *info_map)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ info_map_write(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS],
+ engine_enable_mask(gt, XE_ENGINE_CLASS_RENDER));
+ info_map_write(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS],
+ engine_enable_mask(gt, XE_ENGINE_CLASS_COPY));
+ info_map_write(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS],
+ engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_DECODE));
+ info_map_write(xe, info_map,
+ engine_enabled_masks[GUC_VIDEOENHANCE_CLASS],
+ engine_enable_mask(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE));
+ info_map_write(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS],
+ engine_enable_mask(gt, XE_ENGINE_CLASS_COMPUTE));
+ info_map_write(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
+ engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
+}
+
+static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ offsetof(struct __guc_ads_blob, system_info));
+ u8 guc_class;
+
+	for (guc_class = 0; guc_class < GUC_MAX_ENGINE_CLASSES; ++guc_class) {
+ if (!info_map_read(xe, &info_map,
+ engine_enabled_masks[guc_class]))
+ continue;
+
+ ads_blob_write(ads, ads.eng_state_size[guc_class],
+ guc_ads_golden_lrc_size(ads) -
+ xe_lrc_skip_size(xe));
+ ads_blob_write(ads, ads.golden_context_lrca[guc_class],
+ xe_bo_ggtt_addr(ads->bo) +
+ guc_ads_golden_lrc_offset(ads));
+ }
+}
+
+static void guc_mapping_table_init_invalid(struct xe_gt *gt,
+ struct iosys_map *info_map)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i, j;
+
+ /* Table must be set to invalid values for entries not used */
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
+ for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
+ info_map_write(xe, info_map, mapping_table[i][j],
+ GUC_MAX_INSTANCES_PER_CLASS);
+}
+
+static void guc_mapping_table_init(struct xe_gt *gt,
+ struct iosys_map *info_map)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ guc_mapping_table_init_invalid(gt, info_map);
+
+ for_each_hw_engine(hwe, gt, id) {
+ u8 guc_class;
+
+ guc_class = xe_engine_class_to_guc_class(hwe->class);
+ info_map_write(xe, info_map,
+ mapping_table[guc_class][hwe->logical_instance],
+ hwe->instance);
+ }
+}
+
+static void guc_capture_list_init(struct xe_guc_ads *ads)
+{
+ int i, j;
+ u32 addr = xe_bo_ggtt_addr(ads->bo) + guc_ads_capture_offset(ads);
+
+ /* FIXME: Populate a proper capture list */
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+ for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
+ ads_blob_write(ads, ads.capture_instance[i][j], addr);
+ ads_blob_write(ads, ads.capture_class[i][j], addr);
+ }
+
+ ads_blob_write(ads, ads.capture_global[i], addr);
+ }
+}
+
+static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
+ struct iosys_map *regset_map,
+ struct xe_reg reg,
+ unsigned int n_entry)
+{
+ struct guc_mmio_reg entry = {
+ .offset = reg.addr,
+ .flags = reg.masked ? GUC_REGSET_MASKED : 0,
+ };
+
+ xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
+ &entry, sizeof(entry));
+}
+
+static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
+ struct iosys_map *regset_map,
+ struct xe_hw_engine *hwe)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_hw_engine *hwe_rcs_reset_domain =
+ xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
+ struct xe_reg_sr_entry *entry;
+ unsigned long idx;
+ unsigned int count = 0;
+ const struct {
+ struct xe_reg reg;
+ bool skip;
+ } *e, extra_regs[] = {
+ { .reg = RING_MODE(hwe->mmio_base), },
+ { .reg = RING_HWS_PGA(hwe->mmio_base), },
+ { .reg = RING_IMR(hwe->mmio_base), },
+ { .reg = RCU_MODE, .skip = hwe != hwe_rcs_reset_domain },
+ { .reg = CCS_MODE,
+ .skip = hwe != hwe_rcs_reset_domain || !xe_gt_ccs_mode_enabled(hwe->gt) },
+ };
+ u32 i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(extra_regs) > ADS_REGSET_EXTRA_MAX);
+
+ xa_for_each(&hwe->reg_sr.xa, idx, entry)
+ guc_mmio_regset_write_one(ads, regset_map, entry->reg, count++);
+
+ for (e = extra_regs; e < extra_regs + ARRAY_SIZE(extra_regs); e++) {
+ if (e->skip)
+ continue;
+
+ guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
+ }
+
+ /* Wa_1607983814 */
+ if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
+ guc_mmio_regset_write_one(ads, regset_map,
+ XELP_LNCFCMOCS(i), count++);
+ }
+ }
+
+ return count;
+}
+
+static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
+{
+ size_t regset_offset = guc_ads_regset_offset(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 addr = xe_bo_ggtt_addr(ads->bo) + regset_offset;
+ struct iosys_map regset_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ regset_offset);
+ unsigned int regset_used = 0;
+
+ for_each_hw_engine(hwe, gt, id) {
+ unsigned int count;
+ u8 gc;
+
+ /*
+		 * 1. Write all MMIO entries for this hardware engine to the
+		 * table. No need to worry about fused-off engines or engines
+		 * with an empty regset: the reg_state_list has been zeroed by
+		 * xe_guc_ads_populate()
+ */
+ count = guc_mmio_regset_write(ads, &regset_map, hwe);
+ if (!count)
+ continue;
+
+ /*
+ * 2. Record in the header (ads.reg_state_list) the address
+ * location and number of entries
+ */
+ gc = xe_engine_class_to_guc_class(hwe->class);
+ ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].address, addr);
+ ads_blob_write(ads, ads.reg_state_list[gc][hwe->instance].count, count);
+
+ addr += count * sizeof(struct guc_mmio_reg);
+ iosys_map_incr(&regset_map, count * sizeof(struct guc_mmio_reg));
+
+ regset_used += count * sizeof(struct guc_mmio_reg);
+ }
+
+ xe_gt_assert(gt, regset_used <= ads->regset_size);
+}
+
+static void guc_um_init_params(struct xe_guc_ads *ads)
+{
+ u32 um_queue_offset = guc_ads_um_queues_offset(ads);
+ u64 base_dpa;
+ u32 base_ggtt;
+ int i;
+
+ base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
+ base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;
+
+ for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
+ ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
+ base_dpa + (i * GUC_UM_QUEUE_SIZE));
+ ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
+ base_ggtt + (i * GUC_UM_QUEUE_SIZE));
+ ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
+ GUC_UM_QUEUE_SIZE);
+ }
+
+ ads_blob_write(ads, um_init_params.page_response_timeout_in_us,
+ GUC_PAGE_RES_TIMEOUT_US);
+}
+
+static void guc_doorbell_init(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+
+ if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
+ u32 distdbreg =
+ xe_mmio_read32(gt, DIST_DBS_POPULATED);
+
+ ads_blob_write(ads,
+ system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
+ REG_FIELD_GET(DOORBELLS_PER_SQIDI_MASK, distdbreg) + 1);
+ }
+}
+
+/**
+ * xe_guc_ads_populate_minimal - populate minimal ADS
+ * @ads: Additional data structures object
+ *
+ * This function populates a minimal ADS that does not support submissions but
+ * is enough so that the GuC can load and the hwconfig table can be read.
+ */
+void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
+{
+ struct xe_gt *gt = ads_to_gt(ads);
+ struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ offsetof(struct __guc_ads_blob, system_info));
+ u32 base = xe_bo_ggtt_addr(ads->bo);
+
+ xe_gt_assert(gt, ads->bo);
+
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ guc_policies_init(ads);
+ guc_prep_golden_lrc_null(ads);
+ guc_mapping_table_init_invalid(gt, &info_map);
+ guc_doorbell_init(ads);
+
+ ads_blob_write(ads, ads.scheduler_policies, base +
+ offsetof(struct __guc_ads_blob, policies));
+ ads_blob_write(ads, ads.gt_system_info, base +
+ offsetof(struct __guc_ads_blob, system_info));
+ ads_blob_write(ads, ads.private_data, base +
+ guc_ads_private_data_offset(ads));
+}
+
+void xe_guc_ads_populate(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ offsetof(struct __guc_ads_blob, system_info));
+ u32 base = xe_bo_ggtt_addr(ads->bo);
+
+ xe_gt_assert(gt, ads->bo);
+
+ xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
+ guc_policies_init(ads);
+ fill_engine_enable_masks(gt, &info_map);
+ guc_mmio_reg_state_init(ads);
+ guc_prep_golden_lrc_null(ads);
+ guc_mapping_table_init(gt, &info_map);
+ guc_capture_list_init(ads);
+ guc_doorbell_init(ads);
+
+ if (xe->info.has_usm) {
+ guc_um_init_params(ads);
+ ads_blob_write(ads, ads.um_init_data, base +
+ offsetof(struct __guc_ads_blob, um_init_params));
+ }
+
+ ads_blob_write(ads, ads.scheduler_policies, base +
+ offsetof(struct __guc_ads_blob, policies));
+ ads_blob_write(ads, ads.gt_system_info, base +
+ offsetof(struct __guc_ads_blob, system_info));
+ ads_blob_write(ads, ads.private_data, base +
+ guc_ads_private_data_offset(ads));
+}
+
+static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+{
+ struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ offsetof(struct __guc_ads_blob, system_info));
+ size_t total_size = 0, alloc_size, real_size;
+ u32 addr_ggtt, offset;
+ int class;
+
+ offset = guc_ads_golden_lrc_offset(ads);
+ addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+ for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ u8 guc_class;
+
+ guc_class = xe_engine_class_to_guc_class(class);
+
+ if (!info_map_read(xe, &info_map,
+ engine_enabled_masks[guc_class]))
+ continue;
+
+ xe_gt_assert(gt, gt->default_lrc[class]);
+
+ real_size = xe_lrc_size(xe, class);
+ alloc_size = PAGE_ALIGN(real_size);
+ total_size += alloc_size;
+
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists LRC registers. This is
+ * required to allow the GuC to restore just the engine state
+ * when a watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
+ ads_blob_write(ads, ads.eng_state_size[guc_class],
+ real_size - xe_lrc_skip_size(xe));
+ ads_blob_write(ads, ads.golden_context_lrca[guc_class],
+ addr_ggtt);
+
+ xe_map_memcpy_to(xe, ads_to_map(ads), offset,
+ gt->default_lrc[class], real_size);
+
+ addr_ggtt += alloc_size;
+ offset += alloc_size;
+ }
+
+ xe_gt_assert(gt, total_size == ads->golden_lrc_size);
+}
+
+void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
+{
+ guc_populate_golden_lrc(ads);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.h b/drivers/gpu/drm/xe/xe_guc_ads.h
new file mode 100644
index 000000000..138ef6267
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_ads.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ADS_H_
+#define _XE_GUC_ADS_H_
+
+#include "xe_guc_ads_types.h"
+
+int xe_guc_ads_init(struct xe_guc_ads *ads);
+int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads);
+void xe_guc_ads_populate(struct xe_guc_ads *ads);
+void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads);
+void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
new file mode 100644
index 000000000..4afe44bec
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ADS_TYPES_H_
+#define _XE_GUC_ADS_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_bo;
+
+/**
+ * struct xe_guc_ads - GuC additional data structures (ADS)
+ */
+struct xe_guc_ads {
+ /** @bo: XE BO for GuC ads blob */
+ struct xe_bo *bo;
+ /** @golden_lrc_size: golden LRC size */
+ size_t golden_lrc_size;
+ /** @regset_size: size of register set passed to GuC for save/restore */
+ u32 regset_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
new file mode 100644
index 000000000..24a33fa36
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -0,0 +1,1320 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_ct.h"
+
+#include <linux/bitfield.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_klvs_abi.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_guc.h"
+#include "xe_guc_submit.h"
+#include "xe_map.h"
+#include "xe_pm.h"
+#include "xe_trace.h"
+
+/* Used when a CT send wants to block and/or receive data */
+struct g2h_fence {
+ u32 *response_buffer;
+ u32 seqno;
+ u16 response_len;
+ u16 error;
+ u16 hint;
+ u16 reason;
+ bool retry;
+ bool fail;
+ bool done;
+};
+
+static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
+{
+ g2h_fence->response_buffer = response_buffer;
+ g2h_fence->response_len = 0;
+ g2h_fence->fail = false;
+ g2h_fence->retry = false;
+ g2h_fence->done = false;
+ g2h_fence->seqno = ~0x0;
+}
+
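+/* A seqno of ~0 marks a fence whose seqno has not yet been allocated */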
+static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
+{
+ return g2h_fence->seqno == ~0x0;
+}
+
+static struct xe_guc *
+ct_to_guc(struct xe_guc_ct *ct)
+{
+ return container_of(ct, struct xe_guc, ct);
+}
+
+static struct xe_gt *
+ct_to_gt(struct xe_guc_ct *ct)
+{
+ return container_of(ct, struct xe_gt, uc.guc.ct);
+}
+
+static struct xe_device *
+ct_to_xe(struct xe_guc_ct *ct)
+{
+ return gt_to_xe(ct_to_gt(ct));
+}
+
+/**
+ * DOC: GuC CTB Blob
+ *
+ * We allocate single blob to hold both CTB descriptors and buffers:
+ *
+ * +--------+-----------------------------------------------+------+
+ * | offset | contents | size |
+ * +========+===============================================+======+
+ * | 0x0000 | H2G CTB Descriptor (send) | |
+ * +--------+-----------------------------------------------+ 4K |
+ * | 0x0800 | G2H CTB Descriptor (g2h) | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | H2G CT Buffer (send) | n*4K |
+ * | | | |
+ * +--------+-----------------------------------------------+------+
+ * | 0x1000 | G2H CT Buffer (g2h) | m*4K |
+ * | + n*4K | | |
+ * +--------+-----------------------------------------------+------+
+ *
+ * The size of each ``CT Buffer`` must be a multiple of 4K.
+ * We don't expect too many messages in flight at any time, unless we are
+ * using GuC submission. In that case each request requires a minimum of
+ * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
+ * is enough space to avoid backpressure on the driver. We increase the size
+ * of the receive buffer (relative to the send buffer) to ensure a G2H
+ * response CTB has a landing spot.
+ */
+
+#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_SIZE (SZ_4K)
+#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
+#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
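+
+/*
+ * With the sizes above, the G2H ring is 4x the H2G ring (16K vs 4K), and a
+ * quarter of the G2H ring (4K) is held back as reserved credit space for
+ * G2H responses.
+ */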
+
+static size_t guc_ct_size(void)
+{
+ return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
+ CTB_G2H_BUFFER_SIZE;
+}
+
+static void guc_ct_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_guc_ct *ct = arg;
+
+ xa_destroy(&ct->fence_lookup);
+}
+
+static void g2h_worker_func(struct work_struct *w);
+
+static void primelockdep(struct xe_guc_ct *ct)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&ct->lock);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+int xe_guc_ct_init(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ int err;
+
+ xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
+
+ drmm_mutex_init(&xe->drm, &ct->lock);
+ spin_lock_init(&ct->fast_lock);
+ xa_init(&ct->fence_lookup);
+ INIT_WORK(&ct->g2h_worker, g2h_worker_func);
+ init_waitqueue_head(&ct->wq);
+ init_waitqueue_head(&ct->g2h_fence_wq);
+
+ primelockdep(ct);
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ct->bo = bo;
+
+ err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#define desc_read(xe_, guc_ctb__, field_) \
+ xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
+ struct guc_ct_buffer_desc, field_)
+
+#define desc_write(xe_, guc_ctb__, field_, val_) \
+ xe_map_wr_field(xe_, &guc_ctb__->desc, 0, \
+ struct guc_ct_buffer_desc, field_, val_)
+
+static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
+ struct iosys_map *map)
+{
+ h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
+ h2g->info.resv_space = 0;
+ h2g->info.tail = 0;
+ h2g->info.head = 0;
+ h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
+ h2g->info.size) -
+ h2g->info.resv_space;
+ h2g->info.broken = false;
+
+ h2g->desc = *map;
+ xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
+
+ h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
+}
+
+static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
+ struct iosys_map *map)
+{
+ g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
+ g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
+ g2h->info.head = 0;
+ g2h->info.tail = 0;
+ g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
+ g2h->info.size) -
+ g2h->info.resv_space;
+ g2h->info.broken = false;
+
+ g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
+ xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
+
+ g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
+ CTB_H2G_BUFFER_SIZE);
+}
+
+static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
+{
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 desc_addr, ctb_addr, size;
+ int err;
+
+ desc_addr = xe_bo_ggtt_addr(ct->bo);
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
+ size = ct->ctbs.h2g.info.size * sizeof(u32);
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (err)
+ return err;
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
+ ctb_addr);
+ if (err)
+ return err;
+
+ return xe_guc_self_cfg32(guc,
+ GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
+ size);
+}
+
+static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
+{
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 desc_addr, ctb_addr, size;
+ int err;
+
+ desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
+ CTB_H2G_BUFFER_SIZE;
+ size = ct->ctbs.g2h.info.size * sizeof(u32);
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
+ desc_addr);
+ if (err)
+ return err;
+
+ err = xe_guc_self_cfg64(guc,
+ GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
+ ctb_addr);
+ if (err)
+ return err;
+
+ return xe_guc_self_cfg32(guc,
+ GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
+ size);
+}
+
+static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
+{
+ u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ GUC_ACTION_HOST2GUC_CONTROL_CTB),
+ FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
+ enable ? GUC_CTB_CONTROL_ENABLE :
+ GUC_CTB_CONTROL_DISABLE),
+ };
+ int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+int xe_guc_ct_enable(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ int err;
+
+ xe_assert(xe, !ct->enabled);
+
+ guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
+ guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
+
+ err = guc_ct_ctb_h2g_register(ct);
+ if (err)
+ goto err_out;
+
+ err = guc_ct_ctb_g2h_register(ct);
+ if (err)
+ goto err_out;
+
+ err = guc_ct_control_toggle(ct, true);
+ if (err)
+ goto err_out;
+
+ mutex_lock(&ct->lock);
+ spin_lock_irq(&ct->fast_lock);
+ ct->g2h_outstanding = 0;
+ ct->enabled = true;
+ spin_unlock_irq(&ct->fast_lock);
+ mutex_unlock(&ct->lock);
+
+ smp_mb();
+ wake_up_all(&ct->wq);
+ drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
+
+ return 0;
+
+err_out:
+ drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);
+
+ return err;
+}
+
+void xe_guc_ct_disable(struct xe_guc_ct *ct)
+{
+ mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
+ spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
+ ct->enabled = false; /* Finally disable CT communication */
+ spin_unlock_irq(&ct->fast_lock);
+ mutex_unlock(&ct->lock);
+
+ xa_destroy(&ct->fence_lookup);
+}
+
+static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
+{
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+
+ lockdep_assert_held(&ct->lock);
+
+ if (cmd_len > h2g->info.space) {
+ h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+ h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
+ h2g->info.size) -
+ h2g->info.resv_space;
+ if (cmd_len > h2g->info.space)
+ return false;
+ }
+
+ return true;
+}
+
+static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
+{
+ if (!g2h_len)
+ return true;
+
+ lockdep_assert_held(&ct->fast_lock);
+
+ return ct->ctbs.g2h.info.space > g2h_len;
+}
+
+static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
+{
+ lockdep_assert_held(&ct->lock);
+
+ if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
+{
+ lockdep_assert_held(&ct->lock);
+ ct->ctbs.h2g.info.space -= cmd_len;
+}
+
+static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
+{
+ xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
+
+ if (g2h_len) {
+ lockdep_assert_held(&ct->fast_lock);
+
+ ct->ctbs.g2h.info.space -= g2h_len;
+ ct->g2h_outstanding += num_g2h;
+ }
+}
+
+static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
+{
+ lockdep_assert_held(&ct->fast_lock);
+ xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+
+ ct->ctbs.g2h.info.space += g2h_len;
+ --ct->g2h_outstanding;
+}
+
+static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
+{
+ spin_lock_irq(&ct->fast_lock);
+ __g2h_release_space(ct, g2h_len);
+ spin_unlock_irq(&ct->fast_lock);
+}
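+
+/*
+ * Credit flow example: a caller sending an H2G that expects a G2H completion
+ * passes the completion length as g2h_len (e.g.
+ * G2H_LEN_DW_SCHED_CONTEXT_MODE_SET for a scheduling context mode set),
+ * which __g2h_reserve_space() deducts from the G2H ring up front. When the
+ * matching completion (e.g. XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE) is
+ * parsed, g2h_release_space() returns the corresponding DW, keeping
+ * reservations and completions balanced.
+ */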
+
+#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
+
+static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 ct_fence_value, bool want_response)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+ u32 cmd[H2G_CT_HEADERS];
+ u32 tail = h2g->info.tail;
+ u32 full_len;
+ struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
+ tail * sizeof(u32));
+
+ full_len = len + GUC_CTB_HDR_LEN;
+
+ lockdep_assert_held(&ct->lock);
+ xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
+ xe_assert(xe, tail <= h2g->info.size);
+
+ /* Command will wrap, zero fill (NOPs), return and check credits again */
+ if (tail + full_len > h2g->info.size) {
+ xe_map_memset(xe, &map, 0, 0,
+ (h2g->info.size - tail) * sizeof(u32));
+ h2g_reserve_space(ct, (h2g->info.size - tail));
+ h2g->info.tail = 0;
+ desc_write(xe, h2g, tail, h2g->info.tail);
+
+ return -EAGAIN;
+ }
+
+ /*
+ * dw0: CT header (including fence)
+ * dw1: HXG header (including action code)
+ * dw2+: action data
+ */
+ cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
+ FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
+ FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
+ if (want_response) {
+ cmd[1] =
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
+ GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ } else {
+ cmd[1] =
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
+ GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ }
+
+ /* H2G header in cmd[1] replaces action[0] so: */
+ --len;
+ ++action;
+
+ /* Write H2G, ensuring it is visible before the descriptor update */
+ xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
+ xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
+ xe_device_wmb(xe);
+
+ /* Update local copies */
+ h2g->info.tail = (tail + full_len) % h2g->info.size;
+ h2g_reserve_space(ct, full_len);
+
+ /* Update descriptor */
+ desc_write(xe, h2g, tail, h2g->info.tail);
+
+ trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
+ desc_read(xe, h2g, head), h2g->info.tail);
+
+ return 0;
+}
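+
+/*
+ * Wrap example (values hypothetical): with info.size = 1024 DW, tail = 1020
+ * and full_len = 8, the message would cross the end of the ring. h2g_write()
+ * zero-fills the trailing 4 DW (the GuC treats zeros as NOPs), consumes that
+ * space, resets tail to 0 and returns -EAGAIN so the caller re-checks
+ * credits and writes the message at the start of the ring.
+ */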
+
+static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
+ u32 len, u32 g2h_len, u32 num_g2h,
+ struct g2h_fence *g2h_fence)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ int ret;
+
+ xe_assert(xe, !g2h_len || !g2h_fence);
+ xe_assert(xe, !num_g2h || !g2h_fence);
+ xe_assert(xe, !g2h_len || num_g2h);
+ xe_assert(xe, g2h_len || !num_g2h);
+ lockdep_assert_held(&ct->lock);
+
+ if (unlikely(ct->ctbs.h2g.info.broken)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (unlikely(!ct->enabled)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (g2h_fence) {
+ g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
+ num_g2h = 1;
+
+ if (g2h_fence_needs_alloc(g2h_fence)) {
+ void *ptr;
+
+ g2h_fence->seqno = (ct->fence_seqno++ & 0xffff);
+ ptr = xa_store(&ct->fence_lookup,
+ g2h_fence->seqno,
+ g2h_fence, GFP_ATOMIC);
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ goto out;
+ }
+ }
+ }
+
+ if (g2h_len)
+ spin_lock_irq(&ct->fast_lock);
+retry:
+ ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
+ if (unlikely(ret))
+ goto out_unlock;
+
+ ret = h2g_write(ct, action, len, g2h_fence ? g2h_fence->seqno : 0,
+ !!g2h_fence);
+ if (unlikely(ret)) {
+ if (ret == -EAGAIN)
+ goto retry;
+ goto out_unlock;
+ }
+
+ __g2h_reserve_space(ct, g2h_len, num_g2h);
+ xe_guc_notify(ct_to_guc(ct));
+out_unlock:
+ if (g2h_len)
+ spin_unlock_irq(&ct->fast_lock);
+out:
+ return ret;
+}
+
+static void kick_reset(struct xe_guc_ct *ct)
+{
+ xe_gt_reset_async(ct_to_gt(ct));
+}
+
+static int dequeue_one_g2h(struct xe_guc_ct *ct);
+
+static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h,
+ struct g2h_fence *g2h_fence)
+{
+ struct drm_device *drm = &ct_to_xe(ct)->drm;
+ struct drm_printer p = drm_info_printer(drm->dev);
+ unsigned int sleep_period_ms = 1;
+ int ret;
+
+ xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+ lockdep_assert_held(&ct->lock);
+ xe_device_assert_mem_access(ct_to_xe(ct));
+
+try_again:
+ ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
+ g2h_fence);
+
+ /*
+ * Wait up to about 1 second for credits to be restored before bailing.
+ * In the case of H2G credits we have no choice but to wait for the GuC
+ * to consume H2Gs in the channel, so we use a wait / sleep loop. In the
+ * case of G2H we process any G2H messages in the channel, hopefully
+ * freeing credits as we consume them.
+ */
+ if (unlikely(ret == -EBUSY &&
+ !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+
+ if (sleep_period_ms == 1024)
+ goto broken;
+
+ trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
+ h2g->info.size,
+ h2g->info.space,
+ len + GUC_CTB_HDR_LEN);
+ msleep(sleep_period_ms);
+ sleep_period_ms <<= 1;
+
+ goto try_again;
+ } else if (unlikely(ret == -EBUSY)) {
+ struct xe_device *xe = ct_to_xe(ct);
+ struct guc_ctb *g2h = &ct->ctbs.g2h;
+
+ trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
+ desc_read(xe, g2h, tail),
+ g2h->info.size,
+ g2h->info.space,
+ g2h_fence ?
+ GUC_CTB_HXG_MSG_MAX_LEN :
+ g2h_len);
+
+#define g2h_avail(ct) \
+ (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
+ if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
+ g2h_avail(ct), HZ))
+ goto broken;
+#undef g2h_avail
+
+ if (dequeue_one_g2h(ct) < 0)
+ goto broken;
+
+ goto try_again;
+ }
+
+ return ret;
+
+broken:
+ drm_err(drm, "No forward process on H2G, reset required");
+ xe_guc_ct_print(ct, &p, true);
+ ct->ctbs.h2g.info.broken = true;
+
+ return -EDEADLK;
+}
+
+static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
+{
+ int ret;
+
+ xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+
+ mutex_lock(&ct->lock);
+ ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
+ mutex_unlock(&ct->lock);
+
+ return ret;
+}
+
+int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h)
+{
+ int ret;
+
+ ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ return ret;
+}
+
+int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h)
+{
+ int ret;
+
+ ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ return ret;
+}
+
+int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
+{
+ int ret;
+
+ lockdep_assert_held(&ct->lock);
+
+ ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ return ret;
+}
+
+/*
+ * Check if a GT reset is in progress or will occur, and if the reset brought
+ * the CT back up. Five seconds is an arbitrary upper limit for a GT reset to
+ * complete.
+ */
+static bool retry_failure(struct xe_guc_ct *ct, int ret)
+{
+ if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
+ return false;
+
+#define ct_alive(ct) \
+ (ct->enabled && !ct->ctbs.h2g.info.broken && !ct->ctbs.g2h.info.broken)
+ if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+ return false;
+#undef ct_alive
+
+ return true;
+}
+
+static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buffer, bool no_fail)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct g2h_fence g2h_fence;
+ int ret = 0;
+
+ /*
+ * We use a fence to implement blocking sends / receiving response data.
+ * The seqno of the fence is sent in the H2G, returned in the G2H, and
+ * an xarray is used as the storage medium with the seqno as the key.
+ * Fields in the fence hold success, failure, retry status and the
+ * response data. Safe to allocate on the stack as the xarray is the
+ * only reference and the entry is removed from it before this function
+ * exits.
+ */
+retry:
+ g2h_fence_init(&g2h_fence, response_buffer);
+retry_same_fence:
+ ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
+ if (unlikely(ret == -ENOMEM)) {
+ void *ptr;
+
+ /* Retry the allocation with GFP_KERNEL */
+ ptr = xa_store(&ct->fence_lookup,
+ g2h_fence.seqno,
+ &g2h_fence, GFP_KERNEL);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ goto retry_same_fence;
+ } else if (unlikely(ret)) {
+ if (ret == -EDEADLK)
+ kick_reset(ct);
+
+ if (no_fail && retry_failure(ct, ret))
+ goto retry_same_fence;
+
+ if (!g2h_fence_needs_alloc(&g2h_fence))
+ xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+
+ return ret;
+ }
+
+ ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+ if (!ret) {
+ drm_err(&xe->drm, "Timed out wait for G2H, fence %u, action %04x",
+ g2h_fence.seqno, action[0]);
+ xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ return -ETIME;
+ }
+
+ if (g2h_fence.retry) {
+ drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
+ action[0], g2h_fence.reason);
+ goto retry;
+ }
+ if (g2h_fence.fail) {
+ drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
+ action[0], g2h_fence.error, g2h_fence.hint);
+ ret = -EIO;
+ }
+
+ return ret > 0 ? 0 : ret;
+}
+
+int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buffer)
+{
+ return guc_ct_send_recv(ct, action, len, response_buffer, false);
+}
+
+int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
+ u32 len, u32 *response_buffer)
+{
+ return guc_ct_send_recv(ct, action, len, response_buffer, true);
+}
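+
+/*
+ * Usage sketch for the blocking send path (illustrative only; the action
+ * code and payload are hypothetical):
+ *
+ *	u32 action[] = { MY_H2G_ACTION, arg0 };
+ *	u32 response[GUC_CTB_MSG_MAX_LEN];
+ *	int err = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), response);
+ *
+ * The call stores a stack-allocated g2h_fence keyed by seqno, sleeps on
+ * g2h_fence_wq and returns once parse_g2h_response() has filled in the
+ * response data or recorded a failure / retry.
+ */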
+
+static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+
+ lockdep_assert_held(&ct->lock);
+
+ switch (action) {
+ case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ g2h_release_space(ct, len);
+ }
+
+ return 0;
+}
+
+static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 response_len = len - GUC_CTB_MSG_MIN_LEN;
+ u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
+ u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]);
+ struct g2h_fence *g2h_fence;
+
+ lockdep_assert_held(&ct->lock);
+
+ g2h_fence = xa_erase(&ct->fence_lookup, fence);
+ if (unlikely(!g2h_fence)) {
+ /* Don't tear down channel, as send could've timed out */
+ drm_warn(&xe->drm, "G2H fence (%u) not found!\n", fence);
+ g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+ return 0;
+ }
+
+ xe_assert(xe, fence == g2h_fence->seqno);
+
+ if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
+ g2h_fence->fail = true;
+ g2h_fence->error =
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[1]);
+ g2h_fence->hint =
+ FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[1]);
+ } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+ g2h_fence->retry = true;
+ g2h_fence->reason =
+ FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, msg[1]);
+ } else if (g2h_fence->response_buffer) {
+ g2h_fence->response_len = response_len;
+ memcpy(g2h_fence->response_buffer, msg + GUC_CTB_MSG_MIN_LEN,
+ response_len * sizeof(u32));
+ }
+
+ g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
+
+ g2h_fence->done = true;
+ smp_mb();
+
+ wake_up_all(&ct->g2h_fence_wq);
+
+ return 0;
+}
+
+static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 hxg, origin, type;
+ int ret;
+
+ lockdep_assert_held(&ct->lock);
+
+ hxg = msg[1];
+
+ origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg);
+ if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
+ drm_err(&xe->drm,
+ "G2H channel broken on read, origin=%d, reset required\n",
+ origin);
+ ct->ctbs.g2h.info.broken = true;
+
+ return -EPROTO;
+ }
+
+ type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg);
+ switch (type) {
+ case GUC_HXG_TYPE_EVENT:
+ ret = parse_g2h_event(ct, msg, len);
+ break;
+ case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+ case GUC_HXG_TYPE_RESPONSE_FAILURE:
+ case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
+ ret = parse_g2h_response(ct, msg, len);
+ break;
+ default:
+ drm_err(&xe->drm,
+ "G2H channel broken on read, type=%d, reset required\n",
+ type);
+ ct->ctbs.g2h.info.broken = true;
+
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+ u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
+ u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+ int ret = 0;
+
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+ return 0;
+
+ switch (action) {
+ case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ ret = xe_guc_sched_done_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
+ ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
+ ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
+ /* Selftest only at the moment */
+ break;
+ case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
+ /* FIXME: Handle this */
+ break;
+ case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
+ ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ ret = xe_guc_pagefault_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
+ adj_len);
+ break;
+ case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
+ ret = xe_guc_access_counter_notify_handler(guc, payload,
+ adj_len);
+ break;
+ default:
+ drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
+ }
+
+ if (ret)
+ drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
+ action, ret);
+
+ return 0;
+}
+
+static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct guc_ctb *g2h = &ct->ctbs.g2h;
+ u32 tail, head, len;
+ s32 avail;
+ u32 action;
+
+ lockdep_assert_held(&ct->fast_lock);
+
+ if (!ct->enabled)
+ return -ENODEV;
+
+ if (g2h->info.broken)
+ return -EPIPE;
+
+ /* Calculate DW available to read */
+ tail = desc_read(xe, g2h, tail);
+ avail = tail - g2h->info.head;
+ if (unlikely(avail == 0))
+ return 0;
+
+ if (avail < 0)
+ avail += g2h->info.size;
+
+ /* Read header */
+ xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
+ sizeof(u32));
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
+ if (len > avail) {
+ drm_err(&xe->drm,
+ "G2H channel broken on read, avail=%d, len=%d, reset required\n",
+ avail, len);
+ g2h->info.broken = true;
+
+ return -EPROTO;
+ }
+
+ head = (g2h->info.head + 1) % g2h->info.size;
+ avail = len - 1;
+
+ /* Read G2H message */
+ if (avail + head > g2h->info.size) {
+ u32 avail_til_wrap = g2h->info.size - head;
+
+ xe_map_memcpy_from(xe, msg + 1,
+ &g2h->cmds, sizeof(u32) * head,
+ avail_til_wrap * sizeof(u32));
+ xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
+ &g2h->cmds, 0,
+ (avail - avail_til_wrap) * sizeof(u32));
+ } else {
+ xe_map_memcpy_from(xe, msg + 1,
+ &g2h->cmds, sizeof(u32) * head,
+ avail * sizeof(u32));
+ }
+
+ action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+
+ if (fast_path) {
+ if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+ return 0;
+
+ switch (action) {
+ case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ break; /* Process these in fast-path */
+ default:
+ return 0;
+ }
+ }
+
+ /* Update local / descriptor header */
+ g2h->info.head = (head + avail) % g2h->info.size;
+ desc_write(xe, g2h, head, g2h->info.head);
+
+ trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
+ g2h->info.head, tail);
+
+ return len;
+}
+
+static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc *guc = ct_to_guc(ct);
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+ u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
+ u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+ int ret = 0;
+
+ switch (action) {
+ case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
+ ret = xe_guc_pagefault_handler(guc, payload, adj_len);
+ break;
+ case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
+ __g2h_release_space(ct, len);
+ ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
+ adj_len);
+ break;
+ default:
+ drm_warn(&xe->drm, "NOT_POSSIBLE");
+ }
+
+ if (ret)
+ drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
+ action, ret);
+}
+
+/**
+ * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
+ * @ct: GuC CT object
+ *
+ * Anything related to page faults is critical for performance, so process
+ * these G2H messages directly in the IRQ handler. This is safe as the
+ * handlers either just wake up waiters or queue another worker.
+ */
+void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ bool ongoing;
+ int len;
+
+ ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
+ return;
+
+ spin_lock(&ct->fast_lock);
+ do {
+ len = g2h_read(ct, ct->fast_msg, true);
+ if (len > 0)
+ g2h_fast_path(ct, ct->fast_msg, len);
+ } while (len > 0);
+ spin_unlock(&ct->fast_lock);
+
+ if (ongoing)
+ xe_device_mem_access_put(xe);
+}
+
+/* Returns less than zero on error, 0 on done, 1 on more available */
+static int dequeue_one_g2h(struct xe_guc_ct *ct)
+{
+ int len;
+ int ret;
+
+ lockdep_assert_held(&ct->lock);
+
+ spin_lock_irq(&ct->fast_lock);
+ len = g2h_read(ct, ct->msg, false);
+ spin_unlock_irq(&ct->fast_lock);
+ if (len <= 0)
+ return len;
+
+ ret = parse_g2h_msg(ct, ct->msg, len);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = process_g2h_msg(ct, ct->msg, len);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 1;
+}
+
+static void g2h_worker_func(struct work_struct *w)
+{
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
+ bool ongoing;
+ int ret;
+
+ /*
+ * Normal users must always hold mem_access.ref around CT calls. However
+ * during the runtime pm callbacks we rely on CT to talk to the GuC, but
+ * at this stage we can't rely on mem_access.ref and even the
+ * callback_task will be different from current. For such cases we just
+ * need to ensure we always process the responses from any blocking
+ * ct_send requests or where we otherwise expect some response when
+ * initiated from those callbacks (which will need to wait for the below
+ * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
+ * the device has suspended to the point that the CT communication has
+ * been disabled.
+ *
+ * If we are inside the runtime pm callback, we can be the only task
+ * still issuing CT requests (since that requires having the
+ * mem_access.ref). It seems like it might in theory be possible to
+ * receive unsolicited events from the GuC just as we are
+ * suspending-resuming, but those will currently anyway be lost when
+ * eventually exiting from suspend, hence no need to wake up the device
+ * here. If we ever need something stronger than get_if_ongoing() then
+ * we need to be careful with blocking the pm callbacks from getting CT
+ * responses, if the worker here is blocked on those callbacks
+ * completing, creating a deadlock.
+ */
+ ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+ if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
+ return;
+
+ do {
+ mutex_lock(&ct->lock);
+ ret = dequeue_one_g2h(ct);
+ mutex_unlock(&ct->lock);
+
+ if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
+ struct drm_device *drm = &ct_to_xe(ct)->drm;
+ struct drm_printer p = drm_info_printer(drm->dev);
+
+ xe_guc_ct_print(ct, &p, false);
+ kick_reset(ct);
+ }
+ } while (ret == 1);
+
+ if (ongoing)
+ xe_device_mem_access_put(ct_to_xe(ct));
+}
+
+static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+ struct guc_ctb_snapshot *snapshot,
+ bool atomic)
+{
+ u32 head, tail;
+
+ xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
+ sizeof(struct guc_ct_buffer_desc));
+ memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+
+ snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
+ atomic ? GFP_ATOMIC : GFP_KERNEL);
+
+ if (!snapshot->cmds) {
+ drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
+ return;
+ }
+
+ head = snapshot->desc.head;
+ tail = snapshot->desc.tail;
+
+ if (head != tail) {
+ struct iosys_map map =
+ IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
+
+ while (head != tail) {
+ snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
+ ++head;
+ if (head == ctb->info.size) {
+ head = 0;
+ map = ctb->cmds;
+ } else {
+ iosys_map_incr(&map, sizeof(u32));
+ }
+ }
+ }
+}
+
+static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ u32 head, tail;
+
+ drm_printf(p, "\tsize: %d\n", snapshot->info.size);
+ drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
+ drm_printf(p, "\thead: %d\n", snapshot->info.head);
+ drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
+ drm_printf(p, "\tspace: %d\n", snapshot->info.space);
+ drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
+ drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
+ drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
+ drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
+
+ if (!snapshot->cmds)
+ return;
+
+ head = snapshot->desc.head;
+ tail = snapshot->desc.tail;
+
+ while (head != tail) {
+ drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
+ snapshot->cmds[head]);
+ ++head;
+ if (head == snapshot->info.size)
+ head = 0;
+ }
+}
+
+static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
+{
+ kfree(snapshot->cmds);
+}
+
+/**
+ * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
+ * @ct: GuC CT object.
+ * @atomic: Boolean to indicate if this is called from atomic context like
+ * reset or CTB handler or from some regular path like debugfs.
+ *
+ * The snapshot can be printed out at a later stage, for example during
+ * dev_coredump analysis.
+ *
+ * Returns: a GuC CT snapshot object that must be freed by the caller
+ * by using `xe_guc_ct_snapshot_free`.
+ */
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
+ bool atomic)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc_ct_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot),
+ atomic ? GFP_ATOMIC : GFP_KERNEL);
+
+ if (!snapshot) {
+ drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
+ return NULL;
+ }
+
+ if (ct->enabled) {
+ snapshot->ct_enabled = true;
+ snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
+ &snapshot->h2g, atomic);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
+ &snapshot->g2h, atomic);
+ }
+
+ return snapshot;
+}
+
+/**
+ * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
+ * @snapshot: GuC CT snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given GuC CT snapshot object.
+ */
+void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
+
+ if (snapshot->ct_enabled) {
+ drm_puts(p, "\nH2G CTB (all sizes in DW):\n");
+ guc_ctb_snapshot_print(&snapshot->h2g, p);
+
+ drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
+ guc_ctb_snapshot_print(&snapshot->g2h, p);
+
+ drm_printf(p, "\tg2h outstanding: %d\n",
+ snapshot->g2h_outstanding);
+ } else {
+ drm_puts(p, "\nCT disabled\n");
+ }
+}
+
+/**
+ * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
+ * @snapshot: GuC CT snapshot object.
+ *
+ * This function frees all the memory that was allocated at capture time.
+ */
+void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ guc_ctb_snapshot_free(&snapshot->h2g);
+ guc_ctb_snapshot_free(&snapshot->g2h);
+ kfree(snapshot);
+}
+
+/**
+ * xe_guc_ct_print - GuC CT Print.
+ * @ct: GuC CT.
+ * @p: drm_printer where it will be printed out.
+ * @atomic: Boolean to indicate if this is called from atomic context like
+ * reset or CTB handler or from some regular path like debugfs.
+ *
+ * This function quickly captures a snapshot and immediately prints it out.
+ */
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
+{
+ struct xe_guc_ct_snapshot *snapshot;
+
+ snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
+ xe_guc_ct_snapshot_print(snapshot, p);
+ xe_guc_ct_snapshot_free(snapshot);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
new file mode 100644
index 000000000..f15f8a485
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_CT_H_
+#define _XE_GUC_CT_H_
+
+#include "xe_guc_ct_types.h"
+
+struct drm_printer;
+
+int xe_guc_ct_init(struct xe_guc_ct *ct);
+int xe_guc_ct_enable(struct xe_guc_ct *ct);
+void xe_guc_ct_disable(struct xe_guc_ct *ct);
+void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
+
+struct xe_guc_ct_snapshot *
+xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic);
+void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
+ struct drm_printer *p);
+void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+
+static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
+{
+ wake_up_all(&ct->wq);
+ if (ct->enabled)
+ queue_work(system_unbound_wq, &ct->g2h_worker);
+ xe_guc_ct_fast_path(ct);
+}
+
+/* Basic CT send / receives */
+int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h);
+int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h);
+int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 *response_buffer);
+static inline int
+xe_guc_ct_send_block(struct xe_guc_ct *ct, const u32 *action, u32 len)
+{
+ return xe_guc_ct_send_recv(ct, action, len, NULL);
+}
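+
+/*
+ * Example of the g2h_len / num_g2h pair (grounded in the G2H_LEN_DW_*
+ * defines, though the surrounding call site is hypothetical): an H2G whose
+ * completion is a 3-DW deregister-done G2H would be issued as
+ *
+ *	xe_guc_ct_send(ct, action, len, G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
+ *
+ * where g2h_len reserves G2H ring space up front and num_g2h counts the
+ * expected completions.
+ */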
+
+/* This is the only version of CT send you can call from a G2H handler */
+int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action,
+ u32 len);
+
+/* Does not fail if a GT reset is in progress: waits for the CT to come back up */
+int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
+ u32 len, u32 *response_buffer);
+static inline int
+xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len)
+{
+ return xe_guc_ct_send_recv_no_fail(ct, action, len, NULL);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
new file mode 100644
index 000000000..d814d4ee3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_CT_TYPES_H_
+#define _XE_GUC_CT_TYPES_H_
+
+#include <linux/interrupt.h>
+#include <linux/iosys-map.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+#include <linux/xarray.h>
+
+#include "abi/guc_communication_ctb_abi.h"
+
+struct xe_bo;
+
+/**
+ * struct guc_ctb_info - GuC command transport buffer (CTB) info
+ */
+struct guc_ctb_info {
+ /** @size: size of CTB commands (DW) */
+ u32 size;
+ /** @resv_space: reserved space of CTB commands (DW) */
+ u32 resv_space;
+ /** @head: head of CTB commands (DW) */
+ u32 head;
+ /** @tail: tail of CTB commands (DW) */
+ u32 tail;
+ /** @space: space in CTB commands (DW) */
+ u32 space;
+ /** @broken: channel broken */
+ bool broken;
+};
+
+/**
+ * struct guc_ctb - GuC command transport buffer (CTB)
+ */
+struct guc_ctb {
+ /** @desc: dma buffer map for CTB descriptor */
+ struct iosys_map desc;
+ /** @cmds: dma buffer map for CTB commands */
+ struct iosys_map cmds;
+ /** @info: CTB info */
+ struct guc_ctb_info info;
+};
+
+/**
+ * struct guc_ctb_snapshot - GuC command transport buffer (CTB) snapshot
+ */
+struct guc_ctb_snapshot {
+ /** @desc: snapshot of the CTB descriptor */
+ struct guc_ct_buffer_desc desc;
+ /** @cmds: snapshot of the CTB commands */
+ u32 *cmds;
+ /** @info: snapshot of the CTB info */
+ struct guc_ctb_info info;
+};
+
+/**
+ * struct xe_guc_ct_snapshot - GuC command transport (CT) snapshot
+ */
+struct xe_guc_ct_snapshot {
+ /** @ct_enabled: CT enabled info at capture time. */
+ bool ct_enabled;
+ /** @g2h_outstanding: G2H outstanding info at the capture time */
+ u32 g2h_outstanding;
+ /** @g2h: G2H CTB snapshot */
+ struct guc_ctb_snapshot g2h;
+ /** @h2g: H2G CTB snapshot */
+ struct guc_ctb_snapshot h2g;
+};
+
+/**
+ * struct xe_guc_ct - GuC command transport (CT) layer
+ *
+ * Includes a pair of CT buffers for bi-directional communication and tracking
+ * for the H2G and G2H requests sent and received through the buffers.
+ */
+struct xe_guc_ct {
+ /** @bo: XE BO for CT */
+ struct xe_bo *bo;
+ /** @lock: protects everything in CT layer */
+ struct mutex lock;
+ /** @fast_lock: protects G2H channel and credits */
+ spinlock_t fast_lock;
+ /** @ctbs: buffers for sending and receiving commands */
+ struct {
+ /** @h2g: Host to GuC (H2G, send) channel */
+ struct guc_ctb h2g;
+ /** @g2h: GuC to Host (G2H, receive) channel */
+ struct guc_ctb g2h;
+ } ctbs;
+ /** @g2h_outstanding: number of outstanding G2H */
+ u32 g2h_outstanding;
+ /** @g2h_worker: worker to process G2H messages */
+ struct work_struct g2h_worker;
+ /** @enabled: CT enabled */
+ bool enabled;
+ /** @fence_seqno: G2H fence seqno - 16 bits used by CT */
+ u32 fence_seqno;
+ /** @fence_lookup: G2H fence lookup */
+ struct xarray fence_lookup;
+ /** @wq: wait queue used for reliable CT sends and freeing G2H credits */
+ wait_queue_head_t wq;
+ /** @g2h_fence_wq: wait queue used for G2H fencing */
+ wait_queue_head_t g2h_fence_wq;
+ /** @msg: Message buffer */
+ u32 msg[GUC_CTB_MSG_MAX_LEN];
+ /** @fast_msg: Message buffer */
+ u32 fast_msg[GUC_CTB_MSG_MAX_LEN];
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
new file mode 100644
index 000000000..ffd7d53bc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_debugfs.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_log.h"
+#include "xe_macros.h"
+
+static struct xe_guc *node_to_guc(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static int guc_info(struct seq_file *m, void *data)
+{
+ struct xe_guc *guc = node_to_guc(m->private);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_device_mem_access_get(xe);
+ xe_guc_print_info(guc, &p);
+ xe_device_mem_access_put(xe);
+
+ return 0;
+}
+
+static int guc_log(struct seq_file *m, void *data)
+{
+ struct xe_guc *guc = node_to_guc(m->private);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_device_mem_access_get(xe);
+ xe_guc_log_print(&guc->log, &p);
+ xe_device_mem_access_put(xe);
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"guc_info", guc_info, 0},
+ {"guc_log", guc_log, 0},
+};
+
+void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
+{
+ struct drm_minor *minor = guc_to_xe(guc)->drm.primary;
+ struct drm_info_list *local;
+ int i;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = guc;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.h b/drivers/gpu/drm/xe/xe_guc_debugfs.h
new file mode 100644
index 000000000..4756dff26
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_DEBUGFS_H_
+#define _XE_GUC_DEBUGFS_H_
+
+struct dentry;
+struct xe_guc;
+
+void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
new file mode 100644
index 000000000..4c39f01e4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_TYPES_H_
+#define _XE_GUC_ENGINE_TYPES_H_
+
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "xe_gpu_scheduler_types.h"
+
+struct dma_fence;
+struct xe_exec_queue;
+
+/**
+ * struct xe_guc_exec_queue - GuC specific state for an xe_exec_queue
+ */
+struct xe_guc_exec_queue {
+ /** @q: Backpointer to parent xe_exec_queue */
+ struct xe_exec_queue *q;
+ /** @sched: GPU scheduler for this xe_exec_queue */
+ struct xe_gpu_scheduler sched;
+ /** @entity: Scheduler entity for this xe_exec_queue */
+ struct xe_sched_entity entity;
+ /**
+ * @static_msgs: Static messages for this xe_exec_queue, used when
+ * a message needs to be sent through the GPU scheduler but memory
+ * allocations are not allowed.
+ */
+#define MAX_STATIC_MSG_TYPE 3
+ struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
+ /** @lr_tdr: long running TDR worker */
+ struct work_struct lr_tdr;
+ /** @fini_async: do final fini async from this worker */
+ struct work_struct fini_async;
+ /** @resume_time: time of last resume */
+ u64 resume_time;
+ /** @state: GuC specific state for this xe_exec_queue */
+ atomic_t state;
+ /** @wqi_head: work queue item head */
+ u32 wqi_head;
+ /** @wqi_tail: work queue item tail */
+ u32 wqi_tail;
+ /** @id: GuC id for this exec_queue */
+ u16 id;
+ /** @suspend_wait: wait queue used to wait on pending suspends */
+ wait_queue_head_t suspend_wait;
+ /** @suspend_pending: a suspend of the exec_queue is pending */
+ bool suspend_pending;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
new file mode 100644
index 000000000..4dd5a88a7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_FWIF_H
+#define _XE_GUC_FWIF_H
+
+#include <linux/bits.h>
+
+#include "abi/guc_klvs_abi.h"
+
+#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4
+#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
+#define G2H_LEN_DW_TLB_INVALIDATE 3
+
+#define GUC_CONTEXT_DISABLE 0
+#define GUC_CONTEXT_ENABLE 1
+
+#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
+#define GUC_CLIENT_PRIORITY_HIGH 1
+#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
+#define GUC_CLIENT_PRIORITY_NORMAL 3
+#define GUC_CLIENT_PRIORITY_NUM 4
+
+#define GUC_RENDER_ENGINE 0
+#define GUC_VIDEO_ENGINE 1
+#define GUC_BLITTER_ENGINE 2
+#define GUC_VIDEOENHANCE_ENGINE 3
+#define GUC_VIDEO_ENGINE2 4
+#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+
+#define GUC_RENDER_CLASS 0
+#define GUC_VIDEO_CLASS 1
+#define GUC_VIDEOENHANCE_CLASS 2
+#define GUC_BLITTER_CLASS 3
+#define GUC_COMPUTE_CLASS 4
+#define GUC_GSC_OTHER_CLASS 5
+#define GUC_LAST_ENGINE_CLASS GUC_GSC_OTHER_CLASS
+#define GUC_MAX_ENGINE_CLASSES 16
+#define GUC_MAX_INSTANCES_PER_CLASS 32
+
+/* Helper for context registration H2G */
+struct guc_ctxt_registration_info {
+ u32 flags;
+ u32 context_idx;
+ u32 engine_class;
+ u32 engine_submit_mask;
+ u32 wq_desc_lo;
+ u32 wq_desc_hi;
+ u32 wq_base_lo;
+ u32 wq_base_hi;
+ u32 wq_size;
+ u32 hwlrca_lo;
+ u32 hwlrca_hi;
+};
+#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+
+/* 32-bit KLV structure as used by policy updates and others */
+struct guc_klv_generic_dw_t {
+ u32 kl;
+ u32 value;
+} __packed;
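+
+/*
+ * Assumed KLV encoding (per the GuC KLV ABI header included above): the kl
+ * word packs the key in its upper 16 bits and the value length, in DW, in
+ * its lower 16 bits, so a one-DW policy update would be built roughly as:
+ *
+ *	klv.kl = FIELD_PREP(GUC_KLV_0_KEY, key) |
+ *		 FIELD_PREP(GUC_KLV_0_LEN, 1);
+ *	klv.value = val;
+ */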
+
+/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
+struct guc_update_exec_queue_policy_header {
+ u32 action;
+ u32 guc_id;
+} __packed;
+
+struct guc_update_exec_queue_policy {
+ struct guc_update_exec_queue_policy_header header;
+ struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
+} __packed;
+
+/* GUC_CTL_* - Parameters for loading the GuC */
+#define GUC_CTL_LOG_PARAMS 0
+#define GUC_LOG_VALID BIT(0)
+#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
+#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
+#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
+#define GUC_LOG_CRASH_SHIFT 4
+#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
+#define GUC_LOG_DEBUG_SHIFT 6
+#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
+#define GUC_LOG_CAPTURE_SHIFT 10
+#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
+#define GUC_LOG_BUF_ADDR_SHIFT 12
+
+#define GUC_CTL_WA 1
+#define GUC_WA_GAM_CREDITS BIT(10)
+#define GUC_WA_DUAL_QUEUE BIT(11)
+#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
+#define GUC_WA_POLLCS BIT(18)
+#define GUC_WA_RENDER_RST_RC6_EXIT BIT(19)
+#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
+
+#define GUC_CTL_FEATURE 2
+#define GUC_CTL_ENABLE_SLPC BIT(2)
+#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
+
+#define GUC_CTL_DEBUG 3
+#define GUC_LOG_VERBOSITY_SHIFT 0
+#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
+#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY_MAX 3
+#define GUC_LOG_VERBOSITY_MASK 0x0000000f
+#define GUC_LOG_DESTINATION_MASK (3 << 4)
+#define GUC_LOG_DISABLED (1 << 6)
+#define GUC_PROFILE_ENABLED (1 << 7)
+
+#define GUC_CTL_ADS 4
+#define GUC_ADS_ADDR_SHIFT 1
+#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+
+#define GUC_CTL_DEVID 5
+
+#define GUC_CTL_MAX_DWORDS 14
+
+/* Scheduling policy settings */
+
+#define GLOBAL_POLICY_MAX_NUM_WI 15
+
+/* Don't reset an engine upon preemption failure */
+#define GLOBAL_POLICY_DISABLE_ENGINE_RESET BIT(0)
+
+#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+
+struct guc_policies {
+ u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
+ /*
+ * In microseconds. How much time to allow before DPC processing is
+ * called back via interrupt (to prevent the DPC queue drain from
+ * starving). Typically thousands of microseconds (example only, not a
+ * granularity).
+ */
+ u32 dpc_promote_time;
+
+ /* Must be set to take these new values. */
+ u32 is_valid;
+
+ /*
+ * Max number of WIs to process per call. A large value may keep CS
+ * idle.
+ */
+ u32 max_num_work_items;
+
+ u32 global_flags;
+ u32 reserved[4];
+} __packed;
+
+/* GuC MMIO reg state struct */
+struct guc_mmio_reg {
+ u32 offset;
+ u32 value;
+ u32 flags;
+ u32 mask;
+#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
+#define GUC_REGSET_RESTORE_ONLY BIT(3)
+} __packed;
+
+/* GuC register sets */
+struct guc_mmio_reg_set {
+ u32 address;
+ u16 count;
+ u16 reserved;
+} __packed;
+
+/* Generic GT SysInfo data types */
+#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0
+#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1
+#define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI 2
+#define GUC_GENERIC_GT_SYSINFO_MAX 16
+
+/* HW info */
+struct guc_gt_system_info {
+ u8 mapping_table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+ u32 engine_enabled_masks[GUC_MAX_ENGINE_CLASSES];
+ u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
+} __packed;
+
+enum {
+ GUC_CAPTURE_LIST_INDEX_PF = 0,
+ GUC_CAPTURE_LIST_INDEX_VF = 1,
+ GUC_CAPTURE_LIST_INDEX_MAX = 2,
+};
+
+/* GuC Additional Data Struct */
+struct guc_ads {
+ struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+ u32 reserved0;
+ u32 scheduler_policies;
+ u32 gt_system_info;
+ u32 reserved1;
+ u32 control_data;
+ u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
+ u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
+ u32 private_data;
+ u32 um_init_data;
+ u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
+ u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
+ u32 reserved[14];
+} __packed;
+
+/* Engine usage stats */
+struct guc_engine_usage_record {
+ u32 current_context_index;
+ u32 last_switch_in_stamp;
+ u32 reserved0;
+ u32 total_runtime;
+ u32 reserved1[4];
+} __packed;
+
+struct guc_engine_usage {
+ struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
+/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
+enum xe_guc_recv_message {
+ XE_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
+ XE_GUC_RECV_MSG_EXCEPTION = BIT(30),
+};
+
+/* Page fault structures */
+struct access_counter_desc {
+ u32 dw0;
+#define ACCESS_COUNTER_TYPE BIT(0)
+#define ACCESS_COUNTER_SUBG_LO GENMASK(31, 1)
+
+ u32 dw1;
+#define ACCESS_COUNTER_SUBG_HI BIT(0)
+#define ACCESS_COUNTER_RSVD0 GENMASK(2, 1)
+#define ACCESS_COUNTER_ENG_INSTANCE GENMASK(8, 3)
+#define ACCESS_COUNTER_ENG_CLASS GENMASK(11, 9)
+#define ACCESS_COUNTER_ASID GENMASK(31, 12)
+
+ u32 dw2;
+#define ACCESS_COUNTER_VFID GENMASK(5, 0)
+#define ACCESS_COUNTER_RSVD1 GENMASK(7, 6)
+#define ACCESS_COUNTER_GRANULARITY GENMASK(10, 8)
+#define ACCESS_COUNTER_RSVD2 GENMASK(16, 11)
+#define ACCESS_COUNTER_VIRTUAL_ADDR_RANGE_LO GENMASK(31, 17)
+
+ u32 dw3;
+#define ACCESS_COUNTER_VIRTUAL_ADDR_RANGE_HI GENMASK(31, 0)
+} __packed;
+
+enum guc_um_queue_type {
+ GUC_UM_HW_QUEUE_PAGE_FAULT = 0,
+ GUC_UM_HW_QUEUE_PAGE_FAULT_RESPONSE,
+ GUC_UM_HW_QUEUE_ACCESS_COUNTER,
+ GUC_UM_HW_QUEUE_MAX
+};
+
+struct guc_um_queue_params {
+ u64 base_dpa;
+ u32 base_ggtt_address;
+ u32 size_in_bytes;
+ u32 rsvd[4];
+} __packed;
+
+struct guc_um_init_params {
+ u64 page_response_timeout_in_us;
+ u32 rsvd[6];
+ struct guc_um_queue_params queue_params[GUC_UM_HW_QUEUE_MAX];
+} __packed;
+
+enum xe_guc_fault_reply_type {
+ PFR_ACCESS = 0,
+ PFR_ENGINE,
+ PFR_VFID,
+ PFR_ALL,
+ PFR_INVALID
+};
+
+enum xe_guc_response_desc_type {
+ TLB_INVALIDATION_DESC = 0,
+ FAULT_RESPONSE_DESC
+};
+
+struct xe_guc_pagefault_desc {
+ u32 dw0;
+#define PFD_FAULT_LEVEL GENMASK(2, 0)
+#define PFD_SRC_ID GENMASK(10, 3)
+#define PFD_RSVD_0 GENMASK(17, 11)
+#define XE2_PFD_TRVA_FAULT BIT(18)
+#define PFD_ENG_INSTANCE GENMASK(24, 19)
+#define PFD_ENG_CLASS GENMASK(27, 25)
+#define PFD_PDATA_LO GENMASK(31, 28)
+
+ u32 dw1;
+#define PFD_PDATA_HI GENMASK(11, 0)
+#define PFD_PDATA_HI_SHIFT 4
+#define PFD_ASID GENMASK(31, 12)
+
+ u32 dw2;
+#define PFD_ACCESS_TYPE GENMASK(1, 0)
+#define PFD_FAULT_TYPE GENMASK(3, 2)
+#define PFD_VFID GENMASK(9, 4)
+#define PFD_RSVD_1 GENMASK(11, 10)
+#define PFD_VIRTUAL_ADDR_LO GENMASK(31, 12)
+#define PFD_VIRTUAL_ADDR_LO_SHIFT 12
+
+ u32 dw3;
+#define PFD_VIRTUAL_ADDR_HI GENMASK(31, 0)
+#define PFD_VIRTUAL_ADDR_HI_SHIFT 32
+} __packed;
+
+struct xe_guc_pagefault_reply {
+ u32 dw0;
+#define PFR_VALID BIT(0)
+#define PFR_SUCCESS BIT(1)
+#define PFR_REPLY GENMASK(4, 2)
+#define PFR_RSVD_0 GENMASK(9, 5)
+#define PFR_DESC_TYPE GENMASK(11, 10)
+#define PFR_ASID GENMASK(31, 12)
+
+ u32 dw1;
+#define PFR_VFID GENMASK(5, 0)
+#define PFR_RSVD_1 BIT(6)
+#define PFR_ENG_INSTANCE GENMASK(12, 7)
+#define PFR_ENG_CLASS GENMASK(15, 13)
+#define PFR_PDATA GENMASK(31, 16)
+
+ u32 dw2;
+#define PFR_RSVD_2 GENMASK(31, 0)
+} __packed;
+
+struct xe_guc_acc_desc {
+ u32 dw0;
+#define ACC_TYPE BIT(0)
+#define ACC_TRIGGER 0
+#define ACC_NOTIFY 1
+#define ACC_SUBG_LO GENMASK(31, 1)
+
+ u32 dw1;
+#define ACC_SUBG_HI BIT(0)
+#define ACC_RSVD0 GENMASK(2, 1)
+#define ACC_ENG_INSTANCE GENMASK(8, 3)
+#define ACC_ENG_CLASS GENMASK(11, 9)
+#define ACC_ASID GENMASK(31, 12)
+
+ u32 dw2;
+#define ACC_VFID GENMASK(5, 0)
+#define ACC_RSVD1 GENMASK(7, 6)
+#define ACC_GRANULARITY GENMASK(10, 8)
+#define ACC_RSVD2 GENMASK(16, 11)
+#define ACC_VIRTUAL_ADDR_RANGE_LO GENMASK(31, 17)
+
+ u32 dw3;
+#define ACC_VIRTUAL_ADDR_RANGE_HI GENMASK(31, 0)
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
new file mode 100644
index 000000000..2a13a0091
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_hwconfig.h"
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_map.h"
+
+static int send_get_hwconfig(struct xe_guc *guc, u32 ggtt_addr, u32 size)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_GET_HWCONFIG,
+ lower_32_bits(ggtt_addr),
+ upper_32_bits(ggtt_addr),
+ size,
+ };
+
+ return xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_hwconfig_size(struct xe_guc *guc, u32 *size)
+{
+ int ret = send_get_hwconfig(guc, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ *size = ret;
+ return 0;
+}
+
+static int guc_hwconfig_copy(struct xe_guc *guc)
+{
+ int ret = send_get_hwconfig(guc, xe_bo_ggtt_addr(guc->hwconfig.bo),
+ guc->hwconfig.size);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int xe_guc_hwconfig_init(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ u32 size;
+ int err;
+
+ /* Initialization already done */
+ if (guc->hwconfig.bo)
+ return 0;
+
+ /*
+ * The hwconfig is the same across all GTs, so only GT0 needs to be configured
+ */
+ if (gt->info.id != XE_GT0)
+ return 0;
+
+ /* ADL_P and DG2+ support the hwconfig table */
+ if (GRAPHICS_VERx100(xe) < 1255 && xe->info.platform != XE_ALDERLAKE_P)
+ return 0;
+
+ err = guc_hwconfig_size(guc, &size);
+ if (err)
+ return err;
+ if (!size)
+ return -EINVAL;
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+ guc->hwconfig.bo = bo;
+ guc->hwconfig.size = size;
+
+ return guc_hwconfig_copy(guc);
+}
+
+u32 xe_guc_hwconfig_size(struct xe_guc *guc)
+{
+ return !guc->hwconfig.bo ? 0 : guc->hwconfig.size;
+}
+
+void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+
+ XE_WARN_ON(!guc->hwconfig.bo);
+
+ xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0,
+ guc->hwconfig.size);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.h b/drivers/gpu/drm/xe/xe_guc_hwconfig.h
new file mode 100644
index 000000000..b5794d641
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_HWCONFIG_H_
+#define _XE_GUC_HWCONFIG_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+
+int xe_guc_hwconfig_init(struct xe_guc *guc);
+u32 xe_guc_hwconfig_size(struct xe_guc *guc);
+void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
new file mode 100644
index 000000000..bcd2f4d34
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_log.h"
+
+#include <drm/drm_managed.h>
+
+#include "xe_bo.h"
+#include "xe_gt.h"
+#include "xe_map.h"
+#include "xe_module.h"
+
+static struct xe_gt *
+log_to_gt(struct xe_guc_log *log)
+{
+ return container_of(log, struct xe_gt, uc.guc.log);
+}
+
+static struct xe_device *
+log_to_xe(struct xe_guc_log *log)
+{
+ return gt_to_xe(log_to_gt(log));
+}
+
+static size_t guc_log_size(void)
+{
+ /*
+ * GuC Log buffer Layout
+ *
+ * +===============================+ 00B
+ * | Crash dump state header |
+ * +-------------------------------+ 32B
+ * | Debug state header |
+ * +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | Debug logs |
+ * +===============================+ + DEBUG_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
+ */
+ return PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
+ CAPTURE_BUFFER_SIZE;
+}
+
+void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+{
+ struct xe_device *xe = log_to_xe(log);
+ size_t size;
+ int i, j;
+
+ xe_assert(xe, log->bo);
+
+ size = log->bo->size;
+
+#define DW_PER_READ 128
+ xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32))));
+ for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
+ u32 read[DW_PER_READ];
+
+ xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32),
+ DW_PER_READ * sizeof(u32));
+#define DW_PER_PRINT 4
+ for (j = 0; j < DW_PER_READ / DW_PER_PRINT; ++j) {
+ u32 *print = read + j * DW_PER_PRINT;
+
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(print + 0), *(print + 1),
+ *(print + 2), *(print + 3));
+ }
+ }
+}
+
+int xe_guc_log_init(struct xe_guc_log *log)
+{
+ struct xe_device *xe = log_to_xe(log);
+ struct xe_tile *tile = gt_to_tile(log_to_gt(log));
+ struct xe_bo *bo;
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, guc_log_size());
+ log->bo = bo;
+ log->level = xe_modparam.guc_log_level;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
new file mode 100644
index 000000000..2d25ab28b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_LOG_H_
+#define _XE_GUC_LOG_H_
+
+#include "xe_guc_log_types.h"
+
+struct drm_printer;
+
+#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
+#define CRASH_BUFFER_SIZE SZ_1M
+#define DEBUG_BUFFER_SIZE SZ_8M
+#define CAPTURE_BUFFER_SIZE SZ_2M
+#else
+#define CRASH_BUFFER_SIZE SZ_8K
+#define DEBUG_BUFFER_SIZE SZ_64K
+#define CAPTURE_BUFFER_SIZE SZ_16K
+#endif
+/*
+ * While we're using plain log level in i915, GuC controls are much more...
+ * "elaborate"? We have a couple of bits for verbosity, separate bit for actual
+ * log enabling, and separate bit for default logging - which "conveniently"
+ * ignores the enable bit.
+ */
+#define GUC_LOG_LEVEL_DISABLED 0
+#define GUC_LOG_LEVEL_NON_VERBOSE 1
+#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED)
+#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE)
+#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \
+ typeof(x) _x = (x); \
+ GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0; \
+})
+#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2)
+#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
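+
+/*
+ * Worked mapping of the helpers above: level 0 is disabled, level 1 is
+ * enabled but non-verbose, and levels 2..GUC_LOG_LEVEL_MAX (5) map to GuC
+ * verbosities 0..GUC_LOG_VERBOSITY_MAX (3), e.g.
+ * GUC_LOG_LEVEL_TO_VERBOSITY(2) == 0 and GUC_VERBOSITY_TO_LOG_LEVEL(3) == 5.
+ */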
+
+int xe_guc_log_init(struct xe_guc_log *log);
+void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p);
+
+static inline u32
+xe_guc_log_get_level(struct xe_guc_log *log)
+{
+ return log->level;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h
new file mode 100644
index 000000000..125080d13
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_log_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_LOG_TYPES_H_
+#define _XE_GUC_LOG_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_bo;
+
+/**
+ * struct xe_guc_log - GuC log
+ */
+struct xe_guc_log {
+ /** @level: GuC log level */
+ u32 level;
+ /** @bo: XE BO for GuC log */
+ struct xe_bo *bo;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
new file mode 100644
index 000000000..d91702592
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_pc.h"
+
+#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_slpc_abi.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_idle.h"
+#include "xe_gt_sysfs.h"
+#include "xe_gt_types.h"
+#include "xe_guc_ct.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_pcode.h"
+
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
+#define RP_STATE_CAP XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define RP0_MASK REG_GENMASK(7, 0)
+#define RP1_MASK REG_GENMASK(15, 8)
+#define RPN_MASK REG_GENMASK(23, 16)
+
+#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
+#define RPE_MASK REG_GENMASK(15, 8)
+
+#define GT_PERF_STATUS XE_REG(0x1381b4)
+#define CAGF_MASK REG_GENMASK(19, 11)
+
+#define GT_FREQUENCY_MULTIPLIER 50
+#define GT_FREQUENCY_SCALER 3
+
+/**
+ * DOC: GuC Power Conservation (PC)
+ *
+ * GuC Power Conservation (PC) supports multiple features for the most
+ * efficient and performant use of the GT when GuC submission is enabled,
+ * including frequency management, Render-C states management, and various
+ * algorithms for power balancing.
+ *
+ * Single Loop Power Conservation (SLPC) is the name given to the suite of
+ * connected power conservation features in the GuC firmware. The firmware
+ * exposes a programming interface to the host for the control of SLPC.
+ *
+ * Frequency management:
+ * =====================
+ *
+ * The Xe driver enables SLPC with all of its default features and frequency
+ * selection, which varies per platform.
+ *
+ * Render-C States:
+ * ================
+ *
+ * Render-C states are also a GuC PC feature, now enabled in Xe for
+ * all platforms.
+ *
+ */
+
+static struct xe_guc *
+pc_to_guc(struct xe_guc_pc *pc)
+{
+ return container_of(pc, struct xe_guc, pc);
+}
+
+static struct xe_device *
+pc_to_xe(struct xe_guc_pc *pc)
+{
+ struct xe_guc *guc = pc_to_guc(pc);
+ struct xe_gt *gt = container_of(guc, struct xe_gt, uc.guc);
+
+ return gt_to_xe(gt);
+}
+
+static struct xe_gt *
+pc_to_gt(struct xe_guc_pc *pc)
+{
+ return container_of(pc, struct xe_gt, uc.guc.pc);
+}
+
+static struct iosys_map *
+pc_to_maps(struct xe_guc_pc *pc)
+{
+ return &pc->bo->vmap;
+}
+
+#define slpc_shared_data_read(pc_, field_) \
+ xe_map_rd_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
+ struct slpc_shared_data, field_)
+
+#define slpc_shared_data_write(pc_, field_, val_) \
+ xe_map_wr_field(pc_to_xe(pc_), pc_to_maps(pc_), 0, \
+ struct slpc_shared_data, field_, val_)
+
+#define SLPC_EVENT(id, count) \
+ (FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
+ FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
+
+static int wait_for_pc_state(struct xe_guc_pc *pc,
+ enum slpc_global_state state)
+{
+ int timeout_us = 5000; /* roughly 5 ms, but no need for precision */
+ int slept, wait = 10;
+
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
+ for (slept = 0; slept < timeout_us;) {
+ if (slpc_shared_data_read(pc, header.global_state) == state)
+ return 0;
+
+ usleep_range(wait, wait << 1);
+ slept += wait;
+ wait <<= 1;
+ if (slept + wait > timeout_us)
+ wait = timeout_us - slept;
+ }
+
+ return -ETIMEDOUT;
+}
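+
+/*
+ * Illustrative timing for wait_for_pc_state() (not part of the driver):
+ * the sleep starts at 10 us and doubles on every miss (10, 20, 40, ...),
+ * with the last sleep clamped so the accumulated wait never exceeds
+ * timeout_us, i.e. roughly 5 ms worst case before -ETIMEDOUT.
+ */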
+
+static int pc_action_reset(struct xe_guc_pc *pc)
+{
+ struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
+ int ret;
+ u32 action[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_RESET, 2),
+ xe_bo_ggtt_addr(pc->bo),
+ 0,
+ };
+
+ ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
+ if (ret)
+ drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret));
+
+ return ret;
+}
+
+static int pc_action_shutdown(struct xe_guc_pc *pc)
+{
+ struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
+ int ret;
+ u32 action[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_SHUTDOWN, 2),
+ xe_bo_ggtt_addr(pc->bo),
+ 0,
+ };
+
+ ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
+ if (ret)
+ drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown %pe",
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static int pc_action_query_task_state(struct xe_guc_pc *pc)
+{
+ struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
+ int ret;
+ u32 action[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
+ xe_bo_ggtt_addr(pc->bo),
+ 0,
+ };
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ return -EAGAIN;
+
+ /* Blocking here to ensure the results are ready before reading them */
+ ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
+ if (ret)
+ drm_err(&pc_to_xe(pc)->drm,
+ "GuC PC query task state failed: %pe", ERR_PTR(ret));
+
+ return ret;
+}
+
+static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
+{
+ struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
+ int ret;
+ u32 action[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
+ id,
+ value,
+ };
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ return -EAGAIN;
+
+ ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
+ if (ret)
+ drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe",
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
+{
+ struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
+ u32 action[] = {
+ XE_GUC_ACTION_SETUP_PC_GUCRC,
+ mode,
+ };
+ int ret;
+
+ ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
+ if (ret)
+ drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe",
+ ERR_PTR(ret));
+ return ret;
+}
+
+static u32 decode_freq(u32 raw)
+{
+ return DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER,
+ GT_FREQUENCY_SCALER);
+}
+
+static u32 encode_freq(u32 freq)
+{
+ return DIV_ROUND_CLOSEST(freq * GT_FREQUENCY_SCALER,
+ GT_FREQUENCY_MULTIPLIER);
+}
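+
+/*
+ * Worked example (illustrative only): one raw ratio step corresponds to
+ * 50/3 ~= 16.67 MHz, so:
+ *
+ *   decode_freq(90)   == DIV_ROUND_CLOSEST(90 * 50, 3)   == 1500 (MHz)
+ *   encode_freq(1500) == DIV_ROUND_CLOSEST(1500 * 3, 50) == 90
+ */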
+
+static u32 pc_get_min_freq(struct xe_guc_pc *pc)
+{
+ u32 freq;
+
+ freq = FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
+ slpc_shared_data_read(pc, task_state_data.freq));
+
+ return decode_freq(freq);
+}
+
+static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
+
+ /* Allow/Disallow punit to process software freq requests */
+ xe_mmio_write32(gt, RP_CONTROL, state);
+}
+
+static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 rpnswreq;
+
+ pc_set_manual_rp_ctrl(pc, true);
+
+ /* Requested freq is in units of 16.66 MHz */
+ rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
+ xe_mmio_write32(gt, RPNSWREQ, rpnswreq);
+
+ /* Sleep for a small time to allow pcode to respond */
+ usleep_range(100, 300);
+
+ pc_set_manual_rp_ctrl(pc, false);
+}
+
+static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
+{
+ /*
+ * Let's only check for the rpn-rp0 range. If max < min,
+ * min becomes a fixed request.
+ */
+ if (freq < pc->rpn_freq || freq > pc->rp0_freq)
+ return -EINVAL;
+
+ /*
+ * GuC policy is to elevate the minimum frequency to the efficient levels.
+ * Our goal is to have the admin's choices respected.
+ */
+ pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ freq < pc->rpe_freq);
+
+ return pc_action_set_param(pc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ freq);
+}
+
+static int pc_get_max_freq(struct xe_guc_pc *pc)
+{
+ u32 freq;
+
+ freq = FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
+ slpc_shared_data_read(pc, task_state_data.freq));
+
+ return decode_freq(freq);
+}
+
+static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
+{
+ /*
+ * Let's only check for the rpn-rp0 range. If max < min,
+ * min becomes a fixed request.
+ * Also, overclocking is not supported.
+ */
+ if (freq < pc->rpn_freq || freq > pc->rp0_freq)
+ return -EINVAL;
+
+ return pc_action_set_param(pc,
+ SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+ freq);
+}
+
+static void mtl_update_rpe_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
+ else
+ reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);
+
+ pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
+}
+
+static void tgl_update_rpe_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg;
+
+ /*
+ * For PVC we still need to use the fused RP1 as the approximation for
+ * RPe. For platforms other than PVC, we get the resolved RPe directly
+ * from PCODE, at a different register.
+ */
+ if (xe->info.platform == XE_PVC)
+ reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
+ else
+ reg = xe_mmio_read32(gt, FREQ_INFO_REC);
+
+ pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
+
+static void pc_update_rp_values(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (GRAPHICS_VERx100(xe) >= 1270)
+ mtl_update_rpe_value(pc);
+ else
+ tgl_update_rpe_value(pc);
+
+ /*
+ * RPe is decided at runtime by PCODE. In the rare case where that's
+ * smaller than the fused min, we will trust the PCODE and use that
+ * as our minimum one.
+ */
+ pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
+}
+
+/**
+ * xe_guc_pc_get_act_freq - Get Actual running frequency
+ * @pc: The GuC PC
+ *
+ * Returns: The actual running frequency, which might be 0 if the GT is in the Render-C sleep state (RC6).
+ */
+u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 freq;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+
+ /* When in RC6, actual frequency reported will be 0. */
+ if (GRAPHICS_VERx100(xe) >= 1270) {
+ freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
+ } else {
+ freq = xe_mmio_read32(gt, GT_PERF_STATUS);
+ freq = REG_FIELD_GET(CAGF_MASK, freq);
+ }
+
+ freq = decode_freq(freq);
+
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return freq;
+}
+
+/**
+ * xe_guc_pc_get_cur_freq - Get Current requested frequency
+ * @pc: The GuC PC
+ * @freq: A pointer to a u32 where the freq value will be returned
+ *
+ * Returns: 0 on success,
+ * -EAGAIN if GuC PC not ready (likely in middle of a reset).
+ */
+int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ int ret;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ /*
+ * GuC SLPC plays with the cur freq request when GuCRC is enabled.
+ * Block RC6 for a more reliable read.
+ */
+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (ret)
+ goto out;
+
+ *freq = xe_mmio_read32(gt, RPNSWREQ);
+
+ *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
+ *freq = decode_freq(*freq);
+
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+out:
+ xe_device_mem_access_put(gt_to_xe(gt));
+ return ret;
+}
+
+/**
+ * xe_guc_pc_get_rp0_freq - Get the RP0 freq
+ * @pc: The GuC PC
+ *
+ * Returns: RP0 freq.
+ */
+u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
+{
+ return pc->rp0_freq;
+}
+
+/**
+ * xe_guc_pc_get_rpe_freq - Get the RPe freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPe freq.
+ */
+u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_device_mem_access_get(xe);
+ pc_update_rp_values(pc);
+ xe_device_mem_access_put(xe);
+
+ return pc->rpe_freq;
+}
+
+/**
+ * xe_guc_pc_get_rpn_freq - Get the RPn freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPn freq.
+ */
+u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
+{
+ return pc->rpn_freq;
+}
+
+/**
+ * xe_guc_pc_get_min_freq - Get the min operational frequency
+ * @pc: The GuC PC
+ * @freq: A pointer to a u32 where the freq value will be returned
+ *
+ * Returns: 0 on success,
+ * -EAGAIN if GuC PC not ready (likely in middle of a reset).
+ */
+int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ int ret;
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+ mutex_lock(&pc->freq_lock);
+ if (!pc->freq_ready) {
+ /* Might be in the middle of a gt reset */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /*
+ * GuC SLPC plays with the min freq request when GuCRC is enabled.
+ * Block RC6 for a more reliable read.
+ */
+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (ret)
+ goto out;
+
+ ret = pc_action_query_task_state(pc);
+ if (ret)
+ goto fw;
+
+ *freq = pc_get_min_freq(pc);
+
+fw:
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+out:
+ mutex_unlock(&pc->freq_lock);
+ xe_device_mem_access_put(pc_to_xe(pc));
+ return ret;
+}
+
+/**
+ * xe_guc_pc_set_min_freq - Set the minimal operational frequency
+ * @pc: The GuC PC
+ * @freq: The selected minimal frequency
+ *
+ * Returns: 0 on success,
+ * -EAGAIN if GuC PC not ready (likely in middle of a reset),
+ * -EINVAL if value out of bounds.
+ */
+int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
+{
+ int ret;
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+ mutex_lock(&pc->freq_lock);
+ if (!pc->freq_ready) {
+ /* Might be in the middle of a gt reset */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = pc_set_min_freq(pc, freq);
+ if (ret)
+ goto out;
+
+ pc->user_requested_min = freq;
+
+out:
+ mutex_unlock(&pc->freq_lock);
+ xe_device_mem_access_put(pc_to_xe(pc));
+
+ return ret;
+}
+
+/**
+ * xe_guc_pc_get_max_freq - Get Maximum operational frequency
+ * @pc: The GuC PC
+ * @freq: A pointer to a u32 where the freq value will be returned
+ *
+ * Returns: 0 on success,
+ * -EAGAIN if GuC PC not ready (likely in middle of a reset).
+ */
+int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
+{
+ int ret;
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+ mutex_lock(&pc->freq_lock);
+ if (!pc->freq_ready) {
+ /* Might be in the middle of a gt reset */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = pc_action_query_task_state(pc);
+ if (ret)
+ goto out;
+
+ *freq = pc_get_max_freq(pc);
+
+out:
+ mutex_unlock(&pc->freq_lock);
+ xe_device_mem_access_put(pc_to_xe(pc));
+ return ret;
+}
+
+/**
+ * xe_guc_pc_set_max_freq - Set the maximum operational frequency
+ * @pc: The GuC PC
+ * @freq: The selected maximum frequency value
+ *
+ * Returns: 0 on success,
+ * -EAGAIN if GuC PC not ready (likely in middle of a reset),
+ * -EINVAL if value out of bounds.
+ */
+int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
+{
+ int ret;
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+ mutex_lock(&pc->freq_lock);
+ if (!pc->freq_ready) {
+ /* Might be in the middle of a gt reset */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = pc_set_max_freq(pc, freq);
+ if (ret)
+ goto out;
+
+ pc->user_requested_max = freq;
+
+out:
+ mutex_unlock(&pc->freq_lock);
+ xe_device_mem_access_put(pc_to_xe(pc));
+ return ret;
+}
+
+/**
+ * xe_guc_pc_c_status - get the current GT C state
+ * @pc: XE_GuC_PC instance
+ */
+enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg, gt_c_state;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
+ reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
+ } else {
+ reg = xe_mmio_read32(gt, GT_CORE_STATUS);
+ gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
+ }
+
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ switch (gt_c_state) {
+ case GT_C6:
+ return GT_IDLE_C6;
+ case GT_C0:
+ return GT_IDLE_C0;
+ default:
+ return GT_IDLE_UNKNOWN;
+ }
+}
+
+/**
+ * xe_guc_pc_rc6_residency - rc6 residency counter
+ * @pc: Xe_GuC_PC instance
+ */
+u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ reg = xe_mmio_read32(gt, GT_GFX_RC6);
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return reg;
+}
+
+/**
+ * xe_guc_pc_mc6_residency - mc6 residency counter
+ * @pc: Xe_GuC_PC instance
+ */
+u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u64 reg;
+
+ xe_device_mem_access_get(gt_to_xe(gt));
+ reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
+ xe_device_mem_access_put(gt_to_xe(gt));
+
+ return reg;
+}
+
+static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
+ else
+ reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);
+
+ pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
+
+ pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg));
+}
+
+static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg;
+
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
+ if (xe->info.platform == XE_PVC)
+ reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
+ else
+ reg = xe_mmio_read32(gt, RP_STATE_CAP);
+ pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
+
+static void pc_init_fused_rp_values(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (GRAPHICS_VERx100(xe) >= 1270)
+ mtl_init_fused_rp_values(pc);
+ else
+ tgl_init_fused_rp_values(pc);
+}
+
+/**
+ * xe_guc_pc_init_early - Initialize RPx values and request a higher GT
+ * frequency to allow faster GuC load times
+ * @pc: Xe_GuC_PC instance
+ */
+void xe_guc_pc_init_early(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+ pc_init_fused_rp_values(pc);
+ pc_set_cur_freq(pc, pc->rp0_freq);
+}
+
+static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
+{
+ int ret;
+
+ lockdep_assert_held(&pc->freq_lock);
+
+ ret = pc_action_query_task_state(pc);
+ if (ret)
+ return ret;
+
+ /*
+ * GuC defaults to some RPmax that is not actually achievable without
+ * overclocking. Let's adjust it to the hardware RP0, which is the
+ * regular maximum.
+ */
+ if (pc_get_max_freq(pc) > pc->rp0_freq)
+ pc_set_max_freq(pc, pc->rp0_freq);
+
+ /*
+ * The same thing happens on server platforms, where the min is listed
+ * as RPmax.
+ */
+ if (pc_get_min_freq(pc) > pc->rp0_freq)
+ pc_set_min_freq(pc, pc->rp0_freq);
+
+ return 0;
+}
+
+static int pc_adjust_requested_freq(struct xe_guc_pc *pc)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&pc->freq_lock);
+
+ if (pc->user_requested_min != 0) {
+ ret = pc_set_min_freq(pc, pc->user_requested_min);
+ if (ret)
+ return ret;
+ }
+
+ if (pc->user_requested_max != 0) {
+ ret = pc_set_max_freq(pc, pc->user_requested_max);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * xe_guc_pc_gucrc_disable - Disable GuC RC
+ * @pc: Xe_GuC_PC instance
+ *
+ * Disables GuC RC by taking control of RC6 back from GuC.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
+{
+ struct xe_device *xe = pc_to_xe(pc);
+ struct xe_gt *gt = pc_to_gt(pc);
+ int ret = 0;
+
+ if (xe->info.skip_guc_pc)
+ return 0;
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+
+ ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
+ if (ret)
+ goto out;
+
+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (ret)
+ goto out;
+
+ xe_gt_idle_disable_c6(gt);
+
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+
+out:
+ xe_device_mem_access_put(pc_to_xe(pc));
+ return ret;
+}
+
+static void pc_init_pcode_freq(struct xe_guc_pc *pc)
+{
+ u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
+ u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
+
+ XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
+}
+
+static int pc_init_freqs(struct xe_guc_pc *pc)
+{
+ int ret;
+
+ mutex_lock(&pc->freq_lock);
+
+ ret = pc_adjust_freq_bounds(pc);
+ if (ret)
+ goto out;
+
+ ret = pc_adjust_requested_freq(pc);
+ if (ret)
+ goto out;
+
+ pc_update_rp_values(pc);
+
+ pc_init_pcode_freq(pc);
+
+ /*
+ * The frequencies are only really ready for use after the
+ * user-requested ones have been restored.
+ */
+ pc->freq_ready = true;
+
+out:
+ mutex_unlock(&pc->freq_lock);
+ return ret;
+}
+
+/**
+ * xe_guc_pc_start - Start GuC's Power Conservation component
+ * @pc: Xe_GuC_PC instance
+ */
+int xe_guc_pc_start(struct xe_guc_pc *pc)
+{
+ struct xe_device *xe = pc_to_xe(pc);
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ int ret;
+
+ xe_gt_assert(gt, xe_device_uc_enabled(xe));
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+
+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (ret)
+ goto out_fail_force_wake;
+
+ if (xe->info.skip_guc_pc) {
+ if (xe->info.platform != XE_PVC)
+ xe_gt_idle_enable_c6(gt);
+
+ /* Request max possible since dynamic freq mgmt is not enabled */
+ pc_set_cur_freq(pc, UINT_MAX);
+
+ ret = 0;
+ goto out;
+ }
+
+ memset(pc->bo->vmap.vaddr, 0, size);
+ slpc_shared_data_write(pc, header.size, size);
+
+ ret = pc_action_reset(pc);
+ if (ret)
+ goto out;
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
+ drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pc_init_freqs(pc);
+ if (ret)
+ goto out;
+
+ if (xe->info.platform == XE_PVC) {
+ xe_guc_pc_gucrc_disable(pc);
+ ret = 0;
+ goto out;
+ }
+
+ ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL);
+
+out:
+ XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+out_fail_force_wake:
+ xe_device_mem_access_put(pc_to_xe(pc));
+ return ret;
+}
+
+/**
+ * xe_guc_pc_stop - Stop GuC's Power Conservation component
+ * @pc: Xe_GuC_PC instance
+ */
+int xe_guc_pc_stop(struct xe_guc_pc *pc)
+{
+ struct xe_device *xe = pc_to_xe(pc);
+ int ret;
+
+ xe_device_mem_access_get(pc_to_xe(pc));
+
+ if (xe->info.skip_guc_pc) {
+ xe_gt_idle_disable_c6(pc_to_gt(pc));
+ ret = 0;
+ goto out;
+ }
+
+ mutex_lock(&pc->freq_lock);
+ pc->freq_ready = false;
+ mutex_unlock(&pc->freq_lock);
+
+ ret = pc_action_shutdown(pc);
+ if (ret)
+ goto out;
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
+ drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
+ ret = -EIO;
+ }
+
+out:
+ xe_device_mem_access_put(pc_to_xe(pc));
+ return ret;
+}
+
+/**
+ * xe_guc_pc_fini - Finalize GuC's Power Conservation component
+ * @pc: Xe_GuC_PC instance
+ */
+void xe_guc_pc_fini(struct xe_guc_pc *pc)
+{
+ struct xe_device *xe = pc_to_xe(pc);
+
+ if (xe->info.skip_guc_pc) {
+ xe_device_mem_access_get(xe);
+ xe_gt_idle_disable_c6(pc_to_gt(pc));
+ xe_device_mem_access_put(xe);
+ return;
+ }
+
+ XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
+ XE_WARN_ON(xe_guc_pc_stop(pc));
+ mutex_destroy(&pc->freq_lock);
+}
+
+/**
+ * xe_guc_pc_init - Initialize GuC's Power Conservation component
+ * @pc: Xe_GuC_PC instance
+ */
+int xe_guc_pc_init(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *bo;
+ u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+
+ if (xe->info.skip_guc_pc)
+ return 0;
+
+ mutex_init(&pc->freq_lock);
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, size,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ pc->bo = bo;
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
new file mode 100644
index 000000000..cecad8e93
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_PC_H_
+#define _XE_GUC_PC_H_
+
+#include "xe_guc_pc_types.h"
+
+int xe_guc_pc_init(struct xe_guc_pc *pc);
+void xe_guc_pc_fini(struct xe_guc_pc *pc);
+int xe_guc_pc_start(struct xe_guc_pc *pc);
+int xe_guc_pc_stop(struct xe_guc_pc *pc);
+int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc);
+
+u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
+int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
+u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc);
+int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
+int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq);
+int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq);
+int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq);
+
+enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
+u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
+u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc);
+void xe_guc_pc_init_early(struct xe_guc_pc *pc);
+#endif /* _XE_GUC_PC_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
new file mode 100644
index 000000000..2afd0dbc3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_PC_TYPES_H_
+#define _XE_GUC_PC_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct xe_guc_pc - GuC Power Conservation (PC)
+ */
+struct xe_guc_pc {
+ /** @bo: GGTT buffer object that is shared with GuC PC */
+ struct xe_bo *bo;
+ /** @rp0_freq: HW RP0 frequency - The Maximum one */
+ u32 rp0_freq;
+ /** @rpe_freq: HW RPe frequency - The Efficient one */
+ u32 rpe_freq;
+ /** @rpn_freq: HW RPN frequency - The Minimum one */
+ u32 rpn_freq;
+ /** @user_requested_min: Stash the minimum requested freq by user */
+ u32 user_requested_min;
+ /** @user_requested_max: Stash the maximum requested freq by user */
+ u32 user_requested_max;
+ /** @freq_lock: Let's protect the frequencies */
+ struct mutex freq_lock;
+ /** @freq_ready: Only handle freq changes, if they are really ready */
+ bool freq_ready;
+};
+
+#endif /* _XE_GUC_PC_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
new file mode 100644
index 000000000..bd1079aa8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -0,0 +1,1987 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_guc_submit.h"
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/circ_buf.h>
+#include <linux/delay.h>
+#include <linux/dma-fence-array.h>
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_klvs_abi.h"
+#include "regs/xe_lrc_layout.h"
+#include "xe_assert.h"
+#include "xe_devcoredump.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_force_wake.h"
+#include "xe_gpu_scheduler.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_submit_types.h"
+#include "xe_hw_engine.h"
+#include "xe_hw_fence.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_map.h"
+#include "xe_mocs.h"
+#include "xe_ring_ops_types.h"
+#include "xe_sched_job.h"
+#include "xe_trace.h"
+#include "xe_vm.h"
+
+static struct xe_guc *
+exec_queue_to_guc(struct xe_exec_queue *q)
+{
+ return &q->gt->uc.guc;
+}
+
+/*
+ * Helpers for engine state, using an atomic as some of the bits can transition
+ * at the same time (e.g. a suspend can be happening at the same time as a
+ * schedule engine done being processed).
+ */
+#define EXEC_QUEUE_STATE_REGISTERED (1 << 0)
+#define ENGINE_STATE_ENABLED (1 << 1)
+#define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
+#define EXEC_QUEUE_STATE_PENDING_DISABLE (1 << 3)
+#define EXEC_QUEUE_STATE_DESTROYED (1 << 4)
+#define ENGINE_STATE_SUSPENDED (1 << 5)
+#define EXEC_QUEUE_STATE_RESET (1 << 6)
+#define ENGINE_STATE_KILLED (1 << 7)
+
+static bool exec_queue_registered(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
+}
+
+static void set_exec_queue_registered(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
+}
+
+static void clear_exec_queue_registered(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
+}
+
+static bool exec_queue_enabled(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED;
+}
+
+static void set_exec_queue_enabled(struct xe_exec_queue *q)
+{
+ atomic_or(ENGINE_STATE_ENABLED, &q->guc->state);
+}
+
+static void clear_exec_queue_enabled(struct xe_exec_queue *q)
+{
+ atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state);
+}
+
+static bool exec_queue_pending_enable(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
+}
+
+static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
+}
+
+static bool exec_queue_pending_disable(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
+}
+
+static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
+}
+
+static bool exec_queue_destroyed(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
+}
+
+static void set_exec_queue_destroyed(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
+}
+
+static bool exec_queue_banned(struct xe_exec_queue *q)
+{
+ return (q->flags & EXEC_QUEUE_FLAG_BANNED);
+}
+
+static void set_exec_queue_banned(struct xe_exec_queue *q)
+{
+ q->flags |= EXEC_QUEUE_FLAG_BANNED;
+}
+
+static bool exec_queue_suspended(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & ENGINE_STATE_SUSPENDED;
+}
+
+static void set_exec_queue_suspended(struct xe_exec_queue *q)
+{
+ atomic_or(ENGINE_STATE_SUSPENDED, &q->guc->state);
+}
+
+static void clear_exec_queue_suspended(struct xe_exec_queue *q)
+{
+ atomic_and(~ENGINE_STATE_SUSPENDED, &q->guc->state);
+}
+
+static bool exec_queue_reset(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
+}
+
+static void set_exec_queue_reset(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
+}
+
+static bool exec_queue_killed(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & ENGINE_STATE_KILLED;
+}
+
+static void set_exec_queue_killed(struct xe_exec_queue *q)
+{
+ atomic_or(ENGINE_STATE_KILLED, &q->guc->state);
+}
+
+static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
+{
+ return exec_queue_killed(q) || exec_queue_banned(q);
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+static int alloc_submit_wq(struct xe_guc *guc)
+{
+ int i;
+
+ for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
+ guc->submission_state.submit_wq_pool[i] =
+ alloc_ordered_workqueue("submit_wq", 0);
+ if (!guc->submission_state.submit_wq_pool[i])
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ while (i)
+ destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
+
+ return -ENOMEM;
+}
+
+static void free_submit_wq(struct xe_guc *guc)
+{
+ int i;
+
+ for (i = 0; i < NUM_SUBMIT_WQ; ++i)
+ destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
+}
+
+static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
+{
+ int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
+
+ return guc->submission_state.submit_wq_pool[idx];
+}
+#else
+static int alloc_submit_wq(struct xe_guc *guc)
+{
+ return 0;
+}
+
+static void free_submit_wq(struct xe_guc *guc)
+{
+}
+
+static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
+{
+ return NULL;
+}
+#endif
+
+static void guc_submit_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_guc *guc = arg;
+
+ xa_destroy(&guc->submission_state.exec_queue_lookup);
+ ida_destroy(&guc->submission_state.guc_ids);
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
+ free_submit_wq(guc);
+ mutex_destroy(&guc->submission_state.lock);
+}
+
+#define GUC_ID_MAX 65535
+#define GUC_ID_NUMBER_MLRC 4096
+#define GUC_ID_NUMBER_SLRC (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
+#define GUC_ID_START_MLRC GUC_ID_NUMBER_SLRC
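+
+/*
+ * Resulting GuC ID space layout (illustrative only): of the 65535 IDs,
+ * the first GUC_ID_NUMBER_SLRC (65535 - 4096 = 61439) are handed out by
+ * the ida for single-LRC queues, while the top GUC_ID_NUMBER_MLRC (4096)
+ * IDs, starting at 61439, are carved out of a bitmap in power-of-two
+ * blocks for multi-LRC (parallel) queues.
+ */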
+
+static const struct xe_exec_queue_ops guc_exec_queue_ops;
+
+static void primelockdep(struct xe_guc *guc)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+
+ mutex_lock(&guc->submission_state.lock);
+ might_lock(&guc->submission_state.suspend.lock);
+ mutex_unlock(&guc->submission_state.lock);
+
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+int xe_guc_submit_init(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int err;
+
+ guc->submission_state.guc_ids_bitmap =
+ bitmap_zalloc(GUC_ID_NUMBER_MLRC, GFP_KERNEL);
+ if (!guc->submission_state.guc_ids_bitmap)
+ return -ENOMEM;
+
+ err = alloc_submit_wq(guc);
+ if (err) {
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
+ return err;
+ }
+
+ gt->exec_queue_ops = &guc_exec_queue_ops;
+
+ mutex_init(&guc->submission_state.lock);
+ xa_init(&guc->submission_state.exec_queue_lookup);
+ ida_init(&guc->submission_state.guc_ids);
+
+ spin_lock_init(&guc->submission_state.suspend.lock);
+ guc->submission_state.suspend.context = dma_fence_context_alloc(1);
+
+ primelockdep(guc);
+
+ err = drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
+{
+ int i;
+
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ for (i = 0; i < xa_count; ++i)
+ xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
+
+ if (xe_exec_queue_is_parallel(q))
+ bitmap_release_region(guc->submission_state.guc_ids_bitmap,
+ q->guc->id - GUC_ID_START_MLRC,
+ order_base_2(q->width));
+ else
+ ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
+}
+
+static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ int ret;
+ void *ptr;
+ int i;
+
+ /*
+ * Must use GFP_NOWAIT as this lock is in the dma fence signalling path;
+ * worst case the user gets -ENOMEM on engine create and has to try again.
+ *
+ * FIXME: Have caller pre-alloc or post-alloc /w GFP_KERNEL to prevent
+ * failure.
+ */
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ if (xe_exec_queue_is_parallel(q)) {
+ void *bitmap = guc->submission_state.guc_ids_bitmap;
+
+ ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
+ order_base_2(q->width));
+ } else {
+ ret = ida_simple_get(&guc->submission_state.guc_ids, 0,
+ GUC_ID_NUMBER_SLRC, GFP_NOWAIT);
+ }
+ if (ret < 0)
+ return ret;
+
+ q->guc->id = ret;
+ if (xe_exec_queue_is_parallel(q))
+ q->guc->id += GUC_ID_START_MLRC;
+
+ for (i = 0; i < q->width; ++i) {
+ ptr = xa_store(&guc->submission_state.exec_queue_lookup,
+ q->guc->id + i, q, GFP_NOWAIT);
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ goto err_release;
+ }
+ }
+
+ return 0;
+
+err_release:
+ __release_guc_id(guc, q, i);
+
+ return ret;
+}
+
+static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ mutex_lock(&guc->submission_state.lock);
+ __release_guc_id(guc, q, q->width);
+ mutex_unlock(&guc->submission_state.lock);
+}
+
+struct exec_queue_policy {
+ u32 count;
+ struct guc_update_exec_queue_policy h2g;
+};
+
+static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
+{
+ size_t bytes = sizeof(policy->h2g.header) +
+ (sizeof(policy->h2g.klv[0]) * policy->count);
+
+ return bytes / sizeof(u32);
+}
+
+static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
+ u16 guc_id)
+{
+ policy->h2g.header.action =
+ XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
+ policy->h2g.header.guc_id = guc_id;
+ policy->count = 0;
+}
+
+#define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
+static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
+ u32 data) \
+{ \
+ XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
+\
+ policy->h2g.klv[policy->count].kl = \
+ FIELD_PREP(GUC_KLV_0_KEY, \
+ GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
+ FIELD_PREP(GUC_KLV_0_LEN, 1); \
+ policy->h2g.klv[policy->count].value = data; \
+ policy->count++; \
+}
+
+MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
+MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
+MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+#undef MAKE_EXEC_QUEUE_POLICY_ADD
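+
+/*
+ * Illustrative expansion (not compiled):
+ * MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY) generates
+ * __guc_exec_queue_policy_add_priority(policy, data), which appends a
+ * one-dword KLV keyed by GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY
+ * with value 'data' and bumps policy->count.
+ */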
+
+static const int xe_exec_queue_prio_to_guc[] = {
+ [XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
+ [XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ [XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
+ [XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
+};
+
+static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct exec_queue_policy policy;
+ struct xe_device *xe = guc_to_xe(guc);
+ enum xe_exec_queue_priority prio = q->sched_props.priority;
+ u32 timeslice_us = q->sched_props.timeslice_us;
+ u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
+
+ xe_assert(xe, exec_queue_registered(q));
+
+ __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
+ __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
+ __guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
+ __guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
+
+ xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
+ __guc_exec_queue_policy_action_size(&policy), 0, 0);
+}
+
+static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct exec_queue_policy policy;
+
+ __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
+ __guc_exec_queue_policy_add_preemption_timeout(&policy, 1);
+
+ xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
+ __guc_exec_queue_policy_action_size(&policy), 0, 0);
+}
+
+#define parallel_read(xe_, map_, field_) \
+ xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
+ field_)
+#define parallel_write(xe_, map_, field_, val_) \
+ xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
+ field_, val_)
+
+static void __register_mlrc_engine(struct xe_guc *guc,
+ struct xe_exec_queue *q,
+ struct guc_ctxt_registration_info *info)
+{
+#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 action[MAX_MLRC_REG_SIZE];
+ int len = 0;
+ int i;
+
+ xe_assert(xe, xe_exec_queue_is_parallel(q));
+
+ action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = info->flags;
+ action[len++] = info->context_idx;
+ action[len++] = info->engine_class;
+ action[len++] = info->engine_submit_mask;
+ action[len++] = info->wq_desc_lo;
+ action[len++] = info->wq_desc_hi;
+ action[len++] = info->wq_base_lo;
+ action[len++] = info->wq_base_hi;
+ action[len++] = info->wq_size;
+ action[len++] = q->width;
+ action[len++] = info->hwlrca_lo;
+ action[len++] = info->hwlrca_hi;
+
+ for (i = 1; i < q->width; ++i) {
+ struct xe_lrc *lrc = q->lrc + i;
+
+ action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
+ action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
+ }
+
+ xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
+#undef MAX_MLRC_REG_SIZE
+
+ xe_guc_ct_send(&guc->ct, action, len, 0, 0);
+}
+
+static void __register_engine(struct xe_guc *guc,
+ struct guc_ctxt_registration_info *info)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_REGISTER_CONTEXT,
+ info->flags,
+ info->context_idx,
+ info->engine_class,
+ info->engine_submit_mask,
+ info->wq_desc_lo,
+ info->wq_desc_hi,
+ info->wq_base_lo,
+ info->wq_base_hi,
+ info->wq_size,
+ info->hwlrca_lo,
+ info->hwlrca_hi,
+ };
+
+ xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
+}
+
+static void register_engine(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_lrc *lrc = q->lrc;
+ struct guc_ctxt_registration_info info;
+
+ xe_assert(xe, !exec_queue_registered(q));
+
+ memset(&info, 0, sizeof(info));
+ info.context_idx = q->guc->id;
+ info.engine_class = xe_engine_class_to_guc_class(q->class);
+ info.engine_submit_mask = q->logical_mask;
+ info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
+ info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
+ info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
+
+ if (xe_exec_queue_is_parallel(q)) {
+ u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
+ struct iosys_map map = xe_lrc_parallel_map(lrc);
+
+ info.wq_desc_lo = lower_32_bits(ggtt_addr +
+ offsetof(struct guc_submit_parallel_scratch, wq_desc));
+ info.wq_desc_hi = upper_32_bits(ggtt_addr +
+ offsetof(struct guc_submit_parallel_scratch, wq_desc));
+ info.wq_base_lo = lower_32_bits(ggtt_addr +
+ offsetof(struct guc_submit_parallel_scratch, wq[0]));
+ info.wq_base_hi = upper_32_bits(ggtt_addr +
+ offsetof(struct guc_submit_parallel_scratch, wq[0]));
+ info.wq_size = WQ_SIZE;
+
+ q->guc->wqi_head = 0;
+ q->guc->wqi_tail = 0;
+ xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
+ parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
+ }
+
+ /*
+ * We must keep a reference for LR engines if the engine is registered with
+ * the GuC, as jobs signal immediately and we can't destroy an engine while
+ * the GuC has a reference to it.
+ */
+ if (xe_exec_queue_is_lr(q))
+ xe_exec_queue_get(q);
+
+ set_exec_queue_registered(q);
+ trace_xe_exec_queue_register(q);
+ if (xe_exec_queue_is_parallel(q))
+ __register_mlrc_engine(guc, q, &info);
+ else
+ __register_engine(guc, &info);
+ init_policies(guc, q);
+}
+
+static u32 wq_space_until_wrap(struct xe_exec_queue *q)
+{
+ return (WQ_SIZE - q->guc->wqi_tail);
+}
+
+static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+ unsigned int sleep_period_ms = 1;
+
+#define AVAILABLE_SPACE \
+ CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
+ if (wqi_size > AVAILABLE_SPACE) {
+try_again:
+ q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
+ if (wqi_size > AVAILABLE_SPACE) {
+ if (sleep_period_ms == 1024) {
+ xe_gt_reset_async(q->gt);
+ return -ENODEV;
+ }
+
+ msleep(sleep_period_ms);
+ sleep_period_ms <<= 1;
+ goto try_again;
+ }
+ }
+#undef AVAILABLE_SPACE
+
+ return 0;
+}
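+
+/*
+ * Illustrative backoff for wq_wait_for_space() (not part of the driver):
+ * the head is re-read from the shared descriptor on every retry and the
+ * sleep doubles 1, 2, 4, ... 512 ms; once it would reach 1024 ms (~1 s of
+ * total sleeping) the queue is declared stuck and an async GT reset is
+ * kicked off with -ENODEV returned.
+ */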
+
+static int wq_noop_append(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+ u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
+
+ if (wq_wait_for_space(q, wq_space_until_wrap(q)))
+ return -ENODEV;
+
+ xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
+ FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw));
+ q->guc->wqi_tail = 0;
+
+ return 0;
+}
+
+static void wq_item_append(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+#define WQ_HEADER_SIZE 4 /* Includes 1 LRC address too */
+ u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
+ u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
+ u32 len_dw = (wqi_size / sizeof(u32)) - 1;
+ int i = 0, j;
+
+ if (wqi_size > wq_space_until_wrap(q)) {
+ if (wq_noop_append(q))
+ return;
+ }
+ if (wq_wait_for_space(q, wqi_size))
+ return;
+
+ wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ wqi[i++] = xe_lrc_descriptor(q->lrc);
+ wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
+ FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64));
+ wqi[i++] = 0;
+ for (j = 1; j < q->width; ++j) {
+ struct xe_lrc *lrc = q->lrc + j;
+
+ wqi[i++] = lrc->ring.tail / sizeof(u64);
+ }
+
+ xe_assert(xe, i == wqi_size / sizeof(u32));
+
+ iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
+ wq[q->guc->wqi_tail / sizeof(u32)]));
+ xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
+ q->guc->wqi_tail += wqi_size;
+ xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
+
+ xe_device_wmb(xe);
+
+ map = xe_lrc_parallel_map(q->lrc);
+ parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
+}
+
+#define RESUME_PENDING ~0x0ull
+static void submit_exec_queue(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_lrc *lrc = q->lrc;
+ u32 action[3];
+ u32 g2h_len = 0;
+ u32 num_g2h = 0;
+ int len = 0;
+ bool extra_submit = false;
+
+ xe_assert(xe, exec_queue_registered(q));
+
+ if (xe_exec_queue_is_parallel(q))
+ wq_item_append(q);
+ else
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
+
+ if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
+ return;
+
+ if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
+ action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
+ action[len++] = q->guc->id;
+ action[len++] = GUC_CONTEXT_ENABLE;
+ g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
+ num_g2h = 1;
+ if (xe_exec_queue_is_parallel(q))
+ extra_submit = true;
+
+ q->guc->resume_time = RESUME_PENDING;
+ set_exec_queue_pending_enable(q);
+ set_exec_queue_enabled(q);
+ trace_xe_exec_queue_scheduling_enable(q);
+ } else {
+ action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
+ action[len++] = q->guc->id;
+ trace_xe_exec_queue_submit(q);
+ }
+
+ xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);
+
+ if (extra_submit) {
+ len = 0;
+ action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
+ action[len++] = q->guc->id;
+ trace_xe_exec_queue_submit(q);
+
+ xe_guc_ct_send(&guc->ct, action, len, 0, 0);
+ }
+}
+
+static struct dma_fence *
+guc_exec_queue_run_job(struct drm_sched_job *drm_job)
+{
+ struct xe_sched_job *job = to_xe_sched_job(drm_job);
+ struct xe_exec_queue *q = job->q;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ bool lr = xe_exec_queue_is_lr(q);
+
+ xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
+ exec_queue_banned(q) || exec_queue_suspended(q));
+
+ trace_xe_sched_job_run(job);
+
+ if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) {
+ if (!exec_queue_registered(q))
+ register_engine(q);
+ if (!lr) /* LR jobs are emitted in the exec IOCTL */
+ q->ring_ops->emit_job(job);
+ submit_exec_queue(q);
+ }
+
+ if (lr) {
+ xe_sched_job_set_error(job, -EOPNOTSUPP);
+ return NULL;
+ } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
+ return job->fence;
+ } else {
+ return dma_fence_get(job->fence);
+ }
+}
+
+static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
+{
+ struct xe_sched_job *job = to_xe_sched_job(drm_job);
+
+ trace_xe_sched_job_free(job);
+ xe_sched_job_put(job);
+}
+
+static int guc_read_stopped(struct xe_guc *guc)
+{
+ return atomic_read(&guc->submission_state.stopped);
+}
+
+#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \
+ u32 action[] = { \
+ XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET, \
+ q->guc->id, \
+ GUC_CONTEXT_##enable_disable, \
+ }
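+
+/*
+ * Illustrative expansion (not compiled): MAKE_SCHED_CONTEXT_ACTION(q, ENABLE)
+ * declares a local u32 action[] = { XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
+ * q->guc->id, GUC_CONTEXT_ENABLE }, used as the H2G payload in the senders
+ * below.
+ */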
+
+static void disable_scheduling_deregister(struct xe_guc *guc,
+ struct xe_exec_queue *q)
+{
+ MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
+ struct xe_device *xe = guc_to_xe(guc);
+ int ret;
+
+ set_min_preemption_timeout(guc, q);
+ smp_rmb();
+ ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
+ guc_read_stopped(guc), HZ * 5);
+ if (!ret) {
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ drm_warn(&xe->drm, "Pending enable failed to respond");
+ xe_sched_submission_start(sched);
+ xe_gt_reset_async(q->gt);
+ xe_sched_tdr_queue_imm(sched);
+ return;
+ }
+
+ clear_exec_queue_enabled(q);
+ set_exec_queue_pending_disable(q);
+ set_exec_queue_destroyed(q);
+ trace_xe_exec_queue_scheduling_disable(q);
+
+ /*
+ * Reserve space for both G2H messages here, as the 2nd G2H is sent from a
+ * G2H handler and we are not allowed to reserve G2H space in handlers.
+ */
+ xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET +
+ G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
+}
+
+static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p);
+
+#if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
+static void simple_error_capture(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct drm_printer p = drm_err_printer("");
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 adj_logical_mask = q->logical_mask;
+ u32 width_mask = (0x1 << q->width) - 1;
+ int i;
+ bool cookie;
+
+ if (q->vm && !q->vm->error_capture.capture_once) {
+ q->vm->error_capture.capture_once = true;
+ cookie = dma_fence_begin_signalling();
+ for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
+ if (adj_logical_mask & BIT(i)) {
+ adj_logical_mask |= width_mask << i;
+ i += q->width;
+ } else {
+ ++i;
+ }
+ }
+
+ xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ xe_guc_ct_print(&guc->ct, &p, true);
+ guc_exec_queue_print(q, &p);
+ for_each_hw_engine(hwe, guc_to_gt(guc), id) {
+ if (hwe->class != q->hwe->class ||
+ !(BIT(hwe->logical_instance) & adj_logical_mask))
+ continue;
+ xe_hw_engine_print(hwe, &p);
+ }
+ xe_analyze_vm(&p, q->vm, q->gt->info.id);
+ xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
+ dma_fence_end_signalling(cookie);
+ }
+}
+#else
+static void simple_error_capture(struct xe_exec_queue *q)
+{
+}
+#endif
+
+static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ /* Wake up the xe_wait_user_fence ioctl if the exec queue is reset */
+ wake_up_all(&xe->ufence_wq);
+
+ if (xe_exec_queue_is_lr(q))
+ queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
+ else
+ xe_sched_tdr_queue_imm(&q->guc->sched);
+}
+
+static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
+{
+ struct xe_guc_exec_queue *ge =
+ container_of(w, struct xe_guc_exec_queue, lr_tdr);
+ struct xe_exec_queue *q = ge->q;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gpu_scheduler *sched = &ge->sched;
+
+ xe_assert(xe, xe_exec_queue_is_lr(q));
+ trace_xe_exec_queue_lr_cleanup(q);
+
+ /* Kill the run_job / process_msg entry points */
+ xe_sched_submission_stop(sched);
+
+ /*
+ * Engine state now mostly stable, disable scheduling / deregister if
+ * needed. This cleanup routine might be called multiple times, where
+ * the actual async engine deregister drops the final engine ref.
+ * Calling disable_scheduling_deregister will mark the engine as
+ * destroyed and fire off the CT requests to disable scheduling /
+ * deregister, which we only want to do once. We also don't want to mark
+ * the engine as pending_disable again as this may race with the
+ * xe_guc_deregister_done_handler() which treats it as an unexpected
+ * state.
+ */
+ if (exec_queue_registered(q) && !exec_queue_destroyed(q)) {
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
+
+ set_exec_queue_banned(q);
+ disable_scheduling_deregister(guc, q);
+
+ /*
+ * Must wait for scheduling to be disabled before signalling
+ * any fences; if the GT is broken, the GT reset code should signal us.
+ */
+ ret = wait_event_timeout(guc->ct.wq,
+ !exec_queue_pending_disable(q) ||
+ guc_read_stopped(guc), HZ * 5);
+ if (!ret) {
+ drm_warn(&xe->drm, "Schedule disable failed to respond");
+ xe_sched_submission_start(sched);
+ xe_gt_reset_async(q->gt);
+ return;
+ }
+ }
+
+ xe_sched_submission_start(sched);
+}
+
+static enum drm_gpu_sched_stat
+guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
+{
+ struct xe_sched_job *job = to_xe_sched_job(drm_job);
+ struct xe_sched_job *tmp_job;
+ struct xe_exec_queue *q = job->q;
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_device *xe = guc_to_xe(exec_queue_to_guc(q));
+ int err = -ETIME;
+ int i = 0;
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)));
+
+ drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), q->guc->id, q->flags);
+ simple_error_capture(q);
+ xe_devcoredump(q);
+ } else {
+ drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), q->guc->id, q->flags);
+ }
+ trace_xe_sched_job_timedout(job);
+
+ /* Kill the run_job entry point */
+ xe_sched_submission_stop(sched);
+
+ /*
+ * Kernel jobs should never fail, nor should VM jobs; if they do,
+ * something has gone wrong and the GT needs a reset.
+ */
+ if (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
+ (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))) {
+ if (!xe_sched_invalidate_job(job, 2)) {
+ xe_sched_add_pending_job(sched, job);
+ xe_sched_submission_start(sched);
+ xe_gt_reset_async(q->gt);
+ goto out;
+ }
+ }
+
+ /* Engine state now stable, disable scheduling if needed */
+ if (exec_queue_registered(q)) {
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ int ret;
+
+ if (exec_queue_reset(q))
+ err = -EIO;
+ set_exec_queue_banned(q);
+ if (!exec_queue_destroyed(q)) {
+ xe_exec_queue_get(q);
+ disable_scheduling_deregister(guc, q);
+ }
+
+ /*
+ * Must wait for scheduling to be disabled before signalling
+ * any fences; if the GT is broken, the GT reset code should signal us.
+ *
+ * FIXME: Tests can generate a ton of 0x6000 (IOMMU CAT fault
+ * error) messages which can cause the schedule disable to get
+ * lost. If this occurs, trigger a GT reset to recover.
+ */
+ smp_rmb();
+ ret = wait_event_timeout(guc->ct.wq,
+ !exec_queue_pending_disable(q) ||
+ guc_read_stopped(guc), HZ * 5);
+ if (!ret || guc_read_stopped(guc)) {
+ drm_warn(&xe->drm, "Schedule disable failed to respond");
+ xe_sched_add_pending_job(sched, job);
+ xe_sched_submission_start(sched);
+ xe_gt_reset_async(q->gt);
+ xe_sched_tdr_queue_imm(sched);
+ goto out;
+ }
+ }
+
+ /* Stop fence signaling */
+ xe_hw_fence_irq_stop(q->fence_irq);
+
+ /*
+ * Fence state now stable, stop / start scheduler which cleans up any
+ * fences that are complete
+ */
+ xe_sched_add_pending_job(sched, job);
+ xe_sched_submission_start(sched);
+ xe_guc_exec_queue_trigger_cleanup(q);
+
+ /* Mark all outstanding jobs as bad, thus completing them */
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
+ xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
+ spin_unlock(&sched->base.job_list_lock);
+
+ /* Start fence signaling */
+ xe_hw_fence_irq_start(q->fence_irq);
+
+out:
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+static void __guc_exec_queue_fini_async(struct work_struct *w)
+{
+ struct xe_guc_exec_queue *ge =
+ container_of(w, struct xe_guc_exec_queue, fini_async);
+ struct xe_exec_queue *q = ge->q;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ trace_xe_exec_queue_destroy(q);
+
+ if (xe_exec_queue_is_lr(q))
+ cancel_work_sync(&ge->lr_tdr);
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+
+ kfree(ge);
+ xe_exec_queue_fini(q);
+}
+
+static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+{
+ INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+
+ /* We must block on kernel engines so slabs are empty on driver unload */
+ if (q->flags & EXEC_QUEUE_FLAG_PERMANENT)
+ __guc_exec_queue_fini_async(&q->guc->fini_async);
+ else
+ queue_work(system_wq, &q->guc->fini_async);
+}
+
+static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ /*
+ * Might be done from within the GPU scheduler; we need to do this async
+ * as we fini the scheduler when the engine is fini'd and the scheduler
+ * can't complete fini within itself (circular dependency). Async resolves
+ * this, and we don't really care when everything is fini'd, just that it
+ * is.
+ */
+ guc_exec_queue_fini_async(q);
+}
+
+static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ trace_xe_exec_queue_cleanup_entity(q);
+
+ if (exec_queue_registered(q))
+ disable_scheduling_deregister(guc, q);
+ else
+ __guc_exec_queue_fini(guc, q);
+}
+
+static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
+{
+ return !exec_queue_killed_or_banned(q) && exec_queue_registered(q);
+}
+
+static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ if (guc_exec_queue_allowed_to_change_state(q))
+ init_policies(guc, q);
+ kfree(msg);
+}
+
+static void suspend_fence_signal(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
+ guc_read_stopped(guc));
+ xe_assert(xe, q->guc->suspend_pending);
+
+ q->guc->suspend_pending = false;
+ smp_wmb();
+ wake_up(&q->guc->suspend_wait);
+}
+
+static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
+ exec_queue_enabled(q)) {
+ wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
+ guc_read_stopped(guc));
+
+ if (!guc_read_stopped(guc)) {
+ MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
+ s64 since_resume_ms =
+ ktime_ms_delta(ktime_get(),
+ q->guc->resume_time);
+ s64 wait_ms = q->vm->preempt.min_run_period_ms -
+ since_resume_ms;
+
+ if (wait_ms > 0 && q->guc->resume_time)
+ msleep(wait_ms);
+
+ set_exec_queue_suspended(q);
+ clear_exec_queue_enabled(q);
+ set_exec_queue_pending_disable(q);
+ trace_xe_exec_queue_scheduling_disable(q);
+
+ xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
+ }
+ } else if (q->guc->suspend_pending) {
+ set_exec_queue_suspended(q);
+ suspend_fence_signal(q);
+ }
+}
+
+static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
+{
+ struct xe_exec_queue *q = msg->private_data;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ if (guc_exec_queue_allowed_to_change_state(q)) {
+ MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
+
+ q->guc->resume_time = RESUME_PENDING;
+ clear_exec_queue_suspended(q);
+ set_exec_queue_pending_enable(q);
+ set_exec_queue_enabled(q);
+ trace_xe_exec_queue_scheduling_enable(q);
+
+ xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
+ } else {
+ clear_exec_queue_suspended(q);
+ }
+}
+
+#define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
+#define SET_SCHED_PROPS 2
+#define SUSPEND 3
+#define RESUME 4
+
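+/*
+ * Message flow sketch (illustrative only, mirroring the code in this file):
+ * producers allocate an xe_sched_msg (or use one of the per-queue
+ * static_msgs slots), stamp it with one of the opcodes above via
+ * guc_exec_queue_add_msg(), and the scheduler's ordered workqueue later
+ * dispatches it through guc_exec_queue_process_msg() below, e.g.:
+ *
+ *	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ *	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
+ */
+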
+static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
+{
+ trace_xe_sched_msg_recv(msg);
+
+ switch (msg->opcode) {
+ case CLEANUP:
+ __guc_exec_queue_process_msg_cleanup(msg);
+ break;
+ case SET_SCHED_PROPS:
+ __guc_exec_queue_process_msg_set_sched_props(msg);
+ break;
+ case SUSPEND:
+ __guc_exec_queue_process_msg_suspend(msg);
+ break;
+ case RESUME:
+ __guc_exec_queue_process_msg_resume(msg);
+ break;
+ default:
+ XE_WARN_ON("Unknown message type");
+ }
+}
+
+static const struct drm_sched_backend_ops drm_sched_ops = {
+ .run_job = guc_exec_queue_run_job,
+ .free_job = guc_exec_queue_free_job,
+ .timedout_job = guc_exec_queue_timedout_job,
+};
+
+static const struct xe_sched_backend_ops xe_sched_ops = {
+ .process_msg = guc_exec_queue_process_msg,
+};
+
+static int guc_exec_queue_init(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_guc_exec_queue *ge;
+ long timeout;
+ int err;
+
+ xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
+
+ ge = kzalloc(sizeof(*ge), GFP_KERNEL);
+ if (!ge)
+ return -ENOMEM;
+
+ q->guc = ge;
+ ge->q = q;
+ init_waitqueue_head(&ge->suspend_wait);
+
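+	/*
+	 * Queues on long-running (LR) mode VMs never time out their jobs;
+	 * everything else uses the configured job timeout.
+	 */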
+ timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
+ msecs_to_jiffies(q->sched_props.job_timeout_ms);
+ err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
+ get_submit_wq(guc),
+ q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
+ timeout, guc_to_gt(guc)->ordered_wq, NULL,
+ q->name, gt_to_xe(q->gt)->drm.dev);
+ if (err)
+ goto err_free;
+
+ sched = &ge->sched;
+ err = xe_sched_entity_init(&ge->entity, sched);
+ if (err)
+ goto err_sched;
+
+ if (xe_exec_queue_is_lr(q))
+ INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
+
+ mutex_lock(&guc->submission_state.lock);
+
+ err = alloc_guc_id(guc, q);
+ if (err)
+ goto err_entity;
+
+ q->entity = &ge->entity;
+
+ if (guc_read_stopped(guc))
+ xe_sched_stop(sched);
+
+ mutex_unlock(&guc->submission_state.lock);
+
+ xe_exec_queue_assign_name(q, q->guc->id);
+
+ trace_xe_exec_queue_create(q);
+
+ return 0;
+
+err_entity:
+ xe_sched_entity_fini(&ge->entity);
+err_sched:
+ xe_sched_fini(&ge->sched);
+err_free:
+ kfree(ge);
+
+ return err;
+}
+
+static void guc_exec_queue_kill(struct xe_exec_queue *q)
+{
+ trace_xe_exec_queue_kill(q);
+ set_exec_queue_killed(q);
+ xe_guc_exec_queue_trigger_cleanup(q);
+}
+
+static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
+ u32 opcode)
+{
+ INIT_LIST_HEAD(&msg->link);
+ msg->opcode = opcode;
+ msg->private_data = q;
+
+ trace_xe_sched_msg_add(msg);
+ xe_sched_add_msg(&q->guc->sched, msg);
+}
+
+#define STATIC_MSG_CLEANUP 0
+#define STATIC_MSG_SUSPEND 1
+#define STATIC_MSG_RESUME 2
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
+
+ if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT))
+ guc_exec_queue_add_msg(q, msg, CLEANUP);
+ else
+ __guc_exec_queue_fini(exec_queue_to_guc(q), q);
+}
+
+static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
+ enum xe_exec_queue_priority priority)
+{
+ struct xe_sched_msg *msg;
+
+ if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
+ return 0;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ q->sched_props.priority = priority;
+ guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
+
+ return 0;
+}
+
+static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
+{
+ struct xe_sched_msg *msg;
+
+ if (q->sched_props.timeslice_us == timeslice_us ||
+ exec_queue_killed_or_banned(q))
+ return 0;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ q->sched_props.timeslice_us = timeslice_us;
+ guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
+
+ return 0;
+}
+
+static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
+ u32 preempt_timeout_us)
+{
+ struct xe_sched_msg *msg;
+
+ if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
+ exec_queue_killed_or_banned(q))
+ return 0;
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ q->sched_props.preempt_timeout_us = preempt_timeout_us;
+ guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
+
+ return 0;
+}
+
+static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_assert(xe, !exec_queue_registered(q));
+ xe_assert(xe, !exec_queue_banned(q));
+ xe_assert(xe, !exec_queue_killed(q));
+
+ sched->base.timeout = job_timeout_ms;
+
+ return 0;
+}
+
+static int guc_exec_queue_suspend(struct xe_exec_queue *q)
+{
+ struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
+
+ if (exec_queue_killed_or_banned(q) || q->guc->suspend_pending)
+ return -EINVAL;
+
+ q->guc->suspend_pending = true;
+ guc_exec_queue_add_msg(q, msg, SUSPEND);
+
+ return 0;
+}
+
+static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
+ guc_read_stopped(guc));
+}
+
+static void guc_exec_queue_resume(struct xe_exec_queue *q)
+{
+ struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_assert(xe, !q->guc->suspend_pending);
+
+ guc_exec_queue_add_msg(q, msg, RESUME);
+}
+
+static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
+{
+ return exec_queue_reset(q);
+}
+
+/*
+ * All of these functions are an abstraction layer which other parts of XE can
+ * use to trap into the GuC backend. Aside from init, these functions really
+ * shouldn't do much other than trap into the DRM scheduler, which
+ * synchronizes these operations.
+ */
+static const struct xe_exec_queue_ops guc_exec_queue_ops = {
+ .init = guc_exec_queue_init,
+ .kill = guc_exec_queue_kill,
+ .fini = guc_exec_queue_fini,
+ .set_priority = guc_exec_queue_set_priority,
+ .set_timeslice = guc_exec_queue_set_timeslice,
+ .set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
+ .set_job_timeout = guc_exec_queue_set_job_timeout,
+ .suspend = guc_exec_queue_suspend,
+ .suspend_wait = guc_exec_queue_suspend_wait,
+ .resume = guc_exec_queue_resume,
+ .reset_status = guc_exec_queue_reset_status,
+};
+
+static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ /* Stop scheduling + flush any DRM scheduler operations */
+ xe_sched_submission_stop(sched);
+
+ /* Clean up lost G2H + reset engine state */
+ if (exec_queue_registered(q)) {
+ if ((exec_queue_banned(q) && exec_queue_destroyed(q)) ||
+ xe_exec_queue_is_lr(q))
+ xe_exec_queue_put(q);
+ else if (exec_queue_destroyed(q))
+ __guc_exec_queue_fini(guc, q);
+ }
+ if (q->guc->suspend_pending) {
+ set_exec_queue_suspended(q);
+ suspend_fence_signal(q);
+ }
+ atomic_and(EXEC_QUEUE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
+ &q->guc->state);
+ q->guc->resume_time = 0;
+ trace_xe_exec_queue_stop(q);
+
+ /*
+	 * Ban any engine (aside from kernel engines and engines used for VM
+	 * ops) that has a started but not completed job, or whose job has gone
+	 * through a GT reset more than twice.
+ */
+ if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
+ struct xe_sched_job *job = xe_sched_first_pending_job(sched);
+
+ if (job) {
+ if ((xe_sched_job_started(job) &&
+ !xe_sched_job_completed(job)) ||
+ xe_sched_invalidate_job(job, 2)) {
+ trace_xe_sched_job_ban(job);
+ xe_sched_tdr_queue_imm(&q->guc->sched);
+ set_exec_queue_banned(q);
+ }
+ }
+ }
+}
+
+int xe_guc_submit_reset_prepare(struct xe_guc *guc)
+{
+ int ret;
+
+ /*
+ * Using an atomic here rather than submission_state.lock as this
+ * function can be called while holding the CT lock (engine reset
+ * failure). submission_state.lock needs the CT lock to resubmit jobs.
+	 * An atomic is not ideal, but it works to protect against a concurrent
+	 * reset and to release any TDRs waiting on guc->submission_state.stopped.
+ */
+ ret = atomic_fetch_or(1, &guc->submission_state.stopped);
+ smp_wmb();
+ wake_up_all(&guc->ct.wq);
+
+ return ret;
+}
+
+void xe_guc_submit_reset_wait(struct xe_guc *guc)
+{
+ wait_event(guc->ct.wq, !guc_read_stopped(guc));
+}
+
+int xe_guc_submit_stop(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_assert(xe, guc_read_stopped(guc) == 1);
+
+ mutex_lock(&guc->submission_state.lock);
+
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ guc_exec_queue_stop(guc, q);
+
+ mutex_unlock(&guc->submission_state.lock);
+
+ /*
+ * No one can enter the backend at this point, aside from new engine
+ * creation which is protected by guc->submission_state.lock.
+ */
+
+ return 0;
+}
+
+static void guc_exec_queue_start(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ if (!exec_queue_killed_or_banned(q)) {
+ int i;
+
+ trace_xe_exec_queue_resubmit(q);
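+		/*
+		 * Setting ring head == tail makes the ring appear empty, so
+		 * resubmitted jobs re-emit their commands from scratch.
+		 */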
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail);
+ xe_sched_resubmit_jobs(sched);
+ }
+
+ xe_sched_submission_start(sched);
+}
+
+int xe_guc_submit_start(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+ struct xe_device *xe = guc_to_xe(guc);
+
+ xe_assert(xe, guc_read_stopped(guc) == 1);
+
+ mutex_lock(&guc->submission_state.lock);
+ atomic_dec(&guc->submission_state.stopped);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ guc_exec_queue_start(q);
+ mutex_unlock(&guc->submission_state.lock);
+
+ wake_up_all(&guc->ct.wq);
+
+ return 0;
+}
+
+static struct xe_exec_queue *
+g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+
+ if (unlikely(guc_id >= GUC_ID_MAX)) {
+ drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
+ return NULL;
+ }
+
+ q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
+ if (unlikely(!q)) {
+		drm_err(&xe->drm, "No engine present for guc_id %u", guc_id);
+ return NULL;
+ }
+
+ xe_assert(xe, guc_id >= q->guc->id);
+ xe_assert(xe, guc_id < (q->guc->id + q->width));
+
+ return q;
+}
+
+static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_DEREGISTER_CONTEXT,
+ q->guc->id,
+ };
+
+ trace_xe_exec_queue_deregister(q);
+
+ xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ u32 guc_id = msg[0];
+
+ if (unlikely(len < 2)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ q = g2h_exec_queue_lookup(guc, guc_id);
+ if (unlikely(!q))
+ return -EPROTO;
+
+ if (unlikely(!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q))) {
+ drm_err(&xe->drm, "Unexpected engine state 0x%04x",
+ atomic_read(&q->guc->state));
+ return -EPROTO;
+ }
+
+ trace_xe_exec_queue_scheduling_done(q);
+
+ if (exec_queue_pending_enable(q)) {
+ q->guc->resume_time = ktime_get();
+ clear_exec_queue_pending_enable(q);
+ smp_wmb();
+ wake_up_all(&guc->ct.wq);
+ } else {
+ clear_exec_queue_pending_disable(q);
+ if (q->guc->suspend_pending) {
+ suspend_fence_signal(q);
+ } else {
+ if (exec_queue_banned(q)) {
+ smp_wmb();
+ wake_up_all(&guc->ct.wq);
+ }
+ deregister_exec_queue(guc, q);
+ }
+ }
+
+ return 0;
+}
+
+int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ u32 guc_id = msg[0];
+
+ if (unlikely(len < 1)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ q = g2h_exec_queue_lookup(guc, guc_id);
+ if (unlikely(!q))
+ return -EPROTO;
+
+ if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
+ exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
+ drm_err(&xe->drm, "Unexpected engine state 0x%04x",
+ atomic_read(&q->guc->state));
+ return -EPROTO;
+ }
+
+ trace_xe_exec_queue_deregister_done(q);
+
+ clear_exec_queue_registered(q);
+
+ if (exec_queue_banned(q) || xe_exec_queue_is_lr(q))
+ xe_exec_queue_put(q);
+ else
+ __guc_exec_queue_fini(guc, q);
+
+ return 0;
+}
+
+int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ u32 guc_id = msg[0];
+
+ if (unlikely(len < 1)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ q = g2h_exec_queue_lookup(guc, guc_id);
+ if (unlikely(!q))
+ return -EPROTO;
+
+ drm_info(&xe->drm, "Engine reset: guc_id=%d", guc_id);
+
+ /* FIXME: Do error capture, most likely async */
+
+ trace_xe_exec_queue_reset(q);
+
+ /*
+ * A banned engine is a NOP at this point (came from
+	 * guc_exec_queue_timedout_job). Otherwise, kick the drm scheduler to
+	 * cancel jobs by setting the job's timeout to the minimum value, which
+	 * kicks guc_exec_queue_timedout_job.
+ */
+ set_exec_queue_reset(q);
+ if (!exec_queue_banned(q))
+ xe_guc_exec_queue_trigger_cleanup(q);
+
+ return 0;
+}
+
+int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
+ u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_exec_queue *q;
+ u32 guc_id = msg[0];
+
+ if (unlikely(len < 1)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ q = g2h_exec_queue_lookup(guc, guc_id);
+ if (unlikely(!q))
+ return -EPROTO;
+
+ drm_dbg(&xe->drm, "Engine memory cat error: guc_id=%d", guc_id);
+ trace_xe_exec_queue_memory_cat_error(q);
+
+ /* Treat the same as engine reset */
+ set_exec_queue_reset(q);
+ if (!exec_queue_banned(q))
+ xe_guc_exec_queue_trigger_cleanup(q);
+
+ return 0;
+}
+
+int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ u8 guc_class, instance;
+ u32 reason;
+
+ if (unlikely(len != 3)) {
+ drm_err(&xe->drm, "Invalid length %u", len);
+ return -EPROTO;
+ }
+
+ guc_class = msg[0];
+ instance = msg[1];
+ reason = msg[2];
+
+ /* Unexpected failure of a hardware feature, log an actual error */
+ drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
+ guc_class, instance, reason);
+
+ xe_gt_reset_async(guc_to_gt(guc));
+
+ return 0;
+}
+
+static void
+guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
+ struct xe_guc_submit_exec_queue_snapshot *snapshot)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc);
+ int i;
+
+ snapshot->guc.wqi_head = q->guc->wqi_head;
+ snapshot->guc.wqi_tail = q->guc->wqi_tail;
+ snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
+ snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
+ snapshot->parallel.wq_desc.status = parallel_read(xe, map,
+ wq_desc.wq_status);
+
+ if (snapshot->parallel.wq_desc.head !=
+ snapshot->parallel.wq_desc.tail) {
+ for (i = snapshot->parallel.wq_desc.head;
+ i != snapshot->parallel.wq_desc.tail;
+ i = (i + sizeof(u32)) % WQ_SIZE)
+ snapshot->parallel.wq[i / sizeof(u32)] =
+ parallel_read(xe, map, wq[i / sizeof(u32)]);
+ }
+}
+
+static void
+guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ int i;
+
+	drm_printf(p, "\tWQ head: %u (internal), %u (memory)\n",
+		   snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head);
+	drm_printf(p, "\tWQ tail: %u (internal), %u (memory)\n",
+		   snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail);
+ drm_printf(p, "\tWQ status: %u\n", snapshot->parallel.wq_desc.status);
+
+ if (snapshot->parallel.wq_desc.head !=
+ snapshot->parallel.wq_desc.tail) {
+ for (i = snapshot->parallel.wq_desc.head;
+ i != snapshot->parallel.wq_desc.tail;
+ i = (i + sizeof(u32)) % WQ_SIZE)
+ drm_printf(p, "\tWQ[%zu]: 0x%08x\n", i / sizeof(u32),
+ snapshot->parallel.wq[i / sizeof(u32)]);
+ }
+}
+
+/**
+ * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
+ * @q: Xe exec queue.
+ *
+ * The snapshot can be printed out at a later stage, e.g. during dev_coredump
+ * analysis.
+ *
+ * Returns: a GuC Submit Engine snapshot object that must be freed by the
+ * caller, using `xe_guc_exec_queue_snapshot_free`.
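+ *
+ * Typical use (guc_exec_queue_print() below does exactly this):
+ *
+ *	snapshot = xe_guc_exec_queue_snapshot_capture(q);
+ *	xe_guc_exec_queue_snapshot_print(snapshot, p);
+ *	xe_guc_exec_queue_snapshot_free(snapshot);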
+ */
+struct xe_guc_submit_exec_queue_snapshot *
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+ struct xe_guc_submit_exec_queue_snapshot *snapshot;
+ int i;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+
+ if (!snapshot) {
+ drm_err(&xe->drm, "Skipping GuC Engine snapshot entirely.\n");
+ return NULL;
+ }
+
+ snapshot->guc.id = q->guc->id;
+ memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
+ snapshot->class = q->class;
+ snapshot->logical_mask = q->logical_mask;
+ snapshot->width = q->width;
+ snapshot->refcount = kref_read(&q->refcount);
+ snapshot->sched_timeout = sched->base.timeout;
+ snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
+ snapshot->sched_props.preempt_timeout_us =
+ q->sched_props.preempt_timeout_us;
+
+ snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
+ GFP_ATOMIC);
+
+ if (!snapshot->lrc) {
+ drm_err(&xe->drm, "Skipping GuC Engine LRC snapshot.\n");
+ } else {
+ for (i = 0; i < q->width; ++i) {
+ struct xe_lrc *lrc = q->lrc + i;
+
+ snapshot->lrc[i].context_desc =
+ lower_32_bits(xe_lrc_ggtt_addr(lrc));
+ snapshot->lrc[i].head = xe_lrc_ring_head(lrc);
+ snapshot->lrc[i].tail.internal = lrc->ring.tail;
+ snapshot->lrc[i].tail.memory =
+ xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
+ snapshot->lrc[i].start_seqno = xe_lrc_start_seqno(lrc);
+ snapshot->lrc[i].seqno = xe_lrc_seqno(lrc);
+ }
+ }
+
+ snapshot->schedule_state = atomic_read(&q->guc->state);
+ snapshot->exec_queue_flags = q->flags;
+
+ snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
+ if (snapshot->parallel_execution)
+ guc_exec_queue_wq_snapshot_capture(q, snapshot);
+
+ spin_lock(&sched->base.job_list_lock);
+ snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
+ snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
+ sizeof(struct pending_list_snapshot),
+ GFP_ATOMIC);
+
+ if (!snapshot->pending_list) {
+ drm_err(&xe->drm, "Skipping GuC Engine pending_list snapshot.\n");
+ } else {
+ i = 0;
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ snapshot->pending_list[i].seqno =
+ xe_sched_job_seqno(job);
+ snapshot->pending_list[i].fence =
+ dma_fence_is_signaled(job->fence) ? 1 : 0;
+ snapshot->pending_list[i].finished =
+ dma_fence_is_signaled(&job->drm.s_fence->finished)
+ ? 1 : 0;
+ i++;
+ }
+ }
+
+ spin_unlock(&sched->base.job_list_lock);
+
+ return snapshot;
+}
+
+/**
+ * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
+ * @snapshot: GuC Submit Engine snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given GuC Submit Engine snapshot object.
+ */
+void
+xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ int i;
+
+ if (!snapshot)
+ return;
+
+ drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
+ drm_printf(p, "\tName: %s\n", snapshot->name);
+ drm_printf(p, "\tClass: %d\n", snapshot->class);
+ drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
+ drm_printf(p, "\tWidth: %d\n", snapshot->width);
+ drm_printf(p, "\tRef: %d\n", snapshot->refcount);
+ drm_printf(p, "\tTimeout: %ld (ms)\n", snapshot->sched_timeout);
+ drm_printf(p, "\tTimeslice: %u (us)\n",
+ snapshot->sched_props.timeslice_us);
+ drm_printf(p, "\tPreempt timeout: %u (us)\n",
+ snapshot->sched_props.preempt_timeout_us);
+
+ for (i = 0; snapshot->lrc && i < snapshot->width; ++i) {
+ drm_printf(p, "\tHW Context Desc: 0x%08x\n",
+ snapshot->lrc[i].context_desc);
+ drm_printf(p, "\tLRC Head: (memory) %u\n",
+ snapshot->lrc[i].head);
+ drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
+ snapshot->lrc[i].tail.internal,
+ snapshot->lrc[i].tail.memory);
+ drm_printf(p, "\tStart seqno: (memory) %d\n",
+ snapshot->lrc[i].start_seqno);
+ drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->lrc[i].seqno);
+ }
+ drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
+ drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
+
+ if (snapshot->parallel_execution)
+ guc_exec_queue_wq_snapshot_print(snapshot, p);
+
+ for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
+ i++)
+ drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
+ snapshot->pending_list[i].seqno,
+ snapshot->pending_list[i].fence,
+ snapshot->pending_list[i].finished);
+}
+
+/**
+ * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
+ * snapshot.
+ * @snapshot: GuC Submit Engine snapshot object.
+ *
+ * This function frees all the memory that was allocated at capture
+ * time.
+ */
+void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ kfree(snapshot->lrc);
+ kfree(snapshot->pending_list);
+ kfree(snapshot);
+}
+
+static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
+{
+ struct xe_guc_submit_exec_queue_snapshot *snapshot;
+
+ snapshot = xe_guc_exec_queue_snapshot_capture(q);
+ xe_guc_exec_queue_snapshot_print(snapshot, p);
+ xe_guc_exec_queue_snapshot_free(snapshot);
+}
+
+/**
+ * xe_guc_submit_print - GuC Submit Print.
+ * @guc: GuC.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function captures and prints snapshots of **all** GuC Engines.
+ */
+void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ if (!xe_device_uc_enabled(guc_to_xe(guc)))
+ return;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ guc_exec_queue_print(q, p);
+ mutex_unlock(&guc->submission_state.lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
new file mode 100644
index 000000000..fc97869c5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_SUBMIT_H_
+#define _XE_GUC_SUBMIT_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_exec_queue;
+struct xe_guc;
+
+int xe_guc_submit_init(struct xe_guc *guc);
+
+int xe_guc_submit_reset_prepare(struct xe_guc *guc);
+void xe_guc_submit_reset_wait(struct xe_guc *guc);
+int xe_guc_submit_stop(struct xe_guc *guc);
+int xe_guc_submit_start(struct xe_guc *guc);
+
+int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
+ u32 len);
+int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+struct xe_guc_submit_exec_queue_snapshot *
+xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
+void
+xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
+ struct drm_printer *p);
+void
+xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
+void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
new file mode 100644
index 000000000..649b0a852
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_GUC_SUBMIT_TYPES_H_
+#define _XE_GUC_SUBMIT_TYPES_H_
+
+#include "xe_hw_engine_types.h"
+
+/* Work item for submitting workloads into the work queue of the GuC. */
+#define WQ_STATUS_ACTIVE 1
+#define WQ_STATUS_SUSPENDED 2
+#define WQ_STATUS_CMD_ERROR 3
+#define WQ_STATUS_ENGINE_ID_NOT_USED 4
+#define WQ_STATUS_SUSPENDED_FROM_RESET 5
+#define WQ_TYPE_NOOP 0x4
+#define WQ_TYPE_MULTI_LRC 0x5
+#define WQ_TYPE_MASK GENMASK(7, 0)
+#define WQ_LEN_MASK GENMASK(26, 16)
+
+#define WQ_GUC_ID_MASK GENMASK(15, 0)
+#define WQ_RING_TAIL_MASK GENMASK(28, 18)
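+
+/*
+ * Illustrative sketch only (the actual work queue item encoding lives in the
+ * submission path, and the exact field values here are assumptions): these
+ * masks are meant to be used with FIELD_PREP()/FIELD_GET() from
+ * <linux/bitfield.h>, e.g.:
+ *
+ *	header = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
+ *		 FIELD_PREP(WQ_LEN_MASK, len_dw);
+ *	info = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
+ *	       FIELD_PREP(WQ_RING_TAIL_MASK, tail / sizeof(u64));
+ */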
+
+#define PARALLEL_SCRATCH_SIZE 2048
+#define WQ_SIZE (PARALLEL_SCRATCH_SIZE / 2)
+#define WQ_OFFSET (PARALLEL_SCRATCH_SIZE - WQ_SIZE)
+#define CACHELINE_BYTES 64
+
+struct guc_sched_wq_desc {
+ u32 head;
+ u32 tail;
+ u32 error_offset;
+ u32 wq_status;
+ u32 reserved[28];
+} __packed;
+
+struct sync_semaphore {
+ u32 semaphore;
+ u8 unused[CACHELINE_BYTES - sizeof(u32)];
+};
+
+/**
+ * struct guc_submit_parallel_scratch - A shared, mapped scratch buffer used
+ * for parallel submission.
+ */
+struct guc_submit_parallel_scratch {
+ /** @wq_desc: Guc scheduler workqueue descriptor */
+ struct guc_sched_wq_desc wq_desc;
+
+ /** @go: Go Semaphore */
+ struct sync_semaphore go;
+ /** @join: Joined semaphore for the relevant hw engine instances */
+ struct sync_semaphore join[XE_HW_ENGINE_MAX_INSTANCE];
+
+ /** @unused: Unused/Reserved memory space */
+ u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+ sizeof(struct sync_semaphore) *
+ (XE_HW_ENGINE_MAX_INSTANCE + 1)];
+
+ /** @wq: Workqueue info */
+ u32 wq[WQ_SIZE / sizeof(u32)];
+};
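+
+/*
+ * The @unused padding is sized so the whole scratch buffer is exactly
+ * PARALLEL_SCRATCH_SIZE bytes; a compile-time check like the following
+ * (a sketch, not present in the driver) would hold:
+ *
+ *	BUILD_BUG_ON(sizeof(struct guc_submit_parallel_scratch) !=
+ *		     PARALLEL_SCRATCH_SIZE);
+ */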
+
+struct lrc_snapshot {
+ u32 context_desc;
+ u32 head;
+ struct {
+ u32 internal;
+ u32 memory;
+ } tail;
+ u32 start_seqno;
+ u32 seqno;
+};
+
+struct pending_list_snapshot {
+ u32 seqno;
+ bool fence;
+ bool finished;
+};
+
+/**
+ * struct xe_guc_submit_exec_queue_snapshot - Snapshot for devcoredump
+ */
+struct xe_guc_submit_exec_queue_snapshot {
+ /** @name: name of this exec queue */
+ char name[MAX_FENCE_NAME_LEN];
+ /** @class: class of this exec queue */
+ enum xe_engine_class class;
+ /**
+	 * @logical_mask: logical mask of engines where jobs submitted to this
+	 * exec queue can run
+ */
+ u32 logical_mask;
+	/** @width: width (number of BBs submitted per exec) of this exec queue */
+ u16 width;
+ /** @refcount: ref count of this exec queue */
+ u32 refcount;
+ /**
+ * @sched_timeout: the time after which a job is removed from the
+ * scheduler.
+ */
+ long sched_timeout;
+
+ /** @sched_props: scheduling properties */
+ struct {
+ /** @timeslice_us: timeslice period in micro-seconds */
+ u32 timeslice_us;
+ /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ u32 preempt_timeout_us;
+ } sched_props;
+
+ /** @lrc: LRC Snapshot */
+ struct lrc_snapshot *lrc;
+
+ /** @schedule_state: Schedule State at the moment of Crash */
+ u32 schedule_state;
+ /** @exec_queue_flags: Flags of the faulty exec_queue */
+ unsigned long exec_queue_flags;
+
+ /** @guc: GuC Engine Snapshot */
+ struct {
+ /** @wqi_head: work queue item head */
+ u32 wqi_head;
+ /** @wqi_tail: work queue item tail */
+ u32 wqi_tail;
+ /** @id: GuC id for this exec_queue */
+ u16 id;
+ } guc;
+
+ /**
+	 * @parallel_execution: Indication of whether the failure occurred
+	 * during parallel execution
+ */
+ bool parallel_execution;
+ /** @parallel: snapshot of the useful parallel scratch */
+ struct {
+ /** @wq_desc: Workqueue description */
+ struct {
+ /** @head: Workqueue Head */
+ u32 head;
+ /** @tail: Workqueue Tail */
+ u32 tail;
+ /** @status: Workqueue Status */
+ u32 status;
+ } wq_desc;
+ /** @wq: Workqueue Items */
+ u32 wq[WQ_SIZE / sizeof(u32)];
+ } parallel;
+
+ /** @pending_list_size: Size of the pending list snapshot array */
+ int pending_list_size;
+ /** @pending_list: snapshot of the pending list info */
+ struct pending_list_snapshot *pending_list;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
new file mode 100644
index 000000000..cd80802e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_GUC_TYPES_H_
+#define _XE_GUC_TYPES_H_
+
+#include <linux/idr.h>
+#include <linux/xarray.h>
+
+#include "regs/xe_reg_defs.h"
+#include "xe_guc_ads_types.h"
+#include "xe_guc_ct_types.h"
+#include "xe_guc_fwif.h"
+#include "xe_guc_log_types.h"
+#include "xe_guc_pc_types.h"
+#include "xe_uc_fw_types.h"
+
+/**
+ * struct xe_guc - Graphics micro controller
+ */
+struct xe_guc {
+ /** @fw: Generic uC firmware management */
+ struct xe_uc_fw fw;
+ /** @log: GuC log */
+ struct xe_guc_log log;
+ /** @ads: GuC ads */
+ struct xe_guc_ads ads;
+ /** @ct: GuC ct */
+ struct xe_guc_ct ct;
+ /** @pc: GuC Power Conservation */
+ struct xe_guc_pc pc;
+ /** @submission_state: GuC submission state */
+ struct {
+		/** @exec_queue_lookup: Lookup an exec queue from guc_id */
+ struct xarray exec_queue_lookup;
+ /** @guc_ids: used to allocate new guc_ids, single-lrc */
+ struct ida guc_ids;
+ /** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
+ unsigned long *guc_ids_bitmap;
+ /** @stopped: submissions are stopped */
+ atomic_t stopped;
+ /** @lock: protects submission state */
+ struct mutex lock;
+ /** @suspend: suspend fence state */
+ struct {
+ /** @lock: suspend fences lock */
+ spinlock_t lock;
+ /** @context: suspend fences context */
+ u64 context;
+ /** @seqno: suspend fences seqno */
+ u32 seqno;
+ } suspend;
+#ifdef CONFIG_PROVE_LOCKING
+#define NUM_SUBMIT_WQ 256
+ /** @submit_wq_pool: submission ordered workqueues pool */
+ struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
+ /** @submit_wq_idx: submission ordered workqueue index */
+ int submit_wq_idx;
+#endif
+ /** @enabled: submission is enabled */
+ bool enabled;
+ } submission_state;
+ /** @hwconfig: Hardware config state */
+ struct {
+ /** @bo: buffer object of the hardware config */
+ struct xe_bo *bo;
+ /** @size: size of the hardware config */
+ u32 size;
+ } hwconfig;
+
+ /**
+ * @notify_reg: Register which is written to notify GuC of H2G messages
+ */
+ struct xe_reg notify_reg;
+ /** @params: Control params for fw initialization */
+ u32 params[GUC_CTL_MAX_DWORDS];
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
new file mode 100644
index 000000000..bfdd33b9b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2023, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/irq.h>
+#include <linux/mei_aux.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#include "xe_device_types.h"
+#include "xe_drv.h"
+#include "xe_heci_gsc.h"
+#include "xe_platform_types.h"
+
+#define GSC_BAR_LENGTH 0x00000FFC
+
+#define DG1_GSC_HECI2_BASE 0x259000
+#define PVC_GSC_HECI2_BASE 0x285000
+#define DG2_GSC_HECI2_BASE 0x374000
+
+static void heci_gsc_irq_mask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static void heci_gsc_irq_unmask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static struct irq_chip heci_gsc_irq_chip = {
+ .name = "gsc_irq_chip",
+ .irq_mask = heci_gsc_irq_mask,
+ .irq_unmask = heci_gsc_irq_unmask,
+};
+
+static int heci_gsc_irq_init(int irq)
+{
+ irq_set_chip_and_handler_name(irq, &heci_gsc_irq_chip,
+ handle_simple_irq, "heci_gsc_irq_handler");
+
+ return irq_set_chip_data(irq, NULL);
+}
+
+/**
+ * struct heci_gsc_def - graphics security controller heci interface definitions
+ *
+ * @name: name of the heci device
+ * @bar: address of the mmio bar
+ * @bar_size: size of the mmio bar
+ * @use_polling: indication that the device operates in polling mode
+ * @slow_firmware: indication of whether the device is slow (needs longer timeouts)
+ */
+struct heci_gsc_def {
+ const char *name;
+ unsigned long bar;
+ size_t bar_size;
+ bool use_polling;
+ bool slow_firmware;
+};
+
+/* gsc resources and definitions */
+static const struct heci_gsc_def heci_gsc_def_dg1 = {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+};
+
+static const struct heci_gsc_def heci_gsc_def_dg2 = {
+ .name = "mei-gscfi",
+ .bar = DG2_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+};
+
+static const struct heci_gsc_def heci_gsc_def_pvc = {
+ .name = "mei-gscfi",
+ .bar = PVC_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ .slow_firmware = true,
+};
+
+static void heci_gsc_release_dev(struct device *dev)
+{
+ struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
+ struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+
+ kfree(adev);
+}
+
+void xe_heci_gsc_fini(struct xe_device *xe)
+{
+ struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
+
+ if (!HAS_HECI_GSCFI(xe))
+ return;
+
+ if (heci_gsc->adev) {
+ struct auxiliary_device *aux_dev = &heci_gsc->adev->aux_dev;
+
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+ heci_gsc->adev = NULL;
+ }
+
+ if (heci_gsc->irq >= 0)
+ irq_free_desc(heci_gsc->irq);
+ heci_gsc->irq = -1;
+}
+
+static int heci_gsc_irq_setup(struct xe_device *xe)
+{
+ struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
+ int ret;
+
+ heci_gsc->irq = irq_alloc_desc(0);
+ if (heci_gsc->irq < 0) {
+ drm_err(&xe->drm, "gsc irq error %d\n", heci_gsc->irq);
+ return heci_gsc->irq;
+ }
+
+ ret = heci_gsc_irq_init(heci_gsc->irq);
+ if (ret < 0)
+ drm_err(&xe->drm, "gsc irq init failed %d\n", ret);
+
+ return ret;
+}
+
+static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *def)
+{
+ struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct auxiliary_device *aux_dev;
+ struct mei_aux_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+ adev->irq = heci_gsc->irq;
+ adev->bar.parent = &pdev->resource[0];
+ adev->bar.start = def->bar + pdev->resource[0].start;
+ adev->bar.end = adev->bar.start + def->bar_size - 1;
+ adev->bar.flags = IORESOURCE_MEM;
+ adev->bar.desc = IORES_DESC_NONE;
+ adev->slow_firmware = def->slow_firmware;
+
+ aux_dev = &adev->aux_dev;
+ aux_dev->name = def->name;
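+	/* Unique aux device id: PCI domain in the upper 16 bits, bus/devfn below */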
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
+ PCI_DEVID(pdev->bus->number, pdev->devfn);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = heci_gsc_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ drm_err(&xe->drm, "gsc aux init failed %d\n", ret);
+ kfree(adev);
+ return ret;
+ }
+
+ heci_gsc->adev = adev; /* needed by the notifier */
+ ret = auxiliary_device_add(aux_dev);
+ if (ret < 0) {
+ drm_err(&xe->drm, "gsc aux add failed %d\n", ret);
+ heci_gsc->adev = NULL;
+
+ /* adev will be freed with the put_device() and .release sequence */
+ auxiliary_device_uninit(aux_dev);
+ }
+ return ret;
+}
+
+void xe_heci_gsc_init(struct xe_device *xe)
+{
+ struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
+ const struct heci_gsc_def *def;
+ int ret;
+
+ if (!HAS_HECI_GSCFI(xe))
+ return;
+
+ heci_gsc->irq = -1;
+
+ if (xe->info.platform == XE_PVC) {
+ def = &heci_gsc_def_pvc;
+ } else if (xe->info.platform == XE_DG2) {
+ def = &heci_gsc_def_dg2;
+ } else if (xe->info.platform == XE_DG1) {
+ def = &heci_gsc_def_dg1;
+ } else {
+ drm_warn_once(&xe->drm, "Unknown platform\n");
+ return;
+ }
+
+ if (!def->name) {
+ drm_warn_once(&xe->drm, "HECI is not implemented!\n");
+ return;
+ }
+
+ if (!def->use_polling) {
+ ret = heci_gsc_irq_setup(xe);
+ if (ret)
+ goto fail;
+ }
+
+ ret = heci_gsc_add_device(xe, def);
+ if (ret)
+ goto fail;
+
+ return;
+fail:
+ xe_heci_gsc_fini(xe);
+}
+
+void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
+{
+ int ret;
+
+ if ((iir & GSC_IRQ_INTF(1)) == 0)
+ return;
+
+ if (!HAS_HECI_GSCFI(xe)) {
+ drm_warn_once(&xe->drm, "GSC irq: not supported");
+ return;
+ }
+
+ if (xe->heci_gsc.irq < 0)
+ return;
+
+ ret = generic_handle_irq(xe->heci_gsc.irq);
+ if (ret)
+ drm_err_ratelimited(&xe->drm, "error handling GSC irq: %d\n", ret);
+}
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
new file mode 100644
index 000000000..9db454478
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2023, Intel Corporation. All rights reserved.
+ */
+#ifndef __XE_HECI_GSC_DEV_H__
+#define __XE_HECI_GSC_DEV_H__
+
+#include <linux/types.h>
+
+struct xe_device;
+struct mei_aux_device;
+
+/*
+ * The HECI1 bit corresponds to bit15 and HECI2 to bit14.
+ * The reason for this is to allow growth for more interfaces in the future.
+ */
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
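+/* e.g. GSC_IRQ_INTF(0) == BIT(15) for HECI1, GSC_IRQ_INTF(1) == BIT(14) for HECI2 */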
+
+/**
+ * struct xe_heci_gsc - graphics security controller for xe, HECI interface
+ *
+ * @adev: pointer to mei auxiliary device structure
+ * @irq: irq number
+ *
+ */
+struct xe_heci_gsc {
+ struct mei_aux_device *adev;
+ int irq;
+};
+
+void xe_heci_gsc_init(struct xe_device *xe);
+void xe_heci_gsc_fini(struct xe_device *xe);
+void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
+
+#endif /* __XE_HECI_GSC_DEV_H__ */
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
new file mode 100644
index 000000000..01b2d0bd2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_huc.h"
+
+#include <drm/drm_managed.h>
+
+#include "abi/gsc_pxp_commands_abi.h"
+#include "regs/xe_gsc_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_uc_fw.h"
+
+static struct xe_gt *
+huc_to_gt(struct xe_huc *huc)
+{
+ return container_of(huc, struct xe_gt, uc.huc);
+}
+
+static struct xe_device *
+huc_to_xe(struct xe_huc *huc)
+{
+ return gt_to_xe(huc_to_gt(huc));
+}
+
+static struct xe_guc *
+huc_to_guc(struct xe_huc *huc)
+{
+ return &container_of(huc, struct xe_uc, huc)->guc;
+}
+
+static void free_gsc_pkt(struct drm_device *drm, void *arg)
+{
+ struct xe_huc *huc = arg;
+
+ xe_bo_unpin_map_no_vm(huc->gsc_pkt);
+ huc->gsc_pkt = NULL;
+}
+
+#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
+static int huc_alloc_gsc_pkt(struct xe_huc *huc)
+{
+ struct xe_gt *gt = huc_to_gt(huc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *bo;
+
+ /* we use a single object for both input and output */
+ bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
+ PXP43_HUC_AUTH_INOUT_SIZE * 2,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ huc->gsc_pkt = bo;
+
+ return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
+}
+
+int xe_huc_init(struct xe_huc *huc)
+{
+ struct xe_gt *gt = huc_to_gt(huc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ huc->fw.type = XE_UC_FW_TYPE_HUC;
+
+ /* On platforms with a media GT the HuC is only available there */
+ if (tile->media_gt && (gt != tile->media_gt)) {
+ xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
+ return 0;
+ }
+
+ ret = xe_uc_fw_init(&huc->fw);
+ if (ret)
+ goto out;
+
+ if (!xe_uc_fw_is_enabled(&huc->fw))
+ return 0;
+
+ if (huc->fw.has_gsc_headers) {
+ ret = huc_alloc_gsc_pkt(huc);
+ if (ret)
+ goto out;
+ }
+
+ xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+out:
+ drm_err(&xe->drm, "HuC init failed with %d", ret);
+ return ret;
+}
+
+int xe_huc_upload(struct xe_huc *huc)
+{
+ if (!xe_uc_fw_is_loadable(&huc->fw))
+ return 0;
+ return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
+}
+
+#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
+ xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
+#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
+ xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)
+
+static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
+ u32 wr_offset, u32 huc_offset, u32 huc_size)
+{
+ xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));
+
+ huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
+ huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
+ huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
+ huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
+ sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
+ huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
+ huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);
+
+ return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
+}
+
+static int huc_auth_via_gsccs(struct xe_huc *huc)
+{
+ struct xe_gt *gt = huc_to_gt(huc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *pkt = huc->gsc_pkt;
+ u32 wr_offset;
+ u32 rd_offset;
+ u64 ggtt_offset;
+ u32 out_status;
+ int retry = 5;
+ int err = 0;
+
+ if (!pkt)
+ return -ENODEV;
+
+ ggtt_offset = xe_bo_ggtt_addr(pkt);
+
+ wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
+ sizeof(struct pxp43_new_huc_auth_in));
+ wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
+ xe_bo_ggtt_addr(huc->fw.bo),
+ huc->fw.bo->size);
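+	/* Poll for the GSC reply, retrying while the request is still pending */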
+ do {
+ err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
+ ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
+ PXP43_HUC_AUTH_INOUT_SIZE);
+ if (err)
+ break;
+
+ if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
+ PXP43_HUC_AUTH_INOUT_SIZE)) {
+ err = -EBUSY;
+ msleep(50);
+ }
+ } while (--retry && err == -EBUSY);
+
+ if (err) {
+ drm_err(&xe->drm, "failed to submit GSC request to auth: %d\n", err);
+ return err;
+ }
+
+ err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
+ sizeof(struct pxp43_huc_auth_out), &rd_offset);
+ if (err) {
+ drm_err(&xe->drm, "HuC: invalid GSC reply for auth (err=%d)\n", err);
+ return err;
+ }
+
+ /*
+ * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
+ * authenticated. If the same error is ever returned with HuC not loaded
+ * we'll still catch it when we check the authentication bit later.
+ */
+ out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
+ if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
+ drm_err(&xe->drm, "auth failed with GSC error = 0x%x\n", out_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct {
+ const char *name;
+ struct xe_reg reg;
+ u32 val;
+} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
+ [XE_HUC_AUTH_VIA_GUC] = { "GuC",
+ HUC_KERNEL_LOAD_INFO,
+ HUC_LOAD_SUCCESSFUL },
+ [XE_HUC_AUTH_VIA_GSC] = { "GSC",
+ HECI_FWSTS5(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS5_HUC_AUTH_DONE },
+};
+
+bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
+{
+ struct xe_gt *gt = huc_to_gt(huc);
+
+ return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
+}
+
+int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
+{
+ struct xe_device *xe = huc_to_xe(huc);
+ struct xe_gt *gt = huc_to_gt(huc);
+ struct xe_guc *guc = huc_to_guc(huc);
+ int ret;
+
+ if (!xe_uc_fw_is_loadable(&huc->fw))
+ return 0;
+
+ /* On newer platforms the HuC survives reset, so no need to re-auth */
+ if (xe_huc_is_authenticated(huc, type)) {
+ xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
+ return 0;
+ }
+
+ if (!xe_uc_fw_is_loaded(&huc->fw))
+ return -ENOEXEC;
+
+ switch (type) {
+ case XE_HUC_AUTH_VIA_GUC:
+ ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
+ xe_uc_fw_rsa_offset(&huc->fw));
+ break;
+ case XE_HUC_AUTH_VIA_GSC:
+ ret = huc_auth_via_gsccs(huc);
+ break;
+ default:
+ XE_WARN_ON(type);
+ return -EINVAL;
+ }
+ if (ret) {
+ drm_err(&xe->drm, "Failed to trigger HuC auth via %s: %d\n",
+ huc_auth_modes[type].name, ret);
+ goto fail;
+ }
+
+ ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
+ huc_auth_modes[type].val, 100000, NULL, false);
+ if (ret) {
+ drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
+ goto fail;
+ }
+
+ xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
+ drm_dbg(&xe->drm, "HuC authenticated via %s\n", huc_auth_modes[type].name);
+
+ return 0;
+
+fail:
+ drm_err(&xe->drm, "HuC: Auth via %s failed: %d\n",
+ huc_auth_modes[type].name, ret);
+ xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
+
+ return ret;
+}
+
+void xe_huc_sanitize(struct xe_huc *huc)
+{
+ if (!xe_uc_fw_is_loadable(&huc->fw))
+ return;
+ xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
+}
+
+void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
+{
+ struct xe_gt *gt = huc_to_gt(huc);
+ int err;
+
+ xe_uc_fw_print(&huc->fw, p);
+
+ if (!xe_uc_fw_is_enabled(&huc->fw))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return;
+
+ drm_printf(p, "\nHuC status: 0x%08x\n",
+ xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
diff --git a/drivers/gpu/drm/xe/xe_huc.h b/drivers/gpu/drm/xe/xe_huc.h
new file mode 100644
index 000000000..532017230
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_huc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_HUC_H_
+#define _XE_HUC_H_
+
+#include "xe_huc_types.h"
+
+struct drm_printer;
+
+enum xe_huc_auth_types {
+ XE_HUC_AUTH_VIA_GUC = 0,
+ XE_HUC_AUTH_VIA_GSC,
+ XE_HUC_AUTH_TYPES_COUNT
+};
+
+int xe_huc_init(struct xe_huc *huc);
+int xe_huc_upload(struct xe_huc *huc);
+int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type);
+bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type);
+void xe_huc_sanitize(struct xe_huc *huc);
+void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_huc_debugfs.c b/drivers/gpu/drm/xe/xe_huc_debugfs.c
new file mode 100644
index 000000000..18585a7ee
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_huc_debugfs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_huc_debugfs.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_huc.h"
+#include "xe_macros.h"
+
+static struct xe_gt *
+huc_to_gt(struct xe_huc *huc)
+{
+ return container_of(huc, struct xe_gt, uc.huc);
+}
+
+static struct xe_device *
+huc_to_xe(struct xe_huc *huc)
+{
+ return gt_to_xe(huc_to_gt(huc));
+}
+
+static struct xe_huc *node_to_huc(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static int huc_info(struct seq_file *m, void *data)
+{
+ struct xe_huc *huc = node_to_huc(m->private);
+ struct xe_device *xe = huc_to_xe(huc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_device_mem_access_get(xe);
+ xe_huc_print_info(huc, &p);
+ xe_device_mem_access_put(xe);
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"huc_info", huc_info, 0},
+};
+
+void xe_huc_debugfs_register(struct xe_huc *huc, struct dentry *parent)
+{
+ struct drm_minor *minor = huc_to_xe(huc)->drm.primary;
+ struct drm_info_list *local;
+ int i;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&huc_to_xe(huc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = huc;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_huc_debugfs.h b/drivers/gpu/drm/xe/xe_huc_debugfs.h
new file mode 100644
index 000000000..ec58f1818
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_huc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_HUC_DEBUGFS_H_
+#define _XE_HUC_DEBUGFS_H_
+
+struct dentry;
+struct xe_huc;
+
+void xe_huc_debugfs_register(struct xe_huc *huc, struct dentry *parent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_huc_types.h b/drivers/gpu/drm/xe/xe_huc_types.h
new file mode 100644
index 000000000..cfbaa5e0d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_huc_types.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_HUC_TYPES_H_
+#define _XE_HUC_TYPES_H_
+
+#include "xe_uc_fw_types.h"
+
+struct xe_bo;
+
+/**
+ * struct xe_huc - HuC
+ */
+struct xe_huc {
+ /** @fw: Generic uC firmware management */
+ struct xe_uc_fw fw;
+
+ /** @gsc_pkt: bo to store the packet for auth via GSC */
+ struct xe_bo *gsc_pkt;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
new file mode 100644
index 000000000..1fa5cf5ee
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_hw_engine.h"
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_execlist.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_gt_ccs_mode.h"
+#include "xe_gt_topology.h"
+#include "xe_hw_fence.h"
+#include "xe_irq.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_mmio.h"
+#include "xe_reg_sr.h"
+#include "xe_rtp.h"
+#include "xe_sched_job.h"
+#include "xe_tuning.h"
+#include "xe_uc_fw.h"
+#include "xe_wa.h"
+
+#define MAX_MMIO_BASES 3
+struct engine_info {
+ const char *name;
+ unsigned int class : 8;
+ unsigned int instance : 8;
+ enum xe_force_wake_domains domain;
+ u32 mmio_base;
+};
+
+static const struct engine_info engine_infos[] = {
+ [XE_HW_ENGINE_RCS0] = {
+ .name = "rcs0",
+ .class = XE_ENGINE_CLASS_RENDER,
+ .instance = 0,
+ .domain = XE_FW_RENDER,
+ .mmio_base = RENDER_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS0] = {
+ .name = "bcs0",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 0,
+ .domain = XE_FW_RENDER,
+ .mmio_base = BLT_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS1] = {
+ .name = "bcs1",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 1,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS1_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS2] = {
+ .name = "bcs2",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 2,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS2_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS3] = {
+ .name = "bcs3",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 3,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS3_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS4] = {
+ .name = "bcs4",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 4,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS4_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS5] = {
+ .name = "bcs5",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 5,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS5_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS6] = {
+ .name = "bcs6",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 6,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS6_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS7] = {
+ .name = "bcs7",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 7,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS7_RING_BASE,
+ },
+ [XE_HW_ENGINE_BCS8] = {
+ .name = "bcs8",
+ .class = XE_ENGINE_CLASS_COPY,
+ .instance = 8,
+ .domain = XE_FW_RENDER,
+ .mmio_base = XEHPC_BCS8_RING_BASE,
+ },
+
+ [XE_HW_ENGINE_VCS0] = {
+ .name = "vcs0",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 0,
+ .domain = XE_FW_MEDIA_VDBOX0,
+ .mmio_base = BSD_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS1] = {
+ .name = "vcs1",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 1,
+ .domain = XE_FW_MEDIA_VDBOX1,
+ .mmio_base = BSD2_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS2] = {
+ .name = "vcs2",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 2,
+ .domain = XE_FW_MEDIA_VDBOX2,
+ .mmio_base = BSD3_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS3] = {
+ .name = "vcs3",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 3,
+ .domain = XE_FW_MEDIA_VDBOX3,
+ .mmio_base = BSD4_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS4] = {
+ .name = "vcs4",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 4,
+ .domain = XE_FW_MEDIA_VDBOX4,
+ .mmio_base = XEHP_BSD5_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS5] = {
+ .name = "vcs5",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 5,
+ .domain = XE_FW_MEDIA_VDBOX5,
+ .mmio_base = XEHP_BSD6_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS6] = {
+ .name = "vcs6",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 6,
+ .domain = XE_FW_MEDIA_VDBOX6,
+ .mmio_base = XEHP_BSD7_RING_BASE,
+ },
+ [XE_HW_ENGINE_VCS7] = {
+ .name = "vcs7",
+ .class = XE_ENGINE_CLASS_VIDEO_DECODE,
+ .instance = 7,
+ .domain = XE_FW_MEDIA_VDBOX7,
+ .mmio_base = XEHP_BSD8_RING_BASE,
+ },
+ [XE_HW_ENGINE_VECS0] = {
+ .name = "vecs0",
+ .class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ .instance = 0,
+ .domain = XE_FW_MEDIA_VEBOX0,
+ .mmio_base = VEBOX_RING_BASE,
+ },
+ [XE_HW_ENGINE_VECS1] = {
+ .name = "vecs1",
+ .class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ .instance = 1,
+ .domain = XE_FW_MEDIA_VEBOX1,
+ .mmio_base = VEBOX2_RING_BASE,
+ },
+ [XE_HW_ENGINE_VECS2] = {
+ .name = "vecs2",
+ .class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ .instance = 2,
+ .domain = XE_FW_MEDIA_VEBOX2,
+ .mmio_base = XEHP_VEBOX3_RING_BASE,
+ },
+ [XE_HW_ENGINE_VECS3] = {
+ .name = "vecs3",
+ .class = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ .instance = 3,
+ .domain = XE_FW_MEDIA_VEBOX3,
+ .mmio_base = XEHP_VEBOX4_RING_BASE,
+ },
+ [XE_HW_ENGINE_CCS0] = {
+ .name = "ccs0",
+ .class = XE_ENGINE_CLASS_COMPUTE,
+ .instance = 0,
+ .domain = XE_FW_RENDER,
+ .mmio_base = COMPUTE0_RING_BASE,
+ },
+ [XE_HW_ENGINE_CCS1] = {
+ .name = "ccs1",
+ .class = XE_ENGINE_CLASS_COMPUTE,
+ .instance = 1,
+ .domain = XE_FW_RENDER,
+ .mmio_base = COMPUTE1_RING_BASE,
+ },
+ [XE_HW_ENGINE_CCS2] = {
+ .name = "ccs2",
+ .class = XE_ENGINE_CLASS_COMPUTE,
+ .instance = 2,
+ .domain = XE_FW_RENDER,
+ .mmio_base = COMPUTE2_RING_BASE,
+ },
+ [XE_HW_ENGINE_CCS3] = {
+ .name = "ccs3",
+ .class = XE_ENGINE_CLASS_COMPUTE,
+ .instance = 3,
+ .domain = XE_FW_RENDER,
+ .mmio_base = COMPUTE3_RING_BASE,
+ },
+ [XE_HW_ENGINE_GSCCS0] = {
+ .name = "gsccs0",
+ .class = XE_ENGINE_CLASS_OTHER,
+ .instance = OTHER_GSC_INSTANCE,
+ .domain = XE_FW_GSC,
+ .mmio_base = GSCCS_RING_BASE,
+ },
+};
+
+static void hw_engine_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_hw_engine *hwe = arg;
+
+ if (hwe->exl_port)
+ xe_execlist_port_destroy(hwe->exl_port);
+ xe_lrc_finish(&hwe->kernel_lrc);
+
+ hwe->gt = NULL;
+}
+
+static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
+ u32 val)
+{
+ xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
+ xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
+
+ reg.addr += hwe->mmio_base;
+
+ xe_mmio_write32(hwe->gt, reg, val);
+}
+
+static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
+{
+ xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
+ xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
+
+ reg.addr += hwe->mmio_base;
+
+ return xe_mmio_read32(hwe->gt, reg);
+}
+
+void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
+{
+ u32 ccs_mask =
+ xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
+
+ if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
+ xe_mmio_write32(hwe->gt, RCU_MODE,
+ _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+
+ hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
+ hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
+ xe_bo_ggtt_addr(hwe->hwsp));
+ hw_engine_mmio_write32(hwe, RING_MODE(0),
+ _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+ hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
+ _MASKED_BIT_DISABLE(STOP_RING));
+ hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
+}
+
+static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_gt_ccs_mode_enabled(gt) &&
+ xe_rtp_match_first_render_or_compute(gt, hwe);
+}
+
+void
+xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ const u8 mocs_write_idx = gt->mocs.uc_index;
+ const u8 mocs_read_idx = gt->mocs.uc_index;
+ u32 blit_cctl_val = REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, mocs_write_idx) |
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, mocs_read_idx);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+ const struct xe_rtp_entry_sr lrc_was[] = {
+ /*
+ * Some blitter commands do not have a field for MOCS; those
+ * commands use the MOCS index pointed to by BLIT_CCTL.
+ * The BLIT_CCTL registers therefore need to be programmed to un-cached.
+ */
+ { XE_RTP_NAME("BLIT_CCTL_default_MOCS"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED),
+ ENGINE_CLASS(COPY)),
+ XE_RTP_ACTIONS(FIELD_SET(BLIT_CCTL(0),
+ BLIT_CCTL_DST_MOCS_MASK |
+ BLIT_CCTL_SRC_MOCS_MASK,
+ blit_cctl_val,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
+ {}
+ };
+
+ xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+}
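+
+/*
+ * Informal sketch of the RTP flow above: each xe_rtp_entry_sr pairs a set
+ * of rules (a graphics version range, an engine class, or a match function)
+ * with register actions to apply when those rules match. With
+ * xe_rtp_process_to_sr() the matching actions are not written immediately;
+ * they are recorded in a save-restore table (&hwe->reg_lrc here) so they
+ * can be replayed later when the default LRC state is set up.
+ */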
+
+static void
+hw_engine_setup_default_state(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ /*
+ * RING_CMD_CCTL specifies the default MOCS entry that will be
+ * used by the command streamer when executing commands that
+ * don't have a way to explicitly specify a MOCS setting.
+ * The default should usually reference whichever MOCS entry
+ * corresponds to uncached behavior, although use of a WB cached
+ * entry is recommended by the spec in certain circumstances on
+ * specific platforms.
+ * Bspec: 72161
+ */
+ const u8 mocs_write_idx = gt->mocs.uc_index;
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
+ gt->mocs.wb_index : gt->mocs.uc_index;
+ u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
+ REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, mocs_read_idx);
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+ const struct xe_rtp_entry_sr engine_entries[] = {
+ { XE_RTP_NAME("RING_CMD_CCTL_default_MOCS"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_ACTIONS(FIELD_SET(RING_CMD_CCTL(0),
+ CMD_CCTL_WRITE_OVERRIDE_MASK |
+ CMD_CCTL_READ_OVERRIDE_MASK,
+ ring_cmd_cctl_val,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ /*
+ * To allow the GSC engine to go idle on MTL we need to enable
+ * idle messaging and set the hysteresis value (we use 0xA=5us
+ * as recommended in spec). On platforms after MTL this is
+ * enabled by default.
+ */
+ { XE_RTP_NAME("MTL GSCCS IDLE MSG enable"),
+ XE_RTP_RULES(MEDIA_VERSION(1300), ENGINE_CLASS(OTHER)),
+ XE_RTP_ACTIONS(CLR(RING_PSMI_CTL(0),
+ IDLE_MSG_DISABLE,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)),
+ FIELD_SET(RING_PWRCTX_MAXCNT(0),
+ IDLE_WAIT_TIME,
+ 0xA,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ {}
+ };
+
+ xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
+}
+
+static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
+ enum xe_hw_engine_id id)
+{
+ const struct engine_info *info;
+
+ if (WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name))
+ return;
+
+ if (!(gt->info.engine_mask & BIT(id)))
+ return;
+
+ info = &engine_infos[id];
+
+ xe_gt_assert(gt, !hwe->gt);
+
+ hwe->gt = gt;
+ hwe->class = info->class;
+ hwe->instance = info->instance;
+ hwe->mmio_base = info->mmio_base;
+ hwe->domain = info->domain;
+ hwe->name = info->name;
+ hwe->fence_irq = &gt->fence_irq[info->class];
+ hwe->engine_id = id;
+
+ hwe->eclass = &gt->eclass[hwe->class];
+ if (!hwe->eclass->sched_props.job_timeout_ms) {
+ hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
+ hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
+ hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
+ hwe->eclass->sched_props.timeslice_us = 1 * 1000;
+ hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
+ hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
+ hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
+ hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
+ hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
+ /* Record default props */
+ hwe->eclass->defaults = hwe->eclass->sched_props;
+ }
+
+ xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
+ xe_tuning_process_engine(hwe);
+ xe_wa_process_engine(hwe);
+ hw_engine_setup_default_state(hwe);
+
+ xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
+ xe_reg_whitelist_process_engine(hwe);
+}
+
+static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
+ enum xe_hw_engine_id id)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ int err;
+
+ xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
+ xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
+
+ xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
+ xe_reg_sr_apply_whitelist(hwe);
+
+ hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(hwe->hwsp)) {
+ err = PTR_ERR(hwe->hwsp);
+ goto err_name;
+ }
+
+ err = xe_lrc_init(&hwe->kernel_lrc, hwe, NULL, NULL, SZ_16K);
+ if (err)
+ goto err_hwsp;
+
+ if (!xe_device_uc_enabled(xe)) {
+ hwe->exl_port = xe_execlist_port_create(xe, hwe);
+ if (IS_ERR(hwe->exl_port)) {
+ err = PTR_ERR(hwe->exl_port);
+ goto err_kernel_lrc;
+ }
+ }
+
+ if (xe_device_uc_enabled(xe))
+ xe_hw_engine_enable_ring(hwe);
+
+ /* We reserve the highest BCS instance for USM */
+ if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
+ gt->usm.reserved_bcs_instance = hwe->instance;
+
+ err = drmm_add_action_or_reset(&xe->drm, hw_engine_fini, hwe);
+ if (err)
+ return err;
+
+ return 0;
+
+err_kernel_lrc:
+ xe_lrc_finish(&hwe->kernel_lrc);
+err_hwsp:
+ xe_bo_unpin_map_no_vm(hwe->hwsp);
+err_name:
+ hwe->name = NULL;
+
+ return err;
+}
+
+static void hw_engine_setup_logical_mapping(struct xe_gt *gt)
+{
+ int class;
+
+ /* FIXME: Doing a simple logical mapping that works for most hardware */
+ for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int logical_instance = 0;
+
+ for_each_hw_engine(hwe, gt, id)
+ if (hwe->class == class)
+ hwe->logical_instance = logical_instance++;
+ }
+}
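+
+/*
+ * Illustrative example of the mapping above: if vcs0 and vcs2 are present
+ * but vcs1 was fused off, the surviving engines of the class are packed
+ * into a contiguous logical space, i.e. vcs0 -> logical 0 and vcs2 ->
+ * logical 1, while their physical instance numbers remain 0 and 2.
+ */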
+
+static void read_media_fuses(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 media_fuse;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+ int i, j;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);
+
+ /*
+ * Pre-Xe_HP platforms had register bits representing absent engines,
+ * whereas Xe_HP and beyond have bits representing present engines.
+ * Invert the polarity on old platforms so that we can use common
+ * handling below.
+ */
+ if (GRAPHICS_VERx100(xe) < 1250)
+ media_fuse = ~media_fuse;
+
+ vdbox_mask = REG_FIELD_GET(GT_VDBOX_DISABLE_MASK, media_fuse);
+ vebox_mask = REG_FIELD_GET(GT_VEBOX_DISABLE_MASK, media_fuse);
+
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ if (!(BIT(j) & vdbox_mask)) {
+ gt->info.engine_mask &= ~BIT(i);
+ drm_info(&xe->drm, "vcs%u fused off\n", j);
+ }
+ }
+
+ for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ if (!(BIT(j) & vebox_mask)) {
+ gt->info.engine_mask &= ~BIT(i);
+ drm_info(&xe->drm, "vecs%u fused off\n", j);
+ }
+ }
+}
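+
+/*
+ * Worked example for the polarity handling above: on a pre-Xe_HP part, a
+ * VDBOX fuse field of 0b0010 means "vcs1 absent"; inverting it yields the
+ * Xe_HP-style presence mask (...1101), so the common loops can simply clear
+ * any engine whose presence bit reads as zero.
+ */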
+
+static void read_copy_fuses(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 bcs_mask;
+
+ if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
+ return;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
+ bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
+
+ /* BCS0 is always present; only BCS1-BCS8 may be fused off */
+ for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ if (!(BIT(j / 2) & bcs_mask)) {
+ gt->info.engine_mask &= ~BIT(i);
+ drm_info(&xe->drm, "bcs%u fused off\n", j);
+ }
+ }
+}
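+
+/*
+ * Note on BIT(j / 2) above (illustrative): each MEML3 enable bit gates a
+ * pair of copy engines, so bank 0 covers bcs1-bcs2, bank 1 covers
+ * bcs3-bcs4, and so on; bcs0 is always present and is skipped by the loop
+ * entirely.
+ */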
+
+static void read_compute_fuses_from_dss(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ /*
+ * CCS fusing based on DSS masks only applies to platforms that can
+ * have more than one CCS.
+ */
+ if (hweight64(gt->info.engine_mask &
+ GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)) <= 1)
+ return;
+
+ /*
+ * CCS availability on Xe_HP is inferred from the presence of DSS in
+ * each quadrant.
+ */
+ for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
+ gt->info.engine_mask &= ~BIT(i);
+ drm_info(&xe->drm, "ccs%u fused off\n", j);
+ }
+ }
+}
+
+static void read_compute_fuses_from_reg(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 ccs_mask;
+
+ ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
+ ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
+
+ for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
+ if (!(gt->info.engine_mask & BIT(i)))
+ continue;
+
+ if ((ccs_mask & BIT(j)) == 0) {
+ gt->info.engine_mask &= ~BIT(i);
+ drm_info(&xe->drm, "ccs%u fused off\n", j);
+ }
+ }
+}
+
+static void read_compute_fuses(struct xe_gt *gt)
+{
+ if (GRAPHICS_VER(gt_to_xe(gt)) >= 20)
+ read_compute_fuses_from_reg(gt);
+ else
+ read_compute_fuses_from_dss(gt);
+}
+
+static void check_gsc_availability(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
+ return;
+
+ /*
+ * The GSCCS is only used to communicate with the GSC FW, so if we don't
+ * have the FW there is nothing we need the engine for, and we can
+ * therefore skip its initialization.
+ */
+ if (!xe_uc_fw_is_available(&gt->uc.gsc.fw)) {
+ gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
+ drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
+ }
+}
+
+int xe_hw_engines_init_early(struct xe_gt *gt)
+{
+ int i;
+
+ read_media_fuses(gt);
+ read_copy_fuses(gt);
+ read_compute_fuses(gt);
+ check_gsc_availability(gt);
+
+ BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
+ BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
+
+ for (i = 0; i < ARRAY_SIZE(gt->hw_engines); i++)
+ hw_engine_init_early(gt, &gt->hw_engines[i], i);
+
+ return 0;
+}
+
+int xe_hw_engines_init(struct xe_gt *gt)
+{
+ int err;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ err = hw_engine_init(gt, hwe, id);
+ if (err)
+ return err;
+ }
+
+ hw_engine_setup_logical_mapping(gt);
+
+ return 0;
+}
+
+void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
+{
+ wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);
+
+ if (hwe->irq_handler)
+ hwe->irq_handler(hwe, intr_vec);
+
+ if (intr_vec & GT_RENDER_USER_INTERRUPT)
+ xe_hw_fence_irq_run(hwe->fence_irq);
+}
+
+/**
+ * xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
+ * @hwe: Xe HW Engine.
+ *
+ * This snapshot can be printed out at a later stage, for example during
+ * dev_coredump analysis.
+ *
+ * Returns: a Xe HW Engine snapshot object that must be freed by the
+ * caller, using `xe_hw_engine_snapshot_free`.
+ */
+struct xe_hw_engine_snapshot *
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
+{
+ struct xe_hw_engine_snapshot *snapshot;
+ int len;
+
+ if (!xe_hw_engine_is_valid(hwe))
+ return NULL;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+
+ if (!snapshot)
+ return NULL;
+
+ len = strlen(hwe->name) + 1;
+ snapshot->name = kzalloc(len, GFP_ATOMIC);
+ if (snapshot->name)
+ strscpy(snapshot->name, hwe->name, len);
+
+ snapshot->class = hwe->class;
+ snapshot->logical_instance = hwe->logical_instance;
+ snapshot->forcewake.domain = hwe->domain;
+ snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
+ hwe->domain);
+ snapshot->mmio_base = hwe->mmio_base;
+
+ snapshot->reg.ring_hwstam = hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
+ snapshot->reg.ring_hws_pga = hw_engine_mmio_read32(hwe,
+ RING_HWS_PGA(0));
+ snapshot->reg.ring_execlist_status_lo =
+ hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
+ snapshot->reg.ring_execlist_status_hi =
+ hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
+ snapshot->reg.ring_execlist_sq_contents_lo =
+ hw_engine_mmio_read32(hwe,
+ RING_EXECLIST_SQ_CONTENTS_LO(0));
+ snapshot->reg.ring_execlist_sq_contents_hi =
+ hw_engine_mmio_read32(hwe,
+ RING_EXECLIST_SQ_CONTENTS_HI(0));
+ snapshot->reg.ring_start = hw_engine_mmio_read32(hwe, RING_START(0));
+ snapshot->reg.ring_head =
+ hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
+ snapshot->reg.ring_tail =
+ hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
+ snapshot->reg.ring_ctl = hw_engine_mmio_read32(hwe, RING_CTL(0));
+ snapshot->reg.ring_mi_mode =
+ hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
+ snapshot->reg.ring_mode = hw_engine_mmio_read32(hwe, RING_MODE(0));
+ snapshot->reg.ring_imr = hw_engine_mmio_read32(hwe, RING_IMR(0));
+ snapshot->reg.ring_esr = hw_engine_mmio_read32(hwe, RING_ESR(0));
+ snapshot->reg.ring_emr = hw_engine_mmio_read32(hwe, RING_EMR(0));
+ snapshot->reg.ring_eir = hw_engine_mmio_read32(hwe, RING_EIR(0));
+ snapshot->reg.ring_acthd_udw =
+ hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
+ snapshot->reg.ring_acthd = hw_engine_mmio_read32(hwe, RING_ACTHD(0));
+ snapshot->reg.ring_bbaddr_udw =
+ hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
+ snapshot->reg.ring_bbaddr = hw_engine_mmio_read32(hwe, RING_BBADDR(0));
+ snapshot->reg.ring_dma_fadd_udw =
+ hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
+ snapshot->reg.ring_dma_fadd =
+ hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
+ snapshot->reg.ipehr = hw_engine_mmio_read32(hwe, RING_IPEHR(0));
+
+ if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
+ snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);
+
+ return snapshot;
+}
+
+/**
+ * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
+ * @snapshot: Xe HW Engine snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given Xe HW Engine snapshot object.
+ */
+void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
+
+ drm_printf(p, "%s (physical), logical instance=%d\n",
+ snapshot->name ? snapshot->name : "",
+ snapshot->logical_instance);
+ drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
+ snapshot->forcewake.domain, snapshot->forcewake.ref);
+ drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
+ drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
+ drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
+ snapshot->reg.ring_execlist_status_lo);
+ drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
+ snapshot->reg.ring_execlist_status_hi);
+ drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
+ snapshot->reg.ring_execlist_sq_contents_lo);
+ drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
+ snapshot->reg.ring_execlist_sq_contents_hi);
+ drm_printf(p, "\tRING_START: 0x%08x\n", snapshot->reg.ring_start);
+ drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
+ drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
+ drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
+ drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
+ drm_printf(p, "\tRING_MODE: 0x%08x\n",
+ snapshot->reg.ring_mode);
+ drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
+ drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
+ drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
+ drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
+ drm_printf(p, "\tACTHD: 0x%08x_%08x\n", snapshot->reg.ring_acthd_udw,
+ snapshot->reg.ring_acthd);
+ drm_printf(p, "\tBBADDR: 0x%08x_%08x\n", snapshot->reg.ring_bbaddr_udw,
+ snapshot->reg.ring_bbaddr);
+ drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
+ snapshot->reg.ring_dma_fadd_udw,
+ snapshot->reg.ring_dma_fadd);
+ drm_printf(p, "\tIPEHR: 0x%08x\n\n", snapshot->reg.ipehr);
+ if (snapshot->class == XE_ENGINE_CLASS_COMPUTE)
+ drm_printf(p, "\tRCU_MODE: 0x%08x\n",
+ snapshot->reg.rcu_mode);
+}
+
+/**
+ * xe_hw_engine_snapshot_free - Free all allocated objects for a given snapshot.
+ * @snapshot: Xe HW Engine snapshot object.
+ *
+ * This function frees all the memory that was allocated at capture time.
+ */
+void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ kfree(snapshot->name);
+ kfree(snapshot);
+}
+
+/**
+ * xe_hw_engine_print - Xe HW Engine Print.
+ * @hwe: Hardware Engine.
+ * @p: drm_printer.
+ *
+ * This function quickly captures a snapshot and immediately prints it out.
+ */
+void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
+{
+ struct xe_hw_engine_snapshot *snapshot;
+
+ snapshot = xe_hw_engine_snapshot_capture(hwe);
+ xe_hw_engine_snapshot_print(snapshot, p);
+ xe_hw_engine_snapshot_free(snapshot);
+}
+
+u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
+ enum xe_engine_class engine_class)
+{
+ u32 mask = 0;
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ if (engine_infos[id].class == engine_class &&
+ gt->info.engine_mask & BIT(id))
+ mask |= BIT(engine_infos[id].instance);
+ }
+ return mask;
+}
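+
+/*
+ * Usage sketch (illustrative): with only ccs0 and ccs2 present on a GT,
+ * xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE) returns 0b101.
+ * Note that the result is a bitmask of per-class *instance* numbers, not
+ * of xe_hw_engine_id values.
+ */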
+
+bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (hwe->class == XE_ENGINE_CLASS_OTHER)
+ return true;
+
+ /* Check for engines disabled by ccs_mode setting */
+ if (xe_gt_ccs_mode_enabled(gt) &&
+ hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ hwe->logical_instance >= gt->ccs_mode)
+ return true;
+
+ return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
+ hwe->instance == gt->usm.reserved_bcs_instance;
+}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
new file mode 100644
index 000000000..71968ee2f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_HW_ENGINE_H_
+#define _XE_HW_ENGINE_H_
+
+#include "xe_hw_engine_types.h"
+
+struct drm_printer;
+
+#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
+#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
+#else
+#define XE_HW_ENGINE_JOB_TIMEOUT_MIN 1
+#endif
+#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MAX
+#define XE_HW_ENGINE_JOB_TIMEOUT_MAX CONFIG_DRM_XE_JOB_TIMEOUT_MAX
+#else
+#define XE_HW_ENGINE_JOB_TIMEOUT_MAX (10 * 1000)
+#endif
+#ifdef CONFIG_DRM_XE_TIMESLICE_MIN
+#define XE_HW_ENGINE_TIMESLICE_MIN CONFIG_DRM_XE_TIMESLICE_MIN
+#else
+#define XE_HW_ENGINE_TIMESLICE_MIN 1
+#endif
+#ifdef CONFIG_DRM_XE_TIMESLICE_MAX
+#define XE_HW_ENGINE_TIMESLICE_MAX CONFIG_DRM_XE_TIMESLICE_MAX
+#else
+#define XE_HW_ENGINE_TIMESLICE_MAX (10 * 1000 * 1000)
+#endif
+#ifdef CONFIG_DRM_XE_PREEMPT_TIMEOUT
+#define XE_HW_ENGINE_PREEMPT_TIMEOUT CONFIG_DRM_XE_PREEMPT_TIMEOUT
+#else
+#define XE_HW_ENGINE_PREEMPT_TIMEOUT (640 * 1000)
+#endif
+#ifdef CONFIG_DRM_XE_PREEMPT_TIMEOUT_MIN
+#define XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN CONFIG_DRM_XE_PREEMPT_TIMEOUT_MIN
+#else
+#define XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN 1
+#endif
+#ifdef CONFIG_DRM_XE_PREEMPT_TIMEOUT_MAX
+#define XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX CONFIG_DRM_XE_PREEMPT_TIMEOUT_MAX
+#else
+#define XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX (10 * 1000 * 1000)
+#endif
+
+int xe_hw_engines_init_early(struct xe_gt *gt);
+int xe_hw_engines_init(struct xe_gt *gt);
+void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec);
+void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe);
+u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
+ enum xe_engine_class engine_class);
+
+struct xe_hw_engine_snapshot *
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe);
+void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot);
+void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
+ struct drm_printer *p);
+void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
+void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
+
+bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe);
+static inline bool xe_hw_engine_is_valid(struct xe_hw_engine *hwe)
+{
+ return hwe->name;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
new file mode 100644
index 000000000..e49bc14f0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "xe_gt.h"
+#include "xe_hw_engine_class_sysfs.h"
+
+#define MAX_ENGINE_CLASS_NAME_LEN 16
+static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
+ struct kobject *parent);
+
+/**
+ * xe_hw_engine_timeout_in_range - Helper to check if timeout is in range
+ * @timeout: timeout to validate
+ * @min: min value of valid range
+ * @max: max value of valid range
+ *
+ * This helper validates whether a timeout lies within the min-max range of
+ * the HW engine scheduler.
+ *
+ * Returns: true if the timeout is in range, false otherwise.
+ */
+bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
+{
+ return timeout >= min && timeout <= max;
+}
+
+static void kobj_xe_hw_engine_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_type kobj_xe_hw_engine_type = {
+ .release = kobj_xe_hw_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static ssize_t job_timeout_max_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 timeout;
+ int err;
+
+ err = kstrtou32(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout < eclass->sched_props.job_timeout_min)
+ return -EINVAL;
+
+ if (!xe_hw_engine_timeout_in_range(timeout,
+ XE_HW_ENGINE_JOB_TIMEOUT_MIN,
+ XE_HW_ENGINE_JOB_TIMEOUT_MAX))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.job_timeout_max, timeout);
+
+ return count;
+}
+
+static ssize_t job_timeout_max_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_max);
+}
+
+static struct kobj_attribute job_timeout_max_attr =
+__ATTR(job_timeout_max, 0644, job_timeout_max_show, job_timeout_max_store);
+
+static ssize_t job_timeout_min_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 timeout;
+ int err;
+
+ err = kstrtou32(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > eclass->sched_props.job_timeout_max)
+ return -EINVAL;
+
+ if (!xe_hw_engine_timeout_in_range(timeout,
+ XE_HW_ENGINE_JOB_TIMEOUT_MIN,
+ XE_HW_ENGINE_JOB_TIMEOUT_MAX))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.job_timeout_min, timeout);
+
+ return count;
+}
+
+static ssize_t job_timeout_min_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_min);
+}
+
+static struct kobj_attribute job_timeout_min_attr =
+__ATTR(job_timeout_min, 0644, job_timeout_min_show, job_timeout_min_store);
+
+static ssize_t job_timeout_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 min = eclass->sched_props.job_timeout_min;
+ u32 max = eclass->sched_props.job_timeout_max;
+ u32 timeout;
+ int err;
+
+ err = kstrtou32(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (!xe_hw_engine_timeout_in_range(timeout, min, max))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.job_timeout_ms, timeout);
+
+ return count;
+}
+
+static ssize_t job_timeout_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.job_timeout_ms);
+}
+
+static struct kobj_attribute job_timeout_attr =
+__ATTR(job_timeout_ms, 0644, job_timeout_show, job_timeout_store);
+
+static ssize_t job_timeout_default(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.job_timeout_ms);
+}
+
+static struct kobj_attribute job_timeout_def =
+__ATTR(job_timeout_ms, 0444, job_timeout_default, NULL);
+
+static ssize_t job_timeout_min_default(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.job_timeout_min);
+}
+
+static struct kobj_attribute job_timeout_min_def =
+__ATTR(job_timeout_min, 0444, job_timeout_min_default, NULL);
+
+static ssize_t job_timeout_max_default(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.job_timeout_max);
+}
+
+static struct kobj_attribute job_timeout_max_def =
+__ATTR(job_timeout_max, 0444, job_timeout_max_default, NULL);
+
+static ssize_t timeslice_duration_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 min = eclass->sched_props.timeslice_min;
+ u32 max = eclass->sched_props.timeslice_max;
+ u32 duration;
+ int err;
+
+ err = kstrtou32(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (!xe_hw_engine_timeout_in_range(duration, min, max))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.timeslice_us, duration);
+
+ return count;
+}
+
+static ssize_t timeslice_duration_max_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 duration;
+ int err;
+
+ err = kstrtou32(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration < eclass->sched_props.timeslice_min)
+ return -EINVAL;
+
+ if (!xe_hw_engine_timeout_in_range(duration,
+ XE_HW_ENGINE_TIMESLICE_MIN,
+ XE_HW_ENGINE_TIMESLICE_MAX))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.timeslice_max, duration);
+
+ return count;
+}
+
+static ssize_t timeslice_duration_max_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.timeslice_max);
+}
+
+static struct kobj_attribute timeslice_duration_max_attr =
+ __ATTR(timeslice_duration_max, 0644, timeslice_duration_max_show,
+ timeslice_duration_max_store);
+
+static ssize_t timeslice_duration_min_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 duration;
+ int err;
+
+ err = kstrtou32(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > eclass->sched_props.timeslice_max)
+ return -EINVAL;
+
+ if (!xe_hw_engine_timeout_in_range(duration,
+ XE_HW_ENGINE_TIMESLICE_MIN,
+ XE_HW_ENGINE_TIMESLICE_MAX))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.timeslice_min, duration);
+
+ return count;
+}
+
+static ssize_t timeslice_duration_min_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.timeslice_min);
+}
+
+static struct kobj_attribute timeslice_duration_min_attr =
+ __ATTR(timeslice_duration_min, 0644, timeslice_duration_min_show,
+ timeslice_duration_min_store);
+
+static ssize_t timeslice_duration_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.timeslice_us);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+ __ATTR(timeslice_duration_us, 0644, timeslice_duration_show,
+ timeslice_duration_store);
+
+static ssize_t timeslice_default(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.timeslice_us);
+}
+
+static struct kobj_attribute timeslice_duration_def =
+__ATTR(timeslice_duration_us, 0444, timeslice_default, NULL);
+
+static ssize_t timeslice_min_default(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.timeslice_min);
+}
+
+static struct kobj_attribute timeslice_duration_min_def =
+__ATTR(timeslice_duration_min, 0444, timeslice_min_default, NULL);
+
+static ssize_t timeslice_max_default(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.timeslice_max);
+}
+
+static struct kobj_attribute timeslice_duration_max_def =
+__ATTR(timeslice_duration_max, 0444, timeslice_max_default, NULL);
+
+static ssize_t preempt_timeout_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 min = eclass->sched_props.preempt_timeout_min;
+ u32 max = eclass->sched_props.preempt_timeout_max;
+ u32 timeout;
+ int err;
+
+ err = kstrtou32(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (!xe_hw_engine_timeout_in_range(timeout, min, max))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.preempt_timeout_us, timeout);
+
+ return count;
+}
+
+static ssize_t preempt_timeout_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_us);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_us, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t preempt_timeout_default(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_us);
+}
+
+static struct kobj_attribute preempt_timeout_def =
+__ATTR(preempt_timeout_us, 0444, preempt_timeout_default, NULL);
+
+static ssize_t preempt_timeout_min_default(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_min);
+}
+
+static struct kobj_attribute preempt_timeout_min_def =
+__ATTR(preempt_timeout_min, 0444, preempt_timeout_min_default, NULL);
+
+static ssize_t preempt_timeout_max_default(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj->parent);
+
+ return sprintf(buf, "%u\n", eclass->defaults.preempt_timeout_max);
+}
+
+static struct kobj_attribute preempt_timeout_max_def =
+__ATTR(preempt_timeout_max, 0444, preempt_timeout_max_default, NULL);
+
+static ssize_t preempt_timeout_max_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 timeout;
+ int err;
+
+ err = kstrtou32(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout < eclass->sched_props.preempt_timeout_min)
+ return -EINVAL;
+
+ if (!xe_hw_engine_timeout_in_range(timeout,
+ XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN,
+ XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.preempt_timeout_max, timeout);
+
+ return count;
+}
+
+static ssize_t preempt_timeout_max_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_max);
+}
+
+static struct kobj_attribute preempt_timeout_max_attr =
+ __ATTR(preempt_timeout_max, 0644, preempt_timeout_max_show,
+ preempt_timeout_max_store);
+
+static ssize_t preempt_timeout_min_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+ u32 timeout;
+ int err;
+
+ err = kstrtou32(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > eclass->sched_props.preempt_timeout_max)
+ return -EINVAL;
+
+ if (!xe_hw_engine_timeout_in_range(timeout,
+ XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN,
+ XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX))
+ return -EINVAL;
+
+ WRITE_ONCE(eclass->sched_props.preempt_timeout_min, timeout);
+
+ return count;
+}
+
+static ssize_t preempt_timeout_min_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct xe_hw_engine_class_intf *eclass = kobj_to_eclass(kobj);
+
+ return sprintf(buf, "%u\n", eclass->sched_props.preempt_timeout_min);
+}
+
+static struct kobj_attribute preempt_timeout_min_attr =
+ __ATTR(preempt_timeout_min, 0644, preempt_timeout_min_show,
+ preempt_timeout_min_store);
+
+static const struct attribute *defaults[] = {
+ &job_timeout_def.attr,
+ &job_timeout_min_def.attr,
+ &job_timeout_max_def.attr,
+ &timeslice_duration_def.attr,
+ &timeslice_duration_min_def.attr,
+ &timeslice_duration_max_def.attr,
+ &preempt_timeout_def.attr,
+ &preempt_timeout_min_def.attr,
+ &preempt_timeout_max_def.attr,
+ NULL
+};
+
+static const struct attribute *files[] = {
+ &job_timeout_attr.attr,
+ &job_timeout_min_attr.attr,
+ &job_timeout_max_attr.attr,
+ &timeslice_duration_attr.attr,
+ &timeslice_duration_min_attr.attr,
+ &timeslice_duration_max_attr.attr,
+ &preempt_timeout_attr.attr,
+ &preempt_timeout_min_attr.attr,
+ &preempt_timeout_max_attr.attr,
+ NULL
+};
+
+static void kobj_xe_hw_engine_class_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_files(kobj, files);
+ kobject_put(kobj);
+}
+
+static struct kobj_eclass *
+kobj_xe_hw_engine_class(struct xe_device *xe, struct kobject *parent, char *name)
+{
+ struct kobj_eclass *keclass;
+ int err = 0;
+
+ keclass = kzalloc(sizeof(*keclass), GFP_KERNEL);
+ if (!keclass)
+ return NULL;
+
+ kobject_init(&keclass->base, &kobj_xe_hw_engine_type);
+ if (kobject_add(&keclass->base, parent, "%s", name)) {
+ kobject_put(&keclass->base);
+ return NULL;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, kobj_xe_hw_engine_class_fini,
+ &keclass->base);
+ if (err)
+ drm_warn(&xe->drm,
+ "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+ return keclass;
+}
+
+static void hw_engine_class_defaults_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_files(kobj, defaults);
+ kobject_put(kobj);
+}
+
+static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
+ struct kobject *parent)
+{
+ struct kobject *kobj;
+ int err = 0;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (!kobj)
+ return -ENOMEM;
+
+ kobject_init(kobj, &kobj_xe_hw_engine_type);
+ err = kobject_add(kobj, parent, "%s", ".defaults");
+ if (err)
+ goto err_object;
+
+ err = sysfs_create_files(kobj, defaults);
+ if (err)
+ goto err_object;
+
+ err = drmm_add_action_or_reset(&xe->drm, hw_engine_class_defaults_fini,
+ kobj);
+ if (err)
+ drm_warn(&xe->drm,
+ "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+ return err;
+err_object:
+ kobject_put(kobj);
+ return err;
+}
+
+static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void hw_engine_class_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct kobject *kobj = arg;
+
+ kobject_put(kobj);
+}
+
+/**
+ * xe_hw_engine_class_sysfs_init - Init HW engine classes on GT.
+ * @gt: Xe GT.
+ *
+ * This routine creates sysfs entries for the HW engine classes and adds
+ * methods to get/set the different scheduling properties of each class.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct kobject *kobj;
+ u16 class_mask = 0;
+ int err = 0;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (!kobj)
+ return -ENOMEM;
+
+ kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
+
+ err = kobject_add(kobj, gt->sysfs, "engines");
+ if (err)
+ goto err_object;
+
+ for_each_hw_engine(hwe, gt, id) {
+ char name[MAX_ENGINE_CLASS_NAME_LEN];
+ struct kobj_eclass *keclass;
+
+ if (hwe->class == XE_ENGINE_CLASS_OTHER ||
+ hwe->class == XE_ENGINE_CLASS_MAX)
+ continue;
+
+ if ((class_mask >> hwe->class) & 1)
+ continue;
+
+ class_mask |= 1 << hwe->class;
+
+ switch (hwe->class) {
+ case XE_ENGINE_CLASS_RENDER:
+ strcpy(name, "rcs");
+ break;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ strcpy(name, "vcs");
+ break;
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ strcpy(name, "vecs");
+ break;
+ case XE_ENGINE_CLASS_COPY:
+ strcpy(name, "bcs");
+ break;
+ case XE_ENGINE_CLASS_COMPUTE:
+ strcpy(name, "ccs");
+ break;
+ default:
+ err = -EINVAL;
+ goto err_object;
+ }
+
+ keclass = kobj_xe_hw_engine_class(xe, kobj, name);
+ if (!keclass) {
+ err = -EINVAL;
+ goto err_object;
+ }
+
+ keclass->eclass = hwe->eclass;
+ err = xe_add_hw_engine_class_defaults(xe, &keclass->base);
+ if (err) {
+ drm_warn(&xe->drm,
+ "Add .defaults to engines failed!, err: %d\n",
+ err);
+ goto err_object;
+ }
+
+ err = sysfs_create_files(&keclass->base, files);
+ if (err)
+ goto err_object;
+ }
+
+ err = drmm_add_action_or_reset(&xe->drm, hw_engine_class_sysfs_fini,
+ kobj);
+ if (err)
+ drm_warn(&xe->drm,
+ "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+
+ return err;
+err_object:
+ kobject_put(kobj);
+ return err;
+}
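+
+/*
+ * Resulting sysfs layout (sketch; the exact parent path depends on where
+ * gt->sysfs is registered under the device):
+ *
+ *   engines/
+ *     <class>/                 (rcs, bcs, vcs, vecs or ccs)
+ *       job_timeout_ms, job_timeout_min, job_timeout_max,
+ *       timeslice_duration_us, ..., preempt_timeout_us, ...
+ *       .defaults/
+ *         read-only copies of the attributes above
+ *
+ * e.g. "echo 2000 > engines/ccs/job_timeout_ms" adjusts the job timeout for
+ * the whole compute class, validated against [job_timeout_min,
+ * job_timeout_max].
+ */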
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
new file mode 100644
index 000000000..ec5ba673b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_ENGINE_CLASS_SYSFS_H_
+#define _XE_ENGINE_CLASS_SYSFS_H_
+
+#include <linux/kobject.h>
+
+struct xe_gt;
+struct xe_hw_engine_class_intf;
+
+int xe_hw_engine_class_sysfs_init(struct xe_gt *gt);
+bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max);
+
+/**
+ * struct kobj_eclass - An eclass's kobject struct that connects the kobject
+ * and the eclass.
+ *
+ * When dealing with multiple eclasses, this struct helps to identify which
+ * eclass is being addressed on a given sysfs call.
+ */
+struct kobj_eclass {
+ /** @base: The actual kobject */
+ struct kobject base;
+ /** @eclass: A pointer to the hw engine class interface */
+ struct xe_hw_engine_class_intf *eclass;
+};
+
+static inline struct xe_hw_engine_class_intf *kobj_to_eclass(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_eclass, base)->eclass;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
new file mode 100644
index 000000000..39908dec0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_HW_ENGINE_TYPES_H_
+#define _XE_HW_ENGINE_TYPES_H_
+
+#include "xe_force_wake_types.h"
+#include "xe_lrc_types.h"
+#include "xe_reg_sr_types.h"
+
+/* See "Engine ID Definition" struct in the Icelake PRM */
+enum xe_engine_class {
+ XE_ENGINE_CLASS_RENDER = 0,
+ XE_ENGINE_CLASS_VIDEO_DECODE = 1,
+ XE_ENGINE_CLASS_VIDEO_ENHANCE = 2,
+ XE_ENGINE_CLASS_COPY = 3,
+ XE_ENGINE_CLASS_OTHER = 4,
+ XE_ENGINE_CLASS_COMPUTE = 5,
+ XE_ENGINE_CLASS_MAX = 6,
+};
+
+enum xe_hw_engine_id {
+ XE_HW_ENGINE_RCS0,
+#define XE_HW_ENGINE_RCS_MASK GENMASK_ULL(XE_HW_ENGINE_RCS0, XE_HW_ENGINE_RCS0)
+ XE_HW_ENGINE_BCS0,
+ XE_HW_ENGINE_BCS1,
+ XE_HW_ENGINE_BCS2,
+ XE_HW_ENGINE_BCS3,
+ XE_HW_ENGINE_BCS4,
+ XE_HW_ENGINE_BCS5,
+ XE_HW_ENGINE_BCS6,
+ XE_HW_ENGINE_BCS7,
+ XE_HW_ENGINE_BCS8,
+#define XE_HW_ENGINE_BCS_MASK GENMASK_ULL(XE_HW_ENGINE_BCS8, XE_HW_ENGINE_BCS0)
+ XE_HW_ENGINE_VCS0,
+ XE_HW_ENGINE_VCS1,
+ XE_HW_ENGINE_VCS2,
+ XE_HW_ENGINE_VCS3,
+ XE_HW_ENGINE_VCS4,
+ XE_HW_ENGINE_VCS5,
+ XE_HW_ENGINE_VCS6,
+ XE_HW_ENGINE_VCS7,
+#define XE_HW_ENGINE_VCS_MASK GENMASK_ULL(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0)
+ XE_HW_ENGINE_VECS0,
+ XE_HW_ENGINE_VECS1,
+ XE_HW_ENGINE_VECS2,
+ XE_HW_ENGINE_VECS3,
+#define XE_HW_ENGINE_VECS_MASK GENMASK_ULL(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0)
+ XE_HW_ENGINE_CCS0,
+ XE_HW_ENGINE_CCS1,
+ XE_HW_ENGINE_CCS2,
+ XE_HW_ENGINE_CCS3,
+#define XE_HW_ENGINE_CCS_MASK GENMASK_ULL(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
+ XE_HW_ENGINE_GSCCS0,
+#define XE_HW_ENGINE_GSCCS_MASK GENMASK_ULL(XE_HW_ENGINE_GSCCS0, XE_HW_ENGINE_GSCCS0)
+ XE_NUM_HW_ENGINES,
+};
+
+/* FIXME: s/XE_HW_ENGINE_MAX_INSTANCE/XE_HW_ENGINE_MAX_COUNT */
+#define XE_HW_ENGINE_MAX_INSTANCE 9
+
+struct xe_bo;
+struct xe_execlist_port;
+struct xe_gt;
+
+/**
+ * struct xe_hw_engine_class_intf - per hw engine class struct interface
+ *
+ * Contains all the hw engine properties per engine class.
+ *
+ * @sched_props: scheduling properties
+ * @defaults: default scheduling properties
+ */
+struct xe_hw_engine_class_intf {
+ /**
+ * @sched_props: scheduling properties
+ * @defaults: default scheduling properties
+ */
+ struct {
+ /** @job_timeout_ms: job timeout in ms for engine */
+ u32 job_timeout_ms;
+ /** @job_timeout_min: Min job timeout in ms for engine */
+ u32 job_timeout_min;
+ /** @job_timeout_max: Max job timeout in ms for engine */
+ u32 job_timeout_max;
+ /** @timeslice_us: timeslice period in micro-seconds */
+ u32 timeslice_us;
+ /** @timeslice_min: min timeslice period in micro-seconds */
+ u32 timeslice_min;
+ /** @timeslice_max: max timeslice period in micro-seconds */
+ u32 timeslice_max;
+ /** @preempt_timeout_us: preemption timeout in micro-seconds */
+ u32 preempt_timeout_us;
+ /** @preempt_timeout_min: min preemption timeout in micro-seconds */
+ u32 preempt_timeout_min;
+ /** @preempt_timeout_max: max preemption timeout in micro-seconds */
+ u32 preempt_timeout_max;
+ } sched_props, defaults;
+};
+
+/**
+ * struct xe_hw_engine - Hardware engine
+ *
+ * Contains all the hardware engine state for physical instances.
+ */
+struct xe_hw_engine {
+ /** @gt: graphics tile this hw engine belongs to */
+ struct xe_gt *gt;
+ /** @name: name of this hw engine */
+ const char *name;
+ /** @class: class of this hw engine */
+ enum xe_engine_class class;
+ /** @instance: physical instance of this hw engine */
+ u16 instance;
+ /** @logical_instance: logical instance of this hw engine */
+ u16 logical_instance;
+ /** @mmio_base: MMIO base address of this hw engine*/
+ u32 mmio_base;
+ /**
+ * @reg_sr: table with registers to be restored on GT init/resume/reset
+ */
+ struct xe_reg_sr reg_sr;
+ /**
+ * @reg_whitelist: table with registers to be whitelisted
+ */
+ struct xe_reg_sr reg_whitelist;
+ /**
+ * @reg_lrc: LRC workaround registers
+ */
+ struct xe_reg_sr reg_lrc;
+ /** @domain: force wake domain of this hw engine */
+ enum xe_force_wake_domains domain;
+ /** @hwsp: hardware status page buffer object */
+ struct xe_bo *hwsp;
+ /** @kernel_lrc: Kernel LRC (should be replaced with an xe_engine) */
+ struct xe_lrc kernel_lrc;
+ /** @exl_port: execlists port */
+ struct xe_execlist_port *exl_port;
+ /** @fence_irq: fence IRQ to run when a hw engine IRQ is received */
+ struct xe_hw_fence_irq *fence_irq;
+ /** @irq_handler: IRQ handler to run when hw engine IRQ is received */
+ void (*irq_handler)(struct xe_hw_engine *hwe, u16 intr_vec);
+ /** @engine_id: id for this hw engine */
+ enum xe_hw_engine_id engine_id;
+ /** @eclass: pointer to per hw engine class interface */
+ struct xe_hw_engine_class_intf *eclass;
+};
+
+/**
+ * struct xe_hw_engine_snapshot - Hardware engine snapshot
+ *
+ * Contains the snapshot of useful hardware engine info and registers.
+ */
+struct xe_hw_engine_snapshot {
+ /** @name: name of the hw engine */
+ char *name;
+ /** @class: class of this hw engine */
+ enum xe_engine_class class;
+ /** @logical_instance: logical instance of this hw engine */
+ u16 logical_instance;
+ /** @forcewake: Force Wake information snapshot */
+ struct {
+ /** @domain: force wake domain of this hw engine */
+ enum xe_force_wake_domains domain;
+ /** @ref: Forcewake ref for the above domain */
+ int ref;
+ } forcewake;
+ /** @mmio_base: MMIO base address of this hw engine */
+ u32 mmio_base;
+ /** @reg: Useful MMIO register snapshot */
+ struct {
+ /** @ring_hwstam: RING_HWSTAM */
+ u32 ring_hwstam;
+ /** @ring_hws_pga: RING_HWS_PGA */
+ u32 ring_hws_pga;
+ /** @ring_execlist_status_lo: RING_EXECLIST_STATUS_LO */
+ u32 ring_execlist_status_lo;
+ /** @ring_execlist_status_hi: RING_EXECLIST_STATUS_HI */
+ u32 ring_execlist_status_hi;
+ /** @ring_execlist_sq_contents_lo: RING_EXECLIST_SQ_CONTENTS_LO */
+ u32 ring_execlist_sq_contents_lo;
+ /** @ring_execlist_sq_contents_hi: RING_EXECLIST_SQ_CONTENTS_HI */
+ u32 ring_execlist_sq_contents_hi;
+ /** @ring_start: RING_START */
+ u32 ring_start;
+ /** @ring_head: RING_HEAD */
+ u32 ring_head;
+ /** @ring_tail: RING_TAIL */
+ u32 ring_tail;
+ /** @ring_ctl: RING_CTL */
+ u32 ring_ctl;
+ /** @ring_mi_mode: RING_MI_MODE */
+ u32 ring_mi_mode;
+ /** @ring_mode: RING_MODE */
+ u32 ring_mode;
+ /** @ring_imr: RING_IMR */
+ u32 ring_imr;
+ /** @ring_esr: RING_ESR */
+ u32 ring_esr;
+ /** @ring_emr: RING_EMR */
+ u32 ring_emr;
+ /** @ring_eir: RING_EIR */
+ u32 ring_eir;
+ /** @ring_acthd_udw: RING_ACTHD_UDW */
+ u32 ring_acthd_udw;
+ /** @ring_acthd: RING_ACTHD */
+ u32 ring_acthd;
+ /** @ring_bbaddr_udw: RING_BBADDR_UDW */
+ u32 ring_bbaddr_udw;
+ /** @ring_bbaddr: RING_BBADDR */
+ u32 ring_bbaddr;
+ /** @ring_dma_fadd_udw: RING_DMA_FADD_UDW */
+ u32 ring_dma_fadd_udw;
+ /** @ring_dma_fadd: RING_DMA_FADD */
+ u32 ring_dma_fadd;
+ /** @ipehr: IPEHR */
+ u32 ipehr;
+ /** @rcu_mode: RCU_MODE */
+ u32 rcu_mode;
+ } reg;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
new file mode 100644
index 000000000..a5de3e7b0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_hw_fence.h"
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_hw_engine.h"
+#include "xe_macros.h"
+#include "xe_map.h"
+#include "xe_trace.h"
+
+static struct kmem_cache *xe_hw_fence_slab;
+
+int __init xe_hw_fence_module_init(void)
+{
+ xe_hw_fence_slab = kmem_cache_create("xe_hw_fence",
+ sizeof(struct xe_hw_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!xe_hw_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void xe_hw_fence_module_exit(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(xe_hw_fence_slab);
+}
+
+static struct xe_hw_fence *fence_alloc(void)
+{
+ return kmem_cache_zalloc(xe_hw_fence_slab, GFP_KERNEL);
+}
+
+static void fence_free(struct rcu_head *rcu)
+{
+ struct xe_hw_fence *fence =
+ container_of(rcu, struct xe_hw_fence, dma.rcu);
+
+ if (!WARN_ON_ONCE(!fence))
+ kmem_cache_free(xe_hw_fence_slab, fence);
+}
+
+static void hw_fence_irq_run_cb(struct irq_work *work)
+{
+ struct xe_hw_fence_irq *irq = container_of(work, typeof(*irq), work);
+ struct xe_hw_fence *fence, *next;
+ bool tmp;
+
+ tmp = dma_fence_begin_signalling();
+ spin_lock(&irq->lock);
+ if (irq->enabled) {
+ list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
+ struct dma_fence *dma_fence = &fence->dma;
+
+ trace_xe_hw_fence_try_signal(fence);
+ if (dma_fence_is_signaled_locked(dma_fence)) {
+ trace_xe_hw_fence_signal(fence);
+ list_del_init(&fence->irq_link);
+ dma_fence_put(dma_fence);
+ }
+ }
+ }
+ spin_unlock(&irq->lock);
+ dma_fence_end_signalling(tmp);
+}
+
+void xe_hw_fence_irq_init(struct xe_hw_fence_irq *irq)
+{
+ spin_lock_init(&irq->lock);
+ init_irq_work(&irq->work, hw_fence_irq_run_cb);
+ INIT_LIST_HEAD(&irq->pending);
+ irq->enabled = true;
+}
+
+void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
+{
+ struct xe_hw_fence *fence, *next;
+ unsigned long flags;
+ int err;
+ bool tmp;
+
+ if (XE_WARN_ON(!list_empty(&irq->pending))) {
+ tmp = dma_fence_begin_signalling();
+ spin_lock_irqsave(&irq->lock, flags);
+ list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
+ list_del_init(&fence->irq_link);
+ err = dma_fence_signal_locked(&fence->dma);
+ dma_fence_put(&fence->dma);
+ XE_WARN_ON(err);
+ }
+ spin_unlock_irqrestore(&irq->lock, flags);
+ dma_fence_end_signalling(tmp);
+ }
+}
+
+void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
+{
+ irq_work_queue(&irq->work);
+}
+
+void xe_hw_fence_irq_stop(struct xe_hw_fence_irq *irq)
+{
+ spin_lock_irq(&irq->lock);
+ irq->enabled = false;
+ spin_unlock_irq(&irq->lock);
+}
+
+void xe_hw_fence_irq_start(struct xe_hw_fence_irq *irq)
+{
+ spin_lock_irq(&irq->lock);
+ irq->enabled = true;
+ spin_unlock_irq(&irq->lock);
+
+ irq_work_queue(&irq->work);
+}
+
+void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
+ struct xe_hw_fence_irq *irq, const char *name)
+{
+ ctx->gt = gt;
+ ctx->irq = irq;
+ ctx->dma_fence_ctx = dma_fence_context_alloc(1);
+ ctx->next_seqno = XE_FENCE_INITIAL_SEQNO;
+ sprintf(ctx->name, "%s", name);
+}
+
+void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx)
+{
+}
+
+static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence);
+
+static struct xe_hw_fence_irq *xe_hw_fence_irq(struct xe_hw_fence *fence)
+{
+ return container_of(fence->dma.lock, struct xe_hw_fence_irq, lock);
+}
+
+static const char *xe_hw_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
+
+ return dev_name(gt_to_xe(fence->ctx->gt)->drm.dev);
+}
+
+static const char *xe_hw_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
+
+ return fence->ctx->name;
+}
+
+static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
+{
+ struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
+ struct xe_device *xe = gt_to_xe(fence->ctx->gt);
+ u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
+
+ return dma_fence->error ||
+ !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+}
+
+static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
+{
+ struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
+ struct xe_hw_fence_irq *irq = xe_hw_fence_irq(fence);
+
+ dma_fence_get(dma_fence);
+ list_add_tail(&fence->irq_link, &irq->pending);
+
+ /* SW completed (no HW IRQ) so kick handler to signal fence */
+ if (xe_hw_fence_signaled(dma_fence))
+ xe_hw_fence_irq_run(irq);
+
+ return true;
+}
+
+static void xe_hw_fence_release(struct dma_fence *dma_fence)
+{
+ struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
+
+ trace_xe_hw_fence_free(fence);
+ XE_WARN_ON(!list_empty(&fence->irq_link));
+ call_rcu(&dma_fence->rcu, fence_free);
+}
+
+static const struct dma_fence_ops xe_hw_fence_ops = {
+ .get_driver_name = xe_hw_fence_get_driver_name,
+ .get_timeline_name = xe_hw_fence_get_timeline_name,
+ .enable_signaling = xe_hw_fence_enable_signaling,
+ .signaled = xe_hw_fence_signaled,
+ .release = xe_hw_fence_release,
+};
+
+static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence)
+{
+ if (XE_WARN_ON(fence->ops != &xe_hw_fence_ops))
+ return NULL;
+
+ return container_of(fence, struct xe_hw_fence, dma);
+}
+
+struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
+ struct iosys_map seqno_map)
+{
+ struct xe_hw_fence *fence;
+
+ fence = fence_alloc();
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ fence->ctx = ctx;
+ fence->seqno_map = seqno_map;
+ INIT_LIST_HEAD(&fence->irq_link);
+
+ dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
+ ctx->dma_fence_ctx, ctx->next_seqno++);
+
+ trace_xe_hw_fence_create(fence);
+
+ return fence;
+}
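+
+/*
+ * Usage sketch (illustrative, simplified): a caller creates a fence against
+ * a context plus a seqno location (typically in the hardware status page),
+ * emits commands that eventually write the fence's seqno there, and the
+ * engine interrupt later kicks the irq worker to signal it:
+ *
+ *   fence = xe_hw_fence_create(&ctx, seqno_map);
+ *   // ... emit a seqno write of fence->dma.seqno into seqno_map ...
+ *   // on the engine interrupt path: xe_hw_fence_irq_run(ctx.irq);
+ */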
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.h b/drivers/gpu/drm/xe/xe_hw_fence.h
new file mode 100644
index 000000000..cfe5fd603
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_fence.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_HW_FENCE_H_
+#define _XE_HW_FENCE_H_
+
+#include "xe_hw_fence_types.h"
+
+/* Cause an early wrap to catch wrapping errors */
+#define XE_FENCE_INITIAL_SEQNO (-127)
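+
+/*
+ * Illustrative note: interpreted as a u32, -127 is 0xffffff81, so new fence
+ * contexts start just below the 32-bit wrap point and any mishandling of
+ * seqno wrap-around surfaces almost immediately rather than after ~4
+ * billion fences.
+ */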
+
+int xe_hw_fence_module_init(void);
+void xe_hw_fence_module_exit(void);
+
+void xe_hw_fence_irq_init(struct xe_hw_fence_irq *irq);
+void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq);
+void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq);
+void xe_hw_fence_irq_stop(struct xe_hw_fence_irq *irq);
+void xe_hw_fence_irq_start(struct xe_hw_fence_irq *irq);
+
+void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
+ struct xe_hw_fence_irq *irq, const char *name);
+void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx);
+
+struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
+ struct iosys_map seqno_map);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_fence_types.h b/drivers/gpu/drm/xe/xe_hw_fence_types.h
new file mode 100644
index 000000000..b33c4956e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_fence_types.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_HW_FENCE_TYPES_H_
+#define _XE_HW_FENCE_TYPES_H_
+
+#include <linux/dma-fence.h>
+#include <linux/iosys-map.h>
+#include <linux/irq_work.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct xe_gt;
+
+/**
+ * struct xe_hw_fence_irq - hardware fence IRQ handler
+ *
+ * One per engine class; signals completed xe_hw_fences, triggered via a hw
+ * engine interrupt. On each trigger, the list of pending fences is searched
+ * and any completed ones are signaled.
+ */
+struct xe_hw_fence_irq {
+ /** @lock: protects all xe_hw_fences + pending list */
+ spinlock_t lock;
+ /** @work: IRQ worker run to signal the fences */
+ struct irq_work work;
+ /** @pending: list of pending xe_hw_fences */
+ struct list_head pending;
+ /** @enabled: fence signaling enabled */
+ bool enabled;
+};
+
+#define MAX_FENCE_NAME_LEN 16
+
+/**
+ * struct xe_hw_fence_ctx - hardware fence context
+ *
+ * The context for a hardware fence. Has a 1:1 relationship with an
+ * xe_engine. Points to an xe_hw_fence_irq and maintains a serial seqno.
+ */
+struct xe_hw_fence_ctx {
+ /** @gt: graphics tile of hardware fence context */
+ struct xe_gt *gt;
+ /** @irq: fence irq handler */
+ struct xe_hw_fence_irq *irq;
+ /** @dma_fence_ctx: dma fence context for hardware fence */
+ u64 dma_fence_ctx;
+ /** @next_seqno: next seqno for hardware fence */
+ u32 next_seqno;
+ /** @name: name of hardware fence context */
+ char name[MAX_FENCE_NAME_LEN];
+};
+
+/**
+ * struct xe_hw_fence - hardware fence
+ *
+ * Used to indicate an xe_sched_job is complete via a seqno written to memory.
+ * Signals on error or once the seqno has passed.
+ */
+struct xe_hw_fence {
+ /** @dma: base dma fence for hardware fence context */
+ struct dma_fence dma;
+ /** @ctx: hardware fence context */
+ struct xe_hw_fence_ctx *ctx;
+ /** @seqno_map: I/O map for seqno */
+ struct iosys_map seqno_map;
+ /** @irq_link: Link in struct xe_hw_fence_irq.pending */
+ struct list_head irq_link;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
new file mode 100644
index 000000000..a6f43446c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/types.h>
+
+#include <drm/drm_managed.h>
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_mchbar_regs.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_hwmon.h"
+#include "xe_mmio.h"
+#include "xe_pcode.h"
+#include "xe_pcode_api.h"
+
+enum xe_hwmon_reg {
+ REG_PKG_RAPL_LIMIT,
+ REG_PKG_POWER_SKU,
+ REG_PKG_POWER_SKU_UNIT,
+ REG_GT_PERF_STATUS,
+ REG_PKG_ENERGY_STATUS,
+};
+
+enum xe_hwmon_reg_operation {
+ REG_READ32,
+ REG_RMW32,
+ REG_READ64,
+};
+
+/*
+ * SF_* - scale factors for particular quantities according to hwmon spec.
+ */
+#define SF_POWER 1000000 /* microwatts */
+#define SF_CURR 1000 /* milliamperes */
+#define SF_VOLTAGE 1000 /* millivolts */
+#define SF_ENERGY 1000000 /* microjoules */
+#define SF_TIME 1000 /* milliseconds */
+
+/**
+ * struct xe_hwmon_energy_info - to accumulate energy
+ */
+struct xe_hwmon_energy_info {
+ /** @reg_val_prev: previous energy reg val */
+ u32 reg_val_prev;
+ /** @accum_energy: accumulated energy */
+ long accum_energy;
+};
+
+/**
+ * struct xe_hwmon - xe hwmon data structure
+ */
+struct xe_hwmon {
+ /** @hwmon_dev: hwmon device for xe */
+ struct device *hwmon_dev;
+ /** @gt: primary gt */
+ struct xe_gt *gt;
+	/** @hwmon_lock: lock for rw attributes */
+ struct mutex hwmon_lock;
+ /** @scl_shift_power: pkg power unit */
+ int scl_shift_power;
+ /** @scl_shift_energy: pkg energy unit */
+ int scl_shift_energy;
+ /** @scl_shift_time: pkg time unit */
+ int scl_shift_time;
+ /** @ei: Energy info for energy1_input */
+ struct xe_hwmon_energy_info ei;
+};
+
+static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
+{
+ struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_reg reg = XE_REG(0);
+
+ switch (hwmon_reg) {
+ case REG_PKG_RAPL_LIMIT:
+ if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_RAPL_LIMIT;
+ else if (xe->info.platform == XE_PVC)
+ reg = PVC_GT0_PACKAGE_RAPL_LIMIT;
+ break;
+ case REG_PKG_POWER_SKU:
+ if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_POWER_SKU;
+ else if (xe->info.platform == XE_PVC)
+ reg = PVC_GT0_PACKAGE_POWER_SKU;
+ break;
+ case REG_PKG_POWER_SKU_UNIT:
+ if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_POWER_SKU_UNIT;
+ else if (xe->info.platform == XE_PVC)
+ reg = PVC_GT0_PACKAGE_POWER_SKU_UNIT;
+ break;
+ case REG_GT_PERF_STATUS:
+ if (xe->info.platform == XE_DG2)
+ reg = GT_PERF_STATUS;
+ break;
+ case REG_PKG_ENERGY_STATUS:
+ if (xe->info.platform == XE_DG2)
+ reg = PCU_CR_PACKAGE_ENERGY_STATUS;
+ else if (xe->info.platform == XE_PVC)
+ reg = PVC_GT0_PLATFORM_ENERGY_STATUS;
+ break;
+ default:
+ drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
+ break;
+ }
+
+ return reg.raw;
+}
+
+static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
+ enum xe_hwmon_reg_operation operation, u64 *value,
+ u32 clr, u32 set)
+{
+ struct xe_reg reg;
+
+ reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
+
+ if (!reg.raw)
+ return;
+
+ switch (operation) {
+ case REG_READ32:
+ *value = xe_mmio_read32(hwmon->gt, reg);
+ break;
+ case REG_RMW32:
+ *value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
+ break;
+ case REG_READ64:
+ *value = xe_mmio_read64_2x32(hwmon->gt, reg);
+ break;
+ default:
+ drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
+ operation);
+ break;
+ }
+}
+
+#define PL1_DISABLE 0
+
+/*
+ * HW allows arbitrary PL1 limits to be set but silently clamps these values to
+ * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
+ * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
+ * clamped values when read.
+ */
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
+{
+ u64 reg_val, min, max;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);
+ /* Check if PL1 limit is disabled */
+ if (!(reg_val & PKG_PWR_LIM_1_EN)) {
+ *value = PL1_DISABLE;
+ goto unlock;
+ }
+
+ reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
+ *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);
+ min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
+ min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
+ max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
+
+ if (min && max)
+ *value = clamp_t(u64, *value, min, max);
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+}
+
+static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
+{
+ int ret = 0;
+ u64 reg_val;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
+ if (value == PL1_DISABLE) {
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
+ PKG_PWR_LIM_1_EN, 0);
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
+ PKG_PWR_LIM_1_EN, 0);
+
+ if (reg_val & PKG_PWR_LIM_1_EN) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
+ }
+
+ /* Computation in 64-bits to avoid overflow. Round to nearest. */
+ reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
+ reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
+{
+ u64 reg_val;
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0);
+ reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
+ *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+}
+
+/*
+ * xe_hwmon_energy_get - Obtain energy value
+ *
+ * The underlying energy hardware register is 32-bits and is subject to
+ * overflow. How long before overflow? For example, with an example
+ * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
+ * a power draw of 1000 watts, the 32-bit counter will overflow in
+ * approximately 4.36 minutes.
+ *
+ * Examples:
+ * 1 watt: (2^32 >> 14) / 1 W / (60 * 60 * 24) secs/day -> 3 days
+ * 1000 watts: (2^32 >> 14) / 1000 W / 60 secs/min -> 4.36 minutes
+ *
+ * The function significantly increases overflow duration (from 4.36
+ * minutes) by accumulating the energy register into a 'long' as allowed by
+ * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
+ * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
+ * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
+ * energy1_input overflows. This at 1000 W is an overflow duration of 278 years.
+ */
+static void
+xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
+{
+ struct xe_hwmon_energy_info *ei = &hwmon->ei;
+ u64 reg_val;
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
+ &reg_val, 0, 0);
+
+ if (reg_val >= ei->reg_val_prev)
+ ei->accum_energy += reg_val - ei->reg_val_prev;
+ else
+ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+
+ ei->reg_val_prev = reg_val;
+
+ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+ hwmon->scl_shift_energy);
+}
+
+static ssize_t
+xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 x, y, x_w = 2; /* 2 bits */
+ u64 r, tau4, out;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ REG_READ32, &r, 0, 0);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
+ y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
+
+ /*
+ * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
+ * = (4 | x) << (y - 2)
+ *
+	 * Here (y - 2) ensures a 1.x fixed point representation.
+	 * As x is 2 bits, 1.x can be 1.0, 1.25, 1.50 or 1.75
+ *
+ * As y can be < 2, we compute tau4 = (4 | x) << y
+ * and then add 2 when doing the final right shift to account for units
+ */
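+	/*
+	 * Worked example (illustrative): x = 2, y = 5 gives
+	 * tau4 = (4 | 2) << 5 = 192, i.e. tau = 1.5 * 2^5 = 48 hw time units;
+	 * with the default scl_shift_time of 0xa this is reported as
+	 * 192 * 1000 >> 12 = 46 ms (truncating division).
+	 */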
+ tau4 = (u64)((1 << x_w) | x) << y;
+
+ /* val in hwmon interface units (millisec) */
+ out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ return sysfs_emit(buf, "%llu\n", out);
+}
+
+static ssize_t
+xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ u32 x, y, rxy, x_w = 2; /* 2 bits */
+ u64 tau4, r, max_win;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
+ * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
+ *
+ * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
+	 * However, existing discrete GPUs have been observed not to provide a
+	 * correct PKG_MAX_WIN value, so a default constant value is used instead.
+	 * For future discrete GPUs this may get resolved, in which case
+	 * PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
+ */
+#define PKG_MAX_WIN_DEFAULT 0x12ull
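+
+	/*
+	 * Sanity check of the constant above (illustrative): y = 0x12, x = 0
+	 * gives tau4 = 4 << 18 = 1048576, and with scl_shift_time = 0xa the
+	 * reported window is 1048576 * 1000 >> 12 = 256000 ms, i.e. the
+	 * 256 seconds mentioned above.
+	 */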
+
+ /*
+ * val must be < max in hwmon interface units. The steps below are
+ * explained in xe_hwmon_power1_max_interval_show()
+ */
+ r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
+ x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
+ y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
+ tau4 = (u64)((1 << x_w) | x) << y;
+ max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
+
+ if (val > max_win)
+ return -EINVAL;
+
+ /* val in hw units */
+ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
+
+ /*
+ * Convert val to 1.x * power(2,y)
+ * y = ilog2(val)
+ * x = (val - (1 << y)) >> (y - 2)
+ */
+ if (!val) {
+ y = 0;
+ x = 0;
+ } else {
+ y = ilog2(val);
+ x = (val - (1ul << y)) << x_w >> y;
+ }
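+
+	/*
+	 * Worked example (illustrative): val = 48 hw units gives y = ilog2(48) = 5
+	 * and x = (48 - 32) << 2 >> 5 = 2, encoding exactly 1.5 * 2^5 = 48.
+	 */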
+
+ rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, (u64 *)&r,
+ PKG_PWR_LIM_1_TIME, rxy);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
+ xe_hwmon_power1_max_interval_show,
+ xe_hwmon_power1_max_interval_store, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_power1_max_interval.dev_attr.attr,
+ NULL
+};
+
+static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret = 0;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
+ ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return ret;
+}
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+ .is_visible = xe_hwmon_attributes_visible,
+};
+
+static const struct attribute_group *hwmon_groups[] = {
+ &hwmon_attrgroup,
+ NULL
+};
+
+static const struct hwmon_channel_info *hwmon_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
+ NULL
+};
+
+/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
+static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
+{
+ /* Avoid Illegal Subcommand error */
+ if (gt_to_xe(gt)->info.platform == XE_DG2)
+ return -ENXIO;
+
+ return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ POWER_SETUP_SUBCOMMAND_READ_I1, 0),
+ uval, NULL);
+}
+
+static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
+{
+ return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
+ uval);
+}
+
+static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, long *value, u32 scale_factor)
+{
+ int ret;
+ u32 uval;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+ if (ret)
+ goto unlock;
+
+ *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
+ scale_factor, POWER_SETUP_I1_SHIFT);
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
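+
+/*
+ * Illustrative example, assuming POWER_SETUP_I1_SHIFT is 6 (a 1/64-unit
+ * fixed point fraction): an I1 data field of 1280 represents 20.0 A and is
+ * reported as 1280 * 1000 >> 6 = 20000 mA when scale_factor is SF_CURR.
+ */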
+
+static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, long value, u32 scale_factor)
+{
+ int ret;
+ u32 uval;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
+ ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
+{
+ u64 reg_val;
+
+ xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
+ REG_READ32, &reg_val, 0, 0);
+ /* HW register value in units of 2.5 millivolt */
+ *value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
+}
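+
+/*
+ * Illustrative example: a VOLTAGE_MASK field of 600 means 600 * 2.5 mV,
+ * reported as DIV_ROUND_CLOSEST(600 * 2500, 1000) = 1500, in the millivolt
+ * units hwmon expects for in_input.
+ */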
+
+static umode_t
+xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int chan)
+{
+ u32 uval;
+
+ switch (attr) {
+ case hwmon_power_max:
+ return xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? 0664 : 0;
+ case hwmon_power_rated_max:
+ return xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU) ? 0444 : 0;
+ case hwmon_power_crit:
+ return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
+{
+ switch (attr) {
+ case hwmon_power_max:
+ xe_hwmon_power_max_read(hwmon, val);
+ return 0;
+ case hwmon_power_rated_max:
+ xe_hwmon_power_rated_max_read(hwmon, val);
+ return 0;
+ case hwmon_power_crit:
+ return xe_hwmon_power_curr_crit_read(hwmon, val, SF_POWER);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int chan, long val)
+{
+ switch (attr) {
+ case hwmon_power_max:
+ return xe_hwmon_power_max_write(hwmon, val);
+ case hwmon_power_crit:
+ return xe_hwmon_power_curr_crit_write(hwmon, val, SF_POWER);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
+xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr)
+{
+ u32 uval;
+
+ switch (attr) {
+ case hwmon_curr_crit:
+ return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+{
+ switch (attr) {
+ case hwmon_curr_crit:
+ return xe_hwmon_power_curr_crit_read(hwmon, val, SF_CURR);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, long val)
+{
+ switch (attr) {
+ case hwmon_curr_crit:
+ return xe_hwmon_power_curr_crit_write(hwmon, val, SF_CURR);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
+xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ return xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS) ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ xe_hwmon_get_voltage(hwmon, val);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
+xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr)
+{
+ switch (attr) {
+ case hwmon_energy_input:
+ return xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS) ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, long *val)
+{
+ switch (attr) {
+ case hwmon_energy_input:
+ xe_hwmon_energy_get(hwmon, val);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
+xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
+ int ret;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ switch (type) {
+ case hwmon_power:
+ ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
+ break;
+ case hwmon_curr:
+ ret = xe_hwmon_curr_is_visible(hwmon, attr);
+ break;
+ case hwmon_in:
+ ret = xe_hwmon_in_is_visible(hwmon, attr);
+ break;
+ case hwmon_energy:
+ ret = xe_hwmon_energy_is_visible(hwmon, attr);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return ret;
+}
+
+static int
+xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ switch (type) {
+ case hwmon_power:
+ ret = xe_hwmon_power_read(hwmon, attr, channel, val);
+ break;
+ case hwmon_curr:
+ ret = xe_hwmon_curr_read(hwmon, attr, val);
+ break;
+ case hwmon_in:
+ ret = xe_hwmon_in_read(hwmon, attr, val);
+ break;
+ case hwmon_energy:
+ ret = xe_hwmon_energy_read(hwmon, attr, val);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return ret;
+}
+
+static int
+xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+
+ switch (type) {
+ case hwmon_power:
+ ret = xe_hwmon_power_write(hwmon, attr, channel, val);
+ break;
+ case hwmon_curr:
+ ret = xe_hwmon_curr_write(hwmon, attr, val);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+
+ return ret;
+}
+
+static const struct hwmon_ops hwmon_ops = {
+ .is_visible = xe_hwmon_is_visible,
+ .read = xe_hwmon_read,
+ .write = xe_hwmon_write,
+};
+
+static const struct hwmon_chip_info hwmon_chip_info = {
+ .ops = &hwmon_ops,
+ .info = hwmon_info,
+};
+
+static void
+xe_hwmon_get_preregistration_info(struct xe_device *xe)
+{
+ struct xe_hwmon *hwmon = xe->hwmon;
+ long energy;
+ u64 val_sku_unit = 0;
+
+ /*
+ * The contents of register PKG_POWER_SKU_UNIT do not change,
+ * so read it once and store the shift values.
+ */
+ if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
+ xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
+ REG_READ32, &val_sku_unit, 0, 0);
+ hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
+ hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
+ hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ }
+
+ /*
+ * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
+ * first value of the energy register read
+ */
+ if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, 0))
+ xe_hwmon_energy_get(hwmon, &energy);
+}
+
+static void xe_hwmon_mutex_destroy(void *arg)
+{
+ struct xe_hwmon *hwmon = arg;
+
+ mutex_destroy(&hwmon->hwmon_lock);
+}
+
+void xe_hwmon_register(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+ struct xe_hwmon *hwmon;
+
+ /* hwmon is available only for dGfx */
+ if (!IS_DGFX(xe))
+ return;
+
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return;
+
+ xe->hwmon = hwmon;
+
+ mutex_init(&hwmon->hwmon_lock);
+ if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
+ return;
+
+ /* primary GT to access device level properties */
+ hwmon->gt = xe->tiles[0].primary_gt;
+
+ xe_hwmon_get_preregistration_info(xe);
+
+ drm_dbg(&xe->drm, "Register xe hwmon interface\n");
+
+ /* hwmon_dev points to device hwmon<i> */
+ hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
+ &hwmon_chip_info,
+ hwmon_groups);
+
+ if (IS_ERR(hwmon->hwmon_dev)) {
+ drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
+ xe->hwmon = NULL;
+ return;
+ }
+}
+
diff --git a/drivers/gpu/drm/xe/xe_hwmon.h b/drivers/gpu/drm/xe/xe_hwmon.h
new file mode 100644
index 000000000..c42a1de2c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hwmon.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_HWMON_H_
+#define _XE_HWMON_H_
+
+#include <linux/types.h>
+
+struct xe_device;
+
+#if IS_REACHABLE(CONFIG_HWMON)
+void xe_hwmon_register(struct xe_device *xe);
+#else
+static inline void xe_hwmon_register(struct xe_device *xe) { }
+#endif
+
+#endif /* _XE_HWMON_H_ */
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
new file mode 100644
index 000000000..d1f5ba4bb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_irq.h"
+
+#include <linux/sched/clock.h>
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_device.h"
+#include "xe_display.h"
+#include "xe_drv.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_hw_engine.h"
+#include "xe_mmio.h"
+
+/*
+ * Interrupt registers for a unit are always consecutive and ordered
+ * ISR, IMR, IIR, IER.
+ */
+#define IMR(offset) XE_REG(offset + 0x4)
+#define IIR(offset) XE_REG(offset + 0x8)
+#define IER(offset) XE_REG(offset + 0xc)
+
+static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
+{
+ u32 val = xe_mmio_read32(mmio, reg);
+
+ if (val == 0)
+ return;
+
+ drm_WARN(&gt_to_xe(mmio)->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ reg.addr, val);
+ xe_mmio_write32(mmio, reg, 0xffffffff);
+ xe_mmio_read32(mmio, reg);
+ xe_mmio_write32(mmio, reg, 0xffffffff);
+ xe_mmio_read32(mmio, reg);
+}
+
+/*
+ * Unmask and enable the specified interrupts. Does not check current state,
+ * so any bits not specified here will become masked and disabled.
+ */
+static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
+{
+ struct xe_gt *mmio = tile->primary_gt;
+
+ /*
+ * If we're just enabling an interrupt now, it shouldn't already
+ * be raised in the IIR.
+ */
+ assert_iir_is_zero(mmio, IIR(irqregs));
+
+ xe_mmio_write32(mmio, IER(irqregs), bits);
+ xe_mmio_write32(mmio, IMR(irqregs), ~bits);
+
+ /* Posting read */
+ xe_mmio_read32(mmio, IMR(irqregs));
+}
+
+/* Mask and disable all interrupts. */
+static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
+{
+ struct xe_gt *mmio = tile->primary_gt;
+
+ xe_mmio_write32(mmio, IMR(irqregs), ~0);
+ /* Posting read */
+ xe_mmio_read32(mmio, IMR(irqregs));
+
+ xe_mmio_write32(mmio, IER(irqregs), 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ xe_mmio_write32(mmio, IIR(irqregs), ~0);
+ xe_mmio_read32(mmio, IIR(irqregs));
+ xe_mmio_write32(mmio, IIR(irqregs), ~0);
+ xe_mmio_read32(mmio, IIR(irqregs));
+}
+
+static u32 xelp_intr_disable(struct xe_device *xe)
+{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
+
+ xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+	 * and will generate a new interrupt after the master is re-enabled.
+ */
+ return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
+}
+
+static u32
+gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
+{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ u32 iir;
+
+ if (!(master_ctl & GU_MISC_IRQ))
+ return 0;
+
+ iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
+ if (likely(iir))
+ xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);
+
+ return iir;
+}
+
+static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
+{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
+
+ xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
+ if (stall)
+ xe_mmio_read32(mmio, GFX_MSTR_IRQ);
+}
+
+/* Enable/unmask the HWE interrupts for a specific GT's engines. */
+void xe_irq_enable_hwe(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 ccs_mask, bcs_mask;
+ u32 irqs, dmask, smask;
+ u32 gsc_mask = 0;
+
+ if (xe_device_uc_enabled(xe)) {
+ irqs = GT_RENDER_USER_INTERRUPT |
+ GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+ } else {
+ irqs = GT_RENDER_USER_INTERRUPT |
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
+ }
+
+ ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
+ bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
+
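+	/*
+	 * Note (illustrative): each 32-bit enable/mask register below packs two
+	 * engines, one per 16-bit half; dmask targets both halves, smask only
+	 * the upper one.
+	 */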
+ dmask = irqs << 16 | irqs;
+ smask = irqs << 16;
+
+ if (!xe_gt_is_media_type(gt)) {
+ /* Enable interrupts for each engine class */
+ xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
+ if (ccs_mask)
+ xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);
+
+ /* Unmask interrupts for each engine instance */
+ xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
+ xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
+ if (bcs_mask & (BIT(1)|BIT(2)))
+ xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+ if (bcs_mask & (BIT(3)|BIT(4)))
+ xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+ if (bcs_mask & (BIT(5)|BIT(6)))
+ xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+ if (bcs_mask & (BIT(7)|BIT(8)))
+ xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
+ if (ccs_mask & (BIT(0)|BIT(1)))
+ xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
+ if (ccs_mask & (BIT(2)|BIT(3)))
+ xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
+ }
+
+ if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
+ /* Enable interrupts for each engine class */
+ xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);
+
+ /* Unmask interrupts for each engine instance */
+ xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
+ xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
+
+ if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER))
+ gsc_mask = irqs;
+ else if (HAS_HECI_GSCFI(xe))
+ gsc_mask = GSC_IRQ_INTF(1);
+ if (gsc_mask) {
+ xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask);
+ xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
+ }
+ }
+}
+
+static u32
+gt_engine_identity(struct xe_device *xe,
+ struct xe_gt *mmio,
+ const unsigned int bank,
+ const unsigned int bit)
+{
+ u32 timeout_ts;
+ u32 ident;
+
+ lockdep_assert_held(&xe->irq.lock);
+
+ xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
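+	/* local_clock() returns ns; >> 10 approximates microseconds */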
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
+ } while (!(ident & INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & INTR_DATA_VALID))) {
+ drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);
+
+ return ident;
+}
+
+#define OTHER_MEDIA_GUC_INSTANCE 16
+
+static void
+gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
+{
+ if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
+ return xe_guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
+ return xe_guc_irq_handler(&gt->uc.guc, iir);
+
+ if (instance != OTHER_GUC_INSTANCE &&
+ instance != OTHER_MEDIA_GUC_INSTANCE) {
+ WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+ instance, iir);
+ }
+}
+
+static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
+ enum xe_engine_class class,
+ unsigned int instance)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+
+ if (MEDIA_VER(xe) < 13)
+ return tile->primary_gt;
+
+ if (class == XE_ENGINE_CLASS_VIDEO_DECODE ||
+ class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ return tile->media_gt;
+
+ if (class == XE_ENGINE_CLASS_OTHER &&
+ (instance == OTHER_MEDIA_GUC_INSTANCE || instance == OTHER_GSC_INSTANCE))
+ return tile->media_gt;
+
+ return tile->primary_gt;
+}
+
+static void gt_irq_handler(struct xe_tile *tile,
+ u32 master_ctl, unsigned long *intr_dw,
+ u32 *identity)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *mmio = tile->primary_gt;
+ unsigned int bank, bit;
+ u16 instance, intr_vec;
+ enum xe_engine_class class;
+ struct xe_hw_engine *hwe;
+
+ spin_lock(&xe->irq.lock);
+
+ for (bank = 0; bank < 2; bank++) {
+ if (!(master_ctl & GT_DW_IRQ(bank)))
+ continue;
+
+ intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
+ for_each_set_bit(bit, intr_dw + bank, 32)
+ identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
+ xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);
+
+ for_each_set_bit(bit, intr_dw + bank, 32) {
+ struct xe_gt *engine_gt;
+
+ class = INTR_ENGINE_CLASS(identity[bit]);
+ instance = INTR_ENGINE_INSTANCE(identity[bit]);
+ intr_vec = INTR_ENGINE_INTR(identity[bit]);
+
+ engine_gt = pick_engine_gt(tile, class, instance);
+
+ hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
+ if (hwe) {
+ xe_hw_engine_handle_irq(hwe, intr_vec);
+ continue;
+ }
+
+ if (class == XE_ENGINE_CLASS_OTHER) {
+ /* HECI GSCFI interrupts come from outside of GT */
+ if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
+ xe_heci_gsc_irq_handler(xe, intr_vec);
+ else
+ gt_other_irq_handler(engine_gt, instance, intr_vec);
+ continue;
+ }
+ }
+ }
+
+ spin_unlock(&xe->irq.lock);
+}
+
+/*
+ * Top-level interrupt handler for Xe_LP platforms (which did not have
+ * a "master tile" interrupt register).
+ */
+static irqreturn_t xelp_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ u32 master_ctl, gu_misc_iir;
+ unsigned long intr_dw[2];
+ u32 identity[32];
+
+ spin_lock(&xe->irq.lock);
+ if (!xe->irq.enabled) {
+ spin_unlock(&xe->irq.lock);
+ return IRQ_NONE;
+ }
+ spin_unlock(&xe->irq.lock);
+
+ master_ctl = xelp_intr_disable(xe);
+ if (!master_ctl) {
+ xelp_intr_enable(xe, false);
+ return IRQ_NONE;
+ }
+
+ gt_irq_handler(tile, master_ctl, intr_dw, identity);
+
+ xe_display_irq_handler(xe, master_ctl);
+
+ gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
+
+ xelp_intr_enable(xe, false);
+
+ xe_display_irq_enable(xe, gu_misc_iir);
+
+ return IRQ_HANDLED;
+}
+
+static u32 dg1_intr_disable(struct xe_device *xe)
+{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ u32 val;
+
+ /* First disable interrupts */
+ xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);
+
+ /* Get the indication levels and ack the master unit */
+ val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
+ if (unlikely(!val))
+ return 0;
+
+ xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);
+
+ return val;
+}
+
+static void dg1_intr_enable(struct xe_device *xe, bool stall)
+{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
+
+ xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
+ if (stall)
+ xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
+}
+
+/*
+ * Top-level interrupt handler for Xe_LP+ and beyond. These platforms have
+ * a "master tile" interrupt register which must be consulted before the
+ * "graphics master" interrupt register.
+ */
+static irqreturn_t dg1_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
+ unsigned long intr_dw[2];
+ u32 identity[32];
+ u8 id;
+
+ /* TODO: This really shouldn't be copied+pasted */
+
+ spin_lock(&xe->irq.lock);
+ if (!xe->irq.enabled) {
+ spin_unlock(&xe->irq.lock);
+ return IRQ_NONE;
+ }
+ spin_unlock(&xe->irq.lock);
+
+ master_tile_ctl = dg1_intr_disable(xe);
+ if (!master_tile_ctl) {
+ dg1_intr_enable(xe, false);
+ return IRQ_NONE;
+ }
+
+ for_each_tile(tile, xe, id) {
+ struct xe_gt *mmio = tile->primary_gt;
+
+ if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
+ continue;
+
+ master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);
+
+ /*
+ * We might be in irq handler just when PCIe DPC is initiated
+ * and all MMIO reads will be returned with all 1's. Ignore this
+ * irq as device is inaccessible.
+ */
+ if (master_ctl == REG_GENMASK(31, 0)) {
+ dev_dbg(tile_to_xe(tile)->drm.dev,
+ "Ignore this IRQ as device might be in DPC containment.\n");
+ return IRQ_HANDLED;
+ }
+
+ xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);
+
+ gt_irq_handler(tile, master_ctl, intr_dw, identity);
+
+ /*
+ * Display interrupts (including display backlight operations
+ * that get reported as Gunit GSE) would only be hooked up to
+ * the primary tile.
+ */
+ if (id == 0) {
+ xe_display_irq_handler(xe, master_ctl);
+ gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
+ }
+ }
+
+ dg1_intr_enable(xe, false);
+ xe_display_irq_enable(xe, gu_misc_iir);
+
+ return IRQ_HANDLED;
+}
+
+static void gt_irq_reset(struct xe_tile *tile)
+{
+ struct xe_gt *mmio = tile->primary_gt;
+
+ u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
+ XE_ENGINE_CLASS_COMPUTE);
+ u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
+ XE_ENGINE_CLASS_COPY);
+
+ /* Disable RCS, BCS, VCS and VECS class engines. */
+ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
+ xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
+ if (ccs_mask)
+ xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);
+
+ /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
+ xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~0);
+ if (bcs_mask & (BIT(1)|BIT(2)))
+ xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
+ if (bcs_mask & (BIT(3)|BIT(4)))
+ xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
+ if (bcs_mask & (BIT(5)|BIT(6)))
+ xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
+ if (bcs_mask & (BIT(7)|BIT(8)))
+ xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~0);
+ if (ccs_mask & (BIT(0)|BIT(1)))
+ xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
+ if (ccs_mask & (BIT(2)|BIT(3)))
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
+
+ if ((tile->media_gt &&
+ xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
+ HAS_HECI_GSCFI(tile_to_xe(tile))) {
+ xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
+ xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
+ }
+
+ xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
+ xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0);
+ xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0);
+}
+
+static void xelp_irq_reset(struct xe_tile *tile)
+{
+ xelp_intr_disable(tile_to_xe(tile));
+
+ gt_irq_reset(tile);
+
+ mask_and_disable(tile, PCU_IRQ_OFFSET);
+}
+
+static void dg1_irq_reset(struct xe_tile *tile)
+{
+ if (tile->id == 0)
+ dg1_intr_disable(tile_to_xe(tile));
+
+ gt_irq_reset(tile);
+
+ mask_and_disable(tile, PCU_IRQ_OFFSET);
+}
+
+static void dg1_irq_reset_mstr(struct xe_tile *tile)
+{
+ struct xe_gt *mmio = tile->primary_gt;
+
+ xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
+}
+
+static void xe_irq_reset(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe, id) {
+ if (GRAPHICS_VERx100(xe) >= 1210)
+ dg1_irq_reset(tile);
+ else
+ xelp_irq_reset(tile);
+ }
+
+ tile = xe_device_get_root_tile(xe);
+ mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
+ xe_display_irq_reset(xe);
+
+ /*
+ * The tile's top-level status register should be the last one
+ * to be reset to avoid possible bit re-latching from lower
+ * level interrupts.
+ */
+ if (GRAPHICS_VERx100(xe) >= 1210) {
+ for_each_tile(tile, xe, id)
+ dg1_irq_reset_mstr(tile);
+ }
+}
+
+static void xe_irq_postinstall(struct xe_device *xe)
+{
+ xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
+
+ /*
+ * ASLE backlight operations are reported via GUnit GSE interrupts
+ * on the root tile.
+ */
+ unmask_and_enable(xe_device_get_root_tile(xe),
+ GU_MISC_IRQ_OFFSET, GU_MISC_GSE);
+
+ /* Enable top-level interrupts */
+ if (GRAPHICS_VERx100(xe) >= 1210)
+ dg1_intr_enable(xe, true);
+ else
+ xelp_intr_enable(xe, true);
+}
+
+static irq_handler_t xe_irq_handler(struct xe_device *xe)
+{
+ if (GRAPHICS_VERx100(xe) >= 1210)
+ return dg1_irq_handler;
+ else
+ return xelp_irq_handler;
+}
+
+static void irq_uninstall(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int irq;
+
+ if (!xe->irq.enabled)
+ return;
+
+ xe->irq.enabled = false;
+ xe_irq_reset(xe);
+
+ irq = pci_irq_vector(pdev, 0);
+ free_irq(irq, xe);
+}
+
+int xe_irq_install(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ irq_handler_t irq_handler;
+ int err, irq;
+
+ irq_handler = xe_irq_handler(xe);
+ if (!irq_handler) {
+ drm_err(&xe->drm, "No supported interrupt handler");
+ return -EINVAL;
+ }
+
+ xe_irq_reset(xe);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (err < 0) {
+ drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
+ return err;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
+ return err;
+ }
+
+ xe->irq.enabled = true;
+
+ xe_irq_postinstall(xe);
+
+ err = drmm_add_action_or_reset(&xe->drm, irq_uninstall, xe);
+ if (err)
+ goto free_irq_handler;
+
+ return 0;
+
+free_irq_handler:
+ free_irq(irq, xe);
+
+ return err;
+}
+
+void xe_irq_shutdown(struct xe_device *xe)
+{
+ irq_uninstall(&xe->drm, xe);
+}
+
+void xe_irq_suspend(struct xe_device *xe)
+{
+ int irq = to_pci_dev(xe->drm.dev)->irq;
+
+ spin_lock_irq(&xe->irq.lock);
+ xe->irq.enabled = false; /* no new irqs */
+ spin_unlock_irq(&xe->irq.lock);
+
+ synchronize_irq(irq); /* flush irqs */
+ xe_irq_reset(xe); /* turn irqs off */
+}
+
+void xe_irq_resume(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int id;
+
+ /*
+ * lock not needed:
+ * 1. no irq will arrive before the postinstall
+ * 2. display is not yet resumed
+ */
+ xe->irq.enabled = true;
+ xe_irq_reset(xe);
+ xe_irq_postinstall(xe); /* turn irqs on */
+
+ for_each_gt(gt, xe, id)
+ xe_irq_enable_hwe(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
new file mode 100644
index 000000000..bc42bc90d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_irq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_IRQ_H_
+#define _XE_IRQ_H_
+
+struct xe_device;
+struct xe_tile;
+struct xe_gt;
+
+int xe_irq_install(struct xe_device *xe);
+void xe_irq_shutdown(struct xe_device *xe);
+void xe_irq_suspend(struct xe_device *xe);
+void xe_irq_resume(struct xe_device *xe);
+void xe_irq_enable_hwe(struct xe_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
new file mode 100644
index 000000000..0d7c5514e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/align.h>
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_sriov_regs.h"
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_lmtt.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_res_cursor.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+
+/**
+ * DOC: Local Memory Translation Table
+ *
+ * The Local Memory Translation Table (LMTT) provides additional abstraction
+ * when a Virtual Function (VF) is accessing device Local Memory (VRAM).
+ *
+ * The Root LMTT Page Directory contains one entry for each VF. Entries are
+ * indexed by the function number (1-based, index 0 is unused).
+ *
+ * See `Two-Level LMTT Structure`_ and `Multi-Level LMTT Structure`_.
+ */
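+
+/*
+ * Example (illustrative, 2MB LMTT pages): a VF access at LMEM offset 5 GiB
+ * walks that VF's tables down to a leaf and selects PTE index
+ * 5 GiB / 2 MiB = 2560, whose entry supplies the backing PF offset.
+ */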
+
+#define lmtt_assert(lmtt, condition) xe_tile_assert(lmtt_to_tile(lmtt), condition)
+#define lmtt_debug(lmtt, msg...) xe_sriov_dbg_verbose(lmtt_to_xe(lmtt), "LMTT: " msg)
+
+static bool xe_has_multi_level_lmtt(struct xe_device *xe)
+{
+ return xe->info.platform == XE_PVC;
+}
+
+static struct xe_tile *lmtt_to_tile(struct xe_lmtt *lmtt)
+{
+ return container_of(lmtt, struct xe_tile, sriov.pf.lmtt);
+}
+
+static struct xe_device *lmtt_to_xe(struct xe_lmtt *lmtt)
+{
+ return tile_to_xe(lmtt_to_tile(lmtt));
+}
+
+static u64 lmtt_page_size(struct xe_lmtt *lmtt)
+{
+ return BIT_ULL(lmtt->ops->lmtt_pte_shift(0));
+}
+
+static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level)
+{
+ unsigned int num_entries = level ? lmtt->ops->lmtt_pte_num(level) : 0;
+ struct xe_lmtt_pt *pt;
+ struct xe_bo *bo;
+ int err;
+
+ pt = kzalloc(struct_size(pt, entries, num_entries), GFP_KERNEL);
+ if (!pt) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
+ PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+ lmtt->ops->lmtt_pte_num(level)),
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto out_free_pt;
+ }
+
+ lmtt_assert(lmtt, xe_bo_is_vram(bo));
+
+ pt->level = level;
+ pt->bo = bo;
+ return pt;
+
+out_free_pt:
+ kfree(pt);
+out:
+ return ERR_PTR(err);
+}
+
+static void lmtt_pt_free(struct xe_lmtt_pt *pt)
+{
+ xe_bo_unpin_map_no_vm(pt->bo);
+ kfree(pt);
+}
+
+static int lmtt_init_pd(struct xe_lmtt *lmtt)
+{
+ struct xe_lmtt_pt *pd;
+
+ lmtt_assert(lmtt, !lmtt->pd);
+ lmtt_assert(lmtt, lmtt->ops->lmtt_root_pd_level());
+
+ pd = lmtt_pt_alloc(lmtt, lmtt->ops->lmtt_root_pd_level());
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ lmtt->pd = pd;
+ return 0;
+}
+
+static void lmtt_fini_pd(struct xe_lmtt *lmtt)
+{
+ struct xe_lmtt_pt *pd = lmtt->pd;
+ unsigned int num_entries = lmtt->ops->lmtt_pte_num(pd->level);
+ unsigned int n = 0;
+
+ /* make sure we don't leak */
+ for (n = 0; n < num_entries; n++)
+ lmtt_assert(lmtt, !pd->entries[n]);
+
+ lmtt->pd = NULL;
+ lmtt_pt_free(pd);
+}
+
+static void fini_lmtt(struct drm_device *drm, void *arg)
+{
+ struct xe_lmtt *lmtt = arg;
+
+ lmtt_assert(lmtt, !(!!lmtt->ops ^ !!lmtt->pd));
+
+ if (!lmtt->pd)
+ return;
+
+ lmtt_fini_pd(lmtt);
+ lmtt->ops = NULL;
+}
+
+/**
+ * xe_lmtt_init - LMTT software initialization.
+ * @lmtt: the &xe_lmtt to initialize
+ *
+ * The LMTT initialization requires two steps.
+ *
+ * xe_lmtt_init() checks if the LMTT is required on the current device and
+ * selects and initializes the proper variant of the LMTT Root Directory.
+ * Currently supported variants are `Two-Level LMTT Structure`_ and
+ * `Multi-Level LMTT Structure`_.
+ *
+ * In the next step, xe_lmtt_init_hw() will register this directory on the
+ * hardware.
+ *
+ * Notes:
+ * The LMTT allocations are managed and will be implicitly released on driver unload.
+ * This function shall be called only once and only when running as a PF driver.
+ * Any LMTT initialization failure should block VFs enabling.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_lmtt_init(struct xe_lmtt *lmtt)
+{
+ struct xe_device *xe = lmtt_to_xe(lmtt);
+ int err;
+
+ lmtt_assert(lmtt, IS_SRIOV_PF(xe));
+ lmtt_assert(lmtt, !lmtt->ops);
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ if (xe_has_multi_level_lmtt(xe))
+ lmtt->ops = &lmtt_ml_ops;
+ else
+ lmtt->ops = &lmtt_2l_ops;
+
+ err = lmtt_init_pd(lmtt);
+ if (unlikely(err))
+ goto fail;
+
+ return drmm_add_action_or_reset(&xe->drm, fini_lmtt, lmtt);
+
+fail:
+ lmtt->ops = NULL;
+ return err;
+}
+
+static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
+{
+ struct xe_tile *tile = lmtt_to_tile(lmtt);
+ struct xe_device *xe = tile_to_xe(tile);
+ dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);
+
+ lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
+ lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
+ lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
+
+ xe_mmio_write32(tile->primary_gt,
+ GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
+ LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
+}
+
+/**
+ * xe_lmtt_init_hw - Perform LMTT hardware initialization.
+ * @lmtt: the &xe_lmtt to initialize
+ *
+ * This function is a second step of the LMTT initialization.
+ * This function registers LMTT Root Directory prepared in xe_lmtt_init().
+ *
+ * This function shall be called after every hardware reset.
+ * This function shall be called only when running as a PF driver.
+ */
+void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
+{
+ if (!lmtt->pd)
+ return;
+
+ lmtt_setup_dir_ptr(lmtt);
+}
+
+static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
+ u64 pte, unsigned int idx)
+{
+ unsigned int level = pt->level;
+
+ lmtt_assert(lmtt, idx <= lmtt->ops->lmtt_pte_num(level));
+ lmtt_debug(lmtt, "WRITE level=%u index=%u pte=%#llx\n", level, idx, pte);
+
+ switch (lmtt->ops->lmtt_pte_size(level)) {
+ case sizeof(u32):
+ xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte);
+ break;
+ case sizeof(u64):
+ xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte);
+ break;
+ default:
+ lmtt_assert(lmtt, !!!"invalid pte size");
+ }
+}
+
+static void lmtt_destroy_pt(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd)
+{
+ unsigned int num_entries = pd->level ? lmtt->ops->lmtt_pte_num(pd->level) : 0;
+ struct xe_lmtt_pt *pt;
+ unsigned int i;
+
+ for (i = 0; i < num_entries; i++) {
+ pt = pd->entries[i];
+ pd->entries[i] = NULL;
+ if (!pt)
+ continue;
+
+ lmtt_destroy_pt(lmtt, pt);
+ }
+
+ lmtt_pt_free(pd);
+}
+
+static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
+{
+ struct xe_lmtt_pt *pd = lmtt->pd;
+ struct xe_lmtt_pt *pt;
+
+ pt = pd->entries[vfid];
+ pd->entries[vfid] = NULL;
+ if (!pt)
+ return;
+
+ lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid);
+
+ lmtt_assert(lmtt, pd->level > 0);
+ lmtt_assert(lmtt, pt->level == pd->level - 1);
+ lmtt_destroy_pt(lmtt, pt);
+}
+
+static int __lmtt_alloc_range(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pd,
+ u64 start, u64 end)
+{
+ u64 pte_addr_shift = BIT_ULL(lmtt->ops->lmtt_pte_shift(pd->level));
+ u64 offset;
+ int err;
+
+ lmtt_assert(lmtt, pd->level > 0);
+
+ offset = start;
+ while (offset < end) {
+ struct xe_lmtt_pt *pt;
+ u64 next, pde, pt_addr;
+ unsigned int idx;
+
+ pt = lmtt_pt_alloc(lmtt, pd->level - 1);
+ if (IS_ERR(pt))
+ return PTR_ERR(pt);
+
+ pt_addr = xe_bo_main_addr(pt->bo, XE_PAGE_SIZE);
+
+ idx = lmtt->ops->lmtt_pte_index(offset, pd->level);
+ pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);
+
+ lmtt_write_pte(lmtt, pd, pde, idx);
+
+ pd->entries[idx] = pt;
+
+ next = min(end, round_up(offset + 1, pte_addr_shift));
+
+ if (pt->level != 0) {
+ err = __lmtt_alloc_range(lmtt, pt, offset, next);
+ if (err)
+ return err;
+ }
+
+ offset = next;
+ }
+
+ return 0;
+}
+
+static int lmtt_alloc_range(struct xe_lmtt *lmtt, unsigned int vfid, u64 start, u64 end)
+{
+ struct xe_lmtt_pt *pd = lmtt->pd;
+ struct xe_lmtt_pt *pt;
+ u64 pt_addr;
+ u64 pde;
+ int err;
+
+ lmtt_assert(lmtt, pd->level > 0);
+ lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
+ lmtt_assert(lmtt, IS_ALIGNED(start, lmtt_page_size(lmtt)));
+ lmtt_assert(lmtt, IS_ALIGNED(end, lmtt_page_size(lmtt)));
+
+ if (pd->entries[vfid])
+ return -ENOTEMPTY;
+
+ pt = lmtt_pt_alloc(lmtt, pd->level - 1);
+ if (IS_ERR(pt))
+ return PTR_ERR(pt);
+
+ pt_addr = xe_bo_main_addr(pt->bo, XE_PAGE_SIZE);
+
+ pde = lmtt->ops->lmtt_pte_encode(pt_addr, pd->level);
+
+ lmtt_write_pte(lmtt, pd, pde, vfid);
+
+ pd->entries[vfid] = pt;
+
+ if (pt->level != 0) {
+ err = __lmtt_alloc_range(lmtt, pt, start, end);
+ if (err)
+ goto out_free_pt;
+ }
+
+ return 0;
+
+out_free_pt:
+ lmtt_pt_free(pt);
+ return err;
+}
+
+static struct xe_lmtt_pt *lmtt_leaf_pt(struct xe_lmtt *lmtt, unsigned int vfid, u64 addr)
+{
+ struct xe_lmtt_pt *pd = lmtt->pd;
+ struct xe_lmtt_pt *pt;
+
+ lmtt_assert(lmtt, vfid <= lmtt->ops->lmtt_pte_num(pd->level));
+ pt = pd->entries[vfid];
+
+ while (pt->level) {
+ lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
+ lmtt->ops->lmtt_pte_num(pt->level));
+
+ pt = pt->entries[lmtt->ops->lmtt_pte_index(addr, pt->level)];
+
+ addr >>= lmtt->ops->lmtt_pte_shift(pt->level);
+ }
+
+ lmtt_assert(lmtt, lmtt->ops->lmtt_pte_index(addr, pt->level) <=
+ lmtt->ops->lmtt_pte_num(pt->level));
+ lmtt_assert(lmtt, pt->level != pd->level);
+ lmtt_assert(lmtt, pt->level == 0);
+ return pt;
+}
+
+static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 start)
+{
+ u64 page_size = lmtt_page_size(lmtt);
+ struct xe_res_cursor cur;
+ struct xe_lmtt_pt *pt;
+ u64 addr, vram_offset;
+
+ lmtt_assert(lmtt, IS_ALIGNED(start, page_size));
+ lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size));
+ lmtt_assert(lmtt, xe_bo_is_vram(bo));
+
+ vram_offset = vram_region_gpu_offset(bo->ttm.resource);
+ xe_res_first(bo->ttm.resource, 0, bo->size, &cur);
+ while (cur.remaining) {
+ addr = xe_res_dma(&cur);
+ addr += vram_offset; /* XXX */
+
+ pt = lmtt_leaf_pt(lmtt, vfid, start);
+
+ lmtt_write_pte(lmtt, pt, lmtt->ops->lmtt_pte_encode(addr, 0),
+ lmtt->ops->lmtt_pte_index(start, 0));
+
+ xe_res_next(&cur, page_size);
+ start += page_size;
+ }
+}
+
+/**
+ * xe_lmtt_prepare_pages - Create VF's LMTT Page Tables.
+ * @lmtt: the &xe_lmtt to update
+ * @vfid: the VF identifier (1-based)
+ * @range: top range of LMEM offset to be supported
+ *
+ * This function creates empty LMTT page tables for the given VF to support
+ * LMEM offsets up to the maximum #range. The LMTT page tables created by this
+ * function must be released using xe_lmtt_drop_pages() function.
+ *
+ * Notes:
+ * This function shall be called only after successful LMTT initialization.
+ * See xe_lmtt_init().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range)
+{
+ lmtt_assert(lmtt, lmtt->pd);
+ lmtt_assert(lmtt, vfid);
+
+ return lmtt_alloc_range(lmtt, vfid, 0, range);
+}
+
+/**
+ * xe_lmtt_populate_pages - Update VF's LMTT Page Table Entries.
+ * @lmtt: the &xe_lmtt to update
+ * @vfid: the VF identifier (1-based)
+ * @bo: the buffer object with LMEM allocation to be mapped
+ * @offset: the offset at which #bo should be mapped
+ *
+ * This function updates the VF's LMTT entries to use the given buffer object as a backing store.
+ *
+ * Notes:
+ * This function shall be called only after successful preparation of the
+ * VF's LMTT Page Tables. See xe_lmtt_prepare_pages().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset)
+{
+ lmtt_assert(lmtt, lmtt->pd);
+ lmtt_assert(lmtt, vfid);
+
+ lmtt_insert_bo(lmtt, vfid, bo, offset);
+ return 0;
+}
+
+/**
+ * xe_lmtt_drop_pages - Remove VF's LMTT Pages.
+ * @lmtt: the &xe_lmtt to update
+ * @vfid: the VF identifier (1-based)
+ *
+ * This function removes all LMTT Page Tables prepared by xe_lmtt_prepare_pages().
+ *
+ * This function shall be called only after successful LMTT initialization.
+ * See xe_lmtt_init().
+ */
+void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid)
+{
+ lmtt_assert(lmtt, lmtt->pd);
+ lmtt_assert(lmtt, vfid);
+
+ lmtt_drop_pages(lmtt, vfid);
+}
+
+/**
+ * xe_lmtt_estimate_pt_size - Estimate size of LMTT PT allocations.
+ * @lmtt: the &xe_lmtt
+ * @size: the size of the LMEM to be mapped over LMTT (including any offset)
+ *
+ * This function shall be called only by PF.
+ *
+ * Return: size of the PT allocation(s) needed to support given LMEM size.
+ */
+u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
+{
+ unsigned int level = 0;
+ u64 pt_size;
+
+ lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
+ lmtt_assert(lmtt, IS_DGFX(lmtt_to_xe(lmtt)));
+ lmtt_assert(lmtt, lmtt->ops);
+
+ pt_size = PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+ lmtt->ops->lmtt_pte_num(level));
+
+ while (++level < lmtt->ops->lmtt_root_pd_level()) {
+ pt_size *= lmtt->ops->lmtt_pte_index(size, level) + 1;
+ pt_size += PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+ lmtt->ops->lmtt_pte_num(level));
+ }
+
+ return pt_size;
+}
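+
+/*
+ * Illustrative example for the 2-level variant: the root PD is level 1, so
+ * the loop above never runs and the estimate is just one level-0 table,
+ * PAGE_ALIGN(sizeof(u32) * SZ_64K) = 256 KiB per VF in the 128 GiB
+ * configuration, independent of @size.
+ */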
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_lmtt_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lmtt.h b/drivers/gpu/drm/xe/xe_lmtt.h
new file mode 100644
index 000000000..cb10ef994
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lmtt.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_LMTT_H_
+#define _XE_LMTT_H_
+
+#include <linux/types.h>
+
+struct xe_bo;
+struct xe_lmtt;
+struct xe_lmtt_ops;
+
+#ifdef CONFIG_PCI_IOV
+int xe_lmtt_init(struct xe_lmtt *lmtt);
+void xe_lmtt_init_hw(struct xe_lmtt *lmtt);
+int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range);
+int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset);
+void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid);
+u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size);
+#else
+static inline int xe_lmtt_init(struct xe_lmtt *lmtt) { return 0; }
+static inline void xe_lmtt_init_hw(struct xe_lmtt *lmtt) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lmtt_2l.c b/drivers/gpu/drm/xe/xe_lmtt_2l.c
new file mode 100644
index 000000000..84bc5c421
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lmtt_2l.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/log2.h>
+#include <linux/sizes.h>
+
+#include "xe_lmtt_types.h"
+#include "xe_macros.h"
+
+/**
+ * DOC: Two-Level LMTT Structure
+ *
+ * LMHAW (Local Memory Host Address Width) is 37 bit (128GB)
+ *
+ * LMGAW (Local Memory Guest Address Width) is 37 bit (128GB)
+ *
+ * The following figure illustrates the structure and function of the 2L LMTT::
+ *
+ * LMTT Directory
+ * (1 Entry per VF)
+ * +-----------+ LMTT (per VF)
+ * | | +-----------+
+ * | | | |
+ * | | index: | |
+ * | | LMEM VF +===========+
+ * | | offset --> | PTE | ==> LMEM PF offset
+ * | | +===========+
+ * index: +===========+ | |
+ * VFID --> | PDE | -----------------> +-----------+
+ * +===========+ / \.
+ * | | / \.
+ * | | / \.
+ * | | / \.
+ * +-----------+ <== [LMTT Directory Ptr] \.
+ * / \ / \.
+ * / \ +-----------+-----------------+------+---+
+ * / \ | 31:HAW-16 | HAW-17:5 | 4:1 | 0 |
+ * / \ +===========+=================+======+===+
+ * / \ | Reserved | LMEM Page (2MB) | Rsvd | V |
+ * / \ +-----------+-----------------+------+---+
+ * / \.
+ * +-----------+-----------------+------+---+
+ * | 31:HAW-12 | HAW-13:4 | 3:1 | 0 |
+ * +===========+=================+======+===+
+ * | Reserved | LMTT Ptr (64KB) | Rsvd | V |
+ * +-----------+-----------------+------+---+
+ *
+ */
+
+typedef u32 lmtt_2l_pde_t;
+typedef u32 lmtt_2l_pte_t;
+
+#if IS_ENABLED(CONFIG_DRM_XE_LMTT_2L_128GB)
+#define LMTT_2L_HAW 37 /* 128 GiB */
+#else
+#define LMTT_2L_HAW 35 /* 32 GiB */
+#endif
+
+#define LMTT_2L_PDE_MAX_NUM 64 /* SRIOV with PF and 63 VFs, index 0 (PF) is unused */
+#define LMTT_2L_PDE_LMTT_PTR GENMASK(LMTT_2L_HAW - 13, 4)
+#define LMTT_2L_PDE_VALID BIT(0)
+
+#define LMTT_2L_PTE_MAX_NUM BIT(LMTT_2L_HAW - ilog2(SZ_2M))
+#define LMTT_2L_PTE_LMEM_PAGE GENMASK(LMTT_2L_HAW - 17, 5)
+#define LMTT_2L_PTE_VALID BIT(0)
+
+static unsigned int lmtt_2l_root_pd_level(void)
+{
+ return 1; /* implementation is 0-based */
+}
+
+static unsigned int lmtt_2l_pte_num(unsigned int level)
+{
+ switch (level) {
+ case 1:
+ return LMTT_2L_PDE_MAX_NUM;
+ case 0:
+ BUILD_BUG_ON(LMTT_2L_HAW == 37 && LMTT_2L_PTE_MAX_NUM != SZ_64K);
+ BUILD_BUG_ON(LMTT_2L_HAW == 35 && LMTT_2L_PTE_MAX_NUM != SZ_16K);
+ return LMTT_2L_PTE_MAX_NUM;
+ default:
+ return 0;
+ }
+}
+
+static unsigned int lmtt_2l_pte_size(unsigned int level)
+{
+ switch (level) {
+ case 1:
+ return sizeof(lmtt_2l_pde_t);
+ case 0:
+ return sizeof(lmtt_2l_pte_t);
+ default:
+ return 0;
+ }
+}
+
+static unsigned int lmtt_2l_pte_shift(unsigned int level)
+{
+ switch (level) {
+ case 0:
+ return ilog2(SZ_2M);
+ default:
+ return 0;
+ }
+}
+
+static unsigned int lmtt_2l_pte_index(u64 addr, unsigned int level)
+{
+ addr >>= lmtt_2l_pte_shift(level);
+
+ switch (level) {
+ case 0:
+ /* SZ_2M increments */
+ BUILD_BUG_ON_NOT_POWER_OF_2(LMTT_2L_PTE_MAX_NUM);
+ return addr & (LMTT_2L_PTE_MAX_NUM - 1);
+ default:
+ return 0;
+ }
+}
+
+static u64 lmtt_2l_pte_encode(unsigned long offset, unsigned int level)
+{
+ switch (level) {
+ case 0:
+ XE_WARN_ON(!IS_ALIGNED(offset, SZ_2M));
+ XE_WARN_ON(!FIELD_FIT(LMTT_2L_PTE_LMEM_PAGE, offset / SZ_2M));
+ return FIELD_PREP(LMTT_2L_PTE_LMEM_PAGE, offset / SZ_2M) | LMTT_2L_PTE_VALID;
+ case 1:
+ XE_WARN_ON(!IS_ALIGNED(offset, SZ_64K));
+ XE_WARN_ON(!FIELD_FIT(LMTT_2L_PDE_LMTT_PTR, offset / SZ_64K));
+ return FIELD_PREP(LMTT_2L_PDE_LMTT_PTR, offset / SZ_64K) | LMTT_2L_PDE_VALID;
+ default:
+ XE_WARN_ON(true);
+ return 0;
+ }
+}
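+
+/*
+ * Worked example (illustrative): encoding a level-0 entry for PF offset
+ * 6 MiB yields FIELD_PREP(LMTT_2L_PTE_LMEM_PAGE, 3) | LMTT_2L_PTE_VALID,
+ * i.e. (3 << 5) | 1 = 0x61.
+ */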
+
+const struct xe_lmtt_ops lmtt_2l_ops = {
+ .lmtt_root_pd_level = lmtt_2l_root_pd_level,
+ .lmtt_pte_num = lmtt_2l_pte_num,
+ .lmtt_pte_size = lmtt_2l_pte_size,
+ .lmtt_pte_shift = lmtt_2l_pte_shift,
+ .lmtt_pte_index = lmtt_2l_pte_index,
+ .lmtt_pte_encode = lmtt_2l_pte_encode,
+};
diff --git a/drivers/gpu/drm/xe/xe_lmtt_ml.c b/drivers/gpu/drm/xe/xe_lmtt_ml.c
new file mode 100644
index 000000000..b21215a2e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lmtt_ml.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/log2.h>
+#include <linux/sizes.h>
+
+#include "xe_lmtt_types.h"
+#include "xe_macros.h"
+
+/**
+ * DOC: Multi-Level LMTT Structure
+ *
+ * LMHAW (Local Memory Host Address Width) is 48 bits (256TB)
+ *
+ * LMGAW (Local Memory Guest Address Width) is 48 bits (256TB)
+ *
+ * The following figure illustrates the structure and function of the ML LMTT::
+ *
+ * LMTT L3 Directory
+ * (1 Entry per VF) LMTT L1 Leaf
+ * +-----------+ +-----------+
+ * | | LMTT L2 (per VF) | |
+ * | | +-----------+ | |
+ * | | | | index: +===========+
+ * | | | | GDPA --> | PTE | => LMEM PF offset
+ * | | | | 34:21 +===========+
+ * | | index: | | | |
+ * | | LMEM VF +===========+ | |
+ * | | offset -> | PTE | ----------> +-----------+
+ * | | GAW-1:35 +===========+ / \.
+ * index: +===========+ | | / \.
+ * VFID --> | PDE | ---------> +-----------+ / \.
+ * +===========+ / / / \.
+ * | | / / / \.
+ * +-----------+ <== [LMTT Directory Ptr] / \.
+ * / \ / / / \.
+ * / \ / / +-----------+-----------------+------+---+
+ * / /\ / | 31:HAW-16 | HAW-17:5 | 4:1 | 0 |
+ * / / \ / +===========+=================+======+===+
+ * / / \ / | Reserved | LMEM Page (2MB) | Rsvd | V |
+ * / / +-----------+-----------------+------+---+
+ * / /
+ * +-----------+-----------------+------+---+
+ * | 63:HAW-12 | HAW-13:4 | 3:1 | 0 |
+ * +===========+=================+======+===+
+ * | Reserved | LMTT Ptr (64KB) | Rsvd | V |
+ * +-----------+-----------------+------+---+
+ *
+ */
+
+typedef u64 lmtt_ml_pde_t;
+typedef u32 lmtt_ml_pte_t;
+
+#define LMTT_ML_HAW 48 /* 256 TiB */
+
+#define LMTT_ML_PDE_MAX_NUM 64 /* SRIOV with PF and 63 VFs, index 0 (PF) is unused */
+#define LMTT_ML_PDE_LMTT_PTR GENMASK_ULL(LMTT_ML_HAW - 13, 4)
+#define LMTT_ML_PDE_VALID BIT(0)
+
+#define LMTT_ML_PDE_L2_SHIFT 35
+#define LMTT_ML_PDE_L2_MAX_NUM BIT_ULL(LMTT_ML_HAW - 35)
+
+#define LMTT_ML_PTE_MAX_NUM BIT(35 - ilog2(SZ_2M))
+#define LMTT_ML_PTE_LMEM_PAGE GENMASK(LMTT_ML_HAW - 17, 5)
+#define LMTT_ML_PTE_VALID BIT(0)
+
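+/*
+ * Worked example (illustrative): with LMTT_ML_HAW == 48, each L3 PDE covers a
+ * 35-bit (32GB) span, so an L2 table needs BIT_ULL(48 - 35) == SZ_8K entries,
+ * and an L1 leaf needs BIT(35 - ilog2(SZ_2M)) == SZ_16K entries, matching the
+ * BUILD_BUG_ON()s in lmtt_ml_pte_num() below.
+ */
+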
+static unsigned int lmtt_ml_root_pd_level(void)
+{
+ return 2; /* implementation is 0-based */
+}
+
+static unsigned int lmtt_ml_pte_num(unsigned int level)
+{
+ switch (level) {
+ case 2:
+ return LMTT_ML_PDE_MAX_NUM;
+ case 1:
+ BUILD_BUG_ON(LMTT_ML_HAW == 48 && LMTT_ML_PDE_L2_MAX_NUM != SZ_8K);
+ return LMTT_ML_PDE_L2_MAX_NUM;
+ case 0:
+ BUILD_BUG_ON(LMTT_ML_PTE_MAX_NUM != SZ_16K);
+ return LMTT_ML_PTE_MAX_NUM;
+ default:
+ return 0;
+ }
+}
+
+static unsigned int lmtt_ml_pte_size(unsigned int level)
+{
+ switch (level) {
+ case 2:
+ case 1:
+ return sizeof(lmtt_ml_pde_t);
+ case 0:
+ return sizeof(lmtt_ml_pte_t);
+ default:
+ return 0;
+ }
+}
+
+static unsigned int lmtt_ml_pte_shift(unsigned int level)
+{
+ switch (level) {
+ case 1:
+ BUILD_BUG_ON(BIT_ULL(LMTT_ML_PDE_L2_SHIFT) != SZ_32G);
+ return ilog2(SZ_32G);
+ case 0:
+ return ilog2(SZ_2M);
+ default:
+ return 0;
+ }
+}
+
+static unsigned int lmtt_ml_pte_index(u64 addr, unsigned int level)
+{
+ addr >>= lmtt_ml_pte_shift(level);
+
+ switch (level) {
+ case 1:
+ /* SZ_32G increments */
+ BUILD_BUG_ON_NOT_POWER_OF_2(LMTT_ML_PDE_L2_MAX_NUM);
+ return addr & (LMTT_ML_PDE_L2_MAX_NUM - 1);
+ case 0:
+ /* SZ_2M increments */
+ BUILD_BUG_ON_NOT_POWER_OF_2(LMTT_ML_PTE_MAX_NUM);
+ return addr & (LMTT_ML_PTE_MAX_NUM - 1);
+ default:
+ return 0;
+ }
+}
+
+static u64 lmtt_ml_pte_encode(unsigned long offset, unsigned int level)
+{
+ switch (level) {
+ case 0:
+ XE_WARN_ON(!IS_ALIGNED(offset, SZ_2M));
+ XE_WARN_ON(!FIELD_FIT(LMTT_ML_PTE_LMEM_PAGE, offset / SZ_2M));
+ return FIELD_PREP(LMTT_ML_PTE_LMEM_PAGE, offset / SZ_2M) | LMTT_ML_PTE_VALID;
+ case 1:
+ case 2:
+ XE_WARN_ON(!IS_ALIGNED(offset, SZ_64K));
+ XE_WARN_ON(!FIELD_FIT(LMTT_ML_PDE_LMTT_PTR, offset / SZ_64K));
+ return FIELD_PREP(LMTT_ML_PDE_LMTT_PTR, offset / SZ_64K) | LMTT_ML_PDE_VALID;
+ default:
+ XE_WARN_ON(true);
+ return 0;
+ }
+}
+
+const struct xe_lmtt_ops lmtt_ml_ops = {
+ .lmtt_root_pd_level = lmtt_ml_root_pd_level,
+ .lmtt_pte_num = lmtt_ml_pte_num,
+ .lmtt_pte_size = lmtt_ml_pte_size,
+ .lmtt_pte_shift = lmtt_ml_pte_shift,
+ .lmtt_pte_index = lmtt_ml_pte_index,
+ .lmtt_pte_encode = lmtt_ml_pte_encode,
+};
diff --git a/drivers/gpu/drm/xe/xe_lmtt_types.h b/drivers/gpu/drm/xe/xe_lmtt_types.h
new file mode 100644
index 000000000..b37abad23
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lmtt_types.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_LMTT_TYPES_H_
+#define _XE_LMTT_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_bo;
+struct xe_lmtt;
+struct xe_lmtt_pt;
+struct xe_lmtt_ops;
+
+#define LMTT_PTE_INVALID ULL(0)
+
+/**
+ * struct xe_lmtt - Local Memory Translation Table Manager
+ */
+struct xe_lmtt {
+ /** @pd: root LMTT Directory */
+ struct xe_lmtt_pt *pd;
+
+ /** @ops: LMTT functions */
+ const struct xe_lmtt_ops *ops;
+};
+
+/**
+ * struct xe_lmtt_pt - Local Memory Translation Table Page Table
+ *
+ * Represents single level of the LMTT.
+ */
+struct xe_lmtt_pt {
+ /** @level: page table level, 0 is leaf */
+ unsigned int level;
+
+ /** @bo: buffer object with actual LMTT PTE values */
+ struct xe_bo *bo;
+
+ /** @entries: leaf page tables, exist only for root/non-leaf */
+ struct xe_lmtt_pt *entries[];
+};
+
+/**
+ * struct xe_lmtt_ops - Local Memory Translation Table Operations
+ *
+ * Provides abstraction of the LMTT variants.
+ */
+struct xe_lmtt_ops {
+ /* private: */
+ unsigned int (*lmtt_root_pd_level)(void);
+ unsigned int (*lmtt_pte_num)(unsigned int level);
+ unsigned int (*lmtt_pte_size)(unsigned int level);
+ unsigned int (*lmtt_pte_shift)(unsigned int level);
+ unsigned int (*lmtt_pte_index)(u64 addr, unsigned int level);
+ u64 (*lmtt_pte_encode)(unsigned long offset, unsigned int level);
+};
+
+extern const struct xe_lmtt_ops lmtt_2l_ops;
+extern const struct xe_lmtt_ops lmtt_ml_ops;
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
new file mode 100644
index 000000000..b38319d28
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_lrc.h"
+
+#include "instructions/xe_mi_commands.h"
+#include "instructions/xe_gfxpipe_commands.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gpu_commands.h"
+#include "regs/xe_lrc_layout.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_drm_client.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_hw_fence.h"
+#include "xe_map.h"
+#include "xe_vm.h"
+
+#define LRC_VALID (1 << 0)
+#define LRC_PRIVILEGE (1 << 8)
+#define LRC_ADDRESSING_MODE_SHIFT 3
+#define LRC_LEGACY_64B_CONTEXT 3
+
+#define ENGINE_CLASS_SHIFT 61
+#define ENGINE_INSTANCE_SHIFT 48
+
+static struct xe_device *
+lrc_to_xe(struct xe_lrc *lrc)
+{
+ return gt_to_xe(lrc->fence_ctx.gt);
+}
+
+size_t xe_lrc_size(struct xe_device *xe, enum xe_engine_class class)
+{
+ switch (class) {
+ case XE_ENGINE_CLASS_RENDER:
+ if (GRAPHICS_VER(xe) >= 20)
+ return 4 * SZ_4K;
+ else
+ return 14 * SZ_4K;
+ case XE_ENGINE_CLASS_COMPUTE:
+ /* 14 pages since graphics_ver == 11 */
+ if (GRAPHICS_VER(xe) >= 20)
+ return 3 * SZ_4K;
+ else
+ return 14 * SZ_4K;
+ default:
+ WARN(1, "Unknown engine class: %d", class);
+ fallthrough;
+ case XE_ENGINE_CLASS_COPY:
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ case XE_ENGINE_CLASS_OTHER:
+ return 2 * SZ_4K;
+ }
+}
+
+/*
+ * The per-platform tables are u8-encoded in @data. Decode @data and set the
+ * register offsets and commands in @regs. The following encoding is used
+ * for each byte. There are two steps: decoding commands and decoding
+ * addresses.
+ *
+ * Commands:
+ * [7]: create NOPs - the number of NOPs is set in the lower bits
+ * [6]: when creating an MI_LOAD_REGISTER_IMM command, allow setting
+ *      MI_LRI_FORCE_POSTED
+ * [5:0]: number of NOPs, or of registers to set values for in case of
+ *      MI_LOAD_REGISTER_IMM
+ *
+ * Addresses: these are decoded after an MI_LOAD_REGISTER_IMM command for
+ * "count" registers. They are set by using the REG/REG16 macros: the former
+ * is used for offsets smaller than 0x200 while the latter is for offsets
+ * bigger than that. Those macros already set all the bits documented below
+ * correctly:
+ *
+ * [7]: when a register offset needs more than 6 bits, additional bytes
+ *      follow, carrying the lower bits
+ * [6:0]: register offset, without considering the engine base.
+ *
+ * This function only tweaks the commands and register offsets. Values are not
+ * filled out.
+ */
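+/*
+ * For example (an illustrative sketch, not taken from a real platform
+ * table), the byte sequence
+ *
+ *	NOP(1), LRI(2, POSTED), REG(0x034), REG16(0x2b4), END
+ *
+ * decodes as: skip one dword, then emit MI_LOAD_REGISTER_IMM for two
+ * registers with MI_LRI_FORCE_POSTED set, followed by the offsets
+ * base + 0x34 and base + 0x2b4, leaving the value dwords untouched for
+ * later fill-out.
+ */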
+static void set_offsets(u32 *regs,
+ const u8 *data,
+ const struct xe_hw_engine *hwe)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count) | \
+ BUILD_BUG_ON_ZERO(count >= BIT(6)))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+ (((x) >> 2) & 0x7f)
+#define END 0
+{
+ const u32 base = hwe->mmio_base;
+
+ while (*data) {
+ u8 count, flags;
+
+ if (*data & BIT(7)) { /* skip */
+ count = *data++ & ~BIT(7);
+ regs += count;
+ continue;
+ }
+
+ count = *data & 0x3f;
+ flags = *data >> 6;
+ data++;
+
+ *regs = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
+ if (flags & POSTED)
+ *regs |= MI_LRI_FORCE_POSTED;
+ *regs |= MI_LRI_LRM_CS_MMIO;
+ regs++;
+
+ xe_gt_assert(hwe->gt, count);
+ do {
+ u32 offset = 0;
+ u8 v;
+
+ do {
+ v = *data++;
+ offset <<= 7;
+ offset |= v & ~BIT(7);
+ } while (v & BIT(7));
+
+ regs[0] = base + (offset << 2);
+ regs += 2;
+ } while (--count);
+ }
+
+ *regs = MI_BATCH_BUFFER_END | BIT(0);
+}
+
+static const u8 gen12_xcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END
+};
+
+static const u8 dg2_xcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END
+};
+
+static const u8 gen12_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+ NOP(3 + 9 + 1),
+
+ LRI(51, POSTED),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG16(0x588),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+ REG(0x084),
+ NOP(1),
+
+ END
+};
+
+static const u8 xehp_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+static const u8 dg2_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+static const u8 mtl_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(2),
+ LRI(2, POSTED),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
+#define XE2_CTX_COMMON \
+ NOP(1), /* [0x00] */ \
+ LRI(15, POSTED), /* [0x01] */ \
+ REG16(0x244), /* [0x02] CTXT_SR_CTL */ \
+ REG(0x034), /* [0x04] RING_BUFFER_HEAD */ \
+ REG(0x030), /* [0x06] RING_BUFFER_TAIL */ \
+ REG(0x038), /* [0x08] RING_BUFFER_START */ \
+ REG(0x03c), /* [0x0a] RING_BUFFER_CONTROL */ \
+ REG(0x168), /* [0x0c] BB_ADDR_UDW */ \
+ REG(0x140), /* [0x0e] BB_ADDR */ \
+ REG(0x110), /* [0x10] BB_STATE */ \
+ REG(0x1c0), /* [0x12] BB_PER_CTX_PTR */ \
+ REG(0x1c4), /* [0x14] RCS_INDIRECT_CTX */ \
+ REG(0x1c8), /* [0x16] RCS_INDIRECT_CTX_OFFSET */ \
+ REG(0x180), /* [0x18] CCID */ \
+ REG16(0x2b4), /* [0x1a] SEMAPHORE_TOKEN */ \
+ REG(0x120), /* [0x1c] PRT_BB_STATE */ \
+ REG(0x124), /* [0x1e] PRT_BB_STATE_UDW */ \
+ \
+ NOP(1), /* [0x20] */ \
+ LRI(9, POSTED), /* [0x21] */ \
+ REG16(0x3a8), /* [0x22] CTX_TIMESTAMP */ \
+ REG16(0x3ac), /* [0x24] CTX_TIMESTAMP_UDW */ \
+ REG(0x108), /* [0x26] INDIRECT_RING_STATE */ \
+ REG16(0x284), /* [0x28] dummy reg */ \
+ REG16(0x280), /* [0x2a] CS_ACC_CTR_THOLD */ \
+ REG16(0x27c), /* [0x2c] CS_CTX_SYS_PASID */ \
+ REG16(0x278), /* [0x2e] CS_CTX_ASID */ \
+ REG16(0x274), /* [0x30] PTBP_UDW */ \
+ REG16(0x270) /* [0x32] PTBP_LDW */
+
+static const u8 xe2_rcs_offsets[] = {
+ XE2_CTX_COMMON,
+
+ NOP(2), /* [0x34] */
+ LRI(2, POSTED), /* [0x36] */
+ REG16(0x5a8), /* [0x37] CONTEXT_SCHEDULING_ATTRIBUTES */
+ REG16(0x5ac), /* [0x39] PREEMPTION_STATUS */
+
+ NOP(6), /* [0x41] */
+ LRI(1, 0), /* [0x47] */
+ REG(0x0c8), /* [0x48] R_PWR_CLK_STATE */
+
+ END
+};
+
+static const u8 xe2_bcs_offsets[] = {
+ XE2_CTX_COMMON,
+
+ NOP(4 + 8 + 1), /* [0x34] */
+ LRI(2, POSTED), /* [0x41] */
+ REG16(0x200), /* [0x42] BCS_SWCTRL */
+ REG16(0x204), /* [0x44] BLIT_CCTL */
+
+ END
+};
+
+static const u8 xe2_xcs_offsets[] = {
+ XE2_CTX_COMMON,
+
+ END
+};
+
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
+
+static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
+{
+ if (class == XE_ENGINE_CLASS_RENDER) {
+ if (GRAPHICS_VER(xe) >= 20)
+ return xe2_rcs_offsets;
+ else if (GRAPHICS_VERx100(xe) >= 1270)
+ return mtl_rcs_offsets;
+ else if (GRAPHICS_VERx100(xe) >= 1255)
+ return dg2_rcs_offsets;
+ else if (GRAPHICS_VERx100(xe) >= 1250)
+ return xehp_rcs_offsets;
+ else
+ return gen12_rcs_offsets;
+ } else if (class == XE_ENGINE_CLASS_COPY) {
+ if (GRAPHICS_VER(xe) >= 20)
+ return xe2_bcs_offsets;
+ else
+ return gen12_xcs_offsets;
+ } else {
+ if (GRAPHICS_VER(xe) >= 20)
+ return xe2_xcs_offsets;
+ else if (GRAPHICS_VERx100(xe) >= 1255)
+ return dg2_xcs_offsets;
+ else
+ return gen12_xcs_offsets;
+ }
+}
+
+static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
+{
+ regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
+ _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+
+ /* TODO: Timestamp */
+}
+
+static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
+{
+ struct xe_device *xe = gt_to_xe(hwe->gt);
+
+ if (GRAPHICS_VERx100(xe) >= 1250)
+ return 0x70;
+ else
+ return 0x60;
+}
+
+static void reset_stop_ring(u32 *regs, struct xe_hw_engine *hwe)
+{
+ int x;
+
+ x = lrc_ring_mi_mode(hwe);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+}
+
+static inline u32 __xe_lrc_ring_offset(struct xe_lrc *lrc)
+{
+ return 0;
+}
+
+u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
+{
+ return lrc->ring.size;
+}
+
+/* Make the magic macros work */
+#define __xe_lrc_pphwsp_offset xe_lrc_pphwsp_offset
+
+#define LRC_SEQNO_PPHWSP_OFFSET 512
+#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
+#define LRC_PARALLEL_PPHWSP_OFFSET 2048
+#define LRC_PPHWSP_SIZE SZ_4K
+
+static size_t lrc_reg_size(struct xe_device *xe)
+{
+ if (GRAPHICS_VERx100(xe) >= 1250)
+ return 96 * sizeof(u32);
+ else
+ return 80 * sizeof(u32);
+}
+
+size_t xe_lrc_skip_size(struct xe_device *xe)
+{
+ return LRC_PPHWSP_SIZE + lrc_reg_size(xe);
+}
+
+static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
+{
+ /* The seqno is stored in the driver-defined portion of PPHWSP */
+ return xe_lrc_pphwsp_offset(lrc) + LRC_SEQNO_PPHWSP_OFFSET;
+}
+
+static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
+{
+ /* The start seqno is stored in the driver-defined portion of PPHWSP */
+ return xe_lrc_pphwsp_offset(lrc) + LRC_START_SEQNO_PPHWSP_OFFSET;
+}
+
+static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
+{
+ /* The parallel context state is stored in the driver-defined portion of PPHWSP */
+ return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
+}
+
+static inline u32 __xe_lrc_regs_offset(struct xe_lrc *lrc)
+{
+ return xe_lrc_pphwsp_offset(lrc) + LRC_PPHWSP_SIZE;
+}
+
+#define DECL_MAP_ADDR_HELPERS(elem) \
+static inline struct iosys_map __xe_lrc_##elem##_map(struct xe_lrc *lrc) \
+{ \
+ struct iosys_map map = lrc->bo->vmap; \
+\
+ xe_assert(lrc_to_xe(lrc), !iosys_map_is_null(&map)); \
+ iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \
+ return map; \
+} \
+static inline u32 __xe_lrc_##elem##_ggtt_addr(struct xe_lrc *lrc) \
+{ \
+ return xe_bo_ggtt_addr(lrc->bo) + __xe_lrc_##elem##_offset(lrc); \
+} \
+
+DECL_MAP_ADDR_HELPERS(ring)
+DECL_MAP_ADDR_HELPERS(pphwsp)
+DECL_MAP_ADDR_HELPERS(seqno)
+DECL_MAP_ADDR_HELPERS(regs)
+DECL_MAP_ADDR_HELPERS(start_seqno)
+DECL_MAP_ADDR_HELPERS(parallel)
+
+#undef DECL_MAP_ADDR_HELPERS
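+
+/*
+ * Each DECL_MAP_ADDR_HELPERS(elem) invocation above expands to a pair of
+ * helpers; e.g. DECL_MAP_ADDR_HELPERS(seqno) generates
+ * __xe_lrc_seqno_map(lrc), an iosys_map pointing at the seqno within the
+ * LRC BO, and __xe_lrc_seqno_ggtt_addr(lrc), its GGTT address.
+ */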
+
+u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_pphwsp_ggtt_addr(lrc);
+}
+
+u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map map;
+
+ map = __xe_lrc_regs_map(lrc);
+ iosys_map_incr(&map, reg_nr * sizeof(u32));
+ return xe_map_read32(xe, &map);
+}
+
+void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map map;
+
+ map = __xe_lrc_regs_map(lrc);
+ iosys_map_incr(&map, reg_nr * sizeof(u32));
+ xe_map_write32(xe, &map, val);
+}
+
+static void *empty_lrc_data(struct xe_hw_engine *hwe)
+{
+ struct xe_device *xe = gt_to_xe(hwe->gt);
+ void *data;
+ u32 *regs;
+
+ data = kzalloc(xe_lrc_size(xe, hwe->class), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ /* 1st page: Per-Process HW Status Page (PPHWSP) */
+ regs = data + LRC_PPHWSP_SIZE;
+ set_offsets(regs, reg_offsets(xe, hwe->class), hwe);
+ set_context_control(regs, hwe);
+ reset_stop_ring(regs, hwe);
+
+ return data;
+}
+
+static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
+{
+ u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
+
+ xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
+ xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
+}
+
+#define PVC_CTX_ASID (0x2e + 1)
+#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
+
+int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct iosys_map map;
+ void *init_data = NULL;
+ u32 arb_enable;
+ int err;
+
+ lrc->flags = 0;
+
+ /*
+ * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
+ * via VM bind calls.
+ */
+ lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
+ ring_size + xe_lrc_size(xe, hwe->class),
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(lrc->bo))
+ return PTR_ERR(lrc->bo);
+
+ lrc->tile = gt_to_tile(hwe->gt);
+ lrc->ring.size = ring_size;
+ lrc->ring.tail = 0;
+
+ xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
+ hwe->fence_irq, hwe->name);
+
+ if (!gt->default_lrc[hwe->class]) {
+ init_data = empty_lrc_data(hwe);
+ if (!init_data) {
+ err = -ENOMEM;
+ goto err_lrc_finish;
+ }
+ }
+
+ /*
+ * Init the Per-Process HW Status Page (PPHWSP) and the LRC / context
+ * state to known values
+ */
+ map = __xe_lrc_pphwsp_map(lrc);
+ if (!init_data) {
+ xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */
+ xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE,
+ gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE,
+ xe_lrc_size(xe, hwe->class) - LRC_PPHWSP_SIZE);
+ } else {
+ xe_map_memcpy_to(xe, &map, 0, init_data,
+ xe_lrc_size(xe, hwe->class));
+ kfree(init_data);
+ }
+
+ if (vm) {
+ xe_lrc_set_ppgtt(lrc, vm);
+
+ if (vm->xef)
+ xe_drm_client_add_bo(vm->xef->client, lrc->bo);
+ }
+
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_START, __xe_lrc_ring_ggtt_addr(lrc));
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_HEAD, 0);
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
+ RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
+ if (xe->info.has_asid && vm)
+ xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+
+ lrc->desc = LRC_VALID;
+ lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
+ /* TODO: Priority */
+
+ /*
+ * While this appears to have something to do with privileged batches or
+ * some such, it really just means PPGTT mode.
+ */
+ if (vm)
+ lrc->desc |= LRC_PRIVILEGE;
+
+ if (GRAPHICS_VERx100(xe) < 1250) {
+ lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
+ lrc->desc |= (u64)hwe->class << ENGINE_CLASS_SHIFT;
+ }
+
+ arb_enable = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ xe_lrc_write_ring(lrc, &arb_enable, sizeof(arb_enable));
+
+ map = __xe_lrc_seqno_map(lrc);
+ xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
+
+ map = __xe_lrc_start_seqno_map(lrc);
+ xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
+
+ return 0;
+
+err_lrc_finish:
+ xe_lrc_finish(lrc);
+ return err;
+}
+
+void xe_lrc_finish(struct xe_lrc *lrc)
+{
+ xe_hw_fence_ctx_finish(&lrc->fence_ctx);
+ xe_bo_lock(lrc->bo, false);
+ xe_bo_unpin(lrc->bo);
+ xe_bo_unlock(lrc->bo);
+ xe_bo_put(lrc->bo);
+}
+
+void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
+{
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_HEAD, head);
+}
+
+u32 xe_lrc_ring_head(struct xe_lrc *lrc)
+{
+ return xe_lrc_read_ctx_reg(lrc, CTX_RING_HEAD) & HEAD_ADDR;
+}
+
+u32 xe_lrc_ring_space(struct xe_lrc *lrc)
+{
+ const u32 head = xe_lrc_ring_head(lrc);
+ const u32 tail = lrc->ring.tail;
+ const u32 size = lrc->ring.size;
+
+ return ((head - tail - 1) & (size - 1)) + 1;
+}
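+
+/*
+ * Worked example (illustrative): with size == SZ_16K and head == tail the
+ * ring is idle and the formula yields ((head - tail - 1) & 0x3fff) + 1 ==
+ * SZ_16K, i.e. the whole ring is free; with tail == head + 4 it yields
+ * SZ_16K - 4, shrinking as the tail advances towards wrapping onto the head.
+ */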
+
+static void __xe_lrc_write_ring(struct xe_lrc *lrc, struct iosys_map ring,
+ const void *data, size_t size)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+
+ iosys_map_incr(&ring, lrc->ring.tail);
+ xe_map_memcpy_to(xe, &ring, 0, data, size);
+ lrc->ring.tail = (lrc->ring.tail + size) & (lrc->ring.size - 1);
+}
+
+void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map ring;
+ u32 rhs;
+ size_t aligned_size;
+
+ xe_assert(xe, IS_ALIGNED(size, 4));
+ aligned_size = ALIGN(size, 8);
+
+ ring = __xe_lrc_ring_map(lrc);
+
+ xe_assert(xe, lrc->ring.tail < lrc->ring.size);
+ rhs = lrc->ring.size - lrc->ring.tail;
+ if (size > rhs) {
+ __xe_lrc_write_ring(lrc, ring, data, rhs);
+ __xe_lrc_write_ring(lrc, ring, data + rhs, size - rhs);
+ } else {
+ __xe_lrc_write_ring(lrc, ring, data, size);
+ }
+
+ if (aligned_size > size) {
+ u32 noop = MI_NOOP;
+
+ __xe_lrc_write_ring(lrc, ring, &noop, sizeof(noop));
+ }
+}
+
+u64 xe_lrc_descriptor(struct xe_lrc *lrc)
+{
+ return lrc->desc | xe_lrc_ggtt_addr(lrc);
+}
+
+u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_seqno_ggtt_addr(lrc);
+}
+
+struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc)
+{
+ return &xe_hw_fence_create(&lrc->fence_ctx,
+ __xe_lrc_seqno_map(lrc))->dma;
+}
+
+s32 xe_lrc_seqno(struct xe_lrc *lrc)
+{
+ struct iosys_map map = __xe_lrc_seqno_map(lrc);
+
+ return xe_map_read32(lrc_to_xe(lrc), &map);
+}
+
+s32 xe_lrc_start_seqno(struct xe_lrc *lrc)
+{
+ struct iosys_map map = __xe_lrc_start_seqno_map(lrc);
+
+ return xe_map_read32(lrc_to_xe(lrc), &map);
+}
+
+u32 xe_lrc_start_seqno_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_start_seqno_ggtt_addr(lrc);
+}
+
+u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_parallel_ggtt_addr(lrc);
+}
+
+struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
+{
+ return __xe_lrc_parallel_map(lrc);
+}
+
+static int instr_dw(u32 cmd_header)
+{
+ /* GFXPIPE "SINGLE_DW" opcodes are a single dword */
+ if ((cmd_header & (XE_INSTR_CMD_TYPE | GFXPIPE_PIPELINE)) ==
+ GFXPIPE_SINGLE_DW_CMD(0, 0))
+ return 1;
+
+ /* 3DSTATE_SO_DECL_LIST has a 9-bit dword length rather than 8 */
+ if ((cmd_header & GFXPIPE_MATCH_MASK) == CMD_3DSTATE_SO_DECL_LIST)
+ return REG_FIELD_GET(CMD_3DSTATE_SO_DECL_LIST_DW_LEN, cmd_header) + 2;
+
+ /* Most instructions have the # of dwords (minus 2) in 7:0 */
+ return REG_FIELD_GET(XE_INSTR_LEN_MASK, cmd_header) + 2;
+}
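+
+/*
+ * Worked example (illustrative): for a typical GFXPIPE command whose 7:0
+ * length field reads 3, instr_dw() returns 3 + 2 == 5 total dwords, since
+ * the hardware encodes the total length minus two.
+ */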
+
+static int dump_mi_command(struct drm_printer *p,
+ struct xe_gt *gt,
+ u32 *dw,
+ int remaining_dw)
+{
+ u32 inst_header = *dw;
+ u32 numdw = instr_dw(inst_header);
+ u32 opcode = REG_FIELD_GET(MI_OPCODE, inst_header);
+ int num_noop;
+
+ /* First check for commands that don't have/use a '# DW' field */
+ switch (inst_header & MI_OPCODE) {
+ case MI_NOOP:
+ num_noop = 1;
+ while (num_noop < remaining_dw &&
+ (*(++dw) & REG_GENMASK(31, 23)) == MI_NOOP)
+ num_noop++;
+ drm_printf(p, "[%#010x] MI_NOOP (%d dwords)\n", inst_header, num_noop);
+ return num_noop;
+
+ case MI_TOPOLOGY_FILTER:
+ drm_printf(p, "[%#010x] MI_TOPOLOGY_FILTER\n", inst_header);
+ return 1;
+
+ case MI_BATCH_BUFFER_END:
+ drm_printf(p, "[%#010x] MI_BATCH_BUFFER_END\n", inst_header);
+ /* Return 'remaining_dw' to consume the rest of the LRC */
+ return remaining_dw;
+ }
+
+ /*
+ * Any remaining commands include a # of dwords. We should make sure
+ * it doesn't exceed the remaining size of the LRC.
+ */
+ if (xe_gt_WARN_ON(gt, numdw > remaining_dw))
+ numdw = remaining_dw;
+
+ switch (inst_header & MI_OPCODE) {
+ case MI_LOAD_REGISTER_IMM:
+ drm_printf(p, "[%#010x] MI_LOAD_REGISTER_IMM: %d regs\n",
+ inst_header, (numdw - 1) / 2);
+ for (int i = 1; i < numdw; i += 2)
+ drm_printf(p, " - %#6x = %#010x\n", dw[i], dw[i + 1]);
+ return numdw;
+
+ case MI_FORCE_WAKEUP:
+ drm_printf(p, "[%#010x] MI_FORCE_WAKEUP\n", inst_header);
+ return numdw;
+
+ default:
+ drm_printf(p, "[%#010x] unknown MI opcode %#x, likely %d dwords\n",
+ inst_header, opcode, numdw);
+ return numdw;
+ }
+}
+
+static int dump_gfxpipe_command(struct drm_printer *p,
+ struct xe_gt *gt,
+ u32 *dw,
+ int remaining_dw)
+{
+ u32 numdw = instr_dw(*dw);
+ u32 pipeline = REG_FIELD_GET(GFXPIPE_PIPELINE, *dw);
+ u32 opcode = REG_FIELD_GET(GFXPIPE_OPCODE, *dw);
+ u32 subopcode = REG_FIELD_GET(GFXPIPE_SUBOPCODE, *dw);
+
+ /*
+ * Make sure we haven't mis-parsed a number of dwords that exceeds the
+ * remaining size of the LRC.
+ */
+ if (xe_gt_WARN_ON(gt, numdw > remaining_dw))
+ numdw = remaining_dw;
+
+ switch (*dw & GFXPIPE_MATCH_MASK) {
+#define MATCH(cmd) \
+ case cmd: \
+ drm_printf(p, "[%#010x] " #cmd " (%d dwords)\n", *dw, numdw); \
+ return numdw
+#define MATCH3D(cmd) \
+ case CMD_##cmd: \
+ drm_printf(p, "[%#010x] " #cmd " (%d dwords)\n", *dw, numdw); \
+ return numdw
+
+ MATCH(STATE_BASE_ADDRESS);
+ MATCH(STATE_SIP);
+ MATCH(GPGPU_CSR_BASE_ADDRESS);
+ MATCH(STATE_COMPUTE_MODE);
+ MATCH3D(3DSTATE_BTD);
+
+ MATCH3D(3DSTATE_VF_STATISTICS);
+
+ MATCH(PIPELINE_SELECT);
+
+ MATCH3D(3DSTATE_DRAWING_RECTANGLE_FAST);
+ MATCH3D(3DSTATE_CLEAR_PARAMS);
+ MATCH3D(3DSTATE_DEPTH_BUFFER);
+ MATCH3D(3DSTATE_STENCIL_BUFFER);
+ MATCH3D(3DSTATE_HIER_DEPTH_BUFFER);
+ MATCH3D(3DSTATE_VERTEX_BUFFERS);
+ MATCH3D(3DSTATE_VERTEX_ELEMENTS);
+ MATCH3D(3DSTATE_INDEX_BUFFER);
+ MATCH3D(3DSTATE_VF);
+ MATCH3D(3DSTATE_MULTISAMPLE);
+ MATCH3D(3DSTATE_CC_STATE_POINTERS);
+ MATCH3D(3DSTATE_SCISSOR_STATE_POINTERS);
+ MATCH3D(3DSTATE_VS);
+ MATCH3D(3DSTATE_GS);
+ MATCH3D(3DSTATE_CLIP);
+ MATCH3D(3DSTATE_SF);
+ MATCH3D(3DSTATE_WM);
+ MATCH3D(3DSTATE_CONSTANT_VS);
+ MATCH3D(3DSTATE_CONSTANT_GS);
+ MATCH3D(3DSTATE_SAMPLE_MASK);
+ MATCH3D(3DSTATE_CONSTANT_HS);
+ MATCH3D(3DSTATE_CONSTANT_DS);
+ MATCH3D(3DSTATE_HS);
+ MATCH3D(3DSTATE_TE);
+ MATCH3D(3DSTATE_DS);
+ MATCH3D(3DSTATE_STREAMOUT);
+ MATCH3D(3DSTATE_SBE);
+ MATCH3D(3DSTATE_PS);
+ MATCH3D(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP);
+ MATCH3D(3DSTATE_CPS_POINTERS);
+ MATCH3D(3DSTATE_VIEWPORT_STATE_POINTERS_CC);
+ MATCH3D(3DSTATE_BLEND_STATE_POINTERS);
+ MATCH3D(3DSTATE_BINDING_TABLE_POINTERS_VS);
+ MATCH3D(3DSTATE_BINDING_TABLE_POINTERS_HS);
+ MATCH3D(3DSTATE_BINDING_TABLE_POINTERS_DS);
+ MATCH3D(3DSTATE_BINDING_TABLE_POINTERS_GS);
+ MATCH3D(3DSTATE_BINDING_TABLE_POINTERS_PS);
+ MATCH3D(3DSTATE_SAMPLER_STATE_POINTERS_VS);
+ MATCH3D(3DSTATE_SAMPLER_STATE_POINTERS_HS);
+ MATCH3D(3DSTATE_SAMPLER_STATE_POINTERS_DS);
+ MATCH3D(3DSTATE_SAMPLER_STATE_POINTERS_GS);
+ MATCH3D(3DSTATE_SAMPLER_STATE_POINTERS_PS);
+ MATCH3D(3DSTATE_VF_INSTANCING);
+ MATCH3D(3DSTATE_VF_SGVS);
+ MATCH3D(3DSTATE_VF_TOPOLOGY);
+ MATCH3D(3DSTATE_WM_CHROMAKEY);
+ MATCH3D(3DSTATE_PS_BLEND);
+ MATCH3D(3DSTATE_WM_DEPTH_STENCIL);
+ MATCH3D(3DSTATE_PS_EXTRA);
+ MATCH3D(3DSTATE_RASTER);
+ MATCH3D(3DSTATE_SBE_SWIZ);
+ MATCH3D(3DSTATE_WM_HZ_OP);
+ MATCH3D(3DSTATE_VF_COMPONENT_PACKING);
+ MATCH3D(3DSTATE_VF_SGVS_2);
+ MATCH3D(3DSTATE_VFG);
+ MATCH3D(3DSTATE_URB_ALLOC_VS);
+ MATCH3D(3DSTATE_URB_ALLOC_HS);
+ MATCH3D(3DSTATE_URB_ALLOC_DS);
+ MATCH3D(3DSTATE_URB_ALLOC_GS);
+ MATCH3D(3DSTATE_SO_BUFFER_INDEX_0);
+ MATCH3D(3DSTATE_SO_BUFFER_INDEX_1);
+ MATCH3D(3DSTATE_SO_BUFFER_INDEX_2);
+ MATCH3D(3DSTATE_SO_BUFFER_INDEX_3);
+ MATCH3D(3DSTATE_PRIMITIVE_REPLICATION);
+ MATCH3D(3DSTATE_TBIMR_TILE_PASS_INFO);
+ MATCH3D(3DSTATE_AMFS);
+ MATCH3D(3DSTATE_DEPTH_BOUNDS);
+ MATCH3D(3DSTATE_AMFS_TEXTURE_POINTERS);
+ MATCH3D(3DSTATE_CONSTANT_TS_POINTER);
+ MATCH3D(3DSTATE_MESH_CONTROL);
+ MATCH3D(3DSTATE_MESH_DISTRIB);
+ MATCH3D(3DSTATE_TASK_REDISTRIB);
+ MATCH3D(3DSTATE_MESH_SHADER);
+ MATCH3D(3DSTATE_MESH_SHADER_DATA);
+ MATCH3D(3DSTATE_TASK_CONTROL);
+ MATCH3D(3DSTATE_TASK_SHADER);
+ MATCH3D(3DSTATE_TASK_SHADER_DATA);
+ MATCH3D(3DSTATE_URB_ALLOC_MESH);
+ MATCH3D(3DSTATE_URB_ALLOC_TASK);
+ MATCH3D(3DSTATE_CLIP_MESH);
+ MATCH3D(3DSTATE_SBE_MESH);
+ MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER);
+
+ MATCH3D(3DSTATE_DRAWING_RECTANGLE);
+ MATCH3D(3DSTATE_CHROMA_KEY);
+ MATCH3D(3DSTATE_POLY_STIPPLE_OFFSET);
+ MATCH3D(3DSTATE_POLY_STIPPLE_PATTERN);
+ MATCH3D(3DSTATE_LINE_STIPPLE);
+ MATCH3D(3DSTATE_AA_LINE_PARAMETERS);
+ MATCH3D(3DSTATE_MONOFILTER_SIZE);
+ MATCH3D(3DSTATE_PUSH_CONSTANT_ALLOC_VS);
+ MATCH3D(3DSTATE_PUSH_CONSTANT_ALLOC_HS);
+ MATCH3D(3DSTATE_PUSH_CONSTANT_ALLOC_DS);
+ MATCH3D(3DSTATE_PUSH_CONSTANT_ALLOC_GS);
+ MATCH3D(3DSTATE_PUSH_CONSTANT_ALLOC_PS);
+ MATCH3D(3DSTATE_SO_DECL_LIST);
+ MATCH3D(3DSTATE_SO_BUFFER);
+ MATCH3D(3DSTATE_BINDING_TABLE_POOL_ALLOC);
+ MATCH3D(3DSTATE_SAMPLE_PATTERN);
+ MATCH3D(3DSTATE_3D_MODE);
+ MATCH3D(3DSTATE_SUBSLICE_HASH_TABLE);
+ MATCH3D(3DSTATE_SLICE_TABLE_STATE_POINTERS);
+ MATCH3D(3DSTATE_PTBR_TILE_PASS_INFO);
+
+ default:
+ drm_printf(p, "[%#010x] unknown GFXPIPE command (pipeline=%#x, opcode=%#x, subopcode=%#x), likely %d dwords\n",
+ *dw, pipeline, opcode, subopcode, numdw);
+ return numdw;
+ }
+}
+
+void xe_lrc_dump_default(struct drm_printer *p,
+ struct xe_gt *gt,
+ enum xe_engine_class hwe_class)
+{
+ u32 *dw;
+ int remaining_dw, num_dw;
+
+ if (!gt->default_lrc[hwe_class]) {
+ drm_printf(p, "No default LRC for class %d\n", hwe_class);
+ return;
+ }
+
+ /*
+ * Skip the beginning of the LRC since it contains the per-process
+ * hardware status page.
+ */
+ dw = gt->default_lrc[hwe_class] + LRC_PPHWSP_SIZE;
+ remaining_dw = (xe_lrc_size(gt_to_xe(gt), hwe_class) - LRC_PPHWSP_SIZE) / 4;
+
+ while (remaining_dw > 0) {
+ if ((*dw & XE_INSTR_CMD_TYPE) == XE_INSTR_MI) {
+ num_dw = dump_mi_command(p, gt, dw, remaining_dw);
+ } else if ((*dw & XE_INSTR_CMD_TYPE) == XE_INSTR_GFXPIPE) {
+ num_dw = dump_gfxpipe_command(p, gt, dw, remaining_dw);
+ } else {
+ num_dw = min(instr_dw(*dw), remaining_dw);
+ drm_printf(p, "[%#10x] Unknown instruction of type %#x, likely %d dwords\n",
+ *dw, REG_FIELD_GET(XE_INSTR_CMD_TYPE, *dw),
+ num_dw);
+ }
+
+ dw += num_dw;
+ remaining_dw -= num_dw;
+ }
+}
+
+struct instr_state {
+ u32 instr;
+ u16 num_dw;
+};
+
+static const struct instr_state xe_hpg_svg_state[] = {
+ { .instr = CMD_3DSTATE_CONSTANT_VS, .num_dw = 11 },
+ { .instr = CMD_3DSTATE_CONSTANT_HS, .num_dw = 11 },
+ { .instr = CMD_3DSTATE_CONSTANT_DS, .num_dw = 11 },
+ { .instr = CMD_3DSTATE_CONSTANT_GS, .num_dw = 11 },
+ { .instr = CMD_3DSTATE_VERTEX_ELEMENTS, .num_dw = 69 },
+ { .instr = CMD_3DSTATE_VF_COMPONENT_PACKING, .num_dw = 5 },
+ { .instr = CMD_3DSTATE_VF_SGVS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_VF_SGVS_2, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_VS, .num_dw = 9 },
+ { .instr = CMD_3DSTATE_BINDING_TABLE_POINTERS_VS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_SAMPLER_STATE_POINTERS_VS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_URB_ALLOC_VS, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_STREAMOUT, .num_dw = 5 },
+ { .instr = CMD_3DSTATE_SO_BUFFER_INDEX_0, .num_dw = 8 },
+ { .instr = CMD_3DSTATE_SO_BUFFER_INDEX_1, .num_dw = 8 },
+ { .instr = CMD_3DSTATE_SO_BUFFER_INDEX_2, .num_dw = 8 },
+ { .instr = CMD_3DSTATE_SO_BUFFER_INDEX_3, .num_dw = 8 },
+ { .instr = CMD_3DSTATE_CLIP, .num_dw = 4 },
+ { .instr = CMD_3DSTATE_PRIMITIVE_REPLICATION, .num_dw = 6 },
+ { .instr = CMD_3DSTATE_CLIP_MESH, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_SF, .num_dw = 4 },
+ { .instr = CMD_3DSTATE_SCISSOR_STATE_POINTERS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_RASTER, .num_dw = 5 },
+ { .instr = CMD_3DSTATE_TBIMR_TILE_PASS_INFO, .num_dw = 4 },
+ { .instr = CMD_3DSTATE_WM_HZ_OP, .num_dw = 6 },
+ { .instr = CMD_3DSTATE_MULTISAMPLE, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_HS, .num_dw = 9 },
+ { .instr = CMD_3DSTATE_BINDING_TABLE_POINTERS_HS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_SAMPLER_STATE_POINTERS_HS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_URB_ALLOC_HS, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_TASK_CONTROL, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_TASK_SHADER, .num_dw = 7 },
+ { .instr = CMD_3DSTATE_TASK_SHADER_DATA, .num_dw = 10 },
+ { .instr = CMD_3DSTATE_URB_ALLOC_TASK, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_TE, .num_dw = 5 },
+ { .instr = CMD_3DSTATE_TASK_REDISTRIB, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_DS, .num_dw = 11 },
+ { .instr = CMD_3DSTATE_BINDING_TABLE_POINTERS_DS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_SAMPLER_STATE_POINTERS_DS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_URB_ALLOC_DS, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_GS, .num_dw = 10 },
+ { .instr = CMD_3DSTATE_BINDING_TABLE_POINTERS_GS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_SAMPLER_STATE_POINTERS_GS, .num_dw = 2 },
+ { .instr = CMD_3DSTATE_URB_ALLOC_GS, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_MESH_CONTROL, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_MESH_SHADER_DATA, .num_dw = 10 },
+ { .instr = CMD_3DSTATE_URB_ALLOC_MESH, .num_dw = 3 },
+ { .instr = CMD_3DSTATE_MESH_SHADER, .num_dw = 8 },
+ { .instr = CMD_3DSTATE_DRAWING_RECTANGLE, .num_dw = 4 },
+};
+
+void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb)
+{
+ struct xe_gt *gt = q->hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ const struct instr_state *state_table = NULL;
+ int state_table_size = 0;
+
+ /*
+ * At the moment we only need to emit non-register state for the RCS
+ * engine.
+ */
+ if (q->hwe->class != XE_ENGINE_CLASS_RENDER)
+ return;
+
+ switch (GRAPHICS_VERx100(xe)) {
+ case 1255:
+ case 1270 ... 2004:
+ state_table = xe_hpg_svg_state;
+ state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
+ break;
+ default:
+ xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n",
+ GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
+ return;
+ }
+
+ for (int i = 0; i < state_table_size; i++) {
+ u32 instr = state_table[i].instr;
+ u16 num_dw = state_table[i].num_dw;
+ bool is_single_dw = ((instr & GFXPIPE_PIPELINE) == PIPELINE_SINGLE_DW);
+
+ xe_gt_assert(gt, (instr & XE_INSTR_CMD_TYPE) == XE_INSTR_GFXPIPE);
+ xe_gt_assert(gt, num_dw != 0);
+ xe_gt_assert(gt, is_single_dw ^ (num_dw > 1));
+
+ /*
+ * Xe2's SVG context is the same as the one on DG2 / MTL
+ * except that 3DSTATE_DRAWING_RECTANGLE (non-pipelined) has
+ * been replaced by 3DSTATE_DRAWING_RECTANGLE_FAST (pipelined).
+ * Just make the replacement here rather than defining a
+ * whole separate table for the single trivial change.
+ */
+ if (GRAPHICS_VER(xe) >= 20 &&
+ instr == CMD_3DSTATE_DRAWING_RECTANGLE)
+ instr = CMD_3DSTATE_DRAWING_RECTANGLE_FAST;
+
+ bb->cs[bb->len] = instr;
+ if (!is_single_dw)
+ bb->cs[bb->len] |= (num_dw - 2);
+
+ bb->len += num_dw;
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
new file mode 100644
index 000000000..28b1d3f40
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _XE_LRC_H_
+#define _XE_LRC_H_
+
+#include "xe_lrc_types.h"
+
+struct drm_printer;
+struct xe_bb;
+struct xe_device;
+struct xe_exec_queue;
+enum xe_engine_class;
+struct xe_hw_engine;
+struct xe_vm;
+
+#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
+
+int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size);
+void xe_lrc_finish(struct xe_lrc *lrc);
+
+size_t xe_lrc_size(struct xe_device *xe, enum xe_engine_class class);
+u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
+
+void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head);
+u32 xe_lrc_ring_head(struct xe_lrc *lrc);
+u32 xe_lrc_ring_space(struct xe_lrc *lrc);
+void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size);
+
+u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
+u32 *xe_lrc_regs(struct xe_lrc *lrc);
+
+u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr);
+void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val);
+
+u64 xe_lrc_descriptor(struct xe_lrc *lrc);
+
+u32 xe_lrc_seqno_ggtt_addr(struct xe_lrc *lrc);
+struct dma_fence *xe_lrc_create_seqno_fence(struct xe_lrc *lrc);
+s32 xe_lrc_seqno(struct xe_lrc *lrc);
+
+u32 xe_lrc_start_seqno_ggtt_addr(struct xe_lrc *lrc);
+s32 xe_lrc_start_seqno(struct xe_lrc *lrc);
+
+u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc);
+struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc);
+
+size_t xe_lrc_skip_size(struct xe_device *xe);
+
+void xe_lrc_dump_default(struct drm_printer *p,
+ struct xe_gt *gt,
+ enum xe_engine_class);
+
+void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
new file mode 100644
index 000000000..782203360
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_LRC_TYPES_H_
+#define _XE_LRC_TYPES_H_
+
+#include "xe_hw_fence_types.h"
+
+struct xe_bo;
+
+/**
+ * struct xe_lrc - Logical ring context (LRC) and submission ring object
+ */
+struct xe_lrc {
+ /**
+ * @bo: buffer object (memory) for logical ring context, per process HW
+ * status page, and submission ring.
+ */
+ struct xe_bo *bo;
+
+ /** @tile: tile which this LRC belongs to */
+ struct xe_tile *tile;
+
+ /** @flags: LRC flags */
+ u32 flags;
+
+ /** @ring: submission ring state */
+ struct {
+ /** @size: size of submission ring */
+ u32 size;
+ /** @tail: tail of submission ring */
+ u32 tail;
+ /** @old_tail: shadow of tail */
+ u32 old_tail;
+ } ring;
+
+ /** @desc: LRC descriptor */
+ u64 desc;
+
+ /** @fence_ctx: context for hw fence */
+ struct xe_hw_fence_ctx fence_ctx;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_macros.h b/drivers/gpu/drm/xe/xe_macros.h
new file mode 100644
index 000000000..daf56c846
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_macros.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_MACROS_H_
+#define _XE_MACROS_H_
+
+#include <linux/bug.h>
+
+#define XE_WARN_ON WARN_ON
+
+#define XE_IOCTL_DBG(xe, cond) \
+ ((cond) && (drm_dbg(&(xe)->drm, \
+ "Ioctl argument check failed at %s:%d: %s", \
+ __FILE__, __LINE__, #cond), 1))
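+
+/*
+ * Example usage (illustrative; VALID_FLAGS is a made-up placeholder):
+ *
+ *	if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
+ *		return -EINVAL;
+ *
+ * The failing condition is logged at drm debug level and the macro
+ * evaluates to 1, so the check can gate the error return directly.
+ */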
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
new file mode 100644
index 000000000..f62e0c8b6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_MAP_H_
+#define _XE_MAP_H_
+
+#include <linux/iosys-map.h>
+
+#include <xe_device.h>
+
+/**
+ * DOC: Map layer
+ *
+ * All access to any memory shared with a device (both sysmem and vram) in the
+ * XE driver should go through this layer (xe_map). This layer is built on top
+ * of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
+ * and with extra hooks into the XE driver that allows adding asserts to memory
+ * accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
+ */
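+
+/*
+ * Example (illustrative): reading a 32-bit value out of a BO's vmap, which
+ * may live in either sysmem or vram:
+ *
+ *	struct iosys_map map = bo->vmap;
+ *
+ *	iosys_map_incr(&map, offset);
+ *	val = xe_map_read32(xe, &map);
+ */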
+
+static inline void xe_map_memcpy_to(struct xe_device *xe, struct iosys_map *dst,
+ size_t dst_offset, const void *src,
+ size_t len)
+{
+ xe_device_assert_mem_access(xe);
+ iosys_map_memcpy_to(dst, dst_offset, src, len);
+}
+
+static inline void xe_map_memcpy_from(struct xe_device *xe, void *dst,
+ const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ xe_device_assert_mem_access(xe);
+ iosys_map_memcpy_from(dst, src, src_offset, len);
+}
+
+static inline void xe_map_memset(struct xe_device *xe,
+ struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ xe_device_assert_mem_access(xe);
+ iosys_map_memset(dst, offset, value, len);
+}
+
+/* FIXME: We likely should kill these two functions sooner or later */
+static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
+{
+ xe_device_assert_mem_access(xe);
+
+ if (map->is_iomem)
+ return readl(map->vaddr_iomem);
+ else
+ return READ_ONCE(*(u32 *)map->vaddr);
+}
+
+static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
+ u32 val)
+{
+ xe_device_assert_mem_access(xe);
+
+ if (map->is_iomem)
+ writel(val, map->vaddr_iomem);
+ else
+ *(u32 *)map->vaddr = val;
+}
+
+#define xe_map_rd(xe__, map__, offset__, type__) ({ \
+ struct xe_device *__xe = xe__; \
+ xe_device_assert_mem_access(__xe); \
+ iosys_map_rd(map__, offset__, type__); \
+})
+
+#define xe_map_wr(xe__, map__, offset__, type__, val__) ({ \
+ struct xe_device *__xe = xe__; \
+ xe_device_assert_mem_access(__xe); \
+ iosys_map_wr(map__, offset__, type__, val__); \
+})
+
+#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
+ struct xe_device *__xe = xe__; \
+ xe_device_assert_mem_access(__xe); \
+ iosys_map_rd_field(map__, struct_offset__, struct_type__, field__); \
+})
+
+#define xe_map_wr_field(xe__, map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct xe_device *__xe = xe__; \
+ xe_device_assert_mem_access(__xe); \
+ iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__); \
+})
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
new file mode 100644
index 000000000..70480c305
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "xe_migrate.h"
+
+#include <linux/bitfield.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/xe_drm.h>
+
+#include "generated/xe_wa_oob.h"
+#include "instructions/xe_mi_commands.h"
+#include "regs/xe_gpu_commands.h"
+#include "tests/xe_test.h"
+#include "xe_assert.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_exec_queue.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_hw_engine.h"
+#include "xe_lrc.h"
+#include "xe_map.h"
+#include "xe_mocs.h"
+#include "xe_pt.h"
+#include "xe_res_cursor.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
+#include "xe_trace.h"
+#include "xe_vm.h"
+#include "xe_wa.h"
+
+/**
+ * struct xe_migrate - migrate context.
+ */
+struct xe_migrate {
+ /** @q: Default exec queue used for migration */
+ struct xe_exec_queue *q;
+ /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
+ struct xe_tile *tile;
+ /** @job_mutex: Timeline mutex for @q. */
+ struct mutex job_mutex;
+ /** @pt_bo: Page-table buffer object. */
+ struct xe_bo *pt_bo;
+ /** @batch_base_ofs: VM offset of the migration batch buffer */
+ u64 batch_base_ofs;
+ /** @usm_batch_base_ofs: VM offset of the usm batch buffer */
+ u64 usm_batch_base_ofs;
+ /** @cleared_mem_ofs: VM offset of the NULL-mapped region used as a cleared-memory source. */
+ u64 cleared_mem_ofs;
+ /**
+ * @fence: dma-fence representing the last migration job batch.
+ * Protected by @job_mutex.
+ */
+ struct dma_fence *fence;
+ /**
+ * @vm_update_sa: For integrated, used to suballocate page-tables
+ * out of the pt_bo.
+ */
+ struct drm_suballoc_manager vm_update_sa;
+ /** @min_chunk_size: For dgfx, minimum chunk size */
+ u64 min_chunk_size;
+};
+
+#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
+#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
+#define NUM_KERNEL_PDE 17
+#define NUM_PT_SLOTS 32
+#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
+
+/**
+ * xe_tile_migrate_engine() - Get this tile's migrate engine.
+ * @tile: The tile.
+ *
+ * Returns the default migrate engine of this tile.
+ * TODO: Perhaps this function is slightly misplaced, and even unneeded?
+ *
+ * Return: The default migrate engine
+ */
+struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
+{
+ return tile->migrate->q;
+}
+
+static void xe_migrate_fini(struct drm_device *dev, void *arg)
+{
+ struct xe_migrate *m = arg;
+
+ xe_vm_lock(m->q->vm, false);
+ xe_bo_unpin(m->pt_bo);
+ xe_vm_unlock(m->q->vm);
+
+ dma_fence_put(m->fence);
+ xe_bo_put(m->pt_bo);
+ drm_suballoc_manager_fini(&m->vm_update_sa);
+ mutex_destroy(&m->job_mutex);
+ xe_vm_close_and_put(m->q->vm);
+ xe_exec_queue_put(m->q);
+}
+
+static u64 xe_migrate_vm_addr(u64 slot, u32 level)
+{
+ XE_WARN_ON(slot >= NUM_PT_SLOTS);
+
+ /* First slot is reserved for mapping of PT bo and bb, start from 1 */
+ return (slot + 1ULL) << xe_pt_shift(level + 1);
+}
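+
+/*
+ * Worked example (illustrative, assuming the usual 4KiB/512-entry page-table
+ * geometry so that xe_pt_shift(1) == 21): a level-0 PT in slot 0 is placed at
+ * 1ULL << 21 == 2MiB, so the slots are laid out 2MiB apart, starting right
+ * after the reserved first slot.
+ */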
+
+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
+{
+ /*
+ * Subtract the DPA base to get the correct offset into the identity
+ * table for the migrate offset
+ */
+ addr -= xe->mem.vram.dpa_base;
+ return addr + (256ULL << xe_pt_shift(2));
+}
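+
+/*
+ * Worked example (illustrative, same geometry assumption): xe_pt_shift(2) ==
+ * 30, so the identity map begins at 256ULL << 30 == 256GiB in the migrate VM;
+ * a VRAM page at dpa_base + 0x1000 is thus reachable at GPU address
+ * 256GiB + 0x1000, matching the "Identity map the entire vram at 256GiB
+ * offset" comment in xe_migrate_prepare_vm() below.
+ */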
+
+static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
+ struct xe_vm *vm)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ u8 id = tile->id;
+ u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
+ u32 map_ofs, level, i;
+ struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
+ u64 entry;
+
+ /* Can't bump NUM_PT_SLOTS too high */
+ BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE);
+ /* Must be a multiple of 64K to support all platforms */
+ BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
+ /* And one slot reserved for the 4KiB page table updates */
+ BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
+
+ /* Need to be sure everything fits in the first PT, or create more */
+ xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
+
+ bo = xe_bo_create_pin_map(vm->xe, tile, vm,
+ num_entries * XE_PAGE_SIZE,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
+ xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
+
+ map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
+
+ /* Map the entire BO in our level 0 pt */
+ for (i = 0, level = 0; i < num_entries; level++) {
+ entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
+ pat_index, 0);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
+
+ if (vm->flags & XE_VM_FLAG_64K)
+ i += 16;
+ else
+ i += 1;
+ }
+
+ if (!IS_DGFX(xe)) {
+ /* Write out batch too */
+ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
+ for (i = 0; i < batch->size;
+ i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+ XE_PAGE_SIZE) {
+ entry = vm->pt_ops->pte_encode_bo(batch, i,
+ pat_index, 0);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+ entry);
+ level++;
+ }
+ if (xe->info.has_usm) {
+ xe_tile_assert(tile, batch->size == SZ_1M);
+
+ batch = tile->primary_gt->usm.bb_pool->bo;
+ m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+ xe_tile_assert(tile, batch->size == SZ_512K);
+
+ for (i = 0; i < batch->size;
+ i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+ XE_PAGE_SIZE) {
+ entry = vm->pt_ops->pte_encode_bo(batch, i,
+ pat_index, 0);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+ entry);
+ level++;
+ }
+ }
+ } else {
+ u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
+
+ m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+
+ if (xe->info.has_usm) {
+ batch = tile->primary_gt->usm.bb_pool->bo;
+ batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
+ m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
+ }
+ }
+
+ for (level = 1; level < num_level; level++) {
+ u32 flags = 0;
+
+ if (vm->flags & XE_VM_FLAG_64K && level == 1)
+ flags = XE_PDE_64K;
+
+ entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+ XE_PAGE_SIZE, pat_index);
+ xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
+ entry | flags);
+ }
+
+ /* Write PDE's that point to our BO. */
+ for (i = 0; i < num_entries - num_level; i++) {
+ entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+ pat_index);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
+ (i + 1) * 8, u64, entry);
+ }
+
+ /* Set up a 1GiB NULL mapping at 255GiB offset. */
+ level = 2;
+ xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
+ vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
+ | XE_PTE_NULL);
+ m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));
+
+ /* Identity map the entire vram at 256GiB offset */
+ if (IS_DGFX(xe)) {
+ u64 pos, ofs, flags;
+
+ level = 2;
+ ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
+ flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
+ true, 0);
+
+ /*
+ * Use 1GB pages; it shouldn't matter that the physical amount of
+ * vram is smaller, as long as we don't access the excess range.
+ */
+ for (pos = xe->mem.vram.dpa_base;
+ pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
+ pos += SZ_1G, ofs += 8)
+ xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
+ }
+
+ /*
+ * Example layout created above, with root level = 3:
+ * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
+ * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
+ * [PT9...PT28]: Userspace PT's for VM_BIND, 4 KiB PTE's
+ * [PT29 = PDE 0] [PT30 = PDE 1] [PT31 = PDE 2]
+ *
+ * This makes the lowest part of the VM point to the pagetables.
+ * Hence the lowest 2M in the VM points to itself. With a few writes
+ * and flushes, other parts of the VM can then be used for copying and
+ * clearing.
+ *
+ * For performance, the kernel reserves PDEs, so about 20 are left
+ * for async VM updates.
+ *
+ * To make it easier to work with, each scratch PT is put in slot (1 + PT #)
+ * everywhere; this allows lockless updates to scratch pages by using
+ * the different addresses in the VM.
+ */
+#define NUM_VMUSA_UNIT_PER_PAGE 32
+#define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
+#define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
+ drm_suballoc_manager_init(&m->vm_update_sa,
+ (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+ NUM_VMUSA_UNIT_PER_PAGE, 0);
+
+ m->pt_bo = bo;
+ return 0;
+}
+
+/*
+ * Due to workaround 16017236439, odd instance hardware copy engines are
+ * faster than even instance ones.
+ * This function returns the mask of all fast copy engines plus the
+ * reserved copy engine, to be used as the logical mask for the migrate engine.
+ * Including the reserved copy engine is required to avoid deadlocks where
+ * migrate jobs servicing the faults get stuck behind the job that faulted.
+ */
+static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
+{
+ u32 logical_mask = 0;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id) {
+ if (hwe->class != XE_ENGINE_CLASS_COPY)
+ continue;
+
+ if (!XE_WA(gt, 16017236439) ||
+ xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+ logical_mask |= BIT(hwe->logical_instance);
+ }
+
+ return logical_mask;
+}
+
+/**
+ * xe_migrate_init() - Initialize a migrate context
+ * @tile: Back-pointer to the tile we're initializing for.
+ *
+ * Return: Pointer to a migrate context on success. Error pointer on error.
+ */
+struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *primary_gt = tile->primary_gt;
+ struct xe_migrate *m;
+ struct xe_vm *vm;
+ int err;
+
+ m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return ERR_PTR(-ENOMEM);
+
+ m->tile = tile;
+
+ /* Special layout, prepared below. */
+ vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
+ XE_VM_FLAG_SET_TILE_ID(tile));
+ if (IS_ERR(vm))
+ return ERR_CAST(vm);
+
+ xe_vm_lock(vm, false);
+ err = xe_migrate_prepare_vm(tile, m, vm);
+ xe_vm_unlock(vm);
+ if (err) {
+ xe_vm_close_and_put(vm);
+ return ERR_PTR(err);
+ }
+
+ if (xe->info.has_usm) {
+ struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
+ XE_ENGINE_CLASS_COPY,
+ primary_gt->usm.reserved_bcs_instance,
+ false);
+ u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
+
+ if (!hwe || !logical_mask)
+ return ERR_PTR(-EINVAL);
+
+ m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
+ EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY);
+ } else {
+ m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
+ XE_ENGINE_CLASS_COPY,
+ EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT);
+ }
+ if (IS_ERR(m->q)) {
+ xe_vm_close_and_put(vm);
+ return ERR_CAST(m->q);
+ }
+
+ mutex_init(&m->job_mutex);
+
+ err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
+ if (err)
+ return ERR_PTR(err);
+
+ if (IS_DGFX(xe)) {
+ if (xe_device_has_flat_ccs(xe))
+ /* min chunk size corresponds to 4K of CCS Metadata */
+ m->min_chunk_size = SZ_4K * SZ_64K /
+ xe_device_ccs_bytes(xe, SZ_64K);
+ else
+ /* Somewhat arbitrary to avoid a huge amount of blits */
+ m->min_chunk_size = SZ_64K;
+ m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
+ drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
+ (unsigned long long)m->min_chunk_size);
+ }
+
+ return m;
+}
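+
+/*
+ * Worked example (illustrative, assuming a 1:256 main-memory-to-CCS ratio so
+ * that xe_device_ccs_bytes(xe, SZ_64K) == 256): min_chunk_size above becomes
+ * SZ_4K * SZ_64K / 256 == SZ_1M, i.e. 1MiB of VRAM per 4KiB of CCS metadata.
+ */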
+
+static u64 max_mem_transfer_per_pass(struct xe_device *xe)
+{
+ if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
+ return MAX_CCS_LIMITED_TRANSFER;
+
+ return MAX_PREEMPTDISABLE_TRANSFER;
+}
+
+static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
+{
+ struct xe_device *xe = tile_to_xe(m->tile);
+ u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
+
+ if (mem_type_is_vram(cur->mem_type)) {
+ /*
+ * VRAM we want to blit in chunks with sizes aligned to
+ * min_chunk_size in order for the offset to CCS metadata to be
+ * page-aligned. If it's the last chunk it may be smaller.
+ *
+ * Another constraint is that we need to limit the blit to
+ * the VRAM block size, unless size is smaller than
+ * min_chunk_size.
+ */
+ u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
+
+ size = min_t(u64, size, chunk);
+ if (size > m->min_chunk_size)
+ size = round_down(size, m->min_chunk_size);
+ }
+
+ return size;
+}
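+
+/*
+ * Example (illustrative): with min_chunk_size = 1 MiB, a contiguous 5.5 MiB
+ * VRAM fragment is cut to 5 MiB by the round_down() above so that the offset
+ * to the CCS metadata of each pass stays page-aligned; the remaining 0.5 MiB
+ * is handled by a later pass.
+ */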
+
+static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
+{
+ /* If the chunk is not fragmented, allow identity map. */
+ return cur->size >= size;
+}
+
+static u32 pte_update_size(struct xe_migrate *m,
+ bool is_vram,
+ struct ttm_resource *res,
+ struct xe_res_cursor *cur,
+ u64 *L0, u64 *L0_ofs, u32 *L0_pt,
+ u32 cmd_size, u32 pt_ofs, u32 avail_pts)
+{
+ u32 cmds = 0;
+
+ *L0_pt = pt_ofs;
+ if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
+ /* Offset into identity map. */
+ *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
+ cur->start + vram_region_gpu_offset(res));
+ cmds += cmd_size;
+ } else {
+ /* Clip L0 to available size */
+ u64 size = min(*L0, (u64)avail_pts * SZ_2M);
+ u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+
+ *L0 = size;
+ *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);
+
+ /* MI_STORE_DATA_IMM */
+ cmds += 3 * DIV_ROUND_UP(num_4k_pages, 0x1ff);
+
+ /* PDE qwords */
+ cmds += num_4k_pages * 2;
+
+ /* Each chunk has a single blit command */
+ cmds += cmd_size;
+ }
+
+ return cmds;
+}
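+
+/*
+ * Worked example (illustrative): an 8 MiB chunk that cannot use the identity
+ * map needs num_4k_pages = 2048 PTEs, i.e. DIV_ROUND_UP(2048, 0x1ff) = 5
+ * MI_STORE_DATA_IMM headers of 3 dwords each plus 2048 * 2 dwords of PTE
+ * payload: 15 + 4096 dwords of PTE setup before the copy command itself.
+ */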
+
+static void emit_pte(struct xe_migrate *m,
+ struct xe_bb *bb, u32 at_pt,
+ bool is_vram, bool is_comp_pte,
+ struct xe_res_cursor *cur,
+ u32 size, struct ttm_resource *res)
+{
+ struct xe_device *xe = tile_to_xe(m->tile);
+ struct xe_vm *vm = m->q->vm;
+ u16 pat_index;
+ u32 ptes;
+ u64 ofs = at_pt * XE_PAGE_SIZE;
+ u64 cur_ofs;
+
+ /* Indirect access needs compression enabled uncached PAT index */
+ if (GRAPHICS_VERx100(xe) >= 2000)
+ pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
+ xe->pat.idx[XE_CACHE_WB];
+ else
+ pat_index = xe->pat.idx[XE_CACHE_WB];
+
+ ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+
+ while (ptes) {
+ u32 chunk = min(0x1ffU, ptes);
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = ofs;
+ bb->cs[bb->len++] = 0;
+
+ cur_ofs = ofs;
+ ofs += chunk * 8;
+ ptes -= chunk;
+
+ while (chunk--) {
+ u64 addr, flags = 0;
+ bool devmem = false;
+
+ addr = xe_res_dma(cur) & PAGE_MASK;
+ if (is_vram) {
+ if (vm->flags & XE_VM_FLAG_64K) {
+ u64 va = cur_ofs * XE_PAGE_SIZE / 8;
+
+ xe_assert(xe, (va & (SZ_64K - 1)) ==
+ (addr & (SZ_64K - 1)));
+
+ flags |= XE_PTE_PS64;
+ }
+
+ addr += vram_region_gpu_offset(res);
+ devmem = true;
+ }
+
+ addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ 0, devmem, flags);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+
+ xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
+ cur_ofs += 8;
+ }
+ }
+}
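+
+/*
+ * Example (illustrative): a 1 MiB chunk yields 256 PTEs, which fit in a
+ * single MI_STORE_DATA_IMM since 256 is below the 0x1ff qword limit applied
+ * above.
+ */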
+
+#define EMIT_COPY_CCS_DW 5
+static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
+ u64 dst_ofs, bool dst_is_indirect,
+ u64 src_ofs, bool src_is_indirect,
+ u32 size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 *cs = bb->cs + bb->len;
+ u32 num_ccs_blks;
+ u32 num_pages;
+ u32 ccs_copy_size;
+ u32 mocs;
+
+ if (GRAPHICS_VERx100(xe) >= 2000) {
+ num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));
+
+ ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
+ mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);
+
+ } else {
+ num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
+ NUM_CCS_BYTES_PER_BLOCK);
+ xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));
+
+ ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
+ mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
+ }
+
+ *cs++ = XY_CTRL_SURF_COPY_BLT |
+ (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
+ (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
+ ccs_copy_size;
+ *cs++ = lower_32_bits(src_ofs);
+ *cs++ = upper_32_bits(src_ofs) | mocs;
+ *cs++ = lower_32_bits(dst_ofs);
+ *cs++ = upper_32_bits(dst_ofs) | mocs;
+
+ bb->len = cs - bb->cs;
+}
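+
+/*
+ * Example (illustrative, assuming the usual 1 CCS byte per 256 main-memory
+ * bytes and 256-byte CCS blocks): a 1 MiB main-surface copy covers 4 KiB of
+ * CCS data, i.e. num_ccs_blks = 16 on pre-Xe2 platforms.
+ */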
+
+#define EMIT_COPY_DW 10
+static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
+ u64 src_ofs, u64 dst_ofs, unsigned int size,
+ unsigned int pitch)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 mocs = 0;
+ u32 tile_y = 0;
+
+ xe_gt_assert(gt, size / pitch <= S16_MAX);
+ xe_gt_assert(gt, pitch / 4 <= S16_MAX);
+ xe_gt_assert(gt, pitch <= U16_MAX);
+
+ if (GRAPHICS_VER(xe) >= 20)
+ mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);
+
+ if (GRAPHICS_VERx100(xe) >= 1250)
+ tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;
+
+ bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
+ bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
+ bb->cs[bb->len++] = lower_32_bits(dst_ofs);
+ bb->cs[bb->len++] = upper_32_bits(dst_ofs);
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = pitch | mocs;
+ bb->cs[bb->len++] = lower_32_bits(src_ofs);
+ bb->cs[bb->len++] = upper_32_bits(src_ofs);
+}
+
+static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
+}
+
+static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
+{
+ return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
+}
+
+static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
+ struct xe_bb *bb,
+ u64 src_ofs, bool src_is_indirect,
+ u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
+ u64 ccs_ofs, bool copy_ccs)
+{
+ struct xe_gt *gt = m->tile->primary_gt;
+ u32 flush_flags = 0;
+
+ if (xe_device_has_flat_ccs(gt_to_xe(gt)) && !copy_ccs && dst_is_indirect) {
+ /*
+ * If the src is already in vram, then it should already
+ * have been cleared by us, or has been populated by the
+ * user. Make sure we copy the CCS aux state as-is.
+ *
+ * Otherwise if the bo doesn't have any CCS metadata attached,
+ * we still need to clear it for security reasons.
+ */
+ u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;
+
+ emit_copy_ccs(gt, bb,
+ dst_ofs, true,
+ ccs_src_ofs, src_is_indirect, dst_size);
+
+ flush_flags = MI_FLUSH_DW_CCS;
+ } else if (copy_ccs) {
+ if (!src_is_indirect)
+ src_ofs = ccs_ofs;
+ else if (!dst_is_indirect)
+ dst_ofs = ccs_ofs;
+
+ xe_gt_assert(gt, src_is_indirect || dst_is_indirect);
+
+ emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
+ src_is_indirect, dst_size);
+ if (dst_is_indirect)
+ flush_flags = MI_FLUSH_DW_CCS;
+ }
+
+ return flush_flags;
+}
+
+/**
+ * xe_migrate_copy() - Copy content of TTM resources.
+ * @m: The migration context.
+ * @src_bo: The buffer object @src is currently bound to.
+ * @dst_bo: If copying between resources created for the same bo, set this to
+ * the same value as @src_bo. If copying between buffer objects, set it to
+ * the buffer object @dst is currently bound to.
+ * @src: The source TTM resource.
+ * @dst: The dst TTM resource.
+ * @copy_only_ccs: If true copy only CCS metadata
+ *
+ * Copies the contents of @src to @dst: On flat CCS devices,
+ * the CCS metadata is copied as well if needed, or if not present,
+ * the CCS metadata of @dst is cleared for security reasons.
+ *
+ * Return: Pointer to a dma_fence representing the last copy batch, or
+ * an error pointer on failure. If there is a failure, any copy operation
+ * started by the function call has been synced.
+ */
+struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
+ struct xe_bo *src_bo,
+ struct xe_bo *dst_bo,
+ struct ttm_resource *src,
+ struct ttm_resource *dst,
+ bool copy_only_ccs)
+{
+ struct xe_gt *gt = m->tile->primary_gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct dma_fence *fence = NULL;
+ u64 size = src_bo->size;
+ struct xe_res_cursor src_it, dst_it, ccs_it;
+ u64 src_L0_ofs, dst_L0_ofs;
+ u32 src_L0_pt, dst_L0_pt;
+ u64 src_L0, dst_L0;
+ int pass = 0;
+ int err;
+ bool src_is_pltt = src->mem_type == XE_PL_TT;
+ bool dst_is_pltt = dst->mem_type == XE_PL_TT;
+ bool src_is_vram = mem_type_is_vram(src->mem_type);
+ bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+ bool copy_ccs = xe_device_has_flat_ccs(xe) &&
+ xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
+ bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
+
+ /* Copying CCS between two different BOs is not supported yet. */
+ if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
+ return ERR_PTR(-EINVAL);
+
+ if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
+ return ERR_PTR(-EINVAL);
+
+ if (!src_is_vram)
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+ else
+ xe_res_first(src, 0, size, &src_it);
+ if (!dst_is_vram)
+ xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
+ else
+ xe_res_first(dst, 0, size, &dst_it);
+
+ if (copy_system_ccs)
+ xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
+ PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
+ &ccs_it);
+
+ while (size) {
+ u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 flush_flags;
+ u32 update_idx;
+ u64 ccs_ofs, ccs_size;
+ u32 ccs_pt;
+
+ bool usm = xe->info.has_usm;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+ dst_L0 = xe_migrate_res_sizes(m, &dst_it);
+
+ drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
+ pass++, src_L0, dst_L0);
+
+ src_L0 = min(src_L0, dst_L0);
+
+ batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt, 0,
+ avail_pts, avail_pts);
+
+ if (copy_system_ccs) {
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
+ &ccs_ofs, &ccs_pt, 0,
+ 2 * avail_pts,
+ avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+ }
+
+ /* Add copy commands size here */
+ batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
+ ((xe_device_has_flat_ccs(xe) ? EMIT_COPY_CCS_DW : 0));
+
+ bb = xe_bb_new(gt, batch_size, usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ goto err_sync;
+ }
+
+ if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
+ xe_res_next(&src_it, src_L0);
+ else
+ emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
+ &src_it, src_L0, src);
+
+ if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
+ xe_res_next(&dst_it, src_L0);
+ else
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
+ &dst_it, src_L0, dst);
+
+ if (copy_system_ccs)
+ emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ if (!copy_only_ccs)
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
+
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
+ IS_DGFX(xe) ? src_is_vram : src_is_pltt,
+ dst_L0_ofs,
+ IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
+ src_L0, ccs_ofs, copy_ccs);
+
+ mutex_lock(&m->job_mutex);
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, flush_flags);
+ if (!fence) {
+ err = job_add_deps(job, src_bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (!err && src_bo != dst_bo)
+ err = job_add_deps(job, dst_bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (err)
+ goto err_job;
+ }
+
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ size -= src_L0;
+ continue;
+
+err_job:
+ xe_sched_job_put(job);
+err:
+ mutex_unlock(&m->job_mutex);
+ xe_bb_free(bb, NULL);
+
+err_sync:
+ /* Sync partial copy if any. FIXME: under job_mutex? */
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
+ return ERR_PTR(err);
+ }
+
+ return fence;
+}
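+
+/*
+ * Usage sketch (illustrative): a buffer-object move path would typically do
+ *
+ *	fence = xe_migrate_copy(m, bo, bo, old_mem, new_mem, false);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	dma_fence_wait(fence, false);
+ *	dma_fence_put(fence);
+ *
+ * where old_mem and new_mem are the source and destination ttm_resources.
+ */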
+
+static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+ u32 size, u32 pitch)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 *cs = bb->cs + bb->len;
+ u32 len = PVC_MEM_SET_CMD_LEN_DW;
+
+ *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
+ *cs++ = pitch - 1;
+ *cs++ = (size / pitch) - 1;
+ *cs++ = pitch - 1;
+ *cs++ = lower_32_bits(src_ofs);
+ *cs++ = upper_32_bits(src_ofs);
+ if (GRAPHICS_VERx100(xe) >= 2000)
+ *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
+ else
+ *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
+
+ xe_gt_assert(gt, cs - bb->cs == len + bb->len);
+
+ bb->len += len;
+}
+
+static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
+ u64 src_ofs, u32 size, u32 pitch, bool is_vram)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 *cs = bb->cs + bb->len;
+ u32 len = XY_FAST_COLOR_BLT_DW;
+
+ if (GRAPHICS_VERx100(xe) < 1250)
+ len = 11;
+
+ *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
+ (len - 2);
+ if (GRAPHICS_VERx100(xe) >= 2000)
+ *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
+ (pitch - 1);
+ else
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
+ (pitch - 1);
+ *cs++ = 0;
+ *cs++ = (size / pitch) << 16 | pitch / 4;
+ *cs++ = lower_32_bits(src_ofs);
+ *cs++ = upper_32_bits(src_ofs);
+ *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ if (len > 11) {
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ }
+
+ xe_gt_assert(gt, cs - bb->cs == len + bb->len);
+
+ bb->len += len;
+}
+
+static bool has_service_copy_support(struct xe_gt *gt)
+{
+ /*
+ * What we care about is whether the architecture was designed with
+ * service copy functionality (specifically the new MEM_SET / MEM_COPY
+ * instructions) so check the architectural engine list rather than the
+ * actual list since these instructions are usable on BCS0 even if
+ * all of the actual service copy engines (BCS1-BCS8) have been fused
+ * off.
+ */
+ return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
+ XE_HW_ENGINE_BCS1);
+}
+
+static u32 emit_clear_cmd_len(struct xe_gt *gt)
+{
+ if (has_service_copy_support(gt))
+ return PVC_MEM_SET_CMD_LEN_DW;
+ else
+ return XY_FAST_COLOR_BLT_DW;
+}
+
+static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+ u32 size, u32 pitch, bool is_vram)
+{
+ if (has_service_copy_support(gt))
+ emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
+ else
+ emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
+ is_vram);
+}
+
+/**
+ * xe_migrate_clear() - Clear content of TTM resources.
+ * @m: The migration context.
+ * @bo: The buffer object @dst is currently bound to.
+ * @dst: The dst TTM resource to be cleared.
+ *
+ * Clear the contents of @dst to zero. On flat CCS devices,
+ * the CCS metadata is cleared to zero as well on VRAM destinations.
+ * TODO: Eliminate the @bo argument.
+ *
+ * Return: Pointer to a dma_fence representing the last clear batch, or
+ * an error pointer on failure. If there is a failure, any clear operation
+ * started by the function call has been synced.
+ */
+struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
+ struct xe_bo *bo,
+ struct ttm_resource *dst)
+{
+ bool clear_vram = mem_type_is_vram(dst->mem_type);
+ struct xe_gt *gt = m->tile->primary_gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
+ struct dma_fence *fence = NULL;
+ u64 size = bo->size;
+ struct xe_res_cursor src_it;
+ struct ttm_resource *src = dst;
+ int err;
+ int pass = 0;
+
+ if (!clear_vram)
+ xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
+ else
+ xe_res_first(src, 0, bo->size, &src_it);
+
+ while (size) {
+ u64 clear_L0_ofs;
+ u32 clear_L0_pt;
+ u32 flush_flags = 0;
+ u64 clear_L0;
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 batch_size, update_idx;
+
+ bool usm = xe->info.has_usm;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ clear_L0 = xe_migrate_res_sizes(m, &src_it);
+
+ drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
+
+ /* Calculate final sizes and batch size.. */
+ batch_size = 2 +
+ pte_update_size(m, clear_vram, src, &src_it,
+ &clear_L0, &clear_L0_ofs, &clear_L0_pt,
+ clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
+ avail_pts);
+
+ if (xe_device_has_flat_ccs(xe))
+ batch_size += EMIT_COPY_CCS_DW;
+
+ /* Clear commands */
+
+ if (WARN_ON_ONCE(!clear_L0))
+ break;
+
+ bb = xe_bb_new(gt, batch_size, usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ goto err_sync;
+ }
+
+ size -= clear_L0;
+ /* Preemption is enabled again by the ring ops. */
+ if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
+ xe_res_next(&src_it, clear_L0);
+ else
+ emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+ &src_it, clear_L0, dst);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ if (!clear_system_ccs)
+ emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
+
+ if (xe_device_has_flat_ccs(xe)) {
+ emit_copy_ccs(gt, bb, clear_L0_ofs, true,
+ m->cleared_mem_ofs, false, clear_L0);
+ flush_flags = MI_FLUSH_DW_CCS;
+ }
+
+ mutex_lock(&m->job_mutex);
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, flush_flags);
+ if (!fence) {
+ /*
+ * There can't be anything userspace related at this
+ * point, so we just need to respect any potential move
+ * fences, which are always tracked as
+ * DMA_RESV_USAGE_KERNEL.
+ */
+ err = job_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ goto err_job;
+ }
+
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ continue;
+
+err_job:
+ xe_sched_job_put(job);
+err:
+ mutex_unlock(&m->job_mutex);
+ xe_bb_free(bb, NULL);
+err_sync:
+ /* Sync partial copies if any. FIXME: job_mutex? */
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
+ return ERR_PTR(err);
+ }
+
+ if (clear_system_ccs)
+ bo->ccs_cleared = true;
+
+ return fence;
+}
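+
+/*
+ * Usage sketch (illustrative): clearing a freshly allocated VRAM BO would
+ * look roughly like
+ *
+ *	fence = xe_migrate_clear(m, bo, bo->ttm.resource);
+ *	if (!IS_ERR(fence)) {
+ *		dma_fence_wait(fence, false);
+ *		dma_fence_put(fence);
+ *	}
+ */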
+
+static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
+ const struct xe_vm_pgtable_update *update,
+ struct xe_migrate_pt_update *pt_update)
+{
+ const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
+ u32 chunk;
+ u32 ofs = update->ofs, size = update->qwords;
+
+ /*
+ * If we have 512 entries (max), we would populate it ourselves,
+ * and update the PDE above it to the new pointer.
+ * The only time this can happen is if we have to update the top
+ * PDE. This requires a BO that is almost vm->size big.
+ *
+ * This shouldn't be possible in practice.. might change when 16K
+ * pages are used. Hence the assert.
+ */
+ xe_tile_assert(tile, update->qwords <= 0x1ff);
+ if (!ppgtt_ofs)
+ ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
+ xe_bo_addr(update->pt_bo, 0,
+ XE_PAGE_SIZE));
+
+ do {
+ u64 addr = ppgtt_ofs + ofs * 8;
+
+ chunk = min(update->qwords, 0x1ffU);
+
+ /* Ensure populatefn can do memset64 by aligning bb->cs */
+ if (!(bb->len & 1))
+ bb->cs[bb->len++] = MI_NOOP;
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
+ update);
+
+ bb->len += chunk * 2;
+ ofs += chunk;
+ size -= chunk;
+ } while (size);
+}
+
+struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
+{
+ return xe_vm_get(m->q->vm);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+struct migrate_test_params {
+ struct xe_test_priv base;
+ bool force_gpu;
+};
+
+#define to_migrate_test_params(_priv) \
+ container_of(_priv, struct migrate_test_params, base)
+#endif
+
+static struct dma_fence *
+xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
+ struct xe_vm *vm, struct xe_bo *bo,
+ const struct xe_vm_pgtable_update *updates,
+ u32 num_updates, bool wait_vm,
+ struct xe_migrate_pt_update *pt_update)
+{
+ XE_TEST_DECLARE(struct migrate_test_params *test =
+ to_migrate_test_params
+ (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
+ const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
+ struct dma_fence *fence;
+ int err;
+ u32 i;
+
+ if (XE_TEST_ONLY(test && test->force_gpu))
+ return ERR_PTR(-ETIME);
+
+ if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL))
+ return ERR_PTR(-ETIME);
+
+ if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP))
+ return ERR_PTR(-ETIME);
+
+ if (ops->pre_commit) {
+ pt_update->job = NULL;
+ err = ops->pre_commit(pt_update);
+ if (err)
+ return ERR_PTR(err);
+ }
+ for (i = 0; i < num_updates; i++) {
+ const struct xe_vm_pgtable_update *update = &updates[i];
+
+ ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
+ update->ofs, update->qwords, update);
+ }
+
+ if (vm) {
+ trace_xe_vm_cpu_bind(vm);
+ xe_device_wmb(vm->xe);
+ }
+
+ fence = dma_fence_get_stub();
+
+ return fence;
+}
+
+static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ struct dma_fence *fence;
+ int i;
+
+ for (i = 0; i < num_syncs; i++) {
+ fence = syncs[i].fence;
+
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ return false;
+ }
+ if (q) {
+ fence = xe_exec_queue_last_fence_get(q, vm);
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ dma_fence_put(fence);
+ return false;
+ }
+ dma_fence_put(fence);
+ }
+
+ return true;
+}
+
+/**
+ * xe_migrate_update_pgtables() - Pipelined page-table update
+ * @m: The migrate context.
+ * @vm: The vm we'll be updating.
+ * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
+ * @q: The exec queue to be used for the update or NULL if the default
+ * migration engine is to be used.
+ * @updates: An array of update descriptors.
+ * @num_updates: Number of descriptors in @updates.
+ * @syncs: Array of xe_sync_entry to await before updating. Note that waits
+ * will block the engine timeline.
+ * @num_syncs: Number of entries in @syncs.
+ * @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
+ * pointers to callback functions and, if subclassed, private arguments to
+ * those.
+ *
+ * Perform a pipelined page-table update. The update descriptors are typically
+ * built under the same lock critical section as a call to this function. If
+ * using the default engine for the updates, they will be performed in the
+ * order they grab the job_mutex. If different engines are used, external
+ * synchronization is needed for overlapping updates to maintain page-table
+ * consistency. Note that the meaning of "overlapping" is that the updates
+ * touch the same page-table, which might be a higher-level page-directory.
+ * If no pipelining is needed, then updates may be performed by the cpu.
+ *
+ * Return: A dma_fence that, when signaled, indicates the update completion.
+ */
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_vm *vm,
+ struct xe_bo *bo,
+ struct xe_exec_queue *q,
+ const struct xe_vm_pgtable_update *updates,
+ u32 num_updates,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ struct xe_migrate_pt_update *pt_update)
+{
+ const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
+ struct xe_tile *tile = m->tile;
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ struct drm_suballoc *sa_bo = NULL;
+ struct xe_vma *vma = pt_update->vma;
+ struct xe_bb *bb;
+ u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
+ u64 addr;
+ int err = 0;
+ bool usm = !q && xe->info.has_usm;
+ bool first_munmap_rebind = vma &&
+ vma->gpuva.flags & XE_VMA_FIRST_REBIND;
+ struct xe_exec_queue *q_override = !q ? m->q : q;
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+
+ /* Use the CPU if no in syncs and engine is idle */
+ if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
+ fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
+ num_updates,
+ first_munmap_rebind,
+ pt_update);
+ if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
+ return fence;
+ }
+
+ /* fixed + PTE entries */
+ if (IS_DGFX(xe))
+ batch_size = 2;
+ else
+ batch_size = 6 + num_updates * 2;
+
+ for (i = 0; i < num_updates; i++) {
+ u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, 0x1ff);
+
+ /* align noop + MI_STORE_DATA_IMM cmd prefix */
+ batch_size += 4 * num_cmds + updates[i].qwords * 2;
+ }
+
+ /*
+ * XXX: Create temp bo to copy from, if batch_size becomes too big?
+ *
+ * Worst case: Sum(2 * (each lower level page size) + (top level page size))
+ * Should be reasonably bound..
+ */
+ xe_tile_assert(tile, batch_size < SZ_128K);
+
+ bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
+ if (IS_ERR(bb))
+ return ERR_CAST(bb);
+
+ /* For sysmem PTE's, need to map them in our hole.. */
+ if (!IS_DGFX(xe)) {
+ ppgtt_ofs = NUM_KERNEL_PDE - 1;
+ if (q) {
+ xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
+
+ sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
+ GFP_KERNEL, true, 0);
+ if (IS_ERR(sa_bo)) {
+ err = PTR_ERR(sa_bo);
+ goto err;
+ }
+
+ ppgtt_ofs = NUM_KERNEL_PDE +
+ (drm_suballoc_soffset(sa_bo) /
+ NUM_VMUSA_UNIT_PER_PAGE);
+ page_ofs = (drm_suballoc_soffset(sa_bo) %
+ NUM_VMUSA_UNIT_PER_PAGE) *
+ VM_SA_UPDATE_UNIT_SIZE;
+ }
+
+ /* Map our PT's to gtt */
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
+ bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
+ bb->cs[bb->len++] = 0; /* upper_32_bits */
+
+ for (i = 0; i < num_updates; i++) {
+ struct xe_bo *pt_bo = updates[i].pt_bo;
+
+ xe_tile_assert(tile, pt_bo->size == SZ_4K);
+
+ addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
+ (page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
+ for (i = 0; i < num_updates; i++)
+ write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
+ &updates[i], pt_update);
+ } else {
+ /* phys pages, no preamble required */
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ for (i = 0; i < num_updates; i++)
+ write_pgtable(tile, bb, 0, &updates[i], pt_update);
+ }
+
+ if (!q)
+ mutex_lock(&m->job_mutex);
+
+ job = xe_bb_create_migration_job(q ?: m->q, bb,
+ xe_migrate_batch_base(m, usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err_bb;
+ }
+
+ /* Wait on BO move */
+ if (bo) {
+ err = job_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ goto err_job;
+ }
+
+ /*
+ * Munmap style VM unbind, need to wait for all jobs to be complete /
+ * trigger preempts before moving forward
+ */
+ if (first_munmap_rebind) {
+ err = job_add_deps(job, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (err)
+ goto err_job;
+ }
+
+ err = xe_sched_job_last_fence_add_dep(job, vm);
+ for (i = 0; !err && i < num_syncs; i++)
+ err = xe_sync_entry_add_deps(&syncs[i], job);
+
+ if (err)
+ goto err_job;
+
+ if (ops->pre_commit) {
+ pt_update->job = job;
+ err = ops->pre_commit(pt_update);
+ if (err)
+ goto err_job;
+ }
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ if (!q)
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+ drm_suballoc_free(sa_bo, fence);
+
+ return fence;
+
+err_job:
+ xe_sched_job_put(job);
+err_bb:
+ if (!q)
+ mutex_unlock(&m->job_mutex);
+ xe_bb_free(bb, NULL);
+err:
+ drm_suballoc_free(sa_bo, NULL);
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_migrate_wait() - Complete all operations using the xe_migrate context
+ * @m: Migrate context to wait for.
+ *
+ * Waits until the GPU no longer uses the migrate context's default engine
+ * or its page-table objects. FIXME: What about separate page-table update
+ * engines?
+ */
+void xe_migrate_wait(struct xe_migrate *m)
+{
+ if (m->fence)
+ dma_fence_wait(m->fence, false);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_migrate.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
new file mode 100644
index 000000000..951f19318
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _XE_MIGRATE_
+#define _XE_MIGRATE_
+
+#include <drm/drm_mm.h>
+
+struct dma_fence;
+struct iosys_map;
+struct ttm_resource;
+
+struct xe_bo;
+struct xe_gt;
+struct xe_exec_queue;
+struct xe_migrate;
+struct xe_migrate_pt_update;
+struct xe_sync_entry;
+struct xe_pt;
+struct xe_tile;
+struct xe_vm;
+struct xe_vm_pgtable_update;
+struct xe_vma;
+
+/**
+ * struct xe_migrate_pt_update_ops - Callbacks for the
+ * xe_migrate_update_pgtables() function.
+ */
+struct xe_migrate_pt_update_ops {
+ /**
+ * @populate: Populate a command buffer or page-table with ptes.
+ * @pt_update: Embeddable callback argument.
+ * @tile: The tile for the current operation.
+ * @map: struct iosys_map into the memory to be populated.
+ * @pos: If @map is NULL, map into the memory to be populated.
+ * @ofs: qword offset into @map, unused if @map is NULL.
+ * @num_qwords: Number of qwords to write.
+ * @update: Information about the PTEs to be inserted.
+ *
+ * This interface is intended to be used as a callback into the
+ * page-table system to populate command buffers or shared
+ * page-tables with PTEs.
+ */
+ void (*populate)(struct xe_migrate_pt_update *pt_update,
+ struct xe_tile *tile, struct iosys_map *map,
+ void *pos, u32 ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update);
+
+ /**
+ * @pre_commit: Callback to be called just before arming the
+ * sched_job.
+ * @pt_update: Pointer to embeddable callback argument.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+ int (*pre_commit)(struct xe_migrate_pt_update *pt_update);
+};
+
+/**
+ * struct xe_migrate_pt_update - Argument to the
+ * struct xe_migrate_pt_update_ops callbacks.
+ *
+ * Intended to be subclassed to support additional arguments if necessary.
+ */
+struct xe_migrate_pt_update {
+ /** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
+ const struct xe_migrate_pt_update_ops *ops;
+ /** @vma: The vma we're updating the pagetable for. */
+ struct xe_vma *vma;
+ /** @job: The job if a GPU page-table update. NULL otherwise */
+ struct xe_sched_job *job;
+ /** @start: Start of update for the range fence */
+ u64 start;
+ /** @last: Last of update for the range fence */
+ u64 last;
+ /** @tile_id: Tile ID of the update */
+ u8 tile_id;
+};
+
+struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+
+struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
+ struct xe_bo *src_bo,
+ struct xe_bo *dst_bo,
+ struct ttm_resource *src,
+ struct ttm_resource *dst,
+ bool copy_only_ccs);
+
+struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
+ struct xe_bo *bo,
+ struct ttm_resource *dst);
+
+struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);
+
+struct dma_fence *
+xe_migrate_update_pgtables(struct xe_migrate *m,
+ struct xe_vm *vm,
+ struct xe_bo *bo,
+ struct xe_exec_queue *q,
+ const struct xe_vm_pgtable_update *updates,
+ u32 num_updates,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ struct xe_migrate_pt_update *pt_update);
+
+void xe_migrate_wait(struct xe_migrate *m);
+
+struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate_doc.h b/drivers/gpu/drm/xe/xe_migrate_doc.h
new file mode 100644
index 000000000..63c7d67b5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_migrate_doc.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_MIGRATE_DOC_H_
+#define _XE_MIGRATE_DOC_H_
+
+/**
+ * DOC: Migrate Layer
+ *
+ * The XE migrate layer is used to generate jobs which can copy memory
+ * (eviction), clear memory, or program tables (binds). This layer exists in
+ * every GT, has a migrate engine, and uses a special VM for all generated
+ * jobs.
+ *
+ * Special VM details
+ * ==================
+ *
+ * The special VM is configured with a page structure where we can dynamically
+ * map BOs which need to be copied and cleared, dynamically map other VM's page
+ * table BOs for updates, and identity map the entire device's VRAM with 1 GB
+ * pages.
+ *
+ * Currently the page structure consists of 32 physical pages, with 16
+ * reserved for BO mapping during copies and clears, 1 reserved for kernel
+ * binds, several needed to set up the identity mappings (the exact number
+ * depends on how many bits of address space the device has), and the rest
+ * reserved for user bind operations.
+ *
+ * TODO: Diagram of layout
+ *
+ * Bind jobs
+ * =========
+ *
+ * A bind job consists of two batches and runs either on the migrate engine
+ * (kernel binds) or the bind engine passed in (user binds). In both cases the
+ * VM of the engine is the migrate VM.
+ *
+ * The first batch is used to update the migration VM page structure to point to
+ * the bind VM page table BOs which need to be updated. A physical page is
+ * required for this. If it is a user bind, the page is allocated from the
+ * pool of pages reserved for user bind operations, with drm_suballoc
+ * managing this pool. If
+ * it is a kernel bind, the page reserved for kernel binds is used.
+ *
+ * The first batch is only required for devices without VRAM, as when the device
+ * has VRAM the bind VM page table BOs are in VRAM and the identity mapping can
+ * be used.
+ *
+ * The second batch is used to program the page-table updates in the bind VM.
+ * Why not just one batch? Because the TLBs need to be invalidated between
+ * these two batches, and that can only be done from the ring.
+ *
+ * When the bind job completes, the allocated page is returned to the pool of
+ * pages reserved for user bind operations if it was a user bind. There is no
+ * need to do this for kernel binds, as the reserved kernel page is used
+ * serially by each job.
+ *
+ * Copy / clear jobs
+ * =================
+ *
+ * A copy or clear job consists of two batches and runs on the migrate engine.
+ *
+ * Like binds, the first batch is used to update the migration VM page
+ * structure. In copy jobs, we need to map both the source and destination of
+ * the BO into the page structure. In clear jobs, we just need a single
+ * mapping of the BO into the page structure. We use the 16 reserved pages in
+ * the migration VM for these mappings; since each page maps 2 MB of the BO,
+ * this gives us a maximum copy size of 16 MB and a maximum clear size of
+ * 32 MB.
+ *
+ * The second batch is used to do either the copy or the clear. As with binds,
+ * two batches are required because the TLBs need to be invalidated from the
+ * ring between the batches.
+ *
+ * More than one job will be generated if the BO is larger than maximum copy /
+ * clear size.
+ *
+ * Future work
+ * ===========
+ *
+ * Update copy and clear code to use identity mapped VRAM.
+ *
+ * Can we rework the use of the pages for async binds to use all the entries in
+ * each page?
+ *
+ * Using large pages for sysmem mappings.
+ *
+ * Is it possible to identity map the sysmem? We should explore this.
+ */
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
new file mode 100644
index 000000000..02f7808f2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2023 Intel Corporation
+ */
+
+#include <linux/minmax.h>
+
+#include "xe_mmio.h"
+
+#include <drm/drm_managed.h>
+#include <drm/xe_drm.h>
+
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_macros.h"
+#include "xe_module.h"
+#include "xe_tile.h"
+
+#define XEHP_MTCFG_ADDR XE_REG(0x101800)
+#define TILE_COUNT REG_GENMASK(15, 8)
+
+#define BAR_SIZE_SHIFT 20
+
+static void
+_resize_bar(struct xe_device *xe, int resno, resource_size_t size)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int bar_size = pci_rebar_bytes_to_size(size);
+ int ret;
+
+ if (pci_resource_len(pdev, resno))
+ pci_release_resource(pdev, resno);
+
+ ret = pci_resize_resource(pdev, resno, bar_size);
+ if (ret) {
+ drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support in your BIOS\n",
+ resno, 1 << bar_size, ERR_PTR(ret));
+ return;
+ }
+
+ drm_info(&xe->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
+}
+
+/*
+ * if force_vram_bar_size is set, attempt to set to the requested size
+ * else set to maximum possible size
+ */
+static void xe_resize_vram_bar(struct xe_device *xe)
+{
+ u64 force_vram_bar_size = xe_modparam.force_vram_bar_size;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct pci_bus *root = pdev->bus;
+ resource_size_t current_size;
+ resource_size_t rebar_size;
+ struct resource *root_res;
+ u32 bar_size_mask;
+ u32 pci_cmd;
+ int i;
+
+ /* gather some relevant info */
+ current_size = pci_resource_len(pdev, LMEM_BAR);
+ bar_size_mask = pci_rebar_get_possible_sizes(pdev, LMEM_BAR);
+
+ if (!bar_size_mask)
+ return;
+
+ /* set to a specific size? */
+ if (force_vram_bar_size) {
+ u32 bar_size_bit;
+
+ rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
+
+ bar_size_bit = bar_size_mask & BIT(pci_rebar_bytes_to_size(rebar_size));
+
+ if (!bar_size_bit) {
+ drm_info(&xe->drm,
+ "Requested size: %lluMiB is not supported by rebar sizes: 0x%x. Leaving default: %lluMiB\n",
+ (u64)rebar_size >> 20, bar_size_mask, (u64)current_size >> 20);
+ return;
+ }
+
+ rebar_size = 1ULL << (__fls(bar_size_bit) + BAR_SIZE_SHIFT);
+
+ if (rebar_size == current_size)
+ return;
+ } else {
+ rebar_size = 1ULL << (__fls(bar_size_mask) + BAR_SIZE_SHIFT);
+
+ /* only resize if larger than current */
+ if (rebar_size <= current_size)
+ return;
+ }
+
+ drm_info(&xe->drm, "Attempting to resize bar from %lluMiB -> %lluMiB\n",
+ (u64)current_size >> 20, (u64)rebar_size >> 20);
+
+ while (root->parent)
+ root = root->parent;
+
+ pci_bus_for_each_resource(root, root_res, i) {
+ if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
+ (u64)root_res->start > 0x100000000ul)
+ break;
+ }
+
+ if (!root_res) {
+ drm_info(&xe->drm, "Can't resize VRAM BAR - platform support is missing. Consider enabling 'Resizable BAR' support in your BIOS\n");
+ return;
+ }
+
+ pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
+ pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY);
+
+ _resize_bar(xe, LMEM_BAR, rebar_size);
+
+ pci_assign_unassigned_bus_resources(pdev->bus);
+ pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
+}
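+
+/*
+ * Note (illustrative): the requested size comes from the force_vram_bar_size
+ * module parameter in MiB, e.g. booting with xe.force_vram_bar_size=8192
+ * requests an 8 GiB BAR, while the default of 0 selects the largest size the
+ * device supports.
+ */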
+
+static bool xe_pci_resource_valid(struct pci_dev *pdev, int bar)
+{
+ if (!pci_resource_flags(pdev, bar))
+ return false;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET)
+ return false;
+
+ if (!pci_resource_len(pdev, bar))
+ return false;
+
+ return true;
+}
+
+static int xe_determine_lmem_bar_size(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ if (!xe_pci_resource_valid(pdev, LMEM_BAR)) {
+ drm_err(&xe->drm, "pci resource is not valid\n");
+ return -ENXIO;
+ }
+
+ xe_resize_vram_bar(xe);
+
+ xe->mem.vram.io_start = pci_resource_start(pdev, LMEM_BAR);
+ xe->mem.vram.io_size = pci_resource_len(pdev, LMEM_BAR);
+ if (!xe->mem.vram.io_size)
+ return -EIO;
+
+ /* XXX: Need to change when xe link code is ready */
+ xe->mem.vram.dpa_base = 0;
+
+ /* set up a map to the total memory area. */
+ xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
+
+ return 0;
+}
+
+/**
+ * xe_mmio_tile_vram_size() - Collect vram size and offset information
+ * @tile: tile to get info for
+ * @vram_size: available vram (size - device reserved portions)
+ * @tile_size: actual vram size
+ * @tile_offset: physical start point in the vram address space
+ *
+ * There are 4 places for size information:
+ * - io size (from pci_resource_len of LMEM bar) (only used for small bar and DG1)
+ * - TILEx size (actual vram size)
+ * - GSMBASE offset (TILEx - "stolen")
+ * - CSSBASE offset (TILEx - CSS space necessary)
+ *
+ * CSSBASE is always a lower/smaller offset than GSMBASE.
+ *
+ * The actual available size of memory extends up to the CCS or GSM base.
+ * NOTE: multi-tile bases will include the tile offset.
+ *
+ */
+static int xe_mmio_tile_vram_size(struct xe_tile *tile, u64 *vram_size,
+ u64 *tile_size, u64 *tile_offset)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *gt = tile->primary_gt;
+ u64 offset;
+ int err;
+ u32 reg;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ return err;
+
+ /* actual size */
+ if (unlikely(xe->info.platform == XE_DG1)) {
+ *tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR);
+ *tile_offset = 0;
+ } else {
+ reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
+ *tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G;
+ *tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G;
+ }
+
+ /* minus device usage */
+ if (xe->info.has_flat_ccs) {
+ reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
+ offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
+ } else {
+ offset = xe_mmio_read64_2x32(gt, GSMBASE);
+ }
+
+ /* remove the tile offset so we have just the available size */
+ *vram_size = offset - *tile_offset;
+
+ return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
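+
+/*
+ * Decode example (illustrative): a XEHP_TILE_ADDR_RANGE value with bits[14:8]
+ * = 16 and bits[7:1] = 0 describes a 16 GiB tile starting at device offset 0.
+ */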
+
+int xe_mmio_probe_vram(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ resource_size_t io_size;
+ u64 available_size = 0;
+ u64 total_size = 0;
+ u64 tile_offset;
+ u64 tile_size;
+ u64 vram_size;
+ int err;
+ u8 id;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ /* Get the size of the root tile's vram for later accessibility comparison */
+ tile = xe_device_get_root_tile(xe);
+ err = xe_mmio_tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ if (err)
+ return err;
+
+ err = xe_determine_lmem_bar_size(xe);
+ if (err)
+ return err;
+
+ drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
+ &xe->mem.vram.io_size);
+
+ io_size = xe->mem.vram.io_size;
+
+ /* tile specific ranges */
+ for_each_tile(tile, xe, id) {
+ err = xe_mmio_tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ if (err)
+ return err;
+
+ tile->mem.vram.actual_physical_size = tile_size;
+ tile->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
+ tile->mem.vram.io_size = min_t(u64, vram_size, io_size);
+
+ if (!tile->mem.vram.io_size) {
+ drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
+ return -ENODEV;
+ }
+
+ tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
+ tile->mem.vram.usable_size = vram_size;
+ tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
+
+ if (tile->mem.vram.io_size < tile->mem.vram.usable_size)
+ drm_info(&xe->drm, "Small BAR device\n");
+ drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
+ tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
+ drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
+ &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
+ &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
+
+ /* calculate total size using tile size to get the correct HW sizing */
+ total_size += tile_size;
+ available_size += vram_size;
+
+ if (total_size > xe->mem.vram.io_size) {
+ drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
+ &total_size, &xe->mem.vram.io_size);
+ }
+
+ io_size -= min_t(u64, tile_size, io_size);
+ }
+
+ xe->mem.vram.actual_physical_size = total_size;
+
+ drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
+ &xe->mem.vram.actual_physical_size);
+ drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
+ &available_size);
+
+ return 0;
+}
+
+void xe_mmio_probe_tiles(struct xe_device *xe)
+{
+ size_t tile_mmio_size = SZ_16M, tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
+ u8 id, tile_count = xe->info.tile_count;
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_tile *tile;
+ void __iomem *regs;
+ u32 mtcfg;
+
+ if (tile_count == 1)
+ goto add_mmio_ext;
+
+ if (!xe->info.skip_mtcfg) {
+ mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
+ tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+ if (tile_count < xe->info.tile_count) {
+ drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
+ xe->info.tile_count, tile_count);
+ xe->info.tile_count = tile_count;
+
+ /*
+ * FIXME: Needs some work for standalone media, but should be impossible
+ * with multi-tile for now.
+ */
+ xe->info.gt_count = xe->info.tile_count;
+ }
+ }
+
+ regs = xe->mmio.regs;
+ for_each_tile(tile, xe, id) {
+ tile->mmio.size = tile_mmio_size;
+ tile->mmio.regs = regs;
+ regs += tile_mmio_size;
+ }
+
+add_mmio_ext:
+ /*
+ * By design, there's a contiguous multi-tile MMIO space (16MB hard coded per tile).
+ * When supported, there could be an additional contiguous multi-tile MMIO extension
+ * space ON TOP of it, and hence the necessity for distinguished MMIO spaces.
+ */
+ if (xe->info.has_mmio_ext) {
+ regs = xe->mmio.regs + tile_mmio_size * tile_count;
+
+ for_each_tile(tile, xe, id) {
+ tile->mmio_ext.size = tile_mmio_ext_size;
+ tile->mmio_ext.regs = regs;
+
+ regs += tile_mmio_ext_size;
+ }
+ }
+}
+
+static void mmio_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_device *xe = arg;
+
+ pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
+ if (xe->mem.vram.mapping)
+ iounmap(xe->mem.vram.mapping);
+}
+
+static int xe_verify_lmem_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
+ /*
+ * The boot firmware initializes local memory and assesses its health.
+ * If memory training fails, the punit will have been instructed to
+ * keep the GT powered down; we won't be able to communicate with it
+ * and we should not continue with driver initialization.
+ */
+ if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
+ drm_err(&xe->drm, "VRAM not initialized by firmware\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int xe_mmio_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ const int mmio_bar = 0;
+
+ /*
+ * Map the entire BAR.
+ * The first 16MB of the BAR belong to the root tile and include:
+ * registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
+ */
+ xe->mmio.size = pci_resource_len(pdev, mmio_bar);
+ xe->mmio.regs = pci_iomap(pdev, mmio_bar, 0);
+ if (xe->mmio.regs == NULL) {
+ drm_err(&xe->drm, "failed to map registers\n");
+ return -EIO;
+ }
+
+ return drmm_add_action_or_reset(&xe->drm, mmio_fini, xe);
+}
+
+int xe_mmio_root_tile_init(struct xe_device *xe)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+ int err;
+
+ /* Setup first tile; other tiles (if present) will be setup later. */
+ root_tile->mmio.size = SZ_16M;
+ root_tile->mmio.regs = xe->mmio.regs;
+
+ err = xe_verify_lmem_ready(xe);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
+ * @gt: MMIO target GT
+ * @reg: register to read value from
+ *
+ * Although Intel GPUs have some 64-bit registers, the hardware officially
+ * only supports GTTMMADR register reads of 32 bits or smaller. Even if
+ * a readq operation may return a reasonable value, that violation of the
+ * spec shouldn't be relied upon and all 64-bit register reads should be
+ * performed as two 32-bit reads of the upper and lower dwords.
+ *
+ * When reading registers that may be changing (such as
+ * counters), a rollover of the lower dword between the two 32-bit reads
+ * can be problematic. This function attempts to ensure the upper dword has
+ * stabilized before returning the 64-bit value.
+ *
+ * Note that because this function may re-read the register multiple times
+ * while waiting for the value to stabilize it should not be used to read
+ * any registers where read operations have side effects.
+ *
+ * Returns the value of the 64-bit register.
+ */
+u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
+ u32 ldw, udw, oldudw, retries;
+
+ if (reg.addr < gt->mmio.adj_limit) {
+ reg.addr += gt->mmio.adj_offset;
+ reg_udw.addr += gt->mmio.adj_offset;
+ }
+
+ oldudw = xe_mmio_read32(gt, reg_udw);
+ for (retries = 5; retries; --retries) {
+ ldw = xe_mmio_read32(gt, reg);
+ udw = xe_mmio_read32(gt, reg_udw);
+
+ if (udw == oldudw)
+ break;
+
+ oldudw = udw;
+ }
+
+ xe_gt_WARN(gt, retries == 0,
+ "64-bit read of %#x did not stabilize\n", reg.addr);
+
+ return (u64)udw << 32 | ldw;
+}
+
+/**
+ * xe_mmio_wait32() - Wait for a register to match the desired masked value
+ * @gt: MMIO target GT
+ * @reg: register to read value from
+ * @mask: mask to be applied to the value read from the register
+ * @val: desired value after applying the mask
+ * @timeout_us: time out after this period of time. Wait logic tries to be
+ * smart, applying an exponential backoff until @timeout_us is reached.
+ * @out_val: if not NULL, points where to store the last unmasked value
+ * @atomic: needs to be true if calling from an atomic context
+ *
+ * This function polls for the desired masked value and returns zero on success
+ * or -ETIMEDOUT if timed out.
+ *
+ * Note that @timeout_us represents the minimum amount of time to wait before
+ * giving up. The actual time taken by this function can be a little more than
+ * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
+ * it is possible that this function succeeds even after @timeout_us has passed.
+ */
+int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic)
+{
+ ktime_t cur = ktime_get_raw();
+ const ktime_t end = ktime_add_us(cur, timeout_us);
+ int ret = -ETIMEDOUT;
+ s64 wait = 10;
+ u32 read;
+
+ for (;;) {
+ read = xe_mmio_read32(gt, reg);
+ if ((read & mask) == val) {
+ ret = 0;
+ break;
+ }
+
+ cur = ktime_get_raw();
+ if (!ktime_before(cur, end))
+ break;
+
+ if (ktime_after(ktime_add_us(cur, wait), end))
+ wait = ktime_us_delta(end, cur);
+
+ if (atomic)
+ udelay(wait);
+ else
+ usleep_range(wait, wait << 1);
+ wait <<= 1;
+ }
+
+ if (ret != 0) {
+ read = xe_mmio_read32(gt, reg);
+ if ((read & mask) == val)
+ ret = 0;
+ }
+
+ if (out_val)
+ *out_val = read;
+
+ return ret;
+}
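+
+/*
+ * Usage sketch (illustrative; STATUS_REG and DONE_BIT are hypothetical
+ * names):
+ *
+ *	ret = xe_mmio_wait32(gt, STATUS_REG, DONE_BIT, DONE_BIT, 100, NULL, false);
+ *
+ * polls STATUS_REG for up to ~100 us until DONE_BIT reads back as set.
+ */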
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
new file mode 100644
index 000000000..98de5c13c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2023 Intel Corporation
+ */
+
+#ifndef _XE_MMIO_H_
+#define _XE_MMIO_H_
+
+#include <linux/delay.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "regs/xe_reg_defs.h"
+#include "xe_device_types.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_types.h"
+
+struct drm_device;
+struct drm_file;
+struct xe_device;
+
+#define LMEM_BAR 2
+
+int xe_mmio_init(struct xe_device *xe);
+int xe_mmio_root_tile_init(struct xe_device *xe);
+void xe_mmio_probe_tiles(struct xe_device *xe);
+
+static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+static inline u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+static inline void xe_mmio_write32(struct xe_gt *gt,
+ struct xe_reg reg, u32 val)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
+static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
+}
+
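+/*
+ * Read-modify-write helper: clears @clr, sets @set and returns the old
+ * register value. Note this is a plain read/write pair and is not atomic
+ * with respect to other writers of the same register.
+ */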
+static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
+ u32 set)
+{
+ u32 old, reg_val;
+
+ old = xe_mmio_read32(gt, reg);
+ reg_val = (old & ~clr) | set;
+ xe_mmio_write32(gt, reg, reg_val);
+
+ return old;
+}
+
+static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
+ struct xe_reg reg, u32 val,
+ u32 mask, u32 eval)
+{
+ u32 reg_val;
+
+ xe_mmio_write32(gt, reg, val);
+ reg_val = xe_mmio_read32(gt, reg);
+
+ return (reg_val & mask) != eval ? -EINVAL : 0;
+}
+
+static inline bool xe_mmio_in_range(const struct xe_gt *gt,
+ const struct xe_mmio_range *range,
+ struct xe_reg reg)
+{
+ if (reg.addr < gt->mmio.adj_limit)
+ reg.addr += gt->mmio.adj_offset;
+
+ return range && reg.addr >= range->start && reg.addr <= range->end;
+}
+
+int xe_mmio_probe_vram(struct xe_device *xe);
+u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
+int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+ u32 *out_val, bool atomic);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
new file mode 100644
index 000000000..ef79552e4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_mocs.h"
+
+#include "regs/xe_gt_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_step_types.h"
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define mocs_dbg drm_dbg
+#else
+__printf(2, 3)
+static inline void mocs_dbg(const struct drm_device *dev,
+ const char *format, ...)
+{ /* noop */ }
+#endif
+
+enum {
+ HAS_GLOBAL_MOCS = BIT(0),
+ HAS_LNCF_MOCS = BIT(1),
+};
+
+struct xe_mocs_entry {
+ u32 control_value;
+ u16 l3cc_value;
+ u16 used;
+};
+
+struct xe_mocs_info {
+ unsigned int size;
+ unsigned int n_entries;
+ const struct xe_mocs_entry *table;
+ u8 uc_index;
+ u8 wb_index;
+ u8 unused_entries_index;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define _LE_CACHEABILITY(value) ((value) << 0)
+#define _LE_TGT_CACHE(value) ((value) << 2)
+#define LE_LRUM(value) ((value) << 4)
+#define LE_AOM(value) ((value) << 6)
+#define LE_RSC(value) ((value) << 7)
+#define LE_SCC(value) ((value) << 8)
+#define LE_PFM(value) ((value) << 11)
+#define LE_SCF(value) ((value) << 14)
+#define LE_COS(value) ((value) << 15)
+#define LE_SSE(value) ((value) << 17)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value) ((value) << 0)
+#define L3_SCC(value) ((value) << 1)
+#define _L3_CACHEABILITY(value) ((value) << 4)
+#define L3_GLBGO(value) ((value) << 6)
+#define L3_LKUP(value) ((value) << 7)
+
+/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
+#define IG_PAT REG_BIT(8)
+#define L3_CACHE_POLICY_MASK REG_GENMASK(5, 4)
+#define L4_CACHE_POLICY_MASK REG_GENMASK(3, 2)
+
+/* Helper defines */
+#define XELP_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
+#define PVC_NUM_MOCS_ENTRIES 3
+#define MTL_NUM_MOCS_ENTRIES 16
+#define XE2_NUM_MOCS_ENTRIES 16
+
+/* (e)LLC caching options */
+/*
+ * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
+ * the same as LE_UC
+ */
+#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
+#define LE_1_UC _LE_CACHEABILITY(1)
+#define LE_2_WT _LE_CACHEABILITY(2)
+#define LE_3_WB _LE_CACHEABILITY(3)
+
+/* Target cache */
+#define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0)
+#define LE_TC_1_LLC _LE_TGT_CACHE(1)
+#define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2)
+#define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3)
+
+/* L3 caching options */
+#define L3_0_DIRECT _L3_CACHEABILITY(0)
+#define L3_1_UC _L3_CACHEABILITY(1)
+#define L3_2_RESERVED _L3_CACHEABILITY(2)
+#define L3_3_WB _L3_CACHEABILITY(3)
+
+/* L4 caching options */
+#define L4_0_WB REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 0)
+#define L4_1_WT REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 1)
+#define L4_3_UC REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 3)
+
+#define XE2_L3_0_WB REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 0)
+/* XD: WB Transient Display */
+#define XE2_L3_1_XD REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 1)
+#define XE2_L3_3_UC REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 3)
+
+#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
+ [__idx] = { \
+ .control_value = __control_value, \
+ .l3cc_value = __l3cc_value, \
+ .used = 1, \
+ }
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS31 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * HW platforms, and for ICL+, be identical across OSes. To achieve
+ * that, the list of entries is published as part of bspec.
+ *
+ * Entries not part of the following tables are undefined as far as userspace is
+ * concerned and shouldn't be relied upon. The last few entries are reserved by
+ * the hardware. They should be initialized according to bspec and never used.
+ *
+ * NOTE1: These tables are part of bspec and defined as part of the hardware
+ * interface. It is expected that, for specific hardware platform, existing
+ * entries will remain constant and the table will only be updated by adding new
+ * entries, filling unused positions.
+ *
+ * NOTE2: Reserved and unspecified MOCS indices have been set to L3 WB. These
+ * reserved entries should never be used. They may be changed to
+ * lower-performance variants with better coherency in the future if more
+ * entries are needed.
+ */
+
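+/*
+ * Example decode (illustrative walk-through only): entry 5 below,
+ * "Base - LLC", pairs LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), i.e.
+ * writeback cacheability, LLC as the target cache and LRU age 3,
+ * with L3_1_UC, so the line is cached in LLC but left uncached in L3.
+ */
+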
+static const struct xe_mocs_entry gen12_mocs_desc[] = {
+ /* Base - L3 + LLC */
+ MOCS_ENTRY(2,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Base - Uncached */
+ MOCS_ENTRY(3,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* Base - L3 */
+ MOCS_ENTRY(4,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Base - LLC */
+ MOCS_ENTRY(5,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Age 0 - LLC */
+ MOCS_ENTRY(6,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
+ L3_1_UC),
+ /* Age 0 - L3 + LLC */
+ MOCS_ENTRY(7,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
+ L3_3_WB),
+ /* Age: Don't Chg. - LLC */
+ MOCS_ENTRY(8,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
+ L3_1_UC),
+ /* Age: Don't Chg. - L3 + LLC */
+ MOCS_ENTRY(9,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
+ L3_3_WB),
+ /* No AOM - LLC */
+ MOCS_ENTRY(10,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
+ L3_1_UC),
+ /* No AOM - L3 + LLC */
+ MOCS_ENTRY(11,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
+ L3_3_WB),
+ /* No AOM; Age 0 - LLC */
+ MOCS_ENTRY(12,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
+ L3_1_UC),
+ /* No AOM; Age 0 - L3 + LLC */
+ MOCS_ENTRY(13,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
+ L3_3_WB),
+ /* No AOM; Age:DC - LLC */
+ MOCS_ENTRY(14,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
+ L3_1_UC),
+ /* No AOM; Age:DC - L3 + LLC */
+ MOCS_ENTRY(15,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
+ L3_3_WB),
+ /* Self-Snoop - L3 + LLC */
+ MOCS_ENTRY(18,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3),
+ L3_3_WB),
+ /* Skip Caching - L3 + LLC(12.5%) */
+ MOCS_ENTRY(19,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7),
+ L3_3_WB),
+ /* Skip Caching - L3 + LLC(25%) */
+ MOCS_ENTRY(20,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3),
+ L3_3_WB),
+ /* Skip Caching - L3 + LLC(50%) */
+ MOCS_ENTRY(21,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1),
+ L3_3_WB),
+ /* Skip Caching - L3 + LLC(75%) */
+ MOCS_ENTRY(22,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3),
+ L3_3_WB),
+ /* Skip Caching - L3 + LLC(87.5%) */
+ MOCS_ENTRY(23,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* HW Reserved - SW program but never use */
+ MOCS_ENTRY(62,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Reserved - SW program but never use */
+ MOCS_ENTRY(63,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC)
+};
+
+static const struct xe_mocs_entry dg1_mocs_desc[] = {
+ /* UC */
+ MOCS_ENTRY(1, 0, L3_1_UC),
+ /* WB - L3 */
+ MOCS_ENTRY(5, 0, L3_3_WB),
+ /* WB - L3 50% */
+ MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
+ /* WB - L3 25% */
+ MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
+ /* WB - L3 12.5% */
+ MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),
+
+ /* HDC:L1 + L3 */
+ MOCS_ENTRY(48, 0, L3_3_WB),
+ /* HDC:L1 */
+ MOCS_ENTRY(49, 0, L3_1_UC),
+
+ /* HW Reserved */
+ MOCS_ENTRY(60, 0, L3_1_UC),
+ MOCS_ENTRY(61, 0, L3_1_UC),
+ MOCS_ENTRY(62, 0, L3_1_UC),
+ MOCS_ENTRY(63, 0, L3_1_UC),
+};
+
+static const struct xe_mocs_entry dg2_mocs_desc[] = {
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - L3 */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
+static const struct xe_mocs_entry dg2_mocs_desc_g10_ax[] = {
+ /* Wa_14011441408: Set Go to Memory for MOCS#0 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - L3 */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
+static const struct xe_mocs_entry pvc_mocs_desc[] = {
+ /* Error */
+ MOCS_ENTRY(0, 0, L3_3_WB),
+
+ /* UC */
+ MOCS_ENTRY(1, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(2, 0, L3_3_WB),
+};
+
+static const struct xe_mocs_entry mtl_mocs_desc[] = {
+ /* Error - Reserved for Non-Use */
+ MOCS_ENTRY(0,
+ 0,
+ L3_LKUP(1) | L3_3_WB),
+ /* Cached - L3 + L4 */
+ MOCS_ENTRY(1,
+ IG_PAT,
+ L3_LKUP(1) | L3_3_WB),
+ /* L4 - GO:L3 */
+ MOCS_ENTRY(2,
+ IG_PAT,
+ L3_LKUP(1) | L3_1_UC),
+ /* Uncached - GO:L3 */
+ MOCS_ENTRY(3,
+ IG_PAT | L4_3_UC,
+ L3_LKUP(1) | L3_1_UC),
+ /* L4 - GO:Mem */
+ MOCS_ENTRY(4,
+ IG_PAT,
+ L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
+ /* Uncached - GO:Mem */
+ MOCS_ENTRY(5,
+ IG_PAT | L4_3_UC,
+ L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
+ /* L4 - L3:NoLKUP; GO:L3 */
+ MOCS_ENTRY(6,
+ IG_PAT,
+ L3_1_UC),
+ /* Uncached - L3:NoLKUP; GO:L3 */
+ MOCS_ENTRY(7,
+ IG_PAT | L4_3_UC,
+ L3_1_UC),
+ /* L4 - L3:NoLKUP; GO:Mem */
+ MOCS_ENTRY(8,
+ IG_PAT,
+ L3_GLBGO(1) | L3_1_UC),
+ /* Uncached - L3:NoLKUP; GO:Mem */
+ MOCS_ENTRY(9,
+ IG_PAT | L4_3_UC,
+ L3_GLBGO(1) | L3_1_UC),
+ /* Display - L3; L4:WT */
+ MOCS_ENTRY(14,
+ IG_PAT | L4_1_WT,
+ L3_LKUP(1) | L3_3_WB),
+ /* CCS - Non-Displayable */
+ MOCS_ENTRY(15,
+ IG_PAT,
+ L3_GLBGO(1) | L3_1_UC),
+};
+
+static const struct xe_mocs_entry xe2_mocs_table[] = {
+ /* Defer to PAT */
+ MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0),
+ /* Cached L3, Uncached L4 */
+ MOCS_ENTRY(1, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0),
+ /* Uncached L3, Cached L4 */
+ MOCS_ENTRY(2, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0),
+ /* Uncached L3 + L4 */
+ MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0),
+ /* Cached L3 + L4 */
+ MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0),
+};
+
+static unsigned int get_mocs_settings(struct xe_device *xe,
+ struct xe_mocs_info *info)
+{
+ unsigned int flags = 0;
+
+ memset(info, 0, sizeof(struct xe_mocs_info));
+
+ switch (xe->info.platform) {
+ case XE_LUNARLAKE:
+ info->size = ARRAY_SIZE(xe2_mocs_table);
+ info->table = xe2_mocs_table;
+ info->n_entries = XE2_NUM_MOCS_ENTRIES;
+ info->uc_index = 3;
+ info->wb_index = 4;
+ info->unused_entries_index = 4;
+ break;
+ case XE_PVC:
+ info->size = ARRAY_SIZE(pvc_mocs_desc);
+ info->table = pvc_mocs_desc;
+ info->n_entries = PVC_NUM_MOCS_ENTRIES;
+ info->uc_index = 1;
+ info->wb_index = 2;
+ info->unused_entries_index = 2;
+ break;
+ case XE_METEORLAKE:
+ info->size = ARRAY_SIZE(mtl_mocs_desc);
+ info->table = mtl_mocs_desc;
+ info->n_entries = MTL_NUM_MOCS_ENTRIES;
+ info->uc_index = 9;
+ info->unused_entries_index = 1;
+ break;
+ case XE_DG2:
+ if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10 &&
+ xe->info.step.graphics >= STEP_A0 &&
+ xe->info.step.graphics <= STEP_B0) {
+ info->size = ARRAY_SIZE(dg2_mocs_desc_g10_ax);
+ info->table = dg2_mocs_desc_g10_ax;
+ } else {
+ info->size = ARRAY_SIZE(dg2_mocs_desc);
+ info->table = dg2_mocs_desc;
+ }
+ info->uc_index = 1;
+ info->n_entries = XELP_NUM_MOCS_ENTRIES;
+ info->unused_entries_index = 3;
+ break;
+ case XE_DG1:
+ info->size = ARRAY_SIZE(dg1_mocs_desc);
+ info->table = dg1_mocs_desc;
+ info->uc_index = 1;
+ info->n_entries = XELP_NUM_MOCS_ENTRIES;
+ info->unused_entries_index = 5;
+ break;
+ case XE_TIGERLAKE:
+ case XE_ROCKETLAKE:
+ case XE_ALDERLAKE_S:
+ case XE_ALDERLAKE_P:
+ case XE_ALDERLAKE_N:
+ info->size = ARRAY_SIZE(gen12_mocs_desc);
+ info->table = gen12_mocs_desc;
+ info->n_entries = XELP_NUM_MOCS_ENTRIES;
+ info->uc_index = 3;
+ info->unused_entries_index = 2;
+ break;
+ default:
+ drm_err(&xe->drm, "Platform that should have a MOCS table does not.\n");
+ return 0;
+ }
+
+ /*
+ * Index 0 is a reserved/unused table entry on most platforms, but
+ * even on those where it does represent a legitimate MOCS entry, it
+ * never represents the "most cached, least coherent" behavior we want
+ * to populate undefined table rows with. So if unused_entries_index
+ * is still 0 at this point, we'll assume that it was omitted by
+ * mistake in the switch statement above.
+ */
+ xe_assert(xe, info->unused_entries_index != 0);
+
+ if (XE_WARN_ON(info->size > info->n_entries)) {
+ info->table = NULL;
+ return 0;
+ }
+
+ if (!IS_DGFX(xe) || GRAPHICS_VER(xe) >= 20)
+ flags |= HAS_GLOBAL_MOCS;
+ if (GRAPHICS_VER(xe) < 20)
+ flags |= HAS_LNCF_MOCS;
+
+ return flags;
+}
+
+/*
+ * Get control_value from MOCS entry. If the table entry is not defined, the
+ * settings from unused_entries_index will be returned.
+ */
+static u32 get_entry_control(const struct xe_mocs_info *info,
+ unsigned int index)
+{
+ if (index < info->size && info->table[index].used)
+ return info->table[index].control_value;
+ return info->table[info->unused_entries_index].control_value;
+}
+
+static void __init_mocs_table(struct xe_gt *gt,
+ const struct xe_mocs_info *info)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ unsigned int i;
+ u32 mocs;
+
+ mocs_dbg(&xe->drm, "entries:%d\n", info->n_entries);
+ drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
+ "Unused entries index should have been defined\n");
+ for (i = 0; i < info->n_entries; i++) {
+ mocs = get_entry_control(info, i);
+
+ mocs_dbg(&xe->drm, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
+ XELP_GLOBAL_MOCS(i).addr, mocs);
+
+ if (GRAPHICS_VERx100(xe) > 1250)
+ xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
+ else
+ xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
+ }
+}
+
+/*
+ * Get l3cc_value from the MOCS entry. If the table entry is not defined,
+ * the l3cc settings from unused_entries_index will be returned instead.
+ */
+static u16 get_entry_l3cc(const struct xe_mocs_info *info,
+ unsigned int index)
+{
+ if (index < info->size && info->table[index].used)
+ return info->table[index].l3cc_value;
+ return info->table[info->unused_entries_index].l3cc_value;
+}
+
+static u32 l3cc_combine(u16 low, u16 high)
+{
+ return low | (u32)high << 16;
+}
+
+static void init_l3cc_table(struct xe_gt *gt,
+ const struct xe_mocs_info *info)
+{
+ unsigned int i;
+ u32 l3cc;
+
+ mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
+ for (i = 0; i < (info->n_entries + 1) / 2; i++) {
+ l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
+ get_entry_l3cc(info, 2 * i + 1));
+
+ mocs_dbg(&gt_to_xe(gt)->drm, "LNCFCMOCS[%d] 0x%x 0x%x\n", i, XELP_LNCFCMOCS(i).addr,
+ l3cc);
+
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
+ xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
+ else
+ xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
+ }
+}
+
+void xe_mocs_init_early(struct xe_gt *gt)
+{
+ struct xe_mocs_info table;
+
+ get_mocs_settings(gt_to_xe(gt), &table);
+ gt->mocs.uc_index = table.uc_index;
+ gt->mocs.wb_index = table.wb_index;
+}
+
+void xe_mocs_init(struct xe_gt *gt)
+{
+ struct xe_mocs_info table;
+ unsigned int flags;
+
+ /*
+ * MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS"
+ * registers depending on platform.
+ *
+ * These registers should be programmed before GuC initialization
+ * since their values will affect some of the memory transactions
+ * performed by the GuC.
+ */
+ flags = get_mocs_settings(gt_to_xe(gt), &table);
+ mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);
+
+ if (flags & HAS_GLOBAL_MOCS)
+ __init_mocs_table(gt, &table);
+ if (flags & HAS_LNCF_MOCS)
+ init_l3cc_table(gt, &table);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_mocs.c"
+#endif
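As a concrete illustration of the LNCFCMOCS packing done by l3cc_combine() above, each 32-bit register holds two consecutive 16-bit l3cc entries: entry 2*i lands in bits 15:0 and entry 2*i+1 in bits 31:16. The values below are arbitrary and for illustration only:

    u32 reg_val = l3cc_combine(0x0030, 0x0010);
    /* reg_val == 0x00100030: 0x0030 in bits 15:0, 0x0010 in bits 31:16 */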
diff --git a/drivers/gpu/drm/xe/xe_mocs.h b/drivers/gpu/drm/xe/xe_mocs.h
new file mode 100644
index 000000000..053754c5a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mocs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_MOCS_H_
+#define _XE_MOCS_H_
+
+#include <linux/types.h>
+
+struct xe_exec_queue;
+struct xe_gt;
+
+void xe_mocs_init_early(struct xe_gt *gt);
+void xe_mocs_init(struct xe_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
new file mode 100644
index 000000000..110b69864
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_module.h"
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "xe_drv.h"
+#include "xe_hw_fence.h"
+#include "xe_pci.h"
+#include "xe_sched_job.h"
+
+struct xe_modparam xe_modparam = {
+ .enable_display = true,
+ .guc_log_level = 5,
+ .force_probe = CONFIG_DRM_XE_FORCE_PROBE,
+ /* the rest are 0 by default */
+};
+
+module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
+MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
+
+module_param_named(enable_display, xe_modparam.enable_display, bool, 0444);
+MODULE_PARM_DESC(enable_display, "Enable display");
+
+module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB)");
+
+module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
+MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
+
+module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(guc_firmware_path,
+ "GuC firmware path to use instead of the default one");
+
+module_param_named_unsafe(huc_firmware_path, xe_modparam.huc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(huc_firmware_path,
+ "HuC firmware path to use instead of the default one - empty string disables");
+
+module_param_named_unsafe(gsc_firmware_path, xe_modparam.gsc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(gsc_firmware_path,
+ "GSC firmware path to use instead of the default one - empty string disables");
+
+module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400);
+MODULE_PARM_DESC(force_probe,
+ "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details.");
+
+struct init_funcs {
+ int (*init)(void);
+ void (*exit)(void);
+};
+
+static const struct init_funcs init_funcs[] = {
+ {
+ .init = xe_hw_fence_module_init,
+ .exit = xe_hw_fence_module_exit,
+ },
+ {
+ .init = xe_sched_job_module_init,
+ .exit = xe_sched_job_module_exit,
+ },
+ {
+ .init = xe_register_pci_driver,
+ .exit = xe_unregister_pci_driver,
+ },
+};
+
+static int __init xe_init(void)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
+ err = init_funcs[i].init();
+ if (err) {
+ while (i--)
+ init_funcs[i].exit();
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void __exit xe_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
+ init_funcs[i].exit();
+}
+
+module_init(xe_init);
+module_exit(xe_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
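The init_funcs table above gives xe_init()/xe_exit() a simple unwind discipline: entries are initialized in order and torn down in reverse, both on module unload and when a later entry's init fails. A hypothetical extra subsystem would be wired in with one more entry (the names here are invented for illustration):

    {
        .init = xe_foo_module_init,
        .exit = xe_foo_module_exit,
    },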
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
new file mode 100644
index 000000000..88ef0e8b2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_MODULE_H_
+#define _XE_MODULE_H_
+
+#include <linux/types.h>
+
+/* Module modprobe variables */
+struct xe_modparam {
+ bool force_execlist;
+ bool enable_display;
+ u32 force_vram_bar_size;
+ int guc_log_level;
+ char *guc_firmware_path;
+ char *huc_firmware_path;
+ char *gsc_firmware_path;
+ char *force_probe;
+};
+
+extern struct xe_modparam xe_modparam;
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
new file mode 100644
index 000000000..1ff6bc79e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_pat.h"
+
+#include <drm/xe_drm.h>
+
+#include "regs/xe_reg_defs.h"
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_mmio.h"
+
+#define _PAT_ATS 0x47fc
+#define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \
+ 0x4800, 0x4804, \
+ 0x4848, 0x484c)
+#define _PAT_PTA 0x4820
+
+#define XE2_NO_PROMOTE REG_BIT(10)
+#define XE2_COMP_EN REG_BIT(9)
+#define XE2_L3_CLOS REG_GENMASK(7, 6)
+#define XE2_L3_POLICY REG_GENMASK(5, 4)
+#define XE2_L4_POLICY REG_GENMASK(3, 2)
+#define XE2_COH_MODE REG_GENMASK(1, 0)
+
+#define XELPG_L4_POLICY_MASK REG_GENMASK(3, 2)
+#define XELPG_PAT_3_UC REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 3)
+#define XELPG_PAT_1_WT REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 1)
+#define XELPG_PAT_0_WB REG_FIELD_PREP(XELPG_L4_POLICY_MASK, 0)
+#define XELPG_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
+#define XELPG_3_COH_2W REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 3)
+#define XELPG_2_COH_1W REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 2)
+#define XELPG_0_COH_NON REG_FIELD_PREP(XELPG_INDEX_COH_MODE_MASK, 0)
+
+#define XEHPC_CLOS_LEVEL_MASK REG_GENMASK(3, 2)
+#define XEHPC_PAT_CLOS(x) REG_FIELD_PREP(XEHPC_CLOS_LEVEL_MASK, x)
+
+#define XELP_MEM_TYPE_MASK REG_GENMASK(1, 0)
+#define XELP_PAT_WB REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 3)
+#define XELP_PAT_WT REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 2)
+#define XELP_PAT_WC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
+#define XELP_PAT_UC REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)
+
+static const char *XELP_MEM_TYPE_STR_MAP[] = { "UC", "WC", "WT", "WB" };
+
+struct xe_pat_ops {
+ void (*program_graphics)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
+ int n_entries);
+ void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
+ int n_entries);
+ void (*dump)(struct xe_gt *gt, struct drm_printer *p);
+};
+
+static const struct xe_pat_table_entry xelp_pat_table[] = {
+ [0] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
+ [1] = { XELP_PAT_WC, XE_COH_NONE },
+ [2] = { XELP_PAT_WT, XE_COH_NONE },
+ [3] = { XELP_PAT_UC, XE_COH_NONE },
+};
+
+static const struct xe_pat_table_entry xehpc_pat_table[] = {
+ [0] = { XELP_PAT_UC, XE_COH_NONE },
+ [1] = { XELP_PAT_WC, XE_COH_NONE },
+ [2] = { XELP_PAT_WT, XE_COH_NONE },
+ [3] = { XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
+ [4] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WT, XE_COH_NONE },
+ [5] = { XEHPC_PAT_CLOS(1) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
+ [6] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WT, XE_COH_NONE },
+ [7] = { XEHPC_PAT_CLOS(2) | XELP_PAT_WB, XE_COH_AT_LEAST_1WAY },
+};
+
+static const struct xe_pat_table_entry xelpg_pat_table[] = {
+ [0] = { XELPG_PAT_0_WB, XE_COH_NONE },
+ [1] = { XELPG_PAT_1_WT, XE_COH_NONE },
+ [2] = { XELPG_PAT_3_UC, XE_COH_NONE },
+ [3] = { XELPG_PAT_0_WB | XELPG_2_COH_1W, XE_COH_AT_LEAST_1WAY },
+ [4] = { XELPG_PAT_0_WB | XELPG_3_COH_2W, XE_COH_AT_LEAST_1WAY },
+};
+
+/*
+ * The Xe2 table is getting large/complicated so it's easier to review if
+ * provided in a form that exactly matches the bspec's formatting. The meaning
+ * of the fields here are:
+ * - no_promote: 0=promotable, 1=no promote
+ * - comp_en: 0=disable, 1=enable
+ * - l3clos: L3 class of service (0-3)
+ * - l3_policy: 0=WB, 1=XD ("WB - Transient Display"), 3=UC
+ * - l4_policy: 0=WB, 1=WT, 3=UC
+ * - coh_mode: 0=no snoop, 2=1-way coherent, 3=2-way coherent
+ *
+ * Reserved entries should be programmed with the maximum caching, minimum
+ * coherency (which matches an all-0's encoding), so we can just omit them
+ * in the table.
+ */
+#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
+ { \
+ .value = (no_promote ? XE2_NO_PROMOTE : 0) | \
+ (comp_en ? XE2_COMP_EN : 0) | \
+ REG_FIELD_PREP(XE2_L3_CLOS, l3clos) | \
+ REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
+ REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
+ REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
+ .coh_mode = __coh_mode ? XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
+ }
+
+static const struct xe_pat_table_entry xe2_pat_table[] = {
+ [ 0] = XE2_PAT( 0, 0, 0, 0, 3, 0 ),
+ [ 1] = XE2_PAT( 0, 0, 0, 0, 3, 2 ),
+ [ 2] = XE2_PAT( 0, 0, 0, 0, 3, 3 ),
+ [ 3] = XE2_PAT( 0, 0, 0, 3, 3, 0 ),
+ [ 4] = XE2_PAT( 0, 0, 0, 3, 0, 2 ),
+ [ 5] = XE2_PAT( 0, 0, 0, 3, 3, 2 ),
+ [ 6] = XE2_PAT( 1, 0, 0, 1, 3, 0 ),
+ [ 7] = XE2_PAT( 0, 0, 0, 3, 0, 3 ),
+ [ 8] = XE2_PAT( 0, 0, 0, 3, 0, 0 ),
+ [ 9] = XE2_PAT( 0, 1, 0, 0, 3, 0 ),
+ [10] = XE2_PAT( 0, 1, 0, 3, 0, 0 ),
+ [11] = XE2_PAT( 1, 1, 0, 1, 3, 0 ),
+ [12] = XE2_PAT( 0, 1, 0, 3, 3, 0 ),
+ [13] = XE2_PAT( 0, 0, 0, 0, 0, 0 ),
+ [14] = XE2_PAT( 0, 1, 0, 0, 0, 0 ),
+ [15] = XE2_PAT( 1, 1, 0, 1, 1, 0 ),
+ /* 16..19 are reserved; leave set to all 0's */
+ [20] = XE2_PAT( 0, 0, 1, 0, 3, 0 ),
+ [21] = XE2_PAT( 0, 1, 1, 0, 3, 0 ),
+ [22] = XE2_PAT( 0, 0, 1, 0, 3, 2 ),
+ [23] = XE2_PAT( 0, 0, 1, 0, 3, 3 ),
+ [24] = XE2_PAT( 0, 0, 2, 0, 3, 0 ),
+ [25] = XE2_PAT( 0, 1, 2, 0, 3, 0 ),
+ [26] = XE2_PAT( 0, 0, 2, 0, 3, 2 ),
+ [27] = XE2_PAT( 0, 0, 2, 0, 3, 3 ),
+ [28] = XE2_PAT( 0, 0, 3, 0, 3, 0 ),
+ [29] = XE2_PAT( 0, 1, 3, 0, 3, 0 ),
+ [30] = XE2_PAT( 0, 0, 3, 0, 3, 2 ),
+ [31] = XE2_PAT( 0, 0, 3, 0, 3, 3 ),
+};
+
+/* Special PAT values programmed outside the main table */
+static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
+
+u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
+{
+ WARN_ON(pat_index >= xe->pat.n_entries);
+ return xe->pat.table[pat_index].coh_mode;
+}
+
+static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
+ int n_entries)
+{
+ for (int i = 0; i < n_entries; i++) {
+ struct xe_reg reg = XE_REG(_PAT_INDEX(i));
+
+ xe_mmio_write32(gt, reg, table[i].value);
+ }
+}
+
+static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
+ int n_entries)
+{
+ for (int i = 0; i < n_entries; i++) {
+ struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
+
+ xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
+ }
+}
+
+static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, err;
+
+ xe_device_mem_access_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto err_fw;
+
+ drm_printf(p, "PAT table:\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ u32 pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);
+
+ drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
+ XELP_MEM_TYPE_STR_MAP[mem_type], pat);
+ }
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+err_fw:
+ xe_assert(xe, !err);
+ xe_device_mem_access_put(xe);
+}
+
+static const struct xe_pat_ops xelp_pat_ops = {
+ .program_graphics = program_pat,
+ .dump = xelp_dump,
+};
+
+static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, err;
+
+ xe_device_mem_access_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto err_fw;
+
+ drm_printf(p, "PAT table:\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
+ u8 mem_type;
+
+ mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);
+
+ drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
+ XELP_MEM_TYPE_STR_MAP[mem_type], pat);
+ }
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+err_fw:
+ xe_assert(xe, !err);
+ xe_device_mem_access_put(xe);
+}
+
+static const struct xe_pat_ops xehp_pat_ops = {
+ .program_graphics = program_pat_mcr,
+ .dump = xehp_dump,
+};
+
+static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, err;
+
+ xe_device_mem_access_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto err_fw;
+
+ drm_printf(p, "PAT table:\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ u32 pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
+
+ drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
+ REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat),
+ REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
+ }
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+err_fw:
+ xe_assert(xe, !err);
+ xe_device_mem_access_put(xe);
+}
+
+static const struct xe_pat_ops xehpc_pat_ops = {
+ .program_graphics = program_pat_mcr,
+ .dump = xehpc_dump,
+};
+
+static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, err;
+
+ xe_device_mem_access_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto err_fw;
+
+ drm_printf(p, "PAT table:\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ u32 pat;
+
+ if (xe_gt_is_media_type(gt))
+ pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ else
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
+
+ drm_printf(p, "PAT[%2d] = [ %u, %u ] (%#8x)\n", i,
+ REG_FIELD_GET(XELPG_L4_POLICY_MASK, pat),
+ REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
+ }
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+err_fw:
+ xe_assert(xe, !err);
+ xe_device_mem_access_put(xe);
+}
+
+/*
+ * SAMedia register offsets are adjusted by the write methods, and they
+ * target non-MCR registers, whereas on the primary GT the same registers
+ * are MCR.
+ */
+static const struct xe_pat_ops xelpg_pat_ops = {
+ .program_graphics = program_pat,
+ .program_media = program_pat_mcr,
+ .dump = xelpg_dump,
+};
+
+static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
+ int n_entries)
+{
+ program_pat_mcr(gt, table, n_entries);
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
+}
+
+static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
+ int n_entries)
+{
+ program_pat(gt, table, n_entries);
+ xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value);
+}
+
+static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, err;
+ u32 pat;
+
+ xe_device_mem_access_get(xe);
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (err)
+ goto err_fw;
+
+ drm_printf(p, "PAT table:\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ if (xe_gt_is_media_type(gt))
+ pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ else
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
+
+ drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ] (%#8x)\n", i,
+ !!(pat & XE2_NO_PROMOTE),
+ !!(pat & XE2_COMP_EN),
+ REG_FIELD_GET(XE2_L3_CLOS, pat),
+ REG_FIELD_GET(XE2_L3_POLICY, pat),
+ REG_FIELD_GET(XE2_L4_POLICY, pat),
+ REG_FIELD_GET(XE2_COH_MODE, pat),
+ pat);
+ }
+
+ /*
+ * Also print PTA_MODE, which describes how the hardware accesses
+ * PPGTT entries.
+ */
+ if (xe_gt_is_media_type(gt))
+ pat = xe_mmio_read32(gt, XE_REG(_PAT_PTA));
+ else
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
+
+ drm_printf(p, "Page Table Access:\n");
+ drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u, %u ] (%#8x)\n",
+ !!(pat & XE2_NO_PROMOTE),
+ !!(pat & XE2_COMP_EN),
+ REG_FIELD_GET(XE2_L3_CLOS, pat),
+ REG_FIELD_GET(XE2_L3_POLICY, pat),
+ REG_FIELD_GET(XE2_L4_POLICY, pat),
+ REG_FIELD_GET(XE2_COH_MODE, pat),
+ pat);
+
+ err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+err_fw:
+ xe_assert(xe, !err);
+ xe_device_mem_access_put(xe);
+}
+
+static const struct xe_pat_ops xe2_pat_ops = {
+ .program_graphics = xe2lpg_program_pat,
+ .program_media = xe2lpm_program_pat,
+ .dump = xe2_dump,
+};
+
+void xe_pat_init_early(struct xe_device *xe)
+{
+ if (GRAPHICS_VER(xe) == 20) {
+ xe->pat.ops = &xe2_pat_ops;
+ xe->pat.table = xe2_pat_table;
+ xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 3;
+ xe->pat.idx[XE_CACHE_WT] = 15;
+ xe->pat.idx[XE_CACHE_WB] = 2;
+ xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /* Applicable on Xe2 and beyond */
+ } else if (xe->info.platform == XE_METEORLAKE) {
+ xe->pat.ops = &xelpg_pat_ops;
+ xe->pat.table = xelpg_pat_table;
+ xe->pat.n_entries = ARRAY_SIZE(xelpg_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 2;
+ xe->pat.idx[XE_CACHE_WT] = 1;
+ xe->pat.idx[XE_CACHE_WB] = 3;
+ } else if (xe->info.platform == XE_PVC) {
+ xe->pat.ops = &xehpc_pat_ops;
+ xe->pat.table = xehpc_pat_table;
+ xe->pat.n_entries = ARRAY_SIZE(xehpc_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 0;
+ xe->pat.idx[XE_CACHE_WT] = 2;
+ xe->pat.idx[XE_CACHE_WB] = 3;
+ } else if (xe->info.platform == XE_DG2) {
+ /*
+ * Table is the same as previous platforms, but programming
+ * method has changed.
+ */
+ xe->pat.ops = &xehp_pat_ops;
+ xe->pat.table = xelp_pat_table;
+ xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 3;
+ xe->pat.idx[XE_CACHE_WT] = 2;
+ xe->pat.idx[XE_CACHE_WB] = 0;
+ } else if (GRAPHICS_VERx100(xe) <= 1210) {
+ WARN_ON_ONCE(!IS_DGFX(xe) && !xe->info.has_llc);
+ xe->pat.ops = &xelp_pat_ops;
+ xe->pat.table = xelp_pat_table;
+ xe->pat.n_entries = ARRAY_SIZE(xelp_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 3;
+ xe->pat.idx[XE_CACHE_WT] = 2;
+ xe->pat.idx[XE_CACHE_WB] = 0;
+ } else {
+ /*
+ * Going forward we expect to need new PAT settings for most
+ * new platforms; failure to provide a new table can easily
+ * lead to subtle, hard-to-debug problems. If none of the
+ * conditions above match the platform we're running on we'll
+ * raise an error rather than trying to silently inherit the
+ * most recent platform's behavior.
+ */
+ drm_err(&xe->drm, "Missing PAT table for platform with graphics version %d.%02d!\n",
+ GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100);
+ }
+}
+
+void xe_pat_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (!xe->pat.ops)
+ return;
+
+ if (xe_gt_is_media_type(gt))
+ xe->pat.ops->program_media(gt, xe->pat.table, xe->pat.n_entries);
+ else
+ xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
+}
+
+void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (!xe->pat.ops || !xe->pat.ops->dump)
+ return;
+
+ xe->pat.ops->dump(gt, p);
+}
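As a worked decode of the XE2_PAT() encoding used by xe2_pat_table above, expanding entry [1] by hand from the field masks (plain arithmetic, no new behavior implied):

    /* XE2_PAT(0, 0, 0, 0, 3, 2):
     *   l4_policy = 3 (UC)    -> bits 3:2 = 0b11 -> 0x0c
     *   coh_mode  = 2 (1-way) -> bits 1:0 = 0b10 -> 0x02
     * so .value == 0x0000000e and .coh_mode == XE_COH_AT_LEAST_1WAY.
     */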
diff --git a/drivers/gpu/drm/xe/xe_pat.h b/drivers/gpu/drm/xe/xe_pat.h
new file mode 100644
index 000000000..fa0dfbe52
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pat.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_PAT_H_
+#define _XE_PAT_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_device;
+struct xe_gt;
+
+/**
+ * struct xe_pat_table_entry - The pat_index encoding and other meta information.
+ */
+struct xe_pat_table_entry {
+ /**
+ * @value: The platform specific value encoding the various memory
+ * attributes (this maps to some fixed pat_index). So things like
+ * caching, coherency, compression etc can be encoded here.
+ */
+ u32 value;
+
+ /**
+ * @coh_mode: The GPU coherency mode that @value maps to.
+ */
+#define XE_COH_NONE 1
+#define XE_COH_AT_LEAST_1WAY 2
+ u16 coh_mode;
+};
+
+/**
+ * xe_pat_init_early - SW initialization, setting up data based on device
+ * @xe: xe device
+ */
+void xe_pat_init_early(struct xe_device *xe);
+
+/**
+ * xe_pat_init - Program HW PAT table
+ * @gt: GT structure
+ */
+void xe_pat_init(struct xe_gt *gt);
+
+/**
+ * xe_pat_dump - Dump PAT table
+ * @gt: GT structure
+ * @p: Printer to dump info to
+ */
+void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p);
+
+/**
+ * xe_pat_index_get_coh_mode - Extract the coherency mode for the given
+ * pat_index.
+ * @xe: xe device
+ * @pat_index: The pat_index to query
+ */
+u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index);
+
+#endif
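A short, hypothetical consumer of xe_pat_index_get_coh_mode(); the flushing policy sketched here is illustrative only and not the driver's actual rule:

    if (xe_pat_index_get_coh_mode(xe, pat_index) == XE_COH_NONE) {
        /* No hardware snooping for this pat_index, so CPU caches
         * would have to be flushed explicitly before GPU access.
         */
    }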
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
new file mode 100644
index 000000000..dcc5ded15
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_pci.h"
+
+#include <kunit/static_stub.h>
+#include <linux/device/driver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_color_mgmt.h>
+#include <drm/drm_drv.h>
+#include <drm/xe_pciids.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_device.h"
+#include "xe_display.h"
+#include "xe_drv.h"
+#include "xe_gt.h"
+#include "xe_macros.h"
+#include "xe_mmio.h"
+#include "xe_module.h"
+#include "xe_pci_types.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_step.h"
+#include "xe_tile.h"
+
+enum toggle_d3cold {
+ D3COLD_DISABLE,
+ D3COLD_ENABLE,
+};
+
+struct xe_subplatform_desc {
+ enum xe_subplatform subplatform;
+ const char *name;
+ const u16 *pciidlist;
+};
+
+struct xe_gt_desc {
+ enum xe_gt_type type;
+ u32 mmio_adj_limit;
+ u32 mmio_adj_offset;
+};
+
+struct xe_device_desc {
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_graphics_desc *graphics;
+ /* Should only ever be set for platforms without GMD_ID */
+ const struct xe_media_desc *media;
+
+ const char *platform_name;
+ const struct xe_subplatform_desc *subplatforms;
+
+ enum xe_platform platform;
+
+ u8 require_force_probe:1;
+ u8 is_dgfx:1;
+
+ u8 has_display:1;
+ u8 has_heci_gscfi:1;
+ u8 has_llc:1;
+ u8 has_mmio_ext:1;
+ u8 has_sriov:1;
+ u8 skip_guc_pc:1;
+ u8 skip_mtcfg:1;
+ u8 skip_pcode:1;
+};
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
+
+#define PLATFORM(x) \
+ .platform = (x), \
+ .platform_name = #x
+
+#define NOP(x) x
+
+static const struct xe_graphics_desc graphics_xelp = {
+ .name = "Xe_LP",
+ .ver = 12,
+ .rel = 0,
+
+ .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
+
+ .dma_mask_size = 39,
+ .va_bits = 48,
+ .vm_max_level = 3,
+};
+
+static const struct xe_graphics_desc graphics_xelpp = {
+ .name = "Xe_LP+",
+ .ver = 12,
+ .rel = 10,
+
+ .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
+
+ .dma_mask_size = 39,
+ .va_bits = 48,
+ .vm_max_level = 3,
+};
+
+#define XE_HP_FEATURES \
+ .has_range_tlb_invalidation = true, \
+ .has_flat_ccs = true, \
+ .dma_mask_size = 46, \
+ .va_bits = 48, \
+ .vm_max_level = 3
+
+static const struct xe_graphics_desc graphics_xehpg = {
+ .name = "Xe_HPG",
+ .ver = 12,
+ .rel = 55,
+
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
+ BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
+ BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
+
+ XE_HP_FEATURES,
+ .vram_flags = XE_VRAM_FLAGS_NEED64K,
+};
+
+static const struct xe_graphics_desc graphics_xehpc = {
+ .name = "Xe_HPC",
+ .ver = 12,
+ .rel = 60,
+
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
+ BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
+ BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
+ BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
+ BIT(XE_HW_ENGINE_BCS8) |
+ BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
+ BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
+
+ XE_HP_FEATURES,
+ .dma_mask_size = 52,
+ .max_remote_tiles = 1,
+ .va_bits = 57,
+ .vm_max_level = 4,
+ .vram_flags = XE_VRAM_FLAGS_NEED64K,
+
+ .has_asid = 1,
+ .has_flat_ccs = 0,
+ .has_usm = 1,
+};
+
+static const struct xe_graphics_desc graphics_xelpg = {
+ .name = "Xe_LPG",
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
+ BIT(XE_HW_ENGINE_CCS0),
+
+ XE_HP_FEATURES,
+ .has_flat_ccs = 0,
+};
+
+#define XE2_GFX_FEATURES \
+ .dma_mask_size = 46, \
+ .has_asid = 1, \
+ .has_flat_ccs = 1, \
+ .has_range_tlb_invalidation = 1, \
+ .has_usm = 0 /* FIXME: implementation missing */, \
+ .va_bits = 48, \
+ .vm_max_level = 4, \
+ .hw_engine_mask = \
+ BIT(XE_HW_ENGINE_RCS0) | \
+ BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
+ GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
+
+static const struct xe_graphics_desc graphics_xe2 = {
+ .name = "Xe2_LPG",
+
+ XE2_GFX_FEATURES,
+};
+
+static const struct xe_media_desc media_xem = {
+ .name = "Xe_M",
+ .ver = 12,
+ .rel = 0,
+
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
+ BIT(XE_HW_ENGINE_VECS0),
+};
+
+static const struct xe_media_desc media_xehpm = {
+ .name = "Xe_HPM",
+ .ver = 12,
+ .rel = 55,
+
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
+ BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_VECS1),
+};
+
+static const struct xe_media_desc media_xelpmp = {
+ .name = "Xe_LPM+",
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
+ BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_GSCCS0)
+};
+
+static const struct xe_media_desc media_xe2 = {
+ .name = "Xe2_LPM",
+ .hw_engine_mask =
+ BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
+};
+
+static const struct xe_device_desc tgl_desc = {
+ .graphics = &graphics_xelp,
+ .media = &media_xem,
+ PLATFORM(XE_TIGERLAKE),
+ .has_display = true,
+ .has_llc = true,
+ .require_force_probe = true,
+};
+
+static const struct xe_device_desc rkl_desc = {
+ .graphics = &graphics_xelp,
+ .media = &media_xem,
+ PLATFORM(XE_ROCKETLAKE),
+ .has_display = true,
+ .has_llc = true,
+ .require_force_probe = true,
+};
+
+static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };
+
+static const struct xe_device_desc adl_s_desc = {
+ .graphics = &graphics_xelp,
+ .media = &media_xem,
+ PLATFORM(XE_ALDERLAKE_S),
+ .has_display = true,
+ .has_llc = true,
+ .require_force_probe = true,
+ .subplatforms = (const struct xe_subplatform_desc[]) {
+ { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
+ {},
+ },
+};
+
+static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };
+
+static const struct xe_device_desc adl_p_desc = {
+ .graphics = &graphics_xelp,
+ .media = &media_xem,
+ PLATFORM(XE_ALDERLAKE_P),
+ .has_display = true,
+ .has_llc = true,
+ .require_force_probe = true,
+ .subplatforms = (const struct xe_subplatform_desc[]) {
+ { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
+ {},
+ },
+};
+
+static const struct xe_device_desc adl_n_desc = {
+ .graphics = &graphics_xelp,
+ .media = &media_xem,
+ PLATFORM(XE_ALDERLAKE_N),
+ .has_display = true,
+ .has_llc = true,
+ .require_force_probe = true,
+};
+
+#define DGFX_FEATURES \
+ .is_dgfx = 1
+
+static const struct xe_device_desc dg1_desc = {
+ .graphics = &graphics_xelpp,
+ .media = &media_xem,
+ DGFX_FEATURES,
+ PLATFORM(XE_DG1),
+ .has_display = true,
+ .has_heci_gscfi = 1,
+ .require_force_probe = true,
+};
+
+static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
+static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
+static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };
+
+#define DG2_FEATURES \
+ DGFX_FEATURES, \
+ PLATFORM(XE_DG2), \
+ .has_heci_gscfi = 1, \
+ .subplatforms = (const struct xe_subplatform_desc[]) { \
+ { XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
+ { XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
+ { XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
+ { } \
+ }
+
+static const struct xe_device_desc ats_m_desc = {
+ .graphics = &graphics_xehpg,
+ .media = &media_xehpm,
+ .require_force_probe = true,
+
+ DG2_FEATURES,
+ .has_display = false,
+};
+
+static const struct xe_device_desc dg2_desc = {
+ .graphics = &graphics_xehpg,
+ .media = &media_xehpm,
+ .require_force_probe = true,
+
+ DG2_FEATURES,
+ .has_display = true,
+};
+
+static const __maybe_unused struct xe_device_desc pvc_desc = {
+ .graphics = &graphics_xehpc,
+ DGFX_FEATURES,
+ PLATFORM(XE_PVC),
+ .has_display = false,
+ .has_heci_gscfi = 1,
+ .require_force_probe = true,
+};
+
+static const struct xe_device_desc mtl_desc = {
+ /* .graphics and .media determined via GMD_ID */
+ .require_force_probe = true,
+ PLATFORM(XE_METEORLAKE),
+ .has_display = true,
+};
+
+static const struct xe_device_desc lnl_desc = {
+ PLATFORM(XE_LUNARLAKE),
+ .require_force_probe = true,
+};
+
+#undef PLATFORM
+__diag_pop();
+
+/* Map of GMD_ID values to graphics IP */
+static struct gmdid_map graphics_ip_map[] = {
+ { 1270, &graphics_xelpg },
+ { 1271, &graphics_xelpg },
+ { 2004, &graphics_xe2 },
+};
+
+/* Map of GMD_ID values to media IP */
+static struct gmdid_map media_ip_map[] = {
+ { 1300, &media_xelpmp },
+ { 2000, &media_xe2 },
+};
+
+#define INTEL_VGA_DEVICE(id, info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \
+ PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16, \
+ (unsigned long) info }
+
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, a match based on subsystem and subvendor IDs
+ * must come before a more general PCI ID match, otherwise we'll use
+ * the wrong info struct above.
+ */
+static const struct pci_device_id pciidlist[] = {
+ XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
+ XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
+ XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
+ XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
+ XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
+ XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
+ XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#undef INTEL_VGA_DEVICE
+
+/* Is device_id present in the comma-separated list of IDs? */
+static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
+{
+ char *s, *p, *tok;
+ bool ret;
+
+ if (!devices || !*devices)
+ return false;
+
+ /* match everything */
+ if (negative && strcmp(devices, "!*") == 0)
+ return true;
+ if (!negative && strcmp(devices, "*") == 0)
+ return true;
+
+ s = kstrdup(devices, GFP_KERNEL);
+ if (!s)
+ return false;
+
+ for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
+ u16 val;
+
+ if (negative && tok[0] == '!')
+ tok++;
+ else if ((negative && tok[0] != '!') ||
+ (!negative && tok[0] == '!'))
+ continue;
+
+ if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
+ ret = true;
+ break;
+ }
+ }
+
+ kfree(s);
+
+ return ret;
+}
+
+static bool id_forced(u16 device_id)
+{
+ return device_id_in_list(device_id, xe_modparam.force_probe, false);
+}
+
+static bool id_blocked(u16 device_id)
+{
+ return device_id_in_list(device_id, xe_modparam.force_probe, true);
+}
+
+static const struct xe_subplatform_desc *
+find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
+{
+ const struct xe_subplatform_desc *sp;
+ const u16 *id;
+
+ for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
+ for (id = sp->pciidlist; *id; id++)
+ if (*id == xe->info.devid)
+ return sp;
+
+ return NULL;
+}
+
+enum xe_gmdid_type {
+ GMDID_GRAPHICS,
+ GMDID_MEDIA
+};
+
+static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_reg gmdid_reg = GMD_ID;
+ u32 val;
+
+ KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
+
+ if (type == GMDID_MEDIA)
+ gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;
+
+ val = xe_mmio_read32(gt, gmdid_reg);
+ *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
+ *revid = REG_FIELD_GET(GMD_ID_REVID, val);
+}
+
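+/*
+ * Worked example (illustrative values): a GMD_ID readback decoding to
+ * arch = 12, release = 70 and revid = 4 gives *ver == 12 * 100 + 70 ==
+ * 1270 and *revid == 4; 1270 then selects &graphics_xelpg from
+ * graphics_ip_map in handle_gmdid() below.
+ */
+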
+/*
+ * Pre-GMD_ID platform: device descriptor already points to the appropriate
+ * graphics descriptor. Simply forward the description and calculate the version
+ * appropriately. "graphics" should be present in all such platforms, while
+ * media is optional.
+ */
+static void handle_pre_gmdid(struct xe_device *xe,
+ const struct xe_graphics_desc *graphics,
+ const struct xe_media_desc *media)
+{
+ xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;
+
+ if (media)
+ xe->info.media_verx100 = media->ver * 100 + media->rel;
+}
+
+/*
+ * GMD_ID platform: read IP version from hardware and select graphics descriptor
+ * based on the result.
+ */
+static void handle_gmdid(struct xe_device *xe,
+ const struct xe_graphics_desc **graphics,
+ const struct xe_media_desc **media,
+ u32 *graphics_revid,
+ u32 *media_revid)
+{
+ u32 ver;
+
+ read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
+
+ for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
+ if (ver == graphics_ip_map[i].ver) {
+ xe->info.graphics_verx100 = ver;
+ *graphics = graphics_ip_map[i].ip;
+
+ break;
+ }
+ }
+
+ if (!xe->info.graphics_verx100) {
+ drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
+ ver / 100, ver % 100);
+ }
+
+ read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
+
+ /* Media may legitimately be fused off / not present */
+ if (ver == 0)
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
+ if (ver == media_ip_map[i].ver) {
+ xe->info.media_verx100 = ver;
+ *media = media_ip_map[i].ip;
+
+ break;
+ }
+ }
+
+ if (!xe->info.media_verx100) {
+ drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
+ ver / 100, ver % 100);
+ }
+}
+
+/*
+ * Initialize device info content that only depends on static driver_data
+ * passed to the driver at probe time from the PCI ID table.
+ */
+static int xe_info_init_early(struct xe_device *xe,
+ const struct xe_device_desc *desc,
+ const struct xe_subplatform_desc *subplatform_desc)
+{
+ int err;
+
+ xe->info.platform = desc->platform;
+ xe->info.subplatform = subplatform_desc ?
+ subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
+
+ xe->info.is_dgfx = desc->is_dgfx;
+ xe->info.has_heci_gscfi = desc->has_heci_gscfi;
+ xe->info.has_llc = desc->has_llc;
+ xe->info.has_mmio_ext = desc->has_mmio_ext;
+ xe->info.has_sriov = desc->has_sriov;
+ xe->info.skip_guc_pc = desc->skip_guc_pc;
+ xe->info.skip_mtcfg = desc->skip_mtcfg;
+ xe->info.skip_pcode = desc->skip_pcode;
+
+ xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
+ xe_modparam.enable_display &&
+ desc->has_display;
+
+ err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Initialize device info content that does require knowledge about
+ * graphics / media IP version.
+ * Make sure that GT / tile structures allocated by the driver match the data
+ * present in device info.
+ */
+static int xe_info_init(struct xe_device *xe,
+ const struct xe_graphics_desc *graphics_desc,
+ const struct xe_media_desc *media_desc)
+{
+ u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+
+ /*
+ * If this platform supports GMD_ID, we'll detect the proper IP
+ * descriptor to use from hardware registers. desc->graphics will only
+ * ever be set at this point for platforms before GMD_ID. In that case
+ * the IP descriptions and versions are simply derived from that.
+ */
+ if (graphics_desc) {
+ handle_pre_gmdid(xe, graphics_desc, media_desc);
+ xe->info.step = xe_step_pre_gmdid_get(xe);
+ } else {
+ xe_assert(xe, !media_desc);
+ handle_gmdid(xe, &graphics_desc, &media_desc,
+ &graphics_gmdid_revid, &media_gmdid_revid);
+ xe->info.step = xe_step_gmdid_get(xe,
+ graphics_gmdid_revid,
+ media_gmdid_revid);
+ }
+
+ /*
+ * If we couldn't detect the graphics IP, that's considered a fatal
+ * error and we should abort driver load. Failing to detect media
+ * IP is non-fatal; we'll just proceed without enabling media support.
+ */
+ if (!graphics_desc)
+ return -ENODEV;
+
+ xe->info.graphics_name = graphics_desc->name;
+ xe->info.media_name = media_desc ? media_desc->name : "none";
+ xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;
+
+ xe->info.dma_mask_size = graphics_desc->dma_mask_size;
+ xe->info.vram_flags = graphics_desc->vram_flags;
+ xe->info.va_bits = graphics_desc->va_bits;
+ xe->info.vm_max_level = graphics_desc->vm_max_level;
+ xe->info.has_asid = graphics_desc->has_asid;
+ xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
+ xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
+ xe->info.has_usm = graphics_desc->has_usm;
+
+ /*
+ * All platforms have at least one primary GT. Any platform with media
+ * version 13 or higher has an additional dedicated media GT. And
+ * depending on the graphics IP there may be additional "remote tiles."
+ * All of these together determine the overall GT count.
+ *
+ * FIXME: 'tile_count' here is misnamed since the rest of the driver
+ * treats it as the number of GTs rather than just the number of tiles.
+ */
+ xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
+
+ for_each_remote_tile(tile, xe, id) {
+ int err;
+
+ err = xe_tile_init_early(tile, xe, id);
+ if (err)
+ return err;
+ }
+
+ for_each_tile(tile, xe, id) {
+ gt = tile->primary_gt;
+ gt->info.id = xe->info.gt_count++;
+ gt->info.type = XE_GT_TYPE_MAIN;
+ gt->info.__engine_mask = graphics_desc->hw_engine_mask;
+ if (MEDIA_VER(xe) < 13 && media_desc)
+ gt->info.__engine_mask |= media_desc->hw_engine_mask;
+
+ if (MEDIA_VER(xe) < 13 || !media_desc)
+ continue;
+
+ /*
+ * Allocate and setup media GT for platforms with standalone
+ * media.
+ */
+ tile->media_gt = xe_gt_alloc(tile);
+ if (IS_ERR(tile->media_gt))
+ return PTR_ERR(tile->media_gt);
+
+ gt = tile->media_gt;
+ gt->info.type = XE_GT_TYPE_MEDIA;
+ gt->info.__engine_mask = media_desc->hw_engine_mask;
+ gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
+ gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
+
+ /*
+ * FIXME: At the moment multi-tile and standalone media are
+ * mutually exclusive on current platforms. We'll need to
+ * come up with a better way to number GTs if we ever wind
+ * up with platforms that support both together.
+ */
+ drm_WARN_ON(&xe->drm, id != 0);
+ gt->info.id = xe->info.gt_count++;
+ }
+
+ return 0;
+}
+
+static void xe_pci_remove(struct pci_dev *pdev)
+{
+ struct xe_device *xe;
+
+ xe = pci_get_drvdata(pdev);
+ if (!xe) /* driver load aborted, nothing to cleanup */
+ return;
+
+ xe_device_remove(xe);
+ xe_pm_runtime_fini(xe);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct xe_device_desc *desc = (const void *)ent->driver_data;
+ const struct xe_subplatform_desc *subplatform_desc;
+ struct xe_device *xe;
+ int err;
+
+ if (desc->require_force_probe && !id_forced(pdev->device)) {
+ dev_info(&pdev->dev,
+ "Your graphics device %04x is not officially supported\n"
+ "by xe driver in this kernel version. To force Xe probe,\n"
+ "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
+ "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
+ "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
+ pdev->device, pdev->device, pdev->device,
+ pdev->device, pdev->device);
+ return -ENODEV;
+ }
+
+ if (id_blocked(pdev->device)) {
+ dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
+ pdev->vendor, pdev->device);
+ return -ENODEV;
+ }
+
+ if (xe_display_driver_probe_defer(pdev))
+ return -EPROBE_DEFER;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ xe = xe_device_create(pdev, ent);
+ if (IS_ERR(xe))
+ return PTR_ERR(xe);
+
+ pci_set_drvdata(pdev, xe);
+
+ xe_pm_assert_unbounded_bridge(xe);
+ subplatform_desc = find_subplatform(xe, desc);
+
+ pci_set_master(pdev);
+
+ err = xe_info_init_early(xe, desc, subplatform_desc);
+ if (err)
+ return err;
+
+ xe_sriov_probe_early(xe, desc->has_sriov);
+
+ err = xe_device_probe_early(xe);
+ if (err)
+ return err;
+
+ err = xe_info_init(xe, desc->graphics, desc->media);
+ if (err)
+ return err;
+
+ xe_display_probe(xe);
+
+ drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
+ desc->platform_name,
+ subplatform_desc ? subplatform_desc->name : "",
+ xe->info.devid, xe->info.revid,
+ xe->info.is_dgfx,
+ xe->info.graphics_name,
+ xe->info.graphics_verx100 / 100,
+ xe->info.graphics_verx100 % 100,
+ xe->info.media_name,
+ xe->info.media_verx100 / 100,
+ xe->info.media_verx100 % 100,
+ str_yes_no(xe->info.enable_display),
+ xe->info.dma_mask_size, xe->info.tile_count,
+ xe->info.has_heci_gscfi);
+
+ drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
+ xe_step_name(xe->info.step.graphics),
+ xe_step_name(xe->info.step.media),
+ xe_step_name(xe->info.step.display),
+ xe_step_name(xe->info.step.basedie));
+
+ drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
+ str_yes_no(xe_device_has_sriov(xe)),
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+
+ err = xe_device_probe(xe);
+ if (err)
+ return err;
+
+ xe_pm_init(xe);
+
+ drm_dbg(&xe->drm, "d3cold: capable=%s\n",
+ str_yes_no(xe->d3cold.capable));
+
+ return 0;
+}
+
+static void xe_pci_shutdown(struct pci_dev *pdev)
+{
+ xe_device_shutdown(pdev_to_xe_device(pdev));
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct pci_dev *root_pdev;
+
+ if (!xe->d3cold.capable)
+ return;
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (!root_pdev)
+ return;
+
+ switch (toggle) {
+ case D3COLD_DISABLE:
+ pci_d3cold_disable(root_pdev);
+ break;
+ case D3COLD_ENABLE:
+ pci_d3cold_enable(root_pdev);
+ break;
+ }
+}
+
+static int xe_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+
+ err = xe_pm_suspend(pdev_to_xe_device(pdev));
+ if (err)
+ return err;
+
+ /*
+ * Enabling D3Cold is needed for S2Idle/S0ix.
+ * It is safe to allow it here since xe_pm_suspend has evicted
+ * the local memory and the direct complete optimization is disabled.
+ */
+ d3cold_toggle(pdev, D3COLD_ENABLE);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+static int xe_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+
+ /* Give back the D3Cold decision to the runtime PM */
+ d3cold_toggle(pdev, D3COLD_DISABLE);
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+
+ err = xe_pm_resume(pdev_to_xe_device(pdev));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int xe_pci_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ int err;
+
+ err = xe_pm_runtime_suspend(xe);
+ if (err)
+ return err;
+
+ pci_save_state(pdev);
+
+ if (xe->d3cold.allowed) {
+ d3cold_toggle(pdev, D3COLD_ENABLE);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ } else {
+ d3cold_toggle(pdev, D3COLD_DISABLE);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int xe_pci_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ int err;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ if (xe->d3cold.allowed) {
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+ }
+
+ return xe_pm_runtime_resume(xe);
+}
+
+static int xe_pci_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+
+ xe_pm_d3cold_allowed_toggle(xe);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xe_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
+ SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
+};
+#endif
+
+static struct pci_driver xe_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = xe_pci_probe,
+ .remove = xe_pci_remove,
+ .shutdown = xe_pci_shutdown,
+#ifdef CONFIG_PM_SLEEP
+ .driver.pm = &xe_pm_ops,
+#endif
+};
+
+int xe_register_pci_driver(void)
+{
+ return pci_register_driver(&xe_pci_driver);
+}
+
+void xe_unregister_pci_driver(void)
+{
+ pci_unregister_driver(&xe_pci_driver);
+}
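+
+/*
+ * A minimal sketch of how these entry points are expected to be used from
+ * module init/exit (the xe_init/xe_exit names are illustrative, not part
+ * of this file):
+ *
+ *	static int __init xe_init(void)
+ *	{
+ *		return xe_register_pci_driver();
+ *	}
+ *
+ *	static void __exit xe_exit(void)
+ *	{
+ *		xe_unregister_pci_driver();
+ *	}
+ */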
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_pci.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pci.h b/drivers/gpu/drm/xe/xe_pci.h
new file mode 100644
index 000000000..611c1209b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pci.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_PCI_H_
+#define _XE_PCI_H_
+
+int xe_register_pci_driver(void);
+void xe_unregister_pci_driver(void);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
new file mode 100644
index 000000000..b1ad12fa2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_PCI_TYPES_H_
+#define _XE_PCI_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_graphics_desc {
+ const char *name;
+ u8 ver;
+ u8 rel;
+
+ u8 dma_mask_size; /* available DMA address bits */
+ u8 va_bits;
+ u8 vm_max_level;
+ u8 vram_flags;
+
+ u64 hw_engine_mask; /* hardware engines provided by graphics IP */
+
+ u32 tile_mmio_ext_size; /* size of MMIO extension space, per-tile */
+
+ u8 max_remote_tiles:2;
+
+ u8 has_asid:1;
+ u8 has_flat_ccs:1;
+ u8 has_range_tlb_invalidation:1;
+ u8 has_usm:1;
+};
+
+struct xe_media_desc {
+ const char *name;
+ u8 ver;
+ u8 rel;
+
+ u64 hw_engine_mask; /* hardware engines provided by media IP */
+};
+
+struct gmdid_map {
+ unsigned int ver;
+ const void *ip;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
new file mode 100644
index 000000000..b324dc2a5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_pcode.h"
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_gt.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+
+/**
+ * DOC: PCODE
+ *
+ * Xe PCODE is the component responsible for interfacing with the PCODE
+ * firmware.
+ * It shall provide a very simple ABI to other Xe components, while being the
+ * single, consolidated place that communicates with PCODE. All read
+ * and write operations to PCODE will be internal and private to this component.
+ *
+ * What's next:
+ * - PCODE hw metrics
+ * - PCODE for display operations
+ */
+
+static int pcode_mailbox_status(struct xe_gt *gt)
+{
+ u32 err;
+ static const struct pcode_err_decode err_decode[] = {
+ [PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
+ [PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
+ [PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
+ [PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
+ [PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
+ [PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
+ "GT ratio out of range"},
+ [PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
+ [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
+ };
+
+ lockdep_assert_held(&gt->pcode.lock);
+
+ err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ if (err) {
+ drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
+ err_decode[err].str ?: "Unknown");
+ return err_decode[err].errno ?: -EPROTO;
+ }
+
+ return 0;
+}
+
+static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
+{
+ int err;
+
+ if (gt_to_xe(gt)->info.skip_pcode)
+ return 0;
+
+ lockdep_assert_held(&gt->pcode.lock);
+
+ if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
+ return -EAGAIN;
+
+ xe_mmio_write32(gt, PCODE_DATA0, *data0);
+ xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
+ xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
+
+ err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
+ timeout_ms * 1000, NULL, atomic);
+ if (err)
+ return err;
+
+ if (return_data) {
+ *data0 = xe_mmio_read32(gt, PCODE_DATA0);
+ if (data1)
+ *data1 = xe_mmio_read32(gt, PCODE_DATA1);
+ }
+
+ return pcode_mailbox_status(gt);
+}
+
+int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
+{
+ int err;
+
+ mutex_lock(&gt->pcode.lock);
+ err = pcode_mailbox_rw(gt, mbox, &data, NULL, timeout, false, false);
+ mutex_unlock(&gt->pcode.lock);
+
+ return err;
+}
+
+int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
+{
+ int err;
+
+ mutex_lock(&gt->pcode.lock);
+ err = pcode_mailbox_rw(gt, mbox, val, val1, 1, true, false);
+ mutex_unlock(&gt->pcode.lock);
+
+ return err;
+}
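+
+/*
+ * A minimal usage sketch, assuming a caller such as hwmon code: reading
+ * the I1 power setup value through the mailbox could look roughly like
+ * this (read_i1_example() is a hypothetical helper, not part of this
+ * patch):
+ *
+ *	static int read_i1_example(struct xe_gt *gt, u32 *uval)
+ *	{
+ *		return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ *					POWER_SETUP_SUBCOMMAND_READ_I1, 0),
+ *				     uval, NULL);
+ *	}
+ */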
+
+static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status, bool atomic, int timeout_us)
+{
+ int slept, wait = 10;
+
+ for (slept = 0; slept < timeout_us; slept += wait) {
+ *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
+ if ((*status == 0) && ((request & reply_mask) == reply))
+ return 0;
+
+ if (atomic)
+ udelay(wait);
+ else
+ usleep_range(wait, wait << 1);
+ wait <<= 1;
+ }
+
+ return -ETIMEDOUT;
+}
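+
+/*
+ * For example, with a 1000 us budget the loop above retries the mailbox
+ * after sleeping roughly 10, 20, 40, 80, ... us (exponential backoff),
+ * until either the masked reply matches or the budget is exhausted.
+ */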
+
+/**
+ * xe_pcode_request - send PCODE request until acknowledgment
+ * @gt: gt instance
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms; if this times out, the poll is retried for another
+ * 50 ms with preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ mutex_lock(&gt->pcode.lock);
+
+ ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ false, timeout_base_ms * 1000);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
+ * account for interrupts that could reduce the number of these
+ * requests, and for any quirks of the PCODE firmware that delays
+ * the request completion.
+ */
+ drm_err(&gt_to_xe(gt)->drm,
+ "PCODE timeout, retrying with preemption disabled\n");
+ drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
+ preempt_disable();
+ ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ true, timeout_base_ms * 1000);
+ preempt_enable();
+
+out:
+ mutex_unlock(&gt->pcode.lock);
+ return status ? status : ret;
+}
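+
+/*
+ * A minimal usage sketch (REQ, ACK_MASK and ACK are placeholders for
+ * caller-defined mailbox values, not defines from this patch):
+ *
+ *	err = xe_pcode_request(gt, mbox, REQ, ACK_MASK, ACK, 150);
+ */
+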
+/**
+ * xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
+ * @gt: gt instance
+ * @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
+ * @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
+ *
+ * This function initializes PCODE's QOS frequency table for proper minimal
+ * frequency/power steering decisions, depending on the currently requested GT
+ * frequency. For older platforms this was a more complete table including
+ * the IA freq. However, for the latest platforms this table has become a
+ * simple 1:1 Ring vs GT frequency. Even so, without setting it, PCODE might
+ * not make the right decisions for some memory frequencies and affect latency.
+ *
+ * It returns 0 on success, -EINVAL if the maximum frequency is not higher
+ * than the minimum, and other errors directly translated from the PCODE
+ * error returns:
+ * - -ENXIO: "Illegal Command"
+ * - -ETIMEDOUT: "Timed out"
+ * - -EINVAL: "Illegal Data"
+ * - -ENXIO: "Illegal Subcommand"
+ * - -EBUSY: "PCODE Locked"
+ * - -EOVERFLOW: "GT ratio out of range"
+ * - -EACCES: "PCODE Rejected"
+ * - -EPROTO: "Unknown"
+ */
+int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+ u32 max_gt_freq)
+{
+ int ret;
+ u32 freq;
+
+ if (!gt_to_xe(gt)->info.has_llc)
+ return 0;
+
+ if (max_gt_freq <= min_gt_freq)
+ return -EINVAL;
+
+ mutex_lock(&gt->pcode.lock);
+ for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
+ u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;
+
+ ret = pcode_mailbox_rw(gt, PCODE_WRITE_MIN_FREQ_TABLE,
+ &data, NULL, 1, false, false);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&gt->pcode.lock);
+ return ret;
+}
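+
+/*
+ * A usage sketch (illustrative, assuming a frequency-management caller):
+ * the table takes frequencies in units of 50 MHz, so a caller holding kHz
+ * values would convert first, e.g.
+ *
+ *	err = xe_pcode_init_min_freq_table(gt, min_freq_khz / 50000,
+ *					   max_freq_khz / 50000);
+ */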
+
+/**
+ * xe_pcode_init - Ensure PCODE is initialized
+ * @gt: gt instance
+ *
+ * This function ensures that PCODE is properly initialized. To be called during
+ * probe and resume paths.
+ *
+ * It returns 0 on success, and -error number on failure.
+ */
+int xe_pcode_init(struct xe_gt *gt)
+{
+ u32 status, request = DGFX_GET_INIT_STATUS;
+ int timeout_us = 180000000; /* 3 min */
+ int ret;
+
+ if (gt_to_xe(gt)->info.skip_pcode)
+ return 0;
+
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return 0;
+
+ mutex_lock(&gt->pcode.lock);
+ ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+ DGFX_INIT_STATUS_COMPLETE,
+ DGFX_INIT_STATUS_COMPLETE,
+ &status, false, timeout_us);
+ mutex_unlock(&gt->pcode.lock);
+
+ if (ret)
+ drm_err(&gt_to_xe(gt)->drm,
+ "PCODE initialization timedout after: 3 min\n");
+
+ return ret;
+}
+
+/**
+ * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
+ * @gt: gt instance
+ *
+ * This function initializes the xe_pcode component, and when needed, it ensures
+ * that PCODE has properly performed its initialization and it is really ready
+ * to go. To be called once only during probe.
+ *
+ * It returns 0 on success, and -error number on failure.
+ */
+int xe_pcode_probe(struct xe_gt *gt)
+{
+ drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+
+ if (gt_to_xe(gt)->info.skip_pcode)
+ return 0;
+
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return 0;
+
+ return xe_pcode_init(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
new file mode 100644
index 000000000..08cb1d047
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PCODE_H_
+#define _XE_PCODE_H_
+
+#include <linux/types.h>
+struct xe_gt;
+
+int xe_pcode_probe(struct xe_gt *gt);
+int xe_pcode_init(struct xe_gt *gt);
+int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+ u32 max_gt_freq);
+int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
+int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 val,
+ int timeout_ms);
+#define xe_pcode_write(gt, mbox, val) \
+ xe_pcode_write_timeout(gt, mbox, val, 1)
+
+int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_ms);
+
+#define PCODE_MBOX(mbcmd, param1, param2)\
+ (FIELD_PREP(PCODE_MB_COMMAND, mbcmd)\
+ | FIELD_PREP(PCODE_MB_PARAM1, param1)\
+ | FIELD_PREP(PCODE_MB_PARAM2, param2))
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
new file mode 100644
index 000000000..5935cfe30
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+/* Internal to xe_pcode */
+
+#include "regs/xe_reg_defs.h"
+
+#define PCODE_MAILBOX XE_REG(0x138124)
+#define PCODE_READY REG_BIT(31)
+#define PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define PCODE_MB_COMMAND REG_GENMASK(7, 0)
+#define PCODE_ERROR_MASK 0xFF
+#define PCODE_SUCCESS 0x0
+#define PCODE_ILLEGAL_CMD 0x1
+#define PCODE_TIMEOUT 0x2
+#define PCODE_ILLEGAL_DATA 0x3
+#define PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define PCODE_LOCKED 0x6
+#define PCODE_GT_RATIO_OUT_OF_RANGE 0x10
+#define PCODE_REJECTED 0x11
+
+#define PCODE_DATA0 XE_REG(0x138128)
+#define PCODE_DATA1 XE_REG(0x13812C)
+
+/* Min Freq QOS Table */
+#define PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define PCODE_READ_MIN_FREQ_TABLE 0x9
+#define PCODE_FREQ_RING_RATIO_SHIFT 16
+
+/* PCODE Init */
+#define DGFX_PCODE_STATUS 0x7E
+#define DGFX_GET_INIT_STATUS 0x0
+#define DGFX_INIT_STATUS_COMPLETE 0x1
+
+#define PCODE_POWER_SETUP 0x7C
+#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
+#define POWER_SETUP_SUBCOMMAND_WRITE_I1 0x5
+#define POWER_SETUP_I1_WATTS REG_BIT(31)
+#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
+#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+
+struct pcode_err_decode {
+ int errno;
+ const char *str;
+};
+
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
new file mode 100644
index 000000000..553f53dbd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PLATFORM_INFO_TYPES_H_
+#define _XE_PLATFORM_INFO_TYPES_H_
+
+/*
+ * Keep this in graphics version based order and chronological order within a
+ * version
+ */
+enum xe_platform {
+ XE_PLATFORM_UNINITIALIZED = 0,
+ XE_TIGERLAKE,
+ XE_ROCKETLAKE,
+ XE_ALDERLAKE_S,
+ XE_ALDERLAKE_P,
+ XE_ALDERLAKE_N,
+ XE_DG1,
+ XE_DG2,
+ XE_PVC,
+ XE_METEORLAKE,
+ XE_LUNARLAKE,
+};
+
+enum xe_subplatform {
+ XE_SUBPLATFORM_UNINITIALIZED = 0,
+ XE_SUBPLATFORM_NONE,
+ XE_SUBPLATFORM_ALDERLAKE_P_RPLU,
+ XE_SUBPLATFORM_ALDERLAKE_S_RPLS,
+ XE_SUBPLATFORM_DG2_G10,
+ XE_SUBPLATFORM_DG2_G11,
+ XE_SUBPLATFORM_DG2_G12,
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
new file mode 100644
index 000000000..b429c2876
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_pm.h"
+
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "xe_bo.h"
+#include "xe_bo_evict.h"
+#include "xe_device.h"
+#include "xe_device_sysfs.h"
+#include "xe_display.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_irq.h"
+#include "xe_pcode.h"
+#include "xe_wa.h"
+
+/**
+ * DOC: Xe Power Management
+ *
+ * Xe PM shall be guided by simplicity.
+ * Use the simplest hook options whenever possible.
+ * Let's not reinvent the runtime_pm references and hooks.
+ * It shall have a clear separation of display and gt underneath this
+ * component.
+ *
+ * What's next:
+ *
+ * For now, s2idle and s3 only work on integrated devices. The next step
+ * is to iterate through all VRAM BOs, backing them up into system memory
+ * before allowing the system suspend.
+ *
+ * Also runtime_pm needs to be here from the beginning.
+ *
+ * RC6/RPS are also critical PM features. Let's start with GuCRC and GuC SLPC
+ * and no wait boost. Frequency optimizations should come on a next stage.
+ */
+
+/**
+ * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
+ * @xe: xe device instance
+ *
+ * Return: 0 on success
+ */
+int xe_pm_suspend(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ u8 id;
+ int err;
+
+ for_each_gt(gt, xe, id)
+ xe_gt_suspend_prepare(gt);
+
+ /* FIXME: Super racey... */
+ err = xe_bo_evict_all(xe);
+ if (err)
+ return err;
+
+ xe_display_pm_suspend(xe);
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_suspend(gt);
+ if (err) {
+ xe_display_pm_resume(xe);
+ return err;
+ }
+ }
+
+ xe_irq_suspend(xe);
+
+ xe_display_pm_suspend_late(xe);
+
+ return 0;
+}
+
+/**
+ * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
+ * @xe: xe device instance
+ *
+ * Return: 0 on success
+ */
+int xe_pm_resume(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+ int err;
+
+ for_each_tile(tile, xe, id)
+ xe_wa_apply_tile_workarounds(tile);
+
+ for_each_gt(gt, xe, id) {
+ err = xe_pcode_init(gt);
+ if (err)
+ return err;
+ }
+
+ xe_display_pm_resume_early(xe);
+
+ /*
+ * This only restores pinned memory which is the memory required for the
+ * GT(s) to resume.
+ */
+ err = xe_bo_restore_kernel(xe);
+ if (err)
+ return err;
+
+ xe_irq_resume(xe);
+
+ xe_display_pm_resume(xe);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_resume(gt);
+
+ err = xe_bo_restore_user(xe);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static bool xe_pm_pci_d3cold_capable(struct pci_dev *pdev)
+{
+ struct pci_dev *root_pdev;
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (!root_pdev)
+ return false;
+
+ /* D3Cold requires PME capability and _PR3 power resource */
+ if (!pci_pme_capable(root_pdev, PCI_D3cold) || !pci_pr3_present(root_pdev))
+ return false;
+
+ return true;
+}
+
+static void xe_pm_runtime_init(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+
+ /*
+ * Disable the system suspend direct complete optimization.
+ * We need to ensure that the regular device suspend/resume functions
+ * are called since our runtime_pm cannot guarantee local memory
+ * eviction for d3cold.
+ * TODO: Check HDA audio dependencies claimed by i915, and then enforce
+ * this option to integrated graphics as well.
+ */
+ if (IS_DGFX(xe))
+ dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_set_active(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
+}
+
+void xe_pm_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ /* For now suspend/resume is only allowed with GuC */
+ if (!xe_device_uc_enabled(xe))
+ return;
+
+ drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
+
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(pdev);
+
+ if (xe->d3cold.capable) {
+ xe_device_sysfs_init(xe);
+ xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ }
+
+ xe_pm_runtime_init(xe);
+}
+
+void xe_pm_runtime_fini(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_forbid(dev);
+}
+
+static void xe_pm_write_callback_task(struct xe_device *xe,
+ struct task_struct *task)
+{
+ WRITE_ONCE(xe->pm_callback_task, task);
+
+ /*
+ * Just in case it's somehow possible for our writes to be reordered to
+ * the extent that something else re-uses the task written in
+ * pm_callback_task. For example after returning from the callback, but
+ * before the reordered write that resets pm_callback_task back to NULL.
+ */
+ smp_mb(); /* pairs with xe_pm_read_callback_task */
+}
+
+struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
+{
+ smp_mb(); /* pairs with xe_pm_write_callback_task */
+
+ return READ_ONCE(xe->pm_callback_task);
+}
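+
+/*
+ * A sketch of the intended use (illustrative, mirroring the mem_access
+ * side): a caller can detect that it is already running from within a PM
+ * callback and avoid recursing into runtime PM, e.g.
+ *
+ *	if (xe_pm_read_callback_task(xe) == current)
+ *		return;
+ */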
+
+int xe_pm_runtime_suspend(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ u8 id;
+ int err = 0;
+
+ if (xe->d3cold.allowed && xe_device_mem_access_ongoing(xe))
+ return -EBUSY;
+
+ /* Disable access_ongoing asserts and prevent recursive pm calls */
+ xe_pm_write_callback_task(xe, current);
+
+ /*
+ * The actual xe_device_mem_access_put() is always async underneath, so
+	 * exactly where that is called should make no difference to us. However,
+ * we still need to be very careful with the locks that this callback
+ * acquires and the locks that are acquired and held by any callers of
+ * xe_device_mem_access_get(). We already have the matching annotation
+ * on that side, but we also need it here. For example lockdep should be
+ * able to tell us if the following scenario is in theory possible:
+ *
+ * CPU0 | CPU1 (kworker)
+ * lock(A) |
+ * | xe_pm_runtime_suspend()
+ * | lock(A)
+ * xe_device_mem_access_get() |
+ *
+ * This will clearly deadlock since rpm core needs to wait for
+ * xe_pm_runtime_suspend() to complete, but here we are holding lock(A)
+ * on CPU0 which prevents CPU1 making forward progress. With the
+ * annotation here and in xe_device_mem_access_get() lockdep will see
+ * the potential lock inversion and give us a nice splat.
+ */
+ lock_map_acquire(&xe_device_mem_access_lockdep_map);
+
+ if (xe->d3cold.allowed) {
+ err = xe_bo_evict_all(xe);
+ if (err)
+ goto out;
+ }
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_suspend(gt);
+ if (err)
+ goto out;
+ }
+
+ xe_irq_suspend(xe);
+out:
+ lock_map_release(&xe_device_mem_access_lockdep_map);
+ xe_pm_write_callback_task(xe, NULL);
+ return err;
+}
+
+int xe_pm_runtime_resume(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ u8 id;
+ int err = 0;
+
+ /* Disable access_ongoing asserts and prevent recursive pm calls */
+ xe_pm_write_callback_task(xe, current);
+
+ lock_map_acquire(&xe_device_mem_access_lockdep_map);
+
+ /*
+	 * It is possible that xe has allowed d3cold but other PCIe devices in
+	 * the gfx card SoC have blocked d3cold, in which case the card has not
+	 * really lost power. Detecting primary GT power is sufficient.
+ */
+ gt = xe_device_get_gt(xe, 0);
+ xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
+
+ if (xe->d3cold.allowed && xe->d3cold.power_lost) {
+ for_each_gt(gt, xe, id) {
+ err = xe_pcode_init(gt);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * This only restores pinned memory which is the memory
+ * required for the GT(s) to resume.
+ */
+ err = xe_bo_restore_kernel(xe);
+ if (err)
+ goto out;
+ }
+
+ xe_irq_resume(xe);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_resume(gt);
+
+ if (xe->d3cold.allowed && xe->d3cold.power_lost) {
+ err = xe_bo_restore_user(xe);
+ if (err)
+ goto out;
+ }
+out:
+ lock_map_release(&xe_device_mem_access_lockdep_map);
+ xe_pm_write_callback_task(xe, NULL);
+ return err;
+}
+
+int xe_pm_runtime_get(struct xe_device *xe)
+{
+ return pm_runtime_get_sync(xe->drm.dev);
+}
+
+int xe_pm_runtime_put(struct xe_device *xe)
+{
+ pm_runtime_mark_last_busy(xe->drm.dev);
+ return pm_runtime_put(xe->drm.dev);
+}
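+
+/*
+ * A minimal sketch of the expected pairing around hardware access
+ * (illustrative only):
+ *
+ *	xe_pm_runtime_get(xe);
+ *	... access the hardware ...
+ *	xe_pm_runtime_put(xe);
+ */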
+
+int xe_pm_runtime_get_if_active(struct xe_device *xe)
+{
+ return pm_runtime_get_if_active(xe->drm.dev, true);
+}
+
+void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+
+ if (!bridge)
+ return;
+
+ if (!bridge->driver) {
+ drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM support.\n");
+ device_set_pm_not_required(&pdev->dev);
+ }
+}
+
+int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
+{
+ struct ttm_resource_manager *man;
+ u32 vram_total_mb = 0;
+ int i;
+
+ for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
+ man = ttm_manager_type(&xe->ttm, i);
+ if (man)
+ vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
+ }
+
+ drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
+
+ if (threshold > vram_total_mb)
+ return -EINVAL;
+
+ mutex_lock(&xe->d3cold.lock);
+ xe->d3cold.vram_threshold = threshold;
+ mutex_unlock(&xe->d3cold.lock);
+
+ return 0;
+}
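+
+/*
+ * A usage sketch, assuming a sysfs store handler wired up by
+ * xe_device_sysfs_init() (the handler name and wiring are illustrative,
+ * not part of this file):
+ *
+ *	static ssize_t vram_d3cold_threshold_store(struct device *dev,
+ *						   struct device_attribute *attr,
+ *						   const char *buf, size_t count)
+ *	{
+ *		struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
+ *		u32 threshold;
+ *		int err;
+ *
+ *		err = kstrtou32(buf, 10, &threshold);
+ *		if (err)
+ *			return err;
+ *
+ *		err = xe_pm_set_vram_threshold(xe, threshold);
+ *		return err ?: count;
+ *	}
+ */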
+
+void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
+{
+ struct ttm_resource_manager *man;
+ u32 total_vram_used_mb = 0;
+ u64 vram_used;
+ int i;
+
+ if (!xe->d3cold.capable) {
+ xe->d3cold.allowed = false;
+ return;
+ }
+
+ for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
+ man = ttm_manager_type(&xe->ttm, i);
+ if (man) {
+ vram_used = ttm_resource_manager_usage(man);
+ total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
+ }
+ }
+
+ mutex_lock(&xe->d3cold.lock);
+
+ if (total_vram_used_mb < xe->d3cold.vram_threshold)
+ xe->d3cold.allowed = true;
+ else
+ xe->d3cold.allowed = false;
+
+ mutex_unlock(&xe->d3cold.lock);
+
+ drm_dbg(&xe->drm,
+ "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
+}
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
new file mode 100644
index 000000000..6b9031f7a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PM_H_
+#define _XE_PM_H_
+
+#include <linux/pm_runtime.h>
+
+/*
+ * TODO: Threshold = 0 will block D3Cold.
+ * Before we can move this to a higher value (like 300), we need to:
+ * 1. rewrite the VRAM save / restore to avoid buffer object locks
+ */
+#define DEFAULT_VRAM_THRESHOLD 0 /* in MB */
+
+struct xe_device;
+
+int xe_pm_suspend(struct xe_device *xe);
+int xe_pm_resume(struct xe_device *xe);
+
+void xe_pm_init(struct xe_device *xe);
+void xe_pm_runtime_fini(struct xe_device *xe);
+int xe_pm_runtime_suspend(struct xe_device *xe);
+int xe_pm_runtime_resume(struct xe_device *xe);
+int xe_pm_runtime_get(struct xe_device *xe);
+int xe_pm_runtime_put(struct xe_device *xe);
+int xe_pm_runtime_get_if_active(struct xe_device *xe);
+void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
+int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
+void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
new file mode 100644
index 000000000..7bce2a332
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_preempt_fence.h"
+
+#include <linux/slab.h>
+
+#include "xe_exec_queue.h"
+#include "xe_vm.h"
+
+static void preempt_fence_work_func(struct work_struct *w)
+{
+ bool cookie = dma_fence_begin_signalling();
+ struct xe_preempt_fence *pfence =
+ container_of(w, typeof(*pfence), preempt_work);
+ struct xe_exec_queue *q = pfence->q;
+
+ if (pfence->error)
+ dma_fence_set_error(&pfence->base, pfence->error);
+ else
+ q->ops->suspend_wait(q);
+
+ dma_fence_signal(&pfence->base);
+ dma_fence_end_signalling(cookie);
+
+ xe_vm_queue_rebind_worker(q->vm);
+
+ xe_exec_queue_put(q);
+}
+
+static const char *
+preempt_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "xe";
+}
+
+static const char *
+preempt_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "preempt";
+}
+
+static bool preempt_fence_enable_signaling(struct dma_fence *fence)
+{
+ struct xe_preempt_fence *pfence =
+ container_of(fence, typeof(*pfence), base);
+ struct xe_exec_queue *q = pfence->q;
+
+ pfence->error = q->ops->suspend(q);
+ queue_work(system_unbound_wq, &pfence->preempt_work);
+ return true;
+}
+
+static const struct dma_fence_ops preempt_fence_ops = {
+ .get_driver_name = preempt_fence_get_driver_name,
+ .get_timeline_name = preempt_fence_get_timeline_name,
+ .enable_signaling = preempt_fence_enable_signaling,
+};
+
+/**
+ * xe_preempt_fence_alloc() - Allocate a preempt fence with minimal
+ * initialization
+ *
+ * Allocate a preempt fence, and initialize its list head.
+ * If the preempt_fence allocated has been armed with
+ * xe_preempt_fence_arm(), it must be freed using dma_fence_put(). If not,
+ * it must be freed using xe_preempt_fence_free().
+ *
+ * Return: A struct xe_preempt_fence pointer used for calling into
+ * xe_preempt_fence_arm() or xe_preempt_fence_free().
+ * An error pointer on error.
+ */
+struct xe_preempt_fence *xe_preempt_fence_alloc(void)
+{
+ struct xe_preempt_fence *pfence;
+
+ pfence = kmalloc(sizeof(*pfence), GFP_KERNEL);
+ if (!pfence)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&pfence->link);
+ INIT_WORK(&pfence->preempt_work, preempt_fence_work_func);
+
+ return pfence;
+}
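+
+/*
+ * A sketch of the intended two-phase use (illustrative): allocate fences
+ * up front where sleeping is allowed, then arm them later, for example
+ * under the vm's resv lock:
+ *
+ *	pfence = xe_preempt_fence_alloc();
+ *	if (IS_ERR(pfence))
+ *		return PTR_ERR(pfence);
+ *	...
+ *	fence = xe_preempt_fence_arm(pfence, q, context, seqno);
+ */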
+
+/**
+ * xe_preempt_fence_free() - Free a preempt fence allocated using
+ * xe_preempt_fence_alloc().
+ * @pfence: pointer obtained from xe_preempt_fence_alloc();
+ *
+ * Free a preempt fence that has not yet been armed.
+ */
+void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
+{
+ list_del(&pfence->link);
+ kfree(pfence);
+}
+
+/**
+ * xe_preempt_fence_arm() - Arm a preempt fence allocated using
+ * xe_preempt_fence_alloc().
+ * @pfence: The struct xe_preempt_fence pointer returned from
+ * xe_preempt_fence_alloc().
+ * @q: The struct xe_exec_queue used for arming.
+ * @context: The dma-fence context used for arming.
+ * @seqno: The dma-fence seqno used for arming.
+ *
+ * Inserts the preempt fence into @context's timeline, takes @link off any
+ * list, and registers the struct xe_exec_queue as the xe_engine to be preempted.
+ *
+ * Return: A pointer to a struct dma_fence embedded into the preempt fence.
+ * This function doesn't error.
+ */
+struct dma_fence *
+xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
+ u64 context, u32 seqno)
+{
+ list_del_init(&pfence->link);
+ pfence->q = xe_exec_queue_get(q);
+ dma_fence_init(&pfence->base, &preempt_fence_ops,
+ &q->compute.lock, context, seqno);
+
+ return &pfence->base;
+}
+
+/**
+ * xe_preempt_fence_create() - Helper to create and arm a preempt fence.
+ * @q: The struct xe_exec_queue used for arming.
+ * @context: The dma-fence context used for arming.
+ * @seqno: The dma-fence seqno used for arming.
+ *
+ * Allocates and inserts the preempt fence into @context's timeline,
+ * and registers @q as the struct xe_exec_queue to be preempted.
+ *
+ * Return: A pointer to the resulting struct dma_fence on success. An error
+ * pointer on error. In particular if allocation fails it returns
+ * ERR_PTR(-ENOMEM);
+ */
+struct dma_fence *
+xe_preempt_fence_create(struct xe_exec_queue *q,
+ u64 context, u32 seqno)
+{
+ struct xe_preempt_fence *pfence;
+
+ pfence = xe_preempt_fence_alloc();
+ if (IS_ERR(pfence))
+ return ERR_CAST(pfence);
+
+ return xe_preempt_fence_arm(pfence, q, context, seqno);
+}
+
+bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
+{
+ return fence->ops == &preempt_fence_ops;
+}
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.h b/drivers/gpu/drm/xe/xe_preempt_fence.h
new file mode 100644
index 000000000..9406c6fea
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PREEMPT_FENCE_H_
+#define _XE_PREEMPT_FENCE_H_
+
+#include "xe_preempt_fence_types.h"
+
+struct list_head;
+
+struct dma_fence *
+xe_preempt_fence_create(struct xe_exec_queue *q,
+ u64 context, u32 seqno);
+
+struct xe_preempt_fence *xe_preempt_fence_alloc(void);
+
+void xe_preempt_fence_free(struct xe_preempt_fence *pfence);
+
+struct dma_fence *
+xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
+ u64 context, u32 seqno);
+
+static inline struct xe_preempt_fence *
+to_preempt_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct xe_preempt_fence, base);
+}
+
+/**
+ * xe_preempt_fence_link() - Return a link used to keep unarmed preempt
+ * fences on a list.
+ * @pfence: Pointer to the preempt fence.
+ *
+ * The link is embedded in the struct xe_preempt_fence. Use
+ * to_preempt_fence_from_link() to convert back to the preempt fence.
+ *
+ * Return: A pointer to an embedded struct list_head.
+ */
+static inline struct list_head *
+xe_preempt_fence_link(struct xe_preempt_fence *pfence)
+{
+ return &pfence->link;
+}
+
+/**
+ * to_preempt_fence_from_link() - Convert back to a preempt fence pointer
+ * from a link obtained with xe_preempt_fence_link().
+ * @link: The struct list_head obtained from xe_preempt_fence_link().
+ *
+ * Return: A pointer to the embedding struct xe_preempt_fence.
+ */
+static inline struct xe_preempt_fence *
+to_preempt_fence_from_link(struct list_head *link)
+{
+ return container_of(link, struct xe_preempt_fence, link);
+}
+
+bool xe_fence_is_xe_preempt(const struct dma_fence *fence);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
new file mode 100644
index 000000000..b54b5c29b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PREEMPT_FENCE_TYPES_H_
+#define _XE_PREEMPT_FENCE_TYPES_H_
+
+#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
+
+struct xe_exec_queue;
+
+/**
+ * struct xe_preempt_fence - XE preempt fence
+ *
+ * A preemption fence which suspends the execution of an xe_exec_queue on the
+ * hardware and triggers a callback once the xe_engine is complete.
+ */
+struct xe_preempt_fence {
+ /** @base: dma fence base */
+ struct dma_fence base;
+ /** @link: link into list of pending preempt fences */
+ struct list_head link;
+ /** @q: exec queue for this preempt fence */
+ struct xe_exec_queue *q;
+ /** @preempt_work: work struct which issues preemption */
+ struct work_struct preempt_work;
+ /** @error: preempt fence is in error state */
+ int error;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
new file mode 100644
index 000000000..c1a833b1c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_pt.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_drm_client.h"
+#include "xe_gt.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_migrate.h"
+#include "xe_pt_types.h"
+#include "xe_pt_walk.h"
+#include "xe_res_cursor.h"
+#include "xe_trace.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_vm.h"
+
+struct xe_pt_dir {
+ struct xe_pt pt;
+ /** @children: Array of page-table child nodes */
+ struct xe_ptw *children[XE_PDES];
+};
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+#define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
+#define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
+#else
+#define xe_pt_set_addr(__xe_pt, __addr)
+#define xe_pt_addr(__xe_pt) 0ull
+#endif
+
+static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
+static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};
+
+#define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)
+
+static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
+{
+ return container_of(pt, struct xe_pt_dir, pt);
+}
+
+static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
+{
+ return container_of(pt_dir->children[index], struct xe_pt, base);
+}
+
+static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
+ unsigned int level)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
+ u8 id = tile->id;
+
+ if (!xe_vm_has_scratch(vm))
+ return 0;
+
+ if (level > MAX_HUGEPTE_LEVEL)
+ return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
+ 0, pat_index);
+
+ return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
+ XE_PTE_NULL;
+}
+
+static void xe_pt_free(struct xe_pt *pt)
+{
+ if (pt->level)
+ kfree(as_xe_pt_dir(pt));
+ else
+ kfree(pt);
+}
+
+/**
+ * xe_pt_create() - Create a page-table.
+ * @vm: The vm to create for.
+ * @tile: The tile to create for.
+ * @level: The page-table level.
+ *
+ * Allocate and initialize a single struct xe_pt metadata structure. Also
+ * create the corresponding page-table bo, but don't initialize it. If the
+ * level is greater than zero, then it's assumed to be a directory page-
+ * table and the directory structure is also allocated and initialized to
+ * NULL pointers.
+ *
+ * Return: A valid struct xe_pt pointer on success, an error pointer on
+ * error.
+ */
+struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
+ unsigned int level)
+{
+ struct xe_pt *pt;
+ struct xe_bo *bo;
+ int err;
+
+ if (level) {
+ struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+
+ pt = (dir) ? &dir->pt : NULL;
+ } else {
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ }
+ if (!pt)
+ return ERR_PTR(-ENOMEM);
+
+ pt->level = level;
+ bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
+ XE_BO_CREATE_PINNED_BIT |
+ XE_BO_CREATE_NO_RESV_EVICT |
+ XE_BO_PAGETABLE);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto err_kfree;
+ }
+ pt->bo = bo;
+ pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
+
+ if (vm->xef)
+ xe_drm_client_add_bo(vm->xef->client, pt->bo);
+ xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);
+
+ return pt;
+
+err_kfree:
+ xe_pt_free(pt);
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
+ * entries.
+ * @tile: The tile whose scratch pagetable to use.
+ * @vm: The vm we populate for.
+ * @pt: The pagetable whose bo to initialize.
+ *
+ * Populate the page-table bo of @pt with entries pointing into the tile's
+ * scratch page-table tree if any. Otherwise populate with zeros.
+ */
+void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_pt *pt)
+{
+ struct iosys_map *map = &pt->bo->vmap;
+ u64 empty;
+ int i;
+
+ if (!xe_vm_has_scratch(vm)) {
+ /*
+		 * FIXME: Some memory is already allocated to zero?
+ * Find out which memory that is and avoid this memset...
+ */
+ xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
+ } else {
+ empty = __xe_pt_empty_pte(tile, vm, pt->level);
+ for (i = 0; i < XE_PDES; i++)
+ xe_pt_write(vm->xe, map, i, empty);
+ }
+}
+
+/**
+ * xe_pt_shift() - Return the ilog2 value of the size of the address range of
+ * a page-table at a certain level.
+ * @level: The level.
+ *
+ * Return: The ilog2 value of the size of the address range of a page-table
+ * at level @level.
+ */
+unsigned int xe_pt_shift(unsigned int level)
+{
+ return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
+}
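+
+/*
+ * For example, with the normal shifts above, xe_pt_shift(0) == 12 and
+ * xe_pt_shift(1) == 21, matching the 4KiB and 2MiB ranges addressed by
+ * entries at those levels.
+ */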
+
+/**
+ * xe_pt_destroy() - Destroy a page-table tree.
+ * @pt: The root of the page-table tree to destroy.
+ * @flags: vm flags. Currently unused.
+ * @deferred: List head of lockless list for deferred putting. NULL for
+ * immediate putting.
+ *
+ * Puts the page-table bo, recursively calls xe_pt_destroy on all children
+ * and finally frees @pt. TODO: Can we remove the @flags argument?
+ */
+void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
+{
+ int i;
+
+ if (!pt)
+ return;
+
+ XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
+ xe_bo_unpin(pt->bo);
+ xe_bo_put_deferred(pt->bo, deferred);
+
+ if (pt->level > 0 && pt->num_live) {
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+ for (i = 0; i < XE_PDES; i++) {
+ if (xe_pt_entry(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
+ deferred);
+ }
+ }
+ xe_pt_free(pt);
+}
+
+/**
+ * DOC: Pagetable building
+ *
+ * Below we use the term "page-table" for both page-directories, containing
+ * pointers to lower level page-directories or page-tables, and level 0
+ * page-tables that contain only page-table-entries pointing to memory pages.
+ *
+ * When inserting an address range in an already existing page-table tree
+ * there will typically be a set of page-tables that are shared with other
+ * address ranges, and a set that are private to this address range.
+ * The set of shared page-tables can be at most two per level,
+ * and those can't be updated immediately because the entries of those
+ * page-tables may still be in use by the gpu for other mappings. Therefore
+ * when inserting entries into those, we instead stage those insertions by
+ * adding insertion data into struct xe_vm_pgtable_update structures. This
+ * data, (subtrees for the cpu and page-table-entries for the gpu) is then
+ * added in a separate commit step. CPU-data is committed while still under the
+ * vm lock, the object lock and for userptr, the notifier lock in read mode.
+ * The GPU async data is committed either by the GPU or CPU after fulfilling
+ * relevant dependencies.
+ * For non-shared page-tables (and, in fact, for shared ones that aren't
+ * existing at the time of staging), we add the data in-place without the
+ * special update structures. This private part of the page-table tree will
+ * remain disconnected from the vm page-table tree until data is committed to
+ * the shared page tables of the vm tree in the commit phase.
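+ *
+ * As a concrete example (illustrative, restating the above): when binding a
+ * range whose level-0 page-table does not yet exist, that table is allocated
+ * and written in place while still disconnected, and a single staged
+ * struct xe_vm_pgtable_update records the PDE write into its pre-existing
+ * parent; the commit phase then makes the new subtree visible to the vm.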
+ */
+
+struct xe_pt_update {
+ /** @update: The update structure we're building for this parent. */
+ struct xe_vm_pgtable_update *update;
+ /** @parent: The parent. Used to detect a parent change. */
+ struct xe_pt *parent;
+ /** @preexisting: Whether the parent was pre-existing or allocated */
+ bool preexisting;
+};
+
+struct xe_pt_stage_bind_walk {
+	/** @base: The base class. */
+ struct xe_pt_walk base;
+
+ /* Input parameters for the walk */
+ /** @vm: The vm we're building for. */
+ struct xe_vm *vm;
+ /** @tile: The tile we're building for. */
+ struct xe_tile *tile;
+ /** @default_pte: PTE flag only template. No address is associated */
+ u64 default_pte;
+ /** @dma_offset: DMA offset to add to the PTE. */
+ u64 dma_offset;
+ /**
+ * @needs_64k: This address range enforces 64K alignment and
+ * granularity.
+ */
+ bool needs_64K;
+ /**
+ * @vma: VMA being mapped
+ */
+ struct xe_vma *vma;
+
+	/* Also input, but is updated during the walk */
+ /** @curs: The DMA address cursor. */
+ struct xe_res_cursor *curs;
+	/** @va_curs_start: The virtual address corresponding to @curs->start */
+ u64 va_curs_start;
+
+ /* Output */
+ struct xe_walk_update {
+ /** @wupd.entries: Caller provided storage. */
+ struct xe_vm_pgtable_update *entries;
+ /** @wupd.num_used_entries: Number of update @entries used. */
+ unsigned int num_used_entries;
+ /** @wupd.updates: Tracks the update entry at a given level */
+ struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
+ } wupd;
+
+ /* Walk state */
+ /**
+ * @l0_end_addr: The end address of the current l0 leaf. Used for
+ * 64K granularity detection.
+ */
+ u64 l0_end_addr;
+ /** @addr_64K: The start address of the current 64K chunk. */
+ u64 addr_64K;
+	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
+ bool found_64K;
+};
+
+static int
+xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
+ pgoff_t offset, bool alloc_entries)
+{
+ struct xe_pt_update *upd = &wupd->updates[parent->level];
+ struct xe_vm_pgtable_update *entry;
+
+ /*
+	 * For *each level*, we can only have one active
+	 * struct xe_pt_update at any one time. Once we move on to a
+	 * new parent and page-directory, the old one is complete, and
+	 * updates are either already stored in the build tree or in
+	 * @wupd->entries.
+ */
+ if (likely(upd->parent == parent))
+ return 0;
+
+ upd->parent = parent;
+ upd->preexisting = true;
+
+ if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
+ return -EINVAL;
+
+ entry = wupd->entries + wupd->num_used_entries++;
+ upd->update = entry;
+ entry->ofs = offset;
+ entry->pt_bo = parent->bo;
+ entry->pt = parent;
+ entry->flags = 0;
+ entry->qwords = 0;
+
+ if (alloc_entries) {
+ entry->pt_entries = kmalloc_array(XE_PDES,
+ sizeof(*entry->pt_entries),
+ GFP_KERNEL);
+ if (!entry->pt_entries)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * NOTE: This is a very frequently called function so we allow ourselves
+ * to annotate (using branch prediction hints) the fastpath of updating a
+ * non-pre-existing pagetable with leaf ptes.
+ */
+static int
+xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
+ pgoff_t offset, struct xe_pt *xe_child, u64 pte)
+{
+ struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
+ struct xe_pt_update *child_upd = xe_child ?
+ &xe_walk->wupd.updates[xe_child->level] : NULL;
+ int ret;
+
+ ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
+ if (unlikely(ret))
+ return ret;
+
+ /*
+ * Register this new pagetable so that it won't be recognized as
+ * a shared pagetable by a subsequent insertion.
+ */
+ if (unlikely(child_upd)) {
+ child_upd->update = NULL;
+ child_upd->parent = xe_child;
+ child_upd->preexisting = false;
+ }
+
+ if (likely(!upd->preexisting)) {
+ /* Continue building a non-connected subtree. */
+ struct iosys_map *map = &parent->bo->vmap;
+
+ if (unlikely(xe_child))
+ parent->base.children[offset] = &xe_child->base;
+
+ xe_pt_write(xe_walk->vm->xe, map, offset, pte);
+ parent->num_live++;
+ } else {
+ /* Shared pt. Stage update. */
+ unsigned int idx;
+ struct xe_vm_pgtable_update *entry = upd->update;
+
+ idx = offset - entry->ofs;
+ entry->pt_entries[idx].pt = xe_child;
+ entry->pt_entries[idx].pte = pte;
+ entry->qwords++;
+ }
+
+ return 0;
+}
+
+static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
+ struct xe_pt_stage_bind_walk *xe_walk)
+{
+ u64 size, dma;
+
+ if (level > MAX_HUGEPTE_LEVEL)
+ return false;
+
+ /* Does the virtual range requested cover a huge pte? */
+ if (!xe_pt_covers(addr, next, level, &xe_walk->base))
+ return false;
+
+ /* Does the DMA segment cover the whole pte? */
+ if (next - xe_walk->va_curs_start > xe_walk->curs->size)
+ return false;
+
+	/* null VMAs do not have dma addresses */
+ if (xe_vma_is_null(xe_walk->vma))
+ return true;
+
+ /* Is the DMA address huge PTE size aligned? */
+ size = next - addr;
+ dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
+
+ return IS_ALIGNED(dma, size);
+}
+
+/*
+ * Scan the requested mapping to check whether it can be done entirely
+ * with 64K PTEs.
+ */
+static bool
+xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
+{
+ struct xe_res_cursor curs = *xe_walk->curs;
+
+ if (!IS_ALIGNED(addr, SZ_64K))
+ return false;
+
+ if (next > xe_walk->l0_end_addr)
+ return false;
+
+	/* null VMAs do not have dma addresses */
+ if (xe_vma_is_null(xe_walk->vma))
+ return true;
+
+ xe_res_next(&curs, addr - xe_walk->va_curs_start);
+ for (; addr < next; addr += SZ_64K) {
+ if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
+ return false;
+
+ xe_res_next(&curs, SZ_64K);
+ }
+
+ return addr == next;
+}
+
+/*
+ * For non-compact "normal" 4K level-0 pagetables, we want to try to group
+ * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
+ * device to the PTE.
+ * This function determines whether the address is part of such a
+ * segment. For VRAM in normal pagetables, this is strictly necessary on
+ * some devices.
+ */
+static bool
+xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
+{
+ /* Address is within an already found 64k region */
+ if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
+ return true;
+
+ xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
+ xe_walk->addr_64K = addr;
+
+ return xe_walk->found_64K;
+}
+
+static int
+xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
+ unsigned int level, u64 addr, u64 next,
+ struct xe_ptw **child,
+ enum page_walk_action *action,
+ struct xe_pt_walk *walk)
+{
+ struct xe_pt_stage_bind_walk *xe_walk =
+ container_of(walk, typeof(*xe_walk), base);
+ u16 pat_index = xe_walk->vma->pat_index;
+ struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
+ struct xe_vm *vm = xe_walk->vm;
+ struct xe_pt *xe_child;
+ bool covers;
+ int ret = 0;
+ u64 pte;
+
+	/* Is this a leaf entry? */
+ if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
+ struct xe_res_cursor *curs = xe_walk->curs;
+ bool is_null = xe_vma_is_null(xe_walk->vma);
+
+ XE_WARN_ON(xe_walk->va_curs_start != addr);
+
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) + xe_walk->dma_offset,
+ xe_walk->vma, pat_index, level);
+ pte |= xe_walk->default_pte;
+
+ /*
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
+ * this device *requires* 64K PTE size for VRAM, fail.
+ */
+ if (level == 0 && !xe_parent->is_compact) {
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
+ pte |= XE_PTE_PS64;
+ } else if (XE_WARN_ON(xe_walk->needs_64K)) {
+ return -EINVAL;
+ }
+ }
+
+ ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
+ if (unlikely(ret))
+ return ret;
+
+ if (!is_null)
+ xe_res_next(curs, next - addr);
+ xe_walk->va_curs_start = next;
+ xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
+ *action = ACTION_CONTINUE;
+
+ return ret;
+ }
+
+ /*
+	 * Descending to a lower level. Determine if we need to allocate a
+	 * new page table or directory, which we do if there is no
+ * previous one or there is one we can completely replace.
+ */
+ if (level == 1) {
+ walk->shifts = xe_normal_pt_shifts;
+ xe_walk->l0_end_addr = next;
+ }
+
+ covers = xe_pt_covers(addr, next, level, &xe_walk->base);
+ if (covers || !*child) {
+ u64 flags = 0;
+
+ xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
+ if (IS_ERR(xe_child))
+ return PTR_ERR(xe_child);
+
+ xe_pt_set_addr(xe_child,
+ round_down(addr, 1ull << walk->shifts[level]));
+
+ if (!covers)
+ xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);
+
+ *child = &xe_child->base;
+
+ /*
+ * Prefer the compact pagetable layout for L0 if possible. Only
+ * possible if VMA covers entire 2MB region as compact 64k and
+ * 4k pages cannot be mixed within a 2MB region.
+ * TODO: Suballocate the pt bo to avoid wasting a lot of
+ * memory.
+ */
+ if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
+ covers && xe_pt_scan_64K(addr, next, xe_walk)) {
+ walk->shifts = xe_compact_pt_shifts;
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
+ flags |= XE_PDE_64K;
+ xe_child->is_compact = true;
+ }
+
+ pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
+ ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
+ pte);
+ }
+
+ *action = ACTION_SUBTREE;
+ return ret;
+}
+
+static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
+ .pt_entry = xe_pt_stage_bind_entry,
+};
+
+/**
+ * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
+ * range.
+ * @tile: The tile we're building for.
+ * @vma: The vma indicating the address range.
+ * @entries: Storage for the update entries used for connecting the tree to
+ * the main tree at commit time.
+ * @num_entries: On output contains the number of @entries used.
+ *
+ * This function builds a disconnected page-table tree for a given address
+ * range. The tree is connected to the main vm tree for the gpu using
+ * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
+ * The function builds xe_vm_pgtable_update structures for already existing
+ * shared page-tables, and non-existing shared and non-shared page-tables
+ * are built and populated directly.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+static int
+xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries, u32 *num_entries)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_bo *bo = xe_vma_bo(vma);
+ bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
+ (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
+ struct xe_res_cursor curs;
+ struct xe_pt_stage_bind_walk xe_walk = {
+ .base = {
+ .ops = &xe_pt_stage_bind_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
+ },
+ .vm = xe_vma_vm(vma),
+ .tile = tile,
+ .curs = &curs,
+ .va_curs_start = xe_vma_start(vma),
+ .vma = vma,
+ .wupd.entries = entries,
+ .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
+ };
+ struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ int ret;
+
+ if (vma && (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) &&
+ (is_devmem || !IS_DGFX(xe)))
+ xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
+
+ if (is_devmem) {
+ xe_walk.default_pte |= XE_PPGTT_PTE_DM;
+ xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
+ }
+
+ if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
+ xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
+
+ xe_bo_assert_held(bo);
+
+ if (!xe_vma_is_null(vma)) {
+ if (xe_vma_is_userptr(vma))
+ xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+ xe_vma_size(vma), &curs);
+ else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
+ xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
+ xe_vma_size(vma), &curs);
+ else
+ xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
+ xe_vma_size(vma), &curs);
+ } else {
+ curs.size = xe_vma_size(vma);
+ }
+
+ ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
+ xe_vma_end(vma), &xe_walk.base);
+
+ *num_entries = xe_walk.wupd.num_used_entries;
+ return ret;
+}
+
+/**
+ * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
+ * shared pagetable.
+ * @addr: The start address within the non-shared pagetable.
+ * @end: The end address within the non-shared pagetable.
+ * @level: The level of the non-shared pagetable.
+ * @walk: Walk info. The function adjusts the walk action.
+ * @action: next action to perform (see enum page_walk_action)
+ * @offset: Ignored on input; first non-shared entry on output.
+ * @end_offset: Ignored on input; last non-shared entry + 1 on output.
+ *
+ * A non-shared page-table has some entries that belong to the address range
+ * and others that don't. This function determines the entries that belong
+ * fully to the address range. Depending on level, some entries may
+ * partially belong to the address range (that can't happen at level 0).
+ * The function detects that and adjust those offsets to not include those
+ * partial entries. Iff it does detect partial entries, we know that there must
+ * be shared page tables also at lower levels, so it adjusts the walk action
+ * accordingly.
+ *
+ * Return: true if there were non-shared entries, false otherwise.
+ */
+static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
+ struct xe_pt_walk *walk,
+ enum page_walk_action *action,
+ pgoff_t *offset, pgoff_t *end_offset)
+{
+ u64 size = 1ull << walk->shifts[level];
+
+ *offset = xe_pt_offset(addr, level, walk);
+ *end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;
+
+ if (!level)
+ return true;
+
+ /*
+ * If addr or next are not size aligned, there are shared pts at lower
+ * level, so in that case traverse down the subtree
+ */
+ *action = ACTION_CONTINUE;
+ if (!IS_ALIGNED(addr, size)) {
+ *action = ACTION_SUBTREE;
+ (*offset)++;
+ }
+
+ if (!IS_ALIGNED(end, size)) {
+ *action = ACTION_SUBTREE;
+ (*end_offset)--;
+ }
+
+ return *end_offset > *offset;
+}
+
+struct xe_pt_zap_ptes_walk {
+ /** @base: The walk base-class */
+ struct xe_pt_walk base;
+
+ /* Input parameters for the walk */
+ /** @tile: The tile we're building for */
+ struct xe_tile *tile;
+
+ /* Output */
+	/** @needs_invalidate: Whether we need to invalidate the TLB */
+ bool needs_invalidate;
+};
+
+static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
+ unsigned int level, u64 addr, u64 next,
+ struct xe_ptw **child,
+ enum page_walk_action *action,
+ struct xe_pt_walk *walk)
+{
+ struct xe_pt_zap_ptes_walk *xe_walk =
+ container_of(walk, typeof(*xe_walk), base);
+ struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
+ pgoff_t end_offset;
+
+ XE_WARN_ON(!*child);
+ XE_WARN_ON(!level && xe_child->is_compact);
+
+ /*
+ * Note that we're called from an entry callback, and we're dealing
+	 * with the child of that entry rather than the parent, so we need to
+	 * adjust the level down.
+ */
+ if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
+ &end_offset)) {
+ xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
+ offset * sizeof(u64), 0,
+ (end_offset - offset) * sizeof(u64));
+ xe_walk->needs_invalidate = true;
+ }
+
+ return 0;
+}
+
+static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
+ .pt_entry = xe_pt_zap_ptes_entry,
+};
+
+/**
+ * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
+ * @tile: The tile we're zapping for.
+ * @vma: GPU VMA detailing address range.
+ *
+ * Eviction and userptr invalidation need to be able to zap the
+ * gpu ptes of a given address range in pagefaulting mode.
+ * In order to be able to do that, this function needs access to the shared
+ * page-table entries so it can either clear the leaf PTEs or
+ * clear the pointers to lower-level page-tables. The caller is required
+ * to hold the necessary locks to ensure neither the page-table connectivity
+ * nor the page-table entries of the range is updated from under us.
+ *
+ * Return: Whether ptes were actually updated and a TLB invalidation is
+ * required.
+ */
+bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
+{
+ struct xe_pt_zap_ptes_walk xe_walk = {
+ .base = {
+ .ops = &xe_pt_zap_ptes_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
+ },
+ .tile = tile,
+ };
+ struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+
+ if (!(vma->tile_present & BIT(tile->id)))
+ return false;
+
+ (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
+ xe_vma_end(vma), &xe_walk.base);
+
+ return xe_walk.needs_invalidate;
+}
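+
+/*
+ * Illustrative, hypothetical caller of xe_pt_zap_ptes(): an eviction or
+ * invalidation path would typically zap every tile the vma is bound on
+ * and only issue a TLB invalidation when ptes were actually cleared:
+ *
+ *	for_each_tile(tile, xe, id)
+ *		if (xe_pt_zap_ptes(tile, vma))
+ *			xe_gt_tlb_invalidation_vma(tile->primary_gt,
+ *						   NULL, vma);
+ */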
+
+static void
+xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
+ struct iosys_map *map, void *data,
+ u32 qword_ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update)
+{
+ struct xe_pt_entry *ptes = update->pt_entries;
+ u64 *ptr = data;
+ u32 i;
+
+ for (i = 0; i < num_qwords; i++) {
+ if (map)
+ xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
+ sizeof(u64), u64, ptes[i].pte);
+ else
+ ptr[i] = ptes[i].pte;
+ }
+}
+
+static void xe_pt_abort_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ u32 i, j;
+
+ for (i = 0; i < num_entries; i++) {
+ if (!entries[i].pt_entries)
+ continue;
+
+ for (j = 0; j < entries[i].qwords; j++)
+ xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
+ kfree(entries[i].pt_entries);
+ }
+}
+
+static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ lockdep_assert_held(&vm->lock);
+
+ if (xe_vma_is_userptr(vma))
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ else if (!xe_vma_is_null(vma))
+ dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
+
+ xe_vm_assert_held(vm);
+}
+
+static void xe_pt_commit_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind,
+ struct llist_head *deferred)
+{
+ u32 i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
+
+ if (!rebind)
+ pt->num_live += entries[i].qwords;
+
+ if (!pt->level) {
+ kfree(entries[i].pt_entries);
+ continue;
+ }
+
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ u32 j_ = j + entries[i].ofs;
+ struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+
+ if (xe_pt_entry(pt_dir, j_))
+ xe_pt_destroy(xe_pt_entry(pt_dir, j_),
+ xe_vma_vm(vma)->flags, deferred);
+
+ pt_dir->children[j_] = &newpte->base;
+ }
+ kfree(entries[i].pt_entries);
+ }
+}
+
+static int
+xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries, u32 *num_entries,
+ bool rebind)
+{
+ int err;
+
+ *num_entries = 0;
+ err = xe_pt_stage_bind(tile, vma, entries, num_entries);
+ if (!err)
+ xe_tile_assert(tile, *num_entries);
+ else /* abort! */
+ xe_pt_abort_bind(vma, entries, *num_entries);
+
+ return err;
+}
+
+static void xe_vm_dbg_print_entries(struct xe_device *xe,
+ const struct xe_vm_pgtable_update *entries,
+ unsigned int num_entries)
+#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
+{
+ unsigned int i;
+
+ vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
+ for (i = 0; i < num_entries; i++) {
+ const struct xe_vm_pgtable_update *entry = &entries[i];
+ struct xe_pt *xe_pt = entry->pt;
+ u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
+ u64 end;
+ u64 start;
+
+ xe_assert(xe, !entry->pt->is_compact);
+ start = entry->ofs * page_size;
+ end = start + page_size * entry->qwords;
+ vm_dbg(&xe->drm,
+ "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
+ i, xe_pt->level, entry->ofs, entry->qwords,
+ xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
+ }
+}
+#else
+{}
+#endif
+
+#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
+ static u32 count;
+
+ if (count++ % divisor == divisor - 1) {
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ uvma->userptr.divisor = divisor << 1;
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ return true;
+ }
+
+ return false;
+}
+
+#else
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+ return false;
+}
+
+#endif
+
+/**
+ * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
+ * @base: Base we derive from.
+ * @bind: Whether this is a bind or an unbind operation. A bind operation
+ * makes the pre-commit callback error with -EAGAIN if it detects a
+ * pending invalidation.
+ * @locked: Whether the pre-commit callback locked the userptr notifier lock
+ * and it needs unlocking.
+ */
+struct xe_pt_migrate_pt_update {
+ struct xe_migrate_pt_update base;
+ bool bind;
+ bool locked;
+};
+
+/*
+ * This function adds the needed dependencies to a page-table update job
+ * to make sure racing jobs for separate bind engines don't race writing
+ * to the same page-table range, wreaking havoc. Initially use a single
+ * fence for the entire VM. An optimization would use smaller granularity.
+ */
+static int xe_pt_vm_dependencies(struct xe_sched_job *job,
+ struct xe_range_fence_tree *rftree,
+ u64 start, u64 last)
+{
+ struct xe_range_fence *rtfence;
+ struct dma_fence *fence;
+ int err;
+
+ rtfence = xe_range_fence_tree_first(rftree, start, last);
+ while (rtfence) {
+ fence = rtfence->fence;
+
+ if (!dma_fence_is_signaled(fence)) {
+ /*
+ * Is this a CPU update? GPU is busy updating, so return
+ * an error
+ */
+ if (!job)
+ return -ETIME;
+
+ dma_fence_get(fence);
+ err = drm_sched_job_add_dependency(&job->drm, fence);
+ if (err)
+ return err;
+ }
+
+ rtfence = xe_range_fence_tree_next(rtfence, start, last);
+ }
+
+ return 0;
+}
+
+static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_range_fence_tree *rftree =
+ &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
+
+ return xe_pt_vm_dependencies(pt_update->job, rftree,
+ pt_update->start, pt_update->last);
+}
+
+static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_pt_migrate_pt_update *userptr_update =
+ container_of(pt_update, typeof(*userptr_update), base);
+ struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+ unsigned long notifier_seq = uvma->userptr.notifier_seq;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ int err = xe_pt_vm_dependencies(pt_update->job,
+ &vm->rftree[pt_update->tile_id],
+ pt_update->start,
+ pt_update->last);
+
+ if (err)
+ return err;
+
+ userptr_update->locked = false;
+
+ /*
+ * Wait until nobody is running the invalidation notifier, and
+ * since we're exiting the loop holding the notifier lock,
+ * nobody can proceed invalidating either.
+ *
+ * Note that we don't update the vma->userptr.notifier_seq since
+ * we don't update the userptr pages.
+ */
+ do {
+ down_read(&vm->userptr.notifier_lock);
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ notifier_seq))
+ break;
+
+ up_read(&vm->userptr.notifier_lock);
+
+ if (userptr_update->bind)
+ return -EAGAIN;
+
+ notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
+ } while (true);
+
+	/* Inject errors to test whether they are handled correctly */
+ if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
+ up_read(&vm->userptr.notifier_lock);
+ return -EAGAIN;
+ }
+
+ userptr_update->locked = true;
+
+ return 0;
+}
+
+static const struct xe_migrate_pt_update_ops bind_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .pre_commit = xe_pt_pre_commit,
+};
+
+static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .pre_commit = xe_pt_userptr_pre_commit,
+};
+
+struct invalidation_fence {
+ struct xe_gt_tlb_invalidation_fence base;
+ struct xe_gt *gt;
+ struct xe_vma *vma;
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
+ struct work_struct work;
+};
+
+static const char *
+invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "invalidation_fence";
+}
+
+static const struct dma_fence_ops invalidation_fence_ops = {
+ .get_driver_name = invalidation_fence_get_driver_name,
+ .get_timeline_name = invalidation_fence_get_timeline_name,
+};
+
+static void invalidation_fence_cb(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct invalidation_fence *ifence =
+ container_of(cb, struct invalidation_fence, cb);
+
+ trace_xe_gt_tlb_invalidation_fence_cb(&ifence->base);
+ if (!ifence->fence->error) {
+ queue_work(system_wq, &ifence->work);
+ } else {
+ ifence->base.base.error = ifence->fence->error;
+ dma_fence_signal(&ifence->base.base);
+ dma_fence_put(&ifence->base.base);
+ }
+ dma_fence_put(ifence->fence);
+}
+
+static void invalidation_fence_work_func(struct work_struct *w)
+{
+ struct invalidation_fence *ifence =
+ container_of(w, struct invalidation_fence, work);
+
+ trace_xe_gt_tlb_invalidation_fence_work_func(&ifence->base);
+ xe_gt_tlb_invalidation_vma(ifence->gt, &ifence->base, ifence->vma);
+}
+
+static int invalidation_fence_init(struct xe_gt *gt,
+ struct invalidation_fence *ifence,
+ struct dma_fence *fence,
+ struct xe_vma *vma)
+{
+ int ret;
+
+ trace_xe_gt_tlb_invalidation_fence_create(&ifence->base);
+
+ spin_lock_irq(&gt->tlb_invalidation.lock);
+ dma_fence_init(&ifence->base.base, &invalidation_fence_ops,
+ &gt->tlb_invalidation.lock,
+ gt->tlb_invalidation.fence_context,
+ ++gt->tlb_invalidation.fence_seqno);
+ spin_unlock_irq(&gt->tlb_invalidation.lock);
+
+ INIT_LIST_HEAD(&ifence->base.link);
+
+ dma_fence_get(&ifence->base.base); /* Ref for caller */
+ ifence->fence = fence;
+ ifence->gt = gt;
+ ifence->vma = vma;
+
+ INIT_WORK(&ifence->work, invalidation_fence_work_func);
+ ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
+ if (ret == -ENOENT) {
+ dma_fence_put(ifence->fence); /* Usually dropped in CB */
+ invalidation_fence_work_func(&ifence->work);
+ } else if (ret) {
+ dma_fence_put(&ifence->base.base); /* Caller ref */
+ dma_fence_put(&ifence->base.base); /* Creation ref */
+ }
+
+ xe_gt_assert(gt, !ret || ret == -ENOENT);
+
+ return ret && ret != -ENOENT ? ret : 0;
+}
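+
+/*
+ * Reference flow above, summarized for clarity: dma_fence_init() starts the
+ * invalidation fence with one (creation) reference and dma_fence_get() takes
+ * a second one for the caller. -ENOENT from dma_fence_add_callback() means
+ * @fence already signaled, so the invalidation is kicked off synchronously
+ * and reported as success; any other error drops both references and is
+ * returned to the caller.
+ */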
+
+static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
+ struct xe_pt_migrate_pt_update *update,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ int i, level = 0;
+
+ for (i = 0; i < num_entries; i++) {
+ const struct xe_vm_pgtable_update *entry = &entries[i];
+
+ if (entry->pt->level > level)
+ level = entry->pt->level;
+ }
+
+ /* Greedy (non-optimal) calculation but simple */
+ update->base.start = ALIGN_DOWN(xe_vma_start(vma),
+ 0x1ull << xe_pt_shift(level));
+ update->base.last = ALIGN(xe_vma_end(vma),
+ 0x1ull << xe_pt_shift(level)) - 1;
+}
+
+/**
+ * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
+ * address range.
+ * @tile: The tile to bind for.
+ * @vma: The vma to bind.
+ * @q: The exec_queue with which to do pipelined page-table updates.
+ * @syncs: Entries to sync on before binding the built tree to the live vm tree.
+ * @num_syncs: Number of @sync entries.
+ * @rebind: Whether we're rebinding this vma to the same address range without
+ * an unbind in-between.
+ *
+ * This function builds a page-table tree (see xe_pt_stage_bind() for more
+ * information on page-table building), and the xe_vm_pgtable_update entries
+ * abstracting the operations needed to attach it to the main vm tree. It
+ * then takes the relevant locks and updates the metadata side of the main
+ * vm tree and submits the operations for pipelined attachment of the
+ * gpu page-table to the vm main tree (which can be done either by the
+ * CPU or the GPU).
+ *
+ * Return: A valid dma-fence representing the pipelined attachment operation
+ * on success, an error pointer on error.
+ */
+struct dma_fence *
+__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool rebind)
+{
+ struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+ struct xe_pt_migrate_pt_update bind_pt_update = {
+ .base = {
+ .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
+ .vma = vma,
+ .tile_id = tile->id,
+ },
+ .bind = true,
+ };
+ struct xe_vm *vm = xe_vma_vm(vma);
+ u32 num_entries;
+ struct dma_fence *fence;
+ struct invalidation_fence *ifence = NULL;
+ struct xe_range_fence *rfence;
+ int err;
+
+ bind_pt_update.locked = false;
+ xe_bo_assert_held(xe_vma_bo(vma));
+ xe_vm_assert_held(vm);
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%llx...%llx) engine %p.\n",
+ xe_vma_start(vma), xe_vma_end(vma), q);
+
+ err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
+ if (err)
+ goto err;
+ xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
+ xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
+ num_entries);
+
+ /*
+	 * If rebind, we have to invalidate TLB on !LR vms to invalidate
+	 * cached PTEs that point to freed memory. On LR vms this is done
+	 * automatically when the context is re-enabled by the rebind worker,
+	 * or in fault mode it was invalidated on PTE zapping.
+	 *
+	 * If !rebind, and scratch is enabled on the VM, there is a chance the
+	 * scratch PTE is already cached in the TLB so it needs to be invalidated.
+	 * On !LR VMs this is done in the ring ops preceding a batch, but on
+	 * non-faulting LR, in particular on user-space batch buffer chaining,
+	 * it needs to be done here.
+	 */
+	if (!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm)) {
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence)
+ return ERR_PTR(-ENOMEM);
+ } else if (rebind && !xe_vm_in_lr_mode(vm)) {
+		/* We also bump if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
+ }
+
+ rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
+ if (!rfence) {
+ kfree(ifence);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fence = xe_migrate_update_pgtables(tile->migrate,
+ vm, xe_vma_bo(vma), q,
+ entries, num_entries,
+ syncs, num_syncs,
+ &bind_pt_update.base);
+ if (!IS_ERR(fence)) {
+ bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
+ LLIST_HEAD(deferred);
+ int err;
+
+ err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ &xe_range_fence_kfree_ops,
+ bind_pt_update.base.start,
+ bind_pt_update.base.last, fence);
+ if (err)
+ dma_fence_wait(fence, false);
+
+ /* TLB invalidation must be done before signaling rebind */
+ if (ifence) {
+ int err = invalidation_fence_init(tile->primary_gt, ifence, fence,
+ vma);
+ if (err) {
+ dma_fence_put(fence);
+ kfree(ifence);
+ return ERR_PTR(err);
+ }
+ fence = &ifence->base.base;
+ }
+
+ /* add shared fence now for pagetable delayed destroy */
+ dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
+ last_munmap_rebind ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+ xe_pt_commit_bind(vma, entries, num_entries, rebind,
+ bind_pt_update.locked ? &deferred : NULL);
+
+ /* This vma is live (again?) now */
+ vma->tile_present |= BIT(tile->id);
+
+ if (bind_pt_update.locked) {
+ to_userptr_vma(vma)->userptr.initial_bind = true;
+ up_read(&vm->userptr.notifier_lock);
+ xe_bo_put_commit(&deferred);
+ }
+ if (!rebind && last_munmap_rebind &&
+ xe_vm_in_preempt_fence_mode(vm))
+ xe_vm_queue_rebind_worker(vm);
+ } else {
+ kfree(rfence);
+ kfree(ifence);
+ if (bind_pt_update.locked)
+ up_read(&vm->userptr.notifier_lock);
+ xe_pt_abort_bind(vma, entries, num_entries);
+ }
+
+ return fence;
+
+err:
+ return ERR_PTR(err);
+}
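+
+/*
+ * Hypothetical usage sketch of __xe_pt_bind_vma() (illustration only; real
+ * callers live in the VM bind paths): submit the update, then either install
+ * the returned fence in sync objects or wait on it for an immediate bind:
+ *
+ *	fence = __xe_pt_bind_vma(tile, vma, q, syncs, num_syncs, false);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	dma_fence_wait(fence, false);
+ *	dma_fence_put(fence);
+ */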
+
+struct xe_pt_stage_unbind_walk {
+ /** @base: The pagewalk base-class. */
+ struct xe_pt_walk base;
+
+ /* Input parameters for the walk */
+ /** @tile: The tile we're unbinding from. */
+ struct xe_tile *tile;
+
+ /**
+ * @modified_start: Walk range start, modified to include any
+ * shared pagetables that we're the only user of and can thus
+ * treat as private.
+ */
+ u64 modified_start;
+	/** @modified_end: Walk range end, modified like @modified_start. */
+ u64 modified_end;
+
+ /* Output */
+	/** @wupd: Structure to track the page-table updates we're building */
+ struct xe_walk_update wupd;
+};
+
+/*
+ * Check whether this range is the only one populating this pagetable,
+ * and in that case, update the walk range checks so that higher levels don't
+ * view us as a shared pagetable.
+ */
+static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
+ const struct xe_pt *child,
+ enum page_walk_action *action,
+ struct xe_pt_walk *walk)
+{
+ struct xe_pt_stage_unbind_walk *xe_walk =
+ container_of(walk, typeof(*xe_walk), base);
+ unsigned int shift = walk->shifts[level];
+ u64 size = 1ull << shift;
+
+ if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
+ ((next - addr) >> shift) == child->num_live) {
+ u64 size = 1ull << walk->shifts[level + 1];
+
+ *action = ACTION_CONTINUE;
+
+ if (xe_walk->modified_start >= addr)
+ xe_walk->modified_start = round_down(addr, size);
+ if (xe_walk->modified_end <= next)
+ xe_walk->modified_end = round_up(next, size);
+
+ return true;
+ }
+
+ return false;
+}
+
+static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
+ unsigned int level, u64 addr, u64 next,
+ struct xe_ptw **child,
+ enum page_walk_action *action,
+ struct xe_pt_walk *walk)
+{
+ struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
+
+ XE_WARN_ON(!*child);
+ XE_WARN_ON(!level && xe_child->is_compact);
+
+ xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);
+
+ return 0;
+}
+
+static int
+xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
+ unsigned int level, u64 addr, u64 next,
+ struct xe_ptw **child,
+ enum page_walk_action *action,
+ struct xe_pt_walk *walk)
+{
+ struct xe_pt_stage_unbind_walk *xe_walk =
+ container_of(walk, typeof(*xe_walk), base);
+ struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
+ pgoff_t end_offset;
+ u64 size = 1ull << walk->shifts[--level];
+
+ if (!IS_ALIGNED(addr, size))
+ addr = xe_walk->modified_start;
+ if (!IS_ALIGNED(next, size))
+ next = xe_walk->modified_end;
+
+ /* Parent == *child is the root pt. Don't kill it. */
+ if (parent != *child &&
+ xe_pt_check_kill(addr, next, level, xe_child, action, walk))
+ return 0;
+
+ if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
+ &end_offset))
+ return 0;
+
+ (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
+ xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
+
+ return 0;
+}
+
+static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
+ .pt_entry = xe_pt_stage_unbind_entry,
+ .pt_post_descend = xe_pt_stage_unbind_post_descend,
+};
+
+/**
+ * xe_pt_stage_unbind() - Build page-table update structures for an unbind
+ * operation
+ * @tile: The tile we're unbinding for.
+ * @vma: The vma we're unbinding.
+ * @entries: Caller-provided storage for the update structures.
+ *
+ * Builds page-table update structures for an unbind operation. The function
+ * will attempt to remove all page-tables that we're the only user
+ * of, and for that to work, the unbind operation must be committed in the
+ * same critical section that blocks racing binds to the same page-table tree.
+ *
+ * Return: The number of entries used.
+ */
+static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries)
+{
+ struct xe_pt_stage_unbind_walk xe_walk = {
+ .base = {
+ .ops = &xe_pt_stage_unbind_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
+ },
+ .tile = tile,
+ .modified_start = xe_vma_start(vma),
+ .modified_end = xe_vma_end(vma),
+ .wupd.entries = entries,
+ };
+ struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+
+ (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
+ xe_vma_end(vma), &xe_walk.base);
+
+ return xe_walk.wupd.num_used_entries;
+}
+
+static void
+xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
+ struct xe_tile *tile, struct iosys_map *map,
+ void *ptr, u32 qword_ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update)
+{
+ struct xe_vma *vma = pt_update->vma;
+ u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
+ int i;
+
+ if (map && map->is_iomem)
+ for (i = 0; i < num_qwords; ++i)
+ xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
+ sizeof(u64), u64, empty);
+ else if (map)
+ memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
+ num_qwords);
+ else
+ memset64(ptr, empty, num_qwords);
+}
+
+static void
+xe_pt_commit_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries, u32 num_entries,
+ struct llist_head *deferred)
+{
+ u32 j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (j = 0; j < num_entries; ++j) {
+ struct xe_vm_pgtable_update *entry = &entries[j];
+ struct xe_pt *pt = entry->pt;
+
+ pt->num_live -= entry->qwords;
+ if (pt->level) {
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+ u32 i;
+
+ for (i = entry->ofs; i < entry->ofs + entry->qwords;
+ i++) {
+ if (xe_pt_entry(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry(pt_dir, i),
+ xe_vma_vm(vma)->flags, deferred);
+
+ pt_dir->children[i] = NULL;
+ }
+ }
+ }
+}
+
+static const struct xe_migrate_pt_update_ops unbind_ops = {
+ .populate = xe_migrate_clear_pgtable_callback,
+ .pre_commit = xe_pt_pre_commit,
+};
+
+static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
+ .populate = xe_migrate_clear_pgtable_callback,
+ .pre_commit = xe_pt_userptr_pre_commit,
+};
+
+/**
+ * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
+ * address range.
+ * @tile: The tile to unbind for.
+ * @vma: The vma to unbind.
+ * @q: The exec_queue with which to do pipelined page-table updates.
+ * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
+ * @num_syncs: Number of @sync entries.
+ *
+ * This function builds the xe_vm_pgtable_update entries abstracting the
+ * operations needed to detach the page-table tree to be destroyed from the
+ * main vm tree.
+ * It then takes the relevant locks and submits the operations for
+ * pipelined detachment of the gpu page-table from the vm main tree
+ * (which can be done either by the CPU or the GPU). Finally it frees the
+ * detached page-table tree.
+ *
+ * Return: A valid dma-fence representing the pipelined detachment operation
+ * on success, an error pointer on error.
+ */
+struct dma_fence *
+__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+ struct xe_pt_migrate_pt_update unbind_pt_update = {
+ .base = {
+ .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
+ &unbind_ops,
+ .vma = vma,
+ .tile_id = tile->id,
+ },
+ };
+ struct xe_vm *vm = xe_vma_vm(vma);
+ u32 num_entries;
+ struct dma_fence *fence = NULL;
+ struct invalidation_fence *ifence;
+ struct xe_range_fence *rfence;
+
+ LLIST_HEAD(deferred);
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+ xe_vm_assert_held(vm);
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing unbind, with range [%llx...%llx) engine %p.\n",
+ xe_vma_start(vma), xe_vma_end(vma), q);
+
+ num_entries = xe_pt_stage_unbind(tile, vma, entries);
+ xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
+ xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
+ num_entries);
+
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence)
+ return ERR_PTR(-ENOMEM);
+
+ rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
+ if (!rfence) {
+ kfree(ifence);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+	 * Even if we were already evicted and are unbinding to destroy, we need to
+ * clear again here. The eviction may have updated pagetables at a
+ * lower level, because it needs to be more conservative.
+ */
+ fence = xe_migrate_update_pgtables(tile->migrate,
+ vm, NULL, q ? q :
+ vm->q[tile->id],
+ entries, num_entries,
+ syncs, num_syncs,
+ &unbind_pt_update.base);
+ if (!IS_ERR(fence)) {
+ int err;
+
+ err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ &xe_range_fence_kfree_ops,
+ unbind_pt_update.base.start,
+ unbind_pt_update.base.last, fence);
+ if (err)
+ dma_fence_wait(fence, false);
+
+ /* TLB invalidation must be done before signaling unbind */
+ err = invalidation_fence_init(tile->primary_gt, ifence, fence, vma);
+ if (err) {
+ dma_fence_put(fence);
+ kfree(ifence);
+ return ERR_PTR(err);
+ }
+ fence = &ifence->base.base;
+
+ /* add shared fence now for pagetable delayed destroy */
+ dma_resv_add_fence(xe_vm_resv(vm), fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+
+ /* This fence will be installed by caller when doing eviction */
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+ xe_pt_commit_unbind(vma, entries, num_entries,
+ unbind_pt_update.locked ? &deferred : NULL);
+ vma->tile_present &= ~BIT(tile->id);
+ } else {
+ kfree(rfence);
+ kfree(ifence);
+ }
+
+ if (!vma->tile_present)
+ list_del_init(&vma->combined_links.rebind);
+
+ if (unbind_pt_update.locked) {
+ xe_tile_assert(tile, xe_vma_is_userptr(vma));
+
+ if (!vma->tile_present) {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+ up_read(&vm->userptr.notifier_lock);
+ xe_bo_put_commit(&deferred);
+ }
+
+ return fence;
+}
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
new file mode 100644
index 000000000..71a4fbfcf
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef _XE_PT_H_
+#define _XE_PT_H_
+
+#include <linux/types.h>
+
+#include "xe_pt_types.h"
+
+struct dma_fence;
+struct xe_bo;
+struct xe_device;
+struct xe_exec_queue;
+struct xe_sync_entry;
+struct xe_tile;
+struct xe_vm;
+struct xe_vma;
+
+/* Largest huge pte is currently 1GiB. May become device dependent. */
+#define MAX_HUGEPTE_LEVEL 2
+
+#define xe_pt_write(xe, map, idx, data) \
+ xe_map_wr(xe, map, (idx) * sizeof(u64), u64, data)
+
+unsigned int xe_pt_shift(unsigned int level);
+
+struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
+ unsigned int level);
+
+void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_pt *pt);
+
+void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+
+struct dma_fence *
+__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool rebind);
+
+struct dma_fence *
+__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs);
+
+bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
new file mode 100644
index 000000000..cee70cb0f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PT_TYPES_H_
+#define _XE_PT_TYPES_H_
+
+#include <linux/types.h>
+
+#include "xe_pt_walk.h"
+
+struct xe_bo;
+struct xe_device;
+struct xe_vma;
+
+enum xe_cache_level {
+ XE_CACHE_NONE,
+ XE_CACHE_WT,
+ XE_CACHE_WB,
+	XE_CACHE_NONE_COMPRESSION, /* UC + COH_NONE + COMPRESSION */
+ __XE_CACHE_LEVEL_COUNT,
+};
+
+#define XE_VM_MAX_LEVEL 4
+
+struct xe_pt {
+ struct xe_ptw base;
+ struct xe_bo *bo;
+ unsigned int level;
+ unsigned int num_live;
+ bool rebind;
+ bool is_compact;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+	/** @addr: Virtual address start of the PT. */
+ u64 addr;
+#endif
+};
+
+struct xe_pt_ops {
+ u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
+ u16 pat_index, u32 pt_level);
+ u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
+ u16 pat_index, u32 pt_level);
+ u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
+ u16 pat_index,
+ u32 pt_level, bool devmem, u64 flags);
+ u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
+ u16 pat_index);
+};
+
+struct xe_pt_entry {
+ struct xe_pt *pt;
+ u64 pte;
+};
+
+struct xe_vm_pgtable_update {
+	/** @pt_bo: page table bo to write to */
+ struct xe_bo *pt_bo;
+
+	/** @ofs: offset inside the page table to begin writing to (in qwords) */
+ u32 ofs;
+
+	/** @qwords: number of PTEs to write */
+ u32 qwords;
+
+ /** @pt: opaque pointer useful for the caller of xe_migrate_update_pgtables */
+ struct xe_pt *pt;
+
+ /** @pt_entries: Newly added pagetable entries */
+ struct xe_pt_entry *pt_entries;
+
+ /** @flags: Target flags */
+ u32 flags;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
new file mode 100644
index 000000000..b8b3d2aea
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#include "xe_pt_walk.h"
+
+/**
+ * DOC: GPU page-table tree walking.
+ * The utilities in this file are similar to the CPU page-table walk
+ * utilities in mm/pagewalk.c. The main difference is that we distinguish
+ * the various levels of a page-table tree with an unsigned integer rather
+ * than by name. 0 is the lowest level, and page-tables with level 0 can
+ * not be directories pointing to lower levels, whereas all other levels
+ * can. The user of the utilities determines the highest level.
+ *
+ * Nomenclature:
+ * Each struct xe_ptw, regardless of level is referred to as a page table, and
+ * multiple page tables typically form a page table tree with page tables at
+ * intermediate levels being page directories pointing at page tables at lower
+ * levels. A shared page table for a given address range is a page-table which
+ * is neither fully within nor fully outside the address range and that can
+ * thus be shared by two or more address ranges.
+ *
+ * Please keep this code generic so that it can be used as a drm-wide page-
+ * table walker should other drivers find use for it.
+ */
+static u64 xe_pt_addr_end(u64 addr, u64 end, unsigned int level,
+ const struct xe_pt_walk *walk)
+{
+ u64 size = 1ull << walk->shifts[level];
+ u64 tmp = round_up(addr + 1, size);
+
+ return min_t(u64, tmp, end);
+}
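+
+/*
+ * Worked example, for illustration: with a level shift of 21 (2MiB
+ * entries), xe_pt_addr_end(0x300000, 0xa00000, ...) rounds 0x300001 up
+ * to 0x400000, which is below @end and is returned; the walk therefore
+ * advances one entry at a time until the min_t() clamp to @end applies.
+ */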
+
+static bool xe_pt_next(pgoff_t *offset, u64 *addr, u64 next, u64 end,
+ unsigned int level, const struct xe_pt_walk *walk)
+{
+ pgoff_t step = 1;
+
+ /* Shared pt walk skips to the last pagetable */
+ if (unlikely(walk->shared_pt_mode)) {
+ unsigned int shift = walk->shifts[level];
+ u64 skip_to = round_down(end, 1ull << shift);
+
+ if (skip_to > next) {
+ step += (skip_to - next) >> shift;
+ next = skip_to;
+ }
+ }
+
+ *addr = next;
+ *offset += step;
+
+ return next != end;
+}
+
+/**
+ * xe_pt_walk_range() - Walk a range of a gpu page table tree with callbacks
+ * for each page-table entry in all levels.
+ * @parent: The root page table for walk start.
+ * @level: The root page table level.
+ * @addr: Virtual address start.
+ * @end: Virtual address end + 1.
+ * @walk: Walk info.
+ *
+ * Similar to the CPU page-table walker, this is a helper to walk
+ * a gpu page table and call a provided callback function for each entry.
+ *
+ * Return: 0 on success, negative error code on error. The error is
+ * propagated from the callback and on error the walk is terminated.
+ */
+int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
+ u64 addr, u64 end, struct xe_pt_walk *walk)
+{
+ pgoff_t offset = xe_pt_offset(addr, level, walk);
+	struct xe_ptw **entries = parent->children;
+ const struct xe_pt_walk_ops *ops = walk->ops;
+ enum page_walk_action action;
+ struct xe_ptw *child;
+ int err = 0;
+ u64 next;
+
+ do {
+ next = xe_pt_addr_end(addr, end, level, walk);
+ if (walk->shared_pt_mode && xe_pt_covers(addr, next, level,
+ walk))
+ continue;
+again:
+ action = ACTION_SUBTREE;
+ child = entries ? entries[offset] : NULL;
+ err = ops->pt_entry(parent, offset, level, addr, next,
+ &child, &action, walk);
+ if (err)
+ break;
+
+ /* Probably not needed yet for gpu pagetable walk. */
+ if (unlikely(action == ACTION_AGAIN))
+ goto again;
+
+ if (likely(!level || !child || action == ACTION_CONTINUE))
+ continue;
+
+ err = xe_pt_walk_range(child, level - 1, addr, next, walk);
+
+ if (!err && ops->pt_post_descend)
+ err = ops->pt_post_descend(parent, offset, level, addr,
+ next, &child, &action, walk);
+ if (err)
+ break;
+
+ } while (xe_pt_next(&offset, &addr, next, end, level, walk));
+
+ return err;
+}
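+
+/*
+ * Minimal usage sketch (hypothetical names): a walker supplies at least a
+ * pt_entry callback plus the per-level shifts, embeds struct xe_pt_walk and
+ * walks the range:
+ *
+ *	static const struct xe_pt_walk_ops my_ops = {
+ *		.pt_entry = my_pt_entry,
+ *	};
+ *
+ *	struct my_walk my = {
+ *		.base = {
+ *			.ops = &my_ops,
+ *			.shifts = xe_normal_pt_shifts,
+ *			.max_level = XE_PT_HIGHEST_LEVEL,
+ *		},
+ *	};
+ *
+ *	err = xe_pt_walk_range(&root->base, root->level, start, end, &my.base);
+ *
+ * See the walkers in xe_pt.c for real instances of this pattern.
+ */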
+
+/**
+ * xe_pt_walk_shared() - Walk shared page tables of a page-table tree.
+ * @parent: Root page table directory.
+ * @level: Level of the root.
+ * @addr: Start address.
+ * @end: Last address + 1.
+ * @walk: Walk info.
+ *
+ * This function is similar to xe_pt_walk_range() but it skips page tables
+ * that are private to the range. Since the root (or @parent) page table is
+ * typically also a shared page table this function is different in that it
+ * calls the pt_entry callback and the post_descend callback also for the
+ * root. The root can be detected in the callbacks by checking whether
+ * parent == *child.
+ * Walking only the shared page tables is common for unbind-type operations
+ * where the page-table entries for an address range are cleared or detached
+ * from the main page-table tree.
+ *
+ * Return: 0 on success, negative error code on error: If a callback
+ * returns an error, the walk will be terminated and the error returned by
+ * this function.
+ */
+int xe_pt_walk_shared(struct xe_ptw *parent, unsigned int level,
+ u64 addr, u64 end, struct xe_pt_walk *walk)
+{
+ const struct xe_pt_walk_ops *ops = walk->ops;
+ enum page_walk_action action = ACTION_SUBTREE;
+ struct xe_ptw *child = parent;
+ int err;
+
+ walk->shared_pt_mode = true;
+ err = walk->ops->pt_entry(parent, 0, level + 1, addr, end,
+ &child, &action, walk);
+
+ if (err || action != ACTION_SUBTREE)
+ return err;
+
+ err = xe_pt_walk_range(parent, level, addr, end, walk);
+ if (!err && ops->pt_post_descend) {
+ err = ops->pt_post_descend(parent, 0, level + 1, addr, end,
+ &child, &action, walk);
+ }
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
new file mode 100644
index 000000000..5ecc4d2f0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef __XE_PT_WALK__
+#define __XE_PT_WALK__
+
+#include <linux/pagewalk.h>
+#include <linux/types.h>
+
+/**
+ * struct xe_ptw - base class for driver pagetable subclassing.
+ * @children: Pointer to an array of children if any.
+ *
+ * Drivers could subclass this, and if it's a page-directory, typically
+ * embed an array of xe_ptw pointers.
+ */
+struct xe_ptw {
+ struct xe_ptw **children;
+};
+
+/**
+ * struct xe_pt_walk - Embeddable struct for walk parameters
+ */
+struct xe_pt_walk {
+ /** @ops: The walk ops used for the pagewalk */
+ const struct xe_pt_walk_ops *ops;
+ /**
+ * @shifts: Array of page-table entry shifts used for the
+ * different levels, starting out with the leaf level 0
+ * page-shift as the first entry. It's legal for this pointer to be
+ * changed during the walk.
+ */
+ const u64 *shifts;
+	/** @max_level: Highest populated level in @shifts */
+ unsigned int max_level;
+ /**
+	 * @shared_pt_mode: Whether to skip all entries that are private
+	 * to the address range and call the callbacks only for entries
+	 * that are shared with other address ranges. Such entries are
+	 * referred to as shared pagetables.
+ */
+ bool shared_pt_mode;
+};
+
+/**
+ * typedef xe_pt_entry_fn - gpu page-table-walk callback-function
+ * @parent: The parent page table.
+ * @offset: The offset (number of entries) into the page table.
+ * @level: The level of @parent.
+ * @addr: The virtual address.
+ * @next: The virtual address for the next call, or end address.
+ * @child: Pointer to pointer to child page-table at this @offset. The
+ * function may modify the value pointed to if, for example, allocating a
+ * child page table.
+ * @action: The walk action to take upon return. See <linux/pagewalk.h>.
+ * @walk: The walk parameters.
+ */
+typedef int (*xe_pt_entry_fn)(struct xe_ptw *parent, pgoff_t offset,
+ unsigned int level, u64 addr, u64 next,
+ struct xe_ptw **child,
+ enum page_walk_action *action,
+ struct xe_pt_walk *walk);
+
+/**
+ * struct xe_pt_walk_ops - Walk callbacks.
+ */
+struct xe_pt_walk_ops {
+ /**
+ * @pt_entry: Callback to be called for each page table entry prior
+ * to descending to the next level. The returned value of the action
+ * function parameter is honored.
+ */
+ xe_pt_entry_fn pt_entry;
+ /**
+ * @pt_post_descend: Callback to be called for each page table entry
+ * after return from descending to the next level. The returned value
+ * of the action function parameter is ignored.
+ */
+ xe_pt_entry_fn pt_post_descend;
+};
+
+int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
+ u64 addr, u64 end, struct xe_pt_walk *walk);
+
+int xe_pt_walk_shared(struct xe_ptw *parent, unsigned int level,
+ u64 addr, u64 end, struct xe_pt_walk *walk);
+
+/**
+ * xe_pt_covers - Whether the address range covers an entire entry in @level
+ * @addr: Start of the range.
+ * @end: End of range + 1.
+ * @level: Page table level.
+ * @walk: Page table walk info.
+ *
+ * This function is a helper to aid in determining whether a leaf page table
+ * entry can be inserted at this @level.
+ *
+ * Return: Whether the range provided covers exactly an entry at this level.
+ */
+static inline bool xe_pt_covers(u64 addr, u64 end, unsigned int level,
+ const struct xe_pt_walk *walk)
+{
+ u64 pt_size = 1ull << walk->shifts[level];
+
+ return end - addr == pt_size && IS_ALIGNED(addr, pt_size);
+}
+
+/**
+ * xe_pt_num_entries: Number of page-table entries of a given range at this
+ * level
+ * @addr: Start address.
+ * @end: End address.
+ * @level: Page table level.
+ * @walk: Walk info.
+ *
+ * Return: The number of page table entries at this level between @addr and
+ * @end.
+ */
+static inline pgoff_t
+xe_pt_num_entries(u64 addr, u64 end, unsigned int level,
+ const struct xe_pt_walk *walk)
+{
+ u64 pt_size = 1ull << walk->shifts[level];
+
+ return (round_up(end, pt_size) - round_down(addr, pt_size)) >>
+ walk->shifts[level];
+}
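+
+/*
+ * Worked example, for illustration: with 4KiB entries (shift 12),
+ * xe_pt_num_entries(0x1800, 0x5800, 0, walk) rounds the range out to
+ * [0x1000, 0x6000) and returns 5, counting the partially covered first
+ * and last entries as well as the fully covered ones.
+ */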
+
+/**
+ * xe_pt_offset: Offset of the page-table entry for a given address.
+ * @addr: The address.
+ * @level: Page table level.
+ * @walk: Walk info.
+ *
+ * Return: The page table entry offset for the given address in a
+ * page table with size indicated by @level.
+ */
+static inline pgoff_t
+xe_pt_offset(u64 addr, unsigned int level, const struct xe_pt_walk *walk)
+{
+ if (level < walk->max_level)
+ addr &= ((1ull << walk->shifts[level + 1]) - 1);
+
+ return addr >> walk->shifts[level];
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
new file mode 100644
index 000000000..1a6f2f51b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_query.h"
+
+#include <linux/nospec.h>
+#include <linux/sched/clock.h>
+
+#include <drm/ttm/ttm_placement.h>
+#include <drm/xe_drm.h>
+
+#include "regs/xe_engine_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_guc_hwconfig.h"
+#include "xe_macros.h"
+#include "xe_mmio.h"
+#include "xe_ttm_vram_mgr.h"
+
+static const u16 xe_to_user_engine_class[] = {
+ [XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
+ [XE_ENGINE_CLASS_COPY] = DRM_XE_ENGINE_CLASS_COPY,
+ [XE_ENGINE_CLASS_VIDEO_DECODE] = DRM_XE_ENGINE_CLASS_VIDEO_DECODE,
+ [XE_ENGINE_CLASS_VIDEO_ENHANCE] = DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ [XE_ENGINE_CLASS_COMPUTE] = DRM_XE_ENGINE_CLASS_COMPUTE,
+};
+
+static const enum xe_engine_class user_to_xe_engine_class[] = {
+ [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
+ [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
+ [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
+ [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
+ [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
+};
+
+static size_t calc_hw_engine_info_size(struct xe_device *xe)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_gt *gt;
+ u8 gt_id;
+ int i = 0;
+
+ for_each_gt(gt, xe, gt_id)
+ for_each_hw_engine(hwe, gt, id) {
+ if (xe_hw_engine_is_reserved(hwe))
+ continue;
+ i++;
+ }
+
+ return sizeof(struct drm_xe_query_engines) +
+ i * sizeof(struct drm_xe_engine);
+}
+
+typedef u64 (*__ktime_func_t)(void);
+static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
+{
+ /*
+	 * Use the same logic as the perf subsystem to allow the user to
+	 * select the reference clock id to be used for timestamps.
+ */
+ switch (clk_id) {
+ case CLOCK_MONOTONIC:
+ return &ktime_get_ns;
+ case CLOCK_MONOTONIC_RAW:
+ return &ktime_get_raw_ns;
+ case CLOCK_REALTIME:
+ return &ktime_get_real_ns;
+ case CLOCK_BOOTTIME:
+ return &ktime_get_boottime_ns;
+ case CLOCK_TAI:
+ return &ktime_get_clocktai_ns;
+ default:
+ return NULL;
+ }
+}
+
+static void
+__read_timestamps(struct xe_gt *gt,
+ struct xe_reg lower_reg,
+ struct xe_reg upper_reg,
+ u64 *engine_ts,
+ u64 *cpu_ts,
+ u64 *cpu_delta,
+ __ktime_func_t cpu_clock)
+{
+ u32 upper, lower, old_upper, loop = 0;
+
+ upper = xe_mmio_read32(gt, upper_reg);
+ do {
+ *cpu_delta = local_clock();
+ *cpu_ts = cpu_clock();
+ lower = xe_mmio_read32(gt, lower_reg);
+ *cpu_delta = local_clock() - *cpu_delta;
+ old_upper = upper;
+ upper = xe_mmio_read32(gt, upper_reg);
+ } while (upper != old_upper && loop++ < 2);
+
+ *engine_ts = (u64)upper << 32 | lower;
+}
+
+static int
+query_engine_cycles(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ struct drm_xe_query_engine_cycles __user *query_ptr;
+ struct drm_xe_engine_class_instance *eci;
+ struct drm_xe_query_engine_cycles resp;
+ size_t size = sizeof(resp);
+ __ktime_func_t cpu_clock;
+ struct xe_hw_engine *hwe;
+ struct xe_gt *gt;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ query_ptr = u64_to_user_ptr(query->data);
+ if (copy_from_user(&resp, query_ptr, size))
+ return -EFAULT;
+
+ cpu_clock = __clock_id_to_func(resp.clockid);
+ if (!cpu_clock)
+ return -EINVAL;
+
+ eci = &resp.eci;
+ if (eci->gt_id >= XE_MAX_GT_PER_TILE)
+ return -EINVAL;
+
+ gt = xe_device_get_gt(xe, eci->gt_id);
+ if (!gt)
+ return -EINVAL;
+
+ if (eci->engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
+ return -EINVAL;
+
+ hwe = xe_gt_hw_engine(gt, user_to_xe_engine_class[eci->engine_class],
+ eci->engine_instance, true);
+ if (!hwe)
+ return -EINVAL;
+
+ xe_device_mem_access_get(xe);
+ xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+
+ __read_timestamps(gt,
+ RING_TIMESTAMP(hwe->mmio_base),
+ RING_TIMESTAMP_UDW(hwe->mmio_base),
+ &resp.engine_cycles,
+ &resp.cpu_timestamp,
+ &resp.cpu_delta,
+ cpu_clock);
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_device_mem_access_put(xe);
+ resp.width = 36;
+
+ /* Only write to the output fields of user query */
+ if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
+ return -EFAULT;
+
+ if (put_user(resp.cpu_delta, &query_ptr->cpu_delta))
+ return -EFAULT;
+
+ if (put_user(resp.engine_cycles, &query_ptr->engine_cycles))
+ return -EFAULT;
+
+ if (put_user(resp.width, &query_ptr->width))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int query_engines(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ size_t size = calc_hw_engine_info_size(xe);
+ struct drm_xe_query_engines __user *query_ptr =
+ u64_to_user_ptr(query->data);
+ struct drm_xe_query_engines *engines;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_gt *gt;
+ u8 gt_id;
+ int i = 0;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ engines = kmalloc(size, GFP_KERNEL);
+ if (!engines)
+ return -ENOMEM;
+
+ for_each_gt(gt, xe, gt_id)
+ for_each_hw_engine(hwe, gt, id) {
+ if (xe_hw_engine_is_reserved(hwe))
+ continue;
+
+ engines->engines[i].instance.engine_class =
+ xe_to_user_engine_class[hwe->class];
+ engines->engines[i].instance.engine_instance =
+ hwe->logical_instance;
+ engines->engines[i].instance.gt_id = gt->info.id;
+ engines->engines[i].instance.pad = 0;
+ memset(engines->engines[i].reserved, 0,
+ sizeof(engines->engines[i].reserved));
+
+ i++;
+ }
+
+ engines->pad = 0;
+ engines->num_engines = i;
+
+ if (copy_to_user(query_ptr, engines, size)) {
+ kfree(engines);
+ return -EFAULT;
+ }
+ kfree(engines);
+
+ return 0;
+}
+
+static size_t calc_mem_regions_size(struct xe_device *xe)
+{
+ u32 num_managers = 1;
+ int i;
+
+ for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i)
+ if (ttm_manager_type(&xe->ttm, i))
+ num_managers++;
+
+ return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
+}
+
+static int query_mem_regions(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ size_t size = calc_mem_regions_size(xe);
+ struct drm_xe_query_mem_regions *mem_regions;
+ struct drm_xe_query_mem_regions __user *query_ptr =
+ u64_to_user_ptr(query->data);
+ struct ttm_resource_manager *man;
+ int ret, i;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ mem_regions = kzalloc(size, GFP_KERNEL);
+ if (XE_IOCTL_DBG(xe, !mem_regions))
+ return -ENOMEM;
+
+ man = ttm_manager_type(&xe->ttm, XE_PL_TT);
+ mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
+ /*
+ * The instance needs to be a unique number that represents the index
+ * in the placement mask used at xe_gem_create_ioctl() for the
+ * xe_bo_create() placement.
+ */
+ mem_regions->mem_regions[0].instance = 0;
+ mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
+ mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
+ if (perfmon_capable())
+ mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+ mem_regions->num_mem_regions = 1;
+
+ for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
+ man = ttm_manager_type(&xe->ttm, i);
+ if (man) {
+ mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
+ DRM_XE_MEM_REGION_CLASS_VRAM;
+ mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
+ mem_regions->num_mem_regions;
+ mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
+ xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
+ SZ_64K : PAGE_SIZE;
+ mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
+ man->size;
+
+ if (perfmon_capable()) {
+ xe_ttm_vram_get_used(man,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].used,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].cpu_visible_used);
+ }
+
+ mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
+ xe_ttm_vram_get_cpu_visible_size(man);
+ mem_regions->num_mem_regions++;
+ }
+ }
+
+ if (!copy_to_user(query_ptr, mem_regions, size))
+ ret = 0;
+ else
+ ret = -ENOSPC;
+
+ kfree(mem_regions);
+ return ret;
+}
+
+static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+ const u32 num_params = DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1;
+ size_t size =
+ sizeof(struct drm_xe_query_config) + num_params * sizeof(u64);
+ struct drm_xe_query_config __user *query_ptr =
+ u64_to_user_ptr(query->data);
+ struct drm_xe_query_config *config;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ config = kzalloc(size, GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ config->num_params = num_params;
+ config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
+ xe->info.devid | (xe->info.revid << 16);
+ if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
+ config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
+ xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
+ config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
+ config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY] =
+ xe_exec_queue_device_get_max_priority(xe);
+
+ if (copy_to_user(query_ptr, config, size)) {
+ kfree(config);
+ return -EFAULT;
+ }
+ kfree(config);
+
+ return 0;
+}
+
+static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+ struct xe_gt *gt;
+ size_t size = sizeof(struct drm_xe_query_gt_list) +
+ xe->info.gt_count * sizeof(struct drm_xe_gt);
+ struct drm_xe_query_gt_list __user *query_ptr =
+ u64_to_user_ptr(query->data);
+ struct drm_xe_query_gt_list *gt_list;
+ u8 id;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ gt_list = kzalloc(size, GFP_KERNEL);
+ if (!gt_list)
+ return -ENOMEM;
+
+ gt_list->num_gt = xe->info.gt_count;
+
+ for_each_gt(gt, xe, id) {
+ if (xe_gt_is_media_type(gt))
+ gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
+ else
+ gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN;
+ gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id;
+ gt_list->gt_list[id].gt_id = gt->info.id;
+ gt_list->gt_list[id].reference_clock = gt->info.reference_clock;
+ /*
+ * The mem_regions indexes in the mask below need to
+ * directly identify the struct
+ * drm_xe_query_mem_regions' instance constructed at
+ * query_mem_regions()
+ *
+ * For our current platforms:
+ * Bit 0 -> System Memory
+ * Bit 1 -> VRAM0 on Tile0
+ * Bit 2 -> VRAM1 on Tile1
+ * However the uAPI is generic and it's userspace's
+ * responsibility to check the mem_class, without any
+ * assumption.
+ */
+ if (!IS_DGFX(xe))
+ gt_list->gt_list[id].near_mem_regions = 0x1;
+ else
+ gt_list->gt_list[id].near_mem_regions =
+ BIT(gt_to_tile(gt)->id) << 1;
+ gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^
+ gt_list->gt_list[id].near_mem_regions;
+ }
+
+ if (copy_to_user(query_ptr, gt_list, size)) {
+ kfree(gt_list);
+ return -EFAULT;
+ }
+ kfree(gt_list);
+
+ return 0;
+}
+
+static int query_hwconfig(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
+ void __user *query_ptr = u64_to_user_ptr(query->data);
+ void *hwconfig;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ hwconfig = kzalloc(size, GFP_KERNEL);
+ if (!hwconfig)
+ return -ENOMEM;
+
+ xe_device_mem_access_get(xe);
+ xe_guc_hwconfig_copy(&gt->uc.guc, hwconfig);
+ xe_device_mem_access_put(xe);
+
+ if (copy_to_user(query_ptr, hwconfig, size)) {
+ kfree(hwconfig);
+ return -EFAULT;
+ }
+ kfree(hwconfig);
+
+ return 0;
+}
+
+static size_t calc_topo_query_size(struct xe_device *xe)
+{
+ return xe->info.gt_count *
+ (3 * sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
+}
+
+static int copy_mask(void __user **ptr,
+ struct drm_xe_query_topology_mask *topo,
+ void *mask, size_t mask_size)
+{
+ topo->num_bytes = mask_size;
+
+ if (copy_to_user(*ptr, topo, sizeof(*topo)))
+ return -EFAULT;
+	*ptr += sizeof(*topo);
+
+ if (copy_to_user(*ptr, mask, mask_size))
+ return -EFAULT;
+ *ptr += mask_size;
+
+ return 0;
+}
+
+static int query_gt_topology(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ void __user *query_ptr = u64_to_user_ptr(query->data);
+ size_t size = calc_topo_query_size(xe);
+ struct drm_xe_query_topology_mask topo;
+ struct xe_gt *gt;
+ int id;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ for_each_gt(gt, xe, id) {
+ int err;
+
+ topo.gt_id = id;
+
+ topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
+ sizeof(gt->fuse_topo.g_dss_mask));
+ if (err)
+ return err;
+
+ topo.type = DRM_XE_TOPO_DSS_COMPUTE;
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
+ sizeof(gt->fuse_topo.c_dss_mask));
+ if (err)
+ return err;
+
+ topo.type = DRM_XE_TOPO_EU_PER_DSS;
+ err = copy_mask(&query_ptr, &topo,
+ gt->fuse_topo.eu_mask_per_dss,
+ sizeof(gt->fuse_topo.eu_mask_per_dss));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
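+
+/*
+ * For clarity: the reply produced above is a packed stream of records, each
+ * a struct drm_xe_query_topology_mask header directly followed by num_bytes
+ * of mask data. A single-GT device thus yields three records, in order:
+ * DSS_GEOMETRY, DSS_COMPUTE and EU_PER_DSS.
+ */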
+
+static int (* const xe_query_funcs[])(struct xe_device *xe,
+ struct drm_xe_device_query *query) = {
+ query_engines,
+ query_mem_regions,
+ query_config,
+ query_gt_list,
+ query_hwconfig,
+ query_gt_topology,
+ query_engine_cycles,
+};
+
+int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct drm_xe_device_query *query = data;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, query->extensions) ||
+ XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, query->query >= ARRAY_SIZE(xe_query_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
+ if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
+ return -EINVAL;
+
+ return xe_query_funcs[idx](xe, query);
+}
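+
+/*
+ * Illustrative userspace use of the two-call size convention shared by all
+ * queries above: call once with size == 0 to learn the required buffer size,
+ * then call again with a buffer (error handling omitted):
+ *
+ *	struct drm_xe_device_query query = {
+ *		.query = DRM_XE_DEVICE_QUERY_ENGINES,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ *	query.data = (uintptr_t)malloc(query.size);
+ *	ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ */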
diff --git a/drivers/gpu/drm/xe/xe_query.h b/drivers/gpu/drm/xe/xe_query.h
new file mode 100644
index 000000000..beeb7a819
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_query.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_QUERY_H_
+#define _XE_QUERY_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c
new file mode 100644
index 000000000..372378e89
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_range_fence.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/slab.h>
+
+#include "xe_macros.h"
+#include "xe_range_fence.h"
+
+#define XE_RANGE_TREE_START(_node) ((_node)->start)
+#define XE_RANGE_TREE_LAST(_node) ((_node)->last)
+
+INTERVAL_TREE_DEFINE(struct xe_range_fence, rb, u64, __subtree_last,
+ XE_RANGE_TREE_START, XE_RANGE_TREE_LAST, static,
+ xe_range_fence_tree);
+
+static void
+xe_range_fence_signal_notify(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct xe_range_fence *rfence = container_of(cb, typeof(*rfence), cb);
+ struct xe_range_fence_tree *tree = rfence->tree;
+
+ llist_add(&rfence->link, &tree->list);
+}
+
+static bool __xe_range_fence_tree_cleanup(struct xe_range_fence_tree *tree)
+{
+ struct llist_node *node = llist_del_all(&tree->list);
+ struct xe_range_fence *rfence, *next;
+
+ llist_for_each_entry_safe(rfence, next, node, link) {
+ xe_range_fence_tree_remove(rfence, &tree->root);
+ dma_fence_put(rfence->fence);
+ kfree(rfence);
+ }
+
+ return !!node;
+}
+
+/**
+ * xe_range_fence_insert() - range fence insert
+ * @tree: range fence tree to insert into
+ * @rfence: range fence
+ * @ops: range fence ops
+ * @start: start address of range fence
+ * @last: last address of range fence
+ * @fence: dma fence which signals range fence can be removed + freed
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int xe_range_fence_insert(struct xe_range_fence_tree *tree,
+ struct xe_range_fence *rfence,
+ const struct xe_range_fence_ops *ops,
+ u64 start, u64 last, struct dma_fence *fence)
+{
+ int err = 0;
+
+ __xe_range_fence_tree_cleanup(tree);
+
+ if (dma_fence_is_signaled(fence))
+ goto free;
+
+ rfence->ops = ops;
+ rfence->start = start;
+ rfence->last = last;
+ rfence->tree = tree;
+ rfence->fence = dma_fence_get(fence);
+ err = dma_fence_add_callback(fence, &rfence->cb,
+ xe_range_fence_signal_notify);
+ if (err == -ENOENT) {
+ dma_fence_put(fence);
+ err = 0;
+ goto free;
+ } else if (err == 0) {
+ xe_range_fence_tree_insert(rfence, &tree->root);
+ return 0;
+ }
+
+free:
+ if (ops->free)
+ ops->free(rfence);
+
+ return err;
+}
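+
+/*
+ * Typical usage, for illustration (mirroring the bind/unbind paths in
+ * xe_pt.c): allocate a range fence, insert it over the VA range covered by
+ * a pipelined update and let the signal callback free it via the kfree ops:
+ *
+ *	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
+ *	if (!rfence)
+ *		return -ENOMEM;
+ *	err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ *				    &xe_range_fence_kfree_ops,
+ *				    start, last, fence);
+ */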
+
+static void xe_range_fence_tree_remove_all(struct xe_range_fence_tree *tree)
+{
+ struct xe_range_fence *rfence;
+ bool retry = true;
+
+ rfence = xe_range_fence_tree_iter_first(&tree->root, 0, U64_MAX);
+ while (rfence) {
+ /* Should be ok with the minimalistic callback */
+ if (dma_fence_remove_callback(rfence->fence, &rfence->cb))
+ llist_add(&rfence->link, &tree->list);
+ rfence = xe_range_fence_tree_iter_next(rfence, 0, U64_MAX);
+ }
+
+ while (retry)
+ retry = __xe_range_fence_tree_cleanup(tree);
+}
+
+/**
+ * xe_range_fence_tree_init() - Init range fence tree
+ * @tree: range fence tree
+ */
+void xe_range_fence_tree_init(struct xe_range_fence_tree *tree)
+{
+ memset(tree, 0, sizeof(*tree));
+}
+
+/**
+ * xe_range_fence_tree_fini() - Fini range fence tree
+ * @tree: range fence tree
+ */
+void xe_range_fence_tree_fini(struct xe_range_fence_tree *tree)
+{
+ xe_range_fence_tree_remove_all(tree);
+ XE_WARN_ON(!RB_EMPTY_ROOT(&tree->root.rb_root));
+}
+
+/**
+ * xe_range_fence_tree_first() - range fence tree iterator first
+ * @tree: range fence tree
+ * @start: start address of range fence
+ * @last: last address of range fence
+ *
+ * Return: first range fence found in range or NULL
+ */
+struct xe_range_fence *
+xe_range_fence_tree_first(struct xe_range_fence_tree *tree, u64 start,
+ u64 last)
+{
+ return xe_range_fence_tree_iter_first(&tree->root, start, last);
+}
+
+/**
+ * xe_range_fence_tree_next() - range fence tree iterator next
+ * @rfence: current range fence
+ * @start: start address of range fence
+ * @last: last address of range fence
+ *
+ * Return: next range fence found in range or NULL
+ */
+struct xe_range_fence *
+xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last)
+{
+ return xe_range_fence_tree_iter_next(rfence, start, last);
+}
+
+static void xe_range_fence_free(struct xe_range_fence *rfence)
+{
+ kfree(rfence);
+}
+
+const struct xe_range_fence_ops xe_range_fence_kfree_ops = {
+ .free = xe_range_fence_free,
+};
diff --git a/drivers/gpu/drm/xe/xe_range_fence.h b/drivers/gpu/drm/xe/xe_range_fence.h
new file mode 100644
index 000000000..edd58b34f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_range_fence.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_RANGE_FENCE_H_
+#define _XE_RANGE_FENCE_H_
+
+#include <linux/dma-fence.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct xe_range_fence_tree;
+struct xe_range_fence;
+
+/** struct xe_range_fence_ops - XE range fence ops */
+struct xe_range_fence_ops {
+ /** @free: free range fence op */
+ void (*free)(struct xe_range_fence *rfence);
+};
+
+/** struct xe_range_fence - XE range fence (address conflict tracking) */
+struct xe_range_fence {
+ /** @rb: RB tree node inserted into interval tree */
+ struct rb_node rb;
+ /** @start: start address of range fence in interval tree */
+ u64 start;
+ /** @last: last address (inclusive) of range fence in interval tree */
+ u64 last;
+ /** @__subtree_last: interval tree internal usage */
+ u64 __subtree_last;
+ /**
+ * @fence: fence signals address in range fence no longer has conflict
+ */
+ struct dma_fence *fence;
+ /** @tree: interval tree which range fence belongs to */
+ struct xe_range_fence_tree *tree;
+ /**
+ * @cb: callback invoked when the fence signals to remove the range fence
+ * from the interval tree and free it
+ */
+ struct dma_fence_cb cb;
+ /** @link: used to defer free of range fence to non-irq context */
+ struct llist_node link;
+ /** @ops: range fence ops */
+ const struct xe_range_fence_ops *ops;
+};
+
+/** struct xe_range_fence_tree - interval tree to store range fences */
+struct xe_range_fence_tree {
+ /** @root: interval tree root */
+ struct rb_root_cached root;
+ /** @list: list of pending range fences to be freed */
+ struct llist_head list;
+};
+
+extern const struct xe_range_fence_ops xe_range_fence_kfree_ops;
+
+struct xe_range_fence *
+xe_range_fence_tree_first(struct xe_range_fence_tree *tree, u64 start,
+ u64 last);
+
+struct xe_range_fence *
+xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last);
+
+void xe_range_fence_tree_init(struct xe_range_fence_tree *tree);
+
+void xe_range_fence_tree_fini(struct xe_range_fence_tree *tree);
+
+int xe_range_fence_insert(struct xe_range_fence_tree *tree,
+ struct xe_range_fence *rfence,
+ const struct xe_range_fence_ops *ops,
+ u64 start, u64 last,
+ struct dma_fence *fence);
+
+#endif
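
To make the intended usage concrete, a minimal hypothetical caller-side sketch (not part of this patch), built only on the API declared above plus <linux/slab.h>: one helper fences a VA range until a dma-fence signals, the other scans a range for unsignaled conflicts.

#include <linux/slab.h>

#include "xe_range_fence.h"

/* Keep [start, last] fenced until @f signals; the entry is kfree'd then. */
static int example_track_range(struct xe_range_fence_tree *tree,
			       u64 start, u64 last, struct dma_fence *f)
{
	struct xe_range_fence *rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);

	if (!rfence)
		return -ENOMEM;

	/* If @f already signaled, ops->free has run and 0 is returned */
	return xe_range_fence_insert(tree, rfence, &xe_range_fence_kfree_ops,
				     start, last, f);
}

/* True if any fence overlapping [start, last] has not signaled yet */
static bool example_range_busy(struct xe_range_fence_tree *tree,
			       u64 start, u64 last)
{
	struct xe_range_fence *rfence;

	for (rfence = xe_range_fence_tree_first(tree, start, last); rfence;
	     rfence = xe_range_fence_tree_next(rfence, start, last))
		if (!dma_fence_is_signaled(rfence->fence))
			return true;

	return false;
}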
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
new file mode 100644
index 000000000..87adefb56
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_reg_sr.h"
+
+#include <kunit/visibility.h>
+#include <linux/align.h>
+#include <linux/string_helpers.h>
+#include <linux/xarray.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "xe_device_types.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_hw_engine_types.h"
+#include "xe_macros.h"
+#include "xe_mmio.h"
+#include "xe_reg_whitelist.h"
+#include "xe_rtp_types.h"
+
+#define XE_REG_SR_GROW_STEP_DEFAULT 16
+
+static void reg_sr_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_reg_sr *sr = arg;
+
+ xa_destroy(&sr->xa);
+ kfree(sr->pool.arr);
+ memset(&sr->pool, 0, sizeof(sr->pool));
+}
+
+int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
+{
+ xa_init(&sr->xa);
+ memset(&sr->pool, 0, sizeof(sr->pool));
+ sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
+ sr->name = name;
+
+ return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
+
+static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
+{
+ if (sr->pool.used == sr->pool.allocated) {
+ struct xe_reg_sr_entry *arr;
+
+ arr = krealloc_array(sr->pool.arr,
+ ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
+ sizeof(*arr), GFP_KERNEL);
+ if (!arr)
+ return NULL;
+
+ sr->pool.arr = arr;
+ sr->pool.allocated += sr->pool.grow_step;
+ }
+
+ return &sr->pool.arr[sr->pool.used++];
+}
+
+static bool compatible_entries(const struct xe_reg_sr_entry *e1,
+ const struct xe_reg_sr_entry *e2)
+{
+ /*
+ * Don't allow overwriting values: clr_bits/set_bits should be disjoint
+ * when operating on the same register
+ */
+ if (e1->clr_bits & e2->clr_bits || e1->set_bits & e2->set_bits ||
+ e1->clr_bits & e2->set_bits || e1->set_bits & e2->clr_bits)
+ return false;
+
+ if (e1->reg.raw != e2->reg.raw)
+ return false;
+
+ return true;
+}
+
+static void reg_sr_inc_error(struct xe_reg_sr *sr)
+{
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ sr->errors++;
+#endif
+}
+
+int xe_reg_sr_add(struct xe_reg_sr *sr,
+ const struct xe_reg_sr_entry *e,
+ struct xe_gt *gt)
+{
+ unsigned long idx = e->reg.addr;
+ struct xe_reg_sr_entry *pentry = xa_load(&sr->xa, idx);
+ int ret;
+
+ if (pentry) {
+ if (!compatible_entries(pentry, e)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ pentry->clr_bits |= e->clr_bits;
+ pentry->set_bits |= e->set_bits;
+ pentry->read_mask |= e->read_mask;
+
+ return 0;
+ }
+
+ pentry = alloc_entry(sr);
+ if (!pentry) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ *pentry = *e;
+ ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ xe_gt_err(gt,
+ "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n",
+ idx, e->clr_bits, e->set_bits,
+ str_yes_no(e->reg.masked),
+ str_yes_no(e->reg.mcr),
+ ret);
+ reg_sr_inc_error(sr);
+
+ return ret;
+}
+
+/*
+ * Convert back from the encoded value to the type-safe variant; only to be
+ * used when reg.mcr is true
+ */
+static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
+{
+ return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
+}
+
+static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
+{
+ struct xe_reg reg = entry->reg;
+ struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
+ u32 val;
+
+ /*
+ * If this is a masked register, need to set the upper 16 bits.
+ * Set them to clr_bits since that is always a superset of the bits
+ * being modified.
+ *
+ * When it's not masked, we have to read it from hardware, unless we are
+ * supposed to set all bits.
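+ *
+ * E.g. a masked register with clr_bits = 0x6 and set_bits = 0x2 is
+ * written as 0x00060002: the upper half unmasks bits 1-2, the lower
+ * half sets bit 1 and clears bit 2.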
+ */
+ if (reg.masked)
+ val = entry->clr_bits << 16;
+ else if (entry->clr_bits + 1)
+ val = (reg.mcr ?
+ xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
+ xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
+ else
+ val = 0;
+
+ /*
+ * TODO: add selftest to validate all tables, regardless of platform:
+ * - Masked registers can't have set_bits with upper bits set
+ * - set_bits must be contained in clr_bits
+ */
+ val |= entry->set_bits;
+
+ xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
+
+ if (entry->reg.mcr)
+ xe_gt_mcr_multicast_write(gt, reg_mcr, val);
+ else
+ xe_mmio_write32(gt, reg, val);
+}
+
+void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
+{
+ struct xe_reg_sr_entry *entry;
+ unsigned long reg;
+ int err;
+
+ if (xa_empty(&sr->xa))
+ return;
+
+ xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
+
+ err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
+ if (err)
+ goto err_force_wake;
+
+ xa_for_each(&sr->xa, reg, entry)
+ apply_one_mmio(gt, entry);
+
+ err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
+ XE_WARN_ON(err);
+
+ return;
+
+err_force_wake:
+ xe_gt_err(gt, "Failed to apply, err=%d\n", err);
+}
+
+void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
+{
+ struct xe_reg_sr *sr = &hwe->reg_whitelist;
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_reg_sr_entry *entry;
+ struct drm_printer p;
+ u32 mmio_base = hwe->mmio_base;
+ unsigned long reg;
+ unsigned int slot = 0;
+ int err;
+
+ if (xa_empty(&sr->xa))
+ return;
+
+ drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
+
+ err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
+ if (err)
+ goto err_force_wake;
+
+ p = drm_debug_printer(KBUILD_MODNAME);
+ xa_for_each(&sr->xa, reg, entry) {
+ if (slot == RING_MAX_NONPRIV_SLOTS) {
+ xe_gt_err(gt,
+ "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
+ hwe->name, RING_MAX_NONPRIV_SLOTS);
+ break;
+ }
+
+ xe_reg_whitelist_print_entry(&p, 0, reg, entry);
+ xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot),
+ reg | entry->set_bits);
+ slot++;
+ }
+
+ /* And clear the rest just in case of garbage */
+ for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
+ u32 addr = RING_NOPID(mmio_base).addr;
+
+ xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
+ }
+
+ err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
+ XE_WARN_ON(err);
+
+ return;
+
+err_force_wake:
+ drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
+}
+
+/**
+ * xe_reg_sr_dump - print all save/restore entries
+ * @sr: Save/restore entries
+ * @p: DRM printer
+ */
+void xe_reg_sr_dump(struct xe_reg_sr *sr, struct drm_printer *p)
+{
+ struct xe_reg_sr_entry *entry;
+ unsigned long reg;
+
+ if (!sr->name || xa_empty(&sr->xa))
+ return;
+
+ drm_printf(p, "%s\n", sr->name);
+ xa_for_each(&sr->xa, reg, entry)
+ drm_printf(p, "\tREG[0x%lx] clr=0x%08x set=0x%08x masked=%s mcr=%s\n",
+ reg, entry->clr_bits, entry->set_bits,
+ str_yes_no(entry->reg.masked),
+ str_yes_no(entry->reg.mcr));
+}
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.h b/drivers/gpu/drm/xe/xe_reg_sr.h
new file mode 100644
index 000000000..e3197c33a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_sr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_REG_SR_
+#define _XE_REG_SR_
+
+#include "xe_reg_sr_types.h"
+
+/*
+ * Reg save/restore bookkeeping
+ */
+
+struct xe_device;
+struct xe_gt;
+struct xe_hw_engine;
+struct drm_printer;
+
+int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe);
+void xe_reg_sr_dump(struct xe_reg_sr *sr, struct drm_printer *p);
+
+int xe_reg_sr_add(struct xe_reg_sr *sr, const struct xe_reg_sr_entry *e,
+ struct xe_gt *gt);
+void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt);
+void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h
new file mode 100644
index 000000000..ad48a52b8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_REG_SR_TYPES_
+#define _XE_REG_SR_TYPES_
+
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "regs/xe_reg_defs.h"
+
+struct xe_reg_sr_entry {
+ struct xe_reg reg;
+ u32 clr_bits;
+ u32 set_bits;
+ /* Mask for bits to consider when reading value back */
+ u32 read_mask;
+};
+
+struct xe_reg_sr {
+ struct {
+ struct xe_reg_sr_entry *arr;
+ unsigned int used;
+ unsigned int allocated;
+ unsigned int grow_step;
+ } pool;
+ struct xarray xa;
+ const char *name;
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ unsigned int errors;
+#endif
+};
+
+#endif
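
Tying the two headers together, a hypothetical sketch of a caller accumulating one read-modify-write into a save-restore table and replaying it on a GT, e.g. after reset; foo_reg stands in for a real register definition:

#include "xe_reg_sr.h"

/* Record "foo_reg[1:0] = 01" and re-emit it on the GT. */
static int example_reg_sr(struct xe_device *xe, struct xe_gt *gt,
			  struct xe_reg foo_reg)
{
	static struct xe_reg_sr sr;	/* torn down via the drmm action */
	struct xe_reg_sr_entry e = {
		.reg = foo_reg,
		.clr_bits = 0x3,	/* bits touched */
		.set_bits = 0x1,	/* value within those bits */
		.read_mask = 0x3,	/* bits to check when reading back */
	};
	int err;

	err = xe_reg_sr_init(&sr, "example", xe);
	if (err)
		return err;

	err = xe_reg_sr_add(&sr, &e, gt);
	if (err)
		return err;

	xe_reg_sr_apply_mmio(&sr, gt);
	return 0;
}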
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
new file mode 100644
index 000000000..e66ae1bda
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_reg_whitelist.h"
+
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "xe_gt_types.h"
+#include "xe_platform_types.h"
+#include "xe_rtp.h"
+
+#undef XE_REG_MCR
+#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
+
+static bool match_not_render(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return hwe->class != XE_ENGINE_CLASS_RENDER;
+}
+
+static const struct xe_rtp_entry_sr register_whitelist[] = {
+ { XE_RTP_NAME("WaAllowPMDepthAndInvocationCountAccessFromUMD, 1408556865"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(WHITELIST(PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4))
+ },
+ { XE_RTP_NAME("1508744258, 14012131227, 1808121037"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(WHITELIST(COMMON_SLICE_CHICKEN1, 0))
+ },
+ { XE_RTP_NAME("1806527549"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(WHITELIST(HIZ_CHICKEN, 0))
+ },
+ { XE_RTP_NAME("allow_read_ctx_timestamp"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1260), FUNC(match_not_render)),
+ XE_RTP_ACTIONS(WHITELIST(RING_CTX_TIMESTAMP(0),
+ RING_FORCE_TO_NONPRIV_ACCESS_RD,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ { XE_RTP_NAME("16014440446"),
+ XE_RTP_RULES(PLATFORM(PVC)),
+ XE_RTP_ACTIONS(WHITELIST(XE_REG(0x4400),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64),
+ WHITELIST(XE_REG(0x4500),
+ RING_FORCE_TO_NONPRIV_DENY |
+ RING_FORCE_TO_NONPRIV_RANGE_64))
+ },
+ { XE_RTP_NAME("16017236439"),
+ XE_RTP_RULES(PLATFORM(PVC), ENGINE_CLASS(COPY)),
+ XE_RTP_ACTIONS(WHITELIST(BCS_SWCTRL(0),
+ RING_FORCE_TO_NONPRIV_DENY,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+ {}
+};
+
+/**
+ * xe_reg_whitelist_process_engine - process table of registers to whitelist
+ * @hwe: engine instance to process whitelist for
+ *
+ * Process the whitelist table for this platform, saving in @hwe all the
+ * registers that need to be whitelisted by the hardware so they can be accessed
+ * by userspace.
+ */
+void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+}
+
+/**
+ * xe_reg_whitelist_print_entry - print one whitelist entry
+ * @p: DRM printer
+ * @indent: indent level
+ * @reg: register allowed/denied
+ * @entry: save-restore entry
+ *
+ * Print details about the entry added to allow/deny access
+ */
+void xe_reg_whitelist_print_entry(struct drm_printer *p, unsigned int indent,
+ u32 reg, struct xe_reg_sr_entry *entry)
+{
+ u32 val = entry->set_bits;
+ const char *access_str = "(invalid)";
+ unsigned int range_bit = 2;
+ u32 range_start, range_end;
+ bool deny;
+
+ deny = val & RING_FORCE_TO_NONPRIV_DENY;
+
+ switch (val & RING_FORCE_TO_NONPRIV_RANGE_MASK) {
+ case RING_FORCE_TO_NONPRIV_RANGE_4:
+ range_bit = 4;
+ break;
+ case RING_FORCE_TO_NONPRIV_RANGE_16:
+ range_bit = 6;
+ break;
+ case RING_FORCE_TO_NONPRIV_RANGE_64:
+ range_bit = 8;
+ break;
+ }
+
+ range_start = reg & REG_GENMASK(25, range_bit);
+ range_end = range_start | REG_GENMASK(range_bit, 0);
+
+ switch (val & RING_FORCE_TO_NONPRIV_ACCESS_MASK) {
+ case RING_FORCE_TO_NONPRIV_ACCESS_RW:
+ access_str = "rw";
+ break;
+ case RING_FORCE_TO_NONPRIV_ACCESS_RD:
+ access_str = "read";
+ break;
+ case RING_FORCE_TO_NONPRIV_ACCESS_WR:
+ access_str = "write";
+ break;
+ }
+
+ drm_printf_indent(p, indent, "REG[0x%x-0x%x]: %s %s access\n",
+ range_start, range_end,
+ deny ? "deny" : "allow",
+ access_str);
+}
+
+/**
+ * xe_reg_whitelist_dump - print all whitelist entries
+ * @sr: Save/restore entries
+ * @p: DRM printer
+ */
+void xe_reg_whitelist_dump(struct xe_reg_sr *sr, struct drm_printer *p)
+{
+ struct xe_reg_sr_entry *entry;
+ unsigned long reg;
+
+ if (!sr->name || xa_empty(&sr->xa))
+ return;
+
+ drm_printf(p, "%s\n", sr->name);
+ xa_for_each(&sr->xa, reg, entry)
+ xe_reg_whitelist_print_entry(p, 1, reg, entry);
+}
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.h b/drivers/gpu/drm/xe/xe_reg_whitelist.h
new file mode 100644
index 000000000..69b121d37
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_REG_WHITELIST_
+#define _XE_REG_WHITELIST_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct xe_hw_engine;
+struct xe_reg_sr;
+struct xe_reg_sr_entry;
+
+void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe);
+
+void xe_reg_whitelist_print_entry(struct drm_printer *p, unsigned int indent,
+ u32 reg, struct xe_reg_sr_entry *entry);
+
+void xe_reg_whitelist_dump(struct xe_reg_sr *sr, struct drm_printer *p);
+
+#endif
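
A short hypothetical sketch tying the pieces together: build the whitelist for one engine and dump the resulting entries through a drm_printer, for instance from a debugfs handler:

#include <drm/drm_print.h>

#include "xe_hw_engine_types.h"
#include "xe_reg_whitelist.h"

/* Compute and print the register whitelist for one engine. */
static void example_dump_whitelist(struct xe_hw_engine *hwe,
				   struct drm_printer *p)
{
	xe_reg_whitelist_process_engine(hwe);
	xe_reg_whitelist_dump(&hwe->reg_whitelist, p);
}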
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
new file mode 100644
index 000000000..0a306963a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _XE_RES_CURSOR_H_
+#define _XE_RES_CURSOR_H_
+
+#include <linux/scatterlist.h>
+
+#include <drm/drm_mm.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_macros.h"
+#include "xe_ttm_vram_mgr.h"
+
+/* cursor state for walking over vram_mgr, stolen_mgr, and gtt_mgr allocations */
+struct xe_res_cursor {
+ u64 start;
+ u64 size;
+ u64 remaining;
+ void *node;
+ u32 mem_type;
+ struct scatterlist *sgl;
+ struct drm_buddy *mm;
+};
+
+static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
+{
+ struct ttm_resource_manager *mgr;
+
+ mgr = ttm_manager_type(res->bo->bdev, res->mem_type);
+ return &to_xe_ttm_vram_mgr(mgr)->mm;
+}
+
+/**
+ * xe_res_first - initialize a xe_res_cursor
+ *
+ * @res: TTM resource object to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations between @start and @start + @size.
+ */
+static inline void xe_res_first(struct ttm_resource *res,
+ u64 start, u64 size,
+ struct xe_res_cursor *cur)
+{
+ cur->sgl = NULL;
+ if (!res)
+ goto fallback;
+
+ XE_WARN_ON(start + size > res->size);
+
+ cur->mem_type = res->mem_type;
+
+ switch (cur->mem_type) {
+ case XE_PL_STOLEN:
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1: {
+ struct drm_buddy_block *block;
+ struct list_head *head, *next;
+ struct drm_buddy *mm = xe_res_get_buddy(res);
+
+ head = &to_xe_ttm_vram_mgr_resource(res)->blocks;
+
+ block = list_first_entry_or_null(head,
+ struct drm_buddy_block,
+ link);
+ if (!block)
+ goto fallback;
+
+ while (start >= drm_buddy_block_size(mm, block)) {
+ start -= drm_buddy_block_size(mm, block);
+
+ next = block->link.next;
+ if (next != head)
+ block = list_entry(next, struct drm_buddy_block,
+ link);
+ }
+
+ cur->mm = mm;
+ cur->start = drm_buddy_block_offset(block) + start;
+ cur->size = min(drm_buddy_block_size(mm, block) - start,
+ size);
+ cur->remaining = size;
+ cur->node = block;
+ break;
+ }
+ default:
+ goto fallback;
+ }
+
+ return;
+
+fallback:
+ cur->start = start;
+ cur->size = size;
+ cur->remaining = size;
+ cur->node = NULL;
+ cur->mem_type = XE_PL_TT;
+ XE_WARN_ON(res && start + size > res->size);
+}
+
+static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
+{
+ struct scatterlist *sgl = cur->sgl;
+ u64 start = cur->start;
+
+ while (start >= sg_dma_len(sgl)) {
+ start -= sg_dma_len(sgl);
+ sgl = sg_next(sgl);
+ XE_WARN_ON(!sgl);
+ }
+
+ cur->start = start;
+ cur->size = sg_dma_len(sgl) - start;
+ cur->sgl = sgl;
+}
+
+/**
+ * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
+ *
+ * @sg: scatter gather table to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations between @start and @start + @size.
+ */
+static inline void xe_res_first_sg(const struct sg_table *sg,
+ u64 start, u64 size,
+ struct xe_res_cursor *cur)
+{
+ XE_WARN_ON(!sg);
+ XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
+ !IS_ALIGNED(size, PAGE_SIZE));
+ cur->node = NULL;
+ cur->start = start;
+ cur->remaining = size;
+ cur->size = 0;
+ cur->sgl = sg->sgl;
+ cur->mem_type = XE_PL_TT;
+ __xe_res_sg_next(cur);
+}
+
+/**
+ * xe_res_next - advance the cursor
+ *
+ * @cur: the cursor to advance
+ * @size: number of bytes to move forward
+ *
+ * Move the cursor @size bytes forward, walking to the next node if necessary.
+ */
+static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
+{
+ struct drm_buddy_block *block;
+ struct list_head *next;
+ u64 start;
+
+ XE_WARN_ON(size > cur->remaining);
+
+ cur->remaining -= size;
+ if (!cur->remaining)
+ return;
+
+ if (cur->size > size) {
+ cur->size -= size;
+ cur->start += size;
+ return;
+ }
+
+ if (cur->sgl) {
+ cur->start += size;
+ __xe_res_sg_next(cur);
+ return;
+ }
+
+ switch (cur->mem_type) {
+ case XE_PL_STOLEN:
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1:
+ start = size - cur->size;
+ block = cur->node;
+
+ next = block->link.next;
+ block = list_entry(next, struct drm_buddy_block, link);
+
+ while (start >= drm_buddy_block_size(cur->mm, block)) {
+ start -= drm_buddy_block_size(cur->mm, block);
+
+ next = block->link.next;
+ block = list_entry(next, struct drm_buddy_block, link);
+ }
+
+ cur->start = drm_buddy_block_offset(block) + start;
+ cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
+ cur->remaining);
+ cur->node = block;
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * xe_res_dma - return dma address of cursor at current position
+ *
+ * @cur: the cursor to return the dma address from
+ */
+static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
+{
+ return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
+}
+#endif
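
A hypothetical walk using the cursor above, splitting a resource into chunks of at most SZ_2M the way a copy path might; emit_chunk() is a placeholder for per-chunk driver work:

#include <linux/minmax.h>
#include <linux/sizes.h>

#include "xe_res_cursor.h"

static void emit_chunk(u64 addr, u64 len)
{
	/* placeholder: program a copy of @len bytes at @addr */
}

/* Visit [0, size) of @res in pieces no larger than SZ_2M. */
static void example_walk(struct ttm_resource *res, u64 size)
{
	struct xe_res_cursor cur;

	for (xe_res_first(res, 0, size, &cur); cur.remaining;
	     xe_res_next(&cur, min_t(u64, cur.size, SZ_2M)))
		emit_chunk(xe_res_dma(&cur), min_t(u64, cur.size, SZ_2M));
}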
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
new file mode 100644
index 000000000..68495f885
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_ring_ops.h"
+
+#include "generated/xe_wa_oob.h"
+#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gpu_commands.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_lrc_layout.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_sched_job.h"
+#include "xe_vm_types.h"
+#include "xe_vm.h"
+#include "xe_wa.h"
+
+/*
+ * 3D-related flags that can't be set on _engines_ that lack access to the 3D
+ * pipeline (i.e., CCS engines).
+ */
+#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
+ PIPE_CONTROL_TILE_CACHE_FLUSH | \
+ PIPE_CONTROL_DEPTH_STALL | \
+ PIPE_CONTROL_STALL_AT_SCOREBOARD | \
+ PIPE_CONTROL_PSD_SYNC | \
+ PIPE_CONTROL_AMFS_FLUSH | \
+ PIPE_CONTROL_VF_CACHE_INVALIDATE | \
+ PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)
+
+/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
+#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
+ PIPE_CONTROL_3D_ENGINE_FLAGS | \
+ PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
+ PIPE_CONTROL_FLUSH_ENABLE | \
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
+ PIPE_CONTROL_DC_FLUSH_ENABLE)
+
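+/*
+ * In MI_ARB_CHECK, BIT(8) is presumably the write-enable mask for bit 0,
+ * which carries the pre-parser disable state.
+ */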
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | BIT(8) | state;
+}
+
+static int emit_aux_table_inv(struct xe_gt *gt, struct xe_reg reg,
+ u32 *dw, int i)
+{
+ dw[i++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1) | MI_LRI_MMIO_REMAP_EN;
+ dw[i++] = reg.addr + gt->mmio.adj_offset;
+ dw[i++] = AUX_INV;
+ dw[i++] = MI_NOOP;
+
+ return i;
+}
+
+static int emit_user_interrupt(u32 *dw, int i)
+{
+ dw[i++] = MI_USER_INTERRUPT;
+ dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ dw[i++] = MI_ARB_CHECK;
+
+ return i;
+}
+
+static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
+{
+ dw[i++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ dw[i++] = addr;
+ dw[i++] = 0;
+ dw[i++] = value;
+
+ return i;
+}
+
+static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
+ u32 *dw, int i)
+{
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
+ (invalidate_tlb ? MI_INVALIDATE_TLB : 0);
+ dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
+ dw[i++] = 0;
+ dw[i++] = value;
+
+ return i;
+}
+
+static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
+{
+ dw[i++] = MI_BATCH_BUFFER_START | ppgtt_flag | XE_INSTR_NUM_DW(3);
+ dw[i++] = lower_32_bits(batch_addr);
+ dw[i++] = upper_32_bits(batch_addr);
+
+ return i;
+}
+
+static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
+{
+ dw[i] = MI_FLUSH_DW;
+ dw[i] |= flag;
+ dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
+ MI_FLUSH_DW_STORE_INDEX;
+
+ dw[i++] = LRC_PPHWSP_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ dw[i++] = 0;
+ dw[i++] = ~0U;
+
+ return i;
+}
+
+static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
+ int i)
+{
+ u32 flags = PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
+ PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_STORE_DATA_INDEX;
+
+ if (invalidate_tlb)
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+
+ flags &= ~mask_flags;
+
+ dw[i++] = GFX_OP_PIPE_CONTROL(6);
+ dw[i++] = flags;
+ dw[i++] = LRC_PPHWSP_SCRATCH_ADDR;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+
+ return i;
+}
+
+static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
+ u32 *dw, int i)
+{
+ dw[i++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(1);
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
+ dw[i++] = lower_32_bits(value);
+ dw[i++] = upper_32_bits(value);
+
+ return i;
+}
+
+static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
+{
+ struct xe_gt *gt = job->q->gt;
+ bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
+ u32 flags;
+
+ flags = (PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+
+ if (XE_WA(gt, 1409600907))
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+
+ if (lacks_render)
+ flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
+ flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+
+ dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
+ dw[i++] = flags;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+
+ return i;
+}
+
+static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int i)
+{
+ if (hwe->class != XE_ENGINE_CLASS_RENDER)
+ return i;
+
+ if (XE_WA(hwe->gt, 16020292621)) {
+ dw[i++] = GFX_OP_PIPE_CONTROL(6);
+ dw[i++] = PIPE_CONTROL_LRI_POST_SYNC;
+ dw[i++] = RING_NOPID(hwe->mmio_base).addr;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ }
+
+ return i;
+}
+
+static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
+ int i)
+{
+ dw[i++] = GFX_OP_PIPE_CONTROL(6);
+ dw[i++] = (stall_only ? PIPE_CONTROL_CS_STALL :
+ PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL) |
+ PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE;
+ dw[i++] = addr;
+ dw[i++] = 0;
+ dw[i++] = value;
+ dw[i++] = 0; /* We're thrashing one extra dword. */
+
+ return i;
+}
+
+static u32 get_ppgtt_flag(struct xe_sched_job *job)
+{
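+	/* BIT(8) of MI_BATCH_BUFFER_START selects PPGTT (vs GGTT) addressing */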
+ return job->q->vm ? BIT(8) : 0;
+}
+
+/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
+static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
+ u64 batch_addr, u32 seqno)
+{
+ u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ u32 ppgtt_flag = get_ppgtt_flag(job);
+ struct xe_gt *gt = job->q->gt;
+
+ if (job->ring_ops_flush_tlb) {
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, true, dw, i);
+ dw[i++] = preparser_disable(false);
+ } else {
+ i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, dw, i);
+ }
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
+ if (job->user_fence.used)
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+ job->user_fence.value,
+ dw, i);
+
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+
+ i = emit_user_interrupt(dw, i);
+
+ xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
+}
+
+static bool has_aux_ccs(struct xe_device *xe)
+{
+ /*
+ * PVC is a special case that has no compression of either type
+ * (FlatCCS or AuxCCS). Also, AuxCCS is no longer used from Xe2
+ * onward, so any future platforms with no FlatCCS will not have
+ * AuxCCS either.
+ */
+ if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
+ return false;
+
+ return !xe->info.has_flat_ccs;
+}
+
+static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
+ u64 batch_addr, u32 seqno)
+{
+ u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ u32 ppgtt_flag = get_ppgtt_flag(job);
+ struct xe_gt *gt = job->q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+
+ dw[i++] = preparser_disable(true);
+
+ /* hsdes: 1809175790 */
+ if (has_aux_ccs(xe)) {
+ if (decode)
+ i = emit_aux_table_inv(gt, VD0_AUX_INV, dw, i);
+ else
+ i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
+ }
+
+ if (job->ring_ops_flush_tlb)
+ i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, true, dw, i);
+
+ dw[i++] = preparser_disable(false);
+
+ if (!job->ring_ops_flush_tlb)
+ i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, dw, i);
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
+ if (job->user_fence.used)
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+ job->user_fence.value,
+ dw, i);
+
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+
+ i = emit_user_interrupt(dw, i);
+
+ xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
+}
+
+static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
+ struct xe_lrc *lrc,
+ u64 batch_addr, u32 seqno)
+{
+ u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ u32 ppgtt_flag = get_ppgtt_flag(job);
+ struct xe_gt *gt = job->q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
+ u32 mask_flags = 0;
+
+ dw[i++] = preparser_disable(true);
+ if (lacks_render)
+ mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
+ else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
+ mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
+
+ /* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
+ i = emit_pipe_invalidate(mask_flags, job->ring_ops_flush_tlb, dw, i);
+
+ /* hsdes: 1809175790 */
+ if (has_aux_ccs(xe))
+ i = emit_aux_table_inv(gt, CCS_AUX_INV, dw, i);
+
+ dw[i++] = preparser_disable(false);
+
+ i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, dw, i);
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
+ i = emit_render_cache_flush(job, dw, i);
+
+ if (job->user_fence.used)
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+ job->user_fence.value,
+ dw, i);
+
+ i = emit_pipe_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, lacks_render, dw, i);
+
+ i = emit_user_interrupt(dw, i);
+
+ i = emit_pipe_control_to_ring_end(job->q->hwe, dw, i);
+
+ xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
+}
+
+static void emit_migration_job_gen12(struct xe_sched_job *job,
+ struct xe_lrc *lrc, u32 seqno)
+{
+ u32 dw[MAX_JOB_SIZE_DW], i = 0;
+
+ i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
+ seqno, dw, i);
+
+ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
+
+ i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);
+
+ /* XXX: Do we need this? Leaving for now. */
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_invalidate(0, dw, i);
+ dw[i++] = preparser_disable(false);
+
+ i = emit_bb_start(job->batch_addr[1], BIT(8), dw, i);
+
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW;
+ dw[i++] = xe_lrc_seqno_ggtt_addr(lrc) | MI_FLUSH_DW_USE_GTT;
+ dw[i++] = 0;
+ dw[i++] = seqno; /* value */
+
+ i = emit_user_interrupt(dw, i);
+
+ xe_gt_assert(job->q->gt, i <= MAX_JOB_SIZE_DW);
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
+}
+
+static void emit_job_gen12_gsc(struct xe_sched_job *job)
+{
+ struct xe_gt *gt = job->q->gt;
+
+ xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */
+
+ __emit_job_gen12_simple(job, job->q->lrc,
+ job->batch_addr[0],
+ xe_sched_job_seqno(job));
+}
+
+static void emit_job_gen12_copy(struct xe_sched_job *job)
+{
+ int i;
+
+ if (xe_sched_job_is_migration(job->q)) {
+ emit_migration_job_gen12(job, job->q->lrc,
+ xe_sched_job_seqno(job));
+ return;
+ }
+
+ for (i = 0; i < job->q->width; ++i)
+ __emit_job_gen12_simple(job, job->q->lrc + i,
+ job->batch_addr[i],
+ xe_sched_job_seqno(job));
+}
+
+static void emit_job_gen12_video(struct xe_sched_job *job)
+{
+ int i;
+
+ /* FIXME: Not doing parallel handshake for now */
+ for (i = 0; i < job->q->width; ++i)
+ __emit_job_gen12_video(job, job->q->lrc + i,
+ job->batch_addr[i],
+ xe_sched_job_seqno(job));
+}
+
+static void emit_job_gen12_render_compute(struct xe_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->q->width; ++i)
+ __emit_job_gen12_render_compute(job, job->q->lrc + i,
+ job->batch_addr[i],
+ xe_sched_job_seqno(job));
+}
+
+static const struct xe_ring_ops ring_ops_gen12_gsc = {
+ .emit_job = emit_job_gen12_gsc,
+};
+
+static const struct xe_ring_ops ring_ops_gen12_copy = {
+ .emit_job = emit_job_gen12_copy,
+};
+
+static const struct xe_ring_ops ring_ops_gen12_video = {
+ .emit_job = emit_job_gen12_video,
+};
+
+static const struct xe_ring_ops ring_ops_gen12_render_compute = {
+ .emit_job = emit_job_gen12_render_compute,
+};
+
+const struct xe_ring_ops *
+xe_ring_ops_get(struct xe_gt *gt, enum xe_engine_class class)
+{
+ switch (class) {
+ case XE_ENGINE_CLASS_OTHER:
+ return &ring_ops_gen12_gsc;
+ case XE_ENGINE_CLASS_COPY:
+ return &ring_ops_gen12_copy;
+ case XE_ENGINE_CLASS_VIDEO_DECODE:
+ case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ return &ring_ops_gen12_video;
+ case XE_ENGINE_CLASS_RENDER:
+ case XE_ENGINE_CLASS_COMPUTE:
+ return &ring_ops_gen12_render_compute;
+ default:
+ return NULL;
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.h b/drivers/gpu/drm/xe/xe_ring_ops.h
new file mode 100644
index 000000000..e942735d7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ring_ops.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_RING_OPS_H_
+#define _XE_RING_OPS_H_
+
+#include "xe_hw_engine_types.h"
+#include "xe_ring_ops_types.h"
+
+struct xe_gt;
+
+const struct xe_ring_ops *
+xe_ring_ops_get(struct xe_gt *gt, enum xe_engine_class class);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h
new file mode 100644
index 000000000..1ae56e2ee
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_RING_OPS_TYPES_H_
+#define _XE_RING_OPS_TYPES_H_
+
+struct xe_sched_job;
+
+#define MAX_JOB_SIZE_DW 48
+#define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4)
+
+/**
+ * struct xe_ring_ops - Ring operations
+ */
+struct xe_ring_ops {
+ /** @emit_job: Write job to ring */
+ void (*emit_job)(struct xe_sched_job *job);
+};
+
+#endif
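
As a usage sketch, the scheduler-side hookup is expected to look roughly like this (hypothetical; the real wiring lives wherever engines and queues are initialized):

#include <linux/errno.h>

#include "xe_ring_ops.h"

/* Resolve the ring ops for an engine class and emit one job. */
static int example_emit(struct xe_gt *gt, enum xe_engine_class class,
			struct xe_sched_job *job)
{
	const struct xe_ring_ops *ops = xe_ring_ops_get(gt, class);

	if (!ops)
		return -ENODEV;	/* unsupported engine class */

	ops->emit_job(job);
	return 0;
}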
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
new file mode 100644
index 000000000..fb44cc752
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_rtp.h"
+
+#include <kunit/visibility.h>
+
+#include <drm/xe_drm.h>
+
+#include "xe_gt.h"
+#include "xe_gt_topology.h"
+#include "xe_macros.h"
+#include "xe_reg_sr.h"
+
+/**
+ * DOC: Register Table Processing
+ *
+ * Internal infrastructure to define how registers should be updated based on
+ * rules and actions. This can be used to define tables with multiple entries
+ * (one per register) that will be walked over at some point in time to apply
+ * the values to the registers that have matching rules.
+ */
+
+static bool has_samedia(const struct xe_device *xe)
+{
+ return xe->info.media_verx100 >= 1300;
+}
+
+static bool rule_matches(const struct xe_device *xe,
+ struct xe_gt *gt,
+ struct xe_hw_engine *hwe,
+ const struct xe_rtp_rule *rules,
+ unsigned int n_rules)
+{
+ const struct xe_rtp_rule *r;
+ unsigned int i;
+ bool match;
+
+ for (r = rules, i = 0; i < n_rules; r = &rules[++i]) {
+ switch (r->match_type) {
+ case XE_RTP_MATCH_PLATFORM:
+ match = xe->info.platform == r->platform;
+ break;
+ case XE_RTP_MATCH_SUBPLATFORM:
+ match = xe->info.platform == r->platform &&
+ xe->info.subplatform == r->subplatform;
+ break;
+ case XE_RTP_MATCH_GRAPHICS_VERSION:
+ match = xe->info.graphics_verx100 == r->ver_start &&
+ (!has_samedia(xe) || !xe_gt_is_media_type(gt));
+ break;
+ case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE:
+ match = xe->info.graphics_verx100 >= r->ver_start &&
+ xe->info.graphics_verx100 <= r->ver_end &&
+ (!has_samedia(xe) || !xe_gt_is_media_type(gt));
+ break;
+ case XE_RTP_MATCH_GRAPHICS_STEP:
+ match = xe->info.step.graphics >= r->step_start &&
+ xe->info.step.graphics < r->step_end &&
+ (!has_samedia(xe) || !xe_gt_is_media_type(gt));
+ break;
+ case XE_RTP_MATCH_MEDIA_VERSION:
+ match = xe->info.media_verx100 == r->ver_start &&
+ (!has_samedia(xe) || xe_gt_is_media_type(gt));
+ break;
+ case XE_RTP_MATCH_MEDIA_VERSION_RANGE:
+ match = xe->info.media_verx100 >= r->ver_start &&
+ xe->info.media_verx100 <= r->ver_end &&
+ (!has_samedia(xe) || xe_gt_is_media_type(gt));
+ break;
+ case XE_RTP_MATCH_MEDIA_STEP:
+ match = xe->info.step.media >= r->step_start &&
+ xe->info.step.media < r->step_end &&
+ (!has_samedia(xe) || xe_gt_is_media_type(gt));
+ break;
+ case XE_RTP_MATCH_INTEGRATED:
+ match = !xe->info.is_dgfx;
+ break;
+ case XE_RTP_MATCH_DISCRETE:
+ match = xe->info.is_dgfx;
+ break;
+ case XE_RTP_MATCH_ENGINE_CLASS:
+ if (drm_WARN_ON(&xe->drm, !hwe))
+ return false;
+
+ match = hwe->class == r->engine_class;
+ break;
+ case XE_RTP_MATCH_NOT_ENGINE_CLASS:
+ if (drm_WARN_ON(&xe->drm, !hwe))
+ return false;
+
+ match = hwe->class != r->engine_class;
+ break;
+ case XE_RTP_MATCH_FUNC:
+ match = r->match_func(gt, hwe);
+ break;
+ default:
+ drm_warn(&xe->drm, "Invalid RTP match %u\n",
+ r->match_type);
+ match = false;
+ }
+
+ if (!match)
+ return false;
+ }
+
+ return true;
+}
+
+static void rtp_add_sr_entry(const struct xe_rtp_action *action,
+ struct xe_gt *gt,
+ u32 mmio_base,
+ struct xe_reg_sr *sr)
+{
+ struct xe_reg_sr_entry sr_entry = {
+ .reg = action->reg,
+ .clr_bits = action->clr_bits,
+ .set_bits = action->set_bits,
+ .read_mask = action->read_mask,
+ };
+
+ sr_entry.reg.addr += mmio_base;
+ xe_reg_sr_add(sr, &sr_entry, gt);
+}
+
+static bool rtp_process_one_sr(const struct xe_rtp_entry_sr *entry,
+ struct xe_device *xe, struct xe_gt *gt,
+ struct xe_hw_engine *hwe, struct xe_reg_sr *sr)
+{
+ const struct xe_rtp_action *action;
+ u32 mmio_base;
+ unsigned int i;
+
+ if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
+ return false;
+
+ for (i = 0, action = &entry->actions[0]; i < entry->n_actions; action++, i++) {
+ if ((entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) ||
+ (action->flags & XE_RTP_ACTION_FLAG_ENGINE_BASE))
+ mmio_base = hwe->mmio_base;
+ else
+ mmio_base = 0;
+
+ rtp_add_sr_entry(action, gt, mmio_base, sr);
+ }
+
+ return true;
+}
+
+static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
+ struct xe_hw_engine **hwe,
+ struct xe_gt **gt,
+ struct xe_device **xe)
+{
+ switch (ctx->type) {
+ case XE_RTP_PROCESS_TYPE_GT:
+ *hwe = NULL;
+ *gt = ctx->gt;
+ *xe = gt_to_xe(*gt);
+ break;
+ case XE_RTP_PROCESS_TYPE_ENGINE:
+ *hwe = ctx->hwe;
+ *gt = (*hwe)->gt;
+ *xe = gt_to_xe(*gt);
+ break;
+ }
+}
+
+/**
+ * xe_rtp_process_ctx_enable_active_tracking - Enable tracking of active entries
+ *
+ * Set additional metadata to track what entries are considered "active", i.e.
+ * their rules match the condition. Bits are never cleared: entries with
+ * matching rules set the corresponding bit in the bitmap.
+ *
+ * @ctx: The context for processing the table
+ * @active_entries: bitmap to store the active entries
+ * @n_entries: number of entries to be processed
+ */
+void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
+ unsigned long *active_entries,
+ size_t n_entries)
+{
+ ctx->active_entries = active_entries;
+ ctx->n_entries = n_entries;
+}
+
+static void rtp_mark_active(struct xe_device *xe,
+ struct xe_rtp_process_ctx *ctx,
+ unsigned int first, unsigned int last)
+{
+ if (!ctx->active_entries)
+ return;
+
+ if (drm_WARN_ON(&xe->drm, last >= ctx->n_entries))
+ return;
+
+ if (first == last)
+ bitmap_set(ctx->active_entries, first, 1);
+ else
+ bitmap_set(ctx->active_entries, first, last - first + 1);
+}
+
+/**
+ * xe_rtp_process_to_sr - Process all rtp @entries, adding the matching ones to
+ * the save-restore argument.
+ * @ctx: The context for processing the table, with one of device, gt or hwe
+ * @entries: Table with RTP definitions
+ * @sr: Save-restore struct where matching rules execute the action. This can be
+ * viewed as the "coalesced view" of multiple tables. The bits for each
+ * register set are expected not to collide with previously added entries
+ *
+ * Walk the table pointed to by @entries (with an empty sentinel) and add all
+ * entries with matching rules to @sr. If @hwe is not NULL, its mmio_base is
+ * used to calculate the right register offset
+ */
+void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
+ const struct xe_rtp_entry_sr *entries,
+ struct xe_reg_sr *sr)
+{
+ const struct xe_rtp_entry_sr *entry;
+ struct xe_hw_engine *hwe = NULL;
+ struct xe_gt *gt = NULL;
+ struct xe_device *xe = NULL;
+
+ rtp_get_context(ctx, &hwe, &gt, &xe);
+
+ for (entry = entries; entry && entry->name; entry++) {
+ bool match = false;
+
+ if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
+ struct xe_hw_engine *each_hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(each_hwe, gt, id)
+ match |= rtp_process_one_sr(entry, xe, gt,
+ each_hwe, sr);
+ } else {
+ match = rtp_process_one_sr(entry, xe, gt, hwe, sr);
+ }
+
+ if (match)
+ rtp_mark_active(xe, ctx, entry - entries,
+ entry - entries);
+ }
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
+
+/**
+ * xe_rtp_process - Process all rtp @entries, without running any action
+ * @ctx: The context for processing the table, with one of device, gt or hwe
+ * @entries: Table with RTP definitions
+ *
+ * Walk the table pointed to by @entries (with an empty sentinel), executing the
+ * rules. A few differences from xe_rtp_process_to_sr():
+ *
+ * 1. There is no action associated with each entry since this uses
+ * struct xe_rtp_entry. Its main use is for marking active workarounds via
+ * xe_rtp_process_ctx_enable_active_tracking().
+ * 2. There is support for OR operations by having entries with no name.
+ */
+void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
+ const struct xe_rtp_entry *entries)
+{
+ const struct xe_rtp_entry *entry, *first_entry;
+ struct xe_hw_engine *hwe;
+ struct xe_gt *gt;
+ struct xe_device *xe;
+
+ rtp_get_context(ctx, &hwe, &gt, &xe);
+
+ first_entry = entries;
+ if (drm_WARN_ON(&xe->drm, !first_entry->name))
+ return;
+
+ for (entry = entries; entry && entry->rules; entry++) {
+ if (entry->name)
+ first_entry = entry;
+
+ if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
+ continue;
+
+ /* Fast-forward entry, eliminating the OR'ed entries */
+ for (entry++; entry && entry->rules; entry++)
+ if (entry->name)
+ break;
+ entry--;
+
+ rtp_mark_active(xe, ctx, first_entry - entries,
+ entry - entries);
+ }
+}
+
+bool xe_rtp_match_even_instance(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return hwe->instance % 2 == 0;
+}
+
+bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ u64 render_compute_mask = gt->info.engine_mask &
+ (XE_HW_ENGINE_CCS_MASK | XE_HW_ENGINE_RCS_MASK);
+
+ return render_compute_mask &&
+ hwe->engine_id == __ffs(render_compute_mask);
+}
+
+bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ unsigned int dss_per_gslice = 4;
+ unsigned int dss;
+
+ if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask),
+ "Checking gslice for platform without geometry pipeline\n"))
+ return false;
+
+ dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0);
+
+ return dss >= dss_per_gslice;
+}
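
For reference, a hypothetical table in the style this file processes, plus the GT-level call that coalesces it into a save-restore struct; EXAMPLE_REG and the version range are made up, and the RULES/ACTIONS macros are defined in xe_rtp.h below:

#include <linux/bits.h>

#include "xe_rtp.h"

static const struct xe_rtp_entry_sr example_entries[] = {
	{ XE_RTP_NAME("example-entry"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
	  XE_RTP_ACTIONS(SET(EXAMPLE_REG, BIT(0)))
	},
	{}
};

/* Coalesce matching entries into @sr for later MMIO application. */
static void example_process_gt(struct xe_gt *gt, struct xe_reg_sr *sr)
{
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);

	xe_rtp_process_to_sr(&ctx, example_entries, sr);
}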
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
new file mode 100644
index 000000000..c56fedd12
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_RTP_
+#define _XE_RTP_
+
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#define _XE_RTP_INCLUDE_PRIVATE_HELPERS
+
+#include "xe_rtp_helpers.h"
+#include "xe_rtp_types.h"
+
+#undef _XE_RTP_INCLUDE_PRIVATE_HELPERS
+
+/*
+ * Register table poke infrastructure
+ */
+
+struct xe_hw_engine;
+struct xe_gt;
+struct xe_reg_sr;
+
+/*
+ * Macros to encode rules to match against platform, IP version, stepping, etc.
+ * Shouldn't be used directly - see XE_RTP_RULES()
+ */
+#define _XE_RTP_RULE_PLATFORM(plat__) \
+ { .match_type = XE_RTP_MATCH_PLATFORM, .platform = plat__ }
+
+#define _XE_RTP_RULE_SUBPLATFORM(plat__, sub__) \
+ { .match_type = XE_RTP_MATCH_SUBPLATFORM, \
+ .platform = plat__, .subplatform = sub__ }
+
+#define _XE_RTP_RULE_GRAPHICS_STEP(start__, end__) \
+ { .match_type = XE_RTP_MATCH_GRAPHICS_STEP, \
+ .step_start = start__, .step_end = end__ }
+
+#define _XE_RTP_RULE_MEDIA_STEP(start__, end__) \
+ { .match_type = XE_RTP_MATCH_MEDIA_STEP, \
+ .step_start = start__, .step_end = end__ }
+
+#define _XE_RTP_RULE_ENGINE_CLASS(cls__) \
+ { .match_type = XE_RTP_MATCH_ENGINE_CLASS, \
+ .engine_class = (cls__) }
+
+/**
+ * XE_RTP_RULE_PLATFORM - Create rule matching platform
+ * @plat_: platform to match
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_PLATFORM(plat_) \
+ _XE_RTP_RULE_PLATFORM(XE_##plat_)
+
+/**
+ * XE_RTP_RULE_SUBPLATFORM - Create rule matching platform and sub-platform
+ * @plat_: platform to match
+ * @sub_: sub-platform to match
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_SUBPLATFORM(plat_, sub_) \
+ _XE_RTP_RULE_SUBPLATFORM(XE_##plat_, XE_SUBPLATFORM_##plat_##_##sub_)
+
+/**
+ * XE_RTP_RULE_GRAPHICS_STEP - Create rule matching graphics stepping
+ * @start_: First stepping matching the rule
+ * @end_: First stepping that does not match the rule
+ *
+ * Note that the range matching this rule is [ @start_, @end_ ), i.e. inclusive
+ * on the left, exclusive on the right.
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_GRAPHICS_STEP(start_, end_) \
+ _XE_RTP_RULE_GRAPHICS_STEP(STEP_##start_, STEP_##end_)
+
+/**
+ * XE_RTP_RULE_MEDIA_STEP - Create rule matching media stepping
+ * @start_: First stepping matching the rule
+ * @end_: First stepping that does not match the rule
+ *
+ * Note that the range matching this rule is [ @start_, @end_ ), i.e. inclusive
+ * on the left, exclusive on the right.
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_MEDIA_STEP(start_, end_) \
+ _XE_RTP_RULE_MEDIA_STEP(STEP_##start_, STEP_##end_)
+
+/**
+ * XE_RTP_RULE_ENGINE_CLASS - Create rule matching an engine class
+ * @cls_: Engine class to match
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_ENGINE_CLASS(cls_) \
+ _XE_RTP_RULE_ENGINE_CLASS(XE_ENGINE_CLASS_##cls_)
+
+/**
+ * XE_RTP_RULE_FUNC - Create rule using callback function for match
+ * @func__: Function to call to decide if rule matches
+ *
+ * This allows more complex checks to be performed. The ``XE_RTP``
+ * infrastructure will simply call the function @func__ passed to decide if this
+ * rule matches the device.
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_FUNC(func__) \
+ { .match_type = XE_RTP_MATCH_FUNC, \
+ .match_func = (func__) }
+
+/**
+ * XE_RTP_RULE_GRAPHICS_VERSION - Create rule matching graphics version
+ * @ver__: Graphics IP version to match
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_GRAPHICS_VERSION(ver__) \
+ { .match_type = XE_RTP_MATCH_GRAPHICS_VERSION, \
+ .ver_start = ver__, }
+
+/**
+ * XE_RTP_RULE_GRAPHICS_VERSION_RANGE - Create rule matching a range of graphics version
+ * @ver_start__: First graphics IP version to match
+ * @ver_end__: Last graphics IP version to match
+ *
+ * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
+ * inclusive on both sides
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_GRAPHICS_VERSION_RANGE(ver_start__, ver_end__) \
+ { .match_type = XE_RTP_MATCH_GRAPHICS_VERSION_RANGE, \
+ .ver_start = ver_start__, .ver_end = ver_end__, }
+
+/**
+ * XE_RTP_RULE_MEDIA_VERSION - Create rule matching media version
+ * @ver__: Media IP version to match
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_MEDIA_VERSION(ver__) \
+ { .match_type = XE_RTP_MATCH_MEDIA_VERSION, \
+ .ver_start = ver__, }
+
+/**
+ * XE_RTP_RULE_MEDIA_VERSION_RANGE - Create rule matching a range of media version
+ * @ver_start__: First media IP version to match
+ * @ver_end__: Last media IP version to match
+ *
+ * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
+ * inclusive on both sides
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_MEDIA_VERSION_RANGE(ver_start__, ver_end__) \
+ { .match_type = XE_RTP_MATCH_MEDIA_VERSION_RANGE, \
+ .ver_start = ver_start__, .ver_end = ver_end__, }
+
+/**
+ * XE_RTP_RULE_IS_INTEGRATED - Create a rule matching integrated graphics devices
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_IS_INTEGRATED \
+ { .match_type = XE_RTP_MATCH_INTEGRATED }
+
+/**
+ * XE_RTP_RULE_IS_DISCRETE - Create a rule matching discrete graphics devices
+ *
+ * Refer to XE_RTP_RULES() for expected usage.
+ */
+#define XE_RTP_RULE_IS_DISCRETE \
+ { .match_type = XE_RTP_MATCH_DISCRETE }
+
+/**
+ * XE_RTP_ACTION_WR - Helper to write a value to the register, overriding all
+ * the bits
+ * @reg_: Register
+ * @val_: Value to set
+ * @...: Additional fields to override in the struct xe_rtp_action entry
+ *
+ * The correspondent notation in bspec is:
+ *
+ * REGNAME = VALUE
+ */
+#define XE_RTP_ACTION_WR(reg_, val_, ...) \
+ { .reg = XE_RTP_DROP_CAST(reg_), \
+ .clr_bits = ~0u, .set_bits = (val_), \
+ .read_mask = (~0u), ##__VA_ARGS__ }
+
+/**
+ * XE_RTP_ACTION_SET - Set bits from @val_ in the register.
+ * @reg_: Register
+ * @val_: Bits to set in the register
+ * @...: Additional fields to override in the struct xe_rtp_action entry
+ *
+ * For masked registers this translates to a single write, while for other
+ * registers it's an RMW. The corresponding bspec notation is (example for bits 2
+ * and 5, but could be any):
+ *
+ * REGNAME[2] = 1
+ * REGNAME[5] = 1
+ */
+#define XE_RTP_ACTION_SET(reg_, val_, ...) \
+ { .reg = XE_RTP_DROP_CAST(reg_), \
+ .clr_bits = val_, .set_bits = val_, \
+ .read_mask = val_, ##__VA_ARGS__ }
+
+/**
+ * XE_RTP_ACTION_CLR - Clear bits from @val_ in the register.
+ * @reg_: Register
+ * @val_: Bits to clear in the register
+ * @...: Additional fields to override in the struct xe_rtp_action entry
+ *
+ * For masked registers this translates to a single write, while for other
+ * registers it's an RMW. The corresponding bspec notation is (example for bits 2
+ * and 5, but could be any):
+ *
+ * REGNAME[2] = 0
+ * REGNAME[5] = 0
+ */
+#define XE_RTP_ACTION_CLR(reg_, val_, ...) \
+ { .reg = XE_RTP_DROP_CAST(reg_), \
+ .clr_bits = val_, .set_bits = 0, \
+ .read_mask = val_, ##__VA_ARGS__ }
+
+/**
+ * XE_RTP_ACTION_FIELD_SET - Set a bit range
+ * @reg_: Register
+ * @mask_bits_: Mask of bits to be changed in the register, forming a field
+ * @val_: Value to set in the field denoted by @mask_bits_
+ * @...: Additional fields to override in the struct xe_rtp_action entry
+ *
+ * For masked registers this translates to a single write, while for other
+ * registers it's an RMW. The corresponding bspec notation is:
+ *
+ * REGNAME[<end>:<start>] = VALUE
+ */
+#define XE_RTP_ACTION_FIELD_SET(reg_, mask_bits_, val_, ...) \
+ { .reg = XE_RTP_DROP_CAST(reg_), \
+ .clr_bits = mask_bits_, .set_bits = val_, \
+ .read_mask = mask_bits_, ##__VA_ARGS__ }
+
+#define XE_RTP_ACTION_FIELD_SET_NO_READ_MASK(reg_, mask_bits_, val_, ...) \
+ { .reg = XE_RTP_DROP_CAST(reg_), \
+ .clr_bits = (mask_bits_), .set_bits = (val_), \
+ .read_mask = 0, ##__VA_ARGS__ }
+
+/**
+ * XE_RTP_ACTION_WHITELIST - Add register to userspace whitelist
+ * @reg_: Register
+ * @val_: Whitelist-specific flags to set
+ * @...: Additional fields to override in the struct xe_rtp_action entry
+ *
+ * Add a register to the whitelist, allowing userspace to modify the register with
+ * regular user privileges.
+ */
+#define XE_RTP_ACTION_WHITELIST(reg_, val_, ...) \
+ /* TODO fail build if ((flags) & ~(RING_FORCE_TO_NONPRIV_MASK_VALID)) */\
+ { .reg = XE_RTP_DROP_CAST(reg_), \
+ .set_bits = val_, \
+ .clr_bits = RING_FORCE_TO_NONPRIV_MASK_VALID, \
+ ##__VA_ARGS__ }
+
+/**
+ * XE_RTP_NAME - Helper to set the name in xe_rtp_entry
+ * @s_: Name describing this rule, often a HW-specific number
+ *
+ * TODO: maybe move this behind a debug config?
+ */
+#define XE_RTP_NAME(s_) .name = (s_)
+
+/**
+ * XE_RTP_ENTRY_FLAG - Helper to add multiple flags to a struct xe_rtp_entry_sr
+ * @...: Entry flags, without the ``XE_RTP_ENTRY_FLAG_`` prefix
+ *
+ * Helper to automatically add a ``XE_RTP_ENTRY_FLAG_`` prefix to the flags
+ * when defining struct xe_rtp_entry entries. Example:
+ *
+ * .. code-block:: c
+ *
+ * const struct xe_rtp_entry_sr wa_entries[] = {
+ * ...
+ * { XE_RTP_NAME("test-entry"),
+ * ...
+ * XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ * ...
+ * },
+ * ...
+ * };
+ */
+#define XE_RTP_ENTRY_FLAG(...) \
+ .flags = (XE_RTP_PASTE_FOREACH(ENTRY_FLAG_, BITWISE_OR, (__VA_ARGS__)))
+
+/**
+ * XE_RTP_ACTION_FLAG - Helper to add multiple flags to a struct xe_rtp_action
+ * @...: Action flags, without the ``XE_RTP_ACTION_FLAG_`` prefix
+ *
+ * Helper to automatically add a ``XE_RTP_ACTION_FLAG_`` prefix to the flags
+ * when defining struct xe_rtp_action entries. Example:
+ *
+ * .. code-block:: c
+ *
+ * const struct xe_rtp_entry_sr wa_entries[] = {
+ * ...
+ * { XE_RTP_NAME("test-entry"),
+ * ...
+ * XE_RTP_ACTION_SET(..., XE_RTP_ACTION_FLAG(FOREACH_ENGINE)),
+ * ...
+ * },
+ * ...
+ * };
+ */
+#define XE_RTP_ACTION_FLAG(...) \
+ .flags = (XE_RTP_PASTE_FOREACH(ACTION_FLAG_, BITWISE_OR, (__VA_ARGS__)))
+
+/**
+ * XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
+ * @...: Rules
+ *
+ * At least one rule is needed and up to 4 are supported. Multiple rules are
+ * AND'ed together, i.e. all the rules must evaluate to true for the entry to
+ * be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
+ *
+ * .. code-block:: c
+ *
+ * const struct xe_rtp_entry_sr wa_entries[] = {
+ * ...
+ * { XE_RTP_NAME("test-entry"),
+ * XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ * ...
+ * },
+ * ...
+ * };
+ */
+#define XE_RTP_RULES(...) \
+ .n_rules = _XE_COUNT_ARGS(__VA_ARGS__), \
+ .rules = (const struct xe_rtp_rule[]) { \
+ XE_RTP_PASTE_FOREACH(RULE_, COMMA, (__VA_ARGS__)) \
+ }
+
+/**
+ * XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
+ * @...: Actions to be taken
+ *
+ * At least one action is needed and up to 4 are supported. See XE_RTP_ACTION_*
+ * for the possible actions. Example:
+ *
+ * .. code-block:: c
+ *
+ * const struct xe_rtp_entry_sr wa_entries[] = {
+ * ...
+ * { XE_RTP_NAME("test-entry"),
+ * XE_RTP_RULES(...),
+ * XE_RTP_ACTIONS(SET(..), SET(...), CLR(...)),
+ * ...
+ * },
+ * ...
+ * };
+ */
+#define XE_RTP_ACTIONS(...) \
+ .n_actions = _XE_COUNT_ARGS(__VA_ARGS__), \
+ .actions = (const struct xe_rtp_action[]) { \
+ XE_RTP_PASTE_FOREACH(ACTION_, COMMA, (__VA_ARGS__)) \
+ }
+
+#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
+ struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
+ struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })
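+
+/*
+ * Illustrative usage (assumed, not from this patch): the _Generic initializer
+ * above lets the same processing entry points accept either context type:
+ *
+ *	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+ *
+ *	xe_rtp_process_to_sr(&ctx, wa_entries, &gt->reg_sr);
+ */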
+
+void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
+ unsigned long *active_entries,
+ size_t n_entries);
+
+void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
+ const struct xe_rtp_entry_sr *entries,
+ struct xe_reg_sr *sr);
+
+void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
+ const struct xe_rtp_entry *entries);
+
+/* Match functions to be used with XE_RTP_MATCH_FUNC */
+
+/**
+ * xe_rtp_match_even_instance - Match if engine instance is even
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if engine instance is even, false otherwise
+ */
+bool xe_rtp_match_even_instance(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+/**
+ * xe_rtp_match_first_render_or_compute - Match if it's the first render or
+ * compute engine in the GT
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Registers on the render reset domain need to have their values re-applied
+ * when any of those engines are reset. Since those engines reset together, the
+ * programming can be applied to just one of them; for simplicity, the first
+ * engine of either the render or compute class is chosen.
+ *
+ * Returns: true if engine id is the first to match the render reset domain,
+ * false otherwise.
+ */
+bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+/**
+ * xe_rtp_match_first_gslice_fused_off - Match when first gslice is fused off
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if first gslice is fused off, false otherwise.
+ */
+bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_rtp_helpers.h b/drivers/gpu/drm/xe/xe_rtp_helpers.h
new file mode 100644
index 000000000..181b6290f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_rtp_helpers.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_RTP_HELPERS_
+#define _XE_RTP_HELPERS_
+
+#ifndef _XE_RTP_INCLUDE_PRIVATE_HELPERS
+#error "This header is supposed to be included by xe_rtp.h only"
+#endif
+
+/*
+ * Helper macros - not to be used outside this header.
+ */
+#define _XE_ESC(...) __VA_ARGS__
+#define _XE_COUNT_ARGS(...) _XE_ESC(__XE_COUNT_ARGS(__VA_ARGS__, 5, 4, 3, 2, 1,))
+#define __XE_COUNT_ARGS(_, _5, _4, _3, _2, X_, ...) X_
+
+#define _XE_FIRST(...) _XE_ESC(__XE_FIRST(__VA_ARGS__,))
+#define __XE_FIRST(x_, ...) x_
+#define _XE_TUPLE_TAIL(...) _XE_ESC(__XE_TUPLE_TAIL(__VA_ARGS__))
+#define __XE_TUPLE_TAIL(x_, ...) (__VA_ARGS__)
+
+#define _XE_DROP_FIRST(x_, ...) __VA_ARGS__
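+
+/*
+ * Illustrative expansions of the helpers above:
+ *
+ *	_XE_COUNT_ARGS(a, b, c)		expands to	3
+ *	_XE_FIRST(a, b, c)		expands to	a
+ *	_XE_TUPLE_TAIL(a, b, c)		expands to	(b, c)
+ *	_XE_DROP_FIRST(a, b, c)		expands to	b, c
+ */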
+
+#define _XE_RTP_CONCAT(a, b) __XE_RTP_CONCAT(a, b)
+#define __XE_RTP_CONCAT(a, b) XE_RTP_ ## a ## b
+
+#define __XE_RTP_PASTE_SEP_COMMA ,
+#define __XE_RTP_PASTE_SEP_BITWISE_OR |
+
+/*
+ * XE_RTP_PASTE_FOREACH - Paste XE_RTP_<@prefix_> on each element of the tuple
+ * @args_, with the end result separated by @sep_. @sep_ must be one of the
+ * previously declared macros __XE_RTP_PASTE_SEP_*, or declared with such
+ * prefix.
+ *
+ * Examples:
+ *
+ * 1) XE_RTP_PASTE_FOREACH(TEST_, COMMA, (FOO, BAR))
+ * expands to:
+ *
+ * XE_RTP_TEST_FOO , XE_RTP_TEST_BAR
+ *
+ * 2) XE_RTP_PASTE_FOREACH(TEST2_, COMMA, (FOO))
+ * expands to:
+ *
+ * XE_RTP_TEST2_FOO
+ *
+ * 3) XE_RTP_PASTE_FOREACH(TEST3_, BITWISE_OR, (FOO, BAR))
+ * expands to:
+ *
+ * XE_RTP_TEST3_FOO | XE_RTP_TEST3_BAR
+ *
+ * 4) #define __XE_RTP_PASTE_SEP_MY_SEP BANANA
+ * XE_RTP_PASTE_FOREACH(TEST_, MY_SEP, (FOO, BAR))
+ * expands to:
+ *
+ * XE_RTP_TEST_FOO BANANA XE_RTP_TEST_BAR
+ */
+#define XE_RTP_PASTE_FOREACH(prefix_, sep_, args_) _XE_ESC(_XE_RTP_CONCAT(PASTE_, _XE_COUNT_ARGS args_)(prefix_, sep_, args_))
+#define XE_RTP_PASTE_1(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_)
+#define XE_RTP_PASTE_2(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_1(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_3(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_2(prefix_, sep_, _XE_TUPLE_TAIL args_)
+#define XE_RTP_PASTE_4(prefix_, sep_, args_) _XE_RTP_CONCAT(prefix_, _XE_FIRST args_) __XE_RTP_PASTE_SEP_ ## sep_ XE_RTP_PASTE_3(prefix_, sep_, _XE_TUPLE_TAIL args_)
+
+/*
+ * XE_RTP_DROP_CAST - Drop cast to convert a compound literal to an initializer
+ *
+ * Example:
+ *
+ * #define foo(a_) ((struct foo){ .a = a_ })
+ * XE_RTP_DROP_CAST(foo(10))
+ * expands to:
+ *
+ * { .a = 10 }
+ */
+#define XE_RTP_DROP_CAST(...) _XE_ESC(_XE_DROP_FIRST _XE_ESC __VA_ARGS__)
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
new file mode 100644
index 000000000..637acc762
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_RTP_TYPES_
+#define _XE_RTP_TYPES_
+
+#include <linux/types.h>
+
+#include "regs/xe_reg_defs.h"
+
+struct xe_hw_engine;
+struct xe_gt;
+
+/**
+ * struct xe_rtp_action - action to take for any matching rule
+ *
+ * This struct records what action should be taken in a register that has a
+ * matching rule. Example of actions: set/clear bits.
+ */
+struct xe_rtp_action {
+ /** @reg: Register */
+ struct xe_reg reg;
+ /**
+ * @clr_bits: bits to clear when updating register. It's always a
+ * superset of bits being modified
+ */
+ u32 clr_bits;
+ /** @set_bits: bits to set when updating register */
+ u32 set_bits;
+#define XE_RTP_NOCHECK .read_mask = 0
+ /** @read_mask: mask for bits to consider when reading value back */
+ u32 read_mask;
+#define XE_RTP_ACTION_FLAG_ENGINE_BASE BIT(0)
+ /** @flags: flags to apply on rule evaluation or action */
+ u8 flags;
+};
+
+enum {
+ XE_RTP_MATCH_PLATFORM,
+ XE_RTP_MATCH_SUBPLATFORM,
+ XE_RTP_MATCH_GRAPHICS_VERSION,
+ XE_RTP_MATCH_GRAPHICS_VERSION_RANGE,
+ XE_RTP_MATCH_GRAPHICS_STEP,
+ XE_RTP_MATCH_MEDIA_VERSION,
+ XE_RTP_MATCH_MEDIA_VERSION_RANGE,
+ XE_RTP_MATCH_MEDIA_STEP,
+ XE_RTP_MATCH_INTEGRATED,
+ XE_RTP_MATCH_DISCRETE,
+ XE_RTP_MATCH_ENGINE_CLASS,
+ XE_RTP_MATCH_NOT_ENGINE_CLASS,
+ XE_RTP_MATCH_FUNC,
+};
+
+/** struct xe_rtp_rule - match rule for processing entry */
+struct xe_rtp_rule {
+ u8 match_type;
+
+ /* match filters */
+ union {
+ /* MATCH_PLATFORM / MATCH_SUBPLATFORM */
+ struct {
+ u8 platform;
+ u8 subplatform;
+ };
+ /*
+ * MATCH_GRAPHICS_VERSION / XE_RTP_MATCH_GRAPHICS_VERSION_RANGE /
+ * MATCH_MEDIA_VERSION / XE_RTP_MATCH_MEDIA_VERSION_RANGE
+ */
+ struct {
+ u32 ver_start;
+#define XE_RTP_END_VERSION_UNDEFINED U32_MAX
+ u32 ver_end;
+ };
+ /* MATCH_STEP */
+ struct {
+ u8 step_start;
+ u8 step_end;
+ };
+ /* MATCH_ENGINE_CLASS / MATCH_NOT_ENGINE_CLASS */
+ struct {
+ u8 engine_class;
+ };
+ /* MATCH_FUNC */
+ bool (*match_func)(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+ };
+};
+
+/** struct xe_rtp_entry_sr - Entry in an rtp table */
+struct xe_rtp_entry_sr {
+ const char *name;
+ const struct xe_rtp_action *actions;
+ const struct xe_rtp_rule *rules;
+ u8 n_rules;
+ u8 n_actions;
+#define XE_RTP_ENTRY_FLAG_FOREACH_ENGINE BIT(0)
+ u8 flags;
+};
+
+/** struct xe_rtp_entry - Entry in an rtp table, with no action associated */
+struct xe_rtp_entry {
+ const char *name;
+ const struct xe_rtp_rule *rules;
+ u8 n_rules;
+};
+
+enum xe_rtp_process_type {
+ XE_RTP_PROCESS_TYPE_GT,
+ XE_RTP_PROCESS_TYPE_ENGINE,
+};
+
+struct xe_rtp_process_ctx {
+ union {
+ struct xe_gt *gt;
+ struct xe_hw_engine *hwe;
+ };
+ enum xe_rtp_process_type type;
+ unsigned long *active_entries;
+ size_t n_entries;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
new file mode 100644
index 000000000..2c4632259
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_sa.h"
+
+#include <linux/kernel.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_map.h"
+
+static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_sa_manager *sa_manager = arg;
+ struct xe_bo *bo = sa_manager->bo;
+
+ if (!bo) {
+ drm_err(drm, "no bo for sa manager\n");
+ return;
+ }
+
+ drm_suballoc_manager_fini(&sa_manager->base);
+
+ if (bo->vmap.is_iomem)
+ kvfree(sa_manager->cpu_ptr);
+
+ xe_bo_unpin_map_no_vm(bo);
+ sa_manager->bo = NULL;
+}
+
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ u32 managed_size = size - SZ_4K;
+ struct xe_bo *bo;
+ int ret;
+
+ struct xe_sa_manager *sa_manager = drmm_kzalloc(&tile_to_xe(tile)->drm,
+ sizeof(*sa_manager),
+ GFP_KERNEL);
+ if (!sa_manager)
+ return ERR_PTR(-ENOMEM);
+
+ sa_manager->bo = NULL;
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+ XE_BO_CREATE_GGTT_BIT);
+ if (IS_ERR(bo)) {
+ drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
+ PTR_ERR(bo));
+ return (struct xe_sa_manager *)bo;
+ }
+ sa_manager->bo = bo;
+
+ drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
+ sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
+
+ if (bo->vmap.is_iomem) {
+ sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
+ if (!sa_manager->cpu_ptr) {
+ xe_bo_unpin_map_no_vm(sa_manager->bo);
+ sa_manager->bo = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ sa_manager->cpu_ptr = bo->vmap.vaddr;
+ memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
+ }
+
+ ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
+ sa_manager);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sa_manager;
+}
+
+struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
+ unsigned int size)
+{
+ return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
+}
+
+void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
+{
+ struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
+ struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
+
+ if (!sa_manager->bo->vmap.is_iomem)
+ return;
+
+ xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
+ xe_sa_bo_cpu_addr(sa_bo),
+ drm_suballoc_size(sa_bo));
+}
+
+void xe_sa_bo_free(struct drm_suballoc *sa_bo,
+ struct dma_fence *fence)
+{
+ drm_suballoc_free(sa_bo, fence);
+}
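+
+/*
+ * Usage sketch (illustrative; names are placeholders and error handling is
+ * elided): a typical cycle against a pool from xe_sa_bo_manager_init():
+ *
+ *	struct drm_suballoc *sa = xe_sa_bo_new(pool, size);
+ *
+ *	memcpy(xe_sa_bo_cpu_addr(sa), data, size);
+ *	xe_sa_bo_flush_write(sa);	// no-op unless the BO is iomem
+ *	... submit using xe_sa_bo_gpu_addr(sa) ...
+ *	xe_sa_bo_free(sa, fence);	// reclaimed once fence signals
+ */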
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
new file mode 100644
index 000000000..4e9648305
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef _XE_SA_H_
+#define _XE_SA_H_
+
+#include "xe_sa_types.h"
+
+struct dma_fence;
+struct xe_bo;
+struct xe_tile;
+
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);
+
+struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
+ u32 size);
+void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
+void xe_sa_bo_free(struct drm_suballoc *sa_bo,
+ struct dma_fence *fence);
+
+static inline struct xe_sa_manager *
+to_xe_sa_manager(struct drm_suballoc_manager *mng)
+{
+ return container_of(mng, struct xe_sa_manager, base);
+}
+
+static inline u64 xe_sa_bo_gpu_addr(struct drm_suballoc *sa)
+{
+ return to_xe_sa_manager(sa->manager)->gpu_addr +
+ drm_suballoc_soffset(sa);
+}
+
+static inline void *xe_sa_bo_cpu_addr(struct drm_suballoc *sa)
+{
+ return to_xe_sa_manager(sa->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
new file mode 100644
index 000000000..2ef896aec
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef _XE_SA_TYPES_H_
+#define _XE_SA_TYPES_H_
+
+#include <drm/drm_suballoc.h>
+
+struct xe_bo;
+
+struct xe_sa_manager {
+ struct drm_suballoc_manager base;
+ struct xe_bo *bo;
+ u64 gpu_addr;
+ void *cpu_ptr;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
new file mode 100644
index 000000000..d07b9ee0e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_sched_job.h"
+
+#include <linux/dma-fence-array.h>
+#include <linux/slab.h>
+
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_gt.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_fence.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_trace.h"
+#include "xe_vm.h"
+
+static struct kmem_cache *xe_sched_job_slab;
+static struct kmem_cache *xe_sched_job_parallel_slab;
+
+int __init xe_sched_job_module_init(void)
+{
+ xe_sched_job_slab =
+ kmem_cache_create("xe_sched_job",
+ sizeof(struct xe_sched_job) +
+ sizeof(u64), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!xe_sched_job_slab)
+ return -ENOMEM;
+
+ xe_sched_job_parallel_slab =
+ kmem_cache_create("xe_sched_job_parallel",
+ sizeof(struct xe_sched_job) +
+ sizeof(u64) *
+ XE_HW_ENGINE_MAX_INSTANCE, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!xe_sched_job_parallel_slab) {
+ kmem_cache_destroy(xe_sched_job_slab);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void xe_sched_job_module_exit(void)
+{
+ kmem_cache_destroy(xe_sched_job_slab);
+ kmem_cache_destroy(xe_sched_job_parallel_slab);
+}
+
+static struct xe_sched_job *job_alloc(bool parallel)
+{
+ return kmem_cache_zalloc(parallel ? xe_sched_job_parallel_slab :
+ xe_sched_job_slab, GFP_KERNEL);
+}
+
+bool xe_sched_job_is_migration(struct xe_exec_queue *q)
+{
+ return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
+}
+
+static void job_free(struct xe_sched_job *job)
+{
+ struct xe_exec_queue *q = job->q;
+ bool is_migration = xe_sched_job_is_migration(q);
+
+ kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
+ xe_sched_job_parallel_slab : xe_sched_job_slab, job);
+}
+
+static struct xe_device *job_to_xe(struct xe_sched_job *job)
+{
+ return gt_to_xe(job->q->gt);
+}
+
+struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
+ u64 *batch_addr)
+{
+ struct xe_sched_job *job;
+ struct dma_fence **fences;
+ bool is_migration = xe_sched_job_is_migration(q);
+ int err;
+ int i, j;
+ u32 width;
+
+ /* only a kernel context can submit a vm-less job */
+ XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
+
+ /* Migration and kernel engines have their own locking */
+ if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
+ lockdep_assert_held(&q->vm->lock);
+ if (!xe_vm_in_lr_mode(q->vm))
+ xe_vm_assert_held(q->vm);
+ }
+
+ job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ job->q = q;
+ kref_init(&job->refcount);
+ xe_exec_queue_get(job->q);
+
+ err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
+ if (err)
+ goto err_free;
+
+ if (!xe_exec_queue_is_parallel(q)) {
+ job->fence = xe_lrc_create_seqno_fence(q->lrc);
+ if (IS_ERR(job->fence)) {
+ err = PTR_ERR(job->fence);
+ goto err_sched_job;
+ }
+ } else {
+ struct dma_fence_array *cf;
+
+ fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ err = -ENOMEM;
+ goto err_sched_job;
+ }
+
+ for (j = 0; j < q->width; ++j) {
+ fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
+ if (IS_ERR(fences[j])) {
+ err = PTR_ERR(fences[j]);
+ goto err_fences;
+ }
+ }
+
+ cf = dma_fence_array_create(q->width, fences,
+ q->parallel.composite_fence_ctx,
+ q->parallel.composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --q->parallel.composite_fence_seqno;
+ err = -ENOMEM;
+ goto err_fences;
+ }
+
+ /* Sanity check */
+ for (j = 0; j < q->width; ++j)
+ xe_assert(job_to_xe(job), cf->base.seqno == fences[j]->seqno);
+
+ job->fence = &cf->base;
+ }
+
+ width = q->width;
+ if (is_migration)
+ width = 2;
+
+ for (i = 0; i < width; ++i)
+ job->batch_addr[i] = batch_addr[i];
+
+ /* All other jobs require a VM to be open which has a ref */
+ if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
+ xe_device_mem_access_get(job_to_xe(job));
+ xe_device_assert_mem_access(job_to_xe(job));
+
+ trace_xe_sched_job_create(job);
+ return job;
+
+err_fences:
+ for (j = j - 1; j >= 0; --j) {
+ --q->lrc[j].fence_ctx.next_seqno;
+ dma_fence_put(fences[j]);
+ }
+ kfree(fences);
+err_sched_job:
+ drm_sched_job_cleanup(&job->drm);
+err_free:
+ xe_exec_queue_put(q);
+ job_free(job);
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_sched_job_destroy - Destroy XE schedule job
+ * @ref: reference to XE schedule job
+ *
+ * Called when ref == 0, drops a reference to the job's exec queue and fence,
+ * cleans up the base DRM scheduler job, and frees the xe_sched_job memory.
+ */
+void xe_sched_job_destroy(struct kref *ref)
+{
+ struct xe_sched_job *job =
+ container_of(ref, struct xe_sched_job, refcount);
+
+ if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
+ xe_device_mem_access_put(job_to_xe(job));
+ xe_exec_queue_put(job->q);
+ dma_fence_put(job->fence);
+ drm_sched_job_cleanup(&job->drm);
+ job_free(job);
+}
+
+void xe_sched_job_set_error(struct xe_sched_job *job, int error)
+{
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ return;
+
+ dma_fence_set_error(job->fence, error);
+
+ if (dma_fence_is_array(job->fence)) {
+ struct dma_fence_array *array =
+ to_dma_fence_array(job->fence);
+ struct dma_fence **child = array->fences;
+ unsigned int nchild = array->num_fences;
+
+ do {
+ struct dma_fence *current_fence = *child++;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &current_fence->flags))
+ continue;
+ dma_fence_set_error(current_fence, error);
+ } while (--nchild);
+ }
+
+ trace_xe_sched_job_set_error(job);
+
+ dma_fence_enable_sw_signaling(job->fence);
+ xe_hw_fence_irq_run(job->q->fence_irq);
+}
+
+bool xe_sched_job_started(struct xe_sched_job *job)
+{
+ struct xe_lrc *lrc = job->q->lrc;
+
+ return !__dma_fence_is_later(xe_sched_job_seqno(job),
+ xe_lrc_start_seqno(lrc),
+ job->fence->ops);
+}
+
+bool xe_sched_job_completed(struct xe_sched_job *job)
+{
+ struct xe_lrc *lrc = job->q->lrc;
+
+ /*
+ * Can safely check just LRC[0] seqno as that is last seqno written when
+ * parallel handshake is done.
+ */
+
+ return !__dma_fence_is_later(xe_sched_job_seqno(job), xe_lrc_seqno(lrc),
+ job->fence->ops);
+}
+
+void xe_sched_job_arm(struct xe_sched_job *job)
+{
+ struct xe_exec_queue *q = job->q;
+ struct xe_vm *vm = q->vm;
+
+ if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
+ (vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
+ xe_vm_assert_held(vm);
+ q->tlb_flush_seqno = vm->tlb_flush_seqno;
+ job->ring_ops_flush_tlb = true;
+ }
+
+ drm_sched_job_arm(&job->drm);
+}
+
+void xe_sched_job_push(struct xe_sched_job *job)
+{
+ xe_sched_job_get(job);
+ trace_xe_sched_job_exec(job);
+ drm_sched_entity_push_job(&job->drm);
+ xe_sched_job_put(job);
+}
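+
+/*
+ * Lifecycle sketch (illustrative, not part of this patch): the expected
+ * caller sequence is roughly
+ *
+ *	job = xe_sched_job_create(q, batch_addr);
+ *	... add dependencies, e.g. via xe_sync_entry_add_deps() ...
+ *	xe_sched_job_arm(job);		// seals the job's scheduler fence
+ *	xe_sched_job_push(job);		// hands the job to the DRM scheduler
+ *
+ * after which the job is owned by the scheduler and is freed through
+ * xe_sched_job_put() once its refcount drops to zero.
+ */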
+
+/**
+ * xe_sched_job_last_fence_add_dep - Add last fence dependency to job
+ * @job: job to add the last fence dependency to
+ * @vm: virtual memory job belongs to
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
+{
+ struct dma_fence *fence;
+
+ fence = xe_exec_queue_last_fence_get(job->q, vm);
+
+ return drm_sched_job_add_dependency(&job->drm, fence);
+}
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
new file mode 100644
index 000000000..34f475ba7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_SCHED_JOB_H_
+#define _XE_SCHED_JOB_H_
+
+#include "xe_sched_job_types.h"
+
+struct xe_vm;
+
+#define XE_SCHED_HANG_LIMIT 1
+#define XE_SCHED_JOB_TIMEOUT LONG_MAX
+
+int xe_sched_job_module_init(void);
+void xe_sched_job_module_exit(void);
+
+struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
+ u64 *batch_addr);
+void xe_sched_job_destroy(struct kref *ref);
+
+/**
+ * xe_sched_job_get - get reference to XE schedule job
+ * @job: XE schedule job object
+ *
+ * Increment XE schedule job's reference count
+ */
+static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
+{
+ kref_get(&job->refcount);
+ return job;
+}
+
+/**
+ * xe_sched_job_put - put reference to XE schedule job
+ * @job: XE schedule job object
+ *
+ * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
+ * reference count == 0.
+ */
+static inline void xe_sched_job_put(struct xe_sched_job *job)
+{
+ kref_put(&job->refcount, xe_sched_job_destroy);
+}
+
+void xe_sched_job_set_error(struct xe_sched_job *job, int error);
+static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
+{
+ return job->fence->error < 0;
+}
+
+bool xe_sched_job_started(struct xe_sched_job *job);
+bool xe_sched_job_completed(struct xe_sched_job *job);
+
+void xe_sched_job_arm(struct xe_sched_job *job);
+void xe_sched_job_push(struct xe_sched_job *job);
+
+int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
+
+static inline struct xe_sched_job *
+to_xe_sched_job(struct drm_sched_job *drm)
+{
+ return container_of(drm, struct xe_sched_job, drm);
+}
+
+static inline u32 xe_sched_job_seqno(struct xe_sched_job *job)
+{
+ return job->fence->seqno;
+}
+
+static inline void
+xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
+{
+ job->migrate_flush_flags = flags;
+}
+
+bool xe_sched_job_is_migration(struct xe_exec_queue *q);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
new file mode 100644
index 000000000..46ead6345
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_SCHED_JOB_TYPES_H_
+#define _XE_SCHED_JOB_TYPES_H_
+
+#include <linux/kref.h>
+
+#include <drm/gpu_scheduler.h>
+
+struct xe_exec_queue;
+
+/**
+ * struct xe_sched_job - XE schedule job (batch buffer tracking)
+ */
+struct xe_sched_job {
+ /** @drm: base DRM scheduler job */
+ struct drm_sched_job drm;
+ /** @q: Exec queue */
+ struct xe_exec_queue *q;
+ /** @refcount: ref count of this job */
+ struct kref refcount;
+ /**
+ * @fence: dma fence to indicate completion. 1 way relationship - job
+ * can safely reference fence, fence cannot safely reference job.
+ */
+#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS
+ struct dma_fence *fence;
+ /** @user_fence: write back value when BB is complete */
+ struct {
+ /** @used: user fence is used */
+ bool used;
+ /** @addr: address to write to */
+ u64 addr;
+ /** @value: write back value */
+ u64 value;
+ } user_fence;
+ /** @migrate_flush_flags: Additional flush flags for migration jobs */
+ u32 migrate_flush_flags;
+ /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
+ bool ring_ops_flush_tlb;
+ /** @batch_addr: batch buffer address of job */
+ u64 batch_addr[];
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
new file mode 100644
index 000000000..42a0e0c91
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_assert.h"
+#include "xe_sriov.h"
+
+/**
+ * xe_sriov_mode_to_string - Convert enum value to string.
+ * @mode: the &xe_sriov_mode to convert
+ *
+ * Returns: SR-IOV mode as a user friendly string.
+ */
+const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
+{
+ switch (mode) {
+ case XE_SRIOV_MODE_NONE:
+ return "none";
+ case XE_SRIOV_MODE_PF:
+ return "SR-IOV PF";
+ case XE_SRIOV_MODE_VF:
+ return "SR-IOV VF";
+ default:
+ return "<invalid>";
+ }
+}
+
+/**
+ * xe_sriov_probe_early - Probe a SR-IOV mode.
+ * @xe: the &xe_device to probe mode on
+ * @has_sriov: flag indicating hardware support for SR-IOV
+ *
+ * This function should be called only once and as soon as possible during
+ * driver probe to detect whether we are running a SR-IOV Physical Function
+ * (PF) or a Virtual Function (VF) device.
+ *
+ * SR-IOV PF mode detection is based on the PCI dev_is_pf() function.
+ * SR-IOV VF mode detection is based on dedicated MMIO register read.
+ */
+void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov)
+{
+ enum xe_sriov_mode mode = XE_SRIOV_MODE_NONE;
+
+ /* TODO: replace with proper mode detection */
+ xe_assert(xe, !has_sriov);
+
+ xe_assert(xe, !xe->sriov.__mode);
+ xe->sriov.__mode = mode;
+ xe_assert(xe, xe->sriov.__mode);
+
+ if (has_sriov)
+ drm_info(&xe->drm, "Running in %s mode\n",
+ xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
new file mode 100644
index 000000000..5af73a317
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_H_
+#define _XE_SRIOV_H_
+
+#include "xe_assert.h"
+#include "xe_device_types.h"
+#include "xe_sriov_types.h"
+
+const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode);
+
+void xe_sriov_probe_early(struct xe_device *xe, bool has_sriov);
+
+static inline enum xe_sriov_mode xe_device_sriov_mode(struct xe_device *xe)
+{
+ xe_assert(xe, xe->sriov.__mode);
+ return xe->sriov.__mode;
+}
+
+static inline bool xe_device_is_sriov_pf(struct xe_device *xe)
+{
+ return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_PF;
+}
+
+static inline bool xe_device_is_sriov_vf(struct xe_device *xe)
+{
+ return xe_device_sriov_mode(xe) == XE_SRIOV_MODE_VF;
+}
+
+#ifdef CONFIG_PCI_IOV
+#define IS_SRIOV_PF(xe) xe_device_is_sriov_pf(xe)
+#else
+#define IS_SRIOV_PF(xe) (typecheck(struct xe_device *, (xe)) && false)
+#endif
+#define IS_SRIOV_VF(xe) xe_device_is_sriov_vf(xe)
+
+#define IS_SRIOV(xe) (IS_SRIOV_PF(xe) || IS_SRIOV_VF(xe))
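+
+/*
+ * Example (illustrative; pf_specific_init() is a hypothetical helper): with
+ * CONFIG_PCI_IOV disabled, IS_SRIOV_PF() folds to false at compile time while
+ * still typechecking its argument:
+ *
+ *	if (IS_SRIOV_PF(xe))
+ *		return pf_specific_init(xe);
+ */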
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_printk.h b/drivers/gpu/drm/xe/xe_sriov_printk.h
new file mode 100644
index 000000000..117e1d541
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_printk.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PRINTK_H_
+#define _XE_SRIOV_PRINTK_H_
+
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+#include "xe_sriov_types.h"
+
+#define xe_sriov_printk_prefix(xe) \
+ ((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \
+ (xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "")
+
+#define xe_sriov_printk(xe, _level, fmt, ...) \
+ drm_##_level(&(xe)->drm, "%s" fmt, xe_sriov_printk_prefix(xe), ##__VA_ARGS__)
+
+#define xe_sriov_err(xe, fmt, ...) \
+ xe_sriov_printk((xe), err, fmt, ##__VA_ARGS__)
+
+#define xe_sriov_err_ratelimited(xe, fmt, ...) \
+ xe_sriov_printk((xe), err_ratelimited, fmt, ##__VA_ARGS__)
+
+#define xe_sriov_warn(xe, fmt, ...) \
+ xe_sriov_printk((xe), warn, fmt, ##__VA_ARGS__)
+
+#define xe_sriov_notice(xe, fmt, ...) \
+ xe_sriov_printk((xe), notice, fmt, ##__VA_ARGS__)
+
+#define xe_sriov_info(xe, fmt, ...) \
+ xe_sriov_printk((xe), info, fmt, ##__VA_ARGS__)
+
+#define xe_sriov_dbg(xe, fmt, ...) \
+ xe_sriov_printk((xe), dbg, fmt, ##__VA_ARGS__)
+
+/* for low level noisy debug messages */
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+#define xe_sriov_dbg_verbose(xe, fmt, ...) xe_sriov_dbg(xe, fmt, ##__VA_ARGS__)
+#else
+#define xe_sriov_dbg_verbose(xe, fmt, ...) typecheck(struct xe_device *, (xe))
+#endif
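+
+/*
+ * Example (illustrative; num_vfs is a made-up variable): the helpers above
+ * expand to the matching drm_*() call with a mode prefix, so on a PF
+ *
+ *	xe_sriov_info(xe, "Enabled %u VFs\n", num_vfs);
+ *
+ * is logged with a "PF: " prefix.
+ */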
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
new file mode 100644
index 000000000..999a4311b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_TYPES_H_
+#define _XE_SRIOV_TYPES_H_
+
+#include <linux/build_bug.h>
+
+/**
+ * enum xe_sriov_mode - SR-IOV mode
+ * @XE_SRIOV_MODE_NONE: bare-metal mode (non-virtualized)
+ * @XE_SRIOV_MODE_PF: SR-IOV Physical Function (PF) mode
+ * @XE_SRIOV_MODE_VF: SR-IOV Virtual Function (VF) mode
+ */
+enum xe_sriov_mode {
+ /*
+	 * Note: We don't use the default enum value 0 so that we can catch any
+	 * premature attempt to check the SR-IOV mode before the actual probe.
+ */
+ XE_SRIOV_MODE_NONE = 1,
+ XE_SRIOV_MODE_PF,
+ XE_SRIOV_MODE_VF,
+};
+static_assert(XE_SRIOV_MODE_NONE);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c
new file mode 100644
index 000000000..eaf1b718f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_step.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_step.h"
+
+#include <linux/bitfield.h>
+
+#include "xe_device.h"
+#include "xe_platform_types.h"
+
+/*
+ * Provide mapping between PCI's revision ID to the individual GMD
+ * (Graphics/Media/Display) stepping values that can be compared numerically.
+ *
+ * Some platforms may have unusual ways of mapping PCI revision ID to GMD
+ * steppings. E.g., in some cases a higher PCI revision may translate to a
+ * lower stepping of the GT and/or display IP.
+ *
+ * Also note that some revisions/steppings may have been set aside as
+ * placeholders but never materialized in real hardware; in those cases there
+ * may be jumps in the revision IDs or stepping values in the tables below.
+ */
+
+/*
+ * Some platforms always have the same stepping value for GT and display;
+ * use a macro to define these to make it easier to identify the platforms
+ * where the two steppings can deviate.
+ */
+#define COMMON_GT_MEDIA_STEP(x_) \
+ .graphics = STEP_##x_, \
+ .media = STEP_##x_
+
+#define COMMON_STEP(x_) \
+	COMMON_GT_MEDIA_STEP(x_), \
+	.display = STEP_##x_
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
+
+/* Same GT stepping between tgl_uy_revids and tgl_revids doesn't mean the same HW */
+static const struct xe_step_info tgl_revids[] = {
+ [0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 },
+ [1] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_D0 },
+};
+
+static const struct xe_step_info dg1_revids[] = {
+ [0] = { COMMON_STEP(A0) },
+ [1] = { COMMON_STEP(B0) },
+};
+
+static const struct xe_step_info adls_revids[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A2 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_B0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 },
+};
+
+static const struct xe_step_info adls_rpls_revids[] = {
+ [0x4] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_D0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display = STEP_C0 },
+};
+
+static const struct xe_step_info adlp_revids[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 },
+ [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_D0 },
+};
+
+static const struct xe_step_info adlp_rpl_revids[] = {
+ [0x4] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_E0 },
+};
+
+static const struct xe_step_info adln_revids[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_D0 },
+};
+
+static const struct xe_step_info dg2_g10_revid_step_tbl[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_A0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_A0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_B0 },
+ [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display = STEP_C0 },
+};
+
+static const struct xe_step_info dg2_g11_revid_step_tbl[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_B0 },
+ [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display = STEP_C0 },
+ [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display = STEP_C0 },
+};
+
+static const struct xe_step_info dg2_g12_revid_step_tbl[] = {
+ [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display = STEP_C0 },
+ [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display = STEP_C0 },
+};
+
+static const struct xe_step_info pvc_revid_step_tbl[] = {
+ [0x5] = { .graphics = STEP_B0 },
+ [0x6] = { .graphics = STEP_B1 },
+ [0x7] = { .graphics = STEP_C0 },
+};
+
+static const int pvc_basedie_subids[] = {
+ [0x3] = STEP_B0,
+ [0x4] = STEP_B1,
+ [0x5] = STEP_B3,
+};
+
+__diag_pop();
+
+/**
+ * xe_step_pre_gmdid_get - Determine IP steppings from PCI revid
+ * @xe: Xe device
+ *
+ * Convert the PCI revid into proper IP steppings. This should only be
+ * used on platforms that do not have GMD_ID support.
+ */
+struct xe_step_info xe_step_pre_gmdid_get(struct xe_device *xe)
+{
+ const struct xe_step_info *revids = NULL;
+ struct xe_step_info step = {};
+ u16 revid = xe->info.revid;
+ int size = 0;
+ const int *basedie_info = NULL;
+ int basedie_size = 0;
+ int baseid = 0;
+
+ if (xe->info.platform == XE_PVC) {
+ baseid = FIELD_GET(GENMASK(5, 3), xe->info.revid);
+ revid = FIELD_GET(GENMASK(2, 0), xe->info.revid);
+ revids = pvc_revid_step_tbl;
+ size = ARRAY_SIZE(pvc_revid_step_tbl);
+ basedie_info = pvc_basedie_subids;
+ basedie_size = ARRAY_SIZE(pvc_basedie_subids);
+ } else if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10) {
+ revids = dg2_g10_revid_step_tbl;
+ size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
+ } else if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G11) {
+ revids = dg2_g11_revid_step_tbl;
+ size = ARRAY_SIZE(dg2_g11_revid_step_tbl);
+ } else if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G12) {
+ revids = dg2_g12_revid_step_tbl;
+ size = ARRAY_SIZE(dg2_g12_revid_step_tbl);
+ } else if (xe->info.platform == XE_ALDERLAKE_N) {
+ revids = adln_revids;
+ size = ARRAY_SIZE(adln_revids);
+ } else if (xe->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_S_RPLS) {
+ revids = adls_rpls_revids;
+ size = ARRAY_SIZE(adls_rpls_revids);
+ } else if (xe->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU) {
+ revids = adlp_rpl_revids;
+ size = ARRAY_SIZE(adlp_rpl_revids);
+ } else if (xe->info.platform == XE_ALDERLAKE_P) {
+ revids = adlp_revids;
+ size = ARRAY_SIZE(adlp_revids);
+ } else if (xe->info.platform == XE_ALDERLAKE_S) {
+ revids = adls_revids;
+ size = ARRAY_SIZE(adls_revids);
+ } else if (xe->info.platform == XE_DG1) {
+ revids = dg1_revids;
+ size = ARRAY_SIZE(dg1_revids);
+ } else if (xe->info.platform == XE_TIGERLAKE) {
+ revids = tgl_revids;
+ size = ARRAY_SIZE(tgl_revids);
+ }
+
+ /* Not using the stepping scheme for the platform yet. */
+ if (!revids)
+ return step;
+
+ if (revid < size && revids[revid].graphics != STEP_NONE) {
+ step = revids[revid];
+ } else {
+ drm_warn(&xe->drm, "Unknown revid 0x%02x\n", revid);
+
+ /*
+ * If we hit a gap in the revid array, use the information for
+ * the next revid.
+ *
+ * This may be wrong in all sorts of ways, especially if the
+ * steppings in the array are not monotonically increasing, but
+ * it's better than defaulting to 0.
+ */
+ while (revid < size && revids[revid].graphics == STEP_NONE)
+ revid++;
+
+ if (revid < size) {
+ drm_dbg(&xe->drm, "Using steppings for revid 0x%02x\n",
+ revid);
+ step = revids[revid];
+ } else {
+ drm_dbg(&xe->drm, "Using future steppings\n");
+ step.graphics = STEP_FUTURE;
+ step.display = STEP_FUTURE;
+ }
+ }
+
+ drm_WARN_ON(&xe->drm, step.graphics == STEP_NONE);
+
+ if (basedie_info && basedie_size) {
+ if (baseid < basedie_size && basedie_info[baseid] != STEP_NONE) {
+ step.basedie = basedie_info[baseid];
+ } else {
+ drm_warn(&xe->drm, "Unknown baseid 0x%02x\n", baseid);
+ step.basedie = STEP_FUTURE;
+ }
+ }
+
+ return step;
+}
+
+/**
+ * xe_step_gmdid_get - Determine IP steppings from GMD_ID revid fields
+ * @xe: Xe device
+ * @graphics_gmdid_revid: value of graphics GMD_ID register's revid field
+ * @media_gmdid_revid: value of media GMD_ID register's revid field
+ *
+ * Convert the revid fields of the GMD_ID registers into proper IP steppings.
+ *
+ * GMD_ID revid values are currently expected to have consistent meanings on
+ * all platforms: major steppings (A0, B0, etc.) are 4 apart, with minor
+ * steppings (A1, A2, etc.) taking the values in between.
+ */
+struct xe_step_info xe_step_gmdid_get(struct xe_device *xe,
+ u32 graphics_gmdid_revid,
+ u32 media_gmdid_revid)
+{
+ struct xe_step_info step = {
+ .graphics = STEP_A0 + graphics_gmdid_revid,
+ .media = STEP_A0 + media_gmdid_revid,
+ };
+
+ if (step.graphics >= STEP_FUTURE) {
+ step.graphics = STEP_FUTURE;
+ drm_dbg(&xe->drm, "Graphics GMD_ID revid value %d treated as future stepping\n",
+ graphics_gmdid_revid);
+ }
+
+ if (step.media >= STEP_FUTURE) {
+ step.media = STEP_FUTURE;
+ drm_dbg(&xe->drm, "Media GMD_ID revid value %d treated as future stepping\n",
+ media_gmdid_revid);
+ }
+
+ return step;
+}
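+
+/*
+ * Worked example of the mapping above (illustrative): a GMD_ID revid of 0
+ * decodes to STEP_A0, 1 to STEP_A1, 4 to STEP_B0 and 5 to STEP_B1; values at
+ * or beyond STEP_FUTURE are clamped to STEP_FUTURE by the checks above.
+ */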
+
+#define STEP_NAME_CASE(name) \
+ case STEP_##name: \
+ return #name;
+
+const char *xe_step_name(enum xe_step step)
+{
+ switch (step) {
+ STEP_NAME_LIST(STEP_NAME_CASE);
+
+ default:
+ return "**";
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_step.h b/drivers/gpu/drm/xe/xe_step.h
new file mode 100644
index 000000000..686cb5920
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_step.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_STEP_H_
+#define _XE_STEP_H_
+
+#include <linux/types.h>
+
+#include "xe_step_types.h"
+
+struct xe_device;
+
+struct xe_step_info xe_step_pre_gmdid_get(struct xe_device *xe);
+struct xe_step_info xe_step_gmdid_get(struct xe_device *xe,
+ u32 graphics_gmdid_revid,
+ u32 media_gmdid_revid);
+static inline u32 xe_step_to_gmdid(enum xe_step step) { return step - STEP_A0; }
+
+const char *xe_step_name(enum xe_step step);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_step_types.h b/drivers/gpu/drm/xe/xe_step_types.h
new file mode 100644
index 000000000..ccc9b4795
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_step_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_STEP_TYPES_H_
+#define _XE_STEP_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_step_info {
+ u8 graphics;
+ u8 media;
+ u8 display;
+ u8 basedie;
+};
+
+#define STEP_ENUM_VAL(name) STEP_##name,
+
+#define STEP_NAME_LIST(func) \
+ func(A0) \
+ func(A1) \
+ func(A2) \
+ func(A3) \
+ func(B0) \
+ func(B1) \
+ func(B2) \
+ func(B3) \
+ func(C0) \
+ func(C1) \
+ func(C2) \
+ func(C3) \
+ func(D0) \
+ func(D1) \
+ func(D2) \
+ func(D3) \
+ func(E0)
+
+/*
+ * Symbolic steppings that do not match the hardware. These are valid both as gt
+ * and display steppings as symbolic names.
+ */
+enum xe_step {
+ STEP_NONE = 0,
+ STEP_NAME_LIST(STEP_ENUM_VAL)
+ STEP_FUTURE,
+ STEP_FOREVER,
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
new file mode 100644
index 000000000..02c9577fe
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_sync.h"
+
+#include <linux/dma-fence-array.h>
+#include <linux/kthread.h>
+#include <linux/sched/mm.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_syncobj.h>
+#include <drm/xe_drm.h>
+
+#include "xe_device_types.h"
+#include "xe_exec_queue.h"
+#include "xe_macros.h"
+#include "xe_sched_job_types.h"
+
+struct xe_user_fence {
+ struct xe_device *xe;
+ struct kref refcount;
+ struct dma_fence_cb cb;
+ struct work_struct worker;
+ struct mm_struct *mm;
+ u64 __user *addr;
+ u64 value;
+ int signalled;
+};
+
+static void user_fence_destroy(struct kref *kref)
+{
+ struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
+ refcount);
+
+ mmdrop(ufence->mm);
+ kfree(ufence);
+}
+
+static void user_fence_get(struct xe_user_fence *ufence)
+{
+ kref_get(&ufence->refcount);
+}
+
+static void user_fence_put(struct xe_user_fence *ufence)
+{
+ kref_put(&ufence->refcount, user_fence_destroy);
+}
+
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+ u64 value)
+{
+ struct xe_user_fence *ufence;
+
+ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+ if (!ufence)
+ return NULL;
+
+ ufence->xe = xe;
+ kref_init(&ufence->refcount);
+ ufence->addr = u64_to_user_ptr(addr);
+ ufence->value = value;
+ ufence->mm = current->mm;
+ mmgrab(ufence->mm);
+
+ return ufence;
+}
+
+static void user_fence_worker(struct work_struct *w)
+{
+ struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
+
+ if (mmget_not_zero(ufence->mm)) {
+ kthread_use_mm(ufence->mm);
+ if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
+ XE_WARN_ON("Copy to user failed");
+ kthread_unuse_mm(ufence->mm);
+ mmput(ufence->mm);
+ }
+
+ wake_up_all(&ufence->xe->ufence_wq);
+ WRITE_ONCE(ufence->signalled, 1);
+ user_fence_put(ufence);
+}
+
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
+{
+ INIT_WORK(&ufence->worker, user_fence_worker);
+ queue_work(ufence->xe->ordered_wq, &ufence->worker);
+ dma_fence_put(fence);
+}
+
+static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
+
+ kick_ufence(ufence, fence);
+}
+
+int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
+ struct xe_sync_entry *sync,
+ struct drm_xe_sync __user *sync_user,
+ unsigned int flags)
+{
+ struct drm_xe_sync sync_in;
+ int err;
+ bool exec = flags & SYNC_PARSE_FLAG_EXEC;
+ bool in_lr_mode = flags & SYNC_PARSE_FLAG_LR_MODE;
+ bool disallow_user_fence = flags & SYNC_PARSE_FLAG_DISALLOW_USER_FENCE;
+ bool signal;
+
+ if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, sync_in.flags & ~DRM_XE_SYNC_FLAG_SIGNAL) ||
+ XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
+ return -EINVAL;
+
+ signal = sync_in.flags & DRM_XE_SYNC_FLAG_SIGNAL;
+ switch (sync_in.type) {
+ case DRM_XE_SYNC_TYPE_SYNCOBJ:
+ if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
+ return -EOPNOTSUPP;
+
+ if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
+ return -EINVAL;
+
+ sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
+ if (XE_IOCTL_DBG(xe, !sync->syncobj))
+ return -ENOENT;
+
+ if (!signal) {
+ sync->fence = drm_syncobj_fence_get(sync->syncobj);
+ if (XE_IOCTL_DBG(xe, !sync->fence))
+ return -EINVAL;
+ }
+ break;
+
+ case DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ:
+ if (XE_IOCTL_DBG(xe, in_lr_mode && signal))
+ return -EOPNOTSUPP;
+
+ if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, sync_in.timeline_value == 0))
+ return -EINVAL;
+
+ sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
+ if (XE_IOCTL_DBG(xe, !sync->syncobj))
+ return -ENOENT;
+
+ if (signal) {
+ sync->chain_fence = dma_fence_chain_alloc();
+ if (!sync->chain_fence)
+ return -ENOMEM;
+ } else {
+ sync->fence = drm_syncobj_fence_get(sync->syncobj);
+ if (XE_IOCTL_DBG(xe, !sync->fence))
+ return -EINVAL;
+
+ err = dma_fence_chain_find_seqno(&sync->fence,
+ sync_in.timeline_value);
+ if (err)
+ return err;
+ }
+ break;
+
+ case DRM_XE_SYNC_TYPE_USER_FENCE:
+ if (XE_IOCTL_DBG(xe, disallow_user_fence))
+ return -EOPNOTSUPP;
+
+ if (XE_IOCTL_DBG(xe, !signal))
+ return -EOPNOTSUPP;
+
+ if (XE_IOCTL_DBG(xe, sync_in.addr & 0x7))
+ return -EINVAL;
+
+ if (exec) {
+ sync->addr = sync_in.addr;
+ } else {
+ sync->ufence = user_fence_create(xe, sync_in.addr,
+ sync_in.timeline_value);
+ if (XE_IOCTL_DBG(xe, !sync->ufence))
+ return -ENOMEM;
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ sync->type = sync_in.type;
+ sync->flags = sync_in.flags;
+ sync->timeline_value = sync_in.timeline_value;
+
+ return 0;
+}
+
+int xe_sync_entry_wait(struct xe_sync_entry *sync)
+{
+ if (sync->fence)
+ dma_fence_wait(sync->fence, true);
+
+ return 0;
+}
+
+int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
+{
+ int err;
+
+ if (sync->fence) {
+ err = drm_sched_job_add_dependency(&job->drm,
+ dma_fence_get(sync->fence));
+ if (err) {
+ dma_fence_put(sync->fence);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void xe_sync_entry_signal(struct xe_sync_entry *sync, struct xe_sched_job *job,
+ struct dma_fence *fence)
+{
+ if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
+ return;
+
+ if (sync->chain_fence) {
+ drm_syncobj_add_point(sync->syncobj, sync->chain_fence,
+ fence, sync->timeline_value);
+ /*
+ * The chain's ownership is transferred to the
+ * timeline.
+ */
+ sync->chain_fence = NULL;
+ } else if (sync->syncobj) {
+ drm_syncobj_replace_fence(sync->syncobj, fence);
+ } else if (sync->ufence) {
+ int err;
+
+ dma_fence_get(fence);
+ user_fence_get(sync->ufence);
+ err = dma_fence_add_callback(fence, &sync->ufence->cb,
+ user_fence_cb);
+ if (err == -ENOENT) {
+ kick_ufence(sync->ufence, fence);
+ } else if (err) {
+ XE_WARN_ON("failed to add user fence");
+ user_fence_put(sync->ufence);
+ dma_fence_put(fence);
+ }
+ } else if (sync->type == DRM_XE_SYNC_TYPE_USER_FENCE) {
+ job->user_fence.used = true;
+ job->user_fence.addr = sync->addr;
+ job->user_fence.value = sync->timeline_value;
+ }
+}
+
+void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
+{
+ if (sync->syncobj)
+ drm_syncobj_put(sync->syncobj);
+ if (sync->fence)
+ dma_fence_put(sync->fence);
+ if (sync->chain_fence)
+ dma_fence_put(&sync->chain_fence->base);
+ if (sync->ufence)
+ user_fence_put(sync->ufence);
+}
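+
+/*
+ * Lifecycle sketch (illustrative, not part of this patch): an ioctl typically
+ * walks its user-supplied sync array roughly as
+ *
+ *	xe_sync_entry_parse(xe, xef, &syncs[i], &user_syncs[i], flags);
+ *	xe_sync_entry_add_deps(&syncs[i], job);		// wait on in-fences
+ *	xe_sync_entry_signal(&syncs[i], job, fence);	// install out-fences
+ *	xe_sync_entry_cleanup(&syncs[i]);		// drop the references
+ */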
+
+/**
+ * xe_sync_in_fence_get() - Get a fence from syncs, exec queue, and VM
+ * @sync: input syncs
+ * @num_sync: number of syncs
+ * @q: exec queue
+ * @vm: VM
+ *
+ * Get a fence from syncs, exec queue, and VM. If syncs contain in-fences create
+ * and return a composite fence of all in-fences + last fence. If no in-fences
+ * return last fence on input exec queue. Caller must drop reference to
+ * returned fence.
+ *
+ * Return: fence on success, ERR_PTR(-ENOMEM) on failure
+ */
+struct dma_fence *
+xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
+ struct xe_exec_queue *q, struct xe_vm *vm)
+{
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
+ struct dma_fence *fence;
+ int i, num_in_fence = 0, current_fence = 0;
+
+ lockdep_assert_held(&vm->lock);
+
+ /* Count in-fences */
+ for (i = 0; i < num_sync; ++i) {
+ if (sync[i].fence) {
+ ++num_in_fence;
+ fence = sync[i].fence;
+ }
+ }
+
+ /* Easy case... */
+ if (!num_in_fence) {
+ fence = xe_exec_queue_last_fence_get(q, vm);
+ return fence;
+ }
+
+ /* Create composite fence */
+ fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; i < num_sync; ++i) {
+ if (sync[i].fence) {
+ dma_fence_get(sync[i].fence);
+ fences[current_fence++] = sync[i].fence;
+ }
+ }
+ fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
+ cf = dma_fence_array_create(num_in_fence, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --vm->composite_fence_seqno;
+ goto err_out;
+ }
+
+ return &cf->base;
+
+err_out:
+ while (current_fence)
+ dma_fence_put(fences[--current_fence]);
+ kfree(fences);
+ kfree(cf);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+ user_fence_get(sync->ufence);
+
+ return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+ user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 if not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+ return READ_ONCE(ufence->signalled);
+}
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
new file mode 100644
index 000000000..0fd0d5120
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_SYNC_H_
+#define _XE_SYNC_H_
+
+#include "xe_sync_types.h"
+
+struct xe_device;
+struct xe_exec_queue;
+struct xe_file;
+struct xe_sched_job;
+struct xe_vm;
+
+#define SYNC_PARSE_FLAG_EXEC BIT(0)
+#define SYNC_PARSE_FLAG_LR_MODE BIT(1)
+#define SYNC_PARSE_FLAG_DISALLOW_USER_FENCE BIT(2)
+
+int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
+ struct xe_sync_entry *sync,
+ struct drm_xe_sync __user *sync_user,
+ unsigned int flags);
+int xe_sync_entry_wait(struct xe_sync_entry *sync);
+int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
+ struct xe_sched_job *job);
+void xe_sync_entry_signal(struct xe_sync_entry *sync,
+ struct xe_sched_job *job,
+ struct dma_fence *fence);
+void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
+struct dma_fence *
+xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
+ struct xe_exec_queue *q, struct xe_vm *vm);
+
+static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
+{
+ return !!sync->ufence;
+}
+
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
new file mode 100644
index 000000000..30ac3f519
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_SYNC_TYPES_H_
+#define _XE_SYNC_TYPES_H_
+
+#include <linux/types.h>
+
+struct drm_syncobj;
+struct dma_fence;
+struct dma_fence_chain;
+struct drm_xe_sync;
+struct xe_user_fence;
+
+struct xe_sync_entry {
+ struct drm_syncobj *syncobj;
+ struct dma_fence *fence;
+ struct dma_fence_chain *chain_fence;
+ struct xe_user_fence *ufence;
+ u64 addr;
+ u64 timeline_value;
+ u32 type;
+ u32 flags;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
new file mode 100644
index 000000000..0650b2fa7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_migrate.h"
+#include "xe_sa.h"
+#include "xe_tile.h"
+#include "xe_tile_sysfs.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_wa.h"
+
+/**
+ * DOC: Multi-tile Design
+ *
+ * Different vendors use the term "tile" a bit differently, but in the Intel
+ * world, a 'tile' is pretty close to what most people would think of as being
+ * a complete GPU. When multiple GPUs are placed behind a single PCI device,
+ * that's what is referred to as a "multi-tile device." In such cases, pretty
+ * much all hardware is replicated per-tile, although certain responsibilities
+ * like PCI communication, reporting of interrupts to the OS, etc. are handled
+ * solely by the "root tile." A multi-tile platform takes care of tying the
+ * tiles together in a way such that interrupt notifications from remote tiles
+ * are forwarded to the root tile, the per-tile vram is combined into a single
+ * address space, etc.
+ *
+ * In contrast, a "GT" (which officially stands for "Graphics Technology") is
+ * the subset of a GPU/tile that is responsible for implementing graphics
+ * and/or media operations. The GT is where a lot of the driver implementation
+ * happens since it's where the hardware engines, the execution units, and the
+ * GuC all reside.
+ *
+ * Historically most Intel devices were single-tile devices that contained a
+ * single GT. PVC is an example of an Intel platform built on a multi-tile
+ * design (i.e., multiple GPUs behind a single PCI device); each PVC tile only
+ * has a single GT. In contrast, platforms like MTL that have separate chips
+ * for render and media IP are still only a single logical GPU, but the
+ * graphics and media IP blocks are each exposed as a separate GT within that
+ * single GPU. This is important from a software perspective because multi-GT
+ * platforms like MTL only replicate a subset of the GPU hardware and behave
+ * differently than multi-tile platforms like PVC where nearly everything is
+ * replicated.
+ *
+ * Per-tile functionality (shared by all GTs within the tile):
+ * - Complete 4MB MMIO space (containing SGunit/SoC registers, GT
+ * registers, display registers, etc.)
+ * - Global GTT
+ * - VRAM (if discrete)
+ * - Interrupt flows
+ * - Migration context
+ * - kernel batchbuffer pool
+ * - Primary GT
+ * - Media GT (if media version >= 13)
+ *
+ * Per-GT functionality:
+ * - GuC
+ * - Hardware engines
+ * - Programmable hardware units (subslices, EUs)
+ * - GSI subset of registers (multiple copies of these registers reside
+ * within the complete MMIO space provided by the tile, but at different
+ * offsets --- 0 for render, 0x380000 for media)
+ * - Multicast register steering
+ * - TLBs to cache page table translations
+ * - Reset capability
+ * - Low-level power management (e.g., C6)
+ * - Clock frequency
+ * - MOCS and PAT programming
+ */
+
+/**
+ * xe_tile_alloc - Perform per-tile memory allocation
+ * @tile: Tile to perform allocations for
+ *
+ * Allocates various per-tile data structures using DRM-managed allocations.
+ * Does not touch the hardware.
+ *
+ * Returns -ENOMEM if allocations fail, otherwise 0.
+ */
+static int xe_tile_alloc(struct xe_tile *tile)
+{
+ struct drm_device *drm = &tile_to_xe(tile)->drm;
+
+ tile->mem.ggtt = drmm_kzalloc(drm, sizeof(*tile->mem.ggtt),
+ GFP_KERNEL);
+ if (!tile->mem.ggtt)
+ return -ENOMEM;
+ tile->mem.ggtt->tile = tile;
+
+ tile->mem.vram_mgr = drmm_kzalloc(drm, sizeof(*tile->mem.vram_mgr), GFP_KERNEL);
+ if (!tile->mem.vram_mgr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * xe_tile_init_early - Initialize the tile and primary GT
+ * @tile: Tile to initialize
+ * @xe: Parent Xe device
+ * @id: Tile ID
+ *
+ * Initializes per-tile resources that don't require any interactions with the
+ * hardware or any knowledge about the Graphics/Media IP version.
+ *
+ * Returns: 0 on success, negative error code on error.
+ */
+int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
+{
+ int err;
+
+ tile->xe = xe;
+ tile->id = id;
+
+ err = xe_tile_alloc(tile);
+ if (err)
+ return err;
+
+ tile->primary_gt = xe_gt_alloc(tile);
+ if (IS_ERR(tile->primary_gt))
+ return PTR_ERR(tile->primary_gt);
+
+ return 0;
+}
+
+static int tile_ttm_mgr_init(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ int err;
+
+ if (tile->mem.vram.usable_size) {
+ err = xe_ttm_vram_mgr_init(tile, tile->mem.vram_mgr);
+ if (err)
+ return err;
+ xe->info.mem_region_mask |= BIT(tile->id) << 1;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_tile_init_noalloc - Init tile up to the point where allocations can happen.
+ * @tile: The tile to initialize.
+ *
+ * This function prepares the tile to allow memory allocations to VRAM, but is
+ * not allowed to allocate memory itself. This state is useful for display
+ * readout, because the inherited display framebuffer will otherwise be
+ * overwritten as it is usually put at the start of VRAM.
+ *
+ * Note that since this is tile initialization, it should not perform any
+ * GT-specific operations, and thus does not need to hold GT forcewake.
+ *
+ * Returns: 0 on success, negative error code on error.
+ */
+int xe_tile_init_noalloc(struct xe_tile *tile)
+{
+ int err;
+
+ xe_device_mem_access_get(tile_to_xe(tile));
+
+ err = tile_ttm_mgr_init(tile);
+ if (err)
+ goto err_mem_access;
+
+ tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
+ if (IS_ERR(tile->mem.kernel_bb_pool)) {
+ err = PTR_ERR(tile->mem.kernel_bb_pool);
+ goto err_mem_access;
+ }
+ xe_wa_apply_tile_workarounds(tile);
+
+ xe_tile_sysfs_init(tile);
+
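+	/* Success path falls through: err is still 0 here */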
+err_mem_access:
+ xe_device_mem_access_put(tile_to_xe(tile));
+ return err;
+}
+
+void xe_tile_migrate_wait(struct xe_tile *tile)
+{
+ xe_migrate_wait(tile->migrate);
+}
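
As a rough sketch (not part of this patch), the two init entry points above are meant to be sequenced by the device probe code roughly as follows; the for_each_tile() iterator is assumed to be provided elsewhere in the driver, and error unwinding is elided:

static int example_init_tiles(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;
	int err;

	/* Phase 1: software-only setup, before MMIO/VRAM probing */
	for_each_tile(tile, xe, id) {
		err = xe_tile_init_early(tile, xe, id);
		if (err)
			return err;
	}

	/* ... MMIO setup and VRAM probing happen in between ... */

	/* Phase 2: VRAM manager, kernel bb pool, sysfs */
	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			return err;
	}

	return 0;
}

The split matters for display readout: the inherited framebuffer at the start of VRAM must be read out before the driver starts allocating from that region, hence the "noalloc" phase.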
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
new file mode 100644
index 000000000..1c9e42ade
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TILE_H_
+#define _XE_TILE_H_
+
+#include "xe_device_types.h"
+
+struct xe_tile;
+
+int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
+int xe_tile_init_noalloc(struct xe_tile *tile);
+
+void xe_tile_migrate_wait(struct xe_tile *tile);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
new file mode 100644
index 000000000..0f8d3e7fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_tile.h"
+#include "xe_tile_sysfs.h"
+
+static void xe_tile_sysfs_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_type xe_tile_sysfs_kobj_type = {
+ .release = xe_tile_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void tile_sysfs_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_tile *tile = arg;
+
+ kobject_put(tile->sysfs);
+}
+
+void xe_tile_sysfs_init(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = xe->drm.dev;
+ struct kobj_tile *kt;
+ int err;
+
+ kt = kzalloc(sizeof(*kt), GFP_KERNEL);
+ if (!kt)
+ return;
+
+ kobject_init(&kt->base, &xe_tile_sysfs_kobj_type);
+ kt->tile = tile;
+
+ err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
+ if (err) {
+ kobject_put(&kt->base);
+ drm_warn(&xe->drm, "failed to register TILE sysfs directory, err: %d\n", err);
+ return;
+ }
+
+ tile->sysfs = &kt->base;
+
+ err = drmm_add_action_or_reset(&xe->drm, tile_sysfs_fini, tile);
+ if (err)
+ drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
+ __func__, err);
+}
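
For illustration only: a hypothetical read-only attribute hung off the per-tile kobject registered above. The attribute name is made up; kobj_to_tile() comes from xe_tile_sysfs.h, and tile->mem.vram.usable_size is the field used elsewhere in this series:

static ssize_t vram_usable_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct xe_tile *tile = kobj_to_tile(kobj);

	return sysfs_emit(buf, "%llu\n", tile->mem.vram.usable_size);
}

static struct kobj_attribute vram_usable_attr = __ATTR_RO(vram_usable);

/*
 * Registered after tile->sysfs is assigned in xe_tile_sysfs_init():
 *	sysfs_create_file(tile->sysfs, &vram_usable_attr.attr);
 */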
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.h b/drivers/gpu/drm/xe/xe_tile_sysfs.h
new file mode 100644
index 000000000..e4f065039
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SYSFS_H_
+#define _XE_TILE_SYSFS_H_
+
+#include "xe_tile_sysfs_types.h"
+
+void xe_tile_sysfs_init(struct xe_tile *tile);
+
+static inline struct xe_tile *
+kobj_to_tile(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_tile, base)->tile;
+}
+
+#endif /* _XE_TILE_SYSFS_H_ */
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs_types.h b/drivers/gpu/drm/xe/xe_tile_sysfs_types.h
new file mode 100644
index 000000000..75906ba11
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs_types.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SYSFS_TYPES_H_
+#define _XE_TILE_SYSFS_TYPES_H_
+
+#include <linux/kobject.h>
+
+struct xe_tile;
+
+/**
+ * struct kobj_tile - A tile's kobject struct that connects the kobject
+ * and the tile
+ *
+ * When dealing with multiple tiles, this struct identifies which tile a
+ * given sysfs call needs to address.
+ */
+struct kobj_tile {
+ /** @base: The actual kobject */
+ struct kobject base;
+ /** @tile: A pointer to the tile itself */
+ struct xe_tile *tile;
+};
+
+#endif /* _XE_TILE_SYSFS_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_trace.c b/drivers/gpu/drm/xe/xe_trace.c
new file mode 100644
index 000000000..2527c556b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "xe_trace.h"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
new file mode 100644
index 000000000..846f14507
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -0,0 +1,631 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xe
+
+#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _XE_TRACE_H_
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+#include "xe_bo.h"
+#include "xe_bo_types.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gpu_scheduler_types.h"
+#include "xe_gt_tlb_invalidation_types.h"
+#include "xe_gt_types.h"
+#include "xe_guc_exec_queue_types.h"
+#include "xe_sched_job.h"
+#include "xe_vm.h"
+
+DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
+ __field(int, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->fence = fence;
+ __entry->seqno = fence->seqno;
+ ),
+
+ TP_printk("fence=%p, seqno=%d",
+ __entry->fence, __entry->seqno)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
+ xe_gt_tlb_invalidation_fence_work_func,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
+ TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
+ TP_ARGS(fence)
+);
+
+DECLARE_EVENT_CLASS(xe_bo,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo),
+
+ TP_STRUCT__entry(
+ __field(size_t, size)
+ __field(u32, flags)
+ __field(struct xe_vm *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->size = bo->size;
+ __entry->flags = bo->flags;
+ __entry->vm = bo->vm;
+ ),
+
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
+ __entry->size, __entry->flags, __entry->vm)
+);
+
+DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
+TRACE_EVENT(xe_bo_move,
+	TP_PROTO(struct xe_bo *bo, u32 new_placement, u32 old_placement,
+ bool move_lacks_source),
+ TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+ TP_STRUCT__entry(
+ __field(struct xe_bo *, bo)
+ __field(size_t, size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ __array(char, device_id, 12)
+ __field(bool, move_lacks_source)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->size = bo->size;
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+ __entry->move_lacks_source = move_lacks_source;
+ ),
+ TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+ xe_mem_type_to_name[__entry->old_placement],
+ xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
+);
+
+DECLARE_EVENT_CLASS(xe_exec_queue,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __field(enum xe_engine_class, class)
+ __field(u32, logical_mask)
+ __field(u8, gt_id)
+ __field(u16, width)
+ __field(u16, guc_id)
+ __field(u32, guc_state)
+ __field(u32, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->class = q->class;
+ __entry->logical_mask = q->logical_mask;
+ __entry->gt_id = q->gt->info.id;
+ __entry->width = q->width;
+ __entry->guc_id = q->guc->id;
+ __entry->guc_state = atomic_read(&q->guc->state);
+ __entry->flags = q->flags;
+ ),
+
+ TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
+ __entry->class, __entry->logical_mask,
+ __entry->gt_id, __entry->width, __entry->guc_id,
+ __entry->guc_state, __entry->flags)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
+ TP_PROTO(struct xe_exec_queue *q),
+ TP_ARGS(q)
+);
+
+DECLARE_EVENT_CLASS(xe_sched_job,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job),
+
+ TP_STRUCT__entry(
+ __field(u32, seqno)
+ __field(u16, guc_id)
+ __field(u32, guc_state)
+ __field(u32, flags)
+ __field(int, error)
+ __field(u64, fence)
+ __field(u64, batch_addr)
+ ),
+
+ TP_fast_assign(
+ __entry->seqno = xe_sched_job_seqno(job);
+ __entry->guc_id = job->q->guc->id;
+ __entry->guc_state =
+ atomic_read(&job->q->guc->state);
+ __entry->flags = job->q->flags;
+ __entry->error = job->fence->error;
+ __entry->fence = (unsigned long)job->fence;
+ __entry->batch_addr = (u64)job->batch_addr[0];
+ ),
+
+ TP_printk("fence=0x%016llx, seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ __entry->fence, __entry->seqno, __entry->guc_id,
+ __entry->batch_addr, __entry->guc_state,
+ __entry->flags, __entry->error)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
+ TP_PROTO(struct xe_sched_job *job),
+ TP_ARGS(job)
+);
+
+DECLARE_EVENT_CLASS(xe_sched_msg,
+ TP_PROTO(struct xe_sched_msg *msg),
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __field(u32, opcode)
+ __field(u16, guc_id)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = msg->opcode;
+ __entry->guc_id =
+ ((struct xe_exec_queue *)msg->private_data)->guc->id;
+ ),
+
+ TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
+ __entry->opcode)
+);
+
+DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
+ TP_PROTO(struct xe_sched_msg *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
+ TP_PROTO(struct xe_sched_msg *msg),
+ TP_ARGS(msg)
+);
+
+DECLARE_EVENT_CLASS(xe_hw_fence,
+ TP_PROTO(struct xe_hw_fence *fence),
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
+ __field(u64, ctx)
+ __field(u32, seqno)
+ __field(struct xe_hw_fence *, fence)
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = fence->dma.context;
+ __entry->seqno = fence->dma.seqno;
+ __entry->fence = fence;
+ ),
+
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
+ __entry->ctx, __entry->fence, __entry->seqno)
+);
+
+DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
+ TP_PROTO(struct xe_hw_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
+ TP_PROTO(struct xe_hw_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
+ TP_PROTO(struct xe_hw_fence *fence),
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(xe_hw_fence, xe_hw_fence_free,
+ TP_PROTO(struct xe_hw_fence *fence),
+ TP_ARGS(fence)
+);
+
+DECLARE_EVENT_CLASS(xe_vma,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma),
+
+ TP_STRUCT__entry(
+ __field(struct xe_vma *, vma)
+ __field(u32, asid)
+ __field(u64, start)
+ __field(u64, end)
+ __field(u64, ptr)
+ ),
+
+ TP_fast_assign(
+ __entry->vma = vma;
+ __entry->asid = xe_vma_vm(vma)->usm.asid;
+ __entry->start = xe_vma_start(vma);
+ __entry->end = xe_vma_end(vma) - 1;
+ __entry->ptr = xe_vma_userptr(vma);
+ ),
+
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
+ __entry->vma, __entry->asid, __entry->start,
+ __entry->end, __entry->ptr)
+)
+
+DEFINE_EVENT(xe_vma, xe_vma_flush,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_pagefault,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_acc,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_fail,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_bind,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_pf_bind,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_unbind,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_worker,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_userptr_rebind_exec,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_rebind_worker,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_rebind_exec,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_evict,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate_complete,
+ TP_PROTO(struct xe_vma *vma),
+ TP_ARGS(vma)
+);
+
+DECLARE_EVENT_CLASS(xe_vm,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(struct xe_vm *, vm)
+ __field(u32, asid)
+ ),
+
+ TP_fast_assign(
+ __entry->vm = vm;
+ __entry->asid = vm->usm.asid;
+ ),
+
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
+ __entry->asid)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_kill,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_create,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_free,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_cpu_bind,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_restart,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_enter,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_retry,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
+ TP_PROTO(struct xe_vm *vm),
+ TP_ARGS(vm)
+);
+
+/* GuC */
+DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
+ TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
+ TP_ARGS(_head, _tail, size, space, len),
+
+ TP_STRUCT__entry(
+ __field(u32, _head)
+ __field(u32, _tail)
+ __field(u32, size)
+ __field(u32, space)
+ __field(u32, len)
+ ),
+
+ TP_fast_assign(
+ __entry->_head = _head;
+ __entry->_tail = _tail;
+ __entry->size = size;
+ __entry->space = space;
+ __entry->len = len;
+ ),
+
+ TP_printk("h2g flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
+ __entry->_head, __entry->_tail, __entry->size,
+ __entry->space, __entry->len)
+);
+
+DEFINE_EVENT(xe_guc_ct_flow_control, xe_guc_ct_h2g_flow_control,
+ TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
+ TP_ARGS(_head, _tail, size, space, len)
+);
+
+DEFINE_EVENT_PRINT(xe_guc_ct_flow_control, xe_guc_ct_g2h_flow_control,
+ TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
+ TP_ARGS(_head, _tail, size, space, len),
+
+ TP_printk("g2h flow control: head=%u, tail=%u, size=%u, space=%u, len=%u",
+ __entry->_head, __entry->_tail, __entry->size,
+ __entry->space, __entry->len)
+);
+
+DECLARE_EVENT_CLASS(xe_guc_ctb,
+ TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
+ TP_ARGS(gt_id, action, len, _head, tail),
+
+ TP_STRUCT__entry(
+ __field(u8, gt_id)
+ __field(u32, action)
+ __field(u32, len)
+ __field(u32, tail)
+ __field(u32, _head)
+ ),
+
+ TP_fast_assign(
+ __entry->gt_id = gt_id;
+ __entry->action = action;
+ __entry->len = len;
+ __entry->tail = tail;
+ __entry->_head = _head;
+ ),
+
+ TP_printk("gt%d: H2G CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
+ __entry->gt_id, __entry->action, __entry->len,
+ __entry->tail, __entry->_head)
+);
+
+DEFINE_EVENT(xe_guc_ctb, xe_guc_ctb_h2g,
+ TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
+ TP_ARGS(gt_id, action, len, _head, tail)
+);
+
+DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
+ TP_PROTO(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
+ TP_ARGS(gt_id, action, len, _head, tail),
+
+ TP_printk("gt%d: G2H CTB: action=0x%x, len=%d, tail=%d, head=%d\n",
+ __entry->gt_id, __entry->action, __entry->len,
+ __entry->tail, __entry->_head)
+
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
+#define TRACE_INCLUDE_FILE xe_trace
+#include <trace/define_trace.h>
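
For reference, each DEFINE_EVENT() above generates a trace_<name>() inline that call sites invoke with the matching object; with TRACE_SYSTEM set to xe, the events then appear under /sys/kernel/tracing/events/xe/. A minimal, hypothetical call site:

static vm_fault_t example_bo_fault(struct xe_bo *bo)
{
	trace_xe_bo_cpu_fault(bo);	/* generated by DEFINE_EVENT(xe_bo, ...) */

	/* ... actual fault handling ... */
	return VM_FAULT_NOPAGE;
}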
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
new file mode 100644
index 000000000..e5d7d5e2b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2022 Red Hat
+ */
+
+#include <drm/drm_managed.h>
+#include <drm/drm_mm.h>
+
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+
+#include "generated/xe_wa_oob.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_mmio.h"
+#include "xe_res_cursor.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_wa.h"
+
+struct xe_ttm_stolen_mgr {
+ struct xe_ttm_vram_mgr base;
+
+ /* PCI base offset */
+ resource_size_t io_base;
+ /* GPU base offset */
+ resource_size_t stolen_base;
+
+ void __iomem *mapping;
+};
+
+static inline struct xe_ttm_stolen_mgr *
+to_stolen_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct xe_ttm_stolen_mgr, base.manager);
+}
+
+/**
+ * xe_ttm_stolen_cpu_access_needs_ggtt() - Check whether CPU access to stolen
+ * memory must fall back to a mapping through the GGTT.
+ * @xe: xe device
+ *
+ * Some older integrated platforms don't support reliable CPU access to stolen
+ * memory; however, on such hardware we can always use the mappable part of
+ * the GGTT for CPU access. Check if that's the case for this device.
+ *
+ * Return: true if stolen memory must be CPU-accessed through the GGTT.
+ */
+bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
+}
+
+static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u64 stolen_size;
+ u64 tile_offset;
+ u64 tile_size;
+
+ tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
+ tile_size = tile->mem.vram.actual_physical_size;
+
+	/* Use the DSM base address for stolen memory */
+ mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
+ if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
+ return 0;
+
+ stolen_size = tile_size - mgr->stolen_base;
+
+ /* Verify usage fits in the actual resource available */
+ if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
+ mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
+
+ /*
+	 * There may be a few KB of platform-dependent reserved memory at the
+	 * end of vram which is not part of the DSM. Such a reserved portion
+	 * is always less than the DSM granularity, so align stolen_size down
+	 * to the DSM granularity to accommodate it.
+ */
+ return ALIGN_DOWN(stolen_size, SZ_1M);
+}
+
+static u32 get_wopcm_size(struct xe_device *xe)
+{
+ u32 wopcm_size;
+ u64 val;
+
+ val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED);
+ val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+
+ switch (val) {
+ case 0x5 ... 0x6:
+ val--;
+ fallthrough;
+ case 0x0 ... 0x3:
+ wopcm_size = (1U << val) * SZ_1M;
+ break;
+ default:
+ WARN(1, "Missing case wopcm_size=%llx\n", val);
+ wopcm_size = 0;
+ }
+
+ return wopcm_size;
+}
+
+static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_gt *media_gt = xe_device_get_root_tile(xe)->media_gt;
+ u32 stolen_size, wopcm_size;
+ u32 ggc, gms;
+
+ ggc = xe_mmio_read32(xe_root_mmio_gt(xe), GGC);
+
+ /*
+ * Check GGMS: it should be fixed 0x3 (8MB), which corresponds to the
+ * GTT size
+ */
+ if (drm_WARN_ON(&xe->drm, (ggc & GGMS_MASK) != GGMS_MASK))
+ return 0;
+
+ /*
+ * Graphics >= 1270 uses the offset to the GSMBASE as address in the
+ * PTEs, together with the DM flag being set. Previously there was no
+ * such flag so the address was the io_base.
+ *
+ * DSMBASE = GSMBASE + 8MB
+ */
+ mgr->stolen_base = SZ_8M;
+ mgr->io_base = pci_resource_start(pdev, 2) + mgr->stolen_base;
+
+	/* Decode the GMS field; treat invalid encodings as no stolen memory */
+ gms = REG_FIELD_GET(GMS_MASK, ggc);
+ switch (gms) {
+ case 0x0 ... 0x04:
+ stolen_size = gms * 32 * SZ_1M;
+ break;
+ case 0xf0 ... 0xfe:
+ stolen_size = (gms - 0xf0 + 1) * 4 * SZ_1M;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Carve out the top of DSM as it contains the reserved WOPCM region */
+ wopcm_size = get_wopcm_size(xe);
+ if (drm_WARN_ON(&xe->drm, !wopcm_size))
+ return 0;
+
+ stolen_size -= wopcm_size;
+
+ if (media_gt && XE_WA(media_gt, 14019821291)) {
+ u64 gscpsmi_base = xe_mmio_read64_2x32(media_gt, GSCPSMI_BASE)
+ & ~GENMASK_ULL(5, 0);
+
+ /*
+ * This workaround is primarily implemented by the BIOS. We
+ * just need to figure out whether the BIOS has applied the
+ * workaround (meaning the programmed address falls within
+ * the DSM) and, if so, reserve that part of the DSM to
+ * prevent accidental reuse. The DSM location should be just
+ * below the WOPCM.
+ */
+ if (gscpsmi_base >= mgr->io_base &&
+ gscpsmi_base < mgr->io_base + stolen_size) {
+ xe_gt_dbg(media_gt,
+ "Reserving %llu bytes of DSM for Wa_14019821291\n",
+ mgr->io_base + stolen_size - gscpsmi_base);
+ stolen_size = gscpsmi_base - mgr->io_base;
+ }
+ }
+
+ if (drm_WARN_ON(&xe->drm, stolen_size + SZ_8M > pci_resource_len(pdev, 2)))
+ return 0;
+
+ return stolen_size;
+}
+
+extern struct resource intel_graphics_stolen_res;
+
+static u64 detect_stolen(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+{
+#ifdef CONFIG_X86
+ /* Map into GGTT */
+ mgr->io_base = pci_resource_start(to_pci_dev(xe->drm.dev), 2);
+
+ /* Stolen memory is x86 only */
+ mgr->stolen_base = intel_graphics_stolen_res.start;
+ return resource_size(&intel_graphics_stolen_res);
+#else
+ return 0;
+#endif
+}
+
+void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+{
+ struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u64 stolen_size, io_size, pgsize;
+ int err;
+
+ if (IS_DGFX(xe))
+ stolen_size = detect_bar2_dgfx(xe, mgr);
+ else if (GRAPHICS_VERx100(xe) >= 1270)
+ stolen_size = detect_bar2_integrated(xe, mgr);
+ else
+ stolen_size = detect_stolen(xe, mgr);
+
+ if (!stolen_size) {
+ drm_dbg_kms(&xe->drm, "No stolen memory support\n");
+ return;
+ }
+
+ pgsize = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
+ if (pgsize < PAGE_SIZE)
+ pgsize = PAGE_SIZE;
+
+ /*
+	 * We don't attempt partial visible support for stolen vram, since
+	 * stolen is always at the end of vram and, with small-bar, the BAR
+	 * size is pretty much always 256M.
+ */
+ io_size = 0;
+ if (mgr->io_base && !xe_ttm_stolen_cpu_access_needs_ggtt(xe))
+ io_size = stolen_size;
+
+ err = __xe_ttm_vram_mgr_init(xe, &mgr->base, XE_PL_STOLEN, stolen_size,
+ io_size, pgsize);
+ if (err) {
+ drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
+ return;
+ }
+
+ drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n",
+ stolen_size);
+
+ if (io_size)
+ mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size);
+}
+
+u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ struct ttm_resource_manager *ttm_mgr = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+ struct xe_ttm_stolen_mgr *mgr = to_stolen_mgr(ttm_mgr);
+ struct xe_res_cursor cur;
+
+ XE_WARN_ON(!mgr->io_base);
+
+ if (xe_ttm_stolen_cpu_access_needs_ggtt(xe))
+ return mgr->io_base + xe_bo_ggtt_addr(bo) + offset;
+
+ xe_res_first(bo->ttm.resource, offset, 4096, &cur);
+ return mgr->io_base + cur.start;
+}
+
+static int __xe_ttm_stolen_io_mem_reserve_bar2(struct xe_device *xe,
+ struct xe_ttm_stolen_mgr *mgr,
+ struct ttm_resource *mem)
+{
+ struct xe_res_cursor cur;
+
+ if (!mgr->io_base)
+ return -EIO;
+
+ xe_res_first(mem, 0, 4096, &cur);
+ mem->bus.offset = cur.start;
+
+ drm_WARN_ON(&xe->drm, !(mem->placement & TTM_PL_FLAG_CONTIGUOUS));
+
+ if (mem->placement & TTM_PL_FLAG_CONTIGUOUS && mgr->mapping)
+ mem->bus.addr = (u8 __force *)mgr->mapping + mem->bus.offset;
+
+ mem->bus.offset += mgr->io_base;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
+
+ return 0;
+}
+
+static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
+ struct xe_ttm_stolen_mgr *mgr,
+ struct ttm_resource *mem)
+{
+#ifdef CONFIG_X86
+ struct xe_bo *bo = ttm_to_xe_bo(mem->bo);
+
+ XE_WARN_ON(IS_DGFX(xe));
+
+ /* XXX: Require BO to be mapped to GGTT? */
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+ return -EIO;
+
+ /* GGTT is always contiguously mapped */
+ mem->bus.offset = xe_bo_ggtt_addr(bo) + mgr->io_base;
+
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
+
+ return 0;
+#else
+ /* How is it even possible to get here without gen12 stolen? */
+ drm_WARN_ON(&xe->drm, 1);
+ return -EIO;
+#endif
+}
+
+int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem)
+{
+ struct ttm_resource_manager *ttm_mgr = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+ struct xe_ttm_stolen_mgr *mgr = ttm_mgr ? to_stolen_mgr(ttm_mgr) : NULL;
+
+ if (!mgr || !mgr->io_base)
+ return -EIO;
+
+ if (xe_ttm_stolen_cpu_access_needs_ggtt(xe))
+ return __xe_ttm_stolen_io_mem_reserve_stolen(xe, mgr, mem);
+ else
+ return __xe_ttm_stolen_io_mem_reserve_bar2(xe, mgr, mem);
+}
+
+u64 xe_ttm_stolen_gpu_offset(struct xe_device *xe)
+{
+ struct xe_ttm_stolen_mgr *mgr =
+ to_stolen_mgr(ttm_manager_type(&xe->ttm, XE_PL_STOLEN));
+
+ return mgr->stolen_base;
+}
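
To make the GGC decode in detect_bar2_integrated() concrete, here is a stand-alone restatement of the GMS mapping with worked values (illustrative only, not part of the patch; the WOPCM carve-out is subtracted afterwards):

/* Mirrors the switch in detect_bar2_integrated(), before the WOPCM carve-out */
static u32 example_gms_to_stolen_size(u32 gms)
{
	if (gms <= 0x04)			/* e.g. 0x02 -> 2 * 32M = 64M */
		return gms * 32 * SZ_1M;
	if (gms >= 0xf0 && gms <= 0xfe)		/* e.g. 0xf3 -> 4 * 4M = 16M */
		return (gms - 0xf0 + 1) * 4 * SZ_1M;
	return 0;				/* invalid encoding: no stolen */
}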
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
new file mode 100644
index 000000000..1777245ff
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_TTM_STOLEN_MGR_H_
+#define _XE_TTM_STOLEN_MGR_H_
+
+#include <linux/types.h>
+
+struct ttm_resource;
+struct xe_bo;
+struct xe_device;
+
+void xe_ttm_stolen_mgr_init(struct xe_device *xe);
+int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem);
+bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe);
+u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset);
+u64 xe_ttm_stolen_gpu_offset(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
new file mode 100644
index 000000000..3e1fa0c83
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ * Copyright (C) 2021-2022 Red Hat
+ */
+
+#include "xe_ttm_sys_mgr.h"
+
+#include <drm/drm_managed.h>
+
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "xe_bo.h"
+#include "xe_gt.h"
+
+struct xe_ttm_sys_node {
+ struct ttm_buffer_object *tbo;
+ struct ttm_range_mgr_node base;
+};
+
+static inline struct xe_ttm_sys_node *
+to_xe_ttm_sys_node(struct ttm_resource *res)
+{
+ return container_of(res, struct xe_ttm_sys_node, base.base);
+}
+
+static int xe_ttm_sys_mgr_new(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct xe_ttm_sys_node *node;
+ int r;
+
+ node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->tbo = tbo;
+ ttm_resource_init(tbo, place, &node->base.base);
+
+ if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
+ ttm_resource_manager_usage(man) > (man->size << PAGE_SHIFT)) {
+ r = -ENOSPC;
+ goto err_fini;
+ }
+
+ node->base.mm_nodes[0].start = 0;
+ node->base.mm_nodes[0].size = PFN_UP(node->base.base.size);
+ node->base.base.start = XE_BO_INVALID_OFFSET;
+
+ *res = &node->base.base;
+
+ return 0;
+
+err_fini:
+ ttm_resource_fini(man, &node->base.base);
+ kfree(node);
+ return r;
+}
+
+static void xe_ttm_sys_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct xe_ttm_sys_node *node = to_xe_ttm_sys_node(res);
+
+ ttm_resource_fini(man, res);
+ kfree(node);
+}
+
+static void xe_ttm_sys_mgr_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+
+}
+
+static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {
+ .alloc = xe_ttm_sys_mgr_new,
+ .free = xe_ttm_sys_mgr_del,
+ .debug = xe_ttm_sys_mgr_debug
+};
+
+static void ttm_sys_mgr_fini(struct drm_device *drm, void *arg)
+{
+	struct xe_device *xe = arg;
+ struct ttm_resource_manager *man = &xe->mem.sys_mgr;
+ int err;
+
+ ttm_resource_manager_set_used(man, false);
+
+ err = ttm_resource_manager_evict_all(&xe->ttm, man);
+ if (err)
+ return;
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&xe->ttm, XE_PL_TT, NULL);
+}
+
+int xe_ttm_sys_mgr_init(struct xe_device *xe)
+{
+ struct ttm_resource_manager *man = &xe->mem.sys_mgr;
+ struct sysinfo si;
+ u64 gtt_size;
+
+ si_meminfo(&si);
+ gtt_size = (u64)si.totalram * si.mem_unit;
+	/* TTM limits allocations across all TTM devices to 50% of system memory */
+ gtt_size /= 2;
+
+ man->use_tt = true;
+ man->func = &xe_ttm_sys_mgr_func;
+ ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);
+ ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);
+ ttm_resource_manager_set_used(man, true);
+ return drmm_add_action_or_reset(&xe->drm, ttm_sys_mgr_fini, xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.h b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.h
new file mode 100644
index 000000000..e8f5cd395
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TTM_SYS_MGR_H_
+#define _XE_TTM_SYS_MGR_H_
+
+struct xe_device;
+
+int xe_ttm_sys_mgr_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
new file mode 100644
index 000000000..115ec745e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ * Copyright (C) 2021-2022 Red Hat
+ */
+
+#include <drm/drm_managed.h>
+
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_res_cursor.h"
+#include "xe_ttm_vram_mgr.h"
+
+static inline struct drm_buddy_block *
+xe_ttm_vram_mgr_first_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
+}
+
+static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm,
+ struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 start, size;
+
+ block = xe_ttm_vram_mgr_first_block(head);
+ if (!block)
+ return false;
+
+ while (head != block->link.next) {
+ start = drm_buddy_block_offset(block);
+ size = drm_buddy_block_size(mm, block);
+
+ block = list_entry(block->link.next, struct drm_buddy_block,
+ link);
+ if (start + size != drm_buddy_block_offset(block))
+ return false;
+ }
+
+ return true;
+}
+
+static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ struct xe_ttm_vram_mgr_resource *vres;
+ struct drm_buddy *mm = &mgr->mm;
+ u64 size, remaining_size, min_page_size;
+ unsigned long lpfn;
+ int err;
+
+ lpfn = place->lpfn;
+ if (!lpfn || lpfn > man->size >> PAGE_SHIFT)
+ lpfn = man->size >> PAGE_SHIFT;
+
+ if (tbo->base.size >> PAGE_SHIFT > (lpfn - place->fpfn))
+ return -E2BIG; /* don't trigger eviction for the impossible */
+
+ vres = kzalloc(sizeof(*vres), GFP_KERNEL);
+ if (!vres)
+ return -ENOMEM;
+
+ ttm_resource_init(tbo, place, &vres->base);
+
+ /* bail out quickly if there's likely not enough VRAM for this BO */
+ if (ttm_resource_manager_usage(man) > man->size) {
+ err = -ENOSPC;
+ goto error_fini;
+ }
+
+ INIT_LIST_HEAD(&vres->blocks);
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (place->fpfn || lpfn != man->size >> PAGE_SHIFT)
+ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
+ if (WARN_ON(!vres->base.size)) {
+ err = -EINVAL;
+ goto error_fini;
+ }
+ size = vres->base.size;
+
+ min_page_size = mgr->default_page_size;
+ if (tbo->page_alignment)
+ min_page_size = tbo->page_alignment << PAGE_SHIFT;
+
+ if (WARN_ON(min_page_size < mm->chunk_size)) {
+ err = -EINVAL;
+ goto error_fini;
+ }
+
+ if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
+ err = -EINVAL;
+ goto error_fini;
+ }
+
+ if (WARN_ON((size > SZ_2G &&
+ (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
+ err = -EINVAL;
+ goto error_fini;
+ }
+
+ if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
+ err = -EINVAL;
+ goto error_fini;
+ }
+
+ mutex_lock(&mgr->lock);
+ if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
+ mutex_unlock(&mgr->lock);
+ err = -ENOSPC;
+ goto error_fini;
+ }
+
+ if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
+ place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_page_size = size;
+
+ lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
+ }
+
+ remaining_size = size;
+ do {
+ /*
+ * Limit maximum size to 2GiB due to SG table limitations.
+ * FIXME: Should maybe be handled as part of sg construction.
+ */
+ u64 alloc_size = min_t(u64, remaining_size, SZ_2G);
+
+ err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+ (u64)lpfn << PAGE_SHIFT,
+ alloc_size,
+ min_page_size,
+ &vres->blocks,
+ vres->flags);
+ if (err)
+ goto error_free_blocks;
+
+ remaining_size -= alloc_size;
+ } while (remaining_size);
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks))
+ size = vres->base.size;
+ }
+
+ if (lpfn <= mgr->visible_size >> PAGE_SHIFT) {
+ vres->used_visible_size = size;
+ } else {
+ struct drm_buddy_block *block;
+
+ list_for_each_entry(block, &vres->blocks, link) {
+ u64 start = drm_buddy_block_offset(block);
+
+ if (start < mgr->visible_size) {
+ u64 end = start + drm_buddy_block_size(mm, block);
+
+ vres->used_visible_size +=
+ min(end, mgr->visible_size) - start;
+ }
+ }
+ }
+
+ mgr->visible_avail -= vres->used_visible_size;
+ mutex_unlock(&mgr->lock);
+
+ if (!(vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) &&
+ xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
+ vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+
+ /*
+ * For some kernel objects we still rely on the start when io mapping
+ * the object.
+ */
+ if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) {
+ struct drm_buddy_block *block = list_first_entry(&vres->blocks,
+ typeof(*block),
+ link);
+
+ vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ } else {
+ vres->base.start = XE_BO_INVALID_OFFSET;
+ }
+
+ *res = &vres->base;
+ return 0;
+
+error_free_blocks:
+ drm_buddy_free_list(mm, &vres->blocks);
+ mutex_unlock(&mgr->lock);
+error_fini:
+ ttm_resource_fini(man, &vres->base);
+ kfree(vres);
+
+ return err;
+}
+
+static void xe_ttm_vram_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(res);
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_free_list(mm, &vres->blocks);
+ mgr->visible_avail += vres->used_visible_size;
+ mutex_unlock(&mgr->lock);
+
+ ttm_resource_fini(man, res);
+
+ kfree(vres);
+}
+
+static void xe_ttm_vram_mgr_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_printf(printer, "default_page_size: %lluKiB\n",
+ mgr->default_page_size >> 10);
+ drm_printf(printer, "visible_avail: %lluMiB\n",
+ (u64)mgr->visible_avail >> 20);
+ drm_printf(printer, "visible_size: %lluMiB\n",
+ (u64)mgr->visible_size >> 20);
+
+ drm_buddy_print(mm, printer);
+ mutex_unlock(&mgr->lock);
+ drm_printf(printer, "man size:%llu\n", man->size);
+}
+
+static bool xe_ttm_vram_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(res);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
+
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
+ if (!place->fpfn && place->lpfn == mgr->visible_size >> PAGE_SHIFT)
+ return vres->used_visible_size > 0;
+
+ list_for_each_entry(block, &vres->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
+ if (place->fpfn < lpfn && place->lpfn > fpfn)
+ return true;
+ }
+
+ return false;
+}
+
+static bool xe_ttm_vram_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(res);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
+
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
+ if (!place->fpfn && place->lpfn == mgr->visible_size >> PAGE_SHIFT)
+ return vres->used_visible_size == size;
+
+ list_for_each_entry(block, &vres->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
+ if (fpfn < place->fpfn || lpfn > place->lpfn)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct ttm_resource_manager_func xe_ttm_vram_mgr_func = {
+ .alloc = xe_ttm_vram_mgr_new,
+ .free = xe_ttm_vram_mgr_del,
+ .intersects = xe_ttm_vram_mgr_intersects,
+ .compatible = xe_ttm_vram_mgr_compatible,
+ .debug = xe_ttm_vram_mgr_debug
+};
+
+static void ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_ttm_vram_mgr *mgr = arg;
+ struct ttm_resource_manager *man = &mgr->manager;
+
+ ttm_resource_manager_set_used(man, false);
+
+ if (ttm_resource_manager_evict_all(&xe->ttm, man))
+ return;
+
+ WARN_ON_ONCE(mgr->visible_avail != mgr->visible_size);
+
+ drm_buddy_fini(&mgr->mm);
+
+ ttm_resource_manager_cleanup(&mgr->manager);
+
+ ttm_set_driver_manager(&xe->ttm, mgr->mem_type, NULL);
+
+ mutex_destroy(&mgr->lock);
+}
+
+int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
+ u32 mem_type, u64 size, u64 io_size,
+ u64 default_page_size)
+{
+ struct ttm_resource_manager *man = &mgr->manager;
+ int err;
+
+ man->func = &xe_ttm_vram_mgr_func;
+ mgr->mem_type = mem_type;
+ mutex_init(&mgr->lock);
+ mgr->default_page_size = default_page_size;
+ mgr->visible_size = io_size;
+ mgr->visible_avail = io_size;
+
+ ttm_resource_manager_init(man, &xe->ttm, size);
+ err = drm_buddy_init(&mgr->mm, man->size, default_page_size);
+ if (err)
+ return err;
+
+ ttm_set_driver_manager(&xe->ttm, mem_type, &mgr->manager);
+ ttm_resource_manager_set_used(&mgr->manager, true);
+
+ return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
+}
+
+int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_mem_region *vram = &tile->mem.vram;
+
+ mgr->vram = vram;
+ return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
+ vram->usable_size, vram->io_size,
+ PAGE_SIZE);
+}
+
+int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
+ struct ttm_resource *res,
+ u64 offset, u64 length,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt)
+{
+ struct xe_tile *tile = &xe->tiles[res->mem_type - XE_PL_VRAM0];
+ struct xe_ttm_vram_mgr_resource *vres = to_xe_ttm_vram_mgr_resource(res);
+ struct xe_res_cursor cursor;
+ struct scatterlist *sg;
+ int num_entries = 0;
+ int i, r;
+
+ if (vres->used_visible_size < res->size)
+ return -EOPNOTSUPP;
+
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+ if (!*sgt)
+ return -ENOMEM;
+
+ /* Determine the number of DRM_BUDDY blocks to export */
+ xe_res_first(res, offset, length, &cursor);
+ while (cursor.remaining) {
+ num_entries++;
+ xe_res_next(&cursor, cursor.size);
+ }
+
+ r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+ if (r)
+ goto error_free;
+
+ /* Initialize scatterlist nodes of sg_table */
+ for_each_sgtable_sg((*sgt), sg, i)
+ sg->length = 0;
+
+ /*
+	 * Walk the DRM_BUDDY blocks to populate the scatterlist nodes.
+	 * Use the iterator API to get the first DRM_BUDDY block and the
+	 * number of bytes from it, then access the following block(s) if
+	 * more of the buffer needs to be exported.
+ */
+ xe_res_first(res, offset, length, &cursor);
+ for_each_sgtable_sg((*sgt), sg, i) {
+ phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
+ size_t size = cursor.size;
+ dma_addr_t addr;
+
+ addr = dma_map_resource(dev, phys, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ r = dma_mapping_error(dev, addr);
+ if (r)
+ goto error_unmap;
+
+ sg_set_page(sg, NULL, size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = size;
+
+ xe_res_next(&cursor, cursor.size);
+ }
+
+ return 0;
+
+error_unmap:
+ for_each_sgtable_sg((*sgt), sg, i) {
+ if (!sg->length)
+ continue;
+
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(*sgt);
+
+error_free:
+ kfree(*sgt);
+ return r;
+}
+
+void xe_ttm_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+u64 xe_ttm_vram_get_cpu_visible_size(struct ttm_resource_manager *man)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+
+ return mgr->visible_size;
+}
+
+void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
+ u64 *used, u64 *used_visible)
+{
+ struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
+
+ mutex_lock(&mgr->lock);
+ *used = mgr->mm.size - mgr->mm.avail;
+ *used_visible = mgr->visible_size - mgr->visible_avail;
+ mutex_unlock(&mgr->lock);
+}
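
A sketch of how the sg-table helpers above are expected to pair up, e.g. when exporting a VRAM-backed resource for peer DMA; peer_dev is a placeholder, error unwinding is elided, and note that the alloc helper rejects resources that are not fully CPU-visible:

static int example_export_for_peer_dma(struct xe_device *xe,
				       struct ttm_resource *res,
				       struct device *peer_dev)
{
	struct sg_table *sgt;
	int err;

	err = xe_ttm_vram_mgr_alloc_sgt(xe, res, 0, res->size, peer_dev,
					DMA_BIDIRECTIONAL, &sgt);
	if (err)
		return err;

	/* ... peer device performs DMA against sgt ... */

	xe_ttm_vram_mgr_free_sgt(peer_dev, DMA_BIDIRECTIONAL, sgt);
	return 0;
}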
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
new file mode 100644
index 000000000..d184e19a9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_TTM_VRAM_MGR_H_
+#define _XE_TTM_VRAM_MGR_H_
+
+#include "xe_ttm_vram_mgr_types.h"
+
+enum dma_data_direction;
+struct xe_device;
+struct xe_tile;
+
+int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
+ u32 mem_type, u64 size, u64 io_size,
+ u64 default_page_size);
+int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr);
+int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
+ struct ttm_resource *res,
+ u64 offset, u64 length,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt);
+void xe_ttm_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir,
+ struct sg_table *sgt);
+
+u64 xe_ttm_vram_get_cpu_visible_size(struct ttm_resource_manager *man);
+void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
+ u64 *used, u64 *used_visible);
+
+static inline struct xe_ttm_vram_mgr_resource *
+to_xe_ttm_vram_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct xe_ttm_vram_mgr_resource, base);
+}
+
+static inline struct xe_ttm_vram_mgr *
+to_xe_ttm_vram_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct xe_ttm_vram_mgr, manager);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
new file mode 100644
index 000000000..2d75cf126
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_TTM_VRAM_MGR_TYPES_H_
+#define _XE_TTM_VRAM_MGR_TYPES_H_
+
+#include <drm/drm_buddy.h>
+#include <drm/ttm/ttm_device.h>
+
+struct xe_mem_region;
+
+/**
+ * struct xe_ttm_vram_mgr - XE TTM VRAM manager
+ *
+ * Manages placement of TTM resource in VRAM.
+ */
+struct xe_ttm_vram_mgr {
+ /** @manager: Base TTM resource manager */
+ struct ttm_resource_manager manager;
+ /** @mm: DRM buddy allocator which manages the VRAM */
+ struct drm_buddy mm;
+ /** @vram: ptr to details of associated VRAM region */
+ struct xe_mem_region *vram;
+	/** @visible_size: Probed size of the CPU visible portion */
+ u64 visible_size;
+ /** @visible_avail: CPU visible portion still unallocated */
+ u64 visible_avail;
+ /** @default_page_size: default page size */
+ u64 default_page_size;
+ /** @lock: protects allocations of VRAM */
+ struct mutex lock;
+ /** @mem_type: The TTM memory type */
+ u32 mem_type;
+};
+
+/**
+ * struct xe_ttm_vram_mgr_resource - XE TTM VRAM resource
+ */
+struct xe_ttm_vram_mgr_resource {
+ /** @base: Base TTM resource */
+ struct ttm_resource base;
+ /** @blocks: list of DRM buddy blocks */
+ struct list_head blocks;
+ /** @used_visible_size: How many CPU visible bytes this resource is using */
+ u64 used_visible_size;
+ /** @flags: flags associated with the resource */
+ unsigned long flags;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
new file mode 100644
index 000000000..53ccd338f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_tuning.h"
+
+#include <kunit/visibility.h>
+
+#include "regs/xe_gt_regs.h"
+#include "xe_gt_types.h"
+#include "xe_platform_types.h"
+#include "xe_rtp.h"
+
+#undef XE_REG_MCR
+#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
+
+static const struct xe_rtp_entry_sr gt_tunings[] = {
+ { XE_RTP_NAME("Tuning: Blend Fill Caching Optimization Disable"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS))
+ },
+ { XE_RTP_NAME("Tuning: 32B Access Enable"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_SQCM, EN_32B_ACCESS))
+ },
+
+ /* Xe2 */
+
+ { XE_RTP_NAME("Tuning: L3 cache"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004)),
+ XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
+ },
+ { XE_RTP_NAME("Tuning: L3 cache - media"),
+ XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
+ },
+
+ {}
+};
+
+static const struct xe_rtp_entry_sr engine_tunings[] = {
+ { XE_RTP_NAME("Tuning: Set Indirect State Override"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1271),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
+ },
+ {}
+};
+
+static const struct xe_rtp_entry_sr lrc_tunings[] = {
+ { XE_RTP_NAME("Tuning: ganged timer, also known as 16011163337"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224))
+ },
+
+ /* DG2 */
+
+ { XE_RTP_NAME("Tuning: L3 cache"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
+ },
+ { XE_RTP_NAME("Tuning: TDS gang timer"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored as in i915 - need to check enabling */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(XEHP_FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128))
+ },
+ { XE_RTP_NAME("Tuning: TBIMR fast clip"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_2, TBIMR_FAST_CLIP))
+ },
+
+ /* Xe_LPG */
+
+ { XE_RTP_NAME("Tuning: L3 cache"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(FIELD_SET(XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
+ REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
+ },
+
+ {}
+};
+
+void xe_tuning_process_gt(struct xe_gt *gt)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+
+ xe_rtp_process_to_sr(&ctx, gt_tunings, &gt->reg_sr);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
+
+void xe_tuning_process_engine(struct xe_hw_engine *hwe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine);
+
+/**
+ * xe_tuning_process_lrc - process lrc tunings
+ * @hwe: engine instance to process tunings for
+ *
+ * Process LRC table for this platform, saving in @hwe all the tunings that need
+ * to be applied on context restore. These are tunings touching registers that
+ * are part of the HW context image.
+ */
+void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc);
+}
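
For orientation, a new tuning would be added as another table entry of the same shape, e.g. in engine_tunings; EXAMPLE_REG and EXAMPLE_BIT below are placeholders, not real registers:

	{ XE_RTP_NAME("Tuning: example knob"),
	  XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
	  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_BIT))
	},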
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
new file mode 100644
index 000000000..4f9c3ac3b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_TUNING_
+#define _XE_TUNING_
+
+struct xe_gt;
+struct xe_hw_engine;
+
+void xe_tuning_process_gt(struct xe_gt *gt);
+void xe_tuning_process_engine(struct xe_hw_engine *hwe);
+void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
new file mode 100644
index 000000000..25e1ddfd2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_uc.h"
+
+#include "xe_device.h"
+#include "xe_gsc.h"
+#include "xe_gt.h"
+#include "xe_guc.h"
+#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
+#include "xe_huc.h"
+#include "xe_uc_fw.h"
+#include "xe_wopcm.h"
+
+static struct xe_gt *
+uc_to_gt(struct xe_uc *uc)
+{
+ return container_of(uc, struct xe_gt, uc);
+}
+
+static struct xe_device *
+uc_to_xe(struct xe_uc *uc)
+{
+ return gt_to_xe(uc_to_gt(uc));
+}
+
+/* Should be called once at driver load only */
+int xe_uc_init(struct xe_uc *uc)
+{
+ int ret;
+
+ /*
+ * We call the GuC/HuC/GSC init functions even if GuC submission is off
+ * to correctly move our tracking of the FW state to "disabled".
+ */
+
+ ret = xe_guc_init(&uc->guc);
+ if (ret)
+ goto err;
+
+ ret = xe_huc_init(&uc->huc);
+ if (ret)
+ goto err;
+
+ ret = xe_gsc_init(&uc->gsc);
+ if (ret)
+ goto err;
+
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ ret = xe_wopcm_init(&uc->wopcm);
+ if (ret)
+ goto err;
+
+ ret = xe_guc_submit_init(&uc->guc);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+/**
+ * xe_uc_init_post_hwconfig - init UC post hwconfig load
+ * @uc: The UC object
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_uc_init_post_hwconfig(struct xe_uc *uc)
+{
+ int err;
+
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ err = xe_uc_sanitize_reset(uc);
+ if (err)
+ return err;
+
+ err = xe_guc_init_post_hwconfig(&uc->guc);
+ if (err)
+ return err;
+
+ return xe_gsc_init_post_hwconfig(&uc->gsc);
+}
+
+static int uc_reset(struct xe_uc *uc)
+{
+ struct xe_device *xe = uc_to_xe(uc);
+ int ret;
+
+ ret = xe_guc_reset(&uc->guc);
+ if (ret) {
+ drm_err(&xe->drm, "Failed to reset GuC, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void xe_uc_sanitize(struct xe_uc *uc)
+{
+ xe_huc_sanitize(&uc->huc);
+ xe_guc_sanitize(&uc->guc);
+}
+
+int xe_uc_sanitize_reset(struct xe_uc *uc)
+{
+ xe_uc_sanitize(uc);
+
+ return uc_reset(uc);
+}
+
+/**
+ * xe_uc_init_hwconfig - minimally init UC, read and parse hwconfig
+ * @uc: The UC object
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_uc_init_hwconfig(struct xe_uc *uc)
+{
+ int ret;
+
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ ret = xe_guc_min_load_for_hwconfig(&uc->guc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Should be called during driver load, after every GT reset, and after every
+ * suspend to reload / auth the firmwares.
+ */
+int xe_uc_init_hw(struct xe_uc *uc)
+{
+ int ret;
+
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ ret = xe_huc_upload(&uc->huc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_upload(&uc->guc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_enable_communication(&uc->guc);
+ if (ret)
+ return ret;
+
+ ret = xe_gt_record_default_lrcs(uc_to_gt(uc));
+ if (ret)
+ return ret;
+
+ ret = xe_guc_post_load_init(&uc->guc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_pc_start(&uc->guc.pc);
+ if (ret)
+ return ret;
+
+ /* We don't fail the driver load if HuC fails to auth, but let's warn */
+ ret = xe_huc_auth(&uc->huc, XE_HUC_AUTH_VIA_GUC);
+ xe_gt_assert(uc_to_gt(uc), !ret);
+
+ /* GSC load is async */
+ xe_gsc_load_start(&uc->gsc);
+
+ return 0;
+}
+
+int xe_uc_fini_hw(struct xe_uc *uc)
+{
+ return xe_uc_sanitize_reset(uc);
+}
+
+int xe_uc_reset_prepare(struct xe_uc *uc)
+{
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ return xe_guc_reset_prepare(&uc->guc);
+}
+
+void xe_uc_gucrc_disable(struct xe_uc *uc)
+{
+ XE_WARN_ON(xe_guc_pc_gucrc_disable(&uc->guc.pc));
+}
+
+void xe_uc_stop_prepare(struct xe_uc *uc)
+{
+ xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_guc_stop_prepare(&uc->guc);
+}
+
+int xe_uc_stop(struct xe_uc *uc)
+{
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ return xe_guc_stop(&uc->guc);
+}
+
+int xe_uc_start(struct xe_uc *uc)
+{
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ return xe_guc_start(&uc->guc);
+}
+
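+/*
+ * Wait for a pending GuC reset to complete, looping in case
+ * xe_uc_reset_prepare() reports that another reset became pending in
+ * the meantime.
+ */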
+static void uc_reset_wait(struct xe_uc *uc)
+{
+ int ret;
+
+again:
+ xe_guc_reset_wait(&uc->guc);
+
+ ret = xe_uc_reset_prepare(uc);
+ if (ret)
+ goto again;
+}
+
+int xe_uc_suspend(struct xe_uc *uc)
+{
+ int ret;
+
+ /* GuC submission not enabled, nothing to do */
+ if (!xe_device_uc_enabled(uc_to_xe(uc)))
+ return 0;
+
+ uc_reset_wait(uc);
+
+ ret = xe_uc_stop(uc);
+ if (ret)
+ return ret;
+
+ return xe_guc_suspend(&uc->guc);
+}
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
new file mode 100644
index 000000000..5d5110c0c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_UC_H_
+#define _XE_UC_H_
+
+#include "xe_uc_types.h"
+
+int xe_uc_init(struct xe_uc *uc);
+int xe_uc_init_hwconfig(struct xe_uc *uc);
+int xe_uc_init_post_hwconfig(struct xe_uc *uc);
+int xe_uc_init_hw(struct xe_uc *uc);
+int xe_uc_fini_hw(struct xe_uc *uc);
+void xe_uc_gucrc_disable(struct xe_uc *uc);
+int xe_uc_reset_prepare(struct xe_uc *uc);
+void xe_uc_stop_prepare(struct xe_uc *uc);
+int xe_uc_stop(struct xe_uc *uc);
+int xe_uc_start(struct xe_uc *uc);
+int xe_uc_suspend(struct xe_uc *uc);
+int xe_uc_sanitize_reset(struct xe_uc *uc);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c
new file mode 100644
index 000000000..0a39ec5a6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+
+#include "xe_gt.h"
+#include "xe_guc_debugfs.h"
+#include "xe_huc_debugfs.h"
+#include "xe_macros.h"
+#include "xe_uc_debugfs.h"
+
+void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("uc", parent);
+ if (IS_ERR(root)) {
+ XE_WARN_ON("Create UC directory failed");
+ return;
+ }
+
+ xe_guc_debugfs_register(&uc->guc, root);
+ xe_huc_debugfs_register(&uc->huc, root);
+}
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.h b/drivers/gpu/drm/xe/xe_uc_debugfs.h
new file mode 100644
index 000000000..a13382df2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_UC_DEBUGFS_H_
+#define _XE_UC_DEBUGFS_H_
+
+struct dentry;
+struct xe_uc;
+
+void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
new file mode 100644
index 000000000..9dff96dfe
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_guc_regs.h"
+#include "xe_bo.h"
+#include "xe_device_types.h"
+#include "xe_force_wake.h"
+#include "xe_gsc.h"
+#include "xe_gt.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_module.h"
+#include "xe_uc_fw.h"
+
+/*
+ * List of required GuC and HuC binaries per-platform. They must be ordered
+ * based on platform, from newer to older.
+ *
+ * Versioning follows the guidelines from
+ * Documentation/driver-api/firmware/firmware-usage-guidelines.rst. There is a
+ * distinction for platforms being officially supported by the driver or not.
+ * Platforms not available publicly or not yet officially supported by the
+ * driver (under force-probe), use the mmp_ver(): the firmware autoselect logic
+ * will select the firmware from disk with filename that matches the full
+ * "mpp version", i.e. major.minor.patch. mmp_ver() should only be used for
+ * this case.
+ *
+ * For platforms officially supported by the driver, the filename always only
+ * ever contains the major version (GuC) or no version at all (HuC).
+ *
+ * After loading the file, the driver parses the versions embedded in the blob.
+ * The major version needs to match a major version supported by the driver (if
+ * any). The minor version is also checked and a notice emitted to the log if
+ * the version found is smaller than the version wanted. This is done only for
+ * informational purposes so users may have a chance to upgrade, but the driver
+ * still loads and uses the older firmware.
+ *
+ * Examples:
+ *
+ * 1) Platform officially supported by i915 - using Tigerlake as example.
+ * Driver loads the following firmware blobs from disk:
+ *
+ * - i915/tgl_guc_<major>.bin
+ * - i915/tgl_huc.bin
+ *
+ * The <major> number for GuC is checked against the version inside
+ * the blob. The <minor> version is also checked and, if smaller than
+ * expected, an info message is emitted about that.
+ *
+ * 2) XE_<FUTUREINTELPLATFORM>, still under require_force_probe. Using
+ * "wipplat" as a short-name. Driver loads the following firmware blobs
+ * from disk:
+ *
+ * - xe/wipplat_guc_<major>.<minor>.<patch>.bin
+ * - xe/wipplat_huc_<major>.<minor>.<patch>.bin
+ *
+ * Both <major> and <minor> are checked against the versions inside
+ * the blob. They need to match exactly what the driver is expecting,
+ * otherwise the load fails.
+ *
+ * 3) Platform officially supported by xe and out of force-probe. Using
+ * "plat" as a short-name. Except for the different directory, the
+ * behavior is the same as (1). Driver loads the following firmware
+ * blobs from disk:
+ *
+ * - xe/plat_guc_<major>.bin
+ * - xe/plat_huc.bin
+ *
+ * The <major> number for GuC is checked against the version inside
+ * the blob. The <minor> version is also checked and, if smaller than
+ * expected, an info message is emitted about that.
+ *
+ * Platforms already released with a major version should never be removed
+ * from the table. Instead, new entries with newer versions may be added
+ * before them, so they take precedence.
+ *
+ * TODO: Currently there's no fallback on major version. That's because the
+ * xe driver only supports one major version of each firmware in the table.
+ * This needs to be fixed when the major version of GuC is updated.
+ */
+
+struct uc_fw_entry {
+ enum xe_platform platform;
+ struct {
+ const char *path;
+ u16 major;
+ u16 minor;
+ bool full_ver_required;
+ };
+};
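+
+/*
+ * Note: the anonymous struct wrapping the per-blob fields above lets the
+ * uc_fw_entry_*() helpers below initialize them with a single positional
+ * braced list, while .platform is set by name in XE_UC_FW_ENTRY().
+ */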
+
+struct fw_blobs_by_type {
+ const struct uc_fw_entry *entries;
+ u32 count;
+};
+
+#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 7)) \
+ fw_def(DG2, major_ver(i915, guc, dg2, 70, 5)) \
+ fw_def(DG1, major_ver(i915, guc, dg1, 70, 5)) \
+ fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 5)) \
+ fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 5)) \
+ fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 5)) \
+ fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 5)) \
+ fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 5))
+
+#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
+ fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
+ fw_def(DG1, no_ver(i915, huc, dg1)) \
+ fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
+ fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \
+ fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \
+ fw_def(TIGERLAKE, no_ver(i915, huc, tgl))
+
+/* for the GSC FW we match the compatibility version and not the release one */
+#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
+ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0))
+
+#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
+ __stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
+
+#define fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c) \
+ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a ## . ## b ## . ## c))
+#define fw_filename_major_ver(dir_, uc_, shortname_, a, b) \
+ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
+#define fw_filename_no_ver(dir_, uc_, shortname_) \
+ MAKE_FW_PATH(dir_, uc_, shortname_, "")
+
+#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
+ { fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
+ a, b, true }
+#define uc_fw_entry_major_ver(dir_, uc_, shortname_, a, b) \
+ { fw_filename_major_ver(dir_, uc_, shortname_, a, b), \
+ a, b }
+#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
+ { fw_filename_no_ver(dir_, uc_, shortname_), \
+ 0, 0 }
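+
+/*
+ * Example expansions of the helpers above (illustrative only):
+ *
+ * fw_filename_major_ver(i915, guc, tgl, 70, 5) -> "i915/tgl_guc_70.bin"
+ * fw_filename_no_ver(i915, huc, tgl) -> "i915/tgl_huc.bin"
+ * fw_filename_mmp_ver(xe, guc, wipplat, 70, 5, 2)
+ * -> "xe/wipplat_guc_70.5.2.bin" ("wipplat" being the hypothetical
+ * short-name from the comment at the top of this file)
+ */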
+
+/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
+ MODULE_FIRMWARE(fw_filename);
+
+#define XE_UC_FW_ENTRY(platform__, entry__) \
+ { \
+ .platform = XE_ ## platform__, \
+ entry__, \
+ },
+
+XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
+ fw_filename_mmp_ver, fw_filename_major_ver)
+XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
+ fw_filename_mmp_ver, fw_filename_no_ver)
+XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_major_ver)
+
+static struct xe_gt *
+__uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type)
+{
+ XE_WARN_ON(type >= XE_UC_FW_NUM_TYPES);
+
+ switch (type) {
+ case XE_UC_FW_TYPE_GUC:
+ return container_of(uc_fw, struct xe_gt, uc.guc.fw);
+ case XE_UC_FW_TYPE_HUC:
+ return container_of(uc_fw, struct xe_gt, uc.huc.fw);
+ case XE_UC_FW_TYPE_GSC:
+ return container_of(uc_fw, struct xe_gt, uc.gsc.fw);
+ default:
+ return NULL;
+ }
+}
+
+static struct xe_gt *uc_fw_to_gt(struct xe_uc_fw *uc_fw)
+{
+ return __uc_fw_to_gt(uc_fw, uc_fw->type);
+}
+
+static struct xe_device *uc_fw_to_xe(struct xe_uc_fw *uc_fw)
+{
+ return gt_to_xe(uc_fw_to_gt(uc_fw));
+}
+
+static void
+uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
+{
+ static const struct uc_fw_entry entries_guc[] = {
+ XE_GUC_FIRMWARE_DEFS(XE_UC_FW_ENTRY,
+ uc_fw_entry_mmp_ver,
+ uc_fw_entry_major_ver)
+ };
+ static const struct uc_fw_entry entries_huc[] = {
+ XE_HUC_FIRMWARE_DEFS(XE_UC_FW_ENTRY,
+ uc_fw_entry_mmp_ver,
+ uc_fw_entry_no_ver)
+ };
+ static const struct uc_fw_entry entries_gsc[] = {
+ XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_major_ver)
+ };
+ static const struct fw_blobs_by_type blobs_all[XE_UC_FW_NUM_TYPES] = {
+ [XE_UC_FW_TYPE_GUC] = { entries_guc, ARRAY_SIZE(entries_guc) },
+ [XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) },
+ [XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) },
+ };
+ static const struct uc_fw_entry *entries;
+ enum xe_platform p = xe->info.platform;
+ u32 count;
+ int i;
+
+ xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
+ entries = blobs_all[uc_fw->type].entries;
+ count = blobs_all[uc_fw->type].count;
+
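+ /*
+ * Entries are ordered from newest to oldest platform, so the scan can
+ * stop as soon as it walks past the requested platform's position.
+ */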
+ for (i = 0; i < count && p <= entries[i].platform; i++) {
+ if (p == entries[i].platform) {
+ uc_fw->path = entries[i].path;
+ uc_fw->versions.wanted.major = entries[i].major;
+ uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->full_ver_required = entries[i].full_ver_required;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC)
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ else
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
+
+ break;
+ }
+ }
+}
+
+static void
+uc_fw_override(struct xe_uc_fw *uc_fw)
+{
+ char *path_override = NULL;
+
+ /* empty string disables, but it's not allowed for GuC */
+ switch (uc_fw->type) {
+ case XE_UC_FW_TYPE_GUC:
+ if (xe_modparam.guc_firmware_path && *xe_modparam.guc_firmware_path)
+ path_override = xe_modparam.guc_firmware_path;
+ break;
+ case XE_UC_FW_TYPE_HUC:
+ path_override = xe_modparam.huc_firmware_path;
+ break;
+ case XE_UC_FW_TYPE_GSC:
+ path_override = xe_modparam.gsc_firmware_path;
+ break;
+ default:
+ break;
+ }
+
+ if (path_override) {
+ uc_fw->path = path_override;
+ uc_fw->user_overridden = true;
+ }
+}
+
+/**
+ * xe_uc_fw_copy_rsa - copy fw RSA to buffer
+ *
+ * @uc_fw: uC firmware
+ * @dst: dst buffer
+ * @max_len: max number of bytes to copy
+ *
+ * Return: number of copied bytes.
+ */
+size_t xe_uc_fw_copy_rsa(struct xe_uc_fw *uc_fw, void *dst, u32 max_len)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ u32 size = min_t(u32, uc_fw->rsa_size, max_len);
+
+ xe_assert(xe, !(size % 4));
+ xe_assert(xe, xe_uc_fw_is_available(uc_fw));
+
+ xe_map_memcpy_from(xe, dst, &uc_fw->bo->vmap,
+ xe_uc_fw_rsa_offset(uc_fw), size);
+
+ return size;
+}
+
+static void uc_fw_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_uc_fw *uc_fw = arg;
+
+ if (!xe_uc_fw_is_available(uc_fw))
+ return;
+
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED);
+}
+
+static void guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
+{
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
+ struct xe_uc_fw_version *compatibility = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
+
+ xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC);
+ xe_gt_assert(gt, release->major >= 70);
+
+ if (release->major > 70 || release->minor >= 6) {
+ /* v70.6.0 adds CSS header support */
+ compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->submission_version);
+ compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->submission_version);
+ compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
+ css->submission_version);
+ } else if (release->minor >= 3) {
+ /* v70.3.0 introduced v1.1.0 */
+ compatibility->major = 1;
+ compatibility->minor = 1;
+ compatibility->patch = 0;
+ } else {
+ /* v70.0.0 introduced v1.0.0 */
+ compatibility->major = 1;
+ compatibility->minor = 0;
+ compatibility->patch = 0;
+ }
+
+ uc_fw->private_data_size = css->private_data_size;
+}
+
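+/**
+ * xe_uc_fw_check_version_requirements - check found vs wanted FW versions
+ * @uc_fw: uC firmware
+ *
+ * Compare the version parsed from the blob against the version the driver
+ * wants: the major version (and the minor one too, if a full version is
+ * required) must match, while a smaller-than-wanted minor version only
+ * triggers a notice. A mismatch is tolerated if the path was overridden
+ * by the user.
+ *
+ * Return: 0 on success, -ENOEXEC on an unacceptable version mismatch.
+ */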
+int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_uc_fw_version *wanted = &uc_fw->versions.wanted;
+ struct xe_uc_fw_version *found = &uc_fw->versions.found[uc_fw->versions.wanted_type];
+
+ /* Driver has no requirement on any version, any is good. */
+ if (!wanted->major)
+ return 0;
+
+ /*
+ * If full version is required, both major and minor should match.
+ * Otherwise, at least the major version.
+ */
+ if (wanted->major != found->major ||
+ (uc_fw->full_ver_required && wanted->minor != found->minor)) {
+ drm_notice(&xe->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ found->major, found->minor,
+ wanted->major, wanted->minor);
+ goto fail;
+ }
+
+ if (wanted->minor > found->minor) {
+ drm_notice(&xe->drm, "%s firmware (%u.%u) is recommended, but only (%u.%u) was found in %s\n",
+ xe_uc_fw_type_repr(uc_fw->type),
+ wanted->major, wanted->minor,
+ found->major, found->minor,
+ uc_fw->path);
+ drm_info(&xe->drm, "Consider updating your linux-firmware pkg or downloading from %s\n",
+ XE_UC_FIRMWARE_URL);
+ }
+
+ return 0;
+
+fail:
+ if (xe_uc_fw_is_overridden(uc_fw))
+ return 0;
+
+ return -ENOEXEC;
+}
+
+/* Refer to the "CSS-based Firmware Layout" documentation entry for details */
+static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t fw_size)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
+ struct uc_css_header *css;
+ size_t size;
+
+ /* Check the size of the blob before examining buffer contents */
+ if (unlikely(fw_size < sizeof(struct uc_css_header))) {
+ drm_warn(&xe->drm, "%s firmware %s: invalid size: %zu < %zu\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw_size, sizeof(struct uc_css_header));
+ return -ENODATA;
+ }
+
+ css = (struct uc_css_header *)fw_data;
+
+ /* Check integrity of size values inside CSS header */
+ size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
+ css->exponent_size_dw) * sizeof(u32);
+ if (unlikely(size != sizeof(struct uc_css_header))) {
+ drm_warn(&xe->drm,
+ "%s firmware %s: unexpected header size: %zu != %zu\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ size, sizeof(struct uc_css_header));
+ return -EPROTO;
+ }
+
+ /* uCode size must be calculated from the other sizes */
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At a minimum the blob must have header, uCode and RSA: check all three */
+ size = sizeof(struct uc_css_header) + uc_fw->ucode_size +
+ uc_fw->rsa_size;
+ if (unlikely(fw_size < size)) {
+ drm_warn(&xe->drm, "%s firmware %s: invalid size: %zu < %zu\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw_size, size);
+ return -ENOEXEC;
+ }
+
+ /* Get version numbers from the CSS header */
+ release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->sw_version);
+ release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->sw_version);
+ release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version);
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC)
+ guc_read_css_info(uc_fw, css);
+
+ return 0;
+}
+
+static bool is_cpd_header(const void *data)
+{
+ const u32 *marker = data;
+
+ return *marker == GSC_CPD_HEADER_MARKER;
+}
+
+static u32 entry_offset(const struct gsc_cpd_header_v2 *header, const char *name)
+{
+ const struct gsc_cpd_entry *entry;
+ int i;
+
+ entry = (void *)header + header->header_length;
+
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, name) == 0)
+ return entry->offset & GSC_CPD_ENTRY_OFFSET_MASK;
+
+ return 0;
+}
+
+/* Refer to the "GSC-based Firmware Layout" documentation entry for details */
+static int parse_cpd_header(struct xe_uc_fw *uc_fw, const void *data, size_t size,
+ const char *manifest_entry, const char *css_entry)
+{
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct xe_device *xe = gt_to_xe(gt);
+ const struct gsc_cpd_header_v2 *header = data;
+ struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
+ const struct gsc_manifest_header *manifest;
+ size_t min_size = sizeof(*header);
+ u32 offset;
+
+ /* manifest_entry is mandatory, css_entry is optional */
+ xe_assert(xe, manifest_entry);
+
+ if (size < min_size || !is_cpd_header(header))
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct gsc_cpd_header_v2)) {
+ xe_gt_err(gt, "invalid CPD header length %u!\n", header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct gsc_cpd_entry) * header->num_of_entries;
+ if (size < min_size) {
+ xe_gt_err(gt, "FW too small! %zu < %zu\n", size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the manifest first */
+ offset = entry_offset(header, manifest_entry);
+ if (!offset) {
+ xe_gt_err(gt, "Failed to find %s manifest!\n",
+ xe_uc_fw_type_repr(uc_fw->type));
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_manifest_header);
+ if (size < min_size) {
+ xe_gt_err(gt, "FW too small! %zu < %zu\n", size, min_size);
+ return -ENODATA;
+ }
+
+ manifest = data + offset;
+
+ release->major = manifest->fw_version.major;
+ release->minor = manifest->fw_version.minor;
+ release->patch = manifest->fw_version.hotfix;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC) {
+ struct xe_gsc *gsc = container_of(uc_fw, struct xe_gsc, fw);
+
+ release->build = manifest->fw_version.build;
+ gsc->security_version = manifest->security_version;
+ }
+
+ /* then optionally look for the css header */
+ if (css_entry) {
+ int ret;
+
+ /*
+ * This section does not contain a CSS entry on DG2. We
+ * don't support DG2 HuC right now, so no need to handle
+ * it, just add a reminder in case that changes.
+ */
+ xe_assert(xe, xe->info.platform != XE_DG2);
+
+ offset = entry_offset(header, css_entry);
+
+ /* the CSS header parser will check that the CSS header fits */
+ if (offset > size) {
+ xe_gt_err(gt, "FW too small! %zu < %u\n", size, offset);
+ return -ENODATA;
+ }
+
+ ret = parse_css_header(uc_fw, data + offset, size - offset);
+ if (ret)
+ return ret;
+
+ uc_fw->css_offset = offset;
+ }
+
+ uc_fw->has_gsc_headers = true;
+
+ return 0;
+}
+
+static int parse_gsc_layout(struct xe_uc_fw *uc_fw, const void *data, size_t size)
+{
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ const struct gsc_layout_pointers *layout = data;
+ const struct gsc_bpdt_header *bpdt_header = NULL;
+ const struct gsc_bpdt_entry *bpdt_entry = NULL;
+ size_t min_size = sizeof(*layout);
+ int i;
+
+ if (size < min_size) {
+ xe_gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
+ return -ENODATA;
+ }
+
+ min_size = layout->boot1.offset + layout->boot1.size;
+ if (size < min_size) {
+ xe_gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
+ size, min_size);
+ return -ENODATA;
+ }
+
+ min_size = sizeof(*bpdt_header);
+ if (layout->boot1.size < min_size) {
+ xe_gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ bpdt_header = data + layout->boot1.offset;
+ if (bpdt_header->signature != GSC_BPDT_HEADER_SIGNATURE) {
+ xe_gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
+ bpdt_header->signature);
+ return -EINVAL;
+ }
+
+ min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
+ if (layout->boot1.size < min_size) {
+ xe_gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
+ for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
+ if ((bpdt_entry->type & GSC_BPDT_ENTRY_TYPE_MASK) !=
+ GSC_BPDT_ENTRY_TYPE_GSC_RBE)
+ continue;
+
+ min_size = bpdt_entry->sub_partition_offset;
+
+ /* the CPD header parser will check that the CPD header fits */
+ if (layout->boot1.size < min_size) {
+ xe_gt_err(gt, "GSC FW boot section too small for CPD offset: %u < %zu\n",
+ layout->boot1.size, min_size);
+ return -ENODATA;
+ }
+
+ return parse_cpd_header(uc_fw,
+ (void *)bpdt_header + min_size,
+ layout->boot1.size - min_size,
+ "RBEP.man", NULL);
+ }
+
+ xe_gt_err(gt, "couldn't find CPD header in GSC binary!\n");
+ return -ENODATA;
+}
+
+static int parse_headers(struct xe_uc_fw *uc_fw, const struct firmware *fw)
+{
+ int ret;
+
+ /*
+ * All GuC releases and older HuC ones use CSS headers, while newer HuC
+ * releases use GSC CPD headers.
+ */
+ switch (uc_fw->type) {
+ case XE_UC_FW_TYPE_GSC:
+ return parse_gsc_layout(uc_fw, fw->data, fw->size);
+ case XE_UC_FW_TYPE_HUC:
+ ret = parse_cpd_header(uc_fw, fw->data, fw->size, "HUCP.man", "huc_fw");
+ if (!ret || ret != -ENOENT)
+ return ret;
+ fallthrough;
+ case XE_UC_FW_TYPE_GUC:
+ return parse_css_header(uc_fw, fw->data, fw->size);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define print_uc_fw_version(p_, version_, prefix_, ...) \
+do { \
+ struct xe_uc_fw_version *ver_ = (version_); \
+ if (ver_->build) \
+ drm_printf(p_, prefix_ " version %u.%u.%u.%u\n", ##__VA_ARGS__, \
+ ver_->major, ver_->minor, \
+ ver_->patch, ver_->build); \
+ else \
+ drm_printf(p_, prefix_ " version %u.%u.%u\n", ##__VA_ARGS__, \
+ ver_->major, ver_->minor, ver_->patch); \
+} while (0)
+
+static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmware_p)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct device *dev = xe->drm.dev;
+ struct drm_printer p = drm_info_printer(dev);
+ const struct firmware *fw = NULL;
+ int err;
+
+ /*
+ * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
+ * before we've looked at the HW caps to see if we have uC support
+ */
+ BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
+ xe_assert(xe, !uc_fw->status);
+ xe_assert(xe, !uc_fw->path);
+
+ uc_fw_auto_select(xe, uc_fw);
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_SELECTED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+
+ if (!xe_uc_fw_is_supported(uc_fw))
+ return 0;
+
+ uc_fw_override(uc_fw);
+
+ /* an empty path means the firmware is disabled */
+ if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_DISABLED);
+ drm_dbg(&xe->drm, "%s disabled", xe_uc_fw_type_repr(uc_fw->type));
+ return 0;
+ }
+
+ err = request_firmware(&fw, uc_fw->path, dev);
+ if (err)
+ goto fail;
+
+ err = parse_headers(uc_fw, fw);
+ if (err)
+ goto fail;
+
+ print_uc_fw_version(&p,
+ &uc_fw->versions.found[XE_UC_FW_VER_RELEASE],
+ "Using %s firmware from %s",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+ /* for GSC FW we want the compatibility version, which we query after load */
+ if (uc_fw->type != XE_UC_FW_TYPE_GSC) {
+ err = xe_uc_fw_check_version_requirements(uc_fw);
+ if (err)
+ goto fail;
+ }
+
+ *firmware_p = fw;
+
+ return 0;
+
+fail:
+ xe_uc_fw_change_status(uc_fw, err == -ENOENT ?
+ XE_UC_FIRMWARE_MISSING :
+ XE_UC_FIRMWARE_ERROR);
+
+ drm_notice(&xe->drm, "%s firmware %s: fetch failed with error %d\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ drm_info(&xe->drm, "%s firmware(s) can be downloaded from %s\n",
+ xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+
+ return err;
+}
+
+static void uc_fw_release(const struct firmware *fw)
+{
+ release_firmware(fw);
+}
+
+static int uc_fw_copy(struct xe_uc_fw *uc_fw, const void *data, size_t size, u32 flags)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *obj;
+ int err;
+
+ obj = xe_managed_bo_create_from_data(xe, tile, data, size, flags);
+ if (IS_ERR(obj)) {
+ drm_notice(&xe->drm, "%s firmware %s: failed to create / populate bo",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->bo = obj;
+ uc_fw->size = size;
+
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_AVAILABLE);
+
+ err = drmm_add_action_or_reset(&xe->drm, uc_fw_fini, uc_fw);
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_ERROR);
+ drm_notice(&xe->drm, "%s firmware %s: copy failed with error %d\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+
+ return err;
+}
+
+int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
+{
+ const struct firmware *fw = NULL;
+ int err;
+
+ err = uc_fw_request(uc_fw, &fw);
+ if (err)
+ return err;
+
+ /* no error and no firmware means nothing to copy */
+ if (!fw)
+ return 0;
+
+ err = uc_fw_copy(uc_fw, fw->data, fw->size,
+ XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT);
+
+ uc_fw_release(fw);
+
+ return err;
+}
+
+static u32 uc_fw_ggtt_offset(struct xe_uc_fw *uc_fw)
+{
+ return xe_bo_ggtt_addr(uc_fw->bo);
+}
+
+static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ u32 src_offset, dma_ctrl;
+ int ret;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ /* Set the source address for the uCode */
+ src_offset = uc_fw_ggtt_offset(uc_fw) + uc_fw->css_offset;
+ xe_mmio_write32(gt, DMA_ADDR_0_LOW, lower_32_bits(src_offset));
+ xe_mmio_write32(gt, DMA_ADDR_0_HIGH,
+ upper_32_bits(src_offset) | DMA_ADDRESS_SPACE_GGTT);
+
+ /* Set the DMA destination */
+ xe_mmio_write32(gt, DMA_ADDR_1_LOW, offset);
+ xe_mmio_write32(gt, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /*
+ * Set the transfer size. The header plus uCode will be copied to WOPCM
+ * via DMA, excluding any other components
+ */
+ xe_mmio_write32(gt, DMA_COPY_SIZE,
+ sizeof(struct uc_css_header) + uc_fw->ucode_size);
+
+ /* Start the DMA */
+ xe_mmio_write32(gt, DMA_CTRL,
+ _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
+ false);
+ if (ret)
+ drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
+
+ /* Disable the bits once DMA is over */
+ xe_mmio_write32(gt, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+
+ return ret;
+}
+
+int xe_uc_fw_upload(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
+{
+ struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ int err;
+
+ /* make sure the status was cleared the last time we reset the uc */
+ xe_assert(xe, !xe_uc_fw_is_loaded(uc_fw));
+
+ if (!xe_uc_fw_is_loadable(uc_fw))
+ return -ENOEXEC;
+
+ /* Call custom loader */
+ err = uc_fw_xfer(uc_fw, offset, dma_flags);
+ if (err)
+ goto fail;
+
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_TRANSFERRED);
+ return 0;
+
+fail:
+ drm_err(&xe->drm, "Failed to load %s firmware %s (%d)\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ err);
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_LOAD_FAIL);
+ return err;
+}
+
+static const char *version_type_repr(enum xe_uc_fw_version_types type)
+{
+ switch (type) {
+ case XE_UC_FW_VER_RELEASE:
+ return "release";
+ case XE_UC_FW_VER_COMPATIBILITY:
+ return "compatibility";
+ default:
+ return "Unknown version type";
+ }
+}
+
+void xe_uc_fw_print(struct xe_uc_fw *uc_fw, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "%s firmware: %s\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ drm_printf(p, "\tstatus: %s\n",
+ xe_uc_fw_status_repr(uc_fw->status));
+
+ print_uc_fw_version(p, &uc_fw->versions.wanted, "\twanted %s",
+ version_type_repr(uc_fw->versions.wanted_type));
+
+ for (i = 0; i < XE_UC_FW_VER_TYPE_COUNT; i++) {
+ struct xe_uc_fw_version *ver = &uc_fw->versions.found[i];
+
+ if (ver->major)
+ print_uc_fw_version(p, ver, "\tfound %s",
+ version_type_repr(i));
+ }
+
+ if (uc_fw->ucode_size)
+ drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
+ if (uc_fw->rsa_size)
+ drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
+}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h
new file mode 100644
index 000000000..85c20795d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_fw.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_UC_FW_H_
+#define _XE_UC_FW_H_
+
+#include <linux/errno.h>
+
+#include "xe_macros.h"
+#include "xe_uc_fw_abi.h"
+#include "xe_uc_fw_types.h"
+
+struct drm_printer;
+
+int xe_uc_fw_init(struct xe_uc_fw *uc_fw);
+size_t xe_uc_fw_copy_rsa(struct xe_uc_fw *uc_fw, void *dst, u32 max_len);
+int xe_uc_fw_upload(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags);
+int xe_uc_fw_check_version_requirements(struct xe_uc_fw *uc_fw);
+void xe_uc_fw_print(struct xe_uc_fw *uc_fw, struct drm_printer *p);
+
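+/*
+ * In the blob the RSA signature directly follows the CSS header and the
+ * uCode (see the "CSS-based Firmware Layout" documentation), hence the
+ * offset below; css_offset is non-zero only for GSC-packaged images.
+ */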
+static inline u32 xe_uc_fw_rsa_offset(struct xe_uc_fw *uc_fw)
+{
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->css_offset;
+}
+
+static inline void xe_uc_fw_change_status(struct xe_uc_fw *uc_fw,
+ enum xe_uc_fw_status status)
+{
+ uc_fw->__status = status;
+}
+
+static inline
+const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status)
+{
+ switch (status) {
+ case XE_UC_FIRMWARE_NOT_SUPPORTED:
+ return "N/A";
+ case XE_UC_FIRMWARE_UNINITIALIZED:
+ return "UNINITIALIZED";
+ case XE_UC_FIRMWARE_DISABLED:
+ return "DISABLED";
+ case XE_UC_FIRMWARE_SELECTED:
+ return "SELECTED";
+ case XE_UC_FIRMWARE_MISSING:
+ return "MISSING";
+ case XE_UC_FIRMWARE_ERROR:
+ return "ERROR";
+ case XE_UC_FIRMWARE_AVAILABLE:
+ return "AVAILABLE";
+ case XE_UC_FIRMWARE_INIT_FAIL:
+ return "INIT FAIL";
+ case XE_UC_FIRMWARE_LOADABLE:
+ return "LOADABLE";
+ case XE_UC_FIRMWARE_LOAD_FAIL:
+ return "LOAD FAIL";
+ case XE_UC_FIRMWARE_TRANSFERRED:
+ return "TRANSFERRED";
+ case XE_UC_FIRMWARE_RUNNING:
+ return "RUNNING";
+ }
+ return "<invalid>";
+}
+
+static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status)
+{
+ switch (status) {
+ case XE_UC_FIRMWARE_NOT_SUPPORTED:
+ return -ENODEV;
+ case XE_UC_FIRMWARE_UNINITIALIZED:
+ return -EACCES;
+ case XE_UC_FIRMWARE_DISABLED:
+ return -EPERM;
+ case XE_UC_FIRMWARE_MISSING:
+ return -ENOENT;
+ case XE_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ case XE_UC_FIRMWARE_INIT_FAIL:
+ case XE_UC_FIRMWARE_LOAD_FAIL:
+ return -EIO;
+ case XE_UC_FIRMWARE_SELECTED:
+ return -ESTALE;
+ case XE_UC_FIRMWARE_AVAILABLE:
+ case XE_UC_FIRMWARE_LOADABLE:
+ case XE_UC_FIRMWARE_TRANSFERRED:
+ case XE_UC_FIRMWARE_RUNNING:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static inline const char *xe_uc_fw_type_repr(enum xe_uc_fw_type type)
+{
+ switch (type) {
+ case XE_UC_FW_TYPE_GUC:
+ return "GuC";
+ case XE_UC_FW_TYPE_HUC:
+ return "HuC";
+ case XE_UC_FW_TYPE_GSC:
+ return "GSC";
+ default:
+ return "uC";
+ }
+}
+
+static inline enum xe_uc_fw_status
+__xe_uc_fw_status(struct xe_uc_fw *uc_fw)
+{
+ /* shouldn't call this before checking hw/blob availability */
+ XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED);
+ return uc_fw->status;
+}
+
+static inline bool xe_uc_fw_is_supported(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) != XE_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+static inline bool xe_uc_fw_is_enabled(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) > XE_UC_FIRMWARE_DISABLED;
+}
+
+static inline bool xe_uc_fw_is_disabled(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) == XE_UC_FIRMWARE_DISABLED;
+}
+
+static inline bool xe_uc_fw_is_available(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_AVAILABLE;
+}
+
+static inline bool xe_uc_fw_is_loadable(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_LOADABLE;
+}
+
+static inline bool xe_uc_fw_is_loaded(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) >= XE_UC_FIRMWARE_TRANSFERRED;
+}
+
+static inline bool xe_uc_fw_is_running(struct xe_uc_fw *uc_fw)
+{
+ return __xe_uc_fw_status(uc_fw) == XE_UC_FIRMWARE_RUNNING;
+}
+
+static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
+{
+ return uc_fw->user_overridden;
+}
+
+static inline void xe_uc_fw_sanitize(struct xe_uc_fw *uc_fw)
+{
+ if (xe_uc_fw_is_loaded(uc_fw))
+ xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_LOADABLE);
+}
+
+static inline u32 __xe_uc_fw_get_upload_size(struct xe_uc_fw *uc_fw)
+{
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size;
+}
+
+/**
+ * xe_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: Upload firmware size, or zero on firmware fetch failure.
+ */
+static inline u32 xe_uc_fw_get_upload_size(struct xe_uc_fw *uc_fw)
+{
+ if (!xe_uc_fw_is_available(uc_fw))
+ return 0;
+
+ return __xe_uc_fw_get_upload_size(uc_fw);
+}
+
+#define XE_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_abi.h b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
new file mode 100644
index 000000000..87ade4120
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_UC_FW_ABI_H
+#define _XE_UC_FW_ABI_H
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+/**
+ * DOC: CSS-based Firmware Layout
+ *
+ * The CSS-based firmware structure is used for GuC releases on all platforms
+ * and for HuC releases up to DG1. Starting from DG2/MTL the HuC uses the GSC
+ * layout instead.
+ * The CSS firmware layout looks like this::
+ *
+ * +======================================================================+
+ * | Firmware blob |
+ * +===============+===============+============+============+============+
+ * | CSS header | uCode | RSA key | modulus | exponent |
+ * +===============+===============+============+============+============+
+ * <-header size-> <---header size continued ----------->
+ * <--- size ----------------------------------------------------------->
+ * <-key size->
+ * <-mod size->
+ * <-exp size->
+ *
+ * The firmware may or may not have modulus key and exponent data. The header,
+ * uCode and RSA signature are must-have components that will be used by the
+ * driver. The length of each component, in dwords, can be found in the header.
+ * If modulus and exponent are not present in the fw (a.k.a. a truncated
+ * image), their length values still appear in the header.
+ *
+ * The driver does some basic fw size validation based on the following rules:
+ *
+ * 1. Header, uCode and RSA are must-have components.
+ * 2. All firmware components, if present, are in the sequence illustrated in
+ * the layout table above.
+ * 3. Length info of each component can be found in the header, in dwords.
+ * 4. Modulus and exponent key are not required by the driver. They may not
+ * appear in the fw, in which case the driver loads a truncated firmware.
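+ *
+ * As a worked example of rule 3: the uCode payload is
+ * (size_dw - header_size_dw) * 4 bytes long and the RSA signature is
+ * key_size_dw * 4 bytes long, which is exactly how parse_css_header()
+ * in xe_uc_fw.c derives ucode_size and rsa_size.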
+ */
+
+struct uc_css_header {
+ u32 module_type;
+ /*
+ * header_size includes all non-uCode bits, including css_header, rsa
+ * key, modulus key and exponent data.
+ */
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_DATE_MIN (0xFF << 8)
+#define CSS_DATE_SEC (0xFFFF << 16)
+ char username[8];
+ char buildnumber[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+ union {
+ u32 submission_version; /* only applies to GuC */
+ u32 reserved2;
+ };
+ u32 reserved0[12];
+ union {
+ u32 private_data_size; /* only applies to GuC */
+ u32 reserved1;
+ };
+ u32 header_info;
+} __packed;
+static_assert(sizeof(struct uc_css_header) == 128);
+
+/**
+ * DOC: GSC-based Firmware Layout
+ *
+ * The GSC-based firmware structure is used for GSC releases on all platforms
+ * and for HuC releases starting from DG2/MTL. Older HuC releases use the
+ * CSS-based layout instead. Unlike the CSS headers, the GSC headers use a
+ * directory + entries structure (i.e., there is an array of addresses
+ * pointing to specific header extensions identified by a name). Although the
+ * header structures are the same, some of the entries are specific to GSC
+ * while others are specific to HuC. The manifest header entry, which includes
+ * basic information about the binary (like the version), is always present,
+ * but it is named differently based on the binary type.
+ *
+ * The HuC binary starts with a Code Partition Directory (CPD) header. The
+ * entries we're interested in for use in the driver are:
+ *
+ * 1. "HUCP.man": points to the manifest header for the HuC.
+ * 2. "huc_fw": points to the FW code. On platforms that support load via DMA
+ * and 2-step HuC authentication (i.e. MTL+) this is a full CSS-based binary,
+ * while if the GSC is the one doing the load (which only happens on DG2)
+ * this section only contains the uCode.
+ *
+ * The GSC-based HuC firmware layout looks like this::
+ *
+ * +================================================+
+ * | CPD Header |
+ * +================================================+
+ * | CPD entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "HUCP.man" |
+ * | ... |
+ * | offset >----------------------------|------o
+ * | ... | |
+ * | entryY | |
+ * | "huc_fw" | |
+ * | ... | |
+ * | offset >----------------------------|----------o
+ * +================================================+ | |
+ * | |
+ * +================================================+ | |
+ * | Manifest Header |<-----o |
+ * | ... | |
+ * | FW version | |
+ * | ... | |
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | FW binary |<---------o
+ * | CSS (MTL+ only) |
+ * | uCode |
+ * | RSA Key (MTL+ only) |
+ * | ... |
+ * +================================================+
+ *
+ * The GSC binary starts instead with a layout header, which contains the
+ * locations of the various partitions of the binary. The one we're interested
+ * in is the boot1 partition, where we can find a BPDT header followed by
+ * entries, one of which points to the RBE sub-section of the partition, which
+ * contains the CPD. The GSC blob does not contain a CSS-based binary, so we
+ * only need to look for the manifest, which is under the "RBEP.man" CPD entry.
+ * Note that we have no need to find where the actual FW code is inside the
+ * image because the GSC ROM will itself parse the headers to find it and load
+ * it.
+ * The GSC firmware header layout looks like this::
+ *
+ * +================================================+
+ * | Layout Pointers |
+ * | ... |
+ * | Boot1 offset >---------------------------|------o
+ * | ... | |
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | BPDT header |<-----o
+ * +================================================+
+ * | BPDT entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | type == GSC_RBE |
+ * | offset >-----------------------------|------o
+ * | ... | |
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | CPD Header |<-----o
+ * +================================================+
+ * | CPD entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "RBEP.man" |
+ * | ... |
+ * | offset >----------------------------|------o
+ * | ... | |
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | Manifest Header |<-----o
+ * | ... |
+ * | FW version |
+ * | ... |
+ * | Security version |
+ * | ... |
+ * +================================================+
+ */
+
+struct gsc_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+} __packed;
+
+struct gsc_partition {
+ u32 offset;
+ u32 size;
+} __packed;
+
+struct gsc_layout_pointers {
+ u8 rom_bypass_vector[16];
+
+ /* size of this header section, not including ROM bypass vector */
+ u16 size;
+
+ /*
+ * bit0: Backup copy of layout pointers exists
+ * bits1-15: reserved
+ */
+ u8 flags;
+
+ u8 reserved;
+
+ u32 crc32;
+
+ struct gsc_partition datap;
+ struct gsc_partition boot1;
+ struct gsc_partition boot2;
+ struct gsc_partition boot3;
+ struct gsc_partition boot4;
+ struct gsc_partition boot5;
+ struct gsc_partition temp_pages;
+} __packed;
+
+/* Boot partition structures */
+struct gsc_bpdt_header {
+ u32 signature;
+#define GSC_BPDT_HEADER_SIGNATURE 0x000055AA
+
+ u16 descriptor_count; /* num of entries after the header */
+
+ u8 version;
+ u8 configuration;
+
+ u32 crc32;
+
+ u32 build_version;
+ struct gsc_version tool_version;
+} __packed;
+
+struct gsc_bpdt_entry {
+ /*
+ * Bits 0-15: BPDT entry type
+ * Bits 16-17: reserved
+ * Bit 18: code sub-partition
+ * Bits 19-31: reserved
+ */
+ u32 type;
+#define GSC_BPDT_ENTRY_TYPE_MASK GENMASK(15, 0)
+#define GSC_BPDT_ENTRY_TYPE_GSC_RBE 0x1
+
+ u32 sub_partition_offset; /* from the base of the BPDT header */
+ u32 sub_partition_size;
+} __packed;
+
+/* Code partition directory (CPD) structures */
+struct gsc_cpd_header_v2 {
+ u32 header_marker;
+#define GSC_CPD_HEADER_MARKER 0x44504324
+
+ u32 num_of_entries;
+ u8 header_version;
+ u8 entry_version;
+ u8 header_length; /* in bytes */
+ u8 flags;
+ u32 partition_name;
+ u32 crc32;
+} __packed;
+
+struct gsc_cpd_entry {
+ u8 name[12];
+
+ /*
+ * Bits 0-24: offset from the beginning of the code partition
+ * Bit 25: huffman compressed
+ * Bits 26-31: reserved
+ */
+ u32 offset;
+#define GSC_CPD_ENTRY_OFFSET_MASK GENMASK(24, 0)
+#define GSC_CPD_ENTRY_HUFFMAN_COMP BIT(25)
+
+ /*
+ * Module/Item length, in bytes. For Huffman-compressed modules, this
+ * refers to the uncompressed size. For software-compressed modules,
+ * this refers to the compressed size.
+ */
+ u32 length;
+
+ u8 reserved[4];
+} __packed;
+
+struct gsc_manifest_header {
+ u32 header_type; /* 0x4 for manifest type */
+ u32 header_length; /* in dwords */
+ u32 header_version;
+ u32 flags;
+ u32 vendor;
+ u32 date;
+ u32 size; /* In dwords, size of entire manifest (header + extensions) */
+ u32 header_id;
+ u32 internal_data;
+ struct gsc_version fw_version;
+ u32 security_version;
+ struct gsc_version meu_kit_version;
+ u32 meu_manifest_version;
+ u8 general_data[4];
+ u8 reserved3[56];
+ u32 modulus_size; /* in dwords */
+ u32 exponent_size; /* in dwords */
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
new file mode 100644
index 000000000..ee914a5d8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_UC_FW_TYPES_H_
+#define _XE_UC_FW_TYPES_H_
+
+#include <linux/types.h>
+
+struct xe_bo;
+
+/*
+ * +------------+---------------------------------------------------+
+ * | PHASE | FIRMWARE STATUS TRANSITIONS |
+ * +============+===================================================+
+ * | | UNINITIALIZED |
+ * +------------+- / | \ -+
+ * | | DISABLED <--/ | \--> NOT_SUPPORTED |
+ * | init_early | V |
+ * | | SELECTED |
+ * +------------+- / | \ -+
+ * | | MISSING <--/ | \--> ERROR |
+ * | fetch | V |
+ * | | AVAILABLE |
+ * +------------+- | \ -+
+ * | | | \--> INIT FAIL |
+ * | init | V |
+ * | | /------> LOADABLE <----<-----------\ |
+ * +------------+- \ / \ \ \ -+
+ * | | LOAD FAIL <--< \--> TRANSFERRED \ |
+ * | upload | \ / \ / |
+ * | | \---------/ \--> RUNNING |
+ * +------------+---------------------------------------------------+
+ */
+
+/*
+ * FIXME: Ported from i915; this state machine is way too complicated.
+ * Circle back and simplify this.
+ */
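+/*
+ * Note that the numeric ordering of these states is relied upon by the
+ * xe_uc_fw_is_*() helpers in xe_uc_fw.h, which compare against them with
+ * > / >=, so new states must be inserted in load-progress order.
+ */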
+enum xe_uc_fw_status {
+ XE_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */
+ XE_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */
+ XE_UC_FIRMWARE_DISABLED, /* disabled */
+ XE_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */
+ XE_UC_FIRMWARE_MISSING, /* blob not found on the system */
+ XE_UC_FIRMWARE_ERROR, /* invalid format or version */
+ XE_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ XE_UC_FIRMWARE_INIT_FAIL, /* failed to prepare fw objects for load */
+ XE_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
+ XE_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */
+ XE_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
+ XE_UC_FIRMWARE_RUNNING /* init/auth done */
+};
+
+enum xe_uc_fw_type {
+ XE_UC_FW_TYPE_GUC = 0,
+ XE_UC_FW_TYPE_HUC,
+ XE_UC_FW_TYPE_GSC,
+ XE_UC_FW_NUM_TYPES
+};
+
+/**
+ * struct xe_uc_fw_version - Version for XE micro controller firmware
+ */
+struct xe_uc_fw_version {
+ /** @major: major version of the FW */
+ u16 major;
+ /** @minor: minor version of the FW */
+ u16 minor;
+ /** @patch: patch version of the FW */
+ u16 patch;
+ /** @build: build version of the FW (not always available) */
+ u16 build;
+};
+
+enum xe_uc_fw_version_types {
+ XE_UC_FW_VER_RELEASE,
+ XE_UC_FW_VER_COMPATIBILITY,
+ XE_UC_FW_VER_TYPE_COUNT
+};
+
+/**
+ * struct xe_uc_fw - XE micro controller firmware
+ */
+struct xe_uc_fw {
+ /** @type: type of uC firmware */
+ enum xe_uc_fw_type type;
+ union {
+ /** @status: firmware load status */
+ const enum xe_uc_fw_status status;
+ /**
+ * @__status: private firmware load status - only to be used
+ * by firmware loading code
+ */
+ enum xe_uc_fw_status __status;
+ };
+ /** @path: path to uC firmware */
+ const char *path;
+ /** @user_overridden: user provided path to uC firmware via modparam */
+ bool user_overridden;
+ /**
+ * @full_ver_required: driver still under development and not ready
+ * for backward-compatible firmware. To be used only for **new**
+ * platforms, i.e. still under require_force_probe protection and not
+ * supported by i915.
+ */
+ bool full_ver_required;
+ /** @size: size of uC firmware including css header */
+ size_t size;
+
+ /** @bo: XE BO for uC firmware */
+ struct xe_bo *bo;
+
+ /** @has_gsc_headers: whether the FW image starts with GSC headers */
+ bool has_gsc_headers;
+
+ /*
+ * The firmware build process will generate a version header file with
+ * major and minor version defined. The versions are built into the CSS
+ * header of the firmware. The xe kernel driver sets the minimal firmware
+ * version required per platform.
+ */
+
+ /** @versions: FW versions wanted and found */
+ struct {
+ /** @wanted: firmware version wanted by platform */
+ struct xe_uc_fw_version wanted;
+ /** @wanted_type: type of firmware version wanted (release vs compatibility) */
+ enum xe_uc_fw_version_types wanted_type;
+ /** @found: fw versions found in firmware blob */
+ struct xe_uc_fw_version found[XE_UC_FW_VER_TYPE_COUNT];
+ } versions;
+
+ /** @rsa_size: RSA size */
+ u32 rsa_size;
+ /** @ucode_size: micro kernel size */
+ u32 ucode_size;
+ /** @css_offset: offset within the blob at which the CSS is located */
+ u32 css_offset;
+
+ /** @private_data_size: size of private data found in uC css header */
+ u32 private_data_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_types.h b/drivers/gpu/drm/xe/xe_uc_types.h
new file mode 100644
index 000000000..9924e4484
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_uc_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_UC_TYPES_H_
+#define _XE_UC_TYPES_H_
+
+#include "xe_gsc_types.h"
+#include "xe_guc_types.h"
+#include "xe_huc_types.h"
+#include "xe_wopcm_types.h"
+
+/**
+ * struct xe_uc - XE micro controllers
+ */
+struct xe_uc {
+ /** @guc: Graphics micro controller */
+ struct xe_guc guc;
+ /** @huc: HuC */
+ struct xe_huc huc;
+ /** @gsc: Graphics Security Controller */
+ struct xe_gsc gsc;
+ /** @wopcm: WOPCM */
+ struct xe_wopcm wopcm;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
new file mode 100644
index 000000000..a2024247b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -0,0 +1,3286 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "xe_vm.h"
+
+#include <linux/dma-fence-array.h>
+#include <linux/nospec.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/xe_drm.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_drm_client.h"
+#include "xe_exec_queue.h"
+#include "xe_gt.h"
+#include "xe_gt_pagefault.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_migrate.h"
+#include "xe_pat.h"
+#include "xe_pm.h"
+#include "xe_preempt_fence.h"
+#include "xe_pt.h"
+#include "xe_res_cursor.h"
+#include "xe_sync.h"
+#include "xe_trace.h"
+#include "generated/xe_wa_oob.h"
+#include "xe_wa.h"
+
+static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
+{
+ return vm->gpuvm.r_obj;
+}
+
+/**
+ * xe_vma_userptr_check_repin() - Advisory check for repin needed
+ * @uvma: The userptr vma
+ *
+ * Check if the userptr vma has been invalidated since last successful
+ * repin. The check is advisory only and the function can be called
+ * without the vm->userptr.notifier_lock held. There is no guarantee that the
+ * vma userptr will remain valid after a lockless check, so typically
+ * the call needs to be followed by a proper check under the notifier_lock.
+ *
+ * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ */
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+{
+ return mmu_interval_check_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq) ?
+ -EAGAIN : 0;
+}
+
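+/**
+ * xe_vma_userptr_pin_pages() - Pin the pages backing a userptr vma
+ * @uvma: The userptr vma
+ *
+ * Pin the CPU pages backing the userptr range with get_user_pages_fast(),
+ * build and DMA-map an sg table for them, and retry from scratch if the
+ * mmu notifier invalidated the range while pinning was in progress.
+ *
+ * Return: 0 on success, negative error code on error.
+ */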
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
+ struct page **pages;
+ bool in_kthread = !current->mm;
+ unsigned long notifier_seq;
+ int pinned, ret, i;
+ bool read_only = xe_vma_read_only(vma);
+
+ lockdep_assert_held(&vm->lock);
+ xe_assert(xe, xe_vma_is_userptr(vma));
+retry:
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return 0;
+
+ notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+ if (notifier_seq == userptr->notifier_seq)
+ return 0;
+
+ pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ if (userptr->sg) {
+ dma_unmap_sgtable(xe->drm.dev,
+ userptr->sg,
+ read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
+ }
+
+ pinned = ret = 0;
+ if (in_kthread) {
+ if (!mmget_not_zero(userptr->notifier.mm)) {
+ ret = -EFAULT;
+ goto mm_closed;
+ }
+ kthread_use_mm(userptr->notifier.mm);
+ }
+
+ while (pinned < num_pages) {
+ ret = get_user_pages_fast(xe_vma_userptr(vma) +
+ pinned * PAGE_SIZE,
+ num_pages - pinned,
+ read_only ? 0 : FOLL_WRITE,
+ &pages[pinned]);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ ret = 0;
+ }
+
+ if (in_kthread) {
+ kthread_unuse_mm(userptr->notifier.mm);
+ mmput(userptr->notifier.mm);
+ }
+mm_closed:
+ if (ret)
+ goto out;
+
+ ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
+ pinned, 0,
+ (u64)pinned << PAGE_SHIFT,
+ xe_sg_segment_size(xe->drm.dev),
+ GFP_KERNEL);
+ if (ret) {
+ userptr->sg = NULL;
+ goto out;
+ }
+ userptr->sg = &userptr->sgt;
+
+ ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
+ read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ if (ret) {
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
+ goto out;
+ }
+
+ for (i = 0; i < pinned; ++i) {
+ if (!read_only) {
+ lock_page(pages[i]);
+ set_page_dirty(pages[i]);
+ unlock_page(pages[i]);
+ }
+
+ mark_page_accessed(pages[i]);
+ }
+
+out:
+ release_pages(pages, pinned);
+ kvfree(pages);
+
+ if (!(ret < 0)) {
+ userptr->notifier_seq = notifier_seq;
+ if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
+ goto retry;
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
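+/*
+ * Return true if any exec queue on the VM either has no preempt fence
+ * installed or has one whose software signaling has already been enabled,
+ * i.e. a preemption is needed or already in flight.
+ */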
+static bool preempt_fences_waiting(struct xe_vm *vm)
+{
+ struct xe_exec_queue *q;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (!q->compute.pfence ||
+ (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &q->compute.pfence->flags))) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void free_preempt_fences(struct list_head *list)
+{
+ struct list_head *link, *next;
+
+ list_for_each_safe(link, next, list)
+ xe_preempt_fence_free(to_preempt_fence_from_link(link));
+}
+
+static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
+ unsigned int *count)
+{
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+
+ if (*count >= vm->preempt.num_exec_queues)
+ return 0;
+
+ for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
+ struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
+
+ if (IS_ERR(pfence))
+ return PTR_ERR(pfence);
+
+ list_move_tail(xe_preempt_fence_link(pfence), list);
+ }
+
+ return 0;
+}
+
+static int wait_for_existing_preempt_fences(struct xe_vm *vm)
+{
+ struct xe_exec_queue *q;
+
+ xe_vm_assert_held(vm);
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (q->compute.pfence) {
+ long timeout = dma_fence_wait(q->compute.pfence, false);
+
+ if (timeout < 0)
+ return -ETIME;
+ dma_fence_put(q->compute.pfence);
+ q->compute.pfence = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static bool xe_vm_is_idle(struct xe_vm *vm)
+{
+ struct xe_exec_queue *q;
+
+ xe_vm_assert_held(vm);
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ if (!xe_exec_queue_is_idle(q))
+ return false;
+ }
+
+ return true;
+}
+
+static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
+{
+ struct list_head *link;
+ struct xe_exec_queue *q;
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ struct dma_fence *fence;
+
+ link = list->next;
+ xe_assert(vm->xe, link != list);
+
+ fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
+ q, q->compute.context,
+ ++q->compute.seqno);
+ dma_fence_put(q->compute.pfence);
+ q->compute.pfence = fence;
+ }
+}
+
+static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
+{
+ struct xe_exec_queue *q;
+ int err;
+
+ if (!vm->preempt.num_exec_queues)
+ return 0;
+
+ err = xe_bo_lock(bo, true);
+ if (err)
+ return err;
+
+ err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
+ if (err)
+ goto out_unlock;
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+ if (q->compute.pfence) {
+ dma_resv_add_fence(bo->ttm.base.resv,
+ q->compute.pfence,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+
+out_unlock:
+ xe_bo_unlock(bo);
+ return err;
+}
+
+static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
+ struct drm_exec *exec)
+{
+ struct xe_exec_queue *q;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+ q->ops->resume(q);
+
+ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+ }
+}
+
+int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ struct drm_gpuvm_exec vm_exec = {
+ .vm = &vm->gpuvm,
+ .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
+ .num_fences = 1,
+ };
+ struct drm_exec *exec = &vm_exec.exec;
+ struct dma_fence *pfence;
+ int err;
+ bool wait;
+
+ xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
+
+ down_write(&vm->lock);
+ err = drm_gpuvm_exec_lock(&vm_exec);
+ if (err)
+ goto out_up_write;
+
+ pfence = xe_preempt_fence_create(q, q->compute.context,
+ ++q->compute.seqno);
+ if (!pfence) {
+ err = -ENOMEM;
+ goto out_fini;
+ }
+
+ list_add(&q->compute.link, &vm->preempt.exec_queues);
+ ++vm->preempt.num_exec_queues;
+ q->compute.pfence = pfence;
+
+ down_read(&vm->userptr.notifier_lock);
+
+ drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
+ DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
+
+ /*
+ * Check whether a preemption on the VM or a userptr invalidation is in
+ * flight; if so, trigger this preempt fence to sync state with the
+ * other preempt fences on the VM.
+ */
+ wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm);
+ if (wait)
+ dma_fence_enable_sw_signaling(pfence);
+
+ up_read(&vm->userptr.notifier_lock);
+
+out_fini:
+ drm_exec_fini(exec);
+out_up_write:
+ up_write(&vm->lock);
+
+ return err;
+}
+
+/**
+ * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
+ * @vm: The VM.
+ * @q: The exec_queue
+ */
+void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ if (!xe_vm_in_preempt_fence_mode(vm))
+ return;
+
+ down_write(&vm->lock);
+ list_del(&q->compute.link);
+ --vm->preempt.num_exec_queues;
+ if (q->compute.pfence) {
+ dma_fence_enable_sw_signaling(q->compute.pfence);
+ dma_fence_put(q->compute.pfence);
+ q->compute.pfence = NULL;
+ }
+ up_write(&vm->lock);
+}
+
+/**
+ * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function checks whether the VM has userptrs that need repinning,
+ * and provides a release-type barrier on the userptr.notifier_lock after
+ * checking.
+ *
+ * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
+ */
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
+{
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ return (list_empty(&vm->userptr.repin_list) &&
+ list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
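+
+/*
+ * Illustrative sketch only (not a real call site; names match this file):
+ * callers take the notifier lock in read mode, publish the fences that
+ * must order against invalidation, and only then perform this check:
+ *
+ *	down_read(&vm->userptr.notifier_lock);
+ *	... install preempt fences ...
+ *	if (__xe_vm_userptr_needs_repin(vm))
+ *		err = -EAGAIN;
+ *	up_read(&vm->userptr.notifier_lock);
+ *
+ * See xe_vm_add_compute_exec_queue() and preempt_rebind_work_func() for
+ * the actual users.
+ */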
+
+#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
+
+static void xe_vm_kill(struct xe_vm *vm)
+{
+ struct xe_exec_queue *q;
+
+ lockdep_assert_held(&vm->lock);
+
+ xe_vm_lock(vm, false);
+ vm->flags |= XE_VM_FLAG_BANNED;
+ trace_xe_vm_kill(vm);
+
+ list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+ q->ops->kill(q);
+ xe_vm_unlock(vm);
+
+ /* TODO: Inform user the VM is banned */
+}
+
+/**
+ * xe_vm_validate_should_retry() - Whether to retry after a validate error.
+ * @exec: The drm_exec object used for locking before validation.
+ * @err: The error returned from ttm_bo_validate().
+ * @end: A ktime_t cookie that should be set to 0 before first use and
+ * that should be reused on subsequent calls.
+ *
+ * With multiple active VMs, under memory pressure, it is possible for
+ * ttm_bo_validate() to run into -EDEADLK, in which case it returns
+ * -ENOMEM. Until ttm properly handles locking in such scenarios, the best
+ * the driver can do is retry with a timeout. Check if that is necessary, and
+ * if so unlock the drm_exec's objects while keeping the ticket to prepare
+ * for a rerun.
+ *
+ * Return: true if a retry after drm_exec_init() is recommended;
+ * false otherwise.
+ */
+bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
+{
+ ktime_t cur;
+
+ if (err != -ENOMEM)
+ return false;
+
+ cur = ktime_get();
+ *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
+ if (!ktime_before(cur, *end))
+ return false;
+
+ msleep(20);
+ return true;
+}
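+
+/*
+ * Illustrative usage sketch, mirroring preempt_rebind_work_func() below;
+ * the surrounding setup (exec, err declarations, the validate target) is
+ * assumed, not spelled out:
+ *
+ *	ktime_t end = 0;
+ *
+ *	retry:
+ *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = ...validate objects...;
+ *		drm_exec_retry_on_contention(&exec);
+ *	}
+ *	drm_exec_fini(&exec);
+ *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
+ *		goto retry;
+ *
+ * The ktime_t cookie bounds the total time spent retrying to
+ * XE_VM_REBIND_RETRY_TIMEOUT_MS.
+ */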
+
+static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+ struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
+ struct drm_gpuva *gpuva;
+ int ret;
+
+ lockdep_assert_held(&vm->lock);
+ drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
+ list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
+ &vm->rebind_list);
+
+ ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
+ if (ret)
+ return ret;
+
+ vm_bo->evicted = false;
+ return 0;
+}
+
+static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
+ bool *done)
+{
+ int err;
+
+ /*
+ * One reservation slot for each preempt fence plus one slot per tile
+ * for a possible rebind
+ */
+ err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
+ vm->xe->info.tile_count);
+ if (err)
+ return err;
+
+ if (xe_vm_is_idle(vm)) {
+ vm->preempt.rebind_deactivated = true;
+ *done = true;
+ return 0;
+ }
+
+ if (!preempt_fences_waiting(vm)) {
+ *done = true;
+ return 0;
+ }
+
+ err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
+ if (err)
+ return err;
+
+ err = wait_for_existing_preempt_fences(vm);
+ if (err)
+ return err;
+
+ return drm_gpuvm_validate(&vm->gpuvm, exec);
+}
+
+static void preempt_rebind_work_func(struct work_struct *w)
+{
+ struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
+ struct drm_exec exec;
+ unsigned int fence_count = 0;
+ LIST_HEAD(preempt_fences);
+ ktime_t end = 0;
+ int err = 0;
+ long wait;
+ int __maybe_unused tries = 0;
+
+ xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
+ trace_xe_vm_rebind_worker_enter(vm);
+
+ down_write(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm)) {
+ up_write(&vm->lock);
+ trace_xe_vm_rebind_worker_exit(vm);
+ return;
+ }
+
+retry:
+ if (xe_vm_userptr_check_repin(vm)) {
+ err = xe_vm_userptr_pin(vm);
+ if (err)
+ goto out_unlock_outer;
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+
+ drm_exec_until_all_locked(&exec) {
+ bool done = false;
+
+ err = xe_preempt_work_begin(&exec, vm, &done);
+ drm_exec_retry_on_contention(&exec);
+ if (err || done) {
+ drm_exec_fini(&exec);
+ if (err && xe_vm_validate_should_retry(&exec, err, &end))
+ err = -EAGAIN;
+
+ goto out_unlock_outer;
+ }
+ }
+
+ err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
+ if (err)
+ goto out_unlock;
+
+ err = xe_vm_rebind(vm, true);
+ if (err)
+ goto out_unlock;
+
+ /* Wait on rebinds and munmap style VM unbinds */
+ wait = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (wait <= 0) {
+ err = -ETIME;
+ goto out_unlock;
+ }
+
+#define retry_required(__tries, __vm) \
+ (IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
+ (!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
+ __xe_vm_userptr_needs_repin(__vm))
+
+ down_read(&vm->userptr.notifier_lock);
+ if (retry_required(tries, vm)) {
+ up_read(&vm->userptr.notifier_lock);
+ err = -EAGAIN;
+ goto out_unlock;
+ }
+
+#undef retry_required
+
+ spin_lock(&vm->xe->ttm.lru_lock);
+ ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
+ spin_unlock(&vm->xe->ttm.lru_lock);
+
+ /* Point of no return. */
+ arm_preempt_fences(vm, &preempt_fences);
+ resume_and_reinstall_preempt_fences(vm, &exec);
+ up_read(&vm->userptr.notifier_lock);
+
+out_unlock:
+ drm_exec_fini(&exec);
+out_unlock_outer:
+ if (err == -EAGAIN) {
+ trace_xe_vm_rebind_worker_retry(vm);
+ goto retry;
+ }
+
+ if (err) {
+ drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
+ xe_vm_kill(vm);
+ }
+ up_write(&vm->lock);
+
+ free_preempt_fences(&preempt_fences);
+
+ trace_xe_vm_rebind_worker_exit(vm);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+ struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long err;
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ down_write(&vm->userptr.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ /* No need to stop gpu access if the userptr is not yet bound. */
+ if (!userptr->initial_bind) {
+ up_write(&vm->userptr.notifier_lock);
+ return true;
+ }
+
+ /*
+ * Tell exec and rebind worker they need to repin and rebind this
+ * userptr.
+ */
+ if (!xe_vm_in_fault_mode(vm) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&userptr->invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+
+ up_write(&vm->userptr.notifier_lock);
+
+ /*
+ * Preempt fences turn into schedule disables, pipeline these.
+ * Note that even in fault mode, we need to wait for binds and
+ * unbinds to complete, and those are attached as BOOKKEEP fences
+ * to the vm.
+ */
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ if (xe_vm_in_fault_mode(vm)) {
+ err = xe_vm_invalidate_vma(vma);
+ XE_WARN_ON(err);
+ }
+
+ trace_xe_vma_userptr_invalidate_complete(vma);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
+ .invalidate = vma_userptr_invalidate,
+};
+
+int xe_vm_userptr_pin(struct xe_vm *vm)
+{
+ struct xe_userptr_vma *uvma, *next;
+ int err = 0;
+ LIST_HEAD(tmp_evict);
+
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Collect invalidated userptrs */
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+ userptr.invalidate_link) {
+ list_del_init(&uvma->userptr.invalidate_link);
+ list_move_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+
+ /* Pin and move to temporary list */
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ err = xe_vma_userptr_pin_pages(uvma);
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
+
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ xe_vm_unlock(vm);
+ if (err)
+ return err;
+ } else {
+ if (err < 0)
+ return err;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function does an advisory check of whether the VM has userptrs that
+ * need repinning.
+ *
+ * Return: 0 if there are no indications of userptrs needing repinning,
+ * -EAGAIN if there are.
+ */
+int xe_vm_userptr_check_repin(struct xe_vm *vm)
+{
+ return (list_empty_careful(&vm->userptr.repin_list) &&
+ list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
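+
+/*
+ * Illustrative sketch: the check is cheap and lock-free, so the rebind
+ * worker uses it to decide whether the expensive repin is needed at all,
+ * then re-checks under the notifier lock via __xe_vm_userptr_needs_repin():
+ *
+ *	if (xe_vm_userptr_check_repin(vm)) {
+ *		err = xe_vm_userptr_pin(vm);
+ *		if (err)
+ *			...bail out or retry...;
+ *	}
+ */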
+
+static struct dma_fence *
+xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op);
+
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
+{
+ struct dma_fence *fence;
+ struct xe_vma *vma, *next;
+
+ lockdep_assert_held(&vm->lock);
+ if (xe_vm_in_lr_mode(vm) && !rebind_worker)
+ return 0;
+
+ xe_vm_assert_held(vm);
+ list_for_each_entry_safe(vma, next, &vm->rebind_list,
+ combined_links.rebind) {
+ xe_assert(vm->xe, vma->tile_present);
+
+ list_del_init(&vma->combined_links.rebind);
+ if (rebind_worker)
+ trace_xe_vma_rebind_worker(vma);
+ else
+ trace_xe_vma_rebind_exec(vma);
+ fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
+ }
+
+ return 0;
+}
+
+static void xe_vma_free(struct xe_vma *vma)
+{
+ if (xe_vma_is_userptr(vma))
+ kfree(to_userptr_vma(vma));
+ else
+ kfree(vma);
+}
+
+#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
+#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+
+static struct xe_vma *xe_vma_create(struct xe_vm *vm,
+ struct xe_bo *bo,
+ u64 bo_offset_or_userptr,
+ u64 start, u64 end,
+ u16 pat_index, unsigned int flags)
+{
+ struct xe_vma *vma;
+ struct xe_tile *tile;
+ u8 id;
+ bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
+ bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
+
+ xe_assert(vm->xe, start < end);
+ xe_assert(vm->xe, end < vm->size);
+
+ /*
+ * Allocate so that what xe_vma_is_userptr() later returns
+ * matches the type that was allocated.
+ */
+ if (!bo && !is_null) {
+ struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
+
+ if (!uvma)
+ return ERR_PTR(-ENOMEM);
+
+ vma = &uvma->vma;
+ } else {
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_null)
+ vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+ if (bo)
+ vma->gpuva.gem.obj = &bo->ttm.base;
+ }
+
+ INIT_LIST_HEAD(&vma->combined_links.rebind);
+
+ INIT_LIST_HEAD(&vma->gpuva.gem.entry);
+ vma->gpuva.vm = &vm->gpuvm;
+ vma->gpuva.va.addr = start;
+ vma->gpuva.va.range = end - start + 1;
+ if (read_only)
+ vma->gpuva.flags |= XE_VMA_READ_ONLY;
+
+ for_each_tile(tile, vm->xe, id)
+ vma->tile_mask |= 0x1 << id;
+
+ if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC)
+ vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
+
+ vma->pat_index = pat_index;
+
+ if (bo) {
+ struct drm_gpuvm_bo *vm_bo;
+
+ xe_bo_assert_held(bo);
+
+ vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
+ if (IS_ERR(vm_bo)) {
+ xe_vma_free(vma);
+ return ERR_CAST(vm_bo);
+ }
+
+ drm_gpuvm_bo_extobj_add(vm_bo);
+ drm_gem_object_get(&bo->ttm.base);
+ vma->gpuva.gem.offset = bo_offset_or_userptr;
+ drm_gpuva_link(&vma->gpuva, vm_bo);
+ drm_gpuvm_bo_put(vm_bo);
+ } else /* userptr or null */ {
+ if (!is_null) {
+ struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+ u64 size = end - start + 1;
+ int err;
+
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
+ vma->gpuva.gem.offset = bo_offset_or_userptr;
+
+ err = mmu_interval_notifier_insert(&userptr->notifier,
+ current->mm,
+ xe_vma_userptr(vma), size,
+ &vma_userptr_notifier_ops);
+ if (err) {
+ xe_vma_free(vma);
+ return ERR_PTR(err);
+ }
+
+ userptr->notifier_seq = LONG_MAX;
+ }
+
+ xe_vm_get(vm);
+ }
+
+ return vma;
+}
+
+static void xe_vma_destroy_late(struct xe_vma *vma)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ bool read_only = xe_vma_read_only(vma);
+
+ if (vma->ufence) {
+ xe_sync_ufence_put(vma->ufence);
+ vma->ufence = NULL;
+ }
+
+ if (xe_vma_is_userptr(vma)) {
+ struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+
+ if (userptr->sg) {
+ dma_unmap_sgtable(xe->drm.dev,
+ userptr->sg,
+ read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
+ }
+
+ /*
+ * Since userptr pages are not pinned, we can't remove
+ * the notifier until we're sure the GPU is not accessing
+ * them anymore.
+ */
+ mmu_interval_notifier_remove(&userptr->notifier);
+ xe_vm_put(vm);
+ } else if (xe_vma_is_null(vma)) {
+ xe_vm_put(vm);
+ } else {
+ xe_bo_put(xe_vma_bo(vma));
+ }
+
+ xe_vma_free(vma);
+}
+
+static void vma_destroy_work_func(struct work_struct *w)
+{
+ struct xe_vma *vma =
+ container_of(w, struct xe_vma, destroy_work);
+
+ xe_vma_destroy_late(vma);
+}
+
+static void vma_destroy_cb(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);
+
+ INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
+ queue_work(system_unbound_wq, &vma->destroy_work);
+}
+
+static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ lockdep_assert_held_write(&vm->lock);
+ xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
+
+ if (xe_vma_is_userptr(vma)) {
+ xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ } else if (!xe_vma_is_null(vma)) {
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ drm_gpuva_unlink(&vma->gpuva);
+ }
+
+ xe_vm_assert_held(vm);
+ if (fence) {
+ int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
+ vma_destroy_cb);
+
+ if (ret) {
+ XE_WARN_ON(ret != -ENOENT);
+ xe_vma_destroy_late(vma);
+ }
+ } else {
+ xe_vma_destroy_late(vma);
+ }
+}
+
+/**
+ * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * @exec: The drm_exec object we're currently locking for.
+ * @vma: The vma for which we want to lock the vm resv and any attached
+ * object's resv.
+ * @num_shared: The number of dma-fence slots to pre-allocate in the
+ * objects' reservation objects.
+ *
+ * Return: 0 on success, negative error code on error. In particular
+ * may return -EDEADLK on WW transaction contention and -EINTR if
+ * an interruptible wait is terminated by a signal.
+ */
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+ unsigned int num_shared)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_bo *bo = xe_vma_bo(vma);
+ int err;
+
+ XE_WARN_ON(!vm);
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+ else
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm) {
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ else
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
+ }
+
+ return err;
+}
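+
+/*
+ * Illustrative sketch: like other drm_exec helpers, this is intended to
+ * be called from inside a drm_exec_until_all_locked() loop so that WW
+ * contention can roll back and retry the whole transaction:
+ *
+ *	drm_exec_init(&exec, 0, 0);
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = xe_vm_prepare_vma(&exec, vma, num_shared);
+ *		drm_exec_retry_on_contention(&exec);
+ *	}
+ *
+ * xe_vma_destroy_unlocked() below is a minimal real example of the
+ * pattern.
+ */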
+
+static void xe_vma_destroy_unlocked(struct xe_vma *vma)
+{
+ struct drm_exec exec;
+ int err;
+
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = xe_vm_prepare_vma(&exec, vma, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (XE_WARN_ON(err))
+ break;
+ }
+
+ xe_vma_destroy(vma, NULL);
+
+ drm_exec_fini(&exec);
+}
+
+struct xe_vma *
+xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
+{
+ struct drm_gpuva *gpuva;
+
+ lockdep_assert_held(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm))
+ return NULL;
+
+ xe_assert(vm->xe, start + range <= vm->size);
+
+ gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
+
+ return gpuva ? gpuva_to_vma(gpuva) : NULL;
+}
+
+static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
+{
+ int err;
+
+ xe_assert(vm->xe, xe_vma_vm(vma) == vm);
+ lockdep_assert_held(&vm->lock);
+
+ err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
+ XE_WARN_ON(err); /* Shouldn't be possible */
+
+ return err;
+}
+
+static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
+{
+ xe_assert(vm->xe, xe_vma_vm(vma) == vm);
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuva_remove(&vma->gpuva);
+ if (vm->usm.last_fault_vma == vma)
+ vm->usm.last_fault_vma = NULL;
+}
+
+static struct drm_gpuva_op *xe_vm_op_alloc(void)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+
+ if (unlikely(!op))
+ return NULL;
+
+ return &op->base;
+}
+
+static void xe_vm_free(struct drm_gpuvm *gpuvm);
+
+static struct drm_gpuvm_ops gpuvm_ops = {
+ .op_alloc = xe_vm_op_alloc,
+ .vm_bo_validate = xe_gpuvm_validate,
+ .vm_free = xe_vm_free,
+};
+
+static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
+{
+ u64 pte = 0;
+
+ if (pat_index & BIT(0))
+ pte |= XE_PPGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= XE_PPGTT_PTE_PAT1;
+
+ return pte;
+}
+
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index,
+ u32 pt_level)
+{
+ u64 pte = 0;
+
+ if (pat_index & BIT(0))
+ pte |= XE_PPGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= XE_PPGTT_PTE_PAT1;
+
+ if (pat_index & BIT(2)) {
+ if (pt_level)
+ pte |= XE_PPGTT_PDE_PDPE_PAT2;
+ else
+ pte |= XE_PPGTT_PTE_PAT2;
+ }
+
+ if (pat_index & BIT(3))
+ pte |= XELPG_PPGTT_PTE_PAT3;
+
+ if (pat_index & BIT(4))
+ pte |= XE2_PPGTT_PTE_PAT4;
+
+ return pte;
+}
+
+static u64 pte_encode_ps(u32 pt_level)
+{
+ XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL);
+
+ if (pt_level == 1)
+ return XE_PDE_PS_2M;
+ else if (pt_level == 2)
+ return XE_PDPE_PS_1G;
+
+ return 0;
+}
+
+static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ const u16 pat_index)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u64 pde;
+
+ pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pde |= pde_encode_pat_index(xe, pat_index);
+
+ return pde;
+}
+
+static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
+ u16 pat_index, u32 pt_level)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u64 pte;
+
+ pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
+ pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_ps(pt_level);
+
+ if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
+ pte |= XE_PPGTT_PTE_DM;
+
+ return pte;
+}
+
+static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
+ u16 pat_index, u32 pt_level)
+{
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+
+ pte |= XE_PAGE_PRESENT;
+
+ if (likely(!xe_vma_read_only(vma)))
+ pte |= XE_PAGE_RW;
+
+ pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_ps(pt_level);
+
+ if (unlikely(xe_vma_is_null(vma)))
+ pte |= XE_PTE_NULL;
+
+ return pte;
+}
+
+static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
+ u16 pat_index,
+ u32 pt_level, bool devmem, u64 flags)
+{
+ u64 pte;
+
+ /* Avoid passing random bits directly as flags */
+ xe_assert(xe, !(flags & ~XE_PTE_PS64));
+
+ pte = addr;
+ pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
+ pte |= pte_encode_pat_index(xe, pat_index, pt_level);
+ pte |= pte_encode_ps(pt_level);
+
+ if (devmem)
+ pte |= XE_PPGTT_PTE_DM;
+
+ pte |= flags;
+
+ return pte;
+}
+
+static const struct xe_pt_ops xelp_pt_ops = {
+ .pte_encode_bo = xelp_pte_encode_bo,
+ .pte_encode_vma = xelp_pte_encode_vma,
+ .pte_encode_addr = xelp_pte_encode_addr,
+ .pde_encode_bo = xelp_pde_encode_bo,
+};
+
+static void vm_destroy_work_func(struct work_struct *w);
+
+/**
+ * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the
+ * given tile and vm.
+ * @xe: xe device.
+ * @tile: tile to set up for.
+ * @vm: vm to set up for.
+ *
+ * Sets up a pagetable tree with one page-table per level and a single
+ * leaf PTE. All pagetable entries point to the single page-table or,
+ * for MAX_HUGEPTE_LEVEL, a NULL huge PTE that returns 0 on reads and
+ * turns writes into NOPs.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
+ struct xe_vm *vm)
+{
+ u8 id = tile->id;
+ int i;
+
+ for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
+ vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
+ if (IS_ERR(vm->scratch_pt[id][i]))
+ return PTR_ERR(vm->scratch_pt[id][i]);
+
+ xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
+ }
+
+ return 0;
+}
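+
+/*
+ * Worked example (illustrative, assuming MAX_HUGEPTE_LEVEL == 2 and a
+ * level-3 page-table root): xe_vm_create_scratch() then creates only
+ * scratch_pt[id][2]; every root entry is later pointed at it when the
+ * root is populated, and its own entries are NULL huge PTEs, so any
+ * unmapped 1G range reads back zeroes and silently drops writes.
+ */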
+
+static void xe_vm_free_scratch(struct xe_vm *vm)
+{
+ struct xe_tile *tile;
+ u8 id;
+
+ if (!xe_vm_has_scratch(vm))
+ return;
+
+ for_each_tile(tile, vm->xe, id) {
+ u32 i;
+
+ if (!vm->pt_root[id])
+ continue;
+
+ for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i)
+ if (vm->scratch_pt[id][i])
+ xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL);
+ }
+}
+
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+{
+ struct drm_gem_object *vm_resv_obj;
+ struct xe_vm *vm;
+ int err, number_tiles = 0;
+ struct xe_tile *tile;
+ u8 id;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return ERR_PTR(-ENOMEM);
+
+ vm->xe = xe;
+
+ vm->size = 1ull << xe->info.va_bits;
+
+ vm->flags = flags;
+
+ init_rwsem(&vm->lock);
+
+ INIT_LIST_HEAD(&vm->rebind_list);
+
+ INIT_LIST_HEAD(&vm->userptr.repin_list);
+ INIT_LIST_HEAD(&vm->userptr.invalidated);
+ init_rwsem(&vm->userptr.notifier_lock);
+ spin_lock_init(&vm->userptr.invalidated_lock);
+
+ INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
+
+ INIT_LIST_HEAD(&vm->preempt.exec_queues);
+ vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */
+
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_init(&vm->rftree[id]);
+
+ vm->pt_ops = &xelp_pt_ops;
+
+ if (!(flags & XE_VM_FLAG_MIGRATION))
+ xe_device_mem_access_get(xe);
+
+ vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+ if (!vm_resv_obj) {
+ err = -ENOMEM;
+ goto err_no_resv;
+ }
+
+ drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
+ vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops);
+
+ drm_gem_object_put(vm_resv_obj);
+
+ err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ if (err)
+ goto err_close;
+
+ if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ vm->flags |= XE_VM_FLAG_64K;
+
+ for_each_tile(tile, xe, id) {
+ if (flags & XE_VM_FLAG_MIGRATION &&
+ tile->id != XE_VM_FLAG_TILE_ID(flags))
+ continue;
+
+ vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
+ if (IS_ERR(vm->pt_root[id])) {
+ err = PTR_ERR(vm->pt_root[id]);
+ vm->pt_root[id] = NULL;
+ goto err_unlock_close;
+ }
+ }
+
+ if (xe_vm_has_scratch(vm)) {
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
+
+ err = xe_vm_create_scratch(xe, tile, vm);
+ if (err)
+ goto err_unlock_close;
+ }
+ vm->batch_invalidate_tlb = true;
+ }
+
+ if (flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ vm->flags |= XE_VM_FLAG_LR_MODE;
+ vm->batch_invalidate_tlb = false;
+ }
+
+ /* Fill pt_root after allocating scratch tables */
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
+
+ xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ }
+ dma_resv_unlock(xe_vm_resv(vm));
+
+ /* Kernel migration VM shouldn't have a circular dependency on itself */
+ if (!(flags & XE_VM_FLAG_MIGRATION)) {
+ for_each_tile(tile, xe, id) {
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_vm *migrate_vm;
+ struct xe_exec_queue *q;
+ u32 create_flags = EXEC_QUEUE_FLAG_VM;
+
+ if (!vm->pt_root[id])
+ continue;
+
+ migrate_vm = xe_migrate_get_vm(tile->migrate);
+ q = xe_exec_queue_create_class(xe, gt, migrate_vm,
+ XE_ENGINE_CLASS_COPY,
+ create_flags);
+ xe_vm_put(migrate_vm);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto err_close;
+ }
+ vm->q[id] = q;
+ number_tiles++;
+ }
+ }
+
+ if (number_tiles > 1)
+ vm->composite_fence_ctx = dma_fence_context_alloc(1);
+
+ mutex_lock(&xe->usm.lock);
+ if (flags & XE_VM_FLAG_FAULT_MODE)
+ xe->usm.num_vm_in_fault_mode++;
+ else if (!(flags & XE_VM_FLAG_MIGRATION))
+ xe->usm.num_vm_in_non_fault_mode++;
+ mutex_unlock(&xe->usm.lock);
+
+ trace_xe_vm_create(vm);
+
+ return vm;
+
+err_unlock_close:
+ dma_resv_unlock(xe_vm_resv(vm));
+err_close:
+ xe_vm_close_and_put(vm);
+ return ERR_PTR(err);
+
+err_no_resv:
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_fini(&vm->rftree[id]);
+ kfree(vm);
+ if (!(flags & XE_VM_FLAG_MIGRATION))
+ xe_device_mem_access_put(xe);
+ return ERR_PTR(err);
+}
+
+static void xe_vm_close(struct xe_vm *vm)
+{
+ down_write(&vm->lock);
+ vm->size = 0;
+ up_write(&vm->lock);
+}
+
+void xe_vm_close_and_put(struct xe_vm *vm)
+{
+ LIST_HEAD(contested);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ struct xe_vma *vma, *next_vma;
+ struct drm_gpuva *gpuva, *next;
+ u8 id;
+
+ xe_assert(xe, !vm->preempt.num_exec_queues);
+
+ xe_vm_close(vm);
+ if (xe_vm_in_preempt_fence_mode(vm))
+ flush_work(&vm->preempt.rebind_work);
+
+ down_write(&vm->lock);
+ for_each_tile(tile, xe, id) {
+ if (vm->q[id])
+ xe_exec_queue_last_fence_put(vm->q[id], vm);
+ }
+ up_write(&vm->lock);
+
+ for_each_tile(tile, xe, id) {
+ if (vm->q[id]) {
+ xe_exec_queue_kill(vm->q[id]);
+ xe_exec_queue_put(vm->q[id]);
+ vm->q[id] = NULL;
+ }
+ }
+
+ down_write(&vm->lock);
+ xe_vm_lock(vm, false);
+ drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) {
+ vma = gpuva_to_vma(gpuva);
+
+ if (xe_vma_has_no_bo(vma)) {
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags |= XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ }
+
+ xe_vm_remove_vma(vm, vma);
+
+ /* Easy case: no BO, or a VM-private BO whose resv is already held */
+ if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) {
+ list_del_init(&vma->combined_links.rebind);
+ xe_vma_destroy(vma, NULL);
+ continue;
+ }
+
+ list_move_tail(&vma->combined_links.destroy, &contested);
+ vma->gpuva.flags |= XE_VMA_DESTROYED;
+ }
+
+ /*
+ * All vm operations will add shared fences to resv.
+ * The only exception is eviction for a shared object,
+ * but even so, the unbind when evicted would still
+ * install a fence to resv. Hence it's safe to
+ * destroy the pagetables immediately.
+ */
+ xe_vm_free_scratch(vm);
+
+ for_each_tile(tile, xe, id) {
+ if (vm->pt_root[id]) {
+ xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+ vm->pt_root[id] = NULL;
+ }
+ }
+ xe_vm_unlock(vm);
+
+ /*
+ * The VM is now dead, so no new VMAs can be added to it.
+ * Since we hold a refcount to each bo, we can remove and free
+ * the members safely without locking.
+ */
+ list_for_each_entry_safe(vma, next_vma, &contested,
+ combined_links.destroy) {
+ list_del_init(&vma->combined_links.destroy);
+ xe_vma_destroy_unlocked(vma);
+ }
+
+ up_write(&vm->lock);
+
+ mutex_lock(&xe->usm.lock);
+ if (vm->flags & XE_VM_FLAG_FAULT_MODE)
+ xe->usm.num_vm_in_fault_mode--;
+ else if (!(vm->flags & XE_VM_FLAG_MIGRATION))
+ xe->usm.num_vm_in_non_fault_mode--;
+ mutex_unlock(&xe->usm.lock);
+
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_fini(&vm->rftree[id]);
+
+ xe_vm_put(vm);
+}
+
+static void vm_destroy_work_func(struct work_struct *w)
+{
+ struct xe_vm *vm =
+ container_of(w, struct xe_vm, destroy_work);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ u8 id;
+ void *lookup;
+
+ /* xe_vm_close_and_put was not called? */
+ xe_assert(xe, !vm->size);
+
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
+ xe_device_mem_access_put(xe);
+
+ if (xe->info.has_asid && vm->usm.asid) {
+ mutex_lock(&xe->usm.lock);
+ lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
+ xe_assert(xe, lookup == vm);
+ mutex_unlock(&xe->usm.lock);
+ }
+ }
+
+ for_each_tile(tile, xe, id)
+ XE_WARN_ON(vm->pt_root[id]);
+
+ trace_xe_vm_free(vm);
+ kfree(vm);
+}
+
+static void xe_vm_free(struct drm_gpuvm *gpuvm)
+{
+ struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm);
+
+ /* To destroy the VM we need to be able to sleep */
+ queue_work(system_unbound_wq, &vm->destroy_work);
+}
+
+struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
+{
+ struct xe_vm *vm;
+
+ mutex_lock(&xef->vm.lock);
+ vm = xa_load(&xef->vm.xa, id);
+ if (vm)
+ xe_vm_get(vm);
+ mutex_unlock(&xef->vm.lock);
+
+ return vm;
+}
+
+u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
+{
+ return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
+ tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
+}
+
+static struct xe_exec_queue *
+to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
+{
+ return q ? q : vm->q[0];
+}
+
+static struct dma_fence *
+xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_tile *tile;
+ struct dma_fence *fence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
+ int cur_fence = 0, i;
+ int number_tiles = hweight8(vma->tile_present);
+ int err;
+ u8 id;
+
+ trace_xe_vma_unbind(vma);
+
+ if (vma->ufence) {
+ struct xe_user_fence * const f = vma->ufence;
+
+ if (!xe_sync_ufence_get_status(f))
+ return ERR_PTR(-EBUSY);
+
+ vma->ufence = NULL;
+ xe_sync_ufence_put(f);
+ }
+
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
+ GFP_KERNEL);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!(vma->tile_present & BIT(id)))
+ goto next;
+
+ fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
+ first_op ? syncs : NULL,
+ first_op ? num_syncs : 0);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_fences;
+ }
+
+ if (fences)
+ fences[cur_fence++] = fence;
+
+next:
+ if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
+ }
+
+ if (fences) {
+ cf = dma_fence_array_create(number_tiles, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --vm->composite_fence_seqno;
+ err = -ENOMEM;
+ goto err_fences;
+ }
+ }
+
+ fence = cf ? &cf->base : !fence ?
+ xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ }
+
+ return fence;
+
+err_fences:
+ if (fences) {
+ while (cur_fence)
+ dma_fence_put(fences[--cur_fence]);
+ kfree(fences);
+ }
+
+ return ERR_PTR(err);
+}
+
+static struct dma_fence *
+xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op)
+{
+ struct xe_tile *tile;
+ struct dma_fence *fence;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ int cur_fence = 0, i;
+ int number_tiles = hweight8(vma->tile_mask);
+ int err;
+ u8 id;
+
+ trace_xe_vma_bind(vma);
+
+ if (number_tiles > 1) {
+ fences = kmalloc_array(number_tiles, sizeof(*fences),
+ GFP_KERNEL);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_tile(tile, vm->xe, id) {
+ if (!(vma->tile_mask & BIT(id)))
+ goto next;
+
+ fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
+ first_op ? syncs : NULL,
+ first_op ? num_syncs : 0,
+ vma->tile_present & BIT(id));
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_fences;
+ }
+
+ if (fences)
+ fences[cur_fence++] = fence;
+
+next:
+ if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
+ q = list_next_entry(q, multi_gt_list);
+ }
+
+ if (fences) {
+ cf = dma_fence_array_create(number_tiles, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ if (!cf) {
+ --vm->composite_fence_seqno;
+ err = -ENOMEM;
+ goto err_fences;
+ }
+ }
+
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL,
+ cf ? &cf->base : fence);
+ }
+
+ return cf ? &cf->base : fence;
+
+err_fences:
+ if (fences) {
+ while (cur_fence)
+ dma_fence_put(fences[--cur_fence]);
+ kfree(fences);
+ }
+
+ return ERR_PTR(err);
+}
+
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_syncs; i++) {
+ struct xe_sync_entry *e = &syncs[i];
+
+ if (xe_sync_is_ufence(e))
+ return xe_sync_ufence_get(e);
+ }
+
+ return NULL;
+}
+
+static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool immediate, bool first_op,
+ bool last_op)
+{
+ struct dma_fence *fence;
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_user_fence *ufence;
+
+ xe_vm_assert_held(vm);
+
+ ufence = find_ufence_get(syncs, num_syncs);
+ if (vma->ufence && ufence)
+ xe_sync_ufence_put(vma->ufence);
+
+ vma->ufence = ufence ?: vma->ufence;
+
+ if (immediate) {
+ fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
+ last_op);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ } else {
+ int i;
+
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+
+ fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ }
+ }
+
+ if (last_op)
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ dma_fence_put(fence);
+
+ return 0;
+}
+
+static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
+ struct xe_bo *bo, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool immediate, bool first_op,
+ bool last_op)
+{
+ int err;
+
+ xe_vm_assert_held(vm);
+ xe_bo_assert_held(bo);
+
+ if (bo && immediate) {
+ err = xe_bo_validate(bo, vm, true);
+ if (err)
+ return err;
+ }
+
+ return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op,
+ last_op);
+}
+
+static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_exec_queue *q, struct xe_sync_entry *syncs,
+ u32 num_syncs, bool first_op, bool last_op)
+{
+ struct dma_fence *fence;
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+
+ xe_vm_assert_held(vm);
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ xe_vma_destroy(vma, fence);
+ if (last_op)
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ dma_fence_put(fence);
+
+ return 0;
+}
+
+#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
+ DRM_XE_VM_CREATE_FLAG_LR_MODE | \
+ DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
+
+int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vm_create *args = data;
+ struct xe_tile *tile;
+ struct xe_vm *vm;
+ u32 id, asid;
+ int err;
+ u32 flags = 0;
+
+ if (XE_IOCTL_DBG(xe, args->extensions))
+ return -EINVAL;
+
+ if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
+ args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
+
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.has_usm))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ xe_device_in_non_fault_mode(xe)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) &&
+ xe_device_in_fault_mode(xe)))
+ return -EINVAL;
+
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
+ flags |= XE_VM_FLAG_SCRATCH_PAGE;
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
+ flags |= XE_VM_FLAG_LR_MODE;
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
+ flags |= XE_VM_FLAG_FAULT_MODE;
+
+ vm = xe_vm_create(xe, flags);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+ mutex_lock(&xef->vm.lock);
+ err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+ mutex_unlock(&xef->vm.lock);
+ if (err)
+ goto err_close_and_put;
+
+ if (xe->info.has_asid) {
+ mutex_lock(&xe->usm.lock);
+ err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
+ XA_LIMIT(1, XE_MAX_ASID - 1),
+ &xe->usm.next_asid, GFP_KERNEL);
+ mutex_unlock(&xe->usm.lock);
+ if (err < 0)
+ goto err_free_id;
+
+ vm->usm.asid = asid;
+ }
+
+ args->vm_id = id;
+ vm->xef = xef;
+
+ /* Record BO memory for VM pagetable created against client */
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
+ /* Warning: Security issue - never enable by default */
+ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
+#endif
+
+ return 0;
+
+err_free_id:
+ mutex_lock(&xef->vm.lock);
+ xa_erase(&xef->vm.xa, id);
+ mutex_unlock(&xef->vm.lock);
+err_close_and_put:
+ xe_vm_close_and_put(vm);
+
+ return err;
+}
+
+int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vm_destroy *args = data;
+ struct xe_vm *vm;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe, args->pad) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ mutex_lock(&xef->vm.lock);
+ vm = xa_load(&xef->vm.xa, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ err = -ENOENT;
+ else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues))
+ err = -EBUSY;
+ else
+ xa_erase(&xef->vm.xa, args->vm_id);
+ mutex_unlock(&xef->vm.lock);
+
+ if (!err)
+ xe_vm_close_and_put(vm);
+
+ return err;
+}
+
+static const u32 region_to_mem_type[] = {
+ XE_PL_TT,
+ XE_PL_VRAM0,
+ XE_PL_VRAM1,
+};
+
+static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_exec_queue *q, u32 region,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ bool first_op, bool last_op)
+{
+ struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ int err;
+
+ xe_assert(vm->xe, region < ARRAY_SIZE(region_to_mem_type));
+
+ if (!xe_vma_has_no_bo(vma)) {
+ err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
+ if (err)
+ return err;
+ }
+
+ if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
+ return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
+ true, first_op, last_op);
+ } else {
+ int i;
+
+ /* Nothing to do, signal fences now */
+ if (last_op) {
+ for (i = 0; i < num_syncs; i++) {
+ struct dma_fence *fence =
+ xe_exec_queue_last_fence_get(wait_exec_queue, vm);
+
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ dma_fence_put(fence);
+ }
+ }
+
+ return 0;
+ }
+}
+
+static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
+ bool post_commit)
+{
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags |= XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ if (post_commit)
+ xe_vm_remove_vma(vm, vma);
+}
+
+#undef ULL
+#define ULL unsigned long long
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
+{
+ struct xe_vma *vma;
+
+ switch (op->op) {
+ case DRM_GPUVA_OP_MAP:
+ vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->map.va.addr, (ULL)op->map.va.range);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ vma = gpuva_to_vma(op->remap.unmap->va);
+ vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
+ (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
+ op->remap.unmap->keep ? 1 : 0);
+ if (op->remap.prev)
+ vm_dbg(&xe->drm,
+ "REMAP:PREV: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.prev->va.addr,
+ (ULL)op->remap.prev->va.range);
+ if (op->remap.next)
+ vm_dbg(&xe->drm,
+ "REMAP:NEXT: addr=0x%016llx, range=0x%016llx",
+ (ULL)op->remap.next->va.addr,
+ (ULL)op->remap.next->va.range);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ vma = gpuva_to_vma(op->unmap.va);
+ vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d",
+ (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma),
+ op->unmap.keep ? 1 : 0);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ vma = gpuva_to_vma(op->prefetch.va);
+ vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx",
+ (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma));
+ break;
+ default:
+ drm_warn(&xe->drm, "NOT POSSIBLE");
+ }
+}
+#else
+static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
+{
+}
+#endif
+
+/*
+ * Create the operations list from the IOCTL arguments and set up the operation
+ * fields so the parse and commit steps are decoupled from the IOCTL arguments.
+ * This step can fail.
+ */
+static struct drm_gpuva_ops *
+vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
+ u64 bo_offset_or_userptr, u64 addr, u64 range,
+ u32 operation, u32 flags,
+ u32 prefetch_region, u16 pat_index)
+{
+ struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
+ struct drm_gpuva_ops *ops;
+ struct drm_gpuva_op *__op;
+ struct drm_gpuvm_bo *vm_bo;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm,
+ "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
+ operation, (ULL)addr, (ULL)range,
+ (ULL)bo_offset_or_userptr);
+
+ switch (operation) {
+ case DRM_XE_VM_BIND_OP_MAP:
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR:
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
+ obj, bo_offset_or_userptr);
+ break;
+ case DRM_XE_VM_BIND_OP_UNMAP:
+ ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
+ break;
+ case DRM_XE_VM_BIND_OP_PREFETCH:
+ ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
+ break;
+ case DRM_XE_VM_BIND_OP_UNMAP_ALL:
+ xe_assert(vm->xe, bo);
+
+ err = xe_bo_lock(bo, true);
+ if (err)
+ return ERR_PTR(err);
+
+ vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
+ if (IS_ERR(vm_bo)) {
+ xe_bo_unlock(bo);
+ return ERR_CAST(vm_bo);
+ }
+
+ ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
+ drm_gpuvm_bo_put(vm_bo);
+ xe_bo_unlock(bo);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ ops = ERR_PTR(-EINVAL);
+ }
+ if (IS_ERR(ops))
+ return ops;
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.pat_index = pat_index;
+ } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
+ op->prefetch.region = prefetch_region;
+ }
+
+ print_op(vm->xe, __op);
+ }
+
+ return ops;
+}
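+
+/*
+ * Illustrative sketch of the resulting three-phase bind flow (the helper
+ * names appear later in this file; error handling is elided):
+ *
+ *	ops = vm_bind_ioctl_ops_create(vm, bo, ...);
+ *	err = vm_bind_ioctl_ops_parse(vm, q, ops, syncs, num_syncs,
+ *				      &ops_list, last);
+ *	err = vm_bind_ioctl_ops_execute(vm, &ops_list);
+ *
+ * On a parse failure, anything already committed is rolled back with
+ * vm_bind_ioctl_ops_unwind().
+ */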
+
+static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
+ u16 pat_index, unsigned int flags)
+{
+ struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
+ struct drm_exec exec;
+ struct xe_vma *vma;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (bo) {
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = 0;
+ if (!bo->vm) {
+ err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
+ drm_exec_retry_on_contention(&exec);
+ }
+ if (!err) {
+ err = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ }
+ if (err) {
+ drm_exec_fini(&exec);
+ return ERR_PTR(err);
+ }
+ }
+ }
+ vma = xe_vma_create(vm, bo, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, pat_index, flags);
+ if (bo)
+ drm_exec_fini(&exec);
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy_unlocked(vma);
+ return ERR_PTR(err);
+ }
+ } else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
+ err = add_preempt_fences(vm, bo);
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy_unlocked(vma);
+ return ERR_PTR(err);
+ }
+ }
+
+ return vma;
+}
+
+static u64 xe_vma_max_pte_size(struct xe_vma *vma)
+{
+ if (vma->gpuva.flags & XE_VMA_PTE_1G)
+ return SZ_1G;
+ else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
+ return SZ_2M;
+ else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+ return SZ_64K;
+ else if (vma->gpuva.flags & XE_VMA_PTE_4K)
+ return SZ_4K;
+
+ return SZ_1G; /* Uninitialized, use max size */
+}
+
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+{
+ switch (size) {
+ case SZ_1G:
+ vma->gpuva.flags |= XE_VMA_PTE_1G;
+ break;
+ case SZ_2M:
+ vma->gpuva.flags |= XE_VMA_PTE_2M;
+ break;
+ case SZ_64K:
+ vma->gpuva.flags |= XE_VMA_PTE_64K;
+ break;
+ case SZ_4K:
+ vma->gpuva.flags |= XE_VMA_PTE_4K;
+ break;
+ }
+}
+
+static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ int err = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ err |= xe_vm_insert_vma(vm, op->map.vma);
+ if (!err)
+ op->flags |= XE_VMA_OP_COMMITTED;
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ {
+ u8 tile_present =
+ gpuva_to_vma(op->base.remap.unmap->va)->tile_present;
+
+ prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
+ true);
+ op->flags |= XE_VMA_OP_COMMITTED;
+
+ if (op->remap.prev) {
+ err |= xe_vm_insert_vma(vm, op->remap.prev);
+ if (!err)
+ op->flags |= XE_VMA_OP_PREV_COMMITTED;
+ if (!err && op->remap.skip_prev) {
+ op->remap.prev->tile_present =
+ tile_present;
+ op->remap.prev = NULL;
+ }
+ }
+ if (op->remap.next) {
+ err |= xe_vm_insert_vma(vm, op->remap.next);
+ if (!err)
+ op->flags |= XE_VMA_OP_NEXT_COMMITTED;
+ if (!err && op->remap.skip_next) {
+ op->remap.next->tile_present =
+ tile_present;
+ op->remap.next = NULL;
+ }
+ }
+
+ /* Adjust for partial unbind after removing VMA from VM */
+ if (!err) {
+ op->base.remap.unmap->va->va.addr = op->remap.start;
+ op->base.remap.unmap->va->va.range = op->remap.range;
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
+ op->flags |= XE_VMA_OP_COMMITTED;
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ op->flags |= XE_VMA_OP_COMMITTED;
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
+
+static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
+ struct drm_gpuva_ops *ops,
+ struct xe_sync_entry *syncs, u32 num_syncs,
+ struct list_head *ops_list, bool last)
+{
+ struct xe_vma_op *last_op = NULL;
+ struct drm_gpuva_op *__op;
+ int err = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
+ bool first = list_empty(ops_list);
+ unsigned int flags = 0;
+
+ INIT_LIST_HEAD(&op->link);
+ list_add_tail(&op->link, ops_list);
+
+ if (first) {
+ op->flags |= XE_VMA_OP_FIRST;
+ op->num_syncs = num_syncs;
+ op->syncs = syncs;
+ }
+
+ op->q = q;
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ {
+ flags |= op->map.is_null ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+
+ vma = new_vma(vm, &op->base.map, op->map.pat_index,
+ flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ op->map.vma = vma;
+ break;
+ }
+ case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *old =
+ gpuva_to_vma(op->base.remap.unmap->va);
+
+ op->remap.start = xe_vma_start(old);
+ op->remap.range = xe_vma_size(old);
+
+ if (op->base.remap.prev) {
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+
+ vma = new_vma(vm, op->base.remap.prev,
+ old->pat_index, flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ op->remap.prev = vma;
+
+ /*
+ * Userptr creates a new SG mapping so
+ * we must also rebind.
+ */
+ op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_end(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_prev) {
+ xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
+ op->remap.range -=
+ xe_vma_end(vma) -
+ xe_vma_start(old);
+ op->remap.start = xe_vma_end(vma);
+ }
+ }
+
+ if (op->base.remap.next) {
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+
+ vma = new_vma(vm, op->base.remap.next,
+ old->pat_index, flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ op->remap.next = vma;
+
+ /*
+ * Userptr creates a new SG mapping so
+ * we must also rebind.
+ */
+ op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ IS_ALIGNED(xe_vma_start(vma),
+ xe_vma_max_pte_size(old));
+ if (op->remap.skip_next) {
+ xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
+ op->remap.range -=
+ xe_vma_end(old) -
+ xe_vma_start(vma);
+ }
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ case DRM_GPUVA_OP_PREFETCH:
+ /* Nothing to do */
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ last_op = op;
+
+ err = xe_vma_op_commit(vm, op);
+ if (err)
+ return err;
+ }
+
+ /* FIXME: Unhandled corner case */
+ XE_WARN_ON(!last_op && last && !list_empty(ops_list));
+
+ if (!last_op)
+ return 0;
+
+ last_op->ops = ops;
+ if (last) {
+ last_op->flags |= XE_VMA_OP_LAST;
+ last_op->num_syncs = num_syncs;
+ last_op->syncs = syncs;
+ }
+
+ return 0;
+}
+
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+ struct xe_vma *vma, struct xe_vma_op *op)
+{
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ err = xe_vm_prepare_vma(exec, vma, 1);
+ if (err)
+ return err;
+
+ xe_vm_assert_held(vm);
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
+ op->syncs, op->num_syncs,
+ !xe_vm_in_fault_mode(vm),
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ {
+ bool prev = !!op->remap.prev;
+ bool next = !!op->remap.next;
+
+ if (!op->remap.unmap_done) {
+ if (prev || next)
+ vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
+ err = xe_vm_unbind(vm, vma, op->q, op->syncs,
+ op->num_syncs,
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST &&
+ !prev && !next);
+ if (err)
+ break;
+ op->remap.unmap_done = true;
+ }
+
+ if (prev) {
+ op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
+ err = xe_vm_bind(vm, op->remap.prev, op->q,
+ xe_vma_bo(op->remap.prev), op->syncs,
+ op->num_syncs, true, false,
+ op->flags & XE_VMA_OP_LAST && !next);
+ op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
+ if (err)
+ break;
+ op->remap.prev = NULL;
+ }
+
+ if (next) {
+ op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
+ err = xe_vm_bind(vm, op->remap.next, op->q,
+ xe_vma_bo(op->remap.next),
+ op->syncs, op->num_syncs,
+ true, false,
+ op->flags & XE_VMA_OP_LAST);
+ op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
+ if (err)
+ break;
+ op->remap.next = NULL;
+ }
+
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ err = xe_vm_unbind(vm, vma, op->q, op->syncs,
+ op->num_syncs, op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
+ op->syncs, op->num_syncs,
+ op->flags & XE_VMA_OP_FIRST,
+ op->flags & XE_VMA_OP_LAST);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ if (err)
+ trace_xe_vma_fail(vma);
+
+ return err;
+}
+
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vma_op *op)
+{
+ struct drm_exec exec;
+ int err;
+
+retry_userptr:
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = op_execute(&exec, vm, vma, op);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ drm_exec_fini(&exec);
+
+ if (err == -EAGAIN) {
+ lockdep_assert_held_write(&vm->lock);
+
+ if (op->base.op == DRM_GPUVA_OP_REMAP) {
+ if (!op->remap.unmap_done)
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ else if (op->remap.prev)
+ vma = op->remap.prev;
+ else
+ vma = op->remap.next;
+ }
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (!err)
+ goto retry_userptr;
+
+ trace_xe_vma_fail(vma);
+ }
+ }
+
+ return err;
+}
+
+static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ int ret = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ ret = __xe_vma_op_execute(vm, op->map.vma, op);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *vma;
+
+ if (!op->remap.unmap_done)
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ else if (op->remap.prev)
+ vma = op->remap.prev;
+ else
+ vma = op->remap.next;
+
+ ret = __xe_vma_op_execute(vm, vma, op);
+ break;
+ }
+ case DRM_GPUVA_OP_UNMAP:
+ ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
+ op);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ ret = __xe_vma_op_execute(vm,
+ gpuva_to_vma(op->base.prefetch.va),
+ op);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return ret;
+}
+
+static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ bool last = op->flags & XE_VMA_OP_LAST;
+
+ if (last) {
+ while (op->num_syncs--)
+ xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
+ kfree(op->syncs);
+ if (op->q)
+ xe_exec_queue_put(op->q);
+ }
+ if (!list_empty(&op->link))
+ list_del(&op->link);
+ if (op->ops)
+ drm_gpuva_ops_free(&vm->gpuvm, op->ops);
+ if (last)
+ xe_vm_put(vm);
+}
+
+static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
+ bool post_commit, bool prev_post_commit,
+ bool next_post_commit)
+{
+ lockdep_assert_held_write(&vm->lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (op->map.vma) {
+ prep_vma_destroy(vm, op->map.vma, post_commit);
+ xe_vma_destroy_unlocked(op->map.vma);
+ }
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (vma) {
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ if (post_commit)
+ xe_vm_insert_vma(vm, vma);
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_REMAP:
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (op->remap.prev) {
+ prep_vma_destroy(vm, op->remap.prev, prev_post_commit);
+ xe_vma_destroy_unlocked(op->remap.prev);
+ }
+ if (op->remap.next) {
+ prep_vma_destroy(vm, op->remap.next, next_post_commit);
+ xe_vma_destroy_unlocked(op->remap.next);
+ }
+ if (vma) {
+ down_read(&vm->userptr.notifier_lock);
+ vma->gpuva.flags &= ~XE_VMA_DESTROYED;
+ up_read(&vm->userptr.notifier_lock);
+ if (post_commit)
+ xe_vm_insert_vma(vm, vma);
+ }
+ break;
+ }
+ case DRM_GPUVA_OP_PREFETCH:
+ /* Nothing to do */
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+}
+
+static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
+ struct drm_gpuva_ops **ops,
+ int num_ops_list)
+{
+ int i;
+
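+	/* Unwind in reverse so the most recently committed ops roll back first. */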
+ for (i = num_ops_list - 1; i >= 0; --i) {
+ struct drm_gpuva_ops *__ops = ops[i];
+ struct drm_gpuva_op *__op;
+
+ if (!__ops)
+ continue;
+
+ drm_gpuva_for_each_op_reverse(__op, __ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ xe_vma_op_unwind(vm, op,
+ op->flags & XE_VMA_OP_COMMITTED,
+ op->flags & XE_VMA_OP_PREV_COMMITTED,
+ op->flags & XE_VMA_OP_NEXT_COMMITTED);
+ }
+
+ drm_gpuva_ops_free(&vm->gpuvm, __ops);
+ }
+}
+
+static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+ struct list_head *ops_list)
+{
+ struct xe_vma_op *op, *next;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ list_for_each_entry_safe(op, next, ops_list, link) {
+ err = xe_vma_op_execute(vm, op);
+ if (err) {
+ drm_warn(&vm->xe->drm, "VM op(%d) failed with %d",
+ op->base.op, err);
+ /*
+ * FIXME: Killing VM rather than proper error handling
+ */
+ xe_vm_kill(vm);
+ return -ENOSPC;
+ }
+ xe_vma_op_cleanup(vm, op);
+ }
+
+ return 0;
+}
+
+#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+ DRM_XE_VM_BIND_FLAG_DUMPABLE)
+#define XE_64K_PAGE_MASK 0xffffull
+#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
+
+static int vm_bind_ioctl_check_args(struct xe_device *xe,
+ struct drm_xe_vm_bind *args,
+ struct drm_xe_vm_bind_op **bind_ops)
+{
+ int err;
+ int i;
+
+ if (XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->extensions))
+ return -EINVAL;
+
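+	/*
+	 * A single bind op is embedded in the ioctl arguments; multiple ops
+	 * arrive as a user-pointer array which must be copied in.
+	 */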
+ if (args->num_binds > 1) {
+ u64 __user *bind_user =
+ u64_to_user_ptr(args->vector_of_binds);
+
+ *bind_ops = kvmalloc_array(args->num_binds,
+ sizeof(struct drm_xe_vm_bind_op),
+ GFP_KERNEL | __GFP_ACCOUNT);
+ if (!*bind_ops)
+ return -ENOMEM;
+
+ err = __copy_from_user(*bind_ops, bind_user,
+ sizeof(struct drm_xe_vm_bind_op) *
+ args->num_binds);
+ if (XE_IOCTL_DBG(xe, err)) {
+ err = -EFAULT;
+ goto free_bind_ops;
+ }
+ } else {
+ *bind_ops = &args->bind;
+ }
+
+ for (i = 0; i < args->num_binds; ++i) {
+ u64 range = (*bind_ops)[i].range;
+ u64 addr = (*bind_ops)[i].addr;
+ u32 op = (*bind_ops)[i].op;
+ u32 flags = (*bind_ops)[i].flags;
+ u32 obj = (*bind_ops)[i].obj;
+ u64 obj_offset = (*bind_ops)[i].obj_offset;
+ u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
+ bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ u16 pat_index = (*bind_ops)[i].pat_index;
+ u16 coh_mode;
+
+ if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
+ pat_index = array_index_nospec(pat_index, xe->pat.n_entries);
+ (*bind_ops)[i].pat_index = pat_index;
+ coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
+ if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
+ if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
+ if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
+ XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
+ XE_IOCTL_DBG(xe, obj && is_null) ||
+ XE_IOCTL_DBG(xe, obj_offset && is_null) ||
+ XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
+ is_null) ||
+ XE_IOCTL_DBG(xe, !obj &&
+ op == DRM_XE_VM_BIND_OP_MAP &&
+ !is_null) ||
+ XE_IOCTL_DBG(xe, !obj &&
+ op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
+ XE_IOCTL_DBG(xe, addr &&
+ op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
+ XE_IOCTL_DBG(xe, range &&
+ op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
+ XE_IOCTL_DBG(xe, obj &&
+ op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
+ op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, obj &&
+ op == DRM_XE_VM_BIND_OP_PREFETCH) ||
+ XE_IOCTL_DBG(xe, prefetch_region &&
+ op != DRM_XE_VM_BIND_OP_PREFETCH) ||
+ XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
+ xe->info.mem_region_mask)) ||
+ XE_IOCTL_DBG(xe, obj &&
+ op == DRM_XE_VM_BIND_OP_UNMAP)) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
+ if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, !range &&
+ op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+ }
+
+ return 0;
+
+free_bind_ops:
+ if (args->num_binds > 1)
+ kvfree(*bind_ops);
+ return err;
+}
+
+static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
+ struct xe_exec_queue *q,
+ struct xe_sync_entry *syncs,
+ int num_syncs)
+{
+ struct dma_fence *fence;
+ int i, err = 0;
+
+ fence = xe_sync_in_fence_get(syncs, num_syncs,
+ to_wait_exec_queue(vm, q), vm);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+
+ xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
+ fence);
+ dma_fence_put(fence);
+
+ return err;
+}
+
+int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vm_bind *args = data;
+ struct drm_xe_sync __user *syncs_user;
+ struct xe_bo **bos = NULL;
+ struct drm_gpuva_ops **ops = NULL;
+ struct xe_vm *vm;
+ struct xe_exec_queue *q = NULL;
+ u32 num_syncs, num_ufence = 0;
+ struct xe_sync_entry *syncs = NULL;
+ struct drm_xe_vm_bind_op *bind_ops;
+ LIST_HEAD(ops_list);
+ int err;
+ int i;
+
+ err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
+ if (err)
+ return err;
+
+ if (args->exec_queue_id) {
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+ if (XE_IOCTL_DBG(xe, !q)) {
+ err = -ENOENT;
+ goto free_objs;
+ }
+
+ if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
+ err = -EINVAL;
+ goto put_exec_queue;
+ }
+ }
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm)) {
+ err = -EINVAL;
+ goto put_exec_queue;
+ }
+
+ err = down_write_killable(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ err = -ENOENT;
+ goto release_vm_lock;
+ }
+
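+	/* Reject binds whose address range does not fit within the VM. */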
+ for (i = 0; i < args->num_binds; ++i) {
+ u64 range = bind_ops[i].range;
+ u64 addr = bind_ops[i].addr;
+
+ if (XE_IOCTL_DBG(xe, range > vm->size) ||
+ XE_IOCTL_DBG(xe, addr > vm->size - range)) {
+ err = -EINVAL;
+ goto release_vm_lock;
+ }
+ }
+
+ if (args->num_binds) {
+ bos = kvcalloc(args->num_binds, sizeof(*bos),
+ GFP_KERNEL | __GFP_ACCOUNT);
+ if (!bos) {
+ err = -ENOMEM;
+ goto release_vm_lock;
+ }
+
+ ops = kvcalloc(args->num_binds, sizeof(*ops),
+ GFP_KERNEL | __GFP_ACCOUNT);
+ if (!ops) {
+ err = -ENOMEM;
+ goto release_vm_lock;
+ }
+ }
+
+ for (i = 0; i < args->num_binds; ++i) {
+ struct drm_gem_object *gem_obj;
+ u64 range = bind_ops[i].range;
+ u64 addr = bind_ops[i].addr;
+ u32 obj = bind_ops[i].obj;
+ u64 obj_offset = bind_ops[i].obj_offset;
+ u16 pat_index = bind_ops[i].pat_index;
+ u16 coh_mode;
+
+ if (!obj)
+ continue;
+
+ gem_obj = drm_gem_object_lookup(file, obj);
+ if (XE_IOCTL_DBG(xe, !gem_obj)) {
+ err = -ENOENT;
+ goto put_obj;
+ }
+ bos[i] = gem_to_xe_bo(gem_obj);
+
+ if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
+ XE_IOCTL_DBG(xe, obj_offset >
+ bos[i]->size - range)) {
+ err = -EINVAL;
+ goto put_obj;
+ }
+
+ if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+ if (XE_IOCTL_DBG(xe, obj_offset &
+ XE_64K_PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
+ XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
+ err = -EINVAL;
+ goto put_obj;
+ }
+ }
+
+ coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
+ if (bos[i]->cpu_caching) {
+ if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
+ bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
+ err = -EINVAL;
+ goto put_obj;
+ }
+ } else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
+			/*
+			 * Imported dma-buf from a different device should
+			 * require 1way or 2way coherency since we don't know
+			 * how it was mapped on the CPU. Just assume it is
+			 * potentially cached on the CPU side.
+			 */
+ err = -EINVAL;
+ goto put_obj;
+ }
+ }
+
+ if (args->num_syncs) {
+ syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
+ if (!syncs) {
+ err = -ENOMEM;
+ goto put_obj;
+ }
+ }
+
+ syncs_user = u64_to_user_ptr(args->syncs);
+ for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
+ err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
+ &syncs_user[num_syncs],
+ (xe_vm_in_lr_mode(vm) ?
+ SYNC_PARSE_FLAG_LR_MODE : 0) |
+ (!args->num_binds ?
+ SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
+ if (err)
+ goto free_syncs;
+
+ if (xe_sync_is_ufence(&syncs[num_syncs]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+ err = -EINVAL;
+ goto free_syncs;
+ }
+
+ if (!args->num_binds) {
+ err = -ENODATA;
+ goto free_syncs;
+ }
+
+ for (i = 0; i < args->num_binds; ++i) {
+ u64 range = bind_ops[i].range;
+ u64 addr = bind_ops[i].addr;
+ u32 op = bind_ops[i].op;
+ u32 flags = bind_ops[i].flags;
+ u64 obj_offset = bind_ops[i].obj_offset;
+ u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
+ u16 pat_index = bind_ops[i].pat_index;
+
+ ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
+ addr, range, op, flags,
+ prefetch_region, pat_index);
+ if (IS_ERR(ops[i])) {
+ err = PTR_ERR(ops[i]);
+ ops[i] = NULL;
+ goto unwind_ops;
+ }
+
+ err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
+ &ops_list,
+ i == args->num_binds - 1);
+ if (err)
+ goto unwind_ops;
+ }
+
+ /* Nothing to do */
+ if (list_empty(&ops_list)) {
+ err = -ENODATA;
+ goto unwind_ops;
+ }
+
+ xe_vm_get(vm);
+ if (q)
+ xe_exec_queue_get(q);
+
+ err = vm_bind_ioctl_ops_execute(vm, &ops_list);
+
+ up_write(&vm->lock);
+
+ if (q)
+ xe_exec_queue_put(q);
+ xe_vm_put(vm);
+
+ for (i = 0; bos && i < args->num_binds; ++i)
+ xe_bo_put(bos[i]);
+
+ kvfree(bos);
+ kvfree(ops);
+ if (args->num_binds > 1)
+ kvfree(bind_ops);
+
+ return err;
+
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
+free_syncs:
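+	/*
+	 * Nothing to do (-ENODATA) still signals the out-syncs so that waiters
+	 * on a no-op bind make forward progress.
+	 */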
+ if (err == -ENODATA)
+ err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
+ while (num_syncs--)
+ xe_sync_entry_cleanup(&syncs[num_syncs]);
+
+ kfree(syncs);
+put_obj:
+ for (i = 0; i < args->num_binds; ++i)
+ xe_bo_put(bos[i]);
+release_vm_lock:
+ up_write(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+put_exec_queue:
+ if (q)
+ xe_exec_queue_put(q);
+free_objs:
+ kvfree(bos);
+ kvfree(ops);
+ if (args->num_binds > 1)
+ kvfree(bind_ops);
+ return err;
+}
+
+/**
+ * xe_vm_lock() - Lock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be locked
+ * @intr: Whether to perform any wait interruptible
+ *
+ * Return: 0 on success, -EINTR if @intr is true and the wait for a
+ * contended lock was interrupted. If @intr is false, the function
+ * always returns 0.
+ */
+int xe_vm_lock(struct xe_vm *vm, bool intr)
+{
+ if (intr)
+ return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+
+ return dma_resv_lock(xe_vm_resv(vm), NULL);
+}
+
+/**
+ * xe_vm_unlock() - Unlock the vm's dma_resv object
+ * @vm: The struct xe_vm whose lock is to be released.
+ *
+ * Unlock a buffer object lock that was locked by xe_vm_lock().
+ */
+void xe_vm_unlock(struct xe_vm *vm)
+{
+ dma_resv_unlock(xe_vm_resv(vm));
+}
+
+/**
+ * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
+ * @vma: VMA to invalidate
+ *
+ * Walks the list of page table leaves, zeroing the entries owned by this
+ * VMA, invalidates the TLBs, and blocks until the TLB invalidation is
+ * complete.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_vm_invalidate_vma(struct xe_vma *vma)
+{
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_tile *tile;
+ u32 tile_needs_invalidate = 0;
+ int seqno[XE_MAX_TILES_PER_DEVICE];
+ u8 id;
+ int ret;
+
+ xe_assert(xe, !xe_vma_is_null(vma));
+ trace_xe_vma_invalidate(vma);
+
+ /* Check that we don't race with page-table updates */
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ if (xe_vma_is_userptr(vma)) {
+ WARN_ON_ONCE(!mmu_interval_check_retry
+ (&to_userptr_vma(vma)->userptr.notifier,
+ to_userptr_vma(vma)->userptr.notifier_seq));
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
+ DMA_RESV_USAGE_BOOKKEEP));
+
+ } else {
+ xe_bo_assert_held(xe_vma_bo(vma));
+ }
+ }
+
+ for_each_tile(tile, xe, id) {
+ if (xe_pt_zap_ptes(tile, vma)) {
+ tile_needs_invalidate |= BIT(id);
+ xe_device_wmb(xe);
+ /*
+ * FIXME: We potentially need to invalidate multiple
+ * GTs within the tile
+ */
+ seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma);
+ if (seqno[id] < 0)
+ return seqno[id];
+ }
+ }
+
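+	/* Second pass: wait for all issued TLB invalidations to complete. */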
+ for_each_tile(tile, xe, id) {
+ if (tile_needs_invalidate & BIT(id)) {
+ ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ vma->tile_invalidated = vma->tile_mask;
+
+ return 0;
+}
+
+int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
+{
+ struct drm_gpuva *gpuva;
+ bool is_vram;
+ uint64_t addr;
+
+ if (!down_read_trylock(&vm->lock)) {
+ drm_printf(p, " Failed to acquire VM lock to dump capture");
+ return 0;
+ }
+ if (vm->pt_root[gt_id]) {
+ addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE);
+ is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo);
+ drm_printf(p, " VM root: A:0x%llx %s\n", addr,
+ is_vram ? "VRAM" : "SYS");
+ }
+
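+	/* Walk every VMA and print its range, size, and backing type. */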
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+ bool is_userptr = xe_vma_is_userptr(vma);
+ bool is_null = xe_vma_is_null(vma);
+
+ if (is_null) {
+ addr = 0;
+ } else if (is_userptr) {
+ struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
+ struct xe_res_cursor cur;
+
+ if (sg) {
+ xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
+ addr = xe_res_dma(&cur);
+ } else {
+ addr = 0;
+ }
+ } else {
+ addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE);
+ is_vram = xe_bo_is_vram(xe_vma_bo(vma));
+ }
+ drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1,
+ xe_vma_size(vma),
+ addr, is_null ? "NULL" : is_userptr ? "USR" :
+ is_vram ? "VRAM" : "SYS");
+ }
+ up_read(&vm->lock);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
new file mode 100644
index 000000000..40188a28c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_VM_H_
+#define _XE_VM_H_
+
+#include "xe_bo_types.h"
+#include "xe_macros.h"
+#include "xe_map.h"
+#include "xe_vm_types.h"
+
+struct drm_device;
+struct drm_printer;
+struct drm_file;
+
+struct ttm_buffer_object;
+struct ttm_validate_buffer;
+
+struct xe_exec_queue;
+struct xe_file;
+struct xe_sync_entry;
+struct drm_exec;
+
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
+
+struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
+int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
+
+static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
+{
+ drm_gpuvm_get(&vm->gpuvm);
+ return vm;
+}
+
+static inline void xe_vm_put(struct xe_vm *vm)
+{
+ drm_gpuvm_put(&vm->gpuvm);
+}
+
+int xe_vm_lock(struct xe_vm *vm, bool intr);
+
+void xe_vm_unlock(struct xe_vm *vm);
+
+static inline bool xe_vm_is_closed(struct xe_vm *vm)
+{
+ /* Only guaranteed not to change when vm->lock is held */
+ return !vm->size;
+}
+
+static inline bool xe_vm_is_banned(struct xe_vm *vm)
+{
+ return vm->flags & XE_VM_FLAG_BANNED;
+}
+
+static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
+{
+ lockdep_assert_held(&vm->lock);
+ return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
+}
+
+struct xe_vma *
+xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
+
+/**
+ * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
+ * @vm: The vm
+ *
+ * Return: whether the vm populates unmapped areas with scratch PTEs
+ */
+static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
+{
+ return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
+}
+
+/**
+ * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
+ * @gpuvm: The struct drm_gpuvm pointer
+ *
+ * Return: Pointer to the embedding struct xe_vm.
+ */
+static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
+{
+ return container_of(gpuvm, struct xe_vm, gpuvm);
+}
+
+static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
+{
+ return gpuvm_to_vm(gpuva->vm);
+}
+
+static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
+{
+ return container_of(gpuva, struct xe_vma, gpuva);
+}
+
+static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
+{
+ return container_of(op, struct xe_vma_op, base);
+}
+
+/**
+ * DOC: Provide accessors for vma members to facilitate easy change of
+ * implementation.
+ */
+static inline u64 xe_vma_start(struct xe_vma *vma)
+{
+ return vma->gpuva.va.addr;
+}
+
+static inline u64 xe_vma_size(struct xe_vma *vma)
+{
+ return vma->gpuva.va.range;
+}
+
+static inline u64 xe_vma_end(struct xe_vma *vma)
+{
+ return xe_vma_start(vma) + xe_vma_size(vma);
+}
+
+static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
+{
+ return vma->gpuva.gem.offset;
+}
+
+static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
+{
+ return !vma->gpuva.gem.obj ? NULL :
+ container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
+}
+
+static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
+{
+ return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
+}
+
+static inline bool xe_vma_read_only(struct xe_vma *vma)
+{
+ return vma->gpuva.flags & XE_VMA_READ_ONLY;
+}
+
+static inline u64 xe_vma_userptr(struct xe_vma *vma)
+{
+ return vma->gpuva.gem.offset;
+}
+
+static inline bool xe_vma_is_null(struct xe_vma *vma)
+{
+ return vma->gpuva.flags & DRM_GPUVA_SPARSE;
+}
+
+static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
+{
+ return !xe_vma_bo(vma);
+}
+
+static inline bool xe_vma_is_userptr(struct xe_vma *vma)
+{
+ return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
+}
+
+/**
+ * to_userptr_vma() - Return a pointer to an embedding userptr vma
+ * @vma: Pointer to the embedded struct xe_vma
+ *
+ * Return: Pointer to the embedding userptr vma
+ */
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+ xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+ return container_of(vma, struct xe_userptr_vma, vma);
+}
+
+u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
+
+int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+void xe_vm_close_and_put(struct xe_vm *vm);
+
+static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
+{
+ return vm->flags & XE_VM_FLAG_FAULT_MODE;
+}
+
+static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
+{
+ return vm->flags & XE_VM_FLAG_LR_MODE;
+}
+
+static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
+{
+ return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
+}
+
+int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
+void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
+
+int xe_vm_userptr_pin(struct xe_vm *vm);
+
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
+
+int xe_vm_userptr_check_repin(struct xe_vm *vm);
+
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+
+int xe_vm_invalidate_vma(struct xe_vma *vma);
+
+extern struct ttm_device_funcs xe_ttm_funcs;
+
+static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
+ queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
+}
+
+/**
+ * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
+ * vms.
+ * @vm: The vm.
+ *
+ * If the rebind functionality on a compute vm was disabled because there was
+ * nothing to execute, reactivate it and run the rebind worker.
+ * This function should be called after submitting a batch to a compute vm.
+ */
+static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
+{
+ if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
+ vm->preempt.rebind_deactivated = false;
+ xe_vm_queue_rebind_worker(vm);
+ }
+}
+
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
+
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
+
+bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
+
+int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
+
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+ unsigned int num_shared);
+
+/**
+ * xe_vm_resv() - Returns the vm's reservation object
+ * @vm: The vm
+ *
+ * Return: Pointer to the vm's reservation object.
+ */
+static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
+{
+ return drm_gpuvm_resv(&vm->gpuvm);
+}
+
+/**
+ * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
+ * @vm: The vm
+ */
+#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
+#define vm_dbg drm_dbg
+#else
+__printf(2, 3)
+static inline void vm_dbg(const struct drm_device *dev,
+ const char *format, ...)
+{ /* noop */ }
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
new file mode 100644
index 000000000..bdc665989
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -0,0 +1,555 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_VM_DOC_H_
+#define _XE_VM_DOC_H_
+
+/**
+ * DOC: XE VM (user address space)
+ *
+ * VM creation
+ * ===========
+ *
+ * Allocate a physical page for the root of the page table structure, create a
+ * default bind engine, and return a handle to the user.
+ *
+ * Scratch page
+ * ------------
+ *
+ * If the VM is created with the flag DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE, the
+ * entire page table structure defaults to pointing at a blank page allocated
+ * by the VM. Invalid memory accesses then read from / write to this page
+ * rather than faulting.
+ *
+ * VM bind (create GPU mapping for a BO or userptr)
+ * ================================================
+ *
+ * Creates GPU mappings for a BO or userptr within a VM. VM binds use the same
+ * in / out fence interface (struct drm_xe_sync) as execs which allows users to
+ * think of binds and execs as more or less the same operation.
+ *
+ * Operations
+ * ----------
+ *
+ * DRM_XE_VM_BIND_OP_MAP - Create mapping for a BO
+ * DRM_XE_VM_BIND_OP_UNMAP - Destroy mapping for a BO / userptr
+ * DRM_XE_VM_BIND_OP_MAP_USERPTR - Create mapping for userptr
+ *
+ * Implementation details
+ * ~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * All bind operations are implemented via a hybrid approach of using the CPU
+ * and GPU to modify page tables. If a new physical page is allocated in the
+ * page table structure we populate that page via the CPU and insert that new
+ * page into the existing page table structure via a GPU job. Also any existing
+ * pages in the page table structure that need to be modified are also updated
+ * via the GPU job. As the root physical page is preallocated on VM creation, our
+ * GPU job will always have at least 1 update. The in / out fences are passed to
+ * this job so again this is conceptually the same as an exec.
+ *
+ * Very simple example of few binds on an empty VM with 48 bits of address space
+ * and the resulting operations:
+ *
+ * .. code-block::
+ *
+ * bind BO0 0x0-0x1000
+ * alloc page level 3a, program PTE[0] to BO0 phys address (CPU)
+ * alloc page level 2, program PDE[0] page level 3a phys address (CPU)
+ * alloc page level 1, program PDE[0] page level 2 phys address (CPU)
+ * update root PDE[0] to page level 1 phys address (GPU)
+ *
+ * bind BO1 0x201000-0x202000
+ * alloc page level 3b, program PTE[1] to BO1 phys address (CPU)
+ * update page level 2 PDE[1] to page level 3b phys address (GPU)
+ *
+ * bind BO2 0x1ff000-0x201000
+ *	update page level 3a PTE[511] to BO2 phys address (GPU)
+ *	update page level 3b PTE[0] to BO2 phys address + 0x1000 (GPU)
+ *
+ * GPU bypass
+ * ~~~~~~~~~~
+ *
+ * In the above example the steps using the GPU can be converted to CPU if the
+ * bind can be done immediately (all in-fences satisfied, VM dma-resv kernel
+ * slot is idle).
+ *
+ * Address space
+ * -------------
+ *
+ * Depending on platform either 48 or 57 bits of address space is supported.
+ *
+ * Page sizes
+ * ----------
+ *
+ * The minimum page size is either 4k or 64k depending on platform and memory
+ * placement (sysmem vs. VRAM). We enforce that binds must be aligned to the
+ * minimum page size.
+ *
+ * Larger pages (2M or 1GB) can be used for BOs in VRAM, the BO physical address
+ * is aligned to the larger page size, and VA is aligned to the larger page
+ * size. Larger pages for userptrs / BOs in sysmem should be possible but is not
+ * yet implemented.
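+ *
+ * A condensed sketch of the alignment check as enforced in the bind IOCTL for
+ * BOs with a 64k minimum page size (the separate per-field checks are folded
+ * into one test here):
+ *
+ * .. code-block:: c
+ *
+ *	if (bo->flags & XE_BO_INTERNAL_64K &&
+ *	    ((addr | range | obj_offset) & XE_64K_PAGE_MASK))
+ *		return -EINVAL;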
+ *
+ * Sync error handling mode
+ * ------------------------
+ *
+ * In both modes during the bind IOCTL the user input is validated. In sync
+ * error handling mode the newly bound BO is validated (potentially moved back
+ * to a region of memory where it can be used), page tables are updated by the
+ * CPU and the job to do the GPU binds is created in the IOCTL itself. This step
+ * can fail due to memory pressure. The user can recover by freeing memory and
+ * trying this operation again.
+ *
+ * Async error handling mode
+ * -------------------------
+ *
+ * In async error handling the step of validating the BO, updating page tables,
+ * and generating a job are deferred to an async worker. As this step can now
+ * fail after the IOCTL has reported success, we need an error handling flow
+ * from which the user can recover.
+ *
+ * The solution is for a user to register a user address with the VM which the
+ * VM uses to report errors to. The ufence wait interface can be used to wait on
+ * a VM going into an error state. Once an error is reported the VM's async
+ * worker is paused. While the VM's async worker is paused sync,
+ * DRM_XE_VM_BIND_OP_UNMAP operations are allowed (this can free memory). Once
+ * the user believes the error state is fixed, the async worker can be resumed
+ * via the XE_VM_BIND_OP_RESTART operation. When VM async bind work is restarted, the
+ * first operation processed is the operation that caused the original error.
+ *
+ * Bind queues / engines
+ * ---------------------
+ *
+ * Think of the case where we have two bind operations A + B and are submitted
+ * in that order. A has in fences while B has none. If using a single bind
+ * queue, B is now blocked on A's in fences even though it is ready to run. This
+ * example is a real use case for VK sparse binding. We work around this
+ * limitation by implementing bind engines.
+ *
+ * In the bind IOCTL the user can optionally pass in an engine ID which must map
+ * to an engine which is of the special class DRM_XE_ENGINE_CLASS_VM_BIND.
+ * Underneath, this is really a virtual engine that can run on any of the copy
+ * hardware engines. The job(s) created by each IOCTL are inserted into this
+ * engine's ring. In the example above if A and B have different bind engines B
+ * is free to pass A. If the engine ID field is omitted, the default bind queue
+ * for the VM is used.
+ *
+ * TODO: Explain race in issue 41 and how we solve it
+ *
+ * Array of bind operations
+ * ------------------------
+ *
+ * The uAPI allows multiple bind operations to be passed in via a user array
+ * of struct drm_xe_vm_bind_op in a single VM bind IOCTL. This interface
+ * matches the VK sparse binding API. The implementation is rather simple: parse
+ * the array into a list of operations, pass the in fences to the first operation,
+ * and pass the out fences to the last operation. The ordered nature of a bind
+ * engine makes this possible.
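+ *
+ * An illustrative userspace sketch of an array bind (struct fields as consumed
+ * by the bind IOCTL; the BO handles handle0/handle1, vm_id, and pat_index are
+ * assumed to have been obtained earlier, and error handling is omitted):
+ *
+ * .. code-block:: c
+ *
+ *	struct drm_xe_vm_bind_op ops[2] = {
+ *		{ .obj = handle0, .addr = 0x0000, .range = 0x1000,
+ *		  .op = DRM_XE_VM_BIND_OP_MAP, .pat_index = pat_index },
+ *		{ .obj = handle1, .addr = 0x201000, .range = 0x1000,
+ *		  .op = DRM_XE_VM_BIND_OP_MAP, .pat_index = pat_index },
+ *	};
+ *	struct drm_xe_vm_bind bind = {
+ *		.vm_id = vm_id,
+ *		.num_binds = 2,
+ *		.vector_of_binds = (__u64)(uintptr_t)ops,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);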
+ *
+ * Munmap semantics for unbinds
+ * ----------------------------
+ *
+ * Munmap allows things like:
+ *
+ * .. code-block::
+ *
+ * 0x0000-0x2000 and 0x3000-0x5000 have mappings
+ * Munmap 0x1000-0x4000, results in mappings 0x0000-0x1000 and 0x4000-0x5000
+ *
+ * To support this semantic in the above example we decompose the above example
+ * into 4 operations:
+ *
+ * .. code-block::
+ *
+ * unbind 0x0000-0x2000
+ * unbind 0x3000-0x5000
+ * rebind 0x0000-0x1000
+ * rebind 0x4000-0x5000
+ *
+ * Why not just do a partial unbind of 0x1000-0x2000 and 0x3000-0x4000? This
+ * falls apart when using large pages at the edges and the unbind forces us to
+ * use a smaller page size. For simplicity we always issue a set of unbinds
+ * unmapping anything in the range and at most 2 rebinds on the edges.
+ *
+ * Similar to an array of binds, in fences are passed to the first operation and
+ * out fences are signaled on the last operation.
+ *
+ * In this example there is a window of time where 0x0000-0x1000 and
+ * 0x4000-0x5000 are invalid but the user didn't ask for these addresses to be
+ * removed from the mapping. To work around this we treat any munmap style
+ * unbinds which require a rebind as kernel operations (BO eviction or userptr
+ * invalidation). The first operation waits on the VM's
+ * DMA_RESV_USAGE_PREEMPT_FENCE slots (waits for all pending jobs on VM to
+ * complete / triggers preempt fences) and the last operation is installed in
+ * the VM's DMA_RESV_USAGE_KERNEL slot (blocks future jobs / resume compute mode
+ * VM). The caveat is all dma-resv slots must be updated atomically with respect
+ * to execs and compute mode rebind worker. To accomplish this, hold the
+ * vm->lock in write mode from the first operation until the last.
+ *
+ * Deferred binds in fault mode
+ * ----------------------------
+ *
+ * If a VM is in fault mode (TODO: link to fault mode), new bind operations that
+ * create mappings are by default deferred to the page fault handler (first
+ * use). This behavior can be overridden by setting the flag
+ * DRM_XE_VM_BIND_FLAG_IMMEDIATE, which indicates the mapping should be created
+ * immediately.
+ *
+ * User pointer
+ * ============
+ *
+ * User pointers are user allocated memory (malloc'd, mmap'd, etc..) for which the
+ * user wants to create a GPU mapping. Typically in other DRM drivers a dummy BO
+ * was created and then a binding was created. We bypass creating a dummy BO in
+ * XE and simply create a binding directly from the userptr.
+ *
+ * Invalidation
+ * ------------
+ *
+ * Since this is core kernel managed memory, the kernel can move it whenever it
+ * wants. We register an invalidation MMU notifier to alert XE when a user
+ * pointer is about to move. The invalidation notifier needs to block until all
+ * pending users (jobs or compute mode engines) of the userptr are idle to
+ * ensure no faults. This is done by waiting on all of the VM's dma-resv slots.
+ *
+ * Rebinds
+ * -------
+ *
+ * Either the next exec (non-compute) or rebind worker (compute mode) will
+ * rebind the userptr. The invalidation MMU notifier kicks the rebind worker
+ * after the VM dma-resv wait if the VM is in compute mode.
+ *
+ * Compute mode
+ * ============
+ *
+ * A VM in compute mode enables long running workloads and ultra low latency
+ * submission (ULLS). ULLS is implemented via a continuously running batch +
+ * semaphores. This enables the user to insert jump-to-new-batch commands
+ * into the continuously running batch. In both cases these batches exceed the
+ * time a dma fence is allowed to exist for before signaling, as such dma fences
+ * are not used when a VM is in compute mode. User fences (TODO: link user fence
+ * doc) are used instead to signal operation's completion.
+ *
+ * Preempt fences
+ * --------------
+ *
+ * If the kernel decides to move memory around (either userptr invalidate, BO
+ * eviction, or munmap style unbind which results in a rebind) and a batch is
+ * running on an engine, that batch can fault or cause a memory corruption as
+ * page tables for the moved memory are no longer valid. To work around this we
+ * introduce the concept of preempt fences. When sw signaling is enabled on a
+ * preempt fence it tells the submission backend to kick that engine off the
+ * hardware and the preempt fence signals when the engine is off the hardware.
+ * Once all preempt fences are signaled for a VM the kernel can safely move the
+ * memory and kick the rebind worker which resumes all the engines execution.
+ *
+ * A preempt fence, for every engine using the VM, is installed in the VM's
+ * dma-resv DMA_RESV_USAGE_PREEMPT_FENCE slot. The same preempt fence, for every
+ * engine using the VM, is also installed into the same dma-resv slot of every
+ * external BO mapped in the VM.
+ *
+ * Rebind worker
+ * -------------
+ *
+ * The rebind worker is very similar to an exec. It is responsible for rebinding
+ * evicted BOs or userptrs, waiting on those operations, installing new preempt
+ * fences, and finally resuming execution of engines in the VM.
+ *
+ * Flow
+ * ~~~~
+ *
+ * .. code-block::
+ *
+ * <----------------------------------------------------------------------|
+ * Check if VM is closed, if so bail out |
+ * Lock VM global lock in read mode |
+ * Pin userptrs (also finds userptr invalidated since last rebind worker) |
+ * Lock VM dma-resv and external BOs dma-resv |
+ * Validate BOs that have been evicted |
+ * Wait on and allocate new preempt fences for every engine using the VM |
+ * Rebind invalidated userptrs + evicted BOs |
+ * Wait on last rebind fence |
+ * Wait VM's DMA_RESV_USAGE_KERNEL dma-resv slot |
+ *	Install preempt fences and issue resume for every engine using the VM  |
+ * Check if any userptrs invalidated since pin |
+ * Squash resume for all engines |
+ * Unlock all |
+ * Wait all VM's dma-resv slots |
+ * Retry ----------------------------------------------------------
+ * Release all engines waiting to resume
+ * Unlock all
+ *
+ * Timeslicing
+ * -----------
+ *
+ * In order to prevent an engine from continuously being kicked off the hardware
+ * and making no forward progress, an engine has a period of time it is allowed to
+ * run after resume before it can be kicked off again. This effectively gives
+ * each engine a timeslice.
+ *
+ * Handling multiple GTs
+ * =====================
+ *
+ * If a GT has slower access to some regions and the page table structure is in
+ * the slow region, the performance on that GT could be adversely affected. To
+ * work around this we allow a VM's page tables to be shadowed in multiple GTs.
+ * When a VM is created, a default bind engine and PT table structure are created
+ * on each GT.
+ *
+ * Binds can optionally pass in a mask of GTs where a mapping should be created,
+ * if this mask is zero then default to all the GTs where the VM has page
+ * tables.
+ *
+ * The implementation for this breaks down into a bunch of for_each_gt loops in
+ * various places plus exporting a composite fence for multi-GT binds to the
+ * user.
+ *
+ * Fault mode (unified shared memory)
+ * ==================================
+ *
+ * A VM in fault mode can be enabled on devices that support page faults. If
+ * page faults are enabled, using dma fences can potentially induce a deadlock:
+ * A pending page fault can hold up the GPU work which holds up the dma fence
+ * signaling, and memory allocation is usually required to resolve a page
+ * fault, but memory allocation is not allowed to gate dma fence signaling. As
+ * such, dma fences are not allowed when VM is in fault mode. Because dma-fences
+ * are not allowed, long running workloads and ULLS are enabled on a faulting
+ * VM.
+ *
+ * Deferred VM binds
+ * -----------------
+ *
+ * By default, on a faulting VM, binds just allocate the VMA and the actual
+ * updating of the page tables is deferred to the page fault handler. This
+ * behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
+ * the VM bind which will then do the bind immediately.
+ *
+ * Page fault handler
+ * ------------------
+ *
+ * Page faults are received in the G2H worker under the CT lock which is in the
+ * path of dma fences (no memory allocations are allowed, faults require memory
+ * allocations) thus we cannot process faults under the CT lock. Another issue
+ * is faults issue TLB invalidations which require G2H credits and we cannot
+ * allocate G2H credits in the G2H handlers without deadlocking. Lastly, we do
+ * not want the CT lock to be an outer lock of the VM global lock (the VM global
+ * lock is required for fault processing).
+ *
+ * To work around the above issue with processing faults in the G2H worker, we
+ * sink faults to a buffer which is large enough to sink all possible faults on
+ * the GT (1 per hardware engine) and kick a worker to process the faults. Since
+ * the page faults G2H are already received in a worker, kicking another worker
+ * adds more latency to a critical performance path. We add a fast path in the
+ * G2H irq handler which looks at the first G2H and, if it is a page fault,
+ * sinks the fault to the buffer and kicks the worker to process it. TLB
+ * invalidation responses are also in the critical path so these can also be
+ * processed in this fast path.
+ *
+ * Multiple buffers and workers are used and hashed over based on the ASID so
+ * faults from different VMs can be processed in parallel.
+ *
+ * The page fault handler itself is rather simple, flow is below.
+ *
+ * .. code-block::
+ *
+ * Lookup VM from ASID in page fault G2H
+ * Lock VM global lock in read mode
+ * Lookup VMA from address in page fault G2H
+ * Check if VMA is valid, if not bail
+ * Check if VMA's BO has backing store, if not allocate
+ * <----------------------------------------------------------------------|
+ * If userptr, pin pages |
+ * Lock VM & BO dma-resv locks |
+ * If atomic fault, migrate to VRAM, else validate BO location |
+ * Issue rebind |
+ * Wait on rebind to complete |
+ * Check if userptr invalidated since pin |
+ * Drop VM & BO dma-resv locks |
+ * Retry ----------------------------------------------------------
+ * Unlock all
+ *	Issue blocking TLB invalidation
+ * Send page fault response to GuC
+ *
+ * Access counters
+ * ---------------
+ *
+ * Access counters can be configured to trigger a G2H indicating the device is
+ * accessing VMAs in system memory frequently, as a hint to migrate those VMAs to
+ * VRAM.
+ *
+ * As with the page fault handler, access counter G2H messages cannot be
+ * processed in the G2H worker under the CT lock. Again we use a buffer to sink
+ * access counter G2H. Unlike page faults there is no upper bound, so if the
+ * buffer is full we simply drop the G2H. Access counters are a best case
+ * optimization and, unlike page faults, it is safe to drop them.
+ *
+ * The access counter handler itself is rather simple; the flow is below.
+ *
+ * .. code-block::
+ *
+ * Lookup VM from ASID in access counter G2H
+ * Lock VM global lock in read mode
+ * Lookup VMA from address in access counter G2H
+ * If userptr, bail nothing to do
+ * Lock VM & BO dma-resv locks
+ * Issue migration to VRAM
+ * Unlock all
+ *
+ * Notice no rebind is issued in the access counter handler as the rebind will
+ * be issued on the next page fault.
+ *
+ * Caveats with eviction / user pointer invalidation
+ * -------------------------------------------------
+ *
+ * In the case of eviction and user pointer invalidation on a faulting VM, there
+ * is no need to issue a rebind; rather, we just need to blow away the page
+ * tables for the VMAs and the page fault handler will rebind the VMAs when they
+ * fault. The caveat is that to update / read the page table structure the VM
+ * global lock is needed. In both the case of eviction and user pointer
+ * invalidation, locks are held which make acquiring the VM global lock
+ * impossible. To work around this every VMA maintains a list of leaf page table
+ * entries which should be written to zero to blow away the VMA's page tables.
+ * After writing zero to these entries a blocking TLB invalidate is issued. At
+ * this point it is safe for the kernel to move the VMA's memory around. This is
+ * a necessary lockless algorithm and is safe as leaves cannot be changed while
+ * either an eviction or userptr invalidation is occurring.
+ *
+ * Locking
+ * =======
+ *
+ * VM locking protects all of the core data paths (bind operations, execs,
+ * evictions, and compute mode rebind worker) in XE.
+ *
+ * Locks
+ * -----
+ *
+ * VM global lock (vm->lock) - rw semaphore lock. Outermost lock which protects
+ * the list of userptrs mapped in the VM, the list of engines using this VM, and
+ * the array of external BOs mapped in the VM. Anything adding or removing any of
+ * the aforementioned state from the VM should acquire this lock in write mode.
+ * The VM bind path also acquires this lock in write mode, while the exec /
+ * compute mode rebind worker acquires this lock in read mode.
+ *
+ * VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
+ * slots, which are shared with any private BO in the VM. Expected to be acquired
+ * during VM binds, execs, and compute mode rebind worker. This lock is also
+ * held when private BOs are being evicted.
+ *
+ * external BO dma-resv lock (bo->ttm.base.resv->lock) - WW lock. Protects
+ * external BO dma-resv slots. Expected to be acquired during VM binds (in
+ * addition to the VM dma-resv lock). All external BO dma-locks within a VM are
+ * expected to be acquired (in addition to the VM dma-resv lock) during execs
+ * and the compute mode rebind worker. This lock is also held when an external
+ * BO is being evicted.
+ *
+ * Putting it all together
+ * -----------------------
+ *
+ * 1. An exec and bind operation with the same VM can't be executing at the same
+ * time (vm->lock).
+ *
+ * 2. A compute mode rebind worker and bind operation with the same VM can't be
+ * executing at the same time (vm->lock).
+ *
+ * 3. We can't add / remove userptrs or external BOs to a VM while an exec with
+ * the same VM is executing (vm->lock).
+ *
+ * 4. We can't add / remove userptrs, external BOs, or engines to a VM while a
+ * compute mode rebind worker with the same VM is executing (vm->lock).
+ *
+ * 5. Evictions within a VM can't happen while an exec with the same VM is
+ * executing (dma-resv locks).
+ *
+ * 6. Evictions within a VM can't happen while a compute mode rebind worker
+ * with the same VM is executing (dma-resv locks).
+ *
+ * dma-resv usage
+ * ==============
+ *
+ * As previously stated to enforce the ordering of kernel ops (eviction, userptr
+ * invalidation, munmap style unbinds which result in a rebind), rebinds during
+ * execs, execs, and resumes in the rebind worker, we use both the VM's and
+ * external BOs' dma-resv slots. Let's try to make this as clear as possible.
+ *
+ * Slot installation
+ * -----------------
+ *
+ * 1. Jobs from kernel ops install themselves into the DMA_RESV_USAGE_KERNEL
+ * slot of either an external BO or VM (depends on if kernel op is operating on
+ * an external or private BO)
+ *
+ * 2. In non-compute mode, jobs from execs install themselves into the
+ * DMA_RESV_USAGE_BOOKKEEP slot of the VM
+ *
+ * 3. In non-compute mode, jobs from execs install themselves into the
+ * DMA_RESV_USAGE_WRITE slot of all external BOs in the VM
+ *
+ * 4. Jobs from binds install themselves into the DMA_RESV_USAGE_BOOKKEEP slot
+ * of the VM
+ *
+ * 5. Jobs from binds install themselves into the DMA_RESV_USAGE_BOOKKEEP slot
+ * of the external BO (if the bind is to an external BO, this is addition to #4)
+ *
+ * 6. Every engine using a compute mode VM has a preempt fence installed into
+ * the DMA_RESV_USAGE_PREEMPT_FENCE slot of the VM
+ *
+ * 7. Every engine using a compute mode VM has a preempt fence installed into
+ * the DMA_RESV_USAGE_PREEMPT_FENCE slot of all the external BOs in the VM
+ *
+ * Slot waiting
+ * ------------
+ *
+ * 1. The execution of all jobs from kernel ops shall wait on all slots
+ * (DMA_RESV_USAGE_PREEMPT_FENCE) of either an external BO or VM (depends on if
+ * kernel op is operating on external or private BO)
+ *
+ * 2. In non-compute mode, the execution of all jobs from rebinds in execs shall
+ * wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO or VM
+ * (depends on if the rebind is operating on an external or private BO)
+ *
+ * 3. In non-compute mode, the execution of all jobs from execs shall wait on the
+ * last rebind job
+ *
+ * 4. In compute mode, the execution of all jobs from rebinds in the rebind
+ * worker shall wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO
+ * or VM (depends on if rebind is operating on external or private BO)
+ *
+ * 5. In compute mode, resumes in rebind worker shall wait on last rebind fence
+ *
+ * 6. In compute mode, resumes in rebind worker shall wait on the
+ * DMA_RESV_USAGE_KERNEL slot of the VM
+ *
+ * Putting it all together
+ * -----------------------
+ *
+ * 1. New jobs from kernel ops are blocked behind any existing jobs from
+ * non-compute mode execs
+ *
+ * 2. New jobs from non-compute mode execs are blocked behind any existing jobs
+ * from kernel ops and rebinds
+ *
+ * 3. New jobs from kernel ops are blocked behind all preempt fences signaling in
+ * compute mode
+ *
+ * 4. Compute mode engine resumes are blocked behind any existing jobs from
+ * kernel ops and rebinds
+ *
+ * Future work
+ * ===========
+ *
+ * Support large pages for sysmem and userptr.
+ *
+ * Update page faults to handle BOs at page-level granularity (e.g. part of a BO
+ * could be in system memory while another part could be in VRAM).
+ *
+ * The page fault handler can likely be optimized a bit more (e.g. rebinds
+ * always wait on the dma-resv kernel slots of the VM or BO, but technically we
+ * only have to wait on the BO moving; if using a job to do the rebind, we could
+ * avoid blocking in the page fault handler and instead attach a callback to the
+ * fence of the rebind job to signal page fault completion; our handling of
+ * short-circuiting atomic faults for bound VMAs could be better; etc.). We can
+ * tune all of this once we have benchmarks / performance numbers from workloads
+ * up and running.
+ */
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
new file mode 100644
index 000000000..4027da3a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_VM_TYPES_H_
+#define _XE_VM_TYPES_H_
+
+#include <drm/drm_gpuvm.h>
+
+#include <linux/dma-resv.h>
+#include <linux/kref.h>
+#include <linux/mmu_notifier.h>
+#include <linux/scatterlist.h>
+
+#include "xe_device_types.h"
+#include "xe_pt_types.h"
+#include "xe_range_fence.h"
+
+struct xe_bo;
+struct xe_sync_entry;
+struct xe_user_fence;
+struct xe_vm;
+
+#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
+#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
+#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
+#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
+#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
+#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
+#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
+#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
+
+/** struct xe_userptr - User pointer */
+struct xe_userptr {
+ /** @invalidate_link: Link for the vm::userptr.invalidated list */
+ struct list_head invalidate_link;
+	/** @repin_link: link into VM repin list if userptr. */
+ struct list_head repin_link;
+ /**
+ * @notifier: MMU notifier for user pointer (invalidation call back)
+ */
+ struct mmu_interval_notifier notifier;
+ /** @sgt: storage for a scatter gather table */
+ struct sg_table sgt;
+ /** @sg: allocated scatter gather table */
+ struct sg_table *sg;
+ /** @notifier_seq: notifier sequence number */
+ unsigned long notifier_seq;
+ /**
+ * @initial_bind: user pointer has been bound at least once.
+ * write: vm->userptr.notifier_lock in read mode and vm->resv held.
+ * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+ */
+ bool initial_bind;
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+ u32 divisor;
+#endif
+};
+
+struct xe_vma {
+ /** @gpuva: Base GPUVA object */
+ struct drm_gpuva gpuva;
+
+ /**
+ * @combined_links: links into lists which are mutually exclusive.
+ * Locking: vm lock in write mode OR vm lock in read mode and the vm's
+ * resv.
+ */
+ union {
+ /** @rebind: link into VM if this VMA needs rebinding. */
+ struct list_head rebind;
+ /** @destroy: link to contested list when VM is being closed. */
+ struct list_head destroy;
+ } combined_links;
+
+ union {
+ /** @destroy_cb: callback to destroy VMA when unbind job is done */
+ struct dma_fence_cb destroy_cb;
+		/** @destroy_work: worker to destroy this VMA */
+ struct work_struct destroy_work;
+ };
+
+ /** @tile_invalidated: VMA has been invalidated */
+ u8 tile_invalidated;
+
+ /** @tile_mask: Tile mask of where to create binding for this VMA */
+ u8 tile_mask;
+
+ /**
+	 * @tile_present: GT mask of bindings present for this VMA.
+	 * Protected by vm->lock, vm->resv and, for userptrs,
+	 * vm->userptr.notifier_lock for writing. Needs either for reading,
+ * but if reading is done under the vm->lock only, it needs to be held
+ * in write mode.
+ */
+ u8 tile_present;
+
+ /**
+ * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ */
+ u16 pat_index;
+
+ /**
+ * @ufence: The user fence that was provided with MAP.
+ * Needs to be signalled before UNMAP can be processed.
+ */
+ struct xe_user_fence *ufence;
+};
+
+/**
+ * struct xe_userptr_vma - A userptr vma subclass
+ * @vma: The vma.
+ * @userptr: Additional userptr information.
+ */
+struct xe_userptr_vma {
+ struct xe_vma vma;
+ struct xe_userptr userptr;
+};
+
+struct xe_device;
+
+struct xe_vm {
+ /** @gpuvm: base GPUVM used to track VMAs */
+ struct drm_gpuvm gpuvm;
+
+ struct xe_device *xe;
+
+ /* exec queue used for (un)binding vma's */
+ struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];
+
+ /** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
+ struct ttm_lru_bulk_move lru_bulk_move;
+
+ u64 size;
+
+ struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
+ struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];
+
+ /**
+	 * @flags: flags for this VM, statically set up at creation time aside
+ * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
+ */
+#define XE_VM_FLAG_64K BIT(0)
+#define XE_VM_FLAG_LR_MODE BIT(1)
+#define XE_VM_FLAG_MIGRATION BIT(2)
+#define XE_VM_FLAG_SCRATCH_PAGE BIT(3)
+#define XE_VM_FLAG_FAULT_MODE BIT(4)
+#define XE_VM_FLAG_BANNED BIT(5)
+#define XE_VM_FLAG_TILE_ID(flags) FIELD_GET(GENMASK(7, 6), flags)
+#define XE_VM_FLAG_SET_TILE_ID(tile) FIELD_PREP(GENMASK(7, 6), (tile)->id)
+ unsigned long flags;
+
+ /** @composite_fence_ctx: context composite fence */
+ u64 composite_fence_ctx;
+ /** @composite_fence_seqno: seqno for composite fence */
+ u32 composite_fence_seqno;
+
+ /**
+ * @lock: outer most lock, protects objects of anything attached to this
+ * VM
+ */
+ struct rw_semaphore lock;
+
+ /**
+ * @rebind_list: list of VMAs that need rebinding. Protected by the
+ * vm->lock in write mode, OR (the vm->lock in read mode and the
+ * vm resv).
+ */
+ struct list_head rebind_list;
+
+ /**
+	 * @destroy_work: worker to destroy VM, needed as the last put can
+	 * happen from dma_fence signaling in an irq context while the destroy
+	 * needs to be able to sleep.
+ */
+ struct work_struct destroy_work;
+
+ /**
+ * @rftree: range fence tree to track updates to page table structure.
+ * Used to implement conflict tracking between independent bind engines.
+ */
+ struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];
+
+ /** @async_ops: async VM operations (bind / unbinds) */
+ struct {
+ /** @list: list of pending async VM ops */
+ struct list_head pending;
+ /** @work: worker to execute async VM ops */
+ struct work_struct work;
+ /** @lock: protects list of pending async VM ops and fences */
+ spinlock_t lock;
+ /** @fence: fence state */
+ struct {
+ /** @context: context of async fence */
+ u64 context;
+ /** @seqno: seqno of async fence */
+ u32 seqno;
+ } fence;
+ /** @error: error state for async VM ops */
+ int error;
+ /**
+		 * @munmap_rebind_inflight: a munmap style VM bind is in the
+ * middle of a set of ops which requires a rebind at the end.
+ */
+ bool munmap_rebind_inflight;
+ } async_ops;
+
+ const struct xe_pt_ops *pt_ops;
+
+ /** @userptr: user pointer state */
+ struct {
+ /**
+ * @userptr.repin_list: list of VMAs which are user pointers,
+		 * and need repinning. Protected by @lock.
+ */
+ struct list_head repin_list;
+ /**
+ * @notifier_lock: protects notifier in write mode and
+ * submission in read mode.
+ */
+ struct rw_semaphore notifier_lock;
+ /**
+ * @userptr.invalidated_lock: Protects the
+ * @userptr.invalidated list.
+ */
+ spinlock_t invalidated_lock;
+ /**
+ * @userptr.invalidated: List of invalidated userptrs, not yet
+ * picked
+ * up for revalidation. Protected from access with the
+ * @invalidated_lock. Removing items from the list
+ * additionally requires @lock in write mode, and adding
+		 * items to the list requires the @userptr.notifier_lock in
+ * write mode.
+ */
+ struct list_head invalidated;
+ } userptr;
+
+ /** @preempt: preempt state */
+ struct {
+ /**
+ * @min_run_period_ms: The minimum run period before preempting
+ * an engine again
+ */
+ s64 min_run_period_ms;
+ /** @exec_queues: list of exec queues attached to this VM */
+ struct list_head exec_queues;
+		/** @num_exec_queues: number of exec queues attached to this VM */
+ int num_exec_queues;
+ /**
+ * @rebind_deactivated: Whether rebind has been temporarily deactivated
+ * due to no work available. Protected by the vm resv.
+ */
+ bool rebind_deactivated;
+ /**
+ * @rebind_work: worker to rebind invalidated userptrs / evicted
+ * BOs
+ */
+ struct work_struct rebind_work;
+ } preempt;
+
+ /** @um: unified memory state */
+ struct {
+ /** @asid: address space ID, unique to each VM */
+ u32 asid;
+ /**
+ * @last_fault_vma: Last fault VMA, used for fast lookup when we
+ * get a flood of faults to the same VMA
+ */
+ struct xe_vma *last_fault_vma;
+ } usm;
+
+ /** @error_capture: allow to track errors */
+ struct {
+ /** @capture_once: capture only one error per VM */
+ bool capture_once;
+ } error_capture;
+
+ /**
+ * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
+ * protected by the vm resv.
+ */
+ u64 tlb_flush_seqno;
+ /** @batch_invalidate_tlb: Always invalidate TLB before batch start */
+ bool batch_invalidate_tlb;
+ /** @xef: XE file handle for tracking this VM's drm client */
+ struct xe_file *xef;
+};
+
+/** struct xe_vma_op_map - VMA map operation */
+struct xe_vma_op_map {
+ /** @vma: VMA to map */
+ struct xe_vma *vma;
+ /** @is_null: is NULL binding */
+ bool is_null;
+ /** @pat_index: The pat index to use for this operation. */
+ u16 pat_index;
+};
+
+/** struct xe_vma_op_remap - VMA remap operation */
+struct xe_vma_op_remap {
+	/** @prev: VMA for the preceding part of a split mapping */
+	struct xe_vma *prev;
+	/** @next: VMA for the subsequent part of a split mapping */
+	struct xe_vma *next;
+ /** @start: start of the VMA unmap */
+ u64 start;
+ /** @range: range of the VMA unmap */
+ u64 range;
+ /** @skip_prev: skip prev rebind */
+ bool skip_prev;
+ /** @skip_next: skip next rebind */
+ bool skip_next;
+	/** @unmap_done: unmap operation is done */
+ bool unmap_done;
+};
+
+/** struct xe_vma_op_prefetch - VMA prefetch operation */
+struct xe_vma_op_prefetch {
+ /** @region: memory region to prefetch to */
+ u32 region;
+};
+
+/** enum xe_vma_op_flags - flags for VMA operation */
+enum xe_vma_op_flags {
+ /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
+ XE_VMA_OP_FIRST = BIT(0),
+ /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
+ XE_VMA_OP_LAST = BIT(1),
+ /** @XE_VMA_OP_COMMITTED: VMA operation committed */
+ XE_VMA_OP_COMMITTED = BIT(2),
+ /** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
+ XE_VMA_OP_PREV_COMMITTED = BIT(3),
+ /** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
+ XE_VMA_OP_NEXT_COMMITTED = BIT(4),
+};
+
+/** struct xe_vma_op - VMA operation */
+struct xe_vma_op {
+ /** @base: GPUVA base operation */
+ struct drm_gpuva_op base;
+	/**
+	 * @ops: GPUVA ops; when set, call drm_gpuva_ops_free() after this
+	 * operation is processed
+	 */
+ struct drm_gpuva_ops *ops;
+ /** @q: exec queue for this operation */
+ struct xe_exec_queue *q;
+ /**
+ * @syncs: syncs for this operation, only used on first and last
+ * operation
+ */
+ struct xe_sync_entry *syncs;
+ /** @num_syncs: number of syncs */
+ u32 num_syncs;
+ /** @link: async operation link */
+ struct list_head link;
+ /** @flags: operation flags */
+ enum xe_vma_op_flags flags;
+
+ union {
+ /** @map: VMA map operation specific data */
+ struct xe_vma_op_map map;
+ /** @remap: VMA remap operation specific data */
+ struct xe_vma_op_remap remap;
+ /** @prefetch: VMA prefetch operation specific data */
+ struct xe_vma_op_prefetch prefetch;
+ };
+};
+#endif
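The anonymous union at the end of struct xe_vma_op is discriminated by the type of the embedded GPUVA base operation. A minimal sketch of how a consumer might dispatch on it, assuming the standard DRM_GPUVA_OP_* values from <drm/drm_gpuvm.h>; this is illustrative only, not the driver's actual processing path:

/*
 * Illustrative sketch: dispatch on the GPUVA base op type. Assumes the
 * DRM_GPUVA_OP_* enum from <drm/drm_gpuvm.h>; not the driver's real code.
 */
static void xe_vma_op_dispatch_sketch(struct xe_vma_op *op)
{
	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		/* op->map is valid: bind op->map.vma using op->map.pat_index */
		break;
	case DRM_GPUVA_OP_REMAP:
		/*
		 * op->remap is valid: unmap [start, start + range), then
		 * rebind prev/next unless skip_prev/skip_next are set
		 */
		break;
	case DRM_GPUVA_OP_PREFETCH:
		/* op->prefetch is valid: migrate to op->prefetch.region */
		break;
	default:
		break;
	}
}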
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
new file mode 100644
index 000000000..5f61dd87c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_wa.h"
+
+#include <drm/drm_managed.h>
+#include <kunit/visibility.h>
+#include <linux/compiler_types.h>
+
+#include "generated/xe_wa_oob.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_device_types.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_hw_engine_types.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_rtp.h"
+#include "xe_step.h"
+
+/**
+ * DOC: Hardware workarounds
+ *
+ * Hardware workarounds are register programming documented to be executed in
+ * the driver that fall outside of the normal programming sequences for a
+ * platform. There are some basic categories of workarounds, depending on
+ * how/when they are applied:
+ *
+ * - LRC workarounds: workarounds that touch registers that are
+ * saved/restored to/from the HW context image. The list is emitted (via Load
+ * Register Immediate commands) once when initializing the device and saved in
+ * the default context. That default context is then used on every context
+ * creation to have a "primed golden context", i.e. a context image that
+ * already contains the changes needed to all the registers.
+ *
+ * - Engine workarounds: the list of these WAs is applied whenever the specific
+ * engine is reset. It's also possible that a set of engine classes share a
+ * common power domain and they are reset together. This happens on some
+ * platforms with render and compute engines. In this case (at least) one of
+ *   them needs to keep the workaround programming: the approach taken in the
+ * driver is to tie those workarounds to the first compute/render engine that
+ * is registered. When executing with GuC submission, engine resets are
+ *   outside of kernel driver control, hence the list of registers involved is
+ *   written once, on engine initialization, and then passed to the GuC, which
+ * saves/restores their values before/after the reset takes place. See
+ * ``drivers/gpu/drm/xe/xe_guc_ads.c`` for reference.
+ *
+ * - GT workarounds: the list of these WAs is applied whenever these registers
+ * revert to their default values: on GPU reset, suspend/resume [1]_, etc.
+ *
+ * - Register whitelist: some workarounds need to be implemented in userspace,
+ * but need to touch privileged registers. The whitelist in the kernel
+ *   instructs the hardware to allow the access to happen. From the kernel side,
+ *   this is just a special case of an MMIO workaround (as we write the list of
+ *   these to-be-whitelisted registers to some special HW registers).
+ *
+ * - Workaround batchbuffers: buffers that get executed automatically by the
+ * hardware on every HW context restore. These buffers are created and
+ *   programmed in the default context so the hardware always goes through those
+ *   programming sequences when switching contexts. The support for workaround
+ *   batchbuffers is enabled via these hardware mechanisms:
+ *
+ * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
+ * context, pointing the hardware to jump to that location when that offset
+ *    is reached in the context restore. The workaround batchbuffer in the
+ *    driver currently uses this mechanism for all platforms.
+ *
+ * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
+ * pointing the hardware to a buffer to continue executing after the
+ * engine registers are restored in a context restore sequence. This is
+ * currently not used in the driver.
+ *
+ * - Other/OOB: There are WAs that, due to their nature, cannot be applied from
+ * a central place. Those are peppered around the rest of the code, as needed.
+ * Workarounds related to the display IP are the main example.
+ *
+ * .. [1] Technically, some registers are powercontext saved & restored, so they
+ * survive a suspend/resume. In practice, writing them again is not too
+ * costly and simplifies things, so it's the approach taken in the driver.
+ *
+ * .. note::
+ * Hardware workarounds in xe work the same way as in i915, with the
+ * difference of how they are maintained in the code. In xe it uses the
+ * xe_rtp infrastructure so the workarounds can be kept in tables, following
+ * a more declarative approach rather than procedural.
+ */
+
+#undef XE_REG_MCR
+#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
+
+__diag_push();
+__diag_ignore_all("-Woverride-init", "Allow field overrides in table");
+
+static const struct xe_rtp_entry_sr gt_was[] = {
+ { XE_RTP_NAME("14011060649"),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(1200, 1255),
+ ENGINE_CLASS(VIDEO_DECODE),
+ FUNC(xe_rtp_match_even_instance)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14011059788"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
+ XE_RTP_ACTIONS(SET(DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE))
+ },
+ { XE_RTP_NAME("14015795083"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1260)),
+ XE_RTP_ACTIONS(CLR(MISCCPCTL, DOP_CLOCK_GATE_RENDER_ENABLE))
+ },
+
+ /* DG1 */
+
+ { XE_RTP_NAME("1409420604"),
+ XE_RTP_RULES(PLATFORM(DG1)),
+ XE_RTP_ACTIONS(SET(SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("1408615072"),
+ XE_RTP_RULES(PLATFORM(DG1)),
+ XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE2_DIS))
+ },
+
+ /* DG2 */
+
+ { XE_RTP_NAME("16010515920"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10),
+ GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F18(0), ALNUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("22010523718"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
+ XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, CG3DDISCFEG_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14011006942"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10)),
+ XE_RTP_ACTIONS(SET(SUBSLICE_UNIT_LEVEL_CLKGATE, DSS_ROUTER_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14012362059"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB))
+ },
+ { XE_RTP_NAME("14012362059"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB))
+ },
+ { XE_RTP_NAME("14010948348"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14011037102"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLCGCTL9444, LTCDD_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14011371254"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14011431319"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLCGCTL9440,
+ GAMTLBOACS_CLKGATE_DIS |
+ GAMTLBVDBOX7_CLKGATE_DIS | GAMTLBVDBOX6_CLKGATE_DIS |
+ GAMTLBVDBOX5_CLKGATE_DIS | GAMTLBVDBOX4_CLKGATE_DIS |
+ GAMTLBVDBOX3_CLKGATE_DIS | GAMTLBVDBOX2_CLKGATE_DIS |
+ GAMTLBVDBOX1_CLKGATE_DIS | GAMTLBVDBOX0_CLKGATE_DIS |
+ GAMTLBKCR_CLKGATE_DIS | GAMTLBGUC_CLKGATE_DIS |
+ GAMTLBBLT_CLKGATE_DIS),
+ SET(UNSLCGCTL9444,
+ GAMTLBGFXA0_CLKGATE_DIS | GAMTLBGFXA1_CLKGATE_DIS |
+ GAMTLBCOMPA0_CLKGATE_DIS | GAMTLBCOMPA1_CLKGATE_DIS |
+ GAMTLBCOMPB0_CLKGATE_DIS | GAMTLBCOMPB1_CLKGATE_DIS |
+ GAMTLBCOMPC0_CLKGATE_DIS | GAMTLBCOMPC1_CLKGATE_DIS |
+ GAMTLBCOMPD0_CLKGATE_DIS | GAMTLBCOMPD1_CLKGATE_DIS |
+ GAMTLBMERT_CLKGATE_DIS |
+ GAMTLBVEBOX3_CLKGATE_DIS | GAMTLBVEBOX2_CLKGATE_DIS |
+ GAMTLBVEBOX1_CLKGATE_DIS | GAMTLBVEBOX0_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14010569222"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLICE_UNIT_LEVEL_CLKGATE, GAMEDIA_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14011028019"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(SSMCGCTL9530, RTFUNIT_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14010680813"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_GAMSTLB_CTRL,
+ CONTROL_BLOCK_CLKGATE_DIS |
+ EGRESS_BLOCK_CLKGATE_DIS |
+ TAG_BLOCK_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14014830051"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(CLR(SARB_CHICKEN1, COMP_CKN_IN))
+ },
+ { XE_RTP_NAME("18018781329"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(RENDER_MOD_CTRL, FORCE_MISS_FTLB),
+ SET(COMP_MOD_CTRL, FORCE_MISS_FTLB),
+ SET(XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB),
+ SET(XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB))
+ },
+ { XE_RTP_NAME("1509235366"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_GAMCNTRL_CTRL,
+ INVALIDATION_BROADCAST_MODE_DIS |
+ GLOBAL_INVALIDATION_MODE))
+ },
+ { XE_RTP_NAME("14010648519"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_L3NODEARBCFG, XEHP_LNESPARE))
+ },
+
+ /* PVC */
+
+ { XE_RTP_NAME("18018781329"),
+ XE_RTP_RULES(PLATFORM(PVC)),
+ XE_RTP_ACTIONS(SET(RENDER_MOD_CTRL, FORCE_MISS_FTLB),
+ SET(COMP_MOD_CTRL, FORCE_MISS_FTLB),
+ SET(XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB),
+ SET(XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB))
+ },
+ { XE_RTP_NAME("16016694945"),
+ XE_RTP_RULES(PLATFORM(PVC)),
+ XE_RTP_ACTIONS(SET(XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC))
+ },
+
+ /* Xe_LPG */
+
+ { XE_RTP_NAME("14015795083"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(CLR(MISCCPCTL, DOP_CLOCK_GATE_RENDER_ENABLE))
+ },
+ { XE_RTP_NAME("14018575942"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_ACTIONS(SET(COMP_MOD_CTRL, FORCE_MISS_FTLB))
+ },
+ { XE_RTP_NAME("22016670082"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_ACTIONS(SET(SQCNT1, ENFORCE_RAR))
+ },
+
+ /* Xe_LPM+ */
+
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(1300),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("22016670082"),
+ XE_RTP_RULES(MEDIA_VERSION(1300)),
+ XE_RTP_ACTIONS(SET(XELPMP_SQCNT1, ENFORCE_RAR))
+ },
+
+ /* Xe2_LPG */
+
+ { XE_RTP_NAME("16020975621"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_UNIT_LEVEL_CLKGATE, SBEUNIT_CLKGATE_DIS))
+ },
+ { XE_RTP_NAME("14018157293"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHPC_L3CLOS_MASK(0), ~0),
+ SET(XEHPC_L3CLOS_MASK(1), ~0),
+ SET(XEHPC_L3CLOS_MASK(2), ~0),
+ SET(XEHPC_L3CLOS_MASK(3), ~0))
+ },
+
+ /* Xe2_LPM */
+
+ { XE_RTP_NAME("14017421178"),
+ XE_RTP_RULES(MEDIA_VERSION(2000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(2000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14019449301"),
+ XE_RTP_RULES(MEDIA_VERSION(2000), ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F08(0), CG3DDISHRS_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+
+ {}
+};
+
+static const struct xe_rtp_entry_sr engine_was[] = {
+ { XE_RTP_NAME("22010931296, 18011464164, 14010919138"),
+ XE_RTP_RULES(GRAPHICS_VERSION(1200), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(FF_THREAD_MODE(RENDER_RING_BASE),
+ FF_TESSELATION_DOP_GATE_DISABLE))
+ },
+ { XE_RTP_NAME("1409804808"),
+ XE_RTP_RULES(GRAPHICS_VERSION(1200),
+ ENGINE_CLASS(RENDER),
+ IS_INTEGRATED),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN2, PUSH_CONST_DEREF_HOLD_DIS))
+ },
+ { XE_RTP_NAME("14010229206, 1409085225"),
+ XE_RTP_RULES(GRAPHICS_VERSION(1200),
+ ENGINE_CLASS(RENDER),
+ IS_INTEGRATED),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
+ },
+ { XE_RTP_NAME("1606931601"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_EARLY_READ))
+ },
+ { XE_RTP_NAME("14010826681, 1606700617, 22010271021, 18019627453"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1255), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CS_DEBUG_MODE1(RENDER_RING_BASE),
+ FF_DOP_CLOCK_GATE_DISABLE))
+ },
+ { XE_RTP_NAME("1406941453"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(SAMPLER_MODE, ENABLE_SMALLPL))
+ },
+ { XE_RTP_NAME("FtrPerCtxtPreemptionGranularityControl"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1250), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN1(RENDER_RING_BASE),
+ FFSC_PERCTX_PREEMPT_CTRL))
+ },
+
+ /* TGL */
+
+ { XE_RTP_NAME("1607297627, 1607030317, 1607186500"),
+ XE_RTP_RULES(PLATFORM(TIGERLAKE), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(RING_PSMI_CTL(RENDER_RING_BASE),
+ WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ RC_SEMA_IDLE_MSG_DISABLE))
+ },
+
+ /* RKL */
+
+ { XE_RTP_NAME("1607297627, 1607030317, 1607186500"),
+ XE_RTP_RULES(PLATFORM(ROCKETLAKE), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(RING_PSMI_CTL(RENDER_RING_BASE),
+ WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ RC_SEMA_IDLE_MSG_DISABLE))
+ },
+
+ /* ADL-P */
+
+ { XE_RTP_NAME("1607297627, 1607030317, 1607186500"),
+ XE_RTP_RULES(PLATFORM(ALDERLAKE_P), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(RING_PSMI_CTL(RENDER_RING_BASE),
+ WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ RC_SEMA_IDLE_MSG_DISABLE))
+ },
+
+ /* DG2 */
+
+ { XE_RTP_NAME("22013037850"),
+ XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW))
+ },
+ { XE_RTP_NAME("22014226127"),
+ XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE))
+ },
+ { XE_RTP_NAME("18017747507"),
+ XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(VFG_PREEMPTION_CHICKEN,
+ POLYGON_TRIFAN_LINELOOP_DISABLE))
+ },
+ { XE_RTP_NAME("22012826095, 22013059131"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2)))
+ },
+ { XE_RTP_NAME("22012826095, 22013059131"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(FIELD_SET(LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2)))
+ },
+ { XE_RTP_NAME("22013059131"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT))
+ },
+ { XE_RTP_NAME("22013059131"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, FORCE_1_SUB_MESSAGE_PER_FRAGMENT))
+ },
+ { XE_RTP_NAME("14010918519"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW,
+ FORCE_SLM_FENCE_SCOPE_TO_TILE |
+ FORCE_UGM_FENCE_SCOPE_TO_TILE,
+ /*
+ * Ignore read back as it always returns 0 in these
+ * steps
+ */
+ .read_mask = 0))
+ },
+ { XE_RTP_NAME("14015227452"),
+ XE_RTP_RULES(PLATFORM(DG2),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE))
+ },
+ { XE_RTP_NAME("16015675438"),
+ XE_RTP_RULES(PLATFORM(DG2),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN2(RENDER_RING_BASE),
+ PERF_FIX_BALANCING_CFE_DISABLE))
+ },
+ { XE_RTP_NAME("18028616096"),
+ XE_RTP_RULES(PLATFORM(DG2),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3))
+ },
+ { XE_RTP_NAME("16011620976, 22015475538"),
+ XE_RTP_RULES(PLATFORM(DG2),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8))
+ },
+ { XE_RTP_NAME("22012654132"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC,
+ /*
+ * Register can't be read back for verification on
+ * DG2 due to Wa_14012342262
+ */
+ .read_mask = 0))
+ },
+ { XE_RTP_NAME("22012654132"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(CACHE_MODE_SS, ENABLE_PREFETCH_INTO_IC,
+ /*
+ * Register can't be read back for verification on
+ * DG2 due to Wa_14012342262
+ */
+ .read_mask = 0))
+ },
+ { XE_RTP_NAME("1509727124"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(SAMPLER_MODE, SC_DISABLE_POWER_OPTIMIZATION_EBB))
+ },
+ { XE_RTP_NAME("22012856258"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN2, DISABLE_READ_SUPPRESSION))
+ },
+ { XE_RTP_NAME("14013392000"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN2, ENABLE_LARGE_GRF_MODE))
+ },
+ { XE_RTP_NAME("14012419201"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
+ DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX))
+ },
+ { XE_RTP_NAME("14012419201"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
+ DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX))
+ },
+ { XE_RTP_NAME("1308578152"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
+ ENGINE_CLASS(RENDER),
+ FUNC(xe_rtp_match_first_gslice_fused_off)),
+ XE_RTP_ACTIONS(CLR(CS_DEBUG_MODE1(RENDER_RING_BASE),
+ REPLAY_MODE_GRANULARITY))
+ },
+ { XE_RTP_NAME("22010960976, 14013347512"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(CLR(XEHP_HDC_CHICKEN0,
+ LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK))
+ },
+ { XE_RTP_NAME("1608949956, 14010198302"),
+ XE_RTP_RULES(PLATFORM(DG2), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN,
+ MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE))
+ },
+ { XE_RTP_NAME("22010430635"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4,
+ DISABLE_GRF_CLEAR))
+ },
+ { XE_RTP_NAME("14013202645"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(B0, C0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
+ },
+ { XE_RTP_NAME("14013202645"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
+ },
+ { XE_RTP_NAME("22012532006"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7,
+ DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA))
+ },
+ { XE_RTP_NAME("22012532006"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7,
+ DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA))
+ },
+ { XE_RTP_NAME("14015150844"),
+ XE_RTP_RULES(PLATFORM(DG2), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES,
+ XE_RTP_NOCHECK))
+ },
+
+ /* PVC */
+
+ { XE_RTP_NAME("22014226127"),
+ XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE))
+ },
+ { XE_RTP_NAME("14015227452"),
+ XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE))
+ },
+ { XE_RTP_NAME("16015675438"),
+ XE_RTP_RULES(PLATFORM(PVC), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(FF_SLICE_CS_CHICKEN2(RENDER_RING_BASE),
+ PERF_FIX_BALANCING_CFE_DISABLE))
+ },
+ { XE_RTP_NAME("14014999345"),
+ XE_RTP_RULES(PLATFORM(PVC), ENGINE_CLASS(COMPUTE),
+ GRAPHICS_STEP(B0, C0)),
+ XE_RTP_ACTIONS(SET(CACHE_MODE_SS, DISABLE_ECC))
+ },
+
+ /* Xe_LPG */
+
+ { XE_RTP_NAME("14017856879"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN3, DIS_FIX_EOT1_FLUSH))
+ },
+ { XE_RTP_NAME("14015150844"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(XEHP_HDC_CHICKEN0, DIS_ATOMIC_CHAINING_TYPED_WRITES,
+ XE_RTP_NOCHECK))
+ },
+
+ /* Xe2_LPG */
+
+ { XE_RTP_NAME("18032247524"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
+ },
+ { XE_RTP_NAME("16018712365"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
+ },
+ { XE_RTP_NAME("14018957109"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN5, DISABLE_SAMPLE_G_PERFORMANCE))
+ },
+ { XE_RTP_NAME("16021540221"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
+ },
+ { XE_RTP_NAME("14019322943"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, TGM_WRITE_EOM_FORCE))
+ },
+ { XE_RTP_NAME("14018471104"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
+ },
+ { XE_RTP_NAME("16018737384"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
+ },
+ /*
+ * These two workarounds are the same, just applying to different
+ * engines. Although Wa_18032095049 (for the RCS) isn't required on
+	 * all steppings, disabling these reports has no impact on our
+	 * driver or the GuC, so we go ahead and treat it the same as
+	 * Wa_16021639441, which does apply to all steppings.
+ */
+ { XE_RTP_NAME("18032095049, 16021639441"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004)),
+ XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+ GHWSP_CSB_REPORT_DIS |
+ PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
+
+ {}
+};
+
+static const struct xe_rtp_entry_sr lrc_was[] = {
+ { XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN3,
+ DISABLE_CPS_AWARE_COLOR_PIPE))
+ },
+ { XE_RTP_NAME("WaDisableGPGPUMidThreadPreemption"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
+ XE_RTP_ACTIONS(FIELD_SET(CS_CHICKEN1(RENDER_RING_BASE),
+ PREEMPT_GPGPU_LEVEL_MASK,
+ PREEMPT_GPGPU_THREAD_GROUP_LEVEL))
+ },
+ { XE_RTP_NAME("1806527549"),
+ XE_RTP_RULES(GRAPHICS_VERSION(1200)),
+ XE_RTP_ACTIONS(SET(HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE))
+ },
+ { XE_RTP_NAME("1606376872"),
+ XE_RTP_RULES(GRAPHICS_VERSION(1200)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC))
+ },
+
+ /* DG1 */
+
+ { XE_RTP_NAME("1409044764"),
+ XE_RTP_RULES(PLATFORM(DG1)),
+ XE_RTP_ACTIONS(CLR(COMMON_SLICE_CHICKEN3,
+ DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN))
+ },
+ { XE_RTP_NAME("22010493298"),
+ XE_RTP_RULES(PLATFORM(DG1)),
+ XE_RTP_ACTIONS(SET(HIZ_CHICKEN,
+ DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE))
+ },
+
+ /* DG2 */
+
+ { XE_RTP_NAME("16011186671"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(CLR(VFLSKPD, DIS_MULT_MISS_RD_SQUASH),
+ SET(VFLSKPD, DIS_OVER_FETCH_CACHE))
+ },
+ { XE_RTP_NAME("14010469329"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3,
+ XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE))
+ },
+ { XE_RTP_NAME("14010698770, 22010613112, 22010465075"),
+ XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(XEHP_COMMON_SLICE_CHICKEN3,
+ DISABLE_CPS_AWARE_COLOR_PIPE))
+ },
+ { XE_RTP_NAME("16013271637"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE))
+ },
+ { XE_RTP_NAME("14014947963"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(FIELD_SET(VF_PREEMPTION,
+ PREEMPTION_VERTEX_COUNT,
+ 0x4000))
+ },
+ { XE_RTP_NAME("18018764978"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_MODE2,
+ SCOREBOARD_STALL_FLUSH_CONTROL))
+ },
+ { XE_RTP_NAME("18019271663"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
+ },
+ { XE_RTP_NAME("14019877138"),
+ XE_RTP_RULES(PLATFORM(DG2)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
+ },
+
+ /* PVC */
+
+ { XE_RTP_NAME("16017236439"),
+ XE_RTP_RULES(PLATFORM(PVC), ENGINE_CLASS(COPY),
+ FUNC(xe_rtp_match_even_instance)),
+ XE_RTP_ACTIONS(SET(BCS_SWCTRL(0),
+ BCS_SWCTRL_DISABLE_256B,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE))),
+ },
+
+ /* Xe_LPG */
+
+ { XE_RTP_NAME("18019271663"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1270, 1271)),
+ XE_RTP_ACTIONS(SET(CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE))
+ },
+
+ /* Xe2_LPG */
+
+ { XE_RTP_NAME("16020518922"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(FF_MODE,
+ DIS_TE_AUTOSTRIP |
+ DIS_MESH_PARTIAL_AUTOSTRIP |
+ DIS_MESH_AUTOSTRIP),
+ SET(VFLSKPD,
+ DIS_PARTIAL_AUTOSTRIP |
+ DIS_AUTOSTRIP))
+ },
+ { XE_RTP_NAME("14019386621"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
+ },
+ { XE_RTP_NAME("14019877138"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
+ },
+ { XE_RTP_NAME("14020013138"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS))
+ },
+ { XE_RTP_NAME("14019988906"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
+ },
+
+ {}
+};
+
+static __maybe_unused const struct xe_rtp_entry oob_was[] = {
+#include <generated/xe_wa_oob.c>
+ {}
+};
+
+static_assert(ARRAY_SIZE(oob_was) - 1 == _XE_WA_OOB_COUNT);
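For reference, the build-time generator turns each rule in xe_wa_oob.rules (added later in this patch) into one table entry. A hedged sketch of what the generated include plausibly expands to for the first rule; the actual generated/xe_wa_oob.c is authoritative and may differ in detail:

/*
 * Hedged sketch of a <generated/xe_wa_oob.c> entry for the first rule in
 * xe_wa_oob.rules; names and shape assumed from the xe_rtp tables above.
 */
static const struct xe_rtp_entry oob_was_sketch[] = {
	{ XE_RTP_NAME("22012773006"),
	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1250)) },
	{}
};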
+
+__diag_pop();
+
+/**
+ * xe_wa_process_oob - process OOB workaround table
+ * @gt: GT instance to process workarounds for
+ *
+ * Process OOB workaround table for this platform, marking in @gt the
+ * workarounds that are active.
+ */
+void xe_wa_process_oob(struct xe_gt *gt)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.oob,
+ ARRAY_SIZE(oob_was));
+ xe_rtp_process(&ctx, oob_was);
+}
+
+/**
+ * xe_wa_process_gt - process GT workaround table
+ * @gt: GT instance to process workarounds for
+ *
+ * Process GT workaround table for this platform, saving in @gt all the
+ * workarounds that need to be applied at the GT level.
+ */
+void xe_wa_process_gt(struct xe_gt *gt)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt,
+ ARRAY_SIZE(gt_was));
+ xe_rtp_process_to_sr(&ctx, gt_was, &gt->reg_sr);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
+
+/**
+ * xe_wa_process_engine - process engine workaround table
+ * @hwe: engine instance to process workarounds for
+ *
+ * Process engine workaround table for this platform, saving in @hwe all the
+ * workarounds that need to be applied at the engine level that match this
+ * engine.
+ */
+void xe_wa_process_engine(struct xe_hw_engine *hwe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine,
+ ARRAY_SIZE(engine_was));
+ xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr);
+}
+
+/**
+ * xe_wa_process_lrc - process context workaround table
+ * @hwe: engine instance to process workarounds for
+ *
+ * Process context workaround table for this platform, saving in @hwe all the
+ * workarounds that need to be applied on context restore. These are workarounds
+ * touching registers that are part of the HW context image.
+ */
+void xe_wa_process_lrc(struct xe_hw_engine *hwe)
+{
+ struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
+
+ xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc,
+ ARRAY_SIZE(lrc_was));
+ xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+}
+
+/**
+ * xe_wa_init - initialize gt with workaround bookkeeping
+ * @gt: GT instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_wa_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ size_t n_oob, n_lrc, n_engine, n_gt, total;
+ unsigned long *p;
+
+ n_gt = BITS_TO_LONGS(ARRAY_SIZE(gt_was));
+ n_engine = BITS_TO_LONGS(ARRAY_SIZE(engine_was));
+ n_lrc = BITS_TO_LONGS(ARRAY_SIZE(lrc_was));
+ n_oob = BITS_TO_LONGS(ARRAY_SIZE(oob_was));
+ total = n_gt + n_engine + n_lrc + n_oob;
+
+ p = drmm_kzalloc(&xe->drm, sizeof(*p) * total, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ gt->wa_active.gt = p;
+ p += n_gt;
+ gt->wa_active.engine = p;
+ p += n_engine;
+ gt->wa_active.lrc = p;
+ p += n_lrc;
+ gt->wa_active.oob = p;
+
+ return 0;
+}
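A quick numeric sanity check on the carve-up above (hedged; 64-bit longs assumed):

/*
 * BITS_TO_LONGS(n) == 1 for any table of up to 64 entries on a 64-bit
 * build, so the four bitmaps typically occupy only a few longs carved
 * from the single drmm-managed allocation, which is freed automatically
 * when the device is released.
 */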
+
+void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "GT Workarounds\n");
+ for_each_set_bit(idx, gt->wa_active.gt, ARRAY_SIZE(gt_was))
+ drm_printf_indent(p, 1, "%s\n", gt_was[idx].name);
+
+ drm_printf(p, "\nEngine Workarounds\n");
+ for_each_set_bit(idx, gt->wa_active.engine, ARRAY_SIZE(engine_was))
+ drm_printf_indent(p, 1, "%s\n", engine_was[idx].name);
+
+ drm_printf(p, "\nLRC Workarounds\n");
+ for_each_set_bit(idx, gt->wa_active.lrc, ARRAY_SIZE(lrc_was))
+ drm_printf_indent(p, 1, "%s\n", lrc_was[idx].name);
+
+ drm_printf(p, "\nOOB Workarounds\n");
+ for_each_set_bit(idx, gt->wa_active.oob, ARRAY_SIZE(oob_was))
+ if (oob_was[idx].name)
+ drm_printf_indent(p, 1, "%s\n", oob_was[idx].name);
+}
+
+/*
+ * Apply tile (non-GT, non-display) workarounds. Think very carefully before
+ * adding anything to this function; most workarounds should be implemented
+ * elsewhere. The programming here is primarily for sgunit/soc workarounds,
+ * which are relatively rare. Since the registers these workarounds target are
+ * outside the GT, they should only need to be applied once at device
+ * probe/resume; they will not lose their values on any kind of GT or engine
+ * reset.
+ *
+ * TODO: We may want to move this over to xe_rtp in the future once we have
+ * enough workarounds to justify the work.
+ */
+void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
+{
+ struct xe_gt *mmio = tile->primary_gt;
+
+ if (XE_WA(mmio, 22010954014))
+ xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
+}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
new file mode 100644
index 000000000..1b24d66f9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_WA_
+#define _XE_WA_
+
+struct drm_printer;
+struct xe_gt;
+struct xe_hw_engine;
+struct xe_tile;
+
+int xe_wa_init(struct xe_gt *gt);
+void xe_wa_process_oob(struct xe_gt *gt);
+void xe_wa_process_gt(struct xe_gt *gt);
+void xe_wa_process_engine(struct xe_hw_engine *hwe);
+void xe_wa_process_lrc(struct xe_hw_engine *hwe);
+void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
+
+void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe);
+void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
+
+/**
+ * XE_WA - Out-of-band workarounds that don't fit the lifecycle of any
+ * other, more specific type
+ * @gt__: gt instance
+ * @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
+ */
+#define XE_WA(gt__, id__) test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob)
+
+#endif
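As a usage sketch, an OOB workaround is queried at its call site exactly like the tile workaround in xe_wa.c above; the id suffix must match a rule name from xe_wa_oob.rules:

/* Usage sketch, mirroring xe_wa_apply_tile_workarounds() in xe_wa.c */
if (XE_WA(gt, 22010954014))
	xe_mmio_rmw32(gt, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);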
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
new file mode 100644
index 000000000..727bdc429
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -0,0 +1,24 @@
+22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
+16011759253 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)
+14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
+ PLATFORM(DG2)
+22011391025 PLATFORM(DG2)
+14012197797 PLATFORM(DG2), GRAPHICS_STEP(A0, B0)
+16011777198 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0)
+ SUBPLATFORM(DG2, G11), GRAPHICS_STEP(A0, B0)
+22012727170 SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, C0)
+ SUBPLATFORM(DG2, G11)
+22012727685 SUBPLATFORM(DG2, G11)
+16015675438 PLATFORM(PVC)
+ SUBPLATFORM(DG2, G10)
+ SUBPLATFORM(DG2, G12)
+18020744125 PLATFORM(PVC)
+1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
+1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
+14016763929 SUBPLATFORM(DG2, G10)
+ SUBPLATFORM(DG2, G12)
+16017236439 PLATFORM(PVC)
+22010954014 PLATFORM(DG2)
+14019821291 MEDIA_VERSION_RANGE(1300, 2000)
+14015076503 MEDIA_VERSION(1300)
+16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
new file mode 100644
index 000000000..a75eeba7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_wait_user_fence.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+#include <drm/xe_drm.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_macros.h"
+#include "xe_exec_queue.h"
+
+static int do_compare(u64 addr, u64 value, u64 mask, u16 op)
+{
+ u64 rvalue;
+ int err;
+ bool passed;
+
+ err = copy_from_user(&rvalue, u64_to_user_ptr(addr), sizeof(rvalue));
+ if (err)
+ return -EFAULT;
+
+ switch (op) {
+ case DRM_XE_UFENCE_WAIT_OP_EQ:
+ passed = (rvalue & mask) == (value & mask);
+ break;
+ case DRM_XE_UFENCE_WAIT_OP_NEQ:
+ passed = (rvalue & mask) != (value & mask);
+ break;
+ case DRM_XE_UFENCE_WAIT_OP_GT:
+ passed = (rvalue & mask) > (value & mask);
+ break;
+ case DRM_XE_UFENCE_WAIT_OP_GTE:
+ passed = (rvalue & mask) >= (value & mask);
+ break;
+ case DRM_XE_UFENCE_WAIT_OP_LT:
+ passed = (rvalue & mask) < (value & mask);
+ break;
+ case DRM_XE_UFENCE_WAIT_OP_LTE:
+ passed = (rvalue & mask) <= (value & mask);
+ break;
+ default:
+ XE_WARN_ON("Not possible");
+ return -EINVAL;
+ }
+
+ return passed ? 0 : 1;
+}
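From the userspace side, the comparison above maps onto the wait ioctl roughly as follows; a minimal sketch assuming the struct drm_xe_wait_user_fence uAPI fields referenced throughout this file:

/*
 * Userspace sketch (assumed uAPI from <drm/xe_drm.h>): block until the low
 * 32 bits of the u64 at fence_addr reach at least 5, or ~1s elapses.
 */
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

static int wait_fence_sketch(int fd, __u64 fence_addr)
{
	struct drm_xe_wait_user_fence wait = {
		.addr = fence_addr,	/* must be 8-byte aligned */
		.op = DRM_XE_UFENCE_WAIT_OP_GTE,
		.mask = 0xffffffffull,	/* compare only the low dword */
		.value = 5,
		.timeout = 1000000000,	/* relative ns (no ABSTIME flag) */
	};

	return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
}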
+
+#define VALID_FLAGS DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
+#define MAX_OP DRM_XE_UFENCE_WAIT_OP_LTE
+
+static long to_jiffies_timeout(struct xe_device *xe,
+ struct drm_xe_wait_user_fence *args)
+{
+ unsigned long long t;
+ long timeout;
+
+ /*
+	 * For a negative timeout we want to wait "forever" by setting
+	 * MAX_SCHEDULE_TIMEOUT. But we also have to assign this value to
+	 * args->timeout so it isn't zeroed on signal delivery
+	 * (see the arithmetic after the wait).
+ */
+ if (args->timeout < 0) {
+ args->timeout = MAX_SCHEDULE_TIMEOUT;
+ return MAX_SCHEDULE_TIMEOUT;
+ }
+
+ if (args->timeout == 0)
+ return 0;
+
+ /*
+	 * Save the timeout to a u64 variable because nsecs_to_jiffies
+	 * might return a value that overflows an s32 variable.
+ */
+ if (args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)
+ t = drm_timeout_abs_to_jiffies(args->timeout);
+ else
+ t = nsecs_to_jiffies(args->timeout);
+
+ /*
+	 * Anything greater than MAX_SCHEDULE_TIMEOUT is meaningless; also, we
+	 * don't want to cap it at MAX_SCHEDULE_TIMEOUT because the user
+	 * apparently doesn't mean to wait forever, otherwise args->timeout
+	 * would have been set to a negative value.
+ */
+ if (t > MAX_SCHEDULE_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT - 1;
+ else
+ timeout = t;
+
+ return timeout ?: 1;
+}
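In summary, the conversion above behaves as follows; a hedged restatement of the code paths, not additional semantics:

/*
 * args->timeout < 0   -> MAX_SCHEDULE_TIMEOUT ("forever")
 * args->timeout == 0  -> 0 (single poll; caller sees -ETIME if unsignaled)
 * tiny but non-zero   -> rounded up to at least 1 jiffy ("?: 1")
 * larger than maximum -> clamped to MAX_SCHEDULE_TIMEOUT - 1
 */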
+
+int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ DEFINE_WAIT_FUNC(w_wait, woken_wake_function);
+ struct drm_xe_wait_user_fence *args = data;
+ struct xe_exec_queue *q = NULL;
+ u64 addr = args->addr;
+ int err = 0;
+ long timeout;
+ ktime_t start;
+
+ if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
+ XE_IOCTL_DBG(xe, args->pad2) ||
+ XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->op > MAX_OP))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, addr & 0x7))
+ return -EINVAL;
+
+ if (args->exec_queue_id) {
+ q = xe_exec_queue_lookup(xef, args->exec_queue_id);
+ if (XE_IOCTL_DBG(xe, !q))
+ return -ENOENT;
+ }
+
+ timeout = to_jiffies_timeout(xe, args);
+
+ start = ktime_get();
+
+ add_wait_queue(&xe->ufence_wq, &w_wait);
+ for (;;) {
+ err = do_compare(addr, args->value, args->mask, args->op);
+ if (err <= 0)
+ break;
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ if (q) {
+ if (q->ops->reset_status(q)) {
+				drm_info(&xe->drm, "exec queue reset detected\n");
+ err = -EIO;
+ break;
+ }
+ }
+
+ if (!timeout) {
+ err = -ETIME;
+ break;
+ }
+
+ timeout = wait_woken(&w_wait, TASK_INTERRUPTIBLE, timeout);
+ }
+ remove_wait_queue(&xe->ufence_wq, &w_wait);
+
+ if (!(args->flags & DRM_XE_UFENCE_WAIT_FLAG_ABSTIME)) {
+ args->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout < 0)
+ args->timeout = 0;
+ }
+
+ if (!timeout && !(err < 0))
+ err = -ETIME;
+
+ if (q)
+ xe_exec_queue_put(q);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.h b/drivers/gpu/drm/xe/xe_wait_user_fence.h
new file mode 100644
index 000000000..0e268978f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_WAIT_USER_FENCE_H_
+#define _XE_WAIT_USER_FENCE_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wopcm.c b/drivers/gpu/drm/xe/xe_wopcm.c
new file mode 100644
index 000000000..d3a99157e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wopcm.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_wopcm.h"
+
+#include "regs/xe_guc_regs.h"
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_mmio.h"
+#include "xe_uc_fw.h"
+
+/**
+ * DOC: Write Once Protected Content Memory (WOPCM) Layout
+ *
+ * The layout of the WOPCM is fixed after writing the GuC WOPCM size and
+ * offset registers, whose values are calculated from the HuC/GuC firmware
+ * sizes and a set of hardware requirements/restrictions, as shown below:
+ *
+ * ::
+ *
+ * +=========> +====================+ <== WOPCM Top
+ * ^ | HW contexts RSVD |
+ * | +===> +====================+ <== GuC WOPCM Top
+ * | ^ | |
+ * | | | |
+ * | | | |
+ * | GuC | |
+ * | WOPCM | |
+ * | Size +--------------------+
+ * WOPCM | | GuC FW RSVD |
+ * | | +--------------------+
+ * | | | GuC Stack RSVD |
+ *   |     |     +--------------------+
+ * | v | GuC WOPCM RSVD |
+ * | +===> +====================+ <== GuC WOPCM base
+ * | | WOPCM RSVD |
+ *   |           +--------------------+ <== HuC Firmware Top
+ * v | HuC FW |
+ * +=========> +====================+ <== WOPCM Base
+ *
+ * GuC accessible WOPCM starts at GuC WOPCM base and ends at GuC WOPCM top.
+ * The top part of the WOPCM is reserved for hardware contexts (e.g. RC6
+ * context).
+ */
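As a worked illustration of the partitioning implemented by xe_wopcm_init() further below, assume a hypothetical 4M WOPCM holding a 1M HuC image; a hedged sketch using the constants defined in this file:

/*
 * Hedged sketch of the layout math in xe_wopcm_init(), for a hypothetical
 * 4M WOPCM with a 1M HuC firmware image.
 */
u32 wopcm_size = SZ_4M;
u32 huc_fw_size = SZ_1M;

/* HuC image plus the 16K reserved area, aligned up to 16K: 0x104000 */
u32 guc_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
		     GUC_WOPCM_OFFSET_ALIGNMENT);

/* Everything above that, minus the 36K HW context reservation at the top */
u32 guc_size = (wopcm_size - WOPCM_HW_CTX_RESERVED - guc_base) &
	       GUC_WOPCM_SIZE_MASK;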
+
+/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */
+/* FIXME: Larger size required for 2-tile PVC, do a proper probe sooner or later */
+#define DGFX_WOPCM_SIZE		SZ_4M
+/* FIXME: Larger size required for MTL, do a proper probe sooner or later */
+#define MTL_WOPCM_SIZE		SZ_4M
+#define WOPCM_SIZE SZ_2M
+
+#define MAX_WOPCM_SIZE SZ_8M
+
+/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
+#define WOPCM_RESERVED_SIZE SZ_16K
+
+/* 16KB reserved at the beginning of GuC WOPCM. */
+#define GUC_WOPCM_RESERVED SZ_16K
+/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
+#define GUC_WOPCM_STACK_RESERVED SZ_8K
+
+/* GuC WOPCM Offset value needs to be aligned to 16KB. */
+#define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT)
+
+/* 36KB WOPCM reserved at the end of WOPCM */
+#define WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
+
+static inline struct xe_gt *wopcm_to_gt(struct xe_wopcm *wopcm)
+{
+ return container_of(wopcm, struct xe_gt, uc.wopcm);
+}
+
+static inline struct xe_device *wopcm_to_xe(struct xe_wopcm *wopcm)
+{
+ return gt_to_xe(wopcm_to_gt(wopcm));
+}
+
+static u32 context_reserved_size(void)
+{
+ return WOPCM_HW_CTX_RESERVED;
+}
+
+static bool __check_layout(struct xe_device *xe, u32 wopcm_size,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 guc_fw_size, u32 huc_fw_size)
+{
+ const u32 ctx_rsvd = context_reserved_size();
+ u32 size;
+
+ size = wopcm_size - ctx_rsvd;
+ if (unlikely(guc_wopcm_base >= size ||
+ guc_wopcm_size > size - guc_wopcm_base)) {
+ drm_err(&xe->drm,
+ "WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
+ size / SZ_1K);
+ return false;
+ }
+
+ size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+ if (unlikely(guc_wopcm_size < size)) {
+ drm_err(&xe->drm, "WOPCM: no space for %s: %uK < %uK\n",
+ xe_uc_fw_type_repr(XE_UC_FW_TYPE_GUC),
+ guc_wopcm_size / SZ_1K, size / SZ_1K);
+ return false;
+ }
+
+ size = huc_fw_size + WOPCM_RESERVED_SIZE;
+ if (unlikely(guc_wopcm_base < size)) {
+ drm_err(&xe->drm, "WOPCM: no space for %s: %uK < %uK\n",
+ xe_uc_fw_type_repr(XE_UC_FW_TYPE_HUC),
+ guc_wopcm_base / SZ_1K, size / SZ_1K);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __wopcm_regs_locked(struct xe_gt *gt,
+ u32 *guc_wopcm_base, u32 *guc_wopcm_size)
+{
+ u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET);
+ u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE);
+
+ if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
+ !(reg_base & GUC_WOPCM_OFFSET_VALID))
+ return false;
+
+ *guc_wopcm_base = reg_base & GUC_WOPCM_OFFSET_MASK;
+ *guc_wopcm_size = reg_size & GUC_WOPCM_SIZE_MASK;
+ return true;
+}
+
+static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt,
+ struct xe_wopcm *wopcm)
+{
+ u32 base = wopcm->guc.base;
+ u32 size = wopcm->guc.size;
+ u32 huc_agent = xe_uc_fw_is_available(&gt->uc.huc.fw) ? HUC_LOADING_AGENT_GUC : 0;
+ u32 mask;
+ int err;
+
+ XE_WARN_ON(!(base & GUC_WOPCM_OFFSET_MASK));
+ XE_WARN_ON(base & ~GUC_WOPCM_OFFSET_MASK);
+ XE_WARN_ON(!(size & GUC_WOPCM_SIZE_MASK));
+ XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK);
+
+ mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
+ err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask,
+ size | GUC_WOPCM_SIZE_LOCKED);
+ if (err)
+ goto err_out;
+
+ mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+ err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
+ base | huc_agent, mask,
+ base | huc_agent |
+ GUC_WOPCM_OFFSET_VALID);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n");
+ drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ DMA_GUC_WOPCM_OFFSET.addr,
+ xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET));
+ drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ GUC_WOPCM_SIZE.addr,
+ xe_mmio_read32(gt, GUC_WOPCM_SIZE));
+
+ return err;
+}
+
+u32 xe_wopcm_size(struct xe_device *xe)
+{
+ return IS_DGFX(xe) ? DGFX_WOPCM_SIZE :
+ xe->info.platform == XE_METEORLAKE ? MTL_WOPCM_SIZE :
+ WOPCM_SIZE;
+}
+
+/**
+ * xe_wopcm_init() - Initialize the WOPCM structure.
+ * @wopcm: pointer to xe_wopcm.
+ *
+ * This function will partition the WOPCM space based on the GuC and HuC
+ * firmware sizes and will allocate the maximum remaining space for use by the
+ * GuC. It will also enforce platform-dependent hardware restrictions on the
+ * GuC WOPCM offset and size. It will fail the WOPCM init if any of these
+ * checks fail, so that the subsequent WOPCM register setup and GuC firmware
+ * upload are aborted.
+ */
+int xe_wopcm_init(struct xe_wopcm *wopcm)
+{
+ struct xe_device *xe = wopcm_to_xe(wopcm);
+ struct xe_gt *gt = wopcm_to_gt(wopcm);
+ u32 guc_fw_size = xe_uc_fw_get_upload_size(&gt->uc.guc.fw);
+ u32 huc_fw_size = xe_uc_fw_get_upload_size(&gt->uc.huc.fw);
+ u32 ctx_rsvd = context_reserved_size();
+ u32 guc_wopcm_base;
+ u32 guc_wopcm_size;
+ bool locked;
+ int ret = 0;
+
+ if (!guc_fw_size)
+ return -EINVAL;
+
+ wopcm->size = xe_wopcm_size(xe);
+ drm_dbg(&xe->drm, "WOPCM: %uK\n", wopcm->size / SZ_1K);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+ XE_WARN_ON(guc_fw_size >= wopcm->size);
+ XE_WARN_ON(huc_fw_size >= wopcm->size);
+ XE_WARN_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
+
+ locked = __wopcm_regs_locked(gt, &guc_wopcm_base, &guc_wopcm_size);
+ if (locked) {
+ drm_dbg(&xe->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+ /*
+ * When the GuC wopcm base and size are preprogrammed by
+ * BIOS/IFWI, check against the max allowed wopcm size to
+		 * validate that the programmed values fit the wopcm layout.
+ */
+ wopcm->size = MAX_WOPCM_SIZE;
+
+ goto check;
+ }
+
+ /*
+ * Aligned value of guc_wopcm_base will determine available WOPCM space
+ * for HuC firmware and mandatory reserved area.
+ */
+ guc_wopcm_base = huc_fw_size + WOPCM_RESERVED_SIZE;
+ guc_wopcm_base = ALIGN(guc_wopcm_base, GUC_WOPCM_OFFSET_ALIGNMENT);
+
+ /*
+ * Need to clamp guc_wopcm_base now to make sure the following math is
+ * correct. Formal check of whole WOPCM layout will be done below.
+ */
+ guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
+
+	/* The aligned remainder of usable WOPCM space can be assigned to GuC. */
+ guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
+ guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
+
+ drm_dbg(&xe->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+
+check:
+ if (__check_layout(xe, wopcm->size, guc_wopcm_base, guc_wopcm_size,
+ guc_fw_size, huc_fw_size)) {
+ wopcm->guc.base = guc_wopcm_base;
+ wopcm->guc.size = guc_wopcm_size;
+ XE_WARN_ON(!wopcm->guc.base);
+ XE_WARN_ON(!wopcm->guc.size);
+ } else {
+ drm_notice(&xe->drm, "Unsuccessful WOPCM partitioning\n");
+ return -E2BIG;
+ }
+
+ if (!locked)
+ ret = __wopcm_init_regs(xe, gt, wopcm);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_wopcm.h b/drivers/gpu/drm/xe/xe_wopcm.h
new file mode 100644
index 000000000..0197a2824
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wopcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_WOPCM_H_
+#define _XE_WOPCM_H_
+
+#include "xe_wopcm_types.h"
+
+struct xe_device;
+
+int xe_wopcm_init(struct xe_wopcm *wopcm);
+u32 xe_wopcm_size(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wopcm_types.h b/drivers/gpu/drm/xe/xe_wopcm_types.h
new file mode 100644
index 000000000..486d850c4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_wopcm_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_WOPCM_TYPES_H_
+#define _XE_WOPCM_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_wopcm - Overall WOPCM info and WOPCM regions.
+ */
+struct xe_wopcm {
+ /** @size: Size of overall WOPCM */
+ u32 size;
+ /** @guc: GuC WOPCM Region info */
+ struct {
+ /** @base: GuC WOPCM base which is offset from WOPCM base */
+ u32 base;
+ /** @size: Size of the GuC WOPCM region */
+ u32 size;
+ } guc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index a7f8611be..db3bb4afb 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -27,7 +27,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 7d98ae1a8..c3e900250 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -271,7 +271,7 @@ static void hdlc_rx_frame(struct gb_beagleplay *bg)
}
}
-static int hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
+static ssize_t hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
{
size_t i;
u8 c;
@@ -331,7 +331,8 @@ static void hdlc_deinit(struct gb_beagleplay *bg)
flush_work(&bg->tx_work);
}
-static int gb_tty_receive(struct serdev_device *sd, const unsigned char *data, size_t count)
+static ssize_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
+ size_t count)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4ce74af79..4c682c650 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -761,14 +761,15 @@ config HID_MULTITOUCH
module will be called hid-multitouch.
config HID_NINTENDO
- tristate "Nintendo Joy-Con and Pro Controller support"
+ tristate "Nintendo Joy-Con, NSO, and Pro Controller support"
depends on NEW_LEDS
depends on LEDS_CLASS
select POWER_SUPPLY
help
- Adds support for the Nintendo Switch Joy-Cons and Pro Controller.
+	  Adds support for the Nintendo Switch Joy-Cons, NSO controllers, and the Pro Controller.
All controllers support bluetooth, and the Pro Controller also supports
- its USB mode.
+ its USB mode. This also includes support for the Nintendo Switch Online
+	  Controllers, which include the Genesis, SNES, and N64 controllers.
To compile this driver as a module, choose M here: the
module will be called hid-nintendo.
@@ -779,9 +780,9 @@ config NINTENDO_FF
select INPUT_FF_MEMLESS
help
Say Y here if you have a Nintendo Switch controller and want to enable
- force feedback support for it. This works for both joy-cons and the pro
- controller. For the pro controller, both rumble motors can be controlled
- individually.
+	  force feedback support for it. This works for the joy-cons, the pro
+	  controller, and the NSO N64 controller. For the pro controller, both
+	  rumble motors can be controlled individually.
config HID_NTI
tristate "NTI keyboard adapters"
@@ -1296,6 +1297,15 @@ config HID_ALPS
Say Y here if you have a Alps touchpads over i2c-hid or usbhid
and want support for its special functionalities.
+config HID_MCP2200
+ tristate "Microchip MCP2200 HID USB-to-GPIO bridge"
+ depends on USB_HID && GPIOLIB
+ help
+	  Provides GPIO functionality over USB-HID through the MCP2200 device.
+
+ To compile this driver as a module, choose M here: the module
+	  will be called hid-mcp2200.
+
config HID_MCP2221
tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
depends on USB_HID && I2C
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 8a06d0f84..082a728ea 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MACALLY) += hid-macally.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
+obj-$(CONFIG_HID_MCP2200) += hid-mcp2200.o
obj-$(CONFIG_HID_MCP2221) += hid-mcp2221.o
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MEGAWORLD_FF) += hid-megaworld.o
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index 2643bb14f..a1950bc6e 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -37,6 +37,11 @@ struct amd_mp2_sensor_info {
dma_addr_t dma_address;
};
+struct sfh_dev_status {
+ bool is_hpd_present;
+ bool is_als_present;
+};
+
struct amd_mp2_dev {
struct pci_dev *pdev;
struct amdtp_cl_data *cl_data;
@@ -47,6 +52,7 @@ struct amd_mp2_dev {
struct amd_input_data in_data;
/* mp2 active control status */
u32 mp2_acs;
+ struct sfh_dev_status dev_en;
};
struct amd_mp2_ops {
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
index 8a037de08..33fbdde8a 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
@@ -132,7 +132,7 @@ static void get_common_inputs(struct common_input_property *common, int report_i
common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
}
-static int float_to_int(u32 flt32_val)
+int amd_sfh_float_to_int(u32 flt32_val)
{
int fraction, shift, mantissa, sign, exp, zeropre;
@@ -201,9 +201,9 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
OFFSET_SENSOR_DATA_DEFAULT;
memcpy_fromio(&accel_data, sensoraddr, sizeof(struct sfh_accel_data));
get_common_inputs(&acc_input.common_property, report_id);
- acc_input.in_accel_x_value = float_to_int(accel_data.acceldata.x) / 100;
- acc_input.in_accel_y_value = float_to_int(accel_data.acceldata.y) / 100;
- acc_input.in_accel_z_value = float_to_int(accel_data.acceldata.z) / 100;
+ acc_input.in_accel_x_value = amd_sfh_float_to_int(accel_data.acceldata.x) / 100;
+ acc_input.in_accel_y_value = amd_sfh_float_to_int(accel_data.acceldata.y) / 100;
+ acc_input.in_accel_z_value = amd_sfh_float_to_int(accel_data.acceldata.z) / 100;
memcpy(input_report, &acc_input, sizeof(acc_input));
report_size = sizeof(acc_input);
break;
@@ -212,9 +212,9 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
OFFSET_SENSOR_DATA_DEFAULT;
memcpy_fromio(&gyro_data, sensoraddr, sizeof(struct sfh_gyro_data));
get_common_inputs(&gyro_input.common_property, report_id);
- gyro_input.in_angel_x_value = float_to_int(gyro_data.gyrodata.x) / 1000;
- gyro_input.in_angel_y_value = float_to_int(gyro_data.gyrodata.y) / 1000;
- gyro_input.in_angel_z_value = float_to_int(gyro_data.gyrodata.z) / 1000;
+ gyro_input.in_angel_x_value = amd_sfh_float_to_int(gyro_data.gyrodata.x) / 1000;
+ gyro_input.in_angel_y_value = amd_sfh_float_to_int(gyro_data.gyrodata.y) / 1000;
+ gyro_input.in_angel_z_value = amd_sfh_float_to_int(gyro_data.gyrodata.z) / 1000;
memcpy(input_report, &gyro_input, sizeof(gyro_input));
report_size = sizeof(gyro_input);
break;
@@ -223,9 +223,9 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
OFFSET_SENSOR_DATA_DEFAULT;
memcpy_fromio(&mag_data, sensoraddr, sizeof(struct sfh_mag_data));
get_common_inputs(&magno_input.common_property, report_id);
- magno_input.in_magno_x = float_to_int(mag_data.magdata.x) / 100;
- magno_input.in_magno_y = float_to_int(mag_data.magdata.y) / 100;
- magno_input.in_magno_z = float_to_int(mag_data.magdata.z) / 100;
+ magno_input.in_magno_x = amd_sfh_float_to_int(mag_data.magdata.x) / 100;
+ magno_input.in_magno_y = amd_sfh_float_to_int(mag_data.magdata.y) / 100;
+ magno_input.in_magno_z = amd_sfh_float_to_int(mag_data.magdata.z) / 100;
magno_input.in_magno_accuracy = mag_data.accuracy / 100;
memcpy(input_report, &magno_input, sizeof(magno_input));
report_size = sizeof(magno_input);
@@ -235,13 +235,15 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
OFFSET_SENSOR_DATA_DEFAULT;
memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
get_common_inputs(&als_input.common_property, report_id);
- als_input.illuminance_value = float_to_int(als_data.lux);
+ als_input.illuminance_value = amd_sfh_float_to_int(als_data.lux);
memcpy_fromio(&binfo, mp2->vsbase, sizeof(struct sfh_base_info));
if (binfo.sbase.s_prop[ALS_IDX].sf.feat & 0x2) {
als_input.light_color_temp = als_data.light_color_temp;
- als_input.chromaticity_x_value = float_to_int(als_data.chromaticity_x);
- als_input.chromaticity_y_value = float_to_int(als_data.chromaticity_y);
+ als_input.chromaticity_x_value =
+ amd_sfh_float_to_int(als_data.chromaticity_x);
+ als_input.chromaticity_y_value =
+ amd_sfh_float_to_int(als_data.chromaticity_y);
}
report_size = sizeof(als_input);
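
The rename above turns the file-local float_to_int() into the shared helper amd_sfh_float_to_int(): the firmware reports sensor readings as raw IEEE-754 single-precision bit patterns, and kernel code must convert them using integer arithmetic only. A minimal standalone sketch of that kind of conversion, assuming nothing beyond the C standard headers (the in-kernel implementation may differ in edge-case handling):

    #include <stdint.h>

    /* Sketch only: truncate an IEEE-754 single-precision bit pattern to int. */
    static int float_bits_to_int(uint32_t bits)
    {
            int sign = (bits >> 31) ? -1 : 1;
            int exp = (int)((bits >> 23) & 0xff) - 127;     /* remove bias */
            uint32_t mant = (bits & 0x7fffff) | (1u << 23); /* implicit leading 1 */

            if (((bits >> 23) & 0xff) == 0 || exp < 0)
                    return 0;               /* zero, denormal, or |value| < 1 */
            if (exp > 30)
                    return sign > 0 ? INT32_MAX : INT32_MIN; /* clamp inf/huge */

            /* mant is a 1.23 fixed-point value; shift it into integer range */
            return sign * (int)(exp >= 23 ? mant << (exp - 23)
                                          : mant >> (23 - exp));
    }

For example, the bit pattern 0x42C80000 (100.0f) converts to 100, which the callers above then scale by /100 or /1000 into the units HID sensor reports expect.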
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index e9c6413af..9dbe6f4cb 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -73,6 +73,15 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
int i, status;
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ switch (cl_data->sensor_idx[i]) {
+ case HPD_IDX:
+ privdata->dev_en.is_hpd_present = false;
+ break;
+ case ALS_IDX:
+ privdata->dev_en.is_als_present = false;
+ break;
+ }
+
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
privdata->mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
status = amd_sfh_wait_for_response
@@ -178,6 +187,14 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
rc = amdtp_hid_probe(i, cl_data);
if (rc)
goto cleanup;
+ switch (cl_data->sensor_idx[i]) {
+ case HPD_IDX:
+ privdata->dev_en.is_hpd_present = true;
+ break;
+ case ALS_IDX:
+ privdata->dev_en.is_als_present = true;
+ break;
+ }
}
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
@@ -259,6 +276,7 @@ static void amd_mp2_pci_remove(void *privdata)
{
struct amd_mp2_dev *mp2 = privdata;
+ sfh_deinit_emp2();
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
pci_intx(mp2->pdev, false);
@@ -311,12 +329,14 @@ int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
rc = amd_sfh_irq_init(mp2);
if (rc) {
+ sfh_deinit_emp2();
dev_err(dev, "amd_sfh_irq_init failed\n");
return rc;
}
rc = amd_sfh1_1_hid_client_init(mp2);
if (rc) {
+ sfh_deinit_emp2();
dev_err(dev, "amd_sfh1_1_hid_client_init failed\n");
return rc;
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index 4f81ef2d4..ae36312bc 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -7,11 +7,14 @@
*
* Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
*/
+#include <linux/amd-pmf-io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include "amd_sfh_interface.h"
+static struct amd_mp2_dev *emp2;
+
static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
{
struct sfh_cmd_response cmd_resp;
@@ -73,7 +76,63 @@ static struct amd_mp2_ops amd_sfh_ops = {
.response = amd_sfh_wait_response,
};
+void sfh_deinit_emp2(void)
+{
+ emp2 = NULL;
+}
+
void sfh_interface_init(struct amd_mp2_dev *mp2)
{
mp2->mp2_ops = &amd_sfh_ops;
+ emp2 = mp2;
+}
+
+static int amd_sfh_hpd_info(u8 *user_present)
+{
+ struct hpd_status hpdstatus;
+
+ if (!user_present)
+ return -EINVAL;
+
+ if (!emp2 || !emp2->dev_en.is_hpd_present)
+ return -ENODEV;
+
+ hpdstatus.val = readl(emp2->mmio + AMD_C2P_MSG(4));
+ *user_present = hpdstatus.shpd.presence;
+
+ return 0;
+}
+
+static int amd_sfh_als_info(u32 *ambient_light)
+{
+ struct sfh_als_data als_data;
+ void __iomem *sensoraddr;
+
+ if (!ambient_light)
+ return -EINVAL;
+
+ if (!emp2 || !emp2->dev_en.is_als_present)
+ return -ENODEV;
+
+ sensoraddr = emp2->vsbase +
+ (ALS_IDX * SENSOR_DATA_MEM_SIZE_DEFAULT) +
+ OFFSET_SENSOR_DATA_DEFAULT;
+ memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
+ *ambient_light = amd_sfh_float_to_int(als_data.lux);
+
+ return 0;
+}
+
+int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op)
+{
+ if (sfh_info) {
+ switch (op) {
+ case MT_HPD:
+ return amd_sfh_hpd_info(&sfh_info->user_present);
+ case MT_ALS:
+ return amd_sfh_als_info(&sfh_info->ambient_light);
+ }
+ }
+ return -EINVAL;
}
+EXPORT_SYMBOL_GPL(amd_get_sfh_info);
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index 656c3e95e..2c211d287 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -165,5 +165,7 @@ struct hpd_status {
};
void sfh_interface_init(struct amd_mp2_dev *mp2);
+void sfh_deinit_emp2(void);
void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops);
+int amd_sfh_float_to_int(u32 flt32_val);
#endif
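
The new amd_get_sfh_info() export above exists for cross-driver consumers (the added <linux/amd-pmf-io.h> include points at the AMD PMF driver). A hedged sketch of how a caller might use it, assuming struct amd_sfh_info carries the user_present and ambient_light fields seen in the diff:

    #include <linux/amd-pmf-io.h>
    #include <linux/printk.h>

    /* Sketch only: poll human presence and ambient light via the SFH export. */
    static void example_poll_sfh(void)
    {
            struct amd_sfh_info info = {};

            if (!amd_get_sfh_info(&info, MT_HPD))
                    pr_info("user present: %u\n", info.user_present);

            if (!amd_get_sfh_info(&info, MT_ALS))
                    pr_info("ambient light: %u lux\n", info.ambient_light);
    }

Each call returns 0 on success, -EINVAL on a bad argument, or -ENODEV when the corresponding sensor was never enumerated (tracked by the dev_en flags added above).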
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 7903c8638..470ae2c29 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -143,6 +143,9 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
/**
* hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
*
@@ -152,7 +155,7 @@ EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
*
* @returns %NULL on error, an %__u8 memory pointer on success
*/
-noinline __u8 *
+__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
struct hid_bpf_ctx_kern *ctx_kern;
@@ -167,6 +170,7 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr
return ctx_kern->data + offset;
}
+__bpf_kfunc_end_defs();
/*
* The following set contains all functions we agree BPF programs
@@ -274,6 +278,9 @@ static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct b
return fd;
}
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
/**
* hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
*
@@ -286,7 +293,7 @@ static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct b
* is pinned to the BPF file system).
*/
/* called from syscall */
-noinline int
+__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
struct hid_device *hdev;
@@ -338,7 +345,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
*
* @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
*/
-noinline struct hid_bpf_ctx *
+__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
struct hid_device *hdev;
@@ -371,7 +378,7 @@ hid_bpf_allocate_context(unsigned int hid_id)
* @ctx: the HID-BPF context to release
*
*/
-noinline void
+__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
struct hid_bpf_ctx_kern *ctx_kern;
@@ -397,7 +404,7 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
*
* @returns %0 on success, a negative error code otherwise.
*/
-noinline int
+__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
enum hid_report_type rtype, enum hid_class_request reqtype)
{
@@ -465,6 +472,7 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
kfree(dma_data);
return ret;
}
+__bpf_kfunc_end_defs();
/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
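
The hunks above wrap each HID kfunc in __bpf_kfunc_start_defs()/__bpf_kfunc_end_defs() and swap `noinline` for __bpf_kfunc. A minimal sketch of the resulting shape (example_kfunc is hypothetical, not part of the patch):

    __bpf_kfunc_start_defs();       /* silences -Wmissing-prototypes here */

    /* Hypothetical kfunc: a function exposed to BPF programs follows this shape. */
    __bpf_kfunc int example_kfunc(int x)
    {
            return x + 1;
    }

    __bpf_kfunc_end_defs();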
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e0181218a..de7a477d6 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2749,7 +2749,7 @@ static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct bus_type hid_bus_type = {
+const struct bus_type hid_bus_type = {
.name = "hid",
.dev_groups = hid_dev_groups,
.drv_groups = hid_drv_groups,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 72046039d..175b66800 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -298,6 +298,9 @@
#define USB_VENDOR_ID_CIDC 0x1677
+#define I2C_VENDOR_ID_CIRQUE 0x0488
+#define I2C_PRODUCT_ID_CIRQUE_1063 0x1063
+
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -427,6 +430,7 @@
#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V1 0x2BED
#define I2C_DEVICE_ID_HP_SPECTRE_X360_14T_EA100_V2 0x2BEE
#define I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG 0x2D02
+#define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -916,6 +920,7 @@
#define USB_DEVICE_ID_PICK16F1454 0x0042
#define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7
#define USB_DEVICE_ID_LUXAFOR 0xf372
+#define USB_DEVICE_ID_MCP2200 0x00df
#define USB_DEVICE_ID_MCP2221 0x00dd
#define USB_VENDOR_ID_MICROSOFT 0x045e
@@ -987,7 +992,10 @@
#define USB_DEVICE_ID_NINTENDO_JOYCONL 0x2006
#define USB_DEVICE_ID_NINTENDO_JOYCONR 0x2007
#define USB_DEVICE_ID_NINTENDO_PROCON 0x2009
-#define USB_DEVICE_ID_NINTENDO_CHRGGRIP 0x200E
+#define USB_DEVICE_ID_NINTENDO_CHRGGRIP 0x200e
+#define USB_DEVICE_ID_NINTENDO_SNESCON 0x2017
+#define USB_DEVICE_ID_NINTENDO_GENCON 0x201e
+#define USB_DEVICE_ID_NINTENDO_N64CON 0x2019
#define USB_VENDOR_ID_NOVATEK 0x0603
#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c8b20d44b..e03d300d2 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -411,6 +411,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15_EU0556NG),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
+ HID_BATTERY_QUIRK_AVOID_QUERY },
{}
};
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e6a8b6d8e..3c3c497b6 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
- workitem->reports_supported |= STD_MOUSE | HIDPP;
- if (djrcv_dev->type == recvr_type_mouse_only)
- workitem->reports_supported |= MULTIMEDIA;
+ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index c9c968d4b..a46ff4e8b 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -120,6 +120,9 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
* @scroll_jiffies: Time of last scroll motion.
* @touches: Most recent data for a touch, indexed by tracking ID.
* @tracking_ids: Mapping of current touch input data to @touches.
+ * @hdev: Pointer to the underlying HID device.
+ * @work: Workqueue to handle initialization retry for quirky devices.
+ * @battery_timer: Timer for obtaining battery level information.
*/
struct magicmouse_sc {
struct input_dev *input;
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
new file mode 100644
index 000000000..bf57f7f6c
--- /dev/null
+++ b/drivers/hid/hid-mcp2200.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MCP2200 - Microchip USB to GPIO bridge
+ *
+ * Copyright (c) 2023, Johannes Roith <johannes@gnu-linux.rocks>
+ *
+ * Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/22228A.pdf
+ * App Note for HID: https://ww1.microchip.com/downloads/en/DeviceDoc/93066A.pdf
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include "hid-ids.h"
+
+/* Command codes in a raw output report */
+#define SET_CLEAR_OUTPUTS 0x08
+#define CONFIGURE 0x10
+#define READ_EE 0x20
+#define WRITE_EE 0x40
+#define READ_ALL 0x80
+
+/* MCP GPIO direction encoding */
+enum MCP_IO_DIR {
+ MCP2200_DIR_OUT = 0x00,
+ MCP2200_DIR_IN = 0x01,
+};
+
+/* Alternative pin assignments */
+#define TXLED 2
+#define RXLED 3
+#define USBCFG 6
+#define SSPND 7
+#define MCP_NGPIO 8
+
+/* CMD to set or clear a GPIO output */
+struct mcp_set_clear_outputs {
+ u8 cmd;
+ u8 dummys1[10];
+ u8 set_bmap;
+ u8 clear_bmap;
+ u8 dummys2[3];
+} __packed;
+
+/* CMD to configure the IOs */
+struct mcp_configure {
+ u8 cmd;
+ u8 dummys1[3];
+ u8 io_bmap;
+ u8 config_alt_pins;
+ u8 io_default_val_bmap;
+ u8 config_alt_options;
+ u8 baud_h;
+ u8 baud_l;
+ u8 dummys2[6];
+} __packed;
+
+/* CMD to read all parameters */
+struct mcp_read_all {
+ u8 cmd;
+ u8 dummys[15];
+} __packed;
+
+/* Response to the read all cmd */
+struct mcp_read_all_resp {
+ u8 cmd;
+ u8 eep_addr;
+ u8 dummy;
+ u8 eep_val;
+ u8 io_bmap;
+ u8 config_alt_pins;
+ u8 io_default_val_bmap;
+ u8 config_alt_options;
+ u8 baud_h;
+ u8 baud_l;
+ u8 io_port_val_bmap;
+ u8 dummys[5];
+} __packed;
+
+struct mcp2200 {
+ struct hid_device *hdev;
+ struct mutex lock;
+ struct completion wait_in_report;
+ u8 gpio_dir;
+ u8 gpio_val;
+ u8 gpio_inval;
+ u8 baud_h;
+ u8 baud_l;
+ u8 config_alt_pins;
+ u8 gpio_reset_val;
+ u8 config_alt_options;
+ int status;
+ struct gpio_chip gc;
+ u8 hid_report[16];
+};
+
+/* this executes the READ_ALL cmd */
+static int mcp_cmd_read_all(struct mcp2200 *mcp)
+{
+ struct mcp_read_all *read_all;
+ int len, t;
+
+ reinit_completion(&mcp->wait_in_report);
+
+ mutex_lock(&mcp->lock);
+
+ read_all = (struct mcp_read_all *) mcp->hid_report;
+ read_all->cmd = READ_ALL;
+ len = hid_hw_output_report(mcp->hdev, (u8 *) read_all,
+ sizeof(struct mcp_read_all));
+
+ mutex_unlock(&mcp->lock);
+
+ if (len != sizeof(struct mcp_read_all))
+ return -EINVAL;
+
+ t = wait_for_completion_timeout(&mcp->wait_in_report,
+ msecs_to_jiffies(4000));
+ if (!t)
+ return -ETIMEDOUT;
+
+ /* Return the status; negative if a wrong response was received */
+ return mcp->status;
+}
+
+static void mcp_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct mcp2200 *mcp = gpiochip_get_data(gc);
+ u8 value;
+ int status;
+ struct mcp_set_clear_outputs *cmd;
+
+ mutex_lock(&mcp->lock);
+ cmd = (struct mcp_set_clear_outputs *) mcp->hid_report;
+
+ value = mcp->gpio_val & ~*mask;
+ value |= (*mask & *bits);
+
+ cmd->cmd = SET_CLEAR_OUTPUTS;
+ cmd->set_bmap = value;
+ cmd->clear_bmap = ~(value);
+
+ status = hid_hw_output_report(mcp->hdev, (u8 *) cmd,
+ sizeof(struct mcp_set_clear_outputs));
+
+ if (status == sizeof(struct mcp_set_clear_outputs))
+ mcp->gpio_val = value;
+
+ mutex_unlock(&mcp->lock);
+}
+
+static void mcp_set(struct gpio_chip *gc, unsigned int gpio_nr, int value)
+{
+ unsigned long mask = 1 << gpio_nr;
+ unsigned long bmap_value = value << gpio_nr;
+
+ mcp_set_multiple(gc, &mask, &bmap_value);
+}
+
+static int mcp_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ u32 val;
+ struct mcp2200 *mcp = gpiochip_get_data(gc);
+ int status;
+
+ status = mcp_cmd_read_all(mcp);
+ if (status)
+ return status;
+
+ val = mcp->gpio_inval;
+ *bits = (val & *mask);
+ return 0;
+}
+
+static int mcp_get(struct gpio_chip *gc, unsigned int gpio_nr)
+{
+ unsigned long mask = 0, bits = 0;
+
+ mask = (1 << gpio_nr);
+ mcp_get_multiple(gc, &mask, &bits);
+ return bits > 0;
+}
+
+static int mcp_get_direction(struct gpio_chip *gc, unsigned int gpio_nr)
+{
+ struct mcp2200 *mcp = gpiochip_get_data(gc);
+
+ return (mcp->gpio_dir & (MCP2200_DIR_IN << gpio_nr))
+ ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+static int mcp_set_direction(struct gpio_chip *gc, unsigned int gpio_nr,
+ enum MCP_IO_DIR io_direction)
+{
+ struct mcp2200 *mcp = gpiochip_get_data(gc);
+ struct mcp_configure *conf;
+ int status;
+ /* after the configure cmd we will need to set the outputs again */
+ unsigned long mask = ~(mcp->gpio_dir); /* only set outputs */
+ unsigned long bits = mcp->gpio_val;
+ /* Offsets of alternative pins in config_alt_pins, 0 is not used */
+ u8 alt_pin_conf[8] = {SSPND, USBCFG, 0, 0, 0, 0, RXLED, TXLED};
+ u8 config_alt_pins = mcp->config_alt_pins;
+
+ /* Read the reset baudrate first; we need it later */
+ status = mcp_cmd_read_all(mcp);
+ if (status != 0)
+ return status;
+
+ mutex_lock(&mcp->lock);
+ conf = (struct mcp_configure *) mcp->hid_report;
+
+ /* configure will reset the chip! */
+ conf->cmd = CONFIGURE;
+ conf->io_bmap = (mcp->gpio_dir & ~(1 << gpio_nr))
+ | (io_direction << gpio_nr);
+ /* Don't overwrite the reset parameters */
+ conf->baud_h = mcp->baud_h;
+ conf->baud_l = mcp->baud_l;
+ conf->config_alt_options = mcp->config_alt_options;
+ conf->io_default_val_bmap = mcp->gpio_reset_val;
+ /* Adjust alt. func if necessary */
+ if (alt_pin_conf[gpio_nr])
+ config_alt_pins &= ~(1 << alt_pin_conf[gpio_nr]);
+ conf->config_alt_pins = config_alt_pins;
+
+ status = hid_hw_output_report(mcp->hdev, (u8 *) conf,
+ sizeof(struct mcp_set_clear_outputs));
+
+ if (status == sizeof(struct mcp_set_clear_outputs)) {
+ mcp->gpio_dir = conf->io_bmap;
+ mcp->config_alt_pins = config_alt_pins;
+ } else {
+ mutex_unlock(&mcp->lock);
+ return -EIO;
+ }
+
+ mutex_unlock(&mcp->lock);
+
+ /* Configure CMD will clear all IOs -> rewrite them */
+ mcp_set_multiple(gc, &mask, &bits);
+ return 0;
+}
+
+static int mcp_direction_input(struct gpio_chip *gc, unsigned int gpio_nr)
+{
+ return mcp_set_direction(gc, gpio_nr, MCP2200_DIR_IN);
+}
+
+static int mcp_direction_output(struct gpio_chip *gc, unsigned int gpio_nr,
+ int value)
+{
+ int ret;
+ unsigned long mask, bmap_value;
+
+ mask = 1 << gpio_nr;
+ bmap_value = value << gpio_nr;
+
+ ret = mcp_set_direction(gc, gpio_nr, MCP2200_DIR_OUT);
+ if (!ret)
+ mcp_set_multiple(gc, &mask, &bmap_value);
+ return ret;
+}
+
+static const struct gpio_chip template_chip = {
+ .label = "mcp2200",
+ .owner = THIS_MODULE,
+ .get_direction = mcp_get_direction,
+ .direction_input = mcp_direction_input,
+ .direction_output = mcp_direction_output,
+ .set = mcp_set,
+ .set_multiple = mcp_set_multiple,
+ .get = mcp_get,
+ .get_multiple = mcp_get_multiple,
+ .base = -1,
+ .ngpio = MCP_NGPIO,
+ .can_sleep = true,
+};
+
+/*
+ * The MCP2200 uses an interrupt endpoint for input reports. This function
+ * is called by the HID layer when it receives an input report from the
+ * MCP2200, which is actually a response to the previously sent command.
+ */
+static int mcp2200_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct mcp2200 *mcp = hid_get_drvdata(hdev);
+ struct mcp_read_all_resp *all_resp;
+
+ switch (data[0]) {
+ case READ_ALL:
+ all_resp = (struct mcp_read_all_resp *) data;
+ mcp->status = 0;
+ mcp->gpio_inval = all_resp->io_port_val_bmap;
+ mcp->baud_h = all_resp->baud_h;
+ mcp->baud_l = all_resp->baud_l;
+ mcp->gpio_reset_val = all_resp->io_default_val_bmap;
+ mcp->config_alt_pins = all_resp->config_alt_pins;
+ mcp->config_alt_options = all_resp->config_alt_options;
+ break;
+ default:
+ mcp->status = -EIO;
+ break;
+ }
+
+ complete(&mcp->wait_in_report);
+ return 0;
+}
+
+static int mcp2200_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct mcp2200 *mcp;
+
+ mcp = devm_kzalloc(&hdev->dev, sizeof(*mcp), GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "can't parse reports\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, 0);
+ if (ret) {
+ hid_err(hdev, "can't start hardware\n");
+ return ret;
+ }
+
+ hid_info(hdev, "USB HID v%x.%02x Device [%s] on %s\n", hdev->version >> 8,
+ hdev->version & 0xff, hdev->name, hdev->phys);
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "can't open device\n");
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ mutex_init(&mcp->lock);
+ init_completion(&mcp->wait_in_report);
+ hid_set_drvdata(hdev, mcp);
+ mcp->hdev = hdev;
+
+ mcp->gc = template_chip;
+ mcp->gc.parent = &hdev->dev;
+
+ ret = devm_gpiochip_add_data(&hdev->dev, &mcp->gc, mcp);
+ if (ret < 0) {
+ hid_err(hdev, "Unable to register gpiochip\n");
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mcp2200_remove(struct hid_device *hdev)
+{
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id mcp2200_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_MCP2200) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mcp2200_devices);
+
+static struct hid_driver mcp2200_driver = {
+ .name = "mcp2200",
+ .id_table = mcp2200_devices,
+ .probe = mcp2200_probe,
+ .remove = mcp2200_remove,
+ .raw_event = mcp2200_raw_event,
+};
+
+/* Register with HID core */
+module_hid_driver(mcp2200_driver);
+
+MODULE_AUTHOR("Johannes Roith <johannes@gnu-linux.rocks>");
+MODULE_DESCRIPTION("MCP2200 Microchip HID USB to GPIO bridge");
+MODULE_LICENSE("GPL");
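
Once this driver binds, its eight pins show up as an ordinary Linux gpiochip. A hedged userspace sketch using the libgpiod v1 API (the chip path and line offset are assumptions for illustration):

    #include <gpiod.h>
    #include <stdio.h>

    int main(void)
    {
            /* Assumed: the MCP2200 enumerated as gpiochip0; GP3 drives an LED. */
            struct gpiod_chip *chip = gpiod_chip_open("/dev/gpiochip0");
            struct gpiod_line *line;

            if (!chip)
                    return 1;

            line = gpiod_chip_get_line(chip, 3);
            if (!line || gpiod_line_request_output(line, "mcp2200-demo", 0) < 0) {
                    gpiod_chip_close(chip);
                    return 1;
            }

            gpiod_line_set_value(line, 1);  /* drive GP3 high */
            gpiod_chip_close(chip);
            return 0;
    }

Note that mcp_cmd_read_all() above can sleep up to four seconds waiting for the device's response, which is why the gpio_chip is registered with can_sleep = true.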
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index aef0785c9..f9cceaeff 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -49,6 +49,7 @@ enum {
MCP2221_I2C_MASK_ADDR_NACK = 0x40,
MCP2221_I2C_WRADDRL_SEND = 0x21,
MCP2221_I2C_ADDR_NACK = 0x25,
+ MCP2221_I2C_READ_PARTIAL = 0x54,
MCP2221_I2C_READ_COMPL = 0x55,
MCP2221_ALT_F_NOT_GPIOV = 0xEE,
MCP2221_ALT_F_NOT_GPIOD = 0xEF,
@@ -187,6 +188,25 @@ static int mcp_cancel_last_cmd(struct mcp2221 *mcp)
return mcp_send_data_req_status(mcp, mcp->txbuf, 8);
}
+/* Check whether the last command succeeded or failed and return the result.
+ * If the command failed, cancel it, which also frees the i2c bus.
+ */
+static int mcp_chk_last_cmd_status_free_bus(struct mcp2221 *mcp)
+{
+ int ret;
+
+ ret = mcp_chk_last_cmd_status(mcp);
+ if (ret) {
+ /* The last command was a failure.
+ * Send a cancel which will also free the bus.
+ */
+ usleep_range(980, 1000);
+ mcp_cancel_last_cmd(mcp);
+ }
+
+ return ret;
+}
+
static int mcp_set_i2c_speed(struct mcp2221 *mcp)
{
int ret;
@@ -241,7 +261,7 @@ static int mcp_i2c_write(struct mcp2221 *mcp,
usleep_range(980, 1000);
if (last_status) {
- ret = mcp_chk_last_cmd_status(mcp);
+ ret = mcp_chk_last_cmd_status_free_bus(mcp);
if (ret)
return ret;
}
@@ -278,6 +298,7 @@ static int mcp_i2c_smbus_read(struct mcp2221 *mcp,
{
int ret;
u16 total_len;
+ int retries = 0;
mcp->txbuf[0] = type;
if (msg) {
@@ -301,20 +322,31 @@ static int mcp_i2c_smbus_read(struct mcp2221 *mcp,
mcp->rxbuf_idx = 0;
do {
+ /* Wait for the data to be read by the device */
+ usleep_range(980, 1000);
+
memset(mcp->txbuf, 0, 4);
mcp->txbuf[0] = MCP2221_I2C_GET_DATA;
ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
- if (ret)
- return ret;
-
- ret = mcp_chk_last_cmd_status(mcp);
- if (ret)
- return ret;
-
- usleep_range(980, 1000);
+ if (ret) {
+ if (retries < 5) {
+ /* The data wasn't ready to read.
+ * Wait a bit longer and try again.
+ */
+ usleep_range(90, 100);
+ retries++;
+ } else {
+ return ret;
+ }
+ } else {
+ retries = 0;
+ }
} while (mcp->rxbuf_idx < total_len);
+ usleep_range(980, 1000);
+ ret = mcp_chk_last_cmd_status_free_bus(mcp);
+
return ret;
}
@@ -328,11 +360,6 @@ static int mcp_i2c_xfer(struct i2c_adapter *adapter,
mutex_lock(&mcp->lock);
- /* Setting speed before every transaction is required for mcp2221 */
- ret = mcp_set_i2c_speed(mcp);
- if (ret)
- goto exit;
-
if (num == 1) {
if (msgs->flags & I2C_M_RD) {
ret = mcp_i2c_smbus_read(mcp, msgs, MCP2221_I2C_RD_DATA,
@@ -417,9 +444,7 @@ static int mcp_smbus_write(struct mcp2221 *mcp, u16 addr,
if (last_status) {
usleep_range(980, 1000);
- ret = mcp_chk_last_cmd_status(mcp);
- if (ret)
- return ret;
+ ret = mcp_chk_last_cmd_status_free_bus(mcp);
}
return ret;
@@ -437,10 +462,6 @@ static int mcp_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
mutex_lock(&mcp->lock);
- ret = mcp_set_i2c_speed(mcp);
- if (ret)
- goto exit;
-
switch (size) {
case I2C_SMBUS_QUICK:
@@ -791,7 +812,8 @@ static int mcp2221_raw_event(struct hid_device *hdev,
mcp->status = -EIO;
break;
}
- if (data[2] == MCP2221_I2C_READ_COMPL) {
+ if (data[2] == MCP2221_I2C_READ_COMPL ||
+ data[2] == MCP2221_I2C_READ_PARTIAL) {
buf = mcp->rxbuf;
memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]);
mcp->rxbuf_idx = mcp->rxbuf_idx + data[3];
@@ -1150,12 +1172,18 @@ static int mcp2221_probe(struct hid_device *hdev,
if (i2c_clk_freq < 50)
i2c_clk_freq = 50;
mcp->cur_i2c_clk_div = (12000000 / (i2c_clk_freq * 1000)) - 3;
+ ret = mcp_set_i2c_speed(mcp);
+ if (ret) {
+ hid_err(hdev, "can't set i2c speed: %d\n", ret);
+ return ret;
+ }
mcp->adapter.owner = THIS_MODULE;
mcp->adapter.class = I2C_CLASS_HWMON;
mcp->adapter.algo = &mcp_i2c_algo;
mcp->adapter.retries = 1;
mcp->adapter.dev.parent = &hdev->dev;
+ ACPI_COMPANION_SET(&mcp->adapter.dev, ACPI_COMPANION(hdev->dev.parent));
snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
"MCP2221 usb-i2c bridge");
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 997c3a1ad..ccc4032fb 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -3,6 +3,9 @@
* HID driver for Nintendo Switch Joy-Cons and Pro Controllers
*
* Copyright (c) 2019-2021 Daniel J. Ogorchock <djogorchock@gmail.com>
+ * Portions Copyright (c) 2020 Nadia Holmquist Pedersen <nadia@nhp.sh>
+ * Copyright (c) 2022 Emily Strickland <linux@emily.st>
+ * Copyright (c) 2023 Ryan McClelland <rymcclel@gmail.com>
*
* The following resources/projects were referenced for this driver:
* https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering
@@ -17,6 +20,9 @@
* This driver supports the Nintendo Switch Joy-Cons and Pro Controllers. The
* Pro Controllers can either be used over USB or Bluetooth.
*
+ * This driver also incorporates support for Nintendo Switch Online controllers
+ * for the NES, SNES, Sega Genesis, and N64.
+ *
* The driver will retrieve the factory calibration info from the controllers,
* so little to no user calibration should be required.
*
@@ -305,9 +311,14 @@ enum joycon_ctlr_state {
/* Controller type received as part of device info */
enum joycon_ctlr_type {
- JOYCON_CTLR_TYPE_JCL = 0x01,
- JOYCON_CTLR_TYPE_JCR = 0x02,
- JOYCON_CTLR_TYPE_PRO = 0x03,
+ JOYCON_CTLR_TYPE_JCL = 0x01,
+ JOYCON_CTLR_TYPE_JCR = 0x02,
+ JOYCON_CTLR_TYPE_PRO = 0x03,
+ JOYCON_CTLR_TYPE_NESL = 0x09,
+ JOYCON_CTLR_TYPE_NESR = 0x0A,
+ JOYCON_CTLR_TYPE_SNES = 0x0B,
+ JOYCON_CTLR_TYPE_GEN = 0x0D,
+ JOYCON_CTLR_TYPE_N64 = 0x0C,
};
struct joycon_stick_cal {
@@ -348,6 +359,137 @@ struct joycon_imu_cal {
#define JC_BTN_L BIT(22)
#define JC_BTN_ZL BIT(23)
+struct joycon_ctlr_button_mapping {
+ u32 code;
+ u32 bit;
+};
+
+/*
+ * D-pad is configured as buttons for the left Joy-Con only!
+ */
+static const struct joycon_ctlr_button_mapping left_joycon_button_mappings[] = {
+ { BTN_TL, JC_BTN_L, },
+ { BTN_TL2, JC_BTN_ZL, },
+ { BTN_SELECT, JC_BTN_MINUS, },
+ { BTN_THUMBL, JC_BTN_LSTICK, },
+ { BTN_DPAD_UP, JC_BTN_UP, },
+ { BTN_DPAD_DOWN, JC_BTN_DOWN, },
+ { BTN_DPAD_LEFT, JC_BTN_LEFT, },
+ { BTN_DPAD_RIGHT, JC_BTN_RIGHT, },
+ { BTN_Z, JC_BTN_CAP, },
+ { /* sentinel */ },
+};
+
+/*
+ * The unused *right*-side triggers become the SL/SR triggers for the *left*
+ * Joy-Con, if and only if we're not using a charging grip.
+ */
+static const struct joycon_ctlr_button_mapping left_joycon_s_button_mappings[] = {
+ { BTN_TR, JC_BTN_SL_L, },
+ { BTN_TR2, JC_BTN_SR_L, },
+ { /* sentinel */ },
+};
+
+static const struct joycon_ctlr_button_mapping right_joycon_button_mappings[] = {
+ { BTN_EAST, JC_BTN_A, },
+ { BTN_SOUTH, JC_BTN_B, },
+ { BTN_NORTH, JC_BTN_X, },
+ { BTN_WEST, JC_BTN_Y, },
+ { BTN_TR, JC_BTN_R, },
+ { BTN_TR2, JC_BTN_ZR, },
+ { BTN_START, JC_BTN_PLUS, },
+ { BTN_THUMBR, JC_BTN_RSTICK, },
+ { BTN_MODE, JC_BTN_HOME, },
+ { /* sentinel */ },
+};
+
+/*
+ * The unused *left*-side triggers become the SL/SR triggers for the *right*
+ * Joy-Con, if and only if we're not using a charging grip.
+ */
+static const struct joycon_ctlr_button_mapping right_joycon_s_button_mappings[] = {
+ { BTN_TL, JC_BTN_SL_R, },
+ { BTN_TL2, JC_BTN_SR_R, },
+ { /* sentinel */ },
+};
+
+static const struct joycon_ctlr_button_mapping procon_button_mappings[] = {
+ { BTN_EAST, JC_BTN_A, },
+ { BTN_SOUTH, JC_BTN_B, },
+ { BTN_NORTH, JC_BTN_X, },
+ { BTN_WEST, JC_BTN_Y, },
+ { BTN_TL, JC_BTN_L, },
+ { BTN_TR, JC_BTN_R, },
+ { BTN_TL2, JC_BTN_ZL, },
+ { BTN_TR2, JC_BTN_ZR, },
+ { BTN_SELECT, JC_BTN_MINUS, },
+ { BTN_START, JC_BTN_PLUS, },
+ { BTN_THUMBL, JC_BTN_LSTICK, },
+ { BTN_THUMBR, JC_BTN_RSTICK, },
+ { BTN_MODE, JC_BTN_HOME, },
+ { BTN_Z, JC_BTN_CAP, },
+ { /* sentinel */ },
+};
+
+static const struct joycon_ctlr_button_mapping nescon_button_mappings[] = {
+ { BTN_SOUTH, JC_BTN_A, },
+ { BTN_EAST, JC_BTN_B, },
+ { BTN_TL, JC_BTN_L, },
+ { BTN_TR, JC_BTN_R, },
+ { BTN_SELECT, JC_BTN_MINUS, },
+ { BTN_START, JC_BTN_PLUS, },
+ { /* sentinel */ },
+};
+
+static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
+ { BTN_EAST, JC_BTN_A, },
+ { BTN_SOUTH, JC_BTN_B, },
+ { BTN_NORTH, JC_BTN_X, },
+ { BTN_WEST, JC_BTN_Y, },
+ { BTN_TL, JC_BTN_L, },
+ { BTN_TR, JC_BTN_R, },
+ { BTN_TL2, JC_BTN_ZL, },
+ { BTN_TR2, JC_BTN_ZR, },
+ { BTN_SELECT, JC_BTN_MINUS, },
+ { BTN_START, JC_BTN_PLUS, },
+ { /* sentinel */ },
+};
+
+/*
+ * "A", "B", and "C" are mapped positionally, rather than by label (e.g., "A"
+ * gets assigned to BTN_EAST instead of BTN_A).
+ */
+static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
+ { BTN_SOUTH, JC_BTN_A, },
+ { BTN_EAST, JC_BTN_B, },
+ { BTN_WEST, JC_BTN_R, },
+ { BTN_SELECT, JC_BTN_ZR, },
+ { BTN_START, JC_BTN_PLUS, },
+ { BTN_MODE, JC_BTN_HOME, },
+ { BTN_Z, JC_BTN_CAP, },
+ { /* sentinel */ },
+};
+
+/*
+ * N64's C buttons get assigned to d-pad directions and registered as buttons.
+ */
+static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
+ { BTN_A, JC_BTN_A, },
+ { BTN_B, JC_BTN_B, },
+ { BTN_TL2, JC_BTN_ZL, }, /* Z */
+ { BTN_TL, JC_BTN_L, },
+ { BTN_TR, JC_BTN_R, },
+ { BTN_TR2, JC_BTN_LSTICK, }, /* ZR */
+ { BTN_START, JC_BTN_PLUS, },
+ { BTN_FORWARD, JC_BTN_Y, }, /* C UP */
+ { BTN_BACK, JC_BTN_ZR, }, /* C DOWN */
+ { BTN_LEFT, JC_BTN_X, }, /* C LEFT */
+ { BTN_RIGHT, JC_BTN_MINUS, }, /* C RIGHT */
+ { BTN_MODE, JC_BTN_HOME, },
+ { BTN_Z, JC_BTN_CAP, },
+ { /* sentinel */ },
+};
+
enum joycon_msg_type {
JOYCON_MSG_TYPE_NONE,
JOYCON_MSG_TYPE_USB,
@@ -506,13 +648,182 @@ struct joycon_ctlr {
/* Does this controller have inputs associated with left joycon? */
#define jc_type_has_left(ctlr) \
(ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL || \
- ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO)
+ ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO || \
+ ctlr->ctlr_type == JOYCON_CTLR_TYPE_N64)
/* Does this controller have inputs associated with right joycon? */
#define jc_type_has_right(ctlr) \
(ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR || \
ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO)
+
+/*
+ * Controller device helpers
+ *
+ * These look at the device ID known to the HID subsystem to identify a device,
+ * but use caution: some NSO devices lie about themselves (the NES Joy-Cons
+ * and the Sega Genesis controller). See the type helpers below.
+ *
+ * These helpers are most useful early during the HID probe or in conjunction
+ * with the capability helpers below.
+ */
+static inline bool joycon_device_is_left_joycon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL;
+}
+
+static inline bool joycon_device_is_right_joycon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR;
+}
+
+static inline bool joycon_device_is_procon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_PROCON;
+}
+
+static inline bool joycon_device_is_chrggrip(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_CHRGGRIP;
+}
+
+static inline bool joycon_device_is_snescon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_SNESCON;
+}
+
+static inline bool joycon_device_is_gencon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_GENCON;
+}
+
+static inline bool joycon_device_is_n64con(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->product == USB_DEVICE_ID_NINTENDO_N64CON;
+}
+
+static inline bool joycon_device_has_usb(struct joycon_ctlr *ctlr)
+{
+ return joycon_device_is_procon(ctlr) ||
+ joycon_device_is_chrggrip(ctlr) ||
+ joycon_device_is_snescon(ctlr) ||
+ joycon_device_is_gencon(ctlr) ||
+ joycon_device_is_n64con(ctlr);
+}
+
+/*
+ * Controller type helpers
+ *
+ * These are slightly different from the device-ID-based helpers above. They are
+ * generally more reliable, since they can distinguish between, e.g., Genesis
+ * versus SNES, or NES Joy-Cons versus regular Switch Joy-Cons. They're most
+ * useful for reporting available inputs. For other kinds of distinctions, see
+ * the capability helpers below.
+ *
+ * They have two major drawbacks: (1) they're not available until after we set
+ * the reporting method and then request the device info; (2) they can't
+ * distinguish all controllers (like the Charging Grip from the Pro controller).
+ */
+static inline bool joycon_type_is_left_joycon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCL;
+}
+
+static inline bool joycon_type_is_right_joycon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_JCR;
+}
+
+static inline bool joycon_type_is_procon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_PRO;
+}
+
+static inline bool joycon_type_is_snescon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_SNES;
+}
+
+static inline bool joycon_type_is_gencon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_GEN;
+}
+
+static inline bool joycon_type_is_n64con(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_N64;
+}
+
+static inline bool joycon_type_is_left_nescon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESL;
+}
+
+static inline bool joycon_type_is_right_nescon(struct joycon_ctlr *ctlr)
+{
+ return ctlr->ctlr_type == JOYCON_CTLR_TYPE_NESR;
+}
+
+static inline bool joycon_type_has_left_controls(struct joycon_ctlr *ctlr)
+{
+ return joycon_type_is_left_joycon(ctlr) ||
+ joycon_type_is_procon(ctlr);
+}
+
+static inline bool joycon_type_has_right_controls(struct joycon_ctlr *ctlr)
+{
+ return joycon_type_is_right_joycon(ctlr) ||
+ joycon_type_is_procon(ctlr);
+}
+
+static inline bool joycon_type_is_any_joycon(struct joycon_ctlr *ctlr)
+{
+ return joycon_type_is_left_joycon(ctlr) ||
+ joycon_type_is_right_joycon(ctlr) ||
+ joycon_device_is_chrggrip(ctlr);
+}
+
+static inline bool joycon_type_is_any_nescon(struct joycon_ctlr *ctlr)
+{
+ return joycon_type_is_left_nescon(ctlr) ||
+ joycon_type_is_right_nescon(ctlr);
+}
+
+/*
+ * Controller capability helpers
+ *
+ * These helpers combine the device and type helpers above to detect certain
+ * capabilities during initialization. They are always accurate but (since they
+ * use type helpers) cannot be used early in the HID probe.
+ */
+static inline bool joycon_has_imu(struct joycon_ctlr *ctlr)
+{
+ return joycon_device_is_chrggrip(ctlr) ||
+ joycon_type_is_any_joycon(ctlr) ||
+ joycon_type_is_procon(ctlr);
+}
+
+static inline bool joycon_has_joysticks(struct joycon_ctlr *ctlr)
+{
+ return joycon_device_is_chrggrip(ctlr) ||
+ joycon_type_is_any_joycon(ctlr) ||
+ joycon_type_is_procon(ctlr) ||
+ joycon_type_is_n64con(ctlr);
+}
+
+static inline bool joycon_has_rumble(struct joycon_ctlr *ctlr)
+{
+ return joycon_device_is_chrggrip(ctlr) ||
+ joycon_type_is_any_joycon(ctlr) ||
+ joycon_type_is_procon(ctlr) ||
+ joycon_type_is_n64con(ctlr);
+}
+
+static inline bool joycon_using_usb(struct joycon_ctlr *ctlr)
+{
+ return ctlr->hdev->bus == BUS_USB;
+}
+
static int __joycon_hid_send(struct hid_device *hdev, u8 *data, size_t len)
{
u8 *buf;
@@ -1296,15 +1607,10 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
}
}
-static void joycon_parse_report(struct joycon_ctlr *ctlr,
- struct joycon_input_report *rep)
+static void joycon_handle_rumble_report(struct joycon_ctlr *ctlr, struct joycon_input_report *rep)
{
- struct input_dev *dev = ctlr->input;
unsigned long flags;
- u8 tmp;
- u32 btns;
unsigned long msecs = jiffies_to_msecs(jiffies);
- unsigned long report_delta_ms = msecs - ctlr->last_input_report_msecs;
spin_lock_irqsave(&ctlr->lock, flags);
if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report &&
@@ -1323,11 +1629,21 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
}
- /* Parse the battery status */
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+}
+
+static void joycon_parse_battery_status(struct joycon_ctlr *ctlr, struct joycon_input_report *rep)
+{
+ u8 tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
tmp = rep->bat_con;
ctlr->host_powered = tmp & BIT(0);
ctlr->battery_charging = tmp & BIT(4);
tmp = tmp >> 5;
+
switch (tmp) {
case 0: /* empty */
ctlr->battery_capacity = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
@@ -1349,102 +1665,121 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
hid_warn(ctlr->hdev, "Invalid battery status\n");
break;
}
+
spin_unlock_irqrestore(&ctlr->lock, flags);
+}
- /* Parse the buttons and sticks */
- btns = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24);
-
- if (jc_type_has_left(ctlr)) {
- u16 raw_x;
- u16 raw_y;
- s32 x;
- s32 y;
-
- /* get raw stick values */
- raw_x = hid_field_extract(ctlr->hdev, rep->left_stick, 0, 12);
- raw_y = hid_field_extract(ctlr->hdev,
- rep->left_stick + 1, 4, 12);
- /* map the stick values */
- x = joycon_map_stick_val(&ctlr->left_stick_cal_x, raw_x);
- y = -joycon_map_stick_val(&ctlr->left_stick_cal_y, raw_y);
- /* report sticks */
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
-
- /* report buttons */
- input_report_key(dev, BTN_TL, btns & JC_BTN_L);
- input_report_key(dev, BTN_TL2, btns & JC_BTN_ZL);
- input_report_key(dev, BTN_SELECT, btns & JC_BTN_MINUS);
- input_report_key(dev, BTN_THUMBL, btns & JC_BTN_LSTICK);
- input_report_key(dev, BTN_Z, btns & JC_BTN_CAP);
-
- if (jc_type_is_joycon(ctlr)) {
- /* Report the S buttons as the non-existent triggers */
- input_report_key(dev, BTN_TR, btns & JC_BTN_SL_L);
- input_report_key(dev, BTN_TR2, btns & JC_BTN_SR_L);
-
- /* Report d-pad as digital buttons for the joy-cons */
- input_report_key(dev, BTN_DPAD_DOWN,
- btns & JC_BTN_DOWN);
- input_report_key(dev, BTN_DPAD_UP, btns & JC_BTN_UP);
- input_report_key(dev, BTN_DPAD_RIGHT,
- btns & JC_BTN_RIGHT);
- input_report_key(dev, BTN_DPAD_LEFT,
- btns & JC_BTN_LEFT);
- } else {
- int hatx = 0;
- int haty = 0;
-
- /* d-pad x */
- if (btns & JC_BTN_LEFT)
- hatx = -1;
- else if (btns & JC_BTN_RIGHT)
- hatx = 1;
- input_report_abs(dev, ABS_HAT0X, hatx);
-
- /* d-pad y */
- if (btns & JC_BTN_UP)
- haty = -1;
- else if (btns & JC_BTN_DOWN)
- haty = 1;
- input_report_abs(dev, ABS_HAT0Y, haty);
- }
- }
- if (jc_type_has_right(ctlr)) {
- u16 raw_x;
- u16 raw_y;
- s32 x;
- s32 y;
-
- /* get raw stick values */
- raw_x = hid_field_extract(ctlr->hdev, rep->right_stick, 0, 12);
- raw_y = hid_field_extract(ctlr->hdev,
- rep->right_stick + 1, 4, 12);
- /* map stick values */
- x = joycon_map_stick_val(&ctlr->right_stick_cal_x, raw_x);
- y = -joycon_map_stick_val(&ctlr->right_stick_cal_y, raw_y);
- /* report sticks */
- input_report_abs(dev, ABS_RX, x);
- input_report_abs(dev, ABS_RY, y);
-
- /* report buttons */
- input_report_key(dev, BTN_TR, btns & JC_BTN_R);
- input_report_key(dev, BTN_TR2, btns & JC_BTN_ZR);
- if (jc_type_is_joycon(ctlr)) {
- /* Report the S buttons as the non-existent triggers */
- input_report_key(dev, BTN_TL, btns & JC_BTN_SL_R);
- input_report_key(dev, BTN_TL2, btns & JC_BTN_SR_R);
- }
- input_report_key(dev, BTN_START, btns & JC_BTN_PLUS);
- input_report_key(dev, BTN_THUMBR, btns & JC_BTN_RSTICK);
- input_report_key(dev, BTN_MODE, btns & JC_BTN_HOME);
- input_report_key(dev, BTN_WEST, btns & JC_BTN_Y);
- input_report_key(dev, BTN_NORTH, btns & JC_BTN_X);
- input_report_key(dev, BTN_EAST, btns & JC_BTN_A);
- input_report_key(dev, BTN_SOUTH, btns & JC_BTN_B);
+static void joycon_report_left_stick(struct joycon_ctlr *ctlr,
+ struct joycon_input_report *rep)
+{
+ u16 raw_x;
+ u16 raw_y;
+ s32 x;
+ s32 y;
+
+ raw_x = hid_field_extract(ctlr->hdev, rep->left_stick, 0, 12);
+ raw_y = hid_field_extract(ctlr->hdev, rep->left_stick + 1, 4, 12);
+
+ x = joycon_map_stick_val(&ctlr->left_stick_cal_x, raw_x);
+ y = -joycon_map_stick_val(&ctlr->left_stick_cal_y, raw_y);
+
+ input_report_abs(ctlr->input, ABS_X, x);
+ input_report_abs(ctlr->input, ABS_Y, y);
+}
+
+static void joycon_report_right_stick(struct joycon_ctlr *ctlr,
+ struct joycon_input_report *rep)
+{
+ u16 raw_x;
+ u16 raw_y;
+ s32 x;
+ s32 y;
+
+ raw_x = hid_field_extract(ctlr->hdev, rep->right_stick, 0, 12);
+ raw_y = hid_field_extract(ctlr->hdev, rep->right_stick + 1, 4, 12);
+
+ x = joycon_map_stick_val(&ctlr->right_stick_cal_x, raw_x);
+ y = -joycon_map_stick_val(&ctlr->right_stick_cal_y, raw_y);
+
+ input_report_abs(ctlr->input, ABS_RX, x);
+ input_report_abs(ctlr->input, ABS_RY, y);
+}
+
+static void joycon_report_dpad(struct joycon_ctlr *ctlr,
+ struct joycon_input_report *rep)
+{
+ int hatx = 0;
+ int haty = 0;
+ u32 btns = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24);
+
+ if (btns & JC_BTN_LEFT)
+ hatx = -1;
+ else if (btns & JC_BTN_RIGHT)
+ hatx = 1;
+
+ if (btns & JC_BTN_UP)
+ haty = -1;
+ else if (btns & JC_BTN_DOWN)
+ haty = 1;
+
+ input_report_abs(ctlr->input, ABS_HAT0X, hatx);
+ input_report_abs(ctlr->input, ABS_HAT0Y, haty);
+}
+
+static void joycon_report_buttons(struct joycon_ctlr *ctlr,
+ struct joycon_input_report *rep,
+ const struct joycon_ctlr_button_mapping button_mappings[])
+{
+ const struct joycon_ctlr_button_mapping *button;
+ u32 status = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24);
+
+ for (button = button_mappings; button->code; button++)
+ input_report_key(ctlr->input, button->code, status & button->bit);
+}
+
+static void joycon_parse_report(struct joycon_ctlr *ctlr,
+ struct joycon_input_report *rep)
+{
+ unsigned long flags;
+ unsigned long msecs = jiffies_to_msecs(jiffies);
+ unsigned long report_delta_ms = msecs - ctlr->last_input_report_msecs;
+
+ if (joycon_has_rumble(ctlr))
+ joycon_handle_rumble_report(ctlr, rep);
+
+ joycon_parse_battery_status(ctlr, rep);
+
+ if (joycon_type_is_left_joycon(ctlr)) {
+ joycon_report_left_stick(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, left_joycon_button_mappings);
+ if (!joycon_device_is_chrggrip(ctlr))
+ joycon_report_buttons(ctlr, rep, left_joycon_s_button_mappings);
+ } else if (joycon_type_is_right_joycon(ctlr)) {
+ joycon_report_right_stick(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, right_joycon_button_mappings);
+ if (!joycon_device_is_chrggrip(ctlr))
+ joycon_report_buttons(ctlr, rep, right_joycon_s_button_mappings);
+ } else if (joycon_type_is_procon(ctlr)) {
+ joycon_report_left_stick(ctlr, rep);
+ joycon_report_right_stick(ctlr, rep);
+ joycon_report_dpad(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, procon_button_mappings);
+ } else if (joycon_type_is_any_nescon(ctlr)) {
+ joycon_report_dpad(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, nescon_button_mappings);
+ } else if (joycon_type_is_snescon(ctlr)) {
+ joycon_report_dpad(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, snescon_button_mappings);
+ } else if (joycon_type_is_gencon(ctlr)) {
+ joycon_report_dpad(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, gencon_button_mappings);
+ } else if (joycon_type_is_n64con(ctlr)) {
+ joycon_report_left_stick(ctlr, rep);
+ joycon_report_dpad(ctlr, rep);
+ joycon_report_buttons(ctlr, rep, n64con_button_mappings);
}
- input_sync(dev);
+ input_sync(ctlr->input);
spin_lock_irqsave(&ctlr->lock, flags);
ctlr->last_input_report_msecs = msecs;
@@ -1484,7 +1819,7 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
}
/* parse IMU data if present */
- if (rep->id == JC_INPUT_IMU_DATA)
+ if ((rep->id == JC_INPUT_IMU_DATA) && joycon_has_imu(ctlr))
joycon_parse_imu_report(ctlr, rep);
}
@@ -1697,123 +2032,65 @@ static int joycon_play_effect(struct input_dev *dev, void *data,
}
#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
-static const unsigned int joycon_button_inputs_l[] = {
- BTN_SELECT, BTN_Z, BTN_THUMBL,
- BTN_TL, BTN_TL2,
- 0 /* 0 signals end of array */
-};
-
-static const unsigned int joycon_button_inputs_r[] = {
- BTN_START, BTN_MODE, BTN_THUMBR,
- BTN_SOUTH, BTN_EAST, BTN_NORTH, BTN_WEST,
- BTN_TR, BTN_TR2,
- 0 /* 0 signals end of array */
-};
-
-/* We report joy-con d-pad inputs as buttons and pro controller as a hat. */
-static const unsigned int joycon_dpad_inputs_jc[] = {
- BTN_DPAD_UP, BTN_DPAD_DOWN, BTN_DPAD_LEFT, BTN_DPAD_RIGHT,
- 0 /* 0 signals end of array */
-};
-
-static int joycon_input_create(struct joycon_ctlr *ctlr)
+static void joycon_config_left_stick(struct input_dev *idev)
{
- struct hid_device *hdev;
- const char *name;
- const char *imu_name;
- int ret;
- int i;
-
- hdev = ctlr->hdev;
+ input_set_abs_params(idev,
+ ABS_X,
+ -JC_MAX_STICK_MAG,
+ JC_MAX_STICK_MAG,
+ JC_STICK_FUZZ,
+ JC_STICK_FLAT);
+ input_set_abs_params(idev,
+ ABS_Y,
+ -JC_MAX_STICK_MAG,
+ JC_MAX_STICK_MAG,
+ JC_STICK_FUZZ,
+ JC_STICK_FLAT);
+}
- switch (hdev->product) {
- case USB_DEVICE_ID_NINTENDO_PROCON:
- name = "Nintendo Switch Pro Controller";
- imu_name = "Nintendo Switch Pro Controller IMU";
- break;
- case USB_DEVICE_ID_NINTENDO_CHRGGRIP:
- if (jc_type_has_left(ctlr)) {
- name = "Nintendo Switch Left Joy-Con (Grip)";
- imu_name = "Nintendo Switch Left Joy-Con IMU (Grip)";
- } else {
- name = "Nintendo Switch Right Joy-Con (Grip)";
- imu_name = "Nintendo Switch Right Joy-Con IMU (Grip)";
- }
- break;
- case USB_DEVICE_ID_NINTENDO_JOYCONL:
- name = "Nintendo Switch Left Joy-Con";
- imu_name = "Nintendo Switch Left Joy-Con IMU";
- break;
- case USB_DEVICE_ID_NINTENDO_JOYCONR:
- name = "Nintendo Switch Right Joy-Con";
- imu_name = "Nintendo Switch Right Joy-Con IMU";
- break;
- default: /* Should be impossible */
- hid_err(hdev, "Invalid hid product\n");
- return -EINVAL;
- }
+static void joycon_config_right_stick(struct input_dev *idev)
+{
+ input_set_abs_params(idev,
+ ABS_RX,
+ -JC_MAX_STICK_MAG,
+ JC_MAX_STICK_MAG,
+ JC_STICK_FUZZ,
+ JC_STICK_FLAT);
+ input_set_abs_params(idev,
+ ABS_RY,
+ -JC_MAX_STICK_MAG,
+ JC_MAX_STICK_MAG,
+ JC_STICK_FUZZ,
+ JC_STICK_FLAT);
+}
- ctlr->input = devm_input_allocate_device(&hdev->dev);
- if (!ctlr->input)
- return -ENOMEM;
- ctlr->input->id.bustype = hdev->bus;
- ctlr->input->id.vendor = hdev->vendor;
- ctlr->input->id.product = hdev->product;
- ctlr->input->id.version = hdev->version;
- ctlr->input->uniq = ctlr->mac_addr_str;
- ctlr->input->name = name;
- ctlr->input->phys = hdev->phys;
- input_set_drvdata(ctlr->input, ctlr);
+static void joycon_config_dpad(struct input_dev *idev)
+{
+ input_set_abs_params(idev,
+ ABS_HAT0X,
+ -JC_MAX_DPAD_MAG,
+ JC_MAX_DPAD_MAG,
+ JC_DPAD_FUZZ,
+ JC_DPAD_FLAT);
+ input_set_abs_params(idev,
+ ABS_HAT0Y,
+ -JC_MAX_DPAD_MAG,
+ JC_MAX_DPAD_MAG,
+ JC_DPAD_FUZZ,
+ JC_DPAD_FLAT);
+}
- /* set up sticks and buttons */
- if (jc_type_has_left(ctlr)) {
- input_set_abs_params(ctlr->input, ABS_X,
- -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
- JC_STICK_FUZZ, JC_STICK_FLAT);
- input_set_abs_params(ctlr->input, ABS_Y,
- -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
- JC_STICK_FUZZ, JC_STICK_FLAT);
-
- for (i = 0; joycon_button_inputs_l[i] > 0; i++)
- input_set_capability(ctlr->input, EV_KEY,
- joycon_button_inputs_l[i]);
-
- /* configure d-pad differently for joy-con vs pro controller */
- if (hdev->product != USB_DEVICE_ID_NINTENDO_PROCON) {
- for (i = 0; joycon_dpad_inputs_jc[i] > 0; i++)
- input_set_capability(ctlr->input, EV_KEY,
- joycon_dpad_inputs_jc[i]);
- } else {
- input_set_abs_params(ctlr->input, ABS_HAT0X,
- -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG,
- JC_DPAD_FUZZ, JC_DPAD_FLAT);
- input_set_abs_params(ctlr->input, ABS_HAT0Y,
- -JC_MAX_DPAD_MAG, JC_MAX_DPAD_MAG,
- JC_DPAD_FUZZ, JC_DPAD_FLAT);
- }
- }
- if (jc_type_has_right(ctlr)) {
- input_set_abs_params(ctlr->input, ABS_RX,
- -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
- JC_STICK_FUZZ, JC_STICK_FLAT);
- input_set_abs_params(ctlr->input, ABS_RY,
- -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
- JC_STICK_FUZZ, JC_STICK_FLAT);
-
- for (i = 0; joycon_button_inputs_r[i] > 0; i++)
- input_set_capability(ctlr->input, EV_KEY,
- joycon_button_inputs_r[i]);
- }
+static void joycon_config_buttons(struct input_dev *idev,
+ const struct joycon_ctlr_button_mapping button_mappings[])
+{
+ const struct joycon_ctlr_button_mapping *button;
- /* Let's report joy-con S triggers separately */
- if (hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONL) {
- input_set_capability(ctlr->input, EV_KEY, BTN_TR);
- input_set_capability(ctlr->input, EV_KEY, BTN_TR2);
- } else if (hdev->product == USB_DEVICE_ID_NINTENDO_JOYCONR) {
- input_set_capability(ctlr->input, EV_KEY, BTN_TL);
- input_set_capability(ctlr->input, EV_KEY, BTN_TL2);
- }
+ for (button = button_mappings; button->code; button++)
+ input_set_capability(idev, EV_KEY, button->code);
+}
+static void joycon_config_rumble(struct joycon_ctlr *ctlr)
+{
#if IS_ENABLED(CONFIG_NINTENDO_FF)
/* set up rumble */
input_set_capability(ctlr->input, EV_FF, FF_RUMBLE);
@@ -1826,10 +2103,15 @@ static int joycon_input_create(struct joycon_ctlr *ctlr)
joycon_set_rumble(ctlr, 0, 0, false);
ctlr->rumble_msecs = jiffies_to_msecs(jiffies);
#endif
+}
- ret = input_register_device(ctlr->input);
- if (ret)
- return ret;
+static int joycon_imu_input_create(struct joycon_ctlr *ctlr)
+{
+ struct hid_device *hdev;
+ const char *imu_name;
+ int ret;
+
+ hdev = ctlr->hdev;
/* configure the imu input device */
ctlr->imu_input = devm_input_allocate_device(&hdev->dev);
@@ -1841,8 +2123,14 @@ static int joycon_input_create(struct joycon_ctlr *ctlr)
ctlr->imu_input->id.product = hdev->product;
ctlr->imu_input->id.version = hdev->version;
ctlr->imu_input->uniq = ctlr->mac_addr_str;
- ctlr->imu_input->name = imu_name;
ctlr->imu_input->phys = hdev->phys;
+
+ imu_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s (IMU)", ctlr->input->name);
+ if (!imu_name)
+ return -ENOMEM;
+
+ ctlr->imu_input->name = imu_name;
+
input_set_drvdata(ctlr->imu_input, ctlr);
/* configure imu axes */
@@ -1884,6 +2172,71 @@ static int joycon_input_create(struct joycon_ctlr *ctlr)
return 0;
}
+static int joycon_input_create(struct joycon_ctlr *ctlr)
+{
+ struct hid_device *hdev;
+ int ret;
+
+ hdev = ctlr->hdev;
+
+ ctlr->input = devm_input_allocate_device(&hdev->dev);
+ if (!ctlr->input)
+ return -ENOMEM;
+ ctlr->input->id.bustype = hdev->bus;
+ ctlr->input->id.vendor = hdev->vendor;
+ ctlr->input->id.product = hdev->product;
+ ctlr->input->id.version = hdev->version;
+ ctlr->input->uniq = ctlr->mac_addr_str;
+ ctlr->input->name = hdev->name;
+ ctlr->input->phys = hdev->phys;
+ input_set_drvdata(ctlr->input, ctlr);
+
+ ret = input_register_device(ctlr->input);
+ if (ret)
+ return ret;
+
+ if (joycon_type_is_right_joycon(ctlr)) {
+ joycon_config_right_stick(ctlr->input);
+ joycon_config_buttons(ctlr->input, right_joycon_button_mappings);
+ if (!joycon_device_is_chrggrip(ctlr))
+ joycon_config_buttons(ctlr->input, right_joycon_s_button_mappings);
+ } else if (joycon_type_is_left_joycon(ctlr)) {
+ joycon_config_left_stick(ctlr->input);
+ joycon_config_buttons(ctlr->input, left_joycon_button_mappings);
+ if (!joycon_device_is_chrggrip(ctlr))
+ joycon_config_buttons(ctlr->input, left_joycon_s_button_mappings);
+ } else if (joycon_type_is_procon(ctlr)) {
+ joycon_config_left_stick(ctlr->input);
+ joycon_config_right_stick(ctlr->input);
+ joycon_config_dpad(ctlr->input);
+ joycon_config_buttons(ctlr->input, procon_button_mappings);
+ } else if (joycon_type_is_any_nescon(ctlr)) {
+ joycon_config_dpad(ctlr->input);
+ joycon_config_buttons(ctlr->input, nescon_button_mappings);
+ } else if (joycon_type_is_snescon(ctlr)) {
+ joycon_config_dpad(ctlr->input);
+ joycon_config_buttons(ctlr->input, snescon_button_mappings);
+ } else if (joycon_type_is_gencon(ctlr)) {
+ joycon_config_dpad(ctlr->input);
+ joycon_config_buttons(ctlr->input, gencon_button_mappings);
+ } else if (joycon_type_is_n64con(ctlr)) {
+ joycon_config_dpad(ctlr->input);
+ joycon_config_left_stick(ctlr->input);
+ joycon_config_buttons(ctlr->input, n64con_button_mappings);
+ }
+
+ if (joycon_has_imu(ctlr)) {
+ ret = joycon_imu_input_create(ctlr);
+ if (ret)
+ return ret;
+ }
+
+ if (joycon_has_rumble(ctlr))
+ joycon_config_rumble(ctlr);
+
+ return 0;
+}
+
/* Because the subcommand sets all the leds at once, the brightness argument is ignored */
static int joycon_player_led_brightness_set(struct led_classdev *led,
enum led_brightness brightness)
@@ -2120,9 +2473,7 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
struct joycon_input_report *report;
req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
- mutex_lock(&ctlr->output_mutex);
ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
- mutex_unlock(&ctlr->output_mutex);
if (ret) {
hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
return ret;
@@ -2145,8 +2496,17 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
return -ENOMEM;
hid_info(ctlr->hdev, "controller MAC = %s\n", ctlr->mac_addr_str);
- /* Retrieve the type so we can distinguish for charging grip */
+ /*
+ * Retrieve the type so we can distinguish between controller models.
+ * Unfortunately, hdev->product can't always be used due to an apparent
+ * bug with the NSO Genesis controller. Over USB, it reports the PID as
+ * 0x201E, but over Bluetooth it reports the PID as 0x2017, which is the
+ * same as the NSO SNES controller. This differs from the rest of the
+ * controllers, which report the same PID over USB and Bluetooth.
+ */
ctlr->ctlr_type = report->subcmd_reply.data[2];
+ hid_dbg(ctlr->hdev, "controller type = 0x%02X\n", ctlr->ctlr_type);
return 0;
}
@@ -2158,8 +2518,7 @@ static int joycon_init(struct hid_device *hdev)
mutex_lock(&ctlr->output_mutex);
/* if handshake command fails, assume ble pro controller */
- if ((jc_type_is_procon(ctlr) || jc_type_is_chrggrip(ctlr)) &&
- !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
+ if (joycon_using_usb(ctlr) && !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE, HZ)) {
hid_dbg(hdev, "detected USB controller\n");
/* set baudrate for improved latency */
ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M, HZ);
@@ -2184,24 +2543,43 @@ static int joycon_init(struct hid_device *hdev)
goto out_unlock;
}
- /* get controller calibration data, and parse it */
- ret = joycon_request_calibration(ctlr);
+ /* needed to retrieve the controller type */
+ ret = joycon_read_info(ctlr);
if (ret) {
- /*
- * We can function with default calibration, but it may be
- * inaccurate. Provide a warning, and continue on.
- */
- hid_warn(hdev, "Analog stick positions may be inaccurate\n");
+ hid_err(hdev, "Failed to retrieve controller info; ret=%d\n",
+ ret);
+ goto out_unlock;
}
- /* get IMU calibration data, and parse it */
- ret = joycon_request_imu_calibration(ctlr);
- if (ret) {
- /*
- * We can function with default calibration, but it may be
- * inaccurate. Provide a warning, and continue on.
- */
- hid_warn(hdev, "Unable to read IMU calibration data\n");
+ if (joycon_has_joysticks(ctlr)) {
+ /* get controller calibration data, and parse it */
+ ret = joycon_request_calibration(ctlr);
+ if (ret) {
+ /*
+ * We can function with default calibration, but it may be
+ * inaccurate. Provide a warning, and continue on.
+ */
+ hid_warn(hdev, "Analog stick positions may be inaccurate\n");
+ }
+ }
+
+ if (joycon_has_imu(ctlr)) {
+ /* get IMU calibration data, and parse it */
+ ret = joycon_request_imu_calibration(ctlr);
+ if (ret) {
+ /*
+ * We can function with default calibration, but it may be
+ * inaccurate. Provide a warning, and continue on.
+ */
+ hid_warn(hdev, "Unable to read IMU calibration data\n");
+ }
+
+ /* Enable the IMU */
+ ret = joycon_enable_imu(ctlr);
+ if (ret) {
+ hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
+ goto out_unlock;
+ }
}
/* Set the reporting mode to 0x30, which is the full report mode */
@@ -2211,18 +2589,13 @@ static int joycon_init(struct hid_device *hdev)
goto out_unlock;
}
- /* Enable rumble */
- ret = joycon_enable_rumble(ctlr);
- if (ret) {
- hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
- goto out_unlock;
- }
-
- /* Enable the IMU */
- ret = joycon_enable_imu(ctlr);
- if (ret) {
- hid_err(hdev, "Failed to enable the IMU; ret=%d\n", ret);
- goto out_unlock;
+ if (joycon_has_rumble(ctlr)) {
+ /* Enable rumble */
+ ret = joycon_enable_rumble(ctlr);
+ if (ret) {
+ hid_err(hdev, "Failed to enable rumble; ret=%d\n", ret);
+ goto out_unlock;
+ }
}
out_unlock:
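The init path above is now gated on per-model capabilities. A sketch of what one such predicate could look like, assuming the type constants (the real joycon_has_*() helpers are defined earlier in the patch, outside this excerpt):

	/* Sketch only: controllers without an IMU (e.g. the NSO SNES pad)
	 * skip the IMU calibration and enable steps in joycon_init(). */
	static bool joycon_has_imu_sketch(struct joycon_ctlr *ctlr)
	{
		switch (ctlr->ctlr_type) {
		case JC_TYPE_SNESCON:	/* assumed constant */
		case JC_TYPE_GENCON:	/* assumed constant */
			return false;
		default:
			return true;
		}
	}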
@@ -2367,13 +2740,6 @@ static int nintendo_hid_probe(struct hid_device *hdev,
goto err_close;
}
- ret = joycon_read_info(ctlr);
- if (ret) {
- hid_err(hdev, "Failed to retrieve controller info; ret=%d\n",
- ret);
- goto err_close;
- }
-
/* Initialize the leds */
ret = joycon_leds_create(ctlr);
if (ret) {
@@ -2445,6 +2811,12 @@ static int nintendo_hid_resume(struct hid_device *hdev)
static const struct hid_device_id nintendo_hid_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_PROCON) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_SNESCON) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_GENCON) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_N64CON) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_PROCON) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
@@ -2453,6 +2825,12 @@ static const struct hid_device_id nintendo_hid_devices[] = {
USB_DEVICE_ID_NINTENDO_JOYCONL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
USB_DEVICE_ID_NINTENDO_JOYCONR) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_SNESCON) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_GENCON) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_N64CON) },
{ }
};
MODULE_DEVICE_TABLE(hid, nintendo_hid_devices);
@@ -2471,6 +2849,7 @@ static struct hid_driver nintendo_hid_driver = {
module_hid_driver(nintendo_hid_driver);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ryan McClelland <rymcclel@gmail.com>");
+MODULE_AUTHOR("Emily Strickland <linux@emily.st>");
MODULE_AUTHOR("Daniel J. Ogorchock <djogorchock@gmail.com>");
MODULE_DESCRIPTION("Driver for Nintendo Switch Controllers");
-
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index a4dccdcda..d7dddd99d 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -505,6 +505,7 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
dev_err(dev, "can't get a free page for framebuffer\n");
goto err_nomem;
}
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = fbdata->bitmap;
info->fix.smem_start = (unsigned long)fbdata->bitmap;
memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE);
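For context: FBINFO_VIRTFB tells the fbdev core that the screen buffer lives in system (virtual) memory rather than I/O memory, so the framebuffer read/write and mmap helpers use plain memory accesses instead of I/O accessors.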
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index b110818fc..b08a5ab58 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -71,60 +71,222 @@ static LIST_HEAD(steam_devices);
/*
* Commands that can be sent in a feature report.
- * Thanks to Valve for some valuable hints.
+ * Thanks to Valve and SDL for the names.
*/
-#define STEAM_CMD_SET_MAPPINGS 0x80
-#define STEAM_CMD_CLEAR_MAPPINGS 0x81
-#define STEAM_CMD_GET_MAPPINGS 0x82
-#define STEAM_CMD_GET_ATTRIB 0x83
-#define STEAM_CMD_GET_ATTRIB_LABEL 0x84
-#define STEAM_CMD_DEFAULT_MAPPINGS 0x85
-#define STEAM_CMD_FACTORY_RESET 0x86
-#define STEAM_CMD_WRITE_REGISTER 0x87
-#define STEAM_CMD_CLEAR_REGISTER 0x88
-#define STEAM_CMD_READ_REGISTER 0x89
-#define STEAM_CMD_GET_REGISTER_LABEL 0x8a
-#define STEAM_CMD_GET_REGISTER_MAX 0x8b
-#define STEAM_CMD_GET_REGISTER_DEFAULT 0x8c
-#define STEAM_CMD_SET_MODE 0x8d
-#define STEAM_CMD_DEFAULT_MOUSE 0x8e
-#define STEAM_CMD_FORCEFEEDBAK 0x8f
-#define STEAM_CMD_REQUEST_COMM_STATUS 0xb4
-#define STEAM_CMD_GET_SERIAL 0xae
-#define STEAM_CMD_HAPTIC_RUMBLE 0xeb
-
-/* Some useful register ids */
-#define STEAM_REG_LPAD_MODE 0x07
-#define STEAM_REG_RPAD_MODE 0x08
-#define STEAM_REG_RPAD_MARGIN 0x18
-#define STEAM_REG_LED 0x2d
-#define STEAM_REG_GYRO_MODE 0x30
-#define STEAM_REG_LPAD_CLICK_PRESSURE 0x34
-#define STEAM_REG_RPAD_CLICK_PRESSURE 0x35
-
-/* Raw event identifiers */
-#define STEAM_EV_INPUT_DATA 0x01
-#define STEAM_EV_CONNECT 0x03
-#define STEAM_EV_BATTERY 0x04
-#define STEAM_EV_DECK_INPUT_DATA 0x09
+enum {
+ ID_SET_DIGITAL_MAPPINGS = 0x80,
+ ID_CLEAR_DIGITAL_MAPPINGS = 0x81,
+ ID_GET_DIGITAL_MAPPINGS = 0x82,
+ ID_GET_ATTRIBUTES_VALUES = 0x83,
+ ID_GET_ATTRIBUTE_LABEL = 0x84,
+ ID_SET_DEFAULT_DIGITAL_MAPPINGS = 0x85,
+ ID_FACTORY_RESET = 0x86,
+ ID_SET_SETTINGS_VALUES = 0x87,
+ ID_CLEAR_SETTINGS_VALUES = 0x88,
+ ID_GET_SETTINGS_VALUES = 0x89,
+ ID_GET_SETTING_LABEL = 0x8A,
+ ID_GET_SETTINGS_MAXS = 0x8B,
+ ID_GET_SETTINGS_DEFAULTS = 0x8C,
+ ID_SET_CONTROLLER_MODE = 0x8D,
+ ID_LOAD_DEFAULT_SETTINGS = 0x8E,
+ ID_TRIGGER_HAPTIC_PULSE = 0x8F,
+ ID_TURN_OFF_CONTROLLER = 0x9F,
+
+ ID_GET_DEVICE_INFO = 0xA1,
+
+ ID_CALIBRATE_TRACKPADS = 0xA7,
+ ID_RESERVED_0 = 0xA8,
+ ID_SET_SERIAL_NUMBER = 0xA9,
+ ID_GET_TRACKPAD_CALIBRATION = 0xAA,
+ ID_GET_TRACKPAD_FACTORY_CALIBRATION = 0xAB,
+ ID_GET_TRACKPAD_RAW_DATA = 0xAC,
+ ID_ENABLE_PAIRING = 0xAD,
+ ID_GET_STRING_ATTRIBUTE = 0xAE,
+ ID_RADIO_ERASE_RECORDS = 0xAF,
+ ID_RADIO_WRITE_RECORD = 0xB0,
+ ID_SET_DONGLE_SETTING = 0xB1,
+ ID_DONGLE_DISCONNECT_DEVICE = 0xB2,
+ ID_DONGLE_COMMIT_DEVICE = 0xB3,
+ ID_DONGLE_GET_WIRELESS_STATE = 0xB4,
+ ID_CALIBRATE_GYRO = 0xB5,
+ ID_PLAY_AUDIO = 0xB6,
+ ID_AUDIO_UPDATE_START = 0xB7,
+ ID_AUDIO_UPDATE_DATA = 0xB8,
+ ID_AUDIO_UPDATE_COMPLETE = 0xB9,
+ ID_GET_CHIPID = 0xBA,
+
+ ID_CALIBRATE_JOYSTICK = 0xBF,
+ ID_CALIBRATE_ANALOG_TRIGGERS = 0xC0,
+ ID_SET_AUDIO_MAPPING = 0xC1,
+ ID_CHECK_GYRO_FW_LOAD = 0xC2,
+ ID_CALIBRATE_ANALOG = 0xC3,
+ ID_DONGLE_GET_CONNECTED_SLOTS = 0xC4,
+
+ ID_RESET_IMU = 0xCE,
+
+ ID_TRIGGER_HAPTIC_CMD = 0xEA,
+ ID_TRIGGER_RUMBLE_CMD = 0xEB,
+};
+
+/* Settings IDs */
+enum {
+ /* 0 */
+ SETTING_MOUSE_SENSITIVITY,
+ SETTING_MOUSE_ACCELERATION,
+ SETTING_TRACKBALL_ROTATION_ANGLE,
+ SETTING_HAPTIC_INTENSITY_UNUSED,
+ SETTING_LEFT_GAMEPAD_STICK_ENABLED,
+ SETTING_RIGHT_GAMEPAD_STICK_ENABLED,
+ SETTING_USB_DEBUG_MODE,
+ SETTING_LEFT_TRACKPAD_MODE,
+ SETTING_RIGHT_TRACKPAD_MODE,
+ SETTING_MOUSE_POINTER_ENABLED,
+
+ /* 10 */
+ SETTING_DPAD_DEADZONE,
+ SETTING_MINIMUM_MOMENTUM_VEL,
+ SETTING_MOMENTUM_DECAY_AMMOUNT,
+ SETTING_TRACKPAD_RELATIVE_MODE_TICKS_PER_PIXEL,
+ SETTING_HAPTIC_INCREMENT,
+ SETTING_DPAD_ANGLE_SIN,
+ SETTING_DPAD_ANGLE_COS,
+ SETTING_MOMENTUM_VERTICAL_DIVISOR,
+ SETTING_MOMENTUM_MAXIMUM_VELOCITY,
+ SETTING_TRACKPAD_Z_ON,
+
+ /* 20 */
+ SETTING_TRACKPAD_Z_OFF,
+ SETTING_SENSITIVY_SCALE_AMMOUNT,
+ SETTING_LEFT_TRACKPAD_SECONDARY_MODE,
+ SETTING_RIGHT_TRACKPAD_SECONDARY_MODE,
+ SETTING_SMOOTH_ABSOLUTE_MOUSE,
+ SETTING_STEAMBUTTON_POWEROFF_TIME,
+ SETTING_UNUSED_1,
+ SETTING_TRACKPAD_OUTER_RADIUS,
+ SETTING_TRACKPAD_Z_ON_LEFT,
+ SETTING_TRACKPAD_Z_OFF_LEFT,
+
+ /* 30 */
+ SETTING_TRACKPAD_OUTER_SPIN_VEL,
+ SETTING_TRACKPAD_OUTER_SPIN_RADIUS,
+ SETTING_TRACKPAD_OUTER_SPIN_HORIZONTAL_ONLY,
+ SETTING_TRACKPAD_RELATIVE_MODE_DEADZONE,
+ SETTING_TRACKPAD_RELATIVE_MODE_MAX_VEL,
+ SETTING_TRACKPAD_RELATIVE_MODE_INVERT_Y,
+ SETTING_TRACKPAD_DOUBLE_TAP_BEEP_ENABLED,
+ SETTING_TRACKPAD_DOUBLE_TAP_BEEP_PERIOD,
+ SETTING_TRACKPAD_DOUBLE_TAP_BEEP_COUNT,
+ SETTING_TRACKPAD_OUTER_RADIUS_RELEASE_ON_TRANSITION,
+
+ /* 40 */
+ SETTING_RADIAL_MODE_ANGLE,
+ SETTING_HAPTIC_INTENSITY_MOUSE_MODE,
+ SETTING_LEFT_DPAD_REQUIRES_CLICK,
+ SETTING_RIGHT_DPAD_REQUIRES_CLICK,
+ SETTING_LED_BASELINE_BRIGHTNESS,
+ SETTING_LED_USER_BRIGHTNESS,
+ SETTING_ENABLE_RAW_JOYSTICK,
+ SETTING_ENABLE_FAST_SCAN,
+ SETTING_IMU_MODE,
+ SETTING_WIRELESS_PACKET_VERSION,
+
+ /* 50 */
+ SETTING_SLEEP_INACTIVITY_TIMEOUT,
+ SETTING_TRACKPAD_NOISE_THRESHOLD,
+ SETTING_LEFT_TRACKPAD_CLICK_PRESSURE,
+ SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE,
+ SETTING_LEFT_BUMPER_CLICK_PRESSURE,
+ SETTING_RIGHT_BUMPER_CLICK_PRESSURE,
+ SETTING_LEFT_GRIP_CLICK_PRESSURE,
+ SETTING_RIGHT_GRIP_CLICK_PRESSURE,
+ SETTING_LEFT_GRIP2_CLICK_PRESSURE,
+ SETTING_RIGHT_GRIP2_CLICK_PRESSURE,
+
+ /* 60 */
+ SETTING_PRESSURE_MODE,
+ SETTING_CONTROLLER_TEST_MODE,
+ SETTING_TRIGGER_MODE,
+ SETTING_TRACKPAD_Z_THRESHOLD,
+ SETTING_FRAME_RATE,
+ SETTING_TRACKPAD_FILT_CTRL,
+ SETTING_TRACKPAD_CLIP,
+ SETTING_DEBUG_OUTPUT_SELECT,
+ SETTING_TRIGGER_THRESHOLD_PERCENT,
+ SETTING_TRACKPAD_FREQUENCY_HOPPING,
+
+ /* 70 */
+ SETTING_HAPTICS_ENABLED,
+ SETTING_STEAM_WATCHDOG_ENABLE,
+ SETTING_TIMP_TOUCH_THRESHOLD_ON,
+ SETTING_TIMP_TOUCH_THRESHOLD_OFF,
+ SETTING_FREQ_HOPPING,
+ SETTING_TEST_CONTROL,
+ SETTING_HAPTIC_MASTER_GAIN_DB,
+ SETTING_THUMB_TOUCH_THRESH,
+ SETTING_DEVICE_POWER_STATUS,
+ SETTING_HAPTIC_INTENSITY,
+
+ /* 80 */
+ SETTING_STABILIZER_ENABLED,
+ SETTING_TIMP_MODE_MTE,
+};
+
+/* Input report identifiers */
+enum {
+ ID_CONTROLLER_STATE = 1,
+ ID_CONTROLLER_DEBUG = 2,
+ ID_CONTROLLER_WIRELESS = 3,
+ ID_CONTROLLER_STATUS = 4,
+ ID_CONTROLLER_DEBUG2 = 5,
+ ID_CONTROLLER_SECONDARY_STATE = 6,
+ ID_CONTROLLER_BLE_STATE = 7,
+ ID_CONTROLLER_DECK_STATE = 9
+};
+
+/* String attribute identifiers */
+enum {
+ ATTRIB_STR_BOARD_SERIAL,
+ ATTRIB_STR_UNIT_SERIAL,
+};
/* Values for GYRO_MODE (bitmask) */
-#define STEAM_GYRO_MODE_OFF 0x0000
-#define STEAM_GYRO_MODE_STEERING 0x0001
-#define STEAM_GYRO_MODE_TILT 0x0002
-#define STEAM_GYRO_MODE_SEND_ORIENTATION 0x0004
-#define STEAM_GYRO_MODE_SEND_RAW_ACCEL 0x0008
-#define STEAM_GYRO_MODE_SEND_RAW_GYRO 0x0010
+enum {
+ SETTING_GYRO_MODE_OFF = 0,
+ SETTING_GYRO_MODE_STEERING = BIT(0),
+ SETTING_GYRO_MODE_TILT = BIT(1),
+ SETTING_GYRO_MODE_SEND_ORIENTATION = BIT(2),
+ SETTING_GYRO_MODE_SEND_RAW_ACCEL = BIT(3),
+ SETTING_GYRO_MODE_SEND_RAW_GYRO = BIT(4),
+};
+
+/* Trackpad modes */
+enum {
+ TRACKPAD_ABSOLUTE_MOUSE,
+ TRACKPAD_RELATIVE_MOUSE,
+ TRACKPAD_DPAD_FOUR_WAY_DISCRETE,
+ TRACKPAD_DPAD_FOUR_WAY_OVERLAP,
+ TRACKPAD_DPAD_EIGHT_WAY,
+ TRACKPAD_RADIAL_MODE,
+ TRACKPAD_ABSOLUTE_DPAD,
+ TRACKPAD_NONE,
+ TRACKPAD_GESTURE_KEYBOARD,
+};
+
+/* Pad identifiers for the deck */
+#define STEAM_PAD_LEFT 0
+#define STEAM_PAD_RIGHT 1
+#define STEAM_PAD_BOTH 2
/* Other random constants */
-#define STEAM_SERIAL_LEN 10
+#define STEAM_SERIAL_LEN 0x15
struct steam_device {
struct list_head list;
spinlock_t lock;
struct hid_device *hdev, *client_hdev;
- struct mutex mutex;
- bool client_opened;
+ struct mutex report_mutex;
+ unsigned long client_opened;
struct input_dev __rcu *input;
unsigned long quirks;
struct work_struct work_connect;
@@ -134,7 +296,9 @@ struct steam_device {
struct power_supply __rcu *battery;
u8 battery_charge;
u16 voltage;
- struct delayed_work heartbeat;
+ struct delayed_work mode_switch;
+ bool did_mode_switch;
+ bool gamepad_mode;
struct work_struct rumble_work;
u16 rumble_left;
u16 rumble_right;
@@ -226,13 +390,13 @@ static inline int steam_send_report_byte(struct steam_device *steam, u8 cmd)
return steam_send_report(steam, &cmd, 1);
}
-static int steam_write_registers(struct steam_device *steam,
+static int steam_write_settings(struct steam_device *steam,
/* u8 reg, u16 val */...)
{
/* Send: 0x87 len (reg valLo valHi)* */
u8 reg;
u16 val;
- u8 cmd[64] = {STEAM_CMD_WRITE_REGISTER, 0x00};
+ u8 cmd[64] = {ID_SET_SETTINGS_VALUES, 0x00};
int ret;
va_list args;
@@ -265,23 +429,29 @@ static int steam_get_serial(struct steam_device *steam)
{
/*
* Send: 0xae 0x15 0x01
- * Recv: 0xae 0x15 0x01 serialnumber (10 chars)
+ * Recv: 0xae 0x15 0x01 serialnumber
*/
- int ret;
- u8 cmd[] = {STEAM_CMD_GET_SERIAL, 0x15, 0x01};
+ int ret = 0;
+ u8 cmd[] = {ID_GET_STRING_ATTRIBUTE, sizeof(steam->serial_no), ATTRIB_STR_UNIT_SERIAL};
u8 reply[3 + STEAM_SERIAL_LEN + 1];
+ mutex_lock(&steam->report_mutex);
ret = steam_send_report(steam, cmd, sizeof(cmd));
if (ret < 0)
- return ret;
+ goto out;
ret = steam_recv_report(steam, reply, sizeof(reply));
if (ret < 0)
- return ret;
- if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != 0x01)
- return -EIO;
+ goto out;
+ if (reply[0] != ID_GET_STRING_ATTRIBUTE || reply[1] < 1 ||
+ reply[1] > sizeof(steam->serial_no) || reply[2] != ATTRIB_STR_UNIT_SERIAL) {
+ ret = -EIO;
+ goto out;
+ }
reply[3 + STEAM_SERIAL_LEN] = 0;
- strscpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
- return 0;
+ strscpy(steam->serial_no, reply + 3, reply[1]);
+out:
+ mutex_unlock(&steam->report_mutex);
+ return ret;
}
/*
@@ -291,14 +461,50 @@ static int steam_get_serial(struct steam_device *steam)
*/
static inline int steam_request_conn_status(struct steam_device *steam)
{
- return steam_send_report_byte(steam, STEAM_CMD_REQUEST_COMM_STATUS);
+ int ret;
+ mutex_lock(&steam->report_mutex);
+ ret = steam_send_report_byte(steam, ID_DONGLE_GET_WIRELESS_STATE);
+ mutex_unlock(&steam->report_mutex);
+ return ret;
+}
+
+/*
+ * Send a haptic pulse to the trackpads.
+ * Duration and interval are measured in microseconds: count pulses are
+ * sent, each lasting duration microseconds, with interval microseconds
+ * between them. Gain is measured in decibels and ranges from -24 to +6.
+ */
+static inline int steam_haptic_pulse(struct steam_device *steam, u8 pad,
+ u16 duration, u16 interval, u16 count, u8 gain)
+{
+ int ret;
+ u8 report[10] = {ID_TRIGGER_HAPTIC_PULSE, 8};
+
+ /* Left and right are swapped on this report for legacy reasons */
+ if (pad < STEAM_PAD_BOTH)
+ pad ^= 1;
+
+ report[2] = pad;
+ report[3] = duration & 0xFF;
+ report[4] = duration >> 8;
+ report[5] = interval & 0xFF;
+ report[6] = interval >> 8;
+ report[7] = count & 0xFF;
+ report[8] = count >> 8;
+ report[9] = gain;
+
+ mutex_lock(&steam->report_mutex);
+ ret = steam_send_report(steam, report, sizeof(report));
+ mutex_unlock(&steam->report_mutex);
+ return ret;
}
static inline int steam_haptic_rumble(struct steam_device *steam,
u16 intensity, u16 left_speed, u16 right_speed,
u8 left_gain, u8 right_gain)
{
- u8 report[11] = {STEAM_CMD_HAPTIC_RUMBLE, 9};
+ int ret;
+ u8 report[11] = {ID_TRIGGER_RUMBLE_CMD, 9};
report[3] = intensity & 0xFF;
report[4] = intensity >> 8;
@@ -309,7 +515,10 @@ static inline int steam_haptic_rumble(struct steam_device *steam,
report[9] = left_gain;
report[10] = right_gain;
- return steam_send_report(steam, report, sizeof(report));
+ mutex_lock(&steam->report_mutex);
+ ret = steam_send_report(steam, report, sizeof(report));
+ mutex_unlock(&steam->report_mutex);
+ return ret;
}
static void steam_haptic_rumble_cb(struct work_struct *work)
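Both report builders above open-code little-endian packing with "& 0xFF" / ">> 8" pairs. An equivalent sketch using the kernel's unaligned helpers, shown only for clarity (behavior-preserving, same wire layout):

	#include <asm/unaligned.h>

	/* Same bytes on the wire as the open-coded shifts above. */
	static void steam_pack_haptic_pulse(u8 *report, u8 pad, u16 duration,
					    u16 interval, u16 count, u8 gain)
	{
		report[0] = ID_TRIGGER_HAPTIC_PULSE;
		report[1] = 8;			/* payload length */
		report[2] = pad;
		put_unaligned_le16(duration, &report[3]);
		put_unaligned_le16(interval, &report[5]);
		put_unaligned_le16(count, &report[7]);
		report[9] = gain;
	}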
@@ -335,40 +544,36 @@ static int steam_play_effect(struct input_dev *dev, void *data,
static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
{
+ if (steam->gamepad_mode)
+ enable = false;
+
if (enable) {
+ mutex_lock(&steam->report_mutex);
/* enable esc, enter, cursors */
- steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MAPPINGS);
- /* enable mouse */
- steam_send_report_byte(steam, STEAM_CMD_DEFAULT_MOUSE);
- steam_write_registers(steam,
- STEAM_REG_RPAD_MARGIN, 0x01, /* enable margin */
- 0);
-
- cancel_delayed_work_sync(&steam->heartbeat);
+ steam_send_report_byte(steam, ID_SET_DEFAULT_DIGITAL_MAPPINGS);
+ /* reset settings */
+ steam_send_report_byte(steam, ID_LOAD_DEFAULT_SETTINGS);
+ mutex_unlock(&steam->report_mutex);
} else {
+ mutex_lock(&steam->report_mutex);
/* disable esc, enter, cursor */
- steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
+ steam_send_report_byte(steam, ID_CLEAR_DIGITAL_MAPPINGS);
if (steam->quirks & STEAM_QUIRK_DECK) {
- steam_write_registers(steam,
- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
- STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
- STEAM_REG_LPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
- STEAM_REG_RPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+ steam_write_settings(steam,
+ SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
+ SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
+ SETTING_LEFT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */
+ SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */
+ SETTING_STEAM_WATCHDOG_ENABLE, 0, /* disable watchdog that tests if Steam is active */
0);
- /*
- * The Steam Deck has a watchdog that automatically enables
- * lizard mode if it doesn't see any traffic for too long
- */
- if (!work_busy(&steam->heartbeat.work))
- schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ mutex_unlock(&steam->report_mutex);
} else {
- steam_write_registers(steam,
- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
- STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
+ steam_write_settings(steam,
+ SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
+ SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
0);
+ mutex_unlock(&steam->report_mutex);
}
}
}
@@ -376,22 +581,38 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
static int steam_input_open(struct input_dev *dev)
{
struct steam_device *steam = input_get_drvdata(dev);
+ unsigned long flags;
+ bool set_lizard_mode;
+
+ /*
+ * Disabling lizard mode automatically is only done on the Steam
+ * Controller. On the Steam Deck, this is toggled manually by holding
+ * the options button instead, handled by steam_mode_switch_cb.
+ */
+ if (!(steam->quirks & STEAM_QUIRK_DECK)) {
+ spin_lock_irqsave(&steam->lock, flags);
+ set_lizard_mode = !steam->client_opened && lizard_mode;
+ spin_unlock_irqrestore(&steam->lock, flags);
+ if (set_lizard_mode)
+ steam_set_lizard_mode(steam, false);
+ }
- mutex_lock(&steam->mutex);
- if (!steam->client_opened && lizard_mode)
- steam_set_lizard_mode(steam, false);
- mutex_unlock(&steam->mutex);
return 0;
}
static void steam_input_close(struct input_dev *dev)
{
struct steam_device *steam = input_get_drvdata(dev);
+ unsigned long flags;
+ bool set_lizard_mode;
- mutex_lock(&steam->mutex);
- if (!steam->client_opened && lizard_mode)
- steam_set_lizard_mode(steam, true);
- mutex_unlock(&steam->mutex);
+ if (!(steam->quirks & STEAM_QUIRK_DECK)) {
+ spin_lock_irqsave(&steam->lock, flags);
+ set_lizard_mode = !steam->client_opened && lizard_mode;
+ spin_unlock_irqrestore(&steam->lock, flags);
+ if (set_lizard_mode)
+ steam_set_lizard_mode(steam, true);
+ }
}
static enum power_supply_property steam_battery_props[] = {
@@ -635,7 +856,8 @@ static void steam_battery_unregister(struct steam_device *steam)
static int steam_register(struct steam_device *steam)
{
int ret;
- bool client_opened;
+ unsigned long client_opened;
+ unsigned long flags;
/*
* This function can be called several times in a row with the
@@ -648,11 +870,9 @@ static int steam_register(struct steam_device *steam)
* Unlikely, but getting the serial could fail, and it is not so
* important, so make up a serial number and go on.
*/
- mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
strscpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
- mutex_unlock(&steam->mutex);
hid_info(steam->hdev, "Steam Controller '%s' connected",
steam->serial_no);
@@ -667,15 +887,13 @@ static int steam_register(struct steam_device *steam)
mutex_unlock(&steam_devices_lock);
}
- mutex_lock(&steam->mutex);
+ spin_lock_irqsave(&steam->lock, flags);
client_opened = steam->client_opened;
- if (!client_opened)
+ spin_unlock_irqrestore(&steam->lock, flags);
+ if (!client_opened) {
steam_set_lizard_mode(steam, lizard_mode);
- mutex_unlock(&steam->mutex);
-
- if (!client_opened)
ret = steam_input_register(steam);
- else
+ } else
ret = 0;
return ret;
@@ -719,6 +937,34 @@ static void steam_work_connect_cb(struct work_struct *work)
}
}
+static void steam_mode_switch_cb(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(to_delayed_work(work),
+ struct steam_device, mode_switch);
+ unsigned long flags;
+ bool client_opened;
+
+ steam->gamepad_mode = !steam->gamepad_mode;
+ if (!lizard_mode)
+ return;
+
+ if (steam->gamepad_mode) {
+ steam_set_lizard_mode(steam, false);
+ } else {
+ spin_lock_irqsave(&steam->lock, flags);
+ client_opened = steam->client_opened;
+ spin_unlock_irqrestore(&steam->lock, flags);
+ if (!client_opened)
+ steam_set_lizard_mode(steam, lizard_mode);
+ }
+
+ steam_haptic_pulse(steam, STEAM_PAD_RIGHT, 0x190, 0, 1, 0);
+ if (steam->gamepad_mode) {
+ steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x14D, 0x14D, 0x2D, 0);
+ } else {
+ steam_haptic_pulse(steam, STEAM_PAD_LEFT, 0x1F4, 0x1F4, 0x1E, 0);
+ }
+}
+
static bool steam_is_valve_interface(struct hid_device *hdev)
{
struct hid_report_enum *rep_enum;
@@ -738,22 +984,6 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
return !list_empty(&rep_enum->report_list);
}
-static void steam_lizard_mode_heartbeat(struct work_struct *work)
-{
- struct steam_device *steam = container_of(work, struct steam_device,
- heartbeat.work);
-
- mutex_lock(&steam->mutex);
- if (!steam->client_opened && steam->client_hdev) {
- steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
- steam_write_registers(steam,
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
- 0);
- schedule_delayed_work(&steam->heartbeat, 5 * HZ);
- }
- mutex_unlock(&steam->mutex);
-}
-
static int steam_client_ll_parse(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
@@ -774,10 +1004,11 @@ static void steam_client_ll_stop(struct hid_device *hdev)
static int steam_client_ll_open(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
+ unsigned long flags;
- mutex_lock(&steam->mutex);
- steam->client_opened = true;
- mutex_unlock(&steam->mutex);
+ spin_lock_irqsave(&steam->lock, flags);
+ steam->client_opened++;
+ spin_unlock_irqrestore(&steam->lock, flags);
steam_input_unregister(steam);
@@ -792,17 +1023,14 @@ static void steam_client_ll_close(struct hid_device *hdev)
bool connected;
spin_lock_irqsave(&steam->lock, flags);
- connected = steam->connected;
+ steam->client_opened--;
+ connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
- mutex_lock(&steam->mutex);
- steam->client_opened = false;
- if (connected)
+ if (connected) {
steam_set_lizard_mode(steam, lizard_mode);
- mutex_unlock(&steam->mutex);
-
- if (connected)
steam_input_register(steam);
+ }
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -881,45 +1109,33 @@ static int steam_probe(struct hid_device *hdev,
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL);
- if (!steam) {
- ret = -ENOMEM;
- goto steam_alloc_fail;
- }
+ if (!steam)
+ return -ENOMEM;
+
steam->hdev = hdev;
hid_set_drvdata(hdev, steam);
spin_lock_init(&steam->lock);
- mutex_init(&steam->mutex);
+ mutex_init(&steam->report_mutex);
steam->quirks = id->driver_data;
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
+ INIT_DELAYED_WORK(&steam->mode_switch, steam_mode_switch_cb);
INIT_LIST_HEAD(&steam->list);
- INIT_DEFERRABLE_WORK(&steam->heartbeat, steam_lizard_mode_heartbeat);
INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
- steam->client_hdev = steam_create_client_hid(hdev);
- if (IS_ERR(steam->client_hdev)) {
- ret = PTR_ERR(steam->client_hdev);
- goto client_hdev_fail;
- }
- steam->client_hdev->driver_data = steam;
-
/*
* With the real steam controller interface, do not connect hidraw.
* Instead, create the client_hid and connect that.
*/
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW);
if (ret)
- goto hid_hw_start_fail;
-
- ret = hid_add_device(steam->client_hdev);
- if (ret)
- goto client_hdev_add_fail;
+ goto err_cancel_work;
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev,
"%s:hid_hw_open\n",
__func__);
- goto hid_hw_open_fail;
+ goto err_hw_stop;
}
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
@@ -935,25 +1151,37 @@ static int steam_probe(struct hid_device *hdev,
hid_err(hdev,
"%s:steam_register failed with error %d\n",
__func__, ret);
- goto input_register_fail;
+ goto err_hw_close;
}
}
+ steam->client_hdev = steam_create_client_hid(hdev);
+ if (IS_ERR(steam->client_hdev)) {
+ ret = PTR_ERR(steam->client_hdev);
+ goto err_steam_unregister;
+ }
+ steam->client_hdev->driver_data = steam;
+
+ ret = hid_add_device(steam->client_hdev);
+ if (ret)
+ goto err_destroy;
+
return 0;
-input_register_fail:
-hid_hw_open_fail:
-client_hdev_add_fail:
- hid_hw_stop(hdev);
-hid_hw_start_fail:
+err_destroy:
hid_destroy_device(steam->client_hdev);
-client_hdev_fail:
+err_steam_unregister:
+ if (steam->connected)
+ steam_unregister(steam);
+err_hw_close:
+ hid_hw_close(hdev);
+err_hw_stop:
+ hid_hw_stop(hdev);
+err_cancel_work:
cancel_work_sync(&steam->work_connect);
- cancel_delayed_work_sync(&steam->heartbeat);
+ cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->rumble_work);
-steam_alloc_fail:
- hid_err(hdev, "%s: failed with error %d\n",
- __func__, ret);
+
return ret;
}
@@ -966,13 +1194,11 @@ static void steam_remove(struct hid_device *hdev)
return;
}
+ cancel_delayed_work_sync(&steam->mode_switch);
+ cancel_work_sync(&steam->work_connect);
hid_destroy_device(steam->client_hdev);
- mutex_lock(&steam->mutex);
steam->client_hdev = NULL;
- steam->client_opened = false;
- cancel_delayed_work_sync(&steam->heartbeat);
- mutex_unlock(&steam->mutex);
- cancel_work_sync(&steam->work_connect);
+ steam->client_opened = 0;
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
hid_info(hdev, "Steam wireless receiver disconnected");
}
@@ -1254,6 +1480,17 @@ static void steam_do_deck_input_event(struct steam_device *steam,
b13 = data[13];
b14 = data[14];
+ if (!(b9 & BIT(6)) && steam->did_mode_switch) {
+ steam->did_mode_switch = false;
+ cancel_delayed_work_sync(&steam->mode_switch);
+ } else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
+ steam->did_mode_switch = true;
+ schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
+ }
+
+ if (!steam->gamepad_mode)
+ return;
+
lpad_touched = b10 & BIT(3);
rpad_touched = b10 & BIT(4);
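In the hunk above, 45 * HZ / 100 is 45/100 of a second expressed in jiffies (HZ jiffies per second), i.e. the options button must be held for roughly 450 ms (the same delay as msecs_to_jiffies(450)) before the delayed work fires and toggles gamepad mode; releasing the button earlier cancels the pending work.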
@@ -1375,7 +1612,7 @@ static int steam_raw_event(struct hid_device *hdev,
return 0;
switch (data[2]) {
- case STEAM_EV_INPUT_DATA:
+ case ID_CONTROLLER_STATE:
if (steam->client_opened)
return 0;
rcu_read_lock();
@@ -1384,7 +1621,7 @@ static int steam_raw_event(struct hid_device *hdev,
steam_do_input_event(steam, input, data);
rcu_read_unlock();
break;
- case STEAM_EV_DECK_INPUT_DATA:
+ case ID_CONTROLLER_DECK_STATE:
if (steam->client_opened)
return 0;
rcu_read_lock();
@@ -1393,7 +1630,7 @@ static int steam_raw_event(struct hid_device *hdev,
steam_do_deck_input_event(steam, input, data);
rcu_read_unlock();
break;
- case STEAM_EV_CONNECT:
+ case ID_CONTROLLER_WIRELESS:
/*
* The payload of this event is a single byte:
* 0x01: disconnected.
@@ -1408,7 +1645,7 @@ static int steam_raw_event(struct hid_device *hdev,
break;
}
break;
- case STEAM_EV_BATTERY:
+ case ID_CONTROLLER_STATUS:
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
rcu_read_lock();
battery = rcu_dereference(steam->battery);
@@ -1439,10 +1676,8 @@ static int steam_param_set_lizard_mode(const char *val,
mutex_lock(&steam_devices_lock);
list_for_each_entry(steam, &steam_devices, list) {
- mutex_lock(&steam->mutex);
if (!steam->client_opened)
steam_set_lizard_mode(steam, lizard_mode);
- mutex_unlock(&steam->mutex);
}
mutex_unlock(&steam_devices_lock);
return 0;
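A sketch of the locking pattern this conversion adopts (illustrative helper, not in the patch): client_opened became a counter sampled under the spinlock, while the sleeping work (report I/O) now serializes on report_mutex instead of the removed per-device mutex.

	/* Snapshot under the spinlock, act on the result outside it. */
	static bool steam_client_is_open(struct steam_device *steam)
	{
		unsigned long flags;
		unsigned long opened;

		spin_lock_irqsave(&steam->lock, flags);
		opened = steam->client_opened;
		spin_unlock_irqrestore(&steam->lock, flags);

		return opened != 0;
	}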
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 2735cd585..d96538219 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -44,12 +44,12 @@
#include "i2c-hid.h"
/* quirks to control the device */
-#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
-#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
-#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
-#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
-#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6)
-#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7)
+#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(0)
+#define I2C_HID_QUIRK_BOGUS_IRQ BIT(1)
+#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(2)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
+#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@@ -64,7 +64,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -120,8 +119,6 @@ static const struct i2c_hid_quirks {
__u16 idProduct;
__u32 quirks;
} i2c_hid_quirks[] = {
- { USB_VENDOR_ID_WEIDA, HID_ANY_ID,
- I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ I2C_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_VOYO_WINPAD_A15,
@@ -134,6 +131,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
+ { I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
+ I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
/*
* Sending the wakeup after reset actually break ELAN touchscreen controller
*/
@@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
-
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
- if (recv_len)
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
if (ret != n)
return ret < 0 ? ret : -EIO;
@@ -395,8 +389,7 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
* The call will get a return value (EREMOTEIO) but device will be
* triggered and activated. After that, it goes like a normal device.
*/
- if (power_state == I2C_HID_PWR_ON &&
- ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+ if (power_state == I2C_HID_PWR_ON) {
ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);
/* Device was already activated */
@@ -426,12 +419,23 @@ set_pwr_exit:
return ret;
}
-static int i2c_hid_execute_reset(struct i2c_hid *ihid)
+static int i2c_hid_start_hwreset(struct i2c_hid *ihid)
{
size_t length = 0;
int ret;
- i2c_hid_dbg(ihid, "resetting...\n");
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ /*
+ * This prevents sending feature reports while the device is
+ * being reset. Otherwise we may lose the reset complete
+ * interrupt.
+ */
+ lockdep_assert_held(&ihid->reset_lock);
+
+ ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
+ if (ret)
+ return ret;
/* Prepare reset command. Command register goes first. */
*(__le16 *)ihid->cmdbuf = ihid->hdesc.wCommandRegister;
@@ -444,60 +448,40 @@ static int i2c_hid_execute_reset(struct i2c_hid *ihid)
ret = i2c_hid_xfer(ihid, ihid->cmdbuf, length, NULL, 0);
if (ret) {
- dev_err(&ihid->client->dev, "failed to reset device.\n");
- goto out;
- }
-
- if (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET) {
- msleep(100);
- goto out;
+ dev_err(&ihid->client->dev,
+ "failed to reset device: %d\n", ret);
+ goto err_clear_reset;
}
- i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
- if (!wait_event_timeout(ihid->wait,
- !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
- msecs_to_jiffies(5000))) {
- ret = -ENODATA;
- goto out;
- }
- i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
+ return 0;
-out:
+err_clear_reset:
clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
+ i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
return ret;
}
-static int i2c_hid_hwreset(struct i2c_hid *ihid)
+static int i2c_hid_finish_hwreset(struct i2c_hid *ihid)
{
- int ret;
-
- i2c_hid_dbg(ihid, "%s\n", __func__);
-
- /*
- * This prevents sending feature reports while the device is
- * being reset. Otherwise we may lose the reset complete
- * interrupt.
- */
- mutex_lock(&ihid->reset_lock);
+ int ret = 0;
- ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
- if (ret)
- goto out_unlock;
+ i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
- ret = i2c_hid_execute_reset(ihid);
- if (ret) {
- dev_err(&ihid->client->dev,
- "failed to reset device: %d\n", ret);
- i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
- goto out_unlock;
+ if (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET) {
+ msleep(100);
+ clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
+ } else if (!wait_event_timeout(ihid->wait,
+ !test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
+ msecs_to_jiffies(1000))) {
+ dev_warn(&ihid->client->dev, "device did not ack reset within 1000 ms\n");
+ clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
}
+ i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
/* At least some SIS devices need this after reset */
if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
-out_unlock:
- mutex_unlock(&ihid->reset_lock);
return ret;
}
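The reset was split into start/finish halves so callers can hold reset_lock across the whole sequence, as the parse and resume hunks below do. A condensed sketch of that caller pattern:

	/* Mirrors the i2c_hid_parse()/i2c_hid_core_resume() hunks below. */
	static int i2c_hid_locked_hwreset(struct i2c_hid *ihid)
	{
		int ret;

		mutex_lock(&ihid->reset_lock);
		ret = i2c_hid_start_hwreset(ihid);
		if (ret == 0)
			ret = i2c_hid_finish_hwreset(ihid);
		mutex_unlock(&ihid->reset_lock);

		return ret;
	}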
@@ -566,9 +550,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
- return IRQ_HANDLED;
-
i2c_hid_get_input(ihid);
return IRQ_HANDLED;
@@ -729,11 +710,10 @@ static int i2c_hid_parse(struct hid_device *hid)
struct i2c_client *client = hid->driver_data;
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct i2c_hid_desc *hdesc = &ihid->hdesc;
+ char *rdesc = NULL, *use_override = NULL;
unsigned int rsize;
- char *rdesc;
int ret;
int tries = 3;
- char *use_override;
i2c_hid_dbg(ihid, "entering %s\n", __func__);
@@ -743,11 +723,15 @@ static int i2c_hid_parse(struct hid_device *hid)
return -EINVAL;
}
+ mutex_lock(&ihid->reset_lock);
do {
- ret = i2c_hid_hwreset(ihid);
- if (ret)
+ ret = i2c_hid_start_hwreset(ihid);
+ if (ret == 0)
+ ret = i2c_hid_finish_hwreset(ihid);
+ else
msleep(1000);
} while (tries-- > 0 && ret);
+ mutex_unlock(&ihid->reset_lock);
if (ret)
return ret;
@@ -760,11 +744,8 @@ static int i2c_hid_parse(struct hid_device *hid)
i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
} else {
rdesc = kzalloc(rsize, GFP_KERNEL);
-
- if (!rdesc) {
- dbg_hid("couldn't allocate rdesc memory\n");
+ if (!rdesc)
return -ENOMEM;
- }
i2c_hid_dbg(ihid, "asking HID report descriptor\n");
@@ -773,23 +754,21 @@ static int i2c_hid_parse(struct hid_device *hid)
rdesc, rsize);
if (ret) {
hid_err(hid, "reading report descriptor failed\n");
- kfree(rdesc);
- return -EIO;
+ goto out;
}
}
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
+ if (ret)
+ dbg_hid("parsing report descriptor failed\n");
+
+out:
if (!use_override)
kfree(rdesc);
- if (ret) {
- dbg_hid("parsing report descriptor failed\n");
- return ret;
- }
-
- return 0;
+ return ret;
}
static int i2c_hid_start(struct hid_device *hid)
@@ -958,7 +937,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
return ret;
/* Save some power */
- i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
+ i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
disable_irq(client->irq);
@@ -987,10 +967,15 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
* However some ALPS touchpads generate IRQ storm without reset, so
* let's still reset them here.
*/
- if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
- ret = i2c_hid_hwreset(ihid);
- else
+ if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME) {
+ mutex_lock(&ihid->reset_lock);
+ ret = i2c_hid_start_hwreset(ihid);
+ if (ret == 0)
+ ret = i2c_hid_finish_hwreset(ihid);
+ mutex_unlock(&ihid->reset_lock);
+ } else {
ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
+ }
if (ret)
return ret;
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 31abab57a..5b91fb106 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -130,9 +130,17 @@ static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
.main_supply_name = NULL,
};
+static const struct elan_i2c_hid_chip_data ilitek_ili2901_chip_data = {
+ .post_power_delay_ms = 10,
+ .post_gpio_reset_on_delay_ms = 100,
+ .hid_descriptor_address = 0x0001,
+ .main_supply_name = "vcc33",
+};
+
static const struct of_device_id elan_i2c_hid_of_match[] = {
{ .compatible = "elan,ekth6915", .data = &elan_ekth6915_chip_data },
{ .compatible = "ilitek,ili9882t", .data = &ilitek_ili9882t_chip_data },
+ { .compatible = "ilitek,ili2901", .data = &ilitek_ili2901_chip_data },
{ }
};
MODULE_DEVICE_TABLE(of, elan_i2c_hid_of_match);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a49c6affd..dd5fc6087 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
- dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 710fda5f1..65e7eeb2f 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -119,50 +119,6 @@ static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID;
}
-static int enable_gpe(struct device *dev)
-{
-#ifdef CONFIG_ACPI
- acpi_status acpi_sts;
- struct acpi_device *adev;
- struct acpi_device_wakeup *wakeup;
-
- adev = ACPI_COMPANION(dev);
- if (!adev) {
- dev_err(dev, "get acpi handle failed\n");
- return -ENODEV;
- }
- wakeup = &adev->wakeup;
-
- /*
- * Call acpi_disable_gpe(), so that reference count
- * gpe_event_info->runtime_count doesn't overflow.
- * When gpe_event_info->runtime_count = 0, the call
- * to acpi_disable_gpe() simply return.
- */
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
-
- acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(acpi_sts)) {
- dev_err(dev, "enable ose_gpe failed\n");
- return -EIO;
- }
-
- return 0;
-#else
- return -ENODEV;
-#endif
-}
-
-static void enable_pme_wake(struct pci_dev *pdev)
-{
- if ((pci_pme_capable(pdev, PCI_D0) ||
- pci_pme_capable(pdev, PCI_D3hot) ||
- pci_pme_capable(pdev, PCI_D3cold)) && !enable_gpe(&pdev->dev)) {
- pci_pme_active(pdev, true);
- dev_dbg(&pdev->dev, "ish ipc driver pme wake enabled\n");
- }
-}
-
/**
* ish_probe() - PCI driver probe callback
* @pdev: pci device
@@ -233,7 +189,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Enable PME for EHL */
if (pdev->device == EHL_Ax_DEVICE_ID)
- enable_pme_wake(pdev);
+ device_init_wakeup(dev, true);
ret = ish_init(ishtp);
if (ret)
@@ -256,6 +212,19 @@ static void ish_remove(struct pci_dev *pdev)
ish_device_disable(ishtp_dev);
}
+
+/**
+ * ish_shutdown() - PCI driver shutdown callback
+ * @pdev: pci device
+ *
+ * This function sets up wakeup for S5
+ */
+static void ish_shutdown(struct pci_dev *pdev)
+{
+ if (pdev->device == EHL_Ax_DEVICE_ID)
+ pci_prepare_to_sleep(pdev);
+}
+
static struct device __maybe_unused *ish_resume_device;
/* 50ms to get resume response */
@@ -378,13 +347,6 @@ static int __maybe_unused ish_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
- /* add this to finish power flow for EHL */
- if (dev->pdev->device == EHL_Ax_DEVICE_ID) {
- pci_set_power_state(pdev, PCI_D0);
- enable_pme_wake(pdev);
- dev_dbg(dev->devc, "set power state to D0 for ehl\n");
- }
-
ish_resume_device = device;
dev->resume_flag = 1;
@@ -400,6 +362,7 @@ static struct pci_driver ish_driver = {
.id_table = ish_pci_tbl,
.probe = ish_probe,
.remove = ish_remove,
+ .shutdown = ish_shutdown,
.driver.pm = &ish_pm_ops,
};
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 16aa030af..e157863a8 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -840,43 +840,22 @@ static void load_fw_from_host_handler(struct work_struct *work)
*
* Return: 0 for success, negative error code for failure
*/
-static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
+static int loader_init(struct ishtp_cl *loader_ishtp_cl, bool reset)
{
int rv;
- struct ishtp_fw_client *fw_client;
struct ishtp_cl_data *client_data =
ishtp_get_client_data(loader_ishtp_cl);
dev_dbg(cl_data_to_dev(client_data), "reset flag: %d\n", reset);
- rv = ishtp_cl_link(loader_ishtp_cl);
- if (rv < 0) {
- dev_err(cl_data_to_dev(client_data), "ishtp_cl_link failed\n");
- return rv;
- }
-
- /* Connect to firmware client */
- ishtp_set_tx_ring_size(loader_ishtp_cl, LOADER_CL_TX_RING_SIZE);
- ishtp_set_rx_ring_size(loader_ishtp_cl, LOADER_CL_RX_RING_SIZE);
-
- fw_client =
- ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
- &loader_ishtp_id_table[0].guid);
- if (!fw_client) {
- dev_err(cl_data_to_dev(client_data),
- "ISH client uuid not found\n");
- rv = -ENOENT;
- goto err_cl_unlink;
- }
-
- ishtp_cl_set_fw_client_id(loader_ishtp_cl,
- ishtp_get_fw_client_id(fw_client));
- ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_CONNECTING);
-
- rv = ishtp_cl_connect(loader_ishtp_cl);
+ rv = ishtp_cl_establish_connection(loader_ishtp_cl,
+ &loader_ishtp_id_table[0].guid,
+ LOADER_CL_TX_RING_SIZE,
+ LOADER_CL_RX_RING_SIZE,
+ reset);
if (rv < 0) {
dev_err(cl_data_to_dev(client_data), "Client connect fail\n");
- goto err_cl_unlink;
+ goto err_cl_disconnect;
}
dev_dbg(cl_data_to_dev(client_data), "Client connected\n");
@@ -885,17 +864,14 @@ static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
return 0;
-err_cl_unlink:
- ishtp_cl_unlink(loader_ishtp_cl);
+err_cl_disconnect:
+ ishtp_cl_destroy_connection(loader_ishtp_cl, reset);
return rv;
}
static void loader_deinit(struct ishtp_cl *loader_ishtp_cl)
{
- ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_DISCONNECTING);
- ishtp_cl_disconnect(loader_ishtp_cl);
- ishtp_cl_unlink(loader_ishtp_cl);
- ishtp_cl_flush_queues(loader_ishtp_cl);
+ ishtp_cl_destroy_connection(loader_ishtp_cl, false);
/* Disband and free all Tx and Rx client-level rings */
ishtp_cl_free(loader_ishtp_cl);
@@ -914,19 +890,7 @@ static void reset_handler(struct work_struct *work)
loader_ishtp_cl = client_data->loader_ishtp_cl;
cl_device = client_data->cl_device;
- /* Unlink, flush queues & start again */
- ishtp_cl_unlink(loader_ishtp_cl);
- ishtp_cl_flush_queues(loader_ishtp_cl);
- ishtp_cl_free(loader_ishtp_cl);
-
- loader_ishtp_cl = ishtp_cl_allocate(cl_device);
- if (!loader_ishtp_cl)
- return;
-
- ishtp_set_drvdata(cl_device, loader_ishtp_cl);
- ishtp_set_client_data(loader_ishtp_cl, client_data);
- client_data->loader_ishtp_cl = loader_ishtp_cl;
- client_data->cl_device = cl_device;
+ ishtp_cl_destroy_connection(loader_ishtp_cl, true);
rv = loader_init(loader_ishtp_cl, 1);
if (rv < 0) {
@@ -974,7 +938,7 @@ static int loader_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
INIT_WORK(&client_data->work_fw_load,
load_fw_from_host_handler);
- rv = loader_init(loader_ishtp_cl, 0);
+ rv = loader_init(loader_ishtp_cl, false);
if (rv < 0) {
ishtp_cl_free(loader_ishtp_cl);
return rv;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index e3d70c546..fbd4f8ea1 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -639,47 +639,26 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
*
* Return: 0 on success, non zero on error
*/
-static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
+static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, bool reset)
{
- struct ishtp_device *dev;
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
- struct ishtp_fw_client *fw_client;
int i;
int rv;
dev_dbg(cl_data_to_dev(client_data), "%s\n", __func__);
hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
- rv = ishtp_cl_link(hid_ishtp_cl);
- if (rv) {
- dev_err(cl_data_to_dev(client_data),
- "ishtp_cl_link failed\n");
- return -ENOMEM;
- }
-
client_data->init_done = 0;
- dev = ishtp_get_ishtp_device(hid_ishtp_cl);
-
- /* Connect to FW client */
- ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
- ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
-
- fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid);
- if (!fw_client) {
- dev_err(cl_data_to_dev(client_data),
- "ish client uuid not found\n");
- return -ENOENT;
- }
- ishtp_cl_set_fw_client_id(hid_ishtp_cl,
- ishtp_get_fw_client_id(fw_client));
- ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_CONNECTING);
-
- rv = ishtp_cl_connect(hid_ishtp_cl);
+ rv = ishtp_cl_establish_connection(hid_ishtp_cl,
+ &hid_ishtp_id_table[0].guid,
+ HID_CL_TX_RING_SIZE,
+ HID_CL_RX_RING_SIZE,
+ reset);
if (rv) {
dev_err(cl_data_to_dev(client_data),
"client connect fail\n");
- goto err_cl_unlink;
+ goto err_cl_disconnect;
}
hid_ishtp_trace(client_data, "%s client connected\n", __func__);
@@ -723,10 +702,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
return 0;
err_cl_disconnect:
- ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
- ishtp_cl_disconnect(hid_ishtp_cl);
-err_cl_unlink:
- ishtp_cl_unlink(hid_ishtp_cl);
+ ishtp_cl_destroy_connection(hid_ishtp_cl, reset);
return rv;
}
@@ -738,8 +714,7 @@ err_cl_unlink:
*/
static void hid_ishtp_cl_deinit(struct ishtp_cl *hid_ishtp_cl)
{
- ishtp_cl_unlink(hid_ishtp_cl);
- ishtp_cl_flush_queues(hid_ishtp_cl);
+ ishtp_cl_destroy_connection(hid_ishtp_cl, false);
/* disband and free all Tx and Rx client-level rings */
ishtp_cl_free(hid_ishtp_cl);
@@ -749,33 +724,23 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
{
struct ishtp_cl_data *client_data;
struct ishtp_cl *hid_ishtp_cl;
- struct ishtp_cl_device *cl_device;
int retry;
int rv;
client_data = container_of(work, struct ishtp_cl_data, work);
hid_ishtp_cl = client_data->hid_ishtp_cl;
- cl_device = client_data->cl_device;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
dev_dbg(ishtp_device(client_data->cl_device), "%s\n", __func__);
- hid_ishtp_cl_deinit(hid_ishtp_cl);
-
- hid_ishtp_cl = ishtp_cl_allocate(cl_device);
- if (!hid_ishtp_cl)
- return;
-
- ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- ishtp_set_client_data(hid_ishtp_cl, client_data);
- client_data->hid_ishtp_cl = hid_ishtp_cl;
+ ishtp_cl_destroy_connection(hid_ishtp_cl, true);
client_data->num_hid_devices = 0;
for (retry = 0; retry < 3; ++retry) {
- rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, true);
if (!rv)
break;
dev_err(cl_data_to_dev(client_data), "Retry reset init\n");
@@ -841,7 +806,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
ishtp_hid_print_trace = ishtp_trace_callback(cl_device);
- rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, false);
if (rv) {
ishtp_cl_free(hid_ishtp_cl);
return rv;
@@ -868,11 +833,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
- ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
- ishtp_cl_disconnect(hid_ishtp_cl);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
- hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL;
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 7fc738a22..03d5601ce 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -378,7 +378,7 @@ static const struct dev_pm_ops ishtp_cl_bus_dev_pm_ops = {
.restore = ishtp_cl_device_resume,
};
-static struct bus_type ishtp_cl_bus_type = {
+static const struct bus_type ishtp_cl_bus_type = {
.name = "ishtp",
.dev_groups = ishtp_cl_dev_groups,
.probe = ishtp_cl_device_probe,
@@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
cl->state = ISHTP_CL_DISCONNECTED;
+ if (warm_reset && cl->device->reference_count)
+ continue;
/*
* Wake any pending process. The waiter would check dev->state
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 2d92fc129..8a7f2f6a4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl)
list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
list_del(&rb->list);
- ishtp_io_rb_free(rb);
+ spin_lock(&cl->free_list_spinlock);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock(&cl->free_list_spinlock);
}
spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
@@ -339,16 +341,17 @@ static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
}
/**
- * ishtp_cl_connect() - Send connect request to firmware
+ * ishtp_cl_connect_to_fw() - Send connect request to firmware
* @cl: client device instance
*
- * Send a connect request for a client to firmware. If successful it will
- * RX and TX ring buffers
+ * Send a connect request to the firmware and wait for firmware response.
+ * If there is successful connection response from the firmware, change
+ * client state to ISHTP_CL_CONNECTED, and bind client to related
+ * firmware client_id.
*
- * Return: 0 if successful connect response from the firmware and able
- * to bind and allocate ring buffers or error code on failure
+ * Return: 0 for success and error code on failure
*/
-int ishtp_cl_connect(struct ishtp_cl *cl)
+static int ishtp_cl_connect_to_fw(struct ishtp_cl *cl)
{
struct ishtp_device *dev;
int rets;
@@ -358,8 +361,6 @@ int ishtp_cl_connect(struct ishtp_cl *cl)
dev = cl->dev;
- dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
-
if (ishtp_cl_is_other_connecting(cl)) {
dev->print_log(dev, "%s() Busy\n", __func__);
return -EBUSY;
@@ -405,6 +406,38 @@ int ishtp_cl_connect(struct ishtp_cl *cl)
return rets;
}
+ return rets;
+}
+
+/**
+ * ishtp_cl_connect() - Build connection with firmware
+ * @cl: client device instance
+ *
+ * Call ishtp_cl_connect_to_fw() to connect and bind to the firmware. On
+ * success, allocate RX and TX ring buffers, and start flow control with
+ * the firmware to begin communication.
+ *
+ * Return: 0 on successful connection and ring-buffer allocation,
+ * or error code on failure
+ */
+int ishtp_cl_connect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int rets;
+
+ if (!cl || !cl->dev)
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
+
+ rets = ishtp_cl_connect_to_fw(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Connect to fw failed\n", __func__);
+ return rets;
+ }
+
rets = ishtp_cl_alloc_rx_ring(cl);
if (rets) {
dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
@@ -422,16 +455,148 @@ int ishtp_cl_connect(struct ishtp_cl *cl)
return rets;
}
- /* Upon successful connection and allocation, emit flow-control */
+ /*
+ * Upon successful connection and allocation, start flow-control.
+ */
rets = ishtp_cl_read_start(cl);
- dev->print_log(dev, "%s() successful\n", __func__);
-
return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
/**
+ * ishtp_cl_establish_connection() - Establish connection with the firmware
+ * @cl: client device instance
+ * @uuid: uuid of the client to search
+ * @tx_size: TX ring buffer size
+ * @rx_size: RX ring buffer size
+ * @reset: true if called for reset connection, otherwise for first connection
+ *
+ * This is a helper function for a client driver to build a connection with
+ * the firmware. If it's the first time connecting to the firmware, set reset
+ * to false; this function will link the client to the bus, find the client
+ * id and send a connect request to the firmware.
+ *
+ * If it's called from a reset handler, where the client lost the connection
+ * after a firmware reset, set reset to true; this function will reinit the
+ * client state and establish the connection again. In this case, it reuses
+ * the current client structure and ring buffers to avoid allocation failures
+ * and memory fragmentation.
+ *
+ * Return: 0 for successful connection with the firmware,
+ * or error code on failure
+ */
+int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
+ int tx_size, int rx_size, bool reset)
+{
+ struct ishtp_device *dev;
+ struct ishtp_fw_client *fw_client;
+ int rets;
+
+ if (!cl || !cl->dev)
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ ishtp_set_connection_state(cl, ISHTP_CL_INITIALIZING);
+
+ /* reinit the ishtp_cl structure if called for reset */
+ if (reset) {
+ cl->host_client_id = 0;
+ cl->fw_client_id = 0;
+ cl->ishtp_flow_ctrl_creds = 0;
+ cl->out_flow_ctrl_creds = 0;
+
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->last_dma_acked = 1;
+ cl->last_dma_addr = NULL;
+ cl->last_ipc_acked = 1;
+
+ cl->sending = 0;
+ cl->err_send_msg = 0;
+ cl->err_send_fc = 0;
+
+ cl->send_msg_cnt_ipc = 0;
+ cl->send_msg_cnt_dma = 0;
+ cl->recv_msg_cnt_ipc = 0;
+ cl->recv_msg_cnt_dma = 0;
+ cl->recv_msg_num_frags = 0;
+ cl->ishtp_flow_ctrl_cnt = 0;
+ cl->out_flow_ctrl_cnt = 0;
+ }
+
+ /* link to bus */
+ rets = ishtp_cl_link(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() ishtp_cl_link failed\n", __func__);
+ return rets;
+ }
+
+ /* find firmware client */
+ fw_client = ishtp_fw_cl_get_client(dev, uuid);
+ if (!fw_client) {
+ dev->print_log(dev,
+ "%s() ish client uuid not found\n", __func__);
+ return -ENOENT;
+ }
+
+ ishtp_set_tx_ring_size(cl, tx_size);
+ ishtp_set_rx_ring_size(cl, rx_size);
+
+ ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
+
+ ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);
+
+ /*
+ * In the reset case, don't allocate the tx/rx ring buffers; they were
+ * already allocated by ishtp_cl_connect() during the first connection.
+ */
+ if (reset) {
+ rets = ishtp_cl_connect_to_fw(cl);
+ if (!rets)
+ rets = ishtp_cl_read_start(cl);
+ else
+ dev->print_log(dev,
+ "%s() connect to fw failed\n", __func__);
+ } else {
+ rets = ishtp_cl_connect(cl);
+ }
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_establish_connection);
+
+/**
+ * ishtp_cl_destroy_connection() - Disconnect with the firmware
+ * @cl: client device instance
+ * @reset: true if called for firmware reset, false for normal disconnection
+ *
+ * This is a helper function for client driver to disconnect with firmware,
+ * unlink to bus and flush message queue.
+ */
+void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset)
+{
+ if (!cl)
+ return;
+
+ if (reset) {
+ /*
+ * In the reset case, the connection was already lost during the fw
+ * reset, so just setting the state to DISCONNECTED is enough.
+ */
+ ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTED);
+ } else {
+ if (cl->state != ISHTP_CL_DISCONNECTED) {
+ ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(cl);
+ }
+ }
+
+ ishtp_cl_unlink(cl);
+ ishtp_cl_flush_queues(cl);
+}
+EXPORT_SYMBOL(ishtp_cl_destroy_connection);
+
+/**
* ishtp_cl_read_start() - Prepare to read client message
* @cl: client device instance
*
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 4588d2cd4..a54c7995b 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -490,7 +490,7 @@ static int uhid_dev_create2(struct uhid_device *uhid,
const struct uhid_event *ev)
{
struct hid_device *hid;
- size_t rd_size, len;
+ size_t rd_size;
void *rd_data;
int ret;
@@ -514,13 +514,12 @@ static int uhid_dev_create2(struct uhid_device *uhid,
goto err_free;
}
- /* @hid is zero-initialized, strncpy() is correct, strlcpy() not */
- len = min(sizeof(hid->name), sizeof(ev->u.create2.name)) - 1;
- strncpy(hid->name, ev->u.create2.name, len);
- len = min(sizeof(hid->phys), sizeof(ev->u.create2.phys)) - 1;
- strncpy(hid->phys, ev->u.create2.phys, len);
- len = min(sizeof(hid->uniq), sizeof(ev->u.create2.uniq)) - 1;
- strncpy(hid->uniq, ev->u.create2.uniq, len);
+ BUILD_BUG_ON(sizeof(hid->name) != sizeof(ev->u.create2.name));
+ strscpy(hid->name, ev->u.create2.name, sizeof(hid->name));
+ BUILD_BUG_ON(sizeof(hid->phys) != sizeof(ev->u.create2.phys));
+ strscpy(hid->phys, ev->u.create2.phys, sizeof(hid->phys));
+ BUILD_BUG_ON(sizeof(hid->uniq) != sizeof(ev->u.create2.uniq));
+ strscpy(hid->uniq, ev->u.create2.uniq, sizeof(hid->uniq));
hid->ll_driver = &uhid_hid_driver;
hid->bus = ev->u.create2.bus;
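A sketch of the general pattern used above (hypothetical structs): BUILD_BUG_ON() pins the two buffers to the same size at compile time, so strscpy(), which always NUL-terminates unlike strncpy(), can never silently truncate.

	struct dst_obj { char name[64]; };
	struct src_ev { char name[64]; };

	static void copy_name(struct dst_obj *d, const struct src_ev *s)
	{
		/* Fails the build if the buffer sizes ever diverge. */
		BUILD_BUG_ON(sizeof(d->name) != sizeof(s->name));
		strscpy(d->name, s->name, sizeof(d->name));
	}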
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 166a76c9b..77c5fb26c 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -164,6 +164,7 @@ struct wacom {
struct work_struct battery_work;
struct work_struct remote_work;
struct delayed_work init_work;
+ struct delayed_work aes_battery_work;
struct wacom_remote *remote;
struct work_struct mode_change_work;
struct timer_list idleprox_timer;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 7659c98d9..2bc45b240 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1813,6 +1813,13 @@ static void wacom_destroy_battery(struct wacom *wacom)
}
}
+static void wacom_aes_battery_handler(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, aes_battery_work.work);
+
+ wacom_destroy_battery(wacom);
+}
+
static ssize_t wacom_show_speed(struct device *dev,
struct device_attribute
*attr, char *buf)
@@ -2817,6 +2824,7 @@ static int wacom_probe(struct hid_device *hdev,
mutex_init(&wacom->lock);
INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
+ INIT_DELAYED_WORK(&wacom->aes_battery_work, wacom_aes_battery_handler);
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
INIT_WORK(&wacom->battery_work, wacom_battery_work);
INIT_WORK(&wacom->remote_work, wacom_remote_work);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 002cbaa16..fbe10fbc5 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2528,11 +2528,12 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct input_dev *input = wacom_wac->pen_input;
bool range = wacom_wac->hid_data.inrange_state;
bool sense = wacom_wac->hid_data.sense_state;
+ bool entering_range = !wacom_wac->tool[0] && range;
if (wacom_wac->is_invalid_bt_frame)
return;
- if (!wacom_wac->tool[0] && range) { /* first in range */
+ if (entering_range) { /* first in range */
/* Going into range select tool */
if (wacom_wac->hid_data.invert_state)
wacom_wac->tool[0] = BTN_TOOL_RUBBER;
@@ -2590,6 +2591,15 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
input_sync(input);
}
+ /* Handle AES battery timeout: drop the battery after an extended out-of-proximity period */
+ if (wacom_wac->features.quirks & WACOM_QUIRK_AESPEN) {
+ if (entering_range)
+ cancel_delayed_work(&wacom->aes_battery_work);
+ if (!sense)
+ schedule_delayed_work(&wacom->aes_battery_work,
+ msecs_to_jiffies(WACOM_AES_BATTERY_TIMEOUT));
+ }
+
if (!sense) {
wacom_wac->tool[0] = 0;
wacom_wac->id[0] = 0;
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 57e185f18..e63b1e806 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -14,6 +14,7 @@
#define WACOM_MAX_REMOTES 5
#define WACOM_STATUS_UNKNOWN 255
#define WACOM_REMOTE_BATTERY_TIMEOUT 21000000000ll
+#define WACOM_AES_BATTERY_TIMEOUT 1800000 /* 30 minutes in ms */
/* packet length for individual models */
#define WACOM_PKGLEN_BBFUN 9
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 4c1a00f99..6802efb4d 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
if (!omap_ssi)
return -ENOMEM;
- err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
+ err = ida_alloc(&platform_omap_ssi_ida, GFP_KERNEL);
if (err < 0)
return err;
ssi->id = err;
@@ -417,7 +417,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
return 0;
out_err:
- ida_simple_remove(&platform_omap_ssi_ida, ssi->id);
+ ida_free(&platform_omap_ssi_ida, ssi->id);
return err;
}
@@ -451,7 +451,7 @@ static void ssi_remove_controller(struct hsi_controller *ssi)
tasklet_kill(&omap_ssi->gdd_tasklet);
hsi_unregister_controller(ssi);
clk_notifier_unregister(omap_ssi->fck, &omap_ssi->fck_nb);
- ida_simple_remove(&platform_omap_ssi_ida, id);
+ ida_free(&platform_omap_ssi_ida, id);
}
static inline int ssi_of_get_available_ports_count(const struct device_node *np)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56f7e06c6..adbf67435 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
- /* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_header) -
sizeof(struct gpa_range);
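+ /* fit as many PFNs as possible into the header message */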
+ pfncount = umin(pagecount, pfnsize / sizeof(u64));
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+ *msginfo = msgheader;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit in a body message */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
pfncount = pfnsize / sizeof(u64);
- if (pagecount > pfncount) {
- /* we need a gpadl body */
- /* fill in the header */
+ /*
+ * If pfnleft is zero, everything fits in the header and no body
+ * messages are needed
+ */
+ while (pfnleft) {
+ pfncurr = umin(pfncount, pfnleft);
msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pfncount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (!msgheader)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
- *msginfo = msgheader;
-
- pfnsum = pfncount;
- pfnleft = pagecount - pfncount;
-
- /* how many pfns can we fit */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_body);
- pfncount = pfnsize / sizeof(u64);
-
- /* fill in the body */
- while (pfnleft) {
- if (pfnleft > pfncount)
- pfncurr = pfncount;
- else
- pfncurr = pfnleft;
-
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_body) +
- pfncurr * sizeof(u64);
- msgbody = kzalloc(msgsize, GFP_KERNEL);
-
- if (!msgbody) {
- struct vmbus_channel_msginfo *pos = NULL;
- struct vmbus_channel_msginfo *tmp = NULL;
- /*
- * Free up all the allocated messages.
- */
- list_for_each_entry_safe(pos, tmp,
- &msgheader->submsglist,
- msglistentry) {
-
- list_del(&pos->msglistentry);
- kfree(pos);
- }
-
- goto nomem;
- }
-
- msgbody->msgsize = msgsize;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->msg;
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
/*
- * Gpadl is u32 and we are using a pointer which could
- * be 64-bit
- * This is governed by the guest/host protocol and
- * so the hypervisor guarantees that this is ok.
+ * Free up all the allocated messages.
*/
- for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
- kbuffer, size, send_offset, pfnsum + i);
-
- /* add to msg header */
- list_add_tail(&msgbody->msglistentry,
- &msgheader->submsglist);
- pfnsum += pfncurr;
- pfnleft -= pfncurr;
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+ kfree(msgheader);
+ return -ENOMEM;
}
- } else {
- /* everything fits in a header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pagecount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (msgheader == NULL)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
-
- *msginfo = msgheader;
+
+ msgbody->msgsize = msgsize;
+ gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+ * Gpadl is u32 and we are using a pointer which could
+ * be 64-bit. This is governed by the guest/host protocol and
+ * so the hypervisor guarantees that this is ok.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
}
return 0;
-nomem:
- kfree(msgheader);
- kfree(msgbody);
- return -ENOMEM;
}
/*
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 4372f5d14..0285a7436 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -488,7 +488,7 @@ void hv_setup_dma_ops(struct device *dev, bool coherent)
* Hyper-V does not offer a vIOMMU in the guest
* VM, so pass 0/NULL for the IOMMU settings
*/
- arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+ arch_setup_dma_ops(dev, 0, 0, coherent);
}
EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 42aec2c56..9c97c4065 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -296,6 +296,11 @@ static struct {
spinlock_t lock;
} host_ts;
+static bool timesync_implicit;
+
+module_param(timesync_implicit, bool, 0644);
+MODULE_PARM_DESC(timesync_implicit, "If set, treat SAMPLE as SYNC when the guest clock is behind");
+
static inline u64 reftime_to_ns(u64 reftime)
{
return (reftime - WLTIMEDELTA) * 100;
@@ -345,6 +350,29 @@ static void hv_set_host_time(struct work_struct *work)
}
/*
+ * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
+ * Force a sync if the guest is behind.
+ */
+static inline bool hv_implicit_sync(u64 host_time)
+{
+ struct timespec64 new_ts;
+ struct timespec64 threshold_ts;
+
+ new_ts = ns_to_timespec64(reftime_to_ns(host_time));
+ ktime_get_real_ts64(&threshold_ts);
+
+ threshold_ts.tv_sec += 5;
+
+ /*
+ * Force a sync if the guest clock is behind the host clock by 5 or more seconds.
+ */
+ if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
+ return true;
+
+ return false;
+}
+
+/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
@@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
spin_unlock_irqrestore(&host_ts.lock, flags);
/* Schedule work to do do_settimeofday64() */
- if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
+ (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
schedule_work(&adj_time_work);
}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b33d5abd9..7f7965f3d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = {
};
/* The one and only one */
-static struct bus_type hv_bus = {
+static const struct bus_type hv_bus = {
.name = "vmbus",
.match = vmbus_match,
.shutdown = vmbus_shutdown,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index cf27523ee..a608264da 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -512,6 +512,7 @@ config SENSORS_DS1621
config SENSORS_DELL_SMM
tristate "Dell laptop SMM BIOS hwmon driver"
+ depends on ACPI_WMI
depends on X86
imply THERMAL
help
@@ -663,6 +664,16 @@ config SENSORS_FTSTEUTATES
This driver can also be built as a module. If so, the module
will be called ftsteutates.
+config SENSORS_GIGABYTE_WATERFORCE
+ tristate "Gigabyte Waterforce X240/X280/X360 AIO CPU coolers"
+ depends on USB_HID
+ help
+ If you say yes here you get support for hardware monitoring for the
+ Gigabyte Waterforce X240/X280/X360 all-in-one CPU liquid coolers.
+
+ This driver can also be built as a module. If so, the module
+ will be called gigabyte_waterforce.
+
config SENSORS_GL518SM
tristate "Genesys Logic GL518SM"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e84bd9685..47be39af5 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o
obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o
obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_G762) += g762.o
+obj-$(CONFIG_SENSORS_GIGABYTE_WATERFORCE) += gigabyte_waterforce.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 4fdd2e124..2efe97f8d 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1476,8 +1476,6 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-
static int serial_number_show(struct seq_file *seqf, void *unused)
{
struct aqc_data *priv = seqf->private;
@@ -1527,14 +1525,6 @@ static void aqc_debugfs_init(struct aqc_data *priv)
debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
}
-#else
-
-static void aqc_debugfs_init(struct aqc_data *priv)
-{
-}
-
-#endif
-
static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct aqc_data *priv;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index b2ae2176f..4acc1858d 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -166,6 +166,8 @@
#define MAX_CDEV_NAME_LEN 16
+#define MAX_ASPEED_FAN_TACH_CHANNELS 16
+
struct aspeed_cooling_device {
char name[16];
struct aspeed_pwm_tacho_data *priv;
@@ -181,7 +183,7 @@ struct aspeed_pwm_tacho_data {
struct reset_control *rst;
unsigned long clk_freq;
bool pwm_present[8];
- bool fan_tach_present[16];
+ bool fan_tach_present[MAX_ASPEED_FAN_TACH_CHANNELS];
u8 type_pwm_clock_unit[3];
u8 type_pwm_clock_division_h[3];
u8 type_pwm_clock_division_l[3];
@@ -190,7 +192,7 @@ struct aspeed_pwm_tacho_data {
u16 type_fan_tach_unit[3];
u8 pwm_port_type[8];
u8 pwm_port_fan_ctrl[8];
- u8 fan_tach_ch_source[16];
+ u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS];
struct aspeed_cooling_device *cdev[8];
const struct attribute_group *groups[3];
/* protects access to shared ASPEED_PTCR_RESULT */
@@ -743,20 +745,27 @@ static void aspeed_create_pwm_port(struct aspeed_pwm_tacho_data *priv,
aspeed_set_pwm_port_fan_ctrl(priv, pwm_port, INIT_FAN_CTRL);
}
-static void aspeed_create_fan_tach_channel(struct aspeed_pwm_tacho_data *priv,
- u8 *fan_tach_ch,
- int count,
- u8 pwm_source)
+static int aspeed_create_fan_tach_channel(struct device *dev,
+ struct aspeed_pwm_tacho_data *priv,
+ u8 *fan_tach_ch,
+ int count,
+ u8 pwm_source)
{
u8 val, index;
for (val = 0; val < count; val++) {
index = fan_tach_ch[val];
+ if (index >= MAX_ASPEED_FAN_TACH_CHANNELS) {
+ dev_err(dev, "Invalid Fan Tach input channel %u\n.", index);
+ return -EINVAL;
+ }
aspeed_set_fan_tach_ch_enable(priv->regmap, index, true);
priv->fan_tach_present[index] = true;
priv->fan_tach_ch_source[index] = pwm_source;
aspeed_set_fan_tach_ch_source(priv->regmap, index, pwm_source);
}
+
+ return 0;
}
static int
@@ -880,7 +889,10 @@ static int aspeed_create_fan(struct device *dev,
fan_tach_ch, count);
if (ret)
return ret;
- aspeed_create_fan_tach_channel(priv, fan_tach_ch, count, pwm_port);
+
+ ret = aspeed_create_fan_tach_channel(dev, priv, fan_tach_ch, count, pwm_port);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index 463ab4296..a284a0283 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -524,7 +524,7 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto out_hw_close;
ccp->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsaircpro",
- ccp, &ccp_chip_info, 0);
+ ccp, &ccp_chip_info, NULL);
if (IS_ERR(ccp->hwmon_dev)) {
ret = PTR_ERR(ccp->hwmon_dev);
goto out_hw_close;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 44aaf9b91..6d8c0f328 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
@@ -34,8 +35,10 @@
#include <linux/thermal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/wmi.h>
#include <linux/i8k.h>
+#include <asm/unaligned.h>
#define I8K_SMM_FN_STATUS 0x0025
#define I8K_SMM_POWER_STATUS 0x0069
@@ -66,9 +69,26 @@
#define I8K_POWER_AC 0x05
#define I8K_POWER_BATTERY 0x01
+#define DELL_SMM_WMI_GUID "F1DDEE52-063C-4784-A11E-8A06684B9B01"
+#define DELL_SMM_LEGACY_EXECUTE 0x1
+
#define DELL_SMM_NO_TEMP 10
#define DELL_SMM_NO_FANS 3
+struct smm_regs {
+ unsigned int eax;
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+ unsigned int esi;
+ unsigned int edi;
+};
+
+struct dell_smm_ops {
+ struct device *smm_dev;
+ int (*smm_call)(struct device *smm_dev, struct smm_regs *regs);
+};
+
struct dell_smm_data {
struct mutex i8k_mutex; /* lock for sensors writes */
char bios_version[4];
@@ -76,14 +96,11 @@ struct dell_smm_data {
uint i8k_fan_mult;
uint i8k_pwm_mult;
uint i8k_fan_max;
- bool disallow_fan_type_call;
- bool disallow_fan_support;
- unsigned int manual_fan;
- unsigned int auto_fan;
int temp_type[DELL_SMM_NO_TEMP];
bool fan[DELL_SMM_NO_FANS];
int fan_type[DELL_SMM_NO_FANS];
int *fan_nominal_speed[DELL_SMM_NO_FANS];
+ const struct dell_smm_ops *ops;
};
struct dell_smm_cooling_data {
@@ -123,14 +140,9 @@ static uint fan_max;
module_param(fan_max, uint, 0);
MODULE_PARM_DESC(fan_max, "Maximum configurable fan speed (default: autodetect)");
-struct smm_regs {
- unsigned int eax;
- unsigned int ebx;
- unsigned int ecx;
- unsigned int edx;
- unsigned int esi;
- unsigned int edi;
-};
+static bool disallow_fan_type_call, disallow_fan_support;
+
+static unsigned int manual_fan, auto_fan;
static const char * const temp_labels[] = {
"CPU",
@@ -171,12 +183,8 @@ static inline const char __init *i8k_get_dmi_data(int field)
*/
static int i8k_smm_func(void *par)
{
- ktime_t calltime = ktime_get();
struct smm_regs *regs = par;
- int eax = regs->eax;
- int ebx = regs->ebx;
unsigned char carry;
- long long duration;
/* SMM requires CPU 0 */
if (smp_processor_id() != 0)
@@ -193,14 +201,7 @@ static int i8k_smm_func(void *par)
"+S" (regs->esi),
"+D" (regs->edi));
- duration = ktime_us_delta(ktime_get(), calltime);
- pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x carry: %d (took %7lld usecs)\n",
- eax, ebx, regs->eax & 0xffff, carry, duration);
-
- if (duration > DELL_SMM_MAX_DURATION)
- pr_warn_once("SMM call took %lld usecs!\n", duration);
-
- if (carry || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
+ if (carry)
return -EINVAL;
return 0;
@@ -209,7 +210,7 @@ static int i8k_smm_func(void *par)
/*
* Call the System Management Mode BIOS.
*/
-static int i8k_smm(struct smm_regs *regs)
+static int i8k_smm_call(struct device *dummy, struct smm_regs *regs)
{
int ret;
@@ -220,6 +221,134 @@ static int i8k_smm(struct smm_regs *regs)
return ret;
}
+static const struct dell_smm_ops i8k_smm_ops = {
+ .smm_call = i8k_smm_call,
+};
+
+/*
+ * Call the System Management Mode BIOS over WMI.
+ */
+static ssize_t wmi_parse_register(u8 *buffer, u32 length, unsigned int *reg)
+{
+ __le32 value;
+ u32 reg_size;
+
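+ /* each register is serialized as a 32-bit size followed by the value bytes */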
+ if (length <= sizeof(reg_size))
+ return -ENODATA;
+
+ reg_size = get_unaligned_le32(buffer);
+ if (!reg_size || reg_size > sizeof(value))
+ return -ENOMSG;
+
+ if (length < sizeof(reg_size) + reg_size)
+ return -ENODATA;
+
+ memcpy_and_pad(&value, sizeof(value), buffer + sizeof(reg_size), reg_size, 0);
+ *reg = le32_to_cpu(value);
+
+ return reg_size + sizeof(reg_size);
+}
+
+static int wmi_parse_response(u8 *buffer, u32 length, struct smm_regs *regs)
+{
+ unsigned int *registers[] = {
+ &regs->eax,
+ &regs->ebx,
+ &regs->ecx,
+ &regs->edx
+ };
+ u32 offset = 0;
+ ssize_t ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ if (offset >= length)
+ return -ENODATA;
+
+ ret = wmi_parse_register(buffer + offset, length - offset, registers[i]);
+ if (ret < 0)
+ return ret;
+
+ offset += ret;
+ }
+
+ if (offset != length)
+ return -ENOMSG;
+
+ return 0;
+}
+
+static int wmi_smm_call(struct device *dev, struct smm_regs *regs)
+{
+ struct wmi_device *wdev = container_of(dev, struct wmi_device, dev);
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
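+ /* the input buffer encodes each register as a (size, value) pair */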
+ u32 wmi_payload[] = {
+ sizeof(regs->eax),
+ regs->eax,
+ sizeof(regs->ebx),
+ regs->ebx,
+ sizeof(regs->ecx),
+ regs->ecx,
+ sizeof(regs->edx),
+ regs->edx
+ };
+ const struct acpi_buffer in = {
+ .length = sizeof(wmi_payload),
+ .pointer = &wmi_payload,
+ };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret;
+
+ status = wmidev_evaluate_method(wdev, 0x0, DELL_SMM_LEGACY_EXECUTE, &in, &out);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = out.pointer;
+ if (!obj)
+ return -ENODATA;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENOMSG;
+
+ goto err_free;
+ }
+
+ ret = wmi_parse_response(obj->buffer.pointer, obj->buffer.length, regs);
+
+err_free:
+ kfree(obj);
+
+ return ret;
+}
+
+static int dell_smm_call(const struct dell_smm_ops *ops, struct smm_regs *regs)
+{
+ unsigned int eax = regs->eax;
+ unsigned int ebx = regs->ebx;
+ long long duration;
+ ktime_t calltime;
+ int ret;
+
+ calltime = ktime_get();
+ ret = ops->smm_call(ops->smm_dev, regs);
+ duration = ktime_us_delta(ktime_get(), calltime);
+
+ pr_debug("SMM(0x%.4x 0x%.4x) = 0x%.4x status: %d (took %7lld usecs)\n",
+ eax, ebx, regs->eax & 0xffff, ret, duration);
+
+ if (duration > DELL_SMM_MAX_DURATION)
+ pr_warn_once("SMM call took %lld usecs!\n", duration);
+
+ if (ret < 0)
+ return ret;
+
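+ /* an unchanged or all-ones EAX means the SMM handler did not process the call */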
+ if ((regs->eax & 0xffff) == 0xffff || regs->eax == eax)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* Read the fan status.
*/
@@ -230,10 +359,10 @@ static int i8k_get_fan_status(const struct dell_smm_data *data, u8 fan)
.ebx = fan,
};
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
return -EINVAL;
- return i8k_smm(&regs) ? : regs.eax & 0xff;
+ return dell_smm_call(data->ops, &regs) ? : regs.eax & 0xff;
}
/*
@@ -246,10 +375,10 @@ static int i8k_get_fan_speed(const struct dell_smm_data *data, u8 fan)
.ebx = fan,
};
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
return -EINVAL;
- return i8k_smm(&regs) ? : (regs.eax & 0xffff) * data->i8k_fan_mult;
+ return dell_smm_call(data->ops, &regs) ? : (regs.eax & 0xffff) * data->i8k_fan_mult;
}
/*
@@ -262,10 +391,10 @@ static int _i8k_get_fan_type(const struct dell_smm_data *data, u8 fan)
.ebx = fan,
};
- if (data->disallow_fan_support || data->disallow_fan_type_call)
+ if (disallow_fan_support || disallow_fan_type_call)
return -EINVAL;
- return i8k_smm(&regs) ? : regs.eax & 0xff;
+ return dell_smm_call(data->ops, &regs) ? : regs.eax & 0xff;
}
static int i8k_get_fan_type(struct dell_smm_data *data, u8 fan)
@@ -280,17 +409,17 @@ static int i8k_get_fan_type(struct dell_smm_data *data, u8 fan)
/*
* Read the fan nominal rpm for specific fan speed.
*/
-static int __init i8k_get_fan_nominal_speed(const struct dell_smm_data *data, u8 fan, int speed)
+static int i8k_get_fan_nominal_speed(const struct dell_smm_data *data, u8 fan, int speed)
{
struct smm_regs regs = {
.eax = I8K_SMM_GET_NOM_SPEED,
.ebx = fan | (speed << 8),
};
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
return -EINVAL;
- return i8k_smm(&regs) ? : (regs.eax & 0xffff);
+ return dell_smm_call(data->ops, &regs) ? : (regs.eax & 0xffff);
}
/*
@@ -300,11 +429,11 @@ static int i8k_enable_fan_auto_mode(const struct dell_smm_data *data, bool enabl
{
struct smm_regs regs = { };
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
return -EINVAL;
- regs.eax = enable ? data->auto_fan : data->manual_fan;
- return i8k_smm(&regs);
+ regs.eax = enable ? auto_fan : manual_fan;
+ return dell_smm_call(data->ops, &regs);
}
/*
@@ -314,41 +443,41 @@ static int i8k_set_fan(const struct dell_smm_data *data, u8 fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
return -EINVAL;
speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
regs.ebx = fan | (speed << 8);
- return i8k_smm(&regs);
+ return dell_smm_call(data->ops, &regs);
}
-static int __init i8k_get_temp_type(u8 sensor)
+static int i8k_get_temp_type(const struct dell_smm_data *data, u8 sensor)
{
struct smm_regs regs = {
.eax = I8K_SMM_GET_TEMP_TYPE,
.ebx = sensor,
};
- return i8k_smm(&regs) ? : regs.eax & 0xff;
+ return dell_smm_call(data->ops, &regs) ? : regs.eax & 0xff;
}
/*
* Read the cpu temperature.
*/
-static int _i8k_get_temp(u8 sensor)
+static int _i8k_get_temp(const struct dell_smm_data *data, u8 sensor)
{
struct smm_regs regs = {
.eax = I8K_SMM_GET_TEMP,
.ebx = sensor,
};
- return i8k_smm(&regs) ? : regs.eax & 0xff;
+ return dell_smm_call(data->ops, &regs) ? : regs.eax & 0xff;
}
-static int i8k_get_temp(u8 sensor)
+static int i8k_get_temp(const struct dell_smm_data *data, u8 sensor)
{
- int temp = _i8k_get_temp(sensor);
+ int temp = _i8k_get_temp(data, sensor);
/*
* Sometimes the temperature sensor returns 0x99, which is out of range.
@@ -359,7 +488,7 @@ static int i8k_get_temp(u8 sensor)
*/
if (temp == 0x99) {
msleep(100);
- temp = _i8k_get_temp(sensor);
+ temp = _i8k_get_temp(data, sensor);
}
/*
* Return -ENODATA for all invalid temperatures.
@@ -375,12 +504,12 @@ static int i8k_get_temp(u8 sensor)
return temp;
}
-static int __init i8k_get_dell_signature(int req_fn)
+static int dell_smm_get_signature(const struct dell_smm_ops *ops, int req_fn)
{
struct smm_regs regs = { .eax = req_fn, };
int rc;
- rc = i8k_smm(&regs);
+ rc = dell_smm_call(ops, &regs);
if (rc < 0)
return rc;
@@ -392,12 +521,12 @@ static int __init i8k_get_dell_signature(int req_fn)
/*
* Read the Fn key status.
*/
-static int i8k_get_fn_status(void)
+static int i8k_get_fn_status(const struct dell_smm_data *data)
{
struct smm_regs regs = { .eax = I8K_SMM_FN_STATUS, };
int rc;
- rc = i8k_smm(&regs);
+ rc = dell_smm_call(data->ops, &regs);
if (rc < 0)
return rc;
@@ -416,12 +545,12 @@ static int i8k_get_fn_status(void)
/*
* Read the power status.
*/
-static int i8k_get_power_status(void)
+static int i8k_get_power_status(const struct dell_smm_data *data)
{
struct smm_regs regs = { .eax = I8K_SMM_POWER_STATUS, };
int rc;
- rc = i8k_smm(&regs);
+ rc = dell_smm_call(data->ops, &regs);
if (rc < 0)
return rc;
@@ -464,15 +593,15 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
return 0;
case I8K_FN_STATUS:
- val = i8k_get_fn_status();
+ val = i8k_get_fn_status(data);
break;
case I8K_POWER_STATUS:
- val = i8k_get_power_status();
+ val = i8k_get_power_status(data);
break;
case I8K_GET_TEMP:
- val = i8k_get_temp(0);
+ val = i8k_get_temp(data, 0);
break;
case I8K_GET_SPEED:
@@ -539,14 +668,14 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
int fn_key, cpu_temp, ac_power;
int left_fan, right_fan, left_speed, right_speed;
- cpu_temp = i8k_get_temp(0); /* 11100 µs */
+ cpu_temp = i8k_get_temp(data, 0); /* 11100 µs */
left_fan = i8k_get_fan_status(data, I8K_FAN_LEFT); /* 580 µs */
right_fan = i8k_get_fan_status(data, I8K_FAN_RIGHT); /* 580 µs */
left_speed = i8k_get_fan_speed(data, I8K_FAN_LEFT); /* 580 µs */
right_speed = i8k_get_fan_speed(data, I8K_FAN_RIGHT); /* 580 µs */
- fn_key = i8k_get_fn_status(); /* 750 µs */
+ fn_key = i8k_get_fn_status(data); /* 750 µs */
if (power_status)
- ac_power = i8k_get_power_status(); /* 14700 µs */
+ ac_power = i8k_get_power_status(data); /* 14700 µs */
else
ac_power = -1;
@@ -597,6 +726,11 @@ static void __init i8k_init_procfs(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
+ strscpy(data->bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
+ sizeof(data->bios_version));
+ strscpy(data->bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+ sizeof(data->bios_machineid));
+
/* Only register exit function if creation was successful */
if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
@@ -665,7 +799,7 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
switch (attr) {
case hwmon_temp_input:
/* _i8k_get_temp() is fine since we do not care about the actual value */
- if (data->temp_type[channel] >= 0 || _i8k_get_temp(channel) >= 0)
+ if (data->temp_type[channel] >= 0 || _i8k_get_temp(data, channel) >= 0)
return 0444;
break;
@@ -679,7 +813,7 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
}
break;
case hwmon_fan:
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
break;
switch (attr) {
@@ -689,7 +823,7 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
break;
case hwmon_fan_label:
- if (data->fan[channel] && !data->disallow_fan_type_call)
+ if (data->fan[channel] && !disallow_fan_type_call)
return 0444;
break;
@@ -705,7 +839,7 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
}
break;
case hwmon_pwm:
- if (data->disallow_fan_support)
+ if (disallow_fan_support)
break;
switch (attr) {
@@ -715,7 +849,7 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
break;
case hwmon_pwm_enable:
- if (data->auto_fan)
+ if (auto_fan)
/*
* There is no command for retrieve the current status
* from BIOS, and userspace/firmware itself can change
@@ -747,7 +881,7 @@ static int dell_smm_read(struct device *dev, enum hwmon_sensor_types type, u32 a
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- ret = i8k_get_temp(channel);
+ ret = i8k_get_temp(data, channel);
if (ret < 0)
return ret;
@@ -955,7 +1089,7 @@ static const struct hwmon_chip_info dell_smm_chip_info = {
.info = dell_smm_info,
};
-static int __init dell_smm_init_cdev(struct device *dev, u8 fan_num)
+static int dell_smm_init_cdev(struct device *dev, u8 fan_num)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
struct thermal_cooling_device *cdev;
@@ -986,7 +1120,7 @@ static int __init dell_smm_init_cdev(struct device *dev, u8 fan_num)
return ret;
}
-static int __init dell_smm_init_hwmon(struct device *dev)
+static int dell_smm_init_hwmon(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
struct device *dell_smm_hwmon_dev;
@@ -994,7 +1128,7 @@ static int __init dell_smm_init_hwmon(struct device *dev)
u8 i;
for (i = 0; i < DELL_SMM_NO_TEMP; i++) {
- data->temp_type[i] = i8k_get_temp_type(i);
+ data->temp_type[i] = i8k_get_temp_type(data, i);
if (data->temp_type[i] < 0)
continue;
@@ -1052,41 +1186,25 @@ static int __init dell_smm_init_hwmon(struct device *dev)
return PTR_ERR_OR_ZERO(dell_smm_hwmon_dev);
}
-struct i8k_config_data {
- uint fan_mult;
- uint fan_max;
-};
+static int dell_smm_init_data(struct device *dev, const struct dell_smm_ops *ops)
+{
+ struct dell_smm_data *data;
-enum i8k_configs {
- DELL_LATITUDE_D520,
- DELL_PRECISION_490,
- DELL_STUDIO,
- DELL_XPS,
-};
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
-/*
- * Only use for machines which need some special configuration
- * in order to work correctly (e.g. if autoconfig fails on this machines).
- */
+ mutex_init(&data->i8k_mutex);
+ dev_set_drvdata(dev, data);
-static const struct i8k_config_data i8k_config_data[] __initconst = {
- [DELL_LATITUDE_D520] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_TURBO,
- },
- [DELL_PRECISION_490] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_TURBO,
- },
- [DELL_STUDIO] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_HIGH,
- },
- [DELL_XPS] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_HIGH,
- },
-};
+ data->ops = ops;
+ /* All options must not be 0 */
+ data->i8k_fan_mult = fan_mult ? : I8K_FAN_MULT;
+ data->i8k_fan_max = fan_max ? : I8K_FAN_HIGH;
+ data->i8k_pwm_mult = DIV_ROUND_UP(255, data->i8k_fan_max);
+
+ return 0;
+}
static const struct dmi_system_id i8k_dmi_table[] __initconst = {
{
@@ -1118,14 +1236,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
- .ident = "Dell Latitude D520",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D520"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
- },
- {
.ident = "Dell Latitude 2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1147,15 +1257,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
- .ident = "Dell Precision 490",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "Precision WorkStation 490"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490],
- },
- {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1175,7 +1276,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
},
- .driver_data = (void *)&i8k_config_data[DELL_STUDIO],
},
{
.ident = "Dell XPS M140",
@@ -1183,7 +1283,6 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
},
- .driver_data = (void *)&i8k_config_data[DELL_XPS],
},
{
.ident = "Dell XPS",
@@ -1198,6 +1297,78 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
/*
+ * Only use for machines which need some special configuration
+ * in order to work correctly (e.g. if autoconfig fails on these machines).
+ */
+struct i8k_config_data {
+ uint fan_mult;
+ uint fan_max;
+};
+
+enum i8k_configs {
+ DELL_LATITUDE_D520,
+ DELL_PRECISION_490,
+ DELL_STUDIO,
+ DELL_XPS,
+};
+
+static const struct i8k_config_data i8k_config_data[] __initconst = {
+ [DELL_LATITUDE_D520] = {
+ .fan_mult = 1,
+ .fan_max = I8K_FAN_TURBO,
+ },
+ [DELL_PRECISION_490] = {
+ .fan_mult = 1,
+ .fan_max = I8K_FAN_TURBO,
+ },
+ [DELL_STUDIO] = {
+ .fan_mult = 1,
+ .fan_max = I8K_FAN_HIGH,
+ },
+ [DELL_XPS] = {
+ .fan_mult = 1,
+ .fan_max = I8K_FAN_HIGH,
+ },
+};
+
+static const struct dmi_system_id i8k_config_dmi_table[] __initconst = {
+ {
+ .ident = "Dell Latitude D520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D520"),
+ },
+ .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
+ },
+ {
+ .ident = "Dell Precision 490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME,
+ "Precision WorkStation 490"),
+ },
+ .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490],
+ },
+ {
+ .ident = "Dell Studio",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Studio"),
+ },
+ .driver_data = (void *)&i8k_config_data[DELL_STUDIO],
+ },
+ {
+ .ident = "Dell XPS M140",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MXC051"),
+ },
+ .driver_data = (void *)&i8k_config_data[DELL_XPS],
+ },
+ { }
+};
+
+/*
* On some machines once I8K_SMM_GET_FAN_TYPE is issued then CPU fan speed
* randomly going up and down due to bug in Dell SMM or BIOS. Here is blacklist
* of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call.
@@ -1338,119 +1509,174 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
},
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
+ {
+ .ident = "Dell Optiplex 7000",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7000"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
{ }
};
+/*
+ * Legacy SMM backend driver.
+ */
static int __init dell_smm_probe(struct platform_device *pdev)
{
- struct dell_smm_data *data;
- const struct dmi_system_id *id, *fan_control;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(struct dell_smm_data), GFP_KERNEL);
- if (!data)
+ ret = dell_smm_init_data(&pdev->dev, &i8k_smm_ops);
+ if (ret < 0)
+ return ret;
+
+ ret = dell_smm_init_hwmon(&pdev->dev);
+ if (ret)
+ return ret;
+
+ i8k_init_procfs(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver dell_smm_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static struct platform_device *dell_smm_device;
+
+/*
+ * WMI SMM backend driver.
+ */
+static int dell_smm_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct dell_smm_ops *ops;
+ int ret;
+
+ ops = devm_kzalloc(&wdev->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
return -ENOMEM;
- mutex_init(&data->i8k_mutex);
- platform_set_drvdata(pdev, data);
+ ops->smm_call = wmi_smm_call;
+ ops->smm_dev = &wdev->dev;
+
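+ /* bail out unless one of the Dell SMM signatures responds over WMI */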
+ if (dell_smm_get_signature(ops, I8K_SMM_GET_DELL_SIG1) &&
+ dell_smm_get_signature(ops, I8K_SMM_GET_DELL_SIG2))
+ return -ENODEV;
+
+ ret = dell_smm_init_data(&wdev->dev, ops);
+ if (ret < 0)
+ return ret;
+
+ return dell_smm_init_hwmon(&wdev->dev);
+}
+
+static const struct wmi_device_id dell_smm_wmi_id_table[] = {
+ { DELL_SMM_WMI_GUID, NULL },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, dell_smm_wmi_id_table);
+
+static struct wmi_driver dell_smm_wmi_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = dell_smm_wmi_id_table,
+ .probe = dell_smm_wmi_probe,
+};
+
+/*
+ * Probe for the presence of a supported laptop.
+ */
+static void __init dell_smm_init_dmi(void)
+{
+ struct i8k_fan_control_data *control;
+ struct i8k_config_data *config;
+ const struct dmi_system_id *id;
if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
if (!force) {
- dev_notice(&pdev->dev, "Disabling fan support due to BIOS bugs\n");
- data->disallow_fan_support = true;
+ pr_notice("Disabling fan support due to BIOS bugs\n");
+ disallow_fan_support = true;
} else {
- dev_warn(&pdev->dev, "Enabling fan support despite BIOS bugs\n");
+ pr_warn("Enabling fan support despite BIOS bugs\n");
}
}
if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
if (!force) {
- dev_notice(&pdev->dev, "Disabling fan type call due to BIOS bugs\n");
- data->disallow_fan_type_call = true;
+ pr_notice("Disabling fan type call due to BIOS bugs\n");
+ disallow_fan_type_call = true;
} else {
- dev_warn(&pdev->dev, "Enabling fan type call despite BIOS bugs\n");
+ pr_warn("Enabling fan type call despite BIOS bugs\n");
}
}
- strscpy(data->bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
- sizeof(data->bios_version));
- strscpy(data->bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
- sizeof(data->bios_machineid));
-
/*
- * Set fan multiplier and maximal fan speed from dmi config
- * Values specified in module parameters override values from dmi
+ * Set fan multiplier and maximal fan speed from DMI config.
+ * Values specified in module parameters override values from DMI.
*/
- id = dmi_first_match(i8k_dmi_table);
+ id = dmi_first_match(i8k_config_dmi_table);
if (id && id->driver_data) {
- const struct i8k_config_data *conf = id->driver_data;
+ config = id->driver_data;
+ if (!fan_mult && config->fan_mult)
+ fan_mult = config->fan_mult;
- if (!fan_mult && conf->fan_mult)
- fan_mult = conf->fan_mult;
-
- if (!fan_max && conf->fan_max)
- fan_max = conf->fan_max;
+ if (!fan_max && config->fan_max)
+ fan_max = config->fan_max;
}
- /* All options must not be 0 */
- data->i8k_fan_mult = fan_mult ? : I8K_FAN_MULT;
- data->i8k_fan_max = fan_max ? : I8K_FAN_HIGH;
- data->i8k_pwm_mult = DIV_ROUND_UP(255, data->i8k_fan_max);
-
- fan_control = dmi_first_match(i8k_whitelist_fan_control);
- if (fan_control && fan_control->driver_data) {
- const struct i8k_fan_control_data *control = fan_control->driver_data;
+ id = dmi_first_match(i8k_whitelist_fan_control);
+ if (id && id->driver_data) {
+ control = id->driver_data;
+ manual_fan = control->manual_fan;
+ auto_fan = control->auto_fan;
- data->manual_fan = control->manual_fan;
- data->auto_fan = control->auto_fan;
- dev_info(&pdev->dev, "enabling support for setting automatic/manual fan control\n");
+ pr_info("Enabling support for setting automatic/manual fan control\n");
}
-
- ret = dell_smm_init_hwmon(&pdev->dev);
- if (ret)
- return ret;
-
- i8k_init_procfs(&pdev->dev);
-
- return 0;
}
-static struct platform_driver dell_smm_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- },
-};
-
-static struct platform_device *dell_smm_device;
-
-/*
- * Probe for the presence of a supported laptop.
- */
-static int __init i8k_init(void)
+static int __init dell_smm_legacy_check(void)
{
- /*
- * Get DMI information
- */
if (!dmi_check_system(i8k_dmi_table)) {
if (!ignore_dmi && !force)
return -ENODEV;
- pr_info("not running on a supported Dell system.\n");
+ pr_info("Probing for legacy SMM handler on unsupported machine\n");
pr_info("vendor=%s, model=%s, version=%s\n",
i8k_get_dmi_data(DMI_SYS_VENDOR),
i8k_get_dmi_data(DMI_PRODUCT_NAME),
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- /*
- * Get SMM Dell signature
- */
- if (i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG1) &&
- i8k_get_dell_signature(I8K_SMM_GET_DELL_SIG2)) {
+ if (dell_smm_get_signature(&i8k_smm_ops, I8K_SMM_GET_DELL_SIG1) &&
+ dell_smm_get_signature(&i8k_smm_ops, I8K_SMM_GET_DELL_SIG2)) {
if (!force)
return -ENODEV;
- pr_err("Unable to get Dell SMM signature\n");
+ pr_warn("Forcing legacy SMM calls on a possibly incompatible machine\n");
+ }
+
+ return 0;
+}
+
+static int __init i8k_init(void)
+{
+ int ret;
+
+ dell_smm_init_dmi();
+
+ ret = dell_smm_legacy_check();
+ if (ret < 0) {
+ /*
+ * On modern machines, SMM communication happens over WMI, meaning
+ * the SMM handler might not react to legacy SMM calls.
+ */
+ return wmi_driver_register(&dell_smm_wmi_driver);
}
dell_smm_device = platform_create_bundle(&dell_smm_driver, dell_smm_probe, NULL, 0, NULL,
@@ -1461,8 +1687,12 @@ static int __init i8k_init(void)
static void __exit i8k_exit(void)
{
- platform_device_unregister(dell_smm_device);
- platform_driver_unregister(&dell_smm_driver);
+ if (dell_smm_device) {
+ platform_device_unregister(dell_smm_device);
+ platform_driver_unregister(&dell_smm_driver);
+ } else {
+ wmi_driver_unregister(&dell_smm_wmi_driver);
+ }
}
module_init(i8k_init);
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index bb7c859e7..1332e4ac0 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -346,6 +346,9 @@ static int emc1403_detect(struct i2c_client *client,
case 0x27:
strscpy(info->type, "emc1424", I2C_NAME_SIZE);
break;
+ case 0x60:
+ strscpy(info->type, "emc1442", I2C_NAME_SIZE);
+ break;
default:
return -ENODEV;
}
@@ -430,7 +433,7 @@ static int emc1403_probe(struct i2c_client *client)
}
static const unsigned short emc1403_address_list[] = {
- 0x18, 0x1c, 0x29, 0x4c, 0x4d, 0x5c, I2C_CLIENT_END
+ 0x18, 0x1c, 0x29, 0x3c, 0x4c, 0x4d, 0x5c, I2C_CLIENT_END
};
/* Last digit of chip name indicates number of channels */
@@ -444,6 +447,7 @@ static const struct i2c_device_id emc1403_idtable[] = {
{ "emc1422", emc1402 },
{ "emc1423", emc1403 },
{ "emc1424", emc1404 },
+ { "emc1442", emc1402 },
{ }
};
MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
diff --git a/drivers/hwmon/gigabyte_waterforce.c b/drivers/hwmon/gigabyte_waterforce.c
new file mode 100644
index 000000000..8129d7b3c
--- /dev/null
+++ b/drivers/hwmon/gigabyte_waterforce.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * hwmon driver for Gigabyte AORUS Waterforce AIO CPU coolers: X240, X280 and X360.
+ *
+ * Copyright 2023 Aleksa Savic <savicaleksa83@gmail.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/hid.h>
+#include <linux/hwmon.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "gigabyte_waterforce"
+
+#define USB_VENDOR_ID_GIGABYTE 0x1044
+#define USB_PRODUCT_ID_WATERFORCE 0x7a4d /* Gigabyte AORUS WATERFORCE X240, X280 and X360 */
+
+#define STATUS_VALIDITY (2 * 1000) /* ms */
+#define MAX_REPORT_LENGTH 6144
+
+#define WATERFORCE_TEMP_SENSOR 0xD
+#define WATERFORCE_FAN_SPEED 0x02
+#define WATERFORCE_PUMP_SPEED 0x05
+#define WATERFORCE_FAN_DUTY 0x08
+#define WATERFORCE_PUMP_DUTY 0x09
+
+/* Control commands, inner offsets and lengths */
+static const u8 get_status_cmd[] = { 0x99, 0xDA };
+
+#define FIRMWARE_VER_START_OFFSET_1 2
+#define FIRMWARE_VER_START_OFFSET_2 3
+static const u8 get_firmware_ver_cmd[] = { 0x99, 0xD6 };
+
+/* Command lengths */
+#define GET_STATUS_CMD_LENGTH 2
+#define GET_FIRMWARE_VER_CMD_LENGTH 2
+
+static const char *const waterforce_temp_label[] = {
+ "Coolant temp"
+};
+
+static const char *const waterforce_speed_label[] = {
+ "Fan speed",
+ "Pump speed"
+};
+
+struct waterforce_data {
+ struct hid_device *hdev;
+ struct device *hwmon_dev;
+ struct dentry *debugfs;
+ /* For locking access to buffer */
+ struct mutex buffer_lock;
+ /* For queueing multiple readers */
+ struct mutex status_report_request_mutex;
+ /* For reinitializing the completion below */
+ spinlock_t status_report_request_lock;
+ struct completion status_report_received;
+ struct completion fw_version_processed;
+
+ /* Sensor data */
+ s32 temp_input[1];
+ u16 speed_input[2]; /* Fan and pump speed in RPM */
+ u8 duty_input[2]; /* Fan and pump duty in 0-100% */
+
+ u8 *buffer;
+ int firmware_version;
+ unsigned long updated; /* jiffies */
+};
+
+static umode_t waterforce_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_label:
+ case hwmon_fan_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Writes the command to the device with the rest of the report filled with zeroes */
+static int waterforce_write_expanded(struct waterforce_data *priv, const u8 *cmd, int cmd_length)
+{
+ int ret;
+
+ mutex_lock(&priv->buffer_lock);
+
+ memcpy_and_pad(priv->buffer, MAX_REPORT_LENGTH, cmd, cmd_length, 0x00);
+ ret = hid_hw_output_report(priv->hdev, priv->buffer, MAX_REPORT_LENGTH);
+
+ mutex_unlock(&priv->buffer_lock);
+ return ret;
+}
+
+static int waterforce_get_status(struct waterforce_data *priv)
+{
+ int ret = mutex_lock_interruptible(&priv->status_report_request_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ if (!time_after(jiffies, priv->updated + msecs_to_jiffies(STATUS_VALIDITY))) {
+ /* Data is up to date */
+ goto unlock_and_return;
+ }
+
+ /*
+ * Disable raw event parsing for a moment to safely reinitialize the
+ * completion. Reinit is done because hidraw could have triggered
+ * the raw event parsing and marked the priv->status_report_received
+ * completion as done.
+ */
+ spin_lock_bh(&priv->status_report_request_lock);
+ reinit_completion(&priv->status_report_received);
+ spin_unlock_bh(&priv->status_report_request_lock);
+
+ /* Send command for getting status */
+ ret = waterforce_write_expanded(priv, get_status_cmd, GET_STATUS_CMD_LENGTH);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->status_report_received,
+ msecs_to_jiffies(STATUS_VALIDITY));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+unlock_and_return:
+ mutex_unlock(&priv->status_report_request_mutex);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int waterforce_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct waterforce_data *priv = dev_get_drvdata(dev);
+ int ret = waterforce_get_status(priv);
+
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_temp:
+ *val = priv->temp_input[channel];
+ break;
+ case hwmon_fan:
+ *val = priv->speed_input[channel];
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = DIV_ROUND_CLOSEST(priv->duty_input[channel] * 255, 100);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP; /* unreachable */
+ }
+
+ return 0;
+}
+
+static int waterforce_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ *str = waterforce_temp_label[channel];
+ break;
+ case hwmon_fan:
+ *str = waterforce_speed_label[channel];
+ break;
+ default:
+ return -EOPNOTSUPP; /* unreachable */
+ }
+
+ return 0;
+}
+
+static int waterforce_get_fw_ver(struct hid_device *hdev)
+{
+ struct waterforce_data *priv = hid_get_drvdata(hdev);
+ int ret;
+
+ ret = waterforce_write_expanded(priv, get_firmware_ver_cmd, GET_FIRMWARE_VER_CMD_LENGTH);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->fw_version_processed,
+ msecs_to_jiffies(STATUS_VALIDITY));
+ if (ret == 0)
+ return -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct hwmon_ops waterforce_hwmon_ops = {
+ .is_visible = waterforce_is_visible,
+ .read = waterforce_read,
+ .read_string = waterforce_read_string
+};
+
+static const struct hwmon_channel_info *waterforce_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info waterforce_chip_info = {
+ .ops = &waterforce_hwmon_ops,
+ .info = waterforce_info,
+};
+
+static int waterforce_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data,
+ int size)
+{
+ struct waterforce_data *priv = hid_get_drvdata(hdev);
+
+ if (data[0] == get_firmware_ver_cmd[0] && data[1] == get_firmware_ver_cmd[1]) {
+ /* Received a firmware version report */
+ priv->firmware_version =
+ data[FIRMWARE_VER_START_OFFSET_1] * 10 + data[FIRMWARE_VER_START_OFFSET_2];
+
+ if (!completion_done(&priv->fw_version_processed))
+ complete_all(&priv->fw_version_processed);
+ return 0;
+ }
+
+ if (data[0] != get_status_cmd[0] || data[1] != get_status_cmd[1])
+ return 0;
+
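+ /* status report: coolant temp in degrees C, fan/pump speeds as LE16 RPM, duties in percent */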
+ priv->temp_input[0] = data[WATERFORCE_TEMP_SENSOR] * 1000;
+ priv->speed_input[0] = get_unaligned_le16(data + WATERFORCE_FAN_SPEED);
+ priv->speed_input[1] = get_unaligned_le16(data + WATERFORCE_PUMP_SPEED);
+ priv->duty_input[0] = data[WATERFORCE_FAN_DUTY];
+ priv->duty_input[1] = data[WATERFORCE_PUMP_DUTY];
+
+ spin_lock(&priv->status_report_request_lock);
+ if (!completion_done(&priv->status_report_received))
+ complete_all(&priv->status_report_received);
+ spin_unlock(&priv->status_report_request_lock);
+
+ priv->updated = jiffies;
+
+ return 0;
+}
+
+static int firmware_version_show(struct seq_file *seqf, void *unused)
+{
+ struct waterforce_data *priv = seqf->private;
+
+ seq_printf(seqf, "%u\n", priv->firmware_version);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(firmware_version);
+
+static void waterforce_debugfs_init(struct waterforce_data *priv)
+{
+ char name[64];
+
+ if (!priv->firmware_version)
+ return; /* There's nothing to show in debugfs */
+
+ scnprintf(name, sizeof(name), "%s-%s", DRIVER_NAME, dev_name(&priv->hdev->dev));
+
+ priv->debugfs = debugfs_create_dir(name, NULL);
+ debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops);
+}
+
+static int waterforce_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct waterforce_data *priv;
+ int ret;
+
+ priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hdev = hdev;
+ hid_set_drvdata(hdev, priv);
+
+ /*
+ * Initialize priv->updated to STATUS_VALIDITY milliseconds in the past, making
+ * the initial empty data invalid for waterforce_read() without the need for
+ * a special case there.
+ */
+ priv->updated = jiffies - msecs_to_jiffies(STATUS_VALIDITY);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid parse failed with %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Enable hidraw so existing user-space tools can continue to work.
+ */
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hid hw start failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ hid_err(hdev, "hid hw open failed with %d\n", ret);
+ goto fail_and_stop;
+ }
+
+ priv->buffer = devm_kzalloc(&hdev->dev, MAX_REPORT_LENGTH, GFP_KERNEL);
+ if (!priv->buffer) {
+ ret = -ENOMEM;
+ goto fail_and_close;
+ }
+
+ mutex_init(&priv->status_report_request_mutex);
+ mutex_init(&priv->buffer_lock);
+ spin_lock_init(&priv->status_report_request_lock);
+ init_completion(&priv->status_report_received);
+ init_completion(&priv->fw_version_processed);
+
+ hid_device_io_start(hdev);
+ ret = waterforce_get_fw_ver(hdev);
+ if (ret < 0)
+ hid_warn(hdev, "fw version request failed with %d\n", ret);
+
+ priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "waterforce",
+ priv, &waterforce_chip_info, NULL);
+ if (IS_ERR(priv->hwmon_dev)) {
+ ret = PTR_ERR(priv->hwmon_dev);
+ hid_err(hdev, "hwmon registration failed with %d\n", ret);
+ goto fail_and_close;
+ }
+
+ waterforce_debugfs_init(priv);
+
+ return 0;
+
+fail_and_close:
+ hid_hw_close(hdev);
+fail_and_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void waterforce_remove(struct hid_device *hdev)
+{
+ struct waterforce_data *priv = hid_get_drvdata(hdev);
+
+ debugfs_remove_recursive(priv->debugfs);
+ hwmon_device_unregister(priv->hwmon_dev);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id waterforce_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_GIGABYTE, USB_PRODUCT_ID_WATERFORCE) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, waterforce_table);
+
+static struct hid_driver waterforce_driver = {
+ .name = "waterforce",
+ .id_table = waterforce_table,
+ .probe = waterforce_probe,
+ .remove = waterforce_remove,
+ .raw_event = waterforce_raw_event,
+};
+
+static int __init waterforce_init(void)
+{
+ return hid_register_driver(&waterforce_driver);
+}
+
+static void __exit waterforce_exit(void)
+{
+ hid_unregister_driver(&waterforce_driver);
+}
+
+/* When compiled into the kernel, initialize after the HID bus */
+late_initcall(waterforce_init);
+module_exit(waterforce_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>");
+MODULE_DESCRIPTION("Hwmon driver for Gigabyte AORUS Waterforce AIO coolers");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index bae0becfa..8092312c0 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -455,6 +455,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
switch (boot_cpu_data.x86_model) {
case 0x0 ... 0x1: /* Zen3 SP3/TR */
+ case 0x8: /* Zen3 TR Chagall */
case 0x21: /* Zen3 Ryzen Desktop */
case 0x50 ... 0x5f: /* Green Sardine */
data->ccd_offset = 0x154;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 5b2ea05c9..e00750718 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -7,11 +7,11 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/regmap.h>
@@ -25,6 +25,7 @@
enum lm75_type { /* keep sorted in alphabetical order */
adt75,
+ as6200,
at30ts74,
ds1775,
ds75,
@@ -55,6 +56,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
/**
* struct lm75_params - lm75 configuration parameters.
+ * @config_reg_16bits: Configuration register size is 2 bytes.
* @set_mask: Bits to set in configuration register when configuring
* the chip.
* @clr_mask: Bits to clear in configuration register when configuring
@@ -75,17 +77,20 @@ enum lm75_type { /* keep sorted in alphabetical order */
* @sample_times: All the possible sample times to be set. Mandatory if
* num_sample_times is larger than 1. If set, number of
* entries must match num_sample_times.
+ * @alarm: Alarm bit is supported.
*/
struct lm75_params {
- u8 set_mask;
- u8 clr_mask;
+ bool config_reg_16bits;
+ u16 set_mask;
+ u16 clr_mask;
u8 default_resolution;
u8 resolution_limits;
const u8 *resolutions;
unsigned int default_sample_time;
u8 num_sample_times;
const unsigned int *sample_times;
+ bool alarm;
};
/* Addresses scanned */
@@ -104,8 +109,8 @@ struct lm75_data {
struct i2c_client *client;
struct regmap *regmap;
struct regulator *vs;
- u8 orig_conf;
- u8 current_conf;
+ u16 orig_conf;
+ u16 current_conf;
u8 resolution; /* In bits, 9 to 16 */
unsigned int sample_time; /* In ms */
enum lm75_type kind;
@@ -128,6 +133,15 @@ static const struct lm75_params device_params[] = {
.default_resolution = 12,
.default_sample_time = MSEC_PER_SEC / 10,
},
+ [as6200] = {
+ .config_reg_16bits = true,
+ .set_mask = 0x94C0, /* 8 samples/s, 4 CF, positive polarity */
+ .default_resolution = 12,
+ .default_sample_time = 125,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+ .alarm = true,
+ },
[at30ts74] = {
.set_mask = 3 << 5, /* 12-bit mode*/
.default_resolution = 12,
@@ -255,8 +269,9 @@ static const struct lm75_params device_params[] = {
.resolutions = (u8 []) {9, 10, 11, 12 },
},
[tmp112] = {
- .set_mask = 3 << 5, /* 8 samples / second */
- .clr_mask = 1 << 7, /* no one-shot mode*/
+ .config_reg_16bits = true,
+ .set_mask = 0x60C0, /* 12-bit mode, 8 samples / second */
+ .clr_mask = 1 << 15, /* no one-shot mode */
.default_resolution = 12,
.default_sample_time = 125,
.num_sample_times = 4,
@@ -317,20 +332,23 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
-static int lm75_write_config(struct lm75_data *data, u8 set_mask,
- u8 clr_mask)
+static int lm75_write_config(struct lm75_data *data, u16 set_mask,
+ u16 clr_mask)
{
- u8 value;
+ unsigned int value;
- clr_mask |= LM75_SHUTDOWN;
+ clr_mask |= LM75_SHUTDOWN << (8 * data->params->config_reg_16bits);
value = data->current_conf & ~clr_mask;
value |= set_mask;
if (data->current_conf != value) {
s32 err;
-
- err = i2c_smbus_write_byte_data(data->client, LM75_REG_CONF,
- value);
+ if (data->params->config_reg_16bits)
+ err = regmap_write(data->regmap, LM75_REG_CONF, value);
+ else
+ err = i2c_smbus_write_byte_data(data->client,
+ LM75_REG_CONF,
+ value);
if (err)
return err;
data->current_conf = value;
@@ -338,6 +356,27 @@ static int lm75_write_config(struct lm75_data *data, u8 set_mask,
return 0;
}
+static int lm75_read_config(struct lm75_data *data)
+{
+ int ret;
+ unsigned int status;
+
+ if (data->params->config_reg_16bits) {
+ ret = regmap_read(data->regmap, LM75_REG_CONF, &status);
+ return ret ? ret : status;
+ }
+
+ return i2c_smbus_read_byte_data(data->client, LM75_REG_CONF);
+}
+
+static irqreturn_t lm75_alarm_handler(int irq, void *private)
+{
+ struct device *hwmon_dev = private;
+
+ hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_alarm, 0);
+ return IRQ_HANDLED;
+}
+
static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -366,6 +405,9 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_max_hyst:
reg = LM75_REG_HYST;
break;
+ case hwmon_temp_alarm:
+ reg = LM75_REG_CONF;
+ break;
default:
return -EINVAL;
}
@@ -373,7 +415,17 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
if (err < 0)
return err;
- *val = lm75_reg_to_mc(regval, data->resolution);
+ if (attr == hwmon_temp_alarm) {
+ switch (data->kind) {
+ case as6200:
+ *val = (regval >> 5) & 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ *val = lm75_reg_to_mc(regval, data->resolution);
+ }
break;
default:
return -EINVAL;
@@ -436,6 +488,7 @@ static int lm75_update_interval(struct device *dev, long val)
data->resolution = data->params->resolutions[index];
break;
case tmp112:
+ case as6200:
err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
if (err < 0)
return err;
@@ -503,6 +556,10 @@ static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_temp_max:
case hwmon_temp_max_hyst:
return 0644;
+ case hwmon_temp_alarm:
+ if (config_data->params->alarm)
+ return 0444;
+ break;
}
break;
default:
@@ -515,7 +572,8 @@ static const struct hwmon_channel_info * const lm75_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
HWMON_CHANNEL_INFO(temp,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_ALARM),
NULL
};
@@ -623,7 +681,7 @@ static int lm75_probe(struct i2c_client *client)
return err;
/* Cache original configuration */
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
+ status = lm75_read_config(data);
if (status < 0) {
dev_dbg(dev, "Can't read config? %d\n", status);
return status;
@@ -646,6 +704,23 @@ static int lm75_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
+ if (client->irq) {
+ if (data->params->alarm) {
+ err = devm_request_threaded_irq(dev,
+ client->irq,
+ NULL,
+ &lm75_alarm_handler,
+ IRQF_ONESHOT,
+ client->name,
+ hwmon_dev);
+ if (err)
+ return err;
+ } else {
+ /* alarm is only supported for chips with alarm bit */
+ dev_err(dev, "alarm interrupt is not supported\n");
+ }
+ }
+
dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
return 0;
@@ -653,6 +728,7 @@ static int lm75_probe(struct i2c_client *client)
static const struct i2c_device_id lm75_ids[] = {
{ "adt75", adt75, },
+ { "as6200", as6200, },
{ "at30ts74", at30ts74, },
{ "ds1775", ds1775, },
{ "ds75", ds75, },
@@ -690,6 +766,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)adt75
},
{
+ .compatible = "ams,as6200",
+ .data = (void *)as6200
+ },
+ {
.compatible = "atmel,at30ts74",
.data = (void *)at30ts74
},
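For reference, lm75_reg_to_mc() (visible in the hunk context above) decodes a left-justified two's-complement temperature register into millidegrees Celsius. A minimal userspace sketch of the same arithmetic; the sample register values and the main() harness are assumptions for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	/* Same math as lm75_reg_to_mc(): drop the unused low bits of the
	 * left-justified register, then scale to millidegrees Celsius.
	 * Like the driver, this relies on arithmetic right shifts. */
	static long reg_to_mc(int16_t temp, uint8_t resolution)
	{
		return ((temp >> (16 - resolution)) * 1000L) >> (resolution - 8);
	}

	int main(void)
	{
		/* 0x1900 is +25 degC left-justified at 12-bit resolution */
		printf("%ld\n", reg_to_mc(0x1900, 12));			/* 25000 */
		/* 0xE700 is -25 degC in two's complement */
		printf("%ld\n", reg_to_mc((int16_t)0xE700, 12));	/* -25000 */
		return 0;
	}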
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index fc53fdcb2..80a6e391f 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -54,7 +54,6 @@
#define LTC2991_VCC_CH_NR 0
struct ltc2991_state {
- struct device *dev;
struct regmap *regmap;
u32 r_sense_uohm[LTC2991_MAX_CHANNEL];
bool temp_en[LTC2991_MAX_CHANNEL];
@@ -283,19 +282,19 @@ static const struct regmap_config ltc2991_regmap_config = {
.max_register = 0x1D,
};
-static int ltc2991_init(struct ltc2991_state *st)
+static int ltc2991_init(struct ltc2991_state *st, struct device *dev)
{
struct fwnode_handle *child;
int ret;
u32 val, addr;
u8 v5_v8_reg_data = 0, v1_v4_reg_data = 0;
- ret = devm_regulator_get_enable(st->dev, "vcc");
+ ret = devm_regulator_get_enable(dev, "vcc");
if (ret)
- return dev_err_probe(st->dev, ret,
+ return dev_err_probe(dev, ret,
"failed to enable regulator\n");
- device_for_each_child_node(st->dev, child) {
+ device_for_each_child_node(dev, child) {
ret = fwnode_property_read_u32(child, "reg", &addr);
if (ret < 0) {
fwnode_handle_put(child);
@@ -312,7 +311,7 @@ static int ltc2991_init(struct ltc2991_state *st)
&val);
if (!ret) {
if (!val)
- return dev_err_probe(st->dev, -EINVAL,
+ return dev_err_probe(dev, -EINVAL,
"shunt resistor value cannot be zero\n");
st->r_sense_uohm[addr] = val;
@@ -361,18 +360,18 @@ static int ltc2991_init(struct ltc2991_state *st)
ret = regmap_write(st->regmap, LTC2991_V5_V8_CTRL, v5_v8_reg_data);
if (ret)
- return dev_err_probe(st->dev, ret,
+ return dev_err_probe(dev, ret,
"Error: Failed to set V5-V8 CTRL reg.\n");
ret = regmap_write(st->regmap, LTC2991_V1_V4_CTRL, v1_v4_reg_data);
if (ret)
- return dev_err_probe(st->dev, ret,
+ return dev_err_probe(dev, ret,
"Error: Failed to set V1-V4 CTRL reg.\n");
ret = regmap_write(st->regmap, LTC2991_PWM_TH_LSB_T_INT,
LTC2991_REPEAT_ACQ_EN);
if (ret)
- return dev_err_probe(st->dev, ret,
+ return dev_err_probe(dev, ret,
"Error: Failed to set continuous mode.\n");
/* Enable all channels and trigger conversions */
@@ -392,12 +391,11 @@ static int ltc2991_i2c_probe(struct i2c_client *client)
if (!st)
return -ENOMEM;
- st->dev = &client->dev;
st->regmap = devm_regmap_init_i2c(client, &ltc2991_regmap_config);
if (IS_ERR(st->regmap))
return PTR_ERR(st->regmap);
- ret = ltc2991_init(st);
+ ret = ltc2991_init(st, &client->dev);
if (ret)
return ret;
diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c
index a1ce65145..4a8c3e37c 100644
--- a/drivers/hwmon/max31827.c
+++ b/drivers/hwmon/max31827.c
@@ -11,6 +11,7 @@
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -23,15 +24,30 @@
#define MAX31827_CONFIGURATION_1SHOT_MASK BIT(0)
#define MAX31827_CONFIGURATION_CNV_RATE_MASK GENMASK(3, 1)
+#define MAX31827_CONFIGURATION_TIMEOUT_MASK BIT(5)
+#define MAX31827_CONFIGURATION_RESOLUTION_MASK GENMASK(7, 6)
+#define MAX31827_CONFIGURATION_ALRM_POL_MASK BIT(8)
+#define MAX31827_CONFIGURATION_COMP_INT_MASK BIT(9)
+#define MAX31827_CONFIGURATION_FLT_Q_MASK GENMASK(11, 10)
#define MAX31827_CONFIGURATION_U_TEMP_STAT_MASK BIT(14)
#define MAX31827_CONFIGURATION_O_TEMP_STAT_MASK BIT(15)
+#define MAX31827_ALRM_POL_LOW 0x0
+#define MAX31827_ALRM_POL_HIGH 0x1
+#define MAX31827_FLT_Q_1 0x0
+#define MAX31827_FLT_Q_4 0x2
+
+#define MAX31827_8_BIT_CNV_TIME 9
+#define MAX31827_9_BIT_CNV_TIME 18
+#define MAX31827_10_BIT_CNV_TIME 35
#define MAX31827_12_BIT_CNV_TIME 140
#define MAX31827_16_BIT_TO_M_DGR(x) (sign_extend32(x, 15) * 1000 / 16)
#define MAX31827_M_DGR_TO_16_BIT(x) (((x) << 4) / 1000)
#define MAX31827_DEVICE_ENABLE(x) ((x) ? 0xA : 0x0)
+enum chips { max31827 = 1, max31828, max31829 };
+
enum max31827_cnv {
MAX31827_CNV_1_DIV_64_HZ = 1,
MAX31827_CNV_1_DIV_32_HZ,
@@ -52,6 +68,27 @@ static const u16 max31827_conversions[] = {
[MAX31827_CNV_8_HZ] = 125,
};
+enum max31827_resolution {
+ MAX31827_RES_8_BIT = 0,
+ MAX31827_RES_9_BIT,
+ MAX31827_RES_10_BIT,
+ MAX31827_RES_12_BIT,
+};
+
+static const u16 max31827_resolutions[] = {
+ [MAX31827_RES_8_BIT] = 1000,
+ [MAX31827_RES_9_BIT] = 500,
+ [MAX31827_RES_10_BIT] = 250,
+ [MAX31827_RES_12_BIT] = 62,
+};
+
+static const u16 max31827_conv_times[] = {
+ [MAX31827_RES_8_BIT] = MAX31827_8_BIT_CNV_TIME,
+ [MAX31827_RES_9_BIT] = MAX31827_9_BIT_CNV_TIME,
+ [MAX31827_RES_10_BIT] = MAX31827_10_BIT_CNV_TIME,
+ [MAX31827_RES_12_BIT] = MAX31827_12_BIT_CNV_TIME,
+};
+
struct max31827_state {
/*
* Prevent simultaneous access to the i2c client.
@@ -59,6 +96,8 @@ struct max31827_state {
struct mutex lock;
struct regmap *regmap;
bool enable;
+ unsigned int resolution;
+ unsigned int update_interval;
};
static const struct regmap_config max31827_regmap = {
@@ -68,16 +107,16 @@ static const struct regmap_config max31827_regmap = {
};
static int shutdown_write(struct max31827_state *st, unsigned int reg,
- unsigned int val)
+ unsigned int mask, unsigned int val)
{
unsigned int cfg;
unsigned int cnv_rate;
int ret;
/*
- * Before the Temperature Threshold Alarm and Alarm Hysteresis Threshold
- * register values are changed over I2C, the part must be in shutdown
- * mode.
+ * Before the Temperature Threshold Alarm, Alarm Hysteresis Threshold
+ * and Resolution bits from Configuration register are changed over I2C,
+ * the part must be in shutdown mode.
*
 * The mutex is used to ensure that no other process changes the
 * configuration register concurrently.
@@ -85,7 +124,10 @@ static int shutdown_write(struct max31827_state *st, unsigned int reg,
mutex_lock(&st->lock);
if (!st->enable) {
- ret = regmap_write(st->regmap, reg, val);
+ if (!mask)
+ ret = regmap_write(st->regmap, reg, val);
+ else
+ ret = regmap_update_bits(st->regmap, reg, mask, val);
goto unlock;
}
@@ -100,7 +142,11 @@ static int shutdown_write(struct max31827_state *st, unsigned int reg,
if (ret)
goto unlock;
- ret = regmap_write(st->regmap, reg, val);
+ if (!mask)
+ ret = regmap_write(st->regmap, reg, val);
+ else
+ ret = regmap_update_bits(st->regmap, reg, mask, val);
+
if (ret)
goto unlock;
@@ -118,7 +164,7 @@ static int write_alarm_val(struct max31827_state *st, unsigned int reg,
{
val = MAX31827_M_DGR_TO_16_BIT(val);
- return shutdown_write(st, reg, val);
+ return shutdown_write(st, reg, 0, val);
}
static umode_t max31827_is_visible(const void *state,
@@ -188,9 +234,18 @@ static int max31827_read(struct device *dev, enum hwmon_sensor_types type,
mutex_unlock(&st->lock);
return ret;
}
-
- msleep(MAX31827_12_BIT_CNV_TIME);
+ msleep(max31827_conv_times[st->resolution]);
}
+
+ /*
+ * For 12-bit resolution the conversion time is 140 ms,
+ * thus an additional 15 ms is needed to complete the
+ * conversion: 125 ms + 15 ms = 140 ms
+ */
+ if (st->resolution == MAX31827_RES_12_BIT &&
+ st->update_interval == 125)
+ usleep_range(15000, 20000);
+
ret = regmap_read(st->regmap, MAX31827_T_REG, &uval);
mutex_unlock(&st->lock);
@@ -341,17 +396,20 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type,
val < max31827_conversions[res])
res++;
- if (res == ARRAY_SIZE(max31827_conversions) ||
- val != max31827_conversions[res])
- return -EINVAL;
+ if (res == ARRAY_SIZE(max31827_conversions))
+ res = ARRAY_SIZE(max31827_conversions) - 1;
res = FIELD_PREP(MAX31827_CONFIGURATION_CNV_RATE_MASK,
res);
- return regmap_update_bits(st->regmap,
- MAX31827_CONFIGURATION_REG,
- MAX31827_CONFIGURATION_CNV_RATE_MASK,
- res);
+ ret = regmap_update_bits(st->regmap,
+ MAX31827_CONFIGURATION_REG,
+ MAX31827_CONFIGURATION_CNV_RATE_MASK,
+ res);
+ if (ret)
+ return ret;
+
+ st->update_interval = val;
}
break;
@@ -359,17 +417,165 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type,
return -EOPNOTSUPP;
}
- return -EOPNOTSUPP;
+ return 0;
+}
+
+static ssize_t temp1_resolution_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct max31827_state *st = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(st->regmap, MAX31827_CONFIGURATION_REG, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(MAX31827_CONFIGURATION_RESOLUTION_MASK, val);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", max31827_resolutions[val]);
}
-static int max31827_init_client(struct max31827_state *st)
+static ssize_t temp1_resolution_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
+ struct max31827_state *st = dev_get_drvdata(dev);
+ unsigned int idx = 0;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Convert the desired resolution into register
+ * bits. idx is already initialized to 0.
+ *
+ * This was inspired by the lm73 driver.
+ */
+ while (idx < ARRAY_SIZE(max31827_resolutions) &&
+ val < max31827_resolutions[idx])
+ idx++;
+
+ if (idx == ARRAY_SIZE(max31827_resolutions))
+ idx = ARRAY_SIZE(max31827_resolutions) - 1;
+
+ st->resolution = idx;
+
+ ret = shutdown_write(st, MAX31827_CONFIGURATION_REG,
+ MAX31827_CONFIGURATION_RESOLUTION_MASK,
+ FIELD_PREP(MAX31827_CONFIGURATION_RESOLUTION_MASK,
+ idx));
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(temp1_resolution);
+
+static struct attribute *max31827_attrs[] = {
+ &dev_attr_temp1_resolution.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(max31827);
+
+static const struct i2c_device_id max31827_i2c_ids[] = {
+ { "max31827", max31827 },
+ { "max31828", max31828 },
+ { "max31829", max31829 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31827_i2c_ids);
+
+static int max31827_init_client(struct max31827_state *st,
+ struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+ unsigned int res = 0;
+ u32 data, lsb_idx;
+ enum chips type;
+ bool prop;
+ int ret;
+
+ fwnode = dev_fwnode(dev);
+
st->enable = true;
+ res |= MAX31827_DEVICE_ENABLE(1);
- return regmap_update_bits(st->regmap, MAX31827_CONFIGURATION_REG,
- MAX31827_CONFIGURATION_1SHOT_MASK |
- MAX31827_CONFIGURATION_CNV_RATE_MASK,
- MAX31827_DEVICE_ENABLE(1));
+ res |= MAX31827_CONFIGURATION_RESOLUTION_MASK;
+
+ prop = fwnode_property_read_bool(fwnode, "adi,comp-int");
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_COMP_INT_MASK, prop);
+
+ prop = fwnode_property_read_bool(fwnode, "adi,timeout-enable");
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_TIMEOUT_MASK, !prop);
+
+ type = (enum chips)(uintptr_t)device_get_match_data(dev);
+
+ if (fwnode_property_present(fwnode, "adi,alarm-pol")) {
+ ret = fwnode_property_read_u32(fwnode, "adi,alarm-pol", &data);
+ if (ret)
+ return ret;
+
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_ALRM_POL_MASK, !!data);
+ } else {
+ /*
+ * Set default value.
+ */
+ switch (type) {
+ case max31827:
+ case max31828:
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_ALRM_POL_MASK,
+ MAX31827_ALRM_POL_LOW);
+ break;
+ case max31829:
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_ALRM_POL_MASK,
+ MAX31827_ALRM_POL_HIGH);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (fwnode_property_present(fwnode, "adi,fault-q")) {
+ ret = fwnode_property_read_u32(fwnode, "adi,fault-q", &data);
+ if (ret)
+ return ret;
+
+ /*
+ * Convert the desired fault queue into register bits.
+ */
+ if (data != 0)
+ lsb_idx = __ffs(data);
+
+ if (hweight32(data) != 1 || lsb_idx > 4) {
+ dev_err(dev, "Invalid data in adi,fault-q\n");
+ return -EINVAL;
+ }
+
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_FLT_Q_MASK, lsb_idx);
+ } else {
+ /*
+ * Set default value.
+ */
+ switch (type) {
+ case max31827:
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_FLT_Q_MASK,
+ MAX31827_FLT_Q_1);
+ break;
+ case max31828:
+ case max31829:
+ res |= FIELD_PREP(MAX31827_CONFIGURATION_FLT_Q_MASK,
+ MAX31827_FLT_Q_4);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return regmap_write(st->regmap, MAX31827_CONFIGURATION_REG, res);
}
static const struct hwmon_channel_info *max31827_info[] = {
@@ -417,25 +623,30 @@ static int max31827_probe(struct i2c_client *client)
if (err)
return dev_err_probe(dev, err, "failed to enable regulator\n");
- err = max31827_init_client(st);
+ err = max31827_init_client(st, dev);
if (err)
return err;
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, st,
&max31827_chip_info,
- NULL);
+ max31827_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static const struct i2c_device_id max31827_i2c_ids[] = {
- { "max31827", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, max31827_i2c_ids);
-
static const struct of_device_id max31827_of_match[] = {
- { .compatible = "adi,max31827" },
+ {
+ .compatible = "adi,max31827",
+ .data = (void *)max31827
+ },
+ {
+ .compatible = "adi,max31828",
+ .data = (void *)max31828
+ },
+ {
+ .compatible = "adi,max31829",
+ .data = (void *)max31829
+ },
{ }
};
MODULE_DEVICE_TABLE(of, max31827_of_match);
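The descending table lookup in temp1_resolution_store() above picks the first resolution whose step size (in millidegrees) is not larger than the requested value, clamping anything finer to 12-bit. A standalone sketch of that lookup; the table values mirror max31827_resolutions[] and the main() harness is an assumption for illustration:

	#include <stddef.h>
	#include <stdio.h>

	/* Step sizes in millidegrees, as in max31827_resolutions[] */
	static const unsigned int steps[] = { 1000, 500, 250, 62 };

	/* First entry <= the requested step wins; requests finer than
	 * 62 mC clamp to the 12-bit entry. */
	static size_t step_to_index(unsigned int val)
	{
		size_t idx = 0;

		while (idx < sizeof(steps) / sizeof(steps[0]) && val < steps[idx])
			idx++;
		if (idx == sizeof(steps) / sizeof(steps[0]))
			idx--;
		return idx;
	}

	int main(void)
	{
		printf("%zu\n", step_to_index(250));	/* 2 -> 10-bit mode */
		printf("%zu\n", step_to_index(10));	/* 3 -> 12-bit mode */
		return 0;
	}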
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index cc8428a30..9649c6611 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -26,7 +26,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/thermal.h>
/*
@@ -763,8 +763,6 @@ static int max6650_probe(struct i2c_client *client)
{
struct thermal_cooling_device *cooling_dev;
struct device *dev = &client->dev;
- const struct of_device_id *of_id =
- of_match_device(of_match_ptr(max6650_dt_match), dev);
struct max6650_data *data;
struct device *hwmon_dev;
int err;
@@ -776,8 +774,8 @@ static int max6650_probe(struct i2c_client *client)
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->nr_fans = of_id ? (int)(uintptr_t)of_id->data :
- i2c_match_id(max6650_id, client)->driver_data;
+
+ data->nr_fans = (uintptr_t)i2c_get_match_data(client);
/*
* Initialize the max6650 chip
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index f3bf2e470..9fbab8f02 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -63,19 +63,19 @@
/* used to set data->name = nct6775_device_names[data->sio_kind] */
static const char * const nct6775_device_names[] = {
- "nct6106",
- "nct6116",
- "nct6775",
- "nct6776",
- "nct6779",
- "nct6791",
- "nct6792",
- "nct6793",
- "nct6795",
- "nct6796",
- "nct6797",
- "nct6798",
- "nct6799",
+ [nct6106] = "nct6106",
+ [nct6116] = "nct6116",
+ [nct6775] = "nct6775",
+ [nct6776] = "nct6776",
+ [nct6779] = "nct6779",
+ [nct6791] = "nct6791",
+ [nct6792] = "nct6792",
+ [nct6793] = "nct6793",
+ [nct6795] = "nct6795",
+ [nct6796] = "nct6796",
+ [nct6797] = "nct6797",
+ [nct6798] = "nct6798",
+ [nct6799] = "nct6799",
};
/* Common and NCT6775 specific data */
@@ -767,9 +767,9 @@ static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
-static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
-static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
-static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c };
+static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3, 0, 0 };
+static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04, 0, 0 };
+static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c, 0xd8, 0xd9 };
static const u16 NCT6106_REG_FAN_MODE[] = { 0x113, 0x123, 0x133 };
static const u16 NCT6106_REG_TEMP_SOURCE[] = {
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5 };
@@ -3604,7 +3604,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
break;
case nct6116:
data->in_num = 9;
- data->pwm_num = 3;
+ data->pwm_num = 5;
data->auto_pwm_num = 4;
data->temp_fixed_num = 3;
data->num_temp_alarms = 3;
diff --git a/drivers/hwmon/nct6775-i2c.c b/drivers/hwmon/nct6775-i2c.c
index 87a4fc78c..aff69fa50 100644
--- a/drivers/hwmon/nct6775-i2c.c
+++ b/drivers/hwmon/nct6775-i2c.c
@@ -21,7 +21,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include "nct6775.h"
@@ -155,23 +155,13 @@ static const struct regmap_config nct6775_i2c_regmap_config = {
static int nct6775_i2c_probe(struct i2c_client *client)
{
struct nct6775_data *data;
- const struct of_device_id *of_id;
- const struct i2c_device_id *i2c_id;
struct device *dev = &client->dev;
- of_id = of_match_device(nct6775_i2c_of_match, dev);
- i2c_id = i2c_match_id(nct6775_i2c_id, client);
-
- if (of_id && (unsigned long)of_id->data != i2c_id->driver_data)
- dev_notice(dev, "Device mismatch: %s in device tree, %s detected\n",
- of_id->name, i2c_id->name);
-
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->kind = i2c_id->driver_data;
-
+ data->kind = (enum kinds)(uintptr_t)i2c_get_match_data(client);
data->read_only = true;
data->driver_data = client;
data->driver_init = nct6775_i2c_probe_init;
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 0adeeab7e..9aa4dcf4a 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -23,19 +23,19 @@
enum sensor_access { access_direct, access_asuswmi };
static const char * const nct6775_sio_names[] __initconst = {
- "NCT6106D",
- "NCT6116D",
- "NCT6775F",
- "NCT6776D/F",
- "NCT6779D",
- "NCT6791D",
- "NCT6792D",
- "NCT6793D",
- "NCT6795D",
- "NCT6796D",
- "NCT6797D",
- "NCT6798D",
- "NCT6796D-S/NCT6799D-R",
+ [nct6106] = "NCT6106D",
+ [nct6116] = "NCT6116D",
+ [nct6775] = "NCT6775F",
+ [nct6776] = "NCT6776D/F",
+ [nct6779] = "NCT6779D",
+ [nct6791] = "NCT6791D",
+ [nct6792] = "NCT6792D",
+ [nct6793] = "NCT6793D",
+ [nct6795] = "NCT6795D",
+ [nct6796] = "NCT6796D",
+ [nct6797] = "NCT6797D",
+ [nct6798] = "NCT6798D",
+ [nct6799] = "NCT6796D-S/NCT6799D-R",
};
static unsigned short force_id;
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index 296eff99d..d31e7a030 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
+enum kinds { nct6106 = 1, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
nct6793, nct6795, nct6796, nct6797, nct6798, nct6799 };
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 4702e4edc..904816abb 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -46,9 +46,9 @@
#define NPCM7XX_PWM_CTRL_CH3_EN_BIT BIT(16)
/* Define the maximum PWM channel number */
-#define NPCM7XX_PWM_MAX_CHN_NUM 8
+#define NPCM7XX_PWM_MAX_CHN_NUM 12
#define NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE 4
-#define NPCM7XX_PWM_MAX_MODULES 2
+#define NPCM7XX_PWM_MAX_MODULES 3
/* Define the Counter Register, value = 100 for match 100% */
#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
@@ -171,6 +171,10 @@
#define FAN_PREPARE_TO_GET_FIRST_CAPTURE 0x01
#define FAN_ENOUGH_SAMPLE 0x02
+struct npcm_hwmon_info {
+ u32 pwm_max_channel;
+};
+
struct npcm7xx_fan_dev {
u8 fan_st_flg;
u8 fan_pls_per_rev;
@@ -191,6 +195,7 @@ struct npcm7xx_cooling_device {
struct npcm7xx_pwm_fan_data {
void __iomem *pwm_base;
void __iomem *fan_base;
+ int pwm_modules;
unsigned long pwm_clk_freq;
unsigned long fan_clk_freq;
struct clk *pwm_clk;
@@ -204,6 +209,7 @@ struct npcm7xx_pwm_fan_data {
struct timer_list fan_timer;
struct npcm7xx_fan_dev fan_dev[NPCM7XX_FAN_MAX_CHN_NUM];
struct npcm7xx_cooling_device *cdev[NPCM7XX_PWM_MAX_CHN_NUM];
+ const struct npcm_hwmon_info *info;
u8 fan_select;
};
@@ -542,7 +548,7 @@ static umode_t npcm7xx_pwm_is_visible(const void *_data, u32 attr, int channel)
{
const struct npcm7xx_pwm_fan_data *data = _data;
- if (!data->pwm_present[channel])
+ if (!data->pwm_present[channel] || channel >= data->info->pwm_max_channel)
return 0;
switch (attr) {
@@ -638,6 +644,10 @@ static const struct hwmon_channel_info * const npcm7xx_info[] = {
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
HWMON_PWM_INPUT),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT,
@@ -670,6 +680,14 @@ static const struct hwmon_chip_info npcm7xx_chip_info = {
.info = npcm7xx_info,
};
+static const struct npcm_hwmon_info npxm7xx_hwmon_info = {
+ .pwm_max_channel = 8,
+};
+
+static const struct npcm_hwmon_info npxm8xx_hwmon_info = {
+ .pwm_max_channel = 12,
+};
+
static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
{
int m, ch;
@@ -693,7 +711,7 @@ static u32 npcm7xx_pwm_init(struct npcm7xx_pwm_fan_data *data)
/* Setting PWM Prescale Register value register to both modules */
prescale_val |= (prescale_val << NPCM7XX_PWM_PRESCALE_SHIFT_CH01);
- for (m = 0; m < NPCM7XX_PWM_MAX_MODULES ; m++) {
+ for (m = 0; m < data->pwm_modules; m++) {
iowrite32(prescale_val, NPCM7XX_PWM_REG_PR(data->pwm_base, m));
iowrite32(NPCM7XX_PWM_PRESCALE2_DEFAULT,
NPCM7XX_PWM_REG_CSR(data->pwm_base, m));
@@ -925,6 +943,12 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ data->info = device_get_match_data(dev);
+ if (!data->info)
+ return -EINVAL;
+
+ data->pwm_modules = data->info->pwm_max_channel / NPCM7XX_PWM_MAX_CHN_NUM_IN_A_MODULE;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
if (!res) {
dev_err(dev, "pwm resource not found\n");
@@ -962,7 +986,7 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
output_freq = npcm7xx_pwm_init(data);
npcm7xx_fan_init(data);
- for (cnt = 0; cnt < NPCM7XX_PWM_MAX_MODULES ; cnt++)
+ for (cnt = 0; cnt < data->pwm_modules; cnt++)
mutex_init(&data->pwm_lock[cnt]);
for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
@@ -1017,7 +1041,8 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
}
static const struct of_device_id of_pwm_fan_match_table[] = {
- { .compatible = "nuvoton,npcm750-pwm-fan", },
+ { .compatible = "nuvoton,npcm750-pwm-fan", .data = &npxm7xx_hwmon_info},
+ { .compatible = "nuvoton,npcm845-pwm-fan", .data = &npxm8xx_hwmon_info},
{},
};
MODULE_DEVICE_TABLE(of, of_pwm_fan_match_table);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 926ea1fe1..9e9681b2e 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -323,7 +323,11 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
}
/* Voltages */
- for (i = 0; i < data->innr; i++) {
+ /*
+ * The min() below does not have any practical meaning and is
+ * only needed to silence a warning observed with gcc 12+.
+ */
+ for (i = 0; i < min(data->innr, ARRAY_SIZE(data->in)); i++) {
data->in_status[i] = pc87360_read_value(data, LD_IN, i,
PC87365_REG_IN_STATUS);
/* Clear bits */
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index 5ca4d04e4..4a72e9712 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -47,7 +47,7 @@
#define GET_TEMP_MAX(x) (((x) & DIMM_TEMP_MAX) >> 8)
#define GET_TEMP_CRIT(x) (((x) & DIMM_TEMP_CRIT) >> 16)
-#define NO_DIMM_RETRY_COUNT_MAX 5
+#define NO_DIMM_RETRY_COUNT_MAX 120
struct peci_dimmtemp;
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index b4e93bd58..294808f52 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -227,6 +227,16 @@ config SENSORS_LTC3815
This driver can also be built as a module. If so, the module will
be called ltc3815.
+config SENSORS_LTC4286
+ bool "Analog Devices LTC4286"
+ help
+ LTC4286 is an integrated solution for hot swap applications that
+ allows a board to be safely inserted and removed from a live
+ backplane.
+ This chip can be used to monitor voltage, current, power and temperature.
+ If you say yes here you get hardware monitoring support for Analog
+ Devices LTC4286.
+
config SENSORS_MAX15301
tristate "Maxim MAX15301"
help
@@ -299,6 +309,15 @@ config SENSORS_MAX8688
This driver can also be built as a module. If so, the module will
be called max8688.
+config SENSORS_MP2856
+ tristate "MPS MP2856"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2856 MP2857 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2856.
+
config SENSORS_MP2888
tristate "MPS MP2888"
help
@@ -333,6 +352,15 @@ config SENSORS_MP5023
This driver can also be built as a module. If so, the module will
be called mp5023.
+config SENSORS_MP5990
+ tristate "MPS MP5990"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP5990.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp5990.
+
config SENSORS_MPQ7932_REGULATOR
bool "Regulator support for MPQ7932"
depends on SENSORS_MPQ7932 && REGULATOR
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 84ee960a6..cf8a76744 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LT7182S) += lt7182s.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
+obj-$(CONFIG_SENSORS_LTC4286) += ltc4286.o
obj-$(CONFIG_SENSORS_MAX15301) += max15301.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX16601) += max16601.o
@@ -32,9 +33,11 @@ obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_MP2856) += mp2856.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
+obj-$(CONFIG_SENSORS_MP5990) += mp5990.o
obj-$(CONFIG_SENSORS_MPQ7932) += mpq7932.o
obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 929fa6d34..3a20df5a4 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -14,10 +14,10 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "pmbus.h"
-enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056 = 1, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -468,8 +468,6 @@ static int lm25066_probe(struct i2c_client *client)
struct lm25066_data *data;
struct pmbus_driver_info *info;
const struct __coeff *coeff;
- const struct of_device_id *of_id;
- const struct i2c_device_id *i2c_id;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA))
@@ -484,14 +482,8 @@ static int lm25066_probe(struct i2c_client *client)
if (config < 0)
return config;
- i2c_id = i2c_match_id(lm25066_id, client);
+ data->id = (enum chips)(unsigned long)i2c_get_match_data(client);
- of_id = of_match_device(lm25066_of_match, &client->dev);
- if (of_id && (unsigned long)of_id->data != i2c_id->driver_data)
- dev_notice(&client->dev, "Device mismatch: %s in device tree, %s detected\n",
- of_id->name, i2c_id->name);
-
- data->id = i2c_id->driver_data;
info = &data->info;
info->pages = 1;
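Several hunks in this diff (max6650, nct6775-i2c, lm25066, max31827, mp2856) replace open-coded of_match_device()/i2c_match_id() pairs with i2c_get_match_data(), which transparently resolves the match data from either the OF table or the I2C ID table. It returns NULL when nothing matches, which is why the chip enums above now start at 1. A minimal sketch of the pattern in a hypothetical probe; example_probe, chip_a and chip_b are assumed names, not from any one driver:

	#include <linux/i2c.h>
	#include <linux/mod_devicetable.h>

	enum chips { chip_a = 1, chip_b };	/* start at 1 so 0 means "no match" */

	static int example_probe(struct i2c_client *client)
	{
		enum chips kind;

		/* Covers both OF- and I2C-ID-based instantiation */
		kind = (enum chips)(uintptr_t)i2c_get_match_data(client);
		if (!kind)
			return -ENODEV;

		dev_info(&client->dev, "bound as variant %d\n", kind);
		return 0;
	}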
diff --git a/drivers/hwmon/pmbus/ltc4286.c b/drivers/hwmon/pmbus/ltc4286.c
new file mode 100644
index 000000000..9e7ceeb7e
--- /dev/null
+++ b/drivers/hwmon/pmbus/ltc4286.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include "pmbus.h"
+
+/* LTC4286 register */
+#define LTC4286_MFR_CONFIG1 0xF2
+
+/* LTC4286 configuration */
+#define VRANGE_SELECT_BIT BIT(1)
+
+#define LTC4286_MFR_ID_SIZE 3
+
+/*
+ * Initialize the m, b, R coefficients to the default settings given in the
+ * LTC4286 datasheet (March 22, 2022 version), table 3, page 16.
+ */
+static struct pmbus_driver_info ltc4286_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 32,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_OUT] = 32,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_CURRENT_OUT] = 1024,
+ .b[PSC_CURRENT_OUT] = 0,
+ /*
+ * The rsense value used in the m, b, R formula in the LTC4286 datasheet
+ * is in ohms, but the rsense value supplied by the user is in microohms.
+ * Thus, any m, b, R setting that involves rsense must be shifted by
+ * 6 decimal digits.
+ */
+ .R[PSC_CURRENT_OUT] = 3 - 6,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ /*
+ * The rsense value used in the m, b, R formula in the LTC4286 datasheet
+ * is in ohms, but the rsense value supplied by the user is in microohms.
+ * Thus, any m, b, R setting that involves rsense must be shifted by
+ * 6 decimal digits.
+ */
+ .R[PSC_POWER] = 4 - 6,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 273,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static const struct i2c_device_id ltc4286_id[] = {
+ { "ltc4286", 0 },
+ { "ltc4287", 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc4286_id);
+
+static int ltc4286_probe(struct i2c_client *client)
+{
+ int ret;
+ const struct i2c_device_id *mid;
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
+ struct pmbus_driver_info *info;
+ u32 rsense;
+ int vrange_nval, vrange_oval;
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, block_buffer);
+ if (ret < 0) {
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read manufacturer id\n");
+ }
+
+ /*
+ * Per the LTC4286 datasheet, page 20,
+ * the manufacturer ID is "LTC".
+ */
+ if (ret != LTC4286_MFR_ID_SIZE ||
+ strncmp(block_buffer, "LTC", LTC4286_MFR_ID_SIZE)) {
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Manufacturer id mismatch\n");
+ }
+
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, block_buffer);
+ if (ret < 0) {
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read manufacturer model\n");
+ }
+
+ for (mid = ltc4286_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0])
+ return dev_err_probe(&client->dev, -ENODEV,
+ "Unsupported device\n");
+
+ if (of_property_read_u32(client->dev.of_node,
+ "shunt-resistor-micro-ohms", &rsense))
+ rsense = 300; /* 0.3 mOhm if not set via DT */
+
+ if (rsense == 0)
+ return -EINVAL;
+
+ /* Make sure the m coefficients computed from rsense below cannot overflow */
+ if (rsense > (INT_MAX / 1024))
+ return -EINVAL;
+
+ info = devm_kmemdup(&client->dev, &ltc4286_info, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Check MFR1 CONFIG register bit 1 VRANGE_SELECT before driver loading */
+ vrange_oval = i2c_smbus_read_word_data(client, LTC4286_MFR_CONFIG1);
+ if (vrange_oval < 0)
+ return dev_err_probe(&client->dev, vrange_oval,
+ "Failed to read manufacturer configuration one\n");
+ vrange_nval = vrange_oval;
+
+ if (device_property_read_bool(&client->dev, "adi,vrange-low-enable")) {
+ vrange_nval &=
+ ~VRANGE_SELECT_BIT; /* VRANGE_SELECT = 0, 25.6 volts */
+
+ info->m[PSC_VOLTAGE_IN] = 128;
+ info->m[PSC_VOLTAGE_OUT] = 128;
+ info->m[PSC_POWER] = 4 * rsense;
+ } else {
+ vrange_nval |=
+ VRANGE_SELECT_BIT; /* VRANGE_SELECT = 1, 102.4 volts */
+
+ info->m[PSC_POWER] = rsense;
+ }
+ if (vrange_nval != vrange_oval) {
+ /* Set MFR1 CONFIG register bit 1 VRANGE_SELECT */
+ ret = i2c_smbus_write_word_data(client, LTC4286_MFR_CONFIG1,
+ vrange_nval);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to set vrange\n");
+ }
+
+ info->m[PSC_CURRENT_OUT] = 1024 * rsense;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct of_device_id ltc4286_of_match[] = {
+ { .compatible = "lltc,ltc4286" },
+ { .compatible = "lltc,ltc4287" },
+ {}
+};
+
+static struct i2c_driver ltc4286_driver = {
+ .driver = {
+ .name = "ltc4286",
+ .of_match_table = ltc4286_of_match,
+ },
+ .probe = ltc4286_probe,
+ .id_table = ltc4286_id,
+};
+
+module_i2c_driver(ltc4286_driver);
+
+MODULE_AUTHOR("Delphine CC Chiu <Delphine_CC_Chiu@wiwynn.com>");
+MODULE_DESCRIPTION("PMBUS driver for LTC4286 and compatibles");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/mp2856.c b/drivers/hwmon/pmbus/mp2856.c
new file mode 100644
index 000000000..6969350f5
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2856.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS2856/2857
+ * Monolithic Power Systems VR Controllers
+ *
+ * Copyright (C) 2023 Quanta Computer Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include "pmbus.h"
+
+/* Vendor specific registers. */
+#define MP2856_MFR_VR_MULTI_CONFIG_R1 0x0d
+#define MP2856_MFR_VR_MULTI_CONFIG_R2 0x1d
+
+#define MP2856_MUL1_BOOT_SR_R2 0x10
+#define MP2856_VR_ACTIVE BIT(15)
+
+#define MP2856_MFR_VR_CONFIG2 0x5e
+#define MP2856_VOUT_MODE BIT(11)
+
+#define MP2856_MFR_VR_CONFIG1 0x68
+#define MP2856_DRMOS_KCS GENMASK(13, 12)
+
+#define MP2856_MFR_READ_CS1_2_R1 0x82
+#define MP2856_MFR_READ_CS3_4_R1 0x83
+#define MP2856_MFR_READ_CS5_6_R1 0x84
+#define MP2856_MFR_READ_CS7_8_R1 0x85
+#define MP2856_MFR_READ_CS9_10_R1 0x86
+#define MP2856_MFR_READ_CS11_12_R1 0x87
+
+#define MP2856_MFR_READ_CS1_2_R2 0x85
+#define MP2856_MFR_READ_CS3_4_R2 0x86
+#define MP2856_MFR_READ_CS5_6_R2 0x87
+
+#define MP2856_MAX_PHASE_RAIL1 8
+#define MP2856_MAX_PHASE_RAIL2 4
+
+#define MP2857_MAX_PHASE_RAIL1 12
+#define MP2857_MAX_PHASE_RAIL2 4
+
+#define MP2856_PAGE_NUM 2
+
+enum chips { mp2856 = 1, mp2857 };
+
+static const int mp2856_max_phases[][MP2856_PAGE_NUM] = {
+ [mp2856] = { MP2856_MAX_PHASE_RAIL1, MP2856_MAX_PHASE_RAIL2 },
+ [mp2857] = { MP2857_MAX_PHASE_RAIL1, MP2857_MAX_PHASE_RAIL2 },
+};
+
+static const struct i2c_device_id mp2856_id[] = {
+ {"mp2856", mp2856},
+ {"mp2857", mp2857},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mp2856_id);
+
+struct mp2856_data {
+ struct pmbus_driver_info info;
+ int vout_format[MP2856_PAGE_NUM];
+ int curr_sense_gain[MP2856_PAGE_NUM];
+ int max_phases[MP2856_PAGE_NUM];
+ enum chips chip_id;
+};
+
+#define to_mp2856_data(x) container_of(x, struct mp2856_data, info)
+
+#define MAX_LIN_MANTISSA (1023 * 1000)
+#define MIN_LIN_MANTISSA (511 * 1000)
+
+static u16 val2linear11(s64 val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ if (val == 0)
+ return 0;
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_LIN_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_LIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = clamp_val(DIV_ROUND_CLOSEST_ULL(val, 1000), 0, 0x3ff);
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
+static int
+mp2856_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
+ u16 mask)
+{
+ int ret = pmbus_read_word_data(client, page, phase, reg);
+
+ return (ret > 0) ? ret & mask : ret;
+}
+
+static int
+mp2856_read_vout(struct i2c_client *client, struct mp2856_data *data, int page,
+ int phase, u8 reg)
+{
+ int ret;
+
+ ret = mp2856_read_word_helper(client, page, phase, reg,
+ GENMASK(9, 0));
+ if (ret < 0)
+ return ret;
+
+ /* convert vout result to direct format */
+ ret = (data->vout_format[page] == vid) ?
+ ((ret + 49) * 5) : ((ret * 1000) >> 8);
+
+ return ret;
+}
+
+static int
+mp2856_read_phase(struct i2c_client *client, struct mp2856_data *data,
+ int page, int phase, u8 reg)
+{
+ int ret;
+ int val;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!((phase + 1) % MP2856_PAGE_NUM))
+ ret >>= 8;
+ ret &= 0xff;
+
+ /*
+ * Output value is calculated as: (READ_CSx * 12.5mV - 1.23V) / (Kcs * Rcs)
+ */
+ val = (ret * 125) - 12300;
+
+ return val2linear11(val);
+}
+
+static int
+mp2856_read_phases(struct i2c_client *client, struct mp2856_data *data,
+ int page, int phase)
+{
+ int ret;
+
+ if (page == 0) {
+ switch (phase) {
+ case 0 ... 1:
+ ret = mp2856_read_phase(client, data, page, phase,
+ MP2856_MFR_READ_CS1_2_R1);
+ break;
+ case 2 ... 3:
+ ret = mp2856_read_phase(client, data, page, phase,
+ MP2856_MFR_READ_CS3_4_R1);
+ break;
+ case 4 ... 5:
+ ret = mp2856_read_phase(client, data, page, phase,
+ MP2856_MFR_READ_CS5_6_R1);
+ break;
+ case 6 ... 7:
+ ret = mp2856_read_phase(client, data, page, phase,
+ MP2856_MFR_READ_CS7_8_R1);
+ break;
+ default:
+ return -ENODATA;
+ }
+ } else {
+ switch (phase) {
+ case 0 ... 1:
+ ret = mp2856_read_phase(client, data, page, phase,
+ MP2856_MFR_READ_CS1_2_R2);
+ break;
+ case 2 ... 3:
+ ret = mp2856_read_phase(client, data, page, phase,
+ MP2856_MFR_READ_CS3_4_R2);
+ break;
+ default:
+ return -ENODATA;
+ }
+ }
+ return ret;
+}
+
+static int
+mp2856_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2856_data *data = to_mp2856_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_READ_VOUT:
+ ret = mp2856_read_vout(client, data, page, phase, reg);
+ break;
+ case PMBUS_READ_IOUT:
+ if (phase != 0xff)
+ ret = mp2856_read_phases(client, data, page, phase);
+ else
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ break;
+ default:
+ return -ENODATA;
+ }
+
+ return ret;
+}
+
+static int
+mp2856_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /* Enforce VOUT direct format. */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int
+mp2856_identify_multiphase(struct i2c_client *client, u8 reg, u8 max_phase,
+ u16 mask)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= mask;
+ return (ret >= max_phase) ? max_phase : ret;
+}
+
+static int
+mp2856_identify_multiphase_rail1(struct i2c_client *client,
+ struct mp2856_data *data)
+{
+ int ret, i;
+
+ ret = mp2856_identify_multiphase(client, MP2856_MFR_VR_MULTI_CONFIG_R1,
+ MP2856_MAX_PHASE_RAIL1, GENMASK(3, 0));
+ if (ret < 0)
+ return ret;
+
+ data->info.phases[0] = (ret > data->max_phases[0]) ?
+ data->max_phases[0] : ret;
+
+ for (i = 0 ; i < data->info.phases[0]; i++)
+ data->info.pfunc[i] |= PMBUS_HAVE_IOUT;
+
+ return 0;
+}
+
+static int
+mp2856_identify_multiphase_rail2(struct i2c_client *client,
+ struct mp2856_data *data)
+{
+ int ret, i;
+
+ ret = mp2856_identify_multiphase(client, MP2856_MFR_VR_MULTI_CONFIG_R2,
+ MP2856_MAX_PHASE_RAIL2, GENMASK(2, 0));
+ if (ret < 0)
+ return ret;
+
+ data->info.phases[1] = (ret > data->max_phases[1]) ?
+ data->max_phases[1] : ret;
+
+ for (i = 0 ; i < data->info.phases[0]; i++)
+ data->info.pfunc[i] |= PMBUS_HAVE_IOUT;
+
+ return 0;
+}
+
+static int
+mp2856_current_sense_gain_get(struct i2c_client *client,
+ struct mp2856_data *data)
+{
+ int i, ret;
+
+ /*
+ * Obtain DrMOS current sense gain of power stage from the register
+ * MP2856_MFR_VR_CONFIG1, bits 13-12. The value is selected as below:
+ * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A. Other
+ * values are invalid.
+ */
+ for (i = 0 ; i < data->info.pages; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_read_word_data(client,
+ MP2856_MFR_VR_CONFIG1);
+ if (ret < 0)
+ return ret;
+
+ switch ((ret & MP2856_DRMOS_KCS) >> 12) {
+ case 0:
+ data->curr_sense_gain[i] = 50;
+ break;
+ case 1:
+ data->curr_sense_gain[i] = 85;
+ break;
+ case 2:
+ data->curr_sense_gain[i] = 97;
+ break;
+ default:
+ data->curr_sense_gain[i] = 100;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int
+mp2856_identify_vout_format(struct i2c_client *client,
+ struct mp2856_data *data)
+{
+ int i, ret;
+
+ for (i = 0; i < data->info.pages; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MP2856_MFR_VR_CONFIG2);
+ if (ret < 0)
+ return ret;
+
+ data->vout_format[i] = (ret & MP2856_VOUT_MODE) ? linear : vid;
+ }
+ return 0;
+}
+
+static bool
+mp2856_is_rail2_active(struct i2c_client *client)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return true;
+
+ ret = i2c_smbus_read_word_data(client, MP2856_MUL1_BOOT_SR_R2);
+ if (ret < 0)
+ return true;
+
+ return (ret & MP2856_VR_ACTIVE) ? true : false;
+}
+
+static struct pmbus_driver_info mp2856_info = {
+ .pages = MP2856_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP,
+ .read_byte_data = mp2856_read_byte_data,
+ .read_word_data = mp2856_read_word_data,
+};
+
+static int mp2856_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp2856_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2856_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->chip_id = (enum chips)(uintptr_t)i2c_get_match_data(client);
+
+ memcpy(data->max_phases, mp2856_max_phases[data->chip_id],
+ sizeof(data->max_phases));
+
+ memcpy(&data->info, &mp2856_info, sizeof(*info));
+ info = &data->info;
+
+ /* Identify multiphase configuration. */
+ ret = mp2856_identify_multiphase_rail1(client, data);
+ if (ret < 0)
+ return ret;
+
+ if (mp2856_is_rail2_active(client)) {
+ ret = mp2856_identify_multiphase_rail2(client, data);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* rail2 is not active */
+ info->pages = 1;
+ }
+
+ /* Obtain current sense gain of power stage. */
+ ret = mp2856_current_sense_gain_get(client, data);
+ if (ret)
+ return ret;
+
+ /* Identify vout format. */
+ ret = mp2856_identify_vout_format(client, data);
+ if (ret)
+ return ret;
+
+ /* set the device to page 0 */
+ i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct of_device_id __maybe_unused mp2856_of_match[] = {
+ {.compatible = "mps,mp2856", .data = (void *)mp2856},
+ {.compatible = "mps,mp2857", .data = (void *)mp2857},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2856_of_match);
+
+static struct i2c_driver mp2856_driver = {
+ .driver = {
+ .name = "mp2856",
+ .of_match_table = mp2856_of_match,
+ },
+ .probe = mp2856_probe,
+ .id_table = mp2856_id,
+};
+
+module_i2c_driver(mp2856_driver);
+
+MODULE_AUTHOR("Peter Yin <peter.yin@quantatw.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2856/MP2857 device");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
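val2linear11() in the mp2856.c hunks above packs a milli-unit value into PMBus LINEAR11 format (5-bit exponent in bits 15:11, 11-bit two's-complement mantissa below). A positive-only sketch of that encoding; the 27500 milli-unit sample and the main() harness are assumptions for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Positive-only sketch of val2linear11(): shift the milli-unit value
	 * into [511000, 1023000), then round from milli-units to units. */
	static uint16_t encode_linear11(long val)
	{
		int exponent = 0;

		while (val >= 1023 * 1000L && exponent < 15) {	/* too big */
			exponent++;
			val >>= 1;
		}
		while (val < 511 * 1000L && exponent > -15) {	/* gain precision */
			exponent--;
			val <<= 1;
		}
		/* 5-bit exponent in bits 15:11, rounded 11-bit mantissa below */
		return ((exponent & 0x1f) << 11) | (((val + 500) / 1000) & 0x7ff);
	}

	int main(void)
	{
		uint16_t w = encode_linear11(27500);	/* 27.5 units */

		/* decode: mantissa * 2^exponent = 880 / 32 = 27.5 */
		printf("mantissa=%d exponent=%d\n",
		       w & 0x7ff, (int16_t)w >> 11);	/* 880, -5 */
		return 0;
	}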
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index b9bb469e2..e5fa10b3b 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -126,6 +126,21 @@ static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = {
#define to_mp2975_data(x) container_of(x, struct mp2975_data, info)
+static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * Report direct format as configured by MFR_DC_LOOP_CTRL.
+ * Unlike on MP2971/MP2973 the reported VOUT_MODE isn't automatically
+ * internally updated, but always reads as PB_VOUT_MODE_VID.
+ */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
static int
mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
u16 mask)
@@ -869,6 +884,7 @@ static struct pmbus_driver_info mp2975_info = {
PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL,
+ .read_byte_data = mp2975_read_byte_data,
.read_word_data = mp2975_read_word_data,
#if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
.num_regulators = 1,
diff --git a/drivers/hwmon/pmbus/mp5990.c b/drivers/hwmon/pmbus/mp5990.c
new file mode 100644
index 000000000..1dfbab25a
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp5990.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MPS MP5990 Hot-Swap Controller
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define MP5990_EFUSE_CFG (0xC4)
+#define MP5990_VOUT_FORMAT BIT(9)
+
+struct mp5990_data {
+ struct pmbus_driver_info info;
+ u8 vout_mode;
+ u8 vout_linear_exponent;
+};
+
+#define to_mp5990_data(x) container_of(x, struct mp5990_data, info)
+
+static int mp5990_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp5990_data *data = to_mp5990_data(info);
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ if (data->vout_mode == linear) {
+ /*
+ * The VOUT format used by the chip is linear11,
+ * not linear16. Report that VOUT is in linear mode
+ * and return exponent value extracted while probing
+ * the chip.
+ */
+ return data->vout_linear_exponent;
+ }
+
+ /*
+ * The datasheet does not document the VOUT_MODE command,
+ * but the device responds to it with a default value of 0x17.
+ * In the standard, 0x17 denotes linear mode.
+ * Therefore, we should report that VOUT is in direct
+ * format when the chip is configured for it.
+ */
+ return PB_VOUT_MODE_DIRECT;
+
+ default:
+ return -ENODATA;
+ }
+}
+
+static int mp5990_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp5990_data *data = to_mp5990_data(info);
+ int ret;
+ s32 mantissa;
+
+ switch (reg) {
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+ /*
+ * Because the VOUT format used by the chip is linear11 and not
+ * linear16, we disregard bits[15:11]. The exponent is reported
+ * as part of the VOUT_MODE command.
+ */
+ if (data->vout_mode == linear) {
+ mantissa = ((s16)((ret & 0x7ff) << 5)) >> 5;
+ ret = mantissa;
+ }
+ break;
+ default:
+ return -ENODATA;
+ }
+
+ return ret;
+}
+
+static struct pmbus_driver_info mp5990_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 32,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 32,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_OUT] = 16,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 0,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_PIN |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = mp5990_read_byte_data,
+ .read_word_data = mp5990_read_word_data,
+};
+
+static int mp5990_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp5990_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp5990_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp5990_info, sizeof(*info));
+ info = &data->info;
+
+ /* Read Vout Config */
+ ret = i2c_smbus_read_word_data(client, MP5990_EFUSE_CFG);
+ if (ret < 0) {
+ dev_err(&client->dev, "Can't get vout mode.");
+ return ret;
+ }
+
+ /*
+ * EFUSE_CFG (0xC4) bit9=1 is linear mode, bit9=0 is direct mode.
+ */
+ if (ret & MP5990_VOUT_FORMAT) {
+ data->vout_mode = linear;
+ data->info.format[PSC_VOLTAGE_IN] = linear;
+ data->info.format[PSC_VOLTAGE_OUT] = linear;
+ data->info.format[PSC_CURRENT_OUT] = linear;
+ data->info.format[PSC_POWER] = linear;
+ ret = i2c_smbus_read_word_data(client, PMBUS_READ_VOUT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Can't get vout exponent.");
+ return ret;
+ }
+ data->vout_linear_exponent = (u8)((ret >> 11) & 0x1f);
+ } else {
+ data->vout_mode = direct;
+ }
+ return pmbus_do_probe(client, info);
+}
+
+static const struct of_device_id mp5990_of_match[] = {
+ { .compatible = "mps,mp5990" },
+ {}
+};
+
+static const struct i2c_device_id mp5990_id[] = {
+ {"mp5990", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mp5990_id);
+
+static struct i2c_driver mp5990_driver = {
+ .driver = {
+ .name = "mp5990",
+ .of_match_table = mp5990_of_match,
+ },
+ .probe = mp5990_probe,
+ .id_table = mp5990_id,
+};
+module_i2c_driver(mp5990_driver);
+
+MODULE_AUTHOR("Peter Yin <peter.yin@quantatw.com>");
+MODULE_DESCRIPTION("PMBus driver for MP5990 HSC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
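The mantissa handling in mp5990_read_word_data() above sign-extends the 11-bit two's-complement LINEAR11 mantissa by shifting it up to bit 15 and arithmetically back down. A quick standalone check of that idiom; the sample words are assumptions for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the driver's ((s16)((ret & 0x7ff) << 5)) >> 5:
	 * bit 10 of the mantissa becomes the sign bit and propagates. */
	static int16_t mantissa11(uint16_t ret)
	{
		return ((int16_t)((ret & 0x7ff) << 5)) >> 5;
	}

	int main(void)
	{
		printf("%d\n", mantissa11(0x07ff));	/* -1 */
		printf("%d\n", mantissa11(0x0400));	/* -1024 */
		printf("%d\n", mantissa11(0x03ff));	/* 1023 */
		return 0;
	}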
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 6e4516c2a..b67bc9e83 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -151,7 +151,7 @@ static int pwm_fan_power_on(struct pwm_fan_ctx *ctx)
}
state->enabled = true;
- ret = pwm_apply_state(ctx->pwm, state);
+ ret = pwm_apply_might_sleep(ctx->pwm, state);
if (ret) {
dev_err(ctx->dev, "failed to enable PWM\n");
goto disable_regulator;
@@ -181,7 +181,7 @@ static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
state->enabled = false;
state->duty_cycle = 0;
- ret = pwm_apply_state(ctx->pwm, state);
+ ret = pwm_apply_might_sleep(ctx->pwm, state);
if (ret) {
dev_err(ctx->dev, "failed to disable PWM\n");
return ret;
@@ -207,7 +207,7 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
period = state->period;
state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
- ret = pwm_apply_state(ctx->pwm, state);
+ ret = pwm_apply_might_sleep(ctx->pwm, state);
if (ret)
return ret;
ret = pwm_fan_power_on(ctx);
@@ -278,7 +278,7 @@ static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val)
state,
&enable_regulator);
- pwm_apply_state(ctx->pwm, state);
+ pwm_apply_might_sleep(ctx->pwm, state);
pwm_fan_switch_power(ctx, enable_regulator);
pwm_fan_update_state(ctx, 0);
}
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 7ee797410..4883755d4 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -49,6 +49,7 @@ DECLARE_CRC8_TABLE(sht4x_crc8_table);
* struct sht4x_data - All the data required to operate an SHT4X chip
* @client: the i2c client associated with the SHT4X
* @lock: a mutex that is used to prevent parallel access to the i2c client
+ * @valid: validity of fields below
* @update_interval: the minimum poll interval
* @last_updated: the previous time that the SHT4X was polled
* @temperature: the latest temperature value received from the SHT4X
@@ -66,7 +67,7 @@ struct sht4x_data {
/**
* sht4x_read_values() - read and parse the raw data from the SHT4X
- * @sht4x_data: the struct sht4x_data to use for the lock
+ * @data: the struct sht4x_data to use for the lock
* Return: 0 if successful, -ERRNO if not
*/
static int sht4x_read_values(struct sht4x_data *data)
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 37531b5c8..0d46edbcb 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -33,7 +33,7 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static struct platform_device *pdev;
+static struct platform_device *smsc47m1_pdev;
#define DRVNAME "smsc47m1"
enum chips { smsc47m1, smsc47m2 };
@@ -840,70 +840,57 @@ error_remove_files:
return err;
}
-static int __exit smsc47m1_remove(struct platform_device *pdev)
+static void __exit smsc47m1_remove(struct platform_device *pdev)
{
struct smsc47m1_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->hwmon_dev);
smsc47m1_remove_files(&pdev->dev);
-
- return 0;
}
-static struct platform_driver smsc47m1_driver = {
+/*
+ * smsc47m1_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is OK because they cannot get unbound at
+ * runtime. The driver needs to be marked with __refdata, otherwise modpost
+ * triggers a section mismatch warning.
+ */
+static struct platform_driver smsc47m1_driver __refdata = {
.driver = {
.name = DRVNAME,
},
- .remove = __exit_p(smsc47m1_remove),
+ .remove_new = __exit_p(smsc47m1_remove),
};
static int __init smsc47m1_device_add(unsigned short address,
const struct smsc47m1_sio_data *sio_data)
{
- struct resource res = {
+ const struct resource res = {
.start = address,
.end = address + SMSC_EXTENT - 1,
.name = DRVNAME,
.flags = IORESOURCE_IO,
};
+ const struct platform_device_info pdevinfo = {
+ .name = DRVNAME,
+ .id = address,
+ .res = &res,
+ .num_res = 1,
+ .data = sio_data,
+ .size_data = sizeof(struct smsc47m1_sio_data),
+ };
int err;
err = smsc47m1_handle_resources(address, sio_data->type, CHECK, NULL);
if (err)
- goto exit;
+ return err;
- pdev = platform_device_alloc(DRVNAME, address);
- if (!pdev) {
- err = -ENOMEM;
+ smsc47m1_pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(smsc47m1_pdev)) {
pr_err("Device allocation failed\n");
- goto exit;
- }
-
- err = platform_device_add_resources(pdev, &res, 1);
- if (err) {
- pr_err("Device resource addition failed (%d)\n", err);
- goto exit_device_put;
- }
-
- err = platform_device_add_data(pdev, sio_data,
- sizeof(struct smsc47m1_sio_data));
- if (err) {
- pr_err("Platform data allocation failed\n");
- goto exit_device_put;
- }
-
- err = platform_device_add(pdev);
- if (err) {
- pr_err("Device addition failed (%d)\n", err);
- goto exit_device_put;
+ return PTR_ERR(smsc47m1_pdev);
}
return 0;
-
-exit_device_put:
- platform_device_put(pdev);
-exit:
- return err;
}
static int __init sm_smsc47m1_init(void)
@@ -917,7 +904,7 @@ static int __init sm_smsc47m1_init(void)
return err;
address = err;
- /* Sets global pdev as a side effect */
+ /* Sets global smsc47m1_pdev as a side effect */
err = smsc47m1_device_add(address, &sio_data);
if (err)
return err;
@@ -929,7 +916,7 @@ static int __init sm_smsc47m1_init(void)
return 0;
exit_device:
- platform_device_unregister(pdev);
+ platform_device_unregister(smsc47m1_pdev);
smsc47m1_restore(&sio_data);
return err;
}
@@ -937,8 +924,8 @@ exit_device:
static void __exit sm_smsc47m1_exit(void)
{
platform_driver_unregister(&smsc47m1_driver);
- smsc47m1_restore(dev_get_platdata(&pdev->dev));
- platform_device_unregister(pdev);
+ smsc47m1_restore(dev_get_platdata(&smsc47m1_pdev->dev));
+ platform_device_unregister(smsc47m1_pdev);
}
MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 8a7cf0873..ea6f4416c 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -19,15 +19,20 @@
* the Free Software Foundation; version 2 of the License.
*/
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/init.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/util_macros.h>
+#include <linux/types.h>
+#include <linux/units.h>
// Common register definition
#define TMP51X_SHUNT_CONFIG 0x00
@@ -97,8 +102,8 @@
#define TMP51X_REMOTE_TEMP_LIMIT_2_POS 8
#define TMP513_REMOTE_TEMP_LIMIT_3_POS 7
-#define TMP51X_VBUS_RANGE_32V 32000000
-#define TMP51X_VBUS_RANGE_16V 16000000
+#define TMP51X_VBUS_RANGE_32V (32 * MICRO)
+#define TMP51X_VBUS_RANGE_16V (16 * MICRO)
// Max and Min value
#define MAX_BUS_VOLTAGE_32_LIMIT 32764
@@ -200,7 +205,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
* on the pga gain setting. 1lsb = 10uV
*/
*val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
- *val = DIV_ROUND_CLOSEST(*val * 10000, data->shunt_uohms);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -216,7 +221,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
*val = sign_extend32(regval, 16) * data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, 1000);
+ *val = DIV_ROUND_CLOSEST(*val, MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -256,7 +261,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enter current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10000);
+ val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
@@ -546,18 +551,16 @@ static int tmp51x_calibrate(struct tmp51x_data *data)
if (data->shunt_uohms == 0)
return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, 0);
- max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * 1000 * 1000,
- data->shunt_uohms);
+ max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * MICRO, data->shunt_uohms);
/*
* Calculate the minimal bit resolution for the current and the power.
* Those values will be used during register interpretation.
*/
- data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * 1000, 32767);
+ data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * MILLI, 32767);
data->pwr_lsb_uw = 20 * data->curr_lsb_ua;
- div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms,
- 1000 * 1000);
+ div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms, MICRO);
return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION,
DIV_ROUND_CLOSEST(40960, div));
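
/*
 * Illustrative note: MILLI and MICRO are 1000UL and 1000000UL from
 * <linux/units.h>, so the substitutions above are value-preserving
 * (e.g. 10 * MILLI == 10000 and 16 * MICRO == 16000000); only the unit
 * intent becomes explicit.
 */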
@@ -626,9 +629,9 @@ static int tmp51x_vbus_range_to_reg(struct device *dev,
} else if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_16V) {
data->shunt_config &= ~TMP51X_BUS_VOLTAGE_MASK;
} else {
- dev_err(dev, "ti,bus-range-microvolt is invalid: %u\n",
- data->vbus_range_uvolt);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL,
+ "ti,bus-range-microvolt is invalid: %u\n",
+ data->vbus_range_uvolt);
}
return 0;
}
@@ -644,8 +647,8 @@ static int tmp51x_pga_gain_to_reg(struct device *dev, struct tmp51x_data *data)
} else if (data->pga_gain == 1) {
data->shunt_config |= CURRENT_SENSE_VOLTAGE_40_MASK;
} else {
- dev_err(dev, "ti,pga-gain is invalid: %u\n", data->pga_gain);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL,
+ "ti,pga-gain is invalid: %u\n", data->pga_gain);
}
return 0;
}
@@ -674,10 +677,10 @@ static int tmp51x_read_properties(struct device *dev, struct tmp51x_data *data)
data->max_channels - 1);
// Check if shunt value is compatible with pga-gain
- if (data->shunt_uohms > data->pga_gain * 40 * 1000 * 1000) {
- dev_err(dev, "shunt-resistor: %u too big for pga_gain: %u\n",
- data->shunt_uohms, data->pga_gain);
- return -EINVAL;
+ if (data->shunt_uohms > data->pga_gain * 40 * MICRO) {
+ return dev_err_probe(dev, -EINVAL,
+ "shunt-resistor: %u too big for pga_gain: %u\n",
+ data->shunt_uohms, data->pga_gain);
}
return 0;
@@ -717,22 +720,17 @@ static int tmp51x_probe(struct i2c_client *client)
data->max_channels = (uintptr_t)i2c_get_match_data(client);
ret = tmp51x_configure(dev, data);
- if (ret < 0) {
- dev_err(dev, "error configuring the device: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "error configuring the device\n");
data->regmap = devm_regmap_init_i2c(client, &tmp51x_regmap_config);
- if (IS_ERR(data->regmap)) {
- dev_err(dev, "failed to allocate register map\n");
- return PTR_ERR(data->regmap);
- }
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "failed to allocate register map\n");
ret = tmp51x_init(data);
- if (ret < 0) {
- dev_err(dev, "error configuring the device: %d\n", ret);
- return -ENODEV;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "error configuring the device\n");
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data,
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index ada694ba9..0c0a932c0 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -84,8 +84,9 @@ static DEFINE_MUTEX(hwspinlock_tree_lock);
* should decide between spin_trylock, spin_trylock_irq and
* spin_trylock_irqsave.
*
- * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
+ * Returns: %0 if we successfully locked the hwspinlock or -EBUSY if
* the hwspinlock was already taken.
+ *
* This function will never sleep.
*/
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
@@ -171,7 +172,7 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
/**
* __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
- * @timeout: timeout value in msecs
+ * @to: timeout value in msecs
* @mode: mode which controls whether local interrupts are disabled or not
* @flags: a pointer to where the caller's interrupt state will be saved at (if
* requested)
@@ -199,9 +200,11 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock);
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
*
- * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * Returns: %0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
- * busy after @timeout msecs). The function will never sleep.
+ * busy after @to msecs).
+ *
+ * The function will never sleep.
*/
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
int mode, unsigned long *flags)
@@ -304,13 +307,12 @@ EXPORT_SYMBOL_GPL(__hwspin_unlock);
/**
* of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
- * @bank: the hwspinlock device bank
* @hwlock_spec: hwlock specifier as found in the device tree
*
* This is a simple translation function, suitable for hwspinlock platform
* drivers that only have a lock specifier length of 1.
*
- * Returns a relative index of the lock within a specified bank on success,
+ * Returns: a relative index of the lock within a specified bank on success,
* or -EINVAL on invalid specifier cell count.
*/
static inline int
@@ -332,9 +334,10 @@ of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
* hwspinlock device, so that it can be requested using the normal
* hwspin_lock_request_specific() API.
*
- * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
- * device is not yet registered, -EINVAL on invalid args specifier value or an
- * appropriate error as returned from the OF parsing of the DT client node.
+ * Returns: the global lock id number on success, -EPROBE_DEFER if the
+ * hwspinlock device is not yet registered, -EINVAL on invalid args
+ * specifier value or an appropriate error as returned from the OF parsing
+ * of the DT client node.
*/
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
@@ -399,9 +402,10 @@ EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
* the hwspinlock device, so that it can be requested using the normal
* hwspin_lock_request_specific() API.
*
- * Returns the global lock id number on success, -EPROBE_DEFER if the hwspinlock
- * device is not yet registered, -EINVAL on invalid args specifier value or an
- * appropriate error as returned from the OF parsing of the DT client node.
+ * Returns: the global lock id number on success, -EPROBE_DEFER if the
+ * hwspinlock device is not yet registered, -EINVAL on invalid args
+ * specifier value or an appropriate error as returned from the OF parsing
+ * of the DT client node.
*/
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
@@ -481,7 +485,7 @@ out:
*
* Should be called from a process context (might sleep)
*
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
*/
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks)
@@ -529,7 +533,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_register);
*
* Should be called from a process context (might sleep)
*
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
*/
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
@@ -578,7 +582,7 @@ static int devm_hwspin_lock_device_match(struct device *dev, void *res,
*
* Should be called from a process context (might sleep)
*
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
*/
int devm_hwspin_lock_unregister(struct device *dev,
struct hwspinlock_device *bank)
@@ -607,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);
*
* Should be called from a process context (might sleep)
*
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
*/
int devm_hwspin_lock_register(struct device *dev,
struct hwspinlock_device *bank,
@@ -635,12 +639,13 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
/**
* __hwspin_lock_request() - tag an hwspinlock as used and power it up
+ * @hwlock: the target hwspinlock
*
* This is an internal function that prepares an hwspinlock instance
* before it is given to the user. The function assumes that
* hwspinlock_tree_lock is taken.
*
- * Returns 0 or positive to indicate success, and a negative value to
+ * Returns: %0 or positive to indicate success, and a negative value to
* indicate an error (with the appropriate error code)
*/
static int __hwspin_lock_request(struct hwspinlock *hwlock)
@@ -680,7 +685,7 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
* hwspin_lock_get_id() - retrieve id number of a given hwspinlock
* @hwlock: a valid hwspinlock instance
*
- * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
+ * Returns: the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
*/
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
@@ -704,7 +709,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
*
* Should be called from a process context (might sleep)
*
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
*/
struct hwspinlock *hwspin_lock_request(void)
{
@@ -747,7 +752,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
*
* Should be called from a process context (might sleep)
*
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
*/
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
@@ -795,7 +800,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
*
* Should be called from a process context (might sleep)
*
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
*/
int hwspin_lock_free(struct hwspinlock *hwlock)
{
@@ -865,7 +870,7 @@ static void devm_hwspin_lock_release(struct device *dev, void *res)
*
* Should be called from a process context (might sleep)
*
- * Returns 0 on success, or an appropriate error code on failure
+ * Returns: %0 on success, or an appropriate error code on failure
*/
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
@@ -891,7 +896,7 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
*
* Should be called from a process context (might sleep)
*
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
*/
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
@@ -926,7 +931,7 @@ EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
*
* Should be called from a process context (might sleep)
*
- * Returns the address of the assigned hwspinlock, or NULL on error
+ * Returns: the address of the assigned hwspinlock, or %NULL on error
*/
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
unsigned int id)
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index a0fd67fd2..814dfe869 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -115,7 +115,6 @@ static const struct of_device_id qcom_hwspinlock_of_match[] = {
{ .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex },
{ .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex },
{ .compatible = "qcom,apq8084-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
- { .compatible = "qcom,ipq6018-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
{ .compatible = "qcom,msm8226-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
{ .compatible = "qcom,msm8974-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
{ .compatible = "qcom,msm8994-tcsr-mutex", .data = &of_msm8226_tcsr_mutex },
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 4b80026db..965bb6d4e 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1109,6 +1109,7 @@ static int coresight_validate_source(struct coresight_device *csdev,
if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM &&
subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
return -EINVAL;
@@ -1178,6 +1179,7 @@ int coresight_enable(struct coresight_device *csdev)
per_cpu(tracer_path, cpu) = path;
break;
case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
/*
* Use the hash of source's device name as ID
@@ -1228,6 +1230,7 @@ void coresight_disable(struct coresight_device *csdev)
per_cpu(tracer_path, cpu) = NULL;
break;
case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
/* Find the path by the hash. */
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index e4deafae7..ac70c0b49 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -122,14 +122,13 @@ static int dummy_probe(struct platform_device *pdev)
return 0;
}
-static int dummy_remove(struct platform_device *pdev)
+static void dummy_remove(struct platform_device *pdev)
{
struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
pm_runtime_disable(dev);
coresight_unregister(drvdata->csdev);
- return 0;
}
static const struct of_device_id dummy_match[] = {
@@ -140,7 +139,7 @@ static const struct of_device_id dummy_match[] = {
static struct platform_driver dummy_driver = {
.probe = dummy_probe,
- .remove = dummy_remove,
+ .remove_new = dummy_remove,
.driver = {
.name = "coresight-dummy",
.of_match_table = dummy_match,
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 58b32b399..c0c60e6a1 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -68,6 +68,7 @@ PMU_FORMAT_ATTR(preset, "config:0-3");
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* config ID - set if a system configuration is selected */
PMU_FORMAT_ATTR(configid, "config2:32-63");
+PMU_FORMAT_ATTR(cc_threshold, "config3:0-11");
/*
@@ -101,6 +102,7 @@ static struct attribute *etm_config_formats_attr[] = {
&format_attr_preset.attr,
&format_attr_configid.attr,
&format_attr_branch_broadcast.attr,
+ &format_attr_cc_threshold.attr,
NULL,
};
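
/*
 * Illustrative usage (assuming the usual cs_etm perf event syntax): the
 * new config3 field can be set by name alongside cycle-accurate tracing,
 * e.g.
 *
 *	perf record -e cs_etm/cycacc,cc_threshold=0x10/ -- <workload>
 */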
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 18c4544f6..2e2cabc5f 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -644,7 +644,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
struct etmv4_config *config = &drvdata->config;
struct perf_event_attr *attr = &event->attr;
unsigned long cfg_hash;
- int preset;
+ int preset, cc_threshold;
/* Clear configuration from previous run */
memset(config, 0, sizeof(struct etmv4_config));
@@ -667,7 +667,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
if (attr->config & BIT(ETM_OPT_CYCACC)) {
config->cfg |= TRCCONFIGR_CCI;
/* TRM: Must program this for cycacc to work */
- config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
+ cc_threshold = attr->config3 & ETM_CYC_THRESHOLD_MASK;
+ if (!cc_threshold)
+ cc_threshold = ETM_CYC_THRESHOLD_DEFAULT;
+ if (cc_threshold < drvdata->ccitmin)
+ cc_threshold = drvdata->ccitmin;
+ config->ccctlr = cc_threshold;
}
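/*
 * Illustrative note: a zero config3 threshold falls back to
 * ETM_CYC_THRESHOLD_DEFAULT, and any request below the PE's advertised
 * TRCIDR3.CCITMIN minimum (drvdata->ccitmin) is raised to that minimum.
 */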
if (attr->config & BIT(ETM_OPT_TS)) {
/*
@@ -1150,6 +1155,41 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
drvdata->trfcr = trfcr;
}
+/*
+ * The following errata, on applicable cpu ranges, affect the CCITMIN field
+ * in the TRCIDR3 register. A software read of the field returns 0x100,
+ * limiting the cycle threshold granularity, whereas the correct value
+ * should have been 0x4, which is well supported in the hardware.
+ */
+static struct midr_range etm_wrong_ccitmin_cpus[] = {
+ /* Erratum #1490853 - Cortex-A76 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 4, 0),
+ /* Erratum #1490853 - Neoverse-N1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 4, 0),
+ /* Erratum #1491015 - Cortex-A77 */
+ MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0),
+ /* Erratum #1502854 - Cortex-X1 */
+ MIDR_REV(MIDR_CORTEX_X1, 0, 0),
+ /* Erratum #1619801 - Neoverse-V1 */
+ MIDR_REV(MIDR_NEOVERSE_V1, 0, 0),
+ {},
+};
+
+static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
+{
+ /*
+ * Erratum affected cpus will read 256 as the minimum
+ * instruction trace cycle counting threshold whereas
+ * the correct value should be 4 instead. Override the
+ * recorded value for 'drvdata->ccitmin' to workaround
+ * this problem.
+ */
+ if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+ if (drvdata->ccitmin == 256)
+ drvdata->ccitmin = 4;
+ }
+}
+
static void etm4_init_arch_data(void *info)
{
u32 etmidr0;
@@ -1219,6 +1259,8 @@ static void etm4_init_arch_data(void *info)
etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3);
/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3);
+ etm4_fixup_wrong_ccitmin(drvdata);
+
/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3);
drvdata->config.s_ex_level = drvdata->s_ex_level;
@@ -2261,7 +2303,7 @@ static void etm4_remove_amba(struct amba_device *adev)
etm4_remove_dev(drvdata);
}
-static int etm4_remove_platform_dev(struct platform_device *pdev)
+static void etm4_remove_platform_dev(struct platform_device *pdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
@@ -2271,8 +2313,6 @@ static int etm4_remove_platform_dev(struct platform_device *pdev)
if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
clk_put(drvdata->pclk);
-
- return 0;
}
static const struct amba_id etm4_ids[] = {
@@ -2358,7 +2398,7 @@ MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
static struct platform_driver etm4_platform_driver = {
.probe = etm4_probe_platform_dev,
- .remove = etm4_remove_platform_dev,
+ .remove_new = etm4_remove_platform_dev,
.driver = {
.name = "coresight-etm4x",
.of_match_table = etm4_sysreg_match,
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index b8e150e45..a5b1fc787 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -335,11 +335,10 @@ static int static_funnel_probe(struct platform_device *pdev)
return ret;
}
-static int static_funnel_remove(struct platform_device *pdev)
+static void static_funnel_remove(struct platform_device *pdev)
{
funnel_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static const struct of_device_id static_funnel_match[] = {
@@ -360,7 +359,7 @@ MODULE_DEVICE_TABLE(acpi, static_funnel_ids);
static struct platform_driver static_funnel_driver = {
.probe = static_funnel_probe,
- .remove = static_funnel_remove,
+ .remove_new = static_funnel_remove,
.driver = {
.name = "coresight-static-funnel",
/* THIS_MODULE is taken care of by platform_driver_register() */
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b6be73034..91d93060d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -320,11 +320,10 @@ static int static_replicator_probe(struct platform_device *pdev)
return ret;
}
-static int static_replicator_remove(struct platform_device *pdev)
+static void static_replicator_remove(struct platform_device *pdev)
{
replicator_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
#ifdef CONFIG_PM
@@ -373,7 +372,7 @@ MODULE_DEVICE_TABLE(acpi, static_replicator_acpi_ids);
static struct platform_driver static_replicator_driver = {
.probe = static_replicator_probe,
- .remove = static_replicator_remove,
+ .remove_new = static_replicator_remove,
.driver = {
.name = "coresight-static-replicator",
/* THIS_MODULE is taken care of by platform_driver_register() */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index c106d142e..7ec5365e2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
@@ -344,7 +345,14 @@ static const struct attribute_group coresight_tmc_mgmt_group = {
.name = "mgmt",
};
-static const struct attribute_group *coresight_tmc_groups[] = {
+static const struct attribute_group *coresight_etf_groups[] = {
+ &coresight_tmc_group,
+ &coresight_tmc_mgmt_group,
+ NULL,
+};
+
+static const struct attribute_group *coresight_etr_groups[] = {
+ &coresight_etr_group,
&coresight_tmc_group,
&coresight_tmc_mgmt_group,
NULL,
@@ -465,6 +473,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->memwidth = tmc_get_memwidth(devid);
/* This device is not associated with a session */
drvdata->pid = -1;
+ drvdata->etr_mode = ETR_MODE_AUTO;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
drvdata->size = tmc_etr_get_default_buffer_size(dev);
@@ -474,16 +483,17 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
}
desc.dev = dev;
- desc.groups = coresight_tmc_groups;
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
+ desc.groups = coresight_etf_groups;
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etb_cs_ops;
dev_list = &etb_devs;
break;
case TMC_CONFIG_TYPE_ETR:
+ desc.groups = coresight_etr_groups;
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
desc.ops = &tmc_etr_cs_ops;
@@ -496,6 +506,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
dev_list = &etr_devs;
break;
case TMC_CONFIG_TYPE_ETF:
+ desc.groups = coresight_etf_groups;
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 8311e1028..af02ba5d5 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -26,6 +26,12 @@ struct etr_flat_buf {
size_t size;
};
+struct etr_buf_hw {
+ bool has_iommu;
+ bool has_etr_sg;
+ bool has_catu;
+};
+
/*
* etr_perf_buffer - Perf buffer used for ETR
* @drvdata - The ETR drvdata this buffer has been allocated for.
@@ -830,6 +836,22 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
}
}
+static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent);
+ buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
+ buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
+}
+
+static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size)
+{
+ bool has_sg = buf_hw->has_catu || buf_hw->has_etr_sg;
+
+ return !has_sg || buf_hw->has_iommu || etr_buf_size < SZ_1M;
+}
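+
+/*
+ * Illustrative note: flat (contiguous DMA) mode is viable when no SG
+ * backend exists, when an IOMMU provides the contiguity, or when the
+ * buffer is small (< 1MiB); e.g. a 2MiB request with TMC-SG support and
+ * no IOMMU falls through to the SG allocators instead.
+ */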
+
/*
* tmc_alloc_etr_buf: Allocate a buffer use by ETR.
* @drvdata : ETR device details.
@@ -843,23 +865,22 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
int node, void **pages)
{
int rc = -ENOMEM;
- bool has_etr_sg, has_iommu;
- bool has_sg, has_catu;
struct etr_buf *etr_buf;
+ struct etr_buf_hw buf_hw;
struct device *dev = &drvdata->csdev->dev;
- has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
- has_iommu = iommu_get_domain_for_dev(dev->parent);
- has_catu = !!tmc_etr_get_catu_device(drvdata);
-
- has_sg = has_catu || has_etr_sg;
-
+ get_etr_buf_hw(dev, &buf_hw);
etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
if (!etr_buf)
return ERR_PTR(-ENOMEM);
etr_buf->size = size;
+ /* If there is user directive for buffer mode, try that first */
+ if (drvdata->etr_mode != ETR_MODE_AUTO)
+ rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata,
+ etr_buf, node, pages);
+
/*
* If we have to use an existing list of pages, we cannot reliably
* use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
@@ -872,14 +893,13 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
* Fallback to available mechanisms.
*
*/
- if (!pages &&
- (!has_sg || has_iommu || size < SZ_1M))
+ if (rc && !pages && etr_can_use_flat_mode(&buf_hw, size))
rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
etr_buf, node, pages);
- if (rc && has_etr_sg)
+ if (rc && buf_hw.has_etr_sg)
rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
etr_buf, node, pages);
- if (rc && has_catu)
+ if (rc && buf_hw.has_catu)
rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
etr_buf, node, pages);
if (rc) {
@@ -1804,3 +1824,70 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
return 0;
}
+
+static const char *const buf_modes_str[] = {
+ [ETR_MODE_FLAT] = "flat",
+ [ETR_MODE_ETR_SG] = "tmc-sg",
+ [ETR_MODE_CATU] = "catu",
+ [ETR_MODE_AUTO] = "auto",
+};
+
+static ssize_t buf_modes_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct etr_buf_hw buf_hw;
+ ssize_t size = 0;
+
+ get_etr_buf_hw(dev, &buf_hw);
+ size += sysfs_emit(buf, "%s ", buf_modes_str[ETR_MODE_AUTO]);
+ size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_FLAT]);
+ if (buf_hw.has_etr_sg)
+ size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_ETR_SG]);
+
+ if (buf_hw.has_catu)
+ size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]);
+
+ size += sysfs_emit_at(buf, size, "\n");
+ return size;
+}
+static DEVICE_ATTR_RO(buf_modes_available);
+
+static ssize_t buf_mode_preferred_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]);
+}
+
+static ssize_t buf_mode_preferred_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etr_buf_hw buf_hw;
+
+ get_etr_buf_hw(dev, &buf_hw);
+ if (sysfs_streq(buf, buf_modes_str[ETR_MODE_FLAT]))
+ drvdata->etr_mode = ETR_MODE_FLAT;
+ else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_ETR_SG]) && buf_hw.has_etr_sg)
+ drvdata->etr_mode = ETR_MODE_ETR_SG;
+ else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu)
+ drvdata->etr_mode = ETR_MODE_CATU;
+ else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO]))
+ drvdata->etr_mode = ETR_MODE_AUTO;
+ else
+ return -EINVAL;
+ return size;
+}
+static DEVICE_ATTR_RW(buf_mode_preferred);
+
+static struct attribute *coresight_etr_attrs[] = {
+ &dev_attr_buf_modes_available.attr,
+ &dev_attr_buf_mode_preferred.attr,
+ NULL,
+};
+
+const struct attribute_group coresight_etr_group = {
+ .attrs = coresight_etr_attrs,
+};
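+
+/*
+ * Illustrative sysfs usage (the exact path depends on the platform's
+ * device naming):
+ *
+ *	cat /sys/bus/coresight/devices/tmc_etr0/buf_modes_available
+ *	echo tmc-sg > /sys/bus/coresight/devices/tmc_etr0/buf_mode_preferred
+ */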
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 0ee48c5ba..8dcb426ac 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -135,6 +135,7 @@ enum etr_mode {
ETR_MODE_FLAT, /* Uses contiguous flat buffer */
ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
ETR_MODE_CATU, /* Use SG mechanism in CATU */
+ ETR_MODE_AUTO, /* Use the default mechanism */
};
struct etr_buf_operations;
@@ -207,6 +208,7 @@ struct tmc_drvdata {
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
u32 etr_caps;
+ enum etr_mode etr_mode;
struct idr idr;
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
@@ -334,5 +336,6 @@ void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
void tmc_etr_remove_catu_ops(void);
struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
enum cs_mode mode, void *data);
+extern const struct attribute_group coresight_etr_group;
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 8d2b9d292..5f82737c3 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -21,6 +21,80 @@
DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
+static bool coresight_device_is_tpdm(struct coresight_device *csdev)
+{
+ return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ (csdev->subtype.source_subtype ==
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
+}
+
+/*
+ * Read the DSB element size from the TPDM device
+ * Returns
+ * The dsb element size read from the devicetree if available.
+ * 0 - Otherwise, with a warning once.
+ */
+static int tpdm_read_dsb_element_size(struct coresight_device *csdev)
+{
+ int rc = 0;
+ u8 size = 0;
+
+ rc = fwnode_property_read_u8(dev_fwnode(csdev->dev.parent),
+ "qcom,dsb-element-size", &size);
+ if (rc)
+ dev_warn_once(&csdev->dev,
+ "Failed to read TPDM DSB Element size: %d\n", rc);
+
+ return size;
+}
+
+/*
+ * Search for and read the element data size from the TPDM node in
+ * the devicetree. Each input port of the TPDA is connected to a
+ * TPDM. Different TPDMs support different types of dataset, and
+ * some may support more than one type. The "inport" parameter
+ * passes in the TPDA input port number; it is set to -1 in the
+ * recursive call.
+ */
+static int tpda_get_element_size(struct coresight_device *csdev,
+ int inport)
+{
+ int dsb_size = -ENOENT;
+ int i, size;
+ struct coresight_device *in;
+
+ for (i = 0; i < csdev->pdata->nr_inconns; i++) {
+ in = csdev->pdata->in_conns[i]->src_dev;
+ if (!in)
+ continue;
+
+ /* Ignore the paths that do not match port */
+ if (inport > 0 &&
+ csdev->pdata->in_conns[i]->dest_port != inport)
+ continue;
+
+ if (coresight_device_is_tpdm(in)) {
+ size = tpdm_read_dsb_element_size(in);
+ } else {
+ /* Recurse down the path */
+ size = tpda_get_element_size(in, -1);
+ }
+
+ if (size < 0)
+ return size;
+
+ if (dsb_size < 0) {
+ /* Found a size, save it. */
+ dsb_size = size;
+ } else {
+ /* Found duplicate TPDMs */
+ return -EEXIST;
+ }
+ }
+
+ return dsb_size;
+}
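+
+/*
+ * Illustrative note: the walk above yields -ENOENT when no TPDM feeds the
+ * path, a propagated error from a deeper level, -EEXIST when two TPDMs
+ * feed the same port, and the single DSB element size (32 or 64)
+ * otherwise.
+ */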
+
/* Settings pre enabling port control register */
static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
{
@@ -32,26 +106,55 @@ static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
writel_relaxed(val, drvdata->base + TPDA_CR);
}
-static void tpda_enable_port(struct tpda_drvdata *drvdata, int port)
+static int tpda_enable_port(struct tpda_drvdata *drvdata, int port)
{
u32 val;
+ int size;
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
+ /*
+ * Configure the aggregator port n DSB data set element size:
+ * set the bit to 0 if the size is 32, or to 1 if the size is 64.
+ */
+ size = tpda_get_element_size(drvdata->csdev, port);
+ switch (size) {
+ case 32:
+ val &= ~TPDA_Pn_CR_DSBSIZE;
+ break;
+ case 64:
+ val |= TPDA_Pn_CR_DSBSIZE;
+ break;
+ case 0:
+ return -EEXIST;
+ case -EEXIST:
+ dev_warn_once(&drvdata->csdev->dev,
+ "Detected multiple TPDMs on port %d", -EEXIST);
+ return -EEXIST;
+ default:
+ return -EINVAL;
+ }
+
/* Enable the port */
val |= TPDA_Pn_CR_ENA;
writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+
+ return 0;
}
-static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
+static int __tpda_enable(struct tpda_drvdata *drvdata, int port)
{
+ int ret;
+
CS_UNLOCK(drvdata->base);
if (!drvdata->csdev->enable)
tpda_enable_pre_port(drvdata);
- tpda_enable_port(drvdata, port);
-
+ ret = tpda_enable_port(drvdata, port);
CS_LOCK(drvdata->base);
+
+ return ret;
}
static int tpda_enable(struct coresight_device *csdev,
@@ -59,16 +162,19 @@ static int tpda_enable(struct coresight_device *csdev,
struct coresight_connection *out)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret = 0;
spin_lock(&drvdata->spinlock);
- if (atomic_read(&in->dest_refcnt) == 0)
- __tpda_enable(drvdata, in->dest_port);
+ if (atomic_read(&in->dest_refcnt) == 0) {
+ ret = __tpda_enable(drvdata, in->dest_port);
+ if (!ret) {
+ atomic_inc(&in->dest_refcnt);
+ dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
+ }
+ }
- atomic_inc(&in->dest_refcnt);
spin_unlock(&drvdata->spinlock);
-
- dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
- return 0;
+ return ret;
}
static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
diff --git a/drivers/hwtracing/coresight/coresight-tpda.h b/drivers/hwtracing/coresight/coresight-tpda.h
index 0399678df..b3b38fd41 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.h
+++ b/drivers/hwtracing/coresight/coresight-tpda.h
@@ -10,6 +10,8 @@
#define TPDA_Pn_CR(n) (0x004 + (n * 4))
/* Aggregator port enable bit */
#define TPDA_Pn_CR_ENA BIT(0)
+/* Aggregator port DSB data set element size bit */
+#define TPDA_Pn_CR_DSBSIZE BIT(8)
#define TPDA_MAX_INPORTS 32
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index f4854af04..97654aa4b 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -4,6 +4,7 @@
*/
#include <linux/amba/bus.h>
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
@@ -20,23 +21,265 @@
DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");
-static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+/* Read dataset array member with the index number */
+static ssize_t tpdm_simple_dataset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(attr, struct tpdm_dataset_attribute, attr);
+
+ switch (tpdm_attr->mem) {
+ case DSB_EDGE_CTRL:
+ if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCR)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->edge_ctrl[tpdm_attr->idx]);
+ case DSB_EDGE_CTRL_MASK:
+ if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCMR)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->edge_ctrl_mask[tpdm_attr->idx]);
+ case DSB_TRIG_PATT:
+ if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->trig_patt[tpdm_attr->idx]);
+ case DSB_TRIG_PATT_MASK:
+ if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->trig_patt_mask[tpdm_attr->idx]);
+ case DSB_PATT:
+ if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->patt_val[tpdm_attr->idx]);
+ case DSB_PATT_MASK:
+ if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->patt_mask[tpdm_attr->idx]);
+ case DSB_MSR:
+ if (tpdm_attr->idx >= drvdata->dsb_msr_num)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->dsb->msr[tpdm_attr->idx]);
+ }
+ return -EINVAL;
+}
+
+/* Write dataset array member with the index number */
+static ssize_t tpdm_simple_dataset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ unsigned long val;
+ ssize_t ret = size;
+
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(attr, struct tpdm_dataset_attribute, attr);
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ switch (tpdm_attr->mem) {
+ case DSB_TRIG_PATT:
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ drvdata->dsb->trig_patt[tpdm_attr->idx] = val;
+ else
+ ret = -EINVAL;
+ break;
+ case DSB_TRIG_PATT_MASK:
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val;
+ else
+ ret = -EINVAL;
+ break;
+ case DSB_PATT:
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ drvdata->dsb->patt_val[tpdm_attr->idx] = val;
+ else
+ ret = -EINVAL;
+ break;
+ case DSB_PATT_MASK:
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ drvdata->dsb->patt_mask[tpdm_attr->idx] = val;
+ else
+ ret = -EINVAL;
+ break;
+ case DSB_MSR:
+ if (tpdm_attr->idx < drvdata->dsb_msr_num)
+ drvdata->dsb->msr[tpdm_attr->idx] = val;
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ spin_unlock(&drvdata->spinlock);
+
+ return ret;
+}
+
+static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+}
+
+static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (drvdata && tpdm_has_dsb_dataset(drvdata))
+ return attr->mode;
+
+ return 0;
+}
+
+static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct device_attribute *dev_attr =
+ container_of(attr, struct device_attribute, attr);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(dev_attr, struct tpdm_dataset_attribute, attr);
+
+ if (tpdm_attr->idx < drvdata->dsb_msr_num)
+ return attr->mode;
+
+ return 0;
+}
+
+static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
+{
+ if (tpdm_has_dsb_dataset(drvdata)) {
+ memset(drvdata->dsb, 0, sizeof(struct dsb_dataset));
+
+ drvdata->dsb->trig_ts = true;
+ drvdata->dsb->trig_type = false;
+ }
+}
+
+static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val)
+{
+ u32 mode;
+
+ /* Set the test accurate mode */
+ mode = TPDM_DSB_MODE_TEST(drvdata->dsb->mode);
+ *val &= ~TPDM_DSB_CR_TEST_MODE;
+ *val |= FIELD_PREP(TPDM_DSB_CR_TEST_MODE, mode);
+
+ /* Set the byte lane for high-performance mode */
+ mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode);
+ *val &= ~TPDM_DSB_CR_HPSEL;
+ *val |= FIELD_PREP(TPDM_DSB_CR_HPSEL, mode);
+
+ /* Set the performance mode */
+ if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF)
+ *val |= TPDM_DSB_CR_MODE;
+ else
+ *val &= ~TPDM_DSB_CR_MODE;
+}
+
+static void set_dsb_tier(struct tpdm_drvdata *drvdata)
{
u32 val;
- /* Set the enable bit of DSB control register to 1 */
+ val = readl_relaxed(drvdata->base + TPDM_DSB_TIER);
+
+ /* Clear all relevant fields */
+ val &= ~(TPDM_DSB_TIER_PATT_TSENAB | TPDM_DSB_TIER_PATT_TYPE |
+ TPDM_DSB_TIER_XTRIG_TSENAB);
+
+ /* Set pattern timestamp type and enablement */
+ if (drvdata->dsb->patt_ts) {
+ val |= TPDM_DSB_TIER_PATT_TSENAB;
+ if (drvdata->dsb->patt_type)
+ val |= TPDM_DSB_TIER_PATT_TYPE;
+ else
+ val &= ~TPDM_DSB_TIER_PATT_TYPE;
+ } else {
+ val &= ~TPDM_DSB_TIER_PATT_TSENAB;
+ }
+
+ /* Set trigger timestamp */
+ if (drvdata->dsb->trig_ts)
+ val |= TPDM_DSB_TIER_XTRIG_TSENAB;
+ else
+ val &= ~TPDM_DSB_TIER_XTRIG_TSENAB;
+
+ writel_relaxed(val, drvdata->base + TPDM_DSB_TIER);
+}
+
+static void set_dsb_msr(struct tpdm_drvdata *drvdata)
+{
+ int i;
+
+ for (i = 0; i < drvdata->dsb_msr_num; i++)
+ writel_relaxed(drvdata->dsb->msr[i],
+ drvdata->base + TPDM_DSB_MSR(i));
+}
+
+static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+{
+ u32 val, i;
+
+ for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
+ writel_relaxed(drvdata->dsb->edge_ctrl[i],
+ drvdata->base + TPDM_DSB_EDCR(i));
+ for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++)
+ writel_relaxed(drvdata->dsb->edge_ctrl_mask[i],
+ drvdata->base + TPDM_DSB_EDCMR(i));
+ for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+ writel_relaxed(drvdata->dsb->patt_val[i],
+ drvdata->base + TPDM_DSB_TPR(i));
+ writel_relaxed(drvdata->dsb->patt_mask[i],
+ drvdata->base + TPDM_DSB_TPMR(i));
+ writel_relaxed(drvdata->dsb->trig_patt[i],
+ drvdata->base + TPDM_DSB_XPR(i));
+ writel_relaxed(drvdata->dsb->trig_patt_mask[i],
+ drvdata->base + TPDM_DSB_XPMR(i));
+ }
+
+ set_dsb_tier(drvdata);
+
+ set_dsb_msr(drvdata);
+
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
+ /* Set the mode of DSB dataset */
+ set_dsb_mode(drvdata, &val);
+ /* Set trigger type */
+ if (drvdata->dsb->trig_type)
+ val |= TPDM_DSB_CR_TRIG_TYPE;
+ else
+ val &= ~TPDM_DSB_CR_TRIG_TYPE;
+ /* Set the enable bit of DSB control register to 1 */
val |= TPDM_DSB_CR_ENA;
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
-/* TPDM enable operations */
+/*
+ * TPDM enable operations
+ * The TPDM or Monitor serves as a data collection component for various
+ * dataset types. It covers Basic Counts (BC), Tenure Counts (TC),
+ * Continuous Multi-Bit (CMB), Multi-lane CMB (MCMB) and Discrete Single
+ * Bit (DSB). This function initializes the configuration according
+ * to the dataset types supported by the TPDM.
+ */
static void __tpdm_enable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- /* Check if DSB datasets is present for TPDM. */
- if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
+ if (tpdm_has_dsb_dataset(drvdata))
tpdm_enable_dsb(drvdata);
CS_LOCK(drvdata->base);
@@ -76,8 +319,7 @@ static void __tpdm_disable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- /* Check if DSB datasets is present for TPDM. */
- if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
+ if (tpdm_has_dsb_dataset(drvdata))
tpdm_disable_dsb(drvdata);
CS_LOCK(drvdata->base);
@@ -110,16 +352,45 @@ static const struct coresight_ops tpdm_cs_ops = {
.source_ops = &tpdm_source_ops,
};
-static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
+static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
{
u32 pidr;
- CS_UNLOCK(drvdata->base);
/* Get the datasets present on the TPDM. */
pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0);
- CS_LOCK(drvdata->base);
+
+ if (tpdm_has_dsb_dataset(drvdata) && (!drvdata->dsb)) {
+ drvdata->dsb = devm_kzalloc(drvdata->dev,
+ sizeof(*drvdata->dsb), GFP_KERNEL);
+ if (!drvdata->dsb)
+ return -ENOMEM;
+ }
+ tpdm_reset_datasets(drvdata);
+
+ return 0;
+}
+
+static ssize_t reset_dataset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret || val != 1)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ tpdm_reset_datasets(drvdata);
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
}
+static DEVICE_ATTR_WO(reset_dataset);
/*
* value 1: 64 bits test data
@@ -161,6 +432,7 @@ static ssize_t integration_test_store(struct device *dev,
static DEVICE_ATTR_WO(integration_test);
static struct attribute *tpdm_attrs[] = {
+ &dev_attr_reset_dataset.attr,
&dev_attr_integration_test.attr,
NULL,
};
@@ -169,8 +441,421 @@ static struct attribute_group tpdm_attr_grp = {
.attrs = tpdm_attrs,
};
+static ssize_t dsb_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%x\n", drvdata->dsb->mode);
+}
+
+static ssize_t dsb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~TPDM_DSB_MODE_MASK))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->dsb->mode = val & TPDM_DSB_MODE_MASK;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(dsb_mode);
+
+static ssize_t ctrl_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->edge_ctrl_idx);
+}
+
+/*
+ * The EDCR registers can include up to 16 32-bit registers, and each
+ * one can be configured to control up to 16 edge detections(2 bits
+ * control one edge detection). So a total 256 edge detections can be
+ * configured. This function provides a way to set the index number of
+ * the edge detection which needs to be configured.
+ */
+static ssize_t ctrl_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_DSB_MAX_LINES))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->dsb->edge_ctrl_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctrl_idx);
+
+/*
+ * This function is used to control the edge detection according
+ * to the index number that has been set.
+ * "edge_ctrl" should be one of the following values.
+ * 0 - Rising edge detection
+ * 1 - Falling edge detection
+ * 2 - Rising and falling edge detection (toggle detection)
+ */
+static ssize_t ctrl_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val, edge_ctrl;
+ int reg;
+
+ if ((kstrtoul(buf, 0, &edge_ctrl)) || (edge_ctrl > 0x2))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /*
+ * There are 2 bits per DSB Edge Control line.
+ * Thus we have 16 lines in a 32-bit word.
+ */
+ reg = EDCR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
+ val = drvdata->dsb->edge_ctrl[reg];
+ val &= ~EDCR_TO_WORD_MASK(drvdata->dsb->edge_ctrl_idx);
+ val |= EDCR_TO_WORD_VAL(edge_ctrl, drvdata->dsb->edge_ctrl_idx);
+ drvdata->dsb->edge_ctrl[reg] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_WO(ctrl_val);
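+
+/*
+ * Worked example (assuming the EDCR_TO_WORD_* helpers implement the
+ * 2-bits-per-line layout described above): for edge_ctrl_idx == 37 the
+ * control lands in edge_ctrl[37 / 16] == edge_ctrl[2] at bit offset
+ * (37 % 16) * 2 == 10, so toggle detection (0x2) writes 0x2 << 10 there.
+ */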
+
+static ssize_t ctrl_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ u32 set;
+ int reg;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /*
+ * There is 1 bit per DSB Edge Control Mask line.
+ * Thus we have 32 lines in a 32-bit word.
+ */
+ reg = EDCMR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
+ set = drvdata->dsb->edge_ctrl_mask[reg];
+ if (val)
+ set |= BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
+ else
+ set &= ~BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
+ drvdata->dsb->edge_ctrl_mask[reg] = set;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_WO(ctrl_mask);
+
+static ssize_t enable_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->patt_ts);
+}
+
+/*
+ * Enable or disable the DSB pattern timestamp (1 - enable, 0 - disable).
+ */
+static ssize_t enable_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->dsb->patt_ts = !!val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(enable_ts);
+
+static ssize_t set_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->patt_type);
+}
+
+/*
+ * Set or clear the DSB pattern type bit (write 0 or 1).
+ */
+static ssize_t set_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->dsb->patt_type = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(set_type);
+
+static ssize_t dsb_trig_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->trig_type);
+}
+
+/*
+ * Trigger type (boolean):
+ * false - Disable trigger type.
+ * true - Enable trigger type.
+ */
+static ssize_t dsb_trig_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ if (val)
+ drvdata->dsb->trig_type = true;
+ else
+ drvdata->dsb->trig_type = false;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(dsb_trig_type);
+
+static ssize_t dsb_trig_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->trig_ts);
+}
+
+/*
+ * Trigger timestamp (boolean):
+ * false - Disable trigger timestamp.
+ * true - Enable trigger timestamp.
+ */
+static ssize_t dsb_trig_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ if (val)
+ drvdata->dsb->trig_ts = true;
+ else
+ drvdata->dsb->trig_ts = false;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(dsb_trig_ts);
+
+static struct attribute *tpdm_dsb_edge_attrs[] = {
+ &dev_attr_ctrl_idx.attr,
+ &dev_attr_ctrl_val.attr,
+ &dev_attr_ctrl_mask.attr,
+ DSB_EDGE_CTRL_ATTR(0),
+ DSB_EDGE_CTRL_ATTR(1),
+ DSB_EDGE_CTRL_ATTR(2),
+ DSB_EDGE_CTRL_ATTR(3),
+ DSB_EDGE_CTRL_ATTR(4),
+ DSB_EDGE_CTRL_ATTR(5),
+ DSB_EDGE_CTRL_ATTR(6),
+ DSB_EDGE_CTRL_ATTR(7),
+ DSB_EDGE_CTRL_ATTR(8),
+ DSB_EDGE_CTRL_ATTR(9),
+ DSB_EDGE_CTRL_ATTR(10),
+ DSB_EDGE_CTRL_ATTR(11),
+ DSB_EDGE_CTRL_ATTR(12),
+ DSB_EDGE_CTRL_ATTR(13),
+ DSB_EDGE_CTRL_ATTR(14),
+ DSB_EDGE_CTRL_ATTR(15),
+ DSB_EDGE_CTRL_MASK_ATTR(0),
+ DSB_EDGE_CTRL_MASK_ATTR(1),
+ DSB_EDGE_CTRL_MASK_ATTR(2),
+ DSB_EDGE_CTRL_MASK_ATTR(3),
+ DSB_EDGE_CTRL_MASK_ATTR(4),
+ DSB_EDGE_CTRL_MASK_ATTR(5),
+ DSB_EDGE_CTRL_MASK_ATTR(6),
+ DSB_EDGE_CTRL_MASK_ATTR(7),
+ NULL,
+};
+
+static struct attribute *tpdm_dsb_trig_patt_attrs[] = {
+ DSB_TRIG_PATT_ATTR(0),
+ DSB_TRIG_PATT_ATTR(1),
+ DSB_TRIG_PATT_ATTR(2),
+ DSB_TRIG_PATT_ATTR(3),
+ DSB_TRIG_PATT_ATTR(4),
+ DSB_TRIG_PATT_ATTR(5),
+ DSB_TRIG_PATT_ATTR(6),
+ DSB_TRIG_PATT_ATTR(7),
+ DSB_TRIG_PATT_MASK_ATTR(0),
+ DSB_TRIG_PATT_MASK_ATTR(1),
+ DSB_TRIG_PATT_MASK_ATTR(2),
+ DSB_TRIG_PATT_MASK_ATTR(3),
+ DSB_TRIG_PATT_MASK_ATTR(4),
+ DSB_TRIG_PATT_MASK_ATTR(5),
+ DSB_TRIG_PATT_MASK_ATTR(6),
+ DSB_TRIG_PATT_MASK_ATTR(7),
+ NULL,
+};
+
+static struct attribute *tpdm_dsb_patt_attrs[] = {
+ DSB_PATT_ATTR(0),
+ DSB_PATT_ATTR(1),
+ DSB_PATT_ATTR(2),
+ DSB_PATT_ATTR(3),
+ DSB_PATT_ATTR(4),
+ DSB_PATT_ATTR(5),
+ DSB_PATT_ATTR(6),
+ DSB_PATT_ATTR(7),
+ DSB_PATT_MASK_ATTR(0),
+ DSB_PATT_MASK_ATTR(1),
+ DSB_PATT_MASK_ATTR(2),
+ DSB_PATT_MASK_ATTR(3),
+ DSB_PATT_MASK_ATTR(4),
+ DSB_PATT_MASK_ATTR(5),
+ DSB_PATT_MASK_ATTR(6),
+ DSB_PATT_MASK_ATTR(7),
+ &dev_attr_enable_ts.attr,
+ &dev_attr_set_type.attr,
+ NULL,
+};
+
+static struct attribute *tpdm_dsb_msr_attrs[] = {
+ DSB_MSR_ATTR(0),
+ DSB_MSR_ATTR(1),
+ DSB_MSR_ATTR(2),
+ DSB_MSR_ATTR(3),
+ DSB_MSR_ATTR(4),
+ DSB_MSR_ATTR(5),
+ DSB_MSR_ATTR(6),
+ DSB_MSR_ATTR(7),
+ DSB_MSR_ATTR(8),
+ DSB_MSR_ATTR(9),
+ DSB_MSR_ATTR(10),
+ DSB_MSR_ATTR(11),
+ DSB_MSR_ATTR(12),
+ DSB_MSR_ATTR(13),
+ DSB_MSR_ATTR(14),
+ DSB_MSR_ATTR(15),
+ DSB_MSR_ATTR(16),
+ DSB_MSR_ATTR(17),
+ DSB_MSR_ATTR(18),
+ DSB_MSR_ATTR(19),
+ DSB_MSR_ATTR(20),
+ DSB_MSR_ATTR(21),
+ DSB_MSR_ATTR(22),
+ DSB_MSR_ATTR(23),
+ DSB_MSR_ATTR(24),
+ DSB_MSR_ATTR(25),
+ DSB_MSR_ATTR(26),
+ DSB_MSR_ATTR(27),
+ DSB_MSR_ATTR(28),
+ DSB_MSR_ATTR(29),
+ DSB_MSR_ATTR(30),
+ DSB_MSR_ATTR(31),
+ NULL,
+};
+
+static struct attribute *tpdm_dsb_attrs[] = {
+ &dev_attr_dsb_mode.attr,
+ &dev_attr_dsb_trig_ts.attr,
+ &dev_attr_dsb_trig_type.attr,
+ NULL,
+};
+
+static struct attribute_group tpdm_dsb_attr_grp = {
+ .attrs = tpdm_dsb_attrs,
+ .is_visible = tpdm_dsb_is_visible,
+};
+
+static struct attribute_group tpdm_dsb_edge_grp = {
+ .attrs = tpdm_dsb_edge_attrs,
+ .is_visible = tpdm_dsb_is_visible,
+ .name = "dsb_edge",
+};
+
+static struct attribute_group tpdm_dsb_trig_patt_grp = {
+ .attrs = tpdm_dsb_trig_patt_attrs,
+ .is_visible = tpdm_dsb_is_visible,
+ .name = "dsb_trig_patt",
+};
+
+static struct attribute_group tpdm_dsb_patt_grp = {
+ .attrs = tpdm_dsb_patt_attrs,
+ .is_visible = tpdm_dsb_is_visible,
+ .name = "dsb_patt",
+};
+
+static struct attribute_group tpdm_dsb_msr_grp = {
+ .attrs = tpdm_dsb_msr_attrs,
+ .is_visible = tpdm_dsb_msr_is_visible,
+ .name = "dsb_msr",
+};
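
Every group here sets an .is_visible callback, which sysfs consults once per attribute at registration time; returning 0 hides a file entirely, so the DSB nodes only show up on TPDMs that actually carry a DSB dataset. The callbacks themselves (tpdm_dsb_is_visible, tpdm_dsb_msr_is_visible) are defined earlier in this patch, outside this excerpt; a minimal sketch of the expected shape, built from the helpers visible in this hunk:

	/* Sketch only: expose DSB attributes with their declared mode
	 * when a DSB dataset is present, otherwise hide them. */
	static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
	{
		struct device *dev = kobj_to_dev(kobj);
		struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

		if (drvdata && tpdm_has_dsb_dataset(drvdata))
			return attr->mode;

		return 0;
	}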
+
static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_attr_grp,
+ &tpdm_dsb_attr_grp,
+ &tpdm_dsb_edge_grp,
+ &tpdm_dsb_trig_patt_grp,
+ &tpdm_dsb_patt_grp,
+ &tpdm_dsb_msr_grp,
NULL,
};
@@ -181,6 +866,7 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata;
struct tpdm_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
@@ -200,12 +886,20 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
+ ret = tpdm_datasets_setup(drvdata);
+ if (ret)
+ return ret;
+
+ if (tpdm_has_dsb_dataset(drvdata))
+ of_property_read_u32(drvdata->dev->of_node,
+ "qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
+
/* Set up coresight component description */
desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
- desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
+ desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM;
desc.ops = &tpdm_cs_ops;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
@@ -216,7 +910,7 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(drvdata->csdev);
spin_lock_init(&drvdata->spinlock);
- tpdm_init_default_data(drvdata);
+
/* Decrease pm refcount when probe is done. */
pm_runtime_put(&adev->dev);
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h
index 543854043..4115b2a17 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.h
+++ b/drivers/hwtracing/coresight/coresight-tpdm.h
@@ -11,8 +11,52 @@
/* DSB Subunit Registers */
#define TPDM_DSB_CR (0x780)
+#define TPDM_DSB_TIER (0x784)
+#define TPDM_DSB_TPR(n) (0x788 + (n * 4))
+#define TPDM_DSB_TPMR(n) (0x7A8 + (n * 4))
+#define TPDM_DSB_XPR(n) (0x7C8 + (n * 4))
+#define TPDM_DSB_XPMR(n) (0x7E8 + (n * 4))
+#define TPDM_DSB_EDCR(n) (0x808 + (n * 4))
+#define TPDM_DSB_EDCMR(n) (0x848 + (n * 4))
+#define TPDM_DSB_MSR(n) (0x980 + (n * 4))
+
/* Enable bit for DSB subunit */
#define TPDM_DSB_CR_ENA BIT(0)
+/* Enable bit for DSB subunit performance mode */
+#define TPDM_DSB_CR_MODE BIT(1)
+/* Enable bit for DSB subunit trigger type */
+#define TPDM_DSB_CR_TRIG_TYPE BIT(12)
+/* Data bits for DSB high performance mode */
+#define TPDM_DSB_CR_HPSEL GENMASK(6, 2)
+/* Data bits for DSB test mode */
+#define TPDM_DSB_CR_TEST_MODE GENMASK(10, 9)
+
+/* Enable bit for DSB subunit pattern timestamp */
+#define TPDM_DSB_TIER_PATT_TSENAB BIT(0)
+/* Enable bit for DSB subunit trigger timestamp */
+#define TPDM_DSB_TIER_XTRIG_TSENAB BIT(1)
+/* Bit for DSB subunit pattern type */
+#define TPDM_DSB_TIER_PATT_TYPE BIT(2)
+
+/* DSB programming modes */
+/* DSB mode bits mask */
+#define TPDM_DSB_MODE_MASK GENMASK(8, 0)
+/* Test mode control bits */
+#define TPDM_DSB_MODE_TEST(val) (val & GENMASK(1, 0))
+/* Performance mode */
+#define TPDM_DSB_MODE_PERF BIT(3)
+/* High performance mode */
+#define TPDM_DSB_MODE_HPBYTESEL(val) (val & GENMASK(8, 4))
+
+#define EDCRS_PER_WORD 16
+#define EDCR_TO_WORD_IDX(r) ((r) / EDCRS_PER_WORD)
+#define EDCR_TO_WORD_SHIFT(r) ((r % EDCRS_PER_WORD) * 2)
+#define EDCR_TO_WORD_VAL(val, r) (val << EDCR_TO_WORD_SHIFT(r))
+#define EDCR_TO_WORD_MASK(r) EDCR_TO_WORD_VAL(0x3, r)
+
+#define EDCMRS_PER_WORD 32
+#define EDCMR_TO_WORD_IDX(r) ((r) / EDCMRS_PER_WORD)
+#define EDCMR_TO_WORD_SHIFT(r) ((r) % EDCMRS_PER_WORD)
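
Each EDCR packs sixteen 2-bit edge-control fields and each EDCMR packs thirty-two 1-bit mask fields, so the helpers above convert a flat edge-line index into a register index plus a bit shift. A worked example for edge line 21:

	/* With EDCRS_PER_WORD = 16 and EDCMRS_PER_WORD = 32:
	 *   EDCR_TO_WORD_IDX(21)    = 21 / 16       = 1     (second EDCR)
	 *   EDCR_TO_WORD_SHIFT(21)  = (21 % 16) * 2 = 10    (bit offset)
	 *   EDCR_TO_WORD_MASK(21)   = 0x3 << 10     = 0xc00
	 *   EDCMR_TO_WORD_IDX(21)   = 21 / 32       = 0     (first EDCMR)
	 *   EDCMR_TO_WORD_SHIFT(21) = 21 % 32       = 21
	 */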
/* TPDM integration test registers */
#define TPDM_ITATBCNTRL (0xEF0)
@@ -40,6 +84,95 @@
#define TPDM_PIDR0_DS_IMPDEF BIT(0)
#define TPDM_PIDR0_DS_DSB BIT(1)
+#define TPDM_DSB_MAX_LINES 256
+/* MAX number of EDCR registers */
+#define TPDM_DSB_MAX_EDCR 16
+/* MAX number of EDCMR registers */
+#define TPDM_DSB_MAX_EDCMR 8
+/* MAX number of DSB patterns */
+#define TPDM_DSB_MAX_PATT 8
+/* MAX number of DSB MSRs */
+#define TPDM_DSB_MAX_MSR 32
+
+#define tpdm_simple_dataset_ro(name, mem, idx) \
+ (&((struct tpdm_dataset_attribute[]) { \
+ { \
+ __ATTR(name, 0444, tpdm_simple_dataset_show, NULL), \
+ mem, \
+ idx, \
+ } \
+ })[0].attr.attr)
+
+#define tpdm_simple_dataset_rw(name, mem, idx) \
+ (&((struct tpdm_dataset_attribute[]) { \
+ { \
+ __ATTR(name, 0644, tpdm_simple_dataset_show, \
+ tpdm_simple_dataset_store), \
+ mem, \
+ idx, \
+ } \
+ })[0].attr.attr)
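
Both helpers rely on a C99 compound literal: a one-element array of struct tpdm_dataset_attribute is materialized in place and the address of its embedded struct attribute is taken, letting a single show/store pair serve every indexed sysfs file. Roughly what DSB_PATT_ATTR(0) expands to:

	/* Approximate expansion of DSB_PATT_ATTR(0): the attribute array
	 * entry points at .attr.attr while (mem, idx) ride alongside. */
	&((struct tpdm_dataset_attribute[]) {
		{
			__ATTR(tpr0, 0644, tpdm_simple_dataset_show,
			       tpdm_simple_dataset_store),
			DSB_PATT,
			0,
		}
	})[0].attr.attr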
+
+#define DSB_EDGE_CTRL_ATTR(nr) \
+ tpdm_simple_dataset_ro(edcr##nr, \
+ DSB_EDGE_CTRL, nr)
+
+#define DSB_EDGE_CTRL_MASK_ATTR(nr) \
+ tpdm_simple_dataset_ro(edcmr##nr, \
+ DSB_EDGE_CTRL_MASK, nr)
+
+#define DSB_TRIG_PATT_ATTR(nr) \
+ tpdm_simple_dataset_rw(xpr##nr, \
+ DSB_TRIG_PATT, nr)
+
+#define DSB_TRIG_PATT_MASK_ATTR(nr) \
+ tpdm_simple_dataset_rw(xpmr##nr, \
+ DSB_TRIG_PATT_MASK, nr)
+
+#define DSB_PATT_ATTR(nr) \
+ tpdm_simple_dataset_rw(tpr##nr, \
+ DSB_PATT, nr)
+
+#define DSB_PATT_MASK_ATTR(nr) \
+ tpdm_simple_dataset_rw(tpmr##nr, \
+ DSB_PATT_MASK, nr)
+
+#define DSB_MSR_ATTR(nr) \
+ tpdm_simple_dataset_rw(msr##nr, \
+ DSB_MSR, nr)
+
+/**
+ * struct dsb_dataset - specifics associated to dsb dataset
+ * @mode: DSB programming mode
+ * @edge_ctrl_idx: Index number of the edge control
+ * @edge_ctrl: Save value for edge control
+ * @edge_ctrl_mask: Save value for edge control mask
+ * @patt_val: Save value for pattern
+ * @patt_mask: Save value for pattern mask
+ * @trig_patt: Save value for trigger pattern
+ * @trig_patt_mask: Save value for trigger pattern mask
+ * @msr: Save value for MSR
+ * @patt_ts: Enable/Disable pattern timestamp
+ * @patt_type: Set pattern type
+ * @trig_ts: Enable/Disable trigger timestamp.
+ * @trig_type: Enable/Disable trigger type.
+ */
+struct dsb_dataset {
+ u32 mode;
+ u32 edge_ctrl_idx;
+ u32 edge_ctrl[TPDM_DSB_MAX_EDCR];
+ u32 edge_ctrl_mask[TPDM_DSB_MAX_EDCMR];
+ u32 patt_val[TPDM_DSB_MAX_PATT];
+ u32 patt_mask[TPDM_DSB_MAX_PATT];
+ u32 trig_patt[TPDM_DSB_MAX_PATT];
+ u32 trig_patt_mask[TPDM_DSB_MAX_PATT];
+ u32 msr[TPDM_DSB_MAX_MSR];
+ bool patt_ts;
+ bool patt_type;
+ bool trig_ts;
+ bool trig_type;
+};
+
/**
* struct tpdm_drvdata - specifics associated to a TPDM component
* @base: memory mapped base address for this component.
@@ -48,6 +181,8 @@
* @spinlock: lock for the drvdata value.
* @enable: enable status of the component.
* @datasets: The datasets types present of the TPDM.
+ * @dsb: Specifics associated to TPDM DSB.
+ * @dsb_msr_num: Number of MSRs supported by the DSB TPDM.
*/
struct tpdm_drvdata {
@@ -57,6 +192,32 @@ struct tpdm_drvdata {
spinlock_t spinlock;
bool enable;
unsigned long datasets;
+ struct dsb_dataset *dsb;
+ u32 dsb_msr_num;
+};
+
+/* Enumerate members of various datasets */
+enum dataset_mem {
+ DSB_EDGE_CTRL,
+ DSB_EDGE_CTRL_MASK,
+ DSB_TRIG_PATT,
+ DSB_TRIG_PATT_MASK,
+ DSB_PATT,
+ DSB_PATT_MASK,
+ DSB_MSR,
+};
+
+/**
+ * struct tpdm_dataset_attribute - Record the member variable and
+ * index number of a dataset entry that is accessed through sysfs
+ * @attr: The device attribute
+ * @mem: The member in the dataset data structure
+ * @idx: The index number of the array data
+ */
+struct tpdm_dataset_attribute {
+ struct device_attribute attr;
+ enum dataset_mem mem;
+ u32 idx;
};
#endif /* _CORESIGHT_CORESIGHT_TPDM_H */
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index e20c1c6ac..613677648 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -1253,8 +1253,18 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
if (!desc.name)
goto cpu_clear;
-
- desc.pdata = coresight_get_platform_data(dev);
+ /*
+ * TRBE coresight devices do not need regular connection
+ * information, as the paths get built between all percpu
+ * sources and their respective percpu sink devices. However,
+ * coresight_register() expects device connections via the
+ * platform_data, which TRBE devices do not have. As they
+ * are not real ACPI devices, coresight_get_platform_data()
+ * ends up failing. Instead, allocate a dummy zeroed
+ * coresight_platform_data structure and assign that back
+ * into the device for that purpose.
+ */
+ desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
if (IS_ERR(desc.pdata))
goto cpu_clear;
@@ -1520,14 +1530,13 @@ probe_failed:
return ret;
}
-static int arm_trbe_device_remove(struct platform_device *pdev)
+static void arm_trbe_device_remove(struct platform_device *pdev)
{
struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);
arm_trbe_remove_cpuhp(drvdata);
arm_trbe_remove_coresight(drvdata);
arm_trbe_remove_irq(drvdata);
- return 0;
}
static const struct of_device_id arm_trbe_of_match[] = {
@@ -1536,14 +1545,23 @@ static const struct of_device_id arm_trbe_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
+#ifdef CONFIG_ACPI
+static const struct platform_device_id arm_trbe_acpi_match[] = {
+ { ARMV8_TRBE_PDEV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, arm_trbe_acpi_match);
+#endif
+
static struct platform_driver arm_trbe_driver = {
+ .id_table = ACPI_PTR(arm_trbe_acpi_match),
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_trbe_of_match),
.suppress_bind_attrs = true,
},
.probe = arm_trbe_device_probe,
- .remove = arm_trbe_device_remove,
+ .remove_new = arm_trbe_device_remove,
};
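
.remove_new is the transitional platform-driver callback that returns void; the driver core never acted on the old int return value anyway, so converted drivers simply drop the trailing "return 0". The shape of the conversion (the foo_* names are placeholders):

	/* Before: int-returning .remove, always 0 in practice */
	static int foo_remove(struct platform_device *pdev)
	{
		foo_teardown(platform_get_drvdata(pdev));
		return 0;
	}

	/* After: void-returning callback registered via .remove_new */
	static void foo_remove(struct platform_device *pdev)
	{
		foo_teardown(platform_get_drvdata(pdev));
	}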
static int __init arm_trbe_init(void)
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
index e915e749b..45202c48a 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.h
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -7,11 +7,13 @@
*
* Author: Anshuman Khandual <anshuman.khandual@arm.com>
*/
+#include <linux/acpi.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index 6e32d31a9..10e886455 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -97,27 +97,19 @@ static int smb_open(struct inode *inode, struct file *file)
{
struct smb_drv_data *drvdata = container_of(file->private_data,
struct smb_drv_data, miscdev);
- int ret = 0;
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
- if (drvdata->reading) {
- ret = -EBUSY;
- goto out;
- }
+ if (drvdata->reading)
+ return -EBUSY;
- if (atomic_read(&drvdata->csdev->refcnt)) {
- ret = -EBUSY;
- goto out;
- }
+ if (atomic_read(&drvdata->csdev->refcnt))
+ return -EBUSY;
smb_update_data_size(drvdata);
-
drvdata->reading = true;
-out:
- spin_unlock(&drvdata->spinlock);
- return ret;
+ return 0;
}
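
guard(spinlock)(&lock) comes from <linux/cleanup.h>: it takes the lock and, via the compiler's cleanup attribute, drops it automatically when the enclosing scope exits, which is what lets the error paths above become plain returns. A minimal sketch of the pattern (struct foo is illustrative):

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static int consume_if_idle(struct foo *f)
	{
		guard(spinlock)(&f->lock);	/* unlocked on every return path */

		if (f->busy)
			return -EBUSY;		/* no goto/unlock bookkeeping */

		f->busy = true;
		return 0;
	}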
static ssize_t smb_read(struct file *file, char __user *data, size_t len,
@@ -160,9 +152,8 @@ static int smb_release(struct inode *inode, struct file *file)
struct smb_drv_data *drvdata = container_of(file->private_data,
struct smb_drv_data, miscdev);
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
drvdata->reading = false;
- spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -255,19 +246,15 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
/* Do nothing, the trace data is being read by another interface now */
- if (drvdata->reading) {
- ret = -EBUSY;
- goto out;
- }
+ if (drvdata->reading)
+ return -EBUSY;
/* Do nothing, the SMB is already enabled as other mode */
- if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode) {
- ret = -EBUSY;
- goto out;
- }
+ if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode)
+ return -EBUSY;
switch (mode) {
case CS_MODE_SYSFS:
@@ -281,13 +268,10 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
}
if (ret)
- goto out;
+ return ret;
atomic_inc(&csdev->refcnt);
-
dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
-out:
- spin_unlock(&drvdata->spinlock);
return ret;
}
@@ -295,19 +279,14 @@ out:
static int smb_disable(struct coresight_device *csdev)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret = 0;
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
- if (drvdata->reading) {
- ret = -EBUSY;
- goto out;
- }
+ if (drvdata->reading)
+ return -EBUSY;
- if (atomic_dec_return(&csdev->refcnt)) {
- ret = -EBUSY;
- goto out;
- }
+ if (atomic_dec_return(&csdev->refcnt))
+ return -EBUSY;
/* Complain if we (somehow) got out of sync */
WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
@@ -317,12 +296,9 @@ static int smb_disable(struct coresight_device *csdev)
/* Dissociate from the target process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
-
dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
-out:
- spin_unlock(&drvdata->spinlock);
- return ret;
+ return 0;
}
static void *smb_alloc_buffer(struct coresight_device *csdev,
@@ -395,17 +371,17 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
struct smb_data_buffer *sdb = &drvdata->sdb;
struct cs_buffers *buf = sink_config;
- unsigned long data_size = 0;
+ unsigned long data_size;
bool lost = false;
if (!buf)
return 0;
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
/* Don't do anything if another tracer is using this sink. */
if (atomic_read(&csdev->refcnt) != 1)
- goto out;
+ return 0;
smb_disable_hw(drvdata);
smb_update_data_size(drvdata);
@@ -424,8 +400,6 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
smb_sync_perf_buffer(drvdata, buf, handle->head);
if (!buf->snapshot && lost)
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
-out:
- spin_unlock(&drvdata->spinlock);
return data_size;
}
@@ -601,15 +575,13 @@ static int smb_probe(struct platform_device *pdev)
return 0;
}
-static int smb_remove(struct platform_device *pdev)
+static void smb_remove(struct platform_device *pdev)
{
struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
smb_unregister_sink(drvdata);
smb_config_inport(&pdev->dev, false);
-
- return 0;
}
#ifdef CONFIG_ACPI
@@ -627,7 +599,7 @@ static struct platform_driver smb_driver = {
.suppress_bind_attrs = true,
},
.probe = smb_probe,
- .remove = smb_remove,
+ .remove_new = smb_remove,
};
module_platform_driver(smb_driver);
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index 24a1f7797..4bf04a977 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -183,6 +183,10 @@ static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt)
static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt)
{
writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ /* Mask the interrupt at the end of the trace */
+ writel(HISI_PTT_TRACE_INT_MASK_ALL, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
+
hisi_ptt->trace_ctrl.started = false;
}
@@ -270,15 +274,14 @@ static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop)
buf->pos += size;
/*
- * Just commit the traced data if we're going to stop. Otherwise if the
- * resident AUX buffer cannot contain the data of next trace buffer,
- * apply a new one.
+ * Always commit the data to the AUX buffer in time to make sure
+ * userspace gets enough time to consume the data.
+ *
+ * If we're not going to stop, apply for a new buffer and check
+ * whether there's enough room for the next trace.
*/
- if (stop) {
- perf_aux_output_end(handle, buf->pos);
- } else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
- perf_aux_output_end(handle, buf->pos);
-
+ perf_aux_output_end(handle, size);
+ if (!stop) {
buf = perf_aux_output_begin(handle, event);
if (!buf)
return -EINVAL;
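
This is the standard perf AUX ring-buffer handshake: each filled chunk is published with perf_aux_output_end() right away, and a new chunk is claimed with perf_aux_output_begin() while tracing continues. Schematically (copy_trace_data and the loop conditions are placeholders):

	buf = perf_aux_output_begin(handle, event);	/* claim space */
	while (buf && tracing) {
		size = copy_trace_data(buf);		/* fill the chunk */
		perf_aux_output_end(handle, size);	/* publish promptly */
		if (stopping)
			break;
		buf = perf_aux_output_begin(handle, event);
	}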
diff --git a/drivers/hwtracing/ptt/hisi_ptt.h b/drivers/hwtracing/ptt/hisi_ptt.h
index e17f045d7..46030aa88 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.h
+++ b/drivers/hwtracing/ptt/hisi_ptt.h
@@ -47,6 +47,7 @@
#define HISI_PTT_TRACE_INT_STAT 0x0890
#define HISI_PTT_TRACE_INT_STAT_MASK GENMASK(3, 0)
#define HISI_PTT_TRACE_INT_MASK 0x0894
+#define HISI_PTT_TRACE_INT_MASK_ALL GENMASK(3, 0)
#define HISI_PTT_TUNING_INT_STAT 0x0898
#define HISI_PTT_TUNING_INT_STAT_MASK BIT(0)
#define HISI_PTT_TRACE_WR_STS 0x08a0
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index ee83c4581..461eb23f9 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -477,7 +477,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter ali1535_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 55a9e93fb..307fb0666 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -390,7 +390,7 @@ static const struct i2c_algorithm ali1563_algorithm = {
static struct i2c_adapter ali1563_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &ali1563_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 0231c5be6..d2fa30deb 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -461,7 +461,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter ali15x3_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index ef1307a25..208310db9 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -285,7 +285,7 @@ static const struct i2c_algorithm smbus_algorithm = {
struct i2c_adapter amd756_smbus = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 1ed7e945b..42a9b1221 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -449,7 +449,7 @@ static int amd8111_probe(struct pci_dev *dev, const struct pci_device_id *id)
smbus->adapter.owner = THIS_MODULE;
snprintf(smbus->adapter.name, sizeof(smbus->adapter.name),
"SMBus2 AMD8111 adapter at %04x", smbus->base);
- smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ smbus->adapter.class = I2C_CLASS_HWMON;
smbus->adapter.algo = &smbus_algorithm;
smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 5511fd46a..ce8c4846b 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -445,6 +445,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_status);
irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
+ irq_handled = irq_status;
bus->cmd_err = ret;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
goto out_complete;
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 9a664abf7..4404b4aac 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -658,7 +658,7 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
/* register new adapter to i2c module... */
data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
- cpm->adap.nr = (data && len == 4) ? be32_to_cpup(data) : -1;
+ cpm->adap.nr = (data && len == 4) ? *data : -1;
result = i2c_add_numbered_adapter(&cpm->adap);
if (result < 0)
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index a7f6f3eaf..ae835000f 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -318,7 +318,7 @@ struct dw_i2c_dev {
#define AMD_UCSI_INTR_EN 0xd
#define TXGBE_TX_FIFO_DEPTH 4
-#define TXGBE_RX_FIFO_DEPTH 0
+#define TXGBE_RX_FIFO_DEPTH 1
struct i2c_dw_semaphore_callbacks {
int (*probe)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index b0f50dce9..cfe8665ca 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -188,7 +188,7 @@ static struct i2c_algo_pcf_data pcf_isa_data = {
static struct i2c_adapter pcf_isa_ops = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo_data = &pcf_isa_data,
.name = "i2c-elektor",
};
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index fb35a75fe..4f1411b1a 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -25,7 +25,6 @@ struct i2c_gpio_private_data {
struct i2c_algo_bit_data bit_data;
struct i2c_gpio_platform_data pdata;
#ifdef CONFIG_I2C_GPIO_FAULT_INJECTOR
- struct dentry *debug_dir;
/* these must be protected by bus lock */
struct completion scl_irq_completion;
u64 scl_irq_data;
@@ -72,7 +71,6 @@ static int i2c_gpio_getscl(void *data)
}
#ifdef CONFIG_I2C_GPIO_FAULT_INJECTOR
-static struct dentry *i2c_gpio_debug_dir;
#define setsda(bd, val) ((bd)->setsda((bd)->data, val))
#define setscl(bd, val) ((bd)->setscl((bd)->data, val))
@@ -258,41 +256,23 @@ static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv = platform_get_drvdata(pdev);
- /*
- * If there will be a debugfs-dir per i2c adapter somewhen, put the
- * 'fault-injector' dir there. Until then, we have a global dir with
- * all adapters as subdirs.
- */
- if (!i2c_gpio_debug_dir)
- i2c_gpio_debug_dir = debugfs_create_dir("i2c-fault-injector", NULL);
-
- priv->debug_dir = debugfs_create_dir(pdev->name, i2c_gpio_debug_dir);
-
init_completion(&priv->scl_irq_completion);
- debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->debug_dir,
+ debugfs_create_file_unsafe("incomplete_address_phase", 0200, priv->adap.debugfs,
priv, &fops_incomplete_addr_phase);
- debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->debug_dir,
+ debugfs_create_file_unsafe("incomplete_write_byte", 0200, priv->adap.debugfs,
priv, &fops_incomplete_write_byte);
if (priv->bit_data.getscl) {
- debugfs_create_file_unsafe("inject_panic", 0200, priv->debug_dir,
+ debugfs_create_file_unsafe("inject_panic", 0200, priv->adap.debugfs,
priv, &fops_inject_panic);
- debugfs_create_file_unsafe("lose_arbitration", 0200, priv->debug_dir,
+ debugfs_create_file_unsafe("lose_arbitration", 0200, priv->adap.debugfs,
priv, &fops_lose_arbitration);
}
- debugfs_create_file_unsafe("scl", 0600, priv->debug_dir, priv, &fops_scl);
- debugfs_create_file_unsafe("sda", 0600, priv->debug_dir, priv, &fops_sda);
-}
-
-static void i2c_gpio_fault_injector_exit(struct platform_device *pdev)
-{
- struct i2c_gpio_private_data *priv = platform_get_drvdata(pdev);
-
- debugfs_remove_recursive(priv->debug_dir);
+ debugfs_create_file_unsafe("scl", 0600, priv->adap.debugfs, priv, &fops_scl);
+ debugfs_create_file_unsafe("sda", 0600, priv->adap.debugfs, priv, &fops_sda);
}
#else
static inline void i2c_gpio_fault_injector_init(struct platform_device *pdev) {}
-static inline void i2c_gpio_fault_injector_exit(struct platform_device *pdev) {}
#endif /* CONFIG_I2C_GPIO_FAULT_INJECTOR */
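
The i2c core now creates a debugfs directory per adapter and exports it as adap->debugfs, so bus drivers can stop managing their own dentries; anything parented there disappears automatically with the adapter. The resulting pattern, in outline (the foo_* names are illustrative):

	static void foo_i2c_init_debugfs(struct foo_i2c *priv)
	{
		/* No private dentry to create or remove: hang the file off
		 * the directory the I2C core made for this adapter. */
		debugfs_create_u64("xfer_cnt", 0444, priv->adap.debugfs,
				   &priv->xfer_cnt);
	}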
/* Get i2c-gpio properties from DT or ACPI table */
@@ -444,7 +424,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
adap->algo_data = bit_data;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adap->class = I2C_CLASS_HWMON;
adap->dev.parent = dev;
device_set_node(&adap->dev, fwnode);
@@ -475,8 +455,6 @@ static void i2c_gpio_remove(struct platform_device *pdev)
struct i2c_gpio_private_data *priv;
struct i2c_adapter *adap;
- i2c_gpio_fault_injector_exit(pdev);
-
priv = platform_get_drvdata(pdev);
adap = &priv->adap;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 643a81688..274e987e4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1230,8 +1230,10 @@ static const struct {
* Additional individual entries were added after verification.
*/
{ "Latitude 5480", 0x29 },
+ { "Precision 3540", 0x29 },
{ "Vostro V131", 0x1d },
{ "Vostro 5568", 0x29 },
+ { "XPS 15 7590", 0x29 },
};
static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 408820319..7fb87b789 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -739,7 +739,7 @@ static int iic_probe(struct platform_device *ofdev)
adap->dev.of_node = of_node_get(np);
strscpy(adap->name, "IBM IIC", sizeof(adap->name));
i2c_set_adapdata(adap, dev);
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adap->class = I2C_CLASS_HWMON;
adap->algo = &iic_algo;
adap->timeout = HZ;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0951bfdc8..60e813137 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1406,7 +1406,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
PINCTRL_STATE_DEFAULT);
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
"gpio");
- rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
+ rinfo->sda_gpiod = devm_gpiod_get_optional(&pdev->dev, "sda", GPIOD_IN);
rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index f2f7ebeea..2e5f0165c 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -478,7 +478,7 @@ iop3xx_i2c_probe(struct platform_device *pdev)
memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
new_adapter->owner = THIS_MODULE;
- new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ new_adapter->class = I2C_CLASS_HWMON;
new_adapter->dev.parent = &pdev->dev;
new_adapter->dev.of_node = pdev->dev.of_node;
new_adapter->nr = pdev->id;
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 1dc1ceaa4..416a9968e 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -249,7 +249,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter sch_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index e01d75308..c3a529a73 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -283,8 +283,7 @@ static const struct i2c_algorithm kempld_i2c_algorithm = {
static const struct i2c_adapter kempld_i2c_adapter = {
.owner = THIS_MODULE,
.name = "i2c-kempld",
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD |
- I2C_CLASS_DEPRECATED,
+ .class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED,
.algo = &kempld_i2c_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 6fec64ea6..099291a04 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -477,7 +477,7 @@ static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext2 = {
static struct i2c_adapter mlxcpld_i2c_adapter = {
.owner = THIS_MODULE,
.name = "i2c-mlxcpld",
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &mlxcpld_i2c_algo,
.quirks = &mlxcpld_i2c_quirks,
.retries = MLXCPLD_I2C_RETR_NUM,
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 38d203d93..fab662e6b 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -349,7 +349,7 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
return -EBUSY;
}
smbus->adapter.owner = THIS_MODULE;
- smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ smbus->adapter.class = I2C_CLASS_HWMON;
smbus->adapter.algo = &smbus_algorithm;
smbus->adapter.algo_data = smbus;
smbus->adapter.dev.parent = &dev->dev;
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index ae4bae63a..54181b3f1 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -326,7 +326,6 @@ struct npcm_i2c {
u8 slv_rd_buf[MAX_I2C_HW_FIFO_SIZE];
u8 slv_wr_buf[MAX_I2C_HW_FIFO_SIZE];
#endif
- struct dentry *debugfs; /* debugfs device directory */
u64 ber_cnt;
u64 rec_succ_cnt;
u64 rec_fail_cnt;
@@ -2250,27 +2249,15 @@ static const struct i2c_algorithm npcm_i2c_algo = {
#endif
};
-/* i2c debugfs directory: used to keep health monitor of i2c devices */
-static struct dentry *npcm_i2c_debugfs_dir;
-
static void npcm_i2c_init_debugfs(struct platform_device *pdev,
struct npcm_i2c *bus)
{
- struct dentry *d;
-
- if (!npcm_i2c_debugfs_dir)
- return;
- d = debugfs_create_dir(dev_name(&pdev->dev), npcm_i2c_debugfs_dir);
- if (IS_ERR_OR_NULL(d))
- return;
- debugfs_create_u64("ber_cnt", 0444, d, &bus->ber_cnt);
- debugfs_create_u64("nack_cnt", 0444, d, &bus->nack_cnt);
- debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt);
- debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt);
- debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt);
- debugfs_create_u64("tx_complete_cnt", 0444, d, &bus->tx_complete_cnt);
-
- bus->debugfs = d;
+ debugfs_create_u64("ber_cnt", 0444, bus->adap.debugfs, &bus->ber_cnt);
+ debugfs_create_u64("nack_cnt", 0444, bus->adap.debugfs, &bus->nack_cnt);
+ debugfs_create_u64("rec_succ_cnt", 0444, bus->adap.debugfs, &bus->rec_succ_cnt);
+ debugfs_create_u64("rec_fail_cnt", 0444, bus->adap.debugfs, &bus->rec_fail_cnt);
+ debugfs_create_u64("timeout_cnt", 0444, bus->adap.debugfs, &bus->timeout_cnt);
+ debugfs_create_u64("tx_complete_cnt", 0444, bus->adap.debugfs, &bus->tx_complete_cnt);
}
static int npcm_i2c_probe_bus(struct platform_device *pdev)
@@ -2362,7 +2349,6 @@ static void npcm_i2c_remove_bus(struct platform_device *pdev)
unsigned long lock_flags;
struct npcm_i2c *bus = platform_get_drvdata(pdev);
- debugfs_remove_recursive(bus->debugfs);
spin_lock_irqsave(&bus->lock, lock_flags);
npcm_i2c_disable(bus);
spin_unlock_irqrestore(&bus->lock, lock_flags);
@@ -2385,28 +2371,7 @@ static struct platform_driver npcm_i2c_bus_driver = {
}
};
-static int __init npcm_i2c_init(void)
-{
- int ret;
-
- npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
-
- ret = platform_driver_register(&npcm_i2c_bus_driver);
- if (ret) {
- debugfs_remove_recursive(npcm_i2c_debugfs_dir);
- return ret;
- }
-
- return 0;
-}
-module_init(npcm_i2c_init);
-
-static void __exit npcm_i2c_exit(void)
-{
- platform_driver_unregister(&npcm_i2c_bus_driver);
- debugfs_remove_recursive(npcm_i2c_debugfs_dir);
-}
-module_exit(npcm_i2c_exit);
+module_platform_driver(npcm_i2c_bus_driver);
MODULE_AUTHOR("Avi Fishman <avi.fishman@gmail.com>");
MODULE_AUTHOR("Tali Perry <tali.perry@nuvoton.com>");
diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c
index cfc89e04e..77f90c743 100644
--- a/drivers/i2c/busses/i2c-pasemi-pci.c
+++ b/drivers/i2c/busses/i2c-pasemi-pci.c
@@ -56,7 +56,7 @@ static int pasemi_smb_pci_probe(struct pci_dev *dev,
if (!smbus->ioaddr)
return -EBUSY;
- smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ smbus->adapter.class = I2C_CLASS_HWMON;
error = pasemi_i2c_common_probe(smbus);
if (error)
return error;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 809fbd014..6a0392172 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -943,7 +943,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
}
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adap->class = I2C_CLASS_HWMON;
adap->algo = sb800_main ? &piix4_smbus_algorithm_sb800
: &smbus_algorithm;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 829ac053b..828aa2ea0 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -89,6 +89,7 @@
#define TMDMAE BIT(0) /* DMA Master Transmitted Enable */
/* ICCCR2 */
+#define FMPE BIT(7) /* Fast Mode Plus Enable */
#define CDFD BIT(2) /* CDF Disable */
#define HLSE BIT(1) /* HIGH/LOW Separate Control Enable */
#define SME BIT(0) /* SCL Mask Enable */
@@ -122,16 +123,18 @@
#define ID_NACK BIT(4)
#define ID_EPROTO BIT(5)
/* persistent flags */
+#define ID_P_FMPLUS BIT(27)
#define ID_P_NOT_ATOMIC BIT(28)
#define ID_P_HOST_NOTIFY BIT(29)
#define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED BIT(31)
-#define ID_P_MASK GENMASK(31, 28)
+#define ID_P_MASK GENMASK(31, 27)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
I2C_RCAR_GEN2,
I2C_RCAR_GEN3,
+ I2C_RCAR_GEN4,
};
struct rcar_i2c_priv {
@@ -148,6 +151,7 @@ struct rcar_i2c_priv {
u32 icccr;
u16 schd;
u16 scld;
+ u8 smd;
u8 recovery_icmcr; /* protected by adapter lock */
enum rcar_i2c_type devtype;
struct i2c_client *slave;
@@ -239,9 +243,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
if (priv->devtype < I2C_RCAR_GEN3) {
rcar_i2c_write(priv, ICCCR, priv->icccr);
} else {
- rcar_i2c_write(priv, ICCCR2, CDFD | HLSE | SME);
+ u32 icccr2 = CDFD | HLSE | SME;
+
+ if (priv->flags & ID_P_FMPLUS)
+ icccr2 |= FMPE;
+
+ rcar_i2c_write(priv, ICCCR2, icccr2);
rcar_i2c_write(priv, ICCCR, priv->icccr);
- rcar_i2c_write(priv, ICMPR, RCAR_DEFAULT_SMD);
+ rcar_i2c_write(priv, ICMPR, priv->smd);
rcar_i2c_write(priv, ICHPR, priv->schd);
rcar_i2c_write(priv, ICLPR, priv->scld);
rcar_i2c_write(priv, ICFBSCR, TCYC17);
@@ -278,6 +287,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
/* Fall back to previously used values if not supplied */
i2c_parse_fw_timings(dev, &t, false);
+ priv->smd = RCAR_DEFAULT_SMD;
/*
* calculate SCL clock
@@ -303,6 +313,11 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
if (cdf >= 1U << cdf_width)
goto err_no_val;
+ if (t.bus_freq_hz > I2C_MAX_FAST_MODE_FREQ && priv->devtype >= I2C_RCAR_GEN4)
+ priv->flags |= ID_P_FMPLUS;
+ else
+ priv->flags &= ~ID_P_FMPLUS;
+
/* On Gen3+, we use cdf only for the filters, not as a SCL divider */
ick = rate / (priv->devtype < I2C_RCAR_GEN3 ? (cdf + 1) : 1);
@@ -344,30 +359,30 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
* x as a base value for the SCLD/SCHD ratio:
*
* SCL = clkp / (8 + 2 * SMD + SCLD + SCHD + F[(ticf + tr + intd) * clkp])
- * SCL = clkp / (8 + 2 * RCAR_DEFAULT_SMD + RCAR_SCLD_RATIO * x
+ * SCL = clkp / (8 + 2 * SMD + RCAR_SCLD_RATIO * x
* + RCAR_SCHD_RATIO * x + F[...])
*
* with: sum_ratio = RCAR_SCLD_RATIO + RCAR_SCHD_RATIO
- * and: smd = RCAR_DEFAULT_SMD
*
* SCL = clkp / (8 + 2 * smd + sum_ratio * x + F[...])
* 8 + 2 * smd + sum_ratio * x + F[...] = clkp / SCL
* x = ((clkp / SCL) - 8 - 2 * smd - F[...]) / sum_ratio
*/
x = DIV_ROUND_UP(rate, t.bus_freq_hz ?: 1);
- x = DIV_ROUND_UP(x - 8 - 2 * RCAR_DEFAULT_SMD - round, sum_ratio);
- scl = rate / (8 + 2 * RCAR_DEFAULT_SMD + sum_ratio * x + round);
+ x = DIV_ROUND_UP(x - 8 - 2 * priv->smd - round, sum_ratio);
+ scl = rate / (8 + 2 * priv->smd + sum_ratio * x + round);
- /* Bail out if values don't fit into 16 bit or SMD became too large */
- if (x * RCAR_SCLD_RATIO > 0xffff || RCAR_DEFAULT_SMD > x * RCAR_SCHD_RATIO)
+ if (x == 0 || x * RCAR_SCLD_RATIO > 0xffff)
goto err_no_val;
priv->icccr = cdf;
priv->schd = RCAR_SCHD_RATIO * x;
priv->scld = RCAR_SCLD_RATIO * x;
+ if (priv->smd >= priv->schd)
+ priv->smd = priv->schd - 1;
- dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u SCHD %u SCLD %u\n",
- scl, t.bus_freq_hz, rate, round, cdf, priv->schd, priv->scld);
+ dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u SCHD %u SCLD %u SMD %u\n",
+ scl, t.bus_freq_hz, rate, round, cdf, priv->schd, priv->scld, priv->smd);
}
return 0;
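
A quick numeric check of the formula above, with purely illustrative inputs (smd = 10 assumed, the 5:4 SCLD:SCHD split taken as sum_ratio = 9, and the F[...] rounding term ignored):

	/* clkp = 12.5 MHz, target SCL = 100 kHz:
	 *   clkp / SCL = 125
	 *   x    = DIV_ROUND_UP(125 - 8 - 2*10, 9) = DIV_ROUND_UP(97, 9) = 11
	 *   SCHD = 4 * 11 = 44,  SCLD = 5 * 11 = 55
	 *   SCL  = 12.5 MHz / (8 + 2*10 + 9*11) = 12.5 MHz / 127 ~= 98.4 kHz
	 * Rounding x up keeps the achieved SCL at or below the target.
	 */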
@@ -431,8 +446,8 @@ static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
- /* Gen3 can only do one RXDMA per transfer and we just completed it */
- if (priv->devtype == I2C_RCAR_GEN3 &&
+ /* Gen3+ can only do one RXDMA per transfer and we just completed it */
+ if (priv->devtype >= I2C_RCAR_GEN3 &&
priv->dma_direction == DMA_FROM_DEVICE)
priv->flags |= ID_P_NO_RXDMA;
@@ -886,8 +901,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
if (ret < 0)
goto out;
- /* Gen3 needs a reset before allowing RXDMA once */
- if (priv->devtype == I2C_RCAR_GEN3) {
+ /* Gen3+ needs a reset. That also allows RXDMA once */
+ if (priv->devtype >= I2C_RCAR_GEN3) {
priv->flags &= ~ID_P_NO_RXDMA;
ret = rcar_i2c_do_reset(priv);
if (ret)
@@ -1072,10 +1087,12 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
{ .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
+ /* S4 has no FM+ bit */
+ { .compatible = "renesas,i2c-r8a779f0", .data = (void *)I2C_RCAR_GEN3 },
{ .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
{ .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
- { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN3 },
+ { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN4 },
{},
};
MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
@@ -1151,7 +1168,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (of_property_read_bool(dev->of_node, "smbus"))
priv->flags |= ID_P_HOST_NOTIFY;
- if (priv->devtype == I2C_RCAR_GEN3) {
+ if (priv->devtype >= I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc)) {
ret = PTR_ERR(priv->rstc);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index c0fe96a4f..275f7c421 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -76,6 +76,7 @@
#define QUIRK_HDMIPHY (1 << 1)
#define QUIRK_NO_GPIO (1 << 2)
#define QUIRK_POLL (1 << 3)
+#define QUIRK_ATOMIC (1 << 4)
/* Max time to wait for bus to become idle after a xfer (in us) */
#define S3C2410_IDLE_TIMEOUT 5000
@@ -174,7 +175,7 @@ static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
if (ret)
i2c->msg_idx = ret;
- if (!(i2c->quirks & QUIRK_POLL))
+ if (!(i2c->quirks & (QUIRK_POLL | QUIRK_ATOMIC)))
wake_up(&i2c->wait);
}
@@ -703,7 +704,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
s3c24xx_i2c_enable_irq(i2c);
s3c24xx_i2c_message_start(i2c, msgs);
- if (i2c->quirks & QUIRK_POLL) {
+ if (i2c->quirks & (QUIRK_POLL | QUIRK_ATOMIC)) {
while ((i2c->msg_num != 0) && is_ack(i2c)) {
unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -775,6 +776,21 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
return -EREMOTEIO;
}
+static int s3c24xx_i2c_xfer_atomic(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct s3c24xx_i2c *i2c = (struct s3c24xx_i2c *)adap->algo_data;
+ int ret;
+
+ disable_irq(i2c->irq);
+ i2c->quirks |= QUIRK_ATOMIC;
+ ret = s3c24xx_i2c_xfer(adap, msgs, num);
+ i2c->quirks &= ~QUIRK_ATOMIC;
+ enable_irq(i2c->irq);
+
+ return ret;
+}
+
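
master_xfer_atomic is the callback the i2c core selects when a transfer must run with interrupts disabled (late shutdown or reboot paths, e.g. a final write to a PMIC). Here it reuses the regular xfer routine but temporarily sets QUIRK_ATOMIC so completion is detected by the existing polling loop rather than wake_up(). Drivers never call it directly; the core dispatch looks roughly like this (simplified from i2c-core-base.c):

	if (adap->algo->master_xfer_atomic && i2c_in_atomic_xfer_mode())
		ret = adap->algo->master_xfer_atomic(adap, msgs, num);
	else
		ret = adap->algo->master_xfer(adap, msgs, num);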
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
@@ -785,6 +801,7 @@ static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
/* i2c bus registration info */
static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
.master_xfer = s3c24xx_i2c_xfer,
+ .master_xfer_atomic = s3c24xx_i2c_xfer_atomic,
.functionality = s3c24xx_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 421735acf..d7af8e0d7 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -385,7 +385,7 @@ static int smbus_cmi_probe(struct platform_device *device)
smbus_cmi->adapter.owner = THIS_MODULE;
smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
smbus_cmi->adapter.algo_data = smbus_cmi;
- smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ smbus_cmi->adapter.class = I2C_CLASS_HWMON;
smbus_cmi->adapter.dev.parent = &device->dev;
ret = i2c_add_adapter(&smbus_cmi->adapter);
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 1ad2a2615..8a043f5fc 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -477,7 +477,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
id->adap.nr = pdev->id;
id->adap.algo = &sh7760_i2c_algo;
- id->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ id->adap.class = I2C_CLASS_HWMON;
id->adap.retries = 3;
id->adap.algo_data = id;
id->adap.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 8f71f01cb..49f8f4f1b 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -142,7 +142,7 @@ static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
static struct i2c_adapter sibyte_board_adapter[2] = {
{
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = NULL,
.algo_data = &sibyte_board_data[0],
.nr = 0,
@@ -150,7 +150,7 @@ static struct i2c_adapter sibyte_board_adapter[2] = {
},
{
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = NULL,
.algo_data = &sibyte_board_data[1],
.nr = 1,
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 486f1e9df..32476dc10 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -353,7 +353,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter sis5595_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 87d56250d..3505cf29c 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -493,7 +493,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter sis630_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
.retries = 3
};
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index cde800398..77529dda6 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -228,7 +228,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter sis96x_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 983509936..012104522 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -50,6 +50,7 @@
#define STM32F7_I2C_TXDR 0x28
/* STM32F7 I2C control 1 */
+#define STM32_I2C_CR1_FMP BIT(24)
#define STM32F7_I2C_CR1_PECEN BIT(23)
#define STM32F7_I2C_CR1_ALERTEN BIT(22)
#define STM32F7_I2C_CR1_SMBHEN BIT(20)
@@ -226,6 +227,8 @@ struct stm32f7_i2c_spec {
* @rise_time: Rise time (ns)
* @fall_time: Fall time (ns)
* @fmp_clr_offset: Fast Mode Plus clear register offset from set register
+ * @single_it_line: Only a single IT line is used for both events/errors
+ * @fmp_cr1_bit: Fast Mode Plus control is done via a bit in CR1
*/
struct stm32f7_i2c_setup {
u32 speed_freq;
@@ -233,6 +236,8 @@ struct stm32f7_i2c_setup {
u32 rise_time;
u32 fall_time;
u32 fmp_clr_offset;
+ bool single_it_line;
+ bool fmp_cr1_bit;
};
/**
@@ -418,6 +423,13 @@ static const struct stm32f7_i2c_setup stm32mp13_setup = {
.fmp_clr_offset = 0x4,
};
+static const struct stm32f7_i2c_setup stm32mp25_setup = {
+ .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
+ .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
+ .single_it_line = true,
+ .fmp_cr1_bit = true,
+};
+
static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask)
{
writel_relaxed(readl_relaxed(reg) | mask, reg);
@@ -1419,15 +1431,13 @@ static bool stm32f7_i2c_is_slave_busy(struct stm32f7_i2c_dev *i2c_dev)
return i == busy;
}
-static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev)
+static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev, u32 status)
{
void __iomem *base = i2c_dev->base;
- u32 cr2, status, mask;
+ u32 cr2, mask;
u8 val;
int ret;
- status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
-
/* Slave transmitter mode */
if (status & STM32F7_I2C_ISR_TXIS) {
i2c_slave_event(i2c_dev->slave_running,
@@ -1494,23 +1504,81 @@ static irqreturn_t stm32f7_i2c_slave_isr_event(struct stm32f7_i2c_dev *i2c_dev)
return IRQ_HANDLED;
}
-static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
+static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev, u32 status)
{
- struct stm32f7_i2c_dev *i2c_dev = data;
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
+ u16 addr = f7_msg->addr;
void __iomem *base = i2c_dev->base;
- u32 status, mask;
- int ret = IRQ_HANDLED;
+ struct device *dev = i2c_dev->dev;
+ struct stm32_i2c_dma *dma = i2c_dev->dma;
- /* Check if the interrupt if for a slave device */
- if (!i2c_dev->master_mode) {
- ret = stm32f7_i2c_slave_isr_event(i2c_dev);
- return ret;
+ /* Bus error */
+ if (status & STM32F7_I2C_ISR_BERR) {
+ dev_err(dev, "Bus error accessing addr 0x%x\n", addr);
+ writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
+ stm32f7_i2c_release_bus(&i2c_dev->adap);
+ f7_msg->result = -EIO;
+ }
+
+ /* Arbitration loss */
+ if (status & STM32F7_I2C_ISR_ARLO) {
+ dev_dbg(dev, "Arbitration loss accessing addr 0x%x\n", addr);
+ writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
+ f7_msg->result = -EAGAIN;
+ }
+
+ if (status & STM32F7_I2C_ISR_PECERR) {
+ dev_err(dev, "PEC error in reception accessing addr 0x%x\n", addr);
+ writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
+ f7_msg->result = -EINVAL;
}
+ if (status & STM32F7_I2C_ISR_ALERT) {
+ dev_dbg(dev, "SMBus alert received\n");
+ writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR);
+ i2c_handle_smbus_alert(i2c_dev->alert->ara);
+ return IRQ_HANDLED;
+ }
+
+ if (!i2c_dev->slave_running) {
+ u32 mask;
+ /* Disable interrupts */
+ if (stm32f7_i2c_is_slave_registered(i2c_dev))
+ mask = STM32F7_I2C_XFER_IRQ_MASK;
+ else
+ mask = STM32F7_I2C_ALL_IRQ_MASK;
+ stm32f7_i2c_disable_irq(i2c_dev, mask);
+ }
+
+ /* Disable dma */
+ if (i2c_dev->use_dma) {
+ stm32f7_i2c_disable_dma_req(i2c_dev);
+ dmaengine_terminate_async(dma->chan_using);
+ }
+
+ i2c_dev->master_mode = false;
+ complete(&i2c_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+#define STM32F7_ERR_EVENTS (STM32F7_I2C_ISR_BERR | STM32F7_I2C_ISR_ARLO |\
+ STM32F7_I2C_ISR_PECERR | STM32F7_I2C_ISR_ALERT)
+static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
+{
+ struct stm32f7_i2c_dev *i2c_dev = data;
+ u32 status;
+
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+ /*
+ * Check if the interrupt is for a slave device or related
+ * to error flags (in case of single IT line mode)
+ */
+ if (!i2c_dev->master_mode ||
+ (i2c_dev->setup.single_it_line && (status & STM32F7_ERR_EVENTS)))
+ return IRQ_WAKE_THREAD;
+
/* Tx empty */
if (status & STM32F7_I2C_ISR_TXIS)
stm32f7_i2c_write_tx_data(i2c_dev);
@@ -1519,6 +1587,33 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
if (status & STM32F7_I2C_ISR_RXNE)
stm32f7_i2c_read_rx_data(i2c_dev);
+ /* Wake up the thread if other flags are raised */
+ if (status &
+ (STM32F7_I2C_ISR_NACKF | STM32F7_I2C_ISR_STOPF |
+ STM32F7_I2C_ISR_TC | STM32F7_I2C_ISR_TCR))
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
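
The hard handler is now deliberately thin: it drains TX/RX in interrupt context and defers NACK/STOP/transfer-complete handling (and, in single-IT-line mode, error handling) to a thread by returning IRQ_WAKE_THREAD, the contract of devm_request_threaded_irq(). In outline (the foo_* names are placeholders):

	static irqreturn_t foo_isr(int irq, void *data)
	{
		u32 status = foo_read_status(data);	/* cheap register read */

		if (status & FOO_FIFO_FLAGS)
			foo_handle_fifo(data);		/* atomic-safe work only */

		if (status & FOO_SLOW_FLAGS)
			return IRQ_WAKE_THREAD;	/* defer to the threaded handler */

		return IRQ_HANDLED;
	}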
+
+static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
+{
+ struct stm32f7_i2c_dev *i2c_dev = data;
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ struct stm32_i2c_dma *dma = i2c_dev->dma;
+ void __iomem *base = i2c_dev->base;
+ u32 status, mask;
+ int ret;
+
+ status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+
+ if (!i2c_dev->master_mode)
+ return stm32f7_i2c_slave_isr_event(i2c_dev, status);
+
+ /* Handle errors in case this handler is used for both events and errors */
+ if (i2c_dev->setup.single_it_line && (status & STM32F7_ERR_EVENTS))
+ return stm32f7_i2c_handle_isr_errs(i2c_dev, status);
+
/* NACK received */
if (status & STM32F7_I2C_ISR_NACKF) {
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
@@ -1531,33 +1626,28 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
f7_msg->result = -ENXIO;
}
- /* STOP detection flag */
- if (status & STM32F7_I2C_ISR_STOPF) {
- /* Disable interrupts */
- if (stm32f7_i2c_is_slave_registered(i2c_dev))
- mask = STM32F7_I2C_XFER_IRQ_MASK;
+ if (status & STM32F7_I2C_ISR_TCR) {
+ if (f7_msg->smbus)
+ stm32f7_i2c_smbus_reload(i2c_dev);
else
- mask = STM32F7_I2C_ALL_IRQ_MASK;
- stm32f7_i2c_disable_irq(i2c_dev, mask);
-
- /* Clear STOP flag */
- writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
-
- if (i2c_dev->use_dma && !f7_msg->result) {
- ret = IRQ_WAKE_THREAD;
- } else {
- i2c_dev->master_mode = false;
- complete(&i2c_dev->complete);
- }
+ stm32f7_i2c_reload(i2c_dev);
}
/* Transfer complete */
if (status & STM32F7_I2C_ISR_TC) {
+ /* Wait for dma transfer completion before sending next message */
+ if (i2c_dev->use_dma && !f7_msg->result) {
+ ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
+ if (!ret) {
+ dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
+ stm32f7_i2c_disable_dma_req(i2c_dev);
+ dmaengine_terminate_async(dma->chan_using);
+ f7_msg->result = -ETIMEDOUT;
+ }
+ }
if (f7_msg->stop) {
mask = STM32F7_I2C_CR2_STOP;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
- } else if (i2c_dev->use_dma && !f7_msg->result) {
- ret = IRQ_WAKE_THREAD;
} else if (f7_msg->smbus) {
stm32f7_i2c_smbus_rep_start(i2c_dev);
} else {
@@ -1567,47 +1657,18 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
}
}
- if (status & STM32F7_I2C_ISR_TCR) {
- if (f7_msg->smbus)
- stm32f7_i2c_smbus_reload(i2c_dev);
+ /* STOP detection flag */
+ if (status & STM32F7_I2C_ISR_STOPF) {
+ /* Disable interrupts */
+ if (stm32f7_i2c_is_slave_registered(i2c_dev))
+ mask = STM32F7_I2C_XFER_IRQ_MASK;
else
- stm32f7_i2c_reload(i2c_dev);
- }
-
- return ret;
-}
-
-static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
-{
- struct stm32f7_i2c_dev *i2c_dev = data;
- struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
- u32 status;
- int ret;
-
- /*
- * Wait for dma transfer completion before sending next message or
- * notity the end of xfer to the client
- */
- ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
- if (!ret) {
- dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- f7_msg->result = -ETIMEDOUT;
- }
+ mask = STM32F7_I2C_ALL_IRQ_MASK;
+ stm32f7_i2c_disable_irq(i2c_dev, mask);
- status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+ /* Clear STOP flag */
+ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
- if (status & STM32F7_I2C_ISR_TC) {
- if (f7_msg->smbus) {
- stm32f7_i2c_smbus_rep_start(i2c_dev);
- } else {
- i2c_dev->msg_id++;
- i2c_dev->msg++;
- stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg);
- }
- } else {
i2c_dev->master_mode = false;
complete(&i2c_dev->complete);
}
@@ -1615,68 +1676,14 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
+static irqreturn_t stm32f7_i2c_isr_error_thread(int irq, void *data)
{
struct stm32f7_i2c_dev *i2c_dev = data;
- struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
- void __iomem *base = i2c_dev->base;
- struct device *dev = i2c_dev->dev;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
u32 status;
status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
- /* Bus error */
- if (status & STM32F7_I2C_ISR_BERR) {
- dev_err(dev, "<%s>: Bus error accessing addr 0x%x\n",
- __func__, f7_msg->addr);
- writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
- stm32f7_i2c_release_bus(&i2c_dev->adap);
- f7_msg->result = -EIO;
- }
-
- /* Arbitration loss */
- if (status & STM32F7_I2C_ISR_ARLO) {
- dev_dbg(dev, "<%s>: Arbitration loss accessing addr 0x%x\n",
- __func__, f7_msg->addr);
- writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
- f7_msg->result = -EAGAIN;
- }
-
- if (status & STM32F7_I2C_ISR_PECERR) {
- dev_err(dev, "<%s>: PEC error in reception accessing addr 0x%x\n",
- __func__, f7_msg->addr);
- writel_relaxed(STM32F7_I2C_ICR_PECCF, base + STM32F7_I2C_ICR);
- f7_msg->result = -EINVAL;
- }
-
- if (status & STM32F7_I2C_ISR_ALERT) {
- dev_dbg(dev, "<%s>: SMBus alert received\n", __func__);
- writel_relaxed(STM32F7_I2C_ICR_ALERTCF, base + STM32F7_I2C_ICR);
- i2c_handle_smbus_alert(i2c_dev->alert->ara);
- return IRQ_HANDLED;
- }
-
- if (!i2c_dev->slave_running) {
- u32 mask;
- /* Disable interrupts */
- if (stm32f7_i2c_is_slave_registered(i2c_dev))
- mask = STM32F7_I2C_XFER_IRQ_MASK;
- else
- mask = STM32F7_I2C_ALL_IRQ_MASK;
- stm32f7_i2c_disable_irq(i2c_dev, mask);
- }
-
- /* Disable dma */
- if (i2c_dev->use_dma) {
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- }
-
- i2c_dev->master_mode = false;
- complete(&i2c_dev->complete);
-
- return IRQ_HANDLED;
+ return stm32f7_i2c_handle_isr_errs(i2c_dev, status);
}
static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev)
@@ -2012,23 +2019,27 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
static int stm32f7_i2c_write_fm_plus_bits(struct stm32f7_i2c_dev *i2c_dev,
bool enable)
{
- int ret;
+ int ret = 0;
if (i2c_dev->bus_rate <= I2C_MAX_FAST_MODE_FREQ ||
- IS_ERR_OR_NULL(i2c_dev->regmap))
+ (!i2c_dev->setup.fmp_cr1_bit && IS_ERR_OR_NULL(i2c_dev->regmap)))
/* Optional */
return 0;
- if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg)
- ret = regmap_update_bits(i2c_dev->regmap,
- i2c_dev->fmp_sreg,
- i2c_dev->fmp_mask,
- enable ? i2c_dev->fmp_mask : 0);
- else
- ret = regmap_write(i2c_dev->regmap,
- enable ? i2c_dev->fmp_sreg :
- i2c_dev->fmp_creg,
- i2c_dev->fmp_mask);
+ if (i2c_dev->setup.fmp_cr1_bit) {
+ if (enable)
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32_I2C_CR1_FMP);
+ else
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32_I2C_CR1_FMP);
+ } else {
+ if (i2c_dev->fmp_sreg == i2c_dev->fmp_creg)
+ ret = regmap_update_bits(i2c_dev->regmap, i2c_dev->fmp_sreg,
+ i2c_dev->fmp_mask, enable ? i2c_dev->fmp_mask : 0);
+ else
+ ret = regmap_write(i2c_dev->regmap,
+ enable ? i2c_dev->fmp_sreg : i2c_dev->fmp_creg,
+ i2c_dev->fmp_mask);
+ }
return ret;
}
@@ -2162,6 +2173,13 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
+ setup = of_device_get_match_data(&pdev->dev);
+ if (!setup) {
+ dev_err(&pdev->dev, "Can't get device data\n");
+ return -ENODEV;
+ }
+ i2c_dev->setup = *setup;
+
i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
@@ -2171,10 +2189,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
if (irq_event < 0)
return irq_event;
- irq_error = platform_get_irq(pdev, 1);
- if (irq_error < 0)
- return irq_error;
-
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
"wakeup-source");
@@ -2199,26 +2213,22 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
stm32f7_i2c_isr_event_thread,
IRQF_ONESHOT,
pdev->name, i2c_dev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq event %i\n",
- irq_event);
- return ret;
- }
-
- ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0,
- pdev->name, i2c_dev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq error %i\n",
- irq_error);
- return ret;
- }
-
- setup = of_device_get_match_data(&pdev->dev);
- if (!setup) {
- dev_err(&pdev->dev, "Can't get device data\n");
- return -ENODEV;
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request irq event\n");
+
+ if (!i2c_dev->setup.single_it_line) {
+ irq_error = platform_get_irq(pdev, 1);
+ if (irq_error < 0)
+ return irq_error;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq_error,
+ NULL,
+ stm32f7_i2c_isr_error_thread,
+ IRQF_ONESHOT,
+ pdev->name, i2c_dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request irq error\n");
}
- i2c_dev->setup = *setup;
ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
@@ -2226,9 +2236,12 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
/* Setup Fast mode plus if necessary */
if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) {
- ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
- if (ret)
- return ret;
+ if (!i2c_dev->setup.fmp_cr1_bit) {
+ ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
+ if (ret)
+ return ret;
+ }
+
ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
if (ret)
return ret;
@@ -2507,6 +2520,7 @@ static const struct of_device_id stm32f7_i2c_match[] = {
{ .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
{ .compatible = "st,stm32mp15-i2c", .data = &stm32mp15_setup},
{ .compatible = "st,stm32mp13-i2c", .data = &stm32mp13_setup},
+ { .compatible = "st,stm32mp25-i2c", .data = &stm32mp25_setup},
{},
};
MODULE_DEVICE_TABLE(of, stm32f7_i2c_match);
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index ad4f09c7f..7ed29992a 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -70,7 +70,7 @@ static struct i2c_algo_bit_data bit_data = {
static struct i2c_adapter vt586b_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.name = "VIA i2c",
.algo_data = &bit_data,
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 970ccdcbb..2cc7bba3b 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -304,7 +304,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter vt596_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index 76118abc6..198afee52 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -74,9 +74,6 @@
#define MCR_APB_96M 7
#define MCR_APB_166M 12
-#define I2C_MODE_STANDARD 0
-#define I2C_MODE_FAST 1
-
#define WMT_I2C_TIMEOUT (msecs_to_jiffies(1000))
struct wmt_i2c_dev {
@@ -85,7 +82,7 @@ struct wmt_i2c_dev {
struct device *dev;
void __iomem *base;
struct clk *clk;
- int mode;
+ u16 tcr;
int irq;
u16 cmd_status;
};
@@ -109,6 +106,12 @@ static int wmt_i2c_wait_bus_not_busy(struct wmt_i2c_dev *i2c_dev)
static int wmt_check_status(struct wmt_i2c_dev *i2c_dev)
{
int ret = 0;
+ unsigned long wait_result;
+
+ wait_result = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(500));
+ if (!wait_result)
+ return -ETIMEDOUT;
if (i2c_dev->cmd_status & ISR_NACK_ADDR)
ret = -EIO;
@@ -119,21 +122,13 @@ static int wmt_check_status(struct wmt_i2c_dev *i2c_dev)
return ret;
}
-static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
+static int wmt_i2c_write(struct wmt_i2c_dev *i2c_dev, struct i2c_msg *pmsg,
int last)
{
- struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
- u16 val, tcr_val;
+ u16 val, tcr_val = i2c_dev->tcr;
int ret;
- unsigned long wait_result;
int xfer_len = 0;
- if (!(pmsg->flags & I2C_M_NOSTART)) {
- ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
- if (ret < 0)
- return ret;
- }
-
if (pmsg->len == 0) {
/*
* We still need to run through the while (..) once, so
@@ -148,20 +143,12 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
if (!(pmsg->flags & I2C_M_NOSTART)) {
val = readw(i2c_dev->base + REG_CR);
val &= ~CR_TX_END;
- writew(val, i2c_dev->base + REG_CR);
-
- val = readw(i2c_dev->base + REG_CR);
val |= CR_CPU_RDY;
writew(val, i2c_dev->base + REG_CR);
}
reinit_completion(&i2c_dev->complete);
- if (i2c_dev->mode == I2C_MODE_STANDARD)
- tcr_val = TCR_STANDARD_MODE;
- else
- tcr_val = TCR_FAST_MODE;
-
tcr_val |= (TCR_MASTER_WRITE | (pmsg->addr & TCR_SLAVE_ADDR_MASK));
writew(tcr_val, i2c_dev->base + REG_TCR);
@@ -173,12 +160,6 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
}
while (xfer_len < pmsg->len) {
- wait_result = wait_for_completion_timeout(&i2c_dev->complete,
- msecs_to_jiffies(500));
-
- if (wait_result == 0)
- return -ETIMEDOUT;
-
ret = wmt_check_status(i2c_dev);
if (ret)
return ret;
@@ -210,47 +191,24 @@ static int wmt_i2c_write(struct i2c_adapter *adap, struct i2c_msg *pmsg,
return 0;
}
-static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
- int last)
+static int wmt_i2c_read(struct wmt_i2c_dev *i2c_dev, struct i2c_msg *pmsg)
{
- struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
- u16 val, tcr_val;
+ u16 val, tcr_val = i2c_dev->tcr;
int ret;
- unsigned long wait_result;
u32 xfer_len = 0;
- if (!(pmsg->flags & I2C_M_NOSTART)) {
- ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
- if (ret < 0)
- return ret;
- }
-
val = readw(i2c_dev->base + REG_CR);
- val &= ~CR_TX_END;
- writew(val, i2c_dev->base + REG_CR);
+ val &= ~(CR_TX_END | CR_TX_NEXT_NO_ACK);
- val = readw(i2c_dev->base + REG_CR);
- val &= ~CR_TX_NEXT_NO_ACK;
- writew(val, i2c_dev->base + REG_CR);
-
- if (!(pmsg->flags & I2C_M_NOSTART)) {
- val = readw(i2c_dev->base + REG_CR);
+ if (!(pmsg->flags & I2C_M_NOSTART))
val |= CR_CPU_RDY;
- writew(val, i2c_dev->base + REG_CR);
- }
- if (pmsg->len == 1) {
- val = readw(i2c_dev->base + REG_CR);
+ if (pmsg->len == 1)
val |= CR_TX_NEXT_NO_ACK;
- writew(val, i2c_dev->base + REG_CR);
- }
- reinit_completion(&i2c_dev->complete);
+ writew(val, i2c_dev->base + REG_CR);
- if (i2c_dev->mode == I2C_MODE_STANDARD)
- tcr_val = TCR_STANDARD_MODE;
- else
- tcr_val = TCR_FAST_MODE;
+ reinit_completion(&i2c_dev->complete);
tcr_val |= TCR_MASTER_READ | (pmsg->addr & TCR_SLAVE_ADDR_MASK);
@@ -263,12 +221,6 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
}
while (xfer_len < pmsg->len) {
- wait_result = wait_for_completion_timeout(&i2c_dev->complete,
- msecs_to_jiffies(500));
-
- if (!wait_result)
- return -ETIMEDOUT;
-
ret = wmt_check_status(i2c_dev);
if (ret)
return ret;
@@ -276,15 +228,10 @@ static int wmt_i2c_read(struct i2c_adapter *adap, struct i2c_msg *pmsg,
pmsg->buf[xfer_len] = readw(i2c_dev->base + REG_CDR) >> 8;
xfer_len++;
- if (xfer_len == pmsg->len - 1) {
- val = readw(i2c_dev->base + REG_CR);
- val |= (CR_TX_NEXT_NO_ACK | CR_CPU_RDY);
- writew(val, i2c_dev->base + REG_CR);
- } else {
- val = readw(i2c_dev->base + REG_CR);
- val |= CR_CPU_RDY;
- writew(val, i2c_dev->base + REG_CR);
- }
+ val = readw(i2c_dev->base + REG_CR) | CR_CPU_RDY;
+ if (xfer_len == pmsg->len - 1)
+ val |= CR_TX_NEXT_NO_ACK;
+ writew(val, i2c_dev->base + REG_CR);
}
return 0;
@@ -295,17 +242,22 @@ static int wmt_i2c_xfer(struct i2c_adapter *adap,
int num)
{
struct i2c_msg *pmsg;
- int i, is_last;
+ int i;
int ret = 0;
+ struct wmt_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
for (i = 0; ret >= 0 && i < num; i++) {
- is_last = ((i + 1) == num);
-
pmsg = &msgs[i];
+ if (!(pmsg->flags & I2C_M_NOSTART)) {
+ ret = wmt_i2c_wait_bus_not_busy(i2c_dev);
+ if (ret < 0)
+ return ret;
+ }
+
if (pmsg->flags & I2C_M_RD)
- ret = wmt_i2c_read(adap, pmsg, is_last);
+ ret = wmt_i2c_read(i2c_dev, pmsg);
else
- ret = wmt_i2c_write(adap, pmsg, is_last);
+ ret = wmt_i2c_write(i2c_dev, pmsg, (i + 1) == num);
}
return (ret < 0) ? ret : i;
@@ -359,10 +311,10 @@ static int wmt_i2c_reset_hardware(struct wmt_i2c_dev *i2c_dev)
readw(i2c_dev->base + REG_CSR); /* read clear */
writew(ISR_WRITE_ALL, i2c_dev->base + REG_ISR);
- if (i2c_dev->mode == I2C_MODE_STANDARD)
- writew(SCL_TIMEOUT(128) | TR_STD, i2c_dev->base + REG_TR);
- else
+ if (i2c_dev->tcr == TCR_FAST_MODE)
writew(SCL_TIMEOUT(128) | TR_HS, i2c_dev->base + REG_TR);
+ else
+ writew(SCL_TIMEOUT(128) | TR_STD, i2c_dev->base + REG_TR);
return 0;
}
@@ -395,10 +347,9 @@ static int wmt_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c_dev->clk);
}
- i2c_dev->mode = I2C_MODE_STANDARD;
err = of_property_read_u32(np, "clock-frequency", &clk_rate);
if (!err && (clk_rate == I2C_MAX_FAST_MODE_FREQ))
- i2c_dev->mode = I2C_MODE_FAST;
+ i2c_dev->tcr = TCR_FAST_MODE;
i2c_dev->dev = &pdev->dev;
@@ -427,11 +378,15 @@ static int wmt_i2c_probe(struct platform_device *pdev)
err = i2c_add_adapter(adap);
if (err)
- return err;
+ goto err_disable_clk;
platform_set_drvdata(pdev, i2c_dev);
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2c_dev->clk);
+ return err;
}
static void wmt_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 83c1db610..3648382b8 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -427,7 +427,7 @@ static struct scx200_acb_iface *scx200_create_iface(const char *text,
snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index);
adapter->owner = THIS_MODULE;
adapter->algo = &scx200_acb_algorithm;
- adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adapter->class = I2C_CLASS_HWMON;
adapter->dev.parent = dev;
mutex_init(&iface->mutex);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index eac90a3cf..5e2cefb37 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -67,6 +68,8 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
static DEFINE_STATIC_KEY_FALSE(i2c_trace_msg_key);
static bool is_registered;
+static struct dentry *i2c_debugfs_root;
+
int i2c_transfer_trace_reg(void)
{
static_branch_inc(&i2c_trace_msg_key);
@@ -689,7 +692,7 @@ static struct attribute *i2c_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(i2c_dev);
-struct bus_type i2c_bus_type = {
+const struct bus_type i2c_bus_type = {
.name = "i2c",
.match = i2c_device_match,
.probe = i2c_device_probe,
@@ -1524,6 +1527,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
goto out_list;
}
+ adap->debugfs = debugfs_create_dir(dev_name(&adap->dev), i2c_debugfs_root);
+
res = i2c_setup_smbus_alert(adap);
if (res)
goto out_reg;
@@ -1563,6 +1568,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
return 0;
out_reg:
+ debugfs_remove_recursive(adap->debugfs);
init_completion(&adap->dev_released);
device_unregister(&adap->dev);
wait_for_completion(&adap->dev_released);
@@ -1764,6 +1770,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
i2c_host_notify_irq_teardown(adap);
+ debugfs_remove_recursive(adap->debugfs);
+
/* wait until all references to the device are gone
*
* FIXME: This is old code and should ideally be replaced by an
@@ -2061,6 +2069,8 @@ static int __init i2c_init(void)
is_registered = true;
+ i2c_debugfs_root = debugfs_create_dir("i2c", NULL);
+
#ifdef CONFIG_I2C_COMPAT
i2c_adapter_compat_class = class_compat_register("i2c-adapter");
if (!i2c_adapter_compat_class) {
@@ -2099,6 +2109,7 @@ static void __exit i2c_exit(void)
#ifdef CONFIG_I2C_COMPAT
class_compat_unregister(i2c_adapter_compat_class);
#endif
+ debugfs_remove_recursive(i2c_debugfs_root);
bus_unregister(&i2c_bus_type);
tracepoint_synchronize_unregister();
}
@@ -2189,13 +2200,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2262,11 +2278,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
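With i2c_register_adapter() now creating a per-adapter directory under the shared "i2c" debugfs root, bus drivers get a ready-made anchor for their own debug files. A minimal sketch of how a driver might use it; struct my_i2c_dev and the xfer_count statistic are illustrative assumptions, not part of this patch:

#include <linux/debugfs.h>
#include <linux/i2c.h>

struct my_i2c_dev {
	struct i2c_adapter adap;
	u32 xfer_count;			/* assumed driver statistic */
};

static void my_i2c_debugfs_init(struct my_i2c_dev *dev)
{
	/* adap.debugfs is valid once i2c_add_adapter() has succeeded;
	 * debugfs_create_u32() degrades to a no-op when debugfs is off.
	 */
	debugfs_create_u32("xfer_count", 0444, dev->adap.debugfs,
			   &dev->xfer_count);
}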
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 138c3f5e0..74807c6db 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -308,8 +308,8 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
* target systems are the same.
* Restrictions to automatic SPD instantiation:
* - Only works if all filled slots have the same memory type
- * - Only works for DDR2, DDR3 and DDR4 for now
- * - Only works on systems with 1 to 4 memory slots
+ * - Only works for DDR, DDR2, DDR3 and DDR4 for now
+ * - Only works on systems with 1 to 8 memory slots
*/
#if IS_ENABLED(CONFIG_DMI)
void i2c_register_spd(struct i2c_adapter *adap)
@@ -354,9 +354,9 @@ void i2c_register_spd(struct i2c_adapter *adap)
dev_info(&adap->dev, "%d/%d memory slots populated (from DMI)\n",
dimm_count, slot_count);
- if (slot_count > 4) {
+ if (slot_count > 8) {
dev_warn(&adap->dev,
- "Systems with more than 4 memory slots not supported yet, not instantiating SPD\n");
+ "Systems with more than 8 memory slots not supported yet, not instantiating SPD\n");
return;
}
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index d642cad21..09e7b7bf4 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -308,7 +308,7 @@ static const struct i2c_algorithm smbus_algorithm = {
static struct i2c_adapter stub_adapter = {
.owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
.name = "SMBus stub driver",
};
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 9efc1ed01..8489971ba 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -159,7 +159,6 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
struct regmux *mux;
struct i2c_adapter *parent;
struct resource *res;
- unsigned int class;
int i, ret, nr;
mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
@@ -213,9 +212,8 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
for (i = 0; i < mux->data.n_values; i++) {
nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
- class = mux->data.classes ? mux->data.classes[i] : 0;
- ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+ ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], 0);
if (ret)
goto err_del_mux_adapters;
}
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 95caa1627..3afa530c5 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -557,6 +557,88 @@ static ssize_t i2c_scl_frequency_show(struct device *dev,
}
static DEVICE_ATTR_RO(i2c_scl_frequency);
+static int i3c_set_hotjoin(struct i3c_master_controller *master, bool enable)
+{
+ int ret;
+
+ if (!master || !master->ops)
+ return -EINVAL;
+
+ if (!master->ops->enable_hotjoin || !master->ops->disable_hotjoin)
+ return -EINVAL;
+
+ i3c_bus_normaluse_lock(&master->bus);
+
+ if (enable)
+ ret = master->ops->enable_hotjoin(master);
+ else
+ ret = master->ops->disable_hotjoin(master);
+
+ master->hotjoin = enable;
+
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return ret;
+}
+
+static ssize_t hotjoin_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ int ret;
+ bool res;
+
+ if (!i3cbus->cur_master)
+ return -EINVAL;
+
+ if (kstrtobool(buf, &res))
+ return -EINVAL;
+
+ ret = i3c_set_hotjoin(i3cbus->cur_master->common.master, res);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/**
+ * i3c_master_enable_hotjoin - Enable hotjoin
+ * @master: I3C master object
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master)
+{
+ return i3c_set_hotjoin(master, true);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enable_hotjoin);
+
+/**
+ * i3c_master_disable_hotjoin - Disable hotjoin
+ * @master: I3C master object
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master)
+{
+ return i3c_set_hotjoin(master, false);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disable_hotjoin);
+
+static ssize_t hotjoin_show(struct device *dev, struct device_attribute *da, char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sysfs_emit(buf, "%d\n", i3cbus->cur_master->common.master->hotjoin);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(hotjoin);
+
static struct attribute *i3c_masterdev_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_current_master.attr,
@@ -567,6 +649,7 @@ static struct attribute *i3c_masterdev_attrs[] = {
&dev_attr_pid.attr,
&dev_attr_dynamic_address.attr,
&dev_attr_hdrcap.attr,
+ &dev_attr_hotjoin.attr,
NULL,
};
ATTRIBUTE_GROUPS(i3c_masterdev);
@@ -1130,8 +1213,16 @@ static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
- if (ret)
- goto out;
+ if (ret) {
+ /*
+ * Retry with a shorter expected payload length when the device
+ * does not support the max read turnaround time part of this CCC command.
+ */
+ dest.payload.len -= 3;
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+ }
if (dest.payload.len != 2 && dest.payload.len != 5) {
ret = -EIO;
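The new hotjoin attribute is a thin wrapper around the controller's enable_hotjoin/disable_hotjoin ops, so user space can toggle Hot-Join by writing 0 or 1 to /sys/bus/i3c/devices/i3c-<N>/hotjoin. A hedged sketch of the controller side; every foo_-prefixed name and register offset is invented for illustration:

#include <linux/bits.h>
#include <linux/container_of.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#define FOO_INT_SLVSTART	BIT(8)	/* assumed slave-start irq bit */
#define FOO_INTSET		0x20	/* assumed irq set register */
#define FOO_INTCLR		0x24	/* assumed irq clear register */

struct foo_i3c_master {
	struct i3c_master_controller base;
	void __iomem *regs;
};

static int foo_enable_hotjoin(struct i3c_master_controller *m)
{
	struct foo_i3c_master *master =
		container_of(m, struct foo_i3c_master, base);

	/* Unmask slave-start events so Hot-Join requests reach the IBI
	 * path; the core tracks the state in master->hotjoin for sysfs reads.
	 */
	writel(FOO_INT_SLVSTART, master->regs + FOO_INTSET);
	return 0;
}

static int foo_disable_hotjoin(struct i3c_master_controller *m)
{
	struct foo_i3c_master *master =
		container_of(m, struct foo_i3c_master, base);

	writel(FOO_INT_SLVSTART, master->regs + FOO_INTCLR);
	return 0;
}

Note that i3c_set_hotjoin() returns -EINVAL unless both callbacks are provided, so a controller must implement the pair or neither.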
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index 2b2323aa6..638b054d6 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -298,7 +298,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
unsigned int dcr, bcr;
DECLARE_COMPLETION_ONSTACK(done);
- xfer = hci_alloc_xfer(2);
+ xfer = hci_alloc_xfer(1);
if (!xfer)
return -ENOMEM;
@@ -339,12 +339,13 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
ret = -ETIME;
break;
}
- if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
+ if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
+ RESP_STATUS(xfer->response) == RESP_ERR_NACK) &&
RESP_DATA_LENGTH(xfer->response) == 1) {
ret = 0; /* no more devices to be assigned */
break;
}
- if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ if (RESP_STATUS(xfer->response) != RESP_SUCCESS) {
ret = -EIO;
break;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 1ae56a569..d7e966a25 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -245,7 +245,14 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
if (ccc->rnw)
ccc->dests[i - prefixed].payload.len =
RESP_DATA_LENGTH(xfer[i].response);
- if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ switch (RESP_STATUS(xfer[i].response)) {
+ case RESP_SUCCESS:
+ continue;
+ case RESP_ERR_ADDR_HEADER:
+ case RESP_ERR_NACK:
+ ccc->err = I3C_ERROR_M2;
+ fallthrough;
+ default:
ret = -EIO;
goto out;
}
@@ -269,6 +276,34 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
return hci->cmd->perform_daa(hci);
}
+static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
+ struct hci_xfer *xfer)
+{
+ if (hci->io != &mipi_i3c_hci_dma ||
+ xfer->data == NULL || !is_vmalloc_addr(xfer->data))
+ return 0;
+
+ if (xfer->rnw)
+ xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
+ else
+ xfer->bounce_buf = kmemdup(xfer->data,
+ xfer->data_len, GFP_KERNEL);
+
+ return xfer->bounce_buf == NULL ? -ENOMEM : 0;
+}
+
+static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
+ struct hci_xfer *xfer)
+{
+ if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
+ return;
+
+ if (xfer->rnw)
+ memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);
+
+ kfree(xfer->bounce_buf);
+}
+
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int nxfers)
@@ -302,6 +337,9 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
+ if (ret)
+ goto out;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -325,6 +363,9 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
out:
+ for (i = 0; i < nxfers; i++)
+ i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
+
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -350,6 +391,9 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
+ if (ret)
+ goto out;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -371,6 +415,9 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
}
out:
+ for (i = 0; i < nxfers; i++)
+ i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
+
hci_free_xfer(xfer, nxfers);
return ret;
}
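The bounce-buffer helpers added above exist because dma_map_single() cannot be used on vmalloc addresses, whose pages need not be physically contiguous; such payloads (detected with is_vmalloc_addr()) must be copied into kmalloc memory first. The rule in isolation, as a hedged sketch; dma_safe_copy() is an invented name, not part of the patch:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *dma_safe_copy(const void *buf, size_t len)
{
	/* kmalloc memory is physically contiguous and DMA-mappable as-is */
	if (!is_vmalloc_addr(buf))
		return (void *)buf;

	/* vmalloc memory may span discontiguous pages: bounce it */
	return kmemdup(buf, len, GFP_KERNEL);	/* NULL on alloc failure */
}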
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index c805a8497..4e01a95cc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -362,6 +362,7 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
u32 op1_val, op2_val;
+ void *buf;
/* For now we only use ring 0 */
ring = 0;
@@ -390,9 +391,10 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
+ buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
xfer->data_dma =
dma_map_single(&hci->master.dev,
- xfer->data,
+ buf,
xfer->data_len,
xfer->rnw ?
DMA_FROM_DEVICE :
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index f109923f6..f94d95e02 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -90,6 +90,7 @@ struct hci_xfer {
struct {
/* DMA specific */
dma_addr_t data_dma;
+ void *bounce_buf;
int ring_number;
int ring_entry;
};
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index cf703c00f..5ee4db689 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -128,13 +128,17 @@
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
+#define SVC_I3C_EVENT_IBI BIT(0)
+#define SVC_I3C_EVENT_HOTJOIN BIT(1)
+
struct svc_i3c_cmd {
u8 addr;
bool rnw;
u8 *in;
const void *out;
unsigned int len;
- unsigned int read_len;
+ unsigned int actual_len;
+ struct i3c_priv_xfer *xfer;
bool continued;
};
@@ -177,6 +181,7 @@ struct svc_i3c_regs_save {
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
* @lock: Transfer lock, protect between IBI work thread and callbacks from master
+ * @enabled_events: Bit mask of enabled events (IBI, HotJoin).
*/
struct svc_i3c_master {
struct i3c_master_controller base;
@@ -206,6 +211,7 @@ struct svc_i3c_master {
spinlock_t lock;
} ibi;
struct mutex lock;
+ int enabled_events;
};
/**
@@ -220,6 +226,11 @@ struct svc_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
+static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
+{
+ return !!(master->enabled_events & mask);
+}
+
static bool svc_i3c_master_error(struct svc_i3c_master *master)
{
u32 mstatus, merrwarn;
@@ -429,13 +440,16 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
- if (!dev)
+ if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
svc_i3c_master_nack_ibi(master);
else
svc_i3c_master_handle_ibi(master, dev);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
- svc_i3c_master_ack_ibi(master, false);
+ if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+ svc_i3c_master_ack_ibi(master, false);
+ else
+ svc_i3c_master_nack_ibi(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
svc_i3c_master_nack_ibi(master);
@@ -472,7 +486,9 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
svc_i3c_master_emit_stop(master);
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
- queue_work(master->base.wq, &master->hj_work);
+ svc_i3c_master_emit_stop(master);
+ if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
+ queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
default:
@@ -1024,7 +1040,7 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int *read_len, bool continued)
+ unsigned int *actual_len, bool continued)
{
u32 reg;
int ret;
@@ -1037,7 +1053,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
SVC_I3C_MCTRL_IBIRESP_NACK |
SVC_I3C_MCTRL_DIR(rnw) |
SVC_I3C_MCTRL_ADDR(addr) |
- SVC_I3C_MCTRL_RDTERM(*read_len),
+ SVC_I3C_MCTRL_RDTERM(*actual_len),
master->regs + SVC_I3C_MCTRL);
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -1047,6 +1063,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
ret = -ENXIO;
+ *actual_len = 0;
goto emit_stop;
}
@@ -1064,6 +1081,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
*/
if (SVC_I3C_MSTATUS_IBIWON(reg)) {
ret = -ENXIO;
+ *actual_len = 0;
goto emit_stop;
}
@@ -1075,7 +1093,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
goto emit_stop;
if (rnw)
- *read_len = ret;
+ *actual_len = ret;
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
@@ -1157,8 +1175,12 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
- cmd->len, &cmd->read_len,
+ cmd->len, &cmd->actual_len,
cmd->continued);
+ /* cmd->xfer is NULL for I2C and CCC transfers */
+ if (cmd->xfer)
+ cmd->xfer->actual_len = cmd->actual_len;
+
if (ret)
break;
}
@@ -1243,7 +1265,7 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
cmd->in = NULL;
cmd->out = buf;
cmd->len = xfer_len;
- cmd->read_len = 0;
+ cmd->actual_len = 0;
cmd->continued = false;
mutex_lock(&master->lock);
@@ -1263,7 +1285,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
struct i3c_ccc_cmd *ccc)
{
unsigned int xfer_len = ccc->dests[0].payload.len;
- unsigned int read_len = ccc->rnw ? xfer_len : 0;
+ unsigned int actual_len = ccc->rnw ? xfer_len : 0;
struct svc_i3c_xfer *xfer;
struct svc_i3c_cmd *cmd;
int ret;
@@ -1281,7 +1303,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
cmd->in = NULL;
cmd->out = &ccc->id;
cmd->len = 1;
- cmd->read_len = 0;
+ cmd->actual_len = 0;
cmd->continued = true;
/* Directed message */
@@ -1291,7 +1313,7 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data,
cmd->len = xfer_len;
- cmd->read_len = read_len;
+ cmd->actual_len = actual_len;
cmd->continued = false;
mutex_lock(&master->lock);
@@ -1300,8 +1322,8 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
svc_i3c_master_dequeue_xfer(master, xfer);
mutex_unlock(&master->lock);
- if (cmd->read_len != xfer_len)
- ccc->dests[0].payload.len = cmd->read_len;
+ if (cmd->actual_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->actual_len;
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1346,12 +1368,13 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
for (i = 0; i < nxfers; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
+ cmd->xfer = &xfers[i];
cmd->addr = master->addrs[data->index];
cmd->rnw = xfers[i].rnw;
cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
cmd->len = xfers[i].len;
- cmd->read_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
cmd->continued = (i + 1) < nxfers;
}
@@ -1391,7 +1414,7 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
cmd->in = cmd->rnw ? xfers[i].buf : NULL;
cmd->out = cmd->rnw ? NULL : xfers[i].buf;
cmd->len = xfers[i].len;
- cmd->read_len = cmd->rnw ? xfers[i].len : 0;
+ cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
cmd->continued = (i + 1 < nxfers);
}
@@ -1472,6 +1495,7 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
return ret;
}
+ master->enabled_events |= SVC_I3C_EVENT_IBI;
svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
@@ -1483,7 +1507,9 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
struct svc_i3c_master *master = to_svc_i3c_master(m);
int ret;
- svc_i3c_master_disable_interrupts(master);
+ master->enabled_events &= ~SVC_I3C_EVENT_IBI;
+ if (!master->enabled_events)
+ svc_i3c_master_disable_interrupts(master);
ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
@@ -1493,6 +1519,39 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
return ret;
}
+static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+
+ return 0;
+}
+
+static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
+{
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+
+ master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;
+
+ if (!master->enabled_events)
+ svc_i3c_master_disable_interrupts(master);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return 0;
+}
+
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot)
{
@@ -1519,6 +1578,8 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
.enable_ibi = svc_i3c_master_enable_ibi,
.disable_ibi = svc_i3c_master_disable_ibi,
+ .enable_hotjoin = svc_i3c_master_enable_hotjoin,
+ .disable_hotjoin = svc_i3c_master_disable_hotjoin,
};
static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3e01a6b23..bcf1198e8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -918,6 +918,35 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state mtl_l_cstates[] __initdata = {
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 420,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C10",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 310,
+ .target_residency = 930,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state gmt_cstates[] __initdata = {
{
.name = "C1",
@@ -1237,6 +1266,72 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state grr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 2,
+ .target_residency = 10,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6S",
+ .desc = "MWAIT 0x22",
+ .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 140,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
+static struct cpuidle_state srf_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 2,
+ .target_residency = 10,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6S",
+ .desc = "MWAIT 0x22",
+ .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 270,
+ .target_residency = 700,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6SP",
+ .desc = "MWAIT 0x23",
+ .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 310,
+ .target_residency = 900,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1344,6 +1439,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
.state_table = adl_l_cstates,
};
+static const struct idle_cpu idle_cpu_mtl_l __initconst = {
+ .state_table = mtl_l_cstates,
+};
+
static const struct idle_cpu idle_cpu_gmt __initconst = {
.state_table = gmt_cstates,
};
@@ -1382,6 +1481,18 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_grr __initconst = {
+ .state_table = grr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
+static const struct idle_cpu idle_cpu_srf __initconst = {
+ .state_table = srf_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1418,6 +1529,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &idle_cpu_mtl_l),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
@@ -1427,6 +1539,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &idle_cpu_grr),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf),
{}
};
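The exit_latency and target_residency numbers in these tables are what a cpuidle governor weighs when picking a state: a deeper state only pays off for idle periods at least as long as its target_residency, and is skipped when its exit_latency exceeds the current latency constraint. A simplified sketch of that eligibility rule, assuming microsecond units as in the tables above (real governors such as menu and teo layer prediction and correction on top):

#include <linux/cpuidle.h>

static bool state_eligible(const struct cpuidle_state *s,
			   unsigned int predicted_idle_us,
			   unsigned int latency_req_us)
{
	/* e.g. MTL-L C10 above: worth entering only for sleeps >= 930 us,
	 * and only if a 310 us wakeup latency is acceptable.
	 */
	return s->target_residency <= predicted_idle_us &&
	       s->exit_latency <= latency_req_us;
}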
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index d2221fe9a..c9d7afe48 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -262,10 +262,11 @@ config BMI088_ACCEL
select REGMAP
select BMI088_ACCEL_SPI
help
- Say yes here to build support for the Bosch BMI088 accelerometer.
+ Say yes here to build support for the following Bosch accelerometers:
+ BMI088, BMI085, BMI090L. Note that all of these are combo modules that
+ include both an accelerometer and a gyroscope.
- This is a combo module with both accelerometer and gyroscope. This
- driver only implements the accelerometer part, which has its own
+ This driver only implements the accelerometer part, which has its own
address and register map. BMG160 provides the gyroscope driver.
config BMI088_ACCEL_SPI
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index 84edcc78d..4d989708e 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -2,6 +2,8 @@
/*
* 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
* - BMI088
+ * - BMI085
+ * - BMI090L
*
* Copyright (c) 2018-2021, Topic Embedded Products
*/
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index ee540edd8..7b419a7b2 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -2,6 +2,8 @@
/*
* 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
* - BMI088
+ * - BMI085
+ * - BMI090L
*
* Copyright (c) 2018-2020, Topic Embedded Products
*/
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 35f9867da..3b73c509b 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -36,13 +36,29 @@ config AD4130
To compile this driver as a module, choose M here: the module will be
called ad4130.
+config AD7091R
+ tristate
+
config AD7091R5
tristate "Analog Devices AD7091R5 ADC Driver"
depends on I2C
+ select AD7091R
select REGMAP_I2C
help
Say yes here to build support for Analog Devices AD7091R-5 ADC.
+config AD7091R8
+ tristate "Analog Devices AD7091R8 ADC Driver"
+ depends on SPI
+ select AD7091R
+ select REGMAP_SPI
+ help
+ Say yes here to build support for Analog Devices AD7091R-2, AD7091R-4,
+ and AD7091R-8 ADCs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad7091r8.
+
config AD7124
tristate "Analog Devices AD7124 and similar sigma-delta ADCs driver"
depends on SPI_MASTER
@@ -292,7 +308,7 @@ config ADI_AXI_ADC
select IIO_BUFFER
select IIO_BUFFER_HW_CONSUMER
select IIO_BUFFER_DMAENGINE
- depends on HAS_IOMEM
+ select REGMAP_MMIO
depends on OF
help
Say yes here to build support for Analog Devices Generic
@@ -745,6 +761,17 @@ config MAX1363
To compile this driver as a module, choose M here: the module will be
called max1363.
+config MAX34408
+ tristate "Maxim max34408/max344089 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Maxim max34408/max34409 current
+ sense monitors with an 8-bit ADC interface, overcurrent delay/threshold
+ and shutdown delay.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max34408.
+
config MAX77541_ADC
tristate "Analog Devices MAX77541 ADC driver"
depends on MFD_MAX77541
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index bee11d442..d2fda54a3 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -7,7 +7,9 @@
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD4130) += ad4130.o
-obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o
+obj-$(CONFIG_AD7091R) += ad7091r-base.o
+obj-$(CONFIG_AD7091R5) += ad7091r5.o
+obj-$(CONFIG_AD7091R8) += ad7091r8.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7266) += ad7266.o
@@ -68,6 +70,7 @@ obj-$(CONFIG_MAX11205) += max11205.o
obj-$(CONFIG_MAX11410) += max11410.o
obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_MAX34408) += max34408.o
obj-$(CONFIG_MAX77541_ADC) += max77541-adc.o
obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index 76002b91c..f4255b91a 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -16,41 +16,6 @@
#include "ad7091r-base.h"
-#define AD7091R_REG_RESULT 0
-#define AD7091R_REG_CHANNEL 1
-#define AD7091R_REG_CONF 2
-#define AD7091R_REG_ALERT 3
-#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
-#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
-#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
-
-/* AD7091R_REG_RESULT */
-#define AD7091R_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x3)
-#define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff)
-
-/* AD7091R_REG_CONF */
-#define AD7091R_REG_CONF_ALERT_EN BIT(4)
-#define AD7091R_REG_CONF_AUTO BIT(8)
-#define AD7091R_REG_CONF_CMD BIT(10)
-
-#define AD7091R_REG_CONF_MODE_MASK \
- (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
-
-enum ad7091r_mode {
- AD7091R_MODE_SAMPLE,
- AD7091R_MODE_COMMAND,
- AD7091R_MODE_AUTOCYCLE,
-};
-
-struct ad7091r_state {
- struct device *dev;
- struct regmap *map;
- struct regulator *vref;
- const struct ad7091r_chip_info *chip_info;
- enum ad7091r_mode mode;
- struct mutex lock; /* lock to prevent concurrent reads */
-};
-
const struct iio_event_spec ad7091r_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -72,34 +37,6 @@ const struct iio_event_spec ad7091r_events[] = {
};
EXPORT_SYMBOL_NS_GPL(ad7091r_events, IIO_AD7091R);
-static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
-{
- int ret, conf;
-
- switch (mode) {
- case AD7091R_MODE_SAMPLE:
- conf = 0;
- break;
- case AD7091R_MODE_COMMAND:
- conf = AD7091R_REG_CONF_CMD;
- break;
- case AD7091R_MODE_AUTOCYCLE:
- conf = AD7091R_REG_CONF_AUTO;
- break;
- default:
- return -EINVAL;
- }
-
- ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
- AD7091R_REG_CONF_MODE_MASK, conf);
- if (ret)
- return ret;
-
- st->mode = mode;
-
- return 0;
-}
-
static int ad7091r_set_channel(struct ad7091r_state *st, unsigned int channel)
{
unsigned int dummy;
@@ -133,7 +70,7 @@ static int ad7091r_read_one(struct iio_dev *iio_dev,
if (ret)
return ret;
- if (AD7091R_REG_RESULT_CH_ID(val) != channel)
+ if (st->chip_info->reg_result_chan_id(val) != channel)
return -EIO;
*read_val = AD7091R_REG_RESULT_CONV_RESULT(val);
@@ -364,9 +301,8 @@ static void ad7091r_remove(void *data)
regulator_disable(st->vref);
}
-int ad7091r_probe(struct device *dev, const char *name,
- const struct ad7091r_chip_info *chip_info,
- struct regmap *map, int irq)
+int ad7091r_probe(struct device *dev, const struct ad7091r_init_info *init_info,
+ int irq)
{
struct iio_dev *iio_dev;
struct ad7091r_state *st;
@@ -378,29 +314,42 @@ int ad7091r_probe(struct device *dev, const char *name,
st = iio_priv(iio_dev);
st->dev = dev;
- st->chip_info = chip_info;
- st->map = map;
+ init_info->init_adc_regmap(st, init_info->regmap_config);
+ if (IS_ERR(st->map))
+ return dev_err_probe(st->dev, PTR_ERR(st->map),
+ "Error initializing regmap\n");
- iio_dev->name = name;
iio_dev->info = &ad7091r_info;
iio_dev->modes = INDIO_DIRECT_MODE;
- iio_dev->num_channels = chip_info->num_channels;
- iio_dev->channels = chip_info->channels;
+ if (init_info->setup) {
+ ret = init_info->setup(st);
+ if (ret < 0)
+ return ret;
+ }
if (irq) {
+ st->chip_info = init_info->info_irq;
ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
AD7091R_REG_CONF_ALERT_EN, BIT(4));
if (ret)
return ret;
ret = devm_request_threaded_irq(dev, irq, NULL,
- ad7091r_event_handler,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev);
+ ad7091r_event_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ st->chip_info->name, iio_dev);
if (ret)
return ret;
+ } else {
+ st->chip_info = init_info->info_no_irq;
}
+ iio_dev->name = st->chip_info->name;
+ iio_dev->num_channels = st->chip_info->num_channels;
+ iio_dev->channels = st->chip_info->channels;
+
st->vref = devm_regulator_get_optional(dev, "vref");
if (IS_ERR(st->vref)) {
if (PTR_ERR(st->vref) == -EPROBE_DEFER)
@@ -423,7 +372,7 @@ int ad7091r_probe(struct device *dev, const char *name,
}
/* Use command mode by default to convert only desired channels */
- ret = ad7091r_set_mode(st, AD7091R_MODE_COMMAND);
+ ret = st->chip_info->set_mode(st, AD7091R_MODE_COMMAND);
if (ret)
return ret;
@@ -431,7 +380,7 @@ int ad7091r_probe(struct device *dev, const char *name,
}
EXPORT_SYMBOL_NS_GPL(ad7091r_probe, IIO_AD7091R);
-static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
+bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case AD7091R_REG_RESULT:
@@ -441,8 +390,9 @@ static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg)
return true;
}
}
+EXPORT_SYMBOL_NS_GPL(ad7091r_writeable_reg, IIO_AD7091R);
-static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
+bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case AD7091R_REG_RESULT:
@@ -452,14 +402,7 @@ static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
}
-
-const struct regmap_config ad7091r_regmap_config = {
- .reg_bits = 8,
- .val_bits = 16,
- .writeable_reg = ad7091r_writeable_reg,
- .volatile_reg = ad7091r_volatile_reg,
-};
-EXPORT_SYMBOL_NS_GPL(ad7091r_regmap_config, IIO_AD7091R);
+EXPORT_SYMBOL_NS_GPL(ad7091r_volatile_reg, IIO_AD7091R);
MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7091Rx multi-channel converters");
diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h
index b9e1c8bf3..696bf7a89 100644
--- a/drivers/iio/adc/ad7091r-base.h
+++ b/drivers/iio/adc/ad7091r-base.h
@@ -8,27 +8,92 @@
#ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__
#define __DRIVERS_IIO_ADC_AD7091R_BASE_H__
+#include <linux/regmap.h>
+
+#define AD7091R_REG_RESULT 0
+#define AD7091R_REG_CHANNEL 1
+#define AD7091R_REG_CONF 2
+#define AD7091R_REG_ALERT 3
+#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4)
+#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5)
+#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6)
+
+/* AD7091R_REG_RESULT */
+#define AD7091R5_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x3)
+#define AD7091R8_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x7)
+#define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff)
+
+/* AD7091R_REG_CONF */
#define AD7091R_REG_CONF_INT_VREF BIT(0)
+#define AD7091R_REG_CONF_ALERT_EN BIT(4)
+#define AD7091R_REG_CONF_AUTO BIT(8)
+#define AD7091R_REG_CONF_CMD BIT(10)
+
+#define AD7091R_REG_CONF_MODE_MASK \
+ (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD)
/* AD7091R_REG_CH_LIMIT */
#define AD7091R_HIGH_LIMIT 0xFFF
#define AD7091R_LOW_LIMIT 0x0
+#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = idx, \
+ .event_spec = ev, \
+ .num_event_specs = num_ev, \
+ .scan_type.storagebits = 16, \
+ .scan_type.realbits = bits, \
+}
+
struct device;
-struct ad7091r_state;
+struct gpio_desc;
+
+enum ad7091r_mode {
+ AD7091R_MODE_SAMPLE,
+ AD7091R_MODE_COMMAND,
+ AD7091R_MODE_AUTOCYCLE,
+};
+
+struct ad7091r_state {
+ struct device *dev;
+ struct regmap *map;
+ struct gpio_desc *convst_gpio;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vref;
+ const struct ad7091r_chip_info *chip_info;
+ enum ad7091r_mode mode;
+ struct mutex lock; /* lock to prevent concurrent reads */
+ __be16 tx_buf __aligned(IIO_DMA_MINALIGN);
+ __be16 rx_buf;
+};
struct ad7091r_chip_info {
+ const char *name;
unsigned int num_channels;
const struct iio_chan_spec *channels;
unsigned int vref_mV;
+ unsigned int (*reg_result_chan_id)(unsigned int val);
+ int (*set_mode)(struct ad7091r_state *st, enum ad7091r_mode mode);
+};
+
+struct ad7091r_init_info {
+ const struct ad7091r_chip_info *info_irq;
+ const struct ad7091r_chip_info *info_no_irq;
+ const struct regmap_config *regmap_config;
+ void (*init_adc_regmap)(struct ad7091r_state *st,
+ const struct regmap_config *regmap_conf);
+ int (*setup)(struct ad7091r_state *st);
};
extern const struct iio_event_spec ad7091r_events[3];
-extern const struct regmap_config ad7091r_regmap_config;
+int ad7091r_probe(struct device *dev, const struct ad7091r_init_info *init_info,
+ int irq);
-int ad7091r_probe(struct device *dev, const char *name,
- const struct ad7091r_chip_info *chip_info,
- struct regmap *map, int irq);
+bool ad7091r_volatile_reg(struct device *dev, unsigned int reg);
+bool ad7091r_writeable_reg(struct device *dev, unsigned int reg);
#endif /* __DRIVERS_IIO_ADC_AD7091R_BASE_H__ */
diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c
index dae98c95e..a75837334 100644
--- a/drivers/iio/adc/ad7091r5.c
+++ b/drivers/iio/adc/ad7091r5.c
@@ -12,17 +12,6 @@
#include "ad7091r-base.h"
-#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \
- .type = IIO_VOLTAGE, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .indexed = 1, \
- .channel = idx, \
- .event_spec = ev, \
- .num_event_specs = num_ev, \
- .scan_type.storagebits = 16, \
- .scan_type.realbits = bits, \
-}
static const struct iio_chan_spec ad7091r5_channels_irq[] = {
AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
@@ -37,43 +26,98 @@ static const struct iio_chan_spec ad7091r5_channels_noirq[] = {
AD7091R_CHANNEL(3, 12, NULL, 0),
};
+static int ad7091r5_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+ int ret, conf;
+
+ switch (mode) {
+ case AD7091R_MODE_SAMPLE:
+ conf = 0;
+ break;
+ case AD7091R_MODE_COMMAND:
+ conf = AD7091R_REG_CONF_CMD;
+ break;
+ case AD7091R_MODE_AUTOCYCLE:
+ conf = AD7091R_REG_CONF_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->map, AD7091R_REG_CONF,
+ AD7091R_REG_CONF_MODE_MASK, conf);
+ if (ret)
+ return ret;
+
+ st->mode = mode;
+
+ return 0;
+}
+
+static unsigned int ad7091r5_reg_result_chan_id(unsigned int val)
+{
+ return AD7091R5_REG_RESULT_CH_ID(val);
+}
+
static const struct ad7091r_chip_info ad7091r5_chip_info_irq = {
+ .name = "ad7091r-5",
.channels = ad7091r5_channels_irq,
.num_channels = ARRAY_SIZE(ad7091r5_channels_irq),
.vref_mV = 2500,
+ .reg_result_chan_id = &ad7091r5_reg_result_chan_id,
+ .set_mode = &ad7091r5_set_mode,
};
static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = {
+ .name = "ad7091r-5",
.channels = ad7091r5_channels_noirq,
.num_channels = ARRAY_SIZE(ad7091r5_channels_noirq),
.vref_mV = 2500,
+ .reg_result_chan_id = &ad7091r5_reg_result_chan_id,
+ .set_mode = &ad7091r5_set_mode,
};
-static int ad7091r5_i2c_probe(struct i2c_client *i2c)
+static const struct regmap_config ad7091r_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .writeable_reg = ad7091r_writeable_reg,
+ .volatile_reg = ad7091r_volatile_reg,
+};
+
+static void ad7091r5_regmap_init(struct ad7091r_state *st,
+ const struct regmap_config *regmap_conf)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
- const struct ad7091r_chip_info *chip_info;
- struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config);
+ struct i2c_client *i2c = container_of(st->dev, struct i2c_client, dev);
+
+ st->map = devm_regmap_init_i2c(i2c, regmap_conf);
+}
+
+static struct ad7091r_init_info ad7091r5_init_info = {
+ .info_irq = &ad7091r5_chip_info_irq,
+ .info_no_irq = &ad7091r5_chip_info_noirq,
+ .regmap_config = &ad7091r_regmap_config,
+ .init_adc_regmap = &ad7091r5_regmap_init
+};
- if (IS_ERR(map))
- return PTR_ERR(map);
+static int ad7091r5_i2c_probe(struct i2c_client *i2c)
+{
+ const struct ad7091r_init_info *init_info;
- if (i2c->irq)
- chip_info = &ad7091r5_chip_info_irq;
- else
- chip_info = &ad7091r5_chip_info_noirq;
+ init_info = i2c_get_match_data(i2c);
+ if (!init_info)
+ return -EINVAL;
- return ad7091r_probe(&i2c->dev, id->name, chip_info, map, i2c->irq);
+ return ad7091r_probe(&i2c->dev, init_info, i2c->irq);
}
static const struct of_device_id ad7091r5_dt_ids[] = {
- { .compatible = "adi,ad7091r5" },
+ { .compatible = "adi,ad7091r5", .data = &ad7091r5_init_info },
{},
};
MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids);
static const struct i2c_device_id ad7091r5_i2c_ids[] = {
- {"ad7091r5", 0},
+ {"ad7091r5", (kernel_ulong_t)&ad7091r5_init_info },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids);
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
new file mode 100644
index 000000000..700564305
--- /dev/null
+++ b/drivers/iio/adc/ad7091r8.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AD7091R8 12-bit SAR ADC driver
+ *
+ * Copyright 2023 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+
+#include "ad7091r-base.h"
+
+#define AD7091R8_REG_ADDR_MSK GENMASK(15, 11)
+#define AD7091R8_RD_WR_FLAG_MSK BIT(10)
+#define AD7091R8_REG_DATA_MSK GENMASK(9, 0)
+
+#define AD7091R_SPI_REGMAP_CONFIG(n) { \
+ .reg_bits = 8, \
+ .val_bits = 16, \
+ .volatile_reg = ad7091r_volatile_reg, \
+ .writeable_reg = ad7091r_writeable_reg, \
+ .max_register = AD7091R_REG_CH_HYSTERESIS(n), \
+}
+
+static int ad7091r8_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode)
+{
+ /* AD7091R-2/-4/-8 don't set sample/command/autocycle mode in conf reg */
+ st->mode = mode;
+ return 0;
+}
+
+static unsigned int ad7091r8_reg_result_chan_id(unsigned int val)
+{
+ return AD7091R8_REG_RESULT_CH_ID(val);
+}
+
+#define AD7091R_SPI_CHIP_INFO(_n, _name) { \
+ .name = _name, \
+ .channels = ad7091r##_n##_channels, \
+ .num_channels = ARRAY_SIZE(ad7091r##_n##_channels), \
+ .vref_mV = 2500, \
+ .reg_result_chan_id = &ad7091r8_reg_result_chan_id, \
+ .set_mode = &ad7091r8_set_mode, \
+}
+
+#define AD7091R_SPI_CHIP_INFO_IRQ(_n, _name) { \
+ .name = _name, \
+ .channels = ad7091r##_n##_channels_irq, \
+ .num_channels = ARRAY_SIZE(ad7091r##_n##_channels_irq), \
+ .vref_mV = 2500, \
+ .reg_result_chan_id = &ad7091r8_reg_result_chan_id, \
+ .set_mode = &ad7091r8_set_mode, \
+}
+
+enum ad7091r8_info_ids {
+ AD7091R2_INFO,
+ AD7091R4_INFO,
+ AD7091R4_INFO_IRQ,
+ AD7091R8_INFO,
+ AD7091R8_INFO_IRQ,
+};
+
+static const struct iio_chan_spec ad7091r2_channels[] = {
+ AD7091R_CHANNEL(0, 12, NULL, 0),
+ AD7091R_CHANNEL(1, 12, NULL, 0),
+};
+
+static const struct iio_chan_spec ad7091r4_channels[] = {
+ AD7091R_CHANNEL(0, 12, NULL, 0),
+ AD7091R_CHANNEL(1, 12, NULL, 0),
+ AD7091R_CHANNEL(2, 12, NULL, 0),
+ AD7091R_CHANNEL(3, 12, NULL, 0),
+};
+
+static const struct iio_chan_spec ad7091r4_channels_irq[] = {
+ AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+};
+
+static const struct iio_chan_spec ad7091r8_channels[] = {
+ AD7091R_CHANNEL(0, 12, NULL, 0),
+ AD7091R_CHANNEL(1, 12, NULL, 0),
+ AD7091R_CHANNEL(2, 12, NULL, 0),
+ AD7091R_CHANNEL(3, 12, NULL, 0),
+ AD7091R_CHANNEL(4, 12, NULL, 0),
+ AD7091R_CHANNEL(5, 12, NULL, 0),
+ AD7091R_CHANNEL(6, 12, NULL, 0),
+ AD7091R_CHANNEL(7, 12, NULL, 0),
+};
+
+static const struct iio_chan_spec ad7091r8_channels_irq[] = {
+ AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(4, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(5, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(6, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+ AD7091R_CHANNEL(7, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)),
+};
+
+static void ad7091r_pulse_convst(struct ad7091r_state *st)
+{
+ gpiod_set_value_cansleep(st->convst_gpio, 1);
+ gpiod_set_value_cansleep(st->convst_gpio, 0);
+}
+
+static int ad7091r_regmap_bus_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct ad7091r_state *st = context;
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ int ret;
+
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->tx_buf,
+ .len = 2,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->rx_buf,
+ .len = 2,
+ }
+ };
+
+ if (reg == AD7091R_REG_RESULT)
+ ad7091r_pulse_convst(st);
+
+ st->tx_buf = cpu_to_be16(reg << 11);
+
+ ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t));
+ if (ret < 0)
+ return ret;
+
+ *val = be16_to_cpu(st->rx_buf);
+ return 0;
+}
+
+static int ad7091r_regmap_bus_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct ad7091r_state *st = context;
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+
+ /*
+ * The AD7091R-2/-4/-8 protocol (datasheet page 31) uses a single SPI
+ * transfer with the register address in bits B15:B11 and the value in B9:B0.
+ */
+ st->tx_buf = cpu_to_be16(FIELD_PREP(AD7091R8_REG_DATA_MSK, val) |
+ FIELD_PREP(AD7091R8_RD_WR_FLAG_MSK, 1) |
+ FIELD_PREP(AD7091R8_REG_ADDR_MSK, reg));
+
+ return spi_write(spi, &st->tx_buf, 2);
+}
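To make the framing above concrete, here is a worked encoding of a hypothetical write (the register and value are illustrative, not taken from the patch):

    /* Write 0x2AB to register 0x2:
     *   FIELD_PREP(AD7091R8_REG_ADDR_MSK, 0x2)   = 0x2 << 11 = 0x1000
     *   FIELD_PREP(AD7091R8_RD_WR_FLAG_MSK, 1)   = 1 << 10   = 0x0400
     *   FIELD_PREP(AD7091R8_REG_DATA_MSK, 0x2AB)             = 0x02AB
     * OR of the three fields = 0x16AB, sent big-endian as 0x16 0xAB.
     */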
+
+static struct regmap_bus ad7091r8_regmap_bus = {
+ .reg_read = ad7091r_regmap_bus_reg_read,
+ .reg_write = ad7091r_regmap_bus_reg_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static const struct ad7091r_chip_info ad7091r8_infos[] = {
+ [AD7091R2_INFO] = AD7091R_SPI_CHIP_INFO(2, "ad7091r-2"),
+ [AD7091R4_INFO] = AD7091R_SPI_CHIP_INFO(4, "ad7091r-4"),
+ [AD7091R4_INFO_IRQ] = AD7091R_SPI_CHIP_INFO_IRQ(4, "ad7091r-4"),
+ [AD7091R8_INFO] = AD7091R_SPI_CHIP_INFO(8, "ad7091r-8"),
+ [AD7091R8_INFO_IRQ] = AD7091R_SPI_CHIP_INFO_IRQ(8, "ad7091r-8")
+};
+
+static const struct regmap_config ad7091r2_reg_conf = AD7091R_SPI_REGMAP_CONFIG(2);
+static const struct regmap_config ad7091r4_reg_conf = AD7091R_SPI_REGMAP_CONFIG(4);
+static const struct regmap_config ad7091r8_reg_conf = AD7091R_SPI_REGMAP_CONFIG(8);
+
+static void ad7091r8_regmap_init(struct ad7091r_state *st,
+ const struct regmap_config *regmap_conf)
+{
+ st->map = devm_regmap_init(st->dev, &ad7091r8_regmap_bus, st,
+ regmap_conf);
+}
+
+static int ad7091r8_gpio_setup(struct ad7091r_state *st)
+{
+ st->convst_gpio = devm_gpiod_get(st->dev, "convst", GPIOD_OUT_LOW);
+ if (IS_ERR(st->convst_gpio))
+ return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio),
+ "Error getting convst GPIO\n");
+
+ st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->reset_gpio))
+ return dev_err_probe(st->dev, PTR_ERR(st->reset_gpio),
+ "Error on requesting reset GPIO\n");
+
+ if (st->reset_gpio) {
+ fsleep(20);
+ gpiod_set_value_cansleep(st->reset_gpio, 0);
+ }
+
+ return 0;
+}
+
+static struct ad7091r_init_info ad7091r2_init_info = {
+ .info_no_irq = &ad7091r8_infos[AD7091R2_INFO],
+ .regmap_config = &ad7091r2_reg_conf,
+ .init_adc_regmap = &ad7091r8_regmap_init,
+ .setup = &ad7091r8_gpio_setup
+};
+
+static struct ad7091r_init_info ad7091r4_init_info = {
+ .info_no_irq = &ad7091r8_infos[AD7091R4_INFO],
+ .info_irq = &ad7091r8_infos[AD7091R4_INFO_IRQ],
+ .regmap_config = &ad7091r4_reg_conf,
+ .init_adc_regmap = &ad7091r8_regmap_init,
+ .setup = &ad7091r8_gpio_setup
+};
+
+static struct ad7091r_init_info ad7091r8_init_info = {
+ .info_no_irq = &ad7091r8_infos[AD7091R8_INFO],
+ .info_irq = &ad7091r8_infos[AD7091R8_INFO_IRQ],
+ .regmap_config = &ad7091r8_reg_conf,
+ .init_adc_regmap = &ad7091r8_regmap_init,
+ .setup = &ad7091r8_gpio_setup
+};
+
+static int ad7091r8_spi_probe(struct spi_device *spi)
+{
+ const struct ad7091r_init_info *init_info;
+
+ init_info = spi_get_device_match_data(spi);
+ if (!init_info)
+ return -EINVAL;
+
+ return ad7091r_probe(&spi->dev, init_info, spi->irq);
+}
+
+static const struct of_device_id ad7091r8_of_match[] = {
+ { .compatible = "adi,ad7091r2", .data = &ad7091r2_init_info },
+ { .compatible = "adi,ad7091r4", .data = &ad7091r4_init_info },
+ { .compatible = "adi,ad7091r8", .data = &ad7091r8_init_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7091r8_of_match);
+
+static const struct spi_device_id ad7091r8_spi_id[] = {
+ { "ad7091r2", (kernel_ulong_t)&ad7091r2_init_info },
+ { "ad7091r4", (kernel_ulong_t)&ad7091r4_init_info },
+ { "ad7091r8", (kernel_ulong_t)&ad7091r8_init_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad7091r8_spi_id);
+
+static struct spi_driver ad7091r8_driver = {
+ .driver = {
+ .name = "ad7091r8",
+ .of_match_table = ad7091r8_of_match,
+ },
+ .probe = ad7091r8_spi_probe,
+ .id_table = ad7091r8_spi_id,
+};
+module_spi_driver(ad7091r8_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7091R8 ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_AD7091R);
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index f66831373..6581fce4b 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -101,12 +101,6 @@
#define AD9467_DEF_OUTPUT_MODE 0x08
#define AD9467_REG_VREF_MASK 0x0F
-enum {
- ID_AD9265,
- ID_AD9434,
- ID_AD9467,
-};
-
struct ad9467_chip_info {
struct adi_axi_adc_chip_info axi_adc_info;
unsigned int default_output_mode;
@@ -164,7 +158,7 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
struct spi_device *spi = st->spi;
int ret;
- if (readval == NULL) {
+ if (!readval) {
guard(mutex)(&st->lock);
ret = ad9467_spi_write(spi, reg, writeval);
if (ret)
@@ -234,43 +228,46 @@ static const struct iio_chan_spec ad9467_channels[] = {
AD9467_CHAN(0, 0, 16, 'S'),
};
-static const struct ad9467_chip_info ad9467_chip_tbl[] = {
- [ID_AD9265] = {
- .axi_adc_info = {
- .id = CHIPID_AD9265,
- .max_rate = 125000000UL,
- .scale_table = ad9265_scale_table,
- .num_scales = ARRAY_SIZE(ad9265_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
- },
- .default_output_mode = AD9265_DEF_OUTPUT_MODE,
- .vref_mask = AD9265_REG_VREF_MASK,
+static const struct ad9467_chip_info ad9467_chip_tbl = {
+ .axi_adc_info = {
+ .name = "ad9467",
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
},
- [ID_AD9434] = {
- .axi_adc_info = {
- .id = CHIPID_AD9434,
- .max_rate = 500000000UL,
- .scale_table = ad9434_scale_table,
- .num_scales = ARRAY_SIZE(ad9434_scale_table),
- .channels = ad9434_channels,
- .num_channels = ARRAY_SIZE(ad9434_channels),
- },
- .default_output_mode = AD9434_DEF_OUTPUT_MODE,
- .vref_mask = AD9434_REG_VREF_MASK,
+ .default_output_mode = AD9467_DEF_OUTPUT_MODE,
+ .vref_mask = AD9467_REG_VREF_MASK,
+};
+
+static const struct ad9467_chip_info ad9434_chip_tbl = {
+ .axi_adc_info = {
+ .name = "ad9434",
+ .id = CHIPID_AD9434,
+ .max_rate = 500000000UL,
+ .scale_table = ad9434_scale_table,
+ .num_scales = ARRAY_SIZE(ad9434_scale_table),
+ .channels = ad9434_channels,
+ .num_channels = ARRAY_SIZE(ad9434_channels),
},
- [ID_AD9467] = {
- .axi_adc_info = {
- .id = CHIPID_AD9467,
- .max_rate = 250000000UL,
- .scale_table = ad9467_scale_table,
- .num_scales = ARRAY_SIZE(ad9467_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
- },
- .default_output_mode = AD9467_DEF_OUTPUT_MODE,
- .vref_mask = AD9467_REG_VREF_MASK,
+ .default_output_mode = AD9434_DEF_OUTPUT_MODE,
+ .vref_mask = AD9434_REG_VREF_MASK,
+};
+
+static const struct ad9467_chip_info ad9265_chip_tbl = {
+ .axi_adc_info = {
+ .name = "ad9265",
+ .id = CHIPID_AD9265,
+ .max_rate = 125000000UL,
+ .scale_table = ad9265_scale_table,
+ .num_scales = ARRAY_SIZE(ad9265_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
},
+ .default_output_mode = AD9265_DEF_OUTPUT_MODE,
+ .vref_mask = AD9265_REG_VREF_MASK,
};
static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
@@ -454,9 +451,7 @@ static int ad9467_probe(struct spi_device *spi)
unsigned int id;
int ret;
- info = of_device_get_match_data(&spi->dev);
- if (!info)
- info = (void *)spi_get_device_id(spi)->driver_data;
+ info = spi_get_device_match_data(spi);
if (!info)
return -ENODEV;
@@ -506,17 +501,17 @@ static int ad9467_probe(struct spi_device *spi)
}
static const struct of_device_id ad9467_of_match[] = {
- { .compatible = "adi,ad9265", .data = &ad9467_chip_tbl[ID_AD9265], },
- { .compatible = "adi,ad9434", .data = &ad9467_chip_tbl[ID_AD9434], },
- { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
+ { .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, },
+ { .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, },
+ { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, },
{}
};
MODULE_DEVICE_TABLE(of, ad9467_of_match);
static const struct spi_device_id ad9467_ids[] = {
- { "ad9265", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9265] },
- { "ad9434", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9434] },
- { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9467] },
+ { "ad9265", (kernel_ulong_t)&ad9265_chip_tbl },
+ { "ad9434", (kernel_ulong_t)&ad9434_chip_tbl },
+ { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl },
{}
};
MODULE_DEVICE_TABLE(spi, ad9467_ids);
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index ae83ada7f..c247ff154 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -14,6 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
@@ -62,7 +63,7 @@ struct adi_axi_adc_state {
struct mutex lock;
struct adi_axi_adc_client *client;
- void __iomem *regs;
+ struct regmap *regmap;
};
struct adi_axi_adc_client {
@@ -90,19 +91,6 @@ void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
}
EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI);
-static void adi_axi_adc_write(struct adi_axi_adc_state *st,
- unsigned int reg,
- unsigned int val)
-{
- iowrite32(val, st->regs + reg);
-}
-
-static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
- unsigned int reg)
-{
- return ioread32(st->regs + reg);
-}
-
static int adi_axi_adc_config_dma_buffer(struct device *dev,
struct iio_dev *indio_dev)
{
@@ -163,17 +151,20 @@ static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
{
struct adi_axi_adc_state *st = iio_priv(indio_dev);
struct adi_axi_adc_conv *conv = &st->client->conv;
- unsigned int i, ctrl;
+ unsigned int i;
+ int ret;
for (i = 0; i < conv->chip_info->num_channels; i++) {
- ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));
-
if (test_bit(i, scan_mask))
- ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
+ ret = regmap_set_bits(st->regmap,
+ ADI_AXI_REG_CHAN_CTRL(i),
+ ADI_AXI_REG_CHAN_CTRL_ENABLE);
else
- ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;
-
- adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
+ ret = regmap_clear_bits(st->regmap,
+ ADI_AXI_REG_CHAN_CTRL(i),
+ ADI_AXI_REG_CHAN_CTRL_ENABLE);
+ if (ret)
+ return ret;
}
return 0;
@@ -310,21 +301,32 @@ static int adi_axi_adc_setup_channels(struct device *dev,
}
for (i = 0; i < conv->chip_info->num_channels; i++) {
- adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+ ret = regmap_write(st->regmap, ADI_AXI_REG_CHAN_CTRL(i),
+ ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+ if (ret)
+ return ret;
}
return 0;
}
-static void axi_adc_reset(struct adi_axi_adc_state *st)
+static int axi_adc_reset(struct adi_axi_adc_state *st)
{
- adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
+ int ret;
+
+ ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
+ if (ret)
+ return ret;
+
mdelay(10);
- adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
+ ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_MMCM_RSTN);
+ if (ret)
+ return ret;
+
mdelay(10);
- adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
- ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+ return regmap_write(st->regmap, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}
static void adi_axi_adc_cleanup(void *data)
@@ -335,12 +337,20 @@ static void adi_axi_adc_cleanup(void *data)
module_put(cl->dev->driver->owner);
}
+static const struct regmap_config axi_adc_regmap_config = {
+ .val_bits = 32,
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x0800,
+};
+
static int adi_axi_adc_probe(struct platform_device *pdev)
{
struct adi_axi_adc_conv *conv;
struct iio_dev *indio_dev;
struct adi_axi_adc_client *cl;
struct adi_axi_adc_state *st;
+ void __iomem *base;
unsigned int ver;
int ret;
@@ -361,15 +371,24 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
cl->state = st;
mutex_init(&st->lock);
- st->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(st->regs))
- return PTR_ERR(st->regs);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &axi_adc_regmap_config);
+ if (IS_ERR(st->regmap))
+ return PTR_ERR(st->regmap);
conv = &st->client->conv;
- axi_adc_reset(st);
+ ret = axi_adc_reset(st);
+ if (ret)
+ return ret;
- ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);
+ ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
+ if (ret)
+ return ret;
if (cl->info->version > ver) {
dev_err(&pdev->dev,
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c
new file mode 100644
index 000000000..6c2ea2bc5
--- /dev/null
+++ b/drivers/iio/adc/max34408.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO driver for Maxim MAX34409/MAX34408 ADC, 4-channel/2-channel, 8-bit, I2C
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/MAX34408-MAX34409.pdf
+ *
+ * TODO: ALERT interrupt, Overcurrent delay, Shutdown delay
+ */
+
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#define MAX34408_STATUS_REG 0x0
+#define MAX34408_CONTROL_REG 0x1
+#define MAX34408_OCDELAY_REG 0x2
+#define MAX34408_SDDELAY_REG 0x3
+
+#define MAX34408_ADC1_REG 0x4
+#define MAX34408_ADC2_REG 0x5
+/* ADC3 & ADC4 always return 0x0 on the MAX34408 */
+#define MAX34409_ADC3_REG 0x6
+#define MAX34409_ADC4_REG 0x7
+
+#define MAX34408_OCT1_REG 0x8
+#define MAX34408_OCT2_REG 0x9
+#define MAX34409_OCT3_REG 0xA
+#define MAX34409_OCT4_REG 0xB
+
+#define MAX34408_DID_REG 0xC
+#define MAX34408_DCYY_REG 0xD
+#define MAX34408_DCWW_REG 0xE
+
+/* Bit masks for status register */
+#define MAX34408_STATUS_OC_MSK GENMASK(1, 0)
+#define MAX34409_STATUS_OC_MSK GENMASK(3, 0)
+#define MAX34408_STATUS_SHTDN BIT(4)
+#define MAX34408_STATUS_ENA BIT(5)
+
+/* Bit masks for control register */
+#define MAX34408_CONTROL_AVG0 BIT(0)
+#define MAX34408_CONTROL_AVG1 BIT(1)
+#define MAX34408_CONTROL_AVG2 BIT(2)
+#define MAX34408_CONTROL_ALERT BIT(3)
+
+#define MAX34408_DEFAULT_AVG 0x4
+
+/* Bit masks for over current delay */
+#define MAX34408_OCDELAY_OCD_MSK GENMASK(6, 0)
+#define MAX34408_OCDELAY_RESET BIT(7)
+
+/* Bit masks for shutdown delay */
+#define MAX34408_SDDELAY_SHD_MSK GENMASK(6, 0)
+#define MAX34408_SDDELAY_RESET BIT(7)
+
+#define MAX34408_DEFAULT_RSENSE 1000
+
+/**
+ * struct max34408_data - max34408/max34409 specific data.
+ * @regmap: device register map.
+ * @dev: max34408 device.
+ * @lock: lock protecting access to device hardware registers, mostly
+ * for read-modify-write cycles on the control registers.
+ * @input_rsense: Rsense values in uOhm, will be overwritten by
+ * values from channel nodes.
+ */
+struct max34408_data {
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex lock;
+ u32 input_rsense[4];
+};
+
+static const struct regmap_config max34408_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX34408_DCWW_REG,
+};
+
+struct max34408_adc_model_data {
+ const char *model_name;
+ const struct iio_chan_spec *channels;
+ const int num_channels;
+};
+
+#define MAX34008_CHANNEL(_index, _address) \
+ { \
+ .type = IIO_CURRENT, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .channel = (_index), \
+ .address = (_address), \
+ .indexed = 1, \
+ }
+
+static const struct iio_chan_spec max34408_channels[] = {
+ MAX34008_CHANNEL(0, MAX34408_ADC1_REG),
+ MAX34008_CHANNEL(1, MAX34408_ADC2_REG),
+};
+
+static const struct iio_chan_spec max34409_channels[] = {
+ MAX34008_CHANNEL(0, MAX34408_ADC1_REG),
+ MAX34008_CHANNEL(1, MAX34408_ADC2_REG),
+ MAX34008_CHANNEL(2, MAX34409_ADC3_REG),
+ MAX34008_CHANNEL(3, MAX34409_ADC4_REG),
+};
+
+static int max34408_read_adc_avg(struct max34408_data *max34408,
+ const struct iio_chan_spec *chan, int *val)
+{
+ unsigned int ctrl;
+ int rc;
+
+ guard(mutex)(&max34408->lock);
+ rc = regmap_read(max34408->regmap, MAX34408_CONTROL_REG, (u32 *)&ctrl);
+ if (rc)
+ return rc;
+
+ /* set the default averaging value (0b100) */
+ rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG,
+ MAX34408_DEFAULT_AVG);
+ if (rc) {
+ dev_err(max34408->dev,
+ "Error (%d) writing control register\n", rc);
+ return rc;
+ }
+
+ rc = regmap_read(max34408->regmap, chan->address, val);
+ if (rc)
+ return rc;
+
+ /* back to old values */
+ rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, ctrl);
+ if (rc)
+ dev_err(max34408->dev,
+ "Error (%d) writing control register\n", rc);
+
+ return rc;
+}
+
+static int max34408_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max34408_data *max34408 = iio_priv(indio_dev);
+ int rc;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ rc = max34408_read_adc_avg(max34408, chan, val);
+ if (rc)
+ return rc;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * Calculate the current for the 8-bit ADC from the Rsense
+ * value.
+ * 10 mV * 1000 / Rsense uOhm = max current
+ * (max current * adc val * 1000) / (2^8 - 1) mA
+ */
+ *val = 10000 / max34408->input_rsense[chan->channel];
+ *val2 = 8;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
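With the default 1000 uOhm Rsense, the scale computation above works out as follows (worked numbers under the datasheet formula quoted in the comment):

    /* Rsense = 1000 uOhm (MAX34408_DEFAULT_RSENSE):
     *   *val  = 10000 / 1000 = 10, *val2 = 8
     *   scale = 10 / 2^8 ~= 0.0390625 mA per ADC code
     *   raw = 128  ->  128 * 10 / 256 = 5 mA
     */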
+
+static const struct iio_info max34408_info = {
+ .read_raw = max34408_read_raw,
+};
+
+static const struct max34408_adc_model_data max34408_model_data = {
+ .model_name = "max34408",
+ .channels = max34408_channels,
+ .num_channels = 2,
+};
+
+static const struct max34408_adc_model_data max34409_model_data = {
+ .model_name = "max34409",
+ .channels = max34409_channels,
+ .num_channels = 4,
+};
+
+static int max34408_probe(struct i2c_client *client)
+{
+ const struct max34408_adc_model_data *model_data;
+ struct device *dev = &client->dev;
+ struct max34408_data *max34408;
+ struct fwnode_handle *node;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int rc, i = 0;
+
+ model_data = i2c_get_match_data(client);
+ if (!model_data)
+ return -EINVAL;
+
+ regmap = devm_regmap_init_i2c(client, &max34408_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err_probe(dev, PTR_ERR(regmap),
+ "regmap_init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*max34408));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ max34408 = iio_priv(indio_dev);
+ max34408->regmap = regmap;
+ max34408->dev = dev;
+ mutex_init(&max34408->lock);
+
+ device_for_each_child_node(dev, node) {
+ fwnode_property_read_u32(node, "maxim,rsense-val-micro-ohms",
+ &max34408->input_rsense[i]);
+ i++;
+ }
+
+ /* disable ALERT and averaging */
+ rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, 0x0);
+ if (rc)
+ return rc;
+
+ indio_dev->channels = model_data->channels;
+ indio_dev->num_channels = model_data->num_channels;
+ indio_dev->name = model_data->model_name;
+
+ indio_dev->info = &max34408_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id max34408_of_match[] = {
+ {
+ .compatible = "maxim,max34408",
+ .data = &max34408_model_data,
+ },
+ {
+ .compatible = "maxim,max34409",
+ .data = &max34409_model_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max34408_of_match);
+
+static const struct i2c_device_id max34408_id[] = {
+ { "max34408", (kernel_ulong_t)&max34408_model_data },
+ { "max34409", (kernel_ulong_t)&max34409_model_data },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, max34408_id);
+
+static struct i2c_driver max34408_driver = {
+ .driver = {
+ .name = "max34408",
+ .of_match_table = max34408_of_match,
+ },
+ .probe = max34408_probe,
+ .id_table = max34408_id,
+};
+module_i2c_driver(max34408_driver);
+
+MODULE_AUTHOR("Ivan Mikhaylov <fr0st61te@gmail.com>");
+MODULE_DESCRIPTION("Maxim MAX34408/34409 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index d864558bc..7a32e7a1b 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -316,47 +317,37 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct mcp3911 *adc = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret;
- mutex_lock(&adc->lock);
+ guard(mutex)(&adc->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp3911_read(adc,
MCP3911_CHANNEL(channel->channel), val, 3);
if (ret)
- goto out;
+ return ret;
*val = sign_extend32(*val, 23);
-
- ret = IIO_VAL_INT;
- break;
-
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
-
ret = adc->chip->get_offset(adc, channel->channel, val);
if (ret)
- goto out;
+ return ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
ret = adc->chip->get_osr(adc, val);
if (ret)
- goto out;
-
- ret = IIO_VAL_INT;
- break;
+ return ret;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][0];
*val2 = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][1];
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
}
-
-out:
- mutex_unlock(&adc->lock);
- return ret;
}
static int mcp3911_write_raw(struct iio_dev *indio_dev,
@@ -364,9 +355,8 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct mcp3911 *adc = iio_priv(indio_dev);
- int ret = -EINVAL;
- mutex_lock(&adc->lock);
+ guard(mutex)(&adc->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
for (int i = 0; i < MCP3911_NUM_SCALES; i++) {
@@ -374,32 +364,25 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
val2 == mcp3911_scale_table[i][1]) {
adc->gain[channel->channel] = BIT(i);
- ret = adc->chip->set_scale(adc, channel->channel, i);
+ return adc->chip->set_scale(adc, channel->channel, i);
}
}
- break;
+ return -EINVAL;
case IIO_CHAN_INFO_OFFSET:
- if (val2 != 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = adc->chip->set_offset(adc, channel->channel, val);
- break;
+ if (val2 != 0)
+ return -EINVAL;
+ return adc->chip->set_offset(adc, channel->channel, val);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
if (val == mcp3911_osr_table[i]) {
- ret = adc->chip->set_osr(adc, i);
- break;
+ return adc->chip->set_osr(adc, i);
}
}
- break;
+ return -EINVAL;
+ default:
+ return -EINVAL;
}
-
-out:
- mutex_unlock(&adc->lock);
- return ret;
}
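The mcp3911 rework leans on the scope-based guard() helper from <linux/cleanup.h>: the mutex is dropped automatically when the guard goes out of scope, which is what allows the early returns above to shed the out:/mutex_unlock() bookkeeping. A minimal sketch of the idiom (type and field names are hypothetical):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static int do_locked_work(struct my_state *st)
    {
            guard(mutex)(&st->lock);   /* released on every return path */

            if (!st->ready)
                    return -EBUSY;     /* no explicit mutex_unlock() needed */

            return 0;
    }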
static int mcp3911_calc_scale_table(struct mcp3911 *adc)
@@ -532,7 +515,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
int i = 0;
int ret;
- mutex_lock(&adc->lock);
+ guard(mutex)(&adc->lock);
adc->tx_buf = MCP3911_REG_READ(MCP3911_CHANNEL(0), adc->dev_addr);
ret = spi_sync_transfer(adc->spi, xfer, ARRAY_SIZE(xfer));
if (ret < 0) {
@@ -549,7 +532,6 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
out:
- mutex_unlock(&adc->lock);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index e87d35d50..ed4d72922 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -5,6 +5,7 @@
* Copyright 2020 Analog Devices Inc.
*/
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -22,6 +23,7 @@
enum hmc425a_type {
ID_HMC425A,
ID_HMC540S,
+ ID_ADRF5740
};
struct hmc425a_chip_info {
@@ -74,6 +76,10 @@ static int hmc425a_read_raw(struct iio_dev *indio_dev,
case ID_HMC540S:
gain = ~code * -1000;
break;
+ case ID_ADRF5740:
+ code = code & BIT(3) ? code & ~BIT(2) : code;
+ gain = code * -2000;
+ break;
}
*val = gain / 1000;
@@ -113,6 +119,10 @@ static int hmc425a_write_raw(struct iio_dev *indio_dev,
case ID_HMC540S:
code = ~((abs(gain) / 1000) & 0xF);
break;
+ case ID_ADRF5740:
+ code = (abs(gain) / 2000) & 0xF;
+ code = code & BIT(3) ? code | BIT(2) : code;
+ break;
}
mutex_lock(&st->lock);
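For the ADRF5740 the write path ORs in BIT(2) whenever BIT(3) is set, and the read path masks it back out before converting to a gain; a worked round trip under the 2 dB/LSB mapping in the code above:

    /* Requested gain -18000 mdB:
     *   write: code = (18000 / 2000) & 0xF = 0x9, BIT(3) set
     *          -> code |= BIT(2) -> 0xD driven onto the GPIOs
     *   read:  0xD has BIT(3) set -> clear BIT(2) -> 0x9
     *          gain = 0x9 * -2000 = -18000 mdB
     */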
@@ -165,6 +175,7 @@ static const struct iio_chan_spec hmc425a_channels[] = {
static const struct of_device_id hmc425a_of_match[] = {
{ .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A },
{ .compatible = "adi,hmc540s", .data = (void *)ID_HMC540S },
+ { .compatible = "adi,adrf5740", .data = (void *)ID_ADRF5740 },
{},
};
MODULE_DEVICE_TABLE(of, hmc425a_of_match);
@@ -188,6 +199,15 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_max = 0,
.default_gain = -0x10, /* set default gain -15.0 dB */
},
+ [ID_ADRF5740] = {
+ .name = "adrf5740",
+ .channels = hmc425a_channels,
+ .num_channels = ARRAY_SIZE(hmc425a_channels),
+ .num_gpios = 4,
+ .gain_min = -22000,
+ .gain_max = 0,
+ .default_gain = 0xF, /* set default gain -22.0 dB */
+ },
};
static int hmc425a_probe(struct platform_device *pdev)
@@ -229,6 +249,9 @@ static int hmc425a_probe(struct platform_device *pdev)
indio_dev->info = &hmc425a_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ /* Set default gain */
+ hmc425a_write(indio_dev, st->gain);
+
return devm_iio_device_register(&pdev->dev, indio_dev);
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index d348af8b9..5610ba679 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -179,7 +179,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
}
block->size = size;
- block->state = IIO_BLOCK_STATE_DEQUEUED;
+ block->state = IIO_BLOCK_STATE_DONE;
block->queue = queue;
INIT_LIST_HEAD(&block->head);
kref_init(&block->kref);
@@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
- struct iio_dma_buffer_queue *queue = block->queue;
-
- /*
- * The buffer has already been freed by the application, just drop the
- * reference.
- */
- if (block->state != IIO_BLOCK_STATE_DEAD) {
+ if (block->state != IIO_BLOCK_STATE_DEAD)
block->state = IIO_BLOCK_STATE_DONE;
- list_add_tail(&block->head, &queue->outgoing);
- }
}
/**
@@ -261,7 +253,6 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
* not support abort and has not given back the block yet.
*/
switch (block->state) {
- case IIO_BLOCK_STATE_DEQUEUED:
case IIO_BLOCK_STATE_QUEUED:
case IIO_BLOCK_STATE_DONE:
return true;
@@ -317,7 +308,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
* dead. This means we can reset the lists without having to fear
* corruption.
*/
- INIT_LIST_HEAD(&queue->outgoing);
spin_unlock_irq(&queue->list_lock);
INIT_LIST_HEAD(&queue->incoming);
@@ -356,6 +346,29 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);
+static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
+{
+ unsigned int i;
+
+ spin_lock_irq(&queue->list_lock);
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+ if (!queue->fileio.blocks[i])
+ continue;
+ queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
+ }
+ spin_unlock_irq(&queue->list_lock);
+
+ INIT_LIST_HEAD(&queue->incoming);
+
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+ if (!queue->fileio.blocks[i])
+ continue;
+ iio_buffer_block_put(queue->fileio.blocks[i]);
+ queue->fileio.blocks[i] = NULL;
+ }
+ queue->fileio.active_block = NULL;
+}
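The helper factored out above keeps the teardown in two phases: blocks are first marked DEAD under the spinlock, so an in-flight completion can no longer hand them back, and the references are dropped only after the lock is released, since the final put may free memory:

    /* Pattern sketch (mirrors iio_dma_buffer_fileio_free() above):
     *   spin_lock_irq(&queue->list_lock);
     *   mark every allocated block IIO_BLOCK_STATE_DEAD;
     *   spin_unlock_irq(&queue->list_lock);
     *
     *   iio_buffer_block_put() each block outside the lock.
     */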
+
static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
struct iio_dma_buffer_block *block)
{
@@ -456,14 +469,20 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
struct iio_dma_buffer_queue *queue)
{
struct iio_dma_buffer_block *block;
+ unsigned int idx;
spin_lock_irq(&queue->list_lock);
- block = list_first_entry_or_null(&queue->outgoing, struct
- iio_dma_buffer_block, head);
- if (block != NULL) {
- list_del(&block->head);
- block->state = IIO_BLOCK_STATE_DEQUEUED;
+
+ idx = queue->fileio.next_dequeue;
+ block = queue->fileio.blocks[idx];
+
+ if (block->state == IIO_BLOCK_STATE_DONE) {
+ idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
+ queue->fileio.next_dequeue = idx;
+ } else {
+ block = NULL;
}
+
spin_unlock_irq(&queue->list_lock);
return block;
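With the outgoing list gone, fileio blocks are now consumed as a fixed ring: next_dequeue indexes fileio.blocks[], a block is handed out only once it has reached IIO_BLOCK_STATE_DONE, and the index then advances modulo the array size, so a block still owned by the DMA engine stalls the reader instead of being skipped:

    /* Ring walk over fileio.blocks[] (sketch of the logic above):
     *   idx: 0 -> 1 -> 2 -> 3 -> 0 -> ...
     *   blocks[idx] not DONE yet  ->  return NULL, reader waits
     *   blocks[idx] DONE          ->  hand it out, idx = (idx + 1) % n
     */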
@@ -539,6 +558,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
struct iio_dma_buffer_block *block;
size_t data_available = 0;
+ unsigned int i;
/*
* For counting the available bytes we'll use the size of the block not
@@ -552,8 +572,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
data_available += queue->fileio.active_block->size;
spin_lock_irq(&queue->list_lock);
- list_for_each_entry(block, &queue->outgoing, head)
- data_available += block->size;
+
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
+ block = queue->fileio.blocks[i];
+
+ if (block != queue->fileio.active_block
+ && block->state == IIO_BLOCK_STATE_DONE)
+ data_available += block->size;
+ }
+
spin_unlock_irq(&queue->list_lock);
mutex_unlock(&queue->lock);
@@ -617,7 +644,6 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
queue->ops = ops;
INIT_LIST_HEAD(&queue->incoming);
- INIT_LIST_HEAD(&queue->outgoing);
mutex_init(&queue->lock);
spin_lock_init(&queue->list_lock);
@@ -635,28 +661,9 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_init);
*/
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
- unsigned int i;
-
mutex_lock(&queue->lock);
- spin_lock_irq(&queue->list_lock);
- for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
- if (!queue->fileio.blocks[i])
- continue;
- queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
- }
- INIT_LIST_HEAD(&queue->outgoing);
- spin_unlock_irq(&queue->list_lock);
-
- INIT_LIST_HEAD(&queue->incoming);
-
- for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
- if (!queue->fileio.blocks[i])
- continue;
- iio_buffer_block_put(queue->fileio.blocks[i]);
- queue->fileio.blocks[i] = NULL;
- }
- queue->fileio.active_block = NULL;
+ iio_dma_buffer_fileio_free(queue);
queue->ops = NULL;
mutex_unlock(&queue->lock);
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index c30657e10..02649ab81 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -5,6 +5,17 @@
menu "Chemical Sensors"
+config AOSONG_AGS02MA
+ tristate "Aosong AGS02MA TVOC sensor driver"
+ depends on I2C
+ select CRC8
+ help
+ Say Y here to build support for Aosong AGS02MA TVOC (Total Volatile
+ Organic Compounds) sensor.
+
+ To compile this driver as module, choose M here: the module will be
+ called ags02ma.
+
config ATLAS_PH_SENSOR
tristate "Atlas Scientific OEM SM sensors"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index a11e777a7..2f3dee8bb 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,6 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AOSONG_AGS02MA) += ags02ma.o
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o
obj-$(CONFIG_ATLAS_EZO_SENSOR) += atlas-ezo-sensor.o
obj-$(CONFIG_BME680) += bme680_core.o
diff --git a/drivers/iio/chemical/ags02ma.c b/drivers/iio/chemical/ags02ma.c
new file mode 100644
index 000000000..8fcd80946
--- /dev/null
+++ b/drivers/iio/chemical/ags02ma.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Driver for Aosong AGS02MA
+ *
+ * Datasheet:
+ * https://asairsensors.com/wp-content/uploads/2021/09/AGS02MA.pdf
+ * Product Page:
+ * http://www.aosong.com/m/en/products-33.html
+ */
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+#define AGS02MA_TVOC_READ_REG 0x00
+#define AGS02MA_VERSION_REG 0x11
+
+#define AGS02MA_VERSION_PROCESSING_DELAY 30
+#define AGS02MA_TVOC_READ_PROCESSING_DELAY 1500
+
+#define AGS02MA_CRC8_INIT 0xff
+#define AGS02MA_CRC8_POLYNOMIAL 0x31
+
+DECLARE_CRC8_TABLE(ags02ma_crc8_table);
+
+struct ags02ma_data {
+ struct i2c_client *client;
+};
+
+struct ags02ma_reading {
+ __be32 data;
+ u8 crc;
+} __packed;
+
+static int ags02ma_register_read(struct i2c_client *client, u8 reg, u16 delay,
+ u32 *val)
+{
+ int ret;
+ u8 crc;
+ struct ags02ma_reading read_buffer;
+
+ ret = i2c_master_send(client, &reg, sizeof(reg));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to send data to register 0x%x: %d", reg, ret);
+ return ret;
+ }
+
+ /* Processing Delay, Check Table 7.7 in the datasheet */
+ msleep_interruptible(delay);
+
+ ret = i2c_master_recv(client, (u8 *)&read_buffer, sizeof(read_buffer));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to receive from register 0x%x: %d", reg, ret);
+ return ret;
+ }
+
+ crc = crc8(ags02ma_crc8_table, (u8 *)&read_buffer.data,
+ sizeof(read_buffer.data), AGS02MA_CRC8_INIT);
+ if (crc != read_buffer.crc) {
+ dev_err(&client->dev, "CRC error\n");
+ return -EIO;
+ }
+
+ *val = be32_to_cpu(read_buffer.data);
+ return 0;
+}
+
+static int ags02ma_read_raw(struct iio_dev *iio_device,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ struct ags02ma_data *data = iio_priv(iio_device);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ags02ma_register_read(data->client, AGS02MA_TVOC_READ_REG,
+ AGS02MA_TVOC_READ_PROCESSING_DELAY,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* The sensor reports data in ppb */
+ *val = 0;
+ *val2 = 100;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
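The scale returned above is what maps the raw ppb reading onto IIO's concentration unit (percent); a worked conversion:

    /* 1 ppb = 1e-9 (fraction) = 1e-7 percent.
     * val = 0, val2 = 100 with IIO_VAL_INT_PLUS_NANO encodes 100e-9 = 1e-7,
     * so e.g. raw = 250 ppb -> 250 * 1e-7 = 0.000025 percent.
     */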
+
+static const struct iio_info ags02ma_info = {
+ .read_raw = ags02ma_read_raw,
+};
+
+static const struct iio_chan_spec ags02ma_channel = {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_VOC,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+};
+
+static int ags02ma_probe(struct i2c_client *client)
+{
+ int ret;
+ struct ags02ma_data *data;
+ struct iio_dev *indio_dev;
+ u32 version;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ crc8_populate_msb(ags02ma_crc8_table, AGS02MA_CRC8_POLYNOMIAL);
+
+ ret = ags02ma_register_read(client, AGS02MA_VERSION_REG,
+ AGS02MA_VERSION_PROCESSING_DELAY, &version);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to read device version\n");
+ dev_dbg(&client->dev, "Aosong AGS02MA, Version: 0x%x", version);
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ indio_dev->info = &ags02ma_info;
+ indio_dev->channels = &ags02ma_channel;
+ indio_dev->num_channels = 1;
+ indio_dev->name = "ags02ma";
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id ags02ma_id_table[] = {
+ { "ags02ma" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ags02ma_id_table);
+
+static const struct of_device_id ags02ma_of_table[] = {
+ { .compatible = "aosong,ags02ma" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ags02ma_of_table);
+
+static struct i2c_driver ags02ma_driver = {
+ .driver = {
+ .name = "ags02ma",
+ .of_match_table = ags02ma_of_table,
+ },
+ .id_table = ags02ma_id_table,
+ .probe = ags02ma_probe,
+};
+module_i2c_driver(ags02ma_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Aosong AGS02MA TVOC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index e9857d93b..b5cf15a51 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -211,13 +211,13 @@ static bool pms7003_frame_is_okay(struct pms7003_frame *frame)
return checksum == pms7003_calc_checksum(frame);
}
-static int pms7003_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t size)
+static ssize_t pms7003_receive_buf(struct serdev_device *serdev, const u8 *buf,
+ size_t size)
{
struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
struct pms7003_state *state = iio_priv(indio_dev);
struct pms7003_frame *frame = &state->frame;
- int num;
+ size_t num;
if (!frame->expected_length) {
u16 magic;
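This hunk (and the scd30/sps30 ones below) adapts to the serdev core change where .receive_buf now takes a const u8 * and returns ssize_t, the number of bytes actually consumed. A skeleton of the updated callback shape, with hypothetical names:

    static ssize_t my_receive_buf(struct serdev_device *serdev,
                                  const u8 *buf, size_t size)
    {
            size_t num = min_t(size_t, size, MY_FRAME_BYTES); /* illustrative */

            /* ... parse num bytes into the driver's frame buffer ... */
            return num;   /* bytes consumed; the core re-offers the rest */
    }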
diff --git a/drivers/iio/chemical/scd30_serial.c b/drivers/iio/chemical/scd30_serial.c
index 3c519103d..a47654591 100644
--- a/drivers/iio/chemical/scd30_serial.c
+++ b/drivers/iio/chemical/scd30_serial.c
@@ -174,13 +174,13 @@ static int scd30_serdev_command(struct scd30_state *state, enum scd30_cmd cmd, u
return 0;
}
-static int scd30_serdev_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t size)
+static ssize_t scd30_serdev_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
struct scd30_serdev_priv *priv;
struct scd30_state *state;
- int num;
+ size_t num;
if (!indio_dev)
return 0;
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
index 164f4b3e0..3afa89f8a 100644
--- a/drivers/iio/chemical/sps30_serial.c
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -74,8 +74,8 @@ static int sps30_serial_xfer(struct sps30_state *state, const unsigned char *buf
}
static const struct {
- unsigned char byte;
- unsigned char byte2;
+ u8 byte;
+ u8 byte2;
} sps30_serial_bytes[] = {
{ 0x11, 0x31 },
{ 0x13, 0x33 },
@@ -83,7 +83,7 @@ static const struct {
{ 0x7d, 0x5d },
};
-static int sps30_serial_put_byte(unsigned char *buf, unsigned char byte)
+static int sps30_serial_put_byte(u8 *buf, u8 byte)
{
int i;
@@ -102,7 +102,7 @@ static int sps30_serial_put_byte(unsigned char *buf, unsigned char byte)
return 1;
}
-static char sps30_serial_get_byte(bool escaped, unsigned char byte2)
+static u8 sps30_serial_get_byte(bool escaped, u8 byte2)
{
int i;
@@ -130,8 +130,8 @@ static unsigned char sps30_serial_calc_chksum(const unsigned char *buf, size_t n
return ~chksum;
}
-static int sps30_serial_prep_frame(unsigned char *buf, unsigned char cmd,
- const unsigned char *arg, size_t arg_size)
+static int sps30_serial_prep_frame(u8 *buf, u8 cmd, const u8 *arg,
+ size_t arg_size)
{
unsigned char chksum;
int num = 0;
@@ -210,14 +210,14 @@ static int sps30_serial_command(struct sps30_state *state, unsigned char cmd,
return rsp_size;
}
-static int sps30_serial_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t size)
+static ssize_t sps30_serial_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev);
struct sps30_serial_priv *priv;
struct sps30_state *state;
- unsigned char byte;
size_t i;
+ u8 byte;
if (!indio_dev)
return 0;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 93b8be183..34eb40bb9 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -400,6 +400,16 @@ config MCP4728
To compile this driver as a module, choose M here: the module
will be called mcp4728.
+config MCP4821
+ tristate "MCP4801/02/11/12/21/22 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build the driver for the Microchip MCP4801,
+ MCP4802, MCP4811, MCP4812, MCP4821 and MCP4822 DAC devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mcp4821.
+
config MCP4922
tristate "MCP4902, MCP4912, MCP4922 DAC driver"
depends on SPI
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 5b2bac900..55bf89739 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_MAX5522) += max5522.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4728) += mcp4728.o
+obj-$(CONFIG_MCP4821) += mcp4821.o
obj-$(CONFIG_MCP4922) += mcp4922.o
obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
obj-$(CONFIG_STM32_DAC) += stm32-dac.o
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index a4167454d..75b549827 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -345,6 +345,7 @@ static int ad5791_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
struct ad5791_state *st;
int ret, pos_voltage_uv = 0, neg_voltage_uv = 0;
+ bool use_rbuf_gain2;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
@@ -379,6 +380,12 @@ static int ad5791_probe(struct spi_device *spi)
st->pwr_down = true;
st->spi = spi;
+ if (pdata)
+ use_rbuf_gain2 = pdata->use_rbuf_gain2;
+ else
+ use_rbuf_gain2 = device_property_read_bool(&spi->dev,
+ "adi,rbuf-gain2-en");
+
if (!IS_ERR(st->reg_vss) && !IS_ERR(st->reg_vdd)) {
st->vref_mv = (pos_voltage_uv + neg_voltage_uv) / 1000;
st->vref_neg_mv = neg_voltage_uv / 1000;
@@ -398,7 +405,7 @@ static int ad5791_probe(struct spi_device *spi)
st->ctrl = AD5761_CTRL_LINCOMP(st->chip_info->get_lin_comp(st->vref_mv))
- | ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) |
+ | (use_rbuf_gain2 ? 0 : AD5791_CTRL_RBUF) |
AD5791_CTRL_BIN2SC;
ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl |
diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c
new file mode 100644
index 000000000..8a0480d33
--- /dev/null
+++ b/drivers/iio/dac/mcp4821.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Driver for Microchip MCP4801, MCP4802, MCP4811, MCP4812, MCP4821 and MCP4822
+ *
+ * Based on the work of:
+ * Michael Welling (MCP4922 Driver)
+ *
+ * Datasheet:
+ * MCP48x1: https://ww1.microchip.com/downloads/en/DeviceDoc/22244B.pdf
+ * MCP48x2: https://ww1.microchip.com/downloads/en/DeviceDoc/20002249B.pdf
+ *
+ * TODO:
+ * - Configurable gain
+ * - Regulator control
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#include <asm/unaligned.h>
+
+#define MCP4821_ACTIVE_MODE BIT(12)
+#define MCP4802_SECOND_CHAN BIT(15)
+
+/* The DAC uses an internal voltage reference of 4.096 V at a gain of 2x */
+#define MCP4821_2X_GAIN_VREF_MV 4096
+
+enum mcp4821_supported_drvice_ids {
+ ID_MCP4801,
+ ID_MCP4802,
+ ID_MCP4811,
+ ID_MCP4812,
+ ID_MCP4821,
+ ID_MCP4822,
+};
+
+struct mcp4821_state {
+ struct spi_device *spi;
+ u16 dac_value[2];
+};
+
+struct mcp4821_chip_info {
+ const char *name;
+ int num_channels;
+ const struct iio_chan_spec channels[2];
+};
+
+#define MCP4821_CHAN(channel_id, resolution) \
+ { \
+ .type = IIO_VOLTAGE, .output = 1, .indexed = 1, \
+ .channel = (channel_id), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .realbits = (resolution), \
+ .shift = 12 - (resolution), \
+ }, \
+ }
+
+static const struct mcp4821_chip_info mcp4821_chip_info_table[6] = {
+ [ID_MCP4801] = {
+ .name = "mcp4801",
+ .num_channels = 1,
+ .channels = {
+ MCP4821_CHAN(0, 8),
+ },
+ },
+ [ID_MCP4802] = {
+ .name = "mcp4802",
+ .num_channels = 2,
+ .channels = {
+ MCP4821_CHAN(0, 8),
+ MCP4821_CHAN(1, 8),
+ },
+ },
+ [ID_MCP4811] = {
+ .name = "mcp4811",
+ .num_channels = 1,
+ .channels = {
+ MCP4821_CHAN(0, 10),
+ },
+ },
+ [ID_MCP4812] = {
+ .name = "mcp4812",
+ .num_channels = 2,
+ .channels = {
+ MCP4821_CHAN(0, 10),
+ MCP4821_CHAN(1, 10),
+ },
+ },
+ [ID_MCP4821] = {
+ .name = "mcp4821",
+ .num_channels = 1,
+ .channels = {
+ MCP4821_CHAN(0, 12),
+ },
+ },
+ [ID_MCP4822] = {
+ .name = "mcp4822",
+ .num_channels = 2,
+ .channels = {
+ MCP4821_CHAN(0, 12),
+ MCP4821_CHAN(1, 12),
+ },
+ },
+};
+
+static int mcp4821_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct mcp4821_state *state;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ state = iio_priv(indio_dev);
+ *val = state->dac_value[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = MCP4821_2X_GAIN_VREF_MV;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp4821_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct mcp4821_state *state = iio_priv(indio_dev);
+ u16 write_val;
+ __be16 write_buffer;
+ int ret;
+
+ if (val2 != 0)
+ return -EINVAL;
+
+ if (val < 0 || val >= BIT(chan->scan_type.realbits))
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ write_val = MCP4821_ACTIVE_MODE | val << chan->scan_type.shift;
+ if (chan->channel)
+ write_val |= MCP4802_SECOND_CHAN;
+
+ write_buffer = cpu_to_be16(write_val);
+ ret = spi_write(state->spi, &write_buffer, sizeof(write_buffer));
+ if (ret) {
+ dev_err(&state->spi->dev, "Failed to write to device: %d", ret);
+ return ret;
+ }
+
+ state->dac_value[chan->channel] = val;
+
+ return 0;
+}
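A worked encoding for the write path above, taking the dual 8-bit MCP4802 as an example:

    /* MCP4802 (8-bit parts shift by 12 - 8 = 4), channel B, val = 0x80:
     *   write_val = MCP4821_ACTIVE_MODE | (0x80 << 4)  = 0x1800
     *   channel B : write_val |= MCP4802_SECOND_CHAN   = 0x9800
     * Transmitted big-endian as 0x98 0x00.
     */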
+
+static const struct iio_info mcp4821_info = {
+ .read_raw = &mcp4821_read_raw,
+ .write_raw = &mcp4821_write_raw,
+};
+
+static int mcp4821_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct mcp4821_state *state;
+ const struct mcp4821_chip_info *info;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ state = iio_priv(indio_dev);
+ state->spi = spi;
+
+ info = spi_get_device_match_data(spi);
+ indio_dev->name = info->name;
+ indio_dev->info = &mcp4821_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = info->channels;
+ indio_dev->num_channels = info->num_channels;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+#define MCP4821_COMPATIBLE(of_compatible, id) \
+ { \
+ .compatible = of_compatible, \
+ .data = &mcp4821_chip_info_table[id] \
+ }
+
+static const struct of_device_id mcp4821_of_table[] = {
+ MCP4821_COMPATIBLE("microchip,mcp4801", ID_MCP4801),
+ MCP4821_COMPATIBLE("microchip,mcp4802", ID_MCP4802),
+ MCP4821_COMPATIBLE("microchip,mcp4811", ID_MCP4811),
+ MCP4821_COMPATIBLE("microchip,mcp4812", ID_MCP4812),
+ MCP4821_COMPATIBLE("microchip,mcp4821", ID_MCP4821),
+ MCP4821_COMPATIBLE("microchip,mcp4822", ID_MCP4822),
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mcp4821_of_table);
+
+static const struct spi_device_id mcp4821_id_table[] = {
+ { "mcp4801", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4801]},
+ { "mcp4802", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4802]},
+ { "mcp4811", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4811]},
+ { "mcp4812", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4812]},
+ { "mcp4821", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4821]},
+ { "mcp4822", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4822]},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, mcp4821_id_table);
+
+static struct spi_driver mcp4821_driver = {
+ .driver = {
+ .name = "mcp4821",
+ .of_match_table = mcp4821_of_table,
+ },
+ .probe = mcp4821_probe,
+ .id_table = mcp4821_id_table,
+};
+module_spi_driver(mcp4821_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Microchip MCP4821 DAC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c
index 26abecbd5..9284c13f1 100644
--- a/drivers/iio/frequency/adf4377.c
+++ b/drivers/iio/frequency/adf4377.c
@@ -870,7 +870,6 @@ static const struct iio_chan_spec adf4377_channels[] = {
static int adf4377_properties_parse(struct adf4377_state *st)
{
struct spi_device *spi = st->spi;
- const char *str;
int ret;
st->clkin = devm_clk_get_enabled(&spi->dev, "ref_in");
@@ -896,16 +895,13 @@ static int adf4377_properties_parse(struct adf4377_state *st)
return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2),
"failed to get the CE GPIO\n");
- ret = device_property_read_string(&spi->dev, "adi,muxout-select", &str);
- if (ret) {
- st->muxout_select = ADF4377_MUXOUT_HIGH_Z;
- } else {
- ret = match_string(adf4377_muxout_modes, ARRAY_SIZE(adf4377_muxout_modes), str);
- if (ret < 0)
- return ret;
-
+ ret = device_property_match_property_string(&spi->dev, "adi,muxout-select",
+ adf4377_muxout_modes,
+ ARRAY_SIZE(adf4377_muxout_modes));
+ if (ret >= 0)
st->muxout_select = ret;
- }
+ else
+ st->muxout_select = ADF4377_MUXOUT_HIGH_Z;
return 0;
}
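device_property_match_property_string() collapses the former read-then-match pair into one call: it returns the matched index (>= 0) or a negative errno, which is why a default can be applied on any negative return. A sketch with hypothetical property and table names:

    static const char * const my_modes[] = { "fast", "slow" };

    ret = device_property_match_property_string(&spi->dev, "vendor,mode",
                                                my_modes,
                                                ARRAY_SIZE(my_modes));
    if (ret >= 0)
            st->mode = ret;             /* index into my_modes */
    else
            st->mode = MY_MODE_DEFAULT; /* absent or unmatched property */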
diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c
index bb5e1feef..b46b73b89 100644
--- a/drivers/iio/frequency/admv1014.c
+++ b/drivers/iio/frequency/admv1014.c
@@ -710,7 +710,6 @@ static int admv1014_init(struct admv1014_state *st)
static int admv1014_properties_parse(struct admv1014_state *st)
{
- const char *str;
unsigned int i;
struct spi_device *spi = st->spi;
int ret;
@@ -719,27 +718,21 @@ static int admv1014_properties_parse(struct admv1014_state *st)
st->p1db_comp = device_property_read_bool(&spi->dev, "adi,p1db-compensation-enable");
- ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
- if (ret) {
- st->input_mode = ADMV1014_IQ_MODE;
- } else {
- ret = match_string(input_mode_names, ARRAY_SIZE(input_mode_names), str);
- if (ret < 0)
- return ret;
-
+ ret = device_property_match_property_string(&spi->dev, "adi,input-mode",
+ input_mode_names,
+ ARRAY_SIZE(input_mode_names));
+ if (ret >= 0)
st->input_mode = ret;
- }
-
- ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
- if (ret) {
- st->quad_se_mode = ADMV1014_SE_MODE_POS;
- } else {
- ret = match_string(quad_se_mode_names, ARRAY_SIZE(quad_se_mode_names), str);
- if (ret < 0)
- return ret;
+ else
+ st->input_mode = ADMV1014_IQ_MODE;
+ ret = device_property_match_property_string(&spi->dev, "adi,quad-se-mode",
+ quad_se_mode_names,
+ ARRAY_SIZE(quad_se_mode_names));
+ if (ret >= 0)
st->quad_se_mode = ADMV1014_SE_MODE_POS + (ret * 3);
- }
+ else
+ st->quad_se_mode = ADMV1014_SE_MODE_POS;
for (i = 0; i < ADMV1014_NUM_REGULATORS; ++i)
st->regulators[i].supply = admv1014_reg_name[i];
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 2de5494e7..b15b7a3b6 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -48,6 +48,18 @@ config HDC2010
To compile this driver as a module, choose M here: the module
will be called hdc2010.
+config HDC3020
+ tristate "TI HDC3020 relative humidity and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say yes here to build support for the Texas Instruments
+ HDC3020, HDC3021 and HDC3022 relative humidity and temperature
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hdc3020.
+
config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index f19ff3de9..5fbeef299 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HDC2010) += hdc2010.o
+obj-$(CONFIG_HDC3020) += hdc3020.o
obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o
hts221-y := hts221_core.o \
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
new file mode 100644
index 000000000..ed7041551
--- /dev/null
+++ b/drivers/iio/humidity/hdc3020.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * hdc3020.c - Support for the TI HDC3020, HDC3021 and HDC3022
+ * temperature + relative humidity sensors
+ *
+ * Copyright (C) 2023
+ *
+ * Datasheet: https://www.ti.com/lit/ds/symlink/hdc3020.pdf
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/iio.h>
+
+#define HDC3020_HEATER_CMD_MSB 0x30 /* shared by all heater commands */
+#define HDC3020_HEATER_ENABLE 0x6D
+#define HDC3020_HEATER_DISABLE 0x66
+#define HDC3020_HEATER_CONFIG 0x6E
+
+#define HDC3020_READ_RETRY_TIMES 10
+#define HDC3020_BUSY_DELAY_MS 10
+
+#define HDC3020_CRC8_POLYNOMIAL 0x31
+
+static const u8 HDC3020_S_AUTO_10HZ_MOD0[2] = { 0x27, 0x37 };
+
+static const u8 HDC3020_EXIT_AUTO[2] = { 0x30, 0x93 };
+
+static const u8 HDC3020_R_T_RH_AUTO[2] = { 0xE0, 0x00 };
+static const u8 HDC3020_R_T_LOW_AUTO[2] = { 0xE0, 0x02 };
+static const u8 HDC3020_R_T_HIGH_AUTO[2] = { 0xE0, 0x03 };
+static const u8 HDC3020_R_RH_LOW_AUTO[2] = { 0xE0, 0x04 };
+static const u8 HDC3020_R_RH_HIGH_AUTO[2] = { 0xE0, 0x05 };
+
+struct hdc3020_data {
+ struct i2c_client *client;
+ /*
+ * Ensure that the sensor configuration (currently only heater is
+ * supported) will not be changed during the process of reading
+ * sensor data (this driver will try HDC3020_READ_RETRY_TIMES times
+ * if the device does not respond).
+ */
+ struct mutex lock;
+};
+
+static const int hdc3020_heater_vals[] = {0, 1, 0x3FFF};
+
+static const struct iio_chan_spec hdc3020_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
+ BIT(IIO_CHAN_INFO_TROUGH) | BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
+ BIT(IIO_CHAN_INFO_TROUGH),
+ },
+ {
+ /*
+ * For setting the internal heater, which can be switched on to
+ * prevent or remove any condensation that may develop when the
+ * ambient environment approaches its dew point temperature.
+ */
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .output = 1,
+ },
+};
+
+DECLARE_CRC8_TABLE(hdc3020_crc8_table);
+
+static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
+{
+ struct i2c_client *client = data->client;
+ struct i2c_msg msg;
+ int ret, cnt;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.buf = (char *)buf;
+ msg.len = len;
+
+ /*
+ * During a measurement the HDC3020 will not return data,
+ * so wait for a while and try again.
+ */
+ for (cnt = 0; cnt < HDC3020_READ_RETRY_TIMES; cnt++) {
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)
+ return 0;
+
+ mdelay(HDC3020_BUSY_DELAY_MS);
+ }
+ dev_err(&client->dev, "Could not write sensor command\n");
+
+ return -ETIMEDOUT;
+}
+
+static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf,
+ void *val, int len)
+{
+ int ret, cnt;
+ struct i2c_client *client = data->client;
+ struct i2c_msg msg[2] = {
+ [0] = {
+ .addr = client->addr,
+ .flags = 0,
+ .buf = (char *)buf,
+ .len = 2,
+ },
+ [1] = {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = val,
+ .len = len,
+ },
+ };
+
+ /*
+ * During a measurement the HDC3020 will not return data,
+ * so wait for a while and try again.
+ */
+ for (cnt = 0; cnt < HDC3020_READ_RETRY_TIMES; cnt++) {
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret == 2)
+ return 0;
+
+ mdelay(HDC3020_BUSY_DELAY_MS);
+ }
+ dev_err(&client->dev, "Could not read sensor data\n");
+
+ return -ETIMEDOUT;
+}
+
+static int hdc3020_read_measurement(struct hdc3020_data *data,
+ enum iio_chan_type type, int *val)
+{
+ u8 crc, buf[6];
+ int ret;
+
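+ /* 6-byte frame: T msb, T lsb, CRC, RH msb, RH lsb, CRC */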
+ ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6);
+ if (ret < 0)
+ return ret;
+
+ /* CRC check of the temperature measurement */
+ crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+ if (crc != buf[2])
+ return -EINVAL;
+
+ /* CRC check of the relative humidity measurement */
+ crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE);
+ if (crc != buf[5])
+ return -EINVAL;
+
+ if (type == IIO_TEMP)
+ *val = get_unaligned_be16(buf);
+ else if (type == IIO_HUMIDITYRELATIVE)
+ *val = get_unaligned_be16(&buf[3]);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * After exiting the automatic measurement mode or resetting, the peak
+ * values are reset to their defaults. This function returns the highest
+ * temperature measured during automatic measurement.
+ */
+static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val)
+{
+ u8 crc, buf[3];
+ int ret;
+
+ ret = hdc3020_read_bytes(data, HDC3020_R_T_HIGH_AUTO, buf, 3);
+ if (ret < 0)
+ return ret;
+
+ crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+ if (crc != buf[2])
+ return -EINVAL;
+
+ *val = get_unaligned_be16(buf);
+
+ return 0;
+}
+
+/*
+ * This function returns the lowest temperature measured during
+ * automatic measurement.
+ */
+static int hdc3020_read_low_peak_t(struct hdc3020_data *data, int *val)
+{
+ u8 crc, buf[3];
+ int ret;
+
+ ret = hdc3020_read_bytes(data, HDC3020_R_T_LOW_AUTO, buf, 3);
+ if (ret < 0)
+ return ret;
+
+ crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+ if (crc != buf[2])
+ return -EINVAL;
+
+ *val = get_unaligned_be16(buf);
+
+ return 0;
+}
+
+/*
+ * This function returns the highest humidity measured during
+ * automatic measurement.
+ */
+static int hdc3020_read_high_peak_rh(struct hdc3020_data *data, int *val)
+{
+ u8 crc, buf[3];
+ int ret;
+
+ ret = hdc3020_read_bytes(data, HDC3020_R_RH_HIGH_AUTO, buf, 3);
+ if (ret < 0)
+ return ret;
+
+ crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+ if (crc != buf[2])
+ return -EINVAL;
+
+ *val = get_unaligned_be16(buf);
+
+ return 0;
+}
+
+/*
+ * This function returns the lowest humidity measured during
+ * automatic measurement.
+ */
+static int hdc3020_read_low_peak_rh(struct hdc3020_data *data, int *val)
+{
+ u8 crc, buf[3];
+ int ret;
+
+ ret = hdc3020_read_bytes(data, HDC3020_R_RH_LOW_AUTO, buf, 3);
+ if (ret < 0)
+ return ret;
+
+ crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
+ if (crc != buf[2])
+ return -EINVAL;
+
+ *val = get_unaligned_be16(buf);
+
+ return 0;
+}
+
+static int hdc3020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct hdc3020_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_TEMP && chan->type != IIO_HUMIDITYRELATIVE)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ guard(mutex)(&data->lock);
+ ret = hdc3020_read_measurement(data, chan->type, val);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_PEAK: {
+ guard(mutex)(&data->lock);
+ if (chan->type == IIO_TEMP) {
+ ret = hdc3020_read_high_peak_t(data, val);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = hdc3020_read_high_peak_rh(data, val);
+ if (ret < 0)
+ return ret;
+ }
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_TROUGH: {
+ guard(mutex)(&data->lock);
+ if (chan->type == IIO_TEMP) {
+ ret = hdc3020_read_low_peak_t(data, val);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = hdc3020_read_low_peak_rh(data, val);
+ if (ret < 0)
+ return ret;
+ }
+ return IIO_VAL_INT;
+ }
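+ /*
+ * Datasheet conversion: T(degC) = -45 + 175 * raw / 2^16 and
+ * RH(%) = 100 * raw / 2^16 (2^16 - 1 approximated as 2^16), giving
+ * the scales below and a temperature offset of
+ * -45 * 65536 / 175 = -16852 raw units.
+ */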
+ case IIO_CHAN_INFO_SCALE:
+ *val2 = 65536;
+ if (chan->type == IIO_TEMP)
+ *val = 175;
+ else
+ *val = 100;
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_CHAN_INFO_OFFSET:
+ if (chan->type != IIO_TEMP)
+ return -EINVAL;
+
+ *val = -16852;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hdc3020_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type, int *length, long mask)
+{
+ if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ *vals = hdc3020_heater_vals;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_RANGE;
+}
+
+static int hdc3020_update_heater(struct hdc3020_data *data, int val)
+{
+ u8 buf[5];
+ int ret;
+
+ if (val < hdc3020_heater_vals[0] || val > hdc3020_heater_vals[2])
+ return -EINVAL;
+
+ buf[0] = HDC3020_HEATER_CMD_MSB;
+
+ if (!val) {
+ buf[1] = HDC3020_HEATER_DISABLE;
+ return hdc3020_write_bytes(data, buf, 2);
+ }
+
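+ /*
+ * Heater power is a 14-bit level (0x0000 = off ... 0x3FFF = maximum).
+ * Send the configuration (big-endian, CRC-protected), then enable the
+ * heater.
+ */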
+ buf[1] = HDC3020_HEATER_CONFIG;
+ put_unaligned_be16(val & GENMASK(13, 0), &buf[2]);
+ buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+ ret = hdc3020_write_bytes(data, buf, 5);
+ if (ret < 0)
+ return ret;
+
+ buf[1] = HDC3020_HEATER_ENABLE;
+
+ return hdc3020_write_bytes(data, buf, 2);
+}
+
+static int hdc3020_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hdc3020_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ guard(mutex)(&data->lock);
+ return hdc3020_update_heater(data, val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info hdc3020_info = {
+ .read_raw = hdc3020_read_raw,
+ .write_raw = hdc3020_write_raw,
+ .read_avail = hdc3020_read_available,
+};
+
+static void hdc3020_stop(void *data)
+{
+ hdc3020_write_bytes((struct hdc3020_data *)data, HDC3020_EXIT_AUTO, 2);
+}
+
+static int hdc3020_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct hdc3020_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ crc8_populate_msb(hdc3020_crc8_table, HDC3020_CRC8_POLYNOMIAL);
+
+ indio_dev->name = "hdc3020";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &hdc3020_info;
+ indio_dev->channels = hdc3020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hdc3020_channels);
+
+ ret = hdc3020_write_bytes(data, HDC3020_S_AUTO_10HZ_MOD0, 2);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Unable to set up measurement\n");
+
+ ret = devm_add_action_or_reset(&data->client->dev, hdc3020_stop, data);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&data->client->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "Failed to add device");
+
+ return 0;
+}
+
+static const struct i2c_device_id hdc3020_id[] = {
+ { "hdc3020" },
+ { "hdc3021" },
+ { "hdc3022" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hdc3020_id);
+
+static const struct of_device_id hdc3020_dt_ids[] = {
+ { .compatible = "ti,hdc3020" },
+ { .compatible = "ti,hdc3021" },
+ { .compatible = "ti,hdc3022" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hdc3020_dt_ids);
+
+static struct i2c_driver hdc3020_driver = {
+ .driver = {
+ .name = "hdc3020",
+ .of_match_table = hdc3020_dt_ids,
+ },
+ .probe = hdc3020_probe,
+ .id_table = hdc3020_id,
+};
+module_i2c_driver(hdc3020_driver);
+
+MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>");
+MODULE_AUTHOR("Li peiyu <579lpy@gmail.com>");
+MODULE_DESCRIPTION("TI HDC3020 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index c2f97629e..52a155ff3 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -53,6 +53,7 @@ config ADIS16480
ADIS16485, ADIS16488 inertial sensors.
source "drivers/iio/imu/bmi160/Kconfig"
+source "drivers/iio/imu/bmi323/Kconfig"
source "drivers/iio/imu/bno055/Kconfig"
config FXOS8700
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 6eb612034..7e2d7d5c3 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -15,6 +15,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+obj-y += bmi323/
obj-y += bno055/
obj-$(CONFIG_FXOS8700) += fxos8700_core.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index bc40240b2..495caf4ce 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -44,8 +44,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
- .cs_change_delay.value = adis->data->cs_change_delay,
- .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
@@ -53,8 +51,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
- .cs_change_delay.value = adis->data->cs_change_delay,
- .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.bits_per_word = 8,
@@ -62,8 +58,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
- .cs_change_delay.value = adis->data->cs_change_delay,
- .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 6,
.bits_per_word = 8,
@@ -144,8 +138,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
.cs_change = 1,
.delay.value = adis->data->write_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
- .cs_change_delay.value = adis->data->cs_change_delay,
- .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 2,
.bits_per_word = 8,
@@ -153,8 +145,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
.cs_change = 1,
.delay.value = adis->data->read_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
- .cs_change_delay.value = adis->data->cs_change_delay,
- .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.tx_buf = adis->tx + 4,
.rx_buf = adis->rx,
@@ -163,8 +153,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val,
.cs_change = 1,
.delay.value = adis->data->read_delay,
.delay.unit = SPI_DELAY_UNIT_USECS,
- .cs_change_delay.value = adis->data->cs_change_delay,
- .cs_change_delay.unit = SPI_DELAY_UNIT_USECS,
}, {
.rx_buf = adis->rx + 2,
.bits_per_word = 8,
@@ -524,6 +512,12 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev,
}
mutex_init(&adis->state_lock);
+
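+ /*
+ * Use the SPI core's CS inactive delay in place of the per-transfer
+ * cs_change_delay settings removed above, unless the controller has
+ * already configured one.
+ */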
+ if (!spi->cs_inactive.value) {
+ spi->cs_inactive.value = data->cs_change_delay;
+ spi->cs_inactive.unit = SPI_DELAY_UNIT_USECS;
+ }
+
adis->spi = spi;
adis->data = data;
iio_device_set_drvdata(indio_dev, adis);
diff --git a/drivers/iio/imu/bmi323/Kconfig b/drivers/iio/imu/bmi323/Kconfig
new file mode 100644
index 000000000..ab37b2853
--- /dev/null
+++ b/drivers/iio/imu/bmi323/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# BMI323 IMU driver
+#
+
+config BMI323
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config BMI323_I2C
+ tristate "Bosch BMI323 I2C driver"
+ depends on I2C
+ select BMI323
+ select REGMAP_I2C
+ help
+ Enable support for the Bosch BMI323 6-Axis IMU connected to the I2C
+ interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called bmi323_i2c.
+
+config BMI323_SPI
+ tristate "Bosch BMI323 SPI driver"
+ depends on SPI
+ select BMI323
+ select REGMAP_SPI
+ help
+ Enable support for the Bosch BMI323 6-Axis IMU connected to the SPI
+ interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called bmi323_spi.
diff --git a/drivers/iio/imu/bmi323/Makefile b/drivers/iio/imu/bmi323/Makefile
new file mode 100644
index 000000000..a6a6dc020
--- /dev/null
+++ b/drivers/iio/imu/bmi323/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Bosch BMI323 IMU
+#
+obj-$(CONFIG_BMI323) += bmi323_core.o
+obj-$(CONFIG_BMI323_I2C) += bmi323_i2c.o
+obj-$(CONFIG_BMI323_SPI) += bmi323_spi.o
diff --git a/drivers/iio/imu/bmi323/bmi323.h b/drivers/iio/imu/bmi323/bmi323.h
new file mode 100644
index 000000000..dff126d41
--- /dev/null
+++ b/drivers/iio/imu/bmi323/bmi323.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IIO driver for Bosch BMI323 6-Axis IMU
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ */
+
+#ifndef _BMI323_H_
+#define _BMI323_H_
+
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#define BMI323_I2C_DUMMY 2
+#define BMI323_SPI_DUMMY 1
+
+/* Register map */
+
+#define BMI323_CHIP_ID_REG 0x00
+#define BMI323_CHIP_ID_VAL 0x0043
+#define BMI323_CHIP_ID_MSK GENMASK(7, 0)
+#define BMI323_ERR_REG 0x01
+#define BMI323_STATUS_REG 0x02
+#define BMI323_STATUS_POR_MSK BIT(0)
+
+/* Accelero/Gyro/Temp data registers */
+#define BMI323_ACCEL_X_REG 0x03
+#define BMI323_GYRO_X_REG 0x06
+#define BMI323_TEMP_REG 0x09
+#define BMI323_ALL_CHAN_MSK GENMASK(5, 0)
+
+/* Status registers */
+#define BMI323_STATUS_INT1_REG 0x0D
+#define BMI323_STATUS_INT2_REG 0x0E
+#define BMI323_STATUS_NOMOTION_MSK BIT(0)
+#define BMI323_STATUS_MOTION_MSK BIT(1)
+#define BMI323_STATUS_STP_WTR_MSK BIT(5)
+#define BMI323_STATUS_TAP_MSK BIT(8)
+#define BMI323_STATUS_ERROR_MSK BIT(10)
+#define BMI323_STATUS_TMP_DRDY_MSK BIT(11)
+#define BMI323_STATUS_GYR_DRDY_MSK BIT(12)
+#define BMI323_STATUS_ACC_DRDY_MSK BIT(13)
+#define BMI323_STATUS_ACC_GYR_DRDY_MSK GENMASK(13, 12)
+#define BMI323_STATUS_FIFO_WTRMRK_MSK BIT(14)
+#define BMI323_STATUS_FIFO_FULL_MSK BIT(15)
+
+/* Feature registers */
+#define BMI323_FEAT_IO0_REG 0x10
+#define BMI323_FEAT_IO0_XYZ_NOMOTION_MSK GENMASK(2, 0)
+#define BMI323_FEAT_IO0_XYZ_MOTION_MSK GENMASK(5, 3)
+#define BMI323_FEAT_XYZ_MSK GENMASK(2, 0)
+#define BMI323_FEAT_IO0_STP_CNT_MSK BIT(9)
+#define BMI323_FEAT_IO0_S_TAP_MSK BIT(12)
+#define BMI323_FEAT_IO0_D_TAP_MSK BIT(13)
+#define BMI323_FEAT_IO1_REG 0x11
+#define BMI323_FEAT_IO1_ERR_MSK GENMASK(3, 0)
+#define BMI323_FEAT_IO2_REG 0x12
+#define BMI323_FEAT_IO_STATUS_REG 0x14
+#define BMI323_FEAT_IO_STATUS_MSK BIT(0)
+#define BMI323_FEAT_ENG_POLL 2000
+#define BMI323_FEAT_ENG_TIMEOUT 10000
+
+/* FIFO registers */
+#define BMI323_FIFO_FILL_LEVEL_REG 0x15
+#define BMI323_FIFO_DATA_REG 0x16
+
+/* Accelero/Gyro config registers */
+#define BMI323_ACC_CONF_REG 0x20
+#define BMI323_GYRO_CONF_REG 0x21
+#define BMI323_ACC_GYRO_CONF_MODE_MSK GENMASK(14, 12)
+#define BMI323_ACC_GYRO_CONF_ODR_MSK GENMASK(3, 0)
+#define BMI323_ACC_GYRO_CONF_SCL_MSK GENMASK(6, 4)
+#define BMI323_ACC_GYRO_CONF_BW_MSK BIT(7)
+#define BMI323_ACC_GYRO_CONF_AVG_MSK GENMASK(10, 8)
+
+/* FIFO registers */
+#define BMI323_FIFO_WTRMRK_REG 0x35
+#define BMI323_FIFO_CONF_REG 0x36
+#define BMI323_FIFO_CONF_STP_FUL_MSK BIT(0)
+#define BMI323_FIFO_CONF_ACC_GYR_EN_MSK GENMASK(10, 9)
+#define BMI323_FIFO_ACC_GYR_MSK GENMASK(1, 0)
+#define BMI323_FIFO_CTRL_REG 0x37
+#define BMI323_FIFO_FLUSH_MSK BIT(0)
+
+/* Interrupt pin config registers */
+#define BMI323_IO_INT_CTR_REG 0x38
+#define BMI323_IO_INT1_LVL_MSK BIT(0)
+#define BMI323_IO_INT1_OD_MSK BIT(1)
+#define BMI323_IO_INT1_OP_EN_MSK BIT(2)
+#define BMI323_IO_INT1_LVL_OD_OP_MSK GENMASK(2, 0)
+#define BMI323_IO_INT2_LVL_MSK BIT(8)
+#define BMI323_IO_INT2_OD_MSK BIT(9)
+#define BMI323_IO_INT2_OP_EN_MSK BIT(10)
+#define BMI323_IO_INT2_LVL_OD_OP_MSK GENMASK(10, 8)
+#define BMI323_IO_INT_CONF_REG 0x39
+#define BMI323_IO_INT_LTCH_MSK BIT(0)
+#define BMI323_INT_MAP1_REG 0x3A
+#define BMI323_INT_MAP2_REG 0x3B
+#define BMI323_NOMOTION_MSK GENMASK(1, 0)
+#define BMI323_MOTION_MSK GENMASK(3, 2)
+#define BMI323_STEP_CNT_MSK GENMASK(11, 10)
+#define BMI323_TAP_MSK GENMASK(1, 0)
+#define BMI323_TMP_DRDY_MSK GENMASK(7, 6)
+#define BMI323_GYR_DRDY_MSK GENMASK(9, 8)
+#define BMI323_ACC_DRDY_MSK GENMASK(11, 10)
+#define BMI323_FIFO_WTRMRK_MSK GENMASK(13, 12)
+#define BMI323_FIFO_FULL_MSK GENMASK(15, 14)
+
+/* Feature registers */
+#define BMI323_FEAT_CTRL_REG 0x40
+#define BMI323_FEAT_ENG_EN_MSK BIT(0)
+#define BMI323_FEAT_DATA_ADDR 0x41
+#define BMI323_FEAT_DATA_TX 0x42
+#define BMI323_FEAT_DATA_STATUS 0x43
+#define BMI323_FEAT_DATA_TX_RDY_MSK BIT(1)
+#define BMI323_FEAT_EVNT_EXT_REG 0x47
+#define BMI323_FEAT_EVNT_EXT_S_MSK BIT(3)
+#define BMI323_FEAT_EVNT_EXT_D_MSK BIT(4)
+
+#define BMI323_CMD_REG 0x7E
+#define BMI323_RST_VAL 0xDEAF
+#define BMI323_CFG_RES_REG 0x7F
+
+/* Extended registers */
+#define BMI323_GEN_SET1_REG 0x02
+#define BMI323_GEN_SET1_MODE_MSK BIT(0)
+#define BMI323_GEN_HOLD_DUR_MSK GENMASK(4, 1)
+
+/* Any Motion/No Motion config registers */
+#define BMI323_ANYMO1_REG 0x05
+#define BMI323_NOMO1_REG 0x08
+#define BMI323_MO2_OFFSET 0x01
+#define BMI323_MO3_OFFSET 0x02
+#define BMI323_MO1_REF_UP_MSK BIT(12)
+#define BMI323_MO1_SLOPE_TH_MSK GENMASK(11, 0)
+#define BMI323_MO2_HYSTR_MSK GENMASK(9, 0)
+#define BMI323_MO3_DURA_MSK GENMASK(12, 0)
+
+/* Step counter config registers */
+#define BMI323_STEP_SC1_REG 0x10
+#define BMI323_STEP_SC1_WTRMRK_MSK GENMASK(9, 0)
+#define BMI323_STEP_SC1_RST_CNT_MSK BIT(10)
+#define BMI323_STEP_LEN 2
+
+/* Tap gesture config registers */
+#define BMI323_TAP1_REG 0x1E
+#define BMI323_TAP1_AXIS_SEL_MSK GENMASK(1, 0)
+#define BMI323_AXIS_XYZ_MSK GENMASK(1, 0)
+#define BMI323_TAP1_TIMOUT_MSK BIT(2)
+#define BMI323_TAP1_MAX_PEAKS_MSK GENMASK(5, 3)
+#define BMI323_TAP1_MODE_MSK GENMASK(7, 6)
+#define BMI323_TAP2_REG 0x1F
+#define BMI323_TAP2_THRES_MSK GENMASK(9, 0)
+#define BMI323_TAP2_MAX_DUR_MSK GENMASK(15, 10)
+#define BMI323_TAP3_REG 0x20
+#define BMI323_TAP3_QUIET_TIM_MSK GENMASK(15, 12)
+#define BMI323_TAP3_QT_BW_TAP_MSK GENMASK(11, 8)
+#define BMI323_TAP3_QT_AFT_GES_MSK GENMASK(15, 12)
+
+#define BMI323_MOTION_THRES_SCALE 512
+#define BMI323_MOTION_HYSTR_SCALE 512
+#define BMI323_MOTION_DURAT_SCALE 50
+#define BMI323_TAP_THRES_SCALE 512
+#define BMI323_DUR_BW_TAP_SCALE 200
+#define BMI323_QUITE_TIM_GES_SCALE 25
+#define BMI323_MAX_GES_DUR_SCALE 25
+
+/*
+ * Formula to calculate the temperature in degrees Celsius; see
+ * datasheet section 6.1.1, Register Map Overview.
+ *
+ * T_C = (temp_raw / 512) + 23
+ */
+#define BMI323_TEMP_OFFSET 11776
+#define BMI323_TEMP_SCALE 1953125
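+/*
+ * OFFSET is the +23 degC term in raw LSBs (23 * 512 = 11776); SCALE
+ * corresponds to 1/512 degC per LSB, i.e. 1953125 nano degC.
+ */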
+
+/*
+ * The BMI323 features a FIFO with a capacity of 2048 bytes. Each frame
+ * consists of accelerometer (X, Y, Z) data and gyroscope (X, Y, Z) data,
+ * totaling 6 words or 12 bytes. The FIFO buffer can hold a total of
+ * 170 frames.
+ *
+ * If a watermark interrupt is configured for 170 frames, the interrupt will
+ * trigger when the FIFO reaches 169 frames, so limit the maximum watermark
+ * level to 169 frames. In terms of data, 169 frames equal 1014 words
+ * (2028 bytes), which is roughly 2 frames short of the FIFO's full
+ * capacity. See datasheet section 5.7.3, FIFO Buffer Interrupts.
+ */
+#define BMI323_BYTES_PER_SAMPLE 2
+#define BMI323_FIFO_LENGTH_IN_BYTES 2048
+#define BMI323_FIFO_FRAME_LENGTH 6
+#define BMI323_FIFO_FULL_IN_FRAMES \
+ ((BMI323_FIFO_LENGTH_IN_BYTES / \
+ (BMI323_BYTES_PER_SAMPLE * BMI323_FIFO_FRAME_LENGTH)) - 1)
+#define BMI323_FIFO_FULL_IN_WORDS \
+ (BMI323_FIFO_FULL_IN_FRAMES * BMI323_FIFO_FRAME_LENGTH)
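+
+/*
+ * With the values above: 2048 / (2 * 6) - 1 = 169 frames and
+ * 169 * 6 = 1014 words at the FIFO-full level.
+ */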
+
+#define BMI323_INT_MICRO_TO_RAW(val, val2, scale) ((val) * (scale) + \
+ ((val2) * (scale)) / MEGA)
+
+#define BMI323_RAW_TO_MICRO(raw, scale) ((((raw) % (scale)) * MEGA) / (scale))
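+
+/*
+ * Worked example: val = 1, val2 = 500000 (1.5 in IIO int-plus-micro
+ * form) with scale 512 gives 1 * 512 + (500000 * 512) / 10^6 = 768 raw;
+ * the reverse macro recovers the fractional part:
+ * (768 % 512) * 10^6 / 512 = 500000 micro.
+ */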
+
+struct device;
+int bmi323_core_probe(struct device *dev);
+extern const struct regmap_config bmi323_regmap_config;
+
+#endif
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
new file mode 100644
index 000000000..183af4828
--- /dev/null
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -0,0 +1,2139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO core driver for Bosch BMI323 6-Axis IMU.
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ *
+ * Datasheet: https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmi323-ds000.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "bmi323.h"
+
+enum bmi323_sensor_type {
+ BMI323_ACCEL,
+ BMI323_GYRO,
+ BMI323_SENSORS_CNT,
+};
+
+enum bmi323_opr_mode {
+ ACC_GYRO_MODE_DISABLE = 0x00,
+ GYRO_DRIVE_MODE_ENABLED = 0x01,
+ ACC_GYRO_MODE_DUTYCYCLE = 0x03,
+ ACC_GYRO_MODE_CONTINOUS = 0x04,
+ ACC_GYRO_MODE_HIGH_PERF = 0x07,
+};
+
+enum bmi323_state {
+ BMI323_IDLE,
+ BMI323_BUFFER_DRDY_TRIGGERED,
+ BMI323_BUFFER_FIFO,
+};
+
+enum bmi323_irq_pin {
+ BMI323_IRQ_DISABLED,
+ BMI323_IRQ_INT1,
+ BMI323_IRQ_INT2,
+};
+
+enum bmi323_3db_bw {
+ BMI323_BW_ODR_BY_2,
+ BMI323_BW_ODR_BY_4,
+};
+
+enum bmi323_scan {
+ BMI323_ACCEL_X,
+ BMI323_ACCEL_Y,
+ BMI323_ACCEL_Z,
+ BMI323_GYRO_X,
+ BMI323_GYRO_Y,
+ BMI323_GYRO_Z,
+ BMI323_CHAN_MAX
+};
+
+struct bmi323_hw {
+ u8 data;
+ u8 config;
+ const int (*scale_table)[2];
+ int scale_table_len;
+};
+
+/*
+ * The accelerometer supports +-2G/4G/8G/16G ranges, and the resolution of
+ * each sample is 16 bits, signed.
+ * At +-8G the scale can be calculated as
+ * ((8 + 8) * 9.80665 / (2^16 - 1)) * 10^6 = 2394.23819 in micro m/s^2,
+ * matching the third entry in the table below.
+ */
+static const int bmi323_accel_scale[][2] = {
+ { 0, 598 },
+ { 0, 1197 },
+ { 0, 2394 },
+ { 0, 4788 },
+};
+
+static const int bmi323_gyro_scale[][2] = {
+ { 0, 66 },
+ { 0, 133 },
+ { 0, 266 },
+ { 0, 532 },
+ { 0, 1065 },
+};
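+
+/*
+ * The gyroscope ranges are +-125/250/500/1000/2000 dps; e.g. at
+ * +-125 dps ((125 + 125) * (pi / 180) / (2^16 - 1)) * 10^6 = 66.58
+ * micro rad/s per LSB, matching the first entry above.
+ */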
+
+static const int bmi323_accel_gyro_avrg[] = {0, 2, 4, 8, 16, 32, 64};
+
+static const struct bmi323_hw bmi323_hw[2] = {
+ [BMI323_ACCEL] = {
+ .data = BMI323_ACCEL_X_REG,
+ .config = BMI323_ACC_CONF_REG,
+ .scale_table = bmi323_accel_scale,
+ .scale_table_len = ARRAY_SIZE(bmi323_accel_scale),
+ },
+ [BMI323_GYRO] = {
+ .data = BMI323_GYRO_X_REG,
+ .config = BMI323_GYRO_CONF_REG,
+ .scale_table = bmi323_gyro_scale,
+ .scale_table_len = ARRAY_SIZE(bmi323_gyro_scale),
+ },
+};
+
+struct bmi323_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct iio_mount_matrix orientation;
+ enum bmi323_irq_pin irq_pin;
+ struct iio_trigger *trig;
+ bool drdy_trigger_enabled;
+ enum bmi323_state state;
+ s64 fifo_tstamp, old_fifo_tstamp;
+ u32 odrns[BMI323_SENSORS_CNT];
+ u32 odrhz[BMI323_SENSORS_CNT];
+ unsigned int feature_events;
+
+ /*
+ * Lock to protect the members of the device's private data from
+ * concurrent access and to serialize access to the extended registers.
+ * See bmi323_write_ext_reg() for more info.
+ */
+ struct mutex mutex;
+ int watermark;
+ __le16 fifo_buff[BMI323_FIFO_FULL_IN_WORDS] __aligned(IIO_DMA_MINALIGN);
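+ /*
+ * Scan buffer: enabled channels packed first, followed by an
+ * 8-byte-aligned s64 timestamp as expected by
+ * iio_push_to_buffers_with_timestamp().
+ */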
+ struct {
+ __le16 channels[BMI323_CHAN_MAX];
+ s64 ts __aligned(8);
+ } buffer;
+ __le16 steps_count[BMI323_STEP_LEN];
+};
+
+static const struct iio_mount_matrix *
+bmi323_get_mount_matrix(const struct iio_dev *idev,
+ const struct iio_chan_spec *chan)
+{
+ struct bmi323_data *data = iio_priv(idev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bmi323_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, bmi323_get_mount_matrix),
+ { }
+};
+
+static const struct iio_event_spec bmi323_step_wtrmrk_event = {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+};
+
+static const struct iio_event_spec bmi323_accel_event[] = {
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_SINGLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_DOUBLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+ BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+ },
+};
+
+#define BMI323_ACCEL_CHANNEL(_type, _axis, _index) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .ext_info = bmi323_ext_info, \
+ .event_spec = bmi323_accel_event, \
+ .num_event_specs = ARRAY_SIZE(bmi323_accel_event), \
+}
+
+#define BMI323_GYRO_CHANNEL(_type, _axis, _index) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .ext_info = bmi323_ext_info, \
+}
+
+static const struct iio_chan_spec bmi323_channels[] = {
+ BMI323_ACCEL_CHANNEL(IIO_ACCEL, X, BMI323_ACCEL_X),
+ BMI323_ACCEL_CHANNEL(IIO_ACCEL, Y, BMI323_ACCEL_Y),
+ BMI323_ACCEL_CHANNEL(IIO_ACCEL, Z, BMI323_ACCEL_Z),
+ BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, X, BMI323_GYRO_X),
+ BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, Y, BMI323_GYRO_Y),
+ BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, Z, BMI323_GYRO_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ },
+ {
+ .type = IIO_STEPS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_ENABLE),
+ .scan_index = -1,
+ .event_spec = &bmi323_step_wtrmrk_event,
+ .num_event_specs = 1,
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(BMI323_CHAN_MAX),
+};
+
+static const int bmi323_acc_gyro_odr[][2] = {
+ { 0, 781250 },
+ { 1, 562500 },
+ { 3, 125000 },
+ { 6, 250000 },
+ { 12, 500000 },
+ { 25, 0 },
+ { 50, 0 },
+ { 100, 0 },
+ { 200, 0 },
+ { 400, 0 },
+ { 800, 0 },
+};
+
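+/*
+ * Sample periods in ns for the ODR table above: e.g. 0.78125 Hz ->
+ * 1.28 s = 1280 * 10^6 ns, down to 800 Hz -> 1.25 ms = 1250 * 10^3 ns.
+ */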
+static const int bmi323_acc_gyro_odrns[] = {
+ 1280 * MEGA,
+ 640 * MEGA,
+ 320 * MEGA,
+ 160 * MEGA,
+ 80 * MEGA,
+ 40 * MEGA,
+ 20 * MEGA,
+ 10 * MEGA,
+ 5 * MEGA,
+ 2500 * KILO,
+ 1250 * KILO,
+};
+
+static enum bmi323_sensor_type bmi323_iio_to_sensor(enum iio_chan_type iio_type)
+{
+ switch (iio_type) {
+ case IIO_ACCEL:
+ return BMI323_ACCEL;
+ case IIO_ANGL_VEL:
+ return BMI323_GYRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_set_mode(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor,
+ enum bmi323_opr_mode mode)
+{
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+ BMI323_ACC_GYRO_CONF_MODE_MSK,
+ FIELD_PREP(BMI323_ACC_GYRO_CONF_MODE_MSK,
+ mode));
+}
+
+/*
+ * When writing data to an extended register there must be no communication
+ * with any other register before the write transaction completes.
+ * See datasheet section 6.2 Extended Register Map Description.
+ */
+static int bmi323_write_ext_reg(struct bmi323_data *data, unsigned int ext_addr,
+ unsigned int ext_data)
+{
+ int ret, feature_status;
+
+ ret = regmap_read(data->regmap, BMI323_FEAT_DATA_STATUS,
+ &feature_status);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(BMI323_FEAT_DATA_TX_RDY_MSK, feature_status))
+ return -EBUSY;
+
+ ret = regmap_write(data->regmap, BMI323_FEAT_DATA_ADDR, ext_addr);
+ if (ret)
+ return ret;
+
+ return regmap_write(data->regmap, BMI323_FEAT_DATA_TX, ext_data);
+}
+
+/*
+ * When reading data from an extended register there must be no communication
+ * with any other register before the read transaction completes.
+ * See datasheet section 6.2 Extended Register Map Description.
+ */
+static int bmi323_read_ext_reg(struct bmi323_data *data, unsigned int ext_addr,
+ unsigned int *ext_data)
+{
+ int ret, feature_status;
+
+ ret = regmap_read(data->regmap, BMI323_FEAT_DATA_STATUS,
+ &feature_status);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(BMI323_FEAT_DATA_TX_RDY_MSK, feature_status))
+ return -EBUSY;
+
+ ret = regmap_write(data->regmap, BMI323_FEAT_DATA_ADDR, ext_addr);
+ if (ret)
+ return ret;
+
+ return regmap_read(data->regmap, BMI323_FEAT_DATA_TX, ext_data);
+}
+
+static int bmi323_update_ext_reg(struct bmi323_data *data,
+ unsigned int ext_addr,
+ unsigned int mask, unsigned int ext_data)
+{
+ unsigned int value;
+ int ret;
+
+ ret = bmi323_read_ext_reg(data, ext_addr, &value);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&value, mask, ext_data);
+
+ return bmi323_write_ext_reg(data, ext_addr, value);
+}
+
+static int bmi323_get_error_status(struct bmi323_data *data)
+{
+ int error, ret;
+
+ guard(mutex)(&data->mutex);
+ ret = regmap_read(data->regmap, BMI323_ERR_REG, &error);
+ if (ret)
+ return ret;
+
+ if (error)
+ dev_err(data->dev, "Sensor error 0x%x\n", error);
+
+ return error;
+}
+
+static int bmi323_feature_engine_events(struct bmi323_data *data,
+ const unsigned int event_mask,
+ bool state)
+{
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMI323_FEAT_IO0_REG, &value);
+ if (ret)
+ return ret;
+
+ /* Register must be cleared before changing an active config */
+ ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0);
+ if (ret)
+ return ret;
+
+ if (state)
+ value |= event_mask;
+ else
+ value &= ~event_mask;
+
+ ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, value);
+ if (ret)
+ return ret;
+
+ return regmap_write(data->regmap, BMI323_FEAT_IO_STATUS_REG,
+ BMI323_FEAT_IO_STATUS_MSK);
+}
+
+static int bmi323_step_wtrmrk_en(struct bmi323_data *data, int state)
+{
+ enum bmi323_irq_pin step_irq;
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ if (!FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, data->feature_events))
+ return -EINVAL;
+
+ if (state)
+ step_irq = data->irq_pin;
+ else
+ step_irq = BMI323_IRQ_DISABLED;
+
+ ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+ BMI323_STEP_SC1_WTRMRK_MSK,
+ FIELD_PREP(BMI323_STEP_SC1_WTRMRK_MSK,
+ state ? 1 : 0));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BMI323_INT_MAP1_REG,
+ BMI323_STEP_CNT_MSK,
+ FIELD_PREP(BMI323_STEP_CNT_MSK, step_irq));
+}
+
+static int bmi323_motion_config_reg(enum iio_event_direction dir)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return BMI323_ANYMO1_REG;
+ case IIO_EV_DIR_FALLING:
+ return BMI323_NOMO1_REG;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_motion_event_en(struct bmi323_data *data,
+ enum iio_event_direction dir, int state)
+{
+ unsigned int state_value = state ? BMI323_FEAT_XYZ_MSK : 0;
+ int config, ret, msk, raw, field_value;
+ enum bmi323_irq_pin motion_irq;
+ int irq_msk, irq_field_val;
+
+ if (state)
+ motion_irq = data->irq_pin;
+ else
+ motion_irq = BMI323_IRQ_DISABLED;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ msk = BMI323_FEAT_IO0_XYZ_MOTION_MSK;
+ raw = 512;
+ config = BMI323_ANYMO1_REG;
+ irq_msk = BMI323_MOTION_MSK;
+ irq_field_val = FIELD_PREP(BMI323_MOTION_MSK, motion_irq);
+ field_value = FIELD_PREP(BMI323_FEAT_IO0_XYZ_MOTION_MSK,
+ state_value);
+ break;
+ case IIO_EV_DIR_FALLING:
+ msk = BMI323_FEAT_IO0_XYZ_NOMOTION_MSK;
+ raw = 0;
+ config = BMI323_NOMO1_REG;
+ irq_msk = BMI323_NOMOTION_MSK;
+ irq_field_val = FIELD_PREP(BMI323_NOMOTION_MSK, motion_irq);
+ field_value = FIELD_PREP(BMI323_FEAT_IO0_XYZ_NOMOTION_MSK,
+ state_value);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ guard(mutex)(&data->mutex);
+ ret = bmi323_feature_engine_events(data, msk, state);
+ if (ret)
+ return ret;
+
+ ret = bmi323_update_ext_reg(data, config,
+ BMI323_MO1_REF_UP_MSK,
+ FIELD_PREP(BMI323_MO1_REF_UP_MSK, 0));
+ if (ret)
+ return ret;
+
+ /* Set initial value to avoid interrupts while enabling */
+ ret = bmi323_update_ext_reg(data, config,
+ BMI323_MO1_SLOPE_TH_MSK,
+ FIELD_PREP(BMI323_MO1_SLOPE_TH_MSK, raw));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, BMI323_INT_MAP1_REG, irq_msk,
+ irq_field_val);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->feature_events, msk, field_value);
+
+ return 0;
+}
+
+static int bmi323_tap_event_en(struct bmi323_data *data,
+ enum iio_event_direction dir, int state)
+{
+ enum bmi323_irq_pin tap_irq;
+ int ret, tap_enabled;
+
+ guard(mutex)(&data->mutex);
+
+ if (data->odrhz[BMI323_ACCEL] < 200) {
+ dev_err(data->dev, "Invalid accelerometer parameter\n");
+ return -EINVAL;
+ }
+
+ switch (dir) {
+ case IIO_EV_DIR_SINGLETAP:
+ ret = bmi323_feature_engine_events(data,
+ BMI323_FEAT_IO0_S_TAP_MSK,
+ state);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_S_TAP_MSK,
+ FIELD_PREP(BMI323_FEAT_IO0_S_TAP_MSK, state));
+ break;
+ case IIO_EV_DIR_DOUBLETAP:
+ ret = bmi323_feature_engine_events(data,
+ BMI323_FEAT_IO0_D_TAP_MSK,
+ state);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_D_TAP_MSK,
+ FIELD_PREP(BMI323_FEAT_IO0_D_TAP_MSK, state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tap_enabled = FIELD_GET(BMI323_FEAT_IO0_S_TAP_MSK |
+ BMI323_FEAT_IO0_D_TAP_MSK,
+ data->feature_events);
+
+ if (tap_enabled)
+ tap_irq = data->irq_pin;
+ else
+ tap_irq = BMI323_IRQ_DISABLED;
+
+ ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+ BMI323_TAP_MSK,
+ FIELD_PREP(BMI323_TAP_MSK, tap_irq));
+ if (ret)
+ return ret;
+
+ if (!state)
+ return 0;
+
+ ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+ BMI323_TAP1_MAX_PEAKS_MSK,
+ FIELD_PREP(BMI323_TAP1_MAX_PEAKS_MSK,
+ 0x04));
+ if (ret)
+ return ret;
+
+ ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+ BMI323_TAP1_AXIS_SEL_MSK,
+ FIELD_PREP(BMI323_TAP1_AXIS_SEL_MSK,
+ BMI323_AXIS_XYZ_MSK));
+ if (ret)
+ return ret;
+
+ return bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+ BMI323_TAP1_TIMOUT_MSK,
+ FIELD_PREP(BMI323_TAP1_TIMOUT_MSK,
+ 0));
+}
+
+static ssize_t in_accel_gesture_tap_wait_dur_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ unsigned int reg_value, raw;
+ int ret, val[2];
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = bmi323_read_ext_reg(data, BMI323_TAP2_REG, &reg_value);
+ if (ret)
+ return ret;
+ }
+
+ raw = FIELD_GET(BMI323_TAP2_MAX_DUR_MSK, reg_value);
+ val[0] = raw / BMI323_MAX_GES_DUR_SCALE;
+ val[1] = BMI323_RAW_TO_MICRO(raw, BMI323_MAX_GES_DUR_SCALE);
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(val),
+ val);
+}
+
+static ssize_t in_accel_gesture_tap_wait_dur_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int ret, val_int, val_fract, raw;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract);
+ if (ret)
+ return ret;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val_int, val_fract,
+ BMI323_MAX_GES_DUR_SCALE);
+ if (!in_range(raw, 0, 64))
+ return -EINVAL;
+
+ guard(mutex)(&data->mutex);
+ ret = bmi323_update_ext_reg(data, BMI323_TAP2_REG,
+ BMI323_TAP2_MAX_DUR_MSK,
+ FIELD_PREP(BMI323_TAP2_MAX_DUR_MSK, raw));
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/*
+ * Maximum duration after the first tap within which the second tap is
+ * expected to happen.
+ * This timeout is applicable only if gesture_tap_wait_timeout is enabled.
+ */
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_wait_dur, 0);
+
+static ssize_t in_accel_gesture_tap_wait_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ unsigned int reg_value, raw;
+ int ret;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = bmi323_read_ext_reg(data, BMI323_TAP1_REG, &reg_value);
+ if (ret)
+ return ret;
+ }
+
+ raw = FIELD_GET(BMI323_TAP1_TIMOUT_MSK, reg_value);
+
+ return iio_format_value(buf, IIO_VAL_INT, 1, &raw);
+}
+
+static ssize_t in_accel_gesture_tap_wait_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ bool val;
+ int ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&data->mutex);
+ ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG,
+ BMI323_TAP1_TIMOUT_MSK,
+ FIELD_PREP(BMI323_TAP1_TIMOUT_MSK, val));
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/* Enable/disable gesture confirmation with wait time */
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_wait_timeout, 0);
+
+static IIO_CONST_ATTR(in_accel_gesture_tap_wait_dur_available,
+ "[0.0 0.04 2.52]");
+
+static IIO_CONST_ATTR(in_accel_gesture_doubletap_tap2_min_delay_available,
+ "[0.005 0.005 0.075]");
+
+static IIO_CONST_ATTR(in_accel_gesture_tap_reset_timeout_available,
+ "[0.04 0.04 0.6]");
+
+static IIO_CONST_ATTR(in_accel_gesture_tap_value_available, "[0.0 0.002 1.99]");
+
+static IIO_CONST_ATTR(in_accel_mag_value_available, "[0.0 0.002 7.99]");
+
+static IIO_CONST_ATTR(in_accel_mag_period_available, "[0.0 0.02 162.0]");
+
+static IIO_CONST_ATTR(in_accel_mag_hysteresis_available, "[0.0 0.002 1.99]");
+
+static struct attribute *bmi323_event_attributes[] = {
+ &iio_const_attr_in_accel_gesture_tap_value_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_tap_reset_timeout_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_doubletap_tap2_min_delay_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_tap_wait_dur_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_gesture_tap_wait_timeout.dev_attr.attr,
+ &iio_dev_attr_in_accel_gesture_tap_wait_dur.dev_attr.attr,
+ &iio_const_attr_in_accel_mag_value_available.dev_attr.attr,
+ &iio_const_attr_in_accel_mag_period_available.dev_attr.attr,
+ &iio_const_attr_in_accel_mag_hysteresis_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group bmi323_event_attribute_group = {
+ .attrs = bmi323_event_attributes,
+};
+
+static int bmi323_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return bmi323_motion_event_en(data, dir, state);
+ case IIO_EV_TYPE_GESTURE:
+ return bmi323_tap_event_en(data, dir, state);
+ case IIO_EV_TYPE_CHANGE:
+ return bmi323_step_wtrmrk_en(data, state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int ret, value, reg_val;
+
+ guard(mutex)(&data->mutex);
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ switch (dir) {
+ case IIO_EV_DIR_SINGLETAP:
+ ret = FIELD_GET(BMI323_FEAT_IO0_S_TAP_MSK,
+ data->feature_events);
+ break;
+ case IIO_EV_DIR_DOUBLETAP:
+ ret = FIELD_GET(BMI323_FEAT_IO0_D_TAP_MSK,
+ data->feature_events);
+ break;
+ case IIO_EV_DIR_RISING:
+ value = FIELD_GET(BMI323_FEAT_IO0_XYZ_MOTION_MSK,
+ data->feature_events);
+ ret = value ? 1 : 0;
+ break;
+ case IIO_EV_DIR_FALLING:
+ value = FIELD_GET(BMI323_FEAT_IO0_XYZ_NOMOTION_MSK,
+ data->feature_events);
+ ret = value ? 1 : 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+ case IIO_STEPS:
+ ret = regmap_read(data->regmap, BMI323_INT_MAP1_REG, &reg_val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(BMI323_STEP_CNT_MSK, reg_val) ? 1 : 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+ unsigned int raw;
+ int reg;
+
+ guard(mutex)(&data->mutex);
+
+ switch (type) {
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (!in_range(val, 0, 2))
+ return -EINVAL;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+ BMI323_TAP_THRES_SCALE);
+
+ return bmi323_update_ext_reg(data, BMI323_TAP2_REG,
+ BMI323_TAP2_THRES_MSK,
+ FIELD_PREP(BMI323_TAP2_THRES_MSK,
+ raw));
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ if (val || !in_range(val2, 40000, 560001))
+ return -EINVAL;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+ BMI323_QUITE_TIM_GES_SCALE);
+
+ return bmi323_update_ext_reg(data, BMI323_TAP3_REG,
+ BMI323_TAP3_QT_AFT_GES_MSK,
+ FIELD_PREP(BMI323_TAP3_QT_AFT_GES_MSK,
+ raw));
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ if (val || !in_range(val2, 5000, 70001))
+ return -EINVAL;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+ BMI323_DUR_BW_TAP_SCALE);
+
+ return bmi323_update_ext_reg(data, BMI323_TAP3_REG,
+ BMI323_TAP3_QT_BW_TAP_MSK,
+ FIELD_PREP(BMI323_TAP3_QT_BW_TAP_MSK,
+ raw));
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_MAG:
+ reg = bmi323_motion_config_reg(dir);
+ if (reg < 0)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (!in_range(val, 0, 8))
+ return -EINVAL;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+ BMI323_MOTION_THRES_SCALE);
+
+ return bmi323_update_ext_reg(data, reg,
+ BMI323_MO1_SLOPE_TH_MSK,
+ FIELD_PREP(BMI323_MO1_SLOPE_TH_MSK,
+ raw));
+ case IIO_EV_INFO_PERIOD:
+ if (!in_range(val, 0, 163))
+ return -EINVAL;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+ BMI323_MOTION_DURAT_SCALE);
+
+ return bmi323_update_ext_reg(data,
+ reg + BMI323_MO3_OFFSET,
+ BMI323_MO3_DURA_MSK,
+ FIELD_PREP(BMI323_MO3_DURA_MSK,
+ raw));
+ case IIO_EV_INFO_HYSTERESIS:
+ if (!in_range(val, 0, 2))
+ return -EINVAL;
+
+ raw = BMI323_INT_MICRO_TO_RAW(val, val2,
+ BMI323_MOTION_HYSTR_SCALE);
+
+ return bmi323_update_ext_reg(data,
+ reg + BMI323_MO2_OFFSET,
+ BMI323_MO2_HYSTR_MSK,
+ FIELD_PREP(BMI323_MO2_HYSTR_MSK,
+ raw));
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_CHANGE:
+ if (!in_range(val, 0, 20461))
+ return -EINVAL;
+
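+ /*
+ * The step watermark field counts in units of 20 steps and is 10 bits
+ * wide, hence the 0..20460 input range checked above.
+ */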
+ raw = val / 20;
+ return bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+ BMI323_STEP_SC1_WTRMRK_MSK,
+ FIELD_PREP(BMI323_STEP_SC1_WTRMRK_MSK,
+ raw));
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+ unsigned int raw, reg_value;
+ int ret, reg;
+
+ guard(mutex)(&data->mutex);
+
+ switch (type) {
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = bmi323_read_ext_reg(data, BMI323_TAP2_REG,
+ &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_TAP2_THRES_MSK, reg_value);
+ *val = raw / BMI323_TAP_THRES_SCALE;
+ *val2 = BMI323_RAW_TO_MICRO(raw, BMI323_TAP_THRES_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ ret = bmi323_read_ext_reg(data, BMI323_TAP3_REG,
+ &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_TAP3_QT_AFT_GES_MSK, reg_value);
+ *val = 0;
+ *val2 = BMI323_RAW_TO_MICRO(raw,
+ BMI323_QUITE_TIM_GES_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ ret = bmi323_read_ext_reg(data, BMI323_TAP3_REG,
+ &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_TAP3_QT_BW_TAP_MSK, reg_value);
+ *val = 0;
+ *val2 = BMI323_RAW_TO_MICRO(raw,
+ BMI323_DUR_BW_TAP_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_MAG:
+ reg = bmi323_motion_config_reg(dir);
+ if (reg < 0)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = bmi323_read_ext_reg(data, reg, &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_MO1_SLOPE_TH_MSK, reg_value);
+ *val = raw / BMI323_MOTION_THRES_SCALE;
+ *val2 = BMI323_RAW_TO_MICRO(raw,
+ BMI323_MOTION_THRES_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_EV_INFO_PERIOD:
+ ret = bmi323_read_ext_reg(data,
+ reg + BMI323_MO3_OFFSET,
+ &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_MO3_DURA_MSK, reg_value);
+ *val = raw / BMI323_MOTION_DURAT_SCALE;
+ *val2 = BMI323_RAW_TO_MICRO(raw,
+ BMI323_MOTION_DURAT_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = bmi323_read_ext_reg(data,
+ reg + BMI323_MO2_OFFSET,
+ &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_MO2_HYSTR_MSK, reg_value);
+ *val = raw / BMI323_MOTION_HYSTR_SCALE;
+ *val2 = BMI323_RAW_TO_MICRO(raw,
+ BMI323_MOTION_HYSTR_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_TYPE_CHANGE:
+ ret = bmi323_read_ext_reg(data, BMI323_STEP_SC1_REG,
+ &reg_value);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI323_STEP_SC1_WTRMRK_MSK, reg_value);
+ *val = raw * 20;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int __bmi323_fifo_flush(struct iio_dev *indio_dev)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int i, ret, fifo_lvl, frame_count, bit, index;
+ __le16 *frame, *pchannels;
+ u64 sample_period;
+ s64 tstamp;
+
+ guard(mutex)(&data->mutex);
+ ret = regmap_read(data->regmap, BMI323_FIFO_FILL_LEVEL_REG, &fifo_lvl);
+ if (ret)
+ return ret;
+
+ fifo_lvl = min(fifo_lvl, BMI323_FIFO_FULL_IN_WORDS);
+
+ frame_count = fifo_lvl / BMI323_FIFO_FRAME_LENGTH;
+ if (!frame_count)
+ return -EINVAL;
+
+ if (fifo_lvl % BMI323_FIFO_FRAME_LENGTH)
+ dev_warn(data->dev, "Bad FIFO alignment\n");
+
+ /*
+ * Approximate a timestamp for each sample based on the sampling
+ * frequency, the timestamp of the last sample and the number of samples.
+ */
+ if (data->old_fifo_tstamp) {
+ sample_period = data->fifo_tstamp - data->old_fifo_tstamp;
+ do_div(sample_period, frame_count);
+ } else {
+ sample_period = data->odrns[BMI323_ACCEL];
+ }
+
+ tstamp = data->fifo_tstamp - (frame_count - 1) * sample_period;
+
+ ret = regmap_noinc_read(data->regmap, BMI323_FIFO_DATA_REG,
+ &data->fifo_buff[0],
+ fifo_lvl * BMI323_BYTES_PER_SAMPLE);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < frame_count; i++) {
+ frame = &data->fifo_buff[i * BMI323_FIFO_FRAME_LENGTH];
+ pchannels = &data->buffer.channels[0];
+
+ index = 0;
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ BMI323_CHAN_MAX)
+ pchannels[index++] = frame[bit];
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ tstamp);
+
+ tstamp += sample_period;
+ }
+
+ return frame_count;
+}
+
+static int bmi323_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+
+ val = min(val, (u32)BMI323_FIFO_FULL_IN_FRAMES);
+
+ guard(mutex)(&data->mutex);
+ data->watermark = val;
+
+ return 0;
+}
+
+static int bmi323_fifo_disable(struct bmi323_data *data)
+{
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ ret = regmap_write(data->regmap, BMI323_FIFO_CONF_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+ BMI323_FIFO_WTRMRK_MSK,
+ FIELD_PREP(BMI323_FIFO_WTRMRK_MSK, 0));
+ if (ret)
+ return ret;
+
+ data->fifo_tstamp = 0;
+ data->state = BMI323_IDLE;
+
+ return 0;
+}
+
+static int bmi323_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+
+ if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
+ return 0;
+
+ return bmi323_fifo_disable(data);
+}
+
+static int bmi323_update_watermark(struct bmi323_data *data)
+{
+ int wtrmrk;
+
+ wtrmrk = data->watermark * BMI323_FIFO_FRAME_LENGTH;
+
+ return regmap_write(data->regmap, BMI323_FIFO_WTRMRK_REG, wtrmrk);
+}
+
+static int bmi323_fifo_enable(struct bmi323_data *data)
+{
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ ret = regmap_update_bits(data->regmap, BMI323_FIFO_CONF_REG,
+ BMI323_FIFO_CONF_ACC_GYR_EN_MSK,
+ FIELD_PREP(BMI323_FIFO_CONF_ACC_GYR_EN_MSK,
+ BMI323_FIFO_ACC_GYR_MSK));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+ BMI323_FIFO_WTRMRK_MSK,
+ FIELD_PREP(BMI323_FIFO_WTRMRK_MSK,
+ data->irq_pin));
+ if (ret)
+ return ret;
+
+ ret = bmi323_update_watermark(data);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMI323_FIFO_CTRL_REG,
+ BMI323_FIFO_FLUSH_MSK);
+ if (ret)
+ return ret;
+
+ data->state = BMI323_BUFFER_FIFO;
+
+ return 0;
+}
+
+static int bmi323_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->mutex);
+ /*
+ * When the ODRs of the accelerometer and gyroscope do not match, the
+ * higher of the two is used for the FIFO and the signal with the lower
+ * ODR is padded with dummy frames. So allow buffer reads only when the
+ * accelerometer and gyroscope ODRs are equal.
+ * See datasheet section 5.7 "FIFO Data Buffering".
+ */
+ if (data->odrns[BMI323_ACCEL] != data->odrns[BMI323_GYRO]) {
+ dev_err(data->dev, "Accelero and Gyro ODR doesn't match\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bmi323_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+
+ if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED)
+ return 0;
+
+ return bmi323_fifo_enable(data);
+}
+
+static ssize_t hwfifo_watermark_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int wm;
+
+ scoped_guard(mutex, &data->mutex)
+ wm = data->watermark;
+
+ return sysfs_emit(buf, "%d\n", wm);
+}
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0);
+
+static ssize_t hwfifo_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi323_data *data = iio_priv(indio_dev);
+ bool state;
+
+ scoped_guard(mutex, &data->mutex)
+ state = data->state == BMI323_BUFFER_FIFO;
+
+ return sysfs_emit(buf, "%d\n", state);
+}
+static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0);
+
+static const struct iio_dev_attr *bmi323_fifo_attributes[] = {
+ &iio_dev_attr_hwfifo_watermark,
+ &iio_dev_attr_hwfifo_enabled,
+ NULL
+};
+
+static const struct iio_buffer_setup_ops bmi323_buffer_ops = {
+ .preenable = bmi323_buffer_preenable,
+ .postenable = bmi323_buffer_postenable,
+ .predisable = bmi323_buffer_predisable,
+};
+
+static irqreturn_t bmi323_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bmi323_data *data = iio_priv(indio_dev);
+ unsigned int status_addr, status, feature_event;
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ int ret;
+
+ if (data->irq_pin == BMI323_IRQ_INT1)
+ status_addr = BMI323_STATUS_INT1_REG;
+ else
+ status_addr = BMI323_STATUS_INT2_REG;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, status_addr, &status);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ if (!status || FIELD_GET(BMI323_STATUS_ERROR_MSK, status))
+ return IRQ_NONE;
+
+ if (FIELD_GET(BMI323_STATUS_FIFO_WTRMRK_MSK, status)) {
+ data->old_fifo_tstamp = data->fifo_tstamp;
+ data->fifo_tstamp = iio_get_time_ns(indio_dev);
+ ret = __bmi323_fifo_flush(indio_dev);
+ if (ret < 0)
+ return IRQ_NONE;
+ }
+
+ if (FIELD_GET(BMI323_STATUS_ACC_GYR_DRDY_MSK, status))
+ iio_trigger_poll_nested(data->trig);
+
+ if (FIELD_GET(BMI323_STATUS_MOTION_MSK, status))
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ if (FIELD_GET(BMI323_STATUS_NOMOTION_MSK, status))
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ if (FIELD_GET(BMI323_STATUS_STP_WTR_MSK, status))
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_STEPS, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_NONE),
+ timestamp);
+
+ if (FIELD_GET(BMI323_STATUS_TAP_MSK, status)) {
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap,
+ BMI323_FEAT_EVNT_EXT_REG,
+ &feature_event);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ if (FIELD_GET(BMI323_FEAT_EVNT_EXT_S_MSK, feature_event)) {
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_SINGLETAP),
+ timestamp);
+ }
+
+ if (FIELD_GET(BMI323_FEAT_EVNT_EXT_D_MSK, feature_event))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_DOUBLETAP),
+ timestamp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bmi323_set_drdy_irq(struct bmi323_data *data,
+ enum bmi323_irq_pin irq_pin)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+ BMI323_GYR_DRDY_MSK,
+ FIELD_PREP(BMI323_GYR_DRDY_MSK, irq_pin));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG,
+ BMI323_ACC_DRDY_MSK,
+ FIELD_PREP(BMI323_ACC_DRDY_MSK, irq_pin));
+}
+
+static int bmi323_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct bmi323_data *data = iio_trigger_get_drvdata(trig);
+ enum bmi323_irq_pin irq_pin;
+
+ guard(mutex)(&data->mutex);
+
+ if (data->state == BMI323_BUFFER_FIFO) {
+ dev_warn(data->dev, "Can't set trigger when FIFO enabled\n");
+ return -EBUSY;
+ }
+
+ if (state) {
+ data->state = BMI323_BUFFER_DRDY_TRIGGERED;
+ irq_pin = data->irq_pin;
+ } else {
+ data->state = BMI323_IDLE;
+ irq_pin = BMI323_IRQ_DISABLED;
+ }
+
+ return bmi323_set_drdy_irq(data, irq_pin);
+}
+
+static const struct iio_trigger_ops bmi323_trigger_ops = {
+ .set_trigger_state = &bmi323_data_rdy_trigger_set_state,
+};
+
+static irqreturn_t bmi323_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int ret, bit, index = 0;
+
+ /* Lock to protect the data->buffer */
+ guard(mutex)(&data->mutex);
+
+ if (*indio_dev->active_scan_mask == BMI323_ALL_CHAN_MSK) {
+ ret = regmap_bulk_read(data->regmap, BMI323_ACCEL_X_REG,
+ &data->buffer.channels,
+ ARRAY_SIZE(data->buffer.channels));
+ if (ret)
+ return IRQ_NONE;
+ } else {
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ BMI323_CHAN_MAX) {
+ ret = regmap_raw_read(data->regmap,
+ BMI323_ACCEL_X_REG + bit,
+ &data->buffer.channels[index++],
+ BMI323_BYTES_PER_SAMPLE);
+ if (ret)
+ return IRQ_NONE;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
+ iio_get_time_ns(indio_dev));
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bmi323_set_average(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, int avg)
+{
+ int raw = ARRAY_SIZE(bmi323_accel_gyro_avrg);
+
+ while (raw--)
+ if (avg == bmi323_accel_gyro_avrg[raw])
+ break;
+ if (raw < 0)
+ return -EINVAL;
+
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+ BMI323_ACC_GYRO_CONF_AVG_MSK,
+ FIELD_PREP(BMI323_ACC_GYRO_CONF_AVG_MSK,
+ raw));
+}
+
+static int bmi323_get_average(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, int *avg)
+{
+ int ret, value, raw;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, bmi323_hw[sensor].config, &value);
+ if (ret)
+ return ret;
+ }
+
+ raw = FIELD_GET(BMI323_ACC_GYRO_CONF_AVG_MSK, value);
+ *avg = bmi323_accel_gyro_avrg[raw];
+
+ return IIO_VAL_INT;
+}
+
+static int bmi323_enable_steps(struct bmi323_data *data, int val)
+{
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ if (data->odrhz[BMI323_ACCEL] < 200) {
+ dev_err(data->dev, "Invalid accelerometer parameter\n");
+ return -EINVAL;
+ }
+
+ ret = bmi323_feature_engine_events(data, BMI323_FEAT_IO0_STP_CNT_MSK,
+ val ? 1 : 0);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_STP_CNT_MSK,
+ FIELD_PREP(BMI323_FEAT_IO0_STP_CNT_MSK, val ? 1 : 0));
+
+ return 0;
+}
+
+static int bmi323_read_steps(struct bmi323_data *data, int *val)
+{
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ if (!FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, data->feature_events))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(data->regmap, BMI323_FEAT_IO2_REG,
+ data->steps_count,
+ ARRAY_SIZE(data->steps_count));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le32(data->steps_count);
+
+ return IIO_VAL_INT;
+}
+
+static int bmi323_read_axis(struct bmi323_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ enum bmi323_sensor_type sensor;
+ unsigned int value;
+ u8 addr;
+ int ret;
+
+ ret = bmi323_get_error_status(data);
+ if (ret)
+ return -EINVAL;
+
+ sensor = bmi323_iio_to_sensor(chan->type);
+ addr = bmi323_hw[sensor].data + (chan->channel2 - IIO_MOD_X);
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, addr, &value);
+ if (ret)
+ return ret;
+ }
+
+ *val = sign_extend32(value, chan->scan_type.realbits - 1);
+
+ return IIO_VAL_INT;
+}
+
+static int bmi323_get_temp_data(struct bmi323_data *data, int *val)
+{
+ unsigned int value;
+ int ret;
+
+ ret = bmi323_get_error_status(data);
+ if (ret)
+ return -EINVAL;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, BMI323_TEMP_REG, &value);
+ if (ret)
+ return ret;
+ }
+
+ *val = sign_extend32(value, 15);
+
+ return IIO_VAL_INT;
+}
+
+static int bmi323_get_odr(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, int *odr, int *uodr)
+{
+ int ret, value, odr_raw;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, bmi323_hw[sensor].config, &value);
+ if (ret)
+ return ret;
+ }
+
+ odr_raw = FIELD_GET(BMI323_ACC_GYRO_CONF_ODR_MSK, value);
+ *odr = bmi323_acc_gyro_odr[odr_raw - 1][0];
+ *uodr = bmi323_acc_gyro_odr[odr_raw - 1][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int bmi323_configure_power_mode(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor,
+ int odr_index)
+{
+ enum bmi323_opr_mode mode;
+
+ if (bmi323_acc_gyro_odr[odr_index][0] > 25)
+ mode = ACC_GYRO_MODE_CONTINOUS;
+ else
+ mode = ACC_GYRO_MODE_DUTYCYCLE;
+
+ return bmi323_set_mode(data, sensor, mode);
+}
+
+static int bmi323_set_odr(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, int odr, int uodr)
+{
+ int odr_raw, ret;
+
+ odr_raw = ARRAY_SIZE(bmi323_acc_gyro_odr);
+
+ while (odr_raw--)
+ if (odr == bmi323_acc_gyro_odr[odr_raw][0] &&
+ uodr == bmi323_acc_gyro_odr[odr_raw][1])
+ break;
+ if (odr_raw < 0)
+ return -EINVAL;
+
+ ret = bmi323_configure_power_mode(data, sensor, odr_raw);
+ if (ret)
+ return -EINVAL;
+
+ guard(mutex)(&data->mutex);
+ data->odrhz[sensor] = bmi323_acc_gyro_odr[odr_raw][0];
+ data->odrns[sensor] = bmi323_acc_gyro_odrns[odr_raw];
+
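+ /*
+ * The register ODR code is the table index plus one; see the
+ * matching 'odr_raw - 1' lookup in bmi323_get_odr().
+ */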
+ odr_raw++;
+
+ return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+ BMI323_ACC_GYRO_CONF_ODR_MSK,
+ FIELD_PREP(BMI323_ACC_GYRO_CONF_ODR_MSK,
+ odr_raw));
+}
+
+static int bmi323_get_scale(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, int *val2)
+{
+ int ret, value, scale_raw;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, bmi323_hw[sensor].config,
+ &value);
+ if (ret)
+ return ret;
+ }
+
+ scale_raw = FIELD_GET(BMI323_ACC_GYRO_CONF_SCL_MSK, value);
+ *val2 = bmi323_hw[sensor].scale_table[scale_raw][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int bmi323_set_scale(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, int val, int val2)
+{
+ int scale_raw;
+
+ scale_raw = bmi323_hw[sensor].scale_table_len;
+
+ while (scale_raw--)
+ if (val == bmi323_hw[sensor].scale_table[scale_raw][0] &&
+ val2 == bmi323_hw[sensor].scale_table[scale_raw][1])
+ break;
+ if (scale_raw < 0)
+ return -EINVAL;
+
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+ BMI323_ACC_GYRO_CONF_SCL_MSK,
+ FIELD_PREP(BMI323_ACC_GYRO_CONF_SCL_MSK,
+ scale_raw));
+}
+
+static int bmi323_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ enum bmi323_sensor_type sensor;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = (const int *)bmi323_acc_gyro_odr;
+ *length = ARRAY_SIZE(bmi323_acc_gyro_odr) * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ sensor = bmi323_iio_to_sensor(chan->type);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = (const int *)bmi323_hw[sensor].scale_table;
+ *length = bmi323_hw[sensor].scale_table_len * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = (const int *)bmi323_accel_gyro_avrg;
+ *length = ARRAY_SIZE(bmi323_accel_gyro_avrg);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type),
+ val);
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_ENABLE:
+ return bmi323_enable_steps(data, val);
+ case IIO_CHAN_INFO_PROCESSED:
+ scoped_guard(mutex, &data->mutex) {
+ if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
+ data->feature_events))
+ return -EINVAL;
+
+ /* Clear step counter value */
+ ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+ BMI323_STEP_SC1_RST_CNT_MSK,
+ FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK,
+ 1));
+ }
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi323_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct bmi323_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ return bmi323_read_steps(data, val);
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ case IIO_ANGL_VEL:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = bmi323_read_axis(data, chan, val);
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_TEMP:
+ return bmi323_get_temp_data(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return bmi323_get_odr(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ case IIO_ANGL_VEL:
+ *val = 0;
+ return bmi323_get_scale(data,
+ bmi323_iio_to_sensor(chan->type),
+ val2);
+ case IIO_TEMP:
+ *val = BMI323_TEMP_SCALE / MEGA;
+ *val2 = BMI323_TEMP_SCALE % MEGA;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return bmi323_get_average(data,
+ bmi323_iio_to_sensor(chan->type),
+ val);
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = BMI323_TEMP_OFFSET;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_ENABLE:
+ scoped_guard(mutex, &data->mutex)
+ *val = FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
+ data->feature_events);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bmi323_info = {
+ .read_raw = bmi323_read_raw,
+ .write_raw = bmi323_write_raw,
+ .read_avail = bmi323_read_avail,
+ .hwfifo_set_watermark = bmi323_set_watermark,
+ .write_event_config = bmi323_write_event_config,
+ .read_event_config = bmi323_read_event_config,
+ .write_event_value = bmi323_write_event_value,
+ .read_event_value = bmi323_read_event_value,
+ .event_attrs = &bmi323_event_attribute_group,
+};
+
+#define BMI323_SCAN_MASK_ACCEL_3AXIS \
+ (BIT(BMI323_ACCEL_X) | BIT(BMI323_ACCEL_Y) | BIT(BMI323_ACCEL_Z))
+
+#define BMI323_SCAN_MASK_GYRO_3AXIS \
+ (BIT(BMI323_GYRO_X) | BIT(BMI323_GYRO_Y) | BIT(BMI323_GYRO_Z))
+
+static const unsigned long bmi323_avail_scan_masks[] = {
+ /* 3-axis accel */
+ BMI323_SCAN_MASK_ACCEL_3AXIS,
+ /* 3-axis gyro */
+ BMI323_SCAN_MASK_GYRO_3AXIS,
+ /* 3-axis accel + 3-axis gyro */
+ BMI323_SCAN_MASK_ACCEL_3AXIS | BMI323_SCAN_MASK_GYRO_3AXIS,
+ 0
+};
+
+static int bmi323_int_pin_config(struct bmi323_data *data,
+ enum bmi323_irq_pin irq_pin,
+ bool active_high, bool open_drain, bool latch)
+{
+ unsigned int mask, field_value;
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMI323_IO_INT_CONF_REG,
+ BMI323_IO_INT_LTCH_MSK,
+ FIELD_PREP(BMI323_IO_INT_LTCH_MSK, latch));
+ if (ret)
+ return ret;
+
+ ret = bmi323_update_ext_reg(data, BMI323_GEN_SET1_REG,
+ BMI323_GEN_HOLD_DUR_MSK,
+ FIELD_PREP(BMI323_GEN_HOLD_DUR_MSK, 0));
+ if (ret)
+ return ret;
+
+ switch (irq_pin) {
+ case BMI323_IRQ_INT1:
+ mask = BMI323_IO_INT1_LVL_OD_OP_MSK;
+
+ field_value = FIELD_PREP(BMI323_IO_INT1_LVL_MSK, active_high) |
+ FIELD_PREP(BMI323_IO_INT1_OD_MSK, open_drain) |
+ FIELD_PREP(BMI323_IO_INT1_OP_EN_MSK, 1);
+ break;
+ case BMI323_IRQ_INT2:
+ mask = BMI323_IO_INT2_LVL_OD_OP_MSK;
+
+ field_value = FIELD_PREP(BMI323_IO_INT2_LVL_MSK, active_high) |
+ FIELD_PREP(BMI323_IO_INT2_OD_MSK, open_drain) |
+ FIELD_PREP(BMI323_IO_INT2_OP_EN_MSK, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(data->regmap, BMI323_IO_INT_CTR_REG, mask,
+ field_value);
+}
+
+static int bmi323_trigger_probe(struct bmi323_data *data,
+ struct iio_dev *indio_dev)
+{
+ bool open_drain, active_high, latch;
+ struct fwnode_handle *fwnode;
+ enum bmi323_irq_pin irq_pin;
+ int ret, irq, irq_type;
+ struct irq_data *desc;
+
+ fwnode = dev_fwnode(data->dev);
+ if (!fwnode)
+ return -ENODEV;
+
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
+ if (irq > 0) {
+ irq_pin = BMI323_IRQ_INT1;
+ } else {
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
+ if (irq < 0)
+ return 0;
+
+ irq_pin = BMI323_IRQ_INT2;
+ }
+
+ desc = irq_get_irq_data(irq);
+ if (!desc)
+ return dev_err_probe(data->dev, -EINVAL,
+ "Could not find IRQ %d\n", irq);
+
+ irq_type = irqd_get_trigger_type(desc);
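+ /*
+ * Level-triggered lines use the latched interrupt mode,
+ * edge-triggered lines the non-latched one.
+ */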
+ switch (irq_type) {
+ case IRQF_TRIGGER_RISING:
+ latch = false;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ latch = true;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ latch = false;
+ active_high = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ latch = true;
+ active_high = false;
+ break;
+ default:
+ return dev_err_probe(data->dev, -EINVAL,
+ "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ }
+
+ open_drain = fwnode_property_read_bool(fwnode, "drive-open-drain");
+
+ ret = bmi323_int_pin_config(data, irq_pin, active_high, open_drain,
+ latch);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Failed to configure irq line\n");
+
+ data->trig = devm_iio_trigger_alloc(data->dev, "%s-trig-%d",
+ indio_dev->name, irq_pin);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->ops = &bmi323_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, data);
+
+ ret = devm_request_threaded_irq(data->dev, irq, NULL,
+ bmi323_irq_thread_handler,
+ IRQF_ONESHOT, "bmi323-int", indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Failed to request IRQ\n");
+
+ ret = devm_iio_trigger_register(data->dev, data->trig);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Trigger registration failed\n");
+
+ data->irq_pin = irq_pin;
+
+ return 0;
+}
+
+static int bmi323_feature_engine_enable(struct bmi323_data *data, bool en)
+{
+ unsigned int feature_status;
+ int ret;
+
+ if (!en)
+ return regmap_write(data->regmap, BMI323_FEAT_CTRL_REG, 0);
+
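+ /* 0x012c is the feature engine startup configuration word (per datasheet). */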
+ ret = regmap_write(data->regmap, BMI323_FEAT_IO2_REG, 0x012c);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMI323_FEAT_IO_STATUS_REG,
+ BMI323_FEAT_IO_STATUS_MSK);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMI323_FEAT_CTRL_REG,
+ BMI323_FEAT_ENG_EN_MSK);
+ if (ret)
+ return ret;
+
+ /*
+ * It takes around 4 msec to enable the Feature engine, so check
+ * the status of the feature engine every 2 msec for a maximum
+ * of 5 trials.
+ */
+ ret = regmap_read_poll_timeout(data->regmap, BMI323_FEAT_IO1_REG,
+ feature_status,
+ FIELD_GET(BMI323_FEAT_IO1_ERR_MSK,
+ feature_status) == 1,
+ BMI323_FEAT_ENG_POLL,
+ BMI323_FEAT_ENG_TIMEOUT);
+ if (ret)
+ return dev_err_probe(data->dev, -EINVAL,
+ "Failed to enable feature engine\n");
+
+ return 0;
+}
+
+static void bmi323_disable(void *data_ptr)
+{
+ struct bmi323_data *data = data_ptr;
+
+ bmi323_set_mode(data, BMI323_ACCEL, ACC_GYRO_MODE_DISABLE);
+ bmi323_set_mode(data, BMI323_GYRO, ACC_GYRO_MODE_DISABLE);
+}
+
+static int bmi323_set_bw(struct bmi323_data *data,
+ enum bmi323_sensor_type sensor, enum bmi323_3db_bw bw)
+{
+ return regmap_update_bits(data->regmap, bmi323_hw[sensor].config,
+ BMI323_ACC_GYRO_CONF_BW_MSK,
+ FIELD_PREP(BMI323_ACC_GYRO_CONF_BW_MSK, bw));
+}
+
+static int bmi323_init(struct bmi323_data *data)
+{
+ int ret, val;
+
+ /*
+ * Perform soft reset to make sure the device is in a known state after
+ * start up. A delay of 1.5 ms is required after reset.
+ * See datasheet section 5.17 "Soft Reset".
+ */
+ ret = regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL);
+ if (ret)
+ return ret;
+
+ usleep_range(1500, 2000);
+
+ /*
+ * Dummy read is required to enable SPI interface after reset.
+ * See datasheet section 7.2.1 "Protocol Selection".
+ */
+ regmap_read(data->regmap, BMI323_CHIP_ID_REG, &val);
+
+ ret = regmap_read(data->regmap, BMI323_STATUS_REG, &val);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(BMI323_STATUS_POR_MSK, val))
+ return dev_err_probe(data->dev, -EINVAL,
+ "Sensor initialization error\n");
+
+ ret = regmap_read(data->regmap, BMI323_CHIP_ID_REG, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(BMI323_CHIP_ID_MSK, val) != BMI323_CHIP_ID_VAL)
+ return dev_err_probe(data->dev, -EINVAL, "Chip ID mismatch\n");
+
+ ret = bmi323_feature_engine_enable(data, true);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, BMI323_ERR_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val)
+ return dev_err_probe(data->dev, -EINVAL,
+ "Sensor power error = 0x%x\n", val);
+
+ /*
+ * Set the Bandwidth coefficient which defines the 3 dB cutoff
+ * frequency in relation to the ODR.
+ */
+ ret = bmi323_set_bw(data, BMI323_ACCEL, BMI323_BW_ODR_BY_2);
+ if (ret)
+ return ret;
+
+ ret = bmi323_set_bw(data, BMI323_GYRO, BMI323_BW_ODR_BY_2);
+ if (ret)
+ return ret;
+
+ ret = bmi323_set_odr(data, BMI323_ACCEL, 25, 0);
+ if (ret)
+ return ret;
+
+ ret = bmi323_set_odr(data, BMI323_GYRO, 25, 0);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(data->dev, bmi323_disable, data);
+}
+
+int bmi323_core_probe(struct device *dev)
+{
+ static const char * const regulator_names[] = { "vdd", "vddio" };
+ struct iio_dev *indio_dev;
+ struct bmi323_data *data;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = dev_get_regmap(dev, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to allocate device\n");
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
+ regulator_names);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->regmap = regmap;
+ mutex_init(&data->mutex);
+
+ ret = bmi323_init(data);
+ if (ret)
+ return -EINVAL;
+
+ ret = iio_read_mount_matrix(dev, &data->orientation);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "bmi323-imu";
+ indio_dev->info = &bmi323_info;
+ indio_dev->channels = bmi323_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bmi323_channels);
+ indio_dev->available_scan_masks = bmi323_avail_scan_masks;
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ dev_set_drvdata(data->dev, indio_dev);
+
+ ret = bmi323_trigger_probe(data, indio_dev);
+ if (ret)
+ return -EINVAL;
+
+ ret = devm_iio_triggered_buffer_setup_ext(data->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ bmi323_trigger_handler,
+ IIO_BUFFER_DIRECTION_IN,
+ &bmi323_buffer_ops,
+ bmi323_fifo_attributes);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Failed to setup trigger buffer\n");
+
+ ret = devm_iio_device_register(data->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Unable to register iio device\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323);
+
+MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c
new file mode 100644
index 000000000..20a8001b9
--- /dev/null
+++ b/drivers/iio/imu/bmi323/bmi323_i2c.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C driver for Bosch BMI323 6-Axis IMU.
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bmi323.h"
+
+struct bmi323_i2c_priv {
+ struct i2c_client *i2c;
+ u8 i2c_rx_buffer[BMI323_FIFO_LENGTH_IN_BYTES + BMI323_I2C_DUMMY];
+};
+
+/*
+ * From BMI323 datasheet section 4: Notes on the Serial Interface Support.
+ * Each I2C register read operation requires reading two dummy bytes before
+ * the actual payload.
+ */
+static int bmi323_regmap_i2c_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct bmi323_i2c_priv *priv = context;
+ struct i2c_msg msgs[2];
+ int ret;
+
+ msgs[0].addr = priv->i2c->addr;
+ msgs[0].flags = priv->i2c->flags;
+ msgs[0].len = reg_size;
+ msgs[0].buf = (u8 *)reg_buf;
+
+ msgs[1].addr = priv->i2c->addr;
+ msgs[1].len = val_size + BMI323_I2C_DUMMY;
+ msgs[1].buf = priv->i2c_rx_buffer;
+ msgs[1].flags = priv->i2c->flags | I2C_M_RD;
+
+ ret = i2c_transfer(priv->i2c->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return -EIO;
+
+ memcpy(val_buf, priv->i2c_rx_buffer + BMI323_I2C_DUMMY, val_size);
+
+ return 0;
+}
+
+static int bmi323_regmap_i2c_write(void *context, const void *data,
+ size_t count)
+{
+ struct bmi323_i2c_priv *priv = context;
+ u8 reg;
+
+ reg = *(u8 *)data;
+ return i2c_smbus_write_i2c_block_data(priv->i2c, reg,
+ count - sizeof(u8),
+ data + sizeof(u8));
+}
+
+static struct regmap_bus bmi323_regmap_bus = {
+ .read = bmi323_regmap_i2c_read,
+ .write = bmi323_regmap_i2c_write,
+};
+
+static const struct regmap_config bmi323_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = BMI323_CFG_RES_REG,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int bmi323_i2c_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct bmi323_i2c_priv *priv;
+ struct regmap *regmap;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->i2c = i2c;
+ regmap = devm_regmap_init(dev, &bmi323_regmap_bus, priv,
+ &bmi323_i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize I2C Regmap\n");
+
+ return bmi323_core_probe(dev);
+}
+
+static const struct i2c_device_id bmi323_i2c_ids[] = {
+ { "bmi323" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bmi323_i2c_ids);
+
+static const struct of_device_id bmi323_of_i2c_match[] = {
+ { .compatible = "bosch,bmi323" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match);
+
+static struct i2c_driver bmi323_i2c_driver = {
+ .driver = {
+ .name = "bmi323",
+ .of_match_table = bmi323_of_i2c_match,
+ },
+ .probe = bmi323_i2c_probe,
+ .id_table = bmi323_i2c_ids,
+};
+module_i2c_driver(bmi323_i2c_driver);
+
+MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_BMI323);
diff --git a/drivers/iio/imu/bmi323/bmi323_spi.c b/drivers/iio/imu/bmi323/bmi323_spi.c
new file mode 100644
index 000000000..7b1e8127d
--- /dev/null
+++ b/drivers/iio/imu/bmi323/bmi323_spi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI driver for Bosch BMI323 6-Axis IMU.
+ *
+ * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "bmi323.h"
+
+/*
+ * From BMI323 datasheet section 4: Notes on the Serial Interface Support.
+ * Each SPI register read operation requires reading one dummy byte before
+ * the actual payload.
+ */
+static int bmi323_regmap_spi_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct spi_device *spi = context;
+
+ return spi_write_then_read(spi, reg_buf, reg_size, val_buf, val_size);
+}
+
+static int bmi323_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct spi_device *spi = context;
+ u8 *data_buff = (u8 *)data;
+
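+ /*
+ * regmap uses 8 pad bits, so the buffer arrives as { reg, pad,
+ * data... }. Move the register address into the pad byte and skip
+ * the first byte so that { reg, data... } goes on the wire.
+ */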
+ data_buff[1] = data_buff[0];
+ return spi_write(spi, data_buff + 1, count - 1);
+}
+
+static struct regmap_bus bmi323_regmap_bus = {
+ .read = bmi323_regmap_spi_read,
+ .write = bmi323_regmap_spi_write,
+};
+
+static const struct regmap_config bmi323_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .pad_bits = 8,
+ .read_flag_mask = BIT(7),
+ .max_register = BMI323_CFG_RES_REG,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int bmi323_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(dev, &bmi323_regmap_bus, dev,
+ &bmi323_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize SPI Regmap\n");
+
+ return bmi323_core_probe(dev);
+}
+
+static const struct spi_device_id bmi323_spi_ids[] = {
+ { "bmi323" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bmi323_spi_ids);
+
+static const struct of_device_id bmi323_of_spi_match[] = {
+ { .compatible = "bosch,bmi323" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bmi323_of_spi_match);
+
+static struct spi_driver bmi323_spi_driver = {
+ .driver = {
+ .name = "bmi323",
+ .of_match_table = bmi323_of_spi_match,
+ },
+ .probe = bmi323_spi_probe,
+ .id_table = bmi323_spi_ids,
+};
+module_spi_driver(bmi323_spi_driver);
+
+MODULE_DESCRIPTION("Bosch BMI323 IMU driver");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_BMI323);
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
index 57728a568..5677bdf4f 100644
--- a/drivers/iio/imu/bno055/bno055_ser_core.c
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -378,12 +378,12 @@ static void bno055_ser_handle_rx(struct bno055_ser_priv *priv, int status)
* Also, we assume to RX one pkt per time (i.e. the HW doesn't send anything
* unless we require to AND we don't queue more than one request per time).
*/
-static int bno055_ser_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t size)
+static ssize_t bno055_ser_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
int status;
struct bno055_ser_priv *priv = serdev_device_get_drvdata(serdev);
- int remaining = size;
+ size_t remaining = size;
if (size == 0)
return 0;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index b1e4fde27..f67bd5a39 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -137,10 +137,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&st->lock);
/* sleep maximum required time */
- if (sleep_accel > sleep_temp)
- sleep = sleep_accel;
- else
- sleep = sleep_temp;
+ sleep = max(sleep_accel, sleep_temp);
if (sleep)
msleep(sleep);
return ret;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 6ef1df9d6..b52f328fd 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -424,10 +424,7 @@ out_unlock:
mutex_unlock(&st->lock);
/* sleep maximum required time */
- if (sleep_sensor > sleep_temp)
- sleep = sleep_sensor;
- else
- sleep = sleep_temp;
+ sleep = max(sleep_sensor, sleep_temp);
if (sleep)
msleep(sleep);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 3bf946e56..3df0a715e 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -137,10 +137,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&st->lock);
/* sleep maximum required time */
- if (sleep_gyro > sleep_temp)
- sleep = sleep_gyro;
- else
- sleep = sleep_temp;
+ sleep = max(sleep_gyro, sleep_temp);
if (sleep)
msleep(sleep);
return ret;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 6b034dccc..0e94e5335 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -567,15 +567,12 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
int axis, int val)
{
- int ind, result;
+ int ind;
__be16 d = cpu_to_be16(val);
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
- if (result)
- return -EINVAL;
- return 0;
+ return regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
}
static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
@@ -587,7 +584,7 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
ind = (axis - IIO_MOD_X) * 2;
result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d));
if (result)
- return -EINVAL;
+ return result;
*val = (short)be16_to_cpup(&d);
return IIO_VAL_INT;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 176d31d9f..b581a7e80 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -413,6 +413,22 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
{
if (bitmap_empty(mask, masklength))
return NULL;
+ /*
+ * The condition here does not handle multi-long masks correctly.
+ * It only checks the first long to be zero, and will use such a mask
+ * as a terminator even if there were bits set after the first long.
+ *
+ * A correct check would require using:
+ * while (!bitmap_empty(av_masks, masklength))
+ * instead. This is potentially hazardous because the
+ * available_scan_masks is a zero-terminated array of longs - and
+ * using the proper bitmap_empty() check for multi-long wide masks
+ * would require the array to be terminated with multiple zero longs -
+ * which is not such a usual pattern.
+ *
+ * As of writing this, no multi-long wide masks were found in-tree, so
+ * the simple while (*av_masks) check works.
+ */
while (*av_masks) {
if (strict) {
if (bitmap_equal(mask, av_masks, masklength))
@@ -600,7 +616,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&iio_show_fixed_type,
NULL,
0,
- 0,
+ IIO_SEPARATE,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
@@ -613,7 +629,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&iio_scan_el_show,
&iio_scan_el_store,
chan->scan_index,
- 0,
+ IIO_SEPARATE,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
@@ -623,7 +639,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
&iio_scan_el_ts_show,
&iio_scan_el_ts_store,
chan->scan_index,
- 0,
+ IIO_SEPARATE,
&indio_dev->dev,
buffer,
&buffer->buffer_attr_list);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e6d3d07a4..173dc0076 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -117,6 +117,8 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
[IIO_MOD_LIGHT_UV] = "uv",
+ [IIO_MOD_LIGHT_UVA] = "uva",
+ [IIO_MOD_LIGHT_UVB] = "uvb",
[IIO_MOD_LIGHT_DUV] = "duv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
@@ -182,6 +184,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
+ [IIO_CHAN_INFO_TROUGH] = "trough_raw",
};
/**
* iio_device_id() - query the unique ID for the device
@@ -1899,6 +1902,66 @@ static int iio_check_extended_name(const struct iio_dev *indio_dev)
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
+static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
+{
+ unsigned int num_masks, masklength, longs_per_mask;
+ const unsigned long *av_masks;
+ int i;
+
+ av_masks = indio_dev->available_scan_masks;
+ masklength = indio_dev->masklength;
+ longs_per_mask = BITS_TO_LONGS(masklength);
+
+ /*
+ * The code that determines how many available_scan_masks are in the
+ * array assumes the masks end at the first long with all bits zeroed.
+ * This is incorrect for masks that span more than one long, where one
+ * of the available masks has a whole long worth of bits zeroed (but
+ * subsequent bit(s) set). The warning below is a safety measure
+ * against the bug where the array of masks is terminated by a single
+ * zero while the mask width is greater than the width of a long.
+ */
+ if (longs_per_mask > 1)
+ dev_warn(indio_dev->dev.parent,
+ "multi long available scan masks not fully supported\n");
+
+ if (bitmap_empty(av_masks, masklength))
+ dev_warn(indio_dev->dev.parent, "empty scan mask\n");
+
+ for (num_masks = 0; *av_masks; num_masks++)
+ av_masks += longs_per_mask;
+
+ if (num_masks < 2)
+ return;
+
+ av_masks = indio_dev->available_scan_masks;
+
+ /*
+ * Go through all the masks from the first to one before the last, and
+ * check that no mask found later in the available_scan_masks array is
+ * a subset of a mask found earlier. If this happens, the mask found
+ * later will never get used because scanning the array stops at the
+ * first suitable mask. Drivers should order the array of available
+ * masks in order of preference (presumably the least costly to access
+ * masks first).
+ */
+ for (i = 0; i < num_masks - 1; i++) {
+ const unsigned long *mask1;
+ int j;
+
+ mask1 = av_masks + i * longs_per_mask;
+ for (j = i + 1; j < num_masks; j++) {
+ const unsigned long *mask2;
+
+ mask2 = av_masks + j * longs_per_mask;
+ if (bitmap_subset(mask2, mask1, masklength))
+ dev_warn(indio_dev->dev.parent,
+ "available_scan_mask %d subset of %d. Never used\n",
+ j, i);
+ }
+ }
+}
+
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -1937,6 +2000,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
goto error_unreg_debugfs;
}
+ if (indio_dev->available_scan_masks)
+ iio_sanity_check_avail_scan_masks(indio_dev);
+
ret = iio_device_register_sysfs(indio_dev);
if (ret) {
dev_err(indio_dev->dev.parent,
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 45edba797..143003232 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -252,6 +252,21 @@ config ISL29125
To compile this driver as a module, choose M here: the module will be
called isl29125.
+config ISL76682
+ tristate "Intersil ISL76682 Light Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build a driver for the Intersil ISL76682
+ Ambient Light Sensor and IR Intensity sensor. This driver provides
+ the readouts via standard IIO sysfs and device interface. Both ALS
+ illuminance and IR illuminance are provided raw with separate scale
+ setting which can be configured via sysfs, the default scale is 1000
+ lux, other options are 4000/16000/64000 lux.
+
+ To compile this driver as a module, choose M here: the module will be
+ called isl76682.
+
config HID_SENSOR_ALS
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -347,6 +362,17 @@ config SENSORS_LM3533
changes. The ALS-control output values can be set per zone for the
three current output channels.
+config LTR390
+ tristate "LTR-390UV-01 ambient light and UV sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Lite-On LTR-390UV-01
+ ambient light and UV sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called ltr390.
+
config LTR501
tristate "LTR-501ALS-01 light sensor"
depends on I2C
@@ -637,6 +663,17 @@ config VEML6070
To compile this driver as a module, choose M here: the
module will be called veml6070.
+config VEML6075
+ tristate "VEML6075 UVA and UVB light sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6075 UVA
+ and UVB light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6075.
+
config VL6180
tristate "VL6180 ALS, range and proximity sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c0db4c4c3..2e5fdb33e 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -28,8 +28,10 @@ obj-$(CONFIG_IQS621_ALS) += iqs621-als.o
obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
obj-$(CONFIG_ISL29125) += isl29125.o
+obj-$(CONFIG_ISL76682) += isl76682.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
+obj-$(CONFIG_LTR390) += ltr390.o
obj-$(CONFIG_LTR501) += ltr501.o
obj-$(CONFIG_LTRF216A) += ltrf216a.o
obj-$(CONFIG_LV0104CS) += lv0104cs.o
@@ -60,5 +62,6 @@ obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VCNL4035) += vcnl4035.o
obj-$(CONFIG_VEML6030) += veml6030.o
obj-$(CONFIG_VEML6070) += veml6070.o
+obj-$(CONFIG_VEML6075) += veml6075.o
obj-$(CONFIG_VL6180) += vl6180.o
obj-$(CONFIG_ZOPT2201) += zopt2201.o
diff --git a/drivers/iio/light/isl76682.c b/drivers/iio/light/isl76682.c
new file mode 100644
index 000000000..cf6ddee44
--- /dev/null
+++ b/drivers/iio/light/isl76682.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IIO driver for the ISL76682 ambient light sensor.
+ *
+ * Copyright (c) 2023 Marek Vasut <marex@denx.de>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define ISL76682_REG_COMMAND 0x00
+
+#define ISL76682_COMMAND_EN BIT(7)
+#define ISL76682_COMMAND_MODE_CONTINUOUS BIT(6)
+#define ISL76682_COMMAND_LIGHT_IR BIT(5)
+
+#define ISL76682_COMMAND_RANGE_LUX_1K 0x0
+#define ISL76682_COMMAND_RANGE_LUX_4K 0x1
+#define ISL76682_COMMAND_RANGE_LUX_16K 0x2
+#define ISL76682_COMMAND_RANGE_LUX_64K 0x3
+#define ISL76682_COMMAND_RANGE_LUX_MASK GENMASK(1, 0)
+
+#define ISL76682_REG_ALSIR_L 0x01
+
+#define ISL76682_REG_ALSIR_U 0x02
+
+#define ISL76682_NUM_REGS (ISL76682_REG_ALSIR_U + 1)
+
+#define ISL76682_CONV_TIME_MS 100
+#define ISL76682_INT_TIME_US 90000
+
+#define ISL76682_ADC_MAX (BIT(16) - 1)
+
+struct isl76682_chip {
+ /*
+ * Lock to synchronize access to the device command register
+ * and the content of the range variable below.
+ */
+ struct mutex lock;
+ struct regmap *regmap;
+ u8 range;
+ u8 command;
+};
+
+struct isl76682_range {
+ u8 range;
+ u32 als;
+ u32 ir;
+};
+
+static struct isl76682_range isl76682_range_table[] = {
+ { ISL76682_COMMAND_RANGE_LUX_1K, 15000, 10500 },
+ { ISL76682_COMMAND_RANGE_LUX_4K, 60000, 42000 },
+ { ISL76682_COMMAND_RANGE_LUX_16K, 240000, 168000 },
+ { ISL76682_COMMAND_RANGE_LUX_64K, 960000, 673000 }
+};
+
+static int isl76682_get(struct isl76682_chip *chip, bool mode_ir, int *data)
+{
+ u8 command;
+ int ret;
+
+ command = ISL76682_COMMAND_EN | ISL76682_COMMAND_MODE_CONTINUOUS |
+ chip->range;
+
+ if (mode_ir)
+ command |= ISL76682_COMMAND_LIGHT_IR;
+
+ if (command != chip->command) {
+ ret = regmap_write(chip->regmap, ISL76682_REG_COMMAND, command);
+ if (ret)
+ return ret;
+
+ /* Need to wait for conversion time if ALS/IR mode enabled */
+ msleep(ISL76682_CONV_TIME_MS);
+
+ chip->command = command;
+ }
+
+ ret = regmap_bulk_read(chip->regmap, ISL76682_REG_ALSIR_L, data, 2);
+ *data &= ISL76682_ADC_MAX;
+ return ret;
+}
+
+static int isl76682_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct isl76682_chip *chip = iio_priv(indio_dev);
+ int i;
+
+ if (mask != IIO_CHAN_INFO_SCALE)
+ return -EINVAL;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(isl76682_range_table); i++) {
+ if (chan->type == IIO_LIGHT && val2 != isl76682_range_table[i].als)
+ continue;
+ if (chan->type == IIO_INTENSITY && val2 != isl76682_range_table[i].ir)
+ continue;
+
+ scoped_guard(mutex, &chip->lock)
+ chip->range = isl76682_range_table[i].range;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int isl76682_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct isl76682_chip *chip = iio_priv(indio_dev);
+ int ret;
+ int i;
+
+ guard(mutex)(&chip->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = isl76682_get(chip, false, val);
+ return (ret < 0) ? ret : IIO_VAL_INT;
+ case IIO_INTENSITY:
+ ret = isl76682_get(chip, true, val);
+ return (ret < 0) ? ret : IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(isl76682_range_table); i++) {
+ if (chip->range != isl76682_range_table[i].range)
+ continue;
+
+ *val = 0;
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val2 = isl76682_range_table[i].als;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_INTENSITY:
+ *val2 = isl76682_range_table[i].ir;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = ISL76682_INT_TIME_US;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int illuminance_scale_available[] = {
+ 0, 15000,
+ 0, 60000,
+ 0, 240000,
+ 0, 960000,
+};
+
+static int intensity_scale_available[] = {
+ 0, 10500,
+ 0, 42000,
+ 0, 168000,
+ 0, 673000,
+};
+
+static int isl76682_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *vals = illuminance_scale_available;
+ *length = ARRAY_SIZE(illuminance_scale_available);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ case IIO_INTENSITY:
+ *vals = intensity_scale_available;
+ *length = ARRAY_SIZE(intensity_scale_available);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec isl76682_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ }, {
+ .type = IIO_INTENSITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ }
+};
+
+static const struct iio_info isl76682_info = {
+ .read_avail = isl76682_read_avail,
+ .read_raw = isl76682_read_raw,
+ .write_raw = isl76682_write_raw,
+};
+
+static int isl76682_clear_configure_reg(struct isl76682_chip *chip)
+{
+ struct device *dev = regmap_get_device(chip->regmap);
+ int ret;
+
+ ret = regmap_write(chip->regmap, ISL76682_REG_COMMAND, 0x0);
+ if (ret < 0)
+ dev_err(dev, "Error %d clearing the CONFIGURE register\n", ret);
+
+ /*
+ * In the success case, the command register was zeroed out.
+ *
+ * In the error case, we do not know what state the command
+ * register is in, so we assume it is zeroed out. It will then
+ * be reprogrammed at the next data readout, and hopefully that
+ * reprogramming succeeds. This is very much a best-effort
+ * approach.
+ */
+ chip->command = 0;
+
+ return ret;
+}
+
+static void isl76682_reset_action(void *chip)
+{
+ isl76682_clear_configure_reg(chip);
+}
+
+static bool isl76682_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ISL76682_REG_ALSIR_L:
+ case ISL76682_REG_ALSIR_U:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config isl76682_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = isl76682_is_volatile_reg,
+ .max_register = ISL76682_NUM_REGS - 1,
+ .num_reg_defaults_raw = ISL76682_NUM_REGS,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int isl76682_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct isl76682_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+
+ mutex_init(&chip->lock);
+
+ chip->regmap = devm_regmap_init_i2c(client, &isl76682_regmap_config);
+ ret = PTR_ERR_OR_ZERO(chip->regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Error initializing regmap\n");
+
+ chip->range = ISL76682_COMMAND_RANGE_LUX_1K;
+
+ ret = isl76682_clear_configure_reg(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, isl76682_reset_action, chip);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &isl76682_info;
+ indio_dev->channels = isl76682_channels;
+ indio_dev->num_channels = ARRAY_SIZE(isl76682_channels);
+ indio_dev->name = "isl76682";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id isl76682_id[] = {
+ { "isl76682" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, isl76682_id);
+
+static const struct of_device_id isl76682_of_match[] = {
+ { .compatible = "isil,isl76682" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, isl76682_of_match);
+
+static struct i2c_driver isl76682_driver = {
+ .driver = {
+ .name = "isl76682",
+ .of_match_table = isl76682_of_match,
+ },
+ .probe = isl76682_probe,
+ .id_table = isl76682_id,
+};
+module_i2c_driver(isl76682_driver);
+
+MODULE_DESCRIPTION("ISL76682 Ambient Light Sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
new file mode 100644
index 000000000..fff1e8990
--- /dev/null
+++ b/drivers/iio/light/ltr390.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * IIO driver for Lite-On LTR390 ALS and UV sensor
+ * (7-bit I2C slave address 0x53)
+ *
+ * Based on the work of:
+ * Shreeya Patel and Shi Zhigang (LTRF216 Driver)
+ *
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Datasheet:
+ * https://optoelectronics.liteon.com/upload/download/DS86-2015-0004/LTR-390UV_Final_%20DS_V1%201.pdf
+ *
+ * TODO:
+ * - Support for configurable gain and resolution
+ * - Sensor suspend/resume support
+ * - Add support for reading the ALS
+ * - Interrupt support
+ */
+
+#include <linux/i2c.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+
+#include <asm/unaligned.h>
+
+#define LTR390_MAIN_CTRL 0x00
+#define LTR390_PART_ID 0x06
+#define LTR390_UVS_DATA 0x10
+
+#define LTR390_SW_RESET BIT(4)
+#define LTR390_UVS_MODE BIT(3)
+#define LTR390_SENSOR_ENABLE BIT(1)
+
+#define LTR390_PART_NUMBER_ID 0xb
+
+/*
+ * At 20-bit resolution (integration time: 400ms) and 18x gain, 2300 counts of
+ * the sensor are equal to 1 UV Index [Datasheet Page#8].
+ *
+ * For the default resolution of 18-bit (integration time: 100ms) and default
+ * gain of 3x, the counts/uvi are calculated as follows:
+ * 2300 * (3/18) * (100/400) = 95.83
+ */
+#define LTR390_COUNTS_PER_UVI 96
+
+/*
+ * The window factor is needed when the device sits under window glass coated
+ * with tinted ink. It compensates for the light loss due to the lower
+ * transmission rate of the window glass and helps in calculating lux.
+ */
+#define LTR390_WINDOW_FACTOR 1
+
+struct ltr390_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ /* Protects the device from simultaneous reads */
+ struct mutex lock;
+};
+
+static const struct regmap_config ltr390_regmap_config = {
+ .name = "ltr390",
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+};
+
+static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ u8 receive_buffer[3];
+
+ guard(mutex)(&data->lock);
+
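+ /* The sensor data spans three consecutive registers, LSB first. */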
+ ret = regmap_bulk_read(data->regmap, register_address, receive_buffer,
+ sizeof(receive_buffer));
+ if (ret) {
+ dev_err(dev, "failed to read measurement data");
+ return ret;
+ }
+
+ return get_unaligned_le24(receive_buffer);
+}
+
+static int ltr390_read_raw(struct iio_dev *iio_device,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ struct ltr390_data *data = iio_priv(iio_device);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ltr390_register_read(data, LTR390_UVS_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = LTR390_WINDOW_FACTOR;
+ *val2 = LTR390_COUNTS_PER_UVI;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ltr390_info = {
+ .read_raw = ltr390_read_raw,
+};
+
+static const struct iio_chan_spec ltr390_channel = {
+ .type = IIO_UVINDEX,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE)
+};
+
+static int ltr390_probe(struct i2c_client *client)
+{
+ struct ltr390_data *data;
+ struct iio_dev *indio_dev;
+ struct device *dev;
+ int ret, part_number;
+
+ dev = &client->dev;
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &ltr390_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "regmap initialization failed\n");
+
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->info = &ltr390_info;
+ indio_dev->channels = &ltr390_channel;
+ indio_dev->num_channels = 1;
+ indio_dev->name = "ltr390";
+
+ ret = regmap_read(data->regmap, LTR390_PART_ID, &part_number);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get sensor's part id\n");
+ /* Lower 4 bits of `part_number` change with hardware revisions */
+ if (part_number >> 4 != LTR390_PART_NUMBER_ID)
+ dev_info(dev, "received invalid product id: 0x%x\n", part_number);
+ dev_dbg(dev, "LTR390, product id: 0x%x\n", part_number);
+
+ /* Reset the sensor; the chip fails to respond to this command, so ignore any errors */
+ regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SW_RESET);
+
+ /* Wait for the registers to reset before proceeding */
+ usleep_range(1000, 2000);
+
+ ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL,
+ LTR390_SENSOR_ENABLE | LTR390_UVS_MODE);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable the sensor\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id ltr390_id[] = {
+ { "ltr390" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ltr390_id);
+
+static const struct of_device_id ltr390_of_table[] = {
+ { .compatible = "liteon,ltr390" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltr390_of_table);
+
+static struct i2c_driver ltr390_driver = {
+ .driver = {
+ .name = "ltr390",
+ .of_match_table = ltr390_of_table,
+ },
+ .probe = ltr390_probe,
+ .id_table = ltr390_id,
+};
+module_i2c_driver(ltr390_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Lite-On LTR390 ALS and UV sensor Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index 8de4dd849..68dc48420 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -234,7 +234,7 @@ static int ltrf216a_read_data(struct ltrf216a_data *data, u8 addr)
static int ltrf216a_get_lux(struct ltrf216a_data *data)
{
int ret, greendata;
- u64 lux, div;
+ u64 lux;
ret = ltrf216a_set_power_state(data, true);
if (ret)
@@ -246,10 +246,9 @@ static int ltrf216a_get_lux(struct ltrf216a_data *data)
ltrf216a_set_power_state(data, false);
- lux = greendata * 45 * LTRF216A_WIN_FAC * 100;
- div = data->als_gain_fac * data->int_time_fac * 100;
+ lux = greendata * 45 * LTRF216A_WIN_FAC;
- return div_u64(lux, div);
+ return lux;
}
static int ltrf216a_read_raw(struct iio_dev *indio_dev,
@@ -279,7 +278,8 @@ static int ltrf216a_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
*val = ret;
- return IIO_VAL_INT;
+ *val2 = data->als_gain_fac * data->int_time_fac;
+ return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_INT_TIME:
mutex_lock(&data->lock);
ret = ltrf216a_get_int_time(data, val, val2);
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index ed241598a..636432c45 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -472,7 +472,7 @@ static struct i2c_driver pa12203001_driver = {
.driver = {
.name = PA12203001_DRIVER_NAME,
.pm = &pa12203001_pm_ops,
- .acpi_match_table = ACPI_PTR(pa12203001_acpi_match),
+ .acpi_match_table = pa12203001_acpi_match,
},
.probe = pa12203001_probe,
.remove = pa12203001_remove,
diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c
index 6a6d77805..0f010eff1 100644
--- a/drivers/iio/light/rohm-bu27008.c
+++ b/drivers/iio/light/rohm-bu27008.c
@@ -130,6 +130,7 @@
* @BU27008_BLUE: Blue channel. Via data2 (when used).
* @BU27008_CLEAR: Clear channel. Via data2 or data3 (when used).
* @BU27008_IR: IR channel. Via data3 (when used).
+ * @BU27008_LUX: Illuminance channel, computed using RGB and IR.
* @BU27008_NUM_CHANS: Number of channel types.
*/
enum bu27008_chan_type {
@@ -138,6 +139,7 @@ enum bu27008_chan_type {
BU27008_BLUE,
BU27008_CLEAR,
BU27008_IR,
+ BU27008_LUX,
BU27008_NUM_CHANS
};
@@ -172,6 +174,8 @@ static const unsigned long bu27008_scan_masks[] = {
ALWAYS_SCANNABLE | BIT(BU27008_CLEAR) | BIT(BU27008_IR),
/* buffer is R, G, B, IR */
ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR),
+ /* buffer is R, G, B, IR, LUX */
+ ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR) | BIT(BU27008_LUX),
0
};
@@ -331,6 +335,19 @@ static const struct iio_chan_spec bu27008_channels[] = {
* Hence we don't advertise available ones either.
*/
BU27008_CHAN(IR, DATA3, 0),
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .channel = BU27008_LUX,
+ .scan_index = BU27008_LUX,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 64,
+ .storagebits = 64,
+ .endianness = IIO_CPU,
+ },
+ },
IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS),
};
@@ -1004,6 +1021,169 @@ static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
return ret;
}
+#define BU27008_LUX_DATA_RED 0
+#define BU27008_LUX_DATA_GREEN 1
+#define BU27008_LUX_DATA_BLUE 2
+#define BU27008_LUX_DATA_IR 3
+#define LUX_DATA_SIZE (BU27008_NUM_HW_CHANS * sizeof(__le16))
+
+static int bu27008_read_lux_chans(struct bu27008_data *data, unsigned int time,
+ __le16 *chan_data)
+{
+ int ret, chan_sel, tmpret, valid;
+
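+ /*
+ * Shift the selector value into its field: ffs() of the mask gives
+ * the field's lowest bit, making this a runtime FIELD_PREP().
+ */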
+ chan_sel = BU27008_BLUE2_IR3 << (ffs(data->cd->chan_sel_mask) - 1);
+
+ ret = regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
+ data->cd->chan_sel_mask, chan_sel);
+ if (ret)
+ return ret;
+
+ ret = bu27008_meas_set(data, true);
+ if (ret)
+ return ret;
+
+ msleep(time / USEC_PER_MSEC);
+
+ ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
+ valid, (valid & BU27008_MASK_VALID),
+ BU27008_VALID_RESULT_WAIT_QUANTA_US,
+ BU27008_MAX_VALID_RESULT_WAIT_US);
+ if (ret)
+ goto out;
+
+ ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, chan_data,
+ LUX_DATA_SIZE);
+ if (ret)
+ goto out;
+out:
+ tmpret = bu27008_meas_set(data, false);
+ if (tmpret)
+ dev_warn(data->dev, "Stopping measurement failed\n");
+
+ return ret;
+}
+
+/*
+ * The following equations for computing lux from the register values were
+ * provided by ROHM HW colleagues:
+ *
+ * Red   = RedData   * 1024 / Gain * 20 / meas_mode
+ * Green = GreenData * 1024 / Gain * 20 / meas_mode
+ * Blue  = BlueData  * 1024 / Gain * 20 / meas_mode
+ * IR    = IrData    * 1024 / Gain * 20 / meas_mode
+ *
+ * where meas_mode is the integration time in ms / 10
+ *
+ * IRratio = (IR > 0.18 * Green) ? 0 : 1
+ *
+ * Lx = max(c1*Red + c2*Green + c3*Blue,0)
+ *
+ * for
+ * IRratio 0: c1 = -0.00002237, c2 = 0.0003219, c3 = -0.000120371
+ * IRratio 1: c1 = -0.00001074, c2 = 0.000305415, c3 = -0.000129367
+ */
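+
+/*
+ * In bu27008_calc_nlux() below, c1/c2/c3 are these coefficients scaled
+ * by 10^9, so the computed value is in nanolux; this matches the
+ * IIO_VAL_INT_PLUS_NANO scale of 10^-9 reported for the lux channel.
+ */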
+
+/*
+ * The max chan data is 0xffff. When we multiply it by 1024 * 20, we'll get
+ * 0x4FFFB000 which still fits in 32-bit integer. This won't overflow.
+ */
+#define NORM_CHAN_DATA_FOR_LX_CALC(chan, gain, time) (le16_to_cpu(chan) * \
+ 1024 * 20 / (gain) / (time))
+static u64 bu27008_calc_nlux(struct bu27008_data *data, __le16 *lux_data,
+ unsigned int gain, unsigned int gain_ir, unsigned int time)
+{
+ unsigned int red, green, blue, ir;
+ s64 c1, c2, c3, nlux;
+
+ time /= 10000;
+ ir = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_IR], gain_ir, time);
+ red = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_RED], gain, time);
+ green = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_GREEN], gain, time);
+ blue = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_BLUE], gain, time);
+
+ if ((u64)ir * 100LLU > (u64)green * 18LLU) {
+ c1 = -22370;
+ c2 = 321900;
+ c3 = -120371;
+ } else {
+ c1 = -10740;
+ c2 = 305415;
+ c3 = -129367;
+ }
+ nlux = c1 * red + c2 * green + c3 * blue;
+
+ return max_t(s64, 0, nlux);
+}
+
+static int bu27008_get_time_n_gains(struct bu27008_data *data,
+ unsigned int *gain, unsigned int *gain_ir, unsigned int *time)
+{
+ int ret;
+
+ ret = bu27008_get_gain(data, &data->gts, gain);
+ if (ret < 0)
+ return ret;
+
+ ret = bu27008_get_gain(data, &data->gts_ir, gain_ir);
+ if (ret < 0)
+ return ret;
+
+ ret = bu27008_get_int_time_us(data);
+ if (ret < 0)
+ return ret;
+
+ /* Max integration time is 400000. Fits in signed int. */
+ *time = ret;
+
+ return 0;
+}
+
+struct bu27008_buf {
+ __le16 chan[BU27008_NUM_HW_CHANS];
+ u64 lux __aligned(8);
+ s64 ts __aligned(8);
+};
+
+static int bu27008_buffer_fill_lux(struct bu27008_data *data,
+ struct bu27008_buf *raw)
+{
+ unsigned int gain, gain_ir, time;
+ int ret;
+
+ ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
+ if (ret)
+ return ret;
+
+ raw->lux = bu27008_calc_nlux(data, raw->chan, gain, gain_ir, time);
+
+ return 0;
+}
+
+static int bu27008_read_lux(struct bu27008_data *data, struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ __le16 lux_data[BU27008_NUM_HW_CHANS];
+ unsigned int gain, gain_ir, time;
+ u64 nlux;
+ int ret;
+
+ ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time);
+ if (ret)
+ return ret;
+
+ ret = bu27008_read_lux_chans(data, time, lux_data);
+ if (ret)
+ return ret;
+
+ nlux = bu27008_calc_nlux(data, lux_data, gain, gain_ir, time);
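+ /* IIO_VAL_INT_64: low 32 bits go in val, high 32 bits in val2. */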
+ *val = (int)nlux;
+ *val2 = nlux >> 32LLU;
+
+ return IIO_VAL_INT_64;
+}
+
static int bu27008_read_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -1018,7 +1198,10 @@ static int bu27008_read_raw(struct iio_dev *idev,
return -EBUSY;
mutex_lock(&data->mutex);
- ret = bu27008_read_one(data, idev, chan, val, val2);
+ if (chan->type == IIO_LIGHT)
+ ret = bu27008_read_lux(data, idev, chan, val, val2);
+ else
+ ret = bu27008_read_one(data, idev, chan, val, val2);
mutex_unlock(&data->mutex);
iio_device_release_direct_mode(idev);
@@ -1026,6 +1209,11 @@ static int bu27008_read_raw(struct iio_dev *idev,
return ret;
case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_LIGHT) {
+ *val = 0;
+ *val2 = 1;
+ return IIO_VAL_INT_PLUS_NANO;
+ }
ret = bu27008_get_scale(data, chan->scan_index == BU27008_IR,
val, val2);
if (ret)
@@ -1236,10 +1424,7 @@ static irqreturn_t bu27008_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *idev = pf->indio_dev;
struct bu27008_data *data = iio_priv(idev);
- struct {
- __le16 chan[BU27008_NUM_HW_CHANS];
- s64 ts __aligned(8);
- } raw;
+ struct bu27008_buf raw;
int ret, dummy;
memset(&raw, 0, sizeof(raw));
@@ -1257,6 +1442,12 @@ static irqreturn_t bu27008_trigger_handler(int irq, void *p)
if (ret < 0)
goto err_read;
+ if (test_bit(BU27008_LUX, idev->active_scan_mask)) {
+ ret = bu27008_buffer_fill_lux(data, &raw);
+ if (ret)
+ goto err_read;
+ }
+
iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
err_read:
iio_trigger_notify_done(idev->trig);
diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c
new file mode 100644
index 000000000..05d4c0e90
--- /dev/null
+++ b/drivers/iio/light/veml6075.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Vishay VEML6075 UVA and UVB light sensor
+ *
+ * Copyright 2023 Javier Carrasco <javier.carrasco.cruz@gmail.com>
+ *
+ * 7-bit I2C slave, address 0x10
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+
+#define VEML6075_CMD_CONF 0x00 /* configuration register */
+#define VEML6075_CMD_UVA 0x07 /* UVA channel */
+#define VEML6075_CMD_UVB 0x09 /* UVB channel */
+#define VEML6075_CMD_COMP1 0x0A /* visible light compensation */
+#define VEML6075_CMD_COMP2 0x0B /* infrared light compensation */
+#define VEML6075_CMD_ID 0x0C /* device ID */
+
+#define VEML6075_CONF_IT GENMASK(6, 4) /* integration time */
+#define VEML6075_CONF_HD BIT(3) /* dynamic setting */
+#define VEML6075_CONF_TRIG BIT(2) /* trigger */
+#define VEML6075_CONF_AF BIT(1) /* active force enable */
+#define VEML6075_CONF_SD BIT(0) /* shutdown */
+
+#define VEML6075_IT_50_MS 0x00
+#define VEML6075_IT_100_MS 0x01
+#define VEML6075_IT_200_MS 0x02
+#define VEML6075_IT_400_MS 0x03
+#define VEML6075_IT_800_MS 0x04
+
+#define VEML6075_AF_DISABLE 0x00
+#define VEML6075_AF_ENABLE 0x01
+
+#define VEML6075_SD_DISABLE 0x00
+#define VEML6075_SD_ENABLE 0x01
+
+/* Open-air coefficients and responsivity */
+#define VEML6075_A_COEF 2220
+#define VEML6075_B_COEF 1330
+#define VEML6075_C_COEF 2950
+#define VEML6075_D_COEF 1740
+#define VEML6075_UVA_RESP 1461
+#define VEML6075_UVB_RESP 2591
+
+static const int veml6075_it_ms[] = { 50, 100, 200, 400, 800 };
+
+struct veml6075_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ /*
+ * prevent integration time modification and triggering
+ * measurements while a measurement is underway.
+ */
+ struct mutex lock;
+};
+
+/* channel number */
+enum veml6075_chan {
+ CH_UVA,
+ CH_UVB,
+};
+
+static const struct iio_chan_spec veml6075_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .channel = CH_UVA,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_UVA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel = CH_UVB,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_UVB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+ {
+ .type = IIO_UVINDEX,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+};
+
+static int veml6075_request_measurement(struct veml6075_data *data)
+{
+ int ret, conf, int_time;
+
+ ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
+ if (ret < 0)
+ return ret;
+
+ /* disable shutdown and trigger measurement */
+ ret = regmap_write(data->regmap, VEML6075_CMD_CONF,
+ (conf | VEML6075_CONF_TRIG) & ~VEML6075_CONF_SD);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * A measurement requires between 1.30 and 1.40 times the integration
+ * time for all possible configurations. Using a 1.50 factor simplifies
+ * operations and ensures reliability under all circumstances.
+ */
+ int_time = veml6075_it_ms[FIELD_GET(VEML6075_CONF_IT, conf)];
+ msleep(int_time + (int_time / 2));
+
+ /* shutdown again, data registers are still accessible */
+ return regmap_update_bits(data->regmap, VEML6075_CMD_CONF,
+ VEML6075_CONF_SD, VEML6075_CONF_SD);
+}
+
+static int veml6075_uva_comp(int raw_uva, int comp1, int comp2)
+{
+ int comp1a_c, comp2a_c, uva_comp;
+
+ comp1a_c = (comp1 * VEML6075_A_COEF) / 1000U;
+ comp2a_c = (comp2 * VEML6075_B_COEF) / 1000U;
+ uva_comp = raw_uva - comp1a_c - comp2a_c;
+
+ return clamp_val(uva_comp, 0, U16_MAX);
+}
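+
+/*
+ * E.g. with hypothetical counts raw_uva = 1000, comp1 = 100, comp2 = 50:
+ * comp1a_c = 100 * 2220 / 1000 = 222, comp2a_c = 50 * 1330 / 1000 = 66,
+ * so uva_comp = 1000 - 222 - 66 = 712. veml6075_uvb_comp() below applies
+ * the same correction with the C and D coefficients.
+ */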
+
+static int veml6075_uvb_comp(int raw_uvb, int comp1, int comp2)
+{
+ int comp1b_c, comp2b_c, uvb_comp;
+
+ comp1b_c = (comp1 * VEML6075_C_COEF) / 1000U;
+ comp2b_c = (comp2 * VEML6075_D_COEF) / 1000U;
+ uvb_comp = raw_uvb - comp1b_c - comp2b_c;
+
+ return clamp_val(uvb_comp, 0, U16_MAX);
+}
+
+static int veml6075_read_comp(struct veml6075_data *data, int *c1, int *c2)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, VEML6075_CMD_COMP1, c1);
+ if (ret < 0)
+ return ret;
+
+ return regmap_read(data->regmap, VEML6075_CMD_COMP2, c2);
+}
+
+static int veml6075_read_uv_direct(struct veml6075_data *data, int chan,
+ int *val)
+{
+ int c1, c2, ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = veml6075_request_measurement(data);
+ if (ret < 0)
+ return ret;
+
+ ret = veml6075_read_comp(data, &c1, &c2);
+ if (ret < 0)
+ return ret;
+
+ switch (chan) {
+ case CH_UVA:
+ ret = regmap_read(data->regmap, VEML6075_CMD_UVA, val);
+ if (ret < 0)
+ return ret;
+
+ *val = veml6075_uva_comp(*val, c1, c2);
+ return IIO_VAL_INT;
+ case CH_UVB:
+ ret = regmap_read(data->regmap, VEML6075_CMD_UVB, val);
+ if (ret < 0)
+ return ret;
+
+ *val = veml6075_uvb_comp(*val, c1, c2);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6075_read_int_time_index(struct veml6075_data *data)
+{
+ int ret, conf;
+
+ ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(VEML6075_CONF_IT, conf);
+}
+
+static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val)
+{
+ int int_index;
+
+ guard(mutex)(&data->lock);
+ int_index = veml6075_read_int_time_index(data);
+ if (int_index < 0)
+ return int_index;
+
+ *val = veml6075_it_ms[int_index];
+
+ return IIO_VAL_INT;
+}
+
+static int veml6075_get_uvi_micro(struct veml6075_data *data, int uva_comp,
+ int uvb_comp)
+{
+ int uvia_micro = uva_comp * VEML6075_UVA_RESP;
+ int uvib_micro = uvb_comp * VEML6075_UVB_RESP;
+ int int_index;
+
+ int_index = veml6075_read_int_time_index(data);
+ if (int_index < 0)
+ return int_index;
+
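+ /*
+ * Scale the summed micro-UVI down by 2 << index for the integration
+ * times above 50 ms.
+ */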
+ switch (int_index) {
+ case VEML6075_IT_50_MS:
+ return uvia_micro + uvib_micro;
+ case VEML6075_IT_100_MS:
+ case VEML6075_IT_200_MS:
+ case VEML6075_IT_400_MS:
+ case VEML6075_IT_800_MS:
+ return (uvia_micro + uvib_micro) / (2 << int_index);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6075_read_uvi(struct veml6075_data *data, int *val, int *val2)
+{
+ int ret, c1, c2, uva, uvb, uvi_micro;
+
+ guard(mutex)(&data->lock);
+
+ ret = veml6075_request_measurement(data);
+ if (ret < 0)
+ return ret;
+
+ ret = veml6075_read_comp(data, &c1, &c2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, VEML6075_CMD_UVA, &uva);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, VEML6075_CMD_UVB, &uvb);
+ if (ret < 0)
+ return ret;
+
+ uvi_micro = veml6075_get_uvi_micro(data, veml6075_uva_comp(uva, c1, c2),
+ veml6075_uvb_comp(uvb, c1, c2));
+ if (uvi_micro < 0)
+ return uvi_micro;
+
+ *val = uvi_micro / MICRO;
+ *val2 = uvi_micro % MICRO;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6075_read_responsivity(int chan, int *val, int *val2)
+{
+ /* scale = 1 / resp */
+ switch (chan) {
+ case CH_UVA:
+ /* resp = 0.93 c/uW/cm2: scale = 1.075268817 */
+ *val = 1;
+ *val2 = 75268817;
+ return IIO_VAL_INT_PLUS_NANO;
+ case CH_UVB:
+ /* resp = 2.1 c/uW/cm2: scale = 0.476190476 */
+ *val = 0;
+ *val2 = 476190476;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6075_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(veml6075_it_ms);
+ *vals = veml6075_it_ms;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6075_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct veml6075_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return veml6075_read_uv_direct(data, chan->channel, val);
+ case IIO_CHAN_INFO_PROCESSED:
+ return veml6075_read_uvi(data, val, val2);
+ case IIO_CHAN_INFO_INT_TIME:
+ return veml6075_read_int_time_ms(data, val);
+ case IIO_CHAN_INFO_SCALE:
+ return veml6075_read_responsivity(chan->channel, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6075_write_int_time_ms(struct veml6075_data *data, int val)
+{
+ int i = ARRAY_SIZE(veml6075_it_ms);
+
+ guard(mutex)(&data->lock);
+
+ while (i-- > 0) {
+ if (val == veml6075_it_ms[i])
+ break;
+ }
+ if (i < 0)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap, VEML6075_CMD_CONF,
+ VEML6075_CONF_IT,
+ FIELD_PREP(VEML6075_CONF_IT, i));
+}
+
+static int veml6075_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct veml6075_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return veml6075_write_int_time_ms(data, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info veml6075_info = {
+ .read_avail = veml6075_read_avail,
+ .read_raw = veml6075_read_raw,
+ .write_raw = veml6075_write_raw,
+};
+
+static bool veml6075_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case VEML6075_CMD_CONF:
+ case VEML6075_CMD_UVA:
+ case VEML6075_CMD_UVB:
+ case VEML6075_CMD_COMP1:
+ case VEML6075_CMD_COMP2:
+ case VEML6075_CMD_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool veml6075_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case VEML6075_CMD_CONF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config veml6075_regmap_config = {
+ .name = "veml6075",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = VEML6075_CMD_ID,
+ .readable_reg = veml6075_readable_reg,
+ .writeable_reg = veml6075_writable_reg,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int veml6075_probe(struct i2c_client *client)
+{
+ struct veml6075_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int config, ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &veml6075_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ mutex_init(&data->lock);
+
+ indio_dev->name = "veml6075";
+ indio_dev->info = &veml6075_info;
+ indio_dev->channels = veml6075_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6075_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_regulator_get_enable(&client->dev, "vdd");
+ if (ret < 0)
+ return ret;
+
+ /* default: 100ms integration time, active force enable, shutdown */
+ config = FIELD_PREP(VEML6075_CONF_IT, VEML6075_IT_100_MS) |
+ FIELD_PREP(VEML6075_CONF_AF, VEML6075_AF_ENABLE) |
+ FIELD_PREP(VEML6075_CONF_SD, VEML6075_SD_ENABLE);
+ ret = regmap_write(data->regmap, VEML6075_CMD_CONF, config);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id veml6075_id[] = {
+ { "veml6075" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6075_id);
+
+static const struct of_device_id veml6075_of_match[] = {
+ { .compatible = "vishay,veml6075" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, veml6075_of_match);
+
+static struct i2c_driver veml6075_driver = {
+ .driver = {
+ .name = "veml6075",
+ .of_match_table = veml6075_of_match,
+ },
+ .probe = veml6075_probe,
+ .id_table = veml6075_id,
+};
+
+module_i2c_driver(veml6075_driver);
+
+MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>");
+MODULE_DESCRIPTION("Vishay VEML6075 UVA and UVB light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index e8c4ca142..218b1ce07 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -497,17 +497,13 @@ static int tmag5273_set_operating_mode(struct tmag5273_data *data,
static void tmag5273_read_device_property(struct tmag5273_data *data)
{
struct device *dev = data->dev;
- const char *str;
int ret;
data->angle_measurement = TMAG5273_ANGLE_EN_X_Y;
- ret = device_property_read_string(dev, "ti,angle-measurement", &str);
- if (ret)
- return;
-
- ret = match_string(tmag5273_angle_names,
- ARRAY_SIZE(tmag5273_angle_names), str);
+ ret = device_property_match_property_string(dev, "ti,angle-measurement",
+ tmag5273_angle_names,
+ ARRAY_SIZE(tmag5273_angle_names));
if (ret >= 0)
data->angle_measurement = ret;
}
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 95efa32e4..79adfd059 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -109,6 +109,28 @@ config HP03
To compile this driver as a module, choose M here: the module
will be called hp03.
+config HSC030PA
+ tristate "Honeywell HSC/SSC TruStability pressure sensor series"
+ depends on (I2C || SPI_MASTER)
+ select HSC030PA_I2C if I2C
+ select HSC030PA_SPI if SPI_MASTER
+ help
+ Say Y here to build support for the Honeywell TruStability
+ HSC and SSC pressure and temperature sensor series.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hsc030pa.
+
+config HSC030PA_I2C
+ tristate
+ depends on HSC030PA
+ depends on I2C
+
+config HSC030PA_SPI
+ tristate
+ depends on HSC030PA
+ depends on SPI_MASTER
+
config ICP10100
tristate "InvenSense ICP-101xx pressure and temperature sensor"
depends on I2C
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 436aec7e6..b0f8b9466 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -15,6 +15,9 @@ obj-$(CONFIG_DPS310) += dps310.o
obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_HP03) += hp03.o
+obj-$(CONFIG_HSC030PA) += hsc030pa.o
+obj-$(CONFIG_HSC030PA_I2C) += hsc030pa_i2c.o
+obj-$(CONFIG_HSC030PA_SPI) += hsc030pa_spi.o
obj-$(CONFIG_ICP10100) += icp10100.o
obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index a2ef1373a..fe8734468 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -13,6 +13,7 @@
* https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp280-ds001.pdf
* https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
* https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp388-ds001.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp390-ds002.pdf
* https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp581-ds004.pdf
*
* Notice:
@@ -794,10 +795,12 @@ static int bmp280_chip_config(struct bmp280_data *data)
}
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID };
const struct bmp280_chip_info bmp280_chip_info = {
.id_reg = BMP280_REG_ID,
- .chip_id = BMP280_CHIP_ID,
+ .chip_id = bmp280_chip_ids,
+ .num_chip_id = ARRAY_SIZE(bmp280_chip_ids),
.regmap_config = &bmp280_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
@@ -846,9 +849,12 @@ static int bme280_chip_config(struct bmp280_data *data)
return bmp280_chip_config(data);
}
+static const u8 bme280_chip_ids[] = { BME280_CHIP_ID };
+
const struct bmp280_chip_info bme280_chip_info = {
.id_reg = BMP280_REG_ID,
- .chip_id = BME280_CHIP_ID,
+ .chip_id = bme280_chip_ids,
+ .num_chip_id = ARRAY_SIZE(bme280_chip_ids),
.regmap_config = &bmp280_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
@@ -920,7 +926,7 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
}
/*
- * Returns temperature in Celsius dregrees, resolution is 0.01º C. Output value of
+ * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value of
* "5123" equals 51.2º C. t_fine carries fine temperature as global value.
*
* Taken from datasheet, Section Appendix 9, "Compensation formula" and repo
@@ -1220,10 +1226,12 @@ static int bmp380_chip_config(struct bmp280_data *data)
static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
+static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID, BMP390_CHIP_ID };
const struct bmp280_chip_info bmp380_chip_info = {
.id_reg = BMP380_REG_ID,
- .chip_id = BMP380_CHIP_ID,
+ .chip_id = bmp380_chip_ids,
+ .num_chip_id = ARRAY_SIZE(bmp380_chip_ids),
.regmap_config = &bmp380_regmap_config,
.start_up_time = 2000,
.channels = bmp380_channels,
@@ -1385,7 +1393,7 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
/*
* Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We reescale by x1000 to return milli Celsius
+ * form down 2^16. We rescale by x1000 to return milli Celsius
* to respect IIO ABI.
*/
*val = raw_temp * 1000;
@@ -1412,7 +1420,7 @@ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
}
/*
* Pressure is returned in Pascals in fractional form down 2^16.
- * We reescale /1000 to convert to kilopascal to respect IIO ABI.
+ * We rescale /1000 to convert to kilopascal to respect IIO ABI.
*/
*val = raw_press;
*val2 = 64000; /* 2^6 * 1000 */
@@ -1720,10 +1728,12 @@ static int bmp580_chip_config(struct bmp280_data *data)
}
static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 };
+static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT };
const struct bmp280_chip_info bmp580_chip_info = {
.id_reg = BMP580_REG_CHIP_ID,
- .chip_id = BMP580_CHIP_ID,
+ .chip_id = bmp580_chip_ids,
+ .num_chip_id = ARRAY_SIZE(bmp580_chip_ids),
.regmap_config = &bmp580_regmap_config,
.start_up_time = 2000,
.channels = bmp380_channels,
@@ -1983,10 +1993,12 @@ static int bmp180_chip_config(struct bmp280_data *data)
static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
+static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID };
const struct bmp280_chip_info bmp180_chip_info = {
.id_reg = BMP280_REG_ID,
- .chip_id = BMP180_CHIP_ID,
+ .chip_id = bmp180_chip_ids,
+ .num_chip_id = ARRAY_SIZE(bmp180_chip_ids),
.regmap_config = &bmp180_regmap_config,
.start_up_time = 2000,
.channels = bmp280_channels,
@@ -2077,6 +2089,7 @@ int bmp280_common_probe(struct device *dev,
struct bmp280_data *data;
struct gpio_desc *gpiod;
unsigned int chip_id;
+ unsigned int i;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -2142,12 +2155,17 @@ int bmp280_common_probe(struct device *dev,
ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id);
if (ret < 0)
return ret;
- if (chip_id != data->chip_info->chip_id) {
- dev_err(dev, "bad chip id: expected %x got %x\n",
- data->chip_info->chip_id, chip_id);
- return -EINVAL;
+
+ for (i = 0; i < data->chip_info->num_chip_id; i++) {
+ if (chip_id == data->chip_info->chip_id[i]) {
+ dev_info(dev, "0x%x is a known chip id for %s\n", chip_id, name);
+ break;
+ }
}
+ if (i == data->chip_info->num_chip_id)
+ dev_warn(dev, "bad chip id: 0x%x is not a known chip id\n", chip_id);
+
if (data->chip_info->preinit) {
ret = data->chip_info->preinit(data);
if (ret)
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index dbe630ad0..34e3bc758 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -7,13 +7,11 @@
static int bmp280_i2c_probe(struct i2c_client *client)
{
- struct regmap *regmap;
- const struct bmp280_chip_info *chip_info;
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ const struct bmp280_chip_info *chip_info;
+ struct regmap *regmap;
- chip_info = device_get_match_data(&client->dev);
- if (!chip_info)
- chip_info = (const struct bmp280_chip_info *) id->driver_data;
+ chip_info = i2c_get_match_data(client);
regmap = devm_regmap_init_i2c(client, chip_info->regmap_config);
if (IS_ERR(regmap)) {
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 967de99c1..a444d4b29 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -4,6 +4,7 @@
*
* Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/err.h>
@@ -14,8 +15,7 @@
static int bmp280_regmap_spi_write(void *context, const void *data,
size_t count)
{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *spi = to_spi_device(context);
u8 buf[2];
memcpy(buf, data, 2);
@@ -31,12 +31,39 @@ static int bmp280_regmap_spi_write(void *context, const void *data,
static int bmp280_regmap_spi_read(void *context, const void *reg,
size_t reg_size, void *val, size_t val_size)
{
- struct device *dev = context;
- struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *spi = to_spi_device(context);
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
+static int bmp380_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct spi_device *spi = to_spi_device(context);
+ u8 rx_buf[4];
+ ssize_t status;
+
+ /*
+ * Maximum number of consecutive bytes read for a temperature or
+ * pressure measurement is 3.
+ */
+ if (val_size > 3)
+ return -EINVAL;
+
+ /*
+ * According to the BMP3xx datasheets, for a basic SPI read operation,
+ * the first byte needs to be dropped and the rest are the requested
+ * data.
+ */
+ status = spi_write_then_read(spi, reg, 1, rx_buf, val_size + 1);
+ if (status)
+ return status;
+
+ memcpy(val, rx_buf + 1, val_size);
+
+ return 0;
+}
+
static struct regmap_bus bmp280_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp280_regmap_spi_read,
@@ -44,10 +71,19 @@ static struct regmap_bus bmp280_regmap_bus = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static struct regmap_bus bmp380_regmap_bus = {
+ .write = bmp280_regmap_spi_write,
+ .read = bmp380_regmap_spi_read,
+ .read_flag_mask = BIT(7),
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
static int bmp280_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct bmp280_chip_info *chip_info;
+ struct regmap_bus *bmp_regmap_bus;
struct regmap *regmap;
int ret;
@@ -58,12 +94,20 @@ static int bmp280_spi_probe(struct spi_device *spi)
return ret;
}
- chip_info = device_get_match_data(&spi->dev);
- if (!chip_info)
- chip_info = (const struct bmp280_chip_info *) id->driver_data;
+ chip_info = spi_get_device_match_data(spi);
+
+ switch (chip_info->chip_id[0]) {
+ case BMP380_CHIP_ID:
+ case BMP390_CHIP_ID:
+ bmp_regmap_bus = &bmp380_regmap_bus;
+ break;
+ default:
+ bmp_regmap_bus = &bmp280_regmap_bus;
+ break;
+ }
regmap = devm_regmap_init(&spi->dev,
- &bmp280_regmap_bus,
+ bmp_regmap_bus,
&spi->dev,
chip_info->regmap_config);
if (IS_ERR(regmap)) {
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 5c0563ce7..4012387d7 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -292,6 +292,7 @@
#define BMP580_CHIP_ID_ALT 0x51
#define BMP180_CHIP_ID 0x55
#define BMP280_CHIP_ID 0x58
+#define BMP390_CHIP_ID 0x60
#define BME280_CHIP_ID 0x60
#define BMP280_SOFT_RESET_VAL 0xB6
@@ -410,7 +411,7 @@ struct bmp280_data {
__le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
__be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
- /* Miscellaneous, endianess-aware data buffers */
+ /* Miscellaneous, endianness-aware data buffers */
__le16 le16;
__be16 be16;
} __aligned(IIO_DMA_MINALIGN);
@@ -418,7 +419,8 @@ struct bmp280_data {
struct bmp280_chip_info {
unsigned int id_reg;
- const unsigned int chip_id;
+ const u8 *chip_id;
+ int num_chip_id;
const struct regmap_config *regmap_config;
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 28c8269ba..0bba4c5a8 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -250,18 +250,17 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private)
struct dlh_state *st = iio_priv(indio_dev);
int ret;
unsigned int chn, i = 0;
- __be32 tmp_buf[2];
+ __be32 tmp_buf[2] = { };
ret = dlh_start_capture_and_read(st);
if (ret)
goto out;
for_each_set_bit(chn, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- memcpy(tmp_buf + i,
+ indio_dev->masklength) {
+ memcpy(&tmp_buf[i++],
&st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
DLH_NUM_DATA_BYTES);
- i++;
}
iio_push_to_buffers(indio_dev, tmp_buf);
diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c
new file mode 100644
index 000000000..d6a51f0c3
--- /dev/null
+++ b/drivers/iio/pressure/hsc030pa.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/init.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <asm/unaligned.h>
+
+#include "hsc030pa.h"
+
+/*
+ * HSC_PRESSURE_TRIPLET_LEN - length of the string that defines the
+ * pressure range, measurement unit and type as per the part nomenclature.
+ * Consult honeywell,pressure-triplet in the bindings file for details.
+ */
+#define HSC_PRESSURE_TRIPLET_LEN 6
+#define HSC_STATUS_MASK GENMASK(7, 6)
+#define HSC_TEMPERATURE_MASK GENMASK(15, 5)
+#define HSC_PRESSURE_MASK GENMASK(29, 16)
+
+struct hsc_func_spec {
+ u32 output_min;
+ u32 output_max;
+};
+
+/*
+ * function A: 10% - 90% of 2^14
+ * function B: 5% - 95% of 2^14
+ * function C: 5% - 85% of 2^14
+ * function F: 4% - 94% of 2^14
+ */
+static const struct hsc_func_spec hsc_func_spec[] = {
+ [HSC_FUNCTION_A] = { .output_min = 1638, .output_max = 14746 },
+ [HSC_FUNCTION_B] = { .output_min = 819, .output_max = 15565 },
+ [HSC_FUNCTION_C] = { .output_min = 819, .output_max = 13926 },
+ [HSC_FUNCTION_F] = { .output_min = 655, .output_max = 15401 },
+};
+
+enum hsc_variants {
+ HSC001BA = 0x00, HSC1_6BA = 0x01, HSC2_5BA = 0x02, HSC004BA = 0x03,
+ HSC006BA = 0x04, HSC010BA = 0x05, HSC1_6MD = 0x06, HSC2_5MD = 0x07,
+ HSC004MD = 0x08, HSC006MD = 0x09, HSC010MD = 0x0a, HSC016MD = 0x0b,
+ HSC025MD = 0x0c, HSC040MD = 0x0d, HSC060MD = 0x0e, HSC100MD = 0x0f,
+ HSC160MD = 0x10, HSC250MD = 0x11, HSC400MD = 0x12, HSC600MD = 0x13,
+ HSC001BD = 0x14, HSC1_6BD = 0x15, HSC2_5BD = 0x16, HSC004BD = 0x17,
+ HSC2_5MG = 0x18, HSC004MG = 0x19, HSC006MG = 0x1a, HSC010MG = 0x1b,
+ HSC016MG = 0x1c, HSC025MG = 0x1d, HSC040MG = 0x1e, HSC060MG = 0x1f,
+ HSC100MG = 0x20, HSC160MG = 0x21, HSC250MG = 0x22, HSC400MG = 0x23,
+ HSC600MG = 0x24, HSC001BG = 0x25, HSC1_6BG = 0x26, HSC2_5BG = 0x27,
+ HSC004BG = 0x28, HSC006BG = 0x29, HSC010BG = 0x2a, HSC100KA = 0x2b,
+ HSC160KA = 0x2c, HSC250KA = 0x2d, HSC400KA = 0x2e, HSC600KA = 0x2f,
+ HSC001GA = 0x30, HSC160LD = 0x31, HSC250LD = 0x32, HSC400LD = 0x33,
+ HSC600LD = 0x34, HSC001KD = 0x35, HSC1_6KD = 0x36, HSC2_5KD = 0x37,
+ HSC004KD = 0x38, HSC006KD = 0x39, HSC010KD = 0x3a, HSC016KD = 0x3b,
+ HSC025KD = 0x3c, HSC040KD = 0x3d, HSC060KD = 0x3e, HSC100KD = 0x3f,
+ HSC160KD = 0x40, HSC250KD = 0x41, HSC400KD = 0x42, HSC250LG = 0x43,
+ HSC400LG = 0x44, HSC600LG = 0x45, HSC001KG = 0x46, HSC1_6KG = 0x47,
+ HSC2_5KG = 0x48, HSC004KG = 0x49, HSC006KG = 0x4a, HSC010KG = 0x4b,
+ HSC016KG = 0x4c, HSC025KG = 0x4d, HSC040KG = 0x4e, HSC060KG = 0x4f,
+ HSC100KG = 0x50, HSC160KG = 0x51, HSC250KG = 0x52, HSC400KG = 0x53,
+ HSC600KG = 0x54, HSC001GG = 0x55, HSC015PA = 0x56, HSC030PA = 0x57,
+ HSC060PA = 0x58, HSC100PA = 0x59, HSC150PA = 0x5a, HSC0_5ND = 0x5b,
+ HSC001ND = 0x5c, HSC002ND = 0x5d, HSC004ND = 0x5e, HSC005ND = 0x5f,
+ HSC010ND = 0x60, HSC020ND = 0x61, HSC030ND = 0x62, HSC001PD = 0x63,
+ HSC005PD = 0x64, HSC015PD = 0x65, HSC030PD = 0x66, HSC060PD = 0x67,
+ HSC001NG = 0x68, HSC002NG = 0x69, HSC004NG = 0x6a, HSC005NG = 0x6b,
+ HSC010NG = 0x6c, HSC020NG = 0x6d, HSC030NG = 0x6e, HSC001PG = 0x6f,
+ HSC005PG = 0x70, HSC015PG = 0x71, HSC030PG = 0x72, HSC060PG = 0x73,
+ HSC100PG = 0x74, HSC150PG = 0x75, HSC_VARIANTS_MAX
+};
+
+static const char * const hsc_triplet_variants[HSC_VARIANTS_MAX] = {
+ [HSC001BA] = "001BA", [HSC1_6BA] = "1.6BA", [HSC2_5BA] = "2.5BA",
+ [HSC004BA] = "004BA", [HSC006BA] = "006BA", [HSC010BA] = "010BA",
+ [HSC1_6MD] = "1.6MD", [HSC2_5MD] = "2.5MD", [HSC004MD] = "004MD",
+ [HSC006MD] = "006MD", [HSC010MD] = "010MD", [HSC016MD] = "016MD",
+ [HSC025MD] = "025MD", [HSC040MD] = "040MD", [HSC060MD] = "060MD",
+ [HSC100MD] = "100MD", [HSC160MD] = "160MD", [HSC250MD] = "250MD",
+ [HSC400MD] = "400MD", [HSC600MD] = "600MD", [HSC001BD] = "001BD",
+ [HSC1_6BD] = "1.6BD", [HSC2_5BD] = "2.5BD", [HSC004BD] = "004BD",
+ [HSC2_5MG] = "2.5MG", [HSC004MG] = "004MG", [HSC006MG] = "006MG",
+ [HSC010MG] = "010MG", [HSC016MG] = "016MG", [HSC025MG] = "025MG",
+ [HSC040MG] = "040MG", [HSC060MG] = "060MG", [HSC100MG] = "100MG",
+ [HSC160MG] = "160MG", [HSC250MG] = "250MG", [HSC400MG] = "400MG",
+ [HSC600MG] = "600MG", [HSC001BG] = "001BG", [HSC1_6BG] = "1.6BG",
+ [HSC2_5BG] = "2.5BG", [HSC004BG] = "004BG", [HSC006BG] = "006BG",
+ [HSC010BG] = "010BG", [HSC100KA] = "100KA", [HSC160KA] = "160KA",
+ [HSC250KA] = "250KA", [HSC400KA] = "400KA", [HSC600KA] = "600KA",
+ [HSC001GA] = "001GA", [HSC160LD] = "160LD", [HSC250LD] = "250LD",
+ [HSC400LD] = "400LD", [HSC600LD] = "600LD", [HSC001KD] = "001KD",
+ [HSC1_6KD] = "1.6KD", [HSC2_5KD] = "2.5KD", [HSC004KD] = "004KD",
+ [HSC006KD] = "006KD", [HSC010KD] = "010KD", [HSC016KD] = "016KD",
+ [HSC025KD] = "025KD", [HSC040KD] = "040KD", [HSC060KD] = "060KD",
+ [HSC100KD] = "100KD", [HSC160KD] = "160KD", [HSC250KD] = "250KD",
+ [HSC400KD] = "400KD", [HSC250LG] = "250LG", [HSC400LG] = "400LG",
+ [HSC600LG] = "600LG", [HSC001KG] = "001KG", [HSC1_6KG] = "1.6KG",
+ [HSC2_5KG] = "2.5KG", [HSC004KG] = "004KG", [HSC006KG] = "006KG",
+ [HSC010KG] = "010KG", [HSC016KG] = "016KG", [HSC025KG] = "025KG",
+ [HSC040KG] = "040KG", [HSC060KG] = "060KG", [HSC100KG] = "100KG",
+ [HSC160KG] = "160KG", [HSC250KG] = "250KG", [HSC400KG] = "400KG",
+ [HSC600KG] = "600KG", [HSC001GG] = "001GG", [HSC015PA] = "015PA",
+ [HSC030PA] = "030PA", [HSC060PA] = "060PA", [HSC100PA] = "100PA",
+ [HSC150PA] = "150PA", [HSC0_5ND] = "0.5ND", [HSC001ND] = "001ND",
+ [HSC002ND] = "002ND", [HSC004ND] = "004ND", [HSC005ND] = "005ND",
+ [HSC010ND] = "010ND", [HSC020ND] = "020ND", [HSC030ND] = "030ND",
+ [HSC001PD] = "001PD", [HSC005PD] = "005PD", [HSC015PD] = "015PD",
+ [HSC030PD] = "030PD", [HSC060PD] = "060PD", [HSC001NG] = "001NG",
+ [HSC002NG] = "002NG", [HSC004NG] = "004NG", [HSC005NG] = "005NG",
+ [HSC010NG] = "010NG", [HSC020NG] = "020NG", [HSC030NG] = "030NG",
+ [HSC001PG] = "001PG", [HSC005PG] = "005PG", [HSC015PG] = "015PG",
+ [HSC030PG] = "030PG", [HSC060PG] = "060PG", [HSC100PG] = "100PG",
+ [HSC150PG] = "150PG",
+};
+
+/**
+ * struct hsc_range_config - list of pressure ranges based on nomenclature
+ * @pmin: lowest pressure that can be measured
+ * @pmax: highest pressure that can be measured
+ */
+struct hsc_range_config {
+ const s32 pmin;
+ const s32 pmax;
+};
+
+/* All min max limits have been converted to pascals */
+static const struct hsc_range_config hsc_range_config[HSC_VARIANTS_MAX] = {
+ [HSC001BA] = { .pmin = 0, .pmax = 100000 },
+ [HSC1_6BA] = { .pmin = 0, .pmax = 160000 },
+ [HSC2_5BA] = { .pmin = 0, .pmax = 250000 },
+ [HSC004BA] = { .pmin = 0, .pmax = 400000 },
+ [HSC006BA] = { .pmin = 0, .pmax = 600000 },
+ [HSC010BA] = { .pmin = 0, .pmax = 1000000 },
+ [HSC1_6MD] = { .pmin = -160, .pmax = 160 },
+ [HSC2_5MD] = { .pmin = -250, .pmax = 250 },
+ [HSC004MD] = { .pmin = -400, .pmax = 400 },
+ [HSC006MD] = { .pmin = -600, .pmax = 600 },
+ [HSC010MD] = { .pmin = -1000, .pmax = 1000 },
+ [HSC016MD] = { .pmin = -1600, .pmax = 1600 },
+ [HSC025MD] = { .pmin = -2500, .pmax = 2500 },
+ [HSC040MD] = { .pmin = -4000, .pmax = 4000 },
+ [HSC060MD] = { .pmin = -6000, .pmax = 6000 },
+ [HSC100MD] = { .pmin = -10000, .pmax = 10000 },
+ [HSC160MD] = { .pmin = -16000, .pmax = 16000 },
+ [HSC250MD] = { .pmin = -25000, .pmax = 25000 },
+ [HSC400MD] = { .pmin = -40000, .pmax = 40000 },
+ [HSC600MD] = { .pmin = -60000, .pmax = 60000 },
+ [HSC001BD] = { .pmin = -100000, .pmax = 100000 },
+ [HSC1_6BD] = { .pmin = -160000, .pmax = 160000 },
+ [HSC2_5BD] = { .pmin = -250000, .pmax = 250000 },
+ [HSC004BD] = { .pmin = -400000, .pmax = 400000 },
+ [HSC2_5MG] = { .pmin = 0, .pmax = 250 },
+ [HSC004MG] = { .pmin = 0, .pmax = 400 },
+ [HSC006MG] = { .pmin = 0, .pmax = 600 },
+ [HSC010MG] = { .pmin = 0, .pmax = 1000 },
+ [HSC016MG] = { .pmin = 0, .pmax = 1600 },
+ [HSC025MG] = { .pmin = 0, .pmax = 2500 },
+ [HSC040MG] = { .pmin = 0, .pmax = 4000 },
+ [HSC060MG] = { .pmin = 0, .pmax = 6000 },
+ [HSC100MG] = { .pmin = 0, .pmax = 10000 },
+ [HSC160MG] = { .pmin = 0, .pmax = 16000 },
+ [HSC250MG] = { .pmin = 0, .pmax = 25000 },
+ [HSC400MG] = { .pmin = 0, .pmax = 40000 },
+ [HSC600MG] = { .pmin = 0, .pmax = 60000 },
+ [HSC001BG] = { .pmin = 0, .pmax = 100000 },
+ [HSC1_6BG] = { .pmin = 0, .pmax = 160000 },
+ [HSC2_5BG] = { .pmin = 0, .pmax = 250000 },
+ [HSC004BG] = { .pmin = 0, .pmax = 400000 },
+ [HSC006BG] = { .pmin = 0, .pmax = 600000 },
+ [HSC010BG] = { .pmin = 0, .pmax = 1000000 },
+ [HSC100KA] = { .pmin = 0, .pmax = 100000 },
+ [HSC160KA] = { .pmin = 0, .pmax = 160000 },
+ [HSC250KA] = { .pmin = 0, .pmax = 250000 },
+ [HSC400KA] = { .pmin = 0, .pmax = 400000 },
+ [HSC600KA] = { .pmin = 0, .pmax = 600000 },
+ [HSC001GA] = { .pmin = 0, .pmax = 1000000 },
+ [HSC160LD] = { .pmin = -160, .pmax = 160 },
+ [HSC250LD] = { .pmin = -250, .pmax = 250 },
+ [HSC400LD] = { .pmin = -400, .pmax = 400 },
+ [HSC600LD] = { .pmin = -600, .pmax = 600 },
+ [HSC001KD] = { .pmin = -1000, .pmax = 1000 },
+ [HSC1_6KD] = { .pmin = -1600, .pmax = 1600 },
+ [HSC2_5KD] = { .pmin = -2500, .pmax = 2500 },
+ [HSC004KD] = { .pmin = -4000, .pmax = 4000 },
+ [HSC006KD] = { .pmin = -6000, .pmax = 6000 },
+ [HSC010KD] = { .pmin = -10000, .pmax = 10000 },
+ [HSC016KD] = { .pmin = -16000, .pmax = 16000 },
+ [HSC025KD] = { .pmin = -25000, .pmax = 25000 },
+ [HSC040KD] = { .pmin = -40000, .pmax = 40000 },
+ [HSC060KD] = { .pmin = -60000, .pmax = 60000 },
+ [HSC100KD] = { .pmin = -100000, .pmax = 100000 },
+ [HSC160KD] = { .pmin = -160000, .pmax = 160000 },
+ [HSC250KD] = { .pmin = -250000, .pmax = 250000 },
+ [HSC400KD] = { .pmin = -400000, .pmax = 400000 },
+ [HSC250LG] = { .pmin = 0, .pmax = 250 },
+ [HSC400LG] = { .pmin = 0, .pmax = 400 },
+ [HSC600LG] = { .pmin = 0, .pmax = 600 },
+ [HSC001KG] = { .pmin = 0, .pmax = 1000 },
+ [HSC1_6KG] = { .pmin = 0, .pmax = 1600 },
+ [HSC2_5KG] = { .pmin = 0, .pmax = 2500 },
+ [HSC004KG] = { .pmin = 0, .pmax = 4000 },
+ [HSC006KG] = { .pmin = 0, .pmax = 6000 },
+ [HSC010KG] = { .pmin = 0, .pmax = 10000 },
+ [HSC016KG] = { .pmin = 0, .pmax = 16000 },
+ [HSC025KG] = { .pmin = 0, .pmax = 25000 },
+ [HSC040KG] = { .pmin = 0, .pmax = 40000 },
+ [HSC060KG] = { .pmin = 0, .pmax = 60000 },
+ [HSC100KG] = { .pmin = 0, .pmax = 100000 },
+ [HSC160KG] = { .pmin = 0, .pmax = 160000 },
+ [HSC250KG] = { .pmin = 0, .pmax = 250000 },
+ [HSC400KG] = { .pmin = 0, .pmax = 400000 },
+ [HSC600KG] = { .pmin = 0, .pmax = 600000 },
+ [HSC001GG] = { .pmin = 0, .pmax = 1000000 },
+ [HSC015PA] = { .pmin = 0, .pmax = 103421 },
+ [HSC030PA] = { .pmin = 0, .pmax = 206843 },
+ [HSC060PA] = { .pmin = 0, .pmax = 413685 },
+ [HSC100PA] = { .pmin = 0, .pmax = 689476 },
+ [HSC150PA] = { .pmin = 0, .pmax = 1034214 },
+ [HSC0_5ND] = { .pmin = -125, .pmax = 125 },
+ [HSC001ND] = { .pmin = -249, .pmax = 249 },
+ [HSC002ND] = { .pmin = -498, .pmax = 498 },
+ [HSC004ND] = { .pmin = -996, .pmax = 996 },
+ [HSC005ND] = { .pmin = -1245, .pmax = 1245 },
+ [HSC010ND] = { .pmin = -2491, .pmax = 2491 },
+ [HSC020ND] = { .pmin = -4982, .pmax = 4982 },
+ [HSC030ND] = { .pmin = -7473, .pmax = 7473 },
+ [HSC001PD] = { .pmin = -6895, .pmax = 6895 },
+ [HSC005PD] = { .pmin = -34474, .pmax = 34474 },
+ [HSC015PD] = { .pmin = -103421, .pmax = 103421 },
+ [HSC030PD] = { .pmin = -206843, .pmax = 206843 },
+ [HSC060PD] = { .pmin = -413685, .pmax = 413685 },
+ [HSC001NG] = { .pmin = 0, .pmax = 249 },
+ [HSC002NG] = { .pmin = 0, .pmax = 498 },
+ [HSC004NG] = { .pmin = 0, .pmax = 996 },
+ [HSC005NG] = { .pmin = 0, .pmax = 1245 },
+ [HSC010NG] = { .pmin = 0, .pmax = 2491 },
+ [HSC020NG] = { .pmin = 0, .pmax = 4982 },
+ [HSC030NG] = { .pmin = 0, .pmax = 7473 },
+ [HSC001PG] = { .pmin = 0, .pmax = 6895 },
+ [HSC005PG] = { .pmin = 0, .pmax = 34474 },
+ [HSC015PG] = { .pmin = 0, .pmax = 103421 },
+ [HSC030PG] = { .pmin = 0, .pmax = 206843 },
+ [HSC060PG] = { .pmin = 0, .pmax = 413685 },
+ [HSC100PG] = { .pmin = 0, .pmax = 689476 },
+ [HSC150PG] = { .pmin = 0, .pmax = 1034214 },
+};
+
+/**
+ * hsc_measurement_is_valid() - validate last conversion via status bits
+ * @data: structure containing instantiated sensor data
+ *
+ * The two MSB of the first transferred byte contain a status code:
+ * 00 - normal operation, valid data
+ * 01 - device in factory programming mode
+ * 10 - stale data
+ * 11 - diagnostic condition
+ *
+ * Return: true only if both status bits are zero
+ */
+static bool hsc_measurement_is_valid(struct hsc_data *data)
+{
+ return !(data->buffer[0] & HSC_STATUS_MASK);
+}
+
+static int hsc_get_measurement(struct hsc_data *data)
+{
+ const struct hsc_chip_data *chip = data->chip;
+ int ret;
+
+ ret = data->recv_cb(data);
+ if (ret < 0)
+ return ret;
+
+ data->is_valid = chip->valid(data);
+ if (!data->is_valid)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * IIO ABI expects
+ * value = (conv + offset) * scale
+ *
+ * datasheet provides the following formula for determining the temperature
+ * temp[C] = conv * a + b
+ * where a = 200/2047; b = -50
+ *
+ * temp[C] = (conv + (b/a)) * a * (1000)
+ * =>
+ * scale = a * 1000 = .097703957 * 1000 = 97.703957
+ * offset = b/a = -50 / .097703957 = -50000000 / 97704
+ *
+ * based on the datasheet
+ * pressure = (conv - Omin) * Q + Pmin =
+ * ((conv - Omin) + Pmin/Q) * Q
+ * =>
+ * scale = Q = (Pmax - Pmin) / (Omax - Omin)
+ * offset = Pmin/Q - Omin = Pmin * (Omax - Omin) / (Pmax - Pmin) - Omin
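+ *
+ * Worked example, assuming an HSC030PA part (0 to 206843 Pa) with
+ * transfer function A (output_min = 1638, output_max = 14746, from the
+ * tables above):
+ * scale = 206843 / (14746 - 1638) ~= 15.78 Pa per count, stored for
+ * the kilopascal ABI as p_scale = 0, p_scale_dec = 15779905
+ * offset = 0 / Q - 1638 = -1638 counts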
+ */
+static int hsc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct hsc_data *data = iio_priv(indio_dev);
+ int ret;
+ u32 recvd;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = hsc_get_measurement(data);
+ if (ret)
+ return ret;
+
+ recvd = get_unaligned_be32(data->buffer);
+ switch (channel->type) {
+ case IIO_PRESSURE:
+ *val = FIELD_GET(HSC_PRESSURE_MASK, recvd);
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = FIELD_GET(HSC_TEMPERATURE_MASK, recvd);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (channel->type) {
+ case IIO_TEMP:
+ *val = 97;
+ *val2 = 703957;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_PRESSURE:
+ *val = data->p_scale;
+ *val2 = data->p_scale_dec;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_OFFSET:
+ switch (channel->type) {
+ case IIO_TEMP:
+ *val = -50000000;
+ *val2 = 97704;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_PRESSURE:
+ *val = data->p_offset;
+ *val2 = data->p_offset_dec;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec hsc_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+};
+
+static const struct iio_info hsc_info = {
+ .read_raw = hsc_read_raw,
+};
+
+static const struct hsc_chip_data hsc_chip = {
+ .valid = hsc_measurement_is_valid,
+ .channels = hsc_channels,
+ .num_channels = ARRAY_SIZE(hsc_channels),
+};
+
+int hsc_common_probe(struct device *dev, hsc_recv_fn recv)
+{
+ struct hsc_data *hsc;
+ struct iio_dev *indio_dev;
+ const char *triplet;
+ u64 tmp;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*hsc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ hsc = iio_priv(indio_dev);
+
+ hsc->chip = &hsc_chip;
+ hsc->recv_cb = recv;
+ hsc->dev = dev;
+
+ ret = device_property_read_u32(dev, "honeywell,transfer-function",
+ &hsc->function);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,transfer-function could not be read\n");
+ if (hsc->function > HSC_FUNCTION_F)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,transfer-function %d invalid\n",
+ hsc->function);
+
+ ret = device_property_read_string(dev, "honeywell,pressure-triplet",
+ &triplet);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,pressure-triplet could not be read\n");
+
+ if (str_has_prefix(triplet, "NA")) {
+ ret = device_property_read_u32(dev, "honeywell,pmin-pascal",
+ &hsc->pmin);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,pmin-pascal could not be read\n");
+
+ ret = device_property_read_u32(dev, "honeywell,pmax-pascal",
+ &hsc->pmax);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,pmax-pascal could not be read\n");
+ } else {
+ ret = device_property_match_property_string(dev,
+ "honeywell,pressure-triplet",
+ hsc_triplet_variants,
+ HSC_VARIANTS_MAX);
+ if (ret < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,pressure-triplet is invalid\n");
+
+ hsc->pmin = hsc_range_config[ret].pmin;
+ hsc->pmax = hsc_range_config[ret].pmax;
+ }
+
+ if (hsc->pmin >= hsc->pmax)
+ return dev_err_probe(dev, -EINVAL,
+ "pressure limits are invalid\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "can't get vdd supply\n");
+
+ hsc->outmin = hsc_func_spec[hsc->function].output_min;
+ hsc->outmax = hsc_func_spec[hsc->function].output_max;
+
+ tmp = div_s64(((s64)(hsc->pmax - hsc->pmin)) * MICRO,
+ hsc->outmax - hsc->outmin);
+ hsc->p_scale = div_s64_rem(tmp, NANO, &hsc->p_scale_dec);
+ tmp = div_s64(((s64)hsc->pmin * (s64)(hsc->outmax - hsc->outmin)) * MICRO,
+ hsc->pmax - hsc->pmin);
+ tmp -= (s64)hsc->outmin * MICRO;
+ hsc->p_offset = div_s64_rem(tmp, MICRO, &hsc->p_offset_dec);
+
+ indio_dev->name = "hsc030pa";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &hsc_info;
+ indio_dev->channels = hsc->chip->channels;
+ indio_dev->num_channels = hsc->chip->num_channels;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_NS(hsc_common_probe, IIO_HONEYWELL_HSC030PA);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h
new file mode 100644
index 000000000..d20420dba
--- /dev/null
+++ b/drivers/iio/pressure/hsc030pa.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ */
+
+#ifndef _HSC030PA_H
+#define _HSC030PA_H
+
+#include <linux/types.h>
+
+#define HSC_REG_MEASUREMENT_RD_SIZE 4
+
+struct device;
+
+struct iio_chan_spec;
+struct iio_dev;
+
+struct hsc_data;
+struct hsc_chip_data;
+
+typedef int (*hsc_recv_fn)(struct hsc_data *);
+
+/**
+ * struct hsc_data
+ * @dev: current device structure
+ * @chip: structure containing chip's channel properties
+ * @recv_cb: function that implements the chip reads
+ * @is_valid: true if last transfer has been validated
+ * @pmin: minimum measurable pressure limit
+ * @pmax: maximum measurable pressure limit
+ * @outmin: minimum raw pressure in counts (based on transfer function)
+ * @outmax: maximum raw pressure in counts (based on transfer function)
+ * @function: transfer function
+ * @p_scale: pressure scale
+ * @p_scale_dec: pressure scale, decimal places
+ * @p_offset: pressure offset
+ * @p_offset_dec: pressure offset, decimal places
+ * @buffer: raw conversion data
+ */
+struct hsc_data {
+ struct device *dev;
+ const struct hsc_chip_data *chip;
+ hsc_recv_fn recv_cb;
+ bool is_valid;
+ s32 pmin;
+ s32 pmax;
+ u32 outmin;
+ u32 outmax;
+ u32 function;
+ s64 p_scale;
+ s32 p_scale_dec;
+ s64 p_offset;
+ s32 p_offset_dec;
+ u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
+struct hsc_chip_data {
+ bool (*valid)(struct hsc_data *data);
+ const struct iio_chan_spec *channels;
+ u8 num_channels;
+};
+
+enum hsc_func_id {
+ HSC_FUNCTION_A,
+ HSC_FUNCTION_B,
+ HSC_FUNCTION_C,
+ HSC_FUNCTION_F,
+};
+
+int hsc_common_probe(struct device *dev, hsc_recv_fn recv);
+
+#endif
diff --git a/drivers/iio/pressure/hsc030pa_i2c.c b/drivers/iio/pressure/hsc030pa_i2c.c
new file mode 100644
index 000000000..e2b524b36
--- /dev/null
+++ b/drivers/iio/pressure/hsc030pa_i2c.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf [hsc]
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf [i2c related]
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+#include "hsc030pa.h"
+
+static int hsc_i2c_recv(struct hsc_data *data)
+{
+ struct i2c_client *client = to_i2c_client(data->dev);
+ struct i2c_msg msg;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags | I2C_M_RD;
+ msg.len = HSC_REG_MEASUREMENT_RD_SIZE;
+ msg.buf = data->buffer;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0)
+ return ret;
+
+ /* i2c_transfer() returns the number of messages transferred on success */
+ return (ret == 1) ? 0 : -EIO;
+}
+
+static int hsc_i2c_probe(struct i2c_client *client)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ return hsc_common_probe(&client->dev, hsc_i2c_recv);
+}
+
+static const struct of_device_id hsc_i2c_match[] = {
+ { .compatible = "honeywell,hsc030pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hsc_i2c_match);
+
+static const struct i2c_device_id hsc_i2c_id[] = {
+ { "hsc030pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, hsc_i2c_id);
+
+static struct i2c_driver hsc_i2c_driver = {
+ .driver = {
+ .name = "hsc030pa",
+ .of_match_table = hsc_i2c_match,
+ },
+ .probe = hsc_i2c_probe,
+ .id_table = hsc_i2c_id,
+};
+module_i2c_driver(hsc_i2c_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor i2c driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_HSC030PA);
diff --git a/drivers/iio/pressure/hsc030pa_spi.c b/drivers/iio/pressure/hsc030pa_spi.c
new file mode 100644
index 000000000..a719bade8
--- /dev/null
+++ b/drivers/iio/pressure/hsc030pa_spi.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Honeywell TruStability HSC Series pressure/temperature sensor
+ *
+ * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/stddef.h>
+
+#include <linux/iio/iio.h>
+
+#include "hsc030pa.h"
+
+static int hsc_spi_recv(struct hsc_data *data)
+{
+ struct spi_device *spi = to_spi_device(data->dev);
+ struct spi_transfer xfer = {
+ .tx_buf = NULL,
+ .rx_buf = data->buffer,
+ .len = HSC_REG_MEASUREMENT_RD_SIZE,
+ };
+
+ return spi_sync_transfer(spi, &xfer, 1);
+}
+
+static int hsc_spi_probe(struct spi_device *spi)
+{
+ return hsc_common_probe(&spi->dev, hsc_spi_recv);
+}
+
+static const struct of_device_id hsc_spi_match[] = {
+ { .compatible = "honeywell,hsc030pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hsc_spi_match);
+
+static const struct spi_device_id hsc_spi_id[] = {
+ { "hsc030pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, hsc_spi_id);
+
+static struct spi_driver hsc_spi_driver = {
+ .driver = {
+ .name = "hsc030pa",
+ .of_match_table = hsc_spi_match,
+ },
+ .probe = hsc_spi_probe,
+ .id_table = hsc_spi_id,
+};
+module_spi_driver(hsc_spi_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor spi driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_HSC030PA);
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index bdff91f6b..323ac6dac 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -7,7 +7,6 @@
#include <asm/unaligned.h>
#include <linux/bitfield.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 438f9c9ab..ac2ed2da2 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -888,7 +888,6 @@ sx9324_get_default_reg(struct device *dev, int idx,
char prop[] = SX9324_PROXRAW_DEF;
u32 start = 0, raw = 0, pos = 0;
int ret, count, ph, pin;
- const char *res;
memcpy(reg_def, &sx9324_default_regs[idx], sizeof(*reg_def));
@@ -915,24 +914,21 @@ sx9324_get_default_reg(struct device *dev, int idx,
reg_def->def = raw;
break;
case SX9324_REG_AFE_CTRL0:
- ret = device_property_read_string(dev,
- "semtech,cs-idle-sleep", &res);
- if (!ret)
- ret = match_string(sx9324_csidle, ARRAY_SIZE(sx9324_csidle), res);
+ ret = device_property_match_property_string(dev, "semtech,cs-idle-sleep",
+ sx9324_csidle,
+ ARRAY_SIZE(sx9324_csidle));
if (ret >= 0) {
reg_def->def &= ~SX9324_REG_AFE_CTRL0_CSIDLE_MASK;
reg_def->def |= ret << SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT;
}
- ret = device_property_read_string(dev,
- "semtech,int-comp-resistor", &res);
- if (ret)
- break;
- ret = match_string(sx9324_rints, ARRAY_SIZE(sx9324_rints), res);
- if (ret < 0)
- break;
- reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK;
- reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT;
+ ret = device_property_match_property_string(dev, "semtech,int-comp-resistor",
+ sx9324_rints,
+ ARRAY_SIZE(sx9324_rints));
+ if (ret >= 0) {
+ reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK;
+ reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT;
+ }
break;
case SX9324_REG_AFE_CTRL4:
case SX9324_REG_AFE_CTRL7:
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index 1bd1b950e..a414eef12 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -141,7 +141,7 @@ struct ad2s1210_state {
struct spi_device *sdev;
/** GPIO pin connected to SAMPLE line. */
struct gpio_desc *sample_gpio;
- /** GPIO pins connected to A0 and A1 lines. */
+ /** GPIO pins connected to A0 and A1 lines (optional). */
struct gpio_descs *mode_gpios;
/** Used to access config registers. */
struct regmap *regmap;
@@ -149,6 +149,8 @@ struct ad2s1210_state {
unsigned long clkin_hz;
/** Available raw hysteresis values based on resolution. */
int hysteresis_available[2];
+ /* adi,fixed-mode property - only valid when mode_gpios == NULL. */
+ enum ad2s1210_mode fixed_mode;
/** The selected resolution */
enum ad2s1210_resolution resolution;
/** Copy of fault register from the previous read. */
@@ -175,6 +177,9 @@ static int ad2s1210_set_mode(struct ad2s1210_state *st, enum ad2s1210_mode mode)
struct gpio_descs *gpios = st->mode_gpios;
DECLARE_BITMAP(bitmap, 2);
+ if (!gpios)
+ return mode == st->fixed_mode ? 0 : -EOPNOTSUPP;
+
bitmap[0] = mode;
return gpiod_set_array_value(gpios->ndescs, gpios->desc, gpios->info,
@@ -276,7 +281,8 @@ static int ad2s1210_regmap_reg_read(void *context, unsigned int reg,
* parity error. The fault register is read-only and the D7 bit means
* something else there.
*/
- if (reg != AD2S1210_REG_FAULT && st->rx[1] & AD2S1210_ADDRESS_DATA)
+ if ((reg > AD2S1210_REG_VELOCITY_LSB && reg != AD2S1210_REG_FAULT)
+ && st->rx[1] & AD2S1210_ADDRESS_DATA)
return -EBADMSG;
*val = st->rx[1];
@@ -450,21 +456,53 @@ static int ad2s1210_single_conversion(struct iio_dev *indio_dev,
ad2s1210_toggle_sample_line(st);
timestamp = iio_get_time_ns(indio_dev);
- switch (chan->type) {
- case IIO_ANGL:
- ret = ad2s1210_set_mode(st, MOD_POS);
- break;
- case IIO_ANGL_VEL:
- ret = ad2s1210_set_mode(st, MOD_VEL);
- break;
- default:
- return -EINVAL;
+ if (st->fixed_mode == MOD_CONFIG) {
+ unsigned int reg_val;
+
+ switch (chan->type) {
+ case IIO_ANGL:
+ ret = regmap_bulk_read(st->regmap,
+ AD2S1210_REG_POSITION_MSB,
+ &st->sample.raw, 2);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case IIO_ANGL_VEL:
+ ret = regmap_bulk_read(st->regmap,
+ AD2S1210_REG_VELOCITY_MSB,
+ &st->sample.raw, 2);
+ if (ret < 0)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ st->sample.fault = reg_val;
+ } else {
+ switch (chan->type) {
+ case IIO_ANGL:
+ ret = ad2s1210_set_mode(st, MOD_POS);
+ break;
+ case IIO_ANGL_VEL:
+ ret = ad2s1210_set_mode(st, MOD_VEL);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ return ret;
+
+ ret = spi_read(st->sdev, &st->sample, 3);
+ if (ret < 0)
+ return ret;
}
- if (ret < 0)
- return ret;
- ret = spi_read(st->sdev, &st->sample, 3);
- if (ret < 0)
- return ret;
switch (chan->type) {
case IIO_ANGL:
@@ -1252,27 +1290,53 @@ static irqreturn_t ad2s1210_trigger_handler(int irq, void *p)
ad2s1210_toggle_sample_line(st);
if (test_bit(0, indio_dev->active_scan_mask)) {
- ret = ad2s1210_set_mode(st, MOD_POS);
- if (ret < 0)
- goto error_ret;
-
- ret = spi_read(st->sdev, &st->sample, 3);
- if (ret < 0)
- goto error_ret;
+ if (st->fixed_mode == MOD_CONFIG) {
+ ret = regmap_bulk_read(st->regmap,
+ AD2S1210_REG_POSITION_MSB,
+ &st->sample.raw, 2);
+ if (ret < 0)
+ goto error_ret;
+ } else {
+ ret = ad2s1210_set_mode(st, MOD_POS);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = spi_read(st->sdev, &st->sample, 3);
+ if (ret < 0)
+ goto error_ret;
+ }
memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
}
if (test_bit(1, indio_dev->active_scan_mask)) {
- ret = ad2s1210_set_mode(st, MOD_VEL);
- if (ret < 0)
- goto error_ret;
+ if (st->fixed_mode == MOD_CONFIG) {
+ ret = regmap_bulk_read(st->regmap,
+ AD2S1210_REG_VELOCITY_MSB,
+ &st->sample.raw, 2);
+ if (ret < 0)
+ goto error_ret;
+ } else {
+ ret = ad2s1210_set_mode(st, MOD_VEL);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = spi_read(st->sdev, &st->sample, 3);
+ if (ret < 0)
+ goto error_ret;
+ }
- ret = spi_read(st->sdev, &st->sample, 3);
+ memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
+ }
+
+ if (st->fixed_mode == MOD_CONFIG) {
+ unsigned int reg_val;
+
+ ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, &reg_val);
if (ret < 0)
- goto error_ret;
+ return ret;
- memcpy(&st->scan.chan[chan++], &st->sample.raw, 2);
+ st->sample.fault = reg_val;
}
ad2s1210_push_events(indio_dev, st->sample.fault, pf->timestamp);
@@ -1299,9 +1363,24 @@ static const struct iio_info ad2s1210_info = {
static int ad2s1210_setup_properties(struct ad2s1210_state *st)
{
struct device *dev = &st->sdev->dev;
+ const char *str_val;
u32 val;
int ret;
+ ret = device_property_read_string(dev, "adi,fixed-mode", &str_val);
+ if (ret == -EINVAL)
+ st->fixed_mode = -1;
+ else if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to read adi,fixed-mode property\n");
+ else {
+ if (strcmp(str_val, "config"))
+ return dev_err_probe(dev, -EINVAL,
+ "only adi,fixed-mode=\"config\" is supported\n");
+
+ st->fixed_mode = MOD_CONFIG;
+ }
+
ret = device_property_read_u32(dev, "assigned-resolution-bits", &val);
if (ret < 0)
return dev_err_probe(dev, ret,
@@ -1347,6 +1426,7 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
{
struct device *dev = &st->sdev->dev;
struct gpio_descs *resolution_gpios;
+ struct gpio_desc *reset_gpio;
DECLARE_BITMAP(bitmap, 2);
int ret;
@@ -1357,12 +1437,21 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
"failed to request sample GPIO\n");
/* both pins high means that we start in config mode */
- st->mode_gpios = devm_gpiod_get_array(dev, "mode", GPIOD_OUT_HIGH);
+ st->mode_gpios = devm_gpiod_get_array_optional(dev, "mode",
+ GPIOD_OUT_HIGH);
if (IS_ERR(st->mode_gpios))
return dev_err_probe(dev, PTR_ERR(st->mode_gpios),
"failed to request mode GPIOs\n");
- if (st->mode_gpios->ndescs != 2)
+ if (!st->mode_gpios && st->fixed_mode == -1)
+ return dev_err_probe(dev, -EINVAL,
+ "must specify either adi,fixed-mode or mode-gpios\n");
+
+ if (st->mode_gpios && st->fixed_mode != -1)
+ return dev_err_probe(dev, -EINVAL,
+ "must specify only one of adi,fixed-mode or mode-gpios\n");
+
+ if (st->mode_gpios && st->mode_gpios->ndescs != 2)
return dev_err_probe(dev, -EINVAL,
"requires exactly 2 mode-gpios\n");
@@ -1393,6 +1482,17 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
"failed to set resolution gpios\n");
}
+ /* If the optional reset GPIO is present, toggle it to do a hard reset. */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "failed to request reset GPIO\n");
+
+ if (reset_gpio) {
+ udelay(10);
+ gpiod_set_value(reset_gpio, 0);
+ }
+
return 0;
}
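
The ad2s1210 hunks above leave the driver with two transports for the same
reading: parts strapped into config mode ("adi,fixed-mode") expose results
through registers, while parts wired with mode GPIOs still clock the sample
out over SPI. A minimal sketch of that dispatch using the driver's own names
(read_position() is a hypothetical wrapper, not part of the patch):

	static int read_position(struct ad2s1210_state *st)
	{
		int ret;

		if (st->fixed_mode == MOD_CONFIG)
			/* config-mode parts expose results as registers */
			return regmap_bulk_read(st->regmap,
						AD2S1210_REG_POSITION_MSB,
						&st->sample.raw, 2);

		/* otherwise drive the mode pins, then sample over SPI */
		ret = ad2s1210_set_mode(st, MOD_POS);
		if (ret < 0)
			return ret;

		return spi_read(st->sdev, &st->sample, 3);
	}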
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index ed384f33e..ed0e49633 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -76,6 +76,18 @@ config MLX90632
This driver can also be built as a module. If so, the module will
be called mlx90632.
+config MLX90635
+ tristate "MLX90635 contact-less infrared sensor with medical accuracy"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Melexis
+ MLX90635 contact-less infrared sensor with medical accuracy
+	  connected via I2C.
+
+ This driver can also be built as a module. If so, the module will
+ be called mlx90635.
+
config TMP006
tristate "TMP006 infrared thermopile sensor"
depends on I2C
@@ -158,4 +170,14 @@ config MAX31865
	  This driver can also be built as a module. If so, the module
will be called max31865.
+config MCP9600
+ tristate "MCP9600 thermocouple EMF converter"
+ depends on I2C
+ help
+ If you say yes here you get support for MCP9600
+ thermocouple EMF converter connected via I2C.
+
+ This driver can also be built as a module. If so, the module
+ will be called mcp9600.
+
endmenu
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index dfec8c6d3..07d6e6570 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -10,8 +10,10 @@ obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MAX30208) += max30208.o
obj-$(CONFIG_MAX31856) += max31856.o
obj-$(CONFIG_MAX31865) += max31865.o
+obj-$(CONFIG_MCP9600) += mcp9600.o
obj-$(CONFIG_MLX90614) += mlx90614.o
obj-$(CONFIG_MLX90632) += mlx90632.o
+obj-$(CONFIG_MLX90635) += mlx90635.o
obj-$(CONFIG_TMP006) += tmp006.o
obj-$(CONFIG_TMP007) += tmp007.o
obj-$(CONFIG_TMP117) += tmp117.o
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
new file mode 100644
index 000000000..468458042
--- /dev/null
+++ b/drivers/iio/temperature/mcp9600.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * mcp9600.c - Support for Microchip MCP9600 thermocouple EMF converter
+ *
+ * Copyright (c) 2022 Andrew Hepp
+ * Author: <andrew.hepp@ahepp.dev>
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+
+/* MCP9600 registers */
+#define MCP9600_HOT_JUNCTION 0x0
+#define MCP9600_COLD_JUNCTION 0x2
+#define MCP9600_DEVICE_ID 0x20
+
+/* MCP9600 device id value */
+#define MCP9600_DEVICE_ID_MCP9600 0x40
+
+static const struct iio_chan_spec mcp9600_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = MCP9600_HOT_JUNCTION,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .address = MCP9600_COLD_JUNCTION,
+ .channel2 = IIO_MOD_TEMP_AMBIENT,
+ .modified = 1,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+struct mcp9600_data {
+ struct i2c_client *client;
+};
+
+static int mcp9600_read(struct mcp9600_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(data->client, chan->address);
+
+ if (ret < 0)
+ return ret;
+ *val = ret;
+
+ return 0;
+}
+
+static int mcp9600_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct mcp9600_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = mcp9600_read(data, chan, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 62;
+ *val2 = 500000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mcp9600_info = {
+ .read_raw = mcp9600_read_raw,
+};
+
+static int mcp9600_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct mcp9600_data *data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret, "Failed to read device ID\n");
+ if (ret != MCP9600_DEVICE_ID_MCP9600)
+ dev_warn(&client->dev, "Expected ID %x, got %x\n",
+ MCP9600_DEVICE_ID_MCP9600, ret);
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+
+ indio_dev->info = &mcp9600_info;
+ indio_dev->name = "mcp9600";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mcp9600_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id mcp9600_id[] = {
+ { "mcp9600" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mcp9600_id);
+
+static const struct of_device_id mcp9600_of_match[] = {
+ { .compatible = "microchip,mcp9600" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mcp9600_of_match);
+
+static struct i2c_driver mcp9600_driver = {
+ .driver = {
+ .name = "mcp9600",
+ .of_match_table = mcp9600_of_match,
+ },
+ .probe = mcp9600_probe,
+ .id_table = mcp9600_id
+};
+module_i2c_driver(mcp9600_driver);
+
+MODULE_AUTHOR("Andrew Hepp <andrew.hepp@ahepp.dev>");
+MODULE_DESCRIPTION("Microchip MCP9600 thermocouple EMF converter driver");
+MODULE_LICENSE("GPL");
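
The RAW/SCALE pair above means an IIO consumer computes temperature as
raw * 62.5 millidegrees Celsius, matching the MCP9600's 0.0625 degC/LSB
resolution. A small consumer-side sketch (raw_to_millicelsius() is
illustrative, not part of the driver):

	#include <stdint.h>

	/* scale = 62.500000 as reported by the driver, in mdegC per LSB */
	static inline int32_t raw_to_millicelsius(int16_t raw)
	{
		return ((int32_t)raw * 125) / 2;	/* raw * 62.5, exact */
	}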
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
new file mode 100644
index 000000000..1f5c962c1
--- /dev/null
+++ b/drivers/iio/temperature/mlx90635.c
@@ -0,0 +1,1097 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mlx90635.c - Melexis MLX90635 contactless IR temperature sensor
+ *
+ * Copyright (c) 2023 Melexis <cmo@melexis.com>
+ *
+ * Driver for the Melexis MLX90635 I2C 16-bit IR thermopile sensor
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/math64.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+
+/* Memory sections addresses */
+#define MLX90635_ADDR_RAM 0x0000 /* Start address of ram */
+#define MLX90635_ADDR_EEPROM 0x0018 /* Start address of user eeprom */
+
+/* EEPROM addresses - used at startup */
+#define MLX90635_EE_I2C_CFG 0x0018 /* I2C address register initial value */
+#define MLX90635_EE_CTRL1 0x001A /* Control register1 initial value */
+#define MLX90635_EE_CTRL2 0x001C /* Control register2 initial value */
+
+#define MLX90635_EE_Ha 0x001E /* Ha customer calib value reg 16bit */
+#define MLX90635_EE_Hb 0x0020 /* Hb customer calib value reg 16bit */
+#define MLX90635_EE_Fa 0x0026 /* Fa calibration register 32bit */
+#define MLX90635_EE_FASCALE 0x002A /* Scaling coefficient for Fa register 16bit */
+#define MLX90635_EE_Ga 0x002C /* Ga calibration register 16bit */
+#define MLX90635_EE_Fb 0x002E /* Fb calibration register 16bit */
+#define MLX90635_EE_Ea 0x0030 /* Ea calibration register 32bit */
+#define MLX90635_EE_Eb 0x0034 /* Eb calibration register 32bit */
+#define MLX90635_EE_P_G 0x0038 /* P_G calibration register 16bit */
+#define MLX90635_EE_P_O 0x003A /* P_O calibration register 16bit */
+#define MLX90635_EE_Aa 0x003C /* Aa calibration register 16bit */
+#define MLX90635_EE_VERSION 0x003E /* Version bits 4:7 and 12:15 */
+#define MLX90635_EE_Gb 0x0040 /* Gb calibration register 16bit */
+
+/* Device status register - volatile */
+#define MLX90635_REG_STATUS 0x0000
+#define MLX90635_STAT_BUSY BIT(6) /* Device busy indicator */
+#define MLX90635_STAT_BRST BIT(5) /* Brown out reset indicator */
+#define MLX90635_STAT_CYCLE_POS GENMASK(4, 2) /* Data position */
+#define MLX90635_STAT_END_CONV BIT(1) /* End of conversion indicator */
+#define MLX90635_STAT_DATA_RDY BIT(0) /* Data ready indicator */
+
+/* EEPROM control register address - volatile */
+#define MLX90635_REG_EE 0x000C
+#define MLX90635_EE_ACTIVE BIT(4) /* Power-on EEPROM */
+#define MLX90635_EE_BUSY_MASK BIT(15)
+
+#define MLX90635_REG_CMD 0x0010 /* Command register address */
+
+/* Control register1 address - volatile */
+#define MLX90635_REG_CTRL1 0x0014
+#define MLX90635_CTRL1_REFRESH_RATE_MASK GENMASK(2, 0)
+#define MLX90635_CTRL1_RES_CTRL_MASK GENMASK(4, 3)
+#define MLX90635_CTRL1_TABLE_MASK BIT(15) /* Table select */
+
+/* Control register2 address - volatile */
+#define MLX90635_REG_CTRL2 0x0016
+#define MLX90635_CTRL2_BURST_CNT_MASK GENMASK(10, 6) /* Burst count */
+#define MLX90635_CTRL2_MODE_MASK GENMASK(12, 11) /* Power mode */
+#define MLX90635_CTRL2_SOB_MASK BIT(15)
+
+/* PowerModes statuses */
+#define MLX90635_PWR_STATUS_HALT 0
+#define MLX90635_PWR_STATUS_SLEEP_STEP 1
+#define MLX90635_PWR_STATUS_STEP 2
+#define MLX90635_PWR_STATUS_CONTINUOUS 3
+
+/* Measurement data addresses */
+#define MLX90635_RESULT_1 0x0002
+#define MLX90635_RESULT_2 0x0004
+#define MLX90635_RESULT_3 0x0006
+#define MLX90635_RESULT_4 0x0008
+#define MLX90635_RESULT_5 0x000A
+
+/* Timings (ms) */
+#define MLX90635_TIMING_RST_MIN 200 /* Minimum time after addressed reset command */
+#define MLX90635_TIMING_RST_MAX 250 /* Maximum time after addressed reset command */
+#define MLX90635_TIMING_POLLING 10000 /* Time between bit polling */
+#define MLX90635_TIMING_EE_ACTIVE_MIN 100 /* Minimum time after activating the EEPROM for read */
+#define MLX90635_TIMING_EE_ACTIVE_MAX 150 /* Maximum time after activating the EEPROM for read */
+
+/* Magic constants */
+#define MLX90635_ID_DSPv1 0x01 /* EEPROM DSP version */
+#define MLX90635_RESET_CMD 0x0006 /* Reset sensor (address or global) */
+#define MLX90635_MAX_MEAS_NUM 31 /* Maximum number of measurements in list */
+#define MLX90635_PTAT_DIV 12 /* Used to divide the PTAT value in pre-processing */
+#define MLX90635_IR_DIV 24 /* Used to divide the IR value in pre-processing */
+#define MLX90635_SLEEP_DELAY_MS 6000 /* Autosleep delay */
+#define MLX90635_MEAS_MAX_TIME 2000 /* Max measurement time in ms for the lowest refresh rate */
+#define MLX90635_READ_RETRIES 100 /* Number of read retries before quitting with timeout error */
+#define MLX90635_VERSION_MASK (GENMASK(15, 12) | GENMASK(7, 4))
+#define MLX90635_DSP_VERSION(reg) (((reg & GENMASK(14, 12)) >> 9) | ((reg & GENMASK(6, 4)) >> 4))
+#define MLX90635_DSP_FIXED BIT(15)
+
+
+/**
+ * struct mlx90635_data - private data for the MLX90635 device
+ * @client: I2C client of the device
+ * @lock: Internal mutex because multiple reads are needed for single triggered
+ * measurement to ensure data consistency
+ * @regmap: Regmap of the device registers
+ * @regmap_ee: Regmap of the device EEPROM which can be cached
+ * @emissivity: Object emissivity from 0 to 1000 where 1000 = 1
+ * @regulator: Regulator of the device
+ * @powerstatus: Current POWER status of the device
+ * @interaction_ts: Timestamp of the last temperature read that is used
+ * for power management in jiffies
+ */
+struct mlx90635_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct regmap *regmap;
+ struct regmap *regmap_ee;
+ u16 emissivity;
+ struct regulator *regulator;
+ int powerstatus;
+ unsigned long interaction_ts;
+};
+
+static const struct regmap_range mlx90635_volatile_reg_range[] = {
+ regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS),
+ regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5),
+ regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE),
+ regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD),
+ regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2),
+};
+
+static const struct regmap_access_table mlx90635_volatile_regs_tbl = {
+ .yes_ranges = mlx90635_volatile_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(mlx90635_volatile_reg_range),
+};
+
+static const struct regmap_range mlx90635_read_reg_range[] = {
+ regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS),
+ regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5),
+ regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE),
+ regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD),
+ regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2),
+};
+
+static const struct regmap_access_table mlx90635_readable_regs_tbl = {
+ .yes_ranges = mlx90635_read_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(mlx90635_read_reg_range),
+};
+
+static const struct regmap_range mlx90635_no_write_reg_range[] = {
+ regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5),
+};
+
+static const struct regmap_access_table mlx90635_writeable_regs_tbl = {
+ .no_ranges = mlx90635_no_write_reg_range,
+ .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_reg_range),
+};
+
+static const struct regmap_config mlx90635_regmap = {
+ .name = "mlx90635-registers",
+ .reg_stride = 1,
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .volatile_table = &mlx90635_volatile_regs_tbl,
+ .rd_table = &mlx90635_readable_regs_tbl,
+ .wr_table = &mlx90635_writeable_regs_tbl,
+
+ .use_single_read = true,
+ .use_single_write = true,
+ .can_multi_write = false,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_range mlx90635_read_ee_range[] = {
+ regmap_reg_range(MLX90635_EE_I2C_CFG, MLX90635_EE_CTRL2),
+ regmap_reg_range(MLX90635_EE_Ha, MLX90635_EE_Gb),
+};
+
+static const struct regmap_access_table mlx90635_readable_ees_tbl = {
+ .yes_ranges = mlx90635_read_ee_range,
+ .n_yes_ranges = ARRAY_SIZE(mlx90635_read_ee_range),
+};
+
+static const struct regmap_range mlx90635_no_write_ee_range[] = {
+ regmap_reg_range(MLX90635_ADDR_EEPROM, MLX90635_EE_Gb),
+};
+
+static const struct regmap_access_table mlx90635_writeable_ees_tbl = {
+ .no_ranges = mlx90635_no_write_ee_range,
+ .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_ee_range),
+};
+
+static const struct regmap_config mlx90635_regmap_ee = {
+ .name = "mlx90635-eeprom",
+ .reg_stride = 1,
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .volatile_table = NULL,
+ .rd_table = &mlx90635_readable_ees_tbl,
+ .wr_table = &mlx90635_writeable_ees_tbl,
+
+ .use_single_read = true,
+ .use_single_write = true,
+ .can_multi_write = false,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/**
+ * mlx90635_reset_delay() - Give the mlx90635 some time to reset properly
+ * If this is not done, the following I2C command(s) will not be accepted.
+ */
+static void mlx90635_reset_delay(void)
+{
+ usleep_range(MLX90635_TIMING_RST_MIN, MLX90635_TIMING_RST_MAX);
+}
+
+static int mlx90635_pwr_sleep_step(struct mlx90635_data *data)
+{
+ int ret;
+
+ if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP)
+ return 0;
+
+ ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK,
+ FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_SLEEP_STEP));
+ if (ret < 0)
+ return ret;
+
+ data->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP;
+ return 0;
+}
+
+static int mlx90635_pwr_continuous(struct mlx90635_data *data)
+{
+ int ret;
+
+ if (data->powerstatus == MLX90635_PWR_STATUS_CONTINUOUS)
+ return 0;
+
+ ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK,
+ FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_CONTINUOUS));
+ if (ret < 0)
+ return ret;
+
+ data->powerstatus = MLX90635_PWR_STATUS_CONTINUOUS;
+ return 0;
+}
+
+static int mlx90635_read_ee_register(struct regmap *regmap, u16 reg_lsb,
+ s32 *reg_value)
+{
+ unsigned int read;
+ u32 value;
+ int ret;
+
+ ret = regmap_read(regmap, reg_lsb + 2, &read);
+ if (ret < 0)
+ return ret;
+
+ value = read;
+
+ ret = regmap_read(regmap, reg_lsb, &read);
+ if (ret < 0)
+ return ret;
+
+ *reg_value = (read << 16) | (value & 0xffff);
+
+ return 0;
+}
+
+static int mlx90635_read_ee_ambient(struct regmap *regmap, s16 *PG, s16 *PO, s16 *Gb)
+{
+ unsigned int read_tmp;
+ int ret;
+
+ ret = regmap_read(regmap, MLX90635_EE_P_O, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *PO = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_P_G, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *PG = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Gb = (u16)read_tmp;
+
+ return 0;
+}
+
+static int mlx90635_read_ee_object(struct regmap *regmap, u32 *Ea, u32 *Eb, u32 *Fa, s16 *Fb,
+ s16 *Ga, s16 *Gb, s16 *Ha, s16 *Hb, u16 *Fa_scale)
+{
+ unsigned int read_tmp;
+ int ret;
+
+ ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Ea, Ea);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Eb, Eb);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Fa, Fa);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, MLX90635_EE_Ha, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Ha = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_Hb, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Hb = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_Ga, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Ga = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Gb = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_Fb, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Fb = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_EE_FASCALE, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *Fa_scale = (u16)read_tmp;
+
+ return 0;
+}
+
+static int mlx90635_calculate_dataset_ready_time(struct mlx90635_data *data, int *refresh_time)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, &reg);
+ if (ret < 0)
+ return ret;
+
+ *refresh_time = 2 * (MLX90635_MEAS_MAX_TIME >> FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg)) + 80;
+
+ return 0;
+}
+
+static int mlx90635_perform_measurement_burst(struct mlx90635_data *data)
+{
+ unsigned int reg_status;
+ int refresh_time;
+ int ret;
+
+ ret = regmap_write_bits(data->regmap, MLX90635_REG_STATUS,
+ MLX90635_STAT_END_CONV, MLX90635_STAT_END_CONV);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx90635_calculate_dataset_ready_time(data, &refresh_time);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2,
+ FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1),
+ FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1));
+ if (ret < 0)
+ return ret;
+
+ msleep(refresh_time); /* Wait minimum time for dataset to be ready */
+
+ ret = regmap_read_poll_timeout(data->regmap, MLX90635_REG_STATUS, reg_status,
+				       reg_status & MLX90635_STAT_END_CONV,
+ MLX90635_TIMING_POLLING, MLX90635_READ_RETRIES * 10000);
+ if (ret < 0) {
+		dev_err(&data->client->dev, "data not ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mlx90635_read_ambient_raw(struct regmap *regmap,
+ s16 *ambient_new_raw, s16 *ambient_old_raw)
+{
+ unsigned int read_tmp;
+ int ret;
+
+ ret = regmap_read(regmap, MLX90635_RESULT_2, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *ambient_new_raw = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_RESULT_3, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *ambient_old_raw = (s16)read_tmp;
+
+ return 0;
+}
+
+static int mlx90635_read_object_raw(struct regmap *regmap, s16 *object_raw)
+{
+ unsigned int read_tmp;
+ s16 read;
+ int ret;
+
+ ret = regmap_read(regmap, MLX90635_RESULT_1, &read_tmp);
+ if (ret < 0)
+ return ret;
+
+ read = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90635_RESULT_4, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *object_raw = (read - (s16)read_tmp) / 2;
+
+ return 0;
+}
+
+static int mlx90635_read_all_channel(struct mlx90635_data *data,
+ s16 *ambient_new_raw, s16 *ambient_old_raw,
+ s16 *object_raw)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) {
+ /* Trigger measurement in Sleep Step mode */
+ ret = mlx90635_perform_measurement_burst(data);
+ if (ret < 0)
+ goto read_unlock;
+ }
+
+ ret = mlx90635_read_ambient_raw(data->regmap, ambient_new_raw,
+ ambient_old_raw);
+ if (ret < 0)
+ goto read_unlock;
+
+ ret = mlx90635_read_object_raw(data->regmap, object_raw);
+read_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static s64 mlx90635_preprocess_temp_amb(s16 ambient_new_raw,
+ s16 ambient_old_raw, s16 Gb)
+{
+ s64 VR_Ta, kGb, tmp;
+
+ kGb = ((s64)Gb * 1000LL) >> 10ULL;
+ VR_Ta = (s64)ambient_old_raw * 1000000LL +
+ kGb * div64_s64(((s64)ambient_new_raw * 1000LL),
+ (MLX90635_PTAT_DIV));
+ tmp = div64_s64(
+ div64_s64(((s64)ambient_new_raw * 1000000000000LL),
+ (MLX90635_PTAT_DIV)), VR_Ta);
+ return div64_s64(tmp << 19ULL, 1000LL);
+}
+
+static s64 mlx90635_preprocess_temp_obj(s16 object_raw,
+ s16 ambient_new_raw,
+ s16 ambient_old_raw, s16 Gb)
+{
+ s64 VR_IR, kGb, tmp;
+
+ kGb = ((s64)Gb * 1000LL) >> 10ULL;
+ VR_IR = (s64)ambient_old_raw * 1000000LL +
+ kGb * (div64_s64((s64)ambient_new_raw * 1000LL,
+ MLX90635_PTAT_DIV));
+ tmp = div64_s64(
+ div64_s64((s64)(object_raw * 1000000LL),
+ MLX90635_IR_DIV) * 1000000LL,
+ VR_IR);
+ return div64_s64((tmp << 19ULL), 1000LL);
+}
+
+static s32 mlx90635_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw,
+ u16 P_G, u16 P_O, s16 Gb)
+{
+ s64 kPG, kPO, AMB;
+
+ AMB = mlx90635_preprocess_temp_amb(ambient_new_raw, ambient_old_raw,
+ Gb);
+ kPG = ((s64)P_G * 1000000LL) >> 9ULL;
+ kPO = AMB - (((s64)P_O * 1000LL) >> 1ULL);
+
+ return 30 * 1000LL + div64_s64(kPO * 1000000LL, kPG);
+}
+
+static s32 mlx90635_calc_temp_object_iteration(s32 prev_object_temp, s64 object,
+ s64 TAdut, s64 TAdut4, s16 Ga,
+ u32 Fa, u16 Fa_scale, s16 Fb,
+ s16 Ha, s16 Hb, u16 emissivity)
+{
+ s64 calcedGa, calcedGb, calcedFa, Alpha_corr;
+ s64 Ha_customer, Hb_customer;
+
+ Ha_customer = ((s64)Ha * 1000000LL) >> 14ULL;
+ Hb_customer = ((s64)Hb * 100) >> 10ULL;
+
+ calcedGa = ((s64)((s64)Ga * (prev_object_temp - 35 * 1000LL)
+ * 1000LL)) >> 24LL;
+ calcedGb = ((s64)(Fb * (TAdut - 30 * 1000000LL))) >> 24LL;
+
+ Alpha_corr = ((s64)((s64)Fa * Ha_customer * 10000LL) >> Fa_scale);
+ Alpha_corr *= ((s64)(1 * 1000000LL + calcedGa + calcedGb));
+
+ Alpha_corr = div64_s64(Alpha_corr, 1000LL);
+ Alpha_corr *= emissivity;
+ Alpha_corr = div64_s64(Alpha_corr, 100LL);
+ calcedFa = div64_s64((s64)object * 100000000000LL, Alpha_corr);
+
+ return (int_sqrt64(int_sqrt64(calcedFa * 100000000LL + TAdut4))
+ - 27315 - Hb_customer) * 10;
+}
+
+static s64 mlx90635_calc_ta4(s64 TAdut, s64 scale)
+{
+ return (div64_s64(TAdut, scale) + 27315) *
+ (div64_s64(TAdut, scale) + 27315) *
+ (div64_s64(TAdut, scale) + 27315) *
+ (div64_s64(TAdut, scale) + 27315);
+}
+
+static s32 mlx90635_calc_temp_object(s64 object, s64 ambient, u32 Ea, u32 Eb,
+ s16 Ga, u32 Fa, u16 Fa_scale, s16 Fb, s16 Ha, s16 Hb,
+ u16 tmp_emi)
+{
+ s64 kTA, kTA0, TAdut, TAdut4;
+ s64 temp = 35000;
+ s8 i;
+
+ kTA = (Ea * 1000LL) >> 16LL;
+ kTA0 = (Eb * 1000LL) >> 8LL;
+ TAdut = div64_s64(((ambient - kTA0) * 1000000LL), kTA) + 30 * 1000000LL;
+ TAdut4 = mlx90635_calc_ta4(TAdut, 10000LL);
+
+ /* Iterations of calculation as described in datasheet */
+ for (i = 0; i < 5; ++i) {
+ temp = mlx90635_calc_temp_object_iteration(temp, object, TAdut, TAdut4,
+ Ga, Fa, Fa_scale, Fb, Ha, Hb,
+ tmp_emi);
+ }
+ return temp;
+}
+
+static int mlx90635_calc_object(struct mlx90635_data *data, int *val)
+{
+ s16 ambient_new_raw, ambient_old_raw, object_raw;
+ s16 Fb, Ga, Gb, Ha, Hb;
+ s64 object, ambient;
+ u32 Ea, Eb, Fa;
+ u16 Fa_scale;
+ int ret;
+
+ ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx90635_read_all_channel(data,
+ &ambient_new_raw, &ambient_old_raw,
+ &object_raw);
+ if (ret < 0)
+ return ret;
+
+ ambient = mlx90635_preprocess_temp_amb(ambient_new_raw,
+ ambient_old_raw, Gb);
+ object = mlx90635_preprocess_temp_obj(object_raw,
+ ambient_new_raw,
+ ambient_old_raw, Gb);
+
+ *val = mlx90635_calc_temp_object(object, ambient, Ea, Eb, Ga, Fa, Fa_scale, Fb,
+ Ha, Hb, data->emissivity);
+ return 0;
+}
+
+static int mlx90635_calc_ambient(struct mlx90635_data *data, int *val)
+{
+ s16 ambient_new_raw, ambient_old_raw;
+ s16 PG, PO, Gb;
+ int ret;
+
+ ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->lock);
+ if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) {
+ ret = mlx90635_perform_measurement_burst(data);
+ if (ret < 0)
+ goto read_ambient_unlock;
+ }
+
+ ret = mlx90635_read_ambient_raw(data->regmap, &ambient_new_raw,
+ &ambient_old_raw);
+read_ambient_unlock:
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ *val = mlx90635_calc_temp_ambient(ambient_new_raw, ambient_old_raw,
+ PG, PO, Gb);
+ return ret;
+}
+
+static int mlx90635_get_refresh_rate(struct mlx90635_data *data,
+ unsigned int *refresh_rate)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, &reg);
+ if (ret < 0)
+ return ret;
+
+ *refresh_rate = FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg);
+
+ return 0;
+}
+
+static const struct {
+ int val;
+ int val2;
+} mlx90635_freqs[] = {
+ { 0, 200000 },
+ { 0, 500000 },
+ { 0, 900000 },
+ { 1, 700000 },
+ { 3, 0 },
+ { 4, 800000 },
+ { 6, 900000 },
+ { 8, 900000 }
+};
+
+/**
+ * mlx90635_pm_interaction_wakeup() - Measure time between user interactions to change powermode
+ * @data: pointer to mlx90635_data object containing interaction_ts information
+ *
+ * Switch to continuous mode when interaction is faster than MLX90635_MEAS_MAX_TIME. Update the
+ * interaction_ts for each function call with the jiffies to enable measurement between function
+ * calls. Initial value of the interaction_ts needs to be set before this function call.
+ */
+static int mlx90635_pm_interaction_wakeup(struct mlx90635_data *data)
+{
+ unsigned long now;
+ int ret;
+
+ now = jiffies;
+ if (time_in_range(now, data->interaction_ts,
+ data->interaction_ts +
+ msecs_to_jiffies(MLX90635_MEAS_MAX_TIME + 100))) {
+ ret = mlx90635_pwr_continuous(data);
+ if (ret < 0)
+ return ret;
+ }
+
+ data->interaction_ts = now;
+
+ return 0;
+}
+
+static int mlx90635_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct mlx90635_data *data = iio_priv(indio_dev);
+ int ret;
+ int cr;
+
+ pm_runtime_get_sync(&data->client->dev);
+ ret = mlx90635_pm_interaction_wakeup(data);
+ if (ret < 0)
+ goto mlx90635_read_raw_pm;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (channel->channel2) {
+ case IIO_MOD_TEMP_AMBIENT:
+ ret = mlx90635_calc_ambient(data, val);
+ if (ret < 0)
+ goto mlx90635_read_raw_pm;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_MOD_TEMP_OBJECT:
+ ret = mlx90635_calc_object(data, val);
+ if (ret < 0)
+ goto mlx90635_read_raw_pm;
+
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBEMISSIVITY:
+ if (data->emissivity == 1000) {
+ *val = 1;
+ *val2 = 0;
+ } else {
+ *val = 0;
+ *val2 = data->emissivity * 1000;
+ }
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = mlx90635_get_refresh_rate(data, &cr);
+ if (ret < 0)
+ goto mlx90635_read_raw_pm;
+
+ *val = mlx90635_freqs[cr].val;
+ *val2 = mlx90635_freqs[cr].val2;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+mlx90635_read_raw_pm:
+ pm_runtime_mark_last_busy(&data->client->dev);
+ pm_runtime_put_autosuspend(&data->client->dev);
+ return ret;
+}
+
+static int mlx90635_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int val,
+ int val2, long mask)
+{
+ struct mlx90635_data *data = iio_priv(indio_dev);
+ int ret;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBEMISSIVITY:
+ /* Confirm we are within 0 and 1.0 */
+ if (val < 0 || val2 < 0 || val > 1 ||
+ (val == 1 && val2 != 0))
+ return -EINVAL;
+ data->emissivity = val * 1000 + val2 / 1000;
+ return 0;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0; i < ARRAY_SIZE(mlx90635_freqs); i++) {
+ if (val == mlx90635_freqs[i].val &&
+ val2 == mlx90635_freqs[i].val2)
+ break;
+ }
+ if (i == ARRAY_SIZE(mlx90635_freqs))
+ return -EINVAL;
+
+ ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL1,
+ MLX90635_CTRL1_REFRESH_RATE_MASK, i);
+
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx90635_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)mlx90635_freqs;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = 2 * ARRAY_SIZE(mlx90635_freqs);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec mlx90635_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .modified = 1,
+ .channel2 = IIO_MOD_TEMP_AMBIENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ {
+ .type = IIO_TEMP,
+ .modified = 1,
+ .channel2 = IIO_MOD_TEMP_OBJECT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBEMISSIVITY),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+};
+
+static const struct iio_info mlx90635_info = {
+ .read_raw = mlx90635_read_raw,
+ .write_raw = mlx90635_write_raw,
+ .read_avail = mlx90635_read_avail,
+};
+
+static void mlx90635_sleep(void *_data)
+{
+ struct mlx90635_data *data = _data;
+
+ mlx90635_pwr_sleep_step(data);
+}
+
+static int mlx90635_suspend(struct mlx90635_data *data)
+{
+ return mlx90635_pwr_sleep_step(data);
+}
+
+static int mlx90635_wakeup(struct mlx90635_data *data)
+{
+ s16 Fb, Ga, Gb, Ha, Hb, PG, PO;
+ unsigned int dsp_version;
+ u32 Ea, Eb, Fa;
+ u16 Fa_scale;
+ int ret;
+
+ regcache_cache_bypass(data->regmap_ee, false);
+ regcache_cache_only(data->regmap_ee, false);
+ regcache_cache_only(data->regmap, false);
+
+ ret = mlx90635_pwr_continuous(data);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Switch to continuous mode failed\n");
+ return ret;
+ }
+ ret = regmap_write_bits(data->regmap, MLX90635_REG_EE,
+ MLX90635_EE_ACTIVE, MLX90635_EE_ACTIVE);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Powering EEPROM failed\n");
+ return ret;
+ }
+ usleep_range(MLX90635_TIMING_EE_ACTIVE_MIN, MLX90635_TIMING_EE_ACTIVE_MAX);
+
+ regcache_mark_dirty(data->regmap_ee);
+
+ ret = regcache_sync(data->regmap_ee);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed to sync cache: %d\n", ret);
+ return ret;
+ }
+
+ ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed to read to cache Ambient coefficients EEPROM region: %d\n", ret);
+ return ret;
+ }
+
+ ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed to read to cache Object coefficients EEPROM region: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap_ee, MLX90635_EE_VERSION, &dsp_version);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "Failed to read to cache of EEPROM version: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(data->regmap_ee, true);
+
+ return ret;
+}
+
+static void mlx90635_disable_regulator(void *_data)
+{
+ struct mlx90635_data *data = _data;
+ int ret;
+
+ ret = regulator_disable(data->regulator);
+ if (ret < 0)
+ dev_err(regmap_get_device(data->regmap),
+ "Failed to disable power regulator: %d\n", ret);
+}
+
+static int mlx90635_enable_regulator(struct mlx90635_data *data)
+{
+ int ret;
+
+ ret = regulator_enable(data->regulator);
+ if (ret < 0) {
+ dev_err(regmap_get_device(data->regmap), "Failed to enable power regulator!\n");
+ return ret;
+ }
+
+ mlx90635_reset_delay();
+
+ return ret;
+}
+
+static int mlx90635_probe(struct i2c_client *client)
+{
+ struct mlx90635_data *mlx90635;
+ struct iio_dev *indio_dev;
+ unsigned int dsp_version;
+ struct regmap *regmap;
+ struct regmap *regmap_ee;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90635));
+ if (!indio_dev)
+ return dev_err_probe(&client->dev, -ENOMEM, "failed to allocate device\n");
+
+ regmap = devm_regmap_init_i2c(client, &mlx90635_regmap);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap),
+ "failed to allocate regmap\n");
+
+ regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
+	if (IS_ERR(regmap_ee))
+		return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
+				     "failed to allocate EEPROM regmap\n");
+
+ mlx90635 = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ mlx90635->client = client;
+ mlx90635->regmap = regmap;
+ mlx90635->regmap_ee = regmap_ee;
+ mlx90635->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP;
+
+ mutex_init(&mlx90635->lock);
+ indio_dev->name = "mlx90635";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mlx90635_info;
+ indio_dev->channels = mlx90635_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mlx90635_channels);
+
+ mlx90635->regulator = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(mlx90635->regulator))
+ return dev_err_probe(&client->dev, PTR_ERR(mlx90635->regulator),
+ "failed to get vdd regulator");
+
+ ret = mlx90635_enable_regulator(mlx90635);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, mlx90635_disable_regulator,
+ mlx90635);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "failed to setup regulator cleanup action\n");
+
+ ret = mlx90635_wakeup(mlx90635);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret, "wakeup failed\n");
+
+ ret = devm_add_action_or_reset(&client->dev, mlx90635_sleep, mlx90635);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "failed to setup low power cleanup\n");
+
+ ret = regmap_read(mlx90635->regmap_ee, MLX90635_EE_VERSION, &dsp_version);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret, "read of version failed\n");
+
+ dsp_version = dsp_version & MLX90635_VERSION_MASK;
+
+ if (FIELD_GET(MLX90635_DSP_FIXED, dsp_version)) {
+ if (MLX90635_DSP_VERSION(dsp_version) == MLX90635_ID_DSPv1) {
+ dev_dbg(&client->dev,
+ "Detected DSP v1 calibration %x\n", dsp_version);
+ } else {
+ dev_dbg(&client->dev,
+ "Detected Unknown EEPROM calibration %lx\n",
+ MLX90635_DSP_VERSION(dsp_version));
+ }
+ } else {
+ return dev_err_probe(&client->dev, -EPROTONOSUPPORT,
+ "Wrong fixed top bit %x (expected 0x8X0X)\n",
+ dsp_version);
+ }
+
+ mlx90635->emissivity = 1000;
+ mlx90635->interaction_ts = jiffies; /* Set initial value */
+
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+
+ ret = devm_pm_runtime_enable(&client->dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to enable powermanagement\n");
+
+ pm_runtime_set_autosuspend_delay(&client->dev, MLX90635_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id mlx90635_id[] = {
+ { "mlx90635" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mlx90635_id);
+
+static const struct of_device_id mlx90635_of_match[] = {
+ { .compatible = "melexis,mlx90635" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mlx90635_of_match);
+
+static int mlx90635_pm_suspend(struct device *dev)
+{
+ struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ ret = mlx90635_suspend(data);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_disable(data->regulator);
+ if (ret < 0)
+ dev_err(regmap_get_device(data->regmap),
+ "Failed to disable power regulator: %d\n", ret);
+
+ return ret;
+}
+
+static int mlx90635_pm_resume(struct device *dev)
+{
+ struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
+ ret = mlx90635_enable_regulator(data);
+ if (ret < 0)
+ return ret;
+
+ return mlx90635_wakeup(data);
+}
+
+static int mlx90635_pm_runtime_suspend(struct device *dev)
+{
+ struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return mlx90635_pwr_sleep_step(data);
+}
+
+static const struct dev_pm_ops mlx90635_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(mlx90635_pm_suspend, mlx90635_pm_resume)
+ RUNTIME_PM_OPS(mlx90635_pm_runtime_suspend, NULL, NULL)
+};
+
+static struct i2c_driver mlx90635_driver = {
+ .driver = {
+ .name = "mlx90635",
+ .of_match_table = mlx90635_of_match,
+ .pm = pm_ptr(&mlx90635_pm_ops),
+ },
+ .probe = mlx90635_probe,
+ .id_table = mlx90635_id,
+};
+module_i2c_driver(mlx90635_driver);
+
+MODULE_AUTHOR("Crt Mori <cmo@melexis.com>");
+MODULE_DESCRIPTION("Melexis MLX90635 contactless Infra Red temperature sensor driver");
+MODULE_LICENSE("GPL");
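
mlx90635_write_raw() above keeps emissivity as a 0-1000 fixed-point value,
while IIO hands the driver an integer/micro pair. A sketch of that encoding
(the helper name is illustrative only):

	/* (0, 950000) i.e. 0.95 -> 950; (1, 0) i.e. 1.0 -> 1000 */
	static inline u16 emissivity_to_fixed(int val, int val2)
	{
		return val * 1000 + val2 / 1000;
	}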
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ff58058ae..07fb8d3c0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
+#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
@@ -1025,13 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+}
+
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
+ int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1135,7 +1149,14 @@ retest:
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
- wait_for_completion(&cm_id_priv->comp);
+ do {
+ ret = wait_for_completion_timeout(&cm_id_priv->comp,
+ msecs_to_jiffies(
+ CM_DESTROY_ID_WAIT_TIMEOUT));
+ if (!ret) /* timeout happened */
+ cm_destroy_id_wait_timeout(cm_id, old_state);
+ } while (!ret);
+
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
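
The destroy path above swaps an indefinite wait_for_completion() for a
bounded retry, so a stuck cm_id reference is logged every
CM_DESTROY_ID_WAIT_TIMEOUT msecs instead of hanging silently. The same
pattern in isolation (done is assumed to be an initialized completion that
another thread eventually signals):

	unsigned long left;

	do {
		left = wait_for_completion_timeout(&done,
				msecs_to_jiffies(CM_DESTROY_ID_WAIT_TIMEOUT));
		if (!left)	/* timed out: diagnose, then keep waiting */
			pr_err("completion still pending\n");
	} while (!left);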
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9fd9849eb..9dca451ed 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -41,6 +41,7 @@
#define __BNXT_RE_H__
#include <rdma/uverbs_ioctl.h>
#include "hw_counters.h"
+#include <linux/hashtable.h>
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
@@ -135,6 +136,7 @@ struct bnxt_re_pacing {
#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+#define MAX_CQ_HASH_BITS (16)
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
@@ -189,6 +191,7 @@ struct bnxt_re_dev {
struct bnxt_re_pacing pacing;
struct work_struct dbq_fifo_check_work;
struct delayed_work dbq_pacing_work;
+ DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
};
#define to_bnxt_re_dev(ptr, member) \
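
The new cq_hash maps firmware CQ ids to bnxt_re_cq objects for the
toggle-memory lookup added later in this patch; insert and lookup follow the
standard <linux/hashtable.h> pattern, as excerpted from the ib_verbs.c hunks
below (cq_lookup() is a renamed sketch of bnxt_re_search_for_cq()):

	/* at CQ create time, keyed by the firmware CQ id */
	hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);

	static struct bnxt_re_cq *cq_lookup(struct bnxt_re_dev *rdev, u32 cq_id)
	{
		struct bnxt_re_cq *tmp_cq;

		/* walk the one bucket cq_id hashes to; ids break collisions */
		hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id)
			if (tmp_cq->qplib_cq.id == cq_id)
				return tmp_cq;
		return NULL;
	}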
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 93572405d..128651c01 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -371,7 +371,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
}
done:
- return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
}
@@ -381,7 +381,7 @@ struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
int num_counters = 0;
- if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
num_counters = BNXT_RE_NUM_EXT_COUNTERS;
else
num_counters = BNXT_RE_NUM_STD_COUNTERS;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index cc466dfd7..ce9c5bae8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
@@ -400,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -454,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
@@ -485,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct ib_mw *mw;
int rc;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
@@ -567,6 +578,7 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
case BNXT_RE_MMAP_WC_DB:
case BNXT_RE_MMAP_DBR_BAR:
case BNXT_RE_MMAP_DBR_PAGE:
+ case BNXT_RE_MMAP_TOGGLE_PAGE:
ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
&entry->rdma_entry, PAGE_SIZE);
break;
@@ -1023,7 +1035,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
- psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
@@ -1184,7 +1196,8 @@ fail:
}
static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
- struct ib_qp_init_attr *init_attr)
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
@@ -1213,7 +1226,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty.
*/
- entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
+ entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
rq->q_full_delta = 0;
rq->sg_info.pgsize = PAGE_SIZE;
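
bnxt_re_init_depth() replaces the unconditional roundup_pow_of_two() calls in
this file; its definition is not part of these hunks, but a sketch consistent
with the BNXT_RE_UCNTX_CMASK_POW2_DISABLED negotiation added to
bnxt_re_alloc_ucontext() below:

	static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
	{
		/* skip power-of-two rounding only when userspace asked for it */
		return uctx && (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
			ent : roundup_pow_of_two(ent);
	}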
@@ -1233,7 +1246,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1243,7 +1256,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+ struct bnxt_re_ucontext *uctx)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
@@ -1272,7 +1285,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
/* Allocate 128 + 1 more than what's provided */
diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
0 : BNXT_QPLIB_RESERVED_QP_WRS;
- entries = roundup_pow_of_two(entries + diff + 1);
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
sq->q_full_delta = diff + 1;
/*
@@ -1288,7 +1301,8 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
}
static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
- struct ib_qp_init_attr *init_attr)
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
@@ -1299,8 +1313,8 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
- entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
qplqp->sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
@@ -1326,7 +1340,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
goto out;
}
- if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+ if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
init_attr->qp_type == IB_QPT_GSI)
qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
@@ -1338,6 +1352,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
struct ib_udata *udata)
{
struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
@@ -1347,6 +1362,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
qplqp->pd = &pd->qplib_pd;
@@ -1388,18 +1404,18 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
}
/* Setup RQ/SRQ */
- rc = bnxt_re_init_rq_attr(qp, init_attr);
+ rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
bnxt_re_adjust_gsi_rq_attr(qp);
/* Setup SQ */
- rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
+ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
- bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
if (udata) /* This will update DPI and qp_handle */
rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
@@ -1523,7 +1539,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
goto fail;
if (qp_init_attr->qp_type == IB_QPT_GSI &&
- !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
+ !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
if (rc == -ENODEV)
goto qp_destroy;
@@ -1715,6 +1731,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_nq *nq = NULL;
+ struct bnxt_re_ucontext *uctx;
struct bnxt_re_dev *rdev;
struct bnxt_re_srq *srq;
struct bnxt_re_pd *pd;
@@ -1739,13 +1756,14 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
goto exit;
}
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
srq->rdev = rdev;
srq->qplib_srq.pd = &pd->qplib_pd;
srq->qplib_srq.dpi = &rdev->dpi_privileged;
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty
*/
- entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+ entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
if (entries > dev_attr->max_srq_wqes + 1)
entries = dev_attr->max_srq_wqes + 1;
srq->qplib_srq.max_wqe = entries;
@@ -2102,6 +2120,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
}
if (qp_attr_mask & IB_QP_CAP) {
+ struct bnxt_re_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
@@ -2118,7 +2139,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
"Create QP failed - max exceeded");
return -EINVAL;
}
- entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
+ entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
@@ -2131,7 +2152,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
if (qp->qplib_qp.rq.max_wqe) {
- entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
+ entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
@@ -2544,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
- /* Need unconditional fence for local invalidate
- * opcode to work as expected.
- */
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2571,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- /* Need unconditional fence for reg_mr
- * opcode to function as expected.
- */
-
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -2707,6 +2717,18 @@ bad:
return rc;
}
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Need unconditional fence for non-wire memory opcode
+ * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
@@ -2786,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = -EINVAL;
goto bad;
}
- if (!rc)
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
bad:
if (rc) {
ibdev_err(&qp->rdev->ibdev,
@@ -2899,14 +2924,20 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
- struct bnxt_re_cq *cq;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
nq = cq->qplib_cq.nq;
+ cctx = rdev->chip_ctx;
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ free_page((unsigned long)cq->uctx_cq_page);
+ hash_del(&cq->hash_entry);
+ }
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
@@ -2919,13 +2950,16 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
+ struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
+ struct bnxt_re_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
- int rc, entries;
- int cqe = attr->cqe;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq = NULL;
unsigned int nq_alloc_cnt;
+ int cqe = attr->cqe;
+ int rc, entries;
u32 active_cqs;
if (attr->flags)
@@ -2938,9 +2972,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->rdev = rdev;
+ cctx = rdev->chip_ctx;
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
- entries = roundup_pow_of_two(cqe + 1);
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
@@ -2948,8 +2983,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
if (udata) {
struct bnxt_re_cq_req req;
- struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
- udata, struct bnxt_re_ucontext, ib_uctx);
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
goto fail;
@@ -3001,22 +3034,34 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->cq_lock);
if (udata) {
- struct bnxt_re_cq_resp resp;
+ struct bnxt_re_cq_resp resp = {};
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+ /* Allocate a page */
+ cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cq->uctx_cq_page) {
+ rc = -ENOMEM;
+ goto c2fail;
+ }
+ resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+ }
resp.cqid = cq->qplib_cq.id;
resp.tail = cq->qplib_cq.hwq.cons;
resp.phase = cq->qplib_cq.period;
resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
- goto c2fail;
+ goto free_mem;
}
}
return 0;
+free_mem:
+ free_page((unsigned long)cq->uctx_cq_page);
c2fail:
ib_umem_release(cq->umem);
fail:
@@ -3071,12 +3116,11 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
return -EINVAL;
}
- entries = roundup_pow_of_two(cqe + 1);
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
- uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
- ib_uctx);
/* uverbs consumer */
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
@@ -4107,6 +4151,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
+ struct bnxt_re_uctx_req ureq = {};
u32 chip_met_rev_num = 0;
int rc;
@@ -4156,6 +4201,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
if (rdev->pacing.dbr_pacing)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
+ if (udata->inlen >= sizeof(ureq)) {
+ rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ goto cfail;
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ }
+ }
+
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
ibdev_err(ibdev, "Failed to copy user context");
@@ -4193,6 +4248,19 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
+static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
+{
+ struct bnxt_re_cq *cq = NULL, *tmp_cq;
+
+ hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
+ if (tmp_cq->qplib_cq.id == cq_id) {
+ cq = tmp_cq;
+ break;
+ }
+ }
+ return cq;
+}
+
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4234,6 +4302,7 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
rdma_entry);
break;
case BNXT_RE_MMAP_DBR_PAGE:
+ case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
return -EFAULT;
@@ -4410,8 +4479,126 @@ DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
&UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
+/* Toggle memory: expose per-resource toggle pages to userspace */
+static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
+ enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ enum bnxt_re_get_toggle_mem_type res_type;
+ struct bnxt_re_user_mmap_entry *entry;
+ struct bnxt_re_ucontext *uctx;
+ struct ib_ucontext *ib_uctx;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ u64 mem_offset;
+ u64 addr = 0;
+ u32 length;
+ u32 offset;
+ u32 cq_id;
+ int err;
+
+ ib_uctx = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ib_uctx))
+ return PTR_ERR(ib_uctx);
+
+ err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
+ if (err)
+ return err;
+
+ uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
+ rdev = uctx->rdev;
+
+ switch (res_type) {
+ case BNXT_RE_CQ_TOGGLE_MEM:
+ err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
+
+ cq = bnxt_re_search_for_cq(rdev, cq_id);
+ if (!cq)
+ return -EINVAL;
+
+ length = PAGE_SIZE;
+ addr = (u64)cq->uctx_cq_page;
+ mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ offset = 0;
+ break;
+ case BNXT_RE_SRQ_TOGGLE_MEM:
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
+ if (!entry)
+ return -ENOMEM;
+
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ &mem_offset, sizeof(mem_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ &offset, sizeof(offset));
+ if (err)
+ return err;
+
+ return 0;
+}
+
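
The handler above returns an mmap cookie, length, and offset for a per-CQ toggle page. A minimal userspace-side sketch of consuming such a page once it has been mmap()ed; the ioctl plumbing is elided and the `page` pointer is an assumption of this sketch, not a real rdma-core API:

#include <stdint.h>

/* Hypothetical consumer of the CQ toggle page. The driver writes the
 * current toggle (cq->qplib_cq.toggle) into the first u32 of the page
 * from its completion handler; userspace reads it before re-arming the
 * CQ so the doorbell carries the phase the hardware expects. */
static uint32_t read_cq_toggle(const volatile uint32_t *page)
{
	return *page;
}
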
+static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_user_mmap_entry *entry = uobject->object;
+
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
+ UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
+ enum bnxt_re_get_toggle_mem_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
+ UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
+ &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
+ &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
+
const struct uapi_definition bnxt_re_uapi_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
{}
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 84715b7e7..b267d6d59 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -108,6 +108,8 @@ struct bnxt_re_cq {
struct ib_umem *umem;
struct ib_umem *resize_umem;
int resize_cqe;
+ void *uctx_cq_page;
+ struct hlist_node hash_entry;
};
struct bnxt_re_mr {
@@ -140,6 +142,7 @@ struct bnxt_re_ucontext {
void *shpg;
spinlock_t sh_lock; /* protect shpg */
struct rdma_user_mmap_entry *shpage_mmap;
+ u64 cmask;
};
enum bnxt_re_mmap_flag {
@@ -148,6 +151,7 @@ enum bnxt_re_mmap_flag {
BNXT_RE_MMAP_WC_DB,
BNXT_RE_MMAP_DBR_PAGE,
BNXT_RE_MMAP_DBR_BAR,
+ BNXT_RE_MMAP_TOGGLE_PAGE,
};
struct bnxt_re_user_mmap_entry {
@@ -167,6 +171,12 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
}
+static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
+{
+ return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
+ ent : roundup_pow_of_two(ent) : ent;
+}
+
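
A stand-alone model of the helper above, assuming ent >= 1; the rounding routine is a plain C re-implementation for illustration, not the kernel's roundup_pow_of_two():

#include <stdint.h>

static uint32_t roundup_pow2(uint32_t v)	/* assumes v >= 1 */
{
	v--;
	v |= v >> 1; v |= v >> 2; v |= v >> 4;
	v |= v >> 8; v |= v >> 16;
	return v + 1;
}

static uint32_t init_depth(uint32_t ent, int pow2_disabled)
{
	return pow2_disabled ? ent : roundup_pow2(ent);
}

/* init_depth(101, 0) == 128: legacy power-of-two queue depth.
 * init_depth(101, 1) == 101: exact depth for contexts that negotiated
 * BNXT_RE_UCNTX_CMASK_POW2_DISABLED. */
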
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index a99c68247..54b4d2f3a 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -54,6 +54,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
#include "roce_hsi.h"
@@ -107,9 +108,14 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
dev_info(rdev_to_dev(rdev),
"Couldn't get DB bar size, Low latency framework is disabled\n");
/* set register offsets for both UC and WC */
- res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
- BNXT_QPLIB_DBR_PF_DB_OFFSET;
- res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+ if (bnxt_qplib_is_chip_gen_p7(cctx)) {
+ res->dpi_tbl.ucreg.offset = offset;
+ res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+ } else {
+ res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+ BNXT_QPLIB_DBR_PF_DB_OFFSET;
+ res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+ }
/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
* is equal to the DB-Bar actual size. This indicates that L2
@@ -128,11 +134,13 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
struct bnxt_qplib_chip_ctx *cctx;
cctx = rdev->chip_ctx;
- cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
mode : BNXT_QPLIB_WQE_MODE_STATIC;
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
+ cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -215,7 +223,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
attr->max_srq);
ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
rdev->dev_attr.tqm_alloc_reqs[i];
@@ -264,7 +272,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
bnxt_re_limit_pf_res(rdev);
- num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
if (num_vfs)
bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
@@ -272,11 +280,8 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
-
- if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
- return;
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
&rdev->qplib_ctx);
@@ -1203,25 +1208,19 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
{
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
+ u32 *cq_ptr;
if (cq->ib_cq.comp_handler) {
- /* Lock comp_handler? */
+ if (cq->uctx_cq_page) {
+ cq_ptr = (u32 *)cq->uctx_cq_page;
+ *cq_ptr = cq->qplib_cq.toggle;
+ }
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
}
return 0;
}
-#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
-#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
-static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
-{
- return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
- (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
- BNXT_RE_GEN_P5_PF_NQ_DB) :
- rdev->en_dev->msix_entries[indx].db_offset;
-}
-
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
@@ -1242,7 +1241,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
bnxt_qplib_init_res(&rdev->qplib_res);
for (i = 1; i < rdev->num_msix ; i++) {
- db_offt = bnxt_re_get_nqdb_offset(rdev, i);
+ db_offt = rdev->en_dev->msix_entries[i].db_offset;
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
i - 1, rdev->en_dev->msix_entries[i].vector,
db_offt, &bnxt_re_cqn_handler,
@@ -1653,7 +1652,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
- db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
+ db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt,
@@ -1681,7 +1680,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
- bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
+ bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate QPLIB context: %#x\n", rc);
@@ -1737,6 +1736,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
*/
bnxt_re_vf_res_config(rdev);
}
+ hash_init(rdev->cq_hash);
return 0;
free_sctx:
@@ -1804,7 +1804,7 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
return;
/* Currently enabling only for GenP5 and P7 adapters */
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
return;
if (enable) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 2a6223918..439d0c7c5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -237,18 +237,15 @@ static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_hwq *hwq = &nq->hwq;
struct nq_base *nqe, **nq_ptr;
int budget = nq->budget;
- u32 sw_cons, raw_cons;
uintptr_t q_handle;
u16 type;
spin_lock_bh(&hwq->lock);
/* Service the NQ until empty */
- raw_cons = hwq->cons;
while (budget--) {
- sw_cons = HWQ_CMP(raw_cons, hwq);
nq_ptr = (struct nq_base **)hwq->pbl_ptr;
- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
+ nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
break;
/*
@@ -276,7 +273,8 @@ static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
default:
break;
}
- raw_cons++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
}
spin_unlock_bh(&hwq->lock);
}
@@ -302,18 +300,16 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
struct bnxt_qplib_hwq *hwq = &nq->hwq;
struct bnxt_qplib_cq *cq;
int budget = nq->budget;
- u32 sw_cons, raw_cons;
struct nq_base *nqe;
uintptr_t q_handle;
+ u32 hw_polled = 0;
u16 type;
spin_lock_bh(&hwq->lock);
/* Service the NQ until empty */
- raw_cons = hwq->cons;
while (budget--) {
- sw_cons = HWQ_CMP(raw_cons, hwq);
- nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
- if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
+ nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
break;
/*
@@ -334,6 +330,9 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
if (!cq)
break;
+ cq->toggle = (le16_to_cpu(nqe->info10_type) &
+ NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
+ cq->dbinfo.toggle = cq->toggle;
bnxt_qplib_armen_db(&cq->dbinfo,
DBC_DBC_TYPE_CQ_ARMENA);
spin_lock_bh(&cq->compl_lock);
@@ -372,12 +371,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
"nqe with type = 0x%x not handled\n", type);
break;
}
- raw_cons++;
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
}
- if (hwq->cons != raw_cons) {
- hwq->cons = raw_cons;
+ if (hw_polled)
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
- }
spin_unlock_bh(&hwq->lock);
}
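
The reworked service loop follows one pattern throughout this patch: consume entries while the epoch-qualified valid bit matches, count what was polled, and ring the doorbell once per batch. A distilled stand-alone sketch of that shape; the types and callbacks are placeholders, not the kernel structures:

#include <stdbool.h>

struct nq_entry { unsigned int toggle; };
struct nq {
	struct nq_entry *ring;
	unsigned int cons, depth, epoch;
};

/* Valid when the hardware-written toggle matches the expected phase. */
static bool nqe_valid(const struct nq_entry *e, unsigned int epoch)
{
	return !!e->toggle == !epoch;
}

static unsigned int nq_service(struct nq *q, int budget,
			       void (*handle)(struct nq_entry *),
			       void (*doorbell)(struct nq *))
{
	unsigned int polled = 0;

	while (budget-- > 0) {
		struct nq_entry *e = &q->ring[q->cons];

		if (!nqe_valid(e, q->epoch))
			break;
		handle(e);
		if (++q->cons == q->depth) {	/* wrap: flip expected phase */
			q->cons = 0;
			q->epoch ^= 1u;
		}
		polled++;
	}
	if (polled)
		doorbell(q);	/* one MMIO write per poll batch */
	return polled;
}
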
@@ -505,6 +504,7 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
pdev = nq->pdev;
nq_db = &nq->nq_db;
+ nq_db->dbinfo.flags = 0;
nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
if (!nq_db->reg.bar_base) {
@@ -649,7 +649,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = -ENOMEM;
goto fail;
}
-
+ srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
sizeof(req));
@@ -703,13 +703,9 @@ int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- u32 sw_prod, sw_cons, count = 0;
-
- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+ u32 count;
- count = sw_prod > sw_cons ? sw_prod - sw_cons :
- srq_hwq->max_elements - sw_cons + sw_prod;
+ count = __bnxt_qplib_get_avail(srq_hwq);
if (count > srq->threshold) {
srq->arm_req = false;
bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
@@ -762,7 +758,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
struct rq_wqe *srqe;
struct sq_sge *hw_sge;
- u32 sw_prod, sw_cons, count = 0;
+ u32 count = 0;
int i, next;
spin_lock(&srq_hwq->lock);
@@ -776,8 +772,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
srq->start_idx = srq->swq[next].next_idx;
spin_unlock(&srq_hwq->lock);
- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
+ srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
memset(srqe, 0, srq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
@@ -793,17 +788,10 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
srqe->wr_id[0] = cpu_to_le32((u32)next);
srq->swq[next].wr_id = wqe->wr_id;
- srq_hwq->prod++;
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
spin_lock(&srq_hwq->lock);
- sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- /* retaining srq_hwq->cons for this logic
- * actually the lock is only required to
- * read srq_hwq->cons.
- */
- sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
- count = sw_prod > sw_cons ? sw_prod - sw_cons :
- srq_hwq->max_elements - sw_cons + sw_prod;
+ count = __bnxt_qplib_get_avail(srq_hwq);
spin_unlock(&srq_hwq->lock);
/* Ring DB */
bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
@@ -850,6 +838,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
int rc;
+ sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP1,
sizeof(req));
@@ -886,6 +875,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (rq->max_wqe) {
+ rq->dbinfo.flags = 0;
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
@@ -993,6 +983,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;
+ if (res->dattr)
+ qp->dev_cap_flags = res->dattr->dev_cap_flags;
+
+ sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
sizeof(req));
@@ -1004,9 +998,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* SQ */
if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
- psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
+
+ if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ psn_sz = sizeof(struct sq_msn_search);
+ qp->msn = 0;
+ }
}
hwq_attr.res = res;
@@ -1015,6 +1014,13 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.depth = bnxt_qplib_get_depth(sq);
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
+ /* Update msn tbl size */
+ if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
+ hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ qp->msn_tbl_sz = hwq_attr.aux_depth;
+ qp->msn = 0;
+ }
+
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -1041,6 +1047,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* RQ */
if (!qp->srq) {
+ rq->dbinfo.flags = 0;
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
@@ -1455,12 +1462,15 @@ bail:
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
+ u32 peek_flags, peek_cons;
struct cq_base *hw_cqe;
int i;
+ peek_flags = cq->dbinfo.flags;
+ peek_cons = cq_hwq->cons;
for (i = 0; i < cq_hwq->max_elements; i++) {
- hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
- if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
+ if (!CQE_CMP_VALID(hw_cqe, peek_flags))
continue;
/*
* The valid test of the entry must be done first before
@@ -1490,6 +1500,8 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
default:
break;
}
+ bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
+ 1, &peek_flags);
}
}
@@ -1591,6 +1603,27 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
return NULL;
}
+/* Fill the MSN table entry in the next PSN search row */
+static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_msn_search *msns;
+ u32 start_psn, next_psn;
+ u16 start_idx;
+
+ msns = (struct sq_msn_search *)swq->psn_search;
+ msns->start_idx_next_psn_start_psn = 0;
+
+ start_psn = swq->start_psn;
+ next_psn = swq->next_psn;
+ start_idx = swq->slot_idx;
+ msns->start_idx_next_psn_start_psn |=
+ bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
+ qp->msn++;
+ qp->msn %= qp->msn_tbl_sz;
+}
+
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe,
struct bnxt_qplib_swq *swq)
@@ -1602,6 +1635,12 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
if (!swq->psn_search)
return;
+ /* Handle MSN differently on cap flags */
+ if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ bnxt_qplib_fill_msn_search(qp, wqe, swq);
+ return;
+ }
+ psns = (struct sq_psn_search *)swq->psn_search;
psns = swq->psn_search;
psns_ext = swq->psn_ext;
@@ -1612,7 +1651,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
SQ_PSN_SEARCH_NEXT_PSN_MASK);
- if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+ if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
@@ -1710,8 +1749,8 @@ static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
return slot;
}
-static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
- struct bnxt_qplib_swq *swq)
+static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
+ struct bnxt_qplib_swq *swq, bool hw_retx)
{
struct bnxt_qplib_hwq *hwq;
u32 pg_num, pg_indx;
@@ -1722,6 +1761,11 @@ static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
if (!hwq->pad_pg)
return;
tail = swq->slot_idx / sq->dbinfo.max_slot;
+ if (hw_retx) {
+ /* For HW retx use qp msn index */
+ tail = qp->msn;
+ tail %= qp->msn_tbl_sz;
+ }
pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
@@ -1746,6 +1790,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
u16 wqe_sz, qdf = 0;
+ bool msn_update;
void *base_hdr;
void *ext_hdr;
__le32 temp32;
@@ -1773,7 +1818,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
- bnxt_qplib_pull_psn_buff(sq, swq);
+ bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
idx = 0;
swq->slot_idx = hwq->prod;
@@ -1805,6 +1850,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
&idx);
if (data_len < 0)
goto queue_err;
+ /* Update the MSN table only for WQEs that go out on the wire */
+ msn_update = true;
/* Specifics */
switch (wqe->type) {
case BNXT_QPLIB_SWQE_TYPE_SEND:
@@ -1845,6 +1892,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
SQ_SEND_DST_QP_MASK);
ext_sqe->avid = cpu_to_le32(wqe->send.avid &
SQ_SEND_AVID_MASK);
+ msn_update = false;
} else {
sqe->length = cpu_to_le32(data_len);
if (qp->mtu)
@@ -1902,7 +1950,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sqe->wqe_type = wqe->type;
sqe->flags = wqe->flags;
sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
-
+ msn_update = false;
break;
}
case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
@@ -1934,6 +1982,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
PTU_PTE_VALID);
ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
ext_sqe->va = cpu_to_le64(wqe->frmr.va);
+ msn_update = false;
break;
}
@@ -1951,6 +2000,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
sqe->l_key = cpu_to_le32(wqe->bind.r_key);
ext_sqe->va = cpu_to_le64(wqe->bind.va);
ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
+ msn_update = false;
break;
}
default:
@@ -1958,11 +2008,13 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
- swq->next_psn = sq->psn & BTH_PSN_MASK;
- bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
+ swq->next_psn = sq->psn & BTH_PSN_MASK;
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ }
queue_err:
bnxt_qplib_swq_mod_start(sq, wqe_idx);
- bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
+ bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
qp->wqe_cnt++;
done:
if (sch_handler) {
@@ -2050,7 +2102,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
bnxt_qplib_swq_mod_start(rq, wqe_idx);
- bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
+ bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
if (sch_handler) {
nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
@@ -2087,6 +2139,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
return -EINVAL;
}
+ cq->dbinfo.flags = 0;
hwq_attr.res = res;
hwq_attr.depth = cq->max_wqe;
hwq_attr.stride = sizeof(struct cq_base);
@@ -2102,7 +2155,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
- req.cq_size = cpu_to_le32(cq->hwq.max_elements);
+ req.cq_size = cpu_to_le32(cq->max_wqe);
pbl = &cq->hwq.pbl[PBL_LVL_0];
pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
CMDQ_CREATE_CQ_PG_SIZE_SFT);
@@ -2130,6 +2183,8 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq->dbinfo.xid = cq->id;
cq->dbinfo.db = cq->dpi->dbr;
cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ cq->dbinfo.flags = 0;
+ cq->dbinfo.toggle = 0;
bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
@@ -2145,6 +2200,8 @@ void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
{
bnxt_qplib_free_hwq(res, &cq->hwq);
memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
+ /* Reset only the consumer epoch bit in the flags */
+ cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
@@ -2241,7 +2298,8 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
cqe++;
(*budget)--;
skip_compl:
- bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ sq->swq[last].slots, &sq->dbinfo.flags);
sq->swq_last = sq->swq[last].next_idx;
}
*pcqe = cqe;
@@ -2288,7 +2346,8 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
cqe->wr_id = rq->swq[last].wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ rq->swq[last].slots, &rq->dbinfo.flags);
rq->swq_last = rq->swq[last].next_idx;
}
*pcqe = cqe;
@@ -2317,7 +2376,7 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
- u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
+ u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
struct bnxt_qplib_q *sq = &qp->sq;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
@@ -2348,16 +2407,14 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
}
if (sq->condition) {
/* Peek at the completions */
- peek_raw_cq_cons = cq->hwq.cons;
+ peek_flags = cq->dbinfo.flags;
peek_sw_cq_cons = cq_cons;
i = cq->hwq.max_elements;
while (i--) {
- peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
peek_sw_cq_cons, NULL);
/* If the next hwcqe is VALID */
- if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
- cq->hwq.max_elements)) {
+ if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
/*
* The valid test of the entry must be done first before
* reading any further.
@@ -2400,8 +2457,9 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
rc = -EINVAL;
goto out;
}
- peek_sw_cq_cons++;
- peek_raw_cq_cons++;
+ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
+ &peek_sw_cq_cons,
+ 1, &peek_flags);
}
dev_err(&cq->hwq.pdev->dev,
"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
@@ -2488,7 +2546,8 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
}
skip:
- bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ swq->slots, &sq->dbinfo.flags);
sq->swq_last = swq->next_idx;
if (sq->single)
break;
@@ -2515,7 +2574,8 @@ static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
srq->swq[srq->last_idx].next_idx = (int)tag;
srq->last_idx = (int)tag;
srq->swq[srq->last_idx].next_idx = -1;
- srq->hwq.cons++; /* Support for SRQE counter */
+ bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
+ srq->dbinfo.max_slot, &srq->dbinfo.flags);
spin_unlock(&srq->hwq.lock);
}
@@ -2584,7 +2644,8 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
cqe->wr_id = swq->wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
rq->swq_last = swq->next_idx;
*pcqe = cqe;
@@ -2670,7 +2731,8 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
cqe->wr_id = swq->wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
rq->swq_last = swq->next_idx;
*pcqe = cqe;
@@ -2687,14 +2749,11 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
struct cq_base *hw_cqe;
- u32 sw_cons, raw_cons;
bool rc = true;
- raw_cons = cq->hwq.cons;
- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
/* Check for Valid bit. If the CQE is valid, return false */
- rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+ rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
return rc;
}
@@ -2776,7 +2835,8 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->wr_id = swq->wr_id;
cqe++;
(*budget)--;
- bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
+ bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
+ swq->slots, &rq->dbinfo.flags);
rq->swq_last = swq->next_idx;
*pcqe = cqe;
@@ -2849,7 +2909,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe++;
(*budget)--;
}
- bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
+ bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
+ sq->swq[swq_last].slots, &sq->dbinfo.flags);
sq->swq_last = sq->swq[swq_last].next_idx;
}
*pcqe = cqe;
@@ -2934,19 +2995,17 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe;
- u32 sw_cons, raw_cons;
int budget, rc = 0;
+ u32 hw_polled = 0;
u8 type;
- raw_cons = cq->hwq.cons;
budget = num_cqes;
while (budget) {
- sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
/* Check for Valid bit */
- if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
+ if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
break;
/*
@@ -2961,7 +3020,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
&cqe, &budget,
- sw_cons, lib_qp);
+ cq->hwq.cons, lib_qp);
break;
case CQ_BASE_CQE_TYPE_RES_RC:
rc = bnxt_qplib_cq_process_res_rc(cq,
@@ -3007,18 +3066,20 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
dev_err(&cq->hwq.pdev->dev,
"process_cqe error rc = 0x%x\n", rc);
}
- raw_cons++;
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
+ 1, &cq->dbinfo.flags);
+
}
- if (cq->hwq.cons != raw_cons) {
- cq->hwq.cons = raw_cons;
+ if (hw_polled)
bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
- }
exit:
return num_cqes - budget;
}
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
+ cq->dbinfo.toggle = cq->toggle;
if (arm_type)
bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
/* Using cq->arm_state variable to track whether to issue cq handler */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 404b85109..7fd4506b3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -338,6 +338,9 @@ struct bnxt_qplib_qp {
dma_addr_t rq_hdr_buf_map;
struct list_head sq_flush;
struct list_head rq_flush;
+ u32 msn;
+ u32 msn_tbl_sz;
+ u16 dev_cap_flags;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
@@ -348,9 +351,21 @@ struct bnxt_qplib_qp {
#define CQE_IDX(x) ((x) & CQE_MAX_IDX_PER_PG)
#define ROCE_CQE_CMP_V 0
-#define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \
+#define CQE_CMP_VALID(hdr, pass) \
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
- !((raw_cons) & (cp_bit)))
+ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+
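
The rewritten validity test replaces the raw-consumer/completion-bit check with an epoch comparison. A tiny model of the same predicate, with the truth table spelled out:

#include <stdbool.h>

/* Model of CQE_CMP_VALID(): an entry is valid when the toggle written
 * by hardware matches the phase implied by the consumer's epoch bit. */
static bool cqe_valid(unsigned int cqe_toggle, unsigned int epoch_bit)
{
	return !!cqe_toggle == !epoch_bit;
}

/* cqe_valid(1, 0) -> true  (first pass through the ring)
 * cqe_valid(0, 0) -> false (stale entry from the previous pass)
 * cqe_valid(0, 1) -> true  (second pass, after the consumer wrapped) */
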
+static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
+{
+ int cons, prod, avail;
+
+ cons = hwq->cons;
+ prod = hwq->prod;
+ avail = cons - prod;
+ if (cons <= prod)
+ avail += hwq->depth;
+ return avail;
+}
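
__bnxt_qplib_get_avail() returns the size of the consumer-to-producer gap, i.e. the slots not currently held by posted work; a worked example of the same arithmetic:

/* depth = 16, cons = 2, prod = 5  ->  2 - 5 + 16 = 13 slots available
 * cons == prod                    ->  depth (ring fully available) */
static int ring_avail(int cons, int prod, int depth)
{
	int avail = cons - prod;

	if (cons <= prod)
		avail += depth;
	return avail;
}
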
static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
u8 slots)
@@ -406,6 +421,7 @@ struct bnxt_qplib_cq {
bool resize_in_progress;
struct bnxt_qplib_sg_info sg_info;
u64 cq_handle;
+ u8 toggle;
#define CQ_RESIZE_WAIT_TIME_MS 500
unsigned long flags;
@@ -443,9 +459,9 @@ struct bnxt_qplib_cq {
#define NQE_PG(x) (((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x) ((x) & NQE_MAX_IDX_PER_PG)
-#define NQE_CMP_VALID(hdr, raw_cons, cp_bit) \
+#define NQE_CMP_VALID(hdr, pass) \
(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) == \
- !((raw_cons) & (cp_bit)))
+ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
@@ -614,4 +630,15 @@ static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
return size;
}
+
+/* MSN table update inline helper */
+static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
+{
+ return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
+ SQ_MSN_SEARCH_START_IDX_MASK) |
+ (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_MSN_SEARCH_NEXT_PSN_MASK) |
+ (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
+ SQ_MSN_SEARCH_START_PSN_MASK));
+}
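
The entry packs three fields into one 64-bit word using the SQ_MSN_SEARCH_* masks above; a stand-alone model with a worked value (the kernel version additionally byte-swaps with cpu_to_le64()):

#include <stdint.h>

/* bits  0..23: start_psn, bits 24..47: next_psn, bits 48..63: start_idx */
static uint64_t pack_msn(uint32_t start_idx, uint32_t next_psn,
			 uint32_t start_psn)
{
	return ((uint64_t)(start_idx & 0xffff) << 48) |
	       ((uint64_t)(next_psn & 0xffffff) << 24) |
	       (start_psn & 0xffffff);
}

/* pack_msn(0x0002, 0x000011, 0x000010) == 0x0002000011000010 */
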
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index e47b4ca64..3ffaef0c2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -734,17 +734,15 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
struct creq_base *creqe;
- u32 sw_cons, raw_cons;
unsigned long flags;
u32 num_wakeup = 0;
+ u32 hw_polled = 0;
/* Service the CREQ until budget is over */
spin_lock_irqsave(&hwq->lock, flags);
- raw_cons = hwq->cons;
while (budget > 0) {
- sw_cons = HWQ_CMP(raw_cons, hwq);
- creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
- if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
+ creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
+ if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
break;
/* The valid test of the entry must be done first before
* reading any further.
@@ -775,15 +773,15 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
type);
break;
}
- raw_cons++;
budget--;
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &creq->creq_db.dbinfo.flags);
}
- if (hwq->cons != raw_cons) {
- hwq->cons = raw_cons;
+ if (hw_polled)
bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
rcfw->res->cctx, true);
- }
spin_unlock_irqrestore(&hwq->lock, flags);
if (num_wakeup)
wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
@@ -854,7 +852,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
*/
if (is_virtfn)
goto skip_ctx_setup;
- if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
goto config_vf_res;
lvl = ctx->qpc_tbl.level;
@@ -907,6 +905,8 @@ config_vf_res:
req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
skip_ctx_setup:
+ if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
+ req.flags |= cpu_to_le16(CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED);
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -1113,6 +1113,7 @@ static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
pdev = rcfw->pdev;
creq_db = &rcfw->creq.creq_db;
+ creq_db->dbinfo.flags = 0;
creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
if (!creq_db->reg.bar_id)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 7b31bee3e..45996e60a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -141,9 +141,9 @@ struct bnxt_qplib_crsbe {
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
-#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
+#define CREQ_CMP_VALID(hdr, pass) \
(!!((hdr)->v & CREQ_BASE_V) == \
- !((raw_cons) & (cp_bit)))
+ !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 157db6b7e..dfc943fab 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -343,7 +343,7 @@ done:
hwq->cons = 0;
hwq->pdev = pdev;
hwq->depth = hwq_attr->depth;
- hwq->max_elements = depth;
+ hwq->max_elements = hwq->depth;
hwq->element_size = stride;
hwq->qe_ppg = pg_size / stride;
/* For direct access to the elements */
@@ -805,7 +805,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
dpit = &res->dpi_tbl;
reg = &dpit->wcreg;
- if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
/* Offset should come from L2 driver */
dbr_offset = dev_attr->l2_db_size;
dpit->ucreg.offset = dbr_offset;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 5949f004f..61628f7f1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -44,11 +44,23 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_58818 0xd818
+#define CHIP_NUM_57608 0x1760
+
+#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
+#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
+#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
struct bnxt_qplib_drv_modes {
u8 wqe_mode;
bool db_push;
bool dbr_pacing;
+ u32 toggle_bits;
+};
+
+enum bnxt_re_toggle_modes {
+ BNXT_QPLIB_CQ_TOGGLE_BIT = 0x1,
+ BNXT_QPLIB_SRQ_TOGGLE_BIT = 0x2,
};
struct bnxt_qplib_chip_ctx {
@@ -186,6 +198,20 @@ struct bnxt_qplib_db_info {
struct bnxt_qplib_hwq *hwq;
u32 xid;
u32 max_slot;
+ u32 flags;
+ u8 toggle;
+};
+
+enum bnxt_qplib_db_info_flags_mask {
+ BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
+ BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
+ BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL,
+ BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL,
+};
+
+enum bnxt_qplib_db_epoch_flag_shift {
+ BNXT_QPLIB_DB_EPOCH_CONS_SHIFT = BNXT_QPLIB_DBR_EPOCH_SHIFT,
+ BNXT_QPLIB_DB_EPOCH_PROD_SHIFT = (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1),
};
/* Tables */
@@ -288,6 +314,12 @@ struct bnxt_qplib_res {
struct bnxt_qplib_db_pacing_data *pacing_data;
};
+static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return (cctx->chip_num == CHIP_NUM_58818 ||
+ cctx->chip_num == CHIP_NUM_57608);
+}
+
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
return (cctx->chip_num == CHIP_NUM_57508 ||
@@ -295,15 +327,20 @@ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
cctx->chip_num == CHIP_NUM_57502);
}
+static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
+}
+
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
{
- return bnxt_qplib_is_chip_gen_p5(res->cctx) ?
+ return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
}
static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
{
- return bnxt_qplib_is_chip_gen_p5(cctx) ?
+ return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
RING_ALLOC_REQ_RING_TYPE_NQ :
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
@@ -396,39 +433,61 @@ void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
int bnxt_qplib_determine_atomics(struct pci_dev *dev);
-static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_hwq *hwq, u32 cnt)
+static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
+ struct bnxt_qplib_hwq *hwq, u32 cnt)
{
- hwq->prod = (hwq->prod + cnt) % hwq->depth;
+ /* move prod and flip the producer epoch bit on wrap-around */
+ hwq->prod += cnt;
+ if (hwq->prod >= hwq->depth) {
+ hwq->prod %= hwq->depth;
+ dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
+ }
}
-static inline void bnxt_qplib_hwq_incr_cons(struct bnxt_qplib_hwq *hwq,
- u32 cnt)
+static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
+ u32 *dbinfo_flags)
{
- hwq->cons = (hwq->cons + cnt) % hwq->depth;
+ /* move cons and flip the consumer epoch bit on wrap-around */
+ *cons += cnt;
+ if (*cons >= max_elements) {
+ *cons %= max_elements;
+ *dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
+ }
}
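
A short stand-alone trace of the consumer-side helper; with max_elements = 8 and a step of 3 the epoch bit flips exactly once per full pass, which is what the *_CMP_VALID() comparisons rely on:

#include <stdio.h>

int main(void)
{
	unsigned int cons = 0, flags = 0;
	int i;

	for (i = 0; i < 6; i++) {
		cons += 3;
		if (cons >= 8) {
			cons %= 8;
			flags ^= 1u;	/* EPOCH_CONS bit (shift 0) */
		}
		printf("cons=%u epoch=%u\n", cons, flags & 1u);
	}
	/* prints 3/0, 6/0, 1/1, 4/1, 7/1, 2/0 */
	return 0;
}
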
static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
bool arm)
{
- u32 key;
+ u32 key = 0;
- key = info->hwq->cons & (info->hwq->max_elements - 1);
- key |= (CMPL_DOORBELL_IDX_VALID |
+ key |= info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
(CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
if (!arm)
key |= CMPL_DOORBELL_MASK;
writel(key, info->db);
}
+#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \
+ (((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
+ (type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \
+ (((u32)(toggle)) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
+
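
The macro assembles the whole 64-bit doorbell in one expression: identity (xid | path | type | valid) in the high word, ring index in the low word with the epoch at bit 24 and the toggle at bit 25. A stand-alone sketch of that layout; xid_path_type stands for the already-masked identity bits and is an assumption of this model:

#include <stdint.h>

#define DBR_VALID	 (1u << 26)	/* BNXT_QPLIB_DBR_VALID (high word) */
#define DBR_EPOCH_SHIFT	 24
#define DBR_TOGGLE_SHIFT 25

static uint64_t init_dbhdr(uint32_t xid_path_type, uint32_t indx,
			   uint32_t epoch, uint32_t toggle)
{
	uint32_t low = indx | (epoch << DBR_EPOCH_SHIFT) |
		       (toggle << DBR_TOGGLE_SHIFT);

	return ((uint64_t)(xid_path_type | DBR_VALID) << 32) | low;
}
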
static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
u32 type)
{
u64 key = 0;
+ u32 indx;
+ u8 toggle = 0;
+
+ if (type == DBC_DBC_TYPE_CQ_ARMALL ||
+ type == DBC_DBC_TYPE_CQ_ARMSE)
+ toggle = info->toggle;
+
+ indx = (info->hwq->cons & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
+ BNXT_QPLIB_DB_EPOCH_CONS_SHIFT);
- key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
- key <<= 32;
- key |= (info->hwq->cons & (info->hwq->max_elements - 1)) &
- DBC_DBC_INDEX_MASK;
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
writeq(key, info->db);
}
@@ -436,10 +495,12 @@ static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
u32 type)
{
u64 key = 0;
+ u32 indx;
- key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
- key <<= 32;
- key |= ((info->hwq->prod / info->max_slot)) & DBC_DBC_INDEX_MASK;
+ indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
+ BNXT_QPLIB_DB_EPOCH_PROD_SHIFT));
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
writeq(key, info->db);
}
@@ -447,9 +508,12 @@ static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
u32 type)
{
u64 key = 0;
+ u8 toggle = 0;
- key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | type;
- key <<= 32;
+ if (type == DBC_DBC_TYPE_CQ_ARMENA || type == DBC_DBC_TYPE_SRQ_ARMENA)
+ toggle = info->toggle;
+ /* Index is always 0 for ARMENA doorbells */
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
writeq(key, info->priv_db);
}
@@ -458,9 +522,7 @@ static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
{
u64 key = 0;
- key = (info->xid & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | th;
- key <<= 32;
- key |= th & DBC_DBC_INDEX_MASK;
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, th, info->toggle);
writeq(key, info->priv_db);
}
@@ -471,7 +533,7 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
u32 type;
type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
- if (bnxt_qplib_is_chip_gen_p5(cctx))
+ if (bnxt_qplib_is_chip_gen_p5_p7(cctx))
bnxt_qplib_ring_db(info, type);
else
bnxt_qplib_ring_db32(info, arm);
@@ -483,6 +545,15 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
+ CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
+}
+
+#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
+
static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.dbr_pacing;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index a27b68515..8beeedd15 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -59,7 +59,7 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
u16 pcie_ctl2 = 0;
- if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
return false;
pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
@@ -133,7 +133,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
* reporting the max number
*/
attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
- attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
+ attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
6 : sb->max_sge;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
@@ -151,8 +151,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq_sges = sb->max_srq_sge;
attr->max_pkey = 1;
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
- attr->l2_db_size = (sb->l2_db_space_size + 1) *
- (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+ attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
@@ -934,7 +935,7 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
req->inactivity_th = cpu_to_le16(cc_param->inact_th);
/* For chip gen P5 onwards fill extended cmd and header */
- if (bnxt_qplib_is_chip_gen_p5(res->cctx)) {
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
struct roce_tlv *hdr;
u32 payload;
u32 chunks;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 84b5acd7f..605c9463c 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -555,7 +555,12 @@ struct cmdq_modify_qp {
__le16 flags;
__le16 cookie;
u8 resp_size;
- u8 reserved8;
+ u8 qp_type;
+ #define CMDQ_MODIFY_QP_QP_TYPE_RC 0x2UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_UD 0x4UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_RAW_ETHERTYPE 0x6UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_GSI 0x7UL
+ #define CMDQ_MODIFY_QP_QP_TYPE_LAST CMDQ_MODIFY_QP_QP_TYPE_GSI
__le64 resp_addr;
__le32 modify_mask;
#define CMDQ_MODIFY_QP_MODIFY_MASK_STATE 0x1UL
@@ -611,14 +616,12 @@ struct cmdq_modify_qp {
#define CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6 (0x3UL << 6)
#define CMDQ_MODIFY_QP_NETWORK_TYPE_LAST CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6
u8 access;
- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK \
- 0xffUL
- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT \
- 0
- #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
- #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_MASK 0xffUL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC_REMOTE_READ_REMOTE_WRITE_LOCAL_WRITE_SFT 0
+ #define CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE 0x1UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE 0x2UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ 0x4UL
+ #define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC 0x8UL
__le16 pkey;
__le32 qkey;
__le32 dgid[4];
@@ -673,6 +676,13 @@ struct cmdq_modify_qp {
#define CMDQ_MODIFY_QP_VLAN_PCP_SFT 13
__le64 irrq_addr;
__le64 orrq_addr;
+ __le32 ext_modify_mask;
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX 0x1UL
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_SCHQ_ID_VALID 0x2UL
+ __le32 ext_stats_ctx_id;
+ __le16 schq_id;
+ __le16 unused_0;
+ __le32 reserved32;
};
/* creq_modify_qp_resp (size:128b/16B) */
@@ -3075,6 +3085,17 @@ struct sq_psn_search_ext {
__le32 reserved32;
};
+/* sq_msn_search (size:64b/8B) */
+struct sq_msn_search {
+ __le64 start_idx_next_psn_start_psn;
+ #define SQ_MSN_SEARCH_START_PSN_MASK 0xffffffUL
+ #define SQ_MSN_SEARCH_START_PSN_SFT 0
+ #define SQ_MSN_SEARCH_NEXT_PSN_MASK 0xffffff000000ULL
+ #define SQ_MSN_SEARCH_NEXT_PSN_SFT 24
+ #define SQ_MSN_SEARCH_START_IDX_MASK 0xffff000000000000ULL
+ #define SQ_MSN_SEARCH_START_IDX_SFT 48
+};
+
/* sq_send (size:1024b/128B) */
struct sq_send {
u8 wqe_type;
@@ -3763,13 +3784,35 @@ struct cq_base {
#define CQ_BASE_CQE_TYPE_RES_UD (0x2UL << 1)
#define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (0x3UL << 1)
#define CQ_BASE_CQE_TYPE_RES_UD_CFA (0x4UL << 1)
+ #define CQ_BASE_CQE_TYPE_REQ_V3 (0x8UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RC_V3 (0x9UL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_V3 (0xaUL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1_V3 (0xbUL << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA_V3 (0xcUL << 1)
#define CQ_BASE_CQE_TYPE_NO_OP (0xdUL << 1)
#define CQ_BASE_CQE_TYPE_TERMINAL (0xeUL << 1)
#define CQ_BASE_CQE_TYPE_CUT_OFF (0xfUL << 1)
#define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
u8 status;
+ #define CQ_BASE_STATUS_OK 0x0UL
+ #define CQ_BASE_STATUS_BAD_RESPONSE_ERR 0x1UL
+ #define CQ_BASE_STATUS_LOCAL_LENGTH_ERR 0x2UL
+ #define CQ_BASE_STATUS_HW_LOCAL_LENGTH_ERR 0x3UL
+ #define CQ_BASE_STATUS_LOCAL_QP_OPERATION_ERR 0x4UL
+ #define CQ_BASE_STATUS_LOCAL_PROTECTION_ERR 0x5UL
+ #define CQ_BASE_STATUS_LOCAL_ACCESS_ERROR 0x6UL
+ #define CQ_BASE_STATUS_MEMORY_MGT_OPERATION_ERR 0x7UL
+ #define CQ_BASE_STATUS_REMOTE_INVALID_REQUEST_ERR 0x8UL
+ #define CQ_BASE_STATUS_REMOTE_ACCESS_ERR 0x9UL
+ #define CQ_BASE_STATUS_REMOTE_OPERATION_ERR 0xaUL
+ #define CQ_BASE_STATUS_RNR_NAK_RETRY_CNT_ERR 0xbUL
+ #define CQ_BASE_STATUS_TRANSPORT_RETRY_CNT_ERR 0xcUL
+ #define CQ_BASE_STATUS_WORK_REQUEST_FLUSHED_ERR 0xdUL
+ #define CQ_BASE_STATUS_HW_FLUSH_ERR 0xeUL
+ #define CQ_BASE_STATUS_OVERFLOW_ERR 0xfUL
+ #define CQ_BASE_STATUS_LAST CQ_BASE_STATUS_OVERFLOW_ERR
__le16 reserved16;
- __le32 reserved32;
+ __le32 opaque;
};
/* cq_req (size:256b/32B) */
@@ -4384,6 +4427,8 @@ struct cq_cutoff {
#define CQ_CUTOFF_CQE_TYPE_SFT 1
#define CQ_CUTOFF_CQE_TYPE_CUT_OFF (0xfUL << 1)
#define CQ_CUTOFF_CQE_TYPE_LAST CQ_CUTOFF_CQE_TYPE_CUT_OFF
+ #define CQ_CUTOFF_RESIZE_TOGGLE_MASK 0x60UL
+ #define CQ_CUTOFF_RESIZE_TOGGLE_SFT 5
u8 status;
#define CQ_CUTOFF_STATUS_OK 0x0UL
#define CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK
@@ -4435,6 +4480,8 @@ struct nq_srq_event {
#define NQ_SRQ_EVENT_TYPE_SFT 0
#define NQ_SRQ_EVENT_TYPE_SRQ_EVENT 0x32UL
#define NQ_SRQ_EVENT_TYPE_LAST NQ_SRQ_EVENT_TYPE_SRQ_EVENT
+ #define NQ_SRQ_EVENT_TOGGLE_MASK 0xc0UL
+ #define NQ_SRQ_EVENT_TOGGLE_SFT 6
u8 event;
#define NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT 0x1UL
#define NQ_SRQ_EVENT_EVENT_LAST NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 7352a1f5d..e2bdec32a 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -80,9 +80,19 @@ struct efa_pd {
u16 pdn;
};
+struct efa_mr_interconnect_info {
+ u16 recv_ic_id;
+ u16 rdma_read_ic_id;
+ u16 rdma_recv_ic_id;
+ u8 recv_ic_id_valid : 1;
+ u8 rdma_read_ic_id_valid : 1;
+ u8 rdma_recv_ic_id_valid : 1;
+};
+
struct efa_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
+ struct efa_mr_interconnect_info ic_info;
};
struct efa_cq {
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 9c65bd27b..7377c8a9f 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -415,6 +415,32 @@ struct efa_admin_reg_mr_resp {
* memory region
*/
u32 r_key;
+
+ /*
+ * Mask indicating which fields have valid values
+ * 0 : recv_ic_id
+ * 1 : rdma_read_ic_id
+ * 2 : rdma_recv_ic_id
+ */
+ u8 validity;
+
+ /*
+ * Physical interconnect used by the device to reach the MR for receive
+ * operation
+ */
+ u8 recv_ic_id;
+
+ /*
+ * Physical interconnect used by the device to reach the MR for RDMA
+ * read operation
+ */
+ u8 rdma_read_ic_id;
+
+ /*
+ * Physical interconnect used by the device to reach the MR for RDMA
+ * write receive operation
+ */
+ u8 rdma_recv_ic_id;
};
struct efa_admin_dereg_mr_cmd {
@@ -999,6 +1025,11 @@ struct efa_admin_host_info {
#define EFA_ADMIN_REG_MR_CMD_REMOTE_WRITE_ENABLE_MASK BIT(1)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
+/* reg_mr_resp */
+#define EFA_ADMIN_REG_MR_RESP_RECV_IC_ID_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID_MASK BIT(1)
+#define EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID_MASK BIT(2)
+
/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 576811885..d3398c7b0 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -270,6 +270,15 @@ int efa_com_register_mr(struct efa_com_dev *edev,
result->l_key = cmd_completion.l_key;
result->r_key = cmd_completion.r_key;
+ result->ic_info.recv_ic_id = cmd_completion.recv_ic_id;
+ result->ic_info.rdma_read_ic_id = cmd_completion.rdma_read_ic_id;
+ result->ic_info.rdma_recv_ic_id = cmd_completion.rdma_recv_ic_id;
+ result->ic_info.recv_ic_id_valid = EFA_GET(&cmd_completion.validity,
+ EFA_ADMIN_REG_MR_RESP_RECV_IC_ID);
+ result->ic_info.rdma_read_ic_id_valid = EFA_GET(&cmd_completion.validity,
+ EFA_ADMIN_REG_MR_RESP_RDMA_READ_IC_ID);
+ result->ic_info.rdma_recv_ic_id_valid = EFA_GET(&cmd_completion.validity,
+ EFA_ADMIN_REG_MR_RESP_RDMA_RECV_IC_ID);
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index fc97f37bb..720a99ba0 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -199,6 +199,15 @@ struct efa_com_reg_mr_params {
u8 indirect;
};
+struct efa_com_mr_interconnect_info {
+ u16 recv_ic_id;
+ u16 rdma_read_ic_id;
+ u16 rdma_recv_ic_id;
+ u8 recv_ic_id_valid : 1;
+ u8 rdma_read_ic_id_valid : 1;
+ u8 rdma_recv_ic_id_valid : 1;
+};
+
struct efa_com_reg_mr_result {
/*
* To be used in conjunction with local buffers references in SQ and
@@ -210,6 +219,7 @@ struct efa_com_reg_mr_result {
* accessed memory region
*/
u32 r_key;
+ struct efa_com_mr_interconnect_info ic_info;
};
struct efa_com_dereg_mr_params {
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 15ee92081..7b1910a86 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/uverbs_ioctl.h>
#include "efa.h"
@@ -36,6 +37,8 @@ MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
+extern const struct uapi_definition efa_uapi_defs[];
+
/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
struct efa_admin_aenq_entry *aenq_e)
@@ -432,6 +435,8 @@ static int efa_ib_device_add(struct efa_dev *dev)
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
+ dev->ibdev.driver_def = efa_uapi_defs;
+
err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
if (err)
goto err_destroy_eqs;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 0f8ca99d0..2f412db2e 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/dma-buf.h>
@@ -13,6 +13,9 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>
+#define UVERBS_MODULE_NAME efa_ib
+#include <rdma/uverbs_named_ioctl.h>
+#include <rdma/ib_user_ioctl_cmds.h>
#include "efa.h"
#include "efa_io_defs.h"
@@ -1653,6 +1656,12 @@ static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
mr->ibmr.lkey = result.l_key;
mr->ibmr.rkey = result.r_key;
mr->ibmr.length = length;
+ mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
+ mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
+ mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
+ mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
+ mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
+ mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
return 0;
@@ -1735,6 +1744,39 @@ err_out:
return ERR_PTR(err);
}
+static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
+ struct efa_mr *mr = to_emr(ibmr);
+ u16 ic_id_validity = 0;
+ int ret;
+
+ ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+ &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
+ if (ret)
+ return ret;
+
+ if (mr->ic_info.recv_ic_id_valid)
+ ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
+ if (mr->ic_info.rdma_read_ic_id_valid)
+ ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
+ if (mr->ic_info.rdma_recv_ic_id_valid)
+ ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;
+
+ return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ &ic_id_validity, sizeof(ic_id_validity));
+}
+
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibmr->device);
@@ -2157,3 +2199,30 @@ enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_UNSPECIFIED;
}
+DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
+ UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
+ UVERBS_OBJECT_MR,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(efa_mr,
+ UVERBS_OBJECT_MR,
+ &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));
+
+const struct uapi_definition efa_uapi_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
+ &efa_mr),
+ {},
+};
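
Userspace reaches EFA_IB_METHOD_MR_QUERY through rdma-core's EFA provider. A hedged sketch of the consumer side follows; the efadv wrapper and attribute names are recalled from rdma-core and should be treated as assumptions, but the pattern — query, then gate each ID on the validity bitmap mirroring EFA_QUERY_MR_VALIDITY_* — follows directly from the handler above:

#include <stdio.h>
#include <infiniband/efadv.h>

/* Hedged userspace sketch: only trust an interconnect ID whose
 * validity bit is set (names assumed from rdma-core's efadv API).
 */
static void print_recv_ic_id(struct ibv_mr *mr)
{
	struct efadv_mr_attr attr = {};

	if (efadv_query_mr(mr, &attr, sizeof(attr)))
		return;

	if (attr.ic_id_validity & EFADV_MR_ATTR_VALIDITY_RECV_IC_ID)
		printf("recv ic_id: %u\n", attr.recv_ic_id);
}
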
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index f19011184..5df401a30 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -212,6 +212,8 @@ struct erdma_dev {
atomic_t num_ctx;
struct list_head cep_list;
+
+ struct dma_pool *resp_pool;
};
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 9d316fdc6..3212a1222 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -11,8 +11,6 @@
#include <linux/types.h>
/* PCIe device related definition. */
-#define PCI_VENDOR_ID_ALIBABA 0x1ded
-
#define ERDMA_PCI_WIDTH 64
#define ERDMA_FUNC_BAR 0
#define ERDMA_MISX_BAR 2
@@ -146,6 +144,7 @@ enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_DESTROY_EQ = 1,
CMDQ_OPCODE_QUERY_FW_INFO = 2,
CMDQ_OPCODE_CONF_MTU = 3,
+ CMDQ_OPCODE_GET_STATS = 4,
CMDQ_OPCODE_CONF_DEVICE = 5,
CMDQ_OPCODE_ALLOC_DB = 8,
CMDQ_OPCODE_FREE_DB = 9,
@@ -357,6 +356,44 @@ struct erdma_cmdq_reflush_req {
u32 rq_pi;
};
+#define ERDMA_HW_RESP_SIZE 256
+
+struct erdma_cmdq_query_req {
+ u64 hdr;
+ u32 rsvd;
+ u32 index;
+
+ u64 target_addr;
+ u32 target_length;
+};
+
+#define ERDMA_HW_RESP_MAGIC 0x5566
+
+struct erdma_cmdq_query_resp_hdr {
+ u16 magic;
+ u8 ver;
+ u8 length;
+
+ u32 index;
+ u32 rsvd[2];
+};
+
+struct erdma_cmdq_query_stats_resp {
+ struct erdma_cmdq_query_resp_hdr hdr;
+
+ u64 tx_req_cnt;
+ u64 tx_packets_cnt;
+ u64 tx_bytes_cnt;
+ u64 tx_drop_packets_cnt;
+ u64 tx_bps_meter_drop_packets_cnt;
+ u64 tx_pps_meter_drop_packets_cnt;
+ u64 rx_packets_cnt;
+ u64 rx_bytes_cnt;
+ u64 rx_drop_packets_cnt;
+ u64 rx_bps_meter_drop_packets_cnt;
+ u64 rx_pps_meter_drop_packets_cnt;
+};
+
/* cap qword 0 definition */
#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
#define ERDMA_CMD_DEV_CAP_FLAGS_MASK GENMASK_ULL(31, 24)
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 0880c79a9..472939172 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -172,14 +172,30 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
+ dev->resp_pool = dma_pool_create("erdma_resp_pool", &pdev->dev,
+ ERDMA_HW_RESP_SIZE, ERDMA_HW_RESP_SIZE,
+ 0);
+ if (!dev->resp_pool)
+ return -ENOMEM;
+
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret)
- return ret;
+ goto destroy_pool;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
+
+destroy_pool:
+ dma_pool_destroy(dev->resp_pool);
+
+ return ret;
+}
+
+static void erdma_device_uninit(struct erdma_dev *dev)
+{
+ dma_pool_destroy(dev->resp_pool);
}
static void erdma_hw_reset(struct erdma_dev *dev)
@@ -273,7 +289,7 @@ static int erdma_probe_dev(struct pci_dev *pdev)
err = erdma_request_vectors(dev);
if (err)
- goto err_iounmap_func_bar;
+ goto err_uninit_device;
err = erdma_comm_irq_init(dev);
if (err)
@@ -314,6 +330,9 @@ err_uninit_comm_irq:
err_free_vectors:
pci_free_irq_vectors(dev->pdev);
+err_uninit_device:
+ erdma_device_uninit(dev);
+
err_iounmap_func_bar:
devm_iounmap(&pdev->dev, dev->func_bar);
@@ -339,6 +358,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
erdma_aeq_destroy(dev);
erdma_comm_irq_uninit(dev);
pci_free_irq_vectors(dev->pdev);
+ erdma_device_uninit(dev);
devm_iounmap(&pdev->dev, dev->func_bar);
pci_release_selected_regions(pdev, ERDMA_BAR_MASK);
@@ -448,6 +468,7 @@ static const struct ib_device_ops erdma_device_ops = {
.driver_id = RDMA_DRIVER_ERDMA,
.uverbs_abi_ver = ERDMA_ABI_VERSION,
+ .alloc_hw_port_stats = erdma_alloc_hw_port_stats,
.alloc_mr = erdma_ib_alloc_mr,
.alloc_pd = erdma_alloc_pd,
.alloc_ucontext = erdma_alloc_ucontext,
@@ -459,6 +480,7 @@ static const struct ib_device_ops erdma_device_ops = {
.destroy_cq = erdma_destroy_cq,
.destroy_qp = erdma_destroy_qp,
.get_dma_mr = erdma_get_dma_mr,
+ .get_hw_stats = erdma_get_hw_stats,
.get_port_immutable = erdma_get_port_immutable,
.iw_accept = erdma_accept,
.iw_add_ref = erdma_qp_get_ref,
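
The new resp_pool is a standard dma_pool: created once per device with matching size and alignment (ERDMA_HW_RESP_SIZE for both, so every allocation is naturally aligned), then drawn from per command. A self-contained sketch of the round trip, against a hypothetical device:

#include <linux/dmapool.h>
#include <linux/pci.h>

/* Minimal dma_pool round trip mirroring resp_pool above. */
static int resp_pool_demo(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	pool = dma_pool_create("demo_resp_pool", &pdev->dev, 256, 256, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_zalloc(pool, GFP_KERNEL, &dma);	/* zeroed, DMA-able */
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... pass 'dma' to the device, parse the response in 'buf' ... */

	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}
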
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index c31794756..23dfc0160 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -1708,3 +1708,93 @@ void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
ib_dispatch_event(&event);
}
+
+enum counters {
+ ERDMA_STATS_TX_REQS_CNT,
+ ERDMA_STATS_TX_PACKETS_CNT,
+ ERDMA_STATS_TX_BYTES_CNT,
+ ERDMA_STATS_TX_DISABLE_DROP_CNT,
+ ERDMA_STATS_TX_BPS_METER_DROP_CNT,
+ ERDMA_STATS_TX_PPS_METER_DROP_CNT,
+
+ ERDMA_STATS_RX_PACKETS_CNT,
+ ERDMA_STATS_RX_BYTES_CNT,
+ ERDMA_STATS_RX_DISABLE_DROP_CNT,
+ ERDMA_STATS_RX_BPS_METER_DROP_CNT,
+ ERDMA_STATS_RX_PPS_METER_DROP_CNT,
+
+ ERDMA_STATS_MAX
+};
+
+static const struct rdma_stat_desc erdma_descs[] = {
+ [ERDMA_STATS_TX_REQS_CNT].name = "tx_reqs_cnt",
+ [ERDMA_STATS_TX_PACKETS_CNT].name = "tx_packets_cnt",
+ [ERDMA_STATS_TX_BYTES_CNT].name = "tx_bytes_cnt",
+ [ERDMA_STATS_TX_DISABLE_DROP_CNT].name = "tx_disable_drop_cnt",
+ [ERDMA_STATS_TX_BPS_METER_DROP_CNT].name = "tx_bps_limit_drop_cnt",
+ [ERDMA_STATS_TX_PPS_METER_DROP_CNT].name = "tx_pps_limit_drop_cnt",
+ [ERDMA_STATS_RX_PACKETS_CNT].name = "rx_packets_cnt",
+ [ERDMA_STATS_RX_BYTES_CNT].name = "rx_bytes_cnt",
+ [ERDMA_STATS_RX_DISABLE_DROP_CNT].name = "rx_disable_drop_cnt",
+ [ERDMA_STATS_RX_BPS_METER_DROP_CNT].name = "rx_bps_limit_drop_cnt",
+ [ERDMA_STATS_RX_PPS_METER_DROP_CNT].name = "rx_pps_limit_drop_cnt",
+};
+
+struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
+ u32 port_num)
+{
+ return rdma_alloc_hw_stats_struct(erdma_descs, ERDMA_STATS_MAX,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int erdma_query_hw_stats(struct erdma_dev *dev,
+ struct rdma_hw_stats *stats)
+{
+ struct erdma_cmdq_query_stats_resp *resp;
+ struct erdma_cmdq_query_req req;
+ dma_addr_t dma_addr;
+ int err;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_GET_STATS);
+
+ resp = dma_pool_zalloc(dev->resp_pool, GFP_KERNEL, &dma_addr);
+ if (!resp)
+ return -ENOMEM;
+
+ req.target_addr = dma_addr;
+ req.target_length = ERDMA_HW_RESP_SIZE;
+
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+ if (err)
+ goto out;
+
+ if (resp->hdr.magic != ERDMA_HW_RESP_MAGIC) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&stats->value[0], &resp->tx_req_cnt,
+ sizeof(u64) * stats->num_counters);
+
+out:
+ dma_pool_free(dev->resp_pool, resp, dma_addr);
+
+ return err;
+}
+
+int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct erdma_dev *dev = to_edev(ibdev);
+ int ret;
+
+ if (port == 0)
+ return 0;
+
+ ret = erdma_query_hw_stats(dev, stats);
+ if (ret)
+ return ret;
+
+ return stats->num_counters;
+}
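
The bulk memcpy in erdma_query_hw_stats() assumes the eleven u64 counters in the firmware response follow the header in exactly the enum order above. That holds for the structures as defined, and a compile-time guard — an editorial sketch, not part of the patch — would pin the assumption:

#include <linux/build_bug.h>

/* Eleven u64 counters, nothing else, after the response header. */
static_assert(sizeof(struct erdma_cmdq_query_stats_resp) -
	      sizeof(struct erdma_cmdq_query_resp_hdr) ==
	      ERDMA_STATS_MAX * sizeof(u64));
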
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index eb9c0f92f..db6018529 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -361,5 +361,9 @@ int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
+struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
+ u32 port_num);
+int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port, int index);
#endif
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 6419872f9..cf2d29098 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -491,8 +491,8 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
if (unlikely(tinfo->tidcnt > fd->tid_used))
return -EINVAL;
- tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
- sizeof(tidinfo[0]) * tinfo->tidcnt);
+ tidinfo = memdup_array_user(u64_to_user_ptr(tinfo->tidlist),
+ tinfo->tidcnt, sizeof(tidinfo[0]));
if (IS_ERR(tidinfo))
return PTR_ERR(tidinfo);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 6a4aa59c0..b72625283 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -494,8 +494,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
* equal to the pkt count. However, there is no way to
* tell at this point.
*/
- tmp = memdup_user(iovec[idx].iov_base,
- ntids * sizeof(*req->tids));
+ tmp = memdup_array_user(iovec[idx].iov_base,
+ ntids, sizeof(*req->tids));
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
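
Both hfi1 conversions swap an open-coded `count * size` multiplication for memdup_array_user(), which rejects products that overflow size_t instead of silently truncating the copy length. Its core is essentially the following (a sketch of the helper's logic, assuming the string.h implementation):

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/err.h>

/* Overflow-checked array copy from userspace, as memdup_array_user()
 * provides: n * size is computed with check_mul_overflow() first.
 */
static void *memdup_array_user_sketch(const void __user *src,
				      size_t n, size_t size)
{
	size_t nbytes;

	if (check_mul_overflow(n, size, &nbytes))
		return ERR_PTR(-EOVERFLOW);

	return memdup_user(src, nbytes);
}
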
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index a7d259238..be1e1cdbc 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -7,7 +7,8 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
- hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
+ hns_roce_debugfs.o
ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 3df032ddd..b4209b6ae 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -57,6 +57,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
+ struct hns_roce_ib_create_ah_resp resp = {};
struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0;
u32 max_sl;
@@ -92,11 +93,21 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
&ah->av.vlan_id, NULL);
if (ret)
- return ret;
+ goto err_out;
ah->av.vlan_en = ah->av.vlan_id < VLAN_N_VID;
}
+ if (udata) {
+ memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ }
+
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AH_CREATE_ERR_CNT]);
+
return ret;
}
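
This hunk introduces the error-accounting shape repeated across the hns verbs below (cq, xrcd, qp, srq, mr, ucontext): funnel every failure through one exit label and bump the matching dfx counter there, so the debugfs sw_stat file sees all paths. A generic sketch with hypothetical steps:

#include <linux/atomic.h>
#include <linux/errno.h>

static int step_one(void) { return 0; }		/* hypothetical sub-ops */
static int step_two(void) { return -EINVAL; }

static int op_with_err_counter(atomic64_t *err_cnt)
{
	int ret;

	ret = step_one();
	if (ret)
		goto err_out;

	ret = step_two();

err_out:
	if (ret)
		atomic64_inc(err_cnt);	/* every failure path lands here */

	return ret;
}
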
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 864413607..873e8a69a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -41,7 +41,15 @@
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
struct hns_roce_mbox_msg *mbox_msg)
{
- return hr_dev->hw->post_mbox(hr_dev, mbox_msg);
+ int ret;
+
+ ret = hr_dev->hw->post_mbox(hr_dev, mbox_msg);
+ if (ret)
+ return ret;
+
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POSTED_CNT]);
+
+ return 0;
}
/* this should be called with "poll_sem" */
@@ -58,7 +66,13 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
return ret;
}
- return hr_dev->hw->poll_mbox_done(hr_dev);
+ ret = hr_dev->hw->poll_mbox_done(hr_dev);
+ if (ret)
+ return ret;
+
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POLLED_CNT]);
+
+ return 0;
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,
@@ -89,6 +103,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
context->out_param = out_param;
complete(&context->done);
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_EVENT_CNT]);
}
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 736dc2f99..1b6d16af8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -363,29 +363,31 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct hns_roce_ib_create_cq ucmd = {};
int ret;
- if (attr->flags)
- return -EOPNOTSUPP;
+ if (attr->flags) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
ret = verify_cq_create_attr(hr_dev, attr);
if (ret)
- return ret;
+ goto err_out;
if (udata) {
ret = get_cq_ucmd(hr_cq, udata, &ucmd);
if (ret)
- return ret;
+ goto err_out;
}
set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
ret = set_cqe_size(hr_cq, udata, &ucmd);
if (ret)
- return ret;
+ goto err_out;
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
if (ret) {
ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
- return ret;
+ goto err_out;
}
ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
@@ -430,6 +432,9 @@ err_cq_db:
free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
free_cq_buf(hr_dev, hr_cq);
+err_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_CREATE_ERR_CNT]);
+
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.c b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
new file mode 100644
index 000000000..e8febb40f
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2023 Hisilicon Limited.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hns_roce_device.h"
+
+static struct dentry *hns_roce_dbgfs_root;
+
+static int hns_debugfs_seqfile_open(struct inode *inode, struct file *f)
+{
+ struct hns_debugfs_seqfile *seqfile = inode->i_private;
+
+ return single_open(f, seqfile->read, seqfile->data);
+}
+
+static const struct file_operations hns_debugfs_seqfile_fops = {
+ .owner = THIS_MODULE,
+ .open = hns_debugfs_seqfile_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static void init_debugfs_seqfile(struct hns_debugfs_seqfile *seq,
+ const char *name, struct dentry *parent,
+ int (*read_fn)(struct seq_file *, void *),
+ void *data)
+{
+ debugfs_create_file(name, 0400, parent, seq, &hns_debugfs_seqfile_fops);
+
+ seq->read = read_fn;
+ seq->data = data;
+}
+
+static const char * const sw_stat_info[] = {
+ [HNS_ROCE_DFX_AEQE_CNT] = "aeqe",
+ [HNS_ROCE_DFX_CEQE_CNT] = "ceqe",
+ [HNS_ROCE_DFX_CMDS_CNT] = "cmds",
+ [HNS_ROCE_DFX_CMDS_ERR_CNT] = "cmds_err",
+ [HNS_ROCE_DFX_MBX_POSTED_CNT] = "posted_mbx",
+ [HNS_ROCE_DFX_MBX_POLLED_CNT] = "polled_mbx",
+ [HNS_ROCE_DFX_MBX_EVENT_CNT] = "mbx_event",
+ [HNS_ROCE_DFX_QP_CREATE_ERR_CNT] = "qp_create_err",
+ [HNS_ROCE_DFX_QP_MODIFY_ERR_CNT] = "qp_modify_err",
+ [HNS_ROCE_DFX_CQ_CREATE_ERR_CNT] = "cq_create_err",
+ [HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT] = "cq_modify_err",
+ [HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT] = "srq_create_err",
+ [HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT] = "srq_modify_err",
+ [HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT] = "xrcd_alloc_err",
+ [HNS_ROCE_DFX_MR_REG_ERR_CNT] = "mr_reg_err",
+ [HNS_ROCE_DFX_MR_REREG_ERR_CNT] = "mr_rereg_err",
+ [HNS_ROCE_DFX_AH_CREATE_ERR_CNT] = "ah_create_err",
+ [HNS_ROCE_DFX_MMAP_ERR_CNT] = "mmap_err",
+ [HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT] = "uctx_alloc_err",
+};
+
+static int sw_stat_debugfs_show(struct seq_file *file, void *offset)
+{
+ struct hns_roce_dev *hr_dev = file->private;
+ int i;
+
+ for (i = 0; i < HNS_ROCE_DFX_CNT_TOTAL; i++)
+ seq_printf(file, "%-20s --- %lld\n", sw_stat_info[i],
+ atomic64_read(&hr_dev->dfx_cnt[i]));
+
+ return 0;
+}
+
+static void create_sw_stat_debugfs(struct hns_roce_dev *hr_dev,
+ struct dentry *parent)
+{
+ struct hns_sw_stat_debugfs *dbgfs = &hr_dev->dbgfs.sw_stat_root;
+
+ dbgfs->root = debugfs_create_dir("sw_stat", parent);
+
+ init_debugfs_seqfile(&dbgfs->sw_stat, "sw_stat", dbgfs->root,
+ sw_stat_debugfs_show, hr_dev);
+}
+
+/* debugfs for device */
+void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_dev_debugfs *dbgfs = &hr_dev->dbgfs;
+
+ dbgfs->root = debugfs_create_dir(dev_name(&hr_dev->ib_dev.dev),
+ hns_roce_dbgfs_root);
+
+ create_sw_stat_debugfs(hr_dev, dbgfs->root);
+}
+
+void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev)
+{
+ debugfs_remove_recursive(hr_dev->dbgfs.root);
+}
+
+/* debugfs for hns module */
+void hns_roce_init_debugfs(void)
+{
+ hns_roce_dbgfs_root = debugfs_create_dir("hns_roce", NULL);
+}
+
+void hns_roce_cleanup_debugfs(void)
+{
+ debugfs_remove_recursive(hns_roce_dbgfs_root);
+ hns_roce_dbgfs_root = NULL;
+}
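
The open/read/release boilerplate in this file exists because one fops serves many files, each binding its own read callback and data through i_private. For a single fixed file, seq_file's stock helper generates the same plumbing; a hypothetical minimal example, not part of the patch:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "hello from debugfs\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);	/* generates demo_open and demo_fops */

/* usage: debugfs_create_file("demo", 0400, parent, NULL, &demo_fops); */
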
diff --git a/drivers/infiniband/hw/hns/hns_roce_debugfs.h b/drivers/infiniband/hw/hns/hns_roce_debugfs.h
new file mode 100644
index 000000000..98e87bd31
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_debugfs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2023 Hisilicon Limited.
+ */
+
+#ifndef __HNS_ROCE_DEBUGFS_H
+#define __HNS_ROCE_DEBUGFS_H
+
+/* debugfs seqfile */
+struct hns_debugfs_seqfile {
+ int (*read)(struct seq_file *seq, void *data);
+ void *data;
+};
+
+struct hns_sw_stat_debugfs {
+ struct dentry *root;
+ struct hns_debugfs_seqfile sw_stat;
+};
+
+/* Debugfs for device */
+struct hns_roce_dev_debugfs {
+ struct dentry *root;
+ struct hns_sw_stat_debugfs sw_stat_root;
+};
+
+struct hns_roce_dev;
+
+void hns_roce_init_debugfs(void);
+void hns_roce_cleanup_debugfs(void);
+void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev);
+void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev);
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f08417d37..46f8a6310 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -35,6 +35,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
+#include "hns_roce_debugfs.h"
#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30
@@ -870,6 +871,29 @@ enum hns_roce_hw_pkt_stat_index {
HNS_ROCE_HW_CNT_TOTAL
};
+enum hns_roce_sw_dfx_stat_index {
+ HNS_ROCE_DFX_AEQE_CNT,
+ HNS_ROCE_DFX_CEQE_CNT,
+ HNS_ROCE_DFX_CMDS_CNT,
+ HNS_ROCE_DFX_CMDS_ERR_CNT,
+ HNS_ROCE_DFX_MBX_POSTED_CNT,
+ HNS_ROCE_DFX_MBX_POLLED_CNT,
+ HNS_ROCE_DFX_MBX_EVENT_CNT,
+ HNS_ROCE_DFX_QP_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_QP_MODIFY_ERR_CNT,
+ HNS_ROCE_DFX_CQ_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT,
+ HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT,
+ HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT,
+ HNS_ROCE_DFX_MR_REG_ERR_CNT,
+ HNS_ROCE_DFX_MR_REREG_ERR_CNT,
+ HNS_ROCE_DFX_AH_CREATE_ERR_CNT,
+ HNS_ROCE_DFX_MMAP_ERR_CNT,
+ HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT,
+ HNS_ROCE_DFX_CNT_TOTAL
+};
+
struct hns_roce_hw {
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
@@ -980,6 +1004,8 @@ struct hns_roce_dev {
u32 is_vf;
u32 cong_algo_tmpl_id;
u64 dwqe_page;
+ struct hns_roce_dev_debugfs dbgfs;
+ atomic64_t *dfx_cnt;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 81d6d4331..34e58e09b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1297,6 +1297,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
/* Write to hardware */
roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
+
do {
if (hns_roce_cmq_csq_done(hr_dev))
break;
@@ -1334,6 +1336,9 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
spin_unlock_bh(&csq->lock);
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
+
return ret;
}
@@ -2055,6 +2060,7 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
/* Apply all loaded caps before setting to hardware */
static void apply_func_caps(struct hns_roce_dev *hr_dev)
{
+#define MAX_GID_TBL_LEN 256
struct hns_roce_caps *caps = &hr_dev->caps;
struct hns_roce_v2_priv *priv = hr_dev->priv;
@@ -2090,8 +2096,14 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->gid_table_len[0] = caps->gmv_bt_num *
- (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
+
+ /* It's meaningless to support excessively large gid_table_len,
+ * as the type of sgid_index in kernel struct ib_global_route
+ * and userspace struct ibv_global_route are u8/uint8_t (0-255).
+ */
+ caps->gid_table_len[0] = min_t(u32, MAX_GID_TBL_LEN,
+ caps->gmv_bt_num *
+ (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
caps->gmv_entry_sz);
@@ -5675,19 +5687,25 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
struct hns_roce_srq_context *srq_context;
struct hns_roce_srq_context *srqc_mask;
struct hns_roce_cmd_mailbox *mailbox;
- int ret;
+ int ret = 0;
/* Resizing SRQs is not supported yet */
- if (srq_attr_mask & IB_SRQ_MAX_WR)
- return -EOPNOTSUPP;
+ if (srq_attr_mask & IB_SRQ_MAX_WR) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit > srq->wqe_cnt)
- return -EINVAL;
+ if (srq_attr->srq_limit > srq->wqe_cnt) {
+ ret = -EINVAL;
+ goto out;
+ }
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto out;
+ }
srq_context = mailbox->buf;
srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
@@ -5700,15 +5718,17 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0,
HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret) {
+ if (ret)
ibdev_err(&hr_dev->ib_dev,
"failed to handle cmd of modifying SRQ, ret = %d.\n",
ret);
- return ret;
- }
}
- return 0;
+out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT]);
+
+ return ret;
}
static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
@@ -5752,8 +5772,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ ret = PTR_ERR_OR_ZERO(mailbox);
+ if (ret)
+ goto err_out;
cq_context = mailbox->buf;
cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
@@ -5783,6 +5804,10 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
"failed to process cmd when modifying CQ, ret = %d.\n",
ret);
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT]);
+
return ret;
}
@@ -6022,6 +6047,8 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
aeqe_found = IRQ_HANDLED;
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]);
+
hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
aeqe = next_aeqe_sw_v2(eq);
@@ -6063,6 +6090,7 @@ static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index;
ceqe_found = IRQ_HANDLED;
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]);
ceqe = next_ceqe_sw_v2(eq);
}
@@ -6465,15 +6493,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
/* irq contains: abnormal + AEQ + CEQ */
for (j = 0; j < other_num; j++)
snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
- "hns-abn-%d", j);
+ "hns-%s-abn-%d", pci_name(hr_dev->pci_dev), j);
for (j = other_num; j < (other_num + aeq_num); j++)
snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
- "hns-aeq-%d", j - other_num);
+ "hns-%s-aeq-%d", pci_name(hr_dev->pci_dev), j - other_num);
for (j = (other_num + aeq_num); j < irq_num; j++)
snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
- "hns-ceq-%d", j - other_num - aeq_num);
+ "hns-%s-ceq-%d", pci_name(hr_dev->pci_dev),
+ j - other_num - aeq_num);
for (j = 0; j < irq_num; j++) {
if (j < other_num)
@@ -6971,12 +7000,14 @@ static struct hnae3_client hns_roce_hw_v2_client = {
static int __init hns_roce_hw_v2_init(void)
{
+ hns_roce_init_debugfs();
return hnae3_register_client(&hns_roce_hw_v2_client);
}
static void __exit hns_roce_hw_v2_exit(void)
{
hnae3_unregister_client(&hns_roce_hw_v2_client);
+ hns_roce_cleanup_debugfs();
}
module_init(hns_roce_hw_v2_init);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index a4a10a4e1..b55fe6911 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -361,10 +361,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
struct hns_roce_ib_alloc_ucontext_resp resp = {};
struct hns_roce_ib_alloc_ucontext ucmd = {};
- int ret;
+ int ret = -EAGAIN;
if (!hr_dev->active)
- return -EAGAIN;
+ goto error_out;
resp.qp_tab_size = hr_dev->caps.num_qps;
resp.srq_tab_size = hr_dev->caps.num_srqs;
@@ -372,7 +372,7 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
ret = ib_copy_from_udata(&ucmd, udata,
min(udata->inlen, sizeof(ucmd)));
if (ret)
- return ret;
+ goto error_out;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;
@@ -396,7 +396,7 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
- goto error_fail_uar_alloc;
+ goto error_out;
ret = hns_roce_alloc_uar_entry(uctx);
if (ret)
@@ -423,7 +423,9 @@ error_fail_copy_to_udata:
error_fail_uar_entry:
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
-error_fail_uar_alloc:
+error_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT]);
+
return ret;
}
@@ -439,6 +441,7 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
struct rdma_user_mmap_entry *rdma_entry;
struct hns_user_mmap_entry *entry;
phys_addr_t pfn;
@@ -446,8 +449,10 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
int ret;
rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
- if (!rdma_entry)
+ if (!rdma_entry) {
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
return -EINVAL;
+ }
entry = to_hns_mmap(rdma_entry);
pfn = entry->address >> PAGE_SHIFT;
@@ -467,6 +472,9 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
out:
rdma_user_mmap_entry_put(rdma_entry);
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]);
+
return ret;
}
@@ -1009,6 +1017,21 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}
+static int hns_roce_alloc_dfx_cnt(struct hns_roce_dev *hr_dev)
+{
+ hr_dev->dfx_cnt = kvcalloc(HNS_ROCE_DFX_CNT_TOTAL, sizeof(atomic64_t),
+ GFP_KERNEL);
+ if (!hr_dev->dfx_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void hns_roce_dealloc_dfx_cnt(struct hns_roce_dev *hr_dev)
+{
+ kvfree(hr_dev->dfx_cnt);
+}
+
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
@@ -1016,11 +1039,15 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
hr_dev->is_reset = false;
+ ret = hns_roce_alloc_dfx_cnt(hr_dev);
+ if (ret)
+ return ret;
+
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
dev_err(dev, "init RoCE Command Queue failed!\n");
- return ret;
+ goto error_failed_alloc_dfx_cnt;
}
}
@@ -1079,6 +1106,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (ret)
goto error_failed_register_device;
+ hns_roce_register_debugfs(hr_dev);
+
return 0;
error_failed_register_device:
@@ -1103,11 +1132,15 @@ error_failed_cmd_init:
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
+error_failed_alloc_dfx_cnt:
+ hns_roce_dealloc_dfx_cnt(hr_dev);
+
return ret;
}
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
+ hns_roce_unregister_debugfs(hr_dev);
hns_roce_unregister_device(hr_dev);
if (hr_dev->hw->hw_exit)
@@ -1122,6 +1155,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
+ hns_roce_dealloc_dfx_cnt(hr_dev);
}
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 14376490a..91cd58048 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -228,8 +228,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int ret;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ if (!mr) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
mr->iova = virt_addr;
mr->size = length;
@@ -259,6 +261,9 @@ err_alloc_key:
free_mr_key(hr_dev, mr);
err_alloc_mr:
kfree(mr);
+err_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REG_ERR_CNT]);
+
return ERR_PTR(ret);
}
@@ -274,12 +279,15 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
unsigned long mtpt_idx;
int ret;
- if (!mr->enabled)
- return ERR_PTR(-EINVAL);
+ if (!mr->enabled) {
+ ret = -EINVAL;
+ goto err_out;
+ }
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return ERR_CAST(mailbox);
+ ret = PTR_ERR_OR_ZERO(mailbox);
+ if (ret)
+ goto err_out;
mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
@@ -331,8 +339,12 @@ struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret)
+err_out:
+ if (ret) {
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REREG_ERR_CNT]);
return ERR_PTR(ret);
+ }
+
return NULL;
}
@@ -674,7 +686,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->kmem = NULL;
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
- if (IS_ERR_OR_NULL(mtr->umem)) {
+ if (IS_ERR(mtr->umem)) {
ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
PTR_ERR(mtr->umem));
return -ENOMEM;
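
Two idioms in the mr.c hunk are worth spelling out: ib_umem_get() returns an ERR_PTR and never NULL, so IS_ERR() is the correct and sufficient test; and PTR_ERR_OR_ZERO() folds the IS_ERR() check into one assignment, which is what lets the rereg path funnel mailbox failures to err_out. A minimal sketch of the latter with a hypothetical ERR_PTR-returning allocator:

#include <linux/err.h>

static void *get_resource(void)
{
	return ERR_PTR(-ENOMEM);	/* hypothetical allocator */
}

static int use_resource(void)
{
	void *res = get_resource();
	int ret = PTR_ERR_OR_ZERO(res);	/* 0 on success, -errno otherwise */

	if (ret)
		return ret;

	/* ... use res ... */
	return 0;
}
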
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index bd1fe89ca..d35cf59d0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -149,14 +149,18 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
int ret;
- if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
- return -EOPNOTSUPP;
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+
+err_out:
if (ret)
- return ret;
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT]);
- return 0;
+ return ret;
}
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 828b58534..31b147210 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1216,7 +1216,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
if (ret)
- return ret;
+ goto err_out;
if (init_attr->qp_type == IB_QPT_XRC_TGT)
hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
@@ -1231,6 +1231,10 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
+err_out:
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]);
+
return ret;
}
@@ -1366,6 +1370,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
out:
mutex_unlock(&hr_qp->mutex);
+ if (ret)
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]);
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 4e2d1c8e1..4abae9477 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -475,11 +475,11 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
ret = set_srq_param(srq, init_attr, udata);
if (ret)
- return ret;
+ goto err_out;
ret = alloc_srq_buf(hr_dev, srq, udata);
if (ret)
- return ret;
+ goto err_out;
ret = alloc_srq_db(hr_dev, srq, udata, &resp);
if (ret)
@@ -517,6 +517,8 @@ err_srq_db:
free_srq_db(hr_dev, srq, udata);
err_srq_buf:
free_srq_buf(hr_dev, srq);
+err_out:
+ atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT]);
return ret;
}
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 916bfe2a9..042278759 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -1393,17 +1393,12 @@ int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
u32 val)
{
u32 crc = 0;
- int ret;
- int ret_code = 0;
- crypto_shash_init(desc);
- ret = crypto_shash_update(desc, addr, len);
- if (!ret)
- crypto_shash_final(desc, (u8 *)&crc);
+ crypto_shash_digest(desc, addr, len, (u8 *)&crc);
if (crc != val)
- ret_code = -EINVAL;
+ return -EINVAL;
- return ret_code;
+ return 0;
}
/**
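
crypto_shash_digest() is the one-shot form of init() + update() + final(), so the CRC check collapses to a single call. Note the original code also ignored the hash return value, which the conversion keeps; the sketch below is an editorial variant that checks it, assuming 'desc' was set up by the caller against a 4-byte-digest shash such as crc32c:

#include <crypto/hash.h>

static int mpa_crc_check_sketch(struct shash_desc *desc, const void *addr,
				u32 len, u32 expected)
{
	u32 crc = 0;
	int err;

	/* One-shot digest replaces init() + update() + final(). */
	err = crypto_shash_digest(desc, addr, len, (u8 *)&crc);
	if (err)
		return err;

	return crc == expected ? 0 : -EINVAL;
}
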
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index d141cab8a..4a71e678d 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -12,13 +12,20 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
+ struct gdma_context *gc;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(mdev);
if (udata->inlen < sizeof(ucmd))
return -EINVAL;
+ if (attr->comp_vector > gc->max_num_queues)
+ return -EINVAL;
+
+ cq->comp_vector = attr->comp_vector;
+
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) {
ibdev_dbg(ibdev,
@@ -26,7 +33,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) {
+ if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
return -EINVAL;
}
@@ -41,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
@@ -50,12 +57,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, cq->gdma_region);
/*
* The CQ ID is not known at this time. The ID is generated at create_qp
*/
+ cq->id = INVALID_QUEUE_ID;
return 0;
@@ -69,11 +77,52 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev;
+ struct gdma_context *gc;
+ int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ gc = mdev_to_gc(mdev);
+
+ err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+ if (err) {
+ ibdev_dbg(ibdev,
+ "Failed to destroy dma region, %d\n", err);
+ return err;
+ }
+
+ if (cq->id != INVALID_QUEUE_ID) {
+ kfree(gc->cq_table[cq->id]);
+ gc->cq_table[cq->id] = NULL;
+ }
- mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
ib_umem_release(cq->umem);
return 0;
}
+
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+{
+ struct mana_ib_cq *cq = ctx;
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue *gdma_cq;
+
+ /* Create CQ table entry */
+ WARN_ON(gc->cq_table[cq->id]);
+ gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+ if (!gdma_cq)
+ return -ENOMEM;
+
+ gdma_cq->cq.context = cq;
+ gdma_cq->type = GDMA_CQ;
+ gdma_cq->cq.callback = mana_ib_cq_handler;
+ gdma_cq->id = cq->id;
+ gc->cq_table[cq->id] = gdma_cq;
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index d4541b870..6fa902ee8 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -68,7 +68,6 @@ static int mana_ib_probe(struct auxiliary_device *adev,
ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
- dev->gdma_dev = mdev;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
/*
@@ -78,16 +77,35 @@ static int mana_ib_probe(struct auxiliary_device *adev,
dev->ib_dev.num_comp_vectors = 1;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
+ ret);
+ goto free_ib_device;
+ }
+ dev->gdma_dev = &mdev->gdma_context->mana_ib;
+
+ ret = mana_ib_gd_query_adapter_caps(dev);
if (ret) {
- ib_dealloc_device(&dev->ib_dev);
- return ret;
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
+ ret);
+ goto deregister_device;
}
+ ret = ib_register_device(&dev->ib_dev, "mana_%d",
+ mdev->gdma_context->dev);
+ if (ret)
+ goto deregister_device;
+
dev_set_drvdata(&adev->dev, dev);
return 0;
+
+deregister_device:
+ mana_gd_deregister_device(dev->gdma_dev);
+free_ib_device:
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
}
static void mana_ib_remove(struct auxiliary_device *adev)
@@ -95,6 +113,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev);
+
+ mana_gd_deregister_device(dev->gdma_dev);
+
ib_dealloc_device(&dev->ib_dev);
}
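
The probe rework follows the usual goto-unwind discipline: register the gdma device, query caps, register the IB device, and on failure release everything acquired so far in reverse order (deregister, then dealloc). A generic sketch with hypothetical resources:

#include <linux/errno.h>

static int acquire_a(void) { return 0; }	/* hypothetical resources */
static void release_a(void) { }
static int acquire_b(void) { return -ENODEV; }

static int probe_sketch(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = acquire_b();
	if (ret)
		goto err_release_a;	/* undo in reverse acquisition order */

	return 0;

err_release_a:
	release_a();
	return ret;
}
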
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index ab91009ae..71e33feee 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -8,13 +8,10 @@
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
{
- struct gdma_dev *gd = dev->gdma_dev;
struct mana_port_context *mpc;
struct net_device *ndev;
- struct mana_context *mc;
- mc = gd->driver_data;
- ndev = mc->ports[port];
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@@ -31,14 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
u32 doorbell_id)
{
- struct gdma_dev *mdev = dev->gdma_dev;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
int err;
- mc = mdev->driver_data;
- ndev = mc->ports[port];
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@@ -79,17 +73,17 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_create_pd_req req = {};
enum gdma_pd_flags flags = 0;
struct mana_ib_dev *dev;
- struct gdma_dev *mdev;
+ struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
req.flags = flags;
- err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+ err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@@ -119,17 +113,17 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_destory_pd_resp resp = {};
struct gdma_destroy_pd_req req = {};
struct mana_ib_dev *dev;
- struct gdma_dev *mdev;
+ struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
sizeof(resp));
req.pd_handle = pd->pd_handle;
- err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+ err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@@ -206,13 +200,11 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
struct ib_device *ibdev = ibcontext->device;
struct mana_ib_dev *mdev;
struct gdma_context *gc;
- struct gdma_dev *dev;
int doorbell_page;
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- dev = mdev->gdma_dev;
- gc = dev->gdma_context;
+ gc = mdev_to_gc(mdev);
/* Allocate a doorbell page index */
ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
@@ -238,7 +230,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
if (ret)
@@ -309,8 +301,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
return 0;
}
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region)
+static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, unsigned long page_sz)
{
struct gdma_dma_region_add_pages_req *add_req = NULL;
size_t num_pages_processed = 0, num_pages_to_handle;
@@ -322,23 +314,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
size_t max_pgs_create_cmd;
struct gdma_context *gc;
size_t num_pages_total;
- struct gdma_dev *mdev;
- unsigned long page_sz;
unsigned int tail = 0;
u64 *page_addr_list;
void *request_buf;
int err;
- mdev = dev->gdma_dev;
- gc = mdev->gdma_context;
+ gc = mdev_to_gc(dev);
hwc = gc->hwc.driver_data;
- /* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
- if (!page_sz) {
- ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
- return -ENOMEM;
- }
num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
max_pgs_create_cmd =
@@ -424,12 +407,39 @@ out:
return err;
}
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt)
+{
+ unsigned long page_sz;
+
+ page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region)
+{
+ unsigned long page_sz;
+
+ /* Hardware requires dma region to align to chosen page size */
+ page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
- gc = mdev->gdma_context;
ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
return mana_gd_destroy_dma_region(gc, gdma_region);
@@ -447,7 +457,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
if (vma->vm_pgoff != 0) {
ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
@@ -486,20 +496,17 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- props->max_qp = MANA_MAX_NUM_QUEUES;
- props->max_qp_wr = MAX_SEND_BUFFERS_PER_QUEUE;
-
- /*
- * max_cqe could be potentially much bigger.
- * As this version of driver only support RAW QP, set it to the same
- * value as max_qp_wr
- */
- props->max_cqe = MAX_SEND_BUFFERS_PER_QUEUE;
-
+ struct mana_ib_dev *dev = container_of(ibdev,
+ struct mana_ib_dev, ib_dev);
+
+ props->max_qp = dev->adapter_caps.max_qp_count;
+ props->max_qp_wr = dev->adapter_caps.max_qp_wr;
+ props->max_cq = dev->adapter_caps.max_cq_count;
+ props->max_cqe = dev->adapter_caps.max_qp_wr;
+ props->max_mr = dev->adapter_caps.max_mr_count;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->max_mr = MANA_IB_MAX_MR;
- props->max_send_sge = MAX_TX_WQE_SGL_ENTRIES;
- props->max_recv_sge = MAX_RX_WQE_SGL_ENTRIES;
+ props->max_send_sge = dev->adapter_caps.max_send_sge_count;
+ props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;
return 0;
}
@@ -521,3 +528,45 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
+
+int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct mana_ib_query_adapter_caps_resp resp = {};
+ struct mana_ib_query_adapter_caps_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
+ sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
+ req.hdr.dev_id = dev->gdma_dev->dev_id;
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
+ &req, sizeof(resp), &resp);
+
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_sq_id = resp.max_sq_id;
+ caps->max_rq_id = resp.max_rq_id;
+ caps->max_cq_id = resp.max_cq_id;
+ caps->max_qp_count = resp.max_qp_count;
+ caps->max_cq_count = resp.max_cq_count;
+ caps->max_mr_count = resp.max_mr_count;
+ caps->max_pd_count = resp.max_pd_count;
+ caps->max_inbound_read_limit = resp.max_inbound_read_limit;
+ caps->max_outbound_read_limit = resp.max_outbound_read_limit;
+ caps->mw_count = resp.mw_count;
+ caps->max_srq_count = resp.max_srq_count;
+ caps->max_qp_wr = min_t(u32,
+ resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
+ resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
+ caps->max_inline_data_size = resp.max_inline_data_size;
+ caps->max_send_sge_count = resp.max_send_sge_count;
+ caps->max_recv_sge_count = resp.max_recv_sge_count;
+
+ return 0;
+}
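
The max_qp_wr derivation above divides each queue's byte budget by its per-WQE size and takes the smaller of the SQ and RQ limits. A sketch with purely hypothetical numbers, not the device's real capabilities:

#include <linux/minmax.h>
#include <linux/types.h>

/* How the clamp turns queue byte sizes into a WQE-count limit. */
static u32 qp_wr_limit(u32 sq_bytes, u32 rq_bytes, u32 sqe_sz, u32 rqe_sz)
{
	return min_t(u32, sq_bytes / sqe_sz, rq_bytes / rqe_sz);
}

/* e.g. qp_wr_limit(1 << 20, 1 << 19, 512, 256) == min(2048, 2048) == 2048 */
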
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 502cc8672..f83390eeb 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -27,9 +27,28 @@
*/
#define MANA_IB_MAX_MR 0xFFFFFFu
+struct mana_ib_adapter_caps {
+ u32 max_sq_id;
+ u32 max_rq_id;
+ u32 max_cq_id;
+ u32 max_qp_count;
+ u32 max_cq_count;
+ u32 max_mr_count;
+ u32 max_pd_count;
+ u32 max_inbound_read_limit;
+ u32 max_outbound_read_limit;
+ u32 mw_count;
+ u32 max_srq_count;
+ u32 max_qp_wr;
+ u32 max_send_sge_count;
+ u32 max_recv_sge_count;
+ u32 max_inline_data_size;
+};
+
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
+ struct mana_ib_adapter_caps adapter_caps;
};
struct mana_ib_wq {
@@ -67,6 +86,7 @@ struct mana_ib_cq {
int cqe;
u64 gdma_region;
u64 id;
+ u32 comp_vector;
};
struct mana_ib_qp {
@@ -92,8 +112,59 @@ struct mana_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_ind_table;
};
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region);
+enum mana_ib_command_code {
+ MANA_IB_GET_ADAPTER_CAP = 0x30001,
+};
+
+struct mana_ib_query_adapter_caps_req {
+ struct gdma_req_hdr hdr;
}; /* HW Data */
+
+struct mana_ib_query_adapter_caps_resp {
+ struct gdma_resp_hdr hdr;
+ u32 max_sq_id;
+ u32 max_rq_id;
+ u32 max_cq_id;
+ u32 max_qp_count;
+ u32 max_cq_count;
+ u32 max_mr_count;
+ u32 max_pd_count;
+ u32 max_inbound_read_limit;
+ u32 max_outbound_read_limit;
+ u32 mw_count;
+ u32 max_srq_count;
+ u32 max_requester_sq_size;
+ u32 max_responder_sq_size;
+ u32 max_requester_rq_size;
+ u32 max_responder_rq_size;
+ u32 max_send_sge_count;
+ u32 max_recv_sge_count;
+ u32 max_inline_data_size;
+}; /* HW Data */
+
+static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->gdma_context;
+}
+
+static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_context *mc = gc->mana.driver_data;
+
+ if (port < 1 || port > mc->num_ports)
+ return NULL;
+ return mc->ports[port - 1];
+}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region);
+
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt);
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
@@ -159,4 +230,5 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
+int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
#endif
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 351207c60..b70b13484 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -30,12 +30,9 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
{
struct gdma_create_mr_response resp = {};
struct gdma_create_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
int err;
- gc = mdev->gdma_context;
-
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
sizeof(resp));
req.pd_handle = mr_params->pd_handle;
@@ -77,12 +74,9 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
struct gdma_destroy_mr_response resp = {};
struct gdma_destroy_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
int err;
- gc = mdev->gdma_context;
-
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
sizeof(resp));
@@ -133,7 +127,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_free;
}
- err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
@@ -141,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
+ "create_dma_region ret %d gdma_region %llx\n", err,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
@@ -164,8 +158,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
return &mr->ibmr;
err_dma_region:
- mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
- dma_region_handle);
+ mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
err_umem:
ib_umem_release(mr->umem);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 4b3b5b274..6e7627745 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -17,12 +17,10 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
struct mana_cfg_rx_steer_resp resp = {};
mana_handle_t *req_indir_tab;
struct gdma_context *gc;
- struct gdma_dev *mdev;
u32 req_buf_size;
int i, err;
- mdev = dev->gdma_dev;
- gc = mdev->gdma_context;
+ gc = mdev_to_gc(dev);
req_buf_size =
sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
@@ -39,7 +37,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
req->rx_enable = 1;
req->update_default_rxobj = 1;
req->default_rxobj = default_rxobj;
- req->hdr.dev_id = mdev->dev_id;
+ req->hdr.dev_id = gc->mana.dev_id;
/* If there are more than 1 entries in indirection table, enable RSS */
if (log_ind_tbl_size)
@@ -99,25 +97,24 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
- struct gdma_dev *gd = mdev->gdma_dev;
+ struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
- struct mana_context *mc;
+ unsigned int ind_tbl_size;
struct net_device *ndev;
struct mana_ib_cq *cq;
struct mana_ib_wq *wq;
- unsigned int ind_tbl_size;
+ struct mana_eq *eq;
struct ib_cq *ibcq;
struct ib_wq *ibwq;
int i = 0;
u32 port;
int ret;
- mc = gd->driver_data;
-
if (!udata || udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -129,7 +126,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
return ret;
}
- if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
+ if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(&mdev->ib_dev,
"Requested max_recv_wr %d exceeding limit\n",
attr->cap.max_recv_wr);
@@ -160,12 +157,12 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
/* IB ports start with 1, MANA ports start with 0 */
port = ucmd.port;
- if (port < 1 || port > mc->num_ports) {
+ ndev = mana_ib_get_netdev(pd->device, port);
+ if (!ndev) {
ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
port);
return -EINVAL;
}
- ndev = mc->ports[port - 1];
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
@@ -178,6 +175,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail;
}
+ gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
+ GFP_KERNEL);
+ if (!gdma_cq_allocated) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
qp->port = port;
for (i = 0; i < ind_tbl_size; i++) {
@@ -196,12 +200,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
cq_spec.gdma_region = cq->gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- cq_spec.attached_eq = GDMA_CQ_NO_EQ;
+ eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
+ cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
&wq_spec, &cq_spec, &wq->rx_object);
- if (ret)
+ if (ret) {
+ /* Do cleanup starting with index i-1 */
+ i--;
goto fail;
+ }
/* The GDMA regions are now owned by the WQ object */
wq->gdma_region = GDMA_INVALID_DMA_REGION;
@@ -218,6 +226,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
resp.entries[i].wqid = wq->id;
mana_ind_table[i] = wq->rx_object;
+
+ /* Create CQ table entry */
+ ret = mana_ib_install_cq_cb(mdev, cq);
+ if (ret)
+ goto fail;
+
+ gdma_cq_allocated[i] = gc->cq_table[cq->id];
}
resp.num_entries = i;
@@ -237,6 +252,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail;
}
+ kfree(gdma_cq_allocated);
kfree(mana_ind_table);
return 0;
@@ -244,10 +260,17 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
fail:
while (i-- > 0) {
ibwq = ind_tbl->ind_tbl[i];
+ ibcq = ibwq->cq;
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
+ cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+
+ gc->cq_table[cq->id] = NULL;
+ kfree(gdma_cq_allocated[i]);
+
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
}
+ kfree(gdma_cq_allocated);
kfree(mana_ind_table);
return ret;
@@ -266,19 +289,19 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
+ struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {};
- struct gdma_dev *gd = mdev->gdma_dev;
struct mana_ib_create_qp ucmd = {};
+ struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct ib_umem *umem;
- int err;
+ struct mana_eq *eq;
+ int eq_vec;
u32 port;
-
- mc = gd->driver_data;
+ int err;
if (!mana_ucontext || udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -290,12 +313,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return err;
}
- /* IB ports start with 1, MANA Ethernet ports start with 0 */
- port = ucmd.port;
- if (port < 1 || port > mc->num_ports)
- return -EINVAL;
-
- if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
+ if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(&mdev->ib_dev,
"Requested max_send_wr %d exceeding limit\n",
attr->cap.max_send_wr);
@@ -309,11 +327,17 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return -EINVAL;
}
- ndev = mc->ports[port - 1];
+ port = ucmd.port;
+ ndev = mana_ib_get_netdev(ibpd->device, port);
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
+ port);
+ return -EINVAL;
+ }
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
- err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
+ err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
if (err)
return -ENODEV;
@@ -333,8 +357,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
qp->sq_umem = umem;
- err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
+ &qp->sq_gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n",
@@ -343,7 +367,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);
/* Create a WQ on the same port handle used by the Ethernet */
@@ -353,7 +377,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
cq_spec.gdma_region = send_cq->gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- cq_spec.attached_eq = GDMA_CQ_NO_EQ;
+ eq_vec = send_cq->comp_vector % gc->max_num_queues;
+ eq = &mpc->ac->eqs[eq_vec];
+ cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
&cq_spec, &qp->tx_object);
@@ -371,6 +397,11 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
qp->sq_id = wq_spec.queue_index;
send_cq->id = cq_spec.queue_index;
+ /* Create CQ table entry */
+ err = mana_ib_install_cq_cb(mdev, send_cq);
+ if (err)
+ goto err_destroy_wq_obj;
+
ibdev_dbg(&mdev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
qp->tx_object, qp->sq_id, send_cq->id);
@@ -384,11 +415,15 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev,
"Failed copy udata for create qp-raw, %d\n",
err);
- goto err_destroy_wq_obj;
+ goto err_release_gdma_cq;
}
return 0;
+err_release_gdma_cq:
+ kfree(gdma_cq);
+ gc->cq_table[send_cq->id] = NULL;
+
err_destroy_wq_obj:
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
@@ -399,7 +434,7 @@ err_release_umem:
ib_umem_release(umem);
err_free_vport:
- mana_ib_uncfg_vport(mdev, pd, port - 1);
+ mana_ib_uncfg_vport(mdev, pd, port);
return err;
}
@@ -437,16 +472,13 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = mdev->gdma_dev;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_wq *wq;
struct ib_wq *ibwq;
int i;
- mc = gd->driver_data;
- ndev = mc->ports[qp->port - 1];
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
@@ -464,15 +496,12 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = mdev->gdma_dev;
struct ib_pd *ibpd = qp->ibqp.pd;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_pd *pd;
- mc = gd->driver_data;
- ndev = mc->ports[qp->port - 1];
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -483,7 +512,7 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
ib_umem_release(qp->sq_umem);
}
- mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
+ mana_ib_uncfg_vport(mdev, pd, qp->port);
return 0;
}
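
Throughout qp.c the open-coded port check (port < 1 || port > mc->num_ports) and the mc->ports[port - 1] lookup are replaced by mana_ib_get_netdev(), which returns NULL for an out-of-range port so every caller reduces to a single NULL test. A plausible shape, inferred from the call sites above (the exact body lives in mana_ib.h):

/* Inferred sketch; IB ports are 1-based, MANA ports 0-based. */
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev,
						    u32 port)
{
	struct mana_ib_dev *mdev =
		container_of(ibdev, struct mana_ib_dev, ib_dev);
	struct mana_context *mc = mdev_to_gc(mdev)->mana.driver_data;

	if (port < 1 || port > mc->num_ports)
		return NULL;
	return mc->ports[port - 1];
}
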
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 372d36151..7c9c69962 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -46,7 +46,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
- err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create wq, %d\n",
@@ -55,7 +55,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);
/* WQ ID is returned at wq_create time, doesn't know the value yet */
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index 3669c90b2..b4c97fb62 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -341,6 +341,8 @@ static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
+ return MLX5_SW_ICM_TYPE_SW_ENCAP;
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
default:
return MLX5_SW_ICM_TYPE_STEERING;
@@ -364,6 +366,7 @@ static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
switch (type) {
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
@@ -438,6 +441,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
return handle_alloc_dm_sw_icm(context, attr, attrs, type);
default:
return ERR_PTR(-EOPNOTSUPP);
@@ -491,6 +495,7 @@ static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
default:
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 0c3c4e648..3e43687a7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 650a15b6c..c2b557e64 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -818,6 +818,17 @@ static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
MLX5_REG_NODE_DESC, 0, 0);
}
+static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
+ struct mlx5_ib_query_device_resp *resp)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ u16 vport = mlx5_eswitch_manager_vport(mdev);
+
+ resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
+ vport);
+ resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+}
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -1209,6 +1220,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
}
+ if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
+ resp.response_length += sizeof(resp.reserved);
+
+ if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+ resp.response_length += sizeof(resp.reg_c0);
+
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw))
+ fill_esw_mgr_reg_c0(mdev, &resp);
+ }
+
if (uhw_outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
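
mlx5_ib_query_device() now appends a reg_c0 value/mask pair to the uverbs response whenever the eswitch is in offloads mode with vport match metadata enabled, letting userspace steer on the eswitch manager vport's metadata. The appended tail presumably looks like the following (the authoritative layout is in rdma/mlx5-abi.h):

/* Hedged sketch of the uapi addition, matching the kernel side above: */
struct mlx5_ib_reg_c0 {
	__aligned_u64 value;	/* metadata written for the esw manager vport */
	__aligned_u64 mask;	/* bits of reg_c0 valid for matching */
};
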
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 18e459b55..a8ee2ca1f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1347,6 +1347,7 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 54c723a6e..6f9ec8db0 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index cec5cccd2..75253f2b3 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -121,11 +121,10 @@ struct siw_page_chunk {
};
struct siw_umem {
+ struct ib_umem *base_mem;
struct siw_page_chunk *page_chunk;
int num_pages;
- bool writable;
u64 fp_addr; /* First page base address */
- struct mm_struct *owning_mm;
};
struct siw_pble {
@@ -289,10 +288,11 @@ struct siw_rx_stream {
int skb_offset; /* offset in skb */
int skb_copied; /* processed bytes in skb */
+ enum siw_rx_state state;
+
union iwarp_hdr hdr;
struct mpa_trailer trailer;
-
- enum siw_rx_state state;
+ struct shash_desc *mpa_crc_hd;
/*
* For each FPDU, main RX loop runs through 3 stages:
@@ -314,7 +314,6 @@ struct siw_rx_stream {
u64 ddp_to;
u32 inval_stag; /* Stag to be invalidated */
- struct shash_desc *mpa_crc_hd;
u8 rx_suspend : 1;
u8 pad : 2; /* # of pad bytes expected */
u8 rdmap_op : 4; /* opcode of current frame */
@@ -418,10 +417,10 @@ struct siw_iwarp_tx {
struct siw_qp {
struct ib_qp base_qp;
struct siw_device *sdev;
+ int tx_cpu;
struct kref ref;
struct completion qp_free;
struct list_head devq;
- int tx_cpu;
struct siw_qp_attrs attrs;
struct siw_cep *cep;
@@ -466,7 +465,6 @@ struct siw_qp {
} term_info;
struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
- struct rcu_head rcu;
};
/* helper macros */
@@ -659,7 +657,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
static inline int siw_orq_empty(struct siw_qp *qp)
{
- return qp->orq[qp->orq_get % qp->attrs.orq_size].flags == 0 ? 1 : 0;
+ return orq_get_current(qp)->flags == 0 ? 1 : 0;
}
static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 7de651cb4..86323918a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -41,16 +41,6 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
static void siw_sk_assign_cm_upcalls(struct sock *sk)
{
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_state_change = siw_cm_llp_state_change;
- sk->sk_data_ready = siw_cm_llp_data_ready;
- sk->sk_write_space = siw_cm_llp_write_space;
- sk->sk_error_report = siw_cm_llp_error_report;
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void siw_sk_save_upcalls(struct sock *sk)
-{
struct siw_cep *cep = sk_to_cep(sk);
write_lock_bh(&sk->sk_callback_lock);
@@ -58,6 +48,11 @@ static void siw_sk_save_upcalls(struct sock *sk)
cep->sk_data_ready = sk->sk_data_ready;
cep->sk_write_space = sk->sk_write_space;
cep->sk_error_report = sk->sk_error_report;
+
+ sk->sk_state_change = siw_cm_llp_state_change;
+ sk->sk_data_ready = siw_cm_llp_data_ready;
+ sk->sk_write_space = siw_cm_llp_write_space;
+ sk->sk_error_report = siw_cm_llp_error_report;
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -156,7 +151,6 @@ static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s)
siw_cep_get(cep);
s->sk->sk_user_data = cep;
- siw_sk_save_upcalls(s->sk);
siw_sk_assign_cm_upcalls(s->sk);
}
@@ -364,6 +358,24 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
return id->event_handler(id, &event);
}
+static void siw_free_cm_id(struct siw_cep *cep)
+{
+ if (!cep->cm_id)
+ return;
+
+ cep->cm_id->rem_ref(cep->cm_id);
+ cep->cm_id = NULL;
+}
+
+static void siw_destroy_cep_sock(struct siw_cep *cep)
+{
+ if (cep->sock) {
+ siw_socket_disassoc(cep->sock);
+ sock_release(cep->sock);
+ cep->sock = NULL;
+ }
+}
+
/*
* siw_qp_cm_drop()
*
@@ -393,8 +405,7 @@ void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
}
siw_dbg_cep(cep, "immediate close, state %d\n", cep->state);
- if (qp->term_info.valid)
- siw_send_terminate(qp);
+ siw_send_terminate(qp);
if (cep->cm_id) {
switch (cep->state) {
@@ -416,20 +427,12 @@ void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
default:
break;
}
- cep->cm_id->rem_ref(cep->cm_id);
- cep->cm_id = NULL;
+ siw_free_cm_id(cep);
siw_cep_put(cep);
}
cep->state = SIW_EPSTATE_CLOSED;
- if (cep->sock) {
- siw_socket_disassoc(cep->sock);
- /*
- * Immediately close socket
- */
- sock_release(cep->sock);
- cep->sock = NULL;
- }
+ siw_destroy_cep_sock(cep);
if (cep->qp) {
cep->qp = NULL;
siw_qp_put(qp);
@@ -445,6 +448,12 @@ void siw_cep_put(struct siw_cep *cep)
kref_put(&cep->ref, __siw_cep_dealloc);
}
+static void siw_cep_set_free_and_put(struct siw_cep *cep)
+{
+ siw_cep_set_free(cep);
+ siw_cep_put(cep);
+}
+
void siw_cep_get(struct siw_cep *cep)
{
kref_get(&cep->ref);
@@ -1061,7 +1070,7 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* QP scheduled LLP close
*/
- if (cep->qp && cep->qp->term_info.valid)
+ if (cep->qp)
siw_send_terminate(cep->qp);
if (cep->cm_id)
@@ -1175,8 +1184,7 @@ static void siw_cm_work_handler(struct work_struct *w)
cep->sock = NULL;
}
if (cep->cm_id) {
- cep->cm_id->rem_ref(cep->cm_id);
- cep->cm_id = NULL;
+ siw_free_cm_id(cep);
siw_cep_put(cep);
}
}
@@ -1515,9 +1523,7 @@ error:
cep->state = SIW_EPSTATE_CLOSED;
- siw_cep_set_free(cep);
-
- siw_cep_put(cep);
+ siw_cep_set_free_and_put(cep);
} else if (s) {
sock_release(s);
@@ -1548,7 +1554,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
struct siw_cep *cep = (struct siw_cep *)id->provider_data;
struct siw_qp *qp;
struct siw_qp_attrs qp_attrs;
- int rv, max_priv_data = MPA_MAX_PRIVDATA;
+ int rv = -EINVAL, max_priv_data = MPA_MAX_PRIVDATA;
bool wait_for_peer_rts = false;
siw_cep_set_inuse(cep);
@@ -1564,26 +1570,17 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
siw_dbg_cep(cep, "out of state\n");
-
- siw_cep_set_free(cep);
- siw_cep_put(cep);
-
- return -ECONNRESET;
+ rv = -ECONNRESET;
+ goto free_cep;
}
qp = siw_qp_id2obj(sdev, params->qpn);
if (!qp) {
WARN(1, "[QP %d] does not exist\n", params->qpn);
- siw_cep_set_free(cep);
- siw_cep_put(cep);
-
- return -EINVAL;
+ goto free_cep;
}
down_write(&qp->state_lock);
- if (qp->attrs.state > SIW_QP_STATE_RTR) {
- rv = -EINVAL;
- up_write(&qp->state_lock);
- goto error;
- }
+ if (qp->attrs.state > SIW_QP_STATE_RTR)
+ goto error_unlock;
siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
@@ -1597,9 +1594,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
"[QP %u]: ord %d (max %d), ird %d (max %d)\n",
qp_id(qp), params->ord, sdev->attrs.max_ord,
params->ird, sdev->attrs.max_ird);
- rv = -EINVAL;
- up_write(&qp->state_lock);
- goto error;
+ goto error_unlock;
}
if (cep->enhanced_rdma_conn_est)
max_priv_data -= sizeof(struct mpa_v2_data);
@@ -1609,9 +1604,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
cep,
"[QP %u]: private data length: %d (max %d)\n",
qp_id(qp), params->private_data_len, max_priv_data);
- rv = -EINVAL;
- up_write(&qp->state_lock);
- goto error;
+ goto error_unlock;
}
if (cep->enhanced_rdma_conn_est) {
if (params->ord > cep->ord) {
@@ -1620,9 +1613,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
} else {
cep->ird = params->ird;
cep->ord = params->ord;
- rv = -EINVAL;
- up_write(&qp->state_lock);
- goto error;
+ goto error_unlock;
}
}
if (params->ird < cep->ird) {
@@ -1631,8 +1622,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
params->ird = cep->ird;
else {
rv = -ENOMEM;
- up_write(&qp->state_lock);
- goto error;
+ goto error_unlock;
}
}
if (cep->mpa.v2_ctrl.ord &
@@ -1679,7 +1669,6 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD |
SIW_QP_ATTR_MPA);
up_write(&qp->state_lock);
-
if (rv)
goto error;
@@ -1702,27 +1691,23 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
siw_cep_set_free(cep);
return 0;
+
+error_unlock:
+ up_write(&qp->state_lock);
error:
- siw_socket_disassoc(cep->sock);
- sock_release(cep->sock);
- cep->sock = NULL;
+ siw_destroy_cep_sock(cep);
cep->state = SIW_EPSTATE_CLOSED;
- if (cep->cm_id) {
- cep->cm_id->rem_ref(id);
- cep->cm_id = NULL;
- }
+ siw_free_cm_id(cep);
if (qp->cep) {
siw_cep_put(cep);
qp->cep = NULL;
}
cep->qp = NULL;
siw_qp_put(qp);
-
- siw_cep_set_free(cep);
- siw_cep_put(cep);
-
+free_cep:
+ siw_cep_set_free_and_put(cep);
return rv;
}
@@ -1744,8 +1729,7 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
siw_dbg_cep(cep, "out of state\n");
- siw_cep_set_free(cep);
- siw_cep_put(cep); /* put last reference */
+ siw_cep_set_free_and_put(cep); /* put last reference */
return -ECONNRESET;
}
@@ -1756,14 +1740,11 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
siw_send_mpareqrep(cep, pdata, pd_len);
}
- siw_socket_disassoc(cep->sock);
- sock_release(cep->sock);
- cep->sock = NULL;
+ siw_destroy_cep_sock(cep);
cep->state = SIW_EPSTATE_CLOSED;
- siw_cep_set_free(cep);
- siw_cep_put(cep);
+ siw_cep_set_free_and_put(cep);
return 0;
}
@@ -1890,16 +1871,12 @@ error:
if (cep) {
siw_cep_set_inuse(cep);
- if (cep->cm_id) {
- cep->cm_id->rem_ref(cep->cm_id);
- cep->cm_id = NULL;
- }
+ siw_free_cm_id(cep);
cep->sock = NULL;
siw_socket_disassoc(s);
cep->state = SIW_EPSTATE_CLOSED;
- siw_cep_set_free(cep);
- siw_cep_put(cep);
+ siw_cep_set_free_and_put(cep);
}
sock_release(s);
@@ -1923,18 +1900,14 @@ static void siw_drop_listeners(struct iw_cm_id *id)
siw_cep_set_inuse(cep);
- if (cep->cm_id) {
- cep->cm_id->rem_ref(cep->cm_id);
- cep->cm_id = NULL;
- }
+ siw_free_cm_id(cep);
if (cep->sock) {
siw_socket_disassoc(cep->sock);
sock_release(cep->sock);
cep->sock = NULL;
}
cep->state = SIW_EPSTATE_CLOSED;
- siw_cep_set_free(cep);
- siw_cep_put(cep);
+ siw_cep_set_free_and_put(cep);
}
}
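
The siw_accept() rework is mostly error-path hygiene: every bailout taken after down_write(&qp->state_lock) now funnels through one error_unlock label, instead of pairing each goto with its own up_write(). Stripped of the siw specifics, the resulting shape is the standard kernel unwind ladder:

/* Generic shape of the rework (illustrative, not the literal siw code): */
static int do_accept(struct rw_semaphore *lock, bool bad)
{
	int rv = -EINVAL;	/* pre-initialized, as in siw_accept() now */

	down_write(lock);
	if (bad)
		goto error_unlock;

	up_write(lock);
	return 0;

error_unlock:
	up_write(lock);
	/* common teardown shared with the other failure paths */
	return rv;
}
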
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 1ab62982d..61ad8ca3d 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -109,6 +109,17 @@ static struct {
int num_nodes;
} siw_cpu_info;
+static void siw_destroy_cpulist(int number)
+{
+ int i = 0;
+
+ while (i < number)
+ kfree(siw_cpu_info.tx_valid_cpus[i++]);
+
+ kfree(siw_cpu_info.tx_valid_cpus);
+ siw_cpu_info.tx_valid_cpus = NULL;
+}
+
static int siw_init_cpulist(void)
{
int i, num_nodes = nr_node_ids;
@@ -138,24 +149,11 @@ static int siw_init_cpulist(void)
out_err:
siw_cpu_info.num_nodes = 0;
- while (--i >= 0)
- kfree(siw_cpu_info.tx_valid_cpus[i]);
- kfree(siw_cpu_info.tx_valid_cpus);
- siw_cpu_info.tx_valid_cpus = NULL;
+ siw_destroy_cpulist(i);
return -ENOMEM;
}
-static void siw_destroy_cpulist(void)
-{
- int i = 0;
-
- while (i < siw_cpu_info.num_nodes)
- kfree(siw_cpu_info.tx_valid_cpus[i++]);
-
- kfree(siw_cpu_info.tx_valid_cpus);
-}
-
/*
* Choose CPU with least number of active QP's from NUMA node of
* TX interface.
@@ -558,7 +556,7 @@ out_error:
pr_info("SoftIWARP attach failed. Error: %d\n", rv);
siw_cm_exit();
- siw_destroy_cpulist();
+ siw_destroy_cpulist(siw_cpu_info.num_nodes);
return rv;
}
@@ -573,7 +571,7 @@ static void __exit siw_exit_module(void)
siw_cm_exit();
- siw_destroy_cpulist();
+ siw_destroy_cpulist(siw_cpu_info.num_nodes);
if (siw_crypto_shash)
crypto_free_shash(siw_crypto_shash);
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index c5f7f1669..dcb963607 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -5,6 +5,7 @@
#include <linux/gfp.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
@@ -13,18 +14,20 @@
#include "siw.h"
#include "siw_mem.h"
+/* Stag lookup is based on its index part only (24 bits). */
+#define SIW_STAG_MAX_INDEX 0x00ffffff
+
/*
- * Stag lookup is based on its index part only (24 bits).
* The code avoids special Stag of zero and tries to randomize
* STag values between 1 and SIW_STAG_MAX_INDEX.
*/
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
{
- struct xa_limit limit = XA_LIMIT(1, 0x00ffffff);
+ struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
u32 id, next;
get_random_bytes(&next, 4);
- next &= 0x00ffffff;
+ next &= SIW_STAG_MAX_INDEX;
if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
GFP_KERNEL) < 0)
@@ -60,28 +63,17 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
return NULL;
}
-static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
- bool dirty)
-{
- unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
-}
-
-void siw_umem_release(struct siw_umem *umem, bool dirty)
+void siw_umem_release(struct siw_umem *umem)
{
- struct mm_struct *mm_s = umem->owning_mm;
int i, num_pages = umem->num_pages;
- for (i = 0; num_pages; i++) {
- int to_free = min_t(int, PAGES_PER_CHUNK, num_pages);
+ if (umem->base_mem)
+ ib_umem_release(umem->base_mem);
- siw_free_plist(&umem->page_chunk[i], to_free,
- umem->writable && dirty);
+ for (i = 0; num_pages > 0; i++) {
kfree(umem->page_chunk[i].plist);
- num_pages -= to_free;
+ num_pages -= PAGES_PER_CHUNK;
}
- atomic64_sub(umem->num_pages, &mm_s->pinned_vm);
-
- mmdrop(mm_s);
kfree(umem->page_chunk);
kfree(umem);
}
@@ -91,7 +83,7 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
{
struct siw_device *sdev = to_siw_dev(pd->device);
struct siw_mem *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- struct xa_limit limit = XA_LIMIT(1, 0x00ffffff);
+ struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
u32 id, next;
if (!mem)
@@ -107,7 +99,7 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
kref_init(&mem->ref);
get_random_bytes(&next, 4);
- next &= 0x00ffffff;
+ next &= SIW_STAG_MAX_INDEX;
if (xa_alloc_cyclic(&sdev->mem_xa, &id, mem, limit, &next,
GFP_KERNEL) < 0) {
@@ -145,7 +137,7 @@ void siw_free_mem(struct kref *ref)
if (!mem->is_mw && mem->mem_obj) {
if (mem->is_pbl == 0)
- siw_umem_release(mem->umem, true);
+ siw_umem_release(mem->umem);
else
kfree(mem->pbl);
}
@@ -362,18 +354,16 @@ struct siw_pbl *siw_pbl_alloc(u32 num_buf)
return pbl;
}
-struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
+struct siw_umem *siw_umem_get(struct ib_device *base_dev, u64 start,
+ u64 len, int rights)
{
struct siw_umem *umem;
- struct mm_struct *mm_s;
+ struct ib_umem *base_mem;
+ struct sg_page_iter sg_iter;
+ struct sg_table *sgt;
u64 first_page_va;
- unsigned long mlock_limit;
- unsigned int foll_flags = FOLL_LONGTERM;
int num_pages, num_chunks, i, rv = 0;
- if (!can_do_mlock())
- return ERR_PTR(-EPERM);
-
if (!len)
return ERR_PTR(-EINVAL);
@@ -385,65 +375,50 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
if (!umem)
return ERR_PTR(-ENOMEM);
- mm_s = current->mm;
- umem->owning_mm = mm_s;
- umem->writable = writable;
-
- mmgrab(mm_s);
-
- if (writable)
- foll_flags |= FOLL_WRITE;
-
- mmap_read_lock(mm_s);
-
- mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
- rv = -ENOMEM;
- goto out_sem_up;
- }
- umem->fp_addr = first_page_va;
-
umem->page_chunk =
kcalloc(num_chunks, sizeof(struct siw_page_chunk), GFP_KERNEL);
if (!umem->page_chunk) {
rv = -ENOMEM;
- goto out_sem_up;
+ goto err_out;
}
- for (i = 0; num_pages; i++) {
+ base_mem = ib_umem_get(base_dev, start, len, rights);
+ if (IS_ERR(base_mem)) {
+ rv = PTR_ERR(base_mem);
+ siw_dbg(base_dev, "Cannot pin user memory: %d\n", rv);
+ goto err_out;
+ }
+ umem->fp_addr = first_page_va;
+ umem->base_mem = base_mem;
+
+ sgt = &base_mem->sgt_append.sgt;
+ __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
+
+ if (!__sg_page_iter_next(&sg_iter)) {
+ rv = -EINVAL;
+ goto err_out;
+ }
+ for (i = 0; num_pages > 0; i++) {
int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
struct page **plist =
kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
if (!plist) {
rv = -ENOMEM;
- goto out_sem_up;
+ goto err_out;
}
umem->page_chunk[i].plist = plist;
- while (nents) {
- rv = pin_user_pages(first_page_va, nents, foll_flags,
- plist);
- if (rv < 0)
- goto out_sem_up;
-
- umem->num_pages += rv;
- first_page_va += rv * PAGE_SIZE;
- plist += rv;
- nents -= rv;
- num_pages -= rv;
+ while (nents--) {
+ *plist = sg_page_iter_page(&sg_iter);
+ umem->num_pages++;
+ num_pages--;
+ plist++;
+ if (!__sg_page_iter_next(&sg_iter))
+ break;
}
}
-out_sem_up:
- mmap_read_unlock(mm_s);
-
- if (rv > 0)
- return umem;
-
- /* Adjust accounting for pages not pinned */
- if (num_pages)
- atomic64_sub(num_pages, &mm_s->pinned_vm);
-
- siw_umem_release(umem, false);
+ return umem;
+err_out:
+ siw_umem_release(umem);
return ERR_PTR(rv);
}
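
The siw_umem_get() conversion delegates pinning, FOLL_LONGTERM semantics and memlock accounting to the core ib_umem_get(), leaving siw to walk the resulting scatter/gather table into its per-chunk page arrays. In isolation, the iteration pattern is:

/* Minimal sketch of the page walk the new code performs; assumes
 * <linux/scatterlist.h> and a populated sg_table. */
static void walk_umem_pages(struct sg_table *sgt)
{
	struct sg_page_iter iter;

	__sg_page_iter_start(&iter, sgt->sgl, sgt->orig_nents, 0);
	while (__sg_page_iter_next(&iter)) {
		struct page *p = sg_page_iter_page(&iter);

		pr_debug("page %p\n", p); /* record p in the next plist slot */
	}
}
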
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index a2835284f..e74cfcd6d 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -6,8 +6,9 @@
#ifndef _SIW_MEM_H
#define _SIW_MEM_H
-struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
-void siw_umem_release(struct siw_umem *umem, bool dirty);
+struct siw_umem *siw_umem_get(struct ib_device *base_dev, u64 start,
+ u64 len, int rights);
+void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 26e3904d2..da92cfa20 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1183,7 +1183,7 @@ int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
/*
* siw_sq_flush()
*
- * Flush SQ and ORRQ entries to CQ.
+ * Flush SQ and ORQ entries to CQ.
*
* Must be called with QP state write lock held.
* Therefore, SQ and ORQ lock must not be taken.
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 33e0fdb36..ed4fc3971 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -405,6 +405,20 @@ out:
return wqe;
}
+static int siw_rx_data(struct siw_mem *mem_p, struct siw_rx_stream *srx,
+ unsigned int *pbl_idx, u64 addr, int bytes)
+{
+ int rv;
+
+ if (mem_p->mem_obj == NULL)
+ rv = siw_rx_kva(srx, ib_virt_dma_to_ptr(addr), bytes);
+ else if (!mem_p->is_pbl)
+ rv = siw_rx_umem(srx, mem_p->umem, addr, bytes);
+ else
+ rv = siw_rx_pbl(srx, pbl_idx, mem_p, addr, bytes);
+ return rv;
+}
+
/*
* siw_proc_send:
*
@@ -485,17 +499,8 @@ int siw_proc_send(struct siw_qp *qp)
break;
}
mem_p = *mem;
- if (mem_p->mem_obj == NULL)
- rv = siw_rx_kva(srx,
- ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
- sge_bytes);
- else if (!mem_p->is_pbl)
- rv = siw_rx_umem(srx, mem_p->umem,
- sge->laddr + frx->sge_off, sge_bytes);
- else
- rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p,
- sge->laddr + frx->sge_off, sge_bytes);
-
+ rv = siw_rx_data(mem_p, srx, &frx->pbl_idx,
+ sge->laddr + frx->sge_off, sge_bytes);
if (unlikely(rv != sge_bytes)) {
wqe->processed += rcvd_bytes;
@@ -598,17 +603,8 @@ int siw_proc_write(struct siw_qp *qp)
return -EINVAL;
}
- if (mem->mem_obj == NULL)
- rv = siw_rx_kva(srx,
- (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
- bytes);
- else if (!mem->is_pbl)
- rv = siw_rx_umem(srx, mem->umem,
- srx->ddp_to + srx->fpdu_part_rcvd, bytes);
- else
- rv = siw_rx_pbl(srx, &frx->pbl_idx, mem,
- srx->ddp_to + srx->fpdu_part_rcvd, bytes);
-
+ rv = siw_rx_data(mem, srx, &frx->pbl_idx,
+ srx->ddp_to + srx->fpdu_part_rcvd, bytes);
if (unlikely(rv != bytes)) {
siw_init_terminate(qp, TERM_ERROR_LAYER_DDP,
DDP_ETYPE_CATASTROPHIC,
@@ -849,17 +845,8 @@ int siw_proc_rresp(struct siw_qp *qp)
mem_p = *mem;
bytes = min(srx->fpdu_part_rem, srx->skb_new);
-
- if (mem_p->mem_obj == NULL)
- rv = siw_rx_kva(srx,
- ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
- bytes);
- else if (!mem_p->is_pbl)
- rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
- bytes);
- else
- rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p,
- sge->laddr + wqe->processed, bytes);
+ rv = siw_rx_data(mem_p, srx, &frx->pbl_idx,
+ sge->laddr + wqe->processed, bytes);
if (rv != bytes) {
wqe->wc_status = SIW_WC_GENERAL_ERR;
rv = -EINVAL;
@@ -881,6 +868,13 @@ error_term:
return rv;
}
+static void siw_update_skb_rcvd(struct siw_rx_stream *srx, u16 length)
+{
+ srx->skb_offset += length;
+ srx->skb_new -= length;
+ srx->skb_copied += length;
+}
+
int siw_proc_terminate(struct siw_qp *qp)
{
struct siw_rx_stream *srx = &qp->rx_stream;
@@ -925,9 +919,7 @@ int siw_proc_terminate(struct siw_qp *qp)
goto out;
infop += to_copy;
- srx->skb_offset += to_copy;
- srx->skb_new -= to_copy;
- srx->skb_copied += to_copy;
+ siw_update_skb_rcvd(srx, to_copy);
srx->fpdu_part_rcvd += to_copy;
srx->fpdu_part_rem -= to_copy;
@@ -949,9 +941,7 @@ int siw_proc_terminate(struct siw_qp *qp)
term->flag_m ? "valid" : "invalid");
}
out:
- srx->skb_new -= to_copy;
- srx->skb_offset += to_copy;
- srx->skb_copied += to_copy;
+ siw_update_skb_rcvd(srx, to_copy);
srx->fpdu_part_rcvd += to_copy;
srx->fpdu_part_rem -= to_copy;
@@ -970,9 +960,7 @@ static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
- srx->skb_new -= avail;
- srx->skb_offset += avail;
- srx->skb_copied += avail;
+ siw_update_skb_rcvd(srx, avail);
srx->fpdu_part_rem -= avail;
if (srx->fpdu_part_rem)
@@ -1023,12 +1011,8 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes);
+ siw_update_skb_rcvd(srx, bytes);
srx->fpdu_part_rcvd += bytes;
-
- srx->skb_new -= bytes;
- srx->skb_offset += bytes;
- srx->skb_copied += bytes;
-
if (srx->fpdu_part_rcvd < MIN_DDP_HDR)
return -EAGAIN;
@@ -1091,12 +1075,8 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes);
+ siw_update_skb_rcvd(srx, bytes);
srx->fpdu_part_rcvd += bytes;
-
- srx->skb_new -= bytes;
- srx->skb_offset += bytes;
- srx->skb_copied += bytes;
-
if (srx->fpdu_part_rcvd < hdrlen)
return -EAGAIN;
}
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index b2c06100c..64ad9e089 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -34,6 +34,15 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
return NULL;
}
+static struct page *siw_get_page(struct siw_mem *mem, struct siw_sge *sge,
+ unsigned long offset, int *pbl_idx)
+{
+ if (!mem->is_pbl)
+ return siw_get_upage(mem->umem, sge->laddr + offset);
+ else
+ return siw_get_pblpage(mem, sge->laddr + offset, pbl_idx);
+}
+
/*
* Copy short payload at provided destination payload address
*/
@@ -67,11 +76,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
char *buffer;
int pbl_idx = 0;
- if (!mem->is_pbl)
- p = siw_get_upage(mem->umem, sge->laddr);
- else
- p = siw_get_pblpage(mem, sge->laddr, &pbl_idx);
-
+ p = siw_get_page(mem, sge, 0, &pbl_idx);
if (unlikely(!p))
return -EFAULT;
@@ -85,13 +90,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
memcpy(paddr, buffer + off, part);
kunmap_local(buffer);
- if (!mem->is_pbl)
- p = siw_get_upage(mem->umem,
- sge->laddr + part);
- else
- p = siw_get_pblpage(mem,
- sge->laddr + part,
- &pbl_idx);
+ p = siw_get_page(mem, sge, part, &pbl_idx);
if (unlikely(!p))
return -EFAULT;
@@ -249,14 +248,10 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
/*
* Do complete CRC if enabled and short packet
*/
- if (c_tx->mpa_crc_hd) {
- crypto_shash_init(c_tx->mpa_crc_hd);
- if (crypto_shash_update(c_tx->mpa_crc_hd,
- (u8 *)&c_tx->pkt,
- c_tx->ctrl_len))
- return -EINVAL;
- crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)crc);
- }
+ if (c_tx->mpa_crc_hd &&
+ crypto_shash_digest(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
+ c_tx->ctrl_len, (u8 *)crc) != 0)
+ return -EINVAL;
c_tx->ctrl_len += MPA_CRC_SIZE;
return PKT_COMPLETE;
@@ -297,8 +292,7 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
(char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
.iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };
- int rv = kernel_sendmsg(s, &msg, &iov, 1,
- c_tx->ctrl_len - c_tx->ctrl_sent);
+ int rv = kernel_sendmsg(s, &msg, &iov, 1, iov.iov_len);
if (rv >= 0) {
c_tx->ctrl_sent += rv;
@@ -502,13 +496,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (!is_kva) {
struct page *p;
- if (mem->is_pbl)
- p = siw_get_pblpage(
- mem, sge->laddr + sge_off,
- &pbl_idx);
- else
- p = siw_get_upage(mem->umem,
- sge->laddr + sge_off);
+ p = siw_get_page(mem, sge, sge_off, &pbl_idx);
if (unlikely(!p)) {
siw_unmap_pages(iov, kmap_mask, seg);
wqe->processed -= c_tx->bytes_unsent;
@@ -1009,13 +997,12 @@ static int siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)
* MPA FPDUs, each containing a DDP segment.
*
* SQ processing may occur in user context as a result of posting
- * new WQE's or from siw_sq_work_handler() context. Processing in
+ * new WQE's or from siw_tx_thread context. Processing in
* user context is limited to non-kernel verbs users.
*
* SQ processing may get paused anytime, possibly in the middle of a WR
* or FPDU, if insufficient send space is available. SQ processing
- * gets resumed from siw_sq_work_handler(), if send space becomes
- * available again.
+ * gets resumed from siw_tx_thread, if send space becomes available again.
*
* Must be called with the QP state read-locked.
*
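
The CRC change in siw_qp_prepare_tx() is a pure consolidation: crypto_shash_digest() is the documented one-shot equivalent of the init/update/final triple it replaces. With desc, data, len and out as placeholders:

/* The two forms compute the same digest: */
crypto_shash_init(desc);
crypto_shash_update(desc, data, len);
crypto_shash_final(desc, out);

/* ...is equivalent to the single call: */
crypto_shash_digest(desc, data, len, out);
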
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index c5c27db9c..ecf044466 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -19,6 +19,15 @@
#include "siw_verbs.h"
#include "siw_mem.h"
+static int siw_qp_state_to_ib_qp_state[SIW_QP_STATE_COUNT] = {
+ [SIW_QP_STATE_IDLE] = IB_QPS_INIT,
+ [SIW_QP_STATE_RTR] = IB_QPS_RTR,
+ [SIW_QP_STATE_RTS] = IB_QPS_RTS,
+ [SIW_QP_STATE_CLOSING] = IB_QPS_SQD,
+ [SIW_QP_STATE_TERMINATE] = IB_QPS_SQE,
+ [SIW_QP_STATE_ERROR] = IB_QPS_ERR
+};
+
static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
@@ -66,12 +75,9 @@ int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
entry = to_siw_mmap_entry(rdma_entry);
rv = remap_vmalloc_range(vma, entry->address, 0);
- if (rv) {
+ if (rv)
pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
size);
- goto out;
- }
-out:
rdma_user_mmap_entry_put(rdma_entry);
return rv;
@@ -336,11 +342,10 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
goto err_atomic;
}
/*
- * NOTE: we allow for zero element SQ and RQ WQE's SGL's
- * but not for a QP unable to hold any WQE (SQ + RQ)
+ * NOTE: a QP that cannot hold at least one SQ WQE is not allowed
*/
- if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
- siw_dbg(base_dev, "QP must have send or receive queue\n");
+ if (attrs->cap.max_send_wr == 0) {
+ siw_dbg(base_dev, "QP must have send queue\n");
rv = -EINVAL;
goto err_atomic;
}
@@ -360,21 +365,14 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (rv)
goto err_atomic;
- num_sqe = attrs->cap.max_send_wr;
- num_rqe = attrs->cap.max_recv_wr;
/* All queue indices are derived from modulo operations
* on a free running 'get' (consumer) and 'put' (producer)
* unsigned counter. Having queue sizes at power of two
* avoids handling counter wrap around.
*/
- if (num_sqe)
- num_sqe = roundup_pow_of_two(num_sqe);
- else {
- /* Zero sized SQ is not supported */
- rv = -EINVAL;
- goto err_out_xa;
- }
+ num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
+ num_rqe = attrs->cap.max_recv_wr;
if (num_rqe)
num_rqe = roundup_pow_of_two(num_rqe);
@@ -515,6 +513,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
} else {
return -EINVAL;
}
+ qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
qp_attr->cap.max_send_wr = qp->attrs.sq_size;
qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
@@ -1321,8 +1320,6 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct siw_umem *umem = NULL;
struct siw_ureq_reg_mr ureq;
struct siw_device *sdev = to_siw_dev(pd->device);
-
- unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
int rv;
siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
@@ -1338,20 +1335,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
rv = -EINVAL;
goto err_out;
}
- if (mem_limit != RLIM_INFINITY) {
- unsigned long num_pages =
- (PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
- mem_limit >>= PAGE_SHIFT;
-
- if (num_pages > mem_limit - current->mm->locked_vm) {
- siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
- num_pages, mem_limit,
- current->mm->locked_vm);
- rv = -ENOMEM;
- goto err_out;
- }
- }
- umem = siw_umem_get(start, len, ib_access_writable(rights));
+ umem = siw_umem_get(pd->device, start, len, rights);
if (IS_ERR(umem)) {
rv = PTR_ERR(umem);
siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
@@ -1404,7 +1388,7 @@ err_out:
kfree_rcu(mr, rcu);
} else {
if (umem)
- siw_umem_release(umem, false);
+ siw_umem_release(umem);
}
return ERR_PTR(rv);
}
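
With the new siw_qp_state_to_ib_qp_state[] table, siw_query_qp() finally fills in qp_attr->qp_state, so a standard verbs query now returns a meaningful state. A hypothetical consumer, where qp is any siw QP:

/* ib_query_qp() is the stock verbs entry point. */
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;

if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr))
	pr_info("qp state: %d\n", attr.qp_state);
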
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 35e9c8a33..963e936da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -351,10 +351,12 @@ struct ipoib_dev_priv {
struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
+ struct work_struct reschedule_napi_work;
struct work_struct flush_light;
struct work_struct flush_normal;
struct work_struct flush_heavy;
struct work_struct restart_task;
+ struct work_struct tx_timeout_work;
struct delayed_work ah_reap_task;
struct delayed_work neigh_reap_task;
struct ib_device *ca;
@@ -499,6 +501,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ib_ah *address, u32 dqpn);
void ipoib_reap_ah(struct work_struct *work);
+void ipoib_napi_schedule_work(struct work_struct *work);
struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
@@ -510,6 +513,7 @@ void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
+void ipoib_ib_tx_timeout_work(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 7f84d9866..5cde275da 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -531,11 +531,35 @@ void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
napi_schedule(&priv->recv_napi);
}
+/* Retry napi_schedule until it succeeds */
+void ipoib_napi_schedule_work(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, reschedule_napi_work);
+ bool ret;
+
+ do {
+ ret = napi_schedule(&priv->send_napi);
+ if (!ret)
+ msleep(3);
+ } while (!ret && netif_queue_stopped(priv->dev) &&
+ test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags));
+}
+
void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
struct ipoib_dev_priv *priv = ctx_ptr;
+ bool ret;
- napi_schedule(&priv->send_napi);
+ ret = napi_schedule(&priv->send_napi);
+ /*
+ * If the queue is stopped, the driver must still be able to schedule
+ * NAPI; otherwise the queue could stay stopped forever, because no new
+ * packets are sent and the NAPI callback might not get another event
+ * after it re-arms NAPI.
+ */
+ if (!ret && netif_queue_stopped(priv->dev))
+ schedule_work(&priv->reschedule_napi_work);
}
static inline int post_send(struct ipoib_dev_priv *priv,
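
napi_schedule() returns false when the instance is already scheduled or mid-poll; if that in-flight poll re-arms the CQ before the new completion is visible, a stopped TX queue could stay stopped forever. The new work item closes that window by retrying:

/* Condensed retry loop, as in ipoib_napi_schedule_work() above: */
bool done;

do {
	done = napi_schedule(&priv->send_napi);
	if (!done)
		msleep(3);	/* give the in-flight poll time to finish */
} while (!done && netif_queue_stopped(priv->dev));
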
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 967004cca..7a5be705d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1200,7 +1200,34 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
priv->global_tx_head, priv->global_tx_tail);
- /* XXX reset QP, etc. */
+
+ schedule_work(&priv->tx_timeout_work);
+}
+
+void ipoib_ib_tx_timeout_work(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work,
+ struct ipoib_dev_priv,
+ tx_timeout_work);
+ int err;
+
+ rtnl_lock();
+
+ if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ goto unlock;
+
+ ipoib_stop(priv->dev);
+ err = ipoib_open(priv->dev);
+ if (err) {
+ ipoib_warn(priv, "ipoib_open failed recovering from a tx_timeout, err(%d).\n",
+ err);
+ goto unlock;
+ }
+
+ netif_tx_wake_all_queues(priv->dev);
+unlock:
+ rtnl_unlock();
}
static int ipoib_hard_header(struct sk_buff *skb,
@@ -2112,7 +2139,7 @@ void ipoib_setup_common(struct net_device *dev)
ipoib_set_ethtool_ops(dev);
- dev->watchdog_timeo = HZ;
+ dev->watchdog_timeo = 10 * HZ;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
@@ -2150,10 +2177,12 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
+ INIT_WORK(&priv->reschedule_napi_work, ipoib_napi_schedule_work);
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+ INIT_WORK(&priv->tx_timeout_work, ipoib_ib_tx_timeout_work);
INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
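
ndo_tx_timeout runs from the watchdog timer in atomic context, so the real recovery, an administrative stop/open of the device, is deferred to tx_timeout_work, which may sleep and take rtnl_lock(); the watchdog interval is also relaxed from HZ to 10 * HZ to avoid spurious resets. The deferral pattern in general form:

/* Generic deferral sketch; foo_* names are illustrative, not ipoib's. */
static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct foo_priv *priv = netdev_priv(dev);

	schedule_work(&priv->recovery_work);	/* recovery takes rtnl_lock() */
}
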
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index d967d5532..68429a5f7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ enum iser_data_dir {
*
* @sg: pointer to the sg list
* @size: num entries of this sg
- * @data_len: total beffer byte len
+ * @data_len: total buffer byte len
* @dma_nents: returned by dma_map_sg
*/
struct iser_data_buf {
@@ -299,7 +299,6 @@ struct ib_conn;
*
* @ib_device: RDMA device
* @pd: Protection Domain for this device
- * @mr: Global DMA memory region
* @event_handler: IB events handle routine
* @ig_list: entry in devices list
* @refcount: Reference counter, dominated by open iser connections
@@ -387,7 +386,7 @@ struct ib_conn {
* to max number of post recvs
* @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
- * @release_work: deffered work for release job
+ * @release_work: deferred work for release job
* @state_mutex: protects iser connection state
* @stop_completion: conn_stop completion
* @ib_completion: RDMA cleanup completion
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 7f3167ce2..88106cf5c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1391,9 +1391,9 @@ static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
clt_path->max_pages_per_mr);
if (IS_ERR(req->mr)) {
err = PTR_ERR(req->mr);
+ pr_err("Failed to alloc clt_path->max_pages_per_mr %d: %pe\n",
+ clt_path->max_pages_per_mr, req->mr);
req->mr = NULL;
- pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
- clt_path->max_pages_per_mr);
goto out;
}
@@ -2025,6 +2025,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
/*
* Device removal is a special case. Queue close and return 0.
*/
+ rtrs_wrn_rl(s, "CM event: %s, status: %d\n", rdma_event_msg(ev->event),
+ ev->status);
rtrs_clt_close_conns(clt_path, false);
return 0;
default:
@@ -2058,10 +2060,8 @@ static int create_cm(struct rtrs_clt_con *con)
clt_path->s.dst_addr.ss_family == AF_IB ?
RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
- err = PTR_ERR(cm_id);
- rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
-
- return err;
+ rtrs_err(s, "Failed to create CM ID, err: %pe\n", cm_id);
+ return PTR_ERR(cm_id);
}
con->c.cm_id = cm_id;
con->cm_err = 0;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index d80edfffd..4e17d546d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -242,8 +242,8 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
- rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
- PTR_ERR(cq));
+ rtrs_err(con->path, "Creating completion queue failed, errno: %pe\n",
+ cq);
return PTR_ERR(cq);
}
con->cq = cq;
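
Both rtrs hunks switch error reporting to the %pe printk specifier, which decodes an ERR_PTR into its symbolic name (with CONFIG_SYMBOLIC_ERRNAME) instead of logging a bare number obtained via PTR_ERR(). For example:

void *p = ERR_PTR(-ENOMEM);

pr_err("allocation failed: %pe\n", p);	/* logs "... -ENOMEM" */
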
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 8c5fdb0f8..f71ea4fb1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1365,8 +1365,8 @@ static ssize_t input_dev_show_##name(struct device *dev, \
{ \
struct input_dev *input_dev = to_input_dev(dev); \
\
- return scnprintf(buf, PAGE_SIZE, "%s\n", \
- input_dev->name ? input_dev->name : ""); \
+ return sysfs_emit(buf, "%s\n", \
+ input_dev->name ? input_dev->name : ""); \
} \
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
@@ -1458,7 +1458,7 @@ static ssize_t inhibited_show(struct device *dev,
{
struct input_dev *input_dev = to_input_dev(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
+ return sysfs_emit(buf, "%d\n", input_dev->inhibited);
}
static ssize_t inhibited_store(struct device *dev,
@@ -1505,7 +1505,7 @@ static ssize_t input_dev_show_id_##name(struct device *dev, \
char *buf) \
{ \
struct input_dev *input_dev = to_input_dev(dev); \
- return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
+ return sysfs_emit(buf, "%04x\n", input_dev->id.name); \
} \
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
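
The input sysfs conversions replace scnprintf(buf, PAGE_SIZE, ...) with sysfs_emit(), the preferred helper for show() callbacks: it knows the sysfs buffer is exactly one page and warns if buf is not page-aligned. A typical converted callback (some_value is a stand-in):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", some_value);
}
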
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index ac6925ce8..7755e5b45 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -412,4 +412,14 @@ config JOYSTICK_SENSEHAT
To compile this driver as a module, choose M here: the
module will be called sensehat_joystick.
+config JOYSTICK_SEESAW
+ tristate "Adafruit Mini I2C Gamepad with Seesaw"
+ depends on I2C
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to use the Adafruit Mini I2C Gamepad.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adafruit-seesaw.
+
endif
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 3937535f0..9976f596a 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_JOYSTICK_N64) += n64joy.o
obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o
obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o
obj-$(CONFIG_JOYSTICK_QWIIC) += qwiic-joystick.o
+obj-$(CONFIG_JOYSTICK_SEESAW) += adafruit-seesaw.o
obj-$(CONFIG_JOYSTICK_SENSEHAT) += sensehat-joystick.o
obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o
obj-$(CONFIG_JOYSTICK_SPACEBALL) += spaceball.o
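
The new driver below talks to the Seesaw firmware with a simple two-phase transaction: write a big-endian 16-bit register address (module base ORed with an offset), then read back the payload, which seesaw_register_read() implements as a two-message i2c_transfer(). Reading the 4-byte GPIO bulk state, for instance, looks roughly like this on the wire (addr and adapter are placeholders):

/* Write 0x01 0x04 (SEESAW_GPIO_BULK), then read 4 bytes of pin state. */
u8 reg[2] = { 0x01, 0x04 };
u8 state[4];
struct i2c_msg msgs[2] = {
	{ .addr = addr, .flags = 0,        .len = 2, .buf = reg },
	{ .addr = addr, .flags = I2C_M_RD, .len = 4, .buf = state },
};

i2c_transfer(adapter, msgs, 2);
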
diff --git a/drivers/input/joystick/adafruit-seesaw.c b/drivers/input/joystick/adafruit-seesaw.c
new file mode 100644
index 000000000..1b9279f02
--- /dev/null
+++ b/drivers/input/joystick/adafruit-seesaw.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com>
+ *
+ * Driver for Adafruit Mini I2C Gamepad
+ *
+ * Based on the work of:
+ * Oleh Kravchenko (Sparkfun Qwiic Joystick driver)
+ *
+ * Datasheet: https://cdn-learn.adafruit.com/downloads/pdf/gamepad-qt.pdf
+ * Product page: https://www.adafruit.com/product/5743
+ * Firmware and hardware sources: https://github.com/adafruit/Adafruit_Seesaw
+ *
+ * TODO:
+ * - Add interrupt support
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define SEESAW_DEVICE_NAME "seesaw-gamepad"
+
+#define SEESAW_ADC_BASE 0x0900
+
+#define SEESAW_GPIO_DIRCLR_BULK 0x0103
+#define SEESAW_GPIO_BULK 0x0104
+#define SEESAW_GPIO_BULK_SET 0x0105
+#define SEESAW_GPIO_PULLENSET 0x010b
+
+#define SEESAW_STATUS_HW_ID 0x0001
+#define SEESAW_STATUS_SWRST 0x007f
+
+#define SEESAW_ADC_OFFSET 0x07
+
+#define SEESAW_BUTTON_A 0x05
+#define SEESAW_BUTTON_B 0x01
+#define SEESAW_BUTTON_X 0x06
+#define SEESAW_BUTTON_Y 0x02
+#define SEESAW_BUTTON_START 0x10
+#define SEESAW_BUTTON_SELECT 0x00
+
+#define SEESAW_ANALOG_X 0x0e
+#define SEESAW_ANALOG_Y 0x0f
+
+#define SEESAW_JOYSTICK_MAX_AXIS 1023
+#define SEESAW_JOYSTICK_FUZZ 2
+#define SEESAW_JOYSTICK_FLAT 4
+
+#define SEESAW_GAMEPAD_POLL_INTERVAL_MS 16
+#define SEESAW_GAMEPAD_POLL_MIN 8
+#define SEESAW_GAMEPAD_POLL_MAX 32
+
+static const unsigned long SEESAW_BUTTON_MASK =
+ BIT(SEESAW_BUTTON_A) | BIT(SEESAW_BUTTON_B) | BIT(SEESAW_BUTTON_X) |
+ BIT(SEESAW_BUTTON_Y) | BIT(SEESAW_BUTTON_START) |
+ BIT(SEESAW_BUTTON_SELECT);
+
+struct seesaw_gamepad {
+ struct input_dev *input_dev;
+ struct i2c_client *i2c_client;
+};
+
+struct seesaw_data {
+ u16 x;
+ u16 y;
+ u32 button_state;
+};
+
+static const struct key_entry seesaw_buttons_new[] = {
+ { KE_KEY, SEESAW_BUTTON_A, .keycode = BTN_SOUTH },
+ { KE_KEY, SEESAW_BUTTON_B, .keycode = BTN_EAST },
+ { KE_KEY, SEESAW_BUTTON_X, .keycode = BTN_NORTH },
+ { KE_KEY, SEESAW_BUTTON_Y, .keycode = BTN_WEST },
+ { KE_KEY, SEESAW_BUTTON_START, .keycode = BTN_START },
+ { KE_KEY, SEESAW_BUTTON_SELECT, .keycode = BTN_SELECT },
+ { KE_END, 0 }
+};
+
+static int seesaw_register_read(struct i2c_client *client, u16 reg, void *buf,
+ int count)
+{
+ __be16 register_buf = cpu_to_be16(reg);
+ struct i2c_msg message_buf[2] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags,
+ .len = sizeof(register_buf),
+ .buf = (u8 *)&register_buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = client->flags | I2C_M_RD,
+ .len = count,
+ .buf = (u8 *)buf,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, message_buf,
+ ARRAY_SIZE(message_buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int seesaw_register_write_u8(struct i2c_client *client, u16 reg,
+ u8 value)
+{
+ u8 write_buf[sizeof(reg) + sizeof(value)];
+ int ret;
+
+ put_unaligned_be16(reg, write_buf);
+ write_buf[sizeof(reg)] = value;
+
+ ret = i2c_master_send(client, write_buf, sizeof(write_buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int seesaw_register_write_u32(struct i2c_client *client, u16 reg,
+ u32 value)
+{
+ u8 write_buf[sizeof(reg) + sizeof(value)];
+ int ret;
+
+ put_unaligned_be16(reg, write_buf);
+ put_unaligned_be32(value, write_buf + sizeof(reg));
+ ret = i2c_master_send(client, write_buf, sizeof(write_buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int seesaw_read_data(struct i2c_client *client, struct seesaw_data *data)
+{
+ __be16 adc_data;
+ __be32 read_buf;
+ int err;
+
+ err = seesaw_register_read(client, SEESAW_GPIO_BULK,
+ &read_buf, sizeof(read_buf));
+ if (err)
+ return err;
+
+ data->button_state = ~be32_to_cpu(read_buf);
+
+ err = seesaw_register_read(client,
+ SEESAW_ADC_BASE |
+ (SEESAW_ADC_OFFSET + SEESAW_ANALOG_X),
+ &adc_data, sizeof(adc_data));
+ if (err)
+ return err;
+ /*
+ * The ADC reports full left as the maximum value and full right as 0;
+ * invert the X axis so reports follow the input core's convention of
+ * values increasing to the right.
+ */
+ data->x = SEESAW_JOYSTICK_MAX_AXIS - be16_to_cpu(adc_data);
+
+ err = seesaw_register_read(client,
+ SEESAW_ADC_BASE |
+ (SEESAW_ADC_OFFSET + SEESAW_ANALOG_Y),
+ &adc_data, sizeof(adc_data));
+ if (err)
+ return err;
+
+ data->y = be16_to_cpu(adc_data);
+
+ return 0;
+}
+
+static void seesaw_poll(struct input_dev *input)
+{
+ struct seesaw_gamepad *private = input_get_drvdata(input);
+ struct seesaw_data data;
+ int err, i;
+
+ err = seesaw_read_data(private->i2c_client, &data);
+ if (err) {
+ dev_err_ratelimited(&input->dev,
+ "failed to read joystick state: %d\n", err);
+ return;
+ }
+
+ input_report_abs(input, ABS_X, data.x);
+ input_report_abs(input, ABS_Y, data.y);
+
+ for_each_set_bit(i, &SEESAW_BUTTON_MASK,
+ BITS_PER_TYPE(SEESAW_BUTTON_MASK)) {
+ if (!sparse_keymap_report_event(input, i,
+ data.button_state & BIT(i),
+ false))
+ dev_err_ratelimited(&input->dev,
+ "failed to report keymap event");
+ }
+
+ input_sync(input);
+}
+
+static int seesaw_probe(struct i2c_client *client)
+{
+ struct seesaw_gamepad *seesaw;
+ u8 hardware_id;
+ int err;
+
+ err = seesaw_register_write_u8(client, SEESAW_STATUS_SWRST, 0xFF);
+ if (err)
+ return err;
+
+ /* Wait for the registers to reset before proceeding */
+ usleep_range(10000, 15000);
+
+ seesaw = devm_kzalloc(&client->dev, sizeof(*seesaw), GFP_KERNEL);
+ if (!seesaw)
+ return -ENOMEM;
+
+ err = seesaw_register_read(client, SEESAW_STATUS_HW_ID,
+ &hardware_id, sizeof(hardware_id));
+ if (err)
+ return err;
+
+ dev_dbg(&client->dev, "Adafruit Seesaw Gamepad, Hardware ID: %02x\n",
+ hardware_id);
+
+ /* Set Pin Mode to input and enable pull-up resistors */
+ err = seesaw_register_write_u32(client, SEESAW_GPIO_DIRCLR_BULK,
+ SEESAW_BUTTON_MASK);
+ if (err)
+ return err;
+ err = seesaw_register_write_u32(client, SEESAW_GPIO_PULLENSET,
+ SEESAW_BUTTON_MASK);
+ if (err)
+ return err;
+ err = seesaw_register_write_u32(client, SEESAW_GPIO_BULK_SET,
+ SEESAW_BUTTON_MASK);
+ if (err)
+ return err;
+
+ seesaw->i2c_client = client;
+ seesaw->input_dev = devm_input_allocate_device(&client->dev);
+ if (!seesaw->input_dev)
+ return -ENOMEM;
+
+ seesaw->input_dev->id.bustype = BUS_I2C;
+ seesaw->input_dev->name = "Adafruit Seesaw Gamepad";
+ seesaw->input_dev->phys = "i2c/" SEESAW_DEVICE_NAME;
+ input_set_drvdata(seesaw->input_dev, seesaw);
+ input_set_abs_params(seesaw->input_dev, ABS_X,
+ 0, SEESAW_JOYSTICK_MAX_AXIS,
+ SEESAW_JOYSTICK_FUZZ, SEESAW_JOYSTICK_FLAT);
+ input_set_abs_params(seesaw->input_dev, ABS_Y,
+ 0, SEESAW_JOYSTICK_MAX_AXIS,
+ SEESAW_JOYSTICK_FUZZ, SEESAW_JOYSTICK_FLAT);
+
+ err = sparse_keymap_setup(seesaw->input_dev, seesaw_buttons_new, NULL);
+ if (err) {
+ dev_err(&client->dev,
+ "failed to set up input device keymap: %d\n", err);
+ return err;
+ }
+
+ err = input_setup_polling(seesaw->input_dev, seesaw_poll);
+ if (err) {
+ dev_err(&client->dev, "failed to set up polling: %d\n", err);
+ return err;
+ }
+
+ input_set_poll_interval(seesaw->input_dev,
+ SEESAW_GAMEPAD_POLL_INTERVAL_MS);
+ input_set_max_poll_interval(seesaw->input_dev, SEESAW_GAMEPAD_POLL_MAX);
+ input_set_min_poll_interval(seesaw->input_dev, SEESAW_GAMEPAD_POLL_MIN);
+
+ err = input_register_device(seesaw->input_dev);
+ if (err) {
+ dev_err(&client->dev, "failed to register joystick: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id seesaw_id_table[] = {
+ { SEESAW_DEVICE_NAME },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, seesaw_id_table);
+
+static const struct of_device_id seesaw_of_table[] = {
+ { .compatible = "adafruit,seesaw-gamepad"},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, seesaw_of_table);
+
+static struct i2c_driver seesaw_driver = {
+ .driver = {
+ .name = SEESAW_DEVICE_NAME,
+ .of_match_table = seesaw_of_table,
+ },
+ .id_table = seesaw_id_table,
+ .probe = seesaw_probe,
+};
+module_i2c_driver(seesaw_driver);
+
+MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>");
+MODULE_DESCRIPTION("Adafruit Mini I2C Gamepad driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index bf8b1cc0e..f1822c19a 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -13,7 +13,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/input/as5011.h>
#include <linux/slab.h>
@@ -61,7 +61,7 @@ MODULE_LICENSE("GPL");
struct as5011_device {
struct input_dev *input_dev;
struct i2c_client *i2c_client;
- unsigned int button_gpio;
+ struct gpio_desc *button_gpiod;
unsigned int button_irq;
unsigned int axis_irq;
};
@@ -114,7 +114,7 @@ static int as5011_i2c_read(struct i2c_client *client,
static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
{
struct as5011_device *as5011 = dev_id;
- int val = gpio_get_value_cansleep(as5011->button_gpio);
+ int val = gpiod_get_value_cansleep(as5011->button_gpiod);
input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
input_sync(as5011->input_dev);
@@ -248,7 +248,6 @@ static int as5011_probe(struct i2c_client *client)
as5011->i2c_client = client;
as5011->input_dev = input_dev;
- as5011->button_gpio = plat_data->button_gpio;
as5011->axis_irq = plat_data->axis_irq;
input_dev->name = "Austria Microsystem as5011 joystick";
@@ -262,18 +261,20 @@ static int as5011_probe(struct i2c_client *client)
input_set_abs_params(as5011->input_dev, ABS_Y,
AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
- error = gpio_request(as5011->button_gpio, "AS5011 button");
- if (error < 0) {
- dev_err(&client->dev, "Failed to request button gpio\n");
+ as5011->button_gpiod = devm_gpiod_get(&client->dev, NULL, GPIOD_IN);
+ if (IS_ERR(as5011->button_gpiod)) {
+ error = PTR_ERR(as5011->button_gpiod);
+ dev_err(&client->dev, "Failed to request button GPIO\n");
goto err_free_mem;
}
+ gpiod_set_consumer_name(as5011->button_gpiod, "AS5011 button");
- irq = gpio_to_irq(as5011->button_gpio);
+ irq = gpiod_to_irq(as5011->button_gpiod);
if (irq < 0) {
dev_err(&client->dev,
"Failed to get irq number for button gpio\n");
error = irq;
- goto err_free_button_gpio;
+ goto err_free_mem;
}
as5011->button_irq = irq;
@@ -286,7 +287,7 @@ static int as5011_probe(struct i2c_client *client)
if (error < 0) {
dev_err(&client->dev,
"Can't allocate button irq %d\n", as5011->button_irq);
- goto err_free_button_gpio;
+ goto err_free_mem;
}
error = as5011_configure_chip(as5011, plat_data);
@@ -317,8 +318,6 @@ err_free_axis_irq:
free_irq(as5011->axis_irq, as5011);
err_free_button_irq:
free_irq(as5011->button_irq, as5011);
-err_free_button_gpio:
- gpio_free(as5011->button_gpio);
err_free_mem:
input_free_device(input_dev);
kfree(as5011);
@@ -332,7 +331,6 @@ static void as5011_remove(struct i2c_client *client)
free_irq(as5011->axis_irq, as5011);
free_irq(as5011->button_irq, as5011);
- gpio_free(as5011->button_gpio);
input_unregister_device(as5011->input_dev);
kfree(as5011);
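
For reference, the descriptor-based GPIO pattern the as5011 conversion adopts, as a minimal kernel-side sketch; the "button" function name is hypothetical and would come from board wiring via device tree or a lookup table:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int example_request_button(struct device *dev)
    {
            struct gpio_desc *gpiod;

            /* Managed request: released automatically on driver detach. */
            gpiod = devm_gpiod_get(dev, "button", GPIOD_IN);
            if (IS_ERR(gpiod))
                    return PTR_ERR(gpiod);

            gpiod_set_consumer_name(gpiod, "example button");

            /* Descriptors map to IRQ numbers just like legacy GPIOs did. */
            return gpiod_to_irq(gpiod);
    }

Because the descriptor is devm-managed there is no gpio_free() to unwind, which is why the hunks above collapse the err_free_button_gpio label into err_free_mem.
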
diff --git a/drivers/input/joystick/pxrc.c b/drivers/input/joystick/pxrc.c
index ea2bf5951..52d9eab66 100644
--- a/drivers/input/joystick/pxrc.c
+++ b/drivers/input/joystick/pxrc.c
@@ -5,15 +5,17 @@
* Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
*/
-#include <linux/kernel.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
-#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
+
#include <linux/usb.h>
#include <linux/usb/input.h>
-#include <linux/mutex.h>
-#include <linux/input.h>
#define PXRC_VENDOR_ID 0x1781
#define PXRC_PRODUCT_ID 0x0898
@@ -81,33 +83,28 @@ exit:
static int pxrc_open(struct input_dev *input)
{
struct pxrc *pxrc = input_get_drvdata(input);
- int retval;
+ int error;
- mutex_lock(&pxrc->pm_mutex);
- retval = usb_submit_urb(pxrc->urb, GFP_KERNEL);
- if (retval) {
+ guard(mutex)(&pxrc->pm_mutex);
+ error = usb_submit_urb(pxrc->urb, GFP_KERNEL);
+ if (error) {
dev_err(&pxrc->intf->dev,
"%s - usb_submit_urb failed, error: %d\n",
- __func__, retval);
- retval = -EIO;
- goto out;
+ __func__, error);
+ return -EIO;
}
pxrc->is_open = true;
-
-out:
- mutex_unlock(&pxrc->pm_mutex);
- return retval;
+ return 0;
}
static void pxrc_close(struct input_dev *input)
{
struct pxrc *pxrc = input_get_drvdata(input);
- mutex_lock(&pxrc->pm_mutex);
+ guard(mutex)(&pxrc->pm_mutex);
usb_kill_urb(pxrc->urb);
pxrc->is_open = false;
- mutex_unlock(&pxrc->pm_mutex);
}
static void pxrc_free_urb(void *_pxrc)
@@ -208,10 +205,9 @@ static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
{
struct pxrc *pxrc = usb_get_intfdata(intf);
- mutex_lock(&pxrc->pm_mutex);
+ guard(mutex)(&pxrc->pm_mutex);
if (pxrc->is_open)
usb_kill_urb(pxrc->urb);
- mutex_unlock(&pxrc->pm_mutex);
return 0;
}
@@ -219,14 +215,12 @@ static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
static int pxrc_resume(struct usb_interface *intf)
{
struct pxrc *pxrc = usb_get_intfdata(intf);
- int retval = 0;
- mutex_lock(&pxrc->pm_mutex);
+ guard(mutex)(&pxrc->pm_mutex);
if (pxrc->is_open && usb_submit_urb(pxrc->urb, GFP_KERNEL) < 0)
- retval = -EIO;
+ return -EIO;
- mutex_unlock(&pxrc->pm_mutex);
- return retval;
+ return 0;
}
static int pxrc_pre_reset(struct usb_interface *intf)
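
The pxrc conversion relies on the scope-based locking guards from <linux/cleanup.h>. A minimal sketch of the semantics, using a hypothetical example_lock:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(example_lock);
    static bool example_ready;

    static int example_open(void)
    {
            /* Acquires the mutex now, releases it when the scope ends. */
            guard(mutex)(&example_lock);

            if (!example_ready)
                    return -EIO;    /* unlocked automatically here */

            return 0;               /* ...and here */
    }

Every early return drops the mutex, so the out: label and the explicit mutex_unlock() calls in the old pxrc_open() are no longer needed.
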
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c11af4441..1fad51b51 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -366,6 +366,8 @@ static const struct xpad_device {
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
+ { 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
+ { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
	{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
@@ -507,6 +509,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
+ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
@@ -1678,7 +1681,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
if (!led)
return -ENOMEM;
- xpad->pad_nr = ida_simple_get(&xpad_pad_seq, 0, 0, GFP_KERNEL);
+ xpad->pad_nr = ida_alloc(&xpad_pad_seq, GFP_KERNEL);
if (xpad->pad_nr < 0) {
error = xpad->pad_nr;
goto err_free_mem;
@@ -1701,7 +1704,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
return 0;
err_free_id:
- ida_simple_remove(&xpad_pad_seq, xpad->pad_nr);
+ ida_free(&xpad_pad_seq, xpad->pad_nr);
err_free_mem:
kfree(led);
xpad->led = NULL;
@@ -1714,7 +1717,7 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
if (xpad_led) {
led_classdev_unregister(&xpad_led->led_cdev);
- ida_simple_remove(&xpad_pad_seq, xpad->pad_nr);
+ ida_free(&xpad_pad_seq, xpad->pad_nr);
kfree(xpad_led);
}
}
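
ida_simple_get()/ida_simple_remove() are deprecated wrappers; the xpad hunks switch to the core IDA API. A sketch of the equivalent allocation, with a hypothetical IDA:

    #include <linux/gfp.h>
    #include <linux/idr.h>

    static DEFINE_IDA(example_pad_seq);

    static int example_claim_pad_nr(void)
    {
            /* Lowest free ID from 0 up, same as ida_simple_get(.., 0, 0, ..). */
            int nr = ida_alloc(&example_pad_seq, GFP_KERNEL);

            if (nr < 0)
                    return nr;

            /* ... use nr as the pad number ... */

            ida_free(&example_pad_seq, nr);
            return 0;
    }
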
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index c229bd6b3..7f67f9f29 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -826,7 +826,7 @@ static int atkbd_probe(struct atkbd *atkbd)
if (atkbd_skip_getid(atkbd)) {
atkbd->id = 0xab83;
- return 0;
+ goto deactivate_kbd;
}
/*
@@ -863,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd)
return -1;
}
+deactivate_kbd:
/*
* Make sure nothing is coming from the keyboard and disturbs our
* internal state.
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 1b4937dce..52fba9ee7 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -10,10 +10,11 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/leds.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/gpio/consumer.h>
+#include <linux/bitfield.h>
#define CAP11XX_REG_MAIN_CONTROL 0x00
#define CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT (6)
@@ -24,6 +25,7 @@
#define CAP11XX_REG_NOISE_FLAG_STATUS 0x0a
#define CAP11XX_REG_SENOR_DELTA(X) (0x10 + (X))
#define CAP11XX_REG_SENSITIVITY_CONTROL 0x1f
+#define CAP11XX_REG_SENSITIVITY_CONTROL_DELTA_SENSE_MASK 0x70
#define CAP11XX_REG_CONFIG 0x20
#define CAP11XX_REG_SENSOR_ENABLE 0x21
#define CAP11XX_REG_SENSOR_CONFIG 0x22
@@ -32,6 +34,7 @@
#define CAP11XX_REG_CALIBRATION 0x26
#define CAP11XX_REG_INT_ENABLE 0x27
#define CAP11XX_REG_REPEAT_RATE 0x28
+#define CAP11XX_REG_SIGNAL_GUARD_ENABLE 0x29
#define CAP11XX_REG_MT_CONFIG 0x2a
#define CAP11XX_REG_MT_PATTERN_CONFIG 0x2b
#define CAP11XX_REG_MT_PATTERN 0x2d
@@ -47,6 +50,8 @@
#define CAP11XX_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
#define CAP11XX_REG_LED_POLARITY 0x73
#define CAP11XX_REG_LED_OUTPUT_CONTROL 0x74
+#define CAP11XX_REG_CALIB_SENSITIVITY_CONFIG 0x80
+#define CAP11XX_REG_CALIB_SENSITIVITY_CONFIG2 0x81
#define CAP11XX_REG_LED_DUTY_CYCLE_1 0x90
#define CAP11XX_REG_LED_DUTY_CYCLE_2 0x91
@@ -78,12 +83,20 @@ struct cap11xx_led {
struct cap11xx_priv {
struct regmap *regmap;
+ struct device *dev;
struct input_dev *idev;
+ const struct cap11xx_hw_model *model;
+ u8 id;
struct cap11xx_led *leds;
int num_leds;
/* config */
+ u8 analog_gain;
+ u8 sensitivity_delta_sense;
+ u8 signal_guard_inputs_mask;
+ u32 thresholds[8];
+ u32 calib_sensitivities[8];
u32 keycodes[];
};
@@ -160,9 +173,6 @@ static bool cap11xx_volatile_reg(struct device *dev, unsigned int reg)
case CAP11XX_REG_SENOR_DELTA(3):
case CAP11XX_REG_SENOR_DELTA(4):
case CAP11XX_REG_SENOR_DELTA(5):
- case CAP11XX_REG_PRODUCT_ID:
- case CAP11XX_REG_MANUFACTURER_ID:
- case CAP11XX_REG_REVISION:
return true;
}
@@ -177,10 +187,179 @@ static const struct regmap_config cap11xx_regmap_config = {
.reg_defaults = cap11xx_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cap11xx_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.volatile_reg = cap11xx_volatile_reg,
};
+static int cap11xx_write_calib_sens_config_1(struct cap11xx_priv *priv)
+{
+ return regmap_write(priv->regmap,
+ CAP11XX_REG_CALIB_SENSITIVITY_CONFIG,
+ (priv->calib_sensitivities[3] << 6) |
+ (priv->calib_sensitivities[2] << 4) |
+ (priv->calib_sensitivities[1] << 2) |
+ priv->calib_sensitivities[0]);
+}
+
+static int cap11xx_write_calib_sens_config_2(struct cap11xx_priv *priv)
+{
+ return regmap_write(priv->regmap,
+ CAP11XX_REG_CALIB_SENSITIVITY_CONFIG2,
+ (priv->calib_sensitivities[7] << 6) |
+ (priv->calib_sensitivities[6] << 4) |
+ (priv->calib_sensitivities[5] << 2) |
+ priv->calib_sensitivities[4]);
+}
+
+static int cap11xx_init_keys(struct cap11xx_priv *priv)
+{
+ struct device_node *node = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ int i, error;
+ u32 u32_val;
+
+ if (!node) {
+ dev_err(dev, "Corresponding DT entry is not available\n");
+ return -ENODEV;
+ }
+
+ if (!of_property_read_u32(node, "microchip,sensor-gain", &u32_val)) {
+ if (priv->model->no_gain) {
+ dev_warn(dev,
+ "This model doesn't support 'sensor-gain'\n");
+ } else if (is_power_of_2(u32_val) && u32_val <= 8) {
+ priv->analog_gain = (u8)ilog2(u32_val);
+
+ error = regmap_update_bits(priv->regmap,
+ CAP11XX_REG_MAIN_CONTROL,
+ CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
+ priv->analog_gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
+ if (error)
+ return error;
+ } else {
+ dev_err(dev, "Invalid sensor-gain value %u\n", u32_val);
+ return -EINVAL;
+ }
+ }
+
+ if (of_property_read_bool(node, "microchip,irq-active-high")) {
+ if (priv->id == CAP1106 ||
+ priv->id == CAP1126 ||
+ priv->id == CAP1188) {
+ error = regmap_update_bits(priv->regmap,
+ CAP11XX_REG_CONFIG2,
+ CAP11XX_REG_CONFIG2_ALT_POL,
+ 0);
+ if (error)
+ return error;
+ } else {
+ dev_warn(dev,
+ "This model doesn't support 'irq-active-high'\n");
+ }
+ }
+
+ if (!of_property_read_u32(node, "microchip,sensitivity-delta-sense", &u32_val)) {
+ if (!is_power_of_2(u32_val) || u32_val > 128) {
+ dev_err(dev, "Invalid sensitivity-delta-sense value %u\n", u32_val);
+ return -EINVAL;
+ }
+
+ priv->sensitivity_delta_sense = (u8)ilog2(u32_val);
+ u32_val = ~(FIELD_PREP(CAP11XX_REG_SENSITIVITY_CONTROL_DELTA_SENSE_MASK,
+ priv->sensitivity_delta_sense));
+
+ error = regmap_update_bits(priv->regmap,
+ CAP11XX_REG_SENSITIVITY_CONTROL,
+ CAP11XX_REG_SENSITIVITY_CONTROL_DELTA_SENSE_MASK,
+ u32_val);
+ if (error)
+ return error;
+ }
+
+ if (!of_property_read_u32_array(node, "microchip,input-threshold",
+ priv->thresholds, priv->model->num_channels)) {
+ for (i = 0; i < priv->model->num_channels; i++) {
+ if (priv->thresholds[i] > 127) {
+ dev_err(dev, "Invalid input-threshold value %u\n",
+ priv->thresholds[i]);
+ return -EINVAL;
+ }
+
+ error = regmap_write(priv->regmap,
+ CAP11XX_REG_SENSOR_THRESH(i),
+ priv->thresholds[i]);
+ if (error)
+ return error;
+ }
+ }
+
+ if (!of_property_read_u32_array(node, "microchip,calib-sensitivity",
+ priv->calib_sensitivities,
+ priv->model->num_channels)) {
+ if (priv->id == CAP1293 || priv->id == CAP1298) {
+ for (i = 0; i < priv->model->num_channels; i++) {
+ if (!is_power_of_2(priv->calib_sensitivities[i]) ||
+ priv->calib_sensitivities[i] > 4) {
+ dev_err(dev, "Invalid calib-sensitivity value %u\n",
+ priv->calib_sensitivities[i]);
+ return -EINVAL;
+ }
+ priv->calib_sensitivities[i] = ilog2(priv->calib_sensitivities[i]);
+ }
+
+ error = cap11xx_write_calib_sens_config_1(priv);
+ if (error)
+ return error;
+
+ if (priv->id == CAP1298) {
+ error = cap11xx_write_calib_sens_config_2(priv);
+ if (error)
+ return error;
+ }
+ } else {
+ dev_warn(dev,
+ "This model doesn't support 'calib-sensitivity'\n");
+ }
+ }
+
+ for (i = 0; i < priv->model->num_channels; i++) {
+ if (!of_property_read_u32_index(node, "microchip,signal-guard",
+ i, &u32_val)) {
+ if (u32_val > 1)
+ return -EINVAL;
+ if (u32_val)
+ priv->signal_guard_inputs_mask |= 0x01 << i;
+ }
+ }
+
+ if (priv->signal_guard_inputs_mask) {
+ if (priv->id == CAP1293 || priv->id == CAP1298) {
+ error = regmap_write(priv->regmap,
+ CAP11XX_REG_SIGNAL_GUARD_ENABLE,
+ priv->signal_guard_inputs_mask);
+ if (error)
+ return error;
+ } else {
+ dev_warn(dev,
+ "This model doesn't support 'signal-guard'\n");
+ }
+ }
+
+ /* Provide some useful defaults */
+ for (i = 0; i < priv->model->num_channels; i++)
+ priv->keycodes[i] = KEY_A + i;
+
+ of_property_read_u32_array(node, "linux,keycodes",
+ priv->keycodes, priv->model->num_channels);
+
+ /* Disable autorepeat. The Linux input system has its own handling. */
+ error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+ if (error)
+ return error;
+
+ return 0;
+}
+
static irqreturn_t cap11xx_thread_func(int irq_num, void *data)
{
struct cap11xx_priv *priv = data;
@@ -332,11 +511,9 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
const struct i2c_device_id *id = i2c_client_get_device_id(i2c_client);
struct device *dev = &i2c_client->dev;
struct cap11xx_priv *priv;
- struct device_node *node;
const struct cap11xx_hw_model *cap;
- int i, error, irq, gain = 0;
+ int i, error;
unsigned int val, rev;
- u32 gain32;
if (id->driver_data >= ARRAY_SIZE(cap11xx_devices)) {
dev_err(dev, "Invalid device ID %lu\n", id->driver_data);
@@ -355,6 +532,8 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
if (!priv)
return -ENOMEM;
+ priv->dev = dev;
+
priv->regmap = devm_regmap_init_i2c(i2c_client, &cap11xx_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
@@ -384,50 +563,15 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
return error;
dev_info(dev, "CAP11XX detected, model %s, revision 0x%02x\n",
- id->name, rev);
- node = dev->of_node;
-
- if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
- if (cap->no_gain)
- dev_warn(dev,
- "This version doesn't support sensor gain\n");
- else if (is_power_of_2(gain32) && gain32 <= 8)
- gain = ilog2(gain32);
- else
- dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
- }
+ id->name, rev);
- if (id->driver_data == CAP1106 ||
- id->driver_data == CAP1126 ||
- id->driver_data == CAP1188) {
- if (of_property_read_bool(node, "microchip,irq-active-high")) {
- error = regmap_update_bits(priv->regmap,
- CAP11XX_REG_CONFIG2,
- CAP11XX_REG_CONFIG2_ALT_POL,
- 0);
- if (error)
- return error;
- }
- }
-
- /* Provide some useful defaults */
- for (i = 0; i < cap->num_channels; i++)
- priv->keycodes[i] = KEY_A + i;
-
- of_property_read_u32_array(node, "linux,keycodes",
- priv->keycodes, cap->num_channels);
+ priv->model = cap;
+ priv->id = id->driver_data;
- if (!cap->no_gain) {
- error = regmap_update_bits(priv->regmap,
- CAP11XX_REG_MAIN_CONTROL,
- CAP11XX_REG_MAIN_CONTROL_GAIN_MASK,
- gain << CAP11XX_REG_MAIN_CONTROL_GAIN_SHIFT);
- if (error)
- return error;
- }
+ dev_info(dev, "CAP11XX device detected, model %s, revision 0x%02x\n",
+ id->name, rev);
- /* Disable autorepeat. The Linux input system has its own handling. */
- error = regmap_write(priv->regmap, CAP11XX_REG_REPEAT_RATE, 0);
+ error = cap11xx_init_keys(priv);
if (error)
return error;
@@ -439,7 +583,7 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
priv->idev->id.bustype = BUS_I2C;
priv->idev->evbit[0] = BIT_MASK(EV_KEY);
- if (of_property_read_bool(node, "autorepeat"))
+ if (of_property_read_bool(dev->of_node, "autorepeat"))
__set_bit(EV_REP, priv->idev->evbit);
for (i = 0; i < cap->num_channels; i++)
@@ -474,13 +618,8 @@ static int cap11xx_i2c_probe(struct i2c_client *i2c_client)
if (error)
return error;
- irq = irq_of_parse_and_map(node, 0);
- if (!irq) {
- dev_err(dev, "Unable to parse or map IRQ\n");
- return -ENXIO;
- }
-
- error = devm_request_threaded_irq(dev, irq, NULL, cap11xx_thread_func,
+ error = devm_request_threaded_irq(dev, i2c_client->irq,
+ NULL, cap11xx_thread_func,
IRQF_ONESHOT, dev_name(dev), priv);
if (error)
return error;
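
The sensitivity handling above packs values with FIELD_PREP() from <linux/bitfield.h>, inverting the result because the register encodes higher sensitivity as smaller field values. A standalone sketch of that pack/unpack pair, reusing the same 0x70 mask:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define EXAMPLE_DELTA_SENSE_MASK GENMASK(6, 4)    /* == 0x70 */

    static u32 example_pack_delta_sense(u8 log2_val)
    {
            /* FIELD_PREP() shifts the value into the mask's bit positions. */
            return ~FIELD_PREP(EXAMPLE_DELTA_SENSE_MASK, log2_val);
    }

    static u8 example_unpack_delta_sense(u32 reg)
    {
            return FIELD_GET(EXAMPLE_DELTA_SENSE_MASK, ~reg);
    }
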
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2e7c2c046..9f3bcd41c 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -45,7 +45,9 @@ struct gpio_button_data {
unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
unsigned int irq;
+ unsigned int wakeirq;
unsigned int wakeup_trigger_type;
+
spinlock_t lock;
bool disabled;
bool key_pressed;
@@ -511,6 +513,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
struct gpio_button_data *bdata = &ddata->data[idx];
irq_handler_t isr;
unsigned long irqflags;
+ const char *wakedesc;
int irq;
int error;
@@ -575,15 +578,23 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
!gpiod_cansleep(bdata->gpiod);
}
+ /*
+	 * If an interrupt was specified, use it as the main button
+	 * interrupt and use the gpio only for reading the state. A
+	 * separate main interrupt lets runtime PM detect events even in
+	 * deeper idle states. If a dedicated wakeirq is used for system
+	 * suspend only, see below for the bdata->wakeirq setup.
+ */
if (button->irq) {
bdata->irq = button->irq;
} else {
irq = gpiod_to_irq(bdata->gpiod);
if (irq < 0) {
error = irq;
- dev_err(dev,
- "Unable to get irq number for GPIO %d, error %d\n",
- button->gpio, error);
+ dev_err_probe(dev, error,
+ "Unable to get irq number for GPIO %d\n",
+ button->gpio);
return error;
}
bdata->irq = irq;
@@ -672,6 +683,36 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
return error;
}
+ if (!button->wakeirq)
+ return 0;
+
+ /* Use :wakeup suffix like drivers/base/power/wakeirq.c does */
+ wakedesc = devm_kasprintf(dev, GFP_KERNEL, "%s:wakeup", desc);
+ if (!wakedesc)
+ return -ENOMEM;
+
+ bdata->wakeirq = button->wakeirq;
+ irqflags |= IRQF_NO_SUSPEND;
+
+ /*
+ * Wakeirq shares the handler with the main interrupt, it's only
+ * active during system suspend. See gpio_keys_button_enable_wakeup()
+ * and gpio_keys_button_disable_wakeup().
+ */
+ error = devm_request_any_context_irq(dev, bdata->wakeirq, isr,
+ irqflags, wakedesc, bdata);
+ if (error < 0) {
+ dev_err(dev, "Unable to claim wakeirq %d; error %d\n",
+			bdata->wakeirq, error);
+ return error;
+ }
+
+ /*
+ * Disable wakeirq until suspend. IRQF_NO_AUTOEN won't work if
+ * IRQF_SHARED was set based on !button->can_disable.
+ */
+ disable_irq(bdata->wakeirq);
+
return 0;
}
@@ -728,7 +769,7 @@ gpio_keys_get_devtree_pdata(struct device *dev)
struct gpio_keys_platform_data *pdata;
struct gpio_keys_button *button;
struct fwnode_handle *child;
- int nbuttons;
+ int nbuttons, irq;
nbuttons = device_get_child_node_count(dev);
if (nbuttons == 0)
@@ -750,9 +791,19 @@ gpio_keys_get_devtree_pdata(struct device *dev)
device_property_read_string(dev, "label", &pdata->name);
device_for_each_child_node(dev, child) {
- if (is_of_node(child))
- button->irq =
- irq_of_parse_and_map(to_of_node(child), 0);
+ if (is_of_node(child)) {
+ irq = of_irq_get_byname(to_of_node(child), "irq");
+ if (irq > 0)
+ button->irq = irq;
+
+ irq = of_irq_get_byname(to_of_node(child), "wakeup");
+ if (irq > 0)
+ button->wakeirq = irq;
+
+ if (!button->irq && !button->wakeirq)
+ button->irq =
+ irq_of_parse_and_map(to_of_node(child), 0);
+ }
if (fwnode_property_read_u32(child, "linux,code",
&button->code)) {
@@ -921,6 +972,11 @@ gpio_keys_button_enable_wakeup(struct gpio_button_data *bdata)
}
}
+ if (bdata->wakeirq) {
+ enable_irq(bdata->wakeirq);
+ disable_irq(bdata->irq);
+ }
+
return 0;
}
@@ -929,6 +985,11 @@ gpio_keys_button_disable_wakeup(struct gpio_button_data *bdata)
{
int error;
+ if (bdata->wakeirq) {
+ enable_irq(bdata->irq);
+ disable_irq(bdata->wakeirq);
+ }
+
/*
* The trigger type is always both edges for gpio-based keys and we do
* not support changing wakeup trigger for interrupt-based keys.
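
The wakeirq support above hands the button over between two interrupt lines at suspend entry and exit. A condensed sketch of that hand-over, assuming both IRQs were requested as in the hunk:

    #include <linux/interrupt.h>

    static void example_enable_wakeup(unsigned int irq, unsigned int wakeirq)
    {
            /* Arm the dedicated wakeup line, quiesce the runtime one. */
            enable_irq(wakeirq);
            disable_irq(irq);
    }

    static void example_disable_wakeup(unsigned int irq, unsigned int wakeirq)
    {
            /* Resume path: reverse the hand-over. */
            enable_irq(irq);
            disable_irq(wakeirq);
    }

Both lines share one handler, so exactly one of them is enabled at any time and a press is never reported twice.
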
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 454fb8675..16f936db7 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -21,7 +21,6 @@
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/platform_data/keypad-omap.h>
#include <linux/soc/ti/omap1-io.h>
@@ -49,9 +48,6 @@ struct omap_kp {
static DECLARE_TASKLET_DISABLED_OLD(kp_tasklet, omap_kp_tasklet);
-static unsigned int *row_gpios;
-static unsigned int *col_gpios;
-
static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
{
/* disable keyboard interrupt and schedule for handling */
@@ -180,7 +176,7 @@ static int omap_kp_probe(struct platform_device *pdev)
struct omap_kp *omap_kp;
struct input_dev *input_dev;
struct omap_kp_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int i, col_idx, row_idx, ret;
+ int ret;
unsigned int row_shift, keycodemax;
if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
@@ -209,17 +205,9 @@ static int omap_kp_probe(struct platform_device *pdev)
if (pdata->delay)
omap_kp->delay = pdata->delay;
- if (pdata->row_gpios && pdata->col_gpios) {
- row_gpios = pdata->row_gpios;
- col_gpios = pdata->col_gpios;
- }
-
omap_kp->rows = pdata->rows;
omap_kp->cols = pdata->cols;
- col_idx = 0;
- row_idx = 0;
-
timer_setup(&omap_kp->timer, omap_kp_timer, 0);
/* get the irq and init timer*/
@@ -276,11 +264,6 @@ err4:
err3:
device_remove_file(&pdev->dev, &dev_attr_enable);
err2:
- for (i = row_idx - 1; i >= 0; i--)
- gpio_free(row_gpios[i]);
- for (i = col_idx - 1; i >= 0; i--)
- gpio_free(col_gpios[i]);
-
kfree(omap_kp);
input_free_device(input_dev);
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index d3f8688fd..040b34099 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -83,6 +84,7 @@ struct omap4_keypad {
bool no_autorepeat;
u64 keys;
unsigned short *keymap;
+ struct clk *fck;
};
static int kbd_readl(struct omap4_keypad *keypad_data, u32 offset)
@@ -209,6 +211,10 @@ static int omap4_keypad_open(struct input_dev *input)
if (error)
return error;
+ error = clk_prepare_enable(keypad_data->fck);
+ if (error)
+ goto out;
+
disable_irq(keypad_data->irq);
kbd_writel(keypad_data, OMAP4_KBD_CTRL,
@@ -226,10 +232,11 @@ static int omap4_keypad_open(struct input_dev *input)
enable_irq(keypad_data->irq);
+out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
- return 0;
+ return error;
}
static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
@@ -258,6 +265,7 @@ static void omap4_keypad_close(struct input_dev *input)
disable_irq(keypad_data->irq);
omap4_keypad_stop(keypad_data);
enable_irq(keypad_data->irq);
+ clk_disable_unprepare(keypad_data->fck);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -356,6 +364,11 @@ static int omap4_keypad_probe(struct platform_device *pdev)
}
keypad_data->irq = irq;
+ keypad_data->fck = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(keypad_data->fck))
+ return dev_err_probe(&pdev->dev, PTR_ERR(keypad_data->fck),
+ "unable to get fck");
+
mutex_init(&keypad_data->lock);
platform_set_drvdata(pdev, keypad_data);
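
omap4-keypad now gates its functional clock around the open/close window. The pairing rule it follows, as a sketch with a hypothetical block:

    #include <linux/clk.h>

    static int example_block_open(struct clk *fck)
    {
            int error;

            /* Prepare (may sleep) and enable in one call. */
            error = clk_prepare_enable(fck);
            if (error)
                    return error;

            /* ... program registers while the block is clocked ... */
            return 0;
    }

    static void example_block_close(struct clk *fck)
    {
            /* Must balance every successful clk_prepare_enable(). */
            clk_disable_unprepare(fck);
    }
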
diff --git a/drivers/input/keyboard/qt1050.c b/drivers/input/keyboard/qt1050.c
index 6953097db..b51dfcd76 100644
--- a/drivers/input/keyboard/qt1050.c
+++ b/drivers/input/keyboard/qt1050.c
@@ -213,7 +213,7 @@ static struct regmap_config qt1050_regmap_config = {
.val_bits = 8,
.max_register = QT1050_RES_CAL,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.wr_table = &qt1050_writeable_table,
.rd_table = &qt1050_readable_table,
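
Both the cap11xx and qt1050 hunks retire REGCACHE_RBTREE in favour of the maple-tree register cache. The change is a single field in the config; a sketch with illustrative register widths:

    #include <linux/regmap.h>

    static const struct regmap_config example_regmap_config = {
            .reg_bits = 8,
            .val_bits = 8,
            .max_register = 0xff,
            /* Maple tree replaces the older rbtree cache backend. */
            .cache_type = REGCACHE_MAPLE,
    };
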
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 8af59ced1..677bc4baa 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/tca6416_keypad.h>
diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c
index ce8254891..c1fa75c0f 100644
--- a/drivers/input/misc/da7280.c
+++ b/drivers/input/misc/da7280.c
@@ -352,7 +352,7 @@ static int da7280_haptic_set_pwm(struct da7280_haptic *haptics, bool enabled)
state.duty_cycle = period_mag_multi;
}
- error = pwm_apply_state(haptics->pwm_dev, &state);
+ error = pwm_apply_might_sleep(haptics->pwm_dev, &state);
if (error)
dev_err(haptics->dev, "Failed to apply pwm state: %d\n", error);
@@ -1175,7 +1175,7 @@ static int da7280_probe(struct i2c_client *client)
/* Sync up PWM state and ensure it is off. */
pwm_init_state(haptics->pwm_dev, &state);
state.enabled = false;
- error = pwm_apply_state(haptics->pwm_dev, &state);
+ error = pwm_apply_might_sleep(haptics->pwm_dev, &state);
if (error) {
dev_err(dev, "Failed to apply PWM state: %d\n", error);
return error;
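
pwm_apply_state() was renamed to pwm_apply_might_sleep() to make the sleeping contract explicit; both da7280 hunks follow the same init pattern. A sketch of syncing a PWM off, as the probe paths above do:

    #include <linux/pwm.h>

    static int example_pwm_sync_off(struct pwm_device *pwm)
    {
            struct pwm_state state;

            /* Start from the current hardware state, then force it off. */
            pwm_init_state(pwm, &state);
            state.enabled = false;

            /* May sleep; do not call from atomic context. */
            return pwm_apply_might_sleep(pwm, &state);
    }
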
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index 74808bae3..c338765e0 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -9,11 +9,12 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#include <linux/regmap.h>
-#include <linux/of.h>
#include <linux/mfd/da9063/core.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9062/core.h>
@@ -74,13 +75,6 @@ static const struct da906x_chip_config da9062_regs = {
.name = "da9062-onkey",
};
-static const struct of_device_id da9063_compatible_reg_id_table[] = {
- { .compatible = "dlg,da9063-onkey", .data = &da9063_regs },
- { .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
- { },
-};
-MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
-
static void da9063_poll_on(struct work_struct *work)
{
struct da9063_onkey *onkey = container_of(work,
@@ -187,56 +181,43 @@ static irqreturn_t da9063_onkey_irq_handler(int irq, void *data)
static int da9063_onkey_probe(struct platform_device *pdev)
{
struct da9063_onkey *onkey;
- const struct of_device_id *match;
- int irq;
int error;
-
- match = of_match_node(da9063_compatible_reg_id_table,
- pdev->dev.of_node);
- if (!match)
- return -ENXIO;
+ int irq;
onkey = devm_kzalloc(&pdev->dev, sizeof(struct da9063_onkey),
GFP_KERNEL);
- if (!onkey) {
- dev_err(&pdev->dev, "Failed to allocate memory.\n");
+ if (!onkey)
return -ENOMEM;
- }
- onkey->config = match->data;
+ onkey->config = device_get_match_data(&pdev->dev);
+ if (!onkey->config)
+ return -ENXIO;
+
onkey->dev = &pdev->dev;
onkey->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!onkey->regmap) {
- dev_err(&pdev->dev, "Parent regmap unavailable.\n");
- return -ENXIO;
- }
+ if (!onkey->regmap)
+ return dev_err_probe(&pdev->dev, -ENXIO,
+ "Parent regmap unavailable.\n");
- onkey->key_power = !of_property_read_bool(pdev->dev.of_node,
- "dlg,disable-key-power");
+ onkey->key_power = !device_property_read_bool(&pdev->dev,
+ "dlg,disable-key-power");
onkey->input = devm_input_allocate_device(&pdev->dev);
- if (!onkey->input) {
- dev_err(&pdev->dev, "Failed to allocated input device.\n");
+ if (!onkey->input)
return -ENOMEM;
- }
onkey->input->name = onkey->config->name;
snprintf(onkey->phys, sizeof(onkey->phys), "%s/input0",
onkey->config->name);
onkey->input->phys = onkey->phys;
- onkey->input->dev.parent = &pdev->dev;
input_set_capability(onkey->input, EV_KEY, KEY_POWER);
error = devm_delayed_work_autocancel(&pdev->dev, &onkey->work,
da9063_poll_on);
- if (error) {
- dev_err(&pdev->dev,
- "Failed to add cancel poll action: %d\n",
- error);
+ if (error)
return error;
- }
irq = platform_get_irq_byname(pdev, "ONKEY");
if (irq < 0)
@@ -246,11 +227,9 @@ static int da9063_onkey_probe(struct platform_device *pdev)
NULL, da9063_onkey_irq_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"ONKEY", onkey);
- if (error) {
- dev_err(&pdev->dev,
- "Failed to request IRQ %d: %d\n", irq, error);
- return error;
- }
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "Failed to allocate onkey IRQ\n");
error = dev_pm_set_wake_irq(&pdev->dev, irq);
if (error)
@@ -261,15 +240,19 @@ static int da9063_onkey_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, true);
error = input_register_device(onkey->input);
- if (error) {
- dev_err(&pdev->dev,
- "Failed to register input device: %d\n", error);
+ if (error)
return error;
- }
return 0;
}
+static const struct of_device_id da9063_compatible_reg_id_table[] = {
+ { .compatible = "dlg,da9063-onkey", .data = &da9063_regs },
+ { .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
+
static struct platform_driver da9063_onkey_driver = {
.probe = da9063_onkey_probe,
.driver = {
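
The da9063 rework swaps open-coded OF matching for the generic property helpers. A sketch of the two idioms it leans on, with a hypothetical config type:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/property.h>

    struct example_config {
            const char *name;
    };

    static int example_probe(struct device *dev)
    {
            /* Works for OF and ACPI matches alike. */
            const struct example_config *cfg = device_get_match_data(dev);

            /* dev_err_probe() logs (quietly for -EPROBE_DEFER) and
             * returns the error in one step. */
            if (!cfg)
                    return dev_err_probe(dev, -ENXIO, "no match data\n");

            return 0;
    }
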
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index b2f1292e2..6e8cc28de 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1050,7 +1050,7 @@ static ssize_t ims_pcu_attribute_show(struct device *dev,
container_of(dattr, struct ims_pcu_attribute, dattr);
char *field = (char *)pcu + attr->field_offset;
- return scnprintf(buf, PAGE_SIZE, "%.*s\n", attr->field_length, field);
+ return sysfs_emit(buf, "%.*s\n", attr->field_length, field);
}
static ssize_t ims_pcu_attribute_store(struct device *dev,
@@ -1206,7 +1206,7 @@ ims_pcu_update_firmware_status_show(struct device *dev,
struct usb_interface *intf = to_usb_interface(dev);
struct ims_pcu *pcu = usb_get_intfdata(intf);
- return scnprintf(buf, PAGE_SIZE, "%d\n", pcu->update_firmware_status);
+ return sysfs_emit(buf, "%d\n", pcu->update_firmware_status);
}
static DEVICE_ATTR(update_firmware_status, S_IRUGO,
@@ -1309,7 +1309,7 @@ static ssize_t ims_pcu_ofn_reg_data_show(struct device *dev,
if (error)
return error;
- return scnprintf(buf, PAGE_SIZE, "%x\n", data);
+ return sysfs_emit(buf, "%x\n", data);
}
static ssize_t ims_pcu_ofn_reg_data_store(struct device *dev,
@@ -1344,7 +1344,7 @@ static ssize_t ims_pcu_ofn_reg_addr_show(struct device *dev,
int error;
mutex_lock(&pcu->cmd_mutex);
- error = scnprintf(buf, PAGE_SIZE, "%x\n", pcu->ofn_reg_addr);
+ error = sysfs_emit(buf, "%x\n", pcu->ofn_reg_addr);
mutex_unlock(&pcu->cmd_mutex);
return error;
@@ -1397,7 +1397,7 @@ static ssize_t ims_pcu_ofn_bit_show(struct device *dev,
if (error)
return error;
- return scnprintf(buf, PAGE_SIZE, "%d\n", !!(data & (1 << attr->nr)));
+ return sysfs_emit(buf, "%d\n", !!(data & (1 << attr->nr)));
}
static ssize_t ims_pcu_ofn_bit_store(struct device *dev,
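
sysfs_emit() replaces the scnprintf(buf, PAGE_SIZE, ...) calls throughout ims-pcu (and in iqs269a and cyapa below): it verifies that buf is a genuine page-sized sysfs buffer before formatting into it. A hypothetical attribute as a sketch:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static int example_status;

    static ssize_t status_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
    {
            /* Bounded to PAGE_SIZE and offset-checked by the helper. */
            return sysfs_emit(buf, "%d\n", example_status);
    }
    static DEVICE_ATTR_RO(status);
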
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index 3c636c75e..cd14ff9f5 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -9,6 +9,7 @@
* axial sliders presented by the device.
*/
+#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -26,6 +27,8 @@
#define IQS269_VER_INFO 0x00
#define IQS269_VER_INFO_PROD_NUM 0x4F
+#define IQS269_VER_INFO_FW_NUM_2 0x03
+#define IQS269_VER_INFO_FW_NUM_3 0x10
#define IQS269_SYS_FLAGS 0x02
#define IQS269_SYS_FLAGS_SHOW_RESET BIT(15)
@@ -53,6 +56,7 @@
#define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8)
#define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT 8
#define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX 7
+#define IQS269_SYS_SETTINGS_SLIDER_SWIPE BIT(7)
#define IQS269_SYS_SETTINGS_RESEED_OFFSET BIT(6)
#define IQS269_SYS_SETTINGS_EVENT_MODE BIT(5)
#define IQS269_SYS_SETTINGS_EVENT_MODE_LP BIT(4)
@@ -69,6 +73,7 @@
#define IQS269_FILT_STR_MAX 3
#define IQS269_EVENT_MASK_SYS BIT(6)
+#define IQS269_EVENT_MASK_GESTURE BIT(3)
#define IQS269_EVENT_MASK_DEEP BIT(2)
#define IQS269_EVENT_MASK_TOUCH BIT(1)
#define IQS269_EVENT_MASK_PROX BIT(0)
@@ -97,6 +102,15 @@
#define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4)
#define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0)
+#define IQS269_TOUCH_HOLD_SLIDER_SEL 0x89
+#define IQS269_TOUCH_HOLD_DEFAULT 0x14
+#define IQS269_TOUCH_HOLD_MS_MIN 256
+#define IQS269_TOUCH_HOLD_MS_MAX 65280
+
+#define IQS269_TIMEOUT_TAP_MS_MAX 4080
+#define IQS269_TIMEOUT_SWIPE_MS_MAX 4080
+#define IQS269_THRESH_SWIPE_MAX 255
+
#define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15)
#define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13)
#define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12)
@@ -142,6 +156,10 @@
#define IQS269_MAX_REG 0xFF
+#define IQS269_OTP_OPTION_DEFAULT 0x00
+#define IQS269_OTP_OPTION_TWS 0xD0
+#define IQS269_OTP_OPTION_HOLD BIT(7)
+
#define IQS269_NUM_CH 8
#define IQS269_NUM_SL 2
@@ -175,6 +193,20 @@ enum iqs269_event_id {
IQS269_EVENT_DEEP_UP,
};
+enum iqs269_slider_id {
+ IQS269_SLIDER_NONE,
+ IQS269_SLIDER_KEY,
+ IQS269_SLIDER_RAW,
+};
+
+enum iqs269_gesture_id {
+ IQS269_GESTURE_TAP,
+ IQS269_GESTURE_HOLD,
+ IQS269_GESTURE_FLICK_POS,
+ IQS269_GESTURE_FLICK_NEG,
+ IQS269_NUM_GESTURES,
+};
+
struct iqs269_switch_desc {
unsigned int code;
bool enabled;
@@ -234,7 +266,7 @@ struct iqs269_ver_info {
u8 prod_num;
u8 sw_num;
u8 hw_num;
- u8 padding;
+ u8 fw_num;
} __packed;
struct iqs269_ch_reg {
@@ -285,16 +317,42 @@ struct iqs269_private {
struct regmap *regmap;
struct mutex lock;
struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)];
+ struct iqs269_ver_info ver_info;
struct iqs269_sys_reg sys_reg;
struct completion ati_done;
struct input_dev *keypad;
struct input_dev *slider[IQS269_NUM_SL];
unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH];
+ unsigned int sl_code[IQS269_NUM_SL][IQS269_NUM_GESTURES];
+ unsigned int otp_option;
unsigned int ch_num;
bool hall_enable;
bool ati_current;
};
+static enum iqs269_slider_id iqs269_slider_type(struct iqs269_private *iqs269,
+ int slider_num)
+{
+ int i;
+
+ /*
+ * Slider 1 is unavailable if the touch-and-hold option is enabled via
+ * OTP. In that case, the channel selection register is repurposed for
+ * the touch-and-hold timer ceiling.
+ */
+ if (slider_num && (iqs269->otp_option & IQS269_OTP_OPTION_HOLD))
+ return IQS269_SLIDER_NONE;
+
+ if (!iqs269->sys_reg.slider_select[slider_num])
+ return IQS269_SLIDER_NONE;
+
+ for (i = 0; i < IQS269_NUM_GESTURES; i++)
+ if (iqs269->sl_code[slider_num][i] != KEY_RESERVED)
+ return IQS269_SLIDER_KEY;
+
+ return IQS269_SLIDER_RAW;
+}
+
static int iqs269_ati_mode_set(struct iqs269_private *iqs269,
unsigned int ch_num, unsigned int mode)
{
@@ -525,7 +583,8 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269,
if (fwnode_property_present(ch_node, "azoteq,slider0-select"))
iqs269->sys_reg.slider_select[0] |= BIT(reg);
- if (fwnode_property_present(ch_node, "azoteq,slider1-select"))
+ if (fwnode_property_present(ch_node, "azoteq,slider1-select") &&
+ !(iqs269->otp_option & IQS269_OTP_OPTION_HOLD))
iqs269->sys_reg.slider_select[1] |= BIT(reg);
ch_reg = &iqs269->sys_reg.ch_reg[reg];
@@ -950,7 +1009,43 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
sys_reg->blocking = 0;
sys_reg->slider_select[0] = 0;
- sys_reg->slider_select[1] = 0;
+
+ /*
+ * If configured via OTP to do so, the device asserts a pulse on the
+ * GPIO4 pin for approximately 60 ms once a selected channel is held
+ * in a state of touch for a configurable length of time.
+ *
+ * In that case, the register used for slider 1 channel selection is
+ * repurposed for the touch-and-hold timer ceiling.
+ */
+ if (iqs269->otp_option & IQS269_OTP_OPTION_HOLD) {
+ if (!device_property_read_u32(&client->dev,
+ "azoteq,touch-hold-ms", &val)) {
+ if (val < IQS269_TOUCH_HOLD_MS_MIN ||
+ val > IQS269_TOUCH_HOLD_MS_MAX) {
+ dev_err(&client->dev,
+ "Invalid touch-and-hold ceiling: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->slider_select[1] = val / 256;
+ } else if (iqs269->ver_info.fw_num < IQS269_VER_INFO_FW_NUM_3) {
+ /*
+ * The default touch-and-hold timer ceiling initially
+ * read from early revisions of silicon is invalid if
+ * the device experienced a soft reset between power-
+ * on and the read operation.
+ *
+ * To protect against this case, explicitly cache the
+ * default value so that it is restored each time the
+ * device is re-initialized.
+ */
+ sys_reg->slider_select[1] = IQS269_TOUCH_HOLD_DEFAULT;
+ }
+ } else {
+ sys_reg->slider_select[1] = 0;
+ }
sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
@@ -1004,6 +1099,76 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT);
}
+ if (device_property_present(&client->dev, "linux,keycodes")) {
+ int scale = 1;
+ int count = device_property_count_u32(&client->dev,
+ "linux,keycodes");
+ if (count > IQS269_NUM_GESTURES * IQS269_NUM_SL) {
+ dev_err(&client->dev, "Too many keycodes present\n");
+ return -EINVAL;
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count keycodes: %d\n",
+ count);
+ return count;
+ }
+
+ error = device_property_read_u32_array(&client->dev,
+ "linux,keycodes",
+ *iqs269->sl_code, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read keycodes: %d\n",
+ error);
+ return error;
+ }
+
+ if (device_property_present(&client->dev,
+ "azoteq,gesture-swipe"))
+ general |= IQS269_SYS_SETTINGS_SLIDER_SWIPE;
+
+ /*
+ * Early revisions of silicon use a more granular step size for
+ * tap and swipe gesture timeouts; scale them appropriately.
+ */
+ if (iqs269->ver_info.fw_num < IQS269_VER_INFO_FW_NUM_3)
+ scale = 4;
+
+ if (!device_property_read_u32(&client->dev,
+ "azoteq,timeout-tap-ms", &val)) {
+ if (val > IQS269_TIMEOUT_TAP_MS_MAX / scale) {
+ dev_err(&client->dev, "Invalid timeout: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_tap = val / (16 / scale);
+ }
+
+ if (!device_property_read_u32(&client->dev,
+ "azoteq,timeout-swipe-ms",
+ &val)) {
+ if (val > IQS269_TIMEOUT_SWIPE_MS_MAX / scale) {
+ dev_err(&client->dev, "Invalid timeout: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_swipe = val / (16 / scale);
+ }
+
+ if (!device_property_read_u32(&client->dev,
+ "azoteq,thresh-swipe", &val)) {
+ if (val > IQS269_THRESH_SWIPE_MAX) {
+ dev_err(&client->dev, "Invalid threshold: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->thresh_swipe = val;
+ }
+
+ sys_reg->event_mask &= ~IQS269_EVENT_MASK_GESTURE;
+ }
+
general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET;
if (device_property_present(&client->dev, "azoteq,reseed-offset"))
general |= IQS269_SYS_SETTINGS_RESEED_OFFSET;
@@ -1012,10 +1177,11 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
/*
* As per the datasheet, enable streaming during normal-power mode if
- * either slider is in use. In that case, the device returns to event
- * mode during low-power mode.
+ * raw coordinates will be read from either slider. In that case, the
+ * device returns to event mode during low-power mode.
*/
- if (sys_reg->slider_select[0] || sys_reg->slider_select[1])
+ if (iqs269_slider_type(iqs269, 0) == IQS269_SLIDER_RAW ||
+ iqs269_slider_type(iqs269, 1) == IQS269_SLIDER_RAW)
general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP;
general |= IQS269_SYS_SETTINGS_REDO_ATI;
@@ -1026,12 +1192,30 @@ static int iqs269_parse_prop(struct iqs269_private *iqs269)
return 0;
}
+static const struct reg_sequence iqs269_tws_init[] = {
+ { IQS269_TOUCH_HOLD_SLIDER_SEL, IQS269_TOUCH_HOLD_DEFAULT },
+ { 0xF0, 0x580F },
+ { 0xF0, 0x59EF },
+};
+
static int iqs269_dev_init(struct iqs269_private *iqs269)
{
int error;
mutex_lock(&iqs269->lock);
+ /*
+ * Early revisions of silicon require the following workaround in order
+ * to restore any OTP-enabled functionality after a soft reset.
+ */
+ if (iqs269->otp_option == IQS269_OTP_OPTION_TWS &&
+ iqs269->ver_info.fw_num < IQS269_VER_INFO_FW_NUM_3) {
+ error = regmap_multi_reg_write(iqs269->regmap, iqs269_tws_init,
+ ARRAY_SIZE(iqs269_tws_init));
+ if (error)
+ goto err_mutex;
+ }
+
error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI,
IQS269_HALL_UI_ENABLE,
iqs269->hall_enable ? ~0 : 0);
@@ -1106,19 +1290,37 @@ static int iqs269_input_init(struct iqs269_private *iqs269)
}
for (i = 0; i < IQS269_NUM_SL; i++) {
- if (!iqs269->sys_reg.slider_select[i])
+ if (iqs269_slider_type(iqs269, i) == IQS269_SLIDER_NONE)
continue;
iqs269->slider[i] = devm_input_allocate_device(&client->dev);
if (!iqs269->slider[i])
return -ENOMEM;
+ iqs269->slider[i]->keycodemax = ARRAY_SIZE(iqs269->sl_code[i]);
+ iqs269->slider[i]->keycode = iqs269->sl_code[i];
+ iqs269->slider[i]->keycodesize = sizeof(**iqs269->sl_code);
+
iqs269->slider[i]->name = i ? "iqs269a_slider_1"
: "iqs269a_slider_0";
iqs269->slider[i]->id.bustype = BUS_I2C;
- input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH);
- input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0);
+ for (j = 0; j < IQS269_NUM_GESTURES; j++)
+ if (iqs269->sl_code[i][j] != KEY_RESERVED)
+ input_set_capability(iqs269->slider[i], EV_KEY,
+ iqs269->sl_code[i][j]);
+
+ /*
+ * Present the slider as a narrow trackpad if one or more chan-
+ * nels have been selected to participate, but no gestures have
+ * been mapped to a keycode.
+ */
+ if (iqs269_slider_type(iqs269, i) == IQS269_SLIDER_RAW) {
+ input_set_capability(iqs269->slider[i],
+ EV_KEY, BTN_TOUCH);
+ input_set_abs_params(iqs269->slider[i],
+ ABS_X, 0, 255, 0, 0);
+ }
error = input_register_device(iqs269->slider[i]);
if (error) {
@@ -1167,28 +1369,62 @@ static int iqs269_report(struct iqs269_private *iqs269)
if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_IN_ATI)
return 0;
- error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x,
- sizeof(slider_x));
- if (error) {
- dev_err(&client->dev, "Failed to read slider position: %d\n",
- error);
- return error;
+ if (iqs269_slider_type(iqs269, 0) == IQS269_SLIDER_RAW ||
+ iqs269_slider_type(iqs269, 1) == IQS269_SLIDER_RAW) {
+ error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X,
+ slider_x, sizeof(slider_x));
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read slider position: %d\n", error);
+ return error;
+ }
}
for (i = 0; i < IQS269_NUM_SL; i++) {
- if (!iqs269->sys_reg.slider_select[i])
+ flags.gesture >>= (i * IQS269_NUM_GESTURES);
+
+ switch (iqs269_slider_type(iqs269, i)) {
+ case IQS269_SLIDER_NONE:
continue;
- /*
- * Report BTN_TOUCH if any channel that participates in the
- * slider is in a state of touch.
- */
- if (flags.states[IQS269_ST_OFFS_TOUCH] &
- iqs269->sys_reg.slider_select[i]) {
- input_report_key(iqs269->slider[i], BTN_TOUCH, 1);
- input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]);
- } else {
- input_report_key(iqs269->slider[i], BTN_TOUCH, 0);
+ case IQS269_SLIDER_KEY:
+ for (j = 0; j < IQS269_NUM_GESTURES; j++)
+ input_report_key(iqs269->slider[i],
+ iqs269->sl_code[i][j],
+ flags.gesture & BIT(j));
+
+ if (!(flags.gesture & (BIT(IQS269_GESTURE_FLICK_NEG) |
+ BIT(IQS269_GESTURE_FLICK_POS) |
+ BIT(IQS269_GESTURE_TAP))))
+ break;
+
+ input_sync(iqs269->slider[i]);
+
+ /*
+ * Momentary gestures are followed by a complementary
+ * release cycle so as to emulate a full keystroke.
+ */
+ for (j = 0; j < IQS269_NUM_GESTURES; j++)
+ if (j != IQS269_GESTURE_HOLD)
+ input_report_key(iqs269->slider[i],
+ iqs269->sl_code[i][j],
+ 0);
+ break;
+
+ case IQS269_SLIDER_RAW:
+ /*
+ * The slider is considered to be in a state of touch
+ * if any selected channels are in a state of touch.
+ */
+ state = flags.states[IQS269_ST_OFFS_TOUCH];
+ state &= iqs269->sys_reg.slider_select[i];
+
+ input_report_key(iqs269->slider[i], BTN_TOUCH, state);
+
+ if (state)
+ input_report_abs(iqs269->slider[i],
+ ABS_X, slider_x[i]);
+ break;
}
input_sync(iqs269->slider[i]);
@@ -1286,7 +1522,7 @@ static ssize_t counts_show(struct device *dev,
if (error)
return error;
- return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts));
+ return sysfs_emit(buf, "%u\n", le16_to_cpu(counts));
}
static ssize_t hall_bin_show(struct device *dev,
@@ -1324,7 +1560,7 @@ static ssize_t hall_bin_show(struct device *dev,
return -EINVAL;
}
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t hall_enable_show(struct device *dev,
@@ -1332,7 +1568,7 @@ static ssize_t hall_enable_show(struct device *dev,
{
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->hall_enable);
+ return sysfs_emit(buf, "%u\n", iqs269->hall_enable);
}
static ssize_t hall_enable_store(struct device *dev,
@@ -1362,7 +1598,7 @@ static ssize_t ch_number_show(struct device *dev,
{
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ch_num);
+ return sysfs_emit(buf, "%u\n", iqs269->ch_num);
}
static ssize_t ch_number_store(struct device *dev,
@@ -1391,8 +1627,7 @@ static ssize_t rx_enable_show(struct device *dev,
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg;
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- ch_reg[iqs269->ch_num].rx_enable);
+ return sysfs_emit(buf, "%u\n", ch_reg[iqs269->ch_num].rx_enable);
}
static ssize_t rx_enable_store(struct device *dev,
@@ -1432,7 +1667,7 @@ static ssize_t ati_mode_show(struct device *dev,
if (error)
return error;
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t ati_mode_store(struct device *dev,
@@ -1465,7 +1700,7 @@ static ssize_t ati_base_show(struct device *dev,
if (error)
return error;
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t ati_base_store(struct device *dev,
@@ -1498,7 +1733,7 @@ static ssize_t ati_target_show(struct device *dev,
if (error)
return error;
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t ati_target_store(struct device *dev,
@@ -1525,9 +1760,9 @@ static ssize_t ati_trigger_show(struct device *dev,
{
struct iqs269_private *iqs269 = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- iqs269->ati_current &&
- completion_done(&iqs269->ati_done));
+ return sysfs_emit(buf, "%u\n",
+ iqs269->ati_current &&
+ completion_done(&iqs269->ati_done));
}
static ssize_t ati_trigger_store(struct device *dev,
@@ -1596,7 +1831,6 @@ static const struct regmap_config iqs269_regmap_config = {
static int iqs269_probe(struct i2c_client *client)
{
- struct iqs269_ver_info ver_info;
struct iqs269_private *iqs269;
int error;
@@ -1618,14 +1852,16 @@ static int iqs269_probe(struct i2c_client *client)
mutex_init(&iqs269->lock);
init_completion(&iqs269->ati_done);
- error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info,
- sizeof(ver_info));
+ iqs269->otp_option = (uintptr_t)device_get_match_data(&client->dev);
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO,
+ &iqs269->ver_info, sizeof(iqs269->ver_info));
if (error)
return error;
- if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
+ if (iqs269->ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
- ver_info.prod_num);
+ iqs269->ver_info.prod_num);
return -EINVAL;
}
@@ -1728,7 +1964,18 @@ static int iqs269_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume);
static const struct of_device_id iqs269_of_match[] = {
- { .compatible = "azoteq,iqs269a" },
+ {
+ .compatible = "azoteq,iqs269a",
+ .data = (void *)IQS269_OTP_OPTION_DEFAULT,
+ },
+ {
+ .compatible = "azoteq,iqs269a-00",
+ .data = (void *)IQS269_OTP_OPTION_DEFAULT,
+ },
+ {
+ .compatible = "azoteq,iqs269a-d0",
+ .data = (void *)IQS269_OTP_OPTION_TWS,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, iqs269_of_match);
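
The TWS workaround in iqs269_dev_init() replays a fixed register sequence with regmap_multi_reg_write(). A standalone sketch of that mechanism; the register/value pairs here are illustrative only:

    #include <linux/kernel.h>
    #include <linux/regmap.h>

    static const struct reg_sequence example_otp_restore[] = {
            { 0x89, 0x14 },
            { 0xf0, 0x580f },
            { 0xf0, 0x59ef },
    };

    static int example_restore_otp(struct regmap *regmap)
    {
            /* Writes are issued in array order, stopping on first error. */
            return regmap_multi_reg_write(regmap, example_otp_restore,
                                          ARRAY_SIZE(example_otp_restore));
    }
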
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 80f4416ff..0e646f1b2 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -307,7 +307,7 @@ static int max77693_haptic_probe(struct platform_device *pdev)
haptic->suspend_state = false;
/* Variant-specific init */
- haptic->dev_type = platform_get_device_id(pdev)->driver_data;
+ haptic->dev_type = max77693->type;
switch (haptic->dev_type) {
case TYPE_MAX77693:
haptic->regmap_haptic = max77693->regmap_haptic;
@@ -406,16 +406,24 @@ static DEFINE_SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
max77693_haptic_resume);
static const struct platform_device_id max77693_haptic_id[] = {
- { "max77693-haptic", TYPE_MAX77693 },
- { "max77843-haptic", TYPE_MAX77843 },
+ { "max77693-haptic", },
+ { "max77843-haptic", },
{},
};
MODULE_DEVICE_TABLE(platform, max77693_haptic_id);
+static const struct of_device_id of_max77693_haptic_dt_match[] = {
+ { .compatible = "maxim,max77693-haptic", },
+ { .compatible = "maxim,max77843-haptic", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_max77693_haptic_dt_match);
+
static struct platform_driver max77693_haptic_driver = {
.driver = {
.name = "max77693-haptic",
.pm = pm_sleep_ptr(&max77693_haptic_pm_ops),
+ .of_match_table = of_max77693_haptic_dt_match,
},
.probe = max77693_haptic_probe,
.id_table = max77693_haptic_id,
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 1e731d839..5b9aedf43 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -39,7 +39,7 @@ static int pwm_beeper_on(struct pwm_beeper *beeper, unsigned long period)
state.period = period;
pwm_set_relative_duty_cycle(&state, 50, 100);
- error = pwm_apply_state(beeper->pwm, &state);
+ error = pwm_apply_might_sleep(beeper->pwm, &state);
if (error)
return error;
@@ -138,7 +138,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
/* Sync up PWM state and ensure it is off. */
pwm_init_state(beeper->pwm, &state);
state.enabled = false;
- error = pwm_apply_state(beeper->pwm, &state);
+ error = pwm_apply_might_sleep(beeper->pwm, &state);
if (error) {
dev_err(dev, "failed to apply initial PWM state: %d\n",
error);
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index acac79c48..3e5ed685e 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -56,7 +56,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff);
state.enabled = true;
- err = pwm_apply_state(vibrator->pwm, &state);
+ err = pwm_apply_might_sleep(vibrator->pwm, &state);
if (err) {
dev_err(pdev, "failed to apply pwm state: %d\n", err);
return err;
@@ -67,7 +67,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
state.duty_cycle = vibrator->direction_duty_cycle;
state.enabled = true;
- err = pwm_apply_state(vibrator->pwm_dir, &state);
+ err = pwm_apply_might_sleep(vibrator->pwm_dir, &state);
if (err) {
dev_err(pdev, "failed to apply dir-pwm state: %d\n", err);
pwm_disable(vibrator->pwm);
@@ -160,7 +160,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
/* Sync up PWM state and ensure it is off. */
pwm_init_state(vibrator->pwm, &state);
state.enabled = false;
- err = pwm_apply_state(vibrator->pwm, &state);
+ err = pwm_apply_might_sleep(vibrator->pwm, &state);
if (err) {
dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
err);
@@ -174,7 +174,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
/* Sync up PWM state and ensure it is off. */
pwm_init_state(vibrator->pwm_dir, &state);
state.enabled = false;
- err = pwm_apply_state(vibrator->pwm_dir, &state);
+ err = pwm_apply_might_sleep(vibrator->pwm_dir, &state);
if (err) {
dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
err);
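The pwm-beeper and pwm-vibra hunks pick up a PWM API rename: pwm_apply_state() became pwm_apply_might_sleep(), making explicit that the call may sleep (an atomic variant, pwm_apply_atomic(), exists separately). A minimal sketch of the usual sequence, assuming a valid struct pwm_device; beep_on is a hypothetical helper:

#include <linux/pwm.h>

static int beep_on(struct pwm_device *pwm, u64 period_ns)
{
        struct pwm_state state;

        /* Start from the current hardware state, then program a 50%
         * duty cycle at the requested period. */
        pwm_init_state(pwm, &state);
        state.period = period_ns;
        pwm_set_relative_duty_cycle(&state, 50, 100);
        state.enabled = true;

        /* Same semantics as the old pwm_apply_state(); may sleep. */
        return pwm_apply_might_sleep(pwm, &state);
}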
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index a84098448..5979deabe 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -756,16 +756,16 @@ static ssize_t cyapa_show_suspend_scanrate(struct device *dev,
switch (pwr_cmd) {
case PWR_MODE_BTN_ONLY:
- len = scnprintf(buf, PAGE_SIZE, "%s\n", BTN_ONLY_MODE_NAME);
+ len = sysfs_emit(buf, "%s\n", BTN_ONLY_MODE_NAME);
break;
case PWR_MODE_OFF:
- len = scnprintf(buf, PAGE_SIZE, "%s\n", OFF_MODE_NAME);
+ len = sysfs_emit(buf, "%s\n", OFF_MODE_NAME);
break;
default:
- len = scnprintf(buf, PAGE_SIZE, "%u\n",
- cyapa->gen == CYAPA_GEN3 ?
+ len = sysfs_emit(buf, "%u\n",
+ cyapa->gen == CYAPA_GEN3 ?
cyapa_pwr_cmd_to_sleep_time(pwr_cmd) :
sleep_time);
break;
@@ -877,8 +877,8 @@ static ssize_t cyapa_show_rt_suspend_scanrate(struct device *dev,
mutex_unlock(&cyapa->state_sync_lock);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- cyapa->gen == CYAPA_GEN3 ?
+ return sysfs_emit(buf, "%u\n",
+ cyapa->gen == CYAPA_GEN3 ?
cyapa_pwr_cmd_to_sleep_time(pwr_cmd) :
sleep_time);
}
@@ -988,8 +988,8 @@ static ssize_t cyapa_show_fm_ver(struct device *dev,
error = mutex_lock_interruptible(&cyapa->state_sync_lock);
if (error)
return error;
- error = scnprintf(buf, PAGE_SIZE, "%d.%d\n", cyapa->fw_maj_ver,
- cyapa->fw_min_ver);
+ error = sysfs_emit(buf, "%d.%d\n",
+ cyapa->fw_maj_ver, cyapa->fw_min_ver);
mutex_unlock(&cyapa->state_sync_lock);
return error;
}
@@ -1004,7 +1004,7 @@ static ssize_t cyapa_show_product_id(struct device *dev,
error = mutex_lock_interruptible(&cyapa->state_sync_lock);
if (error)
return error;
- size = scnprintf(buf, PAGE_SIZE, "%s\n", cyapa->product_id);
+ size = sysfs_emit(buf, "%s\n", cyapa->product_id);
mutex_unlock(&cyapa->state_sync_lock);
return size;
}
@@ -1209,8 +1209,8 @@ static ssize_t cyapa_show_mode(struct device *dev,
if (error)
return error;
- size = scnprintf(buf, PAGE_SIZE, "gen%d %s\n",
- cyapa->gen, cyapa_state_to_string(cyapa));
+ size = sysfs_emit(buf, "gen%d %s\n",
+ cyapa->gen, cyapa_state_to_string(cyapa));
mutex_unlock(&cyapa->state_sync_lock);
return size;
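The cyapa conversions above, like the elan, rmi4 and other input hunks that follow, replace scnprintf(buf, PAGE_SIZE, ...) with sysfs_emit(), which hard-codes the PAGE_SIZE bound and warns when buf is not the page-aligned buffer sysfs hands to a ->show() callback; sysfs_emit_at() is the offset variant for output built up in pieces. A minimal sketch of a show() routine using both (example_show and the sample data are hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        static const int vals[] = { 1, 2, 3 }; /* hypothetical data */
        ssize_t len;
        int i;

        len = sysfs_emit(buf, "values:");
        for (i = 0; i < ARRAY_SIZE(vals); i++)
                len += sysfs_emit_at(buf, len, " %d", vals[i]);
        len += sysfs_emit_at(buf, len, "\n");

        return len;
}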
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index a97f4acb6..60c83bc71 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -860,7 +860,7 @@ static ssize_t cyapa_gen3_show_baseline(struct device *dev,
dev_dbg(dev, "Baseline report successful. Max: %d Min: %d\n",
max_baseline, min_baseline);
- ret = scnprintf(buf, PAGE_SIZE, "%d %d\n", max_baseline, min_baseline);
+ ret = sysfs_emit(buf, "%d %d\n", max_baseline, min_baseline);
out:
return ret;
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index abf42f77b..2e6bcb072 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -2418,12 +2418,12 @@ resume_scanning:
return resume_error ? resume_error : error;
/* 12. Output data strings */
- size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d %d %d %d %d %d ",
+ size = sysfs_emit(buf, "%d %d %d %d %d %d %d %d %d %d %d ",
gidac_mutual_min, gidac_mutual_max, gidac_mutual_ave,
lidac_mutual_min, lidac_mutual_max, lidac_mutual_ave,
gidac_self_rx, gidac_self_tx,
lidac_self_min, lidac_self_max, lidac_self_ave);
- size += scnprintf(buf + size, PAGE_SIZE - size,
+ size += sysfs_emit_at(buf, size,
"%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n",
raw_cap_mutual_min, raw_cap_mutual_max, raw_cap_mutual_ave,
raw_cap_self_min, raw_cap_self_max, raw_cap_self_ave,
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 0caaf3e64..4ffe08fee 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -629,14 +629,14 @@ static ssize_t cyapa_gen6_show_baseline(struct device *dev,
if (error)
goto resume_scanning;
- size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d ",
- data[0], /* RX Attenuator Mutual */
- data[1], /* IDAC Mutual */
- data[2], /* RX Attenuator Self RX */
- data[3], /* IDAC Self RX */
- data[4], /* RX Attenuator Self TX */
- data[5] /* IDAC Self TX */
- );
+ size = sysfs_emit(buf, "%d %d %d %d %d %d ",
+ data[0], /* RX Attenuator Mutual */
+ data[1], /* IDAC Mutual */
+ data[2], /* RX Attenuator Self RX */
+ data[3], /* IDAC Self RX */
+ data[4], /* RX Attenuator Self TX */
+ data[5] /* IDAC Self TX */
+ );
/* 3. Read Attenuator Trim. */
data_len = sizeof(data);
@@ -648,8 +648,8 @@ static ssize_t cyapa_gen6_show_baseline(struct device *dev,
/* set attenuator trim values. */
for (i = 0; i < data_len; i++)
- size += scnprintf(buf + size, PAGE_SIZE - size, "%d ", data[i]);
- size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+ size += sysfs_emit_at(buf, size, "%d ", data[i]);
+ size += sysfs_emit_at(buf, size, "\n");
resume_scanning:
/* 4. Resume Scanning*/
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 148a60139..8a72c200c 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -572,7 +572,7 @@ static ssize_t elan_sysfs_read_fw_checksum(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct elan_tp_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "0x%04x\n", data->fw_checksum);
+ return sysfs_emit(buf, "0x%04x\n", data->fw_checksum);
}
static ssize_t elan_sysfs_read_product_id(struct device *dev,
@@ -582,8 +582,8 @@ static ssize_t elan_sysfs_read_product_id(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct elan_tp_data *data = i2c_get_clientdata(client);
- return sprintf(buf, ETP_PRODUCT_ID_FORMAT_STRING "\n",
- data->product_id);
+ return sysfs_emit(buf, ETP_PRODUCT_ID_FORMAT_STRING "\n",
+ data->product_id);
}
static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
@@ -593,7 +593,7 @@ static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct elan_tp_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "%d.0\n", data->fw_version);
+ return sysfs_emit(buf, "%d.0\n", data->fw_version);
}
static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
@@ -603,7 +603,7 @@ static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct elan_tp_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "%d.0\n", data->sm_version);
+ return sysfs_emit(buf, "%d.0\n", data->sm_version);
}
static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
@@ -613,7 +613,7 @@ static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct elan_tp_data *data = i2c_get_clientdata(client);
- return sprintf(buf, "%d.0\n", data->iap_version);
+ return sysfs_emit(buf, "%d.0\n", data->iap_version);
}
static ssize_t elan_sysfs_update_fw(struct device *dev,
@@ -754,7 +754,7 @@ static ssize_t elan_sysfs_read_mode(struct device *dev,
if (error)
return error;
- return sprintf(buf, "%d\n", (int)mode);
+ return sysfs_emit(buf, "%d\n", (int)mode);
}
static DEVICE_ATTR(product_id, S_IRUGO, elan_sysfs_read_product_id, NULL);
@@ -858,7 +858,7 @@ static ssize_t min_show(struct device *dev,
goto out;
}
- retval = snprintf(buf, PAGE_SIZE, "%d", data->min_baseline);
+ retval = sysfs_emit(buf, "%d", data->min_baseline);
out:
mutex_unlock(&data->sysfs_mutex);
@@ -881,7 +881,7 @@ static ssize_t max_show(struct device *dev,
goto out;
}
- retval = snprintf(buf, PAGE_SIZE, "%d", data->max_baseline);
+ retval = sysfs_emit(buf, "%d", data->max_baseline);
out:
mutex_unlock(&data->sysfs_mutex);
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
index c00dc1275..ba757783c 100644
--- a/drivers/input/mouse/navpoint.c
+++ b/drivers/input/mouse/navpoint.c
@@ -10,7 +10,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/input/navpoint.h>
#include <linux/interrupt.h>
@@ -32,7 +32,7 @@ struct navpoint {
struct ssp_device *ssp;
struct input_dev *input;
struct device *dev;
- int gpio;
+ struct gpio_desc *gpiod;
int index;
u8 data[1 + HEADER_LENGTH(0xff)];
};
@@ -170,16 +170,14 @@ static void navpoint_up(struct navpoint *navpoint)
dev_err(navpoint->dev,
"timeout waiting for SSSR[CSS] to clear\n");
- if (gpio_is_valid(navpoint->gpio))
- gpio_set_value(navpoint->gpio, 1);
+ gpiod_set_value(navpoint->gpiod, 1);
}
static void navpoint_down(struct navpoint *navpoint)
{
struct ssp_device *ssp = navpoint->ssp;
- if (gpio_is_valid(navpoint->gpio))
- gpio_set_value(navpoint->gpio, 0);
+ gpiod_set_value(navpoint->gpiod, 0);
pxa_ssp_write_reg(ssp, SSCR0, 0);
@@ -216,18 +214,9 @@ static int navpoint_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (gpio_is_valid(pdata->gpio)) {
- error = gpio_request_one(pdata->gpio, GPIOF_OUT_INIT_LOW,
- "SYNAPTICS_ON");
- if (error)
- return error;
- }
-
ssp = pxa_ssp_request(pdata->port, pdev->name);
- if (!ssp) {
- error = -ENODEV;
- goto err_free_gpio;
- }
+ if (!ssp)
+ return -ENODEV;
/* HaRET does not disable devices before jumping into Linux */
if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
@@ -242,10 +231,18 @@ static int navpoint_probe(struct platform_device *pdev)
goto err_free_mem;
}
+ navpoint->gpiod = gpiod_get_optional(&pdev->dev,
+ NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(navpoint->gpiod)) {
+ error = PTR_ERR(navpoint->gpiod);
+ dev_err(&pdev->dev, "error getting GPIO\n");
+ goto err_free_mem;
+ }
+ gpiod_set_consumer_name(navpoint->gpiod, "SYNAPTICS_ON");
+
navpoint->ssp = ssp;
navpoint->input = input;
navpoint->dev = &pdev->dev;
- navpoint->gpio = pdata->gpio;
input->name = pdev->name;
input->dev.parent = &pdev->dev;
@@ -288,17 +285,12 @@ err_free_mem:
input_free_device(input);
kfree(navpoint);
pxa_ssp_free(ssp);
-err_free_gpio:
- if (gpio_is_valid(pdata->gpio))
- gpio_free(pdata->gpio);
return error;
}
static void navpoint_remove(struct platform_device *pdev)
{
- const struct navpoint_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
struct navpoint *navpoint = platform_get_drvdata(pdev);
struct ssp_device *ssp = navpoint->ssp;
@@ -308,9 +300,6 @@ static void navpoint_remove(struct platform_device *pdev)
kfree(navpoint);
pxa_ssp_free(ssp);
-
- if (gpio_is_valid(pdata->gpio))
- gpio_free(pdata->gpio);
}
static int navpoint_suspend(struct device *dev)
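The navpoint rework moves from the legacy integer GPIO API to GPIO descriptors. gpiod_get_optional() returns NULL rather than an error when no GPIO is described for the device, and gpiod_set_value() treats a NULL descriptor as a no-op, which is why the gpio_is_valid() guards disappear. A minimal sketch of the acquisition step; get_power_gpio is a hypothetical helper:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *get_power_gpio(struct device *dev)
{
        struct gpio_desc *gpiod;

        /* NULL means "no GPIO wired up" and is not an error here;
         * ERR_PTR values (e.g. -EPROBE_DEFER) must still propagate. */
        gpiod = gpiod_get_optional(dev, NULL, GPIOD_OUT_LOW);
        if (IS_ERR(gpiod))
                return gpiod;

        gpiod_set_consumer_name(gpiod, "SYNAPTICS_ON");
        return gpiod;
}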
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 258d5fe3d..ef9ea295f 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev)
rmi_disable_irq(rmi_dev, false);
- irq_domain_remove(data->irqdomain);
- data->irqdomain = NULL;
-
rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
+ irq_domain_remove(data->irqdomain);
+ data->irqdomain = NULL;
+
return 0;
}
@@ -1196,7 +1196,11 @@ static int rmi_driver_probe(struct device *dev)
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
- "%s/input0", dev_name(dev));
+ "%s/input0", dev_name(dev));
+ if (!data->input->phys) {
+ retval = -ENOMEM;
+ goto err;
+ }
}
retval = rmi_init_functions(data);
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index d7603c50f..cc1d4b424 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -267,8 +267,7 @@ static ssize_t rmi_driver_manufacturer_id_show(struct device *dev,
struct rmi_driver_data *data = dev_get_drvdata(dev);
struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- f01->properties.manufacturer_id);
+ return sysfs_emit(buf, "%d\n", f01->properties.manufacturer_id);
}
static DEVICE_ATTR(manufacturer_id, 0444,
@@ -280,7 +279,7 @@ static ssize_t rmi_driver_dom_show(struct device *dev,
struct rmi_driver_data *data = dev_get_drvdata(dev);
struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", f01->properties.dom);
+ return sysfs_emit(buf, "%s\n", f01->properties.dom);
}
static DEVICE_ATTR(date_of_manufacture, 0444, rmi_driver_dom_show, NULL);
@@ -292,7 +291,7 @@ static ssize_t rmi_driver_product_id_show(struct device *dev,
struct rmi_driver_data *data = dev_get_drvdata(dev);
struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", f01->properties.product_id);
+ return sysfs_emit(buf, "%s\n", f01->properties.product_id);
}
static DEVICE_ATTR(product_id, 0444, rmi_driver_product_id_show, NULL);
@@ -304,7 +303,7 @@ static ssize_t rmi_driver_firmware_id_show(struct device *dev,
struct rmi_driver_data *data = dev_get_drvdata(dev);
struct f01_data *f01 = dev_get_drvdata(&data->f01_container->dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", f01->properties.firmware_id);
+ return sysfs_emit(buf, "%d\n", f01->properties.firmware_id);
}
static DEVICE_ATTR(firmware_id, 0444, rmi_driver_firmware_id_show, NULL);
@@ -318,8 +317,8 @@ static ssize_t rmi_driver_package_id_show(struct device *dev,
u32 package_id = f01->properties.package_id;
- return scnprintf(buf, PAGE_SIZE, "%04x.%04x\n",
- package_id & 0xffff, (package_id >> 16) & 0xffff);
+ return sysfs_emit(buf, "%04x.%04x\n",
+ package_id & 0xffff, (package_id >> 16) & 0xffff);
}
static DEVICE_ATTR(package_id, 0444, rmi_driver_package_id_show, NULL);
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 852aeb0b2..07c866f42 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -375,7 +375,7 @@ static int rmi_spi_probe(struct spi_device *spi)
struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
int error;
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 20094b989..542a31448 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2546,7 +2546,7 @@ static const struct vb2_queue mxt_queue = {
.ops = &mxt_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
- .min_buffers_needed = 1,
+ .min_queued_buffers = 1,
};
static int mxt_vidioc_querycap(struct file *file, void *priv,
@@ -2818,8 +2818,8 @@ static ssize_t mxt_fw_version_show(struct device *dev,
{
struct mxt_data *data = dev_get_drvdata(dev);
struct mxt_info *info = data->info;
- return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
- info->version >> 4, info->version & 0xf, info->build);
+ return sysfs_emit(buf, "%u.%u.%02X\n",
+ info->version >> 4, info->version & 0xf, info->build);
}
/* Hardware Version is returned as FamilyID.VariantID */
@@ -2828,8 +2828,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
{
struct mxt_data *data = dev_get_drvdata(dev);
struct mxt_info *info = data->info;
- return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
- info->family_id, info->variant_id);
+ return sysfs_emit(buf, "%u.%u\n", info->family_id, info->variant_id);
}
static ssize_t mxt_show_instance(char *buf, int count,
@@ -2839,19 +2838,18 @@ static ssize_t mxt_show_instance(char *buf, int count,
int i;
if (mxt_obj_instances(object) > 1)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Instance %u\n", instance);
+ count += sysfs_emit_at(buf, count, "Instance %u\n", instance);
for (i = 0; i < mxt_obj_size(object); i++)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\t[%2u]: %02x (%d)\n", i, val[i], val[i]);
- count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+ count += sysfs_emit_at(buf, count, "\t[%2u]: %02x (%d)\n",
+ i, val[i], val[i]);
+ count += sysfs_emit_at(buf, count, "\n");
return count;
}
static ssize_t mxt_object_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct mxt_data *data = dev_get_drvdata(dev);
struct mxt_object *object;
@@ -2872,8 +2870,7 @@ static ssize_t mxt_object_show(struct device *dev,
if (!mxt_object_readable(object->type))
continue;
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "T%u:\n", object->type);
+ count += sysfs_emit_at(buf, count, "T%u:\n", object->type);
for (j = 0; j < mxt_obj_instances(object); j++) {
u16 size = mxt_obj_size(object);
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3e102bcc4..2a1db1134 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -431,7 +431,7 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
*field = val;
}
- count = scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ count = sysfs_emit(buf, "%d\n", val);
out:
mutex_unlock(&tsdata->mutex);
return error ?: count;
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index 0f5825830..eae90676f 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -928,8 +928,7 @@ static ssize_t hideep_fw_version_show(struct device *dev,
ssize_t len;
mutex_lock(&ts->dev_mutex);
- len = scnprintf(buf, PAGE_SIZE, "%04x\n",
- be16_to_cpu(ts->dwz_info.release_ver));
+ len = sysfs_emit(buf, "%04x\n", be16_to_cpu(ts->dwz_info.release_ver));
mutex_unlock(&ts->dev_mutex);
return len;
@@ -943,8 +942,7 @@ static ssize_t hideep_product_id_show(struct device *dev,
ssize_t len;
mutex_lock(&ts->dev_mutex);
- len = scnprintf(buf, PAGE_SIZE, "%04x\n",
- be16_to_cpu(ts->dwz_info.product_id));
+ len = sysfs_emit(buf, "%04x\n", be16_to_cpu(ts->dwz_info.product_id));
mutex_unlock(&ts->dev_mutex);
return len;
diff --git a/drivers/input/touchscreen/hycon-hy46xx.c b/drivers/input/touchscreen/hycon-hy46xx.c
index d0f257989..2e01d8797 100644
--- a/drivers/input/touchscreen/hycon-hy46xx.c
+++ b/drivers/input/touchscreen/hycon-hy46xx.c
@@ -202,7 +202,7 @@ static ssize_t hycon_hy46xx_setting_show(struct device *dev,
*field = val;
}
- count = scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ count = sysfs_emit(buf, "%d\n", val);
out:
mutex_unlock(&tsdata->mutex);
diff --git a/drivers/input/touchscreen/ilitek_ts_i2c.c b/drivers/input/touchscreen/ilitek_ts_i2c.c
index 90c4934e7..fc4e39b66 100644
--- a/drivers/input/touchscreen/ilitek_ts_i2c.c
+++ b/drivers/input/touchscreen/ilitek_ts_i2c.c
@@ -512,12 +512,12 @@ static ssize_t firmware_version_show(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct ilitek_ts_data *ts = i2c_get_clientdata(client);
- return scnprintf(buf, PAGE_SIZE,
- "fw version: [%02X%02X.%02X%02X.%02X%02X.%02X%02X]\n",
- ts->firmware_ver[0], ts->firmware_ver[1],
- ts->firmware_ver[2], ts->firmware_ver[3],
- ts->firmware_ver[4], ts->firmware_ver[5],
- ts->firmware_ver[6], ts->firmware_ver[7]);
+ return sysfs_emit(buf,
+ "fw version: [%02X%02X.%02X%02X.%02X%02X.%02X%02X]\n",
+ ts->firmware_ver[0], ts->firmware_ver[1],
+ ts->firmware_ver[2], ts->firmware_ver[3],
+ ts->firmware_ver[4], ts->firmware_ver[5],
+ ts->firmware_ver[6], ts->firmware_ver[7]);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -527,8 +527,8 @@ static ssize_t product_id_show(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct ilitek_ts_data *ts = i2c_get_clientdata(client);
- return scnprintf(buf, PAGE_SIZE, "product id: [%04X], module: [%s]\n",
- ts->mcu_ver, ts->product_id);
+ return sysfs_emit(buf, "product id: [%04X], module: [%s]\n",
+ ts->mcu_ver, ts->product_id);
}
static DEVICE_ATTR_RO(product_id);
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index 07111ca24..55ecebe98 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -23,12 +24,9 @@
#define IST3038C_I2C_RETRY_COUNT 3
#define IST3038C_MAX_FINGER_NUM 10
#define IST3038C_X_MASK GENMASK(23, 12)
-#define IST3038C_X_SHIFT 12
#define IST3038C_Y_MASK GENMASK(11, 0)
#define IST3038C_AREA_MASK GENMASK(27, 24)
-#define IST3038C_AREA_SHIFT 24
#define IST3038C_FINGER_COUNT_MASK GENMASK(15, 12)
-#define IST3038C_FINGER_COUNT_SHIFT 12
#define IST3038C_FINGER_STATUS_MASK GENMASK(9, 0)
struct imagis_ts {
@@ -92,8 +90,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
goto out;
}
- finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
- IST3038C_FINGER_COUNT_SHIFT;
+ finger_count = FIELD_GET(IST3038C_FINGER_COUNT_MASK, intr_message);
if (finger_count > IST3038C_MAX_FINGER_NUM) {
dev_err(&ts->client->dev,
"finger count %d is more than maximum supported\n",
@@ -101,7 +98,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
goto out;
}
- finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+ finger_pressed = FIELD_GET(IST3038C_FINGER_STATUS_MASK, intr_message);
for (i = 0; i < finger_count; i++) {
error = imagis_i2c_read_reg(ts,
@@ -118,12 +115,11 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
finger_pressed & BIT(i));
touchscreen_report_pos(ts->input_dev, &ts->prop,
- (finger_status & IST3038C_X_MASK) >>
- IST3038C_X_SHIFT,
- finger_status & IST3038C_Y_MASK, 1);
+ FIELD_GET(IST3038C_X_MASK, finger_status),
+ FIELD_GET(IST3038C_Y_MASK, finger_status),
+ true);
input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
- (finger_status & IST3038C_AREA_MASK) >>
- IST3038C_AREA_SHIFT);
+ FIELD_GET(IST3038C_AREA_MASK, finger_status));
}
input_mt_sync_frame(ts->input_dev);
@@ -210,7 +206,7 @@ static int imagis_init_input_dev(struct imagis_ts *ts)
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 16, 0, 0);
touchscreen_parse_properties(input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y) {
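The imagis hunks drop the hand-rolled *_SHIFT constants in favour of FIELD_GET(), which derives the shift from a GENMASK() at compile time, so mask and shift cannot drift apart. A minimal sketch (EXAMPLE_X_MASK and example_x are hypothetical):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_X_MASK GENMASK(23, 12) /* hypothetical 12-bit field */

static u32 example_x(u32 status)
{
        /* Equivalent to (status & EXAMPLE_X_MASK) >> 12, with the
         * shift computed from the mask itself. */
        return FIELD_GET(EXAMPLE_X_MASK, status);
}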
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index a3f4fb85b..4d226118f 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -943,12 +943,12 @@ static ssize_t fw_info_show(struct device *dev,
if (!iqs5xx->dev_id_info.bl_status)
return -ENODATA;
- return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
- be16_to_cpu(iqs5xx->dev_id_info.prod_num),
- be16_to_cpu(iqs5xx->dev_id_info.proj_num),
- iqs5xx->dev_id_info.major_ver,
- iqs5xx->dev_id_info.minor_ver,
- iqs5xx->exp_file[0], iqs5xx->exp_file[1]);
+ return sysfs_emit(buf, "%u.%u.%u.%u:%u.%u\n",
+ be16_to_cpu(iqs5xx->dev_id_info.prod_num),
+ be16_to_cpu(iqs5xx->dev_id_info.proj_num),
+ iqs5xx->dev_id_info.major_ver,
+ iqs5xx->dev_id_info.minor_ver,
+ iqs5xx->exp_file[0], iqs5xx->exp_file[1]);
}
static DEVICE_ATTR_WO(fw_file);
diff --git a/drivers/input/touchscreen/iqs7211.c b/drivers/input/touchscreen/iqs7211.c
index dc084f873..f0a56cde8 100644
--- a/drivers/input/touchscreen/iqs7211.c
+++ b/drivers/input/touchscreen/iqs7211.c
@@ -2401,12 +2401,12 @@ static ssize_t fw_info_show(struct device *dev,
{
struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
- le16_to_cpu(iqs7211->ver_info.prod_num),
- le32_to_cpu(iqs7211->ver_info.patch),
- le16_to_cpu(iqs7211->ver_info.major),
- le16_to_cpu(iqs7211->ver_info.minor),
- iqs7211->exp_file[1], iqs7211->exp_file[0]);
+ return sysfs_emit(buf, "%u.%u.%u.%u:%u.%u\n",
+ le16_to_cpu(iqs7211->ver_info.prod_num),
+ le32_to_cpu(iqs7211->ver_info.patch),
+ le16_to_cpu(iqs7211->ver_info.major),
+ le16_to_cpu(iqs7211->ver_info.minor),
+ iqs7211->exp_file[1], iqs7211->exp_file[0]);
}
static DEVICE_ATTR_RO(fw_info);
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index aa325486f..78e1c63e5 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1336,9 +1336,9 @@ static ssize_t mip4_sysfs_read_fw_version(struct device *dev,
/* Take lock to prevent racing with firmware update */
mutex_lock(&ts->input->mutex);
- count = snprintf(buf, PAGE_SIZE, "%04X %04X %04X %04X\n",
- ts->fw_version.boot, ts->fw_version.core,
- ts->fw_version.app, ts->fw_version.param);
+ count = sysfs_emit(buf, "%04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
mutex_unlock(&ts->input->mutex);
@@ -1362,8 +1362,8 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
* product_name shows the name or version of the hardware
* paired with current firmware in the chip.
*/
- count = snprintf(buf, PAGE_SIZE, "%.*s\n",
- (int)sizeof(ts->product_name), ts->product_name);
+ count = sysfs_emit(buf, "%.*s\n",
+ (int)sizeof(ts->product_name), ts->product_name);
mutex_unlock(&ts->input->mutex);
@@ -1382,7 +1382,7 @@ static ssize_t mip4_sysfs_read_product_id(struct device *dev,
mutex_lock(&ts->input->mutex);
- count = snprintf(buf, PAGE_SIZE, "%04X\n", ts->product_id);
+ count = sysfs_emit(buf, "%04X\n", ts->product_id);
mutex_unlock(&ts->input->mutex);
@@ -1401,8 +1401,8 @@ static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
mutex_lock(&ts->input->mutex);
- count = snprintf(buf, PAGE_SIZE, "%.*s\n",
- (int)sizeof(ts->ic_name), ts->ic_name);
+ count = sysfs_emit(buf, "%.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
mutex_unlock(&ts->input->mutex);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 8ddb3f7d3..ae3aab428 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -847,9 +847,10 @@ static int sur40_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct sur40_state *sur40 = vb2_get_drv_priv(q);
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
- if (q->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - q->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
if (*nplanes)
return sizes[0] < sur40->pix_fmt.sizeimage ? -EINVAL : 0;
@@ -1123,7 +1124,7 @@ static const struct vb2_queue sur40_queue = {
.ops = &sur40_queue_ops,
.mem_ops = &vb2_dma_sg_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
- .min_buffers_needed = 3,
+ .min_queued_buffers = 3,
};
static const struct v4l2_file_operations sur40_video_fops = {
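The atmel_mxt_ts and sur40 hunks follow a videobuf2 rename: .min_buffers_needed is now .min_queued_buffers, and queue_setup callbacks query the allocated-buffer count through vb2_get_num_buffers() instead of reading q->num_buffers directly. A minimal sketch of the queue_setup side, assuming a fixed frame size; example_queue_setup and the 4096-byte size are hypothetical:

#include <media/videobuf2-core.h>

static int example_queue_setup(struct vb2_queue *q,
                               unsigned int *nbuffers,
                               unsigned int *nplanes,
                               unsigned int sizes[],
                               struct device *alloc_devs[])
{
        unsigned int q_num_bufs = vb2_get_num_buffers(q);

        /* Keep at least three buffers in total, as in the sur40 hunk. */
        if (q_num_bufs + *nbuffers < 3)
                *nbuffers = 3 - q_num_bufs;

        *nplanes = 1;
        sizes[0] = 4096; /* hypothetical frame size */
        return 0;
}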
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index d6d04b9f0..60354ebc7 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -456,8 +456,8 @@ static ssize_t mtouch_firmware_rev_show(struct device *dev,
struct usbtouch_usb *usbtouch = usb_get_intfdata(intf);
struct mtouch_priv *priv = usbtouch->priv;
- return scnprintf(output, PAGE_SIZE, "%1x.%1x\n",
- priv->fw_rev_major, priv->fw_rev_minor);
+ return sysfs_emit(output, "%1x.%1x\n",
+ priv->fw_rev_major, priv->fw_rev_minor);
}
static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL);
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 128341a66..32c7be544 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -887,7 +887,7 @@ static ssize_t config_csum_show(struct device *dev,
cfg_csum = wdt->param.xmls_id1;
cfg_csum = (cfg_csum << 16) | wdt->param.xmls_id2;
- return scnprintf(buf, PAGE_SIZE, "%x\n", cfg_csum);
+ return sysfs_emit(buf, "%x\n", cfg_csum);
}
static ssize_t fw_version_show(struct device *dev,
@@ -896,7 +896,7 @@ static ssize_t fw_version_show(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct wdt87xx_data *wdt = i2c_get_clientdata(client);
- return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.fw_id);
+ return sysfs_emit(buf, "%x\n", wdt->param.fw_id);
}
static ssize_t plat_id_show(struct device *dev,
@@ -905,7 +905,7 @@ static ssize_t plat_id_show(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct wdt87xx_data *wdt = i2c_get_clientdata(client);
- return scnprintf(buf, PAGE_SIZE, "%x\n", wdt->param.plat_id);
+ return sysfs_emit(buf, "%x\n", wdt->param.plat_id);
}
static ssize_t update_config_store(struct device *dev,
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 5be511284..5680075f0 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/platform_data/zforce_ts.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
@@ -106,6 +107,7 @@ struct zforce_point {
struct zforce_ts {
struct i2c_client *client;
struct input_dev *input;
+ struct touchscreen_properties prop;
const struct zforce_ts_platdata *pdata;
char phys[32];
@@ -266,7 +268,6 @@ static int zforce_setconfig(struct zforce_ts *ts, char b1)
static int zforce_start(struct zforce_ts *ts)
{
struct i2c_client *client = ts->client;
- const struct zforce_ts_platdata *pdata = ts->pdata;
int ret;
dev_dbg(&client->dev, "starting device\n");
@@ -277,7 +278,7 @@ static int zforce_start(struct zforce_ts *ts)
return ret;
}
- ret = zforce_resolution(ts, pdata->x_max, pdata->y_max);
+ ret = zforce_resolution(ts, ts->prop.max_x, ts->prop.max_y);
if (ret) {
dev_err(&client->dev, "Unable to set resolution, %d\n", ret);
goto error;
@@ -337,7 +338,6 @@ static int zforce_stop(struct zforce_ts *ts)
static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
{
struct i2c_client *client = ts->client;
- const struct zforce_ts_platdata *pdata = ts->pdata;
struct zforce_point point;
int count, i, num = 0;
@@ -355,8 +355,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.coord_y =
payload[9 * i + 4] << 8 | payload[9 * i + 3];
- if (point.coord_x > pdata->x_max ||
- point.coord_y > pdata->y_max) {
+ if (point.coord_x > ts->prop.max_x ||
+ point.coord_y > ts->prop.max_y) {
dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
point.coord_x, point.coord_y);
point.coord_x = point.coord_y = 0;
@@ -390,10 +390,9 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
point.state != STATE_UP);
if (point.state != STATE_UP) {
- input_report_abs(ts->input, ABS_MT_POSITION_X,
- point.coord_x);
- input_report_abs(ts->input, ABS_MT_POSITION_Y,
- point.coord_y);
+ touchscreen_report_pos(ts->input, &ts->prop,
+ point.coord_x, point.coord_y,
+ true);
input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
point.area_major);
input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
@@ -719,15 +718,8 @@ static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
- dev_err(dev, "failed to get x-size property\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(np, "y-size", &pdata->y_max)) {
- dev_err(dev, "failed to get y-size property\n");
- return ERR_PTR(-EINVAL);
- }
+ of_property_read_u32(np, "x-size", &pdata->x_max);
+ of_property_read_u32(np, "y-size", &pdata->y_max);
return pdata;
}
@@ -856,6 +848,12 @@ static int zforce_probe(struct i2c_client *client)
input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
pdata->y_max, 0, 0);
+ touchscreen_parse_properties(input_dev, true, &ts->prop);
+ if (ts->prop.max_x == 0 || ts->prop.max_y == 0) {
+ dev_err(&client->dev, "no size specified\n");
+ return -EINVAL;
+ }
+
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
ZFORCE_MAX_AREA, 0, 0);
input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
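The zforce_ts hunks stop requiring the driver-private x-size/y-size properties and instead parse the generic touchscreen properties (touchscreen-size-x/-y and friends) via touchscreen_parse_properties(), then report coordinates through touchscreen_report_pos(), which also applies any inversion or axis-swap properties. A minimal sketch of the reporting path, under the assumption that prop was filled in at probe time; example_report is hypothetical:

#include <linux/input.h>
#include <linux/input/touchscreen.h>

static void example_report(struct input_dev *input,
                           const struct touchscreen_properties *prop,
                           unsigned int x, unsigned int y)
{
        /* Honours touchscreen-inverted-x/y and touchscreen-swapped-x-y
         * before emitting ABS_MT_POSITION_X/Y (multitouch = true). */
        touchscreen_report_pos(input, prop, x, y, true);
}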
diff --git a/drivers/input/vivaldi-fmap.c b/drivers/input/vivaldi-fmap.c
index 6dae83d96..0d29ec014 100644
--- a/drivers/input/vivaldi-fmap.c
+++ b/drivers/input/vivaldi-fmap.c
@@ -27,10 +27,10 @@ ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
return 0;
for (i = 0; i < data->num_function_row_keys; i++)
- size += scnprintf(buf + size, PAGE_SIZE - size,
- "%s%02X", size ? " " : "", physmap[i]);
+ size += sysfs_emit_at(buf, size,
+ "%s%02X", size ? " " : "", physmap[i]);
if (size)
- size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+ size += sysfs_emit_at(buf, size, "\n");
return size;
}
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 50bac2d79..68edb07d4 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -176,6 +176,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
path->num_nodes = num_nodes;
+ mutex_lock(&icc_bw_lock);
+
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
@@ -186,6 +188,8 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
node = node->reverse;
}
+ mutex_unlock(&icc_bw_lock);
+
return path;
}
@@ -792,12 +796,16 @@ void icc_put(struct icc_path *path)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
+
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
+
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
kfree_const(path->name);
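The interconnect core hunks extend icc_bw_lock to the points where path requests are added to and removed from each node's req_list, so list walks done during bandwidth aggregation cannot race with path construction or teardown; note the ordering in icc_put(), where icc_lock is taken before icc_bw_lock. A minimal sketch of that ordering, with the _example mutexes standing in for the core's private locks (an assumption based only on the visible code):

#include <linux/mutex.h>

static DEFINE_MUTEX(icc_lock_example);    /* stands in for icc_lock */
static DEFINE_MUTEX(icc_bw_lock_example); /* stands in for icc_bw_lock */

static void example_path_teardown(void)
{
        mutex_lock(&icc_lock_example);
        mutex_lock(&icc_bw_lock_example);
        /* hlist_del() of the path's per-node requests happens here */
        mutex_unlock(&icc_bw_lock_example);
        mutex_unlock(&icc_lock_example);
}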
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
index b43325364..8c40f4182 100644
--- a/drivers/interconnect/imx/imx8mm.c
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -86,16 +86,9 @@ static int imx8mm_icc_probe(struct platform_device *pdev)
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
-static int imx8mm_icc_remove(struct platform_device *pdev)
-{
- imx_icc_unregister(pdev);
-
- return 0;
-}
-
static struct platform_driver imx8mm_icc_driver = {
.probe = imx8mm_icc_probe,
- .remove = imx8mm_icc_remove,
+ .remove_new = imx_icc_unregister,
.driver = {
.name = "imx8mm-interconnect",
},
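The imx interconnect drivers here, and the qcom ones further down, convert .remove to .remove_new, the platform-driver remove callback that returns void; since the old int return was effectively ignored by the platform bus, trivial wrappers such as imx8mm_icc_remove() can be deleted and the teardown helper plugged in directly. A minimal sketch; example_remove and example_driver are hypothetical:

#include <linux/platform_device.h>

static void example_remove(struct platform_device *pdev)
{
        /* deregister the icc provider, disable clocks, etc. */
}

static struct platform_driver example_driver = {
        .remove_new = example_remove,
        .driver = {
                .name = "example",
        },
};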
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
index 8ce6d8e4b..fa3d4f97d 100644
--- a/drivers/interconnect/imx/imx8mn.c
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -75,16 +75,9 @@ static int imx8mn_icc_probe(struct platform_device *pdev)
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
-static int imx8mn_icc_remove(struct platform_device *pdev)
-{
- imx_icc_unregister(pdev);
-
- return 0;
-}
-
static struct platform_driver imx8mn_icc_driver = {
.probe = imx8mn_icc_probe,
- .remove = imx8mn_icc_remove,
+ .remove_new = imx_icc_unregister,
.driver = {
.name = "imx8mn-interconnect",
},
diff --git a/drivers/interconnect/imx/imx8mp.c b/drivers/interconnect/imx/imx8mp.c
index a66ae3638..d218bb477 100644
--- a/drivers/interconnect/imx/imx8mp.c
+++ b/drivers/interconnect/imx/imx8mp.c
@@ -239,16 +239,9 @@ static int imx8mp_icc_probe(struct platform_device *pdev)
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes);
}
-static int imx8mp_icc_remove(struct platform_device *pdev)
-{
- imx_icc_unregister(pdev);
-
- return 0;
-}
-
static struct platform_driver imx8mp_icc_driver = {
.probe = imx8mp_icc_probe,
- .remove = imx8mp_icc_remove,
+ .remove_new = imx_icc_unregister,
.driver = {
.name = "imx8mp-interconnect",
},
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index b6fb71305..8bbd672b3 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -85,16 +85,9 @@ static int imx8mq_icc_probe(struct platform_device *pdev)
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
-static int imx8mq_icc_remove(struct platform_device *pdev)
-{
- imx_icc_unregister(pdev);
-
- return 0;
-}
-
static struct platform_driver imx8mq_icc_driver = {
.probe = imx8mq_icc_probe,
- .remove = imx8mq_icc_remove,
+ .remove_new = imx_icc_unregister,
.driver = {
.name = "imx8mq-interconnect",
.sync_state = icc_sync_state,
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 62b516d38..697f96c49 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -191,6 +191,15 @@ config INTERCONNECT_QCOM_SDX75
This is a driver for the Qualcomm Network-on-Chip on sdx75-based
platforms.
+config INTERCONNECT_QCOM_SM6115
+ tristate "Qualcomm SM6115 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sm6115-based
+ platforms.
+
config INTERCONNECT_QCOM_SM6350
tristate "Qualcomm SM6350 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
@@ -245,5 +254,23 @@ config INTERCONNECT_QCOM_SM8550
This is a driver for the Qualcomm Network-on-Chip on SM8550-based
platforms.
+config INTERCONNECT_QCOM_SM8650
+ tristate "Qualcomm SM8650 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8650-based
+ platforms.
+
+config INTERCONNECT_QCOM_X1E80100
+ tristate "Qualcomm X1E80100 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on X1E80100-based
+ platforms.
+
config INTERCONNECT_QCOM_SMD_RPM
tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index c5320e293..704846165 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -24,12 +24,15 @@ qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
qnoc-sdx65-objs := sdx65.o
qnoc-sdx75-objs := sdx75.o
+qnoc-sm6115-objs := sm6115.o
qnoc-sm6350-objs := sm6350.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
qnoc-sm8450-objs := sm8450.o
qnoc-sm8550-objs := sm8550.o
+qnoc-sm8650-objs := sm8650.o
+qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
@@ -53,10 +56,13 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX75) += qnoc-sdx75.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM6115) += qnoc-sm6115.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8550) += qnoc-sm8550.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8650) += qnoc-sm8650.o
+obj-$(CONFIG_INTERCONNECT_QCOM_X1E80100) += qnoc-x1e80100.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index dbacb2a7a..a8ed435f6 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -627,14 +627,12 @@ err_disable_unprepare_clk:
}
EXPORT_SYMBOL(qnoc_probe);
-int qnoc_remove(struct platform_device *pdev)
+void qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_disable_unprepare(qp->bus_clk);
-
- return 0;
}
EXPORT_SYMBOL(qnoc_remove);
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index a13768cfd..f4883d43e 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -161,7 +161,7 @@ extern const struct rpm_clk_resource aggre1_branch_clk;
extern const struct rpm_clk_resource aggre2_branch_clk;
int qnoc_probe(struct platform_device *pdev);
-int qnoc_remove(struct platform_device *pdev);
+void qnoc_remove(struct platform_device *pdev);
bool qcom_icc_rpm_smd_available(void);
int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val);
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index 35148880b..499b1a9ac 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -1344,7 +1344,7 @@ MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
static struct platform_driver msm8916_noc_driver = {
.probe = qnoc_probe,
- .remove = qnoc_remove,
+ .remove_new = qnoc_remove,
.driver = {
.name = "qnoc-msm8916",
.of_match_table = msm8916_noc_of_match,
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index b52c5ac11..8ff2c23b1 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -1421,7 +1421,7 @@ MODULE_DEVICE_TABLE(of, msm8939_noc_of_match);
static struct platform_driver msm8939_noc_driver = {
.probe = qnoc_probe,
- .remove = qnoc_remove,
+ .remove_new = qnoc_remove,
.driver = {
.name = "qnoc-msm8939",
.of_match_table = msm8939_noc_of_match,
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 21f6c8521..241076b5f 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -740,15 +740,13 @@ err_remove_nodes:
return ret;
}
-static int msm8974_icc_remove(struct platform_device *pdev)
+static void msm8974_icc_remove(struct platform_device *pdev)
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
-
- return 0;
}
static const struct of_device_id msm8974_noc_of_match[] = {
@@ -764,7 +762,7 @@ MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
static struct platform_driver msm8974_noc_driver = {
.probe = msm8974_icc_probe,
- .remove = msm8974_icc_remove,
+ .remove_new = msm8974_icc_remove,
.driver = {
.name = "qnoc-msm8974",
.of_match_table = msm8974_noc_of_match,
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index b73566c9b..788131400 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -2108,7 +2108,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qnoc_probe,
- .remove = qnoc_remove,
+ .remove_new = qnoc_remove,
.driver = {
.name = "qnoc-msm8996",
.of_match_table = qnoc_of_match,
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index e97478bbc..61a8695a9 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -148,14 +148,12 @@ static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-static int qcom_osm_l3_remove(struct platform_device *pdev)
+static void qcom_osm_l3_remove(struct platform_device *pdev)
{
struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
-
- return 0;
}
static int qcom_osm_l3_probe(struct platform_device *pdev)
@@ -292,7 +290,7 @@ MODULE_DEVICE_TABLE(of, osm_l3_of_match);
static struct platform_driver osm_l3_driver = {
.probe = qcom_osm_l3_probe,
- .remove = qcom_osm_l3_remove,
+ .remove_new = qcom_osm_l3_remove,
.driver = {
.name = "osm-l3",
.of_match_table = osm_l3_of_match,
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index b88cf9a02..96735800b 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -1367,7 +1367,7 @@ MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match);
static struct platform_driver qcm2290_noc_driver = {
.probe = qnoc_probe,
- .remove = qnoc_remove,
+ .remove_new = qnoc_remove,
.driver = {
.name = "qnoc-qcm2290",
.of_match_table = qcm2290_noc_of_match,
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 9fa1da70c..11b49a89c 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -1083,7 +1083,7 @@ MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
static struct platform_driver qcs404_noc_driver = {
.probe = qnoc_probe,
- .remove = qnoc_remove,
+ .remove_new = qnoc_remove,
.driver = {
.name = "qnoc-qcs404",
.of_match_table = qcs404_noc_of_match,
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 7392bebba..ab91de446 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -1714,7 +1714,7 @@ MODULE_DEVICE_TABLE(of, sdm660_noc_of_match);
static struct platform_driver sdm660_noc_driver = {
.probe = qnoc_probe,
- .remove = qnoc_remove,
+ .remove_new = qnoc_remove,
.driver = {
.name = "qnoc-sdm660",
.of_match_table = sdm660_noc_of_match,
diff --git a/drivers/interconnect/qcom/sm6115.c b/drivers/interconnect/qcom/sm6115.c
new file mode 100644
index 000000000..88b67634a
--- /dev/null
+++ b/drivers/interconnect/qcom/sm6115.c
@@ -0,0 +1,1423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <dt-bindings/interconnect/qcom,sm6115.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "icc-rpm.h"
+
+static const char * const snoc_intf_clocks[] = {
+ "cpu_axi",
+ "ufs_axi",
+ "usb_axi",
+ "ipa", /* Required by qxm_ipa */
+};
+
+static const char * const cnoc_intf_clocks[] = {
+ "usb_axi",
+};
+
+enum {
+ SM6115_MASTER_AMPSS_M0,
+ SM6115_MASTER_ANOC_SNOC,
+ SM6115_MASTER_BIMC_SNOC,
+ SM6115_MASTER_CAMNOC_HF,
+ SM6115_MASTER_CAMNOC_SF,
+ SM6115_MASTER_CRYPTO_CORE0,
+ SM6115_MASTER_GRAPHICS_3D,
+ SM6115_MASTER_IPA,
+ SM6115_MASTER_MDP_PORT0,
+ SM6115_MASTER_PIMEM,
+ SM6115_MASTER_QDSS_BAM,
+ SM6115_MASTER_QDSS_DAP,
+ SM6115_MASTER_QDSS_ETR,
+ SM6115_MASTER_QPIC,
+ SM6115_MASTER_QUP_0,
+ SM6115_MASTER_QUP_CORE_0,
+ SM6115_MASTER_SDCC_1,
+ SM6115_MASTER_SDCC_2,
+ SM6115_MASTER_SNOC_BIMC_NRT,
+ SM6115_MASTER_SNOC_BIMC_RT,
+ SM6115_MASTER_SNOC_BIMC,
+ SM6115_MASTER_SNOC_CFG,
+ SM6115_MASTER_SNOC_CNOC,
+ SM6115_MASTER_TCU_0,
+ SM6115_MASTER_TIC,
+ SM6115_MASTER_USB3,
+ SM6115_MASTER_VIDEO_P0,
+ SM6115_MASTER_VIDEO_PROC,
+
+ SM6115_SLAVE_AHB2PHY_USB,
+ SM6115_SLAVE_ANOC_SNOC,
+ SM6115_SLAVE_APPSS,
+ SM6115_SLAVE_APSS_THROTTLE_CFG,
+ SM6115_SLAVE_BIMC_CFG,
+ SM6115_SLAVE_BIMC_SNOC,
+ SM6115_SLAVE_BOOT_ROM,
+ SM6115_SLAVE_CAMERA_CFG,
+ SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM6115_SLAVE_CLK_CTL,
+ SM6115_SLAVE_CNOC_MSS,
+ SM6115_SLAVE_CRYPTO_0_CFG,
+ SM6115_SLAVE_DCC_CFG,
+ SM6115_SLAVE_DDR_PHY_CFG,
+ SM6115_SLAVE_DDR_SS_CFG,
+ SM6115_SLAVE_DISPLAY_CFG,
+ SM6115_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM6115_SLAVE_EBI_CH0,
+ SM6115_SLAVE_GPU_CFG,
+ SM6115_SLAVE_GPU_THROTTLE_CFG,
+ SM6115_SLAVE_HWKM_CORE,
+ SM6115_SLAVE_IMEM_CFG,
+ SM6115_SLAVE_IPA_CFG,
+ SM6115_SLAVE_LPASS,
+ SM6115_SLAVE_MAPSS,
+ SM6115_SLAVE_MDSP_MPU_CFG,
+ SM6115_SLAVE_MESSAGE_RAM,
+ SM6115_SLAVE_OCIMEM,
+ SM6115_SLAVE_PDM,
+ SM6115_SLAVE_PIMEM_CFG,
+ SM6115_SLAVE_PIMEM,
+ SM6115_SLAVE_PKA_CORE,
+ SM6115_SLAVE_PMIC_ARB,
+ SM6115_SLAVE_QDSS_CFG,
+ SM6115_SLAVE_QDSS_STM,
+ SM6115_SLAVE_QM_CFG,
+ SM6115_SLAVE_QM_MPU_CFG,
+ SM6115_SLAVE_QPIC,
+ SM6115_SLAVE_QUP_0,
+ SM6115_SLAVE_QUP_CORE_0,
+ SM6115_SLAVE_RBCPR_CX_CFG,
+ SM6115_SLAVE_RBCPR_MX_CFG,
+ SM6115_SLAVE_RPM,
+ SM6115_SLAVE_SDCC_1,
+ SM6115_SLAVE_SDCC_2,
+ SM6115_SLAVE_SECURITY,
+ SM6115_SLAVE_SERVICE_CNOC,
+ SM6115_SLAVE_SERVICE_SNOC,
+ SM6115_SLAVE_SNOC_BIMC_NRT,
+ SM6115_SLAVE_SNOC_BIMC_RT,
+ SM6115_SLAVE_SNOC_BIMC,
+ SM6115_SLAVE_SNOC_CFG,
+ SM6115_SLAVE_SNOC_CNOC,
+ SM6115_SLAVE_TCSR,
+ SM6115_SLAVE_TCU,
+ SM6115_SLAVE_TLMM,
+ SM6115_SLAVE_USB3,
+ SM6115_SLAVE_VENUS_CFG,
+ SM6115_SLAVE_VENUS_THROTTLE_CFG,
+ SM6115_SLAVE_VSENSE_CTRL_CFG,
+};
+
+static const u16 slv_ebi_slv_bimc_snoc_links[] = {
+ SM6115_SLAVE_EBI_CH0,
+ SM6115_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node apps_proc = {
+ .name = "apps_proc",
+ .id = SM6115_MASTER_AMPSS_M0,
+ .channels = 1,
+ .buswidth = 16,
+ .qos.qos_port = 0,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 0,
+ .qos.areq_prio = 0,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links),
+ .links = slv_ebi_slv_bimc_snoc_links,
+};
+
+static const u16 link_slv_ebi[] = {
+ SM6115_SLAVE_EBI_CH0,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_rt = {
+ .name = "mas_snoc_bimc_rt",
+ .id = SM6115_MASTER_SNOC_BIMC_RT,
+ .channels = 1,
+ .buswidth = 16,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_ebi),
+ .links = link_slv_ebi,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_nrt = {
+ .name = "mas_snoc_bimc_nrt",
+ .id = SM6115_MASTER_SNOC_BIMC_NRT,
+ .channels = 1,
+ .buswidth = 16,
+ .qos.qos_port = 3,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_ebi),
+ .links = link_slv_ebi,
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .id = SM6115_MASTER_SNOC_BIMC,
+ .channels = 1,
+ .buswidth = 16,
+ .qos.qos_port = 6,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_ebi),
+ .links = link_slv_ebi,
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM6115_MASTER_GRAPHICS_3D,
+ .channels = 1,
+ .buswidth = 32,
+ .qos.qos_port = 1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 0,
+ .qos.areq_prio = 0,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links),
+ .links = slv_ebi_slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node tcu_0 = {
+ .name = "tcu_0",
+ .id = SM6115_MASTER_TCU_0,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 4,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 6,
+ .qos.areq_prio = 6,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links),
+ .links = slv_ebi_slv_bimc_snoc_links,
+};
+
+static const u16 qup_core_0_links[] = {
+ SM6115_SLAVE_QUP_CORE_0,
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM6115_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = 170,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(qup_core_0_links),
+ .links = qup_core_0_links,
+};
+
+static const u16 link_slv_anoc_snoc[] = {
+ SM6115_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node crypto_c0 = {
+ .name = "crypto_c0",
+ .id = SM6115_MASTER_CRYPTO_CORE0,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 43,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+ SM6115_SLAVE_AHB2PHY_USB,
+ SM6115_SLAVE_APSS_THROTTLE_CFG,
+ SM6115_SLAVE_BIMC_CFG,
+ SM6115_SLAVE_BOOT_ROM,
+ SM6115_SLAVE_CAMERA_CFG,
+ SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM6115_SLAVE_CLK_CTL,
+ SM6115_SLAVE_CNOC_MSS,
+ SM6115_SLAVE_CRYPTO_0_CFG,
+ SM6115_SLAVE_DCC_CFG,
+ SM6115_SLAVE_DDR_PHY_CFG,
+ SM6115_SLAVE_DDR_SS_CFG,
+ SM6115_SLAVE_DISPLAY_CFG,
+ SM6115_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM6115_SLAVE_GPU_CFG,
+ SM6115_SLAVE_GPU_THROTTLE_CFG,
+ SM6115_SLAVE_HWKM_CORE,
+ SM6115_SLAVE_IMEM_CFG,
+ SM6115_SLAVE_IPA_CFG,
+ SM6115_SLAVE_LPASS,
+ SM6115_SLAVE_MAPSS,
+ SM6115_SLAVE_MDSP_MPU_CFG,
+ SM6115_SLAVE_MESSAGE_RAM,
+ SM6115_SLAVE_PDM,
+ SM6115_SLAVE_PIMEM_CFG,
+ SM6115_SLAVE_PKA_CORE,
+ SM6115_SLAVE_PMIC_ARB,
+ SM6115_SLAVE_QDSS_CFG,
+ SM6115_SLAVE_QM_CFG,
+ SM6115_SLAVE_QM_MPU_CFG,
+ SM6115_SLAVE_QPIC,
+ SM6115_SLAVE_QUP_0,
+ SM6115_SLAVE_RBCPR_CX_CFG,
+ SM6115_SLAVE_RBCPR_MX_CFG,
+ SM6115_SLAVE_RPM,
+ SM6115_SLAVE_SDCC_1,
+ SM6115_SLAVE_SDCC_2,
+ SM6115_SLAVE_SECURITY,
+ SM6115_SLAVE_SERVICE_CNOC,
+ SM6115_SLAVE_SNOC_CFG,
+ SM6115_SLAVE_TCSR,
+ SM6115_SLAVE_TLMM,
+ SM6115_SLAVE_USB3,
+ SM6115_SLAVE_VENUS_CFG,
+ SM6115_SLAVE_VENUS_THROTTLE_CFG,
+ SM6115_SLAVE_VSENSE_CTRL_CFG,
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+ .name = "mas_snoc_cnoc",
+ .id = SM6115_MASTER_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node xm_dap = {
+ .name = "xm_dap",
+ .id = SM6115_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links,
+};
+
+static const u16 link_slv_snoc_bimc_nrt[] = {
+ SM6115_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node qnm_camera_nrt = {
+ .name = "qnm_camera_nrt",
+ .id = SM6115_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .qos.qos_port = 25,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt),
+ .links = link_slv_snoc_bimc_nrt,
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SM6115_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 16,
+ .qos.qos_port = 30,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt),
+ .links = link_slv_snoc_bimc_nrt,
+};
+
+static struct qcom_icc_node qxm_venus_cpu = {
+ .name = "qxm_venus_cpu",
+ .id = SM6115_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 34,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt),
+ .links = link_slv_snoc_bimc_nrt,
+};
+
+static const u16 link_slv_snoc_bimc_rt[] = {
+ SM6115_SLAVE_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node qnm_camera_rt = {
+ .name = "qnm_camera_rt",
+ .id = SM6115_MASTER_CAMNOC_HF,
+ .channels = 1,
+ .buswidth = 32,
+ .qos.qos_port = 31,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_snoc_bimc_rt),
+ .links = link_slv_snoc_bimc_rt,
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM6115_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 16,
+ .qos.qos_port = 26,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_snoc_bimc_rt),
+ .links = link_slv_snoc_bimc_rt,
+};
+
+static const u16 slv_service_snoc_links[] = {
+ SM6115_SLAVE_SERVICE_SNOC,
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SM6115_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_service_snoc_links),
+ .links = slv_service_snoc_links,
+};
+
+static const u16 mas_tic_links[] = {
+ SM6115_SLAVE_APPSS,
+ SM6115_SLAVE_OCIMEM,
+ SM6115_SLAVE_PIMEM,
+ SM6115_SLAVE_QDSS_STM,
+ SM6115_SLAVE_TCU,
+ SM6115_SLAVE_SNOC_BIMC,
+ SM6115_SLAVE_SNOC_CNOC,
+};
+
+static struct qcom_icc_node qhm_tic = {
+ .name = "qhm_tic",
+ .id = SM6115_MASTER_TIC,
+ .channels = 1,
+ .buswidth = 4,
+ .qos.qos_port = 29,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tic_links),
+ .links = mas_tic_links,
+};
+
+static struct qcom_icc_node mas_anoc_snoc = {
+ .name = "mas_anoc_snoc",
+ .id = SM6115_MASTER_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tic_links),
+ .links = mas_tic_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ SM6115_SLAVE_APPSS,
+ SM6115_SLAVE_SNOC_CNOC,
+ SM6115_SLAVE_OCIMEM,
+ SM6115_SLAVE_PIMEM,
+ SM6115_SLAVE_QDSS_STM,
+ SM6115_SLAVE_TCU,
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = SM6115_MASTER_BIMC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_pimem_links[] = {
+ SM6115_SLAVE_OCIMEM,
+ SM6115_SLAVE_SNOC_BIMC,
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM6115_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 41,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pimem_links),
+ .links = mas_pimem_links,
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM6115_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .qos.qos_port = 23,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node qhm_qpic = {
+ .name = "qhm_qpic",
+ .id = SM6115_MASTER_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SM6115_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .qos.qos_port = 21,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 166,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM6115_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 24,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM6115_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 33,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+ .name = "xm_sdc1",
+ .id = SM6115_MASTER_SDCC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 38,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM6115_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 44,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM6115_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .qos.qos_port = 45,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(link_slv_anoc_snoc),
+ .links = link_slv_anoc_snoc,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM6115_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ SM6115_MASTER_BIMC_SNOC,
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = SM6115_SLAVE_BIMC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM6115_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ahb2phy_usb = {
+ .name = "qhs_ahb2phy_usb",
+ .id = SM6115_SLAVE_AHB2PHY_USB,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_apss_throttle_cfg = {
+ .name = "qhs_apss_throttle_cfg",
+ .id = SM6115_SLAVE_APSS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_bimc_cfg = {
+ .name = "qhs_bimc_cfg",
+ .id = SM6115_SLAVE_BIMC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_boot_rom = {
+ .name = "qhs_boot_rom",
+ .id = SM6115_SLAVE_BOOT_ROM,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
+ .name = "qhs_camera_nrt_throttle_cfg",
+ .id = SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+ .name = "qhs_camera_rt_throttle_cfg",
+ .id = SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_camera_ss_cfg = {
+ .name = "qhs_camera_ss_cfg",
+ .id = SM6115_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM6115_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM6115_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM6115_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM6115_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SM6115_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ddr_phy_cfg = {
+ .name = "qhs_ddr_phy_cfg",
+ .id = SM6115_SLAVE_DDR_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ddr_ss_cfg = {
+ .name = "qhs_ddr_ss_cfg",
+ .id = SM6115_SLAVE_DDR_SS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_disp_ss_cfg = {
+ .name = "qhs_disp_ss_cfg",
+ .id = SM6115_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_display_throttle_cfg = {
+ .name = "qhs_display_throttle_cfg",
+ .id = SM6115_SLAVE_DISPLAY_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_gpu_cfg = {
+ .name = "qhs_gpu_cfg",
+ .id = SM6115_SLAVE_GPU_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_gpu_throttle_cfg = {
+ .name = "qhs_gpu_throttle_cfg",
+ .id = SM6115_SLAVE_GPU_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+ .name = "qhs_hwkm",
+ .id = SM6115_SLAVE_HWKM_CORE,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM6115_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_ipa_cfg = {
+ .name = "qhs_ipa_cfg",
+ .id = SM6115_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_lpass = {
+ .name = "qhs_lpass",
+ .id = SM6115_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mapss = {
+ .name = "qhs_mapss",
+ .id = SM6115_SLAVE_MAPSS,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mdsp_mpu_cfg = {
+ .name = "qhs_mdsp_mpu_cfg",
+ .id = SM6115_SLAVE_MDSP_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mesg_ram = {
+ .name = "qhs_mesg_ram",
+ .id = SM6115_SLAVE_MESSAGE_RAM,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_mss = {
+ .name = "qhs_mss",
+ .id = SM6115_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM6115_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM6115_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper = {
+ .name = "qhs_pka_wrapper",
+ .id = SM6115_SLAVE_PKA_CORE,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_pmic_arb = {
+ .name = "qhs_pmic_arb",
+ .id = SM6115_SLAVE_PMIC_ARB,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM6115_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qm_cfg = {
+ .name = "qhs_qm_cfg",
+ .id = SM6115_SLAVE_QM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qm_mpu_cfg = {
+ .name = "qhs_qm_mpu_cfg",
+ .id = SM6115_SLAVE_QM_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qpic = {
+ .name = "qhs_qpic",
+ .id = SM6115_SLAVE_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SM6115_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_rpm = {
+ .name = "qhs_rpm",
+ .id = SM6115_SLAVE_RPM,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+ .name = "qhs_sdc1",
+ .id = SM6115_SLAVE_SDCC_1,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM6115_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SM6115_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_cfg_links[] = {
+ SM6115_MASTER_SNOC_CFG,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SM6115_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_snoc_cfg_links),
+ .links = slv_snoc_cfg_links,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM6115_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM6115_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .id = SM6115_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM6115_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_venus_throttle_cfg = {
+ .name = "qhs_venus_throttle_cfg",
+ .id = SM6115_SLAVE_VENUS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM6115_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM6115_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_bimc_nrt_links[] = {
+ SM6115_MASTER_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_nrt = {
+ .name = "slv_snoc_bimc_nrt",
+ .id = SM6115_SLAVE_SNOC_BIMC_NRT,
+ .channels = 1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links),
+ .links = slv_snoc_bimc_nrt_links,
+};
+
+static const u16 slv_snoc_bimc_rt_links[] = {
+ SM6115_MASTER_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_rt = {
+ .name = "slv_snoc_bimc_rt",
+ .id = SM6115_SLAVE_SNOC_BIMC_RT,
+ .channels = 1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links),
+ .links = slv_snoc_bimc_rt_links,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM6115_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+ SM6115_MASTER_SNOC_CNOC,
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+ .name = "slv_snoc_cnoc",
+ .id = SM6115_SLAVE_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 25,
+ .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+ .links = slv_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM6115_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM6115_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ SM6115_MASTER_SNOC_BIMC,
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = SM6115_SLAVE_SNOC_BIMC,
+ .channels = 1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM6115_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM6115_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM6115_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+};
+
+static const u16 slv_anoc_snoc_links[] = {
+ SM6115_MASTER_ANOC_SNOC,
+};
+
+static struct qcom_icc_node slv_anoc_snoc = {
+ .name = "slv_anoc_snoc",
+ .id = SM6115_SLAVE_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(slv_anoc_snoc_links),
+ .links = slv_anoc_snoc_links,
+};
+
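+/*
+ * Per-provider node tables: entries are indexed with the port identifiers
+ * from the SM6115 interconnect dt-bindings header, matching the DT binding.
+ */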
+static struct qcom_icc_node *bimc_nodes[] = {
+ [MASTER_AMPSS_M0] = &apps_proc,
+ [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
+ [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
+ [SNOC_BIMC_MAS] = &mas_snoc_bimc,
+ [MASTER_GRAPHICS_3D] = &qnm_gpu,
+ [MASTER_TCU_0] = &tcu_0,
+ [SLAVE_EBI_CH0] = &ebi,
+ [BIMC_SNOC_SLV] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc sm6115_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = bimc_nodes,
+ .num_nodes = ARRAY_SIZE(bimc_nodes),
+ .regmap_cfg = &bimc_regmap_config,
+ .bus_clk_desc = &bimc_clk,
+ .keep_alive = true,
+ .qos_offset = 0x8000,
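+ /* ab_coeff is, presumably, a percentage multiplier for aggregated average-bandwidth votes */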
+ .ab_coeff = 153,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [SNOC_CNOC_MAS] = &mas_snoc_cnoc,
+ [MASTER_QDSS_DAP] = &xm_dap,
+ [SLAVE_AHB2PHY_USB] = &qhs_ahb2phy_usb,
+ [SLAVE_APSS_THROTTLE_CFG] = &qhs_apss_throttle_cfg,
+ [SLAVE_BIMC_CFG] = &qhs_bimc_cfg,
+ [SLAVE_BOOT_ROM] = &qhs_boot_rom,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_ss_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_DDR_PHY_CFG] = &qhs_ddr_phy_cfg,
+ [SLAVE_DDR_SS_CFG] = &qhs_ddr_ss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_disp_ss_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+ [SLAVE_GPU_CFG] = &qhs_gpu_cfg,
+ [SLAVE_GPU_THROTTLE_CFG] = &qhs_gpu_throttle_cfg,
+ [SLAVE_HWKM_CORE] = &qhs_hwkm,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa_cfg,
+ [SLAVE_LPASS] = &qhs_lpass,
+ [SLAVE_MAPSS] = &qhs_mapss,
+ [SLAVE_MDSP_MPU_CFG] = &qhs_mdsp_mpu_cfg,
+ [SLAVE_MESSAGE_RAM] = &qhs_mesg_ram,
+ [SLAVE_CNOC_MSS] = &qhs_mss,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PKA_CORE] = &qhs_pka_wrapper,
+ [SLAVE_PMIC_ARB] = &qhs_pmic_arb,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QM_CFG] = &qhs_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_RPM] = &qhs_rpm,
+ [SLAVE_SDCC_1] = &qhs_sdc1,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct regmap_config cnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x6200,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc sm6115_config_noc = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .regmap_cfg = &cnoc_regmap_config,
+ .intf_clocks = cnoc_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(cnoc_intf_clocks),
+ .bus_clk_desc = &bus_1_clk,
+ .keep_alive = true,
+};
+
+static struct qcom_icc_node *sys_noc_nodes[] = {
+ [MASTER_CRYPTO_CORE0] = &crypto_c0,
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_TIC] = &qhm_tic,
+ [MASTER_ANOC_SNOC] = &mas_anoc_snoc,
+ [BIMC_SNOC_MAS] = &mas_bimc_snoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_1] = &xm_sdc1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_USB3] = &xm_usb3_0,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SNOC_CNOC_SLV] = &slv_snoc_cnoc,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SNOC_BIMC_SLV] = &slv_snoc_bimc,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+ [SLAVE_ANOC_SNOC] = &slv_anoc_snoc,
+};
+
+static const struct regmap_config sys_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5f080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc sm6115_sys_noc = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = sys_noc_nodes,
+ .num_nodes = ARRAY_SIZE(sys_noc_nodes),
+ .regmap_cfg = &sys_noc_regmap_config,
+ .intf_clocks = snoc_intf_clocks,
+ .num_intf_clocks = ARRAY_SIZE(snoc_intf_clocks),
+ .bus_clk_desc = &bus_2_clk,
+ .keep_alive = true,
+};
+
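+/*
+ * The "-virt" providers below have no dedicated MMIO resource in DT; their
+ * main job is clock scaling: the QUP core clock for clk_virt and the
+ * multimedia NRT/RT AXI clocks for the mmnrt/mmrt NoCs.
+ */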
+static struct qcom_icc_node *clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+};
+
+static const struct qcom_icc_desc sm6115_clk_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .regmap_cfg = &sys_noc_regmap_config,
+ .bus_clk_desc = &qup_clk,
+ .keep_alive = true,
+};
+
+static struct qcom_icc_node *mmnrt_virt_nodes[] = {
+ [MASTER_CAMNOC_SF] = &qnm_camera_nrt,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_PROC] = &qxm_venus_cpu,
+ [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
+};
+
+static const struct qcom_icc_desc sm6115_mmnrt_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = mmnrt_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mmnrt_virt_nodes),
+ .regmap_cfg = &sys_noc_regmap_config,
+ .bus_clk_desc = &mmaxi_0_clk,
+ .keep_alive = true,
+ .ab_coeff = 142,
+};
+
+static struct qcom_icc_node *mmrt_virt_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camera_rt,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
+};
+
+static const struct qcom_icc_desc sm6115_mmrt_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = mmrt_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mmrt_virt_nodes),
+ .regmap_cfg = &sys_noc_regmap_config,
+ .bus_clk_desc = &mmaxi_1_clk,
+ .keep_alive = true,
+ .ab_coeff = 139,
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm6115-bimc", .data = &sm6115_bimc },
+ { .compatible = "qcom,sm6115-clk-virt", .data = &sm6115_clk_virt },
+ { .compatible = "qcom,sm6115-cnoc", .data = &sm6115_config_noc },
+ { .compatible = "qcom,sm6115-mmrt-virt", .data = &sm6115_mmrt_virt },
+ { .compatible = "qcom,sm6115-mmnrt-virt", .data = &sm6115_mmnrt_virt },
+ { .compatible = "qcom,sm6115-snoc", .data = &sm6115_sys_noc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm6115",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
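+/* Register at core_initcall so the provider is available before its consumers probe */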
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("SM6115 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
new file mode 100644
index 000000000..b962e6c23
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8650.c
@@ -0,0 +1,1674 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8650-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "sm8650.h"
+
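+/*
+ * Node definitions, grouped by NoC: aggre1/aggre2, QUP clk_virt, config and
+ * main CNoC, GEM NoC, LPASS NoCs, mc_virt, MMNoC, NSP NoC, PCIe ANoC, SNoC.
+ */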
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8650_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8650_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_qup02 = {
+ .name = "qxm_qup02",
+ .id = SM8650_MASTER_QUP_3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8650_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8650_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8650_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8650_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8650_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8650_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8650_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SM8650_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SM8650_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SM8650_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8650_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8650_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8650_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8650_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .id = SM8650_MASTER_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 46,
+ .links = { SM8650_SLAVE_AHB2PHY_SOUTH, SM8650_SLAVE_AHB2PHY_NORTH,
+ SM8650_SLAVE_CAMERA_CFG, SM8650_SLAVE_CLK_CTL,
+ SM8650_SLAVE_RBCPR_CX_CFG, SM8650_SLAVE_CPR_HMX,
+ SM8650_SLAVE_RBCPR_MMCX_CFG, SM8650_SLAVE_RBCPR_MXA_CFG,
+ SM8650_SLAVE_RBCPR_MXC_CFG, SM8650_SLAVE_CPR_NSPCX,
+ SM8650_SLAVE_CRYPTO_0_CFG, SM8650_SLAVE_CX_RDPM,
+ SM8650_SLAVE_DISPLAY_CFG, SM8650_SLAVE_GFX3D_CFG,
+ SM8650_SLAVE_I2C, SM8650_SLAVE_I3C_IBI0_CFG,
+ SM8650_SLAVE_I3C_IBI1_CFG, SM8650_SLAVE_IMEM_CFG,
+ SM8650_SLAVE_CNOC_MSS, SM8650_SLAVE_MX_2_RDPM,
+ SM8650_SLAVE_MX_RDPM, SM8650_SLAVE_PCIE_0_CFG,
+ SM8650_SLAVE_PCIE_1_CFG, SM8650_SLAVE_PCIE_RSCC,
+ SM8650_SLAVE_PDM, SM8650_SLAVE_PRNG,
+ SM8650_SLAVE_QDSS_CFG, SM8650_SLAVE_QSPI_0,
+ SM8650_SLAVE_QUP_3, SM8650_SLAVE_QUP_1,
+ SM8650_SLAVE_QUP_2, SM8650_SLAVE_SDCC_2,
+ SM8650_SLAVE_SDCC_4, SM8650_SLAVE_SPSS_CFG,
+ SM8650_SLAVE_TCSR, SM8650_SLAVE_TLMM,
+ SM8650_SLAVE_UFS_MEM_CFG, SM8650_SLAVE_USB3_0,
+ SM8650_SLAVE_VENUS_CFG, SM8650_SLAVE_VSENSE_CTRL_CFG,
+ SM8650_SLAVE_CNOC_MNOC_CFG, SM8650_SLAVE_NSP_QTB_CFG,
+ SM8650_SLAVE_PCIE_ANOC_CFG, SM8650_SLAVE_SERVICE_CNOC_CFG,
+ SM8650_SLAVE_QDSS_STM, SM8650_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8650_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 9,
+ .links = { SM8650_SLAVE_AOSS, SM8650_SLAVE_IPA_CFG,
+ SM8650_SLAVE_IPC_ROUTER_CFG, SM8650_SLAVE_TME_CFG,
+ SM8650_SLAVE_APPSS, SM8650_SLAVE_CNOC_CFG,
+ SM8650_SLAVE_DDRSS_CFG, SM8650_SLAVE_IMEM,
+ SM8650_SLAVE_SERVICE_CNOC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8650_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_PCIE_0, SM8650_SLAVE_PCIE_1 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8650_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8650_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_ubwc_p_tcu = {
+ .name = "alm_ubwc_p_tcu",
+ .id = SM8650_MASTER_UBWC_P_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8650_MASTER_APPSS_PROC,
+ .channels = 3,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+ SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8650_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass_gemnoc = {
+ .name = "qnm_lpass_gemnoc",
+ .id = SM8650_MASTER_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+ SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SM8650_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+ SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8650_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8650_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .id = SM8650_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+ SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8650_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8650_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
+ SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_ubwc_p = {
+ .name = "qnm_ubwc_p",
+ .id = SM8650_MASTER_UBWC_P,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8650_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .id = SM8650_MASTER_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .id = SM8650_MASTER_LPASS_LPINOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
+ .name = "qxm_lpinoc_dsp_axim",
+ .id = SM8650_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8650_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8650_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SM8650_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8650_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = SM8650_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .id = SM8650_MASTER_CDSP_HCP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .id = SM8650_MASTER_VIDEO,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = SM8650_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM8650_MASTER_VIDEO_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SM8650_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .id = SM8650_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .id = SM8650_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_pcie_anoc_cfg = {
+ .name = "qsm_pcie_anoc_cfg",
+ .id = SM8650_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8650_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8650_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8650_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8650_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8650_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8650_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8650_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8650_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8650_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8650_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8650_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8650_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8650_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8650_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_hmx = {
+ .name = "qhs_cpr_hmx",
+ .id = SM8650_SLAVE_CPR_HMX,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8650_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxa = {
+ .name = "qhs_cpr_mxa",
+ .id = SM8650_SLAVE_RBCPR_MXA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxc = {
+ .name = "qhs_cpr_mxc",
+ .id = SM8650_SLAVE_RBCPR_MXC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+ .name = "qhs_cpr_nspcx",
+ .id = SM8650_SLAVE_CPR_NSPCX,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8650_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SM8650_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8650_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8650_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i2c = {
+ .name = "qhs_i2c",
+ .id = SM8650_SLAVE_I2C,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
+ .name = "qhs_i3c_ibi0_cfg",
+ .id = SM8650_SLAVE_I3C_IBI0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
+ .name = "qhs_i3c_ibi1_cfg",
+ .id = SM8650_SLAVE_I3C_IBI1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8650_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8650_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_2_rdpm = {
+ .name = "qhs_mx_2_rdpm",
+ .id = SM8650_SLAVE_MX_2_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SM8650_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8650_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8650_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_rscc = {
+ .name = "qhs_pcie_rscc",
+ .id = SM8650_SLAVE_PCIE_RSCC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM8650_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8650_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8650_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8650_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup02 = {
+ .name = "qhs_qup02",
+ .id = SM8650_SLAVE_QUP_3,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8650_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8650_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8650_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8650_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8650_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8650_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8650_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8650_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8650_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8650_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8650_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .id = SM8650_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+ .name = "qss_nsp_qtb_cfg",
+ .id = SM8650_SLAVE_NSP_QTB_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_pcie_anoc_cfg = {
+ .name = "qss_pcie_anoc_cfg",
+ .id = SM8650_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node srvc_cnoc_cfg = {
+ .name = "srvc_cnoc_cfg",
+ .id = SM8650_SLAVE_SERVICE_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8650_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8650_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8650_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8650_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8650_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = SM8650_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_apss = {
+ .name = "qss_apss",
+ .id = SM8650_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .id = SM8650_SLAVE_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8650_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qss_ddrss_cfg = {
+ .name = "qss_ddrss_cfg",
+ .id = SM8650_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8650_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_cnoc_main = {
+ .name = "srvc_cnoc_main",
+ .id = SM8650_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8650_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8650_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8650_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8650_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8650_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8650_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .id = SM8650_SLAVE_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .id = SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .id = SM8650_SLAVE_LPICX_NOC_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8650_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8650_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8650_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8650_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8650_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8650_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8650_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .id = SM8650_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8650_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8650_MASTER_SNOC_SF_MEM_NOC },
+};
+
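+/*
+ * BCMs (Bus Clock Managers) are the RPMh-side aggregation points: each BCM
+ * commits the combined bandwidth vote of the nodes listed under it.
+ */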
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(0),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .enable_mask = BIT(0),
+ .keepalive = true,
+ .num_nodes = 60,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_cpr_cx,
+ &qhs_cpr_hmx, &qhs_cpr_mmcx,
+ &qhs_cpr_mxa, &qhs_cpr_mxc,
+ &qhs_cpr_nspcx, &qhs_crypto0_cfg,
+ &qhs_cx_rdpm, &qhs_display_cfg,
+ &qhs_gpuss_cfg, &qhs_i2c,
+ &qhs_i3c_ibi0_cfg, &qhs_i3c_ibi1_cfg,
+ &qhs_imem_cfg, &qhs_mss_cfg,
+ &qhs_mx_2_rdpm, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pcie_rscc, &qhs_pdm,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup02,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb3_0, &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_anoc_cfg,
+ &srvc_cnoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc,
+ &qnm_gemnoc_pcie, &qhs_aoss,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_tme_cfg, &qss_apss,
+ &qss_cfg, &qss_ddrss_cfg,
+ &qxs_imem, &srvc_cnoc_main,
+ &xs_pcie_0, &xs_pcie_1 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 8,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
+ &qnm_camnoc_sf, &qnm_vapss_hcp,
+ &qnm_video_cv_cpu, &qnm_video_cvp,
+ &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 15,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &alm_ubwc_p_tcu, &chm_apps,
+ &qnm_gpu, &qnm_mdsp,
+ &qnm_mnoc_hf, &qnm_mnoc_sf,
+ &qnm_nsp_gemnoc, &qnm_pcie,
+ &qnm_snoc_sf, &qnm_ubwc_p,
+ &xm_gic, &qns_gem_noc_cnoc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
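+/*
+ * Per-NoC provider descriptors: node tables are indexed with the identifiers
+ * from dt-bindings/interconnect/qcom,sm8650-rpmh.h.
+ */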
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_3] = &qxm_qup02,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8650_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8650_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sm8650_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_CPR_HMX] = &qhs_cpr_hmx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
+ [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
+ [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_I2C] = &qhs_i2c,
+ [SLAVE_I3C_IBI0_CFG] = &qhs_i3c_ibi0_cfg,
+ [SLAVE_I3C_IBI1_CFG] = &qhs_i3c_ibi1_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_MX_2_RDPM] = &qhs_mx_2_rdpm,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_RSCC] = &qhs_pcie_rscc,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_3] = &qhs_qup02,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
+ [SLAVE_SERVICE_CNOC_CFG] = &srvc_cnoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm8650_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qss_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc_main,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+};
+
+static const struct qcom_icc_desc sm8650_cnoc_main = {
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_UBWC_P_TCU] = &alm_ubwc_p_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_UBWC_P] = &qnm_ubwc_p,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+};
+
+static const struct qcom_icc_desc sm8650_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8650_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc sm8650_lpass_lpiaon_noc = {
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc sm8650_lpass_lpicx_noc = {
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sm8650_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm8650_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8650_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static const struct qcom_icc_desc sm8650_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc sm8650_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
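+/* Match table tying each SM8650 NoC DT node to its provider description above. */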
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8650-aggre1-noc", .data = &sm8650_aggre1_noc },
+ { .compatible = "qcom,sm8650-aggre2-noc", .data = &sm8650_aggre2_noc },
+ { .compatible = "qcom,sm8650-clk-virt", .data = &sm8650_clk_virt },
+ { .compatible = "qcom,sm8650-config-noc", .data = &sm8650_config_noc },
+ { .compatible = "qcom,sm8650-cnoc-main", .data = &sm8650_cnoc_main },
+ { .compatible = "qcom,sm8650-gem-noc", .data = &sm8650_gem_noc },
+ { .compatible = "qcom,sm8650-lpass-ag-noc", .data = &sm8650_lpass_ag_noc },
+ { .compatible = "qcom,sm8650-lpass-lpiaon-noc", .data = &sm8650_lpass_lpiaon_noc },
+ { .compatible = "qcom,sm8650-lpass-lpicx-noc", .data = &sm8650_lpass_lpicx_noc },
+ { .compatible = "qcom,sm8650-mc-virt", .data = &sm8650_mc_virt },
+ { .compatible = "qcom,sm8650-mmss-noc", .data = &sm8650_mmss_noc },
+ { .compatible = "qcom,sm8650-nsp-noc", .data = &sm8650_nsp_noc },
+ { .compatible = "qcom,sm8650-pcie-anoc", .data = &sm8650_pcie_anoc },
+ { .compatible = "qcom,sm8650-system-noc", .data = &sm8650_system_noc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove_new = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sm8650",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
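+/*
+ * Register at core_initcall() time so the interconnect provider comes
+ * up before the consumer drivers that scale bandwidth through it.
+ */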
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("sm8650 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm8650.h b/drivers/interconnect/qcom/sm8650.h
new file mode 100644
index 000000000..de35c956f
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8650.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SM8650 interconnect IDs
+ *
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8650_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8650_H
+
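+/*
+ * These are the driver-internal node IDs used in the .id field of each
+ * qcom_icc_node; they are global across providers and distinct from the
+ * per-provider indices defined in the DT binding header.
+ */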
+#define SM8650_MASTER_A1NOC_SNOC 0
+#define SM8650_MASTER_A2NOC_SNOC 1
+#define SM8650_MASTER_ANOC_PCIE_GEM_NOC 2
+#define SM8650_MASTER_APPSS_PROC 3
+#define SM8650_MASTER_CAMNOC_HF 4
+#define SM8650_MASTER_CAMNOC_ICP 5
+#define SM8650_MASTER_CAMNOC_SF 6
+#define SM8650_MASTER_CDSP_HCP 7
+#define SM8650_MASTER_CDSP_PROC 8
+#define SM8650_MASTER_CNOC_CFG 9
+#define SM8650_MASTER_CNOC_MNOC_CFG 10
+#define SM8650_MASTER_COMPUTE_NOC 11
+#define SM8650_MASTER_CRYPTO 12
+#define SM8650_MASTER_GEM_NOC_CNOC 13
+#define SM8650_MASTER_GEM_NOC_PCIE_SNOC 14
+#define SM8650_MASTER_GFX3D 15
+#define SM8650_MASTER_GIC 16
+#define SM8650_MASTER_GPU_TCU 17
+#define SM8650_MASTER_IPA 18
+#define SM8650_MASTER_LLCC 19
+#define SM8650_MASTER_LPASS_GEM_NOC 20
+#define SM8650_MASTER_LPASS_LPINOC 21
+#define SM8650_MASTER_LPASS_PROC 22
+#define SM8650_MASTER_LPIAON_NOC 23
+#define SM8650_MASTER_MDP 24
+#define SM8650_MASTER_MNOC_HF_MEM_NOC 25
+#define SM8650_MASTER_MNOC_SF_MEM_NOC 26
+#define SM8650_MASTER_MSS_PROC 27
+#define SM8650_MASTER_PCIE_0 28
+#define SM8650_MASTER_PCIE_1 29
+#define SM8650_MASTER_PCIE_ANOC_CFG 30
+#define SM8650_MASTER_QDSS_BAM 31
+#define SM8650_MASTER_QDSS_ETR 32
+#define SM8650_MASTER_QDSS_ETR_1 33
+#define SM8650_MASTER_QSPI_0 34
+#define SM8650_MASTER_QUP_1 35
+#define SM8650_MASTER_QUP_2 36
+#define SM8650_MASTER_QUP_3 37
+#define SM8650_MASTER_QUP_CORE_0 38
+#define SM8650_MASTER_QUP_CORE_1 39
+#define SM8650_MASTER_QUP_CORE_2 40
+#define SM8650_MASTER_SDCC_2 41
+#define SM8650_MASTER_SDCC_4 42
+#define SM8650_MASTER_SNOC_SF_MEM_NOC 43
+#define SM8650_MASTER_SP 44
+#define SM8650_MASTER_SYS_TCU 45
+#define SM8650_MASTER_UBWC_P 46
+#define SM8650_MASTER_UBWC_P_TCU 47
+#define SM8650_MASTER_UFS_MEM 48
+#define SM8650_MASTER_USB3_0 49
+#define SM8650_MASTER_VIDEO 50
+#define SM8650_MASTER_VIDEO_CV_PROC 51
+#define SM8650_MASTER_VIDEO_PROC 52
+#define SM8650_MASTER_VIDEO_V_PROC 53
+#define SM8650_SLAVE_A1NOC_SNOC 54
+#define SM8650_SLAVE_A2NOC_SNOC 55
+#define SM8650_SLAVE_AHB2PHY_NORTH 56
+#define SM8650_SLAVE_AHB2PHY_SOUTH 57
+#define SM8650_SLAVE_ANOC_PCIE_GEM_NOC 58
+#define SM8650_SLAVE_AOSS 59
+#define SM8650_SLAVE_APPSS 60
+#define SM8650_SLAVE_CAMERA_CFG 61
+#define SM8650_SLAVE_CDSP_MEM_NOC 62
+#define SM8650_SLAVE_CLK_CTL 63
+#define SM8650_SLAVE_CNOC_CFG 64
+#define SM8650_SLAVE_CNOC_MNOC_CFG 65
+#define SM8650_SLAVE_CNOC_MSS 66
+#define SM8650_SLAVE_CPR_HMX 67
+#define SM8650_SLAVE_CPR_NSPCX 68
+#define SM8650_SLAVE_CRYPTO_0_CFG 69
+#define SM8650_SLAVE_CX_RDPM 70
+#define SM8650_SLAVE_DDRSS_CFG 71
+#define SM8650_SLAVE_DISPLAY_CFG 72
+#define SM8650_SLAVE_EBI1 73
+#define SM8650_SLAVE_GEM_NOC_CNOC 74
+#define SM8650_SLAVE_GFX3D_CFG 75
+#define SM8650_SLAVE_I2C 76
+#define SM8650_SLAVE_I3C_IBI0_CFG 77
+#define SM8650_SLAVE_I3C_IBI1_CFG 78
+#define SM8650_SLAVE_IMEM 79
+#define SM8650_SLAVE_IMEM_CFG 80
+#define SM8650_SLAVE_IPA_CFG 81
+#define SM8650_SLAVE_IPC_ROUTER_CFG 82
+#define SM8650_SLAVE_LLCC 83
+#define SM8650_SLAVE_LPASS_GEM_NOC 84
+#define SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC 85
+#define SM8650_SLAVE_LPICX_NOC_LPIAON_NOC 86
+#define SM8650_SLAVE_MEM_NOC_PCIE_SNOC 87
+#define SM8650_SLAVE_MNOC_HF_MEM_NOC 88
+#define SM8650_SLAVE_MNOC_SF_MEM_NOC 89
+#define SM8650_SLAVE_MX_2_RDPM 90
+#define SM8650_SLAVE_MX_RDPM 91
+#define SM8650_SLAVE_NSP_QTB_CFG 92
+#define SM8650_SLAVE_PCIE_0 93
+#define SM8650_SLAVE_PCIE_1 94
+#define SM8650_SLAVE_PCIE_0_CFG 95
+#define SM8650_SLAVE_PCIE_1_CFG 96
+#define SM8650_SLAVE_PCIE_ANOC_CFG 97
+#define SM8650_SLAVE_PCIE_RSCC 98
+#define SM8650_SLAVE_PDM 99
+#define SM8650_SLAVE_PRNG 100
+#define SM8650_SLAVE_QDSS_CFG 101
+#define SM8650_SLAVE_QDSS_STM 102
+#define SM8650_SLAVE_QSPI_0 103
+#define SM8650_SLAVE_QUP_1 104
+#define SM8650_SLAVE_QUP_2 105
+#define SM8650_SLAVE_QUP_3 106
+#define SM8650_SLAVE_QUP_CORE_0 107
+#define SM8650_SLAVE_QUP_CORE_1 108
+#define SM8650_SLAVE_QUP_CORE_2 109
+#define SM8650_SLAVE_RBCPR_CX_CFG 110
+#define SM8650_SLAVE_RBCPR_MMCX_CFG 111
+#define SM8650_SLAVE_RBCPR_MXA_CFG 112
+#define SM8650_SLAVE_RBCPR_MXC_CFG 113
+#define SM8650_SLAVE_SDCC_2 114
+#define SM8650_SLAVE_SDCC_4 115
+#define SM8650_SLAVE_SERVICE_CNOC 116
+#define SM8650_SLAVE_SERVICE_CNOC_CFG 117
+#define SM8650_SLAVE_SERVICE_MNOC 118
+#define SM8650_SLAVE_SERVICE_PCIE_ANOC 119
+#define SM8650_SLAVE_SNOC_GEM_NOC_SF 120
+#define SM8650_SLAVE_SPSS_CFG 121
+#define SM8650_SLAVE_TCSR 122
+#define SM8650_SLAVE_TCU 123
+#define SM8650_SLAVE_TLMM 124
+#define SM8650_SLAVE_TME_CFG 125
+#define SM8650_SLAVE_UFS_MEM_CFG 126
+#define SM8650_SLAVE_USB3_0 127
+#define SM8650_SLAVE_VENUS_CFG 128
+#define SM8650_SLAVE_VSENSE_CTRL_CFG 129
+
+#endif
diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c
index 16a145a3c..3816bfb4e 100644
--- a/drivers/interconnect/qcom/smd-rpm.c
+++ b/drivers/interconnect/qcom/smd-rpm.c
@@ -63,11 +63,9 @@ int qcom_icc_rpm_set_bus_rate(const struct rpm_clk_resource *clk, int ctx, u32 r
}
EXPORT_SYMBOL_GPL(qcom_icc_rpm_set_bus_rate);
-static int qcom_icc_rpm_smd_remove(struct platform_device *pdev)
+static void qcom_icc_rpm_smd_remove(struct platform_device *pdev)
{
icc_smd_rpm = NULL;
-
- return 0;
}
static int qcom_icc_rpm_smd_probe(struct platform_device *pdev)
@@ -87,7 +85,7 @@ static struct platform_driver qcom_interconnect_rpm_smd_driver = {
.name = "icc_smd_rpm",
},
.probe = qcom_icc_rpm_smd_probe,
- .remove = qcom_icc_rpm_smd_remove,
+ .remove_new = qcom_icc_rpm_smd_remove,
};
module_platform_driver(qcom_interconnect_rpm_smd_driver);
MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
new file mode 100644
index 000000000..06f0a6d6c
--- /dev/null
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -0,0 +1,2303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,x1e80100-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "x1e80100.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = X1E80100_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = X1E80100_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = X1E80100_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = X1E80100_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = X1E80100_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = X1E80100_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = X1E80100_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = X1E80100_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = X1E80100_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = X1E80100_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = X1E80100_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = X1E80100_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = X1E80100_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = X1E80100_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_QUP_CORE_2 },
+};
+
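+/*
+ * qsm_cfg below fans out to every register-configuration slave on the
+ * config NoC; num_links must match the number of links[] entries (47).
+ */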
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .id = X1E80100_MASTER_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 47,
+ .links = { X1E80100_SLAVE_AHB2PHY_SOUTH, X1E80100_SLAVE_AHB2PHY_NORTH,
+ X1E80100_SLAVE_AHB2PHY_2, X1E80100_SLAVE_AV1_ENC_CFG,
+ X1E80100_SLAVE_CAMERA_CFG, X1E80100_SLAVE_CLK_CTL,
+ X1E80100_SLAVE_CRYPTO_0_CFG, X1E80100_SLAVE_DISPLAY_CFG,
+ X1E80100_SLAVE_GFX3D_CFG, X1E80100_SLAVE_IMEM_CFG,
+ X1E80100_SLAVE_IPC_ROUTER_CFG, X1E80100_SLAVE_PCIE_0_CFG,
+ X1E80100_SLAVE_PCIE_1_CFG, X1E80100_SLAVE_PCIE_2_CFG,
+ X1E80100_SLAVE_PCIE_3_CFG, X1E80100_SLAVE_PCIE_4_CFG,
+ X1E80100_SLAVE_PCIE_5_CFG, X1E80100_SLAVE_PCIE_6A_CFG,
+ X1E80100_SLAVE_PCIE_6B_CFG, X1E80100_SLAVE_PCIE_RSC_CFG,
+ X1E80100_SLAVE_PDM, X1E80100_SLAVE_PRNG,
+ X1E80100_SLAVE_QDSS_CFG, X1E80100_SLAVE_QSPI_0,
+ X1E80100_SLAVE_QUP_0, X1E80100_SLAVE_QUP_1,
+ X1E80100_SLAVE_QUP_2, X1E80100_SLAVE_SDCC_2,
+ X1E80100_SLAVE_SDCC_4, X1E80100_SLAVE_SMMUV3_CFG,
+ X1E80100_SLAVE_TCSR, X1E80100_SLAVE_TLMM,
+ X1E80100_SLAVE_UFS_MEM_CFG, X1E80100_SLAVE_USB2,
+ X1E80100_SLAVE_USB3_0, X1E80100_SLAVE_USB3_1,
+ X1E80100_SLAVE_USB3_2, X1E80100_SLAVE_USB3_MP,
+ X1E80100_SLAVE_USB4_0, X1E80100_SLAVE_USB4_1,
+ X1E80100_SLAVE_USB4_2, X1E80100_SLAVE_VENUS_CFG,
+ X1E80100_SLAVE_LPASS_QTB_CFG, X1E80100_SLAVE_CNOC_MNOC_CFG,
+ X1E80100_SLAVE_NSP_QTB_CFG, X1E80100_SLAVE_QDSS_STM,
+ X1E80100_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = X1E80100_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { X1E80100_SLAVE_AOSS, X1E80100_SLAVE_TME_CFG,
+ X1E80100_SLAVE_APPSS, X1E80100_SLAVE_CNOC_CFG,
+ X1E80100_SLAVE_BOOT_IMEM, X1E80100_SLAVE_IMEM },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = X1E80100_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 8,
+ .links = { X1E80100_SLAVE_PCIE_0, X1E80100_SLAVE_PCIE_1,
+ X1E80100_SLAVE_PCIE_2, X1E80100_SLAVE_PCIE_3,
+ X1E80100_SLAVE_PCIE_4, X1E80100_SLAVE_PCIE_5,
+ X1E80100_SLAVE_PCIE_6A, X1E80100_SLAVE_PCIE_6B },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = X1E80100_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_pcie_tcu = {
+ .name = "alm_pcie_tcu",
+ .id = X1E80100_MASTER_PCIE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = X1E80100_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = X1E80100_MASTER_APPSS_PROC,
+ .channels = 6,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+ X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = X1E80100_MASTER_GFX3D,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass = {
+ .name = "qnm_lpass",
+ .id = X1E80100_MASTER_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+ X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = X1E80100_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = X1E80100_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_noc = {
+ .name = "qnm_nsp_noc",
+ .id = X1E80100_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+ X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 2,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = X1E80100_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 3,
+ .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
+ X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = X1E80100_MASTER_GIC2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .id = X1E80100_MASTER_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .id = X1E80100_MASTER_LPASS_LPINOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
+ .name = "qxm_lpinoc_dsp_axim",
+ .id = X1E80100_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = X1E80100_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_av1_enc = {
+ .name = "qnm_av1_enc",
+ .id = X1E80100_MASTER_AV1_ENC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = X1E80100_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = X1E80100_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = X1E80100_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_eva = {
+ .name = "qnm_eva",
+ .id = X1E80100_MASTER_EVA,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = X1E80100_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .id = X1E80100_MASTER_VIDEO,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = X1E80100_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = X1E80100_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .id = X1E80100_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = X1E80100_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_pcie_north_gem_noc = {
+ .name = "qnm_pcie_north_gem_noc",
+ .id = X1E80100_MASTER_PCIE_NORTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_pcie_south_gem_noc = {
+ .name = "qnm_pcie_south_gem_noc",
+ .id = X1E80100_MASTER_PCIE_SOUTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie_3 = {
+ .name = "xm_pcie_3",
+ .id = X1E80100_MASTER_PCIE_3,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_NORTH },
+};
+
+static struct qcom_icc_node xm_pcie_4 = {
+ .name = "xm_pcie_4",
+ .id = X1E80100_MASTER_PCIE_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_NORTH },
+};
+
+static struct qcom_icc_node xm_pcie_5 = {
+ .name = "xm_pcie_5",
+ .id = X1E80100_MASTER_PCIE_5,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_NORTH },
+};
+
+static struct qcom_icc_node xm_pcie_0 = {
+ .name = "xm_pcie_0",
+ .id = X1E80100_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_1 = {
+ .name = "xm_pcie_1",
+ .id = X1E80100_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_2 = {
+ .name = "xm_pcie_2",
+ .id = X1E80100_MASTER_PCIE_2,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_6a = {
+ .name = "xm_pcie_6a",
+ .id = X1E80100_MASTER_PCIE_6A,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node xm_pcie_6b = {
+ .name = "xm_pcie_6b",
+ .id = X1E80100_MASTER_PCIE_6B,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = X1E80100_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = X1E80100_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_gic = {
+ .name = "qnm_gic",
+ .id = X1E80100_MASTER_GIC1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_usb_anoc = {
+ .name = "qnm_usb_anoc",
+ .id = X1E80100_MASTER_USB_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_north_snoc = {
+ .name = "qnm_aggre_usb_north_snoc",
+ .id = X1E80100_MASTER_AGGRE_USB_NORTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre_usb_south_snoc = {
+ .name = "qnm_aggre_usb_south_snoc",
+ .id = X1E80100_MASTER_AGGRE_USB_SOUTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb2_0 = {
+ .name = "xm_usb2_0",
+ .id = X1E80100_MASTER_USB2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_NORTH },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+ .name = "xm_usb3_mp",
+ .id = X1E80100_MASTER_USB3_MP,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_NORTH },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = X1E80100_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = X1E80100_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb3_2 = {
+ .name = "xm_usb3_2",
+ .id = X1E80100_MASTER_USB3_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb4_0 = {
+ .name = "xm_usb4_0",
+ .id = X1E80100_MASTER_USB4_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb4_1 = {
+ .name = "xm_usb4_1",
+ .id = X1E80100_MASTER_USB4_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node xm_usb4_2 = {
+ .name = "xm_usb4_2",
+ .id = X1E80100_MASTER_USB4_2,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_disp = {
+ .name = "qnm_mnoc_hf_disp",
+ .id = X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_disp = {
+ .name = "qnm_pcie_disp",
+ .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node llcc_mc_disp = {
+ .name = "llcc_mc_disp",
+ .id = X1E80100_MASTER_LLCC_DISP,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_EBI1_DISP },
+};
+
+static struct qcom_icc_node qnm_mdp_disp = {
+ .name = "qnm_mdp_disp",
+ .id = X1E80100_MASTER_MDP_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_pcie = {
+ .name = "qnm_pcie_pcie",
+ .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_LLCC_PCIE },
+};
+
+static struct qcom_icc_node llcc_mc_pcie = {
+ .name = "llcc_mc_pcie",
+ .id = X1E80100_MASTER_LLCC_PCIE,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_EBI1_PCIE },
+};
+
+static struct qcom_icc_node qnm_pcie_north_gem_noc_pcie = {
+ .name = "qnm_pcie_north_gem_noc_pcie",
+ .id = X1E80100_MASTER_PCIE_NORTH_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
+};
+
+static struct qcom_icc_node qnm_pcie_south_gem_noc_pcie = {
+ .name = "qnm_pcie_south_gem_noc_pcie",
+ .id = X1E80100_MASTER_PCIE_SOUTH_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_3_pcie = {
+ .name = "xm_pcie_3_pcie",
+ .id = X1E80100_MASTER_PCIE_3_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_4_pcie = {
+ .name = "xm_pcie_4_pcie",
+ .id = X1E80100_MASTER_PCIE_4_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_5_pcie = {
+ .name = "xm_pcie_5_pcie",
+ .id = X1E80100_MASTER_PCIE_5_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_0_pcie = {
+ .name = "xm_pcie_0_pcie",
+ .id = X1E80100_MASTER_PCIE_0_PCIE,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_1_pcie = {
+ .name = "xm_pcie_1_pcie",
+ .id = X1E80100_MASTER_PCIE_1_PCIE,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_2_pcie = {
+ .name = "xm_pcie_2_pcie",
+ .id = X1E80100_MASTER_PCIE_2_PCIE,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_6a_pcie = {
+ .name = "xm_pcie_6a_pcie",
+ .id = X1E80100_MASTER_PCIE_6A_PCIE,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node xm_pcie_6b_pcie = {
+ .name = "xm_pcie_6b_pcie",
+ .id = X1E80100_MASTER_PCIE_6B_PCIE,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = X1E80100_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = X1E80100_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = X1E80100_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = X1E80100_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = X1E80100_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = X1E80100_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = X1E80100_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .id = X1E80100_SLAVE_AHB2PHY_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_av1_enc_cfg = {
+ .name = "qhs_av1_enc_cfg",
+ .id = X1E80100_SLAVE_AV1_ENC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = X1E80100_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = X1E80100_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = X1E80100_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = X1E80100_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = X1E80100_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = X1E80100_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = X1E80100_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = X1E80100_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = X1E80100_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie2_cfg = {
+ .name = "qhs_pcie2_cfg",
+ .id = X1E80100_SLAVE_PCIE_2_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie3_cfg = {
+ .name = "qhs_pcie3_cfg",
+ .id = X1E80100_SLAVE_PCIE_3_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+ .name = "qhs_pcie4_cfg",
+ .id = X1E80100_SLAVE_PCIE_4_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie5_cfg = {
+ .name = "qhs_pcie5_cfg",
+ .id = X1E80100_SLAVE_PCIE_5_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie6a_cfg = {
+ .name = "qhs_pcie6a_cfg",
+ .id = X1E80100_SLAVE_PCIE_6A_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie6b_cfg = {
+ .name = "qhs_pcie6b_cfg",
+ .id = X1E80100_SLAVE_PCIE_6B_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_rsc_cfg = {
+ .name = "qhs_pcie_rsc_cfg",
+ .id = X1E80100_SLAVE_PCIE_RSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = X1E80100_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = X1E80100_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = X1E80100_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = X1E80100_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = X1E80100_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = X1E80100_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = X1E80100_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = X1E80100_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = X1E80100_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+ .name = "qhs_smmuv3_cfg",
+ .id = X1E80100_SLAVE_SMMUV3_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = X1E80100_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = X1E80100_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = X1E80100_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb2_0_cfg = {
+ .name = "qhs_usb2_0_cfg",
+ .id = X1E80100_SLAVE_USB2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0_cfg = {
+ .name = "qhs_usb3_0_cfg",
+ .id = X1E80100_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_1_cfg = {
+ .name = "qhs_usb3_1_cfg",
+ .id = X1E80100_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_2_cfg = {
+ .name = "qhs_usb3_2_cfg",
+ .id = X1E80100_SLAVE_USB3_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_mp_cfg = {
+ .name = "qhs_usb3_mp_cfg",
+ .id = X1E80100_SLAVE_USB3_MP,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb4_0_cfg = {
+ .name = "qhs_usb4_0_cfg",
+ .id = X1E80100_SLAVE_USB4_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb4_1_cfg = {
+ .name = "qhs_usb4_1_cfg",
+ .id = X1E80100_SLAVE_USB4_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb4_2_cfg = {
+ .name = "qhs_usb4_2_cfg",
+ .id = X1E80100_SLAVE_USB4_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = X1E80100_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_lpass_qtb_cfg = {
+ .name = "qss_lpass_qtb_cfg",
+ .id = X1E80100_SLAVE_LPASS_QTB_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .id = X1E80100_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+ .name = "qss_nsp_qtb_cfg",
+ .id = X1E80100_SLAVE_NSP_QTB_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = X1E80100_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = X1E80100_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = X1E80100_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = X1E80100_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .id = X1E80100_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .id = X1E80100_SLAVE_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = X1E80100_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = X1E80100_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = X1E80100_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = X1E80100_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_2 = {
+ .name = "xs_pcie_2",
+ .id = X1E80100_SLAVE_PCIE_2,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_3 = {
+ .name = "xs_pcie_3",
+ .id = X1E80100_SLAVE_PCIE_3,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+ .name = "xs_pcie_4",
+ .id = X1E80100_SLAVE_PCIE_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_5 = {
+ .name = "xs_pcie_5",
+ .id = X1E80100_SLAVE_PCIE_5,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_6a = {
+ .name = "xs_pcie_6a",
+ .id = X1E80100_SLAVE_PCIE_6A,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_6b = {
+ .name = "xs_pcie_6b",
+ .id = X1E80100_SLAVE_PCIE_6B,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = X1E80100_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = X1E80100_SLAVE_LLCC,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = X1E80100_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .id = X1E80100_SLAVE_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .id = X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .id = X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = X1E80100_SLAVE_EBI1,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = X1E80100_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = X1E80100_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = X1E80100_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_north_gem_noc = {
+ .name = "qns_pcie_north_gem_noc",
+ .id = X1E80100_SLAVE_PCIE_NORTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_PCIE_NORTH },
+};
+
+static struct qcom_icc_node qns_pcie_south_gem_noc = {
+ .name = "qns_pcie_south_gem_noc",
+ .id = X1E80100_SLAVE_PCIE_SOUTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_PCIE_SOUTH },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = X1E80100_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_snoc = {
+ .name = "qns_aggre_usb_snoc",
+ .id = X1E80100_SLAVE_USB_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_USB_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_aggre_usb_north_snoc = {
+ .name = "qns_aggre_usb_north_snoc",
+ .id = X1E80100_SLAVE_AGGRE_USB_NORTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_AGGRE_USB_NORTH },
+};
+
+static struct qcom_icc_node qns_aggre_usb_south_snoc = {
+ .name = "qns_aggre_usb_south_snoc",
+ .id = X1E80100_SLAVE_AGGRE_USB_SOUTH,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_AGGRE_USB_SOUTH },
+};
+
+static struct qcom_icc_node qns_llcc_disp = {
+ .name = "qns_llcc_disp",
+ .id = X1E80100_SLAVE_LLCC_DISP,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_LLCC_DISP },
+};
+
+static struct qcom_icc_node ebi_disp = {
+ .name = "ebi_disp",
+ .id = X1E80100_SLAVE_EBI1_DISP,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_disp = {
+ .name = "qns_mem_noc_hf_disp",
+ .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_llcc_pcie = {
+ .name = "qns_llcc_pcie",
+ .id = X1E80100_SLAVE_LLCC_PCIE,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_LLCC_PCIE },
+};
+
+static struct qcom_icc_node ebi_pcie = {
+ .name = "ebi_pcie",
+ .id = X1E80100_SLAVE_EBI1_PCIE,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc_pcie = {
+ .name = "qns_pcie_mem_noc_pcie",
+ .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE },
+};
+
+static struct qcom_icc_node qns_pcie_north_gem_noc_pcie = {
+ .name = "qns_pcie_north_gem_noc_pcie",
+ .id = X1E80100_SLAVE_PCIE_NORTH_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_PCIE_NORTH_PCIE },
+};
+
+static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
+ .name = "qns_pcie_south_gem_noc_pcie",
+ .id = X1E80100_SLAVE_PCIE_SOUTH_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { X1E80100_MASTER_PCIE_SOUTH_PCIE },
+};
+
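+/*
+ * ACV is an enable-mask BCM: instead of a bandwidth value, the vote
+ * sets BIT(3) whenever any bandwidth is requested on EBI.
+ */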
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 63,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_ahb2phy2,
+ &qhs_av1_enc_cfg, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_gpuss_cfg, &qhs_imem_cfg,
+ &qhs_ipc_router, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3_cfg, &qhs_pcie4_cfg,
+ &qhs_pcie5_cfg, &qhs_pcie6a_cfg,
+ &qhs_pcie6b_cfg, &qhs_pcie_rsc_cfg,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc,
+ &qnm_gemnoc_pcie, &qhs_aoss,
+ &qhs_tme_cfg, &qns_apss,
+ &qss_cfg, &qxs_boot_imem,
+ &qxs_imem, &xs_pcie_0,
+ &xs_pcie_1, &xs_pcie_2,
+ &xs_pcie_3, &xs_pcie_4,
+ &xs_pcie_5, &xs_pcie_6a,
+ &xs_pcie_6b },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 1,
+ .nodes = { &qhs_display_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 2,
+ .nodes = { &qxm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 10,
+ .nodes = { &qnm_av1_enc, &qnm_camnoc_hf,
+ &qnm_camnoc_icp, &qnm_camnoc_sf,
+ &qnm_eva, &qnm_mdp,
+ &qnm_video, &qnm_video_cv_cpu,
+ &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_pc0 = {
+ .name = "PC0",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
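+/*
+ * The QUP core BCMs below use vote_scale = 1 to bypass the default
+ * /1000 scaling applied during BCM aggregation, so their aggregated
+ * votes reach RPMh unscaled.
+ */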
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .num_nodes = 13,
+ .nodes = { &alm_gpu_tcu, &alm_pcie_tcu,
+ &alm_sys_tcu, &chm_apps,
+ &qnm_gpu, &qnm_lpass,
+ &qnm_mnoc_hf, &qnm_mnoc_sf,
+ &qnm_nsp_noc, &qnm_pcie,
+ &xm_gic, &qns_gem_noc_cnoc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qnm_usb_anoc },
+};
+
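+/*
+ * The *_disp and *_pcie BCM variants below mirror the main DDR path so
+ * display- and PCIe-originated bandwidth can be voted independently of
+ * the regular HLOS votes on the same buses.
+ */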
+static struct qcom_icc_bcm bcm_acv_disp = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mc0_disp = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm0_disp = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm1_disp = {
+ .name = "MM1",
+ .num_nodes = 1,
+ .nodes = { &qnm_mdp_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh0_disp = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh1_disp = {
+ .name = "SH1",
+ .num_nodes = 2,
+ .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
+};
+
+static struct qcom_icc_bcm bcm_acv_pcie = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_pcie },
+};
+
+static struct qcom_icc_bcm bcm_mc0_pcie = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_pcie },
+};
+
+static struct qcom_icc_bcm bcm_pc0_pcie = {
+ .name = "PC0",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sh0_pcie = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sh1_pcie = {
+ .name = "SH1",
+ .num_nodes = 1,
+ .nodes = { &qnm_pcie_pcie },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc x1e80100_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const cnoc_cfg_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AV1_ENC_CFG] = &qhs_av1_enc_cfg,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg,
+ [SLAVE_PCIE_3_CFG] = &qhs_pcie3_cfg,
+ [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+ [SLAVE_PCIE_5_CFG] = &qhs_pcie5_cfg,
+ [SLAVE_PCIE_6A_CFG] = &qhs_pcie6a_cfg,
+ [SLAVE_PCIE_6B_CFG] = &qhs_pcie6b_cfg,
+ [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB2] = &qhs_usb2_0_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0_cfg,
+ [SLAVE_USB3_1] = &qhs_usb3_1_cfg,
+ [SLAVE_USB3_2] = &qhs_usb3_2_cfg,
+ [SLAVE_USB3_MP] = &qhs_usb3_mp_cfg,
+ [SLAVE_USB4_0] = &qhs_usb4_0_cfg,
+ [SLAVE_USB4_1] = &qhs_usb4_1_cfg,
+ [SLAVE_USB4_2] = &qhs_usb4_2_cfg,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc x1e80100_cnoc_cfg = {
+ .nodes = cnoc_cfg_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
+ .bcms = cnoc_cfg_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_2] = &xs_pcie_2,
+ [SLAVE_PCIE_3] = &xs_pcie_3,
+ [SLAVE_PCIE_4] = &xs_pcie_4,
+ [SLAVE_PCIE_5] = &xs_pcie_5,
+ [SLAVE_PCIE_6A] = &xs_pcie_6a,
+ [SLAVE_PCIE_6B] = &xs_pcie_6b,
+};
+
+static const struct qcom_icc_desc x1e80100_cnoc_main = {
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh0_disp,
+ &bcm_sh1_disp,
+ &bcm_sh0_pcie,
+ &bcm_sh1_pcie,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_noc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GIC2] = &xm_gic,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+ [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
+ [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+ [MASTER_ANOC_PCIE_GEM_NOC_PCIE] = &qnm_pcie_pcie,
+ [SLAVE_LLCC_PCIE] = &qns_llcc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc x1e80100_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc x1e80100_lpass_lpiaon_noc = {
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_lpicx_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc x1e80100_lpass_lpicx_noc = {
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+ .bcms = lpass_lpicx_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpicx_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_acv_disp,
+ &bcm_mc0_disp,
+ &bcm_acv_pcie,
+ &bcm_mc0_pcie,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+ [MASTER_LLCC_DISP] = &llcc_mc_disp,
+ [SLAVE_EBI1_DISP] = &ebi_disp,
+ [MASTER_LLCC_PCIE] = &llcc_mc_pcie,
+ [SLAVE_EBI1_PCIE] = &ebi_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm0_disp,
+ &bcm_mm1_disp,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_AV1_ENC] = &qnm_av1_enc,
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_EVA] = &qnm_eva,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_MDP_DISP] = &qnm_mdp_disp,
+ [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+};
+
+static const struct qcom_icc_desc x1e80100_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc x1e80100_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_center_anoc_bcms[] = {
+ &bcm_pc0,
+ &bcm_pc0_pcie,
+};
+
+static struct qcom_icc_node * const pcie_center_anoc_nodes[] = {
+ [MASTER_PCIE_NORTH] = &qnm_pcie_north_gem_noc,
+ [MASTER_PCIE_SOUTH] = &qnm_pcie_south_gem_noc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [MASTER_PCIE_NORTH_PCIE] = &qnm_pcie_north_gem_noc_pcie,
+ [MASTER_PCIE_SOUTH_PCIE] = &qnm_pcie_south_gem_noc_pcie,
+ [SLAVE_ANOC_PCIE_GEM_NOC_PCIE] = &qns_pcie_mem_noc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_pcie_center_anoc = {
+ .nodes = pcie_center_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_center_anoc_nodes),
+ .bcms = pcie_center_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_center_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_north_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const pcie_north_anoc_nodes[] = {
+ [MASTER_PCIE_3] = &xm_pcie_3,
+ [MASTER_PCIE_4] = &xm_pcie_4,
+ [MASTER_PCIE_5] = &xm_pcie_5,
+ [SLAVE_PCIE_NORTH] = &qns_pcie_north_gem_noc,
+ [MASTER_PCIE_3_PCIE] = &xm_pcie_3_pcie,
+ [MASTER_PCIE_4_PCIE] = &xm_pcie_4_pcie,
+ [MASTER_PCIE_5_PCIE] = &xm_pcie_5_pcie,
+ [SLAVE_PCIE_NORTH_PCIE] = &qns_pcie_north_gem_noc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_pcie_north_anoc = {
+ .nodes = pcie_north_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_north_anoc_nodes),
+ .bcms = pcie_north_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_north_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_south_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const pcie_south_anoc_nodes[] = {
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_PCIE_1] = &xm_pcie_1,
+ [MASTER_PCIE_2] = &xm_pcie_2,
+ [MASTER_PCIE_6A] = &xm_pcie_6a,
+ [MASTER_PCIE_6B] = &xm_pcie_6b,
+ [SLAVE_PCIE_SOUTH] = &qns_pcie_south_gem_noc,
+ [MASTER_PCIE_0_PCIE] = &xm_pcie_0_pcie,
+ [MASTER_PCIE_1_PCIE] = &xm_pcie_1_pcie,
+ [MASTER_PCIE_2_PCIE] = &xm_pcie_2_pcie,
+ [MASTER_PCIE_6A_PCIE] = &xm_pcie_6a_pcie,
+ [MASTER_PCIE_6B_PCIE] = &xm_pcie_6b_pcie,
+ [SLAVE_PCIE_SOUTH_PCIE] = &qns_pcie_south_gem_noc_pcie,
+};
+
+static const struct qcom_icc_desc x1e80100_pcie_south_anoc = {
+ .nodes = pcie_south_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_south_anoc_nodes),
+ .bcms = pcie_south_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_south_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GIC1] = &qnm_gic,
+ [MASTER_USB_NOC_SNOC] = &qnm_usb_anoc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc x1e80100_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const usb_center_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const usb_center_anoc_nodes[] = {
+ [MASTER_AGGRE_USB_NORTH] = &qnm_aggre_usb_north_snoc,
+ [MASTER_AGGRE_USB_SOUTH] = &qnm_aggre_usb_south_snoc,
+ [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_usb_center_anoc = {
+ .nodes = usb_center_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(usb_center_anoc_nodes),
+ .bcms = usb_center_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(usb_center_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const usb_north_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const usb_north_anoc_nodes[] = {
+ [MASTER_USB2] = &xm_usb2_0,
+ [MASTER_USB3_MP] = &xm_usb3_mp,
+ [SLAVE_AGGRE_USB_NORTH] = &qns_aggre_usb_north_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_usb_north_anoc = {
+ .nodes = usb_north_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(usb_north_anoc_nodes),
+ .bcms = usb_north_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(usb_north_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const usb_south_anoc_bcms[] = {
+};
+
+static struct qcom_icc_node * const usb_south_anoc_nodes[] = {
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [MASTER_USB3_2] = &xm_usb3_2,
+ [MASTER_USB4_0] = &xm_usb4_0,
+ [MASTER_USB4_1] = &xm_usb4_1,
+ [MASTER_USB4_2] = &xm_usb4_2,
+ [SLAVE_AGGRE_USB_SOUTH] = &qns_aggre_usb_south_snoc,
+};
+
+static const struct qcom_icc_desc x1e80100_usb_south_anoc = {
+ .nodes = usb_south_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(usb_south_anoc_nodes),
+ .bcms = usb_south_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(usb_south_anoc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,x1e80100-aggre1-noc", .data = &x1e80100_aggre1_noc},
+ { .compatible = "qcom,x1e80100-aggre2-noc", .data = &x1e80100_aggre2_noc},
+ { .compatible = "qcom,x1e80100-clk-virt", .data = &x1e80100_clk_virt},
+ { .compatible = "qcom,x1e80100-cnoc-cfg", .data = &x1e80100_cnoc_cfg},
+ { .compatible = "qcom,x1e80100-cnoc-main", .data = &x1e80100_cnoc_main},
+ { .compatible = "qcom,x1e80100-gem-noc", .data = &x1e80100_gem_noc},
+ { .compatible = "qcom,x1e80100-lpass-ag-noc", .data = &x1e80100_lpass_ag_noc},
+ { .compatible = "qcom,x1e80100-lpass-lpiaon-noc", .data = &x1e80100_lpass_lpiaon_noc},
+ { .compatible = "qcom,x1e80100-lpass-lpicx-noc", .data = &x1e80100_lpass_lpicx_noc},
+ { .compatible = "qcom,x1e80100-mc-virt", .data = &x1e80100_mc_virt},
+ { .compatible = "qcom,x1e80100-mmss-noc", .data = &x1e80100_mmss_noc},
+ { .compatible = "qcom,x1e80100-nsp-noc", .data = &x1e80100_nsp_noc},
+ { .compatible = "qcom,x1e80100-pcie-center-anoc", .data = &x1e80100_pcie_center_anoc},
+ { .compatible = "qcom,x1e80100-pcie-north-anoc", .data = &x1e80100_pcie_north_anoc},
+ { .compatible = "qcom,x1e80100-pcie-south-anoc", .data = &x1e80100_pcie_south_anoc},
+ { .compatible = "qcom,x1e80100-system-noc", .data = &x1e80100_system_noc},
+ { .compatible = "qcom,x1e80100-usb-center-anoc", .data = &x1e80100_usb_center_anoc},
+ { .compatible = "qcom,x1e80100-usb-north-anoc", .data = &x1e80100_usb_north_anoc},
+ { .compatible = "qcom,x1e80100-usb-south-anoc", .data = &x1e80100_usb_south_anoc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove_new = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-x1e80100",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("x1e80100 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/x1e80100.h b/drivers/interconnect/qcom/x1e80100.h
new file mode 100644
index 000000000..2e14264f4
--- /dev/null
+++ b/drivers/interconnect/qcom/x1e80100.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * X1E80100 interconnect IDs
+ *
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_X1E80100_H
+#define __DRIVERS_INTERCONNECT_QCOM_X1E80100_H
+
+#define X1E80100_MASTER_A1NOC_SNOC 0
+#define X1E80100_MASTER_A2NOC_SNOC 1
+#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC 2
+#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP 3
+#define X1E80100_MASTER_APPSS_PROC 4
+#define X1E80100_MASTER_CAMNOC_HF 5
+#define X1E80100_MASTER_CAMNOC_ICP 6
+#define X1E80100_MASTER_CAMNOC_SF 7
+#define X1E80100_MASTER_CDSP_PROC 8
+#define X1E80100_MASTER_CNOC_CFG 9
+#define X1E80100_MASTER_CNOC_MNOC_CFG 10
+#define X1E80100_MASTER_COMPUTE_NOC 11
+#define X1E80100_MASTER_CRYPTO 12
+#define X1E80100_MASTER_GEM_NOC_CNOC 13
+#define X1E80100_MASTER_GEM_NOC_PCIE_SNOC 14
+#define X1E80100_MASTER_GFX3D 15
+#define X1E80100_MASTER_GPU_TCU 16
+#define X1E80100_MASTER_IPA 17
+#define X1E80100_MASTER_LLCC 18
+#define X1E80100_MASTER_LLCC_DISP 19
+#define X1E80100_MASTER_LPASS_GEM_NOC 20
+#define X1E80100_MASTER_LPASS_LPINOC 21
+#define X1E80100_MASTER_LPASS_PROC 22
+#define X1E80100_MASTER_LPIAON_NOC 23
+#define X1E80100_MASTER_MDP 24
+#define X1E80100_MASTER_MDP_DISP 25
+#define X1E80100_MASTER_MNOC_HF_MEM_NOC 26
+#define X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP 27
+#define X1E80100_MASTER_MNOC_SF_MEM_NOC 28
+#define X1E80100_MASTER_PCIE_0 29
+#define X1E80100_MASTER_PCIE_1 30
+#define X1E80100_MASTER_QDSS_ETR 31
+#define X1E80100_MASTER_QDSS_ETR_1 32
+#define X1E80100_MASTER_QSPI_0 33
+#define X1E80100_MASTER_QUP_0 34
+#define X1E80100_MASTER_QUP_1 35
+#define X1E80100_MASTER_QUP_2 36
+#define X1E80100_MASTER_QUP_CORE_0 37
+#define X1E80100_MASTER_QUP_CORE_1 38
+#define X1E80100_MASTER_SDCC_2 39
+#define X1E80100_MASTER_SDCC_4 40
+#define X1E80100_MASTER_SNOC_SF_MEM_NOC 41
+#define X1E80100_MASTER_SP 42
+#define X1E80100_MASTER_SYS_TCU 43
+#define X1E80100_MASTER_UFS_MEM 44
+#define X1E80100_MASTER_USB3_0 45
+#define X1E80100_MASTER_VIDEO 46
+#define X1E80100_MASTER_VIDEO_CV_PROC 47
+#define X1E80100_MASTER_VIDEO_V_PROC 48
+#define X1E80100_SLAVE_A1NOC_SNOC 49
+#define X1E80100_SLAVE_A2NOC_SNOC 50
+#define X1E80100_SLAVE_AHB2PHY_NORTH 51
+#define X1E80100_SLAVE_AHB2PHY_SOUTH 52
+#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC 53
+#define X1E80100_SLAVE_AOSS 54
+#define X1E80100_SLAVE_APPSS 55
+#define X1E80100_SLAVE_BOOT_IMEM 56
+#define X1E80100_SLAVE_CAMERA_CFG 57
+#define X1E80100_SLAVE_CDSP_MEM_NOC 58
+#define X1E80100_SLAVE_CLK_CTL 59
+#define X1E80100_SLAVE_CNOC_CFG 60
+#define X1E80100_SLAVE_CNOC_MNOC_CFG 61
+#define X1E80100_SLAVE_CRYPTO_0_CFG 62
+#define X1E80100_SLAVE_DISPLAY_CFG 63
+#define X1E80100_SLAVE_EBI1 64
+#define X1E80100_SLAVE_EBI1_DISP 65
+#define X1E80100_SLAVE_GEM_NOC_CNOC 66
+#define X1E80100_SLAVE_GFX3D_CFG 67
+#define X1E80100_SLAVE_IMEM 68
+#define X1E80100_SLAVE_IMEM_CFG 69
+#define X1E80100_SLAVE_IPC_ROUTER_CFG 70
+#define X1E80100_SLAVE_LLCC 71
+#define X1E80100_SLAVE_LLCC_DISP 72
+#define X1E80100_SLAVE_LPASS_GEM_NOC 73
+#define X1E80100_SLAVE_LPASS_QTB_CFG 74
+#define X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC 75
+#define X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC 76
+#define X1E80100_SLAVE_MEM_NOC_PCIE_SNOC 77
+#define X1E80100_SLAVE_MNOC_HF_MEM_NOC 78
+#define X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP 79
+#define X1E80100_SLAVE_MNOC_SF_MEM_NOC 80
+#define X1E80100_SLAVE_NSP_QTB_CFG 81
+#define X1E80100_SLAVE_PCIE_0 82
+#define X1E80100_SLAVE_PCIE_0_CFG 83
+#define X1E80100_SLAVE_PCIE_1 84
+#define X1E80100_SLAVE_PCIE_1_CFG 85
+#define X1E80100_SLAVE_PDM 86
+#define X1E80100_SLAVE_PRNG 87
+#define X1E80100_SLAVE_QDSS_CFG 88
+#define X1E80100_SLAVE_QDSS_STM 89
+#define X1E80100_SLAVE_QSPI_0 90
+#define X1E80100_SLAVE_QUP_1 91
+#define X1E80100_SLAVE_QUP_2 92
+#define X1E80100_SLAVE_QUP_CORE_0 93
+#define X1E80100_SLAVE_QUP_CORE_1 94
+#define X1E80100_SLAVE_QUP_CORE_2 95
+#define X1E80100_SLAVE_SDCC_2 96
+#define X1E80100_SLAVE_SDCC_4 97
+#define X1E80100_SLAVE_SERVICE_MNOC 98
+#define X1E80100_SLAVE_SNOC_GEM_NOC_SF 99
+#define X1E80100_SLAVE_TCSR 100
+#define X1E80100_SLAVE_TCU 101
+#define X1E80100_SLAVE_TLMM 102
+#define X1E80100_SLAVE_TME_CFG 103
+#define X1E80100_SLAVE_UFS_MEM_CFG 104
+#define X1E80100_SLAVE_USB3_0 105
+#define X1E80100_SLAVE_VENUS_CFG 106
+#define X1E80100_MASTER_DDR_PERF_MODE 107
+#define X1E80100_MASTER_QUP_CORE_2 108
+#define X1E80100_MASTER_PCIE_TCU 109
+#define X1E80100_MASTER_GIC2 110
+#define X1E80100_MASTER_AV1_ENC 111
+#define X1E80100_MASTER_EVA 112
+#define X1E80100_MASTER_PCIE_NORTH 113
+#define X1E80100_MASTER_PCIE_SOUTH 114
+#define X1E80100_MASTER_PCIE_3 115
+#define X1E80100_MASTER_PCIE_4 116
+#define X1E80100_MASTER_PCIE_5 117
+#define X1E80100_MASTER_PCIE_2 118
+#define X1E80100_MASTER_PCIE_6A 119
+#define X1E80100_MASTER_PCIE_6B 120
+#define X1E80100_MASTER_GIC1 121
+#define X1E80100_MASTER_USB_NOC_SNOC 122
+#define X1E80100_MASTER_AGGRE_USB_NORTH 123
+#define X1E80100_MASTER_AGGRE_USB_SOUTH 124
+#define X1E80100_MASTER_USB2 125
+#define X1E80100_MASTER_USB3_MP 126
+#define X1E80100_MASTER_USB3_1 127
+#define X1E80100_MASTER_USB3_2 128
+#define X1E80100_MASTER_USB4_0 129
+#define X1E80100_MASTER_USB4_1 130
+#define X1E80100_MASTER_USB4_2 131
+#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE 132
+#define X1E80100_MASTER_LLCC_PCIE 133
+#define X1E80100_MASTER_PCIE_NORTH_PCIE 134
+#define X1E80100_MASTER_PCIE_SOUTH_PCIE 135
+#define X1E80100_MASTER_PCIE_3_PCIE 136
+#define X1E80100_MASTER_PCIE_4_PCIE 137
+#define X1E80100_MASTER_PCIE_5_PCIE 138
+#define X1E80100_MASTER_PCIE_0_PCIE 139
+#define X1E80100_MASTER_PCIE_1_PCIE 140
+#define X1E80100_MASTER_PCIE_2_PCIE 141
+#define X1E80100_MASTER_PCIE_6A_PCIE 142
+#define X1E80100_MASTER_PCIE_6B_PCIE 143
+#define X1E80100_SLAVE_AHB2PHY_2 144
+#define X1E80100_SLAVE_AV1_ENC_CFG 145
+#define X1E80100_SLAVE_PCIE_2_CFG 146
+#define X1E80100_SLAVE_PCIE_3_CFG 147
+#define X1E80100_SLAVE_PCIE_4_CFG 148
+#define X1E80100_SLAVE_PCIE_5_CFG 149
+#define X1E80100_SLAVE_PCIE_6A_CFG 150
+#define X1E80100_SLAVE_PCIE_6B_CFG 151
+#define X1E80100_SLAVE_PCIE_RSC_CFG 152
+#define X1E80100_SLAVE_QUP_0 153
+#define X1E80100_SLAVE_SMMUV3_CFG 154
+#define X1E80100_SLAVE_USB2 155
+#define X1E80100_SLAVE_USB3_1 156
+#define X1E80100_SLAVE_USB3_2 157
+#define X1E80100_SLAVE_USB3_MP 158
+#define X1E80100_SLAVE_USB4_0 159
+#define X1E80100_SLAVE_USB4_1 160
+#define X1E80100_SLAVE_USB4_2 161
+#define X1E80100_SLAVE_PCIE_2 162
+#define X1E80100_SLAVE_PCIE_3 163
+#define X1E80100_SLAVE_PCIE_4 164
+#define X1E80100_SLAVE_PCIE_5 165
+#define X1E80100_SLAVE_PCIE_6A 166
+#define X1E80100_SLAVE_PCIE_6B 167
+#define X1E80100_SLAVE_DDR_PERF_MODE 168
+#define X1E80100_SLAVE_PCIE_NORTH 169
+#define X1E80100_SLAVE_PCIE_SOUTH 170
+#define X1E80100_SLAVE_USB_NOC_SNOC 171
+#define X1E80100_SLAVE_AGGRE_USB_NORTH 172
+#define X1E80100_SLAVE_AGGRE_USB_SOUTH 173
+#define X1E80100_SLAVE_LLCC_PCIE 174
+#define X1E80100_SLAVE_EBI1_PCIE 175
+#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE 176
+#define X1E80100_SLAVE_PCIE_NORTH_PCIE 177
+#define X1E80100_SLAVE_PCIE_SOUTH_PCIE 178
+
+#endif
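
These IDs are what the node tables in x1e80100.c index by. For orientation, a representative node definition (paraphrased as a sketch; the .channels/.buswidth values here are illustrative, not quoted from the file) ties an ID to its downstream link like so:

static struct qcom_icc_node qhm_qup1 = {
	.name = "qhm_qup1",
	.id = X1E80100_MASTER_QUP_1,
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	.links = { X1E80100_SLAVE_A1NOC_SNOC },
};

which is then placed at [MASTER_QUP_1] in aggre1_noc_nodes, as seen earlier.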
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
index ebf09bbf7..1ba14cb45 100644
--- a/drivers/interconnect/samsung/exynos.c
+++ b/drivers/interconnect/samsung/exynos.c
@@ -93,14 +93,12 @@ static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
return priv->node;
}
-static int exynos_generic_icc_remove(struct platform_device *pdev)
+static void exynos_generic_icc_remove(struct platform_device *pdev)
{
struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
icc_provider_deregister(&priv->provider);
icc_nodes_remove(&priv->provider);
-
- return 0;
}
static int exynos_generic_icc_probe(struct platform_device *pdev)
@@ -182,7 +180,7 @@ static struct platform_driver exynos_generic_icc_driver = {
.sync_state = icc_sync_state,
},
.probe = exynos_generic_icc_probe,
- .remove = exynos_generic_icc_remove,
+ .remove_new = exynos_generic_icc_remove,
};
module_platform_driver(exynos_generic_icc_driver);
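
This hunk is part of the tree-wide migration from .remove (returning int) to .remove_new (returning void): the platform core ignored the return value, so drivers that cannot fail removal drop the dead "return 0". Schematically (the foo_* names are placeholders):

/* before: the int return carried no information */
static int foo_remove(struct platform_device *pdev)
{
	foo_teardown(platform_get_drvdata(pdev));
	return 0;
}

/* after: same body, honest signature */
static void foo_remove(struct platform_device *pdev)
{
	foo_teardown(platform_get_drvdata(pdev));
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,
};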
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index a66f7df7a..9dbb55e74 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -160,6 +160,7 @@ config IOMMU_DMA
# Shared Virtual Addressing
config IOMMU_SVA
+ select IOMMU_MM_DATA
bool
config FSL_PAMU
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 86be1edd5..8b3601f28 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -53,10 +53,16 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size);
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
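
The amd_iommu_domain_flush_pages() declared here replaces the flush-TLB-then-wait pair at most call sites: it flushes only the touched range and already waits for completion internally. A minimal usage sketch, mirroring the call sites changed later in this patch:

unsigned long flags;

spin_lock_irqsave(&domain->lock, flags);
/* flushes the IOMMU TLB and device IOTLBs, then waits for completion */
amd_iommu_domain_flush_pages(domain, iova, size);
spin_unlock_irqrestore(&domain->lock, flags);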
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 90b7d7950..809d74faa 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -902,12 +902,6 @@ extern int amd_iommu_max_glx_val;
extern u64 amd_iommu_efr;
extern u64 amd_iommu_efr2;
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-void iommu_flush_all_caches(struct amd_iommu *iommu);
-
static inline int get_ioapic_devid(int id)
{
struct devid_map *entry;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7f65f3ecb..40979b0f5 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2226,7 +2226,7 @@ static int __init amd_iommu_init_pci(void)
init_device_table_dma(pci_seg);
for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
print_iommu_info();
@@ -2776,7 +2776,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
iommu_enable(iommu);
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
}
/*
@@ -2832,7 +2832,7 @@ static void early_enable_iommus(void)
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
iommu_set_device_table(iommu);
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
}
}
}
@@ -3296,7 +3296,7 @@ static int __init state_next(void)
uninit_device_table_dma(pci_seg);
for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
+ amd_iommu_flush_all_caches(iommu);
}
}
return ret;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 6c0621f6f..2a0d1e97e 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -369,6 +369,8 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
+ size_t size = pgcount << __ffs(pgsize);
+ unsigned long o_iova = iova;
BUG_ON(!IS_ALIGNED(iova, pgsize));
BUG_ON(!IS_ALIGNED(paddr, pgsize));
@@ -424,8 +426,7 @@ out:
* Updates and flushing already happened in
* increase_address_space().
*/
- amd_iommu_domain_flush_tlb_pde(dom);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_pages(dom, o_iova, size);
spin_unlock_irqrestore(&dom->lock, flags);
}
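
The size handed to the new flush helper is derived as pgcount << __ffs(pgsize); the iommu core passes map_pages() a single power-of-two pgsize, so __ffs() recovers the page shift. A quick worked example (values hypothetical):

/* pgsize = SZ_2M = 1 << 21, so __ffs(pgsize) = 21;
 * pgcount = 8 -> size = 8 << 21, i.e. 16 MiB flushed in one call
 * instead of a whole-domain TLB+PDE flush as before.
 */
size_t size = pgcount << __ffs(pgsize);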
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index f818a7e25..6d69ba607 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -244,7 +244,6 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
unsigned long mapped_size = 0;
unsigned long o_iova = iova;
size_t size = pgcount << __ffs(pgsize);
- int count = 0;
int ret = 0;
bool updated = false;
@@ -265,19 +264,14 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
*pte = set_pte_attr(paddr, map_size, prot);
- count++;
iova += map_size;
paddr += map_size;
mapped_size += map_size;
}
out:
- if (updated) {
- if (count > 1)
- amd_iommu_flush_tlb(&pdom->domain, 0);
- else
- amd_iommu_flush_page(&pdom->domain, 0, o_iova);
- }
+ if (updated)
+ amd_iommu_domain_flush_pages(pdom, o_iova, size);
if (mapped)
*mapped += mapped_size;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index fcc987f5d..4283dd819 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -64,7 +64,7 @@ LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops;
-const struct iommu_dirty_ops amd_dirty_ops;
+static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1;
@@ -85,6 +85,11 @@ static void detach_device(struct device *dev);
*
****************************************************************************/
+static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
+{
+ return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+}
+
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
@@ -551,8 +556,6 @@ static void amd_iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
- dev_iommu_priv_set(dev, NULL);
-
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
@@ -1124,68 +1127,44 @@ static inline u64 build_inv_address(u64 address, size_t size)
}
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
- size_t size, u16 domid, int pde)
+ size_t size, u16 domid,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[1] |= domid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
+ /* PDE bit - we want to flush everything, not only the PTEs */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ if (gn) {
+ cmd->data[0] |= pasid;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
- if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
- u64 address, size_t size)
+ u64 address, size_t size,
+ ioasid_t pasid, bool gn)
{
u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd));
+
cmd->data[0] = devid;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address);
- CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-}
-
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
- u64 address, bool size)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- address &= ~(0xfffULL);
-
- cmd->data[0] = pasid;
- cmd->data[1] = domid;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[3] = upper_32_bits(address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- if (size)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
- CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-}
-
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
- int qdep, u64 address, bool size)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- address &= ~(0xfffULL);
+ if (gn) {
+ cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
+ cmd->data[1] |= (pasid & 0xff) << 16;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
- cmd->data[0] = devid;
- cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
- cmd->data[0] |= (qdep & 0xff) << 24;
- cmd->data[1] = devid;
- cmd->data[1] |= (pasid & 0xff) << 16;
- cmd->data[2] = lower_32_bits(address);
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
- cmd->data[3] = upper_32_bits(address);
- if (size)
- cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
@@ -1341,7 +1320,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
}
@@ -1353,7 +1332,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- dom_id, 1);
+ dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd);
iommu_completion_wait(iommu);
@@ -1392,7 +1371,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu);
}
-void iommu_flush_all_caches(struct amd_iommu *iommu)
+void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{
if (check_feature(FEATURE_IA)) {
amd_iommu_flush_all(iommu);
@@ -1406,8 +1385,8 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
/*
* Command send function for flushing on-device TLB
*/
-static int device_flush_iotlb(struct iommu_dev_data *dev_data,
- u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
+ size_t size, ioasid_t pasid, bool gn)
{
struct amd_iommu *iommu;
struct iommu_cmd cmd;
@@ -1418,7 +1397,8 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
if (!iommu)
return -EINVAL;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
+ size, pasid, gn);
return iommu_queue_command(iommu, &cmd);
}
@@ -1464,8 +1444,11 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
- if (dev_data->ats_enabled)
- ret = device_flush_iotlb(dev_data, 0, ~0UL);
+ if (dev_data->ats_enabled) {
+ /* Invalidate the entire contents of an IOTLB */
+ ret = device_flush_iotlb(dev_data, 0, ~0UL,
+ IOMMU_NO_PASID, false);
+ }
return ret;
}
@@ -1476,13 +1459,18 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
* page. Otherwise it flushes the whole TLB of the IOMMU.
*/
static void __domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
int ret = 0, i;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain))
+ gn = true;
- build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i])
@@ -1500,17 +1488,21 @@ static void __domain_flush_pages(struct protection_domain *domain,
if (!dev_data->ats_enabled)
continue;
- ret |= device_flush_iotlb(dev_data, address, size);
+ ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
}
WARN_ON(ret);
}
-static void domain_flush_pages(struct protection_domain *domain,
- u64 address, size_t size, int pde)
+void amd_iommu_domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
{
if (likely(!amd_iommu_np_cache)) {
- __domain_flush_pages(domain, address, size, pde);
+ __domain_flush_pages(domain, address, size);
+
+ /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
+
return;
}
@@ -1543,16 +1535,20 @@ static void domain_flush_pages(struct protection_domain *domain,
flush_size = 1ul << min_alignment;
- __domain_flush_pages(domain, address, flush_size, pde);
+ __domain_flush_pages(domain, address, flush_size);
address += flush_size;
size -= flush_size;
}
+
+ /* Wait until IOMMU TLB and all device IOTLB flushes are complete */
+ amd_iommu_domain_flush_complete(domain);
}
/* Flush the whole IO/TLB for a given protection domain - including PDE */
-void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
+static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{
- domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+ amd_iommu_domain_flush_pages(domain, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@@ -1579,8 +1575,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
- domain_flush_pages(domain, iova, size, 1);
- amd_iommu_domain_flush_complete(domain);
+ amd_iommu_domain_flush_pages(domain, iova, size);
spin_unlock_irqrestore(&domain->lock, flags);
}
}
@@ -1858,11 +1853,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
- /* Flush IOTLB */
- amd_iommu_domain_flush_tlb_pde(domain);
-
- /* Wait for the flushes to finish */
- amd_iommu_domain_flush_complete(domain);
+ /* Flush IOTLB and wait for the flushes to finish */
+ amd_iommu_domain_flush_all(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
@@ -1896,15 +1888,6 @@ static int attach_device(struct device *dev,
do_attach(dev_data, domain);
- /*
- * We might boot into a crash-kernel here. The crashed kernel
- * left the caches in the IOMMU dirty. So we have to flush
- * here to evict all dirty stuff.
- */
- amd_iommu_domain_flush_tlb_pde(domain);
-
- amd_iommu_domain_flush_complete(domain);
-
out:
spin_unlock(&dev_data->lock);
@@ -2048,8 +2031,7 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_tlb_pde(domain);
- amd_iommu_domain_flush_complete(domain);
+ amd_iommu_domain_flush_all(domain);
}
/*****************************************************************************
@@ -2482,10 +2464,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
- if (domain_flush) {
- amd_iommu_domain_flush_tlb_pde(pdomain);
- amd_iommu_domain_flush_complete(pdomain);
- }
+ if (domain_flush)
+ amd_iommu_domain_flush_all(pdomain);
+
pdomain->dirty_tracking = enable;
spin_unlock_irqrestore(&pdomain->lock, flags);
@@ -2588,8 +2569,7 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- amd_iommu_domain_flush_tlb_pde(dom);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_all(dom);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2600,8 +2580,8 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
unsigned long flags;
spin_lock_irqsave(&dom->lock, flags);
- domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
- amd_iommu_domain_flush_complete(dom);
+ amd_iommu_domain_flush_pages(dom, gather->start,
+ gather->end - gather->start + 1);
spin_unlock_irqrestore(&dom->lock, flags);
}
@@ -2635,7 +2615,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
}
-const struct iommu_dirty_ops amd_dirty_ops = {
+static const struct iommu_dirty_ops amd_dirty_ops = {
.set_dirty_tracking = amd_iommu_set_dirty_tracking,
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};
@@ -2666,7 +2646,7 @@ const struct iommu_ops amd_iommu_ops = {
};
static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, bool size)
+ u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
@@ -2675,7 +2655,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
if (!(domain->flags & PD_IOMMUV2_MASK))
return -EINVAL;
- build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
/*
* IOMMU TLB needs to be flushed before Device TLB to
@@ -2709,8 +2689,8 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu)
continue;
- build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
- qdep, address, size);
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
+ address, size, pasid, true);
ret = iommu_queue_command(iommu, &cmd);
if (ret != 0)
@@ -2730,7 +2710,7 @@ out:
static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
u64 address)
{
- return __flush_pasid(domain, pasid, address, false);
+ return __flush_pasid(domain, pasid, address, PAGE_SIZE);
}
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
@@ -2749,8 +2729,7 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
- return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
- true);
+ return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
@@ -3111,8 +3090,8 @@ out:
return index;
}
-static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
- struct irte_ga *irte)
+static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
{
struct irq_remap_table *table;
struct irte_ga *entry;
@@ -3139,6 +3118,18 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
raw_spin_unlock_irqrestore(&table->lock, flags);
+ return 0;
+}
+
+static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
+ struct irte_ga *irte)
+{
+ int ret;
+
+ ret = __modify_irte_ga(iommu, devid, index, irte);
+ if (ret)
+ return ret;
+
iommu_flush_irt_and_complete(iommu, devid);
return 0;
@@ -3357,7 +3348,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
data->irq_2_irte.devid = devid;
data->irq_2_irte.index = index + sub_handle;
- iommu->irte_ops->prepare(data->entry, apic->delivery_mode,
+ iommu->irte_ops->prepare(data->entry, APIC_DELIVERY_MODE_FIXED,
apic->dest_mode_logical, irq_cfg->vector,
irq_cfg->dest_apicid, devid);
@@ -3634,7 +3625,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->dest_mode_logical;
- entry->lo.fields_remap.int_type = apic->delivery_mode;
+ entry->lo.fields_remap.int_type = APIC_DELIVERY_MODE_FIXED;
entry->hi.fields.vector = cfg->vector;
entry->lo.fields_remap.destination =
APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
@@ -3822,8 +3813,8 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
}
entry->lo.fields_vapic.is_run = is_run;
- return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
- ir_data->irq_2_irte.index, entry);
+ return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif
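
The net effect of the invalidation rework above is one builder per command type, parameterised by (pasid, gn), instead of parallel *_pasid variants. Two representative invocations, sketched from the call sites in this file:

struct iommu_cmd cmd;

/* whole-domain flush, no PASID (GN clear) */
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
		      dom_id, IOMMU_NO_PASID, false);

/* single-page flush tagged with a PASID (GN set) */
build_inv_iommu_pages(&cmd, iova, PAGE_SIZE, dom_id, pasid, true);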
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index ee05f4824..ef3ee9570 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -81,6 +81,7 @@
#define DART_T8020_TCR_BYPASS_DAPF BIT(12)
#define DART_T8020_TTBR 0x200
+#define DART_T8020_USB4_TTBR 0x400
#define DART_T8020_TTBR_VALID BIT(31)
#define DART_T8020_TTBR_ADDR_FIELD_SHIFT 0
#define DART_T8020_TTBR_SHIFT 12
@@ -368,12 +369,14 @@ apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
u32 command)
{
unsigned long flags;
- int ret;
+ int ret, i;
u32 command_reg;
spin_lock_irqsave(&stream_map->dart->lock, flags);
- writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT);
+ for (i = 0; i < BITS_TO_U32(stream_map->dart->num_streams); i++)
+ writel(stream_map->sidmap[i],
+ stream_map->dart->regs + DART_T8020_STREAM_SELECT + 4 * i);
writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND);
ret = readl_poll_timeout_atomic(
@@ -740,7 +743,6 @@ static void apple_dart_release_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -908,7 +910,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
ret = apple_dart_merge_master_cfg(group_master_cfg, cfg);
if (ret) {
- dev_err(dev, "Failed to merge DART IOMMU grups.\n");
+ dev_err(dev, "Failed to merge DART IOMMU groups.\n");
iommu_group_put(group);
res = ERR_PTR(ret);
goto out;
@@ -1215,6 +1217,33 @@ static const struct apple_dart_hw apple_dart_hw_t8103 = {
.ttbr_shift = DART_T8020_TTBR_SHIFT,
.ttbr_count = 4,
};
+
+static const struct apple_dart_hw apple_dart_hw_t8103_usb4 = {
+ .type = DART_T8020,
+ .irq_handler = apple_dart_t8020_irq,
+ .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
+ .oas = 36,
+ .fmt = APPLE_DART,
+ .max_sid_count = 64,
+
+ .enable_streams = DART_T8020_STREAMS_ENABLE,
+ .lock = DART_T8020_CONFIG,
+ .lock_bit = DART_T8020_CONFIG_LOCK,
+
+ .error = DART_T8020_ERROR,
+
+ .tcr = DART_T8020_TCR,
+ .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = 0,
+
+ .ttbr = DART_T8020_USB4_TTBR,
+ .ttbr_valid = DART_T8020_TTBR_VALID,
+ .ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
+ .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ .ttbr_count = 4,
+};
+
static const struct apple_dart_hw apple_dart_hw_t6000 = {
.type = DART_T6000,
.irq_handler = apple_dart_t8020_irq,
@@ -1272,7 +1301,7 @@ static __maybe_unused int apple_dart_suspend(struct device *dev)
unsigned int sid, idx;
for (sid = 0; sid < dart->num_streams; sid++) {
- dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid));
+ dart->save_tcr[sid] = readl(dart->regs + DART_TCR(dart, sid));
for (idx = 0; idx < dart->hw->ttbr_count; idx++)
dart->save_ttbr[sid][idx] =
readl(dart->regs + DART_TTBR(dart, sid, idx));
@@ -1307,6 +1336,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dar
static const struct of_device_id apple_dart_of_match[] = {
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ { .compatible = "apple,t8103-usb4-dart", .data = &apple_dart_hw_t8103_usb4 },
{ .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 },
{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{},
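
The stream-command fix above matters for the new t8103 USB4 variant, whose max_sid_count of 64 no longer fits in a single 32-bit DART_T8020_STREAM_SELECT write. The arithmetic, spelled out against the loop from the hunk:

/* num_streams = 64: BITS_TO_U32(64) = 2, so the sidmap spans two
 * registers at STREAM_SELECT + 0 and STREAM_SELECT + 4; the old
 * single writel(sidmap[0]) silently ignored streams 32-63.
 */
for (i = 0; i < BITS_TO_U32(stream_map->dart->num_streams); i++)
	writel(stream_map->sidmap[i],
	       stream_map->dart->regs + DART_T8020_STREAM_SELECT + 4 * i);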
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 7445454c2..f3f2e47b6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1063,6 +1063,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
bool cd_live;
__le64 *cdptr;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
return -E2BIG;
@@ -1077,6 +1078,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
if (!cd) { /* (5) */
val = 0;
} else if (cd == &quiet_cd) { /* (4) */
+ if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+ val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
val |= CTXDESC_CD_0_TCR_EPD0;
} else if (cd_live) { /* (3) */
val &= ~CTXDESC_CD_0_ASID;
@@ -1249,7 +1252,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
- __le64 *dst)
+ struct arm_smmu_ste *dst)
{
/*
* This is hideously complicated, but we only really care about
@@ -1267,12 +1270,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
* 2. Write everything apart from dword 0, sync, write dword 0, sync
* 3. Update Config, sync
*/
- u64 val = le64_to_cpu(dst[0]);
+ u64 val = le64_to_cpu(dst->data[0]);
bool ste_live = false;
- struct arm_smmu_device *smmu = NULL;
+ struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
struct arm_smmu_s2_cfg *s2_cfg = NULL;
- struct arm_smmu_domain *smmu_domain = NULL;
+ struct arm_smmu_domain *smmu_domain = master->domain;
struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
@@ -1280,18 +1283,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
},
};
- if (master) {
- smmu_domain = master->domain;
- smmu = master->smmu;
- }
-
if (smmu_domain) {
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cd_table = &master->cd_table;
break;
case ARM_SMMU_DOMAIN_S2:
- case ARM_SMMU_DOMAIN_NESTED:
s2_cfg = &smmu_domain->s2_cfg;
break;
default:
@@ -1325,10 +1322,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
- dst[0] = cpu_to_le64(val);
- dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+ dst->data[0] = cpu_to_le64(val);
+ dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING));
- dst[2] = 0; /* Nuke the VMID */
+ dst->data[2] = 0; /* Nuke the VMID */
/*
* The SMMU can perform negative caching, so we must sync
* the STE regardless of whether the old value was live.
@@ -1343,7 +1340,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
BUG_ON(ste_live);
- dst[1] = cpu_to_le64(
+ dst->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
@@ -1352,7 +1349,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!master->stall_enabled)
- dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+ dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
@@ -1362,7 +1359,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (s2_cfg) {
BUG_ON(ste_live);
- dst[2] = cpu_to_le64(
+ dst->data[2] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
@@ -1371,18 +1368,18 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
- dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+ dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
if (master->ats_enabled)
- dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+ dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
STRTAB_STE_1_EATS_TRANS));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst[0], cpu_to_le64(val));
+ WRITE_ONCE(dst->data[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
@@ -1390,7 +1387,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
+static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
+ unsigned int nent, bool force)
{
unsigned int i;
u64 val = STRTAB_STE_0_V;
@@ -1401,11 +1399,11 @@ static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool fo
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab[0] = cpu_to_le64(val);
- strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- strtab[2] = 0;
- strtab += STRTAB_STE_DWORDS;
+ strtab->data[0] = cpu_to_le64(val);
+ strtab->data[1] = cpu_to_le64(FIELD_PREP(
+ STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+ strtab->data[2] = 0;
+ strtab++;
}
}
@@ -2171,7 +2169,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
fmt = ARM_64_LPAE_S1;
finalise_stage_fn = arm_smmu_domain_finalise_s1;
break;
- case ARM_SMMU_DOMAIN_NESTED:
case ARM_SMMU_DOMAIN_S2:
ias = smmu->ias;
oas = smmu->oas;
@@ -2209,26 +2206,23 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return 0;
}
-static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+static struct arm_smmu_ste *
+arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
- __le64 *step;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- struct arm_smmu_strtab_l1_desc *l1_desc;
- int idx;
+ unsigned int idx1, idx2;
/* Two-level walk */
- idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
- l1_desc = &cfg->l1_desc[idx];
- idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
- step = &l1_desc->l2ptr[idx];
+ idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
+ idx2 = sid & ((1 << STRTAB_SPLIT) - 1);
+ return &cfg->l1_desc[idx1].l2ptr[idx2];
} else {
/* Simple linear lookup */
- step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
+ return (struct arm_smmu_ste *)&cfg
+ ->strtab[sid * STRTAB_STE_DWORDS];
}
-
- return step;
}
static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
@@ -2238,7 +2232,8 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
for (i = 0; i < master->num_streams; ++i) {
u32 sid = master->streams[i].id;
- __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ struct arm_smmu_ste *step =
+ arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
@@ -2403,8 +2398,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EBUSY;
}
- arm_smmu_detach_dev(master);
-
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
@@ -2419,6 +2412,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
+ /*
+ * Prevent arm_smmu_share_asid() from trying to change the ASID
+ * of either the old or new domain while we are working on it.
+ * This allows the STE and the smmu_domain->devices list to
+ * be inconsistent during this routine.
+ */
+ mutex_lock(&arm_smmu_asid_lock);
+
+ arm_smmu_detach_dev(master);
+
master->domain = smmu_domain;
/*
@@ -2444,13 +2447,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
}
- /*
- * Prevent SVA from concurrently modifying the CD or writing to
- * the CD entry
- */
- mutex_lock(&arm_smmu_asid_lock);
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- mutex_unlock(&arm_smmu_asid_lock);
if (ret) {
master->domain = NULL;
goto out_list_del;
@@ -2460,13 +2457,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_install_ste_for_dev(master);
arm_smmu_enable_ats(master);
- return 0;
+ goto out_unlock;
out_list_del:
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_del(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+out_unlock:
+ mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
@@ -2649,9 +2648,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return ERR_PTR(-ENODEV);
-
if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
return ERR_PTR(-EBUSY);
@@ -2698,7 +2694,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
err_free_master:
kfree(master);
- dev_iommu_priv_set(dev, NULL);
return ERR_PTR(ret);
}
@@ -2742,7 +2737,7 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
if (smmu_domain->smmu)
ret = -EPERM;
else
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -3769,7 +3764,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- __le64 *step;
+ struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;
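
The recurring change in this file replaces raw __le64 pointers into the stream table with struct arm_smmu_ste, so pointer arithmetic steps in whole entries rather than dwords. Schematically (illustrative, not a verbatim hunk):

/* before: the caller must remember the stride */
__le64 *ste = l2ptr;
ste[0] = cpu_to_le64(val);
ste += STRTAB_STE_DWORDS;	/* advance one STE = 8 dwords */

/* after: the struct carries the stride */
struct arm_smmu_ste *ste = l2ptr;
ste->data[0] = cpu_to_le64(val);
ste++;				/* advance one STE */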
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 961205ba8..65fb388d5 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -188,7 +188,7 @@
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
-#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER)
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_PAGE_ORDER)
#endif
/*
@@ -206,6 +206,11 @@
#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
#define STRTAB_STE_DWORDS 8
+
+struct arm_smmu_ste {
+ __le64 data[STRTAB_STE_DWORDS];
+};
+
#define STRTAB_STE_0_V (1UL << 0)
#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT 0
@@ -571,7 +576,7 @@ struct arm_smmu_priq {
struct arm_smmu_strtab_l1_desc {
u8 span;
- __le64 *l2ptr;
+ struct arm_smmu_ste *l2ptr;
dma_addr_t l2ptr_dma;
};
@@ -710,7 +715,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_NESTED,
ARM_SMMU_DOMAIN_BYPASS,
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index d326fa230..8b04ece00 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -246,6 +246,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,adreno-gmu" },
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
+ { .compatible = "qcom,qcm2290-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index d6d1a2a55..6317aaf7b 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -82,6 +82,23 @@ static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
pm_runtime_put_autosuspend(smmu->dev);
}
+static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
+{
+ /*
+ * Set up an autosuspend delay to avoid bouncing the runpm state.
+ * Otherwise, if a driver for a suspended consumer device
+ * unmaps buffers, it will runpm resume/suspend for each one.
+ *
+ * For example, when used by a GPU device, when an application
+ * or game exits, it can trigger unmapping 100s or 1000s of
+ * buffers. With a runpm cycle for each buffer, that adds up
+ * to 5-10 sec of reprogramming the context bank, while
+ * the system appears to be locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+}
+
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -392,8 +409,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
- struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain = dev;
struct arm_smmu_device *smmu = smmu_domain->smmu;
int idx = smmu_domain->cfg.cbndx;
int ret;
@@ -406,7 +422,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
- ret = report_iommu_fault(domain, NULL, iova,
+ ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS)
@@ -607,7 +623,7 @@ static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}
-static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev)
{
@@ -616,7 +632,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct iommu_domain *domain = &smmu_domain->domain;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
irqreturn_t (*context_fault)(int irq, void *dev);
@@ -624,12 +640,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- smmu_domain->smmu = smmu;
- goto out_unlock;
- }
-
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -796,8 +806,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
else
context_fault = arm_smmu_context_fault;
- ret = devm_request_irq(smmu->dev, irq, context_fault,
- IRQF_SHARED, "arm-smmu-context-fault", domain);
+ ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", smmu_domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -818,14 +828,13 @@ out_unlock:
return ret;
}
-static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
int ret, irq;
- if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (!smmu)
return;
ret = arm_smmu_rpm_get(smmu);
@@ -841,7 +850,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
irq = smmu->irqs[cfg->irptndx];
- devm_free_irq(smmu->dev, irq, domain);
+ devm_free_irq(smmu->dev, irq, smmu_domain);
}
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -854,7 +863,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
+ if (type != IOMMU_DOMAIN_UNMANAGED) {
if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
return NULL;
}
@@ -881,7 +890,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
* Free the domain resources. We assume that all devices have
* already been detached.
*/
- arm_smmu_destroy_domain_context(domain);
+ arm_smmu_destroy_domain_context(smmu_domain);
kfree(smmu_domain);
}
@@ -1081,21 +1090,14 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
mutex_unlock(&smmu->stream_map_mutex);
}
-static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg,
- struct iommu_fwspec *fwspec)
+static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
+ enum arm_smmu_s2cr_type type,
+ u8 cbndx, struct iommu_fwspec *fwspec)
{
- struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- u8 cbndx = smmu_domain->cfg.cbndx;
- enum arm_smmu_s2cr_type type;
int i, idx;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
- type = S2CR_TYPE_BYPASS;
- else
- type = S2CR_TYPE_TRANS;
-
for_each_cfg_sme(cfg, fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1105,7 +1107,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}
- return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1116,11 +1117,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu;
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops) {
- dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
- return -ENXIO;
- }
-
/*
* FIXME: The arch/arm DMA API code tries to attach devices to its own
* domains between of_xlate() and probe_device() - we have no way to cope
@@ -1139,7 +1135,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
- ret = arm_smmu_init_domain_context(domain, smmu, dev);
+ ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
if (ret < 0)
goto rpm_put;
@@ -1153,27 +1149,66 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
-
- /*
- * Setup an autosuspend delay to avoid bouncing runpm state.
- * Otherwise, if a driver for a suspended consumer device
- * unmaps buffers, it will runpm resume/suspend for each one.
- *
- * For example, when used by a GPU device, when an application
- * or game exits, it can trigger unmapping 100s or 1000s of
- * buffers. With a runpm cycle for each buffer, that adds up
- * to 5-10sec worth of reprogramming the context bank, while
- * the system appears to be locked up to the user.
- */
- pm_runtime_set_autosuspend_delay(smmu->dev, 20);
- pm_runtime_use_autosuspend(smmu->dev);
-
+ arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
+ smmu_domain->cfg.cbndx, fwspec);
+ arm_smmu_rpm_use_autosuspend(smmu);
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
+static int arm_smmu_attach_dev_type(struct device *dev,
+ enum arm_smmu_s2cr_type type)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_device *smmu;
+ int ret;
+
+ if (!cfg)
+ return -ENODEV;
+ smmu = cfg->smmu;
+
+ ret = arm_smmu_rpm_get(smmu);
+ if (ret < 0)
+ return ret;
+
+ arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
+ arm_smmu_rpm_use_autosuspend(smmu);
+ arm_smmu_rpm_put(smmu);
+ return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+ .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+ .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &arm_smmu_blocked_ops,
+};
+
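
A rough, runnable model (plain userspace C; structure and names simplified, not the driver's actual data layout) of what arm_smmu_master_install_s2crs() does for the three cases: a translating attach installs S2CR_TYPE_TRANS with a real context bank, while the static identity and blocked domains install S2CR_TYPE_BYPASS and S2CR_TYPE_FAULT with a cbndx of 0.

#include <stdio.h>
#include <stdint.h>

enum s2cr_type { S2CR_TYPE_TRANS, S2CR_TYPE_BYPASS, S2CR_TYPE_FAULT };

struct s2cr { enum s2cr_type type; uint8_t cbndx; };

/* Program every stream-map entry a master owns with one type/cbndx. */
static void install_s2crs(struct s2cr *s2crs, const int *smes, int n,
                          enum s2cr_type type, uint8_t cbndx)
{
        for (int i = 0; i < n; i++) {
                s2crs[smes[i]].type = type;
                s2crs[smes[i]].cbndx = cbndx;
        }
}

int main(void)
{
        struct s2cr s2crs[8] = { 0 };
        int smes[] = { 2, 5 };  /* stream indexes owned by the master */

        install_s2crs(s2crs, smes, 2, S2CR_TYPE_TRANS, 3);  /* attach_dev */
        install_s2crs(s2crs, smes, 2, S2CR_TYPE_BYPASS, 0); /* identity  */
        printf("stream 5: type=%d cbndx=%u\n",
               s2crs[5].type, s2crs[5].cbndx);
        return 0;
}

Keeping the identity and blocked domains as static singletons appears to avoid both a per-attach allocation and a context bank for these cases, which matches the cbndx 0 placeholder above.
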
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -1357,10 +1392,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
fwspec = dev_iommu_fwspec_get(dev);
if (ret)
goto out_free;
- } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
- smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
- return ERR_PTR(-ENODEV);
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
}
ret = -EINVAL;
@@ -1427,7 +1460,6 @@ static void arm_smmu_release_device(struct device *dev)
arm_smmu_rpm_put(cfg->smmu);
- dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -1560,6 +1592,8 @@ static int arm_smmu_def_domain_type(struct device *dev)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
@@ -2161,7 +2195,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
}
- err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
+ using_legacy_binding ? NULL : dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
iommu_device_sysfs_remove(&smmu->iommu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 703fd5817..836ed6799 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -361,7 +361,6 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
- ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 97b212203..17a1c163f 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -79,16 +79,6 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
static const struct iommu_ops qcom_iommu_ops;
-static struct qcom_iommu_dev * to_iommu(struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- if (!fwspec || fwspec->ops != &qcom_iommu_ops)
- return NULL;
-
- return dev_iommu_priv_get(dev);
-}
-
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
struct qcom_iommu_dev *qcom_iommu = d->iommu;
@@ -372,7 +362,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
int ret;
@@ -404,7 +394,7 @@ static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct qcom_iommu_domain *qcom_domain;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
unsigned int i;
if (domain == identity_domain || !domain)
@@ -535,7 +525,7 @@ static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
struct device_link *link;
if (!qcom_iommu)
@@ -900,8 +890,16 @@ static void qcom_iommu_device_remove(struct platform_device *pdev)
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+ if (ret < 0)
+ return ret;
+
+ if (dev->pm_domain)
+ return qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, 0);
- return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+ return ret;
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a0767ce1b..639efa0c4 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -885,7 +885,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
struct page **pages;
unsigned int i = 0, nid = dev_to_node(dev);
- order_mask &= GENMASK(MAX_ORDER, 0);
+ order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
if (!order_mask)
return NULL;
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index dee61e513..86b506af7 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -106,9 +106,6 @@ static const struct iommu_regset iommu_regs_64[] = {
IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
- IOMMU_REGSET_ENTRY(VCCAP),
- IOMMU_REGSET_ENTRY(VCMD),
- IOMMU_REGSET_ENTRY(VCRSP),
};
static struct dentry *intel_iommu_debug;
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 23cb80d62..36d7427b1 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1095,7 +1095,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->agaw = agaw;
iommu->msagaw = msagaw;
iommu->segment = drhd->segment;
-
+ iommu->device_rbtree = RB_ROOT;
+ spin_lock_init(&iommu->device_rbtree_lock);
+ mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
ver = readl(iommu->reg + DMAR_VER_REG);
@@ -1271,6 +1273,8 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
int head, tail;
+ struct device *dev;
+ u64 iqe_err, ite_sid;
struct q_inval *qi = iommu->qi;
int shift = qi_shift(iommu);
@@ -1315,6 +1319,13 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
tail = readl(iommu->reg + DMAR_IQT_REG);
tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
+ /*
+ * The SID field is valid only when the ITE field is set in
+ * FSTS_REG; see Intel VT-d spec r4.1, section 11.4.9.9.
+ */
+ iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
+ ite_sid = DMAR_IQER_REG_ITESID(iqe_err);
+
writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
pr_info("Invalidation Time-out Error (ITE) cleared\n");
@@ -1324,6 +1335,19 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
head = (head - 2 + QI_LENGTH) % QI_LENGTH;
} while (head != tail);
+ /*
+ * If the device was released or isn't present, there is no need
+ * to retry the ATS invalidate request anymore.
+ *
+ * An ite_sid value of 0 means an old VT-d device that does not
+ * report a SID; see Intel VT-d spec r4.1, section 11.4.9.9.
+ */
+ if (ite_sid) {
+ dev = device_rbtree_find(iommu, ite_sid);
+ if (!dev || !dev_is_pci(dev) ||
+ !pci_device_is_present(to_pci_dev(dev)))
+ return -ETIMEDOUT;
+ }
if (qi->desc_status[wait_index] == QI_ABORT)
return -EAGAIN;
}
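
The ITESID decode above relies on DMAR_IQER_REG_ITESID(), which this series defines as FIELD_GET(GENMASK_ULL(47, 32), reg). A small userspace rendering of that shift-and-mask (the sample register value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Userspace rendering of GENMASK_ULL(47, 32); the kernel's FIELD_GET()
 * performs the same mask-and-shift extraction. */
#define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint16_t iqer_itesid(uint64_t iqe_err)
{
        return (iqe_err & GENMASK_ULL(47, 32)) >> 32;
}

int main(void)
{
        uint64_t iqe_err = 0x00ab12cd00000000ULL; /* sample value */

        /* bits 47:32 = 0x12cd -> bus 0x12, devfn 0xcd */
        printf("ite_sid=0x%x\n", iqer_itesid(iqe_err));
        return 0;
}
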
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a8366b1f4..d7e10f131 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -46,9 +46,6 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
-#define MAX_AGAW_WIDTH 64
-#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
-
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
@@ -63,74 +60,6 @@
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-/* page table handling */
-#define LEVEL_STRIDE (9)
-#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
- return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
- return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
-}
-
-static inline int width_to_agaw(int width)
-{
- return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
- return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(u64 pfn, int level)
-{
- return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline u64 level_mask(int level)
-{
- return -1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 level_size(int level)
-{
- return 1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 align_to_level(u64 pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
- return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
-}
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
- return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
- return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
- return page_to_dma_pfn(virt_to_page(p));
-}
-
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
@@ -168,76 +97,79 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-static inline void context_set_present(struct context_entry *context)
+static int device_rid_cmp_key(const void *key, const struct rb_node *node)
{
- context->lo |= 1;
-}
+ struct device_domain_info *info =
+ rb_entry(node, struct device_domain_info, node);
+ const u16 *rid_lhs = key;
-static inline void context_set_fault_enable(struct context_entry *context)
-{
- context->lo &= (((u64)-1) << 2) | 1;
-}
+ if (*rid_lhs < PCI_DEVID(info->bus, info->devfn))
+ return -1;
-static inline void context_set_translation_type(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= (((u64)-1) << 4) | 3;
- context->lo |= (value & 3) << 2;
-}
+ if (*rid_lhs > PCI_DEVID(info->bus, info->devfn))
+ return 1;
-static inline void context_set_address_root(struct context_entry *context,
- unsigned long value)
-{
- context->lo &= ~VTD_PAGE_MASK;
- context->lo |= value & VTD_PAGE_MASK;
+ return 0;
}
-static inline void context_set_address_width(struct context_entry *context,
- unsigned long value)
+static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
{
- context->hi |= value & 7;
-}
+ struct device_domain_info *info =
+ rb_entry(lhs, struct device_domain_info, node);
+ u16 key = PCI_DEVID(info->bus, info->devfn);
-static inline void context_set_domain_id(struct context_entry *context,
- unsigned long value)
-{
- context->hi |= (value & ((1 << 16) - 1)) << 8;
+ return device_rid_cmp_key(&key, rhs);
}
-static inline void context_set_pasid(struct context_entry *context)
+/*
+ * Looks up an IOMMU-probed device using its source ID.
+ *
+ * Returns the pointer to the device if there is a match. Otherwise,
+ * returns NULL.
+ *
+ * Note that this helper doesn't guarantee that the device won't be
+ * released by the iommu subsystem after being returned. The caller
+ * should use its own synchronization mechanism to avoid the device
+ * being released during its use if that is possibly the case.
+ */
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
{
- context->lo |= CONTEXT_PASIDE;
-}
+ struct device_domain_info *info = NULL;
+ struct rb_node *node;
+ unsigned long flags;
-static inline int context_domain_id(struct context_entry *c)
-{
- return((c->hi >> 8) & 0xffff);
-}
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
+ if (node)
+ info = rb_entry(node, struct device_domain_info, node);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
-static inline void context_clear_entry(struct context_entry *context)
-{
- context->lo = 0;
- context->hi = 0;
+ return info ? info->dev : NULL;
}
-static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static int device_rbtree_insert(struct intel_iommu *iommu,
+ struct device_domain_info *info)
{
- if (!iommu->copied_tables)
- return false;
+ struct rb_node *curr;
+ unsigned long flags;
- return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
-}
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+ if (WARN_ON(curr))
+ return -EEXIST;
-static inline void
-set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
- set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+ return 0;
}
-static inline void
-clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void device_rbtree_remove(struct device_domain_info *info)
{
- clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ rb_erase(&info->node, &iommu->device_rbtree);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
}
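
A runnable userspace model of the lookup device_rbtree_find() performs (a sorted array plus bsearch(3) stands in for the kernel's rb_find(); names are illustrative): the key is the 16-bit source ID, PCI_DEVID(bus, devfn) = bus << 8 | devfn, and the comparator has the same shape as device_rid_cmp_key().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PCI_DEVID(bus, devfn) ((uint16_t)(((bus) << 8) | (devfn)))

struct dev_info { uint8_t bus, devfn; const char *name; };

/* Same ordering as device_rid_cmp_key(): compare a u16 RID key
 * against PCI_DEVID(bus, devfn) of the stored entry. */
static int rid_cmp_key(const void *key, const void *elem)
{
        uint16_t rid = *(const uint16_t *)key;
        const struct dev_info *info = elem;
        uint16_t devid = PCI_DEVID(info->bus, info->devfn);

        return (rid > devid) - (rid < devid);
}

int main(void)
{
        /* kept sorted by RID, as the rbtree keeps its nodes ordered */
        struct dev_info devs[] = {
                { 0x00, 0x10, "00:02.0" },
                { 0x03, 0x00, "03:00.0" },
                { 0x03, 0x08, "03:01.0" },
        };
        uint16_t rid = PCI_DEVID(0x03, 0x00);
        struct dev_info *hit = bsearch(&rid, devs, 3, sizeof(devs[0]),
                                       rid_cmp_key);

        printf("rid 0x%04x -> %s\n", rid, hit ? hit->name : "not found");
        return 0;
}

In the driver this lookup runs from the invalidation timeout path, which cannot sleep, so an rbtree under a spinlock fits better than a sleeping lookup would.
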
/*
@@ -383,13 +315,12 @@ void free_pgtable_page(void *vaddr)
free_page((unsigned long)vaddr);
}
-static inline int domain_type_is_si(struct dmar_domain *domain)
+static int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
}
-static inline int domain_pfn_supported(struct dmar_domain *domain,
- unsigned long pfn)
+static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
@@ -451,7 +382,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
-static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
@@ -701,7 +632,7 @@ static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
return false;
}
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct pci_dev *pdev = NULL;
@@ -1510,6 +1441,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages,
+ int ih)
+{
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned int mask = ilog2(aligned_pages);
+ u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ /*
+ * Fallback to domain selective flush if no PSI support or
+ * the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+}
+
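
A userspace transcription of the mask computation in __iommu_flush_iotlb_psi() above (a sketch for illustration; __roundup_pow_of_two(), ilog2() and __ffs() are replaced with builtins): it finds the smallest naturally aligned region that covers [pfn, pfn + pages) when pfn is not aligned to the range size.

#include <stdio.h>
#include <stdint.h>

static unsigned int psi_mask(uint64_t pfn, uint64_t pages)
{
        uint64_t aligned_pages = 1;
        unsigned int mask;

        while (aligned_pages < pages)   /* __roundup_pow_of_two() */
                aligned_pages <<= 1;
        mask = __builtin_ctzll(aligned_pages);  /* ilog2(), power of 2 */

        if ((aligned_pages - 1) & pfn) {
                uint64_t end_pfn = pfn + pages - 1;
                uint64_t shared = ~(pfn ^ end_pfn) & ~(aligned_pages - 1);

                mask = shared ? __builtin_ctzll(shared) : 64; /* __ffs() */
        }
        return mask;
}

int main(void)
{
        /* 4 pages at pfn 5 are not 4-aligned: the mask widens from 2
         * to 4, so the flushed region [0, 16) covers pfn 5..8. */
        printf("mask=%u\n", psi_mask(5, 4));
        return 0;
}
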
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1526,42 +1497,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- if (domain->use_first_level) {
+ if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- } else {
- unsigned long bitmask = aligned_pages - 1;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ else
+ __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1572,9 +1511,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
}
/* Notification for newly created mappings */
-static inline void __mapping_notify_one(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages)
+static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
{
/*
* It's a non-present to present mapping. Only flush if caching mode
@@ -1586,6 +1524,46 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
iommu_flush_write_buffer(iommu);
}
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+ unsigned long pfn,
+ unsigned long pages, int ih)
+{
+ struct dmar_domain *s1_domain;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ struct device_domain_info *device_info;
+ struct iommu_domain_info *info;
+ unsigned long flags;
+ unsigned long i;
+
+ xa_for_each(&s1_domain->iommu_array, i, info)
+ __iommu_flush_iotlb_psi(info->iommu, info->did,
+ pfn, pages, ih);
+
+ if (!s1_domain->has_iotlb_device)
+ continue;
+
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ list_for_each_entry(device_info, &s1_domain->devices, link)
+ /*
+ * The address translation cache on the device side caches
+ * the result of nested translation. There is no easy way
+ * to identify the exact set of nested translations
+ * affected by a change in S2. So just flush the entire
+ * device cache.
+ */
+ __iommu_flush_dev_iotlb(device_info, 0,
+ MAX_AGAW_PFN_WIDTH);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+}
+
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1605,6 +1583,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
+
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1841,7 +1822,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
spin_unlock(&iommu->lock);
}
-static inline int guestwidth_to_adjustwidth(int gaw)
+static int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
int r = (gaw - 12) % 9;
@@ -1875,7 +1856,7 @@ static void domain_exit(struct dmar_domain *domain)
* Value of X in the PDTS field of a scalable mode context entry
* indicates PASID directory with 2^(X + 7) entries.
*/
-static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+static unsigned long context_get_sm_pds(struct pasid_table *table)
{
unsigned long pds, max_pde;
@@ -1887,38 +1868,6 @@ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
return pds - 7;
}
-/*
- * Set the RID_PASID field of a scalable mode context entry. The
- * IOMMU hardware will use the PASID value set in this field for
- * DMA translations of DMA requests without PASID.
- */
-static inline void
-context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
-{
- context->hi |= pasid & ((1 << 20) - 1);
-}
-
-/*
- * Set the DTE(Device-TLB Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_dte(struct context_entry *context)
-{
- context->lo |= BIT_ULL(2);
-}
-
-/*
- * Set the PRE(Page Request Enable) field of a scalable mode context
- * entry.
- */
-static inline void context_set_sm_pre(struct context_entry *context)
-{
- context->lo |= BIT_ULL(4);
-}
-
-/* Convert value to context PASID directory size field coding. */
-#define context_pdts(pds) (((pds) & 0x7) << 9)
-
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
struct pasid_table *table,
@@ -2079,14 +2028,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct domain_context_mapping_data data;
+ struct intel_iommu *iommu = info->iommu;
+ u8 bus = info->bus, devfn = info->devfn;
struct pasid_table *table;
- struct intel_iommu *iommu;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
table = intel_pasid_get_table(dev);
@@ -2103,18 +2049,15 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
}
/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
+static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
host_addr &= ~PAGE_MASK;
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
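
A minimal runnable demo of the aligned_nrpages() arithmetic, assuming 4 KiB MM pages and the 4 KiB VTD_PAGE_SHIFT: the in-page offset of the host address is added back before rounding so partial head and tail pages are counted.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT  12

/* Same arithmetic as aligned_nrpages(): count VT-d pages, rounded
 * out to whole MM pages. */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
        host_addr &= ~PAGE_MASK;        /* keep only the in-page offset */
        return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
        /* 100 bytes ending just past a page boundary -> 2 pages */
        printf("%lu\n", aligned_nrpages(0x1fc0, 100));
        return 0;
}
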
/* Return largest possible superpage level for a given mapping */
-static inline int hardware_largepage_caps(struct dmar_domain *domain,
- unsigned long iov_pfn,
- unsigned long phy_pfn,
- unsigned long pages)
+static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phy_pfn, unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
@@ -2166,6 +2109,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
+ if (domain->nested_parent)
+ parent_domain_flush(domain, start_pfn,
+ lvl_pages, 0);
}
pte++;
@@ -2447,15 +2393,10 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
unsigned long flags;
- u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
-
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
@@ -2468,7 +2409,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu, domain,
+ ret = intel_pasid_setup_pass_through(iommu,
dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
@@ -3613,7 +3554,7 @@ void intel_iommu_shutdown(void)
up_write(&dmar_global_lock);
}
-static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -3692,7 +3633,7 @@ const struct attribute_group *intel_iommu_groups[] = {
NULL,
};
-static inline bool has_external_pci(void)
+static bool has_external_pci(void)
{
struct pci_dev *pdev = NULL;
@@ -3933,30 +3874,6 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
- struct intel_iommu *iommu = info->iommu;
- unsigned long flags;
-
- if (!dev_is_real_dma_subdevice(info->dev)) {
- if (dev_is_pci(info->dev) && sm_supported(iommu))
- intel_pasid_tear_down_entry(iommu, info->dev,
- IOMMU_NO_PASID, false);
-
- iommu_disable_pci_caps(info);
- domain_context_clear(info);
- }
-
- spin_lock_irqsave(&domain->lock, flags);
- list_del(&info->link);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- domain_detach_iommu(domain, iommu);
- info->domain = NULL;
-}
-
/*
* Clear the page table pointer in context or pasid table entries so that
* all DMA requests without PASID from the device are blocked. If the page
@@ -4127,14 +4044,11 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
int prepare_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
int addr_width;
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return -ENODEV;
-
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;
@@ -4306,6 +4220,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, start_pfn, nrpages,
+ list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4411,7 +4328,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_lookup_iommu(dev, &bus, &devfn);
if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
@@ -4464,30 +4381,47 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
+ if (pdev && pci_ats_supported(pdev)) {
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
+ }
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
- dev_iommu_priv_set(dev, NULL);
- kfree(info);
- return ERR_PTR(ret);
+ goto clear_rbtree;
}
}
intel_iommu_debugfs_create_dev(info);
return &iommu->iommu;
+clear_rbtree:
+ device_rbtree_remove(info);
+free:
+ kfree(info);
+
+ return ERR_PTR(ret);
}
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ mutex_lock(&iommu->iopf_lock);
+ if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+ device_rbtree_remove(info);
+ mutex_unlock(&iommu->iopf_lock);
+
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ !context_copied(iommu, info->bus, info->devfn))
+ intel_pasid_teardown_sm_context(dev);
- dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
- dev_iommu_priv_set(dev, NULL);
kfree(info);
set_dma_ops(dev, NULL);
}
@@ -4747,8 +4681,9 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dev_pasid_info *curr, *dev_pasid = NULL;
+ struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
unsigned long flags;
@@ -4819,8 +4754,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
goto out_free;
if (domain_type_is_si(dmar_domain))
- ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
- dev, pasid);
+ ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
else if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);
@@ -4991,6 +4925,7 @@ static const struct iommu_dirty_ops intel_dirty_ops = {
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
+ .release_domain = &blocking_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index efc00d2b4..cd267ba64 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -140,9 +140,6 @@
#define DMAR_ECEO_REG 0x408
#define DMAR_ECRSP_REG 0x410
#define DMAR_ECCAP_REG 0x430
-#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
-#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
-#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
@@ -722,9 +719,16 @@ struct intel_iommu {
#endif
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
+ /* Synchronization between fault report and iommu device release. */
+ struct mutex iopf_lock;
struct q_inval *qi; /* Queued invalidation info */
u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
+ /* rb tree for all probed devices */
+ struct rb_root device_rbtree;
+ /* protect the device_rbtree */
+ spinlock_t device_rbtree_lock;
+
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
struct irq_domain *ir_domain;
@@ -758,6 +762,8 @@ struct device_domain_info {
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
struct pasid_table *pasid_table; /* pasid table */
+ /* device tracking node (lookup by PCI RID) */
+ struct rb_node node;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
@@ -860,6 +866,181 @@ static inline bool context_present(struct context_entry *context)
return (context->lo & 1);
}
+#define LEVEL_STRIDE (9)
+#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+#define MAX_AGAW_WIDTH (64)
+#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
+
+static inline int agaw_to_level(int agaw)
+{
+ return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+ return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
+}
+
+static inline int width_to_agaw(int width)
+{
+ return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+ return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(u64 pfn, int level)
+{
+ return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline u64 level_mask(int level)
+{
+ return -1ULL << level_to_offset_bits(level);
+}
+
+static inline u64 level_size(int level)
+{
+ return 1ULL << level_to_offset_bits(level);
+}
+
+static inline u64 align_to_level(u64 pfn, int level)
+{
+ return (pfn + level_size(level) - 1) & level_mask(level);
+}
+
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+}
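
A small userspace demo of the level arithmetic now exposed in this header (a sketch; the pfn value is illustrative): each page-table level consumes LEVEL_STRIDE = 9 bits of the VT-d pfn, and pfn_level_offset() extracts the table index for a given level. With agaw = 2, agaw_to_level() gives a 4-level, 48-bit walk.

#include <stdio.h>
#include <stdint.h>

#define LEVEL_STRIDE    9
#define LEVEL_MASK      ((1ULL << LEVEL_STRIDE) - 1)

static unsigned int level_to_offset_bits(int level)
{
        return (level - 1) * LEVEL_STRIDE;
}

static int pfn_level_offset(uint64_t pfn, int level)
{
        return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

int main(void)
{
        uint64_t pfn = 0x12345; /* VT-d page frame number */
        int level;

        /* A 4-level (agaw = 2, 48-bit) walk indexes with these
         * offsets, 9 bits per level, lowest level last. */
        for (level = 4; level >= 1; level--)
                printf("level %d index %d\n", level,
                       pfn_level_offset(pfn, level));
        return 0;
}
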
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+ are never going to work. */
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
+{
+ return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
+}
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+ return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
+static inline unsigned long page_to_dma_pfn(struct page *pg)
+{
+ return mm_to_dma_pfn_start(page_to_pfn(pg));
+}
+static inline unsigned long virt_to_dma_pfn(void *p)
+{
+ return page_to_dma_pfn(virt_to_page(p));
+}
+
+static inline void context_set_present(struct context_entry *context)
+{
+ context->lo |= 1;
+}
+
+static inline void context_set_fault_enable(struct context_entry *context)
+{
+ context->lo &= (((u64)-1) << 2) | 1;
+}
+
+static inline void context_set_translation_type(struct context_entry *context,
+ unsigned long value)
+{
+ context->lo &= (((u64)-1) << 4) | 3;
+ context->lo |= (value & 3) << 2;
+}
+
+static inline void context_set_address_root(struct context_entry *context,
+ unsigned long value)
+{
+ context->lo &= ~VTD_PAGE_MASK;
+ context->lo |= value & VTD_PAGE_MASK;
+}
+
+static inline void context_set_address_width(struct context_entry *context,
+ unsigned long value)
+{
+ context->hi |= value & 7;
+}
+
+static inline void context_set_domain_id(struct context_entry *context,
+ unsigned long value)
+{
+ context->hi |= (value & ((1 << 16) - 1)) << 8;
+}
+
+static inline void context_set_pasid(struct context_entry *context)
+{
+ context->lo |= CONTEXT_PASIDE;
+}
+
+static inline int context_domain_id(struct context_entry *c)
+{
+ return((c->hi >> 8) & 0xffff);
+}
+
+static inline void context_clear_entry(struct context_entry *context)
+{
+ context->lo = 0;
+ context->hi = 0;
+}
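
A userspace round trip of the domain-ID packing used by context_set_domain_id() and context_domain_id() above (sketch for illustration): the 16-bit DID occupies bits 23:8 of the context entry's high qword.

#include <stdio.h>
#include <stdint.h>

struct context_entry { uint64_t lo, hi; };

/* Mirrors context_set_domain_id()/context_domain_id(): the 16-bit
 * domain ID sits at bits 23:8 of the high qword. */
static void set_domain_id(struct context_entry *c, unsigned long did)
{
        c->hi |= (did & 0xffffULL) << 8;
}

static int domain_id(const struct context_entry *c)
{
        return (c->hi >> 8) & 0xffff;
}

int main(void)
{
        struct context_entry c = { 0, 0 };

        set_domain_id(&c, 0x002a);
        printf("hi=%#llx did=%#x\n", (unsigned long long)c.hi,
               domain_id(&c));
        return 0;
}
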
+
+#ifdef CONFIG_INTEL_IOMMU
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ if (!iommu->copied_tables)
+ return false;
+
+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+#endif /* CONFIG_INTEL_IOMMU */
+
+/*
+ * Set the RID_PASID field of a scalable mode context entry. The
+ * IOMMU hardware will use the PASID value set in this field to
+ * translate DMA requests without PASID.
+ */
+static inline void
+context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
+{
+ context->hi |= pasid & ((1 << 20) - 1);
+}
+
+/*
+ * Set the DTE(Device-TLB Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_dte(struct context_entry *context)
+{
+ context->lo |= BIT_ULL(2);
+}
+
+/*
+ * Set the PRE(Page Request Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_pre(struct context_entry *context)
+{
+ context->lo |= BIT_ULL(4);
+}
+
+/* Convert value to context PASID directory size field coding. */
+#define context_pdts(pds) (((pds) & 0x7) << 9)
+
struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
int dmar_enable_qi(struct intel_iommu *iommu);
@@ -907,9 +1088,9 @@ int dmar_ir_support(void);
void *alloc_pgtable_page(int node, gfp_t gfp);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
const struct iommu_user_data *user_data);
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 29b9e55dc..566297bc8 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1112,7 +1112,7 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
* irq migration in the presence of interrupt-remapping.
*/
irte->trigger_mode = 0;
- irte->dlvry_mode = apic->delivery_mode;
+ irte->dlvry_mode = APIC_DELIVERY_MODE_FIXED;
irte->vector = vector;
irte->dest_id = IRTE_DEST(dest);
irte->redir_hint = 1;
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 92e82b33e..a7d68f3d5 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -81,9 +81,97 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
kfree(dmar_domain);
}
+static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
+ unsigned int mask)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+ u16 sid, qdep;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->ats_enabled)
+ continue;
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+ quirk_extra_dev_tlb_flush(info, addr, mask,
+ IOMMU_NO_PASID, qdep);
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
+ u64 npages, bool ih)
+{
+ struct iommu_domain_info *info;
+ unsigned int mask;
+ unsigned long i;
+
+ xa_for_each(&domain->iommu_array, i, info)
+ qi_flush_piotlb(info->iommu,
+ domain_id_iommu(domain, info->iommu),
+ IOMMU_NO_PASID, addr, npages, ih);
+
+ if (!domain->has_iotlb_device)
+ return;
+
+ if (npages == U64_MAX)
+ mask = 64 - VTD_PAGE_SHIFT;
+ else
+ mask = ilog2(__roundup_pow_of_two(npages));
+
+ nested_flush_dev_iotlb(domain, addr, mask);
+}
+
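
A runnable demo of the mask selection in intel_nested_flush_cache() above (userspace sketch; __builtin_ctzll stands in for ilog2 of a power of two): npages == U64_MAX requests a whole-address-space device-TLB flush, otherwise the page count is rounded up to a power of two and its log2 becomes the address mask.

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT  12

static unsigned int ats_inv_mask(uint64_t npages)
{
        uint64_t aligned = 1;

        if (npages == UINT64_MAX)
                return 64 - VTD_PAGE_SHIFT;
        while (aligned < npages)        /* __roundup_pow_of_two() */
                aligned <<= 1;
        return __builtin_ctzll(aligned);
}

int main(void)
{
        printf("npages=9     -> mask=%u\n", ats_inv_mask(9));
        printf("npages=~0ULL -> mask=%u\n", ats_inv_mask(UINT64_MAX));
        return 0;
}
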
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+ struct iommu_user_data_array *array)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct iommu_hwpt_vtd_s1_invalidate inv_entry;
+ u32 index, processed = 0;
+ int ret = 0;
+
+ if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (index = 0; index < array->entry_num; index++) {
+ ret = iommu_copy_struct_from_user_array(&inv_entry, array,
+ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+ index, __reserved);
+ if (ret)
+ break;
+
+ if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
+ inv_entry.__reserved) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
+ ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ intel_nested_flush_cache(dmar_domain, inv_entry.addr,
+ inv_entry.npages,
+ inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
+ processed++;
+ }
+
+out:
+ array->entry_num = processed;
+ return ret;
+}
+
static const struct iommu_domain_ops intel_nested_domain_ops = {
.attach_dev = intel_nested_attach_dev,
.free = intel_nested_domain_free,
+ .cache_invalidate_user = intel_nested_cache_invalidate_user,
};
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index cc2490e5c..a51e895d9 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -26,63 +26,6 @@
*/
u32 intel_pasid_max_id = PASID_MAX;
-int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
-{
- unsigned long flags;
- u8 status_code;
- int ret = 0;
- u64 res;
-
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
- IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
- !(res & VCMD_VRSP_IP), res);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- status_code = VCMD_VRSP_SC(res);
- switch (status_code) {
- case VCMD_VRSP_SC_SUCCESS:
- *pasid = VCMD_VRSP_RESULT_PASID(res);
- break;
- case VCMD_VRSP_SC_NO_PASID_AVAIL:
- pr_info("IOMMU: %s: No PASID available\n", iommu->name);
- ret = -ENOSPC;
- break;
- default:
- ret = -ENODEV;
- pr_warn("IOMMU: %s: Unexpected error code %d\n",
- iommu->name, status_code);
- }
-
- return ret;
-}
-
-void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
-{
- unsigned long flags;
- u8 status_code;
- u64 res;
-
- raw_spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writeq(iommu->reg + DMAR_VCMD_REG,
- VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
- IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
- !(res & VCMD_VRSP_IP), res);
- raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- status_code = VCMD_VRSP_SC(res);
- switch (status_code) {
- case VCMD_VRSP_SC_SUCCESS:
- break;
- case VCMD_VRSP_SC_INVALID_PASID:
- pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
- break;
- default:
- pr_warn("IOMMU: %s: Unexpected error code %d\n",
- iommu->name, status_code);
- }
-}
-
/*
* Per device pasid table management:
*/
@@ -230,30 +173,6 @@ retry:
/*
* Interfaces for PASID table entry manipulation:
*/
-static inline void pasid_clear_entry(struct pasid_entry *pe)
-{
- WRITE_ONCE(pe->val[0], 0);
- WRITE_ONCE(pe->val[1], 0);
- WRITE_ONCE(pe->val[2], 0);
- WRITE_ONCE(pe->val[3], 0);
- WRITE_ONCE(pe->val[4], 0);
- WRITE_ONCE(pe->val[5], 0);
- WRITE_ONCE(pe->val[6], 0);
- WRITE_ONCE(pe->val[7], 0);
-}
-
-static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
-{
- WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
- WRITE_ONCE(pe->val[1], 0);
- WRITE_ONCE(pe->val[2], 0);
- WRITE_ONCE(pe->val[3], 0);
- WRITE_ONCE(pe->val[4], 0);
- WRITE_ONCE(pe->val[5], 0);
- WRITE_ONCE(pe->val[6], 0);
- WRITE_ONCE(pe->val[7], 0);
-}
-
static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
@@ -269,192 +188,6 @@ intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
pasid_clear_entry(pe);
}
-static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
-{
- u64 old;
-
- old = READ_ONCE(*ptr);
- WRITE_ONCE(*ptr, (old & ~mask) | bits);
-}
-
-static inline u64 pasid_get_bits(u64 *ptr)
-{
- return READ_ONCE(*ptr);
-}
-
-/*
- * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
- * PASID entry.
- */
-static inline void
-pasid_set_domain_id(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
-}
-
-/*
- * Get domain ID value of a scalable mode PASID entry.
- */
-static inline u16
-pasid_get_domain_id(struct pasid_entry *pe)
-{
- return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
-}
-
-/*
- * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_slptr(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
-}
-
-/*
- * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
- * entry.
- */
-static inline void
-pasid_set_address_width(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
-}
-
-/*
- * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_translation_type(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
-}
-
-/*
- * Enable fault processing by clearing the FPD(Fault Processing
- * Disable) field (Bit 1) of a scalable mode PASID entry.
- */
-static inline void pasid_set_fault_enable(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 1, 0);
-}
-
-/*
- * Enable second level A/D bits by setting the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_ssade(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
-}
-
-/*
- * Disable second level A/D bits by clearing the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_clear_ssade(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 9, 0);
-}
-
-/*
- * Checks if second level A/D bits specifically the SLADE (Second Level
- * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
- * entry is set.
- */
-static inline bool pasid_get_ssade(struct pasid_entry *pe)
-{
- return pasid_get_bits(&pe->val[0]) & (1 << 9);
-}
-
-/*
- * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_sre(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 0, 1);
-}
-
-/*
- * Setup the WPE(Write Protect Enable) field (Bit 132) of a
- * scalable mode PASID entry.
- */
-static inline void pasid_set_wpe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
-}
-
-/*
- * Setup the P(Present) field (Bit 0) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_present(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[0], 1 << 0, 1);
-}
-
-/*
- * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
- * entry.
- */
-static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
-{
- pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
-}
-
-/*
- * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
- * entry. It is required when XD bit of the first level page table
- * entry is about to be set.
- */
-static inline void pasid_set_nxe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
-}
-
-/*
- * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
- * PASID entry.
- */
-static inline void
-pasid_set_pgsnp(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
-}
-
-/*
- * Setup the First Level Page table Pointer field (Bit 140~191)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_flptr(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
-}
-
-/*
- * Setup the First Level Paging Mode field (Bit 130~131) of a
- * scalable mode PASID entry.
- */
-static inline void
-pasid_set_flpm(struct pasid_entry *pe, u64 value)
-{
- pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
-}
-
-/*
- * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
- * of a scalable mode PASID entry.
- */
-static inline void pasid_set_eafe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
-}
-
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid)
@@ -616,9 +349,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
* Skip top levels of page tables for iommu which has less agaw
* than default. Unnecessary for PT mode.
*/
-static inline int iommu_skip_agaw(struct dmar_domain *domain,
- struct intel_iommu *iommu,
- struct dma_pte **pgd)
+static int iommu_skip_agaw(struct dmar_domain *domain,
+ struct intel_iommu *iommu,
+ struct dma_pte **pgd)
{
int agaw;
@@ -769,7 +502,6 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
* Set up the scalable mode pasid entry for passthrough translation type.
*/
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid)
{
u16 did = FLPT_DEFAULT_DID;
@@ -938,3 +670,67 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
return 0;
}
+
+/*
+ * Interfaces to set up or tear down the PASID table binding in a
+ * scalable-mode context table entry:
+ */
+
+static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, false);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+ spin_unlock(&iommu->lock);
+
+ /*
+ * Cache invalidation for changes to a scalable-mode context table
+ * entry.
+ *
+ * Section 6.5.3.3 of the VT-d spec:
+ * - Device-selective context-cache invalidation;
+ * - Domain-selective PASID-cache invalidation to affected domains
+ * (can be skipped if all PASID entries were not-present);
+ * - Domain-selective IOTLB invalidation to affected domains;
+ * - Global Device-TLB invalidation to affected functions.
+ *
+ * The iommu has been parked in the blocking state. All domains have
+ * been detached from the device or PASID. The PASID and IOTLB caches
+ * have been invalidated during the domain detach path.
+ */
+ iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+ devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+}
+
+static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev == &pdev->dev)
+ device_pasid_table_teardown(dev, PCI_BUS_NUM(alias), alias & 0xff);
+
+ return 0;
+}
+
+void intel_pasid_teardown_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev)) {
+ device_pasid_table_teardown(dev, info->bus, info->devfn);
+ return;
+ }
+
+ pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
+}
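For reference on the alias decoding in pci_pasid_table_teardown() above: a PCI requester ID/alias is the 16-bit bus/devfn pair, so for an alias of 0x1a10, PCI_BUS_NUM() yields bus 0x1a and the low byte 0x10 is the devfn (device 2, function 0). Walking pci_for_each_dma_alias() therefore clears the context entry for every RID the device may emit, including bridge-generated aliases.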
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 3568adca1..42fda97fd 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -22,16 +22,6 @@
#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
-/* Virtual command interface for enlightened pasid management. */
-#define VCMD_CMD_ALLOC 0x1
-#define VCMD_CMD_FREE 0x2
-#define VCMD_VRSP_IP 0x1
-#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
-#define VCMD_VRSP_SC_SUCCESS 0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
-#define VCMD_VRSP_SC_INVALID_PASID 16
-#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
-#define VCMD_CMD_OPERAND(e) ((e) << 16)
/*
* Domain ID reserved for pasid entries programmed for first-level
* only and pass-through transfer modes.
@@ -96,6 +86,216 @@ static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}
+static inline void pasid_clear_entry(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val[0], 0);
+ WRITE_ONCE(pe->val[1], 0);
+ WRITE_ONCE(pe->val[2], 0);
+ WRITE_ONCE(pe->val[3], 0);
+ WRITE_ONCE(pe->val[4], 0);
+ WRITE_ONCE(pe->val[5], 0);
+ WRITE_ONCE(pe->val[6], 0);
+ WRITE_ONCE(pe->val[7], 0);
+}
+
+static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
+ WRITE_ONCE(pe->val[1], 0);
+ WRITE_ONCE(pe->val[2], 0);
+ WRITE_ONCE(pe->val[3], 0);
+ WRITE_ONCE(pe->val[4], 0);
+ WRITE_ONCE(pe->val[5], 0);
+ WRITE_ONCE(pe->val[6], 0);
+ WRITE_ONCE(pe->val[7], 0);
+}
+
+static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
+{
+ u64 old;
+
+ old = READ_ONCE(*ptr);
+ WRITE_ONCE(*ptr, (old & ~mask) | bits);
+}
+
+static inline u64 pasid_get_bits(u64 *ptr)
+{
+ return READ_ONCE(*ptr);
+}
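For clarity, pasid_set_bits() is a plain read-modify-write of one 64-bit word of the 512-bit PASID entry: the mask selects the field and the shifted value is merged in. A minimal standalone sketch of the same pattern (plain C outside the kernel; names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same read-modify-write as pasid_set_bits(), minus READ_ONCE/WRITE_ONCE. */
static void set_bits(uint64_t *ptr, uint64_t mask, uint64_t bits)
{
	*ptr = (*ptr & ~mask) | (bits & mask);
}

int main(void)
{
	uint64_t val0 = 0;

	/* AW (address width) occupies bits 2..4: mask 0x1c, value << 2. */
	set_bits(&val0, 0x1c, 2ULL << 2);
	printf("val0 = %#llx\n", (unsigned long long)val0); /* prints 0x8 */
	return 0;
}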
+
+/*
+ * Setup the DID (Domain Identifier) field (Bit 64~79) of a scalable
+ * mode PASID entry.
+ */
+static inline void
+pasid_set_domain_id(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
+}
+
+/*
+ * Get domain ID value of a scalable mode PASID entry.
+ */
+static inline u16
+pasid_get_domain_id(struct pasid_entry *pe)
+{
+ return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
+}
+
+/*
+ * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_slptr(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
+}
+
+/*
+ * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
+ * entry.
+ */
+static inline void
+pasid_set_address_width(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
+}
+
+/*
+ * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_translation_type(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
+}
+
+/*
+ * Enable fault processing by clearing the FPD(Fault Processing
+ * Disable) field (Bit 1) of a scalable mode PASID entry.
+ */
+static inline void pasid_set_fault_enable(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 1, 0);
+}
+
+/*
+ * Enable second level A/D bits by setting the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_ssade(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
+}
+
+/*
+ * Disable second level A/D bits by clearing the SLADE (Second Level
+ * Access Dirty Enable) field (Bit 9) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_clear_ssade(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 9, 0);
+}
+
+/*
+ * Check whether the SLADE (Second Level Access Dirty Enable) field
+ * (Bit 9) of a scalable mode PASID entry is set.
+ */
+static inline bool pasid_get_ssade(struct pasid_entry *pe)
+{
+ return pasid_get_bits(&pe->val[0]) & (1 << 9);
+}
+
+/*
+ * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_sre(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 0, 1);
+}
+
+/*
+ * Setup the WPE(Write Protect Enable) field (Bit 132) of a
+ * scalable mode PASID entry.
+ */
+static inline void pasid_set_wpe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
+}
+
+/*
+ * Setup the P(Present) field (Bit 0) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_present(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[0], 1 << 0, 1);
+}
+
+/*
+ * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
+ * entry.
+ */
+static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+{
+ pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+}
+
+/*
+ * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
+ * entry. It is required when XD bit of the first level page table
+ * entry is about to be set.
+ */
+static inline void pasid_set_nxe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
+}
+
+/*
+ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+ * PASID entry.
+ */
+static inline void
+pasid_set_pgsnp(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
+}
+
+/*
+ * Setup the First Level Page table Pointer field (Bit 140~191)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_flptr(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
+}
+
+/*
+ * Setup the First Level Paging Mode field (Bit 130~131) of a
+ * scalable mode PASID entry.
+ */
+static inline void
+pasid_set_flpm(struct pasid_entry *pe, u64 value)
+{
+ pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
+}
+
+/*
+ * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
+ * of a scalable mode PASID entry.
+ */
+static inline void pasid_set_eafe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
+}
+
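Worked example of the bit numbering used in the comments above: bit N of the 512-bit entry lives in val[N / 64] at position N % 64. Hence DID (bits 64~79) is val[1] bits 0~15, SRE (bit 128) is val[2] bit 0, and EAFE (bit 135) is val[2] bit 7, matching the masks and shifts in each helper.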
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
@@ -110,15 +310,13 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
-int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
-void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
+void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
index cf43e798e..44083d018 100644
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
iommu_pmu_set_filter(domain, event->attr.config1,
IOMMU_PMU_FILTER_DOMAIN, idx,
event->attr.config1);
- iommu_pmu_set_filter(pasid, event->attr.config1,
+ iommu_pmu_set_filter(pasid, event->attr.config2,
IOMMU_PMU_FILTER_PASID, idx,
event->attr.config1);
iommu_pmu_set_filter(ats, event->attr.config2,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ac12f76c1..4d269df00 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -67,7 +67,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
@@ -316,21 +316,22 @@ out:
}
static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
- struct mm_struct *mm)
+ struct iommu_domain *domain, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct mm_struct *mm = domain->mm;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
unsigned long sflags;
int ret = 0;
- svm = pasid_private_find(mm->pasid);
+ svm = pasid_private_find(pasid);
if (!svm) {
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm)
return -ENOMEM;
- svm->pasid = mm->pasid;
+ svm->pasid = pasid;
svm->mm = mm;
INIT_LIST_HEAD_RCU(&svm->devs);
@@ -368,7 +369,7 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
- ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
+ ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
FLPT_DEFAULT_DID, sflags);
if (ret)
goto free_sdev;
@@ -382,7 +383,7 @@ free_sdev:
free_svm:
if (list_empty(&svm->devs)) {
mmu_notifier_unregister(&svm->notifier, mm);
- pasid_private_remove(mm->pasid);
+ pasid_private_remove(pasid);
kfree(svm);
}
@@ -392,14 +393,9 @@ free_svm:
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
- struct intel_iommu *iommu;
struct intel_svm *svm;
struct mm_struct *mm;
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return;
-
if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
return;
mm = svm->mm;
@@ -654,7 +650,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct intel_iommu *iommu = d;
struct page_req_dsc *req;
int head, tail, handled;
- struct pci_dev *pdev;
+ struct device *dev;
u64 address;
/*
@@ -700,23 +696,24 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance;
- pdev = pci_get_domain_bus_and_slot(iommu->segment,
- PCI_BUS_NUM(req->rid),
- req->rid & 0xff);
/*
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (!pdev)
+ mutex_lock(&iommu->iopf_lock);
+ dev = device_rbtree_find(iommu, req->rid);
+ if (!dev) {
+ mutex_unlock(&iommu->iopf_lock);
goto bad_req;
+ }
- if (intel_svm_prq_report(iommu, &pdev->dev, req))
+ if (intel_svm_prq_report(iommu, dev, req))
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
else
- trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
+ trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
req->priv_data[0], req->priv_data[1],
iommu->prq_seq_number++);
- pci_dev_put(pdev);
+ mutex_unlock(&iommu->iopf_lock);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
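device_rbtree_find() is a new intel-iommu helper that resolves the faulting request ID to its struct device under iommu->iopf_lock, replacing the reference-counted pci_get_domain_bus_and_slot() lookup. A hedged sketch of the same lookup pattern using the generic kernel rbtree API (types and names here are illustrative, not the driver's actual implementation):

#include <linux/rbtree.h>

struct rid_node {
	struct rb_node node;
	u16 rid;			/* key: PCI requester ID */
	struct device *dev;
};

static int rid_cmp(const void *key, const struct rb_node *n)
{
	u16 rid = *(const u16 *)key;
	const struct rid_node *e = rb_entry(n, struct rid_node, node);

	return (rid > e->rid) - (rid < e->rid);
}

/* Caller must hold the lock that serializes insertions and removals. */
static struct device *rid_lookup(struct rb_root *root, u16 rid)
{
	struct rb_node *n = rb_find(&rid, root, rid_cmp);

	return n ? rb_entry(n, struct rid_node, node)->dev : NULL;
}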
@@ -750,25 +747,16 @@ int intel_svm_page_response(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ u8 bus = info->bus, devfn = info->devfn;
struct iommu_fault_page_request *prm;
- struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
bool last_page;
- u8 bus, devfn;
int ret = 0;
u16 sid;
- if (!dev || !dev_is_pci(dev))
- return -ENODEV;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
-
- if (!msg || !evt)
- return -EINVAL;
-
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -822,9 +810,8 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
- struct mm_struct *mm = domain->mm;
- return intel_svm_bind_mm(iommu, dev, mm);
+ return intel_svm_bind_mm(iommu, dev, domain, pasid);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 72dcdd468..f7828a7aa 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -188,20 +188,28 @@ static dma_addr_t __arm_lpae_dma_addr(void *pages)
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
- struct io_pgtable_cfg *cfg)
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
{
struct device *dev = cfg->iommu_dev;
int order = get_order(size);
- struct page *p;
dma_addr_t dma;
void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
- if (!p)
+
+ if (cfg->alloc) {
+ pages = cfg->alloc(cookie, size, gfp);
+ } else {
+ struct page *p;
+
+ p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
+ pages = p ? page_address(p) : NULL;
+ }
+
+ if (!pages)
return NULL;
- pages = page_address(p);
if (!cfg->coherent_walk) {
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -220,18 +228,28 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
out_unmap:
dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+
out_free:
- __free_pages(p, order);
+ if (cfg->free)
+ cfg->free(cookie, pages, size);
+ else
+ free_pages((unsigned long)pages, order);
+
return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
- struct io_pgtable_cfg *cfg)
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
{
if (!cfg->coherent_walk)
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
size, DMA_TO_DEVICE);
- free_pages((unsigned long)pages, get_order(size));
+
+ if (cfg->free)
+ cfg->free(cookie, pages, size);
+ else
+ free_pages((unsigned long)pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -373,13 +391,13 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
+ cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
if (!cptep)
return -ENOMEM;
pte = arm_lpae_install_table(cptep, ptep, 0, data);
if (pte)
- __arm_lpae_free_pages(cptep, tblsz, cfg);
+ __arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
__arm_lpae_sync_pte(ptep, 1, cfg);
}
@@ -524,7 +542,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
}
- __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
+ __arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -552,7 +570,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return 0;
- tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
+ tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
if (!tablep)
return 0; /* Bytes unmapped */
@@ -575,7 +593,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
if (pte != blk_pte) {
- __arm_lpae_free_pages(tablep, tablesz, cfg);
+ __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
/*
* We may race against someone unmapping another part of this
* block, but anything else is invalid. We can't misinterpret
@@ -882,7 +900,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
/* Looking good; allocate a pgd */
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
- GFP_KERNEL, cfg);
+ GFP_KERNEL, cfg, cookie);
if (!data->pgd)
goto out_free_data;
@@ -984,7 +1002,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
/* Allocate pgd pages */
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
- GFP_KERNEL, cfg);
+ GFP_KERNEL, cfg, cookie);
if (!data->pgd)
goto out_free_data;
@@ -1059,7 +1077,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
- cfg);
+ cfg, cookie);
if (!data->pgd)
goto out_free_data;
@@ -1080,26 +1098,31 @@ out_free_data:
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
+ .caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
+ .caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_64_lpae_alloc_pgtable_s2,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
+ .caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_32_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
+ .caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_32_lpae_alloc_pgtable_s2,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
+ .caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_mali_lpae_alloc_pgtable,
.free = arm_lpae_free_pgtable,
};
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index b843fcd36..8841c1487 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -34,6 +34,26 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#endif
};
+static int check_custom_allocator(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg)
+{
+ /* No custom allocator, no need to check the format. */
+ if (!cfg->alloc && !cfg->free)
+ return 0;
+
+ /*
+ * When passing a custom allocator, both the alloc and free
+ * functions should be provided.
+ */
+ if (!cfg->alloc || !cfg->free)
+ return -EINVAL;
+
+ /* Make sure the format supports custom allocators. */
+ if (io_pgtable_init_table[fmt]->caps & IO_PGTABLE_CAP_CUSTOM_ALLOCATOR)
+ return 0;
+
+ return -EINVAL;
+}
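The call sites in io-pgtable-arm.c above imply the hook signatures: cfg->alloc(cookie, size, gfp) returning a kernel virtual address, and cfg->free(cookie, pages, size). A hedged sketch of a driver supplying such a pair (illustrative only; a real allocator must return zeroed, naturally aligned, physically contiguous memory suitable for page tables):

static void *my_pt_alloc(void *cookie, size_t size, gfp_t gfp)
{
	/* Power-of-two kmalloc sizes are naturally aligned; zeroed here. */
	return kzalloc(size, gfp);
}

static void my_pt_free(void *cookie, void *pages, size_t size)
{
	kfree(pages);
}

/* When building the io_pgtable_cfg: */
cfg.alloc = my_pt_alloc;
cfg.free  = my_pt_free;
/* alloc_io_pgtable_ops() now rejects a config that sets only one hook,
 * or a format that lacks IO_PGTABLE_CAP_CUSTOM_ALLOCATOR. */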
+
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg,
void *cookie)
@@ -44,6 +64,9 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
if (fmt >= IO_PGTABLE_NUM_FMTS)
return NULL;
+ if (check_custom_allocator(fmt, cfg))
+ return NULL;
+
fns = io_pgtable_init_table[fmt];
if (!fns)
return NULL;
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 4a2f56997..65814cbc8 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -12,32 +12,43 @@
static DEFINE_MUTEX(iommu_sva_lock);
/* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
+static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
+ struct iommu_mm_data *iommu_mm;
ioasid_t pasid;
- int ret = 0;
+
+ lockdep_assert_held(&iommu_sva_lock);
if (!arch_pgtable_dma_compat(mm))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
- mutex_lock(&iommu_sva_lock);
+ iommu_mm = mm->iommu_mm;
/* Is a PASID already associated with this mm? */
- if (mm_valid_pasid(mm)) {
- if (mm->pasid >= dev->iommu->max_pasids)
- ret = -EOVERFLOW;
- goto out;
+ if (iommu_mm) {
+ if (iommu_mm->pasid >= dev->iommu->max_pasids)
+ return ERR_PTR(-EOVERFLOW);
+ return iommu_mm;
}
+ iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
+ if (!iommu_mm)
+ return ERR_PTR(-ENOMEM);
+
pasid = iommu_alloc_global_pasid(dev);
if (pasid == IOMMU_PASID_INVALID) {
- ret = -ENOSPC;
- goto out;
+ kfree(iommu_mm);
+ return ERR_PTR(-ENOSPC);
}
- mm->pasid = pasid;
- ret = 0;
-out:
- mutex_unlock(&iommu_sva_lock);
- return ret;
+ iommu_mm->pasid = pasid;
+ INIT_LIST_HEAD(&iommu_mm->sva_domains);
+ INIT_LIST_HEAD(&iommu_mm->sva_handles);
+ /*
+ * Make sure the write to mm->iommu_mm is not reordered in front of
+ * the initialization of the iommu_mm fields. If it were, readers
+ * could see a valid iommu_mm pointer with uninitialized values.
+ */
+ smp_store_release(&mm->iommu_mm, iommu_mm);
+ return iommu_mm;
}
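The smp_store_release() above pairs with an acquire on any lock-free reader: the pointer must be loaded with smp_load_acquire() before its fields are dereferenced. A hedged sketch of such a reader (function name illustrative):

static ioasid_t read_mm_pasid(struct mm_struct *mm)
{
	/* Acquire pairs with the release publish in iommu_alloc_mm_data(). */
	struct iommu_mm_data *iommu_mm = smp_load_acquire(&mm->iommu_mm);

	/* If the pointer is visible, pasid and the lists are initialized. */
	return iommu_mm ? iommu_mm->pasid : IOMMU_PASID_INVALID;
}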
/**
@@ -58,57 +69,70 @@ out:
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
+ struct iommu_mm_data *iommu_mm;
struct iommu_domain *domain;
struct iommu_sva *handle;
int ret;
+ mutex_lock(&iommu_sva_lock);
+
/* Allocate mm->pasid if necessary. */
- ret = iommu_sva_alloc_pasid(mm, dev);
- if (ret)
- return ERR_PTR(ret);
+ iommu_mm = iommu_alloc_mm_data(mm, dev);
+ if (IS_ERR(iommu_mm)) {
+ ret = PTR_ERR(iommu_mm);
+ goto out_unlock;
+ }
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
+ list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
+ if (handle->dev == dev) {
+ refcount_inc(&handle->users);
+ mutex_unlock(&iommu_sva_lock);
+ return handle;
+ }
+ }
- mutex_lock(&iommu_sva_lock);
- /* Search for an existing domain. */
- domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
- IOMMU_DOMAIN_SVA);
- if (IS_ERR(domain)) {
- ret = PTR_ERR(domain);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle) {
+ ret = -ENOMEM;
goto out_unlock;
}
- if (domain) {
- domain->users++;
- goto out;
+ /* Search for an existing domain. */
+ list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
+ ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
+ if (!ret) {
+ domain->users++;
+ goto out;
+ }
}
/* Allocate a new domain and set it on device pasid. */
domain = iommu_sva_domain_alloc(dev, mm);
if (!domain) {
ret = -ENOMEM;
- goto out_unlock;
+ goto out_free_handle;
}
- ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
+ ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
if (ret)
goto out_free_domain;
domain->users = 1;
+ list_add(&domain->next, &mm->iommu_mm->sva_domains);
+
out:
+ refcount_set(&handle->users, 1);
+ list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
-
return handle;
out_free_domain:
iommu_domain_free(domain);
+out_free_handle:
+ kfree(handle);
out_unlock:
mutex_unlock(&iommu_sva_lock);
- kfree(handle);
-
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
@@ -124,12 +148,19 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_domain *domain = handle->domain;
- ioasid_t pasid = domain->mm->pasid;
+ struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
+ if (!refcount_dec_and_test(&handle->users)) {
+ mutex_unlock(&iommu_sva_lock);
+ return;
+ }
+ list_del(&handle->handle_item);
+
+ iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
- iommu_detach_device_pasid(domain, dev, pasid);
+ list_del(&domain->next);
iommu_domain_free(domain);
}
mutex_unlock(&iommu_sva_lock);
@@ -205,8 +236,11 @@ out_put_mm:
void mm_pasid_drop(struct mm_struct *mm)
{
- if (likely(!mm_valid_pasid(mm)))
+ struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+ if (!iommu_mm)
return;
- iommu_free_global_pasid(mm->pasid);
+ iommu_free_global_pasid(iommu_mm->pasid);
+ kfree(iommu_mm);
}
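Taken together, these iommu-sva.c changes make bind idempotent per (device, mm) pair: a repeat bind returns the existing handle with its refcount raised, and the domain is only detached and freed on the final unbind. A hedged usage sketch:

/* Kernel-side sketch; dev is a probed, SVA-capable device. */
struct iommu_sva *h1, *h2;

h1 = iommu_sva_bind_device(dev, current->mm);	/* allocates handle + domain */
h2 = iommu_sva_bind_device(dev, current->mm);	/* h2 == h1, users == 2 */

iommu_sva_unbind_device(h2);	/* only drops the handle refcount */
iommu_sva_unbind_device(h1);	/* detaches the PASID, may free the domain */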
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33e2a9b5d..ad33161f2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -148,7 +148,7 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
-static struct bus_type * const iommu_buses[] = {
+static const struct bus_type * const iommu_buses[] = {
&platform_bus_type,
#ifdef CONFIG_PCI
&pci_bus_type,
@@ -257,13 +257,6 @@ int iommu_device_register(struct iommu_device *iommu,
/* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL;
- /*
- * Temporarily enforce global restriction to a single driver. This was
- * already the de-facto behaviour, since any possible combination of
- * existing drivers would compete for at least the PCI or platform bus.
- */
- if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
- return -EBUSY;
iommu->ops = ops;
if (hwdev)
@@ -273,10 +266,8 @@ int iommu_device_register(struct iommu_device *iommu,
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
- iommu_buses[i]->iommu_ops = ops;
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
err = bus_iommu_probe(iommu_buses[i]);
- }
if (err)
iommu_device_unregister(iommu);
return err;
@@ -329,7 +320,6 @@ int iommu_device_register_bus(struct iommu_device *iommu,
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- bus->iommu_ops = ops;
err = bus_iommu_probe(bus);
if (err) {
iommu_device_unregister_bus(iommu, bus, nb);
@@ -344,6 +334,8 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
+ lockdep_assert_held(&iommu_probe_device_lock);
+
if (param)
return param;
@@ -368,6 +360,15 @@ static void dev_iommu_free(struct device *dev)
kfree(param);
}
+/*
+ * Internal equivalent of device_iommu_mapped() for when we care that a device
+ * actually has API ops, and don't want false positives from VFIO-only groups.
+ */
+static bool dev_has_iommu(struct device *dev)
+{
+ return dev->iommu && dev->iommu->iommu_dev;
+}
+
static u32 dev_iommu_get_max_pasids(struct device *dev)
{
u32 max_pasids = 0, bits = 0;
@@ -386,6 +387,15 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
+void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+ /* FSL_PAMU does something weird */
+ if (!IS_ENABLED(CONFIG_FSL_PAMU))
+ lockdep_assert_held(&iommu_probe_device_lock);
+ dev->iommu->priv = priv;
+}
+EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
+
/*
* Init the dev->iommu and dev->iommu_group in the struct device and get the
* driver probed
@@ -453,13 +463,24 @@ static void iommu_deinit_device(struct device *dev)
/*
* release_device() must stop using any attached domain on the device.
- * If there are still other devices in the group they are not effected
+ * If there are still other devices in the group, they are not affected
* by this callback.
*
- * The IOMMU driver must set the device to either an identity or
- * blocking translation and stop using any domain pointer, as it is
- * going to be freed.
+ * If the iommu driver provides release_domain, the core code ensures
+ * that domain is attached prior to calling release_device. Drivers can
+ * use this to enforce a translation on the idle iommu. Typically, the
+ * global static blocked_domain is a good choice.
+ *
+ * Otherwise, the iommu driver must set the device to either an identity
+ * or a blocking translation in release_device() and stop using any
+ * domain pointer, as it is going to be freed.
+ *
+ * Regardless, if a delayed attach never occurred, then the release
+ * should still avoid touching any hardware configuration either.
*/
+ if (!dev->iommu->attach_deferred && ops->release_domain)
+ ops->release_domain->ops->attach_dev(ops->release_domain, dev);
+
if (ops->release_device)
ops->release_device(dev);
@@ -489,11 +510,26 @@ DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
+ struct iommu_fwspec *fwspec;
struct iommu_group *group;
struct group_device *gdev;
int ret;
+ /*
+ * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
+ * instances with non-NULL fwnodes, and client devices should have been
+ * identified with a fwspec by this point. Otherwise, we can currently
+ * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
+ * be present, and that any of their registered instances has suitable
+ * ops for probing, and thus cheekily co-opt the same mechanism.
+ */
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && fwspec->ops)
+ ops = fwspec->ops;
+ else
+ ops = iommu_ops_from_fwnode(NULL);
+
if (!ops)
return -ENODEV;
/*
@@ -618,7 +654,7 @@ static void __iommu_group_remove_device(struct device *dev)
list_del(&device->list);
__iommu_group_free_device(group, device);
- if (dev->iommu && dev->iommu->iommu_dev)
+ if (dev_has_iommu(dev))
iommu_deinit_device(dev);
else
dev->iommu_group = NULL;
@@ -817,7 +853,7 @@ int iommu_get_group_resv_regions(struct iommu_group *group,
* Non-API groups still expose reserved_regions in sysfs,
* so filter out calls that get here that way.
*/
- if (!device->dev->iommu)
+ if (!dev_has_iommu(device->dev))
break;
INIT_LIST_HEAD(&dev_resv_regions);
@@ -1223,6 +1259,12 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+static struct device *iommu_group_first_dev(struct iommu_group *group)
+{
+ lockdep_assert_held(&group->mutex);
+ return list_first_entry(&group->devices, struct group_device, list)->dev;
+}
+
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
@@ -1751,30 +1793,13 @@ __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
}
/*
- * Returns the iommu_ops for the devices in an iommu group.
- *
- * It is assumed that all devices in an iommu group are managed by a single
- * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
- * in the group.
- */
-static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
-{
- struct group_device *device =
- list_first_entry(&group->devices, struct group_device, list);
-
- lockdep_assert_held(&group->mutex);
-
- return dev_iommu_ops(device->dev);
-}
-
-/*
* req_type of 0 means "auto" which means to select a domain based on
* iommu_def_domain_type or what the driver actually supports.
*/
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
- const struct iommu_ops *ops = group_iommu_ops(group);
+ const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
struct iommu_domain *dom;
lockdep_assert_held(&group->mutex);
@@ -1785,7 +1810,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
* domain. Do not use in new drivers.
*/
if (ops->default_domain) {
- if (req_type)
+ if (req_type != ops->default_domain->type)
return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@@ -1854,13 +1879,21 @@ static int iommu_bus_notifier(struct notifier_block *nb,
static int iommu_get_def_domain_type(struct iommu_group *group,
struct device *dev, int cur_type)
{
- const struct iommu_ops *ops = group_iommu_ops(group);
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
int type;
- if (!ops->def_domain_type)
- return cur_type;
-
- type = ops->def_domain_type(dev);
+ if (ops->default_domain) {
+ /*
+ * Drivers that declare a global static default_domain will
+ * always choose that.
+ */
+ type = ops->default_domain->type;
+ } else {
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+ else
+ return cur_type;
+ }
if (!type || cur_type == type)
return cur_type;
if (!cur_type)
@@ -2003,9 +2036,28 @@ int bus_iommu_probe(const struct bus_type *bus)
return 0;
}
+/**
+ * iommu_present() - make platform-specific assumptions about an IOMMU
+ * @bus: bus to check
+ *
+ * Do not use this function. You want device_iommu_mapped() instead.
+ *
+ * Return: true if some IOMMU is present and aware of devices on the given bus;
+ * in general it may not be the only IOMMU, and it may not have anything to do
+ * with whatever device you are ultimately interested in.
+ */
bool iommu_present(const struct bus_type *bus)
{
- return bus->iommu_ops != NULL;
+ bool ret = false;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
+ if (iommu_buses[i] == bus) {
+ spin_lock(&iommu_device_lock);
+ ret = !list_empty(&iommu_device_list);
+ spin_unlock(&iommu_device_lock);
+ }
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_present);
@@ -2021,7 +2073,7 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
const struct iommu_ops *ops;
- if (!dev->iommu || !dev->iommu->iommu_dev)
+ if (!dev_has_iommu(dev))
return false;
ops = dev_iommu_ops(dev);
@@ -2107,6 +2159,7 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
return ERR_PTR(-ENOMEM);
domain->type = type;
+ domain->owner = ops;
/*
* If not already set, assume all sizes by default; the driver
* may override this later
@@ -2132,21 +2185,37 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
static struct iommu_domain *
__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
{
- struct device *dev =
- list_first_entry(&group->devices, struct group_device, list)
- ->dev;
+ struct device *dev = iommu_group_first_dev(group);
+
+ return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
+}
+
+static int __iommu_domain_alloc_dev(struct device *dev, void *data)
+{
+ const struct iommu_ops **ops = data;
- return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
+ if (!dev_has_iommu(dev))
+ return 0;
+
+ if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev),
+ "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n",
+ dev_bus_name(dev)))
+ return -EBUSY;
+
+ *ops = dev_iommu_ops(dev);
+ return 0;
}
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
+ const struct iommu_ops *ops = NULL;
+ int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev);
struct iommu_domain *domain;
- if (bus == NULL || bus->iommu_ops == NULL)
+ if (err || !ops)
return NULL;
- domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
- IOMMU_DOMAIN_UNMANAGED);
+
+ domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return NULL;
return domain;
@@ -2284,10 +2353,16 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
+ struct device *dev;
+
if (group->domain && group->domain != group->default_domain &&
group->domain != group->blocking_domain)
return -EBUSY;
+ dev = iommu_group_first_dev(group);
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
+ return -EINVAL;
+
return __iommu_group_set_domain(group, domain);
}
@@ -3004,8 +3079,8 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
*/
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ if (dev_has_iommu(dev)) {
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->dev_enable_feat)
return ops->dev_enable_feat(dev, feat);
@@ -3020,8 +3095,8 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
- if (dev->iommu && dev->iommu->iommu_dev) {
- const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
+ if (dev_has_iommu(dev)) {
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->dev_disable_feat)
return ops->dev_disable_feat(dev, feat);
@@ -3472,6 +3547,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
{
/* Caller must be a probed driver on dev */
struct iommu_group *group = dev->iommu_group;
+ struct group_device *device;
void *curr;
int ret;
@@ -3481,7 +3557,18 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (!group)
return -ENODEV;
+ if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner ||
+ pasid == IOMMU_NO_PASID)
+ return -EINVAL;
+
mutex_lock(&group->mutex);
+ for_each_group_device(group, device) {
+ if (pasid >= device->dev->iommu->max_pasids) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
@@ -3569,6 +3656,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
domain->type = IOMMU_DOMAIN_SVA;
mmgrab(mm);
domain->mm = mm;
+ domain->owner = ops;
domain->iopf_handler = iommu_sva_handle_iopf;
domain->fault_data = mm;
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 99d4b075d..76656fe04 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -37,6 +37,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ select IOMMUFD_DRIVER
default n
help
This is dangerous, do not enable unless running
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 6f680959b..33d142f80 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -135,6 +135,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
hwpt->domain = NULL;
goto out_abort;
}
+ hwpt->domain->owner = ops;
} else {
hwpt->domain = iommu_domain_alloc(idev->dev->bus);
if (!hwpt->domain) {
@@ -233,6 +234,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
hwpt->domain = NULL;
goto out_abort;
}
+ hwpt->domain->owner = ops;
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -372,3 +374,44 @@ int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd)
iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
return rc;
}
+
+int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd)
+{
+ struct iommu_hwpt_invalidate *cmd = ucmd->cmd;
+ struct iommu_user_data_array data_array = {
+ .type = cmd->data_type,
+ .uptr = u64_to_user_ptr(cmd->data_uptr),
+ .entry_len = cmd->entry_len,
+ .entry_num = cmd->entry_num,
+ };
+ struct iommufd_hw_pagetable *hwpt;
+ u32 done_num = 0;
+ int rc;
+
+ if (cmd->__reserved) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (cmd->entry_num && (!cmd->data_uptr || !cmd->entry_len)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ hwpt = iommufd_get_hwpt_nested(ucmd, cmd->hwpt_id);
+ if (IS_ERR(hwpt)) {
+ rc = PTR_ERR(hwpt);
+ goto out;
+ }
+
+ rc = hwpt->domain->ops->cache_invalidate_user(hwpt->domain,
+ &data_array);
+ done_num = data_array.entry_num;
+
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
+out:
+ cmd->entry_num = done_num;
+ if (iommufd_ucmd_respond(ucmd, sizeof(*cmd)))
+ return -EFAULT;
+ return rc;
+}
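From the handler above, userspace passes an array descriptor and reads back how many entries were consumed. A hedged sketch of the calling convention, reusing the selftest invalidation format defined later in iommufd_test.h (field names are taken from the handler; the size member follows the usual iommufd command convention):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>
/* IOMMU_HWPT_INVALIDATE_DATA_SELFTEST and friends come from the
 * kernel's iommufd_test.h and are illustrative here. */

static int invalidate_selftest(int iommufd, uint32_t nested_hwpt_id)
{
	struct iommu_hwpt_invalidate_selftest ents[2] = {
		{ .iotlb_id = 0 },				/* one entry */
		{ .flags = IOMMU_TEST_INVALIDATE_FLAG_ALL },	/* then flush all */
	};
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = nested_hwpt_id,
		.data_type = IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
		.data_uptr = (uintptr_t)ents,
		.entry_len = sizeof(ents[0]),
		.entry_num = 2,
	};

	/* Even on failure, cmd.entry_num reports how many entries were
	 * processed before the error. */
	return ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd);
}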
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index abae041e2..991f864d1 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -328,6 +328,15 @@ iommufd_get_hwpt_paging(struct iommufd_ucmd *ucmd, u32 id)
IOMMUFD_OBJ_HWPT_PAGING),
struct iommufd_hwpt_paging, common.obj);
}
+
+static inline struct iommufd_hw_pagetable *
+iommufd_get_hwpt_nested(struct iommufd_ucmd *ucmd, u32 id)
+{
+ return container_of(iommufd_get_object(ucmd->ictx, id,
+ IOMMUFD_OBJ_HWPT_NESTED),
+ struct iommufd_hw_pagetable, obj);
+}
+
int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd);
int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd);
@@ -345,6 +354,7 @@ void iommufd_hwpt_paging_abort(struct iommufd_object *obj);
void iommufd_hwpt_nested_destroy(struct iommufd_object *obj);
void iommufd_hwpt_nested_abort(struct iommufd_object *obj);
int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd);
+int iommufd_hwpt_invalidate(struct iommufd_ucmd *ucmd);
static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
struct iommufd_hw_pagetable *hwpt)
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 7910fbe19..e854d3f67 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -21,6 +21,7 @@ enum {
IOMMU_TEST_OP_ACCESS_REPLACE_IOAS,
IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS,
IOMMU_TEST_OP_DIRTY,
+ IOMMU_TEST_OP_MD_CHECK_IOTLB,
};
enum {
@@ -44,6 +45,7 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
+ MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
};
enum {
@@ -121,6 +123,10 @@ struct iommu_test_cmd {
__aligned_u64 uptr;
__aligned_u64 out_nr_dirty;
} dirty;
+ struct {
+ __u32 id;
+ __u32 iotlb;
+ } check_iotlb;
};
__u32 last;
};
@@ -148,4 +154,22 @@ struct iommu_hwpt_selftest {
__u32 iotlb;
};
+/* Should not be equal to any defined value in enum iommu_hwpt_invalidate_data_type */
+#define IOMMU_HWPT_INVALIDATE_DATA_SELFTEST 0xdeadbeef
+#define IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID 0xdadbeef
+
+/**
+ * struct iommu_hwpt_invalidate_selftest - Invalidation data for Mock driver
+ * (IOMMU_HWPT_INVALIDATE_DATA_SELFTEST)
+ * @flags: Invalidate flags
+ * @iotlb_id: Invalidate iotlb entry index
+ *
+ * If IOMMU_TEST_INVALIDATE_ALL is set in @flags, @iotlb_id will be ignored
+ */
+struct iommu_hwpt_invalidate_selftest {
+#define IOMMU_TEST_INVALIDATE_FLAG_ALL (1 << 0)
+ __u32 flags;
+ __u32 iotlb_id;
+};
+
#endif
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index c9091e46d..39b32932c 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -322,6 +322,7 @@ union ucmd_buffer {
struct iommu_hw_info info;
struct iommu_hwpt_alloc hwpt;
struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap;
+ struct iommu_hwpt_invalidate cache;
struct iommu_hwpt_set_dirty_tracking set_dirty_tracking;
struct iommu_ioas_alloc alloc;
struct iommu_ioas_allow_iovas allow_iovas;
@@ -360,6 +361,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
__reserved),
IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap,
struct iommu_hwpt_get_dirty_bitmap, data),
+ IOCTL_OP(IOMMU_HWPT_INVALIDATE, iommufd_hwpt_invalidate,
+ struct iommu_hwpt_invalidate, __reserved),
IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking,
struct iommu_hwpt_set_dirty_tracking, __reserved),
IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d437ffb71..7a2199470 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -25,9 +25,23 @@ static struct iommu_domain_ops domain_nested_ops;
size_t iommufd_test_memory_limit = 65536;
+struct mock_bus_type {
+ struct bus_type bus;
+ struct notifier_block nb;
+};
+
+static struct mock_bus_type iommufd_mock_bus_type = {
+ .bus = {
+ .name = "iommufd_mock",
+ },
+};
+
+static DEFINE_IDA(mock_dev_ida);
+
enum {
MOCK_DIRTY_TRACK = 1,
MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
+ MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
/*
* Like a real page table alignment requires the low bits of the address
@@ -40,6 +54,7 @@ enum {
MOCK_PFN_START_IOVA = _MOCK_PFN_START,
MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
+ MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
/*
@@ -123,6 +138,7 @@ enum selftest_obj_type {
struct mock_dev {
struct device dev;
unsigned long flags;
+ int id;
};
struct selftest_obj {
@@ -193,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
+static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
+ unsigned long iova, size_t page_size,
+ unsigned long flags)
+{
+ unsigned long cur, end = iova + page_size - 1;
+ bool dirty = false;
+ void *ent, *old;
+
+ for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
+ ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+ if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
+ continue;
+
+ dirty = true;
+ /* Clear dirty */
+ if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+ unsigned long val;
+
+ val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+ old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+ xa_mk_value(val), GFP_KERNEL);
+ WARN_ON_ONCE(ent != old);
+ }
+ }
+
+ return dirty;
+}
+
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
@@ -200,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
{
struct mock_iommu_domain *mock =
container_of(domain, struct mock_iommu_domain, domain);
- unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
- void *ent, *old;
+ unsigned long end = iova + size;
+ void *ent;
if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
return -EINVAL;
- for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+ do {
+ unsigned long pgsize = MOCK_IO_PAGE_SIZE;
+ unsigned long head;
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns,
- cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- iommu_dirty_bitmap_record(dirty, cur,
- MOCK_IO_PAGE_SIZE);
+ ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ if (!ent) {
+ iova += pgsize;
+ continue;
}
- }
+
+ if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
+ pgsize = MOCK_HUGE_PAGE_SIZE;
+ head = iova & ~(pgsize - 1);
+
+ /* Clear dirty */
+ if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
+ iommu_dirty_bitmap_record(dirty, head, pgsize);
+ iova = head + pgsize;
+ } while (iova < end);
return 0;
}
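Worked example of the huge-page walk above: with MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2 (2 KiB with 4 KiB pages), MOCK_HUGE_PAGE_SIZE is 512 * 2 KiB = 1 MiB. If a dirty huge entry is found at iova 0x123800, then head = 0x123800 & ~0xfffff = 0x100000, the whole [0x100000, 0x200000) range is tested, cleared, and recorded once, and the loop resumes at iova 0x200000.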
@@ -236,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = {
static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
+ struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
struct mock_iommu_domain *mock;
mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -244,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
mock->domain.ops = mock_ops.default_domain_ops;
mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
xa_init(&mock->pfns);
@@ -289,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
return ERR_PTR(-EOPNOTSUPP);
if (user_data || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(NULL);
+ domain = mock_domain_alloc_paging(dev);
if (!domain)
return ERR_PTR(-ENOMEM);
if (has_dirty_flag)
@@ -352,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain,
if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
flags = MOCK_PFN_LAST_IOVA;
+ if (pgsize != MOCK_IO_PAGE_SIZE)
+ flags |= MOCK_PFN_HUGE_IOVA;
old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
flags),
@@ -396,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
/*
* iommufd generates unmaps that must be a strict
- * superset of the map's performend So every starting
- * IOVA should have been an iova passed to map, and the
+ * superset of the maps performed, so every
+ * starting/ending IOVA should have been an iova passed
+ * to map.
*
- * First IOVA must be present and have been a first IOVA
- * passed to map_pages
+ * This simple logic doesn't work when HUGE_PAGE is
+ * turned on, since the core code will automatically
+ * switch between the two page sizes, creating a break in
+ * the unmap calls. The break can land in the middle of a
+ * contiguous IOVA range.
*/
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
+ if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
+ if (first) {
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_START_IOVA));
+ first = false;
+ }
+ if (pgcount == 1 &&
+ cur + MOCK_IO_PAGE_SIZE == pgsize)
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_LAST_IOVA));
}
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
iova += MOCK_IO_PAGE_SIZE;
ret += MOCK_IO_PAGE_SIZE;
@@ -452,6 +509,8 @@ static struct iommu_device mock_iommu_device = {
static struct iommu_device *mock_probe_device(struct device *dev)
{
+ if (dev->bus != &iommufd_mock_bus_type.bus)
+ return ERR_PTR(-ENODEV);
return &mock_iommu_device;
}
@@ -488,9 +547,59 @@ static void mock_domain_free_nested(struct iommu_domain *domain)
kfree(mock_nested);
}
+static int
+mock_domain_cache_invalidate_user(struct iommu_domain *domain,
+ struct iommu_user_data_array *array)
+{
+ struct mock_iommu_domain_nested *mock_nested =
+ container_of(domain, struct mock_iommu_domain_nested, domain);
+ struct iommu_hwpt_invalidate_selftest inv;
+ u32 processed = 0;
+ int i = 0, j;
+ int rc = 0;
+
+ if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for ( ; i < array->entry_num; i++) {
+ rc = iommu_copy_struct_from_user_array(&inv, array,
+ IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
+ i, iotlb_id);
+ if (rc)
+ break;
+
+ if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
+ rc = -EINVAL;
+ break;
+ }
+
+ if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
+ /* Invalidate all mock iotlb entries and ignore iotlb_id */
+ for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
+ mock_nested->iotlb[j] = 0;
+ } else {
+ mock_nested->iotlb[inv.iotlb_id] = 0;
+ }
+
+ processed++;
+ }
+
+out:
+ array->entry_num = processed;
+ return rc;
+}
+
static struct iommu_domain_ops domain_nested_ops = {
.free = mock_domain_free_nested,
.attach_dev = mock_domain_nop_attach,
+ .cache_invalidate_user = mock_domain_cache_invalidate_user,
};
static inline struct iommufd_hw_pagetable *
@@ -541,24 +650,11 @@ get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
return hwpt;
}
-struct mock_bus_type {
- struct bus_type bus;
- struct notifier_block nb;
-};
-
-static struct mock_bus_type iommufd_mock_bus_type = {
- .bus = {
- .name = "iommufd_mock",
- },
-};
-
-static atomic_t mock_dev_num;
-
static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
- atomic_dec(&mock_dev_num);
+ ida_free(&mock_dev_ida, mdev->id);
kfree(mdev);
}
@@ -567,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
struct mock_dev *mdev;
int rc;
- if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
+ if (dev_flags &
+ ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
return ERR_PTR(-EINVAL);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
@@ -579,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
mdev->dev.release = mock_dev_release;
mdev->dev.bus = &iommufd_mock_bus_type.bus;
- rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
- atomic_inc_return(&mock_dev_num));
+ rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err_put;
+ mdev->id = rc;
+
+ rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
if (rc)
goto err_put;
@@ -808,6 +909,28 @@ static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
return 0;
}
+static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
+ u32 mockpt_id, unsigned int iotlb_id,
+ u32 iotlb)
+{
+ struct mock_iommu_domain_nested *mock_nested;
+ struct iommufd_hw_pagetable *hwpt;
+ int rc = 0;
+
+ hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
+ if (IS_ERR(hwpt))
+ return PTR_ERR(hwpt);
+
+ mock_nested = container_of(hwpt->domain,
+ struct mock_iommu_domain_nested, domain);
+
+ if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
+ mock_nested->iotlb[iotlb_id] != iotlb)
+ rc = -EINVAL;
+ iommufd_put_object(ucmd->ictx, &hwpt->obj);
+ return rc;
+}
+
struct selftest_access {
struct iommufd_access *access;
struct file *file;
@@ -1289,6 +1412,10 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
return iommufd_test_md_check_refs(
ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
cmd->check_refs.length, cmd->check_refs.refs);
+ case IOMMU_TEST_OP_MD_CHECK_IOTLB:
+ return iommufd_test_md_check_iotlb(ucmd, cmd->id,
+ cmd->check_iotlb.id,
+ cmd->check_iotlb.iotlb);
case IOMMU_TEST_OP_CREATE_ACCESS:
return iommufd_test_create_access(ucmd, cmd->id,
cmd->create_access.flags);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 75279500a..7abe9e85a 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -863,16 +863,11 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct device_link *link;
struct device *larbdev;
unsigned int larbid, larbidx, i;
- if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return ERR_PTR(-ENODEV); /* Not a iommu client device */
-
- data = dev_iommu_priv_get(dev);
-
if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
return &data->iommu;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 67e044c1a..25b41222a 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -481,9 +481,6 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
idx++;
}
- if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
- return ERR_PTR(-ENODEV); /* Not a iommu client device */
-
data = dev_iommu_priv_get(dev);
/* Link the consumer device with the smi-larb device(supplier) */
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 42cffb0ee..719652b60 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/fsl/mc.h>
-#define NO_IOMMU 1
-
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
@@ -29,7 +27,7 @@ static int of_iommu_xlate(struct device *dev,
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
!of_device_is_available(iommu_spec->np))
- return NO_IOMMU;
+ return -ENODEV;
ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
if (ret)
@@ -61,7 +59,7 @@ static int of_iommu_configure_dev_id(struct device_node *master_np,
"iommu-map-mask", &iommu_spec.np,
iommu_spec.args);
if (err)
- return err == -ENODEV ? NO_IOMMU : err;
+ return err;
err = of_iommu_xlate(dev, &iommu_spec);
of_node_put(iommu_spec.np);
@@ -72,7 +70,7 @@ static int of_iommu_configure_dev(struct device_node *master_np,
struct device *dev)
{
struct of_phandle_args iommu_spec;
- int err = NO_IOMMU, idx = 0;
+ int err = -ENODEV, idx = 0;
while (!of_parse_phandle_with_args(master_np, "iommus",
"#iommu-cells",
@@ -107,16 +105,21 @@ static int of_iommu_configure_device(struct device_node *master_np,
of_iommu_configure_dev(master_np, dev);
}
-const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id)
+/*
+ * Returns:
+ * 0 on success, an iommu was configured
+ * -ENODEV if the device does not have any IOMMU
+ * -EPROBE_DEFER if probing should be tried again
+ * -errno fatal errors
+ */
+int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ const u32 *id)
{
- const struct iommu_ops *ops = NULL;
struct iommu_fwspec *fwspec;
- int err = NO_IOMMU;
+ int err;
if (!master_np)
- return NULL;
+ return -ENODEV;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
@@ -124,7 +127,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
if (fwspec) {
if (fwspec->ops) {
mutex_unlock(&iommu_probe_device_lock);
- return fwspec->ops;
+ return 0;
}
/* In the deferred case, start again from scratch */
iommu_fwspec_free(dev);
@@ -147,36 +150,21 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
} else {
err = of_iommu_configure_device(master_np, dev, id);
}
-
- /*
- * Two success conditions can be represented by non-negative err here:
- * >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
- * 0 : we found an IOMMU, and dev->fwspec is initialised appropriately
- * <0 : any actual error
- */
- if (!err) {
- /* The fwspec pointer changed, read it again */
- fwspec = dev_iommu_fwspec_get(dev);
- ops = fwspec->ops;
- }
mutex_unlock(&iommu_probe_device_lock);
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * probe for dev, replay it to get things in order.
- */
- if (!err && dev->bus)
- err = iommu_probe_device(dev);
-
- /* Ignore all other errors apart from EPROBE_DEFER */
- if (err == -EPROBE_DEFER) {
- ops = ERR_PTR(err);
- } else if (err < 0) {
- dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
- ops = NULL;
- }
+ if (err == -ENODEV || err == -EPROBE_DEFER)
+ return err;
+ if (err)
+ goto err_log;
+
+ err = iommu_probe_device(dev);
+ if (err)
+ goto err_log;
+ return 0;
- return ops;
+err_log:
+ dev_dbg(dev, "Adding to IOMMU failed: %pe\n", ERR_PTR(err));
+ return err;
}
static enum iommu_resv_type __maybe_unused
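With of_iommu_configure() returning a plain errno instead of an ops pointer, callers branch on the three documented outcomes. A minimal sketch of the caller side, assuming a hypothetical setup_dma() helper (only of_iommu_configure() itself comes from this patch):

    int err = of_iommu_configure(dev, master_np, NULL);

    if (err == -EPROBE_DEFER)
            return err;                     /* IOMMU driver not bound yet */
    if (err && err != -ENODEV)
            return err;                     /* fatal error */

    /* 0: IOMMU configured; -ENODEV: no IOMMU, use direct DMA */
    setup_dma(dev, err == 0);               /* hypothetical helper */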
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index c66b07084..c9528065a 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1719,7 +1719,6 @@ static void omap_iommu_release_device(struct device *dev)
if (!dev->of_node || !arch_data)
return;
- dev_iommu_priv_set(dev, NULL);
kfree(arch_data);
}
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 2eb9fb467..537359f10 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -385,13 +385,7 @@ static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain,
static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct sprd_iommu_device *sdev;
-
- if (!fwspec || fwspec->ops != &sprd_iommu_ops)
- return ERR_PTR(-ENODEV);
-
- sdev = dev_iommu_priv_get(dev);
+ struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
return &sdev->iommu;
}
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 379ebe03e..34db37fd9 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -843,7 +843,7 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
.flags = cpu_to_le32(flags),
};
- ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+ ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
if (ret) {
viommu_del_mappings(vdomain, iova, end);
return ret;
@@ -912,6 +912,33 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
viommu_sync_req(vdomain->viommu);
}
+static int viommu_iotlb_sync_map(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ /*
+ * May be called before the viommu is initialized, including
+ * while creating direct mappings.
+ */
+ if (!vdomain->nr_endpoints)
+ return 0;
+ return viommu_sync_req(vdomain->viommu);
+}
+
+static void viommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ /*
+ * May be called before the viommu is initialized, including
+ * while creating direct mappings.
+ */
+ if (!vdomain->nr_endpoints)
+ return;
+ viommu_sync_req(vdomain->viommu);
+}
+
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -969,9 +996,6 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
struct viommu_dev *viommu = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!fwspec || fwspec->ops != &viommu_ops)
- return ERR_PTR(-ENODEV);
-
viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
if (!viommu)
return ERR_PTR(-ENODEV);
@@ -1037,6 +1061,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return true;
default:
return false;
}
@@ -1057,7 +1083,9 @@ static struct iommu_ops viommu_ops = {
.map_pages = viommu_map_pages,
.unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
+ .flush_iotlb_all = viommu_flush_iotlb_all,
.iotlb_sync = viommu_iotlb_sync,
+ .iotlb_sync_map = viommu_iotlb_sync_map,
.free = viommu_domain_free,
}
};
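The map path above now only queues a MAP request (viommu_add_req()) and relies on the core calling ->iotlb_sync_map() once per mapping operation, so several chunks can share one hypervisor round trip; advertising IOMMU_CAP_DEFERRED_FLUSH likewise lets the DMA layer batch unmaps through a flush queue drained by ->flush_iotlb_all(). Roughly, the intended core-side ordering (a simplified sketch, not code from this patch):

    ret = ops->map_pages(domain, iova, paddr, pgsize, pgcount,
                         prot, GFP_KERNEL, &mapped);     /* queue only */
    if (!ret)
            ret = ops->iotlb_sync_map(domain, iova, mapped); /* one flush */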
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index da308be6c..ba2e9e52d 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -158,9 +158,7 @@ static int ipoctal_get_icount(struct tty_struct *tty,
static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
{
struct tty_port *port = &channel->tty_port;
- unsigned char value;
- unsigned char flag;
- u8 isr;
+ u8 isr, value, flag;
do {
value = ioread8(&channel->regs->r.rhr);
@@ -202,8 +200,8 @@ static void ipoctal_irq_rx(struct ipoctal_channel *channel, u8 sr)
static void ipoctal_irq_tx(struct ipoctal_channel *channel)
{
- unsigned char value;
unsigned int *pointer_write = &channel->pointer_write;
+ u8 value;
if (channel->nb_bytes == 0)
return;
@@ -436,11 +434,11 @@ err_put_driver:
return res;
}
-static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
- const u8 *buf, int count)
+static inline size_t ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
+ const u8 *buf, size_t count)
{
unsigned long flags;
- int i;
+ size_t i;
unsigned int *pointer_read = &channel->pointer_read;
/* Copy the bytes from the user buffer to the internal one */
@@ -462,7 +460,7 @@ static ssize_t ipoctal_write_tty(struct tty_struct *tty, const u8 *buf,
size_t count)
{
struct ipoctal_channel *channel = tty->driver_data;
- unsigned int char_copied;
+ size_t char_copied;
char_copied = ipoctal_copy_write_buffer(channel, buf, count);
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index cc1ecfd49..b1471ba01 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -207,7 +207,7 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
if (!bus)
return NULL;
- bus_nr = ida_simple_get(&ipack_ida, 0, 0, GFP_KERNEL);
+ bus_nr = ida_alloc(&ipack_ida, GFP_KERNEL);
if (bus_nr < 0) {
kfree(bus);
return NULL;
@@ -237,7 +237,7 @@ int ipack_bus_unregister(struct ipack_bus_device *bus)
{
bus_for_each_dev(&ipack_bus_type, NULL, bus,
ipack_unregister_bus_member);
- ida_simple_remove(&ipack_ida, bus->bus_nr);
+ ida_free(&ipack_ida, bus->bus_nr);
kfree(bus);
return 0;
}
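The deprecated ida_simple_*() wrappers map directly onto the current API for the zero-based, unbounded allocation used here (sketch):

    int bus_nr = ida_alloc(&ipack_ida, GFP_KERNEL); /* was ida_simple_get(.., 0, 0, ..) */
    if (bus_nr < 0)
            return bus_nr;                          /* -ENOMEM or -ENOSPC */

    ida_free(&ipack_ida, bus_nr);                   /* was ida_simple_remove() */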
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 676c9250d..6b386511c 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2470,8 +2470,8 @@ static bool its_parse_indirect_baser(struct its_node *its,
* feature is not supported by hardware.
*/
new_order = max_t(u32, get_order(esz << ids), new_order);
- if (new_order > MAX_ORDER) {
- new_order = MAX_ORDER;
+ if (new_order > MAX_PAGE_ORDER) {
+ new_order = MAX_PAGE_ORDER;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type],
@@ -4561,13 +4561,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
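MAX_PAGE_ORDER is the 6.8 rename of MAX_ORDER, with the same inclusive meaning: the largest order the buddy allocator serves. The clamp therefore caps the ITS table at the biggest single allocation, which with a default configuration (4 KiB pages, order 10) works out to:

    size_t max_bytes = PAGE_SIZE << MAX_PAGE_ORDER;  /* 4 MiB with defaults */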
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 94d56a03b..ca32ac19d 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
+#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/irqchip/arm-gic-v4.h>
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index 712456523..7942d8eb3 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -14,6 +14,7 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
struct device *dev = &pdev->dev;
struct irq_domain *parent_domain;
struct generic_pm_domain *genpd;
+ struct device_node *msgram_np;
struct qcom_mpm_priv *priv;
unsigned int pin_cnt;
+ struct resource res;
int i, irq;
int ret;
@@ -374,9 +377,26 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
raw_spin_lock_init(&priv->lock);
- priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ /* If we have a handle to an RPM message ram partition, use it. */
+ msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
+ if (msgram_np) {
+ ret = of_address_to_resource(msgram_np, 0, &res);
+ if (ret) {
+ of_node_put(msgram_np);
+ return ret;
+ }
+
+ /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
+ priv->base = devm_ioremap(dev, res.start, resource_size(&res));
+ of_node_put(msgram_np);
+ if (!priv->base)
+ return -ENOMEM;
+ } else {
+ /* Otherwise, fall back to simple MMIO. */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+ }
for (i = 0; i < priv->reg_stride; i++) {
qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
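The two mapping paths differ in ownership, which is the point of the fallback: devm_platform_ioremap_resource() claims the region exclusively via request_mem_region() and would fail on RPM message RAM shared with other consumers, while plain devm_ioremap() only maps. Side by side (sketch):

    /* exclusive claim + map: fails if another driver owns the region */
    priv->base = devm_platform_ioremap_resource(pdev, 0);

    /* map only, no claim: safe for the shared RPM message RAM */
    priv->base = devm_ioremap(dev, res.start, resource_size(&res));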
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index dc822111f..8803facbb 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#define IRQC_IRQ_START 1
#define IRQC_IRQ_COUNT 8
@@ -52,15 +53,33 @@
#define IITSR_IITSEL_EDGE_BOTH 3
#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)
-#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
-#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
+#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
+#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
-struct rzg2l_irqc_priv {
- void __iomem *base;
- struct irq_fwspec fwspec[IRQC_NUM_IRQ];
- raw_spinlock_t lock;
+/**
+ * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
+ * @iitsr: IITSR register
+ * @titsr: TITSR registers
+ */
+struct rzg2l_irqc_reg_cache {
+ u32 iitsr;
+ u32 titsr[2];
};
+/**
+ * struct rzg2l_irqc_priv - IRQ controller private data structure
+ * @base: Controller's base address
+ * @fwspec: IRQ firmware specific data
+ * @lock: Lock to serialize access to hardware registers
+ * @cache: Registers cache for suspend/resume
+ */
+static struct rzg2l_irqc_priv {
+ void __iomem *base;
+ struct irq_fwspec fwspec[IRQC_NUM_IRQ];
+ raw_spinlock_t lock;
+ struct rzg2l_irqc_reg_cache cache;
+} *rzg2l_irqc_data;
+
static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
return data->domain->host_data;
@@ -277,6 +296,38 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
+static int rzg2l_irqc_irq_suspend(void)
+{
+ struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+ void __iomem *base = rzg2l_irqc_data->base;
+
+ cache->iitsr = readl_relaxed(base + IITSR);
+ for (u8 i = 0; i < 2; i++)
+ cache->titsr[i] = readl_relaxed(base + TITSR(i));
+
+ return 0;
+}
+
+static void rzg2l_irqc_irq_resume(void)
+{
+ struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
+ void __iomem *base = rzg2l_irqc_data->base;
+
+ /*
+ * Restore only interrupt type. TSSRx will be restored at the
+ * request of pin controller to avoid spurious interrupts due
+ * to invalid PIN states.
+ */
+ for (u8 i = 0; i < 2; i++)
+ writel_relaxed(cache->titsr[i], base + TITSR(i));
+ writel_relaxed(cache->iitsr, base + IITSR);
+}
+
+static struct syscore_ops rzg2l_irqc_syscore_ops = {
+ .suspend = rzg2l_irqc_irq_suspend,
+ .resume = rzg2l_irqc_irq_resume,
+};
+
static const struct irq_chip irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
@@ -362,7 +413,6 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
struct irq_domain *irq_domain, *parent_domain;
struct platform_device *pdev;
struct reset_control *resetn;
- struct rzg2l_irqc_priv *priv;
int ret;
pdev = of_find_device_by_node(node);
@@ -375,15 +425,15 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
return -ENODEV;
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+ if (!rzg2l_irqc_data)
return -ENOMEM;
- priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ if (IS_ERR(rzg2l_irqc_data->base))
+ return PTR_ERR(rzg2l_irqc_data->base);
- ret = rzg2l_irqc_parse_interrupts(priv, node);
+ ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
return ret;
@@ -406,17 +456,19 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
goto pm_disable;
}
- raw_spin_lock_init(&priv->lock);
+ raw_spin_lock_init(&rzg2l_irqc_data->lock);
irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
node, &rzg2l_irqc_domain_ops,
- priv);
+ rzg2l_irqc_data);
if (!irq_domain) {
dev_err(&pdev->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto pm_put;
}
+ register_syscore_ops(&rzg2l_irqc_syscore_ops);
+
return 0;
pm_put:
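Moving the private data into the static rzg2l_irqc_data is what makes syscore_ops workable here: syscore callbacks take no argument and run once, late in suspend, on the boot CPU with interrupts disabled, so the register cache needs no locking. The ordering, per standard syscore semantics (not spelled out in the patch):

    /* suspend: device PM callbacks -> syscore_suspend() -> sleep
     * resume:  wakeup -> syscore_resume() -> device PM callbacks */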
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 0c18d1f1e..f9d6fce4d 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -12,6 +12,7 @@
* Kevin Chea
*/
+#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
@@ -19,8 +20,6 @@
#include <linux/irqchip/xtensa-pic.h>
#include <linux/of.h>
-unsigned int cached_irq_mask;
-
/*
* Device Tree IRQ specifier translation function which works with one or
* two cell bindings. First cell value maps directly to the hwirq number.
@@ -44,34 +43,30 @@ static const struct irq_domain_ops xtensa_irq_domain_ops = {
static void xtensa_irq_mask(struct irq_data *d)
{
- cached_irq_mask &= ~(1 << d->hwirq);
- xtensa_set_sr(cached_irq_mask, intenable);
-}
+ u32 irq_mask;
-static void xtensa_irq_unmask(struct irq_data *d)
-{
- cached_irq_mask |= 1 << d->hwirq;
- xtensa_set_sr(cached_irq_mask, intenable);
+ irq_mask = xtensa_get_sr(intenable);
+ irq_mask &= ~BIT(d->hwirq);
+ xtensa_set_sr(irq_mask, intenable);
}
-static void xtensa_irq_enable(struct irq_data *d)
+static void xtensa_irq_unmask(struct irq_data *d)
{
- xtensa_irq_unmask(d);
-}
+ u32 irq_mask;
-static void xtensa_irq_disable(struct irq_data *d)
-{
- xtensa_irq_mask(d);
+ irq_mask = xtensa_get_sr(intenable);
+ irq_mask |= BIT(d->hwirq);
+ xtensa_set_sr(irq_mask, intenable);
}
static void xtensa_irq_ack(struct irq_data *d)
{
- xtensa_set_sr(1 << d->hwirq, intclear);
+ xtensa_set_sr(BIT(d->hwirq), intclear);
}
static int xtensa_irq_retrigger(struct irq_data *d)
{
- unsigned int mask = 1u << d->hwirq;
+ unsigned int mask = BIT(d->hwirq);
if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
return 0;
@@ -81,8 +76,6 @@ static int xtensa_irq_retrigger(struct irq_data *d)
static struct irq_chip xtensa_irq_chip = {
.name = "xtensa",
- .irq_enable = xtensa_irq_enable,
- .irq_disable = xtensa_irq_disable,
.irq_mask = xtensa_irq_mask,
.irq_unmask = xtensa_irq_unmask,
.irq_ack = xtensa_irq_ack,
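Dropping the cached mask turns mask/unmask into a read-modify-write of the INTENABLE special register, and the enable/disable wrappers can go because the genirq core falls back to .irq_unmask when .irq_enable is NULL (and masks lazily in place of a missing .irq_disable). The mask operation in isolation (sketch):

    u32 irq_mask = xtensa_get_sr(intenable);
    xtensa_set_sr(irq_mask & ~BIT(d->hwirq), intenable);  /* mask one line */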
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 2f3789515..6e80d7bd3 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1231,9 +1231,9 @@ static void capinc_tty_hangup(struct tty_struct *tty)
tty_port_hangup(&mp->port);
}
-static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
+static void capinc_tty_send_xchar(struct tty_struct *tty, u8 ch)
{
- pr_debug("capinc_tty_send_xchar(%d)\n", ch);
+ pr_debug("capinc_tty_send_xchar(%u)\n", ch);
}
static const struct tty_operations capinc_ops = {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a3a9ac5b5..d721b254e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -95,14 +95,18 @@ config LEDS_ARIEL
Say Y to if your machine is a Dell Wyse 3020 thin client.
config LEDS_AW200XX
- tristate "LED support for Awinic AW20036/AW20054/AW20072"
+ tristate "LED support for Awinic AW20036/AW20054/AW20072/AW20108"
depends on LEDS_CLASS
depends on I2C
help
- This option enables support for the AW20036/AW20054/AW20072 LED driver.
- It is a 3x12/6x9/6x12 matrix LED driver programmed via
- an I2C interface, up to 36/54/72 LEDs or 12/18/24 RGBs,
- 3 pattern controllers for auto breathing or group dimming control.
+ This option enables support for the Awinic AW200XX LED controllers.
+ It is a matrix LED driver programmed via an I2C interface. Devices have
+ a set of individually controlled LEDs and support 3 pattern controllers
+ for auto breathing or group dimming control. Supported devices:
+ - AW20036 (3x12) 36 LEDs
+ - AW20054 (6x9) 54 LEDs
+ - AW20072 (6x12) 72 LEDs
+ - AW20108 (9x12) 108 LEDs
To compile this driver as a module, choose M here: the module
will be called leds-aw200xx.
@@ -299,6 +303,15 @@ config LEDS_COBALT_RAQ
help
This option enables support for the Cobalt Raq series LEDs.
+config LEDS_SUN50I_A100
+ tristate "LED support for Allwinner A100 RGB LED controller"
+ depends on LEDS_CLASS_MULTICOLOR
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ This option enables support for the RGB LED controller found
+ in some Allwinner sunxi SoCs, including A100, R329, and D1.
+ It uses a one-wire interface to control up to 1024 LEDs.
+
config LEDS_SUNFIRE
tristate "LED support for SunFire servers."
depends on LEDS_CLASS
@@ -639,6 +652,17 @@ config LEDS_ADP5520
To compile this driver as a module, choose M here: the module will
be called leds-adp5520.
+config LEDS_MAX5970
+ tristate "LED Support for Maxim 5970"
+ depends on LEDS_CLASS
+ depends on MFD_MAX5970
+ help
+ This option enables support for the Maxim MAX5970 & MAX5978 smart
+ switch indication LEDs via the I2C bus.
+
+ To compile this driver as a module, choose M here: the module will
+ be called leds-max5970.
+
config LEDS_MC13783
tristate "LED Support for MC13XXX PMIC"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d7348e8bc..ce07dc295 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
+obj-$(CONFIG_LEDS_MAX5970) += leds-max5970.o
obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_SUN50I_A100) += leds-sun50i-a100.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6a5e1f41f..bd59a14a4 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -269,19 +269,6 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
-void led_trigger_rename_static(const char *name, struct led_trigger *trig)
-{
- /* new name must be on a temporary string to prevent races */
- BUG_ON(name == trig->name);
-
- down_write(&triggers_list_lock);
- /* this assumes that trig->name was originaly allocated to
- * non constant storage */
- strcpy((char *)trig->name, name);
- up_write(&triggers_list_lock);
-}
-EXPORT_SYMBOL_GPL(led_trigger_rename_static);
-
/* LED Trigger Interface */
int led_trigger_register(struct led_trigger *trig)
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index f3bed4f05..f584a7f98 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Awinic AW20036/AW20054/AW20072 LED driver
+ * Awinic AW20036/AW20054/AW20072/AW20108 LED driver
*
* Copyright (c) 2023, SberDevices. All Rights Reserved.
*
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/container_of.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/mod_devicetable.h>
@@ -86,6 +87,8 @@
#define AW200XX_REG_DIM(x, columns) \
AW200XX_REG(AW200XX_PAGE4, AW200XX_LED2REG(x, columns) * 2)
#define AW200XX_REG_DIM2FADE(x) ((x) + 1)
+#define AW200XX_REG_FADE2DIM(fade) \
+ DIV_ROUND_UP((fade) * AW200XX_DIM_MAX, AW200XX_FADE_MAX)
/*
* Duty ratio of display scan (see p.15 of datasheet for formula):
@@ -116,6 +119,7 @@ struct aw200xx {
struct mutex mutex;
u32 num_leds;
u32 display_rows;
+ struct gpio_desc *hwen;
struct aw200xx_led leds[] __counted_by(num_leds);
};
@@ -193,9 +197,7 @@ static int aw200xx_brightness_set(struct led_classdev *cdev,
dim = led->dim;
if (dim < 0)
- dim = max_t(int,
- brightness / (AW200XX_FADE_MAX / AW200XX_DIM_MAX),
- 1);
+ dim = AW200XX_REG_FADE2DIM(brightness);
ret = regmap_write(chip->regmap, reg, dim);
if (ret)
@@ -319,6 +321,9 @@ static int aw200xx_chip_reset(const struct aw200xx *const chip)
if (ret)
return ret;
+ /* According to the datasheet software reset takes at least 1ms */
+ fsleep(1000);
+
regcache_mark_dirty(chip->regmap);
return regmap_write(chip->regmap, AW200XX_REG_FCD, AW200XX_FCD_CLEAR);
}
@@ -358,6 +363,50 @@ static int aw200xx_chip_check(const struct aw200xx *const chip)
return 0;
}
+static void aw200xx_enable(const struct aw200xx *const chip)
+{
+ gpiod_set_value_cansleep(chip->hwen, 1);
+
+ /*
+ * After the HWEN pin is set high, the chip begins to load the OTP
+ * information, which takes 200us to complete. Roughly another 200us is
+ * needed for internal oscillator startup and display SRAM
+ * initialization. After display SRAM initialization, the registers in
+ * page1 to page5 can be configured via the i2c interface.
+ */
+ fsleep(400);
+}
+
+static void aw200xx_disable(const struct aw200xx *const chip)
+{
+ gpiod_set_value_cansleep(chip->hwen, 0);
+}
+
+static int aw200xx_probe_get_display_rows(struct device *dev,
+ struct aw200xx *chip)
+{
+ struct fwnode_handle *child;
+ u32 max_source = 0;
+
+ device_for_each_child_node(dev, child) {
+ u32 source;
+ int ret;
+
+ ret = fwnode_property_read_u32(child, "reg", &source);
+ if (ret || source >= chip->cdef->channels)
+ continue;
+
+ max_source = max(max_source, source);
+ }
+
+ if (max_source == 0)
+ return -EINVAL;
+
+ chip->display_rows = max_source / chip->cdef->display_size_columns + 1;
+
+ return 0;
+}
+
static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
{
struct fwnode_handle *child;
@@ -365,18 +414,10 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
int ret;
int i;
- ret = device_property_read_u32(dev, "awinic,display-rows",
- &chip->display_rows);
+ ret = aw200xx_probe_get_display_rows(dev, chip);
if (ret)
return dev_err_probe(dev, ret,
- "Failed to read 'display-rows' property\n");
-
- if (!chip->display_rows ||
- chip->display_rows > chip->cdef->display_size_rows_max) {
- return dev_err_probe(dev, -EINVAL,
- "Invalid leds display size %u\n",
- chip->display_rows);
- }
+ "No valid led definitions found\n");
current_max = aw200xx_imax_from_global(chip, AW200XX_IMAX_MAX_uA);
current_min = aw200xx_imax_from_global(chip, AW200XX_IMAX_MIN_uA);
@@ -421,6 +462,7 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
led->num = source;
led->chip = chip;
led->cdev.brightness_set_blocking = aw200xx_brightness_set;
+ led->cdev.max_brightness = AW200XX_FADE_MAX;
led->cdev.groups = dim_groups;
init_data.fwnode = child;
@@ -485,6 +527,7 @@ static const struct regmap_config aw200xx_regmap_config = {
.rd_table = &aw200xx_readable_table,
.wr_table = &aw200xx_writeable_table,
.cache_type = REGCACHE_MAPLE,
+ .disable_locking = true,
};
static int aw200xx_probe(struct i2c_client *client)
@@ -517,6 +560,14 @@ static int aw200xx_probe(struct i2c_client *client)
if (IS_ERR(chip->regmap))
return PTR_ERR(chip->regmap);
+ chip->hwen = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->hwen))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->hwen),
+ "Cannot get enable GPIO");
+
+ aw200xx_enable(chip);
+
ret = aw200xx_chip_check(chip);
if (ret)
return ret;
@@ -537,6 +588,9 @@ static int aw200xx_probe(struct i2c_client *client)
ret = aw200xx_chip_init(chip);
out_unlock:
+ if (ret)
+ aw200xx_disable(chip);
+
mutex_unlock(&chip->mutex);
return ret;
}
@@ -546,6 +600,7 @@ static void aw200xx_remove(struct i2c_client *client)
struct aw200xx *chip = i2c_get_clientdata(client);
aw200xx_chip_reset(chip);
+ aw200xx_disable(chip);
mutex_destroy(&chip->mutex);
}
@@ -567,10 +622,17 @@ static const struct aw200xx_chipdef aw20072_cdef = {
.display_size_columns = 12,
};
+static const struct aw200xx_chipdef aw20108_cdef = {
+ .channels = 108,
+ .display_size_rows_max = 9,
+ .display_size_columns = 12,
+};
+
static const struct i2c_device_id aw200xx_id[] = {
{ "aw20036" },
{ "aw20054" },
{ "aw20072" },
+ { "aw20108" },
{}
};
MODULE_DEVICE_TABLE(i2c, aw200xx_id);
@@ -579,6 +641,7 @@ static const struct of_device_id aw200xx_match_table[] = {
{ .compatible = "awinic,aw20036", .data = &aw20036_cdef, },
{ .compatible = "awinic,aw20054", .data = &aw20054_cdef, },
{ .compatible = "awinic,aw20072", .data = &aw20072_cdef, },
+ { .compatible = "awinic,aw20108", .data = &aw20108_cdef, },
{}
};
MODULE_DEVICE_TABLE(of, aw200xx_match_table);
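display_rows is now derived from the highest child-node "reg" value rather than a dedicated DT property: rows = max_source / display_size_columns + 1, so a hypothetical AW20072 board whose largest populated LED address is 71 gets 71 / 12 + 1 = 6 rows. The new AW200XX_REG_FADE2DIM() likewise replaces the old divide-and-clamp, rounding up so any nonzero brightness maps to a nonzero DIM value (sketch):

    dim = DIV_ROUND_UP(brightness * AW200XX_DIM_MAX, AW200XX_FADE_MAX);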
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 710c319ad..83fcd7b6a 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -172,6 +172,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct device *dev)
led.gpiod = devm_fwnode_gpiod_get(dev, child, NULL, GPIOD_ASIS,
NULL);
if (IS_ERR(led.gpiod)) {
+ dev_err_probe(dev, PTR_ERR(led.gpiod), "Failed to get GPIO '%pfw'\n",
+ child);
fwnode_handle_put(child);
return ERR_CAST(led.gpiod);
}
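dev_err_probe() is used here purely for its side effects: it logs the failure, except for -EPROBE_DEFER where it stays quiet and records a deferral reason (visible in /sys/kernel/debug/devices_deferred). Its return value, the error passed in, is discarded because this function must return the original ERR_PTR (the idiom, condensed):

    dev_err_probe(dev, PTR_ERR(led.gpiod),
                  "Failed to get GPIO '%pfw'\n", child);
    return ERR_CAST(led.gpiod);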
diff --git a/drivers/leds/leds-max5970.c b/drivers/leds/leds-max5970.c
new file mode 100644
index 000000000..56a584311
--- /dev/null
+++ b/drivers/leds/leds-max5970.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for leds in MAX5970 and MAX5978 IC
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/mfd/max5970.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define ldev_to_maxled(c) container_of(c, struct max5970_led, cdev)
+
+struct max5970_led {
+ struct device *dev;
+ struct regmap *regmap;
+ struct led_classdev cdev;
+ unsigned int index;
+};
+
+static int max5970_led_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct max5970_led *ddata = ldev_to_maxled(cdev);
+ int ret, val;
+
+ /* Set/clear corresponding bit for given led index */
+ val = !brightness ? BIT(ddata->index) : 0;
+
+ ret = regmap_update_bits(ddata->regmap, MAX5970_REG_LED_FLASH, BIT(ddata->index), val);
+ if (ret < 0)
+ dev_err(cdev->dev, "failed to set brightness: %d\n", ret);
+
+ return ret;
+}
+
+static int max5970_led_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *led_node, *child;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct max5970_led *ddata;
+ int ret = -ENODEV;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ led_node = device_get_named_child_node(dev->parent, "leds");
+ if (!led_node)
+ return -ENODEV;
+
+ fwnode_for_each_available_child_node(led_node, child) {
+ u32 reg;
+
+ if (fwnode_property_read_u32(child, "reg", &reg))
+ continue;
+
+ if (reg >= MAX5970_NUM_LEDS) {
+ dev_err_probe(dev, -EINVAL, "invalid LED (%u >= %d)\n", reg, MAX5970_NUM_LEDS);
+ continue;
+ }
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata) {
+ fwnode_handle_put(child);
+ return -ENOMEM;
+ }
+
+ ddata->index = reg;
+ ddata->regmap = regmap;
+ ddata->dev = dev;
+
+ if (fwnode_property_read_string(child, "label", &ddata->cdev.name))
+ ddata->cdev.name = fwnode_get_name(child);
+
+ ddata->cdev.max_brightness = 1;
+ ddata->cdev.brightness_set_blocking = max5970_led_set_brightness;
+ ddata->cdev.default_trigger = "none";
+
+ ret = devm_led_classdev_register(dev, &ddata->cdev);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, ret, "Failed to initialize LED %u\n", reg);
+ }
+ }
+
+ return ret;
+}
+
+static struct platform_driver max5970_led_driver = {
+ .driver = {
+ .name = "max5970-led",
+ },
+ .probe = max5970_led_probe,
+};
+module_platform_driver(max5970_led_driver);
+
+MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
+MODULE_AUTHOR("Naresh Solanki <Naresh.Solanki@9elements.com>");
+MODULE_DESCRIPTION("MAX5970_hot-swap controller LED driver");
+MODULE_LICENSE("GPL");
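Note the inverted polarity in the set_brightness callback above: setting a channel's bit in MAX5970_REG_LED_FLASH turns that LED off, clearing it turns the LED on. Condensed (sketch):

    /* on: clear the bit; off: set it */
    regmap_update_bits(regmap, MAX5970_REG_LED_FLASH,
                       BIT(index), brightness ? 0 : BIT(index));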
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 2b3bf1353..4e3936a39 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -54,7 +54,7 @@ static int led_pwm_set(struct led_classdev *led_cdev,
led_dat->pwmstate.duty_cycle = duty;
led_dat->pwmstate.enabled = true;
- return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
+ return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate);
}
__attribute__((nonnull))
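pwm_apply_might_sleep() is the 6.8 name for pwm_apply_state(); the rename makes the sleeping requirement explicit now that an atomic variant exists alongside it (sketch of the split):

    ret = pwm_apply_might_sleep(pwm, &state); /* process context, may sleep */
    ret = pwm_apply_atomic(pwm, &state);      /* atomic context, chips that support it */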
diff --git a/drivers/leds/leds-sun50i-a100.c b/drivers/leds/leds-sun50i-a100.c
new file mode 100644
index 000000000..62d21c3a3
--- /dev/null
+++ b/drivers/leds/leds-sun50i-a100.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021-2023 Samuel Holland <samuel@sholland.org>
+ *
+ * Partly based on drivers/leds/leds-turris-omnia.c, which is:
+ * Copyright (c) 2020 by Marek Behún <kabel@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define LEDC_CTRL_REG 0x0000
+#define LEDC_CTRL_REG_DATA_LENGTH GENMASK(28, 16)
+#define LEDC_CTRL_REG_RGB_MODE GENMASK(8, 6)
+#define LEDC_CTRL_REG_LEDC_EN BIT(0)
+#define LEDC_T01_TIMING_CTRL_REG 0x0004
+#define LEDC_T01_TIMING_CTRL_REG_T1H GENMASK(26, 21)
+#define LEDC_T01_TIMING_CTRL_REG_T1L GENMASK(20, 16)
+#define LEDC_T01_TIMING_CTRL_REG_T0H GENMASK(10, 6)
+#define LEDC_T01_TIMING_CTRL_REG_T0L GENMASK(5, 0)
+#define LEDC_RESET_TIMING_CTRL_REG 0x000c
+#define LEDC_RESET_TIMING_CTRL_REG_TR GENMASK(28, 16)
+#define LEDC_RESET_TIMING_CTRL_REG_LED_NUM GENMASK(9, 0)
+#define LEDC_DATA_REG 0x0014
+#define LEDC_DMA_CTRL_REG 0x0018
+#define LEDC_DMA_CTRL_REG_DMA_EN BIT(5)
+#define LEDC_DMA_CTRL_REG_FIFO_TRIG_LEVEL GENMASK(4, 0)
+#define LEDC_INT_CTRL_REG 0x001c
+#define LEDC_INT_CTRL_REG_GLOBAL_INT_EN BIT(5)
+#define LEDC_INT_CTRL_REG_FIFO_CPUREQ_INT_EN BIT(1)
+#define LEDC_INT_CTRL_REG_TRANS_FINISH_INT_EN BIT(0)
+#define LEDC_INT_STS_REG 0x0020
+#define LEDC_INT_STS_REG_FIFO_WLW GENMASK(15, 10)
+#define LEDC_INT_STS_REG_FIFO_CPUREQ_INT BIT(1)
+#define LEDC_INT_STS_REG_TRANS_FINISH_INT BIT(0)
+
+#define LEDC_FIFO_DEPTH 32U
+#define LEDC_MAX_LEDS 1024
+#define LEDC_CHANNELS_PER_LED 3 /* RGB */
+
+#define LEDS_TO_BYTES(n) ((n) * sizeof(u32))
+
+struct sun50i_a100_ledc_led {
+ struct led_classdev_mc mc_cdev;
+ struct mc_subled subled_info[LEDC_CHANNELS_PER_LED];
+ u32 addr;
+};
+
+#define to_ledc_led(mc) container_of(mc, struct sun50i_a100_ledc_led, mc_cdev)
+
+struct sun50i_a100_ledc_timing {
+ u32 t0h_ns;
+ u32 t0l_ns;
+ u32 t1h_ns;
+ u32 t1l_ns;
+ u32 treset_ns;
+};
+
+struct sun50i_a100_ledc {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct reset_control *reset;
+
+ u32 *buffer;
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_handle;
+ unsigned int pio_length;
+ unsigned int pio_offset;
+
+ spinlock_t lock;
+ unsigned int next_length;
+ bool xfer_active;
+
+ u32 format;
+ struct sun50i_a100_ledc_timing timing;
+
+ u32 max_addr;
+ u32 num_leds;
+ struct sun50i_a100_ledc_led leds[] __counted_by(num_leds);
+};
+
+static int sun50i_a100_ledc_dma_xfer(struct sun50i_a100_ledc *priv, unsigned int length)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+
+ desc = dmaengine_prep_slave_single(priv->dma_chan, priv->dma_handle,
+ LEDS_TO_BYTES(length), DMA_MEM_TO_DEV, 0);
+ if (!desc)
+ return -ENOMEM;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie))
+ return -EIO;
+
+ dma_async_issue_pending(priv->dma_chan);
+
+ return 0;
+}
+
+static void sun50i_a100_ledc_pio_xfer(struct sun50i_a100_ledc *priv, unsigned int fifo_used)
+{
+ unsigned int burst, length, offset;
+ u32 control;
+
+ length = priv->pio_length;
+ offset = priv->pio_offset;
+ burst = min(length, LEDC_FIFO_DEPTH - fifo_used);
+
+ iowrite32_rep(priv->base + LEDC_DATA_REG, priv->buffer + offset, burst);
+
+ if (burst < length) {
+ priv->pio_length = length - burst;
+ priv->pio_offset = offset + burst;
+
+ if (!offset) {
+ control = readl(priv->base + LEDC_INT_CTRL_REG);
+ control |= LEDC_INT_CTRL_REG_FIFO_CPUREQ_INT_EN;
+ writel(control, priv->base + LEDC_INT_CTRL_REG);
+ }
+ } else {
+ /* Disable the request IRQ once all data is written. */
+ control = readl(priv->base + LEDC_INT_CTRL_REG);
+ control &= ~LEDC_INT_CTRL_REG_FIFO_CPUREQ_INT_EN;
+ writel(control, priv->base + LEDC_INT_CTRL_REG);
+ }
+}
+
+static void sun50i_a100_ledc_start_xfer(struct sun50i_a100_ledc *priv, unsigned int length)
+{
+ bool use_dma = false;
+ u32 control;
+
+ if (priv->dma_chan && length > LEDC_FIFO_DEPTH) {
+ int ret;
+
+ ret = sun50i_a100_ledc_dma_xfer(priv, length);
+ if (ret)
+ dev_warn(priv->dev, "Failed to set up DMA (%d), using PIO\n", ret);
+ else
+ use_dma = true;
+ }
+
+ /* The DMA trigger level must be at least the burst length. */
+ control = FIELD_PREP(LEDC_DMA_CTRL_REG_DMA_EN, use_dma) |
+ FIELD_PREP_CONST(LEDC_DMA_CTRL_REG_FIFO_TRIG_LEVEL, LEDC_FIFO_DEPTH / 2);
+ writel(control, priv->base + LEDC_DMA_CTRL_REG);
+
+ control = readl(priv->base + LEDC_CTRL_REG);
+ control &= ~LEDC_CTRL_REG_DATA_LENGTH;
+ control |= FIELD_PREP(LEDC_CTRL_REG_DATA_LENGTH, length) | LEDC_CTRL_REG_LEDC_EN;
+ writel(control, priv->base + LEDC_CTRL_REG);
+
+ if (!use_dma) {
+ /* The FIFO is empty when starting a new transfer. */
+ unsigned int fifo_used = 0;
+
+ priv->pio_length = length;
+ priv->pio_offset = 0;
+
+ sun50i_a100_ledc_pio_xfer(priv, fifo_used);
+ }
+}
+
+static irqreturn_t sun50i_a100_ledc_irq(int irq, void *data)
+{
+ struct sun50i_a100_ledc *priv = data;
+ u32 status;
+
+ status = readl(priv->base + LEDC_INT_STS_REG);
+
+ if (status & LEDC_INT_STS_REG_TRANS_FINISH_INT) {
+ unsigned int next_length;
+
+ spin_lock(&priv->lock);
+
+ /* If another transfer is queued, dequeue and start it. */
+ next_length = priv->next_length;
+ if (next_length)
+ priv->next_length = 0;
+ else
+ priv->xfer_active = false;
+
+ spin_unlock(&priv->lock);
+
+ if (next_length)
+ sun50i_a100_ledc_start_xfer(priv, next_length);
+ } else if (status & LEDC_INT_STS_REG_FIFO_CPUREQ_INT) {
+ /* Continue the current transfer. */
+ sun50i_a100_ledc_pio_xfer(priv, FIELD_GET(LEDC_INT_STS_REG_FIFO_WLW, status));
+ }
+
+ /* Clear the W1C status bits. */
+ writel(status, priv->base + LEDC_INT_STS_REG);
+
+ return IRQ_HANDLED;
+}
+
+static void sun50i_a100_ledc_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct sun50i_a100_ledc *priv = dev_get_drvdata(cdev->dev->parent);
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct sun50i_a100_ledc_led *led = to_ledc_led(mc_cdev);
+ unsigned int next_length;
+ unsigned long flags;
+ bool xfer_active;
+
+ led_mc_calc_color_components(mc_cdev, brightness);
+
+ priv->buffer[led->addr] = led->subled_info[0].brightness << 16 |
+ led->subled_info[1].brightness << 8 |
+ led->subled_info[2].brightness;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Start, enqueue, or extend an enqueued transfer, as appropriate. */
+ next_length = max(priv->next_length, led->addr + 1);
+ xfer_active = priv->xfer_active;
+ if (xfer_active)
+ priv->next_length = next_length;
+ else
+ priv->xfer_active = true;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!xfer_active)
+ sun50i_a100_ledc_start_xfer(priv, next_length);
+}
+
+static const char *const sun50i_a100_ledc_formats[] = {
+ "rgb", "rbg", "grb", "gbr", "brg", "bgr",
+};
+
+static int sun50i_a100_ledc_parse_format(struct device *dev,
+ struct sun50i_a100_ledc *priv)
+{
+ const char *format = "grb";
+ u32 i;
+
+ device_property_read_string(dev, "allwinner,pixel-format", &format);
+
+ for (i = 0; i < ARRAY_SIZE(sun50i_a100_ledc_formats); i++) {
+ if (!strcmp(format, sun50i_a100_ledc_formats[i])) {
+ priv->format = i;
+ return 0;
+ }
+ }
+
+ return dev_err_probe(dev, -EINVAL, "Bad pixel format '%s'\n", format);
+}
+
+static void sun50i_a100_ledc_set_format(struct sun50i_a100_ledc *priv)
+{
+ u32 control;
+
+ control = readl(priv->base + LEDC_CTRL_REG);
+ control &= ~LEDC_CTRL_REG_RGB_MODE;
+ control |= FIELD_PREP(LEDC_CTRL_REG_RGB_MODE, priv->format);
+ writel(control, priv->base + LEDC_CTRL_REG);
+}
+
+static const struct sun50i_a100_ledc_timing sun50i_a100_ledc_default_timing = {
+ .t0h_ns = 336,
+ .t0l_ns = 840,
+ .t1h_ns = 882,
+ .t1l_ns = 294,
+ .treset_ns = 300000,
+};
+
+static int sun50i_a100_ledc_parse_timing(struct device *dev,
+ struct sun50i_a100_ledc *priv)
+{
+ struct sun50i_a100_ledc_timing *timing = &priv->timing;
+
+ *timing = sun50i_a100_ledc_default_timing;
+
+ device_property_read_u32(dev, "allwinner,t0h-ns", &timing->t0h_ns);
+ device_property_read_u32(dev, "allwinner,t0l-ns", &timing->t0l_ns);
+ device_property_read_u32(dev, "allwinner,t1h-ns", &timing->t1h_ns);
+ device_property_read_u32(dev, "allwinner,t1l-ns", &timing->t1l_ns);
+ device_property_read_u32(dev, "allwinner,treset-ns", &timing->treset_ns);
+
+ return 0;
+}
+
+static void sun50i_a100_ledc_set_timing(struct sun50i_a100_ledc *priv)
+{
+ const struct sun50i_a100_ledc_timing *timing = &priv->timing;
+ unsigned long mod_freq = clk_get_rate(priv->mod_clk);
+ u32 cycle_ns;
+ u32 control;
+
+ if (!mod_freq)
+ return;
+
+ cycle_ns = NSEC_PER_SEC / mod_freq;
+ control = FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T1H, timing->t1h_ns / cycle_ns) |
+ FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T1L, timing->t1l_ns / cycle_ns) |
+ FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T0H, timing->t0h_ns / cycle_ns) |
+ FIELD_PREP(LEDC_T01_TIMING_CTRL_REG_T0L, timing->t0l_ns / cycle_ns);
+ writel(control, priv->base + LEDC_T01_TIMING_CTRL_REG);
+
+ control = FIELD_PREP(LEDC_RESET_TIMING_CTRL_REG_TR, timing->treset_ns / cycle_ns) |
+ FIELD_PREP(LEDC_RESET_TIMING_CTRL_REG_LED_NUM, priv->max_addr);
+ writel(control, priv->base + LEDC_RESET_TIMING_CTRL_REG);
+}
+
+static int sun50i_a100_ledc_resume(struct device *dev)
+{
+ struct sun50i_a100_ledc *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret)
+ goto err_assert_reset;
+
+ ret = clk_prepare_enable(priv->mod_clk);
+ if (ret)
+ goto err_disable_bus_clk;
+
+ sun50i_a100_ledc_set_format(priv);
+ sun50i_a100_ledc_set_timing(priv);
+
+ writel(LEDC_INT_CTRL_REG_GLOBAL_INT_EN | LEDC_INT_CTRL_REG_TRANS_FINISH_INT_EN,
+ priv->base + LEDC_INT_CTRL_REG);
+
+ return 0;
+
+err_disable_bus_clk:
+ clk_disable_unprepare(priv->bus_clk);
+err_assert_reset:
+ reset_control_assert(priv->reset);
+
+ return ret;
+}
+
+static int sun50i_a100_ledc_suspend(struct device *dev)
+{
+ struct sun50i_a100_ledc *priv = dev_get_drvdata(dev);
+
+ /* Wait for all transfers to complete. */
+ for (;;) {
+ unsigned long flags;
+ bool xfer_active;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ xfer_active = priv->xfer_active;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (!xfer_active)
+ break;
+
+ msleep(1);
+ }
+
+ clk_disable_unprepare(priv->mod_clk);
+ clk_disable_unprepare(priv->bus_clk);
+ reset_control_assert(priv->reset);
+
+ return 0;
+}
+
+static void sun50i_a100_ledc_dma_cleanup(void *data)
+{
+ struct sun50i_a100_ledc *priv = data;
+
+ dma_release_channel(priv->dma_chan);
+}
+
+static int sun50i_a100_ledc_probe(struct platform_device *pdev)
+{
+ struct dma_slave_config dma_cfg = {};
+ struct led_init_data init_data = {};
+ struct sun50i_a100_ledc_led *led;
+ struct device *dev = &pdev->dev;
+ struct sun50i_a100_ledc *priv;
+ struct fwnode_handle *child;
+ struct resource *mem;
+ u32 max_addr = 0;
+ u32 num_leds = 0;
+ int irq, ret;
+
+ /*
+ * The maximum LED address must be known in sun50i_a100_ledc_resume() before
+ * class device registration, so parse and validate the subnodes up front.
+ */
+ device_for_each_child_node(dev, child) {
+ u32 addr, color;
+
+ ret = fwnode_property_read_u32(child, "reg", &addr);
+ if (ret || addr >= LEDC_MAX_LEDS) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, -EINVAL, "'reg' must be between 0 and %d\n",
+ LEDC_MAX_LEDS - 1);
+ }
+
+ ret = fwnode_property_read_u32(child, "color", &color);
+ if (ret || color != LED_COLOR_ID_RGB) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, -EINVAL, "'color' must be LED_COLOR_ID_RGB\n");
+ }
+
+ max_addr = max(max_addr, addr);
+ num_leds++;
+ }
+
+ if (!num_leds)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, struct_size(priv, leds, num_leds), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->max_addr = max_addr;
+ priv->num_leds = num_leds;
+ spin_lock_init(&priv->lock);
+ dev_set_drvdata(dev, priv);
+
+ ret = sun50i_a100_ledc_parse_format(dev, priv);
+ if (ret)
+ return ret;
+
+ ret = sun50i_a100_ledc_parse_timing(dev, priv);
+ if (ret)
+ return ret;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(priv->bus_clk))
+ return PTR_ERR(priv->bus_clk);
+
+ priv->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(priv->mod_clk))
+ return PTR_ERR(priv->mod_clk);
+
+ priv->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
+
+ priv->dma_chan = dma_request_chan(dev, "tx");
+ if (IS_ERR(priv->dma_chan)) {
+ if (PTR_ERR(priv->dma_chan) != -ENODEV)
+ return PTR_ERR(priv->dma_chan);
+
+ priv->dma_chan = NULL;
+
+ priv->buffer = devm_kzalloc(dev, LEDS_TO_BYTES(LEDC_MAX_LEDS), GFP_KERNEL);
+ if (!priv->buffer)
+ return -ENOMEM;
+ } else {
+ ret = devm_add_action_or_reset(dev, sun50i_a100_ledc_dma_cleanup, priv);
+ if (ret)
+ return ret;
+
+ dma_cfg.dst_addr = mem->start + LEDC_DATA_REG;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_cfg.dst_maxburst = LEDC_FIFO_DEPTH / 2;
+
+ ret = dmaengine_slave_config(priv->dma_chan, &dma_cfg);
+ if (ret)
+ return ret;
+
+ priv->buffer = dmam_alloc_attrs(dmaengine_get_dma_device(priv->dma_chan),
+ LEDS_TO_BYTES(LEDC_MAX_LEDS), &priv->dma_handle,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!priv->buffer)
+ return -ENOMEM;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, sun50i_a100_ledc_irq, 0, dev_name(dev), priv);
+ if (ret)
+ return ret;
+
+ ret = sun50i_a100_ledc_resume(dev);
+ if (ret)
+ return ret;
+
+ led = priv->leds;
+ device_for_each_child_node(dev, child) {
+ struct led_classdev *cdev;
+
+ /* The node was already validated above. */
+ fwnode_property_read_u32(child, "reg", &led->addr);
+
+ led->subled_info[0].color_index = LED_COLOR_ID_RED;
+ led->subled_info[0].channel = 0;
+ led->subled_info[1].color_index = LED_COLOR_ID_GREEN;
+ led->subled_info[1].channel = 1;
+ led->subled_info[2].color_index = LED_COLOR_ID_BLUE;
+ led->subled_info[2].channel = 2;
+
+ led->mc_cdev.num_colors = ARRAY_SIZE(led->subled_info);
+ led->mc_cdev.subled_info = led->subled_info;
+
+ cdev = &led->mc_cdev.led_cdev;
+ cdev->max_brightness = U8_MAX;
+ cdev->brightness_set = sun50i_a100_ledc_brightness_set;
+
+ init_data.fwnode = child;
+
+ ret = led_classdev_multicolor_register_ext(dev, &led->mc_cdev, &init_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register multicolor LED %u", led->addr);
+ goto err_put_child;
+ }
+
+ led++;
+ }
+
+ dev_info(dev, "Registered %u LEDs\n", num_leds);
+
+ return 0;
+
+err_put_child:
+ fwnode_handle_put(child);
+ while (led-- > priv->leds)
+ led_classdev_multicolor_unregister(&led->mc_cdev);
+ sun50i_a100_ledc_suspend(&pdev->dev);
+
+ return ret;
+}
+
+static void sun50i_a100_ledc_remove(struct platform_device *pdev)
+{
+ struct sun50i_a100_ledc *priv = platform_get_drvdata(pdev);
+
+ for (u32 i = 0; i < priv->num_leds; i++)
+ led_classdev_multicolor_unregister(&priv->leds[i].mc_cdev);
+ sun50i_a100_ledc_suspend(&pdev->dev);
+}
+
+static const struct of_device_id sun50i_a100_ledc_of_match[] = {
+ { .compatible = "allwinner,sun50i-a100-ledc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun50i_a100_ledc_of_match);
+
+static DEFINE_SIMPLE_DEV_PM_OPS(sun50i_a100_ledc_pm,
+ sun50i_a100_ledc_suspend,
+ sun50i_a100_ledc_resume);
+
+static struct platform_driver sun50i_a100_ledc_driver = {
+ .probe = sun50i_a100_ledc_probe,
+ .remove_new = sun50i_a100_ledc_remove,
+ .shutdown = sun50i_a100_ledc_remove,
+ .driver = {
+ .name = "sun50i-a100-ledc",
+ .of_match_table = sun50i_a100_ledc_of_match,
+ .pm = pm_ptr(&sun50i_a100_ledc_pm),
+ },
+};
+module_platform_driver(sun50i_a100_ledc_driver);
+
+MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>");
+MODULE_DESCRIPTION("Allwinner A100 LED controller driver");
+MODULE_LICENSE("GPL");
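The brightness path packs each LED as 0x00RRGGBB into the DMA buffer and funnels updates through a minimal queue: at most one transfer is active and one pending, and the pending length only grows. Compressed, the state machine guarded by priv->lock (sketch):

    priv->buffer[led->addr] = r << 16 | g << 8 | b;
    /* under priv->lock:
     *   !xfer_active -> start a transfer of (addr + 1) words now
     *   xfer_active  -> next_length = max(next_length, addr + 1)
     * the IRQ handler dequeues next_length on TRANS_FINISH, or
     * clears xfer_active when nothing is pending. */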
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index 360a376fa..d633ad519 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -81,7 +81,8 @@ static int syscon_led_probe(struct platform_device *pdev)
sled->map = map;
- if (of_property_read_u32(np, "offset", &sled->offset))
+ if (of_property_read_u32(np, "reg", &sled->offset) &&
+ of_property_read_u32(np, "offset", &sled->offset))
return -EINVAL;
if (of_property_read_u32(np, "mask", &sled->mask))
return -EINVAL;
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index e19074614..4f22f4224 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -638,19 +638,13 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.direction_output = tca6507_gpio_direction_output;
tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.parent = dev;
- err = gpiochip_add_data(&tca->gpio, tca);
+ err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
tca->gpio.ngpio = 0;
return err;
}
return 0;
}
-
-static void tca6507_remove_gpio(struct tca6507_chip *tca)
-{
- if (tca->gpio.ngpio)
- gpiochip_remove(&tca->gpio);
-}
#else /* CONFIG_GPIOLIB */
static int tca6507_probe_gpios(struct device *dev,
struct tca6507_chip *tca,
@@ -658,9 +652,6 @@ static int tca6507_probe_gpios(struct device *dev,
{
return 0;
}
-static void tca6507_remove_gpio(struct tca6507_chip *tca)
-{
-}
#endif /* CONFIG_GPIOLIB */
static struct tca6507_platform_data *
@@ -762,38 +753,25 @@ static int tca6507_probe(struct i2c_client *client)
l->led_cdev.brightness_set = tca6507_brightness_set;
l->led_cdev.blink_set = tca6507_blink_set;
l->bank = -1;
- err = led_classdev_register(dev, &l->led_cdev);
+ err = devm_led_classdev_register(dev, &l->led_cdev);
if (err < 0)
- goto exit;
+ return err;
}
}
err = tca6507_probe_gpios(dev, tca, pdata);
if (err)
- goto exit;
+ return err;
/* set all registers to known state - zero */
tca->reg_set = 0x7f;
schedule_work(&tca->work);
return 0;
-exit:
- while (i--) {
- if (tca->leds[i].led_cdev.name)
- led_classdev_unregister(&tca->leds[i].led_cdev);
- }
- return err;
}
static void tca6507_remove(struct i2c_client *client)
{
- int i;
struct tca6507_chip *tca = i2c_get_clientdata(client);
- struct tca6507_led *tca_leds = tca->leds;
- for (i = 0; i < NUM_LEDS; i++) {
- if (tca_leds[i].led_cdev.name)
- led_classdev_unregister(&tca_leds[i].led_cdev);
- }
- tca6507_remove_gpio(tca);
cancel_work_sync(&tca->work);
}
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
index a6a21f564..e66bd21b9 100644
--- a/drivers/leds/rgb/Kconfig
+++ b/drivers/leds/rgb/Kconfig
@@ -4,7 +4,7 @@ if LEDS_CLASS_MULTICOLOR
config LEDS_GROUP_MULTICOLOR
tristate "LEDs group multi-color support"
- depends on OF || COMPILE_TEST
+ depends on OF
help
This option enables support for monochrome LEDs that are grouped
into multicolor LEDs which is useful in the case where LEDs of
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index 46cd062b8..e1a81e010 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -51,8 +51,8 @@ static int led_pwm_mc_set(struct led_classdev *cdev,
priv->leds[i].state.duty_cycle = duty;
priv->leds[i].state.enabled = duty > 0;
- ret = pwm_apply_state(priv->leds[i].pwm,
- &priv->leds[i].state);
+ ret = pwm_apply_might_sleep(priv->leds[i].pwm,
+ &priv->leds[i].state);
if (ret)
break;
}
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 68d82a682..156b73d1f 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -552,9 +552,9 @@ static int lpg_parse_dtest(struct lpg *lpg)
ret = count;
goto err_malformed;
} else if (count != lpg->data->num_channels * 2) {
- dev_err(lpg->dev, "qcom,dtest needs to be %d items\n",
- lpg->data->num_channels * 2);
- return -EINVAL;
+ return dev_err_probe(lpg->dev, -EINVAL,
+ "qcom,dtest needs to be %d items\n",
+ lpg->data->num_channels * 2);
}
for (i = 0; i < lpg->data->num_channels; i++) {
@@ -574,8 +574,7 @@ static int lpg_parse_dtest(struct lpg *lpg)
return 0;
err_malformed:
- dev_err(lpg->dev, "malformed qcom,dtest\n");
- return ret;
+ return dev_err_probe(lpg->dev, ret, "malformed qcom,dtest\n");
}
static void lpg_apply_dtest(struct lpg_channel *chan)
@@ -977,9 +976,14 @@ static int lpg_pattern_mc_clear(struct led_classdev *cdev)
return lpg_pattern_clear(led);
}
+static inline struct lpg *lpg_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct lpg, pwm);
+}
+
static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ struct lpg *lpg = lpg_pwm_from_chip(chip);
struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
return chan->in_use ? -EBUSY : 0;
@@ -995,7 +999,7 @@ static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ struct lpg *lpg = lpg_pwm_from_chip(chip);
struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
int ret = 0;
@@ -1026,7 +1030,7 @@ out_unlock:
static int lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
- struct lpg *lpg = container_of(chip, struct lpg, pwm);
+ struct lpg *lpg = lpg_pwm_from_chip(chip);
struct lpg_channel *chan = &lpg->channels[pwm->hwpwm];
unsigned int resolution;
unsigned int pre_div;
@@ -1095,9 +1099,9 @@ static int lpg_add_pwm(struct lpg *lpg)
lpg->pwm.npwm = lpg->num_channels;
lpg->pwm.ops = &lpg_pwm_ops;
- ret = pwmchip_add(&lpg->pwm);
+ ret = devm_pwmchip_add(lpg->dev, &lpg->pwm);
if (ret)
- dev_err(lpg->dev, "failed to add PWM chip: ret %d\n", ret);
+ dev_err_probe(lpg->dev, ret, "failed to add PWM chip\n");
return ret;
}
@@ -1111,19 +1115,16 @@ static int lpg_parse_channel(struct lpg *lpg, struct device_node *np,
int ret;
ret = of_property_read_u32(np, "reg", &reg);
- if (ret || !reg || reg > lpg->num_channels) {
- dev_err(lpg->dev, "invalid \"reg\" of %pOFn\n", np);
- return -EINVAL;
- }
+ if (ret || !reg || reg > lpg->num_channels)
+ return dev_err_probe(lpg->dev, -EINVAL, "invalid \"reg\" of %pOFn\n", np);
chan = &lpg->channels[reg - 1];
chan->in_use = true;
ret = of_property_read_u32(np, "color", &color);
- if (ret < 0 && ret != -EINVAL) {
- dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
- return ret;
- }
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(lpg->dev, ret,
+ "failed to parse \"color\" of %pOF\n", np);
chan->color = color;
@@ -1146,10 +1147,9 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
int i;
ret = of_property_read_u32(np, "color", &color);
- if (ret < 0 && ret != -EINVAL) {
- dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np);
- return ret;
- }
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(lpg->dev, ret,
+ "failed to parse \"color\" of %pOF\n", np);
if (color == LED_COLOR_ID_RGB)
num_channels = of_get_available_child_count(np);
@@ -1226,7 +1226,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
else
ret = devm_led_classdev_register_ext(lpg->dev, &led->cdev, &init_data);
if (ret)
- dev_err(lpg->dev, "unable to register %s\n", cdev->name);
+ dev_err_probe(lpg->dev, ret, "unable to register %s\n", cdev->name);
return ret;
}
@@ -1272,10 +1272,9 @@ static int lpg_init_triled(struct lpg *lpg)
if (lpg->triled_has_src_sel) {
ret = of_property_read_u32(np, "qcom,power-source", &lpg->triled_src);
- if (ret || lpg->triled_src == 2 || lpg->triled_src > 3) {
- dev_err(lpg->dev, "invalid power source\n");
- return -EINVAL;
- }
+ if (ret || lpg->triled_src == 2 || lpg->triled_src > 3)
+ return dev_err_probe(lpg->dev, -EINVAL,
+ "invalid power source\n");
}
/* Disable automatic trickle charge LED */
@@ -1324,8 +1323,6 @@ static int lpg_probe(struct platform_device *pdev)
if (!lpg->data)
return -EINVAL;
- platform_set_drvdata(pdev, lpg);
-
lpg->dev = &pdev->dev;
mutex_init(&lpg->lock);
@@ -1363,13 +1360,6 @@ static int lpg_probe(struct platform_device *pdev)
return lpg_add_pwm(lpg);
}
-static void lpg_remove(struct platform_device *pdev)
-{
- struct lpg *lpg = platform_get_drvdata(pdev);
-
- pwmchip_remove(&lpg->pwm);
-}
-
static const struct lpg_data pm8916_pwm_data = {
.num_channels = 1,
.channels = (const struct lpg_channel_data[]) {
@@ -1529,7 +1519,6 @@ MODULE_DEVICE_TABLE(of, lpg_of_table);
static struct platform_driver lpg_driver = {
.probe = lpg_probe,
- .remove_new = lpg_remove,
.driver = {
.name = "qcom-spmi-lpg",
.of_match_table = lpg_of_table,
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 9b7fe5dd5..7f6a2352b 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -41,33 +41,30 @@ static irqreturn_t gpio_trig_irq(int irq, void *_led)
return IRQ_HANDLED;
}
-static ssize_t gpio_trig_brightness_show(struct device *dev,
+static ssize_t desired_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
- return sprintf(buf, "%u\n", gpio_data->desired_brightness);
+ return sysfs_emit(buf, "%u\n", gpio_data->desired_brightness);
}
-static ssize_t gpio_trig_brightness_store(struct device *dev,
+static ssize_t desired_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct gpio_trig_data *gpio_data = led_trigger_get_drvdata(dev);
- unsigned desired_brightness;
+ u8 desired_brightness;
int ret;
- ret = sscanf(buf, "%u", &desired_brightness);
- if (ret < 1 || desired_brightness > 255) {
- dev_err(dev, "invalid value\n");
- return -EINVAL;
- }
+ ret = kstrtou8(buf, 10, &desired_brightness);
+ if (ret)
+ return ret;
gpio_data->desired_brightness = desired_brightness;
return n;
}
-static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show,
- gpio_trig_brightness_store);
+static DEVICE_ATTR_RW(desired_brightness);
static struct attribute *gpio_trig_attrs[] = {
&dev_attr_desired_brightness.attr,
@@ -89,10 +86,7 @@ static int gpio_trig_activate(struct led_classdev *led)
* The generic property "trigger-sources" is followed,
* and we hope that this is a GPIO.
*/
- gpio_data->gpiod = fwnode_gpiod_get_index(dev->fwnode,
- "trigger-sources",
- 0, GPIOD_IN,
- "led-trigger");
+ gpio_data->gpiod = gpiod_get_optional(dev, "trigger-sources", GPIOD_IN);
if (IS_ERR(gpio_data->gpiod)) {
ret = PTR_ERR(gpio_data->gpiod);
kfree(gpio_data);
@@ -104,6 +98,8 @@ static int gpio_trig_activate(struct led_classdev *led)
return -EINVAL;
}
+ gpiod_set_consumer_name(gpio_data->gpiod, "led-trigger");
+
gpio_data->led = led;
led_set_trigger_data(led, gpio_data);
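/*
 * Editor's sketch of the sysfs idiom the ledtrig-gpio hunks above adopt.
 * DEVICE_ATTR_RW(foo) wires a read-write attribute to foo_show()/foo_store(),
 * which is why the handlers were renamed to match the attribute name;
 * kstrtou8() already rejects values outside 0..255, making the manual range
 * check redundant, and sysfs_emit() is the PAGE_SIZE-aware replacement for
 * sprintf() in show() callbacks. "example_value" is a stand-in for the
 * trigger's private data.
 */
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/sysfs.h>

static u8 example_value;

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", example_value);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t n)
{
	int ret = kstrtou8(buf, 10, &example_value);

	return ret ? ret : n;
}
static DEVICE_ATTR_RW(example);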
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 79719fc8a..df1b1d846 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -38,6 +38,16 @@
* tx - LED blinks on transmitted data
* rx - LED blinks on receive data
*
+ * Note: If the user selects a mode that the hardware does not support, the
+ * default behavior is to fall back to software control of the LED. However,
+ * not all hardware supports software control: in that case the LED callbacks
+ * brightness_set() and brightness_set_blocking() are NULL.
+ * hw_control_is_supported() should then use whatever means the hardware does
+ * provide to signal that the selected mode is unsupported, such as switching
+ * the LED off or using any hardware blink mode. If a software fallback isn't
+ * possible, we return -EOPNOTSUPP to the user but still store the selected
+ * mode. This is needed in case an intermediate unsupported mode is necessary
+ * to switch from one supported mode to another.
*/
struct led_netdev_data {
@@ -99,6 +109,18 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
trigger_data->link_speed == SPEED_1000)
blink_on = true;
+ if (test_bit(TRIGGER_NETDEV_LINK_2500, &trigger_data->mode) &&
+ trigger_data->link_speed == SPEED_2500)
+ blink_on = true;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_5000, &trigger_data->mode) &&
+ trigger_data->link_speed == SPEED_5000)
+ blink_on = true;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10000, &trigger_data->mode) &&
+ trigger_data->link_speed == SPEED_10000)
+ blink_on = true;
+
if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &trigger_data->mode) &&
trigger_data->duplex == DUPLEX_HALF)
blink_on = true;
@@ -289,6 +311,9 @@ static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
case TRIGGER_NETDEV_LINK_10:
case TRIGGER_NETDEV_LINK_100:
case TRIGGER_NETDEV_LINK_1000:
+ case TRIGGER_NETDEV_LINK_2500:
+ case TRIGGER_NETDEV_LINK_5000:
+ case TRIGGER_NETDEV_LINK_10000:
case TRIGGER_NETDEV_HALF_DUPLEX:
case TRIGGER_NETDEV_FULL_DUPLEX:
case TRIGGER_NETDEV_TX:
@@ -306,6 +331,7 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
size_t size, enum led_trigger_netdev_modes attr)
{
struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
unsigned long state, mode = trigger_data->mode;
int ret;
int bit;
@@ -319,6 +345,9 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
case TRIGGER_NETDEV_LINK_10:
case TRIGGER_NETDEV_LINK_100:
case TRIGGER_NETDEV_LINK_1000:
+ case TRIGGER_NETDEV_LINK_2500:
+ case TRIGGER_NETDEV_LINK_5000:
+ case TRIGGER_NETDEV_LINK_10000:
case TRIGGER_NETDEV_HALF_DUPLEX:
case TRIGGER_NETDEV_FULL_DUPLEX:
case TRIGGER_NETDEV_TX:
@@ -337,7 +366,10 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
if (test_bit(TRIGGER_NETDEV_LINK, &mode) &&
(test_bit(TRIGGER_NETDEV_LINK_10, &mode) ||
test_bit(TRIGGER_NETDEV_LINK_100, &mode) ||
- test_bit(TRIGGER_NETDEV_LINK_1000, &mode)))
+ test_bit(TRIGGER_NETDEV_LINK_1000, &mode) ||
+ test_bit(TRIGGER_NETDEV_LINK_2500, &mode) ||
+ test_bit(TRIGGER_NETDEV_LINK_5000, &mode) ||
+ test_bit(TRIGGER_NETDEV_LINK_10000, &mode)))
return -EINVAL;
cancel_delayed_work_sync(&trigger_data->work);
@@ -345,6 +377,10 @@ static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
trigger_data->mode = mode;
trigger_data->hw_control = can_hw_control(trigger_data);
+ if (!led_cdev->brightness_set && !led_cdev->brightness_set_blocking &&
+ !trigger_data->hw_control)
+ return -EOPNOTSUPP;
+
set_baseline_state(trigger_data);
return size;
@@ -367,6 +403,9 @@ DEFINE_NETDEV_TRIGGER(link, TRIGGER_NETDEV_LINK);
DEFINE_NETDEV_TRIGGER(link_10, TRIGGER_NETDEV_LINK_10);
DEFINE_NETDEV_TRIGGER(link_100, TRIGGER_NETDEV_LINK_100);
DEFINE_NETDEV_TRIGGER(link_1000, TRIGGER_NETDEV_LINK_1000);
+DEFINE_NETDEV_TRIGGER(link_2500, TRIGGER_NETDEV_LINK_2500);
+DEFINE_NETDEV_TRIGGER(link_5000, TRIGGER_NETDEV_LINK_5000);
+DEFINE_NETDEV_TRIGGER(link_10000, TRIGGER_NETDEV_LINK_10000);
DEFINE_NETDEV_TRIGGER(half_duplex, TRIGGER_NETDEV_HALF_DUPLEX);
DEFINE_NETDEV_TRIGGER(full_duplex, TRIGGER_NETDEV_FULL_DUPLEX);
DEFINE_NETDEV_TRIGGER(tx, TRIGGER_NETDEV_TX);
@@ -425,6 +464,9 @@ static struct attribute *netdev_trig_attrs[] = {
&dev_attr_link_10.attr,
&dev_attr_link_100.attr,
&dev_attr_link_1000.attr,
+ &dev_attr_link_2500.attr,
+ &dev_attr_link_5000.attr,
+ &dev_attr_link_10000.attr,
&dev_attr_full_duplex.attr,
&dev_attr_half_duplex.attr,
&dev_attr_rx.attr,
@@ -522,6 +564,9 @@ static void netdev_trig_work(struct work_struct *work)
test_bit(TRIGGER_NETDEV_LINK_10, &trigger_data->mode) ||
test_bit(TRIGGER_NETDEV_LINK_100, &trigger_data->mode) ||
test_bit(TRIGGER_NETDEV_LINK_1000, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_LINK_2500, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_LINK_5000, &trigger_data->mode) ||
+ test_bit(TRIGGER_NETDEV_LINK_10000, &trigger_data->mode) ||
test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &trigger_data->mode) ||
test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &trigger_data->mode);
interval = jiffies_to_msecs(
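/*
 * Editor's sketch of the validation extended in the netdev store hunk above:
 * the aggregate "link" mode and the speed-specific link_* modes are mutually
 * exclusive. This assumes the TRIGGER_NETDEV_LINK_* enum values are
 * consecutive from LINK_10 to LINK_10000, which lets a loop stand in for the
 * growing test_bit() chain.
 */
#include <linux/bitops.h>
#include <linux/leds.h>

static bool example_link_modes_conflict(unsigned long mode)
{
	int bit;

	if (!test_bit(TRIGGER_NETDEV_LINK, &mode))
		return false;

	for (bit = TRIGGER_NETDEV_LINK_10; bit <= TRIGGER_NETDEV_LINK_10000; bit++)
		if (test_bit(bit, &mode))
			return true;

	return false;
}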
diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
index 3e69a7bde..8cf1485e8 100644
--- a/drivers/leds/trigger/ledtrig-tty.c
+++ b/drivers/leds/trigger/ledtrig-tty.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/leds.h>
#include <linux/module.h>
@@ -12,15 +13,45 @@
struct ledtrig_tty_data {
struct led_classdev *led_cdev;
struct delayed_work dwork;
- struct mutex mutex;
+ struct completion sysfs;
const char *ttyname;
struct tty_struct *tty;
int rx, tx;
+ bool mode_rx;
+ bool mode_tx;
+ bool mode_cts;
+ bool mode_dsr;
+ bool mode_dcd;
+ bool mode_rng;
};
-static void ledtrig_tty_restart(struct ledtrig_tty_data *trigger_data)
+/* Indicates which state the LED should now display */
+enum led_trigger_tty_state {
+ TTY_LED_BLINK,
+ TTY_LED_ENABLE,
+ TTY_LED_DISABLE,
+};
+
+enum led_trigger_tty_modes {
+ TRIGGER_TTY_RX = 0,
+ TRIGGER_TTY_TX,
+ TRIGGER_TTY_CTS,
+ TRIGGER_TTY_DSR,
+ TRIGGER_TTY_DCD,
+ TRIGGER_TTY_RNG,
+};
+
+static int ledtrig_tty_wait_for_completion(struct device *dev)
{
- schedule_delayed_work(&trigger_data->dwork, 0);
+ struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+ int ret;
+
+ ret = wait_for_completion_timeout(&trigger_data->sysfs,
+ msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 20));
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return ret;
}
static ssize_t ttyname_show(struct device *dev,
@@ -28,14 +59,16 @@ static ssize_t ttyname_show(struct device *dev,
{
struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
ssize_t len = 0;
+ int completion;
- mutex_lock(&trigger_data->mutex);
+ reinit_completion(&trigger_data->sysfs);
+ completion = ledtrig_tty_wait_for_completion(dev);
+ if (completion < 0)
+ return completion;
if (trigger_data->ttyname)
len = sprintf(buf, "%s\n", trigger_data->ttyname);
- mutex_unlock(&trigger_data->mutex);
-
return len;
}
@@ -46,7 +79,7 @@ static ssize_t ttyname_store(struct device *dev,
struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
char *ttyname;
ssize_t ret = size;
- bool running;
+ int completion;
if (size > 0 && buf[size - 1] == '\n')
size -= 1;
@@ -59,9 +92,10 @@ static ssize_t ttyname_store(struct device *dev,
ttyname = NULL;
}
- mutex_lock(&trigger_data->mutex);
-
- running = trigger_data->ttyname != NULL;
+ reinit_completion(&trigger_data->sysfs);
+ completion = ledtrig_tty_wait_for_completion(dev);
+ if (completion < 0)
+ return completion;
kfree(trigger_data->ttyname);
tty_kref_put(trigger_data->tty);
@@ -69,29 +103,107 @@ static ssize_t ttyname_store(struct device *dev,
trigger_data->ttyname = ttyname;
- mutex_unlock(&trigger_data->mutex);
-
- if (ttyname && !running)
- ledtrig_tty_restart(trigger_data);
-
return ret;
}
static DEVICE_ATTR_RW(ttyname);
+static ssize_t ledtrig_tty_attr_show(struct device *dev, char *buf,
+ enum led_trigger_tty_modes attr)
+{
+ struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+ bool state;
+
+ switch (attr) {
+ case TRIGGER_TTY_RX:
+ state = trigger_data->mode_rx;
+ break;
+ case TRIGGER_TTY_TX:
+ state = trigger_data->mode_tx;
+ break;
+ case TRIGGER_TTY_CTS:
+ state = trigger_data->mode_cts;
+ break;
+ case TRIGGER_TTY_DSR:
+ state = trigger_data->mode_dsr;
+ break;
+ case TRIGGER_TTY_DCD:
+ state = trigger_data->mode_dcd;
+ break;
+ case TRIGGER_TTY_RNG:
+ state = trigger_data->mode_rng;
+ break;
+ }
+
+ return sysfs_emit(buf, "%u\n", state);
+}
+
+static ssize_t ledtrig_tty_attr_store(struct device *dev, const char *buf,
+ size_t size, enum led_trigger_tty_modes attr)
+{
+ struct ledtrig_tty_data *trigger_data = led_trigger_get_drvdata(dev);
+ bool state;
+ int ret;
+
+ ret = kstrtobool(buf, &state);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case TRIGGER_TTY_RX:
+ trigger_data->mode_rx = state;
+ break;
+ case TRIGGER_TTY_TX:
+ trigger_data->mode_tx = state;
+ break;
+ case TRIGGER_TTY_CTS:
+ trigger_data->mode_cts = state;
+ break;
+ case TRIGGER_TTY_DSR:
+ trigger_data->mode_dsr = state;
+ break;
+ case TRIGGER_TTY_DCD:
+ trigger_data->mode_dcd = state;
+ break;
+ case TRIGGER_TTY_RNG:
+ trigger_data->mode_rng = state;
+ break;
+ }
+
+ return size;
+}
+
+#define DEFINE_TTY_TRIGGER(trigger_name, trigger) \
+ static ssize_t trigger_name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ return ledtrig_tty_attr_show(dev, buf, trigger); \
+ } \
+ static ssize_t trigger_name##_store(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t size) \
+ { \
+ return ledtrig_tty_attr_store(dev, buf, size, trigger); \
+ } \
+ static DEVICE_ATTR_RW(trigger_name)
+
+DEFINE_TTY_TRIGGER(rx, TRIGGER_TTY_RX);
+DEFINE_TTY_TRIGGER(tx, TRIGGER_TTY_TX);
+DEFINE_TTY_TRIGGER(cts, TRIGGER_TTY_CTS);
+DEFINE_TTY_TRIGGER(dsr, TRIGGER_TTY_DSR);
+DEFINE_TTY_TRIGGER(dcd, TRIGGER_TTY_DCD);
+DEFINE_TTY_TRIGGER(rng, TRIGGER_TTY_RNG);
+
static void ledtrig_tty_work(struct work_struct *work)
{
struct ledtrig_tty_data *trigger_data =
container_of(work, struct ledtrig_tty_data, dwork.work);
- struct serial_icounter_struct icount;
+ enum led_trigger_tty_state state = TTY_LED_DISABLE;
+ unsigned long interval = LEDTRIG_TTY_INTERVAL;
+ bool invert = false;
+ int status;
int ret;
- mutex_lock(&trigger_data->mutex);
-
- if (!trigger_data->ttyname) {
- /* exit without rescheduling */
- mutex_unlock(&trigger_data->mutex);
- return;
- }
+ if (!trigger_data->ttyname)
+ goto out;
/* try to get the tty corresponding to $ttyname */
if (!trigger_data->tty) {
@@ -115,32 +227,83 @@ static void ledtrig_tty_work(struct work_struct *work)
trigger_data->tty = tty;
}
- ret = tty_get_icount(trigger_data->tty, &icount);
- if (ret) {
- dev_info(trigger_data->tty->dev, "Failed to get icount, stopped polling\n");
- mutex_unlock(&trigger_data->mutex);
- return;
+ status = tty_get_tiocm(trigger_data->tty);
+ if (status > 0) {
+ if (trigger_data->mode_cts) {
+ if (status & TIOCM_CTS)
+ state = TTY_LED_ENABLE;
+ }
+
+ if (trigger_data->mode_dsr) {
+ if (status & TIOCM_DSR)
+ state = TTY_LED_ENABLE;
+ }
+
+ if (trigger_data->mode_dcd) {
+ if (status & TIOCM_CAR)
+ state = TTY_LED_ENABLE;
+ }
+
+ if (trigger_data->mode_rng) {
+ if (status & TIOCM_RNG)
+ state = TTY_LED_ENABLE;
+ }
}
- if (icount.rx != trigger_data->rx ||
- icount.tx != trigger_data->tx) {
- unsigned long interval = LEDTRIG_TTY_INTERVAL;
+ /*
+ * The evaluation of rx/tx must be done after the evaluation
+ * of TIOCM_*, because rx/tx has priority.
+ */
+ if (trigger_data->mode_rx || trigger_data->mode_tx) {
+ struct serial_icounter_struct icount;
- led_blink_set_oneshot(trigger_data->led_cdev, &interval,
- &interval, 0);
+ ret = tty_get_icount(trigger_data->tty, &icount);
+ if (ret)
+ goto out;
- trigger_data->rx = icount.rx;
- trigger_data->tx = icount.tx;
+ if (trigger_data->mode_tx && (icount.tx != trigger_data->tx)) {
+ trigger_data->tx = icount.tx;
+ invert = state == TTY_LED_ENABLE;
+ state = TTY_LED_BLINK;
+ }
+
+ if (trigger_data->mode_rx && (icount.rx != trigger_data->rx)) {
+ trigger_data->rx = icount.rx;
+ invert = state == TTY_LED_ENABLE;
+ state = TTY_LED_BLINK;
+ }
}
out:
- mutex_unlock(&trigger_data->mutex);
+ switch (state) {
+ case TTY_LED_BLINK:
+ led_blink_set_oneshot(trigger_data->led_cdev, &interval,
+ &interval, invert);
+ break;
+ case TTY_LED_ENABLE:
+ led_set_brightness(trigger_data->led_cdev,
+ trigger_data->led_cdev->blink_brightness);
+ break;
+ case TTY_LED_DISABLE:
+ fallthrough;
+ default:
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ break;
+ }
+
+ complete_all(&trigger_data->sysfs);
schedule_delayed_work(&trigger_data->dwork,
msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 2));
}
static struct attribute *ledtrig_tty_attrs[] = {
&dev_attr_ttyname.attr,
+ &dev_attr_rx.attr,
+ &dev_attr_tx.attr,
+ &dev_attr_cts.attr,
+ &dev_attr_dsr.attr,
+ &dev_attr_dcd.attr,
+ &dev_attr_rng.attr,
NULL
};
ATTRIBUTE_GROUPS(ledtrig_tty);
@@ -153,11 +316,17 @@ static int ledtrig_tty_activate(struct led_classdev *led_cdev)
if (!trigger_data)
return -ENOMEM;
+ /* Enable default rx/tx mode */
+ trigger_data->mode_rx = true;
+ trigger_data->mode_tx = true;
+
led_set_trigger_data(led_cdev, trigger_data);
INIT_DELAYED_WORK(&trigger_data->dwork, ledtrig_tty_work);
trigger_data->led_cdev = led_cdev;
- mutex_init(&trigger_data->mutex);
+ init_completion(&trigger_data->sysfs);
+
+ schedule_delayed_work(&trigger_data->dwork, 0);
return 0;
}
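/*
 * Editor's sketch (illustrative names) of the synchronization scheme the
 * ledtrig-tty hunks above introduce: instead of taking a mutex, the sysfs
 * handlers re-arm a completion and wait for the polling worker, which calls
 * complete_all() at the end of each cycle; a stalled worker now surfaces as
 * -ETIMEDOUT rather than a blocked reader. EXAMPLE_INTERVAL stands in for
 * LEDTRIG_TTY_INTERVAL, and the 20x margin mirrors the hunk.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define EXAMPLE_INTERVAL 50	/* ms, assumed */

static int example_wait_for_worker(struct completion *done)
{
	unsigned long left;

	reinit_completion(done);
	left = wait_for_completion_timeout(done,
				msecs_to_jiffies(EXAMPLE_INTERVAL * 20));

	return left ? 0 : -ETIMEDOUT;
}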
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index bc2e265cb..42940108a 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -8,18 +8,6 @@ menuconfig MAILBOX
if MAILBOX
-config APPLE_MAILBOX
- tristate "Apple Mailbox driver"
- depends on ARCH_APPLE || (ARM64 && COMPILE_TEST)
- default ARCH_APPLE
- help
- Apple SoCs have various co-processors required for certain
- peripherals to work (NVMe, display controller, etc.). This
- driver adds support for the mailbox controller used to
- communicate with those.
-
- Say Y here if you have a Apple SoC.
-
config ARM_MHU
tristate "ARM MHU Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index fc9376117..18793e6ca 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -60,5 +60,3 @@ obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
-
-obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
deleted file mode 100644
index 2a3e8d8ff..000000000
--- a/drivers/mailbox/apple-mailbox.c
+++ /dev/null
@@ -1,441 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only OR MIT
-/*
- * Apple mailbox driver
- *
- * Copyright (C) 2021 The Asahi Linux Contributors
- *
- * This driver adds support for two mailbox variants (called ASC and M3 by
- * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
- * exchange 64+32 bit messages between the main CPU and a co-processor.
- * Various coprocessors implement different IPC protocols based on these simple
- * messages and shared memory buffers.
- *
- * Both the main CPU and the co-processor see the same set of registers but
- * the first FIFO (A2I) is always used to transfer messages from the application
- * processor (us) to the I/O processor and the second one (I2A) for the
- * other direction.
- */
-
-#include <linux/apple-mailbox.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/gfp.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/mailbox_controller.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
-#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
-
-#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
-#define APPLE_ASC_MBOX_A2I_SEND0 0x800
-#define APPLE_ASC_MBOX_A2I_SEND1 0x808
-#define APPLE_ASC_MBOX_A2I_RECV0 0x810
-#define APPLE_ASC_MBOX_A2I_RECV1 0x818
-
-#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
-#define APPLE_ASC_MBOX_I2A_SEND0 0x820
-#define APPLE_ASC_MBOX_I2A_SEND1 0x828
-#define APPLE_ASC_MBOX_I2A_RECV0 0x830
-#define APPLE_ASC_MBOX_I2A_RECV1 0x838
-
-#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
-#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
-
-#define APPLE_M3_MBOX_A2I_CONTROL 0x50
-#define APPLE_M3_MBOX_A2I_SEND0 0x60
-#define APPLE_M3_MBOX_A2I_SEND1 0x68
-#define APPLE_M3_MBOX_A2I_RECV0 0x70
-#define APPLE_M3_MBOX_A2I_RECV1 0x78
-
-#define APPLE_M3_MBOX_I2A_CONTROL 0x80
-#define APPLE_M3_MBOX_I2A_SEND0 0x90
-#define APPLE_M3_MBOX_I2A_SEND1 0x98
-#define APPLE_M3_MBOX_I2A_RECV0 0xa0
-#define APPLE_M3_MBOX_I2A_RECV1 0xa8
-
-#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
-#define APPLE_M3_MBOX_IRQ_ACK 0x4c
-#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
-#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
-#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
-#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
-
-#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
-#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
-#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
-#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
-#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
-
-struct apple_mbox_hw {
- unsigned int control_full;
- unsigned int control_empty;
-
- unsigned int a2i_control;
- unsigned int a2i_send0;
- unsigned int a2i_send1;
-
- unsigned int i2a_control;
- unsigned int i2a_recv0;
- unsigned int i2a_recv1;
-
- bool has_irq_controls;
- unsigned int irq_enable;
- unsigned int irq_ack;
- unsigned int irq_bit_recv_not_empty;
- unsigned int irq_bit_send_empty;
-};
-
-struct apple_mbox {
- void __iomem *regs;
- const struct apple_mbox_hw *hw;
-
- int irq_recv_not_empty;
- int irq_send_empty;
-
- struct mbox_chan chan;
-
- struct device *dev;
- struct mbox_controller controller;
- spinlock_t rx_lock;
-};
-
-static const struct of_device_id apple_mbox_of_match[];
-
-static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
-{
- u32 mbox_ctrl =
- readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
-
- return !(mbox_ctrl & apple_mbox->hw->control_full);
-}
-
-static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox)
-{
- u32 mbox_ctrl =
- readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
-
- return mbox_ctrl & apple_mbox->hw->control_empty;
-}
-
-static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
- struct apple_mbox_msg *msg)
-{
- if (!apple_mbox_hw_can_send(apple_mbox))
- return -EBUSY;
-
- dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
-
- writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
- writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
- apple_mbox->regs + apple_mbox->hw->a2i_send1);
-
- return 0;
-}
-
-static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
-{
- u32 mbox_ctrl =
- readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
-
- return !(mbox_ctrl & apple_mbox->hw->control_empty);
-}
-
-static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
- struct apple_mbox_msg *msg)
-{
- if (!apple_mbox_hw_can_recv(apple_mbox))
- return -ENOMSG;
-
- msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
- msg->msg1 = FIELD_GET(
- APPLE_MBOX_MSG1_MSG,
- readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
-
- dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
-
- return 0;
-}
-
-static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
- struct apple_mbox_msg *msg = data;
- int ret;
-
- ret = apple_mbox_hw_send(apple_mbox, msg);
- if (ret)
- return ret;
-
- /*
- * The interrupt is level triggered and will keep firing as long as the
- * FIFO is empty. It will also keep firing if the FIFO was empty
- * at any point in the past until it has been acknowledged at the
- * mailbox level. By acknowledging it here we can ensure that we will
- * only get the interrupt once the FIFO has been cleared again.
- * If the FIFO is already empty before the ack it will fire again
- * immediately after the ack.
- */
- if (apple_mbox->hw->has_irq_controls) {
- writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
- apple_mbox->regs + apple_mbox->hw->irq_ack);
- }
- enable_irq(apple_mbox->irq_send_empty);
-
- return 0;
-}
-
-static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
-{
- struct apple_mbox *apple_mbox = data;
-
- /*
- * We don't need to acknowledge the interrupt at the mailbox level
- * here even if supported by the hardware. It will keep firing but that
- * doesn't matter since it's disabled at the main interrupt controller.
- * apple_mbox_chan_send_data will acknowledge it before enabling
- * it at the main controller again.
- */
- disable_irq_nosync(apple_mbox->irq_send_empty);
- mbox_chan_txdone(&apple_mbox->chan, 0);
- return IRQ_HANDLED;
-}
-
-static int apple_mbox_poll(struct apple_mbox *apple_mbox)
-{
- struct apple_mbox_msg msg;
- int ret = 0;
-
- while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) {
- mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
- ret++;
- }
-
- /*
- * The interrupt will keep firing even if there are no more messages
- * unless we also acknowledge it at the mailbox level here.
- * There's no race if a message comes in between the check in the while
- * loop above and the ack below: If a new messages arrives inbetween
- * those two the interrupt will just fire again immediately after the
- * ack since it's level triggered.
- */
- if (apple_mbox->hw->has_irq_controls) {
- writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
- apple_mbox->regs + apple_mbox->hw->irq_ack);
- }
-
- return ret;
-}
-
-static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
-{
- struct apple_mbox *apple_mbox = data;
-
- spin_lock(&apple_mbox->rx_lock);
- apple_mbox_poll(apple_mbox);
- spin_unlock(&apple_mbox->rx_lock);
-
- return IRQ_HANDLED;
-}
-
-static bool apple_mbox_chan_peek_data(struct mbox_chan *chan)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&apple_mbox->rx_lock, flags);
- ret = apple_mbox_poll(apple_mbox);
- spin_unlock_irqrestore(&apple_mbox->rx_lock, flags);
-
- return ret > 0;
-}
-
-static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
- unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
-
- while (time_before(jiffies, deadline)) {
- if (apple_mbox_hw_send_empty(apple_mbox)) {
- mbox_chan_txdone(&apple_mbox->chan, 0);
- return 0;
- }
-
- udelay(1);
- }
-
- return -ETIME;
-}
-
-static int apple_mbox_chan_startup(struct mbox_chan *chan)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
-
- /*
- * Only some variants of this mailbox HW provide interrupt control
- * at the mailbox level. We therefore need to handle enabling/disabling
- * interrupts at the main interrupt controller anyway for hardware that
- * doesn't. Just always keep the interrupts we care about enabled at
- * the mailbox level so that both hardware revisions behave almost
- * the same.
- */
- if (apple_mbox->hw->has_irq_controls) {
- writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
- apple_mbox->hw->irq_bit_send_empty,
- apple_mbox->regs + apple_mbox->hw->irq_enable);
- }
-
- enable_irq(apple_mbox->irq_recv_not_empty);
- return 0;
-}
-
-static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
-{
- struct apple_mbox *apple_mbox = chan->con_priv;
-
- disable_irq(apple_mbox->irq_recv_not_empty);
-}
-
-static const struct mbox_chan_ops apple_mbox_ops = {
- .send_data = apple_mbox_chan_send_data,
- .peek_data = apple_mbox_chan_peek_data,
- .flush = apple_mbox_chan_flush,
- .startup = apple_mbox_chan_startup,
- .shutdown = apple_mbox_chan_shutdown,
-};
-
-static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *args)
-{
- if (args->args_count != 0)
- return ERR_PTR(-EINVAL);
-
- return &mbox->chans[0];
-}
-
-static int apple_mbox_probe(struct platform_device *pdev)
-{
- int ret;
- const struct of_device_id *match;
- char *irqname;
- struct apple_mbox *mbox;
- struct device *dev = &pdev->dev;
-
- match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
- if (!match)
- return -EINVAL;
- if (!match->data)
- return -EINVAL;
-
- mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
- platform_set_drvdata(pdev, mbox);
-
- mbox->dev = dev;
- mbox->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mbox->regs))
- return PTR_ERR(mbox->regs);
-
- mbox->hw = match->data;
- mbox->irq_recv_not_empty =
- platform_get_irq_byname(pdev, "recv-not-empty");
- if (mbox->irq_recv_not_empty < 0)
- return -ENODEV;
-
- mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
- if (mbox->irq_send_empty < 0)
- return -ENODEV;
-
- mbox->controller.dev = mbox->dev;
- mbox->controller.num_chans = 1;
- mbox->controller.chans = &mbox->chan;
- mbox->controller.ops = &apple_mbox_ops;
- mbox->controller.txdone_irq = true;
- mbox->controller.of_xlate = apple_mbox_of_xlate;
- mbox->chan.con_priv = mbox;
- spin_lock_init(&mbox->rx_lock);
-
- irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
- if (!irqname)
- return -ENOMEM;
-
- ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
- apple_mbox_recv_irq,
- IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
- mbox);
- if (ret)
- return ret;
-
- irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
- if (!irqname)
- return -ENOMEM;
-
- ret = devm_request_irq(dev, mbox->irq_send_empty,
- apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
- irqname, mbox);
- if (ret)
- return ret;
-
- return devm_mbox_controller_register(dev, &mbox->controller);
-}
-
-static const struct apple_mbox_hw apple_mbox_asc_hw = {
- .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
- .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
-
- .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
- .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
- .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
-
- .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
- .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
- .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
-
- .has_irq_controls = false,
-};
-
-static const struct apple_mbox_hw apple_mbox_m3_hw = {
- .control_full = APPLE_M3_MBOX_CONTROL_FULL,
- .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
-
- .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
- .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
- .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
-
- .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
- .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
- .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
-
- .has_irq_controls = true,
- .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
- .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
- .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
- .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
-};
-
-static const struct of_device_id apple_mbox_of_match[] = {
- { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
- { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
- {}
-};
-MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
-
-static struct platform_driver apple_mbox_driver = {
- .driver = {
- .name = "apple-mailbox",
- .of_match_table = apple_mbox_of_match,
- },
- .probe = apple_mbox_probe,
-};
-module_platform_driver(apple_mbox_driver);
-
-MODULE_LICENSE("Dual MIT/GPL");
-MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
-MODULE_DESCRIPTION("Apple Mailbox driver");
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index a2b8839d4..e3e28a4f7 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1650,7 +1650,7 @@ fail:
return ret;
}
-static int flexrm_mbox_remove(struct platform_device *pdev)
+static void flexrm_mbox_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
@@ -1661,8 +1661,6 @@ static int flexrm_mbox_remove(struct platform_device *pdev)
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
-
- return 0;
}
static const struct of_device_id flexrm_mbox_of_match[] = {
@@ -1677,7 +1675,7 @@ static struct platform_driver flexrm_mbox_driver = {
.of_match_table = flexrm_mbox_of_match,
},
.probe = flexrm_mbox_probe,
- .remove = flexrm_mbox_remove,
+ .remove_new = flexrm_mbox_remove,
};
module_platform_driver(flexrm_mbox_driver);
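/*
 * Editor's sketch of the .remove_new conversion in the flexrm hunk above and
 * repeated in the mailbox drivers below. The int returned by the old .remove
 * callback was ignored by the platform core (the device is unbound
 * regardless), so the callback becomes void and is registered via
 * .remove_new. Names are illustrative.
 */
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* release resources; there is no error status to propagate */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};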
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 778faeced..1768d3d5a 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -1605,7 +1605,7 @@ cleanup:
return err;
}
-static int pdc_remove(struct platform_device *pdev)
+static void pdc_remove(struct platform_device *pdev)
{
struct pdc_state *pdcs = platform_get_drvdata(pdev);
@@ -1617,12 +1617,11 @@ static int pdc_remove(struct platform_device *pdev)
dma_pool_destroy(pdcs->rx_buf_pool);
dma_pool_destroy(pdcs->ring_pool);
- return 0;
}
static struct platform_driver pdc_mbox_driver = {
.probe = pdc_probe,
- .remove = pdc_remove,
+ .remove_new = pdc_remove,
.driver = {
.name = "brcm-iproc-pdc-mbox",
.of_match_table = pdc_mbox_of_match,
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 0af739ab5..656171362 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -903,13 +903,11 @@ disable_runtime_pm:
return ret;
}
-static int imx_mu_remove(struct platform_device *pdev)
+static void imx_mu_remove(struct platform_device *pdev)
{
struct imx_mu_priv *priv = platform_get_drvdata(pdev);
pm_runtime_disable(priv->dev);
-
- return 0;
}
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
@@ -1070,7 +1068,7 @@ static const struct dev_pm_ops imx_mu_pm_ops = {
static struct platform_driver imx_mu_driver = {
.probe = imx_mu_probe,
- .remove = imx_mu_remove,
+ .remove_new = imx_mu_remove,
.driver = {
.name = "imx_mu",
.of_match_table = imx_mu_dt_ids,
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 22d6018ce..3386b4e72 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -418,7 +418,7 @@ static int mbox_test_probe(struct platform_device *pdev)
return 0;
}
-static int mbox_test_remove(struct platform_device *pdev)
+static void mbox_test_remove(struct platform_device *pdev)
{
struct mbox_test_device *tdev = platform_get_drvdata(pdev);
@@ -428,8 +428,6 @@ static int mbox_test_remove(struct platform_device *pdev)
mbox_free_channel(tdev->tx_channel);
if (tdev->rx_channel)
mbox_free_channel(tdev->rx_channel);
-
- return 0;
}
static const struct of_device_id mbox_test_match[] = {
@@ -444,7 +442,7 @@ static struct platform_driver mbox_test_driver = {
.of_match_table = mbox_test_match,
},
.probe = mbox_test_probe,
- .remove = mbox_test_remove,
+ .remove_new = mbox_test_remove,
};
module_platform_driver(mbox_test_driver);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index de862e913..ead2200f3 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -367,7 +367,7 @@ static int cmdq_resume(struct device *dev)
return 0;
}
-static int cmdq_remove(struct platform_device *pdev)
+static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
@@ -378,7 +378,6 @@ static int cmdq_remove(struct platform_device *pdev)
cmdq_runtime_suspend(&pdev->dev);
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
- return 0;
}
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
@@ -706,62 +705,70 @@ static const struct dev_pm_ops cmdq_pm_ops = {
cmdq_runtime_resume, NULL)
};
-static const struct gce_plat gce_plat_v2 = {
- .thread_nr = 16,
- .shift = 0,
+static const struct gce_plat gce_plat_mt6779 = {
+ .thread_nr = 24,
+ .shift = 3,
.control_by_sw = false,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v3 = {
- .thread_nr = 24,
+static const struct gce_plat gce_plat_mt8173 = {
+ .thread_nr = 16,
.shift = 0,
.control_by_sw = false,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v4 = {
+static const struct gce_plat gce_plat_mt8183 = {
.thread_nr = 24,
- .shift = 3,
+ .shift = 0,
.control_by_sw = false,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v5 = {
+static const struct gce_plat gce_plat_mt8186 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
+ .sw_ddr_en = true,
.gce_num = 1
};
-static const struct gce_plat gce_plat_v6 = {
- .thread_nr = 24,
+static const struct gce_plat gce_plat_mt8188 = {
+ .thread_nr = 32,
.shift = 3,
.control_by_sw = true,
.gce_num = 2
};
-static const struct gce_plat gce_plat_v7 = {
+static const struct gce_plat gce_plat_mt8192 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
- .sw_ddr_en = true,
.gce_num = 1
};
+static const struct gce_plat gce_plat_mt8195 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .gce_num = 2
+};
+
static const struct of_device_id cmdq_of_ids[] = {
- {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
- {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
- {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
- {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
- {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
- {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
+ {.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
+ {.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
+ {.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
+ {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
+ {.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
+ {.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
+ {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
{}
};
static struct platform_driver cmdq_drv = {
.probe = cmdq_probe,
- .remove = cmdq_remove,
+ .remove_new = cmdq_remove,
.driver = {
.name = "mtk_cmdq",
.pm = &cmdq_pm_ops,
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 792bcaebb..c961706fe 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -865,19 +865,17 @@ unregister:
return ret;
}
-static int omap_mbox_remove(struct platform_device *pdev)
+static void omap_mbox_remove(struct platform_device *pdev)
{
struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
pm_runtime_disable(mdev->dev);
omap_mbox_unregister(mdev);
-
- return 0;
}
static struct platform_driver omap_mbox_driver = {
.probe = omap_mbox_probe,
- .remove = omap_mbox_remove,
+ .remove_new = omap_mbox_remove,
.driver = {
.name = "omap-mailbox",
.pm = &omap_mbox_pm_ops,
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 002a135ee..7d91e7c01 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -129,14 +129,12 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+static void qcom_apcs_ipc_remove(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
struct platform_device *clk = apcs->clk;
platform_device_unregister(clk);
-
- return 0;
}
/* .data is the offset of the ipc register within the global block */
@@ -145,19 +143,19 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
+ /* Do not add any more entries using existing driver data */
+ { .compatible = "qcom,msm8976-apcs-kpss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
- { .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
- /* Do not add any more entries using existing driver data */
{ .compatible = "qcom,ipq5332-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
@@ -169,7 +167,7 @@ MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
static struct platform_driver qcom_apcs_ipc_driver = {
.probe = qcom_apcs_ipc_probe,
- .remove = qcom_apcs_ipc_remove,
+ .remove_new = qcom_apcs_ipc_remove,
.driver = {
.name = "qcom_apcs_ipc",
.of_match_table = qcom_apcs_ipc_of_match,
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index f597a1bd5..d537cc9c4 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -326,14 +326,12 @@ err_mbox:
return ret;
}
-static int qcom_ipcc_remove(struct platform_device *pdev)
+static void qcom_ipcc_remove(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
disable_irq_wake(ipcc->irq);
irq_domain_remove(ipcc->irq_domain);
-
- return 0;
}
static const struct of_device_id qcom_ipcc_of_match[] = {
@@ -348,7 +346,7 @@ static const struct dev_pm_ops qcom_ipcc_dev_pm_ops = {
static struct platform_driver qcom_ipcc_driver = {
.probe = qcom_ipcc_probe,
- .remove = qcom_ipcc_remove,
+ .remove_new = qcom_ipcc_remove,
.driver = {
.name = "qcom-ipcc",
.of_match_table = qcom_ipcc_of_match,
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 4ad3653f3..1442f2757 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -331,7 +331,7 @@ err_clk:
return ret;
}
-static int stm32_ipcc_remove(struct platform_device *pdev)
+static void stm32_ipcc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -339,8 +339,6 @@ static int stm32_ipcc_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(&pdev->dev);
device_set_wakeup_capable(dev, false);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -381,7 +379,7 @@ static struct platform_driver stm32_ipcc_driver = {
.of_match_table = stm32_ipcc_of_match,
},
.probe = stm32_ipcc_probe,
- .remove = stm32_ipcc_remove,
+ .remove_new = stm32_ipcc_remove,
};
module_platform_driver(stm32_ipcc_driver);
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
index 7f8d93104..3dcc54dc8 100644
--- a/drivers/mailbox/sun6i-msgbox.c
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -287,15 +287,13 @@ err_disable_unprepare:
return ret;
}
-static int sun6i_msgbox_remove(struct platform_device *pdev)
+static void sun6i_msgbox_remove(struct platform_device *pdev)
{
struct sun6i_msgbox *mbox = platform_get_drvdata(pdev);
mbox_controller_unregister(&mbox->controller);
/* See the comment in sun6i_msgbox_probe about the reset line. */
clk_disable_unprepare(mbox->clk);
-
- return 0;
}
static const struct of_device_id sun6i_msgbox_of_match[] = {
@@ -310,7 +308,7 @@ static struct platform_driver sun6i_msgbox_driver = {
.of_match_table = sun6i_msgbox_of_match,
},
.probe = sun6i_msgbox_probe,
- .remove = sun6i_msgbox_remove,
+ .remove_new = sun6i_msgbox_remove,
};
module_platform_driver(sun6i_msgbox_driver);
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index fe29fc2ca..19ef56cbc 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -868,13 +868,11 @@ static int tegra_hsp_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_hsp_remove(struct platform_device *pdev)
+static void tegra_hsp_remove(struct platform_device *pdev)
{
struct tegra_hsp *hsp = platform_get_drvdata(pdev);
lockdep_unregister_key(&hsp->lock_key);
-
- return 0;
}
static int __maybe_unused tegra_hsp_resume(struct device *dev)
@@ -953,7 +951,7 @@ static struct platform_driver tegra_hsp_driver = {
.pm = &tegra_hsp_pm_ops,
},
.probe = tegra_hsp_probe,
- .remove = tegra_hsp_remove,
+ .remove_new = tegra_hsp_remove,
};
static int __init tegra_hsp_init(void)
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 7fa533e80..25c65afc0 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -81,7 +81,6 @@ struct zynqmp_ipi_mchan {
* @remote_id: remote IPI agent ID
* @mbox: mailbox Controller
* @mchans: array for channels, tx channel and rx channel.
- * @irq: IPI agent interrupt ID
*/
struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata *pdata;
@@ -688,19 +687,17 @@ free_mbox_dev:
return ret;
}
-static int zynqmp_ipi_remove(struct platform_device *pdev)
+static void zynqmp_ipi_remove(struct platform_device *pdev)
{
struct zynqmp_ipi_pdata *pdata;
pdata = platform_get_drvdata(pdev);
zynqmp_ipi_free_mboxes(pdata);
-
- return 0;
}
static struct platform_driver zynqmp_ipi_driver = {
.probe = zynqmp_ipi_probe,
- .remove = zynqmp_ipi_remove,
+ .remove_new = zynqmp_ipi_remove,
.driver = {
.name = "zynqmp-ipi",
.of_match_table = of_match_ptr(zynqmp_ipi_of_match),
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index ba4530459..61994da7b 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -263,6 +263,7 @@ static void mcb_free_bus(struct device *dev)
/**
* mcb_alloc_bus() - Allocate a new @mcb_bus
+ * @carrier: generic &struct device for the carrier device
*
* Allocate a new @mcb_bus.
*/
@@ -327,7 +328,7 @@ void mcb_release_bus(struct mcb_bus *bus)
EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
/**
- * mcb_bus_put() - Increment refcnt
+ * mcb_bus_get() - Increment refcnt
* @bus: The @mcb_bus
*
* Get a @mcb_bus' ref
@@ -455,7 +456,7 @@ EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
/**
* mcb_release_mem() - Release memory requested by device
- * @dev: The @mcb_device that requested the memory
+ * @mem: The memory resource to be released
*
 * Release memory that was previously requested via @mcb_request_mem().
*/
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 3ff87cb4d..a743e2c57 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -61,19 +61,6 @@ config MD_BITMAP_FILE
various kernel APIs and can only work with files on a file system not
actually sitting on the MD device.
-config MD_LINEAR
- tristate "Linear (append) mode (deprecated)"
- depends on BLK_DEV_MD
- help
- If you say Y here, then your multiple devices driver will be able to
- use the so-called linear mode, i.e. it will combine the hard disk
- partitions by simply appending one to the other.
-
- To compile this as a module, choose M here: the module
- will be called linear.
-
- If unsure, say Y.
-
config MD_RAID0
tristate "RAID-0 (striping) mode"
depends on BLK_DEV_MD
@@ -172,27 +159,6 @@ config MD_RAID456
If unsure, say Y.
-config MD_MULTIPATH
- tristate "Multipath I/O support (deprecated)"
- depends on BLK_DEV_MD
- help
- MD_MULTIPATH provides a simple multi-path personality for use
- the MD framework. It is not under active development. New
- projects should consider using DM_MULTIPATH which has more
- features and more testing.
-
- If unsure, say N.
-
-config MD_FAULTY
- tristate "Faulty test module for MD (deprecated)"
- depends on BLK_DEV_MD
- help
- The "faulty" module allows for a block device that occasionally returns
- read or write errors. It is useful for testing.
-
- In unsure, say N.
-
-
config MD_CLUSTER
tristate "Cluster Support for MD"
depends on BLK_DEV_MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 84291e38d..027d7cfec 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -29,22 +29,16 @@ dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
-linear-y += md-linear.o
-multipath-y += md-multipath.o
-faulty-y += md-faulty.o
# Note: link order is important. All raid personalities
-# and must come before md.o, as they each initialise
-# themselves, and md.o may use the personalities when it
+# and must come before md.o, as they each initialise
+# themselves, and md.o may use the personalities when it
# auto-initialised.
-obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
-obj-$(CONFIG_MD_MULTIPATH) += multipath.o
-obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_MD_CLUSTER) += md-cluster.o
obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1402096b8..dc3f50f69 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -954,7 +954,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
q->limits.max_segment_size = UINT_MAX;
q->limits.max_segments = BIO_MAX_VECS;
blk_queue_max_discard_sectors(q, UINT_MAX);
- q->limits.discard_granularity = 512;
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
q->limits.physical_block_size = block_size;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 4f2808ef3..f5541b8f6 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1170,7 +1170,7 @@ static void __cache_size_refresh(void)
* If the allocation may fail we use __get_free_pages. Memory fragmentation
* won't have a fatal effect here, but it just causes flushes of some other
* buffers and more I/O will be performed. Don't use __get_free_pages if it
- * always fails (i.e. order > MAX_ORDER).
+ * always fails (i.e. order > MAX_PAGE_ORDER).
*
* If the allocation shouldn't fail we use __vmalloc. This is only for the
* initial reserve allocation, so there's no risk of wasting all vmalloc
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 35f501939..59445763e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1678,7 +1678,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned int remaining_size;
- unsigned int order = MAX_ORDER;
+ unsigned int order = MAX_PAGE_ORDER;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -3702,7 +3702,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 24, 0},
+ .version = {1, 25, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f57fb8215..7916ed9f1 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -434,7 +434,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
remaining_size = size;
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
while (remaining_size) {
struct page *pages;
unsigned size_to_add, to_copy;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 2cc30b9ab..3b4218a2e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4221,7 +4221,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
- if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
@@ -4742,7 +4742,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 79c65c9ad..6ea75436a 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -807,7 +807,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
*/
if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
for (i = 0; i < job->num_dests; i++) {
- if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
+ if (bdev_is_zoned(dests[i].bdev)) {
job->flags |= BIT(DM_KCOPYD_WRITE_SEQ);
break;
}
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index bdc14ec99..1e5d988f4 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -66,6 +66,9 @@ struct dm_stats_last_position {
unsigned int last_rw;
};
+#define DM_STAT_MAX_ENTRIES 8388608
+#define DM_STAT_MAX_HISTOGRAM_ENTRIES 134217728
+
/*
* A typo on the command line could possibly make the kernel run out of memory
* and crash. To prevent the crash we account all used memory. We fail if we
@@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
return -EOVERFLOW;
+ if (n_entries > DM_STAT_MAX_ENTRIES)
+ return -EOVERFLOW;
+
shared_alloc_size = struct_size(s, stat_shared, n_entries);
if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
return -EOVERFLOW;
@@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
return -EOVERFLOW;
+ if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES)
+ return -EOVERFLOW;
+
if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
return -ENOMEM;
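/*
 * Editor's sketch of the sizing guard added to dm-stats above: explicit caps
 * (8388608 == 1 << 23 entries, 134217728 == 1 << 27 histogram slots) bound a
 * request before the multiplications, complementing the division-based
 * overflow checks that were already there. Names are illustrative.
 */
#include <linux/errno.h>
#include <linux/overflow.h>

#define EXAMPLE_MAX_ENTRIES		(1 << 23)
#define EXAMPLE_MAX_HISTOGRAM_ENTRIES	(1 << 27)

static int example_check_stat_size(size_t n_entries, size_t n_histogram_entries)
{
	size_t slots;

	if (n_entries > EXAMPLE_MAX_ENTRIES)
		return -EOVERFLOW;

	/* each entry carries n_histogram_entries + 1 counters */
	if (check_mul_overflow(n_histogram_entries + 1, n_entries, &slots) ||
	    slots > EXAMPLE_MAX_HISTOGRAM_ENTRIES)
		return -EOVERFLOW;

	return 0;
}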
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 08c9c20f9..41f1d731a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1584,21 +1584,18 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
return true;
}
-static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
- enum blk_zoned_model *zoned_model = data;
+ bool *zoned = data;
- return blk_queue_zoned_model(q) != *zoned_model;
+ return bdev_is_zoned(dev->bdev) != *zoned;
}
static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return blk_queue_zoned_model(q) != BLK_ZONED_NONE;
+ return bdev_is_zoned(dev->bdev);
}
/*
@@ -1608,8 +1605,7 @@ static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
* has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
* zoned model with all zoned devices having the same zone size.
*/
-static bool dm_table_supports_zoned_model(struct dm_table *t,
- enum blk_zoned_model zoned_model)
+static bool dm_table_supports_zoned(struct dm_table *t, bool zoned)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@@ -1628,11 +1624,11 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
if (dm_target_supports_zoned_hm(ti->type)) {
if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, device_not_zoned_model,
- &zoned_model))
+ ti->type->iterate_devices(ti, device_not_zoned,
+ &zoned))
return false;
} else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
- if (zoned_model == BLK_ZONED_HM)
+ if (zoned)
return false;
}
}
@@ -1655,14 +1651,13 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
* zone sectors, if the destination device is a zoned block device, it shall
* have the specified zone_sectors.
*/
-static int validate_hardware_zoned_model(struct dm_table *t,
- enum blk_zoned_model zoned_model,
- unsigned int zone_sectors)
+static int validate_hardware_zoned(struct dm_table *t, bool zoned,
+ unsigned int zone_sectors)
{
- if (zoned_model == BLK_ZONED_NONE)
+ if (!zoned)
return 0;
- if (!dm_table_supports_zoned_model(t, zoned_model)) {
+ if (!dm_table_supports_zoned(t, zoned)) {
DMERR("%s: zoned model is not consistent across all devices",
dm_device_name(t->md));
return -EINVAL;
@@ -1688,8 +1683,8 @@ int dm_calculate_queue_limits(struct dm_table *t,
struct queue_limits *limits)
{
struct queue_limits ti_limits;
- enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
unsigned int zone_sectors = 0;
+ bool zoned = false;
blk_set_stacking_limits(limits);
@@ -1711,12 +1706,12 @@ int dm_calculate_queue_limits(struct dm_table *t,
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
- if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
+ if (!zoned && ti_limits.zoned) {
/*
* After stacking all limits, validate all devices
* in table support this zoned model and zone sectors.
*/
- zoned_model = ti_limits.zoned;
+ zoned = ti_limits.zoned;
zone_sectors = ti_limits.chunk_sectors;
}
@@ -1749,18 +1744,18 @@ combine_limits:
* Verify that the zoned model and zone sectors, as determined before
* any .io_hints override, are the same across all devices in the table.
* - this is especially relevant if .io_hints is emulating a disk-managed
- * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
+ * zoned model on host-managed zoned block devices.
* BUT...
*/
- if (limits->zoned != BLK_ZONED_NONE) {
+ if (limits->zoned) {
/*
* ...IF the above limits stacking determined a zoned model
* validate that all of the table's devices conform to it.
*/
- zoned_model = limits->zoned;
+ zoned = limits->zoned;
zone_sectors = limits->chunk_sectors;
}
- if (validate_hardware_zoned_model(t, zoned_model, zone_sectors))
+ if (validate_hardware_zoned(t, zoned, zone_sectors))
return -EINVAL;
return validate_hardware_logical_block_alignment(t, limits);
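/*
 * Editor's sketch of the predicate swap running through the dm hunks above:
 * with host-aware zoned support removed from the block layer, the
 * three-state blk_queue_zoned_model() collapses into the boolean
 * bdev_is_zoned(), so "same zoned model" checks reduce to comparing
 * booleans. Illustrative wrapper only.
 */
#include <linux/blkdev.h>

static bool example_zoned_mismatch(struct block_device *bdev, bool want_zoned)
{
	return bdev_is_zoned(bdev) != want_zoned;
}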
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 49e4a35d7..abc008bae 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1560,7 +1560,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 6a4279bfb..01ab141bc 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
long i;
wc->memory_map = NULL;
- pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
+ pages = vmalloc_array(p, sizeof(struct page *));
if (!pages) {
r = -ENOMEM;
goto err2;
@@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
r = -ENOMEM;
goto err3;
}
- kvfree(pages);
+ vfree(pages);
wc->memory_vmapped = true;
}
@@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
return 0;
err3:
- kvfree(pages);
+ vfree(pages);
err2:
dax_read_unlock(id);
err1:
@@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
if (wc->entries)
return 0;
- wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
+ wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry));
if (!wc->entries)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
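
Both hunks above switch to vmalloc_array(), which combines an overflow-checked size computation with a pure vmalloc allocation, so the matching free is unambiguously vfree() rather than kvfree(). A minimal userspace sketch of the overflow check such helpers perform (alloc_array is a hypothetical stand-in):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse n * size if the multiplication would wrap, so a huge n
 * cannot silently become a small allocation. */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	void *ok = alloc_array(4, sizeof(void *));
	void *bad = alloc_array(SIZE_MAX, 2);	/* would overflow */

	printf("%s %s\n", ok ? "ok" : "null", bad ? "ok" : "null");
	free(ok);
	return 0;
}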
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 60a4dc01e..fdfe30f7b 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2836,12 +2836,11 @@ static void dmz_print_dev(struct dmz_metadata *zmd, int num)
{
struct dmz_dev *dev = &zmd->dev[num];
- if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
+ if (!bdev_is_zoned(dev->bdev))
dmz_dev_info(dev, "Regular block device");
else
- dmz_dev_info(dev, "Host-%s zoned block device",
- bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
- "aware" : "managed");
+ dmz_dev_info(dev, "Host-managed zoned block device");
+
if (zmd->sb_version > 1) {
sector_t sector_offset =
dev->zone_offset << zmd->zone_nr_sectors_shift;
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b487f7acc..621794a9e 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -702,7 +702,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
}
bdev = ddev->bdev;
- if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+ if (!bdev_is_zoned(bdev)) {
if (nr_devs == 1) {
ti->error = "Invalid regular device";
goto err;
@@ -1010,7 +1010,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_sectors = chunk_sectors;
/* We are exposing a drive-managed zoned block device */
- limits->zoned = BLK_ZONED_NONE;
+ limits->zoned = false;
}
/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4ff9bebb8..0dc3650c7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2675,7 +2675,7 @@ static int lock_fs(struct mapped_device *md)
WARN_ON(test_bit(DMF_FROZEN, &md->flags));
- r = freeze_bdev(md->disk->part0);
+ r = bdev_freeze(md->disk->part0);
if (!r)
set_bit(DMF_FROZEN, &md->flags);
return r;
@@ -2685,7 +2685,7 @@ static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
- thaw_bdev(md->disk->part0);
+ bdev_thaw(md->disk->part0);
clear_bit(DMF_FROZEN, &md->flags);
}
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 4b80165af..b2a00f213 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -49,7 +49,6 @@ static int md_setup_ents __initdata;
* instead of just one. -- KTK
* 18May2000: Added support for persistent-superblock arrays:
* md=n,0,factor,fault,device-list uses RAID0 for device n
- * md=n,-1,factor,fault,device-list uses LINEAR for device n
* md=n,device-list reads a RAID superblock from the devices
* elements in device-list are read by name_to_kdev_t so can be
* a hex number or something like /dev/hda1 /dev/sdb
@@ -88,7 +87,7 @@ static int __init md_setup(char *str)
md_setup_ents++;
switch (get_option(&str, &level)) { /* RAID level */
case 2: /* could be 0 or -1.. */
- if (level == 0 || level == LEVEL_LINEAR) {
+ if (level == 0) {
if (get_option(&str, &factor) != 2 || /* Chunk Size */
get_option(&str, &fault) != 2) {
printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
@@ -96,10 +95,7 @@ static int __init md_setup(char *str)
}
md_setup_args[ent].level = level;
md_setup_args[ent].chunk = 1 << (factor+12);
- if (level == LEVEL_LINEAR)
- pername = "linear";
- else
- pername = "raid0";
+ pername = "raid0";
break;
}
fallthrough;
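
For reference, the chunk size kept by this parser is encoded as 1 << (factor + 12) bytes, i.e. 4 KiB scaled by a power of two. A tiny standalone sketch of that mapping:

#include <stdio.h>

int main(void)
{
	int factor;

	/* chunk = 1 << (factor + 12) bytes, i.e. 4 KiB << factor */
	for (factor = 0; factor < 4; factor++)
		printf("factor=%d -> chunk=%d KiB\n",
		       factor, (1 << (factor + 12)) / 1024);
	return 0;
}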
diff --git a/drivers/md/md-faulty.c b/drivers/md/md-faulty.c
deleted file mode 100644
index a039e8e20..000000000
--- a/drivers/md/md-faulty.c
+++ /dev/null
@@ -1,365 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * faulty.c : Multiple Devices driver for Linux
- *
- * Copyright (C) 2004 Neil Brown
- *
- * faulty-device-simulator personality for md
- */
-
-
-/*
- * The "faulty" personality causes some requests to fail.
- *
- * Possible failure modes are:
- * reads fail "randomly" but succeed on retry
- * writes fail "randomly" but succeed on retry
- * reads for some address fail and then persist until a write
- * reads for some address fail and then persist irrespective of write
- * writes for some address fail and persist
- * all writes fail
- *
- * Different modes can be active at a time, but only
- * one can be set at array creation. Others can be added later.
- * A mode can be one-shot or recurrent with the recurrence being
- * once in every N requests.
- * The bottom 5 bits of the "layout" indicate the mode. The
- * remainder indicate a period, or 0 for one-shot.
- *
- * There is an implementation limit on the number of concurrently
- * persisting-faulty blocks. When a new fault is requested that would
- * exceed the limit, it is ignored.
- * All current faults can be cleared using a layout of "0".
- *
- * Requests are always sent to the device. If they are to fail,
- * we clone the bio and insert a new b_end_io into the chain.
- */
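
A minimal userspace sketch of the layout encoding described in the comment above; MODE_MASK and MODE_SHIFT mirror the ModeMask/ModeShift constants defined just below:

#include <stdio.h>

/* Low five bits select the fault mode, the remaining bits give the
 * recurrence period (0 means one-shot). */
#define MODE_MASK	0x1f
#define MODE_SHIFT	5

int main(void)
{
	int layout = (4 << MODE_SHIFT) | 1;	/* mode 1, every 4th request */

	printf("mode=%d period=%d\n",
	       layout & MODE_MASK, layout >> MODE_SHIFT);
	return 0;
}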
-
-#define WriteTransient 0
-#define ReadTransient 1
-#define WritePersistent 2
-#define ReadPersistent 3
-#define WriteAll 4 /* doesn't go to device */
-#define ReadFixable 5
-#define Modes 6
-
-#define ClearErrors 31
-#define ClearFaults 30
-
-#define AllPersist 100 /* internal use only */
-#define NoPersist 101
-
-#define ModeMask 0x1f
-#define ModeShift 5
-
-#define MaxFault 50
-#include <linux/blkdev.h>
-#include <linux/module.h>
-#include <linux/raid/md_u.h>
-#include <linux/slab.h>
-#include "md.h"
-#include <linux/seq_file.h>
-
-
-static void faulty_fail(struct bio *bio)
-{
- struct bio *b = bio->bi_private;
-
- b->bi_iter.bi_size = bio->bi_iter.bi_size;
- b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
-
- bio_put(bio);
-
- bio_io_error(b);
-}
-
-struct faulty_conf {
- int period[Modes];
- atomic_t counters[Modes];
- sector_t faults[MaxFault];
- int modes[MaxFault];
- int nfaults;
- struct md_rdev *rdev;
-};
-
-static int check_mode(struct faulty_conf *conf, int mode)
-{
- if (conf->period[mode] == 0 &&
- atomic_read(&conf->counters[mode]) <= 0)
- return 0; /* no failure, no decrement */
-
-
- if (atomic_dec_and_test(&conf->counters[mode])) {
- if (conf->period[mode])
- atomic_set(&conf->counters[mode], conf->period[mode]);
- return 1;
- }
- return 0;
-}
-
-static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
-{
- /* If we find a ReadFixable sector, we fix it ... */
- int i;
- for (i=0; i<conf->nfaults; i++)
- if (conf->faults[i] >= start &&
- conf->faults[i] < end) {
- /* found it ... */
- switch (conf->modes[i] * 2 + dir) {
- case WritePersistent*2+WRITE: return 1;
- case ReadPersistent*2+READ: return 1;
- case ReadFixable*2+READ: return 1;
- case ReadFixable*2+WRITE:
- conf->modes[i] = NoPersist;
- return 0;
- case AllPersist*2+READ:
- case AllPersist*2+WRITE: return 1;
- default:
- return 0;
- }
- }
- return 0;
-}
-
-static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
-{
- int i;
- int n = conf->nfaults;
- for (i=0; i<conf->nfaults; i++)
- if (conf->faults[i] == start) {
- switch(mode) {
- case NoPersist: conf->modes[i] = mode; return;
- case WritePersistent:
- if (conf->modes[i] == ReadPersistent ||
- conf->modes[i] == ReadFixable)
- conf->modes[i] = AllPersist;
- else
- conf->modes[i] = WritePersistent;
- return;
- case ReadPersistent:
- if (conf->modes[i] == WritePersistent)
- conf->modes[i] = AllPersist;
- else
- conf->modes[i] = ReadPersistent;
- return;
- case ReadFixable:
- if (conf->modes[i] == WritePersistent ||
- conf->modes[i] == ReadPersistent)
- conf->modes[i] = AllPersist;
- else
- conf->modes[i] = ReadFixable;
- return;
- }
- } else if (conf->modes[i] == NoPersist)
- n = i;
-
- if (n >= MaxFault)
- return;
- conf->faults[n] = start;
- conf->modes[n] = mode;
- if (conf->nfaults == n)
- conf->nfaults = n+1;
-}
-
-static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
-{
- struct faulty_conf *conf = mddev->private;
- int failit = 0;
-
- if (bio_data_dir(bio) == WRITE) {
- /* write request */
- if (atomic_read(&conf->counters[WriteAll])) {
- /* special case - don't decrement, don't submit_bio_noacct,
- * just fail immediately
- */
- bio_io_error(bio);
- return true;
- }
-
- if (check_sector(conf, bio->bi_iter.bi_sector,
- bio_end_sector(bio), WRITE))
- failit = 1;
- if (check_mode(conf, WritePersistent)) {
- add_sector(conf, bio->bi_iter.bi_sector,
- WritePersistent);
- failit = 1;
- }
- if (check_mode(conf, WriteTransient))
- failit = 1;
- } else {
- /* read request */
- if (check_sector(conf, bio->bi_iter.bi_sector,
- bio_end_sector(bio), READ))
- failit = 1;
- if (check_mode(conf, ReadTransient))
- failit = 1;
- if (check_mode(conf, ReadPersistent)) {
- add_sector(conf, bio->bi_iter.bi_sector,
- ReadPersistent);
- failit = 1;
- }
- if (check_mode(conf, ReadFixable)) {
- add_sector(conf, bio->bi_iter.bi_sector,
- ReadFixable);
- failit = 1;
- }
- }
-
- md_account_bio(mddev, &bio);
- if (failit) {
- struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
- &mddev->bio_set);
-
- b->bi_private = bio;
- b->bi_end_io = faulty_fail;
- bio = b;
- } else
- bio_set_dev(bio, conf->rdev->bdev);
-
- submit_bio_noacct(bio);
- return true;
-}
-
-static void faulty_status(struct seq_file *seq, struct mddev *mddev)
-{
- struct faulty_conf *conf = mddev->private;
- int n;
-
- if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
- seq_printf(seq, " WriteTransient=%d(%d)",
- n, conf->period[WriteTransient]);
-
- if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
- seq_printf(seq, " ReadTransient=%d(%d)",
- n, conf->period[ReadTransient]);
-
- if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
- seq_printf(seq, " WritePersistent=%d(%d)",
- n, conf->period[WritePersistent]);
-
- if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
- seq_printf(seq, " ReadPersistent=%d(%d)",
- n, conf->period[ReadPersistent]);
-
-
- if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
- seq_printf(seq, " ReadFixable=%d(%d)",
- n, conf->period[ReadFixable]);
-
- if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
- seq_printf(seq, " WriteAll");
-
- seq_printf(seq, " nfaults=%d", conf->nfaults);
-}
-
-
-static int faulty_reshape(struct mddev *mddev)
-{
- int mode = mddev->new_layout & ModeMask;
- int count = mddev->new_layout >> ModeShift;
- struct faulty_conf *conf = mddev->private;
-
- if (mddev->new_layout < 0)
- return 0;
-
- /* new layout */
- if (mode == ClearFaults)
- conf->nfaults = 0;
- else if (mode == ClearErrors) {
- int i;
- for (i=0 ; i < Modes ; i++) {
- conf->period[i] = 0;
- atomic_set(&conf->counters[i], 0);
- }
- } else if (mode < Modes) {
- conf->period[mode] = count;
- if (!count) count++;
- atomic_set(&conf->counters[mode], count);
- } else
- return -EINVAL;
- mddev->new_layout = -1;
- mddev->layout = -1; /* makes sure further changes come through */
- return 0;
-}
-
-static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
-{
- WARN_ONCE(raid_disks,
- "%s does not support generic reshape\n", __func__);
-
- if (sectors == 0)
- return mddev->dev_sectors;
-
- return sectors;
-}
-
-static int faulty_run(struct mddev *mddev)
-{
- struct md_rdev *rdev;
- int i;
- struct faulty_conf *conf;
-
- if (md_check_no_bitmap(mddev))
- return -EINVAL;
-
- conf = kmalloc(sizeof(*conf), GFP_KERNEL);
- if (!conf)
- return -ENOMEM;
-
- for (i=0; i<Modes; i++) {
- atomic_set(&conf->counters[i], 0);
- conf->period[i] = 0;
- }
- conf->nfaults = 0;
-
- rdev_for_each(rdev, mddev) {
- conf->rdev = rdev;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- }
-
- md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
- mddev->private = conf;
-
- faulty_reshape(mddev);
-
- return 0;
-}
-
-static void faulty_free(struct mddev *mddev, void *priv)
-{
- struct faulty_conf *conf = priv;
-
- kfree(conf);
-}
-
-static struct md_personality faulty_personality =
-{
- .name = "faulty",
- .level = LEVEL_FAULTY,
- .owner = THIS_MODULE,
- .make_request = faulty_make_request,
- .run = faulty_run,
- .free = faulty_free,
- .status = faulty_status,
- .check_reshape = faulty_reshape,
- .size = faulty_size,
-};
-
-static int __init raid_init(void)
-{
- return register_md_personality(&faulty_personality);
-}
-
-static void raid_exit(void)
-{
- unregister_md_personality(&faulty_personality);
-}
-
-module_init(raid_init);
-module_exit(raid_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Fault injection personality for MD (deprecated)");
-MODULE_ALIAS("md-personality-10"); /* faulty */
-MODULE_ALIAS("md-faulty");
-MODULE_ALIAS("md-level--5");
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
deleted file mode 100644
index 8eca7693b..000000000
--- a/drivers/md/md-linear.c
+++ /dev/null
@@ -1,318 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- linear.c : Multiple Devices driver for Linux
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
-
- Linear mode management functions.
-
-*/
-
-#include <linux/blkdev.h>
-#include <linux/raid/md_u.h>
-#include <linux/seq_file.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <trace/events/block.h>
-#include "md.h"
-#include "md-linear.h"
-
-/*
- * find which device holds a particular offset
- */
-static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
-{
- int lo, mid, hi;
- struct linear_conf *conf;
-
- lo = 0;
- hi = mddev->raid_disks - 1;
- conf = mddev->private;
-
- /*
- * Binary Search
- */
-
- while (hi > lo) {
-
- mid = (hi + lo) / 2;
- if (sector < conf->disks[mid].end_sector)
- hi = mid;
- else
- lo = mid + 1;
- }
-
- return conf->disks + lo;
-}
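
A standalone sketch of the binary search above, using three hypothetical devices of 100, 50 and 200 sectors (cumulative end_sector values 100, 150, 350):

#include <stdio.h>

static const unsigned long long end_sector[] = { 100, 150, 350 };

/* Same search as which_dev(): find the first device whose
 * end_sector lies beyond the target sector. */
static int which_dev(unsigned long long sector)
{
	int lo = 0, hi = 2, mid;

	while (hi > lo) {
		mid = (hi + lo) / 2;
		if (sector < end_sector[mid])
			hi = mid;
		else
			lo = mid + 1;
	}
	return lo;
}

int main(void)
{
	printf("%d %d %d\n", which_dev(0), which_dev(120), which_dev(349));
	return 0;	/* prints 0 1 2 */
}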
-
-static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
-{
- struct linear_conf *conf;
- sector_t array_sectors;
-
- conf = mddev->private;
- WARN_ONCE(sectors || raid_disks,
- "%s does not support generic reshape\n", __func__);
- array_sectors = conf->array_sectors;
-
- return array_sectors;
-}
-
-static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
-{
- struct linear_conf *conf;
- struct md_rdev *rdev;
- int i, cnt;
-
- conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL);
- if (!conf)
- return NULL;
-
- /*
- * conf->raid_disks is a copy of mddev->raid_disks. The reason to
- * keep a copy of mddev->raid_disks in struct linear_conf is that
- * mddev->raid_disks may not be consistent with the number of
- * pointers in conf->disks[] when it is updated in linear_add() and
- * used to iterate the old conf->disks[] array in linear_congested().
- * Here conf->raid_disks is always consistent with the number of
- * pointers in the conf->disks[] array, and mddev->private is updated
- * with rcu_assign_pointer() in linear_add(), so such a race can be
- * avoided.
- */
- conf->raid_disks = raid_disks;
-
- cnt = 0;
- conf->array_sectors = 0;
-
- rdev_for_each(rdev, mddev) {
- int j = rdev->raid_disk;
- struct dev_info *disk = conf->disks + j;
- sector_t sectors;
-
- if (j < 0 || j >= raid_disks || disk->rdev) {
- pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
- mdname(mddev));
- goto out;
- }
-
- disk->rdev = rdev;
- if (mddev->chunk_sectors) {
- sectors = rdev->sectors;
- sector_div(sectors, mddev->chunk_sectors);
- rdev->sectors = sectors * mddev->chunk_sectors;
- }
-
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
- conf->array_sectors += rdev->sectors;
- cnt++;
- }
- if (cnt != raid_disks) {
- pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
- mdname(mddev));
- goto out;
- }
-
- /*
- * Here we calculate the device offsets.
- */
- conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
-
- for (i = 1; i < raid_disks; i++)
- conf->disks[i].end_sector =
- conf->disks[i-1].end_sector +
- conf->disks[i].rdev->sectors;
-
- return conf;
-
-out:
- kfree(conf);
- return NULL;
-}
-
-static int linear_run (struct mddev *mddev)
-{
- struct linear_conf *conf;
- int ret;
-
- if (md_check_no_bitmap(mddev))
- return -EINVAL;
- conf = linear_conf(mddev, mddev->raid_disks);
-
- if (!conf)
- return 1;
- mddev->private = conf;
- md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
-
- ret = md_integrity_register(mddev);
- if (ret) {
- kfree(conf);
- mddev->private = NULL;
- }
- return ret;
-}
-
-static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
-{
- /* Adding a drive to a linear array allows the array to grow.
- * It is permitted if the new drive has a matching superblock
- * already on it, with raid_disk equal to raid_disks.
- * It is achieved by creating a new linear_private_data structure
- * and swapping it in place of the current one.
- * The current one is never freed until the array is stopped.
- * This avoids races.
- */
- struct linear_conf *newconf, *oldconf;
-
- if (rdev->saved_raid_disk != mddev->raid_disks)
- return -EINVAL;
-
- rdev->raid_disk = rdev->saved_raid_disk;
- rdev->saved_raid_disk = -1;
-
- newconf = linear_conf(mddev,mddev->raid_disks+1);
-
- if (!newconf)
- return -ENOMEM;
-
- /* newconf->raid_disks already keeps a copy of the increased
- * value of mddev->raid_disks; WARN_ONCE() is just used to make
- * sure of this. It is possible that oldconf is still referenced
- * in linear_congested(), therefore kfree_rcu() is used so that
- * oldconf is not freed until no one uses it anymore.
- */
- oldconf = rcu_dereference_protected(mddev->private,
- lockdep_is_held(&mddev->reconfig_mutex));
- mddev->raid_disks++;
- WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
- "copied raid_disks doesn't match mddev->raid_disks");
- rcu_assign_pointer(mddev->private, newconf);
- md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
- set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
- kfree_rcu(oldconf, rcu);
- return 0;
-}
-
-static void linear_free(struct mddev *mddev, void *priv)
-{
- struct linear_conf *conf = priv;
-
- kfree(conf);
-}
-
-static bool linear_make_request(struct mddev *mddev, struct bio *bio)
-{
- struct dev_info *tmp_dev;
- sector_t start_sector, end_sector, data_offset;
- sector_t bio_sector = bio->bi_iter.bi_sector;
-
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)
- && md_flush_request(mddev, bio))
- return true;
-
- tmp_dev = which_dev(mddev, bio_sector);
- start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
- end_sector = tmp_dev->end_sector;
- data_offset = tmp_dev->rdev->data_offset;
-
- if (unlikely(bio_sector >= end_sector ||
- bio_sector < start_sector))
- goto out_of_bounds;
-
- if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
- md_error(mddev, tmp_dev->rdev);
- bio_io_error(bio);
- return true;
- }
-
- if (unlikely(bio_end_sector(bio) > end_sector)) {
- /* This bio crosses a device boundary, so we have to split it */
- struct bio *split = bio_split(bio, end_sector - bio_sector,
- GFP_NOIO, &mddev->bio_set);
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
- }
-
- md_account_bio(mddev, &bio);
- bio_set_dev(bio, tmp_dev->rdev->bdev);
- bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
- start_sector + data_offset;
-
- if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !bdev_max_discard_sectors(bio->bi_bdev))) {
- /* Just ignore it */
- bio_endio(bio);
- } else {
- if (mddev->gendisk)
- trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
- bio_sector);
- mddev_check_write_zeroes(mddev, bio);
- submit_bio_noacct(bio);
- }
- return true;
-
-out_of_bounds:
- pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %pg: %llu sectors, offset %llu\n",
- mdname(mddev),
- (unsigned long long)bio->bi_iter.bi_sector,
- tmp_dev->rdev->bdev,
- (unsigned long long)tmp_dev->rdev->sectors,
- (unsigned long long)start_sector);
- bio_io_error(bio);
- return true;
-}
-
-static void linear_status (struct seq_file *seq, struct mddev *mddev)
-{
- seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
-}
-
-static void linear_error(struct mddev *mddev, struct md_rdev *rdev)
-{
- if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
- char *md_name = mdname(mddev);
-
- pr_crit("md/linear%s: Disk failure on %pg detected, failing array.\n",
- md_name, rdev->bdev);
- }
-}
-
-static void linear_quiesce(struct mddev *mddev, int state)
-{
-}
-
-static struct md_personality linear_personality =
-{
- .name = "linear",
- .level = LEVEL_LINEAR,
- .owner = THIS_MODULE,
- .make_request = linear_make_request,
- .run = linear_run,
- .free = linear_free,
- .status = linear_status,
- .hot_add_disk = linear_add,
- .size = linear_size,
- .quiesce = linear_quiesce,
- .error_handler = linear_error,
-};
-
-static int __init linear_init (void)
-{
- return register_md_personality (&linear_personality);
-}
-
-static void linear_exit (void)
-{
- unregister_md_personality (&linear_personality);
-}
-
-module_init(linear_init);
-module_exit(linear_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Linear device concatenation personality for MD (deprecated)");
-MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
-MODULE_ALIAS("md-linear");
-MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
deleted file mode 100644
index aa77133f3..000000000
--- a/drivers/md/md-multipath.c
+++ /dev/null
@@ -1,462 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * multipath.c : Multiple Devices driver for Linux
- *
- * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
- *
- * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
- *
- * MULTIPATH management functions.
- *
- * derived from raid1.c.
- */
-
-#include <linux/blkdev.h>
-#include <linux/module.h>
-#include <linux/raid/md_u.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include "md.h"
-#include "md-multipath.h"
-
-#define MAX_WORK_PER_DISK 128
-
-#define NR_RESERVED_BUFS 32
-
-static int multipath_map (struct mpconf *conf)
-{
- int i, disks = conf->raid_disks;
-
- /*
- * Later we could do read balancing on the read side;
- * for now we use the first available disk.
- */
-
- rcu_read_lock();
- for (i = 0; i < disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- return i;
- }
- }
- rcu_read_unlock();
-
- pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
- return (-1);
-}
-
-static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
-{
- unsigned long flags;
- struct mddev *mddev = mp_bh->mddev;
- struct mpconf *conf = mddev->private;
-
- spin_lock_irqsave(&conf->device_lock, flags);
- list_add(&mp_bh->retry_list, &conf->retry_list);
- spin_unlock_irqrestore(&conf->device_lock, flags);
- md_wakeup_thread(mddev->thread);
-}
-
-/*
- * multipath_end_bh_io() is called when we have finished servicing a multipathed
- * operation and are ready to return a success/failure code to the buffer
- * cache layer.
- */
-static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
-{
- struct bio *bio = mp_bh->master_bio;
- struct mpconf *conf = mp_bh->mddev->private;
-
- bio->bi_status = status;
- bio_endio(bio);
- mempool_free(mp_bh, &conf->pool);
-}
-
-static void multipath_end_request(struct bio *bio)
-{
- struct multipath_bh *mp_bh = bio->bi_private;
- struct mpconf *conf = mp_bh->mddev->private;
- struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
-
- if (!bio->bi_status)
- multipath_end_bh_io(mp_bh, 0);
- else if (!(bio->bi_opf & REQ_RAHEAD)) {
- /*
- * oops, IO error:
- */
- md_error (mp_bh->mddev, rdev);
- pr_info("multipath: %pg: rescheduling sector %llu\n",
- rdev->bdev,
- (unsigned long long)bio->bi_iter.bi_sector);
- multipath_reschedule_retry(mp_bh);
- } else
- multipath_end_bh_io(mp_bh, bio->bi_status);
- rdev_dec_pending(rdev, conf->mddev);
-}
-
-static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
-{
- struct mpconf *conf = mddev->private;
- struct multipath_bh * mp_bh;
- struct multipath_info *multipath;
-
- if (unlikely(bio->bi_opf & REQ_PREFLUSH)
- && md_flush_request(mddev, bio))
- return true;
-
- md_account_bio(mddev, &bio);
- mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
-
- mp_bh->master_bio = bio;
- mp_bh->mddev = mddev;
-
- mp_bh->path = multipath_map(conf);
- if (mp_bh->path < 0) {
- bio_io_error(bio);
- mempool_free(mp_bh, &conf->pool);
- return true;
- }
- multipath = conf->multipaths + mp_bh->path;
-
- bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO);
-
- mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
- mp_bh->bio.bi_end_io = multipath_end_request;
- mp_bh->bio.bi_private = mp_bh;
- mddev_check_write_zeroes(mddev, &mp_bh->bio);
- submit_bio_noacct(&mp_bh->bio);
- return true;
-}
-
-static void multipath_status(struct seq_file *seq, struct mddev *mddev)
-{
- struct mpconf *conf = mddev->private;
- int i;
-
- seq_printf (seq, " [%d/%d] [", conf->raid_disks,
- conf->raid_disks - mddev->degraded);
- rcu_read_lock();
- for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
- seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
- }
- rcu_read_unlock();
- seq_putc(seq, ']');
-}
-
-/*
- * Careful, this can execute in IRQ contexts as well!
- */
-static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
-{
- struct mpconf *conf = mddev->private;
-
- if (conf->raid_disks - mddev->degraded <= 1) {
- /*
- * Uh oh, we can do nothing if this is our last path, but
- * first check if this is a queued request for a device
- * which has just failed.
- */
- pr_warn("multipath: only one IO path left and IO error.\n");
- /* leave it active... it's all we have */
- return;
- }
- /*
- * Mark disk as unusable
- */
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- }
- set_bit(Faulty, &rdev->flags);
- set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- pr_err("multipath: IO failure on %pg, disabling IO path.\n"
- "multipath: Operation continuing on %d IO paths.\n",
- rdev->bdev,
- conf->raid_disks - mddev->degraded);
-}
-
-static void print_multipath_conf (struct mpconf *conf)
-{
- int i;
- struct multipath_info *tmp;
-
- pr_debug("MULTIPATH conf printout:\n");
- if (!conf) {
- pr_debug("(conf==NULL)\n");
- return;
- }
- pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
-
- for (i = 0; i < conf->raid_disks; i++) {
- tmp = conf->multipaths + i;
- if (tmp->rdev)
- pr_debug(" disk%d, o:%d, dev:%pg\n",
- i,!test_bit(Faulty, &tmp->rdev->flags),
- tmp->rdev->bdev);
- }
-}
-
-static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
-{
- struct mpconf *conf = mddev->private;
- int err = -EEXIST;
- int path;
- struct multipath_info *p;
- int first = 0;
- int last = mddev->raid_disks - 1;
-
- if (rdev->raid_disk >= 0)
- first = last = rdev->raid_disk;
-
- print_multipath_conf(conf);
-
- for (path = first; path <= last; path++)
- if ((p=conf->multipaths+path)->rdev == NULL) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
- err = md_integrity_add_rdev(rdev, mddev);
- if (err)
- break;
- spin_lock_irq(&conf->device_lock);
- mddev->degraded--;
- rdev->raid_disk = path;
- set_bit(In_sync, &rdev->flags);
- spin_unlock_irq(&conf->device_lock);
- rcu_assign_pointer(p->rdev, rdev);
- err = 0;
- break;
- }
-
- print_multipath_conf(conf);
-
- return err;
-}
-
-static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
-{
- struct mpconf *conf = mddev->private;
- int err = 0;
- int number = rdev->raid_disk;
- struct multipath_info *p = conf->multipaths + number;
-
- print_multipath_conf(conf);
-
- if (rdev == p->rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- err = md_integrity_register(mddev);
- }
-abort:
-
- print_multipath_conf(conf);
- return err;
-}
-
-/*
- * This is a kernel thread which:
- *
- * 1. Retries failed read operations on working multipaths.
- * 2. Updates the raid superblock when problems are encountered.
- * 3. Performs writes following reads for array synchronising.
- */
-
-static void multipathd(struct md_thread *thread)
-{
- struct mddev *mddev = thread->mddev;
- struct multipath_bh *mp_bh;
- struct bio *bio;
- unsigned long flags;
- struct mpconf *conf = mddev->private;
- struct list_head *head = &conf->retry_list;
-
- md_check_recovery(mddev);
- for (;;) {
- spin_lock_irqsave(&conf->device_lock, flags);
- if (list_empty(head))
- break;
- mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
- list_del(head->prev);
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- bio = &mp_bh->bio;
- bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
-
- if ((mp_bh->path = multipath_map (conf))<0) {
- pr_err("multipath: %pg: unrecoverable IO read error for block %llu\n",
- bio->bi_bdev,
- (unsigned long long)bio->bi_iter.bi_sector);
- multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
- } else {
- pr_err("multipath: %pg: redirecting sector %llu to another IO path\n",
- bio->bi_bdev,
- (unsigned long long)bio->bi_iter.bi_sector);
- *bio = *(mp_bh->master_bio);
- bio->bi_iter.bi_sector +=
- conf->multipaths[mp_bh->path].rdev->data_offset;
- bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
- bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
- bio->bi_end_io = multipath_end_request;
- bio->bi_private = mp_bh;
- submit_bio_noacct(bio);
- }
- }
- spin_unlock_irqrestore(&conf->device_lock, flags);
-}
-
-static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
-{
- WARN_ONCE(sectors || raid_disks,
- "%s does not support generic reshape\n", __func__);
-
- return mddev->dev_sectors;
-}
-
-static int multipath_run (struct mddev *mddev)
-{
- struct mpconf *conf;
- int disk_idx;
- struct multipath_info *disk;
- struct md_rdev *rdev;
- int working_disks;
- int ret;
-
- if (md_check_no_bitmap(mddev))
- return -EINVAL;
-
- if (mddev->level != LEVEL_MULTIPATH) {
- pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
- mdname(mddev), mddev->level);
- goto out;
- }
- /*
- * copy the already verified devices into our private MULTIPATH
- * bookkeeping area. [whatever we allocate in multipath_run(),
- * should be freed in multipath_free()]
- */
-
- conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
- mddev->private = conf;
- if (!conf)
- goto out;
-
- conf->multipaths = kcalloc(mddev->raid_disks,
- sizeof(struct multipath_info),
- GFP_KERNEL);
- if (!conf->multipaths)
- goto out_free_conf;
-
- working_disks = 0;
- rdev_for_each(rdev, mddev) {
- disk_idx = rdev->raid_disk;
- if (disk_idx < 0 ||
- disk_idx >= mddev->raid_disks)
- continue;
-
- disk = conf->multipaths + disk_idx;
- disk->rdev = rdev;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
- if (!test_bit(Faulty, &rdev->flags))
- working_disks++;
- }
-
- conf->raid_disks = mddev->raid_disks;
- conf->mddev = mddev;
- spin_lock_init(&conf->device_lock);
- INIT_LIST_HEAD(&conf->retry_list);
-
- if (!working_disks) {
- pr_warn("multipath: no operational IO paths for %s\n",
- mdname(mddev));
- goto out_free_conf;
- }
- mddev->degraded = conf->raid_disks - working_disks;
-
- ret = mempool_init_kmalloc_pool(&conf->pool, NR_RESERVED_BUFS,
- sizeof(struct multipath_bh));
- if (ret)
- goto out_free_conf;
-
- rcu_assign_pointer(mddev->thread,
- md_register_thread(multipathd, mddev, "multipath"));
- if (!mddev->thread)
- goto out_free_conf;
-
- pr_info("multipath: array %s active with %d out of %d IO paths\n",
- mdname(mddev), conf->raid_disks - mddev->degraded,
- mddev->raid_disks);
- /*
- * Ok, everything is just fine now
- */
- md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
-
- if (md_integrity_register(mddev))
- goto out_free_conf;
-
- return 0;
-
-out_free_conf:
- mempool_exit(&conf->pool);
- kfree(conf->multipaths);
- kfree(conf);
- mddev->private = NULL;
-out:
- return -EIO;
-}
-
-static void multipath_free(struct mddev *mddev, void *priv)
-{
- struct mpconf *conf = priv;
-
- mempool_exit(&conf->pool);
- kfree(conf->multipaths);
- kfree(conf);
-}
-
-static struct md_personality multipath_personality =
-{
- .name = "multipath",
- .level = LEVEL_MULTIPATH,
- .owner = THIS_MODULE,
- .make_request = multipath_make_request,
- .run = multipath_run,
- .free = multipath_free,
- .status = multipath_status,
- .error_handler = multipath_error,
- .hot_add_disk = multipath_add_disk,
- .hot_remove_disk= multipath_remove_disk,
- .size = multipath_size,
-};
-
-static int __init multipath_init (void)
-{
- return register_md_personality (&multipath_personality);
-}
-
-static void __exit multipath_exit (void)
-{
- unregister_md_personality (&multipath_personality);
-}
-
-module_init(multipath_init);
-module_exit(multipath_exit);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("simple multi-path personality for MD (deprecated)");
-MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
-MODULE_ALIAS("md-multipath");
-MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 67befb598..f54012d68 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1296,17 +1296,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
rdev->sb_size = MD_SB_BYTES;
rdev->badblocks.shift = -1;
- if (sb->level == LEVEL_MULTIPATH)
- rdev->desc_nr = -1;
- else
- rdev->desc_nr = sb->this_disk.number;
-
- /* not spare disk, or LEVEL_MULTIPATH */
- if (sb->level == LEVEL_MULTIPATH ||
- (rdev->desc_nr >= 0 &&
- rdev->desc_nr < MD_SB_DISKS &&
- sb->disks[rdev->desc_nr].state &
- ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
+ rdev->desc_nr = sb->this_disk.number;
+
+ /* not spare disk */
+ if (rdev->desc_nr >= 0 && rdev->desc_nr < MD_SB_DISKS &&
+ sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
spare_disk = false;
if (!refdev) {
@@ -1453,31 +1447,28 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
return 0;
}
- if (mddev->level != LEVEL_MULTIPATH) {
- desc = sb->disks + rdev->desc_nr;
+ desc = sb->disks + rdev->desc_nr;
- if (desc->state & (1<<MD_DISK_FAULTY))
- set_bit(Faulty, &rdev->flags);
- else if (desc->state & (1<<MD_DISK_SYNC) /* &&
- desc->raid_disk < mddev->raid_disks */) {
- set_bit(In_sync, &rdev->flags);
+ if (desc->state & (1<<MD_DISK_FAULTY))
+ set_bit(Faulty, &rdev->flags);
+ else if (desc->state & (1<<MD_DISK_SYNC)) {
+ set_bit(In_sync, &rdev->flags);
+ rdev->raid_disk = desc->raid_disk;
+ rdev->saved_raid_disk = desc->raid_disk;
+ } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
+ /* active but not in sync implies recovery up to
+ * reshape position. We don't know exactly where
+ * that is, so set to zero for now
+ */
+ if (mddev->minor_version >= 91) {
+ rdev->recovery_offset = 0;
rdev->raid_disk = desc->raid_disk;
- rdev->saved_raid_disk = desc->raid_disk;
- } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
- /* active but not in sync implies recovery up to
- * reshape position. We don't know exactly where
- * that is, so set to zero for now */
- if (mddev->minor_version >= 91) {
- rdev->recovery_offset = 0;
- rdev->raid_disk = desc->raid_disk;
- }
}
- if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
- set_bit(WriteMostly, &rdev->flags);
- if (desc->state & (1<<MD_DISK_FAILFAST))
- set_bit(FailFast, &rdev->flags);
- } else /* MULTIPATH are always insync */
- set_bit(In_sync, &rdev->flags);
+ }
+ if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
+ set_bit(WriteMostly, &rdev->flags);
+ if (desc->state & (1<<MD_DISK_FAILFAST))
+ set_bit(FailFast, &rdev->flags);
return 0;
}
@@ -1767,10 +1758,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
&& rdev->new_data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
- if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
- rdev->desc_nr = -1;
- else
- rdev->desc_nr = le32_to_cpu(sb->dev_number);
+ rdev->desc_nr = le32_to_cpu(sb->dev_number);
if (!rdev->bb_page) {
rdev->bb_page = alloc_page(GFP_KERNEL);
@@ -1823,12 +1811,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
sb->level != 0)
return -EINVAL;
- /* not spare disk, or LEVEL_MULTIPATH */
- if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
- (rdev->desc_nr >= 0 &&
- rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
- (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
- le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
+ /* not spare disk */
+ if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
+ (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
+ le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
spare_disk = false;
if (!refdev) {
@@ -1871,6 +1857,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
{
struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
+ int role;
rdev->raid_disk = -1;
clear_bit(Faulty, &rdev->flags);
@@ -1986,88 +1973,85 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
/* just a hot-add of a new device, leave raid_disk at -1 */
return 0;
}
- if (mddev->level != LEVEL_MULTIPATH) {
- int role;
- if (rdev->desc_nr < 0 ||
- rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
- role = MD_DISK_ROLE_SPARE;
- rdev->desc_nr = -1;
- } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
- /*
- * If we are assembling, and our event counter is smaller than the
- * highest event counter, we cannot trust our superblock about the role.
- * It could happen that our rdev was marked as Faulty, and all other
- * superblocks were updated with +1 event counter.
- * Then, before the next superblock update, which typically happens when
- * remove_and_add_spares() removes the device from the array, there was
- * a crash or reboot.
- * If we allow current rdev without consulting the freshest superblock,
- * we could cause data corruption.
- * Note that in this case our event counter is smaller by 1 than the
- * highest, otherwise, this rdev would not be allowed into array;
- * both kernel and mdadm allow event counter difference of 1.
- */
- struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
- u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
-
- if (rdev->desc_nr >= freshest_max_dev) {
- /* this is unexpected, better not proceed */
- pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
- mdname(mddev), rdev->bdev, rdev->desc_nr,
- freshest->bdev, freshest_max_dev);
- return -EUCLEAN;
- }
- role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
- pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
- mdname(mddev), rdev->bdev, role, role, freshest->bdev);
- } else {
- role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
+ if (rdev->desc_nr < 0 ||
+ rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
+ role = MD_DISK_ROLE_SPARE;
+ rdev->desc_nr = -1;
+ } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) {
+ /*
+ * If we are assembling, and our event counter is smaller than the
+ * highest event counter, we cannot trust our superblock about the role.
+ * It could happen that our rdev was marked as Faulty, and all other
+ * superblocks were updated with +1 event counter.
+ * Then, before the next superblock update, which typically happens when
+ * remove_and_add_spares() removes the device from the array, there was
+ * a crash or reboot.
+ * If we allow current rdev without consulting the freshest superblock,
+ * we could cause data corruption.
+ * Note that in this case our event counter is smaller by 1 than the
+ * highest, otherwise, this rdev would not be allowed into array;
+ * both kernel and mdadm allow event counter difference of 1.
+ */
+ struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page);
+ u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev);
+
+ if (rdev->desc_nr >= freshest_max_dev) {
+ /* this is unexpected, better not proceed */
+ pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
+ mdname(mddev), rdev->bdev, rdev->desc_nr,
+ freshest->bdev, freshest_max_dev);
+ return -EUCLEAN;
}
- switch(role) {
- case MD_DISK_ROLE_SPARE: /* spare */
- break;
- case MD_DISK_ROLE_FAULTY: /* faulty */
- set_bit(Faulty, &rdev->flags);
- break;
- case MD_DISK_ROLE_JOURNAL: /* journal device */
- if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
- /* journal device without journal feature */
- pr_warn("md: journal device provided without journal feature, ignoring the device\n");
- return -EINVAL;
- }
- set_bit(Journal, &rdev->flags);
- rdev->journal_tail = le64_to_cpu(sb->journal_tail);
- rdev->raid_disk = 0;
- break;
- default:
- rdev->saved_raid_disk = role;
- if ((le32_to_cpu(sb->feature_map) &
- MD_FEATURE_RECOVERY_OFFSET)) {
- rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
- if (!(le32_to_cpu(sb->feature_map) &
- MD_FEATURE_RECOVERY_BITMAP))
- rdev->saved_raid_disk = -1;
- } else {
- /*
- * If the array is FROZEN, then the device can't
- * be in_sync with rest of array.
- */
- if (!test_bit(MD_RECOVERY_FROZEN,
- &mddev->recovery))
- set_bit(In_sync, &rdev->flags);
- }
- rdev->raid_disk = role;
- break;
+
+ role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
+ pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
+ mdname(mddev), rdev->bdev, role, role, freshest->bdev);
+ } else {
+ role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
+ }
+ switch (role) {
+ case MD_DISK_ROLE_SPARE: /* spare */
+ break;
+ case MD_DISK_ROLE_FAULTY: /* faulty */
+ set_bit(Faulty, &rdev->flags);
+ break;
+ case MD_DISK_ROLE_JOURNAL: /* journal device */
+ if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
+ /* journal device without journal feature */
+ pr_warn("md: journal device provided without journal feature, ignoring the device\n");
+ return -EINVAL;
}
- if (sb->devflags & WriteMostly1)
- set_bit(WriteMostly, &rdev->flags);
- if (sb->devflags & FailFast1)
- set_bit(FailFast, &rdev->flags);
- if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
- set_bit(Replacement, &rdev->flags);
- } else /* MULTIPATH are always insync */
- set_bit(In_sync, &rdev->flags);
+ set_bit(Journal, &rdev->flags);
+ rdev->journal_tail = le64_to_cpu(sb->journal_tail);
+ rdev->raid_disk = 0;
+ break;
+ default:
+ rdev->saved_raid_disk = role;
+ if ((le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_OFFSET)) {
+ rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
+ if (!(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_BITMAP))
+ rdev->saved_raid_disk = -1;
+ } else {
+ /*
+ * If the array is FROZEN, then the device can't
+ * be in_sync with rest of array.
+ */
+ if (!test_bit(MD_RECOVERY_FROZEN,
+ &mddev->recovery))
+ set_bit(In_sync, &rdev->flags);
+ }
+ rdev->raid_disk = role;
+ break;
+ }
+ if (sb->devflags & WriteMostly1)
+ set_bit(WriteMostly, &rdev->flags);
+ if (sb->devflags & FailFast1)
+ set_bit(FailFast, &rdev->flags);
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
+ set_bit(Replacement, &rdev->flags);
return 0;
}
@@ -2886,10 +2870,6 @@ rewrite:
} else
pr_debug("md: %pg (skipping faulty)\n",
rdev->bdev);
-
- if (mddev->level == LEVEL_MULTIPATH)
- /* only need to write one superblock... */
- break;
}
if (md_super_wait(mddev) < 0)
goto rewrite;
@@ -3890,13 +3870,8 @@ static int analyze_sbs(struct mddev *mddev)
continue;
}
}
- if (mddev->level == LEVEL_MULTIPATH) {
- rdev->desc_nr = i++;
- rdev->raid_disk = rdev->desc_nr;
- set_bit(In_sync, &rdev->flags);
- } else if (rdev->raid_disk >=
- (mddev->raid_disks - min(0, mddev->delta_disks)) &&
- !test_bit(Journal, &rdev->flags)) {
+ if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks)) &&
+ !test_bit(Journal, &rdev->flags)) {
rdev->raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
}
@@ -8156,7 +8131,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
return;
mddev->pers->error_handler(mddev, rdev);
- if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
+ if (mddev->pers->level == 0)
return;
if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 3f22edec7..512746551 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -173,3 +173,57 @@ static inline void raid1_prepare_flush_writes(struct bitmap *bitmap)
else
md_bitmap_unplug(bitmap);
}
+
+/*
+ * Used by fix_read_error() to decay the per rdev read_errors.
+ * We halve the read error count for every hour that has elapsed
+ * since the last recorded read error.
+ */
+static inline void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+{
+ long cur_time_mon;
+ unsigned long hours_since_last;
+ unsigned int read_errors = atomic_read(&rdev->read_errors);
+
+ cur_time_mon = ktime_get_seconds();
+
+ if (rdev->last_read_error == 0) {
+ /* first time we've seen a read error */
+ rdev->last_read_error = cur_time_mon;
+ return;
+ }
+
+ hours_since_last = (long)(cur_time_mon -
+ rdev->last_read_error) / 3600;
+
+ rdev->last_read_error = cur_time_mon;
+
+ /*
+ * if hours_since_last is > the number of bits in read_errors
+ * just set read errors to 0. We do this to avoid
+ * overflowing the shift of read_errors by hours_since_last.
+ */
+ if (hours_since_last >= 8 * sizeof(read_errors))
+ atomic_set(&rdev->read_errors, 0);
+ else
+ atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+}
+
+static inline bool exceed_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+{
+ int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
+ int read_errors;
+
+ check_decay_read_errors(mddev, rdev);
+ read_errors = atomic_inc_return(&rdev->read_errors);
+ if (read_errors > max_read_errors) {
+ pr_notice("md/"RAID_1_10_NAME":%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+ mdname(mddev), rdev->bdev, read_errors, max_read_errors);
+ pr_notice("md/"RAID_1_10_NAME":%s: %pg: Failing raid device\n",
+ mdname(mddev), rdev->bdev);
+ md_error(mddev, rdev);
+ return true;
+ }
+
+ return false;
+}
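
A userspace model of the decay logic added above: the error count is halved once per elapsed hour via a right shift, and cleared outright when the shift would cover every bit of the counter. Times are plain seconds, as with ktime_get_seconds():

#include <stdio.h>

static unsigned int decay_read_errors(unsigned int errors,
				      long last, long now)
{
	unsigned long hours_since_last = (unsigned long)(now - last) / 3600;

	if (hours_since_last >= 8 * sizeof(errors))
		return 0;
	return errors >> hours_since_last;
}

int main(void)
{
	/* two hours elapsed: 40 >> 2 == 10 */
	printf("%u\n", decay_read_errors(40, 0, 2 * 3600));
	return 0;
}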
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 750a80247..4f3c35f13 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -49,6 +49,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#define raid1_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+#define RAID_1_10_NAME "raid1"
#include "raid1-10.c"
#define START(node) ((node)->start)
@@ -1131,8 +1132,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
&r1_bio->mddev->bio_set);
- if (!behind_bio)
- return;
/* discard op, we don't support writezero/writesame yet */
if (!bio_has_data(bio)) {
@@ -1475,7 +1474,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
- free_r1bio(r1_bio);
+ mempool_free(r1_bio, &conf->r1bio_pool);
allow_barrier(conf, bio->bi_iter.bi_sector);
if (bio->bi_opf & REQ_NOWAIT) {
@@ -2299,16 +2298,24 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
* 3. Performs writes following reads for array synchronising.
*/
-static void fix_read_error(struct r1conf *conf, int read_disk,
- sector_t sect, int sectors)
+static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
+ sector_t sect = r1_bio->sector;
+ int sectors = r1_bio->sectors;
+ int read_disk = r1_bio->read_disk;
struct mddev *mddev = conf->mddev;
+ struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
+
+ if (exceed_read_errors(mddev, rdev)) {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ return;
+ }
+
while(sectors) {
int s = sectors;
int d = read_disk;
int success = 0;
int start;
- struct md_rdev *rdev;
if (s > (PAGE_SIZE>>9))
s = PAGE_SIZE >> 9;
@@ -2549,8 +2556,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
if (mddev->ro == 0
&& !test_bit(FailFast, &rdev->flags)) {
freeze_array(conf, 1);
- fix_read_error(conf, r1_bio->read_disk,
- r1_bio->sector, r1_bio->sectors);
+ fix_read_error(conf, r1_bio);
unfreeze_array(conf);
} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
md_error(mddev, rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e828a6aa..a5f8419e2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -19,6 +19,8 @@
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
+
+#define RAID_1_10_NAME "raid10"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"
@@ -743,7 +745,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
struct geom *geo = &conf->geo;
raid10_find_phys(conf, r10_bio);
- rcu_read_lock();
best_dist_slot = -1;
min_pending = UINT_MAX;
best_dist_rdev = NULL;
@@ -775,18 +776,11 @@ static struct md_rdev *read_balance(struct r10conf *conf,
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].replacement);
+ rdev = conf->mirrors[disk].replacement;
if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
r10_bio->devs[slot].addr + sectors >
- rdev->recovery_offset) {
- /*
- * Read replacement first to prevent reading both rdev
- * and replacement as NULL during replacement replace
- * rdev.
- */
- smp_mb();
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
- }
+ rdev->recovery_offset)
+ rdev = conf->mirrors[disk].rdev;
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags))
continue;
@@ -876,7 +870,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
r10_bio->read_slot = slot;
} else
rdev = NULL;
- rcu_read_unlock();
*max_sectors = best_good_sectors;
return rdev;
@@ -1198,9 +1191,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
*/
gfp = GFP_NOIO | __GFP_HIGH;
- rcu_read_lock();
disk = r10_bio->devs[slot].devnum;
- err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ err_rdev = conf->mirrors[disk].rdev;
if (err_rdev)
snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
else {
@@ -1208,7 +1200,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
/* This never gets dereferenced */
err_rdev = r10_bio->devs[slot].rdev;
}
- rcu_read_unlock();
}
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
@@ -1279,15 +1270,8 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
int devnum = r10_bio->devs[n_copy].devnum;
struct bio *mbio;
- if (replacement) {
- rdev = conf->mirrors[devnum].replacement;
- if (rdev == NULL) {
- /* Replacement just got moved to main 'rdev' */
- smp_mb();
- rdev = conf->mirrors[devnum].rdev;
- }
- } else
- rdev = conf->mirrors[devnum].rdev;
+ rdev = replacement ? conf->mirrors[devnum].replacement :
+ conf->mirrors[devnum].rdev;
mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
if (replacement)
@@ -1321,25 +1305,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
}
}
-static struct md_rdev *dereference_rdev_and_rrdev(struct raid10_info *mirror,
- struct md_rdev **prrdev)
-{
- struct md_rdev *rdev, *rrdev;
-
- rrdev = rcu_dereference(mirror->replacement);
- /*
- * Read replacement first to prevent reading both rdev and
- * replacement as NULL during replacement replace rdev.
- */
- smp_mb();
- rdev = rcu_dereference(mirror->rdev);
- if (rdev == rrdev)
- rrdev = NULL;
-
- *prrdev = rrdev;
- return rdev;
-}
-
static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
{
int i;
@@ -1348,11 +1313,11 @@ static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
retry_wait:
blocked_rdev = NULL;
- rcu_read_lock();
for (i = 0; i < conf->copies; i++) {
struct md_rdev *rdev, *rrdev;
- rdev = dereference_rdev_and_rrdev(&conf->mirrors[i], &rrdev);
+ rdev = conf->mirrors[i].rdev;
+ rrdev = conf->mirrors[i].replacement;
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
atomic_inc(&rdev->nr_pending);
blocked_rdev = rdev;
@@ -1391,7 +1356,6 @@ retry_wait:
}
}
}
- rcu_read_unlock();
if (unlikely(blocked_rdev)) {
/* Have to wait for this device to get unblocked, then retry */
@@ -1474,14 +1438,14 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
wait_blocked_dev(mddev, r10_bio);
- rcu_read_lock();
max_sectors = r10_bio->sectors;
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
struct md_rdev *rdev, *rrdev;
- rdev = dereference_rdev_and_rrdev(&conf->mirrors[d], &rrdev);
+ rdev = conf->mirrors[d].rdev;
+ rrdev = conf->mirrors[d].replacement;
if (rdev && (test_bit(Faulty, &rdev->flags)))
rdev = NULL;
if (rrdev && (test_bit(Faulty, &rrdev->flags)))
@@ -1535,7 +1499,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&rrdev->nr_pending);
}
}
- rcu_read_unlock();
if (max_sectors < r10_bio->sectors)
r10_bio->sectors = max_sectors;
@@ -1625,17 +1588,8 @@ static void raid10_end_discard_request(struct bio *bio)
set_bit(R10BIO_Uptodate, &r10_bio->state);
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
- if (repl)
- rdev = conf->mirrors[dev].replacement;
- if (!rdev) {
- /*
- * raid10_remove_disk uses smp_mb to make sure rdev is set to
- * replacement before setting replacement to NULL. It can read
- * rdev first without barrier protect even replacement is NULL
- */
- smp_rmb();
- rdev = conf->mirrors[dev].rdev;
- }
+ rdev = repl ? conf->mirrors[dev].replacement :
+ conf->mirrors[dev].rdev;
raid_end_discard_bio(r10_bio);
rdev_dec_pending(rdev, conf->mddev);
@@ -1785,11 +1739,11 @@ retry_discard:
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
*/
- rcu_read_lock();
for (disk = 0; disk < geo->raid_disks; disk++) {
struct md_rdev *rdev, *rrdev;
- rdev = dereference_rdev_and_rrdev(&conf->mirrors[disk], &rrdev);
+ rdev = conf->mirrors[disk].rdev;
+ rrdev = conf->mirrors[disk].replacement;
r10_bio->devs[disk].bio = NULL;
r10_bio->devs[disk].repl_bio = NULL;
@@ -1809,7 +1763,6 @@ retry_discard:
atomic_inc(&rrdev->nr_pending);
}
}
- rcu_read_unlock();
atomic_set(&r10_bio->remaining, 1);
for (disk = 0; disk < geo->raid_disks; disk++) {
@@ -1939,6 +1892,8 @@ static void raid10_status(struct seq_file *seq, struct mddev *mddev)
struct r10conf *conf = mddev->private;
int i;
+ lockdep_assert_held(&mddev->lock);
+
if (conf->geo.near_copies < conf->geo.raid_disks)
seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
if (conf->geo.near_copies > 1)
@@ -1953,12 +1908,11 @@ static void raid10_status(struct seq_file *seq, struct mddev *mddev)
}
seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
conf->geo.raid_disks - mddev->degraded);
- rcu_read_lock();
for (i = 0; i < conf->geo.raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
+
seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
- rcu_read_unlock();
seq_printf(seq, "]");
}
@@ -1980,7 +1934,6 @@ static int _enough(struct r10conf *conf, int previous, int ignore)
ncopies = conf->geo.near_copies;
}
- rcu_read_lock();
do {
int n = conf->copies;
int cnt = 0;
@@ -1988,7 +1941,7 @@ static int _enough(struct r10conf *conf, int previous, int ignore)
while (n--) {
struct md_rdev *rdev;
if (this != ignore &&
- (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
+ (rdev = conf->mirrors[this].rdev) &&
test_bit(In_sync, &rdev->flags))
cnt++;
this = (this+1) % disks;
@@ -1999,7 +1952,6 @@ static int _enough(struct r10conf *conf, int previous, int ignore)
} while (first != 0);
has_enough = 1;
out:
- rcu_read_unlock();
return has_enough;
}
@@ -2072,8 +2024,7 @@ static void print_conf(struct r10conf *conf)
pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
conf->geo.raid_disks);
- /* This is only called with ->reconfig_mutex held, so
- * rcu protection of rdev is not needed */
+ lockdep_assert_held(&conf->mddev->reconfig_mutex);
for (i = 0; i < conf->geo.raid_disks; i++) {
rdev = conf->mirrors[i].rdev;
if (rdev)
@@ -2190,7 +2141,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
err = 0;
if (rdev->saved_raid_disk != mirror)
conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
+ WRITE_ONCE(p->rdev, rdev);
break;
}
@@ -2204,7 +2155,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
conf->fullsync = 1;
- rcu_assign_pointer(p->replacement, rdev);
+ WRITE_ONCE(p->replacement, rdev);
}
print_conf(conf);
@@ -2246,15 +2197,12 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
err = -EBUSY;
goto abort;
}
- *rdevp = NULL;
+ WRITE_ONCE(*rdevp, NULL);
if (p->replacement) {
/* We must have just cleared 'rdev' */
- p->rdev = p->replacement;
+ WRITE_ONCE(p->rdev, p->replacement);
clear_bit(Replacement, &p->replacement->flags);
- smp_mb(); /* Make sure other CPUs may see both as identical
- * but will never see neither -- if they are careful.
- */
- p->replacement = NULL;
+ WRITE_ONCE(p->replacement, NULL);
}
clear_bit(WantReplacement, &rdev->flags);
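
The removal path keeps its clear-then-promote sequence but drops the explicit barrier: the readers that matter hold nr_pending, so the only remaining requirement is that each individual store be untorn, which WRITE_ONCE() provides. A simplified model of the sequence (stand-in types, not the md structures):

```c
#define MY_WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct md_rdev;
struct mirror_slot { struct md_rdev *rdev, *replacement; };

static void remove_and_promote(struct mirror_slot *p)
{
	MY_WRITE_ONCE(p->rdev, NULL);
	if (p->replacement) {
		/* the replacement takes over the primary role ... */
		MY_WRITE_ONCE(p->rdev, p->replacement);
		/* ... and the replacement slot is vacated */
		MY_WRITE_ONCE(p->replacement, NULL);
	}
}
```
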
@@ -2646,42 +2594,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
}
-/*
- * Used by fix_read_error() to decay the per rdev read_errors.
- * We halve the read error count for every hour that has elapsed
- * since the last recorded read error.
- *
- */
-static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
-{
- long cur_time_mon;
- unsigned long hours_since_last;
- unsigned int read_errors = atomic_read(&rdev->read_errors);
-
- cur_time_mon = ktime_get_seconds();
-
- if (rdev->last_read_error == 0) {
- /* first time we've seen a read error */
- rdev->last_read_error = cur_time_mon;
- return;
- }
-
- hours_since_last = (long)(cur_time_mon -
- rdev->last_read_error) / 3600;
-
- rdev->last_read_error = cur_time_mon;
-
- /*
- * if hours_since_last is > the number of bits in read_errors
- * just set read errors to 0. We do this to avoid
- * overflowing the shift of read_errors by hours_since_last.
- */
- if (hours_since_last >= 8 * sizeof(read_errors))
- atomic_set(&rdev->read_errors, 0);
- else
- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
-}
-
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
int sectors, struct page *page, enum req_op op)
{
@@ -2719,7 +2631,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
int sect = 0; /* Offset from r10_bio->sector */
int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
struct md_rdev *rdev;
- int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
int d = r10_bio->devs[slot].devnum;
/* still own a reference to this rdev, so it cannot
@@ -2732,15 +2643,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
more fix_read_error() attempts */
return;
- check_decay_read_errors(mddev, rdev);
- atomic_inc(&rdev->read_errors);
- if (atomic_read(&rdev->read_errors) > max_read_errors) {
- pr_notice("md/raid10:%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
- mdname(mddev), rdev->bdev,
- atomic_read(&rdev->read_errors), max_read_errors);
- pr_notice("md/raid10:%s: %pg: Failing raid device\n",
- mdname(mddev), rdev->bdev);
- md_error(mddev, rdev);
+ if (exceed_read_errors(mddev, rdev)) {
r10_bio->devs[slot].bio = IO_BLOCKED;
return;
}
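
The deleted check_decay_read_errors() and the inline threshold test are folded into exceed_read_errors(), a helper shared with raid1 in drivers/md/md.c. The decay rule it preserves: halve the stored error count for every full hour since the last error, clamping to zero once the shift would exceed the counter width. A userspace sketch of that arithmetic:

```c
#include <stdint.h>

static unsigned int decay_read_errors(unsigned int errors,
				      int64_t last_err_sec, int64_t now_sec)
{
	int64_t hours = (now_sec - last_err_sec) / 3600;

	if (hours <= 0)
		return errors;
	/* shifting by >= the type width is undefined; treat as fully decayed */
	if (hours >= (int64_t)(8 * sizeof(errors)))
		return 0;
	return errors >> hours;
}
```
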
@@ -2754,20 +2657,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (s > (PAGE_SIZE>>9))
s = PAGE_SIZE >> 9;
- rcu_read_lock();
do {
sector_t first_bad;
int bad_sectors;
d = r10_bio->devs[sl].devnum;
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
&first_bad, &bad_sectors) == 0) {
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
success = sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
@@ -2775,7 +2676,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
conf->tmppage,
REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
if (success)
break;
}
@@ -2783,7 +2683,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (sl == conf->copies)
sl = 0;
} while (sl != slot);
- rcu_read_unlock();
if (!success) {
/* Cannot read from anywhere, just mark the block
@@ -2807,20 +2706,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
start = sl;
/* write it back and re-read */
- rcu_read_lock();
while (sl != slot) {
if (sl==0)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
if (!rdev ||
test_bit(Faulty, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
continue;
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
@@ -2839,7 +2736,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
rdev->bdev);
}
rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
}
sl = start;
while (sl != slot) {
@@ -2847,14 +2743,13 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
if (!rdev ||
test_bit(Faulty, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
continue;
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
@@ -2882,9 +2777,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
}
rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
}
- rcu_read_unlock();
sectors -= s;
sect += s;
@@ -3358,14 +3251,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Completed a full sync so the replacements
* are now fully recovered.
*/
- rcu_read_lock();
for (i = 0; i < conf->geo.raid_disks; i++) {
struct md_rdev *rdev =
- rcu_dereference(conf->mirrors[i].replacement);
+ conf->mirrors[i].replacement;
+
if (rdev)
rdev->recovery_offset = MaxSector;
}
- rcu_read_unlock();
}
conf->fullsync = 0;
}
@@ -3446,9 +3338,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
- rcu_read_lock();
- mrdev = rcu_dereference(mirror->rdev);
- mreplace = rcu_dereference(mirror->replacement);
+ mrdev = mirror->rdev;
+ mreplace = mirror->replacement;
if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
test_bit(In_sync, &mrdev->flags)))
@@ -3456,22 +3347,18 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mreplace && test_bit(Faulty, &mreplace->flags))
mreplace = NULL;
- if (!mrdev && !mreplace) {
- rcu_read_unlock();
+ if (!mrdev && !mreplace)
continue;
- }
still_degraded = 0;
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
- if (sect >= mddev->resync_max_sectors) {
+ if (sect >= mddev->resync_max_sectors)
/* last stripe is not complete - don't
* try to recover this sector.
*/
- rcu_read_unlock();
continue;
- }
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -3487,14 +3374,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* that there will never be anything to do here
*/
chunks_skipped = -1;
- rcu_read_unlock();
continue;
}
if (mrdev)
atomic_inc(&mrdev->nr_pending);
if (mreplace)
atomic_inc(&mreplace->nr_pending);
- rcu_read_unlock();
r10_bio = raid10_alloc_init_r10buf(conf);
r10_bio->state = 0;
@@ -3513,10 +3398,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to check if the array will still be
* degraded
*/
- rcu_read_lock();
for (j = 0; j < conf->geo.raid_disks; j++) {
- struct md_rdev *rdev = rcu_dereference(
- conf->mirrors[j].rdev);
+ struct md_rdev *rdev = conf->mirrors[j].rdev;
+
if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
still_degraded = 1;
break;
@@ -3531,8 +3415,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int k;
int d = r10_bio->devs[j].devnum;
sector_t from_addr, to_addr;
- struct md_rdev *rdev =
- rcu_dereference(conf->mirrors[d].rdev);
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
sector_t sector, first_bad;
int bad_sectors;
if (!rdev ||
@@ -3611,7 +3494,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_inc(&r10_bio->remaining);
break;
}
- rcu_read_unlock();
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
@@ -3738,12 +3620,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[i].bio;
bio->bi_status = BLK_STS_IOERR;
- rcu_read_lock();
- rdev = rcu_dereference(conf->mirrors[d].rdev);
- if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
- rcu_read_unlock();
+ rdev = conf->mirrors[d].rdev;
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags))
continue;
- }
+
sector = r10_bio->devs[i].addr;
if (is_badblock(rdev, sector, max_sync,
&first_bad, &bad_sectors)) {
@@ -3753,7 +3633,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bad_sectors -= (sector - first_bad);
if (max_sync > bad_sectors)
max_sync = bad_sectors;
- rcu_read_unlock();
continue;
}
}
@@ -3769,11 +3648,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_dev(bio, rdev->bdev);
count++;
- rdev = rcu_dereference(conf->mirrors[d].replacement);
- if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
- rcu_read_unlock();
+ rdev = conf->mirrors[d].replacement;
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags))
continue;
- }
+
atomic_inc(&rdev->nr_pending);
/* Need to set up for writing to the replacement */
@@ -3790,7 +3668,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_set_dev(bio, rdev->bdev);
count++;
- rcu_read_unlock();
}
if (count < 2) {
@@ -4496,11 +4373,11 @@ static int calc_degraded(struct r10conf *conf)
int degraded, degraded2;
int i;
- rcu_read_lock();
degraded = 0;
/* 'prev' section first */
for (i = 0; i < conf->prev.raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
+
if (!rdev || test_bit(Faulty, &rdev->flags))
degraded++;
else if (!test_bit(In_sync, &rdev->flags))
@@ -4510,13 +4387,12 @@ static int calc_degraded(struct r10conf *conf)
*/
degraded++;
}
- rcu_read_unlock();
if (conf->geo.raid_disks == conf->prev.raid_disks)
return degraded;
- rcu_read_lock();
degraded2 = 0;
for (i = 0; i < conf->geo.raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
+
if (!rdev || test_bit(Faulty, &rdev->flags))
degraded2++;
else if (!test_bit(In_sync, &rdev->flags)) {
@@ -4529,7 +4405,6 @@ static int calc_degraded(struct r10conf *conf)
degraded2++;
}
}
- rcu_read_unlock();
if (degraded2 > degraded)
return degraded2;
return degraded;
@@ -4953,16 +4828,15 @@ read_more:
blist = read_bio;
read_bio->bi_next = NULL;
- rcu_read_lock();
for (s = 0; s < conf->copies*2; s++) {
struct bio *b;
int d = r10_bio->devs[s/2].devnum;
struct md_rdev *rdev2;
if (s&1) {
- rdev2 = rcu_dereference(conf->mirrors[d].replacement);
+ rdev2 = conf->mirrors[d].replacement;
b = r10_bio->devs[s/2].repl_bio;
} else {
- rdev2 = rcu_dereference(conf->mirrors[d].rdev);
+ rdev2 = conf->mirrors[d].rdev;
b = r10_bio->devs[s/2].bio;
}
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
@@ -4996,7 +4870,6 @@ read_more:
sector_nr += len >> 9;
nr_sectors += len >> 9;
}
- rcu_read_unlock();
r10_bio->sectors = nr_sectors;
/* Now submit the read */
@@ -5049,20 +4922,17 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *b;
int d = r10_bio->devs[s/2].devnum;
struct md_rdev *rdev;
- rcu_read_lock();
if (s&1) {
- rdev = rcu_dereference(conf->mirrors[d].replacement);
+ rdev = conf->mirrors[d].replacement;
b = r10_bio->devs[s/2].repl_bio;
} else {
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
b = r10_bio->devs[s/2].bio;
}
- if (!rdev || test_bit(Faulty, &rdev->flags)) {
- rcu_read_unlock();
+ if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
- }
+
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
@@ -5133,10 +5003,9 @@ static int handle_reshape_read_error(struct mddev *mddev,
if (s > (PAGE_SIZE >> 9))
s = PAGE_SIZE >> 9;
- rcu_read_lock();
while (!success) {
int d = r10b->devs[slot].devnum;
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
sector_t addr;
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags) ||
@@ -5145,14 +5014,12 @@ static int handle_reshape_read_error(struct mddev *mddev,
addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
success = sync_page_io(rdev,
addr,
s << 9,
pages[idx],
REQ_OP_READ, false);
rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
if (success)
break;
failed:
@@ -5162,7 +5029,6 @@ static int handle_reshape_read_error(struct mddev *mddev,
if (slot == first_slot)
break;
}
- rcu_read_unlock();
if (!success) {
/* couldn't read this block, must give up */
set_bit(MD_RECOVERY_INTR,
@@ -5188,12 +5054,8 @@ static void end_reshape_write(struct bio *bio)
struct md_rdev *rdev = NULL;
d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
- if (repl)
- rdev = conf->mirrors[d].replacement;
- if (!rdev) {
- smp_mb();
- rdev = conf->mirrors[d].rdev;
- }
+ rdev = repl ? conf->mirrors[d].replacement :
+ conf->mirrors[d].rdev;
if (bio->bi_status) {
/* FIXME should record badblock */
@@ -5228,18 +5090,16 @@ static void raid10_finish_reshape(struct mddev *mddev)
mddev->resync_max_sectors = mddev->array_sectors;
} else {
int d;
- rcu_read_lock();
for (d = conf->geo.raid_disks ;
d < conf->geo.raid_disks - mddev->delta_disks;
d++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
if (rdev)
clear_bit(In_sync, &rdev->flags);
- rdev = rcu_dereference(conf->mirrors[d].replacement);
+ rdev = conf->mirrors[d].replacement;
if (rdev)
clear_bit(In_sync, &rdev->flags);
}
- rcu_read_unlock();
}
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 6157f5beb..874874fe4 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1890,28 +1890,22 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
continue;
/* in case device is broken */
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[disk_index].rdev);
+ rdev = conf->disks[disk_index].rdev;
if (rdev) {
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
sync_page_io(rdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE,
false);
rdev_dec_pending(rdev, rdev->mddev);
- rcu_read_lock();
}
- rrdev = rcu_dereference(conf->disks[disk_index].replacement);
+ rrdev = conf->disks[disk_index].replacement;
if (rrdev) {
atomic_inc(&rrdev->nr_pending);
- rcu_read_unlock();
sync_page_io(rrdev, sh->sector, PAGE_SIZE,
sh->dev[disk_index].page, REQ_OP_WRITE,
false);
rdev_dec_pending(rrdev, rrdev->mddev);
- rcu_read_lock();
}
- rcu_read_unlock();
}
ctx->data_parity_stripes++;
out:
@@ -2948,7 +2942,6 @@ bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
if (!log)
return false;
- WARN_ON_ONCE(!rcu_read_lock_held());
tree_index = r5c_tree_index(conf, sect);
slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
return slot != NULL;
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index eaea57aee..da4ba736c 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -620,11 +620,9 @@ static void ppl_do_flush(struct ppl_io_unit *io)
struct md_rdev *rdev;
struct block_device *bdev = NULL;
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
if (rdev && !test_bit(Faulty, &rdev->flags))
bdev = rdev->bdev;
- rcu_read_unlock();
if (bdev) {
struct bio *bio;
@@ -882,9 +880,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)r_sector, dd_idx,
(unsigned long long)sector);
- /* Array has not started so rcu dereference is safe */
- rdev = rcu_dereference_protected(
- conf->disks[dd_idx].rdev, 1);
+ rdev = conf->disks[dd_idx].rdev;
if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
sector >= rdev->recovery_offset)) {
pr_debug("%s:%*s data member disk %d missing\n",
@@ -936,9 +932,7 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
0, &disk, &sh);
BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
- /* Array has not started so rcu dereference is safe */
- parity_rdev = rcu_dereference_protected(
- conf->disks[sh.pd_idx].rdev, 1);
+ parity_rdev = conf->disks[sh.pd_idx].rdev;
BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
pr_debug("%s:%*s write parity at sector %llu, disk %pg\n",
@@ -1404,9 +1398,7 @@ int ppl_init_log(struct r5conf *conf)
for (i = 0; i < ppl_conf->count; i++) {
struct ppl_log *log = &ppl_conf->child_logs[i];
- /* Array has not started so rcu dereference is safe */
- struct md_rdev *rdev =
- rcu_dereference_protected(conf->disks[i].rdev, 1);
+ struct md_rdev *rdev = conf->disks[i].rdev;
mutex_init(&log->io_mutex);
spin_lock_init(&log->io_list_lock);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e1d8b5199..69452e439 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -694,12 +694,12 @@ int raid5_calc_degraded(struct r5conf *conf)
int degraded, degraded2;
int i;
- rcu_read_lock();
degraded = 0;
for (i = 0; i < conf->previous_raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
+
if (rdev && test_bit(Faulty, &rdev->flags))
- rdev = rcu_dereference(conf->disks[i].replacement);
+ rdev = READ_ONCE(conf->disks[i].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags))
degraded++;
else if (test_bit(In_sync, &rdev->flags))
@@ -717,15 +717,14 @@ int raid5_calc_degraded(struct r5conf *conf)
if (conf->raid_disks >= conf->previous_raid_disks)
degraded++;
}
- rcu_read_unlock();
if (conf->raid_disks == conf->previous_raid_disks)
return degraded;
- rcu_read_lock();
degraded2 = 0;
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
+
if (rdev && test_bit(Faulty, &rdev->flags))
- rdev = rcu_dereference(conf->disks[i].replacement);
+ rdev = READ_ONCE(conf->disks[i].replacement);
if (!rdev || test_bit(Faulty, &rdev->flags))
degraded2++;
else if (test_bit(In_sync, &rdev->flags))
@@ -739,7 +738,6 @@ int raid5_calc_degraded(struct r5conf *conf)
if (conf->raid_disks <= conf->previous_raid_disks)
degraded2++;
}
- rcu_read_unlock();
if (degraded2 > degraded)
return degraded2;
return degraded;
@@ -1185,14 +1183,8 @@ again:
bi = &dev->req;
rbi = &dev->rreq; /* For writing to replacement */
- rcu_read_lock();
- rrdev = rcu_dereference(conf->disks[i].replacement);
- smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (!rdev) {
- rdev = rrdev;
- rrdev = NULL;
- }
+ rdev = conf->disks[i].rdev;
+ rrdev = conf->disks[i].replacement;
if (op_is_write(op)) {
if (replace_only)
rdev = NULL;
@@ -1213,7 +1205,6 @@ again:
rrdev = NULL;
if (rrdev)
atomic_inc(&rrdev->nr_pending);
- rcu_read_unlock();
/* We have already checked bad blocks for reads. Now
* need to check for writes. We never accept write errors
@@ -2732,28 +2723,6 @@ static void shrink_stripes(struct r5conf *conf)
conf->slab_cache = NULL;
}
-/*
- * This helper wraps rcu_dereference_protected() and can be used when
- * it is known that the nr_pending of the rdev is elevated.
- */
-static struct md_rdev *rdev_pend_deref(struct md_rdev __rcu *rdev)
-{
- return rcu_dereference_protected(rdev,
- atomic_read(&rcu_access_pointer(rdev)->nr_pending));
-}
-
-/*
- * This helper wraps rcu_dereference_protected() and should be used
- * when it is known that the mddev_lock() is held. This is safe
- * seeing raid5_remove_disk() has the same lock held.
- */
-static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev,
- struct md_rdev __rcu *rdev)
-{
- return rcu_dereference_protected(rdev,
- lockdep_is_held(&mddev->reconfig_mutex));
-}
-
static void raid5_end_read_request(struct bio * bi)
{
struct stripe_head *sh = bi->bi_private;
@@ -2779,9 +2748,9 @@ static void raid5_end_read_request(struct bio * bi)
* In that case it moved down to 'rdev'.
* rdev is not removed until all requests are finished.
*/
- rdev = rdev_pend_deref(conf->disks[i].replacement);
+ rdev = conf->disks[i].replacement;
if (!rdev)
- rdev = rdev_pend_deref(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
if (use_new_offset(conf, sh))
s = sh->sector + rdev->new_data_offset;
@@ -2894,11 +2863,11 @@ static void raid5_end_write_request(struct bio *bi)
for (i = 0 ; i < disks; i++) {
if (bi == &sh->dev[i].req) {
- rdev = rdev_pend_deref(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
break;
}
if (bi == &sh->dev[i].rreq) {
- rdev = rdev_pend_deref(conf->disks[i].replacement);
+ rdev = conf->disks[i].replacement;
if (rdev)
replacement = 1;
else
@@ -2906,7 +2875,7 @@ static void raid5_end_write_request(struct bio *bi)
* replaced it. rdev is not removed
* until all requests are finished.
*/
- rdev = rdev_pend_deref(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
break;
}
}
@@ -3668,15 +3637,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- struct md_rdev *rdev;
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
+ struct md_rdev *rdev = conf->disks[i].rdev;
+
if (rdev && test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
atomic_inc(&rdev->nr_pending);
else
rdev = NULL;
- rcu_read_unlock();
if (rdev) {
if (!rdev_set_badblocks(
rdev,
@@ -3794,16 +3761,17 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
/* During recovery devices cannot be removed, so
* locking and refcounting of rdevs is not needed
*/
- rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+ struct md_rdev *rdev = conf->disks[i].rdev;
+
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& !rdev_set_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0))
abort = 1;
- rdev = rcu_dereference(conf->disks[i].replacement);
+ rdev = conf->disks[i].replacement;
+
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
@@ -3811,7 +3779,6 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
RAID5_STRIPE_SECTORS(conf), 0))
abort = 1;
}
- rcu_read_unlock();
if (abort)
conf->recovery_disabled =
conf->mddev->recovery_disabled;
@@ -3824,15 +3791,13 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
struct md_rdev *rdev;
int rv = 0;
- rcu_read_lock();
- rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
+ rdev = sh->raid_conf->disks[disk_idx].replacement;
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& (rdev->recovery_offset <= sh->sector
|| rdev->mddev->recovery_cp <= sh->sector))
rv = 1;
- rcu_read_unlock();
return rv;
}
@@ -4709,7 +4674,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->log_failed = r5l_log_disk_error(conf);
/* Now to look around and see what can be done */
- rcu_read_lock();
for (i=disks; i--; ) {
struct md_rdev *rdev;
sector_t first_bad;
@@ -4754,7 +4718,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* Prefer to use the replacement for reads, but only
* if it is recovered enough and has no bad blocks.
*/
- rdev = rcu_dereference(conf->disks[i].replacement);
+ rdev = conf->disks[i].replacement;
if (rdev && !test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
!is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
@@ -4765,7 +4729,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(R5_NeedReplace, &dev->flags);
else
clear_bit(R5_NeedReplace, &dev->flags);
- rdev = rcu_dereference(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
clear_bit(R5_ReadRepl, &dev->flags);
}
if (rdev && test_bit(Faulty, &rdev->flags))
@@ -4812,8 +4776,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_WriteError, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
- struct md_rdev *rdev2 = rcu_dereference(
- conf->disks[i].rdev);
+ struct md_rdev *rdev2 = conf->disks[i].rdev;
+
if (rdev2 == rdev)
clear_bit(R5_Insync, &dev->flags);
if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
@@ -4825,8 +4789,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_MadeGood, &dev->flags)) {
/* This flag does not apply to '.replacement'
* only to .rdev, so make sure to check that*/
- struct md_rdev *rdev2 = rcu_dereference(
- conf->disks[i].rdev);
+ struct md_rdev *rdev2 = conf->disks[i].rdev;
+
if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev2->nr_pending);
@@ -4834,8 +4798,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
clear_bit(R5_MadeGood, &dev->flags);
}
if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
- struct md_rdev *rdev2 = rcu_dereference(
- conf->disks[i].replacement);
+ struct md_rdev *rdev2 = conf->disks[i].replacement;
+
if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev2->nr_pending);
@@ -4856,8 +4820,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
else if (!rdev) {
- rdev = rcu_dereference(
- conf->disks[i].replacement);
+ rdev = conf->disks[i].replacement;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
}
@@ -4884,7 +4847,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
else
s->replacing = 1;
}
- rcu_read_unlock();
}
/*
@@ -5341,23 +5303,23 @@ finish:
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
/* We own a safe reference to the rdev */
- rdev = rdev_pend_deref(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
if (!rdev_set_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0))
md_error(conf->mddev, rdev);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
- rdev = rdev_pend_deref(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
rdev_clear_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
- rdev = rdev_pend_deref(conf->disks[i].replacement);
+ rdev = conf->disks[i].replacement;
if (!rdev)
/* rdev have been moved down */
- rdev = rdev_pend_deref(conf->disks[i].rdev);
+ rdev = conf->disks[i].rdev;
rdev_clear_badblocks(rdev, sh->sector,
RAID5_STRIPE_SECTORS(conf), 0);
rdev_dec_pending(rdev, conf->mddev);
@@ -5516,24 +5478,22 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
&dd_idx, NULL);
end_sector = sector + bio_sectors(raid_bio);
- rcu_read_lock();
if (r5c_big_stripe_cached(conf, sector))
- goto out_rcu_unlock;
+ return 0;
- rdev = rcu_dereference(conf->disks[dd_idx].replacement);
+ rdev = conf->disks[dd_idx].replacement;
if (!rdev || test_bit(Faulty, &rdev->flags) ||
rdev->recovery_offset < end_sector) {
- rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+ rdev = conf->disks[dd_idx].rdev;
if (!rdev)
- goto out_rcu_unlock;
+ return 0;
if (test_bit(Faulty, &rdev->flags) ||
!(test_bit(In_sync, &rdev->flags) ||
rdev->recovery_offset >= end_sector))
- goto out_rcu_unlock;
+ return 0;
}
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
&bad_sectors)) {
@@ -5577,10 +5537,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
raid_bio->bi_iter.bi_sector);
submit_bio_noacct(align_bio);
return 1;
-
-out_rcu_unlock:
- rcu_read_unlock();
- return 0;
}
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
@@ -6595,14 +6551,12 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
* Note in case of > 1 drive failures it's possible we're rebuilding
* one drive while leaving another faulty drive in array.
*/
- rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+ struct md_rdev *rdev = conf->disks[i].rdev;
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
}
- rcu_read_unlock();
md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
@@ -7926,18 +7880,10 @@ static int raid5_run(struct mddev *mddev)
for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
i++) {
- rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev);
- if (!rdev && conf->disks[i].replacement) {
- /* The replacement is all we have yet */
- rdev = rdev_mdlock_deref(mddev,
- conf->disks[i].replacement);
- conf->disks[i].replacement = NULL;
- clear_bit(Replacement, &rdev->flags);
- rcu_assign_pointer(conf->disks[i].rdev, rdev);
- }
+ rdev = conf->disks[i].rdev;
if (!rdev)
continue;
- if (rcu_access_pointer(conf->disks[i].replacement) &&
+ if (conf->disks[i].replacement &&
conf->reshape_progress != MaxSector) {
/* replacements and reshape simply do not mix. */
pr_warn("md: cannot handle concurrent replacement and reshape.\n");
@@ -8117,15 +8063,16 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
struct r5conf *conf = mddev->private;
int i;
+ lockdep_assert_held(&mddev->lock);
+
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
conf->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
- rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
+
seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
- rcu_read_unlock();
seq_printf (seq, "]");
}
@@ -8163,9 +8110,8 @@ static int raid5_spare_active(struct mddev *mddev)
unsigned long flags;
for (i = 0; i < conf->raid_disks; i++) {
- rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev);
- replacement = rdev_mdlock_deref(mddev,
- conf->disks[i].replacement);
+ rdev = conf->disks[i].rdev;
+ replacement = conf->disks[i].replacement;
if (replacement
&& replacement->recovery_offset == MaxSector
&& !test_bit(Faulty, &replacement->flags)
@@ -8204,7 +8150,7 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct r5conf *conf = mddev->private;
int err = 0;
int number = rdev->raid_disk;
- struct md_rdev __rcu **rdevp;
+ struct md_rdev **rdevp;
struct disk_info *p;
struct md_rdev *tmp;
@@ -8227,9 +8173,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (unlikely(number >= conf->pool_size))
return 0;
p = conf->disks + number;
- if (rdev == rcu_access_pointer(p->rdev))
+ if (rdev == p->rdev)
rdevp = &p->rdev;
- else if (rdev == rcu_access_pointer(p->replacement))
+ else if (rdev == p->replacement)
rdevp = &p->replacement;
else
return 0;
@@ -8249,28 +8195,24 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (!test_bit(Faulty, &rdev->flags) &&
mddev->recovery_disabled != conf->recovery_disabled &&
!has_failed(conf) &&
- (!rcu_access_pointer(p->replacement) ||
- rcu_access_pointer(p->replacement) == rdev) &&
+ (!p->replacement || p->replacement == rdev) &&
number < conf->raid_disks) {
err = -EBUSY;
goto abort;
}
- *rdevp = NULL;
+ WRITE_ONCE(*rdevp, NULL);
if (!err) {
err = log_modify(conf, rdev, false);
if (err)
goto abort;
}
- tmp = rcu_access_pointer(p->replacement);
+ tmp = p->replacement;
if (tmp) {
/* We must have just cleared 'rdev' */
- rcu_assign_pointer(p->rdev, tmp);
+ WRITE_ONCE(p->rdev, tmp);
clear_bit(Replacement, &tmp->flags);
- smp_mb(); /* Make sure other CPUs may see both as identical
- * but will never see neither - if they are careful
- */
- rcu_assign_pointer(p->replacement, NULL);
+ WRITE_ONCE(p->replacement, NULL);
if (!err)
err = log_modify(conf, tmp, true);
@@ -8338,7 +8280,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = disk;
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
+ WRITE_ONCE(p->rdev, rdev);
err = log_modify(conf, rdev, true);
@@ -8347,7 +8289,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
}
for (disk = first; disk <= last; disk++) {
p = conf->disks + disk;
- tmp = rdev_mdlock_deref(mddev, p->rdev);
+ tmp = p->rdev;
if (test_bit(WantReplacement, &tmp->flags) &&
mddev->reshape_position == MaxSector &&
p->replacement == NULL) {
@@ -8356,7 +8298,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rdev->raid_disk = disk;
err = 0;
conf->fullsync = 1;
- rcu_assign_pointer(p->replacement, rdev);
+ WRITE_ONCE(p->replacement, rdev);
break;
}
}
@@ -8489,7 +8431,7 @@ static int raid5_start_reshape(struct mddev *mddev)
if (mddev->recovery_cp < MaxSector)
return -EBUSY;
for (i = 0; i < conf->raid_disks; i++)
- if (rdev_mdlock_deref(mddev, conf->disks[i].replacement))
+ if (conf->disks[i].replacement)
return -EBUSY;
rdev_for_each(rdev, mddev) {
@@ -8639,12 +8581,10 @@ static void raid5_finish_reshape(struct mddev *mddev)
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
d++) {
- rdev = rdev_mdlock_deref(mddev,
- conf->disks[d].rdev);
+ rdev = conf->disks[d].rdev;
if (rdev)
clear_bit(In_sync, &rdev->flags);
- rdev = rdev_mdlock_deref(mddev,
- conf->disks[d].replacement);
+ rdev = conf->disks[d].replacement;
if (rdev)
clear_bit(In_sync, &rdev->flags);
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 22bea20ec..9b5a7dc3f 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -473,8 +473,8 @@ enum {
*/
struct disk_info {
- struct md_rdev __rcu *rdev;
- struct md_rdev __rcu *replacement;
+ struct md_rdev *rdev;
+ struct md_rdev *replacement;
struct page *extra_page; /* extra page to use in prexor */
};
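
Dropping __rcu from struct disk_info is the type-level counterpart of all the rcu_dereference() removals above: sparse (`make C=1`) stops demanding RCU accessors for these pointers, and lifetime is instead guaranteed by nr_pending references and mddev locking. Illustrative declarations only, with a local fallback define so the sketch stands alone:

```c
#ifndef __CHECKER__
#define __rcu	/* empty outside sparse; an address-space attribute under it */
#endif

struct md_rdev;

struct disk_info_old {			/* before: RCU-managed */
	struct md_rdev __rcu *rdev;	/* plain dereference warns under sparse */
};

struct disk_info_new {			/* after: plain pointer */
	struct md_rdev *rdev;		/* READ_ONCE()/WRITE_ONCE() used only on
					 * the remaining lockless paths */
};
```
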
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 6bb49bb3f..559a172eb 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -511,7 +511,7 @@ int cec_thread_func(void *_adap)
pr_warn("cec-%s: transmit timed out\n", adap->name);
}
adap->transmit_in_progress = false;
- adap->tx_timeouts++;
+ adap->tx_timeout_cnt++;
goto unlock;
}
@@ -625,6 +625,33 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
msg->tx_low_drive_cnt += low_drive_cnt;
msg->tx_error_cnt += error_cnt;
+ adap->tx_arb_lost_cnt += arb_lost_cnt;
+ adap->tx_low_drive_cnt += low_drive_cnt;
+ adap->tx_error_cnt += error_cnt;
+
+ /*
+ * Low Drive transmission errors should really not happen for
+ * well-behaved CEC devices and proper HDMI cables.
+ *
+ * Ditto for the 'Error' status.
+ *
+ * For the first few times that this happens, log this.
+ * Stop logging after that, since that will not add any more
+ * useful information and instead it will just flood the kernel log.
+ */
+ if (done && adap->tx_low_drive_log_cnt < 8 && msg->tx_low_drive_cnt) {
+ adap->tx_low_drive_log_cnt++;
+ dprintk(0, "low drive counter: %u (seq %u: %*ph)\n",
+ msg->tx_low_drive_cnt, msg->sequence,
+ msg->len, msg->msg);
+ }
+ if (done && adap->tx_error_log_cnt < 8 && msg->tx_error_cnt) {
+ adap->tx_error_log_cnt++;
+ dprintk(0, "error counter: %u (seq %u: %*ph)\n",
+ msg->tx_error_cnt, msg->sequence,
+ msg->len, msg->msg);
+ }
+
/* Mark that we're done with this transmit */
adap->transmitting = NULL;
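
The new adapter-wide counters feed the status output further down, while the log_cnt fields cap how often low-drive and error events are printed: the first eight occurrences are logged, after which the condition only counts silently. The general shape of that capped-logging pattern, with invented names:

```c
#include <stdio.h>

#define LOG_CAP 8

struct noisy_stat {
	unsigned int count;	/* total occurrences, always accumulated */
	unsigned int log_cnt;	/* how many have been reported so far */
};

static void note_event(struct noisy_stat *s, const char *what)
{
	s->count++;
	if (s->log_cnt < LOG_CAP) {
		s->log_cnt++;
		printf("%s: %u occurrence(s) so far\n", what, s->count);
	}
}
```
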
@@ -1124,20 +1151,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (valid_la && min_len) {
/* These messages have special length requirements */
switch (cmd) {
- case CEC_MSG_TIMER_STATUS:
- if (msg->msg[2] & 0x10) {
- switch (msg->msg[2] & 0xf) {
- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
- if (msg->len < 5)
- valid_la = false;
- break;
- }
- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
- if (msg->len < 5)
- valid_la = false;
- }
- break;
case CEC_MSG_RECORD_ON:
switch (msg->msg[2]) {
case CEC_OP_RECORD_SRC_OWN:
@@ -1607,6 +1620,8 @@ int cec_adap_enable(struct cec_adapter *adap)
if (enable) {
adap->last_initiator = 0xff;
adap->transmit_in_progress = false;
+ adap->tx_low_drive_log_cnt = 0;
+ adap->tx_error_log_cnt = 0;
ret = adap->ops->adap_enable(adap, true);
if (!ret) {
/*
@@ -2265,10 +2280,25 @@ int cec_adap_status(struct seq_file *file, void *priv)
if (adap->monitor_pin_cnt)
seq_printf(file, "file handles in Monitor Pin mode: %u\n",
adap->monitor_pin_cnt);
- if (adap->tx_timeouts) {
- seq_printf(file, "transmit timeouts: %u\n",
- adap->tx_timeouts);
- adap->tx_timeouts = 0;
+ if (adap->tx_timeout_cnt) {
+ seq_printf(file, "transmit timeout count: %u\n",
+ adap->tx_timeout_cnt);
+ adap->tx_timeout_cnt = 0;
+ }
+ if (adap->tx_low_drive_cnt) {
+ seq_printf(file, "transmit low drive count: %u\n",
+ adap->tx_low_drive_cnt);
+ adap->tx_low_drive_cnt = 0;
+ }
+ if (adap->tx_arb_lost_cnt) {
+ seq_printf(file, "transmit arbitration lost count: %u\n",
+ adap->tx_arb_lost_cnt);
+ adap->tx_arb_lost_cnt = 0;
+ }
+ if (adap->tx_error_cnt) {
+ seq_printf(file, "transmit error count: %u\n",
+ adap->tx_error_cnt);
+ adap->tx_error_cnt = 0;
}
data = adap->transmitting;
if (data)
diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
index 42dde3f0d..52ec0ba4b 100644
--- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
+++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c
@@ -324,6 +324,8 @@ static const struct cec_dmi_match cec_dmi_match_table[] = {
{ "Google", "Boxy", "0000:00:02.0", port_d_conns },
/* Google Taranza */
{ "Google", "Taranza", "0000:00:02.0", port_db_conns },
+ /* Google Dexi */
+ { "Google", "Dexi", "0000:00:02.0", port_db_conns },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 792144593..a7047e548 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -387,7 +387,7 @@ int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev,
q->gfp_flags = __GFP_DMA32;
q->buf_struct_size = sizeof(struct saa7146_buf);
q->lock = &dev->v4l2_lock;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err)
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 19f80ff49..b6bf8f232 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -31,6 +31,16 @@
#include <trace/events/vb2.h>
+#define PLANE_INDEX_BITS 3
+#define PLANE_INDEX_SHIFT (PAGE_SHIFT + PLANE_INDEX_BITS)
+#define PLANE_INDEX_MASK (BIT_MASK(PLANE_INDEX_BITS) - 1)
+#define MAX_BUFFER_INDEX BIT_MASK(30 - PLANE_INDEX_SHIFT)
+#define BUFFER_INDEX_MASK (MAX_BUFFER_INDEX - 1)
+
+#if BIT(PLANE_INDEX_BITS) != VIDEO_MAX_PLANES
+#error PLANE_INDEX_BITS order must be equal to VIDEO_MAX_PLANES
+#endif
+
static int debug;
module_param(debug, int, 0644);
@@ -356,23 +366,29 @@ static void __setup_offsets(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned int plane;
- unsigned long off = 0;
-
- if (vb->index) {
- struct vb2_buffer *prev = q->bufs[vb->index - 1];
- struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
+ unsigned long offset = 0;
- off = PAGE_ALIGN(p->m.offset + p->length);
- }
+ /*
+ * The offset "cookie" value has the following constraints:
+ * - a buffer can have up to 8 planes.
+ * - v4l2 mem2mem uses bit 30 to distinguish between
+ * OUTPUT (aka "source", bit 30 is 0) and
+ * CAPTURE (aka "destination", bit 30 is 1) buffers.
+ * - must be page aligned
+ * That led to this bit mapping when PAGE_SHIFT = 12:
+ * |30                |29        15|14       12|11  0|
+ * |DST_QUEUE_OFF_BASE|buffer index|plane index|  0  |
+ * where there are 15 bits to store the buffer index.
+ * Depending on PAGE_SHIFT value we can have fewer bits
+ * to store the buffer index.
+ */
+ offset = vb->index << PLANE_INDEX_SHIFT;
for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->planes[plane].m.offset = off;
+ vb->planes[plane].m.offset = offset + (plane << PAGE_SHIFT);
dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
- vb->index, plane, off);
-
- off += vb->planes[plane].length;
- off = PAGE_ALIGN(off);
+ vb->index, plane, offset);
}
}
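
Instead of accumulating PAGE_ALIGN()ed plane lengths, the mmap offset cookie now packs the buffer and plane indices into fixed bit fields above PAGE_SHIFT, so an offset can be decoded without walking earlier buffers. A standalone sketch of the encode/decode round trip under the layout described in the comment (PAGE_SHIFT == 12, three plane-index bits); macro names carry an _X suffix to mark them as local stand-ins:

```c
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT_X        12
#define PLANE_INDEX_BITS_X  3
#define PLANE_INDEX_SHIFT_X (PAGE_SHIFT_X + PLANE_INDEX_BITS_X)
#define PLANE_INDEX_MASK_X  ((1UL << PLANE_INDEX_BITS_X) - 1)

static unsigned long encode(unsigned int buf, unsigned int plane)
{
	return ((unsigned long)buf << PLANE_INDEX_SHIFT_X) |
	       ((unsigned long)plane << PAGE_SHIFT_X);
}

int main(void)
{
	unsigned long off = encode(5, 2);
	unsigned int plane = (off >> PAGE_SHIFT_X) & PLANE_INDEX_MASK_X;
	unsigned int buf = off >> PLANE_INDEX_SHIFT_X;

	assert(buf == 5 && plane == 2);
	printf("offset 0x%lx -> buffer %u, plane %u\n", off, buf, plane);
	return 0;
}
```
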
@@ -397,6 +413,31 @@ static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
vb->skip_cache_sync_on_finish = 1;
}
+/**
+ * vb2_queue_add_buffer() - add a buffer to a queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @vb: pointer to &struct vb2_buffer to be added to the queue.
+ * @index: index at which the vb2_buffer is added in the queue
+ */
+static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index)
+{
+ WARN_ON(index >= q->max_num_buffers || q->bufs[index] || vb->vb2_queue);
+
+ q->bufs[index] = vb;
+ vb->index = index;
+ vb->vb2_queue = q;
+}
+
+/**
+ * vb2_queue_remove_buffer() - remove a buffer from a queue
+ * @vb: pointer to &struct vb2_buffer to be removed from the queue.
+ */
+static void vb2_queue_remove_buffer(struct vb2_buffer *vb)
+{
+ vb->vb2_queue->bufs[vb->index] = NULL;
+ vb->vb2_queue = NULL;
+}
+
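
The helper pair gives the buffer/queue linkage a single owner: the slot pointer, vb->index, and vb->vb2_queue are always updated together, and the WARN_ON catches double-adds. A minimal model of that invariant, with simplified types:

```c
#include <assert.h>
#include <stddef.h>

#define MAX_BUFS 32

struct buf { unsigned int index; struct queue *q; };
struct queue { struct buf *slots[MAX_BUFS]; };

static void queue_add(struct queue *q, struct buf *b, unsigned int i)
{
	/* the slot must be free and the buffer not yet owned */
	assert(i < MAX_BUFS && !q->slots[i] && !b->q);
	q->slots[i] = b;
	b->index = i;
	b->q = q;
}

static void queue_remove(struct buf *b)
{
	b->q->slots[b->index] = NULL;
	b->q = NULL;
}
```
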
/*
* __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
@@ -408,13 +449,17 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
unsigned int num_buffers, unsigned int num_planes,
const unsigned plane_sizes[VB2_MAX_PLANES])
{
+ unsigned int q_num_buffers = vb2_get_num_buffers(q);
unsigned int buffer, plane;
struct vb2_buffer *vb;
int ret;
- /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
+ /*
+ * Ensure that the number of buffers to allocate plus the number of
+ * buffers already in the queue stays below q->max_num_buffers
+ */
num_buffers = min_t(unsigned int, num_buffers,
- VB2_MAX_FRAME - q->num_buffers);
+ q->max_num_buffers - q_num_buffers);
for (buffer = 0; buffer < num_buffers; ++buffer) {
/* Allocate vb2 buffer structures */
@@ -425,9 +470,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
}
vb->state = VB2_BUF_STATE_DEQUEUED;
- vb->vb2_queue = q;
vb->num_planes = num_planes;
- vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
init_buffer_cache_hints(q, vb);
@@ -435,9 +478,9 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
}
- call_void_bufop(q, init_buffer, vb);
- q->bufs[vb->index] = vb;
+ vb2_queue_add_buffer(q, vb, q_num_buffers + buffer);
+ call_void_bufop(q, init_buffer, vb);
/* Allocate video buffer memory for the MMAP type */
if (memory == VB2_MEMORY_MMAP) {
@@ -445,7 +488,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
if (ret) {
dprintk(q, 1, "failed allocating memory for buffer %d\n",
buffer);
- q->bufs[vb->index] = NULL;
+ vb2_queue_remove_buffer(vb);
kfree(vb);
break;
}
@@ -460,7 +503,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
dprintk(q, 1, "buffer %d %p initialization failed\n",
buffer, vb);
__vb2_buf_mem_free(vb);
- q->bufs[vb->index] = NULL;
+ vb2_queue_remove_buffer(vb);
kfree(vb);
break;
}
@@ -480,10 +523,11 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
struct vb2_buffer *vb;
+ unsigned int q_num_buffers = vb2_get_num_buffers(q);
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
++buffer) {
- vb = q->bufs[buffer];
+ vb = vb2_get_buffer(q, buffer);
if (!vb)
continue;
@@ -505,13 +549,14 @@ static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
unsigned int buffer;
+ unsigned int q_num_buffers = vb2_get_num_buffers(q);
lockdep_assert_held(&q->mmap_lock);
/* Call driver-provided cleanup function for each buffer, if provided */
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
++buffer) {
- struct vb2_buffer *vb = q->bufs[buffer];
+ struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
if (vb && vb->planes[0].mem_priv)
call_void_vb_qop(vb, buf_cleanup, vb);
@@ -522,25 +567,26 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
- * Check that all the calls were balances during the life-time of this
- * queue. If not (or if the debug level is 1 or up), then dump the
- * counters to the kernel log.
+ * Check that all the calls were balanced during the life-time of this
+ * queue. If not then dump the counters to the kernel log.
*/
- if (q->num_buffers) {
+ if (q_num_buffers) {
bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
q->cnt_wait_prepare != q->cnt_wait_finish;
- if (unbalanced || debug) {
- pr_info("counters for queue %p:%s\n", q,
- unbalanced ? " UNBALANCED!" : "");
- pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
- q->cnt_queue_setup, q->cnt_start_streaming,
- q->cnt_stop_streaming);
- pr_info(" prepare_streaming: %u unprepare_streaming: %u\n",
- q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
- pr_info(" wait_prepare: %u wait_finish: %u\n",
- q->cnt_wait_prepare, q->cnt_wait_finish);
+ if (unbalanced) {
+ pr_info("unbalanced counters for queue %p:\n", q);
+ if (q->cnt_start_streaming != q->cnt_stop_streaming)
+ pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
+ q->cnt_queue_setup, q->cnt_start_streaming,
+ q->cnt_stop_streaming);
+ if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
+ pr_info(" prepare_streaming: %u unprepare_streaming: %u\n",
+ q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
+ if (q->cnt_wait_prepare != q->cnt_wait_finish)
+ pr_info(" wait_prepare: %u wait_finish: %u\n",
+ q->cnt_wait_prepare, q->cnt_wait_finish);
}
q->cnt_queue_setup = 0;
q->cnt_wait_prepare = 0;
@@ -550,53 +596,71 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
q->cnt_stop_streaming = 0;
q->cnt_unprepare_streaming = 0;
}
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- struct vb2_buffer *vb = q->bufs[buffer];
- bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
- vb->cnt_mem_prepare != vb->cnt_mem_finish ||
- vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
- vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
- vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
- vb->cnt_buf_queue != vb->cnt_buf_done ||
- vb->cnt_buf_prepare != vb->cnt_buf_finish ||
- vb->cnt_buf_init != vb->cnt_buf_cleanup;
-
- if (unbalanced || debug) {
- pr_info(" counters for queue %p, buffer %d:%s\n",
- q, buffer, unbalanced ? " UNBALANCED!" : "");
- pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
- vb->cnt_buf_init, vb->cnt_buf_cleanup,
- vb->cnt_buf_prepare, vb->cnt_buf_finish);
- pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
- vb->cnt_buf_out_validate, vb->cnt_buf_queue,
- vb->cnt_buf_done, vb->cnt_buf_request_complete);
- pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
- vb->cnt_mem_alloc, vb->cnt_mem_put,
- vb->cnt_mem_prepare, vb->cnt_mem_finish,
- vb->cnt_mem_mmap);
- pr_info(" get_userptr: %u put_userptr: %u\n",
- vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
- pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
- vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
- vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
- pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+ for (buffer = 0; buffer < vb2_get_num_buffers(q); buffer++) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
+ bool unbalanced;
+
+ if (!vb)
+ continue;
+
+ unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+ vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+ vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+ vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+ vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+ vb->cnt_buf_queue != vb->cnt_buf_done ||
+ vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+ vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+ if (unbalanced) {
+ pr_info("unbalanced counters for queue %p, buffer %d:\n",
+ q, buffer);
+ if (vb->cnt_buf_init != vb->cnt_buf_cleanup)
+ pr_info(" buf_init: %u buf_cleanup: %u\n",
+ vb->cnt_buf_init, vb->cnt_buf_cleanup);
+ if (vb->cnt_buf_prepare != vb->cnt_buf_finish)
+ pr_info(" buf_prepare: %u buf_finish: %u\n",
+ vb->cnt_buf_prepare, vb->cnt_buf_finish);
+ if (vb->cnt_buf_queue != vb->cnt_buf_done)
+ pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
+ vb->cnt_buf_out_validate, vb->cnt_buf_queue,
+ vb->cnt_buf_done, vb->cnt_buf_request_complete);
+ if (vb->cnt_mem_alloc != vb->cnt_mem_put)
+ pr_info(" alloc: %u put: %u\n",
+ vb->cnt_mem_alloc, vb->cnt_mem_put);
+ if (vb->cnt_mem_prepare != vb->cnt_mem_finish)
+ pr_info(" prepare: %u finish: %u\n",
+ vb->cnt_mem_prepare, vb->cnt_mem_finish);
+ if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr)
+ pr_info(" get_userptr: %u put_userptr: %u\n",
+ vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+ if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf)
+ pr_info(" attach_dmabuf: %u detach_dmabuf: %u\n",
+ vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf);
+ if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf)
+ pr_info(" map_dmabuf: %u unmap_dmabuf: %u\n",
+ vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+ pr_info(" get_dmabuf: %u num_users: %u\n",
vb->cnt_mem_get_dmabuf,
- vb->cnt_mem_num_users,
- vb->cnt_mem_vaddr,
- vb->cnt_mem_cookie);
+ vb->cnt_mem_num_users);
}
}
#endif
/* Free vb2 buffers */
- for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
++buffer) {
- kfree(q->bufs[buffer]);
- q->bufs[buffer] = NULL;
+ struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
+
+ if (!vb)
+ continue;
+
+ vb2_queue_remove_buffer(vb);
+ kfree(vb);
}
q->num_buffers -= buffers;
- if (!q->num_buffers) {
+ if (!vb2_get_num_buffers(q)) {
q->memory = VB2_MEMORY_UNKNOWN;
INIT_LIST_HEAD(&q->queued_list);
}
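
From here on, q->bufs is treated as a possibly sparse array: a slot can be NULL (for instance after a failed allocation is unwound), so every walk goes through vb2_get_buffer() and skips empty entries instead of indexing q->bufs[] directly. The recurring loop shape, modeled with simplified types:

```c
#include <stddef.h>

struct vb2_buffer;

struct queue_model {
	struct vb2_buffer **bufs;	/* may contain NULL holes */
	unsigned int num_buffers;
};

/* mirrors vb2_get_buffer(): range-check the index, return the slot or NULL */
static struct vb2_buffer *get_buffer(struct queue_model *q, unsigned int i)
{
	if (i >= q->num_buffers)
		return NULL;
	return q->bufs[i];
}

static unsigned int count_present(struct queue_model *q)
{
	unsigned int i, n = 0;

	for (i = 0; i < q->num_buffers; i++) {
		if (!get_buffer(q, i))
			continue;	/* skip holes */
		n++;
	}
	return n;
}
```
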
@@ -627,16 +691,21 @@ EXPORT_SYMBOL(vb2_buffer_in_use);
static bool __buffers_in_use(struct vb2_queue *q)
{
unsigned int buffer;
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- if (vb2_buffer_in_use(q, q->bufs[buffer]))
+ for (buffer = 0; buffer < vb2_get_num_buffers(q); ++buffer) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
+
+ if (!vb)
+ continue;
+
+ if (vb2_buffer_in_use(q, vb))
return true;
}
return false;
}
-void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
+void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
- call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
+ call_void_bufop(q, fill_user_buffer, vb, pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);
@@ -748,10 +817,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int flags, unsigned int *count)
{
unsigned int num_buffers, allocated_buffers, num_planes = 0;
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
unsigned plane_sizes[VB2_MAX_PLANES] = { };
bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
unsigned int i;
- int ret;
+ int ret = 0;
if (q->streaming) {
dprintk(q, 1, "streaming active\n");
@@ -763,7 +833,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
return -EBUSY;
}
- if (*count == 0 || q->num_buffers != 0 ||
+ if (*count == 0 || q_num_bufs != 0 ||
(q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
!verify_coherency_flags(q, non_coherent_mem)) {
/*
@@ -781,7 +851,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* queued without ever calling STREAMON.
*/
__vb2_queue_cancel(q);
- __vb2_queue_free(q, q->num_buffers);
+ __vb2_queue_free(q, q_num_bufs);
mutex_unlock(&q->mmap_lock);
/*
@@ -795,17 +865,22 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
/*
* Make sure the requested values and current defaults are sane.
*/
- WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
- num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
- num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
+ num_buffers = max_t(unsigned int, *count, q->min_queued_buffers);
+ num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers);
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
/*
* Set this now to ensure that drivers see the correct q->memory value
* in the queue_setup op.
*/
mutex_lock(&q->mmap_lock);
+ if (!q->bufs)
+ q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
+ if (!q->bufs)
+ ret = -ENOMEM;
q->memory = memory;
mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
set_queue_coherency(q, non_coherent_mem);
/*
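
Both vb2_core_reqbufs() here and vb2_core_create_bufs() below allocate the buffer-pointer array lazily under q->mmap_lock, sized by q->max_num_buffers, replacing the fixed VB2_MAX_FRAME array. A userspace sketch of the shape, with locking elided and invented names:

```c
#include <stdlib.h>

struct bufs_owner {
	void **bufs;			/* NULL until first use */
	unsigned int max_num_buffers;
};

static int ensure_bufs(struct bufs_owner *q)
{
	/* allocate once; later calls see the existing array */
	if (!q->bufs)
		q->bufs = calloc(q->max_num_buffers, sizeof(*q->bufs));
	return q->bufs ? 0 : -1;	/* -ENOMEM in the kernel */
}
```
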
@@ -842,7 +917,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* There is no point in continuing if we can't allocate the minimum
* number of buffers needed by this vb2_queue.
*/
- if (allocated_buffers < q->min_buffers_needed)
+ if (allocated_buffers < q->min_queued_buffers)
ret = -ENOMEM;
/*
@@ -876,7 +951,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
if (ret < 0) {
/*
* Note: __vb2_queue_free() will subtract 'allocated_buffers'
- * from q->num_buffers and it will reset q->memory to
+ * from the count of already queued buffers and reset q->memory to
* VB2_MEMORY_UNKNOWN.
*/
__vb2_queue_free(q, allocated_buffers);
@@ -910,10 +985,11 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int num_planes = 0, num_buffers, allocated_buffers;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
- bool no_previous_buffers = !q->num_buffers;
- int ret;
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
+ bool no_previous_buffers = !q_num_bufs;
+ int ret = 0;
- if (q->num_buffers == VB2_MAX_FRAME) {
+ if (q_num_bufs == q->max_num_buffers) {
dprintk(q, 1, "maximum number of buffers already allocated\n");
return -ENOBUFS;
}
@@ -930,7 +1006,13 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
*/
mutex_lock(&q->mmap_lock);
q->memory = memory;
+ if (!q->bufs)
+ q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
+ if (!q->bufs)
+ ret = -ENOMEM;
mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
q->waiting_for_buffers = !q->is_output;
set_queue_coherency(q, non_coherent_mem);
} else {
@@ -942,7 +1024,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
return -EINVAL;
}
- num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
+ num_buffers = min(*count, q->max_num_buffers - q_num_bufs);
if (requested_planes && requested_sizes) {
num_planes = requested_planes;
@@ -974,7 +1056,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
num_buffers = allocated_buffers;
/*
- * q->num_buffers contains the total number of buffers, that the
+ * num_buffers contains the total number of buffers that the
* queue driver has set up
*/
ret = call_qop(q, queue_setup, q, &num_buffers,
@@ -995,7 +1077,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
if (ret < 0) {
/*
* Note: __vb2_queue_free() will subtract 'allocated_buffers'
- * from q->num_buffers and it will reset q->memory to
+ * from the count of already queued buffers and reset q->memory to
* VB2_MEMORY_UNKNOWN.
*/
__vb2_queue_free(q, allocated_buffers);
@@ -1470,9 +1552,6 @@ static void vb2_req_unprepare(struct media_request_object *obj)
WARN_ON(!vb->req_obj.req);
}
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
- struct media_request *req);
-
static void vb2_req_queue(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
@@ -1487,7 +1566,7 @@ static void vb2_req_queue(struct media_request_object *obj)
* set. We just ignore that, and expect this will be caught the
* next time vb2_req_prepare() is called.
*/
- err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ err = vb2_core_qbuf(vb->vb2_queue, vb, NULL, NULL);
WARN_ON_ONCE(err && err != -EIO);
mutex_unlock(vb->vb2_queue->lock);
}
@@ -1542,12 +1621,10 @@ unsigned int vb2_request_buffer_cnt(struct media_request *req)
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);
-int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
+int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
- struct vb2_buffer *vb;
int ret;
- vb = q->bufs[index];
if (vb->state != VB2_BUF_STATE_DEQUEUED) {
dprintk(q, 1, "invalid buffer state %s\n",
vb2_state_name(vb->state));
@@ -1576,7 +1653,7 @@ EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
* @q: videobuf2 queue
*
* Attempt to start streaming. When this function is called there must be
- * at least q->min_buffers_needed buffers queued up (i.e. the minimum
+ * at least q->min_queued_buffers buffers queued up (i.e. the minimum
* number of buffers required for the DMA engine to function). If the
* @start_streaming op fails it is supposed to return all the driver-owned
* buffers back to vb2 in state QUEUED. Check if that happened and if
@@ -1617,8 +1694,12 @@ static int vb2_start_streaming(struct vb2_queue *q)
* Forcefully reclaim buffers if the driver did not
* correctly return them to vb2.
*/
- for (i = 0; i < q->num_buffers; ++i) {
- vb = q->bufs[i];
+ for (i = 0; i < vb2_get_num_buffers(q); ++i) {
+ vb = vb2_get_buffer(q, i);
+
+ if (!vb)
+ continue;
+
if (vb->state == VB2_BUF_STATE_ACTIVE)
vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
}
@@ -1634,10 +1715,9 @@ static int vb2_start_streaming(struct vb2_queue *q)
return ret;
}
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
struct media_request *req)
{
- struct vb2_buffer *vb;
enum vb2_buffer_state orig_state;
int ret;
@@ -1646,8 +1726,6 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
return -EIO;
}
- vb = q->bufs[index];
-
if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
q->requires_requests) {
dprintk(q, 1, "qbuf requires a request\n");
@@ -1768,7 +1846,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
* then we can finally call start_streaming().
*/
if (q->streaming && !q->start_streaming_called &&
- q->queued_count >= q->min_buffers_needed) {
+ q->queued_count >= q->min_queued_buffers) {
ret = vb2_start_streaming(q);
if (ret) {
/*
@@ -2022,12 +2100,18 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* to vb2 in stop_streaming().
*/
if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
- for (i = 0; i < q->num_buffers; ++i)
- if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
- pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
- q->bufs[i]);
- vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
+ for (i = 0; i < vb2_get_num_buffers(q); i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(q, i);
+
+ if (!vb)
+ continue;
+
+ if (vb->state == VB2_BUF_STATE_ACTIVE) {
+ pr_warn("driver bug: stop_streaming operation is leaving buffer %u in active state\n",
+ vb->index);
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
+ }
/* Must be zero now */
WARN_ON(atomic_read(&q->owned_by_drv_count));
}
@@ -2060,10 +2144,15 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* call to __fill_user_buffer() after buf_finish(). That order can't
* be changed, so we can't move the buf_finish() to __vb2_dqbuf().
*/
- for (i = 0; i < q->num_buffers; ++i) {
- struct vb2_buffer *vb = q->bufs[i];
- struct media_request *req = vb->req_obj.req;
+ for (i = 0; i < vb2_get_num_buffers(q); i++) {
+ struct vb2_buffer *vb;
+ struct media_request *req;
+
+ vb = vb2_get_buffer(q, i);
+ if (!vb)
+ continue;
+ req = vb->req_obj.req;
/*
* If a request is associated with this buffer, then
* call buf_request_cancel() to give the driver to complete()
@@ -2103,6 +2192,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
int ret;
if (type != q->type) {
@@ -2115,14 +2205,14 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
return 0;
}
- if (!q->num_buffers) {
+ if (!q_num_bufs) {
dprintk(q, 1, "no buffers have been allocated\n");
return -EINVAL;
}
- if (q->num_buffers < q->min_buffers_needed) {
- dprintk(q, 1, "need at least %u allocated buffers\n",
- q->min_buffers_needed);
+ if (q_num_bufs < q->min_queued_buffers) {
+ dprintk(q, 1, "need at least %u queued buffers\n",
+ q->min_queued_buffers);
return -EINVAL;
}
@@ -2134,7 +2224,7 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
* Tell driver to start streaming provided sufficient buffers
* are available.
*/
- if (q->queued_count >= q->min_buffers_needed) {
+ if (q->queued_count >= q->min_queued_buffers) {
ret = vb2_start_streaming(q);
if (ret)
goto unprepare;
@@ -2185,13 +2275,12 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
/*
- * __find_plane_by_offset() - find plane associated with the given offset off
+ * __find_plane_by_offset() - find plane associated with the given offset
*/
-static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
- unsigned int *_buffer, unsigned int *_plane)
+static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset,
+ struct vb2_buffer **vb, unsigned int *plane)
{
- struct vb2_buffer *vb;
- unsigned int buffer, plane;
+ unsigned int buffer;
/*
* Sanity checks to ensure the lock is held, MEMORY_MMAP is
@@ -2209,30 +2298,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
return -EBUSY;
}
- /*
- * Go over all buffers and their planes, comparing the given offset
- * with an offset assigned to each plane. If a match is found,
- * return its buffer and plane numbers.
- */
- for (buffer = 0; buffer < q->num_buffers; ++buffer) {
- vb = q->bufs[buffer];
+ /* Get buffer and plane from the offset */
+ buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK;
+ *plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK;
- for (plane = 0; plane < vb->num_planes; ++plane) {
- if (vb->planes[plane].m.offset == off) {
- *_buffer = buffer;
- *_plane = plane;
- return 0;
- }
- }
- }
+ *vb = vb2_get_buffer(q, buffer);
+ if (!*vb)
+ return -EINVAL;
+ if (*plane >= (*vb)->num_planes)
+ return -EINVAL;
- return -EINVAL;
+ return 0;
}
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
- unsigned int index, unsigned int plane, unsigned int flags)
+ struct vb2_buffer *vb, unsigned int plane, unsigned int flags)
{
- struct vb2_buffer *vb = NULL;
struct vb2_plane *vb_plane;
int ret;
struct dma_buf *dbuf;
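The decode above replaces the old linear scan over every buffer and plane: the mmap offset is now a cookie that packs the buffer index and plane index above the page offset. A minimal sketch of the encode/decode pairing, assuming illustrative values for PAGE_SHIFT, PLANE_INDEX_BITS and the masks (the real constants are defined earlier in videobuf2-core.c by this patch):

#include <assert.h>

#define PAGE_SHIFT        12	/* assumption: 4 KiB pages */
#define PLANE_INDEX_BITS  3	/* assumption: up to 8 planes per buffer */
#define PLANE_INDEX_SHIFT (PAGE_SHIFT + PLANE_INDEX_BITS)
#define PLANE_INDEX_MASK  ((1ul << PLANE_INDEX_BITS) - 1)
#define BUFFER_INDEX_MASK ((1ul << 27) - 1)	/* assumption: 30-bit cookie space */

/* Pack buffer and plane indices above the page-aligned offset bits. */
static unsigned long encode_offset(unsigned int buffer, unsigned int plane)
{
	return ((unsigned long)buffer << PLANE_INDEX_SHIFT) |
	       ((unsigned long)plane << PAGE_SHIFT);
}

/* Mirror of the decode done by __find_plane_by_offset() above. */
static void decode_offset(unsigned long offset,
			  unsigned int *buffer, unsigned int *plane)
{
	*buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK;
	*plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK;
}

int main(void)
{
	unsigned int b, p;

	decode_offset(encode_offset(5, 2), &b, &p);
	assert(b == 5 && p == 2);
	return 0;
}

Because the cookie and the index are now equivalent, a stale offset simply decodes to a missing buffer and vb2_get_buffer() returns NULL, which is why the -EINVAL paths above no longer need a search loop.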
@@ -2257,13 +2338,6 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
return -EINVAL;
}
- if (index >= q->num_buffers) {
- dprintk(q, 1, "buffer index out of range\n");
- return -EINVAL;
- }
-
- vb = q->bufs[index];
-
if (plane >= vb->num_planes) {
dprintk(q, 1, "buffer plane out of range\n");
return -EINVAL;
@@ -2282,20 +2356,20 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(q, 1, "failed to export buffer %d, plane %d\n",
- index, plane);
+ vb->index, plane);
return -EINVAL;
}
ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
if (ret < 0) {
dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
- index, plane, ret);
+ vb->index, plane, ret);
dma_buf_put(dbuf);
return ret;
}
dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n",
- index, plane, ret);
+ vb->index, plane, ret);
*fd = ret;
return 0;
@@ -2304,9 +2378,9 @@ EXPORT_SYMBOL_GPL(vb2_core_expbuf);
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
- unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
struct vb2_buffer *vb;
- unsigned int buffer = 0, plane = 0;
+ unsigned int plane = 0;
int ret;
unsigned long length;
@@ -2335,12 +2409,10 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
* Find the plane corresponding to the offset passed by userspace. This
* will return an error if not MEMORY_MMAP or file I/O is in progress.
*/
- ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ ret = __find_plane_by_offset(q, offset, &vb, &plane);
if (ret)
goto unlock;
- vb = q->bufs[buffer];
-
/*
* MMAP requires page_aligned buffers.
* The buffer length was page_aligned at __vb2_buf_mem_alloc(),
@@ -2368,7 +2440,7 @@ unlock:
if (ret)
return ret;
- dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
+ dprintk(q, 3, "buffer %u, plane %d successfully mapped\n", vb->index, plane);
return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
@@ -2380,9 +2452,9 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
unsigned long pgoff,
unsigned long flags)
{
- unsigned long off = pgoff << PAGE_SHIFT;
+ unsigned long offset = pgoff << PAGE_SHIFT;
struct vb2_buffer *vb;
- unsigned int buffer, plane;
+ unsigned int plane;
void *vaddr;
int ret;
@@ -2392,12 +2464,10 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
* Find the plane corresponding to the offset passed by userspace. This
* will return an error if not MEMORY_MMAP or file I/O is in progress.
*/
- ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ ret = __find_plane_by_offset(q, offset, &vb, &plane);
if (ret)
goto unlock;
- vb = q->bufs[buffer];
-
vaddr = vb2_plane_vaddr(vb, plane);
mutex_unlock(&q->mmap_lock);
return vaddr ? (unsigned long)vaddr : -EINVAL;
@@ -2414,6 +2484,16 @@ int vb2_core_queue_init(struct vb2_queue *q)
/*
* Sanity check
*/
+ /*
+ * For drivers that don't set max_num_buffers, ensure
+ * backward compatibility by falling back to VB2_MAX_FRAME.
+ */
+ if (!q->max_num_buffers)
+ q->max_num_buffers = VB2_MAX_FRAME;
+
+ /* The maximum is limited by the offset cookie encoding pattern */
+ q->max_num_buffers = min_t(unsigned int, q->max_num_buffers, MAX_BUFFER_INDEX);
+
if (WARN_ON(!q) ||
WARN_ON(!q->ops) ||
WARN_ON(!q->mem_ops) ||
@@ -2423,18 +2503,22 @@ int vb2_core_queue_init(struct vb2_queue *q)
WARN_ON(!q->ops->buf_queue))
return -EINVAL;
+ if (WARN_ON(q->max_num_buffers > MAX_BUFFER_INDEX) ||
+ WARN_ON(q->min_queued_buffers > q->max_num_buffers))
+ return -EINVAL;
+
if (WARN_ON(q->requires_requests && !q->supports_requests))
return -EINVAL;
/*
* This combination is not allowed since a non-zero value of
- * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
+ * q->min_queued_buffers can cause vb2_core_qbuf() to fail if
* it has to call start_streaming(), and the Request API expects
* that queueing a request (and thus queueing a buffer contained
* in that request) will always succeed. There is no method of
* propagating an error back to userspace.
*/
- if (WARN_ON(q->supports_requests && q->min_buffers_needed))
+ if (WARN_ON(q->supports_requests && q->min_queued_buffers))
return -EINVAL;
INIT_LIST_HEAD(&q->queued_list);
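Taken together, these checks let a driver leave max_num_buffers at zero for the legacy VB2_MAX_FRAME limit or opt in to larger queues, capped at MAX_BUFFER_INDEX and incompatible with the Request API when min_queued_buffers is non-zero. A hedged driver-side sketch of queue initialization under the renamed and new fields; struct my_dev, my_dev_buffer and my_dev_qops are hypothetical placeholders:

static int my_dev_init_vb2_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct my_dev_buffer);
	q->ops = &my_dev_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;
	/* Renamed from min_buffers_needed by this patch; must stay zero
	 * if the queue sets supports_requests (see the WARN_ON above). */
	q->min_queued_buffers = 2;
	/* New in this patch: 0 keeps the legacy VB2_MAX_FRAME limit;
	 * larger values are clamped to MAX_BUFFER_INDEX. */
	q->max_num_buffers = 64;

	return vb2_queue_init(q);
}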
@@ -2468,7 +2552,9 @@ void vb2_core_queue_release(struct vb2_queue *q)
__vb2_cleanup_fileio(q);
__vb2_queue_cancel(q);
mutex_lock(&q->mmap_lock);
- __vb2_queue_free(q, q->num_buffers);
+ __vb2_queue_free(q, vb2_get_num_buffers(q));
+ kfree(q->bufs);
+ q->bufs = NULL;
mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
@@ -2497,7 +2583,7 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
- if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
+ if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) {
if (!q->is_output && (q->io_modes & VB2_READ) &&
(req_events & (EPOLLIN | EPOLLRDNORM))) {
if (__vb2_init_fileio(q, 1))
@@ -2535,7 +2621,7 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
* For output streams you can call write() as long as there are fewer
* buffers queued than there are buffers available.
*/
- if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
+ if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q))
return EPOLLOUT | EPOLLWRNORM;
if (list_empty(&q->done_list)) {
@@ -2584,8 +2670,8 @@ struct vb2_fileio_buf {
* struct vb2_fileio_data - queue context used by file io emulator
*
* @cur_index: the index of the buffer currently being read from or
- * written to. If equal to q->num_buffers then a new buffer
- * must be dequeued.
+ * written to. If equal to the number of buffers in the vb2_queue
+ * then a new buffer must be dequeued.
* @initial_index: in the read() case all buffers are queued up immediately
* in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
* buffers. However, in the write() case no buffers are initially
@@ -2595,9 +2681,9 @@ struct vb2_fileio_buf {
* buffers. This means that initially __vb2_perform_fileio()
* needs to know what buffer index to use when it is queuing up
* the buffers for the first time. That initial index is stored
- * in this field. Once it is equal to q->num_buffers all
- * available buffers have been queued and __vb2_perform_fileio()
- * should start the normal dequeue/queue cycle.
+ * in this field. Once it is equal to the number of buffers in the
+ * vb2_queue, all available buffers have been queued and
+ * __vb2_perform_fileio() should start the normal dequeue/queue cycle.
*
* vb2 provides a compatibility layer and emulator of file io (read and
* write) calls on top of the streaming API. For proper operation it requires
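A small standalone illustration of the index bookkeeping documented above, assuming a queue of nbufs buffers; the variables are local to the example, not vb2 API:

#include <stdio.h>

int main(void)
{
	unsigned int nbufs = 4;
	/* read(): all buffers are queued up front in __vb2_init_fileio(),
	 * so both indices start at nbufs. */
	unsigned int initial_index = nbufs;
	unsigned int cur_index = nbufs;

	if (cur_index >= nbufs)
		printf("dequeue first, then read from the dequeued buffer\n");

	/* write(): nothing is queued yet; the indices start at 0 and
	 * count up as each buffer is filled and queued for the first time. */
	initial_index = 0;
	cur_index = 0;
	while (initial_index < nbufs) {
		printf("fill and queue buffer %u for the first time\n",
		       cur_index);
		initial_index++;
		cur_index = initial_index;
	}
	/* cur_index == nbufs now: the normal dequeue/queue cycle begins. */
	return 0;
}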
@@ -2625,6 +2711,7 @@ struct vb2_fileio_data {
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
struct vb2_fileio_data *fileio;
+ struct vb2_buffer *vb;
int i, ret;
unsigned int count = 0;
@@ -2644,18 +2731,18 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
/*
* Check if streaming api has not been already activated.
*/
- if (q->streaming || q->num_buffers > 0)
+ if (q->streaming || vb2_get_num_buffers(q) > 0)
return -EBUSY;
/*
- * Start with q->min_buffers_needed + 1, driver can increase it in
+ * Start with q->min_queued_buffers + 1, driver can increase it in
* queue_setup()
*
- * 'min_buffers_needed' buffers need to be queued up before you
+ * 'min_queued_buffers' buffers need to be queued up before you
* can start streaming, plus 1 for userspace (or in this case,
* kernelspace) processing.
*/
- count = max(2, q->min_buffers_needed + 1);
+ count = max(2, q->min_queued_buffers + 1);
dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
(read) ? "read" : "write", count, q->fileio_read_once,
@@ -2681,10 +2768,17 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
goto err_kfree;
/*
+ * Userspace can never add or delete buffers later, so there
+ * will never be holes. It is safe to assume that vb2_get_buffer(q, 0)
+ * will always return a valid vb pointer.
+ */
+ vb = vb2_get_buffer(q, 0);
+
+ /*
* Check if plane_count is correct
* (multiplane buffers are not supported).
*/
- if (q->bufs[0]->num_planes != 1) {
+ if (vb->num_planes != 1) {
ret = -EBUSY;
goto err_reqbufs;
}
@@ -2692,13 +2786,16 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
/*
* Get kernel address of each buffer.
*/
- for (i = 0; i < q->num_buffers; i++) {
- fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+ for (i = 0; i < vb2_get_num_buffers(q); i++) {
+ /* vb can never be NULL when using fileio. */
+ vb = vb2_get_buffer(q, i);
+
+ fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0);
if (fileio->bufs[i].vaddr == NULL) {
ret = -EINVAL;
goto err_reqbufs;
}
- fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+ fileio->bufs[i].size = vb2_plane_size(vb, 0);
}
/*
@@ -2708,18 +2805,23 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
/*
* Queue all buffers.
*/
- for (i = 0; i < q->num_buffers; i++) {
- ret = vb2_core_qbuf(q, i, NULL, NULL);
+ for (i = 0; i < vb2_get_num_buffers(q); i++) {
+ struct vb2_buffer *vb2 = vb2_get_buffer(q, i);
+
+ if (!vb2)
+ continue;
+
+ ret = vb2_core_qbuf(q, vb2, NULL, NULL);
if (ret)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
}
/*
* All buffers have been queued, so mark that by setting
- * initial_index to q->num_buffers
+ * initial_index to the number of buffers in the vb2_queue
*/
- fileio->initial_index = q->num_buffers;
- fileio->cur_index = q->num_buffers;
+ fileio->initial_index = vb2_get_num_buffers(q);
+ fileio->cur_index = fileio->initial_index;
}
/*
@@ -2812,7 +2914,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
* Check if we need to dequeue the buffer.
*/
index = fileio->cur_index;
- if (index >= q->num_buffers) {
+ if (index >= vb2_get_num_buffers(q)) {
struct vb2_buffer *b;
/*
@@ -2826,15 +2928,17 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
fileio->cur_index = index;
buf = &fileio->bufs[index];
- b = q->bufs[index];
+
+ /* b can never be NULL when using fileio. */
+ b = vb2_get_buffer(q, index);
/*
* Get number of bytes filled by the driver
*/
buf->pos = 0;
buf->queued = 0;
- buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
- : vb2_plane_size(q->bufs[index], 0);
+ buf->size = read ? vb2_get_plane_payload(b, 0)
+ : vb2_plane_size(b, 0);
/* Compensate for data_offset on read in the multiplanar case. */
if (is_multiplanar && read &&
b->planes[0].data_offset < buf->size) {
@@ -2877,7 +2981,8 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
* Queue next buffer if required.
*/
if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
- struct vb2_buffer *b = q->bufs[index];
+ /* b can never be NULL when using fileio. */
+ struct vb2_buffer *b = vb2_get_buffer(q, index);
/*
* Check if this is the last buffer to read.
@@ -2894,7 +2999,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (copy_timestamp)
b->timestamp = ktime_get_ns();
- ret = vb2_core_qbuf(q, index, NULL, NULL);
+ ret = vb2_core_qbuf(q, b, NULL, NULL);
dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
if (ret)
return ret;
@@ -2904,20 +3009,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
*/
buf->pos = 0;
buf->queued = 1;
- buf->size = vb2_plane_size(q->bufs[index], 0);
+ buf->size = vb2_plane_size(b, 0);
fileio->q_count += 1;
/*
* If we are queuing up buffers for the first time, then
* increase initial_index by one.
*/
- if (fileio->initial_index < q->num_buffers)
+ if (fileio->initial_index < vb2_get_num_buffers(q))
fileio->initial_index++;
/*
* The next buffer to use is either a buffer that's going to be
- * queued for the first time (initial_index < q->num_buffers)
- * or it is equal to q->num_buffers, meaning that the next
- * time we need to dequeue a buffer since we've now queued up
- * all the 'first time' buffers.
+ * queued for the first time (initial_index < number of buffers in the vb2_queue)
+ * or it is equal to the number of buffers in the vb2_queue,
+ * meaning that next time we will need to dequeue a buffer since
+ * we've now queued up all the 'first time' buffers.
*/
fileio->cur_index = fileio->initial_index;
}
@@ -2962,7 +3067,7 @@ static int vb2_thread(void *data)
int ret = 0;
if (q->is_output) {
- prequeue = q->num_buffers;
+ prequeue = vb2_get_num_buffers(q);
copy_timestamp = q->copy_timestamp;
}
@@ -2975,7 +3080,9 @@ static int vb2_thread(void *data)
* Call vb2_dqbuf to get buffer back.
*/
if (prequeue) {
- vb = q->bufs[index++];
+ vb = vb2_get_buffer(q, index++);
+ if (!vb)
+ continue;
prequeue--;
} else {
call_void_qop(q, wait_finish, q);
@@ -2984,7 +3091,7 @@ static int vb2_thread(void *data)
call_void_qop(q, wait_prepare, q);
dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
if (!ret)
- vb = q->bufs[index];
+ vb = vb2_get_buffer(q, index);
}
if (ret || threadio->stop)
break;
@@ -2997,7 +3104,7 @@ static int vb2_thread(void *data)
if (copy_timestamp)
vb->timestamp = ktime_get_ns();
if (!threadio->stop)
- ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
+ ret = vb2_core_qbuf(q, vb, NULL, NULL);
call_void_qop(q, wait_prepare, q);
if (ret || threadio->stop)
break;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index c7a54d82a..c575198e8 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -364,13 +364,12 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
- struct v4l2_buffer *b, bool is_prepare,
- struct media_request **p_req)
+ struct vb2_buffer *vb, struct v4l2_buffer *b,
+ bool is_prepare, struct media_request **p_req)
{
const char *opname = is_prepare ? "prepare_buf" : "qbuf";
struct media_request *req;
struct vb2_v4l2_buffer *vbuf;
- struct vb2_buffer *vb;
int ret;
if (b->type != q->type) {
@@ -378,23 +377,11 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
return -EINVAL;
}
- if (b->index >= q->num_buffers) {
- dprintk(q, 1, "%s: buffer index out of range\n", opname);
- return -EINVAL;
- }
-
- if (q->bufs[b->index] == NULL) {
- /* Should never happen */
- dprintk(q, 1, "%s: buffer is NULL\n", opname);
- return -EINVAL;
- }
-
if (b->memory != q->memory) {
dprintk(q, 1, "%s: invalid memory type\n", opname);
return -EINVAL;
}
- vb = q->bufs[b->index];
vbuf = to_vb2_v4l2_buffer(vb);
ret = __verify_planes_array(vb, b);
if (ret)
@@ -628,11 +615,22 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
{
unsigned int i;
+ struct vb2_buffer *vb2;
- for (i = 0; i < q->num_buffers; i++)
- if (q->bufs[i]->copied_timestamp &&
- q->bufs[i]->timestamp == timestamp)
- return vb2_get_buffer(q, i);
+ /*
+ * This loop doesn't scale if there is a really large number of buffers.
+ * Maybe something more efficient will be needed in this case.
+ */
+ for (i = 0; i < q->max_num_buffers; i++) {
+ vb2 = vb2_get_buffer(q, i);
+
+ if (!vb2)
+ continue;
+
+ if (vb2->copied_timestamp &&
+ vb2->timestamp == timestamp)
+ return vb2;
+ }
return NULL;
}
EXPORT_SYMBOL_GPL(vb2_find_buffer);
@@ -660,20 +658,33 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
return -EINVAL;
}
- if (b->index >= q->num_buffers) {
- dprintk(q, 1, "buffer index out of range\n");
+ vb = vb2_get_buffer(q, b->index);
+ if (!vb) {
+ dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
return -EINVAL;
}
- vb = q->bufs[b->index];
+
ret = __verify_planes_array(vb, b);
if (!ret)
- vb2_core_querybuf(q, b->index, b);
+ vb2_core_querybuf(q, vb, b);
return ret;
}
EXPORT_SYMBOL(vb2_querybuf);
-static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
+ u32 *flags, u32 *caps, u32 *max_num_bufs)
{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+ * but in order to avoid bugs we zero out all bits.
+ */
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+
*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
if (q->io_modes & VB2_MMAP)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
@@ -685,25 +696,11 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
-#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
-#endif
-}
-
-static void validate_memory_flags(struct vb2_queue *q,
- int memory,
- u32 *flags)
-{
- if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
- /*
- * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
- * but in order to avoid bugs we zero out all bits.
- */
- *flags = 0;
- } else {
- /* Clear all unknown flags. */
- *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ if (max_num_bufs) {
+ *max_num_bufs = q->max_num_buffers;
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
}
}
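On the userspace side, the capability bit and queue limit filled in by vb2_set_flags_and_caps() can be probed with a zero-count VIDIOC_CREATE_BUFS. A hedged sketch, assuming UAPI headers new enough to carry V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS and the max_num_buffers field of struct v4l2_create_buffers; error handling is elided, and an older kernel simply leaves the capability bit clear:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_create_buffers create;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.memory = V4L2_MEMORY_MMAP;
	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	create.count = 0;	/* count == 0: only validate and report caps */

	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create) == 0 &&
	    (create.capabilities & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS))
		printf("queue supports up to %u buffers\n",
		       create.max_num_buffers);

	close(fd);
	return 0;
}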
@@ -712,8 +709,8 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
int ret = vb2_verify_memory_type(q, req->memory, req->type);
u32 flags = req->flags;
- fill_buf_caps(q, &req->capabilities);
- validate_memory_flags(q, req->memory, &flags);
+ vb2_set_flags_and_caps(q, req->memory, &flags,
+ &req->capabilities, NULL);
req->flags = flags;
return ret ? ret : vb2_core_reqbufs(q, req->memory,
req->flags, &req->count);
@@ -723,6 +720,7 @@ EXPORT_SYMBOL_GPL(vb2_reqbufs);
int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
struct v4l2_buffer *b)
{
+ struct vb2_buffer *vb;
int ret;
if (vb2_fileio_is_active(q)) {
@@ -733,9 +731,15 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
return -EINVAL;
- ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
+ vb = vb2_get_buffer(q, b->index);
+ if (!vb) {
+ dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
+ return -EINVAL;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, mdev, vb, b, true, NULL);
- return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
+ return ret ? ret : vb2_core_prepare_buf(q, vb, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
@@ -747,9 +751,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
int ret = vb2_verify_memory_type(q, create->memory, f->type);
unsigned i;
- fill_buf_caps(q, &create->capabilities);
- validate_memory_flags(q, create->memory, &create->flags);
- create->index = q->num_buffers;
+ create->index = vb2_get_num_buffers(q);
+ vb2_set_flags_and_caps(q, create->memory, &create->flags,
+ &create->capabilities, &create->max_num_buffers);
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -803,6 +807,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
struct v4l2_buffer *b)
{
struct media_request *req = NULL;
+ struct vb2_buffer *vb;
int ret;
if (vb2_fileio_is_active(q)) {
@@ -810,10 +815,16 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
+ vb = vb2_get_buffer(q, b->index);
+ if (!vb) {
+ dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
+ return -EINVAL;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, mdev, vb, b, false, &req);
if (ret)
return ret;
- ret = vb2_core_qbuf(q, b->index, b, req);
+ ret = vb2_core_qbuf(q, vb, b, req);
if (req)
media_request_put(req);
return ret;
@@ -873,7 +884,15 @@ EXPORT_SYMBOL_GPL(vb2_streamoff);
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
- return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
+ struct vb2_buffer *vb;
+
+ vb = vb2_get_buffer(q, eb->index);
+ if (!vb) {
+ dprintk(q, 1, "can't find the requested buffer %u\n", eb->index);
+ return -EINVAL;
+ }
+
+ return vb2_core_expbuf(q, &eb->fd, eb->type, vb,
eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);
@@ -985,8 +1004,8 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
u32 flags = p->flags;
- fill_buf_caps(vdev->queue, &p->capabilities);
- validate_memory_flags(vdev->queue, p->memory, &flags);
+ vb2_set_flags_and_caps(vdev->queue, p->memory, &flags,
+ &p->capabilities, NULL);
p->flags = flags;
if (res)
return res;
@@ -1005,12 +1024,11 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *p)
{
struct video_device *vdev = video_devdata(file);
- int res = vb2_verify_memory_type(vdev->queue, p->memory,
- p->format.type);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type);
- p->index = vdev->queue->num_buffers;
- fill_buf_caps(vdev->queue, &p->capabilities);
- validate_memory_flags(vdev->queue, p->memory, &p->flags);
+ p->index = vb2_get_num_buffers(vdev->queue);
+ vb2_set_flags_and_caps(vdev->queue, p->memory, &p->flags,
+ &p->capabilities, &p->max_num_buffers);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
@@ -1115,7 +1133,7 @@ int _vb2_fop_release(struct file *file, struct mutex *lock)
if (lock)
mutex_lock(lock);
- if (file->private_data == vdev->queue->owner) {
+ if (!vdev->queue->owner || file->private_data == vdev->queue->owner) {
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
}
@@ -1243,7 +1261,7 @@ void vb2_video_unregister_device(struct video_device *vdev)
*/
get_device(&vdev->dev);
video_unregister_device(vdev);
- if (vdev->queue && vdev->queue->owner) {
+ if (vdev->queue) {
struct mutex *lock = vdev->queue->lock ?
vdev->queue->lock : vdev->lock;
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 909df82fe..192a8230c 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -167,17 +167,14 @@ int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int nonblocking)
memset(ctx, 0, sizeof(struct dvb_vb2_ctx));
q->type = DVB_BUF_TYPE_CAPTURE;
- /**capture type*/
- q->is_output = 0;
/**only mmap is supported currently*/
q->io_modes = VB2_MMAP;
q->drv_priv = ctx;
q->buf_struct_size = sizeof(struct dvb_buffer);
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->ops = &dvb_vb2_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->buf_ops = &dvb_vb2_buf_ops;
- q->num_buffers = 0;
ret = vb2_core_queue_init(q);
if (ret) {
ctx->state = DVB_VB2_STATE_NONE;
@@ -355,12 +352,13 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
struct vb2_queue *q = &ctx->vb_q;
+ struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);
- if (b->index >= q->num_buffers) {
- dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ if (!vb2) {
+ dprintk(1, "[%s] invalid buffer index\n", ctx->name);
return -EINVAL;
}
- vb2_core_querybuf(&ctx->vb_q, b->index, b);
+ vb2_core_querybuf(&ctx->vb_q, vb2, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
}
@@ -370,7 +368,7 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
struct vb2_queue *q = &ctx->vb_q;
int ret;
- ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, exp->index,
+ ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, q->bufs[exp->index],
0, exp->flags);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
@@ -385,13 +383,14 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
struct vb2_queue *q = &ctx->vb_q;
+ struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);
int ret;
- if (b->index >= q->num_buffers) {
- dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ if (!vb2) {
+ dprintk(1, "[%s] invalid buffer index\n", ctx->name);
return -EINVAL;
}
- ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
+ ret = vb2_core_qbuf(&ctx->vb_q, vb2, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
b->index, ret);
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 02c619e51..023db6e79 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -439,12 +439,13 @@ static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
struct platform_device *pdev = dev->pdev;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
dev_dbg(&pdev->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
- if (vq->num_buffers + *nbuffers < 8)
- *nbuffers = 8 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 8)
+ *nbuffers = 8 - q_num_bufs;
*nplanes = 1;
sizes[0] = PAGE_ALIGN(dev->buffersize);
dev_dbg(&pdev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
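The same conversion from vq->num_buffers to vb2_get_num_buffers() applies to any queue_setup op that tops the queue up to a minimum. A hedged sketch mirroring the change above; MY_MIN_BUFFERS and the buffer size are illustrative placeholders:

#define MY_MIN_BUFFERS 8

static int my_queue_setup(struct vb2_queue *vq,
			  unsigned int *nbuffers, unsigned int *nplanes,
			  unsigned int sizes[], struct device *alloc_devs[])
{
	unsigned int q_num_bufs = vb2_get_num_buffers(vq);

	/* Count buffers already allocated toward the minimum. */
	if (q_num_bufs + *nbuffers < MY_MIN_BUFFERS)
		*nbuffers = MY_MIN_BUFFERS - q_num_bufs;

	*nplanes = 1;
	sizes[0] = PAGE_ALIGN(64 * 1024);	/* illustrative buffer size */
	return 0;
}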
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 59ee0ca2c..4c3435921 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -41,6 +41,16 @@ config VIDEO_APTINA_PLL
config VIDEO_CCS_PLL
tristate
+config VIDEO_ALVIUM_CSI2
+ tristate "Allied Vision ALVIUM MIPI CSI-2 camera support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor-level driver for the Allied Vision
+ ALVIUM camera connected via the MIPI CSI-2 interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called alvium-csi2.
+
config VIDEO_AR0521
tristate "ON Semiconductor AR0521 sensor support"
help
@@ -50,6 +60,26 @@ config VIDEO_AR0521
To compile this driver as a module, choose M here: the
module will be called ar0521.
+config VIDEO_GC0308
+ tristate "GalaxyCore GC0308 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the GalaxyCore
+ GC0308 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gc0308.
+
+config VIDEO_GC2145
+ select V4L2_CCI_I2C
+ tristate "GalaxyCore GC2145 sensor support"
+ help
+ This is a V4L2 sensor-level driver for GalaxyCore GC2145
+ 2 Mpixel camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gc2145.
+
config VIDEO_HI556
tristate "Hynix Hi-556 sensor support"
help
@@ -455,6 +485,16 @@ config VIDEO_OV5695
To compile this driver as a module, choose M here: the
module will be called ov5695.
+config VIDEO_OV64A40
+ tristate "OmniVision OV64A40 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV64A40 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov64a40.
+
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
help
@@ -628,6 +668,23 @@ source "drivers/media/i2c/et8ek8/Kconfig"
endif
+menu "Camera ISPs"
+ visible if MEDIA_CAMERA_SUPPORT
+
+config VIDEO_THP7312
+ tristate "THine THP7312 support"
+ depends on I2C
+ select FW_LOADER
+ select MEDIA_CONTROLLER
+ select V4L2_CCI_I2C
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ This is a Video4Linux2 sensor-level driver for the THine
+ THP7312 ISP.
+
+endmenu
+
menu "Lens drivers"
visible if MEDIA_CAMERA_SUPPORT
@@ -1186,6 +1243,21 @@ config VIDEO_TW2804
To compile this driver as a module, choose M here: the
module will be called tw2804.
+config VIDEO_TW9900
+ tristate "Techwell TW9900 video decoder"
+ depends on GPIOLIB
+ depends on VIDEO_DEV && I2C
+ depends on PM
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
+ help
+ Support for the Techwell TW9900 multi-standard video decoder.
+ It supports the NTSC and PAL standards with auto-detection.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw9900.
+
config VIDEO_TW9903
tristate "Techwell TW9903 video decoder"
depends on VIDEO_DEV && I2C
@@ -1432,6 +1504,7 @@ config VIDEO_ST_MIPID02
depends on I2C && VIDEO_DEV
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
+ select V4L2_CCI_I2C
select V4L2_FWNODE
help
Support for STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f5010f80a..dfbe6448b 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
obj-$(CONFIG_VIDEO_AK881X) += ak881x.o
+obj-$(CONFIG_VIDEO_ALVIUM_CSI2) += alvium-csi2.o
obj-$(CONFIG_VIDEO_APTINA_PLL) += aptina-pll.o
obj-$(CONFIG_VIDEO_AR0521) += ar0521.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
@@ -36,6 +37,8 @@ obj-$(CONFIG_VIDEO_DW9719) += dw9719.o
obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
+obj-$(CONFIG_VIDEO_GC0308) += gc0308.o
+obj-$(CONFIG_VIDEO_GC2145) += gc2145.o
obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_HI846) += hi846.o
obj-$(CONFIG_VIDEO_HI847) += hi847.o
@@ -92,6 +95,7 @@ obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
+obj-$(CONFIG_VIDEO_OV64A40) += ov64a40.o
obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
@@ -128,6 +132,7 @@ obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
+obj-$(CONFIG_VIDEO_THP7312) += thp7312.o
obj-$(CONFIG_VIDEO_THS7303) += ths7303.o
obj-$(CONFIG_VIDEO_THS8200) += ths8200.o
obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
@@ -136,6 +141,7 @@ obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
obj-$(CONFIG_VIDEO_TVP7002) += tvp7002.o
obj-$(CONFIG_VIDEO_TW2804) += tw2804.o
+obj-$(CONFIG_VIDEO_TW9900) += tw9900.o
obj-$(CONFIG_VIDEO_TW9903) += tw9903.o
obj-$(CONFIG_VIDEO_TW9906) += tw9906.o
obj-$(CONFIG_VIDEO_TW9910) += tw9910.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 541344731..409b9a37f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -463,11 +463,19 @@ static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
-static int adv7180_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int adv7180_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct adv7180_state *state = to_state(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (state->curr_norm & V4L2_STD_525_60) {
fi->interval.numerator = 1001;
fi->interval.denominator = 30000;
@@ -769,7 +777,7 @@ static int adv7180_get_pad_format(struct v4l2_subdev *sd,
struct adv7180_state *state = to_state(sd);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ format->format = *v4l2_subdev_state_get_format(sd_state, 0);
} else {
adv7180_mbus_fmt(sd, &format->format);
format->format.field = state->field;
@@ -806,15 +814,15 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
adv7180_set_power(state, true);
}
} else {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ framefmt = v4l2_subdev_state_get_format(sd_state, 0);
*framefmt = format->format;
}
return ret;
}
-static int adv7180_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int adv7180_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
@@ -913,7 +921,6 @@ static int adv7180_subscribe_event(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.s_std = adv7180_s_std,
.g_std = adv7180_g_std,
- .g_frame_interval = adv7180_g_frame_interval,
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
@@ -929,10 +936,10 @@ static const struct v4l2_subdev_core_ops adv7180_core_ops = {
};
static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
- .init_cfg = adv7180_init_cfg,
.enum_mbus_code = adv7180_enum_mbus_code,
.set_fmt = adv7180_set_pad_format,
.get_fmt = adv7180_get_pad_format,
+ .get_frame_interval = adv7180_get_frame_interval,
.get_mbus_config = adv7180_get_mbus_config,
};
@@ -947,6 +954,10 @@ static const struct v4l2_subdev_ops adv7180_ops = {
.sensor = &adv7180_sensor_ops,
};
+static const struct v4l2_subdev_internal_ops adv7180_internal_ops = {
+ .init_state = adv7180_init_state,
+};
+
static irqreturn_t adv7180_irq(int irq, void *devid)
{
struct adv7180_state *state = devid;
@@ -1458,6 +1469,7 @@ static int adv7180_probe(struct i2c_client *client)
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
+ sd->internal_ops = &adv7180_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
ret = adv7180_init_controls(state);
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 3659feafa..2a2cace4a 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -442,8 +442,6 @@ static int adv7183_set_fmt(struct v4l2_subdev *sd,
}
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
decoder->fmt = *fmt;
- else
- sd_state->pads->try_fmt = *fmt;
return 0;
}
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index 00095c776..50d9fbadb 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -354,8 +354,8 @@ static int adv748x_afe_get_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbusformat = v4l2_subdev_get_try_format(sd, sd_state,
- sdformat->pad);
+ mbusformat = v4l2_subdev_state_get_format(sd_state,
+ sdformat->pad);
sdformat->format = *mbusformat;
} else {
adv748x_afe_fill_format(afe, &sdformat->format);
@@ -378,7 +378,7 @@ static int adv748x_afe_set_format(struct v4l2_subdev *sd,
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return adv748x_afe_get_format(sd, sd_state, sdformat);
- mbusformat = v4l2_subdev_get_try_format(sd, sd_state, sdformat->pad);
+ mbusformat = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
*mbusformat = sdformat->format;
return 0;
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index a5a7cb228..5b265b722 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -147,7 +147,7 @@ adv748x_csi2_get_pad_format(struct v4l2_subdev *sd,
struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &tx->format;
}
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
index 400d71c27..ec151dc69 100644
--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -441,8 +441,8 @@ static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
return -EINVAL;
if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbusformat = v4l2_subdev_get_try_format(sd, sd_state,
- sdformat->pad);
+ mbusformat = v4l2_subdev_state_get_format(sd_state,
+ sdformat->pad);
sdformat->format = *mbusformat;
} else {
adv748x_hdmi_fill_format(hdmi, &sdformat->format);
@@ -464,7 +464,7 @@ static int adv748x_hdmi_set_format(struct v4l2_subdev *sd,
if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return adv748x_hdmi_get_format(sd, sd_state, sdformat);
- mbusformat = v4l2_subdev_get_try_format(sd, sd_state, sdformat->pad);
+ mbusformat = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
*mbusformat = sdformat->format;
return 0;
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index a9183d928..0f780eb6e 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1238,7 +1238,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
format->format.code = fmt->code;
format->format.colorspace = fmt->colorspace;
format->format.ycbcr_enc = fmt->ycbcr_enc;
@@ -1293,7 +1293,7 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
fmt->code = format->format.code;
fmt->colorspace = format->format.colorspace;
fmt->ycbcr_enc = format->format.ycbcr_enc;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index b202a85fb..810fa8826 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1929,7 +1929,7 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -1978,7 +1978,7 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index c1664a362..2ad0f9f55 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2087,7 +2087,7 @@ static int adv7842_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
format->format.code = fmt->code;
} else {
format->format.code = state->format->code;
@@ -2119,7 +2119,7 @@ static int adv7842_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
fmt->code = format->format.code;
} else {
state->format = info;
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
index 463b51d46..9a2432cea 100644
--- a/drivers/media/i2c/ak7375.c
+++ b/drivers/media/i2c/ak7375.c
@@ -10,30 +10,60 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#define AK7375_MAX_FOCUS_POS 4095
-/*
- * This sets the minimum granularity for the focus positions.
- * A value of 1 gives maximum accuracy for a desired focus position
- */
-#define AK7375_FOCUS_STEPS 1
-/*
- * This acts as the minimum granularity of lens movement.
- * Keep this value power of 2, so the control steps can be
- * uniformly adjusted for gradual lens movement, with desired
- * number of control steps.
- */
-#define AK7375_CTRL_STEPS 64
-#define AK7375_CTRL_DELAY_US 1000
-/*
- * The vcm may take up 10 ms (tDELAY) to power on and start taking
- * I2C messages. Based on AK7371 datasheet.
- */
-#define AK7375_POWER_DELAY_US 10000
+struct ak73xx_chipdef {
+ u8 reg_position;
+ u8 reg_cont;
+ u8 shift_pos;
+ u8 mode_active;
+ u8 mode_standby;
+ bool has_standby; /* Some chips may not have standby mode */
+ u16 focus_pos_max;
+ /*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position
+ */
+ u16 focus_steps;
+ /*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with desired
+ * number of control steps.
+ */
+ u16 ctrl_steps;
+ u16 ctrl_delay_us;
+ /*
+ * The vcm may take time (tDELAY) to power on and start taking
+ * I2C messages.
+ */
+ u16 power_delay_us;
+};
-#define AK7375_REG_POSITION 0x0
-#define AK7375_REG_CONT 0x2
-#define AK7375_MODE_ACTIVE 0x0
-#define AK7375_MODE_STANDBY 0x40
+static const struct ak73xx_chipdef ak7345_cdef = {
+ .reg_position = 0x0,
+ .reg_cont = 0x2,
+ .shift_pos = 7, /* 9-bit position values, need to << 7 */
+ .mode_active = 0x0,
+ .has_standby = false,
+ .focus_pos_max = 511,
+ .focus_steps = 1,
+ .ctrl_steps = 16,
+ .ctrl_delay_us = 1000,
+ .power_delay_us = 20000,
+};
+
+static const struct ak73xx_chipdef ak7375_cdef = {
+ .reg_position = 0x0,
+ .reg_cont = 0x2,
+ .shift_pos = 4, /* 12-bit position values, need to << 4 */
+ .mode_active = 0x0,
+ .mode_standby = 0x40,
+ .has_standby = true,
+ .focus_pos_max = 4095,
+ .focus_steps = 1,
+ .ctrl_steps = 64,
+ .ctrl_delay_us = 1000,
+ .power_delay_us = 10000,
+};
static const char * const ak7375_supply_names[] = {
"vdd",
@@ -42,6 +72,7 @@ static const char * const ak7375_supply_names[] = {
/* ak7375 device structure */
struct ak7375_device {
+ const struct ak73xx_chipdef *cdef;
struct v4l2_ctrl_handler ctrls_vcm;
struct v4l2_subdev sd;
struct v4l2_ctrl *focus;
@@ -86,10 +117,11 @@ static int ak7375_i2c_write(struct ak7375_device *ak7375,
static int ak7375_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ak7375_device *dev_vcm = to_ak7375_vcm(ctrl);
+ const struct ak73xx_chipdef *cdef = dev_vcm->cdef;
if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
- return ak7375_i2c_write(dev_vcm, AK7375_REG_POSITION,
- ctrl->val << 4, 2);
+ return ak7375_i2c_write(dev_vcm, cdef->reg_position,
+ ctrl->val << cdef->shift_pos, 2);
return -EINVAL;
}
@@ -128,11 +160,12 @@ static int ak7375_init_controls(struct ak7375_device *dev_vcm)
{
struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
const struct v4l2_ctrl_ops *ops = &ak7375_vcm_ctrl_ops;
+ const struct ak73xx_chipdef *cdef = dev_vcm->cdef;
v4l2_ctrl_handler_init(hdl, 1);
dev_vcm->focus = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
- 0, AK7375_MAX_FOCUS_POS, AK7375_FOCUS_STEPS, 0);
+ 0, cdef->focus_pos_max, cdef->focus_steps, 0);
if (hdl->error)
dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n",
@@ -153,6 +186,8 @@ static int ak7375_probe(struct i2c_client *client)
if (!ak7375_dev)
return -ENOMEM;
+ ak7375_dev->cdef = device_get_match_data(&client->dev);
+
for (i = 0; i < ARRAY_SIZE(ak7375_supply_names); i++)
ak7375_dev->supplies[i].supply = ak7375_supply_names[i];
@@ -206,32 +241,35 @@ static void ak7375_remove(struct i2c_client *client)
/*
* This function sets the vcm position so that it consumes the least current.
- * The lens position is gradually moved in units of AK7375_CTRL_STEPS,
+ * The lens position is gradually moved in units of ctrl_steps,
* to make the movement smooth.
*/
static int __maybe_unused ak7375_vcm_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ const struct ak73xx_chipdef *cdef = ak7375_dev->cdef;
int ret, val;
if (!ak7375_dev->active)
return 0;
- for (val = ak7375_dev->focus->val & ~(AK7375_CTRL_STEPS - 1);
- val >= 0; val -= AK7375_CTRL_STEPS) {
- ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
- val << 4, 2);
+ for (val = ak7375_dev->focus->val & ~(cdef->ctrl_steps - 1);
+ val >= 0; val -= cdef->ctrl_steps) {
+ ret = ak7375_i2c_write(ak7375_dev, cdef->reg_position,
+ val << cdef->shift_pos, 2);
if (ret)
dev_err_once(dev, "%s I2C failure: %d\n",
__func__, ret);
- usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ usleep_range(cdef->ctrl_delay_us, cdef->ctrl_delay_us + 10);
}
- ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
- AK7375_MODE_STANDBY, 1);
- if (ret)
- dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+ if (cdef->has_standby) {
+ ret = ak7375_i2c_write(ak7375_dev, cdef->reg_cont,
+ cdef->mode_standby, 1);
+ if (ret)
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+ }
ret = regulator_bulk_disable(ARRAY_SIZE(ak7375_supply_names),
ak7375_dev->supplies);
@@ -246,13 +284,14 @@ static int __maybe_unused ak7375_vcm_suspend(struct device *dev)
/*
* This function sets the vcm position to the value set by the user
* through v4l2_ctrl_ops s_ctrl handler
- * The lens position is gradually moved in units of AK7375_CTRL_STEPS,
+ * The lens position is gradually moved in units of ctrl_steps,
* to make the movement smooth.
*/
static int __maybe_unused ak7375_vcm_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ const struct ak73xx_chipdef *cdef = ak7375_dev->cdef;
int ret, val;
if (ak7375_dev->active)
@@ -264,24 +303,24 @@ static int __maybe_unused ak7375_vcm_resume(struct device *dev)
return ret;
/* Wait for vcm to become ready */
- usleep_range(AK7375_POWER_DELAY_US, AK7375_POWER_DELAY_US + 500);
+ usleep_range(cdef->power_delay_us, cdef->power_delay_us + 500);
- ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
- AK7375_MODE_ACTIVE, 1);
+ ret = ak7375_i2c_write(ak7375_dev, cdef->reg_cont,
+ cdef->mode_active, 1);
if (ret) {
dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
return ret;
}
- for (val = ak7375_dev->focus->val % AK7375_CTRL_STEPS;
+ for (val = ak7375_dev->focus->val % cdef->ctrl_steps;
val <= ak7375_dev->focus->val;
- val += AK7375_CTRL_STEPS) {
- ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
- val << 4, 2);
+ val += cdef->ctrl_steps) {
+ ret = ak7375_i2c_write(ak7375_dev, cdef->reg_position,
+ val << cdef->shift_pos, 2);
if (ret)
dev_err_ratelimited(dev, "%s I2C failure: %d\n",
__func__, ret);
- usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ usleep_range(cdef->ctrl_delay_us, cdef->ctrl_delay_us + 10);
}
ak7375_dev->active = true;
@@ -290,7 +329,8 @@ static int __maybe_unused ak7375_vcm_resume(struct device *dev)
}
static const struct of_device_id ak7375_of_table[] = {
- { .compatible = "asahi-kasei,ak7375" },
+ { .compatible = "asahi-kasei,ak7345", .data = &ak7345_cdef, },
+ { .compatible = "asahi-kasei,ak7375", .data = &ak7375_cdef, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ak7375_of_table);
diff --git a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c
new file mode 100644
index 000000000..34ff7fad3
--- /dev/null
+++ b/drivers/media/i2c/alvium-csi2.c
@@ -0,0 +1,2558 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allied Vision Technologies GmbH Alvium camera driver
+ *
+ * Copyright (C) 2023 Tommaso Merciai
+ * Copyright (C) 2023 Martin Hecht
+ * Copyright (C) 2023 Avnet EMG GmbH
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "alvium-csi2.h"
+
+static const struct v4l2_mbus_framefmt alvium_csi2_default_fmt = {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .width = 640,
+ .height = 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .quantization = V4L2_QUANTIZATION_FULL_RANGE,
+ .xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SRGB),
+ .field = V4L2_FIELD_NONE,
+};
+
+static const struct alvium_pixfmt alvium_csi2_fmts[] = {
+ {
+ /* UYVY8_2X8 */
+ .id = ALVIUM_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_YUV422_8,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_YUV422_8B,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* UYVY8_1X16 */
+ .id = ALVIUM_FMT_UYVY8_1X16,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_YUV422_8,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_YUV422_8B,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* YUYV8_1X16 */
+ .id = ALVIUM_FMT_YUYV8_1X16,
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_YUV422_8,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_YUV422_8B,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* YUYV8_2X8 */
+ .id = ALVIUM_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_YUV422_8,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_YUV422_8B,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* YUYV10_1X20 */
+ .id = ALVIUM_FMT_YUYV10_1X20,
+ .code = MEDIA_BUS_FMT_YUYV10_1X20,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_YUV422_10,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_YUV422_10B,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* RGB888_1X24 */
+ .id = ALVIUM_FMT_RGB888_1X24,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_RGB888,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RGB888,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* RBG888_1X24 */
+ .id = ALVIUM_FMT_RBG888_1X24,
+ .code = MEDIA_BUS_FMT_RBG888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_RGB888,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RGB888,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* BGR888_1X24 */
+ .id = ALVIUM_FMT_BGR888_1X24,
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_RGB888,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RGB888,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* RGB888_3X8 */
+ .id = ALVIUM_FMT_RGB888_3X8,
+ .code = MEDIA_BUS_FMT_RGB888_3X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt_av_bit = ALVIUM_BIT_RGB888,
+ .bay_av_bit = ALVIUM_BIT_BAY_NONE,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RGB888,
+ .bay_fmt_regval = -1,
+ .is_raw = 0,
+ }, {
+ /* Y8_1X8 */
+ .id = ALVIUM_FMT_Y8_1X8,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW8,
+ .bay_av_bit = ALVIUM_BIT_BAY_MONO,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW8,
+ .bay_fmt_regval = 0x00,
+ .is_raw = 1,
+ }, {
+ /* SGRBG8_1X8 */
+ .id = ALVIUM_FMT_SGRBG8_1X8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW8,
+ .bay_av_bit = ALVIUM_BIT_BAY_GR,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW8,
+ .bay_fmt_regval = 0x01,
+ .is_raw = 1,
+ }, {
+ /* SRGGB8_1X8 */
+ .id = ALVIUM_FMT_SRGGB8_1X8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW8,
+ .bay_av_bit = ALVIUM_BIT_BAY_RG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW8,
+ .bay_fmt_regval = 0x02,
+ .is_raw = 1,
+ }, {
+ /* SGBRG8_1X8 */
+ .id = ALVIUM_FMT_SGBRG8_1X8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW8,
+ .bay_av_bit = ALVIUM_BIT_BAY_GB,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW8,
+ .bay_fmt_regval = 0x03,
+ .is_raw = 1,
+ }, {
+ /* SBGGR8_1X8 */
+ .id = ALVIUM_FMT_SBGGR8_1X8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW8,
+ .bay_av_bit = ALVIUM_BIT_BAY_BG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW8,
+ .bay_fmt_regval = 0x04,
+ .is_raw = 1,
+ }, {
+ /* Y10_1X10 */
+ .id = ALVIUM_FMT_Y10_1X10,
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW10,
+ .bay_av_bit = ALVIUM_BIT_BAY_MONO,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW10,
+ .bay_fmt_regval = 0x00,
+ .is_raw = 1,
+ }, {
+ /* SGRBG10_1X10 */
+ .id = ALVIUM_FMT_SGRBG10_1X10,
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW10,
+ .bay_av_bit = ALVIUM_BIT_BAY_GR,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW10,
+ .bay_fmt_regval = 0x01,
+ .is_raw = 1,
+ }, {
+ /* SRGGB10_1X10 */
+ .id = ALVIUM_FMT_SRGGB10_1X10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW10,
+ .bay_av_bit = ALVIUM_BIT_BAY_RG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW10,
+ .bay_fmt_regval = 0x02,
+ .is_raw = 1,
+ }, {
+ /* SGBRG10_1X10 */
+ .id = ALVIUM_FMT_SGBRG10_1X10,
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW10,
+ .bay_av_bit = ALVIUM_BIT_BAY_GB,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW10,
+ .bay_fmt_regval = 0x03,
+ .is_raw = 1,
+ }, {
+ /* SBGGR10_1X10 */
+ .id = ALVIUM_FMT_SBGGR10_1X10,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW10,
+ .bay_av_bit = ALVIUM_BIT_BAY_BG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW10,
+ .bay_fmt_regval = 0x04,
+ .is_raw = 1,
+ }, {
+ /* Y12_1X12 */
+ .id = ALVIUM_FMT_Y12_1X12,
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW12,
+ .bay_av_bit = ALVIUM_BIT_BAY_MONO,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW12,
+ .bay_fmt_regval = 0x00,
+ .is_raw = 1,
+ }, {
+ /* SGRBG12_1X12 */
+ .id = ALVIUM_FMT_SGRBG12_1X12,
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW12,
+ .bay_av_bit = ALVIUM_BIT_BAY_GR,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW12,
+ .bay_fmt_regval = 0x01,
+ .is_raw = 1,
+ }, {
+ /* SRGGB12_1X12 */
+ .id = ALVIUM_FMT_SRGGB12_1X12,
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW12,
+ .bay_av_bit = ALVIUM_BIT_BAY_RG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW12,
+ .bay_fmt_regval = 0x02,
+ .is_raw = 1,
+ }, {
+ /* SGBRG12_1X12 */
+ .id = ALVIUM_FMT_SGBRG12_1X12,
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW12,
+ .bay_av_bit = ALVIUM_BIT_BAY_GB,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW12,
+ .bay_fmt_regval = 0x03,
+ .is_raw = 1,
+ }, {
+ /* SBGGR12_1X12 */
+ .id = ALVIUM_FMT_SBGGR12_1X12,
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW12,
+ .bay_av_bit = ALVIUM_BIT_BAY_BG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW12,
+ .bay_fmt_regval = 0x04,
+ .is_raw = 1,
+ }, {
+ /* SBGGR14_1X14 */
+ .id = ALVIUM_FMT_SBGGR14_1X14,
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW14,
+ .bay_av_bit = ALVIUM_BIT_BAY_GR,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW14,
+ .bay_fmt_regval = 0x01,
+ .is_raw = 1,
+ }, {
+ /* SGBRG14_1X14 */
+ .id = ALVIUM_FMT_SGBRG14_1X14,
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW14,
+ .bay_av_bit = ALVIUM_BIT_BAY_RG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW14,
+ .bay_fmt_regval = 0x02,
+ .is_raw = 1,
+ }, {
+ /* SRGGB14_1X14 */
+ .id = ALVIUM_FMT_SRGGB14_1X14,
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW14,
+ .bay_av_bit = ALVIUM_BIT_BAY_GB,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW14,
+ .bay_fmt_regval = 0x03,
+ .is_raw = 1,
+ }, {
+ /* SGRBG14_1X14 */
+ .id = ALVIUM_FMT_SGRBG14_1X14,
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .fmt_av_bit = ALVIUM_BIT_RAW14,
+ .bay_av_bit = ALVIUM_BIT_BAY_BG,
+ .mipi_fmt_regval = MIPI_CSI2_DT_RAW14,
+ .bay_fmt_regval = 0x04,
+ .is_raw = 1,
+ },
+ { /* sentinel */ }
+};
+
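+/*
+ * Register access helpers: addresses flagged with REG_BCRM_V4L2 are
+ * relative to the BCRM base address read from the camera at probe time,
+ * so they are rebased onto alvium->bcrm_addr before the CCI access.
+ * Following the V4L2 CCI convention, a non-NULL err pointer accumulates
+ * the first error and turns later calls into no-ops, letting a sequence
+ * of accesses share a single error check.
+ */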
+static int alvium_read(struct alvium_dev *alvium, u32 reg, u64 *val, int *err)
+{
+ if (reg & REG_BCRM_V4L2) {
+ reg &= ~REG_BCRM_V4L2;
+ reg += alvium->bcrm_addr;
+ }
+
+ return cci_read(alvium->regmap, reg, val, err);
+}
+
+static int alvium_write(struct alvium_dev *alvium, u32 reg, u64 val, int *err)
+{
+ if (reg & REG_BCRM_V4L2) {
+ reg &= ~REG_BCRM_V4L2;
+ reg += alvium->bcrm_addr;
+ }
+
+ return cci_write(alvium->regmap, reg, val, err);
+}
+
+static int alvium_write_hshake(struct alvium_dev *alvium, u32 reg, u64 val)
+{
+	struct device *dev = &alvium->i2c_client->dev;
+	u64 hshake_bit = 0;
+	int read_ret;
+	int ret = 0;
+
+	/* reset handshake bit and write alvium reg */
+	alvium_write(alvium, REG_BCRM_WRITE_HANDSHAKE_RW, 0, &ret);
+	alvium_write(alvium, reg, val, &ret);
+	if (ret) {
+		dev_err(dev, "Fail to write reg\n");
+		return ret;
+	}
+
+	/*
+	 * Poll until the device acknowledges the write by setting the
+	 * handshake bit (bit0). Note that read_poll_timeout() assigns the
+	 * return value of alvium_read() to its second argument, so a
+	 * separate status variable tracks the read result while the
+	 * register value is fetched through the &hshake_bit pointer.
+	 */
+	ret = read_poll_timeout(alvium_read, read_ret,
+				read_ret ||
+				(hshake_bit & BCRM_HANDSHAKE_W_DONE_EN_BIT),
+				15000, 45000, true,
+				alvium, REG_BCRM_WRITE_HANDSHAKE_RW,
+				&hshake_bit, NULL);
+	if (ret || read_ret) {
+		dev_err(dev, "poll bit[0] = 1, hshake reg fail\n");
+		return ret ? ret : read_ret;
+	}
+
+	/* reset handshake bit, write 0 to bit0 */
+	alvium_write(alvium, REG_BCRM_WRITE_HANDSHAKE_RW, 0, &ret);
+	if (ret) {
+		dev_err(dev, "Fail to reset hshake reg\n");
+		return ret;
+	}
+
+	/* poll until the handshake bit (bit0) reads back as 0 */
+	ret = read_poll_timeout(alvium_read, read_ret,
+				read_ret ||
+				!(hshake_bit & BCRM_HANDSHAKE_W_DONE_EN_BIT),
+				15000, 45000, true,
+				alvium, REG_BCRM_WRITE_HANDSHAKE_RW,
+				&hshake_bit, NULL);
+	if (ret || read_ret) {
+		dev_err(dev, "poll bit[0] = 0, hshake reg fail\n");
+		return ret ? ret : read_ret;
+	}
+
+	return 0;
+}
+
+static int alvium_get_bcrm_vers(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ u64 min, maj;
+ int ret = 0;
+
+	alvium_read(alvium, REG_BCRM_MINOR_VERSION_R, &min, &ret);
+	alvium_read(alvium, REG_BCRM_MAJOR_VERSION_R, &maj, &ret);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "bcrm version: %llu.%llu\n", maj, min);
+
+ return 0;
+}
+
+static int alvium_get_fw_version(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ u64 spec, maj, min, pat;
+ int ret = 0;
+
+	alvium_read(alvium, REG_BCRM_DEVICE_FW_SPEC_VERSION_R, &spec, &ret);
+	alvium_read(alvium, REG_BCRM_DEVICE_FW_MAJOR_VERSION_R, &maj, &ret);
+	alvium_read(alvium, REG_BCRM_DEVICE_FW_MINOR_VERSION_R, &min, &ret);
+	alvium_read(alvium, REG_BCRM_DEVICE_FW_PATCH_VERSION_R, &pat, &ret);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "fw version: %llu.%llu.%llu.%llu\n", spec, maj, min, pat);
+
+ return 0;
+}
+
+static int alvium_get_bcrm_addr(struct alvium_dev *alvium)
+{
+ u64 val;
+ int ret;
+
+ ret = alvium_read(alvium, REG_BCRM_REG_ADDR_R, &val, NULL);
+ if (ret)
+ return ret;
+
+ alvium->bcrm_addr = val;
+
+ return 0;
+}
+
+static int alvium_is_alive(struct alvium_dev *alvium)
+{
+ u64 bcrm, hbeat;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_MINOR_VERSION_R, &bcrm, &ret);
+ alvium_read(alvium, REG_BCRM_HEARTBEAT_RW, &hbeat, &ret);
+	if (ret)
+		return 0;
+
+	/* a nonzero heartbeat value means the camera firmware is up */
+	return hbeat;
+}
+
+static void alvium_print_avail_mipi_fmt(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+
+ dev_dbg(dev, "avail mipi_fmt yuv420_8_leg: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_8_LEG]);
+ dev_dbg(dev, "avail mipi_fmt yuv420_8: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_8]);
+ dev_dbg(dev, "avail mipi_fmt yuv420_10: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_10]);
+ dev_dbg(dev, "avail mipi_fmt yuv420_8_csps: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_8_CSPS]);
+ dev_dbg(dev, "avail mipi_fmt yuv420_10_csps: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_10_CSPS]);
+ dev_dbg(dev, "avail mipi_fmt yuv422_8: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV422_8]);
+ dev_dbg(dev, "avail mipi_fmt yuv422_10: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV422_10]);
+ dev_dbg(dev, "avail mipi_fmt rgb888: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB888]);
+ dev_dbg(dev, "avail mipi_fmt rgb666: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB666]);
+ dev_dbg(dev, "avail mipi_fmt rgb565: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB565]);
+ dev_dbg(dev, "avail mipi_fmt rgb555: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB555]);
+ dev_dbg(dev, "avail mipi_fmt rgb444: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB444]);
+ dev_dbg(dev, "avail mipi_fmt raw6: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW6]);
+ dev_dbg(dev, "avail mipi_fmt raw7: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW7]);
+ dev_dbg(dev, "avail mipi_fmt raw8: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW8]);
+ dev_dbg(dev, "avail mipi_fmt raw10: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW10]);
+ dev_dbg(dev, "avail mipi_fmt raw12: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW12]);
+ dev_dbg(dev, "avail mipi_fmt raw14: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW14]);
+ dev_dbg(dev, "avail mipi_fmt jpeg: %u\n",
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_JPEG]);
+}
+
+static void alvium_print_avail_feat(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+
+ dev_dbg(dev, "feature rev_x: %u\n", alvium->avail_ft.rev_x);
+ dev_dbg(dev, "feature rev_y: %u\n", alvium->avail_ft.rev_y);
+ dev_dbg(dev, "feature int_autop: %u\n", alvium->avail_ft.int_autop);
+ dev_dbg(dev, "feature black_lvl: %u\n", alvium->avail_ft.black_lvl);
+ dev_dbg(dev, "feature gain: %u\n", alvium->avail_ft.gain);
+ dev_dbg(dev, "feature gamma: %u\n", alvium->avail_ft.gamma);
+ dev_dbg(dev, "feature contrast: %u\n", alvium->avail_ft.contrast);
+ dev_dbg(dev, "feature sat: %u\n", alvium->avail_ft.sat);
+ dev_dbg(dev, "feature hue: %u\n", alvium->avail_ft.hue);
+ dev_dbg(dev, "feature whiteb: %u\n", alvium->avail_ft.whiteb);
+ dev_dbg(dev, "feature sharp: %u\n", alvium->avail_ft.sharp);
+ dev_dbg(dev, "feature auto_exp: %u\n", alvium->avail_ft.auto_exp);
+ dev_dbg(dev, "feature auto_gain: %u\n", alvium->avail_ft.auto_gain);
+ dev_dbg(dev, "feature auto_whiteb: %u\n", alvium->avail_ft.auto_whiteb);
+ dev_dbg(dev, "feature dev_temp: %u\n", alvium->avail_ft.dev_temp);
+ dev_dbg(dev, "feature acq_abort: %u\n", alvium->avail_ft.acq_abort);
+ dev_dbg(dev, "feature acq_fr: %u\n", alvium->avail_ft.acq_fr);
+ dev_dbg(dev, "feature fr_trigger: %u\n", alvium->avail_ft.fr_trigger);
+ dev_dbg(dev, "feature exp_acq_line: %u\n",
+ alvium->avail_ft.exp_acq_line);
+}
+
+static void alvium_print_avail_bayer(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+
+ dev_dbg(dev, "avail bayer mono: %u\n",
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_MONO]);
+ dev_dbg(dev, "avail bayer gr: %u\n",
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_GR]);
+ dev_dbg(dev, "avail bayer rg: %u\n",
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_RG]);
+ dev_dbg(dev, "avail bayer gb: %u\n",
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_GB]);
+ dev_dbg(dev, "avail bayer bg: %u\n",
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_BG]);
+}
+
+static int alvium_get_feat_inq(struct alvium_dev *alvium)
+{
+ struct alvium_avail_feat *f;
+ u64 val;
+ int ret;
+
+ ret = alvium_read(alvium, REG_BCRM_FEATURE_INQUIRY_R, &val, NULL);
+ if (ret)
+ return ret;
+
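+	/*
+	 * Reinterpret the raw register value through the feature-inquiry
+	 * bitfield struct; this assumes the struct's bit order in
+	 * alvium-csi2.h matches the little-endian register layout.
+	 */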
+ f = (struct alvium_avail_feat *)&val;
+ alvium->avail_ft = *f;
+ alvium_print_avail_feat(alvium);
+
+ return 0;
+}
+
+static int alvium_get_host_supp_csi_lanes(struct alvium_dev *alvium)
+{
+ u64 val;
+ int ret;
+
+ ret = alvium_read(alvium, REG_BCRM_CSI2_LANE_COUNT_RW, &val, NULL);
+ if (ret)
+ return ret;
+
+ alvium->h_sup_csi_lanes = val;
+
+ return 0;
+}
+
+static int alvium_set_csi_lanes(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ u64 num_lanes;
+ int ret;
+
+ num_lanes = alvium->ep.bus.mipi_csi2.num_data_lanes;
+
+ if (num_lanes > alvium->h_sup_csi_lanes)
+ return -EINVAL;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_CSI2_LANE_COUNT_RW,
+ num_lanes);
+ if (ret) {
+ dev_err(dev, "Fail to set csi lanes reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_lp2hs_delay(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret = 0;
+
+	/*
+	 * This reg forces a D-PHY reset for the number of milliseconds
+	 * written to it before the camera starts streaming: with a value
+	 * greater than 0 the Alvium resets the D-PHY on all lanes for
+	 * that period, i.e. all lanes go into the low power state.
+	 */
+ alvium_write(alvium, REG_BCRM_LP2HS_DELAY_RW,
+ ALVIUM_LP2HS_DELAY_MS, &ret);
+ if (ret) {
+ dev_err(dev, "Fail to set lp2hs delay reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_csi_clk_params(struct alvium_dev *alvium)
+{
+ u64 min_csi_clk, max_csi_clk;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_CSI2_CLOCK_MIN_R, &min_csi_clk, &ret);
+ alvium_read(alvium, REG_BCRM_CSI2_CLOCK_MAX_R, &max_csi_clk, &ret);
+ if (ret)
+ return ret;
+
+ alvium->min_csi_clk = min_csi_clk;
+ alvium->max_csi_clk = max_csi_clk;
+
+ return 0;
+}
+
+static int alvium_set_csi_clk(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ u64 csi_clk;
+ int ret;
+
+ csi_clk = clamp(alvium->ep.link_frequencies[0],
+ (u64)alvium->min_csi_clk, (u64)alvium->max_csi_clk);
+
+ if (alvium->ep.link_frequencies[0] != (u64)csi_clk) {
+ dev_warn(dev,
+ "requested csi clock (%llu MHz) out of range [%u, %u] Adjusted to %llu\n",
+ alvium->ep.link_frequencies[0],
+ alvium->min_csi_clk, alvium->max_csi_clk, csi_clk);
+ }
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_CSI2_CLOCK_RW, csi_clk);
+ if (ret) {
+ dev_err(dev, "Fail to set csi clock reg\n");
+ return ret;
+ }
+
+ alvium->link_freq = csi_clk;
+
+ return 0;
+}
+
+static int alvium_get_img_width_params(struct alvium_dev *alvium)
+{
+ u64 imgw, imgw_min, imgw_max, imgw_inc;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_IMG_WIDTH_RW, &imgw, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_WIDTH_MIN_R, &imgw_min, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_WIDTH_MAX_R, &imgw_max, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_WIDTH_INC_R, &imgw_inc, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_img_width = imgw;
+ alvium->img_min_width = imgw_min;
+ alvium->img_max_width = imgw_max;
+ alvium->img_inc_width = imgw_inc;
+
+ return 0;
+}
+
+static int alvium_get_img_height_params(struct alvium_dev *alvium)
+{
+ u64 imgh, imgh_min, imgh_max, imgh_inc;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_IMG_HEIGHT_RW, &imgh, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_HEIGHT_MIN_R, &imgh_min, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_HEIGHT_MAX_R, &imgh_max, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_HEIGHT_INC_R, &imgh_inc, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_img_height = imgh;
+ alvium->img_min_height = imgh_min;
+ alvium->img_max_height = imgh_max;
+ alvium->img_inc_height = imgh_inc;
+
+ return 0;
+}
+
+static int alvium_set_img_width(struct alvium_dev *alvium, u32 width)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_WIDTH_RW, width);
+ if (ret) {
+ dev_err(dev, "Fail to set img width\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_img_height(struct alvium_dev *alvium, u32 height)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_HEIGHT_RW, height);
+ if (ret) {
+ dev_err(dev, "Fail to set img height\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_img_offx(struct alvium_dev *alvium, u32 offx)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_OFFSET_X_RW, offx);
+ if (ret) {
+ dev_err(dev, "Fail to set img offx\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_img_offy(struct alvium_dev *alvium, u32 offy)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_OFFSET_Y_RW, offy);
+ if (ret) {
+ dev_err(dev, "Fail to set img offy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_offx_params(struct alvium_dev *alvium)
+{
+ u64 min_offx, max_offx, inc_offx;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_IMG_OFFSET_X_MIN_R, &min_offx, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_OFFSET_X_MAX_R, &max_offx, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_OFFSET_X_INC_R, &inc_offx, &ret);
+ if (ret)
+ return ret;
+
+ alvium->min_offx = min_offx;
+ alvium->max_offx = max_offx;
+ alvium->inc_offx = inc_offx;
+
+ return 0;
+}
+
+static int alvium_get_offy_params(struct alvium_dev *alvium)
+{
+ u64 min_offy, max_offy, inc_offy;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_IMG_OFFSET_Y_MIN_R, &min_offy, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_OFFSET_Y_MAX_R, &max_offy, &ret);
+ alvium_read(alvium, REG_BCRM_IMG_OFFSET_Y_INC_R, &inc_offy, &ret);
+ if (ret)
+ return ret;
+
+ alvium->min_offy = min_offy;
+ alvium->max_offy = max_offy;
+ alvium->inc_offy = inc_offy;
+
+ return 0;
+}
+
+static int alvium_get_gain_params(struct alvium_dev *alvium)
+{
+ u64 dft_gain, min_gain, max_gain, inc_gain;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_GAIN_RW, &dft_gain, &ret);
+ alvium_read(alvium, REG_BCRM_GAIN_MIN_R, &min_gain, &ret);
+ alvium_read(alvium, REG_BCRM_GAIN_MAX_R, &max_gain, &ret);
+ alvium_read(alvium, REG_BCRM_GAIN_INC_R, &inc_gain, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_gain = dft_gain;
+ alvium->min_gain = min_gain;
+ alvium->max_gain = max_gain;
+ alvium->inc_gain = inc_gain;
+
+ return 0;
+}
+
+static int alvium_get_exposure_params(struct alvium_dev *alvium)
+{
+ u64 dft_exp, min_exp, max_exp, inc_exp;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_EXPOSURE_TIME_RW, &dft_exp, &ret);
+ alvium_read(alvium, REG_BCRM_EXPOSURE_TIME_MIN_R, &min_exp, &ret);
+ alvium_read(alvium, REG_BCRM_EXPOSURE_TIME_MAX_R, &max_exp, &ret);
+ alvium_read(alvium, REG_BCRM_EXPOSURE_TIME_INC_R, &inc_exp, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_exp = dft_exp;
+ alvium->min_exp = min_exp;
+ alvium->max_exp = max_exp;
+ alvium->inc_exp = inc_exp;
+
+ return 0;
+}
+
+static int alvium_get_red_balance_ratio_params(struct alvium_dev *alvium)
+{
+ u64 dft_rb, min_rb, max_rb, inc_rb;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_RED_BALANCE_RATIO_RW, &dft_rb, &ret);
+ alvium_read(alvium, REG_BCRM_RED_BALANCE_RATIO_MIN_R, &min_rb, &ret);
+ alvium_read(alvium, REG_BCRM_RED_BALANCE_RATIO_MAX_R, &max_rb, &ret);
+ alvium_read(alvium, REG_BCRM_RED_BALANCE_RATIO_INC_R, &inc_rb, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_rbalance = dft_rb;
+ alvium->min_rbalance = min_rb;
+ alvium->max_rbalance = max_rb;
+ alvium->inc_rbalance = inc_rb;
+
+ return 0;
+}
+
+static int alvium_get_blue_balance_ratio_params(struct alvium_dev *alvium)
+{
+ u64 dft_bb, min_bb, max_bb, inc_bb;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_BLUE_BALANCE_RATIO_RW, &dft_bb, &ret);
+ alvium_read(alvium, REG_BCRM_BLUE_BALANCE_RATIO_MIN_R, &min_bb, &ret);
+ alvium_read(alvium, REG_BCRM_BLUE_BALANCE_RATIO_MAX_R, &max_bb, &ret);
+ alvium_read(alvium, REG_BCRM_BLUE_BALANCE_RATIO_INC_R, &inc_bb, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_bbalance = dft_bb;
+ alvium->min_bbalance = min_bb;
+ alvium->max_bbalance = max_bb;
+ alvium->inc_bbalance = inc_bb;
+
+ return 0;
+}
+
+static int alvium_get_hue_params(struct alvium_dev *alvium)
+{
+ u64 dft_hue, min_hue, max_hue, inc_hue;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_HUE_RW, &dft_hue, &ret);
+ alvium_read(alvium, REG_BCRM_HUE_MIN_R, &min_hue, &ret);
+ alvium_read(alvium, REG_BCRM_HUE_MAX_R, &max_hue, &ret);
+ alvium_read(alvium, REG_BCRM_HUE_INC_R, &inc_hue, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_hue = (s32)dft_hue;
+ alvium->min_hue = (s32)min_hue;
+ alvium->max_hue = (s32)max_hue;
+ alvium->inc_hue = (s32)inc_hue;
+
+ return 0;
+}
+
+static int alvium_get_black_lvl_params(struct alvium_dev *alvium)
+{
+ u64 dft_blvl, min_blvl, max_blvl, inc_blvl;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_BLACK_LEVEL_RW, &dft_blvl, &ret);
+ alvium_read(alvium, REG_BCRM_BLACK_LEVEL_MIN_R, &min_blvl, &ret);
+ alvium_read(alvium, REG_BCRM_BLACK_LEVEL_MAX_R, &max_blvl, &ret);
+ alvium_read(alvium, REG_BCRM_BLACK_LEVEL_INC_R, &inc_blvl, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_black_lvl = (s32)dft_blvl;
+ alvium->min_black_lvl = (s32)min_blvl;
+ alvium->max_black_lvl = (s32)max_blvl;
+ alvium->inc_black_lvl = (s32)inc_blvl;
+
+ return 0;
+}
+
+static int alvium_get_gamma_params(struct alvium_dev *alvium)
+{
+ u64 dft_g, min_g, max_g, inc_g;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_GAMMA_RW, &dft_g, &ret);
+ alvium_read(alvium, REG_BCRM_GAMMA_MIN_R, &min_g, &ret);
+ alvium_read(alvium, REG_BCRM_GAMMA_MAX_R, &max_g, &ret);
+ alvium_read(alvium, REG_BCRM_GAMMA_INC_R, &inc_g, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_gamma = dft_g;
+ alvium->min_gamma = min_g;
+ alvium->max_gamma = max_g;
+ alvium->inc_gamma = inc_g;
+
+ return 0;
+}
+
+static int alvium_get_sharpness_params(struct alvium_dev *alvium)
+{
+ u64 dft_sh, min_sh, max_sh, inc_sh;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_SHARPNESS_RW, &dft_sh, &ret);
+ alvium_read(alvium, REG_BCRM_SHARPNESS_MIN_R, &min_sh, &ret);
+	alvium_read(alvium, REG_BCRM_SHARPNESS_MAX_R, &max_sh, &ret);
+ alvium_read(alvium, REG_BCRM_SHARPNESS_INC_R, &inc_sh, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_sharp = (s32)dft_sh;
+ alvium->min_sharp = (s32)min_sh;
+ alvium->max_sharp = (s32)max_sh;
+ alvium->inc_sharp = (s32)inc_sh;
+
+ return 0;
+}
+
+static int alvium_get_contrast_params(struct alvium_dev *alvium)
+{
+ u64 dft_c, min_c, max_c, inc_c;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_CONTRAST_VALUE_RW, &dft_c, &ret);
+ alvium_read(alvium, REG_BCRM_CONTRAST_VALUE_MIN_R, &min_c, &ret);
+ alvium_read(alvium, REG_BCRM_CONTRAST_VALUE_MAX_R, &max_c, &ret);
+ alvium_read(alvium, REG_BCRM_CONTRAST_VALUE_INC_R, &inc_c, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_contrast = dft_c;
+ alvium->min_contrast = min_c;
+ alvium->max_contrast = max_c;
+ alvium->inc_contrast = inc_c;
+
+ return 0;
+}
+
+static int alvium_get_saturation_params(struct alvium_dev *alvium)
+{
+ u64 dft_sat, min_sat, max_sat, inc_sat;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_SATURATION_RW, &dft_sat, &ret);
+ alvium_read(alvium, REG_BCRM_SATURATION_MIN_R, &min_sat, &ret);
+ alvium_read(alvium, REG_BCRM_SATURATION_MAX_R, &max_sat, &ret);
+ alvium_read(alvium, REG_BCRM_SATURATION_INC_R, &inc_sat, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_sat = dft_sat;
+ alvium->min_sat = min_sat;
+ alvium->max_sat = max_sat;
+ alvium->inc_sat = inc_sat;
+
+ return 0;
+}
+
+static int alvium_set_bcm_mode(struct alvium_dev *alvium)
+{
+ int ret = 0;
+
+ alvium_write(alvium, REG_GENCP_CHANGEMODE_W, ALVIUM_BCM_MODE, &ret);
+ alvium->bcrm_mode = ALVIUM_BCM_MODE;
+
+ return ret;
+}
+
+static int alvium_get_mode(struct alvium_dev *alvium)
+{
+ u64 bcrm_mode;
+ int ret;
+
+ ret = alvium_read(alvium, REG_GENCP_CURRENTMODE_R, &bcrm_mode, NULL);
+ if (ret)
+ return ret;
+
+	switch (bcrm_mode) {
+	case ALVIUM_BCM_MODE:
+		alvium->bcrm_mode = ALVIUM_BCM_MODE;
+		break;
+	case ALVIUM_GENCP_MODE:
+		alvium->bcrm_mode = ALVIUM_GENCP_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
+static int alvium_get_avail_mipi_data_format(struct alvium_dev *alvium)
+{
+ struct alvium_avail_mipi_fmt *avail_fmt;
+ u64 val;
+ int ret;
+
+ ret = alvium_read(alvium, REG_BCRM_IMG_AVAILABLE_MIPI_DATA_FORMATS_R,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ avail_fmt = (struct alvium_avail_mipi_fmt *)&val;
+
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_8_LEG] =
+ avail_fmt->yuv420_8_leg;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_8] =
+ avail_fmt->yuv420_8;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_10] =
+ avail_fmt->yuv420_10;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_8_CSPS] =
+ avail_fmt->yuv420_8_csps;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV420_10_CSPS] =
+ avail_fmt->yuv420_10_csps;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV422_8] =
+ avail_fmt->yuv422_8;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_YUV422_10] =
+ avail_fmt->yuv422_10;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB888] =
+ avail_fmt->rgb888;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB666] =
+ avail_fmt->rgb666;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB565] =
+ avail_fmt->rgb565;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB555] =
+ avail_fmt->rgb555;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RGB444] =
+ avail_fmt->rgb444;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW6] =
+ avail_fmt->raw6;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW7] =
+ avail_fmt->raw7;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW8] =
+ avail_fmt->raw8;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW10] =
+ avail_fmt->raw10;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW12] =
+ avail_fmt->raw12;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_RAW14] =
+ avail_fmt->raw14;
+ alvium->is_mipi_fmt_avail[ALVIUM_BIT_JPEG] =
+ avail_fmt->jpeg;
+
+ alvium_print_avail_mipi_fmt(alvium);
+
+ return 0;
+}
+
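+/*
+ * Build the per-device format list by filtering the static
+ * alvium_csi2_fmts table against what the camera reported as available:
+ * non-raw entries only need their MIPI data format bit, raw entries also
+ * need their Bayer pattern bit.
+ */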
+static int alvium_setup_mipi_fmt(struct alvium_dev *alvium)
+{
+ unsigned int avail_fmt_cnt = 0;
+ unsigned int fmt = 0;
+ size_t sz = 0;
+
+ /* calculate fmt array size */
+ for (fmt = 0; fmt < ALVIUM_NUM_SUPP_MIPI_DATA_FMT; fmt++) {
+ if (!alvium->is_mipi_fmt_avail[alvium_csi2_fmts[fmt].fmt_av_bit])
+ continue;
+
+ if (!alvium_csi2_fmts[fmt].is_raw ||
+ alvium->is_bay_avail[alvium_csi2_fmts[fmt].bay_av_bit])
+ sz++;
+ }
+
+ /* init alvium_csi2_fmt array */
+ alvium->alvium_csi2_fmt_n = sz;
+ alvium->alvium_csi2_fmt =
+ kmalloc_array(sz, sizeof(struct alvium_pixfmt), GFP_KERNEL);
+ if (!alvium->alvium_csi2_fmt)
+ return -ENOMEM;
+
+ /* Create the alvium_csi2 fmt array from formats available */
+ for (fmt = 0; fmt < ALVIUM_NUM_SUPP_MIPI_DATA_FMT; fmt++) {
+ if (!alvium->is_mipi_fmt_avail[alvium_csi2_fmts[fmt].fmt_av_bit])
+ continue;
+
+ if (!alvium_csi2_fmts[fmt].is_raw ||
+ alvium->is_bay_avail[alvium_csi2_fmts[fmt].bay_av_bit]) {
+ alvium->alvium_csi2_fmt[avail_fmt_cnt] =
+ alvium_csi2_fmts[fmt];
+ avail_fmt_cnt++;
+ }
+ }
+
+ return 0;
+}
+
+static int alvium_set_mipi_fmt(struct alvium_dev *alvium,
+ const struct alvium_pixfmt *pixfmt)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_MIPI_DATA_FORMAT_RW,
+ pixfmt->mipi_fmt_regval);
+ if (ret) {
+ dev_err(dev, "Fail to set mipi fmt\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_avail_bayer(struct alvium_dev *alvium)
+{
+ struct alvium_avail_bayer *avail_bay;
+ u64 val;
+ int ret;
+
+ ret = alvium_read(alvium, REG_BCRM_IMG_BAYER_PATTERN_INQUIRY_R,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ avail_bay = (struct alvium_avail_bayer *)&val;
+
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_MONO] = avail_bay->mono;
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_GR] = avail_bay->gr;
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_RG] = avail_bay->rg;
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_GB] = avail_bay->gb;
+ alvium->is_bay_avail[ALVIUM_BIT_BAY_BG] = avail_bay->bg;
+
+ alvium_print_avail_bayer(alvium);
+
+ return 0;
+}
+
+static int alvium_set_bayer_pattern(struct alvium_dev *alvium,
+ const struct alvium_pixfmt *pixfmt)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_BAYER_PATTERN_RW,
+ pixfmt->bay_fmt_regval);
+ if (ret) {
+ dev_err(dev, "Fail to set bayer pattern\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_frame_interval(struct alvium_dev *alvium)
+{
+ u64 dft_fr, min_fr, max_fr;
+ int ret = 0;
+
+ alvium_read(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_RW,
+ &dft_fr, &ret);
+ alvium_read(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_MIN_R,
+ &min_fr, &ret);
+ alvium_read(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_MAX_R,
+ &max_fr, &ret);
+ if (ret)
+ return ret;
+
+ alvium->dft_fr = dft_fr;
+ alvium->min_fr = min_fr;
+ alvium->max_fr = max_fr;
+
+ return 0;
+}
+
+static int alvium_set_frame_rate(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_ACQUISITION_FRAME_RATE_RW,
+ alvium->fr);
+ if (ret) {
+ dev_err(dev, "Fail to set frame rate lanes reg\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "set frame rate: %llu us\n", alvium->fr);
+
+ return 0;
+}
+
+static int alvium_set_stream_mipi(struct alvium_dev *alvium, bool on)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, on ? REG_BCRM_ACQUISITION_START_RW :
+ REG_BCRM_ACQUISITION_STOP_RW, 0x01);
+ if (ret) {
+ dev_err(dev, "Fail set_stream_mipi\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_gain(struct alvium_dev *alvium)
+{
+ u64 gain;
+ int ret;
+
+ /* The unit is millibel (1 mB = 0.01 dB) */
+ ret = alvium_read(alvium, REG_BCRM_GAIN_RW, &gain, NULL);
+ if (ret)
+ return ret;
+
+ return gain;
+}
+
+static int alvium_set_ctrl_gain(struct alvium_dev *alvium, int gain)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ /* The unit is millibel (1 mB = 0.01 dB) */
+ ret = alvium_write_hshake(alvium, REG_BCRM_GAIN_RW, (u64)gain);
+ if (ret) {
+ dev_err(dev, "Fail to set gain value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_auto_gain(struct alvium_dev *alvium, bool on)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_GAIN_AUTO_RW,
+ on ? 0x02 : 0x00);
+ if (ret) {
+ dev_err(dev, "Fail to set autogain reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_exposure(struct alvium_dev *alvium)
+{
+ u64 exp;
+ int ret;
+
+ /* Exposure time in ns */
+ ret = alvium_read(alvium, REG_BCRM_EXPOSURE_TIME_RW, &exp, NULL);
+ if (ret)
+ return ret;
+
+ return exp;
+}
+
+static int alvium_set_ctrl_auto_exposure(struct alvium_dev *alvium, bool on)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+	ret = alvium_write_hshake(alvium, REG_BCRM_EXPOSURE_AUTO_RW,
+ on ? 0x02 : 0x00);
+ if (ret) {
+ dev_err(dev, "Fail to set autoexposure reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_exposure(struct alvium_dev *alvium, int exposure_ns)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_EXPOSURE_TIME_RW,
+ (u64)exposure_ns);
+ if (ret) {
+ dev_err(dev, "Fail to set exposure value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_blue_balance_ratio(struct alvium_dev *alvium,
+ int blue)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_BLUE_BALANCE_RATIO_RW,
+ (u64)blue);
+ if (ret) {
+ dev_err(dev, "Fail to set blue ratio value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_red_balance_ratio(struct alvium_dev *alvium, int red)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_RED_BALANCE_RATIO_RW,
+ (u64)red);
+ if (ret) {
+ dev_err(dev, "Fail to set red ratio value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_awb(struct alvium_dev *alvium, bool on)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_WHITE_BALANCE_AUTO_RW,
+ on ? 0x02 : 0x00);
+ if (ret) {
+ dev_err(dev, "Fail to set awb reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_hue(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_HUE_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set hue value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_contrast(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_CONTRAST_VALUE_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set contrast value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_saturation(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_SATURATION_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set contrast value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_gamma(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_GAMMA_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set gamma value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_sharpness(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_SHARPNESS_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set sharpness value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_hflip(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_REVERSE_X_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set reverse_x value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_set_ctrl_vflip(struct alvium_dev *alvium, int val)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_write_hshake(alvium, REG_BCRM_IMG_REVERSE_Y_RW, (u64)val);
+ if (ret) {
+ dev_err(dev, "Fail to set reverse_y value reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_hw_features_params(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_get_csi_clk_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read min/max csi clock regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_img_width_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read img width regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_img_height_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read img heigth regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_offx_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read offx regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_offy_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read offy regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_gain_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read gain regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_exposure_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read min/max exp regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_red_balance_ratio_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read red balance ratio regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_blue_balance_ratio_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read blue balance ratio regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_hue_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read hue regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_contrast_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read contrast regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_saturation_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read saturation regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_black_lvl_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read black lvl regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_gamma_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read gamma regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_sharpness_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read sharpness regs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_get_hw_info(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+ ret = alvium_get_bcrm_vers(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read bcrm version reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_bcrm_addr(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to bcrm address reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_fw_version(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read fw version reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_host_supp_csi_lanes(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read host supported csi lanes reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_feat_inq(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read bcrm feature inquiry reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_hw_features_params(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read features params regs\n");
+ return ret;
+ }
+
+ ret = alvium_get_avail_mipi_data_format(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read available mipi data formats reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_avail_bayer(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to read available Bayer patterns reg\n");
+ return ret;
+ }
+
+ ret = alvium_get_mode(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to get current mode reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
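+/*
+ * One-time hardware setup, run at probe time and on every runtime
+ * resume: switch the camera to BCM mode, then program the CSI-2 lane
+ * count, the CSI-2 clock and the LP2HS delay.
+ */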
+static int alvium_hw_init(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ int ret;
+
+	/* Set Alvium BCM mode */
+ ret = alvium_set_bcm_mode(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to set BCM mode\n");
+ return ret;
+ }
+
+ ret = alvium_set_csi_lanes(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to set csi lanes\n");
+ return ret;
+ }
+
+ ret = alvium_set_csi_clk(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to set csi clk\n");
+ return ret;
+ }
+
+ ret = alvium_set_lp2hs_delay(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to set lp2hs reg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* --------------- Subdev Operations --------------- */
+
+static int alvium_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ fi->interval = alvium->frame_interval;
+
+ return 0;
+}
+
+static int alvium_set_frame_interval(struct alvium_dev *alvium,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ u64 req_fr, min_fr, max_fr;
+ int ret;
+
+ if (fi->interval.denominator == 0)
+ return -EINVAL;
+
+ ret = alvium_get_frame_interval(alvium);
+ if (ret) {
+ dev_err(dev, "Fail to get frame interval\n");
+ return ret;
+ }
+
+ min_fr = alvium->min_fr;
+ max_fr = alvium->max_fr;
+
+ dev_dbg(dev, "fi->interval.numerator = %d\n",
+ fi->interval.numerator);
+ dev_dbg(dev, "fi->interval.denominator = %d\n",
+ fi->interval.denominator);
+
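+	/*
+	 * Convert the requested interval (numerator/denominator seconds
+	 * per frame) into a frame rate for the BCRM register; the
+	 * USEC_PER_SEC scaling suggests the register takes the rate in
+	 * uHz (an assumption based on this driver's usage, not verified
+	 * against the Alvium BCRM documentation).
+	 */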
+	req_fr = div_u64((u64)fi->interval.denominator * USEC_PER_SEC,
+			 fi->interval.numerator);
+
+	if (req_fr < min_fr || req_fr > max_fr)
+		req_fr = alvium->dft_fr;
+
+ alvium->fr = req_fr;
+ alvium->frame_interval.numerator = fi->interval.numerator;
+ alvium->frame_interval.denominator = fi->interval.denominator;
+
+ return 0;
+}
+
+static int alvium_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ int ret;
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (alvium->streaming)
+ return -EBUSY;
+
+ ret = alvium_set_frame_interval(alvium, fi);
+ if (!ret)
+ ret = alvium_set_frame_rate(alvium);
+
+ return ret;
+}
+
+static int alvium_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+
+ if (code->index >= alvium->alvium_csi2_fmt_n)
+ return -EINVAL;
+
+ code->code = alvium->alvium_csi2_fmt[code->index].code;
+
+ return 0;
+}
+
+static const struct alvium_pixfmt *
+alvium_code_to_pixfmt(struct alvium_dev *alvium, u32 code)
+{
+ unsigned int i;
+
+	/* the allocated format array has no sentinel, so bound by count */
+	for (i = 0; i < alvium->alvium_csi2_fmt_n; ++i)
+ if (alvium->alvium_csi2_fmt[i].code == code)
+ return &alvium->alvium_csi2_fmt[i];
+
+ return &alvium->alvium_csi2_fmt[0];
+}
+
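+/*
+ * Program the active frame geometry on the camera: bound/align the frame
+ * size to the limits read from the BCRM registers, clamp the crop origin
+ * so the window stays inside the sensor, then write width, height and
+ * offsets through the handshake protocol.
+ */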
+static int alvium_set_mode(struct alvium_dev *alvium,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+ int ret;
+
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+
+ v4l_bound_align_image(&fmt->width, alvium->img_min_width,
+ alvium->img_max_width, 0,
+ &fmt->height, alvium->img_min_height,
+ alvium->img_max_height, 0, 0);
+
+	/* the alvium doesn't accept a negative crop left/top */
+ crop->left = clamp((u32)max(0, crop->left), alvium->min_offx,
+ (u32)(alvium->img_max_width - fmt->width));
+ crop->top = clamp((u32)max(0, crop->top), alvium->min_offy,
+ (u32)(alvium->img_max_height - fmt->height));
+
+ ret = alvium_set_img_width(alvium, fmt->width);
+ if (ret)
+ return ret;
+
+ ret = alvium_set_img_height(alvium, fmt->height);
+ if (ret)
+ return ret;
+
+ ret = alvium_set_img_offx(alvium, crop->left);
+ if (ret)
+ return ret;
+
+ ret = alvium_set_img_offy(alvium, crop->top);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int alvium_set_framefmt(struct alvium_dev *alvium,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ const struct alvium_pixfmt *alvium_csi2_fmt;
+ int ret = 0;
+
+ alvium_csi2_fmt = alvium_code_to_pixfmt(alvium, format->code);
+
+ ret = alvium_set_mipi_fmt(alvium, alvium_csi2_fmt);
+ if (ret)
+ return ret;
+
+ if (alvium_csi2_fmt->is_raw) {
+ ret = alvium_set_bayer_pattern(alvium, alvium_csi2_fmt);
+ if (ret)
+ return ret;
+ }
+
+ dev_dbg(dev, "start: %s, mipi_fmt_regval regval = 0x%llx",
+ __func__, alvium_csi2_fmt->mipi_fmt_regval);
+
+ return ret;
+}
+
+static int alvium_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(&alvium->sd);
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ if (enable) {
+		ret = pm_runtime_resume_and_get(&client->dev);
+		if (ret < 0) {
+			v4l2_subdev_unlock_state(state);
+			return ret;
+		}
+
+ ret = __v4l2_ctrl_handler_setup(&alvium->ctrls.handler);
+ if (ret)
+ goto out;
+
+ ret = alvium_set_mode(alvium, state);
+ if (ret)
+ goto out;
+
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ ret = alvium_set_framefmt(alvium, fmt);
+ if (ret)
+ goto out;
+
+ ret = alvium_set_stream_mipi(alvium, enable);
+ if (ret)
+ goto out;
+
+ } else {
+ alvium_set_stream_mipi(alvium, enable);
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+ }
+
+ alvium->streaming = !!enable;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+
+out:
+ pm_runtime_put(&client->dev);
+ v4l2_subdev_unlock_state(state);
+ return ret;
+}
+
+static int alvium_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ struct alvium_mode *mode = &alvium->mode;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .format = alvium_csi2_default_fmt,
+ };
+ struct v4l2_subdev_crop sd_crop = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .rect = {
+ .left = mode->crop.left,
+ .top = mode->crop.top,
+ .width = mode->crop.width,
+ .height = mode->crop.height,
+ },
+ };
+
+ *v4l2_subdev_state_get_crop(state, 0) = sd_crop.rect;
+ *v4l2_subdev_state_get_format(state, 0) = sd_fmt.format;
+
+ return 0;
+}
+
+static int alvium_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ const struct alvium_pixfmt *alvium_csi2_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ crop = v4l2_subdev_state_get_crop(sd_state, 0);
+
+ v4l_bound_align_image(&format->format.width, alvium->img_min_width,
+ alvium->img_max_width, 0,
+ &format->format.height, alvium->img_min_height,
+ alvium->img_max_height, 0, 0);
+
+	/* Adjust left and top to keep the crop inside the sensor area */
+ crop->left = clamp((u32)crop->left, (u32)0,
+ (alvium->img_max_width - fmt->width));
+ crop->top = clamp((u32)crop->top, (u32)0,
+ (alvium->img_max_height - fmt->height));
+
+	/* Also update the crop width and height when setting a new fmt */
+ crop->width = fmt->width;
+ crop->height = fmt->height;
+
+ alvium_csi2_fmt = alvium_code_to_pixfmt(alvium, format->format.code);
+	format->format.code = alvium_csi2_fmt->code;
+
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int alvium_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+
+	/*
+	 * The alvium can only shift the origin of the image, so only
+	 * selection rectangles with the same size as the current format
+	 * are accepted.
+	 */
+ if (sel->r.width != fmt->width)
+ sel->r.width = fmt->width;
+
+ if (sel->r.height != fmt->height)
+ sel->r.height = fmt->height;
+
+	/* the alvium doesn't accept a negative crop left/top */
+ crop->left = clamp((u32)max(0, sel->r.left), alvium->min_offx,
+ alvium->img_max_width - sel->r.width);
+ crop->top = clamp((u32)max(0, sel->r.top), alvium->min_offy,
+ alvium->img_max_height - sel->r.height);
+
+ sel->r = *crop;
+
+ return 0;
+}
+
+static int alvium_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+
+ switch (sel->target) {
+ /* Current cropping area */
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ break;
+	/* Native sensor frame size */
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = alvium->img_max_width;
+ sel->r.height = alvium->img_max_height;
+ break;
+	/* Cropping bounds and default cropping area */
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = alvium->min_offy;
+ sel->r.left = alvium->min_offx;
+ sel->r.width = alvium->img_max_width;
+ sel->r.height = alvium->img_max_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
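+/*
+ * Gain and exposure are volatile controls: with the auto gain/exposure
+ * clusters enabled the camera updates them on its own, so reads go out
+ * to the hardware instead of returning the cached control value.
+ */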
+static int alvium_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ int val;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ val = alvium_get_gain(alvium);
+ if (val < 0)
+ return val;
+ alvium->ctrls.gain->val = val;
+ break;
+ case V4L2_CID_EXPOSURE:
+ val = alvium_get_exposure(alvium);
+ if (val < 0)
+ return val;
+ alvium->ctrls.exposure->val = val;
+ break;
+ }
+
+ return 0;
+}
+
+static int alvium_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(&alvium->sd);
+ int ret;
+
+	/*
+	 * V4L2 control values are only applied to the hardware while it
+	 * is powered up for streaming.
+	 */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ret = alvium_set_ctrl_gain(alvium, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ ret = alvium_set_ctrl_auto_gain(alvium, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = alvium_set_ctrl_exposure(alvium, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = alvium_set_ctrl_auto_exposure(alvium, ctrl->val);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ ret = alvium_set_ctrl_red_balance_ratio(alvium, ctrl->val);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ ret = alvium_set_ctrl_blue_balance_ratio(alvium, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = alvium_set_ctrl_awb(alvium, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ ret = alvium_set_ctrl_hue(alvium, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ ret = alvium_set_ctrl_contrast(alvium, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ ret = alvium_set_ctrl_saturation(alvium, ctrl->val);
+ break;
+ case V4L2_CID_GAMMA:
+ ret = alvium_set_ctrl_gamma(alvium, ctrl->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ ret = alvium_set_ctrl_sharpness(alvium, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = alvium_set_ctrl_hflip(alvium, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = alvium_set_ctrl_vflip(alvium, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops alvium_ctrl_ops = {
+ .g_volatile_ctrl = alvium_g_volatile_ctrl,
+ .s_ctrl = alvium_s_ctrl,
+};
+
+static int alvium_ctrl_init(struct alvium_dev *alvium)
+{
+ const struct v4l2_ctrl_ops *ops = &alvium_ctrl_ops;
+ struct alvium_ctrls *ctrls = &alvium->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ struct v4l2_fwnode_device_properties props;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 32);
+
+ /* Pixel rate is fixed */
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ ALVIUM_DEFAULT_PIXEL_RATE_MHZ, 1,
+ ALVIUM_DEFAULT_PIXEL_RATE_MHZ);
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* Link freq is fixed */
+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, ops,
+ V4L2_CID_LINK_FREQ,
+ 0, 0, &alvium->link_freq);
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+	/* Auto/manual white balance */
+	if (alvium->avail_ft.auto_whiteb)
+		ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
+						   V4L2_CID_AUTO_WHITE_BALANCE,
+						   0, 1, 1, 1);
+
+	ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops,
+						V4L2_CID_BLUE_BALANCE,
+						alvium->min_bbalance,
+						alvium->max_bbalance,
+						alvium->inc_bbalance,
+						alvium->dft_bbalance);
+	ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops,
+					       V4L2_CID_RED_BALANCE,
+					       alvium->min_rbalance,
+					       alvium->max_rbalance,
+					       alvium->inc_rbalance,
+					       alvium->dft_rbalance);
+
+	/*
+	 * Cluster only after all member controls exist:
+	 * v4l2_ctrl_auto_cluster() snapshots the control pointers at call
+	 * time, so controls created after the call would be left out of
+	 * the cluster.
+	 */
+	if (alvium->avail_ft.auto_whiteb)
+		v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
+
+	/* Auto/manual exposure */
+	if (alvium->avail_ft.auto_exp)
+		ctrls->auto_exp =
+			v4l2_ctrl_new_std_menu(hdl, ops,
+					       V4L2_CID_EXPOSURE_AUTO,
+					       V4L2_EXPOSURE_MANUAL, 0,
+					       V4L2_EXPOSURE_AUTO);
+
+	ctrls->exposure = v4l2_ctrl_new_std(hdl, ops,
+					    V4L2_CID_EXPOSURE,
+					    alvium->min_exp,
+					    alvium->max_exp,
+					    alvium->inc_exp,
+					    alvium->dft_exp);
+	ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	if (alvium->avail_ft.auto_exp)
+		v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+
+	/* Auto/manual gain */
+	if (alvium->avail_ft.auto_gain)
+		ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops,
+						     V4L2_CID_AUTOGAIN,
+						     0, 1, 1, 1);
+
+	if (alvium->avail_ft.gain) {
+		ctrls->gain = v4l2_ctrl_new_std(hdl, ops,
+						V4L2_CID_GAIN,
+						alvium->min_gain,
+						alvium->max_gain,
+						alvium->inc_gain,
+						alvium->dft_gain);
+		ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+	}
+
+	if (alvium->avail_ft.auto_gain)
+		v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+
+ if (alvium->avail_ft.sat)
+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_SATURATION,
+ alvium->min_sat,
+ alvium->max_sat,
+ alvium->inc_sat,
+ alvium->dft_sat);
+
+ if (alvium->avail_ft.hue)
+ ctrls->hue = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_HUE,
+ alvium->min_hue,
+ alvium->max_hue,
+ alvium->inc_hue,
+ alvium->dft_hue);
+
+ if (alvium->avail_ft.contrast)
+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_CONTRAST,
+ alvium->min_contrast,
+ alvium->max_contrast,
+ alvium->inc_contrast,
+ alvium->dft_contrast);
+
+ if (alvium->avail_ft.gamma)
+ ctrls->gamma = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_GAMMA,
+ alvium->min_gamma,
+ alvium->max_gamma,
+ alvium->inc_gamma,
+ alvium->dft_gamma);
+
+ if (alvium->avail_ft.sharp)
+ ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_SHARPNESS,
+ alvium->min_sharp,
+ alvium->max_sharp,
+ alvium->inc_sharp,
+ alvium->dft_sharp);
+
+ if (alvium->avail_ft.rev_x)
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+
+ if (alvium->avail_ft.rev_y)
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_ctrls;
+ }
+
+ ret = v4l2_fwnode_device_parse(&alvium->i2c_client->dev, &props);
+ if (ret)
+ goto free_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
+ if (ret)
+ goto free_ctrls;
+
+ alvium->sd.ctrl_handler = hdl;
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops alvium_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops alvium_video_ops = {
+ .s_stream = alvium_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops alvium_pad_ops = {
+ .enum_mbus_code = alvium_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = alvium_set_fmt,
+ .get_selection = alvium_get_selection,
+ .set_selection = alvium_set_selection,
+ .get_frame_interval = alvium_g_frame_interval,
+ .set_frame_interval = alvium_s_frame_interval,
+};
+
+static const struct v4l2_subdev_internal_ops alvium_internal_ops = {
+ .init_state = alvium_init_state,
+};
+
+static const struct v4l2_subdev_ops alvium_subdev_ops = {
+ .core = &alvium_core_ops,
+ .pad = &alvium_pad_ops,
+ .video = &alvium_video_ops,
+};
+
+static int alvium_subdev_init(struct alvium_dev *alvium)
+{
+ struct i2c_client *client = alvium->i2c_client;
+ struct device *dev = &alvium->i2c_client->dev;
+ struct v4l2_subdev *sd = &alvium->sd;
+ int ret;
+
+	/* Setup initial frame interval */
+ alvium->frame_interval.numerator = 1;
+ alvium->frame_interval.denominator = ALVIUM_DEFAULT_FR_HZ;
+ alvium->fr = ALVIUM_DEFAULT_FR_HZ;
+
+ /* Setup the initial mode */
+ alvium->mode.fmt = alvium_csi2_default_fmt;
+ alvium->mode.width = alvium_csi2_default_fmt.width;
+ alvium->mode.height = alvium_csi2_default_fmt.height;
+ alvium->mode.crop.left = alvium->min_offx;
+ alvium->mode.crop.top = alvium->min_offy;
+ alvium->mode.crop.width = alvium_csi2_default_fmt.width;
+ alvium->mode.crop.height = alvium_csi2_default_fmt.height;
+
+ /* init alvium sd */
+ v4l2_i2c_subdev_init(sd, client, &alvium_subdev_ops);
+
+ sd->internal_ops = &alvium_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+ alvium->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&sd->entity, 1, &alvium->pad);
+ if (ret) {
+ dev_err(dev, "Could not register media entity\n");
+ return ret;
+ }
+
+ ret = alvium_ctrl_init(alvium);
+ if (ret) {
+ dev_err(dev, "Control initialization error %d\n", ret);
+ goto entity_cleanup;
+ }
+
+ alvium->sd.state_lock = alvium->ctrls.handler.lock;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret < 0) {
+ dev_err(dev, "subdev initialization error %d\n", ret);
+ goto err_ctrls;
+ }
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&alvium->ctrls.handler);
+entity_cleanup:
+ media_entity_cleanup(&alvium->sd.entity);
+ return ret;
+}
+
+static void alvium_subdev_cleanup(struct alvium_dev *alvium)
+{
+ v4l2_fwnode_endpoint_free(&alvium->ep);
+ v4l2_subdev_cleanup(&alvium->sd);
+ media_entity_cleanup(&alvium->sd.entity);
+ v4l2_ctrl_handler_free(&alvium->ctrls.handler);
+}
+
+static int alvium_get_dt_data(struct alvium_dev *alvium)
+{
+ struct device *dev = &alvium->i2c_client->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_handle *endpoint;
+
+ if (!fwnode)
+ return -EINVAL;
+
+ /* Only CSI-2 D-PHY is supported for now. */
+ alvium->ep.bus_type = V4L2_MBUS_CSI2_DPHY;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(fwnode, 0, 0, 0);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ if (v4l2_fwnode_endpoint_alloc_parse(endpoint, &alvium->ep)) {
+ dev_err(dev, "could not parse endpoint\n");
+ goto error_out;
+ }
+
+ if (!alvium->ep.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ goto error_out;
+ }
+
+ return 0;
+
+error_out:
+ v4l2_fwnode_endpoint_free(&alvium->ep);
+ fwnode_handle_put(endpoint);
+
+ return -EINVAL;
+}
+
+static int alvium_set_power(struct alvium_dev *alvium, bool on)
+{
+ int ret;
+
+ if (!on)
+ return regulator_disable(alvium->reg_vcc);
+
+ ret = regulator_enable(alvium->reg_vcc);
+ if (ret)
+ return ret;
+
+ /* Wait for the Alvium to boot (it takes about 7 s) */
+ msleep(7000);
+ return 0;
+}
+
+static int alvium_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ int ret;
+
+ ret = alvium_set_power(alvium, true);
+ if (ret)
+ return ret;
+
+ ret = alvium_hw_init(alvium);
+ if (ret) {
+ alvium_set_power(alvium, false);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int alvium_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+
+ alvium_set_power(alvium, false);
+
+ return 0;
+}
+
+static const struct dev_pm_ops alvium_pm_ops = {
+ RUNTIME_PM_OPS(alvium_runtime_suspend, alvium_runtime_resume, NULL)
+};
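
These callbacks are invoked by the PM core rather than called directly. A minimal sketch of how a hardware-touching path would bracket its access with runtime PM, assuming the standard helpers; alvium_start_streaming() is a hypothetical placeholder:

static int example_s_stream_on(struct alvium_dev *alvium)
{
	struct device *dev = &alvium->i2c_client->dev;
	int ret;

	/* Resumes the device if suspended, ending up in alvium_runtime_resume(). */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	ret = alvium_start_streaming(alvium);	/* hypothetical helper */
	if (ret)
		pm_runtime_put(dev);	/* drop the reference on failure */

	return ret;
}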
+
+static int alvium_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct alvium_dev *alvium;
+ int ret;
+
+ alvium = devm_kzalloc(dev, sizeof(*alvium), GFP_KERNEL);
+ if (!alvium)
+ return -ENOMEM;
+
+ alvium->i2c_client = client;
+
+ alvium->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(alvium->regmap))
+ return PTR_ERR(alvium->regmap);
+
+ ret = alvium_get_dt_data(alvium);
+ if (ret)
+ return ret;
+
+ alvium->reg_vcc = devm_regulator_get_optional(dev, "vcc-ext-in");
+ if (IS_ERR(alvium->reg_vcc))
+ return dev_err_probe(dev, PTR_ERR(alvium->reg_vcc),
+ "no vcc-ext-in regulator provided\n");
+
+ ret = alvium_set_power(alvium, true);
+ if (ret)
+ goto err_powerdown;
+
+ if (!alvium_is_alive(alvium)) {
+ ret = -ENODEV;
+ dev_err_probe(dev, ret, "Device detection failed\n");
+ goto err_powerdown;
+ }
+
+ ret = alvium_get_hw_info(alvium);
+ if (ret) {
+ dev_err_probe(dev, ret, "get_hw_info fail\n");
+ goto err_powerdown;
+ }
+
+ ret = alvium_hw_init(alvium);
+ if (ret) {
+ dev_err_probe(dev, ret, "hw_init fail\n");
+ goto err_powerdown;
+ }
+
+ ret = alvium_setup_mipi_fmt(alvium);
+ if (ret) {
+ dev_err_probe(dev, ret, "setup_mipi_fmt fail\n");
+ goto err_powerdown;
+ }
+
+ /*
+ * Enable runtime PM without autosuspend:
+ *
+ * Don't use PM autosuspend (the Alvium has a ~7 s boot time).
+ * The device has been powered up manually above, so:
+ * - mark it as active
+ * - increase the usage count without resuming the device.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+
+ /* Initialize the V4L2 subdev. */
+ ret = alvium_subdev_init(alvium);
+ if (ret)
+ goto err_pm;
+
+ ret = v4l2_async_register_subdev(&alvium->sd);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Could not register v4l2 device\n");
+ goto err_subdev;
+ }
+
+ return 0;
+
+err_subdev:
+ alvium_subdev_cleanup(alvium);
+err_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ kfree(alvium->alvium_csi2_fmt);
+err_powerdown:
+ alvium_set_power(alvium, false);
+
+ return ret;
+}
+
+static void alvium_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct alvium_dev *alvium = sd_to_alvium(sd);
+ struct device *dev = &alvium->i2c_client->dev;
+
+ v4l2_async_unregister_subdev(sd);
+ alvium_subdev_cleanup(alvium);
+ kfree(alvium->alvium_csi2_fmt);
+ /*
+ * Disable runtime PM. If runtime PM is disabled in the kernel,
+ * make sure the device is powered off manually.
+ */
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ alvium_set_power(alvium, false);
+ pm_runtime_set_suspended(dev);
+}
+
+static const struct of_device_id alvium_of_ids[] = {
+ { .compatible = "alliedvision,alvium-csi2", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, alvium_of_ids);
+
+static struct i2c_driver alvium_i2c_driver = {
+ .driver = {
+ .name = "alvium-csi2",
+ .pm = pm_ptr(&alvium_pm_ops),
+ .of_match_table = alvium_of_ids,
+ },
+ .probe = alvium_probe,
+ .remove = alvium_remove,
+};
+
+module_i2c_driver(alvium_i2c_driver);
+
+MODULE_DESCRIPTION("Allied Vision's Alvium Camera Driver");
+MODULE_AUTHOR("Tommaso Merciai <tomm.merciai@gmail.com>");
+MODULE_AUTHOR("Martin Hecht <martin.hecht@avnet.eu>");
+MODULE_AUTHOR("Avnet Silica Software & Services EMEA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/alvium-csi2.h b/drivers/media/i2c/alvium-csi2.h
new file mode 100644
index 000000000..b85a25169
--- /dev/null
+++ b/drivers/media/i2c/alvium-csi2.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Allied Vision Technologies GmbH Alvium camera driver
+ *
+ * Copyright (C) 2023 Tommaso Merciai
+ * Copyright (C) 2023 Martin Hecht
+ * Copyright (C) 2023 Avnet EMG GmbH
+ */
+
+#ifndef ALVIUM_CSI2_H_
+#define ALVIUM_CSI2_H_
+
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define REG_BCRM_V4L2 BIT(31)
+
+#define REG_BCRM_V4L2_8BIT(n) (REG_BCRM_V4L2 | CCI_REG8(n))
+#define REG_BCRM_V4L2_16BIT(n) (REG_BCRM_V4L2 | CCI_REG16(n))
+#define REG_BCRM_V4L2_32BIT(n) (REG_BCRM_V4L2 | CCI_REG32(n))
+#define REG_BCRM_V4L2_64BIT(n) (REG_BCRM_V4L2 | CCI_REG64(n))
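
REG_BCRM_V4L2 claims bit 31 of the register constant as a driver-private marker: addresses carrying it are offsets into the BCRM and need the per-device base address (bcrm_addr, probed at runtime) added before the CCI access. A hedged sketch of the presumed resolution step; the driver's real read helper is not part of this excerpt, and the sketch assumes BCRM offsets stay within the 16-bit CCI address field:

/* Hypothetical helper: resolve a BCRM-relative register and read it. */
static int example_bcrm_read(struct alvium_dev *alvium, u32 reg, u64 *val)
{
	if (reg & REG_BCRM_V4L2)
		reg = (reg & ~REG_BCRM_V4L2) + alvium->bcrm_addr;

	return cci_read(alvium->regmap, reg, val, NULL);
}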
+
+/* Basic Control Register Map register offsets (BCRM) */
+#define REG_BCRM_MINOR_VERSION_R CCI_REG16(0x0000)
+#define REG_BCRM_MAJOR_VERSION_R CCI_REG16(0x0002)
+#define REG_BCRM_REG_ADDR_R CCI_REG16(0x0014)
+
+#define REG_BCRM_FEATURE_INQUIRY_R REG_BCRM_V4L2_64BIT(0x0008)
+#define REG_BCRM_DEVICE_FW_SPEC_VERSION_R REG_BCRM_V4L2_8BIT(0x0010)
+#define REG_BCRM_DEVICE_FW_MAJOR_VERSION_R REG_BCRM_V4L2_8BIT(0x0011)
+#define REG_BCRM_DEVICE_FW_MINOR_VERSION_R REG_BCRM_V4L2_16BIT(0x0012)
+#define REG_BCRM_DEVICE_FW_PATCH_VERSION_R REG_BCRM_V4L2_32BIT(0x0014)
+#define REG_BCRM_WRITE_HANDSHAKE_RW REG_BCRM_V4L2_8BIT(0x0018)
+
+/* Streaming Control Registers */
+#define REG_BCRM_SUPPORTED_CSI2_LANE_COUNTS_R REG_BCRM_V4L2_8BIT(0x0040)
+#define REG_BCRM_CSI2_LANE_COUNT_RW REG_BCRM_V4L2_8BIT(0x0044)
+#define REG_BCRM_CSI2_CLOCK_MIN_R REG_BCRM_V4L2_32BIT(0x0048)
+#define REG_BCRM_CSI2_CLOCK_MAX_R REG_BCRM_V4L2_32BIT(0x004c)
+#define REG_BCRM_CSI2_CLOCK_RW REG_BCRM_V4L2_32BIT(0x0050)
+#define REG_BCRM_BUFFER_SIZE_R REG_BCRM_V4L2_32BIT(0x0054)
+
+#define REG_BCRM_IPU_X_MIN_W REG_BCRM_V4L2_32BIT(0x0058)
+#define REG_BCRM_IPU_X_MAX_W REG_BCRM_V4L2_32BIT(0x005c)
+#define REG_BCRM_IPU_X_INC_W REG_BCRM_V4L2_32BIT(0x0060)
+#define REG_BCRM_IPU_Y_MIN_W REG_BCRM_V4L2_32BIT(0x0064)
+#define REG_BCRM_IPU_Y_MAX_W REG_BCRM_V4L2_32BIT(0x0068)
+#define REG_BCRM_IPU_Y_INC_W REG_BCRM_V4L2_32BIT(0x006c)
+#define REG_BCRM_IPU_X_R REG_BCRM_V4L2_32BIT(0x0070)
+#define REG_BCRM_IPU_Y_R REG_BCRM_V4L2_32BIT(0x0074)
+
+#define REG_BCRM_PHY_RESET_RW REG_BCRM_V4L2_8BIT(0x0078)
+#define REG_BCRM_LP2HS_DELAY_RW REG_BCRM_V4L2_32BIT(0x007c)
+
+/* Acquisition Control Registers */
+#define REG_BCRM_ACQUISITION_START_RW REG_BCRM_V4L2_8BIT(0x0080)
+#define REG_BCRM_ACQUISITION_STOP_RW REG_BCRM_V4L2_8BIT(0x0084)
+#define REG_BCRM_ACQUISITION_ABORT_RW REG_BCRM_V4L2_8BIT(0x0088)
+#define REG_BCRM_ACQUISITION_STATUS_R REG_BCRM_V4L2_8BIT(0x008c)
+#define REG_BCRM_ACQUISITION_FRAME_RATE_RW REG_BCRM_V4L2_64BIT(0x0090)
+#define REG_BCRM_ACQUISITION_FRAME_RATE_MIN_R REG_BCRM_V4L2_64BIT(0x0098)
+#define REG_BCRM_ACQUISITION_FRAME_RATE_MAX_R REG_BCRM_V4L2_64BIT(0x00a0)
+#define REG_BCRM_ACQUISITION_FRAME_RATE_INC_R REG_BCRM_V4L2_64BIT(0x00a8)
+#define REG_BCRM_ACQUISITION_FRAME_RATE_ENABLE_RW REG_BCRM_V4L2_8BIT(0x00b0)
+
+#define REG_BCRM_FRAME_START_TRIGGER_MODE_RW REG_BCRM_V4L2_8BIT(0x00b4)
+#define REG_BCRM_FRAME_START_TRIGGER_SOURCE_RW REG_BCRM_V4L2_8BIT(0x00b8)
+#define REG_BCRM_FRAME_START_TRIGGER_ACTIVATION_RW REG_BCRM_V4L2_8BIT(0x00bc)
+#define REG_BCRM_FRAME_START_TRIGGER_SOFTWARE_W REG_BCRM_V4L2_8BIT(0x00c0)
+#define REG_BCRM_FRAME_START_TRIGGER_DELAY_RW REG_BCRM_V4L2_32BIT(0x00c4)
+#define REG_BCRM_EXPOSURE_ACTIVE_LINE_MODE_RW REG_BCRM_V4L2_8BIT(0x00c8)
+#define REG_BCRM_EXPOSURE_ACTIVE_LINE_SELECTOR_RW REG_BCRM_V4L2_8BIT(0x00cc)
+#define REG_BCRM_LINE_CONFIGURATION_RW REG_BCRM_V4L2_32BIT(0x00d0)
+
+#define REG_BCRM_IMG_WIDTH_RW REG_BCRM_V4L2_32BIT(0x0100)
+#define REG_BCRM_IMG_WIDTH_MIN_R REG_BCRM_V4L2_32BIT(0x0104)
+#define REG_BCRM_IMG_WIDTH_MAX_R REG_BCRM_V4L2_32BIT(0x0108)
+#define REG_BCRM_IMG_WIDTH_INC_R REG_BCRM_V4L2_32BIT(0x010c)
+
+#define REG_BCRM_IMG_HEIGHT_RW REG_BCRM_V4L2_32BIT(0x0110)
+#define REG_BCRM_IMG_HEIGHT_MIN_R REG_BCRM_V4L2_32BIT(0x0114)
+#define REG_BCRM_IMG_HEIGHT_MAX_R REG_BCRM_V4L2_32BIT(0x0118)
+#define REG_BCRM_IMG_HEIGHT_INC_R REG_BCRM_V4L2_32BIT(0x011c)
+
+#define REG_BCRM_IMG_OFFSET_X_RW REG_BCRM_V4L2_32BIT(0x0120)
+#define REG_BCRM_IMG_OFFSET_X_MIN_R REG_BCRM_V4L2_32BIT(0x0124)
+#define REG_BCRM_IMG_OFFSET_X_MAX_R REG_BCRM_V4L2_32BIT(0x0128)
+#define REG_BCRM_IMG_OFFSET_X_INC_R REG_BCRM_V4L2_32BIT(0x012c)
+
+#define REG_BCRM_IMG_OFFSET_Y_RW REG_BCRM_V4L2_32BIT(0x0130)
+#define REG_BCRM_IMG_OFFSET_Y_MIN_R REG_BCRM_V4L2_32BIT(0x0134)
+#define REG_BCRM_IMG_OFFSET_Y_MAX_R REG_BCRM_V4L2_32BIT(0x0138)
+#define REG_BCRM_IMG_OFFSET_Y_INC_R REG_BCRM_V4L2_32BIT(0x013c)
+
+#define REG_BCRM_IMG_MIPI_DATA_FORMAT_RW REG_BCRM_V4L2_32BIT(0x0140)
+#define REG_BCRM_IMG_AVAILABLE_MIPI_DATA_FORMATS_R REG_BCRM_V4L2_64BIT(0x0148)
+#define REG_BCRM_IMG_BAYER_PATTERN_INQUIRY_R REG_BCRM_V4L2_8BIT(0x0150)
+#define REG_BCRM_IMG_BAYER_PATTERN_RW REG_BCRM_V4L2_8BIT(0x0154)
+#define REG_BCRM_IMG_REVERSE_X_RW REG_BCRM_V4L2_8BIT(0x0158)
+#define REG_BCRM_IMG_REVERSE_Y_RW REG_BCRM_V4L2_8BIT(0x015c)
+
+#define REG_BCRM_SENSOR_WIDTH_R REG_BCRM_V4L2_32BIT(0x0160)
+#define REG_BCRM_SENSOR_HEIGHT_R REG_BCRM_V4L2_32BIT(0x0164)
+#define REG_BCRM_WIDTH_MAX_R REG_BCRM_V4L2_32BIT(0x0168)
+#define REG_BCRM_HEIGHT_MAX_R REG_BCRM_V4L2_32BIT(0x016c)
+
+#define REG_BCRM_EXPOSURE_TIME_RW REG_BCRM_V4L2_64BIT(0x0180)
+#define REG_BCRM_EXPOSURE_TIME_MIN_R REG_BCRM_V4L2_64BIT(0x0188)
+#define REG_BCRM_EXPOSURE_TIME_MAX_R REG_BCRM_V4L2_64BIT(0x0190)
+#define REG_BCRM_EXPOSURE_TIME_INC_R REG_BCRM_V4L2_64BIT(0x0198)
+#define REG_BCRM_EXPOSURE_AUTO_RW REG_BCRM_V4L2_8BIT(0x01a0)
+
+#define REG_BCRM_INTENSITY_AUTO_PRECEDENCE_RW REG_BCRM_V4L2_8BIT(0x01a4)
+#define REG_BCRM_INTENSITY_AUTO_PRECEDENCE_VALUE_RW REG_BCRM_V4L2_32BIT(0x01a8)
+#define REG_BCRM_INTENSITY_AUTO_PRECEDENCE_MIN_R REG_BCRM_V4L2_32BIT(0x01ac)
+#define REG_BCRM_INTENSITY_AUTO_PRECEDENCE_MAX_R REG_BCRM_V4L2_32BIT(0x01b0)
+#define REG_BCRM_INTENSITY_AUTO_PRECEDENCE_INC_R REG_BCRM_V4L2_32BIT(0x01b4)
+
+#define REG_BCRM_BLACK_LEVEL_RW REG_BCRM_V4L2_32BIT(0x01b8)
+#define REG_BCRM_BLACK_LEVEL_MIN_R REG_BCRM_V4L2_32BIT(0x01bc)
+#define REG_BCRM_BLACK_LEVEL_MAX_R REG_BCRM_V4L2_32BIT(0x01c0)
+#define REG_BCRM_BLACK_LEVEL_INC_R REG_BCRM_V4L2_32BIT(0x01c4)
+
+#define REG_BCRM_GAIN_RW REG_BCRM_V4L2_64BIT(0x01c8)
+#define REG_BCRM_GAIN_MIN_R REG_BCRM_V4L2_64BIT(0x01d0)
+#define REG_BCRM_GAIN_MAX_R REG_BCRM_V4L2_64BIT(0x01d8)
+#define REG_BCRM_GAIN_INC_R REG_BCRM_V4L2_64BIT(0x01e0)
+#define REG_BCRM_GAIN_AUTO_RW REG_BCRM_V4L2_8BIT(0x01e8)
+
+#define REG_BCRM_GAMMA_RW REG_BCRM_V4L2_64BIT(0x01f0)
+#define REG_BCRM_GAMMA_MIN_R REG_BCRM_V4L2_64BIT(0x01f8)
+#define REG_BCRM_GAMMA_MAX_R REG_BCRM_V4L2_64BIT(0x0200)
+#define REG_BCRM_GAMMA_INC_R REG_BCRM_V4L2_64BIT(0x0208)
+
+#define REG_BCRM_CONTRAST_VALUE_RW REG_BCRM_V4L2_32BIT(0x0214)
+#define REG_BCRM_CONTRAST_VALUE_MIN_R REG_BCRM_V4L2_32BIT(0x0218)
+#define REG_BCRM_CONTRAST_VALUE_MAX_R REG_BCRM_V4L2_32BIT(0x021c)
+#define REG_BCRM_CONTRAST_VALUE_INC_R REG_BCRM_V4L2_32BIT(0x0220)
+
+#define REG_BCRM_SATURATION_RW REG_BCRM_V4L2_32BIT(0x0240)
+#define REG_BCRM_SATURATION_MIN_R REG_BCRM_V4L2_32BIT(0x0244)
+#define REG_BCRM_SATURATION_MAX_R REG_BCRM_V4L2_32BIT(0x0248)
+#define REG_BCRM_SATURATION_INC_R REG_BCRM_V4L2_32BIT(0x024c)
+
+#define REG_BCRM_HUE_RW REG_BCRM_V4L2_32BIT(0x0250)
+#define REG_BCRM_HUE_MIN_R REG_BCRM_V4L2_32BIT(0x0254)
+#define REG_BCRM_HUE_MAX_R REG_BCRM_V4L2_32BIT(0x0258)
+#define REG_BCRM_HUE_INC_R REG_BCRM_V4L2_32BIT(0x025c)
+
+#define REG_BCRM_ALL_BALANCE_RATIO_RW REG_BCRM_V4L2_64BIT(0x0260)
+#define REG_BCRM_ALL_BALANCE_RATIO_MIN_R REG_BCRM_V4L2_64BIT(0x0268)
+#define REG_BCRM_ALL_BALANCE_RATIO_MAX_R REG_BCRM_V4L2_64BIT(0x0270)
+#define REG_BCRM_ALL_BALANCE_RATIO_INC_R REG_BCRM_V4L2_64BIT(0x0278)
+
+#define REG_BCRM_RED_BALANCE_RATIO_RW REG_BCRM_V4L2_64BIT(0x0280)
+#define REG_BCRM_RED_BALANCE_RATIO_MIN_R REG_BCRM_V4L2_64BIT(0x0288)
+#define REG_BCRM_RED_BALANCE_RATIO_MAX_R REG_BCRM_V4L2_64BIT(0x0290)
+#define REG_BCRM_RED_BALANCE_RATIO_INC_R REG_BCRM_V4L2_64BIT(0x0298)
+
+#define REG_BCRM_GREEN_BALANCE_RATIO_RW REG_BCRM_V4L2_64BIT(0x02a0)
+#define REG_BCRM_GREEN_BALANCE_RATIO_MIN_R REG_BCRM_V4L2_64BIT(0x02a8)
+#define REG_BCRM_GREEN_BALANCE_RATIO_MAX_R REG_BCRM_V4L2_64BIT(0x02b0)
+#define REG_BCRM_GREEN_BALANCE_RATIO_INC_R REG_BCRM_V4L2_64BIT(0x02b8)
+
+#define REG_BCRM_BLUE_BALANCE_RATIO_RW REG_BCRM_V4L2_64BIT(0x02c0)
+#define REG_BCRM_BLUE_BALANCE_RATIO_MIN_R REG_BCRM_V4L2_64BIT(0x02c8)
+#define REG_BCRM_BLUE_BALANCE_RATIO_MAX_R REG_BCRM_V4L2_64BIT(0x02d0)
+#define REG_BCRM_BLUE_BALANCE_RATIO_INC_R REG_BCRM_V4L2_64BIT(0x02d8)
+
+#define REG_BCRM_WHITE_BALANCE_AUTO_RW REG_BCRM_V4L2_8BIT(0x02e0)
+#define REG_BCRM_SHARPNESS_RW REG_BCRM_V4L2_32BIT(0x0300)
+#define REG_BCRM_SHARPNESS_MIN_R REG_BCRM_V4L2_32BIT(0x0304)
+#define REG_BCRM_SHARPNESS_MAX_R REG_BCRM_V4L2_32BIT(0x0308)
+#define REG_BCRM_SHARPNESS_INC_R REG_BCRM_V4L2_32BIT(0x030c)
+
+#define REG_BCRM_DEVICE_TEMPERATURE_R REG_BCRM_V4L2_32BIT(0x0310)
+#define REG_BCRM_EXPOSURE_AUTO_MIN_RW REG_BCRM_V4L2_64BIT(0x0330)
+#define REG_BCRM_EXPOSURE_AUTO_MAX_RW REG_BCRM_V4L2_64BIT(0x0338)
+#define REG_BCRM_GAIN_AUTO_MIN_RW REG_BCRM_V4L2_64BIT(0x0340)
+#define REG_BCRM_GAIN_AUTO_MAX_RW REG_BCRM_V4L2_64BIT(0x0348)
+
+/* Heartbeat register */
+#define REG_BCRM_HEARTBEAT_RW CCI_REG8(0x021f)
+
+/* GenCP Registers */
+#define REG_GENCP_CHANGEMODE_W CCI_REG8(0x021c)
+#define REG_GENCP_CURRENTMODE_R CCI_REG8(0x021d)
+#define REG_GENCP_IN_HANDSHAKE_RW CCI_REG8(0x001c)
+#define REG_GENCP_OUT_SIZE_W CCI_REG16(0x0020)
+#define REG_GENCP_IN_SIZE_R CCI_REG16(0x0024)
+
+/* Handshake masks and driver defaults */
+#define REG_BCRM_HANDSHAKE_STATUS_MASK 0x01
+#define REG_BCRM_HANDSHAKE_AVAILABLE_MASK 0x80
+
+#define BCRM_HANDSHAKE_W_DONE_EN_BIT BIT(0)
+
+#define ALVIUM_DEFAULT_FR_HZ 10
+#define ALVIUM_DEFAULT_PIXEL_RATE_MHZ 148000000
+
+#define ALVIUM_LP2HS_DELAY_MS 100
+
+enum alvium_bcrm_mode {
+ ALVIUM_BCM_MODE,
+ ALVIUM_GENCP_MODE,
+ ALVIUM_NUM_MODE
+};
+
+enum alvium_mipi_fmt {
+ ALVIUM_FMT_UYVY8_2X8 = 0,
+ ALVIUM_FMT_UYVY8_1X16,
+ ALVIUM_FMT_YUYV8_1X16,
+ ALVIUM_FMT_YUYV8_2X8,
+ ALVIUM_FMT_YUYV10_1X20,
+ ALVIUM_FMT_RGB888_1X24,
+ ALVIUM_FMT_RBG888_1X24,
+ ALVIUM_FMT_BGR888_1X24,
+ ALVIUM_FMT_RGB888_3X8,
+ ALVIUM_FMT_Y8_1X8,
+ ALVIUM_FMT_SGRBG8_1X8,
+ ALVIUM_FMT_SRGGB8_1X8,
+ ALVIUM_FMT_SGBRG8_1X8,
+ ALVIUM_FMT_SBGGR8_1X8,
+ ALVIUM_FMT_Y10_1X10,
+ ALVIUM_FMT_SGRBG10_1X10,
+ ALVIUM_FMT_SRGGB10_1X10,
+ ALVIUM_FMT_SGBRG10_1X10,
+ ALVIUM_FMT_SBGGR10_1X10,
+ ALVIUM_FMT_Y12_1X12,
+ ALVIUM_FMT_SGRBG12_1X12,
+ ALVIUM_FMT_SRGGB12_1X12,
+ ALVIUM_FMT_SGBRG12_1X12,
+ ALVIUM_FMT_SBGGR12_1X12,
+ ALVIUM_FMT_SBGGR14_1X14,
+ ALVIUM_FMT_SGBRG14_1X14,
+ ALVIUM_FMT_SRGGB14_1X14,
+ ALVIUM_FMT_SGRBG14_1X14,
+ ALVIUM_NUM_SUPP_MIPI_DATA_FMT
+};
+
+enum alvium_av_bayer_bit {
+ ALVIUM_BIT_BAY_NONE = -1,
+ ALVIUM_BIT_BAY_MONO = 0,
+ ALVIUM_BIT_BAY_GR,
+ ALVIUM_BIT_BAY_RG,
+ ALVIUM_BIT_BAY_GB,
+ ALVIUM_BIT_BAY_BG,
+ ALVIUM_NUM_BAY_AV_BIT
+};
+
+enum alvium_av_mipi_bit {
+ ALVIUM_BIT_YUV420_8_LEG = 0,
+ ALVIUM_BIT_YUV420_8,
+ ALVIUM_BIT_YUV420_10,
+ ALVIUM_BIT_YUV420_8_CSPS,
+ ALVIUM_BIT_YUV420_10_CSPS,
+ ALVIUM_BIT_YUV422_8,
+ ALVIUM_BIT_YUV422_10,
+ ALVIUM_BIT_RGB888,
+ ALVIUM_BIT_RGB666,
+ ALVIUM_BIT_RGB565,
+ ALVIUM_BIT_RGB555,
+ ALVIUM_BIT_RGB444,
+ ALVIUM_BIT_RAW6,
+ ALVIUM_BIT_RAW7,
+ ALVIUM_BIT_RAW8,
+ ALVIUM_BIT_RAW10,
+ ALVIUM_BIT_RAW12,
+ ALVIUM_BIT_RAW14,
+ ALVIUM_BIT_JPEG,
+ ALVIUM_NUM_SUPP_MIPI_DATA_BIT
+};
+
+struct alvium_avail_feat {
+ u64 rev_x:1;
+ u64 rev_y:1;
+ u64 int_autop:1;
+ u64 black_lvl:1;
+ u64 gain:1;
+ u64 gamma:1;
+ u64 contrast:1;
+ u64 sat:1;
+ u64 hue:1;
+ u64 whiteb:1;
+ u64 sharp:1;
+ u64 auto_exp:1;
+ u64 auto_gain:1;
+ u64 auto_whiteb:1;
+ u64 dev_temp:1;
+ u64 acq_abort:1;
+ u64 acq_fr:1;
+ u64 fr_trigger:1;
+ u64 exp_acq_line:1;
+ u64 reserved:45;
+};
+
+struct alvium_avail_mipi_fmt {
+ u64 yuv420_8_leg:1;
+ u64 yuv420_8:1;
+ u64 yuv420_10:1;
+ u64 yuv420_8_csps:1;
+ u64 yuv420_10_csps:1;
+ u64 yuv422_8:1;
+ u64 yuv422_10:1;
+ u64 rgb888:1;
+ u64 rgb666:1;
+ u64 rgb565:1;
+ u64 rgb555:1;
+ u64 rgb444:1;
+ u64 raw6:1;
+ u64 raw7:1;
+ u64 raw8:1;
+ u64 raw10:1;
+ u64 raw12:1;
+ u64 raw14:1;
+ u64 jpeg:1;
+ u64 reserved:45;
+};
+
+struct alvium_avail_bayer {
+ u8 mono:1;
+ u8 gr:1;
+ u8 rg:1;
+ u8 gb:1;
+ u8 bg:1;
+ u8 reserved:3;
+};
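
These three bitfield structures mirror, bit for bit, the 64-bit feature-inquiry register, the 64-bit available-MIPI-data-formats register and the 8-bit Bayer-pattern-inquiry register. A hedged sketch of the presumed decode step, reusing the hypothetical example_bcrm_read() helper sketched earlier; C bitfield order is implementation-defined, so the overlay assumes the little-endian layout the driver targets:

static int example_read_features(struct alvium_dev *alvium)
{
	u64 val;
	int ret;

	ret = example_bcrm_read(alvium, REG_BCRM_FEATURE_INQUIRY_R, &val);
	if (ret)
		return ret;

	/* Overlay the raw register bits onto the bitfield view. */
	memcpy(&alvium->avail_ft, &val, sizeof(val));

	return 0;
}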
+
+struct alvium_mode {
+ struct v4l2_rect crop;
+ struct v4l2_mbus_framefmt fmt;
+ u32 width;
+ u32 height;
+};
+
+struct alvium_pixfmt {
+ u32 code;
+ u32 colorspace;
+ u64 mipi_fmt_regval;
+ u64 bay_fmt_regval;
+ u8 id;
+ u8 is_raw;
+ u8 fmt_av_bit;
+ u8 bay_av_bit;
+};
+
+struct alvium_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *auto_wb;
+ struct v4l2_ctrl *blue_balance;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *gamma;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+};
+
+struct alvium_dev {
+ struct i2c_client *i2c_client;
+ struct v4l2_subdev sd;
+ struct v4l2_fwnode_endpoint ep;
+ struct media_pad pad;
+ struct regmap *regmap;
+
+ struct regulator *reg_vcc;
+
+ u16 bcrm_addr;
+
+ struct alvium_avail_feat avail_ft;
+ u8 is_mipi_fmt_avail[ALVIUM_NUM_SUPP_MIPI_DATA_BIT];
+ u8 is_bay_avail[ALVIUM_NUM_BAY_AV_BIT];
+
+ u32 min_csi_clk;
+ u32 max_csi_clk;
+ u32 dft_img_width;
+ u32 img_min_width;
+ u32 img_max_width;
+ u32 img_inc_width;
+ u32 dft_img_height;
+ u32 img_min_height;
+ u32 img_max_height;
+ u32 img_inc_height;
+ u32 min_offx;
+ u32 max_offx;
+ u32 inc_offx;
+ u32 min_offy;
+ u32 max_offy;
+ u32 inc_offy;
+ u64 dft_gain;
+ u64 min_gain;
+ u64 max_gain;
+ u64 inc_gain;
+ u64 dft_exp;
+ u64 min_exp;
+ u64 max_exp;
+ u64 inc_exp;
+ u64 dft_rbalance;
+ u64 min_rbalance;
+ u64 max_rbalance;
+ u64 inc_rbalance;
+ u64 dft_bbalance;
+ u64 min_bbalance;
+ u64 max_bbalance;
+ u64 inc_bbalance;
+ s32 dft_hue;
+ s32 min_hue;
+ s32 max_hue;
+ s32 inc_hue;
+ u32 dft_contrast;
+ u32 min_contrast;
+ u32 max_contrast;
+ u32 inc_contrast;
+ u32 dft_sat;
+ u32 min_sat;
+ u32 max_sat;
+ u32 inc_sat;
+ s32 dft_black_lvl;
+ s32 min_black_lvl;
+ s32 max_black_lvl;
+ s32 inc_black_lvl;
+ u64 dft_gamma;
+ u64 min_gamma;
+ u64 max_gamma;
+ u64 inc_gamma;
+ s32 dft_sharp;
+ s32 min_sharp;
+ s32 max_sharp;
+ s32 inc_sharp;
+
+ struct alvium_mode mode;
+ struct v4l2_fract frame_interval;
+ u64 dft_fr;
+ u64 min_fr;
+ u64 max_fr;
+ u64 fr;
+
+ u8 h_sup_csi_lanes;
+ u64 link_freq;
+
+ struct alvium_ctrls ctrls;
+
+ u8 bcrm_mode;
+
+ struct alvium_pixfmt *alvium_csi2_fmt;
+ u8 alvium_csi2_fmt_n;
+
+ u8 streaming;
+ u8 apply_fiv;
+};
+
+static inline struct alvium_dev *sd_to_alvium(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct alvium_dev, sd);
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of_const(ctrl->handler, struct alvium_dev,
+ ctrls.handler)->sd;
+}
+#endif /* ALVIUM_CSI2_H_ */
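
The container_of_const() accessors above are the usual V4L2 upcast helpers. A brief sketch of how a control callback would typically use them; alvium_s_ctrl itself is outside this excerpt, so the body is illustrative only:

static int example_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
	struct alvium_dev *alvium = sd_to_alvium(sd);

	dev_dbg(&alvium->i2c_client->dev, "ctrl 0x%x set to %d\n",
		ctrl->id, ctrl->val);

	/* e.g. for V4L2_CID_VFLIP, write ctrl->val to REG_BCRM_IMG_REVERSE_Y_RW */
	return 0;
}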
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 701f36345..c7d5fa532 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -446,8 +446,7 @@ static int ar0521_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state, 0
- /* pad */);
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
else
fmt = &sensor->fmt;
@@ -472,7 +471,7 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, 0 /* pad */);
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
*fmt = format->format;
mutex_unlock(&sensor->lock);
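
The ar0521 hunks above are part of a tree-wide rename: v4l2_subdev_get_try_format() and the other pad-state accessors became v4l2_subdev_state_get_*() and lost the subdev argument, since the state itself carries everything needed. A minimal sketch of the new-style accessor in a single-pad get_fmt handler:

static int example_get_fmt(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *format)
{
	/* One accessor now serves both TRY and ACTIVE states. */
	format->format = *v4l2_subdev_state_get_format(sd_state, 0);

	return 0;
}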
diff --git a/drivers/media/i2c/ccs/Kconfig b/drivers/media/i2c/ccs/Kconfig
index b55c93a2e..710a729ae 100644
--- a/drivers/media/i2c/ccs/Kconfig
+++ b/drivers/media/i2c/ccs/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_CCS
tristate "MIPI CCS/SMIA++/SMIA sensor support"
depends on HAVE_CLK
+ select V4L2_CCI_I2C
select VIDEO_CCS_PLL
help
This is a generic driver for MIPI CCS, SMIA++ and SMIA compliant
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 12e6f0a26..e21287d50 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -25,8 +25,9 @@
#include <linux/slab.h>
#include <linux/smiapp.h>
#include <linux/v4l2-mediabus.h>
-#include <media/v4l2-fwnode.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <uapi/linux/ccs.h>
#include "ccs.h"
@@ -98,7 +99,7 @@ static int ccs_limit_ptr(struct ccs_sensor *sensor, unsigned int limit,
linfo = &ccs_limits[ccs_limit_offsets[limit].info];
if (WARN_ON(!sensor->ccs_limits) ||
- WARN_ON(offset + ccs_reg_width(linfo->reg) >
+ WARN_ON(offset + CCI_REG_WIDTH_BYTES(linfo->reg) >
ccs_limit_offsets[limit + 1].lim))
return -EINVAL;
@@ -124,7 +125,7 @@ void ccs_replace_limit(struct ccs_sensor *sensor,
dev_dbg(&client->dev, "quirk: 0x%8.8x \"%s\" %u = %u, 0x%x\n",
linfo->reg, linfo->name, offset, val, val);
- ccs_assign_limit(ptr, ccs_reg_width(linfo->reg), val);
+ ccs_assign_limit(ptr, CCI_REG_WIDTH_BYTES(linfo->reg), val);
}
u32 ccs_get_limit(struct ccs_sensor *sensor, unsigned int limit,
@@ -138,7 +139,7 @@ u32 ccs_get_limit(struct ccs_sensor *sensor, unsigned int limit,
if (ret)
return 0;
- switch (ccs_reg_width(ccs_limits[ccs_limit_offsets[limit].info].reg)) {
+ switch (CCI_REG_WIDTH_BYTES(ccs_limits[ccs_limit_offsets[limit].info].reg)) {
case sizeof(u8):
val = *(u8 *)ptr;
break;
@@ -172,9 +173,11 @@ static int ccs_read_all_limits(struct ccs_sensor *sensor)
end = alloc + ccs_limit_offsets[CCS_L_LAST].lim;
+ sensor->ccs_limits = alloc;
+
for (i = 0, l = 0, ptr = alloc; ccs_limits[i].size; i++) {
u32 reg = ccs_limits[i].reg;
- unsigned int width = ccs_reg_width(reg);
+ unsigned int width = CCI_REG_WIDTH_BYTES(reg);
unsigned int j;
if (l == CCS_L_LAST) {
@@ -186,6 +189,7 @@ static int ccs_read_all_limits(struct ccs_sensor *sensor)
for (j = 0; j < ccs_limits[i].size / width;
j++, reg += width, ptr += width) {
+ char str[16] = "";
u32 val;
ret = ccs_read_addr_noconv(sensor, reg, &val);
@@ -204,8 +208,15 @@ static int ccs_read_all_limits(struct ccs_sensor *sensor)
ccs_assign_limit(ptr, width, val);
- dev_dbg(&client->dev, "0x%8.8x \"%s\" = %u, 0x%x\n",
- reg, ccs_limits[i].name, val, val);
+#ifdef CONFIG_DYNAMIC_DEBUG
+ if (reg & (CCS_FL_FLOAT_IREAL | CCS_FL_IREAL))
+ snprintf(str, sizeof(str), ", %u",
+ ccs_reg_conv(sensor, reg, val));
+#endif
+
+ dev_dbg(&client->dev,
+ "0x%8.8x \"%s\" = %u, 0x%x%s\n",
+ reg, ccs_limits[i].name, val, val, str);
}
if (ccs_limits[i].flags & CCS_L_FL_SAME_REG)
@@ -222,14 +233,13 @@ static int ccs_read_all_limits(struct ccs_sensor *sensor)
goto out_err;
}
- sensor->ccs_limits = alloc;
-
if (CCS_LIM(sensor, SCALER_N_MIN) < 16)
ccs_replace_limit(sensor, CCS_L_SCALER_N_MIN, 0, 16);
return 0;
out_err:
+ sensor->ccs_limits = NULL;
kfree(alloc);
return ret;
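
Two things change in the hunks above: ccs_reg_width() gives way to the generic CCI_REG_WIDTH_BYTES() macro, which decodes the access width now encoded in the register constant itself, and sensor->ccs_limits is assigned before the read loop (and reset to NULL on the error path), apparently so that limits already read can be consulted while the rest are still being populated. Illustratively, under the CCI encoding adopted below:

/* The register constant now carries its own width, e.g.: */
BUILD_BUG_ON(CCI_REG_WIDTH_BYTES(CCS_R_MODULE_MODEL_ID) != sizeof(u16));
BUILD_BUG_ON(CCI_REG_WIDTH_BYTES(CCS_R_FRAME_COUNT) != sizeof(u8));
BUILD_BUG_ON(CCI_REG_WIDTH_BYTES(CCS_R_SERIAL_NUMBER) != sizeof(u32));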
@@ -1878,9 +1888,11 @@ static int ccs_pm_get_init(struct ccs_sensor *sensor)
goto error;
/* Device was already active, so don't set controls */
- if (rval == 1)
+ if (rval == 1 && !sensor->handler_setup_needed)
return 0;
+ sensor->handler_setup_needed = false;
+
/* Restore V4L2 controls to the previously suspended device */
rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
if (rval)
@@ -2030,7 +2042,7 @@ static int __ccs_get_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
- fmt->format = *v4l2_subdev_get_pad_format(subdev, sd_state, fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format.code = __ccs_get_mbus_code(subdev, fmt->pad);
return 0;
@@ -2061,10 +2073,10 @@ static void ccs_get_crop_compose(struct v4l2_subdev *subdev,
if (crops)
for (i = 0; i < subdev->entity.num_pads; i++)
crops[i] =
- v4l2_subdev_get_pad_crop(subdev, sd_state, i);
+ v4l2_subdev_state_get_crop(sd_state, i);
if (comps)
- *comps = v4l2_subdev_get_pad_compose(subdev, sd_state,
- ssd->sink_pad);
+ *comps = v4l2_subdev_state_get_compose(sd_state,
+ ssd->sink_pad);
}
/* Changes require propagation only on sink pad. */
@@ -2097,7 +2109,7 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
fallthrough;
case V4L2_SEL_TGT_COMPOSE:
*crops[CCS_PAD_SRC] = *comp;
- fmt = v4l2_subdev_get_pad_format(subdev, sd_state, CCS_PAD_SRC);
+ fmt = v4l2_subdev_state_get_format(sd_state, CCS_PAD_SRC);
fmt->width = comp->width;
fmt->height = comp->height;
if (which == V4L2_SUBDEV_FORMAT_ACTIVE && ssd == sensor->src)
@@ -2507,7 +2519,7 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
if (sel->pad == ssd->sink_pad) {
struct v4l2_mbus_framefmt *mfmt =
- v4l2_subdev_get_pad_format(subdev, sd_state, sel->pad);
+ v4l2_subdev_state_get_format(sd_state, sel->pad);
src_size.width = mfmt->width;
src_size.height = mfmt->height;
@@ -2567,8 +2579,8 @@ static int ccs_get_selection(struct v4l2_subdev *subdev,
ccs_get_native_size(ssd, &sel->r);
} else if (sel->pad == ssd->sink_pad) {
struct v4l2_mbus_framefmt *sink_fmt =
- v4l2_subdev_get_pad_format(subdev, sd_state,
- ssd->sink_pad);
+ v4l2_subdev_state_get_format(sd_state,
+ ssd->sink_pad);
sel->r.top = sel->r.left = 0;
sel->r.width = sink_fmt->width;
sel->r.height = sink_fmt->height;
@@ -2714,66 +2726,54 @@ static int ccs_identify_module(struct ccs_sensor *sensor)
rval = ccs_read(sensor, MODULE_MANUFACTURER_ID,
&minfo->mipi_manufacturer_id);
if (!rval && !minfo->mipi_manufacturer_id)
- rval = ccs_read_addr_8only(sensor,
- SMIAPP_REG_U8_MANUFACTURER_ID,
- &minfo->smia_manufacturer_id);
+ rval = ccs_read_addr(sensor, SMIAPP_REG_U8_MANUFACTURER_ID,
+ &minfo->smia_manufacturer_id);
if (!rval)
- rval = ccs_read_addr_8only(sensor, CCS_R_MODULE_MODEL_ID,
- &minfo->model_id);
+ rval = ccs_read(sensor, MODULE_MODEL_ID, &minfo->model_id);
if (!rval)
- rval = ccs_read_addr_8only(sensor,
- CCS_R_MODULE_REVISION_NUMBER_MAJOR,
- &rev);
+ rval = ccs_read(sensor, MODULE_REVISION_NUMBER_MAJOR, &rev);
if (!rval) {
- rval = ccs_read_addr_8only(sensor,
- CCS_R_MODULE_REVISION_NUMBER_MINOR,
- &minfo->revision_number);
+ rval = ccs_read(sensor, MODULE_REVISION_NUMBER_MINOR,
+ &minfo->revision_number);
minfo->revision_number |= rev << 8;
}
if (!rval)
- rval = ccs_read_addr_8only(sensor, CCS_R_MODULE_DATE_YEAR,
- &minfo->module_year);
+ rval = ccs_read(sensor, MODULE_DATE_YEAR, &minfo->module_year);
if (!rval)
- rval = ccs_read_addr_8only(sensor, CCS_R_MODULE_DATE_MONTH,
- &minfo->module_month);
+ rval = ccs_read(sensor, MODULE_DATE_MONTH,
+ &minfo->module_month);
if (!rval)
- rval = ccs_read_addr_8only(sensor, CCS_R_MODULE_DATE_DAY,
- &minfo->module_day);
+ rval = ccs_read(sensor, MODULE_DATE_DAY, &minfo->module_day);
/* Sensor info */
if (!rval)
rval = ccs_read(sensor, SENSOR_MANUFACTURER_ID,
&minfo->sensor_mipi_manufacturer_id);
if (!rval && !minfo->sensor_mipi_manufacturer_id)
- rval = ccs_read_addr_8only(sensor,
- CCS_R_SENSOR_MANUFACTURER_ID,
- &minfo->sensor_smia_manufacturer_id);
+ rval = ccs_read(sensor, SENSOR_MANUFACTURER_ID,
+ &minfo->sensor_smia_manufacturer_id);
if (!rval)
- rval = ccs_read_addr_8only(sensor,
- CCS_R_SENSOR_MODEL_ID,
- &minfo->sensor_model_id);
+ rval = ccs_read(sensor, SENSOR_MODEL_ID,
+ &minfo->sensor_model_id);
if (!rval)
- rval = ccs_read_addr_8only(sensor,
- CCS_R_SENSOR_REVISION_NUMBER,
- &minfo->sensor_revision_number);
+ rval = ccs_read(sensor, SENSOR_REVISION_NUMBER,
+ &minfo->sensor_revision_number);
if (!rval && !minfo->sensor_revision_number)
- rval = ccs_read_addr_8only(sensor,
- CCS_R_SENSOR_REVISION_NUMBER_16,
- &minfo->sensor_revision_number);
+ rval = ccs_read(sensor, SENSOR_REVISION_NUMBER_16,
+ &minfo->sensor_revision_number);
if (!rval)
- rval = ccs_read_addr_8only(sensor,
- CCS_R_SENSOR_FIRMWARE_VERSION,
- &minfo->sensor_firmware_version);
+ rval = ccs_read(sensor, SENSOR_FIRMWARE_VERSION,
+ &minfo->sensor_firmware_version);
/* SMIA */
if (!rval)
rval = ccs_read(sensor, MIPI_CCS_VERSION, &minfo->ccs_version);
if (!rval && !minfo->ccs_version)
- rval = ccs_read_addr_8only(sensor, SMIAPP_REG_U8_SMIA_VERSION,
- &minfo->smia_version);
+ rval = ccs_read_addr(sensor, SMIAPP_REG_U8_SMIA_VERSION,
+ &minfo->smia_version);
if (!rval && !minfo->ccs_version)
- rval = ccs_read_addr_8only(sensor, SMIAPP_REG_U8_SMIAPP_VERSION,
- &minfo->smiapp_version);
+ rval = ccs_read_addr(sensor, SMIAPP_REG_U8_SMIAPP_VERSION,
+ &minfo->smiapp_version);
if (rval) {
dev_err(&client->dev, "sensor detection failed\n");
@@ -3004,17 +3004,17 @@ static int ccs_init_subdev(struct ccs_sensor *sensor,
return 0;
}
-static int ccs_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ccs_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct ccs_subdev *ssd = to_ccs_subdev(sd);
struct ccs_sensor *sensor = ssd->sensor;
unsigned int pad = ssd == sensor->pixel_array ?
CCS_PA_PAD_SRC : CCS_PAD_SINK;
struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_pad_format(sd, sd_state, pad);
+ v4l2_subdev_state_get_format(sd_state, pad);
struct v4l2_rect *crop =
- v4l2_subdev_get_pad_crop(sd, sd_state, pad);
+ v4l2_subdev_state_get_crop(sd_state, pad);
bool is_active = !sd->active_state || sd->active_state == sd_state;
mutex_lock(&sensor->mutex);
@@ -3034,7 +3034,7 @@ static int ccs_init_cfg(struct v4l2_subdev *sd,
return 0;
}
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, CCS_PAD_SRC);
+ fmt = v4l2_subdev_state_get_format(sd_state, CCS_PAD_SRC);
fmt->code = ssd == sensor->src ?
sensor->csi_format->code : sensor->internal_csi_format->code;
fmt->field = V4L2_FIELD_NONE;
@@ -3053,7 +3053,6 @@ static const struct v4l2_subdev_video_ops ccs_video_ops = {
};
static const struct v4l2_subdev_pad_ops ccs_pad_ops = {
- .init_cfg = ccs_init_cfg,
.enum_mbus_code = ccs_enum_mbus_code,
.get_fmt = ccs_get_format,
.set_fmt = ccs_set_format,
@@ -3077,6 +3076,7 @@ static const struct media_entity_operations ccs_entity_ops = {
};
static const struct v4l2_subdev_internal_ops ccs_internal_src_ops = {
+ .init_state = ccs_init_state,
.registered = ccs_registered,
.unregistered = ccs_unregistered,
};
@@ -3307,6 +3307,13 @@ static int ccs_probe(struct i2c_client *client)
if (IS_ERR(sensor->xshutdown))
return PTR_ERR(sensor->xshutdown);
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap)) {
+ dev_err(&client->dev, "can't initialise CCI (%ld)\n",
+ PTR_ERR(sensor->regmap));
+ return PTR_ERR(sensor->regmap);
+ }
+
rval = ccs_power_on(&client->dev);
if (rval < 0)
return rval;
@@ -3532,6 +3539,7 @@ static int ccs_probe(struct i2c_client *client)
sensor->streaming = false;
sensor->dev_init_done = true;
+ sensor->handler_setup_needed = true;
rval = ccs_write_msr_regs(sensor);
if (rval)
@@ -3636,12 +3644,16 @@ static int ccs_module_init(void)
{
unsigned int i, l;
+ CCS_BUILD_BUG;
+
for (i = 0, l = 0; ccs_limits[i].size && l < CCS_L_LAST; i++) {
if (!(ccs_limits[i].flags & CCS_L_FL_SAME_REG)) {
ccs_limit_offsets[l + 1].lim =
ALIGN(ccs_limit_offsets[l].lim +
ccs_limits[i].size,
- ccs_reg_width(ccs_limits[i + 1].reg));
+ ccs_limits[i + 1].reg ?
+ CCI_REG_WIDTH_BYTES(ccs_limits[i + 1].reg) :
+ 1U);
ccs_limit_offsets[l].info = i;
l++;
} else {
diff --git a/drivers/media/i2c/ccs/ccs-reg-access.c b/drivers/media/i2c/ccs/ccs-reg-access.c
index 25993445f..ed7907550 100644
--- a/drivers/media/i2c/ccs/ccs-reg-access.c
+++ b/drivers/media/i2c/ccs/ccs-reg-access.c
@@ -62,87 +62,6 @@ static u32 float_to_u32_mul_1000000(struct i2c_client *client, u32 phloat)
}
-/*
- * Read a 8/16/32-bit i2c register. The value is returned in 'val'.
- * Returns zero if successful, or non-zero otherwise.
- */
-static int ____ccs_read_addr(struct ccs_sensor *sensor, u16 reg, u16 len,
- u32 *val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- struct i2c_msg msg;
- unsigned char data_buf[sizeof(u32)] = { 0 };
- unsigned char offset_buf[sizeof(u16)];
- int r;
-
- if (len > sizeof(data_buf))
- return -EINVAL;
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = sizeof(offset_buf);
- msg.buf = offset_buf;
- put_unaligned_be16(reg, offset_buf);
-
- r = i2c_transfer(client->adapter, &msg, 1);
- if (r != 1) {
- if (r >= 0)
- r = -EBUSY;
- goto err;
- }
-
- msg.len = len;
- msg.flags = I2C_M_RD;
- msg.buf = &data_buf[sizeof(data_buf) - len];
-
- r = i2c_transfer(client->adapter, &msg, 1);
- if (r != 1) {
- if (r >= 0)
- r = -EBUSY;
- goto err;
- }
-
- *val = get_unaligned_be32(data_buf);
-
- return 0;
-
-err:
- dev_err(&client->dev, "read from offset 0x%x error %d\n", reg, r);
-
- return r;
-}
-
-/* Read a register using 8-bit access only. */
-static int ____ccs_read_addr_8only(struct ccs_sensor *sensor, u16 reg,
- u16 len, u32 *val)
-{
- unsigned int i;
- int rval;
-
- *val = 0;
-
- for (i = 0; i < len; i++) {
- u32 val8;
-
- rval = ____ccs_read_addr(sensor, reg + i, 1, &val8);
- if (rval < 0)
- return rval;
- *val |= val8 << ((len - i - 1) << 3);
- }
-
- return 0;
-}
-
-unsigned int ccs_reg_width(u32 reg)
-{
- if (reg & CCS_FL_16BIT)
- return sizeof(u16);
- if (reg & CCS_FL_32BIT)
- return sizeof(u32);
-
- return sizeof(u8);
-}
-
static u32 ireal32_to_u32_mul_1000000(struct i2c_client *client, u32 val)
{
if (val >> 10 > U32_MAX / 15625) {
@@ -178,29 +97,22 @@ u32 ccs_reg_conv(struct ccs_sensor *sensor, u32 reg, u32 val)
static int __ccs_read_addr(struct ccs_sensor *sensor, u32 reg, u32 *val,
bool only8, bool conv)
{
- unsigned int len = ccs_reg_width(reg);
+ u64 __val;
int rval;
- if (!only8)
- rval = ____ccs_read_addr(sensor, CCS_REG_ADDR(reg), len, val);
- else
- rval = ____ccs_read_addr_8only(sensor, CCS_REG_ADDR(reg), len,
- val);
+ rval = cci_read(sensor->regmap, reg, &__val, NULL);
if (rval < 0)
return rval;
- if (!conv)
- return 0;
-
- *val = ccs_reg_conv(sensor, reg, *val);
+ *val = conv ? ccs_reg_conv(sensor, reg, __val) : __val;
return 0;
}
-static int __ccs_read_data(struct ccs_reg *regs, size_t num_regs,
- u32 reg, u32 *val)
+static int __ccs_static_data_read_ro_reg(struct ccs_reg *regs, size_t num_regs,
+ u32 reg, u32 *val)
{
- unsigned int width = ccs_reg_width(reg);
+ unsigned int width = CCI_REG_WIDTH_BYTES(reg);
size_t i;
for (i = 0; i < num_regs; i++, regs++) {
@@ -235,16 +147,17 @@ static int __ccs_read_data(struct ccs_reg *regs, size_t num_regs,
return -ENOENT;
}
-static int ccs_read_data(struct ccs_sensor *sensor, u32 reg, u32 *val)
+static int
+ccs_static_data_read_ro_reg(struct ccs_sensor *sensor, u32 reg, u32 *val)
{
- if (!__ccs_read_data(sensor->sdata.sensor_read_only_regs,
- sensor->sdata.num_sensor_read_only_regs,
- reg, val))
+ if (!__ccs_static_data_read_ro_reg(sensor->sdata.sensor_read_only_regs,
+ sensor->sdata.num_sensor_read_only_regs,
+ reg, val))
return 0;
- return __ccs_read_data(sensor->mdata.module_read_only_regs,
- sensor->mdata.num_module_read_only_regs,
- reg, val);
+ return __ccs_static_data_read_ro_reg(sensor->mdata.module_read_only_regs,
+ sensor->mdata.num_module_read_only_regs,
+ reg, val);
}
static int ccs_read_addr_raw(struct ccs_sensor *sensor, u32 reg, u32 *val,
@@ -253,7 +166,7 @@ static int ccs_read_addr_raw(struct ccs_sensor *sensor, u32 reg, u32 *val,
int rval;
if (data) {
- rval = ccs_read_data(sensor, reg, val);
+ rval = ccs_static_data_read_ro_reg(sensor, reg, val);
if (!rval)
return 0;
}
@@ -291,71 +204,13 @@ int ccs_read_addr_noconv(struct ccs_sensor *sensor, u32 reg, u32 *val)
return ccs_read_addr_raw(sensor, reg, val, false, true, false, true);
}
-static int ccs_write_retry(struct i2c_client *client, struct i2c_msg *msg)
-{
- unsigned int retries;
- int r;
-
- for (retries = 0; retries < 10; retries++) {
- /*
- * Due to unknown reason sensor stops responding. This
- * loop is a temporaty solution until the root cause
- * is found.
- */
- r = i2c_transfer(client->adapter, msg, 1);
- if (r != 1) {
- usleep_range(1000, 2000);
- continue;
- }
-
- if (retries)
- dev_err(&client->dev,
- "sensor i2c stall encountered. retries: %d\n",
- retries);
- return 0;
- }
-
- return r;
-}
-
-int ccs_write_addr_no_quirk(struct ccs_sensor *sensor, u32 reg, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- struct i2c_msg msg;
- unsigned char data[6];
- unsigned int len = ccs_reg_width(reg);
- int r;
-
- if (len > sizeof(data) - 2)
- return -EINVAL;
-
- msg.addr = client->addr;
- msg.flags = 0; /* Write */
- msg.len = 2 + len;
- msg.buf = data;
-
- put_unaligned_be16(CCS_REG_ADDR(reg), data);
- put_unaligned_be32(val << (8 * (sizeof(val) - len)), data + 2);
-
- dev_dbg(&client->dev, "writing reg 0x%4.4x value 0x%*.*x (%u)\n",
- CCS_REG_ADDR(reg), ccs_reg_width(reg) << 1,
- ccs_reg_width(reg) << 1, val, val);
-
- r = ccs_write_retry(client, &msg);
- if (r)
- dev_err(&client->dev,
- "wrote 0x%x to offset 0x%x error %d\n", val,
- CCS_REG_ADDR(reg), r);
-
- return r;
-}
-
/*
* Write to a 8/16-bit register.
* Returns zero if successful, or non-zero otherwise.
*/
int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val)
{
+ unsigned int retries = 10;
int rval;
rval = ccs_call_quirk(sensor, reg_access, true, &reg, &val);
@@ -364,7 +219,13 @@ int ccs_write_addr(struct ccs_sensor *sensor, u32 reg, u32 val)
if (rval < 0)
return rval;
- return ccs_write_addr_no_quirk(sensor, reg, val);
+ rval = 0;
+ do {
+ if (cci_write(sensor->regmap, reg, val, &rval))
+ fsleep(1000);
+ } while (rval && --retries);
+
+ return rval;
}
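
cci_write() follows the CCI error-accumulation convention: a non-NULL err argument lets several accesses share one error slot, later calls returning early once it is set, and the accumulated value doubles as the return value. A compact sketch of the pattern for a plain caller, assuming a regmap created with devm_cci_regmap_init_i2c(); this is not the driver's actual streaming path, which goes through the quirk-aware wrappers:

static int example_stream_on(struct ccs_sensor *sensor)
{
	int ret = 0;

	/* The three writes share one error slot; the first failure wins. */
	cci_write(sensor->regmap, CCS_R_GROUPED_PARAMETER_HOLD, 1, &ret);
	cci_write(sensor->regmap, CCS_R_MODE_SELECT,
		  CCS_MODE_SELECT_STREAMING, &ret);
	cci_write(sensor->regmap, CCS_R_GROUPED_PARAMETER_HOLD, 0, &ret);

	return ret;
}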
#define MAX_WRITE_LEN 32U
@@ -373,40 +234,38 @@ int ccs_write_data_regs(struct ccs_sensor *sensor, struct ccs_reg *regs,
size_t num_regs)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned char buf[2 + MAX_WRITE_LEN];
- struct i2c_msg msg = {
- .addr = client->addr,
- .buf = buf,
- };
size_t i;
for (i = 0; i < num_regs; i++, regs++) {
unsigned char *regdata = regs->value;
unsigned int j;
+ int len;
- for (j = 0; j < regs->len;
- j += msg.len - 2, regdata += msg.len - 2) {
+ for (j = 0; j < regs->len; j += len, regdata += len) {
char printbuf[(MAX_WRITE_LEN << 1) +
1 /* \0 */] = { 0 };
+ unsigned int retries = 10;
int rval;
- msg.len = min(regs->len - j, MAX_WRITE_LEN);
+ len = min(regs->len - j, MAX_WRITE_LEN);
- bin2hex(printbuf, regdata, msg.len);
+ bin2hex(printbuf, regdata, len);
dev_dbg(&client->dev,
"writing msr reg 0x%4.4x value 0x%s\n",
regs->addr + j, printbuf);
- put_unaligned_be16(regs->addr + j, buf);
- memcpy(buf + 2, regdata, msg.len);
-
- msg.len += 2;
+ do {
+ rval = regmap_bulk_write(sensor->regmap,
+ regs->addr + j,
+ regdata, len);
+ if (rval)
+ fsleep(1000);
+ } while (rval && --retries);
- rval = ccs_write_retry(client, &msg);
if (rval) {
dev_err(&client->dev,
"error writing %u octets to address 0x%4.4x\n",
- msg.len, regs->addr + j);
+ len, regs->addr + j);
return rval;
}
}
diff --git a/drivers/media/i2c/ccs/ccs-regs.h b/drivers/media/i2c/ccs/ccs-regs.h
index 6ce84c5ec..7b5dbc86e 100644
--- a/drivers/media/i2c/ccs/ccs-regs.h
+++ b/drivers/media/i2c/ccs/ccs-regs.h
@@ -10,59 +10,59 @@
#include <linux/bits.h>
-#define CCS_FL_BASE 16
-#define CCS_FL_16BIT BIT(CCS_FL_BASE)
-#define CCS_FL_32BIT BIT(CCS_FL_BASE + 1)
-#define CCS_FL_FLOAT_IREAL BIT(CCS_FL_BASE + 2)
-#define CCS_FL_IREAL BIT(CCS_FL_BASE + 3)
-#define CCS_R_ADDR(r) ((r) & 0xffff)
+#include <media/v4l2-cci.h>
-#define CCS_R_MODULE_MODEL_ID (0x0000 | CCS_FL_16BIT)
-#define CCS_R_MODULE_REVISION_NUMBER_MAJOR 0x0002
-#define CCS_R_FRAME_COUNT 0x0005
-#define CCS_R_PIXEL_ORDER 0x0006
+#define CCS_FL_BASE CCI_REG_PRIVATE_SHIFT
+#define CCS_FL_FLOAT_IREAL BIT(CCS_FL_BASE)
+#define CCS_FL_IREAL BIT(CCS_FL_BASE + 1)
+#define CCS_BUILD_BUG \
+ BUILD_BUG_ON(~CCI_REG_PRIVATE_MASK & (BIT(CCS_FL_BASE) | BIT(CCS_FL_BASE + 1)))
+#define CCS_R_MODULE_MODEL_ID CCI_REG16(0x0000)
+#define CCS_R_MODULE_REVISION_NUMBER_MAJOR CCI_REG8(0x0002)
+#define CCS_R_FRAME_COUNT CCI_REG8(0x0005)
+#define CCS_R_PIXEL_ORDER CCI_REG8(0x0006)
#define CCS_PIXEL_ORDER_GRBG 0U
#define CCS_PIXEL_ORDER_RGGB 1U
#define CCS_PIXEL_ORDER_BGGR 2U
#define CCS_PIXEL_ORDER_GBRG 3U
-#define CCS_R_MIPI_CCS_VERSION 0x0007
+#define CCS_R_MIPI_CCS_VERSION CCI_REG8(0x0007)
#define CCS_MIPI_CCS_VERSION_V1_0 0x10
#define CCS_MIPI_CCS_VERSION_V1_1 0x11
#define CCS_MIPI_CCS_VERSION_MAJOR_SHIFT 4U
#define CCS_MIPI_CCS_VERSION_MAJOR_MASK 0xf0
#define CCS_MIPI_CCS_VERSION_MINOR_SHIFT 0U
#define CCS_MIPI_CCS_VERSION_MINOR_MASK 0xf
-#define CCS_R_DATA_PEDESTAL (0x0008 | CCS_FL_16BIT)
-#define CCS_R_MODULE_MANUFACTURER_ID (0x000e | CCS_FL_16BIT)
-#define CCS_R_MODULE_REVISION_NUMBER_MINOR 0x0010
-#define CCS_R_MODULE_DATE_YEAR 0x0012
-#define CCS_R_MODULE_DATE_MONTH 0x0013
-#define CCS_R_MODULE_DATE_DAY 0x0014
-#define CCS_R_MODULE_DATE_PHASE 0x0015
+#define CCS_R_DATA_PEDESTAL CCI_REG16(0x0008)
+#define CCS_R_MODULE_MANUFACTURER_ID CCI_REG16(0x000e)
+#define CCS_R_MODULE_REVISION_NUMBER_MINOR CCI_REG8(0x0010)
+#define CCS_R_MODULE_DATE_YEAR CCI_REG8(0x0012)
+#define CCS_R_MODULE_DATE_MONTH CCI_REG8(0x0013)
+#define CCS_R_MODULE_DATE_DAY CCI_REG8(0x0014)
+#define CCS_R_MODULE_DATE_PHASE CCI_REG8(0x0015)
#define CCS_MODULE_DATE_PHASE_SHIFT 0U
#define CCS_MODULE_DATE_PHASE_MASK 0x7
#define CCS_MODULE_DATE_PHASE_TS 0U
#define CCS_MODULE_DATE_PHASE_ES 1U
#define CCS_MODULE_DATE_PHASE_CS 2U
#define CCS_MODULE_DATE_PHASE_MP 3U
-#define CCS_R_SENSOR_MODEL_ID (0x0016 | CCS_FL_16BIT)
-#define CCS_R_SENSOR_REVISION_NUMBER 0x0018
-#define CCS_R_SENSOR_FIRMWARE_VERSION 0x001a
-#define CCS_R_SERIAL_NUMBER (0x001c | CCS_FL_32BIT)
-#define CCS_R_SENSOR_MANUFACTURER_ID (0x0020 | CCS_FL_16BIT)
-#define CCS_R_SENSOR_REVISION_NUMBER_16 (0x0022 | CCS_FL_16BIT)
-#define CCS_R_FRAME_FORMAT_MODEL_TYPE 0x0040
+#define CCS_R_SENSOR_MODEL_ID CCI_REG16(0x0016)
+#define CCS_R_SENSOR_REVISION_NUMBER CCI_REG8(0x0018)
+#define CCS_R_SENSOR_FIRMWARE_VERSION CCI_REG8(0x001a)
+#define CCS_R_SERIAL_NUMBER CCI_REG32(0x001c)
+#define CCS_R_SENSOR_MANUFACTURER_ID CCI_REG16(0x0020)
+#define CCS_R_SENSOR_REVISION_NUMBER_16 CCI_REG16(0x0022)
+#define CCS_R_FRAME_FORMAT_MODEL_TYPE CCI_REG8(0x0040)
#define CCS_FRAME_FORMAT_MODEL_TYPE_2_BYTE 1U
#define CCS_FRAME_FORMAT_MODEL_TYPE_4_BYTE 2U
-#define CCS_R_FRAME_FORMAT_MODEL_SUBTYPE 0x0041
+#define CCS_R_FRAME_FORMAT_MODEL_SUBTYPE CCI_REG8(0x0041)
#define CCS_FRAME_FORMAT_MODEL_SUBTYPE_ROWS_SHIFT 0U
#define CCS_FRAME_FORMAT_MODEL_SUBTYPE_ROWS_MASK 0xf
#define CCS_FRAME_FORMAT_MODEL_SUBTYPE_COLUMNS_SHIFT 4U
#define CCS_FRAME_FORMAT_MODEL_SUBTYPE_COLUMNS_MASK 0xf0
-#define CCS_R_FRAME_FORMAT_DESCRIPTOR(n) ((0x0042 | CCS_FL_16BIT) + (n) * 2)
+#define CCS_R_FRAME_FORMAT_DESCRIPTOR(n) CCI_REG16(0x0042 + (n) * 2)
#define CCS_LIM_FRAME_FORMAT_DESCRIPTOR_MIN_N 0U
#define CCS_LIM_FRAME_FORMAT_DESCRIPTOR_MAX_N 14U
-#define CCS_R_FRAME_FORMAT_DESCRIPTOR_4(n) ((0x0060 | CCS_FL_32BIT) + (n) * 4)
+#define CCS_R_FRAME_FORMAT_DESCRIPTOR_4(n) CCI_REG32(0x0060 + (n) * 4)
#define CCS_FRAME_FORMAT_DESCRIPTOR_PIXELS_SHIFT 0U
#define CCS_FRAME_FORMAT_DESCRIPTOR_PIXELS_MASK 0xfff
#define CCS_FRAME_FORMAT_DESCRIPTOR_PCODE_SHIFT 12U
@@ -97,91 +97,91 @@
#define CCS_FRAME_FORMAT_DESCRIPTOR_4_PCODE_MANUF_SPECIFIC_4 12U
#define CCS_FRAME_FORMAT_DESCRIPTOR_4_PCODE_MANUF_SPECIFIC_5 13U
#define CCS_FRAME_FORMAT_DESCRIPTOR_4_PCODE_MANUF_SPECIFIC_6 14U
-#define CCS_R_ANALOG_GAIN_CAPABILITY (0x0080 | CCS_FL_16BIT)
+#define CCS_R_ANALOG_GAIN_CAPABILITY CCI_REG16(0x0080)
#define CCS_ANALOG_GAIN_CAPABILITY_GLOBAL 0U
#define CCS_ANALOG_GAIN_CAPABILITY_ALTERNATE_GLOBAL 2U
-#define CCS_R_ANALOG_GAIN_CODE_MIN (0x0084 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_CODE_MAX (0x0086 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_CODE_STEP (0x0088 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_TYPE (0x008a | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_M0 (0x008c | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_C0 (0x008e | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_M1 (0x0090 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_C1 (0x0092 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_LINEAR_GAIN_MIN (0x0094 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_LINEAR_GAIN_MAX (0x0096 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_LINEAR_GAIN_STEP_SIZE (0x0098 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_EXPONENTIAL_GAIN_MIN (0x009a | CCS_FL_16BIT)
-#define CCS_R_ANALOG_EXPONENTIAL_GAIN_MAX (0x009c | CCS_FL_16BIT)
-#define CCS_R_ANALOG_EXPONENTIAL_GAIN_STEP_SIZE (0x009e | CCS_FL_16BIT)
-#define CCS_R_DATA_FORMAT_MODEL_TYPE 0x00c0
+#define CCS_R_ANALOG_GAIN_CODE_MIN CCI_REG16(0x0084)
+#define CCS_R_ANALOG_GAIN_CODE_MAX CCI_REG16(0x0086)
+#define CCS_R_ANALOG_GAIN_CODE_STEP CCI_REG16(0x0088)
+#define CCS_R_ANALOG_GAIN_TYPE CCI_REG16(0x008a)
+#define CCS_R_ANALOG_GAIN_M0 CCI_REG16(0x008c)
+#define CCS_R_ANALOG_GAIN_C0 CCI_REG16(0x008e)
+#define CCS_R_ANALOG_GAIN_M1 CCI_REG16(0x0090)
+#define CCS_R_ANALOG_GAIN_C1 CCI_REG16(0x0092)
+#define CCS_R_ANALOG_LINEAR_GAIN_MIN CCI_REG16(0x0094)
+#define CCS_R_ANALOG_LINEAR_GAIN_MAX CCI_REG16(0x0096)
+#define CCS_R_ANALOG_LINEAR_GAIN_STEP_SIZE CCI_REG16(0x0098)
+#define CCS_R_ANALOG_EXPONENTIAL_GAIN_MIN CCI_REG16(0x009a)
+#define CCS_R_ANALOG_EXPONENTIAL_GAIN_MAX CCI_REG16(0x009c)
+#define CCS_R_ANALOG_EXPONENTIAL_GAIN_STEP_SIZE CCI_REG16(0x009e)
+#define CCS_R_DATA_FORMAT_MODEL_TYPE CCI_REG8(0x00c0)
#define CCS_DATA_FORMAT_MODEL_TYPE_NORMAL 1U
#define CCS_DATA_FORMAT_MODEL_TYPE_EXTENDED 2U
-#define CCS_R_DATA_FORMAT_MODEL_SUBTYPE 0x00c1
+#define CCS_R_DATA_FORMAT_MODEL_SUBTYPE CCI_REG8(0x00c1)
#define CCS_DATA_FORMAT_MODEL_SUBTYPE_ROWS_SHIFT 0U
#define CCS_DATA_FORMAT_MODEL_SUBTYPE_ROWS_MASK 0xf
#define CCS_DATA_FORMAT_MODEL_SUBTYPE_COLUMNS_SHIFT 4U
#define CCS_DATA_FORMAT_MODEL_SUBTYPE_COLUMNS_MASK 0xf0
-#define CCS_R_DATA_FORMAT_DESCRIPTOR(n) ((0x00c2 | CCS_FL_16BIT) + (n) * 2)
+#define CCS_R_DATA_FORMAT_DESCRIPTOR(n) CCI_REG16(0x00c2 + (n) * 2)
#define CCS_LIM_DATA_FORMAT_DESCRIPTOR_MIN_N 0U
#define CCS_LIM_DATA_FORMAT_DESCRIPTOR_MAX_N 15U
#define CCS_DATA_FORMAT_DESCRIPTOR_COMPRESSED_SHIFT 0U
#define CCS_DATA_FORMAT_DESCRIPTOR_COMPRESSED_MASK 0xff
#define CCS_DATA_FORMAT_DESCRIPTOR_UNCOMPRESSED_SHIFT 8U
#define CCS_DATA_FORMAT_DESCRIPTOR_UNCOMPRESSED_MASK 0xff00
-#define CCS_R_MODE_SELECT 0x0100
+#define CCS_R_MODE_SELECT CCI_REG8(0x0100)
#define CCS_MODE_SELECT_SOFTWARE_STANDBY 0U
#define CCS_MODE_SELECT_STREAMING 1U
-#define CCS_R_IMAGE_ORIENTATION 0x0101
+#define CCS_R_IMAGE_ORIENTATION CCI_REG8(0x0101)
#define CCS_IMAGE_ORIENTATION_HORIZONTAL_MIRROR BIT(0)
#define CCS_IMAGE_ORIENTATION_VERTICAL_FLIP BIT(1)
-#define CCS_R_SOFTWARE_RESET 0x0103
+#define CCS_R_SOFTWARE_RESET CCI_REG8(0x0103)
#define CCS_SOFTWARE_RESET_OFF 0U
#define CCS_SOFTWARE_RESET_ON 1U
-#define CCS_R_GROUPED_PARAMETER_HOLD 0x0104
-#define CCS_R_MASK_CORRUPTED_FRAMES 0x0105
+#define CCS_R_GROUPED_PARAMETER_HOLD CCI_REG8(0x0104)
+#define CCS_R_MASK_CORRUPTED_FRAMES CCI_REG8(0x0105)
#define CCS_MASK_CORRUPTED_FRAMES_ALLOW 0U
#define CCS_MASK_CORRUPTED_FRAMES_MASK 1U
-#define CCS_R_FAST_STANDBY_CTRL 0x0106
+#define CCS_R_FAST_STANDBY_CTRL CCI_REG8(0x0106)
#define CCS_FAST_STANDBY_CTRL_COMPLETE_FRAMES 0U
#define CCS_FAST_STANDBY_CTRL_FRAME_TRUNCATION 1U
-#define CCS_R_CCI_ADDRESS_CTRL 0x0107
-#define CCS_R_2ND_CCI_IF_CTRL 0x0108
+#define CCS_R_CCI_ADDRESS_CTRL CCI_REG8(0x0107)
+#define CCS_R_2ND_CCI_IF_CTRL CCI_REG8(0x0108)
#define CCS_2ND_CCI_IF_CTRL_ENABLE BIT(0)
#define CCS_2ND_CCI_IF_CTRL_ACK BIT(1)
-#define CCS_R_2ND_CCI_ADDRESS_CTRL 0x0109
-#define CCS_R_CSI_CHANNEL_IDENTIFIER 0x0110
-#define CCS_R_CSI_SIGNALING_MODE 0x0111
+#define CCS_R_2ND_CCI_ADDRESS_CTRL CCI_REG8(0x0109)
+#define CCS_R_CSI_CHANNEL_IDENTIFIER CCI_REG8(0x0110)
+#define CCS_R_CSI_SIGNALING_MODE CCI_REG8(0x0111)
#define CCS_CSI_SIGNALING_MODE_CSI_2_DPHY 2U
#define CCS_CSI_SIGNALING_MODE_CSI_2_CPHY 3U
-#define CCS_R_CSI_DATA_FORMAT (0x0112 | CCS_FL_16BIT)
-#define CCS_R_CSI_LANE_MODE 0x0114
-#define CCS_R_DPCM_FRAME_DT 0x011d
-#define CCS_R_BOTTOM_EMBEDDED_DATA_DT 0x011e
-#define CCS_R_BOTTOM_EMBEDDED_DATA_VC 0x011f
-#define CCS_R_GAIN_MODE 0x0120
+#define CCS_R_CSI_DATA_FORMAT CCI_REG16(0x0112)
+#define CCS_R_CSI_LANE_MODE CCI_REG8(0x0114)
+#define CCS_R_DPCM_FRAME_DT CCI_REG8(0x011d)
+#define CCS_R_BOTTOM_EMBEDDED_DATA_DT CCI_REG8(0x011e)
+#define CCS_R_BOTTOM_EMBEDDED_DATA_VC CCI_REG8(0x011f)
+#define CCS_R_GAIN_MODE CCI_REG8(0x0120)
#define CCS_GAIN_MODE_GLOBAL 0U
#define CCS_GAIN_MODE_ALTERNATE 1U
-#define CCS_R_ADC_BIT_DEPTH 0x0121
-#define CCS_R_EMB_DATA_CTRL 0x0122
+#define CCS_R_ADC_BIT_DEPTH CCI_REG8(0x0121)
+#define CCS_R_EMB_DATA_CTRL CCI_REG8(0x0122)
#define CCS_EMB_DATA_CTRL_RAW8_PACKING_FOR_RAW16 BIT(0)
#define CCS_EMB_DATA_CTRL_RAW10_PACKING_FOR_RAW20 BIT(1)
#define CCS_EMB_DATA_CTRL_RAW12_PACKING_FOR_RAW24 BIT(2)
-#define CCS_R_GPIO_TRIG_MODE 0x0130
-#define CCS_R_EXTCLK_FREQUENCY_MHZ (0x0136 | (CCS_FL_16BIT | CCS_FL_IREAL))
-#define CCS_R_TEMP_SENSOR_CTRL 0x0138
+#define CCS_R_GPIO_TRIG_MODE CCI_REG8(0x0130)
+#define CCS_R_EXTCLK_FREQUENCY_MHZ (CCI_REG16(0x0136) | CCS_FL_IREAL)
+#define CCS_R_TEMP_SENSOR_CTRL CCI_REG8(0x0138)
#define CCS_TEMP_SENSOR_CTRL_ENABLE BIT(0)
-#define CCS_R_TEMP_SENSOR_MODE 0x0139
-#define CCS_R_TEMP_SENSOR_OUTPUT 0x013a
-#define CCS_R_FINE_INTEGRATION_TIME (0x0200 | CCS_FL_16BIT)
-#define CCS_R_COARSE_INTEGRATION_TIME (0x0202 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_GAIN_CODE_GLOBAL (0x0204 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_LINEAR_GAIN_GLOBAL (0x0206 | CCS_FL_16BIT)
-#define CCS_R_ANALOG_EXPONENTIAL_GAIN_GLOBAL (0x0208 | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_GAIN_GLOBAL (0x020e | CCS_FL_16BIT)
-#define CCS_R_SHORT_ANALOG_GAIN_GLOBAL (0x0216 | CCS_FL_16BIT)
-#define CCS_R_SHORT_DIGITAL_GAIN_GLOBAL (0x0218 | CCS_FL_16BIT)
-#define CCS_R_HDR_MODE 0x0220
+#define CCS_R_TEMP_SENSOR_MODE CCI_REG8(0x0139)
+#define CCS_R_TEMP_SENSOR_OUTPUT CCI_REG8(0x013a)
+#define CCS_R_FINE_INTEGRATION_TIME CCI_REG16(0x0200)
+#define CCS_R_COARSE_INTEGRATION_TIME CCI_REG16(0x0202)
+#define CCS_R_ANALOG_GAIN_CODE_GLOBAL CCI_REG16(0x0204)
+#define CCS_R_ANALOG_LINEAR_GAIN_GLOBAL CCI_REG16(0x0206)
+#define CCS_R_ANALOG_EXPONENTIAL_GAIN_GLOBAL CCI_REG16(0x0208)
+#define CCS_R_DIGITAL_GAIN_GLOBAL CCI_REG16(0x020e)
+#define CCS_R_SHORT_ANALOG_GAIN_GLOBAL CCI_REG16(0x0216)
+#define CCS_R_SHORT_DIGITAL_GAIN_GLOBAL CCI_REG16(0x0218)
+#define CCS_R_HDR_MODE CCI_REG8(0x0220)
#define CCS_HDR_MODE_ENABLED BIT(0)
#define CCS_HDR_MODE_SEPARATE_ANALOG_GAIN BIT(1)
#define CCS_HDR_MODE_UPSCALING BIT(2)
@@ -189,421 +189,421 @@
#define CCS_HDR_MODE_TIMING_MODE BIT(4)
#define CCS_HDR_MODE_EXPOSURE_CTRL_DIRECT BIT(5)
#define CCS_HDR_MODE_SEPARATE_DIGITAL_GAIN BIT(6)
-#define CCS_R_HDR_RESOLUTION_REDUCTION 0x0221
+#define CCS_R_HDR_RESOLUTION_REDUCTION CCI_REG8(0x0221)
#define CCS_HDR_RESOLUTION_REDUCTION_ROW_SHIFT 0U
#define CCS_HDR_RESOLUTION_REDUCTION_ROW_MASK 0xf
#define CCS_HDR_RESOLUTION_REDUCTION_COLUMN_SHIFT 4U
#define CCS_HDR_RESOLUTION_REDUCTION_COLUMN_MASK 0xf0
-#define CCS_R_EXPOSURE_RATIO 0x0222
-#define CCS_R_HDR_INTERNAL_BIT_DEPTH 0x0223
-#define CCS_R_DIRECT_SHORT_INTEGRATION_TIME (0x0224 | CCS_FL_16BIT)
-#define CCS_R_SHORT_ANALOG_LINEAR_GAIN_GLOBAL (0x0226 | CCS_FL_16BIT)
-#define CCS_R_SHORT_ANALOG_EXPONENTIAL_GAIN_GLOBAL (0x0228 | CCS_FL_16BIT)
-#define CCS_R_VT_PIX_CLK_DIV (0x0300 | CCS_FL_16BIT)
-#define CCS_R_VT_SYS_CLK_DIV (0x0302 | CCS_FL_16BIT)
-#define CCS_R_PRE_PLL_CLK_DIV (0x0304 | CCS_FL_16BIT)
-#define CCS_R_PLL_MULTIPLIER (0x0306 | CCS_FL_16BIT)
-#define CCS_R_OP_PIX_CLK_DIV (0x0308 | CCS_FL_16BIT)
-#define CCS_R_OP_SYS_CLK_DIV (0x030a | CCS_FL_16BIT)
-#define CCS_R_OP_PRE_PLL_CLK_DIV (0x030c | CCS_FL_16BIT)
-#define CCS_R_OP_PLL_MULTIPLIER (0x030e | CCS_FL_16BIT)
-#define CCS_R_PLL_MODE 0x0310
+#define CCS_R_EXPOSURE_RATIO CCI_REG8(0x0222)
+#define CCS_R_HDR_INTERNAL_BIT_DEPTH CCI_REG8(0x0223)
+#define CCS_R_DIRECT_SHORT_INTEGRATION_TIME CCI_REG16(0x0224)
+#define CCS_R_SHORT_ANALOG_LINEAR_GAIN_GLOBAL CCI_REG16(0x0226)
+#define CCS_R_SHORT_ANALOG_EXPONENTIAL_GAIN_GLOBAL CCI_REG16(0x0228)
+#define CCS_R_VT_PIX_CLK_DIV CCI_REG16(0x0300)
+#define CCS_R_VT_SYS_CLK_DIV CCI_REG16(0x0302)
+#define CCS_R_PRE_PLL_CLK_DIV CCI_REG16(0x0304)
+#define CCS_R_PLL_MULTIPLIER CCI_REG16(0x0306)
+#define CCS_R_OP_PIX_CLK_DIV CCI_REG16(0x0308)
+#define CCS_R_OP_SYS_CLK_DIV CCI_REG16(0x030a)
+#define CCS_R_OP_PRE_PLL_CLK_DIV CCI_REG16(0x030c)
+#define CCS_R_OP_PLL_MULTIPLIER CCI_REG16(0x030e)
+#define CCS_R_PLL_MODE CCI_REG8(0x0310)
#define CCS_PLL_MODE_SHIFT 0U
#define CCS_PLL_MODE_MASK 0x1
#define CCS_PLL_MODE_SINGLE 0U
#define CCS_PLL_MODE_DUAL 1U
-#define CCS_R_OP_PIX_CLK_DIV_REV (0x0312 | CCS_FL_16BIT)
-#define CCS_R_OP_SYS_CLK_DIV_REV (0x0314 | CCS_FL_16BIT)
-#define CCS_R_FRAME_LENGTH_LINES (0x0340 | CCS_FL_16BIT)
-#define CCS_R_LINE_LENGTH_PCK (0x0342 | CCS_FL_16BIT)
-#define CCS_R_X_ADDR_START (0x0344 | CCS_FL_16BIT)
-#define CCS_R_Y_ADDR_START (0x0346 | CCS_FL_16BIT)
-#define CCS_R_X_ADDR_END (0x0348 | CCS_FL_16BIT)
-#define CCS_R_Y_ADDR_END (0x034a | CCS_FL_16BIT)
-#define CCS_R_X_OUTPUT_SIZE (0x034c | CCS_FL_16BIT)
-#define CCS_R_Y_OUTPUT_SIZE (0x034e | CCS_FL_16BIT)
-#define CCS_R_FRAME_LENGTH_CTRL 0x0350
+#define CCS_R_OP_PIX_CLK_DIV_REV CCI_REG16(0x0312)
+#define CCS_R_OP_SYS_CLK_DIV_REV CCI_REG16(0x0314)
+#define CCS_R_FRAME_LENGTH_LINES CCI_REG16(0x0340)
+#define CCS_R_LINE_LENGTH_PCK CCI_REG16(0x0342)
+#define CCS_R_X_ADDR_START CCI_REG16(0x0344)
+#define CCS_R_Y_ADDR_START CCI_REG16(0x0346)
+#define CCS_R_X_ADDR_END CCI_REG16(0x0348)
+#define CCS_R_Y_ADDR_END CCI_REG16(0x034a)
+#define CCS_R_X_OUTPUT_SIZE CCI_REG16(0x034c)
+#define CCS_R_Y_OUTPUT_SIZE CCI_REG16(0x034e)
+#define CCS_R_FRAME_LENGTH_CTRL CCI_REG8(0x0350)
#define CCS_FRAME_LENGTH_CTRL_AUTOMATIC BIT(0)
-#define CCS_R_TIMING_MODE_CTRL 0x0352
+#define CCS_R_TIMING_MODE_CTRL CCI_REG8(0x0352)
#define CCS_TIMING_MODE_CTRL_MANUAL_READOUT BIT(0)
#define CCS_TIMING_MODE_CTRL_DELAYED_EXPOSURE BIT(1)
-#define CCS_R_START_READOUT_RS 0x0353
+#define CCS_R_START_READOUT_RS CCI_REG8(0x0353)
#define CCS_START_READOUT_RS_MANUAL_READOUT_START BIT(0)
-#define CCS_R_FRAME_MARGIN (0x0354 | CCS_FL_16BIT)
-#define CCS_R_X_EVEN_INC (0x0380 | CCS_FL_16BIT)
-#define CCS_R_X_ODD_INC (0x0382 | CCS_FL_16BIT)
-#define CCS_R_Y_EVEN_INC (0x0384 | CCS_FL_16BIT)
-#define CCS_R_Y_ODD_INC (0x0386 | CCS_FL_16BIT)
-#define CCS_R_MONOCHROME_EN 0x0390
+#define CCS_R_FRAME_MARGIN CCI_REG16(0x0354)
+#define CCS_R_X_EVEN_INC CCI_REG16(0x0380)
+#define CCS_R_X_ODD_INC CCI_REG16(0x0382)
+#define CCS_R_Y_EVEN_INC CCI_REG16(0x0384)
+#define CCS_R_Y_ODD_INC CCI_REG16(0x0386)
+#define CCS_R_MONOCHROME_EN CCI_REG8(0x0390)
#define CCS_MONOCHROME_EN_ENABLED 0U
-#define CCS_R_SCALING_MODE (0x0400 | CCS_FL_16BIT)
+#define CCS_R_SCALING_MODE CCI_REG16(0x0400)
#define CCS_SCALING_MODE_NO_SCALING 0U
#define CCS_SCALING_MODE_HORIZONTAL 1U
-#define CCS_R_SCALE_M (0x0404 | CCS_FL_16BIT)
-#define CCS_R_SCALE_N (0x0406 | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_CROP_X_OFFSET (0x0408 | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_CROP_Y_OFFSET (0x040a | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_CROP_IMAGE_WIDTH (0x040c | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_CROP_IMAGE_HEIGHT (0x040e | CCS_FL_16BIT)
-#define CCS_R_COMPRESSION_MODE (0x0500 | CCS_FL_16BIT)
+#define CCS_R_SCALE_M CCI_REG16(0x0404)
+#define CCS_R_SCALE_N CCI_REG16(0x0406)
+#define CCS_R_DIGITAL_CROP_X_OFFSET CCI_REG16(0x0408)
+#define CCS_R_DIGITAL_CROP_Y_OFFSET CCI_REG16(0x040a)
+#define CCS_R_DIGITAL_CROP_IMAGE_WIDTH CCI_REG16(0x040c)
+#define CCS_R_DIGITAL_CROP_IMAGE_HEIGHT CCI_REG16(0x040e)
+#define CCS_R_COMPRESSION_MODE CCI_REG16(0x0500)
#define CCS_COMPRESSION_MODE_NONE 0U
#define CCS_COMPRESSION_MODE_DPCM_PCM_SIMPLE 1U
-#define CCS_R_TEST_PATTERN_MODE (0x0600 | CCS_FL_16BIT)
+#define CCS_R_TEST_PATTERN_MODE CCI_REG16(0x0600)
#define CCS_TEST_PATTERN_MODE_NONE 0U
#define CCS_TEST_PATTERN_MODE_SOLID_COLOR 1U
#define CCS_TEST_PATTERN_MODE_COLOR_BARS 2U
#define CCS_TEST_PATTERN_MODE_FADE_TO_GREY 3U
#define CCS_TEST_PATTERN_MODE_PN9 4U
#define CCS_TEST_PATTERN_MODE_COLOR_TILE 5U
-#define CCS_R_TEST_DATA_RED (0x0602 | CCS_FL_16BIT)
-#define CCS_R_TEST_DATA_GREENR (0x0604 | CCS_FL_16BIT)
-#define CCS_R_TEST_DATA_BLUE (0x0606 | CCS_FL_16BIT)
-#define CCS_R_TEST_DATA_GREENB (0x0608 | CCS_FL_16BIT)
-#define CCS_R_VALUE_STEP_SIZE_SMOOTH 0x060a
-#define CCS_R_VALUE_STEP_SIZE_QUANTISED 0x060b
-#define CCS_R_TCLK_POST 0x0800
-#define CCS_R_THS_PREPARE 0x0801
-#define CCS_R_THS_ZERO_MIN 0x0802
-#define CCS_R_THS_TRAIL 0x0803
-#define CCS_R_TCLK_TRAIL_MIN 0x0804
-#define CCS_R_TCLK_PREPARE 0x0805
-#define CCS_R_TCLK_ZERO 0x0806
-#define CCS_R_TLPX 0x0807
-#define CCS_R_PHY_CTRL 0x0808
+#define CCS_R_TEST_DATA_RED CCI_REG16(0x0602)
+#define CCS_R_TEST_DATA_GREENR CCI_REG16(0x0604)
+#define CCS_R_TEST_DATA_BLUE CCI_REG16(0x0606)
+#define CCS_R_TEST_DATA_GREENB CCI_REG16(0x0608)
+#define CCS_R_VALUE_STEP_SIZE_SMOOTH CCI_REG8(0x060a)
+#define CCS_R_VALUE_STEP_SIZE_QUANTISED CCI_REG8(0x060b)
+#define CCS_R_TCLK_POST CCI_REG8(0x0800)
+#define CCS_R_THS_PREPARE CCI_REG8(0x0801)
+#define CCS_R_THS_ZERO_MIN CCI_REG8(0x0802)
+#define CCS_R_THS_TRAIL CCI_REG8(0x0803)
+#define CCS_R_TCLK_TRAIL_MIN CCI_REG8(0x0804)
+#define CCS_R_TCLK_PREPARE CCI_REG8(0x0805)
+#define CCS_R_TCLK_ZERO CCI_REG8(0x0806)
+#define CCS_R_TLPX CCI_REG8(0x0807)
+#define CCS_R_PHY_CTRL CCI_REG8(0x0808)
#define CCS_PHY_CTRL_AUTO 0U
#define CCS_PHY_CTRL_UI 1U
#define CCS_PHY_CTRL_MANUAL 2U
-#define CCS_R_TCLK_POST_EX (0x080a | CCS_FL_16BIT)
-#define CCS_R_THS_PREPARE_EX (0x080c | CCS_FL_16BIT)
-#define CCS_R_THS_ZERO_MIN_EX (0x080e | CCS_FL_16BIT)
-#define CCS_R_THS_TRAIL_EX (0x0810 | CCS_FL_16BIT)
-#define CCS_R_TCLK_TRAIL_MIN_EX (0x0812 | CCS_FL_16BIT)
-#define CCS_R_TCLK_PREPARE_EX (0x0814 | CCS_FL_16BIT)
-#define CCS_R_TCLK_ZERO_EX (0x0816 | CCS_FL_16BIT)
-#define CCS_R_TLPX_EX (0x0818 | CCS_FL_16BIT)
-#define CCS_R_REQUESTED_LINK_RATE (0x0820 | CCS_FL_32BIT)
-#define CCS_R_DPHY_EQUALIZATION_MODE 0x0824
+#define CCS_R_TCLK_POST_EX CCI_REG16(0x080a)
+#define CCS_R_THS_PREPARE_EX CCI_REG16(0x080c)
+#define CCS_R_THS_ZERO_MIN_EX CCI_REG16(0x080e)
+#define CCS_R_THS_TRAIL_EX CCI_REG16(0x0810)
+#define CCS_R_TCLK_TRAIL_MIN_EX CCI_REG16(0x0812)
+#define CCS_R_TCLK_PREPARE_EX CCI_REG16(0x0814)
+#define CCS_R_TCLK_ZERO_EX CCI_REG16(0x0816)
+#define CCS_R_TLPX_EX CCI_REG16(0x0818)
+#define CCS_R_REQUESTED_LINK_RATE CCI_REG32(0x0820)
+#define CCS_R_DPHY_EQUALIZATION_MODE CCI_REG8(0x0824)
#define CCS_DPHY_EQUALIZATION_MODE_EQ2 BIT(0)
-#define CCS_R_PHY_EQUALIZATION_CTRL 0x0825
+#define CCS_R_PHY_EQUALIZATION_CTRL CCI_REG8(0x0825)
#define CCS_PHY_EQUALIZATION_CTRL_ENABLE BIT(0)
-#define CCS_R_DPHY_PREAMBLE_CTRL 0x0826
+#define CCS_R_DPHY_PREAMBLE_CTRL CCI_REG8(0x0826)
#define CCS_DPHY_PREAMBLE_CTRL_ENABLE BIT(0)
-#define CCS_R_DPHY_PREAMBLE_LENGTH 0x0826
-#define CCS_R_PHY_SSC_CTRL 0x0828
+#define CCS_R_DPHY_PREAMBLE_LENGTH CCI_REG8(0x0826)
+#define CCS_R_PHY_SSC_CTRL CCI_REG8(0x0828)
#define CCS_PHY_SSC_CTRL_ENABLE BIT(0)
-#define CCS_R_MANUAL_LP_CTRL 0x0829
+#define CCS_R_MANUAL_LP_CTRL CCI_REG8(0x0829)
#define CCS_MANUAL_LP_CTRL_ENABLE BIT(0)
-#define CCS_R_TWAKEUP 0x082a
-#define CCS_R_TINIT 0x082b
-#define CCS_R_THS_EXIT 0x082c
-#define CCS_R_THS_EXIT_EX (0x082e | CCS_FL_16BIT)
-#define CCS_R_PHY_PERIODIC_CALIBRATION_CTRL 0x0830
+#define CCS_R_TWAKEUP CCI_REG8(0x082a)
+#define CCS_R_TINIT CCI_REG8(0x082b)
+#define CCS_R_THS_EXIT CCI_REG8(0x082c)
+#define CCS_R_THS_EXIT_EX CCI_REG16(0x082e)
+#define CCS_R_PHY_PERIODIC_CALIBRATION_CTRL CCI_REG8(0x0830)
#define CCS_PHY_PERIODIC_CALIBRATION_CTRL_FRAME_BLANKING BIT(0)
-#define CCS_R_PHY_PERIODIC_CALIBRATION_INTERVAL 0x0831
-#define CCS_R_PHY_INIT_CALIBRATION_CTRL 0x0832
+#define CCS_R_PHY_PERIODIC_CALIBRATION_INTERVAL CCI_REG8(0x0831)
+#define CCS_R_PHY_INIT_CALIBRATION_CTRL CCI_REG8(0x0832)
#define CCS_PHY_INIT_CALIBRATION_CTRL_STREAM_START BIT(0)
-#define CCS_R_DPHY_CALIBRATION_MODE 0x0833
+#define CCS_R_DPHY_CALIBRATION_MODE CCI_REG8(0x0833)
#define CCS_DPHY_CALIBRATION_MODE_ALSO_ALTERNATE BIT(0)
-#define CCS_R_CPHY_CALIBRATION_MODE 0x0834
+#define CCS_R_CPHY_CALIBRATION_MODE CCI_REG8(0x0834)
#define CCS_CPHY_CALIBRATION_MODE_FORMAT_1 0U
#define CCS_CPHY_CALIBRATION_MODE_FORMAT_2 1U
#define CCS_CPHY_CALIBRATION_MODE_FORMAT_3 2U
-#define CCS_R_T3_CALPREAMBLE_LENGTH 0x0835
-#define CCS_R_T3_CALPREAMBLE_LENGTH_PER 0x0836
-#define CCS_R_T3_CALALTSEQ_LENGTH 0x0837
-#define CCS_R_T3_CALALTSEQ_LENGTH_PER 0x0838
-#define CCS_R_FM2_INIT_SEED (0x083a | CCS_FL_16BIT)
-#define CCS_R_T3_CALUDEFSEQ_LENGTH (0x083c | CCS_FL_16BIT)
-#define CCS_R_T3_CALUDEFSEQ_LENGTH_PER (0x083e | CCS_FL_16BIT)
-#define CCS_R_TGR_PREAMBLE_LENGTH 0x0841
+#define CCS_R_T3_CALPREAMBLE_LENGTH CCI_REG8(0x0835)
+#define CCS_R_T3_CALPREAMBLE_LENGTH_PER CCI_REG8(0x0836)
+#define CCS_R_T3_CALALTSEQ_LENGTH CCI_REG8(0x0837)
+#define CCS_R_T3_CALALTSEQ_LENGTH_PER CCI_REG8(0x0838)
+#define CCS_R_FM2_INIT_SEED CCI_REG16(0x083a)
+#define CCS_R_T3_CALUDEFSEQ_LENGTH CCI_REG16(0x083c)
+#define CCS_R_T3_CALUDEFSEQ_LENGTH_PER CCI_REG16(0x083e)
+#define CCS_R_TGR_PREAMBLE_LENGTH CCI_REG8(0x0841)
#define CCS_TGR_PREAMBLE_LENGTH_PREAMABLE_PROG_SEQ BIT(7)
#define CCS_TGR_PREAMBLE_LENGTH_BEGIN_PREAMBLE_LENGTH_SHIFT 0U
#define CCS_TGR_PREAMBLE_LENGTH_BEGIN_PREAMBLE_LENGTH_MASK 0x3f
-#define CCS_R_TGR_POST_LENGTH 0x0842
+#define CCS_R_TGR_POST_LENGTH CCI_REG8(0x0842)
#define CCS_TGR_POST_LENGTH_POST_LENGTH_SHIFT 0U
#define CCS_TGR_POST_LENGTH_POST_LENGTH_MASK 0x1f
-#define CCS_R_TGR_PREAMBLE_PROG_SEQUENCE(n2) (0x0843 + (n2))
+#define CCS_R_TGR_PREAMBLE_PROG_SEQUENCE(n2) CCI_REG8(0x0843 + (n2))
#define CCS_LIM_TGR_PREAMBLE_PROG_SEQUENCE_MIN_N2 0U
#define CCS_LIM_TGR_PREAMBLE_PROG_SEQUENCE_MAX_N2 6U
#define CCS_TGR_PREAMBLE_PROG_SEQUENCE_SYMBOL_N_1_SHIFT 3U
#define CCS_TGR_PREAMBLE_PROG_SEQUENCE_SYMBOL_N_1_MASK 0x38
#define CCS_TGR_PREAMBLE_PROG_SEQUENCE_SYMBOL_N_SHIFT 0U
#define CCS_TGR_PREAMBLE_PROG_SEQUENCE_SYMBOL_N_MASK 0x7
-#define CCS_R_T3_PREPARE (0x084e | CCS_FL_16BIT)
-#define CCS_R_T3_LPX (0x0850 | CCS_FL_16BIT)
-#define CCS_R_ALPS_CTRL 0x085a
+#define CCS_R_T3_PREPARE CCI_REG16(0x084e)
+#define CCS_R_T3_LPX CCI_REG16(0x0850)
+#define CCS_R_ALPS_CTRL CCI_REG8(0x085a)
#define CCS_ALPS_CTRL_LVLP_DPHY BIT(0)
#define CCS_ALPS_CTRL_LVLP_CPHY BIT(1)
#define CCS_ALPS_CTRL_ALP_CPHY BIT(2)
-#define CCS_R_TX_REG_CSI_EPD_EN_SSP_CPHY (0x0860 | CCS_FL_16BIT)
-#define CCS_R_TX_REG_CSI_EPD_OP_SLP_CPHY (0x0862 | CCS_FL_16BIT)
-#define CCS_R_TX_REG_CSI_EPD_EN_SSP_DPHY (0x0864 | CCS_FL_16BIT)
-#define CCS_R_TX_REG_CSI_EPD_OP_SLP_DPHY (0x0866 | CCS_FL_16BIT)
-#define CCS_R_TX_REG_CSI_EPD_MISC_OPTION_CPHY 0x0868
-#define CCS_R_TX_REG_CSI_EPD_MISC_OPTION_DPHY 0x0869
-#define CCS_R_SCRAMBLING_CTRL 0x0870
+#define CCS_R_TX_REG_CSI_EPD_EN_SSP_CPHY CCI_REG16(0x0860)
+#define CCS_R_TX_REG_CSI_EPD_OP_SLP_CPHY CCI_REG16(0x0862)
+#define CCS_R_TX_REG_CSI_EPD_EN_SSP_DPHY CCI_REG16(0x0864)
+#define CCS_R_TX_REG_CSI_EPD_OP_SLP_DPHY CCI_REG16(0x0866)
+#define CCS_R_TX_REG_CSI_EPD_MISC_OPTION_CPHY CCI_REG8(0x0868)
+#define CCS_R_TX_REG_CSI_EPD_MISC_OPTION_DPHY CCI_REG8(0x0869)
+#define CCS_R_SCRAMBLING_CTRL CCI_REG8(0x0870)
#define CCS_SCRAMBLING_CTRL_ENABLED BIT(0)
#define CCS_SCRAMBLING_CTRL_SHIFT 2U
#define CCS_SCRAMBLING_CTRL_MASK 0xc
#define CCS_SCRAMBLING_CTRL_1_SEED_CPHY 0U
#define CCS_SCRAMBLING_CTRL_4_SEED_CPHY 3U
-#define CCS_R_LANE_SEED_VALUE(seed, lane) ((0x0872 | CCS_FL_16BIT) + (seed) * 16 + (lane) * 2)
+#define CCS_R_LANE_SEED_VALUE(seed, lane) CCI_REG16(0x0872 + (seed) * 16 + (lane) * 2)
#define CCS_LIM_LANE_SEED_VALUE_MIN_SEED 0U
#define CCS_LIM_LANE_SEED_VALUE_MAX_SEED 3U
#define CCS_LIM_LANE_SEED_VALUE_MIN_LANE 0U
#define CCS_LIM_LANE_SEED_VALUE_MAX_LANE 7U
-#define CCS_R_TX_USL_REV_ENTRY (0x08c0 | CCS_FL_16BIT)
-#define CCS_R_TX_USL_REV_CLOCK_COUNTER (0x08c2 | CCS_FL_16BIT)
-#define CCS_R_TX_USL_REV_LP_COUNTER (0x08c4 | CCS_FL_16BIT)
-#define CCS_R_TX_USL_REV_FRAME_COUNTER (0x08c6 | CCS_FL_16BIT)
-#define CCS_R_TX_USL_REV_CHRONOLOGICAL_TIMER (0x08c8 | CCS_FL_16BIT)
-#define CCS_R_TX_USL_FWD_ENTRY (0x08ca | CCS_FL_16BIT)
-#define CCS_R_TX_USL_GPIO (0x08cc | CCS_FL_16BIT)
-#define CCS_R_TX_USL_OPERATION (0x08ce | CCS_FL_16BIT)
+#define CCS_R_TX_USL_REV_ENTRY CCI_REG16(0x08c0)
+#define CCS_R_TX_USL_REV_CLOCK_COUNTER CCI_REG16(0x08c2)
+#define CCS_R_TX_USL_REV_LP_COUNTER CCI_REG16(0x08c4)
+#define CCS_R_TX_USL_REV_FRAME_COUNTER CCI_REG16(0x08c6)
+#define CCS_R_TX_USL_REV_CHRONOLOGICAL_TIMER CCI_REG16(0x08c8)
+#define CCS_R_TX_USL_FWD_ENTRY CCI_REG16(0x08ca)
+#define CCS_R_TX_USL_GPIO CCI_REG16(0x08cc)
+#define CCS_R_TX_USL_OPERATION CCI_REG16(0x08ce)
#define CCS_TX_USL_OPERATION_RESET BIT(0)
-#define CCS_R_TX_USL_ALP_CTRL (0x08d0 | CCS_FL_16BIT)
+#define CCS_R_TX_USL_ALP_CTRL CCI_REG16(0x08d0)
#define CCS_TX_USL_ALP_CTRL_CLOCK_PAUSE BIT(0)
-#define CCS_R_TX_USL_APP_BTA_ACK_TIMEOUT (0x08d2 | CCS_FL_16BIT)
-#define CCS_R_TX_USL_SNS_BTA_ACK_TIMEOUT (0x08d2 | CCS_FL_16BIT)
-#define CCS_R_USL_CLOCK_MODE_D_CTRL 0x08d2
+#define CCS_R_TX_USL_APP_BTA_ACK_TIMEOUT CCI_REG16(0x08d2)
+#define CCS_R_TX_USL_SNS_BTA_ACK_TIMEOUT CCI_REG16(0x08d2)
+#define CCS_R_USL_CLOCK_MODE_D_CTRL CCI_REG8(0x08d2)
#define CCS_USL_CLOCK_MODE_D_CTRL_CONT_CLOCK_STANDBY BIT(0)
#define CCS_USL_CLOCK_MODE_D_CTRL_CONT_CLOCK_VBLANK BIT(1)
#define CCS_USL_CLOCK_MODE_D_CTRL_CONT_CLOCK_HBLANK BIT(2)
-#define CCS_R_BINNING_MODE 0x0900
-#define CCS_R_BINNING_TYPE 0x0901
-#define CCS_R_BINNING_WEIGHTING 0x0902
-#define CCS_R_DATA_TRANSFER_IF_1_CTRL 0x0a00
+#define CCS_R_BINNING_MODE CCI_REG8(0x0900)
+#define CCS_R_BINNING_TYPE CCI_REG8(0x0901)
+#define CCS_R_BINNING_WEIGHTING CCI_REG8(0x0902)
+#define CCS_R_DATA_TRANSFER_IF_1_CTRL CCI_REG8(0x0a00)
#define CCS_DATA_TRANSFER_IF_1_CTRL_ENABLE BIT(0)
#define CCS_DATA_TRANSFER_IF_1_CTRL_WRITE BIT(1)
#define CCS_DATA_TRANSFER_IF_1_CTRL_CLEAR_ERROR BIT(2)
-#define CCS_R_DATA_TRANSFER_IF_1_STATUS 0x0a01
+#define CCS_R_DATA_TRANSFER_IF_1_STATUS CCI_REG8(0x0a01)
#define CCS_DATA_TRANSFER_IF_1_STATUS_READ_IF_READY BIT(0)
#define CCS_DATA_TRANSFER_IF_1_STATUS_WRITE_IF_READY BIT(1)
#define CCS_DATA_TRANSFER_IF_1_STATUS_DATA_CORRUPTED BIT(2)
#define CCS_DATA_TRANSFER_IF_1_STATUS_IMPROPER_IF_USAGE BIT(3)
-#define CCS_R_DATA_TRANSFER_IF_1_PAGE_SELECT 0x0a02
-#define CCS_R_DATA_TRANSFER_IF_1_DATA(p) (0x0a04 + (p))
+#define CCS_R_DATA_TRANSFER_IF_1_PAGE_SELECT CCI_REG8(0x0a02)
+#define CCS_R_DATA_TRANSFER_IF_1_DATA(p) CCI_REG8(0x0a04 + (p))
#define CCS_LIM_DATA_TRANSFER_IF_1_DATA_MIN_P 0U
#define CCS_LIM_DATA_TRANSFER_IF_1_DATA_MAX_P 63U
-#define CCS_R_SHADING_CORRECTION_EN 0x0b00
+#define CCS_R_SHADING_CORRECTION_EN CCI_REG8(0x0b00)
#define CCS_SHADING_CORRECTION_EN_ENABLE BIT(0)
-#define CCS_R_LUMINANCE_CORRECTION_LEVEL 0x0b01
-#define CCS_R_GREEN_IMBALANCE_FILTER_EN 0x0b02
+#define CCS_R_LUMINANCE_CORRECTION_LEVEL CCI_REG8(0x0b01)
+#define CCS_R_GREEN_IMBALANCE_FILTER_EN CCI_REG8(0x0b02)
#define CCS_GREEN_IMBALANCE_FILTER_EN_ENABLE BIT(0)
-#define CCS_R_MAPPED_DEFECT_CORRECT_EN 0x0b05
+#define CCS_R_MAPPED_DEFECT_CORRECT_EN CCI_REG8(0x0b05)
#define CCS_MAPPED_DEFECT_CORRECT_EN_ENABLE BIT(0)
-#define CCS_R_SINGLE_DEFECT_CORRECT_EN 0x0b06
+#define CCS_R_SINGLE_DEFECT_CORRECT_EN CCI_REG8(0x0b06)
#define CCS_SINGLE_DEFECT_CORRECT_EN_ENABLE BIT(0)
-#define CCS_R_DYNAMIC_COUPLET_CORRECT_EN 0x0b08
+#define CCS_R_DYNAMIC_COUPLET_CORRECT_EN CCI_REG8(0x0b08)
#define CCS_DYNAMIC_COUPLET_CORRECT_EN_ENABLE BIT(0)
-#define CCS_R_COMBINED_DEFECT_CORRECT_EN 0x0b0a
+#define CCS_R_COMBINED_DEFECT_CORRECT_EN CCI_REG8(0x0b0a)
#define CCS_COMBINED_DEFECT_CORRECT_EN_ENABLE BIT(0)
-#define CCS_R_MODULE_SPECIFIC_CORRECTION_EN 0x0b0c
+#define CCS_R_MODULE_SPECIFIC_CORRECTION_EN CCI_REG8(0x0b0c)
#define CCS_MODULE_SPECIFIC_CORRECTION_EN_ENABLE BIT(0)
-#define CCS_R_DYNAMIC_TRIPLET_DEFECT_CORRECT_EN 0x0b13
+#define CCS_R_DYNAMIC_TRIPLET_DEFECT_CORRECT_EN CCI_REG8(0x0b13)
#define CCS_DYNAMIC_TRIPLET_DEFECT_CORRECT_EN_ENABLE BIT(0)
-#define CCS_R_NF_CTRL 0x0b15
+#define CCS_R_NF_CTRL CCI_REG8(0x0b15)
#define CCS_NF_CTRL_LUMA BIT(0)
#define CCS_NF_CTRL_CHROMA BIT(1)
#define CCS_NF_CTRL_COMBINED BIT(2)
-#define CCS_R_OB_READOUT_CONTROL 0x0b30
+#define CCS_R_OB_READOUT_CONTROL CCI_REG8(0x0b30)
#define CCS_OB_READOUT_CONTROL_ENABLE BIT(0)
#define CCS_OB_READOUT_CONTROL_INTERLEAVING BIT(1)
-#define CCS_R_OB_VIRTUAL_CHANNEL 0x0b31
-#define CCS_R_OB_DT 0x0b32
-#define CCS_R_OB_DATA_FORMAT 0x0b33
-#define CCS_R_COLOR_TEMPERATURE (0x0b8c | CCS_FL_16BIT)
-#define CCS_R_ABSOLUTE_GAIN_GREENR (0x0b8e | CCS_FL_16BIT)
-#define CCS_R_ABSOLUTE_GAIN_RED (0x0b90 | CCS_FL_16BIT)
-#define CCS_R_ABSOLUTE_GAIN_BLUE (0x0b92 | CCS_FL_16BIT)
-#define CCS_R_ABSOLUTE_GAIN_GREENB (0x0b94 | CCS_FL_16BIT)
-#define CCS_R_CFA_CONVERSION_CTRL 0x0ba0
+#define CCS_R_OB_VIRTUAL_CHANNEL CCI_REG8(0x0b31)
+#define CCS_R_OB_DT CCI_REG8(0x0b32)
+#define CCS_R_OB_DATA_FORMAT CCI_REG8(0x0b33)
+#define CCS_R_COLOR_TEMPERATURE CCI_REG16(0x0b8c)
+#define CCS_R_ABSOLUTE_GAIN_GREENR CCI_REG16(0x0b8e)
+#define CCS_R_ABSOLUTE_GAIN_RED CCI_REG16(0x0b90)
+#define CCS_R_ABSOLUTE_GAIN_BLUE CCI_REG16(0x0b92)
+#define CCS_R_ABSOLUTE_GAIN_GREENB CCI_REG16(0x0b94)
+#define CCS_R_CFA_CONVERSION_CTRL CCI_REG8(0x0ba0)
#define CCS_CFA_CONVERSION_CTRL_BAYER_CONVERSION_ENABLE BIT(0)
-#define CCS_R_FLASH_STROBE_ADJUSTMENT 0x0c12
-#define CCS_R_FLASH_STROBE_START_POINT (0x0c14 | CCS_FL_16BIT)
-#define CCS_R_TFLASH_STROBE_DELAY_RS_CTRL (0x0c16 | CCS_FL_16BIT)
-#define CCS_R_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL (0x0c18 | CCS_FL_16BIT)
-#define CCS_R_FLASH_MODE_RS 0x0c1a
+#define CCS_R_FLASH_STROBE_ADJUSTMENT CCI_REG8(0x0c12)
+#define CCS_R_FLASH_STROBE_START_POINT CCI_REG16(0x0c14)
+#define CCS_R_TFLASH_STROBE_DELAY_RS_CTRL CCI_REG16(0x0c16)
+#define CCS_R_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL CCI_REG16(0x0c18)
+#define CCS_R_FLASH_MODE_RS CCI_REG8(0x0c1a)
#define CCS_FLASH_MODE_RS_CONTINUOUS BIT(0)
#define CCS_FLASH_MODE_RS_TRUNCATE BIT(1)
#define CCS_FLASH_MODE_RS_ASYNC BIT(3)
-#define CCS_R_FLASH_TRIGGER_RS 0x0c1b
-#define CCS_R_FLASH_STATUS 0x0c1c
+#define CCS_R_FLASH_TRIGGER_RS CCI_REG8(0x0c1b)
+#define CCS_R_FLASH_STATUS CCI_REG8(0x0c1c)
#define CCS_FLASH_STATUS_RETIMED BIT(0)
-#define CCS_R_SA_STROBE_MODE 0x0c1d
+#define CCS_R_SA_STROBE_MODE CCI_REG8(0x0c1d)
#define CCS_SA_STROBE_MODE_CONTINUOUS BIT(0)
#define CCS_SA_STROBE_MODE_TRUNCATE BIT(1)
#define CCS_SA_STROBE_MODE_ASYNC BIT(3)
#define CCS_SA_STROBE_MODE_ADJUST_EDGE BIT(4)
-#define CCS_R_SA_STROBE_START_POINT (0x0c1e | CCS_FL_16BIT)
-#define CCS_R_TSA_STROBE_DELAY_CTRL (0x0c20 | CCS_FL_16BIT)
-#define CCS_R_TSA_STROBE_WIDTH_CTRL (0x0c22 | CCS_FL_16BIT)
-#define CCS_R_SA_STROBE_TRIGGER 0x0c24
-#define CCS_R_SA_STROBE_STATUS 0x0c25
+#define CCS_R_SA_STROBE_START_POINT CCI_REG16(0x0c1e)
+#define CCS_R_TSA_STROBE_DELAY_CTRL CCI_REG16(0x0c20)
+#define CCS_R_TSA_STROBE_WIDTH_CTRL CCI_REG16(0x0c22)
+#define CCS_R_SA_STROBE_TRIGGER CCI_REG8(0x0c24)
+#define CCS_R_SA_STROBE_STATUS CCI_REG8(0x0c25)
#define CCS_SA_STROBE_STATUS_RETIMED BIT(0)
-#define CCS_R_TSA_STROBE_RE_DELAY_CTRL (0x0c30 | CCS_FL_16BIT)
-#define CCS_R_TSA_STROBE_FE_DELAY_CTRL (0x0c32 | CCS_FL_16BIT)
-#define CCS_R_PDAF_CTRL (0x0d00 | CCS_FL_16BIT)
+#define CCS_R_TSA_STROBE_RE_DELAY_CTRL CCI_REG16(0x0c30)
+#define CCS_R_TSA_STROBE_FE_DELAY_CTRL CCI_REG16(0x0c32)
+#define CCS_R_PDAF_CTRL CCI_REG16(0x0d00)
#define CCS_PDAF_CTRL_ENABLE BIT(0)
#define CCS_PDAF_CTRL_PROCESSED BIT(1)
#define CCS_PDAF_CTRL_INTERLEAVED BIT(2)
#define CCS_PDAF_CTRL_VISIBLE_PDAF_CORRECTION BIT(3)
-#define CCS_R_PDAF_VC 0x0d02
-#define CCS_R_PDAF_DT 0x0d03
-#define CCS_R_PD_X_ADDR_START (0x0d04 | CCS_FL_16BIT)
-#define CCS_R_PD_Y_ADDR_START (0x0d06 | CCS_FL_16BIT)
-#define CCS_R_PD_X_ADDR_END (0x0d08 | CCS_FL_16BIT)
-#define CCS_R_PD_Y_ADDR_END (0x0d0a | CCS_FL_16BIT)
-#define CCS_R_BRACKETING_LUT_CTRL 0x0e00
-#define CCS_R_BRACKETING_LUT_MODE 0x0e01
+#define CCS_R_PDAF_VC CCI_REG8(0x0d02)
+#define CCS_R_PDAF_DT CCI_REG8(0x0d03)
+#define CCS_R_PD_X_ADDR_START CCI_REG16(0x0d04)
+#define CCS_R_PD_Y_ADDR_START CCI_REG16(0x0d06)
+#define CCS_R_PD_X_ADDR_END CCI_REG16(0x0d08)
+#define CCS_R_PD_Y_ADDR_END CCI_REG16(0x0d0a)
+#define CCS_R_BRACKETING_LUT_CTRL CCI_REG8(0x0e00)
+#define CCS_R_BRACKETING_LUT_MODE CCI_REG8(0x0e01)
#define CCS_BRACKETING_LUT_MODE_CONTINUE_STREAMING BIT(0)
#define CCS_BRACKETING_LUT_MODE_LOOP_MODE BIT(1)
-#define CCS_R_BRACKETING_LUT_ENTRY_CTRL 0x0e02
-#define CCS_R_BRACKETING_LUT_FRAME(n) (0x0e10 + (n))
+#define CCS_R_BRACKETING_LUT_ENTRY_CTRL CCI_REG8(0x0e02)
+#define CCS_R_BRACKETING_LUT_FRAME(n) CCI_REG8(0x0e10 + (n))
#define CCS_LIM_BRACKETING_LUT_FRAME_MIN_N 0U
#define CCS_LIM_BRACKETING_LUT_FRAME_MAX_N 239U
-#define CCS_R_INTEGRATION_TIME_CAPABILITY (0x1000 | CCS_FL_16BIT)
+#define CCS_R_INTEGRATION_TIME_CAPABILITY CCI_REG16(0x1000)
#define CCS_INTEGRATION_TIME_CAPABILITY_FINE BIT(0)
-#define CCS_R_COARSE_INTEGRATION_TIME_MIN (0x1004 | CCS_FL_16BIT)
-#define CCS_R_COARSE_INTEGRATION_TIME_MAX_MARGIN (0x1006 | CCS_FL_16BIT)
-#define CCS_R_FINE_INTEGRATION_TIME_MIN (0x1008 | CCS_FL_16BIT)
-#define CCS_R_FINE_INTEGRATION_TIME_MAX_MARGIN (0x100a | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_GAIN_CAPABILITY 0x1081
+#define CCS_R_COARSE_INTEGRATION_TIME_MIN CCI_REG16(0x1004)
+#define CCS_R_COARSE_INTEGRATION_TIME_MAX_MARGIN CCI_REG16(0x1006)
+#define CCS_R_FINE_INTEGRATION_TIME_MIN CCI_REG16(0x1008)
+#define CCS_R_FINE_INTEGRATION_TIME_MAX_MARGIN CCI_REG16(0x100a)
+#define CCS_R_DIGITAL_GAIN_CAPABILITY CCI_REG8(0x1081)
#define CCS_DIGITAL_GAIN_CAPABILITY_NONE 0U
#define CCS_DIGITAL_GAIN_CAPABILITY_GLOBAL 2U
-#define CCS_R_DIGITAL_GAIN_MIN (0x1084 | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_GAIN_MAX (0x1086 | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_GAIN_STEP_SIZE (0x1088 | CCS_FL_16BIT)
-#define CCS_R_PEDESTAL_CAPABILITY 0x10e0
-#define CCS_R_ADC_CAPABILITY 0x10f0
+#define CCS_R_DIGITAL_GAIN_MIN CCI_REG16(0x1084)
+#define CCS_R_DIGITAL_GAIN_MAX CCI_REG16(0x1086)
+#define CCS_R_DIGITAL_GAIN_STEP_SIZE CCI_REG16(0x1088)
+#define CCS_R_PEDESTAL_CAPABILITY CCI_REG8(0x10e0)
+#define CCS_R_ADC_CAPABILITY CCI_REG8(0x10f0)
#define CCS_ADC_CAPABILITY_BIT_DEPTH_CTRL BIT(0)
-#define CCS_R_ADC_BIT_DEPTH_CAPABILITY (0x10f4 | CCS_FL_32BIT)
-#define CCS_R_MIN_EXT_CLK_FREQ_MHZ (0x1100 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_EXT_CLK_FREQ_MHZ (0x1104 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_PRE_PLL_CLK_DIV (0x1108 | CCS_FL_16BIT)
-#define CCS_R_MAX_PRE_PLL_CLK_DIV (0x110a | CCS_FL_16BIT)
-#define CCS_R_MIN_PLL_IP_CLK_FREQ_MHZ (0x110c | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_PLL_IP_CLK_FREQ_MHZ (0x1110 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_PLL_MULTIPLIER (0x1114 | CCS_FL_16BIT)
-#define CCS_R_MAX_PLL_MULTIPLIER (0x1116 | CCS_FL_16BIT)
-#define CCS_R_MIN_PLL_OP_CLK_FREQ_MHZ (0x1118 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_PLL_OP_CLK_FREQ_MHZ (0x111c | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_VT_SYS_CLK_DIV (0x1120 | CCS_FL_16BIT)
-#define CCS_R_MAX_VT_SYS_CLK_DIV (0x1122 | CCS_FL_16BIT)
-#define CCS_R_MIN_VT_SYS_CLK_FREQ_MHZ (0x1124 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_VT_SYS_CLK_FREQ_MHZ (0x1128 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_VT_PIX_CLK_FREQ_MHZ (0x112c | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_VT_PIX_CLK_FREQ_MHZ (0x1130 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_VT_PIX_CLK_DIV (0x1134 | CCS_FL_16BIT)
-#define CCS_R_MAX_VT_PIX_CLK_DIV (0x1136 | CCS_FL_16BIT)
-#define CCS_R_CLOCK_CALCULATION 0x1138
+#define CCS_R_ADC_BIT_DEPTH_CAPABILITY CCI_REG32(0x10f4)
+#define CCS_R_MIN_EXT_CLK_FREQ_MHZ (CCI_REG32(0x1100) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_EXT_CLK_FREQ_MHZ (CCI_REG32(0x1104) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_PRE_PLL_CLK_DIV CCI_REG16(0x1108)
+#define CCS_R_MAX_PRE_PLL_CLK_DIV CCI_REG16(0x110a)
+#define CCS_R_MIN_PLL_IP_CLK_FREQ_MHZ (CCI_REG32(0x110c) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_PLL_IP_CLK_FREQ_MHZ (CCI_REG32(0x1110) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_PLL_MULTIPLIER CCI_REG16(0x1114)
+#define CCS_R_MAX_PLL_MULTIPLIER CCI_REG16(0x1116)
+#define CCS_R_MIN_PLL_OP_CLK_FREQ_MHZ (CCI_REG32(0x1118) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_PLL_OP_CLK_FREQ_MHZ (CCI_REG32(0x111c) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_VT_SYS_CLK_DIV CCI_REG16(0x1120)
+#define CCS_R_MAX_VT_SYS_CLK_DIV CCI_REG16(0x1122)
+#define CCS_R_MIN_VT_SYS_CLK_FREQ_MHZ (CCI_REG32(0x1124) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_VT_SYS_CLK_FREQ_MHZ (CCI_REG32(0x1128) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_VT_PIX_CLK_FREQ_MHZ (CCI_REG32(0x112c) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_VT_PIX_CLK_FREQ_MHZ (CCI_REG32(0x1130) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_VT_PIX_CLK_DIV CCI_REG16(0x1134)
+#define CCS_R_MAX_VT_PIX_CLK_DIV CCI_REG16(0x1136)
+#define CCS_R_CLOCK_CALCULATION CCI_REG8(0x1138)
#define CCS_CLOCK_CALCULATION_LANE_SPEED BIT(0)
#define CCS_CLOCK_CALCULATION_LINK_DECOUPLED BIT(1)
#define CCS_CLOCK_CALCULATION_DUAL_PLL_OP_SYS_DDR BIT(2)
#define CCS_CLOCK_CALCULATION_DUAL_PLL_OP_PIX_DDR BIT(3)
-#define CCS_R_NUM_OF_VT_LANES 0x1139
-#define CCS_R_NUM_OF_OP_LANES 0x113a
-#define CCS_R_OP_BITS_PER_LANE 0x113b
-#define CCS_R_MIN_FRAME_LENGTH_LINES (0x1140 | CCS_FL_16BIT)
-#define CCS_R_MAX_FRAME_LENGTH_LINES (0x1142 | CCS_FL_16BIT)
-#define CCS_R_MIN_LINE_LENGTH_PCK (0x1144 | CCS_FL_16BIT)
-#define CCS_R_MAX_LINE_LENGTH_PCK (0x1146 | CCS_FL_16BIT)
-#define CCS_R_MIN_LINE_BLANKING_PCK (0x1148 | CCS_FL_16BIT)
-#define CCS_R_MIN_FRAME_BLANKING_LINES (0x114a | CCS_FL_16BIT)
-#define CCS_R_MIN_LINE_LENGTH_PCK_STEP_SIZE 0x114c
-#define CCS_R_TIMING_MODE_CAPABILITY 0x114d
+#define CCS_R_NUM_OF_VT_LANES CCI_REG8(0x1139)
+#define CCS_R_NUM_OF_OP_LANES CCI_REG8(0x113a)
+#define CCS_R_OP_BITS_PER_LANE CCI_REG8(0x113b)
+#define CCS_R_MIN_FRAME_LENGTH_LINES CCI_REG16(0x1140)
+#define CCS_R_MAX_FRAME_LENGTH_LINES CCI_REG16(0x1142)
+#define CCS_R_MIN_LINE_LENGTH_PCK CCI_REG16(0x1144)
+#define CCS_R_MAX_LINE_LENGTH_PCK CCI_REG16(0x1146)
+#define CCS_R_MIN_LINE_BLANKING_PCK CCI_REG16(0x1148)
+#define CCS_R_MIN_FRAME_BLANKING_LINES CCI_REG16(0x114a)
+#define CCS_R_MIN_LINE_LENGTH_PCK_STEP_SIZE CCI_REG8(0x114c)
+#define CCS_R_TIMING_MODE_CAPABILITY CCI_REG8(0x114d)
#define CCS_TIMING_MODE_CAPABILITY_AUTO_FRAME_LENGTH BIT(0)
#define CCS_TIMING_MODE_CAPABILITY_ROLLING_SHUTTER_MANUAL_READOUT BIT(2)
#define CCS_TIMING_MODE_CAPABILITY_DELAYED_EXPOSURE_START BIT(3)
#define CCS_TIMING_MODE_CAPABILITY_MANUAL_EXPOSURE_EMBEDDED_DATA BIT(4)
-#define CCS_R_FRAME_MARGIN_MAX_VALUE (0x114e | CCS_FL_16BIT)
-#define CCS_R_FRAME_MARGIN_MIN_VALUE 0x1150
-#define CCS_R_GAIN_DELAY_TYPE 0x1151
+#define CCS_R_FRAME_MARGIN_MAX_VALUE CCI_REG16(0x114e)
+#define CCS_R_FRAME_MARGIN_MIN_VALUE CCI_REG8(0x1150)
+#define CCS_R_GAIN_DELAY_TYPE CCI_REG8(0x1151)
#define CCS_GAIN_DELAY_TYPE_FIXED 0U
#define CCS_GAIN_DELAY_TYPE_VARIABLE 1U
-#define CCS_R_MIN_OP_SYS_CLK_DIV (0x1160 | CCS_FL_16BIT)
-#define CCS_R_MAX_OP_SYS_CLK_DIV (0x1162 | CCS_FL_16BIT)
-#define CCS_R_MIN_OP_SYS_CLK_FREQ_MHZ (0x1164 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_OP_SYS_CLK_FREQ_MHZ (0x1168 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_OP_PIX_CLK_DIV (0x116c | CCS_FL_16BIT)
-#define CCS_R_MAX_OP_PIX_CLK_DIV (0x116e | CCS_FL_16BIT)
-#define CCS_R_MIN_OP_PIX_CLK_FREQ_MHZ (0x1170 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_OP_PIX_CLK_FREQ_MHZ (0x1174 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_X_ADDR_MIN (0x1180 | CCS_FL_16BIT)
-#define CCS_R_Y_ADDR_MIN (0x1182 | CCS_FL_16BIT)
-#define CCS_R_X_ADDR_MAX (0x1184 | CCS_FL_16BIT)
-#define CCS_R_Y_ADDR_MAX (0x1186 | CCS_FL_16BIT)
-#define CCS_R_MIN_X_OUTPUT_SIZE (0x1188 | CCS_FL_16BIT)
-#define CCS_R_MIN_Y_OUTPUT_SIZE (0x118a | CCS_FL_16BIT)
-#define CCS_R_MAX_X_OUTPUT_SIZE (0x118c | CCS_FL_16BIT)
-#define CCS_R_MAX_Y_OUTPUT_SIZE (0x118e | CCS_FL_16BIT)
-#define CCS_R_X_ADDR_START_DIV_CONSTANT 0x1190
-#define CCS_R_Y_ADDR_START_DIV_CONSTANT 0x1191
-#define CCS_R_X_ADDR_END_DIV_CONSTANT 0x1192
-#define CCS_R_Y_ADDR_END_DIV_CONSTANT 0x1193
-#define CCS_R_X_SIZE_DIV 0x1194
-#define CCS_R_Y_SIZE_DIV 0x1195
-#define CCS_R_X_OUTPUT_DIV 0x1196
-#define CCS_R_Y_OUTPUT_DIV 0x1197
-#define CCS_R_NON_FLEXIBLE_RESOLUTION_SUPPORT 0x1198
+#define CCS_R_MIN_OP_SYS_CLK_DIV CCI_REG16(0x1160)
+#define CCS_R_MAX_OP_SYS_CLK_DIV CCI_REG16(0x1162)
+#define CCS_R_MIN_OP_SYS_CLK_FREQ_MHZ (CCI_REG32(0x1164) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_OP_SYS_CLK_FREQ_MHZ (CCI_REG32(0x1168) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_OP_PIX_CLK_DIV CCI_REG16(0x116c)
+#define CCS_R_MAX_OP_PIX_CLK_DIV CCI_REG16(0x116e)
+#define CCS_R_MIN_OP_PIX_CLK_FREQ_MHZ (CCI_REG32(0x1170) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_OP_PIX_CLK_FREQ_MHZ (CCI_REG32(0x1174) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_X_ADDR_MIN CCI_REG16(0x1180)
+#define CCS_R_Y_ADDR_MIN CCI_REG16(0x1182)
+#define CCS_R_X_ADDR_MAX CCI_REG16(0x1184)
+#define CCS_R_Y_ADDR_MAX CCI_REG16(0x1186)
+#define CCS_R_MIN_X_OUTPUT_SIZE CCI_REG16(0x1188)
+#define CCS_R_MIN_Y_OUTPUT_SIZE CCI_REG16(0x118a)
+#define CCS_R_MAX_X_OUTPUT_SIZE CCI_REG16(0x118c)
+#define CCS_R_MAX_Y_OUTPUT_SIZE CCI_REG16(0x118e)
+#define CCS_R_X_ADDR_START_DIV_CONSTANT CCI_REG8(0x1190)
+#define CCS_R_Y_ADDR_START_DIV_CONSTANT CCI_REG8(0x1191)
+#define CCS_R_X_ADDR_END_DIV_CONSTANT CCI_REG8(0x1192)
+#define CCS_R_Y_ADDR_END_DIV_CONSTANT CCI_REG8(0x1193)
+#define CCS_R_X_SIZE_DIV CCI_REG8(0x1194)
+#define CCS_R_Y_SIZE_DIV CCI_REG8(0x1195)
+#define CCS_R_X_OUTPUT_DIV CCI_REG8(0x1196)
+#define CCS_R_Y_OUTPUT_DIV CCI_REG8(0x1197)
+#define CCS_R_NON_FLEXIBLE_RESOLUTION_SUPPORT CCI_REG8(0x1198)
#define CCS_NON_FLEXIBLE_RESOLUTION_SUPPORT_NEW_PIX_ADDR BIT(0)
#define CCS_NON_FLEXIBLE_RESOLUTION_SUPPORT_NEW_OUTPUT_RES BIT(1)
#define CCS_NON_FLEXIBLE_RESOLUTION_SUPPORT_OUTPUT_CROP_NO_PAD BIT(2)
#define CCS_NON_FLEXIBLE_RESOLUTION_SUPPORT_OUTPUT_SIZE_LANE_DEP BIT(3)
-#define CCS_R_MIN_OP_PRE_PLL_CLK_DIV (0x11a0 | CCS_FL_16BIT)
-#define CCS_R_MAX_OP_PRE_PLL_CLK_DIV (0x11a2 | CCS_FL_16BIT)
-#define CCS_R_MIN_OP_PLL_IP_CLK_FREQ_MHZ (0x11a4 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_OP_PLL_IP_CLK_FREQ_MHZ (0x11a8 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_OP_PLL_MULTIPLIER (0x11ac | CCS_FL_16BIT)
-#define CCS_R_MAX_OP_PLL_MULTIPLIER (0x11ae | CCS_FL_16BIT)
-#define CCS_R_MIN_OP_PLL_OP_CLK_FREQ_MHZ (0x11b0 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_OP_PLL_OP_CLK_FREQ_MHZ (0x11b4 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_CLOCK_TREE_PLL_CAPABILITY 0x11b8
+#define CCS_R_MIN_OP_PRE_PLL_CLK_DIV CCI_REG16(0x11a0)
+#define CCS_R_MAX_OP_PRE_PLL_CLK_DIV CCI_REG16(0x11a2)
+#define CCS_R_MIN_OP_PLL_IP_CLK_FREQ_MHZ (CCI_REG32(0x11a4) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_OP_PLL_IP_CLK_FREQ_MHZ (CCI_REG32(0x11a8) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_OP_PLL_MULTIPLIER CCI_REG16(0x11ac)
+#define CCS_R_MAX_OP_PLL_MULTIPLIER CCI_REG16(0x11ae)
+#define CCS_R_MIN_OP_PLL_OP_CLK_FREQ_MHZ (CCI_REG32(0x11b0) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_OP_PLL_OP_CLK_FREQ_MHZ (CCI_REG32(0x11b4) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_CLOCK_TREE_PLL_CAPABILITY CCI_REG8(0x11b8)
#define CCS_CLOCK_TREE_PLL_CAPABILITY_DUAL_PLL BIT(0)
#define CCS_CLOCK_TREE_PLL_CAPABILITY_SINGLE_PLL BIT(1)
#define CCS_CLOCK_TREE_PLL_CAPABILITY_EXT_DIVIDER BIT(2)
#define CCS_CLOCK_TREE_PLL_CAPABILITY_FLEXIBLE_OP_PIX_CLK_DIV BIT(3)
-#define CCS_R_CLOCK_CAPA_TYPE_CAPABILITY 0x11b9
+#define CCS_R_CLOCK_CAPA_TYPE_CAPABILITY CCI_REG8(0x11b9)
#define CCS_CLOCK_CAPA_TYPE_CAPABILITY_IREAL BIT(0)
-#define CCS_R_MIN_EVEN_INC (0x11c0 | CCS_FL_16BIT)
-#define CCS_R_MIN_ODD_INC (0x11c2 | CCS_FL_16BIT)
-#define CCS_R_MAX_EVEN_INC (0x11c4 | CCS_FL_16BIT)
-#define CCS_R_MAX_ODD_INC (0x11c6 | CCS_FL_16BIT)
-#define CCS_R_AUX_SUBSAMP_CAPABILITY 0x11c8
+#define CCS_R_MIN_EVEN_INC CCI_REG16(0x11c0)
+#define CCS_R_MIN_ODD_INC CCI_REG16(0x11c2)
+#define CCS_R_MAX_EVEN_INC CCI_REG16(0x11c4)
+#define CCS_R_MAX_ODD_INC CCI_REG16(0x11c6)
+#define CCS_R_AUX_SUBSAMP_CAPABILITY CCI_REG8(0x11c8)
#define CCS_AUX_SUBSAMP_CAPABILITY_FACTOR_POWER_OF_2 BIT(1)
-#define CCS_R_AUX_SUBSAMP_MONO_CAPABILITY 0x11c9
+#define CCS_R_AUX_SUBSAMP_MONO_CAPABILITY CCI_REG8(0x11c9)
#define CCS_AUX_SUBSAMP_MONO_CAPABILITY_FACTOR_POWER_OF_2 BIT(1)
-#define CCS_R_MONOCHROME_CAPABILITY 0x11ca
+#define CCS_R_MONOCHROME_CAPABILITY CCI_REG8(0x11ca)
#define CCS_MONOCHROME_CAPABILITY_INC_ODD 0U
#define CCS_MONOCHROME_CAPABILITY_INC_EVEN 1U
-#define CCS_R_PIXEL_READOUT_CAPABILITY 0x11cb
+#define CCS_R_PIXEL_READOUT_CAPABILITY CCI_REG8(0x11cb)
#define CCS_PIXEL_READOUT_CAPABILITY_BAYER 0U
#define CCS_PIXEL_READOUT_CAPABILITY_MONOCHROME 1U
#define CCS_PIXEL_READOUT_CAPABILITY_BAYER_AND_MONO 2U
-#define CCS_R_MIN_EVEN_INC_MONO (0x11cc | CCS_FL_16BIT)
-#define CCS_R_MAX_EVEN_INC_MONO (0x11ce | CCS_FL_16BIT)
-#define CCS_R_MIN_ODD_INC_MONO (0x11d0 | CCS_FL_16BIT)
-#define CCS_R_MAX_ODD_INC_MONO (0x11d2 | CCS_FL_16BIT)
-#define CCS_R_MIN_EVEN_INC_BC2 (0x11d4 | CCS_FL_16BIT)
-#define CCS_R_MAX_EVEN_INC_BC2 (0x11d6 | CCS_FL_16BIT)
-#define CCS_R_MIN_ODD_INC_BC2 (0x11d8 | CCS_FL_16BIT)
-#define CCS_R_MAX_ODD_INC_BC2 (0x11da | CCS_FL_16BIT)
-#define CCS_R_MIN_EVEN_INC_MONO_BC2 (0x11dc | CCS_FL_16BIT)
-#define CCS_R_MAX_EVEN_INC_MONO_BC2 (0x11de | CCS_FL_16BIT)
-#define CCS_R_MIN_ODD_INC_MONO_BC2 (0x11f0 | CCS_FL_16BIT)
-#define CCS_R_MAX_ODD_INC_MONO_BC2 (0x11f2 | CCS_FL_16BIT)
-#define CCS_R_SCALING_CAPABILITY (0x1200 | CCS_FL_16BIT)
+#define CCS_R_MIN_EVEN_INC_MONO CCI_REG16(0x11cc)
+#define CCS_R_MAX_EVEN_INC_MONO CCI_REG16(0x11ce)
+#define CCS_R_MIN_ODD_INC_MONO CCI_REG16(0x11d0)
+#define CCS_R_MAX_ODD_INC_MONO CCI_REG16(0x11d2)
+#define CCS_R_MIN_EVEN_INC_BC2 CCI_REG16(0x11d4)
+#define CCS_R_MAX_EVEN_INC_BC2 CCI_REG16(0x11d6)
+#define CCS_R_MIN_ODD_INC_BC2 CCI_REG16(0x11d8)
+#define CCS_R_MAX_ODD_INC_BC2 CCI_REG16(0x11da)
+#define CCS_R_MIN_EVEN_INC_MONO_BC2 CCI_REG16(0x11dc)
+#define CCS_R_MAX_EVEN_INC_MONO_BC2 CCI_REG16(0x11de)
+#define CCS_R_MIN_ODD_INC_MONO_BC2 CCI_REG16(0x11f0)
+#define CCS_R_MAX_ODD_INC_MONO_BC2 CCI_REG16(0x11f2)
+#define CCS_R_SCALING_CAPABILITY CCI_REG16(0x1200)
#define CCS_SCALING_CAPABILITY_NONE 0U
#define CCS_SCALING_CAPABILITY_HORIZONTAL 1U
#define CCS_SCALING_CAPABILITY_RESERVED 2U
-#define CCS_R_SCALER_M_MIN (0x1204 | CCS_FL_16BIT)
-#define CCS_R_SCALER_M_MAX (0x1206 | CCS_FL_16BIT)
-#define CCS_R_SCALER_N_MIN (0x1208 | CCS_FL_16BIT)
-#define CCS_R_SCALER_N_MAX (0x120a | CCS_FL_16BIT)
-#define CCS_R_DIGITAL_CROP_CAPABILITY 0x120e
+#define CCS_R_SCALER_M_MIN CCI_REG16(0x1204)
+#define CCS_R_SCALER_M_MAX CCI_REG16(0x1206)
+#define CCS_R_SCALER_N_MIN CCI_REG16(0x1208)
+#define CCS_R_SCALER_N_MAX CCI_REG16(0x120a)
+#define CCS_R_DIGITAL_CROP_CAPABILITY CCI_REG8(0x120e)
#define CCS_DIGITAL_CROP_CAPABILITY_NONE 0U
#define CCS_DIGITAL_CROP_CAPABILITY_INPUT_CROP 1U
-#define CCS_R_HDR_CAPABILITY_1 0x1210
+#define CCS_R_HDR_CAPABILITY_1 CCI_REG8(0x1210)
#define CCS_HDR_CAPABILITY_1_2X2_BINNING BIT(0)
#define CCS_HDR_CAPABILITY_1_COMBINED_ANALOG_GAIN BIT(1)
#define CCS_HDR_CAPABILITY_1_SEPARATE_ANALOG_GAIN BIT(2)
@@ -611,66 +611,66 @@
#define CCS_HDR_CAPABILITY_1_RESET_SYNC BIT(4)
#define CCS_HDR_CAPABILITY_1_DIRECT_SHORT_EXP_TIMING BIT(5)
#define CCS_HDR_CAPABILITY_1_DIRECT_SHORT_EXP_SYNTHESIS BIT(6)
-#define CCS_R_MIN_HDR_BIT_DEPTH 0x1211
-#define CCS_R_HDR_RESOLUTION_SUB_TYPES 0x1212
-#define CCS_R_HDR_RESOLUTION_SUB_TYPE(n) (0x1213 + (n))
+#define CCS_R_MIN_HDR_BIT_DEPTH CCI_REG8(0x1211)
+#define CCS_R_HDR_RESOLUTION_SUB_TYPES CCI_REG8(0x1212)
+#define CCS_R_HDR_RESOLUTION_SUB_TYPE(n) CCI_REG8(0x1213 + (n))
#define CCS_LIM_HDR_RESOLUTION_SUB_TYPE_MIN_N 0U
#define CCS_LIM_HDR_RESOLUTION_SUB_TYPE_MAX_N 1U
#define CCS_HDR_RESOLUTION_SUB_TYPE_ROW_SHIFT 0U
#define CCS_HDR_RESOLUTION_SUB_TYPE_ROW_MASK 0xf
#define CCS_HDR_RESOLUTION_SUB_TYPE_COLUMN_SHIFT 4U
#define CCS_HDR_RESOLUTION_SUB_TYPE_COLUMN_MASK 0xf0
-#define CCS_R_HDR_CAPABILITY_2 0x121b
+#define CCS_R_HDR_CAPABILITY_2 CCI_REG8(0x121b)
#define CCS_HDR_CAPABILITY_2_COMBINED_DIGITAL_GAIN BIT(0)
#define CCS_HDR_CAPABILITY_2_SEPARATE_DIGITAL_GAIN BIT(1)
#define CCS_HDR_CAPABILITY_2_TIMING_MODE BIT(3)
#define CCS_HDR_CAPABILITY_2_SYNTHESIS_MODE BIT(4)
-#define CCS_R_MAX_HDR_BIT_DEPTH 0x121c
-#define CCS_R_USL_SUPPORT_CAPABILITY 0x1230
+#define CCS_R_MAX_HDR_BIT_DEPTH CCI_REG8(0x121c)
+#define CCS_R_USL_SUPPORT_CAPABILITY CCI_REG8(0x1230)
#define CCS_USL_SUPPORT_CAPABILITY_CLOCK_TREE BIT(0)
#define CCS_USL_SUPPORT_CAPABILITY_REV_CLOCK_TREE BIT(1)
#define CCS_USL_SUPPORT_CAPABILITY_REV_CLOCK_CALC BIT(2)
-#define CCS_R_USL_CLOCK_MODE_D_CAPABILITY 0x1231
+#define CCS_R_USL_CLOCK_MODE_D_CAPABILITY CCI_REG8(0x1231)
#define CCS_USL_CLOCK_MODE_D_CAPABILITY_CONT_CLOCK_STANDBY BIT(0)
#define CCS_USL_CLOCK_MODE_D_CAPABILITY_CONT_CLOCK_VBLANK BIT(1)
#define CCS_USL_CLOCK_MODE_D_CAPABILITY_CONT_CLOCK_HBLANK BIT(2)
#define CCS_USL_CLOCK_MODE_D_CAPABILITY_NONCONT_CLOCK_STANDBY BIT(3)
#define CCS_USL_CLOCK_MODE_D_CAPABILITY_NONCONT_CLOCK_VBLANK BIT(4)
#define CCS_USL_CLOCK_MODE_D_CAPABILITY_NONCONT_CLOCK_HBLANK BIT(5)
-#define CCS_R_MIN_OP_SYS_CLK_DIV_REV 0x1234
-#define CCS_R_MAX_OP_SYS_CLK_DIV_REV 0x1236
-#define CCS_R_MIN_OP_PIX_CLK_DIV_REV 0x1238
-#define CCS_R_MAX_OP_PIX_CLK_DIV_REV 0x123a
-#define CCS_R_MIN_OP_SYS_CLK_FREQ_REV_MHZ (0x123c | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_OP_SYS_CLK_FREQ_REV_MHZ (0x1240 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MIN_OP_PIX_CLK_FREQ_REV_MHZ (0x1244 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_OP_PIX_CLK_FREQ_REV_MHZ (0x1248 | (CCS_FL_32BIT | CCS_FL_FLOAT_IREAL))
-#define CCS_R_MAX_BITRATE_REV_D_MODE_MBPS (0x124c | (CCS_FL_32BIT | CCS_FL_IREAL))
-#define CCS_R_MAX_SYMRATE_REV_C_MODE_MSPS (0x1250 | (CCS_FL_32BIT | CCS_FL_IREAL))
-#define CCS_R_COMPRESSION_CAPABILITY 0x1300
+#define CCS_R_MIN_OP_SYS_CLK_DIV_REV CCI_REG8(0x1234)
+#define CCS_R_MAX_OP_SYS_CLK_DIV_REV CCI_REG8(0x1236)
+#define CCS_R_MIN_OP_PIX_CLK_DIV_REV CCI_REG8(0x1238)
+#define CCS_R_MAX_OP_PIX_CLK_DIV_REV CCI_REG8(0x123a)
+#define CCS_R_MIN_OP_SYS_CLK_FREQ_REV_MHZ (CCI_REG32(0x123c) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_OP_SYS_CLK_FREQ_REV_MHZ (CCI_REG32(0x1240) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MIN_OP_PIX_CLK_FREQ_REV_MHZ (CCI_REG32(0x1244) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_OP_PIX_CLK_FREQ_REV_MHZ (CCI_REG32(0x1248) | CCS_FL_FLOAT_IREAL)
+#define CCS_R_MAX_BITRATE_REV_D_MODE_MBPS (CCI_REG32(0x124c) | CCS_FL_IREAL)
+#define CCS_R_MAX_SYMRATE_REV_C_MODE_MSPS (CCI_REG32(0x1250) | CCS_FL_IREAL)
+#define CCS_R_COMPRESSION_CAPABILITY CCI_REG8(0x1300)
#define CCS_COMPRESSION_CAPABILITY_DPCM_PCM_SIMPLE BIT(0)
-#define CCS_R_TEST_MODE_CAPABILITY (0x1310 | CCS_FL_16BIT)
+#define CCS_R_TEST_MODE_CAPABILITY CCI_REG16(0x1310)
#define CCS_TEST_MODE_CAPABILITY_SOLID_COLOR BIT(0)
#define CCS_TEST_MODE_CAPABILITY_COLOR_BARS BIT(1)
#define CCS_TEST_MODE_CAPABILITY_FADE_TO_GREY BIT(2)
#define CCS_TEST_MODE_CAPABILITY_PN9 BIT(3)
#define CCS_TEST_MODE_CAPABILITY_COLOR_TILE BIT(5)
-#define CCS_R_PN9_DATA_FORMAT1 0x1312
-#define CCS_R_PN9_DATA_FORMAT2 0x1313
-#define CCS_R_PN9_DATA_FORMAT3 0x1314
-#define CCS_R_PN9_DATA_FORMAT4 0x1315
-#define CCS_R_PN9_MISC_CAPABILITY 0x1316
+#define CCS_R_PN9_DATA_FORMAT1 CCI_REG8(0x1312)
+#define CCS_R_PN9_DATA_FORMAT2 CCI_REG8(0x1313)
+#define CCS_R_PN9_DATA_FORMAT3 CCI_REG8(0x1314)
+#define CCS_R_PN9_DATA_FORMAT4 CCI_REG8(0x1315)
+#define CCS_R_PN9_MISC_CAPABILITY CCI_REG8(0x1316)
#define CCS_PN9_MISC_CAPABILITY_NUM_PIXELS_SHIFT 0U
#define CCS_PN9_MISC_CAPABILITY_NUM_PIXELS_MASK 0x7
#define CCS_PN9_MISC_CAPABILITY_COMPRESSION BIT(3)
-#define CCS_R_TEST_PATTERN_CAPABILITY 0x1317
+#define CCS_R_TEST_PATTERN_CAPABILITY CCI_REG8(0x1317)
#define CCS_TEST_PATTERN_CAPABILITY_NO_REPEAT BIT(1)
-#define CCS_R_PATTERN_SIZE_DIV_M1 0x1318
-#define CCS_R_FIFO_SUPPORT_CAPABILITY 0x1502
+#define CCS_R_PATTERN_SIZE_DIV_M1 CCI_REG8(0x1318)
+#define CCS_R_FIFO_SUPPORT_CAPABILITY CCI_REG8(0x1502)
#define CCS_FIFO_SUPPORT_CAPABILITY_NONE 0U
#define CCS_FIFO_SUPPORT_CAPABILITY_DERATING 1U
#define CCS_FIFO_SUPPORT_CAPABILITY_DERATING_OVERRATING 2U
-#define CCS_R_PHY_CTRL_CAPABILITY 0x1600
+#define CCS_R_PHY_CTRL_CAPABILITY CCI_REG8(0x1600)
#define CCS_PHY_CTRL_CAPABILITY_AUTO_PHY_CTL BIT(0)
#define CCS_PHY_CTRL_CAPABILITY_UI_PHY_CTL BIT(1)
#define CCS_PHY_CTRL_CAPABILITY_DPHY_TIME_UI_REG_1_CTL BIT(2)
@@ -679,7 +679,7 @@
#define CCS_PHY_CTRL_CAPABILITY_DPHY_EXT_TIME_UI_REG_1_CTL BIT(5)
#define CCS_PHY_CTRL_CAPABILITY_DPHY_EXT_TIME_UI_REG_2_CTL BIT(6)
#define CCS_PHY_CTRL_CAPABILITY_DPHY_EXT_TIME_CTL BIT(7)
-#define CCS_R_CSI_DPHY_LANE_MODE_CAPABILITY 0x1601
+#define CCS_R_CSI_DPHY_LANE_MODE_CAPABILITY CCI_REG8(0x1601)
#define CCS_CSI_DPHY_LANE_MODE_CAPABILITY_1_LANE BIT(0)
#define CCS_CSI_DPHY_LANE_MODE_CAPABILITY_2_LANE BIT(1)
#define CCS_CSI_DPHY_LANE_MODE_CAPABILITY_3_LANE BIT(2)
@@ -688,22 +688,22 @@
#define CCS_CSI_DPHY_LANE_MODE_CAPABILITY_6_LANE BIT(5)
#define CCS_CSI_DPHY_LANE_MODE_CAPABILITY_7_LANE BIT(6)
#define CCS_CSI_DPHY_LANE_MODE_CAPABILITY_8_LANE BIT(7)
-#define CCS_R_CSI_SIGNALING_MODE_CAPABILITY 0x1602
+#define CCS_R_CSI_SIGNALING_MODE_CAPABILITY CCI_REG8(0x1602)
#define CCS_CSI_SIGNALING_MODE_CAPABILITY_CSI_DPHY BIT(2)
#define CCS_CSI_SIGNALING_MODE_CAPABILITY_CSI_CPHY BIT(3)
-#define CCS_R_FAST_STANDBY_CAPABILITY 0x1603
+#define CCS_R_FAST_STANDBY_CAPABILITY CCI_REG8(0x1603)
#define CCS_FAST_STANDBY_CAPABILITY_NO_FRAME_TRUNCATION 0U
#define CCS_FAST_STANDBY_CAPABILITY_FRAME_TRUNCATION 1U
-#define CCS_R_CSI_ADDRESS_CONTROL_CAPABILITY 0x1604
+#define CCS_R_CSI_ADDRESS_CONTROL_CAPABILITY CCI_REG8(0x1604)
#define CCS_CSI_ADDRESS_CONTROL_CAPABILITY_CCI_ADDR_CHANGE BIT(0)
#define CCS_CSI_ADDRESS_CONTROL_CAPABILITY_2ND_CCI_ADDR BIT(1)
#define CCS_CSI_ADDRESS_CONTROL_CAPABILITY_SW_CHANGEABLE_2ND_CCI_ADDR BIT(2)
-#define CCS_R_DATA_TYPE_CAPABILITY 0x1605
+#define CCS_R_DATA_TYPE_CAPABILITY CCI_REG8(0x1605)
#define CCS_DATA_TYPE_CAPABILITY_DPCM_PROGRAMMABLE BIT(0)
#define CCS_DATA_TYPE_CAPABILITY_BOTTOM_EMBEDDED_DT_PROGRAMMABLE BIT(1)
#define CCS_DATA_TYPE_CAPABILITY_BOTTOM_EMBEDDED_VC_PROGRAMMABLE BIT(2)
#define CCS_DATA_TYPE_CAPABILITY_EXT_VC_RANGE BIT(3)
-#define CCS_R_CSI_CPHY_LANE_MODE_CAPABILITY 0x1606
+#define CCS_R_CSI_CPHY_LANE_MODE_CAPABILITY CCI_REG8(0x1606)
#define CCS_CSI_CPHY_LANE_MODE_CAPABILITY_1_LANE BIT(0)
#define CCS_CSI_CPHY_LANE_MODE_CAPABILITY_2_LANE BIT(1)
#define CCS_CSI_CPHY_LANE_MODE_CAPABILITY_3_LANE BIT(2)
@@ -712,44 +712,44 @@
#define CCS_CSI_CPHY_LANE_MODE_CAPABILITY_6_LANE BIT(5)
#define CCS_CSI_CPHY_LANE_MODE_CAPABILITY_7_LANE BIT(6)
#define CCS_CSI_CPHY_LANE_MODE_CAPABILITY_8_LANE BIT(7)
-#define CCS_R_EMB_DATA_CAPABILITY 0x1607
+#define CCS_R_EMB_DATA_CAPABILITY CCI_REG8(0x1607)
#define CCS_EMB_DATA_CAPABILITY_TWO_BYTES_PER_RAW16 BIT(0)
#define CCS_EMB_DATA_CAPABILITY_TWO_BYTES_PER_RAW20 BIT(1)
#define CCS_EMB_DATA_CAPABILITY_TWO_BYTES_PER_RAW24 BIT(2)
#define CCS_EMB_DATA_CAPABILITY_NO_ONE_BYTE_PER_RAW16 BIT(3)
#define CCS_EMB_DATA_CAPABILITY_NO_ONE_BYTE_PER_RAW20 BIT(4)
#define CCS_EMB_DATA_CAPABILITY_NO_ONE_BYTE_PER_RAW24 BIT(5)
-#define CCS_R_MAX_PER_LANE_BITRATE_LANE_D_MODE_MBPS(n) ((0x1608 | (CCS_FL_32BIT | CCS_FL_IREAL)) + ((n) < 4 ? (n) * 4 : 0x32 + ((n) - 4) * 4))
+#define CCS_R_MAX_PER_LANE_BITRATE_LANE_D_MODE_MBPS(n) (CCI_REG32(0x1608 + ((n) < 4 ? (n) * 4 : 0x32 + ((n) - 4) * 4)) | CCS_FL_IREAL)
#define CCS_LIM_MAX_PER_LANE_BITRATE_LANE_D_MODE_MBPS_MIN_N 0U
#define CCS_LIM_MAX_PER_LANE_BITRATE_LANE_D_MODE_MBPS_MAX_N 7U
-#define CCS_R_TEMP_SENSOR_CAPABILITY 0x1618
+#define CCS_R_TEMP_SENSOR_CAPABILITY CCI_REG8(0x1618)
#define CCS_TEMP_SENSOR_CAPABILITY_SUPPORTED BIT(0)
#define CCS_TEMP_SENSOR_CAPABILITY_CCS_FORMAT BIT(1)
#define CCS_TEMP_SENSOR_CAPABILITY_RESET_0X80 BIT(2)
-#define CCS_R_MAX_PER_LANE_BITRATE_LANE_C_MODE_MBPS(n) ((0x161a | (CCS_FL_32BIT | CCS_FL_IREAL)) + ((n) < 4 ? (n) * 4 : 0x30 + ((n) - 4) * 4))
+#define CCS_R_MAX_PER_LANE_BITRATE_LANE_C_MODE_MBPS(n) (CCI_REG32(0x161a + ((n) < 4 ? (n) * 4 : 0x30 + ((n) - 4) * 4)) | CCS_FL_IREAL)
#define CCS_LIM_MAX_PER_LANE_BITRATE_LANE_C_MODE_MBPS_MIN_N 0U
#define CCS_LIM_MAX_PER_LANE_BITRATE_LANE_C_MODE_MBPS_MAX_N 7U
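/*
 * Aside on the two indexed per-lane bitrate macros above: the conversion
 * moves the width encoding so it wraps the fully computed address rather
 * than the base. The old form OR'd the flags into 0x1608/0x161a and only
 * then added the lane offset, which is equivalent as long as the offset
 * never carries into the width bits. A minimal sketch of that equivalence,
 * with WIDTH32 as a hypothetical stand-in for the CCI 32-bit width field
 * (the authoritative definition lives in include/media/v4l2-cci.h):
 */
#include <assert.h>
#define WIDTH32 (4u << 16) /* stand-in for CCI_REG32()'s width bits */
#define LANE_OFF(n) ((n) < 4 ? (n) * 4 : 0x32 + ((n) - 4) * 4)
static_assert(((WIDTH32 | 0x1608) + LANE_OFF(7)) ==
	      (WIDTH32 | (0x1608 + LANE_OFF(7))),
	      "encode-then-add matches add-then-encode while below bit 16");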
-#define CCS_R_DPHY_EQUALIZATION_CAPABILITY 0x162b
+#define CCS_R_DPHY_EQUALIZATION_CAPABILITY CCI_REG8(0x162b)
#define CCS_DPHY_EQUALIZATION_CAPABILITY_EQUALIZATION_CTRL BIT(0)
#define CCS_DPHY_EQUALIZATION_CAPABILITY_EQ1 BIT(1)
#define CCS_DPHY_EQUALIZATION_CAPABILITY_EQ2 BIT(2)
-#define CCS_R_CPHY_EQUALIZATION_CAPABILITY 0x162c
+#define CCS_R_CPHY_EQUALIZATION_CAPABILITY CCI_REG8(0x162c)
#define CCS_CPHY_EQUALIZATION_CAPABILITY_EQUALIZATION_CTRL BIT(0)
-#define CCS_R_DPHY_PREAMBLE_CAPABILITY 0x162d
+#define CCS_R_DPHY_PREAMBLE_CAPABILITY CCI_REG8(0x162d)
#define CCS_DPHY_PREAMBLE_CAPABILITY_PREAMBLE_SEQ_CTRL BIT(0)
-#define CCS_R_DPHY_SSC_CAPABILITY 0x162e
+#define CCS_R_DPHY_SSC_CAPABILITY CCI_REG8(0x162e)
#define CCS_DPHY_SSC_CAPABILITY_SUPPORTED BIT(0)
-#define CCS_R_CPHY_CALIBRATION_CAPABILITY 0x162f
+#define CCS_R_CPHY_CALIBRATION_CAPABILITY CCI_REG8(0x162f)
#define CCS_CPHY_CALIBRATION_CAPABILITY_MANUAL BIT(0)
#define CCS_CPHY_CALIBRATION_CAPABILITY_MANUAL_STREAMING BIT(1)
#define CCS_CPHY_CALIBRATION_CAPABILITY_FORMAT_1_CTRL BIT(2)
#define CCS_CPHY_CALIBRATION_CAPABILITY_FORMAT_2_CTRL BIT(3)
#define CCS_CPHY_CALIBRATION_CAPABILITY_FORMAT_3_CTRL BIT(4)
-#define CCS_R_DPHY_CALIBRATION_CAPABILITY 0x1630
+#define CCS_R_DPHY_CALIBRATION_CAPABILITY CCI_REG8(0x1630)
#define CCS_DPHY_CALIBRATION_CAPABILITY_MANUAL BIT(0)
#define CCS_DPHY_CALIBRATION_CAPABILITY_MANUAL_STREAMING BIT(1)
#define CCS_DPHY_CALIBRATION_CAPABILITY_ALTERNATE_SEQ BIT(2)
-#define CCS_R_PHY_CTRL_CAPABILITY_2 0x1631
+#define CCS_R_PHY_CTRL_CAPABILITY_2 CCI_REG8(0x1631)
#define CCS_PHY_CTRL_CAPABILITY_2_TGR_LENGTH BIT(0)
#define CCS_PHY_CTRL_CAPABILITY_2_TGR_PREAMBLE_PROG_SEQ BIT(1)
#define CCS_PHY_CTRL_CAPABILITY_2_EXTRA_CPHY_MANUAL_TIMING BIT(2)
@@ -758,13 +758,13 @@
#define CCS_PHY_CTRL_CAPABILITY_2_CLOCK_BASED_MANUAL_CPHY BIT(5)
#define CCS_PHY_CTRL_CAPABILITY_2_MANUAL_LP_DPHY BIT(6)
#define CCS_PHY_CTRL_CAPABILITY_2_MANUAL_LP_CPHY BIT(7)
-#define CCS_R_LRTE_CPHY_CAPABILITY 0x1632
+#define CCS_R_LRTE_CPHY_CAPABILITY CCI_REG8(0x1632)
#define CCS_LRTE_CPHY_CAPABILITY_PDQ_SHORT BIT(0)
#define CCS_LRTE_CPHY_CAPABILITY_SPACER_SHORT BIT(1)
#define CCS_LRTE_CPHY_CAPABILITY_PDQ_LONG BIT(2)
#define CCS_LRTE_CPHY_CAPABILITY_SPACER_LONG BIT(3)
#define CCS_LRTE_CPHY_CAPABILITY_SPACER_NO_PDQ BIT(4)
-#define CCS_R_LRTE_DPHY_CAPABILITY 0x1633
+#define CCS_R_LRTE_DPHY_CAPABILITY CCI_REG8(0x1633)
#define CCS_LRTE_DPHY_CAPABILITY_PDQ_SHORT_OPT1 BIT(0)
#define CCS_LRTE_DPHY_CAPABILITY_SPACER_SHORT_OPT1 BIT(1)
#define CCS_LRTE_DPHY_CAPABILITY_PDQ_LONG_OPT1 BIT(2)
@@ -773,18 +773,18 @@
#define CCS_LRTE_DPHY_CAPABILITY_SPACER_LONG_OPT2 BIT(5)
#define CCS_LRTE_DPHY_CAPABILITY_SPACER_NO_PDQ_OPT1 BIT(6)
#define CCS_LRTE_DPHY_CAPABILITY_SPACER_VARIABLE_OPT2 BIT(7)
-#define CCS_R_ALPS_CAPABILITY_DPHY 0x1634
+#define CCS_R_ALPS_CAPABILITY_DPHY CCI_REG8(0x1634)
#define CCS_ALPS_CAPABILITY_DPHY_LVLP_NOT_SUPPORTED 0U
#define CCS_ALPS_CAPABILITY_DPHY_LVLP_SUPPORTED 1U
#define CCS_ALPS_CAPABILITY_DPHY_CONTROLLABLE_LVLP 2U
-#define CCS_R_ALPS_CAPABILITY_CPHY 0x1635
+#define CCS_R_ALPS_CAPABILITY_CPHY CCI_REG8(0x1635)
#define CCS_ALPS_CAPABILITY_CPHY_LVLP_NOT_SUPPORTED 0U
#define CCS_ALPS_CAPABILITY_CPHY_LVLP_SUPPORTED 1U
#define CCS_ALPS_CAPABILITY_CPHY_CONTROLLABLE_LVLP 2U
#define CCS_ALPS_CAPABILITY_CPHY_ALP_NOT_SUPPORTED 0xc
#define CCS_ALPS_CAPABILITY_CPHY_ALP_SUPPORTED 0xd
#define CCS_ALPS_CAPABILITY_CPHY_CONTROLLABLE_ALP 0xe
-#define CCS_R_SCRAMBLING_CAPABILITY 0x1636
+#define CCS_R_SCRAMBLING_CAPABILITY CCI_REG8(0x1636)
#define CCS_SCRAMBLING_CAPABILITY_SCRAMBLING_SUPPORTED BIT(0)
#define CCS_SCRAMBLING_CAPABILITY_MAX_SEEDS_PER_LANE_C_SHIFT 1U
#define CCS_SCRAMBLING_CAPABILITY_MAX_SEEDS_PER_LANE_C_MASK 0x6
@@ -796,11 +796,11 @@
#define CCS_SCRAMBLING_CAPABILITY_NUM_SEED_REGS_1 1U
#define CCS_SCRAMBLING_CAPABILITY_NUM_SEED_REGS_4 4U
#define CCS_SCRAMBLING_CAPABILITY_NUM_SEED_PER_LANE BIT(6)
-#define CCS_R_DPHY_MANUAL_CONSTANT 0x1637
-#define CCS_R_CPHY_MANUAL_CONSTANT 0x1638
-#define CCS_R_CSI2_INTERFACE_CAPABILITY_MISC 0x1639
+#define CCS_R_DPHY_MANUAL_CONSTANT CCI_REG8(0x1637)
+#define CCS_R_CPHY_MANUAL_CONSTANT CCI_REG8(0x1638)
+#define CCS_R_CSI2_INTERFACE_CAPABILITY_MISC CCI_REG8(0x1639)
#define CCS_CSI2_INTERFACE_CAPABILITY_MISC_EOTP_SHORT_PKT_OPT2 BIT(0)
-#define CCS_R_PHY_CTRL_CAPABILITY_3 0x165c
+#define CCS_R_PHY_CTRL_CAPABILITY_3 CCI_REG8(0x165c)
#define CCS_PHY_CTRL_CAPABILITY_3_DPHY_TIMING_NOT_MULTIPLE BIT(0)
#define CCS_PHY_CTRL_CAPABILITY_3_DPHY_MIN_TIMING_VALUE_1 BIT(1)
#define CCS_PHY_CTRL_CAPABILITY_3_TWAKEUP_SUPPORTED BIT(2)
@@ -808,130 +808,130 @@
#define CCS_PHY_CTRL_CAPABILITY_3_THS_EXIT_SUPPORTED BIT(4)
#define CCS_PHY_CTRL_CAPABILITY_3_CPHY_TIMING_NOT_MULTIPLE BIT(5)
#define CCS_PHY_CTRL_CAPABILITY_3_CPHY_MIN_TIMING_VALUE_1 BIT(6)
-#define CCS_R_DPHY_SF 0x165d
-#define CCS_R_CPHY_SF 0x165e
+#define CCS_R_DPHY_SF CCI_REG8(0x165d)
+#define CCS_R_CPHY_SF CCI_REG8(0x165e)
#define CCS_CPHY_SF_TWAKEUP_SHIFT 0U
#define CCS_CPHY_SF_TWAKEUP_MASK 0xf
#define CCS_CPHY_SF_TINIT_SHIFT 4U
#define CCS_CPHY_SF_TINIT_MASK 0xf0
-#define CCS_R_DPHY_LIMITS_1 0x165f
+#define CCS_R_DPHY_LIMITS_1 CCI_REG8(0x165f)
#define CCS_DPHY_LIMITS_1_THS_PREPARE_SHIFT 0U
#define CCS_DPHY_LIMITS_1_THS_PREPARE_MASK 0xf
#define CCS_DPHY_LIMITS_1_THS_ZERO_SHIFT 4U
#define CCS_DPHY_LIMITS_1_THS_ZERO_MASK 0xf0
-#define CCS_R_DPHY_LIMITS_2 0x1660
+#define CCS_R_DPHY_LIMITS_2 CCI_REG8(0x1660)
#define CCS_DPHY_LIMITS_2_THS_TRAIL_SHIFT 0U
#define CCS_DPHY_LIMITS_2_THS_TRAIL_MASK 0xf
#define CCS_DPHY_LIMITS_2_TCLK_TRAIL_MIN_SHIFT 4U
#define CCS_DPHY_LIMITS_2_TCLK_TRAIL_MIN_MASK 0xf0
-#define CCS_R_DPHY_LIMITS_3 0x1661
+#define CCS_R_DPHY_LIMITS_3 CCI_REG8(0x1661)
#define CCS_DPHY_LIMITS_3_TCLK_PREPARE_SHIFT 0U
#define CCS_DPHY_LIMITS_3_TCLK_PREPARE_MASK 0xf
#define CCS_DPHY_LIMITS_3_TCLK_ZERO_SHIFT 4U
#define CCS_DPHY_LIMITS_3_TCLK_ZERO_MASK 0xf0
-#define CCS_R_DPHY_LIMITS_4 0x1662
+#define CCS_R_DPHY_LIMITS_4 CCI_REG8(0x1662)
#define CCS_DPHY_LIMITS_4_TCLK_POST_SHIFT 0U
#define CCS_DPHY_LIMITS_4_TCLK_POST_MASK 0xf
#define CCS_DPHY_LIMITS_4_TLPX_SHIFT 4U
#define CCS_DPHY_LIMITS_4_TLPX_MASK 0xf0
-#define CCS_R_DPHY_LIMITS_5 0x1663
+#define CCS_R_DPHY_LIMITS_5 CCI_REG8(0x1663)
#define CCS_DPHY_LIMITS_5_THS_EXIT_SHIFT 0U
#define CCS_DPHY_LIMITS_5_THS_EXIT_MASK 0xf
#define CCS_DPHY_LIMITS_5_TWAKEUP_SHIFT 4U
#define CCS_DPHY_LIMITS_5_TWAKEUP_MASK 0xf0
-#define CCS_R_DPHY_LIMITS_6 0x1664
+#define CCS_R_DPHY_LIMITS_6 CCI_REG8(0x1664)
#define CCS_DPHY_LIMITS_6_TINIT_SHIFT 0U
#define CCS_DPHY_LIMITS_6_TINIT_MASK 0xf
-#define CCS_R_CPHY_LIMITS_1 0x1665
+#define CCS_R_CPHY_LIMITS_1 CCI_REG8(0x1665)
#define CCS_CPHY_LIMITS_1_T3_PREPARE_MAX_SHIFT 0U
#define CCS_CPHY_LIMITS_1_T3_PREPARE_MAX_MASK 0xf
#define CCS_CPHY_LIMITS_1_T3_LPX_MAX_SHIFT 4U
#define CCS_CPHY_LIMITS_1_T3_LPX_MAX_MASK 0xf0
-#define CCS_R_CPHY_LIMITS_2 0x1666
+#define CCS_R_CPHY_LIMITS_2 CCI_REG8(0x1666)
#define CCS_CPHY_LIMITS_2_THS_EXIT_MAX_SHIFT 0U
#define CCS_CPHY_LIMITS_2_THS_EXIT_MAX_MASK 0xf
#define CCS_CPHY_LIMITS_2_TWAKEUP_MAX_SHIFT 4U
#define CCS_CPHY_LIMITS_2_TWAKEUP_MAX_MASK 0xf0
-#define CCS_R_CPHY_LIMITS_3 0x1667
+#define CCS_R_CPHY_LIMITS_3 CCI_REG8(0x1667)
#define CCS_CPHY_LIMITS_3_TINIT_MAX_SHIFT 0U
#define CCS_CPHY_LIMITS_3_TINIT_MAX_MASK 0xf
-#define CCS_R_MIN_FRAME_LENGTH_LINES_BIN (0x1700 | CCS_FL_16BIT)
-#define CCS_R_MAX_FRAME_LENGTH_LINES_BIN (0x1702 | CCS_FL_16BIT)
-#define CCS_R_MIN_LINE_LENGTH_PCK_BIN (0x1704 | CCS_FL_16BIT)
-#define CCS_R_MAX_LINE_LENGTH_PCK_BIN (0x1706 | CCS_FL_16BIT)
-#define CCS_R_MIN_LINE_BLANKING_PCK_BIN (0x1708 | CCS_FL_16BIT)
-#define CCS_R_FINE_INTEGRATION_TIME_MIN_BIN (0x170a | CCS_FL_16BIT)
-#define CCS_R_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN (0x170c | CCS_FL_16BIT)
-#define CCS_R_BINNING_CAPABILITY 0x1710
+#define CCS_R_MIN_FRAME_LENGTH_LINES_BIN CCI_REG16(0x1700)
+#define CCS_R_MAX_FRAME_LENGTH_LINES_BIN CCI_REG16(0x1702)
+#define CCS_R_MIN_LINE_LENGTH_PCK_BIN CCI_REG16(0x1704)
+#define CCS_R_MAX_LINE_LENGTH_PCK_BIN CCI_REG16(0x1706)
+#define CCS_R_MIN_LINE_BLANKING_PCK_BIN CCI_REG16(0x1708)
+#define CCS_R_FINE_INTEGRATION_TIME_MIN_BIN CCI_REG16(0x170a)
+#define CCS_R_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN CCI_REG16(0x170c)
+#define CCS_R_BINNING_CAPABILITY CCI_REG8(0x1710)
#define CCS_BINNING_CAPABILITY_UNSUPPORTED 0U
#define CCS_BINNING_CAPABILITY_BINNING_THEN_SUBSAMPLING 1U
#define CCS_BINNING_CAPABILITY_SUBSAMPLING_THEN_BINNING 2U
-#define CCS_R_BINNING_WEIGHTING_CAPABILITY 0x1711
+#define CCS_R_BINNING_WEIGHTING_CAPABILITY CCI_REG8(0x1711)
#define CCS_BINNING_WEIGHTING_CAPABILITY_AVERAGED BIT(0)
#define CCS_BINNING_WEIGHTING_CAPABILITY_SUMMED BIT(1)
#define CCS_BINNING_WEIGHTING_CAPABILITY_BAYER_CORRECTED BIT(2)
#define CCS_BINNING_WEIGHTING_CAPABILITY_MODULE_SPECIFIC_WEIGHT BIT(3)
-#define CCS_R_BINNING_SUB_TYPES 0x1712
-#define CCS_R_BINNING_SUB_TYPE(n) (0x1713 + (n))
+#define CCS_R_BINNING_SUB_TYPES CCI_REG8(0x1712)
+#define CCS_R_BINNING_SUB_TYPE(n) CCI_REG8(0x1713 + (n))
#define CCS_LIM_BINNING_SUB_TYPE_MIN_N 0U
#define CCS_LIM_BINNING_SUB_TYPE_MAX_N 63U
#define CCS_BINNING_SUB_TYPE_ROW_SHIFT 0U
#define CCS_BINNING_SUB_TYPE_ROW_MASK 0xf
#define CCS_BINNING_SUB_TYPE_COLUMN_SHIFT 4U
#define CCS_BINNING_SUB_TYPE_COLUMN_MASK 0xf0
-#define CCS_R_BINNING_WEIGHTING_MONO_CAPABILITY 0x1771
+#define CCS_R_BINNING_WEIGHTING_MONO_CAPABILITY CCI_REG8(0x1771)
#define CCS_BINNING_WEIGHTING_MONO_CAPABILITY_AVERAGED BIT(0)
#define CCS_BINNING_WEIGHTING_MONO_CAPABILITY_SUMMED BIT(1)
#define CCS_BINNING_WEIGHTING_MONO_CAPABILITY_BAYER_CORRECTED BIT(2)
#define CCS_BINNING_WEIGHTING_MONO_CAPABILITY_MODULE_SPECIFIC_WEIGHT BIT(3)
-#define CCS_R_BINNING_SUB_TYPES_MONO 0x1772
-#define CCS_R_BINNING_SUB_TYPE_MONO(n) (0x1773 + (n))
+#define CCS_R_BINNING_SUB_TYPES_MONO CCI_REG8(0x1772)
+#define CCS_R_BINNING_SUB_TYPE_MONO(n) CCI_REG8(0x1773 + (n))
#define CCS_LIM_BINNING_SUB_TYPE_MONO_MIN_N 0U
#define CCS_LIM_BINNING_SUB_TYPE_MONO_MAX_N 63U
-#define CCS_R_DATA_TRANSFER_IF_CAPABILITY 0x1800
+#define CCS_R_DATA_TRANSFER_IF_CAPABILITY CCI_REG8(0x1800)
#define CCS_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED BIT(0)
#define CCS_DATA_TRANSFER_IF_CAPABILITY_POLLING BIT(2)
-#define CCS_R_SHADING_CORRECTION_CAPABILITY 0x1900
+#define CCS_R_SHADING_CORRECTION_CAPABILITY CCI_REG8(0x1900)
#define CCS_SHADING_CORRECTION_CAPABILITY_COLOR_SHADING BIT(0)
#define CCS_SHADING_CORRECTION_CAPABILITY_LUMINANCE_CORRECTION BIT(1)
-#define CCS_R_GREEN_IMBALANCE_CAPABILITY 0x1901
+#define CCS_R_GREEN_IMBALANCE_CAPABILITY CCI_REG8(0x1901)
#define CCS_GREEN_IMBALANCE_CAPABILITY_SUPPORTED BIT(0)
-#define CCS_R_MODULE_SPECIFIC_CORRECTION_CAPABILITY 0x1903
-#define CCS_R_DEFECT_CORRECTION_CAPABILITY (0x1904 | CCS_FL_16BIT)
+#define CCS_R_MODULE_SPECIFIC_CORRECTION_CAPABILITY CCI_REG8(0x1903)
+#define CCS_R_DEFECT_CORRECTION_CAPABILITY CCI_REG16(0x1904)
#define CCS_DEFECT_CORRECTION_CAPABILITY_MAPPED_DEFECT BIT(0)
#define CCS_DEFECT_CORRECTION_CAPABILITY_DYNAMIC_COUPLET BIT(2)
#define CCS_DEFECT_CORRECTION_CAPABILITY_DYNAMIC_SINGLE BIT(5)
#define CCS_DEFECT_CORRECTION_CAPABILITY_COMBINED_DYNAMIC BIT(8)
-#define CCS_R_DEFECT_CORRECTION_CAPABILITY_2 (0x1906 | CCS_FL_16BIT)
+#define CCS_R_DEFECT_CORRECTION_CAPABILITY_2 CCI_REG16(0x1906)
#define CCS_DEFECT_CORRECTION_CAPABILITY_2_DYNAMIC_TRIPLET BIT(3)
-#define CCS_R_NF_CAPABILITY 0x1908
+#define CCS_R_NF_CAPABILITY CCI_REG8(0x1908)
#define CCS_NF_CAPABILITY_LUMA BIT(0)
#define CCS_NF_CAPABILITY_CHROMA BIT(1)
#define CCS_NF_CAPABILITY_COMBINED BIT(2)
-#define CCS_R_OB_READOUT_CAPABILITY 0x1980
+#define CCS_R_OB_READOUT_CAPABILITY CCI_REG8(0x1980)
#define CCS_OB_READOUT_CAPABILITY_CONTROLLABLE_READOUT BIT(0)
#define CCS_OB_READOUT_CAPABILITY_VISIBLE_PIXEL_READOUT BIT(1)
#define CCS_OB_READOUT_CAPABILITY_DIFFERENT_VC_READOUT BIT(2)
#define CCS_OB_READOUT_CAPABILITY_DIFFERENT_DT_READOUT BIT(3)
#define CCS_OB_READOUT_CAPABILITY_PROG_DATA_FORMAT BIT(4)
-#define CCS_R_COLOR_FEEDBACK_CAPABILITY 0x1987
+#define CCS_R_COLOR_FEEDBACK_CAPABILITY CCI_REG8(0x1987)
#define CCS_COLOR_FEEDBACK_CAPABILITY_KELVIN BIT(0)
#define CCS_COLOR_FEEDBACK_CAPABILITY_AWB_GAIN BIT(1)
-#define CCS_R_CFA_PATTERN_CAPABILITY 0x1990
+#define CCS_R_CFA_PATTERN_CAPABILITY CCI_REG8(0x1990)
#define CCS_CFA_PATTERN_CAPABILITY_BAYER 0U
#define CCS_CFA_PATTERN_CAPABILITY_MONOCHROME 1U
#define CCS_CFA_PATTERN_CAPABILITY_4X4_QUAD_BAYER 2U
#define CCS_CFA_PATTERN_CAPABILITY_VENDOR_SPECIFIC 3U
-#define CCS_R_CFA_PATTERN_CONVERSION_CAPABILITY 0x1991
+#define CCS_R_CFA_PATTERN_CONVERSION_CAPABILITY CCI_REG8(0x1991)
#define CCS_CFA_PATTERN_CONVERSION_CAPABILITY_BAYER BIT(0)
-#define CCS_R_FLASH_MODE_CAPABILITY 0x1a02
+#define CCS_R_FLASH_MODE_CAPABILITY CCI_REG8(0x1a02)
#define CCS_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0)
-#define CCS_R_SA_STROBE_MODE_CAPABILITY 0x1a03
+#define CCS_R_SA_STROBE_MODE_CAPABILITY CCI_REG8(0x1a03)
#define CCS_SA_STROBE_MODE_CAPABILITY_FIXED_WIDTH BIT(0)
#define CCS_SA_STROBE_MODE_CAPABILITY_EDGE_CTRL BIT(1)
-#define CCS_R_RESET_MAX_DELAY 0x1a10
-#define CCS_R_RESET_MIN_TIME 0x1a11
-#define CCS_R_PDAF_CAPABILITY_1 0x1b80
+#define CCS_R_RESET_MAX_DELAY CCI_REG8(0x1a10)
+#define CCS_R_RESET_MIN_TIME CCI_REG8(0x1a11)
+#define CCS_R_PDAF_CAPABILITY_1 CCI_REG8(0x1b80)
#define CCS_PDAF_CAPABILITY_1_SUPPORTED BIT(0)
#define CCS_PDAF_CAPABILITY_1_PROCESSED_BOTTOM_EMBEDDED BIT(1)
#define CCS_PDAF_CAPABILITY_1_PROCESSED_INTERLEAVED BIT(2)
@@ -940,19 +940,19 @@
#define CCS_PDAF_CAPABILITY_1_VISIBLE_PDAF_CORRECTION BIT(5)
#define CCS_PDAF_CAPABILITY_1_VC_INTERLEAVING BIT(6)
#define CCS_PDAF_CAPABILITY_1_DT_INTERLEAVING BIT(7)
-#define CCS_R_PDAF_CAPABILITY_2 0x1b81
+#define CCS_R_PDAF_CAPABILITY_2 CCI_REG8(0x1b81)
#define CCS_PDAF_CAPABILITY_2_ROI BIT(0)
#define CCS_PDAF_CAPABILITY_2_AFTER_DIGITAL_CROP BIT(1)
#define CCS_PDAF_CAPABILITY_2_CTRL_RETIMED BIT(2)
-#define CCS_R_BRACKETING_LUT_CAPABILITY_1 0x1c00
+#define CCS_R_BRACKETING_LUT_CAPABILITY_1 CCI_REG8(0x1c00)
#define CCS_BRACKETING_LUT_CAPABILITY_1_COARSE_INTEGRATION BIT(0)
#define CCS_BRACKETING_LUT_CAPABILITY_1_GLOBAL_ANALOG_GAIN BIT(1)
#define CCS_BRACKETING_LUT_CAPABILITY_1_FLASH BIT(4)
#define CCS_BRACKETING_LUT_CAPABILITY_1_GLOBAL_DIGITAL_GAIN BIT(5)
#define CCS_BRACKETING_LUT_CAPABILITY_1_ALTERNATE_GLOBAL_ANALOG_GAIN BIT(6)
-#define CCS_R_BRACKETING_LUT_CAPABILITY_2 0x1c01
+#define CCS_R_BRACKETING_LUT_CAPABILITY_2 CCI_REG8(0x1c01)
#define CCS_BRACKETING_LUT_CAPABILITY_2_SINGLE_BRACKETING_MODE BIT(0)
#define CCS_BRACKETING_LUT_CAPABILITY_2_LOOPED_BRACKETING_MODE BIT(1)
-#define CCS_R_BRACKETING_LUT_SIZE 0x1c02
+#define CCS_R_BRACKETING_LUT_SIZE CCI_REG8(0x1c02)
#endif /* __CCS_REGS_H__ */
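A note on the pattern above: the CCS_FL_16BIT/CCS_FL_32BIT width flags are gone because the V4L2 CCI helpers encode the access width into the register constant itself, letting cci_read()/cci_write() derive the transfer size from the register definition. A simplified sketch of the encoding, assuming the v4l2-cci.h layout of this kernel generation (the authoritative masks live in include/media/v4l2-cci.h):

/* The width sits above the 16-bit I2C address, so e.g. CCI_REG16(0x0340)
 * evaluates to (2 << 16) | 0x0340; the helpers mask the fields back out. */
#define CCI_REG_ADDR_MASK	GENMASK(15, 0)
#define CCI_REG_WIDTH_SHIFT	16
#define CCI_REG8(x)		((1 << CCI_REG_WIDTH_SHIFT) | (x))
#define CCI_REG16(x)		((2 << CCI_REG_WIDTH_SHIFT) | (x))
#define CCI_REG32(x)		((4 << CCI_REG_WIDTH_SHIFT) | (x))

Driver-private bits such as CCS_FL_FLOAT_IREAL and CCS_FL_IREAL are OR'd on top of the finished CCI constant, which is why the *_MHZ limits now read (CCI_REG32(addr) | CCS_FL_FLOAT_IREAL); the driver is expected to strip its own flag bits before handing the register to the CCI helpers.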
diff --git a/drivers/media/i2c/ccs/ccs.h b/drivers/media/i2c/ccs/ccs.h
index 9c3587b2f..096573845 100644
--- a/drivers/media/i2c/ccs/ccs.h
+++ b/drivers/media/i2c/ccs/ccs.h
@@ -13,6 +13,7 @@
#define __CCS_H__
#include <linux/mutex.h>
+#include <linux/regmap.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
@@ -211,6 +212,7 @@ struct ccs_sensor {
struct clk *ext_clk;
struct gpio_desc *xshutdown;
struct gpio_desc *reset;
+ struct regmap *regmap;
void *ccs_limits;
u8 nbinning_subtypes;
struct ccs_binning_subtype binning_subtypes[CCS_LIM_BINNING_SUB_TYPE_MAX_N + 1];
@@ -236,6 +238,7 @@ struct ccs_sensor {
bool streaming;
bool dev_init_done;
+ bool handler_setup_needed;
u8 compressed_min_bpp;
struct ccs_module_info minfo;
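The regmap pointer added to struct ccs_sensor above is what the CCI helpers operate on. A minimal usage sketch, assuming the standard v4l2-cci API (devm_cci_regmap_init_i2c() and cci_read()) together with a register from the converted header; this is illustrative only, not the driver's actual probe path:

#include <media/v4l2-cci.h>

static int ccs_read_frame_length(struct ccs_sensor *sensor, u64 *fll)
{
	int ret = 0;

	/* CCS_R_FRAME_LENGTH_LINES is CCI_REG16(0x0340), so cci_read()
	 * performs a 16-bit access without a separate width argument. */
	cci_read(sensor->regmap, CCS_R_FRAME_LENGTH_LINES, fll, &ret);

	return ret;
}

/* Hypothetical probe-time setup:
 *	sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
 *	if (IS_ERR(sensor->regmap))
 *		return PTR_ERR(sensor->regmap);
 */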
diff --git a/drivers/media/i2c/ccs/smiapp-reg-defs.h b/drivers/media/i2c/ccs/smiapp-reg-defs.h
index 177e3e512..ebd0f90e1 100644
--- a/drivers/media/i2c/ccs/smiapp-reg-defs.h
+++ b/drivers/media/i2c/ccs/smiapp-reg-defs.h
@@ -12,481 +12,484 @@
#ifndef __SMIAPP_REG_DEFS_H__
#define __SMIAPP_REG_DEFS_H__
+#include <linux/bits.h>
+#include <media/v4l2-cci.h>
+
/* Register addresses */
-#define SMIAPP_REG_U16_MODEL_ID (0x0000 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_REVISION_NUMBER_MAJOR 0x0002
-#define SMIAPP_REG_U8_MANUFACTURER_ID 0x0003
-#define SMIAPP_REG_U8_SMIA_VERSION 0x0004
-#define SMIAPP_REG_U8_FRAME_COUNT 0x0005
-#define SMIAPP_REG_U8_PIXEL_ORDER 0x0006
-#define SMIAPP_REG_U16_DATA_PEDESTAL (0x0008 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_PIXEL_DEPTH 0x000c
-#define SMIAPP_REG_U8_REVISION_NUMBER_MINOR 0x0010
-#define SMIAPP_REG_U8_SMIAPP_VERSION 0x0011
-#define SMIAPP_REG_U8_MODULE_DATE_YEAR 0x0012
-#define SMIAPP_REG_U8_MODULE_DATE_MONTH 0x0013
-#define SMIAPP_REG_U8_MODULE_DATE_DAY 0x0014
-#define SMIAPP_REG_U8_MODULE_DATE_PHASE 0x0015
-#define SMIAPP_REG_U16_SENSOR_MODEL_ID (0x0016 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_SENSOR_REVISION_NUMBER 0x0018
-#define SMIAPP_REG_U8_SENSOR_MANUFACTURER_ID 0x0019
-#define SMIAPP_REG_U8_SENSOR_FIRMWARE_VERSION 0x001a
-#define SMIAPP_REG_U32_SERIAL_NUMBER (0x001c | CCS_FL_32BIT)
-#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE 0x0040
-#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_SUBTYPE 0x0041
-#define SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(n) ((0x0042 + ((n) << 1)) | CCS_FL_16BIT) /* 0 <= n <= 14 */
-#define SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(n) ((0x0060 + ((n) << 2)) | CCS_FL_32BIT) /* 0 <= n <= 7 */
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CAPABILITY (0x0080 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MIN (0x0084 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MAX (0x0086 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_STEP (0x0088 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_TYPE (0x008a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_M0 (0x008c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_C0 (0x008e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_M1 (0x0090 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_C1 (0x0092 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_TYPE 0x00c0
-#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_SUBTYPE 0x00c1
-#define SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(n) ((0x00c2 + ((n) << 1)) | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_MODE_SELECT 0x0100
-#define SMIAPP_REG_U8_IMAGE_ORIENTATION 0x0101
-#define SMIAPP_REG_U8_SOFTWARE_RESET 0x0103
-#define SMIAPP_REG_U8_GROUPED_PARAMETER_HOLD 0x0104
-#define SMIAPP_REG_U8_MASK_CORRUPTED_FRAMES 0x0105
-#define SMIAPP_REG_U8_FAST_STANDBY_CTRL 0x0106
-#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL 0x0107
-#define SMIAPP_REG_U8_2ND_CCI_IF_CONTROL 0x0108
-#define SMIAPP_REG_U8_2ND_CCI_ADDRESS_CONTROL 0x0109
-#define SMIAPP_REG_U8_CSI_CHANNEL_IDENTIFIER 0x0110
-#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE 0x0111
-#define SMIAPP_REG_U16_CSI_DATA_FORMAT (0x0112 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_CSI_LANE_MODE 0x0114
-#define SMIAPP_REG_U8_CSI2_10_TO_8_DT 0x0115
-#define SMIAPP_REG_U8_CSI2_10_TO_7_DT 0x0116
-#define SMIAPP_REG_U8_CSI2_10_TO_6_DT 0x0117
-#define SMIAPP_REG_U8_CSI2_12_TO_8_DT 0x0118
-#define SMIAPP_REG_U8_CSI2_12_TO_7_DT 0x0119
-#define SMIAPP_REG_U8_CSI2_12_TO_6_DT 0x011a
-#define SMIAPP_REG_U8_CSI2_14_TO_10_DT 0x011b
-#define SMIAPP_REG_U8_CSI2_14_TO_8_DT 0x011c
-#define SMIAPP_REG_U8_CSI2_16_TO_10_DT 0x011d
-#define SMIAPP_REG_U8_CSI2_16_TO_8_DT 0x011e
-#define SMIAPP_REG_U8_GAIN_MODE 0x0120
-#define SMIAPP_REG_U16_VANA_VOLTAGE (0x0130 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_VDIG_VOLTAGE (0x0132 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_VIO_VOLTAGE (0x0134 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ (0x0136 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_TEMP_SENSOR_CONTROL 0x0138
-#define SMIAPP_REG_U8_TEMP_SENSOR_MODE 0x0139
-#define SMIAPP_REG_U8_TEMP_SENSOR_OUTPUT 0x013a
-#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME (0x0200 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME (0x0202 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL (0x0204 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENR (0x0206 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_RED (0x0208 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_BLUE (0x020a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENB (0x020c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENR (0x020e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_RED (0x0210 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_BLUE (0x0212 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENB (0x0214 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_VT_PIX_CLK_DIV (0x0300 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_VT_SYS_CLK_DIV (0x0302 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_PRE_PLL_CLK_DIV (0x0304 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_PLL_MULTIPLIER (0x0306 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_OP_PIX_CLK_DIV (0x0308 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_OP_SYS_CLK_DIV (0x030a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FRAME_LENGTH_LINES (0x0340 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_LINE_LENGTH_PCK (0x0342 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_X_ADDR_START (0x0344 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_ADDR_START (0x0346 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_X_ADDR_END (0x0348 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_ADDR_END (0x034a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_X_OUTPUT_SIZE (0x034c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_OUTPUT_SIZE (0x034e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_X_EVEN_INC (0x0380 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_X_ODD_INC (0x0382 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_EVEN_INC (0x0384 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_ODD_INC (0x0386 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALING_MODE (0x0400 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SPATIAL_SAMPLING (0x0402 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALE_M (0x0404 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALE_N (0x0406 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET (0x0408 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_CROP_Y_OFFSET (0x040a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_WIDTH (0x040c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_HEIGHT (0x040e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_COMPRESSION_MODE (0x0500 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TEST_PATTERN_MODE (0x0600 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TEST_DATA_RED (0x0602 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TEST_DATA_GREENR (0x0604 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TEST_DATA_BLUE (0x0606 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TEST_DATA_GREENB (0x0608 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_WIDTH (0x060a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_POSITION (0x060c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_VERTICAL_CURSOR_WIDTH (0x060e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_VERTICAL_CURSOR_POSITION (0x0610 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FIFO_WATER_MARK_PIXELS (0x0700 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_TCLK_POST 0x0800
-#define SMIAPP_REG_U8_THS_PREPARE 0x0801
-#define SMIAPP_REG_U8_THS_ZERO_MIN 0x0802
-#define SMIAPP_REG_U8_THS_TRAIL 0x0803
-#define SMIAPP_REG_U8_TCLK_TRAIL_MIN 0x0804
-#define SMIAPP_REG_U8_TCLK_PREPARE 0x0805
-#define SMIAPP_REG_U8_TCLK_ZERO 0x0806
-#define SMIAPP_REG_U8_TLPX 0x0807
-#define SMIAPP_REG_U8_DPHY_CTRL 0x0808
-#define SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS (0x0820 | CCS_FL_32BIT)
-#define SMIAPP_REG_U8_BINNING_MODE 0x0900
-#define SMIAPP_REG_U8_BINNING_TYPE 0x0901
-#define SMIAPP_REG_U8_BINNING_WEIGHTING 0x0902
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL 0x0a00
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS 0x0a01
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT 0x0a02
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 0x0a04
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_1 0x0a05
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_2 0x0a06
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_3 0x0a07
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_4 0x0a08
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_5 0x0a09
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_12 0x0a10
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_13 0x0a11
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_14 0x0a12
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_15 0x0a13
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_16 0x0a14
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_17 0x0a15
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_18 0x0a16
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_19 0x0a17
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_20 0x0a18
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_21 0x0a19
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_22 0x0a1a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_23 0x0a1b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_24 0x0a1c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_25 0x0a1d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_26 0x0a1e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_27 0x0a1f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_28 0x0a20
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_29 0x0a21
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_30 0x0a22
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_31 0x0a23
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_32 0x0a24
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_33 0x0a25
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_34 0x0a26
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_35 0x0a27
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_36 0x0a28
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_37 0x0a29
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_38 0x0a2a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_39 0x0a2b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_40 0x0a2c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_41 0x0a2d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_42 0x0a2e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_43 0x0a2f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_44 0x0a30
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_45 0x0a31
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_46 0x0a32
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_47 0x0a33
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_48 0x0a34
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_49 0x0a35
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_50 0x0a36
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_51 0x0a37
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_52 0x0a38
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_53 0x0a39
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_54 0x0a3a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_55 0x0a3b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_56 0x0a3c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_57 0x0a3d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_58 0x0a3e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_59 0x0a3f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_60 0x0a40
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_61 0x0a41
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_62 0x0a42
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_63 0x0a43
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_CTRL 0x0a44
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_STATUS 0x0a45
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_PAGE_SELECT 0x0a46
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_0 0x0a48
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_1 0x0a49
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_2 0x0a4a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_3 0x0a4b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_4 0x0a4c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_5 0x0a4d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_6 0x0a4e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_7 0x0a4f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_8 0x0a50
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_9 0x0a51
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_10 0x0a52
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_11 0x0a53
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_12 0x0a54
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_13 0x0a55
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_14 0x0a56
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_15 0x0a57
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_16 0x0a58
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_17 0x0a59
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_18 0x0a5a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_19 0x0a5b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_20 0x0a5c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_21 0x0a5d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_22 0x0a5e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_23 0x0a5f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_24 0x0a60
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_25 0x0a61
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_26 0x0a62
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_27 0x0a63
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_28 0x0a64
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_29 0x0a65
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_30 0x0a66
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_31 0x0a67
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_32 0x0a68
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_33 0x0a69
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_34 0x0a6a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_35 0x0a6b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_36 0x0a6c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_37 0x0a6d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_38 0x0a6e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_39 0x0a6f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_40 0x0a70
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_41 0x0a71
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_42 0x0a72
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_43 0x0a73
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_44 0x0a74
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_45 0x0a75
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_46 0x0a76
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_47 0x0a77
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_48 0x0a78
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_49 0x0a79
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_50 0x0a7a
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_51 0x0a7b
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_52 0x0a7c
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_53 0x0a7d
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_54 0x0a7e
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_55 0x0a7f
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_56 0x0a80
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_57 0x0a81
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_58 0x0a82
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_59 0x0a83
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_60 0x0a84
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_61 0x0a85
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_62 0x0a86
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_63 0x0a87
-#define SMIAPP_REG_U8_SHADING_CORRECTION_ENABLE 0x0b00
-#define SMIAPP_REG_U8_LUMINANCE_CORRECTION_LEVEL 0x0b01
-#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_ENABLE 0x0b02
-#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_WEIGHT 0x0b03
-#define SMIAPP_REG_U8_BLACK_LEVEL_CORRECTION_ENABLE 0x0b04
-#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ENABLE 0x0b05
-#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_ENABLE 0x0b06
-#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_WEIGHT 0x0b07
-#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_ENABLE 0x0b08
-#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_WEIGHT 0x0b09
-#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_ENABLE 0x0b0a
-#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_WEIGHT 0x0b0b
-#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_ENABLE 0x0b0c
-#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_WEIGHT 0x0b0d
-#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ENABLE 0x0b0e
-#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ADJUST 0x0b0f
-#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ADJUST 0x0b10
-#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ENABLE 0x0b11
-#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ADJUST 0x0b12
-#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ENABLE 0x0b13
-#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ADJUST 0x0b14
-#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ENABLE 0x0b15
-#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ADJUST 0x0b16
-#define SMIAPP_REG_U8_EDOF_MODE 0x0b80
-#define SMIAPP_REG_U8_SHARPNESS 0x0b83
-#define SMIAPP_REG_U8_DENOISING 0x0b84
-#define SMIAPP_REG_U8_MODULE_SPECIFIC 0x0b85
-#define SMIAPP_REG_U16_DEPTH_OF_FIELD (0x0b86 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FOCUS_DISTANCE (0x0b88 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_ESTIMATION_MODE_CTRL 0x0b8a
-#define SMIAPP_REG_U16_COLOUR_TEMPERATURE (0x0b8c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENR (0x0b8e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ABSOLUTE_GAIN_RED (0x0b90 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ABSOLUTE_GAIN_BLUE (0x0b92 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENB (0x0b94 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_ESTIMATION_ZONE_MODE 0x0bc0
-#define SMIAPP_REG_U16_FIXED_ZONE_WEIGHTING (0x0bc2 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_CUSTOM_ZONE_X_START (0x0bc4 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_CUSTOM_ZONE_Y_START (0x0bc6 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_CUSTOM_ZONE_WIDTH (0x0bc8 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_CUSTOM_ZONE_HEIGHT (0x0bca | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL1 0x0c00
-#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL2 0x0c01
-#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_1 0x0c02
-#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_2 0x0c03
-#define SMIAPP_REG_U16_TRDY_CTRL (0x0c04 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TRDOUT_CTRL (0x0c06 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TSHUTTER_STROBE_DELAY_CTRL (0x0c08 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TSHUTTER_STROBE_WIDTH_CTRL (0x0c0a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_CTRL (0x0c0c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_CTRL (0x0c0e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TGRST_INTERVAL_CTRL (0x0c10 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_FLASH_STROBE_ADJUSTMENT 0x0c12
-#define SMIAPP_REG_U16_FLASH_STROBE_START_POINT (0x0c14 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_RS_CTRL (0x0c16 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL (0x0c18 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_FLASH_MODE_RS 0x0c1a
-#define SMIAPP_REG_U8_FLASH_TRIGGER_RS 0x0c1b
-#define SMIAPP_REG_U8_FLASH_STATUS 0x0c1c
-#define SMIAPP_REG_U8_SA_STROBE_MODE 0x0c1d
-#define SMIAPP_REG_U16_SA_STROBE_START_POINT (0x0c1e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TSA_STROBE_DELAY_CTRL (0x0c20 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TSA_STROBE_WIDTH_CTRL (0x0c22 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_SA_STROBE_TRIGGER 0x0c24
-#define SMIAPP_REG_U8_SPECIAL_ACTUATOR_STATUS 0x0c25
-#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_RS_CTRL (0x0c26 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_RS_CTRL (0x0c28 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_RS_CTRL 0x0c2a
-#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_CTRL 0x0c2b
-#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_CTRL (0x0c2c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_CTRL (0x0c2e | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_LOW_LEVEL_CTRL 0x0c80
-#define SMIAPP_REG_U16_MAIN_TRIGGER_REF_POINT (0x0c82 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAIN_TRIGGER_T3 (0x0c84 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_MAIN_TRIGGER_COUNT 0x0c86
-#define SMIAPP_REG_U16_PHASE1_TRIGGER_T3 (0x0c88 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_PHASE1_TRIGGER_COUNT 0x0c8a
-#define SMIAPP_REG_U16_PHASE2_TRIGGER_T3 (0x0c8c | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_PHASE2_TRIGGER_COUNT 0x0c8e
-#define SMIAPP_REG_U8_MECH_SHUTTER_CTRL 0x0d00
-#define SMIAPP_REG_U8_OPERATION_MODE 0x0d01
-#define SMIAPP_REG_U8_ACT_STATE1 0x0d02
-#define SMIAPP_REG_U8_ACT_STATE2 0x0d03
-#define SMIAPP_REG_U16_FOCUS_CHANGE (0x0d80 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FOCUS_CHANGE_CONTROL (0x0d82 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE1 (0x0d84 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE2 (0x0d86 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_STROBE_COUNT_PHASE1 0x0d88
-#define SMIAPP_REG_U8_STROBE_COUNT_PHASE2 0x0d89
-#define SMIAPP_REG_U8_POSITION 0x0d8a
-#define SMIAPP_REG_U8_BRACKETING_LUT_CONTROL 0x0e00
-#define SMIAPP_REG_U8_BRACKETING_LUT_MODE 0x0e01
-#define SMIAPP_REG_U8_BRACKETING_LUT_ENTRY_CONTROL 0x0e02
-#define SMIAPP_REG_U8_LUT_PARAMETERS_START 0x0e10
-#define SMIAPP_REG_U8_LUT_PARAMETERS_END 0x0eff
-#define SMIAPP_REG_U16_INTEGRATION_TIME_CAPABILITY (0x1000 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MIN (0x1004 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MAX_MARGIN (0x1006 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN (0x1008 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN (0x100a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_CAPABILITY (0x1080 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_MIN (0x1084 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_MAX (0x1086 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DIGITAL_GAIN_STEP_SIZE (0x1088 | CCS_FL_16BIT)
-#define SMIAPP_REG_F32_MIN_EXT_CLK_FREQ_HZ (0x1100 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_EXT_CLK_FREQ_HZ (0x1104 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_U16_MIN_PRE_PLL_CLK_DIV (0x1108 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_PRE_PLL_CLK_DIV (0x110a | CCS_FL_16BIT)
-#define SMIAPP_REG_F32_MIN_PLL_IP_FREQ_HZ (0x110c | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_PLL_IP_FREQ_HZ (0x1110 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_U16_MIN_PLL_MULTIPLIER (0x1114 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_PLL_MULTIPLIER (0x1116 | CCS_FL_16BIT)
-#define SMIAPP_REG_F32_MIN_PLL_OP_FREQ_HZ (0x1118 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_PLL_OP_FREQ_HZ (0x111c | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_U16_MIN_VT_SYS_CLK_DIV (0x1120 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_VT_SYS_CLK_DIV (0x1122 | CCS_FL_16BIT)
-#define SMIAPP_REG_F32_MIN_VT_SYS_CLK_FREQ_HZ (0x1124 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_VT_SYS_CLK_FREQ_HZ (0x1128 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MIN_VT_PIX_CLK_FREQ_HZ (0x112c | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_VT_PIX_CLK_FREQ_HZ (0x1130 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_U16_MIN_VT_PIX_CLK_DIV (0x1134 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_VT_PIX_CLK_DIV (0x1136 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES (0x1140 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES (0x1142 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK (0x1144 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK (0x1146 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK (0x1148 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_FRAME_BLANKING_LINES (0x114a | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_MIN_LINE_LENGTH_PCK_STEP_SIZE 0x114c
-#define SMIAPP_REG_U16_MIN_OP_SYS_CLK_DIV (0x1160 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_OP_SYS_CLK_DIV (0x1162 | CCS_FL_16BIT)
-#define SMIAPP_REG_F32_MIN_OP_SYS_CLK_FREQ_HZ (0x1164 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_OP_SYS_CLK_FREQ_HZ (0x1168 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_U16_MIN_OP_PIX_CLK_DIV (0x116c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_OP_PIX_CLK_DIV (0x116e | CCS_FL_16BIT)
-#define SMIAPP_REG_F32_MIN_OP_PIX_CLK_FREQ_HZ (0x1170 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_F32_MAX_OP_PIX_CLK_FREQ_HZ (0x1174 | CCS_FL_FLOAT_IREAL | CCS_FL_32BIT)
-#define SMIAPP_REG_U16_X_ADDR_MIN (0x1180 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_ADDR_MIN (0x1182 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_X_ADDR_MAX (0x1184 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_Y_ADDR_MAX (0x1186 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_X_OUTPUT_SIZE (0x1188 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_Y_OUTPUT_SIZE (0x118a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_X_OUTPUT_SIZE (0x118c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_Y_OUTPUT_SIZE (0x118e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_EVEN_INC (0x11c0 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_EVEN_INC (0x11c2 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_ODD_INC (0x11c4 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_ODD_INC (0x11c6 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALING_CAPABILITY (0x1200 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALER_M_MIN (0x1204 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALER_M_MAX (0x1206 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALER_N_MIN (0x1208 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SCALER_N_MAX (0x120a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_SPATIAL_SAMPLING_CAPABILITY (0x120c | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_DIGITAL_CROP_CAPABILITY 0x120e
-#define SMIAPP_REG_U16_COMPRESSION_CAPABILITY (0x1300 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINRED (0x1400 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINRED (0x1402 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINRED (0x1404 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINGREEN (0x1406 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINGREEN (0x1408 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINGREEN (0x140a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINBLUE (0x140c | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINBLUE (0x140e | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINBLUE (0x1410 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FIFO_SIZE_PIXELS (0x1500 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_FIFO_SUPPORT_CAPABILITY 0x1502
-#define SMIAPP_REG_U8_DPHY_CTRL_CAPABILITY 0x1600
-#define SMIAPP_REG_U8_CSI_LANE_MODE_CAPABILITY 0x1601
-#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE_CAPABILITY 0x1602
-#define SMIAPP_REG_U8_FAST_STANDBY_CAPABILITY 0x1603
-#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL_CAPABILITY 0x1604
-#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS (0x1608 | CCS_FL_32BIT)
-#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS (0x160c | CCS_FL_32BIT)
-#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS (0x1610 | CCS_FL_32BIT)
-#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS (0x1614 | CCS_FL_32BIT)
-#define SMIAPP_REG_U8_TEMP_SENSOR_CAPABILITY 0x1618
-#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES_BIN (0x1700 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES_BIN (0x1702 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK_BIN (0x1704 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK_BIN (0x1706 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK_BIN (0x1708 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN_BIN (0x170a | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN (0x170c | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_BINNING_CAPABILITY 0x1710
-#define SMIAPP_REG_U8_BINNING_WEIGHTING_CAPABILITY 0x1711
-#define SMIAPP_REG_U8_BINNING_SUBTYPES 0x1712
-#define SMIAPP_REG_U8_BINNING_TYPE_n(n) (0x1713 + (n)) /* 1 <= n <= 237 */
-#define SMIAPP_REG_U8_DATA_TRANSFER_IF_CAPABILITY 0x1800
-#define SMIAPP_REG_U8_SHADING_CORRECTION_CAPABILITY 0x1900
-#define SMIAPP_REG_U8_GREEN_IMBALANCE_CAPABILITY 0x1901
-#define SMIAPP_REG_U8_BLACK_LEVEL_CAPABILITY 0x1902
-#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_CAPABILITY 0x1903
-#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY (0x1904 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY_2 (0x1906 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_EDOF_CAPABILITY 0x1980
-#define SMIAPP_REG_U8_ESTIMATION_FRAMES 0x1981
-#define SMIAPP_REG_U8_SUPPORTS_SHARPNESS_ADJ 0x1982
-#define SMIAPP_REG_U8_SUPPORTS_DENOISING_ADJ 0x1983
-#define SMIAPP_REG_U8_SUPPORTS_MODULE_SPECIFIC_ADJ 0x1984
-#define SMIAPP_REG_U8_SUPPORTS_DEPTH_OF_FIELD_ADJ 0x1985
-#define SMIAPP_REG_U8_SUPPORTS_FOCUS_DISTANCE_ADJ 0x1986
-#define SMIAPP_REG_U8_COLOUR_FEEDBACK_CAPABILITY 0x1987
-#define SMIAPP_REG_U8_EDOF_SUPPORT_AB_NXM 0x1988
-#define SMIAPP_REG_U8_ESTIMATION_MODE_CAPABILITY 0x19c0
-#define SMIAPP_REG_U8_ESTIMATION_ZONE_CAPABILITY 0x19c1
-#define SMIAPP_REG_U16_EST_DEPTH_OF_FIELD (0x19c2 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_EST_FOCUS_DISTANCE (0x19c4 | CCS_FL_16BIT)
-#define SMIAPP_REG_U16_CAPABILITY_TRDY_MIN (0x1a00 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_FLASH_MODE_CAPABILITY 0x1a02
-#define SMIAPP_REG_U16_MECH_SHUT_AND_ACT_START_ADDR (0x1b02 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_ACTUATOR_CAPABILITY 0x1b04
-#define SMIAPP_REG_U16_ACTUATOR_TYPE (0x1b40 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_AF_DEVICE_ADDRESS 0x1b42
-#define SMIAPP_REG_U16_FOCUS_CHANGE_ADDRESS (0x1b44 | CCS_FL_16BIT)
-#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_1 0x1c00
-#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_2 0x1c01
-#define SMIAPP_REG_U8_BRACKETING_LUT_SIZE 0x1c02
+#define SMIAPP_REG_U16_MODEL_ID CCI_REG16(0x0000)
+#define SMIAPP_REG_U8_REVISION_NUMBER_MAJOR CCI_REG8(0x0002)
+#define SMIAPP_REG_U8_MANUFACTURER_ID CCI_REG8(0x0003)
+#define SMIAPP_REG_U8_SMIA_VERSION CCI_REG8(0x0004)
+#define SMIAPP_REG_U8_FRAME_COUNT CCI_REG8(0x0005)
+#define SMIAPP_REG_U8_PIXEL_ORDER CCI_REG8(0x0006)
+#define SMIAPP_REG_U16_DATA_PEDESTAL CCI_REG16(0x0008)
+#define SMIAPP_REG_U8_PIXEL_DEPTH CCI_REG8(0x000c)
+#define SMIAPP_REG_U8_REVISION_NUMBER_MINOR CCI_REG8(0x0010)
+#define SMIAPP_REG_U8_SMIAPP_VERSION CCI_REG8(0x0011)
+#define SMIAPP_REG_U8_MODULE_DATE_YEAR CCI_REG8(0x0012)
+#define SMIAPP_REG_U8_MODULE_DATE_MONTH CCI_REG8(0x0013)
+#define SMIAPP_REG_U8_MODULE_DATE_DAY CCI_REG8(0x0014)
+#define SMIAPP_REG_U8_MODULE_DATE_PHASE CCI_REG8(0x0015)
+#define SMIAPP_REG_U16_SENSOR_MODEL_ID CCI_REG16(0x0016)
+#define SMIAPP_REG_U8_SENSOR_REVISION_NUMBER CCI_REG8(0x0018)
+#define SMIAPP_REG_U8_SENSOR_MANUFACTURER_ID CCI_REG8(0x0019)
+#define SMIAPP_REG_U8_SENSOR_FIRMWARE_VERSION CCI_REG8(0x001a)
+#define SMIAPP_REG_U32_SERIAL_NUMBER CCI_REG32(0x001c)
+#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE CCI_REG8(0x0040)
+#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_SUBTYPE CCI_REG8(0x0041)
+#define SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(n) CCI_REG16(0x0042 + ((n) << 1)) /* 0 <= n <= 14 */
+#define SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(n) CCI_REG32(0x0060 + ((n) << 2)) /* 0 <= n <= 7 */
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CAPABILITY CCI_REG16(0x0080)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MIN CCI_REG16(0x0084)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MAX CCI_REG16(0x0086)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_STEP CCI_REG16(0x0088)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_TYPE CCI_REG16(0x008a)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_M0 CCI_REG16(0x008c)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_C0 CCI_REG16(0x008e)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_M1 CCI_REG16(0x0090)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_C1 CCI_REG16(0x0092)
+#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_TYPE CCI_REG8(0x00c0)
+#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_SUBTYPE CCI_REG8(0x00c1)
+#define SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(n) CCI_REG16(0x00c2 + ((n) << 1))
+#define SMIAPP_REG_U8_MODE_SELECT CCI_REG8(0x0100)
+#define SMIAPP_REG_U8_IMAGE_ORIENTATION CCI_REG8(0x0101)
+#define SMIAPP_REG_U8_SOFTWARE_RESET CCI_REG8(0x0103)
+#define SMIAPP_REG_U8_GROUPED_PARAMETER_HOLD CCI_REG8(0x0104)
+#define SMIAPP_REG_U8_MASK_CORRUPTED_FRAMES CCI_REG8(0x0105)
+#define SMIAPP_REG_U8_FAST_STANDBY_CTRL CCI_REG8(0x0106)
+#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL CCI_REG8(0x0107)
+#define SMIAPP_REG_U8_2ND_CCI_IF_CONTROL CCI_REG8(0x0108)
+#define SMIAPP_REG_U8_2ND_CCI_ADDRESS_CONTROL CCI_REG8(0x0109)
+#define SMIAPP_REG_U8_CSI_CHANNEL_IDENTIFIER CCI_REG8(0x0110)
+#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE CCI_REG8(0x0111)
+#define SMIAPP_REG_U16_CSI_DATA_FORMAT CCI_REG16(0x0112)
+#define SMIAPP_REG_U8_CSI_LANE_MODE CCI_REG8(0x0114)
+#define SMIAPP_REG_U8_CSI2_10_TO_8_DT CCI_REG8(0x0115)
+#define SMIAPP_REG_U8_CSI2_10_TO_7_DT CCI_REG8(0x0116)
+#define SMIAPP_REG_U8_CSI2_10_TO_6_DT CCI_REG8(0x0117)
+#define SMIAPP_REG_U8_CSI2_12_TO_8_DT CCI_REG8(0x0118)
+#define SMIAPP_REG_U8_CSI2_12_TO_7_DT CCI_REG8(0x0119)
+#define SMIAPP_REG_U8_CSI2_12_TO_6_DT CCI_REG8(0x011a)
+#define SMIAPP_REG_U8_CSI2_14_TO_10_DT CCI_REG8(0x011b)
+#define SMIAPP_REG_U8_CSI2_14_TO_8_DT CCI_REG8(0x011c)
+#define SMIAPP_REG_U8_CSI2_16_TO_10_DT CCI_REG8(0x011d)
+#define SMIAPP_REG_U8_CSI2_16_TO_8_DT CCI_REG8(0x011e)
+#define SMIAPP_REG_U8_GAIN_MODE CCI_REG8(0x0120)
+#define SMIAPP_REG_U16_VANA_VOLTAGE CCI_REG16(0x0130)
+#define SMIAPP_REG_U16_VDIG_VOLTAGE CCI_REG16(0x0132)
+#define SMIAPP_REG_U16_VIO_VOLTAGE CCI_REG16(0x0134)
+#define SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ CCI_REG16(0x0136)
+#define SMIAPP_REG_U8_TEMP_SENSOR_CONTROL CCI_REG8(0x0138)
+#define SMIAPP_REG_U8_TEMP_SENSOR_MODE CCI_REG8(0x0139)
+#define SMIAPP_REG_U8_TEMP_SENSOR_OUTPUT CCI_REG8(0x013a)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME CCI_REG16(0x0200)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME CCI_REG16(0x0202)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL CCI_REG16(0x0204)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENR CCI_REG16(0x0206)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_RED CCI_REG16(0x0208)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_BLUE CCI_REG16(0x020a)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENB CCI_REG16(0x020c)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENR CCI_REG16(0x020e)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_RED CCI_REG16(0x0210)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_BLUE CCI_REG16(0x0212)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENB CCI_REG16(0x0214)
+#define SMIAPP_REG_U16_VT_PIX_CLK_DIV CCI_REG16(0x0300)
+#define SMIAPP_REG_U16_VT_SYS_CLK_DIV CCI_REG16(0x0302)
+#define SMIAPP_REG_U16_PRE_PLL_CLK_DIV CCI_REG16(0x0304)
+#define SMIAPP_REG_U16_PLL_MULTIPLIER CCI_REG16(0x0306)
+#define SMIAPP_REG_U16_OP_PIX_CLK_DIV CCI_REG16(0x0308)
+#define SMIAPP_REG_U16_OP_SYS_CLK_DIV CCI_REG16(0x030a)
+#define SMIAPP_REG_U16_FRAME_LENGTH_LINES CCI_REG16(0x0340)
+#define SMIAPP_REG_U16_LINE_LENGTH_PCK CCI_REG16(0x0342)
+#define SMIAPP_REG_U16_X_ADDR_START CCI_REG16(0x0344)
+#define SMIAPP_REG_U16_Y_ADDR_START CCI_REG16(0x0346)
+#define SMIAPP_REG_U16_X_ADDR_END CCI_REG16(0x0348)
+#define SMIAPP_REG_U16_Y_ADDR_END CCI_REG16(0x034a)
+#define SMIAPP_REG_U16_X_OUTPUT_SIZE CCI_REG16(0x034c)
+#define SMIAPP_REG_U16_Y_OUTPUT_SIZE CCI_REG16(0x034e)
+#define SMIAPP_REG_U16_X_EVEN_INC CCI_REG16(0x0380)
+#define SMIAPP_REG_U16_X_ODD_INC CCI_REG16(0x0382)
+#define SMIAPP_REG_U16_Y_EVEN_INC CCI_REG16(0x0384)
+#define SMIAPP_REG_U16_Y_ODD_INC CCI_REG16(0x0386)
+#define SMIAPP_REG_U16_SCALING_MODE CCI_REG16(0x0400)
+#define SMIAPP_REG_U16_SPATIAL_SAMPLING CCI_REG16(0x0402)
+#define SMIAPP_REG_U16_SCALE_M CCI_REG16(0x0404)
+#define SMIAPP_REG_U16_SCALE_N CCI_REG16(0x0406)
+#define SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET CCI_REG16(0x0408)
+#define SMIAPP_REG_U16_DIGITAL_CROP_Y_OFFSET CCI_REG16(0x040a)
+#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_WIDTH CCI_REG16(0x040c)
+#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_HEIGHT CCI_REG16(0x040e)
+#define SMIAPP_REG_U16_COMPRESSION_MODE CCI_REG16(0x0500)
+#define SMIAPP_REG_U16_TEST_PATTERN_MODE CCI_REG16(0x0600)
+#define SMIAPP_REG_U16_TEST_DATA_RED CCI_REG16(0x0602)
+#define SMIAPP_REG_U16_TEST_DATA_GREENR CCI_REG16(0x0604)
+#define SMIAPP_REG_U16_TEST_DATA_BLUE CCI_REG16(0x0606)
+#define SMIAPP_REG_U16_TEST_DATA_GREENB CCI_REG16(0x0608)
+#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_WIDTH CCI_REG16(0x060a)
+#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_POSITION CCI_REG16(0x060c)
+#define SMIAPP_REG_U16_VERTICAL_CURSOR_WIDTH CCI_REG16(0x060e)
+#define SMIAPP_REG_U16_VERTICAL_CURSOR_POSITION CCI_REG16(0x0610)
+#define SMIAPP_REG_U16_FIFO_WATER_MARK_PIXELS CCI_REG16(0x0700)
+#define SMIAPP_REG_U8_TCLK_POST CCI_REG8(0x0800)
+#define SMIAPP_REG_U8_THS_PREPARE CCI_REG8(0x0801)
+#define SMIAPP_REG_U8_THS_ZERO_MIN CCI_REG8(0x0802)
+#define SMIAPP_REG_U8_THS_TRAIL CCI_REG8(0x0803)
+#define SMIAPP_REG_U8_TCLK_TRAIL_MIN CCI_REG8(0x0804)
+#define SMIAPP_REG_U8_TCLK_PREPARE CCI_REG8(0x0805)
+#define SMIAPP_REG_U8_TCLK_ZERO CCI_REG8(0x0806)
+#define SMIAPP_REG_U8_TLPX CCI_REG8(0x0807)
+#define SMIAPP_REG_U8_DPHY_CTRL CCI_REG8(0x0808)
+#define SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS CCI_REG32(0x0820)
+#define SMIAPP_REG_U8_BINNING_MODE CCI_REG8(0x0900)
+#define SMIAPP_REG_U8_BINNING_TYPE CCI_REG8(0x0901)
+#define SMIAPP_REG_U8_BINNING_WEIGHTING CCI_REG8(0x0902)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL CCI_REG8(0x0a00)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS CCI_REG8(0x0a01)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT CCI_REG8(0x0a02)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 CCI_REG8(0x0a04)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_1 CCI_REG8(0x0a05)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_2 CCI_REG8(0x0a06)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_3 CCI_REG8(0x0a07)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_4 CCI_REG8(0x0a08)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_5 CCI_REG8(0x0a09)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_12 CCI_REG8(0x0a10)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_13 CCI_REG8(0x0a11)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_14 CCI_REG8(0x0a12)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_15 CCI_REG8(0x0a13)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_16 CCI_REG8(0x0a14)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_17 CCI_REG8(0x0a15)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_18 CCI_REG8(0x0a16)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_19 CCI_REG8(0x0a17)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_20 CCI_REG8(0x0a18)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_21 CCI_REG8(0x0a19)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_22 CCI_REG8(0x0a1a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_23 CCI_REG8(0x0a1b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_24 CCI_REG8(0x0a1c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_25 CCI_REG8(0x0a1d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_26 CCI_REG8(0x0a1e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_27 CCI_REG8(0x0a1f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_28 CCI_REG8(0x0a20)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_29 CCI_REG8(0x0a21)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_30 CCI_REG8(0x0a22)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_31 CCI_REG8(0x0a23)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_32 CCI_REG8(0x0a24)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_33 CCI_REG8(0x0a25)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_34 CCI_REG8(0x0a26)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_35 CCI_REG8(0x0a27)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_36 CCI_REG8(0x0a28)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_37 CCI_REG8(0x0a29)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_38 CCI_REG8(0x0a2a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_39 CCI_REG8(0x0a2b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_40 CCI_REG8(0x0a2c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_41 CCI_REG8(0x0a2d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_42 CCI_REG8(0x0a2e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_43 CCI_REG8(0x0a2f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_44 CCI_REG8(0x0a30)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_45 CCI_REG8(0x0a31)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_46 CCI_REG8(0x0a32)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_47 CCI_REG8(0x0a33)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_48 CCI_REG8(0x0a34)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_49 CCI_REG8(0x0a35)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_50 CCI_REG8(0x0a36)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_51 CCI_REG8(0x0a37)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_52 CCI_REG8(0x0a38)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_53 CCI_REG8(0x0a39)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_54 CCI_REG8(0x0a3a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_55 CCI_REG8(0x0a3b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_56 CCI_REG8(0x0a3c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_57 CCI_REG8(0x0a3d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_58 CCI_REG8(0x0a3e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_59 CCI_REG8(0x0a3f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_60 CCI_REG8(0x0a40)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_61 CCI_REG8(0x0a41)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_62 CCI_REG8(0x0a42)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_63 CCI_REG8(0x0a43)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_CTRL CCI_REG8(0x0a44)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_STATUS CCI_REG8(0x0a45)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_PAGE_SELECT CCI_REG8(0x0a46)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_0 CCI_REG8(0x0a48)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_1 CCI_REG8(0x0a49)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_2 CCI_REG8(0x0a4a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_3 CCI_REG8(0x0a4b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_4 CCI_REG8(0x0a4c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_5 CCI_REG8(0x0a4d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_6 CCI_REG8(0x0a4e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_7 CCI_REG8(0x0a4f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_8 CCI_REG8(0x0a50)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_9 CCI_REG8(0x0a51)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_10 CCI_REG8(0x0a52)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_11 CCI_REG8(0x0a53)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_12 CCI_REG8(0x0a54)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_13 CCI_REG8(0x0a55)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_14 CCI_REG8(0x0a56)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_15 CCI_REG8(0x0a57)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_16 CCI_REG8(0x0a58)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_17 CCI_REG8(0x0a59)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_18 CCI_REG8(0x0a5a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_19 CCI_REG8(0x0a5b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_20 CCI_REG8(0x0a5c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_21 CCI_REG8(0x0a5d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_22 CCI_REG8(0x0a5e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_23 CCI_REG8(0x0a5f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_24 CCI_REG8(0x0a60)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_25 CCI_REG8(0x0a61)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_26 CCI_REG8(0x0a62)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_27 CCI_REG8(0x0a63)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_28 CCI_REG8(0x0a64)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_29 CCI_REG8(0x0a65)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_30 CCI_REG8(0x0a66)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_31 CCI_REG8(0x0a67)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_32 CCI_REG8(0x0a68)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_33 CCI_REG8(0x0a69)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_34 CCI_REG8(0x0a6a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_35 CCI_REG8(0x0a6b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_36 CCI_REG8(0x0a6c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_37 CCI_REG8(0x0a6d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_38 CCI_REG8(0x0a6e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_39 CCI_REG8(0x0a6f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_40 CCI_REG8(0x0a70)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_41 CCI_REG8(0x0a71)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_42 CCI_REG8(0x0a72)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_43 CCI_REG8(0x0a73)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_44 CCI_REG8(0x0a74)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_45 CCI_REG8(0x0a75)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_46 CCI_REG8(0x0a76)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_47 CCI_REG8(0x0a77)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_48 CCI_REG8(0x0a78)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_49 CCI_REG8(0x0a79)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_50 CCI_REG8(0x0a7a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_51 CCI_REG8(0x0a7b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_52 CCI_REG8(0x0a7c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_53 CCI_REG8(0x0a7d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_54 CCI_REG8(0x0a7e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_55 CCI_REG8(0x0a7f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_56 CCI_REG8(0x0a80)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_57 CCI_REG8(0x0a81)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_58 CCI_REG8(0x0a82)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_59 CCI_REG8(0x0a83)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_60 CCI_REG8(0x0a84)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_61 CCI_REG8(0x0a85)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_62 CCI_REG8(0x0a86)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_63 CCI_REG8(0x0a87)
+#define SMIAPP_REG_U8_SHADING_CORRECTION_ENABLE CCI_REG8(0x0b00)
+#define SMIAPP_REG_U8_LUMINANCE_CORRECTION_LEVEL CCI_REG8(0x0b01)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_ENABLE CCI_REG8(0x0b02)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_WEIGHT CCI_REG8(0x0b03)
+#define SMIAPP_REG_U8_BLACK_LEVEL_CORRECTION_ENABLE CCI_REG8(0x0b04)
+#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ENABLE CCI_REG8(0x0b05)
+#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_ENABLE CCI_REG8(0x0b06)
+#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_WEIGHT CCI_REG8(0x0b07)
+#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_ENABLE CCI_REG8(0x0b08)
+#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_WEIGHT CCI_REG8(0x0b09)
+#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_ENABLE CCI_REG8(0x0b0a)
+#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_WEIGHT CCI_REG8(0x0b0b)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_ENABLE CCI_REG8(0x0b0c)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_WEIGHT CCI_REG8(0x0b0d)
+#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ENABLE CCI_REG8(0x0b0e)
+#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ADJUST CCI_REG8(0x0b0f)
+#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ADJUST CCI_REG8(0x0b10)
+#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ENABLE CCI_REG8(0x0b11)
+#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ADJUST CCI_REG8(0x0b12)
+#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ENABLE CCI_REG8(0x0b13)
+#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ADJUST CCI_REG8(0x0b14)
+#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ENABLE CCI_REG8(0x0b15)
+#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ADJUST CCI_REG8(0x0b16)
+#define SMIAPP_REG_U8_EDOF_MODE CCI_REG8(0x0b80)
+#define SMIAPP_REG_U8_SHARPNESS CCI_REG8(0x0b83)
+#define SMIAPP_REG_U8_DENOISING CCI_REG8(0x0b84)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC CCI_REG8(0x0b85)
+#define SMIAPP_REG_U16_DEPTH_OF_FIELD CCI_REG16(0x0b86)
+#define SMIAPP_REG_U16_FOCUS_DISTANCE CCI_REG16(0x0b88)
+#define SMIAPP_REG_U8_ESTIMATION_MODE_CTRL CCI_REG8(0x0b8a)
+#define SMIAPP_REG_U16_COLOUR_TEMPERATURE CCI_REG16(0x0b8c)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENR CCI_REG16(0x0b8e)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_RED CCI_REG16(0x0b90)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_BLUE CCI_REG16(0x0b92)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENB CCI_REG16(0x0b94)
+#define SMIAPP_REG_U8_ESTIMATION_ZONE_MODE CCI_REG8(0x0bc0)
+#define SMIAPP_REG_U16_FIXED_ZONE_WEIGHTING CCI_REG16(0x0bc2)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_X_START CCI_REG16(0x0bc4)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_Y_START CCI_REG16(0x0bc6)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_WIDTH CCI_REG16(0x0bc8)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_HEIGHT CCI_REG16(0x0bca)
+#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL1 CCI_REG8(0x0c00)
+#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL2 CCI_REG8(0x0c01)
+#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_1 CCI_REG8(0x0c02)
+#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_2 CCI_REG8(0x0c03)
+#define SMIAPP_REG_U16_TRDY_CTRL CCI_REG16(0x0c04)
+#define SMIAPP_REG_U16_TRDOUT_CTRL CCI_REG16(0x0c06)
+#define SMIAPP_REG_U16_TSHUTTER_STROBE_DELAY_CTRL CCI_REG16(0x0c08)
+#define SMIAPP_REG_U16_TSHUTTER_STROBE_WIDTH_CTRL CCI_REG16(0x0c0a)
+#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_CTRL CCI_REG16(0x0c0c)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_CTRL CCI_REG16(0x0c0e)
+#define SMIAPP_REG_U16_TGRST_INTERVAL_CTRL CCI_REG16(0x0c10)
+#define SMIAPP_REG_U8_FLASH_STROBE_ADJUSTMENT CCI_REG8(0x0c12)
+#define SMIAPP_REG_U16_FLASH_STROBE_START_POINT CCI_REG16(0x0c14)
+#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_RS_CTRL CCI_REG16(0x0c16)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL CCI_REG16(0x0c18)
+#define SMIAPP_REG_U8_FLASH_MODE_RS CCI_REG8(0x0c1a)
+#define SMIAPP_REG_U8_FLASH_TRIGGER_RS CCI_REG8(0x0c1b)
+#define SMIAPP_REG_U8_FLASH_STATUS CCI_REG8(0x0c1c)
+#define SMIAPP_REG_U8_SA_STROBE_MODE CCI_REG8(0x0c1d)
+#define SMIAPP_REG_U16_SA_STROBE_START_POINT CCI_REG16(0x0c1e)
+#define SMIAPP_REG_U16_TSA_STROBE_DELAY_CTRL CCI_REG16(0x0c20)
+#define SMIAPP_REG_U16_TSA_STROBE_WIDTH_CTRL CCI_REG16(0x0c22)
+#define SMIAPP_REG_U8_SA_STROBE_TRIGGER CCI_REG8(0x0c24)
+#define SMIAPP_REG_U8_SPECIAL_ACTUATOR_STATUS CCI_REG8(0x0c25)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_RS_CTRL CCI_REG16(0x0c26)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_RS_CTRL CCI_REG16(0x0c28)
+#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_RS_CTRL CCI_REG8(0x0c2a)
+#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_CTRL CCI_REG8(0x0c2b)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_CTRL CCI_REG16(0x0c2c)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_CTRL CCI_REG16(0x0c2e)
+#define SMIAPP_REG_U8_LOW_LEVEL_CTRL CCI_REG8(0x0c80)
+#define SMIAPP_REG_U16_MAIN_TRIGGER_REF_POINT CCI_REG16(0x0c82)
+#define SMIAPP_REG_U16_MAIN_TRIGGER_T3 CCI_REG16(0x0c84)
+#define SMIAPP_REG_U8_MAIN_TRIGGER_COUNT CCI_REG8(0x0c86)
+#define SMIAPP_REG_U16_PHASE1_TRIGGER_T3 CCI_REG16(0x0c88)
+#define SMIAPP_REG_U8_PHASE1_TRIGGER_COUNT CCI_REG8(0x0c8a)
+#define SMIAPP_REG_U16_PHASE2_TRIGGER_T3 CCI_REG16(0x0c8c)
+#define SMIAPP_REG_U8_PHASE2_TRIGGER_COUNT CCI_REG8(0x0c8e)
+#define SMIAPP_REG_U8_MECH_SHUTTER_CTRL CCI_REG8(0x0d00)
+#define SMIAPP_REG_U8_OPERATION_MODE CCI_REG8(0x0d01)
+#define SMIAPP_REG_U8_ACT_STATE1 CCI_REG8(0x0d02)
+#define SMIAPP_REG_U8_ACT_STATE2 CCI_REG8(0x0d03)
+#define SMIAPP_REG_U16_FOCUS_CHANGE CCI_REG16(0x0d80)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_CONTROL CCI_REG16(0x0d82)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE1 CCI_REG16(0x0d84)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE2 CCI_REG16(0x0d86)
+#define SMIAPP_REG_U8_STROBE_COUNT_PHASE1 CCI_REG8(0x0d88)
+#define SMIAPP_REG_U8_STROBE_COUNT_PHASE2 CCI_REG8(0x0d89)
+#define SMIAPP_REG_U8_POSITION CCI_REG8(0x0d8a)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CONTROL CCI_REG8(0x0e00)
+#define SMIAPP_REG_U8_BRACKETING_LUT_MODE CCI_REG8(0x0e01)
+#define SMIAPP_REG_U8_BRACKETING_LUT_ENTRY_CONTROL CCI_REG8(0x0e02)
+#define SMIAPP_REG_U8_LUT_PARAMETERS_START CCI_REG8(0x0e10)
+#define SMIAPP_REG_U8_LUT_PARAMETERS_END CCI_REG8(0x0eff)
+#define SMIAPP_REG_U16_INTEGRATION_TIME_CAPABILITY CCI_REG16(0x1000)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MIN CCI_REG16(0x1004)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MAX_MARGIN CCI_REG16(0x1006)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN CCI_REG16(0x1008)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN CCI_REG16(0x100a)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_CAPABILITY CCI_REG16(0x1080)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_MIN CCI_REG16(0x1084)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_MAX CCI_REG16(0x1086)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_STEP_SIZE CCI_REG16(0x1088)
+#define SMIAPP_REG_F32_MIN_EXT_CLK_FREQ_HZ (CCI_REG32(0x1100) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_EXT_CLK_FREQ_HZ (CCI_REG32(0x1104) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_U16_MIN_PRE_PLL_CLK_DIV CCI_REG16(0x1108)
+#define SMIAPP_REG_U16_MAX_PRE_PLL_CLK_DIV CCI_REG16(0x110a)
+#define SMIAPP_REG_F32_MIN_PLL_IP_FREQ_HZ (CCI_REG32(0x110c) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_PLL_IP_FREQ_HZ (CCI_REG32(0x1110) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_U16_MIN_PLL_MULTIPLIER CCI_REG16(0x1114)
+#define SMIAPP_REG_U16_MAX_PLL_MULTIPLIER CCI_REG16(0x1116)
+#define SMIAPP_REG_F32_MIN_PLL_OP_FREQ_HZ (CCI_REG32(0x1118) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_PLL_OP_FREQ_HZ (CCI_REG32(0x111c) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_U16_MIN_VT_SYS_CLK_DIV CCI_REG16(0x1120)
+#define SMIAPP_REG_U16_MAX_VT_SYS_CLK_DIV CCI_REG16(0x1122)
+#define SMIAPP_REG_F32_MIN_VT_SYS_CLK_FREQ_HZ (CCI_REG32(0x1124) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_VT_SYS_CLK_FREQ_HZ (CCI_REG32(0x1128) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MIN_VT_PIX_CLK_FREQ_HZ (CCI_REG32(0x112c) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_VT_PIX_CLK_FREQ_HZ (CCI_REG32(0x1130) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_U16_MIN_VT_PIX_CLK_DIV CCI_REG16(0x1134)
+#define SMIAPP_REG_U16_MAX_VT_PIX_CLK_DIV CCI_REG16(0x1136)
+#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES CCI_REG16(0x1140)
+#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES CCI_REG16(0x1142)
+#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK CCI_REG16(0x1144)
+#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK CCI_REG16(0x1146)
+#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK CCI_REG16(0x1148)
+#define SMIAPP_REG_U16_MIN_FRAME_BLANKING_LINES CCI_REG16(0x114a)
+#define SMIAPP_REG_U8_MIN_LINE_LENGTH_PCK_STEP_SIZE CCI_REG8(0x114c)
+#define SMIAPP_REG_U16_MIN_OP_SYS_CLK_DIV CCI_REG16(0x1160)
+#define SMIAPP_REG_U16_MAX_OP_SYS_CLK_DIV CCI_REG16(0x1162)
+#define SMIAPP_REG_F32_MIN_OP_SYS_CLK_FREQ_HZ (CCI_REG32(0x1164) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_OP_SYS_CLK_FREQ_HZ (CCI_REG32(0x1168) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_U16_MIN_OP_PIX_CLK_DIV CCI_REG16(0x116c)
+#define SMIAPP_REG_U16_MAX_OP_PIX_CLK_DIV CCI_REG16(0x116e)
+#define SMIAPP_REG_F32_MIN_OP_PIX_CLK_FREQ_HZ (CCI_REG32(0x1170) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_F32_MAX_OP_PIX_CLK_FREQ_HZ (CCI_REG32(0x1174) | CCS_FL_FLOAT_IREAL)
+#define SMIAPP_REG_U16_X_ADDR_MIN CCI_REG16(0x1180)
+#define SMIAPP_REG_U16_Y_ADDR_MIN CCI_REG16(0x1182)
+#define SMIAPP_REG_U16_X_ADDR_MAX CCI_REG16(0x1184)
+#define SMIAPP_REG_U16_Y_ADDR_MAX CCI_REG16(0x1186)
+#define SMIAPP_REG_U16_MIN_X_OUTPUT_SIZE CCI_REG16(0x1188)
+#define SMIAPP_REG_U16_MIN_Y_OUTPUT_SIZE CCI_REG16(0x118a)
+#define SMIAPP_REG_U16_MAX_X_OUTPUT_SIZE CCI_REG16(0x118c)
+#define SMIAPP_REG_U16_MAX_Y_OUTPUT_SIZE CCI_REG16(0x118e)
+#define SMIAPP_REG_U16_MIN_EVEN_INC CCI_REG16(0x11c0)
+#define SMIAPP_REG_U16_MAX_EVEN_INC CCI_REG16(0x11c2)
+#define SMIAPP_REG_U16_MIN_ODD_INC CCI_REG16(0x11c4)
+#define SMIAPP_REG_U16_MAX_ODD_INC CCI_REG16(0x11c6)
+#define SMIAPP_REG_U16_SCALING_CAPABILITY CCI_REG16(0x1200)
+#define SMIAPP_REG_U16_SCALER_M_MIN CCI_REG16(0x1204)
+#define SMIAPP_REG_U16_SCALER_M_MAX CCI_REG16(0x1206)
+#define SMIAPP_REG_U16_SCALER_N_MIN CCI_REG16(0x1208)
+#define SMIAPP_REG_U16_SCALER_N_MAX CCI_REG16(0x120a)
+#define SMIAPP_REG_U16_SPATIAL_SAMPLING_CAPABILITY CCI_REG16(0x120c)
+#define SMIAPP_REG_U8_DIGITAL_CROP_CAPABILITY CCI_REG8(0x120e)
+#define SMIAPP_REG_U16_COMPRESSION_CAPABILITY CCI_REG16(0x1300)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINRED CCI_REG16(0x1400)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINRED CCI_REG16(0x1402)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINRED CCI_REG16(0x1404)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINGREEN CCI_REG16(0x1406)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINGREEN CCI_REG16(0x1408)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINGREEN CCI_REG16(0x140a)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINBLUE CCI_REG16(0x140c)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINBLUE CCI_REG16(0x140e)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINBLUE CCI_REG16(0x1410)
+#define SMIAPP_REG_U16_FIFO_SIZE_PIXELS CCI_REG16(0x1500)
+#define SMIAPP_REG_U8_FIFO_SUPPORT_CAPABILITY CCI_REG8(0x1502)
+#define SMIAPP_REG_U8_DPHY_CTRL_CAPABILITY CCI_REG8(0x1600)
+#define SMIAPP_REG_U8_CSI_LANE_MODE_CAPABILITY CCI_REG8(0x1601)
+#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE_CAPABILITY CCI_REG8(0x1602)
+#define SMIAPP_REG_U8_FAST_STANDBY_CAPABILITY CCI_REG8(0x1603)
+#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL_CAPABILITY CCI_REG8(0x1604)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS CCI_REG32(0x1608)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS CCI_REG32(0x160c)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS CCI_REG32(0x1610)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS CCI_REG32(0x1614)
+#define SMIAPP_REG_U8_TEMP_SENSOR_CAPABILITY CCI_REG8(0x1618)
+#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES_BIN CCI_REG16(0x1700)
+#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES_BIN CCI_REG16(0x1702)
+#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK_BIN CCI_REG16(0x1704)
+#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK_BIN CCI_REG16(0x1706)
+#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK_BIN CCI_REG16(0x1708)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN_BIN CCI_REG16(0x170a)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN CCI_REG16(0x170c)
+#define SMIAPP_REG_U8_BINNING_CAPABILITY CCI_REG8(0x1710)
+#define SMIAPP_REG_U8_BINNING_WEIGHTING_CAPABILITY CCI_REG8(0x1711)
+#define SMIAPP_REG_U8_BINNING_SUBTYPES CCI_REG8(0x1712)
+#define SMIAPP_REG_U8_BINNING_TYPE_n(n) CCI_REG8(0x1713 + (n)) /* 1 <= n <= 237 */
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_CAPABILITY CCI_REG8(0x1800)
+#define SMIAPP_REG_U8_SHADING_CORRECTION_CAPABILITY CCI_REG8(0x1900)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_CAPABILITY CCI_REG8(0x1901)
+#define SMIAPP_REG_U8_BLACK_LEVEL_CAPABILITY CCI_REG8(0x1902)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_CAPABILITY CCI_REG8(0x1903)
+#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY CCI_REG16(0x1904)
+#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY_2 CCI_REG16(0x1906)
+#define SMIAPP_REG_U8_EDOF_CAPABILITY CCI_REG8(0x1980)
+#define SMIAPP_REG_U8_ESTIMATION_FRAMES CCI_REG8(0x1981)
+#define SMIAPP_REG_U8_SUPPORTS_SHARPNESS_ADJ CCI_REG8(0x1982)
+#define SMIAPP_REG_U8_SUPPORTS_DENOISING_ADJ CCI_REG8(0x1983)
+#define SMIAPP_REG_U8_SUPPORTS_MODULE_SPECIFIC_ADJ CCI_REG8(0x1984)
+#define SMIAPP_REG_U8_SUPPORTS_DEPTH_OF_FIELD_ADJ CCI_REG8(0x1985)
+#define SMIAPP_REG_U8_SUPPORTS_FOCUS_DISTANCE_ADJ CCI_REG8(0x1986)
+#define SMIAPP_REG_U8_COLOUR_FEEDBACK_CAPABILITY CCI_REG8(0x1987)
+#define SMIAPP_REG_U8_EDOF_SUPPORT_AB_NXM CCI_REG8(0x1988)
+#define SMIAPP_REG_U8_ESTIMATION_MODE_CAPABILITY CCI_REG8(0x19c0)
+#define SMIAPP_REG_U8_ESTIMATION_ZONE_CAPABILITY CCI_REG8(0x19c1)
+#define SMIAPP_REG_U16_EST_DEPTH_OF_FIELD CCI_REG16(0x19c2)
+#define SMIAPP_REG_U16_EST_FOCUS_DISTANCE CCI_REG16(0x19c4)
+#define SMIAPP_REG_U16_CAPABILITY_TRDY_MIN CCI_REG16(0x1a00)
+#define SMIAPP_REG_U8_FLASH_MODE_CAPABILITY CCI_REG8(0x1a02)
+#define SMIAPP_REG_U16_MECH_SHUT_AND_ACT_START_ADDR CCI_REG16(0x1b02)
+#define SMIAPP_REG_U8_ACTUATOR_CAPABILITY CCI_REG8(0x1b04)
+#define SMIAPP_REG_U16_ACTUATOR_TYPE CCI_REG16(0x1b40)
+#define SMIAPP_REG_U8_AF_DEVICE_ADDRESS CCI_REG8(0x1b42)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_ADDRESS CCI_REG16(0x1b44)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_1 CCI_REG8(0x1c00)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_2 CCI_REG8(0x1c01)
+#define SMIAPP_REG_U8_BRACKETING_LUT_SIZE CCI_REG8(0x1c02)
/* Register bit definitions */
#define SMIAPP_IMAGE_ORIENTATION_HFLIP BIT(0)
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index 8e9ebed09..ca9bb29da 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -424,8 +424,7 @@ static int ub913_set_fmt(struct v4l2_subdev *sd,
}
/* Set sink format */
- fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
- format->stream);
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
if (!fmt)
return -EINVAL;
@@ -444,8 +443,8 @@ static int ub913_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ub913_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int ub913_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_route routes[] = {
{
@@ -504,7 +503,6 @@ static const struct v4l2_subdev_pad_ops ub913_pad_ops = {
.get_frame_desc = ub913_get_frame_desc,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = ub913_set_fmt,
- .init_cfg = ub913_init_cfg,
};
static const struct v4l2_subdev_ops ub913_subdev_ops = {
@@ -512,6 +510,10 @@ static const struct v4l2_subdev_ops ub913_subdev_ops = {
.pad = &ub913_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ub913_internal_ops = {
+ .init_state = ub913_init_state,
+};
+
static const struct media_entity_operations ub913_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -745,6 +747,7 @@ static int ub913_subdev_init(struct ub913_data *priv)
int ret;
v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub913_subdev_ops);
+ priv->sd.internal_ops = &ub913_internal_ops;
priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
priv->sd.entity.ops = &ub913_entity_ops;
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index 644022312..16f88db14 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -558,8 +558,7 @@ static int ub953_set_fmt(struct v4l2_subdev *sd,
return v4l2_subdev_get_fmt(sd, state, format);
/* Set sink format */
- fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
- format->stream);
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
if (!fmt)
return -EINVAL;
@@ -576,8 +575,8 @@ static int ub953_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ub953_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int ub953_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_route routes[] = {
{
@@ -714,7 +713,6 @@ static const struct v4l2_subdev_pad_ops ub953_pad_ops = {
.get_frame_desc = ub953_get_frame_desc,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = ub953_set_fmt,
- .init_cfg = ub953_init_cfg,
};
static const struct v4l2_subdev_core_ops ub953_subdev_core_ops = {
@@ -728,6 +726,10 @@ static const struct v4l2_subdev_ops ub953_subdev_ops = {
.pad = &ub953_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ub953_internal_ops = {
+ .init_state = ub953_init_state,
+};
+
static const struct media_entity_operations ub953_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -1241,6 +1243,7 @@ static int ub953_subdev_init(struct ub953_data *priv)
int ret;
v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub953_subdev_ops);
+ priv->sd.internal_ops = &ub953_internal_ops;
priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_STREAMS;
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
index b8f3e5ca0..ffe5f25f8 100644
--- a/drivers/media/i2c/ds90ub960.c
+++ b/drivers/media/i2c/ds90ub960.c
@@ -2451,9 +2451,8 @@ static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
if (rx_data[nport].num_streams > 2)
return -EPIPE;
- fmt = v4l2_subdev_state_get_stream_format(state,
- route->sink_pad,
- route->sink_stream);
+ fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
+ route->sink_stream);
if (!fmt)
return -EPIPE;
@@ -2842,8 +2841,8 @@ static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
const struct ub960_format_info *ub960_fmt;
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_state_get_stream_format(state, pad,
- route->source_stream);
+ fmt = v4l2_subdev_state_get_format(state, pad,
+ route->source_stream);
if (!fmt) {
ret = -EINVAL;
@@ -2891,8 +2890,7 @@ static int ub960_set_fmt(struct v4l2_subdev *sd,
if (!ub960_find_format(format->format.code))
format->format.code = ub960_formats[0].code;
- fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
- format->stream);
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
if (!fmt)
return -EINVAL;
@@ -2908,8 +2906,8 @@ static int ub960_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ub960_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int ub960_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct ub960_data *priv = sd_to_ub960(sd);
@@ -2940,8 +2938,6 @@ static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = ub960_set_fmt,
-
- .init_cfg = ub960_init_cfg,
};
static int ub960_log_status(struct v4l2_subdev *sd)
@@ -3093,6 +3089,10 @@ static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
+static const struct v4l2_subdev_internal_ops ub960_internal_ops = {
+ .init_state = ub960_init_state,
+};
+
static const struct v4l2_subdev_ops ub960_subdev_ops = {
.core = &ub960_subdev_core_ops,
.pad = &ub960_pad_ops,
@@ -3652,6 +3652,7 @@ static int ub960_create_subdev(struct ub960_data *priv)
int ret;
v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops);
+ priv->sd.internal_ops = &ub960_internal_ops;
v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
priv->sd.ctrl_handler = &priv->ctrl_handler;
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index d6fc843f9..f548b1bb7 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -995,8 +995,7 @@ __et8ek8_get_pad_format(struct et8ek8_sensor *sensor,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&sensor->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &sensor->format;
default:
@@ -1047,10 +1046,18 @@ static int et8ek8_set_pad_format(struct v4l2_subdev *subdev,
}
static int et8ek8_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fi)
{
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
memset(fi, 0, sizeof(*fi));
fi->interval = sensor->current_reglist->mode.timeperframe;
@@ -1058,11 +1065,19 @@ static int et8ek8_get_frame_interval(struct v4l2_subdev *subdev,
}
static int et8ek8_set_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fi)
{
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
struct et8ek8_reglist *reglist;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
reglist = et8ek8_reglist_find_mode_ival(&meta_reglist,
sensor->current_reglist,
&fi->interval);
@@ -1343,8 +1358,6 @@ static int et8ek8_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static const struct v4l2_subdev_video_ops et8ek8_video_ops = {
.s_stream = et8ek8_s_stream,
- .g_frame_interval = et8ek8_get_frame_interval,
- .s_frame_interval = et8ek8_set_frame_interval,
};
static const struct v4l2_subdev_core_ops et8ek8_core_ops = {
@@ -1357,6 +1370,8 @@ static const struct v4l2_subdev_pad_ops et8ek8_pad_ops = {
.enum_frame_interval = et8ek8_enum_frame_ival,
.get_fmt = et8ek8_get_pad_format,
.set_fmt = et8ek8_set_pad_format,
+ .get_frame_interval = et8ek8_get_frame_interval,
+ .set_frame_interval = et8ek8_set_frame_interval,
};
static const struct v4l2_subdev_ops et8ek8_ops = {
diff --git a/drivers/media/i2c/gc0308.c b/drivers/media/i2c/gc0308.c
new file mode 100644
index 000000000..fa754a8a3
--- /dev/null
+++ b/drivers/media/i2c/gc0308.c
@@ -0,0 +1,1451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the GalaxyCore GC0308 camera sensor.
+ *
+ * Copyright (c) 2023 Sebastian Reichel <sre@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* Analog & CISCTL */
+#define GC0308_CHIP_ID CCI_REG8(0x000)
+#define GC0308_HBLANK CCI_REG8(0x001)
+#define GC0308_VBLANK CCI_REG8(0x002)
+#define GC0308_EXP CCI_REG16(0x003)
+#define GC0308_ROW_START CCI_REG16(0x005)
+#define GC0308_COL_START CCI_REG16(0x007)
+#define GC0308_WIN_HEIGHT CCI_REG16(0x009)
+#define GC0308_WIN_WIDTH CCI_REG16(0x00b)
+#define GC0308_VS_START_TIME CCI_REG8(0x00d) /* in rows */
+#define GC0308_VS_END_TIME CCI_REG8(0x00e) /* in rows */
+#define GC0308_VB_HB CCI_REG8(0x00f)
+#define GC0308_RSH_WIDTH CCI_REG8(0x010)
+#define GC0308_TSP_WIDTH CCI_REG8(0x011)
+#define GC0308_SAMPLE_HOLD_DELAY CCI_REG8(0x012)
+#define GC0308_ROW_TAIL_WIDTH CCI_REG8(0x013)
+#define GC0308_CISCTL_MODE1 CCI_REG8(0x014)
+#define GC0308_CISCTL_MODE2 CCI_REG8(0x015)
+#define GC0308_CISCTL_MODE3 CCI_REG8(0x016)
+#define GC0308_CISCTL_MODE4 CCI_REG8(0x017)
+#define GC0308_ANALOG_MODE1 CCI_REG8(0x01a)
+#define GC0308_ANALOG_MODE2 CCI_REG8(0x01b)
+#define GC0308_HRST_RSG_V18 CCI_REG8(0x01c)
+#define GC0308_VREF_V25 CCI_REG8(0x01d)
+#define GC0308_ADC_R CCI_REG8(0x01e)
+#define GC0308_PAD_DRV CCI_REG8(0x01f)
+#define GC0308_SOFT_RESET CCI_REG8(0x0fe)
+
+/* ISP */
+#define GC0308_BLOCK_EN1 CCI_REG8(0x020)
+#define GC0308_BLOCK_EN2 CCI_REG8(0x021)
+#define GC0308_AAAA_EN CCI_REG8(0x022)
+#define GC0308_SPECIAL_EFFECT CCI_REG8(0x023)
+#define GC0308_OUT_FORMAT CCI_REG8(0x024)
+#define GC0308_OUT_EN CCI_REG8(0x025)
+#define GC0308_SYNC_MODE CCI_REG8(0x026)
+#define GC0308_CLK_DIV_MODE CCI_REG8(0x028)
+#define GC0308_BYPASS_MODE CCI_REG8(0x029)
+#define GC0308_CLK_GATING CCI_REG8(0x02a)
+#define GC0308_DITHER_MODE CCI_REG8(0x02b)
+#define GC0308_DITHER_BIT CCI_REG8(0x02c)
+#define GC0308_DEBUG_MODE1 CCI_REG8(0x02d)
+#define GC0308_DEBUG_MODE2 CCI_REG8(0x02e)
+#define GC0308_DEBUG_MODE3 CCI_REG8(0x02f)
+#define GC0308_CROP_WIN_MODE CCI_REG8(0x046)
+#define GC0308_CROP_WIN_Y1 CCI_REG8(0x047)
+#define GC0308_CROP_WIN_X1 CCI_REG8(0x048)
+#define GC0308_CROP_WIN_HEIGHT CCI_REG16(0x049)
+#define GC0308_CROP_WIN_WIDTH CCI_REG16(0x04b)
+
+/* BLK */
+#define GC0308_BLK_MODE CCI_REG8(0x030)
+#define GC0308_BLK_LIMIT_VAL CCI_REG8(0x031)
+#define GC0308_GLOBAL_OFF CCI_REG8(0x032)
+#define GC0308_CURRENT_R_OFF CCI_REG8(0x033)
+#define GC0308_CURRENT_G_OFF CCI_REG8(0x034)
+#define GC0308_CURRENT_B_OFF CCI_REG8(0x035)
+#define GC0308_CURRENT_R_DARK_CURRENT CCI_REG8(0x036)
+#define GC0308_CURRENT_G_DARK_CURRENT CCI_REG8(0x037)
+#define GC0308_CURRENT_B_DARK_CURRENT CCI_REG8(0x038)
+#define GC0308_EXP_RATE_DARKC CCI_REG8(0x039)
+#define GC0308_OFF_SUBMODE CCI_REG8(0x03a)
+#define GC0308_DARKC_SUBMODE CCI_REG8(0x03b)
+#define GC0308_MANUAL_G1_OFF CCI_REG8(0x03c)
+#define GC0308_MANUAL_R1_OFF CCI_REG8(0x03d)
+#define GC0308_MANUAL_B2_OFF CCI_REG8(0x03e)
+#define GC0308_MANUAL_G2_OFF CCI_REG8(0x03f)
+
+/* PREGAIN */
+#define GC0308_GLOBAL_GAIN CCI_REG8(0x050)
+#define GC0308_AUTO_PREGAIN CCI_REG8(0x051)
+#define GC0308_AUTO_POSTGAIN CCI_REG8(0x052)
+#define GC0308_CHANNEL_GAIN_G1 CCI_REG8(0x053)
+#define GC0308_CHANNEL_GAIN_R CCI_REG8(0x054)
+#define GC0308_CHANNEL_GAIN_B CCI_REG8(0x055)
+#define GC0308_CHANNEL_GAIN_G2 CCI_REG8(0x056)
+#define GC0308_R_RATIO CCI_REG8(0x057)
+#define GC0308_G_RATIO CCI_REG8(0x058)
+#define GC0308_B_RATIO CCI_REG8(0x059)
+#define GC0308_AWB_R_GAIN CCI_REG8(0x05a)
+#define GC0308_AWB_G_GAIN CCI_REG8(0x05b)
+#define GC0308_AWB_B_GAIN CCI_REG8(0x05c)
+#define GC0308_LSC_DEC_LVL1 CCI_REG8(0x05d)
+#define GC0308_LSC_DEC_LVL2 CCI_REG8(0x05e)
+#define GC0308_LSC_DEC_LVL3 CCI_REG8(0x05f)
+
+/* DNDD */
+#define GC0308_DN_MODE_EN CCI_REG8(0x060)
+#define GC0308_DN_MODE_RATIO CCI_REG8(0x061)
+#define GC0308_DN_BILAT_B_BASE CCI_REG8(0x062)
+#define GC0308_DN_B_INCR CCI_REG8(0x063)
+#define GC0308_DN_BILAT_N_BASE CCI_REG8(0x064)
+#define GC0308_DN_N_INCR CCI_REG8(0x065)
+#define GC0308_DD_DARK_BRIGHT_TH CCI_REG8(0x066)
+#define GC0308_DD_FLAT_TH CCI_REG8(0x067)
+#define GC0308_DD_LIMIT CCI_REG8(0x068)
+
+/* ASDE - Auto Saturation De-noise and Edge-Enhancement */
+#define GC0308_ASDE_GAIN_TRESH CCI_REG8(0x069)
+#define GC0308_ASDE_GAIN_MODE CCI_REG8(0x06a)
+#define GC0308_ASDE_DN_SLOPE CCI_REG8(0x06b)
+#define GC0308_ASDE_DD_BRIGHT CCI_REG8(0x06c)
+#define GC0308_ASDE_DD_LIMIT CCI_REG8(0x06d)
+#define GC0308_ASDE_AUTO_EE1 CCI_REG8(0x06e)
+#define GC0308_ASDE_AUTO_EE2 CCI_REG8(0x06f)
+#define GC0308_ASDE_AUTO_SAT_DEC_SLOPE CCI_REG8(0x070)
+#define GC0308_ASDE_AUTO_SAT_LOW_LIMIT CCI_REG8(0x071)
+
+/* INTPEE - Interpolation and Edge-Enhancement */
+#define GC0308_EEINTP_MODE_1 CCI_REG8(0x072)
+#define GC0308_EEINTP_MODE_2 CCI_REG8(0x073)
+#define GC0308_DIRECTION_TH1 CCI_REG8(0x074)
+#define GC0308_DIRECTION_TH2 CCI_REG8(0x075)
+#define GC0308_DIFF_HV_TI_TH CCI_REG8(0x076)
+#define GC0308_EDGE12_EFFECT CCI_REG8(0x077)
+#define GC0308_EDGE_POS_RATIO CCI_REG8(0x078)
+#define GC0308_EDGE1_MINMAX CCI_REG8(0x079)
+#define GC0308_EDGE2_MINMAX CCI_REG8(0x07a)
+#define GC0308_EDGE12_TH CCI_REG8(0x07b)
+#define GC0308_EDGE_MAX CCI_REG8(0x07c)
+
+/* ABB - Auto Black Balance */
+#define GC0308_ABB_MODE CCI_REG8(0x080)
+#define GC0308_ABB_TARGET_AVGH CCI_REG8(0x081)
+#define GC0308_ABB_TARGET_AVGL CCI_REG8(0x082)
+#define GC0308_ABB_LIMIT_VAL CCI_REG8(0x083)
+#define GC0308_ABB_SPEED CCI_REG8(0x084)
+#define GC0308_CURR_R_BLACK_LVL CCI_REG8(0x085)
+#define GC0308_CURR_G_BLACK_LVL CCI_REG8(0x086)
+#define GC0308_CURR_B_BLACK_LVL CCI_REG8(0x087)
+#define GC0308_CURR_R_BLACK_FACTOR CCI_REG8(0x088)
+#define GC0308_CURR_G_BLACK_FACTOR CCI_REG8(0x089)
+#define GC0308_CURR_B_BLACK_FACTOR CCI_REG8(0x08a)
+
+/* LSC - Lens Shading Correction */
+#define GC0308_LSC_RED_B2 CCI_REG8(0x08b)
+#define GC0308_LSC_GREEN_B2 CCI_REG8(0x08c)
+#define GC0308_LSC_BLUE_B2 CCI_REG8(0x08d)
+#define GC0308_LSC_RED_B4 CCI_REG8(0x08e)
+#define GC0308_LSC_GREEN_B4 CCI_REG8(0x08f)
+#define GC0308_LSC_BLUE_B4 CCI_REG8(0x090)
+#define GC0308_LSC_ROW_CENTER CCI_REG8(0x091)
+#define GC0308_LSC_COL_CENTER CCI_REG8(0x092)
+
+/* CC - Channel Coefficient */
+#define GC0308_CC_MATRIX_C11 CCI_REG8(0x093)
+#define GC0308_CC_MATRIX_C12 CCI_REG8(0x094)
+#define GC0308_CC_MATRIX_C13 CCI_REG8(0x095)
+#define GC0308_CC_MATRIX_C21 CCI_REG8(0x096)
+#define GC0308_CC_MATRIX_C22 CCI_REG8(0x097)
+#define GC0308_CC_MATRIX_C23 CCI_REG8(0x098)
+#define GC0308_CC_MATRIX_C41 CCI_REG8(0x09c)
+#define GC0308_CC_MATRIX_C42 CCI_REG8(0x09d)
+#define GC0308_CC_MATRIX_C43 CCI_REG8(0x09e)
+
+/* GAMMA */
+#define GC0308_GAMMA_OUT0 CCI_REG8(0x09f)
+#define GC0308_GAMMA_OUT1 CCI_REG8(0x0a0)
+#define GC0308_GAMMA_OUT2 CCI_REG8(0x0a1)
+#define GC0308_GAMMA_OUT3 CCI_REG8(0x0a2)
+#define GC0308_GAMMA_OUT4 CCI_REG8(0x0a3)
+#define GC0308_GAMMA_OUT5 CCI_REG8(0x0a4)
+#define GC0308_GAMMA_OUT6 CCI_REG8(0x0a5)
+#define GC0308_GAMMA_OUT7 CCI_REG8(0x0a6)
+#define GC0308_GAMMA_OUT8 CCI_REG8(0x0a7)
+#define GC0308_GAMMA_OUT9 CCI_REG8(0x0a8)
+#define GC0308_GAMMA_OUT10 CCI_REG8(0x0a9)
+#define GC0308_GAMMA_OUT11 CCI_REG8(0x0aa)
+#define GC0308_GAMMA_OUT12 CCI_REG8(0x0ab)
+#define GC0308_GAMMA_OUT13 CCI_REG8(0x0ac)
+#define GC0308_GAMMA_OUT14 CCI_REG8(0x0ad)
+#define GC0308_GAMMA_OUT15 CCI_REG8(0x0ae)
+#define GC0308_GAMMA_OUT16 CCI_REG8(0x0af)
+
+/* YCP */
+#define GC0308_GLOBAL_SATURATION CCI_REG8(0x0b0)
+#define GC0308_SATURATION_CB CCI_REG8(0x0b1)
+#define GC0308_SATURATION_CR CCI_REG8(0x0b2)
+#define GC0308_LUMA_CONTRAST CCI_REG8(0x0b3)
+#define GC0308_CONTRAST_CENTER CCI_REG8(0x0b4)
+#define GC0308_LUMA_OFFSET CCI_REG8(0x0b5)
+#define GC0308_SKIN_CB_CENTER CCI_REG8(0x0b6)
+#define GC0308_SKIN_CR_CENTER CCI_REG8(0x0b7)
+#define GC0308_SKIN_RADIUS_SQUARE CCI_REG8(0x0b8)
+#define GC0308_SKIN_BRIGHTNESS CCI_REG8(0x0b9)
+#define GC0308_FIXED_CB CCI_REG8(0x0ba)
+#define GC0308_FIXED_CR CCI_REG8(0x0bb)
+#define GC0308_EDGE_DEC_SA CCI_REG8(0x0bd)
+#define GC0308_AUTO_GRAY_MODE CCI_REG8(0x0be)
+#define GC0308_SATURATION_SUB_STRENGTH CCI_REG8(0x0bf)
+#define GC0308_Y_GAMMA_OUT0 CCI_REG8(0x0c0)
+#define GC0308_Y_GAMMA_OUT1 CCI_REG8(0x0c1)
+#define GC0308_Y_GAMMA_OUT2 CCI_REG8(0x0c2)
+#define GC0308_Y_GAMMA_OUT3 CCI_REG8(0x0c3)
+#define GC0308_Y_GAMMA_OUT4 CCI_REG8(0x0c4)
+#define GC0308_Y_GAMMA_OUT5 CCI_REG8(0x0c5)
+#define GC0308_Y_GAMMA_OUT6 CCI_REG8(0x0c6)
+#define GC0308_Y_GAMMA_OUT7 CCI_REG8(0x0c7)
+#define GC0308_Y_GAMMA_OUT8 CCI_REG8(0x0c8)
+#define GC0308_Y_GAMMA_OUT9 CCI_REG8(0x0c9)
+#define GC0308_Y_GAMMA_OUT10 CCI_REG8(0x0ca)
+#define GC0308_Y_GAMMA_OUT11 CCI_REG8(0x0cb)
+#define GC0308_Y_GAMMA_OUT12 CCI_REG8(0x0cc)
+
+/* AEC - Automatic Exposure Control */
+#define GC0308_AEC_MODE1 CCI_REG8(0x0d0)
+#define GC0308_AEC_MODE2 CCI_REG8(0x0d1)
+#define GC0308_AEC_MODE3 CCI_REG8(0x0d2)
+#define GC0308_AEC_TARGET_Y CCI_REG8(0x0d3)
+#define GC0308_Y_AVG CCI_REG8(0x0d4)
+#define GC0308_AEC_HIGH_LOW_RANGE CCI_REG8(0x0d5)
+#define GC0308_AEC_IGNORE CCI_REG8(0x0d6)
+#define GC0308_AEC_LIMIT_HIGH_RANGE CCI_REG8(0x0d7)
+#define GC0308_AEC_R_OFFSET CCI_REG8(0x0d9)
+#define GC0308_AEC_GB_OFFSET CCI_REG8(0x0da)
+#define GC0308_AEC_SLOW_MARGIN CCI_REG8(0x0db)
+#define GC0308_AEC_FAST_MARGIN CCI_REG8(0x0dc)
+#define GC0308_AEC_EXP_CHANGE_GAIN CCI_REG8(0x0dd)
+#define GC0308_AEC_STEP2_SUNLIGHT CCI_REG8(0x0de)
+#define GC0308_AEC_I_FRAMES CCI_REG8(0x0df)
+#define GC0308_AEC_I_STOP_L_MARGIN CCI_REG8(0x0e0)
+#define GC0308_AEC_I_STOP_MARGIN CCI_REG8(0x0e1)
+#define GC0308_ANTI_FLICKER_STEP CCI_REG16(0x0e2)
+#define GC0308_EXP_LVL_1 CCI_REG16(0x0e4)
+#define GC0308_EXP_LVL_2 CCI_REG16(0x0e6)
+#define GC0308_EXP_LVL_3 CCI_REG16(0x0e8)
+#define GC0308_EXP_LVL_4 CCI_REG16(0x0ea)
+#define GC0308_MAX_EXP_LVL CCI_REG8(0x0ec)
+#define GC0308_EXP_MIN_L CCI_REG8(0x0ed)
+#define GC0308_MAX_POST_DF_GAIN CCI_REG8(0x0ee)
+#define GC0308_MAX_PRE_DG_GAIN CCI_REG8(0x0ef)
+
+/* ABS */
+#define GC0308_ABS_RANGE_COMP CCI_REG8(0x0f0)
+#define GC0308_ABS_STOP_MARGIN CCI_REG8(0x0f1)
+#define GC0308_Y_S_COMP CCI_REG8(0x0f2)
+#define GC0308_Y_STRETCH_LIMIT CCI_REG8(0x0f3)
+#define GC0308_Y_TILT CCI_REG8(0x0f4)
+#define GC0308_Y_STRETCH CCI_REG8(0x0f5)
+
+/* Measure Window */
+#define GC0308_BIG_WIN_X0 CCI_REG8(0x0f7)
+#define GC0308_BIG_WIN_Y0 CCI_REG8(0x0f8)
+#define GC0308_BIG_WIN_X1 CCI_REG8(0x0f9)
+#define GC0308_BIG_WIN_Y1 CCI_REG8(0x0fa)
+#define GC0308_DIFF_Y_BIG_THD CCI_REG8(0x0fb)
+
+/* OUT Module (P1) */
+#define GC0308_CLOSE_FRAME_EN CCI_REG8(0x150)
+#define GC0308_CLOSE_FRAME_NUM1 CCI_REG8(0x151)
+#define GC0308_CLOSE_FRAME_NUM2 CCI_REG8(0x152)
+#define GC0308_BAYER_MODE CCI_REG8(0x153)
+#define GC0308_SUBSAMPLE CCI_REG8(0x154)
+#define GC0308_SUBMODE CCI_REG8(0x155)
+#define GC0308_SUB_ROW_N1 CCI_REG8(0x156)
+#define GC0308_SUB_ROW_N2 CCI_REG8(0x157)
+#define GC0308_SUB_COL_N1 CCI_REG8(0x158)
+#define GC0308_SUB_COL_N2 CCI_REG8(0x159)
+
+/* AWB (P1) - Auto White Balance */
+#define GC0308_AWB_RGB_HIGH_LOW CCI_REG8(0x100)
+#define GC0308_AWB_Y_TO_C_DIFF2 CCI_REG8(0x102)
+#define GC0308_AWB_C_MAX CCI_REG8(0x104)
+#define GC0308_AWB_C_INTER CCI_REG8(0x105)
+#define GC0308_AWB_C_INTER2 CCI_REG8(0x106)
+#define GC0308_AWB_C_MAX_BIG CCI_REG8(0x108)
+#define GC0308_AWB_Y_HIGH CCI_REG8(0x109)
+#define GC0308_AWB_NUMBER_LIMIT CCI_REG8(0x10a)
+#define GC0308_KWIN_RATIO CCI_REG8(0x10b)
+#define GC0308_KWIN_THD CCI_REG8(0x10c)
+#define GC0308_LIGHT_GAIN_RANGE CCI_REG8(0x10d)
+#define GC0308_SMALL_WIN_WIDTH_STEP CCI_REG8(0x10e)
+#define GC0308_SMALL_WIN_HEIGHT_STEP CCI_REG8(0x10f)
+#define GC0308_AWB_YELLOW_TH CCI_REG8(0x110)
+#define GC0308_AWB_MODE CCI_REG8(0x111)
+#define GC0308_AWB_ADJUST_SPEED CCI_REG8(0x112)
+#define GC0308_AWB_EVERY_N CCI_REG8(0x113)
+#define GC0308_R_AVG_USE CCI_REG8(0x1d0)
+#define GC0308_G_AVG_USE CCI_REG8(0x1d1)
+#define GC0308_B_AVG_USE CCI_REG8(0x1d2)
+
+#define GC0308_HBLANK_MIN 0x021
+#define GC0308_HBLANK_MAX 0xfff
+#define GC0308_HBLANK_DEF 0x040
+
+#define GC0308_VBLANK_MIN 0x000
+#define GC0308_VBLANK_MAX 0xfff
+#define GC0308_VBLANK_DEF 0x020
+
+#define GC0308_PIXEL_RATE 24000000
+
+/*
+ * frame_time = (BT + height + 8) * row_time
+ * width = 640 (driver does not change window size)
+ * height = 480 (driver does not change window size)
+ * row_time = HBLANK + SAMPLE_HOLD_DELAY + width + 8 + 4
+ *
+ * When EXP_TIME > (BT + height):
+ * BT = EXP_TIME - height - 8 - VS_START_TIME + VS_END_TIME
+ * else:
+ * BT = VBLANK + VS_START_TIME + VS_END_TIME
+ *
+ * The maximum frame rate is 30 FPS.
+ *
+ * In my tests the frame rate mostly depends on the exposure time.
+ * Unfortunately, it is unclear how exactly this is calculated. Also, since
+ * AEC is enabled, the frame times vary depending on ambient light
+ * conditions.
+ */
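+/*
+ * Worked example (illustrative only, using the defaults this driver
+ * programs): HBLANK = 0x40 (64), SAMPLE_HOLD_DELAY = 0x50 (80),
+ * VBLANK = 0x20 (32), VS_START_TIME = VS_END_TIME = 2, so
+ * row_time = 64 + 80 + 640 + 8 + 4 = 796 and, for short exposures,
+ * BT = 32 + 2 + 2 = 36, giving frame_time = (36 + 480 + 8) * 796 = 417104
+ * clock periods.
+ */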
+#define GC0308_FRAME_RATE_MAX 30
+
+enum gc0308_exp_val {
+ GC0308_EXP_M4 = 0,
+ GC0308_EXP_M3,
+ GC0308_EXP_M2,
+ GC0308_EXP_M1,
+ GC0308_EXP_0,
+ GC0308_EXP_P1,
+ GC0308_EXP_P2,
+ GC0308_EXP_P3,
+ GC0308_EXP_P4,
+};
+
+static const s64 gc0308_exposure_menu[] = {
+ -4, -3, -2, -1, 0, 1, 2, 3, 4
+};
+
+struct gc0308_exposure {
+ u8 luma_offset;
+ u8 aec_target_y;
+};
+
+#define GC0308_EXPOSURE(luma_offset_reg, aec_target_y_reg) \
+ { .luma_offset = luma_offset_reg, .aec_target_y = aec_target_y_reg }
+
+static const struct gc0308_exposure gc0308_exposure_values[] = {
+ [GC0308_EXP_M4] = GC0308_EXPOSURE(0xc0, 0x30),
+ [GC0308_EXP_M3] = GC0308_EXPOSURE(0xd0, 0x38),
+ [GC0308_EXP_M2] = GC0308_EXPOSURE(0xe0, 0x40),
+ [GC0308_EXP_M1] = GC0308_EXPOSURE(0xf0, 0x48),
+ [GC0308_EXP_0] = GC0308_EXPOSURE(0x08, 0x50),
+ [GC0308_EXP_P1] = GC0308_EXPOSURE(0x10, 0x5c),
+ [GC0308_EXP_P2] = GC0308_EXPOSURE(0x20, 0x60),
+ [GC0308_EXP_P3] = GC0308_EXPOSURE(0x30, 0x68),
+ [GC0308_EXP_P4] = GC0308_EXPOSURE(0x40, 0x70),
+};
+
+struct gc0308_awb_gains {
+ u8 r;
+ u8 g;
+ u8 b;
+};
+
+#define GC0308_AWB_GAINS(red, green, blue) \
+ { .r = red, .g = green, .b = blue }
+
+static const struct gc0308_awb_gains gc0308_awb_gains[] = {
+ [V4L2_WHITE_BALANCE_AUTO] = GC0308_AWB_GAINS(0x56, 0x40, 0x4a),
+ [V4L2_WHITE_BALANCE_CLOUDY] = GC0308_AWB_GAINS(0x8c, 0x50, 0x40),
+ [V4L2_WHITE_BALANCE_DAYLIGHT] = GC0308_AWB_GAINS(0x74, 0x52, 0x40),
+ [V4L2_WHITE_BALANCE_INCANDESCENT] = GC0308_AWB_GAINS(0x48, 0x40, 0x5c),
+ [V4L2_WHITE_BALANCE_FLUORESCENT] = GC0308_AWB_GAINS(0x40, 0x42, 0x50),
+};
+
+struct gc0308_format {
+ u32 code;
+ u8 regval;
+};
+
+#define GC0308_FORMAT(v4l2_code, gc0308_regval) \
+ { .code = v4l2_code, .regval = gc0308_regval }
+
+static const struct gc0308_format gc0308_formats[] = {
+ GC0308_FORMAT(MEDIA_BUS_FMT_UYVY8_2X8, 0x00),
+ GC0308_FORMAT(MEDIA_BUS_FMT_VYUY8_2X8, 0x01),
+ GC0308_FORMAT(MEDIA_BUS_FMT_YUYV8_2X8, 0x02),
+ GC0308_FORMAT(MEDIA_BUS_FMT_YVYU8_2X8, 0x03),
+ GC0308_FORMAT(MEDIA_BUS_FMT_RGB565_2X8_BE, 0x06),
+ GC0308_FORMAT(MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, 0x07),
+ GC0308_FORMAT(MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE, 0x09),
+};
+
+struct gc0308_frame_size {
+ u8 subsample;
+ u32 width;
+ u32 height;
+};
+
+#define GC0308_FRAME_SIZE(s, w, h) \
+ { .subsample = s, .width = w, .height = h }
+
+static const struct gc0308_frame_size gc0308_frame_sizes[] = {
+ GC0308_FRAME_SIZE(0x11, 640, 480),
+ GC0308_FRAME_SIZE(0x22, 320, 240),
+ GC0308_FRAME_SIZE(0x44, 160, 120),
+};
+
+struct gc0308_mode_registers {
+ u8 out_format;
+ u8 subsample;
+ u16 width;
+ u16 height;
+};
+
+struct gc0308 {
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ struct media_pad pad;
+ struct device *dev;
+ struct clk *clk;
+ struct regmap *regmap;
+ struct regulator *vdd;
+ struct gpio_desc *pwdn_gpio;
+ struct gpio_desc *reset_gpio;
+ unsigned int mbus_config;
+ struct gc0308_mode_registers mode;
+ struct {
+ /* mirror cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct {
+ /* blanking cluster */
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ };
+};
+
+static inline struct gc0308 *to_gc0308(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct gc0308, sd);
+}
+
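+/*
+ * The sensor has two register pages, selected via bit 0 of register 0xfe:
+ * addresses 0x000-0x0ff below map to page 0 and 0x100-0x1ff to page 1.
+ */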
+static const struct regmap_range_cfg gc0308_ranges[] = {
+ {
+ .range_min = 0x0000,
+ .range_max = 0x01ff,
+ .selector_reg = 0xfe,
+ .selector_mask = 0x01,
+ .selector_shift = 0x00,
+ .window_start = 0x00,
+ .window_len = 0x100,
+ },
+};
+
+static const struct regmap_config gc0308_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = 0x1ff,
+ .ranges = gc0308_ranges,
+ .num_ranges = ARRAY_SIZE(gc0308_ranges),
+ .disable_locking = true,
+};
+
+static const struct cci_reg_sequence sensor_default_regs[] = {
+ {GC0308_VB_HB, 0x00},
+ {GC0308_HBLANK, 0x40},
+ {GC0308_VBLANK, 0x20},
+ {GC0308_EXP, 0x0258},
+ {GC0308_AWB_R_GAIN, 0x56},
+ {GC0308_AWB_G_GAIN, 0x40},
+ {GC0308_AWB_B_GAIN, 0x4a},
+ {GC0308_ANTI_FLICKER_STEP, 0x0078},
+ {GC0308_EXP_LVL_1, 0x0258},
+ {GC0308_EXP_LVL_2, 0x0258},
+ {GC0308_EXP_LVL_3, 0x0258},
+ {GC0308_EXP_LVL_4, 0x0ea6},
+ {GC0308_MAX_EXP_LVL, 0x20},
+ {GC0308_ROW_START, 0x0000},
+ {GC0308_COL_START, 0x0000},
+ {GC0308_WIN_HEIGHT, 488},
+ {GC0308_WIN_WIDTH, 648},
+ {GC0308_VS_START_TIME, 0x02},
+ {GC0308_VS_END_TIME, 0x02},
+ {GC0308_RSH_WIDTH, 0x22},
+ {GC0308_TSP_WIDTH, 0x0d},
+ {GC0308_SAMPLE_HOLD_DELAY, 0x50},
+ {GC0308_ROW_TAIL_WIDTH, 0x0f},
+ {GC0308_CISCTL_MODE1, 0x10},
+ {GC0308_CISCTL_MODE2, 0x0a},
+ {GC0308_CISCTL_MODE3, 0x05},
+ {GC0308_CISCTL_MODE4, 0x01},
+ {CCI_REG8(0x018), 0x44}, /* undocumented */
+ {CCI_REG8(0x019), 0x44}, /* undocumented */
+ {GC0308_ANALOG_MODE1, 0x2a},
+ {GC0308_ANALOG_MODE2, 0x00},
+ {GC0308_HRST_RSG_V18, 0x49},
+ {GC0308_VREF_V25, 0x9a},
+ {GC0308_ADC_R, 0x61},
+ {GC0308_PAD_DRV, 0x01}, /* drv strength: pclk=4mA */
+ {GC0308_BLOCK_EN1, 0x7f},
+ {GC0308_BLOCK_EN2, 0xfa},
+ {GC0308_AAAA_EN, 0x57},
+ {GC0308_OUT_FORMAT, 0xa2}, /* YCbYCr */
+ {GC0308_OUT_EN, 0x0f},
+ {GC0308_SYNC_MODE, 0x03},
+ {GC0308_CLK_DIV_MODE, 0x00},
+ {GC0308_DEBUG_MODE1, 0x0a},
+ {GC0308_DEBUG_MODE2, 0x00},
+ {GC0308_DEBUG_MODE3, 0x01},
+ {GC0308_BLK_MODE, 0xf7},
+ {GC0308_BLK_LIMIT_VAL, 0x50},
+ {GC0308_GLOBAL_OFF, 0x00},
+ {GC0308_CURRENT_R_OFF, 0x28},
+ {GC0308_CURRENT_G_OFF, 0x2a},
+ {GC0308_CURRENT_B_OFF, 0x28},
+ {GC0308_EXP_RATE_DARKC, 0x04},
+ {GC0308_OFF_SUBMODE, 0x20},
+ {GC0308_DARKC_SUBMODE, 0x20},
+ {GC0308_MANUAL_G1_OFF, 0x00},
+ {GC0308_MANUAL_R1_OFF, 0x00},
+ {GC0308_MANUAL_B2_OFF, 0x00},
+ {GC0308_MANUAL_G2_OFF, 0x00},
+ {GC0308_GLOBAL_GAIN, 0x14},
+ {GC0308_AUTO_POSTGAIN, 0x41},
+ {GC0308_CHANNEL_GAIN_G1, 0x80},
+ {GC0308_CHANNEL_GAIN_R, 0x80},
+ {GC0308_CHANNEL_GAIN_B, 0x80},
+ {GC0308_CHANNEL_GAIN_G2, 0x80},
+ {GC0308_LSC_RED_B2, 0x20},
+ {GC0308_LSC_GREEN_B2, 0x20},
+ {GC0308_LSC_BLUE_B2, 0x20},
+ {GC0308_LSC_RED_B4, 0x14},
+ {GC0308_LSC_GREEN_B4, 0x10},
+ {GC0308_LSC_BLUE_B4, 0x14},
+ {GC0308_LSC_ROW_CENTER, 0x3c},
+ {GC0308_LSC_COL_CENTER, 0x50},
+ {GC0308_LSC_DEC_LVL1, 0x12},
+ {GC0308_LSC_DEC_LVL2, 0x1a},
+ {GC0308_LSC_DEC_LVL3, 0x24},
+ {GC0308_DN_MODE_EN, 0x07},
+ {GC0308_DN_MODE_RATIO, 0x15},
+ {GC0308_DN_BILAT_B_BASE, 0x08},
+ {GC0308_DN_BILAT_N_BASE, 0x03},
+ {GC0308_DD_DARK_BRIGHT_TH, 0xe8},
+ {GC0308_DD_FLAT_TH, 0x86},
+ {GC0308_DD_LIMIT, 0x82},
+ {GC0308_ASDE_GAIN_TRESH, 0x18},
+ {GC0308_ASDE_GAIN_MODE, 0x0f},
+ {GC0308_ASDE_DN_SLOPE, 0x00},
+ {GC0308_ASDE_DD_BRIGHT, 0x5f},
+ {GC0308_ASDE_DD_LIMIT, 0x8f},
+ {GC0308_ASDE_AUTO_EE1, 0x55},
+ {GC0308_ASDE_AUTO_EE2, 0x38},
+ {GC0308_ASDE_AUTO_SAT_DEC_SLOPE, 0x15},
+ {GC0308_ASDE_AUTO_SAT_LOW_LIMIT, 0x33},
+ {GC0308_EEINTP_MODE_1, 0xdc},
+ {GC0308_EEINTP_MODE_2, 0x00},
+ {GC0308_DIRECTION_TH1, 0x02},
+ {GC0308_DIRECTION_TH2, 0x3f},
+ {GC0308_DIFF_HV_TI_TH, 0x02},
+ {GC0308_EDGE12_EFFECT, 0x38},
+ {GC0308_EDGE_POS_RATIO, 0x88},
+ {GC0308_EDGE1_MINMAX, 0x81},
+ {GC0308_EDGE2_MINMAX, 0x81},
+ {GC0308_EDGE12_TH, 0x22},
+ {GC0308_EDGE_MAX, 0xff},
+ {GC0308_CC_MATRIX_C11, 0x48},
+ {GC0308_CC_MATRIX_C12, 0x02},
+ {GC0308_CC_MATRIX_C13, 0x07},
+ {GC0308_CC_MATRIX_C21, 0xe0},
+ {GC0308_CC_MATRIX_C22, 0x40},
+ {GC0308_CC_MATRIX_C23, 0xf0},
+ {GC0308_SATURATION_CB, 0x40},
+ {GC0308_SATURATION_CR, 0x40},
+ {GC0308_LUMA_CONTRAST, 0x40},
+ {GC0308_SKIN_CB_CENTER, 0xe0},
+ {GC0308_EDGE_DEC_SA, 0x38},
+ {GC0308_AUTO_GRAY_MODE, 0x36},
+ {GC0308_AEC_MODE1, 0xcb},
+ {GC0308_AEC_MODE2, 0x10},
+ {GC0308_AEC_MODE3, 0x90},
+ {GC0308_AEC_TARGET_Y, 0x48},
+ {GC0308_AEC_HIGH_LOW_RANGE, 0xf2},
+ {GC0308_AEC_IGNORE, 0x16},
+ {GC0308_AEC_SLOW_MARGIN, 0x92},
+ {GC0308_AEC_FAST_MARGIN, 0xa5},
+ {GC0308_AEC_I_FRAMES, 0x23},
+ {GC0308_AEC_R_OFFSET, 0x00},
+ {GC0308_AEC_GB_OFFSET, 0x00},
+ {GC0308_AEC_I_STOP_L_MARGIN, 0x09},
+ {GC0308_EXP_MIN_L, 0x04},
+ {GC0308_MAX_POST_DF_GAIN, 0xa0},
+ {GC0308_MAX_PRE_DG_GAIN, 0x40},
+ {GC0308_ABB_MODE, 0x03},
+ {GC0308_GAMMA_OUT0, 0x10},
+ {GC0308_GAMMA_OUT1, 0x20},
+ {GC0308_GAMMA_OUT2, 0x38},
+ {GC0308_GAMMA_OUT3, 0x4e},
+ {GC0308_GAMMA_OUT4, 0x63},
+ {GC0308_GAMMA_OUT5, 0x76},
+ {GC0308_GAMMA_OUT6, 0x87},
+ {GC0308_GAMMA_OUT7, 0xa2},
+ {GC0308_GAMMA_OUT8, 0xb8},
+ {GC0308_GAMMA_OUT9, 0xca},
+ {GC0308_GAMMA_OUT10, 0xd8},
+ {GC0308_GAMMA_OUT11, 0xe3},
+ {GC0308_GAMMA_OUT12, 0xeb},
+ {GC0308_GAMMA_OUT13, 0xf0},
+ {GC0308_GAMMA_OUT14, 0xf8},
+ {GC0308_GAMMA_OUT15, 0xfd},
+ {GC0308_GAMMA_OUT16, 0xff},
+ {GC0308_Y_GAMMA_OUT0, 0x00},
+ {GC0308_Y_GAMMA_OUT1, 0x10},
+ {GC0308_Y_GAMMA_OUT2, 0x1c},
+ {GC0308_Y_GAMMA_OUT3, 0x30},
+ {GC0308_Y_GAMMA_OUT4, 0x43},
+ {GC0308_Y_GAMMA_OUT5, 0x54},
+ {GC0308_Y_GAMMA_OUT6, 0x65},
+ {GC0308_Y_GAMMA_OUT7, 0x75},
+ {GC0308_Y_GAMMA_OUT8, 0x93},
+ {GC0308_Y_GAMMA_OUT9, 0xb0},
+ {GC0308_Y_GAMMA_OUT10, 0xcb},
+ {GC0308_Y_GAMMA_OUT11, 0xe6},
+ {GC0308_Y_GAMMA_OUT12, 0xff},
+ {GC0308_ABS_RANGE_COMP, 0x02},
+ {GC0308_ABS_STOP_MARGIN, 0x01},
+ {GC0308_Y_S_COMP, 0x02},
+ {GC0308_Y_STRETCH_LIMIT, 0x30},
+ {GC0308_BIG_WIN_X0, 0x12},
+ {GC0308_BIG_WIN_Y0, 0x0a},
+ {GC0308_BIG_WIN_X1, 0x9f},
+ {GC0308_BIG_WIN_Y1, 0x78},
+ {GC0308_AWB_RGB_HIGH_LOW, 0xf5},
+ {GC0308_AWB_Y_TO_C_DIFF2, 0x20},
+ {GC0308_AWB_C_MAX, 0x10},
+ {GC0308_AWB_C_INTER, 0x08},
+ {GC0308_AWB_C_INTER2, 0x20},
+ {GC0308_AWB_C_MAX_BIG, 0x0a},
+ {GC0308_AWB_NUMBER_LIMIT, 0xa0},
+ {GC0308_KWIN_RATIO, 0x60},
+ {GC0308_KWIN_THD, 0x08},
+ {GC0308_SMALL_WIN_WIDTH_STEP, 0x44},
+ {GC0308_SMALL_WIN_HEIGHT_STEP, 0x32},
+ {GC0308_AWB_YELLOW_TH, 0x41},
+ {GC0308_AWB_MODE, 0x37},
+ {GC0308_AWB_ADJUST_SPEED, 0x22},
+ {GC0308_AWB_EVERY_N, 0x19},
+ {CCI_REG8(0x114), 0x44}, /* AWB set1 */
+ {CCI_REG8(0x115), 0x44}, /* AWB set1 */
+ {CCI_REG8(0x116), 0xc2}, /* AWB set1 */
+ {CCI_REG8(0x117), 0xa8}, /* AWB set1 */
+ {CCI_REG8(0x118), 0x18}, /* AWB set1 */
+ {CCI_REG8(0x119), 0x50}, /* AWB set1 */
+ {CCI_REG8(0x11a), 0xd8}, /* AWB set1 */
+ {CCI_REG8(0x11b), 0xf5}, /* AWB set1 */
+ {CCI_REG8(0x170), 0x40}, /* AWB set2 */
+ {CCI_REG8(0x171), 0x58}, /* AWB set2 */
+ {CCI_REG8(0x172), 0x30}, /* AWB set2 */
+ {CCI_REG8(0x173), 0x48}, /* AWB set2 */
+ {CCI_REG8(0x174), 0x20}, /* AWB set2 */
+ {CCI_REG8(0x175), 0x60}, /* AWB set2 */
+ {CCI_REG8(0x177), 0x20}, /* AWB set2 */
+ {CCI_REG8(0x178), 0x32}, /* AWB set2 */
+ {CCI_REG8(0x130), 0x03}, /* undocumented */
+ {CCI_REG8(0x131), 0x40}, /* undocumented */
+ {CCI_REG8(0x132), 0x10}, /* undocumented */
+ {CCI_REG8(0x133), 0xe0}, /* undocumented */
+ {CCI_REG8(0x134), 0xe0}, /* undocumented */
+ {CCI_REG8(0x135), 0x00}, /* undocumented */
+ {CCI_REG8(0x136), 0x80}, /* undocumented */
+ {CCI_REG8(0x137), 0x00}, /* undocumented */
+ {CCI_REG8(0x138), 0x04}, /* undocumented */
+ {CCI_REG8(0x139), 0x09}, /* undocumented */
+ {CCI_REG8(0x13a), 0x12}, /* undocumented */
+ {CCI_REG8(0x13b), 0x1c}, /* undocumented */
+ {CCI_REG8(0x13c), 0x28}, /* undocumented */
+ {CCI_REG8(0x13d), 0x31}, /* undocumented */
+ {CCI_REG8(0x13e), 0x44}, /* undocumented */
+ {CCI_REG8(0x13f), 0x57}, /* undocumented */
+ {CCI_REG8(0x140), 0x6c}, /* undocumented */
+ {CCI_REG8(0x141), 0x81}, /* undocumented */
+ {CCI_REG8(0x142), 0x94}, /* undocumented */
+ {CCI_REG8(0x143), 0xa7}, /* undocumented */
+ {CCI_REG8(0x144), 0xb8}, /* undocumented */
+ {CCI_REG8(0x145), 0xd6}, /* undocumented */
+ {CCI_REG8(0x146), 0xee}, /* undocumented */
+ {CCI_REG8(0x147), 0x0d}, /* undocumented */
+ {CCI_REG8(0x162), 0xf7}, /* undocumented */
+ {CCI_REG8(0x163), 0x68}, /* undocumented */
+ {CCI_REG8(0x164), 0xd3}, /* undocumented */
+ {CCI_REG8(0x165), 0xd3}, /* undocumented */
+ {CCI_REG8(0x166), 0x60}, /* undocumented */
+};
+
+struct gc0308_colormode {
+ u8 special_effect;
+ u8 dbg_mode1;
+ u8 block_en1;
+ u8 aec_mode3;
+ u8 eeintp_mode_2;
+ u8 edge12_effect;
+ u8 luma_contrast;
+ u8 contrast_center;
+ u8 fixed_cb;
+ u8 fixed_cr;
+};
+
+#define GC0308_COLOR_FX(reg_special_effect, reg_dbg_mode1, reg_block_en1, \
+ reg_aec_mode3, reg_eeintp_mode_2, reg_edge12_effect, \
+ reg_luma_contrast, reg_contrast_center, \
+ reg_fixed_cb, reg_fixed_cr) \
+ { \
+ .special_effect = reg_special_effect, \
+ .dbg_mode1 = reg_dbg_mode1, \
+ .block_en1 = reg_block_en1, \
+ .aec_mode3 = reg_aec_mode3, \
+ .eeintp_mode_2 = reg_eeintp_mode_2, \
+ .edge12_effect = reg_edge12_effect, \
+ .luma_contrast = reg_luma_contrast, \
+ .contrast_center = reg_contrast_center, \
+ .fixed_cb = reg_fixed_cb, \
+ .fixed_cr = reg_fixed_cr, \
+ }
+
+static const struct gc0308_colormode gc0308_colormodes[] = {
+ [V4L2_COLORFX_NONE] =
+ GC0308_COLOR_FX(0x00, 0x0a, 0xff, 0x90, 0x00,
+ 0x54, 0x3c, 0x80, 0x00, 0x00),
+ [V4L2_COLORFX_BW] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xff, 0x90, 0x00,
+ 0x54, 0x40, 0x80, 0x00, 0x00),
+ [V4L2_COLORFX_SEPIA] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xff, 0x90, 0x00,
+ 0x38, 0x40, 0x80, 0xd0, 0x28),
+ [V4L2_COLORFX_NEGATIVE] =
+ GC0308_COLOR_FX(0x01, 0x0a, 0xff, 0x90, 0x00,
+ 0x38, 0x40, 0x80, 0x00, 0x00),
+ [V4L2_COLORFX_EMBOSS] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xbf, 0x10, 0x01,
+ 0x38, 0x40, 0x80, 0x00, 0x00),
+ [V4L2_COLORFX_SKETCH] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xff, 0x10, 0x80,
+ 0x38, 0x80, 0x90, 0x00, 0x00),
+ [V4L2_COLORFX_SKY_BLUE] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xff, 0x90, 0x00,
+ 0x38, 0x40, 0x80, 0x50, 0xe0),
+ [V4L2_COLORFX_GRASS_GREEN] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xff, 0x90, 0x01,
+ 0x38, 0x40, 0x80, 0xc0, 0xc0),
+ [V4L2_COLORFX_SKIN_WHITEN] =
+ GC0308_COLOR_FX(0x02, 0x0a, 0xbf, 0x10, 0x01,
+ 0x38, 0x60, 0x40, 0x00, 0x00),
+};
+
+static int gc0308_power_on(struct device *dev)
+{
+ struct gc0308 *gc0308 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(gc0308->vdd);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(gc0308->clk);
+ if (ret)
+ goto clk_fail;
+
+ gpiod_set_value_cansleep(gc0308->pwdn_gpio, 0);
+ usleep_range(10000, 20000);
+
+ gpiod_set_value_cansleep(gc0308->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gc0308->reset_gpio, 0);
+ msleep(30);
+
+ return 0;
+
+clk_fail:
+ regulator_disable(gc0308->vdd);
+ return ret;
+}
+
+static int gc0308_power_off(struct device *dev)
+{
+ struct gc0308 *gc0308 = dev_get_drvdata(dev);
+
+ gpiod_set_value_cansleep(gc0308->pwdn_gpio, 1);
+ clk_disable_unprepare(gc0308->clk);
+ regulator_disable(gc0308->vdd);
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int gc0308_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct gc0308 *gc0308 = to_gc0308(sd);
+
+ return cci_read(gc0308->regmap, CCI_REG8(reg->reg), &reg->val, NULL);
+}
+
+static int gc0308_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct gc0308 *gc0308 = to_gc0308(sd);
+
+ return cci_write(gc0308->regmap, CCI_REG8(reg->reg), reg->val, NULL);
+}
+#endif
+
+static int gc0308_set_exposure(struct gc0308 *gc0308, enum gc0308_exp_val exp)
+{
+ const struct gc0308_exposure *regs = &gc0308_exposure_values[exp];
+ struct cci_reg_sequence exposure_reg_seq[] = {
+ {GC0308_LUMA_OFFSET, regs->luma_offset},
+ {GC0308_AEC_TARGET_Y, regs->aec_target_y},
+ };
+
+ return cci_multi_reg_write(gc0308->regmap, exposure_reg_seq,
+ ARRAY_SIZE(exposure_reg_seq), NULL);
+}
+
+static int gc0308_set_awb_mode(struct gc0308 *gc0308,
+ enum v4l2_auto_n_preset_white_balance val)
+{
+ const struct gc0308_awb_gains *regs = &gc0308_awb_gains[val];
+ struct cci_reg_sequence awb_reg_seq[] = {
+ {GC0308_AWB_R_GAIN, regs->r},
+ {GC0308_AWB_G_GAIN, regs->g},
+ {GC0308_AWB_B_GAIN, regs->b},
+ };
+ int ret;
+
+ ret = cci_update_bits(gc0308->regmap, GC0308_AAAA_EN,
+ BIT(1), val == V4L2_WHITE_BALANCE_AUTO, NULL);
+ ret = cci_multi_reg_write(gc0308->regmap, awb_reg_seq,
+ ARRAY_SIZE(awb_reg_seq), &ret);
+
+ return ret;
+}
+
+static int gc0308_set_colormode(struct gc0308 *gc0308, enum v4l2_colorfx mode)
+{
+ const struct gc0308_colormode *regs = &gc0308_colormodes[mode];
+ struct cci_reg_sequence colormode_reg_seq[] = {
+ {GC0308_SPECIAL_EFFECT, regs->special_effect},
+ {GC0308_DEBUG_MODE1, regs->dbg_mode1},
+ {GC0308_BLOCK_EN1, regs->block_en1},
+ {GC0308_AEC_MODE3, regs->aec_mode3},
+ {GC0308_EEINTP_MODE_2, regs->eeintp_mode_2},
+ {GC0308_EDGE12_EFFECT, regs->edge12_effect},
+ {GC0308_LUMA_CONTRAST, regs->luma_contrast},
+ {GC0308_CONTRAST_CENTER, regs->contrast_center},
+ {GC0308_FIXED_CB, regs->fixed_cb},
+ {GC0308_FIXED_CR, regs->fixed_cr},
+ };
+
+ return cci_multi_reg_write(gc0308->regmap, colormode_reg_seq,
+ ARRAY_SIZE(colormode_reg_seq), NULL);
+}
+
+static int gc0308_set_power_line_freq(struct gc0308 *gc0308, int frequency)
+{
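+	/*
+	 * The anti-flicker step is inversely proportional to the mains
+	 * frequency: 0x78 (120) for 50 Hz vs 0x64 (100) for 60 Hz, i.e.
+	 * 120 * 50 = 100 * 60.
+	 */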
+ static const struct cci_reg_sequence pwr_line_50hz[] = {
+ {GC0308_ANTI_FLICKER_STEP, 0x0078},
+ {GC0308_EXP_LVL_1, 0x0258},
+ {GC0308_EXP_LVL_2, 0x0348},
+ {GC0308_EXP_LVL_3, 0x04b0},
+ {GC0308_EXP_LVL_4, 0x05a0},
+ };
+ static const struct cci_reg_sequence pwr_line_60hz[] = {
+ {GC0308_ANTI_FLICKER_STEP, 0x0064},
+ {GC0308_EXP_LVL_1, 0x0258},
+ {GC0308_EXP_LVL_2, 0x0384},
+ {GC0308_EXP_LVL_3, 0x04b0},
+ {GC0308_EXP_LVL_4, 0x05dc},
+ };
+
+ switch (frequency) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ return cci_multi_reg_write(gc0308->regmap, pwr_line_60hz,
+ ARRAY_SIZE(pwr_line_60hz), NULL);
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ return cci_multi_reg_write(gc0308->regmap, pwr_line_50hz,
+ ARRAY_SIZE(pwr_line_50hz), NULL);
+ }
+
+ return -EINVAL;
+}
+
+static int gc0308_update_mirror(struct gc0308 *gc0308)
+{
+ u8 regval = 0x00;
+
+ if (gc0308->vflip->val)
+ regval |= BIT(1);
+
+ if (gc0308->hflip->val)
+ regval |= BIT(0);
+
+ return cci_update_bits(gc0308->regmap, GC0308_CISCTL_MODE1,
+ GENMASK(1, 0), regval, NULL);
+}
+
+static int gc0308_update_blanking(struct gc0308 *gc0308)
+{
+ u16 vblank = gc0308->vblank->val;
+ u16 hblank = gc0308->hblank->val;
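+	/* VB_HB packs VBLANK[11:8] into bits 7:4 and HBLANK[11:8] into bits 3:0 */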
+ u8 vbhb = ((vblank >> 4) & 0xf0) | ((hblank >> 8) & 0x0f);
+ int ret = 0;
+
+ cci_write(gc0308->regmap, GC0308_VB_HB, vbhb, &ret);
+ cci_write(gc0308->regmap, GC0308_HBLANK, hblank & 0xff, &ret);
+ cci_write(gc0308->regmap, GC0308_VBLANK, vblank & 0xff, &ret);
+
+ return ret;
+}
+
+static int _gc0308_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc0308 *gc0308 = container_of(ctrl->handler, struct gc0308, hdl);
+ u8 flipval = ctrl->val ? 0xff : 0x00;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HBLANK:
+ case V4L2_CID_VBLANK:
+ return gc0308_update_blanking(gc0308);
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_HFLIP:
+ return gc0308_update_mirror(gc0308);
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ return cci_update_bits(gc0308->regmap, GC0308_AAAA_EN,
+ BIT(1), flipval, NULL);
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return gc0308_set_awb_mode(gc0308, ctrl->val);
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return gc0308_set_power_line_freq(gc0308, ctrl->val);
+ case V4L2_CID_COLORFX:
+ return gc0308_set_colormode(gc0308, ctrl->val);
+ case V4L2_CID_TEST_PATTERN:
+ return cci_update_bits(gc0308->regmap, GC0308_DEBUG_MODE2,
+ GENMASK(1, 0), ctrl->val, NULL);
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ return gc0308_set_exposure(gc0308, ctrl->val);
+ }
+
+ return -EINVAL;
+}
+
+static int gc0308_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc0308 *gc0308 = container_of(ctrl->handler, struct gc0308, hdl);
+ int ret;
+
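+	/*
+	 * If the sensor is powered down, skip the write; the control is
+	 * applied at stream-on via __v4l2_ctrl_handler_setup().
+	 */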
+ if (!pm_runtime_get_if_in_use(gc0308->dev))
+ return 0;
+
+ ret = _gc0308_s_ctrl(ctrl);
+ if (ret)
+ dev_err(gc0308->dev, "failed to set control: %d\n", ret);
+
+ pm_runtime_mark_last_busy(gc0308->dev);
+ pm_runtime_put_autosuspend(gc0308->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops gc0308_ctrl_ops = {
+ .s_ctrl = gc0308_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops gc0308_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = gc0308_g_register,
+ .s_register = gc0308_s_register,
+#endif
+};
+
+static int gc0308_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(gc0308_formats))
+ return -EINVAL;
+
+ code->code = gc0308_formats[code->index].code;
+
+ return 0;
+}
+
+static int gc0308_get_format_idx(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gc0308_formats); i++) {
+ if (gc0308_formats[i].code == code)
+ return i;
+ }
+
+ return -1;
+}
+
+static int gc0308_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(gc0308_frame_sizes))
+ return -EINVAL;
+
+ if (gc0308_get_format_idx(fse->code) < 0)
+ return -EINVAL;
+
+ fse->min_width = gc0308_frame_sizes[fse->index].width;
+ fse->max_width = gc0308_frame_sizes[fse->index].width;
+ fse->min_height = gc0308_frame_sizes[fse->index].height;
+ fse->max_height = gc0308_frame_sizes[fse->index].height;
+
+ return 0;
+}
+
+static void gc0308_update_pad_format(const struct gc0308_frame_size *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = code;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int gc0308_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct gc0308 *gc0308 = to_gc0308(sd);
+ const struct gc0308_frame_size *mode;
+ int i = gc0308_get_format_idx(fmt->format.code);
+
+ if (i < 0)
+ i = 0;
+
+ mode = v4l2_find_nearest_size(gc0308_frame_sizes,
+ ARRAY_SIZE(gc0308_frame_sizes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ gc0308_update_pad_format(mode, &fmt->format, gc0308_formats[i].code);
+ *v4l2_subdev_state_get_format(sd_state, 0) = fmt->format;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ gc0308->mode.out_format = gc0308_formats[i].regval;
+ gc0308->mode.subsample = mode->subsample;
+ gc0308->mode.width = mode->width;
+ gc0308->mode.height = mode->height;
+
+ return 0;
+}
+
+static int gc0308_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(sd_state, 0);
+
+ format->width = 640;
+ format->height = 480;
+ format->code = gc0308_formats[0].code;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+ format->field = V4L2_FIELD_NONE;
+ format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ format->quantization = V4L2_QUANTIZATION_DEFAULT;
+ format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops gc0308_pad_ops = {
+ .enum_mbus_code = gc0308_enum_mbus_code,
+ .enum_frame_size = gc0308_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = gc0308_set_format,
+};
+
+static int gc0308_set_resolution(struct gc0308 *gc0308, int *ret)
+{
+ struct cci_reg_sequence resolution_regs[] = {
+ {GC0308_SUBSAMPLE, gc0308->mode.subsample},
+ {GC0308_SUBMODE, 0x03},
+ {GC0308_SUB_ROW_N1, 0x00},
+ {GC0308_SUB_ROW_N2, 0x00},
+ {GC0308_SUB_COL_N1, 0x00},
+ {GC0308_SUB_COL_N2, 0x00},
+ {GC0308_CROP_WIN_MODE, 0x80},
+ {GC0308_CROP_WIN_Y1, 0x00},
+ {GC0308_CROP_WIN_X1, 0x00},
+ {GC0308_CROP_WIN_HEIGHT, gc0308->mode.height},
+ {GC0308_CROP_WIN_WIDTH, gc0308->mode.width},
+ };
+
+ return cci_multi_reg_write(gc0308->regmap, resolution_regs,
+ ARRAY_SIZE(resolution_regs), ret);
+}
+
+static int gc0308_start_stream(struct gc0308 *gc0308)
+{
+ int ret, sync_mode;
+
+ ret = pm_runtime_resume_and_get(gc0308->dev);
+ if (ret < 0)
+ return ret;
+
+ cci_multi_reg_write(gc0308->regmap, sensor_default_regs,
+ ARRAY_SIZE(sensor_default_regs), &ret);
+ cci_update_bits(gc0308->regmap, GC0308_OUT_FORMAT,
+ GENMASK(4, 0), gc0308->mode.out_format, &ret);
+ gc0308_set_resolution(gc0308, &ret);
+
+ if (ret) {
+ dev_err(gc0308->dev, "failed to update registers: %d\n", ret);
+ goto disable_pm;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(&gc0308->hdl);
+ if (ret) {
+ dev_err(gc0308->dev, "failed to setup controls\n");
+ goto disable_pm;
+ }
+
+ /* HSYNC/VSYNC polarity */
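+	/* bit 0 set = VSYNC active high, bit 1 set = HSYNC active high */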
+ sync_mode = 0x3;
+ if (gc0308->mbus_config & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ sync_mode &= ~BIT(0);
+ if (gc0308->mbus_config & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ sync_mode &= ~BIT(1);
+ ret = cci_write(gc0308->regmap, GC0308_SYNC_MODE, sync_mode, NULL);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_mark_last_busy(gc0308->dev);
+ pm_runtime_put_autosuspend(gc0308->dev);
+ return ret;
+}
+
+static int gc0308_stop_stream(struct gc0308 *gc0308)
+{
+ pm_runtime_mark_last_busy(gc0308->dev);
+ pm_runtime_put_autosuspend(gc0308->dev);
+ return 0;
+}
+
+static int gc0308_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc0308 *gc0308 = to_gc0308(sd);
+ struct v4l2_subdev_state *sd_state;
+ int ret;
+
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ if (enable)
+ ret = gc0308_start_stream(gc0308);
+ else
+ ret = gc0308_stop_stream(gc0308);
+
+ v4l2_subdev_unlock_state(sd_state);
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops gc0308_video_ops = {
+ .s_stream = gc0308_s_stream,
+};
+
+static const struct v4l2_subdev_ops gc0308_subdev_ops = {
+ .core = &gc0308_core_ops,
+ .pad = &gc0308_pad_ops,
+ .video = &gc0308_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops gc0308_internal_ops = {
+ .init_state = gc0308_init_state,
+};
+
+static int gc0308_bus_config(struct gc0308 *gc0308)
+{
+ struct device *dev = gc0308->dev;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ gc0308->mbus_config = bus_cfg.bus.parallel.flags;
+
+ return 0;
+}
+
+static const char * const gc0308_test_pattern_menu[] = {
+ "Disabled",
+ "Test Image 1",
+ "Test Image 2",
+};
+
+static int gc0308_init_controls(struct gc0308 *gc0308)
+{
+ int ret;
+
+ v4l2_ctrl_handler_init(&gc0308->hdl, 11);
+ gc0308->hblank = v4l2_ctrl_new_std(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_HBLANK, GC0308_HBLANK_MIN,
+ GC0308_HBLANK_MAX, 1,
+ GC0308_HBLANK_DEF);
+ gc0308->vblank = v4l2_ctrl_new_std(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_VBLANK, GC0308_VBLANK_MIN,
+ GC0308_VBLANK_MAX, 1,
+ GC0308_VBLANK_DEF);
+ gc0308->hflip = v4l2_ctrl_new_std(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ gc0308->vflip = v4l2_ctrl_new_std(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&gc0308->hdl, &gc0308_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ GC0308_PIXEL_RATE, GC0308_PIXEL_RATE, 1,
+ GC0308_PIXEL_RATE);
+ v4l2_ctrl_new_std(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std_menu_items(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(gc0308_test_pattern_menu) - 1,
+ 0, 0, gc0308_test_pattern_menu);
+ v4l2_ctrl_new_std_menu(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 8, ~0x14e, V4L2_WHITE_BALANCE_AUTO);
+ v4l2_ctrl_new_std_menu(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_COLORFX, 8, 0, V4L2_COLORFX_NONE);
+ v4l2_ctrl_new_std_menu(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ ~0x6, V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ v4l2_ctrl_new_int_menu(&gc0308->hdl, &gc0308_ctrl_ops,
+ V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(gc0308_exposure_menu) - 1,
+ ARRAY_SIZE(gc0308_exposure_menu) / 2,
+ gc0308_exposure_menu);
+
+ gc0308->sd.ctrl_handler = &gc0308->hdl;
+ if (gc0308->hdl.error) {
+ ret = gc0308->hdl.error;
+ v4l2_ctrl_handler_free(&gc0308->hdl);
+ return ret;
+ }
+
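+	/* Cluster hflip/vflip and hblank/vblank (adjacent struct members) */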
+ v4l2_ctrl_cluster(2, &gc0308->hflip);
+ v4l2_ctrl_cluster(2, &gc0308->hblank);
+
+ return 0;
+}
+
+static int gc0308_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct gc0308 *gc0308;
+ unsigned long clkrate;
+ u64 regval;
+ int ret;
+
+ gc0308 = devm_kzalloc(dev, sizeof(*gc0308), GFP_KERNEL);
+ if (!gc0308)
+ return -ENOMEM;
+
+ gc0308->dev = dev;
+ dev_set_drvdata(dev, gc0308);
+
+ ret = gc0308_bus_config(gc0308);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get bus config\n");
+
+ gc0308->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(gc0308->clk))
+ return dev_err_probe(dev, PTR_ERR(gc0308->clk),
+ "could not get clk\n");
+
+ gc0308->vdd = devm_regulator_get(dev, "vdd28");
+ if (IS_ERR(gc0308->vdd))
+ return dev_err_probe(dev, PTR_ERR(gc0308->vdd),
+ "failed to get vdd28 regulator\n");
+
+ gc0308->pwdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
+ if (IS_ERR(gc0308->pwdn_gpio))
+ return dev_err_probe(dev, PTR_ERR(gc0308->pwdn_gpio),
+ "failed to get powerdown gpio\n");
+
+ gc0308->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gc0308->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(gc0308->reset_gpio),
+ "failed to get reset gpio\n");
+
+ /*
+ * This is not using devm_cci_regmap_init_i2c(), because the driver
+ * makes use of regmap's pagination feature. The chosen settings are
+ * compatible with the CCI helpers.
+ */
+ gc0308->regmap = devm_regmap_init_i2c(client, &gc0308_regmap_config);
+ if (IS_ERR(gc0308->regmap))
+ return dev_err_probe(dev, PTR_ERR(gc0308->regmap),
+ "failed to init regmap\n");
+
+ v4l2_i2c_subdev_init(&gc0308->sd, client, &gc0308_subdev_ops);
+ gc0308->sd.internal_ops = &gc0308_internal_ops;
+ gc0308->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ gc0308->sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ ret = gc0308_init_controls(gc0308);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init controls\n");
+
+ gc0308->sd.state_lock = gc0308->hdl.lock;
+ gc0308->pad.flags = MEDIA_PAD_FL_SOURCE;
+ gc0308->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&gc0308->sd.entity, 1, &gc0308->pad);
+ if (ret < 0)
+ goto fail_ctrl_hdl_cleanup;
+
+ ret = v4l2_subdev_init_finalize(&gc0308->sd);
+ if (ret)
+ goto fail_media_entity_cleanup;
+
+ ret = gc0308_power_on(dev);
+ if (ret)
+ goto fail_subdev_cleanup;
+
+ if (gc0308->clk) {
+ clkrate = clk_get_rate(gc0308->clk);
+ if (clkrate != 24000000)
+ dev_warn(dev, "unexpected clock rate: %lu\n", clkrate);
+ }
+
+ ret = cci_read(gc0308->regmap, GC0308_CHIP_ID, &regval, NULL);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to read chip ID\n");
+ goto fail_power_off;
+ }
+
+ if (regval != 0x9b) {
+ ret = -EINVAL;
+ dev_err_probe(dev, ret, "invalid chip ID (%02llx)\n", regval);
+ goto fail_power_off;
+ }
+
+ /*
+ * Enable runtime PM with autosuspend. As the device has been powered
+ * manually, mark it as active, and increase the usage count without
+ * resuming the device.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = v4l2_async_register_subdev(&gc0308->sd);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register v4l subdev\n");
+ goto fail_rpm;
+ }
+
+ return 0;
+
+fail_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+fail_power_off:
+ gc0308_power_off(dev);
+fail_subdev_cleanup:
+ v4l2_subdev_cleanup(&gc0308->sd);
+fail_media_entity_cleanup:
+ media_entity_cleanup(&gc0308->sd.entity);
+fail_ctrl_hdl_cleanup:
+ v4l2_ctrl_handler_free(&gc0308->hdl);
+ return ret;
+}
+
+static void gc0308_remove(struct i2c_client *client)
+{
+ struct gc0308 *gc0308 = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ v4l2_async_unregister_subdev(&gc0308->sd);
+ v4l2_ctrl_handler_free(&gc0308->hdl);
+ media_entity_cleanup(&gc0308->sd.entity);
+
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ gc0308_power_off(dev);
+ pm_runtime_set_suspended(dev);
+}
+
+static const struct dev_pm_ops gc0308_pm_ops = {
+ SET_RUNTIME_PM_OPS(gc0308_power_off, gc0308_power_on, NULL)
+};
+
+static const struct of_device_id gc0308_of_match[] = {
+ { .compatible = "galaxycore,gc0308" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, gc0308_of_match);
+
+static struct i2c_driver gc0308_i2c_driver = {
+ .driver = {
+ .name = "gc0308",
+ .pm = &gc0308_pm_ops,
+ .of_match_table = gc0308_of_match,
+ },
+ .probe = gc0308_probe,
+ .remove = gc0308_remove,
+};
+module_i2c_driver(gc0308_i2c_driver);
+
+MODULE_DESCRIPTION("GalaxyCore GC0308 Camera Driver");
+MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c
new file mode 100644
index 000000000..bef7b0e05
--- /dev/null
+++ b/drivers/media/i2c/gc2145.c
@@ -0,0 +1,1450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A V4L2 driver for the GalaxyCore GC2145 camera.
+ * Copyright (C) 2023, STMicroelectronics SA
+ *
+ * Inspired by the imx219.c driver
+ *
+ * Datasheet v1.0 available at http://files.pine64.org/doc/datasheet/PinebookPro/GC2145%20CSP%20DataSheet%20release%20V1.0_20131201.pdf
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+
+/* Chip ID */
+#define GC2145_CHIP_ID 0x2145
+
+/* Page 0 */
+#define GC2145_REG_EXPOSURE CCI_REG16(0x03)
+#define GC2145_REG_HBLANK CCI_REG16(0x05)
+#define GC2145_REG_VBLANK CCI_REG16(0x07)
+#define GC2145_REG_ROW_START CCI_REG16(0x09)
+#define GC2145_REG_COL_START CCI_REG16(0x0b)
+#define GC2145_REG_WIN_HEIGHT CCI_REG16(0x0d)
+#define GC2145_REG_WIN_WIDTH CCI_REG16(0x0f)
+#define GC2145_REG_ANALOG_MODE1 CCI_REG8(0x17)
+#define GC2145_REG_OUTPUT_FMT CCI_REG8(0x84)
+#define GC2145_REG_SYNC_MODE CCI_REG8(0x86)
+#define GC2145_SYNC_MODE_COL_SWITCH BIT(4)
+#define GC2145_SYNC_MODE_ROW_SWITCH BIT(5)
+#define GC2145_REG_BYPASS_MODE CCI_REG8(0x89)
+#define GC2145_BYPASS_MODE_SWITCH BIT(5)
+#define GC2145_REG_DEBUG_MODE2 CCI_REG8(0x8c)
+#define GC2145_REG_DEBUG_MODE3 CCI_REG8(0x8d)
+#define GC2145_REG_CROP_ENABLE CCI_REG8(0x90)
+#define GC2145_REG_CROP_Y CCI_REG16(0x91)
+#define GC2145_REG_CROP_X CCI_REG16(0x93)
+#define GC2145_REG_CROP_HEIGHT CCI_REG16(0x95)
+#define GC2145_REG_CROP_WIDTH CCI_REG16(0x97)
+#define GC2145_REG_GLOBAL_GAIN CCI_REG8(0xb0)
+#define GC2145_REG_CHIP_ID CCI_REG16(0xf0)
+#define GC2145_REG_PAD_IO CCI_REG8(0xf2)
+#define GC2145_REG_PAGE_SELECT CCI_REG8(0xfe)
+/* Page 3 */
+#define GC2145_REG_DPHY_ANALOG_MODE1 CCI_REG8(0x01)
+#define GC2145_DPHY_MODE_PHY_CLK_EN BIT(0)
+#define GC2145_DPHY_MODE_PHY_LANE0_EN BIT(1)
+#define GC2145_DPHY_MODE_PHY_LANE1_EN BIT(2)
+#define GC2145_DPHY_MODE_PHY_CLK_LANE_P2S_SEL BIT(7)
+#define GC2145_REG_DPHY_ANALOG_MODE2 CCI_REG8(0x02)
+#define GC2145_DPHY_CLK_DIFF(a) ((a) & 0x07)
+#define GC2145_DPHY_LANE0_DIFF(a) (((a) & 0x07) << 4)
+#define GC2145_REG_DPHY_ANALOG_MODE3 CCI_REG8(0x03)
+#define GC2145_DPHY_LANE1_DIFF(a) ((a) & 0x07)
+#define GC2145_DPHY_CLK_DELAY BIT(4)
+#define GC2145_DPHY_LANE0_DELAY BIT(5)
+#define GC2145_DPHY_LANE1_DELAY BIT(6)
+#define GC2145_REG_FIFO_FULL_LVL_LOW CCI_REG8(0x04)
+#define GC2145_REG_FIFO_FULL_LVL_HIGH CCI_REG8(0x05)
+#define GC2145_REG_FIFO_MODE CCI_REG8(0x06)
+#define GC2145_FIFO_MODE_READ_GATE BIT(3)
+#define GC2145_FIFO_MODE_MIPI_CLK_MODULE BIT(7)
+#define GC2145_REG_BUF_CSI2_MODE CCI_REG8(0x10)
+#define GC2145_CSI2_MODE_DOUBLE BIT(0)
+#define GC2145_CSI2_MODE_RAW8 BIT(2)
+#define GC2145_CSI2_MODE_MIPI_EN BIT(4)
+#define GC2145_CSI2_MODE_EN BIT(7)
+#define GC2145_REG_MIPI_DT CCI_REG8(0x11)
+#define GC2145_REG_LWC_LOW CCI_REG8(0x12)
+#define GC2145_REG_LWC_HIGH CCI_REG8(0x13)
+#define GC2145_REG_DPHY_MODE CCI_REG8(0x15)
+#define GC2145_DPHY_MODE_TRIGGER_PROG BIT(4)
+#define GC2145_REG_FIFO_GATE_MODE CCI_REG8(0x17)
+#define GC2145_REG_T_LPX CCI_REG8(0x21)
+#define GC2145_REG_T_CLK_HS_PREPARE CCI_REG8(0x22)
+#define GC2145_REG_T_CLK_ZERO CCI_REG8(0x23)
+#define GC2145_REG_T_CLK_PRE CCI_REG8(0x24)
+#define GC2145_REG_T_CLK_POST CCI_REG8(0x25)
+#define GC2145_REG_T_CLK_TRAIL CCI_REG8(0x26)
+#define GC2145_REG_T_HS_EXIT CCI_REG8(0x27)
+#define GC2145_REG_T_WAKEUP CCI_REG8(0x28)
+#define GC2145_REG_T_HS_PREPARE CCI_REG8(0x29)
+#define GC2145_REG_T_HS_ZERO CCI_REG8(0x2a)
+#define GC2145_REG_T_HS_TRAIL CCI_REG8(0x2b)
+
+/* External clock frequency is 24.0 MHz */
+#define GC2145_XCLK_FREQ (24 * HZ_PER_MHZ)
+
+#define GC2145_NATIVE_WIDTH 1616U
+#define GC2145_NATIVE_HEIGHT 1232U
+
+/**
+ * struct gc2145_mode - GC2145 mode description
+ * @width: frame width (in pixels)
+ * @height: frame height (in pixels)
+ * @reg_seq: register configuration sequence used to enter the mode
+ * @reg_seq_size: size of the sequence
+ * @pixel_rate: pixel rate associated with the mode
+ * @crop: window area captured
+ * @hblank: default horizontal blanking
+ * @vblank: default vertical blanking
+ * @link_freq_index: index within the link frequency menu
+ */
+struct gc2145_mode {
+ unsigned int width;
+ unsigned int height;
+ const struct cci_reg_sequence *reg_seq;
+ size_t reg_seq_size;
+ unsigned long pixel_rate;
+ struct v4l2_rect crop;
+ unsigned int hblank;
+ unsigned int vblank;
+ unsigned int link_freq_index;
+};
+
+#define GC2145_DEFAULT_EXPOSURE 0x04e2
+#define GC2145_DEFAULT_GLOBAL_GAIN 0x55
+static const struct cci_reg_sequence gc2145_common_regs[] = {
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ /* SH Delay */
+ {CCI_REG8(0x12), 0x2e},
+ /* Flip */
+ {GC2145_REG_ANALOG_MODE1, 0x14},
+ /* Analog Conf */
+ {CCI_REG8(0x18), 0x22}, {CCI_REG8(0x19), 0x0e}, {CCI_REG8(0x1a), 0x01},
+ {CCI_REG8(0x1b), 0x4b}, {CCI_REG8(0x1c), 0x07}, {CCI_REG8(0x1d), 0x10},
+ {CCI_REG8(0x1e), 0x88}, {CCI_REG8(0x1f), 0x78}, {CCI_REG8(0x20), 0x03},
+ {CCI_REG8(0x21), 0x40}, {CCI_REG8(0x22), 0xa0}, {CCI_REG8(0x24), 0x16},
+ {CCI_REG8(0x25), 0x01}, {CCI_REG8(0x26), 0x10}, {CCI_REG8(0x2d), 0x60},
+ {CCI_REG8(0x30), 0x01}, {CCI_REG8(0x31), 0x90}, {CCI_REG8(0x33), 0x06},
+ {CCI_REG8(0x34), 0x01},
+ /* ISP related */
+ {CCI_REG8(0x80), 0x7f}, {CCI_REG8(0x81), 0x26}, {CCI_REG8(0x82), 0xfa},
+ {CCI_REG8(0x83), 0x00}, {CCI_REG8(0x84), 0x02}, {CCI_REG8(0x86), 0x02},
+ {CCI_REG8(0x88), 0x03},
+ {GC2145_REG_BYPASS_MODE, 0x03},
+ {CCI_REG8(0x85), 0x08}, {CCI_REG8(0x8a), 0x00}, {CCI_REG8(0x8b), 0x00},
+ {GC2145_REG_GLOBAL_GAIN, GC2145_DEFAULT_GLOBAL_GAIN},
+ {CCI_REG8(0xc3), 0x00}, {CCI_REG8(0xc4), 0x80}, {CCI_REG8(0xc5), 0x90},
+ {CCI_REG8(0xc6), 0x3b}, {CCI_REG8(0xc7), 0x46},
+ /* BLK */
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ {CCI_REG8(0x40), 0x42}, {CCI_REG8(0x41), 0x00}, {CCI_REG8(0x43), 0x5b},
+ {CCI_REG8(0x5e), 0x00}, {CCI_REG8(0x5f), 0x00}, {CCI_REG8(0x60), 0x00},
+ {CCI_REG8(0x61), 0x00}, {CCI_REG8(0x62), 0x00}, {CCI_REG8(0x63), 0x00},
+ {CCI_REG8(0x64), 0x00}, {CCI_REG8(0x65), 0x00}, {CCI_REG8(0x66), 0x20},
+ {CCI_REG8(0x67), 0x20}, {CCI_REG8(0x68), 0x20}, {CCI_REG8(0x69), 0x20},
+ {CCI_REG8(0x76), 0x00}, {CCI_REG8(0x6a), 0x08}, {CCI_REG8(0x6b), 0x08},
+ {CCI_REG8(0x6c), 0x08}, {CCI_REG8(0x6d), 0x08}, {CCI_REG8(0x6e), 0x08},
+ {CCI_REG8(0x6f), 0x08}, {CCI_REG8(0x70), 0x08}, {CCI_REG8(0x71), 0x08},
+ {CCI_REG8(0x76), 0x00}, {CCI_REG8(0x72), 0xf0}, {CCI_REG8(0x7e), 0x3c},
+ {CCI_REG8(0x7f), 0x00},
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x48), 0x15}, {CCI_REG8(0x49), 0x00}, {CCI_REG8(0x4b), 0x0b},
+ /* AEC */
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ {GC2145_REG_EXPOSURE, GC2145_DEFAULT_EXPOSURE},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0x01), 0x04}, {CCI_REG8(0x02), 0xc0}, {CCI_REG8(0x03), 0x04},
+ {CCI_REG8(0x04), 0x90}, {CCI_REG8(0x05), 0x30}, {CCI_REG8(0x06), 0x90},
+ {CCI_REG8(0x07), 0x30}, {CCI_REG8(0x08), 0x80}, {CCI_REG8(0x09), 0x00},
+ {CCI_REG8(0x0a), 0x82}, {CCI_REG8(0x0b), 0x11}, {CCI_REG8(0x0c), 0x10},
+ {CCI_REG8(0x11), 0x10}, {CCI_REG8(0x13), 0x7b}, {CCI_REG8(0x17), 0x00},
+ {CCI_REG8(0x1c), 0x11}, {CCI_REG8(0x1e), 0x61}, {CCI_REG8(0x1f), 0x35},
+ {CCI_REG8(0x20), 0x40}, {CCI_REG8(0x22), 0x40}, {CCI_REG8(0x23), 0x20},
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x0f), 0x04},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0x12), 0x35}, {CCI_REG8(0x15), 0xb0}, {CCI_REG8(0x10), 0x31},
+ {CCI_REG8(0x3e), 0x28}, {CCI_REG8(0x3f), 0xb0}, {CCI_REG8(0x40), 0x90},
+ {CCI_REG8(0x41), 0x0f},
+ /* INTPEE */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x90), 0x6c}, {CCI_REG8(0x91), 0x03}, {CCI_REG8(0x92), 0xcb},
+ {CCI_REG8(0x94), 0x33}, {CCI_REG8(0x95), 0x84}, {CCI_REG8(0x97), 0x65},
+ {CCI_REG8(0xa2), 0x11},
+ /* DNDD */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x80), 0xc1}, {CCI_REG8(0x81), 0x08}, {CCI_REG8(0x82), 0x05},
+ {CCI_REG8(0x83), 0x08}, {CCI_REG8(0x84), 0x0a}, {CCI_REG8(0x86), 0xf0},
+ {CCI_REG8(0x87), 0x50}, {CCI_REG8(0x88), 0x15}, {CCI_REG8(0x89), 0xb0},
+ {CCI_REG8(0x8a), 0x30}, {CCI_REG8(0x8b), 0x10},
+ /* ASDE */
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0x21), 0x04},
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0xa3), 0x50}, {CCI_REG8(0xa4), 0x20}, {CCI_REG8(0xa5), 0x40},
+ {CCI_REG8(0xa6), 0x80}, {CCI_REG8(0xab), 0x40}, {CCI_REG8(0xae), 0x0c},
+ {CCI_REG8(0xb3), 0x46}, {CCI_REG8(0xb4), 0x64}, {CCI_REG8(0xb6), 0x38},
+ {CCI_REG8(0xb7), 0x01}, {CCI_REG8(0xb9), 0x2b}, {CCI_REG8(0x3c), 0x04},
+ {CCI_REG8(0x3d), 0x15}, {CCI_REG8(0x4b), 0x06}, {CCI_REG8(0x4c), 0x20},
+ /* Gamma */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x10), 0x09}, {CCI_REG8(0x11), 0x0d}, {CCI_REG8(0x12), 0x13},
+ {CCI_REG8(0x13), 0x19}, {CCI_REG8(0x14), 0x27}, {CCI_REG8(0x15), 0x37},
+ {CCI_REG8(0x16), 0x45}, {CCI_REG8(0x17), 0x53}, {CCI_REG8(0x18), 0x69},
+ {CCI_REG8(0x19), 0x7d}, {CCI_REG8(0x1a), 0x8f}, {CCI_REG8(0x1b), 0x9d},
+ {CCI_REG8(0x1c), 0xa9}, {CCI_REG8(0x1d), 0xbd}, {CCI_REG8(0x1e), 0xcd},
+ {CCI_REG8(0x1f), 0xd9}, {CCI_REG8(0x20), 0xe3}, {CCI_REG8(0x21), 0xea},
+ {CCI_REG8(0x22), 0xef}, {CCI_REG8(0x23), 0xf5}, {CCI_REG8(0x24), 0xf9},
+ {CCI_REG8(0x25), 0xff},
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ {CCI_REG8(0xc6), 0x20}, {CCI_REG8(0xc7), 0x2b},
+ /* Gamma 2 */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x26), 0x0f}, {CCI_REG8(0x27), 0x14}, {CCI_REG8(0x28), 0x19},
+ {CCI_REG8(0x29), 0x1e}, {CCI_REG8(0x2a), 0x27}, {CCI_REG8(0x2b), 0x33},
+ {CCI_REG8(0x2c), 0x3b}, {CCI_REG8(0x2d), 0x45}, {CCI_REG8(0x2e), 0x59},
+ {CCI_REG8(0x2f), 0x69}, {CCI_REG8(0x30), 0x7c}, {CCI_REG8(0x31), 0x89},
+ {CCI_REG8(0x32), 0x98}, {CCI_REG8(0x33), 0xae}, {CCI_REG8(0x34), 0xc0},
+ {CCI_REG8(0x35), 0xcf}, {CCI_REG8(0x36), 0xda}, {CCI_REG8(0x37), 0xe2},
+ {CCI_REG8(0x38), 0xe9}, {CCI_REG8(0x39), 0xf3}, {CCI_REG8(0x3a), 0xf9},
+ {CCI_REG8(0x3b), 0xff},
+ /* YCP */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0xd1), 0x32}, {CCI_REG8(0xd2), 0x32}, {CCI_REG8(0xd3), 0x40},
+ {CCI_REG8(0xd6), 0xf0}, {CCI_REG8(0xd7), 0x10}, {CCI_REG8(0xd8), 0xda},
+ {CCI_REG8(0xdd), 0x14}, {CCI_REG8(0xde), 0x86}, {CCI_REG8(0xed), 0x80},
+ {CCI_REG8(0xee), 0x00}, {CCI_REG8(0xef), 0x3f}, {CCI_REG8(0xd8), 0xd8},
+ /* ABS */
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0x9f), 0x40},
+ /* LSC */
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0xc2), 0x14}, {CCI_REG8(0xc3), 0x0d}, {CCI_REG8(0xc4), 0x0c},
+ {CCI_REG8(0xc8), 0x15}, {CCI_REG8(0xc9), 0x0d}, {CCI_REG8(0xca), 0x0a},
+ {CCI_REG8(0xbc), 0x24}, {CCI_REG8(0xbd), 0x10}, {CCI_REG8(0xbe), 0x0b},
+ {CCI_REG8(0xb6), 0x25}, {CCI_REG8(0xb7), 0x16}, {CCI_REG8(0xb8), 0x15},
+ {CCI_REG8(0xc5), 0x00}, {CCI_REG8(0xc6), 0x00}, {CCI_REG8(0xc7), 0x00},
+ {CCI_REG8(0xcb), 0x00}, {CCI_REG8(0xcc), 0x00}, {CCI_REG8(0xcd), 0x00},
+ {CCI_REG8(0xbf), 0x07}, {CCI_REG8(0xc0), 0x00}, {CCI_REG8(0xc1), 0x00},
+ {CCI_REG8(0xb9), 0x00}, {CCI_REG8(0xba), 0x00}, {CCI_REG8(0xbb), 0x00},
+ {CCI_REG8(0xaa), 0x01}, {CCI_REG8(0xab), 0x01}, {CCI_REG8(0xac), 0x00},
+ {CCI_REG8(0xad), 0x05}, {CCI_REG8(0xae), 0x06}, {CCI_REG8(0xaf), 0x0e},
+ {CCI_REG8(0xb0), 0x0b}, {CCI_REG8(0xb1), 0x07}, {CCI_REG8(0xb2), 0x06},
+ {CCI_REG8(0xb3), 0x17}, {CCI_REG8(0xb4), 0x0e}, {CCI_REG8(0xb5), 0x0e},
+ {CCI_REG8(0xd0), 0x09}, {CCI_REG8(0xd1), 0x00}, {CCI_REG8(0xd2), 0x00},
+ {CCI_REG8(0xd6), 0x08}, {CCI_REG8(0xd7), 0x00}, {CCI_REG8(0xd8), 0x00},
+ {CCI_REG8(0xd9), 0x00}, {CCI_REG8(0xda), 0x00}, {CCI_REG8(0xdb), 0x00},
+ {CCI_REG8(0xd3), 0x0a}, {CCI_REG8(0xd4), 0x00}, {CCI_REG8(0xd5), 0x00},
+ {CCI_REG8(0xa4), 0x00}, {CCI_REG8(0xa5), 0x00}, {CCI_REG8(0xa6), 0x77},
+ {CCI_REG8(0xa7), 0x77}, {CCI_REG8(0xa8), 0x77}, {CCI_REG8(0xa9), 0x77},
+ {CCI_REG8(0xa1), 0x80}, {CCI_REG8(0xa2), 0x80},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0xdf), 0x0d}, {CCI_REG8(0xdc), 0x25}, {CCI_REG8(0xdd), 0x30},
+ {CCI_REG8(0xe0), 0x77}, {CCI_REG8(0xe1), 0x80}, {CCI_REG8(0xe2), 0x77},
+ {CCI_REG8(0xe3), 0x90}, {CCI_REG8(0xe6), 0x90}, {CCI_REG8(0xe7), 0xa0},
+ {CCI_REG8(0xe8), 0x90}, {CCI_REG8(0xe9), 0xa0},
+ /* AWB */
+ /* measure window */
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ {CCI_REG8(0xec), 0x06}, {CCI_REG8(0xed), 0x04}, {CCI_REG8(0xee), 0x60},
+ {CCI_REG8(0xef), 0x90}, {CCI_REG8(0xb6), 0x01},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0x4f), 0x00}, {CCI_REG8(0x4f), 0x00}, {CCI_REG8(0x4b), 0x01},
+ {CCI_REG8(0x4f), 0x00},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x71}, {CCI_REG8(0x4e), 0x01},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x91}, {CCI_REG8(0x4e), 0x01},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x70}, {CCI_REG8(0x4e), 0x01},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x90}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xb0}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8f}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x6f}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xaf}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xd0}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xf0}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xcf}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xef}, {CCI_REG8(0x4e), 0x02},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x6e}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8e}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xae}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xce}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x4d}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x6d}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8d}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xad}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xcd}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x4c}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x6c}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8c}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xac}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xcc}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xcb}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x4b}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x6b}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8b}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xab}, {CCI_REG8(0x4e), 0x03},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8a}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xaa}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xca}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xca}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xc9}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x8a}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0x89}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xa9}, {CCI_REG8(0x4e), 0x04},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x0b}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x0a}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xeb}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x01}, {CCI_REG8(0x4d), 0xea}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x09}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x29}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x2a}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x4a}, {CCI_REG8(0x4e), 0x05},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x8a}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x49}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x69}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x89}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xa9}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x48}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x68}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0x69}, {CCI_REG8(0x4e), 0x06},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xca}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xc9}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xe9}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x03}, {CCI_REG8(0x4d), 0x09}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xc8}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xe8}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xa7}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xc7}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x02}, {CCI_REG8(0x4d), 0xe7}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4c), 0x03}, {CCI_REG8(0x4d), 0x07}, {CCI_REG8(0x4e), 0x07},
+ {CCI_REG8(0x4f), 0x01},
+ {CCI_REG8(0x50), 0x80}, {CCI_REG8(0x51), 0xa8}, {CCI_REG8(0x52), 0x47},
+ {CCI_REG8(0x53), 0x38}, {CCI_REG8(0x54), 0xc7}, {CCI_REG8(0x56), 0x0e},
+ {CCI_REG8(0x58), 0x08}, {CCI_REG8(0x5b), 0x00}, {CCI_REG8(0x5c), 0x74},
+ {CCI_REG8(0x5d), 0x8b}, {CCI_REG8(0x61), 0xdb}, {CCI_REG8(0x62), 0xb8},
+ {CCI_REG8(0x63), 0x86}, {CCI_REG8(0x64), 0xc0}, {CCI_REG8(0x65), 0x04},
+ {CCI_REG8(0x67), 0xa8}, {CCI_REG8(0x68), 0xb0}, {CCI_REG8(0x69), 0x00},
+ {CCI_REG8(0x6a), 0xa8}, {CCI_REG8(0x6b), 0xb0}, {CCI_REG8(0x6c), 0xaf},
+ {CCI_REG8(0x6d), 0x8b}, {CCI_REG8(0x6e), 0x50}, {CCI_REG8(0x6f), 0x18},
+ {CCI_REG8(0x73), 0xf0}, {CCI_REG8(0x70), 0x0d}, {CCI_REG8(0x71), 0x60},
+ {CCI_REG8(0x72), 0x80}, {CCI_REG8(0x74), 0x01}, {CCI_REG8(0x75), 0x01},
+ {CCI_REG8(0x7f), 0x0c}, {CCI_REG8(0x76), 0x70}, {CCI_REG8(0x77), 0x58},
+ {CCI_REG8(0x78), 0xa0}, {CCI_REG8(0x79), 0x5e}, {CCI_REG8(0x7a), 0x54},
+ {CCI_REG8(0x7b), 0x58},
+ /* CC */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0xc0), 0x01}, {CCI_REG8(0xc1), 0x44}, {CCI_REG8(0xc2), 0xfd},
+ {CCI_REG8(0xc3), 0x04}, {CCI_REG8(0xc4), 0xf0}, {CCI_REG8(0xc5), 0x48},
+ {CCI_REG8(0xc6), 0xfd}, {CCI_REG8(0xc7), 0x46}, {CCI_REG8(0xc8), 0xfd},
+ {CCI_REG8(0xc9), 0x02}, {CCI_REG8(0xca), 0xe0}, {CCI_REG8(0xcb), 0x45},
+ {CCI_REG8(0xcc), 0xec}, {CCI_REG8(0xcd), 0x48}, {CCI_REG8(0xce), 0xf0},
+ {CCI_REG8(0xcf), 0xf0}, {CCI_REG8(0xe3), 0x0c}, {CCI_REG8(0xe4), 0x4b},
+ {CCI_REG8(0xe5), 0xe0},
+ /* ABS */
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ {CCI_REG8(0x9f), 0x40},
+ /* Dark sun */
+ {GC2145_REG_PAGE_SELECT, 0x02},
+ {CCI_REG8(0x40), 0xbf}, {CCI_REG8(0x46), 0xcf},
+};
+
+#define GC2145_640_480_PIXELRATE 30000000
+#define GC2145_640_480_LINKFREQ 120000000
+#define GC2145_640_480_HBLANK 0x0130
+#define GC2145_640_480_VBLANK 0x000c
+static const struct cci_reg_sequence gc2145_mode_640_480_regs[] = {
+ {GC2145_REG_PAGE_SELECT, 0xf0}, {GC2145_REG_PAGE_SELECT, 0xf0},
+ {GC2145_REG_PAGE_SELECT, 0xf0}, {CCI_REG8(0xfc), 0x06},
+ {CCI_REG8(0xf6), 0x00}, {CCI_REG8(0xf7), 0x1d}, {CCI_REG8(0xf8), 0x86},
+ {CCI_REG8(0xfa), 0x00}, {CCI_REG8(0xf9), 0x8e},
+ /* Disable PAD IO */
+ {GC2145_REG_PAD_IO, 0x00},
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ /* Row/Col start - 0/0 */
+ {GC2145_REG_ROW_START, 0x0000},
+ {GC2145_REG_COL_START, 0x0000},
+ /* Window size 1216/1618 */
+ {GC2145_REG_WIN_HEIGHT, 0x04c0},
+ {GC2145_REG_WIN_WIDTH, 0x0652},
+	/* Scaler mode */
+ {CCI_REG8(0xfd), 0x01}, {CCI_REG8(0xfa), 0x00},
+ /* Crop 640-480@0-0 */
+ {GC2145_REG_CROP_ENABLE, 0x01},
+ {GC2145_REG_CROP_Y, 0x0000},
+ {GC2145_REG_CROP_X, 0x0000},
+ {GC2145_REG_CROP_HEIGHT, 0x01e0},
+ {GC2145_REG_CROP_WIDTH, 0x0280},
+ /* Subsampling configuration */
+ {CCI_REG8(0x99), 0x55}, {CCI_REG8(0x9a), 0x06}, {CCI_REG8(0x9b), 0x01},
+ {CCI_REG8(0x9c), 0x23}, {CCI_REG8(0x9d), 0x00}, {CCI_REG8(0x9e), 0x00},
+ {CCI_REG8(0x9f), 0x01}, {CCI_REG8(0xa0), 0x23}, {CCI_REG8(0xa1), 0x00},
+ {CCI_REG8(0xa2), 0x00},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ /* AEC anti-flicker */
+ {CCI_REG16(0x25), 0x0175},
+ /* AEC exposure level 1-5 */
+ {CCI_REG16(0x27), 0x045f}, {CCI_REG16(0x29), 0x045f},
+ {CCI_REG16(0x2b), 0x045f}, {CCI_REG16(0x2d), 0x045f},
+};
+
+#define GC2145_1280_720_PIXELRATE 48000000
+#define GC2145_1280_720_LINKFREQ 192000000
+#define GC2145_1280_720_HBLANK 0x0156
+#define GC2145_1280_720_VBLANK 0x0011
+static const struct cci_reg_sequence gc2145_mode_1280_720_regs[] = {
+ {GC2145_REG_PAGE_SELECT, 0xf0}, {GC2145_REG_PAGE_SELECT, 0xf0},
+ {GC2145_REG_PAGE_SELECT, 0xf0}, {CCI_REG8(0xfc), 0x06},
+ {CCI_REG8(0xf6), 0x00}, {CCI_REG8(0xf7), 0x1d}, {CCI_REG8(0xf8), 0x83},
+ {CCI_REG8(0xfa), 0x00}, {CCI_REG8(0xf9), 0x8e},
+ /* Disable PAD IO */
+ {GC2145_REG_PAD_IO, 0x00},
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ /* Row/Col start - 240/160 */
+ {GC2145_REG_ROW_START, 0x00f0},
+ {GC2145_REG_COL_START, 0x00a0},
+ /* Window size 736/1296 */
+ {GC2145_REG_WIN_HEIGHT, 0x02e0},
+ {GC2145_REG_WIN_WIDTH, 0x0510},
+ /* Crop 1280-720@0-0 */
+ {GC2145_REG_CROP_ENABLE, 0x01},
+ {GC2145_REG_CROP_Y, 0x0000},
+ {GC2145_REG_CROP_X, 0x0000},
+ {GC2145_REG_CROP_HEIGHT, 0x02d0},
+ {GC2145_REG_CROP_WIDTH, 0x0500},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ /* AEC anti-flicker */
+ {CCI_REG16(0x25), 0x00e6},
+ /* AEC exposure level 1-5 */
+ {CCI_REG16(0x27), 0x02b2}, {CCI_REG16(0x29), 0x02b2},
+ {CCI_REG16(0x2b), 0x02b2}, {CCI_REG16(0x2d), 0x02b2},
+};
+
+#define GC2145_1600_1200_PIXELRATE 60000000
+#define GC2145_1600_1200_LINKFREQ 240000000
+#define GC2145_1600_1200_HBLANK 0x0156
+#define GC2145_1600_1200_VBLANK 0x0010
+static const struct cci_reg_sequence gc2145_mode_1600_1200_regs[] = {
+ {GC2145_REG_PAGE_SELECT, 0xf0}, {GC2145_REG_PAGE_SELECT, 0xf0},
+ {GC2145_REG_PAGE_SELECT, 0xf0}, {CCI_REG8(0xfc), 0x06},
+ {CCI_REG8(0xf6), 0x00}, {CCI_REG8(0xf7), 0x1d}, {CCI_REG8(0xf8), 0x84},
+ {CCI_REG8(0xfa), 0x00}, {CCI_REG8(0xf9), 0x8e},
+ /* Disable PAD IO */
+ {GC2145_REG_PAD_IO, 0x00},
+ {GC2145_REG_PAGE_SELECT, 0x00},
+ /* Row/Col start - 0/0 */
+ {GC2145_REG_ROW_START, 0x0000},
+ {GC2145_REG_COL_START, 0x0000},
+	/* Window size 1216/1618 */
+ {GC2145_REG_WIN_HEIGHT, 0x04c0},
+ {GC2145_REG_WIN_WIDTH, 0x0652},
+ /* Crop 1600-1200@0-0 */
+ {GC2145_REG_CROP_ENABLE, 0x01},
+ {GC2145_REG_CROP_Y, 0x0000},
+ {GC2145_REG_CROP_X, 0x0000},
+ {GC2145_REG_CROP_HEIGHT, 0x04b0},
+ {GC2145_REG_CROP_WIDTH, 0x0640},
+ {GC2145_REG_PAGE_SELECT, 0x01},
+ /* AEC anti-flicker */
+ {CCI_REG16(0x25), 0x00fa},
+ /* AEC exposure level 1-5 */
+ {CCI_REG16(0x27), 0x04e2}, {CCI_REG16(0x29), 0x04e2},
+ {CCI_REG16(0x2b), 0x04e2}, {CCI_REG16(0x2d), 0x04e2},
+};
+
+static const s64 gc2145_link_freq_menu[] = {
+ GC2145_640_480_LINKFREQ,
+ GC2145_1280_720_LINKFREQ,
+ GC2145_1600_1200_LINKFREQ,
+};
+
+/* Regulator supplies */
+static const char * const gc2145_supply_name[] = {
+	"iovdd", /* Digital I/O (1.7-3V) supply */
+ "avdd", /* Analog (2.7-3V) supply */
+ "dvdd", /* Digital Core (1.7-1.9V) supply */
+};
+
+#define GC2145_NUM_SUPPLIES ARRAY_SIZE(gc2145_supply_name)
+
+/* Mode configs */
+#define GC2145_MODE_640X480 0
+#define GC2145_MODE_1280X720 1
+#define GC2145_MODE_1600X1200 2
+static const struct gc2145_mode supported_modes[] = {
+ {
+ /* 640x480 30fps mode */
+ .width = 640,
+ .height = 480,
+ .reg_seq = gc2145_mode_640_480_regs,
+ .reg_seq_size = ARRAY_SIZE(gc2145_mode_640_480_regs),
+ .pixel_rate = GC2145_640_480_PIXELRATE,
+ .crop = {
+ .top = 0,
+ .left = 0,
+ .width = 640,
+ .height = 480,
+ },
+ .hblank = GC2145_640_480_HBLANK,
+ .vblank = GC2145_640_480_VBLANK,
+ .link_freq_index = GC2145_MODE_640X480,
+ },
+ {
+ /* 1280x720 30fps mode */
+ .width = 1280,
+ .height = 720,
+ .reg_seq = gc2145_mode_1280_720_regs,
+ .reg_seq_size = ARRAY_SIZE(gc2145_mode_1280_720_regs),
+ .pixel_rate = GC2145_1280_720_PIXELRATE,
+ .crop = {
+ .top = 160,
+ .left = 240,
+ .width = 1280,
+ .height = 720,
+ },
+ .hblank = GC2145_1280_720_HBLANK,
+ .vblank = GC2145_1280_720_VBLANK,
+ .link_freq_index = GC2145_MODE_1280X720,
+ },
+ {
+ /* 1600x1200 20fps mode */
+ .width = 1600,
+ .height = 1200,
+ .reg_seq = gc2145_mode_1600_1200_regs,
+ .reg_seq_size = ARRAY_SIZE(gc2145_mode_1600_1200_regs),
+ .pixel_rate = GC2145_1600_1200_PIXELRATE,
+ .crop = {
+ .top = 0,
+ .left = 0,
+ .width = 1600,
+ .height = 1200,
+ },
+ .hblank = GC2145_1600_1200_HBLANK,
+ .vblank = GC2145_1600_1200_VBLANK,
+ .link_freq_index = GC2145_MODE_1600X1200,
+ },
+};
+
+/**
+ * struct gc2145_format - GC2145 pixel format description
+ * @code: media bus (MBUS) associated code
+ * @datatype: MIPI CSI2 data type
+ * @output_fmt: GC2145 output format
+ * @switch_bit: GC2145 first/second switch
+ */
+struct gc2145_format {
+ unsigned int code;
+ unsigned char datatype;
+ unsigned char output_fmt;
+ bool switch_bit;
+};
+
+/* All supported formats */
+static const struct gc2145_format supported_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .output_fmt = 0x00,
+ },
+ {
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .output_fmt = 0x01,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .output_fmt = 0x02,
+ },
+ {
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .output_fmt = 0x03,
+ },
+ {
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
+ .datatype = MIPI_CSI2_DT_RGB565,
+ .output_fmt = 0x06,
+ .switch_bit = true,
+ },
+};
+
+struct gc2145_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *test_pattern;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+};
+
+struct gc2145 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct regmap *regmap;
+ struct clk *xclk;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *powerdown_gpio;
+ struct regulator_bulk_data supplies[GC2145_NUM_SUPPLIES];
+
+ /* V4L2 controls */
+ struct gc2145_ctrls ctrls;
+
+ /* Current mode */
+ const struct gc2145_mode *mode;
+};
+
+static inline struct gc2145 *to_gc2145(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct gc2145, sd);
+}
+
+static inline struct v4l2_subdev *gc2145_ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct gc2145,
+ ctrls.handler)->sd;
+}
+
+static const struct gc2145_format *
+gc2145_get_format_code(struct gc2145 *gc2145, u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
+ if (supported_formats[i].code == code)
+ break;
+ }
+
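+	/* Fall back to the first supported format when no match is found */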
+ if (i >= ARRAY_SIZE(supported_formats))
+ i = 0;
+
+ return &supported_formats[i];
+}
+
+static void gc2145_update_pad_format(struct gc2145 *gc2145,
+ const struct gc2145_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ fmt->code = code;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static int gc2145_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct gc2145 *gc2145 = to_gc2145(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ /* Initialize pad format */
+ format = v4l2_subdev_state_get_format(state, 0);
+ gc2145_update_pad_format(gc2145, &supported_modes[0], format,
+ MEDIA_BUS_FMT_RGB565_1X16);
+
+ /* Initialize crop rectangle. */
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ *crop = supported_modes[0].crop;
+
+ return 0;
+}
+
+static int gc2145_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ return 0;
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = GC2145_NATIVE_WIDTH;
+ sel->r.height = GC2145_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = 1600;
+ sel->r.height = 1200;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int gc2145_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(supported_formats))
+ return -EINVAL;
+
+ code->code = supported_formats[code->index].code;
+ return 0;
+}
+
+static int gc2145_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct gc2145 *gc2145 = to_gc2145(sd);
+ const struct gc2145_format *gc2145_format;
+ u32 code;
+
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ gc2145_format = gc2145_get_format_code(gc2145, fse->code);
+ code = gc2145_format->code;
+ if (fse->code != code)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int gc2145_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct gc2145 *gc2145 = to_gc2145(sd);
+ const struct gc2145_mode *mode;
+ const struct gc2145_format *gc2145_fmt;
+ struct v4l2_mbus_framefmt *framefmt;
+ struct gc2145_ctrls *ctrls = &gc2145->ctrls;
+ struct v4l2_rect *crop;
+
+ gc2145_fmt = gc2145_get_format_code(gc2145, fmt->format.code);
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ gc2145_update_pad_format(gc2145, mode, &fmt->format, gc2145_fmt->code);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ gc2145->mode = mode;
+ /* Update pixel_rate based on the mode */
+ __v4l2_ctrl_s_ctrl_int64(ctrls->pixel_rate, mode->pixel_rate);
+ /* Update link_freq based on the mode */
+ __v4l2_ctrl_s_ctrl(ctrls->link_freq, mode->link_freq_index);
+ /* Update hblank/vblank based on the mode */
+ __v4l2_ctrl_s_ctrl(ctrls->hblank, mode->hblank);
+ __v4l2_ctrl_s_ctrl(ctrls->vblank, mode->vblank);
+ }
+ *framefmt = fmt->format;
+ crop = v4l2_subdev_state_get_crop(sd_state, fmt->pad);
+ *crop = mode->crop;
+
+ return 0;
+}
+
+static const struct cci_reg_sequence gc2145_common_mipi_regs[] = {
+ {GC2145_REG_PAGE_SELECT, 0x03},
+ {GC2145_REG_DPHY_ANALOG_MODE1, GC2145_DPHY_MODE_PHY_CLK_EN |
+ GC2145_DPHY_MODE_PHY_LANE0_EN |
+ GC2145_DPHY_MODE_PHY_LANE1_EN |
+ GC2145_DPHY_MODE_PHY_CLK_LANE_P2S_SEL},
+ {GC2145_REG_DPHY_ANALOG_MODE2, GC2145_DPHY_CLK_DIFF(2) |
+ GC2145_DPHY_LANE0_DIFF(2)},
+ {GC2145_REG_DPHY_ANALOG_MODE3, GC2145_DPHY_LANE1_DIFF(0) |
+ GC2145_DPHY_CLK_DELAY},
+ {GC2145_REG_FIFO_MODE, GC2145_FIFO_MODE_READ_GATE |
+ GC2145_FIFO_MODE_MIPI_CLK_MODULE},
+ {GC2145_REG_DPHY_MODE, GC2145_DPHY_MODE_TRIGGER_PROG},
+ /* Clock & Data lanes timing */
+ {GC2145_REG_T_LPX, 0x10},
+ {GC2145_REG_T_CLK_HS_PREPARE, 0x04}, {GC2145_REG_T_CLK_ZERO, 0x10},
+ {GC2145_REG_T_CLK_PRE, 0x10}, {GC2145_REG_T_CLK_POST, 0x10},
+ {GC2145_REG_T_CLK_TRAIL, 0x05},
+ {GC2145_REG_T_HS_PREPARE, 0x03}, {GC2145_REG_T_HS_ZERO, 0x0a},
+ {GC2145_REG_T_HS_TRAIL, 0x06},
+};
+
+static int gc2145_config_mipi_mode(struct gc2145 *gc2145,
+ const struct gc2145_format *gc2145_format)
+{
+ u16 lwc, fifo_full_lvl;
+ int ret = 0;
+
+ /* Common MIPI settings */
+ cci_multi_reg_write(gc2145->regmap, gc2145_common_mipi_regs,
+ ARRAY_SIZE(gc2145_common_mipi_regs), &ret);
+
+ /*
+ * Adjust the MIPI buffer settings.
+ * For YUV/RGB, LWC = image width * 2
+ * For RAW8, LWC = image width
+ * For RAW10, LWC = image width * 1.25
+ */
+ lwc = gc2145->mode->width * 2;
+ cci_write(gc2145->regmap, GC2145_REG_LWC_HIGH, lwc >> 8, &ret);
+ cci_write(gc2145->regmap, GC2145_REG_LWC_LOW, lwc & 0xff, &ret);
+
+ /*
+ * Adjust the MIPI FIFO Full Level
+ * 640x480 RGB: 0x0190
+	 * 1280x720 / 1600x1200 (i.e. no scaler) non-RAW: 0x0001
+ * 1600x1200 RAW: 0x0190
+ */
+ if (gc2145->mode->width == 1280 || gc2145->mode->width == 1600)
+ fifo_full_lvl = 0x0001;
+ else
+ fifo_full_lvl = 0x0190;
+
+ cci_write(gc2145->regmap, GC2145_REG_FIFO_FULL_LVL_HIGH,
+ fifo_full_lvl >> 8, &ret);
+ cci_write(gc2145->regmap, GC2145_REG_FIFO_FULL_LVL_LOW,
+ fifo_full_lvl & 0xff, &ret);
+
+ /*
+	 * Set the FIFO gate mode / MIPI wdiv:
+	 * 0xf1 for RAW modes and 0xf0 otherwise
+ */
+ cci_write(gc2145->regmap, GC2145_REG_FIFO_GATE_MODE, 0xf0, &ret);
+
+ /* Set the MIPI data type */
+ cci_write(gc2145->regmap, GC2145_REG_MIPI_DT,
+ gc2145_format->datatype, &ret);
+
+ /* Configure mode and enable CSI */
+ cci_write(gc2145->regmap, GC2145_REG_BUF_CSI2_MODE,
+ GC2145_CSI2_MODE_RAW8 | GC2145_CSI2_MODE_DOUBLE |
+ GC2145_CSI2_MODE_EN | GC2145_CSI2_MODE_MIPI_EN, &ret);
+
+ return ret;
+}
+
+static int gc2145_start_streaming(struct gc2145 *gc2145,
+ struct v4l2_subdev_state *state)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&gc2145->sd);
+ const struct gc2145_format *gc2145_format;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Apply default values of current mode */
+ cci_multi_reg_write(gc2145->regmap, gc2145->mode->reg_seq,
+ gc2145->mode->reg_seq_size, &ret);
+ cci_multi_reg_write(gc2145->regmap, gc2145_common_regs,
+ ARRAY_SIZE(gc2145_common_regs), &ret);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to write regs\n", __func__);
+ goto err_rpm_put;
+ }
+
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ gc2145_format = gc2145_get_format_code(gc2145, fmt->code);
+
+ /* Set the output format */
+ cci_write(gc2145->regmap, GC2145_REG_PAGE_SELECT, 0x00, &ret);
+
+ cci_write(gc2145->regmap, GC2145_REG_OUTPUT_FMT,
+ gc2145_format->output_fmt, &ret);
+ cci_update_bits(gc2145->regmap, GC2145_REG_BYPASS_MODE,
+ GC2145_BYPASS_MODE_SWITCH,
+ gc2145_format->switch_bit ? GC2145_BYPASS_MODE_SWITCH
+ : 0, &ret);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to write regs\n", __func__);
+ goto err_rpm_put;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(&gc2145->ctrls.handler);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to apply ctrls\n", __func__);
+ goto err_rpm_put;
+ }
+
+ /* Perform MIPI specific configuration */
+ ret = gc2145_config_mipi_mode(gc2145, gc2145_format);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to write mipi conf\n",
+ __func__);
+ goto err_rpm_put;
+ }
+
+ cci_write(gc2145->regmap, GC2145_REG_PAGE_SELECT, 0x00, &ret);
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+ return ret;
+}
+
+static void gc2145_stop_streaming(struct gc2145 *gc2145)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&gc2145->sd);
+ int ret = 0;
+
+ /* Disable lanes & mipi streaming */
+ cci_write(gc2145->regmap, GC2145_REG_PAGE_SELECT, 0x03, &ret);
+ cci_update_bits(gc2145->regmap, GC2145_REG_BUF_CSI2_MODE,
+ GC2145_CSI2_MODE_EN | GC2145_CSI2_MODE_MIPI_EN, 0,
+ &ret);
+ cci_write(gc2145->regmap, GC2145_REG_PAGE_SELECT, 0x00, &ret);
+ if (ret)
+ dev_err(&client->dev, "%s failed to write regs\n", __func__);
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+}
+
+static int gc2145_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc2145 *gc2145 = to_gc2145(sd);
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ if (enable)
+ ret = gc2145_start_streaming(gc2145, state);
+ else
+ gc2145_stop_streaming(gc2145);
+
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+/* Power/clock management functions */
+static int gc2145_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct gc2145 *gc2145 = to_gc2145(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(GC2145_NUM_SUPPLIES, gc2145->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable regulators\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(gc2145->xclk);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ goto reg_off;
+ }
+
+ gpiod_set_value_cansleep(gc2145->powerdown_gpio, 0);
+ gpiod_set_value_cansleep(gc2145->reset_gpio, 0);
+
+ /*
+	 * The datasheet doesn't specify the timing between PWDN/RESETB
+	 * control and I2C access; however, experimentation shows that a
+	 * rather large delay is needed.
+ */
+ msleep(41);
+
+ return 0;
+
+reg_off:
+ regulator_bulk_disable(GC2145_NUM_SUPPLIES, gc2145->supplies);
+
+ return ret;
+}
+
+static int gc2145_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct gc2145 *gc2145 = to_gc2145(sd);
+
+ gpiod_set_value_cansleep(gc2145->powerdown_gpio, 1);
+ gpiod_set_value_cansleep(gc2145->reset_gpio, 1);
+ clk_disable_unprepare(gc2145->xclk);
+ regulator_bulk_disable(GC2145_NUM_SUPPLIES, gc2145->supplies);
+
+ return 0;
+}
+
+static int gc2145_get_regulators(struct gc2145 *gc2145)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&gc2145->sd);
+ unsigned int i;
+
+ for (i = 0; i < GC2145_NUM_SUPPLIES; i++)
+ gc2145->supplies[i].supply = gc2145_supply_name[i];
+
+ return devm_regulator_bulk_get(&client->dev, GC2145_NUM_SUPPLIES,
+ gc2145->supplies);
+}
+
+/* Verify chip ID */
+static int gc2145_identify_module(struct gc2145 *gc2145)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&gc2145->sd);
+ int ret;
+ u64 chip_id;
+
+ ret = cci_read(gc2145->regmap, GC2145_REG_CHIP_ID, &chip_id, NULL);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id (%d)\n", ret);
+ return ret;
+ }
+
+ if (chip_id != GC2145_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ GC2145_CHIP_ID, chip_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Colored patterns",
+ "Uniform white",
+ "Uniform yellow",
+ "Uniform cyan",
+ "Uniform green",
+ "Uniform magenta",
+ "Uniform red",
+ "Uniform black",
+};
+
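+/* GC2145_REG_DEBUG_MODE2 test pattern control bits */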
+#define GC2145_TEST_PATTERN_ENABLE BIT(0)
+#define GC2145_TEST_PATTERN_UXGA BIT(3)
+
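+/* GC2145_REG_DEBUG_MODE3 uniform test pattern colour values */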
+#define GC2145_TEST_UNIFORM BIT(3)
+#define GC2145_TEST_WHITE (4 << 4)
+#define GC2145_TEST_YELLOW (8 << 4)
+#define GC2145_TEST_CYAN (9 << 4)
+#define GC2145_TEST_GREEN (6 << 4)
+#define GC2145_TEST_MAGENTA (10 << 4)
+#define GC2145_TEST_RED (5 << 4)
+#define GC2145_TEST_BLACK (0)
+
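+/* Register values matching the test_pattern_menu entries above */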
+static const u8 test_pattern_val[] = {
+ 0,
+ GC2145_TEST_PATTERN_ENABLE,
+ GC2145_TEST_UNIFORM | GC2145_TEST_WHITE,
+ GC2145_TEST_UNIFORM | GC2145_TEST_YELLOW,
+ GC2145_TEST_UNIFORM | GC2145_TEST_CYAN,
+ GC2145_TEST_UNIFORM | GC2145_TEST_GREEN,
+ GC2145_TEST_UNIFORM | GC2145_TEST_MAGENTA,
+ GC2145_TEST_UNIFORM | GC2145_TEST_RED,
+ GC2145_TEST_UNIFORM | GC2145_TEST_BLACK,
+};
+
+static const struct v4l2_subdev_core_ops gc2145_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops gc2145_video_ops = {
+ .s_stream = gc2145_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops gc2145_pad_ops = {
+ .enum_mbus_code = gc2145_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = gc2145_set_pad_format,
+ .get_selection = gc2145_get_selection,
+ .enum_frame_size = gc2145_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops gc2145_subdev_ops = {
+ .core = &gc2145_core_ops,
+ .video = &gc2145_video_ops,
+ .pad = &gc2145_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops gc2145_subdev_internal_ops = {
+ .init_state = gc2145_init_state,
+};
+
+static int gc2145_set_ctrl_test_pattern(struct gc2145 *gc2145, int value)
+{
+ int ret = 0;
+
+ if (!value) {
+ /* Disable test pattern */
+ cci_write(gc2145->regmap, GC2145_REG_DEBUG_MODE2, 0, &ret);
+ return cci_write(gc2145->regmap, GC2145_REG_DEBUG_MODE3, 0,
+ &ret);
+ }
+
+ /* Enable test pattern, colored or uniform */
+ cci_write(gc2145->regmap, GC2145_REG_DEBUG_MODE2,
+ GC2145_TEST_PATTERN_ENABLE | GC2145_TEST_PATTERN_UXGA, &ret);
+
+ if (!(test_pattern_val[value] & GC2145_TEST_UNIFORM))
+ return cci_write(gc2145->regmap, GC2145_REG_DEBUG_MODE3, 0,
+ &ret);
+
+ /* Uniform */
+ return cci_write(gc2145->regmap, GC2145_REG_DEBUG_MODE3,
+ test_pattern_val[value], &ret);
+}
+
+static int gc2145_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = gc2145_ctrl_to_sd(ctrl);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct gc2145 *gc2145 = to_gc2145(sd);
+ int ret;
+
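+	/*
+	 * Only apply controls while the sensor is powered up; the cached
+	 * control values are programmed by __v4l2_ctrl_handler_setup() when
+	 * streaming starts.
+	 */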
+ if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HBLANK:
+ ret = cci_write(gc2145->regmap, GC2145_REG_HBLANK, ctrl->val,
+ NULL);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = cci_write(gc2145->regmap, GC2145_REG_VBLANK, ctrl->val,
+ NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = gc2145_set_ctrl_test_pattern(gc2145, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = cci_update_bits(gc2145->regmap, GC2145_REG_ANALOG_MODE1,
+ BIT(0), (ctrl->val ? BIT(0) : 0), NULL);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = cci_update_bits(gc2145->regmap, GC2145_REG_ANALOG_MODE1,
+ BIT(1), (ctrl->val ? BIT(1) : 0), NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops gc2145_ctrl_ops = {
+ .s_ctrl = gc2145_s_ctrl,
+};
+
+/* Initialize control handlers */
+static int gc2145_init_controls(struct gc2145 *gc2145)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&gc2145->sd);
+ const struct v4l2_ctrl_ops *ops = &gc2145_ctrl_ops;
+ struct gc2145_ctrls *ctrls = &gc2145->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ struct v4l2_fwnode_device_properties props;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(hdl, 12);
+ if (ret)
+ return ret;
+
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ GC2145_640_480_PIXELRATE,
+ GC2145_1600_1200_PIXELRATE, 1,
+ supported_modes[0].pixel_rate);
+
+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(gc2145_link_freq_menu) - 1,
+ 0, gc2145_link_freq_menu);
+ if (ctrls->link_freq)
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrls->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK,
+ 0, 0xfff, 1, GC2145_640_480_HBLANK);
+
+ ctrls->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ 0, 0x1fff, 1, GC2145_640_480_VBLANK);
+
+ ctrls->test_pattern =
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0, 0, test_pattern_menu);
+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ dev_err(&client->dev, "control init failed (%d)\n", ret);
+ goto error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, &gc2145_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
+ gc2145->sd.ctrl_handler = hdl;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(hdl);
+
+ return ret;
+}
+
+static int gc2145_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_fwnode_endpoint ep_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep_cfg);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return ret;
+
+ /* Check the number of MIPI CSI2 data lanes */
+ if (ep_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "only 2 data lanes are currently supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check the link frequency set in device tree */
+ if (!ep_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ep_cfg.nr_of_link_frequencies != 3 ||
+ ep_cfg.link_frequencies[0] != GC2145_640_480_LINKFREQ ||
+ ep_cfg.link_frequencies[1] != GC2145_1280_720_LINKFREQ ||
+ ep_cfg.link_frequencies[2] != GC2145_1600_1200_LINKFREQ) {
+ dev_err(dev, "Invalid link-frequencies provided\n");
+ ret = -EINVAL;
+ }
+
+out:
+ v4l2_fwnode_endpoint_free(&ep_cfg);
+
+ return ret;
+}
+
+static int gc2145_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ unsigned int xclk_freq;
+ struct gc2145 *gc2145;
+ int ret;
+
+ gc2145 = devm_kzalloc(&client->dev, sizeof(*gc2145), GFP_KERNEL);
+ if (!gc2145)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&gc2145->sd, client, &gc2145_subdev_ops);
+ gc2145->sd.internal_ops = &gc2145_subdev_internal_ops;
+
+ /* Check the hardware configuration in device tree */
+ if (gc2145_check_hwcfg(dev))
+ return -EINVAL;
+
+ /* Get system clock (xclk) */
+ gc2145->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(gc2145->xclk))
+ return dev_err_probe(dev, PTR_ERR(gc2145->xclk),
+ "failed to get xclk\n");
+
+ xclk_freq = clk_get_rate(gc2145->xclk);
+ if (xclk_freq != GC2145_XCLK_FREQ) {
+ dev_err(dev, "xclk frequency not supported: %d Hz\n",
+ xclk_freq);
+ return -EINVAL;
+ }
+
+ ret = gc2145_get_regulators(gc2145);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulators\n");
+
+ /* Request optional reset pin */
+ gc2145->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gc2145->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(gc2145->reset_gpio),
+ "failed to get reset_gpio\n");
+
+ /* Request optional powerdown pin */
+ gc2145->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gc2145->powerdown_gpio))
+ return dev_err_probe(dev, PTR_ERR(gc2145->powerdown_gpio),
+ "failed to get powerdown_gpio\n");
+
+ /* Initialise the regmap for further cci access */
+ gc2145->regmap = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(gc2145->regmap))
+ return dev_err_probe(dev, PTR_ERR(gc2145->regmap),
+ "failed to get cci regmap\n");
+
+ /*
+ * The sensor must be powered for gc2145_identify_module()
+ * to be able to read the CHIP_ID register
+ */
+ ret = gc2145_power_on(dev);
+ if (ret)
+ return ret;
+
+ ret = gc2145_identify_module(gc2145);
+ if (ret)
+ goto error_power_off;
+
+ /* Set default mode */
+ gc2145->mode = &supported_modes[0];
+
+ ret = gc2145_init_controls(gc2145);
+ if (ret)
+ goto error_power_off;
+
+ /* Initialize subdev */
+ gc2145->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ gc2145->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ gc2145->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&gc2145->sd.entity, 1, &gc2145->pad);
+ if (ret) {
+ dev_err(dev, "failed to init entity pads: %d\n", ret);
+ goto error_handler_free;
+ }
+
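+	/* Share the control handler lock to serialize the subdev state */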
+ gc2145->sd.state_lock = gc2145->ctrls.handler.lock;
+ ret = v4l2_subdev_init_finalize(&gc2145->sd);
+ if (ret < 0) {
+ dev_err(dev, "subdev init error: %d\n", ret);
+ goto error_media_entity;
+ }
+
+ /* Enable runtime PM and turn off the device */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(dev);
+
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&gc2145->sd);
+ if (ret < 0) {
+ dev_err(dev, "failed to register sensor sub-device: %d\n", ret);
+ goto error_subdev_cleanup;
+ }
+
+ return 0;
+
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&gc2145->sd);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+error_media_entity:
+ media_entity_cleanup(&gc2145->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(&gc2145->ctrls.handler);
+
+error_power_off:
+ gc2145_power_off(dev);
+
+ return ret;
+}
+
+static void gc2145_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc2145 *gc2145 = to_gc2145(sd);
+
+ v4l2_subdev_cleanup(sd);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&gc2145->ctrls.handler);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ gc2145_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+}
+
+static const struct of_device_id gc2145_dt_ids[] = {
+ { .compatible = "galaxycore,gc2145" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, gc2145_dt_ids);
+
+static const struct dev_pm_ops gc2145_pm_ops = {
+ RUNTIME_PM_OPS(gc2145_power_off, gc2145_power_on, NULL)
+};
+
+static struct i2c_driver gc2145_i2c_driver = {
+ .driver = {
+ .name = "gc2145",
+ .of_match_table = gc2145_dt_ids,
+ .pm = pm_ptr(&gc2145_pm_ops),
+ },
+ .probe = gc2145_probe,
+ .remove = gc2145_remove,
+};
+
+module_i2c_driver(gc2145_i2c_driver);
+
+MODULE_AUTHOR("Alain Volmat <alain.volmat@foss.st.com>");
+MODULE_DESCRIPTION("GalaxyCore GC2145 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index f6ea9b7b9..38c77d515 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -935,7 +935,7 @@ __hi556_get_pad_crop(struct hi556 *hi556,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&hi556->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &hi556->cur_mode->crop;
}
@@ -1075,7 +1075,7 @@ static int hi556_set_format(struct v4l2_subdev *sd,
mutex_lock(&hi556->mutex);
hi556_assign_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
hi556->cur_mode = mode;
__v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
@@ -1109,9 +1109,8 @@ static int hi556_get_format(struct v4l2_subdev *sd,
mutex_lock(&hi556->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&hi556->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
@@ -1157,10 +1156,10 @@ static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&hi556->mutex);
hi556_assign_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
/* Initialize try_crop rectangle. */
- try_crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
+ try_crop = v4l2_subdev_state_get_crop(fh->state, 0);
try_crop->top = HI556_PIXEL_ARRAY_TOP;
try_crop->left = HI556_PIXEL_ARRAY_LEFT;
try_crop->width = HI556_PIXEL_ARRAY_WIDTH;
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 825fc8dc4..9c565ec03 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -1705,7 +1705,7 @@ static int hi846_set_format(struct v4l2_subdev *sd,
}
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = *mf;
+ *v4l2_subdev_state_get_format(sd_state, format->pad) = *mf;
return 0;
}
@@ -1783,9 +1783,8 @@ static int hi846_get_format(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(&hi846->sd,
- sd_state,
- format->pad);
+ format->format = *v4l2_subdev_state_get_format(sd_state,
+ format->pad);
return 0;
}
@@ -1852,7 +1851,7 @@ static int hi846_get_selection(struct v4l2_subdev *sd,
mutex_lock(&hi846->mutex);
switch (sel->which) {
case V4L2_SUBDEV_FORMAT_TRY:
- v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ v4l2_subdev_state_get_crop(sd_state, sel->pad);
break;
case V4L2_SUBDEV_FORMAT_ACTIVE:
sel->r = hi846->cur_mode->crop;
@@ -1872,13 +1871,13 @@ static int hi846_get_selection(struct v4l2_subdev *sd,
}
}
-static int hi846_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int hi846_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct hi846 *hi846 = to_hi846(sd);
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
mutex_lock(&hi846->mutex);
mf->code = HI846_MEDIA_BUS_FORMAT;
@@ -1896,7 +1895,6 @@ static const struct v4l2_subdev_video_ops hi846_video_ops = {
};
static const struct v4l2_subdev_pad_ops hi846_pad_ops = {
- .init_cfg = hi846_init_cfg,
.enum_frame_size = hi846_enum_frame_size,
.enum_mbus_code = hi846_enum_mbus_code,
.set_fmt = hi846_set_format,
@@ -1909,6 +1907,10 @@ static const struct v4l2_subdev_ops hi846_subdev_ops = {
.pad = &hi846_pad_ops,
};
+static const struct v4l2_subdev_internal_ops hi846_internal_ops = {
+ .init_state = hi846_init_state,
+};
+
static const struct media_entity_operations hi846_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -2072,6 +2074,7 @@ static int hi846_probe(struct i2c_client *client)
return ret;
v4l2_i2c_subdev_init(&hi846->sd, client, &hi846_subdev_ops);
+ hi846->sd.internal_ops = &hi846_internal_ops;
mutex_init(&hi846->mutex);
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 4075c3898..72c60747a 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -2655,7 +2655,7 @@ static int hi847_set_format(struct v4l2_subdev *sd,
mutex_lock(&hi847->mutex);
hi847_assign_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) =
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) =
fmt->format;
} else {
hi847->cur_mode = mode;
@@ -2690,9 +2690,8 @@ static int hi847_get_format(struct v4l2_subdev *sd,
mutex_lock(&hi847->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&hi847->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
hi847_assign_pad_format(hi847->cur_mode, &fmt->format);
@@ -2737,7 +2736,7 @@ static int hi847_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&hi847->mutex);
hi847_assign_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
mutex_unlock(&hi847->mutex);
return 0;
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index a9b0aea1a..639e05340 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -395,7 +395,7 @@ static int imx208_write_regs(struct imx208 *imx208,
static int imx208_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
/* Initialize try_fmt */
try_fmt->width = supported_modes[0].width;
@@ -548,9 +548,8 @@ static int __imx208_get_pad_format(struct imx208 *imx208,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&imx208->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
imx208_mode_to_pad_format(imx208, imx208->cur_mode, fmt);
@@ -591,7 +590,7 @@ static int imx208_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx208_mode_to_pad_format(imx208, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
imx208->cur_mode = mode;
__v4l2_ctrl_s_ctrl(imx208->link_freq, mode->link_freq_index);
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 4f77ea02c..b148b1bd2 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -19,12 +19,31 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+#define IMX214_REG_MODE_SELECT 0x0100
+#define IMX214_MODE_STANDBY 0x00
+#define IMX214_MODE_STREAMING 0x01
+
#define IMX214_DEFAULT_CLK_FREQ 24000000
#define IMX214_DEFAULT_LINK_FREQ 480000000
#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+/* Exposure control */
+#define IMX214_REG_EXPOSURE 0x0202
+#define IMX214_EXPOSURE_MIN 0
+#define IMX214_EXPOSURE_MAX 3184
+#define IMX214_EXPOSURE_STEP 1
+#define IMX214_EXPOSURE_DEFAULT 3184
+
+/* IMX214 native and active pixel array size */
+#define IMX214_NATIVE_WIDTH 4224U
+#define IMX214_NATIVE_HEIGHT 3136U
+#define IMX214_PIXEL_ARRAY_LEFT 8U
+#define IMX214_PIXEL_ARRAY_TOP 8U
+#define IMX214_PIXEL_ARRAY_WIDTH 4208U
+#define IMX214_PIXEL_ARRAY_HEIGHT 3120U
+
static const char * const imx214_supply_name[] = {
"vdda",
"vddd",
@@ -538,7 +557,7 @@ __imx214_get_pad_format(struct imx214 *imx214,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&imx214->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx214->fmt;
default:
@@ -568,7 +587,7 @@ __imx214_get_pad_crop(struct imx214 *imx214,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&imx214->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &imx214->crop;
default:
@@ -623,18 +642,35 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
{
struct imx214 *imx214 = to_imx214(sd);
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ mutex_lock(&imx214->mutex);
+ sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
+ sel->which);
+ mutex_unlock(&imx214->mutex);
+ return 0;
- mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
- sel->which);
- mutex_unlock(&imx214->mutex);
- return 0;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = IMX214_NATIVE_WIDTH;
+ sel->r.height = IMX214_NATIVE_HEIGHT;
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = IMX214_PIXEL_ARRAY_TOP;
+ sel->r.left = IMX214_PIXEL_ARRAY_LEFT;
+ sel->r.width = IMX214_PIXEL_ARRAY_WIDTH;
+ sel->r.height = IMX214_PIXEL_ARRAY_HEIGHT;
+ return 0;
+ }
+
+ return -EINVAL;
}
-static int imx214_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int imx214_entity_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { };
@@ -665,7 +701,7 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_EXPOSURE:
vals[1] = ctrl->val;
vals[0] = ctrl->val >> 8;
- ret = regmap_bulk_write(imx214->regmap, 0x202, vals, 2);
+ ret = regmap_bulk_write(imx214->regmap, IMX214_REG_EXPOSURE, vals, 2);
if (ret < 0)
dev_err(imx214->dev, "Error %d\n", ret);
ret = 0;
@@ -684,6 +720,76 @@ static const struct v4l2_ctrl_ops imx214_ctrl_ops = {
.s_ctrl = imx214_set_ctrl,
};
+static int imx214_ctrls_init(struct imx214 *imx214)
+{
+ static const s64 link_freq[] = {
+ IMX214_DEFAULT_LINK_FREQ
+ };
+ static const struct v4l2_area unit_size = {
+ .width = 1120,
+ .height = 1120,
+ };
+ struct v4l2_fwnode_device_properties props;
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ int ret;
+
+ ret = v4l2_fwnode_device_parse(imx214->dev, &props);
+ if (ret < 0)
+ return ret;
+
+ ctrl_hdlr = &imx214->ctrls;
+ ret = v4l2_ctrl_handler_init(&imx214->ctrls, 6);
+ if (ret)
+ return ret;
+
+ imx214->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, NULL,
+ V4L2_CID_PIXEL_RATE, 0,
+ IMX214_DEFAULT_PIXEL_RATE, 1,
+ IMX214_DEFAULT_PIXEL_RATE);
+
+ imx214->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, NULL,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq) - 1,
+ 0, link_freq);
+ if (imx214->link_freq)
+ imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /*
+ * WARNING!
+ * Values obtained reverse engineering blobs and/or devices.
+ * Ranges and functionality might be wrong.
+ *
+ * Sony, please release some register set documentation for the
+ * device.
+ *
+ * Yours sincerely, Ricardo.
+ */
+ imx214->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ IMX214_EXPOSURE_MIN,
+ IMX214_EXPOSURE_MAX,
+ IMX214_EXPOSURE_STEP,
+ IMX214_EXPOSURE_DEFAULT);
+
+ imx214->unit_size = v4l2_ctrl_new_std_compound(ctrl_hdlr,
+ NULL,
+ V4L2_CID_UNIT_CELL_SIZE,
+ v4l2_ctrl_ptr_create((void *)&unit_size));
+
+ v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx214_ctrl_ops, &props);
+
+ ret = ctrl_hdlr->error;
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ dev_err(imx214->dev, "failed to add controls: %d\n", ret);
+ return ret;
+ }
+
+ imx214->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
#define MAX_CMD 4
static int imx214_write_table(struct imx214 *imx214,
const struct reg_8 table[])
@@ -743,7 +849,7 @@ static int imx214_start_streaming(struct imx214 *imx214)
dev_err(imx214->dev, "could not sync v4l2 controls\n");
goto error;
}
- ret = regmap_write(imx214->regmap, 0x100, 1);
+ ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STREAMING);
if (ret < 0) {
dev_err(imx214->dev, "could not send start table %d\n", ret);
goto error;
@@ -761,7 +867,7 @@ static int imx214_stop_streaming(struct imx214 *imx214)
{
int ret;
- ret = regmap_write(imx214->regmap, 0x100, 0);
+ ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY);
if (ret < 0)
dev_err(imx214->dev, "could not send stop table %d\n", ret);
@@ -795,9 +901,17 @@ err_rpm_put:
return ret;
}
-static int imx214_g_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_frame_interval *fival)
+static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fival)
{
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
fival->interval.numerator = 1;
fival->interval.denominator = IMX214_FPS;
@@ -828,8 +942,6 @@ static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
static const struct v4l2_subdev_video_ops imx214_video_ops = {
.s_stream = imx214_s_stream,
- .g_frame_interval = imx214_g_frame_interval,
- .s_frame_interval = imx214_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
@@ -839,7 +951,8 @@ static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
.get_fmt = imx214_get_format,
.set_fmt = imx214_set_format,
.get_selection = imx214_get_selection,
- .init_cfg = imx214_entity_init_cfg,
+ .get_frame_interval = imx214_get_frame_interval,
+ .set_frame_interval = imx214_get_frame_interval,
};
static const struct v4l2_subdev_ops imx214_subdev_ops = {
@@ -848,6 +961,10 @@ static const struct v4l2_subdev_ops imx214_subdev_ops = {
.pad = &imx214_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
+ .init_state = imx214_entity_init_state,
+};
+
static const struct regmap_config sensor_regmap_config = {
.reg_bits = 16,
.val_bits = 8,
@@ -907,13 +1024,6 @@ static int imx214_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct imx214 *imx214;
- static const s64 link_freq[] = {
- IMX214_DEFAULT_LINK_FREQ,
- };
- static const struct v4l2_area unit_size = {
- .width = 1120,
- .height = 1120,
- };
int ret;
ret = imx214_parse_fwnode(dev);
@@ -957,6 +1067,7 @@ static int imx214_probe(struct i2c_client *client)
}
v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
+ imx214->sd.internal_ops = &imx214_internal_ops;
/*
* Enable power initially, to avoid warnings
@@ -968,45 +1079,10 @@ static int imx214_probe(struct i2c_client *client)
pm_runtime_enable(imx214->dev);
pm_runtime_idle(imx214->dev);
- v4l2_ctrl_handler_init(&imx214->ctrls, 3);
-
- imx214->pixel_rate = v4l2_ctrl_new_std(&imx214->ctrls, NULL,
- V4L2_CID_PIXEL_RATE, 0,
- IMX214_DEFAULT_PIXEL_RATE, 1,
- IMX214_DEFAULT_PIXEL_RATE);
- imx214->link_freq = v4l2_ctrl_new_int_menu(&imx214->ctrls, NULL,
- V4L2_CID_LINK_FREQ,
- ARRAY_SIZE(link_freq) - 1,
- 0, link_freq);
- if (imx214->link_freq)
- imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-
- /*
- * WARNING!
- * Values obtained reverse engineering blobs and/or devices.
- * Ranges and functionality might be wrong.
- *
- * Sony, please release some register set documentation for the
- * device.
- *
- * Yours sincerely, Ricardo.
- */
- imx214->exposure = v4l2_ctrl_new_std(&imx214->ctrls, &imx214_ctrl_ops,
- V4L2_CID_EXPOSURE,
- 0, 3184, 1, 0x0c70);
-
- imx214->unit_size = v4l2_ctrl_new_std_compound(&imx214->ctrls,
- NULL,
- V4L2_CID_UNIT_CELL_SIZE,
- v4l2_ctrl_ptr_create((void *)&unit_size));
- ret = imx214->ctrls.error;
- if (ret) {
- dev_err(&client->dev, "%s control init failed (%d)\n",
- __func__, ret);
- goto free_ctrl;
- }
+ ret = imx214_ctrls_init(imx214);
+ if (ret < 0)
+ goto error_power_off;
- imx214->sd.ctrl_handler = &imx214->ctrls;
mutex_init(&imx214->mutex);
imx214->ctrls.lock = &imx214->mutex;
@@ -1021,7 +1097,7 @@ static int imx214_probe(struct i2c_client *client)
goto free_ctrl;
}
- imx214_entity_init_cfg(&imx214->sd, NULL);
+ imx214_entity_init_state(&imx214->sd, NULL);
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
@@ -1036,6 +1112,7 @@ free_entity:
free_ctrl:
mutex_destroy(&imx214->mutex);
v4l2_ctrl_handler_free(&imx214->ctrls);
+error_power_off:
pm_runtime_disable(imx214->dev);
return ret;
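Beyond that conversion, imx214 grows a fuller get_selection(): the CROP-only handler becomes a switch that also answers V4L2_SEL_TGT_NATIVE_SIZE, V4L2_SEL_TGT_CROP_DEFAULT and V4L2_SEL_TGT_CROP_BOUNDS from the new pixel-array constants. In the driver the CROP case still reads the rectangle from the pad state under the sensor mutex; the sketch below folds it into the static case for brevity, and the MYDRV_* geometry is a placeholder:

/* Selection-target sketch; MYDRV_* values are illustrative placeholders. */
#include <media/v4l2-subdev.h>

#define MYDRV_NATIVE_WIDTH	4224U
#define MYDRV_NATIVE_HEIGHT	3136U
#define MYDRV_ARRAY_LEFT	8U
#define MYDRV_ARRAY_TOP		8U
#define MYDRV_ARRAY_WIDTH	4208U
#define MYDRV_ARRAY_HEIGHT	3120U

static int mydrv_get_selection(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_selection *sel)
{
	switch (sel->target) {
	case V4L2_SEL_TGT_NATIVE_SIZE:
		sel->r = (struct v4l2_rect){
			.left = 0, .top = 0,
			.width = MYDRV_NATIVE_WIDTH,
			.height = MYDRV_NATIVE_HEIGHT,
		};
		return 0;

	case V4L2_SEL_TGT_CROP:	/* imx214 reads this from the pad state */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		sel->r = (struct v4l2_rect){
			.left = MYDRV_ARRAY_LEFT, .top = MYDRV_ARRAY_TOP,
			.width = MYDRV_ARRAY_WIDTH,
			.height = MYDRV_ARRAY_HEIGHT,
		};
		return 0;
	}

	return -EINVAL;
}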
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 8436880dc..e17ef2e9d 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -374,7 +374,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
int ret = 0;
state = v4l2_subdev_get_locked_active_state(&imx219->sd);
- format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
if (ctrl->id == V4L2_CID_VBLANK) {
int exposure_max, exposure_def;
@@ -593,8 +593,8 @@ static int imx219_set_framefmt(struct imx219 *imx219,
u64 bin_h, bin_v;
int ret = 0;
- format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
- crop = v4l2_subdev_get_pad_crop(&imx219->sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
switch (format->code) {
case MEDIA_BUS_FMT_SRGGB8_1X8:
@@ -826,7 +826,7 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
- format = v4l2_subdev_get_pad_format(sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
*format = fmt->format;
/*
@@ -836,7 +836,7 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
bin_h = min(IMX219_PIXEL_ARRAY_WIDTH / format->width, 2U);
bin_v = min(IMX219_PIXEL_ARRAY_HEIGHT / format->height, 2U);
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
crop->width = format->width * bin_h;
crop->height = format->height * bin_v;
crop->left = (IMX219_NATIVE_WIDTH - crop->width) / 2;
@@ -880,7 +880,7 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
{
switch (sel->target) {
case V4L2_SEL_TGT_CROP: {
- sel->r = *v4l2_subdev_get_pad_crop(sd, state, 0);
+ sel->r = *v4l2_subdev_state_get_crop(state, 0);
return 0;
}
@@ -905,8 +905,8 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
}
-static int imx219_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int imx219_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
@@ -933,7 +933,6 @@ static const struct v4l2_subdev_video_ops imx219_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
- .init_cfg = imx219_init_cfg,
.enum_mbus_code = imx219_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx219_set_pad_format,
@@ -947,6 +946,9 @@ static const struct v4l2_subdev_ops imx219_subdev_ops = {
.pad = &imx219_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx219_internal_ops = {
+ .init_state = imx219_init_state,
+};
/* -----------------------------------------------------------------------------
* Power management
@@ -1098,6 +1100,7 @@ static int imx219_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&imx219->sd, client, &imx219_subdev_ops);
+ imx219->sd.internal_ops = &imx219_internal_ops;
/* Check the hardware configuration in device tree */
if (imx219_check_hwcfg(dev, imx219))
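imx219 shows the other half of the migration: .init_cfg leaves v4l2_subdev_pad_ops and becomes .init_state in v4l2_subdev_internal_ops, which the core invokes whenever a subdev state is allocated, and probe() now hooks the internal ops up right after v4l2_i2c_subdev_init(). Reduced to essentials, again with hypothetical mydrv names:

/* init_cfg -> internal_ops.init_state wiring sketch; names illustrative. */
#include <linux/media-bus-format.h>
#include <media/v4l2-subdev.h>

static int mydrv_init_state(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *sd_state)
{
	struct v4l2_mbus_framefmt *fmt =
		v4l2_subdev_state_get_format(sd_state, 0);

	/* Placeholder defaults for the illustration. */
	fmt->width = 1920;
	fmt->height = 1080;
	fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
	fmt->field = V4L2_FIELD_NONE;

	return 0;
}

static const struct v4l2_subdev_pad_ops mydrv_pad_ops = {
	/* no .init_cfg here any more */
	.get_fmt = v4l2_subdev_get_fmt,
};

static const struct v4l2_subdev_internal_ops mydrv_internal_ops = {
	.init_state = mydrv_init_state,
};

/*
 * In probe():
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &mydrv_subdev_ops);
 *	sensor->sd.internal_ops = &mydrv_internal_ops;
 */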
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index b3827f4bc..a577afb53 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -708,7 +708,7 @@ static int imx258_write_regs(struct imx258 *imx258,
static int imx258_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
/* Initialize try_fmt */
try_fmt->width = supported_modes[0].width;
@@ -862,9 +862,8 @@ static int __imx258_get_pad_format(struct imx258 *imx258,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&imx258->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
imx258_update_pad_format(imx258->cur_mode, fmt);
@@ -908,7 +907,7 @@ static int imx258_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx258_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx258->cur_mode = mode;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index f33b692e6..352da68b8 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -594,8 +594,8 @@ static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl);
static int imx274_set_exposure(struct stimx274 *priv, int val);
static int imx274_set_vflip(struct stimx274 *priv, int val);
static int imx274_set_test_pattern(struct stimx274 *priv, int val);
-static int imx274_set_frame_interval(struct stimx274 *priv,
- struct v4l2_fract frame_interval);
+static int __imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval);
static inline void msleep_range(unsigned int delay_base)
{
@@ -1018,8 +1018,8 @@ static int __imx274_change_compose(struct stimx274 *imx274,
int best_goodness = INT_MIN;
if (which == V4L2_SUBDEV_FORMAT_TRY) {
- cur_crop = &sd_state->pads->try_crop;
- tgt_fmt = &sd_state->pads->try_fmt;
+ cur_crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ tgt_fmt = v4l2_subdev_state_get_format(sd_state, 0);
} else {
cur_crop = &imx274->crop;
tgt_fmt = &imx274->format;
@@ -1112,7 +1112,7 @@ static int imx274_set_fmt(struct v4l2_subdev *sd,
*/
fmt->field = V4L2_FIELD_NONE;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- sd_state->pads->try_fmt = *fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
else
imx274->format = *fmt;
@@ -1143,8 +1143,8 @@ static int imx274_get_selection(struct v4l2_subdev *sd,
}
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- src_crop = &sd_state->pads->try_crop;
- src_fmt = &sd_state->pads->try_fmt;
+ src_crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, 0);
} else {
src_crop = &imx274->crop;
src_fmt = &imx274->format;
@@ -1215,7 +1215,7 @@ static int imx274_set_selection_crop(struct stimx274 *imx274,
sel->r = new_crop;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
- tgt_crop = &sd_state->pads->try_crop;
+ tgt_crop = v4l2_subdev_state_get_crop(sd_state, 0);
else
tgt_crop = &imx274->crop;
@@ -1327,20 +1327,19 @@ static int imx274_apply_trimming(struct stimx274 *imx274)
return err;
}
-/**
- * imx274_g_frame_interval - Get the frame interval
- * @sd: Pointer to V4L2 Sub device structure
- * @fi: Pointer to V4l2 Sub device frame interval structure
- *
- * This function is used to get the frame interval.
- *
- * Return: 0 on success
- */
-static int imx274_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int imx274_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct stimx274 *imx274 = to_imx274(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
fi->interval = imx274->frame_interval;
dev_dbg(&imx274->client->dev, "%s frame rate = %d / %d\n",
__func__, imx274->frame_interval.numerator,
@@ -1349,29 +1348,28 @@ static int imx274_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/**
- * imx274_s_frame_interval - Set the frame interval
- * @sd: Pointer to V4L2 Sub device structure
- * @fi: Pointer to V4l2 Sub device frame interval structure
- *
- * This function is used to set the frame intervavl.
- *
- * Return: 0 on success
- */
-static int imx274_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int imx274_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct stimx274 *imx274 = to_imx274(sd);
struct v4l2_ctrl *ctrl = imx274->ctrls.exposure;
int min, max, def;
int ret;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
ret = pm_runtime_resume_and_get(&imx274->client->dev);
if (ret < 0)
return ret;
mutex_lock(&imx274->lock);
- ret = imx274_set_frame_interval(imx274, fi->interval);
+ ret = __imx274_set_frame_interval(imx274, fi->interval);
if (!ret) {
fi->interval = imx274->frame_interval;
@@ -1466,8 +1464,8 @@ static int imx274_s_stream(struct v4l2_subdev *sd, int on)
* are changed.
* gain is not affected.
*/
- ret = imx274_set_frame_interval(imx274,
- imx274->frame_interval);
+ ret = __imx274_set_frame_interval(imx274,
+ imx274->frame_interval);
if (ret)
goto fail;
@@ -1830,7 +1828,7 @@ fail:
}
/*
- * imx274_set_frame_interval - Function called when setting frame interval
+ * __imx274_set_frame_interval - Function called when setting frame interval
* @priv: Pointer to device structure
* @frame_interval: Variable for frame interval
*
@@ -1839,8 +1837,8 @@ fail:
*
* Return: 0 on success
*/
-static int imx274_set_frame_interval(struct stimx274 *priv,
- struct v4l2_fract frame_interval)
+static int __imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval)
{
int err;
u32 frame_length, req_frame_rate;
@@ -1927,11 +1925,11 @@ static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
.set_fmt = imx274_set_fmt,
.get_selection = imx274_get_selection,
.set_selection = imx274_set_selection,
+ .get_frame_interval = imx274_get_frame_interval,
+ .set_frame_interval = imx274_set_frame_interval,
};
static const struct v4l2_subdev_video_ops imx274_video_ops = {
- .g_frame_interval = imx274_g_frame_interval,
- .s_frame_interval = imx274_s_frame_interval,
.s_stream = imx274_s_stream,
};
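imx274 is the clearest example of the frame-interval move: g_frame_interval()/s_frame_interval() leave v4l2_subdev_video_ops and turn into get_frame_interval()/set_frame_interval() in v4l2_subdev_pad_ops, gaining a subdev-state argument and a 'which' selector. None of the converted sensors track the interval in the TRY state yet, hence the recurring FIXME and the explicit rejection of anything but V4L2_SUBDEV_FORMAT_ACTIVE. Sketched minimally, with a made-up fixed 30 fps rate:

/* Pad-op frame-interval sketch; the fixed 1/30 s interval is illustrative. */
#include <media/v4l2-subdev.h>

static int mydrv_get_frame_interval(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *sd_state,
				    struct v4l2_subdev_frame_interval *fi)
{
	/* No TRY-state support yet: only the active interval is tracked. */
	if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	fi->interval.numerator = 1;
	fi->interval.denominator = 30;

	return 0;
}

/*
 * Wired into the pad ops as:
 *	.get_frame_interval = mydrv_get_frame_interval,
 *	.set_frame_interval = mydrv_get_frame_interval,
 * (fixed-rate sensors may reuse the getter as the setter, as imx214 does above)
 */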
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 6dacd38ae..4150e6e4b 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -758,7 +758,7 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
return 0;
state = v4l2_subdev_get_locked_active_state(&imx290->sd);
- format = v4l2_subdev_get_pad_format(&imx290->sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
switch (ctrl->id) {
case V4L2_CID_ANALOGUE_GAIN:
@@ -994,7 +994,7 @@ static int imx290_start_streaming(struct imx290 *imx290,
}
/* Apply the register values related to current frame format */
- format = v4l2_subdev_get_pad_format(&imx290->sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
ret = imx290_setup_format(imx290, format);
if (ret < 0) {
dev_err(imx290->dev, "Could not set frame format - %d\n", ret);
@@ -1132,7 +1132,7 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
fmt->format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
fmt->format.xfer_func = V4L2_XFER_FUNC_NONE;
- format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
+ format = v4l2_subdev_state_get_format(sd_state, 0);
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
imx290->current_mode = mode;
@@ -1155,7 +1155,7 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP: {
- format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
+ format = v4l2_subdev_state_get_format(sd_state, 0);
/*
* The sensor moves the readout by 1 pixel based on flips to
@@ -1195,8 +1195,8 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
}
}
-static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int imx290_entity_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
@@ -1221,7 +1221,6 @@ static const struct v4l2_subdev_video_ops imx290_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx290_pad_ops = {
- .init_cfg = imx290_entity_init_cfg,
.enum_mbus_code = imx290_enum_mbus_code,
.enum_frame_size = imx290_enum_frame_size,
.get_fmt = v4l2_subdev_get_fmt,
@@ -1235,6 +1234,10 @@ static const struct v4l2_subdev_ops imx290_subdev_ops = {
.pad = &imx290_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx290_internal_ops = {
+ .init_state = imx290_entity_init_state,
+};
+
static const struct media_entity_operations imx290_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -1248,6 +1251,7 @@ static int imx290_subdev_init(struct imx290 *imx290)
imx290->current_mode = &imx290_modes_ptr(imx290)[0];
v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
+ imx290->sd.internal_ops = &imx290_internal_ops;
imx290->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
imx290->sd.dev = imx290->dev;
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 94aac9d27..83149fa72 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -323,7 +323,7 @@ static int imx296_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
- format = v4l2_subdev_get_pad_format(&sensor->subdev, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
@@ -511,8 +511,8 @@ static int imx296_setup(struct imx296 *sensor, struct v4l2_subdev_state *state)
unsigned int i;
int ret = 0;
- format = v4l2_subdev_get_pad_format(&sensor->subdev, state, 0);
- crop = v4l2_subdev_get_pad_crop(&sensor->subdev, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
for (i = 0; i < ARRAY_SIZE(imx296_init_table); ++i)
imx296_write(sensor, imx296_init_table[i].reg,
@@ -662,7 +662,7 @@ static int imx296_enum_frame_size(struct v4l2_subdev *sd,
{
const struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_pad_format(sd, state, fse->pad);
+ format = v4l2_subdev_state_get_format(state, fse->pad);
if (fse->index >= 2 || fse->code != format->code)
return -EINVAL;
@@ -683,8 +683,8 @@ static int imx296_set_format(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_pad_crop(sd, state, fmt->pad);
- format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+ crop = v4l2_subdev_state_get_crop(state, fmt->pad);
+ format = v4l2_subdev_state_get_format(state, fmt->pad);
/*
* Binning is only allowed when cropping is disabled according to the
@@ -732,7 +732,7 @@ static int imx296_get_selection(struct v4l2_subdev *sd,
{
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+ sel->r = *v4l2_subdev_state_get_crop(state, sel->pad);
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
@@ -780,14 +780,14 @@ static int imx296_set_selection(struct v4l2_subdev *sd,
rect.height = min_t(unsigned int, rect.height,
IMX296_PIXEL_ARRAY_HEIGHT - rect.top);
- crop = v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
if (rect.width != crop->width || rect.height != crop->height) {
/*
* Reset the output image size if the crop rectangle size has
* been modified.
*/
- format = v4l2_subdev_get_pad_format(sd, state, sel->pad);
+ format = v4l2_subdev_state_get_format(state, sel->pad);
format->width = rect.width;
format->height = rect.height;
}
@@ -798,8 +798,8 @@ static int imx296_set_selection(struct v4l2_subdev *sd,
return 0;
}
-static int imx296_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int imx296_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_selection sel = {
.target = V4L2_SEL_TGT_CROP,
@@ -830,7 +830,6 @@ static const struct v4l2_subdev_pad_ops imx296_subdev_pad_ops = {
.set_fmt = imx296_set_format,
.get_selection = imx296_get_selection,
.set_selection = imx296_set_selection,
- .init_cfg = imx296_init_cfg,
};
static const struct v4l2_subdev_ops imx296_subdev_ops = {
@@ -838,12 +837,17 @@ static const struct v4l2_subdev_ops imx296_subdev_ops = {
.pad = &imx296_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx296_internal_ops = {
+ .init_state = imx296_init_state,
+};
+
static int imx296_subdev_init(struct imx296 *sensor)
{
struct i2c_client *client = to_i2c_client(sensor->dev);
int ret;
v4l2_i2c_subdev_init(&sensor->subdev, client, &imx296_subdev_ops);
+ sensor->subdev.internal_ops = &imx296_internal_ops;
ret = imx296_ctrls_init(sensor);
if (ret < 0)
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 5378f607f..e47eff672 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1860,7 +1860,7 @@ static int imx319_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx319 *imx319 = to_imx319(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
mutex_lock(&imx319->mutex);
@@ -2001,10 +2001,9 @@ static int imx319_do_get_pad_format(struct imx319 *imx319,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
- struct v4l2_subdev *sd = &imx319->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx319_update_pad_format(imx319, imx319->cur_mode, fmt);
@@ -2055,7 +2054,7 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx319_update_pad_format(imx319, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx319->cur_mode = mode;
@@ -2464,19 +2463,21 @@ static int imx319_probe(struct i2c_client *client)
goto error_handler_free;
}
- ret = v4l2_async_register_subdev_sensor(&imx319->sd);
- if (ret < 0)
- goto error_media_entity;
-
/* Set the device's state to active if it's in D0 state. */
if (full_power)
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
+ ret = v4l2_async_register_subdev_sensor(&imx319->sd);
+ if (ret < 0)
+ goto error_media_entity_pm;
+
return 0;
-error_media_entity:
+error_media_entity_pm:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
media_entity_cleanup(&imx319->sd.entity);
error_handler_free:
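The imx319 probe() reordering above is a lifetime fix rather than an API conversion: runtime PM is now set up before v4l2_async_register_subdev_sensor(), so the device can never be exposed to userspace with PM unconfigured, and the renamed error_media_entity_pm label unwinds the PM state when registration fails. The resulting shape, as a hedged sketch with illustrative names:

/* Probe-tail ordering sketch; mydrv_register()/full_power are illustrative. */
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

static int mydrv_register(struct i2c_client *client, struct v4l2_subdev *sd,
			  bool full_power)
{
	int ret;

	/* Enable runtime PM first ... */
	if (full_power)
		pm_runtime_set_active(&client->dev);
	pm_runtime_enable(&client->dev);
	pm_runtime_idle(&client->dev);

	/* ... then publish the subdev. */
	ret = v4l2_async_register_subdev_sensor(sd);
	if (ret < 0) {
		/* Mirror the PM setup on failure. */
		pm_runtime_disable(&client->dev);
		pm_runtime_set_suspended(&client->dev);
		return ret;
	}

	return 0;
}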
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 1196fe935..6725b3e2a 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -879,7 +879,7 @@ static int imx334_get_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
fmt->format.code = imx334->cur_code;
@@ -920,7 +920,7 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else if (imx334->cur_mode != mode || imx334->cur_code != fmt->format.code) {
imx334->cur_code = fmt->format.code;
@@ -935,14 +935,14 @@ static int imx334_set_pad_format(struct v4l2_subdev *sd,
}
/**
- * imx334_init_pad_cfg() - Initialize sub-device pad configuration
+ * imx334_init_state() - Initialize sub-device state
* @sd: pointer to imx334 V4L2 sub-device structure
* @sd_state: V4L2 sub-device state
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_init_pad_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int imx334_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct imx334 *imx334 = to_imx334(sd);
struct v4l2_subdev_format fmt = { 0 };
@@ -1190,7 +1190,6 @@ static const struct v4l2_subdev_video_ops imx334_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx334_pad_ops = {
- .init_cfg = imx334_init_pad_cfg,
.enum_mbus_code = imx334_enum_mbus_code,
.enum_frame_size = imx334_enum_frame_size,
.get_fmt = imx334_get_pad_format,
@@ -1202,6 +1201,10 @@ static const struct v4l2_subdev_ops imx334_subdev_ops = {
.pad = &imx334_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx334_internal_ops = {
+ .init_state = imx334_init_state,
+};
+
/**
* imx334_power_on() - Sensor power on sequence
* @dev: pointer to i2c device
@@ -1359,6 +1362,7 @@ static int imx334_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx334->sd, client, &imx334_subdev_ops);
+ imx334->sd.internal_ops = &imx334_internal_ops;
ret = imx334_parse_hw_config(imx334);
if (ret) {
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 964a81bec..7a37eb327 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -55,6 +55,14 @@
#define IMX335_REG_MIN 0x00
#define IMX335_REG_MAX 0xfffff
+/* IMX335 native and active pixel array size. */
+#define IMX335_NATIVE_WIDTH 2616U
+#define IMX335_NATIVE_HEIGHT 1964U
+#define IMX335_PIXEL_ARRAY_LEFT 12U
+#define IMX335_PIXEL_ARRAY_TOP 12U
+#define IMX335_PIXEL_ARRAY_WIDTH 2592U
+#define IMX335_PIXEL_ARRAY_HEIGHT 1944U
+
/**
* struct imx335_reg - imx335 sensor register
* @address: Register address
@@ -75,6 +83,12 @@ struct imx335_reg_list {
const struct imx335_reg *regs;
};
+static const char * const imx335_supply_name[] = {
+ "avdd", /* Analog (2.9V) supply */
+ "ovdd", /* Digital I/O (1.8V) supply */
+ "dvdd", /* Digital Core (1.2V) supply */
+};
+
/**
* struct imx335_mode - imx335 sensor mode structure
* @width: Frame width
@@ -108,6 +122,7 @@ struct imx335_mode {
* @sd: V4L2 sub-device
* @pad: Media pad. Only one pad supported
* @reset_gpio: Sensor reset gpio
+ * @supplies: Regulator supplies to handle power control
* @inclk: Sensor input clock
* @ctrl_handler: V4L2 control handler
* @link_freq_ctrl: Pointer to link frequency control
@@ -119,6 +134,7 @@ struct imx335_mode {
* @vblank: Vertical blanking in lines
* @cur_mode: Pointer to current selected sensor mode
* @mutex: Mutex for serializing sensor controls
+ * @cur_mbus_code: Currently selected media bus format code
*/
struct imx335 {
struct device *dev;
@@ -126,6 +142,8 @@ struct imx335 {
struct v4l2_subdev sd;
struct media_pad pad;
struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(imx335_supply_name)];
+
struct clk *inclk;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *link_freq_ctrl;
@@ -139,6 +157,7 @@ struct imx335 {
u32 vblank;
const struct imx335_mode *cur_mode;
struct mutex mutex;
+ u32 cur_mbus_code;
};
static const s64 link_freq[] = {
@@ -233,6 +252,25 @@ static const struct imx335_reg mode_2592x1940_regs[] = {
{0x3a00, 0x01},
};
+static const struct imx335_reg raw10_framefmt_regs[] = {
+ {0x3050, 0x00},
+ {0x319d, 0x00},
+ {0x341c, 0xff},
+ {0x341d, 0x01},
+};
+
+static const struct imx335_reg raw12_framefmt_regs[] = {
+ {0x3050, 0x01},
+ {0x319d, 0x01},
+ {0x341c, 0x47},
+ {0x341d, 0x00},
+};
+
+static const u32 imx335_mbus_codes[] = {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+};
+
/* Supported sensor mode configurations */
static const struct imx335_mode supported_mode = {
.width = 2592,
@@ -243,7 +281,6 @@ static const struct imx335_mode supported_mode = {
.vblank_max = 133060,
.pclk = 396000000,
.link_freq_idx = 0,
- .code = MEDIA_BUS_FMT_SRGGB12_1X12,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2592x1940_regs),
.regs = mode_2592x1940_regs,
@@ -396,7 +433,7 @@ static int imx335_update_exp_gain(struct imx335 *imx335, u32 exposure, u32 gain)
lpfr = imx335->vblank + imx335->cur_mode->height;
shutter = lpfr - exposure;
- dev_dbg(imx335->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u",
+ dev_dbg(imx335->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u\n",
exposure, gain, shutter, lpfr);
ret = imx335_write_reg(imx335, IMX335_REG_HOLD, 1, 1);
@@ -443,7 +480,7 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_VBLANK:
imx335->vblank = imx335->vblank_ctrl->val;
- dev_dbg(imx335->dev, "Received vblank %u, new lpfr %u",
+ dev_dbg(imx335->dev, "Received vblank %u, new lpfr %u\n",
imx335->vblank,
imx335->vblank + imx335->cur_mode->height);
@@ -462,7 +499,7 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
exposure = ctrl->val;
analog_gain = imx335->again_ctrl->val;
- dev_dbg(imx335->dev, "Received exp %u, analog gain %u",
+ dev_dbg(imx335->dev, "Received exp %u, analog gain %u\n",
exposure, analog_gain);
ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
@@ -471,7 +508,7 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
- dev_err(imx335->dev, "Invalid control %d", ctrl->id);
+ dev_err(imx335->dev, "Invalid control %d\n", ctrl->id);
ret = -EINVAL;
}
@@ -483,6 +520,18 @@ static const struct v4l2_ctrl_ops imx335_ctrl_ops = {
.s_ctrl = imx335_set_ctrl,
};
+static int imx335_get_format_code(struct imx335 *imx335, u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx335_mbus_codes); i++) {
+ if (imx335_mbus_codes[i] == code)
+ return imx335_mbus_codes[i];
+ }
+
+ return imx335_mbus_codes[0];
+}
+
/**
* imx335_enum_mbus_code() - Enumerate V4L2 sub-device mbus codes
* @sd: pointer to imx335 V4L2 sub-device structure
@@ -495,10 +544,10 @@ static int imx335_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->index > 0)
+ if (code->index >= ARRAY_SIZE(imx335_mbus_codes))
return -EINVAL;
- code->code = supported_mode.code;
+ code->code = imx335_mbus_codes[code->index];
return 0;
}
@@ -515,10 +564,14 @@ static int imx335_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fsize)
{
- if (fsize->index > 0)
+ struct imx335 *imx335 = to_imx335(sd);
+ u32 code;
+
+ if (fsize->index >= ARRAY_SIZE(imx335_mbus_codes))
return -EINVAL;
- if (fsize->code != supported_mode.code)
+ code = imx335_get_format_code(imx335, fsize->code);
+ if (fsize->code != code)
return -EINVAL;
fsize->min_width = supported_mode.width;
@@ -542,7 +595,7 @@ static void imx335_fill_pad_format(struct imx335 *imx335,
{
fmt->format.width = mode->width;
fmt->format.height = mode->height;
- fmt->format.code = mode->code;
+ fmt->format.code = imx335->cur_mbus_code;
fmt->format.field = V4L2_FIELD_NONE;
fmt->format.colorspace = V4L2_COLORSPACE_RAW;
fmt->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
@@ -569,7 +622,7 @@ static int imx335_get_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx335_fill_pad_format(imx335, imx335->cur_mode, fmt);
@@ -594,17 +647,22 @@ static int imx335_set_pad_format(struct v4l2_subdev *sd,
{
struct imx335 *imx335 = to_imx335(sd);
const struct imx335_mode *mode;
- int ret = 0;
+ int i, ret = 0;
mutex_lock(&imx335->mutex);
mode = &supported_mode;
+ for (i = 0; i < ARRAY_SIZE(imx335_mbus_codes); i++) {
+ if (imx335_mbus_codes[i] == fmt->format.code)
+ imx335->cur_mbus_code = imx335_mbus_codes[i];
+ }
+
imx335_fill_pad_format(imx335, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ret = imx335_update_controls(imx335, mode);
@@ -618,14 +676,14 @@ static int imx335_set_pad_format(struct v4l2_subdev *sd,
}
/**
- * imx335_init_pad_cfg() - Initialize sub-device pad configuration
+ * imx335_init_state() - Initialize sub-device state
* @sd: pointer to imx335 V4L2 sub-device structure
* @sd_state: V4L2 sub-device configuration
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx335_init_pad_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int imx335_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct imx335 *imx335 = to_imx335(sd);
struct v4l2_subdev_format fmt = { 0 };
@@ -637,6 +695,56 @@ static int imx335_init_pad_cfg(struct v4l2_subdev *sd,
}
/**
+ * imx335_get_selection() - Selection API
+ * @sd: pointer to imx335 V4L2 sub-device structure
+ * @sd_state: V4L2 sub-device configuration
+ * @sel: V4L2 selection info
+ *
+ * Return: 0 if successful, error code otherwise.
+ */
+static int imx335_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = IMX335_NATIVE_WIDTH;
+ sel->r.height = IMX335_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = IMX335_PIXEL_ARRAY_TOP;
+ sel->r.left = IMX335_PIXEL_ARRAY_LEFT;
+ sel->r.width = IMX335_PIXEL_ARRAY_WIDTH;
+ sel->r.height = IMX335_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int imx335_set_framefmt(struct imx335 *imx335)
+{
+ switch (imx335->cur_mbus_code) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return imx335_write_regs(imx335, raw10_framefmt_regs,
+ ARRAY_SIZE(raw10_framefmt_regs));
+
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return imx335_write_regs(imx335, raw12_framefmt_regs,
+ ARRAY_SIZE(raw12_framefmt_regs));
+ }
+
+ return -EINVAL;
+}
+
+/**
* imx335_start_streaming() - Start sensor stream
* @imx335: pointer to imx335 device
*
@@ -652,14 +760,21 @@ static int imx335_start_streaming(struct imx335 *imx335)
ret = imx335_write_regs(imx335, reg_list->regs,
reg_list->num_of_regs);
if (ret) {
- dev_err(imx335->dev, "fail to write initial registers");
+ dev_err(imx335->dev, "fail to write initial registers\n");
+ return ret;
+ }
+
+ ret = imx335_set_framefmt(imx335);
+ if (ret) {
+ dev_err(imx335->dev, "%s failed to set frame format: %d\n",
+ __func__, ret);
return ret;
}
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx335->sd.ctrl_handler);
if (ret) {
- dev_err(imx335->dev, "fail to setup handler");
+ dev_err(imx335->dev, "fail to setup handler\n");
return ret;
}
@@ -667,7 +782,7 @@ static int imx335_start_streaming(struct imx335 *imx335)
ret = imx335_write_reg(imx335, IMX335_REG_MODE_SELECT,
1, IMX335_MODE_STREAMING);
if (ret) {
- dev_err(imx335->dev, "fail to start streaming");
+ dev_err(imx335->dev, "fail to start streaming\n");
return ret;
}
@@ -744,7 +859,7 @@ static int imx335_detect(struct imx335 *imx335)
return ret;
if (val != IMX335_ID) {
- dev_err(imx335->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx335->dev, "chip id mismatch: %x!=%x\n",
IMX335_ID, val);
return -ENXIO;
}
@@ -776,27 +891,40 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(imx335->reset_gpio)) {
- dev_err(imx335->dev, "failed to get reset gpio %ld",
+ dev_err(imx335->dev, "failed to get reset gpio %ld\n",
PTR_ERR(imx335->reset_gpio));
return PTR_ERR(imx335->reset_gpio);
}
+ for (i = 0; i < ARRAY_SIZE(imx335_supply_name); i++)
+ imx335->supplies[i].supply = imx335_supply_name[i];
+
+ ret = devm_regulator_bulk_get(imx335->dev,
+ ARRAY_SIZE(imx335_supply_name),
+ imx335->supplies);
+ if (ret) {
+ dev_err(imx335->dev, "Failed to get regulators\n");
+ return ret;
+ }
+
/* Get sensor input clock */
imx335->inclk = devm_clk_get(imx335->dev, NULL);
if (IS_ERR(imx335->inclk)) {
- dev_err(imx335->dev, "could not get inclk");
+ dev_err(imx335->dev, "could not get inclk\n");
return PTR_ERR(imx335->inclk);
}
rate = clk_get_rate(imx335->inclk);
if (rate != IMX335_INCLK_RATE) {
- dev_err(imx335->dev, "inclk frequency mismatch");
+ dev_err(imx335->dev, "inclk frequency mismatch\n");
return -EINVAL;
}
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (!ep)
+ if (!ep) {
+ dev_err(imx335->dev, "Failed to get next endpoint\n");
return -ENXIO;
+ }
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
@@ -805,14 +933,14 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX335_NUM_DATA_LANES) {
dev_err(imx335->dev,
- "number of CSI2 data lanes %d is not supported",
+ "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto done_endpoint_free;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(imx335->dev, "no link frequencies defined");
+ dev_err(imx335->dev, "no link frequencies defined\n");
ret = -EINVAL;
goto done_endpoint_free;
}
@@ -821,6 +949,8 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
if (bus_cfg.link_frequencies[i] == IMX335_LINK_FREQ)
goto done_endpoint_free;
+ dev_err(imx335->dev, "no compatible link frequencies found\n");
+
ret = -EINVAL;
done_endpoint_free:
@@ -835,9 +965,10 @@ static const struct v4l2_subdev_video_ops imx335_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx335_pad_ops = {
- .init_cfg = imx335_init_pad_cfg,
.enum_mbus_code = imx335_enum_mbus_code,
.enum_frame_size = imx335_enum_frame_size,
+ .get_selection = imx335_get_selection,
+ .set_selection = imx335_get_selection,
.get_fmt = imx335_get_pad_format,
.set_fmt = imx335_set_pad_format,
};
@@ -847,6 +978,10 @@ static const struct v4l2_subdev_ops imx335_subdev_ops = {
.pad = &imx335_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx335_internal_ops = {
+ .init_state = imx335_init_state,
+};
+
/**
* imx335_power_on() - Sensor power on sequence
* @dev: pointer to i2c device
@@ -859,20 +994,32 @@ static int imx335_power_on(struct device *dev)
struct imx335 *imx335 = to_imx335(sd);
int ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(imx335_supply_name),
+ imx335->supplies);
+ if (ret) {
+ dev_err(dev, "%s: failed to enable regulators\n",
+ __func__);
+ return ret;
+ }
+
+ usleep_range(500, 550); /* Tlow */
+
+ /* Set XCLR */
gpiod_set_value_cansleep(imx335->reset_gpio, 1);
ret = clk_prepare_enable(imx335->inclk);
if (ret) {
- dev_err(imx335->dev, "fail to enable inclk");
+ dev_err(imx335->dev, "fail to enable inclk\n");
goto error_reset;
}
- usleep_range(20, 22);
+ usleep_range(20, 22); /* T4 */
return 0;
error_reset:
gpiod_set_value_cansleep(imx335->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
return ret;
}
@@ -889,8 +1036,8 @@ static int imx335_power_off(struct device *dev)
struct imx335 *imx335 = to_imx335(sd);
gpiod_set_value_cansleep(imx335->reset_gpio, 0);
-
clk_disable_unprepare(imx335->inclk);
+ regulator_bulk_disable(ARRAY_SIZE(imx335_supply_name), imx335->supplies);
return 0;
}
@@ -969,7 +1116,7 @@ static int imx335_init_controls(struct imx335 *imx335)
imx335->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ctrl_hdlr->error) {
- dev_err(imx335->dev, "control init failed: %d",
+ dev_err(imx335->dev, "control init failed: %d\n",
ctrl_hdlr->error);
v4l2_ctrl_handler_free(ctrl_hdlr);
return ctrl_hdlr->error;
@@ -999,10 +1146,11 @@ static int imx335_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx335->sd, client, &imx335_subdev_ops);
+ imx335->sd.internal_ops = &imx335_internal_ops;
ret = imx335_parse_hw_config(imx335);
if (ret) {
- dev_err(imx335->dev, "HW configuration is not supported");
+ dev_err(imx335->dev, "HW configuration is not supported\n");
return ret;
}
@@ -1010,24 +1158,25 @@ static int imx335_probe(struct i2c_client *client)
ret = imx335_power_on(imx335->dev);
if (ret) {
- dev_err(imx335->dev, "failed to power-on the sensor");
+ dev_err(imx335->dev, "failed to power-on the sensor\n");
goto error_mutex_destroy;
}
/* Check module identity */
ret = imx335_detect(imx335);
if (ret) {
- dev_err(imx335->dev, "failed to find sensor: %d", ret);
+ dev_err(imx335->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
/* Set default mode to max resolution */
imx335->cur_mode = &supported_mode;
+ imx335->cur_mbus_code = imx335_mbus_codes[0];
imx335->vblank = imx335->cur_mode->vblank;
ret = imx335_init_controls(imx335);
if (ret) {
- dev_err(imx335->dev, "failed to init controls: %d", ret);
+ dev_err(imx335->dev, "failed to init controls: %d\n", ret);
goto error_power_off;
}
@@ -1039,14 +1188,14 @@ static int imx335_probe(struct i2c_client *client)
imx335->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx335->sd.entity, 1, &imx335->pad);
if (ret) {
- dev_err(imx335->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx335->dev, "failed to init entity pads: %d\n", ret);
goto error_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&imx335->sd);
if (ret < 0) {
dev_err(imx335->dev,
- "failed to register async subdev: %d", ret);
+ "failed to register async subdev: %d\n", ret);
goto error_media_entity;
}
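imx335 gets the largest functional diff in this group: avdd/ovdd/dvdd regulator handling, a second media bus code (SRGGB10 next to SRGGB12) selected through per-format register tables, static selection targets, and newline-terminated log messages. The power-on path follows the usual bulk-regulator idiom (enable the rails, wait, release reset, start the clock, unwind in reverse on error); a sketch under assumed names and delays:

/* Bulk-regulator power sequencing sketch; supplies and delays are placeholders. */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const mydrv_supply_name[] = { "avdd", "ovdd", "dvdd" };

struct mydrv_power {
	struct regulator_bulk_data supplies[ARRAY_SIZE(mydrv_supply_name)];
	struct gpio_desc *reset_gpio;
	struct clk *inclk;
};

static int mydrv_power_on(struct mydrv_power *p)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(mydrv_supply_name), p->supplies);
	if (ret)
		return ret;

	usleep_range(500, 550);				/* let the rails settle */
	gpiod_set_value_cansleep(p->reset_gpio, 1);	/* release reset (XCLR) */

	ret = clk_prepare_enable(p->inclk);
	if (ret)
		goto err_reset;

	return 0;

err_reset:
	gpiod_set_value_cansleep(p->reset_gpio, 0);
	regulator_bulk_disable(ARRAY_SIZE(mydrv_supply_name), p->supplies);
	return ret;
}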
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 81bb61407..8c995c587 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1158,7 +1158,7 @@ static int imx355_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct imx355 *imx355 = to_imx355(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
mutex_lock(&imx355->mutex);
@@ -1299,10 +1299,9 @@ static int imx355_do_get_pad_format(struct imx355 *imx355,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
- struct v4l2_subdev *sd = &imx355->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx355_update_pad_format(imx355, imx355->cur_mode, fmt);
@@ -1353,7 +1352,7 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx355_update_pad_format(imx355, mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
imx355->cur_mode = mode;
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index 962b3136c..0efce3295 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -721,7 +721,7 @@ static int imx412_get_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
imx412_fill_pad_format(imx412, imx412->cur_mode, fmt);
@@ -756,7 +756,7 @@ static int imx412_set_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ret = imx412_update_controls(imx412, mode);
@@ -770,14 +770,14 @@ static int imx412_set_pad_format(struct v4l2_subdev *sd,
}
/**
- * imx412_init_pad_cfg() - Initialize sub-device pad configuration
+ * imx412_init_state() - Initialize sub-device state
* @sd: pointer to imx412 V4L2 sub-device structure
* @sd_state: V4L2 sub-device configuration
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx412_init_pad_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int imx412_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct imx412 *imx412 = to_imx412(sd);
struct v4l2_subdev_format fmt = { 0 };
@@ -997,7 +997,6 @@ static const struct v4l2_subdev_video_ops imx412_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx412_pad_ops = {
- .init_cfg = imx412_init_pad_cfg,
.enum_mbus_code = imx412_enum_mbus_code,
.enum_frame_size = imx412_enum_frame_size,
.get_fmt = imx412_get_pad_format,
@@ -1009,6 +1008,10 @@ static const struct v4l2_subdev_ops imx412_subdev_ops = {
.pad = &imx412_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx412_internal_ops = {
+ .init_state = imx412_init_state,
+};
+
/**
* imx412_power_on() - Sensor power on sequence
* @dev: pointer to i2c device
@@ -1177,6 +1180,7 @@ static int imx412_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx412->sd, client, &imx412_subdev_ops);
+ imx412->sd.internal_ops = &imx412_internal_ops;
ret = imx412_parse_hw_config(imx412);
if (ret) {
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index b3fa71a16..1e5f20c3e 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -546,7 +546,7 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
- format = v4l2_subdev_get_pad_format(&sensor->subdev, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
@@ -828,7 +828,7 @@ static int imx415_enum_frame_size(struct v4l2_subdev *sd,
{
const struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_pad_format(sd, state, fse->pad);
+ format = v4l2_subdev_state_get_format(state, fse->pad);
if (fse->index > 0 || fse->code != format->code)
return -EINVAL;
@@ -846,7 +846,7 @@ static int imx415_set_format(struct v4l2_subdev *sd,
{
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+ format = v4l2_subdev_state_get_format(state, fmt->pad);
format->width = fmt->format.width;
format->height = fmt->format.height;
@@ -880,8 +880,8 @@ static int imx415_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
}
-static int imx415_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int imx415_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format format = {
.format = {
@@ -905,7 +905,6 @@ static const struct v4l2_subdev_pad_ops imx415_subdev_pad_ops = {
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx415_set_format,
.get_selection = imx415_get_selection,
- .init_cfg = imx415_init_cfg,
};
static const struct v4l2_subdev_ops imx415_subdev_ops = {
@@ -913,12 +912,17 @@ static const struct v4l2_subdev_ops imx415_subdev_ops = {
.pad = &imx415_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx415_internal_ops = {
+ .init_state = imx415_init_state,
+};
+
static int imx415_subdev_init(struct imx415 *sensor)
{
struct i2c_client *client = to_i2c_client(sensor->dev);
int ret;
v4l2_i2c_subdev_init(&sensor->subdev, client, &imx415_subdev_ops);
+ sensor->subdev.internal_ops = &imx415_internal_ops;
ret = imx415_ctrls_init(sensor);
if (ret)
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index 73460688c..89e13ebbc 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1007,8 +1007,8 @@ static int isl7998x_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&isl7998x->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, sd_state,
- format->pad);
+ format->format = *v4l2_subdev_state_get_format(sd_state,
+ format->pad);
goto out;
}
@@ -1044,7 +1044,7 @@ static int isl7998x_set_fmt(struct v4l2_subdev *sd,
mf->field = mode->field;
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = format->format;
+ *v4l2_subdev_state_get_format(sd_state, format->pad) = format->format;
mutex_unlock(&isl7998x->lock);
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index fc1cf196e..d685d445c 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -868,11 +868,19 @@ static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int max9286_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int max9286_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct max9286_priv *priv = sd_to_max9286(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (interval->pad != MAX9286_SRC_PAD)
return -EINVAL;
@@ -881,11 +889,19 @@ static int max9286_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int max9286_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int max9286_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct max9286_priv *priv = sd_to_max9286(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (interval->pad != MAX9286_SRC_PAD)
return -EINVAL;
@@ -913,7 +929,7 @@ max9286_get_pad_format(struct max9286_priv *priv,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &priv->fmt[pad];
default:
@@ -983,14 +999,14 @@ static int max9286_get_fmt(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops max9286_video_ops = {
.s_stream = max9286_s_stream,
- .g_frame_interval = max9286_g_frame_interval,
- .s_frame_interval = max9286_s_frame_interval,
};
static const struct v4l2_subdev_pad_ops max9286_pad_ops = {
.enum_mbus_code = max9286_enum_mbus_code,
.get_fmt = max9286_get_fmt,
.set_fmt = max9286_set_fmt,
+ .get_frame_interval = max9286_get_frame_interval,
+ .set_frame_interval = max9286_set_frame_interval,
};
static const struct v4l2_subdev_ops max9286_subdev_ops = {
@@ -1020,7 +1036,7 @@ static int max9286_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
unsigned int i;
for (i = 0; i < MAX9286_N_SINKS; i++) {
- format = v4l2_subdev_get_try_format(subdev, fh->state, i);
+ format = v4l2_subdev_state_get_format(fh->state, i);
max9286_init_format(format);
}
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 79192cf79..ad1a3ab77 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -325,7 +325,7 @@ static int mt9m001_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
format->format = *mf;
return 0;
}
@@ -405,7 +405,7 @@ static int mt9m001_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9m001_s_fmt(sd, fmt, mf);
- sd_state->pads->try_fmt = *mf;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *mf;
return 0;
}
@@ -650,13 +650,13 @@ static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
#endif
};
-static int mt9m001_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int mt9m001_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, sd_state, 0);
+ v4l2_subdev_state_get_format(sd_state, 0);
try_fmt->width = MT9M001_MAX_WIDTH;
try_fmt->height = MT9M001_MAX_HEIGHT;
@@ -708,7 +708,6 @@ static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
};
static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = {
- .init_cfg = mt9m001_init_cfg,
.enum_mbus_code = mt9m001_enum_mbus_code,
.get_selection = mt9m001_get_selection,
.set_selection = mt9m001_set_selection,
@@ -724,6 +723,10 @@ static const struct v4l2_subdev_ops mt9m001_subdev_ops = {
.pad = &mt9m001_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mt9m001_internal_ops = {
+ .init_state = mt9m001_init_state,
+};
+
static int mt9m001_probe(struct i2c_client *client)
{
struct mt9m001 *mt9m001;
@@ -755,6 +758,7 @@ static int mt9m001_probe(struct i2c_client *client)
return PTR_ERR(mt9m001->reset_gpio);
v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops);
+ mt9m001->subdev.internal_ops = &mt9m001_internal_ops;
mt9m001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
v4l2_ctrl_handler_init(&mt9m001->hdl, 4);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 1f44b72e8..ceeeb94c3 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -525,7 +525,7 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, format->pad);
format->format = *mf;
return 0;
}
@@ -671,7 +671,7 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd,
mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *mf;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *mf;
return 0;
}
@@ -1045,18 +1045,27 @@ static const struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
#endif
};
-static int mt9m111_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int mt9m111_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
fi->interval = mt9m111->frame_interval;
return 0;
}
-static int mt9m111_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int mt9m111_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
const struct mt9m111_mode_info *mode;
@@ -1066,6 +1075,13 @@ static int mt9m111_s_frame_interval(struct v4l2_subdev *sd,
if (mt9m111->is_streaming)
return -EBUSY;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad != 0)
return -EINVAL;
@@ -1111,11 +1127,11 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int mt9m111_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int mt9m111_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, sd_state, 0);
+ v4l2_subdev_state_get_format(sd_state, 0);
format->width = MT9M111_MAX_WIDTH;
format->height = MT9M111_MAX_HEIGHT;
@@ -1151,17 +1167,16 @@ static int mt9m111_get_mbus_config(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
.s_stream = mt9m111_s_stream,
- .g_frame_interval = mt9m111_g_frame_interval,
- .s_frame_interval = mt9m111_s_frame_interval,
};
static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
- .init_cfg = mt9m111_init_cfg,
.enum_mbus_code = mt9m111_enum_mbus_code,
.get_selection = mt9m111_get_selection,
.set_selection = mt9m111_set_selection,
.get_fmt = mt9m111_get_fmt,
.set_fmt = mt9m111_set_fmt,
+ .get_frame_interval = mt9m111_get_frame_interval,
+ .set_frame_interval = mt9m111_set_frame_interval,
.get_mbus_config = mt9m111_get_mbus_config,
};
@@ -1171,6 +1186,10 @@ static const struct v4l2_subdev_ops mt9m111_subdev_ops = {
.pad = &mt9m111_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mt9m111_internal_ops = {
+ .init_state = mt9m111_init_state,
+};
+
/*
* Interface active, can use i2c. If it fails, it can indeed mean that
* this wasn't our capture interface, so we wait for the right one
@@ -1275,6 +1294,7 @@ static int mt9m111_probe(struct i2c_client *client)
mt9m111->ctx = &context_b;
v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops);
+ mt9m111->subdev.internal_ops = &mt9m111_internal_ops;
mt9m111->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
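mt9m111 also illustrates the third recurring change: g_frame_interval/s_frame_interval leave v4l2_subdev_video_ops and become get_frame_interval/set_frame_interval in v4l2_subdev_pad_ops, gaining a state argument and a which field on the interval struct. Since none of these drivers store TRY intervals yet, each handler rejects anything but the active case, matching the FIXME comments above. A sketch with a hypothetical foo sensor:

static int foo_get_frame_interval(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_frame_interval *fi)
{
	struct foo *sensor = to_foo(sd);

	/* TRY intervals are not implemented; only ACTIVE is accepted. */
	if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	fi->interval = sensor->frame_interval;
	return 0;
}

static const struct v4l2_subdev_pad_ops foo_pad_ops = {
	.get_frame_interval = foo_get_frame_interval,
	/* ... set_frame_interval, get_fmt, set_fmt, ... */
};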
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index 1a535c098..5f0b0ad8f 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -796,13 +796,13 @@ static int mt9m114_configure(struct mt9m114 *sensor,
u64 read_mode;
int ret = 0;
- pa_format = v4l2_subdev_get_pad_format(&sensor->pa.sd, pa_state, 0);
- pa_crop = v4l2_subdev_get_pad_crop(&sensor->pa.sd, pa_state, 0);
+ pa_format = v4l2_subdev_state_get_format(pa_state, 0);
+ pa_crop = v4l2_subdev_state_get_crop(pa_state, 0);
- ifp_format = v4l2_subdev_get_pad_format(&sensor->ifp.sd, ifp_state, 1);
+ ifp_format = v4l2_subdev_state_get_format(ifp_state, 1);
ifp_info = mt9m114_format_info(sensor, 1, ifp_format->code);
- ifp_crop = v4l2_subdev_get_pad_crop(&sensor->ifp.sd, ifp_state, 0);
- ifp_compose = v4l2_subdev_get_pad_compose(&sensor->ifp.sd, ifp_state, 0);
+ ifp_crop = v4l2_subdev_state_get_crop(ifp_state, 0);
+ ifp_compose = v4l2_subdev_state_get_compose(ifp_state, 0);
ret = cci_read(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
&read_mode, NULL);
@@ -1045,7 +1045,7 @@ static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
state = v4l2_subdev_get_locked_active_state(&sensor->pa.sd);
- format = v4l2_subdev_get_pad_format(&sensor->pa.sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
switch (ctrl->id) {
case V4L2_CID_HBLANK:
@@ -1152,20 +1152,20 @@ static inline struct mt9m114 *pa_to_mt9m114(struct v4l2_subdev *sd)
return container_of(sd, struct mt9m114, pa.sd);
}
-static int mt9m114_pa_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int mt9m114_pa_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
crop->left = 0;
crop->top = 0;
crop->width = MT9M114_PIXEL_ARRAY_WIDTH;
crop->height = MT9M114_PIXEL_ARRAY_HEIGHT;
- format = v4l2_subdev_get_pad_format(sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
format->width = MT9M114_PIXEL_ARRAY_WIDTH;
format->height = MT9M114_PIXEL_ARRAY_HEIGHT;
@@ -1220,8 +1220,8 @@ static int mt9m114_pa_set_fmt(struct v4l2_subdev *sd,
unsigned int hscale;
unsigned int vscale;
- crop = v4l2_subdev_get_pad_crop(sd, state, fmt->pad);
- format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+ crop = v4l2_subdev_state_get_crop(state, fmt->pad);
+ format = v4l2_subdev_state_get_format(state, fmt->pad);
/* The sensor can bin horizontally and vertically. */
hscale = DIV_ROUND_CLOSEST(crop->width, fmt->format.width ? : 1);
@@ -1243,7 +1243,7 @@ static int mt9m114_pa_get_selection(struct v4l2_subdev *sd,
{
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+ sel->r = *v4l2_subdev_state_get_crop(state, sel->pad);
return 0;
case V4L2_SEL_TGT_CROP_DEFAULT:
@@ -1271,8 +1271,8 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- crop = v4l2_subdev_get_pad_crop(sd, state, sel->pad);
- format = v4l2_subdev_get_pad_format(sd, state, sel->pad);
+ crop = v4l2_subdev_state_get_crop(state, sel->pad);
+ format = v4l2_subdev_state_get_format(state, sel->pad);
/*
* Clamp the crop rectangle. The vertical coordinates must be even, and
@@ -1304,7 +1304,6 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops mt9m114_pa_pad_ops = {
- .init_cfg = mt9m114_pa_init_cfg,
.enum_mbus_code = mt9m114_pa_enum_mbus_code,
.enum_frame_size = mt9m114_pa_enum_framesizes,
.get_fmt = v4l2_subdev_get_fmt,
@@ -1317,6 +1316,10 @@ static const struct v4l2_subdev_ops mt9m114_pa_ops = {
.pad = &mt9m114_pa_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mt9m114_pa_internal_ops = {
+ .init_state = mt9m114_pa_init_state,
+};
+
static int mt9m114_pa_init(struct mt9m114 *sensor)
{
struct v4l2_ctrl_handler *hdl = &sensor->pa.hdl;
@@ -1329,6 +1332,7 @@ static int mt9m114_pa_init(struct mt9m114 *sensor)
/* Initialize the subdev. */
v4l2_subdev_init(sd, &mt9m114_pa_ops);
+ sd->internal_ops = &mt9m114_pa_internal_ops;
v4l2_i2c_subdev_set_name(sd, sensor->client, NULL, " pixel array");
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -1402,7 +1406,7 @@ static int mt9m114_pa_init(struct mt9m114 *sensor)
/* Update the range of the blanking controls based on the format. */
state = v4l2_subdev_lock_and_get_active_state(sd);
- format = v4l2_subdev_get_pad_format(sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
mt9m114_pa_ctrl_update_blanking(sensor, format);
v4l2_subdev_unlock_state(state);
@@ -1581,12 +1585,20 @@ static int mt9m114_ifp_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
-static int mt9m114_ifp_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int mt9m114_ifp_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct v4l2_fract *ival = &interval->interval;
struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(sensor->ifp.hdl.lock);
ival->numerator = 1;
@@ -1597,13 +1609,21 @@ static int mt9m114_ifp_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int mt9m114_ifp_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int mt9m114_ifp_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct v4l2_fract *ival = &interval->interval;
struct mt9m114 *sensor = ifp_to_mt9m114(sd);
int ret = 0;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(sensor->ifp.hdl.lock);
if (ival->numerator != 0 && ival->denominator != 0)
@@ -1624,15 +1644,15 @@ static int mt9m114_ifp_s_frame_interval(struct v4l2_subdev *sd,
return ret;
}
-static int mt9m114_ifp_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int mt9m114_ifp_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct mt9m114 *sensor = ifp_to_mt9m114(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
struct v4l2_rect *compose;
- format = v4l2_subdev_get_pad_format(sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
format->width = MT9M114_PIXEL_ARRAY_WIDTH;
format->height = MT9M114_PIXEL_ARRAY_HEIGHT;
@@ -1643,21 +1663,21 @@ static int mt9m114_ifp_init_cfg(struct v4l2_subdev *sd,
format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
format->xfer_func = V4L2_XFER_FUNC_NONE;
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
crop->left = 4;
crop->top = 4;
crop->width = format->width - 8;
crop->height = format->height - 8;
- compose = v4l2_subdev_get_pad_compose(sd, state, 0);
+ compose = v4l2_subdev_state_get_compose(state, 0);
compose->left = 0;
compose->top = 0;
compose->width = crop->width;
compose->height = crop->height;
- format = v4l2_subdev_get_pad_format(sd, state, 1);
+ format = v4l2_subdev_state_get_format(state, 1);
format->width = compose->width;
format->height = compose->height;
@@ -1738,7 +1758,7 @@ static int mt9m114_ifp_enum_framesizes(struct v4l2_subdev *sd,
} else {
const struct v4l2_rect *crop;
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
fse->max_width = crop->width;
fse->max_height = crop->height;
@@ -1777,7 +1797,7 @@ static int mt9m114_ifp_set_fmt(struct v4l2_subdev *sd,
struct mt9m114 *sensor = ifp_to_mt9m114(sd);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+ format = v4l2_subdev_state_get_format(state, fmt->pad);
if (fmt->pad == 0) {
/* Only the size can be changed on the sink pad. */
@@ -1797,7 +1817,7 @@ static int mt9m114_ifp_set_fmt(struct v4l2_subdev *sd,
/* If the output format is RAW10, bypass the scaler. */
if (format->code == MEDIA_BUS_FMT_SGRBG10_1X10)
- *format = *v4l2_subdev_get_pad_format(sd, state, 0);
+ *format = *v4l2_subdev_state_get_format(state, 0);
}
fmt->format = *format;
@@ -1819,7 +1839,7 @@ static int mt9m114_ifp_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *v4l2_subdev_get_pad_crop(sd, state, 0);
+ sel->r = *v4l2_subdev_state_get_crop(state, 0);
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
@@ -1828,7 +1848,7 @@ static int mt9m114_ifp_get_selection(struct v4l2_subdev *sd,
* The crop default and bounds are equal to the sink
* format size minus 4 pixels on each side for demosaicing.
*/
- format = v4l2_subdev_get_pad_format(sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
sel->r.left = 4;
sel->r.top = 4;
@@ -1837,7 +1857,7 @@ static int mt9m114_ifp_get_selection(struct v4l2_subdev *sd,
break;
case V4L2_SEL_TGT_COMPOSE:
- sel->r = *v4l2_subdev_get_pad_compose(sd, state, 0);
+ sel->r = *v4l2_subdev_state_get_compose(state, 0);
break;
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
@@ -1846,7 +1866,7 @@ static int mt9m114_ifp_get_selection(struct v4l2_subdev *sd,
* The compose default and bounds sizes are equal to the sink
* crop rectangle size.
*/
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = crop->width;
@@ -1877,9 +1897,9 @@ static int mt9m114_ifp_set_selection(struct v4l2_subdev *sd,
if (sel->pad != 0)
return -EINVAL;
- format = v4l2_subdev_get_pad_format(sd, state, 0);
- crop = v4l2_subdev_get_pad_crop(sd, state, 0);
- compose = v4l2_subdev_get_pad_compose(sd, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ compose = v4l2_subdev_state_get_compose(state, 0);
if (sel->target == V4L2_SEL_TGT_CROP) {
/*
@@ -1921,7 +1941,7 @@ static int mt9m114_ifp_set_selection(struct v4l2_subdev *sd,
}
/* Propagate the compose rectangle to the source format. */
- format = v4l2_subdev_get_pad_format(sd, state, 1);
+ format = v4l2_subdev_state_get_format(state, 1);
format->width = compose->width;
format->height = compose->height;
@@ -1963,12 +1983,9 @@ static int mt9m114_ifp_registered(struct v4l2_subdev *sd)
static const struct v4l2_subdev_video_ops mt9m114_ifp_video_ops = {
.s_stream = mt9m114_ifp_s_stream,
- .g_frame_interval = mt9m114_ifp_g_frame_interval,
- .s_frame_interval = mt9m114_ifp_s_frame_interval,
};
static const struct v4l2_subdev_pad_ops mt9m114_ifp_pad_ops = {
- .init_cfg = mt9m114_ifp_init_cfg,
.enum_mbus_code = mt9m114_ifp_enum_mbus_code,
.enum_frame_size = mt9m114_ifp_enum_framesizes,
.enum_frame_interval = mt9m114_ifp_enum_frameintervals,
@@ -1976,6 +1993,8 @@ static const struct v4l2_subdev_pad_ops mt9m114_ifp_pad_ops = {
.set_fmt = mt9m114_ifp_set_fmt,
.get_selection = mt9m114_ifp_get_selection,
.set_selection = mt9m114_ifp_set_selection,
+ .get_frame_interval = mt9m114_ifp_get_frame_interval,
+ .set_frame_interval = mt9m114_ifp_set_frame_interval,
};
static const struct v4l2_subdev_ops mt9m114_ifp_ops = {
@@ -1984,6 +2003,7 @@ static const struct v4l2_subdev_ops mt9m114_ifp_ops = {
};
static const struct v4l2_subdev_internal_ops mt9m114_ifp_internal_ops = {
+ .init_state = mt9m114_ifp_init_state,
.registered = mt9m114_ifp_registered,
.unregistered = mt9m114_ifp_unregistered,
};
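The mt9m114 selection handlers above demonstrate the crop and compose variants of the new accessors. A trimmed sketch of a get_selection handler in that style (hypothetical foo names):

static int foo_get_selection(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *state,
			     struct v4l2_subdev_selection *sel)
{
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		sel->r = *v4l2_subdev_state_get_crop(state, sel->pad);
		return 0;
	case V4L2_SEL_TGT_COMPOSE:
		sel->r = *v4l2_subdev_state_get_compose(state, sel->pad);
		return 0;
	default:
		return -EINVAL;
	}
}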
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 348f1e109..596200d02 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -549,8 +549,7 @@ __mt9p031_get_pad_format(struct mt9p031 *mt9p031,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9p031->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->format;
default:
@@ -565,8 +564,7 @@ __mt9p031_get_pad_crop(struct mt9p031 *mt9p031,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9p031->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9p031->crop;
default:
@@ -698,8 +696,8 @@ static int mt9p031_set_selection(struct v4l2_subdev *subdev,
return 0;
}
-static int mt9p031_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int mt9p031_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
struct v4l2_mbus_framefmt *format;
@@ -1043,7 +1041,6 @@ static const struct v4l2_subdev_video_ops mt9p031_subdev_video_ops = {
};
static const struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
- .init_cfg = mt9p031_init_cfg,
.enum_mbus_code = mt9p031_enum_mbus_code,
.enum_frame_size = mt9p031_enum_frame_size,
.get_fmt = mt9p031_get_format,
@@ -1059,6 +1056,7 @@ static const struct v4l2_subdev_ops mt9p031_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops mt9p031_subdev_internal_ops = {
+ .init_state = mt9p031_init_state,
.registered = mt9p031_registered,
.open = mt9p031_open,
.close = mt9p031_close,
@@ -1191,7 +1189,7 @@ static int mt9p031_probe(struct i2c_client *client)
mt9p031->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- ret = mt9p031_init_cfg(&mt9p031->subdev, NULL);
+ ret = mt9p031_init_state(&mt9p031->subdev, NULL);
if (ret)
goto done;
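mt9p031 (like mt9v032 below) keeps the ACTIVE format in its private struct, so only the TRY branch of the lookup helper changes. A sketch of the resulting helper, assuming a hypothetical struct foo with a cached format:

static struct v4l2_mbus_framefmt *
__foo_get_pad_format(struct foo *sensor,
		     struct v4l2_subdev_state *sd_state,
		     unsigned int pad, u32 which)
{
	switch (which) {
	case V4L2_SUBDEV_FORMAT_TRY:
		/* TRY data lives in the state; no subdev pointer needed. */
		return v4l2_subdev_state_get_format(sd_state, pad);
	case V4L2_SUBDEV_FORMAT_ACTIVE:
		return &sensor->format;
	default:
		return NULL;
	}
}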
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index 93f34b767..fb1588c57 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -982,7 +982,6 @@ static int mt9t112_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return mt9t112_s_fmt(sd, mf);
- sd_state->pads->try_fmt = *mf;
return 0;
}
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 37a634b92..8834ff878 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -356,15 +356,23 @@ static int mt9v011_set_fmt(struct v4l2_subdev *sd,
set_res(sd);
} else {
- sd_state->pads->try_fmt = *fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
}
return 0;
}
-static int mt9v011_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int mt9v011_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
calc_fps(sd,
&ival->interval.numerator,
&ival->interval.denominator);
@@ -372,12 +380,20 @@ static int mt9v011_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int mt9v011_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int mt9v011_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct v4l2_fract *tpf = &ival->interval;
u16 speed;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
speed = calc_speed(sd, tpf->numerator, tpf->denominator);
mt9v011_write(sd, R0A_MT9V011_CLK_SPEED, speed);
@@ -455,19 +471,15 @@ static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
#endif
};
-static const struct v4l2_subdev_video_ops mt9v011_video_ops = {
- .g_frame_interval = mt9v011_g_frame_interval,
- .s_frame_interval = mt9v011_s_frame_interval,
-};
-
static const struct v4l2_subdev_pad_ops mt9v011_pad_ops = {
.enum_mbus_code = mt9v011_enum_mbus_code,
.set_fmt = mt9v011_set_fmt,
+ .get_frame_interval = mt9v011_get_frame_interval,
+ .set_frame_interval = mt9v011_set_frame_interval,
};
static const struct v4l2_subdev_ops mt9v011_ops = {
.core = &mt9v011_core_ops,
- .video = &mt9v011_video_ops,
.pad = &mt9v011_pad_ops,
};
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 1c6f6cea1..3ca76eeae 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -356,8 +356,7 @@ __mt9v032_get_pad_format(struct mt9v032 *mt9v032,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9v032->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->format;
default:
@@ -372,8 +371,7 @@ __mt9v032_get_pad_crop(struct mt9v032 *mt9v032,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&mt9v032->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v032->crop;
default:
@@ -931,13 +929,13 @@ static int mt9v032_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- crop = v4l2_subdev_get_try_crop(subdev, fh->state, 0);
+ crop = v4l2_subdev_state_get_crop(fh->state, 0);
crop->left = MT9V032_COLUMN_START_DEF;
crop->top = MT9V032_ROW_START_DEF;
crop->width = MT9V032_WINDOW_WIDTH_DEF;
crop->height = MT9V032_WINDOW_HEIGHT_DEF;
- format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
+ format = v4l2_subdev_state_get_format(fh->state, 0);
if (mt9v032->model->color)
format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index f859b49e1..b0b98ed3c 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -35,7 +35,7 @@
* The IFP can produce several output image formats from the sensor core
* output. This driver currently supports only YUYV format permutations.
*
- * The driver allows manual frame rate control through s_frame_interval subdev
+ * The driver allows manual frame rate control through set_frame_interval subdev
* operation or V4L2_CID_V/HBLANK controls, but it is known that the
* auto-exposure algorithm might modify the programmed frame rate. While the
* driver initially programs the sensor with auto-exposure and
@@ -719,8 +719,9 @@ error_unlock:
return ret;
}
-static int mt9v111_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int mt9v111_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
struct v4l2_fract *tpf = &ival->interval;
@@ -729,6 +730,13 @@ static int mt9v111_s_frame_interval(struct v4l2_subdev *sd,
tpf->denominator;
unsigned int max_fps;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (!tpf->numerator)
tpf->numerator = 1;
@@ -771,12 +779,20 @@ static int mt9v111_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int mt9v111_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int mt9v111_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
struct v4l2_fract *tpf = &ival->interval;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&mt9v111->stream_mutex);
tpf->numerator = 1;
@@ -795,7 +811,7 @@ static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&mt9v111->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &mt9v111->fmt;
default:
@@ -948,10 +964,10 @@ done:
return 0;
}
-static int mt9v111_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int mt9v111_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
- sd_state->pads->try_fmt = mt9v111_def_fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = mt9v111_def_fmt;
return 0;
}
@@ -962,17 +978,16 @@ static const struct v4l2_subdev_core_ops mt9v111_core_ops = {
static const struct v4l2_subdev_video_ops mt9v111_video_ops = {
.s_stream = mt9v111_s_stream,
- .s_frame_interval = mt9v111_s_frame_interval,
- .g_frame_interval = mt9v111_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops mt9v111_pad_ops = {
- .init_cfg = mt9v111_init_cfg,
.enum_mbus_code = mt9v111_enum_mbus_code,
.enum_frame_size = mt9v111_enum_frame_size,
.enum_frame_interval = mt9v111_enum_frame_interval,
.get_fmt = mt9v111_get_format,
.set_fmt = mt9v111_set_format,
+ .get_frame_interval = mt9v111_get_frame_interval,
+ .set_frame_interval = mt9v111_set_frame_interval,
};
static const struct v4l2_subdev_ops mt9v111_ops = {
@@ -981,6 +996,10 @@ static const struct v4l2_subdev_ops mt9v111_ops = {
.pad = &mt9v111_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mt9v111_internal_ops = {
+ .init_state = mt9v111_init_state,
+};
+
static const struct media_entity_operations mt9v111_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -1194,6 +1213,7 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->pending = true;
v4l2_i2c_subdev_init(&mt9v111->sd, client, &mt9v111_ops);
+ mt9v111->sd.internal_ops = &mt9v111_internal_ops;
mt9v111->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
mt9v111->sd.entity.ops = &mt9v111_subdev_entity_ops;
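For the blanking-based frame rate control described in the mt9v111 header comment above, the underlying arithmetic is the usual sensor relation: the frame rate is the pixel clock divided by the total frame size including blanking. A generic sketch of that calculation (not mt9v111's exact register math):

/*
 * fps = pixel_clock / ((width + hblank) * (height + vblank))
 *
 * Solving for the vertical blanking needed to reach a target rate:
 */
static u32 foo_vblank_for_fps(u32 pixel_clock, u32 width, u32 hblank,
			      u32 height, u32 fps)
{
	u32 frame_lines = pixel_clock / (fps * (width + hblank));

	return frame_lines > height ? frame_lines - height : 0;
}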
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 51378ba16..bac9597fa 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -769,8 +769,7 @@ static int og01a1b_set_format(struct v4l2_subdev *sd,
mutex_lock(&og01a1b->mutex);
og01a1b_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
og01a1b->cur_mode = mode;
__v4l2_ctrl_s_ctrl(og01a1b->link_freq, mode->link_freq_index);
@@ -803,9 +802,8 @@ static int og01a1b_get_format(struct v4l2_subdev *sd,
mutex_lock(&og01a1b->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&og01a1b->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
og01a1b_update_pad_format(og01a1b->cur_mode, &fmt->format);
@@ -850,7 +848,7 @@ static int og01a1b_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&og01a1b->mutex);
og01a1b_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
mutex_unlock(&og01a1b->mutex);
return 0;
diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c
index 02de09edc..5606437f3 100644
--- a/drivers/media/i2c/ov01a10.c
+++ b/drivers/media/i2c/ov01a10.c
@@ -723,14 +723,14 @@ static int ov01a10_set_format(struct v4l2_subdev *sd,
h_blank);
}
- format = v4l2_subdev_get_pad_format(sd, sd_state, fmt->stream);
+ format = v4l2_subdev_state_get_format(sd_state, fmt->stream);
*format = fmt->format;
return 0;
}
-static int ov01a10_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int ov01a10_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
@@ -813,7 +813,6 @@ static const struct v4l2_subdev_video_ops ov01a10_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov01a10_pad_ops = {
- .init_cfg = ov01a10_init_cfg,
.set_fmt = ov01a10_set_format,
.get_fmt = v4l2_subdev_get_fmt,
.get_selection = ov01a10_get_selection,
@@ -827,6 +826,10 @@ static const struct v4l2_subdev_ops ov01a10_subdev_ops = {
.pad = &ov01a10_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov01a10_internal_ops = {
+ .init_state = ov01a10_init_state,
+};
+
static const struct media_entity_operations ov01a10_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -873,6 +876,7 @@ static int ov01a10_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&ov01a10->sd, client, &ov01a10_subdev_ops);
+ ov01a10->sd.internal_ops = &ov01a10_internal_ops;
ret = ov01a10_identify_module(ov01a10);
if (ret)
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 848e47a46..6c30e1a0d 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -315,7 +315,7 @@ static int ov02a10_set_fmt(struct v4l2_subdev *sd,
ov02a10_fill_fmt(ov02a10->cur_mode, mbus_fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- frame_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ frame_fmt = v4l2_subdev_state_get_format(sd_state, 0);
else
frame_fmt = &ov02a10->fmt;
@@ -336,8 +336,8 @@ static int ov02a10_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov02a10->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
} else {
fmt->format = ov02a10->fmt;
mbus_fmt->code = ov02a10->fmt.code;
@@ -511,8 +511,8 @@ static int __ov02a10_stop_stream(struct ov02a10 *ov02a10)
SC_CTRL_MODE_STANDBY);
}
-static int ov02a10_entity_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ov02a10_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_TRY,
@@ -709,7 +709,6 @@ static const struct v4l2_subdev_video_ops ov02a10_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov02a10_pad_ops = {
- .init_cfg = ov02a10_entity_init_cfg,
.enum_mbus_code = ov02a10_enum_mbus_code,
.enum_frame_size = ov02a10_enum_frame_sizes,
.get_fmt = ov02a10_get_fmt,
@@ -721,6 +720,10 @@ static const struct v4l2_subdev_ops ov02a10_subdev_ops = {
.pad = &ov02a10_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov02a10_internal_ops = {
+ .init_state = ov02a10_init_state,
+};
+
static const struct media_entity_operations ov02a10_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -869,6 +872,7 @@ static int ov02a10_probe(struct i2c_client *client)
"failed to check HW configuration\n");
v4l2_i2c_subdev_init(&ov02a10->subdev, client, &ov02a10_subdev_ops);
+ ov02a10->subdev.internal_ops = &ov02a10_internal_ops;
ov02a10->mipi_clock_voltage = OV02A10_MIPI_TX_SPEED_DEFAULT;
ov02a10->fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index 3d49e3fa8..1bacbdfa4 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -1145,7 +1145,7 @@ static int ov08d10_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov08d10->mutex);
ov08d10_update_pad_format(ov08d10, mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) =
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) =
fmt->format;
} else {
ov08d10->cur_mode = mode;
@@ -1184,9 +1184,8 @@ static int ov08d10_get_format(struct v4l2_subdev *sd,
mutex_lock(&ov08d10->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov08d10->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
ov08d10_update_pad_format(ov08d10, ov08d10->cur_mode,
&fmt->format);
@@ -1242,7 +1241,7 @@ static int ov08d10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov08d10->mutex);
ov08d10_update_pad_format(ov08d10, &ov08d10->priv_lane->sp_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
mutex_unlock(&ov08d10->mutex);
return 0;
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index b41b6866a..abbb0b774 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -2536,7 +2536,7 @@ static int ov08x40_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
const struct ov08x40_mode *default_mode = &supported_modes[0];
struct ov08x40 *ov08x = to_ov08x40(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
mutex_lock(&ov08x->mutex);
@@ -2774,10 +2774,9 @@ static int ov08x40_do_get_pad_format(struct ov08x40 *ov08x,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
- struct v4l2_subdev *sd = &ov08x->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
ov08x40_update_pad_format(ov08x->cur_mode, fmt);
@@ -2826,7 +2825,7 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov08x40_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ov08x->cur_mode = mode;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 4c419014d..09387e335 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1147,9 +1147,8 @@ static int ov13858_write_reg_list(struct ov13858 *ov13858,
static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov13858 *ov13858 = to_ov13858(sd);
- struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
- fh->state,
- 0);
+ struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_state_get_format(fh->state,
+ 0);
mutex_lock(&ov13858->mutex);
@@ -1317,10 +1316,9 @@ static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
- struct v4l2_subdev *sd = &ov13858->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
ov13858_update_pad_format(ov13858->cur_mode, fmt);
@@ -1369,7 +1367,7 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov13858_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ov13858->cur_mode = mode;
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index 2793e4675..73c844aa5 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -755,9 +755,8 @@ static int ov13b10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
const struct ov13b10_mode *default_mode = &supported_modes[0];
struct ov13b10 *ov13b = to_ov13b10(sd);
- struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
- fh->state,
- 0);
+ struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_state_get_format(fh->state,
+ 0);
mutex_lock(&ov13b->mutex);
@@ -1002,10 +1001,9 @@ static int ov13b10_do_get_pad_format(struct ov13b10 *ov13b,
struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *framefmt;
- struct v4l2_subdev *sd = &ov13b->sd;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
ov13b10_update_pad_format(ov13b->cur_mode, fmt);
@@ -1054,7 +1052,7 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov13b10_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ov13b->cur_mode = mode;
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 28a01c6ef..67c4bd291 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -920,7 +920,7 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
return -EINVAL;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
format->format = *mf;
return 0;
}
@@ -988,7 +988,7 @@ static int ov2640_set_fmt(struct v4l2_subdev *sd,
/* select format */
priv->cfmt_code = mf->code;
} else {
- sd_state->pads->try_fmt = *mf;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *mf;
}
out:
mutex_unlock(&priv->lock);
@@ -996,11 +996,11 @@ out:
return ret;
}
-static int ov2640_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ov2640_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, sd_state, 0);
+ v4l2_subdev_state_get_format(sd_state, 0);
const struct ov2640_win_size *win =
ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
@@ -1125,7 +1125,6 @@ static const struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
};
static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = {
- .init_cfg = ov2640_init_cfg,
.enum_mbus_code = ov2640_enum_mbus_code,
.get_selection = ov2640_get_selection,
.get_fmt = ov2640_get_fmt,
@@ -1142,6 +1141,10 @@ static const struct v4l2_subdev_ops ov2640_subdev_ops = {
.video = &ov2640_subdev_video_ops,
};
+static const struct v4l2_subdev_internal_ops ov2640_internal_ops = {
+ .init_state = ov2640_init_state,
+};
+
static int ov2640_probe_dt(struct i2c_client *client,
struct ov2640_priv *priv)
{
@@ -1211,6 +1214,7 @@ static int ov2640_probe(struct i2c_client *client)
priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
+ priv->subdev.internal_ops = &ov2640_internal_ops;
priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
mutex_init(&priv->lock);
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 2c3dbe164..1d0ef72a6 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1033,7 +1033,7 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
mutex_lock(&ov2659->lock);
fmt->format = *mf;
mutex_unlock(&ov2659->lock);
@@ -1109,7 +1109,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov2659->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
} else {
s64 val;
@@ -1304,7 +1304,7 @@ static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
dev_dbg(&client->dev, "%s:\n", __func__);
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 72bab0ff8..39d321e2b 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -309,7 +309,7 @@ __ov2680_get_pad_format(struct ov2680_dev *sensor,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&sensor->sd, state, pad);
+ return v4l2_subdev_state_get_format(state, pad);
return &sensor->mode.fmt;
}
@@ -321,7 +321,7 @@ __ov2680_get_pad_crop(struct ov2680_dev *sensor,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&sensor->sd, state, pad);
+ return v4l2_subdev_state_get_crop(state, pad);
return &sensor->mode.crop;
}
@@ -552,11 +552,19 @@ err_disable_regulators:
return ret;
}
-static int ov2680_s_g_frame_interval(struct v4l2_subdev *sd,
+static int ov2680_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fi)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&sensor->lock);
fi->interval = sensor->mode.frame_interval;
mutex_unlock(&sensor->lock);
@@ -650,7 +658,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
ov2680_fill_format(sensor, &format->format, width, height);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ try_fmt = v4l2_subdev_state_get_format(sd_state, 0);
*try_fmt = format->format;
return 0;
}
@@ -755,14 +763,14 @@ static int ov2680_set_selection(struct v4l2_subdev *sd,
return 0;
}
-static int ov2680_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ov2680_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- sd_state->pads[0].try_crop = ov2680_default_crop;
+ *v4l2_subdev_state_get_crop(sd_state, 0) = ov2680_default_crop;
- ov2680_fill_format(sensor, &sd_state->pads[0].try_fmt,
+ ov2680_fill_format(sensor, v4l2_subdev_state_get_format(sd_state, 0),
OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
return 0;
}
@@ -870,13 +878,10 @@ static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
};
static const struct v4l2_subdev_video_ops ov2680_video_ops = {
- .g_frame_interval = ov2680_s_g_frame_interval,
- .s_frame_interval = ov2680_s_g_frame_interval,
.s_stream = ov2680_s_stream,
};
static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
- .init_cfg = ov2680_init_cfg,
.enum_mbus_code = ov2680_enum_mbus_code,
.enum_frame_size = ov2680_enum_frame_size,
.enum_frame_interval = ov2680_enum_frame_interval,
@@ -884,6 +889,8 @@ static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
.set_fmt = ov2680_set_fmt,
.get_selection = ov2680_get_selection,
.set_selection = ov2680_set_selection,
+ .get_frame_interval = ov2680_get_frame_interval,
+ .set_frame_interval = ov2680_get_frame_interval,
};
static const struct v4l2_subdev_ops ov2680_subdev_ops = {
@@ -891,6 +898,10 @@ static const struct v4l2_subdev_ops ov2680_subdev_ops = {
.pad = &ov2680_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov2680_internal_ops = {
+ .init_state = ov2680_init_state,
+};
+
static int ov2680_mode_init(struct ov2680_dev *sensor)
{
/* set initial mode */
@@ -915,6 +926,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
int ret = 0;
v4l2_i2c_subdev_init(&sensor->sd, client, &ov2680_subdev_ops);
+ sensor->sd.internal_ops = &ov2680_internal_ops;
sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 396583826..9b8481b8d 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -404,7 +404,7 @@ __ov2685_get_pad_crop(struct ov2685 *ov2685,
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov2685->subdev, state, pad);
+ return v4l2_subdev_state_get_crop(state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return mode->analog_crop;
}
@@ -547,7 +547,7 @@ static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov2685->mutex);
- try_fmt = v4l2_subdev_get_try_format(sd, fh->state, 0);
+ try_fmt = v4l2_subdev_state_get_format(fh->state, 0);
/* Initialize try_fmt */
ov2685_fill_fmt(&supported_modes[0], try_fmt);
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 6be22586c..552935ccb 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -3,7 +3,9 @@
#include <asm/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -14,6 +16,7 @@
#include <media/v4l2-fwnode.h>
#define OV2740_LINK_FREQ_360MHZ 360000000ULL
+#define OV2740_LINK_FREQ_180MHZ 180000000ULL
#define OV2740_SCLK 72000000LL
#define OV2740_MCLK 19200000
#define OV2740_DATA_LANES 2
@@ -28,9 +31,6 @@
/* vertical-timings from sensor */
#define OV2740_REG_VTS 0x380e
-#define OV2740_VTS_DEF 0x088a
-#define OV2740_VTS_MIN 0x0460
-#define OV2740_VTS_MAX 0x7fff
/* horizontal-timings from sensor */
#define OV2740_REG_HTS 0x380c
@@ -84,6 +84,7 @@ struct nvm_data {
enum {
OV2740_LINK_FREQ_360MHZ_INDEX,
+ OV2740_LINK_FREQ_180MHZ_INDEX,
};
struct ov2740_reg {
@@ -116,6 +117,9 @@ struct ov2740_mode {
/* Min vertical timing size */
u32 vts_min;
+ /* Max vertical timing size */
+ u32 vts_max;
+
/* Link frequency needed for this resolution */
u32 link_freq_index;
@@ -124,7 +128,6 @@ struct ov2740_mode {
};
static const struct ov2740_reg mipi_data_rate_720mbps[] = {
- {0x0103, 0x01},
{0x0302, 0x4b},
{0x030d, 0x4b},
{0x030e, 0x02},
@@ -132,7 +135,17 @@ static const struct ov2740_reg mipi_data_rate_720mbps[] = {
{0x0312, 0x11},
};
-static const struct ov2740_reg mode_1932x1092_regs[] = {
+static const struct ov2740_reg mipi_data_rate_360mbps[] = {
+ {0x0302, 0x4b},
+ {0x0303, 0x01},
+ {0x030d, 0x4b},
+ {0x030e, 0x02},
+ {0x030a, 0x01},
+ {0x0312, 0x11},
+ {0x4837, 0x2c},
+};
+
+static const struct ov2740_reg mode_1932x1092_regs_360mhz[] = {
{0x3000, 0x00},
{0x3018, 0x32},
{0x3031, 0x0a},
@@ -285,6 +298,159 @@ static const struct ov2740_reg mode_1932x1092_regs[] = {
{0x3813, 0x01},
};
+static const struct ov2740_reg mode_1932x1092_regs_180mhz[] = {
+ {0x3000, 0x00},
+ {0x3018, 0x32}, /* 0x32 for 2 lanes, 0x12 for 1 lane */
+ {0x3031, 0x0a},
+ {0x3080, 0x08},
+ {0x3083, 0xB4},
+ {0x3103, 0x00},
+ {0x3104, 0x01},
+ {0x3106, 0x01},
+ {0x3500, 0x00},
+ {0x3501, 0x44},
+ {0x3502, 0x40},
+ {0x3503, 0x88},
+ {0x3507, 0x00},
+ {0x3508, 0x00},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x3510, 0x00},
+ {0x3511, 0x00},
+ {0x3512, 0x20},
+ {0x3632, 0x00},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3645, 0x13},
+ {0x3646, 0x81},
+ {0x3636, 0x10},
+ {0x3651, 0x0a},
+ {0x3656, 0x02},
+ {0x3659, 0x04},
+ {0x365a, 0xda},
+ {0x365b, 0xa2},
+ {0x365c, 0x04},
+ {0x365d, 0x1d},
+ {0x365e, 0x1a},
+ {0x3662, 0xd7},
+ {0x3667, 0x78},
+ {0x3669, 0x0a},
+ {0x366a, 0x92},
+ {0x3700, 0x54},
+ {0x3702, 0x10},
+ {0x3706, 0x42},
+ {0x3709, 0x30},
+ {0x370b, 0xc2},
+ {0x3714, 0x63},
+ {0x3715, 0x01},
+ {0x3716, 0x00},
+ {0x371a, 0x3e},
+ {0x3732, 0x0e},
+ {0x3733, 0x10},
+ {0x375f, 0x0e},
+ {0x3768, 0x30},
+ {0x3769, 0x44},
+ {0x376a, 0x22},
+ {0x377b, 0x20},
+ {0x377c, 0x00},
+ {0x377d, 0x0c},
+ {0x3798, 0x00},
+ {0x37a1, 0x55},
+ {0x37a8, 0x6d},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x07},
+ {0x3805, 0x8f},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x07},
+ {0x3809, 0x88},
+ {0x380a, 0x04},
+ {0x380b, 0x40},
+ {0x380c, 0x08},
+ {0x380d, 0x70},
+ {0x380e, 0x04},
+ {0x380f, 0x56},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x04},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x3822, 0x84},
+ {0x3829, 0x00},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x04},
+ {0x3836, 0x01},
+ {0x3837, 0x08},
+ {0x3839, 0x01},
+ {0x383a, 0x00},
+ {0x383b, 0x08},
+ {0x383c, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x20},
+ {0x4009, 0x07},
+ {0x4003, 0x10},
+ {0x4010, 0xe0},
+ {0x4016, 0x00},
+ {0x4017, 0x10},
+ {0x4044, 0x02},
+ {0x4304, 0x08},
+ {0x4307, 0x30},
+ {0x4320, 0x80},
+ {0x4322, 0x00},
+ {0x4323, 0x00},
+ {0x4324, 0x00},
+ {0x4325, 0x00},
+ {0x4326, 0x00},
+ {0x4327, 0x00},
+ {0x4328, 0x00},
+ {0x4329, 0x00},
+ {0x432c, 0x03},
+ {0x432d, 0x81},
+ {0x4501, 0x84},
+ {0x4502, 0x40},
+ {0x4503, 0x18},
+ {0x4504, 0x04},
+ {0x4508, 0x02},
+ {0x4601, 0x10},
+ {0x4800, 0x00},
+ {0x4816, 0x52},
+ {0x5000, 0x73}, /* 0x7f enables DPC */
+ {0x5001, 0x00},
+ {0x5005, 0x38},
+ {0x501e, 0x0d},
+ {0x5040, 0x00},
+ {0x5901, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x07},
+ {0x3805, 0x8f},
+ {0x3806, 0x04},
+ {0x3807, 0x47},
+ {0x3808, 0x07},
+ {0x3809, 0x8c},
+ {0x380a, 0x04},
+ {0x380b, 0x44},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x01},
+ {0x4003, 0x40}, /* set black level to 0x40 */
+};
+
static const char * const ov2740_test_pattern_menu[] = {
"Disabled",
"Color Bar",
@@ -295,6 +461,7 @@ static const char * const ov2740_test_pattern_menu[] = {
static const s64 link_freq_menu_items[] = {
OV2740_LINK_FREQ_360MHZ,
+ OV2740_LINK_FREQ_180MHZ,
};
static const struct ov2740_link_freq_config link_freq_configs[] = {
@@ -304,23 +471,46 @@ static const struct ov2740_link_freq_config link_freq_configs[] = {
.regs = mipi_data_rate_720mbps,
}
},
+ [OV2740_LINK_FREQ_180MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_360mbps),
+ .regs = mipi_data_rate_360mbps,
+ }
+ },
};
-static const struct ov2740_mode supported_modes[] = {
+static const struct ov2740_mode supported_modes_360mhz[] = {
{
.width = 1932,
.height = 1092,
.hts = 2160,
- .vts_def = OV2740_VTS_DEF,
- .vts_min = OV2740_VTS_MIN,
+ .vts_min = 1120,
+ .vts_def = 2186,
+ .vts_max = 32767,
.reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_1932x1092_regs),
- .regs = mode_1932x1092_regs,
+ .num_of_regs = ARRAY_SIZE(mode_1932x1092_regs_360mhz),
+ .regs = mode_1932x1092_regs_360mhz,
},
.link_freq_index = OV2740_LINK_FREQ_360MHZ_INDEX,
},
};
+static const struct ov2740_mode supported_modes_180mhz[] = {
+ {
+ .width = 1932,
+ .height = 1092,
+ .hts = 2160,
+ .vts_min = 1110,
+ .vts_def = 1110,
+ .vts_max = 2047,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1932x1092_regs_180mhz),
+ .regs = mode_1932x1092_regs_180mhz,
+ },
+ .link_freq_index = OV2740_LINK_FREQ_180MHZ_INDEX,
+ },
+};
+
struct ov2740 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -333,12 +523,20 @@ struct ov2740 {
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
+ /* GPIOs, clocks */
+ struct gpio_desc *reset_gpio;
+ struct clk *clk;
+
/* Current mode */
const struct ov2740_mode *cur_mode;
/* NVM data inforamtion */
struct nvm_data *nvm;
+ /* Supported modes */
+ const struct ov2740_mode *supported_modes;
+ int supported_modes_count;
+
/* True if the device has been identified */
bool identified;
};
@@ -583,7 +781,7 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
pixel_rate, 1, pixel_rate);
vblank_min = cur_mode->vts_min - cur_mode->height;
- vblank_max = OV2740_VTS_MAX - cur_mode->height;
+ vblank_max = cur_mode->vts_max - cur_mode->height;
vblank_default = cur_mode->vts_def - cur_mode->height;
ov2740->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
V4L2_CID_VBLANK, vblank_min,
@@ -735,6 +933,15 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
if (ov2740->nvm)
ov2740_load_otp_data(ov2740->nvm);
+ /* Reset the sensor */
+ ret = ov2740_write_reg(ov2740, 0x0103, 1, 0x01);
+ if (ret) {
+ dev_err(&client->dev, "failed to reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 15000);
+
link_freq_index = ov2740->cur_mode->link_freq_index;
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
@@ -810,13 +1017,13 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
const struct ov2740_mode *mode;
s32 vblank_def, h_blank;
- mode = v4l2_find_nearest_size(supported_modes,
- ARRAY_SIZE(supported_modes), width,
- height, fmt->format.width,
- fmt->format.height);
+ mode = v4l2_find_nearest_size(ov2740->supported_modes,
+ ov2740->supported_modes_count,
+ width, height,
+ fmt->format.width, fmt->format.height);
ov2740_update_pad_format(mode, &fmt->format);
- *v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
@@ -830,7 +1037,7 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
vblank_def = mode->vts_def - mode->height;
__v4l2_ctrl_modify_range(ov2740->vblank,
mode->vts_min - mode->height,
- OV2740_VTS_MAX - mode->height, 1, vblank_def);
+ mode->vts_max - mode->height, 1, vblank_def);
__v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
h_blank = mode->hts - mode->width;
__v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1, h_blank);
@@ -854,7 +1061,10 @@ static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= ARRAY_SIZE(supported_modes))
+ struct ov2740 *ov2740 = to_ov2740(sd);
+ const struct ov2740_mode *supported_modes = ov2740->supported_modes;
+
+ if (fse->index >= ov2740->supported_modes_count)
return -EINVAL;
if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
@@ -868,12 +1078,13 @@ static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int ov2740_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ov2740_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
- ov2740_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_pad_format(sd, sd_state, 0));
+ struct ov2740 *ov2740 = to_ov2740(sd);
+ ov2740_update_pad_format(&ov2740->supported_modes[0],
+ v4l2_subdev_state_get_format(sd_state, 0));
return 0;
}
@@ -886,7 +1097,6 @@ static const struct v4l2_subdev_pad_ops ov2740_pad_ops = {
.set_fmt = ov2740_set_format,
.enum_mbus_code = ov2740_enum_mbus_code,
.enum_frame_size = ov2740_enum_frame_size,
- .init_cfg = ov2740_init_cfg,
};
static const struct v4l2_subdev_ops ov2740_subdev_ops = {
@@ -894,12 +1104,18 @@ static const struct v4l2_subdev_ops ov2740_subdev_ops = {
.pad = &ov2740_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov2740_internal_ops = {
+ .init_state = ov2740_init_state,
+};
+
static const struct media_entity_operations ov2740_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
static int ov2740_check_hwcfg(struct device *dev)
{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2740 *ov2740 = to_ov2740(sd);
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -909,23 +1125,32 @@ static int ov2740_check_hwcfg(struct device *dev)
int ret;
unsigned int i, j;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver;
+ * wait for this.
+ */
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EPROBE_DEFER;
+
ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret)
- return ret;
+ if (ret) {
+ fwnode_handle_put(ep);
+ return dev_err_probe(dev, ret,
+ "reading clock-frequency property\n");
+ }
- if (mclk != OV2740_MCLK)
+ if (mclk != OV2740_MCLK) {
+ fwnode_handle_put(ep);
return dev_err_probe(dev, -EINVAL,
"external clock %d is not supported\n",
mclk);
-
- ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (!ep)
- return -EPROBE_DEFER;
+ }
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV2740_DATA_LANES) {
ret = dev_err_probe(dev, -EINVAL,
@@ -946,14 +1171,29 @@ static int ov2740_check_hwcfg(struct device *dev)
break;
}
- if (j == bus_cfg.nr_of_link_frequencies) {
- ret = dev_err_probe(dev, -EINVAL,
- "no link frequency %lld supported\n",
- link_freq_menu_items[i]);
- goto check_hwcfg_error;
+ if (j == bus_cfg.nr_of_link_frequencies)
+ continue;
+
+ switch (i) {
+ case OV2740_LINK_FREQ_360MHZ_INDEX:
+ ov2740->supported_modes = supported_modes_360mhz;
+ ov2740->supported_modes_count =
+ ARRAY_SIZE(supported_modes_360mhz);
+ break;
+ case OV2740_LINK_FREQ_180MHZ_INDEX:
+ ov2740->supported_modes = supported_modes_180mhz;
+ ov2740->supported_modes_count =
+ ARRAY_SIZE(supported_modes_180mhz);
+ break;
}
+
+ break; /* Prefer modes from first available link-freq */
}
+ if (!ov2740->supported_modes)
+ ret = dev_err_probe(dev, -EINVAL,
+ "no supported link frequencies\n");
+
check_hwcfg_error:
v4l2_fwnode_endpoint_free(&bus_cfg);
@@ -1047,6 +1287,32 @@ static int ov2740_register_nvmem(struct i2c_client *client,
return 0;
}
+static int ov2740_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2740 *ov2740 = to_ov2740(sd);
+
+ gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
+ clk_disable_unprepare(ov2740->clk);
+ return 0;
+}
+
+static int ov2740_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2740 *ov2740 = to_ov2740(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov2740->clk);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(ov2740->reset_gpio, 0);
+ msleep(20);
+
+ return 0;
+}
+
static int ov2740_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1054,23 +1320,42 @@ static int ov2740_probe(struct i2c_client *client)
bool full_power;
int ret;
- ret = ov2740_check_hwcfg(&client->dev);
- if (ret)
- return dev_err_probe(dev, ret, "failed to check HW configuration\n");
-
ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
if (!ov2740)
return -ENOMEM;
v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
+ ov2740->sd.internal_ops = &ov2740_internal_ops;
+
+ ret = ov2740_check_hwcfg(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to check HW configuration\n");
+
+ ov2740->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2740->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ov2740->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ov2740->clk = devm_clk_get_optional(dev, "clk");
+ if (IS_ERR(ov2740->clk))
+ return dev_err_probe(dev, PTR_ERR(ov2740->clk),
+ "failed to get clock\n");
+
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
- ret = ov2740_identify_module(ov2740);
+ /* ACPI does not always clear the reset GPIO / enable the clock */
+ ret = ov2740_resume(dev);
if (ret)
- return dev_err_probe(dev, ret, "failed to find sensor\n");
+ return dev_err_probe(dev, ret, "failed to power on sensor\n");
+
+ ret = ov2740_identify_module(ov2740);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to find sensor\n");
+ goto probe_error_power_off;
+ }
}
- ov2740->cur_mode = &supported_modes[0];
+ ov2740->cur_mode = &ov2740->supported_modes[0];
ret = ov2740_init_controls(ov2740);
if (ret) {
dev_err_probe(dev, ret, "failed to init controls\n");
@@ -1121,9 +1406,16 @@ probe_error_media_entity_cleanup:
probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
+probe_error_power_off:
+ if (full_power)
+ ov2740_suspend(dev);
+
return ret;
}
+static DEFINE_RUNTIME_DEV_PM_OPS(ov2740_pm_ops, ov2740_suspend, ov2740_resume,
+ NULL);
+
static const struct acpi_device_id ov2740_acpi_ids[] = {
{"INT3474"},
{}
@@ -1135,6 +1427,7 @@ static struct i2c_driver ov2740_i2c_driver = {
.driver = {
.name = "ov2740",
.acpi_match_table = ov2740_acpi_ids,
+ .pm = pm_sleep_ptr(&ov2740_pm_ops),
},
.probe = ov2740_probe,
.remove = ov2740_remove,
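
Two recurring probe-time patterns meet in the ov2740 hunks above. The endpoint lookup moves to the top of ov2740_check_hwcfg() so that a graph not yet populated by the bridge yields -EPROBE_DEFER before any property is parsed, and every real failure is funneled through dev_err_probe(), which logs once and stays quiet on deferral. A minimal sketch of that shape, assuming an illustrative ov_check_endpoint() helper rather than anything from this patch:

#include <linux/device.h>
#include <linux/property.h>

static int ov_check_endpoint(struct device *dev)
{
	struct fwnode_handle *ep;
	u32 mclk;
	int ret;

	/* No endpoint yet: the bridge driver has not run, try again later. */
	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		return -EPROBE_DEFER;

	ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
				       &mclk);
	fwnode_handle_put(ep);
	if (ret)
		return dev_err_probe(dev, ret,
				     "reading clock-frequency property\n");

	return 0;
}

The same suspend/resume pair then does double duty: DEFINE_RUNTIME_DEV_PM_OPS() registers it for runtime PM, and probe calls ov2740_resume() by hand for the ACPI-left-in-reset case before touching the sensor.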
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 3bd972a82..403091651 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -570,7 +570,7 @@ static int ov4689_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov4689->mutex);
- try_fmt = v4l2_subdev_get_try_format(sd, fh->state, 0);
+ try_fmt = v4l2_subdev_state_get_format(fh->state, 0);
/* Initialize try_fmt */
ov4689_fill_fmt(&supported_modes[OV4689_MODE_2688_1520], try_fmt);
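
This one-line ov4689 hunk is an instance of a tree-wide conversion in this release: the v4l2_subdev_get_try_format()/_get_try_crop() accessors, which needed the subdev pointer, are replaced by v4l2_subdev_state_get_format()/_get_crop(), which operate on the state alone. A hedged sketch of the new accessor in use (the helper name is illustrative):

#include <media/v4l2-subdev.h>

static void ov_seed_pad0(struct v4l2_subdev_state *state,
			 const struct v4l2_mbus_framefmt *def)
{
	/* The state alone now identifies per-pad formats and crops. */
	struct v4l2_mbus_framefmt *fmt =
		v4l2_subdev_state_get_format(state, 0);

	*fmt = *def;
}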
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 40532f7bc..5162d45fe 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -399,7 +399,7 @@ struct ov5640_mode_info {
const struct reg_value *reg_data;
u32 reg_data_size;
- /* Used by s_frame_interval only. */
+ /* Used by set_frame_interval only. */
u32 max_fps;
u32 def_fps;
};
@@ -2797,8 +2797,7 @@ static int ov5640_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
- format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
else
fmt = &sensor->fmt;
@@ -2971,7 +2970,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
goto out;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, 0) = *mbus_fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *mbus_fmt;
goto out;
}
@@ -3605,11 +3604,19 @@ static int ov5640_enum_frame_interval(
return 0;
}
-static int ov5640_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int ov5640_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&sensor->lock);
fi->interval = sensor->frame_interval;
mutex_unlock(&sensor->lock);
@@ -3617,13 +3624,21 @@ static int ov5640_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int ov5640_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *mode;
int frame_rate, ret = 0;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad != 0)
return -EINVAL;
@@ -3745,13 +3760,13 @@ out:
return ret;
}
-static int ov5640_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int ov5640_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct ov5640_dev *sensor = to_ov5640_dev(sd);
struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(sd, state, 0);
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, state, 0);
+ v4l2_subdev_state_get_format(state, 0);
+ struct v4l2_rect *crop = v4l2_subdev_state_get_crop(state, 0);
*fmt = ov5640_is_csi2(sensor) ? ov5640_csi2_default_fmt :
ov5640_dvp_default_fmt;
@@ -3771,17 +3786,16 @@ static const struct v4l2_subdev_core_ops ov5640_core_ops = {
};
static const struct v4l2_subdev_video_ops ov5640_video_ops = {
- .g_frame_interval = ov5640_g_frame_interval,
- .s_frame_interval = ov5640_s_frame_interval,
.s_stream = ov5640_s_stream,
};
static const struct v4l2_subdev_pad_ops ov5640_pad_ops = {
- .init_cfg = ov5640_init_cfg,
.enum_mbus_code = ov5640_enum_mbus_code,
.get_fmt = ov5640_get_fmt,
.set_fmt = ov5640_set_fmt,
.get_selection = ov5640_get_selection,
+ .get_frame_interval = ov5640_get_frame_interval,
+ .set_frame_interval = ov5640_set_frame_interval,
.enum_frame_size = ov5640_enum_frame_size,
.enum_frame_interval = ov5640_enum_frame_interval,
};
@@ -3792,6 +3806,10 @@ static const struct v4l2_subdev_ops ov5640_subdev_ops = {
.pad = &ov5640_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov5640_internal_ops = {
+ .init_state = ov5640_init_state,
+};
+
static int ov5640_get_regulators(struct ov5640_dev *sensor)
{
int i;
@@ -3906,6 +3924,7 @@ static int ov5640_probe(struct i2c_client *client)
return PTR_ERR(sensor->reset_gpio);
v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
+ sensor->sd.internal_ops = &ov5640_internal_ops;
sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
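
Besides the accessor conversion, ov5640 picks up the other subdev API change in this release: g_frame_interval/s_frame_interval move from v4l2_subdev_video_ops to get_frame_interval/set_frame_interval in v4l2_subdev_pad_ops, gaining a state argument and a 'which' selector. Until the drivers implement TRY intervals, they reject everything but the active state, as in this outline (struct ov_priv and its field are placeholders):

#include <media/v4l2-subdev.h>

struct ov_priv {
	struct v4l2_fract frame_interval;
};

static int ov_get_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_frame_interval *fi)
{
	struct ov_priv *priv = v4l2_get_subdevdata(sd);

	/* TRY intervals are not implemented yet, see the FIXMEs above. */
	if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	fi->interval = priv->frame_interval;

	return 0;
}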
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index a70db7e60..a26ac11c9 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -851,7 +851,7 @@ __ov5645_get_pad_format(struct ov5645 *ov5645,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov5645->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5645->fmt;
default:
@@ -878,7 +878,7 @@ __ov5645_get_pad_crop(struct ov5645 *ov5645,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov5645->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5645->crop;
default:
@@ -934,8 +934,8 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
return 0;
}
-static int ov5645_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int ov5645_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { 0 };
@@ -1023,7 +1023,6 @@ static const struct v4l2_subdev_video_ops ov5645_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov5645_subdev_pad_ops = {
- .init_cfg = ov5645_entity_init_cfg,
.enum_mbus_code = ov5645_enum_mbus_code,
.enum_frame_size = ov5645_enum_frame_size,
.get_fmt = ov5645_get_format,
@@ -1036,6 +1035,10 @@ static const struct v4l2_subdev_ops ov5645_subdev_ops = {
.pad = &ov5645_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov5645_internal_ops = {
+ .init_state = ov5645_init_state,
+};
+
static int ov5645_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1162,6 +1165,7 @@ static int ov5645_probe(struct i2c_client *client)
}
v4l2_i2c_subdev_init(&ov5645->sd, client, &ov5645_subdev_ops);
+ ov5645->sd.internal_ops = &ov5645_internal_ops;
ov5645->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ov5645->pad.flags = MEDIA_PAD_FL_SOURCE;
ov5645->sd.dev = &client->dev;
@@ -1220,7 +1224,7 @@ static int ov5645_probe(struct i2c_client *client)
pm_runtime_get_noresume(dev);
pm_runtime_enable(dev);
- ov5645_entity_init_cfg(&ov5645->sd, NULL);
+ ov5645_init_state(&ov5645->sd, NULL);
ret = v4l2_async_register_subdev(&ov5645->sd);
if (ret < 0) {
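
ov5645 also shows where .init_cfg went: default-state initialization is now the .init_state hook in v4l2_subdev_internal_ops, wired up right after v4l2_i2c_subdev_init(). A minimal sketch of the wiring, with illustrative defaults rather than a real mode table:

#include <media/v4l2-subdev.h>

static int ov_init_state(struct v4l2_subdev *sd,
			 struct v4l2_subdev_state *state)
{
	struct v4l2_mbus_framefmt *fmt =
		v4l2_subdev_state_get_format(state, 0);

	/* Illustrative defaults only; real drivers use their mode tables. */
	fmt->width = 1280;
	fmt->height = 720;
	fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
	fmt->field = V4L2_FIELD_NONE;

	return 0;
}

static const struct v4l2_subdev_internal_ops ov_internal_ops = {
	.init_state = ov_init_state,
};

/* In probe(), after v4l2_i2c_subdev_init():
 *	sd->internal_ops = &ov_internal_ops;
 */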
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index dcfe3129c..96c0fd4ff 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -882,7 +882,7 @@ __ov5647_get_pad_crop(struct ov5647 *ov5647,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov5647->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5647->mode->crop;
}
@@ -975,8 +975,8 @@ static int ov5647_get_pad_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
switch (format->which) {
case V4L2_SUBDEV_FORMAT_TRY:
- sensor_format = v4l2_subdev_get_try_format(sd, sd_state,
- format->pad);
+ sensor_format = v4l2_subdev_state_get_format(sd_state,
+ format->pad);
break;
default:
sensor_format = &sensor->mode->format;
@@ -1004,7 +1004,7 @@ static int ov5647_set_pad_fmt(struct v4l2_subdev *sd,
/* Update the sensor mode and apply it at streamon time. */
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = mode->format;
+ *v4l2_subdev_state_get_format(sd_state, format->pad) = mode->format;
} else {
int exposure_max, exposure_def;
int hblank, vblank;
@@ -1121,8 +1121,8 @@ static int ov5647_detect(struct v4l2_subdev *sd)
static int ov5647_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
+ struct v4l2_rect *crop = v4l2_subdev_state_get_crop(fh->state, 0);
crop->left = OV5647_PIXEL_ARRAY_LEFT;
crop->top = OV5647_PIXEL_ARRAY_TOP;
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index aa10eb4e3..4b86d2631 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -2158,37 +2158,8 @@ static int ov5648_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
-static int ov5648_g_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
- const struct ov5648_mode *mode;
- int ret = 0;
-
- mutex_lock(&sensor->mutex);
-
- mode = sensor->state.mode;
-
- switch (sensor->state.mbus_code) {
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- interval->interval = mode->frame_interval[0];
- break;
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- interval->interval = mode->frame_interval[1];
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&sensor->mutex);
-
- return ret;
-}
-
static const struct v4l2_subdev_video_ops ov5648_subdev_video_ops = {
.s_stream = ov5648_s_stream,
- .g_frame_interval = ov5648_g_frame_interval,
- .s_frame_interval = ov5648_g_frame_interval,
};
/* Subdev Pad Operations */
@@ -2232,8 +2203,8 @@ static int ov5648_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(&sensor->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, sd_state,
- format->pad);
+ *mbus_format = *v4l2_subdev_state_get_format(sd_state,
+ format->pad);
else
ov5648_mbus_format_fill(mbus_format, sensor->state.mbus_code,
sensor->state.mode);
@@ -2285,7 +2256,7 @@ static int ov5648_set_fmt(struct v4l2_subdev *subdev,
ov5648_mbus_format_fill(mbus_format, mbus_code, mode);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, sd_state, format->pad) =
+ *v4l2_subdev_state_get_format(sd_state, format->pad) =
*mbus_format;
else if (sensor->state.mode != mode ||
sensor->state.mbus_code != mbus_code)
@@ -2297,6 +2268,41 @@ complete:
return ret;
}
+static int ov5648_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
+ const struct ov5648_mode *mode;
+ int ret = 0;
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ mutex_lock(&sensor->mutex);
+
+ mode = sensor->state.mode;
+
+ switch (sensor->state.mbus_code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ interval->interval = mode->frame_interval[0];
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ interval->interval = mode->frame_interval[1];
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&sensor->mutex);
+
+ return ret;
+}
+
static int ov5648_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *size_enum)
@@ -2363,6 +2369,8 @@ static const struct v4l2_subdev_pad_ops ov5648_subdev_pad_ops = {
.enum_mbus_code = ov5648_enum_mbus_code,
.get_fmt = ov5648_get_fmt,
.set_fmt = ov5648_set_fmt,
+ .get_frame_interval = ov5648_get_frame_interval,
+ .set_frame_interval = ov5648_get_frame_interval,
.enum_frame_size = ov5648_enum_frame_size,
.enum_frame_interval = ov5648_enum_frame_interval,
};
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index e80db3ecd..2aee85965 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2196,13 +2196,13 @@ error:
return ret;
}
-static int ov5670_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int ov5670_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(sd, state, 0);
+ v4l2_subdev_state_get_format(state, 0);
const struct ov5670_mode *default_mode = &supported_modes[0];
- struct v4l2_rect *crop = v4l2_subdev_get_try_crop(sd, state, 0);
+ struct v4l2_rect *crop = v4l2_subdev_state_get_crop(state, 0);
fmt->width = default_mode->width;
fmt->height = default_mode->height;
@@ -2263,9 +2263,8 @@ static int ov5670_do_get_pad_format(struct ov5670 *ov5670,
struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
ov5670_update_pad_format(ov5670->cur_mode, fmt);
@@ -2310,7 +2309,7 @@ static int ov5670_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
ov5670_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
ov5670->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov5670->link_freq, mode->link_freq_index);
@@ -2550,7 +2549,7 @@ __ov5670_get_pad_crop(struct ov5670 *sensor, struct v4l2_subdev_state *state,
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&sensor->sd, state, pad);
+ return v4l2_subdev_state_get_crop(state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return mode->analog_crop;
}
@@ -2593,7 +2592,6 @@ static const struct v4l2_subdev_video_ops ov5670_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov5670_pad_ops = {
- .init_cfg = ov5670_init_cfg,
.enum_mbus_code = ov5670_enum_mbus_code,
.get_fmt = ov5670_get_pad_format,
.set_fmt = ov5670_set_pad_format,
@@ -2613,6 +2611,10 @@ static const struct v4l2_subdev_ops ov5670_subdev_ops = {
.sensor = &ov5670_sensor_ops,
};
+static const struct v4l2_subdev_internal_ops ov5670_internal_ops = {
+ .init_state = ov5670_init_state,
+};
+
static const struct media_entity_operations ov5670_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -2676,6 +2678,7 @@ static int ov5670_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov5670->sd, client, &ov5670_subdev_ops);
+ ov5670->sd.internal_ops = &ov5670_internal_ops;
ret = ov5670_regulators_probe(ov5670);
if (ret)
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index e63d9d402..3641911bc 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1036,7 +1036,7 @@ static int ov5675_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov5675->mutex);
ov5675_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
ov5675->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov5675->link_freq, mode->link_freq_index);
@@ -1069,9 +1069,8 @@ static int ov5675_get_format(struct v4l2_subdev *sd,
mutex_lock(&ov5675->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov5675->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
ov5675_update_pad_format(ov5675->cur_mode, &fmt->format);
@@ -1141,7 +1140,7 @@ static int ov5675_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov5675->mutex);
ov5675_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
mutex_unlock(&ov5675->mutex);
return 0;
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 819425e21..8deb28b55 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -775,7 +775,7 @@ __ov5693_get_pad_format(struct ov5693_device *ov5693,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov5693->sd, state, pad);
+ return v4l2_subdev_state_get_format(state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5693->mode.format;
default:
@@ -790,7 +790,7 @@ __ov5693_get_pad_crop(struct ov5693_device *ov5693,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov5693->sd, state, pad);
+ return v4l2_subdev_state_get_crop(state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov5693->mode.crop;
}
@@ -1004,14 +1004,22 @@ err_power_down:
return ret;
}
-static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int ov5693_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
unsigned int framesize = OV5693_FIXED_PPL * (ov5693->mode.format.height +
ov5693->ctrls.vblank->val);
unsigned int fps = DIV_ROUND_CLOSEST(OV5693_PIXEL_RATE, framesize);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
interval->interval.numerator = 1;
interval->interval.denominator = fps;
@@ -1054,7 +1062,6 @@ static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops ov5693_video_ops = {
.s_stream = ov5693_s_stream,
- .g_frame_interval = ov5693_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
@@ -1064,6 +1071,7 @@ static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
.set_fmt = ov5693_set_fmt,
.get_selection = ov5693_get_selection,
.set_selection = ov5693_set_selection,
+ .get_frame_interval = ov5693_get_frame_interval,
};
static const struct v4l2_subdev_ops ov5693_ops = {
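
ov5693 is the only driver in this batch whose reported interval is computed rather than stored: the frame length is a fixed pixels-per-line count times the current height plus vblank, and fps is the pixel rate divided by that product. The same arithmetic as a standalone sketch (the constants are illustrative, not the sensor's real values):

#include <linux/math.h>

/*
 * fps = pixel_rate / (ppl * (height + vblank)).
 * E.g. 160000000 / (2500 * (1080 + 56)) is roughly 56 fps.
 */
static unsigned int ov_fps(unsigned int pixel_rate, unsigned int ppl,
			   unsigned int height, unsigned int vblank)
{
	return DIV_ROUND_CLOSEST(pixel_rate, ppl * (height + vblank));
}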
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index c8f57ce15..663eccdfe 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -820,7 +820,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
fmt->format.height = mode->height;
fmt->format.field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
ov5695->cur_mode = mode;
h_blank = mode->hts_def - mode->width;
@@ -846,8 +846,8 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov5695->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
} else {
fmt->format.width = mode->width;
fmt->format.height = mode->height;
@@ -1039,7 +1039,7 @@ static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov5695 *ov5695 = to_ov5695(sd);
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
const struct ov5695_mode *def_mode = &supported_modes[0];
mutex_lock(&ov5695->mutex);
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
new file mode 100644
index 000000000..4fba4c2cb
--- /dev/null
+++ b/drivers/media/i2c/ov64a40.c
@@ -0,0 +1,3690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 sensor driver for OmniVision OV64A40
+ *
+ * Copyright (C) 2023 Ideas On Board Oy
+ * Copyright (C) 2023 Arducam
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define OV64A40_XCLK_FREQ 24000000
+
+#define OV64A40_NATIVE_WIDTH 9286
+#define OV64A40_NATIVE_HEIGHT 6976
+#define OV64A40_PIXEL_ARRAY_TOP 0
+#define OV64A40_PIXEL_ARRAY_LEFT 0
+#define OV64A40_PIXEL_ARRAY_WIDTH 9248
+#define OV64A40_PIXEL_ARRAY_HEIGHT 6944
+
+#define OV64A40_PIXEL_RATE 300000000
+
+#define OV64A40_LINK_FREQ_360M 360000000
+#define OV64A40_LINK_FREQ_456M 456000000
+
+#define OV64A40_PLL1_PRE_DIV0 CCI_REG8(0x0301)
+#define OV64A40_PLL1_PRE_DIV CCI_REG8(0x0303)
+#define OV64A40_PLL1_MULTIPLIER CCI_REG16(0x0304)
+#define OV64A40_PLL1_M_DIV CCI_REG8(0x0307)
+#define OV64A40_PLL2_SEL_BAK_SA1 CCI_REG8(0x0320)
+#define OV64A40_PLL2_PRE_DIV CCI_REG8(0x0323)
+#define OV64A40_PLL2_MULTIPLIER CCI_REG16(0x0324)
+#define OV64A40_PLL2_PRE_DIV0 CCI_REG8(0x0326)
+#define OV64A40_PLL2_DIVDAC CCI_REG8(0x0329)
+#define OV64A40_PLL2_DIVSP CCI_REG8(0x032d)
+#define OV64A40_PLL2_DACPREDIV CCI_REG8(0x032e)
+
+/* TODO: validate vblank_min; it's not characterized in the datasheet. */
+#define OV64A40_VBLANK_MIN 128
+#define OV64A40_VTS_MAX 0xffffff
+
+#define OV64A40_REG_MEC_LONG_EXPO CCI_REG24(0x3500)
+#define OV64A40_EXPOSURE_MIN 16
+#define OV64A40_EXPOSURE_MARGIN 32
+
+#define OV64A40_REG_MEC_LONG_GAIN CCI_REG16(0x3508)
+#define OV64A40_ANA_GAIN_MIN 0x80
+#define OV64A40_ANA_GAIN_MAX 0x7ff
+#define OV64A40_ANA_GAIN_DEFAULT 0x80
+
+#define OV64A40_REG_TIMING_CTRL0 CCI_REG16(0x3800)
+#define OV64A40_REG_TIMING_CTRL2 CCI_REG16(0x3802)
+#define OV64A40_REG_TIMING_CTRL4 CCI_REG16(0x3804)
+#define OV64A40_REG_TIMING_CTRL6 CCI_REG16(0x3806)
+#define OV64A40_REG_TIMING_CTRL8 CCI_REG16(0x3808)
+#define OV64A40_REG_TIMING_CTRLA CCI_REG16(0x380a)
+#define OV64A40_REG_TIMING_CTRLC CCI_REG16(0x380c)
+#define OV64A40_REG_TIMING_CTRLE CCI_REG16(0x380e)
+#define OV64A40_REG_TIMING_CTRL10 CCI_REG16(0x3810)
+#define OV64A40_REG_TIMING_CTRL12 CCI_REG16(0x3812)
+
+/*
+ * Careful: a typo in the datasheet calls this register
+ * OV64A40_REG_TIMING_CTRL20.
+ */
+#define OV64A40_REG_TIMING_CTRL14 CCI_REG8(0x3814)
+#define OV64A40_REG_TIMING_CTRL15 CCI_REG8(0x3815)
+#define OV64A40_ODD_INC_SHIFT 4
+#define OV64A40_SKIPPING_CONFIG(_odd, _even) \
+ (((_odd) << OV64A40_ODD_INC_SHIFT) | (_even))
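+
+/*
+ * For illustration (not a write from the tables below):
+ * OV64A40_SKIPPING_CONFIG(3, 1) evaluates to (3 << 4) | 1 = 0x31,
+ * i.e. the odd increment lands in the high nibble and the even
+ * increment in the low nibble of TIMING_CTRL14/15.
+ */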
+
+#define OV64A40_REG_TIMING_CTRL_20 CCI_REG8(0x3820)
+#define OV64A40_TIMING_CTRL_20_VFLIP BIT(2)
+#define OV64A40_TIMING_CTRL_20_VBIN BIT(1)
+
+#define OV64A40_REG_TIMING_CTRL_21 CCI_REG8(0x3821)
+#define OV64A40_TIMING_CTRL_21_HBIN BIT(4)
+#define OV64A40_TIMING_CTRL_21_HFLIP BIT(2)
+#define OV64A40_TIMING_CTRL_21_DSPEED BIT(0)
+#define OV64A40_TIMING_CTRL_21_HBIN_CONF \
+ (OV64A40_TIMING_CTRL_21_HBIN | \
+ OV64A40_TIMING_CTRL_21_DSPEED)
+
+#define OV64A40_REG_TIMINGS_VTS_HIGH CCI_REG8(0x3840)
+#define OV64A40_REG_TIMINGS_VTS_MID CCI_REG8(0x380e)
+#define OV64A40_REG_TIMINGS_VTS_LOW CCI_REG8(0x380f)
+
+/* The test pattern control is weirdly named PRE_ISP_2325_D2V2_TOP_1 in the TRM. */
+#define OV64A40_REG_TEST_PATTERN CCI_REG8(0x50c1)
+#define OV64A40_TEST_PATTERN_DISABLED 0x00
+#define OV64A40_TEST_PATTERN_TYPE1 BIT(0)
+#define OV64A40_TEST_PATTERN_TYPE2 (BIT(4) | BIT(0))
+#define OV64A40_TEST_PATTERN_TYPE3 (BIT(5) | BIT(0))
+#define OV64A40_TEST_PATTERN_TYPE4 (BIT(5) | BIT(4) | BIT(0))
+
+#define OV64A40_REG_CHIP_ID CCI_REG24(0x300a)
+#define OV64A40_CHIP_ID 0x566441
+
+#define OV64A40_REG_SMIA CCI_REG8(0x0100)
+#define OV64A40_REG_SMIA_STREAMING BIT(0)
+
+enum ov64a40_link_freq_ids {
+ OV64A40_LINK_FREQ_456M_ID,
+ OV64A40_LINK_FREQ_360M_ID,
+ OV64A40_NUM_LINK_FREQ,
+};
+
+static const char * const ov64a40_supply_names[] = {
+ /* Supplies can be enabled in any order */
+ "avdd", /* Analog (2.8V) supply */
+ "dovdd", /* Digital Core (1.8V) supply */
+ "dvdd", /* IF (1.1V) supply */
+};
+
+static const char * const ov64a40_test_pattern_menu[] = {
+ "Disabled",
+ "Type1",
+ "Type2",
+ "Type3",
+ "Type4",
+};
+
+static const int ov64a40_test_pattern_val[] = {
+ OV64A40_TEST_PATTERN_DISABLED,
+ OV64A40_TEST_PATTERN_TYPE1,
+ OV64A40_TEST_PATTERN_TYPE2,
+ OV64A40_TEST_PATTERN_TYPE3,
+ OV64A40_TEST_PATTERN_TYPE4,
+};
+
+static const unsigned int ov64a40_mbus_codes[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+};
+
+static const struct cci_reg_sequence ov64a40_init[] = {
+ { CCI_REG8(0x0103), 0x01 }, { CCI_REG8(0x0301), 0x88 },
+ { CCI_REG8(0x0304), 0x00 }, { CCI_REG8(0x0305), 0x96 },
+ { CCI_REG8(0x0306), 0x03 }, { CCI_REG8(0x0307), 0x00 },
+ { CCI_REG8(0x0345), 0x2c }, { CCI_REG8(0x034a), 0x02 },
+ { CCI_REG8(0x034b), 0x02 }, { CCI_REG8(0x0350), 0xc0 },
+ { CCI_REG8(0x0360), 0x09 }, { CCI_REG8(0x3012), 0x31 },
+ { CCI_REG8(0x3015), 0xf0 }, { CCI_REG8(0x3017), 0xf0 },
+ { CCI_REG8(0x301d), 0xf6 }, { CCI_REG8(0x301e), 0xf1 },
+ { CCI_REG8(0x3022), 0xf0 }, { CCI_REG8(0x3400), 0x08 },
+ { CCI_REG8(0x3608), 0x41 }, { CCI_REG8(0x3421), 0x02 },
+ { CCI_REG8(0x3500), 0x00 }, { CCI_REG8(0x3501), 0x00 },
+ { CCI_REG8(0x3502), 0x18 }, { CCI_REG8(0x3504), 0x0c },
+ { CCI_REG8(0x3508), 0x01 }, { CCI_REG8(0x3509), 0x00 },
+ { CCI_REG8(0x350a), 0x01 }, { CCI_REG8(0x350b), 0x00 },
+ { CCI_REG8(0x350b), 0x00 }, { CCI_REG8(0x3540), 0x00 },
+ { CCI_REG8(0x3541), 0x00 }, { CCI_REG8(0x3542), 0x08 },
+ { CCI_REG8(0x3548), 0x01 }, { CCI_REG8(0x3549), 0xa0 },
+ { CCI_REG8(0x3549), 0x00 }, { CCI_REG8(0x3549), 0x00 },
+ { CCI_REG8(0x3549), 0x00 }, { CCI_REG8(0x3580), 0x00 },
+ { CCI_REG8(0x3581), 0x00 }, { CCI_REG8(0x3582), 0x04 },
+ { CCI_REG8(0x3588), 0x01 }, { CCI_REG8(0x3589), 0xf0 },
+ { CCI_REG8(0x3589), 0x00 }, { CCI_REG8(0x3589), 0x00 },
+ { CCI_REG8(0x3589), 0x00 }, { CCI_REG8(0x360d), 0x83 },
+ { CCI_REG8(0x3616), 0xa0 }, { CCI_REG8(0x3617), 0x31 },
+ { CCI_REG8(0x3623), 0x10 }, { CCI_REG8(0x3633), 0x03 },
+ { CCI_REG8(0x3634), 0x03 }, { CCI_REG8(0x3635), 0x77 },
+ { CCI_REG8(0x3640), 0x19 }, { CCI_REG8(0x3641), 0x80 },
+ { CCI_REG8(0x364d), 0x0f }, { CCI_REG8(0x3680), 0x80 },
+ { CCI_REG8(0x3682), 0x00 }, { CCI_REG8(0x3683), 0x00 },
+ { CCI_REG8(0x3684), 0x07 }, { CCI_REG8(0x3688), 0x01 },
+ { CCI_REG8(0x3689), 0x08 }, { CCI_REG8(0x368a), 0x26 },
+ { CCI_REG8(0x368b), 0xc8 }, { CCI_REG8(0x368e), 0x70 },
+ { CCI_REG8(0x368f), 0x00 }, { CCI_REG8(0x3692), 0x04 },
+ { CCI_REG8(0x3693), 0x00 }, { CCI_REG8(0x3696), 0xd1 },
+ { CCI_REG8(0x3697), 0xe0 }, { CCI_REG8(0x3698), 0x80 },
+ { CCI_REG8(0x3699), 0x2b }, { CCI_REG8(0x369a), 0x00 },
+ { CCI_REG8(0x369d), 0x00 }, { CCI_REG8(0x369e), 0x14 },
+ { CCI_REG8(0x369f), 0x20 }, { CCI_REG8(0x36a5), 0x80 },
+ { CCI_REG8(0x36a6), 0x00 }, { CCI_REG8(0x36a7), 0x00 },
+ { CCI_REG8(0x36a8), 0x00 }, { CCI_REG8(0x36b5), 0x17 },
+ { CCI_REG8(0x3701), 0x30 }, { CCI_REG8(0x3706), 0x2b },
+ { CCI_REG8(0x3709), 0x8d }, { CCI_REG8(0x370b), 0x4f },
+ { CCI_REG8(0x3711), 0x00 }, { CCI_REG8(0x3712), 0x01 },
+ { CCI_REG8(0x3713), 0x00 }, { CCI_REG8(0x3720), 0x08 },
+ { CCI_REG8(0x3727), 0x22 }, { CCI_REG8(0x3728), 0x01 },
+ { CCI_REG8(0x375e), 0x00 }, { CCI_REG8(0x3760), 0x08 },
+ { CCI_REG8(0x3761), 0x10 }, { CCI_REG8(0x3762), 0x08 },
+ { CCI_REG8(0x3765), 0x10 }, { CCI_REG8(0x3766), 0x18 },
+ { CCI_REG8(0x376a), 0x08 }, { CCI_REG8(0x376b), 0x00 },
+ { CCI_REG8(0x376d), 0x1b }, { CCI_REG8(0x3791), 0x2b },
+ { CCI_REG8(0x3793), 0x2b }, { CCI_REG8(0x3795), 0x2b },
+ { CCI_REG8(0x3797), 0x4f }, { CCI_REG8(0x3799), 0x4f },
+ { CCI_REG8(0x379b), 0x4f }, { CCI_REG8(0x37a0), 0x22 },
+ { CCI_REG8(0x37da), 0x04 }, { CCI_REG8(0x37f9), 0x02 },
+ { CCI_REG8(0x37fa), 0x02 }, { CCI_REG8(0x37fb), 0x02 },
+ { CCI_REG8(0x3814), 0x11 }, { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x40 }, { CCI_REG8(0x3821), 0x04 },
+ { CCI_REG8(0x3822), 0x00 }, { CCI_REG8(0x3823), 0x04 },
+ { CCI_REG8(0x3827), 0x08 }, { CCI_REG8(0x3828), 0x00 },
+ { CCI_REG8(0x382a), 0x81 }, { CCI_REG8(0x382e), 0x70 },
+ { CCI_REG8(0x3837), 0x10 }, { CCI_REG8(0x3839), 0x00 },
+ { CCI_REG8(0x383b), 0x00 }, { CCI_REG8(0x383c), 0x00 },
+ { CCI_REG8(0x383d), 0x10 }, { CCI_REG8(0x383f), 0x00 },
+ { CCI_REG8(0x384c), 0x02 }, { CCI_REG8(0x384d), 0x8c },
+ { CCI_REG8(0x3852), 0x00 }, { CCI_REG8(0x3856), 0x10 },
+ { CCI_REG8(0x3857), 0x10 }, { CCI_REG8(0x3858), 0x20 },
+ { CCI_REG8(0x3859), 0x20 }, { CCI_REG8(0x3894), 0x00 },
+ { CCI_REG8(0x3895), 0x00 }, { CCI_REG8(0x3896), 0x00 },
+ { CCI_REG8(0x3897), 0x00 }, { CCI_REG8(0x3900), 0x40 },
+ { CCI_REG8(0x3aed), 0x6e }, { CCI_REG8(0x3af1), 0x73 },
+ { CCI_REG8(0x3d86), 0x12 }, { CCI_REG8(0x3d87), 0x30 },
+ { CCI_REG8(0x3d8c), 0xab }, { CCI_REG8(0x3d8d), 0xb0 },
+ { CCI_REG8(0x3f00), 0x12 }, { CCI_REG8(0x3f00), 0x12 },
+ { CCI_REG8(0x3f00), 0x12 }, { CCI_REG8(0x3f01), 0x03 },
+ { CCI_REG8(0x4009), 0x01 }, { CCI_REG8(0x400e), 0xc6 },
+ { CCI_REG8(0x400f), 0x00 }, { CCI_REG8(0x4010), 0x28 },
+ { CCI_REG8(0x4011), 0x01 }, { CCI_REG8(0x4012), 0x0c },
+ { CCI_REG8(0x4015), 0x00 }, { CCI_REG8(0x4016), 0x1f },
+ { CCI_REG8(0x4017), 0x00 }, { CCI_REG8(0x4018), 0x07 },
+ { CCI_REG8(0x401a), 0x40 }, { CCI_REG8(0x4028), 0x01 },
+ { CCI_REG8(0x4504), 0x00 }, { CCI_REG8(0x4506), 0x01 },
+ { CCI_REG8(0x4508), 0x00 }, { CCI_REG8(0x4509), 0x35 },
+ { CCI_REG8(0x450a), 0x08 }, { CCI_REG8(0x450c), 0x00 },
+ { CCI_REG8(0x450d), 0x20 }, { CCI_REG8(0x450e), 0x00 },
+ { CCI_REG8(0x450f), 0x20 }, { CCI_REG8(0x451e), 0x00 },
+ { CCI_REG8(0x451f), 0x00 }, { CCI_REG8(0x4523), 0x00 },
+ { CCI_REG8(0x4526), 0x00 }, { CCI_REG8(0x4527), 0x18 },
+ { CCI_REG8(0x4580), 0x01 }, { CCI_REG8(0x4583), 0x00 },
+ { CCI_REG8(0x4584), 0x00 }, { CCI_REG8(0x45c0), 0xa1 },
+ { CCI_REG8(0x4602), 0x08 }, { CCI_REG8(0x4603), 0x05 },
+ { CCI_REG8(0x4606), 0x12 }, { CCI_REG8(0x4607), 0x30 },
+ { CCI_REG8(0x460b), 0x00 }, { CCI_REG8(0x460d), 0x00 },
+ { CCI_REG8(0x4640), 0x00 }, { CCI_REG8(0x4641), 0x24 },
+ { CCI_REG8(0x4643), 0x08 }, { CCI_REG8(0x4645), 0x14 },
+ { CCI_REG8(0x4648), 0x0a }, { CCI_REG8(0x4649), 0x06 },
+ { CCI_REG8(0x464a), 0x00 }, { CCI_REG8(0x464b), 0x30 },
+ { CCI_REG8(0x4800), 0x04 }, { CCI_REG8(0x4802), 0x02 },
+ { CCI_REG8(0x480b), 0x10 }, { CCI_REG8(0x480c), 0x80 },
+ { CCI_REG8(0x480e), 0x04 }, { CCI_REG8(0x480f), 0x32 },
+ { CCI_REG8(0x481b), 0x12 }, { CCI_REG8(0x4833), 0x30 },
+ { CCI_REG8(0x4837), 0x08 }, { CCI_REG8(0x484b), 0x27 },
+ { CCI_REG8(0x4850), 0x42 }, { CCI_REG8(0x4851), 0xaa },
+ { CCI_REG8(0x4860), 0x01 }, { CCI_REG8(0x4861), 0xec },
+ { CCI_REG8(0x4862), 0x25 }, { CCI_REG8(0x4888), 0x00 },
+ { CCI_REG8(0x4889), 0x03 }, { CCI_REG8(0x488c), 0x60 },
+ { CCI_REG8(0x4910), 0x28 }, { CCI_REG8(0x4911), 0x01 },
+ { CCI_REG8(0x4912), 0x0c }, { CCI_REG8(0x491a), 0x40 },
+ { CCI_REG8(0x4915), 0x00 }, { CCI_REG8(0x4916), 0x0f },
+ { CCI_REG8(0x4917), 0x00 }, { CCI_REG8(0x4918), 0x07 },
+ { CCI_REG8(0x4a10), 0x28 }, { CCI_REG8(0x4a11), 0x01 },
+ { CCI_REG8(0x4a12), 0x0c }, { CCI_REG8(0x4a1a), 0x40 },
+ { CCI_REG8(0x4a15), 0x00 }, { CCI_REG8(0x4a16), 0x0f },
+ { CCI_REG8(0x4a17), 0x00 }, { CCI_REG8(0x4a18), 0x07 },
+ { CCI_REG8(0x4d00), 0x04 }, { CCI_REG8(0x4d01), 0x5a },
+ { CCI_REG8(0x4d02), 0xbb }, { CCI_REG8(0x4d03), 0x84 },
+ { CCI_REG8(0x4d04), 0xd1 }, { CCI_REG8(0x4d05), 0x68 },
+ { CCI_REG8(0xc4fa), 0x10 }, { CCI_REG8(0x3b56), 0x0a },
+ { CCI_REG8(0x3b57), 0x0a }, { CCI_REG8(0x3b58), 0x0c },
+ { CCI_REG8(0x3b59), 0x10 }, { CCI_REG8(0x3a1d), 0x30 },
+ { CCI_REG8(0x3a1e), 0x30 }, { CCI_REG8(0x3a21), 0x30 },
+ { CCI_REG8(0x3a22), 0x30 }, { CCI_REG8(0x3992), 0x02 },
+ { CCI_REG8(0x399e), 0x02 }, { CCI_REG8(0x39fb), 0x30 },
+ { CCI_REG8(0x39fc), 0x30 }, { CCI_REG8(0x39fd), 0x30 },
+ { CCI_REG8(0x39fe), 0x30 }, { CCI_REG8(0x3a6d), 0x83 },
+ { CCI_REG8(0x3a5e), 0x83 }, { CCI_REG8(0xc500), 0x12 },
+ { CCI_REG8(0xc501), 0x12 }, { CCI_REG8(0xc502), 0x12 },
+ { CCI_REG8(0xc503), 0x12 }, { CCI_REG8(0xc505), 0x12 },
+ { CCI_REG8(0xc506), 0x12 }, { CCI_REG8(0xc507), 0x12 },
+ { CCI_REG8(0xc508), 0x12 }, { CCI_REG8(0x3a77), 0x12 },
+ { CCI_REG8(0x3a73), 0x12 }, { CCI_REG8(0x3a7b), 0x12 },
+ { CCI_REG8(0x3a7f), 0x12 }, { CCI_REG8(0x3b2e), 0x13 },
+ { CCI_REG8(0x3b29), 0x13 }, { CCI_REG8(0xc439), 0x13 },
+ { CCI_REG8(0xc469), 0x13 }, { CCI_REG8(0xc41c), 0x89 },
+ { CCI_REG8(0x3618), 0x80 }, { CCI_REG8(0xc514), 0x51 },
+ { CCI_REG8(0xc515), 0x2c }, { CCI_REG8(0xc516), 0x16 },
+ { CCI_REG8(0xc517), 0x0d }, { CCI_REG8(0x3615), 0x7f },
+ { CCI_REG8(0x3632), 0x99 }, { CCI_REG8(0x3642), 0x00 },
+ { CCI_REG8(0x3645), 0x80 }, { CCI_REG8(0x3702), 0x2a },
+ { CCI_REG8(0x3703), 0x2a }, { CCI_REG8(0x3708), 0x2f },
+ { CCI_REG8(0x3721), 0x15 }, { CCI_REG8(0x3744), 0x28 },
+ { CCI_REG8(0x3991), 0x0c }, { CCI_REG8(0x371d), 0x24 },
+ { CCI_REG8(0x371f), 0x0c }, { CCI_REG8(0x374b), 0x03 },
+ { CCI_REG8(0x37d0), 0x00 }, { CCI_REG8(0x391d), 0x55 },
+ { CCI_REG8(0x391e), 0x52 }, { CCI_REG8(0x399d), 0x0c },
+ { CCI_REG8(0x3a2f), 0x01 }, { CCI_REG8(0x3a30), 0x01 },
+ { CCI_REG8(0x3a31), 0x01 }, { CCI_REG8(0x3a32), 0x01 },
+ { CCI_REG8(0x3a34), 0x01 }, { CCI_REG8(0x3a35), 0x01 },
+ { CCI_REG8(0x3a36), 0x01 }, { CCI_REG8(0x3a37), 0x01 },
+ { CCI_REG8(0x3a43), 0x01 }, { CCI_REG8(0x3a44), 0x01 },
+ { CCI_REG8(0x3a45), 0x01 }, { CCI_REG8(0x3a46), 0x01 },
+ { CCI_REG8(0x3a48), 0x01 }, { CCI_REG8(0x3a49), 0x01 },
+ { CCI_REG8(0x3a4a), 0x01 }, { CCI_REG8(0x3a4b), 0x01 },
+ { CCI_REG8(0x3a50), 0x14 }, { CCI_REG8(0x3a54), 0x14 },
+ { CCI_REG8(0x3a60), 0x20 }, { CCI_REG8(0x3a6f), 0x20 },
+ { CCI_REG8(0x3ac5), 0x01 }, { CCI_REG8(0x3ac6), 0x01 },
+ { CCI_REG8(0x3ac7), 0x01 }, { CCI_REG8(0x3ac8), 0x01 },
+ { CCI_REG8(0x3ac9), 0x01 }, { CCI_REG8(0x3aca), 0x01 },
+ { CCI_REG8(0x3acb), 0x01 }, { CCI_REG8(0x3acc), 0x01 },
+ { CCI_REG8(0x3acd), 0x01 }, { CCI_REG8(0x3ace), 0x01 },
+ { CCI_REG8(0x3acf), 0x01 }, { CCI_REG8(0x3ad0), 0x01 },
+ { CCI_REG8(0x3ad1), 0x01 }, { CCI_REG8(0x3ad2), 0x01 },
+ { CCI_REG8(0x3ad3), 0x01 }, { CCI_REG8(0x3ad4), 0x01 },
+ { CCI_REG8(0x3add), 0x1f }, { CCI_REG8(0x3adf), 0x24 },
+ { CCI_REG8(0x3aef), 0x1f }, { CCI_REG8(0x3af0), 0x24 },
+ { CCI_REG8(0x3b92), 0x08 }, { CCI_REG8(0x3b93), 0x08 },
+ { CCI_REG8(0x3b94), 0x08 }, { CCI_REG8(0x3b95), 0x08 },
+ { CCI_REG8(0x3be7), 0x1e }, { CCI_REG8(0x3be8), 0x26 },
+ { CCI_REG8(0xc44a), 0x20 }, { CCI_REG8(0xc44c), 0x20 },
+ { CCI_REG8(0xc483), 0x00 }, { CCI_REG8(0xc484), 0x00 },
+ { CCI_REG8(0xc485), 0x00 }, { CCI_REG8(0xc486), 0x00 },
+ { CCI_REG8(0xc487), 0x01 }, { CCI_REG8(0xc488), 0x01 },
+ { CCI_REG8(0xc489), 0x01 }, { CCI_REG8(0xc48a), 0x01 },
+ { CCI_REG8(0xc4c1), 0x00 }, { CCI_REG8(0xc4c2), 0x00 },
+ { CCI_REG8(0xc4c3), 0x00 }, { CCI_REG8(0xc4c4), 0x00 },
+ { CCI_REG8(0xc4c6), 0x10 }, { CCI_REG8(0xc4c7), 0x10 },
+ { CCI_REG8(0xc4c8), 0x10 }, { CCI_REG8(0xc4c9), 0x10 },
+ { CCI_REG8(0xc4ca), 0x10 }, { CCI_REG8(0xc4cb), 0x10 },
+ { CCI_REG8(0xc4cc), 0x10 }, { CCI_REG8(0xc4cd), 0x10 },
+ { CCI_REG8(0xc4ea), 0x07 }, { CCI_REG8(0xc4eb), 0x07 },
+ { CCI_REG8(0xc4ec), 0x07 }, { CCI_REG8(0xc4ed), 0x07 },
+ { CCI_REG8(0xc4ee), 0x07 }, { CCI_REG8(0xc4f6), 0x10 },
+ { CCI_REG8(0xc4f7), 0x10 }, { CCI_REG8(0xc4f8), 0x10 },
+ { CCI_REG8(0xc4f9), 0x10 }, { CCI_REG8(0xc518), 0x0e },
+ { CCI_REG8(0xc519), 0x0e }, { CCI_REG8(0xc51a), 0x0e },
+ { CCI_REG8(0xc51b), 0x0e }, { CCI_REG8(0xc51c), 0x0e },
+ { CCI_REG8(0xc51d), 0x0e }, { CCI_REG8(0xc51e), 0x0e },
+ { CCI_REG8(0xc51f), 0x0e }, { CCI_REG8(0xc520), 0x0e },
+ { CCI_REG8(0xc521), 0x0e }, { CCI_REG8(0xc522), 0x0e },
+ { CCI_REG8(0xc523), 0x0e }, { CCI_REG8(0xc524), 0x0e },
+ { CCI_REG8(0xc525), 0x0e }, { CCI_REG8(0xc526), 0x0e },
+ { CCI_REG8(0xc527), 0x0e }, { CCI_REG8(0xc528), 0x0e },
+ { CCI_REG8(0xc529), 0x0e }, { CCI_REG8(0xc52a), 0x0e },
+ { CCI_REG8(0xc52b), 0x0e }, { CCI_REG8(0xc52c), 0x0e },
+ { CCI_REG8(0xc52d), 0x0e }, { CCI_REG8(0xc52e), 0x0e },
+ { CCI_REG8(0xc52f), 0x0e }, { CCI_REG8(0xc530), 0x0e },
+ { CCI_REG8(0xc531), 0x0e }, { CCI_REG8(0xc532), 0x0e },
+ { CCI_REG8(0xc533), 0x0e }, { CCI_REG8(0xc534), 0x0e },
+ { CCI_REG8(0xc535), 0x0e }, { CCI_REG8(0xc536), 0x0e },
+ { CCI_REG8(0xc537), 0x0e }, { CCI_REG8(0xc538), 0x0e },
+ { CCI_REG8(0xc539), 0x0e }, { CCI_REG8(0xc53a), 0x0e },
+ { CCI_REG8(0xc53b), 0x0e }, { CCI_REG8(0xc53c), 0x0e },
+ { CCI_REG8(0xc53d), 0x0e }, { CCI_REG8(0xc53e), 0x0e },
+ { CCI_REG8(0xc53f), 0x0e }, { CCI_REG8(0xc540), 0x0e },
+ { CCI_REG8(0xc541), 0x0e }, { CCI_REG8(0xc542), 0x0e },
+ { CCI_REG8(0xc543), 0x0e }, { CCI_REG8(0xc544), 0x0e },
+ { CCI_REG8(0xc545), 0x0e }, { CCI_REG8(0xc546), 0x0e },
+ { CCI_REG8(0xc547), 0x0e }, { CCI_REG8(0xc548), 0x0e },
+ { CCI_REG8(0xc549), 0x0e }, { CCI_REG8(0xc57f), 0x22 },
+ { CCI_REG8(0xc580), 0x22 }, { CCI_REG8(0xc581), 0x22 },
+ { CCI_REG8(0xc582), 0x22 }, { CCI_REG8(0xc583), 0x22 },
+ { CCI_REG8(0xc584), 0x22 }, { CCI_REG8(0xc585), 0x22 },
+ { CCI_REG8(0xc586), 0x22 }, { CCI_REG8(0xc587), 0x22 },
+ { CCI_REG8(0xc588), 0x22 }, { CCI_REG8(0xc589), 0x22 },
+ { CCI_REG8(0xc58a), 0x22 }, { CCI_REG8(0xc58b), 0x22 },
+ { CCI_REG8(0xc58c), 0x22 }, { CCI_REG8(0xc58d), 0x22 },
+ { CCI_REG8(0xc58e), 0x22 }, { CCI_REG8(0xc58f), 0x22 },
+ { CCI_REG8(0xc590), 0x22 }, { CCI_REG8(0xc591), 0x22 },
+ { CCI_REG8(0xc592), 0x22 }, { CCI_REG8(0xc598), 0x22 },
+ { CCI_REG8(0xc599), 0x22 }, { CCI_REG8(0xc59a), 0x22 },
+ { CCI_REG8(0xc59b), 0x22 }, { CCI_REG8(0xc59c), 0x22 },
+ { CCI_REG8(0xc59d), 0x22 }, { CCI_REG8(0xc59e), 0x22 },
+ { CCI_REG8(0xc59f), 0x22 }, { CCI_REG8(0xc5a0), 0x22 },
+ { CCI_REG8(0xc5a1), 0x22 }, { CCI_REG8(0xc5a2), 0x22 },
+ { CCI_REG8(0xc5a3), 0x22 }, { CCI_REG8(0xc5a4), 0x22 },
+ { CCI_REG8(0xc5a5), 0x22 }, { CCI_REG8(0xc5a6), 0x22 },
+ { CCI_REG8(0xc5a7), 0x22 }, { CCI_REG8(0xc5a8), 0x22 },
+ { CCI_REG8(0xc5a9), 0x22 }, { CCI_REG8(0xc5aa), 0x22 },
+ { CCI_REG8(0xc5ab), 0x22 }, { CCI_REG8(0xc5b1), 0x2a },
+ { CCI_REG8(0xc5b2), 0x2a }, { CCI_REG8(0xc5b3), 0x2a },
+ { CCI_REG8(0xc5b4), 0x2a }, { CCI_REG8(0xc5b5), 0x2a },
+ { CCI_REG8(0xc5b6), 0x2a }, { CCI_REG8(0xc5b7), 0x2a },
+ { CCI_REG8(0xc5b8), 0x2a }, { CCI_REG8(0xc5b9), 0x2a },
+ { CCI_REG8(0xc5ba), 0x2a }, { CCI_REG8(0xc5bb), 0x2a },
+ { CCI_REG8(0xc5bc), 0x2a }, { CCI_REG8(0xc5bd), 0x2a },
+ { CCI_REG8(0xc5be), 0x2a }, { CCI_REG8(0xc5bf), 0x2a },
+ { CCI_REG8(0xc5c0), 0x2a }, { CCI_REG8(0xc5c1), 0x2a },
+ { CCI_REG8(0xc5c2), 0x2a }, { CCI_REG8(0xc5c3), 0x2a },
+ { CCI_REG8(0xc5c4), 0x2a }, { CCI_REG8(0xc5ca), 0x2a },
+ { CCI_REG8(0xc5cb), 0x2a }, { CCI_REG8(0xc5cc), 0x2a },
+ { CCI_REG8(0xc5cd), 0x2a }, { CCI_REG8(0xc5ce), 0x2a },
+ { CCI_REG8(0xc5cf), 0x2a }, { CCI_REG8(0xc5d0), 0x2a },
+ { CCI_REG8(0xc5d1), 0x2a }, { CCI_REG8(0xc5d2), 0x2a },
+ { CCI_REG8(0xc5d3), 0x2a }, { CCI_REG8(0xc5d4), 0x2a },
+ { CCI_REG8(0xc5d5), 0x2a }, { CCI_REG8(0xc5d6), 0x2a },
+ { CCI_REG8(0xc5d7), 0x2a }, { CCI_REG8(0xc5d8), 0x2a },
+ { CCI_REG8(0xc5d9), 0x2a }, { CCI_REG8(0xc5da), 0x2a },
+ { CCI_REG8(0xc5db), 0x2a }, { CCI_REG8(0xc5dc), 0x2a },
+ { CCI_REG8(0xc5dd), 0x2a }, { CCI_REG8(0xc5e8), 0x22 },
+ { CCI_REG8(0xc5ea), 0x22 }, { CCI_REG8(0x4540), 0x12 },
+ { CCI_REG8(0x4541), 0x30 }, { CCI_REG8(0x3d86), 0x12 },
+ { CCI_REG8(0x3d87), 0x30 }, { CCI_REG8(0x4606), 0x12 },
+ { CCI_REG8(0x4607), 0x30 }, { CCI_REG8(0x4648), 0x0a },
+ { CCI_REG8(0x4649), 0x06 }, { CCI_REG8(0x3220), 0x12 },
+ { CCI_REG8(0x3221), 0x30 }, { CCI_REG8(0x40c2), 0x12 },
+ { CCI_REG8(0x49c2), 0x12 }, { CCI_REG8(0x4ac2), 0x12 },
+ { CCI_REG8(0x40c3), 0x30 }, { CCI_REG8(0x49c3), 0x30 },
+ { CCI_REG8(0x4ac3), 0x30 }, { CCI_REG8(0x36b0), 0x12 },
+ { CCI_REG8(0x36b1), 0x30 }, { CCI_REG8(0x45cb), 0x12 },
+ { CCI_REG8(0x45cc), 0x30 }, { CCI_REG8(0x4585), 0x12 },
+ { CCI_REG8(0x4586), 0x30 }, { CCI_REG8(0x36b2), 0x12 },
+ { CCI_REG8(0x36b3), 0x30 }, { CCI_REG8(0x5a40), 0x75 },
+ { CCI_REG8(0x5a41), 0x75 }, { CCI_REG8(0x5a42), 0x75 },
+ { CCI_REG8(0x5a43), 0x75 }, { CCI_REG8(0x5a44), 0x75 },
+ { CCI_REG8(0x5a45), 0x75 }, { CCI_REG8(0x5a46), 0x75 },
+ { CCI_REG8(0x5a47), 0x75 }, { CCI_REG8(0x5a48), 0x75 },
+ { CCI_REG8(0x5a49), 0x75 }, { CCI_REG8(0x5a4a), 0x75 },
+ { CCI_REG8(0x5a4b), 0x75 }, { CCI_REG8(0x5a4c), 0x75 },
+ { CCI_REG8(0x5a4d), 0x75 }, { CCI_REG8(0x5a4e), 0x75 },
+ { CCI_REG8(0x5a4f), 0x75 }, { CCI_REG8(0x5a50), 0x75 },
+ { CCI_REG8(0x5a51), 0x75 }, { CCI_REG8(0x5a52), 0x75 },
+ { CCI_REG8(0x5a53), 0x75 }, { CCI_REG8(0x5a54), 0x75 },
+ { CCI_REG8(0x5a55), 0x75 }, { CCI_REG8(0x5a56), 0x75 },
+ { CCI_REG8(0x5a57), 0x75 }, { CCI_REG8(0x5a58), 0x75 },
+ { CCI_REG8(0x5a59), 0x75 }, { CCI_REG8(0x5a5a), 0x75 },
+ { CCI_REG8(0x5a5b), 0x75 }, { CCI_REG8(0x5a5c), 0x75 },
+ { CCI_REG8(0x5a5d), 0x75 }, { CCI_REG8(0x5a5e), 0x75 },
+ { CCI_REG8(0x5a5f), 0x75 }, { CCI_REG8(0x5a60), 0x75 },
+ { CCI_REG8(0x5a61), 0x75 }, { CCI_REG8(0x5a62), 0x75 },
+ { CCI_REG8(0x5a63), 0x75 }, { CCI_REG8(0x5a64), 0x75 },
+ { CCI_REG8(0x5a65), 0x75 }, { CCI_REG8(0x5a66), 0x75 },
+ { CCI_REG8(0x5a67), 0x75 }, { CCI_REG8(0x5a68), 0x75 },
+ { CCI_REG8(0x5a69), 0x75 }, { CCI_REG8(0x5a6a), 0x75 },
+ { CCI_REG8(0x5a6b), 0x75 }, { CCI_REG8(0x5a6c), 0x75 },
+ { CCI_REG8(0x5a6d), 0x75 }, { CCI_REG8(0x5a6e), 0x75 },
+ { CCI_REG8(0x5a6f), 0x75 }, { CCI_REG8(0x5a70), 0x75 },
+ { CCI_REG8(0x5a71), 0x75 }, { CCI_REG8(0x5a72), 0x75 },
+ { CCI_REG8(0x5a73), 0x75 }, { CCI_REG8(0x5a74), 0x75 },
+ { CCI_REG8(0x5a75), 0x75 }, { CCI_REG8(0x5a76), 0x75 },
+ { CCI_REG8(0x5a77), 0x75 }, { CCI_REG8(0x5a78), 0x75 },
+ { CCI_REG8(0x5a79), 0x75 }, { CCI_REG8(0x5a7a), 0x75 },
+ { CCI_REG8(0x5a7b), 0x75 }, { CCI_REG8(0x5a7c), 0x75 },
+ { CCI_REG8(0x5a7d), 0x75 }, { CCI_REG8(0x5a7e), 0x75 },
+ { CCI_REG8(0x5a7f), 0x75 }, { CCI_REG8(0x5a80), 0x75 },
+ { CCI_REG8(0x5a81), 0x75 }, { CCI_REG8(0x5a82), 0x75 },
+ { CCI_REG8(0x5a83), 0x75 }, { CCI_REG8(0x5a84), 0x75 },
+ { CCI_REG8(0x5a85), 0x75 }, { CCI_REG8(0x5a86), 0x75 },
+ { CCI_REG8(0x5a87), 0x75 }, { CCI_REG8(0x5a88), 0x75 },
+ { CCI_REG8(0x5a89), 0x75 }, { CCI_REG8(0x5a8a), 0x75 },
+ { CCI_REG8(0x5a8b), 0x75 }, { CCI_REG8(0x5a8c), 0x75 },
+ { CCI_REG8(0x5a8d), 0x75 }, { CCI_REG8(0x5a8e), 0x75 },
+ { CCI_REG8(0x5a8f), 0x75 }, { CCI_REG8(0x5a90), 0x75 },
+ { CCI_REG8(0x5a91), 0x75 }, { CCI_REG8(0x5a92), 0x75 },
+ { CCI_REG8(0x5a93), 0x75 }, { CCI_REG8(0x5a94), 0x75 },
+ { CCI_REG8(0x5a95), 0x75 }, { CCI_REG8(0x5a96), 0x75 },
+ { CCI_REG8(0x5a97), 0x75 }, { CCI_REG8(0x5a98), 0x75 },
+ { CCI_REG8(0x5a99), 0x75 }, { CCI_REG8(0x5a9a), 0x75 },
+ { CCI_REG8(0x5a9b), 0x75 }, { CCI_REG8(0x5a9c), 0x75 },
+ { CCI_REG8(0x5a9d), 0x75 }, { CCI_REG8(0x5a9e), 0x75 },
+ { CCI_REG8(0x5a9f), 0x75 }, { CCI_REG8(0x5aa0), 0x75 },
+ { CCI_REG8(0x5aa1), 0x75 }, { CCI_REG8(0x5aa2), 0x75 },
+ { CCI_REG8(0x5aa3), 0x75 }, { CCI_REG8(0x5aa4), 0x75 },
+ { CCI_REG8(0x5aa5), 0x75 }, { CCI_REG8(0x5aa6), 0x75 },
+ { CCI_REG8(0x5aa7), 0x75 }, { CCI_REG8(0x5aa8), 0x75 },
+ { CCI_REG8(0x5aa9), 0x75 }, { CCI_REG8(0x5aaa), 0x75 },
+ { CCI_REG8(0x5aab), 0x75 }, { CCI_REG8(0x5aac), 0x75 },
+ { CCI_REG8(0x5aad), 0x75 }, { CCI_REG8(0x5aae), 0x75 },
+ { CCI_REG8(0x5aaf), 0x75 }, { CCI_REG8(0x5ab0), 0x75 },
+ { CCI_REG8(0x5ab1), 0x75 }, { CCI_REG8(0x5ab2), 0x75 },
+ { CCI_REG8(0x5ab3), 0x75 }, { CCI_REG8(0x5ab4), 0x75 },
+ { CCI_REG8(0x5ab5), 0x75 }, { CCI_REG8(0x5ab6), 0x75 },
+ { CCI_REG8(0x5ab7), 0x75 }, { CCI_REG8(0x5ab8), 0x75 },
+ { CCI_REG8(0x5ab9), 0x75 }, { CCI_REG8(0x5aba), 0x75 },
+ { CCI_REG8(0x5abb), 0x75 }, { CCI_REG8(0x5abc), 0x75 },
+ { CCI_REG8(0x5abd), 0x75 }, { CCI_REG8(0x5abe), 0x75 },
+ { CCI_REG8(0x5abf), 0x75 }, { CCI_REG8(0x5ac0), 0x75 },
+ { CCI_REG8(0x5ac1), 0x75 }, { CCI_REG8(0x5ac2), 0x75 },
+ { CCI_REG8(0x5ac3), 0x75 }, { CCI_REG8(0x5ac4), 0x75 },
+ { CCI_REG8(0x5ac5), 0x75 }, { CCI_REG8(0x5ac6), 0x75 },
+ { CCI_REG8(0x5ac7), 0x75 }, { CCI_REG8(0x5ac8), 0x75 },
+ { CCI_REG8(0x5ac9), 0x75 }, { CCI_REG8(0x5aca), 0x75 },
+ { CCI_REG8(0x5acb), 0x75 }, { CCI_REG8(0x5acc), 0x75 },
+ { CCI_REG8(0x5acd), 0x75 }, { CCI_REG8(0x5ace), 0x75 },
+ { CCI_REG8(0x5acf), 0x75 }, { CCI_REG8(0x5ad0), 0x75 },
+ { CCI_REG8(0x5ad1), 0x75 }, { CCI_REG8(0x5ad2), 0x75 },
+ { CCI_REG8(0x5ad3), 0x75 }, { CCI_REG8(0x5ad4), 0x75 },
+ { CCI_REG8(0x5ad5), 0x75 }, { CCI_REG8(0x5ad6), 0x75 },
+ { CCI_REG8(0x5ad7), 0x75 }, { CCI_REG8(0x5ad8), 0x75 },
+ { CCI_REG8(0x5ad9), 0x75 }, { CCI_REG8(0x5ada), 0x75 },
+ { CCI_REG8(0x5adb), 0x75 }, { CCI_REG8(0x5adc), 0x75 },
+ { CCI_REG8(0x5add), 0x75 }, { CCI_REG8(0x5ade), 0x75 },
+ { CCI_REG8(0x5adf), 0x75 }, { CCI_REG8(0x5ae0), 0x75 },
+ { CCI_REG8(0x5ae1), 0x75 }, { CCI_REG8(0x5ae2), 0x75 },
+ { CCI_REG8(0x5ae3), 0x75 }, { CCI_REG8(0x5ae4), 0x75 },
+ { CCI_REG8(0x5ae5), 0x75 }, { CCI_REG8(0x5ae6), 0x75 },
+ { CCI_REG8(0x5ae7), 0x75 }, { CCI_REG8(0x5ae8), 0x75 },
+ { CCI_REG8(0x5ae9), 0x75 }, { CCI_REG8(0x5aea), 0x75 },
+ { CCI_REG8(0x5aeb), 0x75 }, { CCI_REG8(0x5aec), 0x75 },
+ { CCI_REG8(0x5aed), 0x75 }, { CCI_REG8(0x5aee), 0x75 },
+ { CCI_REG8(0x5aef), 0x75 }, { CCI_REG8(0x5af0), 0x75 },
+ { CCI_REG8(0x5af1), 0x75 }, { CCI_REG8(0x5af2), 0x75 },
+ { CCI_REG8(0x5af3), 0x75 }, { CCI_REG8(0x5af4), 0x75 },
+ { CCI_REG8(0x5af5), 0x75 }, { CCI_REG8(0x5af6), 0x75 },
+ { CCI_REG8(0x5af7), 0x75 }, { CCI_REG8(0x5af8), 0x75 },
+ { CCI_REG8(0x5af9), 0x75 }, { CCI_REG8(0x5afa), 0x75 },
+ { CCI_REG8(0x5afb), 0x75 }, { CCI_REG8(0x5afc), 0x75 },
+ { CCI_REG8(0x5afd), 0x75 }, { CCI_REG8(0x5afe), 0x75 },
+ { CCI_REG8(0x5aff), 0x75 }, { CCI_REG8(0x5b00), 0x75 },
+ { CCI_REG8(0x5b01), 0x75 }, { CCI_REG8(0x5b02), 0x75 },
+ { CCI_REG8(0x5b03), 0x75 }, { CCI_REG8(0x5b04), 0x75 },
+ { CCI_REG8(0x5b05), 0x75 }, { CCI_REG8(0x5b06), 0x75 },
+ { CCI_REG8(0x5b07), 0x75 }, { CCI_REG8(0x5b08), 0x75 },
+ { CCI_REG8(0x5b09), 0x75 }, { CCI_REG8(0x5b0a), 0x75 },
+ { CCI_REG8(0x5b0b), 0x75 }, { CCI_REG8(0x5b0c), 0x75 },
+ { CCI_REG8(0x5b0d), 0x75 }, { CCI_REG8(0x5b0e), 0x75 },
+ { CCI_REG8(0x5b0f), 0x75 }, { CCI_REG8(0x5b10), 0x75 },
+ { CCI_REG8(0x5b11), 0x75 }, { CCI_REG8(0x5b12), 0x75 },
+ { CCI_REG8(0x5b13), 0x75 }, { CCI_REG8(0x5b14), 0x75 },
+ { CCI_REG8(0x5b15), 0x75 }, { CCI_REG8(0x5b16), 0x75 },
+ { CCI_REG8(0x5b17), 0x75 }, { CCI_REG8(0x5b18), 0x75 },
+ { CCI_REG8(0x5b19), 0x75 }, { CCI_REG8(0x5b1a), 0x75 },
+ { CCI_REG8(0x5b1b), 0x75 }, { CCI_REG8(0x5b1c), 0x75 },
+ { CCI_REG8(0x5b1d), 0x75 }, { CCI_REG8(0x5b1e), 0x75 },
+ { CCI_REG8(0x5b1f), 0x75 }, { CCI_REG8(0x5b20), 0x75 },
+ { CCI_REG8(0x5b21), 0x75 }, { CCI_REG8(0x5b22), 0x75 },
+ { CCI_REG8(0x5b23), 0x75 }, { CCI_REG8(0x5b24), 0x75 },
+ { CCI_REG8(0x5b25), 0x75 }, { CCI_REG8(0x5b26), 0x75 },
+ { CCI_REG8(0x5b27), 0x75 }, { CCI_REG8(0x5b28), 0x75 },
+ { CCI_REG8(0x5b29), 0x75 }, { CCI_REG8(0x5b2a), 0x75 },
+ { CCI_REG8(0x5b2b), 0x75 }, { CCI_REG8(0x5b2c), 0x75 },
+ { CCI_REG8(0x5b2d), 0x75 }, { CCI_REG8(0x5b2e), 0x75 },
+ { CCI_REG8(0x5b2f), 0x75 }, { CCI_REG8(0x5b30), 0x75 },
+ { CCI_REG8(0x5b31), 0x75 }, { CCI_REG8(0x5b32), 0x75 },
+ { CCI_REG8(0x5b33), 0x75 }, { CCI_REG8(0x5b34), 0x75 },
+ { CCI_REG8(0x5b35), 0x75 }, { CCI_REG8(0x5b36), 0x75 },
+ { CCI_REG8(0x5b37), 0x75 }, { CCI_REG8(0x5b38), 0x75 },
+ { CCI_REG8(0x5b39), 0x75 }, { CCI_REG8(0x5b3a), 0x75 },
+ { CCI_REG8(0x5b3b), 0x75 }, { CCI_REG8(0x5b3c), 0x75 },
+ { CCI_REG8(0x5b3d), 0x75 }, { CCI_REG8(0x5b3e), 0x75 },
+ { CCI_REG8(0x5b3f), 0x75 }, { CCI_REG8(0x5b40), 0x75 },
+ { CCI_REG8(0x5b41), 0x75 }, { CCI_REG8(0x5b42), 0x75 },
+ { CCI_REG8(0x5b43), 0x75 }, { CCI_REG8(0x5b44), 0x75 },
+ { CCI_REG8(0x5b45), 0x75 }, { CCI_REG8(0x5b46), 0x75 },
+ { CCI_REG8(0x5b47), 0x75 }, { CCI_REG8(0x5b48), 0x75 },
+ { CCI_REG8(0x5b49), 0x75 }, { CCI_REG8(0x5b4a), 0x75 },
+ { CCI_REG8(0x5b4b), 0x75 }, { CCI_REG8(0x5b4c), 0x75 },
+ { CCI_REG8(0x5b4d), 0x75 }, { CCI_REG8(0x5b4e), 0x75 },
+ { CCI_REG8(0x5b4f), 0x75 }, { CCI_REG8(0x5b50), 0x75 },
+ { CCI_REG8(0x5b51), 0x75 }, { CCI_REG8(0x5b52), 0x75 },
+ { CCI_REG8(0x5b53), 0x75 }, { CCI_REG8(0x5b54), 0x75 },
+ { CCI_REG8(0x5b55), 0x75 }, { CCI_REG8(0x5b56), 0x75 },
+ { CCI_REG8(0x5b57), 0x75 }, { CCI_REG8(0x5b58), 0x75 },
+ { CCI_REG8(0x5b59), 0x75 }, { CCI_REG8(0x5b5a), 0x75 },
+ { CCI_REG8(0x5b5b), 0x75 }, { CCI_REG8(0x5b5c), 0x75 },
+ { CCI_REG8(0x5b5d), 0x75 }, { CCI_REG8(0x5b5e), 0x75 },
+ { CCI_REG8(0x5b5f), 0x75 }, { CCI_REG8(0x5b80), 0x75 },
+ { CCI_REG8(0x5b81), 0x75 }, { CCI_REG8(0x5b82), 0x75 },
+ { CCI_REG8(0x5b83), 0x75 }, { CCI_REG8(0x5b84), 0x75 },
+ { CCI_REG8(0x5b85), 0x75 }, { CCI_REG8(0x5b86), 0x75 },
+ { CCI_REG8(0x5b87), 0x75 }, { CCI_REG8(0x5b88), 0x75 },
+ { CCI_REG8(0x5b89), 0x75 }, { CCI_REG8(0x5b8a), 0x75 },
+ { CCI_REG8(0x5b8b), 0x75 }, { CCI_REG8(0x5b8c), 0x75 },
+ { CCI_REG8(0x5b8d), 0x75 }, { CCI_REG8(0x5b8e), 0x75 },
+ { CCI_REG8(0x5b8f), 0x75 }, { CCI_REG8(0x5b90), 0x75 },
+ { CCI_REG8(0x5b91), 0x75 }, { CCI_REG8(0x5b92), 0x75 },
+ { CCI_REG8(0x5b93), 0x75 }, { CCI_REG8(0x5b94), 0x75 },
+ { CCI_REG8(0x5b95), 0x75 }, { CCI_REG8(0x5b96), 0x75 },
+ { CCI_REG8(0x5b97), 0x75 }, { CCI_REG8(0x5b98), 0x75 },
+ { CCI_REG8(0x5b99), 0x75 }, { CCI_REG8(0x5b9a), 0x75 },
+ { CCI_REG8(0x5b9b), 0x75 }, { CCI_REG8(0x5b9c), 0x75 },
+ { CCI_REG8(0x5b9d), 0x75 }, { CCI_REG8(0x5b9e), 0x75 },
+ { CCI_REG8(0x5b9f), 0x75 }, { CCI_REG8(0x5ba0), 0x75 },
+ { CCI_REG8(0x5ba1), 0x75 }, { CCI_REG8(0x5ba2), 0x75 },
+ { CCI_REG8(0x5ba3), 0x75 }, { CCI_REG8(0x5ba4), 0x75 },
+ { CCI_REG8(0x5ba5), 0x75 }, { CCI_REG8(0x5ba6), 0x75 },
+ { CCI_REG8(0x5ba7), 0x75 }, { CCI_REG8(0x5ba8), 0x75 },
+ { CCI_REG8(0x5ba9), 0x75 }, { CCI_REG8(0x5baa), 0x75 },
+ { CCI_REG8(0x5bab), 0x75 }, { CCI_REG8(0x5bac), 0x75 },
+ { CCI_REG8(0x5bad), 0x75 }, { CCI_REG8(0x5bae), 0x75 },
+ { CCI_REG8(0x5baf), 0x75 }, { CCI_REG8(0x5bb0), 0x75 },
+ { CCI_REG8(0x5bb1), 0x75 }, { CCI_REG8(0x5bb2), 0x75 },
+ { CCI_REG8(0x5bb3), 0x75 }, { CCI_REG8(0x5bb4), 0x75 },
+ { CCI_REG8(0x5bb5), 0x75 }, { CCI_REG8(0x5bb6), 0x75 },
+ { CCI_REG8(0x5bb7), 0x75 }, { CCI_REG8(0x5bb8), 0x75 },
+ { CCI_REG8(0x5bb9), 0x75 }, { CCI_REG8(0x5bba), 0x75 },
+ { CCI_REG8(0x5bbb), 0x75 }, { CCI_REG8(0x5bbc), 0x75 },
+ { CCI_REG8(0x5bbd), 0x75 }, { CCI_REG8(0x5bbe), 0x75 },
+ { CCI_REG8(0x5bbf), 0x75 }, { CCI_REG8(0x5bc0), 0x75 },
+ { CCI_REG8(0x5bc1), 0x75 }, { CCI_REG8(0x5bc2), 0x75 },
+ { CCI_REG8(0x5bc3), 0x75 }, { CCI_REG8(0x5bc4), 0x75 },
+ { CCI_REG8(0x5bc5), 0x75 }, { CCI_REG8(0x5bc6), 0x75 },
+ { CCI_REG8(0x5bc7), 0x75 }, { CCI_REG8(0x5bc8), 0x75 },
+ { CCI_REG8(0x5bc9), 0x75 }, { CCI_REG8(0x5bca), 0x75 },
+ { CCI_REG8(0x5bcb), 0x75 }, { CCI_REG8(0x5bcc), 0x75 },
+ { CCI_REG8(0x5bcd), 0x75 }, { CCI_REG8(0x5bce), 0x75 },
+ { CCI_REG8(0x5bcf), 0x75 }, { CCI_REG8(0x5bd0), 0x75 },
+ { CCI_REG8(0x5bd1), 0x75 }, { CCI_REG8(0x5bd2), 0x75 },
+ { CCI_REG8(0x5bd3), 0x75 }, { CCI_REG8(0x5bd4), 0x75 },
+ { CCI_REG8(0x5bd5), 0x75 }, { CCI_REG8(0x5bd6), 0x75 },
+ { CCI_REG8(0x5bd7), 0x75 }, { CCI_REG8(0x5bd8), 0x75 },
+ { CCI_REG8(0x5bd9), 0x75 }, { CCI_REG8(0x5bda), 0x75 },
+ { CCI_REG8(0x5bdb), 0x75 }, { CCI_REG8(0x5bdc), 0x75 },
+ { CCI_REG8(0x5bdd), 0x75 }, { CCI_REG8(0x5bde), 0x75 },
+ { CCI_REG8(0x5bdf), 0x75 }, { CCI_REG8(0x5be0), 0x75 },
+ { CCI_REG8(0x5be1), 0x75 }, { CCI_REG8(0x5be2), 0x75 },
+ { CCI_REG8(0x5be3), 0x75 }, { CCI_REG8(0x5be4), 0x75 },
+ { CCI_REG8(0x5be5), 0x75 }, { CCI_REG8(0x5be6), 0x75 },
+ { CCI_REG8(0x5be7), 0x75 }, { CCI_REG8(0x5be8), 0x75 },
+ { CCI_REG8(0x5be9), 0x75 }, { CCI_REG8(0x5bea), 0x75 },
+ { CCI_REG8(0x5beb), 0x75 }, { CCI_REG8(0x5bec), 0x75 },
+ { CCI_REG8(0x5bed), 0x75 }, { CCI_REG8(0x5bee), 0x75 },
+ { CCI_REG8(0x5bef), 0x75 }, { CCI_REG8(0x5bf0), 0x75 },
+ { CCI_REG8(0x5bf1), 0x75 }, { CCI_REG8(0x5bf2), 0x75 },
+ { CCI_REG8(0x5bf3), 0x75 }, { CCI_REG8(0x5bf4), 0x75 },
+ { CCI_REG8(0x5bf5), 0x75 }, { CCI_REG8(0x5bf6), 0x75 },
+ { CCI_REG8(0x5bf7), 0x75 }, { CCI_REG8(0x5bf8), 0x75 },
+ { CCI_REG8(0x5bf9), 0x75 }, { CCI_REG8(0x5bfa), 0x75 },
+ { CCI_REG8(0x5bfb), 0x75 }, { CCI_REG8(0x5bfc), 0x75 },
+ { CCI_REG8(0x5bfd), 0x75 }, { CCI_REG8(0x5bfe), 0x75 },
+ { CCI_REG8(0x5bff), 0x75 }, { CCI_REG8(0x5c00), 0x75 },
+ { CCI_REG8(0x5c01), 0x75 }, { CCI_REG8(0x5c02), 0x75 },
+ { CCI_REG8(0x5c03), 0x75 }, { CCI_REG8(0x5c04), 0x75 },
+ { CCI_REG8(0x5c05), 0x75 }, { CCI_REG8(0x5c06), 0x75 },
+ { CCI_REG8(0x5c07), 0x75 }, { CCI_REG8(0x5c08), 0x75 },
+ { CCI_REG8(0x5c09), 0x75 }, { CCI_REG8(0x5c0a), 0x75 },
+ { CCI_REG8(0x5c0b), 0x75 }, { CCI_REG8(0x5c0c), 0x75 },
+ { CCI_REG8(0x5c0d), 0x75 }, { CCI_REG8(0x5c0e), 0x75 },
+ { CCI_REG8(0x5c0f), 0x75 }, { CCI_REG8(0x5c10), 0x75 },
+ { CCI_REG8(0x5c11), 0x75 }, { CCI_REG8(0x5c12), 0x75 },
+ { CCI_REG8(0x5c13), 0x75 }, { CCI_REG8(0x5c14), 0x75 },
+ { CCI_REG8(0x5c15), 0x75 }, { CCI_REG8(0x5c16), 0x75 },
+ { CCI_REG8(0x5c17), 0x75 }, { CCI_REG8(0x5c18), 0x75 },
+ { CCI_REG8(0x5c19), 0x75 }, { CCI_REG8(0x5c1a), 0x75 },
+ { CCI_REG8(0x5c1b), 0x75 }, { CCI_REG8(0x5c1c), 0x75 },
+ { CCI_REG8(0x5c1d), 0x75 }, { CCI_REG8(0x5c1e), 0x75 },
+ { CCI_REG8(0x5c1f), 0x75 }, { CCI_REG8(0x5c20), 0x75 },
+ { CCI_REG8(0x5c21), 0x75 }, { CCI_REG8(0x5c22), 0x75 },
+ { CCI_REG8(0x5c23), 0x75 }, { CCI_REG8(0x5c24), 0x75 },
+ { CCI_REG8(0x5c25), 0x75 }, { CCI_REG8(0x5c26), 0x75 },
+ { CCI_REG8(0x5c27), 0x75 }, { CCI_REG8(0x5c28), 0x75 },
+ { CCI_REG8(0x5c29), 0x75 }, { CCI_REG8(0x5c2a), 0x75 },
+ { CCI_REG8(0x5c2b), 0x75 }, { CCI_REG8(0x5c2c), 0x75 },
+ { CCI_REG8(0x5c2d), 0x75 }, { CCI_REG8(0x5c2e), 0x75 },
+ { CCI_REG8(0x5c2f), 0x75 }, { CCI_REG8(0x5c30), 0x75 },
+ { CCI_REG8(0x5c31), 0x75 }, { CCI_REG8(0x5c32), 0x75 },
+ { CCI_REG8(0x5c33), 0x75 }, { CCI_REG8(0x5c34), 0x75 },
+ { CCI_REG8(0x5c35), 0x75 }, { CCI_REG8(0x5c36), 0x75 },
+ { CCI_REG8(0x5c37), 0x75 }, { CCI_REG8(0x5c38), 0x75 },
+ { CCI_REG8(0x5c39), 0x75 }, { CCI_REG8(0x5c3a), 0x75 },
+ { CCI_REG8(0x5c3b), 0x75 }, { CCI_REG8(0x5c3c), 0x75 },
+ { CCI_REG8(0x5c3d), 0x75 }, { CCI_REG8(0x5c3e), 0x75 },
+ { CCI_REG8(0x5c3f), 0x75 }, { CCI_REG8(0x5c40), 0x75 },
+ { CCI_REG8(0x5c41), 0x75 }, { CCI_REG8(0x5c42), 0x75 },
+ { CCI_REG8(0x5c43), 0x75 }, { CCI_REG8(0x5c44), 0x75 },
+ { CCI_REG8(0x5c45), 0x75 }, { CCI_REG8(0x5c46), 0x75 },
+ { CCI_REG8(0x5c47), 0x75 }, { CCI_REG8(0x5c48), 0x75 },
+ { CCI_REG8(0x5c49), 0x75 }, { CCI_REG8(0x5c4a), 0x75 },
+ { CCI_REG8(0x5c4b), 0x75 }, { CCI_REG8(0x5c4c), 0x75 },
+ { CCI_REG8(0x5c4d), 0x75 }, { CCI_REG8(0x5c4e), 0x75 },
+ { CCI_REG8(0x5c4f), 0x75 }, { CCI_REG8(0x5c50), 0x75 },
+ { CCI_REG8(0x5c51), 0x75 }, { CCI_REG8(0x5c52), 0x75 },
+ { CCI_REG8(0x5c53), 0x75 }, { CCI_REG8(0x5c54), 0x75 },
+ { CCI_REG8(0x5c55), 0x75 }, { CCI_REG8(0x5c56), 0x75 },
+ { CCI_REG8(0x5c57), 0x75 }, { CCI_REG8(0x5c58), 0x75 },
+ { CCI_REG8(0x5c59), 0x75 }, { CCI_REG8(0x5c5a), 0x75 },
+ { CCI_REG8(0x5c5b), 0x75 }, { CCI_REG8(0x5c5c), 0x75 },
+ { CCI_REG8(0x5c5d), 0x75 }, { CCI_REG8(0x5c5e), 0x75 },
+ { CCI_REG8(0x5c5f), 0x75 }, { CCI_REG8(0x5c60), 0x75 },
+ { CCI_REG8(0x5c61), 0x75 }, { CCI_REG8(0x5c62), 0x75 },
+ { CCI_REG8(0x5c63), 0x75 }, { CCI_REG8(0x5c64), 0x75 },
+ { CCI_REG8(0x5c65), 0x75 }, { CCI_REG8(0x5c66), 0x75 },
+ { CCI_REG8(0x5c67), 0x75 }, { CCI_REG8(0x5c68), 0x75 },
+ { CCI_REG8(0x5c69), 0x75 }, { CCI_REG8(0x5c6a), 0x75 },
+ { CCI_REG8(0x5c6b), 0x75 }, { CCI_REG8(0x5c6c), 0x75 },
+ { CCI_REG8(0x5c6d), 0x75 }, { CCI_REG8(0x5c6e), 0x75 },
+ { CCI_REG8(0x5c6f), 0x75 }, { CCI_REG8(0x5c70), 0x75 },
+ { CCI_REG8(0x5c71), 0x75 }, { CCI_REG8(0x5c72), 0x75 },
+ { CCI_REG8(0x5c73), 0x75 }, { CCI_REG8(0x5c74), 0x75 },
+ { CCI_REG8(0x5c75), 0x75 }, { CCI_REG8(0x5c76), 0x75 },
+ { CCI_REG8(0x5c77), 0x75 }, { CCI_REG8(0x5c78), 0x75 },
+ { CCI_REG8(0x5c79), 0x75 }, { CCI_REG8(0x5c7a), 0x75 },
+ { CCI_REG8(0x5c7b), 0x75 }, { CCI_REG8(0x5c7c), 0x75 },
+ { CCI_REG8(0x5c7d), 0x75 }, { CCI_REG8(0x5c7e), 0x75 },
+ { CCI_REG8(0x5c7f), 0x75 }, { CCI_REG8(0x5c80), 0x75 },
+ { CCI_REG8(0x5c81), 0x75 }, { CCI_REG8(0x5c82), 0x75 },
+ { CCI_REG8(0x5c83), 0x75 }, { CCI_REG8(0x5c84), 0x75 },
+ { CCI_REG8(0x5c85), 0x75 }, { CCI_REG8(0x5c86), 0x75 },
+ { CCI_REG8(0x5c87), 0x75 }, { CCI_REG8(0x5c88), 0x75 },
+ { CCI_REG8(0x5c89), 0x75 }, { CCI_REG8(0x5c8a), 0x75 },
+ { CCI_REG8(0x5c8b), 0x75 }, { CCI_REG8(0x5c8c), 0x75 },
+ { CCI_REG8(0x5c8d), 0x75 }, { CCI_REG8(0x5c8e), 0x75 },
+ { CCI_REG8(0x5c8f), 0x75 }, { CCI_REG8(0x5c90), 0x75 },
+ { CCI_REG8(0x5c91), 0x75 }, { CCI_REG8(0x5c92), 0x75 },
+ { CCI_REG8(0x5c93), 0x75 }, { CCI_REG8(0x5c94), 0x75 },
+ { CCI_REG8(0x5c95), 0x75 }, { CCI_REG8(0x5c96), 0x75 },
+ { CCI_REG8(0x5c97), 0x75 }, { CCI_REG8(0x5c98), 0x75 },
+ { CCI_REG8(0x5c99), 0x75 }, { CCI_REG8(0x5c9a), 0x75 },
+ { CCI_REG8(0x5c9b), 0x75 }, { CCI_REG8(0x5c9c), 0x75 },
+ { CCI_REG8(0x5c9d), 0x75 }, { CCI_REG8(0x5c9e), 0x75 },
+ { CCI_REG8(0x5c9f), 0x75 }, { CCI_REG8(0x5ca0), 0x75 },
+ { CCI_REG8(0x5ca1), 0x75 }, { CCI_REG8(0x5ca2), 0x75 },
+ { CCI_REG8(0x5ca3), 0x75 }, { CCI_REG8(0x5ca4), 0x75 },
+ { CCI_REG8(0x5ca5), 0x75 }, { CCI_REG8(0x5ca6), 0x75 },
+ { CCI_REG8(0x5ca7), 0x75 }, { CCI_REG8(0x5ca8), 0x75 },
+ { CCI_REG8(0x5ca9), 0x75 }, { CCI_REG8(0x5caa), 0x75 },
+ { CCI_REG8(0x5cab), 0x75 }, { CCI_REG8(0x5cac), 0x75 },
+ { CCI_REG8(0x5cad), 0x75 }, { CCI_REG8(0x5cae), 0x75 },
+ { CCI_REG8(0x5caf), 0x75 }, { CCI_REG8(0x5cb0), 0x75 },
+ { CCI_REG8(0x5cb1), 0x75 }, { CCI_REG8(0x5cb2), 0x75 },
+ { CCI_REG8(0x5cb3), 0x75 }, { CCI_REG8(0x5cb4), 0x75 },
+ { CCI_REG8(0x5cb5), 0x75 }, { CCI_REG8(0x5cb6), 0x75 },
+ { CCI_REG8(0x5cb7), 0x75 }, { CCI_REG8(0x5cb8), 0x75 },
+ { CCI_REG8(0x5cb9), 0x75 }, { CCI_REG8(0x5cba), 0x75 },
+ { CCI_REG8(0x5cbb), 0x75 }, { CCI_REG8(0x5cbc), 0x75 },
+ { CCI_REG8(0x5cbd), 0x75 }, { CCI_REG8(0x5cbe), 0x75 },
+ { CCI_REG8(0x5cbf), 0x75 }, { CCI_REG8(0x5cc0), 0x75 },
+ { CCI_REG8(0x5cc1), 0x75 }, { CCI_REG8(0x5cc2), 0x75 },
+ { CCI_REG8(0x5cc3), 0x75 }, { CCI_REG8(0x5cc4), 0x75 },
+ { CCI_REG8(0x5cc5), 0x75 }, { CCI_REG8(0x5cc6), 0x75 },
+ { CCI_REG8(0x5cc7), 0x75 }, { CCI_REG8(0x5cc8), 0x75 },
+ { CCI_REG8(0x5cc9), 0x75 }, { CCI_REG8(0x5cca), 0x75 },
+ { CCI_REG8(0x5ccb), 0x75 }, { CCI_REG8(0x5ccc), 0x75 },
+ { CCI_REG8(0x5ccd), 0x75 }, { CCI_REG8(0x5cce), 0x75 },
+ { CCI_REG8(0x5ccf), 0x75 }, { CCI_REG8(0x5cd0), 0x75 },
+ { CCI_REG8(0x5cd1), 0x75 }, { CCI_REG8(0x5cd2), 0x75 },
+ { CCI_REG8(0x5cd3), 0x75 }, { CCI_REG8(0x5cd4), 0x75 },
+ { CCI_REG8(0x5cd5), 0x75 }, { CCI_REG8(0x5cd6), 0x75 },
+ { CCI_REG8(0x5cd7), 0x75 }, { CCI_REG8(0x5cd8), 0x75 },
+ { CCI_REG8(0x5cd9), 0x75 }, { CCI_REG8(0x5cda), 0x75 },
+ { CCI_REG8(0x5cdb), 0x75 }, { CCI_REG8(0x5cdc), 0x75 },
+ { CCI_REG8(0x5cdd), 0x75 }, { CCI_REG8(0x5cde), 0x75 },
+ { CCI_REG8(0x5cdf), 0x75 }, { CCI_REG8(0x5ce0), 0x75 },
+ { CCI_REG8(0x5ce1), 0x75 }, { CCI_REG8(0x5ce2), 0x75 },
+ { CCI_REG8(0x5ce3), 0x75 }, { CCI_REG8(0x5ce4), 0x75 },
+ { CCI_REG8(0x5ce5), 0x75 }, { CCI_REG8(0x5ce6), 0x75 },
+ { CCI_REG8(0x5ce7), 0x75 }, { CCI_REG8(0x5ce8), 0x75 },
+ { CCI_REG8(0x5ce9), 0x75 }, { CCI_REG8(0x5cea), 0x75 },
+ { CCI_REG8(0x5ceb), 0x75 }, { CCI_REG8(0x5cec), 0x75 },
+ { CCI_REG8(0x5ced), 0x75 }, { CCI_REG8(0x5cee), 0x75 },
+ { CCI_REG8(0x5cef), 0x75 }, { CCI_REG8(0x5cf0), 0x75 },
+ { CCI_REG8(0x5cf1), 0x75 }, { CCI_REG8(0x5cf2), 0x75 },
+ { CCI_REG8(0x5cf3), 0x75 }, { CCI_REG8(0x5cf4), 0x75 },
+ { CCI_REG8(0x5cf5), 0x75 }, { CCI_REG8(0x5cf6), 0x75 },
+ { CCI_REG8(0x5cf7), 0x75 }, { CCI_REG8(0x5cf8), 0x75 },
+ { CCI_REG8(0x5cf9), 0x75 }, { CCI_REG8(0x5cfa), 0x75 },
+ { CCI_REG8(0x5cfb), 0x75 }, { CCI_REG8(0x5cfc), 0x75 },
+ { CCI_REG8(0x5cfd), 0x75 }, { CCI_REG8(0x5cfe), 0x75 },
+ { CCI_REG8(0x5cff), 0x75 }, { CCI_REG8(0x5d00), 0x75 },
+ { CCI_REG8(0x5d01), 0x75 }, { CCI_REG8(0x5d02), 0x75 },
+ { CCI_REG8(0x5d03), 0x75 }, { CCI_REG8(0x5d04), 0x75 },
+ { CCI_REG8(0x5d05), 0x75 }, { CCI_REG8(0x5d06), 0x75 },
+ { CCI_REG8(0x5d07), 0x75 }, { CCI_REG8(0x5d08), 0x75 },
+ { CCI_REG8(0x5d09), 0x75 }, { CCI_REG8(0x5d0a), 0x75 },
+ { CCI_REG8(0x5d0b), 0x75 }, { CCI_REG8(0x5d0c), 0x75 },
+ { CCI_REG8(0x5d0d), 0x75 }, { CCI_REG8(0x5d0e), 0x75 },
+ { CCI_REG8(0x5d0f), 0x75 }, { CCI_REG8(0x5d10), 0x75 },
+ { CCI_REG8(0x5d11), 0x75 }, { CCI_REG8(0x5d12), 0x75 },
+ { CCI_REG8(0x5d13), 0x75 }, { CCI_REG8(0x5d14), 0x75 },
+ { CCI_REG8(0x5d15), 0x75 }, { CCI_REG8(0x5d16), 0x75 },
+ { CCI_REG8(0x5d17), 0x75 }, { CCI_REG8(0x5d18), 0x75 },
+ { CCI_REG8(0x5d19), 0x75 }, { CCI_REG8(0x5d1a), 0x75 },
+ { CCI_REG8(0x5d1b), 0x75 }, { CCI_REG8(0x5d1c), 0x75 },
+ { CCI_REG8(0x5d1d), 0x75 }, { CCI_REG8(0x5d1e), 0x75 },
+ { CCI_REG8(0x5d1f), 0x75 }, { CCI_REG8(0x5d20), 0x75 },
+ { CCI_REG8(0x5d21), 0x75 }, { CCI_REG8(0x5d22), 0x75 },
+ { CCI_REG8(0x5d23), 0x75 }, { CCI_REG8(0x5d24), 0x75 },
+ { CCI_REG8(0x5d25), 0x75 }, { CCI_REG8(0x5d26), 0x75 },
+ { CCI_REG8(0x5d27), 0x75 }, { CCI_REG8(0x5d28), 0x75 },
+ { CCI_REG8(0x5d29), 0x75 }, { CCI_REG8(0x5d2a), 0x75 },
+ { CCI_REG8(0x5d2b), 0x75 }, { CCI_REG8(0x5d2c), 0x75 },
+ { CCI_REG8(0x5d2d), 0x75 }, { CCI_REG8(0x5d2e), 0x75 },
+ { CCI_REG8(0x5d2f), 0x75 }, { CCI_REG8(0x5d30), 0x75 },
+ { CCI_REG8(0x5d31), 0x75 }, { CCI_REG8(0x5d32), 0x75 },
+ { CCI_REG8(0x5d33), 0x75 }, { CCI_REG8(0x5d34), 0x75 },
+ { CCI_REG8(0x5d35), 0x75 }, { CCI_REG8(0x5d36), 0x75 },
+ { CCI_REG8(0x5d37), 0x75 }, { CCI_REG8(0x5d38), 0x75 },
+ { CCI_REG8(0x5d39), 0x75 }, { CCI_REG8(0x5d3a), 0x75 },
+ { CCI_REG8(0x5d3b), 0x75 }, { CCI_REG8(0x5d3c), 0x75 },
+ { CCI_REG8(0x5d3d), 0x75 }, { CCI_REG8(0x5d3e), 0x75 },
+ { CCI_REG8(0x5d3f), 0x75 }, { CCI_REG8(0x5d40), 0x75 },
+ { CCI_REG8(0x5d41), 0x75 }, { CCI_REG8(0x5d42), 0x75 },
+ { CCI_REG8(0x5d43), 0x75 }, { CCI_REG8(0x5d44), 0x75 },
+ { CCI_REG8(0x5d45), 0x75 }, { CCI_REG8(0x5d46), 0x75 },
+ { CCI_REG8(0x5d47), 0x75 }, { CCI_REG8(0x5d48), 0x75 },
+ { CCI_REG8(0x5d49), 0x75 }, { CCI_REG8(0x5d4a), 0x75 },
+ { CCI_REG8(0x5d4b), 0x75 }, { CCI_REG8(0x5d4c), 0x75 },
+ { CCI_REG8(0x5d4d), 0x75 }, { CCI_REG8(0x5d4e), 0x75 },
+ { CCI_REG8(0x5d4f), 0x75 }, { CCI_REG8(0x5d50), 0x75 },
+ { CCI_REG8(0x5d51), 0x75 }, { CCI_REG8(0x5d52), 0x75 },
+ { CCI_REG8(0x5d53), 0x75 }, { CCI_REG8(0x5d54), 0x75 },
+ { CCI_REG8(0x5d55), 0x75 }, { CCI_REG8(0x5d56), 0x75 },
+ { CCI_REG8(0x5d57), 0x75 }, { CCI_REG8(0x5d58), 0x75 },
+ { CCI_REG8(0x5d59), 0x75 }, { CCI_REG8(0x5d5a), 0x75 },
+ { CCI_REG8(0x5d5b), 0x75 }, { CCI_REG8(0x5d5c), 0x75 },
+ { CCI_REG8(0x5d5d), 0x75 }, { CCI_REG8(0x5d5e), 0x75 },
+ { CCI_REG8(0x5d5f), 0x75 }, { CCI_REG8(0x5d60), 0x75 },
+ { CCI_REG8(0x5d61), 0x75 }, { CCI_REG8(0x5d62), 0x75 },
+ { CCI_REG8(0x5d63), 0x75 }, { CCI_REG8(0x5d64), 0x75 },
+ { CCI_REG8(0x5d65), 0x75 }, { CCI_REG8(0x5d66), 0x75 },
+ { CCI_REG8(0x5d67), 0x75 }, { CCI_REG8(0x5d68), 0x75 },
+ { CCI_REG8(0x5d69), 0x75 }, { CCI_REG8(0x5d6a), 0x75 },
+ { CCI_REG8(0x5d6b), 0x75 }, { CCI_REG8(0x5d6c), 0x75 },
+ { CCI_REG8(0x5d6d), 0x75 }, { CCI_REG8(0x5d6e), 0x75 },
+ { CCI_REG8(0x5d6f), 0x75 }, { CCI_REG8(0x5d70), 0x75 },
+ { CCI_REG8(0x5d71), 0x75 }, { CCI_REG8(0x5d72), 0x75 },
+ { CCI_REG8(0x5d73), 0x75 }, { CCI_REG8(0x5d74), 0x75 },
+ { CCI_REG8(0x5d75), 0x75 }, { CCI_REG8(0x5d76), 0x75 },
+ { CCI_REG8(0x5d77), 0x75 }, { CCI_REG8(0x5d78), 0x75 },
+ { CCI_REG8(0x5d79), 0x75 }, { CCI_REG8(0x5d7a), 0x75 },
+ { CCI_REG8(0x5d7b), 0x75 }, { CCI_REG8(0x5d7c), 0x75 },
+ { CCI_REG8(0x5d7d), 0x75 }, { CCI_REG8(0x5d7e), 0x75 },
+ { CCI_REG8(0x5d7f), 0x75 }, { CCI_REG8(0x5d80), 0x75 },
+ { CCI_REG8(0x5d81), 0x75 }, { CCI_REG8(0x5d82), 0x75 },
+ { CCI_REG8(0x5d83), 0x75 }, { CCI_REG8(0x5d84), 0x75 },
+ { CCI_REG8(0x5d85), 0x75 }, { CCI_REG8(0x5d86), 0x75 },
+ { CCI_REG8(0x5d87), 0x75 }, { CCI_REG8(0x5d88), 0x75 },
+ { CCI_REG8(0x5d89), 0x75 }, { CCI_REG8(0x5d8a), 0x75 },
+ { CCI_REG8(0x5d8b), 0x75 }, { CCI_REG8(0x5d8c), 0x75 },
+ { CCI_REG8(0x5d8d), 0x75 }, { CCI_REG8(0x5d8e), 0x75 },
+ { CCI_REG8(0x5d8f), 0x75 }, { CCI_REG8(0x5d90), 0x75 },
+ { CCI_REG8(0x5d91), 0x75 }, { CCI_REG8(0x5d92), 0x75 },
+ { CCI_REG8(0x5d93), 0x75 }, { CCI_REG8(0x5d94), 0x75 },
+ { CCI_REG8(0x5d95), 0x75 }, { CCI_REG8(0x5d96), 0x75 },
+ { CCI_REG8(0x5d97), 0x75 }, { CCI_REG8(0x5d98), 0x75 },
+ { CCI_REG8(0x5d99), 0x75 }, { CCI_REG8(0x5d9a), 0x75 },
+ { CCI_REG8(0x5d9b), 0x75 }, { CCI_REG8(0x5d9c), 0x75 },
+ { CCI_REG8(0x5d9d), 0x75 }, { CCI_REG8(0x5d9e), 0x75 },
+ { CCI_REG8(0x5d9f), 0x75 }, { CCI_REG8(0x5da0), 0x75 },
+ { CCI_REG8(0x5da1), 0x75 }, { CCI_REG8(0x5da2), 0x75 },
+ { CCI_REG8(0x5da3), 0x75 }, { CCI_REG8(0x5da4), 0x75 },
+ { CCI_REG8(0x5da5), 0x75 }, { CCI_REG8(0x5da6), 0x75 },
+ { CCI_REG8(0x5da7), 0x75 }, { CCI_REG8(0x5da8), 0x75 },
+ { CCI_REG8(0x5da9), 0x75 }, { CCI_REG8(0x5daa), 0x75 },
+ { CCI_REG8(0x5dab), 0x75 }, { CCI_REG8(0x5dac), 0x75 },
+ { CCI_REG8(0x5dad), 0x75 }, { CCI_REG8(0x5dae), 0x75 },
+ { CCI_REG8(0x5daf), 0x75 }, { CCI_REG8(0x5db0), 0x75 },
+ { CCI_REG8(0x5db1), 0x75 }, { CCI_REG8(0x5db2), 0x75 },
+ { CCI_REG8(0x5db3), 0x75 }, { CCI_REG8(0x5db4), 0x75 },
+ { CCI_REG8(0x5db5), 0x75 }, { CCI_REG8(0x5db6), 0x75 },
+ { CCI_REG8(0x5db7), 0x75 }, { CCI_REG8(0x5db8), 0x75 },
+ { CCI_REG8(0x5db9), 0x75 }, { CCI_REG8(0x5dba), 0x75 },
+ { CCI_REG8(0x5dbb), 0x75 }, { CCI_REG8(0x5dbc), 0x75 },
+ { CCI_REG8(0x5dbd), 0x75 }, { CCI_REG8(0x5dbe), 0x75 },
+ { CCI_REG8(0x5dbf), 0x75 }, { CCI_REG8(0x5dc0), 0x75 },
+ { CCI_REG8(0x5dc1), 0x75 }, { CCI_REG8(0x5dc2), 0x75 },
+ { CCI_REG8(0x5dc3), 0x75 }, { CCI_REG8(0x5dc4), 0x75 },
+ { CCI_REG8(0x5dc5), 0x75 }, { CCI_REG8(0x5dc6), 0x75 },
+ { CCI_REG8(0x5dc7), 0x75 }, { CCI_REG8(0x5dc8), 0x75 },
+ { CCI_REG8(0x5dc9), 0x75 }, { CCI_REG8(0x5dca), 0x75 },
+ { CCI_REG8(0x5dcb), 0x75 }, { CCI_REG8(0x5dcc), 0x75 },
+ { CCI_REG8(0x5dcd), 0x75 }, { CCI_REG8(0x5dce), 0x75 },
+ { CCI_REG8(0x5dcf), 0x75 }, { CCI_REG8(0x5dd0), 0x75 },
+ { CCI_REG8(0x5dd1), 0x75 }, { CCI_REG8(0x5dd2), 0x75 },
+ { CCI_REG8(0x5dd3), 0x75 }, { CCI_REG8(0x5dd4), 0x75 },
+ { CCI_REG8(0x5dd5), 0x75 }, { CCI_REG8(0x5dd6), 0x75 },
+ { CCI_REG8(0x5dd7), 0x75 }, { CCI_REG8(0x5dd8), 0x75 },
+ { CCI_REG8(0x5dd9), 0x75 }, { CCI_REG8(0x5dda), 0x75 },
+ { CCI_REG8(0x5ddb), 0x75 }, { CCI_REG8(0x5ddc), 0x75 },
+ { CCI_REG8(0x5ddd), 0x75 }, { CCI_REG8(0x5dde), 0x75 },
+ { CCI_REG8(0x5ddf), 0x75 }, { CCI_REG8(0x5de0), 0x75 },
+ { CCI_REG8(0x5de1), 0x75 }, { CCI_REG8(0x5de2), 0x75 },
+ { CCI_REG8(0x5de3), 0x75 }, { CCI_REG8(0x5de4), 0x75 },
+ { CCI_REG8(0x5de5), 0x75 }, { CCI_REG8(0x5de6), 0x75 },
+ { CCI_REG8(0x5de7), 0x75 }, { CCI_REG8(0x5de8), 0x75 },
+ { CCI_REG8(0x5de9), 0x75 }, { CCI_REG8(0x5dea), 0x75 },
+ { CCI_REG8(0x5deb), 0x75 }, { CCI_REG8(0x5dec), 0x75 },
+ { CCI_REG8(0x5ded), 0x75 }, { CCI_REG8(0x5dee), 0x75 },
+ { CCI_REG8(0x5def), 0x75 }, { CCI_REG8(0x5df0), 0x75 },
+ { CCI_REG8(0x5df1), 0x75 }, { CCI_REG8(0x5df2), 0x75 },
+ { CCI_REG8(0x5df3), 0x75 }, { CCI_REG8(0x5df4), 0x75 },
+ { CCI_REG8(0x5df5), 0x75 }, { CCI_REG8(0x5df6), 0x75 },
+ { CCI_REG8(0x5df7), 0x75 }, { CCI_REG8(0x5df8), 0x75 },
+ { CCI_REG8(0x5df9), 0x75 }, { CCI_REG8(0x5dfa), 0x75 },
+ { CCI_REG8(0x5dfb), 0x75 }, { CCI_REG8(0x5dfc), 0x75 },
+ { CCI_REG8(0x5dfd), 0x75 }, { CCI_REG8(0x5dfe), 0x75 },
+ { CCI_REG8(0x5dff), 0x75 }, { CCI_REG8(0x5e00), 0x75 },
+ { CCI_REG8(0x5e01), 0x75 }, { CCI_REG8(0x5e02), 0x75 },
+ { CCI_REG8(0x5e03), 0x75 }, { CCI_REG8(0x5e04), 0x75 },
+ { CCI_REG8(0x5e05), 0x75 }, { CCI_REG8(0x5e06), 0x75 },
+ { CCI_REG8(0x5e07), 0x75 }, { CCI_REG8(0x5e08), 0x75 },
+ { CCI_REG8(0x5e09), 0x75 }, { CCI_REG8(0x5e0a), 0x75 },
+ { CCI_REG8(0x5e0b), 0x75 }, { CCI_REG8(0x5e0c), 0x75 },
+ { CCI_REG8(0x5e0d), 0x75 }, { CCI_REG8(0x5e0e), 0x75 },
+ { CCI_REG8(0x5e0f), 0x75 }, { CCI_REG8(0x5e10), 0x75 },
+ { CCI_REG8(0x5e11), 0x75 }, { CCI_REG8(0x5e12), 0x75 },
+ { CCI_REG8(0x5e13), 0x75 }, { CCI_REG8(0x5e14), 0x75 },
+ { CCI_REG8(0x5e15), 0x75 }, { CCI_REG8(0x5e16), 0x75 },
+ { CCI_REG8(0x5e17), 0x75 }, { CCI_REG8(0x5e18), 0x75 },
+ { CCI_REG8(0x5e19), 0x75 }, { CCI_REG8(0x5e1a), 0x75 },
+ { CCI_REG8(0x5e1b), 0x75 }, { CCI_REG8(0x5e1c), 0x75 },
+ { CCI_REG8(0x5e1d), 0x75 }, { CCI_REG8(0x5e1e), 0x75 },
+ { CCI_REG8(0x5e1f), 0x75 }, { CCI_REG8(0x5e20), 0x75 },
+ { CCI_REG8(0x5e21), 0x75 }, { CCI_REG8(0x5e22), 0x75 },
+ { CCI_REG8(0x5e23), 0x75 }, { CCI_REG8(0x5e24), 0x75 },
+ { CCI_REG8(0x5e25), 0x75 }, { CCI_REG8(0x5e26), 0x75 },
+ { CCI_REG8(0x5e27), 0x75 }, { CCI_REG8(0x5e28), 0x75 },
+ { CCI_REG8(0x5e29), 0x75 }, { CCI_REG8(0x5e2a), 0x75 },
+ { CCI_REG8(0x5e2b), 0x75 }, { CCI_REG8(0x5e2c), 0x75 },
+ { CCI_REG8(0x5e2d), 0x75 }, { CCI_REG8(0x5e2e), 0x75 },
+ { CCI_REG8(0x5e2f), 0x75 }, { CCI_REG8(0x5e30), 0x75 },
+ { CCI_REG8(0x5e31), 0x75 }, { CCI_REG8(0x5e32), 0x75 },
+ { CCI_REG8(0x5e33), 0x75 }, { CCI_REG8(0x5e34), 0x75 },
+ { CCI_REG8(0x5e35), 0x75 }, { CCI_REG8(0x5e36), 0x75 },
+ { CCI_REG8(0x5e37), 0x75 }, { CCI_REG8(0x5e38), 0x75 },
+ { CCI_REG8(0x5e39), 0x75 }, { CCI_REG8(0x5e3a), 0x75 },
+ { CCI_REG8(0x5e3b), 0x75 }, { CCI_REG8(0x5e3c), 0x75 },
+ { CCI_REG8(0x5e3d), 0x75 }, { CCI_REG8(0x5e3e), 0x75 },
+ { CCI_REG8(0x5e3f), 0x75 }, { CCI_REG8(0x5e40), 0x75 },
+ { CCI_REG8(0x5e41), 0x75 }, { CCI_REG8(0x5e42), 0x75 },
+ { CCI_REG8(0x5e43), 0x75 }, { CCI_REG8(0x5e44), 0x75 },
+ { CCI_REG8(0x5e45), 0x75 }, { CCI_REG8(0x5e46), 0x75 },
+ { CCI_REG8(0x5e47), 0x75 }, { CCI_REG8(0x5e48), 0x75 },
+ { CCI_REG8(0x5e49), 0x75 }, { CCI_REG8(0x5e4a), 0x75 },
+ { CCI_REG8(0x5e4b), 0x75 }, { CCI_REG8(0x5e4c), 0x75 },
+ { CCI_REG8(0x5e4d), 0x75 }, { CCI_REG8(0x5e4e), 0x75 },
+ { CCI_REG8(0x5e4f), 0x75 }, { CCI_REG8(0x5e50), 0x75 },
+ { CCI_REG8(0x5e51), 0x75 }, { CCI_REG8(0x5e52), 0x75 },
+ { CCI_REG8(0x5e53), 0x75 }, { CCI_REG8(0x5e54), 0x75 },
+ { CCI_REG8(0x5e55), 0x75 }, { CCI_REG8(0x5e56), 0x75 },
+ { CCI_REG8(0x5e57), 0x75 }, { CCI_REG8(0x5e58), 0x75 },
+ { CCI_REG8(0x5e59), 0x75 }, { CCI_REG8(0x5e5a), 0x75 },
+ { CCI_REG8(0x5e5b), 0x75 }, { CCI_REG8(0x5e5c), 0x75 },
+ { CCI_REG8(0x5e5d), 0x75 }, { CCI_REG8(0x5e5e), 0x75 },
+ { CCI_REG8(0x5e5f), 0x75 }, { CCI_REG8(0x5e60), 0x75 },
+ { CCI_REG8(0x5e61), 0x75 }, { CCI_REG8(0x5e62), 0x75 },
+ { CCI_REG8(0x5e63), 0x75 }, { CCI_REG8(0x5e64), 0x75 },
+ { CCI_REG8(0x5e65), 0x75 }, { CCI_REG8(0x5e66), 0x75 },
+ { CCI_REG8(0x5e67), 0x75 }, { CCI_REG8(0x5e68), 0x75 },
+ { CCI_REG8(0x5e69), 0x75 }, { CCI_REG8(0x5e6a), 0x75 },
+ { CCI_REG8(0x5e6b), 0x75 }, { CCI_REG8(0x5e6c), 0x75 },
+ { CCI_REG8(0x5e6d), 0x75 }, { CCI_REG8(0x5e6e), 0x75 },
+ { CCI_REG8(0x5e6f), 0x75 }, { CCI_REG8(0x5e70), 0x75 },
+ { CCI_REG8(0x5e71), 0x75 }, { CCI_REG8(0x5e72), 0x75 },
+ { CCI_REG8(0x5e73), 0x75 }, { CCI_REG8(0x5e74), 0x75 },
+ { CCI_REG8(0x5e75), 0x75 }, { CCI_REG8(0x5e76), 0x75 },
+ { CCI_REG8(0x5e77), 0x75 }, { CCI_REG8(0x5e78), 0x75 },
+ { CCI_REG8(0x5e79), 0x75 }, { CCI_REG8(0x5e7a), 0x75 },
+ { CCI_REG8(0x5e7b), 0x75 }, { CCI_REG8(0x5e7c), 0x75 },
+ { CCI_REG8(0x5e7d), 0x75 }, { CCI_REG8(0x5e7e), 0x75 },
+ { CCI_REG8(0x5e7f), 0x75 }, { CCI_REG8(0x5e80), 0x75 },
+ { CCI_REG8(0x5e81), 0x75 }, { CCI_REG8(0x5e82), 0x75 },
+ { CCI_REG8(0x5e83), 0x75 }, { CCI_REG8(0x5e84), 0x75 },
+ { CCI_REG8(0x5e85), 0x75 }, { CCI_REG8(0x5e86), 0x75 },
+ { CCI_REG8(0x5e87), 0x75 }, { CCI_REG8(0x5e88), 0x75 },
+ { CCI_REG8(0x5e89), 0x75 }, { CCI_REG8(0x5e8a), 0x75 },
+ { CCI_REG8(0x5e8b), 0x75 }, { CCI_REG8(0x5e8c), 0x75 },
+ { CCI_REG8(0x5e8d), 0x75 }, { CCI_REG8(0x5e8e), 0x75 },
+ { CCI_REG8(0x5e8f), 0x75 }, { CCI_REG8(0x5e90), 0x75 },
+ { CCI_REG8(0x5e91), 0x75 }, { CCI_REG8(0x5e92), 0x75 },
+ { CCI_REG8(0x5e93), 0x75 }, { CCI_REG8(0x5e94), 0x75 },
+ { CCI_REG8(0x5e95), 0x75 }, { CCI_REG8(0x5e96), 0x75 },
+ { CCI_REG8(0x5e97), 0x75 }, { CCI_REG8(0x5e98), 0x75 },
+ { CCI_REG8(0x5e99), 0x75 }, { CCI_REG8(0x5e9a), 0x75 },
+ { CCI_REG8(0x5e9b), 0x75 }, { CCI_REG8(0x5e9c), 0x75 },
+ { CCI_REG8(0x5e9d), 0x75 }, { CCI_REG8(0x5e9e), 0x75 },
+ { CCI_REG8(0x5e9f), 0x75 }, { CCI_REG8(0x5ea0), 0x75 },
+ { CCI_REG8(0x5ea1), 0x75 }, { CCI_REG8(0x5ea2), 0x75 },
+ { CCI_REG8(0x5ea3), 0x75 }, { CCI_REG8(0x5ea4), 0x75 },
+ { CCI_REG8(0x5ea5), 0x75 }, { CCI_REG8(0x5ea6), 0x75 },
+ { CCI_REG8(0x5ea7), 0x75 }, { CCI_REG8(0x5ea8), 0x75 },
+ { CCI_REG8(0x5ea9), 0x75 }, { CCI_REG8(0x5eaa), 0x75 },
+ { CCI_REG8(0x5eab), 0x75 }, { CCI_REG8(0x5eac), 0x75 },
+ { CCI_REG8(0x5ead), 0x75 }, { CCI_REG8(0x5eae), 0x75 },
+ { CCI_REG8(0x5eaf), 0x75 }, { CCI_REG8(0x5eb0), 0x75 },
+ { CCI_REG8(0x5eb1), 0x75 }, { CCI_REG8(0x5eb2), 0x75 },
+ { CCI_REG8(0x5eb3), 0x75 }, { CCI_REG8(0x5eb4), 0x75 },
+ { CCI_REG8(0x5eb5), 0x75 }, { CCI_REG8(0x5eb6), 0x75 },
+ { CCI_REG8(0x5eb7), 0x75 }, { CCI_REG8(0x5eb8), 0x75 },
+ { CCI_REG8(0x5eb9), 0x75 }, { CCI_REG8(0x5eba), 0x75 },
+ { CCI_REG8(0x5ebb), 0x75 }, { CCI_REG8(0x5ebc), 0x75 },
+ { CCI_REG8(0x5ebd), 0x75 }, { CCI_REG8(0x5ebe), 0x75 },
+ { CCI_REG8(0x5ebf), 0x75 }, { CCI_REG8(0x5ec0), 0x75 },
+ { CCI_REG8(0x5ec1), 0x75 }, { CCI_REG8(0x5ec2), 0x75 },
+ { CCI_REG8(0x5ec3), 0x75 }, { CCI_REG8(0x5ec4), 0x75 },
+ { CCI_REG8(0x5ec5), 0x75 }, { CCI_REG8(0x5ec6), 0x75 },
+ { CCI_REG8(0x5ec7), 0x75 }, { CCI_REG8(0x5ec8), 0x75 },
+ { CCI_REG8(0x5ec9), 0x75 }, { CCI_REG8(0x5eca), 0x75 },
+ { CCI_REG8(0x5ecb), 0x75 }, { CCI_REG8(0x5ecc), 0x75 },
+ { CCI_REG8(0x5ecd), 0x75 }, { CCI_REG8(0x5ece), 0x75 },
+ { CCI_REG8(0x5ecf), 0x75 }, { CCI_REG8(0x5ed0), 0x75 },
+ { CCI_REG8(0x5ed1), 0x75 }, { CCI_REG8(0x5ed2), 0x75 },
+ { CCI_REG8(0x5ed3), 0x75 }, { CCI_REG8(0x5ed4), 0x75 },
+ { CCI_REG8(0x5ed5), 0x75 }, { CCI_REG8(0x5ed6), 0x75 },
+ { CCI_REG8(0x5ed7), 0x75 }, { CCI_REG8(0x5ed8), 0x75 },
+ { CCI_REG8(0x5ed9), 0x75 }, { CCI_REG8(0x5eda), 0x75 },
+ { CCI_REG8(0x5edb), 0x75 }, { CCI_REG8(0x5edc), 0x75 },
+ { CCI_REG8(0x5edd), 0x75 }, { CCI_REG8(0x5ede), 0x75 },
+ { CCI_REG8(0x5edf), 0x75 }, { CCI_REG8(0xfff9), 0x08 },
+ { CCI_REG8(0x1570), 0x00 }, { CCI_REG8(0x15d0), 0x00 },
+ { CCI_REG8(0x15a0), 0x02 }, { CCI_REG8(0x15a1), 0x00 },
+ { CCI_REG8(0x15a2), 0x02 }, { CCI_REG8(0x15a3), 0x76 },
+ { CCI_REG8(0x15a4), 0x03 }, { CCI_REG8(0x15a5), 0x08 },
+ { CCI_REG8(0x15a6), 0x00 }, { CCI_REG8(0x15a7), 0x60 },
+ { CCI_REG8(0x15a8), 0x01 }, { CCI_REG8(0x15a9), 0x00 },
+ { CCI_REG8(0x15aa), 0x02 }, { CCI_REG8(0x15ab), 0x00 },
+ { CCI_REG8(0x1600), 0x02 }, { CCI_REG8(0x1601), 0x00 },
+ { CCI_REG8(0x1602), 0x02 }, { CCI_REG8(0x1603), 0x76 },
+ { CCI_REG8(0x1604), 0x03 }, { CCI_REG8(0x1605), 0x08 },
+ { CCI_REG8(0x1606), 0x00 }, { CCI_REG8(0x1607), 0x60 },
+ { CCI_REG8(0x1608), 0x01 }, { CCI_REG8(0x1609), 0x00 },
+ { CCI_REG8(0x160a), 0x02 }, { CCI_REG8(0x160b), 0x00 },
+ { CCI_REG8(0x1633), 0x03 }, { CCI_REG8(0x1634), 0x01 },
+ { CCI_REG8(0x163c), 0x3a }, { CCI_REG8(0x163d), 0x01 },
+ { CCI_REG8(0x1648), 0x32 }, { CCI_REG8(0x1658), 0x01 },
+ { CCI_REG8(0x1659), 0x01 }, { CCI_REG8(0x165f), 0x01 },
+ { CCI_REG8(0x1677), 0x01 }, { CCI_REG8(0x1690), 0x08 },
+ { CCI_REG8(0x1691), 0x00 }, { CCI_REG8(0x1692), 0x20 },
+ { CCI_REG8(0x1693), 0x00 }, { CCI_REG8(0x1694), 0x10 },
+ { CCI_REG8(0x1695), 0x14 }, { CCI_REG8(0x1696), 0x10 },
+ { CCI_REG8(0x1697), 0x0e }, { CCI_REG8(0x1730), 0x01 },
+ { CCI_REG8(0x1732), 0x00 }, { CCI_REG8(0x1733), 0x10 },
+ { CCI_REG8(0x1734), 0x01 }, { CCI_REG8(0x1735), 0x00 },
+ { CCI_REG8(0x1748), 0x01 }, { CCI_REG8(0xfff9), 0x06 },
+ { CCI_REG8(0x5000), 0xff }, { CCI_REG8(0x5001), 0x3d },
+ { CCI_REG8(0x5002), 0xf5 }, { CCI_REG8(0x5004), 0x80 },
+ { CCI_REG8(0x5006), 0x04 }, { CCI_REG8(0x5061), 0x20 },
+ { CCI_REG8(0x5063), 0x20 }, { CCI_REG8(0x5064), 0x24 },
+ { CCI_REG8(0x5065), 0x00 }, { CCI_REG8(0x5066), 0x1b },
+ { CCI_REG8(0x5067), 0x00 }, { CCI_REG8(0x5068), 0x03 },
+ { CCI_REG8(0x5069), 0x10 }, { CCI_REG8(0x506a), 0x20 },
+ { CCI_REG8(0x506b), 0x04 }, { CCI_REG8(0x506c), 0x04 },
+ { CCI_REG8(0x506d), 0x0c }, { CCI_REG8(0x506e), 0x0c },
+ { CCI_REG8(0x506f), 0x04 }, { CCI_REG8(0x5070), 0x0c },
+ { CCI_REG8(0x5071), 0x14 }, { CCI_REG8(0x5072), 0x1c },
+ { CCI_REG8(0x5073), 0x01 }, { CCI_REG8(0x5074), 0x01 },
+ { CCI_REG8(0x5075), 0xbe }, { CCI_REG8(0x5083), 0x00 },
+ { CCI_REG8(0x5114), 0x03 }, { CCI_REG8(0x51b0), 0x00 },
+ { CCI_REG8(0x51b3), 0x0e }, { CCI_REG8(0x51b5), 0x02 },
+ { CCI_REG8(0x51b6), 0x00 }, { CCI_REG8(0x51b7), 0x00 },
+ { CCI_REG8(0x51b8), 0x00 }, { CCI_REG8(0x51b9), 0x70 },
+ { CCI_REG8(0x51ba), 0x00 }, { CCI_REG8(0x51bb), 0x10 },
+ { CCI_REG8(0x51bc), 0x00 }, { CCI_REG8(0x51bd), 0x00 },
+ { CCI_REG8(0x51d2), 0xff }, { CCI_REG8(0x51d3), 0x1c },
+ { CCI_REG8(0x5250), 0x34 }, { CCI_REG8(0x5251), 0x00 },
+ { CCI_REG8(0x525b), 0x00 }, { CCI_REG8(0x525d), 0x00 },
+ { CCI_REG8(0x527a), 0x00 }, { CCI_REG8(0x527b), 0x38 },
+ { CCI_REG8(0x527c), 0x00 }, { CCI_REG8(0x527d), 0x4b },
+ { CCI_REG8(0x5286), 0x1b }, { CCI_REG8(0x5287), 0x40 },
+ { CCI_REG8(0x5290), 0x00 }, { CCI_REG8(0x5291), 0x50 },
+ { CCI_REG8(0x5292), 0x00 }, { CCI_REG8(0x5293), 0x50 },
+ { CCI_REG8(0x5294), 0x00 }, { CCI_REG8(0x5295), 0x50 },
+ { CCI_REG8(0x5296), 0x00 }, { CCI_REG8(0x5297), 0x50 },
+ { CCI_REG8(0x5298), 0x00 }, { CCI_REG8(0x5299), 0x50 },
+ { CCI_REG8(0x529a), 0x01 }, { CCI_REG8(0x529b), 0x00 },
+ { CCI_REG8(0x529c), 0x01 }, { CCI_REG8(0x529d), 0x00 },
+ { CCI_REG8(0x529e), 0x00 }, { CCI_REG8(0x529f), 0x50 },
+ { CCI_REG8(0x52a0), 0x00 }, { CCI_REG8(0x52a1), 0x50 },
+ { CCI_REG8(0x52a2), 0x01 }, { CCI_REG8(0x52a3), 0x00 },
+ { CCI_REG8(0x52a4), 0x01 }, { CCI_REG8(0x52a5), 0x00 },
+ { CCI_REG8(0x52a6), 0x00 }, { CCI_REG8(0x52a7), 0x50 },
+ { CCI_REG8(0x52a8), 0x00 }, { CCI_REG8(0x52a9), 0x50 },
+ { CCI_REG8(0x52aa), 0x00 }, { CCI_REG8(0x52ab), 0x50 },
+ { CCI_REG8(0x52ac), 0x00 }, { CCI_REG8(0x52ad), 0x50 },
+ { CCI_REG8(0x52ae), 0x00 }, { CCI_REG8(0x52af), 0x50 },
+ { CCI_REG8(0x52b0), 0x00 }, { CCI_REG8(0x52b1), 0x50 },
+ { CCI_REG8(0x52b2), 0x00 }, { CCI_REG8(0x52b3), 0x50 },
+ { CCI_REG8(0x52b4), 0x00 }, { CCI_REG8(0x52b5), 0x50 },
+ { CCI_REG8(0x52b6), 0x00 }, { CCI_REG8(0x52b7), 0x50 },
+ { CCI_REG8(0x52b8), 0x00 }, { CCI_REG8(0x52b9), 0x50 },
+ { CCI_REG8(0x52ba), 0x01 }, { CCI_REG8(0x52bb), 0x00 },
+ { CCI_REG8(0x52bc), 0x01 }, { CCI_REG8(0x52bd), 0x00 },
+ { CCI_REG8(0x52be), 0x00 }, { CCI_REG8(0x52bf), 0x50 },
+ { CCI_REG8(0x52c0), 0x00 }, { CCI_REG8(0x52c1), 0x50 },
+ { CCI_REG8(0x52c2), 0x01 }, { CCI_REG8(0x52c3), 0x00 },
+ { CCI_REG8(0x52c4), 0x01 }, { CCI_REG8(0x52c5), 0x00 },
+ { CCI_REG8(0x52c6), 0x00 }, { CCI_REG8(0x52c7), 0x50 },
+ { CCI_REG8(0x52c8), 0x00 }, { CCI_REG8(0x52c9), 0x50 },
+ { CCI_REG8(0x52ca), 0x00 }, { CCI_REG8(0x52cb), 0x50 },
+ { CCI_REG8(0x52cc), 0x00 }, { CCI_REG8(0x52cd), 0x50 },
+ { CCI_REG8(0x52ce), 0x00 }, { CCI_REG8(0x52cf), 0x50 },
+ { CCI_REG8(0x52f0), 0x04 }, { CCI_REG8(0x52f1), 0x03 },
+ { CCI_REG8(0x52f2), 0x02 }, { CCI_REG8(0x52f3), 0x01 },
+ { CCI_REG8(0x52f4), 0x08 }, { CCI_REG8(0x52f5), 0x07 },
+ { CCI_REG8(0x52f6), 0x06 }, { CCI_REG8(0x52f7), 0x05 },
+ { CCI_REG8(0x52f8), 0x0c }, { CCI_REG8(0x52f9), 0x0b },
+ { CCI_REG8(0x52fa), 0x0a }, { CCI_REG8(0x52fb), 0x09 },
+ { CCI_REG8(0x52fc), 0x10 }, { CCI_REG8(0x52fd), 0x0f },
+ { CCI_REG8(0x52fe), 0x0e }, { CCI_REG8(0x52ff), 0x0d },
+ { CCI_REG8(0x5300), 0x14 }, { CCI_REG8(0x5301), 0x13 },
+ { CCI_REG8(0x5302), 0x12 }, { CCI_REG8(0x5303), 0x11 },
+ { CCI_REG8(0x5304), 0x18 }, { CCI_REG8(0x5305), 0x17 },
+ { CCI_REG8(0x5306), 0x16 }, { CCI_REG8(0x5307), 0x15 },
+ { CCI_REG8(0x5308), 0x1c }, { CCI_REG8(0x5309), 0x1b },
+ { CCI_REG8(0x530a), 0x1a }, { CCI_REG8(0x530b), 0x19 },
+ { CCI_REG8(0x530c), 0x20 }, { CCI_REG8(0x530d), 0x1f },
+ { CCI_REG8(0x530e), 0x1e }, { CCI_REG8(0x530f), 0x1d },
+ { CCI_REG8(0x5310), 0x03 }, { CCI_REG8(0x5311), 0xe8 },
+ { CCI_REG8(0x5331), 0x0a }, { CCI_REG8(0x5332), 0x43 },
+ { CCI_REG8(0x5333), 0x45 }, { CCI_REG8(0x5353), 0x09 },
+ { CCI_REG8(0x5354), 0x00 }, { CCI_REG8(0x5414), 0x03 },
+ { CCI_REG8(0x54b0), 0x10 }, { CCI_REG8(0x54b3), 0x0e },
+ { CCI_REG8(0x54b5), 0x02 }, { CCI_REG8(0x54b6), 0x00 },
+ { CCI_REG8(0x54b7), 0x00 }, { CCI_REG8(0x54b8), 0x00 },
+ { CCI_REG8(0x54b9), 0x70 }, { CCI_REG8(0x54ba), 0x00 },
+ { CCI_REG8(0x54bb), 0x10 }, { CCI_REG8(0x54bc), 0x00 },
+ { CCI_REG8(0x54bd), 0x00 }, { CCI_REG8(0x54d2), 0xff },
+ { CCI_REG8(0x54d3), 0x1c }, { CCI_REG8(0x5510), 0x03 },
+ { CCI_REG8(0x5511), 0xe8 }, { CCI_REG8(0x5550), 0x6c },
+ { CCI_REG8(0x5551), 0x00 }, { CCI_REG8(0x557a), 0x00 },
+ { CCI_REG8(0x557b), 0x38 }, { CCI_REG8(0x557c), 0x00 },
+ { CCI_REG8(0x557d), 0x4b }, { CCI_REG8(0x5590), 0x00 },
+ { CCI_REG8(0x5591), 0x50 }, { CCI_REG8(0x5592), 0x00 },
+ { CCI_REG8(0x5593), 0x50 }, { CCI_REG8(0x5594), 0x00 },
+ { CCI_REG8(0x5595), 0x50 }, { CCI_REG8(0x5596), 0x00 },
+ { CCI_REG8(0x5597), 0x50 }, { CCI_REG8(0x5598), 0x00 },
+ { CCI_REG8(0x5599), 0x50 }, { CCI_REG8(0x559a), 0x01 },
+ { CCI_REG8(0x559b), 0x00 }, { CCI_REG8(0x559c), 0x01 },
+ { CCI_REG8(0x559d), 0x00 }, { CCI_REG8(0x559e), 0x00 },
+ { CCI_REG8(0x559f), 0x50 }, { CCI_REG8(0x55a0), 0x00 },
+ { CCI_REG8(0x55a1), 0x50 }, { CCI_REG8(0x55a2), 0x01 },
+ { CCI_REG8(0x55a3), 0x00 }, { CCI_REG8(0x55a4), 0x01 },
+ { CCI_REG8(0x55a5), 0x00 }, { CCI_REG8(0x55a6), 0x00 },
+ { CCI_REG8(0x55a7), 0x50 }, { CCI_REG8(0x55a8), 0x00 },
+ { CCI_REG8(0x55a9), 0x50 }, { CCI_REG8(0x55aa), 0x00 },
+ { CCI_REG8(0x55ab), 0x50 }, { CCI_REG8(0x55ac), 0x00 },
+ { CCI_REG8(0x55ad), 0x50 }, { CCI_REG8(0x55ae), 0x00 },
+ { CCI_REG8(0x55af), 0x50 }, { CCI_REG8(0x55b0), 0x00 },
+ { CCI_REG8(0x55b1), 0x50 }, { CCI_REG8(0x55b2), 0x00 },
+ { CCI_REG8(0x55b3), 0x50 }, { CCI_REG8(0x55b4), 0x00 },
+ { CCI_REG8(0x55b5), 0x50 }, { CCI_REG8(0x55b6), 0x00 },
+ { CCI_REG8(0x55b7), 0x50 }, { CCI_REG8(0x55b8), 0x00 },
+ { CCI_REG8(0x55b9), 0x50 }, { CCI_REG8(0x55ba), 0x01 },
+ { CCI_REG8(0x55bb), 0x00 }, { CCI_REG8(0x55bc), 0x01 },
+ { CCI_REG8(0x55bd), 0x00 }, { CCI_REG8(0x55be), 0x00 },
+ { CCI_REG8(0x55bf), 0x50 }, { CCI_REG8(0x55c0), 0x00 },
+ { CCI_REG8(0x55c1), 0x50 }, { CCI_REG8(0x55c2), 0x01 },
+ { CCI_REG8(0x55c3), 0x00 }, { CCI_REG8(0x55c4), 0x01 },
+ { CCI_REG8(0x55c5), 0x00 }, { CCI_REG8(0x55c6), 0x00 },
+ { CCI_REG8(0x55c7), 0x50 }, { CCI_REG8(0x55c8), 0x00 },
+ { CCI_REG8(0x55c9), 0x50 }, { CCI_REG8(0x55ca), 0x00 },
+ { CCI_REG8(0x55cb), 0x50 }, { CCI_REG8(0x55cc), 0x00 },
+ { CCI_REG8(0x55cd), 0x50 }, { CCI_REG8(0x55ce), 0x00 },
+ { CCI_REG8(0x55cf), 0x50 }, { CCI_REG8(0x55f0), 0x04 },
+ { CCI_REG8(0x55f1), 0x03 }, { CCI_REG8(0x55f2), 0x02 },
+ { CCI_REG8(0x55f3), 0x01 }, { CCI_REG8(0x55f4), 0x08 },
+ { CCI_REG8(0x55f5), 0x07 }, { CCI_REG8(0x55f6), 0x06 },
+ { CCI_REG8(0x55f7), 0x05 }, { CCI_REG8(0x55f8), 0x0c },
+ { CCI_REG8(0x55f9), 0x0b }, { CCI_REG8(0x55fa), 0x0a },
+ { CCI_REG8(0x55fb), 0x09 }, { CCI_REG8(0x55fc), 0x10 },
+ { CCI_REG8(0x55fd), 0x0f }, { CCI_REG8(0x55fe), 0x0e },
+ { CCI_REG8(0x55ff), 0x0d }, { CCI_REG8(0x5600), 0x14 },
+ { CCI_REG8(0x5601), 0x13 }, { CCI_REG8(0x5602), 0x12 },
+ { CCI_REG8(0x5603), 0x11 }, { CCI_REG8(0x5604), 0x18 },
+ { CCI_REG8(0x5605), 0x17 }, { CCI_REG8(0x5606), 0x16 },
+ { CCI_REG8(0x5607), 0x15 }, { CCI_REG8(0x5608), 0x1c },
+ { CCI_REG8(0x5609), 0x1b }, { CCI_REG8(0x560a), 0x1a },
+ { CCI_REG8(0x560b), 0x19 }, { CCI_REG8(0x560c), 0x20 },
+ { CCI_REG8(0x560d), 0x1f }, { CCI_REG8(0x560e), 0x1e },
+ { CCI_REG8(0x560f), 0x1d }, { CCI_REG8(0x5631), 0x02 },
+ { CCI_REG8(0x5632), 0x42 }, { CCI_REG8(0x5633), 0x24 },
+ { CCI_REG8(0x5653), 0x09 }, { CCI_REG8(0x5654), 0x00 },
+ { CCI_REG8(0x5714), 0x03 }, { CCI_REG8(0x57b0), 0x10 },
+ { CCI_REG8(0x57b3), 0x0e }, { CCI_REG8(0x57b5), 0x02 },
+ { CCI_REG8(0x57b6), 0x00 }, { CCI_REG8(0x57b7), 0x00 },
+ { CCI_REG8(0x57b8), 0x00 }, { CCI_REG8(0x57b9), 0x70 },
+ { CCI_REG8(0x57ba), 0x00 }, { CCI_REG8(0x57bb), 0x10 },
+ { CCI_REG8(0x57bc), 0x00 }, { CCI_REG8(0x57bd), 0x00 },
+ { CCI_REG8(0x57d2), 0xff }, { CCI_REG8(0x57d3), 0x1c },
+ { CCI_REG8(0x5810), 0x03 }, { CCI_REG8(0x5811), 0xe8 },
+ { CCI_REG8(0x5850), 0x6c }, { CCI_REG8(0x5851), 0x00 },
+ { CCI_REG8(0x587a), 0x00 }, { CCI_REG8(0x587b), 0x38 },
+ { CCI_REG8(0x587c), 0x00 }, { CCI_REG8(0x587d), 0x4b },
+ { CCI_REG8(0x5890), 0x00 }, { CCI_REG8(0x5891), 0x50 },
+ { CCI_REG8(0x5892), 0x00 }, { CCI_REG8(0x5893), 0x50 },
+ { CCI_REG8(0x5894), 0x00 }, { CCI_REG8(0x5895), 0x50 },
+ { CCI_REG8(0x5896), 0x00 }, { CCI_REG8(0x5897), 0x50 },
+ { CCI_REG8(0x5898), 0x00 }, { CCI_REG8(0x5899), 0x50 },
+ { CCI_REG8(0x589a), 0x01 }, { CCI_REG8(0x589b), 0x00 },
+ { CCI_REG8(0x589c), 0x01 }, { CCI_REG8(0x589d), 0x00 },
+ { CCI_REG8(0x589e), 0x00 }, { CCI_REG8(0x589f), 0x50 },
+ { CCI_REG8(0x58a0), 0x00 }, { CCI_REG8(0x58a1), 0x50 },
+ { CCI_REG8(0x58a2), 0x01 }, { CCI_REG8(0x58a3), 0x00 },
+ { CCI_REG8(0x58a4), 0x01 }, { CCI_REG8(0x58a5), 0x00 },
+ { CCI_REG8(0x58a6), 0x00 }, { CCI_REG8(0x58a7), 0x50 },
+ { CCI_REG8(0x58a8), 0x00 }, { CCI_REG8(0x58a9), 0x50 },
+ { CCI_REG8(0x58aa), 0x00 }, { CCI_REG8(0x58ab), 0x50 },
+ { CCI_REG8(0x58ac), 0x00 }, { CCI_REG8(0x58ad), 0x50 },
+ { CCI_REG8(0x58ae), 0x00 }, { CCI_REG8(0x58af), 0x50 },
+ { CCI_REG8(0x58b0), 0x00 }, { CCI_REG8(0x58b1), 0x50 },
+ { CCI_REG8(0x58b2), 0x00 }, { CCI_REG8(0x58b3), 0x50 },
+ { CCI_REG8(0x58b4), 0x00 }, { CCI_REG8(0x58b5), 0x50 },
+ { CCI_REG8(0x58b6), 0x00 }, { CCI_REG8(0x58b7), 0x50 },
+ { CCI_REG8(0x58b8), 0x00 }, { CCI_REG8(0x58b9), 0x50 },
+ { CCI_REG8(0x58ba), 0x01 }, { CCI_REG8(0x58bb), 0x00 },
+ { CCI_REG8(0x58bc), 0x01 }, { CCI_REG8(0x58bd), 0x00 },
+ { CCI_REG8(0x58be), 0x00 }, { CCI_REG8(0x58bf), 0x50 },
+ { CCI_REG8(0x58c0), 0x00 }, { CCI_REG8(0x58c1), 0x50 },
+ { CCI_REG8(0x58c2), 0x01 }, { CCI_REG8(0x58c3), 0x00 },
+ { CCI_REG8(0x58c4), 0x01 }, { CCI_REG8(0x58c5), 0x00 },
+ { CCI_REG8(0x58c6), 0x00 }, { CCI_REG8(0x58c7), 0x50 },
+ { CCI_REG8(0x58c8), 0x00 }, { CCI_REG8(0x58c9), 0x50 },
+ { CCI_REG8(0x58ca), 0x00 }, { CCI_REG8(0x58cb), 0x50 },
+ { CCI_REG8(0x58cc), 0x00 }, { CCI_REG8(0x58cd), 0x50 },
+ { CCI_REG8(0x58ce), 0x00 }, { CCI_REG8(0x58cf), 0x50 },
+ { CCI_REG8(0x58f0), 0x04 }, { CCI_REG8(0x58f1), 0x03 },
+ { CCI_REG8(0x58f2), 0x02 }, { CCI_REG8(0x58f3), 0x01 },
+ { CCI_REG8(0x58f4), 0x08 }, { CCI_REG8(0x58f5), 0x07 },
+ { CCI_REG8(0x58f6), 0x06 }, { CCI_REG8(0x58f7), 0x05 },
+ { CCI_REG8(0x58f8), 0x0c }, { CCI_REG8(0x58f9), 0x0b },
+ { CCI_REG8(0x58fa), 0x0a }, { CCI_REG8(0x58fb), 0x09 },
+ { CCI_REG8(0x58fc), 0x10 }, { CCI_REG8(0x58fd), 0x0f },
+ { CCI_REG8(0x58fe), 0x0e }, { CCI_REG8(0x58ff), 0x0d },
+ { CCI_REG8(0x5900), 0x14 }, { CCI_REG8(0x5901), 0x13 },
+ { CCI_REG8(0x5902), 0x12 }, { CCI_REG8(0x5903), 0x11 },
+ { CCI_REG8(0x5904), 0x18 }, { CCI_REG8(0x5905), 0x17 },
+ { CCI_REG8(0x5906), 0x16 }, { CCI_REG8(0x5907), 0x15 },
+ { CCI_REG8(0x5908), 0x1c }, { CCI_REG8(0x5909), 0x1b },
+ { CCI_REG8(0x590a), 0x1a }, { CCI_REG8(0x590b), 0x19 },
+ { CCI_REG8(0x590c), 0x20 }, { CCI_REG8(0x590d), 0x1f },
+ { CCI_REG8(0x590e), 0x1e }, { CCI_REG8(0x590f), 0x1d },
+ { CCI_REG8(0x5931), 0x02 }, { CCI_REG8(0x5932), 0x42 },
+ { CCI_REG8(0x5933), 0x24 }, { CCI_REG8(0x5953), 0x09 },
+ { CCI_REG8(0x5954), 0x00 }, { CCI_REG8(0x5989), 0x84 },
+ { CCI_REG8(0x59c3), 0x04 }, { CCI_REG8(0x59c4), 0x24 },
+ { CCI_REG8(0x59c5), 0x40 }, { CCI_REG8(0x59c6), 0x1b },
+ { CCI_REG8(0x59c7), 0x40 }, { CCI_REG8(0x5a02), 0x0f },
+ { CCI_REG8(0x5f00), 0x29 }, { CCI_REG8(0x5f2d), 0x28 },
+ { CCI_REG8(0x5f2e), 0x28 }, { CCI_REG8(0x6801), 0x11 },
+ { CCI_REG8(0x6802), 0x3f }, { CCI_REG8(0x6803), 0xe7 },
+ { CCI_REG8(0x6825), 0x0f }, { CCI_REG8(0x6826), 0x20 },
+ { CCI_REG8(0x6827), 0x00 }, { CCI_REG8(0x6829), 0x16 },
+ { CCI_REG8(0x682b), 0xb3 }, { CCI_REG8(0x682c), 0x01 },
+ { CCI_REG8(0x6832), 0xff }, { CCI_REG8(0x6833), 0xff },
+ { CCI_REG8(0x6898), 0x80 }, { CCI_REG8(0x6899), 0x80 },
+ { CCI_REG8(0x689b), 0x40 }, { CCI_REG8(0x689c), 0x20 },
+ { CCI_REG8(0x689d), 0x20 }, { CCI_REG8(0x689e), 0x80 },
+ { CCI_REG8(0x689f), 0x60 }, { CCI_REG8(0x68a0), 0x40 },
+ { CCI_REG8(0x68a4), 0x40 }, { CCI_REG8(0x68a5), 0x20 },
+ { CCI_REG8(0x68a6), 0x00 }, { CCI_REG8(0x68b6), 0x80 },
+ { CCI_REG8(0x68b7), 0x80 }, { CCI_REG8(0x68b8), 0x80 },
+ { CCI_REG8(0x68bc), 0x80 }, { CCI_REG8(0x68bd), 0x80 },
+ { CCI_REG8(0x68be), 0x80 }, { CCI_REG8(0x68bf), 0x40 },
+ { CCI_REG8(0x68c2), 0x80 }, { CCI_REG8(0x68c3), 0x80 },
+ { CCI_REG8(0x68c4), 0x60 }, { CCI_REG8(0x68c5), 0x30 },
+ { CCI_REG8(0x6918), 0x80 }, { CCI_REG8(0x6919), 0x80 },
+ { CCI_REG8(0x691b), 0x40 }, { CCI_REG8(0x691c), 0x20 },
+ { CCI_REG8(0x691d), 0x20 }, { CCI_REG8(0x691e), 0x80 },
+ { CCI_REG8(0x691f), 0x60 }, { CCI_REG8(0x6920), 0x40 },
+ { CCI_REG8(0x6924), 0x40 }, { CCI_REG8(0x6925), 0x20 },
+ { CCI_REG8(0x6926), 0x00 }, { CCI_REG8(0x6936), 0x40 },
+ { CCI_REG8(0x6937), 0x40 }, { CCI_REG8(0x6938), 0x20 },
+ { CCI_REG8(0x6939), 0x20 }, { CCI_REG8(0x693a), 0x10 },
+ { CCI_REG8(0x693b), 0x10 }, { CCI_REG8(0x693c), 0x20 },
+ { CCI_REG8(0x693d), 0x20 }, { CCI_REG8(0x693e), 0x10 },
+ { CCI_REG8(0x693f), 0x10 }, { CCI_REG8(0x6940), 0x00 },
+ { CCI_REG8(0x6941), 0x00 }, { CCI_REG8(0x6942), 0x08 },
+ { CCI_REG8(0x6943), 0x08 }, { CCI_REG8(0x6944), 0x00 },
+ { CCI_REG8(0x69c2), 0x07 }, { CCI_REG8(0x6a20), 0x01 },
+ { CCI_REG8(0x6a23), 0x10 }, { CCI_REG8(0x6a26), 0x3d },
+ { CCI_REG8(0x6a27), 0x3e }, { CCI_REG8(0x6a38), 0x02 },
+ { CCI_REG8(0x6a39), 0x20 }, { CCI_REG8(0x6a3a), 0x02 },
+ { CCI_REG8(0x6a3b), 0x84 }, { CCI_REG8(0x6a3e), 0x02 },
+ { CCI_REG8(0x6a3f), 0x20 }, { CCI_REG8(0x6a47), 0x3b },
+ { CCI_REG8(0x6a63), 0x04 }, { CCI_REG8(0x6a65), 0x00 },
+ { CCI_REG8(0x6a67), 0x0f }, { CCI_REG8(0x6b22), 0x07 },
+ { CCI_REG8(0x6b23), 0xc2 }, { CCI_REG8(0x6b2f), 0x00 },
+ { CCI_REG8(0x6b60), 0x1f }, { CCI_REG8(0x6bd2), 0x5a },
+ { CCI_REG8(0x6c20), 0x50 }, { CCI_REG8(0x6c60), 0x50 },
+ { CCI_REG8(0x6c61), 0x06 }, { CCI_REG8(0x7318), 0x04 },
+ { CCI_REG8(0x7319), 0x01 }, { CCI_REG8(0x731a), 0x04 },
+ { CCI_REG8(0x731b), 0x01 }, { CCI_REG8(0x731c), 0x00 },
+ { CCI_REG8(0x731d), 0x00 }, { CCI_REG8(0x731e), 0x04 },
+ { CCI_REG8(0x731f), 0x01 }, { CCI_REG8(0x7320), 0x04 },
+ { CCI_REG8(0x7321), 0x00 }, { CCI_REG8(0x7322), 0x04 },
+ { CCI_REG8(0x7323), 0x00 }, { CCI_REG8(0x7324), 0x04 },
+ { CCI_REG8(0x7325), 0x00 }, { CCI_REG8(0x7326), 0x04 },
+ { CCI_REG8(0x7327), 0x00 }, { CCI_REG8(0x7600), 0x00 },
+ { CCI_REG8(0x7601), 0x00 }, { CCI_REG8(0x7602), 0x10 },
+ { CCI_REG8(0x7603), 0x00 }, { CCI_REG8(0x7604), 0x00 },
+ { CCI_REG8(0x7605), 0x00 }, { CCI_REG8(0x7606), 0x10 },
+ { CCI_REG8(0x7607), 0x00 }, { CCI_REG8(0x7608), 0x00 },
+ { CCI_REG8(0x7609), 0x00 }, { CCI_REG8(0x760a), 0x10 },
+ { CCI_REG8(0x760b), 0x00 }, { CCI_REG8(0x760c), 0x00 },
+ { CCI_REG8(0x760d), 0x00 }, { CCI_REG8(0x760e), 0x10 },
+ { CCI_REG8(0x760f), 0x00 }, { CCI_REG8(0x7610), 0x00 },
+ { CCI_REG8(0x7611), 0x00 }, { CCI_REG8(0x7612), 0x10 },
+ { CCI_REG8(0x7613), 0x00 }, { CCI_REG8(0x7614), 0x00 },
+ { CCI_REG8(0x7615), 0x00 }, { CCI_REG8(0x7616), 0x10 },
+ { CCI_REG8(0x7617), 0x00 }, { CCI_REG8(0x7618), 0x00 },
+ { CCI_REG8(0x7619), 0x00 }, { CCI_REG8(0x761a), 0x10 },
+ { CCI_REG8(0x761b), 0x00 }, { CCI_REG8(0x761c), 0x00 },
+ { CCI_REG8(0x761d), 0x00 }, { CCI_REG8(0x761e), 0x10 },
+ { CCI_REG8(0x761f), 0x00 }, { CCI_REG8(0x7620), 0x00 },
+ { CCI_REG8(0x7621), 0x00 }, { CCI_REG8(0x7622), 0x10 },
+ { CCI_REG8(0x7623), 0x00 }, { CCI_REG8(0x7624), 0x00 },
+ { CCI_REG8(0x7625), 0x00 }, { CCI_REG8(0x7626), 0x10 },
+ { CCI_REG8(0x7627), 0x00 }, { CCI_REG8(0x7628), 0x00 },
+ { CCI_REG8(0x7629), 0x00 }, { CCI_REG8(0x762a), 0x10 },
+ { CCI_REG8(0x762b), 0x00 }, { CCI_REG8(0x762c), 0x00 },
+ { CCI_REG8(0x762d), 0x00 }, { CCI_REG8(0x762e), 0x10 },
+ { CCI_REG8(0x762f), 0x00 }, { CCI_REG8(0x7630), 0x00 },
+ { CCI_REG8(0x7631), 0x00 }, { CCI_REG8(0x7632), 0x10 },
+ { CCI_REG8(0x7633), 0x00 }, { CCI_REG8(0x7634), 0x00 },
+ { CCI_REG8(0x7635), 0x00 }, { CCI_REG8(0x7636), 0x10 },
+ { CCI_REG8(0x7637), 0x00 }, { CCI_REG8(0x7638), 0x00 },
+ { CCI_REG8(0x7639), 0x00 }, { CCI_REG8(0x763a), 0x10 },
+ { CCI_REG8(0x763b), 0x00 }, { CCI_REG8(0x763c), 0x00 },
+ { CCI_REG8(0x763d), 0x00 }, { CCI_REG8(0x763e), 0x10 },
+ { CCI_REG8(0x763f), 0x00 }, { CCI_REG8(0x7640), 0x00 },
+ { CCI_REG8(0x7641), 0x00 }, { CCI_REG8(0x7642), 0x10 },
+ { CCI_REG8(0x7643), 0x00 }, { CCI_REG8(0x7644), 0x00 },
+ { CCI_REG8(0x7645), 0x00 }, { CCI_REG8(0x7646), 0x10 },
+ { CCI_REG8(0x7647), 0x00 }, { CCI_REG8(0x7648), 0x00 },
+ { CCI_REG8(0x7649), 0x00 }, { CCI_REG8(0x764a), 0x10 },
+ { CCI_REG8(0x764b), 0x00 }, { CCI_REG8(0x764c), 0x00 },
+ { CCI_REG8(0x764d), 0x00 }, { CCI_REG8(0x764e), 0x10 },
+ { CCI_REG8(0x764f), 0x00 }, { CCI_REG8(0x7650), 0x00 },
+ { CCI_REG8(0x7651), 0x00 }, { CCI_REG8(0x7652), 0x10 },
+ { CCI_REG8(0x7653), 0x00 }, { CCI_REG8(0x7654), 0x00 },
+ { CCI_REG8(0x7655), 0x00 }, { CCI_REG8(0x7656), 0x10 },
+ { CCI_REG8(0x7657), 0x00 }, { CCI_REG8(0x7658), 0x00 },
+ { CCI_REG8(0x7659), 0x00 }, { CCI_REG8(0x765a), 0x10 },
+ { CCI_REG8(0x765b), 0x00 }, { CCI_REG8(0x765c), 0x00 },
+ { CCI_REG8(0x765d), 0x00 }, { CCI_REG8(0x765e), 0x10 },
+ { CCI_REG8(0x765f), 0x00 }, { CCI_REG8(0x7660), 0x00 },
+ { CCI_REG8(0x7661), 0x00 }, { CCI_REG8(0x7662), 0x10 },
+ { CCI_REG8(0x7663), 0x00 }, { CCI_REG8(0x7664), 0x00 },
+ { CCI_REG8(0x7665), 0x00 }, { CCI_REG8(0x7666), 0x10 },
+ { CCI_REG8(0x7667), 0x00 }, { CCI_REG8(0x7668), 0x00 },
+ { CCI_REG8(0x7669), 0x00 }, { CCI_REG8(0x766a), 0x10 },
+ { CCI_REG8(0x766b), 0x00 }, { CCI_REG8(0x766c), 0x00 },
+ { CCI_REG8(0x766d), 0x00 }, { CCI_REG8(0x766e), 0x10 },
+ { CCI_REG8(0x766f), 0x00 }, { CCI_REG8(0x7670), 0x00 },
+ { CCI_REG8(0x7671), 0x00 }, { CCI_REG8(0x7672), 0x10 },
+ { CCI_REG8(0x7673), 0x00 }, { CCI_REG8(0x7674), 0x00 },
+ { CCI_REG8(0x7675), 0x00 }, { CCI_REG8(0x7676), 0x10 },
+ { CCI_REG8(0x7677), 0x00 }, { CCI_REG8(0x7678), 0x00 },
+ { CCI_REG8(0x7679), 0x00 }, { CCI_REG8(0x767a), 0x10 },
+ { CCI_REG8(0x767b), 0x00 }, { CCI_REG8(0x767c), 0x00 },
+ { CCI_REG8(0x767d), 0x00 }, { CCI_REG8(0x767e), 0x10 },
+ { CCI_REG8(0x767f), 0x00 }, { CCI_REG8(0x7680), 0x00 },
+ { CCI_REG8(0x7681), 0x00 }, { CCI_REG8(0x7682), 0x10 },
+ { CCI_REG8(0x7683), 0x00 }, { CCI_REG8(0x7684), 0x00 },
+ { CCI_REG8(0x7685), 0x00 }, { CCI_REG8(0x7686), 0x10 },
+ { CCI_REG8(0x7687), 0x00 }, { CCI_REG8(0x7688), 0x00 },
+ { CCI_REG8(0x7689), 0x00 }, { CCI_REG8(0x768a), 0x10 },
+ { CCI_REG8(0x768b), 0x00 }, { CCI_REG8(0x768c), 0x00 },
+ { CCI_REG8(0x768d), 0x00 }, { CCI_REG8(0x768e), 0x10 },
+ { CCI_REG8(0x768f), 0x00 }, { CCI_REG8(0x7690), 0x00 },
+ { CCI_REG8(0x7691), 0x00 }, { CCI_REG8(0x7692), 0x10 },
+ { CCI_REG8(0x7693), 0x00 }, { CCI_REG8(0x7694), 0x00 },
+ { CCI_REG8(0x7695), 0x00 }, { CCI_REG8(0x7696), 0x10 },
+ { CCI_REG8(0x7697), 0x00 }, { CCI_REG8(0x7698), 0x00 },
+ { CCI_REG8(0x7699), 0x00 }, { CCI_REG8(0x769a), 0x10 },
+ { CCI_REG8(0x769b), 0x00 }, { CCI_REG8(0x769c), 0x00 },
+ { CCI_REG8(0x769d), 0x00 }, { CCI_REG8(0x769e), 0x10 },
+ { CCI_REG8(0x769f), 0x00 }, { CCI_REG8(0x76a0), 0x00 },
+ { CCI_REG8(0x76a1), 0x00 }, { CCI_REG8(0x76a2), 0x10 },
+ { CCI_REG8(0x76a3), 0x00 }, { CCI_REG8(0x76a4), 0x00 },
+ { CCI_REG8(0x76a5), 0x00 }, { CCI_REG8(0x76a6), 0x10 },
+ { CCI_REG8(0x76a7), 0x00 }, { CCI_REG8(0x76a8), 0x00 },
+ { CCI_REG8(0x76a9), 0x00 }, { CCI_REG8(0x76aa), 0x10 },
+ { CCI_REG8(0x76ab), 0x00 }, { CCI_REG8(0x76ac), 0x00 },
+ { CCI_REG8(0x76ad), 0x00 }, { CCI_REG8(0x76ae), 0x10 },
+ { CCI_REG8(0x76af), 0x00 }, { CCI_REG8(0x76b0), 0x00 },
+ { CCI_REG8(0x76b1), 0x00 }, { CCI_REG8(0x76b2), 0x10 },
+ { CCI_REG8(0x76b3), 0x00 }, { CCI_REG8(0x76b4), 0x00 },
+ { CCI_REG8(0x76b5), 0x00 }, { CCI_REG8(0x76b6), 0x10 },
+ { CCI_REG8(0x76b7), 0x00 }, { CCI_REG8(0x76b8), 0x00 },
+ { CCI_REG8(0x76b9), 0x00 }, { CCI_REG8(0x76ba), 0x10 },
+ { CCI_REG8(0x76bb), 0x00 }, { CCI_REG8(0x76bc), 0x00 },
+ { CCI_REG8(0x76bd), 0x00 }, { CCI_REG8(0x76be), 0x10 },
+ { CCI_REG8(0x76bf), 0x00 }, { CCI_REG8(0x76c0), 0x00 },
+ { CCI_REG8(0x76c1), 0x00 }, { CCI_REG8(0x76c2), 0x10 },
+ { CCI_REG8(0x76c3), 0x00 }, { CCI_REG8(0x76c4), 0x00 },
+ { CCI_REG8(0x76c5), 0x00 }, { CCI_REG8(0x76c6), 0x10 },
+ { CCI_REG8(0x76c7), 0x00 }, { CCI_REG8(0x76c8), 0x00 },
+ { CCI_REG8(0x76c9), 0x00 }, { CCI_REG8(0x76ca), 0x10 },
+ { CCI_REG8(0x76cb), 0x00 }, { CCI_REG8(0x76cc), 0x00 },
+ { CCI_REG8(0x76cd), 0x00 }, { CCI_REG8(0x76ce), 0x10 },
+ { CCI_REG8(0x76cf), 0x00 }, { CCI_REG8(0x76d0), 0x00 },
+ { CCI_REG8(0x76d1), 0x00 }, { CCI_REG8(0x76d2), 0x10 },
+ { CCI_REG8(0x76d3), 0x00 }, { CCI_REG8(0x76d4), 0x00 },
+ { CCI_REG8(0x76d5), 0x00 }, { CCI_REG8(0x76d6), 0x10 },
+ { CCI_REG8(0x76d7), 0x00 }, { CCI_REG8(0x76d8), 0x00 },
+ { CCI_REG8(0x76d9), 0x00 }, { CCI_REG8(0x76da), 0x10 },
+ { CCI_REG8(0x76db), 0x00 }, { CCI_REG8(0x76dc), 0x00 },
+ { CCI_REG8(0x76dd), 0x00 }, { CCI_REG8(0x76de), 0x10 },
+ { CCI_REG8(0x76df), 0x00 }, { CCI_REG8(0x76e0), 0x00 },
+ { CCI_REG8(0x76e1), 0x00 }, { CCI_REG8(0x76e2), 0x10 },
+ { CCI_REG8(0x76e3), 0x00 }, { CCI_REG8(0x76e4), 0x00 },
+ { CCI_REG8(0x76e5), 0x00 }, { CCI_REG8(0x76e6), 0x10 },
+ { CCI_REG8(0x76e7), 0x00 }, { CCI_REG8(0x76e8), 0x00 },
+ { CCI_REG8(0x76e9), 0x00 }, { CCI_REG8(0x76ea), 0x10 },
+ { CCI_REG8(0x76eb), 0x00 }, { CCI_REG8(0x76ec), 0x00 },
+ { CCI_REG8(0x76ed), 0x00 }, { CCI_REG8(0x76ee), 0x10 },
+ { CCI_REG8(0x76ef), 0x00 }, { CCI_REG8(0x76f0), 0x00 },
+ { CCI_REG8(0x76f1), 0x00 }, { CCI_REG8(0x76f2), 0x10 },
+ { CCI_REG8(0x76f3), 0x00 }, { CCI_REG8(0x76f4), 0x00 },
+ { CCI_REG8(0x76f5), 0x00 }, { CCI_REG8(0x76f6), 0x10 },
+ { CCI_REG8(0x76f7), 0x00 }, { CCI_REG8(0x76f8), 0x00 },
+ { CCI_REG8(0x76f9), 0x00 }, { CCI_REG8(0x76fa), 0x10 },
+ { CCI_REG8(0x76fb), 0x00 }, { CCI_REG8(0x76fc), 0x00 },
+ { CCI_REG8(0x76fd), 0x00 }, { CCI_REG8(0x76fe), 0x10 },
+ { CCI_REG8(0x76ff), 0x00 }, { CCI_REG8(0x7700), 0x00 },
+ { CCI_REG8(0x7701), 0x00 }, { CCI_REG8(0x7702), 0x10 },
+ { CCI_REG8(0x7703), 0x00 }, { CCI_REG8(0x7704), 0x00 },
+ { CCI_REG8(0x7705), 0x00 }, { CCI_REG8(0x7706), 0x10 },
+ { CCI_REG8(0x7707), 0x00 }, { CCI_REG8(0x7708), 0x00 },
+ { CCI_REG8(0x7709), 0x00 }, { CCI_REG8(0x770a), 0x10 },
+ { CCI_REG8(0x770b), 0x00 }, { CCI_REG8(0x770c), 0x00 },
+ { CCI_REG8(0x770d), 0x00 }, { CCI_REG8(0x770e), 0x10 },
+ { CCI_REG8(0x770f), 0x00 }, { CCI_REG8(0x7710), 0x00 },
+ { CCI_REG8(0x7711), 0x00 }, { CCI_REG8(0x7712), 0x10 },
+ { CCI_REG8(0x7713), 0x00 }, { CCI_REG8(0x7714), 0x00 },
+ { CCI_REG8(0x7715), 0x00 }, { CCI_REG8(0x7716), 0x10 },
+ { CCI_REG8(0x7717), 0x00 }, { CCI_REG8(0x7718), 0x00 },
+ { CCI_REG8(0x7719), 0x00 }, { CCI_REG8(0x771a), 0x10 },
+ { CCI_REG8(0x771b), 0x00 }, { CCI_REG8(0x771c), 0x00 },
+ { CCI_REG8(0x771d), 0x00 }, { CCI_REG8(0x771e), 0x10 },
+ { CCI_REG8(0x771f), 0x00 }, { CCI_REG8(0x7720), 0x00 },
+ { CCI_REG8(0x7721), 0x00 }, { CCI_REG8(0x7722), 0x10 },
+ { CCI_REG8(0x7723), 0x00 }, { CCI_REG8(0x7724), 0x00 },
+ { CCI_REG8(0x7725), 0x00 }, { CCI_REG8(0x7726), 0x10 },
+ { CCI_REG8(0x7727), 0x00 }, { CCI_REG8(0x7728), 0x00 },
+ { CCI_REG8(0x7729), 0x00 }, { CCI_REG8(0x772a), 0x10 },
+ { CCI_REG8(0x772b), 0x00 }, { CCI_REG8(0x772c), 0x00 },
+ { CCI_REG8(0x772d), 0x00 }, { CCI_REG8(0x772e), 0x10 },
+ { CCI_REG8(0x772f), 0x00 }, { CCI_REG8(0x7730), 0x00 },
+ { CCI_REG8(0x7731), 0x00 }, { CCI_REG8(0x7732), 0x10 },
+ { CCI_REG8(0x7733), 0x00 }, { CCI_REG8(0x7734), 0x00 },
+ { CCI_REG8(0x7735), 0x00 }, { CCI_REG8(0x7736), 0x10 },
+ { CCI_REG8(0x7737), 0x00 }, { CCI_REG8(0x7738), 0x00 },
+ { CCI_REG8(0x7739), 0x00 }, { CCI_REG8(0x773a), 0x10 },
+ { CCI_REG8(0x773b), 0x00 }, { CCI_REG8(0x773c), 0x00 },
+ { CCI_REG8(0x773d), 0x00 }, { CCI_REG8(0x773e), 0x10 },
+ { CCI_REG8(0x773f), 0x00 }, { CCI_REG8(0x7740), 0x00 },
+ { CCI_REG8(0x7741), 0x00 }, { CCI_REG8(0x7742), 0x10 },
+ { CCI_REG8(0x7743), 0x00 }, { CCI_REG8(0x3421), 0x02 },
+ { CCI_REG8(0x37d0), 0x00 }, { CCI_REG8(0x3632), 0x99 },
+ { CCI_REG8(0xc518), 0x1f }, { CCI_REG8(0xc519), 0x1f },
+ { CCI_REG8(0xc51a), 0x1f }, { CCI_REG8(0xc51b), 0x1f },
+ { CCI_REG8(0xc51c), 0x1f }, { CCI_REG8(0xc51d), 0x1f },
+ { CCI_REG8(0xc51e), 0x1f }, { CCI_REG8(0xc51f), 0x1f },
+ { CCI_REG8(0xc520), 0x1f }, { CCI_REG8(0xc521), 0x1f },
+ { CCI_REG8(0x3616), 0xa0 }, { CCI_REG8(0x3615), 0xc5 },
+ { CCI_REG8(0xc4c1), 0x02 }, { CCI_REG8(0xc4c2), 0x02 },
+ { CCI_REG8(0xc4c3), 0x03 }, { CCI_REG8(0xc4c4), 0x03 },
+ { CCI_REG8(0xc4f6), 0x0a }, { CCI_REG8(0xc4f7), 0x0a },
+ { CCI_REG8(0xc4f8), 0x0a }, { CCI_REG8(0xc4f9), 0x0a },
+ { CCI_REG8(0xc4fa), 0x0a }, { CCI_REG8(0xc4c6), 0x0a },
+ { CCI_REG8(0xc4c7), 0x0a }, { CCI_REG8(0xc4c8), 0x0a },
+ { CCI_REG8(0xc4c9), 0x0a }, { CCI_REG8(0xc4ca), 0x14 },
+ { CCI_REG8(0xc4cb), 0x14 }, { CCI_REG8(0xc4cc), 0x14 },
+ { CCI_REG8(0xc4cd), 0x14 }, { CCI_REG8(0x3b92), 0x05 },
+ { CCI_REG8(0x3b93), 0x05 }, { CCI_REG8(0x3b94), 0x05 },
+ { CCI_REG8(0x3b95), 0x05 }, { CCI_REG8(0x3623), 0x10 },
+ { CCI_REG8(0xc522), 0x18 }, { CCI_REG8(0xc523), 0x12 },
+ { CCI_REG8(0xc524), 0x0e }, { CCI_REG8(0xc525), 0x0b },
+ { CCI_REG8(0xc526), 0x18 }, { CCI_REG8(0xc527), 0x12 },
+ { CCI_REG8(0xc528), 0x0c }, { CCI_REG8(0xc529), 0x08 },
+ { CCI_REG8(0xc52a), 0x18 }, { CCI_REG8(0xc52b), 0x12 },
+ { CCI_REG8(0xc52c), 0x0e }, { CCI_REG8(0xc52d), 0x0b },
+ { CCI_REG8(0xc52e), 0x18 }, { CCI_REG8(0xc52f), 0x12 },
+ { CCI_REG8(0xc530), 0x0e }, { CCI_REG8(0xc531), 0x0b },
+ { CCI_REG8(0xc532), 0x18 }, { CCI_REG8(0xc533), 0x12 },
+ { CCI_REG8(0xc534), 0x0e }, { CCI_REG8(0xc535), 0x0b },
+ { CCI_REG8(0xc536), 0x18 }, { CCI_REG8(0xc537), 0x12 },
+ { CCI_REG8(0xc538), 0x0e }, { CCI_REG8(0xc539), 0x0b },
+ { CCI_REG8(0xc53a), 0x18 }, { CCI_REG8(0xc53b), 0x12 },
+ { CCI_REG8(0xc53c), 0x0c }, { CCI_REG8(0xc53d), 0x08 },
+ { CCI_REG8(0xc53e), 0x18 }, { CCI_REG8(0xc53f), 0x12 },
+ { CCI_REG8(0xc540), 0x0e }, { CCI_REG8(0xc541), 0x0b },
+ { CCI_REG8(0xc542), 0x18 }, { CCI_REG8(0xc543), 0x12 },
+ { CCI_REG8(0xc544), 0x0e }, { CCI_REG8(0xc545), 0x0b },
+ { CCI_REG8(0xc546), 0x18 }, { CCI_REG8(0xc547), 0x12 },
+ { CCI_REG8(0xc548), 0x0e }, { CCI_REG8(0xc549), 0x0b },
+ { CCI_REG8(0x3701), 0x18 }, { CCI_REG8(0x3702), 0x38 },
+ { CCI_REG8(0x3703), 0x72 }, { CCI_REG8(0x3708), 0x26 },
+ { CCI_REG8(0x3709), 0xe6 }, { CCI_REG8(0x3a1d), 0x18 },
+ { CCI_REG8(0x3a1e), 0x18 }, { CCI_REG8(0x3a21), 0x18 },
+ { CCI_REG8(0x3a22), 0x18 }, { CCI_REG8(0x39fb), 0x18 },
+ { CCI_REG8(0x39fc), 0x18 }, { CCI_REG8(0x39fd), 0x18 },
+ { CCI_REG8(0x39fe), 0x18 }, { CCI_REG8(0xc44a), 0x08 },
+ { CCI_REG8(0xc44c), 0x08 }, { CCI_REG8(0xc5e8), 0x0a },
+ { CCI_REG8(0xc5ea), 0x0a }, { CCI_REG8(0x391d), 0x54 },
+ { CCI_REG8(0x391e), 0xca }, { CCI_REG8(0x3991), 0x0c },
+ { CCI_REG8(0x399d), 0x0c }, { CCI_REG8(0x3744), 0x24 },
+ { CCI_REG8(0x374b), 0x0c }, { CCI_REG8(0x3be7), 0x1e },
+ { CCI_REG8(0x3be8), 0x26 }, { CCI_REG8(0x3a50), 0x14 },
+ { CCI_REG8(0x3a54), 0x14 }, { CCI_REG8(0x3add), 0x1f },
+ { CCI_REG8(0x3adf), 0x24 }, { CCI_REG8(0x3aef), 0x1f },
+ { CCI_REG8(0x3af0), 0x24 }, { CCI_REG8(0xc57f), 0x30 },
+ { CCI_REG8(0xc580), 0x30 }, { CCI_REG8(0xc581), 0x30 },
+ { CCI_REG8(0xc582), 0x30 }, { CCI_REG8(0xc583), 0x30 },
+ { CCI_REG8(0xc584), 0x30 }, { CCI_REG8(0xc585), 0x30 },
+ { CCI_REG8(0xc586), 0x30 }, { CCI_REG8(0xc587), 0x30 },
+ { CCI_REG8(0xc588), 0x30 }, { CCI_REG8(0xc589), 0x30 },
+ { CCI_REG8(0xc58a), 0x30 }, { CCI_REG8(0xc58b), 0x30 },
+ { CCI_REG8(0xc58c), 0x30 }, { CCI_REG8(0xc58d), 0x30 },
+ { CCI_REG8(0xc58e), 0x30 }, { CCI_REG8(0xc58f), 0x30 },
+ { CCI_REG8(0xc590), 0x30 }, { CCI_REG8(0xc591), 0x30 },
+ { CCI_REG8(0xc592), 0x30 }, { CCI_REG8(0xc598), 0x30 },
+ { CCI_REG8(0xc599), 0x30 }, { CCI_REG8(0xc59a), 0x30 },
+ { CCI_REG8(0xc59b), 0x30 }, { CCI_REG8(0xc59c), 0x30 },
+ { CCI_REG8(0xc59d), 0x30 }, { CCI_REG8(0xc59e), 0x30 },
+ { CCI_REG8(0xc59f), 0x30 }, { CCI_REG8(0xc5a0), 0x30 },
+ { CCI_REG8(0xc5a1), 0x30 }, { CCI_REG8(0xc5a2), 0x30 },
+ { CCI_REG8(0xc5a3), 0x30 }, { CCI_REG8(0xc5a4), 0x30 },
+ { CCI_REG8(0xc5a5), 0x30 }, { CCI_REG8(0xc5a6), 0x30 },
+ { CCI_REG8(0xc5a7), 0x30 }, { CCI_REG8(0xc5a8), 0x30 },
+ { CCI_REG8(0xc5a9), 0x30 }, { CCI_REG8(0xc5aa), 0x30 },
+ { CCI_REG8(0xc5ab), 0x30 }, { CCI_REG8(0xc5b1), 0x38 },
+ { CCI_REG8(0xc5b2), 0x38 }, { CCI_REG8(0xc5b3), 0x38 },
+ { CCI_REG8(0xc5b4), 0x38 }, { CCI_REG8(0xc5b5), 0x38 },
+ { CCI_REG8(0xc5b6), 0x38 }, { CCI_REG8(0xc5b7), 0x38 },
+ { CCI_REG8(0xc5b8), 0x38 }, { CCI_REG8(0xc5b9), 0x38 },
+ { CCI_REG8(0xc5ba), 0x38 }, { CCI_REG8(0xc5bb), 0x38 },
+ { CCI_REG8(0xc5bc), 0x38 }, { CCI_REG8(0xc5bd), 0x38 },
+ { CCI_REG8(0xc5be), 0x38 }, { CCI_REG8(0xc5bf), 0x38 },
+ { CCI_REG8(0xc5c0), 0x38 }, { CCI_REG8(0xc5c1), 0x38 },
+ { CCI_REG8(0xc5c2), 0x38 }, { CCI_REG8(0xc5c3), 0x38 },
+ { CCI_REG8(0xc5c4), 0x38 }, { CCI_REG8(0xc5ca), 0x38 },
+ { CCI_REG8(0xc5cb), 0x38 }, { CCI_REG8(0xc5cc), 0x38 },
+ { CCI_REG8(0xc5cd), 0x38 }, { CCI_REG8(0xc5ce), 0x38 },
+ { CCI_REG8(0xc5cf), 0x38 }, { CCI_REG8(0xc5d0), 0x38 },
+ { CCI_REG8(0xc5d1), 0x38 }, { CCI_REG8(0xc5d2), 0x38 },
+ { CCI_REG8(0xc5d3), 0x38 }, { CCI_REG8(0xc5d4), 0x38 },
+ { CCI_REG8(0xc5d5), 0x38 }, { CCI_REG8(0xc5d6), 0x38 },
+ { CCI_REG8(0xc5d7), 0x38 }, { CCI_REG8(0xc5d8), 0x38 },
+ { CCI_REG8(0xc5d9), 0x38 }, { CCI_REG8(0xc5da), 0x38 },
+ { CCI_REG8(0xc5db), 0x38 }, { CCI_REG8(0xc5dc), 0x38 },
+ { CCI_REG8(0xc5dd), 0x38 }, { CCI_REG8(0x3a60), 0x68 },
+ { CCI_REG8(0x3a6f), 0x68 }, { CCI_REG8(0x3a5e), 0xdc },
+ { CCI_REG8(0x3a6d), 0xdc }, { CCI_REG8(0x3aed), 0x6e },
+ { CCI_REG8(0x3af1), 0x73 }, { CCI_REG8(0x3992), 0x02 },
+ { CCI_REG8(0x399e), 0x02 }, { CCI_REG8(0x371d), 0x17 },
+ { CCI_REG8(0x371f), 0x08 }, { CCI_REG8(0x3721), 0xc9 },
+ { CCI_REG8(0x401e), 0x00 }, { CCI_REG8(0x401f), 0xf8 },
+ { CCI_REG8(0x3642), 0x00 }, { CCI_REG8(0x3641), 0x7f },
+ { CCI_REG8(0x3ac5), 0x0c }, { CCI_REG8(0x3ac6), 0x09 },
+ { CCI_REG8(0x3ac7), 0x06 }, { CCI_REG8(0x3ac8), 0x02 },
+ { CCI_REG8(0x3ac9), 0x0c }, { CCI_REG8(0x3aca), 0x09 },
+ { CCI_REG8(0x3acb), 0x06 }, { CCI_REG8(0x3acc), 0x02 },
+ { CCI_REG8(0x3acd), 0x0c }, { CCI_REG8(0x3ace), 0x09 },
+ { CCI_REG8(0x3acf), 0x07 }, { CCI_REG8(0x3ad0), 0x04 },
+ { CCI_REG8(0x3ad1), 0x0c }, { CCI_REG8(0x3ad2), 0x09 },
+ { CCI_REG8(0x3ad3), 0x07 }, { CCI_REG8(0x3ad4), 0x04 },
+ { CCI_REG8(0xc483), 0x0c }, { CCI_REG8(0xc484), 0x0c },
+ { CCI_REG8(0xc485), 0x0c }, { CCI_REG8(0xc486), 0x0c },
+ { CCI_REG8(0x3a2f), 0x0c }, { CCI_REG8(0x3a30), 0x09 },
+ { CCI_REG8(0x3a31), 0x06 }, { CCI_REG8(0x3a32), 0x02 },
+ { CCI_REG8(0x3a34), 0x0c }, { CCI_REG8(0x3a35), 0x09 },
+ { CCI_REG8(0x3a36), 0x07 }, { CCI_REG8(0x3a37), 0x04 },
+ { CCI_REG8(0x3a43), 0x0c }, { CCI_REG8(0x3a44), 0x09 },
+ { CCI_REG8(0x3a45), 0x06 }, { CCI_REG8(0x3a46), 0x02 },
+ { CCI_REG8(0x3a48), 0x0c }, { CCI_REG8(0x3a49), 0x09 },
+ { CCI_REG8(0x3a4a), 0x07 }, { CCI_REG8(0x3a4b), 0x04 },
+ { CCI_REG8(0xc487), 0x0c }, { CCI_REG8(0xc488), 0x0c },
+ { CCI_REG8(0xc489), 0x0c }, { CCI_REG8(0xc48a), 0x0c },
+ { CCI_REG8(0x3645), 0xbd }, { CCI_REG8(0x373f), 0x00 },
+ { CCI_REG8(0x374f), 0x10 }, { CCI_REG8(0x3743), 0xc6 },
+ { CCI_REG8(0x3717), 0x82 }, { CCI_REG8(0x3732), 0x07 },
+ { CCI_REG8(0x3731), 0x16 }, { CCI_REG8(0x3730), 0x16 },
+ { CCI_REG8(0x3828), 0x07 }, { CCI_REG8(0x3714), 0x68 },
+ { CCI_REG8(0x371d), 0x02 }, { CCI_REG8(0x371f), 0x02 },
+ { CCI_REG8(0x37e0), 0x00 }, { CCI_REG8(0x37e1), 0x03 },
+ { CCI_REG8(0x37e2), 0x07 }, { CCI_REG8(0x3734), 0x3e },
+ { CCI_REG8(0x3736), 0x02 }, { CCI_REG8(0x37e4), 0x36 },
+ { CCI_REG8(0x37e9), 0x1c }, { CCI_REG8(0x37ea), 0x01 },
+ { CCI_REG8(0x37eb), 0x0a }, { CCI_REG8(0x37ec), 0x1c },
+ { CCI_REG8(0x37ed), 0x01 }, { CCI_REG8(0x37ee), 0x36 },
+ { CCI_REG8(0x373b), 0x1c }, { CCI_REG8(0x373c), 0x02 },
+ { CCI_REG8(0x37bb), 0x1c }, { CCI_REG8(0x37bc), 0x02 },
+ { CCI_REG8(0x37b8), 0x0c }, { CCI_REG8(0x371c), 0x01 },
+ { CCI_REG8(0x371e), 0x11 }, { CCI_REG8(0x371d), 0x01 },
+ { CCI_REG8(0x371f), 0x01 }, { CCI_REG8(0x3721), 0x01 },
+ { CCI_REG8(0x3725), 0x12 }, { CCI_REG8(0x37e3), 0x06 },
+ { CCI_REG8(0x37dd), 0x86 }, { CCI_REG8(0x37db), 0x0a },
+ { CCI_REG8(0x37dc), 0x14 }, { CCI_REG8(0x3727), 0x20 },
+ { CCI_REG8(0x37b2), 0x80 }, { CCI_REG8(0x37da), 0x04 },
+ { CCI_REG8(0x37df), 0x01 }, { CCI_REG8(0x3731), 0x11 },
+ { CCI_REG8(0x37dd), 0x86 }, { CCI_REG8(0x37df), 0x01 },
+ { CCI_REG8(0x37da), 0x03 }, { CCI_REG8(0x37b2), 0x80 },
+ { CCI_REG8(0x3727), 0x20 }, { CCI_REG8(0x4883), 0x26 },
+ { CCI_REG8(0x488b), 0x88 }, { CCI_REG8(0x3d85), 0x1f },
+ { CCI_REG8(0x3d81), 0x01 }, { CCI_REG8(0x3d84), 0x40 },
+ { CCI_REG8(0x3d88), 0x00 }, { CCI_REG8(0x3d89), 0x00 },
+ { CCI_REG8(0x3d8a), 0x0b }, { CCI_REG8(0x3d8b), 0xff },
+ { CCI_REG8(0x4d00), 0x05 }, { CCI_REG8(0x4d01), 0xc4 },
+ { CCI_REG8(0x4d02), 0xa3 }, { CCI_REG8(0x4d03), 0x8c },
+ { CCI_REG8(0x4d04), 0xfb }, { CCI_REG8(0x4d05), 0xed },
+ { CCI_REG8(0x4010), 0x28 }, { CCI_REG8(0x4030), 0x00 },
+ { CCI_REG8(0x4031), 0x00 }, { CCI_REG8(0x4032), 0x00 },
+ { CCI_REG8(0x4033), 0x00 }, { CCI_REG8(0x4034), 0x00 },
+ { CCI_REG8(0x4035), 0x00 }, { CCI_REG8(0x4036), 0x00 },
+ { CCI_REG8(0x4037), 0x00 }, { CCI_REG8(0x4040), 0x00 },
+ { CCI_REG8(0x4041), 0x00 }, { CCI_REG8(0x4042), 0x00 },
+ { CCI_REG8(0x4043), 0x00 }, { CCI_REG8(0x4044), 0x00 },
+ { CCI_REG8(0x4045), 0x00 }, { CCI_REG8(0x4046), 0x00 },
+ { CCI_REG8(0x4047), 0x00 }, { CCI_REG8(0x3400), 0x00 },
+ { CCI_REG8(0x3421), 0x23 }, { CCI_REG8(0x3422), 0xfc },
+ { CCI_REG8(0x3423), 0x07 }, { CCI_REG8(0x3424), 0x01 },
+ { CCI_REG8(0x3425), 0x04 }, { CCI_REG8(0x3426), 0x50 },
+ { CCI_REG8(0x3427), 0x55 }, { CCI_REG8(0x3428), 0x15 },
+ { CCI_REG8(0x3429), 0x00 }, { CCI_REG8(0x3025), 0x03 },
+ { CCI_REG8(0x3053), 0x00 }, { CCI_REG8(0x3054), 0x00 },
+ { CCI_REG8(0x3055), 0x00 }, { CCI_REG8(0x3056), 0x00 },
+ { CCI_REG8(0x3057), 0x00 }, { CCI_REG8(0x3058), 0x00 },
+ { CCI_REG8(0x305c), 0x00 }, { CCI_REG8(0x340c), 0x1f },
+ { CCI_REG8(0x340d), 0x00 }, { CCI_REG8(0x3501), 0x01 },
+ { CCI_REG8(0x3542), 0x48 }, { CCI_REG8(0x3582), 0x24 },
+ { CCI_REG8(0x3015), 0xf1 }, { CCI_REG8(0x3018), 0xf2 },
+ { CCI_REG8(0x301c), 0xf2 }, { CCI_REG8(0x301d), 0xf6 },
+ { CCI_REG8(0x301e), 0xf1 }, { CCI_REG8(0x0100), 0x01 },
+ { CCI_REG8(0xfff9), 0x08 }, { CCI_REG8(0x3900), 0xcd },
+ { CCI_REG8(0x3901), 0xcd }, { CCI_REG8(0x3902), 0xcd },
+ { CCI_REG8(0x3903), 0xcd }, { CCI_REG8(0x3904), 0xcd },
+ { CCI_REG8(0x3905), 0xcd }, { CCI_REG8(0x3906), 0xcd },
+ { CCI_REG8(0x3907), 0xcd }, { CCI_REG8(0x3908), 0xcd },
+ { CCI_REG8(0x3909), 0xcd }, { CCI_REG8(0x390a), 0xcd },
+ { CCI_REG8(0x390b), 0xcd }, { CCI_REG8(0x390c), 0xcd },
+ { CCI_REG8(0x390d), 0xcd }, { CCI_REG8(0x390e), 0xcd },
+ { CCI_REG8(0x390f), 0xcd }, { CCI_REG8(0x3910), 0xcd },
+ { CCI_REG8(0x3911), 0xcd }, { CCI_REG8(0x3912), 0xcd },
+ { CCI_REG8(0x3913), 0xcd }, { CCI_REG8(0x3914), 0xcd },
+ { CCI_REG8(0x3915), 0xcd }, { CCI_REG8(0x3916), 0xcd },
+ { CCI_REG8(0x3917), 0xcd }, { CCI_REG8(0x3918), 0xcd },
+ { CCI_REG8(0x3919), 0xcd }, { CCI_REG8(0x391a), 0xcd },
+ { CCI_REG8(0x391b), 0xcd }, { CCI_REG8(0x391c), 0xcd },
+ { CCI_REG8(0x391d), 0xcd }, { CCI_REG8(0x391e), 0xcd },
+ { CCI_REG8(0x391f), 0xcd }, { CCI_REG8(0x3920), 0xcd },
+ { CCI_REG8(0x3921), 0xcd }, { CCI_REG8(0x3922), 0xcd },
+ { CCI_REG8(0x3923), 0xcd }, { CCI_REG8(0x3924), 0xcd },
+ { CCI_REG8(0x3925), 0xcd }, { CCI_REG8(0x3926), 0xcd },
+ { CCI_REG8(0x3927), 0xcd }, { CCI_REG8(0x3928), 0xcd },
+ { CCI_REG8(0x3929), 0xcd }, { CCI_REG8(0x392a), 0xcd },
+ { CCI_REG8(0x392b), 0xcd }, { CCI_REG8(0x392c), 0xcd },
+ { CCI_REG8(0x392d), 0xcd }, { CCI_REG8(0x392e), 0xcd },
+ { CCI_REG8(0x392f), 0xcd }, { CCI_REG8(0x3930), 0xcd },
+ { CCI_REG8(0x3931), 0xcd }, { CCI_REG8(0x3932), 0xcd },
+ { CCI_REG8(0x3933), 0xcd }, { CCI_REG8(0x3934), 0xcd },
+ { CCI_REG8(0x3935), 0xcd }, { CCI_REG8(0x3936), 0xcd },
+ { CCI_REG8(0x3937), 0xcd }, { CCI_REG8(0x3938), 0xcd },
+ { CCI_REG8(0x3939), 0xcd }, { CCI_REG8(0x393a), 0xcd },
+ { CCI_REG8(0x393b), 0xcd }, { CCI_REG8(0x393c), 0xcd },
+ { CCI_REG8(0x393d), 0xcd }, { CCI_REG8(0x393e), 0xcd },
+ { CCI_REG8(0x393f), 0xcd }, { CCI_REG8(0x3940), 0xcd },
+ { CCI_REG8(0x3941), 0xcd }, { CCI_REG8(0x3942), 0xcd },
+ { CCI_REG8(0x3943), 0xcd }, { CCI_REG8(0x3944), 0xcd },
+ { CCI_REG8(0x3945), 0xcd }, { CCI_REG8(0x3946), 0xcd },
+ { CCI_REG8(0x3947), 0xcd }, { CCI_REG8(0x3948), 0xcd },
+ { CCI_REG8(0x3949), 0xcd }, { CCI_REG8(0x394a), 0xcd },
+ { CCI_REG8(0x394b), 0xcd }, { CCI_REG8(0x394c), 0xcd },
+ { CCI_REG8(0x394d), 0xcd }, { CCI_REG8(0x394e), 0xcd },
+ { CCI_REG8(0x394f), 0xcd }, { CCI_REG8(0x3950), 0xcd },
+ { CCI_REG8(0x3951), 0xcd }, { CCI_REG8(0x3952), 0xcd },
+ { CCI_REG8(0x3953), 0xcd }, { CCI_REG8(0x3954), 0xcd },
+ { CCI_REG8(0x3955), 0xcd }, { CCI_REG8(0x3956), 0xcd },
+ { CCI_REG8(0x3957), 0xcd }, { CCI_REG8(0x3958), 0xcd },
+ { CCI_REG8(0x3959), 0xcd }, { CCI_REG8(0x395a), 0xcd },
+ { CCI_REG8(0x395b), 0xcd }, { CCI_REG8(0x395c), 0xcd },
+ { CCI_REG8(0x395d), 0xcd }, { CCI_REG8(0x395e), 0xcd },
+ { CCI_REG8(0x395f), 0xcd }, { CCI_REG8(0x3960), 0xcd },
+ { CCI_REG8(0x3961), 0xcd }, { CCI_REG8(0x3962), 0xcd },
+ { CCI_REG8(0x3963), 0xcd }, { CCI_REG8(0x3964), 0xcd },
+ { CCI_REG8(0x3965), 0xcd }, { CCI_REG8(0x3966), 0xcd },
+ { CCI_REG8(0x3967), 0xcd }, { CCI_REG8(0x3968), 0xcd },
+ { CCI_REG8(0x3969), 0xcd }, { CCI_REG8(0x396a), 0xcd },
+ { CCI_REG8(0x396b), 0xcd }, { CCI_REG8(0x396c), 0xcd },
+ { CCI_REG8(0x396d), 0xcd }, { CCI_REG8(0x396e), 0xcd },
+ { CCI_REG8(0x396f), 0xcd }, { CCI_REG8(0x3970), 0xcd },
+ { CCI_REG8(0x3971), 0xcd }, { CCI_REG8(0x3972), 0xcd },
+ { CCI_REG8(0x3973), 0xcd }, { CCI_REG8(0x3974), 0xcd },
+ { CCI_REG8(0x3975), 0xcd }, { CCI_REG8(0x3976), 0xcd },
+ { CCI_REG8(0x3977), 0xcd }, { CCI_REG8(0x3978), 0xcd },
+ { CCI_REG8(0x3979), 0xcd }, { CCI_REG8(0x397a), 0xcd },
+ { CCI_REG8(0x397b), 0xcd }, { CCI_REG8(0x397c), 0xcd },
+ { CCI_REG8(0x397d), 0xcd }, { CCI_REG8(0x397e), 0xcd },
+ { CCI_REG8(0x397f), 0xcd }, { CCI_REG8(0x3980), 0xcd },
+ { CCI_REG8(0x3981), 0xcd }, { CCI_REG8(0x3982), 0xcd },
+ { CCI_REG8(0x3983), 0xcd }, { CCI_REG8(0x3984), 0xcd },
+ { CCI_REG8(0x3985), 0xcd }, { CCI_REG8(0x3986), 0xcd },
+ { CCI_REG8(0x3987), 0xcd }, { CCI_REG8(0x3988), 0xcd },
+ { CCI_REG8(0x3989), 0xcd }, { CCI_REG8(0x398a), 0xcd },
+ { CCI_REG8(0x398b), 0xcd }, { CCI_REG8(0x398c), 0xcd },
+ { CCI_REG8(0x398d), 0xcd }, { CCI_REG8(0x398e), 0xcd },
+ { CCI_REG8(0x398f), 0xcd }, { CCI_REG8(0x3990), 0xcd },
+ { CCI_REG8(0x3991), 0xcd }, { CCI_REG8(0x3992), 0xcd },
+ { CCI_REG8(0x3993), 0xcd }, { CCI_REG8(0x3994), 0xcd },
+ { CCI_REG8(0x3995), 0xcd }, { CCI_REG8(0x3996), 0xcd },
+ { CCI_REG8(0x3997), 0xcd }, { CCI_REG8(0x3998), 0xcd },
+ { CCI_REG8(0x3999), 0xcd }, { CCI_REG8(0x399a), 0xcd },
+ { CCI_REG8(0x399b), 0xcd }, { CCI_REG8(0x399c), 0xcd },
+ { CCI_REG8(0x399d), 0xcd }, { CCI_REG8(0x399e), 0xcd },
+ { CCI_REG8(0x399f), 0xcd }, { CCI_REG8(0x39a0), 0xcd },
+ { CCI_REG8(0x39a1), 0xcd }, { CCI_REG8(0x39a2), 0xcd },
+ { CCI_REG8(0x39a3), 0xcd }, { CCI_REG8(0x39a4), 0xcd },
+ { CCI_REG8(0x39a5), 0xcd }, { CCI_REG8(0x39a6), 0xcd },
+ { CCI_REG8(0x39a7), 0xcd }, { CCI_REG8(0x39a8), 0xcd },
+ { CCI_REG8(0x39a9), 0xcd }, { CCI_REG8(0x39aa), 0xcd },
+ { CCI_REG8(0x39ab), 0xcd }, { CCI_REG8(0x39ac), 0xcd },
+ { CCI_REG8(0x39ad), 0xcd }, { CCI_REG8(0x39ae), 0xcd },
+ { CCI_REG8(0x39af), 0xcd }, { CCI_REG8(0x39b0), 0xcd },
+ { CCI_REG8(0x39b1), 0xcd }, { CCI_REG8(0x39b2), 0xcd },
+ { CCI_REG8(0x39b3), 0xcd }, { CCI_REG8(0x39b4), 0xcd },
+ { CCI_REG8(0x39b5), 0xcd }, { CCI_REG8(0x39b6), 0xcd },
+ { CCI_REG8(0x39b7), 0xcd }, { CCI_REG8(0x39b8), 0xcd },
+ { CCI_REG8(0x39b9), 0xcd }, { CCI_REG8(0x39ba), 0xcd },
+ { CCI_REG8(0x39bb), 0xcd }, { CCI_REG8(0x39bc), 0xcd },
+ { CCI_REG8(0x39bd), 0xcd }, { CCI_REG8(0x39be), 0xcd },
+ { CCI_REG8(0x39bf), 0xcd }, { CCI_REG8(0x39c0), 0xcd },
+ { CCI_REG8(0x39c1), 0xcd }, { CCI_REG8(0x39c2), 0xcd },
+ { CCI_REG8(0x39c3), 0xcd }, { CCI_REG8(0x39c4), 0xcd },
+ { CCI_REG8(0x39c5), 0xcd }, { CCI_REG8(0x39c6), 0xcd },
+ { CCI_REG8(0x39c7), 0xcd }, { CCI_REG8(0x39c8), 0xcd },
+ { CCI_REG8(0x39c9), 0xcd }, { CCI_REG8(0x39ca), 0xcd },
+ { CCI_REG8(0x39cb), 0xcd }, { CCI_REG8(0x39cc), 0xcd },
+ { CCI_REG8(0x39cd), 0xcd }, { CCI_REG8(0x39ce), 0xcd },
+ { CCI_REG8(0x39cf), 0xcd }, { CCI_REG8(0x39d0), 0xcd },
+ { CCI_REG8(0x39d1), 0xcd }, { CCI_REG8(0x39d2), 0xcd },
+ { CCI_REG8(0x39d3), 0xcd }, { CCI_REG8(0x39d4), 0xcd },
+ { CCI_REG8(0x39d5), 0xcd }, { CCI_REG8(0x39d6), 0xcd },
+ { CCI_REG8(0x39d7), 0xcd }, { CCI_REG8(0x39d8), 0xcd },
+ { CCI_REG8(0x39d9), 0xcd }, { CCI_REG8(0x39da), 0xcd },
+ { CCI_REG8(0x39db), 0xcd }, { CCI_REG8(0x39dc), 0xcd },
+ { CCI_REG8(0x39dd), 0xcd }, { CCI_REG8(0x39de), 0xcd },
+ { CCI_REG8(0x39df), 0xcd }, { CCI_REG8(0x39e0), 0xcd },
+ { CCI_REG8(0x39e1), 0x40 }, { CCI_REG8(0x39e2), 0x40 },
+ { CCI_REG8(0x39e3), 0x40 }, { CCI_REG8(0x39e4), 0x40 },
+ { CCI_REG8(0x39e5), 0x40 }, { CCI_REG8(0x39e6), 0x40 },
+ { CCI_REG8(0x39e7), 0x40 }, { CCI_REG8(0x39e8), 0x40 },
+ { CCI_REG8(0x39e9), 0x40 }, { CCI_REG8(0x39ea), 0x40 },
+ { CCI_REG8(0x39eb), 0x40 }, { CCI_REG8(0x39ec), 0x40 },
+ { CCI_REG8(0x39ed), 0x40 }, { CCI_REG8(0x39ee), 0x40 },
+ { CCI_REG8(0x39ef), 0x40 }, { CCI_REG8(0x39f0), 0x40 },
+ { CCI_REG8(0x39f1), 0x40 }, { CCI_REG8(0x39f2), 0x40 },
+ { CCI_REG8(0x39f3), 0x40 }, { CCI_REG8(0x39f4), 0x40 },
+ { CCI_REG8(0x39f5), 0x40 }, { CCI_REG8(0x39f6), 0x40 },
+ { CCI_REG8(0x39f7), 0x40 }, { CCI_REG8(0x39f8), 0x40 },
+ { CCI_REG8(0x39f9), 0x40 }, { CCI_REG8(0x39fa), 0x40 },
+ { CCI_REG8(0x39fb), 0x40 }, { CCI_REG8(0x39fc), 0x40 },
+ { CCI_REG8(0x39fd), 0x40 }, { CCI_REG8(0x39fe), 0x40 },
+ { CCI_REG8(0x39ff), 0x40 }, { CCI_REG8(0x3a00), 0x40 },
+ { CCI_REG8(0x3a01), 0x40 }, { CCI_REG8(0x3a02), 0x40 },
+ { CCI_REG8(0x3a03), 0x40 }, { CCI_REG8(0x3a04), 0x40 },
+ { CCI_REG8(0x3a05), 0x40 }, { CCI_REG8(0x3a06), 0x40 },
+ { CCI_REG8(0x3a07), 0x40 }, { CCI_REG8(0x3a08), 0x40 },
+ { CCI_REG8(0x3a09), 0x40 }, { CCI_REG8(0x3a0a), 0x40 },
+ { CCI_REG8(0x3a0b), 0x40 }, { CCI_REG8(0x3a0c), 0x40 },
+ { CCI_REG8(0x3a0d), 0x40 }, { CCI_REG8(0x3a0e), 0x40 },
+ { CCI_REG8(0x3a0f), 0x40 }, { CCI_REG8(0x3a10), 0x40 },
+ { CCI_REG8(0x3a11), 0x40 }, { CCI_REG8(0x3a12), 0x40 },
+ { CCI_REG8(0x3a13), 0x40 }, { CCI_REG8(0x3a14), 0x40 },
+ { CCI_REG8(0x3a15), 0x40 }, { CCI_REG8(0x3a16), 0x40 },
+ { CCI_REG8(0x3a17), 0x40 }, { CCI_REG8(0x3a18), 0x40 },
+ { CCI_REG8(0x3a19), 0x40 }, { CCI_REG8(0x3a1a), 0x40 },
+ { CCI_REG8(0x3a1b), 0x40 }, { CCI_REG8(0x3a1c), 0x40 },
+ { CCI_REG8(0x3a1d), 0x40 }, { CCI_REG8(0x3a1e), 0x40 },
+ { CCI_REG8(0x3a1f), 0x40 }, { CCI_REG8(0x3a20), 0x40 },
+ { CCI_REG8(0x3a21), 0x40 }, { CCI_REG8(0x3a22), 0x40 },
+ { CCI_REG8(0x3a23), 0x40 }, { CCI_REG8(0x3a24), 0x40 },
+ { CCI_REG8(0x3a25), 0x40 }, { CCI_REG8(0x3a26), 0x40 },
+ { CCI_REG8(0x3a27), 0x40 }, { CCI_REG8(0x3a28), 0x40 },
+ { CCI_REG8(0x3a29), 0x40 }, { CCI_REG8(0x3a2a), 0x40 },
+ { CCI_REG8(0x3a2b), 0x40 }, { CCI_REG8(0x3a2c), 0x40 },
+ { CCI_REG8(0x3a2d), 0x40 }, { CCI_REG8(0x3a2e), 0x40 },
+ { CCI_REG8(0x3a2f), 0x40 }, { CCI_REG8(0x3a30), 0x40 },
+ { CCI_REG8(0x3a31), 0x40 }, { CCI_REG8(0x3a32), 0x40 },
+ { CCI_REG8(0x3a33), 0x40 }, { CCI_REG8(0x3a34), 0x40 },
+ { CCI_REG8(0x3a35), 0x40 }, { CCI_REG8(0x3a36), 0x40 },
+ { CCI_REG8(0x3a37), 0x40 }, { CCI_REG8(0x3a38), 0x40 },
+ { CCI_REG8(0x3a39), 0x40 }, { CCI_REG8(0x3a3a), 0x40 },
+ { CCI_REG8(0x3a3b), 0xcd }, { CCI_REG8(0x3a3c), 0xcd },
+ { CCI_REG8(0x3a3d), 0xcd }, { CCI_REG8(0x3a3e), 0xcd },
+ { CCI_REG8(0x3a3f), 0xcd }, { CCI_REG8(0x3a40), 0xcd },
+ { CCI_REG8(0x3a41), 0xcd }, { CCI_REG8(0x3a42), 0xcd },
+ { CCI_REG8(0x3a43), 0xcd }, { CCI_REG8(0x3a44), 0xcd },
+ { CCI_REG8(0x3a45), 0xcd }, { CCI_REG8(0x3a46), 0xcd },
+ { CCI_REG8(0x3a47), 0xcd }, { CCI_REG8(0x3a48), 0xcd },
+ { CCI_REG8(0x3a49), 0xcd }, { CCI_REG8(0x3a4a), 0xcd },
+ { CCI_REG8(0x3a4b), 0xcd }, { CCI_REG8(0x3a4c), 0xcd },
+ { CCI_REG8(0x3a4d), 0xcd }, { CCI_REG8(0x3a4e), 0xcd },
+ { CCI_REG8(0x3a4f), 0xcd }, { CCI_REG8(0x3a50), 0xcd },
+ { CCI_REG8(0x3a51), 0xcd }, { CCI_REG8(0x3a52), 0xcd },
+ { CCI_REG8(0x3a53), 0xcd }, { CCI_REG8(0x3a54), 0xcd },
+ { CCI_REG8(0x3a55), 0xcd }, { CCI_REG8(0x3a56), 0xcd },
+ { CCI_REG8(0x3a57), 0xcd }, { CCI_REG8(0x3a58), 0xcd },
+ { CCI_REG8(0x3a59), 0xcd }, { CCI_REG8(0x3a5a), 0xcd },
+ { CCI_REG8(0x3a5b), 0xcd }, { CCI_REG8(0x3a5c), 0xcd },
+ { CCI_REG8(0x3a5d), 0xcd }, { CCI_REG8(0x3a5e), 0xcd },
+ { CCI_REG8(0x3a5f), 0xcd }, { CCI_REG8(0x3a60), 0xcd },
+ { CCI_REG8(0x3a61), 0xcd }, { CCI_REG8(0x3a62), 0xcd },
+ { CCI_REG8(0x3a63), 0xcd }, { CCI_REG8(0x3a64), 0xcd },
+ { CCI_REG8(0x3a65), 0xcd }, { CCI_REG8(0x3a66), 0xcd },
+ { CCI_REG8(0x3a67), 0xcd }, { CCI_REG8(0x3a68), 0xcd },
+ { CCI_REG8(0x3a69), 0xcd }, { CCI_REG8(0x3a6a), 0xcd },
+ { CCI_REG8(0x3a6b), 0xcd }, { CCI_REG8(0x3a6c), 0xcd },
+ { CCI_REG8(0x3a6d), 0xcd }, { CCI_REG8(0x3a6e), 0xcd },
+ { CCI_REG8(0x3a6f), 0xcd }, { CCI_REG8(0x3a70), 0xcd },
+ { CCI_REG8(0x3a71), 0xcd }, { CCI_REG8(0x3a72), 0xcd },
+ { CCI_REG8(0x3a73), 0xcd }, { CCI_REG8(0x3a74), 0xcd },
+ { CCI_REG8(0x3a75), 0xcd }, { CCI_REG8(0x3a76), 0xcd },
+ { CCI_REG8(0x3a77), 0xcd }, { CCI_REG8(0x3a78), 0xcd },
+ { CCI_REG8(0x3a79), 0xcd }, { CCI_REG8(0x3a7a), 0xcd },
+ { CCI_REG8(0x3a7b), 0xcd }, { CCI_REG8(0x3a7c), 0xcd },
+ { CCI_REG8(0x3a7d), 0xcd }, { CCI_REG8(0x3a7e), 0xcd },
+ { CCI_REG8(0x3a7f), 0xcd }, { CCI_REG8(0x3a80), 0xcd },
+ { CCI_REG8(0x3a81), 0xcd }, { CCI_REG8(0x3a82), 0xcd },
+ { CCI_REG8(0x3a83), 0xcd }, { CCI_REG8(0x3a84), 0xcd },
+ { CCI_REG8(0x3a85), 0xcd }, { CCI_REG8(0x3a86), 0xcd },
+ { CCI_REG8(0x3a87), 0xcd }, { CCI_REG8(0x3a88), 0xcd },
+ { CCI_REG8(0x3a89), 0xcd }, { CCI_REG8(0x3a8a), 0xcd },
+ { CCI_REG8(0x3a8b), 0xcd }, { CCI_REG8(0x3a8c), 0xcd },
+ { CCI_REG8(0x3a8d), 0xcd }, { CCI_REG8(0x3a8e), 0xcd },
+ { CCI_REG8(0x3a8f), 0xcd }, { CCI_REG8(0x3a90), 0xcd },
+ { CCI_REG8(0x3a91), 0xcd }, { CCI_REG8(0x3a92), 0xcd },
+ { CCI_REG8(0x3a93), 0xcd }, { CCI_REG8(0x3a94), 0xcd },
+ { CCI_REG8(0x3a95), 0x40 }, { CCI_REG8(0x3a96), 0x40 },
+ { CCI_REG8(0x3a97), 0x40 }, { CCI_REG8(0x3a98), 0x40 },
+ { CCI_REG8(0x3a99), 0x40 }, { CCI_REG8(0x3a9a), 0x40 },
+ { CCI_REG8(0x3a9b), 0x40 }, { CCI_REG8(0x3a9c), 0x40 },
+ { CCI_REG8(0x3a9d), 0x40 }, { CCI_REG8(0x3a9e), 0x40 },
+ { CCI_REG8(0x3a9f), 0x40 }, { CCI_REG8(0x3aa0), 0x40 },
+ { CCI_REG8(0x3aa1), 0x40 }, { CCI_REG8(0x3aa2), 0x40 },
+ { CCI_REG8(0x3aa3), 0x40 }, { CCI_REG8(0x3aa4), 0x40 },
+ { CCI_REG8(0x3aa5), 0x40 }, { CCI_REG8(0x3aa6), 0x40 },
+ { CCI_REG8(0x3aa7), 0x40 }, { CCI_REG8(0x3aa8), 0x40 },
+ { CCI_REG8(0x3aa9), 0x40 }, { CCI_REG8(0x3aaa), 0x40 },
+ { CCI_REG8(0x3aab), 0x40 }, { CCI_REG8(0x3aac), 0x40 },
+ { CCI_REG8(0x3aad), 0x40 }, { CCI_REG8(0x3aae), 0x40 },
+ { CCI_REG8(0x3aaf), 0x40 }, { CCI_REG8(0x3ab0), 0x40 },
+ { CCI_REG8(0x3ab1), 0x40 }, { CCI_REG8(0x3ab2), 0x40 },
+ { CCI_REG8(0x3ab3), 0x40 }, { CCI_REG8(0x3ab4), 0x40 },
+ { CCI_REG8(0x3ab5), 0x40 }, { CCI_REG8(0x3ab6), 0x40 },
+ { CCI_REG8(0x3ab7), 0x40 }, { CCI_REG8(0x3ab8), 0x40 },
+ { CCI_REG8(0x3ab9), 0x40 }, { CCI_REG8(0x3aba), 0x40 },
+ { CCI_REG8(0x3abb), 0x40 }, { CCI_REG8(0x3abc), 0x40 },
+ { CCI_REG8(0x3abd), 0x40 }, { CCI_REG8(0x3abe), 0x40 },
+ { CCI_REG8(0x3abf), 0x40 }, { CCI_REG8(0x3ac0), 0x40 },
+ { CCI_REG8(0x3ac1), 0x40 }, { CCI_REG8(0x3ac2), 0x40 },
+ { CCI_REG8(0x3ac3), 0x40 }, { CCI_REG8(0x3ac4), 0x40 },
+ { CCI_REG8(0x3ac5), 0x40 }, { CCI_REG8(0x3ac6), 0x40 },
+ { CCI_REG8(0x3ac7), 0x40 }, { CCI_REG8(0x3ac8), 0x40 },
+ { CCI_REG8(0x3ac9), 0x40 }, { CCI_REG8(0x3aca), 0x40 },
+ { CCI_REG8(0x3acb), 0x40 }, { CCI_REG8(0x3acc), 0x40 },
+ { CCI_REG8(0x3acd), 0x40 }, { CCI_REG8(0x3ace), 0x40 },
+ { CCI_REG8(0x3acf), 0x40 }, { CCI_REG8(0x3ad0), 0x40 },
+ { CCI_REG8(0x3ad1), 0x40 }, { CCI_REG8(0x3ad2), 0x40 },
+ { CCI_REG8(0x3ad3), 0x40 }, { CCI_REG8(0x3ad4), 0x40 },
+ { CCI_REG8(0x3ad5), 0x40 }, { CCI_REG8(0x3ad6), 0x40 },
+ { CCI_REG8(0x3ad7), 0x40 }, { CCI_REG8(0x3ad8), 0x40 },
+ { CCI_REG8(0x3ad9), 0x40 }, { CCI_REG8(0x3ada), 0x40 },
+ { CCI_REG8(0x3adb), 0x40 }, { CCI_REG8(0x3adc), 0x40 },
+ { CCI_REG8(0x3add), 0x40 }, { CCI_REG8(0x3ade), 0x40 },
+ { CCI_REG8(0x3adf), 0x40 }, { CCI_REG8(0x3ae0), 0x40 },
+ { CCI_REG8(0x3ae1), 0x40 }, { CCI_REG8(0x3ae2), 0x40 },
+ { CCI_REG8(0x3ae3), 0x40 }, { CCI_REG8(0x3ae4), 0x40 },
+ { CCI_REG8(0x3ae5), 0x40 }, { CCI_REG8(0x3ae6), 0x40 },
+ { CCI_REG8(0x3ae7), 0x40 }, { CCI_REG8(0x3ae8), 0x40 },
+ { CCI_REG8(0x3ae9), 0x40 }, { CCI_REG8(0x3aea), 0x40 },
+ { CCI_REG8(0x3aeb), 0x40 }, { CCI_REG8(0x3aec), 0x40 },
+ { CCI_REG8(0x3aed), 0x40 }, { CCI_REG8(0x3aee), 0x40 },
+ { CCI_REG8(0x3aef), 0xcd }, { CCI_REG8(0x3af0), 0xcd },
+ { CCI_REG8(0x3af1), 0xcd }, { CCI_REG8(0x3af2), 0xcd },
+ { CCI_REG8(0x3af3), 0xcd }, { CCI_REG8(0x3af4), 0xcd },
+ { CCI_REG8(0x3af5), 0xcd }, { CCI_REG8(0x3af6), 0xcd },
+ { CCI_REG8(0x3af7), 0xcd }, { CCI_REG8(0x3af8), 0xcd },
+ { CCI_REG8(0x3af9), 0xcd }, { CCI_REG8(0x3afa), 0xcd },
+ { CCI_REG8(0x3afb), 0xcd }, { CCI_REG8(0x3afc), 0xcd },
+ { CCI_REG8(0x3afd), 0xcd }, { CCI_REG8(0x3afe), 0xcd },
+ { CCI_REG8(0x3aff), 0xcd }, { CCI_REG8(0x3b00), 0xcd },
+ { CCI_REG8(0x3b01), 0xcd }, { CCI_REG8(0x3b02), 0xcd },
+ { CCI_REG8(0x3b03), 0xcd }, { CCI_REG8(0x3b04), 0xcd },
+ { CCI_REG8(0x3b05), 0xcd }, { CCI_REG8(0x3b06), 0xcd },
+ { CCI_REG8(0x3b07), 0xcd }, { CCI_REG8(0x3b08), 0xcd },
+ { CCI_REG8(0x3b09), 0xcd }, { CCI_REG8(0x3b0a), 0xcd },
+ { CCI_REG8(0x3b0b), 0xcd }, { CCI_REG8(0x3b0c), 0xcd },
+ { CCI_REG8(0x3b0d), 0xcd }, { CCI_REG8(0x3b0e), 0xcd },
+ { CCI_REG8(0x3b0f), 0xcd }, { CCI_REG8(0x3b10), 0xcd },
+ { CCI_REG8(0x3b11), 0xcd }, { CCI_REG8(0x3b12), 0xcd },
+ { CCI_REG8(0x3b13), 0xcd }, { CCI_REG8(0x3b14), 0xcd },
+ { CCI_REG8(0x3b15), 0xcd }, { CCI_REG8(0x3b16), 0xcd },
+ { CCI_REG8(0x3b17), 0xcd }, { CCI_REG8(0x3b18), 0xcd },
+ { CCI_REG8(0x3b19), 0xcd }, { CCI_REG8(0x3b1a), 0xcd },
+ { CCI_REG8(0x3b1b), 0xcd }, { CCI_REG8(0x3b1c), 0xcd },
+ { CCI_REG8(0x3b1d), 0xcd }, { CCI_REG8(0x3b1e), 0xcd },
+ { CCI_REG8(0x3b1f), 0xcd }, { CCI_REG8(0x3b20), 0xcd },
+ { CCI_REG8(0x3b21), 0xcd }, { CCI_REG8(0x3b22), 0xcd },
+ { CCI_REG8(0x3b23), 0xcd }, { CCI_REG8(0x3b24), 0xcd },
+ { CCI_REG8(0x3b25), 0xcd }, { CCI_REG8(0x3b26), 0xcd },
+ { CCI_REG8(0x3b27), 0xcd }, { CCI_REG8(0x3b28), 0xcd },
+ { CCI_REG8(0x3b29), 0xcd }, { CCI_REG8(0x3b2a), 0xcd },
+ { CCI_REG8(0x3b2b), 0xcd }, { CCI_REG8(0x3b2c), 0xcd },
+ { CCI_REG8(0x3b2d), 0xcd }, { CCI_REG8(0x3b2e), 0xcd },
+ { CCI_REG8(0x3b2f), 0xcd }, { CCI_REG8(0x3b30), 0xcd },
+ { CCI_REG8(0x3b31), 0xcd }, { CCI_REG8(0x3b32), 0xcd },
+ { CCI_REG8(0x3b33), 0xcd }, { CCI_REG8(0x3b34), 0xcd },
+ { CCI_REG8(0x3b35), 0xcd }, { CCI_REG8(0x3b36), 0xcd },
+ { CCI_REG8(0x3b37), 0xcd }, { CCI_REG8(0x3b38), 0xcd },
+ { CCI_REG8(0x3b39), 0xcd }, { CCI_REG8(0x3b3a), 0xcd },
+ { CCI_REG8(0x3b3b), 0xcd }, { CCI_REG8(0x3b3c), 0xcd },
+ { CCI_REG8(0x3b3d), 0xcd }, { CCI_REG8(0x3b3e), 0xcd },
+ { CCI_REG8(0x3b3f), 0xcd }, { CCI_REG8(0x3b40), 0xcd },
+ { CCI_REG8(0x3b41), 0xcd }, { CCI_REG8(0x3b42), 0xcd },
+ { CCI_REG8(0x3b43), 0xcd }, { CCI_REG8(0x3b44), 0xcd },
+ { CCI_REG8(0x3b45), 0xcd }, { CCI_REG8(0x3b46), 0xcd },
+ { CCI_REG8(0x3b47), 0xcd }, { CCI_REG8(0x3b48), 0xcd },
+ { CCI_REG8(0x3b49), 0xcd }, { CCI_REG8(0x3b4a), 0xcd },
+ { CCI_REG8(0x3b4b), 0xcd }, { CCI_REG8(0x3b4c), 0xcd },
+ { CCI_REG8(0x3b4d), 0xcd }, { CCI_REG8(0x3b4e), 0xcd },
+ { CCI_REG8(0x3b4f), 0xcd }, { CCI_REG8(0x3b50), 0xcd },
+ { CCI_REG8(0x3b51), 0xcd }, { CCI_REG8(0x3b52), 0xcd },
+ { CCI_REG8(0x3b53), 0xcd }, { CCI_REG8(0x3b54), 0xcd },
+ { CCI_REG8(0x3b55), 0xcd }, { CCI_REG8(0x3b56), 0xcd },
+ { CCI_REG8(0x3b57), 0xcd }, { CCI_REG8(0x3b58), 0xcd },
+ { CCI_REG8(0x3b59), 0xcd }, { CCI_REG8(0x3b5a), 0xcd },
+ { CCI_REG8(0x3b5b), 0xcd }, { CCI_REG8(0x3b5c), 0xcd },
+ { CCI_REG8(0x3b5d), 0xcd }, { CCI_REG8(0x3b5e), 0xcd },
+ { CCI_REG8(0x3b5f), 0xcd }, { CCI_REG8(0x3b60), 0xcd },
+ { CCI_REG8(0x3b61), 0xcd }, { CCI_REG8(0x3b62), 0xcd },
+ { CCI_REG8(0x3b63), 0xcd }, { CCI_REG8(0x3b64), 0xcd },
+ { CCI_REG8(0x3b65), 0xcd }, { CCI_REG8(0x3b66), 0xcd },
+ { CCI_REG8(0x3b67), 0xcd }, { CCI_REG8(0x3b68), 0xcd },
+ { CCI_REG8(0x3b69), 0xcd }, { CCI_REG8(0x3b6a), 0xcd },
+ { CCI_REG8(0x3b6b), 0xcd }, { CCI_REG8(0x3b6c), 0xcd },
+ { CCI_REG8(0x3b6d), 0xcd }, { CCI_REG8(0x3b6e), 0xcd },
+ { CCI_REG8(0x3b6f), 0xcd }, { CCI_REG8(0x3b70), 0xcd },
+ { CCI_REG8(0x3b71), 0xcd }, { CCI_REG8(0x3b72), 0xcd },
+ { CCI_REG8(0x3b73), 0xcd }, { CCI_REG8(0x3b74), 0xcd },
+ { CCI_REG8(0x3b75), 0xcd }, { CCI_REG8(0x3b76), 0xcd },
+ { CCI_REG8(0x3b77), 0xcd }, { CCI_REG8(0x3b78), 0xcd },
+ { CCI_REG8(0x3b79), 0xcd }, { CCI_REG8(0x3b7a), 0xcd },
+ { CCI_REG8(0x3b7b), 0xcd }, { CCI_REG8(0x3b7c), 0xcd },
+ { CCI_REG8(0x3b7d), 0xcd }, { CCI_REG8(0x3b7e), 0xcd },
+ { CCI_REG8(0x3b7f), 0xcd }, { CCI_REG8(0x3b80), 0xcd },
+ { CCI_REG8(0x3b81), 0xcd }, { CCI_REG8(0x3b82), 0xcd },
+ { CCI_REG8(0x3b83), 0xcd }, { CCI_REG8(0x3b84), 0xcd },
+ { CCI_REG8(0x3b85), 0xcd }, { CCI_REG8(0x3b86), 0xcd },
+ { CCI_REG8(0x3b87), 0xcd }, { CCI_REG8(0x3b88), 0xcd },
+ { CCI_REG8(0x3b89), 0xcd }, { CCI_REG8(0x3b8a), 0xcd },
+ { CCI_REG8(0x3b8b), 0xcd }, { CCI_REG8(0x3b8c), 0xcd },
+ { CCI_REG8(0x3b8d), 0xcd }, { CCI_REG8(0x3b8e), 0xcd },
+ { CCI_REG8(0x3b8f), 0xcd }, { CCI_REG8(0x3b90), 0xcd },
+ { CCI_REG8(0x3b91), 0xcd }, { CCI_REG8(0x3b92), 0xcd },
+ { CCI_REG8(0x3b93), 0xcd }, { CCI_REG8(0x3b94), 0xcd },
+ { CCI_REG8(0x3b95), 0xcd }, { CCI_REG8(0x3b96), 0xcd },
+ { CCI_REG8(0x3b97), 0xcd }, { CCI_REG8(0x3b98), 0xcd },
+ { CCI_REG8(0x3b99), 0xcd }, { CCI_REG8(0x3b9a), 0xcd },
+ { CCI_REG8(0x3b9b), 0xcd }, { CCI_REG8(0x3b9c), 0xcd },
+ { CCI_REG8(0x3b9d), 0xcd }, { CCI_REG8(0x3b9e), 0xcd },
+ { CCI_REG8(0x3b9f), 0xcd }, { CCI_REG8(0x3ba0), 0xcd },
+ { CCI_REG8(0x3ba1), 0xcd }, { CCI_REG8(0x3ba2), 0xcd },
+ { CCI_REG8(0x3ba3), 0xcd }, { CCI_REG8(0x3ba4), 0xcd },
+ { CCI_REG8(0x3ba5), 0xcd }, { CCI_REG8(0x3ba6), 0xcd },
+ { CCI_REG8(0x3ba7), 0xcd }, { CCI_REG8(0x3ba8), 0xcd },
+ { CCI_REG8(0x3ba9), 0xcd }, { CCI_REG8(0x3baa), 0xcd },
+ { CCI_REG8(0x3bab), 0xcd }, { CCI_REG8(0x3bac), 0xcd },
+ { CCI_REG8(0x3bad), 0xcd }, { CCI_REG8(0x3bae), 0xcd },
+ { CCI_REG8(0x3baf), 0xcd }, { CCI_REG8(0x3bb0), 0xcd },
+ { CCI_REG8(0x3bb1), 0xcd }, { CCI_REG8(0x3bb2), 0xcd },
+ { CCI_REG8(0x3bb3), 0xcd }, { CCI_REG8(0x3bb4), 0xcd },
+ { CCI_REG8(0x3bb5), 0xcd }, { CCI_REG8(0x3bb6), 0xcd },
+ { CCI_REG8(0x3bb7), 0xcd }, { CCI_REG8(0x3bb8), 0xcd },
+ { CCI_REG8(0x3bb9), 0xcd }, { CCI_REG8(0x3bba), 0xcd },
+ { CCI_REG8(0x3bbb), 0xcd }, { CCI_REG8(0x3bbc), 0xcd },
+ { CCI_REG8(0x3bbd), 0xcd }, { CCI_REG8(0x3bbe), 0xcd },
+ { CCI_REG8(0x3bbf), 0xcd }, { CCI_REG8(0x3bc0), 0xcd },
+ { CCI_REG8(0x3bc1), 0xcd }, { CCI_REG8(0x3bc2), 0xcd },
+ { CCI_REG8(0x3bc3), 0xcd }, { CCI_REG8(0x3bc4), 0xcd },
+ { CCI_REG8(0x3bc5), 0xcd }, { CCI_REG8(0x3bc6), 0xcd },
+ { CCI_REG8(0x3bc7), 0xcd }, { CCI_REG8(0x3bc8), 0xcd },
+ { CCI_REG8(0x3bc9), 0xcd }, { CCI_REG8(0x3bca), 0xcd },
+ { CCI_REG8(0x3bcb), 0xcd }, { CCI_REG8(0x3bcc), 0xcd },
+ { CCI_REG8(0x3bcd), 0xcd }, { CCI_REG8(0x3bce), 0xcd },
+ { CCI_REG8(0x3bcf), 0xcd }, { CCI_REG8(0x3bd0), 0xcd },
+ { CCI_REG8(0x3bd1), 0xcd }, { CCI_REG8(0x3bd2), 0xcd },
+ { CCI_REG8(0x3bd3), 0xcd }, { CCI_REG8(0x3bd4), 0xcd },
+ { CCI_REG8(0x3bd5), 0xcd }, { CCI_REG8(0x3bd6), 0xcd },
+ { CCI_REG8(0x3bd7), 0xcd }, { CCI_REG8(0x3bd8), 0xcd },
+ { CCI_REG8(0x3bd9), 0xcd }, { CCI_REG8(0x3bda), 0xcd },
+ { CCI_REG8(0x3bdb), 0xcd }, { CCI_REG8(0x3bdc), 0xcd },
+ { CCI_REG8(0x3bdd), 0xcd }, { CCI_REG8(0x3bde), 0xcd },
+ { CCI_REG8(0x3bdf), 0xcd }, { CCI_REG8(0x3be0), 0xcd },
+ { CCI_REG8(0x3be1), 0xcd }, { CCI_REG8(0x3be2), 0xcd },
+ { CCI_REG8(0x3be3), 0xcd }, { CCI_REG8(0x3be4), 0xcd },
+ { CCI_REG8(0x3be5), 0xcd }, { CCI_REG8(0x3be6), 0xcd },
+ { CCI_REG8(0x3be7), 0xcd }, { CCI_REG8(0x3be8), 0xcd },
+ { CCI_REG8(0x3be9), 0xcd }, { CCI_REG8(0x3bea), 0xcd },
+ { CCI_REG8(0x3beb), 0xcd }, { CCI_REG8(0x3bec), 0xcd },
+ { CCI_REG8(0x3bed), 0xcd }, { CCI_REG8(0x3bee), 0xcd },
+ { CCI_REG8(0x3bef), 0xcd }, { CCI_REG8(0x3bf0), 0xcd },
+ { CCI_REG8(0x3bf1), 0xcd }, { CCI_REG8(0x3bf2), 0xcd },
+ { CCI_REG8(0x3bf3), 0xcd }, { CCI_REG8(0x3bf4), 0xcd },
+ { CCI_REG8(0x3bf5), 0xcd }, { CCI_REG8(0x3bf6), 0xcd },
+ { CCI_REG8(0x3bf7), 0xcd }, { CCI_REG8(0x3bf8), 0xcd },
+ { CCI_REG8(0x3bf9), 0xcd }, { CCI_REG8(0x3bfa), 0xcd },
+ { CCI_REG8(0x3bfb), 0xcd }, { CCI_REG8(0x3bfc), 0xcd },
+ { CCI_REG8(0x3bfd), 0xcd }, { CCI_REG8(0x3bfe), 0xcd },
+ { CCI_REG8(0x3bff), 0xcd }, { CCI_REG8(0x3c00), 0xcd },
+ { CCI_REG8(0x3c01), 0xcd }, { CCI_REG8(0x3c02), 0xcd },
+ { CCI_REG8(0x3c03), 0xcd }, { CCI_REG8(0x3c04), 0xcd },
+ { CCI_REG8(0x3c05), 0xcd }, { CCI_REG8(0x3c06), 0xcd },
+ { CCI_REG8(0x3c07), 0xcd }, { CCI_REG8(0x3c08), 0xcd },
+ { CCI_REG8(0x3c09), 0xcd }, { CCI_REG8(0x3c0a), 0xcd },
+ { CCI_REG8(0x3c0b), 0xcd }, { CCI_REG8(0x3c0c), 0xcd },
+ { CCI_REG8(0x3c0d), 0xcd }, { CCI_REG8(0x3c0e), 0xcd },
+ { CCI_REG8(0x3c0f), 0xcd }, { CCI_REG8(0x3c10), 0xcd },
+ { CCI_REG8(0x3c11), 0xcd }, { CCI_REG8(0x3c12), 0xcd },
+ { CCI_REG8(0x3c13), 0xcd }, { CCI_REG8(0x3c14), 0xcd },
+ { CCI_REG8(0x3c15), 0xcd }, { CCI_REG8(0x3c16), 0xcd },
+ { CCI_REG8(0x3c17), 0xcd }, { CCI_REG8(0x3c18), 0xcd },
+ { CCI_REG8(0x3c19), 0xcd }, { CCI_REG8(0x3c1a), 0xcd },
+ { CCI_REG8(0x3c1b), 0xcd }, { CCI_REG8(0x3c1c), 0xcd },
+ { CCI_REG8(0x3c1d), 0xcd }, { CCI_REG8(0x3c1e), 0xcd },
+ { CCI_REG8(0x3c1f), 0xcd }, { CCI_REG8(0x3c20), 0xcd },
+ { CCI_REG8(0x3c21), 0xcd }, { CCI_REG8(0x3c22), 0xcd },
+ { CCI_REG8(0x3c23), 0xcd }, { CCI_REG8(0x3c24), 0xcd },
+ { CCI_REG8(0x3c25), 0xcd }, { CCI_REG8(0x3c26), 0xcd },
+ { CCI_REG8(0x3c27), 0xcd }, { CCI_REG8(0x3c28), 0xcd },
+ { CCI_REG8(0x3c29), 0xcd }, { CCI_REG8(0x3c2a), 0xcd },
+ { CCI_REG8(0x3c2b), 0xcd }, { CCI_REG8(0x3c2c), 0xcd },
+ { CCI_REG8(0x3c2d), 0xcd }, { CCI_REG8(0x3c2e), 0xcd },
+ { CCI_REG8(0x3c2f), 0xcd }, { CCI_REG8(0x3c30), 0xcd },
+ { CCI_REG8(0x3c31), 0xcd }, { CCI_REG8(0x3c32), 0xcd },
+ { CCI_REG8(0x3c33), 0xcd }, { CCI_REG8(0x3c34), 0xcd },
+ { CCI_REG8(0x3c35), 0xcd }, { CCI_REG8(0x3c36), 0xcd },
+ { CCI_REG8(0x3c37), 0xcd }, { CCI_REG8(0x3c38), 0xcd },
+ { CCI_REG8(0x3c39), 0xcd }, { CCI_REG8(0x3c3a), 0xcd },
+ { CCI_REG8(0x3c3b), 0xcd }, { CCI_REG8(0x3c3c), 0xcd },
+ { CCI_REG8(0x3c3d), 0xcd }, { CCI_REG8(0x3c3e), 0xcd },
+ { CCI_REG8(0x3c3f), 0xcd }, { CCI_REG8(0x3c40), 0xcd },
+ { CCI_REG8(0x3c41), 0xcd }, { CCI_REG8(0x3c42), 0xcd },
+ { CCI_REG8(0x3c43), 0xcd }, { CCI_REG8(0x3c44), 0xcd },
+ { CCI_REG8(0x3c45), 0xcd }, { CCI_REG8(0x3c46), 0xcd },
+ { CCI_REG8(0x3c47), 0xcd }, { CCI_REG8(0x3c48), 0xcd },
+ { CCI_REG8(0x3c49), 0xcd }, { CCI_REG8(0x3c4a), 0xcd },
+ { CCI_REG8(0x3c4b), 0xcd }, { CCI_REG8(0x3c4c), 0xcd },
+ { CCI_REG8(0x3c4d), 0xcd }, { CCI_REG8(0x3c4e), 0xcd },
+ { CCI_REG8(0x3c4f), 0xcd }, { CCI_REG8(0x3c50), 0xcd },
+ { CCI_REG8(0x3c51), 0xcd }, { CCI_REG8(0x3c52), 0xcd },
+ { CCI_REG8(0x3c53), 0xcd }, { CCI_REG8(0x3c54), 0xcd },
+ { CCI_REG8(0x3c55), 0xcd }, { CCI_REG8(0x3c56), 0xcd },
+ { CCI_REG8(0x3c57), 0xcd }, { CCI_REG8(0x3c58), 0xcd },
+ { CCI_REG8(0x3c59), 0xcd }, { CCI_REG8(0x3c5a), 0xcd },
+ { CCI_REG8(0x3c5b), 0xcd }, { CCI_REG8(0x3c5c), 0xcd },
+ { CCI_REG8(0x3c5d), 0xcd }, { CCI_REG8(0x3c5e), 0xcd },
+ { CCI_REG8(0x3c5f), 0xcd }, { CCI_REG8(0x3c60), 0xcd },
+ { CCI_REG8(0x3c61), 0xcd }, { CCI_REG8(0x3c62), 0xcd },
+ { CCI_REG8(0x3c63), 0xcd }, { CCI_REG8(0x3c64), 0xcd },
+ { CCI_REG8(0x3c65), 0xcd }, { CCI_REG8(0x3c66), 0xcd },
+ { CCI_REG8(0x3c67), 0xcd }, { CCI_REG8(0x3c68), 0xcd },
+ { CCI_REG8(0x3c69), 0xcd }, { CCI_REG8(0x3c6a), 0xcd },
+ { CCI_REG8(0x3c6b), 0xcd }, { CCI_REG8(0x3c6c), 0xcd },
+ { CCI_REG8(0x3c6d), 0xcd }, { CCI_REG8(0x3c6e), 0xcd },
+ { CCI_REG8(0x3c6f), 0xcd }, { CCI_REG8(0x3c70), 0xcd },
+ { CCI_REG8(0x3c71), 0xcd }, { CCI_REG8(0x3c72), 0xcd },
+ { CCI_REG8(0x3c73), 0xcd }, { CCI_REG8(0x3c74), 0xcd },
+ { CCI_REG8(0x3c75), 0xcd }, { CCI_REG8(0x3c76), 0xcd },
+ { CCI_REG8(0x3c77), 0xcd }, { CCI_REG8(0x3c78), 0xcd },
+ { CCI_REG8(0x3c79), 0xcd }, { CCI_REG8(0x3c7a), 0xcd },
+ { CCI_REG8(0x3c7b), 0xcd }, { CCI_REG8(0x3c7c), 0xcd },
+ { CCI_REG8(0x3c7d), 0xcd }, { CCI_REG8(0x3c7e), 0xcd },
+ { CCI_REG8(0x3c7f), 0xcd }, { CCI_REG8(0x3c80), 0xcd },
+ { CCI_REG8(0x3c81), 0xcd }, { CCI_REG8(0x3c82), 0xcd },
+ { CCI_REG8(0x3c83), 0xcd }, { CCI_REG8(0x3c84), 0xcd },
+ { CCI_REG8(0x3c85), 0xcd }, { CCI_REG8(0x3c86), 0xcd },
+ { CCI_REG8(0x3c87), 0xcd }, { CCI_REG8(0x3c88), 0xcd },
+ { CCI_REG8(0x3c89), 0xcd }, { CCI_REG8(0x3c8a), 0xcd },
+ { CCI_REG8(0x3c8b), 0xcd }, { CCI_REG8(0x3c8c), 0xcd },
+ { CCI_REG8(0x3c8d), 0xcd }, { CCI_REG8(0x3c8e), 0xcd },
+ { CCI_REG8(0x3c8f), 0xcd }, { CCI_REG8(0x3c90), 0xcd },
+ { CCI_REG8(0x3c91), 0xcd }, { CCI_REG8(0x3c92), 0xcd },
+ { CCI_REG8(0x3c93), 0xcd }, { CCI_REG8(0x3c94), 0xcd },
+ { CCI_REG8(0x3c95), 0xcd }, { CCI_REG8(0x3c96), 0xcd },
+ { CCI_REG8(0x3c97), 0xcd }, { CCI_REG8(0x3c98), 0xcd },
+ { CCI_REG8(0x3c99), 0xcd }, { CCI_REG8(0x3c9a), 0xcd },
+ { CCI_REG8(0x3c9b), 0xcd }, { CCI_REG8(0x3c9c), 0xcd },
+ { CCI_REG8(0x3c9d), 0xcd }, { CCI_REG8(0x3c9e), 0xcd },
+ { CCI_REG8(0x3c9f), 0xcd }, { CCI_REG8(0x3ca0), 0xcd },
+ { CCI_REG8(0x3ca1), 0xcd }, { CCI_REG8(0x3ca2), 0xcd },
+ { CCI_REG8(0x3ca3), 0xcd }, { CCI_REG8(0x3ca4), 0xcd },
+ { CCI_REG8(0x3ca5), 0xcd }, { CCI_REG8(0x3ca6), 0xcd },
+ { CCI_REG8(0x3ca7), 0xcd }, { CCI_REG8(0x3ca8), 0xcd },
+ { CCI_REG8(0x3ca9), 0xcd }, { CCI_REG8(0x3caa), 0xcd },
+ { CCI_REG8(0x3cab), 0xcd }, { CCI_REG8(0x3cac), 0xcd },
+ { CCI_REG8(0x3cad), 0xcd }, { CCI_REG8(0x3cae), 0xcd },
+ { CCI_REG8(0x3caf), 0xcd }, { CCI_REG8(0x3cb0), 0xcd },
+ { CCI_REG8(0x3cb1), 0x40 }, { CCI_REG8(0x3cb2), 0x40 },
+ { CCI_REG8(0x3cb3), 0x40 }, { CCI_REG8(0x3cb4), 0x40 },
+ { CCI_REG8(0x3cb5), 0x40 }, { CCI_REG8(0x3cb6), 0x40 },
+ { CCI_REG8(0x3cb7), 0x40 }, { CCI_REG8(0x3cb8), 0x40 },
+ { CCI_REG8(0x3cb9), 0x40 }, { CCI_REG8(0x3cba), 0x40 },
+ { CCI_REG8(0x3cbb), 0x40 }, { CCI_REG8(0x3cbc), 0x40 },
+ { CCI_REG8(0x3cbd), 0x40 }, { CCI_REG8(0x3cbe), 0x40 },
+ { CCI_REG8(0x3cbf), 0x40 }, { CCI_REG8(0x3cc0), 0x40 },
+ { CCI_REG8(0x3cc1), 0x40 }, { CCI_REG8(0x3cc2), 0x40 },
+ { CCI_REG8(0x3cc3), 0x40 }, { CCI_REG8(0x3cc4), 0x40 },
+ { CCI_REG8(0x3cc5), 0x40 }, { CCI_REG8(0x3cc6), 0x40 },
+ { CCI_REG8(0x3cc7), 0x40 }, { CCI_REG8(0x3cc8), 0x40 },
+ { CCI_REG8(0x3cc9), 0x40 }, { CCI_REG8(0x3cca), 0x40 },
+ { CCI_REG8(0x3ccb), 0x40 }, { CCI_REG8(0x3ccc), 0x40 },
+ { CCI_REG8(0x3ccd), 0x40 }, { CCI_REG8(0x3cce), 0x40 },
+ { CCI_REG8(0x3ccf), 0x40 }, { CCI_REG8(0x3cd0), 0x40 },
+ { CCI_REG8(0x3cd1), 0x40 }, { CCI_REG8(0x3cd2), 0x40 },
+ { CCI_REG8(0x3cd3), 0x40 }, { CCI_REG8(0x3cd4), 0x40 },
+ { CCI_REG8(0x3cd5), 0x40 }, { CCI_REG8(0x3cd6), 0x40 },
+ { CCI_REG8(0x3cd7), 0x40 }, { CCI_REG8(0x3cd8), 0x40 },
+ { CCI_REG8(0x3cd9), 0x40 }, { CCI_REG8(0x3cda), 0x40 },
+ { CCI_REG8(0x3cdb), 0x40 }, { CCI_REG8(0x3cdc), 0x40 },
+ { CCI_REG8(0x3cdd), 0x40 }, { CCI_REG8(0x3cde), 0x40 },
+ { CCI_REG8(0x3cdf), 0x40 }, { CCI_REG8(0x3ce0), 0x40 },
+ { CCI_REG8(0x3ce1), 0x40 }, { CCI_REG8(0x3ce2), 0x40 },
+ { CCI_REG8(0x3ce3), 0x40 }, { CCI_REG8(0x3ce4), 0x40 },
+ { CCI_REG8(0x3ce5), 0x40 }, { CCI_REG8(0x3ce6), 0x40 },
+ { CCI_REG8(0x3ce7), 0x40 }, { CCI_REG8(0x3ce8), 0x40 },
+ { CCI_REG8(0x3ce9), 0x40 }, { CCI_REG8(0x3cea), 0x40 },
+ { CCI_REG8(0x3ceb), 0x40 }, { CCI_REG8(0x3cec), 0x40 },
+ { CCI_REG8(0x3ced), 0x40 }, { CCI_REG8(0x3cee), 0x40 },
+ { CCI_REG8(0x3cef), 0x40 }, { CCI_REG8(0x3cf0), 0x40 },
+ { CCI_REG8(0x3cf1), 0x40 }, { CCI_REG8(0x3cf2), 0x40 },
+ { CCI_REG8(0x3cf3), 0x40 }, { CCI_REG8(0x3cf4), 0x40 },
+ { CCI_REG8(0x3cf5), 0x40 }, { CCI_REG8(0x3cf6), 0x40 },
+ { CCI_REG8(0x3cf7), 0x40 }, { CCI_REG8(0x3cf8), 0x40 },
+ { CCI_REG8(0x3cf9), 0x40 }, { CCI_REG8(0x3cfa), 0x40 },
+ { CCI_REG8(0x3cfb), 0x40 }, { CCI_REG8(0x3cfc), 0x40 },
+ { CCI_REG8(0x3cfd), 0x40 }, { CCI_REG8(0x3cfe), 0x40 },
+ { CCI_REG8(0x3cff), 0x40 }, { CCI_REG8(0x3d00), 0x40 },
+ { CCI_REG8(0x3d01), 0x40 }, { CCI_REG8(0x3d02), 0x40 },
+ { CCI_REG8(0x3d03), 0x40 }, { CCI_REG8(0x3d04), 0x40 },
+ { CCI_REG8(0x3d05), 0x40 }, { CCI_REG8(0x3d06), 0x40 },
+ { CCI_REG8(0x3d07), 0x40 }, { CCI_REG8(0x3d08), 0x40 },
+ { CCI_REG8(0x3d09), 0x40 }, { CCI_REG8(0x3d0a), 0x40 },
+ { CCI_REG8(0x3d0b), 0xcd }, { CCI_REG8(0x3d0c), 0xcd },
+ { CCI_REG8(0x3d0d), 0xcd }, { CCI_REG8(0x3d0e), 0xcd },
+ { CCI_REG8(0x3d0f), 0xcd }, { CCI_REG8(0x3d10), 0xcd },
+ { CCI_REG8(0x3d11), 0xcd }, { CCI_REG8(0x3d12), 0xcd },
+ { CCI_REG8(0x3d13), 0xcd }, { CCI_REG8(0x3d14), 0xcd },
+ { CCI_REG8(0x3d15), 0xcd }, { CCI_REG8(0x3d16), 0xcd },
+ { CCI_REG8(0x3d17), 0xcd }, { CCI_REG8(0x3d18), 0xcd },
+ { CCI_REG8(0x3d19), 0xcd }, { CCI_REG8(0x3d1a), 0xcd },
+ { CCI_REG8(0x3d1b), 0xcd }, { CCI_REG8(0x3d1c), 0xcd },
+ { CCI_REG8(0x3d1d), 0xcd }, { CCI_REG8(0x3d1e), 0xcd },
+ { CCI_REG8(0x3d1f), 0xcd }, { CCI_REG8(0x3d20), 0xcd },
+ { CCI_REG8(0x3d21), 0xcd }, { CCI_REG8(0x3d22), 0xcd },
+ { CCI_REG8(0x3d23), 0xcd }, { CCI_REG8(0x3d24), 0xcd },
+ { CCI_REG8(0x3d25), 0xcd }, { CCI_REG8(0x3d26), 0xcd },
+ { CCI_REG8(0x3d27), 0xcd }, { CCI_REG8(0x3d28), 0xcd },
+ { CCI_REG8(0x3d29), 0xcd }, { CCI_REG8(0x3d2a), 0xcd },
+ { CCI_REG8(0x3d2b), 0xcd }, { CCI_REG8(0x3d2c), 0xcd },
+ { CCI_REG8(0x3d2d), 0xcd }, { CCI_REG8(0x3d2e), 0xcd },
+ { CCI_REG8(0x3d2f), 0xcd }, { CCI_REG8(0x3d30), 0xcd },
+ { CCI_REG8(0x3d31), 0xcd }, { CCI_REG8(0x3d32), 0xcd },
+ { CCI_REG8(0x3d33), 0xcd }, { CCI_REG8(0x3d34), 0xcd },
+ { CCI_REG8(0x3d35), 0xcd }, { CCI_REG8(0x3d36), 0xcd },
+ { CCI_REG8(0x3d37), 0xcd }, { CCI_REG8(0x3d38), 0xcd },
+ { CCI_REG8(0x3d39), 0xcd }, { CCI_REG8(0x3d3a), 0xcd },
+ { CCI_REG8(0x3d3b), 0xcd }, { CCI_REG8(0x3d3c), 0xcd },
+ { CCI_REG8(0x3d3d), 0xcd }, { CCI_REG8(0x3d3e), 0xcd },
+ { CCI_REG8(0x3d3f), 0xcd }, { CCI_REG8(0x3d40), 0xcd },
+ { CCI_REG8(0x3d41), 0xcd }, { CCI_REG8(0x3d42), 0xcd },
+ { CCI_REG8(0x3d43), 0xcd }, { CCI_REG8(0x3d44), 0xcd },
+ { CCI_REG8(0x3d45), 0xcd }, { CCI_REG8(0x3d46), 0xcd },
+ { CCI_REG8(0x3d47), 0xcd }, { CCI_REG8(0x3d48), 0xcd },
+ { CCI_REG8(0x3d49), 0xcd }, { CCI_REG8(0x3d4a), 0xcd },
+ { CCI_REG8(0x3d4b), 0xcd }, { CCI_REG8(0x3d4c), 0xcd },
+ { CCI_REG8(0x3d4d), 0xcd }, { CCI_REG8(0x3d4e), 0xcd },
+ { CCI_REG8(0x3d4f), 0xcd }, { CCI_REG8(0x3d50), 0xcd },
+ { CCI_REG8(0x3d51), 0xcd }, { CCI_REG8(0x3d52), 0xcd },
+ { CCI_REG8(0x3d53), 0xcd }, { CCI_REG8(0x3d54), 0xcd },
+ { CCI_REG8(0x3d55), 0xcd }, { CCI_REG8(0x3d56), 0xcd },
+ { CCI_REG8(0x3d57), 0xcd }, { CCI_REG8(0x3d58), 0xcd },
+ { CCI_REG8(0x3d59), 0xcd }, { CCI_REG8(0x3d5a), 0xcd },
+ { CCI_REG8(0x3d5b), 0xcd }, { CCI_REG8(0x3d5c), 0xcd },
+ { CCI_REG8(0x3d5d), 0xcd }, { CCI_REG8(0x3d5e), 0xcd },
+ { CCI_REG8(0x3d5f), 0xcd }, { CCI_REG8(0x3d60), 0xcd },
+ { CCI_REG8(0x3d61), 0xcd }, { CCI_REG8(0x3d62), 0xcd },
+ { CCI_REG8(0x3d63), 0xcd }, { CCI_REG8(0x3d64), 0xcd },
+ { CCI_REG8(0x3d65), 0x40 }, { CCI_REG8(0x3d66), 0x40 },
+ { CCI_REG8(0x3d67), 0x40 }, { CCI_REG8(0x3d68), 0x40 },
+ { CCI_REG8(0x3d69), 0x40 }, { CCI_REG8(0x3d6a), 0x40 },
+ { CCI_REG8(0x3d6b), 0x40 }, { CCI_REG8(0x3d6c), 0x40 },
+ { CCI_REG8(0x3d6d), 0x40 }, { CCI_REG8(0x3d6e), 0x40 },
+ { CCI_REG8(0x3d6f), 0x40 }, { CCI_REG8(0x3d70), 0x40 },
+ { CCI_REG8(0x3d71), 0x40 }, { CCI_REG8(0x3d72), 0x40 },
+ { CCI_REG8(0x3d73), 0x40 }, { CCI_REG8(0x3d74), 0x40 },
+ { CCI_REG8(0x3d75), 0x40 }, { CCI_REG8(0x3d76), 0x40 },
+ { CCI_REG8(0x3d77), 0x40 }, { CCI_REG8(0x3d78), 0x40 },
+ { CCI_REG8(0x3d79), 0x40 }, { CCI_REG8(0x3d7a), 0x40 },
+ { CCI_REG8(0x3d7b), 0x40 }, { CCI_REG8(0x3d7c), 0x40 },
+ { CCI_REG8(0x3d7d), 0x40 }, { CCI_REG8(0x3d7e), 0x40 },
+ { CCI_REG8(0x3d7f), 0x40 }, { CCI_REG8(0x3d80), 0x40 },
+ { CCI_REG8(0x3d81), 0x40 }, { CCI_REG8(0x3d82), 0x40 },
+ { CCI_REG8(0x3d83), 0x40 }, { CCI_REG8(0x3d84), 0x40 },
+ { CCI_REG8(0x3d85), 0x40 }, { CCI_REG8(0x3d86), 0x40 },
+ { CCI_REG8(0x3d87), 0x40 }, { CCI_REG8(0x3d88), 0x40 },
+ { CCI_REG8(0x3d89), 0x40 }, { CCI_REG8(0x3d8a), 0x40 },
+ { CCI_REG8(0x3d8b), 0x40 }, { CCI_REG8(0x3d8c), 0x40 },
+ { CCI_REG8(0x3d8d), 0x40 }, { CCI_REG8(0x3d8e), 0x40 },
+ { CCI_REG8(0x3d8f), 0x40 }, { CCI_REG8(0x3d90), 0x40 },
+ { CCI_REG8(0x3d91), 0x40 }, { CCI_REG8(0x3d92), 0x40 },
+ { CCI_REG8(0x3d93), 0x40 }, { CCI_REG8(0x3d94), 0x40 },
+ { CCI_REG8(0x3d95), 0x40 }, { CCI_REG8(0x3d96), 0x40 },
+ { CCI_REG8(0x3d97), 0x40 }, { CCI_REG8(0x3d98), 0x40 },
+ { CCI_REG8(0x3d99), 0x40 }, { CCI_REG8(0x3d9a), 0x40 },
+ { CCI_REG8(0x3d9b), 0x40 }, { CCI_REG8(0x3d9c), 0x40 },
+ { CCI_REG8(0x3d9d), 0x40 }, { CCI_REG8(0x3d9e), 0x40 },
+ { CCI_REG8(0x3d9f), 0x40 }, { CCI_REG8(0x3da0), 0x40 },
+ { CCI_REG8(0x3da1), 0x40 }, { CCI_REG8(0x3da2), 0x40 },
+ { CCI_REG8(0x3da3), 0x40 }, { CCI_REG8(0x3da4), 0x40 },
+ { CCI_REG8(0x3da5), 0x40 }, { CCI_REG8(0x3da6), 0x40 },
+ { CCI_REG8(0x3da7), 0x40 }, { CCI_REG8(0x3da8), 0x40 },
+ { CCI_REG8(0x3da9), 0x40 }, { CCI_REG8(0x3daa), 0x40 },
+ { CCI_REG8(0x3dab), 0x40 }, { CCI_REG8(0x3dac), 0x40 },
+ { CCI_REG8(0x3dad), 0x40 }, { CCI_REG8(0x3dae), 0x40 },
+ { CCI_REG8(0x3daf), 0x40 }, { CCI_REG8(0x3db0), 0x40 },
+ { CCI_REG8(0x3db1), 0x40 }, { CCI_REG8(0x3db2), 0x40 },
+ { CCI_REG8(0x3db3), 0x40 }, { CCI_REG8(0x3db4), 0x40 },
+ { CCI_REG8(0x3db5), 0x40 }, { CCI_REG8(0x3db6), 0x40 },
+ { CCI_REG8(0x3db7), 0x40 }, { CCI_REG8(0x3db8), 0x40 },
+ { CCI_REG8(0x3db9), 0x40 }, { CCI_REG8(0x3dba), 0x40 },
+ { CCI_REG8(0x3dbb), 0x40 }, { CCI_REG8(0x3dbc), 0x40 },
+ { CCI_REG8(0x3dbd), 0x40 }, { CCI_REG8(0x3dbe), 0x40 },
+ { CCI_REG8(0x3dbf), 0xcd }, { CCI_REG8(0x3dc0), 0xcd },
+ { CCI_REG8(0x3dc1), 0xcd }, { CCI_REG8(0x3dc2), 0xcd },
+ { CCI_REG8(0x3dc3), 0xcd }, { CCI_REG8(0x3dc4), 0xcd },
+ { CCI_REG8(0x3dc5), 0xcd }, { CCI_REG8(0x3dc6), 0xcd },
+ { CCI_REG8(0x3dc7), 0xcd }, { CCI_REG8(0x3dc8), 0xcd },
+ { CCI_REG8(0x3dc9), 0xcd }, { CCI_REG8(0x3dca), 0xcd },
+ { CCI_REG8(0x3dcb), 0xcd }, { CCI_REG8(0x3dcc), 0xcd },
+ { CCI_REG8(0x3dcd), 0xcd }, { CCI_REG8(0x3dce), 0xcd },
+ { CCI_REG8(0x3dcf), 0xcd }, { CCI_REG8(0x3dd0), 0xcd },
+ { CCI_REG8(0x3dd1), 0xcd }, { CCI_REG8(0x3dd2), 0xcd },
+ { CCI_REG8(0x3dd3), 0xcd }, { CCI_REG8(0x3dd4), 0xcd },
+ { CCI_REG8(0x3dd5), 0xcd }, { CCI_REG8(0x3dd6), 0xcd },
+ { CCI_REG8(0x3dd7), 0xcd }, { CCI_REG8(0x3dd8), 0xcd },
+ { CCI_REG8(0x3dd9), 0xcd }, { CCI_REG8(0x3dda), 0xcd },
+ { CCI_REG8(0x3ddb), 0xcd }, { CCI_REG8(0x3ddc), 0xcd },
+ { CCI_REG8(0x3ddd), 0xcd }, { CCI_REG8(0x3dde), 0xcd },
+ { CCI_REG8(0x3ddf), 0xcd }, { CCI_REG8(0x3de0), 0xcd },
+ { CCI_REG8(0x3de1), 0xcd }, { CCI_REG8(0x3de2), 0xcd },
+ { CCI_REG8(0x3de3), 0xcd }, { CCI_REG8(0x3de4), 0xcd },
+ { CCI_REG8(0x3de5), 0xcd }, { CCI_REG8(0x3de6), 0xcd },
+ { CCI_REG8(0x3de7), 0xcd }, { CCI_REG8(0x3de8), 0xcd },
+ { CCI_REG8(0x3de9), 0xcd }, { CCI_REG8(0x3dea), 0xcd },
+ { CCI_REG8(0x3deb), 0xcd }, { CCI_REG8(0x3dec), 0xcd },
+ { CCI_REG8(0x3ded), 0xcd }, { CCI_REG8(0x3dee), 0xcd },
+ { CCI_REG8(0x3def), 0xcd }, { CCI_REG8(0x3df0), 0xcd },
+ { CCI_REG8(0x3df1), 0xcd }, { CCI_REG8(0x3df2), 0xcd },
+ { CCI_REG8(0x3df3), 0xcd }, { CCI_REG8(0x3df4), 0xcd },
+ { CCI_REG8(0x3df5), 0xcd }, { CCI_REG8(0x3df6), 0xcd },
+ { CCI_REG8(0x3df7), 0xcd }, { CCI_REG8(0x3df8), 0xcd },
+ { CCI_REG8(0x3df9), 0xcd }, { CCI_REG8(0x3dfa), 0xcd },
+ { CCI_REG8(0x3dfb), 0xcd }, { CCI_REG8(0x3dfc), 0xcd },
+ { CCI_REG8(0x3dfd), 0xcd }, { CCI_REG8(0x3dfe), 0xcd },
+ { CCI_REG8(0x3dff), 0xcd }, { CCI_REG8(0x3e00), 0xcd },
+ { CCI_REG8(0x3e01), 0xcd }, { CCI_REG8(0x3e02), 0xcd },
+ { CCI_REG8(0x3e03), 0xcd }, { CCI_REG8(0x3e04), 0xcd },
+ { CCI_REG8(0x3e05), 0xcd }, { CCI_REG8(0x3e06), 0xcd },
+ { CCI_REG8(0x3e07), 0xcd }, { CCI_REG8(0x3e08), 0xcd },
+ { CCI_REG8(0x3e09), 0xcd }, { CCI_REG8(0x3e0a), 0xcd },
+ { CCI_REG8(0x3e0b), 0xcd }, { CCI_REG8(0x3e0c), 0xcd },
+ { CCI_REG8(0x3e0d), 0xcd }, { CCI_REG8(0x3e0e), 0xcd },
+ { CCI_REG8(0x3e0f), 0xcd }, { CCI_REG8(0x3e10), 0xcd },
+ { CCI_REG8(0x3e11), 0xcd }, { CCI_REG8(0x3e12), 0xcd },
+ { CCI_REG8(0x3e13), 0xcd }, { CCI_REG8(0x3e14), 0xcd },
+ { CCI_REG8(0x3e15), 0xcd }, { CCI_REG8(0x3e16), 0xcd },
+ { CCI_REG8(0x3e17), 0xcd }, { CCI_REG8(0x3e18), 0xcd },
+ { CCI_REG8(0x3e19), 0xcd }, { CCI_REG8(0x3e1a), 0xcd },
+ { CCI_REG8(0x3e1b), 0xcd }, { CCI_REG8(0x3e1c), 0xcd },
+ { CCI_REG8(0x3e1d), 0xcd }, { CCI_REG8(0x3e1e), 0xcd },
+ { CCI_REG8(0x3e1f), 0xcd }, { CCI_REG8(0x3e20), 0xcd },
+ { CCI_REG8(0x3e21), 0xcd }, { CCI_REG8(0x3e22), 0xcd },
+ { CCI_REG8(0x3e23), 0xcd }, { CCI_REG8(0x3e24), 0xcd },
+ { CCI_REG8(0x3e25), 0xcd }, { CCI_REG8(0x3e26), 0xcd },
+ { CCI_REG8(0x3e27), 0xcd }, { CCI_REG8(0x3e28), 0xcd },
+ { CCI_REG8(0x3e29), 0xcd }, { CCI_REG8(0x3e2a), 0xcd },
+ { CCI_REG8(0x3e2b), 0xcd }, { CCI_REG8(0x3e2c), 0xcd },
+ { CCI_REG8(0x3e2d), 0xcd }, { CCI_REG8(0x3e2e), 0xcd },
+ { CCI_REG8(0x3e2f), 0xcd }, { CCI_REG8(0x3e30), 0xcd },
+ { CCI_REG8(0x3e31), 0xcd }, { CCI_REG8(0x3e32), 0xcd },
+ { CCI_REG8(0x3e33), 0xcd }, { CCI_REG8(0x3e34), 0xcd },
+ { CCI_REG8(0x3e35), 0xcd }, { CCI_REG8(0x3e36), 0xcd },
+ { CCI_REG8(0x3e37), 0xcd }, { CCI_REG8(0x3e38), 0xcd },
+ { CCI_REG8(0x3e39), 0xcd }, { CCI_REG8(0x3e3a), 0xcd },
+ { CCI_REG8(0x3e3b), 0xcd }, { CCI_REG8(0x3e3c), 0xcd },
+ { CCI_REG8(0x3e3d), 0xcd }, { CCI_REG8(0x3e3e), 0xcd },
+ { CCI_REG8(0x3e3f), 0xcd }, { CCI_REG8(0x3e40), 0xcd },
+ { CCI_REG8(0x3e41), 0xcd }, { CCI_REG8(0x3e42), 0xcd },
+ { CCI_REG8(0x3e43), 0xcd }, { CCI_REG8(0x3e44), 0xcd },
+ { CCI_REG8(0x3e45), 0xcd }, { CCI_REG8(0x3e46), 0xcd },
+ { CCI_REG8(0x3e47), 0xcd }, { CCI_REG8(0x3e48), 0xcd },
+ { CCI_REG8(0x3e49), 0xcd }, { CCI_REG8(0x3e4a), 0xcd },
+ { CCI_REG8(0x3e4b), 0xcd }, { CCI_REG8(0x3e4c), 0xcd },
+ { CCI_REG8(0x3e4d), 0xcd }, { CCI_REG8(0x3e4e), 0xcd },
+ { CCI_REG8(0x3e4f), 0xcd }, { CCI_REG8(0x3e50), 0xcd },
+ { CCI_REG8(0x3e51), 0xcd }, { CCI_REG8(0x3e52), 0xcd },
+ { CCI_REG8(0x3e53), 0xcd }, { CCI_REG8(0x3e54), 0xcd },
+ { CCI_REG8(0x3e55), 0xcd }, { CCI_REG8(0x3e56), 0xcd },
+ { CCI_REG8(0x3e57), 0xcd }, { CCI_REG8(0x3e58), 0xcd },
+ { CCI_REG8(0x3e59), 0xcd }, { CCI_REG8(0x3e5a), 0xcd },
+ { CCI_REG8(0x3e5b), 0xcd }, { CCI_REG8(0x3e5c), 0xcd },
+ { CCI_REG8(0x3e5d), 0xcd }, { CCI_REG8(0x3e5e), 0xcd },
+ { CCI_REG8(0x3e5f), 0xcd }, { CCI_REG8(0x3e60), 0xcd },
+ { CCI_REG8(0x3e61), 0xcd }, { CCI_REG8(0x3e62), 0xcd },
+ { CCI_REG8(0x3e63), 0xcd }, { CCI_REG8(0x3e64), 0xcd },
+ { CCI_REG8(0x3e65), 0xcd }, { CCI_REG8(0x3e66), 0xcd },
+ { CCI_REG8(0x3e67), 0xcd }, { CCI_REG8(0x3e68), 0xcd },
+ { CCI_REG8(0x3e69), 0xcd }, { CCI_REG8(0x3e6a), 0xcd },
+ { CCI_REG8(0x3e6b), 0xcd }, { CCI_REG8(0x3e6c), 0xcd },
+ { CCI_REG8(0x3e6d), 0xcd }, { CCI_REG8(0x3e6e), 0xcd },
+ { CCI_REG8(0x3e6f), 0xcd }, { CCI_REG8(0x3e70), 0xcd },
+ { CCI_REG8(0x3e71), 0xcd }, { CCI_REG8(0x3e72), 0xcd },
+ { CCI_REG8(0x3e73), 0xcd }, { CCI_REG8(0x3e74), 0xcd },
+ { CCI_REG8(0x3e75), 0xcd }, { CCI_REG8(0x3e76), 0xcd },
+ { CCI_REG8(0x3e77), 0xcd }, { CCI_REG8(0x3e78), 0xcd },
+ { CCI_REG8(0x3e79), 0xcd }, { CCI_REG8(0x3e7a), 0xcd },
+ { CCI_REG8(0x3e7b), 0xcd }, { CCI_REG8(0x3e7c), 0xcd },
+ { CCI_REG8(0x3e7d), 0xcd }, { CCI_REG8(0x3e7e), 0xcd },
+ { CCI_REG8(0x3e7f), 0xcd }, { CCI_REG8(0x3e80), 0xcd },
+ { CCI_REG8(0x3e81), 0xcd }, { CCI_REG8(0x3e82), 0xcd },
+ { CCI_REG8(0x3e83), 0xcd }, { CCI_REG8(0x3e84), 0xcd },
+ { CCI_REG8(0x3e85), 0xcd }, { CCI_REG8(0x3e86), 0xcd },
+ { CCI_REG8(0x3e87), 0xcd }, { CCI_REG8(0x3e88), 0xcd },
+ { CCI_REG8(0x3e89), 0xcd }, { CCI_REG8(0x3e8a), 0xcd },
+ { CCI_REG8(0x3e8b), 0xcd }, { CCI_REG8(0x3e8c), 0xcd },
+ { CCI_REG8(0x3e8d), 0xcd }, { CCI_REG8(0x3e8e), 0xcd },
+ { CCI_REG8(0x3e8f), 0xcd }, { CCI_REG8(0x3e90), 0xcd },
+ { CCI_REG8(0x3e91), 0xcd }, { CCI_REG8(0x3e92), 0xcd },
+ { CCI_REG8(0x3e93), 0xcd }, { CCI_REG8(0x3e94), 0xcd },
+ { CCI_REG8(0x3e95), 0xcd }, { CCI_REG8(0x3e96), 0xcd },
+ { CCI_REG8(0x3e97), 0xcd }, { CCI_REG8(0x3e98), 0xcd },
+ { CCI_REG8(0x3e99), 0xcd }, { CCI_REG8(0x3e9a), 0xcd },
+ { CCI_REG8(0x3e9b), 0xcd }, { CCI_REG8(0x3e9c), 0xcd },
+ { CCI_REG8(0x3e9d), 0xcd }, { CCI_REG8(0x3e9e), 0xcd },
+ { CCI_REG8(0x3e9f), 0xcd }, { CCI_REG8(0xfff9), 0x06 },
+ { CCI_REG8(0xc03f), 0x01 }, { CCI_REG8(0xc03e), 0x08 },
+ { CCI_REG8(0xc02c), 0xff }, { CCI_REG8(0xc005), 0x06 },
+ { CCI_REG8(0xc006), 0x30 }, { CCI_REG8(0xc007), 0xc0 },
+ { CCI_REG8(0xc027), 0x01 }, { CCI_REG8(0x30c0), 0x05 },
+ { CCI_REG8(0x30c1), 0x9f }, { CCI_REG8(0x30c2), 0x06 },
+ { CCI_REG8(0x30c3), 0x5f }, { CCI_REG8(0x30c4), 0x80 },
+ { CCI_REG8(0x30c5), 0x08 }, { CCI_REG8(0x30c6), 0x39 },
+ { CCI_REG8(0x30c7), 0x00 }, { CCI_REG8(0xc046), 0x20 },
+ { CCI_REG8(0xc043), 0x01 }, { CCI_REG8(0xc04b), 0x01 },
+ { CCI_REG8(0x0102), 0x01 }, { CCI_REG8(0x0100), 0x00 },
+ { CCI_REG8(0x0102), 0x00 }, { CCI_REG8(0x3015), 0xf0 },
+ { CCI_REG8(0x3018), 0xf0 }, { CCI_REG8(0x301c), 0xf0 },
+ { CCI_REG8(0x301d), 0xf6 }, { CCI_REG8(0x301e), 0xf1 }
+};
+
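+/*
+ * Mode-specific register tables. Every list below carries the same run of
+ * PLL/MIPI timing writes (0x0305-0x0307, 0x4837, 0x4888, 0x4860, 0x4850,
+ * 0x480c) near its end; the remaining entries are the per-mode geometry
+ * and ISP settings.
+ */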
+static const struct cci_reg_sequence ov64a40_9248x6944[] = {
+ { CCI_REG8(0x0305), 0x98 }, { CCI_REG8(0x0306), 0x04 },
+ { CCI_REG8(0x0307), 0x01 }, { CCI_REG8(0x4837), 0x1a },
+ { CCI_REG8(0x4888), 0x10 }, { CCI_REG8(0x4860), 0x00 },
+ { CCI_REG8(0x4850), 0x43 }, { CCI_REG8(0x480c), 0x92 },
+ { CCI_REG8(0x5001), 0x21 }
+};
+
+static const struct cci_reg_sequence ov64a40_8000x6000[] = {
+ { CCI_REG8(0x0305), 0x98 }, { CCI_REG8(0x0306), 0x04 },
+ { CCI_REG8(0x0307), 0x01 }, { CCI_REG8(0x4837), 0x1a },
+ { CCI_REG8(0x4888), 0x10 }, { CCI_REG8(0x4860), 0x00 },
+ { CCI_REG8(0x4850), 0x43 }, { CCI_REG8(0x480c), 0x92 },
+ { CCI_REG8(0x5001), 0x21 }
+};
+
+static const struct cci_reg_sequence ov64a40_4624x3472[] = {
+ { CCI_REG8(0x034b), 0x02 }, { CCI_REG8(0x3504), 0x08 },
+ { CCI_REG8(0x360d), 0x82 }, { CCI_REG8(0x368a), 0x2e },
+ { CCI_REG8(0x3712), 0x50 }, { CCI_REG8(0x3822), 0x00 },
+ { CCI_REG8(0x3827), 0x40 }, { CCI_REG8(0x383d), 0x08 },
+ { CCI_REG8(0x383f), 0x00 }, { CCI_REG8(0x384c), 0x02 },
+ { CCI_REG8(0x384d), 0xba }, { CCI_REG8(0x3852), 0x00 },
+ { CCI_REG8(0x3856), 0x08 }, { CCI_REG8(0x3857), 0x08 },
+ { CCI_REG8(0x3858), 0x10 }, { CCI_REG8(0x3859), 0x10 },
+ { CCI_REG8(0x4016), 0x0f }, { CCI_REG8(0x4018), 0x03 },
+ { CCI_REG8(0x4504), 0x1e }, { CCI_REG8(0x4523), 0x41 },
+ { CCI_REG8(0x45c0), 0x01 }, { CCI_REG8(0x4641), 0x12 },
+ { CCI_REG8(0x4643), 0x0c }, { CCI_REG8(0x4915), 0x02 },
+ { CCI_REG8(0x4916), 0x1d }, { CCI_REG8(0x4a15), 0x02 },
+ { CCI_REG8(0x4a16), 0x1d }, { CCI_REG8(0x3703), 0x72 },
+ { CCI_REG8(0x3709), 0xe6 }, { CCI_REG8(0x3a60), 0x68 },
+ { CCI_REG8(0x3a6f), 0x68 }, { CCI_REG8(0x3a5e), 0xdc },
+ { CCI_REG8(0x3a6d), 0xdc }, { CCI_REG8(0x3721), 0xc9 },
+ { CCI_REG8(0x5250), 0x06 }, { CCI_REG8(0x527a), 0x00 },
+ { CCI_REG8(0x527b), 0x65 }, { CCI_REG8(0x527c), 0x00 },
+ { CCI_REG8(0x527d), 0x82 }, { CCI_REG8(0x5280), 0x24 },
+ { CCI_REG8(0x5281), 0x40 }, { CCI_REG8(0x5282), 0x1b },
+ { CCI_REG8(0x5283), 0x40 }, { CCI_REG8(0x5284), 0x24 },
+ { CCI_REG8(0x5285), 0x40 }, { CCI_REG8(0x5286), 0x1b },
+ { CCI_REG8(0x5287), 0x40 }, { CCI_REG8(0x5200), 0x24 },
+ { CCI_REG8(0x5201), 0x40 }, { CCI_REG8(0x5202), 0x1b },
+ { CCI_REG8(0x5203), 0x40 }, { CCI_REG8(0x481b), 0x35 },
+ { CCI_REG8(0x4862), 0x25 }, { CCI_REG8(0x3400), 0x00 },
+ { CCI_REG8(0x3421), 0x23 }, { CCI_REG8(0x3422), 0xfc },
+ { CCI_REG8(0x3423), 0x07 }, { CCI_REG8(0x3424), 0x01 },
+ { CCI_REG8(0x3425), 0x04 }, { CCI_REG8(0x3426), 0x50 },
+ { CCI_REG8(0x3427), 0x55 }, { CCI_REG8(0x3428), 0x15 },
+ { CCI_REG8(0x3429), 0x00 }, { CCI_REG8(0x3025), 0x03 },
+ { CCI_REG8(0x5250), 0x06 }, { CCI_REG8(0x0305), 0x98 },
+ { CCI_REG8(0x0306), 0x04 }, { CCI_REG8(0x0307), 0x01 },
+ { CCI_REG8(0x4837), 0x1a }, { CCI_REG8(0x4888), 0x10 },
+ { CCI_REG8(0x4860), 0x00 }, { CCI_REG8(0x4850), 0x43 },
+ { CCI_REG8(0x480c), 0x92 }, { CCI_REG8(0x5001), 0x21 }
+};
+
+static const struct cci_reg_sequence ov64a40_3840x2160[] = {
+ { CCI_REG8(0x034a), 0x05 }, { CCI_REG8(0x034b), 0x05 },
+ { CCI_REG8(0x3504), 0x08 }, { CCI_REG8(0x360d), 0x82 },
+ { CCI_REG8(0x368a), 0x2e }, { CCI_REG8(0x3712), 0x50 },
+ { CCI_REG8(0x3822), 0x00 }, { CCI_REG8(0x3827), 0x40 },
+ { CCI_REG8(0x383d), 0x08 }, { CCI_REG8(0x383f), 0x00 },
+ { CCI_REG8(0x384c), 0x02 }, { CCI_REG8(0x384d), 0xba },
+ { CCI_REG8(0x3852), 0x00 }, { CCI_REG8(0x3856), 0x08 },
+ { CCI_REG8(0x3857), 0x08 }, { CCI_REG8(0x3858), 0x10 },
+ { CCI_REG8(0x3859), 0x10 }, { CCI_REG8(0x4016), 0x0f },
+ { CCI_REG8(0x4018), 0x03 }, { CCI_REG8(0x4504), 0x1e },
+ { CCI_REG8(0x4523), 0x41 }, { CCI_REG8(0x45c0), 0x01 },
+ { CCI_REG8(0x4641), 0x12 }, { CCI_REG8(0x4643), 0x0c },
+ { CCI_REG8(0x4915), 0x02 }, { CCI_REG8(0x4916), 0x1d },
+ { CCI_REG8(0x4a15), 0x02 }, { CCI_REG8(0x4a16), 0x1d },
+ { CCI_REG8(0x3703), 0x72 }, { CCI_REG8(0x3709), 0xe6 },
+ { CCI_REG8(0x3a60), 0x68 }, { CCI_REG8(0x3a6f), 0x68 },
+ { CCI_REG8(0x3a5e), 0xdc }, { CCI_REG8(0x3a6d), 0xdc },
+ { CCI_REG8(0x3721), 0xc9 }, { CCI_REG8(0x5250), 0x06 },
+ { CCI_REG8(0x527a), 0x00 }, { CCI_REG8(0x527b), 0x65 },
+ { CCI_REG8(0x527c), 0x00 }, { CCI_REG8(0x527d), 0x82 },
+ { CCI_REG8(0x5280), 0x24 }, { CCI_REG8(0x5281), 0x40 },
+ { CCI_REG8(0x5282), 0x1b }, { CCI_REG8(0x5283), 0x40 },
+ { CCI_REG8(0x5284), 0x24 }, { CCI_REG8(0x5285), 0x40 },
+ { CCI_REG8(0x5286), 0x1b }, { CCI_REG8(0x5287), 0x40 },
+ { CCI_REG8(0x5200), 0x24 }, { CCI_REG8(0x5201), 0x40 },
+ { CCI_REG8(0x5202), 0x1b }, { CCI_REG8(0x5203), 0x40 },
+ { CCI_REG8(0x481b), 0x35 }, { CCI_REG8(0x4862), 0x25 },
+ { CCI_REG8(0x3400), 0x00 }, { CCI_REG8(0x3421), 0x23 },
+ { CCI_REG8(0x3422), 0xfc }, { CCI_REG8(0x3423), 0x07 },
+ { CCI_REG8(0x3424), 0x01 }, { CCI_REG8(0x3425), 0x04 },
+ { CCI_REG8(0x3426), 0x50 }, { CCI_REG8(0x3427), 0x55 },
+ { CCI_REG8(0x3428), 0x15 }, { CCI_REG8(0x3429), 0x00 },
+ { CCI_REG8(0x3025), 0x03 }, { CCI_REG8(0x5250), 0x06 },
+ { CCI_REG8(0x0305), 0x98 }, { CCI_REG8(0x0306), 0x04 },
+ { CCI_REG8(0x0345), 0x90 }, { CCI_REG8(0x0307), 0x01 },
+ { CCI_REG8(0x4837), 0x1a }, { CCI_REG8(0x4888), 0x10 },
+ { CCI_REG8(0x4860), 0x00 }, { CCI_REG8(0x4850), 0x43 },
+ { CCI_REG8(0x480c), 0x92 }, { CCI_REG8(0x5001), 0x21 },
+ { CCI_REG8(0x5000), 0x01 }
+};
+
+static const struct cci_reg_sequence ov64a40_2312x1736[] = {
+ { CCI_REG8(0x034b), 0x02 }, { CCI_REG8(0x3504), 0x08 },
+ { CCI_REG8(0x360d), 0x82 }, { CCI_REG8(0x368a), 0x2e },
+ { CCI_REG8(0x3712), 0x00 }, { CCI_REG8(0x3822), 0x08 },
+ { CCI_REG8(0x3827), 0x40 }, { CCI_REG8(0x383d), 0x04 },
+ { CCI_REG8(0x383f), 0x00 }, { CCI_REG8(0x384c), 0x01 },
+ { CCI_REG8(0x384d), 0x12 }, { CCI_REG8(0x3852), 0x00 },
+ { CCI_REG8(0x3856), 0x04 }, { CCI_REG8(0x3857), 0x04 },
+ { CCI_REG8(0x3858), 0x08 }, { CCI_REG8(0x3859), 0x08 },
+ { CCI_REG8(0x4016), 0x07 }, { CCI_REG8(0x4018), 0x01 },
+ { CCI_REG8(0x4504), 0x00 }, { CCI_REG8(0x4523), 0x00 },
+ { CCI_REG8(0x45c0), 0x01 }, { CCI_REG8(0x4641), 0x24 },
+ { CCI_REG8(0x4643), 0x0c }, { CCI_REG8(0x4837), 0x0b },
+ { CCI_REG8(0x4915), 0x02 }, { CCI_REG8(0x4916), 0x1d },
+ { CCI_REG8(0x4a15), 0x02 }, { CCI_REG8(0x4a16), 0x1d },
+ { CCI_REG8(0x5000), 0x55 }, { CCI_REG8(0x5001), 0x00 },
+ { CCI_REG8(0x5002), 0x35 }, { CCI_REG8(0x5004), 0xc0 },
+ { CCI_REG8(0x5068), 0x02 }, { CCI_REG8(0x3703), 0x6a },
+ { CCI_REG8(0x3709), 0xa3 }, { CCI_REG8(0x3a60), 0x60 },
+ { CCI_REG8(0x3a6f), 0x60 }, { CCI_REG8(0x3a5e), 0x99 },
+ { CCI_REG8(0x3a6d), 0x99 }, { CCI_REG8(0x3721), 0xc1 },
+ { CCI_REG8(0x5250), 0x06 }, { CCI_REG8(0x527a), 0x00 },
+ { CCI_REG8(0x527b), 0x65 }, { CCI_REG8(0x527c), 0x00 },
+ { CCI_REG8(0x527d), 0x82 }, { CCI_REG8(0x5280), 0x24 },
+ { CCI_REG8(0x5281), 0x40 }, { CCI_REG8(0x5282), 0x1b },
+ { CCI_REG8(0x5283), 0x40 }, { CCI_REG8(0x5284), 0x24 },
+ { CCI_REG8(0x5285), 0x40 }, { CCI_REG8(0x5286), 0x1b },
+ { CCI_REG8(0x5287), 0x40 }, { CCI_REG8(0x5200), 0x24 },
+ { CCI_REG8(0x5201), 0x40 }, { CCI_REG8(0x5202), 0x1b },
+ { CCI_REG8(0x5203), 0x40 }, { CCI_REG8(0x3684), 0x05 },
+ { CCI_REG8(0x481b), 0x20 }, { CCI_REG8(0x51b0), 0x38 },
+ { CCI_REG8(0x51b3), 0x0e }, { CCI_REG8(0x51b5), 0x04 },
+ { CCI_REG8(0x51b6), 0x00 }, { CCI_REG8(0x51b7), 0x00 },
+ { CCI_REG8(0x51b9), 0x70 }, { CCI_REG8(0x51bb), 0x10 },
+ { CCI_REG8(0x51bc), 0x00 }, { CCI_REG8(0x51bd), 0x00 },
+ { CCI_REG8(0x51b0), 0x38 }, { CCI_REG8(0x54b0), 0x38 },
+ { CCI_REG8(0x54b3), 0x0e }, { CCI_REG8(0x54b5), 0x04 },
+ { CCI_REG8(0x54b6), 0x00 }, { CCI_REG8(0x54b7), 0x00 },
+ { CCI_REG8(0x54b9), 0x70 }, { CCI_REG8(0x54bb), 0x10 },
+ { CCI_REG8(0x54bc), 0x00 }, { CCI_REG8(0x54bd), 0x00 },
+ { CCI_REG8(0x57b0), 0x38 }, { CCI_REG8(0x57b3), 0x0e },
+ { CCI_REG8(0x57b5), 0x04 }, { CCI_REG8(0x57b6), 0x00 },
+ { CCI_REG8(0x57b7), 0x00 }, { CCI_REG8(0x57b9), 0x70 },
+ { CCI_REG8(0x57bb), 0x10 }, { CCI_REG8(0x57bc), 0x00 },
+ { CCI_REG8(0x57bd), 0x00 }, { CCI_REG8(0x0305), 0x98 },
+ { CCI_REG8(0x0306), 0x04 }, { CCI_REG8(0x0307), 0x01 },
+ { CCI_REG8(0x4837), 0x1a }, { CCI_REG8(0x4888), 0x10 },
+ { CCI_REG8(0x4860), 0x00 }, { CCI_REG8(0x4850), 0x43 },
+ { CCI_REG8(0x480c), 0x92 }
+};
+
+static const struct cci_reg_sequence ov64a40_1920x1080[] = {
+ { CCI_REG8(0x034b), 0x02 }, { CCI_REG8(0x3504), 0x08 },
+ { CCI_REG8(0x360d), 0x82 }, { CCI_REG8(0x368a), 0x2e },
+ { CCI_REG8(0x3712), 0x00 }, { CCI_REG8(0x3822), 0x08 },
+ { CCI_REG8(0x3827), 0x40 }, { CCI_REG8(0x383d), 0x04 },
+ { CCI_REG8(0x383f), 0x00 }, { CCI_REG8(0x384c), 0x01 },
+ { CCI_REG8(0x384d), 0x12 }, { CCI_REG8(0x3852), 0x00 },
+ { CCI_REG8(0x3856), 0x04 }, { CCI_REG8(0x3857), 0x04 },
+ { CCI_REG8(0x3858), 0x08 }, { CCI_REG8(0x3859), 0x08 },
+ { CCI_REG8(0x4016), 0x07 }, { CCI_REG8(0x4018), 0x01 },
+ { CCI_REG8(0x4504), 0x00 }, { CCI_REG8(0x4523), 0x00 },
+ { CCI_REG8(0x45c0), 0x01 }, { CCI_REG8(0x4641), 0x24 },
+ { CCI_REG8(0x4643), 0x0c }, { CCI_REG8(0x4837), 0x0b },
+ { CCI_REG8(0x4915), 0x02 }, { CCI_REG8(0x4916), 0x1d },
+ { CCI_REG8(0x4a15), 0x02 }, { CCI_REG8(0x4a16), 0x1d },
+ { CCI_REG8(0x5000), 0x55 }, { CCI_REG8(0x5001), 0x00 },
+ { CCI_REG8(0x5002), 0x35 }, { CCI_REG8(0x5004), 0xc0 },
+ { CCI_REG8(0x5068), 0x02 }, { CCI_REG8(0x3703), 0x6a },
+ { CCI_REG8(0x3709), 0xa3 }, { CCI_REG8(0x3a60), 0x60 },
+ { CCI_REG8(0x3a6f), 0x60 }, { CCI_REG8(0x3a5e), 0x99 },
+ { CCI_REG8(0x3a6d), 0x99 }, { CCI_REG8(0x3721), 0xc1 },
+ { CCI_REG8(0x5250), 0x06 }, { CCI_REG8(0x527a), 0x00 },
+ { CCI_REG8(0x527b), 0x65 }, { CCI_REG8(0x527c), 0x00 },
+ { CCI_REG8(0x527d), 0x82 }, { CCI_REG8(0x5280), 0x24 },
+ { CCI_REG8(0x5281), 0x40 }, { CCI_REG8(0x5282), 0x1b },
+ { CCI_REG8(0x5283), 0x40 }, { CCI_REG8(0x5284), 0x24 },
+ { CCI_REG8(0x5285), 0x40 }, { CCI_REG8(0x5286), 0x1b },
+ { CCI_REG8(0x5287), 0x40 }, { CCI_REG8(0x5200), 0x24 },
+ { CCI_REG8(0x5201), 0x40 }, { CCI_REG8(0x5202), 0x1b },
+ { CCI_REG8(0x5203), 0x40 }, { CCI_REG8(0x3684), 0x05 },
+ { CCI_REG8(0x481b), 0x20 }, { CCI_REG8(0x51b0), 0x38 },
+ { CCI_REG8(0x51b3), 0x0e }, { CCI_REG8(0x51b5), 0x04 },
+ { CCI_REG8(0x51b6), 0x00 }, { CCI_REG8(0x51b7), 0x00 },
+ { CCI_REG8(0x51b9), 0x70 }, { CCI_REG8(0x51bb), 0x10 },
+ { CCI_REG8(0x51bc), 0x00 }, { CCI_REG8(0x51bd), 0x00 },
+ { CCI_REG8(0x51b0), 0x38 }, { CCI_REG8(0x54b0), 0x38 },
+ { CCI_REG8(0x54b3), 0x0e }, { CCI_REG8(0x54b5), 0x04 },
+ { CCI_REG8(0x54b6), 0x00 }, { CCI_REG8(0x54b7), 0x00 },
+ { CCI_REG8(0x54b9), 0x70 }, { CCI_REG8(0x54bb), 0x10 },
+ { CCI_REG8(0x54bc), 0x00 }, { CCI_REG8(0x54bd), 0x00 },
+ { CCI_REG8(0x57b0), 0x38 }, { CCI_REG8(0x57b3), 0x0e },
+ { CCI_REG8(0x57b5), 0x04 }, { CCI_REG8(0x57b6), 0x00 },
+ { CCI_REG8(0x57b7), 0x00 }, { CCI_REG8(0x57b9), 0x70 },
+ { CCI_REG8(0x57bb), 0x10 }, { CCI_REG8(0x57bc), 0x00 },
+ { CCI_REG8(0x57bd), 0x00 }, { CCI_REG8(0x0305), 0x98 },
+ { CCI_REG8(0x0306), 0x04 }, { CCI_REG8(0x0307), 0x01 },
+ { CCI_REG8(0x4837), 0x1a }, { CCI_REG8(0x4888), 0x10 },
+ { CCI_REG8(0x4860), 0x00 }, { CCI_REG8(0x4850), 0x43 },
+ { CCI_REG8(0x480c), 0x92 }
+};
+
+/* 456MHz MIPI link frequency with 24MHz input clock. */
+static const struct cci_reg_sequence ov64a40_pll_config[] = {
+ { OV64A40_PLL1_PRE_DIV0, 0x88 },
+ { OV64A40_PLL1_PRE_DIV, 0x02 },
+ { OV64A40_PLL1_MULTIPLIER, 0x0098 },
+ { OV64A40_PLL1_M_DIV, 0x01 },
+ { OV64A40_PLL2_SEL_BAK_SA1, 0x00 },
+ { OV64A40_PLL2_PRE_DIV, 0x12 },
+ { OV64A40_PLL2_MULTIPLIER, 0x0190 },
+ { OV64A40_PLL2_PRE_DIV0, 0xd7 },
+ { OV64A40_PLL2_DIVSP, 0x00 },
+ { OV64A40_PLL2_DIVDAC, 0x00 },
+ { OV64A40_PLL2_DACPREDIV, 0x00 }
+};
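+/*
+ * The exact divider chain is not documented here, but the values are at
+ * least consistent with the comment above: 24MHz x 0x98 (152) / 8 gives
+ * exactly 456MHz. Treat that reading as an assumption, not datasheet fact.
+ */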
+
+struct ov64a40_reglist {
+ unsigned int num_regs;
+ const struct cci_reg_sequence *regvals;
+};
+
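+/*
+ * Per-mode skipping/binning parameters. Assuming the usual SMIA-style
+ * convention for the increment pairs (not confirmed by a datasheet here),
+ * {odd_inc = 3, even_inc = 1} reads two pixels and then skips two, i.e.
+ * 2x subsampling on that axis, while {1, 1} reads every pixel. vbin and
+ * hbin additionally enable vertical/horizontal binning.
+ */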
+struct ov64a40_subsampling {
+ unsigned int x_odd_inc;
+ unsigned int x_even_inc;
+ unsigned int y_odd_inc;
+ unsigned int y_even_inc;
+ bool vbin;
+ bool hbin;
+};
+
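+/*
+ * Mode descriptions. vts is the total frame length in lines and ppl the
+ * line length in pixels. Given the internal x4 line-length multiplier
+ * noted in ov64a40_start_streaming(), the FPS figures quoted below are
+ * consistent with fps ~= pixel_rate / (4 * ppl * vts).
+ */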
+static struct ov64a40_mode {
+ unsigned int width;
+ unsigned int height;
+ struct ov64a40_timings {
+ unsigned int vts;
+ unsigned int ppl;
+ } timings_default[OV64A40_NUM_LINK_FREQ];
+ const struct ov64a40_reglist reglist;
+ struct v4l2_rect analogue_crop;
+ struct v4l2_rect digital_crop;
+ struct ov64a40_subsampling subsampling;
+} ov64a40_modes[] = {
+ /* Full resolution */
+ {
+ .width = 9248,
+ .height = 6944,
+ .timings_default = {
+ /* 2.6 FPS */
+ [OV64A40_LINK_FREQ_456M_ID] = {
+ .vts = 7072,
+ .ppl = 4072,
+ },
+ /* 2 FPS */
+ [OV64A40_LINK_FREQ_360M_ID] = {
+ .vts = 7072,
+ .ppl = 5248,
+ },
+ },
+ .reglist = {
+ .num_regs = ARRAY_SIZE(ov64a40_9248x6944),
+ .regvals = ov64a40_9248x6944,
+ },
+ .analogue_crop = {
+ .left = 0,
+ .top = 0,
+ .width = 9280,
+ .height = 6976,
+ },
+ .digital_crop = {
+ .left = 17,
+ .top = 16,
+ .width = 9248,
+ .height = 6944,
+ },
+ .subsampling = {
+ .x_odd_inc = 1,
+ .x_even_inc = 1,
+ .y_odd_inc = 1,
+ .y_even_inc = 1,
+ .vbin = false,
+ .hbin = false,
+ },
+ },
+ /* Analogue crop + digital crop */
+ {
+ .width = 8000,
+ .height = 6000,
+ .timings_default = {
+ /* 3.0 FPS */
+ [OV64A40_LINK_FREQ_456M_ID] = {
+ .vts = 6400,
+ .ppl = 3848,
+ },
+ /* 2.5 FPS */
+ [OV64A40_LINK_FREQ_360M_ID] = {
+ .vts = 6304,
+ .ppl = 4736,
+ },
+ },
+ .reglist = {
+ .num_regs = ARRAY_SIZE(ov64a40_8000x6000),
+ .regvals = ov64a40_8000x6000,
+ },
+ .analogue_crop = {
+ .left = 624,
+ .top = 472,
+ .width = 8048,
+ .height = 6032,
+ },
+ .digital_crop = {
+ .left = 17,
+ .top = 16,
+ .width = 8000,
+ .height = 6000,
+ },
+ .subsampling = {
+ .x_odd_inc = 1,
+ .x_even_inc = 1,
+ .y_odd_inc = 1,
+ .y_even_inc = 1,
+ .vbin = false,
+ .hbin = false,
+ },
+ },
+ /* 2x2 downscaled */
+ {
+ .width = 4624,
+ .height = 3472,
+ .timings_default = {
+ /* 10 FPS */
+ [OV64A40_LINK_FREQ_456M_ID] = {
+ .vts = 3533,
+ .ppl = 2112,
+ },
+ /* 7 FPS */
+ [OV64A40_LINK_FREQ_360M_ID] = {
+ .vts = 3939,
+ .ppl = 2720,
+ },
+ },
+ .reglist = {
+ .num_regs = ARRAY_SIZE(ov64a40_4624x3472),
+ .regvals = ov64a40_4624x3472,
+ },
+ .analogue_crop = {
+ .left = 0,
+ .top = 0,
+ .width = 9280,
+ .height = 6976,
+ },
+ .digital_crop = {
+ .left = 9,
+ .top = 8,
+ .width = 4624,
+ .height = 3472,
+ },
+ .subsampling = {
+ .x_odd_inc = 3,
+ .x_even_inc = 1,
+ .y_odd_inc = 1,
+ .y_even_inc = 1,
+ .vbin = true,
+ .hbin = false,
+ },
+ },
+ /* Analogue crop + 2x2 downscale + digital crop */
+ {
+ .width = 3840,
+ .height = 2160,
+ .timings_default = {
+ /* 20 FPS */
+ [OV64A40_LINK_FREQ_456M_ID] = {
+ .vts = 2218,
+ .ppl = 1690,
+ },
+ /* 15 FPS */
+ [OV64A40_LINK_FREQ_360M_ID] = {
+ .vts = 2270,
+ .ppl = 2202,
+ },
+ },
+ .reglist = {
+ .num_regs = ARRAY_SIZE(ov64a40_3840x2160),
+ .regvals = ov64a40_3840x2160,
+ },
+ .analogue_crop = {
+ .left = 784,
+ .top = 1312,
+ .width = 7712,
+ .height = 4352,
+ },
+ .digital_crop = {
+ .left = 9,
+ .top = 8,
+ .width = 3840,
+ .height = 2160,
+ },
+ .subsampling = {
+ .x_odd_inc = 3,
+ .x_even_inc = 1,
+ .y_odd_inc = 1,
+ .y_even_inc = 1,
+ .vbin = true,
+ .hbin = false,
+ },
+ },
+ /* 4x4 downscaled */
+ {
+ .width = 2312,
+ .height = 1736,
+ .timings_default = {
+ /* 30 FPS */
+ [OV64A40_LINK_FREQ_456M_ID] = {
+ .vts = 1998,
+ .ppl = 1248,
+ },
+ /* 25 FPS */
+ [OV64A40_LINK_FREQ_360M_ID] = {
+ .vts = 1994,
+ .ppl = 1504,
+ },
+ },
+ .reglist = {
+ .num_regs = ARRAY_SIZE(ov64a40_2312x1736),
+ .regvals = ov64a40_2312x1736,
+ },
+ .analogue_crop = {
+ .left = 0,
+ .top = 0,
+ .width = 9280,
+ .height = 6976,
+ },
+ .digital_crop = {
+ .left = 5,
+ .top = 4,
+ .width = 2312,
+ .height = 1736,
+ },
+ .subsampling = {
+ .x_odd_inc = 3,
+ .x_even_inc = 1,
+ .y_odd_inc = 3,
+ .y_even_inc = 1,
+ .vbin = true,
+ .hbin = true,
+ },
+ },
+ /* Analogue crop + 4x4 downscale + digital crop */
+ {
+ .width = 1920,
+ .height = 1080,
+ .timings_default = {
+ /* 60 FPS */
+ [OV64A40_LINK_FREQ_456M_ID] = {
+ .vts = 1397,
+ .ppl = 880,
+ },
+ /* 45 FPS */
+ [OV64A40_LINK_FREQ_360M_ID] = {
+ .vts = 1216,
+ .ppl = 1360,
+ },
+ },
+ .reglist = {
+ .num_regs = ARRAY_SIZE(ov64a40_1920x1080),
+ .regvals = ov64a40_1920x1080,
+ },
+ .analogue_crop = {
+ .left = 784,
+ .top = 1312,
+ .width = 7712,
+ .height = 4352,
+ },
+ .digital_crop = {
+ .left = 7,
+ .top = 6,
+ .width = 1920,
+ .height = 1080,
+ },
+ .subsampling = {
+ .x_odd_inc = 3,
+ .x_even_inc = 1,
+ .y_odd_inc = 3,
+ .y_even_inc = 1,
+ .vbin = true,
+ .hbin = true,
+ },
+ },
+};
+
+struct ov64a40 {
+ struct device *dev;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct regmap *cci;
+
+ struct ov64a40_mode *mode;
+
+ struct clk *xclk;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov64a40_supply_names)];
+
+ s64 *link_frequencies;
+ unsigned int num_link_frequencies;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+};
+
+static inline struct ov64a40 *sd_to_ov64a40(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct ov64a40, sd);
+}
+
+static const struct ov64a40_timings *
+ov64a40_get_timings(struct ov64a40 *ov64a40, unsigned int link_freq_index)
+{
+ s64 link_freq = ov64a40->link_frequencies[link_freq_index];
+ unsigned int timings_index = link_freq == OV64A40_LINK_FREQ_360M
+ ? OV64A40_LINK_FREQ_360M_ID
+ : OV64A40_LINK_FREQ_456M_ID;
+
+ return &ov64a40->mode->timings_default[timings_index];
+}
+
+static int ov64a40_program_geometry(struct ov64a40 *ov64a40)
+{
+ struct ov64a40_mode *mode = ov64a40->mode;
+ struct v4l2_rect *anacrop = &mode->analogue_crop;
+ struct v4l2_rect *digicrop = &mode->digital_crop;
+ const struct ov64a40_timings *timings;
+ int ret = 0;
+
+ /* Analogue crop. */
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL0,
+ anacrop->left, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL2,
+ anacrop->top, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL4,
+ anacrop->width + anacrop->left - 1, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL6,
+ anacrop->height + anacrop->top - 1, &ret);
+
+ /* ISP windowing. */
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL10,
+ digicrop->left, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL12,
+ digicrop->top, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL8,
+ digicrop->width, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRLA,
+ digicrop->height, &ret);
+
+ /* Total timings. */
+ timings = ov64a40_get_timings(ov64a40, ov64a40->link_freq->cur.val);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRLC, timings->ppl, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRLE, timings->vts, &ret);
+
+ return ret;
+}
+
+static int ov64a40_program_subsampling(struct ov64a40 *ov64a40)
+{
+ struct ov64a40_subsampling *subsampling = &ov64a40->mode->subsampling;
+ int ret = 0;
+
+ /* Skipping configuration */
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL14,
+ OV64A40_SKIPPING_CONFIG(subsampling->x_odd_inc,
+ subsampling->x_even_inc), &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMING_CTRL15,
+ OV64A40_SKIPPING_CONFIG(subsampling->y_odd_inc,
+ subsampling->y_even_inc), &ret);
+
+ /* Binning configuration */
+ cci_update_bits(ov64a40->cci, OV64A40_REG_TIMING_CTRL_20,
+ OV64A40_TIMING_CTRL_20_VBIN,
+ subsampling->vbin ? OV64A40_TIMING_CTRL_20_VBIN : 0,
+ &ret);
+ cci_update_bits(ov64a40->cci, OV64A40_REG_TIMING_CTRL_21,
+ OV64A40_TIMING_CTRL_21_HBIN_CONF,
+ subsampling->hbin ?
+ OV64A40_TIMING_CTRL_21_HBIN_CONF : 0, &ret);
+
+ return ret;
+}
+
+static int ov64a40_start_streaming(struct ov64a40 *ov64a40,
+ struct v4l2_subdev_state *state)
+{
+ const struct ov64a40_reglist *reglist = &ov64a40->mode->reglist;
+ const struct ov64a40_timings *timings;
+ unsigned long delay;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ov64a40->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = cci_multi_reg_write(ov64a40->cci, ov64a40_init,
+ ARRAY_SIZE(ov64a40_init), NULL);
+ if (ret)
+ goto error_power_off;
+
+ ret = cci_multi_reg_write(ov64a40->cci, reglist->regvals,
+ reglist->num_regs, NULL);
+ if (ret)
+ goto error_power_off;
+
+ ret = ov64a40_program_geometry(ov64a40);
+ if (ret)
+ goto error_power_off;
+
+ ret = ov64a40_program_subsampling(ov64a40);
+ if (ret)
+ goto error_power_off;
+
+ ret = __v4l2_ctrl_handler_setup(&ov64a40->ctrl_handler);
+ if (ret)
+ goto error_power_off;
+
+ ret = cci_write(ov64a40->cci, OV64A40_REG_SMIA,
+ OV64A40_REG_SMIA_STREAMING, NULL);
+ if (ret)
+ goto error_power_off;
+
+ /* Link frequency and flips cannot change while streaming. */
+ __v4l2_ctrl_grab(ov64a40->link_freq, true);
+ __v4l2_ctrl_grab(ov64a40->vflip, true);
+ __v4l2_ctrl_grab(ov64a40->hflip, true);
+
+	/* delay: max(4096 xclk pulses, 150 us) + exposure time */
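+	/* With the 24 MHz xclk this driver requires, 4096 pulses take ~171 us. */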
+ timings = ov64a40_get_timings(ov64a40, ov64a40->link_freq->cur.val);
+ delay = DIV_ROUND_UP(4096, OV64A40_XCLK_FREQ / 1000 / 1000);
+ delay = max(delay, 150ul);
+
+ /* The sensor has an internal x4 multiplier on the line length. */
+ delay += DIV_ROUND_UP(timings->ppl * 4 * ov64a40->exposure->cur.val,
+ OV64A40_PIXEL_RATE / 1000 / 1000);
+ fsleep(delay);
+
+ return 0;
+
+error_power_off:
+ pm_runtime_mark_last_busy(ov64a40->dev);
+ pm_runtime_put_autosuspend(ov64a40->dev);
+
+ return ret;
+}
+
+static int ov64a40_stop_streaming(struct ov64a40 *ov64a40,
+ struct v4l2_subdev_state *state)
+{
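+	/* Return the sensor to standby by clearing the SMIA streaming bit. */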
+ cci_update_bits(ov64a40->cci, OV64A40_REG_SMIA, BIT(0), 0, NULL);
+ pm_runtime_mark_last_busy(ov64a40->dev);
+ pm_runtime_put_autosuspend(ov64a40->dev);
+
+ __v4l2_ctrl_grab(ov64a40->link_freq, false);
+ __v4l2_ctrl_grab(ov64a40->vflip, false);
+ __v4l2_ctrl_grab(ov64a40->hflip, false);
+
+ return 0;
+}
+
+static int ov64a40_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (enable)
+ ret = ov64a40_start_streaming(ov64a40, state);
+ else
+ ret = ov64a40_stop_streaming(ov64a40, state);
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops ov64a40_video_ops = {
+ .s_stream = ov64a40_set_stream,
+};
+
+static u32 ov64a40_mbus_code(struct ov64a40 *ov64a40)
+{
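+	/* Flips swap the Bayer order; pick the matching media bus code. */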
+ unsigned int index = ov64a40->hflip->val << 1 | ov64a40->vflip->val;
+
+ return ov64a40_mbus_codes[index];
+}
+
+static void ov64a40_update_pad_fmt(struct ov64a40 *ov64a40,
+ struct ov64a40_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = ov64a40_mbus_code(ov64a40);
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+}
+
+static int ov64a40_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ ov64a40_update_pad_fmt(ov64a40, &ov64a40_modes[0], format);
+
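+	/* Default the crop rectangle to the full active pixel array. */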
+ crop = v4l2_subdev_state_get_crop(state, 0);
+ crop->top = OV64A40_PIXEL_ARRAY_TOP;
+ crop->left = OV64A40_PIXEL_ARRAY_LEFT;
+ crop->width = OV64A40_PIXEL_ARRAY_WIDTH;
+ crop->height = OV64A40_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+}
+
+static int ov64a40_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = ov64a40_mbus_code(ov64a40);
+
+ return 0;
+}
+
+static int ov64a40_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+ struct ov64a40_mode *mode;
+ u32 code;
+
+ if (fse->index >= ARRAY_SIZE(ov64a40_modes))
+ return -EINVAL;
+
+ code = ov64a40_mbus_code(ov64a40);
+ if (fse->code != code)
+ return -EINVAL;
+
+ mode = &ov64a40_modes[fse->index];
+ fse->min_width = mode->width;
+ fse->max_width = mode->width;
+ fse->min_height = mode->height;
+ fse->max_height = mode->height;
+
+ return 0;
+}
+
+static int ov64a40_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(state, 0);
+
+ return 0;
+
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV64A40_NATIVE_WIDTH;
+ sel->r.height = OV64A40_NATIVE_HEIGHT;
+
+ return 0;
+
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = OV64A40_PIXEL_ARRAY_TOP;
+ sel->r.left = OV64A40_PIXEL_ARRAY_LEFT;
+ sel->r.width = OV64A40_PIXEL_ARRAY_WIDTH;
+ sel->r.height = OV64A40_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ov64a40_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct ov64a40_mode *mode;
+
+ mode = v4l2_find_nearest_size(ov64a40_modes,
+ ARRAY_SIZE(ov64a40_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ ov64a40_update_pad_fmt(ov64a40, mode, &fmt->format);
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ if (ov64a40->mode == mode && format->code == fmt->format.code)
+ return 0;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ const struct ov64a40_timings *timings;
+ int vblank_max, vblank_def;
+ int hblank_val;
+ int exp_max;
+
+ ov64a40->mode = mode;
+ *v4l2_subdev_state_get_crop(state, 0) = mode->analogue_crop;
+
+ /* Update control limits according to the new mode. */
+ timings = ov64a40_get_timings(ov64a40,
+ ov64a40->link_freq->cur.val);
+ vblank_max = OV64A40_VTS_MAX - mode->height;
+ vblank_def = timings->vts - mode->height;
+ __v4l2_ctrl_modify_range(ov64a40->vblank, OV64A40_VBLANK_MIN,
+ vblank_max, 1, vblank_def);
+ __v4l2_ctrl_s_ctrl(ov64a40->vblank, vblank_def);
+
+ exp_max = timings->vts - OV64A40_EXPOSURE_MARGIN;
+ __v4l2_ctrl_modify_range(ov64a40->exposure,
+ OV64A40_EXPOSURE_MIN, exp_max,
+ 1, OV64A40_EXPOSURE_MIN);
+
+ hblank_val = timings->ppl * 4 - mode->width;
+ __v4l2_ctrl_modify_range(ov64a40->hblank,
+ hblank_val, hblank_val, 1, hblank_val);
+ }
+
+ *format = fmt->format;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ov64a40_pad_ops = {
+ .enum_mbus_code = ov64a40_enum_mbus_code,
+ .enum_frame_size = ov64a40_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ov64a40_set_format,
+ .get_selection = ov64a40_get_selection,
+};
+
+static const struct v4l2_subdev_core_ops ov64a40_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops ov64a40_subdev_ops = {
+ .core = &ov64a40_core_ops,
+ .video = &ov64a40_video_ops,
+ .pad = &ov64a40_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov64a40_internal_ops = {
+ .init_state = ov64a40_init_state,
+};
+
+static int ov64a40_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov64a40->xclk);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov64a40_supply_names),
+ ov64a40->supplies);
+ if (ret) {
+ clk_disable_unprepare(ov64a40->xclk);
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ov64a40->reset_gpio, 0);
+
+ fsleep(5000);
+
+ return 0;
+}
+
+static int ov64a40_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov64a40 *ov64a40 = sd_to_ov64a40(sd);
+
+ gpiod_set_value_cansleep(ov64a40->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ov64a40_supply_names),
+ ov64a40->supplies);
+ clk_disable_unprepare(ov64a40->xclk);
+
+ return 0;
+}
+
+static int ov64a40_link_freq_config(struct ov64a40 *ov64a40, int link_freq_id)
+{
+ s64 link_frequency;
+ int ret = 0;
+
+	/* Default 456 MHz with a 24 MHz input clock. */
+ cci_multi_reg_write(ov64a40->cci, ov64a40_pll_config,
+ ARRAY_SIZE(ov64a40_pll_config), &ret);
+
+	/* Decrease the PLL1 multiplier to obtain the 360 MHz MIPI link frequency. */
+ link_frequency = ov64a40->link_frequencies[link_freq_id];
+ if (link_frequency == OV64A40_LINK_FREQ_360M)
+ cci_write(ov64a40->cci, OV64A40_PLL1_MULTIPLIER, 0x0078, &ret);
+
+ return ret;
+}
+
+static int ov64a40_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov64a40 *ov64a40 = container_of(ctrl->handler, struct ov64a40,
+ ctrl_handler);
+ int pm_status;
+ int ret = 0;
+
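+	/*
+	 * Changing vblank changes the frame length and thus the maximum
+	 * exposure; adjust the exposure control range first.
+	 */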
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exp_max = ov64a40->mode->height + ctrl->val
+ - OV64A40_EXPOSURE_MARGIN;
+ int exp_val = min(ov64a40->exposure->cur.val, exp_max);
+
+ __v4l2_ctrl_modify_range(ov64a40->exposure,
+ ov64a40->exposure->minimum,
+ exp_max, 1, exp_val);
+ }
+
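+	/*
+	 * If the sensor is powered down, only cache the value: it is applied
+	 * by __v4l2_ctrl_handler_setup() at stream-on time.
+	 */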
+ pm_status = pm_runtime_get_if_active(ov64a40->dev, true);
+ if (!pm_status)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ret = cci_write(ov64a40->cci, OV64A40_REG_MEC_LONG_EXPO,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(ov64a40->cci, OV64A40_REG_MEC_LONG_GAIN,
+ ctrl->val << 1, NULL);
+ break;
+ case V4L2_CID_VBLANK: {
+ int vts = ctrl->val + ov64a40->mode->height;
+
+ cci_write(ov64a40->cci, OV64A40_REG_TIMINGS_VTS_LOW, vts, &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMINGS_VTS_MID,
+ (vts >> 8), &ret);
+ cci_write(ov64a40->cci, OV64A40_REG_TIMINGS_VTS_HIGH,
+ (vts >> 16), &ret);
+ break;
+ }
+ case V4L2_CID_VFLIP:
+ ret = cci_update_bits(ov64a40->cci, OV64A40_REG_TIMING_CTRL_20,
+ OV64A40_TIMING_CTRL_20_VFLIP,
+ ctrl->val << 2,
+ NULL);
+ break;
+ case V4L2_CID_HFLIP:
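+		/* Note the inverted sense: the bit is cleared to mirror the image. */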
+ ret = cci_update_bits(ov64a40->cci, OV64A40_REG_TIMING_CTRL_21,
+ OV64A40_TIMING_CTRL_21_HFLIP,
+ ctrl->val ? 0
+ : OV64A40_TIMING_CTRL_21_HFLIP,
+ NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = cci_write(ov64a40->cci, OV64A40_REG_TEST_PATTERN,
+ ov64a40_test_pattern_val[ctrl->val], NULL);
+ break;
+ case V4L2_CID_LINK_FREQ:
+ ret = ov64a40_link_freq_config(ov64a40, ctrl->val);
+ break;
+ default:
+ dev_err(ov64a40->dev, "Unhandled control: %#x\n", ctrl->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pm_status > 0) {
+ pm_runtime_mark_last_busy(ov64a40->dev);
+ pm_runtime_put_autosuspend(ov64a40->dev);
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov64a40_ctrl_ops = {
+ .s_ctrl = ov64a40_set_ctrl,
+};
+
+static int ov64a40_init_controls(struct ov64a40 *ov64a40)
+{
+ int exp_max, hblank_val, vblank_max, vblank_def;
+ struct v4l2_ctrl_handler *hdlr = &ov64a40->ctrl_handler;
+ struct v4l2_fwnode_device_properties props;
+ const struct ov64a40_timings *timings;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(hdlr, 11);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ OV64A40_PIXEL_RATE, OV64A40_PIXEL_RATE, 1,
+ OV64A40_PIXEL_RATE);
+
+ ov64a40->link_freq =
+ v4l2_ctrl_new_int_menu(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov64a40->num_link_frequencies - 1,
+ 0, ov64a40->link_frequencies);
+
+ v4l2_ctrl_new_std_menu_items(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov64a40_test_pattern_menu) - 1,
+ 0, 0, ov64a40_test_pattern_menu);
+
+ timings = ov64a40_get_timings(ov64a40, 0);
+ exp_max = timings->vts - OV64A40_EXPOSURE_MARGIN;
+ ov64a40->exposure = v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV64A40_EXPOSURE_MIN, exp_max, 1,
+ OV64A40_EXPOSURE_MIN);
+
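+	/*
+	 * The line length is fixed per mode (ppl has an internal x4
+	 * multiplier), so hblank is registered as read-only.
+	 */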
+ hblank_val = timings->ppl * 4 - ov64a40->mode->width;
+ ov64a40->hblank = v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_HBLANK, hblank_val,
+ hblank_val, 1, hblank_val);
+ if (ov64a40->hblank)
+ ov64a40->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ vblank_def = timings->vts - ov64a40->mode->height;
+ vblank_max = OV64A40_VTS_MAX - ov64a40->mode->height;
+ ov64a40->vblank = v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_VBLANK, OV64A40_VBLANK_MIN,
+ vblank_max, 1, vblank_def);
+
+ v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV64A40_ANA_GAIN_MIN, OV64A40_ANA_GAIN_MAX, 1,
+ OV64A40_ANA_GAIN_DEFAULT);
+
+ ov64a40->hflip = v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ov64a40->hflip)
+ ov64a40->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ ov64a40->vflip = v4l2_ctrl_new_std(hdlr, &ov64a40_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ov64a40->vflip)
+ ov64a40->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ dev_err(ov64a40->dev, "control init failed: %d\n", ret);
+ goto error_free_hdlr;
+ }
+
+ ret = v4l2_fwnode_device_parse(ov64a40->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdlr, &ov64a40_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ov64a40->sd.ctrl_handler = hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+static int ov64a40_identify(struct ov64a40 *ov64a40)
+{
+ int ret;
+ u64 id;
+
+ ret = cci_read(ov64a40->cci, OV64A40_REG_CHIP_ID, &id, NULL);
+ if (ret) {
+ dev_err(ov64a40->dev, "Failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (id != OV64A40_CHIP_ID) {
+ dev_err(ov64a40->dev, "chip id mismatch: %#llx\n", id);
+ return -ENODEV;
+ }
+
+ dev_dbg(ov64a40->dev, "OV64A40 chip identified: %#llx\n", id);
+
+ return 0;
+}
+
+static int ov64a40_parse_dt(struct ov64a40 *ov64a40)
+{
+ struct v4l2_fwnode_endpoint v4l2_fwnode = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *endpoint;
+ unsigned int i;
+ int ret;
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(ov64a40->dev),
+ NULL);
+ if (!endpoint) {
+ dev_err(ov64a40->dev, "Failed to find endpoint\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &v4l2_fwnode);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(ov64a40->dev, "Failed to parse endpoint\n");
+ return ret;
+ }
+
+ if (v4l2_fwnode.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(ov64a40->dev, "Unsupported number of data lanes: %u\n",
+ v4l2_fwnode.bus.mipi_csi2.num_data_lanes);
+ v4l2_fwnode_endpoint_free(&v4l2_fwnode);
+ return -EINVAL;
+ }
+
+ if (!v4l2_fwnode.nr_of_link_frequencies) {
+		dev_err(ov64a40->dev, "No link frequencies defined\n");
+ v4l2_fwnode_endpoint_free(&v4l2_fwnode);
+ return -EINVAL;
+ }
+
+ if (v4l2_fwnode.nr_of_link_frequencies > 2) {
+		dev_err(ov64a40->dev,
+			"Unsupported number of link frequencies\n");
+ v4l2_fwnode_endpoint_free(&v4l2_fwnode);
+ return -EINVAL;
+ }
+
+ ov64a40->link_frequencies =
+ devm_kcalloc(ov64a40->dev, v4l2_fwnode.nr_of_link_frequencies,
+ sizeof(v4l2_fwnode.link_frequencies[0]),
+ GFP_KERNEL);
+ if (!ov64a40->link_frequencies) {
+ v4l2_fwnode_endpoint_free(&v4l2_fwnode);
+ return -ENOMEM;
+ }
+ ov64a40->num_link_frequencies = v4l2_fwnode.nr_of_link_frequencies;
+
+ for (i = 0; i < v4l2_fwnode.nr_of_link_frequencies; ++i) {
+ if (v4l2_fwnode.link_frequencies[i] != OV64A40_LINK_FREQ_360M &&
+ v4l2_fwnode.link_frequencies[i] != OV64A40_LINK_FREQ_456M) {
+ dev_err(ov64a40->dev,
+ "Unsupported link frequency %lld\n",
+ v4l2_fwnode.link_frequencies[i]);
+ v4l2_fwnode_endpoint_free(&v4l2_fwnode);
+ return -EINVAL;
+ }
+
+ ov64a40->link_frequencies[i] = v4l2_fwnode.link_frequencies[i];
+ }
+
+ v4l2_fwnode_endpoint_free(&v4l2_fwnode);
+
+ return 0;
+}
+
+static int ov64a40_get_regulators(struct ov64a40 *ov64a40)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov64a40->sd);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov64a40_supply_names); i++)
+ ov64a40->supplies[i].supply = ov64a40_supply_names[i];
+
+ return devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(ov64a40_supply_names),
+ ov64a40->supplies);
+}
+
+static int ov64a40_probe(struct i2c_client *client)
+{
+ struct ov64a40 *ov64a40;
+ u32 xclk_freq;
+ int ret;
+
+ ov64a40 = devm_kzalloc(&client->dev, sizeof(*ov64a40), GFP_KERNEL);
+ if (!ov64a40)
+ return -ENOMEM;
+
+ ov64a40->dev = &client->dev;
+ v4l2_i2c_subdev_init(&ov64a40->sd, client, &ov64a40_subdev_ops);
+
+ ov64a40->cci = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov64a40->cci)) {
+ dev_err(&client->dev, "Failed to initialize CCI\n");
+ return PTR_ERR(ov64a40->cci);
+ }
+
+ ov64a40->xclk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(ov64a40->xclk))
+ return dev_err_probe(&client->dev, PTR_ERR(ov64a40->xclk),
+ "Failed to get clock\n");
+
+ xclk_freq = clk_get_rate(ov64a40->xclk);
+ if (xclk_freq != OV64A40_XCLK_FREQ) {
+ dev_err(&client->dev, "Unsupported xclk frequency %u\n",
+ xclk_freq);
+ return -EINVAL;
+ }
+
+ ret = ov64a40_get_regulators(ov64a40);
+ if (ret)
+ return ret;
+
+ ov64a40->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ov64a40->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ov64a40->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ ret = ov64a40_parse_dt(ov64a40);
+ if (ret)
+ return ret;
+
+ ret = ov64a40_power_on(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = ov64a40_identify(ov64a40);
+ if (ret)
+ goto error_poweroff;
+
+ ov64a40->mode = &ov64a40_modes[0];
+
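+	/*
+	 * The sensor is already powered up: mark it active for runtime PM
+	 * and hold a usage reference until the end of probe.
+	 */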
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ ret = ov64a40_init_controls(ov64a40);
+ if (ret)
+ goto error_poweroff;
+
+ /* Initialize subdev */
+ ov64a40->sd.internal_ops = &ov64a40_internal_ops;
+ ov64a40->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE
+ | V4L2_SUBDEV_FL_HAS_EVENTS;
+ ov64a40->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ov64a40->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov64a40->sd.entity, 1, &ov64a40->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d\n", ret);
+ goto error_handler_free;
+ }
+
+ ov64a40->sd.state_lock = ov64a40->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov64a40->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "subdev init error: %d\n", ret);
+ goto error_media_entity;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov64a40->sd);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed to register sensor sub-device: %d\n", ret);
+ goto error_subdev_cleanup;
+ }
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return 0;
+
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&ov64a40->sd);
+error_media_entity:
+ media_entity_cleanup(&ov64a40->sd.entity);
+error_handler_free:
+ v4l2_ctrl_handler_free(ov64a40->sd.ctrl_handler);
+error_poweroff:
+ ov64a40_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return ret;
+}
+
+static void ov64a40_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
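+	/*
+	 * Power the sensor down manually unless runtime PM has already done
+	 * so, and leave it flagged as suspended.
+	 */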
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov64a40_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+}
+
+static const struct of_device_id ov64a40_of_ids[] = {
+ { .compatible = "ovti,ov64a40" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov64a40_of_ids);
+
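+/* Runtime PM callbacks: suspend powers the sensor down, resume powers it up. */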
+static const struct dev_pm_ops ov64a40_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov64a40_power_off, ov64a40_power_on, NULL)
+};
+
+static struct i2c_driver ov64a40_i2c_driver = {
+ .driver = {
+ .name = "ov64a40",
+ .of_match_table = ov64a40_of_ids,
+ .pm = &ov64a40_pm_ops,
+ },
+ .probe = ov64a40_probe,
+ .remove = ov64a40_remove,
+};
+
+module_i2c_driver(ov64a40_i2c_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo.mondi@ideasonboard.com>");
+MODULE_DESCRIPTION("OmniVision OV64A40 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 1ad07935f..b65befb22 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -197,7 +197,7 @@ struct ov6650 {
struct clk *clk;
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
- struct v4l2_fract tpf; /* as requested with s_frame_interval */
+ struct v4l2_fract tpf; /* as requested with set_frame_interval */
u32 code;
};
@@ -476,7 +476,7 @@ static int ov6650_get_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
/* pre-select try crop rectangle */
- rect = &sd_state->pads->try_crop;
+ rect = v4l2_subdev_state_get_crop(sd_state, 0);
} else {
/* pre-select active crop rectangle */
@@ -531,8 +531,10 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
ov6650_bind_align_crop_rectangle(&sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_rect *crop = &sd_state->pads->try_crop;
- struct v4l2_mbus_framefmt *mf = &sd_state->pads->try_fmt;
+ struct v4l2_rect *crop =
+ v4l2_subdev_state_get_crop(sd_state, 0);
+ struct v4l2_mbus_framefmt *mf =
+ v4l2_subdev_state_get_format(sd_state, 0);
/* detect current pad config scaling factor */
bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
@@ -588,9 +590,12 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
/* update media bus format code and frame size */
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf->width = sd_state->pads->try_fmt.width;
- mf->height = sd_state->pads->try_fmt.height;
- mf->code = sd_state->pads->try_fmt.code;
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_state_get_format(sd_state, 0);
+
+ mf->width = try_fmt->width;
+ mf->height = try_fmt->height;
+ mf->code = try_fmt->code;
} else {
mf->width = priv->rect.width >> priv->half_scale;
@@ -717,23 +722,26 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
}
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- crop = &sd_state->pads->try_crop;
+ crop = v4l2_subdev_state_get_crop(sd_state, 0);
else
crop = &priv->rect;
half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_state_get_format(sd_state, 0);
+
/* store new mbus frame format code and size in pad config */
- sd_state->pads->try_fmt.width = crop->width >> half_scale;
- sd_state->pads->try_fmt.height = crop->height >> half_scale;
- sd_state->pads->try_fmt.code = mf->code;
+ try_fmt->width = crop->width >> half_scale;
+ try_fmt->height = crop->height >> half_scale;
+ try_fmt->code = mf->code;
/* return default mbus frame format updated with pad config */
*mf = ov6650_def_fmt;
- mf->width = sd_state->pads->try_fmt.width;
- mf->height = sd_state->pads->try_fmt.height;
- mf->code = sd_state->pads->try_fmt.code;
+ mf->width = try_fmt->width;
+ mf->height = try_fmt->height;
+ mf->code = try_fmt->code;
} else {
int ret = 0;
@@ -791,12 +799,20 @@ static int ov6650_enum_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int ov6650_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
ival->interval = priv->tpf;
dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
@@ -805,14 +821,22 @@ static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int ov6650_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
struct v4l2_fract *tpf = &ival->interval;
int div, ret;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
else
@@ -998,8 +1022,6 @@ static int ov6650_get_mbus_config(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_stream = ov6650_s_stream,
- .g_frame_interval = ov6650_g_frame_interval,
- .s_frame_interval = ov6650_s_frame_interval,
};
static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
@@ -1009,6 +1031,8 @@ static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
.set_selection = ov6650_set_selection,
.get_fmt = ov6650_get_fmt,
.set_fmt = ov6650_set_fmt,
+ .get_frame_interval = ov6650_get_frame_interval,
+ .set_frame_interval = ov6650_set_frame_interval,
.get_mbus_config = ov6650_get_mbus_config,
};
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 6582cc0e2..30f61e04e 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1139,7 +1139,7 @@ __ov7251_get_pad_format(struct ov7251 *ov7251,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&ov7251->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov7251->fmt;
default:
@@ -1169,7 +1169,7 @@ __ov7251_get_pad_crop(struct ov7251 *ov7251,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov7251->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov7251->crop;
default:
@@ -1282,8 +1282,8 @@ exit:
return ret;
}
-static int ov7251_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int ov7251_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
@@ -1386,10 +1386,18 @@ err_power_down:
}
static int ov7251_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fi)
{
struct ov7251 *ov7251 = to_ov7251(subdev);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&ov7251->lock);
fi->interval = ov7251->current_mode->timeperframe;
mutex_unlock(&ov7251->lock);
@@ -1398,12 +1406,20 @@ static int ov7251_get_frame_interval(struct v4l2_subdev *subdev,
}
static int ov7251_set_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fi)
{
struct ov7251 *ov7251 = to_ov7251(subdev);
const struct ov7251_mode_info *new_mode;
int ret = 0;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&ov7251->lock);
new_mode = ov7251_find_mode_by_ival(ov7251, &fi->interval);
@@ -1436,18 +1452,17 @@ exit:
static const struct v4l2_subdev_video_ops ov7251_video_ops = {
.s_stream = ov7251_s_stream,
- .g_frame_interval = ov7251_get_frame_interval,
- .s_frame_interval = ov7251_set_frame_interval,
};
static const struct v4l2_subdev_pad_ops ov7251_subdev_pad_ops = {
- .init_cfg = ov7251_entity_init_cfg,
.enum_mbus_code = ov7251_enum_mbus_code,
.enum_frame_size = ov7251_enum_frame_size,
.enum_frame_interval = ov7251_enum_frame_ival,
.get_fmt = ov7251_get_format,
.set_fmt = ov7251_set_format,
.get_selection = ov7251_get_selection,
+ .get_frame_interval = ov7251_get_frame_interval,
+ .set_frame_interval = ov7251_set_frame_interval,
};
static const struct v4l2_subdev_ops ov7251_subdev_ops = {
@@ -1455,6 +1470,10 @@ static const struct v4l2_subdev_ops ov7251_subdev_ops = {
.pad = &ov7251_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov7251_internal_ops = {
+ .init_state = ov7251_init_state,
+};
+
static int ov7251_check_hwcfg(struct ov7251 *ov7251)
{
struct fwnode_handle *fwnode = dev_fwnode(ov7251->dev);
@@ -1693,6 +1712,7 @@ static int ov7251_probe(struct i2c_client *client)
}
v4l2_i2c_subdev_init(&ov7251->sd, client, &ov7251_subdev_ops);
+ ov7251->sd.internal_ops = &ov7251_internal_ops;
ov7251->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ov7251->pad.flags = MEDIA_PAD_FL_SOURCE;
ov7251->sd.dev = &client->dev;
@@ -1750,7 +1770,7 @@ static int ov7251_probe(struct i2c_client *client)
goto free_entity;
}
- ov7251_entity_init_cfg(&ov7251->sd, NULL);
+ ov7251_init_state(&ov7251->sd, NULL);
return 0;
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 172483597..0cb96b6c9 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1112,8 +1112,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
if (ret)
return ret;
- mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
- format->pad);
+ mbus_fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
*mbus_fmt = format->format;
return 0;
}
@@ -1141,7 +1140,7 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mbus_fmt;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mbus_fmt = v4l2_subdev_state_get_format(sd_state, 0);
format->format = *mbus_fmt;
return 0;
} else {
@@ -1155,23 +1154,37 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
* Implement G/S_PARM. There is a "high quality" mode we could try
* to do someday; for now, we just do the frame rate tweak.
*/
-static int ov7670_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int ov7670_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct ov7670_info *info = to_state(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
info->devtype->get_framerate(sd, &ival->interval);
return 0;
}
-static int ov7670_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int ov7670_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct v4l2_fract *tpf = &ival->interval;
struct ov7670_info *info = to_state(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
return info->devtype->set_framerate(sd, tpf);
}
@@ -1707,7 +1720,7 @@ static void ov7670_get_default_format(struct v4l2_subdev *sd,
static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
ov7670_get_default_format(sd, format);
@@ -1729,22 +1742,18 @@ static const struct v4l2_subdev_core_ops ov7670_core_ops = {
#endif
};
-static const struct v4l2_subdev_video_ops ov7670_video_ops = {
- .s_frame_interval = ov7670_s_frame_interval,
- .g_frame_interval = ov7670_g_frame_interval,
-};
-
static const struct v4l2_subdev_pad_ops ov7670_pad_ops = {
.enum_frame_interval = ov7670_enum_frame_interval,
.enum_frame_size = ov7670_enum_frame_size,
.enum_mbus_code = ov7670_enum_mbus_code,
.get_fmt = ov7670_get_fmt,
.set_fmt = ov7670_set_fmt,
+ .get_frame_interval = ov7670_get_frame_interval,
+ .set_frame_interval = ov7670_set_frame_interval,
};
static const struct v4l2_subdev_ops ov7670_ops = {
.core = &ov7670_core_ops,
- .video = &ov7670_video_ops,
.pad = &ov7670_pad_ops,
};
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 7618b58a7..3e36a5527 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -717,26 +717,42 @@ static int ov772x_set_frame_rate(struct ov772x_priv *priv,
return 0;
}
-static int ov772x_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int ov772x_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct ov772x_priv *priv = to_ov772x(sd);
struct v4l2_fract *tpf = &ival->interval;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
tpf->numerator = 1;
tpf->denominator = priv->fps;
return 0;
}
-static int ov772x_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+static int ov772x_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct ov772x_priv *priv = to_ov772x(sd);
struct v4l2_fract *tpf = &ival->interval;
unsigned int fps;
int ret = 0;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&priv->lock);
if (priv->streaming) {
@@ -1220,7 +1236,7 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *mf;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *mf;
return 0;
}
@@ -1349,8 +1365,6 @@ static int ov772x_enum_mbus_code(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
.s_stream = ov772x_s_stream,
- .s_frame_interval = ov772x_s_frame_interval,
- .g_frame_interval = ov772x_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = {
@@ -1359,6 +1373,8 @@ static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = {
.get_selection = ov772x_get_selection,
.get_fmt = ov772x_get_fmt,
.set_fmt = ov772x_set_fmt,
+ .get_frame_interval = ov772x_get_frame_interval,
+ .set_frame_interval = ov772x_set_frame_interval,
};
static const struct v4l2_subdev_ops ov772x_subdev_ops = {
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 356a45e65..47b1b14d8 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -638,34 +638,8 @@ err_unlock:
return ret;
}
-static int ov7740_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct v4l2_fract *tpf = &ival->interval;
-
-
- tpf->numerator = 1;
- tpf->denominator = 60;
-
- return 0;
-}
-
-static int ov7740_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct v4l2_fract *tpf = &ival->interval;
-
-
- tpf->numerator = 1;
- tpf->denominator = 60;
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops ov7740_subdev_video_ops = {
.s_stream = ov7740_set_stream,
- .s_frame_interval = ov7740_s_frame_interval,
- .g_frame_interval = ov7740_g_frame_interval,
};
static const struct reg_sequence ov7740_format_yuyv[] = {
@@ -812,8 +786,7 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto error;
- mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
- format->pad);
+ mbus_fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
*mbus_fmt = format->format;
mutex_unlock(&ov7740->mutex);
return 0;
@@ -843,7 +816,7 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&ov7740->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mbus_fmt = v4l2_subdev_state_get_format(sd_state, 0);
format->format = *mbus_fmt;
} else {
format->format = ov7740->format;
@@ -853,12 +826,26 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int ov7740_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
+{
+ struct v4l2_fract *tpf = &ival->interval;
+
+ tpf->numerator = 1;
+ tpf->denominator = 60;
+
+ return 0;
+}
+
static const struct v4l2_subdev_pad_ops ov7740_subdev_pad_ops = {
.enum_frame_interval = ov7740_enum_frame_interval,
.enum_frame_size = ov7740_enum_frame_size,
.enum_mbus_code = ov7740_enum_mbus_code,
.get_fmt = ov7740_get_fmt,
.set_fmt = ov7740_set_fmt,
+ .get_frame_interval = ov7740_get_frame_interval,
+ .set_frame_interval = ov7740_get_frame_interval,
};
static const struct v4l2_subdev_ops ov7740_subdev_ops = {
@@ -883,7 +870,7 @@ static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
struct v4l2_mbus_framefmt *format =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
mutex_lock(&ov7740->mutex);
ov7740_get_default_format(sd, format);
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index a0f673a24..6ffe10e57 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -2134,7 +2134,7 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov8856->mutex);
ov8856_update_pad_format(ov8856, mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
ov8856->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index);
@@ -2172,9 +2172,8 @@ static int ov8856_get_format(struct v4l2_subdev *sd,
mutex_lock(&ov8856->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
ov8856_update_pad_format(ov8856, ov8856->cur_mode, &fmt->format);
@@ -2225,7 +2224,7 @@ static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov8856->mutex);
ov8856_update_pad_format(ov8856, &ov8856->priv_lane->supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
mutex_unlock(&ov8856->mutex);
return 0;
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 4d9fd76e2..174c65f76 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1333,7 +1333,7 @@ static int ov8858_start_stream(struct ov8858 *ov8858,
if (ret)
return ret;
- format = v4l2_subdev_get_pad_format(&ov8858->subdev, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
mode = v4l2_find_nearest_size(ov8858_modes, ARRAY_SIZE(ov8858_modes),
width, height, format->width,
format->height);
@@ -1428,7 +1428,7 @@ static int ov8858_set_fmt(struct v4l2_subdev *sd,
fmt->format.field = V4L2_FIELD_NONE;
/* Store the format in the current subdev state. */
- *v4l2_subdev_get_pad_format(sd, state, 0) = fmt->format;
+ *v4l2_subdev_state_get_format(state, 0) = fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
@@ -1476,8 +1476,8 @@ static int ov8858_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int ov8858_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ov8858_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
const struct ov8858_mode *def_mode = &ov8858_modes[0];
struct v4l2_subdev_format fmt = {
@@ -1494,7 +1494,6 @@ static int ov8858_init_cfg(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops ov8858_pad_ops = {
- .init_cfg = ov8858_init_cfg,
.enum_mbus_code = ov8858_enum_mbus_code,
.enum_frame_size = ov8858_enum_frame_sizes,
.get_fmt = v4l2_subdev_get_fmt,
@@ -1512,6 +1511,10 @@ static const struct v4l2_subdev_ops ov8858_subdev_ops = {
.pad = &ov8858_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov8858_internal_ops = {
+ .init_state = ov8858_init_state,
+};
+
/* ----------------------------------------------------------------------------
* Controls handling
*/
@@ -1547,7 +1550,7 @@ static int ov8858_set_ctrl(struct v4l2_ctrl *ctrl)
* - by the driver when s_ctrl is called in the s_stream(1) call path
*/
state = v4l2_subdev_get_locked_active_state(&ov8858->subdev);
- format = v4l2_subdev_get_pad_format(&ov8858->subdev, state, 0);
+ format = v4l2_subdev_state_get_format(state, 0);
/* Propagate change of current control to all related controls */
switch (ctrl->id) {
@@ -1899,6 +1902,7 @@ static int ov8858_probe(struct i2c_client *client)
"Failed to get powerdown gpio\n");
v4l2_i2c_subdev_init(&ov8858->subdev, client, &ov8858_subdev_ops);
+ ov8858->subdev.internal_ops = &ov8858_internal_ops;
ret = ov8858_configure_regulators(ov8858);
if (ret)
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index f2213c615..95ffe7536 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2640,33 +2640,8 @@ static int ov8865_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
-static int ov8865_g_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
- const struct ov8865_mode *mode;
- unsigned int framesize;
- unsigned int fps;
-
- mutex_lock(&sensor->mutex);
-
- mode = sensor->state.mode;
- framesize = mode->hts * (mode->output_size_y +
- sensor->ctrls.vblank->val);
- fps = DIV_ROUND_CLOSEST(sensor->ctrls.pixel_rate->val, framesize);
-
- interval->interval.numerator = 1;
- interval->interval.denominator = fps;
-
- mutex_unlock(&sensor->mutex);
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops ov8865_subdev_video_ops = {
.s_stream = ov8865_s_stream,
- .g_frame_interval = ov8865_g_frame_interval,
- .s_frame_interval = ov8865_g_frame_interval,
};
/* Subdev Pad Operations */
@@ -2710,8 +2685,8 @@ static int ov8865_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(&sensor->mutex);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, sd_state,
- format->pad);
+ *mbus_format = *v4l2_subdev_state_get_format(sd_state,
+ format->pad);
else
ov8865_mbus_format_fill(mbus_format, sensor->state.mbus_code,
sensor->state.mode);
@@ -2765,7 +2740,7 @@ static int ov8865_set_fmt(struct v4l2_subdev *subdev,
ov8865_mbus_format_fill(mbus_format, mbus_code, mode);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, sd_state, format->pad) =
+ *v4l2_subdev_state_get_format(sd_state, format->pad) =
*mbus_format;
else if (sensor->state.mode != mode ||
sensor->state.mbus_code != mbus_code)
@@ -2818,7 +2793,7 @@ __ov8865_get_pad_crop(struct ov8865_sensor *sensor,
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- *r = *v4l2_subdev_get_try_crop(&sensor->subdev, state, pad);
+ *r = *v4l2_subdev_state_get_crop(state, pad);
break;
case V4L2_SUBDEV_FORMAT_ACTIVE:
r->height = mode->output_size_y;
@@ -2862,6 +2837,37 @@ static int ov8865_get_selection(struct v4l2_subdev *subdev,
return 0;
}
+static int ov8865_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
+ const struct ov8865_mode *mode;
+ unsigned int framesize;
+ unsigned int fps;
+
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ mutex_lock(&sensor->mutex);
+
+ mode = sensor->state.mode;
+ framesize = mode->hts * (mode->output_size_y +
+ sensor->ctrls.vblank->val);
+ fps = DIV_ROUND_CLOSEST(sensor->ctrls.pixel_rate->val, framesize);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = fps;
+
+ mutex_unlock(&sensor->mutex);
+
+ return 0;
+}
+
static const struct v4l2_subdev_pad_ops ov8865_subdev_pad_ops = {
.enum_mbus_code = ov8865_enum_mbus_code,
.get_fmt = ov8865_get_fmt,
@@ -2869,6 +2875,8 @@ static const struct v4l2_subdev_pad_ops ov8865_subdev_pad_ops = {
.enum_frame_size = ov8865_enum_frame_size,
.get_selection = ov8865_get_selection,
.set_selection = ov8865_get_selection,
+ .get_frame_interval = ov8865_get_frame_interval,
+ .set_frame_interval = ov8865_get_frame_interval,
};
static const struct v4l2_subdev_ops ov8865_subdev_ops = {
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index bf6dfce1b..251a4b534 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -814,7 +814,7 @@ static int ov9282_get_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *framefmt;
} else {
ov9282_fill_pad_format(ov9282, ov9282->cur_mode, ov9282->code,
@@ -860,7 +860,7 @@ static int ov9282_set_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *framefmt;
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*framefmt = fmt->format;
} else {
ret = ov9282_update_controls(ov9282, mode, fmt);
@@ -876,14 +876,14 @@ static int ov9282_set_pad_format(struct v4l2_subdev *sd,
}
/**
- * ov9282_init_pad_cfg() - Initialize sub-device pad configuration
+ * ov9282_init_state() - Initialize sub-device state
* @sd: pointer to ov9282 V4L2 sub-device structure
* @sd_state: V4L2 sub-device configuration
*
* Return: 0 if successful, error code otherwise.
*/
-static int ov9282_init_pad_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int ov9282_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct ov9282 *ov9282 = to_ov9282(sd);
struct v4l2_subdev_format fmt = { 0 };
@@ -902,7 +902,7 @@ __ov9282_get_pad_crop(struct ov9282 *ov9282,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&ov9282->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &ov9282->cur_mode->crop;
}
@@ -1192,7 +1192,6 @@ static const struct v4l2_subdev_video_ops ov9282_video_ops = {
};
static const struct v4l2_subdev_pad_ops ov9282_pad_ops = {
- .init_cfg = ov9282_init_pad_cfg,
.enum_mbus_code = ov9282_enum_mbus_code,
.enum_frame_size = ov9282_enum_frame_size,
.get_fmt = ov9282_get_pad_format,
@@ -1206,6 +1205,10 @@ static const struct v4l2_subdev_ops ov9282_subdev_ops = {
.pad = &ov9282_pad_ops,
};
+static const struct v4l2_subdev_internal_ops ov9282_internal_ops = {
+ .init_state = ov9282_init_state,
+};
+
/**
* ov9282_power_on() - Sensor power on sequence
* @dev: pointer to i2c device
@@ -1394,6 +1397,7 @@ static int ov9282_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov9282->sd, client, &ov9282_subdev_ops);
+ ov9282->sd.internal_ops = &ov9282_internal_ops;
v4l2_i2c_subdev_set_name(&ov9282->sd, client,
device_get_match_data(ov9282->dev), NULL);
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index cbaea0495..e9a52a8a9 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -547,8 +547,6 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return ov9640_s_fmt(sd, mf);
- sd_state->pads->try_fmt = *mf;
-
return 0;
}
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index da1ab5135..66cd0e9dd 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1101,11 +1101,19 @@ static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
return 0;
}
-static int ov965x_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int ov965x_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct ov965x *ov965x = to_ov965x(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&ov965x->lock);
fi->interval = ov965x->fiv->interval;
mutex_unlock(&ov965x->lock);
@@ -1148,12 +1156,20 @@ static int __ov965x_set_frame_interval(struct ov965x *ov965x,
return 0;
}
-static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int ov965x_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct ov965x *ov965x = to_ov965x(sd);
int ret;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
v4l2_dbg(1, debug, sd, "Setting %d/%d frame interval\n",
fi->interval.numerator, fi->interval.denominator);
@@ -1172,7 +1188,7 @@ static int ov965x_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
fmt->format = *mf;
return 0;
}
@@ -1233,8 +1249,7 @@ static int ov965x_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
if (sd_state) {
- mf = v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
}
} else {
@@ -1363,7 +1378,7 @@ static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
+ v4l2_subdev_state_get_format(fh->state, 0);
ov965x_get_default_format(mf);
return 0;
@@ -1374,12 +1389,12 @@ static const struct v4l2_subdev_pad_ops ov965x_pad_ops = {
.enum_frame_size = ov965x_enum_frame_sizes,
.get_fmt = ov965x_get_fmt,
.set_fmt = ov965x_set_fmt,
+ .get_frame_interval = ov965x_get_frame_interval,
+ .set_frame_interval = ov965x_set_frame_interval,
};
static const struct v4l2_subdev_video_ops ov965x_video_ops = {
.s_stream = ov965x_s_stream,
- .g_frame_interval = ov965x_g_frame_interval,
- .s_frame_interval = ov965x_s_frame_interval,
};
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index f1377c305..d99728597 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -697,7 +697,7 @@ static int ov9734_set_format(struct v4l2_subdev *sd,
mutex_lock(&ov9734->mutex);
ov9734_update_pad_format(mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
} else {
ov9734->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov9734->link_freq, mode->link_freq_index);
@@ -730,9 +730,8 @@ static int ov9734_get_format(struct v4l2_subdev *sd,
mutex_lock(&ov9734->mutex);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(&ov9734->sd,
- sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
ov9734_update_pad_format(ov9734->cur_mode, &fmt->format);
@@ -777,7 +776,7 @@ static int ov9734_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_lock(&ov9734->mutex);
ov9734_update_pad_format(&supported_modes[0],
- v4l2_subdev_get_try_format(sd, fh->state, 0));
+ v4l2_subdev_state_get_format(fh->state, 0));
mutex_unlock(&ov9734->mutex);
return 0;
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index b430046f9..a59db1015 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1008,10 +1008,8 @@ static int rj54n1_set_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
&mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
- }
/*
* Verify if the sensor has just been powered on. TODO: replace this
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index ed5b10731..af8d01f78 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -819,7 +819,6 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
struct v4l2_subdev_format *fmt,
const struct s5c73m3_frame_size **fs)
{
- struct v4l2_subdev *sd = &state->sensor_sd;
u32 code;
switch (fmt->pad) {
@@ -841,10 +840,8 @@ static void s5c73m3_oif_try_format(struct s5c73m3 *state,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
*fs = state->oif_pix_size[RES_ISP];
else
- *fs = s5c73m3_find_frame_size(
- v4l2_subdev_get_try_format(sd, sd_state,
- OIF_ISP_PAD),
- RES_ISP);
+ *fs = s5c73m3_find_frame_size(v4l2_subdev_state_get_format(sd_state, OIF_ISP_PAD),
+ RES_ISP);
break;
}
@@ -869,11 +866,19 @@ static void s5c73m3_try_format(struct s5c73m3 *state,
s5c73m3_fill_mbus_fmt(&fmt->format, *fs, code);
}
-static int s5c73m3_oif_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int s5c73m3_oif_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad != OIF_SOURCE_PAD)
return -EINVAL;
@@ -918,12 +923,20 @@ static int __s5c73m3_set_frame_interval(struct s5c73m3 *state,
return 0;
}
-static int s5c73m3_oif_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int s5c73m3_oif_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct s5c73m3 *state = oif_sd_to_s5c73m3(sd);
int ret;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad != OIF_SOURCE_PAD)
return -EINVAL;
@@ -990,8 +1003,8 @@ static int s5c73m3_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
return 0;
}
@@ -1025,8 +1038,8 @@ static int s5c73m3_oif_get_fmt(struct v4l2_subdev *sd,
u32 code;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
return 0;
}
@@ -1069,7 +1082,7 @@ static int s5c73m3_set_fmt(struct v4l2_subdev *sd,
s5c73m3_try_format(state, sd_state, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
} else {
switch (fmt->pad) {
@@ -1108,11 +1121,11 @@ static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
s5c73m3_oif_try_format(state, sd_state, fmt, &frame_size);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
if (fmt->pad == OIF_ISP_PAD) {
- mf = v4l2_subdev_get_try_format(sd, sd_state,
- OIF_SOURCE_PAD);
+ mf = v4l2_subdev_state_get_format(sd_state,
+ OIF_SOURCE_PAD);
mf->width = fmt->format.width;
mf->height = fmt->format.height;
}
@@ -1260,8 +1273,8 @@ static int s5c73m3_oif_enum_frame_size(struct v4l2_subdev *sd,
if (fse->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, sd_state,
- OIF_ISP_PAD);
+ mf = v4l2_subdev_state_get_format(sd_state,
+ OIF_ISP_PAD);
w = mf->width;
h = mf->height;
@@ -1316,11 +1329,11 @@ static int s5c73m3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->state, S5C73M3_ISP_PAD);
+ mf = v4l2_subdev_state_get_format(fh->state, S5C73M3_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->state, S5C73M3_JPEG_PAD);
+ mf = v4l2_subdev_state_get_format(fh->state, S5C73M3_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
@@ -1331,15 +1344,15 @@ static int s5c73m3_oif_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_ISP_PAD);
+ mf = v4l2_subdev_state_get_format(fh->state, OIF_ISP_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_JPEG_PAD);
+ mf = v4l2_subdev_state_get_format(fh->state, OIF_JPEG_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_jpeg_resolutions[1],
S5C73M3_JPEG_FMT);
- mf = v4l2_subdev_get_try_format(sd, fh->state, OIF_SOURCE_PAD);
+ mf = v4l2_subdev_state_get_format(fh->state, OIF_SOURCE_PAD);
s5c73m3_fill_mbus_fmt(mf, &s5c73m3_isp_resolutions[1],
S5C73M3_ISP_FMT);
return 0;
@@ -1500,6 +1513,8 @@ static const struct v4l2_subdev_pad_ops s5c73m3_oif_pad_ops = {
.enum_frame_interval = s5c73m3_oif_enum_frame_interval,
.get_fmt = s5c73m3_oif_get_fmt,
.set_fmt = s5c73m3_oif_set_fmt,
+ .get_frame_interval = s5c73m3_oif_get_frame_interval,
+ .set_frame_interval = s5c73m3_oif_set_frame_interval,
.get_frame_desc = s5c73m3_oif_get_frame_desc,
.set_frame_desc = s5c73m3_oif_set_frame_desc,
};
@@ -1511,8 +1526,6 @@ static const struct v4l2_subdev_core_ops s5c73m3_oif_core_ops = {
static const struct v4l2_subdev_video_ops s5c73m3_oif_video_ops = {
.s_stream = s5c73m3_oif_s_stream,
- .g_frame_interval = s5c73m3_oif_g_frame_interval,
- .s_frame_interval = s5c73m3_oif_s_frame_interval,
};
static const struct v4l2_subdev_ops oif_subdev_ops = {
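The change above is the recurring pattern in this series: g_frame_interval and s_frame_interval move from v4l2_subdev_video_ops to get_frame_interval/set_frame_interval in v4l2_subdev_pad_ops, gaining a subdev state argument and a check on the new `which` field. A minimal sketch of the new-style getter for a hypothetical driver that keeps the interval in the subdev state (the foo_* names are illustrative, not from this patch):

static int foo_get_frame_interval(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_frame_interval *fi)
{
	/* Serve TRY and ACTIVE alike from the state the core passes in. */
	fi->interval = *v4l2_subdev_state_get_interval(sd_state, fi->pad);

	return 0;
}

The drivers converted here do not yet store intervals in the subdev state, hence the FIXME comments restricting them to V4L2_SUBDEV_FORMAT_ACTIVE.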
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 67da2045f..de079d2c9 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -1118,11 +1118,19 @@ out:
return ret;
}
-static int s5k5baf_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int s5k5baf_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct s5k5baf *state = to_s5k5baf(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&state->lock);
fi->interval.numerator = state->fiv;
fi->interval.denominator = 10000;
@@ -1131,8 +1139,8 @@ static int s5k5baf_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static void s5k5baf_set_frame_interval(struct s5k5baf *state,
- struct v4l2_subdev_frame_interval *fi)
+static void __s5k5baf_set_frame_interval(struct s5k5baf *state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct v4l2_fract *i = &fi->interval;
@@ -1155,13 +1163,21 @@ static void s5k5baf_set_frame_interval(struct s5k5baf *state,
state->fiv);
}
-static int s5k5baf_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int s5k5baf_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct s5k5baf *state = to_s5k5baf(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&state->lock);
- s5k5baf_set_frame_interval(state, fi);
+ __s5k5baf_set_frame_interval(state, fi);
mutex_unlock(&state->lock);
return 0;
}
@@ -1273,7 +1289,7 @@ static int s5k5baf_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1307,7 +1323,7 @@ static int s5k5baf_set_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = *mf;
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = *mf;
return 0;
}
@@ -1379,11 +1395,11 @@ static int s5k5baf_get_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
if (rtype == R_COMPOSE)
- sel->r = *v4l2_subdev_get_try_compose(sd, sd_state,
- sel->pad);
+ sel->r = *v4l2_subdev_state_get_compose(sd_state,
+ sel->pad);
else
- sel->r = *v4l2_subdev_get_try_crop(sd, sd_state,
- sel->pad);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state,
+ sel->pad);
return 0;
}
@@ -1472,14 +1488,11 @@ static int s5k5baf_set_selection(struct v4l2_subdev *sd,
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
rects = (struct v4l2_rect * []) {
- &s5k5baf_cis_rect,
- v4l2_subdev_get_try_crop(sd, sd_state,
- PAD_CIS),
- v4l2_subdev_get_try_compose(sd, sd_state,
- PAD_CIS),
- v4l2_subdev_get_try_crop(sd, sd_state,
- PAD_OUT)
- };
+ &s5k5baf_cis_rect,
+ v4l2_subdev_state_get_crop(sd_state, PAD_CIS),
+ v4l2_subdev_state_get_compose(sd_state, PAD_CIS),
+ v4l2_subdev_state_get_crop(sd_state, PAD_OUT)
+ };
s5k5baf_set_rect_and_adjust(rects, rtype, &sel->r);
return 0;
}
@@ -1529,11 +1542,11 @@ static const struct v4l2_subdev_pad_ops s5k5baf_pad_ops = {
.set_fmt = s5k5baf_set_fmt,
.get_selection = s5k5baf_get_selection,
.set_selection = s5k5baf_set_selection,
+ .get_frame_interval = s5k5baf_get_frame_interval,
+ .set_frame_interval = s5k5baf_set_frame_interval,
};
static const struct v4l2_subdev_video_ops s5k5baf_video_ops = {
- .g_frame_interval = s5k5baf_g_frame_interval,
- .s_frame_interval = s5k5baf_s_frame_interval,
.s_stream = s5k5baf_s_stream,
};
@@ -1696,22 +1709,22 @@ static int s5k5baf_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, fh->state, PAD_CIS);
+ mf = v4l2_subdev_state_get_format(fh->state, PAD_CIS);
s5k5baf_try_cis_format(mf);
if (s5k5baf_is_cis_subdev(sd))
return 0;
- mf = v4l2_subdev_get_try_format(sd, fh->state, PAD_OUT);
+ mf = v4l2_subdev_state_get_format(fh->state, PAD_OUT);
mf->colorspace = s5k5baf_formats[0].colorspace;
mf->code = s5k5baf_formats[0].code;
mf->width = s5k5baf_cis_rect.width;
mf->height = s5k5baf_cis_rect.height;
mf->field = V4L2_FIELD_NONE;
- *v4l2_subdev_get_try_crop(sd, fh->state, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_compose(sd, fh->state, PAD_CIS) = s5k5baf_cis_rect;
- *v4l2_subdev_get_try_crop(sd, fh->state, PAD_OUT) = s5k5baf_cis_rect;
+ *v4l2_subdev_state_get_crop(fh->state, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_state_get_compose(fh->state, PAD_CIS) = s5k5baf_cis_rect;
+ *v4l2_subdev_state_get_crop(fh->state, PAD_OUT) = s5k5baf_cis_rect;
return 0;
}
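The s5c73m3, s5k5baf and s5k6a3 hunks above are all instances of the same mechanical rename; the subdev pointer argument disappears because the accessors now operate purely on the state object, which works for TRY states and the active state alike. The full mapping used throughout this series, as an illustrative fragment (not patch content):

	struct v4l2_mbus_framefmt *fmt;
	struct v4l2_rect *crop, *compose;

	/* Was: v4l2_subdev_get_try_format(sd, sd_state, pad) */
	fmt = v4l2_subdev_state_get_format(sd_state, pad);
	/* Was: v4l2_subdev_get_try_crop(sd, sd_state, pad) */
	crop = v4l2_subdev_state_get_crop(sd_state, pad);
	/* Was: v4l2_subdev_get_try_compose(sd, sd_state, pad) */
	compose = v4l2_subdev_state_get_compose(sd_state, pad);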
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index b3560c8f8..0c2674115 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -127,8 +127,7 @@ static struct v4l2_mbus_framefmt *__s5k6a3_get_format(
u32 pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return sd_state ? v4l2_subdev_get_try_format(&sensor->subdev,
- sd_state, pad) : NULL;
+ return sd_state ? v4l2_subdev_state_get_format(sd_state, pad) : NULL;
return &sensor->format;
}
@@ -174,9 +173,8 @@ static const struct v4l2_subdev_pad_ops s5k6a3_pad_ops = {
static int s5k6a3_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd,
- fh->state,
- 0);
+ struct v4l2_mbus_framefmt *format = v4l2_subdev_state_get_format(fh->state,
+ 0);
*format = s5k6a3_formats[0];
format->width = S5K6A3_DEFAULT_WIDTH;
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index c106e7a7d..897eaa669 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -594,10 +594,8 @@ static int saa6752hs_set_fmt(struct v4l2_subdev *sd,
f->field = V4L2_FIELD_INTERLACED;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *f;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
- }
/*
FIXME: translate and round width/height into EMPRESS
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index dab147871..f25064072 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -16,25 +16,27 @@
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define MIPID02_CLK_LANE_WR_REG1 0x01
-#define MIPID02_CLK_LANE_REG1 0x02
-#define MIPID02_CLK_LANE_REG3 0x04
-#define MIPID02_DATA_LANE0_REG1 0x05
-#define MIPID02_DATA_LANE0_REG2 0x06
-#define MIPID02_DATA_LANE1_REG1 0x09
-#define MIPID02_DATA_LANE1_REG2 0x0a
-#define MIPID02_MODE_REG1 0x14
-#define MIPID02_MODE_REG2 0x15
-#define MIPID02_DATA_ID_RREG 0x17
-#define MIPID02_DATA_SELECTION_CTRL 0x19
-#define MIPID02_PIX_WIDTH_CTRL 0x1e
-#define MIPID02_PIX_WIDTH_CTRL_EMB 0x1f
+#define MIPID02_CLK_LANE_WR_REG1 CCI_REG8(0x01)
+#define MIPID02_CLK_LANE_REG1 CCI_REG8(0x02)
+#define MIPID02_CLK_LANE_REG3 CCI_REG8(0x04)
+#define MIPID02_DATA_LANE0_REG1 CCI_REG8(0x05)
+#define MIPID02_DATA_LANE0_REG2 CCI_REG8(0x06)
+#define MIPID02_DATA_LANE1_REG1 CCI_REG8(0x09)
+#define MIPID02_DATA_LANE1_REG2 CCI_REG8(0x0a)
+#define MIPID02_MODE_REG1 CCI_REG8(0x14)
+#define MIPID02_MODE_REG2 CCI_REG8(0x15)
+#define MIPID02_DATA_ID_RREG CCI_REG8(0x17)
+#define MIPID02_DATA_SELECTION_CTRL CCI_REG8(0x19)
+#define MIPID02_PIX_WIDTH_CTRL CCI_REG8(0x1e)
+#define MIPID02_PIX_WIDTH_CTRL_EMB CCI_REG8(0x1f)
/* Bits definition for MIPID02_CLK_LANE_REG1 */
#define CLK_ENABLE BIT(0)
@@ -68,7 +70,7 @@ static const u32 mipid02_supported_fmt_codes[] = {
MEDIA_BUS_FMT_RGB565_2X8_LE, MEDIA_BUS_FMT_RGB565_2X8_BE,
MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_JPEG_1X8
+ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_JPEG_1X8
};
/* regulator supplies */
@@ -88,12 +90,12 @@ struct mipid02_dev {
struct i2c_client *i2c_client;
struct regulator_bulk_data supplies[MIPID02_NUM_SUPPLIES];
struct v4l2_subdev sd;
+ struct regmap *regmap;
struct media_pad pad[MIPID02_PAD_NB];
struct clk *xclk;
struct gpio_desc *reset_gpio;
/* endpoints info */
struct v4l2_fwnode_endpoint rx;
- u64 link_frequency;
struct v4l2_fwnode_endpoint tx;
/* remote source */
struct v4l2_async_notifier notifier;
@@ -110,10 +112,6 @@ struct mipid02_dev {
u8 pix_width_ctrl;
u8 pix_width_ctrl_emb;
} r;
- /* lock to protect all members below */
- struct mutex lock;
- bool streaming;
- struct v4l2_mbus_framefmt fmt;
};
static int bpp_from_code(__u32 code)
@@ -123,6 +121,7 @@ static int bpp_from_code(__u32 code)
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
return 8;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
@@ -160,17 +159,18 @@ static u8 data_type_from_code(__u32 code)
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
- return 0x2a;
+ case MEDIA_BUS_FMT_Y8_1X8:
+ return MIPI_CSI2_DT_RAW8;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
- return 0x2b;
+ return MIPI_CSI2_DT_RAW10;
case MEDIA_BUS_FMT_SBGGR12_1X12:
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
- return 0x2c;
+ return MIPI_CSI2_DT_RAW12;
case MEDIA_BUS_FMT_YUYV8_1X16:
case MEDIA_BUS_FMT_YVYU8_1X16:
case MEDIA_BUS_FMT_UYVY8_1X16:
@@ -179,30 +179,18 @@ static u8 data_type_from_code(__u32 code)
case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
- return 0x1e;
+ return MIPI_CSI2_DT_YUV422_8B;
case MEDIA_BUS_FMT_BGR888_1X24:
- return 0x24;
+ return MIPI_CSI2_DT_RGB888;
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
case MEDIA_BUS_FMT_RGB565_2X8_BE:
- return 0x22;
+ return MIPI_CSI2_DT_RGB565;
default:
return 0;
}
}
-static void init_format(struct v4l2_mbus_framefmt *fmt)
-{
- fmt->code = MEDIA_BUS_FMT_SBGGR8_1X8;
- fmt->field = V4L2_FIELD_NONE;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SRGB);
- fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
- fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SRGB);
- fmt->width = 640;
- fmt->height = 480;
-}
-
static __u32 get_fmt_code(__u32 code)
{
unsigned int i;
@@ -238,62 +226,6 @@ static inline struct mipid02_dev *to_mipid02_dev(struct v4l2_subdev *sd)
return container_of(sd, struct mipid02_dev, sd);
}
-static int mipid02_read_reg(struct mipid02_dev *bridge, u16 reg, u8 *val)
-{
- struct i2c_client *client = bridge->i2c_client;
- struct i2c_msg msg[2];
- u8 buf[2];
- int ret;
-
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
-
- msg[0].addr = client->addr;
- msg[0].flags = client->flags;
- msg[0].buf = buf;
- msg[0].len = sizeof(buf);
-
- msg[1].addr = client->addr;
- msg[1].flags = client->flags | I2C_M_RD;
- msg[1].buf = val;
- msg[1].len = 1;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret < 0) {
- dev_dbg(&client->dev, "%s: %x i2c_transfer, reg: %x => %d\n",
- __func__, client->addr, reg, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int mipid02_write_reg(struct mipid02_dev *bridge, u16 reg, u8 val)
-{
- struct i2c_client *client = bridge->i2c_client;
- struct i2c_msg msg;
- u8 buf[3];
- int ret;
-
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
- buf[2] = val;
-
- msg.addr = client->addr;
- msg.flags = client->flags;
- msg.buf = buf;
- msg.len = sizeof(buf);
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0) {
- dev_dbg(&client->dev, "%s: i2c_transfer, reg: %x => %d\n",
- __func__, reg, ret);
- return ret;
- }
-
- return 0;
-}
-
static int mipid02_get_regulators(struct mipid02_dev *bridge)
{
unsigned int i;
@@ -358,73 +290,44 @@ static void mipid02_set_power_off(struct mipid02_dev *bridge)
static int mipid02_detect(struct mipid02_dev *bridge)
{
- u8 reg;
+ u64 reg;
/*
 * There are no version registers. Just try to read register
* MIPID02_CLK_LANE_WR_REG1.
*/
- return mipid02_read_reg(bridge, MIPID02_CLK_LANE_WR_REG1, &reg);
-}
-
-static u32 mipid02_get_link_freq_from_cid_link_freq(struct mipid02_dev *bridge,
- struct v4l2_subdev *subdev)
-{
- struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
- struct v4l2_ctrl *ctrl;
- int ret;
-
- ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_LINK_FREQ);
- if (!ctrl)
- return 0;
- qm.index = v4l2_ctrl_g_ctrl(ctrl);
-
- ret = v4l2_querymenu(subdev->ctrl_handler, &qm);
- if (ret)
- return 0;
-
- return qm.value;
-}
-
-static u32 mipid02_get_link_freq_from_cid_pixel_rate(struct mipid02_dev *bridge,
- struct v4l2_subdev *subdev)
-{
- struct v4l2_fwnode_endpoint *ep = &bridge->rx;
- struct v4l2_ctrl *ctrl;
- u32 pixel_clock;
- u32 bpp = bpp_from_code(bridge->fmt.code);
-
- ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
- if (!ctrl)
- return 0;
- pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl);
-
- return pixel_clock * bpp / (2 * ep->bus.mipi_csi2.num_data_lanes);
+ return cci_read(bridge->regmap, MIPID02_CLK_LANE_WR_REG1, &reg, NULL);
}
/*
* We need to know link frequency to setup clk_lane_reg1 timings. Link frequency
- * will be computed using connected device V4L2_CID_PIXEL_RATE, bit per pixel
+ * will be retrieved from the connected device via v4l2_get_link_freq, bits per pixel
* and number of lanes.
*/
-static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge)
+static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
+ struct v4l2_mbus_framefmt *fmt)
{
struct i2c_client *client = bridge->i2c_client;
struct v4l2_subdev *subdev = bridge->s_subdev;
- u32 link_freq;
-
- link_freq = mipid02_get_link_freq_from_cid_link_freq(bridge, subdev);
- if (!link_freq) {
- link_freq = mipid02_get_link_freq_from_cid_pixel_rate(bridge,
- subdev);
- if (!link_freq) {
- dev_err(&client->dev, "Failed to get link frequency");
- return -EINVAL;
- }
+ struct v4l2_fwnode_endpoint *ep = &bridge->rx;
+ u32 bpp = bpp_from_code(fmt->code);
+ /*
+ * clk_lane_reg1 requires 4 times the unit interval time, and bitrate
+ * is twice the link frequency, hence ui_4 = 1000000000 * 4 / 2
+ */
+ u64 ui_4 = 2000000000;
+ s64 link_freq;
+
+ link_freq = v4l2_get_link_freq(subdev->ctrl_handler, bpp,
+ 2 * ep->bus.mipi_csi2.num_data_lanes);
+ if (link_freq < 0) {
+ dev_err(&client->dev, "Failed to get link frequency");
+ return -EINVAL;
}
- dev_dbg(&client->dev, "detect link_freq = %d Hz", link_freq);
- bridge->r.clk_lane_reg1 |= (2000000000 / link_freq) << 2;
+ dev_dbg(&client->dev, "detect link_freq = %lld Hz", link_freq);
+ do_div(ui_4, link_freq);
+ bridge->r.clk_lane_reg1 |= ui_4 << 2;
return 0;
}
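For the arithmetic in mipid02_configure_from_rx_speed(): the CSI-2 D-PHY clocks data on both edges, so the bit rate is twice the link frequency and one unit interval (UI) is 1 / (2 * link_freq) seconds. Four UIs expressed in nanoseconds is therefore 4e9 / (2 * link_freq) = 2e9 / link_freq, which is the constant folded into ui_4 above. A worked example with an illustrative link frequency:

	u64 ui_4 = 2000000000;

	/*
	 * link_freq = 500 MHz -> bit rate = 1 Gbit/s -> UI = 1 ns,
	 * so 4 UIs = 4 ns and ui_4 becomes 2000000000 / 500000000 = 4.
	 * The value lands in clk_lane_reg1 bits [7:2] via the << 2.
	 */
	do_div(ui_4, 500000000);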
@@ -479,7 +382,8 @@ static int mipid02_configure_data1_lane(struct mipid02_dev *bridge, int nb,
return 0;
}
-static int mipid02_configure_from_rx(struct mipid02_dev *bridge)
+static int mipid02_configure_from_rx(struct mipid02_dev *bridge,
+ struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_fwnode_endpoint *ep = &bridge->rx;
bool are_lanes_swap = ep->bus.mipi_csi2.data_lanes[0] == 2;
@@ -504,7 +408,7 @@ static int mipid02_configure_from_rx(struct mipid02_dev *bridge)
bridge->r.mode_reg1 |= are_lanes_swap ? MODE_DATA_SWAP : 0;
bridge->r.mode_reg1 |= (nb - 1) << 1;
- return mipid02_configure_from_rx_speed(bridge);
+ return mipid02_configure_from_rx_speed(bridge, fmt);
}
static int mipid02_configure_from_tx(struct mipid02_dev *bridge)
@@ -524,16 +428,17 @@ static int mipid02_configure_from_tx(struct mipid02_dev *bridge)
return 0;
}
-static int mipid02_configure_from_code(struct mipid02_dev *bridge)
+static int mipid02_configure_from_code(struct mipid02_dev *bridge,
+ struct v4l2_mbus_framefmt *fmt)
{
u8 data_type;
bridge->r.data_id_rreg = 0;
- if (bridge->fmt.code != MEDIA_BUS_FMT_JPEG_1X8) {
+ if (fmt->code != MEDIA_BUS_FMT_JPEG_1X8) {
bridge->r.data_selection_ctrl |= SELECTION_MANUAL_DATA;
- data_type = data_type_from_code(bridge->fmt.code);
+ data_type = data_type_from_code(fmt->code);
if (!data_type)
return -EINVAL;
bridge->r.data_id_rreg = data_type;
@@ -555,13 +460,9 @@ static int mipid02_stream_disable(struct mipid02_dev *bridge)
goto error;
/* Disable all lanes */
- ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG1, 0);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE0_REG1, 0);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE1_REG1, 0);
+ cci_write(bridge->regmap, MIPID02_CLK_LANE_REG1, 0, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG1, 0, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG1, 0, &ret);
if (ret)
goto error;
error:
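The three-writes-one-check shape above relies on the v4l2-cci convention: cci_read() and cci_write() take an optional int *err and become no-ops once *err is non-zero, so a whole register sequence needs only a single error check. A minimal sketch (REG_A and REG_B are placeholders):

	int err = 0;

	cci_write(map, REG_A, 0x01, &err);
	cci_write(map, REG_B, 0x02, &err);	/* skipped if the first write failed */
	if (err)
		return err;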
@@ -574,69 +475,52 @@ error:
static int mipid02_stream_enable(struct mipid02_dev *bridge)
{
struct i2c_client *client = bridge->i2c_client;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
int ret = -EINVAL;
if (!bridge->s_subdev)
goto error;
memset(&bridge->r, 0, sizeof(bridge->r));
+
+ state = v4l2_subdev_lock_and_get_active_state(&bridge->sd);
+ fmt = v4l2_subdev_state_get_format(state, MIPID02_SINK_0);
+
/* build registers content */
- ret = mipid02_configure_from_rx(bridge);
+ ret = mipid02_configure_from_rx(bridge, fmt);
if (ret)
goto error;
ret = mipid02_configure_from_tx(bridge);
if (ret)
goto error;
- ret = mipid02_configure_from_code(bridge);
+ ret = mipid02_configure_from_code(bridge, fmt);
if (ret)
goto error;
+ v4l2_subdev_unlock_state(state);
+
/* write mipi registers */
- ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG1,
- bridge->r.clk_lane_reg1);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG3, CLK_MIPI_CSI);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE0_REG1,
- bridge->r.data_lane0_reg1);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE0_REG2,
- DATA_MIPI_CSI);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE1_REG1,
- bridge->r.data_lane1_reg1);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_LANE1_REG2,
- DATA_MIPI_CSI);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_MODE_REG1,
- MODE_NO_BYPASS | bridge->r.mode_reg1);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_MODE_REG2,
- bridge->r.mode_reg2);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_ID_RREG,
- bridge->r.data_id_rreg);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_DATA_SELECTION_CTRL,
- bridge->r.data_selection_ctrl);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_PIX_WIDTH_CTRL,
- bridge->r.pix_width_ctrl);
- if (ret)
- goto error;
- ret = mipid02_write_reg(bridge, MIPID02_PIX_WIDTH_CTRL_EMB,
- bridge->r.pix_width_ctrl_emb);
+ cci_write(bridge->regmap, MIPID02_CLK_LANE_REG1,
+ bridge->r.clk_lane_reg1, &ret);
+ cci_write(bridge->regmap, MIPID02_CLK_LANE_REG3, CLK_MIPI_CSI, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG1,
+ bridge->r.data_lane0_reg1, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG2, DATA_MIPI_CSI, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG1,
+ bridge->r.data_lane1_reg1, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG2, DATA_MIPI_CSI, &ret);
+ cci_write(bridge->regmap, MIPID02_MODE_REG1,
+ MODE_NO_BYPASS | bridge->r.mode_reg1, &ret);
+ cci_write(bridge->regmap, MIPID02_MODE_REG2, bridge->r.mode_reg2, &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_ID_RREG, bridge->r.data_id_rreg,
+ &ret);
+ cci_write(bridge->regmap, MIPID02_DATA_SELECTION_CTRL,
+ bridge->r.data_selection_ctrl, &ret);
+ cci_write(bridge->regmap, MIPID02_PIX_WIDTH_CTRL,
+ bridge->r.pix_width_ctrl, &ret);
+ cci_write(bridge->regmap, MIPID02_PIX_WIDTH_CTRL_EMB,
+ bridge->r.pix_width_ctrl_emb, &ret);
if (ret)
goto error;
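mipid02_stream_enable() now reads the sink format from the active subdev state rather than from a driver-private copy guarded by a private mutex, which is what lets the bridge drop bridge->lock and bridge->fmt entirely. The general pattern, sketched outside the patch:

	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;

	state = v4l2_subdev_lock_and_get_active_state(sd);
	fmt = v4l2_subdev_state_get_format(state, 0 /* pad */);
	/* ... consume fmt while the state lock is held ... */
	v4l2_subdev_unlock_state(state);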
@@ -659,31 +543,43 @@ static int mipid02_s_stream(struct v4l2_subdev *sd, int enable)
struct i2c_client *client = bridge->i2c_client;
int ret = 0;
- dev_dbg(&client->dev, "%s : requested %d / current = %d", __func__,
- enable, bridge->streaming);
- mutex_lock(&bridge->lock);
-
- if (bridge->streaming == enable)
- goto out;
+ dev_dbg(&client->dev, "%s : requested %d\n", __func__, enable);
ret = enable ? mipid02_stream_enable(bridge) :
mipid02_stream_disable(bridge);
- if (!ret)
- bridge->streaming = enable;
-
-out:
- dev_dbg(&client->dev, "%s current now = %d / %d", __func__,
- bridge->streaming, ret);
- mutex_unlock(&bridge->lock);
+ if (ret)
+ dev_err(&client->dev, "failed to %s stream (%d)\n",
+ enable ? "enable" : "disable", ret);
return ret;
}
+static const struct v4l2_mbus_framefmt default_fmt = {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_FULL_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+ .width = 640,
+ .height = 480,
+};
+
+static int mipid02_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ *v4l2_subdev_state_get_format(state, MIPID02_SINK_0) = default_fmt;
+ /* MIPID02_SINK_1 isn't supported yet */
+ *v4l2_subdev_state_get_format(state, MIPID02_SOURCE) = default_fmt;
+
+ return 0;
+}
+
static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct mipid02_dev *bridge = to_mipid02_dev(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
int ret = 0;
switch (code->pad) {
@@ -694,10 +590,13 @@ static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
code->code = mipid02_supported_fmt_codes[code->index];
break;
case MIPID02_SOURCE:
- if (code->index == 0)
- code->code = serial_to_parallel_code(bridge->fmt.code);
- else
+ if (code->index == 0) {
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ MIPID02_SINK_0);
+ code->code = serial_to_parallel_code(sink_fmt->code);
+ } else {
ret = -EINVAL;
+ }
break;
default:
ret = -EINVAL;
@@ -706,122 +605,38 @@ static int mipid02_enum_mbus_code(struct v4l2_subdev *sd,
return ret;
}
-static int mipid02_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
- struct mipid02_dev *bridge = to_mipid02_dev(sd);
- struct i2c_client *client = bridge->i2c_client;
- struct v4l2_mbus_framefmt *fmt;
-
- dev_dbg(&client->dev, "%s probe %d", __func__, format->pad);
-
- if (format->pad >= MIPID02_PAD_NB)
- return -EINVAL;
- /* second CSI-2 pad not yet supported */
- if (format->pad == MIPID02_SINK_1)
- return -EINVAL;
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&bridge->sd, sd_state,
- format->pad);
- else
- fmt = &bridge->fmt;
-
- mutex_lock(&bridge->lock);
-
- *mbus_fmt = *fmt;
- /* code may need to be converted for source */
- if (format->pad == MIPID02_SOURCE)
- mbus_fmt->code = serial_to_parallel_code(mbus_fmt->code);
-
- mutex_unlock(&bridge->lock);
-
- return 0;
-}
-
-static void mipid02_set_fmt_source(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct mipid02_dev *bridge = to_mipid02_dev(sd);
-
- /* source pad mirror sink pad */
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- format->format = bridge->fmt;
- else
- format->format = *v4l2_subdev_get_try_format(sd, sd_state,
- MIPID02_SINK_0);
-
- /* but code may need to be converted */
- format->format.code = serial_to_parallel_code(format->format.code);
-
- /* only apply format for V4L2_SUBDEV_FORMAT_TRY case */
- if (format->which != V4L2_SUBDEV_FORMAT_TRY)
- return;
-
- *v4l2_subdev_get_try_format(sd, sd_state, MIPID02_SOURCE) =
- format->format;
-}
-
-static void mipid02_set_fmt_sink(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct mipid02_dev *bridge = to_mipid02_dev(sd);
- struct v4l2_subdev_format source_fmt;
- struct v4l2_mbus_framefmt *fmt;
-
- format->format.code = get_fmt_code(format->format.code);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
- else
- fmt = &bridge->fmt;
-
- *fmt = format->format;
-
- /*
- * Propagate the format change to the source pad, taking
- * care not to update the format pointer given back to user
- */
- source_fmt = *format;
- mipid02_set_fmt_source(sd, sd_state, &source_fmt);
-}
-
static int mipid02_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
+ struct v4l2_subdev_format *fmt)
{
struct mipid02_dev *bridge = to_mipid02_dev(sd);
struct i2c_client *client = bridge->i2c_client;
- int ret = 0;
+ struct v4l2_mbus_framefmt *pad_fmt;
- dev_dbg(&client->dev, "%s for %d", __func__, format->pad);
+ dev_dbg(&client->dev, "%s for %d", __func__, fmt->pad);
- if (format->pad >= MIPID02_PAD_NB)
- return -EINVAL;
/* second CSI-2 pad not yet supported */
- if (format->pad == MIPID02_SINK_1)
+ if (fmt->pad == MIPID02_SINK_1)
return -EINVAL;
- mutex_lock(&bridge->lock);
+ pad_fmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+ fmt->format.code = get_fmt_code(fmt->format.code);
- if (bridge->streaming) {
- ret = -EBUSY;
- goto error;
- }
+ /* code may need to be converted */
+ if (fmt->pad == MIPID02_SOURCE)
+ fmt->format.code = serial_to_parallel_code(fmt->format.code);
- if (format->pad == MIPID02_SOURCE)
- mipid02_set_fmt_source(sd, sd_state, format);
- else
- mipid02_set_fmt_sink(sd, sd_state, format);
+ *pad_fmt = fmt->format;
-error:
- mutex_unlock(&bridge->lock);
+ /* Propagate the format to the source pad when the sink pad is updated */
+ if (fmt->pad == MIPID02_SINK_0) {
+ pad_fmt = v4l2_subdev_state_get_format(sd_state,
+ MIPID02_SOURCE);
+ *pad_fmt = fmt->format;
+ pad_fmt->code = serial_to_parallel_code(fmt->format.code);
+ }
- return ret;
+ return 0;
}
static const struct v4l2_subdev_video_ops mipid02_video_ops = {
@@ -830,7 +645,7 @@ static const struct v4l2_subdev_video_ops mipid02_video_ops = {
static const struct v4l2_subdev_pad_ops mipid02_pad_ops = {
.enum_mbus_code = mipid02_enum_mbus_code,
- .get_fmt = mipid02_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mipid02_set_fmt,
};
@@ -839,6 +654,10 @@ static const struct v4l2_subdev_ops mipid02_subdev_ops = {
.pad = &mipid02_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mipid02_subdev_internal_ops = {
+ .init_state = mipid02_init_state,
+};
+
static const struct media_entity_operations mipid02_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -998,8 +817,6 @@ static int mipid02_probe(struct i2c_client *client)
if (!bridge)
return -ENOMEM;
- init_format(&bridge->fmt);
-
bridge->i2c_client = client;
v4l2_i2c_subdev_init(&bridge->sd, client, &mipid02_subdev_ops);
@@ -1031,9 +848,15 @@ static int mipid02_probe(struct i2c_client *client)
return ret;
}
- mutex_init(&bridge->lock);
+ /* Initialise the regmap for further cci access */
+ bridge->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(bridge->regmap))
+ return dev_err_probe(dev, PTR_ERR(bridge->regmap),
+ "failed to get cci regmap\n");
+
bridge->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
bridge->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ bridge->sd.internal_ops = &mipid02_subdev_internal_ops;
bridge->sd.entity.ops = &mipid02_subdev_entity_ops;
bridge->pad[0].flags = MEDIA_PAD_FL_SINK;
bridge->pad[1].flags = MEDIA_PAD_FL_SINK;
@@ -1042,7 +865,13 @@ static int mipid02_probe(struct i2c_client *client)
bridge->pad);
if (ret) {
dev_err(&client->dev, "pads init failed %d", ret);
- goto mutex_cleanup;
+ return ret;
+ }
+
+ ret = v4l2_subdev_init_finalize(&bridge->sd);
+ if (ret < 0) {
+ dev_err(dev, "subdev init error: %d\n", ret);
+ goto entity_cleanup;
}
/* enable clock, power and reset device if available */
@@ -1086,8 +915,6 @@ power_off:
mipid02_set_power_off(bridge);
entity_cleanup:
media_entity_cleanup(&bridge->sd.entity);
-mutex_cleanup:
- mutex_destroy(&bridge->lock);
return ret;
}
@@ -1102,7 +929,6 @@ static void mipid02_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&bridge->sd);
mipid02_set_power_off(bridge);
media_entity_cleanup(&bridge->sd.entity);
- mutex_destroy(&bridge->lock);
}
static const struct of_device_id mipid02_dt_ids[] = {
diff --git a/drivers/media/i2c/st-vgxy61.c b/drivers/media/i2c/st-vgxy61.c
index 5dbfb04b3..e4d37a197 100644
--- a/drivers/media/i2c/st-vgxy61.c
+++ b/drivers/media/i2c/st-vgxy61.c
@@ -21,6 +21,7 @@
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
@@ -780,8 +781,7 @@ static int vgxy61_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&sensor->lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
- format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
else
fmt = &sensor->fmt;
@@ -1289,7 +1289,7 @@ static int vgxy61_set_fmt(struct v4l2_subdev *sd,
goto out;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
*fmt = format->format;
} else if (sensor->current_mode != new_mode ||
sensor->fmt.code != format->format.code) {
@@ -1323,8 +1323,8 @@ out:
return ret;
}
-static int vgxy61_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int vgxy61_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct vgxy61_dev *sensor = to_vgxy61_dev(sd);
struct v4l2_subdev_format fmt = { 0 };
@@ -1403,6 +1403,7 @@ static int vgxy61_init_controls(struct vgxy61_dev *sensor)
const struct v4l2_ctrl_ops *ops = &vgxy61_ctrl_ops;
struct v4l2_ctrl_handler *hdl = &sensor->ctrl_handler;
const struct vgxy61_mode_info *cur_mode = sensor->current_mode;
+ struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl *ctrl;
int ret;
@@ -1457,6 +1458,14 @@ static int vgxy61_init_controls(struct vgxy61_dev *sensor)
goto free_ctrls;
}
+ ret = v4l2_fwnode_device_parse(&sensor->i2c_client->dev, &props);
+ if (ret)
+ goto free_ctrls;
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
+ if (ret)
+ goto free_ctrls;
+
sensor->sd.ctrl_handler = hdl;
return 0;
@@ -1465,12 +1474,16 @@ free_ctrls:
return ret;
}
+static const struct v4l2_subdev_core_ops vgxy61_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops vgxy61_video_ops = {
.s_stream = vgxy61_s_stream,
};
static const struct v4l2_subdev_pad_ops vgxy61_pad_ops = {
- .init_cfg = vgxy61_init_cfg,
.enum_mbus_code = vgxy61_enum_mbus_code,
.get_fmt = vgxy61_get_fmt,
.set_fmt = vgxy61_set_fmt,
@@ -1479,10 +1492,15 @@ static const struct v4l2_subdev_pad_ops vgxy61_pad_ops = {
};
static const struct v4l2_subdev_ops vgxy61_subdev_ops = {
+ .core = &vgxy61_core_ops,
.video = &vgxy61_video_ops,
.pad = &vgxy61_pad_ops,
};
+static const struct v4l2_subdev_internal_ops vgxy61_internal_ops = {
+ .init_state = vgxy61_init_state,
+};
+
static const struct media_entity_operations vgxy61_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -1843,7 +1861,9 @@ static int vgxy61_probe(struct i2c_client *client)
device_property_read_bool(dev, "st,strobe-gpios-polarity");
v4l2_i2c_subdev_init(&sensor->sd, client, &vgxy61_subdev_ops);
- sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.internal_ops = &vgxy61_internal_ops;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.ops = &vgxy61_subdev_entity_ops;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index ce612a47b..106de4271 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -427,7 +427,7 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
sink_state = v4l2_subdev_lock_and_get_active_state(sd);
- mbusfmt = v4l2_subdev_get_pad_format(sd, sink_state, TC358746_SINK);
+ mbusfmt = v4l2_subdev_state_get_format(sink_state, TC358746_SINK);
fmt = tc358746_get_format_by_code(TC358746_SINK, mbusfmt->code);
/* Self defined CSI user data type id's are not supported yet */
@@ -740,15 +740,15 @@ err_out:
return v4l2_subdev_call(src, video, s_stream, 0);
}
-static int tc358746_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int tc358746_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_pad_format(sd, state, TC358746_SINK);
+ fmt = v4l2_subdev_state_get_format(state, TC358746_SINK);
*fmt = tc358746_def_fmt;
- fmt = v4l2_subdev_get_pad_format(sd, state, TC358746_SOURCE);
+ fmt = v4l2_subdev_state_get_format(state, TC358746_SOURCE);
*fmt = tc358746_def_fmt;
fmt->code = tc358746_src_mbus_code(tc358746_def_fmt.code);
@@ -781,7 +781,7 @@ static int tc358746_set_fmt(struct v4l2_subdev *sd,
if (format->pad == TC358746_SOURCE)
return v4l2_subdev_get_fmt(sd, sd_state, format);
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, TC358746_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, TC358746_SINK);
fmt = tc358746_get_format_by_code(format->pad, format->format.code);
if (IS_ERR(fmt)) {
@@ -800,7 +800,7 @@ static int tc358746_set_fmt(struct v4l2_subdev *sd,
*sink_fmt = format->format;
- src_fmt = v4l2_subdev_get_pad_format(sd, sd_state, TC358746_SOURCE);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, TC358746_SOURCE);
*src_fmt = *sink_fmt;
src_fmt->code = tc358746_src_mbus_code(sink_fmt->code);
@@ -905,7 +905,7 @@ tc358746_link_validate(struct v4l2_subdev *sd, struct media_link *link,
return err;
sink_state = v4l2_subdev_lock_and_get_active_state(sd);
- mbusfmt = v4l2_subdev_get_pad_format(sd, sink_state, TC358746_SINK);
+ mbusfmt = v4l2_subdev_state_get_format(sink_state, TC358746_SINK);
/* Check the FIFO settings */
fmt = tc358746_get_format_by_code(TC358746_SINK, mbusfmt->code);
@@ -1038,7 +1038,6 @@ static const struct v4l2_subdev_video_ops tc358746_video_ops = {
};
static const struct v4l2_subdev_pad_ops tc358746_pad_ops = {
- .init_cfg = tc358746_init_cfg,
.enum_mbus_code = tc358746_enum_mbus_code,
.set_fmt = tc358746_set_fmt,
.get_fmt = v4l2_subdev_get_fmt,
@@ -1052,6 +1051,10 @@ static const struct v4l2_subdev_ops tc358746_ops = {
.pad = &tc358746_pad_ops,
};
+static const struct v4l2_subdev_internal_ops tc358746_internal_ops = {
+ .init_state = tc358746_init_state,
+};
+
static const struct media_entity_operations tc358746_entity_ops = {
.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
.link_validate = v4l2_subdev_link_validate,
@@ -1282,6 +1285,7 @@ tc358746_init_subdev(struct tc358746 *tc358746, struct i2c_client *client)
int err;
v4l2_i2c_subdev_init(sd, client, &tc358746_ops);
+ sd->internal_ops = &tc358746_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
sd->entity.ops = &tc358746_entity_ops;
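tc358746, like st-vgxy61 above and tda1997x below, also moves its default-format setup from the init_cfg pad op to the init_state internal op, which the core invokes for every newly allocated state, active or TRY. A minimal sketch with hypothetical foo_* names:

	static int foo_init_state(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *state)
	{
		struct v4l2_mbus_framefmt *fmt =
			v4l2_subdev_state_get_format(state, 0);

		*fmt = foo_default_fmt;	/* driver-defined default */

		return 0;
	}

	static const struct v4l2_subdev_internal_ops foo_internal_ops = {
		.init_state = foo_init_state,
	};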
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 325e99125..1ea703a99 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1734,13 +1734,13 @@ static const struct v4l2_subdev_video_ops tda1997x_video_ops = {
* v4l2_subdev_pad_ops
*/
-static int tda1997x_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int tda1997x_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct tda1997x_state *state = to_state(sd);
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
mf->code = state->mbus_codes[0];
return 0;
@@ -1792,7 +1792,7 @@ static int tda1997x_get_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
format->format.code = fmt->code;
} else
format->format.code = state->mbus_code;
@@ -1826,7 +1826,7 @@ static int tda1997x_set_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, format->pad);
*fmt = format->format;
} else {
int ret = tda1997x_setup_format(state, format->format.code);
@@ -1925,7 +1925,6 @@ static int tda1997x_enum_dv_timings(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops tda1997x_pad_ops = {
- .init_cfg = tda1997x_init_cfg,
.enum_mbus_code = tda1997x_enum_mbus_code,
.get_fmt = tda1997x_get_format,
.set_fmt = tda1997x_set_format,
@@ -2047,6 +2046,10 @@ static const struct v4l2_subdev_ops tda1997x_subdev_ops = {
.pad = &tda1997x_pad_ops,
};
+static const struct v4l2_subdev_internal_ops tda1997x_internal_ops = {
+ .init_state = tda1997x_init_state,
+};
+
/* -----------------------------------------------------------------------------
* v4l2_controls
*/
@@ -2588,6 +2591,7 @@ static int tda1997x_probe(struct i2c_client *client)
/* initialize subdev */
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &tda1997x_subdev_ops);
+ sd->internal_ops = &tda1997x_internal_ops;
snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
id->name, i2c_adapter_id(client->adapter),
client->addr);
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
new file mode 100644
index 000000000..280688751
--- /dev/null
+++ b/drivers/media/i2c/thp7312.c
@@ -0,0 +1,2256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 THine Electronics, Inc.
+ * Copyright (C) 2023 Ideas on Board Oy
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include <uapi/linux/thp7312.h>
+
+/* ISP registers */
+
+#define THP7312_REG_FIRMWARE_VERSION_1 CCI_REG8(0xf000)
+#define THP7312_REG_CAMERA_STATUS CCI_REG8(0xf001)
+#define THP7312_REG_FIRMWARE_VERSION_2 CCI_REG8(0xf005)
+#define THP7312_REG_SET_OUTPUT_ENABLE CCI_REG8(0xf008)
+#define THP7312_OUTPUT_ENABLE 0x01
+#define THP7312_OUTPUT_DISABLE 0x00
+#define THP7312_REG_SET_OUTPUT_COLOR_COMPRESSION CCI_REG8(0xf009)
+#define THP7312_REG_SET_OUTPUT_COLOR_UYVY 0x00
+#define THP7312_REG_SET_OUTPUT_COLOR_YUY2 0x04
+#define THP7312_REG_FLIP_MIRROR CCI_REG8(0xf00c)
+#define THP7312_REG_FLIP_MIRROR_FLIP BIT(0)
+#define THP7312_REG_FLIP_MIRROR_MIRROR BIT(1)
+#define THP7312_REG_VIDEO_IMAGE_SIZE CCI_REG8(0xf00d)
+#define THP7312_VIDEO_IMAGE_SIZE_640x360 0x52
+#define THP7312_VIDEO_IMAGE_SIZE_640x460 0x03
+#define THP7312_VIDEO_IMAGE_SIZE_1280x720 0x0a
+#define THP7312_VIDEO_IMAGE_SIZE_1920x1080 0x0b
+#define THP7312_VIDEO_IMAGE_SIZE_3840x2160 0x0d
+#define THP7312_VIDEO_IMAGE_SIZE_4160x3120 0x14
+#define THP7312_VIDEO_IMAGE_SIZE_2016x1512 0x20
+#define THP7312_VIDEO_IMAGE_SIZE_2048x1536 0x21
+#define THP7312_REG_VIDEO_FRAME_RATE_MODE CCI_REG8(0xf00f)
+#define THP7312_VIDEO_FRAME_RATE_MODE1 0x80
+#define THP7312_VIDEO_FRAME_RATE_MODE2 0x81
+#define THP7312_VIDEO_FRAME_RATE_MODE3 0x82
+#define THP7312_REG_SET_DRIVING_MODE CCI_REG8(0xf010)
+#define THP7312_REG_DRIVING_MODE_STATUS CCI_REG8(0xf011)
+#define THP7312_REG_JPEG_COMPRESSION_FACTOR CCI_REG8(0xf01b)
+#define THP7312_REG_AE_EXPOSURE_COMPENSATION CCI_REG8(0xf022)
+#define THP7312_REG_AE_FLICKER_MODE CCI_REG8(0xf023)
+#define THP7312_AE_FLICKER_MODE_50 0x00
+#define THP7312_AE_FLICKER_MODE_60 0x01
+#define THP7312_AE_FLICKER_MODE_DISABLE 0x80
+#define THP7312_REG_AE_FIX_FRAME_RATE CCI_REG8(0xf02e)
+#define THP7312_REG_MANUAL_WB_RED_GAIN CCI_REG8(0xf036)
+#define THP7312_REG_MANUAL_WB_BLUE_GAIN CCI_REG8(0xf037)
+#define THP7312_REG_WB_MODE CCI_REG8(0xf039)
+#define THP7312_WB_MODE_AUTO 0x00
+#define THP7312_WB_MODE_MANUAL 0x11
+#define THP7312_REG_MANUAL_FOCUS_POSITION CCI_REG16(0xf03c)
+#define THP7312_REG_AF_CONTROL CCI_REG8(0xf040)
+#define THP7312_REG_AF_CONTROL_AF 0x01
+#define THP7312_REG_AF_CONTROL_MANUAL 0x10
+#define THP7312_REG_AF_CONTROL_LOCK 0x80
+#define THP7312_REG_AF_SETTING CCI_REG8(0xf041)
+#define THP7312_REG_AF_SETTING_ONESHOT_CONTRAST 0x00
+#define THP7312_REG_AF_SETTING_ONESHOT_PDAF 0x40
+#define THP7312_REG_AF_SETTING_ONESHOT_HYBRID 0x80
+#define THP7312_REG_AF_SETTING_CONTINUOUS_CONTRAST 0x30
+#define THP7312_REG_AF_SETTING_CONTINUOUS_PDAF 0x70
+#define THP7312_REG_AF_SETTING_CONTINUOUS_HYBRID 0xf0
+#define THP7312_REG_AF_SUPPORT CCI_REG8(0xf043)
+#define THP7312_AF_SUPPORT_PDAF BIT(1)
+#define THP7312_AF_SUPPORT_CONTRAST BIT(0)
+#define THP7312_REG_SATURATION CCI_REG8(0xf052)
+#define THP7312_REG_SHARPNESS CCI_REG8(0xf053)
+#define THP7312_REG_BRIGHTNESS CCI_REG8(0xf056)
+#define THP7312_REG_CONTRAST CCI_REG8(0xf057)
+#define THP7312_REG_NOISE_REDUCTION CCI_REG8(0xf059)
+#define THP7312_REG_NOISE_REDUCTION_FIXED BIT(7)
+
+#define TH7312_REG_CUSTOM_MIPI_SET CCI_REG8(0xf0f6)
+#define TH7312_REG_CUSTOM_MIPI_STATUS CCI_REG8(0xf0f7)
+#define TH7312_REG_CUSTOM_MIPI_RD CCI_REG8(0xf0f8)
+#define TH7312_REG_CUSTOM_MIPI_TD CCI_REG8(0xf0f9)
+
+/*
+ * Firmware update registers. These use a different address space from the
+ * normal-operation ISP registers.
+ */
+
+#define THP7312_REG_FW_DRIVABILITY CCI_REG32(0xd65c)
+#define THP7312_REG_FW_DEST_BANK_ADDR CCI_REG32(0xff08)
+#define THP7312_REG_FW_VERIFY_RESULT CCI_REG8(0xff60)
+#define THP7312_REG_FW_RESET_FLASH CCI_REG8(0xff61)
+#define THP7312_REG_FW_MEMORY_IO_SETTING CCI_REG8(0xff62)
+#define THP7312_FW_MEMORY_IO_GPIO0 1
+#define THP7312_FW_MEMORY_IO_GPIO1 0
+#define THP7312_REG_FW_CRC_RESULT CCI_REG32(0xff64)
+#define THP7312_REG_FW_STATUS CCI_REG8(0xfffc)
+
+#define THP7312_FW_VERSION(major, minor) (((major) << 8) | (minor))
+#define THP7312_FW_VERSION_MAJOR(v) ((v) >> 8)
+#define THP7312_FW_VERSION_MINOR(v) ((v) & 0xff)
+
+enum thp7312_focus_method {
+ THP7312_FOCUS_METHOD_CONTRAST,
+ THP7312_FOCUS_METHOD_PDAF,
+ THP7312_FOCUS_METHOD_HYBRID,
+};
+
+/*
+ * enum thp7312_focus_state - State of the focus handler
+ *
+ * @THP7312_FOCUS_STATE_MANUAL: Manual focus, controlled through the
+ * V4L2_CID_FOCUS_ABSOLUTE control
+ * @THP7312_FOCUS_STATE_AUTO: Continuous auto-focus
+ * @THP7312_FOCUS_STATE_LOCKED: Lock the focus to a fixed position. This state
+ * is entered when switching from auto to manual mode.
+ * @THP7312_FOCUS_STATE_ONESHOT: One-shot auto-focus
+ *
+ * Valid transitions are as follows:
+ *
+ * digraph fsm {
+ * node [shape=circle];
+ *
+ * manual [label="MANUAL"];
+ * auto [label="AUTO"];
+ * locked [label="LOCKED"];
+ * oneshot [label="ONESHOT"];
+ *
+ * manual -> auto [label="FOCUS_AUTO <- true"]
+ * locked -> auto [label="FOCUS_AUTO <- true"]
+ * oneshot -> auto [label="FOCUS_AUTO <- true"]
+ * auto -> locked [label="FOCUS_AUTO <- false"]
+ *
+ * locked -> manual [label="FOCUS_ABSOLUTE <- *"]
+ * oneshot -> manual [label="FOCUS_ABSOLUTE <- *"]
+ *
+ * manual -> oneshot [label="FOCUS_START <- *"]
+ * locked -> oneshot [label="FOCUS_START <- *"]
+ * }
+ */
+enum thp7312_focus_state {
+ THP7312_FOCUS_STATE_MANUAL,
+ THP7312_FOCUS_STATE_AUTO,
+ THP7312_FOCUS_STATE_LOCKED,
+ THP7312_FOCUS_STATE_ONESHOT,
+};
+
+enum thp7312_boot_mode {
+ THP7312_BOOT_MODE_2WIRE_SLAVE = 0,
+ THP7312_BOOT_MODE_SPI_MASTER = 1,
+};
+
+struct thp7312_frame_rate {
+ u32 fps;
+ u32 link_freq;
+ u8 reg_frame_rate_mode;
+};
+
+struct thp7312_mode_info {
+ u32 width;
+ u32 height;
+ u8 reg_image_size;
+ const struct thp7312_frame_rate *rates;
+};
+
+static const u32 thp7312_colour_fmts[] = {
+ MEDIA_BUS_FMT_YUYV8_1X16,
+};
+
+/* regulator supplies */
+static const char * const thp7312_supply_name[] = {
+ "vddcore",
+ "vhtermrx",
+ "vddtx",
+ "vddhost",
+ "vddcmos",
+ "vddgpio-0",
+ "vddgpio-1",
+};
+
+static const struct thp7312_mode_info thp7312_mode_info_data[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .reg_image_size = THP7312_VIDEO_IMAGE_SIZE_1920x1080,
+ .rates = (const struct thp7312_frame_rate[]) {
+ { 30, 300000000, 0x81 },
+ { 60, 387500000, 0x82 },
+ { 0 }
+ },
+ }, {
+ .width = 2048,
+ .height = 1536,
+ .reg_image_size = THP7312_VIDEO_IMAGE_SIZE_2048x1536,
+ .rates = (const struct thp7312_frame_rate[]) {
+ { 30, 300000000, 0x81 },
+ { 0 }
+ }
+ }, {
+ .width = 3840,
+ .height = 2160,
+ .reg_image_size = THP7312_VIDEO_IMAGE_SIZE_3840x2160,
+ .rates = (const struct thp7312_frame_rate[]) {
+ { 30, 600000000, 0x81 },
+ { 0 }
+ },
+ }, {
+ .width = 4160,
+ .height = 3120,
+ .reg_image_size = THP7312_VIDEO_IMAGE_SIZE_4160x3120,
+ .rates = (const struct thp7312_frame_rate[]) {
+ { 20, 600000000, 0x81 },
+ { 0 }
+ },
+ },
+};
+
+struct thp7312_device;
+
+struct thp7312_sensor_info {
+ const char *model;
+};
+
+struct thp7312_sensor {
+ const struct thp7312_sensor_info *info;
+ u8 lane_remap;
+};
+
+struct thp7312_device {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(thp7312_supply_name)];
+ struct clk *iclk;
+
+ u8 lane_remap;
+
+ struct thp7312_sensor sensors[1];
+
+ enum thp7312_boot_mode boot_mode;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ bool ctrls_applied;
+
+ s64 link_freq;
+
+ struct {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+
+ struct {
+ struct v4l2_ctrl *focus_auto;
+ struct v4l2_ctrl *focus_absolute;
+ struct v4l2_ctrl *focus_start;
+ struct v4l2_ctrl *focus_method;
+ };
+
+ enum thp7312_focus_state focus_state;
+
+ struct {
+ struct v4l2_ctrl *noise_reduction_auto;
+ struct v4l2_ctrl *noise_reduction_absolute;
+ };
+
+ /* Lock to protect fw_cancel */
+ struct mutex fw_lock;
+ struct fw_upload *fwl;
+ u8 *fw_write_buf;
+ bool fw_cancel;
+
+ u16 fw_version;
+};
+
+static const struct thp7312_sensor_info thp7312_sensor_info[] = {
+ {
+ .model = "sony,imx258",
+ },
+};
+
+static inline struct thp7312_device *to_thp7312_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct thp7312_device, sd);
+}
+
+static const struct thp7312_mode_info *
+thp7312_find_mode(unsigned int width, unsigned int height, bool nearest)
+{
+ const struct thp7312_mode_info *mode;
+
+ mode = v4l2_find_nearest_size(thp7312_mode_info_data,
+ ARRAY_SIZE(thp7312_mode_info_data),
+ width, height, width, height);
+
+ if (!nearest && (mode->width != width || mode->height != height))
+ return NULL;
+
+ return mode;
+}
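v4l2_find_nearest_size() picks the array entry whose width/height members (named by the third and fourth macro arguments) are closest to the requested size. An illustrative call against the table above:

	/* A 1900x1000 request resolves to the 1920x1080 entry. */
	mode = v4l2_find_nearest_size(thp7312_mode_info_data,
				      ARRAY_SIZE(thp7312_mode_info_data),
				      width, height, 1900, 1000);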
+
+static const struct thp7312_frame_rate *
+thp7312_find_rate(const struct thp7312_mode_info *mode, unsigned int fps,
+ bool nearest)
+{
+ const struct thp7312_frame_rate *best_rate = NULL;
+ const struct thp7312_frame_rate *rate;
+ unsigned int best_delta = UINT_MAX;
+
+ if (!mode)
+ return NULL;
+
+ for (rate = mode->rates; rate->fps && best_delta; ++rate) {
+ unsigned int delta = abs(rate->fps - fps);
+
+ if (delta <= best_delta) {
+ best_delta = delta;
+ best_rate = rate;
+ }
+ }
+
+ if (!nearest && best_delta)
+ return NULL;
+
+ return best_rate;
+}
+
+/* -----------------------------------------------------------------------------
+ * Device Access & Configuration
+ */
+
+#define thp7312_read_poll_timeout(dev, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ int __ret, __err; \
+ __ret = read_poll_timeout(cci_read, __err, __err || (cond), sleep_us, \
+ timeout_us, false, (dev)->regmap, addr, \
+ &(val), NULL); \
+ __ret ? : __err; \
+})
+
+static int thp7312_map_data_lanes(u8 *lane_remap, const u8 *lanes, u8 num_lanes)
+{
+ u8 used_lanes = 0;
+ u8 val = 0;
+ unsigned int i;
+
+ /*
+ * The value that we write to the register is the index in the
+ * data-lanes array, so we need to do a conversion. Do this in the same
+ * pass as validating data-lanes.
+ */
+ for (i = 0; i < num_lanes; i++) {
+ if (lanes[i] < 1 || lanes[i] > 4)
+ return -EINVAL;
+
+ if (used_lanes & (BIT(lanes[i])))
+ return -EINVAL;
+
+ used_lanes |= BIT(lanes[i]);
+
+ /*
+ * data-lanes is 1-indexed while the field position in the
+ * register is 0-indexed.
+ */
+ val |= i << ((lanes[i] - 1) * 2);
+ }
+
+ *lane_remap = val;
+
+ return 0;
+}
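A worked example of the remap encoding, mirroring the loop above (illustrative only): with a device-tree property data-lanes = <2 1>, logical lane 0 maps to physical lane 2 and logical lane 1 to physical lane 1:

	static const u8 lanes[] = { 2, 1 };	/* data-lanes = <2 1> */
	u8 remap;

	thp7312_map_data_lanes(&remap, lanes, ARRAY_SIZE(lanes));
	/*
	 * i = 0, lanes[0] = 2: val |= 0 << ((2 - 1) * 2)  -> 0x00
	 * i = 1, lanes[1] = 1: val |= 1 << ((1 - 1) * 2)  -> 0x01
	 *
	 * remap == 0x01: each 2-bit field, indexed by physical lane - 1,
	 * holds the logical lane index.
	 */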
+
+static int thp7312_set_mipi_lanes(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ int ret = 0;
+ u64 val;
+
+ cci_write(thp7312->regmap, TH7312_REG_CUSTOM_MIPI_RD,
+ thp7312->sensors[0].lane_remap, &ret);
+ cci_write(thp7312->regmap, TH7312_REG_CUSTOM_MIPI_TD,
+ thp7312->lane_remap, &ret);
+ cci_write(thp7312->regmap, TH7312_REG_CUSTOM_MIPI_SET, 1, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = thp7312_read_poll_timeout(thp7312, TH7312_REG_CUSTOM_MIPI_STATUS,
+ val, val == 0x00, 100000, 2000000);
+ if (ret) {
+ dev_err(dev, "Failed to poll MIPI lane status: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int thp7312_change_mode(struct thp7312_device *thp7312,
+ const struct thp7312_mode_info *mode,
+ const struct thp7312_frame_rate *rate)
+{
+ struct device *dev = thp7312->dev;
+ u64 val = 0;
+ int ret;
+
+ ret = thp7312_read_poll_timeout(thp7312, THP7312_REG_CAMERA_STATUS, val,
+ val == 0x80, 20000, 200000);
+ if (ret < 0) {
+ dev_err(dev, "%s(): failed to poll ISP: %d\n", __func__, ret);
+ return ret;
+ }
+
+ cci_write(thp7312->regmap, THP7312_REG_VIDEO_IMAGE_SIZE,
+ mode->reg_image_size, &ret);
+ cci_write(thp7312->regmap, THP7312_REG_VIDEO_FRAME_RATE_MODE,
+ rate->reg_frame_rate_mode, &ret);
+ cci_write(thp7312->regmap, THP7312_REG_JPEG_COMPRESSION_FACTOR, 0x5e,
+ &ret);
+ cci_write(thp7312->regmap, THP7312_REG_SET_DRIVING_MODE, 0x01, &ret);
+
+ if (ret)
+ return ret;
+
+ ret = thp7312_read_poll_timeout(thp7312, THP7312_REG_DRIVING_MODE_STATUS,
+ val, val == 0x01, 20000, 100000);
+ if (ret < 0) {
+ dev_err(dev, "%s(): failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int thp7312_set_framefmt(struct thp7312_device *thp7312,
+ struct v4l2_mbus_framefmt *format)
+{
+ u8 val;
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ /* YUV422, UYVY */
+ val = THP7312_REG_SET_OUTPUT_COLOR_UYVY;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ /* YUV422, YUYV */
+ val = THP7312_REG_SET_OUTPUT_COLOR_YUY2;
+ break;
+ default:
+ /* Should never happen */
+ return -EINVAL;
+ }
+
+ return cci_write(thp7312->regmap,
+ THP7312_REG_SET_OUTPUT_COLOR_COMPRESSION, val, NULL);
+}
+
+static int thp7312_init_mode(struct thp7312_device *thp7312,
+ struct v4l2_subdev_state *sd_state)
+{
+ const struct thp7312_mode_info *mode;
+ const struct thp7312_frame_rate *rate;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_fract *interval;
+ int ret;
+
+ /*
+ * TODO: The mode and rate should be cached in the subdev state once
+ * support for extending states becomes available.
+ */
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ interval = v4l2_subdev_state_get_interval(sd_state, 0);
+
+ mode = thp7312_find_mode(fmt->width, fmt->height, false);
+ rate = thp7312_find_rate(mode, interval->denominator, false);
+
+ if (WARN_ON(!mode || !rate))
+ return -EINVAL;
+
+ ret = thp7312_set_framefmt(thp7312, fmt);
+ if (ret)
+ return ret;
+
+ return thp7312_change_mode(thp7312, mode, rate);
+}
+
+static int thp7312_stream_enable(struct thp7312_device *thp7312, bool enable)
+{
+ return cci_write(thp7312->regmap, THP7312_REG_SET_OUTPUT_ENABLE,
+ enable ? THP7312_OUTPUT_ENABLE : THP7312_OUTPUT_DISABLE,
+ NULL);
+}
+
+static int thp7312_check_status_stream_mode(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ u64 status = 0;
+ int ret;
+
+ while (status != 0x80) {
+ ret = cci_read(thp7312->regmap, THP7312_REG_CAMERA_STATUS,
+ &status, NULL);
+ if (ret)
+ return ret;
+
+ if (status == 0x80) {
+ dev_dbg(dev, "Camera initialization done\n");
+ return 0;
+ }
+
+ if (status != 0x00) {
+ dev_err(dev, "Invalid camera status %llx\n", status);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Camera initializing...\n");
+ usleep_range(70000, 80000);
+ }
+
+ return 0;
+}
+
+static void thp7312_reset(struct thp7312_device *thp7312)
+{
+ unsigned long rate;
+
+ gpiod_set_value_cansleep(thp7312->reset_gpio, 1);
+
+ /*
+ * The minimum reset duration is 8 clock cycles; make it 10 to provide
+ * a safety margin.
+ */
+ rate = clk_get_rate(thp7312->iclk);
+ fsleep(DIV_ROUND_UP(10 * USEC_PER_SEC, rate));
+
+ gpiod_set_value_cansleep(thp7312->reset_gpio, 0);
+
+ /*
+ * TODO: The documentation states that the device needs 2ms to
+ * initialize after reset is deasserted. It then proceeds to load the
+ * firmware from the flash memory, which takes an unspecified amount of
+ * time. Check if this delay could be reduced.
+ */
+ fsleep(300000);
+}
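The delay arithmetic above, with illustrative numbers: at a 24 MHz input clock, ten cycles last 10 / 24000000 s, roughly 0.42 us, so DIV_ROUND_UP(10 * USEC_PER_SEC, 24000000) evaluates to 1 and fsleep() sleeps for at least 1 us. Rounding up guarantees the sleep can never be shorter than the required cycle count:

	/* rate = 24000000: DIV_ROUND_UP(10000000, 24000000) == 1 us */
	fsleep(DIV_ROUND_UP(10 * USEC_PER_SEC, 24000000));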
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static void __thp7312_power_off(struct thp7312_device *thp7312)
+{
+ regulator_bulk_disable(ARRAY_SIZE(thp7312->supplies), thp7312->supplies);
+ clk_disable_unprepare(thp7312->iclk);
+}
+
+static void thp7312_power_off(struct thp7312_device *thp7312)
+{
+ __thp7312_power_off(thp7312);
+}
+
+static int __thp7312_power_on(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(thp7312->supplies),
+ thp7312->supplies);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(thp7312->iclk);
+ if (ret < 0) {
+ dev_err(dev, "clk prepare enable failed\n");
+ regulator_bulk_disable(ARRAY_SIZE(thp7312->supplies),
+ thp7312->supplies);
+ return ret;
+ }
+
+ /*
+	 * We cannot assume that powering the chip off and on again will reset
+	 * it, so assert the reset signal explicitly on power up.
+ */
+ thp7312_reset(thp7312);
+
+ return 0;
+}
+
+static int thp7312_power_on(struct thp7312_device *thp7312)
+{
+ int ret;
+
+ ret = __thp7312_power_on(thp7312);
+ if (ret < 0)
+ return ret;
+
+ ret = thp7312_check_status_stream_mode(thp7312);
+ if (ret < 0)
+ goto error;
+
+ ret = thp7312_set_mipi_lanes(thp7312);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ thp7312_power_off(thp7312);
+ return ret;
+}
+
+static int __maybe_unused thp7312_pm_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct thp7312_device *thp7312 = to_thp7312_dev(sd);
+
+ thp7312_power_off(thp7312);
+
+ thp7312->ctrls_applied = false;
+
+ return 0;
+}
+
+static int __maybe_unused thp7312_pm_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct thp7312_device *thp7312 = to_thp7312_dev(sd);
+
+ return thp7312_power_on(thp7312);
+}
+
+static const struct dev_pm_ops thp7312_pm_ops = {
+ SET_RUNTIME_PM_OPS(thp7312_pm_runtime_suspend,
+ thp7312_pm_runtime_resume, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdev Operations
+ */
+
+static bool thp7312_find_bus_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(thp7312_colour_fmts); ++i) {
+ if (thp7312_colour_fmts[i] == code)
+ return true;
+ }
+
+ return false;
+}
+
+static int thp7312_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(thp7312_colour_fmts))
+ return -EINVAL;
+
+ code->code = thp7312_colour_fmts[code->index];
+
+ return 0;
+}
+
+static int thp7312_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (!thp7312_find_bus_code(fse->code))
+ return -EINVAL;
+
+ if (fse->index >= ARRAY_SIZE(thp7312_mode_info_data))
+ return -EINVAL;
+
+ fse->min_width = thp7312_mode_info_data[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = thp7312_mode_info_data[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int thp7312_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ const struct thp7312_frame_rate *rate;
+ const struct thp7312_mode_info *mode;
+ unsigned int index = fie->index;
+
+ if (!thp7312_find_bus_code(fie->code))
+ return -EINVAL;
+
+ mode = thp7312_find_mode(fie->width, fie->height, false);
+ if (!mode)
+ return -EINVAL;
+
+ for (rate = mode->rates; rate->fps; ++rate, --index) {
+ if (!index) {
+ fie->interval.numerator = 1;
+ fie->interval.denominator = rate->fps;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int thp7312_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct thp7312_device *thp7312 = to_thp7312_dev(sd);
+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_fract *interval;
+ const struct thp7312_mode_info *mode;
+
+ if (!thp7312_find_bus_code(mbus_fmt->code))
+ mbus_fmt->code = thp7312_colour_fmts[0];
+
+ mode = thp7312_find_mode(mbus_fmt->width, mbus_fmt->height, true);
+
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+
+ fmt->code = mbus_fmt->code;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+
+ *mbus_fmt = *fmt;
+
+ interval = v4l2_subdev_state_get_interval(sd_state, 0);
+ interval->numerator = 1;
+ interval->denominator = mode->rates[0].fps;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ thp7312->link_freq = mode->rates[0].link_freq;
+
+ return 0;
+}
+
+static int thp7312_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct thp7312_device *thp7312 = to_thp7312_dev(sd);
+ const struct thp7312_mode_info *mode;
+ const struct thp7312_frame_rate *rate;
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_fract *interval;
+ unsigned int fps;
+
+	/* Avoid divisions by 0, pick the highest frame rate if the interval is 0. */
+ fps = fi->interval.numerator
+ ? DIV_ROUND_CLOSEST(fi->interval.denominator, fi->interval.numerator)
+ : UINT_MAX;
+
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ mode = thp7312_find_mode(fmt->width, fmt->height, false);
+ rate = thp7312_find_rate(mode, fps, true);
+
+ interval = v4l2_subdev_state_get_interval(sd_state, 0);
+ interval->numerator = 1;
+ interval->denominator = rate->fps;
+
+ if (fi->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ thp7312->link_freq = rate->link_freq;
+
+ fi->interval = *interval;
+
+ return 0;
+}
+
+static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct thp7312_device *thp7312 = to_thp7312_dev(sd);
+ struct v4l2_subdev_state *sd_state;
+ int ret;
+
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ if (!enable) {
+ thp7312_stream_enable(thp7312, false);
+
+ pm_runtime_mark_last_busy(thp7312->dev);
+ pm_runtime_put_autosuspend(thp7312->dev);
+
+ v4l2_subdev_unlock_state(sd_state);
+
+ return 0;
+ }
+
+ ret = pm_runtime_resume_and_get(thp7312->dev);
+ if (ret)
+ goto finish_unlock;
+
+ ret = thp7312_init_mode(thp7312, sd_state);
+ if (ret)
+ goto finish_pm;
+
+ if (!thp7312->ctrls_applied) {
+ ret = __v4l2_ctrl_handler_setup(&thp7312->ctrl_handler);
+ if (ret)
+ goto finish_pm;
+
+ thp7312->ctrls_applied = true;
+ }
+
+ ret = thp7312_stream_enable(thp7312, true);
+ if (ret)
+ goto finish_pm;
+
+ goto finish_unlock;
+
+finish_pm:
+ pm_runtime_mark_last_busy(thp7312->dev);
+ pm_runtime_put_autosuspend(thp7312->dev);
+finish_unlock:
+ v4l2_subdev_unlock_state(sd_state);
+
+ return ret;
+}
+
+static int thp7312_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ const struct thp7312_mode_info *default_mode = &thp7312_mode_info_data[0];
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_fract *interval;
+
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ interval = v4l2_subdev_state_get_interval(sd_state, 0);
+
+ /*
+	 * The default init sequence initializes the THP7312 to output
+	 * YUV422 (YUYV) VGA @ 30fps.
+ */
+ fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+ fmt->width = default_mode->width;
+ fmt->height = default_mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+
+ interval->numerator = 1;
+ interval->denominator = default_mode->rates[0].fps;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops thp7312_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops thp7312_video_ops = {
+ .s_stream = thp7312_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops thp7312_pad_ops = {
+ .enum_mbus_code = thp7312_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = thp7312_set_fmt,
+ .get_frame_interval = v4l2_subdev_get_frame_interval,
+ .set_frame_interval = thp7312_set_frame_interval,
+ .enum_frame_size = thp7312_enum_frame_size,
+ .enum_frame_interval = thp7312_enum_frame_interval,
+};
+
+static const struct v4l2_subdev_ops thp7312_subdev_ops = {
+ .core = &thp7312_core_ops,
+ .video = &thp7312_video_ops,
+ .pad = &thp7312_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops thp7312_internal_ops = {
+ .init_state = thp7312_init_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Control Operations
+ */
+
+static inline struct thp7312_device *to_thp7312_from_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct thp7312_device, ctrl_handler);
+}
+
+/* 0: 3000cm, 18: 8cm */
+static const u16 thp7312_focus_values[] = {
+ 3000, 1000, 600, 450, 350,
+ 290, 240, 200, 170, 150,
+ 140, 130, 120, 110, 100,
+ 93, 87, 83, 80,
+};
+
+static int thp7312_set_focus(struct thp7312_device *thp7312)
+{
+ enum thp7312_focus_state new_state = thp7312->focus_state;
+ bool continuous;
+ u8 af_control;
+ u8 af_setting;
+ int ret = 0;
+
+ /* Start by programming the manual focus position if it has changed. */
+ if (thp7312->focus_absolute->is_new) {
+ unsigned int value;
+
+ value = thp7312_focus_values[thp7312->focus_absolute->val];
+
+ ret = cci_write(thp7312->regmap,
+ THP7312_REG_MANUAL_FOCUS_POSITION, value, NULL);
+ if (ret)
+ return ret;
+ }
+
+	/*
+	 * Calculate the new focus state, based on the focus_auto, focus_start
+	 * and focus_absolute control values and on the current state.
+	 */
+ switch (thp7312->focus_state) {
+ case THP7312_FOCUS_STATE_MANUAL:
+ default:
+ if (thp7312->focus_auto->val)
+ new_state = THP7312_FOCUS_STATE_AUTO;
+ else if (thp7312->focus_start->is_new)
+ new_state = THP7312_FOCUS_STATE_ONESHOT;
+ break;
+
+ case THP7312_FOCUS_STATE_AUTO:
+ if (!thp7312->focus_auto->val)
+ new_state = THP7312_FOCUS_STATE_LOCKED;
+ break;
+
+ case THP7312_FOCUS_STATE_LOCKED:
+ if (thp7312->focus_auto->val)
+ new_state = THP7312_FOCUS_STATE_AUTO;
+ else if (thp7312->focus_start->is_new)
+ new_state = THP7312_FOCUS_STATE_ONESHOT;
+ else if (thp7312->focus_absolute->is_new)
+ new_state = THP7312_FOCUS_STATE_MANUAL;
+ break;
+
+ case THP7312_FOCUS_STATE_ONESHOT:
+ if (thp7312->focus_auto->val)
+ new_state = THP7312_FOCUS_STATE_AUTO;
+ else if (thp7312->focus_start->is_new)
+ new_state = THP7312_FOCUS_STATE_ONESHOT;
+ else if (thp7312->focus_absolute->is_new)
+ new_state = THP7312_FOCUS_STATE_MANUAL;
+ break;
+ }
+
+ /*
+ * If neither the state nor the focus method has changed, and no new
+ * one-shot focus is requested, there's nothing new to program to the
+ * hardware.
+ */
+ if (thp7312->focus_state == new_state &&
+ !thp7312->focus_method->is_new && !thp7312->focus_start->is_new)
+ return 0;
+
+ continuous = new_state == THP7312_FOCUS_STATE_MANUAL ||
+ new_state == THP7312_FOCUS_STATE_ONESHOT;
+
+ switch (thp7312->focus_method->val) {
+ case THP7312_FOCUS_METHOD_CONTRAST:
+ default:
+ af_setting = continuous
+ ? THP7312_REG_AF_SETTING_CONTINUOUS_CONTRAST
+ : THP7312_REG_AF_SETTING_ONESHOT_CONTRAST;
+ break;
+ case THP7312_FOCUS_METHOD_PDAF:
+ af_setting = continuous
+ ? THP7312_REG_AF_SETTING_CONTINUOUS_PDAF
+ : THP7312_REG_AF_SETTING_ONESHOT_PDAF;
+ break;
+ case THP7312_FOCUS_METHOD_HYBRID:
+ af_setting = continuous
+ ? THP7312_REG_AF_SETTING_CONTINUOUS_HYBRID
+ : THP7312_REG_AF_SETTING_ONESHOT_HYBRID;
+ break;
+ }
+
+ switch (new_state) {
+ case THP7312_FOCUS_STATE_MANUAL:
+ default:
+ af_control = THP7312_REG_AF_CONTROL_MANUAL;
+ break;
+ case THP7312_FOCUS_STATE_AUTO:
+ case THP7312_FOCUS_STATE_ONESHOT:
+ af_control = THP7312_REG_AF_CONTROL_AF;
+ break;
+ case THP7312_FOCUS_STATE_LOCKED:
+ af_control = THP7312_REG_AF_CONTROL_LOCK;
+ break;
+ }
+
+ cci_write(thp7312->regmap, THP7312_REG_AF_SETTING, af_setting, &ret);
+
+ if (new_state == THP7312_FOCUS_STATE_MANUAL &&
+ (thp7312->focus_state == THP7312_FOCUS_STATE_AUTO ||
+ thp7312->focus_state == THP7312_FOCUS_STATE_ONESHOT)) {
+ /* When switching to manual state, lock AF first. */
+ cci_write(thp7312->regmap, THP7312_REG_AF_CONTROL,
+ THP7312_REG_AF_CONTROL_LOCK, &ret);
+ }
+
+ cci_write(thp7312->regmap, THP7312_REG_AF_CONTROL, af_control, &ret);
+
+ if (ret)
+ return ret;
+
+ thp7312->focus_state = new_state;
+
+ return 0;
+}
+
+static int thp7312_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct thp7312_device *thp7312 = to_thp7312_from_ctrl(ctrl);
+ int ret = 0;
+ u8 value;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return -EINVAL;
+
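+	/*
+	 * If the device is powered off, don't touch the hardware now; the
+	 * control will be applied through __v4l2_ctrl_handler_setup() when
+	 * streaming starts, as ctrls_applied is cleared on runtime suspend.
+	 */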
+ if (!pm_runtime_get_if_active(thp7312->dev, true))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ cci_write(thp7312->regmap, THP7312_REG_BRIGHTNESS,
+ ctrl->val + 10, &ret);
+ break;
+
+ case V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION:
+ /* 0 = Auto adjust frame rate, 1 = Fix frame rate */
+ cci_write(thp7312->regmap, THP7312_REG_AE_FIX_FRAME_RATE,
+ ctrl->val ? 0 : 1, &ret);
+ break;
+
+ case V4L2_CID_FOCUS_AUTO:
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_THP7312_AUTO_FOCUS_METHOD:
+ ret = thp7312_set_focus(thp7312);
+ break;
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ value = (thp7312->hflip->val ? THP7312_REG_FLIP_MIRROR_MIRROR : 0)
+ | (thp7312->vflip->val ? THP7312_REG_FLIP_MIRROR_FLIP : 0);
+
+ cci_write(thp7312->regmap, THP7312_REG_FLIP_MIRROR, value, &ret);
+ break;
+
+ case V4L2_CID_THP7312_NOISE_REDUCTION_AUTO:
+ case V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE:
+ value = thp7312->noise_reduction_auto->val ? 0
+ : THP7312_REG_NOISE_REDUCTION_FIXED |
+ thp7312->noise_reduction_absolute->val;
+
+ cci_write(thp7312->regmap, THP7312_REG_NOISE_REDUCTION, value,
+ &ret);
+ break;
+
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ value = ctrl->val ? THP7312_WB_MODE_AUTO : THP7312_WB_MODE_MANUAL;
+
+ cci_write(thp7312->regmap, THP7312_REG_WB_MODE, value, &ret);
+ break;
+
+ case V4L2_CID_RED_BALANCE:
+ cci_write(thp7312->regmap, THP7312_REG_MANUAL_WB_RED_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_BLUE_BALANCE:
+ cci_write(thp7312->regmap, THP7312_REG_MANUAL_WB_BLUE_GAIN,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ cci_write(thp7312->regmap, THP7312_REG_AE_EXPOSURE_COMPENSATION,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ if (ctrl->val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ) {
+ value = THP7312_AE_FLICKER_MODE_60;
+ } else if (ctrl->val == V4L2_CID_POWER_LINE_FREQUENCY_50HZ) {
+ value = THP7312_AE_FLICKER_MODE_50;
+ } else {
+ if (thp7312->fw_version == THP7312_FW_VERSION(40, 3)) {
+ /* THP7312_AE_FLICKER_MODE_DISABLE is not supported */
+ value = THP7312_AE_FLICKER_MODE_50;
+ } else {
+ value = THP7312_AE_FLICKER_MODE_DISABLE;
+ }
+ }
+
+ cci_write(thp7312->regmap, THP7312_REG_AE_FLICKER_MODE,
+ value, &ret);
+ break;
+
+ case V4L2_CID_SATURATION:
+ cci_write(thp7312->regmap, THP7312_REG_SATURATION,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_CONTRAST:
+ cci_write(thp7312->regmap, THP7312_REG_CONTRAST,
+ ctrl->val, &ret);
+ break;
+
+ case V4L2_CID_SHARPNESS:
+ cci_write(thp7312->regmap, THP7312_REG_SHARPNESS,
+ ctrl->val, &ret);
+ break;
+
+ default:
+ break;
+ }
+
+ pm_runtime_mark_last_busy(thp7312->dev);
+ pm_runtime_put_autosuspend(thp7312->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops thp7312_ctrl_ops = {
+ .s_ctrl = thp7312_s_ctrl,
+};
+
+/*
+ * Refer to Documentation/userspace-api/media/drivers/thp7312.rst for details.
+ */
+static const struct v4l2_ctrl_config thp7312_ctrl_focus_method_cdaf = {
+ .ops = &thp7312_ctrl_ops,
+ .id = V4L2_CID_THP7312_AUTO_FOCUS_METHOD,
+ .name = "Auto-Focus Method",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = THP7312_FOCUS_METHOD_CONTRAST,
+ .def = THP7312_FOCUS_METHOD_CONTRAST,
+ .max = THP7312_FOCUS_METHOD_CONTRAST,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config thp7312_ctrl_focus_method_pdaf = {
+ .ops = &thp7312_ctrl_ops,
+ .id = V4L2_CID_THP7312_AUTO_FOCUS_METHOD,
+ .name = "Auto-Focus Method",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = THP7312_FOCUS_METHOD_CONTRAST,
+ .def = THP7312_FOCUS_METHOD_HYBRID,
+ .max = THP7312_FOCUS_METHOD_HYBRID,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config thp7312_v4l2_ctrls_custom[] = {
+ {
+ .ops = &thp7312_ctrl_ops,
+ .id = V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION,
+ .name = "Low Light Compensation",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .def = 1,
+ .max = 1,
+ .step = 1,
+ }, {
+ .ops = &thp7312_ctrl_ops,
+ .id = V4L2_CID_THP7312_NOISE_REDUCTION_AUTO,
+ .name = "Noise Reduction Auto",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .def = 1,
+ .max = 1,
+ .step = 1,
+ }, {
+ .ops = &thp7312_ctrl_ops,
+ .id = V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE,
+ .name = "Noise Reduction Level",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 0,
+ .max = 10,
+ .step = 1,
+ },
+};
+
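+/*
+ * Auto exposure bias menu, in 1/1000th of EV as per the V4L2 convention for
+ * V4L2_CID_AUTO_EXPOSURE_BIAS: -2 EV to +2 EV in 1/3 EV steps.
+ */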
+static const s64 exp_bias_qmenu[] = {
+ -2000, -1667, -1333, -1000, -667, -333, 0, 333, 667, 1000, 1333, 1667, 2000
+};
+
+static int thp7312_init_controls(struct thp7312_device *thp7312)
+{
+ struct v4l2_ctrl_handler *hdl = &thp7312->ctrl_handler;
+ struct device *dev = thp7312->dev;
+ struct v4l2_fwnode_device_properties props;
+ struct v4l2_ctrl *link_freq;
+ unsigned int num_controls;
+ unsigned int i;
+ u8 af_support;
+ int ret;
+
+ /*
+ * Check what auto-focus methods the connected sensor supports, if any.
+ * Firmwares before v90.03 didn't expose the AF_SUPPORT register,
+ * consider both CDAF and PDAF as supported in that case.
+ */
+ if (thp7312->fw_version >= THP7312_FW_VERSION(90, 3)) {
+ u64 val;
+
+ ret = cci_read(thp7312->regmap, THP7312_REG_AF_SUPPORT, &val,
+ NULL);
+ if (ret)
+ return ret;
+
+ af_support = val & (THP7312_AF_SUPPORT_PDAF |
+ THP7312_AF_SUPPORT_CONTRAST);
+ } else {
+ af_support = THP7312_AF_SUPPORT_PDAF
+ | THP7312_AF_SUPPORT_CONTRAST;
+ }
+
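+	/*
+	 * 14 controls are created below (including the fwnode property
+	 * controls), plus the custom controls and, when AF is supported, the
+	 * four focus controls.
+	 */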
+ num_controls = 14 + ARRAY_SIZE(thp7312_v4l2_ctrls_custom)
+ + (af_support ? 4 : 0);
+
+ v4l2_ctrl_handler_init(hdl, num_controls);
+
+ if (af_support) {
+ const struct v4l2_ctrl_config *af_method;
+
+ af_method = af_support & THP7312_AF_SUPPORT_PDAF
+ ? &thp7312_ctrl_focus_method_pdaf
+ : &thp7312_ctrl_focus_method_cdaf;
+
+ thp7312->focus_state = THP7312_FOCUS_STATE_MANUAL;
+
+ thp7312->focus_auto =
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_FOCUS_AUTO,
+ 0, 1, 1, 1);
+ thp7312->focus_absolute =
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_FOCUS_ABSOLUTE,
+					  0, ARRAY_SIZE(thp7312_focus_values) - 1,
+ 1, 0);
+ thp7312->focus_method =
+ v4l2_ctrl_new_custom(hdl, af_method, NULL);
+ thp7312->focus_start =
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_AUTO_FOCUS_START,
+ 1, 1, 1, 1);
+
+ v4l2_ctrl_cluster(4, &thp7312->focus_auto);
+ }
+
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ /* 32: 1x, 255: 7.95x */
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_RED_BALANCE,
+ 32, 255, 1, 64);
+ /* 32: 1x, 255: 7.95x */
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_BLUE_BALANCE,
+ 32, 255, 1, 50);
+
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_BRIGHTNESS,
+ -10, 10, 1, 0);
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_SATURATION,
+ 0, 31, 1, 10);
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_CONTRAST,
+ 0, 20, 1, 10);
+ v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops, V4L2_CID_SHARPNESS,
+ 0, 31, 1, 8);
+
+ thp7312->hflip = v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ thp7312->vflip = v4l2_ctrl_new_std(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_cluster(2, &thp7312->hflip);
+
+ v4l2_ctrl_new_int_menu(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(exp_bias_qmenu) - 1,
+ ARRAY_SIZE(exp_bias_qmenu) / 2, exp_bias_qmenu);
+
+ v4l2_ctrl_new_std_menu(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+
+ thp7312->link_freq = thp7312_mode_info_data[0].rates[0].link_freq;
+
+ link_freq = v4l2_ctrl_new_int_menu(hdl, &thp7312_ctrl_ops,
+ V4L2_CID_LINK_FREQ, 0, 0,
+ &thp7312->link_freq);
+
+ /* Set properties from fwnode (e.g. rotation, orientation). */
+ ret = v4l2_fwnode_device_parse(dev, &props);
+ if (ret) {
+ dev_err(dev, "Failed to parse fwnode: %d\n", ret);
+ goto error;
+ }
+
+ ret = v4l2_ctrl_new_fwnode_properties(hdl, &thp7312_ctrl_ops, &props);
+ if (ret) {
+ dev_err(dev, "Failed to create new v4l2 ctrl for fwnode properties: %d\n", ret);
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(thp7312_v4l2_ctrls_custom); i++) {
+ const struct v4l2_ctrl_config *ctrl_cfg =
+ &thp7312_v4l2_ctrls_custom[i];
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_new_custom(hdl, ctrl_cfg, NULL);
+
+ if (ctrl_cfg->id == V4L2_CID_THP7312_NOISE_REDUCTION_AUTO)
+ thp7312->noise_reduction_auto = ctrl;
+ else if (ctrl_cfg->id == V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE)
+ thp7312->noise_reduction_absolute = ctrl;
+ }
+
+ v4l2_ctrl_cluster(2, &thp7312->noise_reduction_auto);
+
+ if (hdl->error) {
+ dev_err(dev, "v4l2_ctrl_handler error\n");
+ ret = hdl->error;
+ goto error;
+ }
+
+ link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ return ret;
+
+error:
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Firmware Update
+ */
+
+/*
+ * The firmware data is made of 128kB of RAM firmware, followed by a
+ * variable-size "header". Both are stored in flash memory.
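+ * The header is at least four bytes long, as the image is terminated by a
+ * 32-bit big-endian CRC (see thp7312_fw_check_crc()).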
+ */
+#define THP7312_FW_RAM_SIZE (128 * 1024)
+#define THP7312_FW_MIN_SIZE (THP7312_FW_RAM_SIZE + 4)
+#define THP7312_FW_MAX_SIZE (THP7312_FW_RAM_SIZE + 64 * 1024)
+
+/*
+ * Data is first uploaded to the THP7312 128kB SRAM, and then written to flash.
+ * The SRAM is exposed over I2C as 32kB banks, and up to 4kB of data can be
+ * transferred in a single I2C write.
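+ * A full 128kB image thus spans four banks, each written as eight 4kB chunks.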
+ */
+#define THP7312_RAM_BANK_SIZE (32 * 1024)
+#define THP7312_FW_DOWNLOAD_UNIT (4 * 1024)
+
+#define THP7312_FLASH_MEMORY_ERASE_TIMEOUT 40
+
+#define THP7312_FLASH_MAX_REG_READ_SIZE 10
+#define THP7312_FLASH_MAX_REG_DATA_SIZE 10
+
+static const u8 thp7312_cmd_config_flash_mem_if[] = {
+ 0xd5, 0x18, 0x00, 0x00, 0x00, 0x80
+};
+
+static const u8 thp7312_cmd_write_to_reg[] = {
+ 0xd5, 0x0c, 0x80, 0x00, 0x00, 0x00
+};
+
+static const u8 thp7312_cmd_read_reg[] = {
+ 0xd5, 0x04
+};
+
+/*
+ * THP7312 Write data from RAM to Flash Memory
+ * Command ID FF700F
+ * Format: FF700F AA AA AA BB BB BB
+ * AA AA AA: destination start address
+ * BB BB BB: (write size - 1)
+ * Source address always starts from 0
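+ *
+ * For example, writing the full 128kB RAM image to flash address 0 is
+ * encoded as FF 70 0F 00 00 00 01 FF FF (0x01ffff being 0x20000 - 1).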
+ */
+static const u8 thp7312_cmd_write_ram_to_flash[] = { 0xff, 0x70, 0x0f };
+
+/*
+ * THP7312 Calculate CRC command
+ * Command ID: FF70 09
+ * Format: FF70 09 AA AA AA BB BB BB
+ * AA AA AA: Start address of calculation
+ * BB BB BB: (calculate size - 1)
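+ *
+ * For example, a CRC over 0x2000 bytes starting at address 0 is encoded as
+ * FF 70 09 00 00 00 00 1F FF.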
+ */
+static const u8 thp7312_cmd_calc_crc[] = { 0xff, 0x70, 0x09 };
+
+static const u8 thp7312_jedec_rdid[] = { SPINOR_OP_RDID, 0x00, 0x00, 0x00 };
+static const u8 thp7312_jedec_rdsr[] = { SPINOR_OP_RDSR, 0x00, 0x00, 0x00 };
+static const u8 thp7312_jedec_wen[] = { SPINOR_OP_WREN };
+
+static int thp7312_read_firmware_version(struct thp7312_device *thp7312)
+{
+ u64 val = 0;
+ int ret = 0;
+ u8 major;
+ u8 minor;
+
+ cci_read(thp7312->regmap, THP7312_REG_FIRMWARE_VERSION_1, &val, &ret);
+ major = val;
+
+ cci_read(thp7312->regmap, THP7312_REG_FIRMWARE_VERSION_2, &val, &ret);
+ minor = val;
+
+ thp7312->fw_version = THP7312_FW_VERSION(major, minor);
+ return ret;
+}
+
+static int thp7312_write_buf(struct thp7312_device *thp7312,
+ const u8 *write_buf, u16 write_size)
+{
+ struct i2c_client *client = to_i2c_client(thp7312->dev);
+ int ret;
+
+ ret = i2c_master_send(client, write_buf, write_size);
+ return ret >= 0 ? 0 : ret;
+}
+
+static int __thp7312_flash_reg_write(struct thp7312_device *thp7312,
+ const u8 *write_buf, u16 write_size)
+{
+ struct device *dev = thp7312->dev;
+ u8 temp_write_buf[THP7312_FLASH_MAX_REG_DATA_SIZE + 2];
+ int ret;
+
+ if (write_size > THP7312_FLASH_MAX_REG_DATA_SIZE) {
+ dev_err(dev, "%s: Write size error size = %d\n",
+ __func__, write_size);
+ return -EINVAL;
+ }
+
+ ret = thp7312_write_buf(thp7312, thp7312_cmd_config_flash_mem_if,
+ sizeof(thp7312_cmd_config_flash_mem_if));
+ if (ret < 0) {
+ dev_err(dev, "%s: Failed to config flash memory IF: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ temp_write_buf[0] = 0xd5;
+ temp_write_buf[1] = 0x00;
+ memcpy((temp_write_buf + 2), write_buf, write_size);
+ ret = thp7312_write_buf(thp7312, temp_write_buf, write_size + 2);
+ if (ret < 0)
+ return ret;
+
+ thp7312_write_buf(thp7312, thp7312_cmd_write_to_reg,
+ sizeof(thp7312_cmd_write_to_reg));
+
+ return 0;
+}
+
+static int __thp7312_flash_reg_read(struct thp7312_device *thp7312,
+ const u8 *write_buf, u16 write_size,
+ u8 *read_buf, u16 read_size)
+{
+ struct i2c_client *client = to_i2c_client(thp7312->dev);
+ struct i2c_msg msgs[2];
+ int ret;
+
+ ret = __thp7312_flash_reg_write(thp7312, write_buf, write_size);
+ if (ret)
+ return ret;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+	msgs[0].len = sizeof(thp7312_cmd_read_reg);
+ msgs[0].buf = (u8 *)thp7312_cmd_read_reg;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = read_size;
+ msgs[1].buf = read_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ return ret >= 0 ? 0 : ret;
+}
+
+#define thp7312_flash_reg_write(thp7312, wrbuf) \
+ __thp7312_flash_reg_write(thp7312, wrbuf, sizeof(wrbuf))
+
+#define thp7312_flash_reg_read(thp7312, wrbuf, rdbuf) \
+ __thp7312_flash_reg_read(thp7312, wrbuf, sizeof(wrbuf), \
+ rdbuf, sizeof(rdbuf))
+
+static enum fw_upload_err thp7312_fw_prepare_config(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ int ret;
+
+ ret = cci_write(thp7312->regmap, THP7312_REG_FW_MEMORY_IO_SETTING,
+ THP7312_FW_MEMORY_IO_GPIO0, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to set flash memory I/O\n");
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ /* Set max drivability. */
+ ret = cci_write(thp7312->regmap, THP7312_REG_FW_DRIVABILITY, 0x00777777,
+ NULL);
+ if (ret) {
+ dev_err(dev, "Failed to set drivability: %d\n", ret);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err thp7312_fw_prepare_check(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ u8 read_buf[3] = { 0 };
+ int ret;
+
+ /* Get JEDEC ID */
+ ret = thp7312_flash_reg_read(thp7312, thp7312_jedec_rdid, read_buf);
+ if (ret) {
+ dev_err(dev, "Failed to get JEDEC ID: %d\n", ret);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ dev_dbg(dev, "Flash Memory: JEDEC ID = 0x%x 0x%x 0x%x\n",
+ read_buf[0], read_buf[1], read_buf[2]);
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err thp7312_fw_prepare_reset(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ int ret;
+
+ ret = cci_write(thp7312->regmap, THP7312_REG_FW_RESET_FLASH, 0x81, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to reset flash memory: %d\n", ret);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+/* TODO: Erase only the amount of blocks necessary */
+static enum fw_upload_err thp7312_flash_erase(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ u8 read_buf[1] = { 0 };
+ unsigned int i;
+ u8 block;
+ int ret;
+
+ for (block = 0; block < 3; block++) {
+ const u8 jedec_se[] = { SPINOR_OP_SE, block, 0x00, 0x00 };
+
+ ret = thp7312_flash_reg_write(thp7312, thp7312_jedec_wen);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable flash for writing\n");
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ ret = thp7312_flash_reg_write(thp7312, jedec_se);
+ if (ret < 0) {
+ dev_err(dev, "Failed to erase flash sector\n");
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ for (i = 0; i < THP7312_FLASH_MEMORY_ERASE_TIMEOUT; i++) {
+ usleep_range(100000, 101000);
+ thp7312_flash_reg_read(thp7312, thp7312_jedec_rdsr,
+ read_buf);
+
+ /* Check Busy bit. Busy == 0x0 means erase complete. */
+ if (!(read_buf[0] & SR_WIP))
+ break;
+ }
+
+ if (i == THP7312_FLASH_MEMORY_ERASE_TIMEOUT)
+ return FW_UPLOAD_ERR_TIMEOUT;
+ }
+
+ thp7312_flash_reg_read(thp7312, thp7312_jedec_rdsr, read_buf);
+
+ /* Check WEL bit. */
+ if (read_buf[0] & SR_WEL)
+ return FW_UPLOAD_ERR_HW_ERROR;
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err
+thp7312_write_download_data_by_unit(struct thp7312_device *thp7312,
+ unsigned int addr, const u8 *data,
+ unsigned int size)
+{
+ struct device *dev = thp7312->dev;
+ u8 *write_buf = thp7312->fw_write_buf;
+ int ret;
+
+ dev_dbg(dev, "%s: addr = 0x%04x, data = 0x%p, size = %u\n",
+ __func__, addr, data, size);
+
+ write_buf[0] = (addr >> 8) & 0xff;
+ write_buf[1] = (addr >> 0) & 0xff;
+ memcpy(&write_buf[2], data, size);
+
+ /*
+ * THP7312 Firmware download to RAM
+ * Command ID (address to download): 0x0000 - 0x7fff
+	 * Format: 0000 XX XX XX ........ XX
+ */
+ ret = thp7312_write_buf(thp7312, write_buf, size + 2);
+ if (ret < 0)
+ dev_err(dev, "Unit transfer ERROR %s(): ret = %d\n", __func__, ret);
+
+ return ret >= 0 ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_RW_ERROR;
+}
+
+static enum fw_upload_err thp7312_fw_load_to_ram(struct thp7312_device *thp7312,
+ const u8 *data, u32 size)
+{
+ struct device *dev = thp7312->dev;
+ enum fw_upload_err ret;
+ unsigned int num_banks;
+ unsigned int i, j;
+
+ num_banks = DIV_ROUND_UP(size, THP7312_RAM_BANK_SIZE);
+
+ dev_dbg(dev, "%s: loading %u bytes in SRAM (%u banks)\n", __func__,
+ size, num_banks);
+
+ for (i = 0; i < num_banks; i++) {
+ const u32 bank_addr = 0x10000000 | (i * THP7312_RAM_BANK_SIZE);
+ unsigned int bank_size;
+ unsigned int num_chunks;
+
+ ret = cci_write(thp7312->regmap, THP7312_REG_FW_DEST_BANK_ADDR,
+ bank_addr, NULL);
+ if (ret)
+ return FW_UPLOAD_ERR_HW_ERROR;
+
+ bank_size = min_t(u32, size, THP7312_RAM_BANK_SIZE);
+ num_chunks = DIV_ROUND_UP(bank_size, THP7312_FW_DOWNLOAD_UNIT);
+
+ dev_dbg(dev, "%s: loading %u bytes in SRAM bank %u (%u chunks)\n",
+ __func__, bank_size, i, num_chunks);
+
+ for (j = 0 ; j < num_chunks; j++) {
+ unsigned int chunk_addr;
+ unsigned int chunk_size;
+
+ chunk_addr = j * THP7312_FW_DOWNLOAD_UNIT;
+ chunk_size = min_t(u32, size, THP7312_FW_DOWNLOAD_UNIT);
+
+ ret = thp7312_write_download_data_by_unit(thp7312, chunk_addr,
+ data, chunk_size);
+ if (ret != FW_UPLOAD_ERR_NONE) {
+ dev_err(dev, "Unit transfer ERROR at bank transfer %s(): %d\n",
+ __func__, j);
+ return ret;
+ }
+
+ data += chunk_size;
+ size -= chunk_size;
+ }
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err thp7312_fw_write_to_flash(struct thp7312_device *thp7312,
+ u32 dest, u32 write_size)
+{
+ u8 command[sizeof(thp7312_cmd_write_ram_to_flash) + 6];
+ static const u32 cmd_size = sizeof(thp7312_cmd_write_ram_to_flash);
+ u64 val;
+ int ret;
+
+ memcpy(command, thp7312_cmd_write_ram_to_flash, cmd_size);
+
+ command[cmd_size] = (dest & 0xff0000) >> 16;
+ command[cmd_size + 1] = (dest & 0x00ff00) >> 8;
+ command[cmd_size + 2] = (dest & 0x0000ff);
+ command[cmd_size + 3] = ((write_size - 1) & 0xff0000) >> 16;
+ command[cmd_size + 4] = ((write_size - 1) & 0x00ff00) >> 8;
+ command[cmd_size + 5] = ((write_size - 1) & 0x0000ff);
+
+ ret = thp7312_write_buf(thp7312, command, sizeof(command));
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ usleep_range(8000000, 8100000);
+
+ ret = cci_read(thp7312->regmap, THP7312_REG_FW_VERIFY_RESULT, &val,
+ NULL);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ return val ? FW_UPLOAD_ERR_HW_ERROR : FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err thp7312_fw_check_crc(struct thp7312_device *thp7312,
+ const u8 *fw_data, u32 fw_size)
+{
+ struct device *dev = thp7312->dev;
+ u16 header_size = fw_size - THP7312_FW_RAM_SIZE;
+ u8 command[sizeof(thp7312_cmd_calc_crc) + 6];
+ static const u32 cmd_size = sizeof(thp7312_cmd_calc_crc);
+ u32 size = THP7312_FW_RAM_SIZE - 4;
+ u32 fw_crc;
+ u64 crc;
+ int ret;
+
+ memcpy(command, thp7312_cmd_calc_crc, cmd_size);
+
+ command[cmd_size] = 0;
+ command[cmd_size + 1] = (header_size >> 8) & 0xff;
+ command[cmd_size + 2] = header_size & 0xff;
+
+ command[cmd_size + 3] = (size >> 16) & 0xff;
+ command[cmd_size + 4] = (size >> 8) & 0xff;
+ command[cmd_size + 5] = size & 0xff;
+
+ ret = thp7312_write_buf(thp7312, command, sizeof(command));
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ usleep_range(2000000, 2100000);
+
+ fw_crc = get_unaligned_be32(&fw_data[fw_size - 4]);
+
+ ret = cci_read(thp7312->regmap, THP7312_REG_FW_CRC_RESULT, &crc, NULL);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ if (fw_crc != crc) {
+ dev_err(dev, "CRC mismatch: firmware 0x%08x, flash 0x%08llx\n",
+ fw_crc, crc);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err thp7312_fw_prepare(struct fw_upload *fw_upload,
+ const u8 *data, u32 size)
+{
+ struct thp7312_device *thp7312 = fw_upload->dd_handle;
+ struct device *dev = thp7312->dev;
+ enum fw_upload_err ret;
+
+ mutex_lock(&thp7312->fw_lock);
+ thp7312->fw_cancel = false;
+ mutex_unlock(&thp7312->fw_lock);
+
+ if (size < THP7312_FW_MIN_SIZE || size > THP7312_FW_MAX_SIZE) {
+ dev_err(dev, "%s: Invalid firmware size %d; must be between %d and %d\n",
+ __func__, size, THP7312_FW_MIN_SIZE, THP7312_FW_MAX_SIZE);
+ return FW_UPLOAD_ERR_INVALID_SIZE;
+ }
+
+ ret = thp7312_fw_prepare_config(thp7312);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_prepare_check(thp7312);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_prepare_reset(thp7312);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ mutex_lock(&thp7312->fw_lock);
+ ret = thp7312->fw_cancel ? FW_UPLOAD_ERR_CANCELED : FW_UPLOAD_ERR_NONE;
+ mutex_unlock(&thp7312->fw_lock);
+
+ return ret;
+}
+
+static enum fw_upload_err thp7312_fw_write(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written)
+{
+ struct thp7312_device *thp7312 = fw_upload->dd_handle;
+ struct device *dev = thp7312->dev;
+ u16 header_size = size - THP7312_FW_RAM_SIZE;
+ enum fw_upload_err ret;
+ bool cancel;
+
+ mutex_lock(&thp7312->fw_lock);
+ cancel = thp7312->fw_cancel;
+ mutex_unlock(&thp7312->fw_lock);
+
+ if (cancel)
+ return FW_UPLOAD_ERR_CANCELED;
+
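+	/*
+	 * Erase the flash, load the 128kB RAM firmware and write it at flash
+	 * address 0, then load the header and write it at address 0x20000,
+	 * and finally verify the CRC of the complete image.
+	 */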
+ ret = thp7312_flash_erase(thp7312);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_load_to_ram(thp7312, data, THP7312_FW_RAM_SIZE);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_write_to_flash(thp7312, 0, 0x1ffff);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_load_to_ram(thp7312, data + THP7312_FW_RAM_SIZE, header_size);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_write_to_flash(thp7312, 0x20000, header_size - 1);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ ret = thp7312_fw_check_crc(thp7312, data, size);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ return ret;
+
+ dev_info(dev, "Successfully wrote firmware\n");
+
+ *written = size;
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err thp7312_fw_poll_complete(struct fw_upload *fw_upload)
+{
+ return FW_UPLOAD_ERR_NONE;
+}
+
+/*
+ * This may be called asynchronously with an ongoing update. All other
+ * functions are called sequentially in a single thread. To avoid contention on
+ * register accesses, only update the fw_cancel flag. Other functions will
+ * check this flag and handle the cancel request synchronously.
+ */
+static void thp7312_fw_cancel(struct fw_upload *fw_upload)
+{
+ struct thp7312_device *thp7312 = fw_upload->dd_handle;
+
+ mutex_lock(&thp7312->fw_lock);
+ thp7312->fw_cancel = true;
+ mutex_unlock(&thp7312->fw_lock);
+}
+
+static const struct fw_upload_ops thp7312_fw_upload_ops = {
+ .prepare = thp7312_fw_prepare,
+ .write = thp7312_fw_write,
+ .poll_complete = thp7312_fw_poll_complete,
+ .cancel = thp7312_fw_cancel,
+};
+
+static int thp7312_register_flash_mode(struct thp7312_device *thp7312)
+{
+ struct device *dev = thp7312->dev;
+ struct fw_upload *fwl;
+ u64 val;
+ int ret;
+
+ dev_info(dev, "booted in flash mode\n");
+
+ mutex_init(&thp7312->fw_lock);
+
+ thp7312->fw_write_buf = devm_kzalloc(dev, THP7312_FW_DOWNLOAD_UNIT + 2,
+ GFP_KERNEL);
+ if (!thp7312->fw_write_buf)
+ return -ENOMEM;
+
+ ret = __thp7312_power_on(thp7312);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to power on\n");
+
+ ret = cci_read(thp7312->regmap, THP7312_REG_FW_STATUS, &val, NULL);
+ if (ret) {
+ dev_err_probe(dev, ret, "Camera status read failed\n");
+ goto error;
+ }
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, "thp7312-firmware",
+ &thp7312_fw_upload_ops, thp7312);
+ if (IS_ERR(fwl)) {
+ ret = PTR_ERR(fwl);
+ dev_err_probe(dev, ret, "Failed to register firmware upload\n");
+ goto error;
+ }
+
+ thp7312->fwl = fwl;
+ return 0;
+
+error:
+ __thp7312_power_off(thp7312);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int thp7312_get_regulators(struct thp7312_device *thp7312)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(thp7312->supplies); i++)
+ thp7312->supplies[i].supply = thp7312_supply_name[i];
+
+ return devm_regulator_bulk_get(thp7312->dev,
+ ARRAY_SIZE(thp7312->supplies),
+ thp7312->supplies);
+}
+
+static int thp7312_sensor_parse_dt(struct thp7312_device *thp7312,
+ struct fwnode_handle *node)
+{
+ struct device *dev = thp7312->dev;
+ struct thp7312_sensor *sensor;
+ const char *model;
+ u8 data_lanes[4];
+ u32 values[4];
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* Retrieve the sensor index from the reg property. */
+ ret = fwnode_property_read_u32(node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "'reg' property missing in sensor node\n");
+ return -EINVAL;
+ }
+
+ if (reg >= ARRAY_SIZE(thp7312->sensors)) {
+ dev_err(dev, "Out-of-bounds 'reg' value %u\n", reg);
+ return -EINVAL;
+ }
+
+ sensor = &thp7312->sensors[reg];
+ if (sensor->info) {
+ dev_err(dev, "Duplicate entry for sensor %u\n", reg);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_string(node, "thine,model", &model);
+ if (ret < 0) {
+ dev_err(dev, "'thine,model' property missing in sensor node\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(thp7312_sensor_info); i++) {
+ const struct thp7312_sensor_info *info =
+ &thp7312_sensor_info[i];
+
+ if (!strcmp(info->model, model)) {
+ sensor->info = info;
+ break;
+ }
+ }
+
+ if (!sensor->info) {
+ dev_err(dev, "Unsupported sensor model %s\n", model);
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32_array(node, "data-lanes", values,
+ ARRAY_SIZE(values));
+ if (ret < 0) {
+ dev_err(dev, "Failed to read property data-lanes: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data_lanes); ++i)
+ data_lanes[i] = values[i];
+
+ ret = thp7312_map_data_lanes(&sensor->lane_remap, data_lanes,
+ ARRAY_SIZE(data_lanes));
+ if (ret) {
+ dev_err(dev, "Invalid sensor@%u data-lanes value\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int thp7312_parse_dt(struct thp7312_device *thp7312)
+{
+ struct v4l2_fwnode_endpoint ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct device *dev = thp7312->dev;
+ struct fwnode_handle *endpoint;
+ struct fwnode_handle *sensors;
+ unsigned int num_sensors = 0;
+ struct fwnode_handle *node;
+ int ret;
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint)
+ return dev_err_probe(dev, -EINVAL, "Endpoint node not found\n");
+
+ ret = v4l2_fwnode_endpoint_parse(endpoint, &ep);
+ fwnode_handle_put(endpoint);
+ if (ret)
+ return dev_err_probe(dev, ret, "Could not parse endpoint\n");
+
+ ret = thp7312_map_data_lanes(&thp7312->lane_remap,
+ ep.bus.mipi_csi2.data_lanes,
+ ep.bus.mipi_csi2.num_data_lanes);
+ if (ret) {
+ dev_err(dev, "Invalid data-lanes value\n");
+ return ret;
+ }
+
+ /*
+	 * The thine,boot-mode property is optional and defaults to
+ * THP7312_BOOT_MODE_SPI_MASTER (1).
+ */
+ thp7312->boot_mode = THP7312_BOOT_MODE_SPI_MASTER;
+ ret = device_property_read_u32(dev, "thine,boot-mode",
+ &thp7312->boot_mode);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Property '%s' is invalid\n",
+ "thine,boot-mode");
+
+ if (thp7312->boot_mode != THP7312_BOOT_MODE_2WIRE_SLAVE &&
+ thp7312->boot_mode != THP7312_BOOT_MODE_SPI_MASTER)
+ return dev_err_probe(dev, -EINVAL, "Invalid '%s' value %u\n",
+ "thine,boot-mode", thp7312->boot_mode);
+
+ /* Sensors */
+ sensors = device_get_named_child_node(dev, "sensors");
+ if (!sensors) {
+ dev_err(dev, "'sensors' child node not found\n");
+ return -EINVAL;
+ }
+
+ fwnode_for_each_available_child_node(sensors, node) {
+ if (fwnode_name_eq(node, "sensor")) {
+ if (!thp7312_sensor_parse_dt(thp7312, node))
+ num_sensors++;
+ }
+ }
+
+ fwnode_handle_put(sensors);
+
+ if (!num_sensors) {
+ dev_err(dev, "No sensor found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int thp7312_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct thp7312_device *thp7312;
+ int ret;
+
+ thp7312 = devm_kzalloc(dev, sizeof(*thp7312), GFP_KERNEL);
+ if (!thp7312)
+ return -ENOMEM;
+
+ thp7312->dev = dev;
+
+ thp7312->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(thp7312->regmap))
+ return dev_err_probe(dev, PTR_ERR(thp7312->regmap),
+ "Unable to initialize I2C\n");
+
+ ret = thp7312_parse_dt(thp7312);
+ if (ret < 0)
+ return ret;
+
+ ret = thp7312_get_regulators(thp7312);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ thp7312->iclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(thp7312->iclk))
+ return dev_err_probe(dev, PTR_ERR(thp7312->iclk),
+ "Failed to get iclk\n");
+
+ thp7312->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(thp7312->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(thp7312->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ if (thp7312->boot_mode == THP7312_BOOT_MODE_2WIRE_SLAVE)
+ return thp7312_register_flash_mode(thp7312);
+
+ v4l2_i2c_subdev_init(&thp7312->sd, client, &thp7312_subdev_ops);
+ thp7312->sd.internal_ops = &thp7312_internal_ops;
+ thp7312->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ thp7312->pad.flags = MEDIA_PAD_FL_SOURCE;
+ thp7312->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&thp7312->sd.entity, 1, &thp7312->pad);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable power management. The driver supports runtime PM, but needs to
+ * work when runtime PM is disabled in the kernel. To that end, power
+ * the device manually here.
+ */
+ ret = thp7312_power_on(thp7312);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = thp7312_read_firmware_version(thp7312);
+ if (ret < 0) {
+ dev_err(dev, "Camera is not found\n");
+ goto err_power_off;
+ }
+
+ ret = thp7312_init_controls(thp7312);
+ if (ret) {
+ dev_err(dev, "Failed to initialize controls\n");
+ goto err_power_off;
+ }
+
+ thp7312->sd.ctrl_handler = &thp7312->ctrl_handler;
+ thp7312->sd.state_lock = thp7312->ctrl_handler.lock;
+
+ ret = v4l2_subdev_init_finalize(&thp7312->sd);
+ if (ret < 0) {
+ dev_err(dev, "Subdev active state initialization failed\n");
+ goto err_free_ctrls;
+ }
+
+ /*
+ * Enable runtime PM with autosuspend. As the device has been powered
+ * manually, mark it as active, and increase the usage count without
+ * resuming the device.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = v4l2_async_register_subdev(&thp7312->sd);
+ if (ret < 0) {
+ dev_err(dev, "Subdev registration failed\n");
+ goto err_pm;
+ }
+
+ /*
+ * Decrease the PM usage count. The device will get suspended after the
+ * autosuspend delay, turning the power off.
+ */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "THP7312 firmware version %02u.%02u\n",
+ THP7312_FW_VERSION_MAJOR(thp7312->fw_version),
+ THP7312_FW_VERSION_MINOR(thp7312->fw_version));
+
+ return 0;
+
+err_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ v4l2_subdev_cleanup(&thp7312->sd);
+err_free_ctrls:
+ v4l2_ctrl_handler_free(&thp7312->ctrl_handler);
+err_power_off:
+ thp7312_power_off(thp7312);
+err_entity_cleanup:
+ media_entity_cleanup(&thp7312->sd.entity);
+ return ret;
+}
+
+static void thp7312_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct thp7312_device *thp7312 = to_thp7312_dev(sd);
+
+ if (thp7312->boot_mode == THP7312_BOOT_MODE_2WIRE_SLAVE) {
+ firmware_upload_unregister(thp7312->fwl);
+ __thp7312_power_off(thp7312);
+ return;
+ }
+
+ v4l2_async_unregister_subdev(&thp7312->sd);
+ v4l2_subdev_cleanup(&thp7312->sd);
+ media_entity_cleanup(&thp7312->sd.entity);
+ v4l2_ctrl_handler_free(&thp7312->ctrl_handler);
+
+ /*
+ * Disable runtime PM. In case runtime PM is disabled in the kernel,
+ * make sure to turn power off manually.
+ */
+ pm_runtime_disable(thp7312->dev);
+ if (!pm_runtime_status_suspended(thp7312->dev))
+ thp7312_power_off(thp7312);
+ pm_runtime_set_suspended(thp7312->dev);
+}
+
+static const struct of_device_id thp7312_dt_ids[] = {
+ { .compatible = "thine,thp7312" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, thp7312_dt_ids);
+
+static struct i2c_driver thp7312_i2c_driver = {
+ .driver = {
+ .name = "thp7312",
+ .pm = &thp7312_pm_ops,
+ .of_match_table = thp7312_dt_ids,
+ },
+ .probe = thp7312_probe,
+ .remove = thp7312_remove,
+};
+
+module_i2c_driver(thp7312_i2c_driver);
+
+MODULE_DESCRIPTION("THP7312 MIPI Camera Subdev Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index c37f605cb..5a561e5bf 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -738,20 +738,20 @@ static int tvp514x_s_ctrl(struct v4l2_ctrl *ctrl)
return err;
}
-/**
- * tvp514x_g_frame_interval() - V4L2 decoder interface handler
- * @sd: pointer to standard V4L2 sub-device structure
- * @ival: pointer to a v4l2_subdev_frame_interval structure
- *
- * Returns the decoder's video CAPTURE parameters.
- */
static int
-tvp514x_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+tvp514x_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
enum tvp514x_std current_std;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
/* get the current standard */
current_std = decoder->current_std;
@@ -762,22 +762,21 @@ tvp514x_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/**
- * tvp514x_s_frame_interval() - V4L2 decoder interface handler
- * @sd: pointer to standard V4L2 sub-device structure
- * @ival: pointer to a v4l2_subdev_frame_interval structure
- *
- * Configures the decoder to use the input parameters, if possible. If
- * not possible, returns the appropriate error code.
- */
static int
-tvp514x_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *ival)
+tvp514x_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *ival)
{
struct tvp514x_decoder *decoder = to_decoder(sd);
struct v4l2_fract *timeperframe;
enum tvp514x_std current_std;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
timeperframe = &ival->interval;
@@ -940,8 +939,6 @@ static const struct v4l2_subdev_video_ops tvp514x_video_ops = {
.s_std = tvp514x_s_std,
.s_routing = tvp514x_s_routing,
.querystd = tvp514x_querystd,
- .g_frame_interval = tvp514x_g_frame_interval,
- .s_frame_interval = tvp514x_s_frame_interval,
.s_stream = tvp514x_s_stream,
};
@@ -949,6 +946,8 @@ static const struct v4l2_subdev_pad_ops tvp514x_pad_ops = {
.enum_mbus_code = tvp514x_enum_mbus_code,
.get_fmt = tvp514x_get_pad_format,
.set_fmt = tvp514x_set_pad_format,
+ .get_frame_interval = tvp514x_get_frame_interval,
+ .set_frame_interval = tvp514x_set_frame_interval,
};
static const struct v4l2_subdev_ops tvp514x_ops = {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index e543b3f7a..9fc586cfd 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1035,7 +1035,7 @@ tvp5150_get_pad_crop(struct tvp5150 *decoder,
return &decoder->rect;
case V4L2_SUBDEV_FORMAT_TRY:
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- return v4l2_subdev_get_try_crop(&decoder->sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
#else
return ERR_PTR(-EINVAL);
#endif
@@ -1209,8 +1209,8 @@ static int tvp5150_get_mbus_config(struct v4l2_subdev *sd,
/****************************************************************************
V4L2 subdev pad ops
****************************************************************************/
-static int tvp5150_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int tvp5150_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std;
@@ -1722,7 +1722,6 @@ static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
};
static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = {
- .init_cfg = tvp5150_init_cfg,
.enum_mbus_code = tvp5150_enum_mbus_code,
.enum_frame_size = tvp5150_enum_frame_size,
.set_fmt = tvp5150_fill_fmt,
@@ -1741,6 +1740,7 @@ static const struct v4l2_subdev_ops tvp5150_ops = {
};
static const struct v4l2_subdev_internal_ops tvp5150_internal_ops = {
+ .init_state = tvp5150_init_state,
.registered = tvp5150_registered,
.open = tvp5150_open,
.close = tvp5150_close,
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index a2d7bc799..30831b4b5 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -791,7 +791,7 @@ static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
/*
* tvp7002_enum_mbus_code() - Enum supported digital video format on pad
* @sd: pointer to standard V4L2 sub-device structure
- * @cfg: pad configuration
+ * @sd_state: V4L2 subdev state
* @code: pointer to subdev enum mbus code struct
*
* Enumerate supported digital video formats for pad.
@@ -813,7 +813,7 @@ tvp7002_enum_mbus_code(struct v4l2_subdev *sd,
/*
* tvp7002_get_pad_format() - get video format on pad
* @sd: pointer to standard V4L2 sub-device structure
- * @cfg: pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to subdev format struct
*
* get video format for pad.
@@ -837,7 +837,7 @@ tvp7002_get_pad_format(struct v4l2_subdev *sd,
/*
* tvp7002_set_pad_format() - set video format on pad
* @sd: pointer to standard V4L2 sub-device structure
- * @cfg: pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to subdev format struct
*
* set video format for pad.
diff --git a/drivers/media/i2c/tw9900.c b/drivers/media/i2c/tw9900.c
new file mode 100644
index 000000000..bc7623ec4
--- /dev/null
+++ b/drivers/media/i2c/tw9900.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Techwell TW9900 multi-standard video decoder.
+ *
+ * Copyright (C) 2018 Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (C) 2020 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ * Copyright (C) 2023 Mehdi Djait <mehdi.djait@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#define TW9900_REG_CHIP_ID 0x00
+#define TW9900_REG_CHIP_STATUS 0x01
+#define TW9900_REG_CHIP_STATUS_VDLOSS BIT(7)
+#define TW9900_REG_CHIP_STATUS_HLOCK BIT(6)
+#define TW9900_REG_OUT_FMT_CTL 0x03
+#define TW9900_REG_OUT_FMT_CTL_STANDBY 0xA7
+#define TW9900_REG_OUT_FMT_CTL_STREAMING 0xA0
+#define TW9900_REG_CKHY_HSDLY 0x04
+#define TW9900_REG_OUT_CTRL_I 0x05
+#define TW9900_REG_ANALOG_CTL 0x06
+#define TW9900_REG_CROP_HI 0x07
+#define TW9900_REG_VDELAY_LO 0x08
+#define TW9900_REG_VACTIVE_LO 0x09
+#define TW9900_REG_HACTIVE_LO 0x0B
+#define TW9900_REG_CNTRL1 0x0C
+#define TW9900_REG_BRIGHT_CTL 0x10
+#define TW9900_REG_CONTRAST_CTL 0x11
+#define TW9900_REG_VBI_CNTL 0x19
+#define TW9900_REG_ANAL_CTL_II 0x1A
+#define TW9900_REG_OUT_CTRL_II 0x1B
+#define TW9900_REG_STD 0x1C
+#define TW9900_REG_STD_AUTO_PROGRESS BIT(7)
+#define TW9900_STDNOW_MASK GENMASK(6, 4)
+#define TW9900_REG_STDR 0x1D
+#define TW9900_REG_MISSCNT 0x26
+#define TW9900_REG_MISC_CTL_II 0x2F
+#define TW9900_REG_VVBI 0x55
+
+#define TW9900_CHIP_ID 0x00
+#define TW9900_STD_NTSC_M 0
+#define TW9900_STD_PAL_BDGHI 1
+#define TW9900_STD_AUTO 7
+
+#define TW9900_VIDEO_POLL_TRIES 20
+
+struct regval {
+ u8 addr;
+ u8 val;
+};
+
+struct tw9900_mode {
+ u32 width;
+ u32 height;
+ u32 std;
+ const struct regval *reg_list;
+ int n_regs;
+};
+
+struct tw9900 {
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+ struct regulator *regulator;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler hdl;
+ struct media_pad pad;
+
+ /* Serialize access to hardware and global state. */
+ struct mutex mutex;
+
+ bool streaming;
+ const struct tw9900_mode *cur_mode;
+};
+
+#define to_tw9900(sd) container_of(sd, struct tw9900, subdev)
+
+static const struct regval tw9900_init_regs[] = {
+ { TW9900_REG_MISC_CTL_II, 0xE6 },
+ { TW9900_REG_MISSCNT, 0x24 },
+ { TW9900_REG_OUT_FMT_CTL, 0xA7 },
+ { TW9900_REG_ANAL_CTL_II, 0x0A },
+ { TW9900_REG_VDELAY_LO, 0x19 },
+ { TW9900_REG_STD, 0x00 },
+ { TW9900_REG_VACTIVE_LO, 0xF0 },
+ { TW9900_REG_STD, 0x07 },
+ { TW9900_REG_CKHY_HSDLY, 0x00 },
+ { TW9900_REG_ANALOG_CTL, 0x80 },
+ { TW9900_REG_CNTRL1, 0xDC },
+ { TW9900_REG_OUT_CTRL_I, 0x98 },
+};
+
+static const struct regval tw9900_pal_regs[] = {
+ { TW9900_REG_STD, 0x01 },
+};
+
+static const struct regval tw9900_ntsc_regs[] = {
+ { TW9900_REG_OUT_FMT_CTL, 0xA4 },
+ { TW9900_REG_VDELAY_LO, 0x12 },
+ { TW9900_REG_VACTIVE_LO, 0xF0 },
+ { TW9900_REG_CROP_HI, 0x02 },
+ { TW9900_REG_HACTIVE_LO, 0xD0 },
+ { TW9900_REG_VBI_CNTL, 0x01 },
+ { TW9900_REG_STD, 0x00 },
+};
+
+static const struct tw9900_mode supported_modes[] = {
+ {
+ .width = 720,
+ .height = 480,
+ .std = V4L2_STD_NTSC,
+ .reg_list = tw9900_ntsc_regs,
+ .n_regs = ARRAY_SIZE(tw9900_ntsc_regs),
+ },
+ {
+ .width = 720,
+ .height = 576,
+ .std = V4L2_STD_PAL,
+ .reg_list = tw9900_pal_regs,
+ .n_regs = ARRAY_SIZE(tw9900_pal_regs),
+ },
+};
+
+static int tw9900_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0)
+ dev_err(&client->dev, "write reg error: %d\n", ret);
+
+ return ret;
+}
+
+static int tw9900_write_array(struct i2c_client *client,
+ const struct regval *regs, int n_regs)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_regs; i++) {
+ ret = tw9900_write_reg(client, regs[i].addr, regs[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tw9900_read_reg(struct i2c_client *client, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ dev_err(&client->dev, "read reg error: %d\n", ret);
+
+ return ret;
+}
+
+static void tw9900_fill_fmt(const struct tw9900_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(V4L2_COLORSPACE_SMPTE170M);
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(V4L2_COLORSPACE_SMPTE170M);
+}
+
+static int tw9900_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ struct v4l2_mbus_framefmt *mbus_fmt = &fmt->format;
+
+ mutex_lock(&tw9900->mutex);
+ tw9900_fill_fmt(tw9900->cur_mode, mbus_fmt);
+ mutex_unlock(&tw9900->mutex);
+
+ return 0;
+}
+
+static int tw9900_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ struct v4l2_mbus_framefmt *mbus_fmt = &fmt->format;
+
+ mutex_lock(&tw9900->mutex);
+
+ if (tw9900->streaming) {
+ mutex_unlock(&tw9900->mutex);
+ return -EBUSY;
+ }
+
+ tw9900_fill_fmt(tw9900->cur_mode, mbus_fmt);
+
+ mutex_unlock(&tw9900->mutex);
+
+ return 0;
+}
+
+static int tw9900_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ return 0;
+}
+
+static int tw9900_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tw9900 *tw9900 = container_of(ctrl->handler, struct tw9900, hdl);
+ int ret;
+
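+	/*
+	 * If the device is powered off, the control value will be applied
+	 * through __v4l2_ctrl_handler_setup() when streaming starts.
+	 */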
+ if (pm_runtime_suspended(&tw9900->client->dev))
+ return 0;
+
+ /* v4l2_ctrl_lock() locks tw9900->mutex. */
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ret = tw9900_write_reg(tw9900->client, TW9900_REG_BRIGHT_CTL,
+ (u8)ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ ret = tw9900_write_reg(tw9900->client, TW9900_REG_CONTRAST_CTL,
+ (u8)ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int tw9900_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ struct i2c_client *client = tw9900->client;
+ int ret;
+
+ mutex_lock(&tw9900->mutex);
+
+ if (tw9900->streaming == on) {
+ mutex_unlock(&tw9900->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&tw9900->mutex);
+
+ if (on) {
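+ /* The runtime-PM hooks take tw9900->mutex, so resume while unlocked. */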
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tw9900->mutex);
+
+ ret = __v4l2_ctrl_handler_setup(sd->ctrl_handler);
+ if (ret)
+ goto err_unlock;
+
+ ret = tw9900_write_array(tw9900->client,
+ tw9900->cur_mode->reg_list,
+ tw9900->cur_mode->n_regs);
+ if (ret)
+ goto err_unlock;
+
+ ret = tw9900_write_reg(client, TW9900_REG_OUT_FMT_CTL,
+ TW9900_REG_OUT_FMT_CTL_STREAMING);
+ if (ret)
+ goto err_unlock;
+
+ tw9900->streaming = on;
+
+ mutex_unlock(&tw9900->mutex);
+
+ } else {
+ mutex_lock(&tw9900->mutex);
+
+ ret = tw9900_write_reg(client, TW9900_REG_OUT_FMT_CTL,
+ TW9900_REG_OUT_FMT_CTL_STANDBY);
+ if (ret)
+ goto err_unlock;
+
+ tw9900->streaming = on;
+
+ mutex_unlock(&tw9900->mutex);
+
+ pm_runtime_put(&client->dev);
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&tw9900->mutex);
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static int tw9900_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tw9900_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ const struct tw9900_mode *mode = NULL;
+ int i;
+
+ if (!(std & (V4L2_STD_NTSC | V4L2_STD_PAL)))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(supported_modes); i++)
+ if (supported_modes[i].std & std)
+ mode = &supported_modes[i];
+ if (!mode)
+ return -EINVAL;
+
+ mutex_lock(&tw9900->mutex);
+ tw9900->cur_mode = mode;
+ mutex_unlock(&tw9900->mutex);
+
+ return 0;
+}
+
+static int tw9900_get_stream_std(struct tw9900 *tw9900,
+ v4l2_std_id *std)
+{
+ int cur_std, ret;
+
+ lockdep_assert_held(&tw9900->mutex);
+
+ ret = tw9900_read_reg(tw9900->client, TW9900_REG_STD);
+ if (ret < 0) {
+ *std = V4L2_STD_UNKNOWN;
+ return ret;
+ }
+
+ cur_std = FIELD_GET(TW9900_STDNOW_MASK, ret);
+ switch (cur_std) {
+ case TW9900_STD_NTSC_M:
+ *std = V4L2_STD_NTSC;
+ break;
+ case TW9900_STD_PAL_BDGHI:
+ *std = V4L2_STD_PAL;
+ break;
+ case TW9900_STD_AUTO:
+ *std = V4L2_STD_UNKNOWN;
+ break;
+ default:
+ *std = V4L2_STD_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int tw9900_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+
+ mutex_lock(&tw9900->mutex);
+ *std = tw9900->cur_mode->std;
+ mutex_unlock(&tw9900->mutex);
+
+ return 0;
+}
+
+static int tw9900_start_autodetect(struct tw9900 *tw9900)
+{
+ int ret;
+
+ lockdep_assert_held(&tw9900->mutex);
+
+ ret = tw9900_write_reg(tw9900->client, TW9900_REG_STDR,
+ BIT(TW9900_STD_NTSC_M) |
+ BIT(TW9900_STD_PAL_BDGHI));
+ if (ret)
+ return ret;
+
+ ret = tw9900_write_reg(tw9900->client, TW9900_REG_STD,
+ TW9900_STD_AUTO);
+ if (ret)
+ return ret;
+
+ ret = tw9900_write_reg(tw9900->client, TW9900_REG_STDR,
+ BIT(TW9900_STD_NTSC_M) |
+ BIT(TW9900_STD_PAL_BDGHI) |
+ BIT(TW9900_STD_AUTO));
+ if (ret)
+ return ret;
+
+ /*
+ * Autodetect takes a while to start, and during the starting sequence
+ * the autodetection status is reported as done.
+ */
+ msleep(30);
+
+ return 0;
+}
+
+static int tw9900_detect_done(struct tw9900 *tw9900, bool *done)
+{
+ int ret;
+
+ lockdep_assert_held(&tw9900->mutex);
+
+ ret = tw9900_read_reg(tw9900->client, TW9900_REG_STD);
+ if (ret < 0)
+ return ret;
+
+ *done = !(ret & TW9900_REG_STD_AUTO_PROGRESS);
+
+ return 0;
+}
+
+static int tw9900_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ bool done = false;
+ int i, ret;
+
+ mutex_lock(&tw9900->mutex);
+
+ if (tw9900->streaming) {
+ mutex_unlock(&tw9900->mutex);
+ return -EBUSY;
+ }
+
+ mutex_unlock(&tw9900->mutex);
+
+ ret = pm_runtime_resume_and_get(&tw9900->client->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tw9900->mutex);
+
+ ret = tw9900_start_autodetect(tw9900);
+ if (ret)
+ goto out_unlock;
+
+ for (i = 0; i < TW9900_VIDEO_POLL_TRIES; i++) {
+ ret = tw9900_detect_done(tw9900, &done);
+ if (ret)
+ goto out_unlock;
+
+ if (done)
+ break;
+
+ msleep(20);
+ }
+
+ if (!done) {
+ ret = -ETIMEDOUT;
+ goto out_unlock;
+ }
+
+ ret = tw9900_get_stream_std(tw9900, std);
+
+out_unlock:
+ mutex_unlock(&tw9900->mutex);
+ pm_runtime_put(&tw9900->client->dev);
+
+ return ret;
+}
+
+static int tw9900_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ *std = V4L2_STD_NTSC | V4L2_STD_PAL;
+
+ return 0;
+}
+
+static int tw9900_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ int ret;
+
+ mutex_lock(&tw9900->mutex);
+
+ if (tw9900->streaming) {
+ mutex_unlock(&tw9900->mutex);
+ return -EBUSY;
+ }
+
+ mutex_unlock(&tw9900->mutex);
+
+ *status = V4L2_IN_ST_NO_SIGNAL;
+
+ ret = pm_runtime_resume_and_get(&tw9900->client->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tw9900->mutex);
+ ret = tw9900_read_reg(tw9900->client, TW9900_REG_CHIP_STATUS);
+ mutex_unlock(&tw9900->mutex);
+
+ pm_runtime_put(&tw9900->client->dev);
+
+ if (ret < 0)
+ return ret;
+
+ *status = ret & TW9900_REG_CHIP_STATUS_HLOCK ? 0 : V4L2_IN_ST_NO_SIGNAL;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops tw9900_core_ops = {
+ .subscribe_event = tw9900_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops tw9900_video_ops = {
+ .s_std = tw9900_s_std,
+ .g_std = tw9900_g_std,
+ .querystd = tw9900_querystd,
+ .g_tvnorms = tw9900_g_tvnorms,
+ .g_input_status = tw9900_g_input_status,
+ .s_stream = tw9900_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops tw9900_pad_ops = {
+ .enum_mbus_code = tw9900_enum_mbus_code,
+ .get_fmt = tw9900_get_fmt,
+ .set_fmt = tw9900_set_fmt,
+};
+
+static const struct v4l2_subdev_ops tw9900_subdev_ops = {
+ .core = &tw9900_core_ops,
+ .video = &tw9900_video_ops,
+ .pad = &tw9900_pad_ops,
+};
+
+static const struct v4l2_ctrl_ops tw9900_ctrl_ops = {
+ .s_ctrl = tw9900_s_ctrl,
+};
+
+static int tw9900_check_id(struct tw9900 *tw9900,
+ struct i2c_client *client)
+{
+ struct device *dev = &tw9900->client->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&tw9900->client->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tw9900->mutex);
+ ret = tw9900_read_reg(client, TW9900_CHIP_ID);
+ mutex_unlock(&tw9900->mutex);
+
+ pm_runtime_put(&tw9900->client->dev);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret != TW9900_CHIP_ID) {
+ dev_err(dev, "Unexpected decoder id %#x\n", ret);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tw9900_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tw9900 *tw9900 = to_tw9900(sd);
+ int ret;
+
+ mutex_lock(&tw9900->mutex);
+
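+ /* Hold reset asserted while the supply powers up, then release it. */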
+ if (tw9900->reset_gpio)
+ gpiod_set_value_cansleep(tw9900->reset_gpio, 1);
+
+ ret = regulator_enable(tw9900->regulator);
+ if (ret < 0) {
+ mutex_unlock(&tw9900->mutex);
+ return ret;
+ }
+
+ usleep_range(50000, 52000);
+
+ if (tw9900->reset_gpio)
+ gpiod_set_value_cansleep(tw9900->reset_gpio, 0);
+
+ usleep_range(1000, 2000);
+
+ ret = tw9900_write_array(tw9900->client, tw9900_init_regs,
+ ARRAY_SIZE(tw9900_init_regs));
+
+ mutex_unlock(&tw9900->mutex);
+
+ /* This sleep is needed for the Horizontal Sync PLL to lock. */
+ msleep(300);
+
+ return ret;
+}
+
+static int tw9900_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tw9900 *tw9900 = to_tw9900(sd);
+
+ mutex_lock(&tw9900->mutex);
+
+ if (tw9900->reset_gpio)
+ gpiod_set_value_cansleep(tw9900->reset_gpio, 1);
+
+ regulator_disable(tw9900->regulator);
+
+ mutex_unlock(&tw9900->mutex);
+
+ return 0;
+}
+
+static int tw9900_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct v4l2_ctrl_handler *hdl;
+ struct tw9900 *tw9900;
+ int ret = 0;
+
+ tw9900 = devm_kzalloc(dev, sizeof(*tw9900), GFP_KERNEL);
+ if (!tw9900)
+ return -ENOMEM;
+
+ tw9900->client = client;
+ tw9900->cur_mode = &supported_modes[0];
+
+ tw9900->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tw9900->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(tw9900->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ tw9900->regulator = devm_regulator_get(&tw9900->client->dev, "vdd");
+ if (IS_ERR(tw9900->regulator))
+ return dev_err_probe(dev, PTR_ERR(tw9900->regulator),
+ "Failed to get power regulator\n");
+
+ v4l2_i2c_subdev_init(&tw9900->subdev, client, &tw9900_subdev_ops);
+ tw9900->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ mutex_init(&tw9900->mutex);
+
+ hdl = &tw9900->hdl;
+
+ ret = v4l2_ctrl_handler_init(hdl, 2);
+ if (ret)
+ goto err_destroy_mutex;
+
+ hdl->lock = &tw9900->mutex;
+
+ v4l2_ctrl_new_std(hdl, &tw9900_ctrl_ops, V4L2_CID_BRIGHTNESS,
+ -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &tw9900_ctrl_ops, V4L2_CID_CONTRAST,
+ 0, 255, 1, 0x60);
+
+ tw9900->subdev.ctrl_handler = hdl;
+ if (hdl->error) {
+ ret = hdl->error;
+ goto err_free_handler;
+ }
+
+ tw9900->pad.flags = MEDIA_PAD_FL_SOURCE;
+ tw9900->subdev.entity.function = MEDIA_ENT_F_DV_DECODER;
+
+ ret = media_entity_pads_init(&tw9900->subdev.entity, 1, &tw9900->pad);
+ if (ret < 0)
+ goto err_free_handler;
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ ret = tw9900_check_id(tw9900, client);
+ if (ret)
+ goto err_disable_pm;
+
+ ret = v4l2_async_register_subdev(&tw9900->subdev);
+ if (ret) {
+ dev_err(dev, "v4l2 async register subdev failed\n");
+ goto err_disable_pm;
+ }
+
+ return 0;
+
+err_disable_pm:
+ pm_runtime_disable(dev);
+ media_entity_cleanup(&tw9900->subdev.entity);
+err_free_handler:
+ v4l2_ctrl_handler_free(hdl);
+err_destroy_mutex:
+ mutex_destroy(&tw9900->mutex);
+
+ return ret;
+}
+
+static void tw9900_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct tw9900 *tw9900 = to_tw9900(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+
+ mutex_destroy(&tw9900->mutex);
+}
+
+static const struct dev_pm_ops tw9900_pm_ops = {
+ .runtime_suspend = tw9900_runtime_suspend,
+ .runtime_resume = tw9900_runtime_resume,
+};
+
+static const struct i2c_device_id tw9900_id[] = {
+ { "tw9900", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tw9900_id);
+
+static const struct of_device_id tw9900_of_match[] = {
+ { .compatible = "techwell,tw9900" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tw9900_of_match);
+
+static struct i2c_driver tw9900_i2c_driver = {
+ .driver = {
+ .name = "tw9900",
+ .pm = &tw9900_pm_ops,
+ .of_match_table = tw9900_of_match,
+ },
+ .probe = tw9900_probe,
+ .remove = tw9900_remove,
+ .id_table = tw9900_id,
+};
+
+module_i2c_driver(tw9900_i2c_driver);
+
+MODULE_DESCRIPTION("tw9900 decoder driver");
+MODULE_LICENSE("GPL");
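
The querystd path above is reachable from userspace through the decoder's
v4l-subdev node. A minimal sketch (the /dev/v4l-subdev0 path is an
assumption; the actual node depends on the media graph):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		v4l2_std_id std;
		int fd = open("/dev/v4l-subdev0", O_RDWR); /* hypothetical node */

		if (fd < 0)
			return 1;

		/* VIDIOC_QUERYSTD ends up in tw9900_querystd(): it powers the
		 * chip up, runs standard autodetection and reports the result. */
		if (ioctl(fd, VIDIOC_QUERYSTD, &std) == 0)
			printf("detected: %s\n",
			       (std & V4L2_STD_NTSC) ? "NTSC" :
			       (std & V4L2_STD_PAL) ? "PAL" : "unknown");

		close(fd);
		return 0;
	}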
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 477a64d8f..905af98c7 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -829,8 +829,6 @@ static int tw9910_set_fmt(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return tw9910_s_fmt(sd, mf);
- sd_state->pads->try_fmt = *mf;
-
return 0;
}
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 178bd06cc..56dbe07a1 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -405,9 +405,10 @@ static int queue_setup(struct vb2_queue *vq,
{
struct video_i2c_data *data = vb2_get_drv_priv(vq);
unsigned int size = data->chip->buffer_size;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2;
+ if (q_num_bufs + *nbuffers < 2)
+ *nbuffers = 2 - q_num_bufs;
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
@@ -794,7 +795,7 @@ static int video_i2c_probe(struct i2c_client *client)
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->drv_priv = data;
queue->buf_struct_size = sizeof(struct video_i2c_buffer);
- queue->min_buffers_needed = 1;
+ queue->min_queued_buffers = 1;
queue->ops = &video_i2c_video_qops;
queue->mem_ops = &vb2_vmalloc_memops;
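
The same two-step conversion recurs throughout the queue_setup() hunks
below: vq->num_buffers is replaced by the vb2_get_num_buffers() accessor,
and min_buffers_needed becomes min_queued_buffers. A sketch of the
converted shape (foo_* names and the three-buffer minimum are
placeholders):

	static int foo_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
				   unsigned int *nplanes, unsigned int sizes[],
				   struct device *alloc_devs[])
	{
		unsigned int q_num_bufs = vb2_get_num_buffers(vq);
		unsigned int size = foo_image_size(vq);	/* hypothetical helper */

		/* Top the queue up to the minimum, counting buffers that
		 * were already allocated. */
		if (q_num_bufs + *nbuffers < 3)
			*nbuffers = 3 - q_num_bufs;

		if (*nplanes)
			return sizes[0] < size ? -EINVAL : 0;

		*nplanes = 1;
		sizes[0] = size;

		return 0;
	}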
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
index 375b09612..c82b07d2e 100644
--- a/drivers/media/mc/Kconfig
+++ b/drivers/media/mc/Kconfig
@@ -11,10 +11,3 @@ config MEDIA_CONTROLLER_DVB
Enable the media controller API support for DVB.
This is currently experimental.
-
-config MEDIA_CONTROLLER_REQUEST_API
- bool
- depends on MEDIA_CONTROLLER
- help
- This option enables the Request API for the Media controller and V4L2
- interfaces. It is currently needed by a few stateless codec drivers.
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index 8cee956e3..c0dd4ae57 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -372,16 +372,12 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
static long media_device_request_alloc(struct media_device *mdev, void *arg)
{
-#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
int *alloc_fd = arg;
if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
return -ENOTTY;
return media_request_alloc(mdev, alloc_fd);
-#else
- return -ENOTTY;
-#endif
}
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 49a3dd70e..511f013cc 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3113,7 +3113,7 @@ static int vdev_init(struct bttv *btv, struct video_device *vfd,
q->gfp_flags = __GFP_DMA32;
q->buf_struct_size = sizeof(struct bttv_buffer);
q->lock = &btv->lock;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->dev = &btv->c.pci->dev;
err = vb2_queue_init(q);
if (err)
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 26bf58d17..77ba08ace 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -1260,7 +1260,7 @@ static int cobalt_node_register(struct cobalt *cobalt, int node)
q->ops = &cobalt_qops;
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->lock = &s->lock;
q->dev = &cobalt->pci_dev->dev;
vdev->queue = q;
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 597472754..acc6418db 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -104,6 +104,7 @@ static int cx18_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
struct cx18_stream *s = vb2_get_drv_priv(vq);
struct cx18 *cx = s->cx;
unsigned int szimage;
@@ -121,8 +122,8 @@ static int cx18_queue_setup(struct vb2_queue *vq,
* Let's request at least three buffers: two for the
* DMA engine and one for userspace.
*/
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
if (*nplanes) {
if (*nplanes != 1 || sizes[0] < szimage)
@@ -286,7 +287,7 @@ static int cx18_stream_init(struct cx18 *cx, int type)
s->vidq.ops = &cx18_vb2_qops;
s->vidq.mem_ops = &vb2_vmalloc_memops;
s->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- s->vidq.min_buffers_needed = 2;
+ s->vidq.min_queued_buffers = 2;
s->vidq.gfp_flags = GFP_DMA32;
s->vidq.dev = &cx->pci_dev->dev;
s->vidq.lock = &cx->serialize_lock;
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 434677bd4..fdb96f80c 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1525,7 +1525,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx23885_buffer);
q->ops = &cx23885_qops;
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 7551ca4a3..3d01cdc4c 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -2667,7 +2667,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = port;
q->buf_struct_size = sizeof(struct cx23885_buffer);
q->ops = &dvb_qops;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 9af2c5596..42fdcf992 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -1321,7 +1321,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx23885_buffer);
q->ops = &cx23885_video_qops;
@@ -1338,7 +1338,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx23885_buffer);
q->ops = &cx23885_vbi_qops;
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index 1b80c990c..0bee4b728 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -730,7 +730,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->io_modes |= is_output ? VB2_WRITE : VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = chan;
q->buf_struct_size = sizeof(struct cx25821_buffer);
q->ops = &cx25821_video_qops;
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index c1b41a928..d55df8fdb 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1195,7 +1195,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx88_buffer);
q->ops = &blackbird_qops;
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 2087f2491..b33b3a5e3 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1776,7 +1776,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx88_buffer);
q->ops = &dvb_qops;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index c0ef03ed7..cefb6b25e 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1411,7 +1411,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx88_buffer);
q->ops = &cx8800_video_qops;
@@ -1428,7 +1428,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q->gfp_flags = GFP_DMA32;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct cx88_buffer);
q->ops = &cx8800_vbi_qops;
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 548156b19..dff853e73 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -128,8 +128,6 @@ dt3155_queue_setup(struct vb2_queue *vq,
struct dt3155_priv *pd = vb2_get_drv_priv(vq);
unsigned size = pd->width * pd->height;
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
*num_planes = 1;
@@ -519,7 +517,7 @@ static int dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd->vidq.ops = &q_ops;
pd->vidq.mem_ops = &vb2_dma_contig_memops;
pd->vidq.drv_priv = pd;
- pd->vidq.min_buffers_needed = 2;
+ pd->vidq.min_queued_buffers = 2;
pd->vidq.gfp_flags = GFP_DMA32;
pd->vidq.lock = &pd->mux; /* for locking v4l2_file_operations */
pd->vidq.dev = &pdev->dev;
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index e38198e25..f980e3125 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -53,7 +53,7 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
/* Omnivision ov8856 */
IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
/* Omnivision ov2740 */
- IPU_SENSOR_CONFIG("INT3474", 1, 360000000),
+ IPU_SENSOR_CONFIG("INT3474", 1, 180000000),
/* Hynix hi556 */
IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
/* Omnivision ov13b10 */
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 5dd69a251..ed08bf417 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1205,23 +1205,16 @@ static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
};
/* Initialize try_fmt */
- format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
+ format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SINK);
*format = fmt_default;
/* same as sink */
- format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
+ format = v4l2_subdev_state_get_format(fh->state, CIO2_PAD_SOURCE);
*format = fmt_default;
return 0;
}
-/*
- * cio2_subdev_get_fmt - Handle get format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -1231,8 +1224,8 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&q->subdev_lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
- fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
else
fmt->format = q->subdev_fmt;
@@ -1241,13 +1234,6 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-/*
- * cio2_subdev_set_fmt - Handle set format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -1265,7 +1251,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
return cio2_subdev_get_fmt(sd, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mbus = v4l2_subdev_state_get_format(sd_state, fmt->pad);
else
mbus = &q->subdev_fmt;
@@ -1603,7 +1589,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vbq->mem_ops = &vb2_dma_sg_memops;
vbq->buf_struct_size = sizeof(struct cio2_buffer);
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vbq->min_buffers_needed = 1;
+ vbq->min_queued_buffers = 1;
vbq->drv_priv = cio2;
vbq->lock = &q->lock;
r = vb2_queue_init(vbq);
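
The ipu3-cio2 hunks above also show the subdev state accessor rename: the
helpers now take only the state, not the subdev, and drop the "try"
naming. A sketch of old versus new (foo_touch_state is a placeholder):

	static void foo_touch_state(struct v4l2_subdev_state *sd_state,
				    unsigned int pad)
	{
		/* was: v4l2_subdev_get_try_format(sd, sd_state, pad) */
		struct v4l2_mbus_framefmt *fmt =
			v4l2_subdev_state_get_format(sd_state, pad);
		/* was: &sd_state->pads[pad].try_crop */
		struct v4l2_rect *crop =
			v4l2_subdev_state_get_crop(sd_state, pad);

		fmt->field = V4L2_FIELD_NONE;
		crop->left = 0;
	}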
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index e111fd6ff..3c74d06a2 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -338,7 +338,7 @@ mei_csi_get_pad_format(struct v4l2_subdev *sd,
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &csi->format_mbus[pad];
default:
@@ -346,8 +346,8 @@ mei_csi_get_pad_format(struct v4l2_subdev *sd,
}
}
-static int mei_csi_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int mei_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mbusformat;
struct mei_csi *csi = sd_to_csi(sd);
@@ -356,7 +356,7 @@ static int mei_csi_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&csi->lock);
for (i = 0; i < sd->entity.num_pads; i++) {
- mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
+ mbusformat = v4l2_subdev_state_get_format(sd_state, i);
*mbusformat = mei_csi_format_mbus_default;
}
@@ -554,7 +554,6 @@ static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
};
static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
- .init_cfg = mei_csi_init_cfg,
.get_fmt = mei_csi_get_fmt,
.set_fmt = mei_csi_set_fmt,
};
@@ -564,6 +563,10 @@ static const struct v4l2_subdev_ops mei_csi_subdev_ops = {
.pad = &mei_csi_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mei_csi_internal_ops = {
+ .init_state = mei_csi_init_state,
+};
+
static const struct media_entity_operations mei_csi_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -645,47 +648,66 @@ static int mei_csi_parse_firmware(struct mei_csi *csi)
};
struct device *dev = &csi->cldev->dev;
struct v4l2_async_connection *asd;
- struct fwnode_handle *fwnode;
- struct fwnode_handle *ep;
+ struct fwnode_handle *sink_ep, *source_ep;
int ret;
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
- if (!ep) {
- dev_err(dev, "not connected to subdevice\n");
+ sink_ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
+ if (!sink_ep) {
+ dev_err(dev, "can't obtain sink endpoint\n");
return -EINVAL;
}
- ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ v4l2_async_subdev_nf_init(&csi->notifier, &csi->subdev);
+ csi->notifier.ops = &mei_csi_notify_ops;
+
+ ret = v4l2_fwnode_endpoint_parse(sink_ep, &v4l2_ep);
if (ret) {
- dev_err(dev, "could not parse v4l2 endpoint\n");
- fwnode_handle_put(ep);
- return -EINVAL;
+ dev_err(dev, "could not parse v4l2 sink endpoint\n");
+ goto out_nf_cleanup;
}
- fwnode = fwnode_graph_get_remote_endpoint(ep);
- fwnode_handle_put(ep);
+ csi->nr_of_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
- v4l2_async_subdev_nf_init(&csi->notifier, &csi->subdev);
- csi->notifier.ops = &mei_csi_notify_ops;
+ source_ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 1, 0, 0);
+ if (!source_ep) {
+ ret = -ENOTCONN;
+ dev_err(dev, "can't obtain source endpoint\n");
+ goto out_nf_cleanup;
+ }
- asd = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
- struct v4l2_async_connection);
- if (IS_ERR(asd)) {
- fwnode_handle_put(fwnode);
- return PTR_ERR(asd);
+ ret = v4l2_fwnode_endpoint_parse(source_ep, &v4l2_ep);
+ fwnode_handle_put(source_ep);
+ if (ret) {
+ dev_err(dev, "could not parse v4l2 source endpoint\n");
+ goto out_nf_cleanup;
}
- ret = v4l2_fwnode_endpoint_alloc_parse(fwnode, &v4l2_ep);
- fwnode_handle_put(fwnode);
- if (ret)
- return ret;
- csi->nr_of_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ if (csi->nr_of_lanes != v4l2_ep.bus.mipi_csi2.num_data_lanes) {
+ ret = -EINVAL;
+ dev_err(dev,
+ "the number of lanes does not match (%u vs. %u)\n",
+ csi->nr_of_lanes, v4l2_ep.bus.mipi_csi2.num_data_lanes);
+ goto out_nf_cleanup;
+ }
+
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, sink_ep,
+ struct v4l2_async_connection);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out_nf_cleanup;
+ }
ret = v4l2_async_nf_register(&csi->notifier);
if (ret)
- v4l2_async_nf_cleanup(&csi->notifier);
+ goto out_nf_cleanup;
+
+ fwnode_handle_put(sink_ep);
- v4l2_fwnode_endpoint_free(&v4l2_ep);
+ return 0;
+
+out_nf_cleanup:
+ v4l2_async_nf_cleanup(&csi->notifier);
+ fwnode_handle_put(sink_ep);
return ret;
}
@@ -728,6 +750,7 @@ static int mei_csi_probe(struct mei_cl_device *cldev,
csi->subdev.dev = &cldev->dev;
v4l2_subdev_init(&csi->subdev, &mei_csi_subdev_ops);
+ csi->subdev.internal_ops = &mei_csi_internal_ops;
v4l2_set_subdevdata(&csi->subdev, csi);
csi->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
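
The .init_cfg pad op is likewise retired in favour of .init_state in
v4l2_subdev_internal_ops, as the mei_csi conversion above shows. The
minimal shape, with foo_* placeholders:

	static int foo_init_state(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state)
	{
		struct v4l2_mbus_framefmt *fmt =
			v4l2_subdev_state_get_format(sd_state, 0);

		*fmt = foo_default_fmt;	/* hypothetical default format */

		return 0;
	}

	static const struct v4l2_subdev_internal_ops foo_internal_ops = {
		.init_state = foo_init_state,
	};

	/* in probe(), right after v4l2_subdev_init():
	 *	foo->sd.internal_ops = &foo_internal_ops;
	 */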
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 9be52101b..2498f9079 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -48,9 +48,7 @@ config VIDEO_IVTV_ALSA
config VIDEO_FB_IVTV
tristate "Conexant cx23415 framebuffer support"
depends on VIDEO_IVTV && FB
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_HELPERS
help
This is a framebuffer driver for the Conexant cx23415 MPEG
encoder/decoder.
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index ce3a7ca51..a6ffa99e1 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -619,6 +619,7 @@ struct ivtv {
u32 hw_flags; /* hardware description of the board */
v4l2_std_id tuner_std; /* the norm of the card's tuner (fixed) */
struct v4l2_subdev *sd_video; /* controlling video decoder subdev */
+ bool sd_video_is_streaming; /* is video already streaming? */
struct v4l2_subdev *sd_audio; /* controlling audio subdev */
struct v4l2_subdev *sd_muxer; /* controlling audio muxer subdev */
resource_size_t base_addr; /* PCI resource base address */
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index 13d7d55e6..af9e6235c 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -623,10 +623,12 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
/* Avoid tinny audio problem - ensure audio clocks are going */
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
- v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
+ if (itv->sd_video_is_streaming)
+ v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
ivtv_msleep_timeout(300, 0);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
+ itv->sd_video_is_streaming = true;
}
/* begin_capture */
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 23c8c094e..410477e3e 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -927,17 +927,17 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops ivtvfb_ops = {
.owner = THIS_MODULE,
+ .fb_read = fb_io_read,
.fb_write = ivtvfb_write,
.fb_check_var = ivtvfb_check_var,
.fb_set_par = ivtvfb_set_par,
.fb_setcolreg = ivtvfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ __FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_cursor = NULL,
.fb_ioctl = ivtvfb_ioctl,
.fb_pan_display = ivtvfb_pan_display,
.fb_blank = ivtvfb_blank,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
/* Restore hardware after firmware restart */
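
ivtvfb keeps its custom fb_write, so it pulls in the I/O-memory defaults
piecemeal; a driver with no special paths can take all three groups. A
sketch (the foo_* callbacks are hypothetical):

	static const struct fb_ops foo_fb_ops = {
		.owner = THIS_MODULE,
		__FB_DEFAULT_IOMEM_OPS_RDWR,	/* fb_io_read + fb_io_write */
		__FB_DEFAULT_IOMEM_OPS_DRAW,	/* fillrect/copyarea/imageblit */
		__FB_DEFAULT_IOMEM_OPS_MMAP,
		.fb_check_var	= foo_check_var,
		.fb_set_par	= foo_set_par,
	};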
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index d72b07b87..2cd78c539 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -849,7 +849,7 @@ struct mgb4_vin_dev *mgb4_vin_create(struct mgb4_dev *mgbdev, int id)
vindev->queue.mem_ops = &vb2_dma_sg_memops;
vindev->queue.gfp_flags = GFP_DMA32;
vindev->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vindev->queue.min_buffers_needed = 2;
+ vindev->queue.min_queued_buffers = 2;
vindev->queue.drv_priv = vindev;
vindev->queue.lock = &vindev->lock;
vindev->queue.dev = dev;
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index 857fc7bbd..241353ee7 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -523,7 +523,7 @@ struct mgb4_vout_dev *mgb4_vout_create(struct mgb4_dev *mgbdev, int id)
voutdev->queue.mem_ops = &vb2_dma_sg_memops;
voutdev->queue.gfp_flags = GFP_DMA32;
voutdev->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- voutdev->queue.min_buffers_needed = 2;
+ voutdev->queue.min_queued_buffers = 2;
voutdev->queue.drv_priv = voutdev;
voutdev->queue.lock = &voutdev->lock;
voutdev->queue.dev = dev;
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index d85bfbb77..557985ba2 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -293,12 +293,13 @@ static int netup_unidvb_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct netup_dma *dma = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
*nplanes = 1;
- if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
- *nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < VIDEO_MAX_FRAME)
+ *nbuffers = VIDEO_MAX_FRAME - q_num_bufs;
sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
__func__, *nbuffers, sizes[0]);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index bd38ce444..46676f2c8 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -289,7 +289,7 @@ static const struct i2c_algorithm netup_i2c_algorithm = {
static const struct i2c_adapter netup_i2c_adapter = {
.owner = THIS_MODULE,
.name = NETUP_UNIDVB_NAME,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .class = I2C_CLASS_HWMON,
.algo = &netup_i2c_algorithm,
};
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index e4cf9d63e..364ce9e57 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -757,7 +757,7 @@ static const struct video_device video_dev_template = {
/**
* vip_irq - interrupt routine
* @irq: Number of interrupt ( not used, correct number is assumed )
- * @vip: local data structure containing all information
+ * @data: local data structure containing all information
*
* check for both frame interrupts set ( top and bottom ).
* check FIFO overflow, but limit number of log messages after open.
@@ -767,8 +767,9 @@ static const struct video_device video_dev_template = {
*
* IRQ_HANDLED, interrupt done.
*/
-static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
+static irqreturn_t vip_irq(int irq, void *data)
{
+ struct sta2x11_vip *vip = data;
unsigned int status;
status = reg_read(vip, DVP_ITS);
@@ -1053,9 +1054,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
spin_lock_init(&vip->slock);
- ret = request_irq(pdev->irq,
- (irq_handler_t) vip_irq,
- IRQF_SHARED, KBUILD_MODNAME, vip);
+ ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
ret = -ENODEV;
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 197ed8978..8b1aae4b6 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -1114,7 +1114,7 @@ static int tw5864_video_input_init(struct tw5864_input *input, int video_nr)
input->vidq.gfp_flags = 0;
input->vidq.buf_struct_size = sizeof(struct tw5864_buf);
input->vidq.lock = &input->lock;
- input->vidq.min_buffers_needed = 2;
+ input->vidq.min_queued_buffers = 2;
input->vidq.dev = &input->root->pci->dev;
ret = vb2_queue_init(&input->vidq);
if (ret)
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 773a18702..cdf5d733b 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -360,13 +360,14 @@ static int tw68_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct tw68_dev *dev = vb2_get_drv_priv(q);
- unsigned tot_bufs = q->num_buffers + *num_buffers;
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
+ unsigned int tot_bufs = q_num_bufs + *num_buffers;
unsigned size = (dev->fmt->depth * dev->width * dev->height) >> 3;
if (tot_bufs < 2)
tot_bufs = 2;
tot_bufs = tw68_buffer_count(size, tot_bufs);
- *num_buffers = tot_bufs - q->num_buffers;
+ *num_buffers = tot_bufs - q_num_bufs;
/*
* We allow create_bufs, but only if the sizeimage is >= as the
* current sizeimage. The tw68_buffer_count calculation becomes quite
@@ -951,7 +952,7 @@ int tw68_video_init2(struct tw68_dev *dev, int video_nr)
dev->vidq.gfp_flags = __GFP_DMA32 | __GFP_KSWAPD_RECLAIM;
dev->vidq.buf_struct_size = sizeof(struct tw68_buf);
dev->vidq.lock = &dev->lock;
- dev->vidq.min_buffers_needed = 2;
+ dev->vidq.min_queued_buffers = 2;
dev->vidq.dev = &dev->pci->dev;
ret = vb2_queue_init(&dev->vidq);
if (ret)
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 3ebf7a2c9..63be95fce 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -423,6 +423,7 @@ static int tw686x_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
unsigned int szimage =
(vc->width * vc->height * vc->format->depth) >> 3;
@@ -430,8 +431,8 @@ static int tw686x_queue_setup(struct vb2_queue *vq,
* Let's request at least three buffers: two for the
* DMA engine and one for userspace.
*/
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
if (*nplanes) {
if (*nplanes != 1 || sizes[0] < szimage)
@@ -1221,7 +1222,7 @@ int tw686x_video_init(struct tw686x_dev *dev)
vc->vidq.ops = &tw686x_video_qops;
vc->vidq.mem_ops = dev->dma_ops->mem_ops;
vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vc->vidq.min_buffers_needed = 2;
+ vc->vidq.min_queued_buffers = 2;
vc->vidq.lock = &vc->vb_mutex;
vc->vidq.gfp_flags = dev->dma_mode != TW686X_DMA_MODE_MEMCPY ?
GFP_DMA32 : 0;
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index fa672cc8b..5c05e64c7 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -749,8 +749,8 @@ static int zr_vb2_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsi
zr->buf_in_reserve = 0;
- if (*nbuffers < vq->min_buffers_needed)
- *nbuffers = vq->min_buffers_needed;
+ if (*nbuffers < vq->min_queued_buffers)
+ *nbuffers = vq->min_queued_buffers;
if (*nplanes) {
if (sizes[0] < size)
@@ -971,7 +971,7 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir)
vq->mem_ops = &vb2_dma_contig_memops;
vq->gfp_flags = GFP_DMA32;
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vq->min_buffers_needed = 9;
+ vq->min_queued_buffers = 9;
vq->lock = &zr->lock;
err = vb2_queue_init(vq);
if (err)
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
index 982c2c777..940e5bda5 100644
--- a/drivers/media/platform/amphion/vpu_dbg.c
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -87,7 +87,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
num = scnprintf(str, sizeof(str),
"output (%2d, %2d): fmt = %c%c%c%c %d x %d, %d;",
vb2_is_streaming(vq),
- vq->num_buffers,
+ vb2_get_num_buffers(vq),
inst->out_format.pixfmt,
inst->out_format.pixfmt >> 8,
inst->out_format.pixfmt >> 16,
@@ -111,7 +111,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
num = scnprintf(str, sizeof(str),
"capture(%2d, %2d): fmt = %c%c%c%c %d x %d, %d;",
vb2_is_streaming(vq),
- vq->num_buffers,
+ vb2_get_num_buffers(vq),
inst->cap_format.pixfmt,
inst->cap_format.pixfmt >> 8,
inst->cap_format.pixfmt >> 16,
@@ -139,12 +139,19 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
return 0;
vq = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
- for (i = 0; i < vq->num_buffers; i++) {
- struct vb2_buffer *vb = vq->bufs[i];
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ for (i = 0; i < vb2_get_num_buffers(vq); i++) {
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
+
+ vb = vb2_get_buffer(vq, i);
+ if (!vb)
+ continue;
if (vb->state == VB2_BUF_STATE_DEQUEUED)
continue;
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+
num = scnprintf(str, sizeof(str),
"output [%2d] state = %10s, %8s\n",
i, vb2_stat_name[vb->state],
@@ -154,12 +161,19 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
}
vq = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
- for (i = 0; i < vq->num_buffers; i++) {
- struct vb2_buffer *vb = vq->bufs[i];
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ for (i = 0; i < vb2_get_num_buffers(vq); i++) {
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
+
+ vb = vb2_get_buffer(vq, i);
+ if (!vb)
+ continue;
if (vb->state == VB2_BUF_STATE_DEQUEUED)
continue;
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+
num = scnprintf(str, sizeof(str),
"capture[%2d] state = %10s, %8s\n",
i, vb2_stat_name[vb->state],
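
The debug dump above stops indexing vq->bufs[] directly; vb2_get_buffer()
may return NULL for a slot that was never allocated, so the loop skips
those. The safe iteration shape in isolation:

	static void foo_dump_queue(struct vb2_queue *vq)
	{
		unsigned int i;

		for (i = 0; i < vb2_get_num_buffers(vq); i++) {
			struct vb2_buffer *vb = vb2_get_buffer(vq, i);

			if (!vb)	/* unallocated slot */
				continue;

			pr_debug("buf %u: state %d\n", i, vb->state);
		}
	}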
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index d7e0de49b..c88738e8f 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -439,7 +439,7 @@ int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
else
q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
- return q->num_buffers;
+ return vb2_get_num_buffers(q);
}
static void vpu_m2m_device_run(void *priv)
@@ -587,7 +587,7 @@ static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
fmt->sizeimage[0], fmt->bytesperline[0],
fmt->sizeimage[1], fmt->bytesperline[1],
fmt->sizeimage[2], fmt->bytesperline[2],
- q->num_buffers);
+ vb2_get_num_buffers(q));
vb2_clear_last_buffer_dequeued(q);
ret = call_vop(inst, start, q->type);
if (ret)
@@ -649,7 +649,7 @@ static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_q
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
- src_vq->min_buffers_needed = 1;
+ src_vq->min_queued_buffers = 1;
src_vq->dev = inst->vpu->dev;
src_vq->lock = &inst->lock;
ret = vb2_queue_init(src_vq);
@@ -666,7 +666,7 @@ static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_q
dst_vq->mem_ops = &vb2_vmalloc_memops;
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
- dst_vq->min_buffers_needed = 1;
+ dst_vq->min_queued_buffers = 1;
dst_vq->dev = inst->vpu->dev;
dst_vq->lock = &inst->lock;
ret = vb2_queue_init(dst_vq);
diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c
index d08aa7f73..fc6050e3b 100644
--- a/drivers/media/platform/aspeed/aspeed-video.c
+++ b/drivers/media/platform/aspeed/aspeed-video.c
@@ -2034,7 +2034,7 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
vbq->drv_priv = video;
vbq->buf_struct_size = sizeof(struct aspeed_video_buffer);
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vbq->min_buffers_needed = ASPEED_VIDEO_V4L2_MIN_BUF_REQ;
+ vbq->min_queued_buffers = ASPEED_VIDEO_V4L2_MIN_BUF_REQ;
rc = vb2_queue_init(vbq);
if (rc) {
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 4046212d4..f8450a8cc 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -559,11 +559,13 @@ static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
struct v4l2_subdev_state *sd_state)
{
- int ret;
+ struct v4l2_rect *try_crop =
+ v4l2_subdev_state_get_crop(sd_state, 0);
struct v4l2_subdev_frame_size_enum fse = {
.code = isi_fmt->mbus_code,
.which = V4L2_SUBDEV_FORMAT_TRY,
};
+ int ret;
ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
sd_state, &fse);
@@ -572,11 +574,11 @@ static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt,
* just use the maximum ISI can receive.
*/
if (ret) {
- sd_state->pads->try_crop.width = MAX_SUPPORT_WIDTH;
- sd_state->pads->try_crop.height = MAX_SUPPORT_HEIGHT;
+ try_crop->width = MAX_SUPPORT_WIDTH;
+ try_crop->height = MAX_SUPPORT_HEIGHT;
} else {
- sd_state->pads->try_crop.width = fse.max_width;
- sd_state->pads->try_crop.height = fse.max_height;
+ try_crop->width = fse.max_width;
+ try_crop->height = fse.max_height;
}
}
@@ -587,6 +589,7 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg = {};
struct v4l2_subdev_state pad_state = {
+ .sd = isi->entity.subdev,
.pads = &pad_cfg,
};
struct v4l2_subdev_format format = {
@@ -1242,7 +1245,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
q->ops = &isi_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->dev = &pdev->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index c2a979397..0ea5fa956 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -406,20 +406,20 @@ static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
format->format.field = V4L2_FIELD_NONE;
/* Set sink format */
- fmt = v4l2_subdev_get_pad_format(subdev, state, format->pad);
+ fmt = v4l2_subdev_state_get_format(state, format->pad);
*fmt = format->format;
/* Propagate to source formats */
for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
- fmt = v4l2_subdev_get_pad_format(subdev, state, i);
+ fmt = v4l2_subdev_state_get_format(state, i);
*fmt = format->format;
}
return 0;
}
-static int csi2rx_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *state)
+static int csi2rx_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format format = {
.pad = CSI2RX_PAD_SINK,
@@ -441,7 +441,6 @@ static int csi2rx_init_cfg(struct v4l2_subdev *subdev,
static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = csi2rx_set_fmt,
- .init_cfg = csi2rx_init_cfg,
};
static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
@@ -453,6 +452,10 @@ static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
.pad = &csi2rx_pad_ops,
};
+static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
+ .init_state = csi2rx_init_state,
+};
+
static const struct media_entity_operations csi2rx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -663,6 +666,7 @@ static int csi2rx_probe(struct platform_device *pdev)
csi2rx->subdev.owner = THIS_MODULE;
csi2rx->subdev.dev = &pdev->dev;
v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
+ csi2rx->subdev.internal_ops = &csi2rx_internal_ops;
v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
"%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index c115742f3..3d98f91f1 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -176,8 +176,7 @@ __csi2tx_get_pad_format(struct v4l2_subdev *subdev,
struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(subdev, sd_state,
- fmt->pad);
+ return v4l2_subdev_state_get_format(sd_state, fmt->pad);
return &csi2tx->pad_fmts[fmt->pad];
}
diff --git a/drivers/media/platform/chips-media/Kconfig b/drivers/media/platform/chips-media/Kconfig
index 57f8f8a22..ad350eb6b 100644
--- a/drivers/media/platform/chips-media/Kconfig
+++ b/drivers/media/platform/chips-media/Kconfig
@@ -2,19 +2,5 @@
comment "Chips&Media media platform drivers"
-config VIDEO_CODA
- tristate "Chips&Media Coda multi-standard codec IP"
- depends on V4L_MEM2MEM_DRIVERS
- depends on VIDEO_DEV && OF && (ARCH_MXC || COMPILE_TEST)
- select SRAM
- select VIDEOBUF2_DMA_CONTIG
- select VIDEOBUF2_VMALLOC
- select V4L2_JPEG_HELPER
- select V4L2_MEM2MEM_DEV
- select GENERIC_ALLOCATOR
- help
- Coda is a range of video codec IPs that supports
- H.264, MPEG-4, and other video formats.
-
-config VIDEO_IMX_VDOA
- def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
+source "drivers/media/platform/chips-media/coda/Kconfig"
+source "drivers/media/platform/chips-media/wave5/Kconfig"
diff --git a/drivers/media/platform/chips-media/Makefile b/drivers/media/platform/chips-media/Makefile
index bbb16425a..6b5d99de8 100644
--- a/drivers/media/platform/chips-media/Makefile
+++ b/drivers/media/platform/chips-media/Makefile
@@ -1,6 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-coda-vpu-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-mpeg2.o coda-mpeg4.o coda-jpeg.o
-
-obj-$(CONFIG_VIDEO_CODA) += coda-vpu.o
-obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
+obj-y += coda/
+obj-y += wave5/
diff --git a/drivers/media/platform/chips-media/coda/Kconfig b/drivers/media/platform/chips-media/coda/Kconfig
new file mode 100644
index 000000000..cb7b66c71
--- /dev/null
+++ b/drivers/media/platform/chips-media/coda/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_CODA
+ tristate "Chips&Media Coda multi-standard codec IP"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV && OF && (ARCH_MXC || COMPILE_TEST)
+ select SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_JPEG_HELPER
+ select V4L2_MEM2MEM_DEV
+ select GENERIC_ALLOCATOR
+ help
+ Coda is a range of video codec IPs that support
+ H.264, MPEG-4, and other video formats.
+
+config VIDEO_IMX_VDOA
+ def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
diff --git a/drivers/media/platform/chips-media/coda/Makefile b/drivers/media/platform/chips-media/coda/Makefile
new file mode 100644
index 000000000..bbb16425a
--- /dev/null
+++ b/drivers/media/platform/chips-media/coda/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+coda-vpu-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-mpeg2.o coda-mpeg4.o coda-jpeg.o
+
+obj-$(CONFIG_VIDEO_CODA) += coda-vpu.o
+obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
diff --git a/drivers/media/platform/chips-media/coda-bit.c b/drivers/media/platform/chips-media/coda/coda-bit.c
index ed47d5bd8..ed47d5bd8 100644
--- a/drivers/media/platform/chips-media/coda-bit.c
+++ b/drivers/media/platform/chips-media/coda/coda-bit.c
diff --git a/drivers/media/platform/chips-media/coda-common.c b/drivers/media/platform/chips-media/coda/coda-common.c
index cc4892129..7da0194ec 100644
--- a/drivers/media/platform/chips-media/coda-common.c
+++ b/drivers/media/platform/chips-media/coda/coda-common.c
@@ -794,7 +794,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
if (vb2_is_busy(vq)) {
v4l2_err(&ctx->dev->v4l2_dev, "%s: %s queue busy: %d\n",
- __func__, v4l2_type_names[f->type], vq->num_buffers);
+ __func__, v4l2_type_names[f->type], vb2_get_num_buffers(vq));
return -EBUSY;
}
@@ -2546,7 +2546,7 @@ static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
* would need to be reflected in job_ready(). Currently we expect all
* queues to have at least one buffer queued.
*/
- vq->min_buffers_needed = 1;
+ vq->min_queued_buffers = 1;
vq->dev = ctx->dev->dev;
return vb2_queue_init(vq);
diff --git a/drivers/media/platform/chips-media/coda-gdi.c b/drivers/media/platform/chips-media/coda/coda-gdi.c
index 59d65daca..59d65daca 100644
--- a/drivers/media/platform/chips-media/coda-gdi.c
+++ b/drivers/media/platform/chips-media/coda/coda-gdi.c
diff --git a/drivers/media/platform/chips-media/coda-h264.c b/drivers/media/platform/chips-media/coda/coda-h264.c
index 8bd0aa8af..8bd0aa8af 100644
--- a/drivers/media/platform/chips-media/coda-h264.c
+++ b/drivers/media/platform/chips-media/coda/coda-h264.c
diff --git a/drivers/media/platform/chips-media/coda-jpeg.c b/drivers/media/platform/chips-media/coda/coda-jpeg.c
index ba8f41002..ba8f41002 100644
--- a/drivers/media/platform/chips-media/coda-jpeg.c
+++ b/drivers/media/platform/chips-media/coda/coda-jpeg.c
diff --git a/drivers/media/platform/chips-media/coda-mpeg2.c b/drivers/media/platform/chips-media/coda/coda-mpeg2.c
index 6f3f6721d..6f3f6721d 100644
--- a/drivers/media/platform/chips-media/coda-mpeg2.c
+++ b/drivers/media/platform/chips-media/coda/coda-mpeg2.c
diff --git a/drivers/media/platform/chips-media/coda-mpeg4.c b/drivers/media/platform/chips-media/coda/coda-mpeg4.c
index 483a4fba1..483a4fba1 100644
--- a/drivers/media/platform/chips-media/coda-mpeg4.c
+++ b/drivers/media/platform/chips-media/coda/coda-mpeg4.c
diff --git a/drivers/media/platform/chips-media/coda.h b/drivers/media/platform/chips-media/coda/coda.h
index ddfd0a32c..ddfd0a32c 100644
--- a/drivers/media/platform/chips-media/coda.h
+++ b/drivers/media/platform/chips-media/coda/coda.h
diff --git a/drivers/media/platform/chips-media/coda_regs.h b/drivers/media/platform/chips-media/coda/coda_regs.h
index db81a904c..db81a904c 100644
--- a/drivers/media/platform/chips-media/coda_regs.h
+++ b/drivers/media/platform/chips-media/coda/coda_regs.h
diff --git a/drivers/media/platform/chips-media/imx-vdoa.c b/drivers/media/platform/chips-media/coda/imx-vdoa.c
index c3561fcec..c3561fcec 100644
--- a/drivers/media/platform/chips-media/imx-vdoa.c
+++ b/drivers/media/platform/chips-media/coda/imx-vdoa.c
diff --git a/drivers/media/platform/chips-media/imx-vdoa.h b/drivers/media/platform/chips-media/coda/imx-vdoa.h
index a62eab476..a62eab476 100644
--- a/drivers/media/platform/chips-media/imx-vdoa.h
+++ b/drivers/media/platform/chips-media/coda/imx-vdoa.h
diff --git a/drivers/media/platform/chips-media/trace.h b/drivers/media/platform/chips-media/coda/trace.h
index 19f98e6da..abc6a01a7 100644
--- a/drivers/media/platform/chips-media/trace.h
+++ b/drivers/media/platform/chips-media/coda/trace.h
@@ -167,7 +167,7 @@ DEFINE_EVENT(coda_buf_class, coda_jpeg_done,
#endif /* __CODA_TRACE_H__ */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/media/platform/chips-media
+#define TRACE_INCLUDE_PATH ../../drivers/media/platform/chips-media/coda
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
diff --git a/drivers/media/platform/chips-media/wave5/Kconfig b/drivers/media/platform/chips-media/wave5/Kconfig
new file mode 100644
index 000000000..f1bcef517
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_WAVE_VPU
+ tristate "Chips&Media Wave Codec Driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_K3 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ select GENERIC_ALLOCATOR
+ help
+ Chips&Media stateful encoder and decoder driver.
+ The driver supports HEVC and H.264 formats.
+ To compile this driver as a module, choose M here: the
+ module will be called wave5.
diff --git a/drivers/media/platform/chips-media/wave5/Makefile b/drivers/media/platform/chips-media/wave5/Makefile
new file mode 100644
index 000000000..3d738a03b
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_WAVE_VPU) += wave5.o
+wave5-objs += wave5-hw.o \
+ wave5-vpuapi.o \
+ wave5-vdi.o \
+ wave5-vpu-dec.o \
+ wave5-vpu.o \
+ wave5-vpu-enc.o \
+ wave5-helper.o
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.c b/drivers/media/platform/chips-media/wave5/wave5-helper.c
new file mode 100644
index 000000000..8433ecab2
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - helper functions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#include "wave5-helper.h"
+
+const char *state_to_str(enum vpu_instance_state state)
+{
+ switch (state) {
+ case VPU_INST_STATE_NONE:
+ return "NONE";
+ case VPU_INST_STATE_OPEN:
+ return "OPEN";
+ case VPU_INST_STATE_INIT_SEQ:
+ return "INIT_SEQ";
+ case VPU_INST_STATE_PIC_RUN:
+ return "PIC_RUN";
+ case VPU_INST_STATE_STOP:
+ return "STOP";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void wave5_cleanup_instance(struct vpu_instance *inst)
+{
+ int i;
+
+ if (list_is_singular(&inst->list))
+ wave5_vdi_free_sram(inst->dev);
+
+ for (i = 0; i < inst->fbc_buf_count; i++)
+ wave5_vpu_dec_reset_framebuffer(inst, i);
+
+ wave5_vdi_free_dma_memory(inst->dev, &inst->bitstream_vbuf);
+ v4l2_ctrl_handler_free(&inst->v4l2_ctrl_hdl);
+ if (inst->v4l2_fh.vdev) {
+ v4l2_fh_del(&inst->v4l2_fh);
+ v4l2_fh_exit(&inst->v4l2_fh);
+ }
+ list_del_init(&inst->list);
+ ida_free(&inst->dev->inst_ida, inst->id);
+ kfree(inst->codec_info);
+ kfree(inst);
+}
+
+int wave5_vpu_release_device(struct file *filp,
+ int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
+ char *name)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(filp->private_data);
+
+ v4l2_m2m_ctx_release(inst->v4l2_fh.m2m_ctx);
+ if (inst->state != VPU_INST_STATE_NONE) {
+ u32 fail_res;
+ int ret;
+
+ ret = close_func(inst, &fail_res);
+ if (fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING) {
+ dev_err(inst->dev->dev, "%s close failed, device is still running\n",
+ name);
+ return -EBUSY;
+ }
+ if (ret && ret != -EIO) {
+ dev_err(inst->dev->dev, "%s close, fail: %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ wave5_cleanup_instance(inst);
+
+ return 0;
+}
+
+int wave5_vpu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq,
+ const struct vb2_ops *ops)
+{
+ struct vpu_instance *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->ops = ops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->buf_struct_size = sizeof(struct vpu_src_buffer);
+ src_vq->drv_priv = inst;
+ src_vq->lock = &inst->dev->dev_lock;
+ src_vq->dev = inst->dev->v4l2_dev.dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->ops = ops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->buf_struct_size = sizeof(struct vpu_src_buffer);
+ dst_vq->drv_priv = inst;
+ dst_vq->lock = &inst->dev->dev_lock;
+ dst_vq->dev = inst->dev->v4l2_dev.dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int wave5_vpu_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ bool is_decoder = inst->type == VPU_INST_TYPE_DEC;
+
+ dev_dbg(inst->dev->dev, "%s: [%s] type: %u id: %u | flags: %u\n", __func__,
+ is_decoder ? "decoder" : "encoder", sub->type, sub->id, sub->flags);
+
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ if (is_decoder)
+ return v4l2_src_change_event_subscribe(fh, sub);
+ return -EINVAL;
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
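/*
 * A minimal userspace sketch for the event plumbing above (an assumed
 * example client, not driver code): a decoder subscribes to source
 * changes and EOS through the standard VIDIOC_SUBSCRIBE_EVENT ioctl,
 * which lands in wave5_vpu_subscribe_event().
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int example_subscribe_sketch(int vfd)
{
	struct v4l2_event_subscription sub;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;	/* decoder-only per the helper */
	if (ioctl(vfd, VIDIOC_SUBSCRIBE_EVENT, &sub))
		return -1;

	sub.type = V4L2_EVENT_EOS;
	return ioctl(vfd, VIDIOC_SUBSCRIBE_EVENT, &sub);
}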
+
+int wave5_vpu_g_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i;
+
+ f->fmt.pix_mp.width = inst->src_fmt.width;
+ f->fmt.pix_mp.height = inst->src_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->src_fmt.pixelformat;
+ f->fmt.pix_mp.field = inst->src_fmt.field;
+ f->fmt.pix_mp.flags = inst->src_fmt.flags;
+ f->fmt.pix_mp.num_planes = inst->src_fmt.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline = inst->src_fmt.plane_fmt[i].bytesperline;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage = inst->src_fmt.plane_fmt[i].sizeimage;
+ }
+
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+const struct vpu_format *wave5_find_vpu_fmt(unsigned int v4l2_pix_fmt,
+ const struct vpu_format fmt_list[MAX_FMTS])
+{
+ unsigned int index;
+
+ for (index = 0; index < MAX_FMTS; index++) {
+ if (fmt_list[index].v4l2_pix_fmt == v4l2_pix_fmt)
+ return &fmt_list[index];
+ }
+
+ return NULL;
+}
+
+const struct vpu_format *wave5_find_vpu_fmt_by_idx(unsigned int idx,
+ const struct vpu_format fmt_list[MAX_FMTS])
+{
+ if (idx >= MAX_FMTS)
+ return NULL;
+
+ if (!fmt_list[idx].v4l2_pix_fmt)
+ return NULL;
+
+ return &fmt_list[idx];
+}
+
+enum wave_std wave5_to_vpu_std(unsigned int v4l2_pix_fmt, enum vpu_instance_type type)
+{
+ switch (v4l2_pix_fmt) {
+ case V4L2_PIX_FMT_H264:
+ return type == VPU_INST_TYPE_DEC ? W_AVC_DEC : W_AVC_ENC;
+ case V4L2_PIX_FMT_HEVC:
+ return type == VPU_INST_TYPE_DEC ? W_HEVC_DEC : W_HEVC_ENC;
+ default:
+ return STD_UNKNOWN;
+ }
+}
+
+void wave5_return_bufs(struct vb2_queue *q, u32 state)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct v4l2_ctrl_handler *v4l2_ctrl_hdl = &inst->v4l2_ctrl_hdl;
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!vbuf)
+ return;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, v4l2_ctrl_hdl);
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.h b/drivers/media/platform/chips-media/wave5/wave5-helper.h
new file mode 100644
index 000000000..6cee1c14d
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - helper definitions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef __WAVE_HELPER_H__
+#define __WAVE_HELPER_H__
+
+#include "wave5-vpu.h"
+
+#define FMT_TYPES 2
+#define MAX_FMTS 12
+
+const char *state_to_str(enum vpu_instance_state state);
+void wave5_cleanup_instance(struct vpu_instance *inst);
+int wave5_vpu_release_device(struct file *filp,
+ int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
+ char *name);
+int wave5_vpu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq,
+ const struct vb2_ops *ops);
+int wave5_vpu_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub);
+int wave5_vpu_g_fmt_out(struct file *file, void *fh, struct v4l2_format *f);
+const struct vpu_format *wave5_find_vpu_fmt(unsigned int v4l2_pix_fmt,
+ const struct vpu_format fmt_list[MAX_FMTS]);
+const struct vpu_format *wave5_find_vpu_fmt_by_idx(unsigned int idx,
+ const struct vpu_format fmt_list[MAX_FMTS]);
+enum wave_std wave5_to_vpu_std(unsigned int v4l2_pix_fmt, enum vpu_instance_type type);
+void wave5_return_bufs(struct vb2_queue *q, u32 state);
+#endif
diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
new file mode 100644
index 000000000..f1e022fb1
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
@@ -0,0 +1,2551 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - wave5 backend logic
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#include <linux/iopoll.h>
+#include <linux/bitfield.h>
+#include "wave5-vpu.h"
+#include "wave5.h"
+#include "wave5-regdefine.h"
+
+#define FIO_TIMEOUT 10000000
+#define FIO_CTRL_READY BIT(31)
+#define FIO_CTRL_WRITE BIT(16)
+#define VPU_BUSY_CHECK_TIMEOUT 10000000
+#define QUEUE_REPORT_MASK 0xffff
+
+/* Encoder support fields */
+#define FEATURE_HEVC10BIT_ENC BIT(3)
+#define FEATURE_AVC10BIT_ENC BIT(11)
+#define FEATURE_AVC_ENCODER BIT(1)
+#define FEATURE_HEVC_ENCODER BIT(0)
+
+/* Decoder support fields */
+#define FEATURE_AVC_DECODER BIT(3)
+#define FEATURE_HEVC_DECODER BIT(2)
+
+#define FEATURE_BACKBONE BIT(16)
+#define FEATURE_VCORE_BACKBONE BIT(22)
+#define FEATURE_VCPU_BACKBONE BIT(28)
+
+#define REMAP_CTRL_MAX_SIZE_BITS ((W5_REMAP_MAX_SIZE >> 12) & 0x1ff)
+#define REMAP_CTRL_REGISTER_VALUE(index) ( \
+ (BIT(31) | (index << 12) | BIT(11) | REMAP_CTRL_MAX_SIZE_BITS) \
+)
+
+#define FASTIO_ADDRESS_MASK GENMASK(15, 0)
+#define SEQ_PARAM_PROFILE_MASK GENMASK(30, 24)
+
+static void _wave5_print_reg_err(struct vpu_device *vpu_dev, u32 reg_fail_reason,
+ const char *func);
+#define PRINT_REG_ERR(dev, reason) _wave5_print_reg_err((dev), (reason), __func__)
+
+static inline const char *cmd_to_str(int cmd, bool is_dec)
+{
+ switch (cmd) {
+ case W5_INIT_VPU:
+ return "W5_INIT_VPU";
+ case W5_WAKEUP_VPU:
+ return "W5_WAKEUP_VPU";
+ case W5_SLEEP_VPU:
+ return "W5_SLEEP_VPU";
+ case W5_CREATE_INSTANCE:
+ return "W5_CREATE_INSTANCE";
+ case W5_FLUSH_INSTANCE:
+ return "W5_FLUSH_INSTANCE";
+ case W5_DESTROY_INSTANCE:
+ return "W5_DESTROY_INSTANCE";
+ case W5_INIT_SEQ:
+ return "W5_INIT_SEQ";
+ case W5_SET_FB:
+ return "W5_SET_FB";
+ case W5_DEC_ENC_PIC:
+ if (is_dec)
+ return "W5_DEC_PIC";
+ return "W5_ENC_PIC";
+ case W5_ENC_SET_PARAM:
+ return "W5_ENC_SET_PARAM";
+ case W5_QUERY:
+ return "W5_QUERY";
+ case W5_UPDATE_BS:
+ return "W5_UPDATE_BS";
+ case W5_MAX_VPU_COMD:
+ return "W5_MAX_VPU_COMD";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void _wave5_print_reg_err(struct vpu_device *vpu_dev, u32 reg_fail_reason,
+ const char *func)
+{
+ struct device *dev = vpu_dev->dev;
+ u32 reg_val;
+
+ switch (reg_fail_reason) {
+ case WAVE5_SYSERR_QUEUEING_FAIL:
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_QUEUE_FAIL_REASON);
+ dev_dbg(dev, "%s: queueing failure: 0x%x\n", func, reg_val);
+ break;
+ case WAVE5_SYSERR_RESULT_NOT_READY:
+ dev_err(dev, "%s: result not ready: 0x%x\n", func, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_ACCESS_VIOLATION_HW:
+ dev_err(dev, "%s: access violation: 0x%x\n", func, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_WATCHDOG_TIMEOUT:
+ dev_err(dev, "%s: watchdog timeout: 0x%x\n", func, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_BUS_ERROR:
+ dev_err(dev, "%s: bus error: 0x%x\n", func, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_DOUBLE_FAULT:
+ dev_err(dev, "%s: double fault: 0x%x\n", func, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_VPU_STILL_RUNNING:
+ dev_err(dev, "%s: still running: 0x%x\n", func, reg_fail_reason);
+ break;
+ case WAVE5_SYSERR_VLC_BUF_FULL:
+ dev_err(dev, "%s: vlc buf full: 0x%x\n", func, reg_fail_reason);
+ break;
+ default:
+ dev_err(dev, "%s: failure:: 0x%x\n", func, reg_fail_reason);
+ break;
+ }
+}
+
+static int wave5_wait_fio_readl(struct vpu_device *vpu_dev, u32 addr, u32 val)
+{
+ u32 ctrl;
+ int ret;
+
+ ctrl = addr & 0xffff;
+ wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_CTRL_ADDR, ctrl);
+ ret = read_poll_timeout(wave5_vdi_read_register, ctrl, ctrl & FIO_CTRL_READY,
+ 0, FIO_TIMEOUT, false, vpu_dev, W5_VPU_FIO_CTRL_ADDR);
+ if (ret)
+ return ret;
+
+ if (wave5_vdi_read_register(vpu_dev, W5_VPU_FIO_DATA) != val)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void wave5_fio_writel(struct vpu_device *vpu_dev, unsigned int addr, unsigned int data)
+{
+ int ret;
+ unsigned int ctrl;
+
+ wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_DATA, data);
+ ctrl = FIELD_GET(FASTIO_ADDRESS_MASK, addr);
+ ctrl |= FIO_CTRL_WRITE;
+ wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_CTRL_ADDR, ctrl);
+ ret = read_poll_timeout(wave5_vdi_read_register, ctrl, ctrl & FIO_CTRL_READY, 0,
+ FIO_TIMEOUT, false, vpu_dev, W5_VPU_FIO_CTRL_ADDR);
+ if (ret)
+ dev_dbg_ratelimited(vpu_dev->dev, "FIO write timeout: addr=0x%x data=%x\n",
+ addr, data);
+}
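/*
 * A minimal sketch of the matching FIO read side (illustrative only;
 * the _sketch name is an assumption, not part of the driver): program
 * the address, poll for FIO_CTRL_READY, then fetch the data word. It
 * reuses only helpers and register names already used above.
 */
static inline int wave5_fio_readl_sketch(struct vpu_device *vpu_dev,
					 unsigned int addr, u32 *data)
{
	u32 ctrl = FIELD_GET(FASTIO_ADDRESS_MASK, addr);
	int ret;

	wave5_vdi_write_register(vpu_dev, W5_VPU_FIO_CTRL_ADDR, ctrl);
	ret = read_poll_timeout(wave5_vdi_read_register, ctrl,
				ctrl & FIO_CTRL_READY, 0, FIO_TIMEOUT,
				false, vpu_dev, W5_VPU_FIO_CTRL_ADDR);
	if (ret)
		return ret;

	*data = wave5_vdi_read_register(vpu_dev, W5_VPU_FIO_DATA);
	return 0;
}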
+
+static int wave5_wait_bus_busy(struct vpu_device *vpu_dev, unsigned int addr)
+{
+ u32 gdi_status_check_value = 0x3f;
+
+ if (vpu_dev->product_code == WAVE521C_CODE ||
+ vpu_dev->product_code == WAVE521_CODE ||
+ vpu_dev->product_code == WAVE521E1_CODE)
+ gdi_status_check_value = 0x00ff1f3f;
+
+ return wave5_wait_fio_readl(vpu_dev, addr, gdi_status_check_value);
+}
+
+static int wave5_wait_vpu_busy(struct vpu_device *vpu_dev, unsigned int addr)
+{
+ u32 data;
+
+ return read_poll_timeout(wave5_vdi_read_register, data, data == 0,
+ 0, VPU_BUSY_CHECK_TIMEOUT, false, vpu_dev, addr);
+}
+
+static int wave5_wait_vcpu_bus_busy(struct vpu_device *vpu_dev, unsigned int addr)
+{
+ return wave5_wait_fio_readl(vpu_dev, addr, 0);
+}
+
+bool wave5_vpu_is_init(struct vpu_device *vpu_dev)
+{
+ return vpu_read_reg(vpu_dev, W5_VCPU_CUR_PC) != 0;
+}
+
+unsigned int wave5_vpu_get_product_id(struct vpu_device *vpu_dev)
+{
+ u32 val = vpu_read_reg(vpu_dev, W5_PRODUCT_NUMBER);
+
+ switch (val) {
+ case WAVE521C_CODE:
+ return PRODUCT_ID_521;
+ case WAVE521_CODE:
+ case WAVE521C_DUAL_CODE:
+ case WAVE521E1_CODE:
+ case WAVE511_CODE:
+ case WAVE517_CODE:
+ case WAVE537_CODE:
+ dev_err(vpu_dev->dev, "Unsupported product id (%x)\n", val);
+ break;
+ default:
+ dev_err(vpu_dev->dev, "Invalid product id (%x)\n", val);
+ break;
+ }
+
+ return PRODUCT_ID_NONE;
+}
+
+static void wave5_bit_issue_command(struct vpu_device *vpu_dev, struct vpu_instance *inst, u32 cmd)
+{
+ u32 instance_index;
+ u32 codec_mode;
+
+ if (inst) {
+ instance_index = inst->id;
+ codec_mode = inst->std;
+
+ vpu_write_reg(vpu_dev, W5_CMD_INSTANCE_INFO, (codec_mode << 16) |
+ (instance_index & 0xffff));
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ }
+
+ vpu_write_reg(vpu_dev, W5_COMMAND, cmd);
+
+ if (inst) {
+ dev_dbg(vpu_dev->dev, "%s: cmd=0x%x (%s)\n", __func__, cmd,
+ cmd_to_str(cmd, inst->type == VPU_INST_TYPE_DEC));
+ } else {
+ dev_dbg(vpu_dev->dev, "%s: cmd=0x%x\n", __func__, cmd);
+ }
+
+ vpu_write_reg(vpu_dev, W5_VPU_HOST_INT_REQ, 1);
+}
+
+static int wave5_vpu_firmware_command_queue_error_check(struct vpu_device *dev, u32 *fail_res)
+{
+ u32 reason = 0;
+
+ /* Check if we were able to add a command into the VCPU QUEUE */
+ if (!vpu_read_reg(dev, W5_RET_SUCCESS)) {
+ reason = vpu_read_reg(dev, W5_RET_FAIL_REASON);
+ PRINT_REG_ERR(dev, reason);
+
+ /*
+ * The fail_res argument is optional. When it is NULL, just
+ * return -EIO on a queueing failure. Otherwise store the failure
+ * reason in *fail_res so that the caller can act on it.
+ */
+ if (fail_res)
+ *fail_res = reason;
+ else
+ return -EIO;
+
+ if (reason == WAVE5_SYSERR_VPU_STILL_RUNNING)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int send_firmware_command(struct vpu_instance *inst, u32 cmd, bool check_success,
+ u32 *queue_status, u32 *fail_result)
+{
+ int ret;
+
+ wave5_bit_issue_command(inst->dev, inst, cmd);
+ ret = wave5_wait_vpu_busy(inst->dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(inst->dev->dev, "%s: command: '%s', timed out\n", __func__,
+ cmd_to_str(cmd, inst->type == VPU_INST_TYPE_DEC));
+ return -ETIMEDOUT;
+ }
+
+ if (queue_status)
+ *queue_status = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ /*
+ * In some cases we want to send multiple commands before checking
+ * whether they are queued properly.
+ */
+ if (!check_success)
+ return 0;
+
+ return wave5_vpu_firmware_command_queue_error_check(inst->dev, fail_result);
+}
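/*
 * Sketch of the deferred-check pattern that check_success=false
 * enables (a hypothetical two-command batch; the example_ name is an
 * assumption): queue several commands, then test W5_RET_SUCCESS once,
 * mirroring how the framebuffer registration loop below works.
 */
static int example_batched_commands_sketch(struct vpu_instance *inst)
{
	int ret;

	ret = send_firmware_command(inst, W5_SET_FB, false, NULL, NULL);
	if (ret)
		return ret;
	ret = send_firmware_command(inst, W5_SET_FB, false, NULL, NULL);
	if (ret)
		return ret;

	/* One explicit success check for the whole batch. */
	if (!vpu_read_reg(inst->dev, W5_RET_SUCCESS))
		return -EIO;
	return 0;
}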
+
+static int wave5_send_query(struct vpu_device *vpu_dev, struct vpu_instance *inst,
+ enum query_opt query_opt)
+{
+ int ret;
+
+ vpu_write_reg(vpu_dev, W5_QUERY_OPTION, query_opt);
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ wave5_bit_issue_command(vpu_dev, inst, W5_QUERY);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_warn(vpu_dev->dev, "command: 'W5_QUERY', timed out opt=0x%x\n", query_opt);
+ return ret;
+ }
+
+ return wave5_vpu_firmware_command_queue_error_check(vpu_dev, NULL);
+}
+
+static int setup_wave5_properties(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ struct vpu_attr *p_attr = &vpu_dev->attr;
+ u32 reg_val;
+ u8 *str;
+ int ret;
+ u32 hw_config_def0, hw_config_def1, hw_config_feature;
+
+ ret = wave5_send_query(vpu_dev, NULL, GET_VPU_INFO);
+ if (ret)
+ return ret;
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_PRODUCT_NAME);
+ str = (u8 *)&reg_val;
+ p_attr->product_name[0] = str[3];
+ p_attr->product_name[1] = str[2];
+ p_attr->product_name[2] = str[1];
+ p_attr->product_name[3] = str[0];
+ p_attr->product_name[4] = 0;
+
+ p_attr->product_id = wave5_vpu_get_product_id(vpu_dev);
+ p_attr->product_version = vpu_read_reg(vpu_dev, W5_RET_PRODUCT_VERSION);
+ p_attr->fw_version = vpu_read_reg(vpu_dev, W5_RET_FW_VERSION);
+ p_attr->customer_id = vpu_read_reg(vpu_dev, W5_RET_CUSTOMER_ID);
+ hw_config_def0 = vpu_read_reg(vpu_dev, W5_RET_STD_DEF0);
+ hw_config_def1 = vpu_read_reg(vpu_dev, W5_RET_STD_DEF1);
+ hw_config_feature = vpu_read_reg(vpu_dev, W5_RET_CONF_FEATURE);
+
+ p_attr->support_hevc10bit_enc = FIELD_GET(FEATURE_HEVC10BIT_ENC, hw_config_feature);
+ p_attr->support_avc10bit_enc = FIELD_GET(FEATURE_AVC10BIT_ENC, hw_config_feature);
+
+ p_attr->support_decoders = FIELD_GET(FEATURE_AVC_DECODER, hw_config_def1) << STD_AVC;
+ p_attr->support_decoders |= FIELD_GET(FEATURE_HEVC_DECODER, hw_config_def1) << STD_HEVC;
+ p_attr->support_encoders = FIELD_GET(FEATURE_AVC_ENCODER, hw_config_def1) << STD_AVC;
+ p_attr->support_encoders |= FIELD_GET(FEATURE_HEVC_ENCODER, hw_config_def1) << STD_HEVC;
+
+ p_attr->support_backbone = FIELD_GET(FEATURE_BACKBONE, hw_config_def0);
+ p_attr->support_vcpu_backbone = FIELD_GET(FEATURE_VCPU_BACKBONE, hw_config_def0);
+ p_attr->support_vcore_backbone = FIELD_GET(FEATURE_VCORE_BACKBONE, hw_config_def0);
+
+ return 0;
+}
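/*
 * Worked example for the feature decoding above (a sketch): with
 * FEATURE_HEVC_DECODER == BIT(2) and FEATURE_AVC_DECODER == BIT(3),
 * a hw_config_def1 of 0xc makes both FIELD_GET() results 1, so
 * support_decoders ends up as BIT(STD_AVC) | BIT(STD_HEVC).
 */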
+
+int wave5_vpu_get_version(struct vpu_device *vpu_dev, u32 *revision)
+{
+ u32 reg_val;
+ int ret;
+
+ ret = wave5_send_query(vpu_dev, NULL, GET_VPU_INFO);
+ if (ret)
+ return ret;
+
+ reg_val = vpu_read_reg(vpu_dev, W5_RET_FW_VERSION);
+ if (revision) {
+ *revision = reg_val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void remap_page(struct vpu_device *vpu_dev, dma_addr_t code_base, u32 index)
+{
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CTRL, REMAP_CTRL_REGISTER_VALUE(index));
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_VADDR, index * W5_REMAP_MAX_SIZE);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_PADDR, code_base + index * W5_REMAP_MAX_SIZE);
+}
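/*
 * Worked example for remap_page() (a sketch; the exact window size
 * comes from W5_REMAP_MAX_SIZE in wave5-regdefine.h): index 0 maps
 * VPU virtual address 0 onto code_base and index 1 maps virtual
 * address W5_REMAP_MAX_SIZE onto code_base + W5_REMAP_MAX_SIZE, so
 * the firmware sees its code buffer as one contiguous region that
 * starts at virtual address 0.
 */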
+
+int wave5_vpu_init(struct device *dev, u8 *fw, size_t size)
+{
+ struct vpu_buf *common_vb;
+ dma_addr_t code_base, temp_base;
+ u32 code_size, temp_size;
+ u32 i, reg_val, reason_code;
+ int ret;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ common_vb = &vpu_dev->common_mem;
+
+ code_base = common_vb->daddr;
+ /* ALIGN TO 4KB */
+ code_size = (WAVE5_MAX_CODE_BUF_SIZE & ~0xfff);
+ if (code_size < size * 2)
+ return -EINVAL;
+
+ temp_base = common_vb->daddr + WAVE5_TEMPBUF_OFFSET;
+ temp_size = WAVE5_TEMPBUF_SIZE;
+
+ ret = wave5_vdi_write_memory(vpu_dev, common_vb, 0, fw, size);
+ if (ret < 0) {
+ dev_err(vpu_dev->dev, "VPU init, Writing firmware to common buffer, fail: %d\n",
+ ret);
+ return ret;
+ }
+
+ vpu_write_reg(vpu_dev, W5_PO_CONF, 0);
+
+ /* clear registers */
+
+ for (i = W5_CMD_REG_BASE; i < W5_CMD_REG_END; i += 4)
+ vpu_write_reg(vpu_dev, i, 0x00);
+
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX0);
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX1);
+
+ vpu_write_reg(vpu_dev, W5_ADDR_CODE_BASE, code_base);
+ vpu_write_reg(vpu_dev, W5_CODE_SIZE, code_size);
+ vpu_write_reg(vpu_dev, W5_CODE_PARAM, (WAVE5_UPPER_PROC_AXI_ID << 4) | 0);
+ vpu_write_reg(vpu_dev, W5_ADDR_TEMP_BASE, temp_base);
+ vpu_write_reg(vpu_dev, W5_TEMP_SIZE, temp_size);
+
+ /* These registers must be reset explicitly */
+ vpu_write_reg(vpu_dev, W5_HW_OPTION, 0);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROC_EXT_ADDR, 0);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_AXI_PARAM, 0);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_PARAM, 0);
+
+ /* Encoder interrupt */
+ reg_val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ reg_val |= BIT(INT_WAVE5_ENC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_FULL);
+ /* Decoder interrupt */
+ reg_val |= BIT(INT_WAVE5_INIT_SEQ);
+ reg_val |= BIT(INT_WAVE5_DEC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_EMPTY);
+ vpu_write_reg(vpu_dev, W5_VPU_VINT_ENABLE, reg_val);
+
+ reg_val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if (FIELD_GET(FEATURE_BACKBONE, reg_val)) {
+ reg_val = ((WAVE5_PROC_AXI_ID << 28) |
+ (WAVE5_PRP_AXI_ID << 24) |
+ (WAVE5_FBD_Y_AXI_ID << 20) |
+ (WAVE5_FBC_Y_AXI_ID << 16) |
+ (WAVE5_FBD_C_AXI_ID << 12) |
+ (WAVE5_FBC_C_AXI_ID << 8) |
+ (WAVE5_PRI_AXI_ID << 4) |
+ WAVE5_SEC_AXI_ID);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROG_AXI_ID, reg_val);
+ }
+
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_INIT_VPU);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CORE_START, 1);
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "VPU init(W5_VPU_REMAP_CORE_START) timeout\n");
+ return ret;
+ }
+
+ ret = wave5_vpu_firmware_command_queue_error_check(vpu_dev, &reason_code);
+ if (ret)
+ return ret;
+
+ return setup_wave5_properties(dev);
+}
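/*
 * A minimal boot-order usage sketch (hypothetical caller; the
 * example_ name and the firmware data argument are assumptions, not
 * driver code): write the firmware through wave5_vpu_init(), which
 * also issues W5_INIT_VPU, then query the running revision.
 */
static int example_wave5_boot_sketch(struct device *dev, u8 *fw_data, size_t fw_size)
{
	struct vpu_device *vpu_dev = dev_get_drvdata(dev);
	u32 revision;
	int ret;

	ret = wave5_vpu_init(dev, fw_data, fw_size);
	if (ret)
		return ret;

	ret = wave5_vpu_get_version(vpu_dev, &revision);
	if (ret)
		return ret;

	dev_dbg(dev, "wave5 firmware revision: %u\n", revision);
	return 0;
}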
+
+int wave5_vpu_build_up_dec_param(struct vpu_instance *inst,
+ struct dec_open_param *param)
+{
+ int ret;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ p_dec_info->cycle_per_tick = 256;
+ if (vpu_dev->sram_buf.size) {
+ p_dec_info->sec_axi_info.use_bit_enable = 1;
+ p_dec_info->sec_axi_info.use_ip_enable = 1;
+ p_dec_info->sec_axi_info.use_lf_row_enable = 1;
+ }
+ switch (inst->std) {
+ case W_HEVC_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_HEVC;
+ break;
+ case W_AVC_DEC:
+ p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_AVC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_dec_info->vb_work.size = WAVE521DEC_WORKBUF_SIZE;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &p_dec_info->vb_work);
+ if (ret)
+ return ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_VCORE_INFO, 1);
+
+ wave5_vdi_clear_memory(inst->dev, &p_dec_info->vb_work);
+
+ vpu_write_reg(inst->dev, W5_ADDR_WORK_BASE, p_dec_info->vb_work.daddr);
+ vpu_write_reg(inst->dev, W5_WORK_SIZE, p_dec_info->vb_work.size);
+
+ vpu_write_reg(inst->dev, W5_CMD_ADDR_SEC_AXI, vpu_dev->sram_buf.daddr);
+ vpu_write_reg(inst->dev, W5_CMD_SEC_AXI_SIZE, vpu_dev->sram_buf.size);
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_BS_START_ADDR, p_dec_info->stream_buf_start_addr);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_BS_SIZE, p_dec_info->stream_buf_size);
+
+ /* NOTE: SDMA reads MSB first */
+ vpu_write_reg(inst->dev, W5_CMD_BS_PARAM, BITSTREAM_ENDIANNESS_BIG_ENDIAN);
+ /* This register must be reset explicitly */
+ vpu_write_reg(inst->dev, W5_CMD_EXT_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1, (COMMAND_QUEUE_DEPTH - 1));
+
+ ret = send_firmware_command(inst, W5_CREATE_INSTANCE, true, NULL, NULL);
+ if (ret) {
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
+ return ret;
+ }
+
+ p_dec_info->product_code = vpu_read_reg(inst->dev, W5_PRODUCT_NUMBER);
+
+ return 0;
+}
+
+int wave5_vpu_hw_flush_instance(struct vpu_instance *inst)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ u32 instance_queue_count, report_queue_count;
+ u32 reg_val = 0;
+ u32 fail_res = 0;
+ int ret;
+
+ ret = send_firmware_command(inst, W5_FLUSH_INSTANCE, true, &reg_val, &fail_res);
+ if (ret)
+ return ret;
+
+ instance_queue_count = (reg_val >> 16) & 0xff;
+ report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+ if (instance_queue_count != 0 || report_queue_count != 0) {
+ dev_warn(inst->dev->dev,
+ "FLUSH_INSTANCE command did not clear the queued command and report counts\n");
+ }
+
+ /* reset our local copy of the counts */
+ p_dec_info->instance_queue_count = 0;
+ p_dec_info->report_queue_count = 0;
+
+ return 0;
+}
+
+static u32 get_bitstream_options(struct dec_info *info)
+{
+ u32 bs_option = BSOPTION_ENABLE_EXPLICIT_END;
+
+ if (info->stream_endflag)
+ bs_option |= BSOPTION_HIGHLIGHT_STREAM_END;
+ return bs_option;
+}
+
+int wave5_vpu_dec_init_seq(struct vpu_instance *inst)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ u32 cmd_option = INIT_SEQ_NORMAL;
+ u32 reg_val, fail_res;
+ int ret;
+
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ vpu_write_reg(inst->dev, W5_BS_RD_PTR, p_dec_info->stream_rd_ptr);
+ vpu_write_reg(inst->dev, W5_BS_WR_PTR, p_dec_info->stream_wr_ptr);
+
+ vpu_write_reg(inst->dev, W5_BS_OPTION, get_bitstream_options(p_dec_info));
+
+ vpu_write_reg(inst->dev, W5_COMMAND_OPTION, cmd_option);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_USER_MASK, p_dec_info->user_data_enable);
+
+ ret = send_firmware_command(inst, W5_INIT_SEQ, true, &reg_val, &fail_res);
+ if (ret)
+ return ret;
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ dev_dbg(inst->dev->dev, "%s: init seq sent (queue %u : %u)\n", __func__,
+ p_dec_info->instance_queue_count, p_dec_info->report_queue_count);
+
+ return 0;
+}
+
+static void wave5_get_dec_seq_result(struct vpu_instance *inst, struct dec_initial_info *info)
+{
+ u32 reg_val;
+ u32 profile_compatibility_flag;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ p_dec_info->stream_rd_ptr = wave5_dec_get_rd_ptr(inst);
+ info->rd_ptr = p_dec_info->stream_rd_ptr;
+
+ p_dec_info->frame_display_flag = vpu_read_reg(inst->dev, W5_RET_DEC_DISP_IDC);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_SIZE);
+ info->pic_width = ((reg_val >> 16) & 0xffff);
+ info->pic_height = (reg_val & 0xffff);
+ info->min_frame_buffer_count = vpu_read_reg(inst->dev, W5_RET_DEC_NUM_REQUIRED_FB);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_CROP_LEFT_RIGHT);
+ info->pic_crop_rect.left = (reg_val >> 16) & 0xffff;
+ info->pic_crop_rect.right = reg_val & 0xffff;
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_CROP_TOP_BOTTOM);
+ info->pic_crop_rect.top = (reg_val >> 16) & 0xffff;
+ info->pic_crop_rect.bottom = reg_val & 0xffff;
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_COLOR_SAMPLE_INFO);
+ info->luma_bitdepth = reg_val & 0xf;
+ info->chroma_bitdepth = (reg_val >> 4) & 0xf;
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_SEQ_PARAM);
+ profile_compatibility_flag = (reg_val >> 12) & 0xff;
+ info->profile = (reg_val >> 24) & 0x1f;
+
+ if (inst->std == W_HEVC_DEC) {
+ /* guessing profile */
+ if (!info->profile) {
+ if ((profile_compatibility_flag & 0x06) == 0x06)
+ info->profile = HEVC_PROFILE_MAIN; /* main profile */
+ else if (profile_compatibility_flag & 0x04)
+ info->profile = HEVC_PROFILE_MAIN10; /* main10 profile */
+ else if (profile_compatibility_flag & 0x08)
+ /* main still picture profile */
+ info->profile = HEVC_PROFILE_STILLPICTURE;
+ else
+ info->profile = HEVC_PROFILE_MAIN; /* for old version HM */
+ }
+ } else if (inst->std == W_AVC_DEC) {
+ info->profile = FIELD_GET(SEQ_PARAM_PROFILE_MASK, reg_val);
+ }
+
+ info->vlc_buf_size = vpu_read_reg(inst->dev, W5_RET_VLC_BUF_SIZE);
+ info->param_buf_size = vpu_read_reg(inst->dev, W5_RET_PARAM_BUF_SIZE);
+ p_dec_info->vlc_buf_size = info->vlc_buf_size;
+ p_dec_info->param_buf_size = info->param_buf_size;
+}
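/*
 * Worked example for the HEVC profile guessing above (a sketch): if
 * the stream reports profile 0 with compatibility flags 0x6, both the
 * Main (bit 1) and Main10 (bit 2) flags are set, so the first branch
 * picks HEVC_PROFILE_MAIN; flags of 0x4 alone would pick
 * HEVC_PROFILE_MAIN10, and bit 3 alone the still picture profile.
 */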
+
+int wave5_vpu_dec_get_seq_info(struct vpu_instance *inst, struct dec_initial_info *info)
+{
+ int ret;
+ u32 reg_val;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_ADDR_REPORT_BASE, p_dec_info->user_data_buf_addr);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_SIZE, p_dec_info->user_data_buf_size);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_PARAM, REPORT_PARAM_ENDIANNESS_BIG_ENDIAN);
+
+ /* send QUERY cmd */
+ ret = wave5_send_query(inst->dev, inst, GET_RESULT);
+ if (ret)
+ return ret;
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ dev_dbg(inst->dev->dev, "%s: init seq complete (queue %u : %u)\n", __func__,
+ p_dec_info->instance_queue_count, p_dec_info->report_queue_count);
+
+ /* this is not a fatal error, set ret to -EIO but don't return immediately */
+ if (vpu_read_reg(inst->dev, W5_RET_DEC_DECODING_SUCCESS) != 1) {
+ info->seq_init_err_reason = vpu_read_reg(inst->dev, W5_RET_DEC_ERR_INFO);
+ ret = -EIO;
+ }
+
+ wave5_get_dec_seq_result(inst, info);
+
+ return ret;
+}
+
+int wave5_vpu_dec_register_framebuffer(struct vpu_instance *inst, struct frame_buffer *fb_arr,
+ enum tiled_map_type map_type, unsigned int count)
+{
+ int ret;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct dec_initial_info *init_info = &p_dec_info->initial_info;
+ size_t remain, idx, j, i, cnt_8_chunk, size;
+ u32 start_no, end_no;
+ u32 reg_val, cbcr_interleave, nv21, pic_size;
+ u32 addr_y, addr_cb, addr_cr;
+ u32 mv_col_size, frame_width, frame_height, fbc_y_tbl_size, fbc_c_tbl_size;
+ struct vpu_buf vb_buf;
+ bool justified = WTL_RIGHT_JUSTIFIED;
+ u32 format_no = WTL_PIXEL_8BIT;
+ u32 color_format = 0;
+ u32 pixel_order = 1;
+ u32 bwb_flag = (map_type == LINEAR_FRAME_MAP) ? 1 : 0;
+
+ cbcr_interleave = inst->cbcr_interleave;
+ nv21 = inst->nv21;
+ mv_col_size = 0;
+ fbc_y_tbl_size = 0;
+ fbc_c_tbl_size = 0;
+
+ if (map_type >= COMPRESSED_FRAME_MAP) {
+ cbcr_interleave = 0;
+ nv21 = 0;
+
+ switch (inst->std) {
+ case W_HEVC_DEC:
+ mv_col_size = WAVE5_DEC_HEVC_BUF_SIZE(init_info->pic_width,
+ init_info->pic_height);
+ break;
+ case W_AVC_DEC:
+ mv_col_size = WAVE5_DEC_AVC_BUF_SIZE(init_info->pic_width,
+ init_info->pic_height);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (inst->std == W_HEVC_DEC || inst->std == W_AVC_DEC) {
+ size = ALIGN(ALIGN(mv_col_size, 16), BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_array(inst->dev, p_dec_info->vb_mv, count, size);
+ if (ret)
+ goto free_mv_buffers;
+ }
+
+ frame_width = init_info->pic_width;
+ frame_height = init_info->pic_height;
+ fbc_y_tbl_size = ALIGN(WAVE5_FBC_LUMA_TABLE_SIZE(frame_width, frame_height), 16);
+ fbc_c_tbl_size = ALIGN(WAVE5_FBC_CHROMA_TABLE_SIZE(frame_width, frame_height), 16);
+
+ size = ALIGN(fbc_y_tbl_size, BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_array(inst->dev, p_dec_info->vb_fbc_y_tbl, count, size);
+ if (ret)
+ goto free_fbc_y_tbl_buffers;
+
+ size = ALIGN(fbc_c_tbl_size, BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_array(inst->dev, p_dec_info->vb_fbc_c_tbl, count, size);
+ if (ret)
+ goto free_fbc_c_tbl_buffers;
+
+ pic_size = (init_info->pic_width << 16) | (init_info->pic_height);
+
+ vb_buf.size = (p_dec_info->vlc_buf_size * VLC_BUF_NUM) +
+ (p_dec_info->param_buf_size * COMMAND_QUEUE_DEPTH);
+ vb_buf.daddr = 0;
+
+ if (vb_buf.size != p_dec_info->vb_task.size) {
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &vb_buf);
+ if (ret)
+ goto free_fbc_c_tbl_buffers;
+
+ p_dec_info->vb_task = vb_buf;
+ }
+
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_ADDR_TASK_BUF,
+ p_dec_info->vb_task.daddr);
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_TASK_BUF_SIZE, vb_buf.size);
+ } else {
+ pic_size = (init_info->pic_width << 16) | (init_info->pic_height);
+
+ if (inst->output_format == FORMAT_422)
+ color_format = 1;
+ }
+ vpu_write_reg(inst->dev, W5_PIC_SIZE, pic_size);
+
+ reg_val = (bwb_flag << 28) |
+ (pixel_order << 23) |
+ (justified << 22) |
+ (format_no << 20) |
+ (color_format << 19) |
+ (nv21 << 17) |
+ (cbcr_interleave << 16) |
+ (fb_arr[0].stride);
+ vpu_write_reg(inst->dev, W5_COMMON_PIC_INFO, reg_val);
+
+ remain = count;
+ cnt_8_chunk = DIV_ROUND_UP(count, 8);
+ idx = 0;
+ for (j = 0; j < cnt_8_chunk; j++) {
+ reg_val = (j == cnt_8_chunk - 1) << 4 | ((j == 0) << 3);
+ vpu_write_reg(inst->dev, W5_SFB_OPTION, reg_val);
+ start_no = j * 8;
+ end_no = start_no + ((remain >= 8) ? 8 : remain) - 1;
+
+ vpu_write_reg(inst->dev, W5_SET_FB_NUM, (start_no << 8) | end_no);
+
+ for (i = 0; i < 8 && i < remain; i++) {
+ addr_y = fb_arr[i + start_no].buf_y;
+ addr_cb = fb_arr[i + start_no].buf_cb;
+ addr_cr = fb_arr[i + start_no].buf_cr;
+ vpu_write_reg(inst->dev, W5_ADDR_LUMA_BASE0 + (i << 4), addr_y);
+ vpu_write_reg(inst->dev, W5_ADDR_CB_BASE0 + (i << 4), addr_cb);
+ if (map_type >= COMPRESSED_FRAME_MAP) {
+ /* luma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_Y_OFFSET0 + (i << 4),
+ p_dec_info->vb_fbc_y_tbl[idx].daddr);
+ /* chroma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_C_OFFSET0 + (i << 4),
+ p_dec_info->vb_fbc_c_tbl[idx].daddr);
+ vpu_write_reg(inst->dev, W5_ADDR_MV_COL0 + (i << 2),
+ p_dec_info->vb_mv[idx].daddr);
+ } else {
+ vpu_write_reg(inst->dev, W5_ADDR_CR_BASE0 + (i << 4), addr_cr);
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_C_OFFSET0 + (i << 4), 0);
+ vpu_write_reg(inst->dev, W5_ADDR_MV_COL0 + (i << 2), 0);
+ }
+ idx++;
+ }
+ remain -= i;
+
+ ret = send_firmware_command(inst, W5_SET_FB, false, NULL, NULL);
+ if (ret)
+ goto free_buffers;
+ }
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_SUCCESS);
+ if (!reg_val) {
+ ret = -EIO;
+ goto free_buffers;
+ }
+
+ return 0;
+
+free_buffers:
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
+free_fbc_c_tbl_buffers:
+ for (i = 0; i < count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_c_tbl[i]);
+free_fbc_y_tbl_buffers:
+ for (i = 0; i < count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_y_tbl[i]);
+free_mv_buffers:
+ for (i = 0; i < count; i++)
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_mv[i]);
+ return ret;
+}
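/*
 * Worked example for the 8-buffer chunking above (a sketch):
 * count == 10 gives cnt_8_chunk == 2. Chunk 0 programs buffers 0..7
 * with the first-chunk flag (W5_SFB_OPTION == 0x8) and
 * W5_SET_FB_NUM == (0 << 8) | 7; chunk 1 programs buffers 8..9 with
 * the last-chunk flag (0x10) and W5_SET_FB_NUM == (8 << 8) | 9, and
 * each chunk is pushed with its own W5_SET_FB command.
 */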
+
+int wave5_vpu_decode(struct vpu_instance *inst, u32 *fail_res)
+{
+ u32 reg_val;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_BS_RD_PTR, p_dec_info->stream_rd_ptr);
+ vpu_write_reg(inst->dev, W5_BS_WR_PTR, p_dec_info->stream_wr_ptr);
+
+ vpu_write_reg(inst->dev, W5_BS_OPTION, get_bitstream_options(p_dec_info));
+
+ /* secondary AXI */
+ reg_val = p_dec_info->sec_axi_info.use_bit_enable |
+ (p_dec_info->sec_axi_info.use_ip_enable << 9) |
+ (p_dec_info->sec_axi_info.use_lf_row_enable << 15);
+ vpu_write_reg(inst->dev, W5_USE_SEC_AXI, reg_val);
+
+ /* set attributes of user buffer */
+ vpu_write_reg(inst->dev, W5_CMD_DEC_USER_MASK, p_dec_info->user_data_enable);
+
+ vpu_write_reg(inst->dev, W5_COMMAND_OPTION, DEC_PIC_NORMAL);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_TEMPORAL_ID_PLUS1,
+ (p_dec_info->target_spatial_id << 9) |
+ (p_dec_info->temp_id_select_mode << 8) | p_dec_info->target_temp_id);
+ vpu_write_reg(inst->dev, W5_CMD_SEQ_CHANGE_ENABLE_FLAG, p_dec_info->seq_change_mask);
+ /* When reordering is disabled we force the latency of the framebuffers */
+ vpu_write_reg(inst->dev, W5_CMD_DEC_FORCE_FB_LATENCY_PLUS1, !p_dec_info->reorder_enable);
+
+ ret = send_firmware_command(inst, W5_DEC_ENC_PIC, true, &reg_val, fail_res);
+ if (ret == -ETIMEDOUT)
+ return ret;
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ dev_dbg(inst->dev->dev, "%s: dec pic sent (queue %u : %u)\n", __func__,
+ p_dec_info->instance_queue_count, p_dec_info->report_queue_count);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int wave5_vpu_dec_get_result(struct vpu_instance *inst, struct dec_output_info *result)
+{
+ int ret;
+ u32 index, nal_unit_type, reg_val, sub_layer_info;
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_ADDR_REPORT_BASE, p_dec_info->user_data_buf_addr);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_SIZE, p_dec_info->user_data_buf_size);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_REPORT_PARAM, REPORT_PARAM_ENDIANNESS_BIG_ENDIAN);
+
+ /* send QUERY cmd */
+ ret = wave5_send_query(vpu_dev, inst, GET_RESULT);
+ if (ret)
+ return ret;
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_dec_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_dec_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ dev_dbg(inst->dev->dev, "%s: dec pic complete (queue %u : %u)\n", __func__,
+ p_dec_info->instance_queue_count, p_dec_info->report_queue_count);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_TYPE);
+
+ nal_unit_type = (reg_val >> 4) & 0x3f;
+
+ if (inst->std == W_HEVC_DEC) {
+ if (reg_val & 0x04)
+ result->pic_type = PIC_TYPE_B;
+ else if (reg_val & 0x02)
+ result->pic_type = PIC_TYPE_P;
+ else if (reg_val & 0x01)
+ result->pic_type = PIC_TYPE_I;
+ else
+ result->pic_type = PIC_TYPE_MAX;
+ if ((nal_unit_type == 19 || nal_unit_type == 20) && result->pic_type == PIC_TYPE_I)
+ /* IDR_W_RADL, IDR_N_LP */
+ result->pic_type = PIC_TYPE_IDR;
+ } else if (inst->std == W_AVC_DEC) {
+ if (reg_val & 0x04)
+ result->pic_type = PIC_TYPE_B;
+ else if (reg_val & 0x02)
+ result->pic_type = PIC_TYPE_P;
+ else if (reg_val & 0x01)
+ result->pic_type = PIC_TYPE_I;
+ else
+ result->pic_type = PIC_TYPE_MAX;
+ if (nal_unit_type == 5 && result->pic_type == PIC_TYPE_I)
+ result->pic_type = PIC_TYPE_IDR;
+ }
+ index = vpu_read_reg(inst->dev, W5_RET_DEC_DISPLAY_INDEX);
+ result->index_frame_display = index;
+ index = vpu_read_reg(inst->dev, W5_RET_DEC_DECODED_INDEX);
+ result->index_frame_decoded = index;
+ result->index_frame_decoded_for_tiled = index;
+
+ sub_layer_info = vpu_read_reg(inst->dev, W5_RET_DEC_SUB_LAYER_INFO);
+ result->temporal_id = sub_layer_info & 0x7;
+
+ if (inst->std == W_HEVC_DEC || inst->std == W_AVC_DEC) {
+ result->decoded_poc = -1;
+ if (result->index_frame_decoded >= 0 ||
+ result->index_frame_decoded == DECODED_IDX_FLAG_SKIP)
+ result->decoded_poc = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_POC);
+ }
+
+ result->sequence_changed = vpu_read_reg(inst->dev, W5_RET_DEC_NOTIFICATION);
+ reg_val = vpu_read_reg(inst->dev, W5_RET_DEC_PIC_SIZE);
+ result->dec_pic_width = reg_val >> 16;
+ result->dec_pic_height = reg_val & 0xffff;
+
+ if (result->sequence_changed) {
+ memcpy((void *)&p_dec_info->new_seq_info, (void *)&p_dec_info->initial_info,
+ sizeof(struct dec_initial_info));
+ wave5_get_dec_seq_result(inst, &p_dec_info->new_seq_info);
+ }
+
+ result->dec_host_cmd_tick = vpu_read_reg(inst->dev, W5_RET_DEC_HOST_CMD_TICK);
+ result->dec_decode_end_tick = vpu_read_reg(inst->dev, W5_RET_DEC_DECODING_ENC_TICK);
+
+ if (!p_dec_info->first_cycle_check) {
+ result->frame_cycle =
+ (result->dec_decode_end_tick - result->dec_host_cmd_tick) *
+ p_dec_info->cycle_per_tick;
+ vpu_dev->last_performance_cycles = result->dec_decode_end_tick;
+ p_dec_info->first_cycle_check = true;
+ } else if (result->index_frame_decoded_for_tiled != -1) {
+ result->frame_cycle =
+ (result->dec_decode_end_tick - vpu_dev->last_performance_cycles) *
+ p_dec_info->cycle_per_tick;
+ vpu_dev->last_performance_cycles = result->dec_decode_end_tick;
+ if (vpu_dev->last_performance_cycles < result->dec_host_cmd_tick)
+ result->frame_cycle =
+ (result->dec_decode_end_tick - result->dec_host_cmd_tick) *
+ p_dec_info->cycle_per_tick;
+ }
+
+ /* no remaining command. reset frame cycle. */
+ if (p_dec_info->instance_queue_count == 0 && p_dec_info->report_queue_count == 0)
+ p_dec_info->first_cycle_check = false;
+
+ return 0;
+}
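/*
 * Worked example for the cycle accounting above (a sketch): with
 * cycle_per_tick == 256, a first frame with dec_host_cmd_tick 1000
 * and dec_decode_end_tick 1400 reports frame_cycle ==
 * (1400 - 1000) * 256 == 102400 cycles; later frames measure from
 * last_performance_cycles instead, so back-to-back decodes queued
 * against the same command tick are not double-counted.
 */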
+
+int wave5_vpu_re_init(struct device *dev, u8 *fw, size_t size)
+{
+ struct vpu_buf *common_vb;
+ dma_addr_t code_base, temp_base;
+ dma_addr_t old_code_base;
+ u32 code_size, temp_size, reason_code;
+ u32 reg_val;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ common_vb = &vpu_dev->common_mem;
+
+ code_base = common_vb->daddr;
+ /* ALIGN TO 4KB */
+ code_size = (WAVE5_MAX_CODE_BUF_SIZE & ~0xfff);
+ if (code_size < size * 2)
+ return -EINVAL;
+ temp_base = common_vb->daddr + WAVE5_TEMPBUF_OFFSET;
+ temp_size = WAVE5_TEMPBUF_SIZE;
+
+ old_code_base = vpu_read_reg(vpu_dev, W5_VPU_REMAP_PADDR);
+
+ if (old_code_base != code_base + W5_REMAP_INDEX1 * W5_REMAP_MAX_SIZE) {
+ int ret;
+
+ ret = wave5_vdi_write_memory(vpu_dev, common_vb, 0, fw, size);
+ if (ret < 0) {
+ dev_err(vpu_dev->dev,
+ "VPU init, Writing firmware to common buffer, fail: %d\n", ret);
+ return ret;
+ }
+
+ vpu_write_reg(vpu_dev, W5_PO_CONF, 0);
+
+ ret = wave5_vpu_reset(dev, SW_RESET_ON_BOOT);
+ if (ret < 0) {
+ dev_err(vpu_dev->dev, "VPU init, Resetting the VPU, fail: %d\n", ret);
+ return ret;
+ }
+
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX0);
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX1);
+
+ vpu_write_reg(vpu_dev, W5_ADDR_CODE_BASE, code_base);
+ vpu_write_reg(vpu_dev, W5_CODE_SIZE, code_size);
+ vpu_write_reg(vpu_dev, W5_CODE_PARAM, (WAVE5_UPPER_PROC_AXI_ID << 4) | 0);
+ vpu_write_reg(vpu_dev, W5_ADDR_TEMP_BASE, temp_base);
+ vpu_write_reg(vpu_dev, W5_TEMP_SIZE, temp_size);
+
+ /* These registers must be reset explicitly */
+ vpu_write_reg(vpu_dev, W5_HW_OPTION, 0);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROC_EXT_ADDR, 0);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_AXI_PARAM, 0);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_PARAM, 0);
+
+ /* Encoder interrupt */
+ reg_val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ reg_val |= BIT(INT_WAVE5_ENC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_FULL);
+ /* Decoder interrupt */
+ reg_val |= BIT(INT_WAVE5_INIT_SEQ);
+ reg_val |= BIT(INT_WAVE5_DEC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_EMPTY);
+ vpu_write_reg(vpu_dev, W5_VPU_VINT_ENABLE, reg_val);
+
+ reg_val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if (FIELD_GET(FEATURE_BACKBONE, reg_val)) {
+ reg_val = ((WAVE5_PROC_AXI_ID << 28) |
+ (WAVE5_PRP_AXI_ID << 24) |
+ (WAVE5_FBD_Y_AXI_ID << 20) |
+ (WAVE5_FBC_Y_AXI_ID << 16) |
+ (WAVE5_FBD_C_AXI_ID << 12) |
+ (WAVE5_FBC_C_AXI_ID << 8) |
+ (WAVE5_PRI_AXI_ID << 4) |
+ WAVE5_SEC_AXI_ID);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROG_AXI_ID, reg_val);
+ }
+
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_INIT_VPU);
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CORE_START, 1);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "VPU reinit(W5_VPU_REMAP_CORE_START) timeout\n");
+ return ret;
+ }
+
+ ret = wave5_vpu_firmware_command_queue_error_check(vpu_dev, &reason_code);
+ if (ret)
+ return ret;
+ }
+
+ return setup_wave5_properties(dev);
+}
+
+static int wave5_vpu_sleep_wake(struct device *dev, bool i_sleep_wake, const uint16_t *code,
+ size_t size)
+{
+ u32 reg_val;
+ struct vpu_buf *common_vb;
+ dma_addr_t code_base;
+ u32 code_size, reason_code;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (i_sleep_wake) {
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return ret;
+
+ /*
+ * Declare who has ownership for the host interface access
+ * 1 = VPU
+ * 0 = Host processor
+ */
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_SLEEP_VPU);
+ /* Send an interrupt named HOST to the VPU */
+ vpu_write_reg(vpu_dev, W5_VPU_HOST_INT_REQ, 1);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_firmware_command_queue_error_check(vpu_dev, &reason_code);
+ if (ret)
+ return ret;
+ } else { /* restore */
+ common_vb = &vpu_dev->common_mem;
+
+ code_base = common_vb->daddr;
+ /* ALIGN TO 4KB */
+ code_size = (WAVE5_MAX_CODE_BUF_SIZE & ~0xfff);
+ if (code_size < size * 2) {
+ dev_err(dev, "size too small\n");
+ return -EINVAL;
+ }
+
+ /* Power on without DEBUG mode */
+ vpu_write_reg(vpu_dev, W5_PO_CONF, 0);
+
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX0);
+ remap_page(vpu_dev, code_base, W5_REMAP_INDEX1);
+
+ vpu_write_reg(vpu_dev, W5_ADDR_CODE_BASE, code_base);
+ vpu_write_reg(vpu_dev, W5_CODE_SIZE, code_size);
+ vpu_write_reg(vpu_dev, W5_CODE_PARAM, (WAVE5_UPPER_PROC_AXI_ID << 4) | 0);
+
+ /* These registers must be reset explicitly */
+ vpu_write_reg(vpu_dev, W5_HW_OPTION, 0);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROC_EXT_ADDR, 0);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_AXI_PARAM, 0);
+ vpu_write_reg(vpu_dev, W5_SEC_AXI_PARAM, 0);
+
+ /* Encoder interrupt */
+ reg_val = BIT(INT_WAVE5_ENC_SET_PARAM);
+ reg_val |= BIT(INT_WAVE5_ENC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_FULL);
+ /* Decoder interrupt */
+ reg_val |= BIT(INT_WAVE5_INIT_SEQ);
+ reg_val |= BIT(INT_WAVE5_DEC_PIC);
+ reg_val |= BIT(INT_WAVE5_BSBUF_EMPTY);
+ vpu_write_reg(vpu_dev, W5_VPU_VINT_ENABLE, reg_val);
+
+ reg_val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if (FIELD_GET(FEATURE_BACKBONE, reg_val)) {
+ reg_val = ((WAVE5_PROC_AXI_ID << 28) |
+ (WAVE5_PRP_AXI_ID << 24) |
+ (WAVE5_FBD_Y_AXI_ID << 20) |
+ (WAVE5_FBC_Y_AXI_ID << 16) |
+ (WAVE5_FBD_C_AXI_ID << 12) |
+ (WAVE5_FBC_C_AXI_ID << 8) |
+ (WAVE5_PRI_AXI_ID << 4) |
+ WAVE5_SEC_AXI_ID);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_PROG_AXI_ID, reg_val);
+ }
+
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 1);
+ vpu_write_reg(vpu_dev, W5_COMMAND, W5_WAKEUP_VPU);
+ /* Start VPU after settings */
+ vpu_write_reg(vpu_dev, W5_VPU_REMAP_CORE_START, 1);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_BUSY_STATUS);
+ if (ret) {
+ dev_err(vpu_dev->dev, "VPU wakeup(W5_VPU_REMAP_CORE_START) timeout\n");
+ return ret;
+ }
+
+ return wave5_vpu_firmware_command_queue_error_check(vpu_dev, &reason_code);
+ }
+
+ return 0;
+}
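/*
 * A minimal suspend/resume usage sketch (hypothetical callbacks; the
 * real driver wires power management up elsewhere): put the firmware
 * to sleep on suspend and restore it on resume, matching how
 * wave5_vpu_reset() calls wave5_vpu_sleep_wake() with NULL/0 since
 * the retained common buffer already holds the firmware.
 */
static int example_wave5_suspend_sketch(struct device *dev)
{
	return wave5_vpu_sleep_wake(dev, true, NULL, 0);
}

static int example_wave5_resume_sketch(struct device *dev)
{
	return wave5_vpu_sleep_wake(dev, false, NULL, 0);
}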
+
+int wave5_vpu_reset(struct device *dev, enum sw_reset_mode reset_mode)
+{
+ u32 val = 0;
+ int ret = 0;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ struct vpu_attr *p_attr = &vpu_dev->attr;
+ /* The VPU does not respond to a reset request; force the BUSY flag to 0. */
+ vpu_write_reg(vpu_dev, W5_VPU_BUSY_STATUS, 0);
+
+ if (reset_mode == SW_RESET_SAFETY) {
+ ret = wave5_vpu_sleep_wake(dev, true, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
+ val = vpu_read_reg(vpu_dev, W5_VPU_RET_VPU_CONFIG0);
+ if (FIELD_GET(FEATURE_BACKBONE, val))
+ p_attr->support_backbone = true;
+ if (FIELD_GET(FEATURE_VCORE_BACKBONE, val))
+ p_attr->support_vcore_backbone = true;
+ if (FIELD_GET(FEATURE_VCPU_BACKBONE, val))
+ p_attr->support_vcpu_backbone = true;
+
+ /* waiting for completion of bus transaction */
+ if (p_attr->support_backbone) {
+ dev_dbg(dev, "%s: backbone supported\n", __func__);
+
+ if (p_attr->support_vcore_backbone) {
+ if (p_attr->support_vcpu_backbone) {
+ /* step1 : disable request */
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCPU, 0xFF);
+
+ /* step2 : waiting for completion of bus transaction */
+ ret = wave5_wait_vcpu_bus_busy(vpu_dev,
+ W5_BACKBONE_BUS_STATUS_VCPU);
+ if (ret) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCPU, 0x00);
+ return ret;
+ }
+ }
+ /* step1 : disable request */
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x7);
+
+ /* step2 : waiting for completion of bus transaction */
+ if (wave5_wait_bus_busy(vpu_dev, W5_BACKBONE_BUS_STATUS_VCORE0)) {
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x00);
+ return -EBUSY;
+ }
+ } else {
+ /* step1 : disable request */
+ wave5_fio_writel(vpu_dev, W5_COMBINED_BACKBONE_BUS_CTRL, 0x7);
+
+ /* step2 : waiting for completion of bus transaction */
+ if (wave5_wait_bus_busy(vpu_dev, W5_COMBINED_BACKBONE_BUS_STATUS)) {
+ wave5_fio_writel(vpu_dev, W5_COMBINED_BACKBONE_BUS_CTRL, 0x00);
+ return -EBUSY;
+ }
+ }
+ } else {
+ dev_dbg(dev, "%s: backbone NOT supported\n", __func__);
+ /* step1 : disable request */
+ wave5_fio_writel(vpu_dev, W5_GDI_BUS_CTRL, 0x100);
+
+ /* step2 : waiting for completion of bus transaction */
+ ret = wave5_wait_bus_busy(vpu_dev, W5_GDI_BUS_STATUS);
+ if (ret) {
+ wave5_fio_writel(vpu_dev, W5_GDI_BUS_CTRL, 0x00);
+ return ret;
+ }
+ }
+
+ switch (reset_mode) {
+ case SW_RESET_ON_BOOT:
+ case SW_RESET_FORCE:
+ case SW_RESET_SAFETY:
+ val = W5_RST_BLOCK_ALL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val) {
+ vpu_write_reg(vpu_dev, W5_VPU_RESET_REQ, val);
+
+ ret = wave5_wait_vpu_busy(vpu_dev, W5_VPU_RESET_STATUS);
+ if (ret) {
+ vpu_write_reg(vpu_dev, W5_VPU_RESET_REQ, 0);
+ return ret;
+ }
+ vpu_write_reg(vpu_dev, W5_VPU_RESET_REQ, 0);
+ }
+ /* step3 : clear the bus control registers once SW_RESET is done */
+ if (p_attr->support_backbone) {
+ if (p_attr->support_vcore_backbone) {
+ if (p_attr->support_vcpu_backbone)
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCPU, 0x00);
+ wave5_fio_writel(vpu_dev, W5_BACKBONE_BUS_CTRL_VCORE0, 0x00);
+ } else {
+ wave5_fio_writel(vpu_dev, W5_COMBINED_BACKBONE_BUS_CTRL, 0x00);
+ }
+ } else {
+ wave5_fio_writel(vpu_dev, W5_GDI_BUS_CTRL, 0x00);
+ }
+ if (reset_mode == SW_RESET_SAFETY || reset_mode == SW_RESET_FORCE)
+ ret = wave5_vpu_sleep_wake(dev, false, NULL, 0);
+
+ return ret;
+}
+
+int wave5_vpu_dec_finish_seq(struct vpu_instance *inst, u32 *fail_res)
+{
+ return send_firmware_command(inst, W5_DESTROY_INSTANCE, true, NULL, fail_res);
+}
+
+int wave5_vpu_dec_set_bitstream_flag(struct vpu_instance *inst, bool eos)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ p_dec_info->stream_endflag = eos ? 1 : 0;
+ vpu_write_reg(inst->dev, W5_BS_OPTION, get_bitstream_options(p_dec_info));
+ vpu_write_reg(inst->dev, W5_BS_WR_PTR, p_dec_info->stream_wr_ptr);
+
+ return send_firmware_command(inst, W5_UPDATE_BS, true, NULL, NULL);
+}
+
+int wave5_dec_clr_disp_flag(struct vpu_instance *inst, unsigned int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_CLR_DISP_IDC, BIT(index));
+ vpu_write_reg(inst->dev, W5_CMD_DEC_SET_DISP_IDC, 0);
+
+ ret = wave5_send_query(inst->dev, inst, UPDATE_DISP_FLAG);
+ if (ret)
+ return ret;
+
+ p_dec_info->frame_display_flag = vpu_read_reg(inst->dev, W5_RET_DEC_DISP_IDC);
+
+ return 0;
+}
+
+int wave5_dec_set_disp_flag(struct vpu_instance *inst, unsigned int index)
+{
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_DEC_CLR_DISP_IDC, 0);
+ vpu_write_reg(inst->dev, W5_CMD_DEC_SET_DISP_IDC, BIT(index));
+
+ ret = wave5_send_query(inst->dev, inst, UPDATE_DISP_FLAG);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int wave5_vpu_clear_interrupt(struct vpu_instance *inst, u32 flags)
+{
+ u32 interrupt_reason;
+
+ interrupt_reason = vpu_read_reg(inst->dev, W5_VPU_VINT_REASON_USR);
+ interrupt_reason &= ~flags;
+ vpu_write_reg(inst->dev, W5_VPU_VINT_REASON_USR, interrupt_reason);
+
+ return 0;
+}
+
+dma_addr_t wave5_dec_get_rd_ptr(struct vpu_instance *inst)
+{
+ int ret;
+
+ ret = wave5_send_query(inst->dev, inst, GET_BS_RD_PTR);
+ if (ret)
+ return inst->codec_info->dec_info.stream_rd_ptr;
+
+ return vpu_read_reg(inst->dev, W5_RET_QUERY_DEC_BS_RD_PTR);
+}
+
+int wave5_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr)
+{
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_RET_QUERY_DEC_SET_BS_RD_PTR, addr);
+
+ ret = wave5_send_query(inst->dev, inst, SET_BS_RD_PTR);
+
+ return ret;
+}
+
+/************************************************************************/
+/* ENCODER functions */
+/************************************************************************/
+
+int wave5_vpu_build_up_enc_param(struct device *dev, struct vpu_instance *inst,
+ struct enc_open_param *open_param)
+{
+ int ret;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ u32 reg_val;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ dma_addr_t buffer_addr;
+ size_t buffer_size;
+
+ p_enc_info->cycle_per_tick = 256;
+ if (vpu_dev->sram_buf.size) {
+ p_enc_info->sec_axi_info.use_enc_rdo_enable = 1;
+ p_enc_info->sec_axi_info.use_enc_lf_enable = 1;
+ }
+
+ p_enc_info->vb_work.size = WAVE521ENC_WORKBUF_SIZE;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &p_enc_info->vb_work);
+ if (ret) {
+ memset(&p_enc_info->vb_work, 0, sizeof(p_enc_info->vb_work));
+ return ret;
+ }
+
+ wave5_vdi_clear_memory(vpu_dev, &p_enc_info->vb_work);
+
+ vpu_write_reg(inst->dev, W5_ADDR_WORK_BASE, p_enc_info->vb_work.daddr);
+ vpu_write_reg(inst->dev, W5_WORK_SIZE, p_enc_info->vb_work.size);
+
+ vpu_write_reg(inst->dev, W5_CMD_ADDR_SEC_AXI, vpu_dev->sram_buf.daddr);
+ vpu_write_reg(inst->dev, W5_CMD_SEC_AXI_SIZE, vpu_dev->sram_buf.size);
+
+ reg_val = (open_param->line_buf_int_en << 6) | BITSTREAM_ENDIANNESS_BIG_ENDIAN;
+ vpu_write_reg(inst->dev, W5_CMD_BS_PARAM, reg_val);
+ vpu_write_reg(inst->dev, W5_CMD_EXT_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1, (COMMAND_QUEUE_DEPTH - 1));
+
+ /* This register must be reset explicitly */
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SRC_OPTIONS, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_VCORE_INFO, 1);
+
+ ret = send_firmware_command(inst, W5_CREATE_INSTANCE, true, NULL, NULL);
+ if (ret)
+ goto free_vb_work;
+
+ buffer_addr = open_param->bitstream_buffer;
+ buffer_size = open_param->bitstream_buffer_size;
+ p_enc_info->stream_rd_ptr = buffer_addr;
+ p_enc_info->stream_wr_ptr = buffer_addr;
+ p_enc_info->line_buf_int_en = open_param->line_buf_int_en;
+ p_enc_info->stream_buf_start_addr = buffer_addr;
+ p_enc_info->stream_buf_size = buffer_size;
+ p_enc_info->stream_buf_end_addr = buffer_addr + buffer_size;
+ p_enc_info->stride = 0;
+ p_enc_info->initial_info_obtained = false;
+ p_enc_info->product_code = vpu_read_reg(inst->dev, W5_PRODUCT_NUMBER);
+
+ return 0;
+free_vb_work:
+ if (wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_work))
+ memset(&p_enc_info->vb_work, 0, sizeof(p_enc_info->vb_work));
+ return ret;
+}
+
+static void wave5_set_enc_crop_info(u32 codec, struct enc_wave_param *param, int rot_mode,
+ int src_width, int src_height)
+{
+ int aligned_width = (codec == W_HEVC_ENC) ? ALIGN(src_width, 32) : ALIGN(src_width, 16);
+ int aligned_height = (codec == W_HEVC_ENC) ? ALIGN(src_height, 32) : ALIGN(src_height, 16);
+ int pad_right, pad_bot;
+ int crop_right, crop_left, crop_top, crop_bot;
+ int prp_mode = rot_mode >> 1; /* remove prp_enable bit */
+
+ if (codec == W_HEVC_ENC &&
+ (!rot_mode || prp_mode == 14)) /* prp_mode 14 : hor_mir && ver_mir && rot_180 */
+ return;
+
+ pad_right = aligned_width - src_width;
+ pad_bot = aligned_height - src_height;
+
+ if (param->conf_win_right > 0)
+ crop_right = param->conf_win_right + pad_right;
+ else
+ crop_right = pad_right;
+
+ if (param->conf_win_bot > 0)
+ crop_bot = param->conf_win_bot + pad_bot;
+ else
+ crop_bot = pad_bot;
+
+ crop_top = param->conf_win_top;
+ crop_left = param->conf_win_left;
+
+ param->conf_win_top = crop_top;
+ param->conf_win_left = crop_left;
+ param->conf_win_bot = crop_bot;
+ param->conf_win_right = crop_right;
+
+ switch (prp_mode) {
+ case 0:
+ return;
+ case 1:
+ case 15:
+ param->conf_win_top = crop_right;
+ param->conf_win_left = crop_top;
+ param->conf_win_bot = crop_left;
+ param->conf_win_right = crop_bot;
+ break;
+ case 2:
+ case 12:
+ param->conf_win_top = crop_bot;
+ param->conf_win_left = crop_right;
+ param->conf_win_bot = crop_top;
+ param->conf_win_right = crop_left;
+ break;
+ case 3:
+ case 13:
+ param->conf_win_top = crop_left;
+ param->conf_win_left = crop_bot;
+ param->conf_win_bot = crop_right;
+ param->conf_win_right = crop_top;
+ break;
+ case 4:
+ case 10:
+ param->conf_win_top = crop_bot;
+ param->conf_win_bot = crop_top;
+ break;
+ case 8:
+ case 6:
+ param->conf_win_left = crop_right;
+ param->conf_win_right = crop_left;
+ break;
+ case 5:
+ case 11:
+ param->conf_win_top = crop_left;
+ param->conf_win_left = crop_top;
+ param->conf_win_bot = crop_right;
+ param->conf_win_right = crop_bot;
+ break;
+ case 7:
+ case 9:
+ param->conf_win_top = crop_right;
+ param->conf_win_left = crop_bot;
+ param->conf_win_bot = crop_left;
+ param->conf_win_right = crop_top;
+ break;
+ default:
+ WARN(1, "Invalid prp_mode: %d, must be in range of 1 - 15\n", prp_mode);
+ }
+}
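/*
 * Worked example for the crop rotation remap above (a sketch): a
 * 1920x1080 HEVC source is padded to 1920x1088 by the 32-pixel
 * alignment, so pad_right is 0 and pad_bot is 8. With no conformance
 * window and a 90-degree rotation (prp_mode 1), the bottom padding
 * moves to the right edge: conf_win_right takes the old crop_bot of 8
 * while the other three window sides stay 0.
 */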
+
+int wave5_vpu_enc_init_seq(struct vpu_instance *inst)
+{
+ u32 reg_val = 0, rot_mir_mode, fixed_cu_size_mode = 0x7;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ struct enc_open_param *p_open_param = &p_enc_info->open_param;
+ struct enc_wave_param *p_param = &p_open_param->wave_param;
+
+ /*
+ * OPT_COMMON:
+ * the last SET_PARAM command should be called with OPT_COMMON
+ */
+ rot_mir_mode = 0;
+ if (p_enc_info->rotation_enable) {
+ switch (p_enc_info->rotation_angle) {
+ case 0:
+ rot_mir_mode |= NONE_ROTATE;
+ break;
+ case 90:
+ rot_mir_mode |= ROT_CLOCKWISE_90;
+ break;
+ case 180:
+ rot_mir_mode |= ROT_CLOCKWISE_180;
+ break;
+ case 270:
+ rot_mir_mode |= ROT_CLOCKWISE_270;
+ break;
+ }
+ }
+
+ if (p_enc_info->mirror_enable) {
+ switch (p_enc_info->mirror_direction) {
+ case MIRDIR_NONE:
+ rot_mir_mode |= NONE_ROTATE;
+ break;
+ case MIRDIR_VER:
+ rot_mir_mode |= MIR_VER_FLIP;
+ break;
+ case MIRDIR_HOR:
+ rot_mir_mode |= MIR_HOR_FLIP;
+ break;
+ case MIRDIR_HOR_VER:
+ rot_mir_mode |= MIR_HOR_VER_FLIP;
+ break;
+ }
+ }
+
+ wave5_set_enc_crop_info(inst->std, p_param, rot_mir_mode, p_open_param->pic_width,
+ p_open_param->pic_height);
+
+ /* SET_PARAM + COMMON */
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SET_PARAM_OPTION, OPT_COMMON);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SRC_SIZE, p_open_param->pic_height << 16
+ | p_open_param->pic_width);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MAP_ENDIAN, VDI_LITTLE_ENDIAN);
+
+ reg_val = p_param->profile |
+ (p_param->level << 3) |
+ (p_param->internal_bit_depth << 14);
+ if (inst->std == W_HEVC_ENC)
+ reg_val |= (p_param->tier << 12) |
+ (p_param->tmvp_enable << 23) |
+ (p_param->sao_enable << 24) |
+ (p_param->skip_intra_trans << 25) |
+ (p_param->strong_intra_smooth_enable << 27) |
+ (p_param->en_still_picture << 30);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_SPS_PARAM, reg_val);
+
+ reg_val = (p_param->lossless_enable) |
+ (p_param->const_intra_pred_flag << 1) |
+ (p_param->lf_cross_slice_boundary_enable << 2) |
+ (p_param->wpp_enable << 4) |
+ (p_param->disable_deblk << 5) |
+ ((p_param->beta_offset_div2 & 0xF) << 6) |
+ ((p_param->tc_offset_div2 & 0xF) << 10) |
+ ((p_param->chroma_cb_qp_offset & 0x1F) << 14) |
+ ((p_param->chroma_cr_qp_offset & 0x1F) << 19) |
+ (p_param->transform8x8_enable << 29) |
+ (p_param->entropy_coding_mode << 30);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_PPS_PARAM, reg_val);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_GOP_PARAM, p_param->gop_preset_idx);
+
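+	/*
+	 * The INTRA_PARAM layout is codec-specific: AVC packs the IDR
+	 * period, HEVC the decoding-refresh type.
+	 */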
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_PARAM, p_param->intra_qp |
+ ((p_param->intra_period & 0x7ff) << 6) |
+ ((p_param->avc_idr_period & 0x7ff) << 17));
+ else if (inst->std == W_HEVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_PARAM,
+ p_param->decoding_refresh_type | (p_param->intra_qp << 3) |
+ (p_param->intra_period << 16));
+
+ reg_val = (p_param->rdo_skip << 2) |
+ (p_param->lambda_scaling_enable << 3) |
+ (fixed_cu_size_mode << 5) |
+ (p_param->intra_nx_n_enable << 8) |
+ (p_param->max_num_merge << 18);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RDO_PARAM, reg_val);
+
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_REFRESH,
+ p_param->intra_mb_refresh_arg << 16 | p_param->intra_mb_refresh_mode);
+ else if (inst->std == W_HEVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INTRA_REFRESH,
+ p_param->intra_refresh_arg << 16 | p_param->intra_refresh_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_FRAME_RATE, p_open_param->frame_rate_info);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_TARGET_RATE, p_open_param->bit_rate);
+
+ reg_val = p_open_param->rc_enable |
+ (p_param->hvs_qp_enable << 2) |
+ (p_param->hvs_qp_scale << 4) |
+ ((p_param->initial_rc_qp & 0x3F) << 14) |
+ (p_open_param->vbv_buffer_size << 20);
+ if (inst->std == W_AVC_ENC)
+ reg_val |= (p_param->mb_level_rc_enable << 1);
+ else if (inst->std == W_HEVC_ENC)
+ reg_val |= (p_param->cu_level_rc_enable << 1);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_PARAM, reg_val);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_WEIGHT_PARAM,
+ p_param->rc_weight_buf << 8 | p_param->rc_weight_param);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_MIN_MAX_QP, p_param->min_qp_i |
+ (p_param->max_qp_i << 6) | (p_param->hvs_max_delta_qp << 12));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_INTER_MIN_MAX_QP, p_param->min_qp_p |
+ (p_param->max_qp_p << 6) | (p_param->min_qp_b << 12) |
+ (p_param->max_qp_b << 18));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_0_3, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_4_7, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_ROT_PARAM, rot_mir_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_BG_PARAM, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_LAMBDA_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CONF_WIN_TOP_BOT,
+ p_param->conf_win_bot << 16 | p_param->conf_win_top);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CONF_WIN_LEFT_RIGHT,
+ p_param->conf_win_right << 16 | p_param->conf_win_left);
+
+ if (inst->std == W_AVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INDEPENDENT_SLICE,
+ p_param->avc_slice_arg << 16 | p_param->avc_slice_mode);
+ else if (inst->std == W_HEVC_ENC)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_INDEPENDENT_SLICE,
+ p_param->independ_slice_mode_arg << 16 |
+ p_param->independ_slice_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_USER_SCALING_LIST_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NUM_UNITS_IN_TICK, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_TIME_SCALE, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NUM_TICKS_POC_DIFF_ONE, 0);
+
+ if (inst->std == W_HEVC_ENC) {
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU04, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU08, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU16, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_PU32, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_CU08, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_CU16, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_CUSTOM_MD_CU32, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_DEPENDENT_SLICE,
+ p_param->depend_slice_mode_arg << 16 | p_param->depend_slice_mode);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NR_PARAM, 0);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_NR_WEIGHT,
+ p_param->nr_intra_weight_y |
+ (p_param->nr_intra_weight_cb << 5) |
+ (p_param->nr_intra_weight_cr << 10) |
+ (p_param->nr_inter_weight_y << 15) |
+ (p_param->nr_inter_weight_cb << 20) |
+ (p_param->nr_inter_weight_cr << 25));
+ }
+ vpu_write_reg(inst->dev, W5_CMD_ENC_SEQ_VUI_HRD_PARAM, 0);
+
+ return send_firmware_command(inst, W5_ENC_SET_PARAM, true, NULL, NULL);
+}
+
+int wave5_vpu_enc_get_seq_info(struct vpu_instance *inst, struct enc_initial_info *info)
+{
+ int ret;
+ u32 reg_val;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ /* send QUERY cmd */
+ ret = wave5_send_query(inst->dev, inst, GET_RESULT);
+ if (ret)
+ return ret;
+
+ dev_dbg(inst->dev->dev, "%s: init seq\n", __func__);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
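+	/*
+	 * Queue status, as the field names suggest: bits [23:16] count
+	 * commands still queued for this instance, the low bits count
+	 * reports ready to be collected.
+	 */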
+ p_enc_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_enc_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ if (vpu_read_reg(inst->dev, W5_RET_ENC_ENCODING_SUCCESS) != 1) {
+ info->seq_init_err_reason = vpu_read_reg(inst->dev, W5_RET_ENC_ERR_INFO);
+ ret = -EIO;
+ } else {
+ info->warn_info = vpu_read_reg(inst->dev, W5_RET_ENC_WARN_INFO);
+ }
+
+ info->min_frame_buffer_count = vpu_read_reg(inst->dev, W5_RET_ENC_NUM_REQUIRED_FB);
+ info->min_src_frame_count = vpu_read_reg(inst->dev, W5_RET_ENC_MIN_SRC_BUF_NUM);
+ info->vlc_buf_size = vpu_read_reg(inst->dev, W5_RET_VLC_BUF_SIZE);
+ info->param_buf_size = vpu_read_reg(inst->dev, W5_RET_PARAM_BUF_SIZE);
+ p_enc_info->vlc_buf_size = info->vlc_buf_size;
+ p_enc_info->param_buf_size = info->param_buf_size;
+
+ return ret;
+}
+
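+/*
+ * Stride helpers for the compressed (FBC) reference buffers: a 10-bit
+ * sample takes a factor of 5 where an 8-bit one takes 4, and the result
+ * is rounded up to a 32-byte multiple. (The interpretation of the
+ * factors is our reading of the code, not documented behaviour.)
+ */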
+static u32 calculate_luma_stride(u32 width, u32 bit_depth)
+{
+ return ALIGN(ALIGN(width, 16) * ((bit_depth > 8) ? 5 : 4), 32);
+}
+
+static u32 calculate_chroma_stride(u32 width, u32 bit_depth)
+{
+ return ALIGN(ALIGN(width / 2, 16) * ((bit_depth > 8) ? 5 : 4), 32);
+}
+
+int wave5_vpu_enc_register_framebuffer(struct device *dev, struct vpu_instance *inst,
+ struct frame_buffer *fb_arr, enum tiled_map_type map_type,
+ unsigned int count)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 stride;
+ u32 start_no, end_no;
+ size_t remain, idx, j, i, cnt_8_chunk;
+ u32 reg_val = 0, pic_size = 0, mv_col_size, fbc_y_tbl_size, fbc_c_tbl_size;
+ u32 sub_sampled_size = 0;
+ u32 luma_stride, chroma_stride;
+ u32 buf_height = 0, buf_width = 0;
+ u32 bit_depth;
+ bool avc_encoding = (inst->std == W_AVC_ENC);
+ struct vpu_buf vb_mv = {0};
+ struct vpu_buf vb_fbc_y_tbl = {0};
+ struct vpu_buf vb_fbc_c_tbl = {0};
+ struct vpu_buf vb_sub_sam_buf = {0};
+ struct vpu_buf vb_task = {0};
+ struct enc_open_param *p_open_param;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ p_open_param = &p_enc_info->open_param;
+ mv_col_size = 0;
+ fbc_y_tbl_size = 0;
+ fbc_c_tbl_size = 0;
+ stride = p_enc_info->stride;
+ bit_depth = p_open_param->wave_param.internal_bit_depth;
+
+ if (avc_encoding) {
+ buf_width = ALIGN(p_open_param->pic_width, 16);
+ buf_height = ALIGN(p_open_param->pic_height, 16);
+
+ if ((p_enc_info->rotation_angle || p_enc_info->mirror_direction) &&
+ !(p_enc_info->rotation_angle == 180 &&
+ p_enc_info->mirror_direction == MIRDIR_HOR_VER)) {
+ buf_width = ALIGN(p_open_param->pic_width, 16);
+ buf_height = ALIGN(p_open_param->pic_height, 16);
+ }
+
+ if (p_enc_info->rotation_angle == 90 || p_enc_info->rotation_angle == 270) {
+ buf_width = ALIGN(p_open_param->pic_height, 16);
+ buf_height = ALIGN(p_open_param->pic_width, 16);
+ }
+ } else {
+ buf_width = ALIGN(p_open_param->pic_width, 8);
+ buf_height = ALIGN(p_open_param->pic_height, 8);
+
+ if ((p_enc_info->rotation_angle || p_enc_info->mirror_direction) &&
+ !(p_enc_info->rotation_angle == 180 &&
+ p_enc_info->mirror_direction == MIRDIR_HOR_VER)) {
+ buf_width = ALIGN(p_open_param->pic_width, 32);
+ buf_height = ALIGN(p_open_param->pic_height, 32);
+ }
+
+ if (p_enc_info->rotation_angle == 90 || p_enc_info->rotation_angle == 270) {
+ buf_width = ALIGN(p_open_param->pic_height, 32);
+ buf_height = ALIGN(p_open_param->pic_width, 32);
+ }
+ }
+
+ pic_size = (buf_width << 16) | buf_height;
+
+ if (avc_encoding) {
+ mv_col_size = WAVE5_ENC_AVC_BUF_SIZE(buf_width, buf_height);
+ vb_mv.daddr = 0;
+ vb_mv.size = ALIGN(mv_col_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ } else {
+ mv_col_size = WAVE5_ENC_HEVC_BUF_SIZE(buf_width, buf_height);
+ mv_col_size = ALIGN(mv_col_size, 16);
+ vb_mv.daddr = 0;
+ vb_mv.size = ALIGN(mv_col_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ }
+
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_mv);
+ if (ret)
+ return ret;
+
+ p_enc_info->vb_mv = vb_mv;
+
+ fbc_y_tbl_size = ALIGN(WAVE5_FBC_LUMA_TABLE_SIZE(buf_width, buf_height), 16);
+ fbc_c_tbl_size = ALIGN(WAVE5_FBC_CHROMA_TABLE_SIZE(buf_width, buf_height), 16);
+
+ vb_fbc_y_tbl.daddr = 0;
+ vb_fbc_y_tbl.size = ALIGN(fbc_y_tbl_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_fbc_y_tbl);
+ if (ret)
+ goto free_vb_fbc_y_tbl;
+
+ p_enc_info->vb_fbc_y_tbl = vb_fbc_y_tbl;
+
+ vb_fbc_c_tbl.daddr = 0;
+ vb_fbc_c_tbl.size = ALIGN(fbc_c_tbl_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_fbc_c_tbl);
+ if (ret)
+ goto free_vb_fbc_c_tbl;
+
+ p_enc_info->vb_fbc_c_tbl = vb_fbc_c_tbl;
+
+ if (avc_encoding)
+ sub_sampled_size = WAVE5_SUBSAMPLED_ONE_SIZE_AVC(buf_width, buf_height);
+ else
+ sub_sampled_size = WAVE5_SUBSAMPLED_ONE_SIZE(buf_width, buf_height);
+ vb_sub_sam_buf.size = ALIGN(sub_sampled_size * count, BUFFER_MARGIN) + BUFFER_MARGIN;
+ vb_sub_sam_buf.daddr = 0;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_sub_sam_buf);
+ if (ret)
+ goto free_vb_sam_buf;
+
+ p_enc_info->vb_sub_sam_buf = vb_sub_sam_buf;
+
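+	/*
+	 * The task buffer bundles VLC_BUF_NUM VLC buffers plus one parameter
+	 * buffer per command-queue slot; it is allocated only once per
+	 * instance (size == 0 means not yet allocated).
+	 */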
+ vb_task.size = (p_enc_info->vlc_buf_size * VLC_BUF_NUM) +
+ (p_enc_info->param_buf_size * COMMAND_QUEUE_DEPTH);
+ vb_task.daddr = 0;
+ if (p_enc_info->vb_task.size == 0) {
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_task);
+ if (ret)
+ goto free_vb_task;
+
+ p_enc_info->vb_task = vb_task;
+
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_ADDR_TASK_BUF,
+ p_enc_info->vb_task.daddr);
+ vpu_write_reg(inst->dev, W5_CMD_SET_FB_TASK_BUF_SIZE, vb_task.size);
+ }
+
+ /* set sub-sampled buffer base addr */
+ vpu_write_reg(inst->dev, W5_ADDR_SUB_SAMPLED_FB_BASE, vb_sub_sam_buf.daddr);
+ /* set sub-sampled buffer size for one frame */
+ vpu_write_reg(inst->dev, W5_SUB_SAMPLED_ONE_FB_SIZE, sub_sampled_size);
+
+ vpu_write_reg(inst->dev, W5_PIC_SIZE, pic_size);
+
+ /* set stride of luma/chroma for compressed buffer */
+ if ((p_enc_info->rotation_angle || p_enc_info->mirror_direction) &&
+ !(p_enc_info->rotation_angle == 180 &&
+ p_enc_info->mirror_direction == MIRDIR_HOR_VER)) {
+ luma_stride = calculate_luma_stride(buf_width, bit_depth);
+ chroma_stride = calculate_chroma_stride(buf_width / 2, bit_depth);
+ } else {
+ luma_stride = calculate_luma_stride(p_open_param->pic_width, bit_depth);
+ chroma_stride = calculate_chroma_stride(p_open_param->pic_width / 2, bit_depth);
+ }
+
+ vpu_write_reg(inst->dev, W5_FBC_STRIDE, luma_stride << 16 | chroma_stride);
+ vpu_write_reg(inst->dev, W5_COMMON_PIC_INFO, stride);
+
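+	/*
+	 * Frame buffers are registered with the firmware in chunks of up to
+	 * eight; in W5_SFB_OPTION, bit 3 appears to mark the first chunk and
+	 * bit 4 the last one.
+	 */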
+ remain = count;
+ cnt_8_chunk = DIV_ROUND_UP(count, 8);
+ idx = 0;
+ for (j = 0; j < cnt_8_chunk; j++) {
+ reg_val = (j == cnt_8_chunk - 1) << 4 | ((j == 0) << 3);
+ vpu_write_reg(inst->dev, W5_SFB_OPTION, reg_val);
+ start_no = j * 8;
+ end_no = start_no + ((remain >= 8) ? 8 : remain) - 1;
+
+ vpu_write_reg(inst->dev, W5_SET_FB_NUM, (start_no << 8) | end_no);
+
+ for (i = 0; i < 8 && i < remain; i++) {
+			vpu_write_reg(inst->dev, W5_ADDR_LUMA_BASE0 + (i << 4),
+				      fb_arr[i + start_no].buf_y);
+ vpu_write_reg(inst->dev, W5_ADDR_CB_BASE0 + (i << 4),
+ fb_arr[i + start_no].buf_cb);
+ /* luma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_Y_OFFSET0 + (i << 4),
+ vb_fbc_y_tbl.daddr + idx * fbc_y_tbl_size);
+ /* chroma FBC offset table */
+ vpu_write_reg(inst->dev, W5_ADDR_FBC_C_OFFSET0 + (i << 4),
+ vb_fbc_c_tbl.daddr + idx * fbc_c_tbl_size);
+
+ vpu_write_reg(inst->dev, W5_ADDR_MV_COL0 + (i << 2),
+ vb_mv.daddr + idx * mv_col_size);
+ idx++;
+ }
+ remain -= i;
+
+ ret = send_firmware_command(inst, W5_SET_FB, false, NULL, NULL);
+ if (ret)
+ goto free_vb_mem;
+ }
+
+ ret = wave5_vpu_firmware_command_queue_error_check(vpu_dev, NULL);
+ if (ret)
+ goto free_vb_mem;
+
+ return ret;
+
+free_vb_mem:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_task);
+free_vb_task:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_sub_sam_buf);
+free_vb_sam_buf:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_fbc_c_tbl);
+free_vb_fbc_c_tbl:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_fbc_y_tbl);
+free_vb_fbc_y_tbl:
+ wave5_vdi_free_dma_memory(vpu_dev, &vb_mv);
+ return ret;
+}
+
+int wave5_vpu_encode(struct vpu_instance *inst, struct enc_param *option, u32 *fail_res)
+{
+ u32 src_frame_format;
+ u32 reg_val = 0;
+ u32 src_stride_c = 0;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ struct frame_buffer *p_src_frame = option->source_frame;
+ struct enc_open_param *p_open_param = &p_enc_info->open_param;
+ bool justified = WTL_RIGHT_JUSTIFIED;
+ u32 format_no = WTL_PIXEL_8BIT;
+ int ret;
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_BS_START_ADDR, option->pic_stream_buffer_addr);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_BS_SIZE, option->pic_stream_buffer_size);
+ p_enc_info->stream_buf_start_addr = option->pic_stream_buffer_addr;
+ p_enc_info->stream_buf_size = option->pic_stream_buffer_size;
+ p_enc_info->stream_buf_end_addr =
+ option->pic_stream_buffer_addr + option->pic_stream_buffer_size;
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_AXI_SEL, DEFAULT_SRC_AXI);
+ /* secondary AXI */
+ reg_val = (p_enc_info->sec_axi_info.use_enc_rdo_enable << 11) |
+ (p_enc_info->sec_axi_info.use_enc_lf_enable << 15);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_USE_SEC_AXI, reg_val);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_REPORT_PARAM, 0);
+
+ /*
+	 * CODEOPT_ENC_VCL implicitly encodes the header(s) together with the
+	 * VCL data when generating the bitstream.
+	 * (Use ENC_PUT_VIDEO_HEADER with give_command to encode only a header.)
+ */
+ if (option->code_option.implicit_header_encode)
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CODE_OPTION,
+ CODEOPT_ENC_HEADER_IMPLICIT | CODEOPT_ENC_VCL |
+ (option->code_option.encode_aud << 5) |
+ (option->code_option.encode_eos << 6) |
+ (option->code_option.encode_eob << 7));
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CODE_OPTION,
+ option->code_option.implicit_header_encode |
+ (option->code_option.encode_vcl << 1) |
+ (option->code_option.encode_vps << 2) |
+ (option->code_option.encode_sps << 3) |
+ (option->code_option.encode_pps << 4) |
+ (option->code_option.encode_aud << 5) |
+ (option->code_option.encode_eos << 6) |
+ (option->code_option.encode_eob << 7));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_PIC_PARAM, 0);
+
+ if (option->src_end_flag)
+ /* no more source images. */
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_PIC_IDX, 0xFFFFFFFF);
+ else
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_PIC_IDX, option->src_idx);
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_Y, p_src_frame->buf_y);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_U, p_src_frame->buf_cb);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_ADDR_V, p_src_frame->buf_cr);
+
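+	/*
+	 * Derive justification, storage unit and chroma stride from the
+	 * source format: *_MSB variants are right-justified, *_LSB variants
+	 * left-justified, and 4:2:2 formats double the chroma stride.
+	 */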
+ switch (p_open_param->src_format) {
+ case FORMAT_420:
+ case FORMAT_422:
+ case FORMAT_YUYV:
+ case FORMAT_YVYU:
+ case FORMAT_UYVY:
+ case FORMAT_VYUY:
+ justified = WTL_LEFT_JUSTIFIED;
+ format_no = WTL_PIXEL_8BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ (p_src_frame->stride / 2);
+ src_stride_c = (p_open_param->src_format == FORMAT_422) ? src_stride_c * 2 :
+ src_stride_c;
+ break;
+ case FORMAT_420_P10_16BIT_MSB:
+ case FORMAT_422_P10_16BIT_MSB:
+ case FORMAT_YUYV_P10_16BIT_MSB:
+ case FORMAT_YVYU_P10_16BIT_MSB:
+ case FORMAT_UYVY_P10_16BIT_MSB:
+ case FORMAT_VYUY_P10_16BIT_MSB:
+ justified = WTL_RIGHT_JUSTIFIED;
+ format_no = WTL_PIXEL_16BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ (p_src_frame->stride / 2);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_16BIT_MSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ case FORMAT_420_P10_16BIT_LSB:
+ case FORMAT_422_P10_16BIT_LSB:
+ case FORMAT_YUYV_P10_16BIT_LSB:
+ case FORMAT_YVYU_P10_16BIT_LSB:
+ case FORMAT_UYVY_P10_16BIT_LSB:
+ case FORMAT_VYUY_P10_16BIT_LSB:
+ justified = WTL_LEFT_JUSTIFIED;
+ format_no = WTL_PIXEL_16BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ (p_src_frame->stride / 2);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_16BIT_LSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ case FORMAT_420_P10_32BIT_MSB:
+ case FORMAT_422_P10_32BIT_MSB:
+ case FORMAT_YUYV_P10_32BIT_MSB:
+ case FORMAT_YVYU_P10_32BIT_MSB:
+ case FORMAT_UYVY_P10_32BIT_MSB:
+ case FORMAT_VYUY_P10_32BIT_MSB:
+ justified = WTL_RIGHT_JUSTIFIED;
+ format_no = WTL_PIXEL_32BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ ALIGN(p_src_frame->stride / 2, 16) * BIT(inst->cbcr_interleave);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_32BIT_MSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ case FORMAT_420_P10_32BIT_LSB:
+ case FORMAT_422_P10_32BIT_LSB:
+ case FORMAT_YUYV_P10_32BIT_LSB:
+ case FORMAT_YVYU_P10_32BIT_LSB:
+ case FORMAT_UYVY_P10_32BIT_LSB:
+ case FORMAT_VYUY_P10_32BIT_LSB:
+ justified = WTL_LEFT_JUSTIFIED;
+ format_no = WTL_PIXEL_32BIT;
+ src_stride_c = inst->cbcr_interleave ? p_src_frame->stride :
+ ALIGN(p_src_frame->stride / 2, 16) * BIT(inst->cbcr_interleave);
+ src_stride_c = (p_open_param->src_format ==
+ FORMAT_422_P10_32BIT_LSB) ? src_stride_c * 2 : src_stride_c;
+ break;
+ default:
+ return -EINVAL;
+ }
+
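+	/*
+	 * Source format code: bit 1 = Cb/Cr interleave, bit 0 = NV21
+	 * ordering; the values 4-7 below select the packed YUV orderings.
+	 */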
+ src_frame_format = (inst->cbcr_interleave << 1) | (inst->nv21);
+ switch (p_open_param->packed_format) {
+ case PACKED_YUYV:
+ src_frame_format = 4;
+ break;
+ case PACKED_YVYU:
+ src_frame_format = 5;
+ break;
+ case PACKED_UYVY:
+ src_frame_format = 6;
+ break;
+ case PACKED_VYUY:
+ src_frame_format = 7;
+ break;
+ default:
+ break;
+ }
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_STRIDE,
+ (p_src_frame->stride << 16) | src_stride_c);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SRC_FORMAT, src_frame_format |
+ (format_no << 3) | (justified << 5) | (PIC_SRC_ENDIANNESS_BIG_ENDIAN << 6));
+
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_PARAM, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_LONGTERM_PIC, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_Y, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_C, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_MEAN_Y, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_WP_PIXEL_MEAN_C, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_PREFIX_SEI_INFO, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_PREFIX_SEI_NAL_ADDR, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SUFFIX_SEI_INFO, 0);
+ vpu_write_reg(inst->dev, W5_CMD_ENC_PIC_SUFFIX_SEI_NAL_ADDR, 0);
+
+ ret = send_firmware_command(inst, W5_DEC_ENC_PIC, true, &reg_val, fail_res);
+ if (ret == -ETIMEDOUT)
+ return ret;
+
+ p_enc_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_enc_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int wave5_vpu_enc_get_result(struct vpu_instance *inst, struct enc_output_info *result)
+{
+ int ret;
+ u32 encoding_success;
+ u32 reg_val;
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = wave5_send_query(inst->dev, inst, GET_RESULT);
+ if (ret)
+ return ret;
+
+ dev_dbg(inst->dev->dev, "%s: enc pic complete\n", __func__);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_QUEUE_STATUS);
+
+ p_enc_info->instance_queue_count = (reg_val >> 16) & 0xff;
+ p_enc_info->report_queue_count = (reg_val & QUEUE_REPORT_MASK);
+
+ encoding_success = vpu_read_reg(inst->dev, W5_RET_ENC_ENCODING_SUCCESS);
+ if (!encoding_success) {
+ result->error_reason = vpu_read_reg(inst->dev, W5_RET_ENC_ERR_INFO);
+ return -EIO;
+ }
+
+ result->warn_info = vpu_read_reg(inst->dev, W5_RET_ENC_WARN_INFO);
+
+ reg_val = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_TYPE);
+ result->pic_type = reg_val & 0xFFFF;
+
+ result->enc_vcl_nut = vpu_read_reg(inst->dev, W5_RET_ENC_VCL_NUT);
+ /*
+	 * To get the reconstructed frame, use the following index into
+	 * inst->frame_buf.
+ */
+ result->recon_frame_index = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_IDX);
+ result->enc_pic_byte = vpu_read_reg(inst->dev, W5_RET_ENC_PIC_BYTE);
+ result->enc_src_idx = vpu_read_reg(inst->dev, W5_RET_ENC_USED_SRC_IDX);
+ p_enc_info->stream_wr_ptr = vpu_read_reg(inst->dev, W5_RET_ENC_WR_PTR);
+ p_enc_info->stream_rd_ptr = vpu_read_reg(inst->dev, W5_RET_ENC_RD_PTR);
+
+ result->bitstream_buffer = vpu_read_reg(inst->dev, W5_RET_ENC_RD_PTR);
+ result->rd_ptr = p_enc_info->stream_rd_ptr;
+ result->wr_ptr = p_enc_info->stream_wr_ptr;
+
+	/* Result for header-only (no VCL) encoding. */
+ if (result->recon_frame_index == RECON_IDX_FLAG_HEADER_ONLY)
+ result->bitstream_size = result->enc_pic_byte;
+ else if (result->recon_frame_index < 0)
+ result->bitstream_size = 0;
+ else
+ result->bitstream_size = result->enc_pic_byte;
+
+ result->enc_host_cmd_tick = vpu_read_reg(inst->dev, W5_RET_ENC_HOST_CMD_TICK);
+ result->enc_encode_end_tick = vpu_read_reg(inst->dev, W5_RET_ENC_ENCODING_END_TICK);
+
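+	/*
+	 * Cycle accounting: the first frame is measured from the command
+	 * tick to the encode-end tick; subsequent frames are measured from
+	 * the previous frame's end tick, falling back to the command tick
+	 * when the ticks are not monotonically increasing.
+	 */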
+ if (!p_enc_info->first_cycle_check) {
+ result->frame_cycle = (result->enc_encode_end_tick - result->enc_host_cmd_tick) *
+ p_enc_info->cycle_per_tick;
+ p_enc_info->first_cycle_check = true;
+ } else {
+ result->frame_cycle =
+ (result->enc_encode_end_tick - vpu_dev->last_performance_cycles) *
+ p_enc_info->cycle_per_tick;
+ if (vpu_dev->last_performance_cycles < result->enc_host_cmd_tick)
+ result->frame_cycle = (result->enc_encode_end_tick -
+ result->enc_host_cmd_tick) * p_enc_info->cycle_per_tick;
+ }
+ vpu_dev->last_performance_cycles = result->enc_encode_end_tick;
+
+ return 0;
+}
+
+int wave5_vpu_enc_finish_seq(struct vpu_instance *inst, u32 *fail_res)
+{
+ return send_firmware_command(inst, W5_DESTROY_INSTANCE, true, NULL, fail_res);
+}
+
+static bool wave5_vpu_enc_check_common_param_valid(struct vpu_instance *inst,
+ struct enc_open_param *open_param)
+{
+ bool low_delay = true;
+ struct enc_wave_param *param = &open_param->wave_param;
+ struct vpu_device *vpu_dev = inst->dev;
+ struct device *dev = vpu_dev->dev;
+ u32 num_ctu_row = (open_param->pic_height + 64 - 1) / 64;
+ u32 num_ctu_col = (open_param->pic_width + 64 - 1) / 64;
+ u32 ctu_sz = num_ctu_col * num_ctu_row;
+
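+	/* low_delay is hard-wired to true; presumably only low-delay GOP presets are supported here. */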
+ if (inst->std == W_HEVC_ENC && low_delay &&
+ param->decoding_refresh_type == DEC_REFRESH_TYPE_CRA) {
+ dev_warn(dev,
+			 "dec_refresh_type (CRA) should not be used with a low-delay GOP\n");
+		dev_warn(dev, "Suggested configuration parameter: decoding refresh type (IDR)\n");
+		param->decoding_refresh_type = DEC_REFRESH_TYPE_IDR;
+ }
+
+ if (param->wpp_enable && param->independ_slice_mode) {
+ unsigned int num_ctb_in_width = ALIGN(open_param->pic_width, 64) >> 6;
+
+ if (param->independ_slice_mode_arg % num_ctb_in_width) {
+ dev_err(dev, "independ_slice_mode_arg %u must be a multiple of %u\n",
+ param->independ_slice_mode_arg, num_ctb_in_width);
+ return false;
+ }
+ }
+
+ /* multi-slice & wpp */
+ if (param->wpp_enable && param->depend_slice_mode) {
+ dev_err(dev, "wpp_enable && depend_slice_mode cannot be used simultaneously\n");
+ return false;
+ }
+
+ if (!param->independ_slice_mode && param->depend_slice_mode) {
+ dev_err(dev, "depend_slice_mode requires independ_slice_mode\n");
+ return false;
+ } else if (param->independ_slice_mode &&
+ param->depend_slice_mode == DEPEND_SLICE_MODE_RECOMMENDED &&
+ param->independ_slice_mode_arg < param->depend_slice_mode_arg) {
+		dev_err(dev, "independ_slice_mode_arg: %u must not be smaller than %u\n",
+ param->independ_slice_mode_arg, param->depend_slice_mode_arg);
+ return false;
+ }
+
+ if (param->independ_slice_mode && param->independ_slice_mode_arg > 65535) {
+		dev_err(dev, "independ_slice_mode_arg: %u must not exceed 65535\n",
+ param->independ_slice_mode_arg);
+ return false;
+ }
+
+ if (param->depend_slice_mode && param->depend_slice_mode_arg > 65535) {
+		dev_err(dev, "depend_slice_mode_arg: %u must not exceed 65535\n",
+ param->depend_slice_mode_arg);
+ return false;
+ }
+
+ if (param->conf_win_top % 2) {
+ dev_err(dev, "conf_win_top: %u, must be a multiple of 2\n", param->conf_win_top);
+ return false;
+ }
+
+ if (param->conf_win_bot % 2) {
+ dev_err(dev, "conf_win_bot: %u, must be a multiple of 2\n", param->conf_win_bot);
+ return false;
+ }
+
+ if (param->conf_win_left % 2) {
+ dev_err(dev, "conf_win_left: %u, must be a multiple of 2\n", param->conf_win_left);
+ return false;
+ }
+
+ if (param->conf_win_right % 2) {
+		dev_err(dev, "conf_win_right: %u, must be a multiple of 2\n",
+ param->conf_win_right);
+ return false;
+ }
+
+ if (param->lossless_enable && open_param->rc_enable) {
+ dev_err(dev, "option rate_control cannot be used with lossless_coding\n");
+ return false;
+ }
+
+ if (param->lossless_enable && !param->skip_intra_trans) {
+ dev_err(dev, "option intra_trans_skip must be enabled with lossless_coding\n");
+ return false;
+ }
+
+ /* intra refresh */
+ if (param->intra_refresh_mode && param->intra_refresh_arg == 0) {
+ dev_err(dev, "Invalid refresh argument, mode: %u, refresh: %u must be > 0\n",
+ param->intra_refresh_mode, param->intra_refresh_arg);
+ return false;
+ }
+ switch (param->intra_refresh_mode) {
+ case REFRESH_MODE_CTU_ROWS:
+		if (param->intra_refresh_arg > num_ctu_row)
+ goto invalid_refresh_argument;
+ break;
+ case REFRESH_MODE_CTU_COLUMNS:
+ if (param->intra_refresh_arg > num_ctu_col)
+ goto invalid_refresh_argument;
+ break;
+ case REFRESH_MODE_CTU_STEP_SIZE:
+ if (param->intra_refresh_arg > ctu_sz)
+ goto invalid_refresh_argument;
+ break;
+ case REFRESH_MODE_CTUS:
+ if (param->intra_refresh_arg > ctu_sz)
+ goto invalid_refresh_argument;
+ if (param->lossless_enable) {
+			dev_err(dev, "mode: %u cannot be used with lossless_enable\n",
+ param->intra_refresh_mode);
+ return false;
+ }
+	}
+ return true;
+
+invalid_refresh_argument:
+ dev_err(dev, "Invalid refresh argument, mode: %u, refresh: %u > W(%u)xH(%u)\n",
+ param->intra_refresh_mode, param->intra_refresh_arg,
+		num_ctu_col, num_ctu_row);
+ return false;
+}
+
+static bool wave5_vpu_enc_check_param_valid(struct vpu_device *vpu_dev,
+ struct enc_open_param *open_param)
+{
+ struct enc_wave_param *param = &open_param->wave_param;
+
+ if (open_param->rc_enable) {
+ if (param->min_qp_i > param->max_qp_i || param->min_qp_p > param->max_qp_p ||
+ param->min_qp_b > param->max_qp_b) {
+ dev_err(vpu_dev->dev, "Configuration failed because min_qp is greater than max_qp\n");
+ dev_err(vpu_dev->dev, "Suggested configuration parameters: min_qp = max_qp\n");
+ return false;
+ }
+
+ if (open_param->bit_rate <= (int)open_param->frame_rate_info) {
+ dev_err(vpu_dev->dev,
+ "enc_bit_rate: %u must be greater than the frame_rate: %u\n",
+ open_param->bit_rate, (int)open_param->frame_rate_info);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int wave5_vpu_enc_check_open_param(struct vpu_instance *inst, struct enc_open_param *open_param)
+{
+ u32 pic_width;
+ u32 pic_height;
+ s32 product_id = inst->dev->product;
+ struct vpu_attr *p_attr = &inst->dev->attr;
+ struct enc_wave_param *param;
+
+ if (!open_param)
+ return -EINVAL;
+
+ param = &open_param->wave_param;
+ pic_width = open_param->pic_width;
+ pic_height = open_param->pic_height;
+
+ if (inst->id >= MAX_NUM_INSTANCE) {
+ dev_err(inst->dev->dev, "Too many simultaneous instances: %d (max: %u)\n",
+ inst->id, MAX_NUM_INSTANCE);
+ return -EOPNOTSUPP;
+ }
+
+ if (inst->std != W_HEVC_ENC &&
+ !(inst->std == W_AVC_ENC && product_id == PRODUCT_ID_521)) {
+ dev_err(inst->dev->dev, "Unsupported encoder-codec & product combination\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (param->internal_bit_depth == 10) {
+ if (inst->std == W_HEVC_ENC && !p_attr->support_hevc10bit_enc) {
+ dev_err(inst->dev->dev,
+ "Flag support_hevc10bit_enc must be set to encode 10bit HEVC\n");
+ return -EOPNOTSUPP;
+ } else if (inst->std == W_AVC_ENC && !p_attr->support_avc10bit_enc) {
+ dev_err(inst->dev->dev,
+ "Flag support_avc10bit_enc must be set to encode 10bit AVC\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!open_param->frame_rate_info) {
+ dev_err(inst->dev->dev, "No frame rate information.\n");
+ return -EINVAL;
+ }
+
+ if (open_param->bit_rate > MAX_BIT_RATE) {
+ dev_err(inst->dev->dev, "Invalid encoding bit-rate: %u (valid: 0-%u)\n",
+ open_param->bit_rate, MAX_BIT_RATE);
+ return -EINVAL;
+ }
+
+ if (pic_width < W5_MIN_ENC_PIC_WIDTH || pic_width > W5_MAX_ENC_PIC_WIDTH ||
+ pic_height < W5_MIN_ENC_PIC_HEIGHT || pic_height > W5_MAX_ENC_PIC_HEIGHT) {
+ dev_err(inst->dev->dev, "Invalid encoding dimension: %ux%u\n",
+ pic_width, pic_height);
+ return -EINVAL;
+ }
+
+ if (param->profile) {
+ if (inst->std == W_HEVC_ENC) {
+ if ((param->profile != HEVC_PROFILE_MAIN ||
+ (param->profile == HEVC_PROFILE_MAIN &&
+ param->internal_bit_depth > 8)) &&
+ (param->profile != HEVC_PROFILE_MAIN10 ||
+ (param->profile == HEVC_PROFILE_MAIN10 &&
+ param->internal_bit_depth < 10)) &&
+ param->profile != HEVC_PROFILE_STILLPICTURE) {
+ dev_err(inst->dev->dev,
+ "Invalid HEVC encoding profile: %u (bit-depth: %u)\n",
+ param->profile, param->internal_bit_depth);
+ return -EINVAL;
+ }
+ } else if (inst->std == W_AVC_ENC) {
+ if ((param->internal_bit_depth > 8 &&
+ param->profile != H264_PROFILE_HIGH10)) {
+ dev_err(inst->dev->dev,
+ "Invalid AVC encoding profile: %u (bit-depth: %u)\n",
+ param->profile, param->internal_bit_depth);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (param->decoding_refresh_type > DEC_REFRESH_TYPE_IDR) {
+ dev_err(inst->dev->dev, "Invalid decoding refresh type: %u (valid: 0-2)\n",
+ param->decoding_refresh_type);
+ return -EINVAL;
+ }
+
+ if (param->intra_refresh_mode > REFRESH_MODE_CTUS) {
+		dev_err(inst->dev->dev, "Invalid intra refresh mode: %u (valid: 0-4)\n",
+ param->intra_refresh_mode);
+ return -EINVAL;
+ }
+
+ if (inst->std == W_HEVC_ENC && param->independ_slice_mode &&
+ param->depend_slice_mode > DEPEND_SLICE_MODE_BOOST) {
+ dev_err(inst->dev->dev,
+ "Can't combine slice modes: independent and fast dependent for HEVC\n");
+ return -EINVAL;
+ }
+
+ if (!param->disable_deblk) {
+ if (param->beta_offset_div2 < -6 || param->beta_offset_div2 > 6) {
+ dev_err(inst->dev->dev, "Invalid beta offset: %d (valid: -6-6)\n",
+ param->beta_offset_div2);
+ return -EINVAL;
+ }
+
+ if (param->tc_offset_div2 < -6 || param->tc_offset_div2 > 6) {
+ dev_err(inst->dev->dev, "Invalid tc offset: %d (valid: -6-6)\n",
+ param->tc_offset_div2);
+ return -EINVAL;
+ }
+ }
+
+ if (param->intra_qp > MAX_INTRA_QP) {
+ dev_err(inst->dev->dev,
+ "Invalid intra quantization parameter: %u (valid: 0-%u)\n",
+ param->intra_qp, MAX_INTRA_QP);
+ return -EINVAL;
+ }
+
+ if (open_param->rc_enable) {
+ if (param->min_qp_i > MAX_INTRA_QP || param->max_qp_i > MAX_INTRA_QP ||
+ param->min_qp_p > MAX_INTRA_QP || param->max_qp_p > MAX_INTRA_QP ||
+ param->min_qp_b > MAX_INTRA_QP || param->max_qp_b > MAX_INTRA_QP) {
+ dev_err(inst->dev->dev,
+ "Invalid quantization parameter min/max values: "
+ "I: %u-%u, P: %u-%u, B: %u-%u (valid for each: 0-%u)\n",
+ param->min_qp_i, param->max_qp_i, param->min_qp_p, param->max_qp_p,
+ param->min_qp_b, param->max_qp_b, MAX_INTRA_QP);
+ return -EINVAL;
+ }
+
+ if (param->hvs_qp_enable && param->hvs_max_delta_qp > MAX_HVS_MAX_DELTA_QP) {
+ dev_err(inst->dev->dev,
+ "Invalid HVS max delta quantization parameter: %u (valid: 0-%u)\n",
+ param->hvs_max_delta_qp, MAX_HVS_MAX_DELTA_QP);
+ return -EINVAL;
+ }
+
+ if (open_param->vbv_buffer_size < MIN_VBV_BUFFER_SIZE ||
+ open_param->vbv_buffer_size > MAX_VBV_BUFFER_SIZE) {
+			dev_err(inst->dev->dev, "Invalid VBV buffer size: %u (valid: %u-%u)\n",
+ open_param->vbv_buffer_size, MIN_VBV_BUFFER_SIZE,
+ MAX_VBV_BUFFER_SIZE);
+ return -EINVAL;
+ }
+ }
+
+ if (!wave5_vpu_enc_check_common_param_valid(inst, open_param))
+ return -EINVAL;
+
+ if (!wave5_vpu_enc_check_param_valid(inst->dev, open_param))
+ return -EINVAL;
+
+ if (param->chroma_cb_qp_offset < -12 || param->chroma_cb_qp_offset > 12) {
+ dev_err(inst->dev->dev,
+ "Invalid chroma Cb quantization parameter offset: %d (valid: -12-12)\n",
+ param->chroma_cb_qp_offset);
+ return -EINVAL;
+ }
+
+ if (param->chroma_cr_qp_offset < -12 || param->chroma_cr_qp_offset > 12) {
+ dev_err(inst->dev->dev,
+ "Invalid chroma Cr quantization parameter offset: %d (valid: -12-12)\n",
+ param->chroma_cr_qp_offset);
+ return -EINVAL;
+ }
+
+ if (param->intra_refresh_mode == REFRESH_MODE_CTU_STEP_SIZE && !param->intra_refresh_arg) {
+ dev_err(inst->dev->dev,
+ "Intra refresh mode CTU step-size requires an argument\n");
+ return -EINVAL;
+ }
+
+ if (inst->std == W_HEVC_ENC) {
+ if (param->nr_intra_weight_y > MAX_INTRA_WEIGHT ||
+ param->nr_intra_weight_cb > MAX_INTRA_WEIGHT ||
+ param->nr_intra_weight_cr > MAX_INTRA_WEIGHT) {
+ dev_err(inst->dev->dev,
+ "Invalid intra weight Y(%u) Cb(%u) Cr(%u) (valid: %u)\n",
+ param->nr_intra_weight_y, param->nr_intra_weight_cb,
+ param->nr_intra_weight_cr, MAX_INTRA_WEIGHT);
+ return -EINVAL;
+ }
+
+ if (param->nr_inter_weight_y > MAX_INTER_WEIGHT ||
+ param->nr_inter_weight_cb > MAX_INTER_WEIGHT ||
+ param->nr_inter_weight_cr > MAX_INTER_WEIGHT) {
+ dev_err(inst->dev->dev,
+ "Invalid inter weight Y(%u) Cb(%u) Cr(%u) (valid: %u)\n",
+ param->nr_inter_weight_y, param->nr_inter_weight_cb,
+ param->nr_inter_weight_cr, MAX_INTER_WEIGHT);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-regdefine.h b/drivers/media/platform/chips-media/wave5/wave5-regdefine.h
new file mode 100644
index 000000000..a15c6b2c3
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-regdefine.h
@@ -0,0 +1,732 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - wave5 register definitions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef __WAVE5_REGISTER_DEFINE_H__
+#define __WAVE5_REGISTER_DEFINE_H__
+
+enum W5_VPU_COMMAND {
+ W5_INIT_VPU = 0x0001,
+ W5_WAKEUP_VPU = 0x0002,
+ W5_SLEEP_VPU = 0x0004,
+ W5_CREATE_INSTANCE = 0x0008, /* queuing command */
+ W5_FLUSH_INSTANCE = 0x0010,
+ W5_DESTROY_INSTANCE = 0x0020, /* queuing command */
+ W5_INIT_SEQ = 0x0040, /* queuing command */
+ W5_SET_FB = 0x0080,
+ W5_DEC_ENC_PIC = 0x0100, /* queuing command */
+ W5_ENC_SET_PARAM = 0x0200, /* queuing command */
+ W5_QUERY = 0x4000,
+ W5_UPDATE_BS = 0x8000,
+ W5_MAX_VPU_COMD = 0x10000,
+};
+
+enum query_opt {
+ GET_VPU_INFO = 0,
+ SET_WRITE_PROT = 1,
+ GET_RESULT = 2,
+ UPDATE_DISP_FLAG = 3,
+ GET_BW_REPORT = 4,
+ GET_BS_RD_PTR = 5, /* for decoder */
+ GET_BS_WR_PTR = 6, /* for encoder */
+ GET_SRC_BUF_FLAG = 7, /* for encoder */
+ SET_BS_RD_PTR = 8, /* for decoder */
+ GET_DEBUG_INFO = 0x61,
+};
+
+#define W5_REG_BASE 0x00000000
+#define W5_CMD_REG_BASE 0x00000100
+#define W5_CMD_REG_END 0x00000200
+
+/*
+ * COMMON
+ *
+ * ----
+ *
+ * Power on configuration
+ * PO_DEBUG_MODE [0] 1 - power on with debug mode
+ * USE_PO_CONF [3] 1 - use power-on-configuration
+ */
+#define W5_PO_CONF (W5_REG_BASE + 0x0000)
+#define W5_VCPU_CUR_PC (W5_REG_BASE + 0x0004)
+#define W5_VCPU_CUR_LR (W5_REG_BASE + 0x0008)
+#define W5_VPU_PDBG_STEP_MASK_V (W5_REG_BASE + 0x000C)
+#define W5_VPU_PDBG_CTRL (W5_REG_BASE + 0x0010) /* v_cpu debugger ctrl register */
+#define W5_VPU_PDBG_IDX_REG (W5_REG_BASE + 0x0014) /* v_cpu debugger index register */
+#define W5_VPU_PDBG_WDATA_REG (W5_REG_BASE + 0x0018) /* v_cpu debugger write data reg */
+#define W5_VPU_PDBG_RDATA_REG (W5_REG_BASE + 0x001C) /* v_cpu debugger read data reg */
+
+#define W5_VPU_FIO_CTRL_ADDR (W5_REG_BASE + 0x0020)
+#define W5_VPU_FIO_DATA (W5_REG_BASE + 0x0024)
+#define W5_VPU_VINT_REASON_USR (W5_REG_BASE + 0x0030)
+#define W5_VPU_VINT_REASON_CLR (W5_REG_BASE + 0x0034)
+#define W5_VPU_HOST_INT_REQ (W5_REG_BASE + 0x0038)
+#define W5_VPU_VINT_CLEAR (W5_REG_BASE + 0x003C)
+#define W5_VPU_HINT_CLEAR (W5_REG_BASE + 0x0040)
+#define W5_VPU_VPU_INT_STS (W5_REG_BASE + 0x0044)
+#define W5_VPU_VINT_ENABLE (W5_REG_BASE + 0x0048)
+#define W5_VPU_VINT_REASON (W5_REG_BASE + 0x004C)
+#define W5_VPU_RESET_REQ (W5_REG_BASE + 0x0050)
+#define W5_RST_BLOCK_CCLK(_core) BIT((_core))
+#define W5_RST_BLOCK_CCLK_ALL (0xff)
+#define W5_RST_BLOCK_BCLK(_core) (0x100 << (_core))
+#define W5_RST_BLOCK_BCLK_ALL (0xff00)
+#define W5_RST_BLOCK_ACLK(_core) (0x10000 << (_core))
+#define W5_RST_BLOCK_ACLK_ALL (0xff0000)
+#define W5_RST_BLOCK_VCPU_ALL (0x3f000000)
+#define W5_RST_BLOCK_ALL (0x3fffffff)
+#define W5_VPU_RESET_STATUS (W5_REG_BASE + 0x0054)
+
+#define W5_VCPU_RESTART (W5_REG_BASE + 0x0058)
+#define W5_VPU_CLK_MASK (W5_REG_BASE + 0x005C)
+
+/* REMAP_CTRL
+ * PAGE SIZE: [8:0] 0x001 - 4K
+ * 0x002 - 8K
+ * 0x004 - 16K
+ * ...
+ * 0x100 - 1M
+ * REGION ATTR1 [10] 0 - normal
+ * 1 - make bus error for the region
+ * REGION ATTR2 [11] 0 - normal
+ * 1 - bypass region
+ * REMAP INDEX [15:12] - 0 ~ 3
+ * ENDIAN [19:16] - NOTE: Currently not supported in this driver
+ * AXI-ID [23:20] - upper AXI-ID
+ * BUS_ERROR [29] 0 - bypass
+ * 1 - make BUS_ERROR for unmapped region
+ * BYPASS_ALL [30] 1 - bypass all
+ * ENABLE [31] 1 - update control register[30:16]
+ */
+#define W5_VPU_REMAP_CTRL (W5_REG_BASE + 0x0060)
+#define W5_VPU_REMAP_VADDR (W5_REG_BASE + 0x0064)
+#define W5_VPU_REMAP_PADDR (W5_REG_BASE + 0x0068)
+#define W5_VPU_REMAP_CORE_START (W5_REG_BASE + 0x006C)
+#define W5_VPU_BUSY_STATUS (W5_REG_BASE + 0x0070)
+#define W5_VPU_HALT_STATUS (W5_REG_BASE + 0x0074)
+#define W5_VPU_VCPU_STATUS (W5_REG_BASE + 0x0078)
+#define W5_VPU_RET_PRODUCT_VERSION (W5_REG_BASE + 0x0094)
+/*
+ * assign vpu_config0 = {conf_map_converter_reg, // [31]
+ * conf_map_converter_sig, // [30]
+ * 8'd0, // [29:22]
+ * conf_std_switch_en, // [21]
+ * conf_bg_detect, // [20]
+ * conf_3dnr_en, // [19]
+ * conf_one_axi_en, // [18]
+ * conf_sec_axi_en, // [17]
+ * conf_bus_info, // [16]
+ * conf_afbc_en, // [15]
+ * conf_afbc_version_id, // [14:12]
+ * conf_fbc_en, // [11]
+ * conf_fbc_version_id, // [10:08]
+ * conf_scaler_en, // [07]
+ * conf_scaler_version_id, // [06:04]
+ * conf_bwb_en, // [03]
+ * 3'd0}; // [02:00]
+ */
+#define W5_VPU_RET_VPU_CONFIG0 (W5_REG_BASE + 0x0098)
+/*
+ * assign vpu_config1 = {4'd0, // [31:28]
+ * conf_perf_timer_en, // [27]
+ * conf_multi_core_en, // [26]
+ * conf_gcu_en, // [25]
+ * conf_cu_report, // [24]
+ * 4'd0, // [23:20]
+ * conf_vcore_id_3, // [19]
+ * conf_vcore_id_2, // [18]
+ * conf_vcore_id_1, // [17]
+ * conf_vcore_id_0, // [16]
+ * conf_bwb_opt, // [15]
+ * 7'd0, // [14:08]
+ * conf_cod_std_en_reserved_7, // [7]
+ * conf_cod_std_en_reserved_6, // [6]
+ * conf_cod_std_en_reserved_5, // [5]
+ * conf_cod_std_en_reserved_4, // [4]
+ * conf_cod_std_en_reserved_3, // [3]
+ * conf_cod_std_en_reserved_2, // [2]
+ * conf_cod_std_en_vp9, // [1]
+ * conf_cod_std_en_hevc}; // [0]
+ */
+#define W5_VPU_RET_VPU_CONFIG1 (W5_REG_BASE + 0x009C)
+
+#define W5_VPU_DBG_REG0 (W5_REG_BASE + 0x00f0)
+#define W5_VPU_DBG_REG1 (W5_REG_BASE + 0x00f4)
+#define W5_VPU_DBG_REG2 (W5_REG_BASE + 0x00f8)
+#define W5_VPU_DBG_REG3 (W5_REG_BASE + 0x00fc)
+
+/************************************************************************/
+/* PRODUCT INFORMATION */
+/************************************************************************/
+#define W5_PRODUCT_NAME (W5_REG_BASE + 0x1040)
+#define W5_PRODUCT_NUMBER (W5_REG_BASE + 0x1044)
+
+/************************************************************************/
+/* DECODER/ENCODER COMMON */
+/************************************************************************/
+#define W5_COMMAND (W5_REG_BASE + 0x0100)
+#define W5_COMMAND_OPTION (W5_REG_BASE + 0x0104)
+#define W5_QUERY_OPTION (W5_REG_BASE + 0x0104)
+#define W5_RET_SUCCESS (W5_REG_BASE + 0x0108)
+#define W5_RET_FAIL_REASON (W5_REG_BASE + 0x010C)
+#define W5_RET_QUEUE_FAIL_REASON (W5_REG_BASE + 0x0110)
+#define W5_CMD_INSTANCE_INFO (W5_REG_BASE + 0x0110)
+
+#define W5_RET_QUEUE_STATUS (W5_REG_BASE + 0x01E0)
+#define W5_RET_BS_EMPTY_INST (W5_REG_BASE + 0x01E4)
+#define W5_RET_QUEUE_CMD_DONE_INST (W5_REG_BASE + 0x01E8)
+#define W5_RET_STAGE0_INSTANCE_INFO (W5_REG_BASE + 0x01EC)
+#define W5_RET_STAGE1_INSTANCE_INFO (W5_REG_BASE + 0x01F0)
+#define W5_RET_STAGE2_INSTANCE_INFO (W5_REG_BASE + 0x01F4)
+
+#define W5_RET_SEQ_DONE_INSTANCE_INFO (W5_REG_BASE + 0x01FC)
+
+#define W5_BS_OPTION (W5_REG_BASE + 0x0120)
+
+/* return info when QUERY (GET_RESULT) for en/decoder */
+#define W5_RET_VLC_BUF_SIZE (W5_REG_BASE + 0x01B0)
+/* return info when QUERY (GET_RESULT) for en/decoder */
+#define W5_RET_PARAM_BUF_SIZE (W5_REG_BASE + 0x01B4)
+
+/* set when SET_FB for en/decoder */
+#define W5_CMD_SET_FB_ADDR_TASK_BUF (W5_REG_BASE + 0x01D4)
+#define W5_CMD_SET_FB_TASK_BUF_SIZE (W5_REG_BASE + 0x01D8)
+/************************************************************************/
+/* INIT_VPU - COMMON */
+/************************************************************************/
+/* note: W5_ADDR_CODE_BASE should be aligned to 4KB */
+#define W5_ADDR_CODE_BASE (W5_REG_BASE + 0x0110)
+#define W5_CODE_SIZE (W5_REG_BASE + 0x0114)
+#define W5_CODE_PARAM (W5_REG_BASE + 0x0118)
+#define W5_ADDR_TEMP_BASE (W5_REG_BASE + 0x011C)
+#define W5_TEMP_SIZE (W5_REG_BASE + 0x0120)
+#define W5_HW_OPTION (W5_REG_BASE + 0x012C)
+#define W5_SEC_AXI_PARAM (W5_REG_BASE + 0x0180)
+
+/************************************************************************/
+/* CREATE_INSTANCE - COMMON */
+/************************************************************************/
+#define W5_ADDR_WORK_BASE (W5_REG_BASE + 0x0114)
+#define W5_WORK_SIZE (W5_REG_BASE + 0x0118)
+#define W5_CMD_DEC_BS_START_ADDR (W5_REG_BASE + 0x011C)
+#define W5_CMD_DEC_BS_SIZE (W5_REG_BASE + 0x0120)
+#define W5_CMD_BS_PARAM (W5_REG_BASE + 0x0124)
+#define W5_CMD_ADDR_SEC_AXI (W5_REG_BASE + 0x0130)
+#define W5_CMD_SEC_AXI_SIZE (W5_REG_BASE + 0x0134)
+#define W5_CMD_EXT_ADDR (W5_REG_BASE + 0x0138)
+#define W5_CMD_NUM_CQ_DEPTH_M1 (W5_REG_BASE + 0x013C)
+#define W5_CMD_ERR_CONCEAL (W5_REG_BASE + 0x0140)
+
+/************************************************************************/
+/* DECODER - INIT_SEQ */
+/************************************************************************/
+#define W5_BS_RD_PTR (W5_REG_BASE + 0x0118)
+#define W5_BS_WR_PTR (W5_REG_BASE + 0x011C)
+/************************************************************************/
+/* SET_FRAME_BUF */
+/************************************************************************/
+/* SET_FB_OPTION 0x00 REGISTER FRAMEBUFFERS
+ * 0x01 UPDATE FRAMEBUFFER, update just one framebuffer (linear, FBC and mvcol)
+ */
+#define W5_SFB_OPTION (W5_REG_BASE + 0x0104)
+#define W5_COMMON_PIC_INFO (W5_REG_BASE + 0x0118)
+#define W5_PIC_SIZE (W5_REG_BASE + 0x011C)
+#define W5_SET_FB_NUM (W5_REG_BASE + 0x0120)
+#define W5_EXTRA_PIC_INFO (W5_REG_BASE + 0x0124)
+
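+/*
+ * Per-framebuffer register group: the eight slots are 0x10 apart (the
+ * driver indexes them as BASE0 + (i << 4)). Note that the CR base and
+ * the luma FBC offset share an address, presumably because linear and
+ * compressed layouts are mutually exclusive.
+ */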
+#define W5_ADDR_LUMA_BASE0 (W5_REG_BASE + 0x0134)
+#define W5_ADDR_CB_BASE0 (W5_REG_BASE + 0x0138)
+#define W5_ADDR_CR_BASE0 (W5_REG_BASE + 0x013C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET0 (W5_REG_BASE + 0x013C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET0 (W5_REG_BASE + 0x0140)
+#define W5_ADDR_LUMA_BASE1 (W5_REG_BASE + 0x0144)
+#define W5_ADDR_CB_ADDR1 (W5_REG_BASE + 0x0148)
+#define W5_ADDR_CR_ADDR1 (W5_REG_BASE + 0x014C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET1 (W5_REG_BASE + 0x014C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET1 (W5_REG_BASE + 0x0150)
+#define W5_ADDR_LUMA_BASE2 (W5_REG_BASE + 0x0154)
+#define W5_ADDR_CB_ADDR2 (W5_REG_BASE + 0x0158)
+#define W5_ADDR_CR_ADDR2 (W5_REG_BASE + 0x015C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET2 (W5_REG_BASE + 0x015C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET2 (W5_REG_BASE + 0x0160)
+#define W5_ADDR_LUMA_BASE3 (W5_REG_BASE + 0x0164)
+#define W5_ADDR_CB_ADDR3 (W5_REG_BASE + 0x0168)
+#define W5_ADDR_CR_ADDR3 (W5_REG_BASE + 0x016C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET3 (W5_REG_BASE + 0x016C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET3 (W5_REG_BASE + 0x0170)
+#define W5_ADDR_LUMA_BASE4 (W5_REG_BASE + 0x0174)
+#define W5_ADDR_CB_ADDR4 (W5_REG_BASE + 0x0178)
+#define W5_ADDR_CR_ADDR4 (W5_REG_BASE + 0x017C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET4 (W5_REG_BASE + 0x017C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET4 (W5_REG_BASE + 0x0180)
+#define W5_ADDR_LUMA_BASE5 (W5_REG_BASE + 0x0184)
+#define W5_ADDR_CB_ADDR5 (W5_REG_BASE + 0x0188)
+#define W5_ADDR_CR_ADDR5 (W5_REG_BASE + 0x018C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET5 (W5_REG_BASE + 0x018C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET5 (W5_REG_BASE + 0x0190)
+#define W5_ADDR_LUMA_BASE6 (W5_REG_BASE + 0x0194)
+#define W5_ADDR_CB_ADDR6 (W5_REG_BASE + 0x0198)
+#define W5_ADDR_CR_ADDR6 (W5_REG_BASE + 0x019C)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET6 (W5_REG_BASE + 0x019C)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET6 (W5_REG_BASE + 0x01A0)
+#define W5_ADDR_LUMA_BASE7 (W5_REG_BASE + 0x01A4)
+#define W5_ADDR_CB_ADDR7 (W5_REG_BASE + 0x01A8)
+#define W5_ADDR_CR_ADDR7 (W5_REG_BASE + 0x01AC)
+/* compression offset table for luma */
+#define W5_ADDR_FBC_Y_OFFSET7 (W5_REG_BASE + 0x01AC)
+/* compression offset table for chroma */
+#define W5_ADDR_FBC_C_OFFSET7 (W5_REG_BASE + 0x01B0)
+#define W5_ADDR_MV_COL0 (W5_REG_BASE + 0x01B4)
+#define W5_ADDR_MV_COL1 (W5_REG_BASE + 0x01B8)
+#define W5_ADDR_MV_COL2 (W5_REG_BASE + 0x01BC)
+#define W5_ADDR_MV_COL3 (W5_REG_BASE + 0x01C0)
+#define W5_ADDR_MV_COL4 (W5_REG_BASE + 0x01C4)
+#define W5_ADDR_MV_COL5 (W5_REG_BASE + 0x01C8)
+#define W5_ADDR_MV_COL6 (W5_REG_BASE + 0x01CC)
+#define W5_ADDR_MV_COL7 (W5_REG_BASE + 0x01D0)
+
+/* UPDATE_FB */
+/* CMD_SET_FB_STRIDE [15:0] - FBC framebuffer stride
+ * [31:16] - linear framebuffer stride
+ */
+#define W5_CMD_SET_FB_STRIDE (W5_REG_BASE + 0x0118)
+#define W5_CMD_SET_FB_INDEX (W5_REG_BASE + 0x0120)
+#define W5_ADDR_LUMA_BASE (W5_REG_BASE + 0x0134)
+#define W5_ADDR_CB_BASE (W5_REG_BASE + 0x0138)
+#define W5_ADDR_CR_BASE (W5_REG_BASE + 0x013C)
+#define W5_ADDR_MV_COL (W5_REG_BASE + 0x0140)
+#define W5_ADDR_FBC_Y_BASE (W5_REG_BASE + 0x0144)
+#define W5_ADDR_FBC_C_BASE (W5_REG_BASE + 0x0148)
+#define W5_ADDR_FBC_Y_OFFSET (W5_REG_BASE + 0x014C)
+#define W5_ADDR_FBC_C_OFFSET (W5_REG_BASE + 0x0150)
+
+/************************************************************************/
+/* DECODER - DEC_PIC */
+/************************************************************************/
+#define W5_CMD_DEC_VCORE_INFO (W5_REG_BASE + 0x0194)
+/* sequence change enable mask register
+ * CMD_SEQ_CHANGE_ENABLE_FLAG [5] profile_idc
+ * [16] pic_width/height_in_luma_sample
+ * [19] sps_max_dec_pic_buffering, max_num_reorder, max_latency_increase
+ */
+#define W5_CMD_SEQ_CHANGE_ENABLE_FLAG (W5_REG_BASE + 0x0128)
+#define W5_CMD_DEC_USER_MASK (W5_REG_BASE + 0x012C)
+#define W5_CMD_DEC_TEMPORAL_ID_PLUS1 (W5_REG_BASE + 0x0130)
+#define W5_CMD_DEC_FORCE_FB_LATENCY_PLUS1 (W5_REG_BASE + 0x0134)
+#define W5_USE_SEC_AXI (W5_REG_BASE + 0x0150)
+
+/************************************************************************/
+/* DECODER - QUERY : GET_VPU_INFO */
+/************************************************************************/
+#define W5_RET_FW_VERSION (W5_REG_BASE + 0x0118)
+#define W5_RET_PRODUCT_NAME (W5_REG_BASE + 0x011C)
+#define W5_RET_PRODUCT_VERSION (W5_REG_BASE + 0x0120)
+#define W5_RET_STD_DEF0 (W5_REG_BASE + 0x0124)
+#define W5_RET_STD_DEF1 (W5_REG_BASE + 0x0128)
+#define W5_RET_CONF_FEATURE (W5_REG_BASE + 0x012C)
+#define W5_RET_CONF_DATE (W5_REG_BASE + 0x0130)
+#define W5_RET_CONF_REVISION (W5_REG_BASE + 0x0134)
+#define W5_RET_CONF_TYPE (W5_REG_BASE + 0x0138)
+#define W5_RET_PRODUCT_ID (W5_REG_BASE + 0x013C)
+#define W5_RET_CUSTOMER_ID (W5_REG_BASE + 0x0140)
+
+/************************************************************************/
+/* DECODER - QUERY : GET_RESULT */
+/************************************************************************/
+#define W5_CMD_DEC_ADDR_REPORT_BASE (W5_REG_BASE + 0x0114)
+#define W5_CMD_DEC_REPORT_SIZE (W5_REG_BASE + 0x0118)
+#define W5_CMD_DEC_REPORT_PARAM (W5_REG_BASE + 0x011C)
+
+#define W5_RET_DEC_BS_RD_PTR (W5_REG_BASE + 0x011C)
+#define W5_RET_DEC_SEQ_PARAM (W5_REG_BASE + 0x0120)
+#define W5_RET_DEC_COLOR_SAMPLE_INFO (W5_REG_BASE + 0x0124)
+#define W5_RET_DEC_ASPECT_RATIO (W5_REG_BASE + 0x0128)
+#define W5_RET_DEC_BIT_RATE (W5_REG_BASE + 0x012C)
+#define W5_RET_DEC_FRAME_RATE_NR (W5_REG_BASE + 0x0130)
+#define W5_RET_DEC_FRAME_RATE_DR (W5_REG_BASE + 0x0134)
+#define W5_RET_DEC_NUM_REQUIRED_FB (W5_REG_BASE + 0x0138)
+#define W5_RET_DEC_NUM_REORDER_DELAY (W5_REG_BASE + 0x013C)
+#define W5_RET_DEC_SUB_LAYER_INFO (W5_REG_BASE + 0x0140)
+#define W5_RET_DEC_NOTIFICATION (W5_REG_BASE + 0x0144)
+/*
+ * USER_DATA_FLAGS for HEVC/H264 only.
+ * Bits:
+ * [1] - User data buffer full boolean
+ * [2] - VUI parameter flag
+ * [4] - Pic_timing SEI flag
+ * [5] - 1st user_data_registered_itu_t_t35 prefix SEI flag
+ * [6] - user_data_unregistered prefix SEI flag
+ * [7] - 1st user_data_registered_itu_t_t35 suffix SEI flag
+ * [8] - user_data_unregistered suffix SEI flag
+ * [10]- mastering_display_color_volume prefix SEI flag
+ * [11]- chroma_resampling_filter_hint prefix SEI flag
+ * [12]- knee_function_info SEI flag
+ * [13]- tone_mapping_info prefix SEI flag
+ * [14]- film_grain_characteristics_info prefix SEI flag
+ * [15]- content_light_level_info prefix SEI flag
+ * [16]- color_remapping_info prefix SEI flag
+ * [28]- 2nd user_data_registered_itu_t_t35 prefix SEI flag
+ * [29]- 3rd user_data_registered_itu_t_t35 prefix SEI flag
+ * [30]- 2nd user_data_registered_itu_t_t35 suffix SEI flag
+ * [31]- 3rd user_data_registered_itu_t_t35 suffix SEI flag
+ */
+#define W5_RET_DEC_USERDATA_IDC (W5_REG_BASE + 0x0148)
+#define W5_RET_DEC_PIC_SIZE (W5_REG_BASE + 0x014C)
+#define W5_RET_DEC_CROP_TOP_BOTTOM (W5_REG_BASE + 0x0150)
+#define W5_RET_DEC_CROP_LEFT_RIGHT (W5_REG_BASE + 0x0154)
+/*
+ * #define W5_RET_DEC_AU_START_POS (W5_REG_BASE + 0x0158)
+ * => Access unit (AU) Bitstream start position
+ * #define W5_RET_DEC_AU_END_POS (W5_REG_BASE + 0x015C)
+ * => Access unit (AU) Bitstream end position
+ */
+
+/*
+ * Decoded picture type:
+ * reg_val & 0x7 => picture type
+ * (reg_val >> 4) & 0x3f => VCL NAL unit type
+ * (reg_val >> 31) & 0x1 => output_flag
+ * 16 << ((reg_val >> 10) & 0x3) => ctu_size
+ */
+#define W5_RET_DEC_PIC_TYPE (W5_REG_BASE + 0x0160)
+#define W5_RET_DEC_PIC_POC (W5_REG_BASE + 0x0164)
+/*
+ * #define W5_RET_DEC_RECOVERY_POINT (W5_REG_BASE + 0x0168)
+ * => HEVC recovery point
+ * reg_val & 0xff => number of signed recovery picture order counts
+ * (reg_val >> 16) & 0x1 => exact match flag
+ * (reg_val >> 17) & 0x1 => broken link flag
+ * (reg_val >> 18) & 0x1 => exist flag
+ */
+#define W5_RET_DEC_DEBUG_INDEX (W5_REG_BASE + 0x016C)
+#define W5_RET_DEC_DECODED_INDEX (W5_REG_BASE + 0x0170)
+#define W5_RET_DEC_DISPLAY_INDEX (W5_REG_BASE + 0x0174)
+/*
+ * #define W5_RET_DEC_REALLOC_INDEX (W5_REG_BASE + 0x0178)
+ * => display picture index in decoded picture buffer
+ * reg_val & 0xf => display picture index for FBC buffer (by reordering)
+ */
+#define W5_RET_DEC_DISP_IDC (W5_REG_BASE + 0x017C)
+/*
+ * #define W5_RET_DEC_ERR_CTB_NUM (W5_REG_BASE + 0x0180)
+ * => Number of error CTUs
+ * reg_val >> 16 => erroneous CTUs in bitstream
+ * reg_val & 0xffff => total CTUs in bitstream
+ *
+ * #define W5_RET_DEC_PIC_PARAM (W5_REG_BASE + 0x01A0)
+ * => Bitstream sequence/picture parameter information (AV1 only)
+ * reg_val & 0x1 => intrabc tool enable
+ * (reg_val >> 1) & 0x1 => screen content tools enable
+ */
+#define W5_RET_DEC_HOST_CMD_TICK (W5_REG_BASE + 0x01B8)
+/*
+ * #define W5_RET_DEC_SEEK_START_TICK (W5_REG_BASE + 0x01BC)
+ * #define W5_RET_DEC_SEEK_END_TICK (W5_REG_BASE + 0x01C0)
+ * => Start and end ticks for seeking slices of the picture
+ * #define W5_RET_DEC_PARSING_START_TICK (W5_REG_BASE + 0x01C4)
+ * #define W5_RET_DEC_PARSING_END_TICK (W5_REG_BASE + 0x01C8)
+ * => Start and end ticks for parsing slices of the picture
+ * #define W5_RET_DEC_DECODING_START_TICK (W5_REG_BASE + 0x01CC)
+ * => Start tick for decoding slices of the picture
+ */
+#define W5_RET_DEC_DECODING_ENC_TICK (W5_REG_BASE + 0x01D0)
+#define W5_RET_DEC_WARN_INFO (W5_REG_BASE + 0x01D4)
+#define W5_RET_DEC_ERR_INFO (W5_REG_BASE + 0x01D8)
+#define W5_RET_DEC_DECODING_SUCCESS (W5_REG_BASE + 0x01DC)
+
+/************************************************************************/
+/* DECODER - FLUSH_INSTANCE */
+/************************************************************************/
+#define W5_CMD_FLUSH_INST_OPT (W5_REG_BASE + 0x104)
+
+/************************************************************************/
+/* DECODER - QUERY : UPDATE_DISP_FLAG */
+/************************************************************************/
+#define W5_CMD_DEC_SET_DISP_IDC (W5_REG_BASE + 0x0118)
+#define W5_CMD_DEC_CLR_DISP_IDC (W5_REG_BASE + 0x011C)
+
+/************************************************************************/
+/* DECODER - QUERY : SET_BS_RD_PTR */
+/************************************************************************/
+#define W5_RET_QUERY_DEC_SET_BS_RD_PTR (W5_REG_BASE + 0x011C)
+
+/************************************************************************/
+/* DECODER - QUERY : GET_BS_RD_PTR */
+/************************************************************************/
+#define W5_RET_QUERY_DEC_BS_RD_PTR (W5_REG_BASE + 0x011C)
+
+/************************************************************************/
+/* QUERY : GET_DEBUG_INFO */
+/************************************************************************/
+#define W5_RET_QUERY_DEBUG_PRI_REASON (W5_REG_BASE + 0x114)
+
+/************************************************************************/
+/* GDI register for debugging */
+/************************************************************************/
+#define W5_GDI_BASE 0x8800
+#define W5_GDI_BUS_CTRL (W5_GDI_BASE + 0x0F0)
+#define W5_GDI_BUS_STATUS (W5_GDI_BASE + 0x0F4)
+
+#define W5_BACKBONE_BASE_VCPU 0xFE00
+#define W5_BACKBONE_BUS_CTRL_VCPU (W5_BACKBONE_BASE_VCPU + 0x010)
+#define W5_BACKBONE_BUS_STATUS_VCPU (W5_BACKBONE_BASE_VCPU + 0x014)
+#define W5_BACKBONE_PROG_AXI_ID (W5_BACKBONE_BASE_VCPU + 0x00C)
+
+#define W5_BACKBONE_PROC_EXT_ADDR (W5_BACKBONE_BASE_VCPU + 0x0C0)
+#define W5_BACKBONE_AXI_PARAM (W5_BACKBONE_BASE_VCPU + 0x0E0)
+
+#define W5_BACKBONE_BASE_VCORE0 0x8E00
+#define W5_BACKBONE_BUS_CTRL_VCORE0 (W5_BACKBONE_BASE_VCORE0 + 0x010)
+#define W5_BACKBONE_BUS_STATUS_VCORE0 (W5_BACKBONE_BASE_VCORE0 + 0x014)
+
+#define W5_BACKBONE_BASE_VCORE1 0x9E00 /* for dual-core product */
+#define W5_BACKBONE_BUS_CTRL_VCORE1 (W5_BACKBONE_BASE_VCORE1 + 0x010)
+#define W5_BACKBONE_BUS_STATUS_VCORE1 (W5_BACKBONE_BASE_VCORE1 + 0x014)
+
+#define W5_COMBINED_BACKBONE_BASE 0xFE00
+#define W5_COMBINED_BACKBONE_BUS_CTRL (W5_COMBINED_BACKBONE_BASE + 0x010)
+#define W5_COMBINED_BACKBONE_BUS_STATUS (W5_COMBINED_BACKBONE_BASE + 0x014)
+
+/************************************************************************/
+/* */
+/* for ENCODER */
+/* */
+/************************************************************************/
+#define W5_RET_STAGE3_INSTANCE_INFO (W5_REG_BASE + 0x1F8)
+/************************************************************************/
+/* ENCODER - CREATE_INSTANCE */
+/************************************************************************/
+/* 0x114 ~ 0x124 : defined above (CREATE_INSTANCE COMMON) */
+#define W5_CMD_ENC_VCORE_INFO (W5_REG_BASE + 0x0194)
+#define W5_CMD_ENC_SRC_OPTIONS (W5_REG_BASE + 0x0128)
+
+/************************************************************************/
+/* ENCODER - SET_FB */
+/************************************************************************/
+#define W5_FBC_STRIDE (W5_REG_BASE + 0x128)
+#define W5_ADDR_SUB_SAMPLED_FB_BASE (W5_REG_BASE + 0x12C)
+#define W5_SUB_SAMPLED_ONE_FB_SIZE (W5_REG_BASE + 0x130)
+
+/************************************************************************/
+/* ENCODER - ENC_SET_PARAM (COMMON & CHANGE_PARAM) */
+/************************************************************************/
+#define W5_CMD_ENC_SEQ_SET_PARAM_OPTION (W5_REG_BASE + 0x104)
+#define W5_CMD_ENC_SEQ_SET_PARAM_ENABLE (W5_REG_BASE + 0x118)
+#define W5_CMD_ENC_SEQ_SRC_SIZE (W5_REG_BASE + 0x11C)
+#define W5_CMD_ENC_SEQ_CUSTOM_MAP_ENDIAN (W5_REG_BASE + 0x120)
+#define W5_CMD_ENC_SEQ_SPS_PARAM (W5_REG_BASE + 0x124)
+#define W5_CMD_ENC_SEQ_PPS_PARAM (W5_REG_BASE + 0x128)
+#define W5_CMD_ENC_SEQ_GOP_PARAM (W5_REG_BASE + 0x12C)
+#define W5_CMD_ENC_SEQ_INTRA_PARAM (W5_REG_BASE + 0x130)
+#define W5_CMD_ENC_SEQ_CONF_WIN_TOP_BOT (W5_REG_BASE + 0x134)
+#define W5_CMD_ENC_SEQ_CONF_WIN_LEFT_RIGHT (W5_REG_BASE + 0x138)
+#define W5_CMD_ENC_SEQ_RDO_PARAM (W5_REG_BASE + 0x13C)
+#define W5_CMD_ENC_SEQ_INDEPENDENT_SLICE (W5_REG_BASE + 0x140)
+#define W5_CMD_ENC_SEQ_DEPENDENT_SLICE (W5_REG_BASE + 0x144)
+#define W5_CMD_ENC_SEQ_INTRA_REFRESH (W5_REG_BASE + 0x148)
+#define W5_CMD_ENC_SEQ_INPUT_SRC_PARAM (W5_REG_BASE + 0x14C)
+
+#define W5_CMD_ENC_SEQ_RC_FRAME_RATE (W5_REG_BASE + 0x150)
+#define W5_CMD_ENC_SEQ_RC_TARGET_RATE (W5_REG_BASE + 0x154)
+#define W5_CMD_ENC_SEQ_RC_PARAM (W5_REG_BASE + 0x158)
+#define W5_CMD_ENC_SEQ_RC_MIN_MAX_QP (W5_REG_BASE + 0x15C)
+#define W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_0_3 (W5_REG_BASE + 0x160)
+#define W5_CMD_ENC_SEQ_RC_BIT_RATIO_LAYER_4_7 (W5_REG_BASE + 0x164)
+#define W5_CMD_ENC_SEQ_RC_INTER_MIN_MAX_QP (W5_REG_BASE + 0x168)
+#define W5_CMD_ENC_SEQ_RC_WEIGHT_PARAM (W5_REG_BASE + 0x16C)
+
+#define W5_CMD_ENC_SEQ_ROT_PARAM (W5_REG_BASE + 0x170)
+#define W5_CMD_ENC_SEQ_NUM_UNITS_IN_TICK (W5_REG_BASE + 0x174)
+#define W5_CMD_ENC_SEQ_TIME_SCALE (W5_REG_BASE + 0x178)
+#define W5_CMD_ENC_SEQ_NUM_TICKS_POC_DIFF_ONE (W5_REG_BASE + 0x17C)
+
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU04 (W5_REG_BASE + 0x184)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU08 (W5_REG_BASE + 0x188)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU16 (W5_REG_BASE + 0x18C)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_PU32 (W5_REG_BASE + 0x190)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_CU08 (W5_REG_BASE + 0x194)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_CU16 (W5_REG_BASE + 0x198)
+#define W5_CMD_ENC_SEQ_CUSTOM_MD_CU32 (W5_REG_BASE + 0x19C)
+#define W5_CMD_ENC_SEQ_NR_PARAM (W5_REG_BASE + 0x1A0)
+#define W5_CMD_ENC_SEQ_NR_WEIGHT (W5_REG_BASE + 0x1A4)
+#define W5_CMD_ENC_SEQ_BG_PARAM (W5_REG_BASE + 0x1A8)
+#define W5_CMD_ENC_SEQ_CUSTOM_LAMBDA_ADDR (W5_REG_BASE + 0x1AC)
+#define W5_CMD_ENC_SEQ_USER_SCALING_LIST_ADDR (W5_REG_BASE + 0x1B0)
+#define W5_CMD_ENC_SEQ_VUI_HRD_PARAM (W5_REG_BASE + 0x180)
+#define W5_CMD_ENC_SEQ_VUI_RBSP_ADDR (W5_REG_BASE + 0x1B8)
+#define W5_CMD_ENC_SEQ_HRD_RBSP_ADDR (W5_REG_BASE + 0x1BC)
+
+/************************************************************************/
+/* ENCODER - ENC_SET_PARAM (CUSTOM_GOP) */
+/************************************************************************/
+#define W5_CMD_ENC_CUSTOM_GOP_PARAM (W5_REG_BASE + 0x11C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_0 (W5_REG_BASE + 0x120)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_1 (W5_REG_BASE + 0x124)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_2 (W5_REG_BASE + 0x128)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_3 (W5_REG_BASE + 0x12C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_4 (W5_REG_BASE + 0x130)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_5 (W5_REG_BASE + 0x134)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_6 (W5_REG_BASE + 0x138)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_7 (W5_REG_BASE + 0x13C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_8 (W5_REG_BASE + 0x140)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_9 (W5_REG_BASE + 0x144)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_10 (W5_REG_BASE + 0x148)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_11 (W5_REG_BASE + 0x14C)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_12 (W5_REG_BASE + 0x150)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_13 (W5_REG_BASE + 0x154)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_14 (W5_REG_BASE + 0x158)
+#define W5_CMD_ENC_CUSTOM_GOP_PIC_PARAM_15 (W5_REG_BASE + 0x15C)
+
+/************************************************************************/
+/* ENCODER - ENC_PIC */
+/************************************************************************/
+#define W5_CMD_ENC_BS_START_ADDR (W5_REG_BASE + 0x118)
+#define W5_CMD_ENC_BS_SIZE (W5_REG_BASE + 0x11C)
+#define W5_CMD_ENC_PIC_USE_SEC_AXI (W5_REG_BASE + 0x124)
+#define W5_CMD_ENC_PIC_REPORT_PARAM (W5_REG_BASE + 0x128)
+
+#define W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_PARAM (W5_REG_BASE + 0x138)
+#define W5_CMD_ENC_PIC_CUSTOM_MAP_OPTION_ADDR (W5_REG_BASE + 0x13C)
+#define W5_CMD_ENC_PIC_SRC_PIC_IDX (W5_REG_BASE + 0x144)
+#define W5_CMD_ENC_PIC_SRC_ADDR_Y (W5_REG_BASE + 0x148)
+#define W5_CMD_ENC_PIC_SRC_ADDR_U (W5_REG_BASE + 0x14C)
+#define W5_CMD_ENC_PIC_SRC_ADDR_V (W5_REG_BASE + 0x150)
+#define W5_CMD_ENC_PIC_SRC_STRIDE (W5_REG_BASE + 0x154)
+#define W5_CMD_ENC_PIC_SRC_FORMAT (W5_REG_BASE + 0x158)
+#define W5_CMD_ENC_PIC_SRC_AXI_SEL (W5_REG_BASE + 0x160)
+#define W5_CMD_ENC_PIC_CODE_OPTION (W5_REG_BASE + 0x164)
+#define W5_CMD_ENC_PIC_PIC_PARAM (W5_REG_BASE + 0x168)
+#define W5_CMD_ENC_PIC_LONGTERM_PIC (W5_REG_BASE + 0x16C)
+#define W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_Y (W5_REG_BASE + 0x170)
+#define W5_CMD_ENC_PIC_WP_PIXEL_SIGMA_C (W5_REG_BASE + 0x174)
+#define W5_CMD_ENC_PIC_WP_PIXEL_MEAN_Y (W5_REG_BASE + 0x178)
+#define W5_CMD_ENC_PIC_WP_PIXEL_MEAN_C (W5_REG_BASE + 0x17C)
+#define W5_CMD_ENC_PIC_CF50_Y_OFFSET_TABLE_ADDR (W5_REG_BASE + 0x190)
+#define W5_CMD_ENC_PIC_CF50_CB_OFFSET_TABLE_ADDR (W5_REG_BASE + 0x194)
+#define W5_CMD_ENC_PIC_CF50_CR_OFFSET_TABLE_ADDR (W5_REG_BASE + 0x198)
+#define W5_CMD_ENC_PIC_PREFIX_SEI_NAL_ADDR (W5_REG_BASE + 0x180)
+#define W5_CMD_ENC_PIC_PREFIX_SEI_INFO (W5_REG_BASE + 0x184)
+#define W5_CMD_ENC_PIC_SUFFIX_SEI_NAL_ADDR (W5_REG_BASE + 0x188)
+#define W5_CMD_ENC_PIC_SUFFIX_SEI_INFO (W5_REG_BASE + 0x18C)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_RESULT) */
+/************************************************************************/
+#define W5_RET_ENC_NUM_REQUIRED_FB (W5_REG_BASE + 0x11C)
+#define W5_RET_ENC_MIN_SRC_BUF_NUM (W5_REG_BASE + 0x120)
+#define W5_RET_ENC_PIC_TYPE (W5_REG_BASE + 0x124)
+/*
+ * #define W5_RET_ENC_PIC_POC (W5_REG_BASE + 0x128)
+ * => picture order count value of current encoded picture
+ */
+#define W5_RET_ENC_PIC_IDX (W5_REG_BASE + 0x12C)
+/*
+ * #define W5_RET_ENC_PIC_SLICE_NUM (W5_REG_BASE + 0x130)
+ * reg_val & 0xffff = total independent slice segment number (16 bits)
+ * (reg_val >> 16) & 0xffff = total dependent slice segment number (16 bits)
+ *
+ * #define W5_RET_ENC_PIC_SKIP (W5_REG_BASE + 0x134)
+ * reg_val & 0xfe = picture skip flag (7 bits)
+ *
+ * #define W5_RET_ENC_PIC_NUM_INTRA (W5_REG_BASE + 0x138)
+ * => number of intra blocks in 8x8 (32 bits)
+ *
+ * #define W5_RET_ENC_PIC_NUM_MERGE (W5_REG_BASE + 0x13C)
+ * => number of merge blocks in 8x8 (32 bits)
+ *
+ * #define W5_RET_ENC_PIC_NUM_SKIP (W5_REG_BASE + 0x144)
+ * => number of skip blocks in 8x8 (32 bits)
+ *
+ * #define W5_RET_ENC_PIC_AVG_CTU_QP (W5_REG_BASE + 0x148)
+ * => Average CTU QP value (32 bits)
+ */
+#define W5_RET_ENC_PIC_BYTE (W5_REG_BASE + 0x14C)
+/*
+ * #define W5_RET_ENC_GOP_PIC_IDX (W5_REG_BASE + 0x150)
+ * => picture index in group of pictures
+ */
+#define W5_RET_ENC_USED_SRC_IDX (W5_REG_BASE + 0x154)
+/*
+ * #define W5_RET_ENC_PIC_NUM (W5_REG_BASE + 0x158)
+ * => encoded picture number
+ */
+#define W5_RET_ENC_VCL_NUT (W5_REG_BASE + 0x15C)
+/*
+ * Only for H264:
+ * #define W5_RET_ENC_PIC_DIST_LOW (W5_REG_BASE + 0x164)
+ * => lower 32 bits of the sum of squared difference between source Y picture
+ * and reconstructed Y picture
+ * #define W5_RET_ENC_PIC_DIST_HIGH (W5_REG_BASE + 0x168)
+ * => upper 32 bits of the sum of squared difference between source Y picture
+ * and reconstructed Y picture
+ */
+#define W5_RET_ENC_PIC_MAX_LATENCY_PICS (W5_REG_BASE + 0x16C)
+
+#define W5_RET_ENC_HOST_CMD_TICK (W5_REG_BASE + 0x1B8)
+/*
+ * #define W5_RET_ENC_PREPARE_START_TICK (W5_REG_BASE + 0x1BC)
+ * #define W5_RET_ENC_PREPARE_END_TICK (W5_REG_BASE + 0x1C0)
+ * => Start and end ticks for preparing slices of the picture
+ * #define W5_RET_ENC_PROCESSING_START_TICK (W5_REG_BASE + 0x1C4)
+ * #define W5_RET_ENC_PROCESSING_END_TICK (W5_REG_BASE + 0x1C8)
+ * => Start and end ticks for processing slices of the picture
+ * #define W5_RET_ENC_ENCODING_START_TICK (W5_REG_BASE + 0x1CC)
+ * => Start tick for encoding slices of the picture
+ */
+#define W5_RET_ENC_ENCODING_END_TICK (W5_REG_BASE + 0x1D0)
+
+#define W5_RET_ENC_WARN_INFO (W5_REG_BASE + 0x1D4)
+#define W5_RET_ENC_ERR_INFO (W5_REG_BASE + 0x1D8)
+#define W5_RET_ENC_ENCODING_SUCCESS (W5_REG_BASE + 0x1DC)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_BS_WR_PTR) */
+/************************************************************************/
+#define W5_RET_ENC_RD_PTR (W5_REG_BASE + 0x114)
+#define W5_RET_ENC_WR_PTR (W5_REG_BASE + 0x118)
+#define W5_CMD_ENC_REASON_SEL (W5_REG_BASE + 0x11C)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_BW_REPORT) */
+/************************************************************************/
+#define RET_QUERY_BW_PRP_AXI_READ (W5_REG_BASE + 0x118)
+#define RET_QUERY_BW_PRP_AXI_WRITE (W5_REG_BASE + 0x11C)
+#define RET_QUERY_BW_FBD_Y_AXI_READ (W5_REG_BASE + 0x120)
+#define RET_QUERY_BW_FBC_Y_AXI_WRITE (W5_REG_BASE + 0x124)
+#define RET_QUERY_BW_FBD_C_AXI_READ (W5_REG_BASE + 0x128)
+#define RET_QUERY_BW_FBC_C_AXI_WRITE (W5_REG_BASE + 0x12C)
+#define RET_QUERY_BW_PRI_AXI_READ (W5_REG_BASE + 0x130)
+#define RET_QUERY_BW_PRI_AXI_WRITE (W5_REG_BASE + 0x134)
+#define RET_QUERY_BW_SEC_AXI_READ (W5_REG_BASE + 0x138)
+#define RET_QUERY_BW_SEC_AXI_WRITE (W5_REG_BASE + 0x13C)
+#define RET_QUERY_BW_PROC_AXI_READ (W5_REG_BASE + 0x140)
+#define RET_QUERY_BW_PROC_AXI_WRITE (W5_REG_BASE + 0x144)
+#define RET_QUERY_BW_BWB_AXI_WRITE (W5_REG_BASE + 0x148)
+#define W5_CMD_BW_OPTION (W5_REG_BASE + 0x14C)
+
+/************************************************************************/
+/* ENCODER - QUERY (GET_SRC_FLAG) */
+/************************************************************************/
+#define W5_RET_RELEASED_SRC_INSTANCE (W5_REG_BASE + 0x1EC)
+
+#define W5_ENC_PIC_SUB_FRAME_SYNC_IF (W5_REG_BASE + 0x0300)
+
+#endif /* __WAVE5_REGISTER_DEFINE_H__ */
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vdi.c b/drivers/media/platform/chips-media/wave5/wave5-vdi.c
new file mode 100644
index 000000000..3809f70bc
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vdi.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - low level access functions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#include <linux/bug.h>
+#include "wave5-vdi.h"
+#include "wave5-vpu.h"
+#include "wave5-regdefine.h"
+#include <linux/delay.h>
+
+static int wave5_vdi_allocate_common_memory(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ if (!vpu_dev->common_mem.vaddr) {
+ int ret;
+
+ vpu_dev->common_mem.size = SIZE_COMMON;
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vpu_dev->common_mem);
+ if (ret) {
+ dev_err(dev, "unable to allocate common buffer\n");
+ return ret;
+ }
+ }
+
+ dev_dbg(dev, "[VDI] common_mem: daddr=%pad size=%zu vaddr=0x%p\n",
+ &vpu_dev->common_mem.daddr, vpu_dev->common_mem.size, vpu_dev->common_mem.vaddr);
+
+ return 0;
+}
+
+int wave5_vdi_init(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = wave5_vdi_allocate_common_memory(dev);
+ if (ret < 0) {
+ dev_err(dev, "[VDI] failed to get vpu common buffer from driver\n");
+ return ret;
+ }
+
+ if (!PRODUCT_CODE_W_SERIES(vpu_dev->product_code)) {
+ WARN_ONCE(1, "unsupported product code: 0x%x\n", vpu_dev->product_code);
+ return -EOPNOTSUPP;
+ }
+
+ /* If the BIT processor is not running, clear the host command registers. */
+ if (wave5_vdi_read_register(vpu_dev, W5_VCPU_CUR_PC) == 0) {
+ int i;
+
+ for (i = 0; i < 64; i++)
+ wave5_vdi_write_register(vpu_dev, (i * 4) + 0x100, 0x0);
+ }
+
+ dev_dbg(dev, "[VDI] driver initialized successfully\n");
+
+ return 0;
+}
+
+int wave5_vdi_release(struct device *dev)
+{
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ vpu_dev->vdb_register = NULL;
+ wave5_vdi_free_dma_memory(vpu_dev, &vpu_dev->common_mem);
+
+ return 0;
+}
+
+void wave5_vdi_write_register(struct vpu_device *vpu_dev, u32 addr, u32 data)
+{
+ writel(data, vpu_dev->vdb_register + addr);
+}
+
+unsigned int wave5_vdi_read_register(struct vpu_device *vpu_dev, u32 addr)
+{
+ return readl(vpu_dev->vdb_register + addr);
+}
+
+int wave5_vdi_clear_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
+{
+ if (!vb || !vb->vaddr) {
+ dev_err(vpu_dev->dev, "%s: unable to clear unmapped buffer\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(vb->vaddr, 0, vb->size);
+ return vb->size;
+}
+
+int wave5_vdi_write_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb, size_t offset,
+ u8 *data, size_t len)
+{
+ if (!vb || !vb->vaddr) {
+ dev_err(vpu_dev->dev, "%s: unable to write to unmapped buffer\n", __func__);
+ return -EINVAL;
+ }
+
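+ /* Checking offset and len individually also guards against offset + len wrapping around. */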
+ if (offset > vb->size || len > vb->size || offset + len > vb->size) {
+ dev_err(vpu_dev->dev, "%s: buffer too small\n", __func__);
+ return -ENOSPC;
+ }
+
+ memcpy(vb->vaddr + offset, data, len);
+
+ return len;
+}
+
+int wave5_vdi_allocate_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
+{
+ void *vaddr;
+ dma_addr_t daddr;
+
+ if (!vb->size) {
+ dev_err(vpu_dev->dev, "%s: requested size==0\n", __func__);
+ return -EINVAL;
+ }
+
+ vaddr = dma_alloc_coherent(vpu_dev->dev, vb->size, &daddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+ vb->vaddr = vaddr;
+ vb->daddr = daddr;
+
+ return 0;
+}
+
+int wave5_vdi_free_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb)
+{
+ if (vb->size == 0)
+ return -EINVAL;
+
+ if (!vb->vaddr)
+ dev_err(vpu_dev->dev, "%s: requested free of unmapped buffer\n", __func__);
+ else
+ dma_free_coherent(vpu_dev->dev, vb->size, vb->vaddr, vb->daddr);
+
+ memset(vb, 0, sizeof(*vb));
+
+ return 0;
+}
+
+int wave5_vdi_allocate_array(struct vpu_device *vpu_dev, struct vpu_buf *array, unsigned int count,
+ size_t size)
+{
+ struct vpu_buf vb_buf;
+ int i, ret = 0;
+
+ vb_buf.size = size;
+
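+ /* Reallocate only the entries whose size changed; any extras are freed below. */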
+ for (i = 0; i < count; i++) {
+ if (array[i].size == size)
+ continue;
+
+ if (array[i].size != 0)
+ wave5_vdi_free_dma_memory(vpu_dev, &array[i]);
+
+ ret = wave5_vdi_allocate_dma_memory(vpu_dev, &vb_buf);
+ if (ret)
+ return -ENOMEM;
+ array[i] = vb_buf;
+ }
+
+ for (i = count; i < MAX_REG_FRAME; i++)
+ wave5_vdi_free_dma_memory(vpu_dev, &array[i]);
+
+ return 0;
+}
+
+void wave5_vdi_allocate_sram(struct vpu_device *vpu_dev)
+{
+ struct vpu_buf *vb = &vpu_dev->sram_buf;
+
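+ /* The dedicated SRAM pool is optional; skip silently when it is absent. */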
+ if (!vpu_dev->sram_pool || !vpu_dev->sram_size)
+ return;
+
+ if (!vb->vaddr) {
+ vb->size = vpu_dev->sram_size;
+ vb->vaddr = gen_pool_dma_alloc(vpu_dev->sram_pool, vb->size,
+ &vb->daddr);
+ if (!vb->vaddr)
+ vb->size = 0;
+ }
+
+ dev_dbg(vpu_dev->dev, "%s: sram daddr: %pad, size: %zu, vaddr: 0x%p\n",
+ __func__, &vb->daddr, vb->size, vb->vaddr);
+}
+
+void wave5_vdi_free_sram(struct vpu_device *vpu_dev)
+{
+ struct vpu_buf *vb = &vpu_dev->sram_buf;
+
+ if (!vb->size || !vb->vaddr)
+ return;
+
+ gen_pool_free(vpu_dev->sram_pool, (unsigned long)vb->vaddr,
+ vb->size);
+
+ memset(vb, 0, sizeof(*vb));
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vdi.h b/drivers/media/platform/chips-media/wave5/wave5-vdi.h
new file mode 100644
index 000000000..3984ef3f1
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vdi.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - low level access functions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef _VDI_H_
+#define _VDI_H_
+
+#include "wave5-vpuconfig.h"
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+/************************************************************************/
+/* COMMON REGISTERS */
+/************************************************************************/
+#define VPU_PRODUCT_CODE_REGISTER 0x1044
+
+/* system register write */
+#define vpu_write_reg(VPU_INST, ADDR, DATA) wave5_vdi_write_register(VPU_INST, ADDR, DATA)
+/* system register read */
+#define vpu_read_reg(VPU_INST, ADDR) wave5_vdi_read_register(VPU_INST, ADDR)
+
+struct vpu_buf {
+ size_t size;
+ dma_addr_t daddr;
+ void *vaddr;
+};
+
+int wave5_vdi_init(struct device *dev);
+/* This function may be called only at system shutdown. */
+int wave5_vdi_release(struct device *dev);
+
+#endif /* _VDI_H_ */
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
new file mode 100644
index 000000000..ef227af72
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -0,0 +1,1932 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - decoder interface
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#include "wave5-helper.h"
+
+#define VPU_DEC_DEV_NAME "C&M Wave5 VPU decoder"
+#define VPU_DEC_DRV_NAME "wave5-dec"
+
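+/*
+ * Default size of a compressed source buffer: 3/8 byte per pixel, i.e. a
+ * quarter of the corresponding raw 4:2:0 frame size.
+ */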
+#define DEFAULT_SRC_SIZE(width, height) ({ \
+ (width) * (height) / 8 * 3; \
+})
+
+static const struct vpu_format dec_fmt_list[FMT_TYPES][MAX_FMTS] = {
+ [VPU_FMT_TYPE_CODEC] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_HEVC,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_H264,
+ .max_width = 8192,
+ .min_width = 32,
+ .max_height = 4320,
+ .min_height = 32,
+ },
+ },
+ [VPU_FMT_TYPE_RAW] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV422P,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV16,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV61,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV422M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV16M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV61M,
+ .max_width = 8192,
+ .min_width = 8,
+ .max_height = 4320,
+ .min_height = 8,
+ },
+ }
+};
+
+/*
+ * Make sure that the state switch is allowed and add logging for debugging
+ * purposes.
+ */
+static int switch_state(struct vpu_instance *inst, enum vpu_instance_state state)
+{
+ switch (state) {
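+ /*
+ * Note: the break below intentionally falls through to the
+ * invalid_state_switch label; switching back to NONE is not allowed.
+ */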
+ case VPU_INST_STATE_NONE:
+ break;
+ case VPU_INST_STATE_OPEN:
+ if (inst->state != VPU_INST_STATE_NONE)
+ goto invalid_state_switch;
+ goto valid_state_switch;
+ case VPU_INST_STATE_INIT_SEQ:
+ if (inst->state != VPU_INST_STATE_OPEN && inst->state != VPU_INST_STATE_STOP)
+ goto invalid_state_switch;
+ goto valid_state_switch;
+ case VPU_INST_STATE_PIC_RUN:
+ if (inst->state != VPU_INST_STATE_INIT_SEQ)
+ goto invalid_state_switch;
+ goto valid_state_switch;
+ case VPU_INST_STATE_STOP:
+ goto valid_state_switch;
+ }
+invalid_state_switch:
+ WARN(1, "Invalid state switch from %s to %s.\n",
+ state_to_str(inst->state), state_to_str(state));
+ return -EINVAL;
+valid_state_switch:
+ dev_dbg(inst->dev->dev, "Switch state from %s to %s.\n",
+ state_to_str(inst->state), state_to_str(state));
+ inst->state = state;
+ return 0;
+}
+
+static int wave5_vpu_dec_set_eos_on_firmware(struct vpu_instance *inst)
+{
+ int ret;
+
+ ret = wave5_vpu_dec_update_bitstream_buffer(inst, 0);
+ if (ret) {
+ /*
+ * To set the EOS flag, a command is sent to the firmware.
+ * That command may never return (timeout) or may report an error.
+ */
+ dev_err(inst->dev->dev,
+ "Setting EOS for the bitstream, fail: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static bool wave5_last_src_buffer_consumed(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct vpu_src_buffer *vpu_buf;
+
+ if (!m2m_ctx->last_src_buf)
+ return false;
+
+ vpu_buf = wave5_to_vpu_src_buf(m2m_ctx->last_src_buf);
+ return vpu_buf->consumed;
+}
+
+static void wave5_handle_src_buffer(struct vpu_instance *inst, dma_addr_t rd_ptr)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct v4l2_m2m_buffer *buf, *n;
+ size_t consumed_bytes = 0;
+
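+ /*
+ * The bitstream buffer is a ring: a read pointer below the last one
+ * means the hardware wrapped around the end of the buffer.
+ */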
+ if (rd_ptr >= inst->last_rd_ptr) {
+ consumed_bytes = rd_ptr - inst->last_rd_ptr;
+ } else {
+ size_t rd_offs = rd_ptr - inst->bitstream_vbuf.daddr;
+ size_t last_rd_offs = inst->last_rd_ptr - inst->bitstream_vbuf.daddr;
+
+ consumed_bytes = rd_offs + (inst->bitstream_vbuf.size - last_rd_offs);
+ }
+
+ inst->last_rd_ptr = rd_ptr;
+ consumed_bytes += inst->remaining_consumed_bytes;
+
+ dev_dbg(inst->dev->dev, "%s: %zu bytes of bitstream were consumed", __func__,
+ consumed_bytes);
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ struct vb2_v4l2_buffer *src_buf = &buf->vb;
+ size_t src_size = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+
+ if (src_size > consumed_bytes)
+ break;
+
+ dev_dbg(inst->dev->dev, "%s: removing src buffer %i",
+ __func__, src_buf->vb2_buf.index);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ inst->timestamp = src_buf->vb2_buf.timestamp;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ consumed_bytes -= src_size;
+
+ /* Handle the case where the last bitstream buffer has been picked */
+ if (src_buf == m2m_ctx->last_src_buf) {
+ int ret;
+
+ m2m_ctx->last_src_buf = NULL;
+ ret = wave5_vpu_dec_set_eos_on_firmware(inst);
+ if (ret)
+ dev_warn(inst->dev->dev,
+ "Setting EOS for the bitstream, fail: %d\n", ret);
+ break;
+ }
+ }
+
+ inst->remaining_consumed_bytes = consumed_bytes;
+}
+
+static void wave5_update_pix_fmt(struct v4l2_pix_format_mplane *pix_mp, unsigned int width,
+ unsigned int height)
+{
+ switch (pix_mp->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height * 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[1].sizeimage = width * height / 4;
+ pix_mp->plane_fmt[2].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[2].sizeimage = width * height / 4;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[1].sizeimage = width * height / 2;
+ break;
+ case V4L2_PIX_FMT_YUV422M:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[1].sizeimage = width * height / 2;
+ pix_mp->plane_fmt[2].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[2].sizeimage = width * height / 2;
+ break;
+ case V4L2_PIX_FMT_NV16M:
+ case V4L2_PIX_FMT_NV61M:
+ pix_mp->width = round_up(width, 32);
+ pix_mp->height = round_up(height, 16);
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = width * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[1].sizeimage = width * height;
+ break;
+ default:
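+ /* Compressed formats keep their size, bounded below by the default heuristic. */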
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage = max(DEFAULT_SRC_SIZE(width, height),
+ pix_mp->plane_fmt[0].sizeimage);
+ break;
+ }
+}
+
+static int start_decode(struct vpu_instance *inst, u32 *fail_res)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret = 0;
+
+ ret = wave5_vpu_dec_start_one_frame(inst, fail_res);
+ if (ret) {
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ if (src_buf)
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ switch_state(inst, VPU_INST_STATE_STOP);
+
+ dev_dbg(inst->dev->dev, "%s: pic run failed / finish job", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ }
+
+ return ret;
+}
+
+static void flag_last_buffer_done(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_v4l2_buffer *vb;
+ int i;
+
+ lockdep_assert_held(&inst->state_spinlock);
+
+ vb = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!vb) {
+ m2m_ctx->is_draining = true;
+ m2m_ctx->next_buf_last = true;
+ return;
+ }
+
+ for (i = 0; i < vb->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&vb->vb2_buf, i, 0);
+ vb->field = V4L2_FIELD_NONE;
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, vb);
+}
+
+static void send_eos_event(struct vpu_instance *inst)
+{
+ static const struct v4l2_event vpu_event_eos = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ lockdep_assert_held(&inst->state_spinlock);
+
+ v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_eos);
+ inst->eos = false;
+}
+
+static int handle_dynamic_resolution_change(struct vpu_instance *inst)
+{
+ struct v4l2_fh *fh = &inst->v4l2_fh;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ static const struct v4l2_event vpu_event_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ struct dec_initial_info *initial_info = &inst->codec_info->dec_info.initial_info;
+
+ lockdep_assert_held(&inst->state_spinlock);
+
+ dev_dbg(inst->dev->dev, "%s: rd_ptr %pad", __func__, &initial_info->rd_ptr);
+
+ dev_dbg(inst->dev->dev, "%s: width: %u height: %u profile: %u | minbuffer: %u\n",
+ __func__, initial_info->pic_width, initial_info->pic_height,
+ initial_info->profile, initial_info->min_frame_buffer_count);
+
+ inst->needs_reallocation = true;
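+ /* Keep one framebuffer of headroom over the firmware-reported minimum. */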
+ inst->fbc_buf_count = initial_info->min_frame_buffer_count + 1;
+ if (inst->fbc_buf_count != v4l2_m2m_num_dst_bufs_ready(m2m_ctx)) {
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&inst->v4l2_ctrl_hdl,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->fbc_buf_count);
+ }
+
+ if (p_dec_info->initial_info_obtained) {
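+ /* Derive the visible rectangle from the coded size and the crop offsets. */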
+ inst->conf_win.left = initial_info->pic_crop_rect.left;
+ inst->conf_win.top = initial_info->pic_crop_rect.top;
+ inst->conf_win.width = initial_info->pic_width -
+ initial_info->pic_crop_rect.left - initial_info->pic_crop_rect.right;
+ inst->conf_win.height = initial_info->pic_height -
+ initial_info->pic_crop_rect.top - initial_info->pic_crop_rect.bottom;
+
+ wave5_update_pix_fmt(&inst->src_fmt, initial_info->pic_width,
+ initial_info->pic_height);
+ wave5_update_pix_fmt(&inst->dst_fmt, initial_info->pic_width,
+ initial_info->pic_height);
+ }
+
+ v4l2_event_queue_fh(fh, &vpu_event_src_ch);
+
+ return 0;
+}
+
+static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct dec_output_info dec_info;
+ int ret;
+ struct vb2_v4l2_buffer *dec_buf = NULL;
+ struct vb2_v4l2_buffer *disp_buf = NULL;
+ struct vb2_queue *dst_vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ struct queue_status_info q_status;
+
+ dev_dbg(inst->dev->dev, "%s: Fetch output info from firmware.", __func__);
+
+ ret = wave5_vpu_dec_get_output_info(inst, &dec_info);
+ if (ret) {
+ dev_warn(inst->dev->dev, "%s: could not get output info.", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ return;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: rd_ptr %pad wr_ptr %pad", __func__, &dec_info.rd_ptr,
+ &dec_info.wr_ptr);
+ wave5_handle_src_buffer(inst, dec_info.rd_ptr);
+
+ dev_dbg(inst->dev->dev, "%s: dec_info dec_idx %i disp_idx %i", __func__,
+ dec_info.index_frame_decoded, dec_info.index_frame_display);
+
+ if (!vb2_is_streaming(dst_vq)) {
+ dev_dbg(inst->dev->dev, "%s: capture is not streaming.", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ return;
+ }
+
+ /*
+ * Remove the decoded buffer from the ready queue now that it has been
+ * decoded.
+ */
+ if (dec_info.index_frame_decoded >= 0) {
+ struct vb2_buffer *vb = vb2_get_buffer(dst_vq,
+ dec_info.index_frame_decoded);
+ if (vb) {
+ dec_buf = to_vb2_v4l2_buffer(vb);
+ dec_buf->vb2_buf.timestamp = inst->timestamp;
+ } else {
+ dev_warn(inst->dev->dev, "%s: invalid decoded frame index %i",
+ __func__, dec_info.index_frame_decoded);
+ }
+ }
+
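+ /*
+ * Decode order and display order may differ (e.g. with B-frames), so
+ * the firmware reports the displayable index separately.
+ */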
+ if (dec_info.index_frame_display >= 0) {
+ disp_buf = v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, dec_info.index_frame_display);
+ if (!disp_buf)
+ dev_warn(inst->dev->dev, "%s: invalid display frame index %i",
+ __func__, dec_info.index_frame_display);
+ }
+
+ /* If there is anything to display, do that now */
+ if (disp_buf) {
+ struct vpu_dst_buffer *dst_vpu_buf = wave5_to_vpu_dst_buf(disp_buf);
+
+ if (inst->dst_fmt.num_planes == 1) {
+ vb2_set_plane_payload(&disp_buf->vb2_buf, 0,
+ inst->dst_fmt.plane_fmt[0].sizeimage);
+ } else if (inst->dst_fmt.num_planes == 2) {
+ vb2_set_plane_payload(&disp_buf->vb2_buf, 0,
+ inst->dst_fmt.plane_fmt[0].sizeimage);
+ vb2_set_plane_payload(&disp_buf->vb2_buf, 1,
+ inst->dst_fmt.plane_fmt[1].sizeimage);
+ } else if (inst->dst_fmt.num_planes == 3) {
+ vb2_set_plane_payload(&disp_buf->vb2_buf, 0,
+ inst->dst_fmt.plane_fmt[0].sizeimage);
+ vb2_set_plane_payload(&disp_buf->vb2_buf, 1,
+ inst->dst_fmt.plane_fmt[1].sizeimage);
+ vb2_set_plane_payload(&disp_buf->vb2_buf, 2,
+ inst->dst_fmt.plane_fmt[2].sizeimage);
+ }
+
+ /* TODO implement interlace support */
+ disp_buf->field = V4L2_FIELD_NONE;
+ dst_vpu_buf->display = true;
+ v4l2_m2m_buf_done(disp_buf, VB2_BUF_STATE_DONE);
+
+ dev_dbg(inst->dev->dev, "%s: frame_cycle %8u (payload %lu)\n",
+ __func__, dec_info.frame_cycle,
+ vb2_get_plane_payload(&disp_buf->vb2_buf, 0));
+ }
+
+ if ((dec_info.index_frame_display == DISPLAY_IDX_FLAG_SEQ_END ||
+ dec_info.sequence_changed)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&inst->state_spinlock, flags);
+ if (!v4l2_m2m_has_stopped(m2m_ctx)) {
+ switch_state(inst, VPU_INST_STATE_STOP);
+
+ if (dec_info.sequence_changed)
+ handle_dynamic_resolution_change(inst);
+ else
+ send_eos_event(inst);
+
+ flag_last_buffer_done(inst);
+ }
+ spin_unlock_irqrestore(&inst->state_spinlock, flags);
+ }
+
+ /*
+ * During a resolution change and while draining, the firmware may flush
+ * the reorder queue regardless of having a matching decoding operation
+ * pending. Only terminate the job if there are no more IRQs coming.
+ */
+ wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
+ if (q_status.report_queue_count == 0 &&
+ (q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
+ dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ }
+}
+
+static int wave5_vpu_dec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, VPU_DEC_DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VPU_DEC_DRV_NAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int wave5_vpu_dec_enum_framesizes(struct file *f, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+ const struct vpu_format *vpu_fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, dec_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, dec_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = vpu_fmt->min_width;
+ fsize->stepwise.max_width = vpu_fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = vpu_fmt->min_height;
+ fsize->stepwise.max_height = vpu_fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_enum_fmt_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ const struct vpu_format *vpu_fmt;
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, dec_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ const struct vpu_format *vpu_fmt;
+ int width, height;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u colorspace: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.colorspace, f->fmt.pix_mp.field);
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, dec_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt) {
+ width = inst->dst_fmt.width;
+ height = inst->dst_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ } else {
+ const struct v4l2_format_info *info = v4l2_format_info(vpu_fmt->v4l2_pix_fmt);
+
+ width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = info->mem_planes;
+ }
+
+ if (p_dec_info->initial_info_obtained) {
+ width = inst->dst_fmt.width;
+ height = inst->dst_fmt.height;
+ }
+
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u colorspace: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.colorspace, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_dec_try_fmt_cap(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->dst_fmt.width = f->fmt.pix_mp.width;
+ inst->dst_fmt.height = f->fmt.pix_mp.height;
+ inst->dst_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->dst_fmt.field = f->fmt.pix_mp.field;
+ inst->dst_fmt.flags = f->fmt.pix_mp.flags;
+ inst->dst_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->dst_fmt.num_planes; i++) {
+ inst->dst_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->dst_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV12 ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV12M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = false;
+ inst->output_format = FORMAT_420;
+ } else if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV21 ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV21M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = true;
+ inst->output_format = FORMAT_420;
+ } else if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV16 ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV16M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = false;
+ inst->output_format = FORMAT_422;
+ } else if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV61 ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_NV61M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = true;
+ inst->output_format = FORMAT_422;
+ } else if (inst->dst_fmt.pixelformat == V4L2_PIX_FMT_YUV422P ||
+ inst->dst_fmt.pixelformat == V4L2_PIX_FMT_YUV422M) {
+ inst->cbcr_interleave = false;
+ inst->nv21 = false;
+ inst->output_format = FORMAT_422;
+ } else {
+ inst->cbcr_interleave = false;
+ inst->nv21 = false;
+ inst->output_format = FORMAT_420;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_dec_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i;
+
+ f->fmt.pix_mp.width = inst->dst_fmt.width;
+ f->fmt.pix_mp.height = inst->dst_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.field = inst->dst_fmt.field;
+ f->fmt.pix_mp.flags = inst->dst_fmt.flags;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline = inst->dst_fmt.plane_fmt[i].bytesperline;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage = inst->dst_fmt.plane_fmt[i].sizeimage;
+ }
+
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, dec_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = V4L2_FMT_FLAG_DYN_RESOLUTION | V4L2_FMT_FLAG_COMPRESSED;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u colorspace: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.colorspace, f->fmt.pix_mp.field);
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, dec_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->src_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->src_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->src_fmt.width, inst->src_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = 1;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev,
+ "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_dec_try_fmt_out(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->std = wave5_to_vpu_std(f->fmt.pix_mp.pixelformat, inst->type);
+ if (inst->std == STD_UNKNOWN) {
+ dev_warn(inst->dev->dev, "unsupported pixelformat: %.4s\n",
+ (char *)&f->fmt.pix_mp.pixelformat);
+ return -EINVAL;
+ }
+
+ inst->src_fmt.width = f->fmt.pix_mp.width;
+ inst->src_fmt.height = f->fmt.pix_mp.height;
+ inst->src_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->src_fmt.field = f->fmt.pix_mp.field;
+ inst->src_fmt.flags = f->fmt.pix_mp.flags;
+ inst->src_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->src_fmt.num_planes; i++) {
+ inst->src_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->src_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ inst->colorspace = f->fmt.pix_mp.colorspace;
+ inst->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ inst->quantization = f->fmt.pix_mp.quantization;
+ inst->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ wave5_update_pix_fmt(&inst->dst_fmt, f->fmt.pix_mp.width, f->fmt.pix_mp.height);
+
+ return 0;
+}
+
+static int wave5_vpu_dec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ if (inst->state > VPU_INST_STATE_OPEN) {
+ s->r = inst->conf_win;
+ } else {
+ s->r.width = inst->src_fmt.width;
+ s->r.height = inst->src_fmt.height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_dec_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (s->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ dev_dbg(inst->dev->dev, "V4L2_SEL_TGT_COMPOSE w: %u h: %u\n",
+ s->r.width, s->r.height);
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_stop(struct vpu_instance *inst)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ spin_lock_irqsave(&inst->state_spinlock, flags);
+
+ if (m2m_ctx->is_draining) {
+ ret = -EBUSY;
+ goto unlock_and_return;
+ }
+
+ if (inst->state != VPU_INST_STATE_NONE) {
+ /*
+ * Temporarily release the state_spinlock so that the following
+ * call, which may sleep on a mutex, is not made while holding
+ * a spinlock.
+ */
+ spin_unlock_irqrestore(&inst->state_spinlock, flags);
+ ret = wave5_vpu_dec_set_eos_on_firmware(inst);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&inst->state_spinlock, flags);
+ /*
+ * TODO eliminate this check by using a separate check for
+ * draining triggered by a resolution change.
+ */
+ if (m2m_ctx->is_draining) {
+ ret = -EBUSY;
+ goto unlock_and_return;
+ }
+ }
+
+ /*
+ * Used to remember the EOS state after the streamoff/on transition on
+ * the capture queue.
+ */
+ inst->eos = true;
+
+ if (m2m_ctx->has_stopped)
+ goto unlock_and_return;
+
+ m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
+ m2m_ctx->is_draining = true;
+
+ /*
+ * Defer handling to device_run in case the buffer is not in the
+ * ring buffer yet. Otherwise, we have to send the EOS signal to
+ * the firmware now, so that any pending PIC_RUN ends without a
+ * new bitstream buffer.
+ */
+ if (m2m_ctx->last_src_buf)
+ goto unlock_and_return;
+
+ if (inst->state == VPU_INST_STATE_NONE) {
+ send_eos_event(inst);
+ flag_last_buffer_done(inst);
+ }
+
+unlock_and_return:
+ spin_unlock_irqrestore(&inst->state_spinlock, flags);
+ return ret;
+}
+
+static int wave5_vpu_dec_start(struct vpu_instance *inst)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_queue *dst_vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ spin_lock_irqsave(&inst->state_spinlock, flags);
+
+ if (m2m_ctx->is_draining) {
+ ret = -EBUSY;
+ goto unlock_and_return;
+ }
+
+ m2m_ctx->has_stopped = false;
+
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ inst->eos = false;
+
+unlock_and_return:
+ spin_unlock_irqrestore(&inst->state_spinlock, flags);
+ return ret;
+}
+
+static int wave5_vpu_dec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret;
+
+ dev_dbg(inst->dev->dev, "decoder command: %u\n", dc->cmd);
+
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
+ if (ret)
+ return ret;
+
+ switch (dc->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ ret = wave5_vpu_dec_stop(inst);
+ /* Just in case we don't have anything to decode anymore */
+ v4l2_m2m_try_schedule(m2m_ctx);
+ break;
+ case V4L2_DEC_CMD_START:
+ ret = wave5_vpu_dec_start(inst);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops wave5_vpu_dec_ioctl_ops = {
+ .vidioc_querycap = wave5_vpu_dec_querycap,
+ .vidioc_enum_framesizes = wave5_vpu_dec_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = wave5_vpu_dec_enum_fmt_cap,
+ .vidioc_s_fmt_vid_cap_mplane = wave5_vpu_dec_s_fmt_cap,
+ .vidioc_g_fmt_vid_cap_mplane = wave5_vpu_dec_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap_mplane = wave5_vpu_dec_try_fmt_cap,
+
+ .vidioc_enum_fmt_vid_out = wave5_vpu_dec_enum_fmt_out,
+ .vidioc_s_fmt_vid_out_mplane = wave5_vpu_dec_s_fmt_out,
+ .vidioc_g_fmt_vid_out_mplane = wave5_vpu_g_fmt_out,
+ .vidioc_try_fmt_vid_out_mplane = wave5_vpu_dec_try_fmt_out,
+
+ .vidioc_g_selection = wave5_vpu_dec_g_selection,
+ .vidioc_s_selection = wave5_vpu_dec_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ /*
+ * Firmware does not support CREATE_BUFS for CAPTURE queue. Since
+ * there is no immediate use-case for supporting CREATE_BUFS on
+ * just the OUTPUT queue, disable CREATE_BUFS altogether.
+ */
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = wave5_vpu_dec_decoder_cmd,
+
+ .vidioc_subscribe_event = wave5_vpu_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int wave5_vpu_dec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_pix_format_mplane inst_format =
+ (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? inst->src_fmt : inst->dst_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: num_buffers: %u | num_planes: %u | type: %u\n", __func__,
+ *num_buffers, *num_planes, q->type);
+
+ *num_planes = inst_format.num_planes;
+
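+ /*
+ * The OUTPUT (bitstream) queue uses the negotiated sizeimage directly;
+ * CAPTURE sizes are derived from the raw format layout per plane count.
+ */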
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ sizes[0] = inst_format.plane_fmt[0].sizeimage;
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u\n", __func__, sizes[0]);
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (*num_buffers < inst->fbc_buf_count)
+ *num_buffers = inst->fbc_buf_count;
+
+ if (*num_planes == 1) {
+ if (inst->output_format == FORMAT_422)
+ sizes[0] = inst_format.width * inst_format.height * 2;
+ else
+ sizes[0] = inst_format.width * inst_format.height * 3 / 2;
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u\n", __func__, sizes[0]);
+ } else if (*num_planes == 2) {
+ sizes[0] = inst_format.width * inst_format.height;
+ if (inst->output_format == FORMAT_422)
+ sizes[1] = inst_format.width * inst_format.height;
+ else
+ sizes[1] = inst_format.width * inst_format.height / 2;
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u | size[1]: %u\n",
+ __func__, sizes[0], sizes[1]);
+ } else if (*num_planes == 3) {
+ sizes[0] = inst_format.width * inst_format.height;
+ if (inst->output_format == FORMAT_422) {
+ sizes[1] = inst_format.width * inst_format.height / 2;
+ sizes[2] = inst_format.width * inst_format.height / 2;
+ } else {
+ sizes[1] = inst_format.width * inst_format.height / 4;
+ sizes[2] = inst_format.width * inst_format.height / 4;
+ }
+ dev_dbg(inst->dev->dev, "%s: size[0]: %u | size[1]: %u | size[2]: %u\n",
+ __func__, sizes[0], sizes[1], sizes[2]);
+ }
+ }
+
+ return 0;
+}
+
+static int wave5_prepare_fb(struct vpu_instance *inst)
+{
+ int linear_num;
+ int non_linear_num;
+ int fb_stride = 0, fb_height = 0;
+ int luma_size, chroma_size;
+ int ret, i;
+ struct v4l2_m2m_buffer *buf, *n;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ linear_num = v4l2_m2m_num_dst_bufs_ready(m2m_ctx);
+ non_linear_num = inst->fbc_buf_count;
+
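+ /*
+ * First allocate the compressed (FBC) reference buffers, then describe
+ * the linear buffers backing the CAPTURE queue further below.
+ */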
+ for (i = 0; i < non_linear_num; i++) {
+ struct frame_buffer *frame = &inst->frame_buf[i];
+ struct vpu_buf *vframe = &inst->frame_vbuf[i];
+
+ fb_stride = inst->dst_fmt.width;
+ fb_height = ALIGN(inst->dst_fmt.height, 32);
+ luma_size = fb_stride * fb_height;
+
+ chroma_size = ALIGN(fb_stride / 2, 16) * fb_height;
+
+ if (vframe->size == (luma_size + chroma_size))
+ continue;
+
+ if (vframe->size)
+ wave5_vpu_dec_reset_framebuffer(inst, i);
+
+ vframe->size = luma_size + chroma_size;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, vframe);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Allocating FBC buf of size %zu, fail: %d\n",
+ __func__, vframe->size, ret);
+ return ret;
+ }
+
+ frame->buf_y = vframe->daddr;
+ frame->buf_cb = vframe->daddr + luma_size;
+ frame->buf_cr = (dma_addr_t)-1;
+ frame->size = vframe->size;
+ frame->width = inst->src_fmt.width;
+ frame->stride = fb_stride;
+ frame->map_type = COMPRESSED_FRAME_MAP;
+ frame->update_fb_info = true;
+ }
+ /* In case the count has reduced, clean up leftover framebuffer memory */
+ for (i = non_linear_num; i < MAX_REG_FRAME; i++) {
+ ret = wave5_vpu_dec_reset_framebuffer(inst, i);
+ if (ret)
+ break;
+ }
+
+ for (i = 0; i < linear_num; i++) {
+ struct vb2_queue *dst_vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ struct vb2_buffer *vb = vb2_get_buffer(dst_vq, i);
+ struct frame_buffer *frame = &inst->frame_buf[non_linear_num + i];
+ dma_addr_t buf_addr_y = 0, buf_addr_cb = 0, buf_addr_cr = 0;
+ u32 buf_size = 0;
+ u32 fb_stride = inst->dst_fmt.width;
+ u32 luma_size = fb_stride * inst->dst_fmt.height;
+ u32 chroma_size;
+
+ if (inst->output_format == FORMAT_422)
+ chroma_size = fb_stride * inst->dst_fmt.height / 2;
+ else
+ chroma_size = fb_stride * inst->dst_fmt.height / 4;
+
+ if (inst->dst_fmt.num_planes == 1) {
+ buf_size = vb2_plane_size(vb, 0);
+ buf_addr_y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf_addr_cb = buf_addr_y + luma_size;
+ buf_addr_cr = buf_addr_cb + chroma_size;
+ } else if (inst->dst_fmt.num_planes == 2) {
+ buf_size = vb2_plane_size(vb, 0) +
+ vb2_plane_size(vb, 1);
+ buf_addr_y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf_addr_cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+ buf_addr_cr = buf_addr_cb + chroma_size;
+ } else if (inst->dst_fmt.num_planes == 3) {
+ buf_size = vb2_plane_size(vb, 0) +
+ vb2_plane_size(vb, 1) +
+ vb2_plane_size(vb, 2);
+ buf_addr_y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf_addr_cb = vb2_dma_contig_plane_dma_addr(vb, 1);
+ buf_addr_cr = vb2_dma_contig_plane_dma_addr(vb, 2);
+ }
+
+ frame->buf_y = buf_addr_y;
+ frame->buf_cb = buf_addr_cb;
+ frame->buf_cr = buf_addr_cr;
+ frame->size = buf_size;
+ frame->width = inst->src_fmt.width;
+ frame->stride = fb_stride;
+ frame->map_type = LINEAR_FRAME_MAP;
+ frame->update_fb_info = true;
+ }
+
+ ret = wave5_vpu_dec_register_frame_buffer_ex(inst, non_linear_num, linear_num,
+ fb_stride, inst->dst_fmt.height);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: vpu_dec_register_frame_buffer_ex fail: %d",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Mark all frame buffers as out of display, to avoid using them before
+ * the application has queued them.
+ */
+ for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
+ ret = wave5_vpu_dec_set_disp_flag(inst, i);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Setting display flag of buf index: %u, fail: %d\n",
+ __func__, i, ret);
+ }
+ }
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
+ struct vb2_v4l2_buffer *vbuf = &buf->vb;
+
+ ret = wave5_vpu_dec_clr_disp_flag(inst, vbuf->vb2_buf.index);
+ if (ret)
+ dev_dbg(inst->dev->dev,
+ "%s: Clearing display flag of buf index: %u, fail: %d\n",
+ __func__, vbuf->vb2_buf.index, ret);
+ }
+
+ return 0;
+}
+
+static int write_to_ringbuffer(struct vpu_instance *inst, void *buffer, size_t buffer_size,
+ struct vpu_buf *ring_buffer, dma_addr_t wr_ptr)
+{
+ size_t size;
+ size_t offset = wr_ptr - ring_buffer->daddr;
+ int ret;
+
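+ /* Split the copy in two when it would run past the end of the ring. */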
+ if (wr_ptr + buffer_size > ring_buffer->daddr + ring_buffer->size) {
+ size = ring_buffer->daddr + ring_buffer->size - wr_ptr;
+ ret = wave5_vdi_write_memory(inst->dev, ring_buffer, offset, (u8 *)buffer, size);
+ if (ret < 0)
+ return ret;
+
+ ret = wave5_vdi_write_memory(inst->dev, ring_buffer, 0, (u8 *)buffer + size,
+ buffer_size - size);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wave5_vdi_write_memory(inst->dev, ring_buffer, offset, (u8 *)buffer,
+ buffer_size);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fill_ringbuffer(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct v4l2_m2m_buffer *buf, *n;
+ int ret;
+
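+ /* Copy as many queued source buffers as currently fit into the ring. */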
+ if (m2m_ctx->last_src_buf) {
+ struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(m2m_ctx->last_src_buf);
+
+ if (vpu_buf->consumed) {
+ dev_dbg(inst->dev->dev, "last src buffer already written\n");
+ return 0;
+ }
+ }
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ struct vb2_v4l2_buffer *vbuf = &buf->vb;
+ struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(vbuf);
+ struct vpu_buf *ring_buffer = &inst->bitstream_vbuf;
+ size_t src_size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ void *src_buf = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ dma_addr_t rd_ptr = 0;
+ dma_addr_t wr_ptr = 0;
+ size_t remain_size = 0;
+
+ if (vpu_buf->consumed) {
+ dev_dbg(inst->dev->dev, "already copied src buf (%u) to the ring buffer\n",
+ vbuf->vb2_buf.index);
+ continue;
+ }
+
+ if (!src_buf) {
+ dev_dbg(inst->dev->dev,
+ "%s: Acquiring kernel pointer to src buf (%u), fail\n",
+ __func__, vbuf->vb2_buf.index);
+ break;
+ }
+
+ ret = wave5_vpu_dec_get_bitstream_buffer(inst, &rd_ptr, &wr_ptr, &remain_size);
+ if (ret) {
+ /* Unable to acquire the mutex */
+ dev_err(inst->dev->dev, "Getting the bitstream buffer, fail: %d\n",
+ ret);
+ return ret;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: rd_ptr %pad wr_ptr %pad", __func__, &rd_ptr, &wr_ptr);
+
+ if (remain_size < src_size) {
+ dev_dbg(inst->dev->dev,
+ "%s: remaining size: %zu < source size: %zu for src buf (%u)\n",
+ __func__, remain_size, src_size, vbuf->vb2_buf.index);
+ break;
+ }
+
+ ret = write_to_ringbuffer(inst, src_buf, src_size, ring_buffer, wr_ptr);
+ if (ret) {
+ dev_err(inst->dev->dev, "Write src buf (%u) to ring buffer, fail: %d\n",
+ vbuf->vb2_buf.index, ret);
+ return ret;
+ }
+
+ ret = wave5_vpu_dec_update_bitstream_buffer(inst, src_size);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "update_bitstream_buffer fail: %d for src buf (%u)\n",
+ ret, vbuf->vb2_buf.index);
+ break;
+ }
+
+ vpu_buf->consumed = true;
+
+ /* Don't write buffers past the last one while draining. */
+ if (v4l2_m2m_is_last_draining_src_buf(m2m_ctx, vbuf)) {
+ dev_dbg(inst->dev->dev, "last src buffer written to the ring buffer\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void wave5_vpu_dec_buf_queue_src(struct vb2_buffer *vb)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_src_buffer *vpu_buf = wave5_to_vpu_src_buf(vbuf);
+
+ vpu_buf->consumed = false;
+ vbuf->sequence = inst->queued_src_buf_num++;
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+}
+
+static void wave5_vpu_dec_buf_queue_dst(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ vbuf->sequence = inst->queued_dst_buf_num++;
+
+ if (inst->state == VPU_INST_STATE_PIC_RUN) {
+ struct vpu_dst_buffer *vpu_buf = wave5_to_vpu_dst_buf(vbuf);
+ int ret;
+
+ /*
+		 * The buffer is already registered; just clear the display flag
+ * to let the firmware know it can be used.
+ */
+ vpu_buf->display = false;
+ ret = wave5_vpu_dec_clr_disp_flag(inst, vb->index);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: Clearing the display flag of buffer index: %u, fail: %d\n",
+ __func__, vb->index, ret);
+ }
+ }
+
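+	/*
+	 * If the drain already completed while this queue is streaming,
+	 * complete this buffer immediately as an empty last buffer and send
+	 * the EOS event.
+	 */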
+ if (vb2_is_streaming(vb->vb2_queue) && v4l2_m2m_dst_buf_is_last(m2m_ctx)) {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; i++)
+ vb2_set_plane_payload(vb, i, 0);
+
+ vbuf->field = V4L2_FIELD_NONE;
+
+ send_eos_event(inst);
+ v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
+ } else {
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+ }
+}
+
+static void wave5_vpu_dec_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ dev_dbg(inst->dev->dev, "%s: type: %4u index: %4u size: ([0]=%4lu, [1]=%4lu, [2]=%4lu)\n",
+ __func__, vb->type, vb->index, vb2_plane_size(&vbuf->vb2_buf, 0),
+ vb2_plane_size(&vbuf->vb2_buf, 1), vb2_plane_size(&vbuf->vb2_buf, 2));
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ wave5_vpu_dec_buf_queue_src(vb);
+ else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ wave5_vpu_dec_buf_queue_dst(vb);
+}
+
+static int wave5_vpu_dec_allocate_ring_buffer(struct vpu_instance *inst)
+{
+ int ret;
+ struct vpu_buf *ring_buffer = &inst->bitstream_vbuf;
+
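+	/* Size the ring buffer to hold four source payloads, each rounded up to 1 KiB. */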
+ ring_buffer->size = ALIGN(inst->src_fmt.plane_fmt[0].sizeimage, 1024) * 4;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, ring_buffer);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: allocate ring buffer of size %zu fail: %d\n",
+ __func__, ring_buffer->size, ret);
+ return ret;
+ }
+
+ inst->last_rd_ptr = ring_buffer->daddr;
+
+ return 0;
+}
+
+static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret = 0;
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
+
+ v4l2_m2m_update_start_streaming_state(m2m_ctx, q);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && inst->state == VPU_INST_STATE_NONE) {
+ struct dec_open_param open_param;
+
+ memset(&open_param, 0, sizeof(struct dec_open_param));
+
+ ret = wave5_vpu_dec_allocate_ring_buffer(inst);
+ if (ret)
+ goto return_buffers;
+
+ open_param.bitstream_buffer = inst->bitstream_vbuf.daddr;
+ open_param.bitstream_buffer_size = inst->bitstream_vbuf.size;
+
+ ret = wave5_vpu_dec_open(inst, &open_param);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: decoder opening, fail: %d\n",
+ __func__, ret);
+ goto free_bitstream_vbuf;
+ }
+
+ ret = switch_state(inst, VPU_INST_STATE_OPEN);
+ if (ret)
+ goto free_bitstream_vbuf;
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ struct dec_initial_info *initial_info =
+ &inst->codec_info->dec_info.initial_info;
+
+ if (inst->state == VPU_INST_STATE_STOP)
+ ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
+ if (ret)
+ goto return_buffers;
+
+ if (inst->state == VPU_INST_STATE_INIT_SEQ) {
+ if (initial_info->luma_bitdepth != 8) {
+				dev_info(inst->dev->dev, "%s: no support for %d-bit depth\n",
+					 __func__, initial_info->luma_bitdepth);
+ ret = -EINVAL;
+ goto return_buffers;
+ }
+ }
+ }
+
+ return ret;
+
+free_bitstream_vbuf:
+ wave5_vdi_free_dma_memory(inst->dev, &inst->bitstream_vbuf);
+return_buffers:
+ wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static int streamoff_output(struct vb2_queue *q)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_v4l2_buffer *buf;
+ int ret;
+ dma_addr_t new_rd_ptr;
+
+ while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+
+ ret = wave5_vpu_flush_instance(inst);
+ if (ret)
+ return ret;
+
+ /* Reset the ring buffer information */
+ new_rd_ptr = wave5_vpu_dec_get_rd_ptr(inst);
+ inst->last_rd_ptr = new_rd_ptr;
+ inst->codec_info->dec_info.stream_rd_ptr = new_rd_ptr;
+ inst->codec_info->dec_info.stream_wr_ptr = new_rd_ptr;
+
+ if (v4l2_m2m_has_stopped(m2m_ctx))
+ send_eos_event(inst);
+
+ /* streamoff on output cancels any draining operation */
+ inst->eos = false;
+
+ return 0;
+}
+
+static int streamoff_capture(struct vb2_queue *q)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_v4l2_buffer *buf;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
+ ret = wave5_vpu_dec_set_disp_flag(inst, i);
+ if (ret)
+ dev_dbg(inst->dev->dev,
+ "%s: Setting display flag of buf index: %u, fail: %d\n",
+ __func__, i, ret);
+ }
+
+ while ((buf = v4l2_m2m_dst_buf_remove(m2m_ctx))) {
+ u32 plane;
+
+ dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+
+ for (plane = 0; plane < inst->dst_fmt.num_planes; plane++)
+ vb2_set_plane_payload(&buf->vb2_buf, plane, 0);
+
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+
+ if (inst->needs_reallocation) {
+ wave5_vpu_dec_give_command(inst, DEC_RESET_FRAMEBUF_INFO, NULL);
+ inst->needs_reallocation = false;
+ }
+
+ if (v4l2_m2m_has_stopped(m2m_ctx)) {
+ ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+	bool check_cmd = true;
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
+
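+	/*
+	 * Wait for all decode commands still queued on the firmware to report
+	 * back, consuming their results, before tearing down the queues.
+	 */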
+ while (check_cmd) {
+ struct queue_status_info q_status;
+ struct dec_output_info dec_output_info;
+
+ wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
+
+ if (q_status.report_queue_count == 0)
+ break;
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT) < 0)
+ break;
+
+ if (wave5_vpu_dec_get_output_info(inst, &dec_output_info))
+ dev_dbg(inst->dev->dev, "Getting decoding results from fw, fail\n");
+ }
+
+ v4l2_m2m_update_stop_streaming_state(m2m_ctx, q);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ streamoff_output(q);
+ else
+ streamoff_capture(q);
+}
+
+static const struct vb2_ops wave5_vpu_dec_vb2_ops = {
+ .queue_setup = wave5_vpu_dec_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_queue = wave5_vpu_dec_buf_queue,
+ .start_streaming = wave5_vpu_dec_start_streaming,
+ .stop_streaming = wave5_vpu_dec_stop_streaming,
+};
+
+static void wave5_set_default_format(struct v4l2_pix_format_mplane *src_fmt,
+ struct v4l2_pix_format_mplane *dst_fmt)
+{
+ unsigned int dst_pix_fmt = dec_fmt_list[VPU_FMT_TYPE_RAW][0].v4l2_pix_fmt;
+ const struct v4l2_format_info *dst_fmt_info = v4l2_format_info(dst_pix_fmt);
+
+ src_fmt->pixelformat = dec_fmt_list[VPU_FMT_TYPE_CODEC][0].v4l2_pix_fmt;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->flags = 0;
+ src_fmt->num_planes = 1;
+ wave5_update_pix_fmt(src_fmt, 720, 480);
+
+ dst_fmt->pixelformat = dst_pix_fmt;
+ dst_fmt->field = V4L2_FIELD_NONE;
+ dst_fmt->flags = 0;
+ dst_fmt->num_planes = dst_fmt_info->mem_planes;
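+	/* 736 is the default width of 720 rounded up to the 32-pixel stride alignment. */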
+ wave5_update_pix_fmt(dst_fmt, 736, 480);
+}
+
+static int wave5_vpu_dec_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ return wave5_vpu_queue_init(priv, src_vq, dst_vq, &wave5_vpu_dec_vb2_ops);
+}
+
+static const struct vpu_instance_ops wave5_vpu_dec_inst_ops = {
+ .finish_process = wave5_vpu_dec_finish_decode,
+};
+
+static int initialize_sequence(struct vpu_instance *inst)
+{
+ struct dec_initial_info initial_info;
+ int ret = 0;
+
+ memset(&initial_info, 0, sizeof(struct dec_initial_info));
+
+ ret = wave5_vpu_dec_issue_seq_init(inst);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_dec_issue_seq_init, fail: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_DEC_TIMEOUT) < 0)
+ dev_dbg(inst->dev->dev, "%s: failed to call vpu_wait_interrupt()\n", __func__);
+
+ ret = wave5_vpu_dec_complete_seq_init(inst, &initial_info);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: vpu_dec_complete_seq_init, fail: %d, reason: %u\n",
+ __func__, ret, initial_info.seq_init_err_reason);
+ wave5_handle_src_buffer(inst, initial_info.rd_ptr);
+ return ret;
+ }
+
+ handle_dynamic_resolution_change(inst);
+
+ return 0;
+}
+
+static bool wave5_is_draining_or_eos(struct vpu_instance *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ lockdep_assert_held(&inst->state_spinlock);
+ return m2m_ctx->is_draining || inst->eos;
+}
+
+static void wave5_vpu_dec_device_run(void *priv)
+{
+ struct vpu_instance *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct queue_status_info q_status;
+ u32 fail_res = 0;
+ int ret = 0;
+
+	dev_dbg(inst->dev->dev, "%s: Fill the ring buffer with new bitstream data\n", __func__);
+
+ ret = fill_ringbuffer(inst);
+ if (ret) {
+ dev_warn(inst->dev->dev, "Filling ring buffer failed\n");
+ goto finish_job_and_return;
+ }
+
+ switch (inst->state) {
+ case VPU_INST_STATE_OPEN:
+ ret = initialize_sequence(inst);
+ if (ret) {
+ unsigned long flags;
+
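+			/*
+			 * A failed sequence init while draining with all
+			 * source data consumed means the bitstream is done:
+			 * stop the instance and complete the last buffer.
+			 */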
+ spin_lock_irqsave(&inst->state_spinlock, flags);
+ if (wave5_is_draining_or_eos(inst) &&
+ wave5_last_src_buffer_consumed(m2m_ctx)) {
+ struct vb2_queue *dst_vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ switch_state(inst, VPU_INST_STATE_STOP);
+
+ if (vb2_is_streaming(dst_vq))
+ send_eos_event(inst);
+ else
+ handle_dynamic_resolution_change(inst);
+
+ flag_last_buffer_done(inst);
+ }
+ spin_unlock_irqrestore(&inst->state_spinlock, flags);
+ } else {
+ switch_state(inst, VPU_INST_STATE_INIT_SEQ);
+ }
+
+ break;
+
+ case VPU_INST_STATE_INIT_SEQ:
+ /*
+		 * Do this early: preparing the framebuffers can trigger an IRQ
+		 * before we have had a chance to switch, which would lead to an
+		 * invalid state change.
+ */
+ switch_state(inst, VPU_INST_STATE_PIC_RUN);
+
+ /*
+ * During DRC, the picture decoding remains pending, so just leave the job
+ * active until this decode operation completes.
+ */
+ wave5_vpu_dec_give_command(inst, DEC_GET_QUEUE_STATUS, &q_status);
+
+ /*
+ * The sequence must be analyzed first to calculate the proper
+ * size of the auxiliary buffers.
+ */
+ ret = wave5_prepare_fb(inst);
+ if (ret) {
+ dev_warn(inst->dev->dev, "Framebuffer preparation, fail: %d\n", ret);
+ switch_state(inst, VPU_INST_STATE_STOP);
+ break;
+ }
+
+ if (q_status.instance_queue_count) {
+			dev_dbg(inst->dev->dev, "%s: leave with active job\n", __func__);
+ return;
+ }
+
+ fallthrough;
+ case VPU_INST_STATE_PIC_RUN:
+ ret = start_decode(inst, &fail_res);
+ if (ret) {
+ dev_err(inst->dev->dev,
+ "Frame decoding on m2m context (%p), fail: %d (result: %d)\n",
+ m2m_ctx, ret, fail_res);
+ break;
+ }
+ /* Return so that we leave this job active */
+		dev_dbg(inst->dev->dev, "%s: leave with active job\n", __func__);
+ return;
+ default:
+		WARN(1, "Execution of a job in state %s is illegal.\n", state_to_str(inst->state));
+ break;
+ }
+
+finish_job_and_return:
+	dev_dbg(inst->dev->dev, "%s: leave and finish job\n", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+}
+
+static void wave5_vpu_dec_job_abort(void *priv)
+{
+ struct vpu_instance *inst = priv;
+ int ret;
+
+ ret = switch_state(inst, VPU_INST_STATE_STOP);
+ if (ret)
+ return;
+
+ ret = wave5_vpu_dec_set_eos_on_firmware(inst);
+ if (ret)
+ dev_warn(inst->dev->dev,
+ "Setting EOS for the bitstream, fail: %d\n", ret);
+}
+
+static int wave5_vpu_dec_job_ready(void *priv)
+{
+ struct vpu_instance *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&inst->state_spinlock, flags);
+
+ switch (inst->state) {
+ case VPU_INST_STATE_NONE:
+ dev_dbg(inst->dev->dev, "Decoder must be open to start queueing M2M jobs!\n");
+ break;
+ case VPU_INST_STATE_OPEN:
+ if (wave5_is_draining_or_eos(inst) || !v4l2_m2m_has_stopped(m2m_ctx) ||
+ v4l2_m2m_num_src_bufs_ready(m2m_ctx) > 0) {
+ ret = 1;
+ break;
+ }
+
+ dev_dbg(inst->dev->dev,
+			"Decoder must be draining or have at least one OUTPUT queue buffer queued!\n");
+ break;
+ case VPU_INST_STATE_INIT_SEQ:
+ case VPU_INST_STATE_PIC_RUN:
+ if (!m2m_ctx->cap_q_ctx.q.streaming) {
+ dev_dbg(inst->dev->dev, "CAPTURE queue must be streaming to queue jobs!\n");
+ break;
+ } else if (v4l2_m2m_num_dst_bufs_ready(m2m_ctx) < (inst->fbc_buf_count - 1)) {
+ dev_dbg(inst->dev->dev,
+ "No capture buffer ready to decode!\n");
+ break;
+ } else if (!wave5_is_draining_or_eos(inst) &&
+ !v4l2_m2m_num_src_bufs_ready(m2m_ctx)) {
+ dev_dbg(inst->dev->dev,
+ "No bitstream data to decode!\n");
+ break;
+ }
+ ret = 1;
+ break;
+ case VPU_INST_STATE_STOP:
+ dev_dbg(inst->dev->dev, "Decoder is stopped, not running.\n");
+ break;
+ }
+
+ spin_unlock_irqrestore(&inst->state_spinlock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_m2m_ops wave5_vpu_dec_m2m_ops = {
+ .device_run = wave5_vpu_dec_device_run,
+ .job_abort = wave5_vpu_dec_job_abort,
+ .job_ready = wave5_vpu_dec_job_ready,
+};
+
+static int wave5_vpu_open_dec(struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ struct vpu_device *dev = video_drvdata(filp);
+ struct vpu_instance *inst = NULL;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ int ret = 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->dev = dev;
+ inst->type = VPU_INST_TYPE_DEC;
+ inst->ops = &wave5_vpu_dec_inst_ops;
+
+ spin_lock_init(&inst->state_spinlock);
+
+ inst->codec_info = kzalloc(sizeof(*inst->codec_info), GFP_KERNEL);
+	if (!inst->codec_info) {
+		kfree(inst);
+		return -ENOMEM;
+	}
+
+ v4l2_fh_init(&inst->v4l2_fh, vdev);
+ filp->private_data = &inst->v4l2_fh;
+ v4l2_fh_add(&inst->v4l2_fh);
+
+ INIT_LIST_HEAD(&inst->list);
+ list_add_tail(&inst->list, &dev->instances);
+
+ inst->v4l2_m2m_dev = inst->dev->v4l2_m2m_dec_dev;
+ inst->v4l2_fh.m2m_ctx =
+ v4l2_m2m_ctx_init(inst->v4l2_m2m_dev, inst, wave5_vpu_dec_queue_init);
+ if (IS_ERR(inst->v4l2_fh.m2m_ctx)) {
+ ret = PTR_ERR(inst->v4l2_fh.m2m_ctx);
+ goto cleanup_inst;
+ }
+ m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ v4l2_m2m_set_src_buffered(m2m_ctx, true);
+ v4l2_m2m_set_dst_buffered(m2m_ctx, true);
+ /*
+ * We use the M2M job queue to ensure synchronization of steps where
+	 * needed, as IOCTLs can occur at any time and we need to run commands on
+ * the firmware in a specified order.
+ * In order to initialize the sequence on the firmware within an M2M
+ * job, the M2M framework needs to be able to queue jobs before
+ * the CAPTURE queue has been started, because we need the results of the
+ * initialization to properly prepare the CAPTURE queue with the correct
+ * amount of buffers.
+ * By setting ignore_cap_streaming to true the m2m framework will call
+ * job_ready as soon as the OUTPUT queue is streaming, instead of
+ * waiting until both the CAPTURE and OUTPUT queues are streaming.
+ */
+ m2m_ctx->ignore_cap_streaming = true;
+
+ v4l2_ctrl_handler_init(&inst->v4l2_ctrl_hdl, 10);
+ v4l2_ctrl_new_std(&inst->v4l2_ctrl_hdl, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1);
+
+ if (inst->v4l2_ctrl_hdl.error) {
+ ret = -ENODEV;
+ goto cleanup_inst;
+ }
+
+ inst->v4l2_fh.ctrl_handler = &inst->v4l2_ctrl_hdl;
+ v4l2_ctrl_handler_setup(&inst->v4l2_ctrl_hdl);
+
+ wave5_set_default_format(&inst->src_fmt, &inst->dst_fmt);
+ inst->colorspace = V4L2_COLORSPACE_REC709;
+ inst->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ inst->quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ init_completion(&inst->irq_done);
+
+ inst->id = ida_alloc(&inst->dev->inst_ida, GFP_KERNEL);
+ if (inst->id < 0) {
+ dev_warn(inst->dev->dev, "Allocating instance ID, fail: %d\n", inst->id);
+ ret = inst->id;
+ goto cleanup_inst;
+ }
+
+ wave5_vdi_allocate_sram(inst->dev);
+
+ return 0;
+
+cleanup_inst:
+ wave5_cleanup_instance(inst);
+ return ret;
+}
+
+static int wave5_vpu_dec_release(struct file *filp)
+{
+ return wave5_vpu_release_device(filp, wave5_vpu_dec_close, "decoder");
+}
+
+static const struct v4l2_file_operations wave5_vpu_dec_fops = {
+ .owner = THIS_MODULE,
+ .open = wave5_vpu_open_dec,
+ .release = wave5_vpu_dec_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+int wave5_vpu_dec_register_device(struct vpu_device *dev)
+{
+ struct video_device *vdev_dec;
+ int ret;
+
+ vdev_dec = devm_kzalloc(dev->v4l2_dev.dev, sizeof(*vdev_dec), GFP_KERNEL);
+ if (!vdev_dec)
+ return -ENOMEM;
+
+ dev->v4l2_m2m_dec_dev = v4l2_m2m_init(&wave5_vpu_dec_m2m_ops);
+ if (IS_ERR(dev->v4l2_m2m_dec_dev)) {
+ ret = PTR_ERR(dev->v4l2_m2m_dec_dev);
+ dev_err(dev->dev, "v4l2_m2m_init, fail: %d\n", ret);
+		return ret;
+ }
+
+ dev->video_dev_dec = vdev_dec;
+
+ strscpy(vdev_dec->name, VPU_DEC_DEV_NAME, sizeof(vdev_dec->name));
+ vdev_dec->fops = &wave5_vpu_dec_fops;
+ vdev_dec->ioctl_ops = &wave5_vpu_dec_ioctl_ops;
+ vdev_dec->release = video_device_release_empty;
+ vdev_dec->v4l2_dev = &dev->v4l2_dev;
+ vdev_dec->vfl_dir = VFL_DIR_M2M;
+ vdev_dec->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vdev_dec->lock = &dev->dev_lock;
+
+ ret = video_register_device(vdev_dec, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ return ret;
+
+ video_set_drvdata(vdev_dec, dev);
+
+ return 0;
+}
+
+void wave5_vpu_dec_unregister_device(struct vpu_device *dev)
+{
+ video_unregister_device(dev->video_dev_dec);
+ if (dev->v4l2_m2m_dec_dev)
+ v4l2_m2m_release(dev->v4l2_m2m_dec_dev);
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
new file mode 100644
index 000000000..f29cfa3af
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -0,0 +1,1794 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - encoder interface
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#include "wave5-helper.h"
+
+#define VPU_ENC_DEV_NAME "C&M Wave5 VPU encoder"
+#define VPU_ENC_DRV_NAME "wave5-enc"
+
+static const struct vpu_format enc_fmt_list[FMT_TYPES][MAX_FMTS] = {
+ [VPU_FMT_TYPE_CODEC] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_HEVC,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_H264,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ },
+ [VPU_FMT_TYPE_RAW] = {
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_YUV420M,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV12M,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ {
+ .v4l2_pix_fmt = V4L2_PIX_FMT_NV21M,
+ .max_width = W5_MAX_ENC_PIC_WIDTH,
+ .min_width = W5_MIN_ENC_PIC_WIDTH,
+ .max_height = W5_MAX_ENC_PIC_HEIGHT,
+ .min_height = W5_MIN_ENC_PIC_HEIGHT,
+ },
+ }
+};
+
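+/*
+ * Legal encoder state transitions: NONE -> OPEN -> INIT_SEQ -> PIC_RUN,
+ * where STOP is reachable from any state and INIT_SEQ can also be re-entered
+ * from STOP. Any other transition triggers a WARN and is rejected.
+ */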
+static int switch_state(struct vpu_instance *inst, enum vpu_instance_state state)
+{
+ switch (state) {
+ case VPU_INST_STATE_NONE:
+ goto invalid_state_switch;
+ case VPU_INST_STATE_OPEN:
+ if (inst->state != VPU_INST_STATE_NONE)
+ goto invalid_state_switch;
+ break;
+ case VPU_INST_STATE_INIT_SEQ:
+ if (inst->state != VPU_INST_STATE_OPEN && inst->state != VPU_INST_STATE_STOP)
+ goto invalid_state_switch;
+ break;
+ case VPU_INST_STATE_PIC_RUN:
+ if (inst->state != VPU_INST_STATE_INIT_SEQ)
+ goto invalid_state_switch;
+ break;
+ case VPU_INST_STATE_STOP:
+ break;
+	}
+
+ dev_dbg(inst->dev->dev, "Switch state from %s to %s.\n",
+ state_to_str(inst->state), state_to_str(state));
+ inst->state = state;
+ return 0;
+
+invalid_state_switch:
+ WARN(1, "Invalid state switch from %s to %s.\n",
+ state_to_str(inst->state), state_to_str(state));
+ return -EINVAL;
+}
+
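+/*
+ * Derive bytesperline/sizeimage for the supported raw layouts; the luma
+ * stride is rounded up to 32 bytes, matching the stride alignment used
+ * throughout the driver. The default case covers the compressed (codec)
+ * formats, sizing the bitstream buffer with a 3/8 bytes-per-pixel heuristic.
+ */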
+static void wave5_update_pix_fmt(struct v4l2_pix_format_mplane *pix_mp, unsigned int width,
+ unsigned int height)
+{
+ switch (pix_mp->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = round_up(width, 32) * height * 3 / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = round_up(width, 32) * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[1].sizeimage = round_up(width, 32) * height / 4;
+ pix_mp->plane_fmt[2].bytesperline = round_up(width, 32) / 2;
+ pix_mp->plane_fmt[2].sizeimage = round_up(width, 32) * height / 4;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[0].sizeimage = round_up(width, 32) * height;
+ pix_mp->plane_fmt[1].bytesperline = round_up(width, 32);
+ pix_mp->plane_fmt[1].sizeimage = round_up(width, 32) * height / 2;
+ break;
+ default:
+ pix_mp->width = width;
+ pix_mp->height = height;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ pix_mp->plane_fmt[0].sizeimage = width * height / 8 * 3;
+ break;
+ }
+}
+
+static int start_encode(struct vpu_instance *inst, u32 *fail_res)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret;
+ struct vb2_v4l2_buffer *src_buf;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct frame_buffer frame_buf;
+ struct enc_param pic_param;
+ u32 stride = ALIGN(inst->dst_fmt.width, 32);
+ u32 luma_size = (stride * inst->dst_fmt.height);
+ u32 chroma_size = ((stride / 2) * (inst->dst_fmt.height / 2));
+
+ memset(&pic_param, 0, sizeof(struct enc_param));
+ memset(&frame_buf, 0, sizeof(struct frame_buffer));
+
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!dst_buf) {
+ dev_dbg(inst->dev->dev, "%s: No destination buffer found\n", __func__);
+ return -EAGAIN;
+ }
+
+ pic_param.pic_stream_buffer_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ pic_param.pic_stream_buffer_size =
+ vb2_plane_size(&dst_buf->vb2_buf, 0);
+
+ src_buf = v4l2_m2m_next_src_buf(m2m_ctx);
+ if (!src_buf) {
+ dev_dbg(inst->dev->dev, "%s: No source buffer found\n", __func__);
+ if (m2m_ctx->is_draining)
+ pic_param.src_end_flag = 1;
+ else
+ return -EAGAIN;
+ } else {
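+		/*
+		 * Derive the chroma addresses from the plane layout:
+		 * single-plane formats pack Y/Cb/Cr contiguously, two-plane
+		 * formats carry interleaved Cb/Cr in the second plane, and
+		 * three-plane formats carry each component separately.
+		 */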
+ if (inst->src_fmt.num_planes == 1) {
+ frame_buf.buf_y =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ frame_buf.buf_cb = frame_buf.buf_y + luma_size;
+ frame_buf.buf_cr = frame_buf.buf_cb + chroma_size;
+ } else if (inst->src_fmt.num_planes == 2) {
+ frame_buf.buf_y =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ frame_buf.buf_cb =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+ frame_buf.buf_cr = frame_buf.buf_cb + chroma_size;
+ } else if (inst->src_fmt.num_planes == 3) {
+ frame_buf.buf_y =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ frame_buf.buf_cb =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+ frame_buf.buf_cr =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 2);
+ }
+ frame_buf.stride = stride;
+ pic_param.src_idx = src_buf->vb2_buf.index;
+ }
+
+ pic_param.source_frame = &frame_buf;
+ pic_param.code_option.implicit_header_encode = 1;
+ pic_param.code_option.encode_aud = inst->encode_aud;
+ ret = wave5_vpu_enc_start_one_frame(inst, &pic_param, fail_res);
+ if (ret) {
+ if (*fail_res == WAVE5_SYSERR_QUEUEING_FAIL)
+ return -EINVAL;
+
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_start_one_frame fail: %d\n",
+ __func__, ret);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ if (!src_buf) {
+ dev_dbg(inst->dev->dev,
+ "%s: Removing src buf failed, the queue is empty\n",
+ __func__);
+ return -EINVAL;
+ }
+ dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!dst_buf) {
+ dev_dbg(inst->dev->dev,
+ "%s: Removing dst buf failed, the queue is empty\n",
+ __func__);
+ return -EINVAL;
+ }
+ switch_state(inst, VPU_INST_STATE_STOP);
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_start_one_frame success\n",
+ __func__);
+ /*
+ * Remove the source buffer from the ready-queue now and finish
+ * it in the videobuf2 framework once the index is returned by the
+		 * firmware in finish_encode.
+ */
+ if (src_buf)
+ v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, src_buf->vb2_buf.index);
+ }
+
+ return 0;
+}
+
+static void wave5_vpu_enc_finish_encode(struct vpu_instance *inst)
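+	/*
+	 * Advertise enough source buffers on top of the firmware's reported
+	 * minimum so that the command queue can be kept filled.
+	 */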
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret;
+ struct enc_output_info enc_output_info;
+ struct vb2_v4l2_buffer *src_buf = NULL;
+ struct vb2_v4l2_buffer *dst_buf = NULL;
+
+ ret = wave5_vpu_enc_get_output_info(inst, &enc_output_info);
+ if (ret) {
+ dev_dbg(inst->dev->dev,
+ "%s: vpu_enc_get_output_info fail: %d reason: %u | info: %u\n",
+ __func__, ret, enc_output_info.error_reason, enc_output_info.warn_info);
+ return;
+ }
+
+ dev_dbg(inst->dev->dev,
+ "%s: pic_type %i recon_idx %i src_idx %i pic_byte %u pts %llu\n",
+ __func__, enc_output_info.pic_type, enc_output_info.recon_frame_index,
+ enc_output_info.enc_src_idx, enc_output_info.enc_pic_byte, enc_output_info.pts);
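+	/*
+	 * Allocate the reference frame buffers used by the encoder; each
+	 * frame is one contiguous allocation holding the luma data followed
+	 * by the chroma data.
+	 */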
+
+	/*
+	 * The source buffer will not be found in the ready-queue as it was
+	 * dropped after sending the encode command to the firmware; locate it
+	 * in the videobuf2 queue directly.
+	 */
+ if (enc_output_info.enc_src_idx >= 0) {
+ struct vb2_buffer *vb = vb2_get_buffer(v4l2_m2m_get_src_vq(m2m_ctx),
+ enc_output_info.enc_src_idx);
+		if (!vb || vb->state != VB2_BUF_STATE_ACTIVE)
+			dev_warn(inst->dev->dev,
+				 "%s: encoded buffer (%d) was not in the ready queue\n",
+				 __func__, enc_output_info.enc_src_idx);
+		else
+			src_buf = to_vb2_v4l2_buffer(vb);
+
+ if (src_buf) {
+ inst->timestamp = src_buf->vb2_buf.timestamp;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ } else {
+ dev_warn(inst->dev->dev, "%s: no source buffer with index: %d found\n",
+ __func__, enc_output_info.enc_src_idx);
+ }
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (enc_output_info.recon_frame_index == RECON_IDX_FLAG_ENC_END) {
+ static const struct v4l2_event vpu_event_eos = {
+ .type = V4L2_EVENT_EOS
+ };
+
+ if (!WARN_ON(!dst_buf)) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->field = V4L2_FIELD_NONE;
+ v4l2_m2m_last_buffer_done(m2m_ctx, dst_buf);
+ }
+
+ v4l2_event_queue_fh(&inst->v4l2_fh, &vpu_event_eos);
+
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ } else {
+ if (!dst_buf) {
+			dev_warn(inst->dev->dev, "No bitstream buffer\n");
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ return;
+ }
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_output_info.bitstream_size);
+
+ dst_buf->vb2_buf.timestamp = inst->timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
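+		/*
+		 * HEVC VCL NAL unit types 19 (IDR_W_RADL) and 20 (IDR_N_LP)
+		 * denote IDR pictures; only those I-frames are flagged as
+		 * keyframes, all others are reported as P-frames.
+		 */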
+ if (enc_output_info.pic_type == PIC_TYPE_I) {
+ if (enc_output_info.enc_vcl_nut == 19 ||
+ enc_output_info.enc_vcl_nut == 20)
+ dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ } else if (enc_output_info.pic_type == PIC_TYPE_P) {
+ dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
+ } else if (enc_output_info.pic_type == PIC_TYPE_B) {
+ dst_buf->flags |= V4L2_BUF_FLAG_BFRAME;
+ }
+
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+
+ dev_dbg(inst->dev->dev, "%s: frame_cycle %8u\n",
+ __func__, enc_output_info.frame_cycle);
+
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+ }
+}
+
+static int wave5_vpu_enc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, VPU_ENC_DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, VPU_ENC_DRV_NAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int wave5_vpu_enc_enum_framesizes(struct file *f, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+ const struct vpu_format *vpu_fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, enc_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ vpu_fmt = wave5_find_vpu_fmt(fsize->pixel_format, enc_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = vpu_fmt->min_width;
+ fsize->stepwise.max_width = vpu_fmt->max_width;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.min_height = vpu_fmt->min_height;
+ fsize->stepwise.max_height = vpu_fmt->max_height;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_enum_fmt_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, enc_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, enc_fmt_list[VPU_FMT_TYPE_CODEC]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->dst_fmt.width, inst->dst_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = 1;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_enc_try_fmt_cap(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->std = wave5_to_vpu_std(f->fmt.pix_mp.pixelformat, inst->type);
+ if (inst->std == STD_UNKNOWN) {
+ dev_warn(inst->dev->dev, "unsupported pixelformat: %.4s\n",
+ (char *)&f->fmt.pix_mp.pixelformat);
+ return -EINVAL;
+ }
+
+ inst->dst_fmt.width = f->fmt.pix_mp.width;
+ inst->dst_fmt.height = f->fmt.pix_mp.height;
+ inst->dst_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->dst_fmt.field = f->fmt.pix_mp.field;
+ inst->dst_fmt.flags = f->fmt.pix_mp.flags;
+ inst->dst_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->dst_fmt.num_planes; i++) {
+ inst->dst_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->dst_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i;
+
+ f->fmt.pix_mp.width = inst->dst_fmt.width;
+ f->fmt.pix_mp.height = inst->dst_fmt.height;
+ f->fmt.pix_mp.pixelformat = inst->dst_fmt.pixelformat;
+ f->fmt.pix_mp.field = inst->dst_fmt.field;
+ f->fmt.pix_mp.flags = inst->dst_fmt.flags;
+ f->fmt.pix_mp.num_planes = inst->dst_fmt.num_planes;
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ f->fmt.pix_mp.plane_fmt[i].bytesperline = inst->dst_fmt.plane_fmt[i].bytesperline;
+ f->fmt.pix_mp.plane_fmt[i].sizeimage = inst->dst_fmt.plane_fmt[i].sizeimage;
+ }
+
+ f->fmt.pix_mp.colorspace = inst->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = inst->ycbcr_enc;
+ f->fmt.pix_mp.quantization = inst->quantization;
+ f->fmt.pix_mp.xfer_func = inst->xfer_func;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
+
+ vpu_fmt = wave5_find_vpu_fmt_by_idx(f->index, enc_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt)
+ return -EINVAL;
+
+ f->pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->flags = 0;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ const struct vpu_format *vpu_fmt;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ vpu_fmt = wave5_find_vpu_fmt(f->fmt.pix_mp.pixelformat, enc_fmt_list[VPU_FMT_TYPE_RAW]);
+ if (!vpu_fmt) {
+ f->fmt.pix_mp.pixelformat = inst->src_fmt.pixelformat;
+ f->fmt.pix_mp.num_planes = inst->src_fmt.num_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, inst->src_fmt.width, inst->src_fmt.height);
+ } else {
+ int width = clamp(f->fmt.pix_mp.width, vpu_fmt->min_width, vpu_fmt->max_width);
+ int height = clamp(f->fmt.pix_mp.height, vpu_fmt->min_height, vpu_fmt->max_height);
+ const struct v4l2_format_info *info = v4l2_format_info(vpu_fmt->v4l2_pix_fmt);
+
+ f->fmt.pix_mp.pixelformat = vpu_fmt->v4l2_pix_fmt;
+ f->fmt.pix_mp.num_planes = info->mem_planes;
+ wave5_update_pix_fmt(&f->fmt.pix_mp, width, height);
+ }
+
+ f->fmt.pix_mp.flags = 0;
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ int i, ret;
+
+ dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
+ __func__, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height,
+ f->fmt.pix_mp.num_planes, f->fmt.pix_mp.field);
+
+ ret = wave5_vpu_enc_try_fmt_out(file, fh, f);
+ if (ret)
+ return ret;
+
+ inst->src_fmt.width = f->fmt.pix_mp.width;
+ inst->src_fmt.height = f->fmt.pix_mp.height;
+ inst->src_fmt.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->src_fmt.field = f->fmt.pix_mp.field;
+ inst->src_fmt.flags = f->fmt.pix_mp.flags;
+ inst->src_fmt.num_planes = f->fmt.pix_mp.num_planes;
+ for (i = 0; i < inst->src_fmt.num_planes; i++) {
+ inst->src_fmt.plane_fmt[i].bytesperline = f->fmt.pix_mp.plane_fmt[i].bytesperline;
+ inst->src_fmt.plane_fmt[i].sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ if (inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV12 ||
+ inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV12M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = false;
+ } else if (inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV21 ||
+ inst->src_fmt.pixelformat == V4L2_PIX_FMT_NV21M) {
+ inst->cbcr_interleave = true;
+ inst->nv21 = true;
+ } else {
+ inst->cbcr_interleave = false;
+ inst->nv21 = false;
+ }
+
+ inst->colorspace = f->fmt.pix_mp.colorspace;
+ inst->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ inst->quantization = f->fmt.pix_mp.quantization;
+ inst->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ wave5_update_pix_fmt(&inst->dst_fmt, f->fmt.pix_mp.width, f->fmt.pix_mp.height);
+
+ return 0;
+}
+
+static int wave5_vpu_enc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->dst_fmt.width;
+ s->r.height = inst->dst_fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ dev_dbg(inst->dev->dev, "%s: V4L2_SEL_TGT_CROP width: %u | height: %u\n",
+ __func__, s->r.width, s->r.height);
+
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = inst->src_fmt.width;
+ s->r.height = inst->src_fmt.height;
+
+ return 0;
+}
+
+static int wave5_vpu_enc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *ec)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_both_queues_are_streaming(inst))
+ return 0;
+
+ switch (ec->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ return 0;
+
+ m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
+ m2m_ctx->is_draining = true;
+ break;
+ case V4L2_ENC_CMD_START:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wave5_vpu_enc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe.numerator = 1;
+ a->parm.output.timeperframe.denominator = inst->frame_rate;
+
+ dev_dbg(inst->dev->dev, "%s: numerator: %u | denominator: %u\n",
+ __func__, a->parm.output.timeperframe.numerator,
+ a->parm.output.timeperframe.denominator);
+
+ return 0;
+}
+
+static int wave5_vpu_enc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ if (a->parm.output.timeperframe.denominator && a->parm.output.timeperframe.numerator) {
+ inst->frame_rate = a->parm.output.timeperframe.denominator /
+ a->parm.output.timeperframe.numerator;
+ } else {
+ a->parm.output.timeperframe.numerator = 1;
+ a->parm.output.timeperframe.denominator = inst->frame_rate;
+ }
+
+ dev_dbg(inst->dev->dev, "%s: numerator: %u | denominator: %u\n",
+ __func__, a->parm.output.timeperframe.numerator,
+ a->parm.output.timeperframe.denominator);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops wave5_vpu_enc_ioctl_ops = {
+ .vidioc_querycap = wave5_vpu_enc_querycap,
+ .vidioc_enum_framesizes = wave5_vpu_enc_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = wave5_vpu_enc_enum_fmt_cap,
+ .vidioc_s_fmt_vid_cap_mplane = wave5_vpu_enc_s_fmt_cap,
+ .vidioc_g_fmt_vid_cap_mplane = wave5_vpu_enc_g_fmt_cap,
+ .vidioc_try_fmt_vid_cap_mplane = wave5_vpu_enc_try_fmt_cap,
+
+ .vidioc_enum_fmt_vid_out = wave5_vpu_enc_enum_fmt_out,
+ .vidioc_s_fmt_vid_out_mplane = wave5_vpu_enc_s_fmt_out,
+	.vidioc_g_fmt_vid_out_mplane = wave5_vpu_enc_g_fmt_out,
+ .vidioc_try_fmt_vid_out_mplane = wave5_vpu_enc_try_fmt_out,
+
+ .vidioc_g_selection = wave5_vpu_enc_g_selection,
+ .vidioc_s_selection = wave5_vpu_enc_s_selection,
+
+ .vidioc_g_parm = wave5_vpu_enc_g_parm,
+ .vidioc_s_parm = wave5_vpu_enc_s_parm,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = wave5_vpu_enc_encoder_cmd,
+
+ .vidioc_subscribe_event = wave5_vpu_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int wave5_vpu_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vpu_instance *inst = wave5_ctrl_to_vpu_inst(ctrl);
+
+ dev_dbg(inst->dev->dev, "%s: name: %s | value: %d\n", __func__, ctrl->name, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
+ inst->encode_aud = ctrl->val;
+ break;
+ case V4L2_CID_HFLIP:
+ inst->mirror_direction |= (ctrl->val << 1);
+ break;
+ case V4L2_CID_VFLIP:
+ inst->mirror_direction |= ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ inst->rot_angle = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+ inst->vbv_buf_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
+ inst->rc_mode = 0;
+ break;
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
+ inst->rc_mode = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ inst->bit_rate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ inst->enc_param.avc_idr_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ inst->enc_param.independ_slice_mode = ctrl->val;
+ inst->enc_param.avc_slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ inst->enc_param.independ_slice_mode_arg = ctrl->val;
+ inst->enc_param.avc_slice_arg = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ inst->rc_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ inst->enc_param.mb_level_rc_enable = ctrl->val;
+ inst->enc_param.cu_level_rc_enable = ctrl->val;
+ inst->enc_param.hvs_qp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ inst->enc_param.profile = HEVC_PROFILE_MAIN;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ inst->enc_param.profile = HEVC_PROFILE_STILLPICTURE;
+ inst->enc_param.en_still_picture = 1;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ inst->enc_param.profile = HEVC_PROFILE_MAIN10;
+ inst->bit_depth = 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ inst->enc_param.level = 10 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ inst->enc_param.level = 20 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ inst->enc_param.level = 21 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ inst->enc_param.level = 30 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ inst->enc_param.level = 31 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ inst->enc_param.level = 40 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ inst->enc_param.level = 41 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ inst->enc_param.level = 50 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ inst->enc_param.level = 51 * 3;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ inst->enc_param.level = 52 * 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+ inst->enc_param.min_qp_i = ctrl->val;
+ inst->enc_param.min_qp_p = ctrl->val;
+ inst->enc_param.min_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+ inst->enc_param.max_qp_i = ctrl->val;
+ inst->enc_param.max_qp_p = ctrl->val;
+ inst->enc_param.max_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ inst->enc_param.intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED:
+ inst->enc_param.disable_deblk = 1;
+ inst->enc_param.sao_enable = 0;
+ inst->enc_param.lf_cross_slice_boundary_enable = 0;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.sao_enable = 1;
+ inst->enc_param.lf_cross_slice_boundary_enable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.sao_enable = 1;
+ inst->enc_param.lf_cross_slice_boundary_enable = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2:
+ inst->enc_param.beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2:
+ inst->enc_param.tc_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE:
+ inst->enc_param.decoding_refresh_type = 0;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA:
+ inst->enc_param.decoding_refresh_type = 1;
+ break;
+ case V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR:
+ inst->enc_param.decoding_refresh_type = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD:
+ inst->enc_param.intra_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU:
+ inst->enc_param.lossless_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED:
+ inst->enc_param.const_intra_pred_flag = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT:
+ inst->enc_param.wpp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING:
+ inst->enc_param.strong_intra_smooth_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1:
+ inst->enc_param.max_num_merge = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION:
+ inst->enc_param.tmvp_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ inst->enc_param.profile = H264_PROFILE_BP;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ inst->enc_param.profile = H264_PROFILE_MP;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ inst->enc_param.profile = H264_PROFILE_EXTENDED;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ inst->enc_param.profile = H264_PROFILE_HP;
+ inst->bit_depth = 8;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10:
+ inst->enc_param.profile = H264_PROFILE_HIGH10;
+ inst->bit_depth = 10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422:
+ inst->enc_param.profile = H264_PROFILE_HIGH422;
+ inst->bit_depth = 10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE:
+ inst->enc_param.profile = H264_PROFILE_HIGH444;
+ inst->bit_depth = 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ inst->enc_param.level = 10;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ inst->enc_param.level = 9;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ inst->enc_param.level = 11;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ inst->enc_param.level = 12;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ inst->enc_param.level = 13;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ inst->enc_param.level = 20;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ inst->enc_param.level = 21;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ inst->enc_param.level = 22;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ inst->enc_param.level = 30;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ inst->enc_param.level = 31;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ inst->enc_param.level = 32;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ inst->enc_param.level = 40;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ inst->enc_param.level = 41;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ inst->enc_param.level = 42;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ inst->enc_param.level = 50;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ inst->enc_param.level = 51;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ inst->enc_param.min_qp_i = ctrl->val;
+ inst->enc_param.min_qp_p = ctrl->val;
+ inst->enc_param.min_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ inst->enc_param.max_qp_i = ctrl->val;
+ inst->enc_param.max_qp_p = ctrl->val;
+ inst->enc_param.max_qp_b = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ inst->enc_param.intra_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
+ inst->enc_param.disable_deblk = 1;
+ inst->enc_param.lf_cross_slice_boundary_enable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.lf_cross_slice_boundary_enable = 1;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
+ inst->enc_param.disable_deblk = 0;
+ inst->enc_param.lf_cross_slice_boundary_enable = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ inst->enc_param.beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ inst->enc_param.tc_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ inst->enc_param.transform8x8_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ inst->enc_param.const_intra_pred_flag = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
+ inst->enc_param.chroma_cb_qp_offset = ctrl->val;
+ inst->enc_param.chroma_cr_qp_offset = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ inst->enc_param.intra_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ inst->enc_param.entropy_coding_mode = ctrl->val;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops wave5_vpu_enc_ctrl_ops = {
+ .s_ctrl = wave5_vpu_enc_s_ctrl,
+};
+
+static int wave5_vpu_enc_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_pix_format_mplane inst_format =
+ (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? inst->src_fmt : inst->dst_fmt;
+ unsigned int i;
+
+ dev_dbg(inst->dev->dev, "%s: num_buffers: %u | num_planes: %u | type: %u\n", __func__,
+ *num_buffers, *num_planes, q->type);
+
+ if (*num_planes) {
+ if (inst_format.num_planes != *num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *num_planes; i++) {
+ if (sizes[i] < inst_format.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *num_planes = inst_format.num_planes;
+ for (i = 0; i < *num_planes; i++) {
+ sizes[i] = inst_format.plane_fmt[i].sizeimage;
+ dev_dbg(inst->dev->dev, "%s: size[%u]: %u\n", __func__, i, sizes[i]);
+ }
+ }
+
+ dev_dbg(inst->dev->dev, "%s: size: %u\n", __func__, sizes[0]);
+
+ return 0;
+}
+
+static void wave5_vpu_enc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vpu_instance *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ dev_dbg(inst->dev->dev, "%s: type: %4u index: %4u size: ([0]=%4lu, [1]=%4lu, [2]=%4lu)\n",
+ __func__, vb->type, vb->index, vb2_plane_size(&vbuf->vb2_buf, 0),
+ vb2_plane_size(&vbuf->vb2_buf, 1), vb2_plane_size(&vbuf->vb2_buf, 2));
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf->sequence = inst->queued_src_buf_num++;
+ else
+ vbuf->sequence = inst->queued_dst_buf_num++;
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+}
+
+static void wave5_set_enc_openparam(struct enc_open_param *open_param,
+ struct vpu_instance *inst)
+{
+ struct enc_wave_param input = inst->enc_param;
+ u32 num_ctu_row = ALIGN(inst->dst_fmt.height, 64) / 64;
+ u32 num_mb_row = ALIGN(inst->dst_fmt.height, 16) / 16;
+
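+	/*
+	 * Fixed defaults for the GOP preset, HVS QP and noise-reduction
+	 * weights; the user-controlled parameters are applied below.
+	 */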
+ open_param->wave_param.gop_preset_idx = PRESET_IDX_IPP_SINGLE;
+ open_param->wave_param.hvs_qp_scale = 2;
+ open_param->wave_param.hvs_max_delta_qp = 10;
+ open_param->wave_param.skip_intra_trans = 1;
+ open_param->wave_param.intra_nx_n_enable = 1;
+ open_param->wave_param.nr_intra_weight_y = 7;
+ open_param->wave_param.nr_intra_weight_cb = 7;
+ open_param->wave_param.nr_intra_weight_cr = 7;
+ open_param->wave_param.nr_inter_weight_y = 4;
+ open_param->wave_param.nr_inter_weight_cb = 4;
+ open_param->wave_param.nr_inter_weight_cr = 4;
+ open_param->wave_param.rdo_skip = 1;
+ open_param->wave_param.lambda_scaling_enable = 1;
+
+ open_param->line_buf_int_en = true;
+ open_param->pic_width = inst->dst_fmt.width;
+ open_param->pic_height = inst->dst_fmt.height;
+ open_param->frame_rate_info = inst->frame_rate;
+ open_param->rc_enable = inst->rc_enable;
+ if (inst->rc_enable) {
+ open_param->wave_param.initial_rc_qp = -1;
+ open_param->wave_param.rc_weight_param = 16;
+ open_param->wave_param.rc_weight_buf = 128;
+ }
+ open_param->wave_param.mb_level_rc_enable = input.mb_level_rc_enable;
+ open_param->wave_param.cu_level_rc_enable = input.cu_level_rc_enable;
+ open_param->wave_param.hvs_qp_enable = input.hvs_qp_enable;
+ open_param->bit_rate = inst->bit_rate;
+ open_param->vbv_buffer_size = inst->vbv_buf_size;
+ if (inst->rc_mode == 0)
+ open_param->vbv_buffer_size = 3000;
+ open_param->wave_param.profile = input.profile;
+ open_param->wave_param.en_still_picture = input.en_still_picture;
+ open_param->wave_param.level = input.level;
+ open_param->wave_param.internal_bit_depth = inst->bit_depth;
+ open_param->wave_param.intra_qp = input.intra_qp;
+ open_param->wave_param.min_qp_i = input.min_qp_i;
+ open_param->wave_param.max_qp_i = input.max_qp_i;
+ open_param->wave_param.min_qp_p = input.min_qp_p;
+ open_param->wave_param.max_qp_p = input.max_qp_p;
+ open_param->wave_param.min_qp_b = input.min_qp_b;
+ open_param->wave_param.max_qp_b = input.max_qp_b;
+ open_param->wave_param.disable_deblk = input.disable_deblk;
+ open_param->wave_param.lf_cross_slice_boundary_enable =
+ input.lf_cross_slice_boundary_enable;
+ open_param->wave_param.tc_offset_div2 = input.tc_offset_div2;
+ open_param->wave_param.beta_offset_div2 = input.beta_offset_div2;
+ open_param->wave_param.decoding_refresh_type = input.decoding_refresh_type;
+ open_param->wave_param.intra_period = input.intra_period;
+ if (inst->std == W_HEVC_ENC) {
+ if (input.intra_period == 0) {
+ open_param->wave_param.decoding_refresh_type = DEC_REFRESH_TYPE_IDR;
+ open_param->wave_param.intra_period = input.avc_idr_period;
+ }
+ } else {
+ open_param->wave_param.avc_idr_period = input.avc_idr_period;
+ }
+ open_param->wave_param.entropy_coding_mode = input.entropy_coding_mode;
+ open_param->wave_param.lossless_enable = input.lossless_enable;
+ open_param->wave_param.const_intra_pred_flag = input.const_intra_pred_flag;
+ open_param->wave_param.wpp_enable = input.wpp_enable;
+ open_param->wave_param.strong_intra_smooth_enable = input.strong_intra_smooth_enable;
+ open_param->wave_param.max_num_merge = input.max_num_merge;
+ open_param->wave_param.tmvp_enable = input.tmvp_enable;
+ open_param->wave_param.transform8x8_enable = input.transform8x8_enable;
+ open_param->wave_param.chroma_cb_qp_offset = input.chroma_cb_qp_offset;
+ open_param->wave_param.chroma_cr_qp_offset = input.chroma_cr_qp_offset;
+ open_param->wave_param.independ_slice_mode = input.independ_slice_mode;
+ open_param->wave_param.independ_slice_mode_arg = input.independ_slice_mode_arg;
+ open_param->wave_param.avc_slice_mode = input.avc_slice_mode;
+ open_param->wave_param.avc_slice_arg = input.avc_slice_arg;
+ open_param->wave_param.intra_mb_refresh_mode = input.intra_mb_refresh_mode;
+ if (input.intra_mb_refresh_mode != REFRESH_MB_MODE_NONE) {
+ if (num_mb_row >= input.intra_mb_refresh_arg)
+ open_param->wave_param.intra_mb_refresh_arg =
+ num_mb_row / input.intra_mb_refresh_arg;
+ else
+ open_param->wave_param.intra_mb_refresh_arg = num_mb_row;
+ }
+ open_param->wave_param.intra_refresh_mode = input.intra_refresh_mode;
+ if (input.intra_refresh_mode != 0) {
+ if (num_ctu_row >= input.intra_refresh_arg)
+ open_param->wave_param.intra_refresh_arg =
+ num_ctu_row / input.intra_refresh_arg;
+ else
+ open_param->wave_param.intra_refresh_arg = num_ctu_row;
+ }
+}
+
+static int initialize_sequence(struct vpu_instance *inst)
+{
+ struct enc_initial_info initial_info;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ ret = wave5_vpu_enc_issue_seq_init(inst);
+ if (ret) {
+ dev_err(inst->dev->dev, "%s: wave5_vpu_enc_issue_seq_init, fail: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_ENC_TIMEOUT) < 0) {
+ dev_err(inst->dev->dev, "%s: wave5_vpu_wait_interrupt failed\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = wave5_vpu_enc_complete_seq_init(inst, &initial_info);
+ if (ret)
+ return ret;
+
+ dev_dbg(inst->dev->dev, "%s: min_frame_buffer: %u | min_source_buffer: %u\n",
+ __func__, initial_info.min_frame_buffer_count,
+ initial_info.min_src_frame_count);
+ inst->min_src_buf_count = initial_info.min_src_frame_count +
+ COMMAND_QUEUE_DEPTH;
+
+ ctrl = v4l2_ctrl_find(&inst->v4l2_ctrl_hdl,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->min_src_buf_count);
+
+ inst->fbc_buf_count = initial_info.min_frame_buffer_count;
+
+ return 0;
+}
+
+static int prepare_fb(struct vpu_instance *inst)
+{
+ u32 fb_stride = ALIGN(inst->dst_fmt.width, 32);
+ u32 fb_height = ALIGN(inst->dst_fmt.height, 32);
+ int i, ret = 0;
+
+ for (i = 0; i < inst->fbc_buf_count; i++) {
+ u32 luma_size = fb_stride * fb_height;
+ u32 chroma_size = ALIGN(fb_stride / 2, 16) * fb_height;
+
+ inst->frame_vbuf[i].size = luma_size + chroma_size;
+ ret = wave5_vdi_allocate_dma_memory(inst->dev, &inst->frame_vbuf[i]);
+ if (ret < 0) {
+ dev_err(inst->dev->dev, "%s: failed to allocate FBC buffer %zu\n",
+ __func__, inst->frame_vbuf[i].size);
+ goto free_buffers;
+ }
+
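+ /*
+ * The chroma addresses are not known yet; every buffer flagged
+ * with update_fb_info gets buf_cb recomputed from buf_y plus the
+ * luma size in wave5_vpu_enc_register_frame_buffer().
+ */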
+ inst->frame_buf[i].buf_y = inst->frame_vbuf[i].daddr;
+ inst->frame_buf[i].buf_cb = (dma_addr_t)-1;
+ inst->frame_buf[i].buf_cr = (dma_addr_t)-1;
+ inst->frame_buf[i].update_fb_info = true;
+ inst->frame_buf[i].size = inst->frame_vbuf[i].size;
+ }
+
+ ret = wave5_vpu_enc_register_frame_buffer(inst, inst->fbc_buf_count, fb_stride,
+ fb_height, COMPRESSED_FRAME_MAP);
+ if (ret) {
+ dev_err(inst->dev->dev,
+ "%s: wave5_vpu_enc_register_frame_buffer, fail: %d\n",
+ __func__, ret);
+ goto free_buffers;
+ }
+
+ return 0;
+free_buffers:
+ for (i = 0; i < inst->fbc_buf_count; i++)
+ wave5_vpu_dec_reset_framebuffer(inst, i);
+ return ret;
+}
+
+static int wave5_vpu_enc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ int ret = 0;
+
+ v4l2_m2m_update_start_streaming_state(m2m_ctx, q);
+
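+ /*
+ * Streamon advances the instance state machine:
+ * NONE -> OPEN on the first OUTPUT streamon, then
+ * OPEN -> INIT_SEQ -> PIC_RUN once the CAPTURE queue is also
+ * streaming.
+ */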
+ if (inst->state == VPU_INST_STATE_NONE && q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct enc_open_param open_param;
+
+ memset(&open_param, 0, sizeof(struct enc_open_param));
+
+ wave5_set_enc_openparam(&open_param, inst);
+
+ ret = wave5_vpu_enc_open(inst, &open_param);
+ if (ret) {
+ dev_dbg(inst->dev->dev, "%s: wave5_vpu_enc_open, fail: %d\n",
+ __func__, ret);
+ goto return_buffers;
+ }
+
+ if (inst->mirror_direction) {
+ wave5_vpu_enc_give_command(inst, ENABLE_MIRRORING, NULL);
+ wave5_vpu_enc_give_command(inst, SET_MIRROR_DIRECTION,
+ &inst->mirror_direction);
+ }
+ if (inst->rot_angle) {
+ wave5_vpu_enc_give_command(inst, ENABLE_ROTATION, NULL);
+ wave5_vpu_enc_give_command(inst, SET_ROTATION_ANGLE, &inst->rot_angle);
+ }
+
+ ret = switch_state(inst, VPU_INST_STATE_OPEN);
+ if (ret)
+ goto return_buffers;
+ }
+ if (inst->state == VPU_INST_STATE_OPEN && m2m_ctx->cap_q_ctx.q.streaming) {
+ ret = initialize_sequence(inst);
+ if (ret) {
+ dev_warn(inst->dev->dev, "Sequence initialization failed: %d\n", ret);
+ goto return_buffers;
+ }
+ ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
+ if (ret)
+ goto return_buffers;
+ /*
+ * The sequence must be analyzed first to calculate the proper
+ * size of the auxiliary buffers.
+ */
+ ret = prepare_fb(inst);
+ if (ret) {
+ dev_warn(inst->dev->dev, "Framebuffer preparation, fail: %d\n", ret);
+ goto return_buffers;
+ }
+
+ ret = switch_state(inst, VPU_INST_STATE_PIC_RUN);
+ }
+ if (ret)
+ goto return_buffers;
+
+ return 0;
+return_buffers:
+ wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void streamoff_output(struct vpu_instance *inst, struct vb2_queue *q)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_v4l2_buffer *buf;
+
+ while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void streamoff_capture(struct vpu_instance *inst, struct vb2_queue *q)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ struct vb2_v4l2_buffer *buf;
+
+ while ((buf = v4l2_m2m_dst_buf_remove(m2m_ctx))) {
+ dev_dbg(inst->dev->dev, "%s: buf type %4u | index %4u\n",
+ __func__, buf->vb2_buf.type, buf->vb2_buf.index);
+ vb2_set_plane_payload(&buf->vb2_buf, 0, 0);
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+
+ v4l2_m2m_clear_state(m2m_ctx);
+}
+
+static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q)
+{
+ struct vpu_instance *inst = vb2_get_drv_priv(q);
+ bool check_cmd = true;
+
+ /*
+ * Note that we don't need m2m_ctx->next_buf_last for this driver, so we
+ * don't call v4l2_m2m_update_stop_streaming_state().
+ */
+
+ dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
+
+ if (wave5_vpu_both_queues_are_streaming(inst))
+ switch_state(inst, VPU_INST_STATE_STOP);
+
+ while (check_cmd) {
+ struct queue_status_info q_status;
+ struct enc_output_info enc_output_info;
+
+ wave5_vpu_enc_give_command(inst, ENC_GET_QUEUE_STATUS, &q_status);
+
+ if (q_status.report_queue_count == 0)
+ break;
+
+ if (wave5_vpu_wait_interrupt(inst, VPU_ENC_TIMEOUT) < 0)
+ break;
+
+ if (wave5_vpu_enc_get_output_info(inst, &enc_output_info))
+ dev_dbg(inst->dev->dev, "Getting encoding results from fw, fail\n");
+ }
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ streamoff_output(inst, q);
+ else
+ streamoff_capture(inst, q);
+}
+
+static const struct vb2_ops wave5_vpu_enc_vb2_ops = {
+ .queue_setup = wave5_vpu_enc_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_queue = wave5_vpu_enc_buf_queue,
+ .start_streaming = wave5_vpu_enc_start_streaming,
+ .stop_streaming = wave5_vpu_enc_stop_streaming,
+};
+
+static void wave5_set_default_format(struct v4l2_pix_format_mplane *src_fmt,
+ struct v4l2_pix_format_mplane *dst_fmt)
+{
+ unsigned int src_pix_fmt = enc_fmt_list[VPU_FMT_TYPE_RAW][0].v4l2_pix_fmt;
+ const struct v4l2_format_info *src_fmt_info = v4l2_format_info(src_pix_fmt);
+
+ src_fmt->pixelformat = src_pix_fmt;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->flags = 0;
+ src_fmt->num_planes = src_fmt_info->mem_planes;
+ wave5_update_pix_fmt(src_fmt, 416, 240);
+
+ dst_fmt->pixelformat = enc_fmt_list[VPU_FMT_TYPE_CODEC][0].v4l2_pix_fmt;
+ dst_fmt->field = V4L2_FIELD_NONE;
+ dst_fmt->flags = 0;
+ dst_fmt->num_planes = 1;
+ wave5_update_pix_fmt(dst_fmt, 416, 240);
+}
+
+static int wave5_vpu_enc_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ return wave5_vpu_queue_init(priv, src_vq, dst_vq, &wave5_vpu_enc_vb2_ops);
+}
+
+static const struct vpu_instance_ops wave5_vpu_enc_inst_ops = {
+ .finish_process = wave5_vpu_enc_finish_encode,
+};
+
+static void wave5_vpu_enc_device_run(void *priv)
+{
+ struct vpu_instance *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+ u32 fail_res = 0;
+ int ret = 0;
+
+ switch (inst->state) {
+ case VPU_INST_STATE_PIC_RUN:
+ ret = start_encode(inst, &fail_res);
+ if (ret) {
+ if (ret == -EINVAL)
+ dev_err(inst->dev->dev,
+ "Frame encoding on m2m context (%p), fail: %d (res: %d)\n",
+ m2m_ctx, ret, fail_res);
+ else if (ret == -EAGAIN)
+ dev_dbg(inst->dev->dev, "Missing buffers for encode, try again\n");
+ break;
+ }
+ dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
+ return;
+ default:
+ WARN(1, "Execution of a job in state %s is invalid.\n",
+ state_to_str(inst->state));
+ break;
+ }
+ dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
+ v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
+}
+
+static int wave5_vpu_enc_job_ready(void *priv)
+{
+ struct vpu_instance *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
+
+ switch (inst->state) {
+ case VPU_INST_STATE_NONE:
+ dev_dbg(inst->dev->dev, "Encoder must be open to start queueing M2M jobs!\n");
+ return false;
+ case VPU_INST_STATE_PIC_RUN:
+ if (m2m_ctx->is_draining || v4l2_m2m_num_src_bufs_ready(m2m_ctx)) {
+ dev_dbg(inst->dev->dev, "Encoder ready for a job, state: %s\n",
+ state_to_str(inst->state));
+ return true;
+ }
+ fallthrough;
+ default:
+ dev_dbg(inst->dev->dev,
+ "Encoder not ready for a job, state: %s, %s draining, %d src bufs ready\n",
+ state_to_str(inst->state), m2m_ctx->is_draining ? "is" : "is not",
+ v4l2_m2m_num_src_bufs_ready(m2m_ctx));
+ break;
+ }
+ return false;
+}
+
+static const struct v4l2_m2m_ops wave5_vpu_enc_m2m_ops = {
+ .device_run = wave5_vpu_enc_device_run,
+ .job_ready = wave5_vpu_enc_job_ready,
+};
+
+static int wave5_vpu_open_enc(struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ struct vpu_device *dev = video_drvdata(filp);
+ struct vpu_instance *inst = NULL;
+ struct v4l2_ctrl_handler *v4l2_ctrl_hdl;
+ int ret = 0;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ v4l2_ctrl_hdl = &inst->v4l2_ctrl_hdl;
+
+ inst->dev = dev;
+ inst->type = VPU_INST_TYPE_ENC;
+ inst->ops = &wave5_vpu_enc_inst_ops;
+
+ inst->codec_info = kzalloc(sizeof(*inst->codec_info), GFP_KERNEL);
+ if (!inst->codec_info) {
+ kfree(inst);
+ return -ENOMEM;
+ }
+
+ v4l2_fh_init(&inst->v4l2_fh, vdev);
+ filp->private_data = &inst->v4l2_fh;
+ v4l2_fh_add(&inst->v4l2_fh);
+
+ INIT_LIST_HEAD(&inst->list);
+ list_add_tail(&inst->list, &dev->instances);
+
+ inst->v4l2_m2m_dev = inst->dev->v4l2_m2m_enc_dev;
+ inst->v4l2_fh.m2m_ctx =
+ v4l2_m2m_ctx_init(inst->v4l2_m2m_dev, inst, wave5_vpu_enc_queue_init);
+ if (IS_ERR(inst->v4l2_fh.m2m_ctx)) {
+ ret = PTR_ERR(inst->v4l2_fh.m2m_ctx);
+ goto cleanup_inst;
+ }
+ v4l2_m2m_set_src_buffered(inst->v4l2_fh.m2m_ctx, true);
+
+ v4l2_ctrl_handler_init(v4l2_ctrl_hdl, 50);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10, 0,
+ V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 0,
+ V4L2_MPEG_VIDEO_HEVC_LEVEL_1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
+ 0, 63, 1, 8);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
+ 0, 63, 1, 51);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
+ 0, 63, 1, 30);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE,
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, 0,
+ V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE,
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR, 0,
+ V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD,
+ 0, 2047, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1,
+ 1, 2, 1, 2);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION,
+ 0, 1, 1, 1);
+
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE, 0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ 0, 63, 1, 8);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 63, 1, 51);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ 0, 63, 1, 30);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, 0,
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+ -6, 6, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET,
+ -12, 12, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ 0, 2047, 1, 0);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, 0,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_AU_DELIMITER,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VBV_SIZE,
+ 10, 3000, 1, 1000);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 0, 700000000, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0, 2047, 1, 0);
+ v4l2_ctrl_new_std_menu(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB, 0,
+ V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ 0, 0xFFFF, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(v4l2_ctrl_hdl, &wave5_vpu_enc_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 1);
+
+ if (v4l2_ctrl_hdl->error) {
+ ret = -ENODEV;
+ goto cleanup_inst;
+ }
+
+ inst->v4l2_fh.ctrl_handler = v4l2_ctrl_hdl;
+ v4l2_ctrl_handler_setup(v4l2_ctrl_hdl);
+
+ wave5_set_default_format(&inst->src_fmt, &inst->dst_fmt);
+ inst->colorspace = V4L2_COLORSPACE_REC709;
+ inst->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ inst->quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ inst->frame_rate = 30;
+
+ init_completion(&inst->irq_done);
+
+ inst->id = ida_alloc(&inst->dev->inst_ida, GFP_KERNEL);
+ if (inst->id < 0) {
+ dev_warn(inst->dev->dev, "Allocating instance ID, fail: %d\n", inst->id);
+ ret = inst->id;
+ goto cleanup_inst;
+ }
+
+ wave5_vdi_allocate_sram(inst->dev);
+
+ return 0;
+
+cleanup_inst:
+ wave5_cleanup_instance(inst);
+ return ret;
+}
+
+static int wave5_vpu_enc_release(struct file *filp)
+{
+ return wave5_vpu_release_device(filp, wave5_vpu_enc_close, "encoder");
+}
+
+static const struct v4l2_file_operations wave5_vpu_enc_fops = {
+ .owner = THIS_MODULE,
+ .open = wave5_vpu_open_enc,
+ .release = wave5_vpu_enc_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+int wave5_vpu_enc_register_device(struct vpu_device *dev)
+{
+ struct video_device *vdev_enc;
+ int ret;
+
+ vdev_enc = devm_kzalloc(dev->v4l2_dev.dev, sizeof(*vdev_enc), GFP_KERNEL);
+ if (!vdev_enc)
+ return -ENOMEM;
+
+ dev->v4l2_m2m_enc_dev = v4l2_m2m_init(&wave5_vpu_enc_m2m_ops);
+ if (IS_ERR(dev->v4l2_m2m_enc_dev)) {
+ ret = PTR_ERR(dev->v4l2_m2m_enc_dev);
+ dev_err(dev->dev, "v4l2_m2m_init, fail: %d\n", ret);
+ return -EINVAL;
+ }
+
+ dev->video_dev_enc = vdev_enc;
+
+ strscpy(vdev_enc->name, VPU_ENC_DEV_NAME, sizeof(vdev_enc->name));
+ vdev_enc->fops = &wave5_vpu_enc_fops;
+ vdev_enc->ioctl_ops = &wave5_vpu_enc_ioctl_ops;
+ vdev_enc->release = video_device_release_empty;
+ vdev_enc->v4l2_dev = &dev->v4l2_dev;
+ vdev_enc->vfl_dir = VFL_DIR_M2M;
+ vdev_enc->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vdev_enc->lock = &dev->dev_lock;
+
+ ret = video_register_device(vdev_enc, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ return ret;
+
+ video_set_drvdata(vdev_enc, dev);
+
+ return 0;
+}
+
+void wave5_vpu_enc_unregister_device(struct vpu_device *dev)
+{
+ video_unregister_device(dev->video_dev_enc);
+ if (dev->v4l2_m2m_enc_dev)
+ v4l2_m2m_release(dev->v4l2_m2m_enc_dev);
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
new file mode 100644
index 000000000..0d90b5820
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - platform driver
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include "wave5-vpu.h"
+#include "wave5-regdefine.h"
+#include "wave5-vpuconfig.h"
+#include "wave5.h"
+
+#define VPU_PLATFORM_DEVICE_NAME "vdec"
+#define VPU_CLK_NAME "vcodec"
+
+#define WAVE5_IS_ENC BIT(0)
+#define WAVE5_IS_DEC BIT(1)
+
+struct wave5_match_data {
+ int flags;
+ const char *fw_name;
+};
+
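+/*
+ * Wait for the IRQ thread to signal inst->irq_done or time out after
+ * the given number of milliseconds; the completion is re-armed for the
+ * next wait.
+ */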
+int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&inst->irq_done,
+ msecs_to_jiffies(timeout));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ reinit_completion(&inst->irq_done);
+
+ return 0;
+}
+
+static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
+{
+ u32 seq_done;
+ u32 cmd_done;
+ u32 irq_reason;
+ struct vpu_instance *inst;
+ struct vpu_device *dev = dev_id;
+
+ if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS)) {
+ irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
+ wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
+ wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
+
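+ /*
+ * Demultiplex the shared interrupt: each instance owns one bit in
+ * the SEQ_DONE and QUEUE_CMD_DONE bitmaps, which is cleared before
+ * waking the matching instance.
+ */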
+ list_for_each_entry(inst, &dev->instances, list) {
+ seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
+ cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
+
+ if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
+ irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
+ if (seq_done & BIT(inst->id)) {
+ seq_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO,
+ seq_done);
+ complete(&inst->irq_done);
+ }
+ }
+
+ if (irq_reason & BIT(INT_WAVE5_DEC_PIC) ||
+ irq_reason & BIT(INT_WAVE5_ENC_PIC)) {
+ if (cmd_done & BIT(inst->id)) {
+ cmd_done &= ~BIT(inst->id);
+ wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST,
+ cmd_done);
+ inst->ops->finish_process(inst);
+ }
+ }
+
+ wave5_vpu_clear_interrupt(inst, irq_reason);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int wave5_vpu_load_firmware(struct device *dev, const char *fw_name,
+ u32 *revision)
+{
+ const struct firmware *fw;
+ int ret;
+ unsigned int product_id;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ dev_err(dev, "request_firmware, fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = wave5_vpu_init_with_bitcode(dev, (u8 *)fw->data, fw->size);
+ if (ret) {
+ dev_err(dev, "vpu_init_with_bitcode, fail: %d\n", ret);
+ release_firmware(fw);
+ return ret;
+ }
+ release_firmware(fw);
+
+ ret = wave5_vpu_get_version_info(dev, revision, &product_id);
+ if (ret) {
+ dev_err(dev, "vpu_get_version_info fail: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: enum product_id: %08x, fw revision: %u\n",
+ __func__, product_id, *revision);
+
+ return 0;
+}
+
+static int wave5_vpu_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct vpu_device *dev;
+ const struct wave5_match_data *match_data;
+ u32 fw_revision;
+
+ match_data = device_get_match_data(&pdev->dev);
+ if (!match_data) {
+ dev_err(&pdev->dev, "missing device match data\n");
+ return -EINVAL;
+ }
+
+ /* physical addresses limited to 32 bits */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vdb_register = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->vdb_register))
+ return PTR_ERR(dev->vdb_register);
+ ida_init(&dev->inst_ida);
+
+ mutex_init(&dev->dev_lock);
+ mutex_init(&dev->hw_lock);
+ dev_set_drvdata(&pdev->dev, dev);
+ dev->dev = &pdev->dev;
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dev->clks);
+
+ /* continue without clock, assume externally managed */
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Getting clocks, fail: %d\n", ret);
+ ret = 0;
+ }
+ dev->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(dev->num_clks, dev->clks);
+ if (ret) {
+ dev_err(&pdev->dev, "Enabling clocks, fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "sram-size",
+ &dev->sram_size);
+ if (ret) {
+ dev_warn(&pdev->dev, "sram-size not found\n");
+ dev->sram_size = 0;
+ }
+
+ dev->sram_pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
+ if (!dev->sram_pool)
+ dev_warn(&pdev->dev, "sram node not found\n");
+
+ dev->product_code = wave5_vdi_read_register(dev, VPU_PRODUCT_CODE_REGISTER);
+ ret = wave5_vdi_init(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "wave5_vdi_init, fail: %d\n", ret);
+ goto err_clk_dis;
+ }
+ dev->product = wave5_vpu_get_product_id(dev);
+
+ INIT_LIST_HEAD(&dev->instances);
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "v4l2_device_register, fail: %d\n", ret);
+ goto err_vdi_release;
+ }
+
+ if (match_data->flags & WAVE5_IS_DEC) {
+ ret = wave5_vpu_dec_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "wave5_vpu_dec_register_device, fail: %d\n", ret);
+ goto err_v4l2_unregister;
+ }
+ }
+ if (match_data->flags & WAVE5_IS_ENC) {
+ ret = wave5_vpu_enc_register_device(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "wave5_vpu_enc_register_device, fail: %d\n", ret);
+ goto err_dec_unreg;
+ }
+ }
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ ret = -ENXIO;
+ goto err_enc_unreg;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, dev->irq, NULL,
+ wave5_vpu_irq_thread, IRQF_ONESHOT, "vpu_irq", dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Register interrupt handler, fail: %d\n", ret);
+ goto err_enc_unreg;
+ }
+
+ ret = wave5_vpu_load_firmware(&pdev->dev, match_data->fw_name, &fw_revision);
+ if (ret) {
+ dev_err(&pdev->dev, "wave5_vpu_load_firmware, fail: %d\n", ret);
+ goto err_enc_unreg;
+ }
+
+ dev_info(&pdev->dev, "Added wave5 driver with caps: %s %s\n",
+ (match_data->flags & WAVE5_IS_ENC) ? "'ENCODE'" : "",
+ (match_data->flags & WAVE5_IS_DEC) ? "'DECODE'" : "");
+ dev_info(&pdev->dev, "Product Code: 0x%x\n", dev->product_code);
+ dev_info(&pdev->dev, "Firmware Revision: %u\n", fw_revision);
+ return 0;
+
+err_enc_unreg:
+ if (match_data->flags & WAVE5_IS_ENC)
+ wave5_vpu_enc_unregister_device(dev);
+err_dec_unreg:
+ if (match_data->flags & WAVE5_IS_DEC)
+ wave5_vpu_dec_unregister_device(dev);
+err_v4l2_unregister:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_vdi_release:
+ wave5_vdi_release(&pdev->dev);
+err_clk_dis:
+ clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+
+ return ret;
+}
+
+static int wave5_vpu_remove(struct platform_device *pdev)
+{
+ struct vpu_device *dev = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&dev->dev_lock);
+ mutex_destroy(&dev->hw_lock);
+ clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
+ wave5_vpu_enc_unregister_device(dev);
+ wave5_vpu_dec_unregister_device(dev);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ wave5_vdi_release(&pdev->dev);
+ ida_destroy(&dev->inst_ida);
+
+ return 0;
+}
+
+static const struct wave5_match_data ti_wave521c_data = {
+ .flags = WAVE5_IS_ENC | WAVE5_IS_DEC,
+ .fw_name = "cnm/wave521c_k3_codec_fw.bin",
+};
+
+static const struct of_device_id wave5_dt_ids[] = {
+ { .compatible = "ti,j721s2-wave521c", .data = &ti_wave521c_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, wave5_dt_ids);
+
+static struct platform_driver wave5_vpu_driver = {
+ .driver = {
+ .name = VPU_PLATFORM_DEVICE_NAME,
+ .of_match_table = of_match_ptr(wave5_dt_ids),
+ },
+ .probe = wave5_vpu_probe,
+ .remove = wave5_vpu_remove,
+};
+
+module_platform_driver(wave5_vpu_driver);
+MODULE_DESCRIPTION("chips&media VPU V4L2 driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.h b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
new file mode 100644
index 000000000..32b7fd373
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - basic types
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+#ifndef __VPU_DRV_H__
+#define __VPU_DRV_H__
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+#include "wave5-vpuconfig.h"
+#include "wave5-vpuapi.h"
+
+#define VPU_BUF_SYNC_TO_DEVICE 0
+#define VPU_BUF_SYNC_FROM_DEVICE 1
+
+struct vpu_src_buffer {
+ struct v4l2_m2m_buffer v4l2_m2m_buf;
+ struct list_head list;
+ bool consumed;
+};
+
+struct vpu_dst_buffer {
+ struct v4l2_m2m_buffer v4l2_m2m_buf;
+ bool display;
+};
+
+enum vpu_fmt_type {
+ VPU_FMT_TYPE_CODEC = 0,
+ VPU_FMT_TYPE_RAW = 1
+};
+
+struct vpu_format {
+ unsigned int v4l2_pix_fmt;
+ unsigned int max_width;
+ unsigned int min_width;
+ unsigned int max_height;
+ unsigned int min_height;
+};
+
+static inline struct vpu_instance *wave5_to_vpu_inst(struct v4l2_fh *vfh)
+{
+ return container_of(vfh, struct vpu_instance, v4l2_fh);
+}
+
+static inline struct vpu_instance *wave5_ctrl_to_vpu_inst(struct v4l2_ctrl *vctrl)
+{
+ return container_of(vctrl->handler, struct vpu_instance, v4l2_ctrl_hdl);
+}
+
+static inline struct vpu_src_buffer *wave5_to_vpu_src_buf(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vpu_src_buffer, v4l2_m2m_buf.vb);
+}
+
+static inline struct vpu_dst_buffer *wave5_to_vpu_dst_buf(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vpu_dst_buffer, v4l2_m2m_buf.vb);
+}
+
+int wave5_vpu_wait_interrupt(struct vpu_instance *inst, unsigned int timeout);
+
+int wave5_vpu_dec_register_device(struct vpu_device *dev);
+void wave5_vpu_dec_unregister_device(struct vpu_device *dev);
+int wave5_vpu_enc_register_device(struct vpu_device *dev);
+void wave5_vpu_enc_unregister_device(struct vpu_device *dev);
+static inline bool wave5_vpu_both_queues_are_streaming(struct vpu_instance *inst)
+{
+ struct vb2_queue *vq_cap =
+ v4l2_m2m_get_vq(inst->v4l2_fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ struct vb2_queue *vq_out =
+ v4l2_m2m_get_vq(inst->v4l2_fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ return vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out);
+}
+
+#endif
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
new file mode 100644
index 000000000..1a3efb638
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Wave5 series multi-standard codec IP - helper functions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#include <linux/bug.h>
+#include "wave5-vpuapi.h"
+#include "wave5-regdefine.h"
+#include "wave5.h"
+
+#define DECODE_ALL_TEMPORAL_LAYERS 0
+#define DECODE_ALL_SPATIAL_LAYERS 0
+
+static int wave5_initialize_vpu(struct device *dev, u8 *code, size_t size)
+{
+ int ret;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (wave5_vpu_is_init(vpu_dev)) {
+ wave5_vpu_re_init(dev, (void *)code, size);
+ ret = -EBUSY;
+ goto err_out;
+ }
+
+ ret = wave5_vpu_reset(dev, SW_RESET_ON_BOOT);
+ if (ret)
+ goto err_out;
+
+ ret = wave5_vpu_init(dev, (void *)code, size);
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+}
+
+int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size)
+{
+ if (!bitcode || size == 0)
+ return -EINVAL;
+
+ return wave5_initialize_vpu(dev, bitcode, size);
+}
+
+int wave5_vpu_flush_instance(struct vpu_instance *inst)
+{
+ int ret = 0;
+ int retry = 0;
+
+ ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+ if (ret)
+ return ret;
+ do {
+ /*
+ * Repeat the FLUSH command until the firmware reports that the
+ * VPU isn't running anymore, giving up with -ETIMEDOUT after
+ * MAX_FIRMWARE_CALL_RETRY attempts
+ */
+ ret = wave5_vpu_hw_flush_instance(inst);
+ if (ret < 0 && ret != -EBUSY) {
+ dev_warn(inst->dev->dev, "Flush of %s instance with id: %d fail: %d\n",
+ inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id,
+ ret);
+ mutex_unlock(&inst->dev->hw_lock);
+ return ret;
+ }
+ if (ret == -EBUSY && retry++ >= MAX_FIRMWARE_CALL_RETRY) {
+ dev_warn(inst->dev->dev, "Flush of %s instance with id: %d timed out!\n",
+ inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id);
+ mutex_unlock(&inst->dev->hw_lock);
+ return -ETIMEDOUT;
+ }
+ } while (ret != 0);
+ mutex_unlock(&inst->dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_get_version_info(struct device *dev, u32 *revision, unsigned int *product_id)
+{
+ int ret;
+ struct vpu_device *vpu_dev = dev_get_drvdata(dev);
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_is_init(vpu_dev)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ if (product_id)
+ *product_id = vpu_dev->product;
+ ret = wave5_vpu_get_version(vpu_dev, revision);
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+}
+
+static int wave5_check_dec_open_param(struct vpu_instance *inst, struct dec_open_param *param)
+{
+ if (inst->id >= MAX_NUM_INSTANCE) {
+ dev_err(inst->dev->dev, "Too many simultaneous instances: %d (max: %u)\n",
+ inst->id, MAX_NUM_INSTANCE);
+ return -EOPNOTSUPP;
+ }
+
+ if (param->bitstream_buffer % 8) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer must be aligned to a multiple of 8\n");
+ return -EINVAL;
+ }
+
+ if (param->bitstream_buffer_size % 1024 ||
+ param->bitstream_buffer_size < MIN_BITSTREAM_BUFFER_SIZE) {
+ dev_err(inst->dev->dev,
+ "Bitstream buffer size must be aligned to a multiple of 1024 and have a minimum size of %d\n",
+ MIN_BITSTREAM_BUFFER_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int wave5_vpu_dec_open(struct vpu_instance *inst, struct dec_open_param *open_param)
+{
+ struct dec_info *p_dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ dma_addr_t buffer_addr;
+ size_t buffer_size;
+
+ ret = wave5_check_dec_open_param(inst, open_param);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_is_init(vpu_dev)) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ENODEV;
+ }
+
+ p_dec_info = &inst->codec_info->dec_info;
+ memcpy(&p_dec_info->open_param, open_param, sizeof(struct dec_open_param));
+
+ buffer_addr = open_param->bitstream_buffer;
+ buffer_size = open_param->bitstream_buffer_size;
+ p_dec_info->stream_wr_ptr = buffer_addr;
+ p_dec_info->stream_rd_ptr = buffer_addr;
+ p_dec_info->stream_buf_start_addr = buffer_addr;
+ p_dec_info->stream_buf_size = buffer_size;
+ p_dec_info->stream_buf_end_addr = buffer_addr + buffer_size;
+ p_dec_info->reorder_enable = TRUE;
+ p_dec_info->temp_id_select_mode = TEMPORAL_ID_MODE_ABSOLUTE;
+ p_dec_info->target_temp_id = DECODE_ALL_TEMPORAL_LAYERS;
+ p_dec_info->target_spatial_id = DECODE_ALL_SPATIAL_LAYERS;
+
+ ret = wave5_vpu_build_up_dec_param(inst, open_param);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+static int reset_auxiliary_buffers(struct vpu_instance *inst, unsigned int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+
+ if (index >= MAX_REG_FRAME)
+ return 1;
+
+ if (p_dec_info->vb_mv[index].size == 0 && p_dec_info->vb_fbc_y_tbl[index].size == 0 &&
+ p_dec_info->vb_fbc_c_tbl[index].size == 0)
+ return 1;
+
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_mv[index]);
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_y_tbl[index]);
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_fbc_c_tbl[index]);
+
+ return 0;
+}
+
+int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ int retry = 0;
+ struct vpu_device *vpu_dev = inst->dev;
+ int i;
+
+ *fail_res = 0;
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ do {
+ ret = wave5_vpu_dec_finish_seq(inst, fail_res);
+ if (ret < 0 && *fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
+ dev_warn(inst->dev->dev, "dec_finish_seq timed out\n");
+ goto unlock_and_return;
+ }
+
+ if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING &&
+ retry++ >= MAX_FIRMWARE_CALL_RETRY) {
+ ret = -ETIMEDOUT;
+ goto unlock_and_return;
+ }
+ } while (ret != 0);
+
+ dev_dbg(inst->dev->dev, "%s: dec_finish_seq complete\n", __func__);
+
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
+
+ for (i = 0 ; i < MAX_REG_FRAME; i++) {
+ ret = reset_auxiliary_buffers(inst, i);
+ if (ret) {
+ ret = 0;
+ break;
+ }
+ }
+
+ wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_task);
+
+unlock_and_return:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_issue_seq_init(struct vpu_instance *inst)
+{
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_dec_init_seq(inst);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_complete_seq_init(struct vpu_instance *inst, struct dec_initial_info *info)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_dec_get_seq_info(inst, info);
+ if (!ret)
+ p_dec_info->initial_info_obtained = true;
+
+ info->rd_ptr = wave5_dec_get_rd_ptr(inst);
+ info->wr_ptr = p_dec_info->stream_wr_ptr;
+
+ p_dec_info->initial_info = *info;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_register_frame_buffer_ex(struct vpu_instance *inst, int num_of_decoding_fbs,
+ int num_of_display_fbs, int stride, int height)
+{
+ struct dec_info *p_dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ struct frame_buffer *fb;
+
+ if (num_of_decoding_fbs >= WAVE5_MAX_FBS || num_of_display_fbs >= WAVE5_MAX_FBS)
+ return -EINVAL;
+
+ p_dec_info = &inst->codec_info->dec_info;
+ p_dec_info->num_of_decoding_fbs = num_of_decoding_fbs;
+ p_dec_info->num_of_display_fbs = num_of_display_fbs;
+ p_dec_info->stride = stride;
+
+ if (!p_dec_info->initial_info_obtained)
+ return -EINVAL;
+
+ if (stride < p_dec_info->initial_info.pic_width || (stride % 8 != 0) ||
+ height < p_dec_info->initial_info.pic_height)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ fb = inst->frame_buf;
+ ret = wave5_vpu_dec_register_framebuffer(inst, &fb[p_dec_info->num_of_decoding_fbs],
+ LINEAR_FRAME_MAP, p_dec_info->num_of_display_fbs);
+ if (ret)
+ goto err_out;
+
+ ret = wave5_vpu_dec_register_framebuffer(inst, &fb[0], COMPRESSED_FRAME_MAP,
+ p_dec_info->num_of_decoding_fbs);
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_get_bitstream_buffer(struct vpu_instance *inst, dma_addr_t *prd_ptr,
+ dma_addr_t *pwr_ptr, size_t *size)
+{
+ struct dec_info *p_dec_info;
+ dma_addr_t rd_ptr;
+ dma_addr_t wr_ptr;
+ int room;
+ struct vpu_device *vpu_dev = inst->dev;
+ int ret;
+
+ p_dec_info = &inst->codec_info->dec_info;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ rd_ptr = wave5_dec_get_rd_ptr(inst);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ wr_ptr = p_dec_info->stream_wr_ptr;
+
+ if (wr_ptr < rd_ptr)
+ room = rd_ptr - wr_ptr;
+ else
+ room = (p_dec_info->stream_buf_end_addr - wr_ptr) +
+ (rd_ptr - p_dec_info->stream_buf_start_addr);
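+ /*
+ * Keep one byte unused so that wr_ptr == rd_ptr unambiguously
+ * means "empty" rather than "full".
+ */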
+ room--;
+
+ if (prd_ptr)
+ *prd_ptr = rd_ptr;
+ if (pwr_ptr)
+ *pwr_ptr = wr_ptr;
+ if (size)
+ *size = room;
+
+ return 0;
+}
+
+int wave5_vpu_dec_update_bitstream_buffer(struct vpu_instance *inst, size_t size)
+{
+ struct dec_info *p_dec_info;
+ dma_addr_t wr_ptr;
+ dma_addr_t rd_ptr;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ p_dec_info = &inst->codec_info->dec_info;
+ wr_ptr = p_dec_info->stream_wr_ptr;
+ rd_ptr = p_dec_info->stream_rd_ptr;
+
+ if (size > 0) {
+ if (wr_ptr < rd_ptr && rd_ptr <= wr_ptr + size)
+ return -EINVAL;
+
+ wr_ptr += size;
+
+ if (wr_ptr > p_dec_info->stream_buf_end_addr) {
+ u32 room = wr_ptr - p_dec_info->stream_buf_end_addr;
+
+ wr_ptr = p_dec_info->stream_buf_start_addr;
+ wr_ptr += room;
+ } else if (wr_ptr == p_dec_info->stream_buf_end_addr) {
+ wr_ptr = p_dec_info->stream_buf_start_addr;
+ }
+
+ p_dec_info->stream_wr_ptr = wr_ptr;
+ p_dec_info->stream_rd_ptr = rd_ptr;
+ }
+
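+ /*
+ * A zero-byte update leaves the pointers untouched and only raises
+ * the stream-end flag towards the firmware.
+ */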
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ ret = wave5_vpu_dec_set_bitstream_flag(inst, (size == 0));
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_start_one_frame(struct vpu_instance *inst, u32 *res_fail)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (p_dec_info->stride == 0) /* this means frame buffers have not been registered. */
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_decode(inst, res_fail);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int update_wr_ptr)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_dec_set_rd_ptr(inst, addr);
+
+ p_dec_info->stream_rd_ptr = addr;
+ if (update_wr_ptr)
+ p_dec_info->stream_wr_ptr = addr;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst)
+{
+ int ret;
+ dma_addr_t rd_ptr;
+
+ ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+ if (ret)
+ return ret;
+
+ rd_ptr = wave5_dec_get_rd_ptr(inst);
+
+ mutex_unlock(&inst->dev->hw_lock);
+
+ return rd_ptr;
+}
+
+int wave5_vpu_dec_get_output_info(struct vpu_instance *inst, struct dec_output_info *info)
+{
+ struct dec_info *p_dec_info;
+ int ret;
+ struct vpu_rect rect_info;
+ u32 val;
+ u32 decoded_index;
+ u32 disp_idx;
+ u32 max_dec_index;
+ struct vpu_device *vpu_dev = inst->dev;
+ struct dec_output_info *disp_info;
+
+ if (!info)
+ return -EINVAL;
+
+ p_dec_info = &inst->codec_info->dec_info;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ memset(info, 0, sizeof(*info));
+
+ ret = wave5_vpu_dec_get_result(inst, info);
+ if (ret) {
+ info->rd_ptr = p_dec_info->stream_rd_ptr;
+ info->wr_ptr = p_dec_info->stream_wr_ptr;
+ goto err_out;
+ }
+
+ decoded_index = info->index_frame_decoded;
+
+ /* calculate display frame region */
+ val = 0;
+ rect_info.left = 0;
+ rect_info.right = 0;
+ rect_info.top = 0;
+ rect_info.bottom = 0;
+
+ if (decoded_index < WAVE5_MAX_FBS) {
+ if (inst->std == W_HEVC_DEC || inst->std == W_AVC_DEC)
+ rect_info = p_dec_info->initial_info.pic_crop_rect;
+
+ if (inst->std == W_HEVC_DEC)
+ p_dec_info->dec_out_info[decoded_index].decoded_poc = info->decoded_poc;
+
+ p_dec_info->dec_out_info[decoded_index].rc_decoded = rect_info;
+ }
+ info->rc_decoded = rect_info;
+
+ disp_idx = info->index_frame_display;
+ if (info->index_frame_display >= 0 && info->index_frame_display < WAVE5_MAX_FBS) {
+ disp_info = &p_dec_info->dec_out_info[disp_idx];
+ if (info->index_frame_display != info->index_frame_decoded) {
+ /*
+ * When index_frame_decoded < 0 and index_frame_display >= 0,
+ * info->dec_pic_width and info->dec_pic_height are still valid,
+ * but those of p_dec_info->dec_out_info[disp_idx] are invalid in VP9.
+ */
+ info->disp_pic_width = disp_info->dec_pic_width;
+ info->disp_pic_height = disp_info->dec_pic_height;
+ } else {
+ info->disp_pic_width = info->dec_pic_width;
+ info->disp_pic_height = info->dec_pic_height;
+ }
+
+ info->rc_display = disp_info->rc_decoded;
+
+ } else {
+ info->rc_display.left = 0;
+ info->rc_display.right = 0;
+ info->rc_display.top = 0;
+ info->rc_display.bottom = 0;
+ info->disp_pic_width = 0;
+ info->disp_pic_height = 0;
+ }
+
+ p_dec_info->stream_rd_ptr = wave5_dec_get_rd_ptr(inst);
+ p_dec_info->frame_display_flag = vpu_read_reg(vpu_dev, W5_RET_DEC_DISP_IDC);
+
+ val = p_dec_info->num_of_decoding_fbs; /* fb offset */
+
+ max_dec_index = (p_dec_info->num_of_decoding_fbs > p_dec_info->num_of_display_fbs) ?
+ p_dec_info->num_of_decoding_fbs : p_dec_info->num_of_display_fbs;
+
+ if (info->index_frame_display >= 0 &&
+ info->index_frame_display < (int)max_dec_index)
+ info->disp_frame = inst->frame_buf[val + info->index_frame_display];
+
+ info->rd_ptr = p_dec_info->stream_rd_ptr;
+ info->wr_ptr = p_dec_info->stream_wr_ptr;
+ info->frame_display_flag = p_dec_info->frame_display_flag;
+
+ info->sequence_no = p_dec_info->initial_info.sequence_no;
+ if (decoded_index < WAVE5_MAX_FBS)
+ p_dec_info->dec_out_info[decoded_index] = *info;
+
+ if (disp_idx < WAVE5_MAX_FBS)
+ info->disp_frame.sequence_no = info->sequence_no;
+
+ if (info->sequence_changed) {
+ memcpy((void *)&p_dec_info->initial_info, (void *)&p_dec_info->new_seq_info,
+ sizeof(struct dec_initial_info));
+ p_dec_info->initial_info.sequence_no++;
+ }
+
+err_out:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_clr_disp_flag(struct vpu_instance *inst, int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (index >= p_dec_info->num_of_display_fbs)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ ret = wave5_dec_clr_disp_flag(inst, index);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_set_disp_flag(struct vpu_instance *inst, int index)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret = 0;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (index >= p_dec_info->num_of_display_fbs)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+ ret = wave5_dec_set_disp_flag(inst, index);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_dec_reset_framebuffer(struct vpu_instance *inst, unsigned int index)
+{
+ if (index >= MAX_REG_FRAME)
+ return -EINVAL;
+
+ if (inst->frame_vbuf[index].size == 0)
+ return -EINVAL;
+
+ wave5_vdi_free_dma_memory(inst->dev, &inst->frame_vbuf[index]);
+
+ return 0;
+}
+
+int wave5_vpu_dec_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
+{
+ struct dec_info *p_dec_info = &inst->codec_info->dec_info;
+ int ret = 0;
+
+ switch (cmd) {
+ case DEC_GET_QUEUE_STATUS: {
+ struct queue_status_info *queue_info = parameter;
+
+ queue_info->instance_queue_count = p_dec_info->instance_queue_count;
+ queue_info->report_queue_count = p_dec_info->report_queue_count;
+ break;
+ }
+ case DEC_RESET_FRAMEBUF_INFO: {
+ int i;
+
+ for (i = 0; i < MAX_REG_FRAME; i++) {
+ ret = wave5_vpu_dec_reset_framebuffer(inst, i);
+ if (ret)
+ break;
+ }
+
+ for (i = 0; i < MAX_REG_FRAME; i++) {
+ ret = reset_auxiliary_buffers(inst, i);
+ if (ret)
+ break;
+ }
+
+ wave5_vdi_free_dma_memory(inst->dev, &p_dec_info->vb_task);
+ break;
+ }
+ case DEC_GET_SEQ_INFO: {
+ struct dec_initial_info *seq_info = parameter;
+
+ *seq_info = p_dec_info->initial_info;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+int wave5_vpu_enc_open(struct vpu_instance *inst, struct enc_open_param *open_param)
+{
+ struct enc_info *p_enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = wave5_vpu_enc_check_open_param(inst, open_param);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ if (!wave5_vpu_is_init(vpu_dev)) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ENODEV;
+ }
+
+ p_enc_info = &inst->codec_info->enc_info;
+ p_enc_info->open_param = *open_param;
+
+ ret = wave5_vpu_build_up_enc_param(vpu_dev->dev, inst, open_param);
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_close(struct vpu_instance *inst, u32 *fail_res)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ int retry = 0;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ *fail_res = 0;
+ if (!inst->codec_info)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ do {
+ ret = wave5_vpu_enc_finish_seq(inst, fail_res);
+ if (ret < 0 && *fail_res != WAVE5_SYSERR_VPU_STILL_RUNNING) {
+ dev_warn(inst->dev->dev, "enc_finish_seq timed out\n");
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+ }
+
+ if (*fail_res == WAVE5_SYSERR_VPU_STILL_RUNNING &&
+ retry++ >= MAX_FIRMWARE_CALL_RETRY) {
+ mutex_unlock(&vpu_dev->hw_lock);
+ return -ETIMEDOUT;
+ }
+ } while (ret != 0);
+
+ dev_dbg(inst->dev->dev, "%s: enc_finish_seq complete\n", __func__);
+
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_work);
+
+ if (inst->std == W_HEVC_ENC || inst->std == W_AVC_ENC) {
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_sub_sam_buf);
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_mv);
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_y_tbl);
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_fbc_c_tbl);
+ }
+
+ wave5_vdi_free_dma_memory(vpu_dev, &p_enc_info->vb_task);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return 0;
+}
+
+int wave5_vpu_enc_register_frame_buffer(struct vpu_instance *inst, unsigned int num,
+ unsigned int stride, int height,
+ enum tiled_map_type map_type)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+ unsigned int size_luma, size_chroma;
+ int i;
+
+ if (p_enc_info->stride)
+ return -EINVAL;
+
+ if (!p_enc_info->initial_info_obtained)
+ return -EINVAL;
+
+ if (num < p_enc_info->initial_info.min_frame_buffer_count)
+ return -EINVAL;
+
+ if (stride == 0 || stride % 8 != 0)
+ return -EINVAL;
+
+ if (height <= 0)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ p_enc_info->num_frame_buffers = num;
+ p_enc_info->stride = stride;
+
+ size_luma = stride * height;
+ size_chroma = ALIGN(stride / 2, 16) * height;
+
+ for (i = 0; i < num; i++) {
+ if (!inst->frame_buf[i].update_fb_info)
+ continue;
+
+ inst->frame_buf[i].update_fb_info = false;
+ inst->frame_buf[i].stride = stride;
+ inst->frame_buf[i].height = height;
+ inst->frame_buf[i].map_type = COMPRESSED_FRAME_MAP;
+ inst->frame_buf[i].buf_y_size = size_luma;
+ inst->frame_buf[i].buf_cb = inst->frame_buf[i].buf_y + size_luma;
+ inst->frame_buf[i].buf_cb_size = size_chroma;
+ inst->frame_buf[i].buf_cr_size = 0;
+ }
+
+ ret = wave5_vpu_enc_register_framebuffer(inst->dev->dev, inst, &inst->frame_buf[0],
+ COMPRESSED_FRAME_MAP,
+ p_enc_info->num_frame_buffers);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+static int wave5_check_enc_param(struct vpu_instance *inst, struct enc_param *param)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->source_frame)
+ return -EINVAL;
+
+ if (p_enc_info->open_param.bit_rate == 0 && inst->std == W_HEVC_ENC) {
+ if (param->pic_stream_buffer_addr % 16 || param->pic_stream_buffer_size == 0)
+ return -EINVAL;
+ }
+ if (param->pic_stream_buffer_addr % 8 || param->pic_stream_buffer_size == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int wave5_vpu_enc_start_one_frame(struct vpu_instance *inst, struct enc_param *param, u32 *fail_res)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ *fail_res = 0;
+
+ if (p_enc_info->stride == 0) /* this means frame buffers have not been registered. */
+ return -EINVAL;
+
+ ret = wave5_check_enc_param(inst, param);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ p_enc_info->pts_map[param->src_idx] = param->pts;
+
+ ret = wave5_vpu_encode(inst, param, fail_res);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_get_output_info(struct vpu_instance *inst, struct enc_output_info *info)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_get_result(inst, info);
+ if (ret) {
+ info->pts = 0;
+ goto unlock;
+ }
+
+ if (info->recon_frame_index >= 0)
+ info->pts = p_enc_info->pts_map[info->enc_src_idx];
+
+unlock:
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+
+ switch (cmd) {
+ case ENABLE_ROTATION:
+ p_enc_info->rotation_enable = true;
+ break;
+ case ENABLE_MIRRORING:
+ p_enc_info->mirror_enable = true;
+ break;
+ case SET_MIRROR_DIRECTION: {
+ enum mirror_direction mir_dir;
+
+ mir_dir = *(enum mirror_direction *)parameter;
+ if (mir_dir != MIRDIR_NONE && mir_dir != MIRDIR_HOR &&
+ mir_dir != MIRDIR_VER && mir_dir != MIRDIR_HOR_VER)
+ return -EINVAL;
+ p_enc_info->mirror_direction = mir_dir;
+ break;
+ }
+ case SET_ROTATION_ANGLE: {
+ int angle;
+
+ angle = *(int *)parameter;
+ if (angle && angle != 90 && angle != 180 && angle != 270)
+ return -EINVAL;
+ if (p_enc_info->initial_info_obtained && (angle == 90 || angle == 270))
+ return -EINVAL;
+ p_enc_info->rotation_angle = angle;
+ break;
+ }
+ case ENC_GET_QUEUE_STATUS: {
+ struct queue_status_info *queue_info = parameter;
+
+ queue_info->instance_queue_count = p_enc_info->instance_queue_count;
+ queue_info->report_queue_count = p_enc_info->report_queue_count;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int wave5_vpu_enc_issue_seq_init(struct vpu_instance *inst)
+{
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_init_seq(inst);
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return ret;
+}
+
+int wave5_vpu_enc_complete_seq_init(struct vpu_instance *inst, struct enc_initial_info *info)
+{
+ struct enc_info *p_enc_info = &inst->codec_info->enc_info;
+ int ret;
+ struct vpu_device *vpu_dev = inst->dev;
+
+ if (!info)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&vpu_dev->hw_lock);
+ if (ret)
+ return ret;
+
+ ret = wave5_vpu_enc_get_seq_info(inst, info);
+ if (ret) {
+ p_enc_info->initial_info_obtained = false;
+ mutex_unlock(&vpu_dev->hw_lock);
+ return ret;
+ }
+
+ p_enc_info->initial_info_obtained = true;
+ p_enc_info->initial_info = *info;
+
+ mutex_unlock(&vpu_dev->hw_lock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
new file mode 100644
index 000000000..352f6e904
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.h
@@ -0,0 +1,870 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - helper definitions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef VPUAPI_H_INCLUDED
+#define VPUAPI_H_INCLUDED
+
+#include <linux/idr.h>
+#include <linux/genalloc.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ctrls.h>
+#include "wave5-vpuerror.h"
+#include "wave5-vpuconfig.h"
+#include "wave5-vdi.h"
+
+enum product_id {
+ PRODUCT_ID_521,
+ PRODUCT_ID_511,
+ PRODUCT_ID_517,
+ PRODUCT_ID_NONE,
+};
+
+struct vpu_attr;
+
+enum vpu_instance_type {
+ VPU_INST_TYPE_DEC = 0,
+ VPU_INST_TYPE_ENC = 1
+};
+
+enum vpu_instance_state {
+ VPU_INST_STATE_NONE = 0,
+ VPU_INST_STATE_OPEN = 1,
+ VPU_INST_STATE_INIT_SEQ = 2,
+ VPU_INST_STATE_PIC_RUN = 3,
+ VPU_INST_STATE_STOP = 4
+};
+
+/* Maximum available on hardware. */
+#define WAVE5_MAX_FBS 32
+
+#define MAX_REG_FRAME (WAVE5_MAX_FBS * 2)
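+/*
+ * Twice WAVE5_MAX_FBS: a decoder may register one set of compressed
+ * (reference) buffers plus one set of linear display buffers.
+ */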
+
+#define WAVE5_DEC_HEVC_BUF_SIZE(_w, _h) (DIV_ROUND_UP(_w, 64) * DIV_ROUND_UP(_h, 64) * 256 + 64)
+#define WAVE5_DEC_AVC_BUF_SIZE(_w, _h) ((((ALIGN(_w, 256) / 16) * (ALIGN(_h, 16) / 16)) + 16) * 80)
+
+#define WAVE5_FBC_LUMA_TABLE_SIZE(_w, _h) (ALIGN(_h, 64) * ALIGN(_w, 256) / 32)
+#define WAVE5_FBC_CHROMA_TABLE_SIZE(_w, _h) (ALIGN((_h), 64) * ALIGN((_w) / 2, 256) / 32)
+#define WAVE5_ENC_AVC_BUF_SIZE(_w, _h) (ALIGN(_w, 64) * ALIGN(_h, 64) / 32)
+#define WAVE5_ENC_HEVC_BUF_SIZE(_w, _h) (ALIGN(_w, 64) / 64 * ALIGN(_h, 64) / 64 * 128)
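+/*
+ * Worked example (illustrative): for a 1920x1088 HEVC decode,
+ * WAVE5_DEC_HEVC_BUF_SIZE(1920, 1088)
+ *   = DIV_ROUND_UP(1920, 64) * DIV_ROUND_UP(1088, 64) * 256 + 64
+ *   = 30 * 17 * 256 + 64 = 130624 bytes.
+ */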
+
+/*
+ * common struct and definition
+ */
+enum cod_std {
+ STD_AVC = 0,
+ STD_HEVC = 12,
+ STD_MAX
+};
+
+enum wave_std {
+ W_HEVC_DEC = 0x00,
+ W_HEVC_ENC = 0x01,
+ W_AVC_DEC = 0x02,
+ W_AVC_ENC = 0x03,
+ STD_UNKNOWN = 0xFF
+};
+
+enum set_param_option {
+ OPT_COMMON = 0, /* SET_PARAM command option for encoding sequence */
+ OPT_CUSTOM_GOP = 1, /* SET_PARAM command option for setting custom GOP */
+ OPT_CUSTOM_HEADER = 2, /* SET_PARAM command option for setting custom VPS/SPS/PPS */
+ OPT_VUI = 3, /* SET_PARAM command option for encoding VUI */
+ OPT_CHANGE_PARAM = 0x10,
+};
+
+/************************************************************************/
+/* PROFILE & LEVEL */
+/************************************************************************/
+/* HEVC */
+#define HEVC_PROFILE_MAIN 1
+#define HEVC_PROFILE_MAIN10 2
+#define HEVC_PROFILE_STILLPICTURE 3
+#define HEVC_PROFILE_MAIN10_STILLPICTURE 2
+
+/* H.264 profile for encoder*/
+#define H264_PROFILE_BP 1
+#define H264_PROFILE_MP 2
+#define H264_PROFILE_EXTENDED 3
+#define H264_PROFILE_HP 4
+#define H264_PROFILE_HIGH10 5
+#define H264_PROFILE_HIGH422 6
+#define H264_PROFILE_HIGH444 7
+
+/************************************************************************/
+/* error codes: see wave5-vpuerror.h */
+/************************************************************************/
+
+/************************************************************************/
+/* utility macros */
+/************************************************************************/
+
+/* Initialize sequence firmware command mode */
+#define INIT_SEQ_NORMAL 1
+
+/* Decode firmware command mode */
+#define DEC_PIC_NORMAL 0
+
+/* bit_alloc_mode */
+#define BIT_ALLOC_MODE_FIXED_RATIO 2
+
+/* bit_rate */
+#define MAX_BIT_RATE 700000000
+
+/* decoding_refresh_type */
+#define DEC_REFRESH_TYPE_NON_IRAP 0
+#define DEC_REFRESH_TYPE_CRA 1
+#define DEC_REFRESH_TYPE_IDR 2
+
+/* depend_slice_mode */
+#define DEPEND_SLICE_MODE_RECOMMENDED 1
+#define DEPEND_SLICE_MODE_BOOST 2
+#define DEPEND_SLICE_MODE_FAST 3
+
+/* hvs_max_delta_qp */
+#define MAX_HVS_MAX_DELTA_QP 51
+
+/* intra_refresh_mode */
+#define REFRESH_MODE_CTU_ROWS 1
+#define REFRESH_MODE_CTU_COLUMNS 2
+#define REFRESH_MODE_CTU_STEP_SIZE 3
+#define REFRESH_MODE_CTUS 4
+
+/* intra_mb_refresh_mode */
+#define REFRESH_MB_MODE_NONE 0
+#define REFRESH_MB_MODE_CTU_ROWS 1
+#define REFRESH_MB_MODE_CTU_COLUMNS 2
+#define REFRESH_MB_MODE_CTU_STEP_SIZE 3
+
+/* intra_qp */
+#define MAX_INTRA_QP 63
+
+/* nr_inter_weight_* */
+#define MAX_INTER_WEIGHT 31
+
+/* nr_intra_weight_* */
+#define MAX_INTRA_WEIGHT 31
+
+/* nr_noise_sigma_* */
+#define MAX_NOISE_SIGMA 255
+
+/* bitstream_buffer_size */
+#define MIN_BITSTREAM_BUFFER_SIZE 1024
+#define MIN_BITSTREAM_BUFFER_SIZE_WAVE521 (1024 * 64)
+
+/* vbv_buffer_size */
+#define MIN_VBV_BUFFER_SIZE 10
+#define MAX_VBV_BUFFER_SIZE 3000
+
+#define BUFFER_MARGIN 4096
+
+#define MAX_FIRMWARE_CALL_RETRY 10
+
+#define VDI_LITTLE_ENDIAN 0x0
+
+/*
+ * Parameters of DEC_SET_SEQ_CHANGE_MASK
+ */
+#define SEQ_CHANGE_ENABLE_PROFILE BIT(5)
+#define SEQ_CHANGE_ENABLE_SIZE BIT(16)
+#define SEQ_CHANGE_ENABLE_BITDEPTH BIT(18)
+#define SEQ_CHANGE_ENABLE_DPB_COUNT BIT(19)
+#define SEQ_CHANGE_ENABLE_ASPECT_RATIO BIT(21)
+#define SEQ_CHANGE_ENABLE_VIDEO_SIGNAL BIT(23)
+#define SEQ_CHANGE_ENABLE_VUI_TIMING_INFO BIT(29)
+
+#define SEQ_CHANGE_ENABLE_ALL_HEVC (SEQ_CHANGE_ENABLE_PROFILE | \
+ SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT)
+
+#define SEQ_CHANGE_ENABLE_ALL_AVC (SEQ_CHANGE_ENABLE_SIZE | \
+ SEQ_CHANGE_ENABLE_BITDEPTH | \
+ SEQ_CHANGE_ENABLE_DPB_COUNT | \
+ SEQ_CHANGE_ENABLE_ASPECT_RATIO | \
+ SEQ_CHANGE_ENABLE_VIDEO_SIGNAL | \
+ SEQ_CHANGE_ENABLE_VUI_TIMING_INFO)
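+
+/*
+ * Illustrative only: a decoder would typically program the full per-codec
+ * mask and later test the reported sequence_changed bits against individual
+ * SEQ_CHANGE_ENABLE_* flags; handle_resolution_change() is hypothetical.
+ *
+ *	p_dec_info->seq_change_mask = SEQ_CHANGE_ENABLE_ALL_HEVC;
+ *	...
+ *	if (info->sequence_changed & SEQ_CHANGE_ENABLE_SIZE)
+ *		handle_resolution_change(inst);
+ */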
+
+#define DISPLAY_IDX_FLAG_SEQ_END -1
+#define DISPLAY_IDX_FLAG_NO_FB -3
+#define DECODED_IDX_FLAG_NO_FB -1
+#define DECODED_IDX_FLAG_SKIP -2
+
+#define RECON_IDX_FLAG_ENC_END -1
+#define RECON_IDX_FLAG_ENC_DELAY -2
+#define RECON_IDX_FLAG_HEADER_ONLY -3
+#define RECON_IDX_FLAG_CHANGE_PARAM -4
+
+enum codec_command {
+ ENABLE_ROTATION,
+ ENABLE_MIRRORING,
+ SET_MIRROR_DIRECTION,
+ SET_ROTATION_ANGLE,
+ DEC_GET_QUEUE_STATUS,
+ ENC_GET_QUEUE_STATUS,
+ DEC_RESET_FRAMEBUF_INFO,
+ DEC_GET_SEQ_INFO,
+};
+
+enum mirror_direction {
+ MIRDIR_NONE, /* no mirroring */
+ MIRDIR_VER, /* vertical mirroring */
+ MIRDIR_HOR, /* horizontal mirroring */
+ MIRDIR_HOR_VER /* horizontal and vertical mirroring */
+};
+
+enum frame_buffer_format {
+ FORMAT_ERR = -1,
+ FORMAT_420 = 0, /* 8bit */
+ FORMAT_422, /* 8bit */
+ FORMAT_224, /* 8bit */
+ FORMAT_444, /* 8bit */
+ FORMAT_400, /* 8bit */
+
+ /* little endian perspective */
+ /* | addr 0 | addr 1 | */
+ FORMAT_420_P10_16BIT_MSB = 5, /* lsb |000000xx|xxxxxxxx | msb */
+ FORMAT_420_P10_16BIT_LSB, /* lsb |xxxxxxxx |xx000000 | msb */
+ FORMAT_420_P10_32BIT_MSB, /* lsb |00xxxxxxxxxxxxxxxxxxxxxxxxxxx| msb */
+ FORMAT_420_P10_32BIT_LSB, /* lsb |xxxxxxxxxxxxxxxxxxxxxxxxxxx00| msb */
+
+ /* 4:2:2 packed format */
+ /* little endian perspective */
+ /* | addr 0 | addr 1 | */
+ FORMAT_422_P10_16BIT_MSB, /* lsb |000000xx |xxxxxxxx | msb */
+ FORMAT_422_P10_16BIT_LSB, /* lsb |xxxxxxxx |xx000000 | msb */
+ FORMAT_422_P10_32BIT_MSB, /* lsb |00xxxxxxxxxxxxxxxxxxxxxxxxxxx| msb */
+ FORMAT_422_P10_32BIT_LSB, /* lsb |xxxxxxxxxxxxxxxxxxxxxxxxxxx00| msb */
+
+ FORMAT_YUYV, /* 8bit packed format : Y0U0Y1V0 Y2U1Y3V1 ... */
+ FORMAT_YUYV_P10_16BIT_MSB,
+ FORMAT_YUYV_P10_16BIT_LSB,
+ FORMAT_YUYV_P10_32BIT_MSB,
+ FORMAT_YUYV_P10_32BIT_LSB,
+
+ FORMAT_YVYU, /* 8bit packed format : Y0V0Y1U0 Y2V1Y3U1 ... */
+ FORMAT_YVYU_P10_16BIT_MSB,
+ FORMAT_YVYU_P10_16BIT_LSB,
+ FORMAT_YVYU_P10_32BIT_MSB,
+ FORMAT_YVYU_P10_32BIT_LSB,
+
+ FORMAT_UYVY, /* 8bit packed format : U0Y0V0Y1 U1Y2V1Y3 ... */
+ FORMAT_UYVY_P10_16BIT_MSB,
+ FORMAT_UYVY_P10_16BIT_LSB,
+ FORMAT_UYVY_P10_32BIT_MSB,
+ FORMAT_UYVY_P10_32BIT_LSB,
+
+ FORMAT_VYUY, /* 8bit packed format : V0Y0U0Y1 V1Y2U1Y3 ... */
+ FORMAT_VYUY_P10_16BIT_MSB,
+ FORMAT_VYUY_P10_16BIT_LSB,
+ FORMAT_VYUY_P10_32BIT_MSB,
+ FORMAT_VYUY_P10_32BIT_LSB,
+
+ FORMAT_MAX,
+};
+
+enum packed_format_num {
+ NOT_PACKED = 0,
+ PACKED_YUYV,
+ PACKED_YVYU,
+ PACKED_UYVY,
+ PACKED_VYUY,
+};
+
+enum wave5_interrupt_bit {
+ INT_WAVE5_INIT_VPU = 0,
+ INT_WAVE5_WAKEUP_VPU = 1,
+ INT_WAVE5_SLEEP_VPU = 2,
+ INT_WAVE5_CREATE_INSTANCE = 3,
+ INT_WAVE5_FLUSH_INSTANCE = 4,
+ INT_WAVE5_DESTROY_INSTANCE = 5,
+ INT_WAVE5_INIT_SEQ = 6,
+ INT_WAVE5_SET_FRAMEBUF = 7,
+ INT_WAVE5_DEC_PIC = 8,
+ INT_WAVE5_ENC_PIC = 8,
+ INT_WAVE5_ENC_SET_PARAM = 9,
+ INT_WAVE5_DEC_QUERY = 14,
+ INT_WAVE5_BSBUF_EMPTY = 15,
+ INT_WAVE5_BSBUF_FULL = 15,
+};
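+
+/*
+ * Note that some bits are shared between the decoder and encoder paths
+ * (DEC_PIC/ENC_PIC on bit 8, BSBUF_EMPTY/BSBUF_FULL on bit 15); the meaning
+ * follows the instance type. Handler sketch, illustrative only, where
+ * irq_status is assumed to be read from the interrupt reason register and
+ * handle_picture_done() is a hypothetical per-instance handler:
+ *
+ *	if (irq_status & BIT(INT_WAVE5_DEC_PIC))
+ *		handle_picture_done(inst);
+ */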
+
+enum pic_type {
+ PIC_TYPE_I = 0,
+ PIC_TYPE_P = 1,
+ PIC_TYPE_B = 2,
+ PIC_TYPE_IDR = 5, /* H.264/H.265 IDR (Instantaneous Decoder Refresh) picture */
+ PIC_TYPE_MAX /* no meaning */
+};
+
+enum sw_reset_mode {
+ SW_RESET_SAFETY,
+ SW_RESET_FORCE,
+ SW_RESET_ON_BOOT
+};
+
+enum tiled_map_type {
+ LINEAR_FRAME_MAP = 0, /* linear frame map type */
+ COMPRESSED_FRAME_MAP = 17, /* compressed frame map type */
+};
+
+enum temporal_id_mode {
+ TEMPORAL_ID_MODE_ABSOLUTE,
+ TEMPORAL_ID_MODE_RELATIVE,
+};
+
+struct vpu_attr {
+ u32 product_id;
+ char product_name[8]; /* product name in ASCII */
+ u32 product_version;
+ u32 fw_version;
+ u32 customer_id;
+ u32 support_decoders; /* bitmask */
+ u32 support_encoders; /* bitmask */
+ u32 support_backbone: 1;
+ u32 support_avc10bit_enc: 1;
+ u32 support_hevc10bit_enc: 1;
+ u32 support_vcore_backbone: 1;
+ u32 support_vcpu_backbone: 1;
+};
+
+struct frame_buffer {
+ dma_addr_t buf_y;
+ dma_addr_t buf_cb;
+ dma_addr_t buf_cr;
+ unsigned int buf_y_size;
+ unsigned int buf_cb_size;
+ unsigned int buf_cr_size;
+ enum tiled_map_type map_type;
+ unsigned int stride; /* horizontal stride for the given frame buffer */
+ unsigned int width; /* width of the given frame buffer */
+ unsigned int height; /* height of the given frame buffer */
+ size_t size; /* size of the given frame buffer */
+ unsigned int sequence_no;
+ bool update_fb_info;
+};
+
+struct vpu_rect {
+ unsigned int left; /* horizontal pixel offset from left edge */
+ unsigned int top; /* vertical pixel offset from top edge */
+ unsigned int right; /* horizontal pixel offset from right edge */
+ unsigned int bottom; /* vertical pixel offset from bottom edge */
+};
+
+/*
+ * decoder structs and definitions
+ */
+
+struct dec_open_param {
+ dma_addr_t bitstream_buffer;
+ size_t bitstream_buffer_size;
+};
+
+struct dec_initial_info {
+ u32 pic_width;
+ u32 pic_height;
+ struct vpu_rect pic_crop_rect;
+ u32 min_frame_buffer_count; /* between 1 and 16 */
+
+ u32 profile;
+ u32 luma_bitdepth; /* bit-depth of the luma sample */
+ u32 chroma_bitdepth; /* bit-depth of the chroma sample */
+ u32 seq_init_err_reason;
+ dma_addr_t rd_ptr; /* read pointer of bitstream buffer */
+ dma_addr_t wr_ptr; /* write pointer of bitstream buffer */
+ u32 sequence_no;
+ u32 vlc_buf_size;
+ u32 param_buf_size;
+};
+
+struct dec_output_info {
+ /**
+ * This is the frame buffer index of the picture to be displayed at the moment
+ * among the frame buffers which were registered using
+ * vpu_dec_register_frame_buffer(). The frame data to be displayed is stored
+ * in the frame buffer with this index. When there is no display delay, this
+ * index is always equal to index_frame_decoded; however, if display is
+ * delayed (for display reordering in AVC or B-frames in VC1), this index
+ * might differ from index_frame_decoded. By checking this index, HOST
+ * applications can easily figure out whether sequence decoding has finished
+ * or not.
+ *
+ * -3(0xFFFD) or -2(0xFFFE) : when a display output cannot be given due to
+ * picture reordering or the skip option
+ * -1(0xFFFF) : when there is no more output for display at the end of
+ * sequence decoding
+ */
+ s32 index_frame_display;
+ /**
+ * This is the frame buffer index of the decoded picture among the frame buffers which were
+ * registered using vpu_dec_register_frame_buffer(). The currently decoded frame is stored
+ * into the frame buffer specified by this index.
+ *
+ * -2 : indicates that no decoded output is generated because decoder meets EOS
+ * (end of sequence) or skip
+ * -1 : indicates that the decoder fails to decode a picture because there is no available
+ * frame buffer
+ */
+ s32 index_frame_decoded;
+ s32 index_frame_decoded_for_tiled;
+ u32 nal_type;
+ unsigned int pic_type;
+ struct vpu_rect rc_display;
+ unsigned int disp_pic_width;
+ unsigned int disp_pic_height;
+ struct vpu_rect rc_decoded;
+ u32 dec_pic_width;
+ u32 dec_pic_height;
+ s32 decoded_poc;
+ int temporal_id; /* temporal ID of the picture */
+ dma_addr_t rd_ptr; /* stream buffer read pointer for the current decoder instance */
+ dma_addr_t wr_ptr; /* stream buffer write pointer for the current decoder instance */
+ struct frame_buffer disp_frame;
+ u32 frame_display_flag; /* bitmask of frame buffers flagged for display */
+ /**
+ * This variable reports that the sequence has changed during H.264/AVC
+ * stream decoding. If it is 1, the HOST application can get the new sequence
+ * information by calling vpu_dec_get_initial_info() or
+ * wave5_vpu_dec_issue_seq_init().
+ *
+ * For the H.265/HEVC decoder, each bit has a different meaning as follows:
+ *
+ * sequence_changed[5] : indicates that the profile_idc has changed
+ * sequence_changed[16] : indicates that the resolution has changed
+ * sequence_changed[19] : indicates that the required number of frame buffers
+ * has changed
+ */
+ unsigned int frame_cycle; /* reports the number of cycles for processing a frame */
+ u32 sequence_no;
+
+ u32 dec_host_cmd_tick; /* tick of DEC_PIC command for the picture */
+ u32 dec_decode_end_tick; /* end tick of decoding slices of the picture */
+
+ u32 sequence_changed;
+};
+
+struct queue_status_info {
+ u32 instance_queue_count;
+ u32 report_queue_count;
+};
+
+/*
+ * encoder structs and definitions
+ */
+
+#define MAX_NUM_TEMPORAL_LAYER 7
+#define MAX_NUM_SPATIAL_LAYER 3
+#define MAX_GOP_NUM 8
+
+struct custom_gop_pic_param {
+ u32 pic_type; /* picture type of nth picture in the custom GOP */
+ u32 poc_offset; /* POC of nth picture in the custom GOP */
+ u32 pic_qp; /* quantization parameter of nth picture in the custom GOP */
+ u32 use_multi_ref_p; /* use multiple reference pictures for a P picture; valid only if pic_type is P */
+ u32 ref_poc_l0; /* POC of reference L0 of nth picture in the custom GOP */
+ u32 ref_poc_l1; /* POC of reference L1 of nth picture in the custom GOP */
+ s32 temporal_id; /* temporal ID of nth picture in the custom GOP */
+};
+
+struct enc_wave_param {
+ /*
+ * profile indicator (HEVC only)
+ *
+ * 0 : the firmware determines a profile according to the internal_bit_depth
+ * 1 : main profile
+ * 2 : main10 profile
+ * 3 : main still picture profile
+ * In the AVC encoder, a profile cannot be set by the host application.
+ * The firmware decides it based on internal_bit_depth.
+ * (bit depth 8 -> HIGH profile, bit depth 10 -> HIGH10 profile)
+ */
+ u32 profile;
+ u32 level; /* level indicator (level * 10) */
+ u32 internal_bit_depth: 4; /* 8/10 */
+ u32 gop_preset_idx: 4; /* 0 - 9 */
+ u32 decoding_refresh_type: 2; /* 0=non-IRAP, 1=CRA, 2=IDR */
+ u32 intra_qp; /* quantization parameter of intra picture */
+ u32 intra_period; /* period of intra picture in GOP size */
+ u32 conf_win_top; /* top offset of conformance window */
+ u32 conf_win_bot; /* bottom offset of conformance window */
+ u32 conf_win_left; /* left offset of conformance window */
+ u32 conf_win_right; /* right offset of conformance window */
+ u32 intra_refresh_mode: 3;
+ /*
+ * Argument for intra_refresh_mode.
+ *
+ * Depending on intra_refresh_mode, it means one of the following:
+ * - intra_refresh_mode (1) -> the number of consecutive CTU rows
+ * - intra_refresh_mode (2) -> the number of consecutive CTU columns
+ * - intra_refresh_mode (3) -> the step size in CTUs
+ * - intra_refresh_mode (4) -> the number of intra CTUs to be encoded in a picture
+ */
+ u32 intra_refresh_arg;
+ /*
+ * depend_slice_mode
+ *
+ * 0 : custom setting
+ * 1 : recommended encoder parameters (slow encoding speed, highest picture quality)
+ * 2 : boost mode (normal encoding speed, moderate picture quality)
+ * 3 : fast mode (fast encoding speed, low picture quality)
+ */
+ u32 depend_slice_mode : 2;
+ u32 depend_slice_mode_arg;
+ u32 independ_slice_mode : 1; /* 0=no-multi-slice, 1=slice-in-ctu-number*/
+ u32 independ_slice_mode_arg;
+ u32 max_num_merge: 2;
+ s32 beta_offset_div2: 4; /* sets beta_offset_div2 for deblocking filter */
+ s32 tc_offset_div2: 4; /* sets tc_offset_div2 for deblocking filter */
+ u32 hvs_qp_scale: 4; /* QP scaling factor for CU QP adjustment if hvs_qp_enable is 1 */
+ u32 hvs_max_delta_qp; /* maximum delta QP for HVS */
+ s32 chroma_cb_qp_offset; /* the value of chroma(cb) QP offset */
+ s32 chroma_cr_qp_offset; /* the value of chroma(cr) QP offset */
+ s32 initial_rc_qp;
+ u32 nr_intra_weight_y;
+ u32 nr_intra_weight_cb; /* weight to cb noise level for intra picture (0 ~ 31) */
+ u32 nr_intra_weight_cr; /* weight to cr noise level for intra picture (0 ~ 31) */
+ u32 nr_inter_weight_y;
+ u32 nr_inter_weight_cb; /* weight to cb noise level for inter picture (0 ~ 31) */
+ u32 nr_inter_weight_cr; /* weight to cr noise level for inter picture (0 ~ 31) */
+ u32 min_qp_i; /* minimum QP of I picture for rate control */
+ u32 max_qp_i; /* maximum QP of I picture for rate control */
+ u32 min_qp_p; /* minimum QP of P picture for rate control */
+ u32 max_qp_p; /* maximum QP of P picture for rate control */
+ u32 min_qp_b; /* minimum QP of B picture for rate control */
+ u32 max_qp_b; /* maximum QP of B picture for rate control */
+ u32 avc_idr_period; /* period of IDR pictures (0 ~ 1024); 0 implies an infinite period */
+ u32 avc_slice_arg; /* the number of MB for a slice when avc_slice_mode is set with 1 */
+ u32 intra_mb_refresh_mode: 2; /* 0=none, 1=row, 2=column, 3=step-size-in-mb */
+ /**
+ * Argument for intra_mb_refresh_mode.
+ *
+ * intra_mb_refresh_mode (1) -> the number of consecutive MB rows
+ * intra_mb_refresh_mode (2) -> the number of consecutive MB columns
+ * intra_mb_refresh_mode (3) -> the step size in MBs
+ */
+ u32 intra_mb_refresh_arg;
+ u32 rc_weight_param;
+ u32 rc_weight_buf;
+
+ /* flags */
+ u32 en_still_picture: 1; /* still picture profile */
+ u32 tier: 1; /* 0=main, 1=high */
+ u32 avc_slice_mode: 1; /* 0=none, 1=slice-in-mb-number */
+ u32 entropy_coding_mode: 1; /* 0=CAVLC, 1=CABAC */
+ u32 lossless_enable: 1; /* enable lossless encoding */
+ u32 const_intra_pred_flag: 1; /* enable constrained intra prediction */
+ u32 tmvp_enable: 1; /* enable temporal motion vector prediction */
+ u32 wpp_enable: 1;
+ u32 disable_deblk: 1; /* disable in-loop deblocking filtering */
+ u32 lf_cross_slice_boundary_enable: 1;
+ u32 skip_intra_trans: 1;
+ u32 sao_enable: 1; /* enable SAO (sample adaptive offset) */
+ u32 intra_nx_n_enable: 1; /* enables intra NxN PUs */
+ u32 cu_level_rc_enable: 1; /* enable CU level rate control */
+ u32 hvs_qp_enable: 1; /* enable CU QP adjustment for subjective quality enhancement */
+ u32 strong_intra_smooth_enable: 1; /* enable strong intra smoothing */
+ u32 rdo_skip: 1; /* skip RDO (rate distortion optimization) */
+ u32 lambda_scaling_enable: 1; /* enable lambda scaling using custom GOP */
+ u32 transform8x8_enable: 1; /* enable 8x8 intra prediction and 8x8 transform */
+ u32 mb_level_rc_enable: 1; /* enable MB-level rate control */
+};
+
+struct enc_open_param {
+ dma_addr_t bitstream_buffer;
+ unsigned int bitstream_buffer_size;
+ u32 pic_width; /* width of a picture to be encoded in unit of sample */
+ u32 pic_height; /* height of a picture to be encoded in unit of sample */
+ u32 frame_rate_info; /* desired fps */
+ u32 vbv_buffer_size;
+ u32 bit_rate; /* target bitrate in bps */
+ struct enc_wave_param wave_param;
+ enum packed_format_num packed_format; /* see enum packed_format_num */
+ enum frame_buffer_format src_format;
+ bool line_buf_int_en;
+ u32 rc_enable : 1; /* rate control */
+};
+
+struct enc_initial_info {
+ u32 min_frame_buffer_count; /* minimum number of frame buffers */
+ u32 min_src_frame_count; /* minimum number of source buffers */
+ u32 seq_init_err_reason;
+ u32 warn_info;
+ u32 vlc_buf_size; /* size of the task VLC buffer */
+ u32 param_buf_size; /* size of the task parameter buffer */
+};
+
+/*
+ * Flags to encode NAL units explicitly
+ */
+struct enc_code_opt {
+ u32 implicit_header_encode: 1;
+ u32 encode_vcl: 1;
+ u32 encode_vps: 1;
+ u32 encode_sps: 1;
+ u32 encode_pps: 1;
+ u32 encode_aud: 1;
+ u32 encode_eos: 1;
+ u32 encode_eob: 1;
+ u32 encode_vui: 1;
+};
+
+struct enc_param {
+ struct frame_buffer *source_frame;
+ u32 pic_stream_buffer_addr;
+ u64 pic_stream_buffer_size;
+ u32 src_idx; /* source frame buffer index */
+ struct enc_code_opt code_option;
+ u64 pts; /* presentation timestamp (PTS) of the input source */
+ bool src_end_flag;
+};
+
+struct enc_output_info {
+ u32 bitstream_buffer;
+ u32 bitstream_size; /* byte size of encoded bitstream */
+ u32 pic_type: 2; /* see enum pic_type */
+ s32 recon_frame_index;
+ dma_addr_t rd_ptr;
+ dma_addr_t wr_ptr;
+ u32 enc_pic_byte; /* number of encoded picture bytes */
+ s32 enc_src_idx; /* source buffer index of the currently encoded picture */
+ u32 enc_vcl_nut;
+ u32 error_reason; /* error reason of the currently encoded picture */
+ u32 warn_info; /* warning information on the currently encoded picture */
+ unsigned int frame_cycle; /* number of cycles spent encoding one frame */
+ u64 pts;
+ u32 enc_host_cmd_tick; /* tick of ENC_PIC command for the picture */
+ u32 enc_encode_end_tick; /* end tick of encoding slices of the picture */
+};
+
+enum enc_pic_code_option {
+ CODEOPT_ENC_HEADER_IMPLICIT = BIT(0),
+ CODEOPT_ENC_VCL = BIT(1), /* flag to encode VCL nal unit explicitly */
+};
+
+enum gop_preset_idx {
+ PRESET_IDX_CUSTOM_GOP = 0, /* user defined GOP structure */
+ PRESET_IDX_ALL_I = 1, /* all intra, gopsize = 1 */
+ PRESET_IDX_IPP = 2, /* consecutive P, cyclic gopsize = 1 */
+ PRESET_IDX_IBBB = 3, /* consecutive B, cyclic gopsize = 1 */
+ PRESET_IDX_IBPBP = 4, /* gopsize = 2 */
+ PRESET_IDX_IBBBP = 5, /* gopsize = 4 */
+ PRESET_IDX_IPPPP = 6, /* consecutive P, cyclic gopsize = 4 */
+ PRESET_IDX_IBBBB = 7, /* consecutive B, cyclic gopsize = 4 */
+ PRESET_IDX_RA_IB = 8, /* random access, cyclic gopsize = 8 */
+ PRESET_IDX_IPP_SINGLE = 9, /* consecutive P, cyclic gopsize = 1, with single ref */
+};
+
+struct sec_axi_info {
+ u32 use_ip_enable;
+ u32 use_bit_enable;
+ u32 use_lf_row_enable: 1;
+ u32 use_enc_rdo_enable: 1;
+ u32 use_enc_lf_enable: 1;
+};
+
+struct dec_info {
+ struct dec_open_param open_param;
+ struct dec_initial_info initial_info;
+ struct dec_initial_info new_seq_info; /* temporary storage for new sequence information */
+ u32 stream_wr_ptr;
+ u32 stream_rd_ptr;
+ u32 frame_display_flag;
+ dma_addr_t stream_buf_start_addr;
+ dma_addr_t stream_buf_end_addr;
+ u32 stream_buf_size;
+ struct vpu_buf vb_mv[MAX_REG_FRAME];
+ struct vpu_buf vb_fbc_y_tbl[MAX_REG_FRAME];
+ struct vpu_buf vb_fbc_c_tbl[MAX_REG_FRAME];
+ unsigned int num_of_decoding_fbs: 7;
+ unsigned int num_of_display_fbs: 7;
+ unsigned int stride;
+ struct sec_axi_info sec_axi_info;
+ dma_addr_t user_data_buf_addr;
+ u32 user_data_enable;
+ u32 user_data_buf_size;
+ struct vpu_buf vb_work;
+ struct vpu_buf vb_task;
+ struct dec_output_info dec_out_info[WAVE5_MAX_FBS];
+ u32 seq_change_mask;
+ enum temporal_id_mode temp_id_select_mode;
+ u32 target_temp_id;
+ u32 target_spatial_id;
+ u32 instance_queue_count;
+ u32 report_queue_count;
+ u32 cycle_per_tick;
+ u32 product_code;
+ u32 vlc_buf_size;
+ u32 param_buf_size;
+ bool initial_info_obtained;
+ bool reorder_enable;
+ bool first_cycle_check;
+ u32 stream_endflag: 1;
+};
+
+struct enc_info {
+ struct enc_open_param open_param;
+ struct enc_initial_info initial_info;
+ u32 stream_rd_ptr;
+ u32 stream_wr_ptr;
+ dma_addr_t stream_buf_start_addr;
+ dma_addr_t stream_buf_end_addr;
+ u32 stream_buf_size;
+ unsigned int num_frame_buffers;
+ unsigned int stride;
+ bool rotation_enable;
+ bool mirror_enable;
+ enum mirror_direction mirror_direction;
+ unsigned int rotation_angle;
+ bool initial_info_obtained;
+ struct sec_axi_info sec_axi_info;
+ bool line_buf_int_en;
+ struct vpu_buf vb_work;
+ struct vpu_buf vb_mv; /* col_mv buffer */
+ struct vpu_buf vb_fbc_y_tbl; /* FBC luma table buffer */
+ struct vpu_buf vb_fbc_c_tbl; /* FBC chroma table buffer */
+ struct vpu_buf vb_sub_sam_buf; /* sub-sampled buffer for ME */
+ struct vpu_buf vb_task;
+ u64 cur_pts; /* current timestamp in 90 kHz units */
+ u64 pts_map[32]; /* PTS mapped with source frame index */
+ u32 instance_queue_count;
+ u32 report_queue_count;
+ bool first_cycle_check;
+ u32 cycle_per_tick;
+ u32 product_code;
+ u32 vlc_buf_size;
+ u32 param_buf_size;
+};
+
+struct vpu_device {
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *v4l2_m2m_dec_dev;
+ struct v4l2_m2m_dev *v4l2_m2m_enc_dev;
+ struct list_head instances;
+ struct video_device *video_dev_dec;
+ struct video_device *video_dev_enc;
+ struct mutex dev_lock; /* lock for the src, dst v4l2 queues */
+ struct mutex hw_lock; /* lock hw configurations */
+ int irq;
+ enum product_id product;
+ struct vpu_attr attr;
+ struct vpu_buf common_mem;
+ u32 last_performance_cycles;
+ u32 sram_size;
+ struct gen_pool *sram_pool;
+ struct vpu_buf sram_buf;
+ void __iomem *vdb_register;
+ u32 product_code;
+ struct ida inst_ida;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+struct vpu_instance;
+
+struct vpu_instance_ops {
+ void (*finish_process)(struct vpu_instance *inst);
+};
+
+struct vpu_instance {
+ struct list_head list;
+ struct v4l2_fh v4l2_fh;
+ struct v4l2_m2m_dev *v4l2_m2m_dev;
+ struct v4l2_ctrl_handler v4l2_ctrl_hdl;
+ struct vpu_device *dev;
+ struct completion irq_done;
+
+ struct v4l2_pix_format_mplane src_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+
+ enum vpu_instance_state state;
+ enum vpu_instance_type type;
+ const struct vpu_instance_ops *ops;
+ spinlock_t state_spinlock; /* This protects the instance state */
+
+ enum wave_std std;
+ s32 id;
+ union {
+ struct enc_info enc_info;
+ struct dec_info dec_info;
+ } *codec_info;
+ struct frame_buffer frame_buf[MAX_REG_FRAME];
+ struct vpu_buf frame_vbuf[MAX_REG_FRAME];
+ u32 fbc_buf_count;
+ u32 queued_src_buf_num;
+ u32 queued_dst_buf_num;
+ struct list_head avail_src_bufs;
+ struct list_head avail_dst_bufs;
+ struct v4l2_rect conf_win;
+ u64 timestamp;
+ enum frame_buffer_format output_format;
+ bool cbcr_interleave;
+ bool nv21;
+ bool eos;
+ struct vpu_buf bitstream_vbuf;
+ dma_addr_t last_rd_ptr;
+ size_t remaining_consumed_bytes;
+ bool needs_reallocation;
+
+ unsigned int min_src_buf_count;
+ unsigned int rot_angle;
+ unsigned int mirror_direction;
+ unsigned int bit_depth;
+ unsigned int frame_rate;
+ unsigned int vbv_buf_size;
+ unsigned int rc_mode;
+ unsigned int rc_enable;
+ unsigned int bit_rate;
+ unsigned int encode_aud;
+ struct enc_wave_param enc_param;
+};
+
+void wave5_vdi_write_register(struct vpu_device *vpu_dev, u32 addr, u32 data);
+u32 wave5_vdi_read_register(struct vpu_device *vpu_dev, u32 addr);
+int wave5_vdi_clear_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb);
+int wave5_vdi_allocate_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb);
+int wave5_vdi_allocate_array(struct vpu_device *vpu_dev, struct vpu_buf *array, unsigned int count,
+ size_t size);
+int wave5_vdi_write_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb, size_t offset,
+ u8 *data, size_t len);
+int wave5_vdi_free_dma_memory(struct vpu_device *vpu_dev, struct vpu_buf *vb);
+void wave5_vdi_allocate_sram(struct vpu_device *vpu_dev);
+void wave5_vdi_free_sram(struct vpu_device *vpu_dev);
+
+int wave5_vpu_init_with_bitcode(struct device *dev, u8 *bitcode, size_t size);
+int wave5_vpu_flush_instance(struct vpu_instance *inst);
+int wave5_vpu_get_version_info(struct device *dev, u32 *revision, unsigned int *product_id);
+int wave5_vpu_dec_open(struct vpu_instance *inst, struct dec_open_param *open_param);
+int wave5_vpu_dec_close(struct vpu_instance *inst, u32 *fail_res);
+int wave5_vpu_dec_issue_seq_init(struct vpu_instance *inst);
+int wave5_vpu_dec_complete_seq_init(struct vpu_instance *inst, struct dec_initial_info *info);
+int wave5_vpu_dec_register_frame_buffer_ex(struct vpu_instance *inst, int num_of_decoding_fbs,
+ int num_of_display_fbs, int stride, int height);
+int wave5_vpu_dec_start_one_frame(struct vpu_instance *inst, u32 *res_fail);
+int wave5_vpu_dec_get_output_info(struct vpu_instance *inst, struct dec_output_info *info);
+int wave5_vpu_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr, int update_wr_ptr);
+dma_addr_t wave5_vpu_dec_get_rd_ptr(struct vpu_instance *inst);
+int wave5_vpu_dec_reset_framebuffer(struct vpu_instance *inst, unsigned int index);
+int wave5_vpu_dec_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter);
+int wave5_vpu_dec_get_bitstream_buffer(struct vpu_instance *inst, dma_addr_t *prd_ptr,
+ dma_addr_t *pwr_ptr, size_t *size);
+int wave5_vpu_dec_update_bitstream_buffer(struct vpu_instance *inst, size_t size);
+int wave5_vpu_dec_clr_disp_flag(struct vpu_instance *inst, int index);
+int wave5_vpu_dec_set_disp_flag(struct vpu_instance *inst, int index);
+
+int wave5_vpu_enc_open(struct vpu_instance *inst, struct enc_open_param *open_param);
+int wave5_vpu_enc_close(struct vpu_instance *inst, u32 *fail_res);
+int wave5_vpu_enc_issue_seq_init(struct vpu_instance *inst);
+int wave5_vpu_enc_complete_seq_init(struct vpu_instance *inst, struct enc_initial_info *info);
+int wave5_vpu_enc_register_frame_buffer(struct vpu_instance *inst, unsigned int num,
+ unsigned int stride, int height,
+ enum tiled_map_type map_type);
+int wave5_vpu_enc_start_one_frame(struct vpu_instance *inst, struct enc_param *param,
+ u32 *fail_res);
+int wave5_vpu_enc_get_output_info(struct vpu_instance *inst, struct enc_output_info *info);
+int wave5_vpu_enc_give_command(struct vpu_instance *inst, enum codec_command cmd, void *parameter);
+
+#endif
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h b/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
new file mode 100644
index 000000000..d9751eedb
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuconfig.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - product config definitions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef _VPU_CONFIG_H_
+#define _VPU_CONFIG_H_
+
+#define WAVE517_CODE 0x5170
+#define WAVE537_CODE 0x5370
+#define WAVE511_CODE 0x5110
+#define WAVE521_CODE 0x5210
+#define WAVE521C_CODE 0x521c
+#define WAVE521C_DUAL_CODE 0x521d // wave521 dual core
+#define WAVE521E1_CODE 0x5211
+
+#define PRODUCT_CODE_W_SERIES(x) ({ \
+ int c = x; \
+ ((c) == WAVE517_CODE || (c) == WAVE537_CODE || \
+ (c) == WAVE511_CODE || (c) == WAVE521_CODE || \
+ (c) == WAVE521E1_CODE || (c) == WAVE521C_CODE || \
+ (c) == WAVE521C_DUAL_CODE); \
+})
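+
+/*
+ * Illustrative only: the product code read back from the hardware can be
+ * validated before further setup; VPU_PRODUCT_CODE_REGISTER is assumed to be
+ * the corresponding register offset defined elsewhere in the driver.
+ *
+ *	u32 code = wave5_vdi_read_register(vpu_dev, VPU_PRODUCT_CODE_REGISTER);
+ *
+ *	if (!PRODUCT_CODE_W_SERIES(code))
+ *		return -EINVAL;
+ */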
+
+#define WAVE517_WORKBUF_SIZE (2 * 1024 * 1024)
+#define WAVE521ENC_WORKBUF_SIZE (128 * 1024) // HEVC: 128K, AVC: 40K
+#define WAVE521DEC_WORKBUF_SIZE (1784 * 1024)
+
+#define MAX_NUM_INSTANCE 32
+
+#define W5_MIN_ENC_PIC_WIDTH 256
+#define W5_MIN_ENC_PIC_HEIGHT 128
+#define W5_MAX_ENC_PIC_WIDTH 8192
+#define W5_MAX_ENC_PIC_HEIGHT 8192
+
+// application specific configuration
+#define VPU_ENC_TIMEOUT 60000
+#define VPU_DEC_TIMEOUT 60000
+
+// for WAVE encoder
+#define USE_SRC_PRP_AXI 0
+#define USE_SRC_PRI_AXI 1
+#define DEFAULT_SRC_AXI USE_SRC_PRP_AXI
+
+/************************************************************************/
+/* VPU COMMON MEMORY */
+/************************************************************************/
+#define VLC_BUF_NUM (2)
+
+#define COMMAND_QUEUE_DEPTH (2)
+
+#define W5_REMAP_INDEX0 0
+#define W5_REMAP_INDEX1 1
+#define W5_REMAP_MAX_SIZE (1024 * 1024)
+
+#define WAVE5_MAX_CODE_BUF_SIZE (2 * 1024 * 1024)
+#define WAVE5_TEMPBUF_OFFSET WAVE5_MAX_CODE_BUF_SIZE
+#define WAVE5_TEMPBUF_SIZE (1024 * 1024)
+
+#define SIZE_COMMON (WAVE5_MAX_CODE_BUF_SIZE + WAVE5_TEMPBUF_SIZE)
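+
+/* With the sizes above, SIZE_COMMON works out to 2 MiB + 1 MiB = 3 MiB. */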
+
+/************************************************************************/
+/* VPU REPORT MEMORY */
+/************************************************************************/
+
+#define WAVE5_UPPER_PROC_AXI_ID 0x0
+
+#define WAVE5_PROC_AXI_ID 0x0
+#define WAVE5_PRP_AXI_ID 0x0
+#define WAVE5_FBD_Y_AXI_ID 0x0
+#define WAVE5_FBC_Y_AXI_ID 0x0
+#define WAVE5_FBD_C_AXI_ID 0x0
+#define WAVE5_FBC_C_AXI_ID 0x0
+#define WAVE5_SEC_AXI_ID 0x0
+#define WAVE5_PRI_AXI_ID 0x0
+
+#endif /* _VPU_CONFIG_H_ */
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuerror.h b/drivers/media/platform/chips-media/wave5/wave5-vpuerror.h
new file mode 100644
index 000000000..905d5c34f
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuerror.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - error values
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef ERROR_CODE_H_INCLUDED
+#define ERROR_CODE_H_INCLUDED
+
+/*
+ * WAVE5
+ */
+
+/************************************************************************/
+/* WAVE5 COMMON SYSTEM ERROR (FAIL_REASON) */
+/************************************************************************/
+#define WAVE5_SYSERR_QUEUEING_FAIL 0x00000001
+#define WAVE5_SYSERR_ACCESS_VIOLATION_HW 0x00000040
+#define WAVE5_SYSERR_BUS_ERROR 0x00000200
+#define WAVE5_SYSERR_DOUBLE_FAULT 0x00000400
+#define WAVE5_SYSERR_RESULT_NOT_READY 0x00000800
+#define WAVE5_SYSERR_VPU_STILL_RUNNING 0x00001000
+#define WAVE5_SYSERR_UNKNOWN_CMD 0x00002000
+#define WAVE5_SYSERR_UNKNOWN_CODEC_STD 0x00004000
+#define WAVE5_SYSERR_UNKNOWN_QUERY_OPTION 0x00008000
+#define WAVE5_SYSERR_VLC_BUF_FULL 0x00010000
+#define WAVE5_SYSERR_WATCHDOG_TIMEOUT 0x00020000
+#define WAVE5_SYSERR_VCPU_TIMEOUT 0x00080000
+#define WAVE5_SYSERR_TEMP_SEC_BUF_OVERFLOW 0x00200000
+#define WAVE5_SYSERR_NEED_MORE_TASK_BUF 0x00400000
+#define WAVE5_SYSERR_PRESCAN_ERR 0x00800000
+#define WAVE5_SYSERR_ENC_GBIN_OVERCONSUME 0x01000000
+#define WAVE5_SYSERR_ENC_MAX_ZERO_DETECT 0x02000000
+#define WAVE5_SYSERR_ENC_LVL_FIRST_ERROR 0x04000000
+#define WAVE5_SYSERR_ENC_EG_RANGE_OVER 0x08000000
+#define WAVE5_SYSERR_ENC_IRB_FRAME_DROP 0x10000000
+#define WAVE5_SYSERR_INPLACE_V 0x20000000
+#define WAVE5_SYSERR_FATAL_VPU_HANGUP 0xf0000000
+
+/************************************************************************/
+/* WAVE5 COMMAND QUEUE ERROR (FAIL_REASON) */
+/************************************************************************/
+#define WAVE5_CMDQ_ERR_NOT_QUEABLE_CMD 0x00000001
+#define WAVE5_CMDQ_ERR_SKIP_MODE_ENABLE 0x00000002
+#define WAVE5_CMDQ_ERR_INST_FLUSHING 0x00000003
+#define WAVE5_CMDQ_ERR_INST_INACTIVE 0x00000004
+#define WAVE5_CMDQ_ERR_QUEUE_FAIL 0x00000005
+#define WAVE5_CMDQ_ERR_CMD_BUF_FULL 0x00000006
+
+/************************************************************************/
+/* WAVE5 ERROR ON DECODER (ERR_INFO) */
+/************************************************************************/
+// HEVC
+#define HEVC_SPSERR_SEQ_PARAMETER_SET_ID 0x00001000
+#define HEVC_SPSERR_CHROMA_FORMAT_IDC 0x00001001
+#define HEVC_SPSERR_PIC_WIDTH_IN_LUMA_SAMPLES 0x00001002
+#define HEVC_SPSERR_PIC_HEIGHT_IN_LUMA_SAMPLES 0x00001003
+#define HEVC_SPSERR_CONF_WIN_LEFT_OFFSET 0x00001004
+#define HEVC_SPSERR_CONF_WIN_RIGHT_OFFSET 0x00001005
+#define HEVC_SPSERR_CONF_WIN_TOP_OFFSET 0x00001006
+#define HEVC_SPSERR_CONF_WIN_BOTTOM_OFFSET 0x00001007
+#define HEVC_SPSERR_BIT_DEPTH_LUMA_MINUS8 0x00001008
+#define HEVC_SPSERR_BIT_DEPTH_CHROMA_MINUS8 0x00001009
+#define HEVC_SPSERR_LOG2_MAX_PIC_ORDER_CNT_LSB_MINUS4 0x0000100A
+#define HEVC_SPSERR_SPS_MAX_DEC_PIC_BUFFERING 0x0000100B
+#define HEVC_SPSERR_SPS_MAX_NUM_REORDER_PICS 0x0000100C
+#define HEVC_SPSERR_SPS_MAX_LATENCY_INCREASE 0x0000100D
+#define HEVC_SPSERR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3 0x0000100E
+#define HEVC_SPSERR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE 0x0000100F
+#define HEVC_SPSERR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2 0x00001010
+#define HEVC_SPSERR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE 0x00001011
+#define HEVC_SPSERR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER 0x00001012
+#define HEVC_SPSERR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA 0x00001013
+#define HEVC_SPSERR_SCALING_LIST 0x00001014
+#define HEVC_SPSERR_LOG2_DIFF_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3 0x00001015
+#define HEVC_SPSERR_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE 0x00001016
+#define HEVC_SPSERR_NUM_SHORT_TERM_REF_PIC_SETS 0x00001017
+#define HEVC_SPSERR_NUM_LONG_TERM_REF_PICS_SPS 0x00001018
+#define HEVC_SPSERR_GBU_PARSING_ERROR 0x00001019
+#define HEVC_SPSERR_EXTENSION_FLAG 0x0000101A
+#define HEVC_SPSERR_VUI_ERROR 0x0000101B
+#define HEVC_SPSERR_ACTIVATE_SPS 0x0000101C
+#define HEVC_SPSERR_PROFILE_SPACE 0x0000101D
+#define HEVC_PPSERR_PPS_PIC_PARAMETER_SET_ID 0x00002000
+#define HEVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID 0x00002001
+#define HEVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1 0x00002002
+#define HEVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1 0x00002003
+#define HEVC_PPSERR_INIT_QP_MINUS26 0x00002004
+#define HEVC_PPSERR_DIFF_CU_QP_DELTA_DEPTH 0x00002005
+#define HEVC_PPSERR_PPS_CB_QP_OFFSET 0x00002006
+#define HEVC_PPSERR_PPS_CR_QP_OFFSET 0x00002007
+#define HEVC_PPSERR_NUM_TILE_COLUMNS_MINUS1 0x00002008
+#define HEVC_PPSERR_NUM_TILE_ROWS_MINUS1 0x00002009
+#define HEVC_PPSERR_COLUMN_WIDTH_MINUS1 0x0000200A
+#define HEVC_PPSERR_ROW_HEIGHT_MINUS1 0x0000200B
+#define HEVC_PPSERR_PPS_BETA_OFFSET_DIV2 0x0000200C
+#define HEVC_PPSERR_PPS_TC_OFFSET_DIV2 0x0000200D
+#define HEVC_PPSERR_SCALING_LIST 0x0000200E
+#define HEVC_PPSERR_LOG2_PARALLEL_MERGE_LEVEL_MINUS2 0x0000200F
+#define HEVC_PPSERR_NUM_TILE_COLUMNS_RANGE_OUT 0x00002010
+#define HEVC_PPSERR_NUM_TILE_ROWS_RANGE_OUT 0x00002011
+#define HEVC_PPSERR_MORE_RBSP_DATA_ERROR 0x00002012
+#define HEVC_PPSERR_PPS_PIC_PARAMETER_SET_ID_RANGE_OUT 0x00002013
+#define HEVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID_RANGE_OUT 0x00002014
+#define HEVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002015
+#define HEVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002016
+#define HEVC_PPSERR_PPS_CB_QP_OFFSET_RANGE_OUT 0x00002017
+#define HEVC_PPSERR_PPS_CR_QP_OFFSET_RANGE_OUT 0x00002018
+#define HEVC_PPSERR_COLUMN_WIDTH_MINUS1_RANGE_OUT 0x00002019
+#define HEVC_PPSERR_ROW_HEIGHT_MINUS1_RANGE_OUT 0x00002020
+#define HEVC_PPSERR_PPS_BETA_OFFSET_DIV2_RANGE_OUT 0x00002021
+#define HEVC_PPSERR_PPS_TC_OFFSET_DIV2_RANGE_OUT 0x00002022
+#define HEVC_SHERR_SLICE_PIC_PARAMETER_SET_ID 0x00003000
+#define HEVC_SHERR_ACTIVATE_PPS 0x00003001
+#define HEVC_SHERR_ACTIVATE_SPS 0x00003002
+#define HEVC_SHERR_SLICE_TYPE 0x00003003
+#define HEVC_SHERR_FIRST_SLICE_IS_DEPENDENT_SLICE 0x00003004
+#define HEVC_SHERR_SHORT_TERM_REF_PIC_SET_SPS_FLAG 0x00003005
+#define HEVC_SHERR_SHORT_TERM_REF_PIC_SET 0x00003006
+#define HEVC_SHERR_SHORT_TERM_REF_PIC_SET_IDX 0x00003007
+#define HEVC_SHERR_NUM_LONG_TERM_SPS 0x00003008
+#define HEVC_SHERR_NUM_LONG_TERM_PICS 0x00003009
+#define HEVC_SHERR_LT_IDX_SPS_IS_OUT_OF_RANGE 0x0000300A
+#define HEVC_SHERR_DELTA_POC_MSB_CYCLE_LT 0x0000300B
+#define HEVC_SHERR_NUM_REF_IDX_L0_ACTIVE_MINUS1 0x0000300C
+#define HEVC_SHERR_NUM_REF_IDX_L1_ACTIVE_MINUS1 0x0000300D
+#define HEVC_SHERR_COLLOCATED_REF_IDX 0x0000300E
+#define HEVC_SHERR_PRED_WEIGHT_TABLE 0x0000300F
+#define HEVC_SHERR_FIVE_MINUS_MAX_NUM_MERGE_CAND 0x00003010
+#define HEVC_SHERR_SLICE_QP_DELTA 0x00003011
+#define HEVC_SHERR_SLICE_QP_DELTA_IS_OUT_OF_RANGE 0x00003012
+#define HEVC_SHERR_SLICE_CB_QP_OFFSET 0x00003013
+#define HEVC_SHERR_SLICE_CR_QP_OFFSET 0x00003014
+#define HEVC_SHERR_SLICE_BETA_OFFSET_DIV2 0x00003015
+#define HEVC_SHERR_SLICE_TC_OFFSET_DIV2 0x00003016
+#define HEVC_SHERR_NUM_ENTRY_POINT_OFFSETS 0x00003017
+#define HEVC_SHERR_OFFSET_LEN_MINUS1 0x00003018
+#define HEVC_SHERR_SLICE_SEGMENT_HEADER_EXTENSION_LENGTH 0x00003019
+#define HEVC_SHERR_WRONG_POC_IN_STILL_PICTURE_PROFILE 0x0000301A
+#define HEVC_SHERR_SLICE_TYPE_ERROR_IN_STILL_PICTURE_PROFILE 0x0000301B
+#define HEVC_SHERR_PPS_ID_NOT_EQUAL_PREV_VALUE 0x0000301C
+#define HEVC_SPECERR_OVER_PICTURE_WIDTH_SIZE 0x00004000
+#define HEVC_SPECERR_OVER_PICTURE_HEIGHT_SIZE 0x00004001
+#define HEVC_SPECERR_OVER_CHROMA_FORMAT 0x00004002
+#define HEVC_SPECERR_OVER_BIT_DEPTH 0x00004003
+#define HEVC_SPECERR_OVER_BUFFER_OVER_FLOW 0x00004004
+#define HEVC_SPECERR_OVER_WRONG_BUFFER_ACCESS 0x00004005
+#define HEVC_ETCERR_INIT_SEQ_SPS_NOT_FOUND 0x00005000
+#define HEVC_ETCERR_DEC_PIC_VCL_NOT_FOUND 0x00005001
+#define HEVC_ETCERR_NO_VALID_SLICE_IN_AU 0x00005002
+#define HEVC_ETCERR_INPLACE_V 0x0000500F
+
+// AVC
+#define AVC_SPSERR_SEQ_PARAMETER_SET_ID 0x00001000
+#define AVC_SPSERR_CHROMA_FORMAT_IDC 0x00001001
+#define AVC_SPSERR_PIC_WIDTH_IN_LUMA_SAMPLES 0x00001002
+#define AVC_SPSERR_PIC_HEIGHT_IN_LUMA_SAMPLES 0x00001003
+#define AVC_SPSERR_CONF_WIN_LEFT_OFFSET 0x00001004
+#define AVC_SPSERR_CONF_WIN_RIGHT_OFFSET 0x00001005
+#define AVC_SPSERR_CONF_WIN_TOP_OFFSET 0x00001006
+#define AVC_SPSERR_CONF_WIN_BOTTOM_OFFSET 0x00001007
+#define AVC_SPSERR_BIT_DEPTH_LUMA_MINUS8 0x00001008
+#define AVC_SPSERR_BIT_DEPTH_CHROMA_MINUS8 0x00001009
+#define AVC_SPSERR_SPS_MAX_DEC_PIC_BUFFERING 0x0000100B
+#define AVC_SPSERR_SPS_MAX_NUM_REORDER_PICS 0x0000100C
+#define AVC_SPSERR_SCALING_LIST 0x00001014
+#define AVC_SPSERR_GBU_PARSING_ERROR 0x00001019
+#define AVC_SPSERR_VUI_ERROR 0x0000101B
+#define AVC_SPSERR_ACTIVATE_SPS 0x0000101C
+#define AVC_PPSERR_PPS_PIC_PARAMETER_SET_ID 0x00002000
+#define AVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID 0x00002001
+#define AVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1 0x00002002
+#define AVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1 0x00002003
+#define AVC_PPSERR_INIT_QP_MINUS26 0x00002004
+#define AVC_PPSERR_PPS_CB_QP_OFFSET 0x00002006
+#define AVC_PPSERR_PPS_CR_QP_OFFSET 0x00002007
+#define AVC_PPSERR_SCALING_LIST 0x0000200E
+#define AVC_PPSERR_MORE_RBSP_DATA_ERROR 0x00002012
+#define AVC_PPSERR_PPS_PIC_PARAMETER_SET_ID_RANGE_OUT 0x00002013
+#define AVC_PPSERR_PPS_SEQ_PARAMETER_SET_ID_RANGE_OUT 0x00002014
+#define AVC_PPSERR_NUM_REF_IDX_L0_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002015
+#define AVC_PPSERR_NUM_REF_IDX_L1_DEFAULT_ACTIVE_MINUS1_RANGE_OUT 0x00002016
+#define AVC_PPSERR_PPS_CB_QP_OFFSET_RANGE_OUT 0x00002017
+#define AVC_PPSERR_PPS_CR_QP_OFFSET_RANGE_OUT 0x00002018
+#define AVC_SHERR_SLICE_PIC_PARAMETER_SET_ID 0x00003000
+#define AVC_SHERR_ACTIVATE_PPS 0x00003001
+#define AVC_SHERR_ACTIVATE_SPS 0x00003002
+#define AVC_SHERR_SLICE_TYPE 0x00003003
+#define AVC_SHERR_FIRST_MB_IN_SLICE 0x00003004
+#define AVC_SHERR_RPLM 0x00003006
+#define AVC_SHERR_LT_IDX_SPS_IS_OUT_OF_RANGE 0x0000300A
+#define AVC_SHERR_NUM_REF_IDX_L0_ACTIVE_MINUS1 0x0000300C
+#define AVC_SHERR_NUM_REF_IDX_L1_ACTIVE_MINUS1 0x0000300D
+#define AVC_SHERR_PRED_WEIGHT_TABLE 0x0000300F
+#define AVC_SHERR_SLICE_QP_DELTA 0x00003011
+#define AVC_SHERR_SLICE_BETA_OFFSET_DIV2 0x00003015
+#define AVC_SHERR_SLICE_TC_OFFSET_DIV2 0x00003016
+#define AVC_SHERR_DISABLE_DEBLOCK_FILTER_IDC 0x00003017
+#define AVC_SPECERR_OVER_PICTURE_WIDTH_SIZE 0x00004000
+#define AVC_SPECERR_OVER_PICTURE_HEIGHT_SIZE 0x00004001
+#define AVC_SPECERR_OVER_CHROMA_FORMAT 0x00004002
+#define AVC_SPECERR_OVER_BIT_DEPTH 0x00004003
+#define AVC_SPECERR_OVER_BUFFER_OVER_FLOW 0x00004004
+#define AVC_SPECERR_OVER_WRONG_BUFFER_ACCESS 0x00004005
+#define AVC_ETCERR_INIT_SEQ_SPS_NOT_FOUND 0x00005000
+#define AVC_ETCERR_DEC_PIC_VCL_NOT_FOUND 0x00005001
+#define AVC_ETCERR_NO_VALID_SLICE_IN_AU 0x00005002
+#define AVC_ETCERR_ASO 0x00005004
+#define AVC_ETCERR_FMO 0x00005005
+#define AVC_ETCERR_INPLACE_V 0x0000500F
+
+/************************************************************************/
+/* WAVE5 WARNING ON DECODER (WARN_INFO) */
+/************************************************************************/
+// HEVC
+#define HEVC_SPSWARN_MAX_SUB_LAYERS_MINUS1 0x00000001
+#define HEVC_SPSWARN_GENERAL_RESERVED_ZERO_44BITS 0x00000002
+#define HEVC_SPSWARN_RESERVED_ZERO_2BITS 0x00000004
+#define HEVC_SPSWARN_SUB_LAYER_RESERVED_ZERO_44BITS 0x00000008
+#define HEVC_SPSWARN_GENERAL_LEVEL_IDC 0x00000010
+#define HEVC_SPSWARN_SPS_MAX_DEC_PIC_BUFFERING_VALUE_OVER 0x00000020
+#define HEVC_SPSWARN_RBSP_TRAILING_BITS 0x00000040
+#define HEVC_SPSWARN_ST_RPS_UE_ERROR 0x00000080
+#define HEVC_SPSWARN_EXTENSION_FLAG 0x01000000
+#define HEVC_SPSWARN_REPLACED_WITH_PREV_SPS 0x02000000
+#define HEVC_PPSWARN_RBSP_TRAILING_BITS 0x00000100
+#define HEVC_PPSWARN_REPLACED_WITH_PREV_PPS 0x00000200
+#define HEVC_SHWARN_FIRST_SLICE_SEGMENT_IN_PIC_FLAG 0x00001000
+#define HEVC_SHWARN_NO_OUTPUT_OF_PRIOR_PICS_FLAG 0x00002000
+#define HEVC_SHWARN_PIC_OUTPUT_FLAG 0x00004000
+#define HEVC_SHWARN_DUPLICATED_SLICE_SEGMENT 0x00008000
+#define HEVC_ETCWARN_INIT_SEQ_VCL_NOT_FOUND 0x00010000
+#define HEVC_ETCWARN_MISSING_REFERENCE_PICTURE 0x00020000
+#define HEVC_ETCWARN_WRONG_TEMPORAL_ID 0x00040000
+#define HEVC_ETCWARN_ERROR_PICTURE_IS_REFERENCED 0x00080000
+#define HEVC_SPECWARN_OVER_PROFILE 0x00100000
+#define HEVC_SPECWARN_OVER_LEVEL 0x00200000
+#define HEVC_PRESWARN_PARSING_ERR 0x04000000
+#define HEVC_PRESWARN_MVD_OUT_OF_RANGE 0x08000000
+#define HEVC_PRESWARN_CU_QP_DELTA_VAL_OUT_OF_RANGE 0x09000000
+#define HEVC_PRESWARN_COEFF_LEVEL_REMAINING_OUT_OF_RANGE 0x0A000000
+#define HEVC_PRESWARN_PCM_ERR 0x0B000000
+#define HEVC_PRESWARN_OVERCONSUME 0x0C000000
+#define HEVC_PRESWARN_END_OF_SUBSET_ONE_BIT_ERR 0x10000000
+#define HEVC_PRESWARN_END_OF_SLICE_SEGMENT_FLAG 0x20000000
+
+// AVC
+#define AVC_SPSWARN_RESERVED_ZERO_2BITS 0x00000004
+#define AVC_SPSWARN_GENERAL_LEVEL_IDC 0x00000010
+#define AVC_SPSWARN_RBSP_TRAILING_BITS 0x00000040
+#define AVC_PPSWARN_RBSP_TRAILING_BITS 0x00000100
+#define AVC_SHWARN_NO_OUTPUT_OF_PRIOR_PICS_FLAG 0x00002000
+#define AVC_ETCWARN_INIT_SEQ_VCL_NOT_FOUND 0x00010000
+#define AVC_ETCWARN_MISSING_REFERENCE_PICTURE 0x00020000
+#define AVC_ETCWARN_ERROR_PICTURE_IS_REFERENCED 0x00080000
+#define AVC_SPECWARN_OVER_PROFILE 0x00100000
+#define AVC_SPECWARN_OVER_LEVEL 0x00200000
+#define AVC_PRESWARN_MVD_RANGE_OUT 0x00400000
+#define AVC_PRESWARN_MB_QPD_RANGE_OUT 0x00500000
+#define AVC_PRESWARN_COEFF_RANGE_OUT 0x00600000
+#define AVC_PRESWARN_MV_RANGE_OUT 0x00700000
+#define AVC_PRESWARN_MB_SKIP_RUN_RANGE_OUT 0x00800000
+#define AVC_PRESWARN_MB_TYPE_RANGE_OUT 0x00900000
+#define AVC_PRESWARN_SUB_MB_TYPE_RANGE_OUT 0x00A00000
+#define AVC_PRESWARN_CBP_RANGE_OUT 0x00B00000
+#define AVC_PRESWARN_INTRA_CHROMA_PRED_MODE_RANGE_OUT 0x00C00000
+#define AVC_PRESWARN_REF_IDX_RANGE_OUT 0x00D00000
+#define AVC_PRESWARN_COEFF_TOKEN_RANGE_OUT 0x00E00000
+#define AVC_PRESWARN_TOTAL_ZERO_RANGE_OUT 0x00F00000
+#define AVC_PRESWARN_RUN_BEFORE_RANGE_OUT 0x01000000
+#define AVC_PRESWARN_OVERCONSUME 0x01100000
+#define AVC_PRESWARN_MISSING_SLICE 0x01200000
+
+/************************************************************************/
+/* WAVE5 ERROR ON ENCODER (ERR_INFO) */
+/************************************************************************/
+
+/************************************************************************/
+/* WAVE5 WARNING ON ENCODER (WARN_INFO) */
+/************************************************************************/
+#define WAVE5_ETCWARN_FORCED_SPLIT_BY_CU8X8 0x000000001
+
+/************************************************************************/
+/* WAVE5 debug info (PRI_REASON) */
+/************************************************************************/
+#define WAVE5_DEC_VCORE_VCE_HANGUP 0x0001
+#define WAVE5_DEC_VCORE_UNDETECTED_SYNTAX_ERR 0x0002
+#define WAVE5_DEC_VCORE_MIB_BUSY 0x0003
+#define WAVE5_DEC_VCORE_VLC_BUSY 0x0004
+
+#endif /* ERROR_CODE_H_INCLUDED */
diff --git a/drivers/media/platform/chips-media/wave5/wave5.h b/drivers/media/platform/chips-media/wave5/wave5.h
new file mode 100644
index 000000000..063028ecc
--- /dev/null
+++ b/drivers/media/platform/chips-media/wave5/wave5.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Wave5 series multi-standard codec IP - wave5 backend definitions
+ *
+ * Copyright (C) 2021-2023 CHIPS&MEDIA INC
+ */
+
+#ifndef __WAVE5_FUNCTION_H__
+#define __WAVE5_FUNCTION_H__
+
+#define WAVE5_SUBSAMPLED_ONE_SIZE(_w, _h) (ALIGN((_w) / 4, 16) * ALIGN((_h) / 4, 8))
+#define WAVE5_SUBSAMPLED_ONE_SIZE_AVC(_w, _h) (ALIGN((_w) / 4, 32) * ALIGN((_h) / 4, 4))
+
+/*
+ * Bitstream buffer option: Explicit End
+ * When set to 1 the VPU assumes that the bitstream has at least one frame and
+ * will read until the end of the bitstream buffer.
+ * When set to 0 the VPU will not read the last few bytes.
+ * This option can be set anytime but cannot be cleared during processing.
+ * It can be set to force decoding to finish even though there is not enough
+ * bitstream data for a full frame.
+ */
+#define BSOPTION_ENABLE_EXPLICIT_END BIT(0)
+#define BSOPTION_HIGHLIGHT_STREAM_END BIT(1)
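+
+/*
+ * Illustrative only: on end-of-stream the driver would typically set the
+ * explicit-end option so the firmware drains the remaining bytes;
+ * W5_BS_OPTION is assumed to be the corresponding command register offset.
+ *
+ *	wave5_vdi_write_register(inst->dev, W5_BS_OPTION,
+ *				 BSOPTION_ENABLE_EXPLICIT_END);
+ */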
+
+/*
+ * Currently the driver only supports little-endian hardware. However, for the
+ * source picture format, the bitstream and the report parameters, the
+ * hardware works with the opposite endianness; thus, big endian is hard-coded
+ * for these register writes.
+ */
+#define PIC_SRC_ENDIANNESS_BIG_ENDIAN 0xf
+#define BITSTREAM_ENDIANNESS_BIG_ENDIAN 0xf
+#define REPORT_PARAM_ENDIANNESS_BIG_ENDIAN 0xf
+
+#define WTL_RIGHT_JUSTIFIED 0
+#define WTL_LEFT_JUSTIFIED 1
+#define WTL_PIXEL_8BIT 0
+#define WTL_PIXEL_16BIT 1
+#define WTL_PIXEL_32BIT 2
+
+/* Mirror & rotation modes of the PRP (pre-processing) module */
+#define NONE_ROTATE 0x0
+#define ROT_CLOCKWISE_90 0x3
+#define ROT_CLOCKWISE_180 0x5
+#define ROT_CLOCKWISE_270 0x7
+#define MIR_HOR_FLIP 0x11
+#define MIR_VER_FLIP 0x9
+#define MIR_HOR_VER_FLIP (MIR_HOR_FLIP | MIR_VER_FLIP)
+
+bool wave5_vpu_is_init(struct vpu_device *vpu_dev);
+
+unsigned int wave5_vpu_get_product_id(struct vpu_device *vpu_dev);
+
+int wave5_vpu_get_version(struct vpu_device *vpu_dev, u32 *revision);
+
+int wave5_vpu_init(struct device *dev, u8 *fw, size_t size);
+
+int wave5_vpu_reset(struct device *dev, enum sw_reset_mode reset_mode);
+
+int wave5_vpu_build_up_dec_param(struct vpu_instance *inst, struct dec_open_param *param);
+
+int wave5_vpu_dec_set_bitstream_flag(struct vpu_instance *inst, bool eos);
+
+int wave5_vpu_hw_flush_instance(struct vpu_instance *inst);
+
+int wave5_vpu_dec_register_framebuffer(struct vpu_instance *inst,
+ struct frame_buffer *fb_arr, enum tiled_map_type map_type,
+ unsigned int count);
+
+int wave5_vpu_re_init(struct device *dev, u8 *fw, size_t size);
+
+int wave5_vpu_dec_init_seq(struct vpu_instance *inst);
+
+int wave5_vpu_dec_get_seq_info(struct vpu_instance *inst, struct dec_initial_info *info);
+
+int wave5_vpu_decode(struct vpu_instance *inst, u32 *fail_res);
+
+int wave5_vpu_dec_get_result(struct vpu_instance *inst, struct dec_output_info *result);
+
+int wave5_vpu_dec_finish_seq(struct vpu_instance *inst, u32 *fail_res);
+
+int wave5_dec_clr_disp_flag(struct vpu_instance *inst, unsigned int index);
+
+int wave5_dec_set_disp_flag(struct vpu_instance *inst, unsigned int index);
+
+int wave5_vpu_clear_interrupt(struct vpu_instance *inst, u32 flags);
+
+dma_addr_t wave5_dec_get_rd_ptr(struct vpu_instance *inst);
+
+int wave5_dec_set_rd_ptr(struct vpu_instance *inst, dma_addr_t addr);
+
+/***< WAVE5 encoder >******/
+
+int wave5_vpu_build_up_enc_param(struct device *dev, struct vpu_instance *inst,
+ struct enc_open_param *open_param);
+
+int wave5_vpu_enc_init_seq(struct vpu_instance *inst);
+
+int wave5_vpu_enc_get_seq_info(struct vpu_instance *inst, struct enc_initial_info *info);
+
+int wave5_vpu_enc_register_framebuffer(struct device *dev, struct vpu_instance *inst,
+ struct frame_buffer *fb_arr, enum tiled_map_type map_type,
+ unsigned int count);
+
+int wave5_vpu_encode(struct vpu_instance *inst, struct enc_param *option, u32 *fail_res);
+
+int wave5_vpu_enc_get_result(struct vpu_instance *inst, struct enc_output_info *result);
+
+int wave5_vpu_enc_finish_seq(struct vpu_instance *inst, u32 *fail_res);
+
+int wave5_vpu_enc_check_open_param(struct vpu_instance *inst, struct enc_open_param *open_param);
+
+#endif /* __WAVE5_FUNCTION_H__ */
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index c3456c700..ac48658e2 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -598,12 +598,11 @@ static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
goto end;
vq = v4l2_m2m_get_vq(fh->m2m_ctx, buf->type);
- if (buf->index >= vq->num_buffers) {
- dev_err(ctx->jpeg->dev, "buffer index out of range\n");
+ vb = vb2_get_buffer(vq, buf->index);
+ if (!vb) {
+ dev_err(ctx->jpeg->dev, "buffer not found\n");
return -EINVAL;
}
-
- vb = vq->bufs[buf->index];
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
jpeg_src_buf->bs_size = buf->m.planes[0].bytesused;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index cc44be10f..94f4ed785 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -208,13 +208,17 @@ static int mdp_probe(struct platform_device *pdev)
goto err_destroy_job_wq;
}
- mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_SCP);
- if (WARN_ON(!mm_pdev)) {
- dev_err(&pdev->dev, "Could not get scp device\n");
- ret = -ENODEV;
- goto err_destroy_clock_wq;
+ mdp->scp = scp_get(pdev);
+ if (!mdp->scp) {
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_SCP);
+ if (WARN_ON(!mm_pdev)) {
+ dev_err(&pdev->dev, "Could not get scp device\n");
+ ret = -ENODEV;
+ goto err_destroy_clock_wq;
+ }
+ mdp->scp = platform_get_drvdata(mm_pdev);
}
- mdp->scp = platform_get_drvdata(mm_pdev);
+
mdp->rproc_handle = scp_get_rproc(mdp->scp);
dev_dbg(&pdev->dev, "MDP rproc_handle: %pK", mdp->rproc_handle);
diff --git a/drivers/media/platform/mediatek/vcodec/Kconfig b/drivers/media/platform/mediatek/vcodec/Kconfig
index 74b00eb1b..bc8292232 100644
--- a/drivers/media/platform/mediatek/vcodec/Kconfig
+++ b/drivers/media/platform/mediatek/vcodec/Kconfig
@@ -24,7 +24,6 @@ config VIDEO_MEDIATEK_VCODEC
select V4L2_H264
select V4L2_VP9
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats on MT8173
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 4c34344dc..d7027d600 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -50,12 +50,12 @@ static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
@@ -65,12 +65,12 @@ static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
- mutex_lock(&dev->dev_mutex);
+ mutex_lock(&dev->dev_ctx_lock);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
- mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&dev->dev_ctx_lock);
}
static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
index 91ed576d6..ba742f0e3 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
@@ -208,36 +208,14 @@ static int vidioc_vdec_dqbuf(struct file *file, void *priv,
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
-static int mtk_vcodec_dec_get_chip_name(void *priv)
-{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
- struct device *dev = &ctx->dev->plat_dev->dev;
-
- if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-dec"))
- return 8173;
- else if (of_device_is_compatible(dev->of_node, "mediatek,mt8183-vcodec-dec"))
- return 8183;
- else if (of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-dec"))
- return 8192;
- else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-dec"))
- return 8195;
- else if (of_device_is_compatible(dev->of_node, "mediatek,mt8186-vcodec-dec"))
- return 8186;
- else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-dec"))
- return 8188;
- else
- return 8173;
-}
-
static int vidioc_vdec_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
- int platform_name = mtk_vcodec_dec_get_chip_name(priv);
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
- snprintf(cap->card, sizeof(cap->card), "MT%d video decoder", platform_name);
+ snprintf(cap->card, sizeof(cap->card), "MT%d video decoder", ctx->dev->chip_name);
return 0;
}
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index 0a89ce452..2073781cc 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -268,7 +268,9 @@ static int fops_vcodec_open(struct file *file)
ctx->dev->vdec_pdata->init_vdec_params(ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -311,7 +313,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
mtk_vcodec_dbgfs_remove(dev, ctx->id);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -326,6 +330,26 @@ static const struct v4l2_file_operations mtk_vcodec_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
+static void mtk_vcodec_dec_get_chip_name(struct mtk_vcodec_dec_dev *vdec_dev)
+{
+ struct device *dev = &vdec_dev->plat_dev->dev;
+
+ if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-dec"))
+ vdec_dev->chip_name = MTK_VDEC_MT8173;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8183-vcodec-dec"))
+ vdec_dev->chip_name = MTK_VDEC_MT8183;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8192-vcodec-dec"))
+ vdec_dev->chip_name = MTK_VDEC_MT8192;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-dec"))
+ vdec_dev->chip_name = MTK_VDEC_MT8195;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8186-vcodec-dec"))
+ vdec_dev->chip_name = MTK_VDEC_MT8186;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-dec"))
+ vdec_dev->chip_name = MTK_VDEC_MT8188;
+ else
+ vdec_dev->chip_name = MTK_VDEC_INVAL;
+}
+
static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dec_dev *dev;
@@ -341,6 +365,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dev->ctx_list);
dev->plat_dev = pdev;
+ mtk_vcodec_dec_get_chip_name(dev);
+ if (dev->chip_name == MTK_VDEC_INVAL) {
+ dev_err(&pdev->dev, "Failed to get decoder chip name");
+ return -EINVAL;
+ }
+
dev->vdec_pdata = of_device_get_match_data(&pdev->dev);
if (!of_property_read_u32(pdev->dev.of_node, "mediatek,vpu",
&rproc_phandle)) {
@@ -378,6 +408,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
for (i = 0; i < MTK_VDEC_HW_MAX; i++)
mutex_init(&dev->dec_mutex[i]);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index 7e36b2c69..85b2c0d3d 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -18,6 +18,16 @@
#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
#define IS_VDEC_INNER_RACING(capability) ((capability) & MTK_VCODEC_INNER_RACING)
+enum mtk_vcodec_dec_chip_name {
+ MTK_VDEC_INVAL = 0,
+ MTK_VDEC_MT8173 = 8173,
+ MTK_VDEC_MT8183 = 8183,
+ MTK_VDEC_MT8186 = 8186,
+ MTK_VDEC_MT8188 = 8188,
+ MTK_VDEC_MT8192 = 8192,
+ MTK_VDEC_MT8195 = 8195,
+};
+
/*
* enum mtk_vdec_format_types - Structure used to get supported
* format types according to decoder capability
@@ -231,6 +241,7 @@ struct mtk_vcodec_dec_ctx {
*
* @dec_mutex: decoder hardware lock
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: the lock protecting the context list
* @decode_workqueue: decode work queue
*
* @irqlock: protect data access by irq handler and work thread
@@ -249,6 +260,8 @@ struct mtk_vcodec_dec_ctx {
* @vdec_racing_info: record register value
* @dec_racing_info_mutex: mutex lock used for inner racing mode
* @dbgfs: debug log related information
+ *
+ * @chip_name: identifies the platform so the correct codec configuration values can be selected
*/
struct mtk_vcodec_dec_dev {
struct v4l2_device v4l2_dev;
@@ -270,6 +283,7 @@ struct mtk_vcodec_dec_dev {
/* decoder hardware mutex lock */
struct mutex dec_mutex[MTK_VDEC_HW_MAX];
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *decode_workqueue;
spinlock_t irqlock;
@@ -289,6 +303,8 @@ struct mtk_vcodec_dec_dev {
/* Protects access to vdec_racing_info data */
struct mutex dec_racing_info_mutex;
struct mtk_vcodec_dbgfs dbgfs;
+
+ enum mtk_vcodec_dec_chip_name chip_name;
};
static inline struct mtk_vcodec_dec_ctx *fh_to_dec_ctx(struct v4l2_fh *fh)
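Since probe already resolves per-SoC data through of_device_get_match_data() for vdec_pdata, the chip identity could in principle ride along in the same match data instead of a second of_device_is_compatible() chain. A minimal sketch of that alternative, reusing the enum added above; all example_* names are illustrative, not the driver's API:

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-SoC match data; only chip_name is shown. */
struct example_vdec_pdata {
	enum mtk_vcodec_dec_chip_name chip_name;
};

static const struct example_vdec_pdata example_mt8195_pdata = {
	.chip_name = MTK_VDEC_MT8195,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "mediatek,mt8195-vcodec-dec", .data = &example_mt8195_pdata },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_vdec_pdata *pdata =
		of_device_get_match_data(&pdev->dev);

	if (!pdata)
		return -EINVAL;
	/* pdata->chip_name is now available without re-parsing compatibles. */
	return 0;
}

The driver keeps the explicit of_device_is_compatible() walk instead, which avoids touching every existing match table for a single extra field.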
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index e29c9c58f..d54b38337 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -58,6 +58,15 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
},
{
.cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .def = V4L2_MPEG_VIDEO_H264_LEVEL_4_1,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
@@ -100,7 +109,17 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
.min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
.def = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
- .max = V4L2_MPEG_VIDEO_VP9_PROFILE_3,
+ .max = V4L2_MPEG_VIDEO_VP9_PROFILE_2,
+ .menu_skip_mask = BIT(V4L2_MPEG_VIDEO_VP9_PROFILE_1),
+ },
+ .codec_type = V4L2_PIX_FMT_VP9_FRAME,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_VP9_LEVEL,
+ .min = V4L2_MPEG_VIDEO_VP9_LEVEL_1_0,
+ .def = V4L2_MPEG_VIDEO_VP9_LEVEL_4_0,
+ .max = V4L2_MPEG_VIDEO_VP9_LEVEL_4_1,
},
.codec_type = V4L2_PIX_FMT_VP9_FRAME,
},
@@ -140,6 +159,16 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
},
{
.cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .def = V4L2_MPEG_VIDEO_HEVC_LEVEL_4,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1,
+ },
+ .codec_type = V4L2_PIX_FMT_HEVC_SLICE,
+ },
+
+ {
+ .cfg = {
.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
.min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
.def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
@@ -519,6 +548,141 @@ static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
.s_ctrl = mtk_vdec_s_ctrl,
};
+static void mtk_vcodec_dec_fill_h264_level(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (ctx->dev->chip_name) {
+ case MTK_VDEC_MT8192:
+ case MTK_VDEC_MT8188:
+ cfg->max = V4L2_MPEG_VIDEO_H264_LEVEL_5_2;
+ break;
+ case MTK_VDEC_MT8195:
+ cfg->max = V4L2_MPEG_VIDEO_H264_LEVEL_6_0;
+ break;
+ case MTK_VDEC_MT8183:
+ case MTK_VDEC_MT8186:
+ cfg->max = V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
+ break;
+ default:
+ cfg->max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ break;
+ };
+}
+
+static void mtk_vcodec_dec_fill_h264_profile(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (ctx->dev->chip_name) {
+ case MTK_VDEC_MT8188:
+ case MTK_VDEC_MT8195:
+ cfg->max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10;
+ break;
+ default:
+ cfg->max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+ break;
+ };
+}
+
+static void mtk_vcodec_dec_fill_h265_level(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (ctx->dev->chip_name) {
+ case MTK_VDEC_MT8188:
+ cfg->max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1;
+ break;
+ case MTK_VDEC_MT8195:
+ cfg->max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2;
+ break;
+ default:
+ cfg->max = V4L2_MPEG_VIDEO_HEVC_LEVEL_4;
+ break;
+ };
+}
+
+static void mtk_vcodec_dec_fill_h265_profile(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (ctx->dev->chip_name) {
+ case MTK_VDEC_MT8188:
+ case MTK_VDEC_MT8195:
+ cfg->max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10;
+ break;
+ default:
+ cfg->max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE;
+ break;
+ };
+}
+
+static void mtk_vcodec_dec_fill_vp9_level(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (ctx->dev->chip_name) {
+ case MTK_VDEC_MT8192:
+ case MTK_VDEC_MT8188:
+ cfg->max = V4L2_MPEG_VIDEO_VP9_LEVEL_5_1;
+ break;
+ case MTK_VDEC_MT8195:
+ cfg->max = V4L2_MPEG_VIDEO_VP9_LEVEL_5_2;
+ break;
+ case MTK_VDEC_MT8186:
+ cfg->max = V4L2_MPEG_VIDEO_VP9_LEVEL_4_1;
+ break;
+ default:
+ cfg->max = V4L2_MPEG_VIDEO_VP9_LEVEL_4_0;
+ break;
+ };
+}
+
+static void mtk_vcodec_dec_fill_vp9_profile(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (ctx->dev->chip_name) {
+ case MTK_VDEC_MT8188:
+ case MTK_VDEC_MT8195:
+ cfg->max = V4L2_MPEG_VIDEO_VP9_PROFILE_2;
+ break;
+ default:
+ cfg->max = V4L2_MPEG_VIDEO_VP9_PROFILE_1;
+ break;
+ };
+}
+
+static void mtk_vcodec_dec_reset_controls(struct v4l2_ctrl_config *cfg,
+ struct mtk_vcodec_dec_ctx *ctx)
+{
+ switch (cfg->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ mtk_vcodec_dec_fill_h264_level(cfg, ctx);
+ mtk_v4l2_vdec_dbg(3, ctx, "h264 supported level: %lld %lld", cfg->max, cfg->def);
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ mtk_vcodec_dec_fill_h265_level(cfg, ctx);
+ mtk_v4l2_vdec_dbg(3, ctx, "h265 supported level: %lld %lld", cfg->max, cfg->def);
+ break;
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ mtk_vcodec_dec_fill_vp9_level(cfg, ctx);
+ mtk_v4l2_vdec_dbg(3, ctx, "vp9 supported level: %lld %lld", cfg->max, cfg->def);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ mtk_vcodec_dec_fill_h264_profile(cfg, ctx);
+ mtk_v4l2_vdec_dbg(3, ctx, "h264 supported profile: %lld %lld", cfg->max,
+ cfg->menu_skip_mask);
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ mtk_vcodec_dec_fill_h265_profile(cfg, ctx);
+ mtk_v4l2_vdec_dbg(3, ctx, "h265 supported profile: %lld %lld", cfg->max,
+ cfg->menu_skip_mask);
+ break;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ mtk_vcodec_dec_fill_vp9_profile(cfg, ctx);
+ mtk_v4l2_vdec_dbg(3, ctx, "vp9 supported profile: %lld %lld", cfg->max,
+ cfg->menu_skip_mask);
+ break;
+ default:
+ break;
+ };
+}
+
static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_dec_ctx *ctx)
{
unsigned int i;
@@ -532,6 +696,8 @@ static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_dec_ctx *ctx)
for (i = 0; i < NUM_CTRLS; i++) {
struct v4l2_ctrl_config cfg = mtk_stateless_controls[i].cfg;
cfg.ops = &mtk_vcodec_dec_ctrl_ops;
+
+ mtk_vcodec_dec_reset_controls(&cfg, ctx);
v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &cfg, NULL);
if (ctx->ctrl_hdl.error) {
mtk_v4l2_vdec_err(ctx, "Adding control %d failed %d", i,
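Note that mtk_vcodec_dec_ctrls_setup() copies each const template into a stack-local v4l2_ctrl_config before mtk_vcodec_dec_reset_controls() clamps it, so the shared table is never mutated. A self-contained sketch of that clamp-then-register pattern; the example_* name is illustrative:

#include <media/v4l2-ctrls.h>

static int example_setup_one_ctrl(struct v4l2_ctrl_handler *hdl,
				  const struct v4l2_ctrl_config *tmpl,
				  s64 chip_max)
{
	struct v4l2_ctrl_config cfg = *tmpl;	/* stack copy, template untouched */

	if (cfg.max > chip_max)
		cfg.max = chip_max;		/* per-chip clamp */
	if (cfg.def > cfg.max)
		cfg.def = cfg.max;		/* keep the default in range */

	v4l2_ctrl_new_custom(hdl, &cfg, NULL);
	return hdl->error;
}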
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index 06ed47df6..21836dd6e 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -869,7 +869,6 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
inst->vpu.codec_type = ctx->current_codec;
inst->vpu.capture_type = ctx->capture_fourcc;
- ctx->drv_handle = inst;
err = vpu_dec_init(&inst->vpu);
if (err) {
mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
@@ -898,6 +897,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
inst, inst->vpu.codec_type);
+ ctx->drv_handle = inst;
return 0;
error_free_inst:
kfree(inst);
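The hunk above moves the ctx->drv_handle assignment past the last point of failure, so an init error can no longer leave the context holding a pointer to freed memory. The general publish-last idiom, as a self-contained sketch with illustrative example_* names:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_inst { int hw_ready; };
struct example_ctx { struct example_inst *drv_handle; };

static int example_hw_init(struct example_inst *inst)
{
	inst->hw_ready = 1;	/* stand-in for real firmware/VPU setup */
	return 0;
}

static int example_init(struct example_ctx *ctx)
{
	struct example_inst *inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	int err;

	if (!inst)
		return -ENOMEM;

	err = example_hw_init(inst);	/* may fail */
	if (err) {
		kfree(inst);		/* ctx->drv_handle never saw inst */
		return err;
	}

	ctx->drv_handle = inst;		/* publish only after full success */
	return 0;
}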
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index e393e3e66..69d37b93b 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -1695,13 +1695,8 @@ static int vdec_vp9_slice_setup_core_buffer(struct vdec_vp9_slice_instance *inst
return -EINVAL;
/* update internal buffer's width/height */
- for (i = 0; i < vq->num_buffers; i++) {
- if (vb == vq->bufs[i]) {
- instance->dpb[i].width = w;
- instance->dpb[i].height = h;
- break;
- }
- }
+ instance->dpb[vb->index].width = w;
+ instance->dpb[vb->index].height = h;
/*
* get buffer's width/height from instance
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
index 82e57ae98..da6be5567 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
@@ -77,12 +77,14 @@ static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vde
struct mtk_vcodec_dec_ctx *ctx;
int ret = false;
+ mutex_lock(&dec_dev->dev_ctx_lock);
list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&dec_dev->dev_ctx_lock);
return ret;
}
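vpu_dec_check_ap_inst() runs from the VPU message path, concurrently with fops open/release adding and removing contexts, so the walk must hold the same dev_ctx_lock as the writers. A miniature of the pattern with illustrative example_* names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_dev {
	struct mutex ctx_lock;		/* protects ctx_list */
	struct list_head ctx_list;
};

struct example_ctx {
	struct list_head list;
	void *vpu_inst;
};

/* Writers take ctx_lock around list_add()/list_del(), so this reader
 * never walks a half-updated list. */
static bool example_inst_is_live(struct example_dev *dev, void *vpu)
{
	struct example_ctx *ctx;
	bool found = false;

	mutex_lock(&dev->ctx_lock);
	list_for_each_entry(ctx, &dev->ctx_list, list) {
		if (ctx->vpu_inst == vpu) {
			found = true;
			break;
		}
	}
	mutex_unlock(&dev->ctx_lock);
	return found;
}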
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index eb381fa6e..181884e79 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@ -912,7 +912,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
err_start_stream:
- for (i = 0; i < q->num_buffers; ++i) {
+ for (i = 0; i < vb2_get_num_buffers(q); ++i) {
struct vb2_buffer *buf = vb2_get_buffer(q, i);
/*
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index 6319f24bc..3cb8a1622 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -177,7 +177,9 @@ static int fops_vcodec_open(struct file *file)
mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
ctx->id, ctx, ctx->m2m_ctx);
+ mutex_lock(&dev->dev_ctx_lock);
list_add(&ctx->list, &dev->ctx_list);
+ mutex_unlock(&dev->dev_ctx_lock);
mutex_unlock(&dev->dev_mutex);
mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
@@ -212,7 +214,9 @@ static int fops_vcodec_release(struct file *file)
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ mutex_lock(&dev->dev_ctx_lock);
list_del_init(&ctx->list);
+ mutex_unlock(&dev->dev_ctx_lock);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -294,6 +298,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
mutex_init(&dev->enc_mutex);
mutex_init(&dev->dev_mutex);
+ mutex_init(&dev->dev_ctx_lock);
spin_lock_init(&dev->irqlock);
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index a042f607e..0bd85d0fb 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -178,6 +178,7 @@ struct mtk_vcodec_enc_ctx {
*
* @enc_mutex: encoder hardware lock.
* @dev_mutex: video_device lock
+ * @dev_ctx_lock: the lock protecting the context list
* @encode_workqueue: encode work queue
*
* @enc_irq: h264 encoder irq resource
@@ -205,6 +206,7 @@ struct mtk_vcodec_enc_dev {
/* encoder hardware mutex lock */
struct mutex enc_mutex;
struct mutex dev_mutex;
+ struct mutex dev_ctx_lock;
struct workqueue_struct *encode_workqueue;
int enc_irq;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index 84ad1cc6a..51bb7ee14 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -47,12 +47,14 @@ static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct ven
struct mtk_vcodec_enc_ctx *ctx;
int ret = false;
+ mutex_lock(&enc_dev->dev_ctx_lock);
list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
ret = true;
break;
}
}
+ mutex_unlock(&enc_dev->dev_ctx_lock);
return ret;
}
diff --git a/drivers/media/platform/microchip/microchip-csi2dc.c b/drivers/media/platform/microchip/microchip-csi2dc.c
index 988c1cc1d..fee73260b 100644
--- a/drivers/media/platform/microchip/microchip-csi2dc.c
+++ b/drivers/media/platform/microchip/microchip-csi2dc.c
@@ -232,8 +232,8 @@ static int csi2dc_get_fmt(struct v4l2_subdev *csi2dc_sd,
struct v4l2_mbus_framefmt *v4l2_try_fmt;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state,
- format->pad);
+ v4l2_try_fmt = v4l2_subdev_state_get_format(sd_state,
+ format->pad);
format->format = *v4l2_try_fmt;
return 0;
@@ -281,13 +281,12 @@ static int csi2dc_set_fmt(struct v4l2_subdev *csi2dc_sd,
req_fmt->format.field = V4L2_FIELD_NONE;
if (req_fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state,
- req_fmt->pad);
+ v4l2_try_fmt = v4l2_subdev_state_get_format(sd_state,
+ req_fmt->pad);
*v4l2_try_fmt = req_fmt->format;
/* Trying on the sink pad makes the source pad change too */
- v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd,
- sd_state,
- CSI2DC_PAD_SOURCE);
+ v4l2_try_fmt = v4l2_subdev_state_get_format(sd_state,
+ CSI2DC_PAD_SOURCE);
*v4l2_try_fmt = req_fmt->format;
/* if we are just trying, we are done */
@@ -436,11 +435,11 @@ static int csi2dc_s_stream(struct v4l2_subdev *csi2dc_sd, int enable)
return ret;
}
-static int csi2dc_init_cfg(struct v4l2_subdev *csi2dc_sd,
- struct v4l2_subdev_state *sd_state)
+static int csi2dc_init_state(struct v4l2_subdev *csi2dc_sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *v4l2_try_fmt =
- v4l2_subdev_get_try_format(csi2dc_sd, sd_state, 0);
+ v4l2_subdev_state_get_format(sd_state, 0);
v4l2_try_fmt->height = 480;
v4l2_try_fmt->width = 640;
@@ -462,7 +461,6 @@ static const struct v4l2_subdev_pad_ops csi2dc_pad_ops = {
.enum_mbus_code = csi2dc_enum_mbus_code,
.set_fmt = csi2dc_set_fmt,
.get_fmt = csi2dc_get_fmt,
- .init_cfg = csi2dc_init_cfg,
};
static const struct v4l2_subdev_video_ops csi2dc_video_ops = {
@@ -474,6 +472,10 @@ static const struct v4l2_subdev_ops csi2dc_subdev_ops = {
.video = &csi2dc_video_ops,
};
+static const struct v4l2_subdev_internal_ops csi2dc_internal_ops = {
+ .init_state = csi2dc_init_state,
+};
+
static int csi2dc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
@@ -678,6 +680,7 @@ static int csi2dc_probe(struct platform_device *pdev)
}
v4l2_subdev_init(&csi2dc->csi2dc_sd, &csi2dc_subdev_ops);
+ csi2dc->csi2dc_sd.internal_ops = &csi2dc_internal_ops;
csi2dc->csi2dc_sd.owner = THIS_MODULE;
csi2dc->csi2dc_sd.dev = dev;
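This is part of a tree-wide v4l2-subdev migration in this release: the pad operation .init_cfg becomes the internal operation .init_state, and v4l2_subdev_get_try_format() and friends collapse into v4l2_subdev_state_get_format()/..._get_crop(), which take only the state. A minimal sketch of the new-style wiring; the example_* names and the default format chosen are illustrative:

#include <linux/media-bus-format.h>
#include <media/v4l2-subdev.h>

static int example_init_state(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state)
{
	struct v4l2_mbus_framefmt *fmt =
		v4l2_subdev_state_get_format(sd_state, 0);

	fmt->width = 640;
	fmt->height = 480;
	fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;	/* illustrative default */
	return 0;
}

static const struct v4l2_subdev_internal_ops example_internal_ops = {
	.init_state = example_init_state,	/* was pad_ops.init_cfg */
};

/* At probe time, after v4l2_subdev_init():
 *	sd->internal_ops = &example_internal_ops;
 */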
diff --git a/drivers/media/platform/microchip/microchip-isc-base.c b/drivers/media/platform/microchip/microchip-isc-base.c
index 1f8528844..f3a5cbaca 100644
--- a/drivers/media/platform/microchip/microchip-isc-base.c
+++ b/drivers/media/platform/microchip/microchip-isc-base.c
@@ -851,38 +851,6 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
return 0;
}
-static void isc_try_fse(struct isc_device *isc,
- struct v4l2_subdev_state *sd_state)
-{
- struct v4l2_subdev_frame_size_enum fse = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
- int ret;
-
- /*
- * If we do not know yet which format the subdev is using, we cannot
- * do anything.
- */
- if (!isc->config.sd_format)
- return;
-
- fse.code = isc->try_config.sd_format->mbus_code;
-
- ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
- sd_state, &fse);
- /*
- * Attempt to obtain format size from subdev. If not available,
- * just use the maximum ISC can receive.
- */
- if (ret) {
- sd_state->pads->try_crop.width = isc->max_width;
- sd_state->pads->try_crop.height = isc->max_height;
- } else {
- sd_state->pads->try_crop.width = fse.max_width;
- sd_state->pads->try_crop.height = fse.max_height;
- }
-}
-
static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f)
{
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
@@ -944,10 +912,6 @@ static int isc_validate(struct isc_device *isc)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = isc->remote_pad,
};
- struct v4l2_subdev_pad_config pad_cfg = {};
- struct v4l2_subdev_state pad_state = {
- .pads = &pad_cfg,
- };
/* Get current format from subdev */
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, get_fmt, NULL,
@@ -1008,9 +972,6 @@ static int isc_validate(struct isc_device *isc)
if (ret)
return ret;
- /* Obtain frame sizes if possible to have crop requirements ready */
- isc_try_fse(isc, &pad_state);
-
/* Configure ISC pipeline for the config */
ret = isc_try_configure_pipeline(isc);
if (ret)
@@ -1819,7 +1780,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &isc->lock;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->dev = isc->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/microchip/microchip-isc-scaler.c b/drivers/media/platform/microchip/microchip-isc-scaler.c
index 0f29a32d1..e83463543 100644
--- a/drivers/media/platform/microchip/microchip-isc-scaler.c
+++ b/drivers/media/platform/microchip/microchip-isc-scaler.c
@@ -33,8 +33,8 @@ static int isc_scaler_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *v4l2_try_fmt;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state,
- format->pad);
+ v4l2_try_fmt = v4l2_subdev_state_get_format(sd_state,
+ format->pad);
format->format = *v4l2_try_fmt;
return 0;
@@ -74,12 +74,12 @@ static int isc_scaler_set_fmt(struct v4l2_subdev *sd,
req_fmt->format.code = fmt->mbus_code;
if (req_fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state,
- req_fmt->pad);
+ v4l2_try_fmt = v4l2_subdev_state_get_format(sd_state,
+ req_fmt->pad);
*v4l2_try_fmt = req_fmt->format;
/* Trying on the sink pad makes the source pad change too */
- v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state,
- ISC_SCALER_PAD_SOURCE);
+ v4l2_try_fmt = v4l2_subdev_state_get_format(sd_state,
+ ISC_SCALER_PAD_SOURCE);
*v4l2_try_fmt = req_fmt->format;
v4l_bound_align_image(&v4l2_try_fmt->width,
@@ -145,17 +145,17 @@ static int isc_scaler_g_sel(struct v4l2_subdev *sd,
return 0;
}
-static int isc_scaler_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int isc_scaler_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *v4l2_try_fmt =
- v4l2_subdev_get_try_format(sd, sd_state, 0);
+ v4l2_subdev_state_get_format(sd_state, 0);
struct v4l2_rect *try_crop;
struct isc_device *isc = container_of(sd, struct isc_device, scaler_sd);
*v4l2_try_fmt = isc->scaler_format[ISC_SCALER_PAD_SOURCE];
- try_crop = v4l2_subdev_get_try_crop(sd, sd_state, 0);
+ try_crop = v4l2_subdev_state_get_crop(sd_state, 0);
try_crop->top = 0;
try_crop->left = 0;
@@ -170,7 +170,6 @@ static const struct v4l2_subdev_pad_ops isc_scaler_pad_ops = {
.set_fmt = isc_scaler_set_fmt,
.get_fmt = isc_scaler_get_fmt,
.get_selection = isc_scaler_g_sel,
- .init_cfg = isc_scaler_init_cfg,
};
static const struct media_entity_operations isc_scaler_entity_ops = {
@@ -181,11 +180,16 @@ static const struct v4l2_subdev_ops xisc_scaler_subdev_ops = {
.pad = &isc_scaler_pad_ops,
};
+static const struct v4l2_subdev_internal_ops isc_scaler_internal_ops = {
+ .init_state = isc_scaler_init_state,
+};
+
int isc_scaler_init(struct isc_device *isc)
{
int ret;
v4l2_subdev_init(&isc->scaler_sd, &xisc_scaler_subdev_ops);
+ isc->scaler_sd.internal_ops = &isc_scaler_internal_ops;
isc->scaler_sd.owner = THIS_MODULE;
isc->scaler_sd.dev = isc->dev;
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index b9e6782f5..a1fcb616b 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -26,7 +26,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/sched.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/v4l2-controls.h>
#include <linux/videodev2.h>
@@ -120,7 +119,7 @@ struct npcm_video {
struct mutex video_lock; /* v4l2 and videobuf2 lock */
struct list_head buffers;
- spinlock_t lock; /* buffer list lock */
+ struct mutex buffer_lock; /* buffer list lock */
unsigned long flags;
unsigned int sequence;
@@ -393,7 +392,7 @@ static void npcm_video_free_diff_table(struct npcm_video *video)
struct rect_list *tmp;
unsigned int i;
- for (i = 0; i < video->queue.num_buffers; i++) {
+ for (i = 0; i < vb2_get_num_buffers(&video->queue); i++) {
head = &video->list[i];
list_for_each_safe(pos, nx, head) {
tmp = list_entry(pos, struct rect_list, list);
@@ -782,7 +781,6 @@ static int npcm_video_start_frame(struct npcm_video *video)
{
struct npcm_video_buffer *buf;
struct regmap *vcd = video->vcd_regmap;
- unsigned long flags;
unsigned int val;
int ret;
@@ -798,17 +796,17 @@ static int npcm_video_start_frame(struct npcm_video *video)
return -EBUSY;
}
- spin_lock_irqsave(&video->lock, flags);
+ mutex_lock(&video->buffer_lock);
buf = list_first_entry_or_null(&video->buffers,
struct npcm_video_buffer, link);
if (!buf) {
- spin_unlock_irqrestore(&video->lock, flags);
+ mutex_unlock(&video->buffer_lock);
dev_dbg(video->dev, "No empty buffers; skip capture frame\n");
return 0;
}
set_bit(VIDEO_CAPTURING, &video->flags);
- spin_unlock_irqrestore(&video->lock, flags);
+ mutex_unlock(&video->buffer_lock);
npcm_video_vcd_state_machine_reset(video);
@@ -834,14 +832,13 @@ static void npcm_video_bufs_done(struct npcm_video *video,
enum vb2_buffer_state state)
{
struct npcm_video_buffer *buf;
- unsigned long flags;
- spin_lock_irqsave(&video->lock, flags);
+ mutex_lock(&video->buffer_lock);
list_for_each_entry(buf, &video->buffers, link)
vb2_buffer_done(&buf->vb.vb2_buf, state);
INIT_LIST_HEAD(&video->buffers);
- spin_unlock_irqrestore(&video->lock, flags);
+ mutex_unlock(&video->buffer_lock);
}
static void npcm_video_get_diff_rect(struct npcm_video *video, unsigned int index)
@@ -1071,12 +1068,12 @@ static irqreturn_t npcm_video_irq(int irq, void *arg)
if (status & VCD_STAT_DONE) {
regmap_write(vcd, VCD_INTE, 0);
- spin_lock(&video->lock);
+ mutex_lock(&video->buffer_lock);
clear_bit(VIDEO_CAPTURING, &video->flags);
buf = list_first_entry_or_null(&video->buffers,
struct npcm_video_buffer, link);
if (!buf) {
- spin_unlock(&video->lock);
+ mutex_unlock(&video->buffer_lock);
return IRQ_NONE;
}
@@ -1093,7 +1090,7 @@ static irqreturn_t npcm_video_irq(int irq, void *arg)
size = npcm_video_hextile(video, index, dma_addr, addr);
break;
default:
- spin_unlock(&video->lock);
+ mutex_unlock(&video->buffer_lock);
return IRQ_NONE;
}
@@ -1104,7 +1101,7 @@ static irqreturn_t npcm_video_irq(int irq, void *arg)
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
list_del(&buf->link);
- spin_unlock(&video->lock);
+ mutex_unlock(&video->buffer_lock);
if (npcm_video_start_frame(video))
dev_err(video->dev, "Failed to capture next frame\n");
@@ -1508,13 +1505,12 @@ static void npcm_video_buf_queue(struct vb2_buffer *vb)
struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct npcm_video_buffer *nvb = to_npcm_video_buffer(vbuf);
- unsigned long flags;
bool empty;
- spin_lock_irqsave(&video->lock, flags);
+ mutex_lock(&video->buffer_lock);
empty = list_empty(&video->buffers);
list_add_tail(&nvb->link, &video->buffers);
- spin_unlock_irqrestore(&video->lock, flags);
+ mutex_unlock(&video->buffer_lock);
if (test_bit(VIDEO_STREAMING, &video->flags) &&
!test_bit(VIDEO_CAPTURING, &video->flags) && empty) {
@@ -1616,7 +1612,7 @@ static int npcm_video_setup_video(struct npcm_video *video)
vbq->drv_priv = video;
vbq->buf_struct_size = sizeof(struct npcm_video_buffer);
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vbq->min_buffers_needed = 3;
+ vbq->min_queued_buffers = 3;
rc = vb2_queue_init(vbq);
if (rc) {
@@ -1744,8 +1740,8 @@ static int npcm_video_probe(struct platform_device *pdev)
return -ENOMEM;
video->dev = &pdev->dev;
- spin_lock_init(&video->lock);
mutex_init(&video->video_lock);
+ mutex_init(&video->buffer_lock);
INIT_LIST_HEAD(&video->buffers);
regs = devm_platform_ioremap_resource(pdev, 0);
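Swapping the buffer-list spinlock for a mutex is only legal because the handler runs where sleeping is allowed; the driver presumably registers a threaded IRQ, and the sketch below assumes so. All example_* names are illustrative:

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct example_video { struct mutex buffer_lock; };

static irqreturn_t example_video_irq(int irq, void *arg)
{
	struct example_video *video = arg;

	mutex_lock(&video->buffer_lock);	/* may sleep: thread context only */
	/* ... pick the first queued buffer, mark it done ... */
	mutex_unlock(&video->buffer_lock);
	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq,
			       struct example_video *video)
{
	/* NULL hard-IRQ handler: the core runs example_video_irq in a thread. */
	return devm_request_threaded_irq(dev, irq, NULL, example_video_irq,
					 IRQF_ONESHOT, "example-video", video);
}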
diff --git a/drivers/media/platform/nvidia/tegra-vde/Kconfig b/drivers/media/platform/nvidia/tegra-vde/Kconfig
index f7454823b..2fe13f39c 100644
--- a/drivers/media/platform/nvidia/tegra-vde/Kconfig
+++ b/drivers/media/platform/nvidia/tegra-vde/Kconfig
@@ -6,7 +6,6 @@ config VIDEO_TEGRA_VDE
select DMA_SHARED_BUFFER
select IOMMU_IOVA
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
select SRAM
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/platform/nvidia/tegra-vde/v4l2.c b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
index bd8c207d5..0f48ce6f2 100644
--- a/drivers/media/platform/nvidia/tegra-vde/v4l2.c
+++ b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
@@ -813,7 +813,7 @@ static int tegra_open(struct file *file)
struct tegra_ctx *ctx;
int err;
- ctx = kzalloc(offsetof(struct tegra_ctx, ctrls[ARRAY_SIZE(ctrl_cfgs)]),
+ ctx = kzalloc(struct_size(ctx, ctrls, ARRAY_SIZE(ctrl_cfgs)),
GFP_KERNEL);
if (!ctx)
return -ENOMEM;
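struct_size() computes the same byte count as the old offsetof(struct tegra_ctx, ctrls[N]) expression, but it saturates the element-count multiply on overflow, so a corrupt count makes the allocation fail instead of wrapping to a short buffer. An illustrative self-contained sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_ctx {
	size_t nctrls;
	void *ctrls[];			/* flexible array member */
};

static struct example_ctx *example_alloc_ctx(size_t n)
{
	/* struct_size() saturates to SIZE_MAX on multiply overflow, so a
	 * bogus n makes kzalloc() fail cleanly. */
	struct example_ctx *ctx = kzalloc(struct_size(ctx, ctrls, n), GFP_KERNEL);

	if (ctx)
		ctx->nctrls = n;
	return ctx;
}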
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index b08f6d2e7..db8ff5f5c 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -954,7 +954,7 @@ static int mipi_csis_s_stream(struct v4l2_subdev *sd, int enable)
state = v4l2_subdev_lock_and_get_active_state(sd);
- format = v4l2_subdev_get_pad_format(sd, state, CSIS_PAD_SINK);
+ format = v4l2_subdev_state_get_format(state, CSIS_PAD_SINK);
csis_fmt = find_csis_format(format->code);
ret = mipi_csis_calculate_params(csis, csis_fmt);
@@ -1002,7 +1002,7 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, code->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, code->pad);
code->code = fmt->code;
return 0;
}
@@ -1069,7 +1069,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
&sdformat->format.height, 1,
CSIS_MAX_PIX_HEIGHT, 0, 0);
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, sdformat->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
fmt->code = csis_fmt->code;
fmt->width = sdformat->format.width;
@@ -1083,7 +1083,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
sdformat->format = *fmt;
/* Propagate the format from sink to source. */
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, CSIS_PAD_SOURCE);
+ fmt = v4l2_subdev_state_get_format(sd_state, CSIS_PAD_SOURCE);
*fmt = sdformat->format;
/* The format on the source pad might change due to unpacking. */
@@ -1104,7 +1104,7 @@ static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
return -EINVAL;
state = v4l2_subdev_lock_and_get_active_state(sd);
- fmt = v4l2_subdev_get_pad_format(sd, state, CSIS_PAD_SOURCE);
+ fmt = v4l2_subdev_state_get_format(state, CSIS_PAD_SOURCE);
csis_fmt = find_csis_format(fmt->code);
v4l2_subdev_unlock_state(state);
@@ -1122,8 +1122,8 @@ static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
return 0;
}
-static int mipi_csis_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int mipi_csis_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = {
.pad = CSIS_PAD_SINK,
@@ -1163,7 +1163,6 @@ static const struct v4l2_subdev_video_ops mipi_csis_video_ops = {
};
static const struct v4l2_subdev_pad_ops mipi_csis_pad_ops = {
- .init_cfg = mipi_csis_init_cfg,
.enum_mbus_code = mipi_csis_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mipi_csis_set_fmt,
@@ -1176,6 +1175,10 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
+ .init_state = mipi_csis_init_state,
+};
+
/* -----------------------------------------------------------------------------
* Media entity operations
*/
@@ -1350,6 +1353,7 @@ static int mipi_csis_subdev_init(struct mipi_csis_device *csis)
int ret;
v4l2_subdev_init(sd, &mipi_csis_subdev_ops);
+ sd->internal_ops = &mipi_csis_internal_ops;
sd->owner = THIS_MODULE;
snprintf(sd->name, sizeof(sd->name), "csis-%s",
dev_name(csis->dev));
diff --git a/drivers/media/platform/nxp/imx7-media-csi.c b/drivers/media/platform/nxp/imx7-media-csi.c
index 15049c6aa..9566ff738 100644
--- a/drivers/media/platform/nxp/imx7-media-csi.c
+++ b/drivers/media/platform/nxp/imx7-media-csi.c
@@ -3,31 +3,46 @@
* V4L2 Capture CSI Subdev for Freescale i.MX6UL/L / i.MX7 SOC
*
* Copyright (c) 2019 Linaro Ltd
- *
*/
#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/math.h>
-#include <linux/mfd/syscon.h>
#include <linux/minmax.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timekeeping.h>
#include <linux/types.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-fwnode.h>
+#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
#define IMX7_CSI_PAD_SINK 0
#define IMX7_CSI_PAD_SRC 1
@@ -542,8 +557,8 @@ static void imx7_csi_configure(struct imx7_csi *csi,
} else {
const struct v4l2_mbus_framefmt *sink_fmt;
- sink_fmt = v4l2_subdev_get_pad_format(&csi->sd, sd_state,
- IMX7_CSI_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ IMX7_CSI_PAD_SINK);
cr1 = BIT_SOF_POL | BIT_REDGE | BIT_HSYNC_POL | BIT_FCC
| BIT_MCLKDIV(1) | BIT_MCLKEN;
@@ -1245,6 +1260,7 @@ static int imx7_csi_video_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
struct v4l2_pix_format *pix = &csi->vdev_fmt;
unsigned int count = *nbuffers;
@@ -1254,14 +1270,14 @@ static int imx7_csi_video_queue_setup(struct vb2_queue *vq,
if (*nplanes) {
if (*nplanes != 1 || sizes[0] < pix->sizeimage)
return -EINVAL;
- count += vq->num_buffers;
+ count += q_num_bufs;
}
count = min_t(__u32, IMX7_CSI_VIDEO_MEM_LIMIT / pix->sizeimage, count);
if (*nplanes)
- *nbuffers = (count < vq->num_buffers) ? 0 :
- count - vq->num_buffers;
+ *nbuffers = (count < q_num_bufs) ? 0 :
+ count - q_num_bufs;
else
*nbuffers = count;
@@ -1675,7 +1691,7 @@ static int imx7_csi_video_init(struct imx7_csi *csi)
vq->mem_ops = &vb2_dma_contig_memops;
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vq->lock = &csi->vdev_mutex;
- vq->min_buffers_needed = 2;
+ vq->min_queued_buffers = 2;
vq->dev = csi->dev;
ret = vb2_queue_init(vq);
@@ -1728,8 +1744,8 @@ out_unlock:
return ret;
}
-static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int imx7_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
const struct imx7_csi_pixfmt *cc;
int i;
@@ -1738,7 +1754,7 @@ static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_get_pad_format(sd, sd_state, i);
+ v4l2_subdev_state_get_format(sd_state, i);
mf->code = IMX7_CSI_DEF_MBUS_CODE;
mf->width = IMX7_CSI_DEF_PIX_WIDTH;
@@ -1762,7 +1778,7 @@ static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *in_fmt;
int ret = 0;
- in_fmt = v4l2_subdev_get_pad_format(sd, sd_state, IMX7_CSI_PAD_SINK);
+ in_fmt = v4l2_subdev_state_get_format(sd_state, IMX7_CSI_PAD_SINK);
switch (code->pad) {
case IMX7_CSI_PAD_SINK:
@@ -1841,7 +1857,7 @@ static void imx7_csi_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *in_fmt;
u32 code;
- in_fmt = v4l2_subdev_get_pad_format(sd, sd_state, IMX7_CSI_PAD_SINK);
+ in_fmt = v4l2_subdev_state_get_format(sd_state, IMX7_CSI_PAD_SINK);
switch (sdformat->pad) {
case IMX7_CSI_PAD_SRC:
@@ -1891,7 +1907,7 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
imx7_csi_try_fmt(sd, sd_state, sdformat, &cc);
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, sdformat->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
*fmt = sdformat->format;
@@ -1902,8 +1918,8 @@ static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
format.format = sdformat->format;
imx7_csi_try_fmt(sd, sd_state, &format, &outcc);
- outfmt = v4l2_subdev_get_pad_format(sd, sd_state,
- IMX7_CSI_PAD_SRC);
+ outfmt = v4l2_subdev_state_get_format(sd_state,
+ IMX7_CSI_PAD_SRC);
*outfmt = format.format;
}
@@ -2005,7 +2021,6 @@ static const struct v4l2_subdev_video_ops imx7_csi_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx7_csi_pad_ops = {
- .init_cfg = imx7_csi_init_cfg,
.enum_mbus_code = imx7_csi_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx7_csi_set_fmt,
@@ -2018,6 +2033,7 @@ static const struct v4l2_subdev_ops imx7_csi_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops imx7_csi_internal_ops = {
+ .init_state = imx7_csi_init_state,
.registered = imx7_csi_registered,
.unregistered = imx7_csi_unregistered,
};
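The vb2 rework behind this release makes the queue's buffer count private, hence the vb2_get_num_buffers() accessor in queue_setup above. A condensed sketch of the accessor-based arithmetic; the cap and buffer size below are illustrative placeholders:

#include <linux/minmax.h>
#include <linux/sizes.h>
#include <media/videobuf2-core.h>

static int example_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			       unsigned int *nplanes, unsigned int sizes[],
			       struct device *alloc_devs[])
{
	/* Read the already-allocated count through the accessor, not
	 * vq->num_buffers. */
	unsigned int q_num_bufs = vb2_get_num_buffers(vq);
	unsigned int count = min(*nbuffers + q_num_bufs, 32U); /* illustrative cap */

	/* Report only how many more buffers still need allocating. */
	*nbuffers = count > q_num_bufs ? count - q_num_bufs : 0;
	*nplanes = 1;
	sizes[0] = SZ_1M;	/* illustrative sizeimage */
	return 0;
}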
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
index c9a4d091b..93a55c97c 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
@@ -58,7 +58,7 @@ static int mxc_isi_crossbar_gasket_enable(struct mxc_isi_crossbar *xbar,
return -EINVAL;
}
- fmt = v4l2_subdev_state_get_stream_format(state, port, 0);
+ fmt = v4l2_subdev_state_get_format(state, port, 0);
if (!fmt)
return -EINVAL;
@@ -175,8 +175,8 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar,
return sd;
}
-static int mxc_isi_crossbar_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int mxc_isi_crossbar_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct mxc_isi_crossbar *xbar = to_isi_crossbar(sd);
struct v4l2_subdev_krouting routing = { };
@@ -280,8 +280,7 @@ static int mxc_isi_crossbar_set_fmt(struct v4l2_subdev *sd,
* Set the format on the sink stream and propagate it to the source
* streams.
*/
- sink_fmt = v4l2_subdev_state_get_stream_format(state, fmt->pad,
- fmt->stream);
+ sink_fmt = v4l2_subdev_state_get_format(state, fmt->pad, fmt->stream);
if (!sink_fmt)
return -EINVAL;
@@ -295,8 +294,9 @@ static int mxc_isi_crossbar_set_fmt(struct v4l2_subdev *sd,
route->sink_stream != fmt->stream)
continue;
- source_fmt = v4l2_subdev_state_get_stream_format(state, route->source_pad,
- route->source_stream);
+ source_fmt = v4l2_subdev_state_get_format(state,
+ route->source_pad,
+ route->source_stream);
if (!source_fmt)
return -EINVAL;
@@ -403,7 +403,6 @@ static int mxc_isi_crossbar_disable_streams(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops mxc_isi_crossbar_subdev_pad_ops = {
- .init_cfg = mxc_isi_crossbar_init_cfg,
.enum_mbus_code = mxc_isi_crossbar_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mxc_isi_crossbar_set_fmt,
@@ -416,6 +415,10 @@ static const struct v4l2_subdev_ops mxc_isi_crossbar_subdev_ops = {
.pad = &mxc_isi_crossbar_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mxc_isi_crossbar_internal_ops = {
+ .init_state = mxc_isi_crossbar_init_state,
+};
+
static const struct media_entity_operations mxc_isi_cross_entity_ops = {
.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
.link_validate = v4l2_subdev_link_validate,
@@ -437,6 +440,7 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
xbar->isi = isi;
v4l2_subdev_init(sd, &mxc_isi_crossbar_subdev_ops);
+ sd->internal_ops = &mxc_isi_crossbar_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
strscpy(sd->name, "crossbar", sizeof(sd->name));
sd->dev = isi->dev;
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-debug.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-debug.c
index 6709ab7ea..5e8a177da 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-debug.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-debug.c
@@ -22,10 +22,11 @@ static inline u32 mxc_isi_read(struct mxc_isi_pipe *pipe, u32 reg)
static int mxc_isi_debug_dump_regs_show(struct seq_file *m, void *p)
{
#define MXC_ISI_DEBUG_REG(name) { name, #name }
- static const struct {
+ struct debug_regs {
u32 offset;
const char * const name;
- } registers[] = {
+ };
+ static const struct debug_regs registers[] = {
MXC_ISI_DEBUG_REG(CHNL_CTRL),
MXC_ISI_DEBUG_REG(CHNL_IMG_CTRL),
MXC_ISI_DEBUG_REG(CHNL_OUT_BUF_CTRL),
@@ -67,6 +68,16 @@ static int mxc_isi_debug_dump_regs_show(struct seq_file *m, void *p)
MXC_ISI_DEBUG_REG(CHNL_SCL_IMG_CFG),
MXC_ISI_DEBUG_REG(CHNL_FLOW_CTRL),
};
+ /* These registers contain the upper 4 bits of 36-bit DMA addresses. */
+ static const struct debug_regs registers_36bit_dma[] = {
+ MXC_ISI_DEBUG_REG(CHNL_Y_BUF1_XTND_ADDR),
+ MXC_ISI_DEBUG_REG(CHNL_U_BUF1_XTND_ADDR),
+ MXC_ISI_DEBUG_REG(CHNL_V_BUF1_XTND_ADDR),
+ MXC_ISI_DEBUG_REG(CHNL_Y_BUF2_XTND_ADDR),
+ MXC_ISI_DEBUG_REG(CHNL_U_BUF2_XTND_ADDR),
+ MXC_ISI_DEBUG_REG(CHNL_V_BUF2_XTND_ADDR),
+ MXC_ISI_DEBUG_REG(CHNL_IN_BUF_XTND_ADDR),
+ };
struct mxc_isi_pipe *pipe = m->private;
unsigned int i;
@@ -77,10 +88,20 @@ static int mxc_isi_debug_dump_regs_show(struct seq_file *m, void *p)
seq_printf(m, "--- ISI pipe %u registers ---\n", pipe->id);
for (i = 0; i < ARRAY_SIZE(registers); ++i)
- seq_printf(m, "%20s[0x%02x]: 0x%08x\n",
+ seq_printf(m, "%21s[0x%02x]: 0x%08x\n",
registers[i].name, registers[i].offset,
mxc_isi_read(pipe, registers[i].offset));
+ if (pipe->isi->pdata->has_36bit_dma) {
+ for (i = 0; i < ARRAY_SIZE(registers_36bit_dma); ++i) {
+ const struct debug_regs *reg = &registers_36bit_dma[i];
+
+ seq_printf(m, "%21s[0x%02x]: 0x%08x\n",
+ reg->name, reg->offset,
+ mxc_isi_read(pipe, reg->offset));
+ }
+ }
+
pm_runtime_put(pipe->isi->dev);
return 0;
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
index 65d20e9ba..d76eb58de 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
@@ -263,10 +263,10 @@ int mxc_isi_pipe_enable(struct mxc_isi_pipe *pipe)
/* Configure the pipeline. */
state = v4l2_subdev_lock_and_get_active_state(sd);
- sink_fmt = v4l2_subdev_get_try_format(sd, state, MXC_ISI_PIPE_PAD_SINK);
- src_fmt = v4l2_subdev_get_try_format(sd, state, MXC_ISI_PIPE_PAD_SOURCE);
- compose = v4l2_subdev_get_try_compose(sd, state, MXC_ISI_PIPE_PAD_SINK);
- crop = *v4l2_subdev_get_try_crop(sd, state, MXC_ISI_PIPE_PAD_SOURCE);
+ sink_fmt = v4l2_subdev_state_get_format(state, MXC_ISI_PIPE_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, MXC_ISI_PIPE_PAD_SOURCE);
+ compose = v4l2_subdev_state_get_compose(state, MXC_ISI_PIPE_PAD_SINK);
+ crop = *v4l2_subdev_state_get_crop(state, MXC_ISI_PIPE_PAD_SOURCE);
sink_info = mxc_isi_bus_format_by_code(sink_fmt->code,
MXC_ISI_PIPE_PAD_SINK);
@@ -322,7 +322,7 @@ mxc_isi_pipe_get_pad_format(struct mxc_isi_pipe *pipe,
struct v4l2_subdev_state *state,
unsigned int pad)
{
- return v4l2_subdev_get_try_format(&pipe->sd, state, pad);
+ return v4l2_subdev_state_get_format(state, pad);
}
static struct v4l2_rect *
@@ -330,7 +330,7 @@ mxc_isi_pipe_get_pad_crop(struct mxc_isi_pipe *pipe,
struct v4l2_subdev_state *state,
unsigned int pad)
{
- return v4l2_subdev_get_try_crop(&pipe->sd, state, pad);
+ return v4l2_subdev_state_get_crop(state, pad);
}
static struct v4l2_rect *
@@ -338,11 +338,11 @@ mxc_isi_pipe_get_pad_compose(struct mxc_isi_pipe *pipe,
struct v4l2_subdev_state *state,
unsigned int pad)
{
- return v4l2_subdev_get_try_compose(&pipe->sd, state, pad);
+ return v4l2_subdev_state_get_compose(state, pad);
}
-static int mxc_isi_pipe_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int mxc_isi_pipe_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct mxc_isi_pipe *pipe = to_isi_pipe(sd);
struct v4l2_mbus_framefmt *fmt_source;
@@ -682,7 +682,6 @@ static int mxc_isi_pipe_set_selection(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops mxc_isi_pipe_subdev_pad_ops = {
- .init_cfg = mxc_isi_pipe_init_cfg,
.enum_mbus_code = mxc_isi_pipe_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mxc_isi_pipe_set_fmt,
@@ -694,6 +693,10 @@ static const struct v4l2_subdev_ops mxc_isi_pipe_subdev_ops = {
.pad = &mxc_isi_pipe_subdev_pad_ops,
};
+static const struct v4l2_subdev_internal_ops mxc_isi_pipe_internal_ops = {
+ .init_state = mxc_isi_pipe_init_state,
+};
+
/* -----------------------------------------------------------------------------
* IRQ handling
*/
@@ -767,6 +770,7 @@ int mxc_isi_pipe_init(struct mxc_isi_dev *isi, unsigned int id)
sd = &pipe->sd;
v4l2_subdev_init(sd, &mxc_isi_pipe_subdev_ops);
+ sd->internal_ops = &mxc_isi_pipe_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, sizeof(sd->name), "mxc_isi.%d", pipe->id);
sd->dev = isi->dev;
@@ -832,8 +836,8 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe,
int ret;
state = v4l2_subdev_lock_and_get_active_state(sd);
- sink_fmt = v4l2_subdev_get_try_format(sd, state, MXC_ISI_PIPE_PAD_SINK);
- src_fmt = v4l2_subdev_get_try_format(sd, state, MXC_ISI_PIPE_PAD_SOURCE);
+ sink_fmt = v4l2_subdev_state_get_format(state, MXC_ISI_PIPE_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(state, MXC_ISI_PIPE_PAD_SOURCE);
v4l2_subdev_unlock_state(state);
sink_info = mxc_isi_bus_format_by_code(sink_fmt->code,
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
index 10840c9a0..4091f1c0e 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
@@ -713,7 +713,7 @@ static int mxc_isi_video_validate_format(struct mxc_isi_video *video)
info = mxc_isi_format_by_fourcc(video->pix.pixelformat,
MXC_ISI_VIDEO_CAP);
- format = v4l2_subdev_get_try_format(sd, state, MXC_ISI_PIPE_PAD_SOURCE);
+ format = v4l2_subdev_state_get_format(state, MXC_ISI_PIPE_PAD_SOURCE);
if (format->code != info->mbus_code ||
format->width != video->pix.width ||
@@ -1453,7 +1453,7 @@ int mxc_isi_video_register(struct mxc_isi_pipe *pipe,
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct mxc_isi_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->lock = &video->lock;
q->dev = pipe->isi->dev;
diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index ed048f73c..ba2e81f24 100644
--- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -296,7 +296,7 @@ static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state,
/* Calculate the line rate from the pixel rate. */
- fmt = v4l2_subdev_get_pad_format(&state->sd, sd_state, MIPI_CSI2_PAD_SINK);
+ fmt = v4l2_subdev_state_get_format(sd_state, MIPI_CSI2_PAD_SINK);
csi2_fmt = find_csi2_format(fmt->code);
link_freq = v4l2_get_link_freq(state->src_sd->ctrl_handler,
@@ -437,14 +437,15 @@ unlock:
return ret;
}
-static int imx8mq_mipi_csi_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int imx8mq_mipi_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *fmt_sink;
struct v4l2_mbus_framefmt *fmt_source;
- fmt_sink = v4l2_subdev_get_pad_format(sd, sd_state, MIPI_CSI2_PAD_SINK);
- fmt_source = v4l2_subdev_get_pad_format(sd, sd_state, MIPI_CSI2_PAD_SOURCE);
+ fmt_sink = v4l2_subdev_state_get_format(sd_state, MIPI_CSI2_PAD_SINK);
+ fmt_source = v4l2_subdev_state_get_format(sd_state,
+ MIPI_CSI2_PAD_SOURCE);
fmt_sink->code = MEDIA_BUS_FMT_SGBRG10_1X10;
fmt_sink->width = MIPI_CSI2_DEF_PIX_WIDTH;
@@ -477,7 +478,7 @@ static int imx8mq_mipi_csi_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, code->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, code->pad);
code->code = fmt->code;
return 0;
}
@@ -514,7 +515,7 @@ static int imx8mq_mipi_csi_set_fmt(struct v4l2_subdev *sd,
if (!csi2_fmt)
csi2_fmt = &imx8mq_mipi_csi_formats[0];
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, sdformat->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
fmt->code = csi2_fmt->code;
fmt->width = sdformat->format.width;
@@ -523,7 +524,7 @@ static int imx8mq_mipi_csi_set_fmt(struct v4l2_subdev *sd,
sdformat->format = *fmt;
/* Propagate the format from sink to source. */
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, MIPI_CSI2_PAD_SOURCE);
+ fmt = v4l2_subdev_state_get_format(sd_state, MIPI_CSI2_PAD_SOURCE);
*fmt = sdformat->format;
return 0;
@@ -534,7 +535,6 @@ static const struct v4l2_subdev_video_ops imx8mq_mipi_csi_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx8mq_mipi_csi_pad_ops = {
- .init_cfg = imx8mq_mipi_csi_init_cfg,
.enum_mbus_code = imx8mq_mipi_csi_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx8mq_mipi_csi_set_fmt,
@@ -545,6 +545,10 @@ static const struct v4l2_subdev_ops imx8mq_mipi_csi_subdev_ops = {
.pad = &imx8mq_mipi_csi_pad_ops,
};
+static const struct v4l2_subdev_internal_ops imx8mq_mipi_csi_internal_ops = {
+ .init_state = imx8mq_mipi_csi_init_state,
+};
+
/* -----------------------------------------------------------------------------
* Media entity operations
*/
@@ -759,6 +763,7 @@ static int imx8mq_mipi_csi_subdev_init(struct csi_state *state)
int ret;
v4l2_subdev_init(sd, &imx8mq_mipi_csi_subdev_ops);
+ sd->internal_ops = &imx8mq_mipi_csi_internal_ops;
sd->owner = THIS_MODULE;
snprintf(sd->name, sizeof(sd->name), "%s %s",
MIPI_CSI2_SUBDEV_NAME, dev_name(state->dev));
diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
index 05ff5fa80..b11de4797 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
@@ -21,7 +21,6 @@
* interface support. As a result of that it has an
* alternate register layout.
*/
-#define IS_LITE (csid->id >= 2 ? 1 : 0)
#define CSID_HW_VERSION 0x0
#define HW_VERSION_STEPPING 0
@@ -35,13 +34,13 @@
#define CSID_CSI2_RX_IRQ_MASK 0x24
#define CSID_CSI2_RX_IRQ_CLEAR 0x28
-#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) ((IS_LITE ? 0x30 : 0x40) \
+#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) ((csid_is_lite(csid) ? 0x30 : 0x40) \
+ 0x10 * (rdi))
-#define CSID_CSI2_RDIN_IRQ_MASK(rdi) ((IS_LITE ? 0x34 : 0x44) \
+#define CSID_CSI2_RDIN_IRQ_MASK(rdi) ((csid_is_lite(csid) ? 0x34 : 0x44) \
+ 0x10 * (rdi))
-#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) ((IS_LITE ? 0x38 : 0x48) \
+#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) ((csid_is_lite(csid) ? 0x38 : 0x48) \
+ 0x10 * (rdi))
-#define CSID_CSI2_RDIN_IRQ_SET(rdi) ((IS_LITE ? 0x3C : 0x4C) \
+#define CSID_CSI2_RDIN_IRQ_SET(rdi) ((csid_is_lite(csid) ? 0x3C : 0x4C) \
+ 0x10 * (rdi))
#define CSID_TOP_IRQ_STATUS 0x70
@@ -73,7 +72,7 @@
#define CGC_MODE_DYNAMIC_GATING 0
#define CGC_MODE_ALWAYS_ON 1
-#define CSID_RDI_CFG0(rdi) ((IS_LITE ? 0x200 : 0x300) \
+#define CSID_RDI_CFG0(rdi) ((csid_is_lite(csid) ? 0x200 : 0x300) \
+ 0x100 * (rdi))
#define RDI_CFG0_BYTE_CNTR_EN 0
#define RDI_CFG0_FORMAT_MEASURE_EN 1
@@ -98,32 +97,32 @@
#define RDI_CFG0_PACKING_FORMAT 30
#define RDI_CFG0_ENABLE 31
-#define CSID_RDI_CFG1(rdi) ((IS_LITE ? 0x204 : 0x304)\
+#define CSID_RDI_CFG1(rdi) ((csid_is_lite(csid) ? 0x204 : 0x304)\
+ 0x100 * (rdi))
#define RDI_CFG1_TIMESTAMP_STB_SEL 0
-#define CSID_RDI_CTRL(rdi) ((IS_LITE ? 0x208 : 0x308)\
+#define CSID_RDI_CTRL(rdi) ((csid_is_lite(csid) ? 0x208 : 0x308)\
+ 0x100 * (rdi))
#define RDI_CTRL_HALT_CMD 0
#define HALT_CMD_HALT_AT_FRAME_BOUNDARY 0
#define HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
#define RDI_CTRL_HALT_MODE 2
-#define CSID_RDI_FRM_DROP_PATTERN(rdi) ((IS_LITE ? 0x20C : 0x30C)\
+#define CSID_RDI_FRM_DROP_PATTERN(rdi) ((csid_is_lite(csid) ? 0x20C : 0x30C)\
+ 0x100 * (rdi))
-#define CSID_RDI_FRM_DROP_PERIOD(rdi) ((IS_LITE ? 0x210 : 0x310)\
+#define CSID_RDI_FRM_DROP_PERIOD(rdi) ((csid_is_lite(csid) ? 0x210 : 0x310)\
+ 0x100 * (rdi))
-#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) ((IS_LITE ? 0x214 : 0x314)\
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) ((csid_is_lite(csid) ? 0x214 : 0x314)\
+ 0x100 * (rdi))
-#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) ((IS_LITE ? 0x218 : 0x318)\
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) ((csid_is_lite(csid) ? 0x218 : 0x318)\
+ 0x100 * (rdi))
-#define CSID_RDI_RPP_PIX_DROP_PATTERN(rdi) ((IS_LITE ? 0x224 : 0x324)\
+#define CSID_RDI_RPP_PIX_DROP_PATTERN(rdi) ((csid_is_lite(csid) ? 0x224 : 0x324)\
+ 0x100 * (rdi))
-#define CSID_RDI_RPP_PIX_DROP_PERIOD(rdi) ((IS_LITE ? 0x228 : 0x328)\
+#define CSID_RDI_RPP_PIX_DROP_PERIOD(rdi) ((csid_is_lite(csid) ? 0x228 : 0x328)\
+ 0x100 * (rdi))
-#define CSID_RDI_RPP_LINE_DROP_PATTERN(rdi) ((IS_LITE ? 0x22C : 0x32C)\
+#define CSID_RDI_RPP_LINE_DROP_PATTERN(rdi) ((csid_is_lite(csid) ? 0x22C : 0x32C)\
+ 0x100 * (rdi))
-#define CSID_RDI_RPP_LINE_DROP_PERIOD(rdi) ((IS_LITE ? 0x230 : 0x330)\
+#define CSID_RDI_RPP_LINE_DROP_PERIOD(rdi) ((csid_is_lite(csid) ? 0x230 : 0x330)\
+ 0x100 * (rdi))
#define CSID_TPG_CTRL 0x600
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 95873f988..eb27d69e8 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -263,7 +263,7 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
/*
* __csid_get_format - Get pointer to format structure
* @csid: CSID device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
@@ -276,8 +276,7 @@ __csid_get_format(struct csid_device *csid,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csid->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &csid->fmt[pad];
}
@@ -285,7 +284,7 @@ __csid_get_format(struct csid_device *csid,
/*
* csid_try_format - Handle try format by pad subdev method
* @csid: CSID device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
@@ -353,7 +352,7 @@ static void csid_try_format(struct csid_device *csid,
/*
* csid_enum_mbus_code - Handle pixel format enumeration
* @sd: CSID V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -394,7 +393,7 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
/*
* csid_enum_frame_size - Handle frame size enumeration
* @sd: CSID V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
* return -EINVAL or zero on success
*/
@@ -431,7 +430,7 @@ static int csid_enum_frame_size(struct v4l2_subdev *sd,
/*
* csid_get_format - Handle get format by pads subdev method
* @sd: CSID V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
@@ -455,7 +454,7 @@ static int csid_get_format(struct v4l2_subdev *sd,
/*
* csid_set_format - Handle set format by pads subdev method
* @sd: CSID V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
@@ -897,3 +896,8 @@ void msm_csid_unregister_entity(struct csid_device *csid)
media_entity_cleanup(&csid->subdev.entity);
v4l2_ctrl_handler_free(&csid->ctrls);
}
+
+inline bool csid_is_lite(struct csid_device *csid)
+{
+ return csid->camss->res->csid_res[csid->id].is_lite;
+}
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 30d94eb2e..fddccb69d 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -215,5 +215,12 @@ extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
extern const struct csid_hw_ops csid_ops_gen2;
+/*
* csid_is_lite - Check whether the CSID instance is a CSID lite
* @csid: CSID device
*
* Return: true if the CSID is a CSID lite, false otherwise
+ */
+bool csid_is_lite(struct csid_device *csid);
#endif /* QC_MSM_CAMSS_CSID_H */
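The removed IS_LITE macro hard-coded "instances 2 and up are lite", while csid_is_lite() reads a per-SoC resource flag, so the register layout selection now follows the hardware description. As a sketch of how an accessor keys off it, mirroring the RDI IRQ status offsets from the gen2 hunk above (illustrative helper, not from the driver):

static inline u32 example_rdi_irq_status_reg(struct csid_device *csid, int rdi)
{
	/* Lite and full CSID blocks place the RDI IRQ registers at
	 * different bases; pick by runtime flag, not instance id. */
	u32 base = csid_is_lite(csid) ? 0x30 : 0x40;

	return base + 0x10 * rdi;
}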
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index edd573606..264c99efe 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -312,7 +312,7 @@ static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
/*
* __csiphy_get_format - Get pointer to format structure
* @csiphy: CSIPHY device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
@@ -325,8 +325,7 @@ __csiphy_get_format(struct csiphy_device *csiphy,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csiphy->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &csiphy->fmt[pad];
}
@@ -334,7 +333,7 @@ __csiphy_get_format(struct csiphy_device *csiphy,
/*
* csiphy_try_format - Handle try format by pad subdev method
* @csiphy: CSIPHY device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
@@ -381,7 +380,7 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
/*
* csiphy_enum_mbus_code - Handle pixel format enumeration
* @sd: CSIPHY V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -414,7 +413,7 @@ static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
/*
* csiphy_enum_frame_size - Handle frame size enumeration
* @sd: CSIPHY V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
* return -EINVAL or zero on success
*/
@@ -451,7 +450,7 @@ static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
/*
* csiphy_get_format - Handle get format by pads subdev method
* @sd: CSIPHY V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
@@ -475,7 +474,7 @@ static int csiphy_get_format(struct v4l2_subdev *sd,
/*
* csiphy_set_format - Handle set format by pads subdev method
* @sd: CSIPHY V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index be9d2f0a1..a12dcc7ff 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -270,7 +270,7 @@ static int ispif_vfe_reset(struct ispif_device *ispif, u8 vfe_id)
unsigned long time;
u32 val;
- if (vfe_id > camss->res->vfe_num - 1) {
+ if (vfe_id >= camss->res->vfe_num) {
dev_err(camss->dev,
"Error: asked reset for invalid VFE%d\n", vfe_id);
return -ENOENT;
@@ -866,7 +866,7 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
/*
* __ispif_get_format - Get pointer to format structure
* @ispif: ISPIF line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
@@ -879,8 +879,7 @@ __ispif_get_format(struct ispif_line *line,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&line->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &line->fmt[pad];
}
@@ -888,7 +887,7 @@ __ispif_get_format(struct ispif_line *line,
/*
* ispif_try_format - Handle try format by pad subdev method
* @ispif: ISPIF line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
@@ -936,7 +935,7 @@ static void ispif_try_format(struct ispif_line *line,
/*
* ispif_enum_mbus_code - Handle pixel format enumeration
* @sd: ISPIF V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -969,7 +968,7 @@ static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
/*
* ispif_enum_frame_size - Handle frame size enumeration
* @sd: ISPIF V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
* return -EINVAL or zero on success
*/
@@ -1006,7 +1005,7 @@ static int ispif_enum_frame_size(struct v4l2_subdev *sd,
/*
* ispif_get_format - Handle get format by pads subdev method
* @sd: ISPIF V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
@@ -1030,7 +1029,7 @@ static int ispif_get_format(struct v4l2_subdev *sd,
/*
* ispif_set_format - Handle set format by pads subdev method
* @sd: ISPIF V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index 0b211fed1..795ac3815 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -628,42 +628,6 @@ out_unlock:
}
/*
- * vfe_pm_domain_off - Disable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static void vfe_pm_domain_off(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
-
- if (vfe->id >= camss->res->vfe_num)
- return;
-
- device_link_del(camss->genpd_link[vfe->id]);
-}
-
-/*
- * vfe_pm_domain_on - Enable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static int vfe_pm_domain_on(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
- enum vfe_line_id id = vfe->id;
-
- if (id >= camss->res->vfe_num)
- return 0;
-
- camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
- DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
- if (!camss->genpd_link[id])
- return -EINVAL;
-
- return 0;
-}
-
-/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 2911e4126..ef6b34c91 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -936,7 +936,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
-static void vfe_pm_domain_off(struct vfe_device *vfe)
+static void vfe_4_1_pm_domain_off(struct vfe_device *vfe)
{
/* nop */
}
@@ -945,7 +945,7 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
-static int vfe_pm_domain_on(struct vfe_device *vfe)
+static int vfe_4_1_pm_domain_on(struct vfe_device *vfe)
{
return 0;
}
@@ -999,8 +999,8 @@ const struct vfe_hw_ops vfe_ops_4_1 = {
.hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
- .pm_domain_off = vfe_pm_domain_off,
- .pm_domain_on = vfe_pm_domain_on,
+ .pm_domain_off = vfe_4_1_pm_domain_off,
+ .pm_domain_on = vfe_4_1_pm_domain_on,
.reg_update_clear = vfe_reg_update_clear,
.reg_update = vfe_reg_update,
.subdev_init = vfe_subdev_init,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index b65ed0fef..7655d22a9 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -1103,42 +1103,6 @@ static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
}
-/*
- * vfe_pm_domain_off - Disable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static void vfe_pm_domain_off(struct vfe_device *vfe)
-{
- struct camss *camss;
-
- if (!vfe)
- return;
-
- camss = vfe->camss;
-
- device_link_del(camss->genpd_link[vfe->id]);
-}
-
-/*
- * vfe_pm_domain_on - Enable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static int vfe_pm_domain_on(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
- enum vfe_line_id id = vfe->id;
-
- camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
-
- if (!camss->genpd_link[id]) {
- dev_err(vfe->camss->dev, "Failed to add VFE#%d to power domain\n", id);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 7b3805177..f52fa30f3 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -1093,37 +1093,6 @@ static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
}
-/*
- * vfe_pm_domain_off - Disable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static void vfe_pm_domain_off(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
-
- device_link_del(camss->genpd_link[vfe->id]);
-}
-
-/*
- * vfe_pm_domain_on - Enable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static int vfe_pm_domain_on(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
- enum vfe_line_id id = vfe->id;
-
- camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
-
- if (!camss->genpd_link[id]) {
- dev_err(vfe->camss->dev, "Failed to add VFE#%d to power domain\n", id);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
index f2368b77f..dc2735476 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
@@ -15,31 +15,28 @@
#include "camss.h"
#include "camss-vfe.h"
-/* VFE 2/3 are lite and have a different register layout */
-#define IS_LITE (vfe->id >= 2 ? 1 : 0)
-
#define VFE_HW_VERSION (0x00)
-#define VFE_GLOBAL_RESET_CMD (IS_LITE ? 0x0c : 0x1c)
-#define GLOBAL_RESET_HW_AND_REG (IS_LITE ? BIT(1) : BIT(0))
+#define VFE_GLOBAL_RESET_CMD (vfe_is_lite(vfe) ? 0x0c : 0x1c)
+#define GLOBAL_RESET_HW_AND_REG (vfe_is_lite(vfe) ? BIT(1) : BIT(0))
-#define VFE_REG_UPDATE_CMD (IS_LITE ? 0x20 : 0x34)
+#define VFE_REG_UPDATE_CMD (vfe_is_lite(vfe) ? 0x20 : 0x34)
static inline int reg_update_rdi(struct vfe_device *vfe, int n)
{
- return IS_LITE ? BIT(n) : BIT(1 + (n));
+ return vfe_is_lite(vfe) ? BIT(n) : BIT(1 + (n));
}
#define REG_UPDATE_RDI reg_update_rdi
-#define VFE_IRQ_CMD (IS_LITE ? 0x24 : 0x38)
+#define VFE_IRQ_CMD (vfe_is_lite(vfe) ? 0x24 : 0x38)
#define IRQ_CMD_GLOBAL_CLEAR BIT(0)
-#define VFE_IRQ_MASK(n) ((IS_LITE ? 0x28 : 0x3c) + (n) * 4)
-#define IRQ_MASK_0_RESET_ACK (IS_LITE ? BIT(17) : BIT(0))
-#define IRQ_MASK_0_BUS_TOP_IRQ (IS_LITE ? BIT(4) : BIT(7))
-#define VFE_IRQ_CLEAR(n) ((IS_LITE ? 0x34 : 0x48) + (n) * 4)
-#define VFE_IRQ_STATUS(n) ((IS_LITE ? 0x40 : 0x54) + (n) * 4)
+#define VFE_IRQ_MASK(n) ((vfe_is_lite(vfe) ? 0x28 : 0x3c) + (n) * 4)
+#define IRQ_MASK_0_RESET_ACK (vfe_is_lite(vfe) ? BIT(17) : BIT(0))
+#define IRQ_MASK_0_BUS_TOP_IRQ (vfe_is_lite(vfe) ? BIT(4) : BIT(7))
+#define VFE_IRQ_CLEAR(n) ((vfe_is_lite(vfe) ? 0x34 : 0x48) + (n) * 4)
+#define VFE_IRQ_STATUS(n) ((vfe_is_lite(vfe) ? 0x40 : 0x54) + (n) * 4)
-#define BUS_REG_BASE (IS_LITE ? 0x1a00 : 0xaa00)
+#define BUS_REG_BASE (vfe_is_lite(vfe) ? 0x1a00 : 0xaa00)
#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
#define WM_CGC_OVERRIDE_ALL (0x3FFFFFF)
@@ -49,13 +46,13 @@ static inline int reg_update_rdi(struct vfe_device *vfe, int n)
#define VFE_BUS_IRQ_MASK(n) (BUS_REG_BASE + 0x18 + (n) * 4)
static inline int bus_irq_mask_0_rdi_rup(struct vfe_device *vfe, int n)
{
- return IS_LITE ? BIT(n) : BIT(3 + (n));
+ return vfe_is_lite(vfe) ? BIT(n) : BIT(3 + (n));
}
#define BUS_IRQ_MASK_0_RDI_RUP bus_irq_mask_0_rdi_rup
static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
{
- return IS_LITE ? BIT(4 + (n)) : BIT(6 + (n));
+ return vfe_is_lite(vfe) ? BIT(4 + (n)) : BIT(6 + (n));
}
#define BUS_IRQ_MASK_0_COMP_DONE bus_irq_mask_0_comp_done
@@ -90,8 +87,8 @@ static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
/* for titan 480, each bus client is hardcoded to a specific path
* and each bus client is part of a hardcoded "comp group"
*/
-#define RDI_WM(n) ((IS_LITE ? 0 : 23) + (n))
-#define RDI_COMP_GROUP(n) ((IS_LITE ? 0 : 11) + (n))
+#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0 : 23) + (n))
+#define RDI_COMP_GROUP(n) ((vfe_is_lite(vfe) ? 0 : 11) + (n))
#define MAX_VFE_OUTPUT_LINES 4
@@ -453,42 +450,6 @@ out_unlock:
}
/*
- * vfe_pm_domain_off - Disable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static void vfe_pm_domain_off(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
-
- if (vfe->id >= camss->res->vfe_num)
- return;
-
- device_link_del(camss->genpd_link[vfe->id]);
-}
-
-/*
- * vfe_pm_domain_on - Enable power domains specific to this VFE.
- * @vfe: VFE Device
- */
-static int vfe_pm_domain_on(struct vfe_device *vfe)
-{
- struct camss *camss = vfe->camss;
- enum vfe_line_id id = vfe->id;
-
- if (id >= camss->res->vfe_num)
- return 0;
-
- camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
- DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
- if (!camss->genpd_link[id])
- return -EINVAL;
-
- return 0;
-}
-
-/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
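This hunk retires the IS_LITE macro, which expanded to an expression capturing a local named vfe and hardcoded the assumption that VFE 2 and up are lite. The vfe_is_lite() replacement makes the parameter explicit and sources the answer from the per-SoC resource table, so SoCs with a different VFE/VFE-lite split need no macro surgery. Side by side, as in the patch:

    /* Before: implicit capture of a local 'vfe', hardcoded ID split. */
    #define IS_LITE (vfe->id >= 2 ? 1 : 0)

    /* After: explicit argument, driven by resource-table data. */
    bool vfe_is_lite(struct vfe_device *vfe)
    {
            return vfe->camss->res->vfe_res[vfe->id].is_lite;
    }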
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 4839e2ced..2062be668 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>
@@ -474,6 +475,40 @@ void vfe_isr_reset_ack(struct vfe_device *vfe)
complete(&vfe->reset_complete);
}
+/*
+ * vfe_pm_domain_off - Disable power domains specific to this VFE.
+ * @vfe: VFE Device
+ */
+void vfe_pm_domain_off(struct vfe_device *vfe)
+{
+ if (!vfe->genpd)
+ return;
+
+ device_link_del(vfe->genpd_link);
+ vfe->genpd_link = NULL;
+}
+
+/*
+ * vfe_pm_domain_on - Enable power domains specific to this VFE.
+ * @vfe: VFE Device
+ */
+int vfe_pm_domain_on(struct vfe_device *vfe)
+{
+ struct camss *camss = vfe->camss;
+
+ if (!vfe->genpd)
+ return 0;
+
+ vfe->genpd_link = device_link_add(camss->dev, vfe->genpd,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!vfe->genpd_link)
+ return -EINVAL;
+
+ return 0;
+}
+
static int vfe_match_clock_names(struct vfe_device *vfe,
struct camss_clock *clock)
{
@@ -815,7 +850,7 @@ static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
/*
* __vfe_get_format - Get pointer to format structure
* @line: VFE line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
@@ -828,8 +863,7 @@ __vfe_get_format(struct vfe_line *line,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&line->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &line->fmt[pad];
}
@@ -837,7 +871,7 @@ __vfe_get_format(struct vfe_line *line,
/*
* __vfe_get_compose - Get pointer to compose selection structure
* @line: VFE line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE compose rectangle structure
@@ -848,8 +882,8 @@ __vfe_get_compose(struct vfe_line *line,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&line->subdev, sd_state,
- MSM_VFE_PAD_SINK);
+ return v4l2_subdev_state_get_compose(sd_state,
+ MSM_VFE_PAD_SINK);
return &line->compose;
}
@@ -857,7 +891,7 @@ __vfe_get_compose(struct vfe_line *line,
/*
* __vfe_get_crop - Get pointer to crop selection structure
* @line: VFE line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE crop rectangle structure
@@ -868,8 +902,7 @@ __vfe_get_crop(struct vfe_line *line,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&line->subdev, sd_state,
- MSM_VFE_PAD_SRC);
+ return v4l2_subdev_state_get_crop(sd_state, MSM_VFE_PAD_SRC);
return &line->crop;
}
@@ -877,7 +910,7 @@ __vfe_get_crop(struct vfe_line *line,
/*
* vfe_try_format - Handle try format by pad subdev method
* @line: VFE line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
@@ -938,7 +971,7 @@ static void vfe_try_format(struct vfe_line *line,
/*
* vfe_try_compose - Handle try compose selection by pad subdev method
* @line: VFE line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @rect: pointer to v4l2 rect structure
* @which: wanted subdev format
*/
@@ -977,7 +1010,7 @@ static void vfe_try_compose(struct vfe_line *line,
/*
* vfe_try_crop - Handle try crop selection by pad subdev method
* @line: VFE line
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @rect: pointer to v4l2 rect structure
* @which: wanted subdev format
*/
@@ -1020,7 +1053,7 @@ static void vfe_try_crop(struct vfe_line *line,
/*
* vfe_enum_mbus_code - Handle pixel format enumeration
* @sd: VFE V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*
* return -EINVAL or zero on success
@@ -1054,7 +1087,7 @@ static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
/*
* vfe_enum_frame_size - Handle frame size enumeration
* @sd: VFE V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
*
* Return -EINVAL or zero on success
@@ -1092,7 +1125,7 @@ static int vfe_enum_frame_size(struct v4l2_subdev *sd,
/*
* vfe_get_format - Handle get format by pads subdev method
* @sd: VFE V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
@@ -1120,7 +1153,7 @@ static int vfe_set_selection(struct v4l2_subdev *sd,
/*
* vfe_set_format - Handle set format by pads subdev method
* @sd: VFE V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
@@ -1171,7 +1204,7 @@ static int vfe_set_format(struct v4l2_subdev *sd,
/*
* vfe_get_selection - Handle get selection by pads subdev method
* @sd: VFE V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: pointer to v4l2 subdev selection structure
*
* Return -EINVAL or zero on success
@@ -1241,7 +1274,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
/*
* vfe_set_selection - Handle set selection by pads subdev method
* @sd: VFE V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: pointer to v4l2 subdev selection structure
*
* Return -EINVAL or zero on success
@@ -1347,6 +1380,34 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
if (!res->line_num)
return -EINVAL;
+ /* Power domain */
+
+ if (res->pd_name) {
+ vfe->genpd = dev_pm_domain_attach_by_name(camss->dev,
+ res->pd_name);
+ if (IS_ERR(vfe->genpd)) {
+ ret = PTR_ERR(vfe->genpd);
+ return ret;
+ }
+ }
+
+ if (!vfe->genpd && res->has_pd) {
+ /*
+ * Legacy magic index.
+ * Requires
+ * power-domains = <VFE_X>,
+ * <VFE_Y>,
+ * <TITAN_TOP>
+ * id must correspond to the index of the VFE which must
+ * come before the TOP GDSC. VFE Lite has no individually
+ * collapsible domain which is why id < vfe_num is a valid
+ * check.
+ */
+ vfe->genpd = dev_pm_domain_attach_by_id(camss->dev, id);
+ if (IS_ERR(vfe->genpd))
+ return PTR_ERR(vfe->genpd);
+ }
+
vfe->line_num = res->line_num;
vfe->ops->subdev_init(dev, vfe);
@@ -1470,6 +1531,19 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
}
/*
+ * msm_vfe_genpd_cleanup - Cleanup VFE genpd linkages
+ * @vfe: VFE device
+ */
+void msm_vfe_genpd_cleanup(struct vfe_device *vfe)
+{
+ if (vfe->genpd_link)
+ device_link_del(vfe->genpd_link);
+
+ if (vfe->genpd)
+ dev_pm_domain_detach(vfe->genpd, true);
+}
+
+/*
* vfe_link_setup - Setup VFE connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
@@ -1663,3 +1737,8 @@ void msm_vfe_unregister_entities(struct vfe_device *vfe)
media_entity_cleanup(&sd->entity);
}
}
+
+bool vfe_is_lite(struct vfe_device *vfe)
+{
+ return vfe->camss->res->vfe_res[vfe->id].is_lite;
+}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 09baded0d..0572c9b08 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -150,6 +150,8 @@ struct vfe_device {
const struct vfe_hw_ops_gen1 *ops_gen1;
struct vfe_isr_ops isr_ops;
struct camss_video_ops video_ops;
+ struct device *genpd;
+ struct device_link *genpd_link;
};
struct camss_subdev_resources;
@@ -157,6 +159,8 @@ struct camss_subdev_resources;
int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
const struct camss_subdev_resources *res, u8 id);
+void msm_vfe_genpd_cleanup(struct vfe_device *vfe);
+
int msm_vfe_register_entities(struct vfe_device *vfe,
struct v4l2_device *v4l2_dev);
@@ -201,6 +205,18 @@ int vfe_reset(struct vfe_device *vfe);
*/
int vfe_disable(struct vfe_line *line);
+/*
+ * vfe_pm_domain_off - Disable power domains specific to this VFE.
+ * @vfe: VFE Device
+ */
+void vfe_pm_domain_off(struct vfe_device *vfe);
+
+/*
+ * vfe_pm_domain_on - Enable power domains specific to this VFE.
+ * @vfe: VFE Device
+ */
+int vfe_pm_domain_on(struct vfe_device *vfe);
+
extern const struct vfe_hw_ops vfe_ops_4_1;
extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
@@ -210,4 +226,14 @@ extern const struct vfe_hw_ops vfe_ops_480;
int vfe_get(struct vfe_device *vfe);
void vfe_put(struct vfe_device *vfe);
+/*
+ * vfe_is_lite - Check whether the VFE is a VFE lite.
+ * @vfe: VFE Device
+ *
+ * Some VFE lites have a different register layout.
+ *
+ * Return whether the VFE is a VFE lite
+ */
+bool vfe_is_lite(struct vfe_device *vfe);
+
#endif /* QC_MSM_CAMSS_VFE_H */
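With genpd and genpd_link now members of vfe_device, the power-domain lifecycle pairs up cleanly per VFE: attach once at subdev init, link/unlink around each power cycle, detach at teardown. Condensed to the call order (all names from this patch; a VFE lite simply keeps genpd == NULL and every step becomes a no-op):

    /* msm_vfe_subdev_init(): attach by name, or by legacy index */
    vfe->genpd = dev_pm_domain_attach_by_name(camss->dev, res->pd_name);

    /* vfe_get()/vfe_put(): link and unlink the supplier */
    vfe_pm_domain_on(vfe);      /* device_link_add(..., DL_FLAG_RPM_ACTIVE) */
    vfe_pm_domain_off(vfe);     /* device_link_del() */

    /* msm_vfe_genpd_cleanup(): drop any remaining link, then detach */
    dev_pm_domain_detach(vfe->genpd, true);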
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 8e78dd8d5..58f4be660 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -278,6 +278,7 @@ static const struct camss_subdev_resources vfe_res_8x96[] = {
.reg = { "vfe0" },
.interrupt = { "vfe0" },
.line_num = 3,
+ .has_pd = true,
.ops = &vfe_ops_4_7
},
@@ -298,6 +299,7 @@ static const struct camss_subdev_resources vfe_res_8x96[] = {
.reg = { "vfe1" },
.interrupt = { "vfe1" },
.line_num = 3,
+ .has_pd = true,
.ops = &vfe_ops_4_7
}
};
@@ -468,6 +470,7 @@ static const struct camss_subdev_resources vfe_res_660[] = {
.reg = { "vfe0" },
.interrupt = { "vfe0" },
.line_num = 3,
+ .has_pd = true,
.ops = &vfe_ops_4_8
},
@@ -491,6 +494,7 @@ static const struct camss_subdev_resources vfe_res_660[] = {
.reg = { "vfe1" },
.interrupt = { "vfe1" },
.line_num = 3,
+ .has_pd = true,
.ops = &vfe_ops_4_8
}
};
@@ -634,6 +638,7 @@ static const struct camss_subdev_resources csid_res_845[] = {
{ 384000000 } },
.reg = { "csid2" },
.interrupt = { "csid2" },
+ .is_lite = true,
.ops = &csid_ops_gen2
}
};
@@ -658,6 +663,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
.reg = { "vfe0" },
.interrupt = { "vfe0" },
.line_num = 4,
+ .has_pd = true,
.ops = &vfe_ops_170
},
@@ -680,6 +686,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
.reg = { "vfe1" },
.interrupt = { "vfe1" },
.line_num = 4,
+ .has_pd = true,
.ops = &vfe_ops_170
},
@@ -700,6 +707,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
{ 384000000 } },
.reg = { "vfe_lite" },
.interrupt = { "vfe_lite" },
+ .is_lite = true,
.line_num = 4,
.ops = &vfe_ops_170
}
@@ -805,6 +813,7 @@ static const struct camss_subdev_resources csid_res_8250[] = {
{ 0 } },
.reg = { "csid2" },
.interrupt = { "csid2" },
+ .is_lite = true,
.ops = &csid_ops_gen2
},
/* CSID3 */
@@ -817,6 +826,7 @@ static const struct camss_subdev_resources csid_res_8250[] = {
{ 0 } },
.reg = { "csid3" },
.interrupt = { "csid3" },
+ .is_lite = true,
.ops = &csid_ops_gen2
}
};
@@ -839,7 +849,9 @@ static const struct camss_subdev_resources vfe_res_8250[] = {
{ 0 } },
.reg = { "vfe0" },
.interrupt = { "vfe0" },
+ .pd_name = "ife0",
.line_num = 3,
+ .has_pd = true,
.ops = &vfe_ops_480
},
/* VFE1 */
@@ -859,7 +871,9 @@ static const struct camss_subdev_resources vfe_res_8250[] = {
{ 0 } },
.reg = { "vfe1" },
.interrupt = { "vfe1" },
+ .pd_name = "ife1",
.line_num = 3,
+ .has_pd = true,
.ops = &vfe_ops_480
},
/* VFE2 (lite) */
@@ -878,6 +892,7 @@ static const struct camss_subdev_resources vfe_res_8250[] = {
{ 0 } },
.reg = { "vfe_lite0" },
.interrupt = { "vfe_lite0" },
+ .is_lite = true,
.line_num = 4,
.ops = &vfe_ops_480
},
@@ -897,6 +912,7 @@ static const struct camss_subdev_resources vfe_res_8250[] = {
{ 0 } },
.reg = { "vfe_lite1" },
.interrupt = { "vfe_lite1" },
+ .is_lite = true,
.line_num = 4,
.ops = &vfe_ops_480
},
@@ -1196,7 +1212,7 @@ static int camss_init_subdevices(struct camss *camss)
}
/* note: SM8250 requires VFE to be initialized before CSID */
- for (i = 0; i < camss->vfe_total_num; i++) {
+ for (i = 0; i < camss->res->vfe_num; i++) {
ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
&res->vfe_res[i], i);
if (ret < 0) {
@@ -1268,7 +1284,7 @@ static int camss_register_entities(struct camss *camss)
goto err_reg_ispif;
}
- for (i = 0; i < camss->vfe_total_num; i++) {
+ for (i = 0; i < camss->res->vfe_num; i++) {
ret = msm_vfe_register_entities(&camss->vfe[i],
&camss->v4l2_dev);
if (ret < 0) {
@@ -1340,7 +1356,7 @@ static int camss_register_entities(struct camss *camss)
}
} else {
for (i = 0; i < camss->res->csid_num; i++)
- for (k = 0; k < camss->vfe_total_num; k++)
+ for (k = 0; k < camss->res->vfe_num; k++)
for (j = 0; j < camss->vfe[k].line_num; j++) {
struct v4l2_subdev *csid = &camss->csid[i].subdev;
struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
@@ -1364,7 +1380,7 @@ static int camss_register_entities(struct camss *camss)
return 0;
err_link:
- i = camss->vfe_total_num;
+ i = camss->res->vfe_num;
err_reg_vfe:
for (i--; i >= 0; i--)
msm_vfe_unregister_entities(&camss->vfe[i]);
@@ -1403,7 +1419,7 @@ static void camss_unregister_entities(struct camss *camss)
msm_ispif_unregister_entities(camss->ispif);
- for (i = 0; i < camss->vfe_total_num; i++)
+ for (i = 0; i < camss->res->vfe_num; i++)
msm_vfe_unregister_entities(&camss->vfe[i]);
}
@@ -1478,7 +1494,9 @@ static const struct media_device_ops camss_media_ops = {
static int camss_configure_pd(struct camss *camss)
{
+ const struct camss_resources *res = camss->res;
struct device *dev = camss->dev;
+ int vfepd_num;
int i;
int ret;
@@ -1498,45 +1516,60 @@ static int camss_configure_pd(struct camss *camss)
if (camss->genpd_num == 1)
return 0;
- camss->genpd = devm_kmalloc_array(dev, camss->genpd_num,
- sizeof(*camss->genpd), GFP_KERNEL);
- if (!camss->genpd)
- return -ENOMEM;
+ /* Count the number of VFEs which have a power-domain flagged. */
+ for (vfepd_num = i = 0; i < camss->res->vfe_num; i++) {
+ if (res->vfe_res[i].has_pd)
+ vfepd_num++;
+ }
- camss->genpd_link = devm_kmalloc_array(dev, camss->genpd_num,
- sizeof(*camss->genpd_link),
- GFP_KERNEL);
- if (!camss->genpd_link)
- return -ENOMEM;
+ /*
+ * If the number of power-domains is greater than the number of VFEs
+ * then the additional power-domain is for the entire CAMSS block.
+ */
+ if (!(camss->genpd_num > vfepd_num))
+ return 0;
/*
- * VFE power domains are in the beginning of the list, and while all
- * power domains should be attached, only if TITAN_TOP power domain is
- * found in the list, it should be linked over here.
+ * If a power-domain name is defined, try to use it.
+ * It is possible we are running a new kernel with an old dtb so
+ * fall back to indexes even if a pd_name is defined but not found.
*/
- for (i = 0; i < camss->genpd_num; i++) {
- camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
- if (IS_ERR(camss->genpd[i])) {
- ret = PTR_ERR(camss->genpd[i]);
+ if (camss->res->pd_name) {
+ camss->genpd = dev_pm_domain_attach_by_name(camss->dev,
+ camss->res->pd_name);
+ if (IS_ERR(camss->genpd)) {
+ ret = PTR_ERR(camss->genpd);
goto fail_pm;
}
}
- if (i > camss->res->vfe_num) {
- camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
- DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
- if (!camss->genpd_link[i - 1]) {
- ret = -EINVAL;
- goto fail_pm;
- }
+ if (!camss->genpd) {
+ /*
+ * Legacy magic index. TITAN_TOP GDSC must be the last
+ * item in the power-domain list.
+ */
+ camss->genpd = dev_pm_domain_attach_by_id(camss->dev,
+ camss->genpd_num - 1);
+ }
+ if (IS_ERR_OR_NULL(camss->genpd)) {
+ if (!camss->genpd)
+ ret = -ENODEV;
+ else
+ ret = PTR_ERR(camss->genpd);
+ goto fail_pm;
+ }
+ camss->genpd_link = device_link_add(camss->dev, camss->genpd,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!camss->genpd_link) {
+ ret = -EINVAL;
+ goto fail_pm;
}
return 0;
fail_pm:
- for (--i ; i >= 0; i--)
- dev_pm_domain_detach(camss->genpd[i], true);
+ dev_pm_domain_detach(camss->genpd, true);
return ret;
}
@@ -1558,18 +1591,25 @@ static int camss_icc_get(struct camss *camss)
return 0;
}
-static void camss_genpd_cleanup(struct camss *camss)
+static void camss_genpd_subdevice_cleanup(struct camss *camss)
{
int i;
+ for (i = 0; i < camss->res->vfe_num; i++)
+ msm_vfe_genpd_cleanup(&camss->vfe[i]);
+}
+
+static void camss_genpd_cleanup(struct camss *camss)
+{
if (camss->genpd_num == 1)
return;
- if (camss->genpd_num > camss->res->vfe_num)
- device_link_del(camss->genpd_link[camss->genpd_num - 1]);
+ camss_genpd_subdevice_cleanup(camss);
+
+ if (camss->genpd_link)
+ device_link_del(camss->genpd_link);
- for (i = 0; i < camss->genpd_num; i++)
- dev_pm_domain_detach(camss->genpd[i], true);
+ dev_pm_domain_detach(camss->genpd, true);
}
/*
@@ -1612,8 +1652,7 @@ static int camss_probe(struct platform_device *pdev)
return -ENOMEM;
}
- camss->vfe_total_num = camss->res->vfe_num + camss->res->vfe_lite_num;
- camss->vfe = devm_kcalloc(dev, camss->vfe_total_num,
+ camss->vfe = devm_kcalloc(dev, camss->res->vfe_num,
sizeof(*camss->vfe), GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
@@ -1771,12 +1810,12 @@ static const struct camss_resources sdm845_resources = {
.vfe_res = vfe_res_845,
.csiphy_num = ARRAY_SIZE(csiphy_res_845),
.csid_num = ARRAY_SIZE(csid_res_845),
- .vfe_num = 2,
- .vfe_lite_num = 1,
+ .vfe_num = ARRAY_SIZE(vfe_res_845),
};
static const struct camss_resources sm8250_resources = {
.version = CAMSS_8250,
+ .pd_name = "top",
.csiphy_res = csiphy_res_8250,
.csid_res = csid_res_8250,
.vfe_res = vfe_res_8250,
@@ -1784,8 +1823,7 @@ static const struct camss_resources sm8250_resources = {
.icc_path_num = ARRAY_SIZE(icc_res_sm8250),
.csiphy_num = ARRAY_SIZE(csiphy_res_8250),
.csid_num = ARRAY_SIZE(csid_res_8250),
- .vfe_num = 2,
- .vfe_lite_num = 2,
+ .vfe_num = ARRAY_SIZE(vfe_res_8250),
};
static const struct of_device_id camss_dt_match[] = {
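The rewritten camss_configure_pd() resolves the CAMSS-wide TITAN_TOP GDSC by name first ("top" on SM8250) and only then falls back to the legacy last-index convention. The fallback works because dev_pm_domain_attach_by_name() returns NULL, not an error, when power-domain-names lacks the requested entry, which is exactly the new-kernel-on-old-dtb case. The decision tree, condensed from the function above:

    struct device *pd = NULL;

    if (res->pd_name)
            pd = dev_pm_domain_attach_by_name(dev, res->pd_name);
    if (IS_ERR(pd))
            return PTR_ERR(pd);     /* a named domain exists but failed */
    if (!pd)                        /* old dtb: fall back to magic index */
            pd = dev_pm_domain_attach_by_id(dev, genpd_num - 1);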
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 8acad7321..a0c2dcc77 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -48,7 +48,10 @@ struct camss_subdev_resources {
u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX];
char *reg[CAMSS_RES_MAX];
char *interrupt[CAMSS_RES_MAX];
+ char *pd_name;
u8 line_num;
+ bool has_pd;
+ bool is_lite;
const void *ops;
};
@@ -83,6 +86,7 @@ enum icc_count {
struct camss_resources {
enum camss_version version;
+ const char *pd_name;
const struct camss_subdev_resources *csiphy_res;
const struct camss_subdev_resources *csid_res;
const struct camss_subdev_resources *ispif_res;
@@ -92,7 +96,6 @@ struct camss_resources {
const unsigned int csiphy_num;
const unsigned int csid_num;
const unsigned int vfe_num;
- const unsigned int vfe_lite_num;
};
struct camss {
@@ -106,11 +109,10 @@ struct camss {
struct vfe_device *vfe;
atomic_t ref_count;
int genpd_num;
- struct device **genpd;
- struct device_link **genpd_link;
+ struct device *genpd;
+ struct device_link *genpd_link;
struct icc_path *icc_path[ICC_SM8250_COUNT];
const struct camss_resources *res;
- unsigned int vfe_total_num;
};
struct camss_camera_interface {
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 9cffe9755..a712dd4f0 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -881,6 +881,10 @@ static const struct venus_resources sc7280_res = {
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
+ .cp_start = 0,
+ .cp_size = 0x25800000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x24800000,
.fwname = "qcom/vpu-2.0/venus.mbn",
};
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index dbf305cec..29130a944 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -1641,7 +1641,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
- src_vq->min_buffers_needed = 0;
+ src_vq->min_queued_buffers = 0;
src_vq->dev = inst->core->dev;
src_vq->lock = &inst->ctx_q_lock;
ret = vb2_queue_init(src_vq);
@@ -1656,7 +1656,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
- dst_vq->min_buffers_needed = 0;
+ dst_vq->min_queued_buffers = 0;
dst_vq->dev = inst->core->dev;
dst_vq->lock = &inst->ctx_q_lock;
return vb2_queue_init(dst_vq);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 44b13696c..3ec2fb8d9 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1398,7 +1398,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
- src_vq->min_buffers_needed = 1;
+ src_vq->min_queued_buffers = 1;
src_vq->dev = inst->core->dev;
src_vq->lock = &inst->ctx_q_lock;
if (inst->core->res->hfi_version == HFI_VERSION_1XX)
@@ -1415,7 +1415,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
- dst_vq->min_buffers_needed = 1;
+ dst_vq->min_queued_buffers = 1;
dst_vq->dev = inst->core->dev;
dst_vq->lock = &inst->ctx_q_lock;
return vb2_queue_init(dst_vq);
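The min_buffers_needed -> min_queued_buffers renames in the venus hunks above (and the renesas ones below) track the vb2 core change in this release: the field now strictly means the number of buffers that must be queued before .start_streaming() is invoked, and drivers query the allocated-buffer count through vb2_get_num_buffers() instead of reading vq->num_buffers directly, as the rcar_drif hunk below does. A sketch of the resulting queue_setup shape, with the 16-buffer floor and buffer size as illustrative values:

    static int sketch_queue_setup(struct vb2_queue *vq,
                                  unsigned int *num_buffers,
                                  unsigned int *num_planes,
                                  unsigned int sizes[],
                                  struct device *alloc_devs[])
    {
            unsigned int q_num_bufs = vb2_get_num_buffers(vq);

            /* Top up to at least 16 buffers in total. */
            if (q_num_bufs + *num_buffers < 16)
                    *num_buffers = 16 - q_num_bufs;

            *num_planes = 1;
            sizes[0] = PAGE_SIZE;
            return 0;
    }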
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index 19a005d83..530d65fc5 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -282,7 +282,7 @@ static int risp_set_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
isp->mf = format->format;
} else {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ framefmt = v4l2_subdev_state_get_format(sd_state, 0);
*framefmt = format->format;
}
@@ -302,7 +302,7 @@ static int risp_get_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = isp->mf;
else
- format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ format->format = *v4l2_subdev_state_get_format(sd_state, 0);
mutex_unlock(&isp->lock);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
index 66fe553a0..582d5e35d 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -1185,7 +1185,7 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
priv->mf = format->format;
} else {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ framefmt = v4l2_subdev_state_get_format(sd_state, 0);
*framefmt = format->format;
}
@@ -1205,7 +1205,7 @@ static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = priv->mf;
else
- format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ format->format = *v4l2_subdev_state_get_format(sd_state, 0);
mutex_unlock(&priv->lock);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 2a77353f1..e2c40abc6 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -1559,7 +1559,7 @@ int rvin_dma_register(struct rvin_dev *vin, int irq)
q->ops = &rvin_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 4;
+ q->min_queued_buffers = 4;
q->dev = vin->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index 292c5bf9e..f21d05054 100644
--- a/drivers/media/platform/renesas/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -424,10 +424,11 @@ static int rcar_drif_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
/* Need at least 16 buffers */
- if (vq->num_buffers + *num_buffers < 16)
- *num_buffers = 16 - vq->num_buffers;
+ if (q_num_bufs + *num_buffers < 16)
+ *num_buffers = 16 - q_num_bufs;
*num_planes = 1;
sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize);
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index 2562b30ac..167760276 100644
--- a/drivers/media/platform/renesas/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -1399,7 +1399,7 @@ static int ceu_notify_complete(struct v4l2_async_notifier *notifier)
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct ceu_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->lock = &ceudev->mlock;
q->dev = ceudev->v4l2_dev.dev;
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index ad2bd7103..d20f4eff9 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -250,7 +250,7 @@ static int rzg2l_csi2_calc_mbps(struct rzg2l_csi2 *csi2)
}
state = v4l2_subdev_lock_and_get_active_state(&csi2->subdev);
- fmt = v4l2_subdev_get_pad_format(&csi2->subdev, state, RZG2L_CSI2_SINK);
+ fmt = v4l2_subdev_state_get_format(state, RZG2L_CSI2_SINK);
format = rzg2l_csi2_code_to_fmt(fmt->code);
v4l2_subdev_unlock_state(state);
@@ -500,13 +500,13 @@ static int rzg2l_csi2_set_format(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *src_format;
struct v4l2_mbus_framefmt *sink_format;
- src_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CSI2_SOURCE);
+ src_format = v4l2_subdev_state_get_format(state, RZG2L_CSI2_SOURCE);
if (fmt->pad == RZG2L_CSI2_SOURCE) {
fmt->format = *src_format;
return 0;
}
- sink_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CSI2_SINK);
+ sink_format = v4l2_subdev_state_get_format(state, RZG2L_CSI2_SINK);
if (!rzg2l_csi2_code_to_fmt(fmt->format.code))
sink_format->code = rzg2l_csi2_formats[0].code;
@@ -530,8 +530,8 @@ static int rzg2l_csi2_set_format(struct v4l2_subdev *sd,
return 0;
}
-static int rzg2l_csi2_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int rzg2l_csi2_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { .pad = RZG2L_CSI2_SINK, };
@@ -582,7 +582,6 @@ static const struct v4l2_subdev_video_ops rzg2l_csi2_video_ops = {
static const struct v4l2_subdev_pad_ops rzg2l_csi2_pad_ops = {
.enum_mbus_code = rzg2l_csi2_enum_mbus_code,
- .init_cfg = rzg2l_csi2_init_config,
.enum_frame_size = rzg2l_csi2_enum_frame_size,
.set_fmt = rzg2l_csi2_set_format,
.get_fmt = v4l2_subdev_get_fmt,
@@ -593,6 +592,10 @@ static const struct v4l2_subdev_ops rzg2l_csi2_subdev_ops = {
.pad = &rzg2l_csi2_pad_ops,
};
+static const struct v4l2_subdev_internal_ops rzg2l_csi2_internal_ops = {
+ .init_state = rzg2l_csi2_init_state,
+};
+
/* -----------------------------------------------------------------------------
* Async handling and registration of subdevices and links.
*/
@@ -777,6 +780,7 @@ static int rzg2l_csi2_probe(struct platform_device *pdev)
csi2->subdev.dev = &pdev->dev;
v4l2_subdev_init(&csi2->subdev, &rzg2l_csi2_subdev_ops);
+ csi2->subdev.internal_ops = &rzg2l_csi2_internal_ops;
v4l2_set_subdevdata(&csi2->subdev, &pdev->dev);
snprintf(csi2->subdev.name, sizeof(csi2->subdev.name),
"csi-%s", dev_name(&pdev->dev));
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
index 4dcd2faff..9f351a058 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
@@ -39,7 +39,7 @@ struct v4l2_mbus_framefmt *rzg2l_cru_ip_get_src_fmt(struct rzg2l_cru_dev *cru)
struct v4l2_mbus_framefmt *fmt;
state = v4l2_subdev_lock_and_get_active_state(&cru->ip.subdev);
- fmt = v4l2_subdev_get_pad_format(&cru->ip.subdev, state, 1);
+ fmt = v4l2_subdev_state_get_format(state, 1);
v4l2_subdev_unlock_state(state);
return fmt;
@@ -108,13 +108,13 @@ static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *src_format;
struct v4l2_mbus_framefmt *sink_format;
- src_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CRU_IP_SOURCE);
+ src_format = v4l2_subdev_state_get_format(state, RZG2L_CRU_IP_SOURCE);
if (fmt->pad == RZG2L_CRU_IP_SOURCE) {
fmt->format = *src_format;
return 0;
}
- sink_format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+ sink_format = v4l2_subdev_state_get_format(state, fmt->pad);
if (!rzg2l_cru_ip_code_to_fmt(fmt->format.code))
sink_format->code = rzg2l_cru_ip_formats[0].code;
@@ -168,8 +168,8 @@ static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int rzg2l_cru_ip_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int rzg2l_cru_ip_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_subdev_format fmt = { .pad = RZG2L_CRU_IP_SINK, };
@@ -192,7 +192,6 @@ static const struct v4l2_subdev_video_ops rzg2l_cru_ip_video_ops = {
static const struct v4l2_subdev_pad_ops rzg2l_cru_ip_pad_ops = {
.enum_mbus_code = rzg2l_cru_ip_enum_mbus_code,
.enum_frame_size = rzg2l_cru_ip_enum_frame_size,
- .init_cfg = rzg2l_cru_ip_init_config,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rzg2l_cru_ip_set_format,
};
@@ -202,6 +201,10 @@ static const struct v4l2_subdev_ops rzg2l_cru_ip_subdev_ops = {
.pad = &rzg2l_cru_ip_pad_ops,
};
+static const struct v4l2_subdev_internal_ops rzg2l_cru_ip_internal_ops = {
+ .init_state = rzg2l_cru_ip_init_state,
+};
+
static const struct media_entity_operations rzg2l_cru_ip_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -213,6 +216,7 @@ int rzg2l_cru_ip_subdev_register(struct rzg2l_cru_dev *cru)
ip->subdev.dev = cru->dev;
v4l2_subdev_init(&ip->subdev, &rzg2l_cru_ip_subdev_ops);
+ ip->subdev.internal_ops = &rzg2l_cru_ip_internal_ops;
v4l2_set_subdevdata(&ip->subdev, cru);
snprintf(ip->subdev.name, sizeof(ip->subdev.name),
"cru-ip-%s", dev_name(cru->dev));
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index e6eedd65b..d0ffa90bc 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -767,7 +767,7 @@ int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru)
q->ops = &rzg2l_cru_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 4;
+ q->min_queued_buffers = 4;
q->dev = cru->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/renesas/sh_vou.c b/drivers/media/platform/renesas/sh_vou.c
index f792aedc9..1e74dd601 100644
--- a/drivers/media/platform/renesas/sh_vou.c
+++ b/drivers/media/platform/renesas/sh_vou.c
@@ -1297,7 +1297,7 @@ static int sh_vou_probe(struct platform_device *pdev)
q->ops = &sh_vou_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->lock = &vou_dev->fop_lock;
q->dev = &pdev->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_brx.c b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
index 89385b4ca..a8535c6e2 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_brx.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
@@ -100,7 +100,7 @@ static struct v4l2_rect *brx_get_compose(struct vsp1_brx *brx,
struct v4l2_subdev_state *sd_state,
unsigned int pad)
{
- return v4l2_subdev_get_try_compose(&brx->entity.subdev, sd_state, pad);
+ return v4l2_subdev_state_get_compose(sd_state, pad);
}
static void brx_try_format(struct vsp1_brx *brx,
@@ -136,29 +136,28 @@ static int brx_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&brx->entity.lock);
- config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
- fmt->which);
- if (!config) {
+ state = vsp1_entity_get_state(&brx->entity, sd_state, fmt->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
- brx_try_format(brx, config, fmt->pad, &fmt->format);
+ brx_try_format(brx, state, fmt->pad, &fmt->format);
- format = vsp1_entity_get_pad_format(&brx->entity, config, fmt->pad);
+ format = vsp1_entity_get_pad_format(&brx->entity, state, fmt->pad);
*format = fmt->format;
/* Reset the compose rectangle. */
if (fmt->pad != brx->entity.source_pad) {
struct v4l2_rect *compose;
- compose = brx_get_compose(brx, config, fmt->pad);
+ compose = brx_get_compose(brx, state, fmt->pad);
compose->left = 0;
compose->top = 0;
compose->width = format->width;
@@ -171,7 +170,7 @@ static int brx_set_format(struct v4l2_subdev *subdev,
for (i = 0; i <= brx->entity.source_pad; ++i) {
format = vsp1_entity_get_pad_format(&brx->entity,
- config, i);
+ state, i);
format->code = fmt->format.code;
}
}
@@ -186,7 +185,7 @@ static int brx_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
if (sel->pad == brx->entity.source_pad)
return -EINVAL;
@@ -200,13 +199,13 @@ static int brx_get_selection(struct v4l2_subdev *subdev,
return 0;
case V4L2_SEL_TGT_COMPOSE:
- config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
- sel->which);
- if (!config)
+ state = vsp1_entity_get_state(&brx->entity, sd_state,
+ sel->which);
+ if (!state)
return -EINVAL;
mutex_lock(&brx->entity.lock);
- sel->r = *brx_get_compose(brx, config, sel->pad);
+ sel->r = *brx_get_compose(brx, state, sel->pad);
mutex_unlock(&brx->entity.lock);
return 0;
@@ -220,7 +219,7 @@ static int brx_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_brx *brx = to_brx(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *compose;
int ret = 0;
@@ -233,9 +232,8 @@ static int brx_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&brx->entity.lock);
- config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&brx->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
@@ -244,7 +242,7 @@ static int brx_set_selection(struct v4l2_subdev *subdev,
* The compose rectangle top left corner must be inside the output
* frame.
*/
- format = vsp1_entity_get_pad_format(&brx->entity, config,
+ format = vsp1_entity_get_pad_format(&brx->entity, state,
brx->entity.source_pad);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
@@ -253,11 +251,11 @@ static int brx_set_selection(struct v4l2_subdev *subdev,
* Scaling isn't supported, the compose rectangle size must be identical
* to the sink format size.
*/
- format = vsp1_entity_get_pad_format(&brx->entity, config, sel->pad);
+ format = vsp1_entity_get_pad_format(&brx->entity, state, sel->pad);
sel->r.width = format->width;
sel->r.height = format->height;
- compose = brx_get_compose(brx, config, sel->pad);
+ compose = brx_get_compose(brx, state, sel->pad);
*compose = sel->r;
done:
@@ -266,7 +264,6 @@ done:
}
static const struct v4l2_subdev_pad_ops brx_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = brx_enum_mbus_code,
.enum_frame_size = brx_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
@@ -293,7 +290,7 @@ static void brx_configure_stream(struct vsp1_entity *entity,
unsigned int flags;
unsigned int i;
- format = vsp1_entity_get_pad_format(&brx->entity, brx->entity.config,
+ format = vsp1_entity_get_pad_format(&brx->entity, brx->entity.state,
brx->entity.source_pad);
/*
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_clu.c b/drivers/media/platform/renesas/vsp1/vsp1_clu.c
index c5217fee2..625776a9b 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_clu.c
@@ -155,7 +155,6 @@ static int clu_set_format(struct v4l2_subdev *subdev,
*/
static const struct v4l2_subdev_pad_ops clu_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = clu_enum_mbus_code,
.enum_frame_size = clu_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
@@ -182,8 +181,7 @@ static void clu_configure_stream(struct vsp1_entity *entity,
* The yuv_mode can't be changed during streaming. Cache it internally
* for future runtime configuration calls.
*/
- format = vsp1_entity_get_pad_format(&clu->entity,
- clu->entity.config,
+ format = vsp1_entity_get_pad_format(&clu->entity, clu->entity.state,
CLU_PAD_SINK);
clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32;
}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
index c31f05a80..0a5a7f9cc 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
@@ -101,27 +101,26 @@ void vsp1_entity_configure_partition(struct vsp1_entity *entity,
*/
/**
- * vsp1_entity_get_pad_config - Get the pad configuration for an entity
+ * vsp1_entity_get_state - Get the subdev state for an entity
* @entity: the entity
* @sd_state: the TRY state
- * @which: configuration selector (ACTIVE or TRY)
+ * @which: state selector (ACTIVE or TRY)
*
* When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold
* the entity lock to access the returned configuration.
*
- * Return the pad configuration requested by the which argument. The TRY
- * configuration is passed explicitly to the function through the cfg argument
- * and simply returned when requested. The ACTIVE configuration comes from the
- * entity structure.
+ * Return the subdev state requested by the which argument. The TRY state is
+ * passed explicitly to the function through the sd_state argument and simply
+ * returned when requested. The ACTIVE state comes from the entity structure.
*/
struct v4l2_subdev_state *
-vsp1_entity_get_pad_config(struct vsp1_entity *entity,
- struct v4l2_subdev_state *sd_state,
- enum v4l2_subdev_format_whence which)
+vsp1_entity_get_state(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_ACTIVE:
- return entity->config;
+ return entity->state;
case V4L2_SUBDEV_FORMAT_TRY:
default:
return sd_state;
@@ -142,7 +141,7 @@ vsp1_entity_get_pad_format(struct vsp1_entity *entity,
struct v4l2_subdev_state *sd_state,
unsigned int pad)
{
- return v4l2_subdev_get_try_format(&entity->subdev, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
}
/**
@@ -163,46 +162,18 @@ vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
{
switch (target) {
case V4L2_SEL_TGT_COMPOSE:
- return v4l2_subdev_get_try_compose(&entity->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_compose(sd_state, pad);
case V4L2_SEL_TGT_CROP:
- return v4l2_subdev_get_try_crop(&entity->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
default:
return NULL;
}
}
/*
- * vsp1_entity_init_cfg - Initialize formats on all pads
- * @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
- *
- * Initialize all pad formats with default values in the given pad config. This
- * function can be used as a handler for the subdev pad::init_cfg operation.
- */
-int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
-{
- unsigned int pad;
-
- for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) {
- struct v4l2_subdev_format format = {
- .pad = pad,
- .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
- };
-
- v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format);
- }
-
- return 0;
-}
-
-/*
* vsp1_subdev_get_pad_format - Subdev pad get_fmt handler
* @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: V4L2 subdev format
*
* This function implements the subdev get_fmt pad operation. It can be used as
@@ -213,14 +184,14 @@ int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
- config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
- if (!config)
+ state = vsp1_entity_get_state(entity, sd_state, fmt->which);
+ if (!state)
return -EINVAL;
mutex_lock(&entity->lock);
- fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
+ fmt->format = *vsp1_entity_get_pad_format(entity, state, fmt->pad);
mutex_unlock(&entity->lock);
return 0;
@@ -229,7 +200,7 @@ int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
/*
* vsp1_subdev_enum_mbus_code - Subdev pad enum_mbus_code handler
* @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code: Media bus code enumeration
* @codes: Array of supported media bus codes
* @ncodes: Number of supported media bus codes
@@ -252,7 +223,7 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
code->code = codes[code->index];
} else {
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
/*
@@ -262,13 +233,12 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- config = vsp1_entity_get_pad_config(entity, sd_state,
- code->which);
- if (!config)
+ state = vsp1_entity_get_state(entity, sd_state, code->which);
+ if (!state)
return -EINVAL;
mutex_lock(&entity->lock);
- format = vsp1_entity_get_pad_format(entity, config, 0);
+ format = vsp1_entity_get_pad_format(entity, state, 0);
code->code = format->code;
mutex_unlock(&entity->lock);
}
@@ -279,7 +249,7 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
/*
* vsp1_subdev_enum_frame_size - Subdev pad enum_frame_size handler
* @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fse: Frame size enumeration
* @min_width: Minimum image width
* @min_height: Minimum image height
@@ -298,15 +268,15 @@ int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
unsigned int max_width, unsigned int max_height)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(entity, sd_state, fse->which);
- if (!config)
+ state = vsp1_entity_get_state(entity, sd_state, fse->which);
+ if (!state)
return -EINVAL;
- format = vsp1_entity_get_pad_format(entity, config, fse->pad);
+ format = vsp1_entity_get_pad_format(entity, state, fse->pad);
mutex_lock(&entity->lock);
@@ -339,7 +309,7 @@ done:
/*
* vsp1_subdev_set_pad_format - Subdev pad set_fmt handler
* @subdev: V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: V4L2 subdev format
* @codes: Array of supported media bus codes
* @ncodes: Number of supported media bus codes
@@ -362,7 +332,7 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
unsigned int max_width, unsigned int max_height)
{
struct vsp1_entity *entity = to_vsp1_entity(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
unsigned int i;
@@ -370,13 +340,13 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
mutex_lock(&entity->lock);
- config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
- if (!config) {
+ state = vsp1_entity_get_state(entity, sd_state, fmt->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
- format = vsp1_entity_get_pad_format(entity, config, fmt->pad);
+ format = vsp1_entity_get_pad_format(entity, state, fmt->pad);
if (fmt->pad == entity->source_pad) {
/* The output format can't be modified. */
@@ -404,18 +374,18 @@ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(entity, config, entity->source_pad);
+ format = vsp1_entity_get_pad_format(entity, state, entity->source_pad);
*format = fmt->format;
/* Reset the crop and compose rectangles. */
- selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad,
+ selection = vsp1_entity_get_pad_selection(entity, state, fmt->pad,
V4L2_SEL_TGT_CROP);
selection->left = 0;
selection->top = 0;
selection->width = format->width;
selection->height = format->height;
- selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad,
+ selection = vsp1_entity_get_pad_selection(entity, state, fmt->pad,
V4L2_SEL_TGT_COMPOSE);
selection->left = 0;
selection->top = 0;
@@ -427,6 +397,29 @@ done:
return ret;
}
+static int vsp1_entity_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int pad;
+
+ /* Initialize all pad formats with default values. */
+ for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) {
+ struct v4l2_subdev_format format = {
+ .pad = pad,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops vsp1_entity_internal_ops = {
+ .init_state = vsp1_entity_init_state,
+};
+
/* -----------------------------------------------------------------------------
* Media Operations
*/
@@ -661,6 +654,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
/* Initialize the V4L2 subdev. */
subdev = &entity->subdev;
v4l2_subdev_init(subdev, ops);
+ subdev->internal_ops = &vsp1_entity_internal_ops;
subdev->entity.function = function;
subdev->entity.ops = &vsp1->media_ops;
@@ -669,21 +663,21 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
snprintf(subdev->name, sizeof(subdev->name), "%s %s",
dev_name(vsp1->dev), name);
- vsp1_entity_init_cfg(subdev, NULL);
+ vsp1_entity_init_state(subdev, NULL);
/*
- * Allocate the pad configuration to store formats and selection
+ * Allocate the subdev state to store formats and selection
* rectangles.
*/
/*
* FIXME: Drop this call, drivers are not supposed to use
* __v4l2_subdev_state_alloc().
*/
- entity->config = __v4l2_subdev_state_alloc(&entity->subdev,
- "vsp1:config->lock", &key);
- if (IS_ERR(entity->config)) {
+ entity->state = __v4l2_subdev_state_alloc(&entity->subdev,
+ "vsp1:state->lock", &key);
+ if (IS_ERR(entity->state)) {
media_entity_cleanup(&entity->subdev.entity);
- return PTR_ERR(entity->config);
+ return PTR_ERR(entity->state);
}
return 0;
@@ -695,6 +689,6 @@ void vsp1_entity_destroy(struct vsp1_entity *entity)
entity->ops->destroy(entity);
if (entity->subdev.ctrl_handler)
v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
- __v4l2_subdev_state_free(entity->config);
+ __v4l2_subdev_state_free(entity->state);
media_entity_cleanup(&entity->subdev.entity);
}
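
The vsp1_entity.c changes above follow the kernel-wide subdev state rework in this release: the deprecated v4l2_subdev_get_try_*() helpers become v4l2_subdev_state_get_*() accessors (which take only the state and the pad, not the subdev pointer), and the pad operation .init_cfg moves to the subdev internal operation .init_state. A minimal sketch of the resulting pattern for a hypothetical single-pad subdev; the foo_* identifiers are illustrative and not part of the vsp1 code:

/* Sketch of the .init_cfg -> .init_state migration; foo_* is hypothetical. */
static int foo_init_state(struct v4l2_subdev *sd,
			  struct v4l2_subdev_state *sd_state)
{
	/* State accessors take only the state and the pad. */
	struct v4l2_mbus_framefmt *fmt =
		v4l2_subdev_state_get_format(sd_state, 0);

	fmt->width = 640;
	fmt->height = 480;
	fmt->code = MEDIA_BUS_FMT_ARGB8888_1X32;
	fmt->field = V4L2_FIELD_NONE;

	return 0;
}

static const struct v4l2_subdev_internal_ops foo_internal_ops = {
	.init_state = foo_init_state,	/* formerly pad op .init_cfg */
};
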
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.h b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
index 17f98a6a9..735f32dde 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
@@ -115,9 +115,9 @@ struct vsp1_entity {
unsigned int sink_pad;
struct v4l2_subdev subdev;
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
- struct mutex lock; /* Protects the pad config */
+ struct mutex lock; /* Protects the state */
};
static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
@@ -135,9 +135,9 @@ int vsp1_entity_link_setup(struct media_entity *entity,
const struct media_pad *remote, u32 flags);
struct v4l2_subdev_state *
-vsp1_entity_get_pad_config(struct vsp1_entity *entity,
- struct v4l2_subdev_state *sd_state,
- enum v4l2_subdev_format_whence which);
+vsp1_entity_get_state(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which);
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
struct v4l2_subdev_state *sd_state,
@@ -146,8 +146,6 @@ struct v4l2_rect *
vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
struct v4l2_subdev_state *sd_state,
unsigned int pad, unsigned int target);
-int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state);
void vsp1_entity_route_setup(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgo.c b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
index e6492deb0..40c571a98 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
@@ -140,9 +140,9 @@ static void hgo_configure_stream(struct vsp1_entity *entity,
unsigned int hratio;
unsigned int vratio;
- crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ crop = vsp1_entity_get_pad_selection(entity, entity->state,
HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
- compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ compose = vsp1_entity_get_pad_selection(entity, entity->state,
HISTO_PAD_SINK,
V4L2_SEL_TGT_COMPOSE);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgt.c b/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
index aa1c718e0..8281b8687 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
@@ -139,9 +139,9 @@ static void hgt_configure_stream(struct vsp1_entity *entity,
u8 upper;
unsigned int i;
- crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ crop = vsp1_entity_get_pad_selection(entity, entity->state,
HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
- compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ compose = vsp1_entity_get_pad_selection(entity, entity->state,
HISTO_PAD_SINK,
V4L2_SEL_TGT_COMPOSE);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
index f22449dd6..71155282c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
@@ -203,7 +203,7 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
int ret = 0;
@@ -213,9 +213,8 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&histo->entity.lock);
- config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&histo->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
@@ -223,7 +222,7 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- crop = vsp1_entity_get_pad_selection(&histo->entity, config,
+ crop = vsp1_entity_get_pad_selection(&histo->entity, state,
HISTO_PAD_SINK,
V4L2_SEL_TGT_CROP);
sel->r.left = 0;
@@ -234,7 +233,7 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
- format = vsp1_entity_get_pad_format(&histo->entity, config,
+ format = vsp1_entity_get_pad_format(&histo->entity, state,
HISTO_PAD_SINK);
sel->r.left = 0;
sel->r.top = 0;
@@ -244,7 +243,7 @@ static int histo_get_selection(struct v4l2_subdev *subdev,
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_CROP:
- sel->r = *vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->r = *vsp1_entity_get_pad_selection(&histo->entity, state,
sel->pad, sel->target);
break;
@@ -346,7 +345,7 @@ static int histo_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_histogram *histo = subdev_to_histo(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
int ret;
if (sel->pad != HISTO_PAD_SINK)
@@ -354,17 +353,16 @@ static int histo_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&histo->entity.lock);
- config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&histo->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
if (sel->target == V4L2_SEL_TGT_CROP)
- ret = histo_set_crop(subdev, config, sel);
+ ret = histo_set_crop(subdev, state, sel);
else if (sel->target == V4L2_SEL_TGT_COMPOSE)
- ret = histo_set_compose(subdev, config, sel);
+ ret = histo_set_compose(subdev, state, sel);
else
ret = -EINVAL;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
index 361a87038..bc1299c29 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
@@ -66,20 +66,19 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&hsit->entity.lock);
- config = vsp1_entity_get_pad_config(&hsit->entity, sd_state,
- fmt->which);
- if (!config) {
+ state = vsp1_entity_get_state(&hsit->entity, sd_state, fmt->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
- format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
+ format = vsp1_entity_get_pad_format(&hsit->entity, state, fmt->pad);
if (fmt->pad == HSIT_PAD_SOURCE) {
/*
@@ -102,7 +101,7 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&hsit->entity, config,
+ format = vsp1_entity_get_pad_format(&hsit->entity, state,
HSIT_PAD_SOURCE);
*format = fmt->format;
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
@@ -114,7 +113,6 @@ done:
}
static const struct v4l2_subdev_pad_ops hsit_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = hsit_enum_mbus_code,
.enum_frame_size = hsit_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lif.c b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
index 0ab2e0c70..b1d21a548 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
@@ -68,7 +68,6 @@ static int lif_set_format(struct v4l2_subdev *subdev,
}
static const struct v4l2_subdev_pad_ops lif_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = lif_enum_mbus_code,
.enum_frame_size = lif_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
@@ -94,7 +93,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
unsigned int obth;
unsigned int lbth;
- format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
+ format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.state,
LIF_PAD_SOURCE);
switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lut.c b/drivers/media/platform/renesas/vsp1/vsp1_lut.c
index ac6802a32..451d24ab0 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lut.c
@@ -131,7 +131,6 @@ static int lut_set_format(struct v4l2_subdev *subdev,
*/
static const struct v4l2_subdev_pad_ops lut_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = lut_enum_mbus_code,
.enum_frame_size = lut_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index ea12c3f12..c47579efc 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -81,10 +81,10 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
/* Format */
sink_format = vsp1_entity_get_pad_format(&rpf->entity,
- rpf->entity.config,
+ rpf->entity.state,
RWPF_PAD_SINK);
source_format = vsp1_entity_get_pad_format(&rpf->entity,
- rpf->entity.config,
+ rpf->entity.state,
RWPF_PAD_SOURCE);
infmt = VI6_RPF_INFMT_CIPM
@@ -158,7 +158,7 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
const struct v4l2_rect *compose;
compose = vsp1_entity_get_pad_selection(pipe->brx,
- pipe->brx->config,
+ pipe->brx->state,
rpf->brx_input,
V4L2_SEL_TGT_COMPOSE);
left = compose->left;
@@ -302,7 +302,7 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
* offsets are needed, as planes 2 and 3 always have identical
* strides.
*/
- crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+ crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.state);
/*
* Partition Algorithm Control
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
index e0f87c810..09fb6ffa1 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
@@ -19,8 +19,7 @@
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
struct v4l2_subdev_state *sd_state)
{
- return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, sd_state,
- RWPF_PAD_SINK);
+ return v4l2_subdev_state_get_crop(sd_state, RWPF_PAD_SINK);
}
/* -----------------------------------------------------------------------------
@@ -62,15 +61,14 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
- fmt->which);
- if (!config) {
+ state = vsp1_entity_get_state(&rwpf->entity, sd_state, fmt->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
@@ -81,7 +79,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, state, fmt->pad);
if (fmt->pad == RWPF_PAD_SOURCE) {
/*
@@ -107,7 +105,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
struct v4l2_rect *crop;
/* Update the sink crop rectangle. */
- crop = vsp1_rwpf_get_crop(rwpf, config);
+ crop = vsp1_rwpf_get_crop(rwpf, state);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
@@ -115,7 +113,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
}
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, state,
RWPF_PAD_SOURCE);
*format = fmt->format;
@@ -134,7 +132,7 @@ static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
@@ -147,20 +145,19 @@ static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&rwpf->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *vsp1_rwpf_get_crop(rwpf, config);
+ sel->r = *vsp1_rwpf_get_crop(rwpf, state);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, state,
RWPF_PAD_SINK);
sel->r.left = 0;
sel->r.top = 0;
@@ -183,7 +180,7 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
int ret = 0;
@@ -200,15 +197,14 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&rwpf->entity.lock);
- config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&rwpf->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
/* Make sure the crop rectangle is entirely contained in the image. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, state,
RWPF_PAD_SINK);
/*
@@ -229,11 +225,11 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
sel->r.height = min_t(unsigned int, sel->r.height,
format->height - sel->r.top);
- crop = vsp1_rwpf_get_crop(rwpf, config);
+ crop = vsp1_rwpf_get_crop(rwpf, state);
*crop = sel->r;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ format = vsp1_entity_get_pad_format(&rwpf->entity, state,
RWPF_PAD_SOURCE);
format->width = crop->width;
format->height = crop->height;
@@ -244,7 +240,6 @@ done:
}
static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
.enum_frame_size = vsp1_rwpf_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_sru.c b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
index b614a2aea..11e008aa9 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
@@ -123,16 +123,15 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
- fse->which);
- if (!config)
+ state = vsp1_entity_get_state(&sru->entity, sd_state, fse->which);
+ if (!state)
return -EINVAL;
- format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&sru->entity, state, SRU_PAD_SINK);
mutex_lock(&sru->entity.lock);
@@ -221,31 +220,30 @@ static int sru_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&sru->entity.lock);
- config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
- fmt->which);
- if (!config) {
+ state = vsp1_entity_get_state(&sru->entity, sd_state, fmt->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
- sru_try_format(sru, config, fmt->pad, &fmt->format);
+ sru_try_format(sru, state, fmt->pad, &fmt->format);
- format = vsp1_entity_get_pad_format(&sru->entity, config, fmt->pad);
+ format = vsp1_entity_get_pad_format(&sru->entity, state, fmt->pad);
*format = fmt->format;
if (fmt->pad == SRU_PAD_SINK) {
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&sru->entity, config,
+ format = vsp1_entity_get_pad_format(&sru->entity, state,
SRU_PAD_SOURCE);
*format = fmt->format;
- sru_try_format(sru, config, SRU_PAD_SOURCE, format);
+ sru_try_format(sru, state, SRU_PAD_SOURCE, format);
}
done:
@@ -254,7 +252,6 @@ done:
}
static const struct v4l2_subdev_pad_ops sru_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = sru_enum_mbus_code,
.enum_frame_size = sru_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
@@ -280,9 +277,9 @@ static void sru_configure_stream(struct vsp1_entity *entity,
struct v4l2_mbus_framefmt *output;
u32 ctrl0;
- input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.state,
SRU_PAD_SINK);
- output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.state,
SRU_PAD_SOURCE);
if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
@@ -310,9 +307,9 @@ static unsigned int sru_max_width(struct vsp1_entity *entity,
struct v4l2_mbus_framefmt *input;
struct v4l2_mbus_framefmt *output;
- input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.state,
SRU_PAD_SINK);
- output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.state,
SRU_PAD_SOURCE);
/*
@@ -336,9 +333,9 @@ static void sru_partition(struct vsp1_entity *entity,
struct v4l2_mbus_framefmt *input;
struct v4l2_mbus_framefmt *output;
- input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.state,
SRU_PAD_SINK);
- output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.state,
SRU_PAD_SOURCE);
/* Adapt if SRUx2 is enabled. */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uds.c b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
index 1c290cda0..d89f1197b 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
@@ -128,17 +128,15 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_uds *uds = to_uds(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
- config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
- fse->which);
- if (!config)
+ state = vsp1_entity_get_state(&uds->entity, sd_state, fse->which);
+ if (!state)
return -EINVAL;
- format = vsp1_entity_get_pad_format(&uds->entity, config,
- UDS_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&uds->entity, state, UDS_PAD_SINK);
mutex_lock(&uds->entity.lock);
@@ -205,31 +203,30 @@ static int uds_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_uds *uds = to_uds(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
mutex_lock(&uds->entity.lock);
- config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
- fmt->which);
- if (!config) {
+ state = vsp1_entity_get_state(&uds->entity, sd_state, fmt->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
- uds_try_format(uds, config, fmt->pad, &fmt->format);
+ uds_try_format(uds, state, fmt->pad, &fmt->format);
- format = vsp1_entity_get_pad_format(&uds->entity, config, fmt->pad);
+ format = vsp1_entity_get_pad_format(&uds->entity, state, fmt->pad);
*format = fmt->format;
if (fmt->pad == UDS_PAD_SINK) {
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&uds->entity, config,
+ format = vsp1_entity_get_pad_format(&uds->entity, state,
UDS_PAD_SOURCE);
*format = fmt->format;
- uds_try_format(uds, config, UDS_PAD_SOURCE, format);
+ uds_try_format(uds, state, UDS_PAD_SOURCE, format);
}
done:
@@ -242,7 +239,6 @@ done:
*/
static const struct v4l2_subdev_pad_ops uds_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = uds_enum_mbus_code,
.enum_frame_size = uds_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
@@ -269,9 +265,9 @@ static void uds_configure_stream(struct vsp1_entity *entity,
unsigned int vscale;
bool multitap;
- input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SINK);
- output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SOURCE);
hscale = uds_compute_ratio(input->width, output->width);
@@ -314,7 +310,7 @@ static void uds_configure_partition(struct vsp1_entity *entity,
struct vsp1_partition *partition = pipe->partition;
const struct v4l2_mbus_framefmt *output;
- output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SOURCE);
/* Input size clipping. */
@@ -339,9 +335,9 @@ static unsigned int uds_max_width(struct vsp1_entity *entity,
const struct v4l2_mbus_framefmt *input;
unsigned int hscale;
- input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SINK);
- output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SOURCE);
hscale = output->width / input->width;
@@ -381,9 +377,9 @@ static void uds_partition(struct vsp1_entity *entity,
partition->uds_sink = *window;
partition->uds_source = *window;
- input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SINK);
- output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.state,
UDS_PAD_SOURCE);
partition->uds_sink.width = window->width * input->width
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uif.c b/drivers/media/platform/renesas/vsp1/vsp1_uif.c
index 83d7f17df..f66936a28 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_uif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uif.c
@@ -86,7 +86,7 @@ static int uif_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_uif *uif = to_uif(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
int ret = 0;
@@ -95,9 +95,8 @@ static int uif_get_selection(struct v4l2_subdev *subdev,
mutex_lock(&uif->entity.lock);
- config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&uif->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
@@ -105,7 +104,7 @@ static int uif_get_selection(struct v4l2_subdev *subdev,
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
- format = vsp1_entity_get_pad_format(&uif->entity, config,
+ format = vsp1_entity_get_pad_format(&uif->entity, state,
UIF_PAD_SINK);
sel->r.left = 0;
sel->r.top = 0;
@@ -114,7 +113,7 @@ static int uif_get_selection(struct v4l2_subdev *subdev,
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *vsp1_entity_get_pad_selection(&uif->entity, config,
+ sel->r = *vsp1_entity_get_pad_selection(&uif->entity, state,
sel->pad, sel->target);
break;
@@ -133,7 +132,7 @@ static int uif_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_uif *uif = to_uif(subdev);
- struct v4l2_subdev_state *config;
+ struct v4l2_subdev_state *state;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *selection;
int ret = 0;
@@ -144,15 +143,14 @@ static int uif_set_selection(struct v4l2_subdev *subdev,
mutex_lock(&uif->entity.lock);
- config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
- sel->which);
- if (!config) {
+ state = vsp1_entity_get_state(&uif->entity, sd_state, sel->which);
+ if (!state) {
ret = -EINVAL;
goto done;
}
/* The crop rectangle must be inside the input frame. */
- format = vsp1_entity_get_pad_format(&uif->entity, config, UIF_PAD_SINK);
+ format = vsp1_entity_get_pad_format(&uif->entity, state, UIF_PAD_SINK);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
@@ -162,7 +160,7 @@ static int uif_set_selection(struct v4l2_subdev *subdev,
format->height - sel->r.top);
/* Store the crop rectangle. */
- selection = vsp1_entity_get_pad_selection(&uif->entity, config,
+ selection = vsp1_entity_get_pad_selection(&uif->entity, state,
sel->pad, V4L2_SEL_TGT_CROP);
*selection = sel->r;
@@ -176,7 +174,6 @@ done:
*/
static const struct v4l2_subdev_pad_ops uif_pad_ops = {
- .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = uif_enum_mbus_code,
.enum_frame_size = uif_enum_frame_size,
.get_fmt = vsp1_subdev_get_pad_format,
@@ -206,7 +203,7 @@ static void uif_configure_stream(struct vsp1_entity *entity,
vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMPMR,
VI6_UIF_DISCOM_DOCMPMR_SEL(9));
- crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ crop = vsp1_entity_get_pad_selection(entity, entity->state,
UIF_PAD_SINK, V4L2_SEL_TGT_CROP);
left = crop->left;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index e9d502776..5a9cb0e56 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -198,7 +198,7 @@ static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
* at the WPF sink.
*/
format = vsp1_entity_get_pad_format(&pipe->output->entity,
- pipe->output->entity.config,
+ pipe->output->entity.state,
RWPF_PAD_SINK);
/* A single partition simply processes the output size in full. */
@@ -263,7 +263,7 @@ static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
* at the WPF sink.
*/
format = vsp1_entity_get_pad_format(&pipe->output->entity,
- pipe->output->entity.config,
+ pipe->output->entity.state,
RWPF_PAD_SINK);
div_size = format->width;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index cab4445ec..9693aeab1 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -66,10 +66,10 @@ static int vsp1_wpf_set_rotation(struct vsp1_rwpf *wpf, unsigned int rotation)
}
sink_format = vsp1_entity_get_pad_format(&wpf->entity,
- wpf->entity.config,
+ wpf->entity.state,
RWPF_PAD_SINK);
source_format = vsp1_entity_get_pad_format(&wpf->entity,
- wpf->entity.config,
+ wpf->entity.state,
RWPF_PAD_SOURCE);
mutex_lock(&wpf->entity.lock);
@@ -246,10 +246,10 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
int ret;
sink_format = vsp1_entity_get_pad_format(&wpf->entity,
- wpf->entity.config,
+ wpf->entity.state,
RWPF_PAD_SINK);
source_format = vsp1_entity_get_pad_format(&wpf->entity,
- wpf->entity.config,
+ wpf->entity.state,
RWPF_PAD_SOURCE);
/* Format */
@@ -384,7 +384,7 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
unsigned int i;
sink_format = vsp1_entity_get_pad_format(&wpf->entity,
- wpf->entity.config,
+ wpf->entity.state,
RWPF_PAD_SINK);
width = sink_format->width;
height = sink_format->height;
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 81508ed5a..662c81b6d 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -5,7 +5,9 @@
*/
#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
@@ -15,6 +17,26 @@
#include "rga-hw.h"
#include "rga.h"
+static ssize_t fill_descriptors(struct rga_dma_desc *desc, size_t max_desc,
+ struct sg_table *sgt)
+{
+ struct sg_dma_page_iter iter;
+ struct rga_dma_desc *tmp = desc;
+ size_t n_desc = 0;
+ dma_addr_t addr;
+
+ for_each_sgtable_dma_page(sgt, &iter, 0) {
+ if (n_desc > max_desc)
+ return -EINVAL;
+ addr = sg_page_iter_dma_address(&iter);
+ tmp->addr = lower_32_bits(addr);
+ tmp++;
+ n_desc++;
+ }
+
+ return n_desc;
+}
+
static int
rga_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
@@ -22,28 +44,105 @@ rga_queue_setup(struct vb2_queue *vq,
{
struct rga_ctx *ctx = vb2_get_drv_priv(vq);
struct rga_frame *f = rga_get_frame(ctx, vq->type);
+ const struct v4l2_pix_format_mplane *pix_fmt;
+ int i;
if (IS_ERR(f))
return PTR_ERR(f);
- if (*nplanes)
- return sizes[0] < f->size ? -EINVAL : 0;
+ pix_fmt = &f->pix;
+
+ if (*nplanes) {
+ if (*nplanes != pix_fmt->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < pix_fmt->num_planes; i++)
+ if (sizes[i] < pix_fmt->plane_fmt[i].sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = pix_fmt->num_planes;
+
+ for (i = 0; i < pix_fmt->num_planes; i++)
+ sizes[i] = pix_fmt->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int rga_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rockchip_rga *rga = ctx->rga;
+ struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
+ size_t n_desc = 0;
- sizes[0] = f->size;
- *nplanes = 1;
+ n_desc = DIV_ROUND_UP(f->size, PAGE_SIZE);
+
+ rbuf->n_desc = n_desc;
+ rbuf->dma_desc = dma_alloc_coherent(rga->dev,
+ rbuf->n_desc * sizeof(*rbuf->dma_desc),
+ &rbuf->dma_desc_pa, GFP_KERNEL);
+ if (!rbuf->dma_desc)
+ return -ENOMEM;
return 0;
}
+static int get_plane_offset(struct rga_frame *f, int plane)
+{
+ if (plane == 0)
+ return 0;
+ if (plane == 1)
+ return f->width * f->height;
+ if (plane == 2)
+ return f->width * f->height + (f->width * f->height / f->fmt->uv_factor);
+
+ return -EINVAL;
+}
+
static int rga_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
+ ssize_t n_desc = 0;
+ size_t curr_desc = 0;
+ int i;
+ const struct v4l2_format_info *info;
+ unsigned int offsets[VIDEO_MAX_PLANES];
if (IS_ERR(f))
return PTR_ERR(f);
- vb2_set_plane_payload(vb, 0, f->size);
+ for (i = 0; i < vb->num_planes; i++) {
+ vb2_set_plane_payload(vb, i, f->pix.plane_fmt[i].sizeimage);
+
+ /* Create local MMU table for RGA */
+ n_desc = fill_descriptors(&rbuf->dma_desc[curr_desc],
+ rbuf->n_desc - curr_desc,
+ vb2_dma_sg_plane_desc(vb, i));
+ if (n_desc < 0) {
+ v4l2_err(&ctx->rga->v4l2_dev,
+ "Failed to map video buffer to RGA\n");
+ return n_desc;
+ }
+ offsets[i] = curr_desc << PAGE_SHIFT;
+ curr_desc += n_desc;
+ }
+
+ /* Fill the remaining planes */
+ info = v4l2_format_info(f->fmt->fourcc);
+ for (i = info->mem_planes; i < info->comp_planes; i++)
+ offsets[i] = get_plane_offset(f, i);
+
+ rbuf->offset.y_off = offsets[0];
+ rbuf->offset.u_off = offsets[1];
+ rbuf->offset.v_off = offsets[2];
return 0;
}
@@ -56,6 +155,17 @@ static void rga_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static void rga_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rockchip_rga *rga = ctx->rga;
+
+ dma_free_coherent(rga->dev, rbuf->n_desc * sizeof(*rbuf->dma_desc),
+ rbuf->dma_desc, rbuf->dma_desc_pa);
+}
+
static void rga_buf_return_buffers(struct vb2_queue *q,
enum vb2_buffer_state state)
{
@@ -99,50 +209,12 @@ static void rga_buf_stop_streaming(struct vb2_queue *q)
const struct vb2_ops rga_qops = {
.queue_setup = rga_queue_setup,
+ .buf_init = rga_buf_init,
.buf_prepare = rga_buf_prepare,
.buf_queue = rga_buf_queue,
+ .buf_cleanup = rga_buf_cleanup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = rga_buf_start_streaming,
.stop_streaming = rga_buf_stop_streaming,
};
-
-/* RGA MMU is a 1-Level MMU, so it can't be used through the IOMMU API.
- * We use it more like a scatter-gather list.
- */
-void rga_buf_map(struct vb2_buffer *vb)
-{
- struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct rockchip_rga *rga = ctx->rga;
- struct sg_table *sgt;
- struct scatterlist *sgl;
- unsigned int *pages;
- unsigned int address, len, i, p;
- unsigned int mapped_size = 0;
-
- if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- pages = rga->src_mmu_pages;
- else
- pages = rga->dst_mmu_pages;
-
- /* Create local MMU table for RGA */
- sgt = vb2_plane_cookie(vb, 0);
-
- for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
- len = sg_dma_len(sgl) >> PAGE_SHIFT;
- address = sg_phys(sgl);
-
- for (p = 0; p < len; p++) {
- dma_addr_t phys = address +
- ((dma_addr_t)p << PAGE_SHIFT);
-
- pages[mapped_size + p] = phys;
- }
-
- mapped_size += len;
- }
-
- /* sync local MMU table for RGA */
- dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
- 8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
-}
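
The rga-buf.c rework above replaces the two statically sized MMU page lists with a per-buffer descriptor table allocated in buf_init. Each rga_dma_desc holds the 32-bit DMA address of one page, and the hardware resolves a buffer offset by indexing the table with the page number and adding the in-page offset; that is why plane offsets are stored as curr_desc << PAGE_SHIFT. An illustrative model of the addressing scheme, not driver code:

/* Model of the RGA one-level MMU lookup; illustrative only. */
static dma_addr_t rga_mmu_resolve(const struct rga_dma_desc *table,
				  size_t n_desc, u32 offset)
{
	size_t idx = offset >> PAGE_SHIFT;

	if (idx >= n_desc)
		return 0;	/* offset outside this mapping */

	return (dma_addr_t)table[idx].addr + (offset & ~PAGE_MASK);
}
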
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index aaa96f256..11c3d7234 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -16,12 +16,6 @@ enum e_rga_start_pos {
RB = 3,
};
-struct rga_addr_offset {
- unsigned int y_off;
- unsigned int u_off;
- unsigned int v_off;
-};
-
struct rga_corners_addr_offset {
struct rga_addr_offset left_top;
struct rga_addr_offset right_top;
@@ -43,13 +37,13 @@ static unsigned int rga_get_scaling(unsigned int src, unsigned int dst)
}
static struct rga_corners_addr_offset
-rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
- unsigned int w, unsigned int h)
+rga_get_addr_offset(struct rga_frame *frm, struct rga_addr_offset *offset,
+ unsigned int x, unsigned int y, unsigned int w, unsigned int h)
{
struct rga_corners_addr_offset offsets;
struct rga_addr_offset *lt, *lb, *rt, *rb;
unsigned int x_div = 0,
- y_div = 0, uv_stride = 0, pixel_width = 0, uv_factor = 0;
+ y_div = 0, uv_stride = 0, pixel_width = 0;
lt = &offsets.left_top;
lb = &offsets.left_bottom;
@@ -58,14 +52,12 @@ rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
x_div = frm->fmt->x_div;
y_div = frm->fmt->y_div;
- uv_factor = frm->fmt->uv_factor;
uv_stride = frm->stride / x_div;
pixel_width = frm->stride / frm->width;
- lt->y_off = y * frm->stride + x * pixel_width;
- lt->u_off =
- frm->width * frm->height + (y / y_div) * uv_stride + x / x_div;
- lt->v_off = lt->u_off + frm->width * frm->height / uv_factor;
+ lt->y_off = offset->y_off + y * frm->stride + x * pixel_width;
+ lt->u_off = offset->u_off + (y / y_div) * uv_stride + x / x_div;
+ lt->v_off = offset->v_off + (y / y_div) * uv_stride + x / x_div;
lb->y_off = lt->y_off + (h - 1) * frm->stride;
lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride;
@@ -119,40 +111,40 @@ static struct rga_addr_offset *rga_lookup_draw_pos(struct
return NULL;
}
-static void rga_cmd_set_src_addr(struct rga_ctx *ctx, void *mmu_pages)
+static void rga_cmd_set_src_addr(struct rga_ctx *ctx, dma_addr_t dma_addr)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int reg;
reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
- dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+ dest[reg >> 2] = dma_addr >> 4;
reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
dest[reg >> 2] |= 0x7;
}
-static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, void *mmu_pages)
+static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, dma_addr_t dma_addr)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int reg;
reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
- dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+ dest[reg >> 2] = dma_addr >> 4;
reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
dest[reg >> 2] |= 0x7 << 4;
}
-static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, void *mmu_pages)
+static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, dma_addr_t dma_addr)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int reg;
reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
- dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+ dest[reg >> 2] = dma_addr >> 4;
reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
dest[reg >> 2] |= 0x7 << 8;
@@ -163,7 +155,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int scale_dst_w, scale_dst_h;
- unsigned int src_h, src_w, src_x, src_y, dst_h, dst_w, dst_x, dst_y;
+ unsigned int src_h, src_w, dst_h, dst_w;
union rga_src_info src_info;
union rga_dst_info dst_info;
union rga_src_x_factor x_factor;
@@ -173,18 +165,10 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
union rga_dst_vir_info dst_vir_info;
union rga_dst_act_info dst_act_info;
- struct rga_addr_offset *dst_offset;
- struct rga_corners_addr_offset offsets;
- struct rga_corners_addr_offset src_offsets;
-
src_h = ctx->in.crop.height;
src_w = ctx->in.crop.width;
- src_x = ctx->in.crop.left;
- src_y = ctx->in.crop.top;
dst_h = ctx->out.crop.height;
dst_w = ctx->out.crop.width;
- dst_x = ctx->out.crop.left;
- dst_y = ctx->out.crop.top;
src_info.val = dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2];
dst_info.val = dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2];
@@ -312,18 +296,37 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
dst_act_info.data.act_height = dst_h - 1;
dst_act_info.data.act_width = dst_w - 1;
- /*
- * Calculate the source framebuffer base address with offset pixel.
- */
- src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
- src_w, src_h);
+ dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
+ dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
+ dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
+ dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
+
+ dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
+
+ dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
+ dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
+
+ dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
+}
+
+static void rga_cmd_set_src_info(struct rga_ctx *ctx,
+ struct rga_addr_offset *offset)
+{
+ struct rga_corners_addr_offset src_offsets;
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int src_h, src_w, src_x, src_y;
+
+ src_h = ctx->in.crop.height;
+ src_w = ctx->in.crop.width;
+ src_x = ctx->in.crop.left;
+ src_y = ctx->in.crop.top;
/*
- * Configure the dest framebuffer base address with pixel offset.
+ * Calculate the source framebuffer base address with offset pixel.
*/
- offsets = rga_get_addr_offset(&ctx->out, dst_x, dst_y, dst_w, dst_h);
- dst_offset = rga_lookup_draw_pos(&offsets, src_info.data.rot_mode,
- src_info.data.mir_mode);
+ src_offsets = rga_get_addr_offset(&ctx->in, offset,
+ src_x, src_y, src_w, src_h);
dest[(RGA_SRC_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
src_offsets.left_top.y_off;
@@ -331,13 +334,49 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
src_offsets.left_top.u_off;
dest[(RGA_SRC_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
src_offsets.left_top.v_off;
+}
- dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
- dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
- dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
- dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
+static void rga_cmd_set_dst_info(struct rga_ctx *ctx,
+ struct rga_addr_offset *offset)
+{
+ struct rga_addr_offset *dst_offset;
+ struct rga_corners_addr_offset offsets;
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int dst_h, dst_w, dst_x, dst_y;
+ unsigned int mir_mode = 0;
+ unsigned int rot_mode = 0;
- dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
+ dst_h = ctx->out.crop.height;
+ dst_w = ctx->out.crop.width;
+ dst_x = ctx->out.crop.left;
+ dst_y = ctx->out.crop.top;
+
+ if (ctx->vflip)
+ mir_mode |= RGA_SRC_MIRR_MODE_X;
+ if (ctx->hflip)
+ mir_mode |= RGA_SRC_MIRR_MODE_Y;
+
+ switch (ctx->rotate) {
+ case 90:
+ rot_mode = RGA_SRC_ROT_MODE_90_DEGREE;
+ break;
+ case 180:
+ rot_mode = RGA_SRC_ROT_MODE_180_DEGREE;
+ break;
+ case 270:
+ rot_mode = RGA_SRC_ROT_MODE_270_DEGREE;
+ break;
+ default:
+ rot_mode = RGA_SRC_ROT_MODE_0_DEGREE;
+ break;
+ }
+
+ /*
+ * Configure the dest framebuffer base address with pixel offset.
+ */
+ offsets = rga_get_addr_offset(&ctx->out, offset, dst_x, dst_y, dst_w, dst_h);
+ dst_offset = rga_lookup_draw_pos(&offsets, mir_mode, rot_mode);
dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->y_off;
@@ -345,11 +384,6 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
dst_offset->u_off;
dest[(RGA_DST_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->v_off;
-
- dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
- dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
-
- dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
}
static void rga_cmd_set_mode(struct rga_ctx *ctx)
@@ -375,22 +409,25 @@ static void rga_cmd_set_mode(struct rga_ctx *ctx)
dest[(RGA_MODE_CTRL - RGA_MODE_BASE_REG) >> 2] = mode.val;
}
-static void rga_cmd_set(struct rga_ctx *ctx)
+static void rga_cmd_set(struct rga_ctx *ctx,
+ struct rga_vb_buffer *src, struct rga_vb_buffer *dst)
{
struct rockchip_rga *rga = ctx->rga;
memset(rga->cmdbuf_virt, 0, RGA_CMDBUF_SIZE * 4);
- rga_cmd_set_src_addr(ctx, rga->src_mmu_pages);
+ rga_cmd_set_src_addr(ctx, src->dma_desc_pa);
/*
* Due to hardware bug,
* src1 mmu also should be configured when using alpha blending.
*/
- rga_cmd_set_src1_addr(ctx, rga->dst_mmu_pages);
+ rga_cmd_set_src1_addr(ctx, dst->dma_desc_pa);
- rga_cmd_set_dst_addr(ctx, rga->dst_mmu_pages);
+ rga_cmd_set_dst_addr(ctx, dst->dma_desc_pa);
rga_cmd_set_mode(ctx);
+ rga_cmd_set_src_info(ctx, &src->offset);
+ rga_cmd_set_dst_info(ctx, &dst->offset);
rga_cmd_set_trans_info(ctx);
rga_write(rga, RGA_CMD_BASE, rga->cmdbuf_phy);
@@ -400,11 +437,12 @@ static void rga_cmd_set(struct rga_ctx *ctx)
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
-void rga_hw_start(struct rockchip_rga *rga)
+void rga_hw_start(struct rockchip_rga *rga,
+ struct rga_vb_buffer *src, struct rga_vb_buffer *dst)
{
struct rga_ctx *ctx = rga->curr;
- rga_cmd_set(ctx);
+ rga_cmd_set(ctx, src, dst);
rga_write(rga, RGA_SYS_CTRL, 0x00);
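
As the rga-hw.c hunks above show, registers are staged into the command buffer rather than written directly: each staged register is indexed by its byte offset from RGA_MODE_BASE_REG, and the MMU base registers take the descriptor table's DMA address shifted right by four, implying a 16-byte-aligned table (dma_alloc_coherent() provides at least that). A sketch of the staging pattern; the helper name is hypothetical:

/* Illustrative helper for the cmdbuf staging pattern used above. */
static void rga_stage_reg(u32 *cmdbuf, unsigned int reg, u32 val)
{
	cmdbuf[(reg - RGA_MODE_BASE_REG) >> 2] = val;
}

/* e.g.: rga_stage_reg(rga->cmdbuf_virt, RGA_MMU_SRC_BASE, dma_desc_pa >> 4); */
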
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 25f5b5eeb..00fdfa9e1 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -45,10 +45,7 @@ static void device_run(void *prv)
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- rga_buf_map(&src->vb2_buf);
- rga_buf_map(&dst->vb2_buf);
-
- rga_hw_start(rga);
+ rga_hw_start(rga, vb_to_rga(src), vb_to_rga(dst));
spin_unlock_irqrestore(&rga->ctrl_lock, flags);
}
@@ -96,12 +93,13 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
struct rga_ctx *ctx = priv;
int ret;
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &rga_qops;
src_vq->mem_ops = &vb2_dma_sg_memops;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->gfp_flags = __GFP_DMA32;
+ src_vq->buf_struct_size = sizeof(struct rga_vb_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->rga->mutex;
src_vq->dev = ctx->rga->v4l2_dev.dev;
@@ -110,12 +108,13 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
if (ret)
return ret;
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &rga_qops;
dst_vq->mem_ops = &vb2_dma_sg_memops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->gfp_flags = __GFP_DMA32;
+ dst_vq->buf_struct_size = sizeof(struct rga_vb_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->rga->mutex;
dst_vq->dev = ctx->rga->v4l2_dev.dev;
@@ -282,6 +281,15 @@ static struct rga_fmt formats[] = {
.x_div = 1,
},
{
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_NV16,
.color_swap = RGA_COLOR_NONE_SWAP,
.hw_format = RGA_COLOR_FMT_YUV422SP,
@@ -321,12 +329,12 @@ static struct rga_fmt formats[] = {
#define NUM_FORMATS ARRAY_SIZE(formats)
-static struct rga_fmt *rga_fmt_find(struct v4l2_format *f)
+static struct rga_fmt *rga_fmt_find(u32 pixelformat)
{
unsigned int i;
for (i = 0; i < NUM_FORMATS; i++) {
- if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ if (formats[i].fourcc == pixelformat)
return &formats[i];
}
return NULL;
@@ -345,14 +353,11 @@ static struct rga_frame def_frame = {
struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type)
{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (V4L2_TYPE_IS_OUTPUT(type))
return &ctx->in;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (V4L2_TYPE_IS_CAPTURE(type))
return &ctx->out;
- default:
- return ERR_PTR(-EINVAL);
- }
+ return ERR_PTR(-EINVAL);
}
static int rga_open(struct file *file)
@@ -369,6 +374,11 @@ static int rga_open(struct file *file)
ctx->in = def_frame;
ctx->out = def_frame;
+ v4l2_fill_pixfmt_mp(&ctx->in.pix,
+ ctx->in.fmt->fourcc, ctx->out.width, ctx->out.height);
+ v4l2_fill_pixfmt_mp(&ctx->out.pix,
+ ctx->out.fmt->fourcc, ctx->out.width, ctx->out.height);
+
if (mutex_lock_interruptible(&rga->mutex)) {
kfree(ctx);
return -ERESTARTSYS;
@@ -449,6 +459,7 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
+ struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
struct rga_ctx *ctx = prv;
struct vb2_queue *vq;
struct rga_frame *frm;
@@ -460,58 +471,43 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
if (IS_ERR(frm))
return PTR_ERR(frm);
- f->fmt.pix.width = frm->width;
- f->fmt.pix.height = frm->height;
- f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.pixelformat = frm->fmt->fourcc;
- f->fmt.pix.bytesperline = frm->stride;
- f->fmt.pix.sizeimage = frm->size;
- f->fmt.pix.colorspace = frm->colorspace;
+ v4l2_fill_pixfmt_mp(pix_fmt, frm->fmt->fourcc, frm->width, frm->height);
+
+ pix_fmt->field = V4L2_FIELD_NONE;
+ pix_fmt->colorspace = frm->colorspace;
return 0;
}
static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
+ struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
struct rga_fmt *fmt;
- fmt = rga_fmt_find(f);
- if (!fmt) {
+ fmt = rga_fmt_find(pix_fmt->pixelformat);
+ if (!fmt)
fmt = &formats[0];
- f->fmt.pix.pixelformat = fmt->fourcc;
- }
- f->fmt.pix.field = V4L2_FIELD_NONE;
+ pix_fmt->width = clamp(pix_fmt->width,
+ (u32)MIN_WIDTH, (u32)MAX_WIDTH);
+ pix_fmt->height = clamp(pix_fmt->height,
+ (u32)MIN_HEIGHT, (u32)MAX_HEIGHT);
- if (f->fmt.pix.width > MAX_WIDTH)
- f->fmt.pix.width = MAX_WIDTH;
- if (f->fmt.pix.height > MAX_HEIGHT)
- f->fmt.pix.height = MAX_HEIGHT;
-
- if (f->fmt.pix.width < MIN_WIDTH)
- f->fmt.pix.width = MIN_WIDTH;
- if (f->fmt.pix.height < MIN_HEIGHT)
- f->fmt.pix.height = MIN_HEIGHT;
-
- if (fmt->hw_format >= RGA_COLOR_FMT_YUV422SP)
- f->fmt.pix.bytesperline = f->fmt.pix.width;
- else
- f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
-
- f->fmt.pix.sizeimage =
- f->fmt.pix.height * (f->fmt.pix.width * fmt->depth) >> 3;
+ v4l2_fill_pixfmt_mp(pix_fmt, fmt->fourcc, pix_fmt->width, pix_fmt->height);
+ pix_fmt->field = V4L2_FIELD_NONE;
return 0;
}
static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
+ struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
struct rga_ctx *ctx = prv;
struct rockchip_rga *rga = ctx->rga;
struct vb2_queue *vq;
struct rga_frame *frm;
- struct rga_fmt *fmt;
int ret = 0;
+ int i;
/* Adjust all values accordingly to the hardware capabilities
* and chosen format.
@@ -527,15 +523,14 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
frm = rga_get_frame(ctx, f->type);
if (IS_ERR(frm))
return PTR_ERR(frm);
- fmt = rga_fmt_find(f);
- if (!fmt)
- return -EINVAL;
- frm->width = f->fmt.pix.width;
- frm->height = f->fmt.pix.height;
- frm->size = f->fmt.pix.sizeimage;
- frm->fmt = fmt;
- frm->stride = f->fmt.pix.bytesperline;
- frm->colorspace = f->fmt.pix.colorspace;
+ frm->width = pix_fmt->width;
+ frm->height = pix_fmt->height;
+ frm->size = 0;
+ for (i = 0; i < pix_fmt->num_planes; i++)
+ frm->size += pix_fmt->plane_fmt[i].sizeimage;
+ frm->fmt = rga_fmt_find(pix_fmt->pixelformat);
+ frm->stride = pix_fmt->plane_fmt[0].bytesperline;
+ frm->colorspace = pix_fmt->colorspace;
/* Reset crop settings */
frm->crop.left = 0;
@@ -543,6 +538,21 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
frm->crop.width = frm->width;
frm->crop.height = frm->height;
+ frm->pix = *pix_fmt;
+
+ v4l2_dbg(debug, 1, &rga->v4l2_dev,
+ "[%s] fmt - %p4cc %dx%d (stride %d, sizeimage %d)\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "OUTPUT" : "CAPTURE",
+ &frm->fmt->fourcc, frm->width, frm->height,
+ frm->stride, frm->size);
+
+ for (i = 0; i < pix_fmt->num_planes; i++) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev,
+ "plane[%d]: size %d, bytesperline %d\n",
+ i, pix_fmt->plane_fmt[i].sizeimage,
+ pix_fmt->plane_fmt[i].bytesperline);
+ }
+
return 0;
}
@@ -560,21 +570,21 @@ static int vidioc_g_selection(struct file *file, void *prv,
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (!V4L2_TYPE_IS_CAPTURE(s->type))
return -EINVAL;
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (!V4L2_TYPE_IS_OUTPUT(s->type))
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (!V4L2_TYPE_IS_CAPTURE(s->type))
return -EINVAL;
use_frame = true;
break;
case V4L2_SEL_TGT_CROP:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (!V4L2_TYPE_IS_OUTPUT(s->type))
return -EINVAL;
use_frame = true;
break;
@@ -612,7 +622,7 @@ static int vidioc_s_selection(struct file *file, void *prv,
* COMPOSE target is only valid for capture buffer type, return
* error for output buffer type
*/
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (!V4L2_TYPE_IS_CAPTURE(s->type))
return -EINVAL;
break;
case V4L2_SEL_TGT_CROP:
@@ -620,7 +630,7 @@ static int vidioc_s_selection(struct file *file, void *prv,
* CROP target is only valid for output buffer type, return
* error for capture buffer type
*/
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (!V4L2_TYPE_IS_OUTPUT(s->type))
return -EINVAL;
break;
/*
@@ -653,14 +663,14 @@ static const struct v4l2_ioctl_ops rga_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
- .vidioc_g_fmt_vid_out = vidioc_g_fmt,
- .vidioc_try_fmt_vid_out = vidioc_try_fmt,
- .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
@@ -687,7 +697,7 @@ static const struct video_device rga_videodev = {
.minor = -1,
.release = video_device_release,
.vfl_dir = VFL_DIR_M2M,
- .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
};
static int rga_enable_clocks(struct rockchip_rga *rga)
@@ -827,6 +837,12 @@ static int rga_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ ret = dma_set_mask_and_coherent(rga->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(rga->dev, "32-bit DMA not supported\n");
+ goto err_put_clk;
+ }
+
ret = v4l2_device_register(&pdev->dev, &rga->v4l2_dev);
if (ret)
goto err_put_clk;
@@ -872,26 +888,13 @@ static int rga_probe(struct platform_device *pdev)
goto rel_m2m;
}
- rga->src_mmu_pages =
- (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
- if (!rga->src_mmu_pages) {
- ret = -ENOMEM;
- goto free_dma;
- }
- rga->dst_mmu_pages =
- (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
- if (!rga->dst_mmu_pages) {
- ret = -ENOMEM;
- goto free_src_pages;
- }
-
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
def_frame.size = def_frame.stride * def_frame.height;
ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
- goto free_dst_pages;
+ goto free_dma;
}
v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
@@ -899,10 +902,6 @@ static int rga_probe(struct platform_device *pdev)
return 0;
-free_dst_pages:
- free_pages((unsigned long)rga->dst_mmu_pages, 3);
-free_src_pages:
- free_pages((unsigned long)rga->src_mmu_pages, 3);
free_dma:
dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
@@ -925,9 +924,6 @@ static void rga_remove(struct platform_device *pdev)
dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
- free_pages((unsigned long)rga->src_mmu_pages, 3);
- free_pages((unsigned long)rga->dst_mmu_pages, 3);
-
v4l2_info(&rga->v4l2_dev, "Removing\n");
v4l2_m2m_release(rga->m2m_dev);
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 5fa9d2f36..3502dff60 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -34,12 +34,17 @@ struct rga_frame {
/* Image format */
struct rga_fmt *fmt;
+ struct v4l2_pix_format_mplane pix;
/* Variables that can be calculated once and reused */
u32 stride;
u32 size;
};
+struct rga_dma_desc {
+ u32 addr;
+};
+
struct rockchip_rga_version {
u32 major;
u32 minor;
@@ -81,15 +86,36 @@ struct rockchip_rga {
struct rga_ctx *curr;
dma_addr_t cmdbuf_phy;
void *cmdbuf_virt;
- unsigned int *src_mmu_pages;
- unsigned int *dst_mmu_pages;
};
+struct rga_addr_offset {
+ unsigned int y_off;
+ unsigned int u_off;
+ unsigned int v_off;
+};
+
+struct rga_vb_buffer {
+ struct vb2_v4l2_buffer vb_buf;
+ struct list_head queue;
+
+ /* RGA MMU mapping for this buffer */
+ struct rga_dma_desc *dma_desc;
+ dma_addr_t dma_desc_pa;
+ size_t n_desc;
+
+ /* Plane offsets of this buffer into the mapping */
+ struct rga_addr_offset offset;
+};
+
+static inline struct rga_vb_buffer *vb_to_rga(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct rga_vb_buffer, vb_buf);
+}
+
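
The new rga_vb_buffer wrapper embeds the vb2_v4l2_buffer so that per-buffer driver state (the RGA MMU descriptor table and plane offsets) travels with the vb2 buffer, and vb_to_rga() recovers the wrapper via container_of(). A hedged sketch of how a vb2 callback would use it; the callback body is illustrative, not the driver's actual rga-buf.c code:

/* Illustrative only: recovering the driver wrapper inside a vb2 callback. */
static void example_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rga_vb_buffer *rbuf = vb_to_rga(vbuf);
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* rbuf->dma_desc and rbuf->offset are now reachable alongside vbuf */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
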
struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type);
/* RGA Buffers Manage */
extern const struct vb2_ops rga_qops;
-void rga_buf_map(struct vb2_buffer *vb);
/* RGA Hardware */
static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
@@ -110,6 +136,7 @@ static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
rga_write(rga, reg, temp);
};
-void rga_hw_start(struct rockchip_rga *rga);
+void rga_hw_start(struct rockchip_rga *rga,
+ struct rga_vb_buffer *src, struct rga_vb_buffer *dst);
#endif
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 3752b702e..c381c2213 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -1434,7 +1434,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
q->ops = &rkisp1_vb2_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct rkisp1_buffer);
- q->min_buffers_needed = RKISP1_MIN_BUFFERS_NEEDED;
+ q->min_queued_buffers = RKISP1_MIN_BUFFERS_NEEDED;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
q->dev = cap->rkisp1->dev;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index a4e272adc..b757f75ed 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -425,6 +425,7 @@ struct rkisp1_debug {
unsigned long stats_error;
unsigned long stop_timeout[2];
unsigned long frame_drop[2];
+ unsigned long complete_frames;
};
/*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index 7320c1c72..4202642e0 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -257,8 +257,8 @@ static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index)
return -EINVAL;
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_CSI_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_CSI_PAD_SINK);
code->code = sink_fmt->code;
return 0;
@@ -285,15 +285,13 @@ static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
return -EINVAL;
}
-static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int rkisp1_csi_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_CSI_PAD_SINK);
- src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_CSI_PAD_SRC);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_CSI_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_CSI_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
@@ -316,7 +314,7 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
if (fmt->pad == RKISP1_CSI_PAD_SRC)
return v4l2_subdev_get_fmt(sd, sd_state, fmt);
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_CSI_PAD_SINK);
sink_fmt->code = fmt->format.code;
@@ -336,7 +334,7 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
fmt->format = *sink_fmt;
/* Propagate the format to the source pad. */
- src_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SRC);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_CSI_PAD_SRC);
*src_fmt = *sink_fmt;
return 0;
@@ -389,7 +387,7 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
return -EINVAL;
sd_state = v4l2_subdev_lock_and_get_active_state(sd);
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_CSI_PAD_SINK);
format = rkisp1_mbus_info_get_by_code(sink_fmt->code);
v4l2_subdev_unlock_state(sd_state);
@@ -422,7 +420,6 @@ static const struct v4l2_subdev_video_ops rkisp1_csi_video_ops = {
static const struct v4l2_subdev_pad_ops rkisp1_csi_pad_ops = {
.enum_mbus_code = rkisp1_csi_enum_mbus_code,
- .init_cfg = rkisp1_csi_init_config,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rkisp1_csi_set_fmt,
};
@@ -432,6 +429,10 @@ static const struct v4l2_subdev_ops rkisp1_csi_ops = {
.pad = &rkisp1_csi_pad_ops,
};
+static const struct v4l2_subdev_internal_ops rkisp1_csi_internal_ops = {
+ .init_state = rkisp1_csi_init_state,
+};
+
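
The same rename recurs throughout the rkisp1, exynos4-is and s3c-camif hunks below: pad formats and crops are fetched from the subdev state alone via v4l2_subdev_state_get_format()/v4l2_subdev_state_get_crop(), and default-format setup moves from pad_ops.init_cfg to internal_ops.init_state. A condensed sketch of the resulting pattern, with a placeholder pad index and default size:

/* Condensed sketch of the new subdev-state pattern; values are placeholders. */
static int example_init_state(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state)
{
	struct v4l2_mbus_framefmt *fmt =
		v4l2_subdev_state_get_format(sd_state, 0);

	fmt->width = 800;
	fmt->height = 600;
	fmt->field = V4L2_FIELD_NONE;
	return 0;
}

static const struct v4l2_subdev_internal_ops example_internal_ops = {
	.init_state = example_init_state,
};
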
int rkisp1_csi_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_csi *csi = &rkisp1->csi;
@@ -443,6 +444,7 @@ int rkisp1_csi_register(struct rkisp1_device *rkisp1)
sd = &csi->sd;
v4l2_subdev_init(sd, &rkisp1_csi_ops);
+ sd->internal_ops = &rkisp1_csi_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->entity.ops = &rkisp1_csi_media_ops;
sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c
index 71df3dc95..79cda589d 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c
@@ -92,6 +92,10 @@ static int rkisp1_debug_dump_isp_regs_show(struct seq_file *m, void *p)
RKISP1_DEBUG_REG(ISP_FLAGS_SHD),
RKISP1_DEBUG_REG(ISP_RIS),
RKISP1_DEBUG_REG(ISP_ERR),
+ RKISP1_DEBUG_SHD_REG(ISP_IS_H_OFFS),
+ RKISP1_DEBUG_SHD_REG(ISP_IS_V_OFFS),
+ RKISP1_DEBUG_SHD_REG(ISP_IS_H_SIZE),
+ RKISP1_DEBUG_SHD_REG(ISP_IS_V_SIZE),
{ /* Sentinel */ },
};
struct rkisp1_device *rkisp1 = m->private;
@@ -217,6 +221,8 @@ void rkisp1_debug_init(struct rkisp1_device *rkisp1)
&debug->frame_drop[RKISP1_MAINPATH]);
debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
&debug->frame_drop[RKISP1_SELFPATH]);
+ debugfs_create_ulong("complete_frames", 0444, debug->debugfs_dir,
+ &debug->complete_frames);
debugfs_create_file("input_status", 0444, debug->debugfs_dir, rkisp1,
&rkisp1_debug_input_status_fops);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index caffea6a4..78a1f7a14 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -66,8 +66,8 @@ static void rkisp1_config_ism(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state)
{
const struct v4l2_rect *src_crop =
- v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
@@ -102,12 +102,12 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp,
const struct v4l2_mbus_framefmt *sink_frm;
const struct v4l2_rect *sink_crop;
- sink_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
- sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
- src_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ sink_frm = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ src_frm = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
sink_fmt = rkisp1_mbus_info_get_by_code(sink_frm->code);
src_fmt = rkisp1_mbus_info_get_by_code(src_frm->code);
@@ -201,8 +201,8 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp,
} else {
struct v4l2_mbus_framefmt *src_frm;
- src_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_frm = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat,
src_frm->quantization,
src_frm->ycbcr_enc);
@@ -332,8 +332,8 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp,
RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
- src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
if (src_info->pixel_enc != V4L2_PIXEL_ENC_BAYER)
@@ -423,15 +423,15 @@ static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int rkisp1_isp_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop, *src_crop;
/* Video. */
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
@@ -441,15 +441,15 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
- sink_crop = v4l2_subdev_get_pad_crop(sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_fmt = *sink_fmt;
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
@@ -457,15 +457,15 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
- src_crop = v4l2_subdev_get_pad_crop(sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_crop = *sink_crop;
/* Parameters and statistics. */
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_ISP_PAD_SINK_PARAMS);
- src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_STATS);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SINK_PARAMS);
+ src_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_STATS);
sink_fmt->width = 0;
sink_fmt->height = 0;
sink_fmt->field = V4L2_FIELD_NONE;
@@ -486,12 +486,12 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
const struct v4l2_rect *src_crop;
bool set_csc;
- sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
- src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
- src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
/*
* Media bus code. The ISP can operate in pass-through mode (Bayer in,
@@ -584,10 +584,10 @@ static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
const struct v4l2_rect *sink_crop;
struct v4l2_rect *src_crop;
- src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
- sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ src_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
src_crop->left = ALIGN(r->left, 2);
src_crop->width = ALIGN(r->width, 2);
@@ -598,8 +598,8 @@ static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
*r = *src_crop;
/* Propagate to out format */
- src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt);
}
@@ -610,10 +610,10 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
struct v4l2_rect *sink_crop, *src_crop;
const struct v4l2_mbus_framefmt *sink_fmt;
- sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
- sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->left = ALIGN(r->left, 2);
sink_crop->width = ALIGN(r->width, 2);
@@ -624,8 +624,8 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
*r = *sink_crop;
/* Propagate to out crop */
- src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SOURCE_VIDEO);
+ src_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SOURCE_VIDEO);
rkisp1_isp_set_src_crop(isp, sd_state, src_crop);
}
@@ -638,8 +638,8 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
struct v4l2_rect *sink_crop;
bool is_yuv;
- sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->code = format->code;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
@@ -687,8 +687,8 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
*format = *sink_fmt;
/* Propagate to in crop */
- sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop);
}
@@ -703,7 +703,8 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format);
else
- fmt->format = *v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad);
+ fmt->format = *v4l2_subdev_state_get_format(sd_state,
+ fmt->pad);
return 0;
}
@@ -723,19 +724,19 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_pad_format(sd, sd_state, sel->pad);
+ fmt = v4l2_subdev_state_get_format(sd_state, sel->pad);
sel->r.height = fmt->height;
sel->r.width = fmt->width;
sel->r.left = 0;
sel->r.top = 0;
} else {
- sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state,
- RKISP1_ISP_PAD_SINK_VIDEO);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_ISP_PAD_SINK_VIDEO);
}
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state, sel->pad);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, sel->pad);
break;
default:
@@ -782,7 +783,6 @@ static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
.enum_frame_size = rkisp1_isp_enum_frame_size,
.get_selection = rkisp1_isp_get_selection,
.set_selection = rkisp1_isp_set_selection,
- .init_cfg = rkisp1_isp_init_config,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rkisp1_isp_set_fmt,
.link_validate = v4l2_subdev_link_validate_default,
@@ -893,6 +893,10 @@ static const struct v4l2_subdev_ops rkisp1_isp_ops = {
.pad = &rkisp1_isp_pad_ops,
};
+static const struct v4l2_subdev_internal_ops rkisp1_isp_internal_ops = {
+ .init_state = rkisp1_isp_init_state,
+};
+
int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_isp *isp = &rkisp1->isp;
@@ -903,6 +907,7 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
isp->rkisp1 = rkisp1;
v4l2_subdev_init(sd, &rkisp1_isp_ops);
+ sd->internal_ops = &rkisp1_isp_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
sd->entity.ops = &rkisp1_isp_media_ops;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
@@ -1007,6 +1012,8 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
if (status & RKISP1_CIF_ISP_FRAME) {
u32 isp_ris;
+ rkisp1->debug.complete_frames++;
+
/* New frame from the sensor received */
isp_ris = rkisp1_read(rkisp1, RKISP1_CIF_ISP_RIS);
if (isp_ris & RKISP1_STATS_MEAS_MASK)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index 350f452e6..bea69a0d7 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -172,12 +172,9 @@
#define RKISP1_CIF_MI_FRAME(stream) BIT((stream)->id)
#define RKISP1_CIF_MI_MBLK_LINE BIT(2)
#define RKISP1_CIF_MI_FILL_MP_Y BIT(3)
-#define RKISP1_CIF_MI_WRAP_MP_Y BIT(4)
-#define RKISP1_CIF_MI_WRAP_MP_CB BIT(5)
-#define RKISP1_CIF_MI_WRAP_MP_CR BIT(6)
-#define RKISP1_CIF_MI_WRAP_SP_Y BIT(7)
-#define RKISP1_CIF_MI_WRAP_SP_CB BIT(8)
-#define RKISP1_CIF_MI_WRAP_SP_CR BIT(9)
+#define RKISP1_CIF_MI_WRAP_Y(stream) BIT(4 + (stream)->id * 3)
+#define RKISP1_CIF_MI_WRAP_CB(stream) BIT(5 + (stream)->id * 3)
+#define RKISP1_CIF_MI_WRAP_CR(stream) BIT(6 + (stream)->id * 3)
#define RKISP1_CIF_MI_DMA_READY BIT(11)
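
The six per-path WRAP defines collapse into three stream-parameterized ones; with the mainpath at id 0 and the selfpath at id 1, BIT(4 + id * 3) and friends reproduce exactly the removed bit positions. A standalone check, with BIT() redefined locally so it runs in userspace:

/* Standalone check that the parameterized macros match the removed bits. */
#include <assert.h>

#define BIT(n)		(1U << (n))
#define WRAP_Y(id)	BIT(4 + (id) * 3)
#define WRAP_CB(id)	BIT(5 + (id) * 3)
#define WRAP_CR(id)	BIT(6 + (id) * 3)

int main(void)
{
	/* mainpath (id 0): old MP_Y/MP_CB/MP_CR bits 4..6 */
	assert(WRAP_Y(0) == BIT(4) && WRAP_CB(0) == BIT(5) && WRAP_CR(0) == BIT(6));
	/* selfpath (id 1): old SP_Y/SP_CB/SP_CR bits 7..9 */
	assert(WRAP_Y(1) == BIT(7) && WRAP_CB(1) == BIT(8) && WRAP_CR(1) == BIT(9));
	return 0;
}
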
/* MI_STATUS */
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
index 6297870ee..a8e377701 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
@@ -142,10 +142,8 @@ static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz,
struct v4l2_rect *sink_crop;
u32 dc_ctrl;
- sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
- sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state, RKISP1_RSZ_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
if (sink_crop->width == sink_fmt->width &&
sink_crop->height == sink_fmt->height &&
@@ -275,10 +273,8 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
struct v4l2_area src_y, src_c;
struct v4l2_rect sink_c;
- sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
- src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SRC);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SRC);
sink_yuv_info = rkisp1_rsz_get_yuv_mbus_info(sink_fmt->code);
src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
@@ -292,8 +288,7 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
return;
}
- sink_y = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ sink_y = v4l2_subdev_state_get_crop(sd_state, RKISP1_RSZ_PAD_SINK);
sink_c.width = sink_y->width / sink_yuv_info->hdiv;
sink_c.height = sink_y->height / sink_yuv_info->vdiv;
@@ -381,14 +376,13 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
return -EINVAL;
}
-static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int rkisp1_rsz_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_RSZ_PAD_SRC);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
@@ -398,15 +392,13 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
- sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state, RKISP1_RSZ_PAD_SINK);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
- src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
*src_fmt = *sink_fmt;
/* NOTE: there is no crop in the source pad, only in the sink */
@@ -421,10 +413,8 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
const struct rkisp1_mbus_info *sink_mbus_info;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
- sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
- src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SRC);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SRC);
sink_mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
@@ -451,10 +441,8 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
- sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
- sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state, RKISP1_RSZ_PAD_SINK);
/* No crop for MP Bayer raw data */
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
@@ -488,12 +476,9 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
struct v4l2_rect *sink_crop;
bool is_yuv;
- sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
- src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SRC);
- sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SINK);
+ src_fmt = v4l2_subdev_state_get_format(sd_state, RKISP1_RSZ_PAD_SRC);
+ sink_crop = v4l2_subdev_state_get_crop(sd_state, RKISP1_RSZ_PAD_SINK);
if (rsz->id == RKISP1_SELFPATH)
sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
@@ -583,8 +568,8 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
- mf_sink = v4l2_subdev_get_pad_format(sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ mf_sink = v4l2_subdev_state_get_format(sd_state,
+ RKISP1_RSZ_PAD_SINK);
sel->r.height = mf_sink->height;
sel->r.width = mf_sink->width;
sel->r.left = 0;
@@ -592,8 +577,8 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
break;
case V4L2_SEL_TGT_CROP:
- sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state,
- RKISP1_RSZ_PAD_SINK);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state,
+ RKISP1_RSZ_PAD_SINK);
break;
default:
@@ -630,7 +615,6 @@ static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = {
.enum_mbus_code = rkisp1_rsz_enum_mbus_code,
.get_selection = rkisp1_rsz_get_selection,
.set_selection = rkisp1_rsz_set_selection,
- .init_cfg = rkisp1_rsz_init_config,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = rkisp1_rsz_set_fmt,
.link_validate = v4l2_subdev_link_validate_default,
@@ -677,6 +661,10 @@ static const struct v4l2_subdev_ops rkisp1_rsz_ops = {
.pad = &rkisp1_rsz_pad_ops,
};
+static const struct v4l2_subdev_internal_ops rkisp1_rsz_internal_ops = {
+ .init_state = rkisp1_rsz_init_state,
+};
+
static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
{
if (!rsz->rkisp1)
@@ -706,6 +694,7 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
}
v4l2_subdev_init(sd, &rkisp1_rsz_ops);
+ sd->internal_ops = &rkisp1_rsz_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->entity.ops = &rkisp1_rsz_media_ops;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
index 1ea5fa1bf..b9777e07f 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
@@ -26,7 +26,6 @@
#include "gsc-regs.h"
-#define CONFIG_VB2_GSC_DMA_CONTIG 1
#define GSC_MODULE_NAME "exynos-gsc"
#define GSC_SHUTDOWN_TIMEOUT ((100*HZ)/1000)
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
index a0d43bf89..05cafba1c 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c
@@ -1479,7 +1479,7 @@ static int fimc_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1534,7 +1534,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mf->colorspace = V4L2_COLORSPACE_JPEG;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
return 0;
}
@@ -1604,10 +1604,10 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
return 0;
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ try_sel = v4l2_subdev_state_get_crop(sd_state, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
+ try_sel = v4l2_subdev_state_get_compose(sd_state, sel->pad);
f = &ctx->d_frame;
break;
default:
@@ -1651,10 +1651,10 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ try_sel = v4l2_subdev_state_get_crop(sd_state, sel->pad);
break;
case V4L2_SEL_TGT_COMPOSE:
- try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad);
+ try_sel = v4l2_subdev_state_get_compose(sd_state, sel->pad);
f = &ctx->d_frame;
break;
default:
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index 97908778e..0be687b01 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -814,7 +814,7 @@ err:
fimc_clk_put(fimc);
dev_err(&fimc->pdev->dev, "failed to get clock: %s\n",
fimc_clocks[i]);
- return -ENXIO;
+ return ret;
}
#ifdef CONFIG_PM
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
index bef6e9b4a..44363c424 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c
@@ -57,7 +57,6 @@ static int fimc_is_i2c_probe(struct platform_device *pdev)
strscpy(i2c_adap->name, "exynos4x12-isp-i2c", sizeof(i2c_adap->name));
i2c_adap->owner = THIS_MODULE;
i2c_adap->algo = &fimc_is_i2c_algorithm;
- i2c_adap->class = I2C_CLASS_SPD;
platform_set_drvdata(pdev, isp_i2c);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-isp.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp.c
index b85986e50..3c5d7bee2 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp.c
@@ -126,7 +126,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *mf = *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ *mf = *v4l2_subdev_state_get_format(sd_state, fmt->pad);
return 0;
}
@@ -172,9 +172,8 @@ static void __isp_subdev_try_format(struct fimc_isp *isp,
mf->code = MEDIA_BUS_FMT_SGRBG10_1X10;
} else {
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- format = v4l2_subdev_get_try_format(&isp->subdev,
- sd_state,
- FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_state_get_format(sd_state,
+ FIMC_ISP_SD_PAD_SINK);
else
format = &isp->sink_fmt;
@@ -207,7 +206,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
__isp_subdev_try_format(isp, sd_state, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
/* Propagate format to the source pads */
@@ -220,8 +219,8 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
format.pad = pad;
__isp_subdev_try_format(isp, sd_state,
&format);
- mf = v4l2_subdev_get_try_format(sd, sd_state,
- pad);
+ mf = v4l2_subdev_state_get_format(sd_state,
+ pad);
*mf = format.format;
}
}
@@ -374,18 +373,17 @@ static int fimc_isp_subdev_open(struct v4l2_subdev *sd,
.field = V4L2_FIELD_NONE,
};
- format = v4l2_subdev_get_try_format(sd, fh->state,
- FIMC_ISP_SD_PAD_SINK);
+ format = v4l2_subdev_state_get_format(fh->state, FIMC_ISP_SD_PAD_SINK);
*format = fmt;
- format = v4l2_subdev_get_try_format(sd, fh->state,
- FIMC_ISP_SD_PAD_SRC_FIFO);
+ format = v4l2_subdev_state_get_format(fh->state,
+ FIMC_ISP_SD_PAD_SRC_FIFO);
fmt.width = DEFAULT_PREVIEW_STILL_WIDTH;
fmt.height = DEFAULT_PREVIEW_STILL_HEIGHT;
*format = fmt;
- format = v4l2_subdev_get_try_format(sd, fh->state,
- FIMC_ISP_SD_PAD_SRC_DMA);
+ format = v4l2_subdev_state_get_format(fh->state,
+ FIMC_ISP_SD_PAD_SRC_DMA);
*format = fmt;
return 0;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index 9396b10b5..7898c9beb 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -574,16 +574,14 @@ static const struct fimc_fmt *fimc_lite_subdev_try_fmt(struct fimc_lite *fimc,
struct v4l2_rect *rect;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sink_fmt = v4l2_subdev_get_try_format(&fimc->subdev,
- sd_state,
- FLITE_SD_PAD_SINK);
+ sink_fmt = v4l2_subdev_state_get_format(sd_state,
+ FLITE_SD_PAD_SINK);
mf->code = sink_fmt->code;
mf->colorspace = sink_fmt->colorspace;
- rect = v4l2_subdev_get_try_crop(&fimc->subdev,
- sd_state,
- FLITE_SD_PAD_SINK);
+ rect = v4l2_subdev_state_get_crop(sd_state,
+ FLITE_SD_PAD_SINK);
} else {
mf->code = sink->fmt->mbus_code;
mf->colorspace = sink->fmt->colorspace;
@@ -1021,7 +1019,7 @@ static struct v4l2_mbus_framefmt *__fimc_lite_subdev_get_try_fmt(
if (pad != FLITE_SD_PAD_SINK)
pad = FLITE_SD_PAD_SOURCE_DMA;
- return v4l2_subdev_get_try_format(sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
}
static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
@@ -1129,7 +1127,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, sel->pad);
return 0;
}
@@ -1166,7 +1164,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
fimc_lite_try_crop(fimc, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
+ *v4l2_subdev_state_get_crop(sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
spin_lock_irqsave(&fimc->slock, flags);
diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
index 686ca8753..aae8a8b2c 100644
--- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c
@@ -569,8 +569,7 @@ static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return sd_state ? v4l2_subdev_get_try_format(&state->sd,
- sd_state, 0) : NULL;
+ return sd_state ? v4l2_subdev_state_get_format(sd_state, 0) : NULL;
return &state->format;
}
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index 0f5b3845d..be58260ea 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -1216,7 +1216,7 @@ static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
@@ -1305,7 +1305,7 @@ static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
__camif_subdev_try_format(camif, mf, fmt->pad);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
*mf = fmt->format;
mutex_unlock(&camif->lock);
return 0;
@@ -1357,7 +1357,7 @@ static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, sel->pad);
return 0;
}
@@ -1445,7 +1445,7 @@ static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
__camif_try_crop(camif, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
+ *v4l2_subdev_state_get_crop(sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
unsigned int i;
diff --git a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v12.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v12.h
new file mode 100644
index 000000000..24e669d8e
--- /dev/null
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v12.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register definition file for Samsung MFC V12.x Interface (FIMV) driver
+ *
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ */
+
+#ifndef _REGS_MFC_V12_H
+#define _REGS_MFC_V12_H
+
+#include <linux/sizes.h>
+#include "regs-mfc-v10.h"
+
+/* MFCv12 Context buffer sizes */
+#define MFC_CTX_BUF_SIZE_V12 (30 * SZ_1K)
+#define MFC_H264_DEC_CTX_BUF_SIZE_V12 (2 * SZ_1M)
+#define MFC_OTHER_DEC_CTX_BUF_SIZE_V12 (30 * SZ_1K)
+#define MFC_H264_ENC_CTX_BUF_SIZE_V12 (100 * SZ_1K)
+#define MFC_HEVC_ENC_CTX_BUF_SIZE_V12 (40 * SZ_1K)
+#define MFC_OTHER_ENC_CTX_BUF_SIZE_V12 (25 * SZ_1K)
+
+/* MFCv12 variant defines */
+#define MAX_FW_SIZE_V12 (SZ_1M)
+#define MAX_CPB_SIZE_V12 (7 * SZ_1M)
+#define MFC_VERSION_V12 0xC0
+#define MFC_NUM_PORTS_V12 1
+#define S5P_FIMV_CODEC_VP9_ENC 27
+#define MFC_CHROMA_PAD_BYTES_V12 256
+#define S5P_FIMV_D_ALIGN_PLANE_SIZE_V12 256
+
+/* Encoder buffer size for MFCv12 */
+#define ENC_V120_BASE_SIZE(x, y) \
+ ((((x) + 3) * ((y) + 3) * 8) \
+ + ((((y) * 64) + 2304) * ((x) + 7) / 8))
+
+#define ENC_V120_H264_ME_SIZE(x, y) \
+ ALIGN((ENC_V120_BASE_SIZE(x, y) \
+ + (DIV_ROUND_UP((x) * (y), 64) * 32)), 256)
+
+#define ENC_V120_MPEG4_ME_SIZE(x, y) \
+ ALIGN((ENC_V120_BASE_SIZE(x, y) \
+ + (DIV_ROUND_UP((x) * (y), 128) * 16)), 256)
+
+#define ENC_V120_VP8_ME_SIZE(x, y) \
+ ALIGN(ENC_V120_BASE_SIZE((x), (y)), 256)
+
+#define ENC_V120_HEVC_ME_SIZE(x, y) \
+ ALIGN(((((x) + 3) * ((y) + 3) * 32) \
+ + ((((y) * 128) + 2304) * ((x) + 3) / 4)), 256)
+
+#endif /*_REGS_MFC_V12_H*/
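
A worked example of the motion-estimation buffer arithmetic above, assuming (consistent with the v10 variants) that x and y are the frame size in 16x16 macroblocks, so a 1920x1088 frame gives x = 120, y = 68. DIV_ROUND_UP and ALIGN are redefined locally so the sketch runs in userspace:

/* Worked example: H.264 ME buffer size for a 1920x1088 frame on MFC v12. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(v, a)		(((v) + (a) - 1) & ~((a) - 1))

#define ENC_V120_BASE_SIZE(x, y) \
	((((x) + 3) * ((y) + 3) * 8) \
	+ ((((y) * 64) + 2304) * ((x) + 7) / 8))

#define ENC_V120_H264_ME_SIZE(x, y) \
	ALIGN((ENC_V120_BASE_SIZE(x, y) \
	+ (DIV_ROUND_UP((x) * (y), 64) * 32)), 256)

int main(void)
{
	/* base = 123 * 71 * 8 + 6656 * 15 = 169704; +4096, aligned = 173824 */
	printf("%d bytes (~%d KiB)\n",
	       ENC_V120_H264_ME_SIZE(120, 68),
	       ENC_V120_H264_ME_SIZE(120, 68) / 1024);
	return 0;
}
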
diff --git a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h
index 4a7adfdaa..50f9bf060 100644
--- a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v7.h
@@ -24,6 +24,7 @@
#define S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7 0xfa70
#define S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7 0xfa74
+#define S5P_FIMV_E_ENCODED_SOURCE_THIRD_ADDR_V7 0xfa78
#define S5P_FIMV_E_VP8_OPTIONS_V7 0xfdb0
#define S5P_FIMV_E_VP8_FILTER_OPTIONS_V7 0xfdb4
diff --git a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h
index 162e3c7e9..0ef9eb2df 100644
--- a/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h
+++ b/drivers/media/platform/samsung/s5p-mfc/regs-mfc-v8.h
@@ -17,13 +17,16 @@
#define S5P_FIMV_D_MIN_SCRATCH_BUFFER_SIZE_V8 0xf108
#define S5P_FIMV_D_FIRST_PLANE_DPB_SIZE_V8 0xf144
#define S5P_FIMV_D_SECOND_PLANE_DPB_SIZE_V8 0xf148
+#define S5P_FIMV_D_THIRD_PLANE_DPB_SIZE_V8 0xf14C
#define S5P_FIMV_D_MV_BUFFER_SIZE_V8 0xf150
#define S5P_FIMV_D_FIRST_PLANE_DPB_STRIDE_SIZE_V8 0xf138
#define S5P_FIMV_D_SECOND_PLANE_DPB_STRIDE_SIZE_V8 0xf13c
+#define S5P_FIMV_D_THIRD_PLANE_DPB_STRIDE_SIZE_V8 0xf140
#define S5P_FIMV_D_FIRST_PLANE_DPB_V8 0xf160
#define S5P_FIMV_D_SECOND_PLANE_DPB_V8 0xf260
+#define S5P_FIMV_D_THIRD_PLANE_DPB_V8 0xf360
#define S5P_FIMV_D_MV_BUFFER_V8 0xf460
#define S5P_FIMV_D_NUM_MV_V8 0xf134
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index e30e54935..fbb047ead 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -604,6 +604,8 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
s5p_mfc_clock_off();
wake_up(&ctx->queue);
+ if (ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+ set_work_bit_irqsave(ctx);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
@@ -790,6 +792,8 @@ static int s5p_mfc_open(struct file *file)
INIT_LIST_HEAD(&ctx->dst_queue);
ctx->src_queue_cnt = 0;
ctx->dst_queue_cnt = 0;
+ ctx->is_422 = 0;
+ ctx->is_10bit = 0;
/* Get context number */
ctx->num = 0;
while (dev->ctx[ctx->num]) {
@@ -863,7 +867,7 @@ static int s5p_mfc_open(struct file *file)
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
} else if (vdev == dev->vfd_enc) {
- q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->ops = get_enc_queue_ops();
} else {
ret = -ENOENT;
@@ -890,7 +894,7 @@ static int s5p_mfc_open(struct file *file)
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
} else if (vdev == dev->vfd_enc) {
- q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->ops = get_enc_queue_ops();
} else {
ret = -ENOENT;
@@ -1660,6 +1664,31 @@ static struct s5p_mfc_variant mfc_drvdata_v10 = {
.fw_name[0] = "s5p-mfc-v10.fw",
};
+static struct s5p_mfc_buf_size_v6 mfc_buf_size_v12 = {
+ .dev_ctx = MFC_CTX_BUF_SIZE_V12,
+ .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V12,
+ .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V12,
+ .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V12,
+ .hevc_enc_ctx = MFC_HEVC_ENC_CTX_BUF_SIZE_V12,
+ .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V12,
+};
+
+static struct s5p_mfc_buf_size buf_size_v12 = {
+ .fw = MAX_FW_SIZE_V12,
+ .cpb = MAX_CPB_SIZE_V12,
+ .priv = &mfc_buf_size_v12,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v12 = {
+ .version = MFC_VERSION_V12,
+ .version_bit = MFC_V12_BIT,
+ .port_num = MFC_NUM_PORTS_V12,
+ .buf_size = &buf_size_v12,
+ .fw_name[0] = "s5p-mfc-v12.fw",
+ .clk_names = {"mfc"},
+ .num_clocks = 1,
+};
+
static const struct of_device_id exynos_mfc_match[] = {
{
.compatible = "samsung,mfc-v5",
@@ -1682,6 +1711,9 @@ static const struct of_device_id exynos_mfc_match[] = {
}, {
.compatible = "samsung,mfc-v10",
.data = &mfc_drvdata_v10,
+ }, {
+ .compatible = "tesla,fsd-mfc",
+ .data = &mfc_drvdata_v12,
},
{},
};
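
Once the "tesla,fsd-mfc" compatible is matched, the variant is what drives buffer sizing and firmware selection. A hedged sketch of the consuming side (the real s5p-mfc probe path is outside this hunk):

/* Sketch: retrieving the matched variant at probe time. */
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	const struct s5p_mfc_variant *variant =
		of_device_get_match_data(&pdev->dev);

	if (!variant)
		return -ENODEV;
	/* variant->buf_size, variant->fw_name[0], ... now drive setup */
	return 0;
}
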
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index 5304f42c8..59450b324 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -19,7 +19,7 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include "regs-mfc.h"
-#include "regs-mfc-v10.h"
+#include "regs-mfc-v12.h"
#define S5P_MFC_NAME "s5p-mfc"
@@ -56,6 +56,7 @@
#define MFC_NO_INSTANCE_SET -1
#define MFC_ENC_CAP_PLANE_COUNT 1
#define MFC_ENC_OUT_PLANE_COUNT 2
+#define VB2_MAX_PLANE_COUNT 3
#define STUFF_BYTE 4
#define MFC_MAX_CTRLS 128
@@ -181,6 +182,7 @@ struct s5p_mfc_buf {
struct {
size_t luma;
size_t chroma;
+ size_t chroma_1;
} raw;
size_t stream;
} cookie;
@@ -621,6 +623,10 @@ struct s5p_mfc_codec_ops {
* v4l2 control framework
* @ctrl_handler: handler for v4l2 framework
* @scratch_buf_size: scratch buffer size
+ * @is_10bit: flag set when the configured format is 10-bit
+ * @is_422: flag set when the configured format is YUV 4:2:2
+ * @chroma_size_1: size of the second chroma plane (third plane overall)
+ * @stride: per-plane stride
*/
struct s5p_mfc_ctx {
struct s5p_mfc_dev *dev;
@@ -657,6 +663,7 @@ struct s5p_mfc_ctx {
int luma_size;
int chroma_size;
+ int chroma_size_1;
int mv_size;
unsigned long consumed_stream;
@@ -720,6 +727,9 @@ struct s5p_mfc_ctx {
struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
struct v4l2_ctrl_handler ctrl_handler;
size_t scratch_buf_size;
+ int is_10bit;
+ int is_422;
+ int stride[VB2_MAX_PLANE_COUNT];
};
/*
@@ -771,22 +781,27 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
#define HAS_PORTNUM(dev) (dev ? (dev->variant ? \
(dev->variant->port_num ? 1 : 0) : 0) : 0)
#define IS_TWOPORT(dev) (dev->variant->port_num == 2 ? 1 : 0)
-#define IS_MFCV6_PLUS(dev) (dev->variant->version >= 0x60 ? 1 : 0)
-#define IS_MFCV7_PLUS(dev) (dev->variant->version >= 0x70 ? 1 : 0)
-#define IS_MFCV8_PLUS(dev) (dev->variant->version >= 0x80 ? 1 : 0)
-#define IS_MFCV10(dev) (dev->variant->version >= 0xA0 ? 1 : 0)
-#define FW_HAS_E_MIN_SCRATCH_BUF(dev) (IS_MFCV10(dev))
+#define IS_MFCV6_PLUS(dev) ((dev)->variant->version >= 0x60)
+#define IS_MFCV7_PLUS(dev) ((dev)->variant->version >= 0x70)
+#define IS_MFCV8_PLUS(dev) ((dev)->variant->version >= 0x80)
+#define IS_MFCV10_PLUS(dev) ((dev)->variant->version >= 0xA0)
+#define IS_MFCV12(dev) ((dev)->variant->version >= 0xC0)
+#define FW_HAS_E_MIN_SCRATCH_BUF(dev) (IS_MFCV10_PLUS(dev))
#define MFC_V5_BIT BIT(0)
#define MFC_V6_BIT BIT(1)
#define MFC_V7_BIT BIT(2)
#define MFC_V8_BIT BIT(3)
#define MFC_V10_BIT BIT(5)
+#define MFC_V12_BIT BIT(7)
#define MFC_V5PLUS_BITS (MFC_V5_BIT | MFC_V6_BIT | MFC_V7_BIT | \
- MFC_V8_BIT | MFC_V10_BIT)
+ MFC_V8_BIT | MFC_V10_BIT | MFC_V12_BIT)
#define MFC_V6PLUS_BITS (MFC_V6_BIT | MFC_V7_BIT | MFC_V8_BIT | \
- MFC_V10_BIT)
-#define MFC_V7PLUS_BITS (MFC_V7_BIT | MFC_V8_BIT | MFC_V10_BIT)
+ MFC_V10_BIT | MFC_V12_BIT)
+#define MFC_V7PLUS_BITS (MFC_V7_BIT | MFC_V8_BIT | MFC_V10_BIT | \
+ MFC_V12_BIT)
+
+#define MFC_V10PLUS_BITS (MFC_V10_BIT | MFC_V12_BIT)
#endif /* S5P_MFC_COMMON_H_ */
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
index 6d3c92045..503487f34 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c
@@ -51,8 +51,14 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
* into kernel. */
mfc_debug_enter();
- if (dev->fw_get_done)
- return 0;
+ /* On MFC v12, the RET_SYS_INIT response from the hardware fails due to
+ * an incorrect firmware transfer, leaving the hardware uninitialized.
+ * The SYS_INIT command then fails when MFC runs a second time, so load
+ * the MFC v12 firmware on every run.
+ */
+ if (!IS_MFCV12(dev))
+ if (dev->fw_get_done)
+ return 0;
for (i = MFC_FW_MAX_VERSIONS - 1; i >= 0; i--) {
if (!dev->variant->fw_name[i])
@@ -130,7 +136,7 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
mfc_write(dev, 0, S5P_FIMV_REG_CLEAR_BEGIN_V6 + (i*4));
/* check bus reset control before reset */
- if (dev->risc_on)
+ if (dev->risc_on && !IS_MFCV12(dev))
if (s5p_mfc_bus_reset(dev))
return -EIO;
/* Reset
@@ -236,7 +242,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
else
mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
- if (IS_MFCV10(dev))
+ if (IS_MFCV10_PLUS(dev))
mfc_write(dev, 0x0, S5P_FIMV_MFC_CLOCK_OFF_V10);
mfc_debug(2, "Will now wait for completion of firmware transfer\n");
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
index 268ffe4da..3957f28d4 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
@@ -57,6 +57,20 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6PLUS_BITS,
},
{
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 3,
+ .versions = MFC_V12_BIT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 3,
+ .versions = MFC_V12_BIT
+ },
+ {
.fourcc = V4L2_PIX_FMT_H264,
.codec_mode = S5P_MFC_CODEC_H264_DEC,
.type = MFC_FMT_DEC,
@@ -146,7 +160,7 @@ static struct s5p_mfc_fmt formats[] = {
.codec_mode = S5P_FIMV_CODEC_HEVC_DEC,
.type = MFC_FMT_DEC,
.num_planes = 1,
- .versions = MFC_V10_BIT,
+ .versions = MFC_V10PLUS_BITS,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION |
V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM,
},
@@ -155,7 +169,7 @@ static struct s5p_mfc_fmt formats[] = {
.codec_mode = S5P_FIMV_CODEC_VP9_DEC,
.type = MFC_FMT_DEC,
.num_planes = 1,
- .versions = MFC_V10_BIT,
+ .versions = MFC_V10PLUS_BITS,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
};
@@ -355,14 +369,19 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix_mp->width = ctx->buf_width;
pix_mp->height = ctx->buf_height;
pix_mp->field = V4L2_FIELD_NONE;
- pix_mp->num_planes = 2;
+ pix_mp->num_planes = ctx->dst_fmt->num_planes;
/* Set pixelformat to the format in which MFC
outputs the decoded frame */
pix_mp->pixelformat = ctx->dst_fmt->fourcc;
- pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[0].bytesperline = ctx->stride[0];
pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
- pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[1].bytesperline = ctx->stride[1];
pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ pix_mp->plane_fmt[2].bytesperline = ctx->stride[2];
+ pix_mp->plane_fmt[2].sizeimage = ctx->chroma_size_1;
+ }
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
/* This is run on OUTPUT
The buffer contains compressed image
@@ -920,6 +939,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
+ const struct v4l2_format_info *format;
/* Video output for decoding (source)
* this can be set after getting an instance */
@@ -936,7 +956,13 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
} else if (ctx->state == MFCINST_HEAD_PARSED &&
vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
/* Output plane count depends on the format: Y plus one or two chroma planes */
- *plane_count = 2;
+ format = v4l2_format_info(ctx->dst_fmt->fourcc);
+ if (!format) {
+ mfc_err("invalid format\n");
+ return -EINVAL;
+ }
+ *plane_count = format->comp_planes;
+
/* Setup buffer count */
if (*buf_count < ctx->pb_count)
*buf_count = ctx->pb_count;
@@ -955,14 +981,18 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
psize[0] = ctx->luma_size;
psize[1] = ctx->chroma_size;
-
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ psize[2] = ctx->chroma_size_1;
if (IS_MFCV6_PLUS(dev))
alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
else
alloc_devs[0] = ctx->dev->mem_dev[BANK_R_CTX];
alloc_devs[1] = ctx->dev->mem_dev[BANK_L_CTX];
- } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
- ctx->state == MFCINST_INIT) {
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ alloc_devs[2] = ctx->dev->mem_dev[BANK_L_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && ctx->state == MFCINST_INIT) {
psize[0] = ctx->dec_src_buf_size;
alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
} else {
@@ -994,12 +1024,24 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
mfc_err("Plane buffer (CAPTURE) is too small\n");
return -EINVAL;
}
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ if (vb2_plane_size(vb, 2) < ctx->chroma_size_1) {
+ mfc_err("Plane buffer (CAPTURE) is too small\n");
+ return -EINVAL;
+ }
+ }
i = vb->index;
ctx->dst_bufs[i].b = vbuf;
ctx->dst_bufs[i].cookie.raw.luma =
vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->dst_bufs[i].cookie.raw.chroma =
vb2_dma_contig_plane_dma_addr(vb, 1);
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ ctx->dst_bufs[i].cookie.raw.chroma_1 =
+ vb2_dma_contig_plane_dma_addr(vb, 2);
+ }
ctx->dst_bufs_cnt++;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (IS_ERR_OR_NULL(ERR_PTR(
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
index 4b4c129c0..ef8bb40b9 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
@@ -60,6 +60,20 @@ static struct s5p_mfc_fmt formats[] = {
.versions = MFC_V6PLUS_BITS,
},
{
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 3,
+ .versions = MFC_V12_BIT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .codec_mode = S5P_MFC_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 3,
+ .versions = MFC_V12_BIT,
+ },
+ {
.fourcc = V4L2_PIX_FMT_H264,
.codec_mode = S5P_MFC_CODEC_H264_ENC,
.type = MFC_FMT_ENC,
@@ -92,7 +106,7 @@ static struct s5p_mfc_fmt formats[] = {
.codec_mode = S5P_FIMV_CODEC_HEVC_ENC,
.type = MFC_FMT_ENC,
.num_planes = 1,
- .versions = MFC_V10_BIT,
+ .versions = MFC_V10PLUS_BITS,
},
};
@@ -1150,7 +1164,6 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_enc_params *p = &ctx->enc_params;
struct s5p_mfc_buf *dst_mb;
- unsigned int enc_pb_count;
if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
if (!list_empty(&ctx->dst_queue)) {
@@ -1172,14 +1185,12 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
set_work_bit_irqsave(ctx);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
- enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
- get_enc_dpb_count, dev);
- if (ctx->pb_count < enc_pb_count)
- ctx->pb_count = enc_pb_count;
+ ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_enc_dpb_count, dev);
if (FW_HAS_E_MIN_SCRATCH_BUF(dev)) {
ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
get_e_min_scratch_buf_size, dev);
- ctx->bank1.size += ctx->scratch_buf_size;
+ if (!IS_MFCV12(dev))
+ ctx->bank1.size += ctx->scratch_buf_size;
}
ctx->state = MFCINST_HEAD_PRODUCED;
}
@@ -1192,14 +1203,20 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_mb;
struct s5p_mfc_buf *src_mb;
- unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned long src_y_addr, src_c_addr, src_c_1_addr, dst_addr;
unsigned int dst_size;
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ src_c_1_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 2);
+ else
+ src_c_1_addr = 0;
s5p_mfc_hw_call(dev->mfc_ops, set_enc_frame_buffer, ctx,
- src_y_addr, src_c_addr);
+ src_y_addr, src_c_addr, src_c_1_addr);
dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0);
@@ -1214,8 +1231,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *mb_entry;
- unsigned long enc_y_addr = 0, enc_c_addr = 0;
- unsigned long mb_y_addr, mb_c_addr;
+ unsigned long enc_y_addr = 0, enc_c_addr = 0, enc_c_1_addr = 0;
+ unsigned long mb_y_addr, mb_c_addr, mb_c_1_addr;
int slice_type;
unsigned int strm_size;
bool src_ready;
@@ -1228,18 +1245,26 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
if (slice_type >= 0) {
s5p_mfc_hw_call(dev->mfc_ops, get_enc_frame_buffer, ctx,
- &enc_y_addr, &enc_c_addr);
+ &enc_y_addr, &enc_c_addr, &enc_c_1_addr);
list_for_each_entry(mb_entry, &ctx->src_queue, list) {
mb_y_addr = vb2_dma_contig_plane_dma_addr(
&mb_entry->b->vb2_buf, 0);
mb_c_addr = vb2_dma_contig_plane_dma_addr(
&mb_entry->b->vb2_buf, 1);
- if ((enc_y_addr == mb_y_addr) &&
- (enc_c_addr == mb_c_addr)) {
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ mb_c_1_addr = vb2_dma_contig_plane_dma_addr(&mb_entry->b->vb2_buf, 2);
+ else
+ mb_c_1_addr = 0;
+ if (enc_y_addr == mb_y_addr && enc_c_addr == mb_c_addr &&
+     enc_c_1_addr == mb_c_1_addr) {
list_del(&mb_entry->list);
ctx->src_queue_cnt--;
vb2_buffer_done(&mb_entry->b->vb2_buf,
- VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_DONE);
break;
}
}
@@ -1248,20 +1273,27 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
&mb_entry->b->vb2_buf, 0);
mb_c_addr = vb2_dma_contig_plane_dma_addr(
&mb_entry->b->vb2_buf, 1);
- if ((enc_y_addr == mb_y_addr) &&
- (enc_c_addr == mb_c_addr)) {
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+     ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ mb_c_1_addr = vb2_dma_contig_plane_dma_addr(&mb_entry->b->vb2_buf, 2);
+ else
+ mb_c_1_addr = 0;
+ if (enc_y_addr == mb_y_addr && enc_c_addr == mb_c_addr &&
+     enc_c_1_addr == mb_c_1_addr) {
list_del(&mb_entry->list);
ctx->ref_queue_cnt--;
vb2_buffer_done(&mb_entry->b->vb2_buf,
- VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_DONE);
break;
}
}
}
if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING ||
- ctx->state == MFCINST_FINISHING)) {
+ ctx->state == MFCINST_FINISHING)) {
mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
- list);
+ list);
if (mb_entry->flags & MFC_BUF_FLAG_USED) {
list_del(&mb_entry->list);
ctx->src_queue_cnt--;
@@ -1380,10 +1412,15 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc;
pix_fmt_mp->num_planes = ctx->src_fmt->num_planes;
- pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->stride[0];
pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
- pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->stride[1];
pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ pix_fmt_mp->plane_fmt[2].bytesperline = ctx->stride[2];
+ pix_fmt_mp->plane_fmt[2].sizeimage = ctx->chroma_size_1;
+ }
} else {
mfc_err("invalid buf type\n");
return -EINVAL;
@@ -1420,9 +1457,12 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("Unsupported format by this MFC version.\n");
return -EINVAL;
}
-
- v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
- &pix_fmt_mp->height, 4, 1080, 1, 0);
+ if (IS_MFCV12(dev))
+ v4l_bound_align_image(&pix_fmt_mp->width, 8, 3840, 1,
+ &pix_fmt_mp->height, 4, 2160, 1, 0);
+ else
+ v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+ &pix_fmt_mp->height, 4, 1080, 1, 0);
} else {
mfc_err("invalid buf type\n");
return -EINVAL;
@@ -1467,9 +1507,14 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
s5p_mfc_hw_call(dev->mfc_ops, enc_calc_src_size, ctx);
pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
- pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->stride[0];
pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
- pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->stride[1];
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ pix_fmt_mp->plane_fmt[2].bytesperline = ctx->stride[2];
+ pix_fmt_mp->plane_fmt[2].sizeimage = ctx->chroma_size_1;
+ }
ctx->src_bufs_cnt = 0;
ctx->output_state = QUEUE_FREE;
@@ -1489,9 +1534,10 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret = 0;
- /* if memory is not mmp or userptr return error */
+ /* if memory is not mmap, userptr or dmabuf, return an error */
if ((reqbufs->memory != V4L2_MEMORY_MMAP) &&
- (reqbufs->memory != V4L2_MEMORY_USERPTR))
+ (reqbufs->memory != V4L2_MEMORY_USERPTR) &&
+ (reqbufs->memory != V4L2_MEMORY_DMABUF))
return -EINVAL;
if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (reqbufs->count == 0) {
@@ -1514,14 +1560,6 @@ static int vidioc_reqbufs(struct file *file, void *priv,
}
ctx->capture_state = QUEUE_BUFS_REQUESTED;
- ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
- alloc_codec_buffers, ctx);
- if (ret) {
- mfc_err("Failed to allocate encoding buffers\n");
- reqbufs->count = 0;
- ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
- return -ENOMEM;
- }
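+ /* codec buffers are now allocated when encoding starts, from s5p_mfc_run_init_enc_buffers() */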
} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (reqbufs->count == 0) {
mfc_debug(2, "Freeing buffers\n");
@@ -1537,15 +1575,13 @@ static int vidioc_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (IS_MFCV6_PLUS(dev)) {
+ if (IS_MFCV6_PLUS(dev) && (!IS_MFCV12(dev))) {
/* Check for min encoder buffers */
if (ctx->pb_count &&
(reqbufs->count < ctx->pb_count)) {
reqbufs->count = ctx->pb_count;
mfc_debug(2, "Minimum %d output buffers needed\n",
ctx->pb_count);
- } else {
- ctx->pb_count = reqbufs->count;
}
}
@@ -1568,9 +1604,10 @@ static int vidioc_querybuf(struct file *file, void *priv,
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret = 0;
- /* if memory is not mmp or userptr return error */
+ /* if memory is not mmap, userptr or dmabuf, return an error */
if ((buf->memory != V4L2_MEMORY_MMAP) &&
- (buf->memory != V4L2_MEMORY_USERPTR))
+ (buf->memory != V4L2_MEMORY_USERPTR) &&
+ (buf->memory != V4L2_MEMORY_DMABUF))
return -EINVAL;
if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (ctx->state != MFCINST_GOT_INST) {
@@ -2413,10 +2450,18 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
psize[0] = ctx->luma_size;
psize[1] = ctx->chroma_size;
+ if (ctx->src_fmt && (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M))
+ psize[2] = ctx->chroma_size_1;
if (IS_MFCV6_PLUS(dev)) {
alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
alloc_devs[1] = ctx->dev->mem_dev[BANK_L_CTX];
+ if (ctx->src_fmt && (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M))
+ alloc_devs[2] = ctx->dev->mem_dev[BANK_L_CTX];
} else {
alloc_devs[0] = ctx->dev->mem_dev[BANK_R_CTX];
alloc_devs[1] = ctx->dev->mem_dev[BANK_R_CTX];
@@ -2455,6 +2500,11 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
vb2_dma_contig_plane_dma_addr(vb, 0);
ctx->src_bufs[i].cookie.raw.chroma =
vb2_dma_contig_plane_dma_addr(vb, 1);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ ctx->src_bufs[i].cookie.raw.chroma_1 =
+ vb2_dma_contig_plane_dma_addr(vb, 2);
ctx->src_bufs_cnt++;
} else {
mfc_err("invalid queue type: %d\n", vq->type);
@@ -2492,6 +2542,12 @@ static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
mfc_err("plane size is too small for output\n");
return -EINVAL;
}
+ if ((ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M) &&
+ (vb2_plane_size(vb, 2) < ctx->chroma_size_1)) {
+ mfc_err("plane size is too small for output\n");
+ return -EINVAL;
+ }
} else {
mfc_err("invalid queue type: %d\n", vq->type);
return -EINVAL;
@@ -2513,11 +2569,11 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
S5P_MFC_R2H_CMD_SEQ_DONE_RET,
0);
}
-
- if (ctx->src_bufs_cnt < ctx->pb_count) {
- mfc_err("Need minimum %d OUTPUT buffers\n",
- ctx->pb_count);
- return -ENOBUFS;
+ if (q->memory != V4L2_MEMORY_DMABUF) {
+ if (ctx->src_bufs_cnt < ctx->pb_count) {
+ mfc_err("Need minimum %d OUTPUT buffers\n", ctx->pb_count);
+ return -ENOBUFS;
+ }
}
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h
index b9831275f..7c5e851c8 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr.h
@@ -166,9 +166,9 @@ struct s5p_mfc_regs {
void __iomem *d_decoded_third_addr;/* only v7 */
void __iomem *d_used_dpb_flag_upper;/* v7 and v8 */
void __iomem *d_used_dpb_flag_lower;/* v7 and v8 */
- void __iomem *d_min_scratch_buffer_size; /* v10 */
- void __iomem *d_static_buffer_addr; /* v10 */
- void __iomem *d_static_buffer_size; /* v10 */
+ void __iomem *d_min_scratch_buffer_size; /* v10 and v12 */
+ void __iomem *d_static_buffer_addr; /* v10 and v12 */
+ void __iomem *d_static_buffer_size; /* v10 and v12 */
/* encoder registers */
void __iomem *e_frame_width;
@@ -268,7 +268,7 @@ struct s5p_mfc_regs {
void __iomem *e_vp8_hierarchical_qp_layer0;/* v7 and v8 */
void __iomem *e_vp8_hierarchical_qp_layer1;/* v7 and v8 */
void __iomem *e_vp8_hierarchical_qp_layer2;/* v7 and v8 */
- void __iomem *e_min_scratch_buffer_size; /* v10 */
+ void __iomem *e_min_scratch_buffer_size; /* v10 and v12 */
void __iomem *e_num_t_layer; /* v10 */
void __iomem *e_hier_qp_layer0; /* v10 */
void __iomem *e_hier_bit_rate_layer0; /* v10 */
@@ -293,9 +293,11 @@ struct s5p_mfc_hw_ops {
int (*set_enc_stream_buffer)(struct s5p_mfc_ctx *ctx,
unsigned long addr, unsigned int size);
void (*set_enc_frame_buffer)(struct s5p_mfc_ctx *ctx,
- unsigned long y_addr, unsigned long c_addr);
+ unsigned long y_addr, unsigned long c_addr,
+ unsigned long c_1_addr);
void (*get_enc_frame_buffer)(struct s5p_mfc_ctx *ctx,
- unsigned long *y_addr, unsigned long *c_addr);
+ unsigned long *y_addr, unsigned long *c_addr,
+ unsigned long *c_1_addr);
void (*try_run)(struct s5p_mfc_dev *dev);
void (*clear_int_flags)(struct s5p_mfc_dev *dev);
int (*get_dspl_y_adr)(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
index 28a06dc34..fcfaf125a 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v5.c
@@ -516,7 +516,8 @@ static int s5p_mfc_set_enc_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
}
static void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
- unsigned long y_addr, unsigned long c_addr)
+ unsigned long y_addr, unsigned long c_addr,
+ unsigned long c_1_addr)
{
struct s5p_mfc_dev *dev = ctx->dev;
@@ -525,7 +526,8 @@ static void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
}
static void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
- unsigned long *y_addr, unsigned long *c_addr)
+ unsigned long *y_addr, unsigned long *c_addr,
+ unsigned long *c_1_addr)
{
struct s5p_mfc_dev *dev = ctx->dev;
@@ -1210,7 +1212,7 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
if (list_empty(&ctx->src_queue)) {
/* send null frame */
s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->dma_base[BANK_R_CTX],
- dev->dma_base[BANK_R_CTX]);
+ dev->dma_base[BANK_R_CTX], 0);
src_mb = NULL;
} else {
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
@@ -1220,7 +1222,7 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
/* send null frame */
s5p_mfc_set_enc_frame_buffer_v5(ctx,
dev->dma_base[BANK_R_CTX],
- dev->dma_base[BANK_R_CTX]);
+ dev->dma_base[BANK_R_CTX], 0);
ctx->state = MFCINST_FINISHING;
} else {
src_y_addr = vb2_dma_contig_plane_dma_addr(
@@ -1228,7 +1230,7 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
src_c_addr = vb2_dma_contig_plane_dma_addr(
&src_mb->b->vb2_buf, 1);
s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
- src_c_addr);
+ src_c_addr, 0);
if (src_mb->flags & MFC_BUF_FLAG_EOS)
ctx->state = MFCINST_FINISHING;
}
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index c0df5ac9f..fd945211d 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -60,21 +60,23 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- unsigned int mb_width, mb_height;
+ unsigned int mb_width, mb_height, width64, height32;
unsigned int lcu_width = 0, lcu_height = 0;
int ret;
mb_width = MB_WIDTH(ctx->img_width);
mb_height = MB_HEIGHT(ctx->img_height);
+ width64 = ALIGN(ctx->img_width, 64);
+ height32 = ALIGN(ctx->img_height, 32);
if (ctx->type == MFCINST_DECODER) {
mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
ctx->luma_size, ctx->chroma_size, ctx->mv_size);
mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count);
} else if (ctx->type == MFCINST_ENCODER) {
- if (IS_MFCV10(dev)) {
+ if (IS_MFCV10_PLUS(dev))
ctx->tmv_buffer_size = 0;
- } else if (IS_MFCV8_PLUS(dev))
+ else if (IS_MFCV8_PLUS(dev))
ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 *
ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V8(mb_width, mb_height),
S5P_FIMV_TMV_BUFFER_ALIGN_V6);
@@ -82,7 +84,39 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 *
ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V6(mb_width, mb_height),
S5P_FIMV_TMV_BUFFER_ALIGN_V6);
- if (IS_MFCV10(dev)) {
+ if (IS_MFCV12(dev)) {
+ lcu_width = S5P_MFC_LCU_WIDTH(ctx->img_width);
+ lcu_height = S5P_MFC_LCU_HEIGHT(ctx->img_height);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_HEVC_ENC && ctx->is_10bit) {
+ ctx->luma_dpb_size = width64 * height32 +
+ ALIGN(DIV_ROUND_UP(lcu_width * 32, 4), 16) * height32 + 128;
+ if (ctx->is_422)
+ ctx->chroma_dpb_size = ctx->luma_dpb_size;
+ else
+ ctx->chroma_dpb_size = width64 * height32 / 2 +
+ ALIGN(DIV_ROUND_UP(lcu_width * 32, 4), 16) * height32 / 2 + 128;
+ } else if (ctx->codec_mode == S5P_FIMV_CODEC_VP9_ENC && ctx->is_10bit) {
+ ctx->luma_dpb_size = ALIGN(ctx->img_width * 2, 128) * height32 + 64;
+ ctx->chroma_dpb_size = ALIGN(ctx->img_width * 2, 128) * height32 / 2 + 64;
+ } else {
+ ctx->luma_dpb_size = width64 * height32 + 64;
+ if (ctx->is_422)
+ ctx->chroma_dpb_size = ctx->luma_dpb_size;
+ else
+ ctx->chroma_dpb_size = width64 * height32 / 2 + 64;
+ }
+ ctx->luma_dpb_size = ALIGN(ctx->luma_dpb_size + 256, SZ_2K);
+ ctx->chroma_dpb_size = ALIGN(ctx->chroma_dpb_size + 256, SZ_2K);
+ } else if (IS_MFCV10_PLUS(dev)) {
lcu_width = S5P_MFC_LCU_WIDTH(ctx->img_width);
lcu_height = S5P_MFC_LCU_HEIGHT(ctx->img_height);
if (ctx->codec_mode != S5P_FIMV_CODEC_HEVC_ENC) {
@@ -133,7 +167,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
switch (ctx->codec_mode) {
case S5P_MFC_CODEC_H264_DEC:
case S5P_MFC_CODEC_H264_MVC_DEC:
- if (IS_MFCV10(dev))
+ if (IS_MFCV10_PLUS(dev))
mfc_debug(2, "Use min scratch buffer size\n");
else if (IS_MFCV8_PLUS(dev))
ctx->scratch_buf_size =
@@ -152,7 +186,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
(ctx->mv_count * ctx->mv_size);
break;
case S5P_MFC_CODEC_MPEG4_DEC:
- if (IS_MFCV10(dev))
+ if (IS_MFCV10_PLUS(dev))
mfc_debug(2, "Use min scratch buffer size\n");
else if (IS_MFCV7_PLUS(dev)) {
ctx->scratch_buf_size =
@@ -172,7 +206,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
break;
case S5P_MFC_CODEC_VC1RCV_DEC:
case S5P_MFC_CODEC_VC1_DEC:
- if (IS_MFCV10(dev))
+ if (IS_MFCV10_PLUS(dev))
mfc_debug(2, "Use min scratch buffer size\n");
else
ctx->scratch_buf_size =
@@ -189,7 +223,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_H263_DEC:
- if (IS_MFCV10(dev))
+ if (IS_MFCV10_PLUS(dev))
mfc_debug(2, "Use min scratch buffer size\n");
else
ctx->scratch_buf_size =
@@ -201,7 +235,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->bank1.size = ctx->scratch_buf_size;
break;
case S5P_MFC_CODEC_VP8_DEC:
- if (IS_MFCV10(dev))
+ if (IS_MFCV10_PLUS(dev))
mfc_debug(2, "Use min scratch buffer size\n");
else if (IS_MFCV8_PLUS(dev))
ctx->scratch_buf_size =
@@ -230,7 +264,11 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
DEC_VP9_STATIC_BUFFER_SIZE;
break;
case S5P_MFC_CODEC_H264_ENC:
- if (IS_MFCV10(dev)) {
+ if (IS_MFCV12(dev)) {
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ENC_V120_H264_ME_SIZE(mb_width, mb_height);
+ } else if (IS_MFCV10_PLUS(dev)) {
mfc_debug(2, "Use min scratch buffer size\n");
ctx->me_buffer_size =
ALIGN(ENC_V100_H264_ME_SIZE(mb_width, mb_height), 16);
@@ -254,7 +292,11 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
break;
case S5P_MFC_CODEC_MPEG4_ENC:
case S5P_MFC_CODEC_H263_ENC:
- if (IS_MFCV10(dev)) {
+ if (IS_MFCV12(dev)) {
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ENC_V120_MPEG4_ME_SIZE(mb_width, mb_height);
+ } else if (IS_MFCV10_PLUS(dev)) {
mfc_debug(2, "Use min scratch buffer size\n");
ctx->me_buffer_size =
ALIGN(ENC_V100_MPEG4_ME_SIZE(mb_width,
@@ -265,7 +307,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
mb_width,
mb_height);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
- S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
(ctx->pb_count * (ctx->luma_dpb_size +
@@ -273,7 +315,11 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_VP8_ENC:
- if (IS_MFCV10(dev)) {
+ if (IS_MFCV12(dev)) {
+ mfc_debug(2, "Use min scratch buffer size\n");
+ ctx->me_buffer_size =
+ ENC_V120_VP8_ME_SIZE(mb_width, mb_height);
+ } else if (IS_MFCV10_PLUS(dev)) {
mfc_debug(2, "Use min scratch buffer size\n");
ctx->me_buffer_size =
ALIGN(ENC_V100_VP8_ME_SIZE(mb_width, mb_height),
@@ -297,9 +343,13 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->bank2.size = 0;
break;
case S5P_MFC_CODEC_HEVC_ENC:
+ if (IS_MFCV12(dev))
+ ctx->me_buffer_size =
+ ENC_V120_HEVC_ME_SIZE(lcu_width, lcu_height);
+ else
+ ctx->me_buffer_size =
+ ALIGN(ENC_V100_HEVC_ME_SIZE(lcu_width, lcu_height), 16);
mfc_debug(2, "Use min scratch buffer size\n");
- ctx->me_buffer_size =
- ALIGN(ENC_V100_HEVC_ME_SIZE(lcu_width, lcu_height), 16);
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, 256);
ctx->bank1.size =
ctx->scratch_buf_size + ctx->tmv_buffer_size +
@@ -438,30 +488,48 @@ static void s5p_mfc_dec_calc_dpb_size_v6(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_dev *dev = ctx->dev;
ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6);
ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN_V6);
+ ctx->chroma_size_1 = 0;
mfc_debug(2, "SEQ Done: Movie dimensions %dx%d,\n"
"buffer dimensions: %dx%d\n", ctx->img_width,
ctx->img_height, ctx->buf_width, ctx->buf_height);
- ctx->luma_size = calc_plane(ctx->img_width, ctx->img_height);
- ctx->chroma_size = calc_plane(ctx->img_width, (ctx->img_height >> 1));
+ switch (ctx->dst_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6);
+ ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6);
+ ctx->luma_size = calc_plane(ctx->stride[0], ctx->img_height);
+ ctx->chroma_size = calc_plane(ctx->stride[1], (ctx->img_height / 2));
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YVU420M:
+ ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6);
+ ctx->stride[1] = ALIGN(ctx->img_width / 2, S5P_FIMV_NV12MT_HALIGN_V6);
+ ctx->stride[2] = ALIGN(ctx->img_width / 2, S5P_FIMV_NV12MT_HALIGN_V6);
+ ctx->luma_size = calc_plane(ctx->stride[0], ctx->img_height);
+ ctx->chroma_size = calc_plane(ctx->stride[1], (ctx->img_height / 2));
+ ctx->chroma_size_1 = calc_plane(ctx->stride[2], (ctx->img_height / 2));
+ break;
+ }
+
if (IS_MFCV8_PLUS(ctx->dev)) {
/* MFCv8 needs additional 64 bytes for luma,chroma dpb*/
ctx->luma_size += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8;
ctx->chroma_size += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8;
+ ctx->chroma_size_1 += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8;
}
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) {
- if (IS_MFCV10(dev)) {
- ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V10(ctx->img_width,
- ctx->img_height);
- } else {
- ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V6(ctx->img_width,
- ctx->img_height);
- }
+ if (IS_MFCV12(dev))
+ ctx->mv_size = S5P_MFC_DEC_MV_SIZE(ctx->img_width, ctx->img_height, 1024);
+ else if (IS_MFCV10_PLUS(dev))
+ ctx->mv_size = S5P_MFC_DEC_MV_SIZE(ctx->img_width, ctx->img_height, 512);
+ else
+ ctx->mv_size = S5P_MFC_DEC_MV_SIZE(ctx->img_width, ctx->img_height, 128);
+
} else if (ctx->codec_mode == S5P_MFC_CODEC_HEVC_DEC) {
- ctx->mv_size = s5p_mfc_dec_hevc_mv_size(ctx->img_width,
- ctx->img_height);
+ ctx->mv_size = s5p_mfc_dec_hevc_mv_size(ctx->img_width, ctx->img_height);
ctx->mv_size = ALIGN(ctx->mv_size, 32);
} else {
ctx->mv_size = 0;
@@ -475,14 +543,40 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
mb_width = MB_WIDTH(ctx->img_width);
mb_height = MB_HEIGHT(ctx->img_height);
- ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
- ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256);
- ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256);
-
- /* MFCv7 needs pad bytes for Luma and Chroma */
- if (IS_MFCV7_PLUS(ctx->dev)) {
+ if (IS_MFCV12(ctx->dev)) {
+ switch (ctx->src_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV21M:
+ ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16);
+ ctx->chroma_size = ctx->stride[0] * ALIGN(ctx->img_height / 2, 16);
+ break;
+ case V4L2_PIX_FMT_YUV420M:
+ case V4L2_PIX_FMT_YVU420M:
+ ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->stride[1] = ALIGN(ctx->img_width / 2, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->stride[2] = ALIGN(ctx->img_width / 2, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16);
+ ctx->chroma_size = ctx->stride[1] * ALIGN(ctx->img_height / 2, 16);
+ ctx->chroma_size_1 = ctx->stride[2] * ALIGN(ctx->img_height / 2, 16);
+ break;
+ }
ctx->luma_size += MFC_LUMA_PAD_BYTES_V7;
- ctx->chroma_size += MFC_CHROMA_PAD_BYTES_V7;
+ ctx->chroma_size += MFC_CHROMA_PAD_BYTES_V12;
+ ctx->chroma_size_1 += MFC_CHROMA_PAD_BYTES_V12;
+ } else {
+ ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
+ ctx->stride[0] = ctx->buf_width;
+ ctx->stride[1] = ctx->buf_width;
+ ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256);
+ ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256);
+ ctx->chroma_size_1 = 0;
+ /* MFCv7 needs pad bytes for Luma and Chroma */
+ if (IS_MFCV7_PLUS(ctx->dev)) {
+ ctx->luma_size += MFC_LUMA_PAD_BYTES_V7;
+ ctx->chroma_size += MFC_CHROMA_PAD_BYTES_V7;
+ }
}
}
@@ -529,15 +623,18 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
writel(ctx->total_dpb_count, mfc_regs->d_num_dpb);
writel(ctx->luma_size, mfc_regs->d_first_plane_dpb_size);
writel(ctx->chroma_size, mfc_regs->d_second_plane_dpb_size);
-
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ writel(ctx->chroma_size_1, mfc_regs->d_third_plane_dpb_size);
writel(buf_addr1, mfc_regs->d_scratch_buffer_addr);
writel(ctx->scratch_buf_size, mfc_regs->d_scratch_buffer_size);
if (IS_MFCV8_PLUS(dev)) {
- writel(ctx->img_width,
- mfc_regs->d_first_plane_dpb_stride_size);
- writel(ctx->img_width,
- mfc_regs->d_second_plane_dpb_stride_size);
+ writel(ctx->stride[0], mfc_regs->d_first_plane_dpb_stride_size);
+ writel(ctx->stride[1], mfc_regs->d_second_plane_dpb_stride_size);
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ writel(ctx->stride[2], mfc_regs->d_third_plane_dpb_stride_size);
}
buf_addr1 += ctx->scratch_buf_size;
@@ -566,6 +663,13 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
ctx->dst_bufs[i].cookie.raw.chroma);
writel(ctx->dst_bufs[i].cookie.raw.chroma,
mfc_regs->d_second_plane_dpb + i * 4);
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ mfc_debug(2, "\tChroma_1 %d: %zx\n", i,
+ ctx->dst_bufs[i].cookie.raw.chroma_1);
+ writel(ctx->dst_bufs[i].cookie.raw.chroma_1,
+ mfc_regs->d_third_plane_dpb + i * 4);
+ }
}
if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC ||
@@ -624,20 +728,24 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
}
static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
- unsigned long y_addr, unsigned long c_addr)
+ unsigned long y_addr, unsigned long c_addr,
+ unsigned long c_1_addr)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
writel(y_addr, mfc_regs->e_source_first_plane_addr);
writel(c_addr, mfc_regs->e_source_second_plane_addr);
+ writel(c_1_addr, mfc_regs->e_source_third_plane_addr);
mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
+ mfc_debug(2, "enc src cr buf addr: 0x%08lx\n", c_1_addr);
}
static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
- unsigned long *y_addr, unsigned long *c_addr)
+ unsigned long *y_addr, unsigned long *c_addr,
+ unsigned long *c_1_addr)
{
struct s5p_mfc_dev *dev = ctx->dev;
const struct s5p_mfc_regs *mfc_regs = dev->mfc_regs;
@@ -645,12 +753,17 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
*y_addr = readl(mfc_regs->e_encoded_source_first_plane_addr);
*c_addr = readl(mfc_regs->e_encoded_source_second_plane_addr);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ *c_1_addr = readl(mfc_regs->e_encoded_source_third_plane_addr);
+ else
+ *c_1_addr = 0;
enc_recon_y_addr = readl(mfc_regs->e_recon_luma_dpb_addr);
enc_recon_c_addr = readl(mfc_regs->e_recon_chroma_dpb_addr);
mfc_debug(2, "recon y addr: 0x%08lx y_addr: 0x%08lx\n", enc_recon_y_addr, *y_addr);
- mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
+ mfc_debug(2, "recon c addr: 0x%08lx c_addr: 0x%08lx\n", enc_recon_c_addr, *c_addr);
}
/* Set encoding ref & codec buffer */
@@ -668,7 +781,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
- if (IS_MFCV10(dev)) {
+ if (IS_MFCV10_PLUS(dev)) {
/* start address of per buffer is aligned */
for (i = 0; i < ctx->pb_count; i++) {
writel(buf_addr1, mfc_regs->e_luma_dpb + (4 * i));
@@ -827,6 +940,20 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
writel(reg, mfc_regs->e_enc_options);
/* 0: NV12(CbCr), 1: NV21(CrCb) */
writel(0x0, mfc_regs->pixel_format);
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M) {
+ /* 0: Linear, 1: 2D tiled*/
+ reg = readl(mfc_regs->e_enc_options);
+ reg &= ~(0x1 << 7);
+ writel(reg, mfc_regs->e_enc_options);
+ /* 2: YV12(CrCb), 3: I420(CbCr) */
+ writel(0x2, mfc_regs->pixel_format);
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M) {
+ /* 0: Linear, 1: 2D tiled*/
+ reg = readl(mfc_regs->e_enc_options);
+ reg &= ~(0x1 << 7);
+ writel(reg, mfc_regs->e_enc_options);
+ /* 2: YV12(CrCb), 3: I420(CbCr) */
+ writel(0x3, mfc_regs->pixel_format);
}
/* memory structure recon. frame */
@@ -865,10 +992,24 @@ static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
/* reaction coefficient */
if (p->rc_frame) {
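+ /*
+ * Note: the e_rc_mode encoding differs between firmware generations,
+ * as the branches below show: v12 uses 0 = tight CBR, 1 = loose CBR,
+ * 2 = VBR, while older firmware uses 1 = tight CBR and 2 = loose CBR.
+ */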
- if (p->rc_reaction_coeff < TIGHT_CBR_MAX) /* tight CBR */
- writel(1, mfc_regs->e_rc_mode);
- else /* loose CBR */
- writel(2, mfc_regs->e_rc_mode);
+ if (IS_MFCV12(dev)) {
+ /* loose CBR */
+ if (p->rc_reaction_coeff < LOOSE_CBR_MAX)
+ writel(1, mfc_regs->e_rc_mode);
+ /* tight CBR */
+ else if (p->rc_reaction_coeff < TIGHT_CBR_MAX)
+ writel(0, mfc_regs->e_rc_mode);
+ /* VBR */
+ else
+ writel(2, mfc_regs->e_rc_mode);
+ } else {
+ /* tight CBR */
+ if (p->rc_reaction_coeff < TIGHT_CBR_MAX)
+ writel(1, mfc_regs->e_rc_mode);
+ /* loose CBR */
+ else
+ writel(2, mfc_regs->e_rc_mode);
+ }
}
/* seq header ctrl */
@@ -930,6 +1071,18 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
reg |= ((p->num_b_frame & 0x3) << 16);
writel(reg, mfc_regs->e_gop_config);
+ /* UHD encoding case */
+ if (ctx->img_width == 3840 && ctx->img_height == 2160) {
+ if (p_h264->level < 51) {
+ mfc_debug(2, "Set Level 5.1 for UHD\n");
+ p_h264->level = 51;
+ }
+ if (p_h264->profile != 0x2) {
+ mfc_debug(2, "Set High profile for UHD\n");
+ p_h264->profile = 0x2;
+ }
+ }
+
/* profile & level */
reg = 0;
/** level */
@@ -1637,8 +1790,12 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
else
writel(reg, mfc_regs->d_dec_options);
- /* 0: NV12(CbCr), 1: NV21(CrCb) */
- if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M)
+ /* 0: NV12(CbCr), 1: NV21(CrCb), 2: YV12(CrCb), 3: I420(CbCr) */
+ if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YUV420M)
+ writel(0x3, mfc_regs->pixel_format);
+ else if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ writel(0x2, mfc_regs->pixel_format);
+ else if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M)
writel(0x1, mfc_regs->pixel_format);
else
writel(0x0, mfc_regs->pixel_format);
@@ -1722,8 +1879,11 @@ static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx)
/* Set stride lengths for v7 & above */
if (IS_MFCV7_PLUS(dev)) {
- writel(ctx->img_width, mfc_regs->e_source_first_plane_stride);
- writel(ctx->img_width, mfc_regs->e_source_second_plane_stride);
+ writel(ctx->stride[0], mfc_regs->e_source_first_plane_stride);
+ writel(ctx->stride[1], mfc_regs->e_source_second_plane_stride);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ writel(ctx->stride[2], mfc_regs->e_source_third_plane_stride);
}
writel(ctx->inst_no, mfc_regs->instance_id);
@@ -1832,7 +1992,7 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_mb;
struct s5p_mfc_buf *src_mb;
- unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned long src_y_addr, src_c_addr, src_c_1_addr, dst_addr;
/*
unsigned int src_y_size, src_c_size;
*/
@@ -1850,22 +2010,28 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
if (list_empty(&ctx->src_queue)) {
/* send null frame */
- s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0);
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0, 0);
src_mb = NULL;
} else {
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
src_mb->flags |= MFC_BUF_FLAG_USED;
if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
- s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0);
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, 0, 0, 0);
ctx->state = MFCINST_FINISHING;
} else {
src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0);
src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_YUV420M ||
+ ctx->src_fmt->fourcc == V4L2_PIX_FMT_YVU420M)
+ src_c_1_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 2);
+ else
+ src_c_1_addr = 0;
mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
- s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
+ s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr, src_c_1_addr);
if (src_mb->flags & MFC_BUF_FLAG_EOS)
ctx->state = MFCINST_FINISHING;
}
@@ -1944,6 +2110,13 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
struct s5p_mfc_dev *dev = ctx->dev;
int ret;
+ ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, alloc_codec_buffers, ctx);
+ if (ret) {
+ mfc_err("Failed to allocate encoding buffers\n");
+ return -ENOMEM;
+ }
+ mfc_debug(2, "Allocated Internal Encoding Buffers\n");
+
dev->curr_ctx = ctx->num;
ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
if (ret) {
@@ -2387,10 +2560,9 @@ const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev)
R(e_source_first_plane_stride, S5P_FIMV_E_SOURCE_FIRST_STRIDE_V7);
R(e_source_second_plane_stride, S5P_FIMV_E_SOURCE_SECOND_STRIDE_V7);
R(e_source_third_plane_stride, S5P_FIMV_E_SOURCE_THIRD_STRIDE_V7);
- R(e_encoded_source_first_plane_addr,
- S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7);
- R(e_encoded_source_second_plane_addr,
- S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7);
+ R(e_encoded_source_first_plane_addr, S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7);
+ R(e_encoded_source_second_plane_addr, S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7);
+ R(e_encoded_source_third_plane_addr, S5P_FIMV_E_ENCODED_SOURCE_THIRD_ADDR_V7);
R(e_vp8_options, S5P_FIMV_E_VP8_OPTIONS_V7);
if (!IS_MFCV8_PLUS(dev))
@@ -2405,16 +2577,17 @@ const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev)
R(d_cpb_buffer_offset, S5P_FIMV_D_CPB_BUFFER_OFFSET_V8);
R(d_first_plane_dpb_size, S5P_FIMV_D_FIRST_PLANE_DPB_SIZE_V8);
R(d_second_plane_dpb_size, S5P_FIMV_D_SECOND_PLANE_DPB_SIZE_V8);
+ R(d_third_plane_dpb_size, S5P_FIMV_D_THIRD_PLANE_DPB_SIZE_V8);
R(d_scratch_buffer_addr, S5P_FIMV_D_SCRATCH_BUFFER_ADDR_V8);
R(d_scratch_buffer_size, S5P_FIMV_D_SCRATCH_BUFFER_SIZE_V8);
- R(d_first_plane_dpb_stride_size,
- S5P_FIMV_D_FIRST_PLANE_DPB_STRIDE_SIZE_V8);
- R(d_second_plane_dpb_stride_size,
- S5P_FIMV_D_SECOND_PLANE_DPB_STRIDE_SIZE_V8);
+ R(d_first_plane_dpb_stride_size, S5P_FIMV_D_FIRST_PLANE_DPB_STRIDE_SIZE_V8);
+ R(d_second_plane_dpb_stride_size, S5P_FIMV_D_SECOND_PLANE_DPB_STRIDE_SIZE_V8);
+ R(d_third_plane_dpb_stride_size, S5P_FIMV_D_THIRD_PLANE_DPB_STRIDE_SIZE_V8);
R(d_mv_buffer_size, S5P_FIMV_D_MV_BUFFER_SIZE_V8);
R(d_num_mv, S5P_FIMV_D_NUM_MV_V8);
R(d_first_plane_dpb, S5P_FIMV_D_FIRST_PLANE_DPB_V8);
R(d_second_plane_dpb, S5P_FIMV_D_SECOND_PLANE_DPB_V8);
+ R(d_third_plane_dpb, S5P_FIMV_D_THIRD_PLANE_DPB_V8);
R(d_mv_buffer, S5P_FIMV_D_MV_BUFFER_V8);
R(d_init_buffer_options, S5P_FIMV_D_INIT_BUFFER_OPTIONS_V8);
R(d_available_dpb_flag_lower, S5P_FIMV_D_AVAILABLE_DPB_FLAG_LOWER_V8);
@@ -2455,7 +2628,7 @@ const struct s5p_mfc_regs *s5p_mfc_init_regs_v6_plus(struct s5p_mfc_dev *dev)
R(e_h264_options, S5P_FIMV_E_H264_OPTIONS_V8);
R(e_min_scratch_buffer_size, S5P_FIMV_E_MIN_SCRATCH_BUFFER_SIZE_V8);
- if (!IS_MFCV10(dev))
+ if (!IS_MFCV10_PLUS(dev))
goto done;
/* Initialize registers used in MFC v10 only.
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
index e4dd03c54..94ecb0e6e 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.h
@@ -19,10 +19,8 @@
#define MB_WIDTH(x_size) DIV_ROUND_UP(x_size, 16)
#define MB_HEIGHT(y_size) DIV_ROUND_UP(y_size, 16)
-#define S5P_MFC_DEC_MV_SIZE_V6(x, y) (MB_WIDTH(x) * \
- (((MB_HEIGHT(y)+1)/2)*2) * 64 + 128)
-#define S5P_MFC_DEC_MV_SIZE_V10(x, y) (MB_WIDTH(x) * \
- (((MB_HEIGHT(y)+1)/2)*2) * 64 + 512)
+#define S5P_MFC_DEC_MV_SIZE(x, y, offset) (MB_WIDTH(x) * \
+ (((MB_HEIGHT(y) + 1) / 2) * 2) * 64 + (offset))
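+/*
+ * The offset argument carries the per-version tail padding that used to be
+ * hard-coded: callers pass 1024 on MFCv12, 512 on v10+, and 128 otherwise
+ * (the former S5P_MFC_DEC_MV_SIZE_V10 and _V6 values).
+ */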
#define S5P_MFC_LCU_WIDTH(x_size) DIV_ROUND_UP(x_size, 32)
#define S5P_MFC_LCU_HEIGHT(y_size) DIV_ROUND_UP(y_size, 32)
@@ -42,6 +40,7 @@
#define ENC_H264_LEVEL_MAX 42
#define ENC_MPEG4_VOP_TIME_RES_MAX ((1 << 16) - 1)
#define FRAME_DELTA_H264_H263 1
+#define LOOSE_CBR_MAX 5
#define TIGHT_CBR_MAX 10
#define ENC_HEVC_RC_FRAME_RATE_MAX ((1 << 16) - 1)
#define ENC_HEVC_QP_INDEX_MIN -12
diff --git a/drivers/media/platform/st/sti/hva/hva-v4l2.c b/drivers/media/platform/st/sti/hva/hva-v4l2.c
index 3a848ca32..161a5c0fb 100644
--- a/drivers/media/platform/st/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/st/sti/hva/hva-v4l2.c
@@ -569,14 +569,11 @@ static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
struct vb2_buffer *vb2_buf;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);
-
- if (buf->index >= vq->num_buffers) {
- dev_dbg(dev, "%s buffer index %d out of range (%d)\n",
- ctx->name, buf->index, vq->num_buffers);
+ vb2_buf = vb2_get_buffer(vq, buf->index);
+ if (!vb2_buf) {
+ dev_dbg(dev, "%s buffer index %d not found\n", ctx->name, buf->index);
return -EINVAL;
}
-
- vb2_buf = vb2_get_buffer(vq, buf->index);
stream = to_hva_stream(to_vb2_v4l2_buffer(vb2_buf));
stream->bytesused = buf->bytesused;
}
@@ -1145,7 +1142,7 @@ static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->buf_struct_size = sizeof(struct hva_frame);
- src_vq->min_buffers_needed = MIN_FRAMES;
+ src_vq->min_queued_buffers = MIN_FRAMES;
src_vq->dev = ctx->hva_dev->dev;
ret = queue_init(ctx, src_vq);
@@ -1154,7 +1151,7 @@ static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->buf_struct_size = sizeof(struct hva_stream);
- dst_vq->min_buffers_needed = MIN_STREAMS;
+ dst_vq->min_queued_buffers = MIN_STREAMS;
dst_vq->dev = ctx->hva_dev->dev;
return queue_init(ctx, dst_vq);
diff --git a/drivers/media/platform/st/stm32/Kconfig b/drivers/media/platform/st/stm32/Kconfig
index b22dd4753..9df9a2a17 100644
--- a/drivers/media/platform/st/stm32/Kconfig
+++ b/drivers/media/platform/st/stm32/Kconfig
@@ -16,6 +16,22 @@ config VIDEO_STM32_DCMI
To compile this driver as a module, choose M here: the module
will be called stm32-dcmi.
+config VIDEO_STM32_DCMIPP
+ tristate "STM32 Digital Camera Memory Interface Pixel Processor (DCMIPP) support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STM32 || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This module makes the STM32 Digital Camera Memory Interface
+ Pixel Processor (DCMIPP) available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-dcmipp.
+
# Mem2mem drivers
config VIDEO_STM32_DMA2D
tristate "STM32 Chrom-Art Accelerator (DMA2D)"
diff --git a/drivers/media/platform/st/stm32/Makefile b/drivers/media/platform/st/stm32/Makefile
index 896ef98a7..7ed8297b9 100644
--- a/drivers/media/platform/st/stm32/Makefile
+++ b/drivers/media/platform/st/stm32/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
+obj-$(CONFIG_VIDEO_STM32_DCMIPP) += stm32-dcmipp/
stm32-dma2d-objs := dma2d/dma2d.o dma2d/dma2d-hw.o
obj-$(CONFIG_VIDEO_STM32_DMA2D) += stm32-dma2d.o
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index 8cb4fdcae..c4610e305 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -1890,7 +1889,6 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
static int dcmi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match = NULL;
struct v4l2_fwnode_endpoint ep = { .bus_type = 0 };
struct stm32_dcmi *dcmi;
struct vb2_queue *q;
@@ -1899,12 +1897,6 @@ static int dcmi_probe(struct platform_device *pdev)
struct clk *mclk;
int ret = 0;
- match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Could not find a match in devicetree\n");
- return -ENODEV;
- }
-
dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
if (!dcmi)
return -ENOMEM;
@@ -2039,7 +2031,7 @@ static int dcmi_probe(struct platform_device *pdev)
q->ops = &dcmi_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->allow_cache_hints = 1;
q->dev = &pdev->dev;
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile b/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
new file mode 100644
index 000000000..8920d9388
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+stm32-dcmipp-y := dcmipp-core.o dcmipp-common.o dcmipp-parallel.o dcmipp-byteproc.o dcmipp-bytecap.o
+
+obj-$(CONFIG_VIDEO_STM32_DCMIPP) += stm32-dcmipp.o
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
new file mode 100644
index 000000000..9f768f011
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "dcmipp-common.h"
+
+#define DCMIPP_PRSR 0x1f8
+#define DCMIPP_CMIER 0x3f0
+#define DCMIPP_CMIER_P0FRAMEIE BIT(9)
+#define DCMIPP_CMIER_P0VSYNCIE BIT(10)
+#define DCMIPP_CMIER_P0OVRIE BIT(15)
+#define DCMIPP_CMIER_P0ALL (DCMIPP_CMIER_P0VSYNCIE |\
+ DCMIPP_CMIER_P0FRAMEIE |\
+ DCMIPP_CMIER_P0OVRIE)
+#define DCMIPP_CMSR1 0x3f4
+#define DCMIPP_CMSR2 0x3f8
+#define DCMIPP_CMSR2_P0FRAMEF BIT(9)
+#define DCMIPP_CMSR2_P0VSYNCF BIT(10)
+#define DCMIPP_CMSR2_P0OVRF BIT(15)
+#define DCMIPP_CMFCR 0x3fc
+#define DCMIPP_P0FSCR 0x404
+#define DCMIPP_P0FSCR_PIPEN BIT(31)
+#define DCMIPP_P0FCTCR 0x500
+#define DCMIPP_P0FCTCR_CPTREQ BIT(3)
+#define DCMIPP_P0DCCNTR 0x5b0
+#define DCMIPP_P0DCLMTR 0x5b4
+#define DCMIPP_P0DCLMTR_ENABLE BIT(31)
+#define DCMIPP_P0DCLMTR_LIMIT_MASK GENMASK(23, 0)
+#define DCMIPP_P0PPM0AR1 0x5c4
+#define DCMIPP_P0SR 0x5f8
+#define DCMIPP_P0SR_CPTACT BIT(23)
+
+struct dcmipp_bytecap_pix_map {
+ unsigned int code;
+ u32 pixelformat;
+};
+
+#define PIXMAP_MBUS_PFMT(mbus, fmt) \
+ { \
+ .code = MEDIA_BUS_FMT_##mbus, \
+ .pixelformat = V4L2_PIX_FMT_##fmt \
+ }
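+/*
+ * For instance, PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565) expands to
+ * { .code = MEDIA_BUS_FMT_RGB565_2X8_LE, .pixelformat = V4L2_PIX_FMT_RGB565 }.
+ */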
+
+static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
+ PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
+ PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
+ PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
+ PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
+ PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
+ PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
+ PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
+ PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
+ PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
+ PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
+ PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
+};
+
+static const struct dcmipp_bytecap_pix_map *
+dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
+ if (dcmipp_bytecap_pix_map_list[i].pixelformat == pixelformat)
+ return &dcmipp_bytecap_pix_map_list[i];
+ }
+
+ return NULL;
+}
+
+struct dcmipp_buf {
+ struct vb2_v4l2_buffer vb;
+ bool prepared;
+ dma_addr_t addr;
+ size_t size;
+ struct list_head list;
+};
+
+enum dcmipp_state {
+ DCMIPP_STOPPED = 0,
+ DCMIPP_WAIT_FOR_BUFFER,
+ DCMIPP_RUNNING,
+};
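+/*
+ * Capture state machine: STOPPED until start_streaming(); RUNNING while
+ * capturing; WAIT_FOR_BUFFER when no spare buffer is available, until
+ * buf_queue() restarts the capture with a freshly queued buffer.
+ */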
+
+struct dcmipp_bytecap_device {
+ struct dcmipp_ent_device ved;
+ struct video_device vdev;
+ struct device *dev;
+ struct v4l2_pix_format format;
+ struct vb2_queue queue;
+ struct list_head buffers;
+ /*
+ * Protects against concurrent buf-queue / irq-handler calls and
+ * guards the related buffer-handling variables and lists
+ */
+ spinlock_t irqlock;
+ /* mutex used as vdev and queue lock */
+ struct mutex lock;
+ u32 sequence;
+ struct media_pipeline pipe;
+ struct v4l2_subdev *s_subdev;
+
+ enum dcmipp_state state;
+
+ /*
+ * The DCMIPP driver handles 2 buffers:
+ * active: buffer into which the DCMIPP is currently writing
+ * next: buffer handed to the DCMIPP, which automatically becomes
+ * the active one on the next VSYNC
+ */
+ struct dcmipp_buf *active, *next;
+
+ void __iomem *regs;
+
+ u32 cmier;
+ u32 cmsr2;
+
+ struct {
+ u32 errors;
+ u32 limit;
+ u32 overrun;
+ u32 buffers;
+ u32 vsync;
+ u32 frame;
+ u32 it;
+ u32 underrun;
+ u32 nactive;
+ } count;
+};
+
+static const struct v4l2_pix_format fmt_default = {
+ .width = DCMIPP_FMT_WIDTH_DEFAULT,
+ .height = DCMIPP_FMT_HEIGHT_DEFAULT,
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = DCMIPP_FMT_WIDTH_DEFAULT * 2,
+ .sizeimage = DCMIPP_FMT_WIDTH_DEFAULT * DCMIPP_FMT_HEIGHT_DEFAULT * 2,
+ .colorspace = DCMIPP_COLORSPACE_DEFAULT,
+ .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
+ .quantization = DCMIPP_QUANTIZATION_DEFAULT,
+ .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
+};
+
+static int dcmipp_bytecap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DCMIPP_PDEV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int dcmipp_bytecap_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dcmipp_bytecap_device *vcap = video_drvdata(file);
+
+ f->fmt.pix = vcap->format;
+
+ return 0;
+}
+
+static int dcmipp_bytecap_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dcmipp_bytecap_device *vcap = video_drvdata(file);
+ struct v4l2_pix_format *format = &f->fmt.pix;
+ const struct dcmipp_bytecap_pix_map *vpix;
+ u32 in_w, in_h;
+
+ /* Don't accept a pixelformat that is not on the table */
+ vpix = dcmipp_bytecap_pix_map_by_pixelformat(format->pixelformat);
+ if (!vpix)
+ format->pixelformat = fmt_default.pixelformat;
+
+ /* Adjust width & height */
+ in_w = format->width;
+ in_h = format->height;
+ v4l_bound_align_image(&format->width, DCMIPP_FRAME_MIN_WIDTH,
+ DCMIPP_FRAME_MAX_WIDTH, 0, &format->height,
+ DCMIPP_FRAME_MIN_HEIGHT, DCMIPP_FRAME_MAX_HEIGHT,
+ 0, 0);
+ if (format->width != in_w || format->height != in_h)
+ dev_dbg(vcap->dev, "resolution updated: %dx%d -> %dx%d\n",
+ in_w, in_h, format->width, format->height);
+
+ if (format->pixelformat == V4L2_PIX_FMT_JPEG) {
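+ /*
+ * JPEG is compressed, so there is no true line pitch; the
+ * frame width is used as a nominal bytesperline for sizing.
+ */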
+ format->bytesperline = format->width;
+ format->sizeimage = format->bytesperline * format->height;
+ } else {
+ v4l2_fill_pixfmt(format, format->pixelformat,
+ format->width, format->height);
+ }
+
+ if (format->field == V4L2_FIELD_ANY)
+ format->field = fmt_default.field;
+
+ dcmipp_colorimetry_clamp(format);
+
+ return 0;
+}
+
+static int dcmipp_bytecap_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dcmipp_bytecap_device *vcap = video_drvdata(file);
+ int ret;
+
+ /* Do not change the format while stream is on */
+ if (vb2_is_busy(&vcap->queue))
+ return -EBUSY;
+
+ ret = dcmipp_bytecap_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ dev_dbg(vcap->dev, "%s: format update: old:%ux%u (0x%p4cc, %u, %u, %u, %u) new:%ux%d (0x%p4cc, %u, %u, %u, %u)\n",
+ vcap->vdev.name,
+ /* old */
+ vcap->format.width, vcap->format.height,
+ &vcap->format.pixelformat, vcap->format.colorspace,
+ vcap->format.quantization, vcap->format.xfer_func,
+ vcap->format.ycbcr_enc,
+ /* new */
+ f->fmt.pix.width, f->fmt.pix.height,
+ &f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
+ f->fmt.pix.quantization, f->fmt.pix.xfer_func,
+ f->fmt.pix.ycbcr_enc);
+
+ vcap->format = f->fmt.pix;
+
+ return 0;
+}
+
+static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct dcmipp_bytecap_pix_map *vpix;
+ unsigned int index = f->index;
+ unsigned int i;
+
+ if (f->mbus_code) {
+ /*
+ * If a media bus code is specified, only enumerate formats
+ * compatible with it.
+ */
+ for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
+ vpix = &dcmipp_bytecap_pix_map_list[i];
+ if (vpix->code != f->mbus_code)
+ continue;
+
+ if (index == 0)
+ break;
+
+ index--;
+ }
+
+ if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
+ return -EINVAL;
+ } else {
+ /* Otherwise, enumerate all formats. */
+ if (f->index >= ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
+ return -EINVAL;
+
+ vpix = &dcmipp_bytecap_pix_map_list[f->index];
+ }
+
+ f->pixelformat = vpix->pixelformat;
+
+ return 0;
+}
+
+static int dcmipp_bytecap_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct dcmipp_bytecap_pix_map *vpix;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ /* Only accept code in the pix map table */
+ vpix = dcmipp_bytecap_pix_map_by_pixelformat(fsize->pixel_format);
+ if (!vpix)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = DCMIPP_FRAME_MIN_WIDTH;
+ fsize->stepwise.max_width = DCMIPP_FRAME_MAX_WIDTH;
+ fsize->stepwise.min_height = DCMIPP_FRAME_MIN_HEIGHT;
+ fsize->stepwise.max_height = DCMIPP_FRAME_MAX_HEIGHT;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static const struct v4l2_file_operations dcmipp_bytecap_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
+ .vidioc_querycap = dcmipp_bytecap_querycap,
+
+ .vidioc_g_fmt_vid_cap = dcmipp_bytecap_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = dcmipp_bytecap_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = dcmipp_bytecap_try_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = dcmipp_bytecap_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = dcmipp_bytecap_enum_framesizes,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static int dcmipp_pipeline_s_stream(struct dcmipp_bytecap_device *vcap,
+ int state)
+{
+ struct media_pad *pad;
+ int ret;
+
+ /*
+ * Get source subdev - since link is IMMUTABLE, pointer is cached
+ * within the dcmipp_bytecap_device structure
+ */
+ if (!vcap->s_subdev) {
+ pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+ vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
+ }
+
+ ret = v4l2_subdev_call(vcap->s_subdev, video, s_stream, state);
+ if (ret < 0) {
+ dev_err(vcap->dev, "failed to %s streaming (%d)\n",
+ state ? "start" : "stop", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
+ struct dcmipp_buf *buf)
+{
+ /* Set buffer address */
+ reg_write(vcap, DCMIPP_P0PPM0AR1, buf->addr);
+
+ /* Set buffer size */
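+ /* the limit field is expressed in 32-bit words, hence buf->size / 4 */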
+ reg_write(vcap, DCMIPP_P0DCLMTR, DCMIPP_P0DCLMTR_ENABLE |
+ ((buf->size / 4) & DCMIPP_P0DCLMTR_LIMIT_MASK));
+
+ /* Capture request */
+ reg_set(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
+}
+
+static void dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device *vcap,
+ enum vb2_buffer_state state)
+{
+ struct dcmipp_buf *buf, *node;
+
+ list_for_each_entry_safe(buf, node, &vcap->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+}
+
+static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
+ unsigned int count)
+{
+ struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
+ struct media_entity *entity = &vcap->vdev.entity;
+ struct dcmipp_buf *buf;
+ int ret;
+
+ vcap->sequence = 0;
+ memset(&vcap->count, 0, sizeof(vcap->count));
+
+ ret = pm_runtime_resume_and_get(vcap->dev);
+ if (ret < 0) {
+ dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
+ __func__, ret);
+ goto err_buffer_done;
+ }
+
+ ret = media_pipeline_start(entity->pads, &vcap->pipe);
+ if (ret) {
+ dev_dbg(vcap->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
+ __func__, ret);
+ goto err_pm_put;
+ }
+
+ ret = dcmipp_pipeline_s_stream(vcap, 1);
+ if (ret)
+ goto err_media_pipeline_stop;
+
+ spin_lock_irq(&vcap->irqlock);
+
+ /* Enable pipe at the end of programming */
+ reg_set(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);
+
+ /*
+ * the vb2 framework guarantees that at least 'min_queued_buffers'
+ * buffers are queued in the list at this point
+ */
+ vcap->next = list_first_entry(&vcap->buffers, typeof(*buf), list);
+ dev_dbg(vcap->dev, "Start with next [%d] %p phy=%pad\n",
+ vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);
+
+ dcmipp_start_capture(vcap, vcap->next);
+
+ /* Enable interrupts */
+ vcap->cmier |= DCMIPP_CMIER_P0ALL;
+ reg_set(vcap, DCMIPP_CMIER, vcap->cmier);
+
+ vcap->state = DCMIPP_RUNNING;
+
+ spin_unlock_irq(&vcap->irqlock);
+
+ return 0;
+
+err_media_pipeline_stop:
+ media_pipeline_stop(entity->pads);
+err_pm_put:
+ pm_runtime_put(vcap->dev);
+err_buffer_done:
+ spin_lock_irq(&vcap->irqlock);
+ /*
+ * Return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_QUEUED);
+ vcap->active = NULL;
+ spin_unlock_irq(&vcap->irqlock);
+
+ return ret;
+}
+
+static void dcmipp_dump_status(struct dcmipp_bytecap_device *vcap)
+{
+ struct device *dev = vcap->dev;
+
+ dev_dbg(dev, "[DCMIPP_PRSR] =%#10.8x\n", reg_read(vcap, DCMIPP_PRSR));
+ dev_dbg(dev, "[DCMIPP_P0SR] =%#10.8x\n", reg_read(vcap, DCMIPP_P0SR));
+ dev_dbg(dev, "[DCMIPP_P0DCCNTR]=%#10.8x\n",
+ reg_read(vcap, DCMIPP_P0DCCNTR));
+ dev_dbg(dev, "[DCMIPP_CMSR1] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR1));
+ dev_dbg(dev, "[DCMIPP_CMSR2] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR2));
+}
+
+/*
+ * Stop the stream engine. Any remaining buffers in the stream queue are
+ * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
+ */
+static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
+{
+ struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
+ int ret;
+ u32 status;
+
+ dcmipp_pipeline_s_stream(vcap, 0);
+
+ /* Stop the media pipeline */
+ media_pipeline_stop(vcap->vdev.entity.pads);
+
+ /* Disable interrupts */
+ reg_clear(vcap, DCMIPP_CMIER, vcap->cmier);
+
+ /* Stop capture */
+ reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
+
+ /* Wait until CPTACT becomes 0 */
+ ret = readl_relaxed_poll_timeout(vcap->regs + DCMIPP_P0SR, status,
+ !(status & DCMIPP_P0SR_CPTACT),
+ 20 * USEC_PER_MSEC,
+ 1000 * USEC_PER_MSEC);
+ if (ret)
+ dev_warn(vcap->dev, "Timeout when stopping\n");
+
+ /* Disable pipe */
+ reg_clear(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);
+
+ spin_lock_irq(&vcap->irqlock);
+
+ /* Return all queued buffers to vb2 in ERROR state */
+ dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&vcap->buffers);
+
+ vcap->active = NULL;
+ vcap->state = DCMIPP_STOPPED;
+
+ spin_unlock_irq(&vcap->irqlock);
+
+ dcmipp_dump_status(vcap);
+
+ pm_runtime_put(vcap->dev);
+
+ if (vcap->count.errors)
+ dev_warn(vcap->dev, "Some errors found while streaming: errors=%d (overrun=%d, limit=%d, nactive=%d), underrun=%d, buffers=%d\n",
+ vcap->count.errors, vcap->count.overrun,
+ vcap->count.limit, vcap->count.nactive,
+ vcap->count.underrun, vcap->count.buffers);
+}
+
+static int dcmipp_bytecap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
+ unsigned long size;
+
+ size = vcap->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(vcap->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ if (!buf->prepared) {
+ /* Get memory addresses */
+ buf->addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ buf->prepared = true;
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+
+ dev_dbg(vcap->dev, "Setup [%d] phy=%pad size=%zu\n",
+ vb->index, &buf->addr, buf->size);
+ }
+
+ return 0;
+}
+
+static void dcmipp_bytecap_buf_queue(struct vb2_buffer *vb2_buf)
+{
+ struct dcmipp_bytecap_device *vcap =
+ vb2_get_drv_priv(vb2_buf->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2_buf);
+ struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
+
+ dev_dbg(vcap->dev, "Queue [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
+ buf, &buf->addr);
+
+ spin_lock_irq(&vcap->irqlock);
+ list_add_tail(&buf->list, &vcap->buffers);
+
+ if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
+ vcap->next = buf;
+ dev_dbg(vcap->dev, "Restart with next [%d] %p phy=%pad\n",
+ buf->vb.vb2_buf.index, buf, &buf->addr);
+
+ dcmipp_start_capture(vcap, buf);
+
+ vcap->state = DCMIPP_RUNNING;
+ }
+
+ spin_unlock_irq(&vcap->irqlock);
+}
+
+static int dcmipp_bytecap_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
+ unsigned int size;
+
+ size = vcap->format.sizeimage;
+
+ /* Make sure the image size is large enough */
+ if (*nplanes)
+ return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = vcap->format.sizeimage;
+
+ dev_dbg(vcap->dev, "Setup queue, count=%d, size=%d\n",
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int dcmipp_bytecap_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static const struct vb2_ops dcmipp_bytecap_qops = {
+ .start_streaming = dcmipp_bytecap_start_streaming,
+ .stop_streaming = dcmipp_bytecap_stop_streaming,
+ .buf_init = dcmipp_bytecap_buf_init,
+ .buf_prepare = dcmipp_bytecap_buf_prepare,
+ .buf_queue = dcmipp_bytecap_buf_queue,
+ .queue_setup = dcmipp_bytecap_queue_setup,
+ /*
+ * Since q->lock is set we can use the standard
+ * vb2_ops_wait_prepare/finish helper functions.
+ */
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static void dcmipp_bytecap_release(struct video_device *vdev)
+{
+ struct dcmipp_bytecap_device *vcap =
+ container_of(vdev, struct dcmipp_bytecap_device, vdev);
+
+ dcmipp_pads_cleanup(vcap->ved.pads);
+ mutex_destroy(&vcap->lock);
+
+ kfree(vcap);
+}
+
+void dcmipp_bytecap_ent_release(struct dcmipp_ent_device *ved)
+{
+ struct dcmipp_bytecap_device *vcap =
+ container_of(ved, struct dcmipp_bytecap_device, ved);
+
+ media_entity_cleanup(ved->ent);
+ vb2_video_unregister_device(&vcap->vdev);
+}
+
+static void dcmipp_buffer_done(struct dcmipp_bytecap_device *vcap,
+ struct dcmipp_buf *buf,
+ size_t bytesused,
+ int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ list_del_init(&buf->list);
+
+ vbuf = &buf->vb;
+
+ vbuf->sequence = vcap->sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
+ vb2_buffer_done(&vbuf->vb2_buf,
+ err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dev_dbg(vcap->dev, "Done [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
+ buf, &buf->addr);
+ vcap->count.buffers++;
+}
+
+/* irqlock must be held */
+static void
+dcmipp_bytecap_set_next_frame_or_stop(struct dcmipp_bytecap_device *vcap)
+{
+ if (!vcap->next && list_is_singular(&vcap->buffers)) {
+ /*
+ * If there is no available buffer (none or a single one in the
+ * list while two are expected), stop the capture (effective
+		 * for the next frame). The ongoing frame capture will continue
+		 * until FRAME END, but no further capture will be started.
+ */
+ reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
+
+ dev_dbg(vcap->dev, "Capture restart is deferred to next buffer queueing\n");
+ vcap->next = NULL;
+ vcap->state = DCMIPP_WAIT_FOR_BUFFER;
+ return;
+ }
+
+	/* If we don't have a next buffer yet, pick the one after the active one */
+ if (!vcap->next)
+ vcap->next = list_next_entry(vcap->active, list);
+
+ /*
+ * Set buffer address
+ * This register is shadowed and will be taken into
+ * account on next VSYNC (start of next frame)
+ */
+ reg_write(vcap, DCMIPP_P0PPM0AR1, vcap->next->addr);
+ dev_dbg(vcap->dev, "Write [%d] %p phy=%pad\n",
+ vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);
+}
+
+/* irqlock must be held */
+static void dcmipp_bytecap_process_frame(struct dcmipp_bytecap_device *vcap,
+ size_t bytesused)
+{
+ int err = 0;
+ struct dcmipp_buf *buf = vcap->active;
+
+ if (!buf) {
+ vcap->count.nactive++;
+ vcap->count.errors++;
+ return;
+ }
+
+ if (bytesused > buf->size) {
+ dev_dbg(vcap->dev, "frame larger than expected (%zu > %zu)\n",
+ bytesused, buf->size);
+ /* Clip to buffer size and return buffer to V4L2 in error */
+ bytesused = buf->size;
+ vcap->count.limit++;
+ vcap->count.errors++;
+ err = -EOVERFLOW;
+ }
+
+ dcmipp_buffer_done(vcap, buf, bytesused, err);
+ vcap->active = NULL;
+}
+
+static irqreturn_t dcmipp_bytecap_irq_thread(int irq, void *arg)
+{
+ struct dcmipp_bytecap_device *vcap =
+ container_of(arg, struct dcmipp_bytecap_device, ved);
+ size_t bytesused = 0;
+ u32 cmsr2;
+
+ spin_lock_irq(&vcap->irqlock);
+
+ cmsr2 = vcap->cmsr2 & vcap->cmier;
+
+ /*
+	 * If we have an overrun, a frame-end will probably not be generated;
+	 * in that case the active buffer will be recycled as the next buffer
+	 * by the VSYNC handler
+ */
+ if (cmsr2 & DCMIPP_CMSR2_P0OVRF) {
+ vcap->count.errors++;
+ vcap->count.overrun++;
+ }
+
+ if (cmsr2 & DCMIPP_CMSR2_P0FRAMEF) {
+ vcap->count.frame++;
+
+ /* Read captured buffer size */
+ bytesused = reg_read(vcap, DCMIPP_P0DCCNTR);
+ dcmipp_bytecap_process_frame(vcap, bytesused);
+ }
+
+ if (cmsr2 & DCMIPP_CMSR2_P0VSYNCF) {
+ vcap->count.vsync++;
+ if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
+ vcap->count.underrun++;
+ goto out;
+ }
+
+ /*
+ * On VSYNC, the previously set next buffer is going to become
+		 * active thanks to the shadowing mechanism of the DCMIPP. In
+		 * most cases, since a FRAMEEND has already occurred, the next
+		 * pointer is NULL because active was reset during the FRAMEEND
+		 * handling. However, in case of framerate adjustment, there
+		 * are more VSYNC events than FRAMEEND events. Thus we recycle
+		 * the active (but unused) buffer and put it back into next.
+ */
+ swap(vcap->active, vcap->next);
+ dcmipp_bytecap_set_next_frame_or_stop(vcap);
+ }
+
+out:
+ spin_unlock_irq(&vcap->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dcmipp_bytecap_irq_callback(int irq, void *arg)
+{
+ struct dcmipp_bytecap_device *vcap =
+ container_of(arg, struct dcmipp_bytecap_device, ved);
+
+ /* Store interrupt status register */
+ vcap->cmsr2 = reg_read(vcap, DCMIPP_CMSR2) & vcap->cmier;
+ vcap->count.it++;
+
+ /* Clear interrupt */
+ reg_write(vcap, DCMIPP_CMFCR, vcap->cmsr2);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int dcmipp_bytecap_link_validate(struct media_link *link)
+{
+ struct media_entity *entity = link->sink->entity;
+ struct video_device *vd = media_entity_to_video_device(entity);
+ struct dcmipp_bytecap_device *vcap = container_of(vd,
+ struct dcmipp_bytecap_device, vdev);
+ struct v4l2_subdev *source_sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct v4l2_subdev_format source_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = link->source->index,
+ };
+ const struct dcmipp_bytecap_pix_map *vpix;
+ int ret;
+
+ ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
+ if (ret < 0)
+ return 0;
+
+ if (source_fmt.format.width != vcap->format.width ||
+ source_fmt.format.height != vcap->format.height) {
+ dev_err(vcap->dev, "Wrong width or height %ux%u (%ux%u expected)\n",
+			source_fmt.format.width, source_fmt.format.height,
+			vcap->format.width, vcap->format.height);
+ return -EINVAL;
+ }
+
+ vpix = dcmipp_bytecap_pix_map_by_pixelformat(vcap->format.pixelformat);
+ if (source_fmt.format.code != vpix->code) {
+ dev_err(vcap->dev, "Wrong mbus_code 0x%x, (0x%x expected)\n",
+ vpix->code, source_fmt.format.code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations dcmipp_bytecap_entity_ops = {
+ .link_validate = dcmipp_bytecap_link_validate,
+};
+
+struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
+ const char *entity_name,
+ struct v4l2_device *v4l2_dev,
+ void __iomem *regs)
+{
+ struct dcmipp_bytecap_device *vcap;
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ const unsigned long pad_flag = MEDIA_PAD_FL_SINK;
+ int ret = 0;
+
+ /* Allocate the dcmipp_bytecap_device struct */
+ vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
+ if (!vcap)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate the pads */
+ vcap->ved.pads = dcmipp_pads_init(1, &pad_flag);
+ if (IS_ERR(vcap->ved.pads)) {
+ ret = PTR_ERR(vcap->ved.pads);
+ goto err_free_vcap;
+ }
+
+ /* Initialize the media entity */
+ vcap->vdev.entity.name = entity_name;
+ vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ vcap->vdev.entity.ops = &dcmipp_bytecap_entity_ops;
+ ret = media_entity_pads_init(&vcap->vdev.entity, 1, vcap->ved.pads);
+ if (ret)
+ goto err_clean_pads;
+
+ /* Initialize the lock */
+ mutex_init(&vcap->lock);
+
+ /* Initialize the vb2 queue */
+ q = &vcap->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->lock = &vcap->lock;
+ q->drv_priv = vcap;
+ q->buf_struct_size = sizeof(struct dcmipp_buf);
+ q->ops = &dcmipp_bytecap_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_queued_buffers = 1;
+ q->dev = dev;
+
+	/*
+	 * DCMIPP requires 16-byte aligned buffers: clearing the four low bits
+	 * of the 32-bit DMA mask declares unaligned addresses as not
+	 * DMA-able, which enforces the alignment of the vb2 allocations.
+	 */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32) & ~0x0f);
+ if (ret) {
+ dev_err(dev, "Failed to set DMA mask\n");
+ goto err_mutex_destroy;
+ }
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ dev_err(dev, "%s: vb2 queue init failed (err=%d)\n",
+ entity_name, ret);
+ goto err_clean_m_ent;
+ }
+
+ /* Initialize buffer list and its lock */
+ INIT_LIST_HEAD(&vcap->buffers);
+ spin_lock_init(&vcap->irqlock);
+
+ /* Set default frame format */
+ vcap->format = fmt_default;
+
+ /* Fill the dcmipp_ent_device struct */
+ vcap->ved.ent = &vcap->vdev.entity;
+ vcap->ved.handler = dcmipp_bytecap_irq_callback;
+ vcap->ved.thread_fn = dcmipp_bytecap_irq_thread;
+ vcap->dev = dev;
+ vcap->regs = regs;
+
+ /* Initialize the video_device struct */
+ vdev = &vcap->vdev;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_IO_MC;
+ vdev->release = dcmipp_bytecap_release;
+ vdev->fops = &dcmipp_bytecap_fops;
+ vdev->ioctl_ops = &dcmipp_bytecap_ioctl_ops;
+ vdev->lock = &vcap->lock;
+ vdev->queue = q;
+ vdev->v4l2_dev = v4l2_dev;
+ strscpy(vdev->name, entity_name, sizeof(vdev->name));
+ video_set_drvdata(vdev, &vcap->ved);
+
+ /* Register the video_device with the v4l2 and the media framework */
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(dev, "%s: video register failed (err=%d)\n",
+ vcap->vdev.name, ret);
+ goto err_clean_m_ent;
+ }
+
+ return &vcap->ved;
+
+err_clean_m_ent:
+ media_entity_cleanup(&vcap->vdev.entity);
+err_mutex_destroy:
+ mutex_destroy(&vcap->lock);
+err_clean_pads:
+ dcmipp_pads_cleanup(vcap->ved.pads);
+err_free_vcap:
+ kfree(vcap);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
new file mode 100644
index 000000000..5a361ad6b
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-byteproc.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-rect.h>
+#include <media/v4l2-subdev.h>
+
+#include "dcmipp-common.h"
+
+#define DCMIPP_P0FCTCR 0x500
+#define DCMIPP_P0FCTCR_FRATE_MASK GENMASK(1, 0)
+#define DCMIPP_P0SCSTR 0x504
+#define DCMIPP_P0SCSTR_HSTART_SHIFT 0
+#define DCMIPP_P0SCSTR_VSTART_SHIFT 16
+#define DCMIPP_P0SCSZR 0x508
+#define DCMIPP_P0SCSZR_ENABLE BIT(31)
+#define DCMIPP_P0SCSZR_HSIZE_SHIFT 0
+#define DCMIPP_P0SCSZR_VSIZE_SHIFT 16
+#define DCMIPP_P0PPCR 0x5c0
+#define DCMIPP_P0PPCR_BSM_1_2 0x1
+#define DCMIPP_P0PPCR_BSM_1_4 0x2
+#define DCMIPP_P0PPCR_BSM_2_4 0x3
+#define DCMIPP_P0PPCR_BSM_MASK GENMASK(8, 7)
+#define DCMIPP_P0PPCR_BSM_SHIFT 0x7
+#define DCMIPP_P0PPCR_LSM BIT(10)
+#define DCMIPP_P0PPCR_OELS BIT(11)
+
+#define IS_SINK(pad) (!(pad))
+#define IS_SRC(pad) ((pad))
+
+struct dcmipp_byteproc_pix_map {
+ unsigned int code;
+ unsigned int bpp;
+};
+
+#define PIXMAP_MBUS_BPP(mbus, byteperpixel) \
+ { \
+ .code = MEDIA_BUS_FMT_##mbus, \
+ .bpp = byteperpixel, \
+ }
+static const struct dcmipp_byteproc_pix_map dcmipp_byteproc_pix_map_list[] = {
+ PIXMAP_MBUS_BPP(RGB565_2X8_LE, 2),
+ PIXMAP_MBUS_BPP(YUYV8_2X8, 2),
+ PIXMAP_MBUS_BPP(YVYU8_2X8, 2),
+ PIXMAP_MBUS_BPP(UYVY8_2X8, 2),
+ PIXMAP_MBUS_BPP(VYUY8_2X8, 2),
+ PIXMAP_MBUS_BPP(Y8_1X8, 1),
+ PIXMAP_MBUS_BPP(SBGGR8_1X8, 1),
+ PIXMAP_MBUS_BPP(SGBRG8_1X8, 1),
+ PIXMAP_MBUS_BPP(SGRBG8_1X8, 1),
+ PIXMAP_MBUS_BPP(SRGGB8_1X8, 1),
+ PIXMAP_MBUS_BPP(JPEG_1X8, 1),
+};
+
+static const struct dcmipp_byteproc_pix_map *
+dcmipp_byteproc_pix_map_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dcmipp_byteproc_pix_map_list); i++) {
+ if (dcmipp_byteproc_pix_map_list[i].code == code)
+ return &dcmipp_byteproc_pix_map_list[i];
+ }
+
+ return NULL;
+}
+
+struct dcmipp_byteproc_device {
+ struct dcmipp_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ void __iomem *regs;
+ bool streaming;
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = DCMIPP_FMT_WIDTH_DEFAULT,
+ .height = DCMIPP_FMT_HEIGHT_DEFAULT,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = DCMIPP_COLORSPACE_DEFAULT,
+ .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
+ .quantization = DCMIPP_QUANTIZATION_DEFAULT,
+ .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
+};
+
+static const struct v4l2_rect crop_min = {
+ .width = DCMIPP_FRAME_MIN_WIDTH,
+ .height = DCMIPP_FRAME_MIN_HEIGHT,
+ .top = 0,
+ .left = 0,
+};
+
+static void dcmipp_byteproc_adjust_crop(struct v4l2_rect *r,
+ struct v4l2_rect *compose)
+{
+ /* Disallow rectangles smaller than the minimal one. */
+ v4l2_rect_set_min_size(r, &crop_min);
+ v4l2_rect_map_inside(r, compose);
+}
+
+static void dcmipp_byteproc_adjust_compose(struct v4l2_rect *r,
+ const struct v4l2_mbus_framefmt *fmt)
+{
+ r->top = 0;
+ r->left = 0;
+
+ /* Compose is not possible for JPEG or Bayer formats */
+ if (fmt->code == MEDIA_BUS_FMT_JPEG_1X8 ||
+ fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
+ fmt->code == MEDIA_BUS_FMT_SGBRG8_1X8 ||
+ fmt->code == MEDIA_BUS_FMT_SGRBG8_1X8 ||
+ fmt->code == MEDIA_BUS_FMT_SRGGB8_1X8) {
+ r->width = fmt->width;
+ r->height = fmt->height;
+ return;
+ }
+
+ /* Adjust height - we can only perform 1/2 decimation */
+ if (r->height <= (fmt->height / 2))
+ r->height = fmt->height / 2;
+ else
+ r->height = fmt->height;
+
+	/* Adjust width: /2 or /4 for 8-bit formats, /2 for 16-bit formats */
+ if (fmt->code == MEDIA_BUS_FMT_Y8_1X8 && r->width <= (fmt->width / 4))
+ r->width = fmt->width / 4;
+ else if (r->width <= (fmt->width / 2))
+ r->width = fmt->width / 2;
+ else
+ r->width = fmt->width;
+}
+
+static void dcmipp_byteproc_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct dcmipp_byteproc_pix_map *vpix;
+
+	/* Only accept codes present in the pix map table */
+ vpix = dcmipp_byteproc_pix_map_by_code(fmt->code);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, DCMIPP_FRAME_MIN_WIDTH,
+ DCMIPP_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, DCMIPP_FRAME_MIN_HEIGHT,
+ DCMIPP_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
+ fmt->field = fmt_default.field;
+
+ dcmipp_colorimetry_clamp(fmt);
+}
+
+static int dcmipp_byteproc_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *mf;
+ struct v4l2_rect *r;
+
+ mf = v4l2_subdev_state_get_format(sd_state, i);
+ *mf = fmt_default;
+
+ if (IS_SINK(i))
+ r = v4l2_subdev_state_get_compose(sd_state, i);
+ else
+ r = v4l2_subdev_state_get_crop(sd_state, i);
+
+ r->top = 0;
+ r->left = 0;
+ r->width = DCMIPP_FMT_WIDTH_DEFAULT;
+ r->height = DCMIPP_FMT_HEIGHT_DEFAULT;
+ }
+
+ return 0;
+}
+
+static int
+dcmipp_byteproc_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct dcmipp_byteproc_pix_map *vpix;
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (IS_SINK(code->pad)) {
+ if (code->index >= ARRAY_SIZE(dcmipp_byteproc_pix_map_list))
+ return -EINVAL;
+ vpix = &dcmipp_byteproc_pix_map_list[code->index];
+ code->code = vpix->code;
+ } else {
+		/* byteproc cannot convert the media bus format */
+ if (code->index > 0)
+ return -EINVAL;
+
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ code->code = sink_fmt->code;
+ }
+
+ return 0;
+}
+
+static int
+dcmipp_byteproc_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_rect *compose;
+
+ if (fse->index)
+ return -EINVAL;
+
+ fse->min_width = DCMIPP_FRAME_MIN_WIDTH;
+ fse->min_height = DCMIPP_FRAME_MIN_HEIGHT;
+
+ if (IS_SINK(fse->pad)) {
+ fse->max_width = DCMIPP_FRAME_MAX_WIDTH;
+ fse->max_height = DCMIPP_FRAME_MAX_HEIGHT;
+ } else {
+ compose = v4l2_subdev_state_get_compose(sd_state, 0);
+ fse->max_width = compose->width;
+ fse->max_height = compose->height;
+ }
+
+ return 0;
+}
+
+static int dcmipp_byteproc_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ struct v4l2_rect *crop, *compose;
+
+ if (byteproc->streaming)
+ return -EBUSY;
+
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+
+ crop = v4l2_subdev_state_get_crop(sd_state, 1);
+ compose = v4l2_subdev_state_get_compose(sd_state, 0);
+
+ if (IS_SRC(fmt->pad)) {
+ fmt->format = *v4l2_subdev_state_get_format(sd_state, 0);
+ fmt->format.width = crop->width;
+ fmt->format.height = crop->height;
+ } else {
+ dcmipp_byteproc_adjust_fmt(&fmt->format);
+ crop->top = 0;
+ crop->left = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ *compose = *crop;
+ /* Set the same format on SOURCE pad as well */
+ *v4l2_subdev_state_get_format(sd_state, 1) = fmt->format;
+ }
+ *mf = fmt->format;
+
+ return 0;
+}
+
+static int dcmipp_byteproc_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *s)
+{
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_rect *crop, *compose;
+
+ /*
+	 * In the HW, the decimation block is located before the crop block,
+	 * hence:
+	 * - compose is done on the sink pad
+	 * - crop is done on the src pad
+ */
+ if (IS_SINK(s->pad) &&
+ (s->target == V4L2_SEL_TGT_CROP ||
+ s->target == V4L2_SEL_TGT_CROP_BOUNDS ||
+ s->target == V4L2_SEL_TGT_CROP_DEFAULT))
+ return -EINVAL;
+
+ if (IS_SRC(s->pad) &&
+ (s->target == V4L2_SEL_TGT_COMPOSE ||
+ s->target == V4L2_SEL_TGT_COMPOSE_BOUNDS ||
+ s->target == V4L2_SEL_TGT_COMPOSE_DEFAULT))
+ return -EINVAL;
+
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ crop = v4l2_subdev_state_get_crop(sd_state, 1);
+ compose = v4l2_subdev_state_get_compose(sd_state, 0);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ s->r = *crop;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r = *compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r = *compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = sink_fmt->width;
+ s->r.height = sink_fmt->height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dcmipp_byteproc_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *s)
+{
+ struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ struct v4l2_rect *crop, *compose;
+
+ /*
+	 * In the HW, the decimation block is located before the crop block,
+	 * hence:
+	 * - compose is done on the sink pad
+	 * - crop is done on the src pad
+ */
+ if ((s->target == V4L2_SEL_TGT_CROP ||
+ s->target == V4L2_SEL_TGT_CROP_BOUNDS ||
+ s->target == V4L2_SEL_TGT_CROP_DEFAULT) && IS_SINK(s->pad))
+ return -EINVAL;
+
+ if ((s->target == V4L2_SEL_TGT_COMPOSE ||
+ s->target == V4L2_SEL_TGT_COMPOSE_BOUNDS ||
+ s->target == V4L2_SEL_TGT_COMPOSE_DEFAULT) && IS_SRC(s->pad))
+ return -EINVAL;
+
+ crop = v4l2_subdev_state_get_crop(sd_state, 1);
+ compose = v4l2_subdev_state_get_compose(sd_state, 0);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ dcmipp_byteproc_adjust_crop(&s->r, compose);
+
+ *crop = s->r;
+ mf = v4l2_subdev_state_get_format(sd_state, 1);
+ mf->width = s->r.width;
+ mf->height = s->r.height;
+
+ dev_dbg(byteproc->dev, "s_selection: crop %ux%u@(%u,%u)\n",
+ crop->width, crop->height, crop->left, crop->top);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
+ dcmipp_byteproc_adjust_compose(&s->r, mf);
+ *compose = s->r;
+ *crop = s->r;
+
+ mf = v4l2_subdev_state_get_format(sd_state, 1);
+ mf->width = s->r.width;
+ mf->height = s->r.height;
+
+ dev_dbg(byteproc->dev, "s_selection: compose %ux%u@(%u,%u)\n",
+ compose->width, compose->height,
+ compose->left, compose->top);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops dcmipp_byteproc_pad_ops = {
+ .enum_mbus_code = dcmipp_byteproc_enum_mbus_code,
+ .enum_frame_size = dcmipp_byteproc_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = dcmipp_byteproc_set_fmt,
+ .get_selection = dcmipp_byteproc_get_selection,
+ .set_selection = dcmipp_byteproc_set_selection,
+};
+
+static int
+dcmipp_byteproc_configure_scale_crop(struct dcmipp_byteproc_device *byteproc)
+{
+ const struct dcmipp_byteproc_pix_map *vpix;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ u32 hprediv, vprediv;
+ struct v4l2_rect *compose, *crop;
+ u32 val = 0;
+
+ state = v4l2_subdev_lock_and_get_active_state(&byteproc->sd);
+ sink_fmt = v4l2_subdev_state_get_format(state, 0);
+ compose = v4l2_subdev_state_get_compose(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 1);
+ v4l2_subdev_unlock_state(state);
+
+ /* find output format bpp */
+ vpix = dcmipp_byteproc_pix_map_by_code(sink_fmt->code);
+ if (!vpix)
+ return -EINVAL;
+
+ /* clear decimation/crop */
+ reg_clear(byteproc, DCMIPP_P0PPCR, DCMIPP_P0PPCR_BSM_MASK);
+ reg_clear(byteproc, DCMIPP_P0PPCR, DCMIPP_P0PPCR_LSM);
+ reg_write(byteproc, DCMIPP_P0SCSTR, 0);
+ reg_write(byteproc, DCMIPP_P0SCSZR, 0);
+
+ /* Ignore decimation/crop with JPEG */
+ if (vpix->code == MEDIA_BUS_FMT_JPEG_1X8)
+ return 0;
+
+ /* decimation */
+ hprediv = sink_fmt->width / compose->width;
+ if (hprediv == 4)
+ val |= DCMIPP_P0PPCR_BSM_1_4 << DCMIPP_P0PPCR_BSM_SHIFT;
+ else if ((vpix->code == MEDIA_BUS_FMT_Y8_1X8) && (hprediv == 2))
+ val |= DCMIPP_P0PPCR_BSM_1_2 << DCMIPP_P0PPCR_BSM_SHIFT;
+ else if (hprediv == 2)
+ val |= DCMIPP_P0PPCR_BSM_2_4 << DCMIPP_P0PPCR_BSM_SHIFT;
+
+ vprediv = sink_fmt->height / compose->height;
+ if (vprediv == 2)
+ val |= DCMIPP_P0PPCR_LSM | DCMIPP_P0PPCR_OELS;
+
+	/* decimate using byte and line skipping */
+ if (val) {
+ reg_set(byteproc, DCMIPP_P0PPCR, val);
+
+ dev_dbg(byteproc->dev, "decimate to %dx%d [prediv=%dx%d]\n",
+ compose->width, compose->height,
+ hprediv, vprediv);
+ }
+
+ dev_dbg(byteproc->dev, "crop to %dx%d\n", crop->width, crop->height);
+
+	/* expressed in 32-bit words on the X axis, lines on the Y axis */
+ reg_write(byteproc, DCMIPP_P0SCSTR,
+ (((crop->left * vpix->bpp) / 4) <<
+ DCMIPP_P0SCSTR_HSTART_SHIFT) |
+ (crop->top << DCMIPP_P0SCSTR_VSTART_SHIFT));
+ reg_write(byteproc, DCMIPP_P0SCSZR,
+ DCMIPP_P0SCSZR_ENABLE |
+ (((crop->width * vpix->bpp) / 4) <<
+ DCMIPP_P0SCSZR_HSIZE_SHIFT) |
+ (crop->height << DCMIPP_P0SCSZR_VSIZE_SHIFT));
+
+ return 0;
+}
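+
+/*
+ * Worked example (a sketch based on the code above, not an additional code
+ * path): with a 640x480 RGB565 sink format and a 320x240 compose rectangle,
+ * hprediv = vprediv = 2, so DCMIPP_P0PPCR_BSM_2_4 is selected for byte
+ * skipping and DCMIPP_P0PPCR_LSM | DCMIPP_P0PPCR_OELS for line skipping,
+ * before the crop window is programmed in 32-bit words on the X axis.
+ */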
+
+static int dcmipp_byteproc_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *pad;
+ int ret = 0;
+
+ /* Get source subdev */
+ pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (enable) {
+ ret = dcmipp_byteproc_configure_scale_crop(byteproc);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
+ if (ret < 0) {
+ dev_err(byteproc->dev,
+ "failed to start source subdev streaming (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
+ if (ret < 0) {
+ dev_err(byteproc->dev,
+ "failed to stop source subdev streaming (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ byteproc->streaming = enable;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops dcmipp_byteproc_video_ops = {
+ .s_stream = dcmipp_byteproc_s_stream,
+};
+
+static const struct v4l2_subdev_ops dcmipp_byteproc_ops = {
+ .pad = &dcmipp_byteproc_pad_ops,
+ .video = &dcmipp_byteproc_video_ops,
+};
+
+static void dcmipp_byteproc_release(struct v4l2_subdev *sd)
+{
+ struct dcmipp_byteproc_device *byteproc = v4l2_get_subdevdata(sd);
+
+ kfree(byteproc);
+}
+
+static const struct v4l2_subdev_internal_ops dcmipp_byteproc_int_ops = {
+ .init_state = dcmipp_byteproc_init_state,
+ .release = dcmipp_byteproc_release,
+};
+
+void dcmipp_byteproc_ent_release(struct dcmipp_ent_device *ved)
+{
+ struct dcmipp_byteproc_device *byteproc =
+ container_of(ved, struct dcmipp_byteproc_device, ved);
+
+ dcmipp_ent_sd_unregister(ved, &byteproc->sd);
+}
+
+struct dcmipp_ent_device *
+dcmipp_byteproc_ent_init(struct device *dev, const char *entity_name,
+ struct v4l2_device *v4l2_dev, void __iomem *regs)
+{
+ struct dcmipp_byteproc_device *byteproc;
+ const unsigned long pads_flag[] = {
+ MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
+ };
+ int ret;
+
+ /* Allocate the byteproc struct */
+ byteproc = kzalloc(sizeof(*byteproc), GFP_KERNEL);
+ if (!byteproc)
+ return ERR_PTR(-ENOMEM);
+
+ byteproc->regs = regs;
+
+ /* Initialize ved and sd */
+ ret = dcmipp_ent_sd_register(&byteproc->ved, &byteproc->sd,
+ v4l2_dev, entity_name,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER,
+ ARRAY_SIZE(pads_flag), pads_flag,
+ &dcmipp_byteproc_int_ops,
+ &dcmipp_byteproc_ops,
+ NULL, NULL);
+ if (ret) {
+ kfree(byteproc);
+ return ERR_PTR(ret);
+ }
+
+ byteproc->dev = dev;
+
+ return &byteproc->ved;
+}
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.c
new file mode 100644
index 000000000..562933e08
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "dcmipp-common.h"
+
+/* Helper function to allocate and initialize pads */
+struct media_pad *dcmipp_pads_init(u16 num_pads, const unsigned long *pads_flags)
+{
+ struct media_pad *pads;
+ unsigned int i;
+
+ /* Allocate memory for the pads */
+ pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the pads */
+ for (i = 0; i < num_pads; i++) {
+ pads[i].index = i;
+ pads[i].flags = pads_flags[i];
+ }
+
+ return pads;
+}
+
+static const struct media_entity_operations dcmipp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+int dcmipp_ent_sd_register(struct dcmipp_ent_device *ved,
+ struct v4l2_subdev *sd,
+ struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u32 function,
+ u16 num_pads,
+ const unsigned long *pads_flag,
+ const struct v4l2_subdev_internal_ops *sd_int_ops,
+ const struct v4l2_subdev_ops *sd_ops,
+ irq_handler_t handler,
+ irq_handler_t thread_fn)
+{
+ int ret;
+
+	/* Allocate the pads. They should be released from the sd_int_ops release callback */
+ ved->pads = dcmipp_pads_init(num_pads, pads_flag);
+ if (IS_ERR(ved->pads))
+ return PTR_ERR(ved->pads);
+
+ /* Fill the dcmipp_ent_device struct */
+ ved->ent = &sd->entity;
+
+ /* Initialize the subdev */
+ v4l2_subdev_init(sd, sd_ops);
+ sd->internal_ops = sd_int_ops;
+ sd->entity.function = function;
+ sd->entity.ops = &dcmipp_entity_ops;
+ sd->owner = THIS_MODULE;
+ strscpy(sd->name, name, sizeof(sd->name));
+ v4l2_set_subdevdata(sd, ved);
+
+ /* Expose this subdev to user space */
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ if (sd->ctrl_handler)
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Initialize the media entity */
+ ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ if (ret)
+ goto err_clean_pads;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret < 0)
+ goto err_clean_m_ent;
+
+ /* Register the subdev with the v4l2 and the media framework */
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret) {
+ dev_err(v4l2_dev->dev,
+ "%s: subdev register failed (err=%d)\n",
+ name, ret);
+ goto err_clean_m_ent;
+ }
+
+ ved->handler = handler;
+ ved->thread_fn = thread_fn;
+
+ return 0;
+
+err_clean_m_ent:
+ media_entity_cleanup(&sd->entity);
+err_clean_pads:
+ dcmipp_pads_cleanup(ved->pads);
+ return ret;
+}
+
+void
+dcmipp_ent_sd_unregister(struct dcmipp_ent_device *ved, struct v4l2_subdev *sd)
+{
+ media_entity_cleanup(ved->ent);
+ v4l2_device_unregister_subdev(sd);
+}
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
new file mode 100644
index 000000000..7a7cf43ba
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-common.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#ifndef _DCMIPP_COMMON_H_
+#define _DCMIPP_COMMON_H_
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define DCMIPP_PDEV_NAME "dcmipp"
+
+#define DCMIPP_FRAME_MAX_WIDTH 4096
+#define DCMIPP_FRAME_MAX_HEIGHT 2160
+#define DCMIPP_FRAME_MIN_WIDTH 16
+#define DCMIPP_FRAME_MIN_HEIGHT 16
+
+#define DCMIPP_FMT_WIDTH_DEFAULT 640
+#define DCMIPP_FMT_HEIGHT_DEFAULT 480
+
+#define DCMIPP_COLORSPACE_DEFAULT V4L2_COLORSPACE_REC709
+#define DCMIPP_YCBCR_ENC_DEFAULT V4L2_YCBCR_ENC_DEFAULT
+#define DCMIPP_QUANTIZATION_DEFAULT V4L2_QUANTIZATION_DEFAULT
+#define DCMIPP_XFER_FUNC_DEFAULT V4L2_XFER_FUNC_DEFAULT
+
+/**
+ * dcmipp_colorimetry_clamp() - Adjust colorimetry parameters
+ *
+ * @fmt: the pointer to struct v4l2_pix_format or
+ * struct v4l2_mbus_framefmt
+ *
+ * Entities must check whether the colorimetry given by userspace is valid;
+ * if not, they must reset it to the DEFAULT values
+ */
+#define dcmipp_colorimetry_clamp(fmt) \
+do { \
+ if ((fmt)->colorspace == V4L2_COLORSPACE_DEFAULT || \
+ (fmt)->colorspace > V4L2_COLORSPACE_DCI_P3) { \
+ (fmt)->colorspace = DCMIPP_COLORSPACE_DEFAULT; \
+ (fmt)->ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT; \
+ (fmt)->quantization = DCMIPP_QUANTIZATION_DEFAULT; \
+ (fmt)->xfer_func = DCMIPP_XFER_FUNC_DEFAULT; \
+ } \
+ if ((fmt)->ycbcr_enc > V4L2_YCBCR_ENC_SMPTE240M) \
+ (fmt)->ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT; \
+ if ((fmt)->quantization > V4L2_QUANTIZATION_LIM_RANGE) \
+ (fmt)->quantization = DCMIPP_QUANTIZATION_DEFAULT; \
+ if ((fmt)->xfer_func > V4L2_XFER_FUNC_SMPTE2084) \
+ (fmt)->xfer_func = DCMIPP_XFER_FUNC_DEFAULT; \
+} while (0)
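+
+/*
+ * Minimal usage sketch (mirroring the adjust_fmt helpers in this driver):
+ * clamp the colorimetry of a format proposed by userspace before storing it.
+ *
+ *	struct v4l2_mbus_framefmt fmt = {
+ *		.colorspace = V4L2_COLORSPACE_DEFAULT,
+ *	};
+ *
+ *	dcmipp_colorimetry_clamp(&fmt);
+ *
+ * After the call, fmt.colorspace is DCMIPP_COLORSPACE_DEFAULT and the
+ * ycbcr_enc/quantization/xfer_func fields hold their DCMIPP defaults.
+ */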
+
+/**
+ * struct dcmipp_ent_device - core struct that represents a node in the topology
+ *
+ * @ent: the pointer to struct media_entity for the node
+ * @pads: the list of pads of the node
+ * @bus: struct v4l2_mbus_config_parallel describing input bus
+ * @bus_type: type of input bus (parallel or BT656)
+ * @handler: irq handler dedicated to the subdev
+ * @handler_ret: value returned by the irq handler
+ * @thread_fn: threaded irq handler
+ *
+ * The DCMIPP provides a single IRQ line and IRQ status registers shared by
+ * all subdevs; hence, once the main irq handler (registered at probe time)
+ * is called, it chains calls to the irq handler of each of the subdevs of
+ * the pipeline, using the handler/handler_ret/thread_fn variables.
+ *
+ * Each node of the topology must create a dcmipp_ent_device struct.
+ * Depending on the node, it will be an instance of either the v4l2_subdev
+ * or the video_device struct, both of which contain a struct media_entity.
+ * Those structures should embed the dcmipp_ent_device struct through
+ * v4l2_set_subdevdata() and video_set_drvdata() respectively, allowing the
+ * dcmipp_ent_device struct to be retrieved from the corresponding struct
+ * media_entity.
+ */
+struct dcmipp_ent_device {
+ struct media_entity *ent;
+ struct media_pad *pads;
+
+ /* Parallel input device */
+ struct v4l2_mbus_config_parallel bus;
+ enum v4l2_mbus_type bus_type;
+ irq_handler_t handler;
+ irqreturn_t handler_ret;
+ irq_handler_t thread_fn;
+};
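+
+/*
+ * The chaining described above is implemented in dcmipp-core.c:
+ * dcmipp_irq_callback() stores each entity handler's return value in
+ * @handler_ret, and dcmipp_irq_thread() then calls @thread_fn for every
+ * entity whose handler requested IRQ_WAKE_THREAD.
+ */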
+
+/**
+ * dcmipp_pads_init - initialize pads
+ *
+ * @num_pads: number of pads to initialize
+ * @pads_flags: flags to use in each pad
+ *
+ * Helper function to allocate and initialize pads
+ */
+struct media_pad *dcmipp_pads_init(u16 num_pads,
+ const unsigned long *pads_flags);
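+
+/*
+ * Typical use (abridged from the bytecap entity): a capture node exposes a
+ * single sink pad.
+ *
+ *	const unsigned long pad_flag = MEDIA_PAD_FL_SINK;
+ *	struct media_pad *pads;
+ *
+ *	pads = dcmipp_pads_init(1, &pad_flag);
+ *	if (IS_ERR(pads))
+ *		return PTR_ERR(pads);
+ */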
+
+/**
+ * dcmipp_pads_cleanup - free pads
+ *
+ * @pads: pointer to the pads
+ *
+ * Helper function to free the pads initialized with dcmipp_pads_init
+ */
+static inline void dcmipp_pads_cleanup(struct media_pad *pads)
+{
+ kfree(pads);
+}
+
+/**
+ * dcmipp_ent_sd_register - initialize and register a subdev node
+ *
+ * @ved: the dcmipp_ent_device struct to be initialized
+ * @sd: the v4l2_subdev struct to be initialized and registered
+ * @v4l2_dev: the v4l2 device to register the v4l2_subdev
+ * @name: name of the sub-device. Please note that the name must be
+ * unique.
+ * @function: media entity function defined by MEDIA_ENT_F_* macros
+ * @num_pads: number of pads to initialize
+ * @pads_flag: flags to use in each pad
+ * @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
+ * @sd_ops: pointer to &struct v4l2_subdev_ops.
+ * @handler: func pointer of the irq handler
+ * @thread_fn: func pointer of the threaded irq handler
+ *
+ * Helper function to initialize and register the struct dcmipp_ent_device
+ * and struct v4l2_subdev which represent a subdev node in the topology
+ */
+int dcmipp_ent_sd_register(struct dcmipp_ent_device *ved,
+ struct v4l2_subdev *sd,
+ struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u32 function,
+ u16 num_pads,
+ const unsigned long *pads_flag,
+ const struct v4l2_subdev_internal_ops *sd_int_ops,
+ const struct v4l2_subdev_ops *sd_ops,
+ irq_handler_t handler,
+ irq_handler_t thread_fn);
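+
+/*
+ * Example (abridged from the byteproc entity, error handling omitted):
+ *
+ *	static const unsigned long pads_flag[] = {
+ *		MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
+ *	};
+ *
+ *	ret = dcmipp_ent_sd_register(&byteproc->ved, &byteproc->sd,
+ *				     v4l2_dev, entity_name,
+ *				     MEDIA_ENT_F_PROC_VIDEO_SCALER,
+ *				     ARRAY_SIZE(pads_flag), pads_flag,
+ *				     &dcmipp_byteproc_int_ops,
+ *				     &dcmipp_byteproc_ops, NULL, NULL);
+ */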
+
+/**
+ * dcmipp_ent_sd_unregister - cleanup and unregister a subdev node
+ *
+ * @ved: the dcmipp_ent_device struct to be cleaned up
+ * @sd: the v4l2_subdev struct to be unregistered
+ *
+ * Helper function to clean up and unregister the struct dcmipp_ent_device
+ * and struct v4l2_subdev which represent a subdev node in the topology
+ */
+void dcmipp_ent_sd_unregister(struct dcmipp_ent_device *ved,
+ struct v4l2_subdev *sd);
+
+#define reg_write(device, reg, val) \
+ (__reg_write((device)->dev, (device)->regs, (reg), (val)))
+#define reg_read(device, reg) \
+ (__reg_read((device)->dev, (device)->regs, (reg)))
+#define reg_set(device, reg, mask) \
+ (__reg_set((device)->dev, (device)->regs, (reg), (mask)))
+#define reg_clear(device, reg, mask) \
+ (__reg_clear((device)->dev, (device)->regs, (reg), (mask)))
+
+static inline u32 __reg_read(struct device *dev, void __iomem *base, u32 reg)
+{
+ u32 val = readl_relaxed(base + reg);
+
+ dev_dbg(dev, "RD 0x%x %#10.8x\n", reg, val);
+ return val;
+}
+
+static inline void __reg_write(struct device *dev, void __iomem *base, u32 reg,
+ u32 val)
+{
+ dev_dbg(dev, "WR 0x%x %#10.8x\n", reg, val);
+ writel_relaxed(val, base + reg);
+}
+
+static inline void __reg_set(struct device *dev, void __iomem *base, u32 reg,
+ u32 mask)
+{
+ dev_dbg(dev, "SET 0x%x %#10.8x\n", reg, mask);
+ __reg_write(dev, base, reg, readl_relaxed(base + reg) | mask);
+}
+
+static inline void __reg_clear(struct device *dev, void __iomem *base, u32 reg,
+ u32 mask)
+{
+ dev_dbg(dev, "CLR 0x%x %#10.8x\n", reg, mask);
+ __reg_write(dev, base, reg, readl_relaxed(base + reg) & ~mask);
+}
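+
+/*
+ * Note that reg_set() and reg_clear() perform a non-atomic
+ * read-modify-write using relaxed MMIO accessors, so callers must
+ * serialize access to a given register; the bytecap entity, for instance,
+ * clears DCMIPP_P0FCTCR_CPTREQ with its irqlock held:
+ *
+ *	reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
+ */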
+
+/* DCMIPP subdev init / release entry points */
+struct dcmipp_ent_device *dcmipp_par_ent_init(struct device *dev,
+ const char *entity_name,
+ struct v4l2_device *v4l2_dev,
+ void __iomem *regs);
+void dcmipp_par_ent_release(struct dcmipp_ent_device *ved);
+struct dcmipp_ent_device *
+dcmipp_byteproc_ent_init(struct device *dev, const char *entity_name,
+ struct v4l2_device *v4l2_dev, void __iomem *regs);
+void dcmipp_byteproc_ent_release(struct dcmipp_ent_device *ved);
+struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
+ const char *entity_name,
+ struct v4l2_device *v4l2_dev,
+ void __iomem *regs);
+void dcmipp_bytecap_ent_release(struct dcmipp_ent_device *ved);
+
+#endif
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
new file mode 100644
index 000000000..32c6619be
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "dcmipp-common.h"
+
+#define DCMIPP_MDEV_MODEL_NAME "DCMIPP MDEV"
+
+#define DCMIPP_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
+ .src_ent = src, \
+ .src_pad = srcpad, \
+ .sink_ent = sink, \
+ .sink_pad = sinkpad, \
+ .flags = link_flags, \
+}
+
+struct dcmipp_device {
+ /* The platform device */
+ struct platform_device pdev;
+ struct device *dev;
+
+ /* Hardware resources */
+ void __iomem *regs;
+ struct clk *kclk;
+
+ /* The pipeline configuration */
+ const struct dcmipp_pipeline_config *pipe_cfg;
+
+	/* The associated media_device parent */
+ struct media_device mdev;
+
+	/* Internal v4l2 parent device */
+ struct v4l2_device v4l2_dev;
+
+ /* Entities */
+ struct dcmipp_ent_device **entity;
+
+ struct v4l2_async_notifier notifier;
+};
+
+static inline struct dcmipp_device *
+notifier_to_dcmipp(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct dcmipp_device, notifier);
+}
+
+/* Structure which describes individual configuration for each entity */
+struct dcmipp_ent_config {
+ const char *name;
+ struct dcmipp_ent_device *(*init)
+ (struct device *dev, const char *entity_name,
+ struct v4l2_device *v4l2_dev, void __iomem *regs);
+ void (*release)(struct dcmipp_ent_device *ved);
+};
+
+/* Structure which describes links between entities */
+struct dcmipp_ent_link {
+ unsigned int src_ent;
+ u16 src_pad;
+ unsigned int sink_ent;
+ u16 sink_pad;
+ u32 flags;
+};
+
+/* Structure which describes the whole topology */
+struct dcmipp_pipeline_config {
+ const struct dcmipp_ent_config *ents;
+ size_t num_ents;
+ const struct dcmipp_ent_link *links;
+ size_t num_links;
+};
+
+/* --------------------------------------------------------------------------
+ * Topology Configuration
+ */
+
+static const struct dcmipp_ent_config stm32mp13_ent_config[] = {
+ {
+ .name = "dcmipp_parallel",
+ .init = dcmipp_par_ent_init,
+ .release = dcmipp_par_ent_release,
+ },
+ {
+ .name = "dcmipp_dump_postproc",
+ .init = dcmipp_byteproc_ent_init,
+ .release = dcmipp_byteproc_ent_release,
+ },
+ {
+ .name = "dcmipp_dump_capture",
+ .init = dcmipp_bytecap_ent_init,
+ .release = dcmipp_bytecap_ent_release,
+ },
+};
+
+#define ID_PARALLEL 0
+#define ID_DUMP_BYTEPROC 1
+#define ID_DUMP_CAPTURE 2
+
+static const struct dcmipp_ent_link stm32mp13_ent_links[] = {
+ DCMIPP_ENT_LINK(ID_PARALLEL, 1, ID_DUMP_BYTEPROC, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ DCMIPP_ENT_LINK(ID_DUMP_BYTEPROC, 1, ID_DUMP_CAPTURE, 0,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+};
+
+static const struct dcmipp_pipeline_config stm32mp13_pipe_cfg = {
+ .ents = stm32mp13_ent_config,
+ .num_ents = ARRAY_SIZE(stm32mp13_ent_config),
+ .links = stm32mp13_ent_links,
+ .num_links = ARRAY_SIZE(stm32mp13_ent_links)
+};
+
+#define LINK_FLAG_TO_STR(f) ((f) == 0 ? "" :\
+ (f) == MEDIA_LNK_FL_ENABLED ? "ENABLED" :\
+ (f) == MEDIA_LNK_FL_IMMUTABLE ? "IMMUTABLE" :\
+ (f) == (MEDIA_LNK_FL_ENABLED |\
+ MEDIA_LNK_FL_IMMUTABLE) ?\
+ "ENABLED, IMMUTABLE" :\
+ "UNKNOWN")
+
+static int dcmipp_create_links(struct dcmipp_device *dcmipp)
+{
+ unsigned int i;
+ int ret;
+
+ /* Initialize the links between entities */
+ for (i = 0; i < dcmipp->pipe_cfg->num_links; i++) {
+ const struct dcmipp_ent_link *link =
+ &dcmipp->pipe_cfg->links[i];
+ struct dcmipp_ent_device *ved_src =
+ dcmipp->entity[link->src_ent];
+ struct dcmipp_ent_device *ved_sink =
+ dcmipp->entity[link->sink_ent];
+
+ dev_dbg(dcmipp->dev, "Create link \"%s\":%d -> %d:\"%s\" [%s]\n",
+ dcmipp->pipe_cfg->ents[link->src_ent].name,
+ link->src_pad, link->sink_pad,
+ dcmipp->pipe_cfg->ents[link->sink_ent].name,
+ LINK_FLAG_TO_STR(link->flags));
+
+ ret = media_create_pad_link(ved_src->ent, link->src_pad,
+ ved_sink->ent, link->sink_pad,
+ link->flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmipp_graph_init(struct dcmipp_device *dcmipp);
+
+static int dcmipp_create_subdevs(struct dcmipp_device *dcmipp)
+{
+ int ret, i;
+
+ /* Call all subdev inits */
+ for (i = 0; i < dcmipp->pipe_cfg->num_ents; i++) {
+ const char *name = dcmipp->pipe_cfg->ents[i].name;
+
+ dev_dbg(dcmipp->dev, "add subdev %s\n", name);
+ dcmipp->entity[i] =
+ dcmipp->pipe_cfg->ents[i].init(dcmipp->dev, name,
+ &dcmipp->v4l2_dev,
+ dcmipp->regs);
+ if (IS_ERR(dcmipp->entity[i])) {
+ dev_err(dcmipp->dev, "failed to init subdev %s\n",
+ name);
+ ret = PTR_ERR(dcmipp->entity[i]);
+ goto err_init_entity;
+ }
+ }
+
+ /* Initialize links */
+ ret = dcmipp_create_links(dcmipp);
+ if (ret)
+ goto err_init_entity;
+
+ ret = dcmipp_graph_init(dcmipp);
+ if (ret < 0)
+ goto err_init_entity;
+
+ return 0;
+
+err_init_entity:
+	while (i-- > 0)
+		dcmipp->pipe_cfg->ents[i].release(dcmipp->entity[i]);
+ return ret;
+}
+
+static const struct of_device_id dcmipp_of_match[] = {
+ { .compatible = "st,stm32mp13-dcmipp", .data = &stm32mp13_pipe_cfg },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, dcmipp_of_match);
+
+static irqreturn_t dcmipp_irq_thread(int irq, void *arg)
+{
+ struct dcmipp_device *dcmipp = arg;
+ struct dcmipp_ent_device *ved;
+ unsigned int i;
+
+	/* Call the threaded irq handler of each entity of the pipeline */
+ for (i = 0; i < dcmipp->pipe_cfg->num_ents; i++) {
+ ved = dcmipp->entity[i];
+ if (ved->thread_fn && ved->handler_ret == IRQ_WAKE_THREAD)
+ ved->thread_fn(irq, ved);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dcmipp_irq_callback(int irq, void *arg)
+{
+ struct dcmipp_device *dcmipp = arg;
+ struct dcmipp_ent_device *ved;
+ irqreturn_t ret = IRQ_HANDLED;
+ unsigned int i;
+
+	/* Call the irq handler of each entity of the pipeline */
+ for (i = 0; i < dcmipp->pipe_cfg->num_ents; i++) {
+ ved = dcmipp->entity[i];
+ if (ved->handler)
+ ved->handler_ret = ved->handler(irq, ved);
+ else if (ved->thread_fn)
+ ved->handler_ret = IRQ_WAKE_THREAD;
+ else
+ ved->handler_ret = IRQ_HANDLED;
+ if (ved->handler_ret != IRQ_HANDLED)
+ ret = ved->handler_ret;
+ }
+
+ return ret;
+}
+
+static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
+	int ret;
+ int src_pad;
+ struct dcmipp_ent_device *sink;
+ struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_PARALLEL };
+ struct fwnode_handle *ep;
+
+ dev_dbg(dcmipp->dev, "Subdev \"%s\" bound\n", subdev->name);
+
+ /*
+	 * Link this sub-device to DCMIPP; it could be
+ * a parallel camera sensor or a CSI-2 to parallel bridge
+ */
+ src_pad = media_entity_get_fwnode_pad(&subdev->entity,
+ subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+
+ /* Get bus characteristics from devicetree */
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dcmipp->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep) {
+ dev_err(dcmipp->dev, "Could not find the endpoint\n");
+ return -ENODEV;
+ }
+
+	/* Check for parallel bus type first, then BT656 */
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret) {
+ vep.bus_type = V4L2_MBUS_BT656;
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret) {
+ dev_err(dcmipp->dev, "Could not parse the endpoint\n");
+ fwnode_handle_put(ep);
+ return ret;
+ }
+ }
+
+ fwnode_handle_put(ep);
+
+ if (vep.bus.parallel.bus_width == 0) {
+ dev_err(dcmipp->dev, "Invalid parallel interface bus-width\n");
+ return -ENODEV;
+ }
+
+	/* Only an 8-bit bus width is supported with the BT656 bus */
+ if (vep.bus_type == V4L2_MBUS_BT656 &&
+ vep.bus.parallel.bus_width != 8) {
+ dev_err(dcmipp->dev, "BT656 bus conflicts with %u bits bus width (8 bits required)\n",
+ vep.bus.parallel.bus_width);
+ return -ENODEV;
+ }
+
+ /* Parallel input device detected, connect it to parallel subdev */
+ sink = dcmipp->entity[ID_PARALLEL];
+ sink->bus.flags = vep.bus.parallel.flags;
+ sink->bus.bus_width = vep.bus.parallel.bus_width;
+ sink->bus.data_shift = vep.bus.parallel.data_shift;
+ sink->bus_type = vep.bus_type;
+ ret = media_create_pad_link(&subdev->entity, src_pad, sink->ent, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(dcmipp->dev, "Failed to create media pad link with subdev \"%s\"\n",
+ subdev->name);
+ return ret;
+ }
+
+ dev_dbg(dcmipp->dev, "DCMIPP is now linked to \"%s\"\n", subdev->name);
+
+ return 0;
+}
+
+static void dcmipp_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asd)
+{
+ struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
+
+ dev_dbg(dcmipp->dev, "Removing %s\n", sd->name);
+}
+
+static int dcmipp_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
+ int ret;
+
+ /* Register the media device */
+ ret = media_device_register(&dcmipp->mdev);
+ if (ret) {
+ dev_err(dcmipp->mdev.dev,
+ "media device register failed (err=%d)\n", ret);
+ return ret;
+ }
+
+	/* Expose all subdev nodes */
+ ret = v4l2_device_register_subdev_nodes(&dcmipp->v4l2_dev);
+ if (ret) {
+ dev_err(dcmipp->mdev.dev,
+ "dcmipp subdev nodes registration failed (err=%d)\n",
+ ret);
+ media_device_unregister(&dcmipp->mdev);
+ return ret;
+ }
+
+ dev_dbg(dcmipp->dev, "Notify complete !\n");
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations dcmipp_graph_notify_ops = {
+ .bound = dcmipp_graph_notify_bound,
+ .unbind = dcmipp_graph_notify_unbind,
+ .complete = dcmipp_graph_notify_complete,
+};
+
+static int dcmipp_graph_init(struct dcmipp_device *dcmipp)
+{
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dcmipp->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep) {
+ dev_err(dcmipp->dev, "Failed to get next endpoint\n");
+ return -EINVAL;
+ }
+
+ v4l2_async_nf_init(&dcmipp->notifier, &dcmipp->v4l2_dev);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&dcmipp->notifier, ep,
+ struct v4l2_async_connection);
+
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd)) {
+ dev_err(dcmipp->dev, "Failed to add fwnode remote subdev\n");
+ return PTR_ERR(asd);
+ }
+
+ dcmipp->notifier.ops = &dcmipp_graph_notify_ops;
+
+ ret = v4l2_async_nf_register(&dcmipp->notifier);
+ if (ret < 0) {
+ dev_err(dcmipp->dev, "Failed to register notifier\n");
+ v4l2_async_nf_cleanup(&dcmipp->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmipp_probe(struct platform_device *pdev)
+{
+ struct dcmipp_device *dcmipp;
+ struct clk *kclk;
+ const struct dcmipp_pipeline_config *pipe_cfg;
+ struct reset_control *rstc;
+ int irq;
+ int ret;
+
+ dcmipp = devm_kzalloc(&pdev->dev, sizeof(*dcmipp), GFP_KERNEL);
+ if (!dcmipp)
+ return -ENOMEM;
+
+ dcmipp->dev = &pdev->dev;
+
+ pipe_cfg = device_get_match_data(dcmipp->dev);
+ if (!pipe_cfg) {
+ dev_err(&pdev->dev, "Can't get device data\n");
+ return -ENODEV;
+ }
+ dcmipp->pipe_cfg = pipe_cfg;
+
+ platform_set_drvdata(pdev, dcmipp);
+
+ /* Get hardware resources from devicetree */
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Could not get reset control\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get irq\n");
+ return irq ? irq : -ENXIO;
+ }
+
+ dcmipp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(dcmipp->regs)) {
+ dev_err(&pdev->dev, "Could not map registers\n");
+ return PTR_ERR(dcmipp->regs);
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, dcmipp_irq_callback,
+ dcmipp_irq_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dcmipp);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ return ret;
+ }
+
+ /* Reset device */
+ ret = reset_control_assert(rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to assert the reset line\n");
+ return ret;
+ }
+
+ usleep_range(3000, 5000);
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert the reset line\n");
+ return ret;
+ }
+
+ kclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(kclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(kclk),
+ "Unable to get kclk\n");
+ dcmipp->kclk = kclk;
+
+ dcmipp->entity = devm_kcalloc(&pdev->dev, dcmipp->pipe_cfg->num_ents,
+ sizeof(*dcmipp->entity), GFP_KERNEL);
+ if (!dcmipp->entity)
+ return -ENOMEM;
+
+ /* Register the v4l2 struct */
+ ret = v4l2_device_register(&pdev->dev, &dcmipp->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "v4l2 device register failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ /* Link the media device within the v4l2_device */
+ dcmipp->v4l2_dev.mdev = &dcmipp->mdev;
+
+ /* Initialize media device */
+ strscpy(dcmipp->mdev.model, DCMIPP_MDEV_MODEL_NAME,
+ sizeof(dcmipp->mdev.model));
+ dcmipp->mdev.dev = &pdev->dev;
+ media_device_init(&dcmipp->mdev);
+
+ /* Initialize subdevs */
+ ret = dcmipp_create_subdevs(dcmipp);
+ if (ret) {
+ media_device_cleanup(&dcmipp->mdev);
+ v4l2_device_unregister(&dcmipp->v4l2_dev);
+ return ret;
+ }
+
+ pm_runtime_enable(dcmipp->dev);
+
+ dev_info(&pdev->dev, "Probe done");
+
+ return 0;
+}
+
+static int dcmipp_remove(struct platform_device *pdev)
+{
+ struct dcmipp_device *dcmipp = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_nf_unregister(&dcmipp->notifier);
+ v4l2_async_nf_cleanup(&dcmipp->notifier);
+
+ for (i = 0; i < dcmipp->pipe_cfg->num_ents; i++)
+ dcmipp->pipe_cfg->ents[i].release(dcmipp->entity[i]);
+
+ media_device_unregister(&dcmipp->mdev);
+ media_device_cleanup(&dcmipp->mdev);
+
+ v4l2_device_unregister(&dcmipp->v4l2_dev);
+
+ return 0;
+}
+
+static int dcmipp_runtime_suspend(struct device *dev)
+{
+ struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dcmipp->kclk);
+
+ return 0;
+}
+
+static int dcmipp_runtime_resume(struct device *dev)
+{
+ struct dcmipp_device *dcmipp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dcmipp->kclk);
+ if (ret)
+ dev_err(dev, "%s: Failed to prepare_enable kclk\n", __func__);
+
+ return ret;
+}
+
+static int dcmipp_suspend(struct device *dev)
+{
+ /* disable clock */
+ pm_runtime_force_suspend(dev);
+
+ /* change pinctrl state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int dcmipp_resume(struct device *dev)
+{
+	/* restore pinctrl default state */
+ pinctrl_pm_select_default_state(dev);
+
+	/* enable clock */
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dcmipp_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(dcmipp_suspend, dcmipp_resume)
+ RUNTIME_PM_OPS(dcmipp_runtime_suspend, dcmipp_runtime_resume, NULL)
+};
+
+static struct platform_driver dcmipp_pdrv = {
+ .probe = dcmipp_probe,
+ .remove = dcmipp_remove,
+ .driver = {
+ .name = DCMIPP_PDEV_NAME,
+ .of_match_table = dcmipp_of_match,
+ .pm = pm_ptr(&dcmipp_pm_ops),
+ },
+};
+
+module_platform_driver(dcmipp_pdrv);
+
+MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@foss.st.com>");
+MODULE_AUTHOR("Alain Volmat <alain.volmat@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface with Pixel Processor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c
new file mode 100644
index 000000000..62c5c3331
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-parallel.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface Pixel Processor
+ *
+ * Copyright (C) STMicroelectronics SA 2023
+ * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
+ * Alain Volmat <alain.volmat@foss.st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "dcmipp-common.h"
+
+#define DCMIPP_PRCR 0x104
+#define DCMIPP_PRCR_FORMAT_SHIFT 16
+#define DCMIPP_PRCR_FORMAT_YUV422 0x1e
+#define DCMIPP_PRCR_FORMAT_RGB565 0x22
+#define DCMIPP_PRCR_FORMAT_RAW8 0x2a
+#define DCMIPP_PRCR_FORMAT_G8 0x4a
+#define DCMIPP_PRCR_FORMAT_BYTE_STREAM 0x5a
+#define DCMIPP_PRCR_ESS BIT(4)
+#define DCMIPP_PRCR_PCKPOL BIT(5)
+#define DCMIPP_PRCR_HSPOL BIT(6)
+#define DCMIPP_PRCR_VSPOL BIT(7)
+#define DCMIPP_PRCR_ENABLE BIT(14)
+#define DCMIPP_PRCR_SWAPCYCLES BIT(25)
+
+#define DCMIPP_PRESCR 0x108
+#define DCMIPP_PRESUR 0x10c
+
+#define IS_SINK(pad) (!(pad))
+#define IS_SRC(pad) ((pad))
+
+struct dcmipp_par_pix_map {
+ unsigned int code_sink;
+ unsigned int code_src;
+ u8 prcr_format;
+ u8 prcr_swapcycles;
+};
+
+#define PIXMAP_SINK_SRC_PRCR_SWAP(sink, src, prcr, swap) \
+ { \
+ .code_sink = MEDIA_BUS_FMT_##sink, \
+ .code_src = MEDIA_BUS_FMT_##src, \
+ .prcr_format = DCMIPP_PRCR_FORMAT_##prcr, \
+ .prcr_swapcycles = swap, \
+ }
+static const struct dcmipp_par_pix_map dcmipp_par_pix_map_list[] = {
+ /* RGB565 */
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_LE, RGB565_2X8_LE, RGB565, 1),
+ PIXMAP_SINK_SRC_PRCR_SWAP(RGB565_2X8_BE, RGB565_2X8_LE, RGB565, 0),
+ /* YUV422 */
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, YUYV8_2X8, YUV422, 1),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YUYV8_2X8, UYVY8_2X8, YUV422, 0),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, UYVY8_2X8, YUV422, 1),
+ PIXMAP_SINK_SRC_PRCR_SWAP(UYVY8_2X8, YUYV8_2X8, YUV422, 0),
+ PIXMAP_SINK_SRC_PRCR_SWAP(YVYU8_2X8, YVYU8_2X8, YUV422, 1),
+ PIXMAP_SINK_SRC_PRCR_SWAP(VYUY8_2X8, VYUY8_2X8, YUV422, 1),
+ /* GREY */
+ PIXMAP_SINK_SRC_PRCR_SWAP(Y8_1X8, Y8_1X8, G8, 0),
+ /* Raw Bayer */
+ PIXMAP_SINK_SRC_PRCR_SWAP(SBGGR8_1X8, SBGGR8_1X8, RAW8, 0),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGBRG8_1X8, SGBRG8_1X8, RAW8, 0),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SGRBG8_1X8, SGRBG8_1X8, RAW8, 0),
+ PIXMAP_SINK_SRC_PRCR_SWAP(SRGGB8_1X8, SRGGB8_1X8, RAW8, 0),
+ /* JPEG */
+ PIXMAP_SINK_SRC_PRCR_SWAP(JPEG_1X8, JPEG_1X8, BYTE_STREAM, 0),
+};
+
+/*
+ * Search through the pix_map table, skipping consecutive entries with the
+ * same code
+ */
+static inline const struct dcmipp_par_pix_map *
+dcmipp_par_pix_map_by_index(unsigned int index, unsigned int pad)
+{
+ unsigned int i = 0;
+ u32 prev_code = 0, cur_code;
+
+ while (i < ARRAY_SIZE(dcmipp_par_pix_map_list)) {
+ if (IS_SRC(pad))
+ cur_code = dcmipp_par_pix_map_list[i].code_src;
+ else
+ cur_code = dcmipp_par_pix_map_list[i].code_sink;
+
+ if (cur_code == prev_code) {
+ i++;
+ continue;
+ }
+ prev_code = cur_code;
+
+ if (index == 0)
+ break;
+ i++;
+ index--;
+ }
+
+ if (i >= ARRAY_SIZE(dcmipp_par_pix_map_list))
+ return NULL;
+
+ return &dcmipp_par_pix_map_list[i];
+}
+
+static inline const struct dcmipp_par_pix_map *
+dcmipp_par_pix_map_by_code(u32 code_sink, u32 code_src)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dcmipp_par_pix_map_list); i++) {
+ if ((dcmipp_par_pix_map_list[i].code_sink == code_sink &&
+ dcmipp_par_pix_map_list[i].code_src == code_src) ||
+ (dcmipp_par_pix_map_list[i].code_sink == code_src &&
+ dcmipp_par_pix_map_list[i].code_src == code_sink) ||
+ (dcmipp_par_pix_map_list[i].code_sink == code_sink &&
+ code_src == 0) ||
+ (code_sink == 0 &&
+ dcmipp_par_pix_map_list[i].code_src == code_src))
+ return &dcmipp_par_pix_map_list[i];
+ }
+ return NULL;
+}
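+
+/*
+ * A code of 0 acts as a wildcard above: passing (code, 0) or (0, code)
+ * matches any entry with the given sink or source code, which is how the
+ * enum_frame_size and adjust_fmt callbacks look up a single pad's code.
+ */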
+
+struct dcmipp_par_device {
+ struct dcmipp_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ void __iomem *regs;
+ bool streaming;
+};
+
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = DCMIPP_FMT_WIDTH_DEFAULT,
+ .height = DCMIPP_FMT_HEIGHT_DEFAULT,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = DCMIPP_COLORSPACE_DEFAULT,
+ .ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
+ .quantization = DCMIPP_QUANTIZATION_DEFAULT,
+ .xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
+};
+
+static int dcmipp_par_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_state_get_format(sd_state, i);
+ *mf = fmt_default;
+ }
+
+ return 0;
+}
+
+static int dcmipp_par_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct dcmipp_par_pix_map *vpix =
+ dcmipp_par_pix_map_by_index(code->index, code->pad);
+
+ if (!vpix)
+ return -EINVAL;
+
+ code->code = IS_SRC(code->pad) ? vpix->code_src : vpix->code_sink;
+
+ return 0;
+}
+
+static int dcmipp_par_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct dcmipp_par_pix_map *vpix;
+
+ if (fse->index)
+ return -EINVAL;
+
+ /* Only accept codes present in the pix map table */
+ vpix = dcmipp_par_pix_map_by_code(IS_SINK(fse->pad) ? fse->code : 0,
+ IS_SRC(fse->pad) ? fse->code : 0);
+ if (!vpix)
+ return -EINVAL;
+
+ fse->min_width = DCMIPP_FRAME_MIN_WIDTH;
+ fse->max_width = DCMIPP_FRAME_MAX_WIDTH;
+ fse->min_height = DCMIPP_FRAME_MIN_HEIGHT;
+ fse->max_height = DCMIPP_FRAME_MAX_HEIGHT;
+
+ return 0;
+}
+
+static void dcmipp_par_adjust_fmt(struct dcmipp_par_device *par,
+ struct v4l2_mbus_framefmt *fmt, __u32 pad)
+{
+ const struct dcmipp_par_pix_map *vpix;
+
+ /* Only accept codes present in the pix map table */
+ vpix = dcmipp_par_pix_map_by_code(IS_SINK(pad) ? fmt->code : 0,
+ IS_SRC(pad) ? fmt->code : 0);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
+ /* Exclude JPEG if BT656 bus is selected */
+ if (vpix && vpix->code_sink == MEDIA_BUS_FMT_JPEG_1X8 &&
+ par->ved.bus_type == V4L2_MBUS_BT656)
+ fmt->code = fmt_default.code;
+
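+ /* Clamp to the supported frame size and round the dimensions down to even */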
+ fmt->width = clamp_t(u32, fmt->width, DCMIPP_FRAME_MIN_WIDTH,
+ DCMIPP_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, DCMIPP_FRAME_MIN_HEIGHT,
+ DCMIPP_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
+ fmt->field = fmt_default.field;
+
+ dcmipp_colorimetry_clamp(fmt);
+}
+
+static int dcmipp_par_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct dcmipp_par_device *par = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (par->streaming)
+ return -EBUSY;
+
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+
+ /* Set the new format */
+ dcmipp_par_adjust_fmt(par, &fmt->format, fmt->pad);
+
+ dev_dbg(par->dev, "%s: format update: old:%dx%d (0x%x, %d, %d, %d, %d) new:%dx%d (0x%x, %d, %d, %d, %d)\n",
+ par->sd.name,
+ /* old */
+ mf->width, mf->height, mf->code,
+ mf->colorspace, mf->quantization,
+ mf->xfer_func, mf->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *mf = fmt->format;
+
+ /* When setting the sink format, propagate that format to the src pad */
+ if (IS_SINK(fmt->pad)) {
+ mf = v4l2_subdev_state_get_format(sd_state, 1);
+ *mf = fmt->format;
+ dcmipp_par_adjust_fmt(par, mf, 1);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops dcmipp_par_pad_ops = {
+ .enum_mbus_code = dcmipp_par_enum_mbus_code,
+ .enum_frame_size = dcmipp_par_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = dcmipp_par_set_fmt,
+};
+
+static int dcmipp_par_configure(struct dcmipp_par_device *par)
+{
+ u32 val = 0;
+ const struct dcmipp_par_pix_map *vpix;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *sink_fmt;
+ struct v4l2_mbus_framefmt *src_fmt;
+
+ /* Set vertical synchronization polarity */
+ if (par->ved.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ val |= DCMIPP_PRCR_VSPOL;
+
+ /* Set horizontal synchronization polarity */
+ if (par->ved.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ val |= DCMIPP_PRCR_HSPOL;
+
+ /* Set pixel clock polarity */
+ if (par->ved.bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ val |= DCMIPP_PRCR_PCKPOL;
+
+ /*
+ * BT656 embedded synchronisation bus mode.
+ *
+ * Default SAV/EAV mode is supported here with default codes
+ * SAV=0xff000080 & EAV=0xff00009d.
+ * With DCMIPP this means LSC=SAV=0x80 & LEC=EAV=0x9d.
+ */
+ if (par->ved.bus_type == V4L2_MBUS_BT656) {
+ val |= DCMIPP_PRCR_ESS;
+
+ /* Unmask all codes */
+ reg_write(par, DCMIPP_PRESUR, 0xffffffff); /* FEC:LEC:LSC:FSC */
+
+ /* Trigger on LSC=0x80 & LEC=0x9d codes, ignore FSC and FEC */
+ reg_write(par, DCMIPP_PRESCR, 0xff9d80ff); /* FEC:LEC:LSC:FSC */
+ }
+
+ /* Set format */
+ state = v4l2_subdev_lock_and_get_active_state(&par->sd);
+ sink_fmt = v4l2_subdev_state_get_format(state, 0);
+ src_fmt = v4l2_subdev_state_get_format(state, 1);
+ v4l2_subdev_unlock_state(state);
+
+ vpix = dcmipp_par_pix_map_by_code(sink_fmt->code, src_fmt->code);
+ if (!vpix) {
+ dev_err(par->dev, "Invalid sink/src format configuration\n");
+ return -EINVAL;
+ }
+
+ val |= vpix->prcr_format << DCMIPP_PRCR_FORMAT_SHIFT;
+
+ /* swap cycles */
+ if (vpix->prcr_swapcycles)
+ val |= DCMIPP_PRCR_SWAPCYCLES;
+
+ reg_write(par, DCMIPP_PRCR, val);
+
+ return 0;
+}
+
+static int dcmipp_par_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct dcmipp_par_device *par =
+ container_of(sd, struct dcmipp_par_device, sd);
+ struct v4l2_subdev *s_subdev;
+ struct media_pad *pad;
+ int ret = 0;
+
+ /* Get source subdev */
+ pad = media_pad_remote_pad_first(&sd->entity.pads[0]);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+ s_subdev = media_entity_to_v4l2_subdev(pad->entity);
+
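+ /*
+ * On start, configure and enable the parallel interface before starting
+ * the remote source; on stop, stop the source first, then disable the
+ * interface.
+ */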
+ if (enable) {
+ ret = dcmipp_par_configure(par);
+ if (ret)
+ return ret;
+
+ /* Enable parallel interface */
+ reg_set(par, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
+
+ ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
+ if (ret < 0) {
+ dev_err(par->dev,
+ "failed to start source subdev streaming (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = v4l2_subdev_call(s_subdev, video, s_stream, enable);
+ if (ret < 0) {
+ dev_err(par->dev,
+ "failed to stop source subdev streaming (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Disable parallel interface */
+ reg_clear(par, DCMIPP_PRCR, DCMIPP_PRCR_ENABLE);
+ }
+
+ par->streaming = enable;
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops dcmipp_par_video_ops = {
+ .s_stream = dcmipp_par_s_stream,
+};
+
+static const struct v4l2_subdev_ops dcmipp_par_ops = {
+ .pad = &dcmipp_par_pad_ops,
+ .video = &dcmipp_par_video_ops,
+};
+
+static void dcmipp_par_release(struct v4l2_subdev *sd)
+{
+ struct dcmipp_par_device *par =
+ container_of(sd, struct dcmipp_par_device, sd);
+
+ kfree(par);
+}
+
+static const struct v4l2_subdev_internal_ops dcmipp_par_int_ops = {
+ .init_state = dcmipp_par_init_state,
+ .release = dcmipp_par_release,
+};
+
+void dcmipp_par_ent_release(struct dcmipp_ent_device *ved)
+{
+ struct dcmipp_par_device *par =
+ container_of(ved, struct dcmipp_par_device, ved);
+
+ dcmipp_ent_sd_unregister(ved, &par->sd);
+}
+
+struct dcmipp_ent_device *dcmipp_par_ent_init(struct device *dev,
+ const char *entity_name,
+ struct v4l2_device *v4l2_dev,
+ void __iomem *regs)
+{
+ struct dcmipp_par_device *par;
+ const unsigned long pads_flag[] = {
+ MEDIA_PAD_FL_SINK, MEDIA_PAD_FL_SOURCE,
+ };
+ int ret;
+
+ /* Allocate the par struct */
+ par = kzalloc(sizeof(*par), GFP_KERNEL);
+ if (!par)
+ return ERR_PTR(-ENOMEM);
+
+ par->regs = regs;
+
+ /* Initialize ved and sd */
+ ret = dcmipp_ent_sd_register(&par->ved, &par->sd, v4l2_dev,
+ entity_name, MEDIA_ENT_F_VID_IF_BRIDGE,
+ ARRAY_SIZE(pads_flag), pads_flag,
+ &dcmipp_par_int_ops, &dcmipp_par_ops,
+ NULL, NULL);
+ if (ret) {
+ kfree(par);
+ return ERR_PTR(ret);
+ }
+
+ par->dev = dev;
+
+ return &par->ved;
+}
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index ad13d447d..097a3a08e 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -211,6 +211,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
/* Initialize subdev */
v4l2_subdev_init(subdev, &sun4i_csi_subdev_ops);
+ subdev->internal_ops = &sun4i_csi_subdev_internal_ops;
subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
subdev->owner = THIS_MODULE;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
index 8eeed87bf..4e0c2df45 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h
@@ -89,6 +89,7 @@ enum csi_subdev_pads {
};
extern const struct v4l2_subdev_ops sun4i_csi_subdev_ops;
+extern const struct v4l2_subdev_internal_ops sun4i_csi_subdev_internal_ops;
struct sun4i_csi_format {
u32 mbus;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
index 95b5633b7..d1371e130 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -411,7 +411,7 @@ int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
for (i = 0; i < CSI_MAX_BUFFER; i++)
csi->current_buf[i] = NULL;
- q->min_buffers_needed = 3;
+ q->min_queued_buffers = 3;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->io_modes = VB2_MMAP | VB2_DMABUF;
q->lock = &csi->lock;
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
index 48702134c..744197b0f 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
@@ -266,12 +266,12 @@ static const struct v4l2_mbus_framefmt sun4i_csi_pad_fmt_default = {
.xfer_func = V4L2_XFER_FUNC_DEFAULT,
};
-static int sun4i_csi_subdev_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *sd_state)
+static int sun4i_csi_subdev_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_try_format(subdev, sd_state, CSI_SUBDEV_SINK);
+ fmt = v4l2_subdev_state_get_format(sd_state, CSI_SUBDEV_SINK);
*fmt = sun4i_csi_pad_fmt_default;
return 0;
@@ -285,8 +285,7 @@ static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
struct v4l2_mbus_framefmt *subdev_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
- fmt->pad);
+ subdev_fmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
else
subdev_fmt = &csi->subdev_fmt;
@@ -303,8 +302,7 @@ static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
struct v4l2_mbus_framefmt *subdev_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
- fmt->pad);
+ subdev_fmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
else
subdev_fmt = &csi->subdev_fmt;
@@ -336,7 +334,6 @@ sun4i_csi_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
static const struct v4l2_subdev_pad_ops sun4i_csi_subdev_pad_ops = {
.link_validate = v4l2_subdev_link_validate_default,
- .init_cfg = sun4i_csi_subdev_init_cfg,
.get_fmt = sun4i_csi_subdev_get_fmt,
.set_fmt = sun4i_csi_subdev_set_fmt,
.enum_mbus_code = sun4i_csi_subdev_enum_mbus_code,
@@ -346,6 +343,10 @@ const struct v4l2_subdev_ops sun4i_csi_subdev_ops = {
.pad = &sun4i_csi_subdev_pad_ops,
};
+const struct v4l2_subdev_internal_ops sun4i_csi_subdev_internal_ops = {
+ .init_state = sun4i_csi_subdev_init_state,
+};
+
int sun4i_csi_v4l2_register(struct sun4i_csi *csi)
{
struct video_device *vdev = &csi->vdev;
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
index e57341312..d006d9dd0 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
@@ -501,13 +501,13 @@ sun6i_csi_bridge_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun6i_csi_bridge_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *state)
+static int sun6i_csi_bridge_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
{
struct sun6i_csi_device *csi_dev = v4l2_get_subdevdata(subdev);
unsigned int pad = SUN6I_CSI_BRIDGE_PAD_SINK;
struct v4l2_mbus_framefmt *mbus_format =
- v4l2_subdev_get_try_format(subdev, state, pad);
+ v4l2_subdev_state_get_format(state, pad);
struct mutex *lock = &csi_dev->bridge.lock;
mutex_lock(lock);
@@ -547,8 +547,8 @@ static int sun6i_csi_bridge_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, state,
- format->pad);
+ *mbus_format = *v4l2_subdev_state_get_format(state,
+ format->pad);
else
*mbus_format = csi_dev->bridge.mbus_format;
@@ -570,7 +570,7 @@ static int sun6i_csi_bridge_set_fmt(struct v4l2_subdev *subdev,
sun6i_csi_bridge_mbus_format_prepare(mbus_format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, state, format->pad) =
+ *v4l2_subdev_state_get_format(state, format->pad) =
*mbus_format;
else
csi_dev->bridge.mbus_format = *mbus_format;
@@ -581,7 +581,6 @@ static int sun6i_csi_bridge_set_fmt(struct v4l2_subdev *subdev,
}
static const struct v4l2_subdev_pad_ops sun6i_csi_bridge_pad_ops = {
- .init_cfg = sun6i_csi_bridge_init_cfg,
.enum_mbus_code = sun6i_csi_bridge_enum_mbus_code,
.get_fmt = sun6i_csi_bridge_get_fmt,
.set_fmt = sun6i_csi_bridge_set_fmt,
@@ -592,6 +591,10 @@ static const struct v4l2_subdev_ops sun6i_csi_bridge_subdev_ops = {
.pad = &sun6i_csi_bridge_pad_ops,
};
+static const struct v4l2_subdev_internal_ops sun6i_csi_bridge_internal_ops = {
+ .init_state = sun6i_csi_bridge_init_state,
+};
+
/* Media Entity */
static const struct media_entity_operations sun6i_csi_bridge_entity_ops = {
@@ -782,6 +785,7 @@ int sun6i_csi_bridge_setup(struct sun6i_csi_device *csi_dev)
/* V4L2 Subdev */
v4l2_subdev_init(subdev, &sun6i_csi_bridge_subdev_ops);
+ subdev->internal_ops = &sun6i_csi_bridge_internal_ops;
strscpy(subdev->name, SUN6I_CSI_BRIDGE_NAME, sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->owner = THIS_MODULE;
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
index cf6aadbc1..14c0dc827 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
@@ -1010,7 +1010,7 @@ int sun6i_csi_capture_setup(struct sun6i_csi_device *csi_dev)
queue->buf_struct_size = sizeof(struct sun6i_csi_buffer);
queue->ops = &sun6i_csi_capture_queue_ops;
queue->mem_ops = &vb2_dma_contig_memops;
- queue->min_buffers_needed = 2;
+ queue->min_queued_buffers = 2;
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->lock = &capture->lock;
queue->dev = csi_dev->dev;
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
index 08d86c17b..f9d4dc45b 100644
--- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
@@ -305,13 +305,13 @@ sun6i_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun6i_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *state)
+static int sun6i_mipi_csi2_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
{
struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
unsigned int pad = SUN6I_MIPI_CSI2_PAD_SINK;
struct v4l2_mbus_framefmt *mbus_format =
- v4l2_subdev_get_try_format(subdev, state, pad);
+ v4l2_subdev_state_get_format(state, pad);
struct mutex *lock = &csi2_dev->bridge.lock;
mutex_lock(lock);
@@ -351,8 +351,8 @@ static int sun6i_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, state,
- format->pad);
+ *mbus_format = *v4l2_subdev_state_get_format(state,
+ format->pad);
else
*mbus_format = csi2_dev->bridge.mbus_format;
@@ -374,7 +374,7 @@ static int sun6i_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
sun6i_mipi_csi2_mbus_format_prepare(mbus_format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, state, format->pad) =
+ *v4l2_subdev_state_get_format(state, format->pad) =
*mbus_format;
else
csi2_dev->bridge.mbus_format = *mbus_format;
@@ -385,7 +385,6 @@ static int sun6i_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
}
static const struct v4l2_subdev_pad_ops sun6i_mipi_csi2_pad_ops = {
- .init_cfg = sun6i_mipi_csi2_init_cfg,
.enum_mbus_code = sun6i_mipi_csi2_enum_mbus_code,
.get_fmt = sun6i_mipi_csi2_get_fmt,
.set_fmt = sun6i_mipi_csi2_set_fmt,
@@ -396,6 +395,10 @@ static const struct v4l2_subdev_ops sun6i_mipi_csi2_subdev_ops = {
.pad = &sun6i_mipi_csi2_pad_ops,
};
+static const struct v4l2_subdev_internal_ops sun6i_mipi_csi2_internal_ops = {
+ .init_state = sun6i_mipi_csi2_init_state,
+};
+
/* Media Entity */
static const struct media_entity_operations sun6i_mipi_csi2_entity_ops = {
@@ -504,6 +507,7 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
/* V4L2 Subdev */
v4l2_subdev_init(subdev, &sun6i_mipi_csi2_subdev_ops);
+ subdev->internal_ops = &sun6i_mipi_csi2_internal_ops;
strscpy(subdev->name, SUN6I_MIPI_CSI2_NAME, sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->owner = THIS_MODULE;
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
index 14a184481..4a5698eb1 100644
--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
@@ -338,14 +338,14 @@ sun8i_a83t_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun8i_a83t_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *state)
+static int sun8i_a83t_mipi_csi2_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
v4l2_get_subdevdata(subdev);
unsigned int pad = SUN8I_A83T_MIPI_CSI2_PAD_SINK;
struct v4l2_mbus_framefmt *mbus_format =
- v4l2_subdev_get_try_format(subdev, state, pad);
+ v4l2_subdev_state_get_format(state, pad);
struct mutex *lock = &csi2_dev->bridge.lock;
mutex_lock(lock);
@@ -387,8 +387,8 @@ static int sun8i_a83t_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, state,
- format->pad);
+ *mbus_format = *v4l2_subdev_state_get_format(state,
+ format->pad);
else
*mbus_format = csi2_dev->bridge.mbus_format;
@@ -411,7 +411,7 @@ static int sun8i_a83t_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, state, format->pad) =
+ *v4l2_subdev_state_get_format(state, format->pad) =
*mbus_format;
else
csi2_dev->bridge.mbus_format = *mbus_format;
@@ -422,7 +422,6 @@ static int sun8i_a83t_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
}
static const struct v4l2_subdev_pad_ops sun8i_a83t_mipi_csi2_pad_ops = {
- .init_cfg = sun8i_a83t_mipi_csi2_init_cfg,
.enum_mbus_code = sun8i_a83t_mipi_csi2_enum_mbus_code,
.get_fmt = sun8i_a83t_mipi_csi2_get_fmt,
.set_fmt = sun8i_a83t_mipi_csi2_set_fmt,
@@ -433,6 +432,10 @@ static const struct v4l2_subdev_ops sun8i_a83t_mipi_csi2_subdev_ops = {
.pad = &sun8i_a83t_mipi_csi2_pad_ops,
};
+static const struct v4l2_subdev_internal_ops sun8i_a83t_mipi_csi2_internal_ops = {
+ .init_state = sun8i_a83t_mipi_csi2_init_state,
+};
+
/* Media Entity */
static const struct media_entity_operations sun8i_a83t_mipi_csi2_entity_ops = {
@@ -542,6 +545,7 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
/* V4L2 Subdev */
v4l2_subdev_init(subdev, &sun8i_a83t_mipi_csi2_subdev_ops);
+ subdev->internal_ops = &sun8i_a83t_mipi_csi2_internal_ops;
strscpy(subdev->name, SUN8I_A83T_MIPI_CSI2_NAME, sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->owner = THIS_MODULE;
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index f7ff09378..a1c35a2b6 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -673,7 +673,7 @@ static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->min_buffers_needed = 1;
+ src_vq->min_queued_buffers = 1;
src_vq->ops = &deinterlace_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
@@ -688,7 +688,7 @@ static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->min_buffers_needed = 2;
+ dst_vq->min_queued_buffers = 2;
dst_vq->ops = &deinterlace_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
index 0b025ec91..a12323ca8 100644
--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -536,7 +536,7 @@ static int rotate_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- src_vq->min_buffers_needed = 1;
+ src_vq->min_queued_buffers = 1;
src_vq->ops = &rotate_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
@@ -551,7 +551,7 @@ static int rotate_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->min_buffers_needed = 2;
+ dst_vq->min_queued_buffers = 2;
dst_vq->ops = &rotate_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index 5fa2ea902..77e12457d 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
@@ -1771,9 +1771,10 @@ static int vpfe_queue_setup(struct vb2_queue *vq,
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
unsigned size = vpfe->fmt.fmt.pix.sizeimage;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
if (*nplanes) {
if (sizes[0] < size)
@@ -2233,7 +2234,7 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &vpfe->lock;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->dev = vpfe->pdev;
err = vb2_queue_init(q);
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 1a4273bbe..4afc2ad00 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -57,7 +57,7 @@ static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
state = v4l2_subdev_get_locked_active_state(&phy->subdev);
- fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_format(state, CAL_CAMERARX_PAD_SINK);
fmtinfo = cal_format_by_code(fmt->code);
if (!fmtinfo)
@@ -621,8 +621,6 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct cal_camerarx *phy = to_cal_camerarx(sd);
-
/* No transcoding, source and sink codes must match. */
if (cal_rx_pad_is_source(code->pad)) {
struct v4l2_mbus_framefmt *fmt;
@@ -630,8 +628,8 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = v4l2_subdev_get_pad_format(&phy->subdev, state,
- CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_format(state,
+ CAL_CAMERARX_PAD_SINK);
code->code = fmt->code;
} else {
if (code->index >= cal_num_formats)
@@ -656,8 +654,8 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
if (cal_rx_pad_is_source(fse->pad)) {
struct v4l2_mbus_framefmt *fmt;
- fmt = v4l2_subdev_get_pad_format(sd, state,
- CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_format(state,
+ CAL_CAMERARX_PAD_SINK);
if (fse->code != fmt->code)
return -EINVAL;
@@ -713,18 +711,18 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
/* Store the format and propagate it to the source pad. */
- fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_SINK);
+ fmt = v4l2_subdev_state_get_format(state, CAL_CAMERARX_PAD_SINK);
*fmt = format->format;
- fmt = v4l2_subdev_get_pad_format(sd, state,
- CAL_CAMERARX_PAD_FIRST_SOURCE);
+ fmt = v4l2_subdev_state_get_format(state,
+ CAL_CAMERARX_PAD_FIRST_SOURCE);
*fmt = format->format;
return 0;
}
-static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state)
+static int cal_camerarx_sd_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format format = {
.which = state ? V4L2_SUBDEV_FORMAT_TRY
@@ -784,7 +782,6 @@ static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
};
static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = {
- .init_cfg = cal_camerarx_sd_init_cfg,
.enum_mbus_code = cal_camerarx_sd_enum_mbus_code,
.enum_frame_size = cal_camerarx_sd_enum_frame_size,
.get_fmt = v4l2_subdev_get_fmt,
@@ -797,6 +794,10 @@ static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = {
.pad = &cal_camerarx_pad_ops,
};
+static const struct v4l2_subdev_internal_ops cal_camerarx_internal_ops = {
+ .init_state = cal_camerarx_sd_init_state,
+};
+
static struct media_entity_operations cal_camerarx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
@@ -848,6 +849,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
/* Initialize the V4L2 subdev and media entity. */
sd = &phy->subdev;
v4l2_subdev_init(sd, &cal_camerarx_subdev_ops);
+ sd->internal_ops = &cal_camerarx_internal_ops;
sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance);
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index a8abcd0fe..e1ba5dfc2 100644
--- a/drivers/media/platform/ti/cal/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -603,9 +603,10 @@ static int cal_queue_setup(struct vb2_queue *vq,
{
struct cal_ctx *ctx = vb2_get_drv_priv(vq);
unsigned int size = ctx->v_fmt.fmt.pix.sizeimage;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
if (*nplanes) {
if (sizes[0] < size)
@@ -697,7 +698,7 @@ static int cal_video_check_format(struct cal_ctx *ctx)
state = v4l2_subdev_lock_and_get_active_state(&ctx->phy->subdev);
- format = v4l2_subdev_get_pad_format(&ctx->phy->subdev, state, remote_pad->index);
+ format = v4l2_subdev_state_get_format(state, remote_pad->index);
if (!format) {
ret = -EINVAL;
goto out;
@@ -1009,7 +1010,7 @@ int cal_ctx_v4l2_init(struct cal_ctx *ctx)
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &ctx->mutex;
- q->min_buffers_needed = 3;
+ q->min_queued_buffers = 3;
q->dev = ctx->cal->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index 99fae8830..c31a5566f 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -113,6 +113,7 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
struct channel_obj *ch = vb2_get_drv_priv(vq);
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
unsigned size = common->fmt.fmt.pix.sizeimage;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
vpif_dbg(2, debug, "vpif_buffer_setup\n");
@@ -122,8 +123,8 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
size = sizes[0];
}
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
*nplanes = 1;
sizes[0] = size;
@@ -1428,7 +1429,7 @@ static int vpif_probe_complete(void)
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpif_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->lock = &common->lock;
q->dev = vpif_dev;
diff --git a/drivers/media/platform/ti/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c
index f8ec2991c..02ede1fe1 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.c
+++ b/drivers/media/platform/ti/davinci/vpif_display.c
@@ -115,6 +115,7 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
struct channel_obj *ch = vb2_get_drv_priv(vq);
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
unsigned size = common->fmt.fmt.pix.sizeimage;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
if (*nplanes) {
if (sizes[0] < size)
@@ -122,8 +123,8 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
size = sizes[0];
}
- if (vq->num_buffers + *nbuffers < 3)
- *nbuffers = 3 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 3)
+ *nbuffers = 3 - q_num_bufs;
*nplanes = 1;
sizes[0] = size;
@@ -1168,7 +1169,7 @@ static int vpif_probe_complete(void)
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpif_disp_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->lock = &common->lock;
q->dev = vpif_dev;
err = vb2_queue_init(q);
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index ada61391c..59b30fc43 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -873,7 +873,7 @@ static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->dev = dmaengine_get_dma_device(csi->dma.chan);
q->lock = &csi->mutex;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/ti/omap/omap_vout.c b/drivers/media/platform/ti/omap/omap_vout.c
index 414327408..1c56b6a87 100644
--- a/drivers/media/platform/ti/omap/omap_vout.c
+++ b/drivers/media/platform/ti/omap/omap_vout.c
@@ -944,10 +944,11 @@ static int omap_vout_vb2_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct omap_vout_device *vout = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
int size = vout->pix.sizeimage;
- if (is_rotation_enabled(vout) && vq->num_buffers + *nbufs > VRFB_NUM_BUFS) {
- *nbufs = VRFB_NUM_BUFS - vq->num_buffers;
+ if (is_rotation_enabled(vout) && q_num_bufs + *nbufs > VRFB_NUM_BUFS) {
+ *nbufs = VRFB_NUM_BUFS - q_num_bufs;
if (*nbufs == 0)
return -EINVAL;
}
@@ -1403,7 +1404,7 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
vq->ops = &omap_vout_vb2_ops;
vq->mem_ops = &vb2_dma_contig_memops;
vq->lock = &vout->lock;
- vq->min_buffers_needed = 1;
+ vq->min_queued_buffers = 1;
vfd->queue = vq;
ret = vb2_queue_init(vq);
diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index 2fe42aa91..dd375c4e1 100644
--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
@@ -1948,8 +1948,7 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ccdc->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &ccdc->formats[pad];
}
@@ -1960,8 +1959,8 @@ __ccdc_get_crop(struct isp_ccdc_device *ccdc,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&ccdc->subdev, sd_state,
- CCDC_PAD_SOURCE_OF);
+ return v4l2_subdev_state_get_crop(sd_state,
+ CCDC_PAD_SOURCE_OF);
else
return &ccdc->crop;
}
@@ -1969,7 +1968,7 @@ __ccdc_get_crop(struct isp_ccdc_device *ccdc,
/*
* ccdc_try_format - Try video format on a pad
* @ccdc: ISP CCDC device
- * @cfg : V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: Pad number
* @fmt: Format
*/
@@ -2127,7 +2126,7 @@ static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
/*
* ccdc_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -2230,7 +2229,7 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
/*
* ccdc_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP CCDC V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: Selection rectangle
*
* The only supported rectangles are the crop rectangles on the output formatter
@@ -2274,7 +2273,7 @@ static int ccdc_get_selection(struct v4l2_subdev *sd,
/*
* ccdc_set_selection - Set a selection rectangle on a pad
* @sd: ISP CCDC V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: Selection rectangle
*
* The only supported rectangle is the actual crop rectangle on the output
@@ -2322,7 +2321,7 @@ static int ccdc_set_selection(struct v4l2_subdev *sd,
/*
* ccdc_get_format - Retrieve the video format on a pad
* @sd : ISP CCDC V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
@@ -2346,7 +2345,7 @@ static int ccdc_get_format(struct v4l2_subdev *sd,
/*
* ccdc_set_format - Set the video format on a pad
* @sd : ISP CCDC V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
diff --git a/drivers/media/platform/ti/omap3isp/ispccp2.c b/drivers/media/platform/ti/omap3isp/ispccp2.c
index da5f0176e..1204ee221 100644
--- a/drivers/media/platform/ti/omap3isp/ispccp2.c
+++ b/drivers/media/platform/ti/omap3isp/ispccp2.c
@@ -614,7 +614,7 @@ static const unsigned int ccp2_fmts[] = {
/*
* __ccp2_get_format - helper function for getting ccp2 format
* @ccp2 : Pointer to ISP CCP2 device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad : pad number
* @which : wanted subdev format
* return format structure or NULL on error
@@ -625,8 +625,7 @@ __ccp2_get_format(struct isp_ccp2_device *ccp2,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ccp2->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &ccp2->formats[pad];
}
@@ -634,7 +633,7 @@ __ccp2_get_format(struct isp_ccp2_device *ccp2,
/*
* ccp2_try_format - Handle try format by pad subdev method
* @ccp2 : Pointer to ISP CCP2 device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad : pad num
* @fmt : pointer to v4l2 mbus format structure
* @which : wanted subdev format
@@ -689,7 +688,7 @@ static void ccp2_try_format(struct isp_ccp2_device *ccp2,
/*
* ccp2_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -750,7 +749,7 @@ static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
/*
* ccp2_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
@@ -772,7 +771,7 @@ static int ccp2_get_format(struct v4l2_subdev *sd,
/*
* ccp2_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt : pointer to v4l2 subdev format structure
* returns zero
*/
diff --git a/drivers/media/platform/ti/omap3isp/ispcsi2.c b/drivers/media/platform/ti/omap3isp/ispcsi2.c
index 0f9a54b11..ae574e1b6 100644
--- a/drivers/media/platform/ti/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/ti/omap3isp/ispcsi2.c
@@ -834,8 +834,7 @@ __csi2_get_format(struct isp_csi2_device *csi2,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &csi2->formats[pad];
}
@@ -894,7 +893,7 @@ csi2_try_format(struct isp_csi2_device *csi2,
/*
* csi2_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -968,7 +967,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
/*
* csi2_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
@@ -990,7 +989,7 @@ static int csi2_get_format(struct v4l2_subdev *sd,
/*
* csi2_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
diff --git a/drivers/media/platform/ti/omap3isp/isppreview.c b/drivers/media/platform/ti/omap3isp/isppreview.c
index 53aedec79..e383a5765 100644
--- a/drivers/media/platform/ti/omap3isp/isppreview.c
+++ b/drivers/media/platform/ti/omap3isp/isppreview.c
@@ -1684,8 +1684,7 @@ __preview_get_format(struct isp_prev_device *prev,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&prev->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &prev->formats[pad];
}
@@ -1696,8 +1695,7 @@ __preview_get_crop(struct isp_prev_device *prev,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&prev->subdev, sd_state,
- PREV_PAD_SINK);
+ return v4l2_subdev_state_get_crop(sd_state, PREV_PAD_SINK);
else
return &prev->crop;
}
@@ -1724,7 +1722,7 @@ static const unsigned int preview_output_fmts[] = {
/*
* preview_try_format - Validate a format
* @prev: ISP preview engine
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad: pad number
* @fmt: format to be validated
* @which: try/active format selector
@@ -1863,7 +1861,7 @@ static void preview_try_crop(struct isp_prev_device *prev,
/*
* preview_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -1924,7 +1922,7 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
/*
* preview_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: Selection rectangle
*
* The only supported rectangles are the crop rectangles on the sink pad.
@@ -1967,7 +1965,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
/*
* preview_set_selection - Set a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: Selection rectangle
*
* The only supported rectangle is the actual crop rectangle on the sink pad.
@@ -2015,7 +2013,7 @@ static int preview_set_selection(struct v4l2_subdev *sd,
/*
* preview_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
@@ -2037,7 +2035,7 @@ static int preview_get_format(struct v4l2_subdev *sd,
/*
* preview_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
diff --git a/drivers/media/platform/ti/omap3isp/ispresizer.c b/drivers/media/platform/ti/omap3isp/ispresizer.c
index ed2fb0c7a..87d821b02 100644
--- a/drivers/media/platform/ti/omap3isp/ispresizer.c
+++ b/drivers/media/platform/ti/omap3isp/ispresizer.c
@@ -109,7 +109,7 @@ static const struct isprsz_coef filter_coefs = {
* __resizer_get_format - helper function for getting resizer format
* @res : pointer to resizer private structure
* @pad : pad number
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @which : wanted subdev format
* return zero
*/
@@ -119,7 +119,7 @@ __resizer_get_format(struct isp_res_device *res,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&res->subdev, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &res->formats[pad];
}
@@ -127,7 +127,7 @@ __resizer_get_format(struct isp_res_device *res,
/*
* __resizer_get_crop - helper function for getting resizer crop rectangle
* @res : pointer to resizer private structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @which : wanted subdev crop rectangle
*/
static struct v4l2_rect *
@@ -136,8 +136,7 @@ __resizer_get_crop(struct isp_res_device *res,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&res->subdev, sd_state,
- RESZ_PAD_SINK);
+ return v4l2_subdev_state_get_crop(sd_state, RESZ_PAD_SINK);
else
return &res->crop.request;
}
@@ -1215,7 +1214,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
/*
* resizer_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP resizer V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: Selection rectangle
*
* The only supported rectangles are the crop rectangles on the sink pad.
@@ -1265,7 +1264,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
/*
* resizer_set_selection - Set a selection rectangle on a pad
* @sd: ISP resizer V4L2 subdevice
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @sel: Selection rectangle
*
* The only supported rectangle is the actual crop rectangle on the sink pad.
@@ -1369,7 +1368,7 @@ static unsigned int resizer_max_in_width(struct isp_res_device *res)
/*
* resizer_try_format - Handle try format by pad subdev method
* @res : ISP resizer device
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @pad : pad num
* @fmt : pointer to v4l2 format structure
* @which : wanted subdev format
@@ -1413,7 +1412,7 @@ static void resizer_try_format(struct isp_res_device *res,
/*
* resizer_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -1474,7 +1473,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
/*
* resizer_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
@@ -1496,7 +1495,7 @@ static int resizer_get_format(struct v4l2_subdev *sd,
/*
* resizer_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
+ * @sd_state: V4L2 subdev state
* @fmt : pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig
index e65b836b9..24b927d8f 100644
--- a/drivers/media/platform/verisilicon/Kconfig
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -8,7 +8,6 @@ config VIDEO_HANTRO
depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 77aee9489..6f5eb975d 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -328,6 +328,8 @@ struct hantro_vp9_decoded_buffer_info {
/* Info needed when the decoded frame serves as a reference frame. */
unsigned short width;
unsigned short height;
+ size_t chroma_offset;
+ size_t mv_offset;
u32 bit_depth : 4;
};
@@ -469,11 +471,14 @@ hantro_get_dst_buf(struct hantro_ctx *ctx)
bool hantro_needs_postproc(const struct hantro_ctx *ctx,
const struct hantro_fmt *fmt);
+dma_addr_t
+hantro_postproc_get_dec_buf_addr(struct hantro_ctx *ctx, int index);
+
static inline dma_addr_t
hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
{
if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
- return ctx->postproc.dec_q[vb->index].dma;
+ return hantro_postproc_get_dec_buf_addr(ctx, vb->index);
return vb2_dma_contig_plane_dma_addr(vb, 0);
}
@@ -485,8 +490,8 @@ vb2_to_hantro_decoded_buf(struct vb2_buffer *buf)
void hantro_postproc_disable(struct hantro_ctx *ctx);
void hantro_postproc_enable(struct hantro_ctx *ctx);
+int hantro_postproc_init(struct hantro_ctx *ctx);
void hantro_postproc_free(struct hantro_ctx *ctx);
-int hantro_postproc_alloc(struct hantro_ctx *ctx);
int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
struct v4l2_frmsizeenum *fsize);
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 3a2a0f28c..db3df6cc4 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -235,8 +235,10 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
* The Kernel needs access to the JPEG destination buffer for the
* JPEG encoder to fill in the JPEG headers.
*/
- if (!ctx->is_encoder)
+ if (!ctx->is_encoder) {
dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ dst_vq->max_num_buffers = MAX_POSTPROC_BUFFERS;
+ }
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
index ee5f14c5f..b880a6849 100644
--- a/drivers/media/platform/verisilicon/hantro_g2.c
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -8,6 +8,8 @@
#include "hantro_hw.h"
#include "hantro_g2_regs.h"
+#define G2_ALIGN 16
+
void hantro_g2_check_idle(struct hantro_dev *vpu)
{
int i;
@@ -42,3 +44,15 @@ irqreturn_t hantro_g2_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+
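+/*
+ * The decoded chroma plane is stored right after the luma plane, so the
+ * chroma offset equals the luma plane size in bytes.
+ */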
+size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx)
+{
+ return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
+}
+
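+/*
+ * Motion vectors follow the 4:2:0 luma and chroma planes (3/2 of the luma
+ * size), aligned to G2_ALIGN bytes.
+ */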
+size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx)
+{
+ size_t cr_offset = hantro_g2_chroma_offset(ctx);
+
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
index a9d4ac84a..d3f8c33eb 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -8,20 +8,6 @@
#include "hantro_hw.h"
#include "hantro_g2_regs.h"
-#define G2_ALIGN 16
-
-static size_t hantro_hevc_chroma_offset(struct hantro_ctx *ctx)
-{
- return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
-}
-
-static size_t hantro_hevc_motion_vectors_offset(struct hantro_ctx *ctx)
-{
- size_t cr_offset = hantro_hevc_chroma_offset(ctx);
-
- return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
-}
-
static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
@@ -384,8 +370,8 @@ static int set_ref(struct hantro_ctx *ctx)
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
struct hantro_decoded_buffer *dst;
- size_t cr_offset = hantro_hevc_chroma_offset(ctx);
- size_t mv_offset = hantro_hevc_motion_vectors_offset(ctx);
+ size_t cr_offset = hantro_g2_chroma_offset(ctx);
+ size_t mv_offset = hantro_g2_motion_vectors_offset(ctx);
u32 max_ref_frames;
u16 dpb_longterm_e;
static const struct hantro_reg cur_poc[] = {
diff --git a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
index 6fc4b5555..342e543de 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
@@ -16,8 +16,6 @@
#include "hantro_vp9.h"
#include "hantro_g2_regs.h"
-#define G2_ALIGN 16
-
enum hantro_ref_frames {
INTRA_FRAME = 0,
LAST_FRAME = 1,
@@ -90,22 +88,6 @@ static int start_prepare_run(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_
return 0;
}
-static size_t chroma_offset(const struct hantro_ctx *ctx,
- const struct v4l2_ctrl_vp9_frame *dec_params)
-{
- int bytes_per_pixel = dec_params->bit_depth == 8 ? 1 : 2;
-
- return ctx->src_fmt.width * ctx->src_fmt.height * bytes_per_pixel;
-}
-
-static size_t mv_offset(const struct hantro_ctx *ctx,
- const struct v4l2_ctrl_vp9_frame *dec_params)
-{
- size_t cr_offset = chroma_offset(ctx, dec_params);
-
- return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
-}
-
static struct hantro_decoded_buffer *
get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
{
@@ -156,11 +138,13 @@ static void config_output(struct hantro_ctx *ctx,
luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
hantro_write_addr(ctx->dev, G2_OUT_LUMA_ADDR, luma_addr);
- chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ chroma_addr = luma_addr + hantro_g2_chroma_offset(ctx);
hantro_write_addr(ctx->dev, G2_OUT_CHROMA_ADDR, chroma_addr);
+ dst->vp9.chroma_offset = hantro_g2_chroma_offset(ctx);
- mv_addr = luma_addr + mv_offset(ctx, dec_params);
+ mv_addr = luma_addr + hantro_g2_motion_vectors_offset(ctx);
hantro_write_addr(ctx->dev, G2_OUT_MV_ADDR, mv_addr);
+ dst->vp9.mv_offset = hantro_g2_motion_vectors_offset(ctx);
}
struct hantro_vp9_ref_reg {
@@ -195,7 +179,7 @@ static void config_ref(struct hantro_ctx *ctx,
luma_addr = hantro_get_dec_buf_addr(ctx, &buf->base.vb.vb2_buf);
hantro_write_addr(ctx->dev, ref_reg->y_base, luma_addr);
- chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ chroma_addr = luma_addr + buf->vp9.chroma_offset;
hantro_write_addr(ctx->dev, ref_reg->c_base, chroma_addr);
}
@@ -238,7 +222,7 @@ static void config_ref_registers(struct hantro_ctx *ctx,
config_ref(ctx, dst, &ref_regs[2], dec_params, dec_params->alt_frame_ts);
mv_addr = hantro_get_dec_buf_addr(ctx, &mv_ref->base.vb.vb2_buf) +
- mv_offset(ctx, dec_params);
+ mv_ref->vp9.mv_offset;
hantro_write_addr(ctx->dev, G2_REF_MV_ADDR(0), mv_addr);
hantro_reg_write(ctx->dev, &vp9_last_sign_bias,
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
index 7f33f7b07..9aec8a79a 100644
--- a/drivers/media/platform/verisilicon/hantro_hw.h
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -40,6 +40,8 @@
#define AV1_MAX_FRAME_BUF_COUNT (V4L2_AV1_TOTAL_REFS_PER_FRAME + 1)
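+/* Upper bound on capture buffers, and thus on post-processor dec_q entries */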
+#define MAX_POSTPROC_BUFFERS 64
+
struct hantro_dev;
struct hantro_ctx;
struct hantro_buf;
@@ -336,7 +338,7 @@ struct hantro_av1_dec_hw_ctx {
* @dec_q: References buffers, in decoder format.
*/
struct hantro_postproc_ctx {
- struct hantro_aux_buf dec_q[VB2_MAX_FRAME];
+ struct hantro_aux_buf dec_q[MAX_POSTPROC_BUFFERS];
};
/**
@@ -519,6 +521,9 @@ hantro_av1_mv_size(unsigned int width, unsigned int height)
return ALIGN(num_sbs * 384, 16) * 2 + 512;
}
+size_t hantro_g2_chroma_offset(struct hantro_ctx *ctx);
+size_t hantro_g2_motion_vectors_offset(struct hantro_ctx *ctx);
+
int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index 64d6fb852..41e931763 100644
--- a/drivers/media/platform/verisilicon/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -177,9 +177,11 @@ static int hantro_postproc_g2_enum_framesizes(struct hantro_ctx *ctx,
void hantro_postproc_free(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *queue = &m2m_ctx->cap_q_ctx.q;
unsigned int i;
- for (i = 0; i < VB2_MAX_FRAME; ++i) {
+ for (i = 0; i < queue->max_num_buffers; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
if (priv->cpu) {
@@ -190,20 +192,17 @@ void hantro_postproc_free(struct hantro_ctx *ctx)
}
}
-int hantro_postproc_alloc(struct hantro_ctx *ctx)
+static unsigned int hantro_postproc_buffer_size(struct hantro_ctx *ctx)
{
- struct hantro_dev *vpu = ctx->dev;
- struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
- struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
- unsigned int num_buffers = cap_queue->num_buffers;
struct v4l2_pix_format_mplane pix_mp;
const struct hantro_fmt *fmt;
- unsigned int i, buf_size;
+ unsigned int buf_size;
/* this should always pick native format */
fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth, HANTRO_AUTO_POSTPROC);
if (!fmt)
- return -EINVAL;
+ return 0;
+
v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
ctx->src_fmt.height);
@@ -221,23 +220,77 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
buf_size += hantro_av1_mv_size(pix_mp.width,
pix_mp.height);
- for (i = 0; i < num_buffers; ++i) {
- struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+ return buf_size;
+}
+
+static int hantro_postproc_alloc(struct hantro_ctx *ctx, int index)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[index];
+ unsigned int buf_size = hantro_postproc_buffer_size(ctx);
+
+ if (!buf_size)
+ return -EINVAL;
+
+ /*
+ * The buffers on this queue are meant as intermediate
+ * buffers for the decoder, so no mapping is needed.
+ */
+ priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+ priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
+ GFP_KERNEL, priv->attrs);
+ if (!priv->cpu)
+ return -ENOMEM;
+ priv->size = buf_size;
+
+ return 0;
+}
- /*
- * The buffers on this queue are meant as intermediate
- * buffers for the decoder, so no mapping is needed.
- */
- priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
- priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
- GFP_KERNEL, priv->attrs);
- if (!priv->cpu)
- return -ENOMEM;
- priv->size = buf_size;
+int hantro_postproc_init(struct hantro_ctx *ctx)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
+ unsigned int num_buffers = vb2_get_num_buffers(cap_queue);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_buffers; i++) {
+ ret = hantro_postproc_alloc(ctx, i);
+ if (ret)
+ return ret;
}
+
return 0;
}
+dma_addr_t
+hantro_postproc_get_dec_buf_addr(struct hantro_ctx *ctx, int index)
+{
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[index];
+ unsigned int buf_size = hantro_postproc_buffer_size(ctx);
+ struct hantro_dev *vpu = ctx->dev;
+ int ret;
+
+ if (priv->size < buf_size && priv->cpu) {
+ /* buffer is too small, release it */
+ dma_free_attrs(vpu->dev, priv->size, priv->cpu,
+ priv->dma, priv->attrs);
+ priv->cpu = NULL;
+ }
+
+ if (!priv->cpu) {
+ /* buffer not already allocated, try getting a new one */
+ ret = hantro_postproc_alloc(ctx, index);
+ if (ret)
+ return 0;
+ }
+
+ if (!priv->cpu)
+ return 0;
+
+ return priv->dma;
+}
+
static void hantro_postproc_g1_disable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
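
The postprocessor hunks above replace the single up-front allocation with a per-index, on-demand scheme: hantro_postproc_buffer_size() computes the required size, and hantro_postproc_get_dec_buf_addr() frees and re-allocates a slot whose buffer has become too small, which matters once CAPTURE buffers can be created after streaming starts. A minimal sketch of the same grow-on-demand DMA pattern, using only the generic DMA API (struct aux_buf and the helper name are illustrative, not the driver's):

	#include <linux/dma-mapping.h>

	struct aux_buf {
		void *cpu;
		dma_addr_t dma;
		size_t size;
		unsigned long attrs;
	};

	static int aux_buf_ensure(struct device *dev, struct aux_buf *buf,
				  size_t needed)
	{
		if (buf->cpu && buf->size < needed) {
			/* Existing buffer is too small: release it first. */
			dma_free_attrs(dev, buf->size, buf->cpu,
				       buf->dma, buf->attrs);
			buf->cpu = NULL;
		}

		if (!buf->cpu) {
			/* Intermediate buffer only, no CPU mapping needed. */
			buf->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
			buf->cpu = dma_alloc_attrs(dev, needed, &buf->dma,
						   GFP_KERNEL, buf->attrs);
			if (!buf->cpu)
				return -ENOMEM;
			buf->size = needed;
		}

		return 0;
	}
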
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index db145519f..941fa23c2 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -514,25 +514,14 @@ static int hantro_set_fmt_out(struct hantro_ctx *ctx,
return ret;
if (!ctx->is_encoder) {
- struct vb2_queue *peer_vq;
-
/*
* In order to support dynamic resolution change,
* the decoder admits a resolution change, as long
- * as the pixelformat remains. Can't be done if streaming.
- */
- if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
- pix_mp->pixelformat != ctx->src_fmt.pixelformat))
- return -EBUSY;
- /*
- * Since format change on the OUTPUT queue will reset
- * the CAPTURE queue, we can't allow doing so
- * when the CAPTURE queue has buffers allocated.
+ * as the pixelformat remains.
*/
- peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (vb2_is_busy(peer_vq))
+ if (vb2_is_streaming(vq) && pix_mp->pixelformat != ctx->src_fmt.pixelformat) {
return -EBUSY;
+ }
} else {
/*
* The encoder doesn't admit a format change if
@@ -577,15 +566,8 @@ static int hantro_set_fmt_out(struct hantro_ctx *ctx,
static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp)
{
- struct vb2_queue *vq;
int ret;
- /* Change not allowed if queue is busy. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (vb2_is_busy(vq))
- return -EBUSY;
-
if (ctx->is_encoder) {
struct vb2_queue *peer_vq;
@@ -936,7 +918,7 @@ static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
}
if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) {
- ret = hantro_postproc_alloc(ctx);
+ ret = hantro_postproc_init(ctx);
if (ret)
goto err_codec_exit;
}
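
With the intermediate buffers now sized per slot, the OUTPUT-side format check no longer has to refuse a change just because the CAPTURE queue holds allocations, and hantro_set_fmt_cap() drops its busy check entirely; only swapping the coded pixelformat while the OUTPUT queue streams is still rejected. A sketch of the relaxed policy, assuming a decoder context that tracks its current source format (cur_fourcc stands in for ctx->src_fmt.pixelformat):

	static int check_out_fmt_change(struct vb2_queue *vq,
					const struct v4l2_pix_format_mplane *pix_mp,
					u32 cur_fourcc)
	{
		/* Resolution may change freely; the codec may not. */
		if (vb2_is_streaming(vq) && pix_mp->pixelformat != cur_fourcc)
			return -EBUSY;

		return 0;
	}
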
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 5de6b6694..31e9e92e7 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -89,10 +89,10 @@ static int video_mux_link_setup(struct media_entity *entity,
/* Propagate the active format to the source */
sd_state = v4l2_subdev_lock_and_get_active_state(sd);
- source_mbusformat = v4l2_subdev_get_pad_format(sd, sd_state,
- source_pad);
- *source_mbusformat = *v4l2_subdev_get_pad_format(sd, sd_state,
- vmux->active);
+ source_mbusformat = v4l2_subdev_state_get_format(sd_state,
+ source_pad);
+ *source_mbusformat = *v4l2_subdev_state_get_format(sd_state,
+ vmux->active);
v4l2_subdev_unlock_state(sd_state);
} else {
if (vmux->active != local->index)
@@ -154,11 +154,11 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
struct media_pad *pad = &vmux->pads[sdformat->pad];
u16 source_pad = sd->entity.num_pads - 1;
- mbusformat = v4l2_subdev_get_pad_format(sd, sd_state, sdformat->pad);
+ mbusformat = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
if (!mbusformat)
return -EINVAL;
- source_mbusformat = v4l2_subdev_get_pad_format(sd, sd_state, source_pad);
+ source_mbusformat = v4l2_subdev_state_get_format(sd_state, source_pad);
if (!source_mbusformat)
return -EINVAL;
@@ -268,8 +268,8 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
/* Source pad mirrors active sink pad, no limitations on sink pads */
if ((pad->flags & MEDIA_PAD_FL_SOURCE) && vmux->active >= 0)
- sdformat->format = *v4l2_subdev_get_pad_format(sd, sd_state,
- vmux->active);
+ sdformat->format = *v4l2_subdev_state_get_format(sd_state,
+ vmux->active);
*mbusformat = sdformat->format;
@@ -282,8 +282,8 @@ static int video_mux_set_format(struct v4l2_subdev *sd,
return 0;
}
-static int video_mux_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int video_mux_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
struct v4l2_mbus_framefmt *mbusformat;
@@ -292,7 +292,7 @@ static int video_mux_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&vmux->lock);
for (i = 0; i < sd->entity.num_pads; i++) {
- mbusformat = v4l2_subdev_get_pad_format(sd, sd_state, i);
+ mbusformat = v4l2_subdev_state_get_format(sd_state, i);
*mbusformat = video_mux_format_mbus_default;
}
@@ -302,7 +302,6 @@ static int video_mux_init_cfg(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops video_mux_pad_ops = {
- .init_cfg = video_mux_init_cfg,
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = video_mux_set_format,
};
@@ -312,6 +311,10 @@ static const struct v4l2_subdev_ops video_mux_subdev_ops = {
.video = &video_mux_subdev_video_ops,
};
+static const struct v4l2_subdev_internal_ops video_mux_internal_ops = {
+ .init_state = video_mux_init_state,
+};
+
static int video_mux_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
@@ -400,6 +403,7 @@ static int video_mux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vmux);
v4l2_subdev_init(&vmux->subdev, &video_mux_subdev_ops);
+ vmux->subdev.internal_ops = &video_mux_internal_ops;
snprintf(vmux->subdev.name, sizeof(vmux->subdev.name), "%pOFn", np);
vmux->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
vmux->subdev.dev = dev;
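
This conversion follows the 6.8 subdev API change: the .init_cfg pad operation becomes .init_state in v4l2_subdev_internal_ops, and v4l2_subdev_get_pad_format()/v4l2_subdev_get_try_format() give way to v4l2_subdev_state_get_format(), which takes only the state and the pad. A minimal sketch of the new shape for a hypothetical subdev (my_sd_* names and the default format are illustrative):

	#include <media/v4l2-subdev.h>

	static int my_sd_init_state(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *sd_state)
	{
		unsigned int i;

		for (i = 0; i < sd->entity.num_pads; i++) {
			struct v4l2_mbus_framefmt *fmt =
				v4l2_subdev_state_get_format(sd_state, i);

			fmt->width = 640;
			fmt->height = 480;
			fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
			fmt->field = V4L2_FIELD_NONE;
		}

		return 0;
	}

	static const struct v4l2_subdev_internal_ops my_sd_internal_ops = {
		.init_state = my_sd_init_state,
	};

	/* At probe time, right after v4l2_subdev_init():
	 *	sd->internal_ops = &my_sd_internal_ops;
	 */
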
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index 5b53745fe..f953d5474 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -386,14 +386,6 @@ static void xcsi2rxss_log_counters(struct xcsi2rxss_state *state)
}
}
-/**
- * xcsi2rxss_log_status - Logs the status of the CSI-2 Receiver
- * @sd: Pointer to V4L2 subdevice structure
- *
- * This function prints the current status of Xilinx MIPI CSI-2
- *
- * Return: 0 on success
- */
static int xcsi2rxss_log_status(struct v4l2_subdev *sd)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
@@ -631,16 +623,6 @@ static irqreturn_t xcsi2rxss_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-/**
- * xcsi2rxss_s_stream - It is used to start/stop the streaming.
- * @sd: V4L2 Sub device
- * @enable: Flag (True / False)
- *
- * This function controls the start or stop of streaming for the
- * Xilinx MIPI CSI-2 Rx Subsystem.
- *
- * Return: 0 on success, errors otherwise
- */
static int xcsi2rxss_s_stream(struct v4l2_subdev *sd, int enable)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
@@ -671,8 +653,7 @@ __xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&xcsi2rxss->subdev,
- sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &xcsi2rxss->format;
default:
@@ -680,18 +661,8 @@ __xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
}
}
-/**
- * xcsi2rxss_init_cfg - Initialise the pad format config to default
- * @sd: Pointer to V4L2 Sub device structure
- * @sd_state: Pointer to sub device state structure
- *
- * This function is used to initialize the pad format with the default
- * values.
- *
- * Return: 0 on success
- */
-static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int xcsi2rxss_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
struct v4l2_mbus_framefmt *format;
@@ -699,7 +670,7 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
mutex_lock(&xcsi2rxss->lock);
for (i = 0; i < XCSI_MEDIA_PADS; i++) {
- format = v4l2_subdev_get_try_format(sd, sd_state, i);
+ format = v4l2_subdev_state_get_format(sd_state, i);
*format = xcsi2rxss->default_format;
}
mutex_unlock(&xcsi2rxss->lock);
@@ -707,16 +678,6 @@ static int xcsi2rxss_init_cfg(struct v4l2_subdev *sd,
return 0;
}
-/**
- * xcsi2rxss_get_format - Get the pad format
- * @sd: Pointer to V4L2 Sub device structure
- * @sd_state: Pointer to sub device state structure
- * @fmt: Pointer to pad level media bus format
- *
- * This function is used to get the pad format information.
- *
- * Return: 0 on success
- */
static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -732,19 +693,6 @@ static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
return 0;
}
-/**
- * xcsi2rxss_set_format - This is used to set the pad format
- * @sd: Pointer to V4L2 Sub device structure
- * @sd_state: Pointer to sub device state structure
- * @fmt: Pointer to pad level media bus format
- *
- * This function is used to set the pad format. Since the pad format is fixed
- * in hardware, it can't be modified on run time. So when a format set is
- * requested by application, all parameters except the format type is saved
- * for the pad and the original pad format is sent back to the application.
- *
- * Return: 0 on success
- */
static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -789,14 +737,6 @@ static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
return 0;
}
-/*
- * xcsi2rxss_enum_mbus_code - Handle pixel format enumeration
- * @sd: pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad configuration
- * @code: pointer to v4l2_subdev_mbus_code_enum structure
- *
- * Return: -EINVAL or zero on success
- */
static int xcsi2rxss_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -840,7 +780,6 @@ static const struct v4l2_subdev_video_ops xcsi2rxss_video_ops = {
};
static const struct v4l2_subdev_pad_ops xcsi2rxss_pad_ops = {
- .init_cfg = xcsi2rxss_init_cfg,
.get_fmt = xcsi2rxss_get_format,
.set_fmt = xcsi2rxss_set_format,
.enum_mbus_code = xcsi2rxss_enum_mbus_code,
@@ -853,6 +792,10 @@ static const struct v4l2_subdev_ops xcsi2rxss_ops = {
.pad = &xcsi2rxss_pad_ops
};
+static const struct v4l2_subdev_internal_ops xcsi2rxss_internal_ops = {
+ .init_state = xcsi2rxss_init_state,
+};
+
static int xcsi2rxss_parse_of(struct xcsi2rxss_state *xcsi2rxss)
{
struct device *dev = xcsi2rxss->dev;
@@ -1030,6 +973,7 @@ static int xcsi2rxss_probe(struct platform_device *pdev)
/* Initialize V4L2 subdevice and media entity */
subdev = &xcsi2rxss->subdev;
v4l2_subdev_init(subdev, &xcsi2rxss_ops);
+ subdev->internal_ops = &xcsi2rxss_internal_ops;
subdev->dev = dev;
strscpy(subdev->name, dev_name(dev), sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index 80353ca44..e05e528ff 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -256,8 +256,7 @@ __xtpg_get_pad_format(struct xtpg_device *xtpg,
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&xtpg->xvip.subdev,
- sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &xtpg->formats[pad];
default:
@@ -326,7 +325,7 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
{
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(subdev, sd_state, fse->pad);
+ format = v4l2_subdev_state_get_format(sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -354,11 +353,11 @@ static int xtpg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
struct xtpg_device *xtpg = to_tpg(subdev);
struct v4l2_mbus_framefmt *format;
- format = v4l2_subdev_get_try_format(subdev, fh->state, 0);
+ format = v4l2_subdev_state_get_format(fh->state, 0);
*format = xtpg->default_format;
if (xtpg->npads == 2) {
- format = v4l2_subdev_get_try_format(subdev, fh->state, 1);
+ format = v4l2_subdev_state_get_format(fh->state, 1);
*format = xtpg->default_format;
}
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 5b214bf7f..f1574edd2 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -260,7 +260,7 @@ int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
if (code->index)
return -EINVAL;
- format = v4l2_subdev_get_try_format(subdev, sd_state, code->pad);
+ format = v4l2_subdev_state_get_format(sd_state, code->pad);
code->code = format->code;
@@ -295,7 +295,7 @@ int xvip_enum_frame_size(struct v4l2_subdev *subdev,
if (fse->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- format = v4l2_subdev_get_try_format(subdev, sd_state, fse->pad);
+ format = v4l2_subdev_state_get_format(sd_state, fse->pad);
if (fse->index || fse->code != format->code)
return -EINVAL;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 2afe67ffa..74d69ce22 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -319,6 +319,7 @@ config IR_PWM_TX
tristate "PWM IR transmitter"
depends on LIRC
depends on PWM
+ depends on HIGH_RES_TIMERS
depends on OF
help
Say Y if you want to use a PWM based IR transmitter. This is
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 0034f615b..de5bb9a08 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -9,7 +9,9 @@
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <media/rc-core.h>
@@ -251,7 +253,6 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct hix5hd2_ir_priv *priv;
struct device_node *node = pdev->dev.of_node;
- const struct of_device_id *of_id;
const char *map_name;
int ret;
@@ -259,12 +260,11 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- of_id = of_match_device(hix5hd2_ir_table, dev);
- if (!of_id) {
+ priv->socdata = device_get_match_data(dev);
+ if (!priv->socdata) {
dev_err(dev, "Unable to initialize IR data\n");
return -ENODEV;
}
- priv->socdata = of_id->data;
priv->regmap = syscon_regmap_lookup_by_phandle(node,
"hisilicon,power-syscon");
diff --git a/drivers/media/rc/meson-ir-tx.c b/drivers/media/rc/meson-ir-tx.c
index 6355b7989..fded2c256 100644
--- a/drivers/media/rc/meson-ir-tx.c
+++ b/drivers/media/rc/meson-ir-tx.c
@@ -305,7 +305,7 @@ static int meson_irtx_mod_clock_probe(struct meson_irtx *ir,
return 0;
}
-static int __init meson_irtx_probe(struct platform_device *pdev)
+static int meson_irtx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_irtx *ir;
@@ -333,20 +333,17 @@ static int __init meson_irtx_probe(struct platform_device *pdev)
spin_lock_init(&ir->lock);
ret = meson_irtx_mod_clock_probe(ir, &clk_nr);
- if (ret) {
- dev_err(dev, "modulator clock setup failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "modulator clock setup failed\n");
+
meson_irtx_setup(ir, clk_nr);
ret = devm_request_irq(dev, irq,
meson_irtx_irqhandler,
IRQF_TRIGGER_RISING,
DRIVER_NAME, ir);
- if (ret) {
- dev_err(dev, "irq request failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "irq request failed\n");
rc = rc_allocate_device(RC_DRIVER_IR_RAW_TX);
if (!rc)
@@ -360,25 +357,15 @@ static int __init meson_irtx_probe(struct platform_device *pdev)
rc->s_tx_carrier = meson_irtx_set_carrier;
rc->s_tx_duty_cycle = meson_irtx_set_duty_cycle;
- ret = rc_register_device(rc);
+ ret = devm_rc_register_device(dev, rc);
if (ret < 0) {
- dev_err(dev, "rc_dev registration failed\n");
rc_free_device(rc);
- return ret;
+ return dev_err_probe(dev, ret, "rc_dev registration failed\n");
}
- platform_set_drvdata(pdev, rc);
-
return 0;
}
-static void meson_irtx_remove(struct platform_device *pdev)
-{
- struct rc_dev *rc = platform_get_drvdata(pdev);
-
- rc_unregister_device(rc);
-}
-
static const struct of_device_id meson_irtx_dt_match[] = {
{
.compatible = "amlogic,meson-g12a-ir-tx",
@@ -388,14 +375,13 @@ static const struct of_device_id meson_irtx_dt_match[] = {
MODULE_DEVICE_TABLE(of, meson_irtx_dt_match);
static struct platform_driver meson_irtx_pd = {
- .remove_new = meson_irtx_remove,
+ .probe = meson_irtx_probe,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_irtx_dt_match,
},
};
-
-module_platform_driver_probe(meson_irtx_pd, meson_irtx_probe);
+module_platform_driver(meson_irtx_pd);
MODULE_DESCRIPTION("Meson IR TX driver");
MODULE_AUTHOR("Viktor Prutyanov <viktor.prutyanov@phystech.edu>");
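
The meson-ir-tx changes are a common modernization: error paths collapse into dev_err_probe(), which logs the message (or records the deferral reason for -EPROBE_DEFER) and returns the error in one statement, and devm_rc_register_device() makes the remove() callback unnecessary, so the driver can drop module_platform_driver_probe() — whose __init probe is freed after boot, preventing late device binds — in favor of plain module_platform_driver(). A sketch of the error-path idiom, with my_setup() as a hypothetical stand-in for the clock/irq setup steps:

	static int my_setup(struct device *dev)
	{
		return 0;	/* stand-in for real hardware setup */
	}

	static int my_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		ret = my_setup(dev);
		if (ret)
			return dev_err_probe(dev, ret, "setup failed\n");

		return 0;
	}
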
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index c5f37c03a..fe368aebb 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/hrtimer.h>
+#include <linux/completion.h>
#include <media/rc-core.h>
#define DRIVER_NAME "pwm-ir-tx"
@@ -17,8 +19,14 @@
struct pwm_ir {
struct pwm_device *pwm;
- unsigned int carrier;
- unsigned int duty_cycle;
+ struct hrtimer timer;
+ struct completion tx_done;
+ struct pwm_state *state;
+ u32 carrier;
+ u32 duty_cycle;
+ const unsigned int *txbuf;
+ unsigned int txbuf_len;
+ unsigned int txbuf_index;
};
static const struct of_device_id pwm_ir_of_match[] = {
@@ -49,8 +57,8 @@ static int pwm_ir_set_carrier(struct rc_dev *dev, u32 carrier)
return 0;
}
-static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
- unsigned int count)
+static int pwm_ir_tx_sleep(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count)
{
struct pwm_ir *pwm_ir = dev->priv;
struct pwm_device *pwm = pwm_ir->pwm;
@@ -68,7 +76,7 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
for (i = 0; i < count; i++) {
state.enabled = !(i % 2);
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
edge = ktime_add_us(edge, txbuf[i]);
delta = ktime_us_delta(edge, ktime_get());
@@ -77,11 +85,67 @@ static int pwm_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
}
state.enabled = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
return count;
}
+static int pwm_ir_tx_atomic(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count)
+{
+ struct pwm_ir *pwm_ir = dev->priv;
+ struct pwm_device *pwm = pwm_ir->pwm;
+ struct pwm_state state;
+
+ pwm_init_state(pwm, &state);
+
+ state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_ir->carrier);
+ pwm_set_relative_duty_cycle(&state, pwm_ir->duty_cycle, 100);
+
+ pwm_ir->txbuf = txbuf;
+ pwm_ir->txbuf_len = count;
+ pwm_ir->txbuf_index = 0;
+ pwm_ir->state = &state;
+
+ hrtimer_start(&pwm_ir->timer, 0, HRTIMER_MODE_REL);
+
+ wait_for_completion(&pwm_ir->tx_done);
+
+ return count;
+}
+
+static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
+{
+ struct pwm_ir *pwm_ir = container_of(timer, struct pwm_ir, timer);
+ ktime_t now;
+
+ /*
+ * If we happen to hit an odd latency spike, loop through the
+ * pulses until we catch up.
+ */
+ do {
+ u64 ns;
+
+ pwm_ir->state->enabled = !(pwm_ir->txbuf_index % 2);
+ pwm_apply_atomic(pwm_ir->pwm, pwm_ir->state);
+
+ if (pwm_ir->txbuf_index >= pwm_ir->txbuf_len) {
+ complete(&pwm_ir->tx_done);
+
+ return HRTIMER_NORESTART;
+ }
+
+ ns = US_TO_NS(pwm_ir->txbuf[pwm_ir->txbuf_index]);
+ hrtimer_add_expires_ns(timer, ns);
+
+ pwm_ir->txbuf_index++;
+
+ now = timer->base->get_time();
+ } while (hrtimer_get_expires_tv64(timer) < now);
+
+ return HRTIMER_RESTART;
+}
+
static int pwm_ir_probe(struct platform_device *pdev)
{
struct pwm_ir *pwm_ir;
@@ -103,10 +167,19 @@ static int pwm_ir_probe(struct platform_device *pdev)
if (!rcdev)
return -ENOMEM;
+ if (pwm_might_sleep(pwm_ir->pwm)) {
+ dev_info(&pdev->dev, "TX will not be accurate as PWM device might sleep\n");
+ rcdev->tx_ir = pwm_ir_tx_sleep;
+ } else {
+ init_completion(&pwm_ir->tx_done);
+ hrtimer_init(&pwm_ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pwm_ir->timer.function = pwm_ir_timer;
+ rcdev->tx_ir = pwm_ir_tx_atomic;
+ }
+
rcdev->priv = pwm_ir;
rcdev->driver_name = DRIVER_NAME;
rcdev->device_name = DEVICE_NAME;
- rcdev->tx_ir = pwm_ir_tx;
rcdev->s_tx_duty_cycle = pwm_ir_set_duty_cycle;
rcdev->s_tx_carrier = pwm_ir_set_carrier;
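
When pwm_might_sleep() reports that the PWM can be driven from atomic context, the new TX path programs each edge from an hrtimer callback with pwm_apply_atomic() while the requesting thread sleeps on a completion; that gives far tighter edge timing than the usleep_range()-based loop kept as the fallback. A skeleton of the hrtimer-plus-completion pattern, with the hardware poke elided (my_dev and the fixed 1 ms spacing are illustrative):

	#include <linux/hrtimer.h>
	#include <linux/completion.h>

	struct my_dev {
		struct hrtimer timer;
		struct completion done;
		unsigned int index;
		unsigned int len;
	};

	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
	{
		struct my_dev *d = container_of(t, struct my_dev, timer);

		if (d->index >= d->len) {
			complete(&d->done);
			return HRTIMER_NORESTART;
		}

		/* ... toggle the hardware for edge d->index here ... */

		hrtimer_add_expires_ns(t, 1000 * NSEC_PER_USEC);
		d->index++;
		return HRTIMER_RESTART;
	}

	static void my_dev_init(struct my_dev *d)
	{
		init_completion(&d->done);
		hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		d->timer.function = my_timer_fn;
	}

	static void my_send(struct my_dev *d, unsigned int len)
	{
		d->index = 0;
		d->len = len;
		reinit_completion(&d->done);
		hrtimer_start(&d->timer, 0, HRTIMER_MODE_REL);
		wait_for_completion(&d->done);	/* callback runs in hardirq */
	}
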
diff --git a/drivers/media/test-drivers/Kconfig b/drivers/media/test-drivers/Kconfig
index 459b433e9..5a5379524 100644
--- a/drivers/media/test-drivers/Kconfig
+++ b/drivers/media/test-drivers/Kconfig
@@ -12,7 +12,6 @@ config VIDEO_VIM2M
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
help
This is a virtual test device for the memory-to-memory driver
framework.
diff --git a/drivers/media/test-drivers/vicodec/Kconfig b/drivers/media/test-drivers/vicodec/Kconfig
index a7a828eec..4ea0689c3 100644
--- a/drivers/media/test-drivers/vicodec/Kconfig
+++ b/drivers/media/test-drivers/vicodec/Kconfig
@@ -5,7 +5,6 @@ config VIDEO_VICODEC
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
help
Driver for a Virtual Codec
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 6f0e20df7..e13f5452b 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1240,6 +1240,12 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
struct vicodec_ctx *ctx = file2ctx(file);
int ret;
+ /*
+ * This ioctl should not be used with a stateless codec that doesn't
+ * support holding buffers and the associated flush command.
+ */
+ WARN_ON(ctx->is_stateless);
+
ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
if (ret < 0)
return ret;
@@ -1718,6 +1724,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->max_num_buffers = 64;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &vicodec_qops;
@@ -2025,7 +2032,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
static int register_instance(struct vicodec_dev *dev,
struct vicodec_dev_instance *dev_instance,
- const char *name, bool is_enc)
+ const char *name, bool is_enc, bool is_stateless)
{
struct video_device *vfd;
int ret;
@@ -2045,10 +2052,11 @@ static int register_instance(struct vicodec_dev *dev,
strscpy(vfd->name, name, sizeof(vfd->name));
vfd->device_caps = V4L2_CAP_STREAMING |
(multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M);
- if (is_enc) {
+ if (is_enc || is_stateless) {
v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
- } else {
+ }
+ if (!is_enc) {
v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
}
@@ -2107,17 +2115,17 @@ static int vicodec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
ret = register_instance(dev, &dev->stateful_enc, "stateful-encoder",
- true);
+ true, false);
if (ret)
goto unreg_dev;
ret = register_instance(dev, &dev->stateful_dec, "stateful-decoder",
- false);
+ false, false);
if (ret)
goto unreg_sf_enc;
ret = register_instance(dev, &dev->stateless_dec, "stateless-decoder",
- false);
+ false, true);
if (ret)
goto unreg_sf_dec;
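
register_instance() now takes an is_stateless flag so the stateless decoder, like the encoder, hides the decoder-command ioctls: v4l2_disable_ioctl() makes the v4l2 core answer -ENOTTY before the driver is ever called, and the WARN_ON() added to vicodec_decoder_cmd() then catches any path that would still reach it on a stateless instance. A sketch of the registration-time gating for a hypothetical multi-personality driver:

	static void my_mask_ioctls(struct video_device *vfd,
				   bool is_enc, bool is_stateless)
	{
		/* Stateless decoders take no decoder commands either. */
		if (is_enc || is_stateless) {
			v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
			v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
		}
		if (!is_enc) {
			v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
			v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
		}
	}
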
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.c b/drivers/media/test-drivers/vidtv/vidtv_pes.c
index 782e5e7fb..6a360a0a3 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_pes.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
#include <linux/types.h>
+#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index aa944270e..2a2d19d23 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -432,7 +432,7 @@ static struct vimc_ent_device *vimc_capture_add(struct vimc_device *vimc,
q->mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
? &vb2_dma_contig_memops : &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->lock = &vcapture->lock;
q->dev = v4l2_dev->dev;
diff --git a/drivers/media/test-drivers/vimc/vimc-debayer.c b/drivers/media/test-drivers/vimc/vimc-debayer.c
index f671251fd..d72ed086e 100644
--- a/drivers/media/test-drivers/vimc/vimc-debayer.c
+++ b/drivers/media/test-drivers/vimc/vimc-debayer.c
@@ -150,18 +150,18 @@ static bool vimc_debayer_src_code_is_valid(u32 code)
return false;
}
-static int vimc_debayer_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int vimc_debayer_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct vimc_debayer_device *vdebayer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mf = v4l2_subdev_state_get_format(sd_state, 0);
*mf = sink_fmt_default;
for (i = 1; i < sd->entity.num_pads; i++) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, i);
+ mf = v4l2_subdev_state_get_format(sd_state, i);
*mf = sink_fmt_default;
mf->code = vdebayer->src_code;
}
@@ -221,7 +221,7 @@ static int vimc_debayer_get_fmt(struct v4l2_subdev *sd,
/* Get the current sink format */
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
- *v4l2_subdev_get_try_format(sd, sd_state, 0) :
+ *v4l2_subdev_state_get_format(sd_state, 0) :
vdebayer->sink_fmt;
/* Set the right code for the source pad */
@@ -267,8 +267,8 @@ static int vimc_debayer_set_fmt(struct v4l2_subdev *sd,
sink_fmt = &vdebayer->sink_fmt;
src_code = &vdebayer->src_code;
} else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- src_code = &v4l2_subdev_get_try_format(sd, sd_state, 1)->code;
+ sink_fmt = v4l2_subdev_state_get_format(sd_state, 0);
+ src_code = &v4l2_subdev_state_get_format(sd_state, 1)->code;
}
/*
@@ -307,7 +307,6 @@ static int vimc_debayer_set_fmt(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops vimc_debayer_pad_ops = {
- .init_cfg = vimc_debayer_init_cfg,
.enum_mbus_code = vimc_debayer_enum_mbus_code,
.enum_frame_size = vimc_debayer_enum_frame_size,
.get_fmt = vimc_debayer_get_fmt,
@@ -395,6 +394,10 @@ static const struct v4l2_subdev_ops vimc_debayer_ops = {
.video = &vimc_debayer_video_ops,
};
+static const struct v4l2_subdev_internal_ops vimc_debayer_internal_ops = {
+ .init_state = vimc_debayer_init_state,
+};
+
static unsigned int vimc_debayer_get_val(const u8 *bytes,
const unsigned int n_bytes)
{
@@ -595,6 +598,8 @@ static struct vimc_ent_device *vimc_debayer_add(struct vimc_device *vimc,
if (ret)
goto err_free_hdl;
+ vdebayer->sd.internal_ops = &vimc_debayer_internal_ops;
+
vdebayer->ved.process_frame = vimc_debayer_process_frame;
vdebayer->ved.dev = vimc->mdev.dev;
vdebayer->mean_win_size = vimc_debayer_ctrl_mean_win_size.def;
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index b671774e2..afe13d6af 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -70,19 +70,19 @@ vimc_scaler_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
return r;
}
-static int vimc_scaler_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int vimc_scaler_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf;
struct v4l2_rect *r;
unsigned int i;
for (i = 0; i < sd->entity.num_pads; i++) {
- mf = v4l2_subdev_get_try_format(sd, sd_state, i);
+ mf = v4l2_subdev_state_get_format(sd_state, i);
*mf = fmt_default;
}
- r = v4l2_subdev_get_try_crop(sd, sd_state, VIMC_SCALER_SINK);
+ r = v4l2_subdev_state_get_crop(sd_state, VIMC_SCALER_SINK);
*r = crop_rect_default;
return 0;
@@ -138,7 +138,7 @@ vimc_scaler_pad_format(struct vimc_scaler_device *vscaler,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&vscaler->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &vscaler->fmt[pad];
}
@@ -149,8 +149,7 @@ vimc_scaler_pad_crop(struct vimc_scaler_device *vscaler,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&vscaler->sd, sd_state,
- VIMC_SCALER_SINK);
+ return v4l2_subdev_state_get_crop(sd_state, VIMC_SCALER_SINK);
else
return &vscaler->crop_rect;
}
@@ -293,7 +292,6 @@ static int vimc_scaler_set_selection(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops vimc_scaler_pad_ops = {
- .init_cfg = vimc_scaler_init_cfg,
.enum_mbus_code = vimc_scaler_enum_mbus_code,
.enum_frame_size = vimc_scaler_enum_frame_size,
.get_fmt = vimc_scaler_get_fmt,
@@ -348,6 +346,10 @@ static const struct v4l2_subdev_ops vimc_scaler_ops = {
.video = &vimc_scaler_video_ops,
};
+static const struct v4l2_subdev_internal_ops vimc_scaler_internal_ops = {
+ .init_state = vimc_scaler_init_state,
+};
+
static void vimc_scaler_fill_src_frame(const struct vimc_scaler_device *const vscaler,
const u8 *const sink_frame)
{
@@ -425,6 +427,8 @@ static struct vimc_ent_device *vimc_scaler_add(struct vimc_device *vimc,
return ERR_PTR(ret);
}
+ vscaler->sd.internal_ops = &vimc_scaler_internal_ops;
+
vscaler->ved.process_frame = vimc_scaler_process_frame;
vscaler->ved.dev = vimc->mdev.dev;
diff --git a/drivers/media/test-drivers/vimc/vimc-sensor.c b/drivers/media/test-drivers/vimc/vimc-sensor.c
index 41a3dce2d..5e34b1aed 100644
--- a/drivers/media/test-drivers/vimc/vimc-sensor.c
+++ b/drivers/media/test-drivers/vimc/vimc-sensor.c
@@ -41,15 +41,15 @@ static const struct v4l2_mbus_framefmt fmt_default = {
.colorspace = V4L2_COLORSPACE_SRGB,
};
-static int vimc_sensor_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+static int vimc_sensor_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
unsigned int i;
for (i = 0; i < sd->entity.num_pads; i++) {
struct v4l2_mbus_framefmt *mf;
- mf = v4l2_subdev_get_try_format(sd, sd_state, i);
+ mf = v4l2_subdev_state_get_format(sd_state, i);
*mf = fmt_default;
}
@@ -100,7 +100,7 @@ static int vimc_sensor_get_fmt(struct v4l2_subdev *sd,
container_of(sd, struct vimc_sensor_device, sd);
fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
- *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) :
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) :
vsensor->mbus_format;
return 0;
@@ -159,7 +159,7 @@ static int vimc_sensor_set_fmt(struct v4l2_subdev *sd,
mf = &vsensor->mbus_format;
} else {
- mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ mf = v4l2_subdev_state_get_format(sd_state, fmt->pad);
}
/* Set the new format */
@@ -183,7 +183,6 @@ static int vimc_sensor_set_fmt(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_pad_ops vimc_sensor_pad_ops = {
- .init_cfg = vimc_sensor_init_cfg,
.enum_mbus_code = vimc_sensor_enum_mbus_code,
.enum_frame_size = vimc_sensor_enum_frame_size,
.get_fmt = vimc_sensor_get_fmt,
@@ -294,6 +293,10 @@ static const struct v4l2_subdev_ops vimc_sensor_ops = {
.video = &vimc_sensor_video_ops,
};
+static const struct v4l2_subdev_internal_ops vimc_sensor_internal_ops = {
+ .init_state = vimc_sensor_init_state,
+};
+
static int vimc_sensor_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vimc_sensor_device *vsensor =
@@ -429,6 +432,8 @@ static struct vimc_ent_device *vimc_sensor_add(struct vimc_device *vimc,
if (ret)
goto err_free_tpg;
+ vsensor->sd.internal_ops = &vimc_sensor_internal_ops;
+
vsensor->ved.process_frame = vimc_sensor_process_frame;
vsensor->ved.dev = vimc->mdev.dev;
diff --git a/drivers/media/test-drivers/visl/Kconfig b/drivers/media/test-drivers/visl/Kconfig
index 7508b904f..37be9f267 100644
--- a/drivers/media/test-drivers/visl/Kconfig
+++ b/drivers/media/test-drivers/visl/Kconfig
@@ -7,7 +7,6 @@ config VIDEO_VISL
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
select VIDEO_V4L2_TPG
help
diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
index 9970dc739..68dac8962 100644
--- a/drivers/media/test-drivers/visl/visl-core.c
+++ b/drivers/media/test-drivers/visl/visl-core.c
@@ -211,6 +211,27 @@ const struct visl_ctrls visl_hevc_ctrls = {
.num_ctrls = ARRAY_SIZE(visl_hevc_ctrl_descs),
};
+static const struct visl_ctrl_desc visl_av1_ctrl_descs[] = {
+ {
+ .cfg.id = V4L2_CID_STATELESS_AV1_FRAME,
+ },
+ {
+ .cfg.id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
+ .cfg.dims = { V4L2_AV1_MAX_TILE_COUNT },
+ },
+ {
+ .cfg.id = V4L2_CID_STATELESS_AV1_SEQUENCE,
+ },
+ {
+ .cfg.id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
+ },
+};
+
+const struct visl_ctrls visl_av1_ctrls = {
+ .ctrls = visl_av1_ctrl_descs,
+ .num_ctrls = ARRAY_SIZE(visl_av1_ctrl_descs),
+};
+
struct v4l2_ctrl *visl_find_control(struct visl_ctx *ctx, u32 id)
{
struct v4l2_ctrl_handler *hdl = &ctx->hdl;
diff --git a/drivers/media/test-drivers/visl/visl-dec.c b/drivers/media/test-drivers/visl/visl-dec.c
index 318d675e5..f21260054 100644
--- a/drivers/media/test-drivers/visl/visl-dec.c
+++ b/drivers/media/test-drivers/visl/visl-dec.c
@@ -13,12 +13,21 @@
#include "visl-trace-vp9.h"
#include "visl-trace-h264.h"
#include "visl-trace-hevc.h"
+#include "visl-trace-av1.h"
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <media/v4l2-mem2mem.h>
#include <media/tpg/v4l2-tpg.h>
+#define LAST_BUF_IDX (V4L2_AV1_REF_LAST_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define LAST2_BUF_IDX (V4L2_AV1_REF_LAST2_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define LAST3_BUF_IDX (V4L2_AV1_REF_LAST3_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define GOLDEN_BUF_IDX (V4L2_AV1_REF_GOLDEN_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define BWD_BUF_IDX (V4L2_AV1_REF_BWDREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define ALT2_BUF_IDX (V4L2_AV1_REF_ALTREF2_FRAME - V4L2_AV1_REF_LAST_FRAME)
+#define ALT_BUF_IDX (V4L2_AV1_REF_ALTREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
+
static void *plane_vaddr(struct tpg_data *tpg, struct vb2_buffer *buf,
u32 p, u32 bpl[TPG_MAX_PLANES], u32 h)
{
@@ -152,6 +161,55 @@ static void visl_get_ref_frames(struct visl_ctx *ctx, u8 *buf,
break;
}
+
+ case VISL_CODEC_AV1: {
+ int idx_last = run->av1.frame->ref_frame_idx[LAST_BUF_IDX];
+ int idx_last2 = run->av1.frame->ref_frame_idx[LAST2_BUF_IDX];
+ int idx_last3 = run->av1.frame->ref_frame_idx[LAST3_BUF_IDX];
+ int idx_golden = run->av1.frame->ref_frame_idx[GOLDEN_BUF_IDX];
+ int idx_bwd = run->av1.frame->ref_frame_idx[BWD_BUF_IDX];
+ int idx_alt2 = run->av1.frame->ref_frame_idx[ALT2_BUF_IDX];
+ int idx_alt = run->av1.frame->ref_frame_idx[ALT_BUF_IDX];
+
+ struct vb2_buffer *ref_last =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_last]);
+ struct vb2_buffer *ref_last2 =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_last2]);
+ struct vb2_buffer *ref_last3 =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_last3]);
+ struct vb2_buffer *ref_golden =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_golden]);
+ struct vb2_buffer *ref_bwd =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_bwd]);
+ struct vb2_buffer *ref_alt2 =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_alt2]);
+ struct vb2_buffer *ref_alt =
+ vb2_find_buffer(cap_q, run->av1.frame->reference_frame_ts[idx_alt]);
+
+ scnprintf(buf, buflen,
+ "ref_last_ts: %llu, vb2_idx: %d\n"
+ "ref_last2_ts: %llu, vb2_idx: %d\n"
+ "ref_last3_ts: %llu, vb2_idx: %d\n"
+ "ref_golden_ts: %llu, vb2_idx: %d\n"
+ "ref_bwd_ts: %llu, vb2_idx: %d\n"
+ "ref_alt2_ts: %llu, vb2_idx: %d\n"
+ "ref_alt_ts: %llu, vb2_idx: %d\n",
+ run->av1.frame->reference_frame_ts[idx_last],
+ ref_last ? ref_last->index : -1,
+ run->av1.frame->reference_frame_ts[idx_last2],
+ ref_last2 ? ref_last2->index : -1,
+ run->av1.frame->reference_frame_ts[idx_last3],
+ ref_last3 ? ref_last3->index : -1,
+ run->av1.frame->reference_frame_ts[idx_golden],
+ ref_golden ? ref_golden->index : -1,
+ run->av1.frame->reference_frame_ts[idx_bwd],
+ ref_bwd ? ref_bwd->index : -1,
+ run->av1.frame->reference_frame_ts[idx_alt2],
+ ref_alt2 ? ref_alt2->index : -1,
+ run->av1.frame->reference_frame_ts[idx_alt],
+ ref_alt ? ref_alt->index : -1);
+ break;
+ }
}
}
@@ -287,16 +345,23 @@ static void visl_tpg_fill(struct visl_ctx *ctx, struct visl_run *run)
frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
len = 0;
- for (i = 0; i < out_q->num_buffers; i++) {
+ for (i = 0; i < vb2_get_num_buffers(out_q); i++) {
char entry[] = "index: %u, state: %s, request_fd: %d, ";
u32 old_len = len;
- char *q_status = visl_get_vb2_state(out_q->bufs[i]->state);
+ struct vb2_buffer *vb2;
+ char *q_status;
+
+ vb2 = vb2_get_buffer(out_q, i);
+ if (!vb2)
+ continue;
+
+ q_status = visl_get_vb2_state(vb2->state);
len += scnprintf(&buf[len], TPG_STR_BUF_SZ - len,
entry, i, q_status,
- to_vb2_v4l2_buffer(out_q->bufs[i])->request_fd);
+ to_vb2_v4l2_buffer(vb2)->request_fd);
- len += visl_fill_bytesused(to_vb2_v4l2_buffer(out_q->bufs[i]),
+ len += visl_fill_bytesused(to_vb2_v4l2_buffer(vb2),
&buf[len],
TPG_STR_BUF_SZ - len);
@@ -340,15 +405,22 @@ static void visl_tpg_fill(struct visl_ctx *ctx, struct visl_run *run)
frame_dprintk(ctx->dev, run->dst->sequence, "%s\n", buf);
len = 0;
- for (i = 0; i < cap_q->num_buffers; i++) {
+ for (i = 0; i < vb2_get_num_buffers(cap_q); i++) {
u32 old_len = len;
- char *q_status = visl_get_vb2_state(cap_q->bufs[i]->state);
+ struct vb2_buffer *vb2;
+ char *q_status;
+
+ vb2 = vb2_get_buffer(cap_q, i);
+ if (!vb2)
+ continue;
+
+ q_status = visl_get_vb2_state(vb2->state);
len += scnprintf(&buf[len], TPG_STR_BUF_SZ - len,
"index: %u, status: %s, timestamp: %llu, is_held: %d",
- cap_q->bufs[i]->index, q_status,
- cap_q->bufs[i]->timestamp,
- to_vb2_v4l2_buffer(cap_q->bufs[i])->is_held);
+ vb2->index, q_status,
+ vb2->timestamp,
+ to_vb2_v4l2_buffer(vb2)->is_held);
tpg_gen_text(&ctx->tpg, basep, line++ * line_height, 16, &buf[old_len]);
frame_dprintk(ctx->dev, run->dst->sequence, "%s", &buf[old_len]);
@@ -410,7 +482,13 @@ static void visl_trace_ctrls(struct visl_ctx *ctx, struct visl_run *run)
trace_v4l2_hevc_dpb_entry(&run->hevc.dpram->dpb[i]);
trace_v4l2_hevc_pred_weight_table(&run->hevc.spram->pred_weight_table);
- break;
+ break;
+ case VISL_CODEC_AV1:
+ trace_v4l2_ctrl_av1_sequence(run->av1.seq);
+ trace_v4l2_ctrl_av1_frame(run->av1.frame);
+ trace_v4l2_ctrl_av1_film_grain(run->av1.grain);
+ trace_v4l2_ctrl_av1_tile_group_entry(run->av1.tge);
+ break;
}
}
@@ -469,6 +547,12 @@ void visl_device_run(void *priv)
run.hevc.sm = visl_find_control_data(ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
run.hevc.dpram = visl_find_control_data(ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
break;
+ case VISL_CODEC_AV1:
+ run.av1.seq = visl_find_control_data(ctx, V4L2_CID_STATELESS_AV1_SEQUENCE);
+ run.av1.frame = visl_find_control_data(ctx, V4L2_CID_STATELESS_AV1_FRAME);
+ run.av1.tge = visl_find_control_data(ctx, V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY);
+ run.av1.grain = visl_find_control_data(ctx, V4L2_CID_STATELESS_AV1_FILM_GRAIN);
+ break;
}
frame_dprintk(ctx->dev, run.dst->sequence,
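
Two vb2 conventions show up in the visl decoder changes. References in the stateless AV1 API are carried as capture-buffer timestamps, so each ref_frame_idx is translated to a queue slot with vb2_find_buffer(). And because buffers can now be deleted at runtime, the index space may be sparse: loops switch from num_buffers/bufs[] to vb2_get_num_buffers() plus vb2_get_buffer(), skipping NULL slots. A sketch of the sparse-safe iteration (the printing is illustrative):

	#include <linux/printk.h>
	#include <media/videobuf2-v4l2.h>

	static void my_dump_queue(struct vb2_queue *q)
	{
		unsigned int i;

		for (i = 0; i < vb2_get_num_buffers(q); i++) {
			struct vb2_buffer *vb = vb2_get_buffer(q, i);

			if (!vb)	/* deleted slot: index space is sparse */
				continue;

			pr_info("index %u, timestamp %llu\n",
				vb->index, vb->timestamp);
		}
	}
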
diff --git a/drivers/media/test-drivers/visl/visl-dec.h b/drivers/media/test-drivers/visl/visl-dec.h
index 4a706a9de..c2c2ef3a8 100644
--- a/drivers/media/test-drivers/visl/visl-dec.h
+++ b/drivers/media/test-drivers/visl/visl-dec.h
@@ -45,6 +45,13 @@ struct visl_hevc_run {
const struct v4l2_ctrl_hevc_decode_params *dpram;
};
+struct visl_av1_run {
+ const struct v4l2_ctrl_av1_sequence *seq;
+ const struct v4l2_ctrl_av1_frame *frame;
+ const struct v4l2_ctrl_av1_tile_group_entry *tge;
+ const struct v4l2_ctrl_av1_film_grain *grain;
+};
+
struct visl_run {
struct vb2_v4l2_buffer *src;
struct vb2_v4l2_buffer *dst;
@@ -56,6 +63,7 @@ struct visl_run {
struct visl_vp9_run vp9;
struct visl_h264_run h264;
struct visl_hevc_run hevc;
+ struct visl_av1_run av1;
};
};
diff --git a/drivers/media/test-drivers/visl/visl-trace-av1.h b/drivers/media/test-drivers/visl/visl-trace-av1.h
new file mode 100644
index 000000000..09f205de5
--- /dev/null
+++ b/drivers/media/test-drivers/visl/visl-trace-av1.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_VISL_TRACE_AV1_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VISL_TRACE_AV1_H_
+
+#include <linux/tracepoint.h>
+#include "visl.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM visl_av1_controls
+
+DECLARE_EVENT_CLASS(v4l2_ctrl_av1_seq_tmpl,
+ TP_PROTO(const struct v4l2_ctrl_av1_sequence *s),
+ TP_ARGS(s),
+ TP_STRUCT__entry(__field_struct(struct v4l2_ctrl_av1_sequence, s)),
+ TP_fast_assign(__entry->s = *s;),
+ TP_printk("\nflags %s\nseq_profile: %u\norder_hint_bits: %u\nbit_depth: %u\n"
+ "max_frame_width_minus_1: %u\nmax_frame_height_minus_1: %u\n",
+ __print_flags(__entry->s.flags, "|",
+ {V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE, "STILL_PICTURE"},
+ {V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK, "USE_128X128_SUPERBLOCK"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA, "ENABLE_FILTER_INTRA"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER, "ENABLE_INTRA_EDGE_FILTER"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND, "ENABLE_INTERINTRA_COMPOUND"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND, "ENABLE_MASKED_COMPOUND"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION, "ENABLE_WARPED_MOTION"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER, "ENABLE_DUAL_FILTER"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT, "ENABLE_ORDER_HINT"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP, "ENABLE_JNT_COMP"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS, "ENABLE_REF_FRAME_MVS"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES, "ENABLE_SUPERRES"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF, "ENABLE_CDEF"},
+ {V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION, "ENABLE_RESTORATION"},
+ {V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME, "MONO_CHROME"},
+ {V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE, "COLOR_RANGE"},
+ {V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X, "SUBSAMPLING_X"},
+ {V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y, "SUBSAMPLING_Y"},
+ {V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT, "FILM_GRAIN_PARAMS_PRESENT"},
+ {V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q, "SEPARATE_UV_DELTA_Q"}),
+ __entry->s.seq_profile,
+ __entry->s.order_hint_bits,
+ __entry->s.bit_depth,
+ __entry->s.max_frame_width_minus_1,
+ __entry->s.max_frame_height_minus_1
+ )
+);
+
+DECLARE_EVENT_CLASS(v4l2_ctrl_av1_tge_tmpl,
+ TP_PROTO(const struct v4l2_ctrl_av1_tile_group_entry *t),
+ TP_ARGS(t),
+ TP_STRUCT__entry(__field_struct(struct v4l2_ctrl_av1_tile_group_entry, t)),
+ TP_fast_assign(__entry->t = *t;),
+ TP_printk("\ntile_offset: %u\n tile_size: %u\n tile_row: %u\ntile_col: %u\n",
+ __entry->t.tile_offset,
+ __entry->t.tile_size,
+ __entry->t.tile_row,
+ __entry->t.tile_col
+ )
+);
+
+DECLARE_EVENT_CLASS(v4l2_ctrl_av1_frame_tmpl,
+ TP_PROTO(const struct v4l2_ctrl_av1_frame *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field_struct(struct v4l2_ctrl_av1_frame, f)),
+ TP_fast_assign(__entry->f = *f;),
+ TP_printk("\ntile_info.flags: %s\ntile_info.context_update_tile_id: %u\n"
+ "tile_info.tile_cols: %u\ntile_info.tile_rows: %u\n"
+ "tile_info.mi_col_starts: %s\ntile_info.mi_row_starts: %s\n"
+ "tile_info.width_in_sbs_minus_1: %s\ntile_info.height_in_sbs_minus_1: %s\n"
+ "tile_info.tile_size_bytes: %u\nquantization.flags: %s\n"
+ "quantization.base_q_idx: %u\nquantization.delta_q_y_dc: %d\n"
+ "quantization.delta_q_u_dc: %d\nquantization.delta_q_u_ac: %d\n"
+ "quantization.delta_q_v_dc: %d\nquantization.delta_q_v_ac: %d\n"
+ "quantization.qm_y: %u\nquantization.qm_u: %u\nquantization.qm_v: %u\n"
+ "quantization.delta_q_res: %u\nsuperres_denom: %u\nsegmentation.flags: %s\n"
+ "segmentation.last_active_seg_id: %u\nsegmentation.feature_enabled:%s\n"
+ "loop_filter.flags: %s\nloop_filter.level: %s\nloop_filter.sharpness: %u\n"
+ "loop_filter.ref_deltas: %s\nloop_filter.mode_deltas: %s\n"
+ "loop_filter.delta_lf_res: %u\ncdef.damping_minus_3: %u\ncdef.bits: %u\n"
+ "cdef.y_pri_strength: %s\ncdef.y_sec_strength: %s\n"
+ "cdef.uv_pri_strength: %s\ncdef.uv_sec_strength:%s\nskip_mode_frame: %s\n"
+ "primary_ref_frame: %u\nloop_restoration.flags: %s\n"
+ "loop_restoration.lr_unit_shift: %u\nloop_restoration.lr_uv_shift: %u\n"
+ "loop_restoration.frame_restoration_type: %s\n"
+ "loop_restoration.loop_restoration_size: %s\nflags: %s\norder_hint: %u\n"
+ "upscaled_width: %u\nframe_width_minus_1: %u\nframe_height_minus_1: %u\n"
+ "render_width_minus_1: %u\nrender_height_minus_1: %u\ncurrent_frame_id: %u\n"
+ "buffer_removal_time: %s\norder_hints: %s\nreference_frame_ts: %s\n"
+ "ref_frame_idx: %s\nrefresh_frame_flags: %u\n",
+ __print_flags(__entry->f.tile_info.flags, "|",
+ {V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING, "UNIFORM_TILE_SPACING"}),
+ __entry->f.tile_info.context_update_tile_id,
+ __entry->f.tile_info.tile_cols,
+ __entry->f.tile_info.tile_rows,
+ __print_array(__entry->f.tile_info.mi_col_starts,
+ ARRAY_SIZE(__entry->f.tile_info.mi_col_starts),
+ sizeof(__entry->f.tile_info.mi_col_starts[0])),
+ __print_array(__entry->f.tile_info.mi_row_starts,
+ ARRAY_SIZE(__entry->f.tile_info.mi_row_starts),
+ sizeof(__entry->f.tile_info.mi_row_starts[0])),
+ __print_array(__entry->f.tile_info.width_in_sbs_minus_1,
+ ARRAY_SIZE(__entry->f.tile_info.width_in_sbs_minus_1),
+ sizeof(__entry->f.tile_info.width_in_sbs_minus_1[0])),
+ __print_array(__entry->f.tile_info.height_in_sbs_minus_1,
+ ARRAY_SIZE(__entry->f.tile_info.height_in_sbs_minus_1),
+ sizeof(__entry->f.tile_info.height_in_sbs_minus_1[0])),
+ __entry->f.tile_info.tile_size_bytes,
+ __print_flags(__entry->f.quantization.flags, "|",
+ {V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA, "DIFF_UV_DELTA"},
+ {V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX, "USING_QMATRIX"},
+ {V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT, "DELTA_Q_PRESENT"}),
+ __entry->f.quantization.base_q_idx,
+ __entry->f.quantization.delta_q_y_dc,
+ __entry->f.quantization.delta_q_u_dc,
+ __entry->f.quantization.delta_q_u_ac,
+ __entry->f.quantization.delta_q_v_dc,
+ __entry->f.quantization.delta_q_v_ac,
+ __entry->f.quantization.qm_y,
+ __entry->f.quantization.qm_u,
+ __entry->f.quantization.qm_v,
+ __entry->f.quantization.delta_q_res,
+ __entry->f.superres_denom,
+ __print_flags(__entry->f.segmentation.flags, "|",
+ {V4L2_AV1_SEGMENTATION_FLAG_ENABLED, "ENABLED"},
+ {V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP, "UPDATE_MAP"},
+ {V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE, "TEMPORAL_UPDATE"},
+ {V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA, "UPDATE_DATA"},
+ {V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP, "SEG_ID_PRE_SKIP"}),
+ __entry->f.segmentation.last_active_seg_id,
+ __print_array(__entry->f.segmentation.feature_enabled,
+ ARRAY_SIZE(__entry->f.segmentation.feature_enabled),
+ sizeof(__entry->f.segmentation.feature_enabled[0])),
+ __print_flags(__entry->f.loop_filter.flags, "|",
+ {V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED, "DELTA_ENABLED"},
+ {V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE, "DELTA_UPDATE"},
+ {V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT, "DELTA_LF_PRESENT"},
+ {V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI, "DELTA_LF_MULTI"}),
+ __print_array(__entry->f.loop_filter.level,
+ ARRAY_SIZE(__entry->f.loop_filter.level),
+ sizeof(__entry->f.loop_filter.level[0])),
+ __entry->f.loop_filter.sharpness,
+ __print_array(__entry->f.loop_filter.ref_deltas,
+ ARRAY_SIZE(__entry->f.loop_filter.ref_deltas),
+ sizeof(__entry->f.loop_filter.ref_deltas[0])),
+ __print_array(__entry->f.loop_filter.mode_deltas,
+ ARRAY_SIZE(__entry->f.loop_filter.mode_deltas),
+ sizeof(__entry->f.loop_filter.mode_deltas[0])),
+ __entry->f.loop_filter.delta_lf_res,
+ __entry->f.cdef.damping_minus_3,
+ __entry->f.cdef.bits,
+ __print_array(__entry->f.cdef.y_pri_strength,
+ ARRAY_SIZE(__entry->f.cdef.y_pri_strength),
+ sizeof(__entry->f.cdef.y_pri_strength[0])),
+ __print_array(__entry->f.cdef.y_sec_strength,
+ ARRAY_SIZE(__entry->f.cdef.y_sec_strength),
+ sizeof(__entry->f.cdef.y_sec_strength[0])),
+ __print_array(__entry->f.cdef.uv_pri_strength,
+ ARRAY_SIZE(__entry->f.cdef.uv_pri_strength),
+ sizeof(__entry->f.cdef.uv_pri_strength[0])),
+ __print_array(__entry->f.cdef.uv_sec_strength,
+ ARRAY_SIZE(__entry->f.cdef.uv_sec_strength),
+ sizeof(__entry->f.cdef.uv_sec_strength[0])),
+ __print_array(__entry->f.skip_mode_frame,
+ ARRAY_SIZE(__entry->f.skip_mode_frame),
+ sizeof(__entry->f.skip_mode_frame[0])),
+ __entry->f.primary_ref_frame,
+ __print_flags(__entry->f.loop_restoration.flags, "|",
+ {V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR, "USES_LR"},
+ {V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR, "USES_CHROMA_LR"}),
+ __entry->f.loop_restoration.lr_unit_shift,
+ __entry->f.loop_restoration.lr_uv_shift,
+ __print_array(__entry->f.loop_restoration.frame_restoration_type,
+ ARRAY_SIZE(__entry->f.loop_restoration.frame_restoration_type),
+ sizeof(__entry->f.loop_restoration.frame_restoration_type[0])),
+ __print_array(__entry->f.loop_restoration.loop_restoration_size,
+ ARRAY_SIZE(__entry->f.loop_restoration.loop_restoration_size),
+ sizeof(__entry->f.loop_restoration.loop_restoration_size[0])),
+ __print_flags(__entry->f.flags, "|",
+ {V4L2_AV1_FRAME_FLAG_SHOW_FRAME, "SHOW_FRAME"},
+ {V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME, "SHOWABLE_FRAME"},
+ {V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE, "ERROR_RESILIENT_MODE"},
+ {V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE, "DISABLE_CDF_UPDATE"},
+ {V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS, "ALLOW_SCREEN_CONTENT_TOOLS"},
+ {V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV, "FORCE_INTEGER_MV"},
+ {V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC, "ALLOW_INTRABC"},
+ {V4L2_AV1_FRAME_FLAG_USE_SUPERRES, "USE_SUPERRES"},
+ {V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV, "ALLOW_HIGH_PRECISION_MV"},
+ {V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE, "IS_MOTION_MODE_SWITCHABLE"},
+ {V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS, "USE_REF_FRAME_MVS"},
+ {V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF,
+ "DISABLE_FRAME_END_UPDATE_CDF"},
+ {V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION, "ALLOW_WARPED_MOTION"},
+ {V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT, "REFERENCE_SELECT"},
+ {V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET, "REDUCED_TX_SET"},
+ {V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED, "SKIP_MODE_ALLOWED"},
+ {V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT, "SKIP_MODE_PRESENT"},
+ {V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE, "FRAME_SIZE_OVERRIDE"},
+ {V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT, "BUFFER_REMOVAL_TIME_PRESENT"},
+ {V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING, "FRAME_REFS_SHORT_SIGNALING"}),
+ __entry->f.order_hint,
+ __entry->f.upscaled_width,
+ __entry->f.frame_width_minus_1,
+ __entry->f.frame_height_minus_1,
+ __entry->f.render_width_minus_1,
+ __entry->f.render_height_minus_1,
+ __entry->f.current_frame_id,
+ __print_array(__entry->f.buffer_removal_time,
+ ARRAY_SIZE(__entry->f.buffer_removal_time),
+ sizeof(__entry->f.buffer_removal_time[0])),
+ __print_array(__entry->f.order_hints,
+ ARRAY_SIZE(__entry->f.order_hints),
+ sizeof(__entry->f.order_hints[0])),
+ __print_array(__entry->f.reference_frame_ts,
+ ARRAY_SIZE(__entry->f.reference_frame_ts),
+ sizeof(__entry->f.reference_frame_ts[0])),
+ __print_array(__entry->f.ref_frame_idx,
+ ARRAY_SIZE(__entry->f.ref_frame_idx),
+ sizeof(__entry->f.ref_frame_idx[0])),
+ __entry->f.refresh_frame_flags
+ )
+);
+
+
+DECLARE_EVENT_CLASS(v4l2_ctrl_av1_film_grain_tmpl,
+ TP_PROTO(const struct v4l2_ctrl_av1_film_grain *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field_struct(struct v4l2_ctrl_av1_film_grain, f)),
+ TP_fast_assign(__entry->f = *f;),
+ TP_printk("\nflags %s\ncr_mult: %u\ngrain_seed: %u\n"
+ "film_grain_params_ref_idx: %u\nnum_y_points: %u\npoint_y_value: %s\n"
+ "point_y_scaling: %s\nnum_cb_points: %u\npoint_cb_value: %s\n"
+ "point_cb_scaling: %s\nnum_cr_points: %u\npoint_cr_value: %s\n"
+ "point_cr_scaling: %s\ngrain_scaling_minus_8: %u\nar_coeff_lag: %u\n"
+ "ar_coeffs_y_plus_128: %s\nar_coeffs_cb_plus_128: %s\n"
+ "ar_coeffs_cr_plus_128: %s\nar_coeff_shift_minus_6: %u\n"
+ "grain_scale_shift: %u\ncb_mult: %u\ncb_luma_mult: %u\ncr_luma_mult: %u\n"
+ "cb_offset: %u\ncr_offset: %u\n",
+ __print_flags(__entry->f.flags, "|",
+ {V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN, "APPLY_GRAIN"},
+ {V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN, "UPDATE_GRAIN"},
+ {V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA, "CHROMA_SCALING_FROM_LUMA"},
+ {V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP, "OVERLAP"},
+ {V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE, "CLIP_TO_RESTRICTED_RANGE"}),
+ __entry->f.cr_mult,
+ __entry->f.grain_seed,
+ __entry->f.film_grain_params_ref_idx,
+ __entry->f.num_y_points,
+ __print_array(__entry->f.point_y_value,
+ ARRAY_SIZE(__entry->f.point_y_value),
+ sizeof(__entry->f.point_y_value[0])),
+ __print_array(__entry->f.point_y_scaling,
+ ARRAY_SIZE(__entry->f.point_y_scaling),
+ sizeof(__entry->f.point_y_scaling[0])),
+ __entry->f.num_cb_points,
+ __print_array(__entry->f.point_cb_value,
+ ARRAY_SIZE(__entry->f.point_cb_value),
+ sizeof(__entry->f.point_cb_value[0])),
+ __print_array(__entry->f.point_cb_scaling,
+ ARRAY_SIZE(__entry->f.point_cb_scaling),
+ sizeof(__entry->f.point_cb_scaling[0])),
+ __entry->f.num_cr_points,
+ __print_array(__entry->f.point_cr_value,
+ ARRAY_SIZE(__entry->f.point_cr_value),
+ sizeof(__entry->f.point_cr_value[0])),
+ __print_array(__entry->f.point_cr_scaling,
+ ARRAY_SIZE(__entry->f.point_cr_scaling),
+ sizeof(__entry->f.point_cr_scaling[0])),
+ __entry->f.grain_scaling_minus_8,
+ __entry->f.ar_coeff_lag,
+ __print_array(__entry->f.ar_coeffs_y_plus_128,
+ ARRAY_SIZE(__entry->f.ar_coeffs_y_plus_128),
+ sizeof(__entry->f.ar_coeffs_y_plus_128[0])),
+ __print_array(__entry->f.ar_coeffs_cb_plus_128,
+ ARRAY_SIZE(__entry->f.ar_coeffs_cb_plus_128),
+ sizeof(__entry->f.ar_coeffs_cb_plus_128[0])),
+ __print_array(__entry->f.ar_coeffs_cr_plus_128,
+ ARRAY_SIZE(__entry->f.ar_coeffs_cr_plus_128),
+ sizeof(__entry->f.ar_coeffs_cr_plus_128[0])),
+ __entry->f.ar_coeff_shift_minus_6,
+ __entry->f.grain_scale_shift,
+ __entry->f.cb_mult,
+ __entry->f.cb_luma_mult,
+ __entry->f.cr_luma_mult,
+ __entry->f.cb_offset,
+ __entry->f.cr_offset
+ )
+);
+
+DEFINE_EVENT(v4l2_ctrl_av1_seq_tmpl, v4l2_ctrl_av1_sequence,
+ TP_PROTO(const struct v4l2_ctrl_av1_sequence *s),
+ TP_ARGS(s)
+);
+
+DEFINE_EVENT(v4l2_ctrl_av1_frame_tmpl, v4l2_ctrl_av1_frame,
+ TP_PROTO(const struct v4l2_ctrl_av1_frame *f),
+ TP_ARGS(f)
+);
+
+DEFINE_EVENT(v4l2_ctrl_av1_tge_tmpl, v4l2_ctrl_av1_tile_group_entry,
+ TP_PROTO(const struct v4l2_ctrl_av1_tile_group_entry *t),
+ TP_ARGS(t)
+);
+
+DEFINE_EVENT(v4l2_ctrl_av1_film_grain_tmpl, v4l2_ctrl_av1_film_grain,
+ TP_PROTO(const struct v4l2_ctrl_av1_film_grain *f),
+ TP_ARGS(f)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/media/test-drivers/visl
+#define TRACE_INCLUDE_FILE visl-trace-av1
+#include <trace/define_trace.h>
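
The new header follows the standard self-including tracepoint layout: the TRACE_HEADER_MULTI_READ guard lets trace/define_trace.h re-read the file, TRACE_SYSTEM names the event group, DECLARE_EVENT_CLASS captures the whole control struct by value via __field_struct, and DEFINE_EVENT stamps out the named events. A stripped-down sketch of the same skeleton, assuming a struct my_params { u32 flags; } defined in an ordinary header (all my_* names are illustrative):

	/* SPDX-License-Identifier: GPL-2.0 */
	#if !defined(_MY_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
	#define _MY_TRACE_H_

	#include <linux/tracepoint.h>
	#include "my-params.h"	/* hypothetical home of struct my_params */

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM my_subsys

	DECLARE_EVENT_CLASS(my_params_tmpl,
		TP_PROTO(const struct my_params *p),
		TP_ARGS(p),
		TP_STRUCT__entry(__field_struct(struct my_params, p)),
		TP_fast_assign(__entry->p = *p;),
		TP_printk("flags: 0x%x", __entry->p.flags)
	);

	DEFINE_EVENT(my_params_tmpl, my_params_event,
		TP_PROTO(const struct my_params *p),
		TP_ARGS(p)
	);

	#endif

	#undef TRACE_INCLUDE_PATH
	#define TRACE_INCLUDE_PATH .
	#undef TRACE_INCLUDE_FILE
	#define TRACE_INCLUDE_FILE my-trace
	#include <trace/define_trace.h>
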
diff --git a/drivers/media/test-drivers/visl/visl-trace-points.c b/drivers/media/test-drivers/visl/visl-trace-points.c
index f7b866534..321ff732c 100644
--- a/drivers/media/test-drivers/visl/visl-trace-points.c
+++ b/drivers/media/test-drivers/visl/visl-trace-points.c
@@ -8,3 +8,4 @@
#include "visl-trace-vp9.h"
#include "visl-trace-h264.h"
#include "visl-trace-hevc.h"
+#include "visl-trace-av1.h"
diff --git a/drivers/media/test-drivers/visl/visl-video.c b/drivers/media/test-drivers/visl/visl-video.c
index 9303a3e11..b9a4b44bd 100644
--- a/drivers/media/test-drivers/visl/visl-video.c
+++ b/drivers/media/test-drivers/visl/visl-video.c
@@ -40,6 +40,9 @@ static void visl_set_current_codec(struct visl_ctx *ctx)
case V4L2_PIX_FMT_HEVC_SLICE:
ctx->current_codec = VISL_CODEC_HEVC;
break;
+ case V4L2_PIX_FMT_AV1_FRAME:
+ ctx->current_codec = VISL_CODEC_AV1;
+ break;
default:
dprintk(ctx->dev, "Warning: unsupported fourcc: %d\n", fourcc);
ctx->current_codec = VISL_CODEC_NONE;
@@ -218,6 +221,21 @@ const struct visl_coded_format_desc visl_coded_fmts[] = {
.num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts),
.decoded_fmts = visl_decoded_fmts,
},
+ {
+ .pixelformat = V4L2_PIX_FMT_AV1_FRAME,
+ .frmsize = {
+ .min_width = 64,
+ .max_width = 4096,
+ .step_width = 1,
+ .min_height = 64,
+ .max_height = 2304,
+ .step_height = 1,
+ },
+ .ctrls = &visl_av1_ctrls,
+ .num_decoded_fmts = ARRAY_SIZE(visl_decoded_fmts),
+ .decoded_fmts = visl_decoded_fmts,
+ },
+
};
const size_t num_coded_fmts = ARRAY_SIZE(visl_coded_fmts);
diff --git a/drivers/media/test-drivers/visl/visl-video.h b/drivers/media/test-drivers/visl/visl-video.h
index 27ad70a55..92e274894 100644
--- a/drivers/media/test-drivers/visl/visl-video.h
+++ b/drivers/media/test-drivers/visl/visl-video.h
@@ -17,6 +17,7 @@ extern const struct visl_ctrls visl_vp8_ctrls;
extern const struct visl_ctrls visl_vp9_ctrls;
extern const struct visl_ctrls visl_h264_ctrls;
extern const struct visl_ctrls visl_hevc_ctrls;
+extern const struct visl_ctrls visl_av1_ctrls;
int visl_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
diff --git a/drivers/media/test-drivers/visl/visl.h b/drivers/media/test-drivers/visl/visl.h
index 31639f2e5..c593b1337 100644
--- a/drivers/media/test-drivers/visl/visl.h
+++ b/drivers/media/test-drivers/visl/visl.h
@@ -127,6 +127,7 @@ enum visl_codec {
VISL_CODEC_VP9,
VISL_CODEC_H264,
VISL_CODEC_HEVC,
+ VISL_CODEC_AV1,
};
struct visl_blob {
diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index 5b08a5ad2..ec2e71d76 100644
--- a/drivers/media/test-drivers/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -10,7 +10,6 @@ config VIDEO_VIVID
select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
help
Enables a virtual video driver. This driver emulates a webcam,
TV, S-Video and HDMI capture hardware, including VBI support for
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 394c9f81e..159c72cbb 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -861,7 +861,7 @@ static const struct media_device_ops vivid_media_ops = {
static int vivid_create_queue(struct vivid_dev *dev,
struct vb2_queue *q,
u32 buf_type,
- unsigned int min_buffers_needed,
+ unsigned int min_queued_buffers,
const struct vb2_ops *ops)
{
if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar)
@@ -876,6 +876,20 @@ static int vivid_create_queue(struct vivid_dev *dev,
q->type = buf_type;
q->io_modes = VB2_MMAP | VB2_DMABUF;
q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ;
+
+ /*
+ * The maximum number of buffers is 32768 if PAGE_SHIFT == 12,
+ * see also MAX_BUFFER_INDEX in videobuf2-core.c. It will be less if
+ * PAGE_SHIFT > 12, but then max_num_buffers will be clamped by
+ * videobuf2-core.c to MAX_BUFFER_INDEX.
+ */
+ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ q->max_num_buffers = 64;
+ if (buf_type == V4L2_BUF_TYPE_SDR_CAPTURE)
+ q->max_num_buffers = 1024;
+ if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE)
+ q->max_num_buffers = 32768;
+
if (allocators[dev->inst] != 1)
q->io_modes |= VB2_USERPTR;
q->drv_priv = dev;
@@ -884,7 +898,7 @@ static int vivid_create_queue(struct vivid_dev *dev,
q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
&vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = supports_requests[dev->inst] ? 0 : min_buffers_needed;
+ q->min_queued_buffers = supports_requests[dev->inst] ? 0 : min_queued_buffers;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
q->supports_requests = supports_requests[dev->inst];
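
The per-type max_num_buffers values above exercise videobuf2's new per-queue allocation ceiling. A minimal sketch of how the two queue limits interact, assuming a queue being prepared for vb2_queue_init() (the values are illustrative, not the driver's):

#include <media/videobuf2-core.h>

static int example_init_queue(struct vb2_queue *q)
{
	/* Buffers that must be queued before start_streaming() runs. */
	q->min_queued_buffers = 2;

	/* Allocation ceiling for userspace; videobuf2 clamps this to
	 * its internal maximum (32768 with 4 KiB pages). */
	q->max_num_buffers = 64;

	return vb2_queue_init(q);
}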
diff --git a/drivers/media/test-drivers/vivid/vivid-meta-cap.c b/drivers/media/test-drivers/vivid/vivid-meta-cap.c
index 780f96860..0a718d037 100644
--- a/drivers/media/test-drivers/vivid/vivid-meta-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-meta-cap.c
@@ -30,9 +30,6 @@ static int meta_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
sizes[0] = size;
}
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
-
*nplanes = 1;
return 0;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
index 95835b52b..4a569a6e5 100644
--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
@@ -18,6 +18,7 @@ static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
unsigned int size = sizeof(struct vivid_meta_out_buf);
if (!vivid_is_webcam(dev))
@@ -30,8 +31,8 @@ static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
sizes[0] = size;
}
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 2)
+ *nbuffers = 2 - q_num_bufs;
*nplanes = 1;
return 0;
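
This hunk shows the conversion applied throughout the rest of the series: queue_setup() callbacks stop reading vq->num_buffers directly and call vb2_get_num_buffers() instead. A condensed sketch of the resulting idiom, with a hypothetical two-buffer minimum and placeholder plane size:

#include <media/videobuf2-core.h>

#define MIN_BUFS 2

static int example_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			       unsigned int *nplanes, unsigned int sizes[],
			       struct device *alloc_devs[])
{
	unsigned int q_num_bufs = vb2_get_num_buffers(vq);
	unsigned int size = 4096; /* placeholder payload size */

	/* Top up the request so the queue ends at MIN_BUFS buffers. */
	if (q_num_bufs + *nbuffers < MIN_BUFS)
		*nbuffers = MIN_BUFS - q_num_bufs;

	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}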
diff --git a/drivers/media/test-drivers/vivid/vivid-touch-cap.c b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
index c7f6e23df..4b3c6ea0a 100644
--- a/drivers/media/test-drivers/vivid/vivid-touch-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
@@ -13,6 +13,7 @@ static int touch_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
struct v4l2_pix_format *f = &dev->tch_format;
unsigned int size = f->sizeimage;
@@ -23,8 +24,8 @@ static int touch_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
sizes[0] = size;
}
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 2)
+ *nbuffers = 2 - q_num_bufs;
*nplanes = 1;
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
index b65b02eee..3840b3a66 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
@@ -134,9 +134,6 @@ static int vbi_cap_queue_setup(struct vb2_queue *vq,
sizes[0] = size;
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
-
*nplanes = 1;
return 0;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-out.c b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
index cd5647690..434a10676 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
@@ -30,9 +30,6 @@ static int vbi_out_queue_setup(struct vb2_queue *vq,
sizes[0] = size;
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
-
*nplanes = 1;
return 0;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 3a06df35a..2804975fe 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -117,9 +117,6 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
dev->fmt_cap->data_offset[p];
}
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
-
*nplanes = buffers;
dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index 184a6df2c..1653b2988 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
@@ -73,12 +73,9 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
vfmt->data_offset[p] : size;
}
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
-
*nplanes = planes;
- dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
+ dprintk(dev, 1, "%s: count=%u\n", __func__, *nbuffers);
for (p = 0; p < planes; p++)
dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
return 0;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 462eb8423..e24e655fb 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -482,12 +482,13 @@ static int airspy_queue_setup(struct vb2_queue *vq,
unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[])
{
struct airspy *s = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
dev_dbg(s->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
- if (vq->num_buffers + *nbuffers < 8)
- *nbuffers = 8 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 8)
+ *nbuffers = 8 - q_num_bufs;
*nplanes = 1;
sizes[0] = PAGE_ALIGN(s->buffersize);
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index fe4410a5e..3b75d062e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1218,12 +1218,13 @@ static int queue_setup(struct vb2_queue *vq,
{
struct cx231xx *dev = vb2_get_drv_priv(vq);
unsigned int size = mpeglinesize * mpeglines;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
dev->ts1.ts_packet_size = mpeglinesize;
dev->ts1.ts_packet_count = mpeglines;
- if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
- *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - q_num_bufs;
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
@@ -1781,7 +1782,7 @@ int cx231xx_417_register(struct cx231xx *dev)
q->ops = &cx231xx_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->lock = &dev->lock;
err = vb2_queue_init(q);
if (err)
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index e23b8ccd7..8f347bbee 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -714,11 +714,12 @@ static int queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
dev->size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
- *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - q_num_bufs;
if (*nplanes)
return sizes[0] < dev->size ? -EINVAL : 0;
@@ -1810,7 +1811,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
q->ops = &cx231xx_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->lock = &dev->lock;
ret = vb2_queue_init(q);
if (ret)
@@ -1870,7 +1871,7 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
q->ops = &cx231xx_vbi_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->lock = &dev->lock;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
index deba5224c..b5d8c6b75 100644
--- a/drivers/media/usb/dvb-usb/cxusb-analog.c
+++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
@@ -1632,7 +1632,7 @@ static int cxusb_medion_register_analog_video(struct dvb_usb_device *dvbdev)
cxdev->videoqueue.buf_struct_size =
sizeof(struct cxusb_medion_vbuffer);
cxdev->videoqueue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- cxdev->videoqueue.min_buffers_needed = 6;
+ cxdev->videoqueue.min_queued_buffers = 6;
cxdev->videoqueue.lock = &cxdev->dev_lock;
ret = vb2_queue_init(&cxdev->videoqueue);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 25e0620de..4aef584e2 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1607,7 +1607,8 @@ static int vidioc_g_parm(struct file *file, void *priv,
p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
if (dev->is_webcam) {
rc = v4l2_device_call_until_err(&v4l2->v4l2_dev, 0,
- video, g_frame_interval, &ival);
+ pad, get_frame_interval, NULL,
+ &ival);
if (!rc)
p->parm.capture.timeperframe = ival.interval;
} else {
@@ -1639,7 +1640,8 @@ static int vidioc_s_parm(struct file *file, void *priv,
p->parm.capture.readbuffers = EM28XX_MIN_BUF;
p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
rc = v4l2_device_call_until_err(&dev->v4l2->v4l2_dev, 0,
- video, s_frame_interval, &ival);
+ pad, set_frame_interval, NULL,
+ &ival);
if (!rc)
p->parm.capture.timeperframe = ival.interval;
return rc;
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 770714c34..e8c8bdb9c 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1257,7 +1257,7 @@ static int vidioc_g_parm(struct file *filp, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(filp);
- parm->parm.capture.readbuffers = gspca_dev->queue.min_buffers_needed;
+ parm->parm.capture.readbuffers = gspca_dev->queue.min_queued_buffers;
if (!gspca_dev->sd_desc->get_streamparm)
return 0;
@@ -1273,7 +1273,7 @@ static int vidioc_s_parm(struct file *filp, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(filp);
- parm->parm.capture.readbuffers = gspca_dev->queue.min_buffers_needed;
+ parm->parm.capture.readbuffers = gspca_dev->queue.min_queued_buffers;
if (!gspca_dev->sd_desc->set_streamparm) {
parm->parm.capture.capability = 0;
@@ -1517,7 +1517,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
q->ops = &gspca_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_queued_buffers = 2;
q->lock = &gspca_dev->usb_lock;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 3e535be2c..9c0ecd5f0 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -753,12 +753,13 @@ static int hackrf_queue_setup(struct vb2_queue *vq,
unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[])
{
struct hackrf_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
dev_dbg(dev->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
- if (vq->num_buffers + *nbuffers < 8)
- *nbuffers = 8 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 8)
+ *nbuffers = 8 - q_num_bufs;
*nplanes = 1;
sizes[0] = PAGE_ALIGN(dev->buffersize);
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 1e30e0595..702f1c8bd 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -726,9 +726,10 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
{
struct usbtv *usbtv = vb2_get_drv_priv(vq);
unsigned size = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
- if (vq->num_buffers + *nbuffers < 2)
- *nbuffers = 2 - vq->num_buffers;
+ if (q_num_bufs + *nbuffers < 2)
+ *nbuffers = 2 - q_num_bufs;
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
@@ -962,15 +963,8 @@ ctrl_fail:
void usbtv_video_free(struct usbtv *usbtv)
{
- mutex_lock(&usbtv->vb2q_lock);
- mutex_lock(&usbtv->v4l2_lock);
-
- usbtv_stop(usbtv);
vb2_video_unregister_device(&usbtv->vdev);
v4l2_device_disconnect(&usbtv->v4l2_dev);
- mutex_unlock(&usbtv->v4l2_lock);
- mutex_unlock(&usbtv->vb2q_lock);
-
v4l2_device_put(&usbtv->v4l2_dev);
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 28dde08ec..7cbf4692b 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1954,7 +1954,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
/* Check if the bandwidth is high enough. */
psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
- if (psize >= bandwidth && psize <= best_psize) {
+ if (psize >= bandwidth && psize < best_psize) {
altsetting = alts->desc.bAlternateSetting;
best_psize = psize;
best_ep = ep;
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 8cfd593d2..3ec323bd5 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -876,9 +876,6 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
if (sd->asc_list.next) {
list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
asc_subdev_entry) {
- list_move(&asc->asc_entry,
- &asc->notifier->waiting_list);
-
v4l2_async_unbind_subdev_one(asc->notifier, asc);
}
}
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 3a4b15a98..273d83de2 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -195,9 +195,9 @@ int v4l2_g_parm_cap(struct video_device *vdev,
if (vdev->device_caps & V4L2_CAP_READWRITE)
a->parm.capture.readbuffers = 2;
- if (v4l2_subdev_has_op(sd, video, g_frame_interval))
+ if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
+ ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival);
if (!ret)
a->parm.capture.timeperframe = ival.interval;
return ret;
@@ -222,9 +222,9 @@ int v4l2_s_parm_cap(struct video_device *vdev,
else
a->parm.capture.readbuffers = 0;
- if (v4l2_subdev_has_op(sd, video, g_frame_interval))
+ if (v4l2_subdev_has_op(sd, pad, get_frame_interval))
a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
+ ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival);
if (!ret)
a->parm.capture.timeperframe = ival.interval;
return ret;
@@ -254,6 +254,9 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
{ .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index f3bed3785..8c07400bd 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -116,6 +116,9 @@ struct v4l2_format32 {
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and
* configured for MMAP streaming I/O).
+ * @max_num_buffers: if V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set
+ * this field indicates the maximum possible number of buffers
+ * for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -125,7 +128,8 @@ struct v4l2_create_buffers32 {
struct v4l2_format32 format;
__u32 capabilities;
__u32 flags;
- __u32 reserved[6];
+ __u32 max_num_buffers;
+ __u32 reserved[5];
};
static int get_v4l2_format32(struct v4l2_format *p64,
@@ -175,6 +179,9 @@ static int get_v4l2_create32(struct v4l2_create_buffers *p64,
return -EFAULT;
if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
return -EFAULT;
+ if (copy_from_user(&p64->max_num_buffers, &p32->max_num_buffers,
+ sizeof(p32->max_num_buffers)))
+ return -EFAULT;
return get_v4l2_format32(&p64->format, &p32->format);
}
@@ -221,6 +228,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *p64,
offsetof(struct v4l2_create_buffers32, format)) ||
put_user(p64->capabilities, &p32->capabilities) ||
put_user(p64->flags, &p32->flags) ||
+ put_user(p64->max_num_buffers, &p32->max_num_buffers) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return put_v4l2_format32(&p64->format, &p32->format);
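
With max_num_buffers plumbed through the 32-bit compat layer, native and compat callers see the same field. A hypothetical userspace probe, assuming the queried format is one the driver accepts (a count of 0 makes VIDIOC_CREATE_BUFS report capabilities without allocating):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int query_max_buffers(int fd, unsigned int *max)
{
	struct v4l2_create_buffers create;

	memset(&create, 0, sizeof(create));
	create.count = 0; /* query only, allocate nothing */
	create.memory = V4L2_MEMORY_MMAP;
	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create))
		return -1;

	if (create.capabilities & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS)
		*max = create.max_num_buffers;
	else
		*max = VIDEO_MAX_FRAME; /* legacy 32-buffer limit */

	return 0;
}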
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index f81279492..d13954bd3 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -642,11 +642,13 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
- if (ops->vidioc_g_selection) {
+ if (ops->vidioc_g_selection &&
+ !test_bit(_IOC_NR(VIDIOC_G_SELECTION), vdev->valid_ioctls)) {
__set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
__set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
}
- if (ops->vidioc_s_selection)
+ if (ops->vidioc_s_selection &&
+ !test_bit(_IOC_NR(VIDIOC_S_SELECTION), vdev->valid_ioctls))
__set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 7f181fbbb..89c719214 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -1179,7 +1179,9 @@ v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
static const char * const led_props[] = { "led" };
static const struct v4l2_fwnode_int_props props[] = {
{ "flash-leds", led_props, ARRAY_SIZE(led_props) },
- { "lens-focus", NULL, 0 },
+ { "mipi-img-flash-leds", },
+ { "lens-focus", },
+ { "mipi-img-lens-focus", },
};
unsigned int i;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 9b1de54ce..33076af4d 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -483,9 +483,9 @@ static void v4l_print_create_buffers(const void *arg, bool write_only)
{
const struct v4l2_create_buffers *p = arg;
- pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, ",
+ pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, max num buffers=%u",
p->index, p->count, prt_names(p->memory, v4l2_memory_names),
- p->capabilities);
+ p->capabilities, p->max_num_buffers);
v4l_print_format(&p->format, write_only);
}
@@ -2951,7 +2951,7 @@ void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
type = "v4l2_int";
break;
case 'V':
- if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
+ if (!v4l2_is_known_ioctl(cmd)) {
type = "v4l2";
break;
}
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 8db9ac9c1..75517134a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -301,9 +301,12 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
- if (!m2m_ctx->out_q_ctx.q.streaming
- || !m2m_ctx->cap_q_ctx.q.streaming) {
- dprintk("Streaming needs to be on for both queues\n");
+ if (!m2m_ctx->out_q_ctx.q.streaming ||
+ (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) {
+ if (!m2m_ctx->ignore_cap_streaming)
+ dprintk("Streaming needs to be on for both queues\n");
+ else
+ dprintk("Streaming needs to be on for the OUTPUT queue\n");
return;
}
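
The new ignore_cap_streaming branch lets a mem2mem driver schedule jobs before the CAPTURE queue starts streaming. A sketch of a driver opting in, assuming a typical open() path (names hypothetical):

/* Excerpt: enable OUTPUT-only job scheduling for this context. */
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->fh.m2m_ctx))
	return PTR_ERR(ctx->fh.m2m_ctx);

ctx->fh.m2m_ctx->ignore_cap_streaming = true;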
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index be86b906c..4c6198c48 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -177,7 +177,7 @@ static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
{
if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
+ if (!v4l2_subdev_state_get_format(state, pad, stream))
return -EINVAL;
return 0;
#else
@@ -245,29 +245,6 @@ static int call_enum_frame_size(struct v4l2_subdev *sd,
sd->ops->pad->enum_frame_size(sd, state, fse);
}
-static inline int check_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
-{
- if (!fi)
- return -EINVAL;
-
- return check_pad(sd, fi->pad);
-}
-
-static int call_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
-{
- return check_frame_interval(sd, fi) ? :
- sd->ops->video->g_frame_interval(sd, fi);
-}
-
-static int call_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
-{
- return check_frame_interval(sd, fi) ? :
- sd->ops->video->s_frame_interval(sd, fi);
-}
-
static int call_enum_frame_interval(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_interval_enum *fie)
@@ -307,6 +284,33 @@ static int call_set_selection(struct v4l2_subdev *sd,
sd->ops->pad->set_selection(sd, state, sel);
}
+static inline int check_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ if (!fi)
+ return -EINVAL;
+
+ return check_which(fi->which) ? : check_pad(sd, fi->pad) ? :
+ check_state(sd, state, fi->which, fi->pad, fi->stream);
+}
+
+static int call_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, state, fi) ? :
+ sd->ops->pad->get_frame_interval(sd, state, fi);
+}
+
+static int call_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, state, fi) ? :
+ sd->ops->pad->set_frame_interval(sd, state, fi);
+}
+
static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_frame_desc *fd)
{
@@ -479,6 +483,8 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.enum_frame_interval = call_enum_frame_interval_state,
.get_selection = call_get_selection_state,
.set_selection = call_set_selection_state,
+ .get_frame_interval = call_get_frame_interval,
+ .set_frame_interval = call_set_frame_interval,
.get_edid = call_get_edid,
.set_edid = call_set_edid,
.dv_timings_cap = call_dv_timings_cap,
@@ -488,8 +494,6 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
};
static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
- .g_frame_interval = call_g_frame_interval,
- .s_frame_interval = call_s_frame_interval,
.s_stream = call_s_stream,
};
@@ -531,6 +535,17 @@ subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
case VIDIOC_SUBDEV_S_SELECTION:
which = ((struct v4l2_subdev_selection *)arg)->which;
break;
+ case VIDIOC_SUBDEV_G_FRAME_INTERVAL:
+ case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (!(subdev_fh->client_caps &
+ V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH))
+ fi->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ which = fi->which;
+ break;
+ }
case VIDIOC_SUBDEV_G_ROUTING:
case VIDIOC_SUBDEV_S_ROUTING:
which = ((struct v4l2_subdev_routing *)arg)->which;
@@ -781,20 +796,20 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
fi->stream = 0;
memset(fi->reserved, 0, sizeof(fi->reserved));
- return v4l2_subdev_call(sd, video, g_frame_interval, arg);
+ return v4l2_subdev_call(sd, pad, get_frame_interval, state, fi);
}
case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
struct v4l2_subdev_frame_interval *fi = arg;
- if (ro_subdev)
- return -EPERM;
-
if (!client_supports_streams)
fi->stream = 0;
+ if (fi->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
memset(fi->reserved, 0, sizeof(fi->reserved));
- return v4l2_subdev_call(sd, video, s_frame_interval, arg);
+ return v4l2_subdev_call(sd, pad, set_frame_interval, state, fi);
}
case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
@@ -989,7 +1004,8 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
/* Filter out unsupported capabilities */
- client_cap->capabilities &= V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+ client_cap->capabilities &= (V4L2_SUBDEV_CLIENT_CAP_STREAMS |
+ V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH);
subdev_fh->client_caps = client_cap->capabilities;
@@ -1448,6 +1464,8 @@ __v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
else
state->lock = &state->_lock;
+ state->sd = sd;
+
/* Drivers that support streams do not need the legacy pad config */
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
state->pads = kvcalloc(sd->entity.num_pads,
@@ -1458,16 +1476,18 @@ __v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
}
}
- /*
- * There can be no race at this point, but we lock the state anyway to
- * satisfy lockdep checks.
- */
- v4l2_subdev_lock_state(state);
- ret = v4l2_subdev_call(sd, pad, init_cfg, state);
- v4l2_subdev_unlock_state(state);
+ if (sd->internal_ops && sd->internal_ops->init_state) {
+ /*
+ * There can be no race at this point, but we lock the state
+ * anyway to satisfy lockdep checks.
+ */
+ v4l2_subdev_lock_state(state);
+ ret = sd->internal_ops->init_state(sd, state);
+ v4l2_subdev_unlock_state(state);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- goto err;
+ if (ret)
+ goto err;
+ }
return state;
@@ -1517,7 +1537,8 @@ void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
__v4l2_subdev_state_free(sd->active_state);
sd->active_state = NULL;
- if (list_empty(&sd->async_subdev_endpoint_list))
+ /* Uninitialised sub-device, bail out here. */
+ if (!sd->async_subdev_endpoint_list.next)
return;
list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
@@ -1529,6 +1550,144 @@ void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
+struct v4l2_mbus_framefmt *
+__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!state))
+ return NULL;
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].format;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_format);
+
+struct v4l2_rect *
+__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
+ u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!state))
+ return NULL;
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].crop;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].crop;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_crop);
+
+struct v4l2_rect *
+__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(!state))
+ return NULL;
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].compose;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].compose;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_compose);
+
+struct v4l2_fract *
+__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ if (WARN_ON(!state))
+ return NULL;
+
+ lockdep_assert_held(state->lock);
+
+ if (state->pads) {
+ if (stream)
+ return NULL;
+
+ if (pad >= state->sd->entity.num_pads)
+ return NULL;
+
+ return &state->pads[pad].interval;
+ }
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].interval;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_get_interval);
+
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int
@@ -1585,14 +1744,7 @@ int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
{
struct v4l2_mbus_framefmt *fmt;
- if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
- fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
- format->stream);
- else if (format->pad < sd->entity.num_pads && format->stream == 0)
- fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
- else
- fmt = NULL;
-
+ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream);
if (!fmt)
return -EINVAL;
@@ -1602,6 +1754,22 @@ int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
}
EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
+int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct v4l2_fract *interval;
+
+ interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
+ if (!interval)
+ return -EINVAL;
+
+ fi->interval = *interval;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_frame_interval);
+
int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
const struct v4l2_subdev_krouting *routing)
@@ -1682,69 +1850,6 @@ int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
}
EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
-struct v4l2_mbus_framefmt *
-v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream)
-{
- struct v4l2_subdev_stream_configs *stream_configs;
- unsigned int i;
-
- lockdep_assert_held(state->lock);
-
- stream_configs = &state->stream_configs;
-
- for (i = 0; i < stream_configs->num_configs; ++i) {
- if (stream_configs->configs[i].pad == pad &&
- stream_configs->configs[i].stream == stream)
- return &stream_configs->configs[i].fmt;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);
-
-struct v4l2_rect *
-v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream)
-{
- struct v4l2_subdev_stream_configs *stream_configs;
- unsigned int i;
-
- lockdep_assert_held(state->lock);
-
- stream_configs = &state->stream_configs;
-
- for (i = 0; i < stream_configs->num_configs; ++i) {
- if (stream_configs->configs[i].pad == pad &&
- stream_configs->configs[i].stream == stream)
- return &stream_configs->configs[i].crop;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);
-
-struct v4l2_rect *
-v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream)
-{
- struct v4l2_subdev_stream_configs *stream_configs;
- unsigned int i;
-
- lockdep_assert_held(state->lock);
-
- stream_configs = &state->stream_configs;
-
- for (i = 0; i < stream_configs->num_configs; ++i) {
- if (stream_configs->configs[i].pad == pad &&
- stream_configs->configs[i].stream == stream)
- return &stream_configs->configs[i].compose;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);
-
int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
u32 pad, u32 stream, u32 *other_pad,
u32 *other_stream)
@@ -1789,8 +1894,7 @@ v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
if (ret)
return NULL;
- return v4l2_subdev_state_get_stream_format(state, other_pad,
- other_stream);
+ return v4l2_subdev_state_get_format(state, other_pad, other_stream);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
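
After this conversion, frame intervals are pad operations that take a subdev state, and the interval itself lives in the state alongside formats and selections. A sketch of a converted sensor driver, with hypothetical names; reads can be served directly by the generic v4l2_subdev_get_frame_interval() helper added above:

#include <media/v4l2-subdev.h>

static int mysensor_set_frame_interval(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_frame_interval *fi)
{
	struct v4l2_fract *interval;

	interval = v4l2_subdev_state_get_interval(state, fi->pad, fi->stream);
	if (!interval)
		return -EINVAL;

	*interval = fi->interval;

	if (fi->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
		/* Program blanking/PLL registers here; a TRY call only
		 * updates the state bookkeeping above. */
	}

	return 0;
}

static const struct v4l2_subdev_pad_ops mysensor_pad_ops = {
	.get_frame_interval = v4l2_subdev_get_frame_interval,
	.set_frame_interval = mysensor_set_frame_interval,
};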
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index a7ab3d377..5028467b2 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -909,13 +909,11 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
return ret;
}
-static int brcmstb_dpfe_remove(struct platform_device *pdev)
+static void brcmstb_dpfe_remove(struct platform_device *pdev)
{
struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);
sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
-
- return 0;
}
static const struct of_device_id brcmstb_dpfe_of_match[] = {
@@ -936,7 +934,7 @@ static struct platform_driver brcmstb_dpfe_driver = {
.of_match_table = brcmstb_dpfe_of_match,
},
.probe = brcmstb_dpfe_probe,
- .remove = brcmstb_dpfe_remove,
+ .remove_new = brcmstb_dpfe_remove,
.resume = brcmstb_dpfe_resume,
};
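
The drivers/memory changes that follow are all the same mechanical conversion to the void-returning remove callback. A self-contained sketch of the pattern (all names hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

struct example_priv { int dummy; };

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* .remove_new returns void: an error code from remove could only be
 * ignored by the driver core, so the int return is dropped. */
static void example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	(void)priv; /* hypothetical cleanup would go here */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
module_platform_driver(example_driver);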
diff --git a/drivers/memory/brcmstb_memc.c b/drivers/memory/brcmstb_memc.c
index a6ea51996..ea9213f71 100644
--- a/drivers/memory/brcmstb_memc.c
+++ b/drivers/memory/brcmstb_memc.c
@@ -152,13 +152,11 @@ static int brcmstb_memc_probe(struct platform_device *pdev)
return 0;
}
-static int brcmstb_memc_remove(struct platform_device *pdev)
+static void brcmstb_memc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
sysfs_remove_group(&dev->kobj, &dev_attr_group);
-
- return 0;
}
enum brcmstb_memc_hwtype {
@@ -284,7 +282,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_memc_pm_ops, brcmstb_memc_suspend,
static struct platform_driver brcmstb_memc_driver = {
.probe = brcmstb_memc_probe,
- .remove = brcmstb_memc_remove,
+ .remove_new = brcmstb_memc_remove,
.driver = {
.name = "brcmstb_memc",
.of_match_table = brcmstb_memc_of_match,
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index f30564320..434982545 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1159,13 +1159,11 @@ error:
return -ENODEV;
}
-static int __exit emif_remove(struct platform_device *pdev)
+static void __exit emif_remove(struct platform_device *pdev)
{
struct emif_data *emif = platform_get_drvdata(pdev);
emif_debugfs_exit(emif);
-
- return 0;
}
static void emif_shutdown(struct platform_device *pdev)
@@ -1185,7 +1183,7 @@ MODULE_DEVICE_TABLE(of, emif_of_match);
#endif
static struct platform_driver emif_driver = {
- .remove = __exit_p(emif_remove),
+ .remove_new = __exit_p(emif_remove),
.shutdown = emif_shutdown,
.driver = {
.name = "emif",
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index 8096c4f33..f47d05f7c 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -223,7 +223,7 @@ static int ccf_probe(struct platform_device *pdev)
return 0;
}
-static int ccf_remove(struct platform_device *pdev)
+static void ccf_remove(struct platform_device *pdev)
{
struct ccf_private *ccf = dev_get_drvdata(&pdev->dev);
@@ -241,8 +241,6 @@ static int ccf_remove(struct platform_device *pdev)
iowrite32be(0, &ccf->err_regs->errinten);
break;
}
-
- return 0;
}
static struct platform_driver ccf_driver = {
@@ -251,7 +249,7 @@ static struct platform_driver ccf_driver = {
.of_match_table = ccf_matches,
},
.probe = ccf_probe,
- .remove = ccf_remove,
+ .remove_new = ccf_remove,
};
module_platform_driver(ccf_driver);
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2509e5152..15e919c24 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -84,7 +84,7 @@ static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
return 0;
}
-static int fsl_ifc_ctrl_remove(struct platform_device *dev)
+static void fsl_ifc_ctrl_remove(struct platform_device *dev)
{
struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev);
@@ -98,8 +98,6 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
iounmap(ctrl->gregs);
dev_set_drvdata(&dev->dev, NULL);
-
- return 0;
}
/*
@@ -318,7 +316,7 @@ static struct platform_driver fsl_ifc_ctrl_driver = {
.of_match_table = fsl_ifc_match,
},
.probe = fsl_ifc_ctrl_probe,
- .remove = fsl_ifc_ctrl_remove,
+ .remove_new = fsl_ifc_ctrl_remove,
};
static int __init fsl_ifc_init(void)
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index e5a93e7da..fb6db2ffe 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -384,12 +384,11 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
return 0;
}
-static int jz4780_nemc_remove(struct platform_device *pdev)
+static void jz4780_nemc_remove(struct platform_device *pdev)
{
struct jz4780_nemc *nemc = platform_get_drvdata(pdev);
clk_disable_unprepare(nemc->clk);
- return 0;
}
static const struct jz_soc_info jz4740_soc_info = {
@@ -408,7 +407,7 @@ static const struct of_device_id jz4780_nemc_dt_match[] = {
static struct platform_driver jz4780_nemc_driver = {
.probe = jz4780_nemc_probe,
- .remove = jz4780_nemc_remove,
+ .remove_new = jz4780_nemc_remove,
.driver = {
.name = "jz4780-nemc",
.of_match_table = of_match_ptr(jz4780_nemc_dt_match),
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 6523cb510..572c7fbdc 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -566,14 +566,13 @@ err_pm_disable:
return ret;
}
-static int mtk_smi_larb_remove(struct platform_device *pdev)
+static void mtk_smi_larb_remove(struct platform_device *pdev)
{
struct mtk_smi_larb *larb = platform_get_drvdata(pdev);
device_link_remove(&pdev->dev, larb->smi_common_dev);
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &mtk_smi_larb_component_ops);
- return 0;
}
static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
@@ -616,7 +615,7 @@ static const struct dev_pm_ops smi_larb_pm_ops = {
static struct platform_driver mtk_smi_larb_driver = {
.probe = mtk_smi_larb_probe,
- .remove = mtk_smi_larb_remove,
+ .remove_new = mtk_smi_larb_remove,
.driver = {
.name = "mtk-smi-larb",
.of_match_table = mtk_smi_larb_of_ids,
@@ -795,14 +794,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
return 0;
}
-static int mtk_smi_common_remove(struct platform_device *pdev)
+static void mtk_smi_common_remove(struct platform_device *pdev)
{
struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
device_link_remove(&pdev->dev, common->smi_common_dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused mtk_smi_common_resume(struct device *dev)
@@ -842,7 +840,7 @@ static const struct dev_pm_ops smi_common_pm_ops = {
static struct platform_driver mtk_smi_common_driver = {
.probe = mtk_smi_common_probe,
- .remove = mtk_smi_common_remove,
+ .remove_new = mtk_smi_common_remove,
.driver = {
.name = "mtk-smi-common",
.of_match_table = mtk_smi_common_of_ids,
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index d78f73db3..80d038884 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2690,7 +2690,7 @@ gpio_init_failed:
return rc;
}
-static int gpmc_remove(struct platform_device *pdev)
+static void gpmc_remove(struct platform_device *pdev)
{
int i;
struct gpmc_device *gpmc = platform_get_drvdata(pdev);
@@ -2702,8 +2702,6 @@ static int gpmc_remove(struct platform_device *pdev)
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2747,7 +2745,7 @@ MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
static struct platform_driver gpmc_driver = {
.probe = gpmc_probe,
- .remove = gpmc_remove,
+ .remove_new = gpmc_remove,
.driver = {
.name = DEVICE_NAME,
.of_match_table = of_match_ptr(gpmc_dt_ids),
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 9695b2d3a..3167826b2 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -777,13 +777,11 @@ static int rpcif_probe(struct platform_device *pdev)
return 0;
}
-static int rpcif_remove(struct platform_device *pdev)
+static void rpcif_remove(struct platform_device *pdev)
{
struct rpcif_priv *rpc = platform_get_drvdata(pdev);
platform_device_unregister(rpc->vdev);
-
- return 0;
}
static const struct of_device_id rpcif_of_match[] = {
@@ -797,7 +795,7 @@ MODULE_DEVICE_TABLE(of, rpcif_of_match);
static struct platform_driver rpcif_driver = {
.probe = rpcif_probe,
- .remove = rpcif_remove,
+ .remove_new = rpcif_remove,
.driver = {
.name = "rpc-if",
.of_match_table = rpcif_of_match,
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 6d019dbd7..da7ecd921 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1558,7 +1558,7 @@ remove_clocks:
* clean the device's resources. It just calls explicitly disable function for
* the performance counters.
*/
-static int exynos5_dmc_remove(struct platform_device *pdev)
+static void exynos5_dmc_remove(struct platform_device *pdev)
{
struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev);
@@ -1569,8 +1569,6 @@ static int exynos5_dmc_remove(struct platform_device *pdev)
clk_disable_unprepare(dmc->mout_bpll);
clk_disable_unprepare(dmc->fout_bpll);
-
- return 0;
}
static const struct of_device_id exynos5_dmc_of_match[] = {
@@ -1581,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match);
static struct platform_driver exynos5_dmc_platdrv = {
.probe = exynos5_dmc_probe,
- .remove = exynos5_dmc_remove,
+ .remove_new = exynos5_dmc_remove,
.driver = {
.name = "exynos5-dmc",
.of_match_table = exynos5_dmc_of_match,
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
index 9015e8277..47d0ea5f1 100644
--- a/drivers/memory/stm32-fmc2-ebi.c
+++ b/drivers/memory/stm32-fmc2-ebi.c
@@ -1146,7 +1146,7 @@ err_release:
return ret;
}
-static int stm32_fmc2_ebi_remove(struct platform_device *pdev)
+static void stm32_fmc2_ebi_remove(struct platform_device *pdev)
{
struct stm32_fmc2_ebi *ebi = platform_get_drvdata(pdev);
@@ -1154,8 +1154,6 @@ static int stm32_fmc2_ebi_remove(struct platform_device *pdev)
stm32_fmc2_ebi_disable_banks(ebi);
stm32_fmc2_ebi_disable(ebi);
clk_disable_unprepare(ebi->clk);
-
- return 0;
}
static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
@@ -1197,7 +1195,7 @@ MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match);
static struct platform_driver stm32_fmc2_ebi_driver = {
.probe = stm32_fmc2_ebi_probe,
- .remove = stm32_fmc2_ebi_remove,
+ .remove_new = stm32_fmc2_ebi_remove,
.driver = {
.name = "stm32_fmc2_ebi",
.of_match_table = stm32_fmc2_ebi_match,
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 4007f4e16..fcd4aea48 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -378,7 +378,7 @@ put_bpmp:
return err;
}
-static int tegra186_emc_remove(struct platform_device *pdev)
+static void tegra186_emc_remove(struct platform_device *pdev)
{
struct tegra_mc *mc = dev_get_drvdata(pdev->dev.parent);
struct tegra186_emc *emc = platform_get_drvdata(pdev);
@@ -387,8 +387,6 @@ static int tegra186_emc_remove(struct platform_device *pdev)
mc->bpmp = NULL;
tegra_bpmp_put(emc->bpmp);
-
- return 0;
}
static const struct of_device_id tegra186_emc_of_match[] = {
@@ -413,7 +411,7 @@ static struct platform_driver tegra186_emc_driver = {
.sync_state = icc_sync_state,
},
.probe = tegra186_emc_probe,
- .remove = tegra186_emc_remove,
+ .remove_new = tegra186_emc_remove,
};
module_platform_driver(tegra186_emc_driver);
diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c
index 533f85a4b..1b3183951 100644
--- a/drivers/memory/tegra/tegra186.c
+++ b/drivers/memory/tegra/tegra186.c
@@ -75,6 +75,9 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
{
u32 value, old;
+ if (client->regs.sid.security == 0 && client->regs.sid.override == 0)
+ return;
+
value = readl(mc->regs + client->regs.sid.security);
if ((value & MC_SID_STREAMID_SECURITY_OVERRIDE) == 0) {
/*
@@ -111,9 +114,12 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args args;
unsigned int i, index = 0;
+ u32 sid;
+
+ if (!tegra_dev_iommu_get_stream_id(dev, &sid))
+ return 0;
while (!of_parse_phandle_with_args(dev->of_node, "interconnects", "#interconnect-cells",
index, &args)) {
@@ -121,11 +127,10 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
for (i = 0; i < mc->soc->num_clients; i++) {
const struct tegra_mc_client *client = &mc->soc->clients[i];
- if (client->id == args.args[0]) {
- u32 sid = fwspec->ids[0] & MC_SID_STREAMID_OVERRIDE_MASK;
-
- tegra186_mc_client_sid_override(mc, client, sid);
- }
+ if (client->id == args.args[0])
+ tegra186_mc_client_sid_override(
+ mc, client,
+ sid & MC_SID_STREAMID_OVERRIDE_MASK);
}
}
@@ -136,9 +141,25 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
return 0;
}
+static int tegra186_mc_resume(struct tegra_mc *mc)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+ unsigned int i;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ const struct tegra_mc_client *client = &mc->soc->clients[i];
+
+ tegra186_mc_client_sid_override(mc, client, client->sid);
+ }
+#endif
+
+ return 0;
+}
+
const struct tegra_mc_ops tegra186_mc_ops = {
.probe = tegra186_mc_probe,
.remove = tegra186_mc_remove,
+ .resume = tegra186_mc_resume,
.probe_device = tegra186_mc_probe_device,
.handle_irq = tegra30_mc_handle_irq,
};
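
tegra_dev_iommu_get_stream_id() succeeds only when the device has exactly one IOMMU stream ID in its fwspec, replacing the unchecked fwspec->ids[0] dereference in the old code. A sketch of the calling pattern, assuming a probe-time hook (names hypothetical):

#include <linux/iommu.h>

static int example_probe_device(struct device *dev)
{
	u32 sid;

	if (!tegra_dev_iommu_get_stream_id(dev, &sid))
		return 0; /* not behind the IOMMU; nothing to program */

	/* ... program sid into the SID override registers ... */

	return 0;
}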
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index 3300bde47..78ca1d6c0 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1985,15 +1985,13 @@ release:
return err;
}
-static int tegra210_emc_remove(struct platform_device *pdev)
+static void tegra210_emc_remove(struct platform_device *pdev)
{
struct tegra210_emc *emc = platform_get_drvdata(pdev);
debugfs_remove_recursive(emc->debugfs.root);
tegra210_clk_emc_detach(emc->clk);
of_reserved_mem_device_release(emc->dev);
-
- return 0;
}
static int __maybe_unused tegra210_emc_suspend(struct device *dev)
@@ -2053,7 +2051,7 @@ static struct platform_driver tegra210_emc_driver = {
.pm = &tegra210_emc_pm_ops,
},
.probe = tegra210_emc_probe,
- .remove = tegra210_emc_remove,
+ .remove_new = tegra210_emc_remove,
};
module_platform_driver(tegra210_emc_driver);
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index f81e7df87..e192db9e0 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -427,17 +427,16 @@ error:
return ret;
}
-static int aemif_remove(struct platform_device *pdev)
+static void aemif_remove(struct platform_device *pdev)
{
struct aemif_device *aemif = platform_get_drvdata(pdev);
clk_disable_unprepare(aemif->clk);
- return 0;
}
static struct platform_driver aemif_driver = {
.probe = aemif_probe,
- .remove = aemif_remove,
+ .remove_new = aemif_remove,
.driver = {
.name = "ti-aemif",
.of_match_table = of_match_ptr(aemif_of_match),
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index cef0d3beb..592f70e9c 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -315,15 +315,13 @@ fail_free_sram:
return ret;
}
-static int ti_emif_remove(struct platform_device *pdev)
+static void ti_emif_remove(struct platform_device *pdev)
{
struct ti_emif_data *emif_data = emif_instance;
emif_instance = NULL;
ti_emif_free_sram(emif_data);
-
- return 0;
}
static const struct dev_pm_ops ti_emif_pm_ops = {
@@ -332,7 +330,7 @@ static const struct dev_pm_ops ti_emif_pm_ops = {
static struct platform_driver ti_emif_driver = {
.probe = ti_emif_probe,
- .remove = ti_emif_remove,
+ .remove_new = ti_emif_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ti_emif_of_match,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f8fdf8223..4b023ee22 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2257,7 +2257,7 @@ config MFD_VEXPRESS_SYSREG
config RAVE_SP_CORE
tristate "RAVE SP MCU core driver"
depends on SERIAL_DEV_BUS
- select CRC_CCITT
+ select CRC_ITU_T
help
Select this to get support for the Supervisory Processor
device found on several devices in RAVE line of hardware.
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index eeeb62415..8f3ebe651 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -30,7 +30,7 @@ static void ab8500_power_off(void)
{
sigset_t old;
sigset_t all;
- static const char * const pss[] = {"ab8500_ac", "pm2301", "ab8500_usb"};
+ static const char * const pss[] = {"ab8500_ac", "ab8500_usb"};
int i;
bool charger_present = false;
union power_supply_propval val;
@@ -140,14 +140,12 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
return 0;
}
-static int ab8500_sysctrl_remove(struct platform_device *pdev)
+static void ab8500_sysctrl_remove(struct platform_device *pdev)
{
sysctrl_dev = NULL;
if (pm_power_off == ab8500_power_off)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id ab8500_sysctrl_match[] = {
@@ -161,7 +159,7 @@ static struct platform_driver ab8500_sysctrl_driver = {
.of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
- .remove = ab8500_sysctrl_remove,
+ .remove_new = ab8500_sysctrl_remove,
};
static int __init ab8500_sysctrl_init(void)
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 79d393b60..603b1cd52 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -288,13 +288,12 @@ failed:
return retval;
}
-static int ec_device_remove(struct platform_device *pdev)
+static void ec_device_remove(struct platform_device *pdev)
{
struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev);
mfd_remove_devices(ec->dev);
device_unregister(&ec->class_dev);
- return 0;
}
static const struct platform_device_id cros_ec_id[] = {
@@ -309,7 +308,7 @@ static struct platform_driver cros_ec_dev_driver = {
},
.id_table = cros_ec_id,
.probe = ec_device_probe,
- .remove = ec_device_remove,
+ .remove_new = ec_device_remove,
};
static int __init cros_ec_dev_init(void)
diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
index 4be4df9dd..1d85bbf8c 100644
--- a/drivers/mfd/cs42l43-sdw.c
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -6,11 +6,11 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mfd/cs42l43-regs.h>
#include <linux/module.h>
-#include <linux/device.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw_type.h>
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 45da007d3..73a221079 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -588,16 +588,8 @@ static struct regmap_config da9062_regmap_config = {
.volatile_table = &da9062_aa_volatile_table,
};
-static const struct of_device_id da9062_dt_ids[] = {
- { .compatible = "dlg,da9061", .data = (void *)COMPAT_TYPE_DA9061, },
- { .compatible = "dlg,da9062", .data = (void *)COMPAT_TYPE_DA9062, },
- { }
-};
-MODULE_DEVICE_TABLE(of, da9062_dt_ids);
-
static int da9062_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
struct da9062 *chip;
unsigned int irq_base = 0;
const struct mfd_cell *cell;
@@ -611,10 +603,7 @@ static int da9062_i2c_probe(struct i2c_client *i2c)
if (!chip)
return -ENOMEM;
- if (i2c->dev.of_node)
- chip->chip_type = (uintptr_t)of_device_get_match_data(&i2c->dev);
- else
- chip->chip_type = id->driver_data;
+ chip->chip_type = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, chip);
chip->dev = &i2c->dev;
@@ -714,10 +703,17 @@ static void da9062_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
}
+static const struct of_device_id da9062_dt_ids[] = {
+ { .compatible = "dlg,da9061", .data = (void *)COMPAT_TYPE_DA9061 },
+ { .compatible = "dlg,da9062", .data = (void *)COMPAT_TYPE_DA9062 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da9062_dt_ids);
+
static const struct i2c_device_id da9062_i2c_id[] = {
{ "da9061", COMPAT_TYPE_DA9061 },
{ "da9062", COMPAT_TYPE_DA9062 },
- { },
+ { }
};
MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
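
i2c_get_match_data() folds the OF/ACPI/i2c_device_id lookups that the removed branch open-coded into one call, returning the match data of whichever table matched. A sketch of the idiom (driver names hypothetical):

#include <linux/i2c.h>

static int example_i2c_probe(struct i2c_client *i2c)
{
	uintptr_t chip_type = (uintptr_t)i2c_get_match_data(i2c);

	if (!chip_type)
		return -ENODEV;

	dev_info(&i2c->dev, "chip type %lu\n", (unsigned long)chip_type);

	return 0;
}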
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 1506d8d35..e58990c85 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -137,7 +137,7 @@ static int exynos_lpass_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
-static int exynos_lpass_remove(struct platform_device *pdev)
+static void exynos_lpass_remove(struct platform_device *pdev)
{
struct exynos_lpass *lpass = platform_get_drvdata(pdev);
@@ -146,8 +146,6 @@ static int exynos_lpass_remove(struct platform_device *pdev)
if (!pm_runtime_status_suspended(&pdev->dev))
exynos_lpass_disable(lpass);
regmap_exit(lpass->top);
-
- return 0;
}
static int __maybe_unused exynos_lpass_suspend(struct device *dev)
@@ -187,7 +185,7 @@ static struct platform_driver exynos_lpass_driver = {
.of_match_table = exynos_lpass_of_match,
},
.probe = exynos_lpass_probe,
- .remove = exynos_lpass_remove,
+ .remove_new = exynos_lpass_remove,
};
module_platform_driver(exynos_lpass_driver);
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 089c2ce61..74f38bf37 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -194,11 +194,9 @@ err_irq:
return ret;
}
-static int mx25_tsadc_remove(struct platform_device *pdev)
+static void mx25_tsadc_remove(struct platform_device *pdev)
{
mx25_tsadc_unset_irq(pdev);
-
- return 0;
}
static const struct of_device_id mx25_tsadc_ids[] = {
@@ -213,7 +211,7 @@ static struct platform_driver mx25_tsadc_driver = {
.of_match_table = mx25_tsadc_ids,
},
.probe = mx25_tsadc_probe,
- .remove = mx25_tsadc_remove,
+ .remove_new = mx25_tsadc_remove,
};
module_platform_driver(mx25_tsadc_driver);
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 8feae8d8f..042109304 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -144,13 +144,12 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
return 0;
}
-static int hi655x_pmic_remove(struct platform_device *pdev)
+static void hi655x_pmic_remove(struct platform_device *pdev)
{
struct hi655x_pmic *pmic = platform_get_drvdata(pdev);
regmap_del_irq_chip(gpiod_to_irq(pmic->gpio), pmic->irq_data);
mfd_remove_devices(&pdev->dev);
- return 0;
}
static const struct of_device_id hi655x_pmic_match[] = {
@@ -165,7 +164,7 @@ static struct platform_driver hi655x_pmic_driver = {
.of_match_table = hi655x_pmic_match,
},
.probe = hi655x_pmic_probe,
- .remove = hi655x_pmic_remove,
+ .remove_new = hi655x_pmic_remove,
};
module_platform_driver(hi655x_pmic_driver);
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 212818aef..2a83f8678 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -8,15 +8,20 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/gfp_types.h>
#include <linux/ioport.h>
-#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+
#include <linux/pxa2xx_ssp.h>
+#include <asm/errno.h>
+
#include "intel-lpss.h"
static const struct property_entry spt_spi_properties[] = {
@@ -169,23 +174,20 @@ MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
static int intel_lpss_acpi_probe(struct platform_device *pdev)
{
+ const struct intel_lpss_platform_info *data;
struct intel_lpss_platform_info *info;
- const struct acpi_device_id *id;
int ret;
- id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
- if (!id)
+ data = device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
- info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
- GFP_KERNEL);
+ info = devm_kmemdup(&pdev->dev, data, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ /* No need to check mem and irq here as intel_lpss_probe() does it for us */
info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!info->mem)
- return -ENODEV;
-
info->irq = platform_get_irq(pdev, 0);
ret = intel_lpss_probe(&pdev->dev, info);
@@ -198,23 +200,19 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev)
return 0;
}
-static int intel_lpss_acpi_remove(struct platform_device *pdev)
+static void intel_lpss_acpi_remove(struct platform_device *pdev)
{
intel_lpss_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
-static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
-
static struct platform_driver intel_lpss_acpi_driver = {
.probe = intel_lpss_acpi_probe,
- .remove = intel_lpss_acpi_remove,
+ .remove_new = intel_lpss_acpi_remove,
.driver = {
.name = "intel-lpss",
.acpi_match_table = intel_lpss_acpi_ids,
- .pm = &intel_lpss_acpi_pm_ops,
+ .pm = pm_ptr(&intel_lpss_pm_ops),
},
};
@@ -224,3 +222,4 @@ MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS ACPI driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(INTEL_LPSS);
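Two changes meet in this file: probe now uses the bus-agnostic device_get_match_data() instead of acpi_match_device(), and the module imports the INTEL_LPSS symbol namespace because the core's intel_lpss_probe()/intel_lpss_remove() exports become namespaced further down. A sketch of the match-data half (struct foo_platform_info stands in for the real platform info):

#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_platform_info {
	int variant;
};

static int foo_acpi_probe(struct platform_device *pdev)
{
	/* Returns the matched entry's driver data for ACPI and OF alike */
	const struct foo_platform_info *data = device_get_match_data(&pdev->dev);

	if (!data)
		return -ENODEV;
	return 0;
}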
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index cab11ed23..8c00e0c69 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -8,14 +8,19 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/ioport.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/gfp_types.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+
#include <linux/pxa2xx_ssp.h>
+#include <asm/errno.h>
+
#include "intel-lpss.h"
static const struct pci_device_id quirk_ids[] = {
@@ -40,6 +45,7 @@ static const struct pci_device_id quirk_ids[] = {
static int intel_lpss_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ const struct intel_lpss_platform_info *data = (void *)id->driver_data;
const struct pci_device_id *quirk_pci_info;
struct intel_lpss_platform_info *info;
int ret;
@@ -48,13 +54,17 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
- GFP_KERNEL);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (ret < 0)
+ return ret;
+
+ info = devm_kmemdup(&pdev->dev, data, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info->mem = &pdev->resource[0];
- info->irq = pdev->irq;
+ /* No need to check mem and irq here as intel_lpss_probe() does it for us */
+ info->mem = pci_resource_n(pdev, 0);
+ info->irq = pci_irq_vector(pdev, 0);
quirk_pci_info = pci_match_id(quirk_ids, pdev);
if (quirk_pci_info)
@@ -84,8 +94,6 @@ static void intel_lpss_pci_remove(struct pci_dev *pdev)
intel_lpss_remove(&pdev->dev);
}
-static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
-
static const struct property_entry spt_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_SPT_SSP),
{ }
@@ -596,7 +604,7 @@ static struct pci_driver intel_lpss_pci_driver = {
.probe = intel_lpss_pci_probe,
.remove = intel_lpss_pci_remove,
.driver = {
- .pm = &intel_lpss_pci_pm_ops,
+ .pm = pm_ptr(&intel_lpss_pm_ops),
},
};
@@ -606,3 +614,4 @@ MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel LPSS PCI driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(INTEL_LPSS);
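The PCI glue stops reading pdev->irq directly and allocates its single legacy interrupt through the IRQ-vector API instead. In fragment form, with the MSI flag added purely to illustrate where this API generalizes (the patch itself requests PCI_IRQ_LEGACY only):

/* Request exactly one vector; prefer MSI, fall back to legacy INTx */
int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);

if (nvec < 0)
	return nvec;
info->irq = pci_irq_vector(pdev, 0);	/* Linux IRQ number of vector 0 */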
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index d422e88ba..2a9018112 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -10,26 +10,34 @@
* Jarkko Nikula <jarkko.nikula@linux.intel.com>
*/
-#include <linux/clk.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/property.h>
-#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dma/idma64.h>
#include "intel-lpss.h"
+struct dentry;
+
#define LPSS_DEV_OFFSET 0x000
#define LPSS_DEV_SIZE 0x200
#define LPSS_PRIV_OFFSET 0x200
@@ -385,9 +393,12 @@ int intel_lpss_probe(struct device *dev,
struct intel_lpss *lpss;
int ret;
- if (!info || !info->mem || info->irq <= 0)
+ if (!info || !info->mem)
return -EINVAL;
+ if (info->irq < 0)
+ return info->irq;
+
lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
if (!lpss)
return -ENOMEM;
@@ -412,7 +423,7 @@ int intel_lpss_probe(struct device *dev,
intel_lpss_init_dev(lpss);
- lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
+ lpss->devid = ida_alloc(&intel_lpss_devid_ida, GFP_KERNEL);
if (lpss->devid < 0)
return lpss->devid;
@@ -449,11 +460,11 @@ err_remove_ltr:
intel_lpss_unregister_clock(lpss);
err_clk_register:
- ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+ ida_free(&intel_lpss_devid_ida, lpss->devid);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_lpss_probe);
+EXPORT_SYMBOL_NS_GPL(intel_lpss_probe, INTEL_LPSS);
void intel_lpss_remove(struct device *dev)
{
@@ -463,11 +474,10 @@ void intel_lpss_remove(struct device *dev)
intel_lpss_debugfs_remove(lpss);
intel_lpss_ltr_hide(lpss);
intel_lpss_unregister_clock(lpss);
- ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+ ida_free(&intel_lpss_devid_ida, lpss->devid);
}
-EXPORT_SYMBOL_GPL(intel_lpss_remove);
+EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, INTEL_LPSS);
-#ifdef CONFIG_PM
static int resume_lpss_device(struct device *dev, void *data)
{
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
@@ -476,7 +486,7 @@ static int resume_lpss_device(struct device *dev, void *data)
return 0;
}
-int intel_lpss_prepare(struct device *dev)
+static int intel_lpss_prepare(struct device *dev)
{
/*
* Resume both child devices before entering system sleep. This
@@ -485,9 +495,8 @@ int intel_lpss_prepare(struct device *dev)
device_for_each_child_reverse(dev, NULL, resume_lpss_device);
return 0;
}
-EXPORT_SYMBOL_GPL(intel_lpss_prepare);
-int intel_lpss_suspend(struct device *dev)
+static int intel_lpss_suspend(struct device *dev)
{
struct intel_lpss *lpss = dev_get_drvdata(dev);
unsigned int i;
@@ -506,9 +515,8 @@ int intel_lpss_suspend(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_lpss_suspend);
-int intel_lpss_resume(struct device *dev)
+static int intel_lpss_resume(struct device *dev)
{
struct intel_lpss *lpss = dev_get_drvdata(dev);
unsigned int i;
@@ -521,8 +529,12 @@ int intel_lpss_resume(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_lpss_resume);
-#endif
+
+EXPORT_NS_GPL_DEV_PM_OPS(intel_lpss_pm_ops, INTEL_LPSS) = {
+ .prepare = pm_sleep_ptr(&intel_lpss_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
+ RUNTIME_PM_OPS(intel_lpss_suspend, intel_lpss_resume, NULL)
+};
static int __init intel_lpss_init(void)
{
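The core file collects several modernizations at once: the deprecated ida_simple_get()/ida_simple_remove() pair becomes ida_alloc()/ida_free(), a negative info->irq (typically the error from platform_get_irq()) is now propagated instead of being flattened to -EINVAL, and the exports move into the INTEL_LPSS namespace so only modules that explicitly import it can link against them. The namespacing pattern in miniature (names illustrative):

#include <linux/gfp_types.h>
#include <linux/idr.h>
#include <linux/module.h>

static DEFINE_IDA(foo_ida);

int foo_alloc_id(void)
{
	return ida_alloc(&foo_ida, GFP_KERNEL);	/* was ida_simple_get() */
}
/* Resolvable only by modules that state MODULE_IMPORT_NS(FOO) */
EXPORT_SYMBOL_NS_GPL(foo_alloc_id, FOO);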
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index f50d11d60..6f8f668f4 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -42,32 +42,6 @@ int intel_lpss_probe(struct device *dev,
const struct intel_lpss_platform_info *info);
void intel_lpss_remove(struct device *dev);
-#ifdef CONFIG_PM
-int intel_lpss_prepare(struct device *dev);
-int intel_lpss_suspend(struct device *dev);
-int intel_lpss_resume(struct device *dev);
-
-#ifdef CONFIG_PM_SLEEP
-#define INTEL_LPSS_SLEEP_PM_OPS \
- .prepare = intel_lpss_prepare, \
- SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
-#else
-#define INTEL_LPSS_SLEEP_PM_OPS
-#endif
-
-#define INTEL_LPSS_RUNTIME_PM_OPS \
- .runtime_suspend = intel_lpss_suspend, \
- .runtime_resume = intel_lpss_resume,
-
-#else /* !CONFIG_PM */
-#define INTEL_LPSS_SLEEP_PM_OPS
-#define INTEL_LPSS_RUNTIME_PM_OPS
-#endif /* CONFIG_PM */
-
-#define INTEL_LPSS_PM_OPS(name) \
-const struct dev_pm_ops name = { \
- INTEL_LPSS_SLEEP_PM_OPS \
- INTEL_LPSS_RUNTIME_PM_OPS \
-}
+extern const struct dev_pm_ops intel_lpss_pm_ops;
#endif /* __MFD_INTEL_LPSS_H */
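With the CONFIG_PM ifdeffery gone, the header only declares one namespaced dev_pm_ops that intel-lpss.c defines via EXPORT_NS_GPL_DEV_PM_OPS(); both bus glues reference it through pm_ptr(), which compiles the pointer out when CONFIG_PM is disabled. The shape of that pattern (names illustrative):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

/* One shared definition, exported into the FOO namespace */
EXPORT_NS_GPL_DEV_PM_OPS(foo_pm_ops, FOO) = {
	LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_suspend, foo_resume, NULL)
};

/* Each driver then sets .pm = pm_ptr(&foo_pm_ops) and imports FOO */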
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 33c6cfe9f..67af36a38 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -535,7 +535,7 @@ static int kempld_probe(struct platform_device *pdev)
return kempld_detect_device(pld);
}
-static int kempld_remove(struct platform_device *pdev)
+static void kempld_remove(struct platform_device *pdev)
{
struct kempld_device_data *pld = platform_get_drvdata(pdev);
const struct kempld_platform_data *pdata = dev_get_platdata(pld->dev);
@@ -544,8 +544,6 @@ static int kempld_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
pdata->release_hardware_mutex(pld);
-
- return 0;
}
#ifdef CONFIG_ACPI
@@ -563,7 +561,7 @@ static struct platform_driver kempld_driver = {
.acpi_match_table = ACPI_PTR(kempld_acpi_table),
},
.probe = kempld_probe,
- .remove = kempld_remove,
+ .remove_new = kempld_remove,
};
static const struct dmi_system_id kempld_dmi_table[] __initconst = {
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 1c9831b78..3883e472b 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -232,7 +232,7 @@ static int mcp_sa11x0_probe(struct platform_device *dev)
return ret;
}
-static int mcp_sa11x0_remove(struct platform_device *dev)
+static void mcp_sa11x0_remove(struct platform_device *dev)
{
struct mcp *mcp = platform_get_drvdata(dev);
struct mcp_sa11x0 *m = priv(mcp);
@@ -251,8 +251,6 @@ static int mcp_sa11x0_remove(struct platform_device *dev)
mcp_host_free(mcp);
release_mem_region(mem1->start, resource_size(mem1));
release_mem_region(mem0->start, resource_size(mem0));
-
- return 0;
}
static int mcp_sa11x0_suspend(struct device *dev)
@@ -288,7 +286,7 @@ static const struct dev_pm_ops mcp_sa11x0_pm_ops = {
static struct platform_driver mcp_sa11x0_driver = {
.probe = mcp_sa11x0_probe,
- .remove = mcp_sa11x0_remove,
+ .remove_new = mcp_sa11x0_remove,
.driver = {
.name = DRIVER_NAME,
.pm = pm_sleep_ptr(&mcp_sa11x0_pm_ops),
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index ec1b35656..73893890b 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -230,13 +230,11 @@ err_clk:
return ret;
}
-static int mxs_lradc_remove(struct platform_device *pdev)
+static void mxs_lradc_remove(struct platform_device *pdev)
{
struct mxs_lradc *lradc = platform_get_drvdata(pdev);
clk_disable_unprepare(lradc->clk);
-
- return 0;
}
static struct platform_driver mxs_lradc_driver = {
@@ -245,7 +243,7 @@ static struct platform_driver mxs_lradc_driver = {
.of_match_table = mxs_lradc_dt_ids,
},
.probe = mxs_lradc_probe,
- .remove = mxs_lradc_remove,
+ .remove_new = mxs_lradc_remove,
};
module_platform_driver(mxs_lradc_driver);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 78f1bb55d..ebc62033d 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -816,13 +816,12 @@ static int usbhs_omap_remove_child(struct device *dev, void *data)
*
* Reverses the effect of usbhs_omap_probe().
*/
-static int usbhs_omap_remove(struct platform_device *pdev)
+static void usbhs_omap_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
/* remove children */
device_for_each_child(&pdev->dev, NULL, usbhs_omap_remove_child);
- return 0;
}
static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
@@ -845,7 +844,7 @@ static struct platform_driver usbhs_omap_driver = {
.of_match_table = usbhs_omap_dt_ids,
},
.probe = usbhs_omap_probe,
- .remove = usbhs_omap_remove,
+ .remove_new = usbhs_omap_remove,
};
MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 906353735..b6303ddb0 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -270,7 +270,7 @@ static int usbtll_omap_probe(struct platform_device *pdev)
*
* Reverses the effect of usbtll_omap_probe().
*/
-static int usbtll_omap_remove(struct platform_device *pdev)
+static void usbtll_omap_remove(struct platform_device *pdev)
{
struct usbtll_omap *tll = platform_get_drvdata(pdev);
int i;
@@ -287,7 +287,6 @@ static int usbtll_omap_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
- return 0;
}
static const struct of_device_id usbtll_omap_dt_ids[] = {
@@ -303,7 +302,7 @@ static struct platform_driver usbtll_omap_driver = {
.of_match_table = usbtll_omap_dt_ids,
},
.probe = usbtll_omap_probe,
- .remove = usbtll_omap_remove,
+ .remove_new = usbtll_omap_remove,
};
int omap_tll_init(struct usbhs_omap_platform_data *pdata)
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 191b1bc61..ab55906f9 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -218,7 +218,7 @@ static int pcf50633_adc_probe(struct platform_device *pdev)
return 0;
}
-static int pcf50633_adc_remove(struct platform_device *pdev)
+static void pcf50633_adc_remove(struct platform_device *pdev)
{
struct pcf50633_adc *adc = platform_get_drvdata(pdev);
int i, head;
@@ -236,8 +236,6 @@ static int pcf50633_adc_remove(struct platform_device *pdev)
kfree(adc->queue[i]);
mutex_unlock(&adc->queue_mutex);
-
- return 0;
}
static struct platform_driver pcf50633_adc_driver = {
@@ -245,7 +243,7 @@ static struct platform_driver pcf50633_adc_driver = {
.name = "pcf50633-adc",
},
.probe = pcf50633_adc_probe,
- .remove = pcf50633_adc_remove,
+ .remove_new = pcf50633_adc_remove,
};
module_platform_driver(pcf50633_adc_driver);
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 07c531bd1..8b6285f68 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -585,19 +585,17 @@ static int pm8xxx_remove_child(struct device *dev, void *unused)
return 0;
}
-static int pm8xxx_remove(struct platform_device *pdev)
+static void pm8xxx_remove(struct platform_device *pdev)
{
struct pm_irq_chip *chip = platform_get_drvdata(pdev);
device_for_each_child(&pdev->dev, NULL, pm8xxx_remove_child);
irq_domain_remove(chip->irqdomain);
-
- return 0;
}
static struct platform_driver pm8xxx_driver = {
.probe = pm8xxx_probe,
- .remove = pm8xxx_remove,
+ .remove_new = pm8xxx_remove,
.driver = {
.name = "pm8xxx-core",
.of_match_table = pm8xxx_id_table,
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 4549fa9f7..eab5bf6cf 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -53,6 +53,7 @@ static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,pm8901", .data = N_USIDS(2) },
{ .compatible = "qcom,pm8909", .data = N_USIDS(2) },
{ .compatible = "qcom,pm8916", .data = N_USIDS(2) },
+ { .compatible = "qcom,pm8937", .data = N_USIDS(2) },
{ .compatible = "qcom,pm8941", .data = N_USIDS(2) },
{ .compatible = "qcom,pm8950", .data = N_USIDS(2) },
{ .compatible = "qcom,pm8994", .data = N_USIDS(2) },
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index da50eba10..6ff84b260 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -9,7 +9,7 @@
*/
#include <linux/atomic.h>
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -251,7 +251,7 @@ static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
{
- const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+ const u16 calculated = crc_itu_t(0xffff, buf, size);
/*
* While the rest of the wire protocol is little-endian,
@@ -471,17 +471,17 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
rave_sp_receive_reply(sp, data, length);
}
-static int rave_sp_receive_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t size)
+static ssize_t rave_sp_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct device *dev = &serdev->dev;
struct rave_sp *sp = dev_get_drvdata(dev);
struct rave_sp_deframer *deframer = &sp->deframer;
- const unsigned char *src = buf;
- const unsigned char *end = buf + size;
+ const u8 *src = buf;
+ const u8 *end = buf + size;
while (src < end) {
- const unsigned char byte = *src++;
+ const u8 byte = *src++;
switch (deframer->state) {
case RAVE_SP_EXPECT_SOF:
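The rave-sp change is behavior-preserving: crc_ccitt_false(0xffff, ...) and crc_itu_t(0xffff, ...) both compute CRC-16/CCITT-FALSE (polynomial 0x1021, MSB-first, initial value 0xffff), so switching to lib/crc-itu-t simply drops an extra library dependency. As used here, in sketch form:

#include <linux/crc-itu-t.h>

/* CRC-16/CCITT-FALSE over a frame body; the 0xffff seed must be kept */
static u16 frame_csum(const u8 *buf, size_t size)
{
	return crc_itu_t(0xffff, buf, size);
}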
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 28027982c..b3592982a 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1667,7 +1667,7 @@ static void sm501_pci_remove(struct pci_dev *dev)
pci_disable_device(dev);
}
-static int sm501_plat_remove(struct platform_device *dev)
+static void sm501_plat_remove(struct platform_device *dev)
{
struct sm501_devdata *sm = platform_get_drvdata(dev);
@@ -1675,8 +1675,6 @@ static int sm501_plat_remove(struct platform_device *dev)
iounmap(sm->regs);
release_mem_region(sm->io_res->start, 0x100);
-
- return 0;
}
static const struct pci_device_id sm501_pci_tbl[] = {
@@ -1707,7 +1705,7 @@ static struct platform_driver sm501_plat_driver = {
.of_match_table = of_sm501_match_tbl,
},
.probe = sm501_plat_probe,
- .remove = sm501_plat_remove,
+ .remove_new = sm501_plat_remove,
.suspend = pm_sleep_ptr(sm501_plat_suspend),
.resume = pm_sleep_ptr(sm501_plat_resume),
};
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index a656a1c18..9fd13d889 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -306,7 +306,7 @@ static int stm32_timers_probe(struct platform_device *pdev)
return ret;
}
-static int stm32_timers_remove(struct platform_device *pdev)
+static void stm32_timers_remove(struct platform_device *pdev)
{
struct stm32_timers *ddata = platform_get_drvdata(pdev);
@@ -316,8 +316,6 @@ static int stm32_timers_remove(struct platform_device *pdev)
*/
of_platform_depopulate(&pdev->dev);
stm32_timers_dma_remove(&pdev->dev, ddata);
-
- return 0;
}
static const struct of_device_id stm32_timers_of_match[] = {
@@ -328,7 +326,7 @@ MODULE_DEVICE_TABLE(of, stm32_timers_of_match);
static struct platform_driver stm32_timers_driver = {
.probe = stm32_timers_probe,
- .remove = stm32_timers_remove,
+ .remove_new = stm32_timers_remove,
.driver = {
.name = "stm32-timers",
.of_match_table = stm32_timers_of_match,
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index b88eb70c1..4bbd542d7 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -298,7 +298,7 @@ err_disable_clk:
return err;
}
-static int ti_tscadc_remove(struct platform_device *pdev)
+static void ti_tscadc_remove(struct platform_device *pdev)
{
struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
@@ -308,8 +308,6 @@ static int ti_tscadc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
mfd_remove_devices(tscadc->dev);
-
- return 0;
}
static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
@@ -381,7 +379,7 @@ static struct platform_driver ti_tscadc_driver = {
.of_match_table = ti_tscadc_dt_ids,
},
.probe = ti_tscadc_probe,
- .remove = ti_tscadc_remove,
+ .remove_new = ti_tscadc_remove,
};
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 152179ee1..fdce81b33 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -36,6 +36,7 @@ static const struct regmap_config tps65086_regmap_config = {
.val_bits = 8,
.cache_type = REGCACHE_MAPLE,
.volatile_table = &tps65086_volatile_table,
+ .max_register = TPS65086_OC_STATUS,
};
static const struct regmap_irq tps65086_irqs[] = {
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 8f4210075..f206a9c50 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -140,15 +140,13 @@ static int tps65911_comparator_probe(struct platform_device *pdev)
return ret;
}
-static int tps65911_comparator_remove(struct platform_device *pdev)
+static void tps65911_comparator_remove(struct platform_device *pdev)
{
struct tps65910 *tps65910;
tps65910 = dev_get_drvdata(pdev->dev.parent);
device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
-
- return 0;
}
static struct platform_driver tps65911_comparator_driver = {
@@ -156,7 +154,7 @@ static struct platform_driver tps65911_comparator_driver = {
.name = "tps65911-comparator",
},
.probe = tps65911_comparator_probe,
- .remove = tps65911_comparator_remove,
+ .remove_new = tps65911_comparator_remove,
};
static int __init tps65911_comparator_init(void)
diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c
index f4b4f37f9..24b72847e 100644
--- a/drivers/mfd/tps6594-spi.c
+++ b/drivers/mfd/tps6594-spi.c
@@ -98,7 +98,7 @@ static int tps6594_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, tps);
tps->dev = dev;
- tps->reg = spi->chip_select;
+ tps->reg = spi_get_chipselect(spi, 0);
tps->irq = spi->irq;
tps->regmap = devm_regmap_init(dev, NULL, spi, &tps6594_spi_regmap_config);
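Since an SPI device may now own several chip selects, spi->chip_select is no longer meant to be read directly; drivers go through the accessor, where index 0 is the first (and, for this device, only) line. In sketch form (names illustrative):

#include <linux/spi/spi.h>

static int foo_spi_probe(struct spi_device *spi)
{
	/* Accessor replaces direct spi->chip_select field access */
	u8 cs = spi_get_chipselect(spi, 0);

	dev_dbg(&spi->dev, "using chip select %u\n", cs);
	return 0;
}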
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index 88002f894..d436ddf66 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -258,12 +258,10 @@ static int twl4030_audio_probe(struct platform_device *pdev)
return ret;
}
-static int twl4030_audio_remove(struct platform_device *pdev)
+static void twl4030_audio_remove(struct platform_device *pdev)
{
mfd_remove_devices(&pdev->dev);
twl4030_audio_dev = NULL;
-
- return 0;
}
static const struct of_device_id twl4030_audio_of_match[] = {
@@ -278,7 +276,7 @@ static struct platform_driver twl4030_audio_driver = {
.of_match_table = twl4030_audio_of_match,
},
.probe = twl4030_audio_probe,
- .remove = twl4030_audio_remove,
+ .remove_new = twl4030_audio_remove,
};
module_platform_driver(twl4030_audio_driver);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f37c4b838..4fb291f0b 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -562,6 +562,18 @@ config TPS6594_PFSM
This driver can also be built as a module. If so, the module
will be called tps6594-pfsm.
+config NSM
+ tristate "Nitro (Enclaves) Security Module support"
+ depends on VIRTIO
+ select HW_RANDOM
+ help
+ This driver provides support for the Nitro Security Module
+ in AWS EC2 Nitro-based Enclaves. It exposes a /dev/nsm device
+ that user space can use to communicate with the hypervisor.
+
+ To compile this driver as a module, choose M here.
+ The module will be called nsm.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f2a4d1ff6..ea6ea5bbb 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -67,3 +67,4 @@ obj-$(CONFIG_TMR_MANAGER) += xilinx_tmr_manager.o
obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o
obj-$(CONFIG_TPS6594_ESM) += tps6594-esm.o
obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o
+obj-$(CONFIG_NSM) += nsm.o
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 2bce835ca..59bab76ff 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -64,9 +64,9 @@ static void bcm_vk_tty_wq_handler(struct work_struct *work)
struct bcm_vk_tty *vktty;
int card_status;
int count;
- unsigned char c;
int i;
int wr;
+ u8 c;
card_status = vkread32(vk, BAR_0, BAR_CARD_STATUS);
if (BCM_VK_INTF_IS_DOWN(card_status))
@@ -192,7 +192,7 @@ static ssize_t bcm_vk_tty_write(struct tty_struct *tty, const u8 *buffer,
int index;
struct bcm_vk *vk;
struct bcm_vk_tty *vktty;
- int i;
+ size_t i;
index = tty->index;
vk = dev_get_drvdata(tty->dev);
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index 895128475..1e1bca6b0 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o rts5264.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c
new file mode 100644
index 000000000..8be4ed7d9
--- /dev/null
+++ b/drivers/misc/cardreader/rts5264.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky Wu <ricky_wu@realtek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5264.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5264_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & 0x0F;
+}
+
+static void rts5264_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x88, 0x88, 0x88},
+ {0x77, 0x77, 0x77},
+ {0x99, 0x99, 0x99},
+ {0x66, 0x66, 0x66},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0x77, 0x77, 0x77},
+ {0xBB, 0xBB, 0xBB},
+ {0x65, 0x65, 0x65},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rts5264_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ if (!runtime) {
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_FW_CTL,
+ RTS5264_INFORM_RTD3_COLD, RTS5264_INFORM_RTD3_COLD);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_FORCE_PRSNT_LOW, RTS5264_FORCE_PRSNT_LOW);
+ }
+
+ rtsx_pci_write_register(pcr, RTS5264_REG_FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5264_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5264_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5264_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5264_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5264_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5264_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5264_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5264_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG1,
+ RTS5264_LDO1_TUNE_MASK, RTS5264_LDO1_33);
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_LDO1_POWERON, RTS5264_LDO1_POWERON);
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_LDO3318_POWERON, RTS5264_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5264_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5264_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ rtsx_pci_write_register(pcr, RTS5264_CARD_PWR_CTL,
+ RTS5264_PUPDC, RTS5264_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318);
+ rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG,
+ RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318_DFT);
+ rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG,
+ RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5264_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5264_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, DMACTL, DMA_RST, DMA_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5264_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5264_stop_cmd(pcr);
+ rts5264_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static int rts5264_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5264_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL,
+ RTS5264_LDO_POWERON_MASK, 0);
+
+ rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
+ CFG_SD_POW_AUTO_PD, 0);
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5264_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+ RTS5264_POW_VDET, RTS5264_POW_VDET);
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ val = mask;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN;
+ val = mask;
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, val);
+
+ mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN;
+ val = mask;
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, val);
+}
+
+static void rts5264_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+
+ mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN;
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, 0);
+
+ mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN;
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, 0);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET, RTS5264_POW_VDET, 0);
+}
+
+static void rts5264_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_LMT_THD_MASK,
+ RTS5264_LDO1_LMT_THD_2000);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_THD_MASK, RTS5264_LDO2_OCP_THD_950);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_LMT_THD_MASK,
+ RTS5264_LDO2_LMT_THD_2000);
+
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_THD_MASK, RTS5264_LDO3_OCP_THD_710);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_LMT_THD_MASK,
+ RTS5264_LDO3_LMT_THD_1500);
+
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+ RTS5264_TUNE_VROV_MASK, RTS5264_TUNE_VROV_1V6);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ } else {
+ rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0,
+ RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0,
+ RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0,
+ RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_DET,
+ RTS5264_POW_VDET, 0);
+ }
+}
+
+static int rts5264_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, RTS5264_OCP_VDD3_STS, val);
+}
+
+static int rts5264_get_ovpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, RTS5264_OVP_STS, val);
+}
+
+static void rts5264_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = mask;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL,
+ SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR,
+ SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL,
+ RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR,
+ RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR);
+
+ udelay(1000);
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL,
+ SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, 0);
+ rtsx_pci_write_register(pcr, RTS5264_OVP_CTL,
+ RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, 0);
+}
+
+static void rts5264_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5264_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ rts5264_get_ovpstat(pcr, &pcr->ovp_stat);
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (SD_VDD3_OC_NOW | SD_VDD3_OC_EVER)) ||
+ (pcr->ovp_stat & (RTS5264_OVP_NOW | RTS5264_OVP_EVER))) {
+ rts5264_clear_ocpstat(pcr);
+ rts5264_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ pcr->ovp_stat = 0;
+ }
+}
+
+static void rts5264_init_from_hw(struct rtsx_pcr *pcr)
+{
+ struct pci_dev *pdev = pcr->pci;
+ u32 lval1, lval2, i;
+ u16 setting_reg1, setting_reg2;
+ u8 valid, efuse_valid, tmp;
+
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+ REG_EFUSE_POR | REG_EFUSE_POWERON);
+ udelay(1);
+ rtsx_pci_write_register(pcr, RTS5264_EFUSE_ADDR,
+ RTS5264_EFUSE_ADDR_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_EFUSE_CTL,
+ RTS5264_EFUSE_ENABLE | RTS5264_EFUSE_MODE_MASK,
+ RTS5264_EFUSE_ENABLE);
+
+ /* Wait for the transfer to finish */
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ rtsx_pci_read_register(pcr, RTS5264_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS5264_EFUSE_READ_DATA, &tmp);
+ efuse_valid = ((tmp & 0x0C) >> 2);
+ pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+ pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
+ /* 0x816 */
+ valid = (u8)((lval2 >> 16) & 0x03);
+
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR, 0);
+ pcr_dbg(pcr, "Disable efuse por!\n");
+
+ if (efuse_valid == 2 || efuse_valid == 3) {
+ if (valid == 3) {
+ /* Bypass efuse */
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ /* Use efuse data */
+ setting_reg1 = PCR_SETTING_REG4;
+ setting_reg2 = PCR_SETTING_REG5;
+ }
+ } else if (efuse_valid == 0) {
+ // default
+ setting_reg1 = PCR_SETTING_REG1;
+ setting_reg2 = PCR_SETTING_REG2;
+ } else {
+ return;
+ }
+
+ pci_read_config_dword(pdev, setting_reg2, &lval2);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
+
+ if (!rts5264_vendor_setting_valid(lval2)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->rtd3_en = rts5264_reg_to_rtd3(lval2);
+
+ if (rts5264_reg_check_reverse_socket(lval2))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ pci_read_config_dword(pdev, setting_reg1, &lval1);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
+
+ pcr->aspm_en = rts5264_reg_to_aspm(lval1);
+ pcr->sd30_drive_sel_1v8 = rts5264_reg_to_sd30_drive_sel_1v8(lval1);
+ pcr->sd30_drive_sel_3v3 = rts5264_reg_to_sd30_drive_sel_3v3(lval1);
+
+ if (setting_reg1 == PCR_SETTING_REG1) {
+ /* store setting */
+ rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
+
+ pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
+ lval2 = lval2 & 0x00FFFFFF;
+ pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
+ }
+}
+
+static void rts5264_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+
+ if (option->ltr_en) {
+ if (option->ltr_enabled)
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ }
+}
+
+static int rts5264_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+ rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
+
+ rts5264_init_from_cfg(pcr);
+ rts5264_init_from_hw(pcr);
+
+ /* power off efuse */
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2,
+ RTS5264_CHIP_RST_N_SEL, 0);
+ rtsx_pci_write_register(pcr, RTS5264_REG_LDO12_CFG,
+ RTS5264_LDO12_SR_MASK, RTS5264_LDO12_SR_0_0_MS);
+ rtsx_pci_write_register(pcr, CDGW, 0xFF, 0x01);
+ rtsx_pci_write_register(pcr, RTS5264_CKMUX_MBIAS_PWR,
+ RTS5264_POW_CKMUX, RTS5264_POW_CKMUX);
+ rtsx_pci_write_register(pcr, RTS5264_CMD_OE_START_EARLY,
+ RTS5264_CMD_OE_EARLY_EN | RTS5264_CMD_OE_EARLY_CYCLE_MASK,
+ RTS5264_CMD_OE_EARLY_EN);
+ rtsx_pci_write_register(pcr, RTS5264_DAT_OE_START_EARLY,
+ RTS5264_DAT_OE_EARLY_EN | RTS5264_DAT_OE_EARLY_CYCLE_MASK,
+ RTS5264_DAT_OE_EARLY_EN);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rtsx_pci_write_register(pcr, RTS5264_PWR_CUT,
+ RTS5264_CFG_MEM_PD, RTS5264_CFG_MEM_PD);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_AUX_CLK_16M_EN, 0);
+
+ /* Release PRSNT# */
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_FORCE_PRSNT_LOW, 0);
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5264_fill_driving(pcr, OUTPUT_3V3);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin is forced
+ * low and the clock is requested unconditionally.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_write_register(pcr, RBCTL, U_AUTO_DMA_EN_MASK, 0);
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4,
+ RTS5264_F_HIGH_RC_MASK, RTS5264_F_HIGH_RC_400K);
+
+ if (pcr->rtd3_en) {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
+ } else {
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+ }
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
+
+ /* Clear Enter RTD3_cold information */
+ rtsx_pci_write_register(pcr, RTS5264_FW_CTL,
+ RTS5264_INFORM_RTD3_COLD, 0);
+
+ return 0;
+}
+
+static void rts5264_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5264_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ udelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5264_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5264_enable_aspm(pcr, true);
+ else
+ rts5264_disable_aspm(pcr, false);
+}
+
+static void rts5264_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* Run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* L1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5264_pcr_ops = {
+ .turn_on_led = rts5264_turn_on_led,
+ .turn_off_led = rts5264_turn_off_led,
+ .extra_init_hw = rts5264_extra_init_hw,
+ .enable_auto_blink = rts5264_enable_auto_blink,
+ .disable_auto_blink = rts5264_disable_auto_blink,
+ .card_power_on = rts5264_card_power_on,
+ .card_power_off = rts5264_card_power_off,
+ .switch_output_voltage = rts5264_switch_output_voltage,
+ .force_power_down = rts5264_force_power_down,
+ .stop_cmd = rts5264_stop_cmd,
+ .set_aspm = rts5264_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5264_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5264_enable_ocp,
+ .disable_ocp = rts5264_disable_ocp,
+ .init_ocp = rts5264_init_ocp,
+ .process_ocp = rts5264_process_ocp,
+ .clear_ocpstat = rts5264_clear_ocpstat,
+};
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u16 n;
+ u8 clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5264_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5264_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5264_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5264_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+ /* Initial stage: ~250 kHz (30 MHz card clock / 128 divider) */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = clk - 4;
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = 125/clk + 3;
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2) - 1;
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5264_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5264_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5264_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+
+ if (is_version(pcr, 0x5264, IC_VER_A)) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_CARD_CLK_SRC2,
+ RTS5264_REG_BIG_KVCO_A, 0);
+ } else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_SYS_DUMMY_1,
+ RTS5264_REG_BIG_KVCO, 0);
+ }
+
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait for the SSC clock to stabilize */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+}
+
+void rts5264_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+ u8 val;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
+ rtsx_pci_read_register(pcr, RTS5264_FW_STATUS, &val);
+ if (!(val & RTS5264_EXPRESS_LINK_FAIL_MASK))
+ pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5264_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = 0x00;
+ pcr->sd30_drive_sel_3v3 = 0x00;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->aspm_mode = ASPM_MODE_REG;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(24, 24, 11);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5264_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5264_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5264_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5264_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN);
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5264_LDO1_OCP_THD_1150;
+}
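The divider search in rts5264_pci_switch_clock() above is worth a worked trace. On the default path (no conv_clk_and_div_n hook) the SSC divider starts at n = clk - 4, is doubled while it stays below MIN_DIV_N_PCR - 4 as the power-of-two post divider steps up, and is halved once more before the register write. A standalone sketch, assuming MIN_DIV_N_PCR is 80 as in the existing rtsx_pcr code:

/* Mirror of the default divider search; steps counts CLK_DIV_1..CLK_DIV_8 */
static void ssc_divider(unsigned int clk_mhz,
			unsigned int *n_out, unsigned int *steps_out)
{
	unsigned int n = clk_mhz - 4;
	unsigned int steps = 0;

	while (n < 80 - 4 && steps < 3) {
		n = (n + 4) * 2 - 4;	/* double n; the post divider halves the output back */
		steps++;
	}
	*n_out = n / 2 - 1;		/* final halving done before the write */
	*steps_out = steps;
}

/* For a 50 MHz request: n = 46 -> 96 (one step, CLK_DIV_2) -> 47, and
 * mcu_cnt = 125 / 50 + 3 = 5, so CLK_DIV is written (CLK_DIV_2 << 4) | 5. */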
diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h
new file mode 100644
index 000000000..e3cbbf2fe
--- /dev/null
+++ b/drivers/misc/cardreader/rts5264.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Ricky Wu <ricky_wu@realtek.com>
+ */
+#ifndef RTS5264_H
+#define RTS5264_H
+
+/* Newly added register definitions */
+#define rts5264_vendor_setting_valid(reg) ((reg) & 0x010000)
+#define rts5264_reg_to_aspm(reg) \
+ (((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01))
+#define rts5264_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03)
+#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03)
+#define rts5264_reg_to_rtd3(reg) ((reg) & 0x08)
+
+#define RTS5264_AUTOLOAD_CFG0 0xFF7B
+#define RTS5264_AUTOLOAD_CFG1 0xFF7C
+#define RTS5264_AUTOLOAD_CFG3 0xFF7E
+#define RTS5264_AUTOLOAD_CFG4 0xFF7F
+#define RTS5264_FORCE_PRSNT_LOW (1 << 6)
+#define RTS5264_AUX_CLK_16M_EN (1 << 5)
+#define RTS5264_F_HIGH_RC_MASK (1 << 4)
+#define RTS5264_F_HIGH_RC_1_6M (1 << 4)
+#define RTS5264_F_HIGH_RC_400K (0 << 4)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5264_SSC_DEPTH_MASK 0x07
+#define RTS5264_SSC_DEPTH_DISALBE 0x00
+#define RTS5264_SSC_DEPTH_8M 0x01
+#define RTS5264_SSC_DEPTH_4M 0x02
+#define RTS5264_SSC_DEPTH_2M 0x03
+#define RTS5264_SSC_DEPTH_1M 0x04
+#define RTS5264_SSC_DEPTH_512K 0x05
+#define RTS5264_SSC_DEPTH_256K 0x06
+#define RTS5264_SSC_DEPTH_128K 0x07
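+/* SSC depth codes for the low bits of SSC_CTL2; rtsx_pci_init_hw() programs
+ * RTS5264_SSC_DEPTH_2M on non-A-stepping 5264 parts.
+ */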
+
+#define RTS5264_CARD_CLK_SRC2 0xFC2F
+#define RTS5264_REG_BIG_KVCO_A 0x20
+
+/* eFuse control register */
+#define RTS5264_EFUSE_CTL 0xFC30
+#define RTS5264_EFUSE_ENABLE 0x80
+/* EFUSE_MODE: 0=READ 1=PROGRAM */
+#define RTS5264_EFUSE_MODE_MASK 0x40
+#define RTS5264_EFUSE_PROGRAM 0x40
+
+#define RTS5264_EFUSE_ADDR 0xFC31
+#define RTS5264_EFUSE_ADDR_MASK 0x3F
+
+#define RTS5264_EFUSE_WRITE_DATA 0xFC32
+#define RTS5264_EFUSE_READ_DATA 0xFC34
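+/*
+ * A typical eFuse read, sketched from the register layout above (not from
+ * vendor documentation): write the target address to RTS5264_EFUSE_ADDR,
+ * leave EFUSE_MODE at READ, set RTS5264_EFUSE_ENABLE in RTS5264_EFUSE_CTL,
+ * then fetch the result from RTS5264_EFUSE_READ_DATA.
+ */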
+
+#define RTS5264_SYS_DUMMY_1 0xFC35
+#define RTS5264_REG_BIG_KVCO 0x04
+
+/* DMACTL 0xFE2C */
+#define RTS5264_DMA_PACK_SIZE_MASK 0x70
+
+#define RTS5264_FW_CFG1 0xFF55
+#define RTS5264_SYS_CLK_SEL_MCU_CLK (0x01<<7)
+#define RTS5264_CRC_CLK_SEL_MCU_CLK (0x01<<6)
+#define RTS5264_FAKE_MCU_CLOCK_GATING (0x01<<5)
+#define RTS5264_MCU_BUS_SEL_MASK (0x01<<4)
+
+/* FW status register */
+#define RTS5264_FW_STATUS 0xFF56
+#define RTS5264_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5264_FW_CTL 0xFF5F
+#define RTS5264_INFORM_RTD3_COLD (0x01<<5)
+
+#define RTS5264_REG_FPDCTL 0xFF60
+
+#define RTS5264_REG_LDO12_CFG 0xFF6E
+#define RTS5264_LDO12_SR_MASK (0x03<<6)
+#define RTS5264_LDO12_SR_1_0_MS (0x03<<6)
+#define RTS5264_LDO12_SR_0_5_MS (0x02<<6)
+#define RTS5264_LDO12_SR_0_2_5_MS (0x01<<6)
+#define RTS5264_LDO12_SR_0_0_MS (0x00<<6)
+#define RTS5264_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO12_115 (0x03<<1)
+#define RTS5264_LDO12_120 (0x04<<1)
+#define RTS5264_LDO12_125 (0x05<<1)
+#define RTS5264_LDO12_130 (0x06<<1)
+#define RTS5264_LDO12_135 (0x07<<1)
+
+/* LDO control register */
+#define RTS5264_CARD_PWR_CTL 0xFD50
+#define RTS5264_SD_CLK_ISO (0x01<<7)
+#define RTS5264_PAD_SD_DAT_FW_CTRL (0x01<<6)
+#define RTS5264_PUPDC (0x01<<5)
+#define RTS5264_SD_CMD_ISO (0x01<<4)
+
+#define RTS5264_OCP_VDD3_CTL 0xFD89
+#define SD_VDD3_DETECT_EN 0x08
+#define SD_VDD3_OCP_INT_EN 0x04
+#define SD_VDD3_OCP_INT_CLR 0x02
+#define SD_VDD3_OC_CLR 0x01
+
+#define RTS5264_OCP_VDD3_STS 0xFD8A
+#define SD_VDD3_OCP_DETECT 0x08
+#define SD_VDD3_OC_NOW 0x04
+#define SD_VDD3_OC_EVER 0x02
+
+#define RTS5264_OVP_CTL 0xFD8D
+#define RTS5264_OVP_TIME_MASK 0xF0
+#define RTS5264_OVP_TIME_DFT 0x50
+#define RTS5264_OVP_DETECT_EN 0x08
+#define RTS5264_OVP_INT_EN 0x04
+#define RTS5264_OVP_INT_CLR 0x02
+#define RTS5264_OVP_CLR 0x01
+
+#define RTS5264_OVP_STS 0xFD8E
+#define RTS5264_OVP_GLTCH_TIME_MASK 0xF0
+#define RTS5264_OVP_GLTCH_TIME_DFT 0x50
+#define RTS5264_VOVER_DET 0x08
+#define RTS5264_OVP_NOW 0x04
+#define RTS5264_OVP_EVER 0x02
+
+#define RTS5264_CMD_OE_START_EARLY 0xFDCB
+#define RTS5264_CMD_OE_EARLY_LEAVE 0x08
+#define RTS5264_CMD_OE_EARLY_CYCLE_MASK 0x06
+#define RTS5264_CMD_OE_EARLY_4CYCLE 0x06
+#define RTS5264_CMD_OE_EARLY_3CYCLE 0x04
+#define RTS5264_CMD_OE_EARLY_2CYCLE 0x02
+#define RTS5264_CMD_OE_EARLY_1CYCLE 0x00
+#define RTS5264_CMD_OE_EARLY_EN 0x01
+
+#define RTS5264_DAT_OE_START_EARLY 0xFDCC
+#define RTS5264_DAT_OE_EARLY_LEAVE 0x08
+#define RTS5264_DAT_OE_EARLY_CYCLE_MASK 0x06
+#define RTS5264_DAT_OE_EARLY_4CYCLE 0x06
+#define RTS5264_DAT_OE_EARLY_3CYCLE 0x04
+#define RTS5264_DAT_OE_EARLY_2CYCLE 0x02
+#define RTS5264_DAT_OE_EARLY_1CYCLE 0x00
+#define RTS5264_DAT_OE_EARLY_EN 0x01
+
+#define RTS5264_LDO1233318_POW_CTL 0xFF70
+#define RTS5264_TUNE_REF_LDO3318 (0x03<<6)
+#define RTS5264_TUNE_REF_LDO3318_DFT (0x02<<6)
+#define RTS5264_LDO3318_POWERON (0x01<<3)
+#define RTS5264_LDO3_POWERON (0x01<<2)
+#define RTS5264_LDO2_POWERON (0x01<<1)
+#define RTS5264_LDO1_POWERON (0x01<<0)
+#define RTS5264_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5264_DV3318_CFG 0xFF71
+#define RTS5264_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5264_DV3318_18 (0x02<<4)
+#define RTS5264_DV3318_19 (0x04<<4)
+#define RTS5264_DV3318_33 (0x07<<4)
+
+#define RTS5264_LDO1_CFG0 0xFF72
+#define RTS5264_LDO1_OCP_THD_MASK (0x07 << 5)
+#define RTS5264_LDO1_OCP_EN (0x01 << 4)
+#define RTS5264_LDO1_OCP_LMT_THD_MASK (0x03 << 2)
+#define RTS5264_LDO1_OCP_LMT_EN (0x01 << 1)
+
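+/* OCP threshold selects appear to be in mA (inferred from the define names) */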
+#define RTS5264_LDO1_OCP_THD_850 (0x00<<5)
+#define RTS5264_LDO1_OCP_THD_950 (0x01<<5)
+#define RTS5264_LDO1_OCP_THD_1050 (0x02<<5)
+#define RTS5264_LDO1_OCP_THD_1100 (0x03<<5)
+#define RTS5264_LDO1_OCP_THD_1150 (0x04<<5)
+#define RTS5264_LDO1_OCP_THD_1200 (0x05<<5)
+#define RTS5264_LDO1_OCP_THD_1300 (0x06<<5)
+#define RTS5264_LDO1_OCP_THD_1350 (0x07<<5)
+
+#define RTS5264_LDO1_LMT_THD_1700 (0x00<<2)
+#define RTS5264_LDO1_LMT_THD_1800 (0x01<<2)
+#define RTS5264_LDO1_LMT_THD_1900 (0x02<<2)
+#define RTS5264_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5264_LDO1_CFG1 0xFF73
+#define RTS5264_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO1_18 (0x05<<1)
+#define RTS5264_LDO1_33 (0x07<<1)
+#define RTS5264_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5264_LDO2_CFG0 0xFF74
+#define RTS5264_LDO2_OCP_THD_MASK (0x07<<5)
+#define RTS5264_LDO2_OCP_EN (0x01<<4)
+#define RTS5264_LDO2_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5264_LDO2_OCP_LMT_EN (0x01<<1)
+
+#define RTS5264_LDO2_OCP_THD_750 (0x00<<5)
+#define RTS5264_LDO2_OCP_THD_850 (0x01<<5)
+#define RTS5264_LDO2_OCP_THD_900 (0x02<<5)
+#define RTS5264_LDO2_OCP_THD_950 (0x03<<5)
+#define RTS5264_LDO2_OCP_THD_1050 (0x04<<5)
+#define RTS5264_LDO2_OCP_THD_1100 (0x05<<5)
+#define RTS5264_LDO2_OCP_THD_1150 (0x06<<5)
+#define RTS5264_LDO2_OCP_THD_1200 (0x07<<5)
+
+#define RTS5264_LDO2_LMT_THD_1700 (0x00<<2)
+#define RTS5264_LDO2_LMT_THD_1800 (0x01<<2)
+#define RTS5264_LDO2_LMT_THD_1900 (0x02<<2)
+#define RTS5264_LDO2_LMT_THD_2000 (0x03<<2)
+
+#define RTS5264_LDO2_CFG1 0xFF75
+#define RTS5264_LDO2_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO2_18 (0x02<<1)
+#define RTS5264_LDO2_185 (0x03<<1)
+#define RTS5264_LDO2_19 (0x04<<1)
+#define RTS5264_LDO2_195 (0x05<<1)
+#define RTS5264_LDO2_33 (0x07<<1)
+#define RTS5264_LDO2_PWD_MASK (0x01<<0)
+
+#define RTS5264_LDO3_CFG0 0xFF76
+#define RTS5264_LDO3_OCP_THD_MASK (0x07<<5)
+#define RTS5264_LDO3_OCP_EN (0x01<<4)
+#define RTS5264_LDO3_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5264_LDO3_OCP_LMT_EN (0x01<<1)
+
+#define RTS5264_LDO3_OCP_THD_610 (0x00<<5)
+#define RTS5264_LDO3_OCP_THD_630 (0x01<<5)
+#define RTS5264_LDO3_OCP_THD_670 (0x02<<5)
+#define RTS5264_LDO3_OCP_THD_710 (0x03<<5)
+#define RTS5264_LDO3_OCP_THD_750 (0x04<<5)
+#define RTS5264_LDO3_OCP_THD_770 (0x05<<5)
+#define RTS5264_LDO3_OCP_THD_810 (0x06<<5)
+#define RTS5264_LDO3_OCP_THD_850 (0x07<<5)
+
+#define RTS5264_LDO3_LMT_THD_1200 (0x00<<2)
+#define RTS5264_LDO3_LMT_THD_1300 (0x01<<2)
+#define RTS5264_LDO3_LMT_THD_1400 (0x02<<2)
+#define RTS5264_LDO3_LMT_THD_1500 (0x03<<2)
+
+#define RTS5264_LDO3_CFG1 0xFF77
+#define RTS5264_LDO3_TUNE_MASK (0x07<<1)
+#define RTS5264_LDO3_12 (0x02<<1)
+#define RTS5264_LDO3_125 (0x03<<1)
+#define RTS5264_LDO3_13 (0x04<<1)
+#define RTS5264_LDO3_135 (0x05<<1)
+#define RTS5264_LDO3_33 (0x07<<1)
+#define RTS5264_LDO3_PWD_MASK (0x01<<0)
+
+#define RTS5264_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+
+#define RTS5264_PWR_CUT 0xFF81
+#define RTS5264_CFG_MEM_PD 0xF0
+
+#define RTS5264_OVP_DET 0xFF8A
+#define RTS5264_POW_VDET 0x04
+#define RTS5264_TUNE_VROV_MASK 0x03
+#define RTS5264_TUNE_VROV_2V 0x03
+#define RTS5264_TUNE_VROV_1V8 0x02
+#define RTS5264_TUNE_VROV_1V6 0x01
+#define RTS5264_TUNE_VROV_1V4 0x00
+
+#define RTS5264_CKMUX_MBIAS_PWR 0xFF8B
+#define RTS5264_NON_XTAL_SEL 0x80
+#define RTS5264_POW_CKMUX 0x40
+#define RTS5264_LVD_MASK 0x04
+#define RTS5264_POW_PSW_MASK 0x03
+#define RTS5264_POW_PSW_DFT 0x03
+
+/* Single LUN, supports SD/SD EXPRESS */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+#define SD_EXPRESS_LUN 2
+
+int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5264_H */
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index a30751ad3..0ad2ff906 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -26,6 +26,7 @@
#include "rtsx_pcr.h"
#include "rts5261.h"
#include "rts5228.h"
+#include "rts5264.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -54,6 +55,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -714,6 +716,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (PCI_PID(pcr) == PID_5228)
return rts5228_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
+ if (PCI_PID(pcr) == PID_5264)
+ return rts5264_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
if (initial_mode) {
/* Use approximately 250 kHz here, in the initial stage */
@@ -987,7 +992,8 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
- if (int_reg & SD_OC_INT)
+ if ((int_reg & SD_OC_INT) ||
+ ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
rtsx_pci_process_ocp_interrupt(pcr);
if (int_reg & SD_INT) {
@@ -996,7 +1002,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
@@ -1159,7 +1165,9 @@ void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
{
u16 val;
- if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ if ((PCI_PID(pcr) != PID_525A) &&
+ (PCI_PID(pcr) != PID_5260) &&
+ (PCI_PID(pcr) != PID_5264)) {
rtsx_pci_read_phy_register(pcr, 0x01, &val);
val |= 1<<9;
rtsx_pci_write_phy_register(pcr, 0x01, val);
@@ -1175,7 +1183,9 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
{
u16 val;
- if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
+ if ((PCI_PID(pcr) != PID_525A) &&
+ (PCI_PID(pcr) != PID_5260) &&
+ (PCI_PID(pcr) != PID_5264)) {
rtsx_pci_read_phy_register(pcr, 0x01, &val);
val &= ~(1<<9);
rtsx_pci_write_phy_register(pcr, 0x01, val);
@@ -1226,7 +1236,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
/* Gating real mcu clock */
err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
RTS5261_MCU_CLOCK_GATING, 0);
@@ -1270,6 +1280,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
else if (PCI_PID(pcr) == PID_5228)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5228_SSC_DEPTH_2M);
+ else if (is_version(pcr, 0x5264, IC_VER_A))
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ else if (PCI_PID(pcr) == PID_5264)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5264_SSC_DEPTH_2M);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
@@ -1305,6 +1320,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5260:
case PID_5261:
case PID_5228:
+ case PID_5264:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1404,6 +1420,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5228:
rts5228_init_params(pcr);
break;
+
+ case 0x5264:
+ rts5264_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1544,7 +1564,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pcr->pci = pcidev;
dev_set_drvdata(&pcidev->dev, handle);
- if (CHK_PCI_PID(pcr, 0x525A))
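+ /* the 525A and 5264 map their register window at BAR 1 */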
+ if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
bar = 1;
len = pci_resource_len(pcidev, bar);
base = pci_resource_start(pcidev, bar);
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 37d1f316a..9215d66de 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -74,6 +74,7 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
void rts5261_init_params(struct rtsx_pcr *pcr);
void rts5228_init_params(struct rtsx_pcr *pcr);
+void rts5264_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 0562071cd..6ad0ab892 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -836,7 +836,8 @@ static inline bool cxl_is_power8(void)
{
if ((pvr_version_is(PVR_POWER8E)) ||
(pvr_version_is(PVR_POWER8NVL)) ||
- (pvr_version_is(PVR_POWER8)))
+ (pvr_version_is(PVR_POWER8)) ||
+ (pvr_version_is(PVR_HX_C2000)))
return true;
return false;
}
diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c
index 257c25da5..efd0ca8cc 100644
--- a/drivers/misc/dw-xdata-pcie.c
+++ b/drivers/misc/dw-xdata-pcie.c
@@ -333,7 +333,7 @@ static int dw_xdata_pcie_probe(struct pci_dev *pdev,
dw->pdev = pdev;
- id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xdata_ida, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "xData: unable to get id\n");
return id;
@@ -377,7 +377,7 @@ err_kfree_name:
kfree(dw->misc_dev.name);
err_ida_remove:
- ida_simple_remove(&xdata_ida, id);
+ ida_free(&xdata_ida, id);
return err;
}
@@ -396,7 +396,7 @@ static void dw_xdata_pcie_remove(struct pci_dev *pdev)
dw_xdata_stop(dw);
misc_deregister(&dw->misc_dev);
kfree(dw->misc_dev.name);
- ida_simple_remove(&xdata_ida, id);
+ ida_free(&xdata_ida, id);
}
static const struct pci_device_id dw_xdata_pcie_id_table[] = {
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index f61a80597..4bd4f32bc 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nvmem-provider.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
@@ -242,7 +243,7 @@ static const struct i2c_device_id at24_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, at24_ids);
-static const struct of_device_id at24_of_match[] = {
+static const struct of_device_id __maybe_unused at24_of_match[] = {
{ .compatible = "atmel,24c00", .data = &at24_data_24c00 },
{ .compatible = "atmel,24c01", .data = &at24_data_24c01 },
{ .compatible = "atmel,24cs01", .data = &at24_data_24cs01 },
@@ -439,12 +440,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->byte_len)
return -EINVAL;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
return ret;
- }
-
/*
* Read data from chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -486,12 +484,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->byte_len)
return -EINVAL;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
return ret;
- }
-
/*
* Write data to chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -563,6 +558,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len)
}
}
+static void at24_probe_temp_sensor(struct i2c_client *client)
+{
+ struct at24_data *at24 = i2c_get_clientdata(client);
+ struct i2c_board_info info = { .type = "jc42" };
+ int ret;
+ u8 val;
+
+ /*
+ * Byte 2 has value 11 for DDR3; earlier SPD versions don't
+ * support the thermal-sensor-present flag
+ */
+ ret = at24_read(at24, 2, &val, 1);
+ if (ret || val != 11)
+ return;
+
+ /* Byte 32, bit 7 is set if temp sensor is present */
+ ret = at24_read(at24, 32, &val, 1);
+ if (ret || !(val & BIT(7)))
+ return;
+
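+ /* JC42-compatible sensors respond at 0x18-0x1f, reusing the EEPROM's address select bits */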
+ info.addr = 0x18 | (client->addr & 7);
+
+ i2c_new_client_device(client->adapter, &info);
+}
+
static int at24_probe(struct i2c_client *client)
{
struct regmap_config regmap_config = { };
@@ -738,15 +758,6 @@ static int at24_probe(struct i2c_client *client)
}
pm_runtime_enable(dev);
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem)) {
- pm_runtime_disable(dev);
- if (!pm_runtime_status_suspended(dev))
- regulator_disable(at24->vcc_reg);
- return dev_err_probe(dev, PTR_ERR(at24->nvmem),
- "failed to register nvmem\n");
- }
-
/*
* Perform a one-byte test read to verify that the chip is functional,
* unless powering on the device is to be avoided during probe (i.e.
@@ -762,6 +773,19 @@ static int at24_probe(struct i2c_client *client)
}
}
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
+ return dev_err_probe(dev, PTR_ERR(at24->nvmem),
+ "failed to register nvmem\n");
+ }
+
+ /* If this is an SPD EEPROM, probe for a DDR3 thermal sensor */
+ if (cdata == &at24_data_spd)
+ at24_probe_temp_sensor(client);
+
pm_runtime_idle(dev);
if (writable)
@@ -812,7 +836,7 @@ static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
.pm = &at24_pm_ops,
- .of_match_table = at24_of_match,
+ .of_match_table = of_match_ptr(at24_of_match),
.acpi_match_table = ACPI_PTR(at24_acpi_ids),
},
.probe = at24_probe,
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index a1acd7713..21feebc30 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -31,6 +31,7 @@
* over performance.
*/
+#define EE1004_MAX_BUSSES 8
#define EE1004_ADDR_SET_PAGE 0x36
#define EE1004_NUM_PAGES 2
#define EE1004_PAGE_SIZE 256
@@ -42,9 +43,13 @@
* from page selection to end of read.
*/
static DEFINE_MUTEX(ee1004_bus_lock);
-static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES];
-static unsigned int ee1004_dev_count;
-static int ee1004_current_page;
+
+static struct ee1004_bus_data {
+ struct i2c_adapter *adap;
+ struct i2c_client *set_page[EE1004_NUM_PAGES];
+ unsigned int dev_count;
+ int current_page;
+} ee1004_bus_data[EE1004_MAX_BUSSES];
static const struct i2c_device_id ee1004_ids[] = {
{ "ee1004", 0 },
@@ -54,11 +59,29 @@ MODULE_DEVICE_TABLE(i2c, ee1004_ids);
/*-------------------------------------------------------------------------*/
-static int ee1004_get_current_page(void)
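+/*
+ * Find the bookkeeping entry for this adapter, lazily claiming a free
+ * slot on first use; callers hold ee1004_bus_lock, so the linear scan
+ * is race-free.
+ */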
+static struct ee1004_bus_data *ee1004_get_bus_data(struct i2c_adapter *adap)
+{
+ int i;
+
+ for (i = 0; i < EE1004_MAX_BUSSES; i++)
+ if (ee1004_bus_data[i].adap == adap)
+ return ee1004_bus_data + i;
+
+ /* If no entry exists yet, create a new one */
+ for (i = 0; i < EE1004_MAX_BUSSES; i++)
+ if (!ee1004_bus_data[i].adap) {
+ ee1004_bus_data[i].adap = adap;
+ return ee1004_bus_data + i;
+ }
+
+ return NULL;
+}
+
+static int ee1004_get_current_page(struct ee1004_bus_data *bd)
{
int err;
- err = i2c_smbus_read_byte(ee1004_set_page[0]);
+ err = i2c_smbus_read_byte(bd->set_page[0]);
if (err == -ENXIO) {
/* Nack means page 1 is selected */
return 1;
@@ -72,28 +95,29 @@ static int ee1004_get_current_page(void)
return 0;
}
-static int ee1004_set_current_page(struct device *dev, int page)
+static int ee1004_set_current_page(struct i2c_client *client, int page)
{
+ struct ee1004_bus_data *bd = i2c_get_clientdata(client);
int ret;
- if (page == ee1004_current_page)
+ if (page == bd->current_page)
return 0;
/* Data is ignored */
- ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00);
+ ret = i2c_smbus_write_byte(bd->set_page[page], 0x00);
/*
* Don't give up just yet. Some memory modules will select the page
* but not ack the command. Check which page is selected now.
*/
- if (ret == -ENXIO && ee1004_get_current_page() == page)
+ if (ret == -ENXIO && ee1004_get_current_page(bd) == page)
ret = 0;
if (ret < 0) {
- dev_err(dev, "Failed to select page %d (%d)\n", page, ret);
+ dev_err(&client->dev, "Failed to select page %d (%d)\n", page, ret);
return ret;
}
- dev_dbg(dev, "Selected page %d\n", page);
- ee1004_current_page = page;
+ dev_dbg(&client->dev, "Selected page %d\n", page);
+ bd->current_page = page;
return 0;
}
@@ -106,7 +130,7 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
page = offset >> EE1004_PAGE_SHIFT;
offset &= (1 << EE1004_PAGE_SHIFT) - 1;
- status = ee1004_set_current_page(&client->dev, page);
+ status = ee1004_set_current_page(client, page);
if (status)
return status;
@@ -158,17 +182,34 @@ static struct bin_attribute *ee1004_attrs[] = {
BIN_ATTRIBUTE_GROUPS(ee1004);
-static void ee1004_cleanup(int idx)
+static void ee1004_probe_temp_sensor(struct i2c_client *client)
{
- if (--ee1004_dev_count == 0)
- while (--idx >= 0) {
- i2c_unregister_device(ee1004_set_page[idx]);
- ee1004_set_page[idx] = NULL;
- }
+ struct i2c_board_info info = { .type = "jc42" };
+ u8 byte14;
+ int ret;
+
+ /* byte 14, bit 7 is set if temp sensor is present */
+ ret = ee1004_eeprom_read(client, &byte14, 14, 1);
+ if (ret != 1 || !(byte14 & BIT(7)))
+ return;
+
+ info.addr = 0x18 | (client->addr & 7);
+
+ i2c_new_client_device(client->adapter, &info);
+}
+
+static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd)
+{
+ if (--bd->dev_count == 0) {
+ while (--idx >= 0)
+ i2c_unregister_device(bd->set_page[idx]);
+ memset(bd, 0, sizeof(struct ee1004_bus_data));
+ }
}
static int ee1004_probe(struct i2c_client *client)
{
+ struct ee1004_bus_data *bd;
int err, cnr = 0;
/* Make sure we can operate on this adapter */
@@ -178,9 +219,19 @@ static int ee1004_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -EPFNOSUPPORT;
- /* Use 2 dummy devices for page select command */
mutex_lock(&ee1004_bus_lock);
- if (++ee1004_dev_count == 1) {
+
+ bd = ee1004_get_bus_data(client->adapter);
+ if (!bd) {
+ mutex_unlock(&ee1004_bus_lock);
+ return dev_err_probe(&client->dev, -ENOSPC,
+ "Only %d busses supported", EE1004_MAX_BUSSES);
+ }
+
+ i2c_set_clientdata(client, bd);
+
+ if (++bd->dev_count == 1) {
+ /* Use 2 dummy devices for page select command */
for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) {
struct i2c_client *cl;
@@ -189,21 +240,19 @@ static int ee1004_probe(struct i2c_client *client)
err = PTR_ERR(cl);
goto err_clients;
}
- ee1004_set_page[cnr] = cl;
+ bd->set_page[cnr] = cl;
}
/* Remember current page to avoid unneeded page select */
- err = ee1004_get_current_page();
+ err = ee1004_get_current_page(bd);
if (err < 0)
goto err_clients;
dev_dbg(&client->dev, "Currently selected page: %d\n", err);
- ee1004_current_page = err;
- } else if (client->adapter != ee1004_set_page[0]->adapter) {
- dev_err(&client->dev,
- "Driver only supports devices on a single I2C bus\n");
- err = -EOPNOTSUPP;
- goto err_clients;
+ bd->current_page = err;
}
+
+ ee1004_probe_temp_sensor(client);
+
mutex_unlock(&ee1004_bus_lock);
dev_info(&client->dev,
@@ -213,7 +262,7 @@ static int ee1004_probe(struct i2c_client *client)
return 0;
err_clients:
- ee1004_cleanup(cnr);
+ ee1004_cleanup(cnr, bd);
mutex_unlock(&ee1004_bus_lock);
return err;
@@ -221,9 +270,11 @@ static int ee1004_probe(struct i2c_client *client)
static void ee1004_remove(struct i2c_client *client)
{
+ struct ee1004_bus_data *bd = i2c_get_clientdata(client);
+
/* Remove page select clients if this is the last device */
mutex_lock(&ee1004_bus_lock);
- ee1004_cleanup(EE1004_NUM_PAGES);
+ ee1004_cleanup(EE1004_NUM_PAGES, bd);
mutex_unlock(&ee1004_bus_lock);
}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 55fc5b80e..4441aca22 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -443,7 +443,7 @@ static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
if (vsize == 0)
return -EINVAL;
- if (get_order(vsize) > MAX_ORDER)
+ if (get_order(vsize) > MAX_PAGE_ORDER)
return -ENOMEM;
dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 1c798d6b2..a2c4a9b4f 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -210,7 +210,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
- if (get_order(size) > MAX_ORDER)
+ if (get_order(size) > MAX_PAGE_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
@@ -308,7 +308,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
- if (get_order(sgl->sgl_size) > MAX_ORDER) {
+ if (get_order(sgl->sgl_size) > MAX_PAGE_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
return ret;
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 0ce4cbf6a..4f467d397 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -4,6 +4,7 @@
* page allocation and slab allocations.
*/
#include "lkdtm.h"
+#include <linux/kfence.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
@@ -132,6 +133,64 @@ static void lkdtm_READ_AFTER_FREE(void)
kfree(val);
}
+static void lkdtm_KFENCE_READ_AFTER_FREE(void)
+{
+ int *base, val, saw;
+ unsigned long timeout, resched_after;
+ size_t len = 1024;
+ /*
+ * The slub allocator will use either the first word or
+ * the middle of the allocation to store the free pointer,
+ * depending on the configuration. Store in the second word to
+ * avoid running into the freelist.
+ */
+ size_t offset = sizeof(*base);
+
+ /*
+ * 100x the sample interval should be more than enough to ensure we get
+ * a KFENCE allocation eventually.
+ */
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
+ /*
+ * Especially for non-preemption kernels, ensure the allocation-gate
+ * timer can catch up: after @resched_after, every failed allocation
+ * attempt yields, to ensure the allocation-gate timer is scheduled.
+ */
+ resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
+ do {
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_err("FAIL: Unable to allocate kfence memory!\n");
+ return;
+ }
+
+ if (is_kfence_address(base)) {
+ val = 0x12345678;
+ base[offset] = val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ } else {
+ pr_err("FAIL: Memory was not poisoned!\n");
+ pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
+ }
+ return;
+ }
+
+ kfree(base);
+ if (time_after(jiffies, resched_after))
+ cond_resched();
+ } while (time_before(jiffies, timeout));
+
+ pr_err("FAIL: kfence memory never allocated!\n");
+}
+
static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
@@ -327,6 +386,7 @@ static struct crashtype crashtypes[] = {
CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(KFENCE_READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
CRASHTYPE(SLAB_INIT_ON_ALLOC),
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 37db142de..67d9391f1 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -3,6 +3,7 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
+ default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
@@ -11,10 +12,11 @@ config INTEL_MEI
For more information see
<https://software.intel.com/en-us/manageability/>
+if INTEL_MEI
+
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
- select INTEL_MEI
- depends on X86 && PCI
+ default y
help
MEI support for ME Enabled Intel chipsets.
@@ -38,8 +40,6 @@ config INTEL_MEI_ME
config INTEL_MEI_TXE
tristate "Intel Trusted Execution Environment with ME Interface"
- select INTEL_MEI
- depends on X86 && PCI
help
MEI Support for Trusted Execution Environment device on Intel SoCs
@@ -48,9 +48,7 @@ config INTEL_MEI_TXE
config INTEL_MEI_GSC
tristate "Intel MEI GSC embedded device"
- depends on INTEL_MEI
depends on INTEL_MEI_ME
- depends on X86 && PCI
depends on DRM_I915
help
Intel auxiliary driver for GSC devices embedded in Intel graphics devices.
@@ -60,6 +58,31 @@ config INTEL_MEI_GSC
tasks such as graphics card firmware update and security
tasks.
+config INTEL_MEI_VSC_HW
+ tristate "Intel visual sensing controller device transport driver"
+ depends on ACPI && SPI
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Intel SPI transport driver between the host and the Intel visual
+ sensing controller (IVSC) device.
+
+ This driver can also be built as a module. If so, the module
+ will be called mei-vsc-hw.
+
+config INTEL_MEI_VSC
+ tristate "Intel visual sensing controller device with ME interface"
+ depends on INTEL_MEI_VSC_HW
+ help
+ Intel MEI over SPI driver for Intel visual sensing controller
+ (IVSC) devices embedded in IA platforms. It supports camera sharing
+ between the IVSC for context sensing and the IPU for typical media usage.
+ Selecting this config enables the transport layer for IVSC devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called mei-vsc.
+
source "drivers/misc/mei/hdcp/Kconfig"
source "drivers/misc/mei/pxp/Kconfig"
source "drivers/misc/mei/gsc_proxy/Kconfig"
+
+endif
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 14aee253a..6f9fdbf1a 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -31,3 +31,10 @@ CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/
+
+obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o
+mei-vsc-hw-y := vsc-tp.o
+mei-vsc-hw-y += vsc-fw-loader.o
+
+obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o
+mei-vsc-y := platform-vsc.o
diff --git a/drivers/misc/mei/gsc_proxy/Kconfig b/drivers/misc/mei/gsc_proxy/Kconfig
index 5f68d9f3d..ac78b9d1e 100644
--- a/drivers/misc/mei/gsc_proxy/Kconfig
+++ b/drivers/misc/mei/gsc_proxy/Kconfig
@@ -3,7 +3,7 @@
#
config INTEL_MEI_GSC_PROXY
tristate "Intel GSC Proxy services of ME Interface"
- select INTEL_MEI_ME
+ depends on INTEL_MEI_ME
depends on DRM_I915
help
MEI Support for GSC Proxy Services on Intel platforms.
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
index 54e1c9526..9be312ec7 100644
--- a/drivers/misc/mei/hdcp/Kconfig
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -3,7 +3,7 @@
#
config INTEL_MEI_HDCP
tristate "Intel HDCP2.2 services of ME Interface"
- select INTEL_MEI_ME
+ depends on INTEL_MEI_ME
depends on DRM_I915
help
MEI Support for HDCP2.2 Services on Intel platforms.
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8cf636c54..bd4e3df44 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
new file mode 100644
index 000000000..8db0fcf24
--- /dev/null
+++ b/drivers/misc/mei/platform-vsc.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Interface Linux driver
+ */
+
+#include <linux/align.h>
+#include <linux/cache.h>
+#include <linux/cleanup.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/mei.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+
+#include <asm-generic/bug.h>
+#include <asm-generic/unaligned.h>
+
+#include "mei_dev.h"
+#include "vsc-tp.h"
+
+#define MEI_VSC_DRV_NAME "intel_vsc"
+
+#define MEI_VSC_MAX_MSG_SIZE 512
+
+#define MEI_VSC_POLL_DELAY_US (50 * USEC_PER_MSEC)
+#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC)
+
+#define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw))
+
+struct mei_vsc_host_timestamp {
+ u64 realtime;
+ u64 boottime;
+};
+
+struct mei_vsc_hw {
+ struct vsc_tp *tp;
+
+ bool fw_ready;
+ bool host_ready;
+
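+ /* in-flight writes; hbuf is reported ready only when zero */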
+ atomic_t write_lock_cnt;
+
+ u32 rx_len;
+ u32 rx_hdr;
+
+ /* buffer for tx */
+ char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
+ /* buffer for rx */
+ char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
+};
+
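+/*
+ * Each read request carries the current host clocks; presumably the
+ * firmware uses them for time synchronization (an assumption based on
+ * the payload layout, not on firmware documentation).
+ */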
+static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf,
+ u32 max_len)
+{
+ struct mei_vsc_host_timestamp ts = {
+ .realtime = ktime_to_ns(ktime_get_real()),
+ .boottime = ktime_to_ns(ktime_get_boottime()),
+ };
+
+ return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts),
+ buf, max_len);
+}
+
+static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len)
+{
+ u8 status;
+
+ return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status,
+ sizeof(status));
+}
+
+static int mei_vsc_fw_status(struct mei_device *mei_dev,
+ struct mei_fw_status *fw_status)
+{
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = 0;
+
+ return 0;
+}
+
+static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev)
+{
+ return MEI_PG_OFF;
+}
+
+static void mei_vsc_intr_enable(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_intr_enable(hw->tp);
+}
+
+static void mei_vsc_intr_disable(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_intr_disable(hw->tp);
+}
+
+/* the mei framework requires this op */
+static void mei_vsc_intr_clear(struct mei_device *mei_dev)
+{
+}
+
+/* wait for any pending irq handler to finish */
+static void mei_vsc_synchronize_irq(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_intr_synchronize(hw->tp);
+}
+
+static int mei_vsc_hw_config(struct mei_device *mei_dev)
+{
+ return 0;
+}
+
+static bool mei_vsc_host_is_ready(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ return hw->host_ready;
+}
+
+static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ return hw->fw_ready;
+}
+
+static int mei_vsc_hw_start(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ int ret, rlen;
+ u8 buf;
+
+ hw->host_ready = true;
+
+ vsc_tp_intr_enable(hw->tp);
+
+ ret = read_poll_timeout(mei_vsc_read_helper, rlen,
+ rlen >= 0, MEI_VSC_POLL_DELAY_US,
+ MEI_VSC_POLL_TIMEOUT_US, true,
+ hw, &buf, sizeof(buf));
+ if (ret) {
+ dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
+ return ret;
+ }
+
+ hw->fw_ready = true;
+
+ return 0;
+}
+
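+/*
+ * The VSC transport has a single fixed-size message buffer, so slot
+ * accounting is constant: the buffer is ready whenever no write is in
+ * flight.
+ */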
+static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ return atomic_read(&hw->write_lock_cnt) == 0;
+}
+
+static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev)
+{
+ return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev)
+{
+ return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static int mei_vsc_write(struct mei_device *mei_dev,
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ char *buf = hw->tx_buf;
+ int ret;
+
+ if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4)))
+ return -EINVAL;
+
+ if (!data || data_len > MEI_VSC_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ atomic_inc(&hw->write_lock_cnt);
+
+ memcpy(buf, hdr, hdr_len);
+ memcpy(buf + hdr_len, data, data_len);
+
+ ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len);
+
+ atomic_dec_if_positive(&hw->write_lock_cnt);
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline u32 mei_vsc_read(const struct mei_device *mei_dev)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ int ret;
+
+ ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf));
+ if (ret < 0 || ret < sizeof(u32))
+ return 0;
+ hw->rx_len = ret;
+
+ hw->rx_hdr = get_unaligned_le32(hw->rx_buf);
+
+ return hw->rx_hdr;
+}
+
+static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev)
+{
+ return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf,
+ unsigned long len)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct mei_msg_hdr *hdr;
+
+ hdr = (struct mei_msg_hdr *)&hw->rx_hdr;
+ if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len)
+ return -EINVAL;
+
+ memcpy(buf, hw->rx_buf + sizeof(*hdr), len);
+
+ return 0;
+}
+
+static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev)
+{
+ return mei_dev->pg_event >= MEI_PG_EVENT_WAIT &&
+ mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev)
+{
+ return false;
+}
+
+static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
+{
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ vsc_tp_reset(hw->tp);
+
+ vsc_tp_intr_disable(hw->tp);
+
+ return vsc_tp_init(hw->tp, mei_dev->dev);
+}
+
+static const struct mei_hw_ops mei_vsc_hw_ops = {
+ .fw_status = mei_vsc_fw_status,
+ .pg_state = mei_vsc_pg_state,
+
+ .host_is_ready = mei_vsc_host_is_ready,
+ .hw_is_ready = mei_vsc_hw_is_ready,
+ .hw_reset = mei_vsc_hw_reset,
+ .hw_config = mei_vsc_hw_config,
+ .hw_start = mei_vsc_hw_start,
+
+ .pg_in_transition = mei_vsc_pg_in_transition,
+ .pg_is_enabled = mei_vsc_pg_is_enabled,
+
+ .intr_clear = mei_vsc_intr_clear,
+ .intr_enable = mei_vsc_intr_enable,
+ .intr_disable = mei_vsc_intr_disable,
+ .synchronize_irq = mei_vsc_synchronize_irq,
+
+ .hbuf_free_slots = mei_vsc_hbuf_empty_slots,
+ .hbuf_is_ready = mei_vsc_hbuf_is_ready,
+ .hbuf_depth = mei_vsc_hbuf_depth,
+ .write = mei_vsc_write,
+
+ .rdbuf_full_slots = mei_vsc_count_full_read_slots,
+ .read_hdr = mei_vsc_read,
+ .read = mei_vsc_read_slots,
+};
+
+static void mei_vsc_event_cb(void *context)
+{
+ struct mei_device *mei_dev = context;
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct list_head cmpl_list;
+ s32 slots;
+ int ret;
+
+ if (mei_dev->dev_state == MEI_DEV_RESETTING ||
+ mei_dev->dev_state == MEI_DEV_INITIALIZING)
+ return;
+
+ INIT_LIST_HEAD(&cmpl_list);
+
+ guard(mutex)(&mei_dev->device_lock);
+
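+ /*
+ * Drain all pending messages; vsc_tp_need_read() is taken here to
+ * reflect the transport's data-ready state (an assumption, since its
+ * implementation lives in vsc-tp.c outside this hunk).
+ */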
+ while (vsc_tp_need_read(hw->tp)) {
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(mei_dev);
+
+ ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots);
+ if (ret) {
+ if (ret != -ENODATA) {
+ if (mei_dev->dev_state != MEI_DEV_RESETTING &&
+ mei_dev->dev_state != MEI_DEV_POWER_DOWN)
+ schedule_work(&mei_dev->reset_work);
+ }
+
+ return;
+ }
+ }
+
+ mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
+ ret = mei_irq_write_handler(mei_dev, &cmpl_list);
+ if (ret)
+ dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);
+
+ mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
+ mei_irq_compl_handler(mei_dev, &cmpl_list);
+}
+
+static int mei_vsc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mei_device *mei_dev;
+ struct mei_vsc_hw *hw;
+ struct vsc_tp *tp;
+ int ret;
+
+ tp = *(struct vsc_tp **)dev_get_platdata(dev);
+ if (!tp)
+ return dev_err_probe(dev, -ENODEV, "no platform data\n");
+
+ mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
+ GFP_KERNEL);
+ if (!mei_dev)
+ return -ENOMEM;
+
+ mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
+ mei_dev->fw_f_fw_ver_supported = 0;
+ mei_dev->kind = "ivsc";
+
+ hw = mei_dev_to_vsc_hw(mei_dev);
+ atomic_set(&hw->write_lock_cnt, 0);
+ hw->tp = tp;
+
+ platform_set_drvdata(pdev, mei_dev);
+
+ vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);
+
+ ret = mei_start(mei_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "init hw failed\n");
+ goto err_cancel;
+ }
+
+ ret = mei_register(mei_dev, dev);
+ if (ret)
+ goto err_stop;
+
+ pm_runtime_enable(mei_dev->dev);
+
+ return 0;
+
+err_stop:
+ mei_stop(mei_dev);
+
+err_cancel:
+ mei_cancel_work(mei_dev);
+
+ mei_disable_interrupts(mei_dev);
+
+ return ret;
+}
+
+static int mei_vsc_remove(struct platform_device *pdev)
+{
+ struct mei_device *mei_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mei_dev->dev);
+
+ mei_stop(mei_dev);
+
+ mei_disable_interrupts(mei_dev);
+
+ mei_deregister(mei_dev);
+
+ return 0;
+}
+
+static int mei_vsc_suspend(struct device *dev)
+{
+ struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+
+ mei_stop(mei_dev);
+
+ mei_disable_interrupts(mei_dev);
+
+ vsc_tp_free_irq(hw->tp);
+
+ return 0;
+}
+
+static int mei_vsc_resume(struct device *dev)
+{
+ struct mei_device *mei_dev = dev_get_drvdata(dev);
+ struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ int ret;
+
+ ret = vsc_tp_request_irq(hw->tp);
+ if (ret)
+ return ret;
+
+ ret = mei_restart(mei_dev);
+ if (ret)
+ goto err_free;
+
+ /* start timer if stopped in suspend */
+ schedule_delayed_work(&mei_dev->timer_work, HZ);
+
+ return 0;
+
+err_free:
+ vsc_tp_free_irq(hw->tp);
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
+
+static const struct platform_device_id mei_vsc_id_table[] = {
+ { MEI_VSC_DRV_NAME },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);
+
+static struct platform_driver mei_vsc_drv = {
+ .probe = mei_vsc_probe,
+ .remove = mei_vsc_remove,
+ .id_table = mei_vsc_id_table,
+ .driver = {
+ .name = MEI_VSC_DRV_NAME,
+ .pm = &mei_vsc_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(mei_vsc_drv);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(VSC_TP);
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
index 4029b96af..e9219b61c 100644
--- a/drivers/misc/mei/pxp/Kconfig
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -1,10 +1,9 @@
-
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2020, Intel Corporation. All rights reserved.
#
config INTEL_MEI_PXP
tristate "Intel PXP services of ME Interface"
- select INTEL_MEI_ME
+ depends on INTEL_MEI_ME
depends on DRM_I915
help
MEI Support for PXP Services on Intel platforms.
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
new file mode 100644
index 000000000..ffa4ccd96
--- /dev/null
+++ b/drivers/misc/mei/vsc-fw-loader.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/firmware.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+
+#include <asm-generic/unaligned.h>
+
+#include "vsc-tp.h"
+
+#define VSC_MAGIC_NUM 0x49505343 /* IPSC */
+#define VSC_MAGIC_FW 0x49574653 /* IWFS */
+#define VSC_MAGIC_FILE 0x46564353 /* FVCS */
+
+#define VSC_ADDR_BASE 0xE0030000
+#define VSC_EFUSE_ADDR (VSC_ADDR_BASE + 0x038)
+#define VSC_STRAP_ADDR (VSC_ADDR_BASE + 0x100)
+
+#define VSC_MAINSTEPPING_VERSION_MASK GENMASK(7, 4)
+#define VSC_MAINSTEPPING_VERSION_A 0
+
+#define VSC_SUBSTEPPING_VERSION_MASK GENMASK(3, 0)
+#define VSC_SUBSTEPPING_VERSION_0 0
+#define VSC_SUBSTEPPING_VERSION_1 2
+
+#define VSC_BOOT_IMG_OPTION_MASK GENMASK(15, 0)
+
+#define VSC_SKU_CFG_LOCATION 0x5001A000
+#define VSC_SKU_MAX_SIZE 4100u
+
+#define VSC_ACE_IMG_CNT 2
+#define VSC_CSI_IMG_CNT 4
+#define VSC_IMG_CNT_MAX 6
+
+#define VSC_ROM_PKG_SIZE 256u
+#define VSC_FW_PKG_SIZE 512u
+
+#define VSC_IMAGE_DIR "intel/vsc/"
+
+#define VSC_CSI_IMAGE_NAME VSC_IMAGE_DIR "ivsc_fw.bin"
+#define VSC_ACE_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_pkg_%s_0.bin"
+#define VSC_CFG_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_skucfg_%s_0_1.bin"
+
+#define VSC_IMAGE_PATH_MAX_LEN 64
+
+#define VSC_SENSOR_NAME_MAX_LEN 16
+
+/* command id */
+enum {
+ VSC_CMD_QUERY = 0,
+ VSC_CMD_DL_SET = 1,
+ VSC_CMD_DL_START = 2,
+ VSC_CMD_DL_CONT = 3,
+ VSC_CMD_DUMP_MEM = 4,
+ VSC_CMD_GET_CONT = 8,
+ VSC_CMD_CAM_BOOT = 10,
+};
+
+/* command ack token */
+enum {
+ VSC_TOKEN_BOOTLOADER_REQ = 1,
+ VSC_TOKEN_DUMP_RESP = 4,
+ VSC_TOKEN_ERROR = 7,
+};
+
+/* image type */
+enum {
+ VSC_IMG_BOOTLOADER_TYPE = 1,
+ VSC_IMG_CSI_EM7D_TYPE,
+ VSC_IMG_CSI_SEM_TYPE,
+ VSC_IMG_CSI_RUNTIME_TYPE,
+ VSC_IMG_ACE_VISION_TYPE,
+ VSC_IMG_ACE_CFG_TYPE,
+ VSC_IMG_SKU_CFG_TYPE,
+};
+
+/* image fragments */
+enum {
+ VSC_IMG_BOOTLOADER_FRAG,
+ VSC_IMG_CSI_SEM_FRAG,
+ VSC_IMG_CSI_RUNTIME_FRAG,
+ VSC_IMG_ACE_VISION_FRAG,
+ VSC_IMG_ACE_CFG_FRAG,
+ VSC_IMG_CSI_EM7D_FRAG,
+ VSC_IMG_SKU_CFG_FRAG,
+ VSC_IMG_FRAG_MAX
+};
+
+struct vsc_rom_cmd {
+ __le32 magic;
+ __u8 cmd_id;
+ union {
+ /* download start */
+ struct {
+ __u8 img_type;
+ __le16 option;
+ __le32 img_len;
+ __le32 img_loc;
+ __le32 crc;
+ DECLARE_FLEX_ARRAY(__u8, res);
+ } __packed dl_start;
+ /* download set */
+ struct {
+ __u8 option;
+ __le16 img_cnt;
+ DECLARE_FLEX_ARRAY(__le32, payload);
+ } __packed dl_set;
+ /* download continue */
+ struct {
+ __u8 end_flag;
+ __le16 len;
+ /* 8 is the offset of payload */
+ __u8 payload[VSC_ROM_PKG_SIZE - 8];
+ } __packed dl_cont;
+ /* dump memory */
+ struct {
+ __u8 res;
+ __le16 len;
+ __le32 addr;
+ DECLARE_FLEX_ARRAY(__u8, payload);
+ } __packed dump_mem;
+ /* 5 is the offset of padding */
+ __u8 padding[VSC_ROM_PKG_SIZE - 5];
+ } data;
+};
+
+struct vsc_rom_cmd_ack {
+ __le32 magic;
+ __u8 token;
+ __u8 type;
+ __u8 res[2];
+ __u8 payload[];
+};
+
+struct vsc_fw_cmd {
+ __le32 magic;
+ __u8 cmd_id;
+ union {
+ struct {
+ __le16 option;
+ __u8 img_type;
+ __le32 img_len;
+ __le32 img_loc;
+ __le32 crc;
+ DECLARE_FLEX_ARRAY(__u8, res);
+ } __packed dl_start;
+ struct {
+ __le16 option;
+ __u8 img_cnt;
+ DECLARE_FLEX_ARRAY(__le32, payload);
+ } __packed dl_set;
+ struct {
+ __le32 addr;
+ __u8 len;
+ DECLARE_FLEX_ARRAY(__u8, payload);
+ } __packed dump_mem;
+ struct {
+ __u8 resv[3];
+ __le32 crc;
+ DECLARE_FLEX_ARRAY(__u8, payload);
+ } __packed boot;
+ /* 5 is the offset of padding */
+ __u8 padding[VSC_FW_PKG_SIZE - 5];
+ } data;
+};
+
+struct vsc_img {
+ __le32 magic;
+ __le32 option;
+ __le32 image_count;
+ __le32 image_location[VSC_IMG_CNT_MAX];
+};
+
+struct vsc_fw_sign {
+ __le32 magic;
+ __le32 image_size;
+ __u8 image[];
+};
+
+struct vsc_image_code_data {
+ /* fragment index */
+ u8 frag_index;
+ /* image type */
+ u8 image_type;
+};
+
+struct vsc_img_frag {
+ u8 type;
+ u32 location;
+ const u8 *data;
+ u32 size;
+};
+
+/**
+ * struct vsc_fw_loader - represent vsc firmware loader
+ * @dev: device used to request firmware
+ * @tp: transport layer used with the firmware loader
+ * @csi: CSI image
+ * @ace: ACE image
+ * @cfg: config image
+ * @tx_buf: tx buffer
+ * @rx_buf: rx buffer
+ * @option: command option
+ * @count: total image count
+ * @sensor_name: camera sensor name
+ * @frags: image fragments
+ */
+struct vsc_fw_loader {
+ struct device *dev;
+ struct vsc_tp *tp;
+
+ const struct firmware *csi;
+ const struct firmware *ace;
+ const struct firmware *cfg;
+
+ void *tx_buf;
+ void *rx_buf;
+
+ u16 option;
+ u16 count;
+
+ char sensor_name[VSC_SENSOR_NAME_MAX_LEN];
+
+ struct vsc_img_frag frags[VSC_IMG_FRAG_MAX];
+};
+
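+/* Despite the name, this computes a plain byte-wise sum, not a true CRC. */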
+static inline u32 vsc_sum_crc(void *data, size_t size)
+{
+ u32 crc = 0;
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ crc += *((u8 *)data + i);
+
+ return crc;
+}
+
+/* get sensor name to construct image name */
+static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
+ struct device *dev)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
+ union acpi_object obj = {
+ .type = ACPI_TYPE_INTEGER,
+ .integer.value = 1,
+ };
+ struct acpi_object_list arg_list = {
+ .count = 1,
+ .pointer = &obj,
+ };
+ union acpi_object *ret_obj;
+ acpi_handle handle;
+ acpi_status status;
+ int ret = 0;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -EINVAL;
+
+ status = acpi_evaluate_object(handle, "SID", &arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "can't evaluate SID method: %d\n", status);
+ return -ENODEV;
+ }
+
+ ret_obj = buffer.pointer;
+ if (!ret_obj) {
+ dev_err(dev, "can't locate ACPI buffer\n");
+ return -ENODEV;
+ }
+
+ if (ret_obj->type != ACPI_TYPE_STRING) {
+ dev_err(dev, "found non-string entry\n");
+ ret = -ENODEV;
+ goto out_free_buff;
+ }
+
+ /* string length excludes trailing NUL */
+ if (ret_obj->string.length >= sizeof(fw_loader->sensor_name)) {
+ dev_err(dev, "sensor name buffer too small\n");
+ ret = -EINVAL;
+ goto out_free_buff;
+ }
+
+ memcpy(fw_loader->sensor_name, ret_obj->string.pointer,
+ ret_obj->string.length);
+
+ string_lower(fw_loader->sensor_name, fw_loader->sensor_name);
+
+out_free_buff:
+ ACPI_FREE(buffer.pointer);
+
+ return ret;
+}
+
+static int vsc_identify_silicon(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf;
+ struct vsc_rom_cmd *cmd = fw_loader->tx_buf;
+ u8 version, sub_version;
+ int ret;
+
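+ /*
+ * ROM register dumps are two-step: VSC_CMD_DUMP_MEM latches the
+ * address and length, then VSC_CMD_GET_CONT returns the payload.
+ */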
+ /* identify stepping information */
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DUMP_MEM;
+ cmd->data.dump_mem.addr = cpu_to_le32(VSC_EFUSE_ADDR);
+ cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32));
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token == VSC_TOKEN_ERROR)
+ return -EINVAL;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_GET_CONT;
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token != VSC_TOKEN_DUMP_RESP)
+ return -EINVAL;
+
+ version = FIELD_GET(VSC_MAINSTEPPING_VERSION_MASK, ack->payload[0]);
+ sub_version = FIELD_GET(VSC_SUBSTEPPING_VERSION_MASK, ack->payload[0]);
+
+ if (version != VSC_MAINSTEPPING_VERSION_A)
+ return -EINVAL;
+
+ if (sub_version != VSC_SUBSTEPPING_VERSION_0 &&
+ sub_version != VSC_SUBSTEPPING_VERSION_1)
+ return -EINVAL;
+
+ dev_info(fw_loader->dev, "silicon stepping version is %u:%u\n",
+ version, sub_version);
+
+ /* identify strap information */
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DUMP_MEM;
+ cmd->data.dump_mem.addr = cpu_to_le32(VSC_STRAP_ADDR);
+ cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32));
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token == VSC_TOKEN_ERROR)
+ return -EINVAL;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_GET_CONT;
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token != VSC_TOKEN_DUMP_RESP)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vsc_identify_csi_image(struct vsc_fw_loader *fw_loader)
+{
+ const struct firmware *image;
+ struct vsc_fw_sign *sign;
+ struct vsc_img *img;
+ unsigned int i;
+ int ret;
+
+ ret = request_firmware(&image, VSC_CSI_IMAGE_NAME, fw_loader->dev);
+ if (ret)
+ return ret;
+
+ img = (struct vsc_img *)image->data;
+ if (!img) {
+ ret = -ENOENT;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->image_count) != VSC_CSI_IMG_CNT) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+ fw_loader->count += le32_to_cpu(img->image_count) - 1;
+
+ fw_loader->option =
+ FIELD_GET(VSC_BOOT_IMG_OPTION_MASK, le32_to_cpu(img->option));
+
+ sign = (struct vsc_fw_sign *)
+ (img->image_location + le32_to_cpu(img->image_count));
+
+ for (i = 0; i < VSC_CSI_IMG_CNT; i++) {
+ /* mapping from CSI image index to image code data */
+ static const struct vsc_image_code_data csi_image_map[] = {
+ { VSC_IMG_BOOTLOADER_FRAG, VSC_IMG_BOOTLOADER_TYPE },
+ { VSC_IMG_CSI_SEM_FRAG, VSC_IMG_CSI_SEM_TYPE },
+ { VSC_IMG_CSI_RUNTIME_FRAG, VSC_IMG_CSI_RUNTIME_TYPE },
+ { VSC_IMG_CSI_EM7D_FRAG, VSC_IMG_CSI_EM7D_TYPE },
+ };
+ struct vsc_img_frag *frag;
+
+ if ((u8 *)sign + sizeof(*sign) > image->data + image->size) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (!le32_to_cpu(img->image_location[i])) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ frag = &fw_loader->frags[csi_image_map[i].frag_index];
+
+ frag->data = sign->image;
+ frag->size = le32_to_cpu(sign->image_size);
+ frag->location = le32_to_cpu(img->image_location[i]);
+ frag->type = csi_image_map[i].image_type;
+
+ sign = (struct vsc_fw_sign *)
+ (sign->image + le32_to_cpu(sign->image_size));
+ }
+
+ fw_loader->csi = image;
+
+ return 0;
+
+err_release_image:
+ release_firmware(image);
+
+ return ret;
+}
+
+static int vsc_identify_ace_image(struct vsc_fw_loader *fw_loader)
+{
+ char path[VSC_IMAGE_PATH_MAX_LEN];
+ const struct firmware *image;
+ struct vsc_fw_sign *sign;
+ struct vsc_img *img;
+ unsigned int i;
+ int ret;
+
+ snprintf(path, sizeof(path), VSC_ACE_IMAGE_NAME_FMT,
+ fw_loader->sensor_name);
+
+ ret = request_firmware(&image, path, fw_loader->dev);
+ if (ret)
+ return ret;
+
+ img = (struct vsc_img *)image->data;
+ if (!img) {
+ ret = -ENOENT;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(img->image_count) != VSC_ACE_IMG_CNT) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+ fw_loader->count += le32_to_cpu(img->image_count);
+
+ sign = (struct vsc_fw_sign *)
+ (img->image_location + le32_to_cpu(img->image_count));
+
+ for (i = 0; i < VSC_ACE_IMG_CNT; i++) {
+ /* mapping from ACE image index to image code data */
+ static const struct vsc_image_code_data ace_image_map[] = {
+ { VSC_IMG_ACE_VISION_FRAG, VSC_IMG_ACE_VISION_TYPE },
+ { VSC_IMG_ACE_CFG_FRAG, VSC_IMG_ACE_CFG_TYPE },
+ };
+ struct vsc_img_frag *frag, *last_frag;
+ u8 frag_index;
+
+ if ((u8 *)sign + sizeof(*sign) > image->data + image->size) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ frag_index = ace_image_map[i].frag_index;
+ frag = &fw_loader->frags[frag_index];
+
+ frag->data = sign->image;
+ frag->size = le32_to_cpu(sign->image_size);
+ frag->location = le32_to_cpu(img->image_location[i]);
+ frag->type = ace_image_map[i].image_type;
+
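+ /* an image without an explicit location is placed 4K-aligned after the previous fragment */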
+ if (!frag->location) {
+ last_frag = &fw_loader->frags[frag_index - 1];
+ frag->location =
+ ALIGN(last_frag->location + last_frag->size, SZ_4K);
+ }
+
+ sign = (struct vsc_fw_sign *)
+ (sign->image + le32_to_cpu(sign->image_size));
+ }
+
+ fw_loader->ace = image;
+
+ return 0;
+
+err_release_image:
+ release_firmware(image);
+
+ return ret;
+}
+
+static int vsc_identify_cfg_image(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_SKU_CFG_FRAG];
+ char path[VSC_IMAGE_PATH_MAX_LEN];
+ const struct firmware *image;
+ u32 size;
+ int ret;
+
+ snprintf(path, sizeof(path), VSC_CFG_IMAGE_NAME_FMT,
+ fw_loader->sensor_name);
+
+ ret = request_firmware(&image, path, fw_loader->dev);
+ if (ret)
+ return ret;
+
+ /* validate the image size */
+ if (image->size <= sizeof(u32) || image->size > VSC_SKU_MAX_SIZE) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ size = le32_to_cpu(*((__le32 *)image->data)) + sizeof(u32);
+ if (image->size != size) {
+ ret = -EINVAL;
+ goto err_release_image;
+ }
+
+ frag->data = image->data;
+ frag->size = image->size;
+ frag->type = VSC_IMG_SKU_CFG_TYPE;
+ frag->location = VSC_SKU_CFG_LOCATION;
+
+ fw_loader->cfg = image;
+
+ return 0;
+
+err_release_image:
+ release_firmware(image);
+
+ return ret;
+}
+
+static int vsc_download_bootloader(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_BOOTLOADER_FRAG];
+ struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf;
+ struct vsc_rom_cmd *cmd = fw_loader->tx_buf;
+ u32 len, c_len;
+ size_t remain;
+ const u8 *p;
+ int ret;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_QUERY;
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+ if (ack->token != VSC_TOKEN_DUMP_RESP &&
+ ack->token != VSC_TOKEN_BOOTLOADER_REQ)
+ return -EINVAL;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_START;
+ cmd->data.dl_start.option = cpu_to_le16(fw_loader->option);
+ cmd->data.dl_start.img_type = frag->type;
+ cmd->data.dl_start.img_len = cpu_to_le32(frag->size);
+ cmd->data.dl_start.img_loc = cpu_to_le32(frag->location);
+
+ c_len = offsetof(struct vsc_rom_cmd, data.dl_start.crc);
+ cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ p = frag->data;
+ remain = frag->size;
+
+ /* download image data */
+ while (remain > 0) {
+ len = min(remain, sizeof(cmd->data.dl_cont.payload));
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_CONT;
+ cmd->data.dl_cont.len = cpu_to_le16(len);
+ cmd->data.dl_cont.end_flag = remain == len;
+ memcpy(cmd->data.dl_cont.payload, p, len);
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ p += len;
+ remain -= len;
+ }
+
+ return 0;
+}
+
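+/*
+ * An illustrative sketch (not part of the driver) of the chunking math
+ * used by the download loop above: a fragment of size S is sent in
+ * DIV_ROUND_UP(S, payload_size) VSC_CMD_DL_CONT packets, and end_flag
+ * is set only on the packet carrying the final bytes:
+ *
+ *	static unsigned int example_dl_cont_packets(size_t frag_size,
+ *						    size_t payload_size)
+ *	{
+ *		return DIV_ROUND_UP(frag_size, payload_size);
+ *	}
+ */
+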
+static int vsc_download_firmware(struct vsc_fw_loader *fw_loader)
+{
+ struct vsc_fw_cmd *cmd = fw_loader->tx_buf;
+ unsigned int i, index = 0;
+ u32 c_len;
+ int ret;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_SET;
+ cmd->data.dl_set.img_cnt = cpu_to_le16(fw_loader->count);
+ put_unaligned_le16(fw_loader->option, &cmd->data.dl_set.option);
+
+ for (i = VSC_IMG_CSI_SEM_FRAG; i <= VSC_IMG_CSI_EM7D_FRAG; i++) {
+ struct vsc_img_frag *frag = &fw_loader->frags[i];
+
+ cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->location);
+ cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->size);
+ }
+
+ c_len = offsetof(struct vsc_fw_cmd, data.dl_set.payload[index]);
+ cmd->data.dl_set.payload[index] = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ for (i = VSC_IMG_CSI_SEM_FRAG; i < VSC_IMG_FRAG_MAX; i++) {
+ struct vsc_img_frag *frag = &fw_loader->frags[i];
+ const u8 *p;
+ u32 remain;
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_DL_START;
+ cmd->data.dl_start.img_type = frag->type;
+ cmd->data.dl_start.img_len = cpu_to_le32(frag->size);
+ cmd->data.dl_start.img_loc = cpu_to_le32(frag->location);
+ put_unaligned_le16(fw_loader->option, &cmd->data.dl_start.option);
+
+ c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc);
+ cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ p = frag->data;
+ remain = frag->size;
+
+ /* download image data */
+ while (remain > 0) {
+ u32 len = min(remain, VSC_FW_PKG_SIZE);
+
+ memcpy(fw_loader->tx_buf, p, len);
+ memset(fw_loader->tx_buf + len, 0, VSC_FW_PKG_SIZE - len);
+
+ ret = vsc_tp_rom_xfer(fw_loader->tp, fw_loader->tx_buf,
+ NULL, VSC_FW_PKG_SIZE);
+ if (ret)
+ break;
+
+ p += len;
+ remain -= len;
+ }
+ }
+
+ cmd->magic = cpu_to_le32(VSC_MAGIC_NUM);
+ cmd->cmd_id = VSC_CMD_CAM_BOOT;
+
+ c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc);
+ cmd->data.boot.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len));
+
+ return vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE);
+}
+
+/**
+ * vsc_tp_init - init vsc_tp
+ * @tp: vsc_tp device handle
+ * @dev: device node for mei vsc device
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_init(struct vsc_tp *tp, struct device *dev)
+{
+ struct vsc_fw_loader *fw_loader __free(kfree) = NULL;
+ void *tx_buf __free(kfree) = NULL;
+ void *rx_buf __free(kfree) = NULL;
+ int ret;
+
+ fw_loader = kzalloc(sizeof(*fw_loader), GFP_KERNEL);
+ if (!fw_loader)
+ return -ENOMEM;
+
+ tx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+
+ rx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+
+ fw_loader->tx_buf = tx_buf;
+ fw_loader->rx_buf = rx_buf;
+
+ fw_loader->tp = tp;
+ fw_loader->dev = dev;
+
+ ret = vsc_get_sensor_name(fw_loader, dev);
+ if (ret)
+ return ret;
+
+ ret = vsc_identify_silicon(fw_loader);
+ if (ret)
+ return ret;
+
+ ret = vsc_identify_csi_image(fw_loader);
+ if (ret)
+ return ret;
+
+ ret = vsc_identify_ace_image(fw_loader);
+ if (ret)
+ goto err_release_csi;
+
+ ret = vsc_identify_cfg_image(fw_loader);
+ if (ret)
+ goto err_release_ace;
+
+ ret = vsc_download_bootloader(fw_loader);
+ if (!ret)
+ ret = vsc_download_firmware(fw_loader);
+
+ release_firmware(fw_loader->cfg);
+
+err_release_ace:
+ release_firmware(fw_loader->ace);
+
+err_release_csi:
+ release_firmware(fw_loader->csi);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_init, VSC_TP);
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
new file mode 100644
index 000000000..870c70ef3
--- /dev/null
+++ b/drivers/misc/mei/vsc-tp.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "vsc-tp.h"
+
+#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS 20
+#define VSC_TP_ROM_BOOTUP_DELAY_MS 10
+#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define VSC_TP_ROM_XFER_POLL_DELAY_US (20 * USEC_PER_MSEC)
+#define VSC_TP_WAIT_FW_POLL_TIMEOUT (2 * HZ)
+#define VSC_TP_WAIT_FW_POLL_DELAY_US (20 * USEC_PER_MSEC)
+#define VSC_TP_MAX_XFER_COUNT 5
+
+#define VSC_TP_PACKET_SYNC 0x31
+#define VSC_TP_CRC_SIZE sizeof(u32)
+#define VSC_TP_MAX_MSG_SIZE 2048
+/* SPI xfer timeout size */
+#define VSC_TP_XFER_TIMEOUT_BYTES 700
+#define VSC_TP_PACKET_PADDING_SIZE 1
+#define VSC_TP_PACKET_SIZE(pkt) \
+ (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
+#define VSC_TP_MAX_PACKET_SIZE \
+ (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
+#define VSC_TP_MAX_XFER_SIZE \
+ (VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
+#define VSC_TP_NEXT_XFER_LEN(len, offset) \
+ (len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
+
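+/*
+ * A minimal stand-alone sketch of the framing arithmetic encoded by the
+ * macros above (illustrative only, assuming the 8-byte header layout of
+ * struct vsc_tp_packet below):
+ *
+ *	struct pkt_hdr { uint8_t sync, cmd; uint16_t len; uint32_t seq; };
+ *
+ *	static size_t wire_size(uint16_t payload_len)
+ *	{
+ *		// header + payload + trailing CRC32
+ *		return sizeof(struct pkt_hdr) + payload_len + sizeof(uint32_t);
+ *	}
+ *
+ * A maximum-size message therefore occupies wire_size(2048) == 2060
+ * bytes on the wire, and VSC_TP_MAX_XFER_SIZE adds the
+ * VSC_TP_XFER_TIMEOUT_BYTES allowance on top of that.
+ */
+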
+struct vsc_tp_packet {
+ __u8 sync;
+ __u8 cmd;
+ __le16 len;
+ __le32 seq;
+ __u8 buf[] __counted_by(len);
+};
+
+struct vsc_tp {
+ /* do the actual data transfer */
+ struct spi_device *spi;
+
+ /* bind with mei framework */
+ struct platform_device *pdev;
+
+ struct gpio_desc *wakeuphost;
+ struct gpio_desc *resetfw;
+ struct gpio_desc *wakeupfw;
+
+ /* command sequence number */
+ u32 seq;
+
+ /* command buffer */
+ void *tx_buf;
+ void *rx_buf;
+
+ atomic_t assert_cnt;
+ wait_queue_head_t xfer_wait;
+
+ vsc_tp_event_cb_t event_notify;
+ void *event_notify_context;
+
+ /* used to protect command download */
+ struct mutex mutex;
+};
+
+/* GPIO resources */
+static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
+static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
+static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
+static const struct acpi_gpio_params wakeupfw = { 3, 0, false };
+
+static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
+ { "wakeuphost-gpios", &wakeuphost_gpio, 1 },
+ { "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
+ { "resetfw-gpios", &resetfw_gpio, 1 },
+ { "wakeupfw-gpios", &wakeupfw, 1 },
+ {}
+};
+
+static irqreturn_t vsc_tp_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ atomic_inc(&tp->assert_cnt);
+
+ wake_up(&tp->xfer_wait);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
+{
+ struct vsc_tp *tp = data;
+
+ if (tp->event_notify)
+ tp->event_notify(tp->event_notify_context);
+
+ return IRQ_HANDLED;
+}
+
+/* wakeup firmware and wait for response */
+static int vsc_tp_wakeup_request(struct vsc_tp *tp)
+{
+ int ret;
+
+ gpiod_set_value_cansleep(tp->wakeupfw, 0);
+
+ ret = wait_event_timeout(tp->xfer_wait,
+ atomic_read(&tp->assert_cnt),
+ VSC_TP_WAIT_FW_POLL_TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
+ VSC_TP_WAIT_FW_POLL_DELAY_US,
+ VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
+ tp->wakeuphost);
+}
+
+static void vsc_tp_wakeup_release(struct vsc_tp *tp)
+{
+ atomic_dec_if_positive(&tp->assert_cnt);
+
+ gpiod_set_value_cansleep(tp->wakeupfw, 1);
+}
+
+static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
+{
+ struct spi_message msg = { 0 };
+ struct spi_transfer xfer = {
+ .tx_buf = obuf,
+ .rx_buf = ibuf,
+ .len = len,
+ };
+
+ spi_message_init_with_transfers(&msg, &xfer, 1);
+
+ return spi_sync_locked(tp->spi, &msg);
+}
+
+static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
+ void *ibuf, u16 ilen)
+{
+ int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
+ int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
+ u8 *src, *crc_src, *rx_buf = tp->rx_buf;
+ int count_down = VSC_TP_MAX_XFER_COUNT;
+ u32 recv_crc = 0, crc = ~0;
+ struct vsc_tp_packet ack;
+ u8 *dst = (u8 *)&ack;
+ bool synced = false;
+
+ do {
+ ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
+ if (ret)
+ return ret;
+ memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);
+
+ if (synced) {
+ src = rx_buf;
+ src_len = next_xfer_len;
+ } else {
+ src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
+ if (!src)
+ continue;
+ synced = true;
+ src_len = next_xfer_len - (src - rx_buf);
+ }
+
+ /* traverse received data */
+ while (src_len > 0) {
+ cpy_len = min(src_len, dst_len);
+ memcpy(dst, src, cpy_len);
+ crc_src = src;
+ src += cpy_len;
+ src_len -= cpy_len;
+ dst += cpy_len;
+ dst_len -= cpy_len;
+
+ if (offset < sizeof(ack)) {
+ offset += cpy_len;
+ crc = crc32(crc, crc_src, cpy_len);
+
+ if (!src_len)
+ continue;
+
+ if (le16_to_cpu(ack.len)) {
+ dst = ibuf;
+ dst_len = min(ilen, le16_to_cpu(ack.len));
+ } else {
+ dst = (u8 *)&recv_crc;
+ dst_len = sizeof(recv_crc);
+ }
+ } else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
+ offset += cpy_len;
+ crc = crc32(crc, crc_src, cpy_len);
+
+ if (src_len) {
+ int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;
+
+ cpy_len = min(src_len, remain);
+ offset += cpy_len;
+ crc = crc32(crc, src, cpy_len);
+ src += cpy_len;
+ src_len -= cpy_len;
+ if (src_len) {
+ dst = (u8 *)&recv_crc;
+ dst_len = sizeof(recv_crc);
+ continue;
+ }
+ }
+ next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
+ } else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
+ offset += cpy_len;
+
+ if (src_len) {
+ /* terminate the traverse */
+ next_xfer_len = 0;
+ break;
+ }
+ next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
+ }
+ }
+ } while (next_xfer_len > 0 && --count_down);
+
+ if (next_xfer_len > 0)
+ return -EAGAIN;
+
+ if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
+ dev_err(&tp->spi->dev, "recv crc or seq error\n");
+ return -EINVAL;
+ }
+
+ if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
+ ack.cmd == VSC_TP_CMD_BUSY) {
+ dev_err(&tp->spi->dev, "recv cmd ack error\n");
+ return -EAGAIN;
+ }
+
+ return min(le16_to_cpu(ack.len), ilen);
+}
+
+/**
+ * vsc_tp_xfer - transfer data to firmware
+ * @tp: vsc_tp device handle
+ * @cmd: the command to be sent to the device
+ * @obuf: the tx buffer to be sent to the device
+ * @olen: the length of tx buffer
+ * @ibuf: the rx buffer to receive from the device
+ * @ilen: the length of rx buffer
+ * Return: the length of received data in case of success,
+ * otherwise negative value
+ */
+int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+ void *ibuf, size_t ilen)
+{
+ struct vsc_tp_packet *pkt = tp->tx_buf;
+ u32 crc;
+ int ret;
+
+ if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ guard(mutex)(&tp->mutex);
+
+ pkt->sync = VSC_TP_PACKET_SYNC;
+ pkt->cmd = cmd;
+ pkt->len = cpu_to_le16(olen);
+ pkt->seq = cpu_to_le32(++tp->seq);
+ memcpy(pkt->buf, obuf, olen);
+
+	crc = ~crc32(~0, (u8 *)pkt, sizeof(*pkt) + olen);
+ memcpy(pkt->buf + olen, &crc, sizeof(crc));
+
+ ret = vsc_tp_wakeup_request(tp);
+ if (unlikely(ret))
+ dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
+ else
+ ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);
+
+ vsc_tp_wakeup_release(tp);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
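+
+/*
+ * A hypothetical caller sketch (illustrative, not part of the driver);
+ * the request payload and the buffer sizes are assumptions:
+ *
+ *	static int example_query(struct vsc_tp *tp)
+ *	{
+ *		u8 req[16] = { 0 };	// command payload
+ *		u8 resp[64];
+ *		int len;
+ *
+ *		len = vsc_tp_xfer(tp, VSC_TP_CMD_WRITE, req, sizeof(req),
+ *				  resp, sizeof(resp));
+ *		if (len < 0)
+ *			return len;	// transport or protocol error
+ *
+ *		// len is the number of valid bytes now in resp
+ *		return 0;
+ *	}
+ */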
+
+/**
+ * vsc_tp_rom_xfer - transfer data to rom code
+ * @tp: vsc_tp device handle
+ * @obuf: the data buffer to be sent to the device
+ * @ibuf: the buffer to receive data from the device
+ * @len: the length of tx buffer and rx buffer
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
+{
+ size_t words = len / sizeof(__be32);
+ int ret;
+
+ if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ guard(mutex)(&tp->mutex);
+
+ /* rom xfer is big endian */
+ cpu_to_be32_array(tp->tx_buf, obuf, words);
+
+ ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
+ !ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
+ VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
+ tp->wakeuphost);
+ if (ret) {
+ dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
+ return ret;
+ }
+
+ ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
+ if (ret)
+ return ret;
+
+ if (ibuf)
+		be32_to_cpu_array(ibuf, tp->rx_buf, words);
+
+ return ret;
+}
+
+/**
+ * vsc_tp_reset - reset vsc transport layer
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_reset(struct vsc_tp *tp)
+{
+ disable_irq(tp->spi->irq);
+
+ /* toggle reset pin */
+ gpiod_set_value_cansleep(tp->resetfw, 0);
+ msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
+ gpiod_set_value_cansleep(tp->resetfw, 1);
+
+ /* wait for ROM */
+ msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);
+
+ /*
+	 * Set the default host wakeup pin to non-active
+	 * to avoid unexpected host interrupts.
+ */
+ gpiod_set_value_cansleep(tp->wakeupfw, 1);
+
+ atomic_set(&tp->assert_cnt, 0);
+
+ enable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);
+
+/**
+ * vsc_tp_need_read - check if the device has data to send
+ * @tp: vsc_tp device handle
+ * Return: true if the device has data to send, otherwise false
+ */
+bool vsc_tp_need_read(struct vsc_tp *tp)
+{
+ if (!atomic_read(&tp->assert_cnt))
+ return false;
+ if (!gpiod_get_value_cansleep(tp->wakeuphost))
+ return false;
+ if (!gpiod_get_value_cansleep(tp->wakeupfw))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);
+
+/**
+ * vsc_tp_register_event_cb - register a callback function to receive event
+ * @tp: vsc_tp device handle
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+ void *context)
+{
+ tp->event_notify = event_cb;
+ tp->event_notify_context = context;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);
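+
+/*
+ * A hypothetical consumer sketch (illustrative, not part of the
+ * driver). The callback runs in interrupt context, so it should only
+ * schedule deferred work; struct example_consumer is an assumption:
+ *
+ *	struct example_consumer {
+ *		struct work_struct rx_work;
+ *	};
+ *
+ *	static void example_event_cb(void *context)
+ *	{
+ *		struct example_consumer *ec = context;
+ *
+ *		schedule_work(&ec->rx_work);
+ *	}
+ *
+ * registered at probe time with:
+ *	vsc_tp_register_event_cb(tp, example_event_cb, ec);
+ */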
+
+/**
+ * vsc_tp_request_irq - request irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ * Return: 0 in case of success, negative value in case of error
+ */
+int vsc_tp_request_irq(struct vsc_tp *tp)
+{
+ struct spi_device *spi = tp->spi;
+ struct device *dev = &spi->dev;
+ int ret;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);
+
+/**
+ * vsc_tp_free_irq - free irq for vsc_tp device
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_free_irq(struct vsc_tp *tp)
+{
+ free_irq(tp->spi->irq, tp);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
+
+/**
+ * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_intr_synchronize(struct vsc_tp *tp)
+{
+ synchronize_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);
+
+/**
+ * vsc_tp_intr_enable - enable vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_intr_enable(struct vsc_tp *tp)
+{
+ enable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);
+
+/**
+ * vsc_tp_intr_disable - disable vsc_tp interrupt
+ * @tp: vsc_tp device handle
+ */
+void vsc_tp_intr_disable(struct vsc_tp *tp)
+{
+ disable_irq(tp->spi->irq);
+}
+EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);
+
+static int vsc_tp_match_any(struct acpi_device *adev, void *data)
+{
+ struct acpi_device **__adev = data;
+
+ *__adev = adev;
+
+ return 1;
+}
+
+static int vsc_tp_probe(struct spi_device *spi)
+{
+ struct platform_device_info pinfo = { 0 };
+ struct device *dev = &spi->dev;
+ struct platform_device *pdev;
+ struct acpi_device *adev;
+ struct vsc_tp *tp;
+ int ret;
+
+ tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
+ if (!tp)
+ return -ENOMEM;
+
+ tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+ if (!tp->tx_buf)
+ return -ENOMEM;
+
+ tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+ if (!tp->rx_buf)
+ return -ENOMEM;
+
+ ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
+ if (ret)
+ return ret;
+
+ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+ if (IS_ERR(tp->wakeuphost))
+ return PTR_ERR(tp->wakeuphost);
+
+ tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(tp->resetfw))
+ return PTR_ERR(tp->resetfw);
+
+ tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(tp->wakeupfw))
+ return PTR_ERR(tp->wakeupfw);
+
+ atomic_set(&tp->assert_cnt, 0);
+ init_waitqueue_head(&tp->xfer_wait);
+ tp->spi = spi;
+
+ irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), tp);
+ if (ret)
+ return ret;
+
+ mutex_init(&tp->mutex);
+
+ /* only one child acpi device */
+ ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
+ vsc_tp_match_any, &adev);
+ if (!ret) {
+ ret = -ENODEV;
+ goto err_destroy_lock;
+ }
+ pinfo.fwnode = acpi_fwnode_handle(adev);
+
+ pinfo.name = "intel_vsc";
+ pinfo.data = &tp;
+ pinfo.size_data = sizeof(tp);
+ pinfo.id = PLATFORM_DEVID_NONE;
+
+ pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto err_destroy_lock;
+ }
+
+ tp->pdev = pdev;
+ spi_set_drvdata(spi, tp);
+
+ return 0;
+
+err_destroy_lock:
+ mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
+
+ return ret;
+}
+
+static void vsc_tp_remove(struct spi_device *spi)
+{
+ struct vsc_tp *tp = spi_get_drvdata(spi);
+
+ platform_device_unregister(tp->pdev);
+
+ mutex_destroy(&tp->mutex);
+
+ free_irq(spi->irq, tp);
+}
+
+static const struct acpi_device_id vsc_tp_acpi_ids[] = {
+ { "INTC1009" }, /* Raptor Lake */
+ { "INTC1058" }, /* Tiger Lake */
+ { "INTC1094" }, /* Alder Lake */
+ { "INTC10D0" }, /* Meteor Lake */
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
+
+static struct spi_driver vsc_tp_driver = {
+ .probe = vsc_tp_probe,
+ .remove = vsc_tp_remove,
+ .driver = {
+ .name = "vsc-tp",
+ .acpi_match_table = vsc_tp_acpi_ids,
+ },
+};
+module_spi_driver(vsc_tp_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h
new file mode 100644
index 000000000..14ca195cb
--- /dev/null
+++ b/drivers/misc/mei/vsc-tp.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation.
+ * Intel Visual Sensing Controller Transport Layer Linux driver
+ */
+
+#ifndef _VSC_TP_H_
+#define _VSC_TP_H_
+
+#include <linux/types.h>
+
+#define VSC_TP_CMD_WRITE 0x01
+#define VSC_TP_CMD_READ 0x02
+
+#define VSC_TP_CMD_ACK 0x10
+#define VSC_TP_CMD_NACK 0x11
+#define VSC_TP_CMD_BUSY 0x12
+
+struct vsc_tp;
+
+/**
+ * typedef vsc_tp_event_cb_t - event callback function signature
+ * @context: the execution context of whoever registered this callback
+ *
+ * The callback function is called in interrupt context and the data
+ * payload is only valid during the call. If the user needs to access
+ * the data payload later, it must copy the payload.
+ */
+typedef void (*vsc_tp_event_cb_t)(void *context);
+
+int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf,
+ size_t len);
+
+int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
+ void *ibuf, size_t ilen);
+
+int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
+ void *context);
+
+int vsc_tp_request_irq(struct vsc_tp *tp);
+void vsc_tp_free_irq(struct vsc_tp *tp);
+
+void vsc_tp_intr_enable(struct vsc_tp *tp);
+void vsc_tp_intr_disable(struct vsc_tp *tp);
+void vsc_tp_intr_synchronize(struct vsc_tp *tp);
+
+void vsc_tp_reset(struct vsc_tp *tp);
+
+bool vsc_tp_need_read(struct vsc_tp *tp);
+
+int vsc_tp_init(struct vsc_tp *tp, struct device *dev);
+
+#endif
diff --git a/drivers/misc/nsm.c b/drivers/misc/nsm.c
new file mode 100644
index 000000000..0eaa3b448
--- /dev/null
+++ b/drivers/misc/nsm.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Amazon Nitro Secure Module driver.
+ *
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * The Nitro Secure Module implements commands via CBOR over virtio.
+ * This driver exposes a raw message ioctl on /dev/nsm that user
+ * space can use to issue these commands.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio.h>
+#include <linux/wait.h>
+#include <uapi/linux/nsm.h>
+
+/* Timeout for NSM virtqueue response in milliseconds. */
+#define NSM_DEFAULT_TIMEOUT_MSECS (120000) /* 2 minutes */
+
+/* Maximum length input data */
+struct nsm_data_req {
+ u32 len;
+ u8 data[NSM_REQUEST_MAX_SIZE];
+};
+
+/* Maximum length output data */
+struct nsm_data_resp {
+ u32 len;
+ u8 data[NSM_RESPONSE_MAX_SIZE];
+};
+
+/* Full NSM request/response message */
+struct nsm_msg {
+ struct nsm_data_req req;
+ struct nsm_data_resp resp;
+};
+
+struct nsm {
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+ struct mutex lock;
+ struct completion cmd_done;
+ struct miscdevice misc;
+ struct hwrng hwrng;
+ struct work_struct misc_init;
+ struct nsm_msg msg;
+};
+
+/* NSM device ID */
+static const struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_NITRO_SEC_MOD, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct nsm *file_to_nsm(struct file *file)
+{
+ return container_of(file->private_data, struct nsm, misc);
+}
+
+static struct nsm *hwrng_to_nsm(struct hwrng *rng)
+{
+ return container_of(rng, struct nsm, hwrng);
+}
+
+#define CBOR_TYPE_MASK 0xE0
+#define CBOR_TYPE_MAP 0xA0
+#define CBOR_TYPE_TEXT 0x60
+#define CBOR_TYPE_ARRAY 0x40
+#define CBOR_HEADER_SIZE_SHORT 1
+
+#define CBOR_SHORT_SIZE_MAX_VALUE 23
+#define CBOR_LONG_SIZE_U8 24
+#define CBOR_LONG_SIZE_U16 25
+#define CBOR_LONG_SIZE_U32 26
+#define CBOR_LONG_SIZE_U64 27
+
+static bool cbor_object_is_array(const u8 *cbor_object, size_t cbor_object_size)
+{
+ if (cbor_object_size == 0 || cbor_object == NULL)
+ return false;
+
+ return (cbor_object[0] & CBOR_TYPE_MASK) == CBOR_TYPE_ARRAY;
+}
+
+static int cbor_object_get_array(u8 *cbor_object, size_t cbor_object_size, u8 **cbor_array)
+{
+ u8 cbor_short_size;
+ void *array_len_p;
+ u64 array_len;
+ u64 array_offset;
+
+ if (!cbor_object_is_array(cbor_object, cbor_object_size))
+ return -EFAULT;
+
+ cbor_short_size = (cbor_object[0] & 0x1F);
+
+ /* Decoding byte array length */
+ array_offset = CBOR_HEADER_SIZE_SHORT;
+ if (cbor_short_size >= CBOR_LONG_SIZE_U8)
+ array_offset += BIT(cbor_short_size - CBOR_LONG_SIZE_U8);
+
+ if (cbor_object_size < array_offset)
+ return -EFAULT;
+
+ array_len_p = &cbor_object[1];
+
+	switch (cbor_short_size) {
+	case CBOR_LONG_SIZE_U8:
+		array_len = *(u8 *)array_len_p;
+		break;
+	case CBOR_LONG_SIZE_U16:
+		array_len = be16_to_cpup((__be16 *)array_len_p);
+		break;
+	case CBOR_LONG_SIZE_U32:
+		array_len = be32_to_cpup((__be32 *)array_len_p);
+		break;
+	case CBOR_LONG_SIZE_U64:
+		array_len = be64_to_cpup((__be64 *)array_len_p);
+		break;
+	default:
+		/* short encoding: values 0-23 embed the length itself */
+		if (cbor_short_size > CBOR_SHORT_SIZE_MAX_VALUE)
+			return -EFAULT;	/* reserved or indefinite encoding */
+		array_len = cbor_short_size;
+		break;
+	}
+
+ if (cbor_object_size < array_offset)
+ return -EFAULT;
+
+ if (cbor_object_size - array_offset < array_len)
+ return -EFAULT;
+
+ if (array_len > INT_MAX)
+ return -EFAULT;
+
+ *cbor_array = cbor_object + array_offset;
+ return array_len;
+}
+
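+/*
+ * A worked example (illustrative, not driver code): the object
+ * { 0x58, 0x20, <32 payload bytes> } is a CBOR byte string with
+ * one-byte length encoding: major type 0x40 (CBOR_TYPE_ARRAY above),
+ * additional info 24 (CBOR_LONG_SIZE_U8), so the length 0x20 == 32
+ * follows in the next byte and the payload starts at offset 2:
+ *
+ *	u8 obj[2 + 32] = { 0x58, 0x20 };	// payload bytes zeroed
+ *	u8 *payload;
+ *	int len = cbor_object_get_array(obj, sizeof(obj), &payload);
+ *	// len == 32, payload == &obj[2]
+ */
+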
+/* Copy the request of a raw message to kernel space */
+static int fill_req_raw(struct nsm *nsm, struct nsm_data_req *req,
+ struct nsm_raw *raw)
+{
+ /* Verify the user input size. */
+ if (raw->request.len > sizeof(req->data))
+ return -EMSGSIZE;
+
+ /* Copy the request payload */
+ if (copy_from_user(req->data, u64_to_user_ptr(raw->request.addr),
+ raw->request.len))
+ return -EFAULT;
+
+ req->len = raw->request.len;
+
+ return 0;
+}
+
+/* Copy the response of a raw message back to user-space */
+static int parse_resp_raw(struct nsm *nsm, struct nsm_data_resp *resp,
+ struct nsm_raw *raw)
+{
+ /* Truncate any message that does not fit. */
+ raw->response.len = min_t(u64, raw->response.len, resp->len);
+
+ /* Copy the response content to user space */
+ if (copy_to_user(u64_to_user_ptr(raw->response.addr),
+ resp->data, raw->response.len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Virtqueue interrupt handler */
+static void nsm_vq_callback(struct virtqueue *vq)
+{
+ struct nsm *nsm = vq->vdev->priv;
+
+ complete(&nsm->cmd_done);
+}
+
+/* Forward a message to the NSM device and wait for the response from it */
+static int nsm_sendrecv_msg_locked(struct nsm *nsm)
+{
+ struct device *dev = &nsm->vdev->dev;
+ struct scatterlist sg_in, sg_out;
+ struct nsm_msg *msg = &nsm->msg;
+ struct virtqueue *vq = nsm->vq;
+ unsigned int len;
+ void *queue_buf;
+ bool kicked;
+ int rc;
+
+ /* Initialize scatter-gather lists with request and response buffers. */
+ sg_init_one(&sg_out, msg->req.data, msg->req.len);
+ sg_init_one(&sg_in, msg->resp.data, sizeof(msg->resp.data));
+
+ init_completion(&nsm->cmd_done);
+ /* Add the request buffer (read by the device). */
+ rc = virtqueue_add_outbuf(vq, &sg_out, 1, msg->req.data, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ /* Add the response buffer (written by the device). */
+ rc = virtqueue_add_inbuf(vq, &sg_in, 1, msg->resp.data, GFP_KERNEL);
+ if (rc)
+ goto cleanup;
+
+ kicked = virtqueue_kick(vq);
+ if (!kicked) {
+ /* Cannot kick the virtqueue. */
+ rc = -EIO;
+ goto cleanup;
+ }
+
+ /* If the kick succeeded, wait for the device's response. */
+ if (!wait_for_completion_io_timeout(&nsm->cmd_done,
+ msecs_to_jiffies(NSM_DEFAULT_TIMEOUT_MSECS))) {
+ rc = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ queue_buf = virtqueue_get_buf(vq, &len);
+ if (!queue_buf || (queue_buf != msg->req.data)) {
+		dev_err(dev, "wrong request buffer\n");
+ rc = -ENODATA;
+ goto cleanup;
+ }
+
+ queue_buf = virtqueue_get_buf(vq, &len);
+ if (!queue_buf || (queue_buf != msg->resp.data)) {
+		dev_err(dev, "wrong response buffer\n");
+ rc = -ENODATA;
+ goto cleanup;
+ }
+
+ msg->resp.len = len;
+
+ rc = 0;
+
+cleanup:
+ if (rc) {
+ /* Clean the virtqueue. */
+ while (virtqueue_get_buf(vq, &len) != NULL)
+ ;
+ }
+
+ return rc;
+}
+
+static int fill_req_get_random(struct nsm *nsm, struct nsm_data_req *req)
+{
+ /*
+ * 69 # text(9)
+ * 47657452616E646F6D # "GetRandom"
+ */
+ const u8 request[] = { CBOR_TYPE_TEXT + strlen("GetRandom"),
+ 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm' };
+
+ memcpy(req->data, request, sizeof(request));
+ req->len = sizeof(request);
+
+ return 0;
+}
+
+static int parse_resp_get_random(struct nsm *nsm, struct nsm_data_resp *resp,
+ void *out, size_t max)
+{
+ /*
+ * A1 # map(1)
+ * 69 # text(9) - Name of field
+ * 47657452616E646F6D # "GetRandom"
+ * A1 # map(1) - The field itself
+ * 66 # text(6)
+ * 72616E646F6D # "random"
+ * # The rest of the response is random data
+ */
+ const u8 response[] = { CBOR_TYPE_MAP + 1,
+ CBOR_TYPE_TEXT + strlen("GetRandom"),
+ 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm',
+ CBOR_TYPE_MAP + 1,
+ CBOR_TYPE_TEXT + strlen("random"),
+ 'r', 'a', 'n', 'd', 'o', 'm' };
+ struct device *dev = &nsm->vdev->dev;
+ u8 *rand_data = NULL;
+ u8 *resp_ptr = resp->data;
+ u64 resp_len = resp->len;
+ int rc;
+
+ if ((resp->len < sizeof(response) + 1) ||
+ (memcmp(resp_ptr, response, sizeof(response)) != 0)) {
+		dev_err(dev, "Invalid response for GetRandom\n");
+ return -EFAULT;
+ }
+
+ resp_ptr += sizeof(response);
+ resp_len -= sizeof(response);
+
+ rc = cbor_object_get_array(resp_ptr, resp_len, &rand_data);
+ if (rc < 0) {
+ dev_err(dev, "GetRandom: Invalid CBOR encoding\n");
+ return rc;
+ }
+
+ rc = min_t(size_t, rc, max);
+ memcpy(out, rand_data, rc);
+
+ return rc;
+}
+
+/*
+ * HwRNG implementation
+ */
+static int nsm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct nsm *nsm = hwrng_to_nsm(rng);
+ struct device *dev = &nsm->vdev->dev;
+ int rc = 0;
+
+ /* NSM always needs to wait for a response */
+ if (!wait)
+ return 0;
+
+ mutex_lock(&nsm->lock);
+
+ rc = fill_req_get_random(nsm, &nsm->msg.req);
+ if (rc != 0)
+ goto out;
+
+ rc = nsm_sendrecv_msg_locked(nsm);
+ if (rc != 0)
+ goto out;
+
+ rc = parse_resp_get_random(nsm, &nsm->msg.resp, data, max);
+ if (rc < 0)
+ goto out;
+
+	dev_dbg(dev, "RNG: returning rand bytes = %d\n", rc);
+out:
+ mutex_unlock(&nsm->lock);
+ return rc;
+}
+
+static long nsm_dev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = u64_to_user_ptr((u64)arg);
+ struct nsm *nsm = file_to_nsm(file);
+ struct nsm_raw raw;
+ int r = 0;
+
+ if (cmd != NSM_IOCTL_RAW)
+ return -EINVAL;
+
+ if (_IOC_SIZE(cmd) != sizeof(raw))
+ return -EINVAL;
+
+ /* Copy user argument struct to kernel argument struct */
+	if (copy_from_user(&raw, argp, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+ mutex_lock(&nsm->lock);
+
+ /* Convert kernel argument struct to device request */
+ r = fill_req_raw(nsm, &nsm->msg.req, &raw);
+ if (r)
+ goto out;
+
+ /* Send message to NSM and read reply */
+ r = nsm_sendrecv_msg_locked(nsm);
+ if (r)
+ goto out;
+
+ /* Parse device response into kernel argument struct */
+ r = parse_resp_raw(nsm, &nsm->msg.resp, &raw);
+ if (r)
+ goto out;
+
+ /* Copy kernel argument struct back to user argument struct */
+ r = -EFAULT;
+ if (copy_to_user(argp, &raw, sizeof(raw)))
+ goto out;
+
+ r = 0;
+
+out:
+ mutex_unlock(&nsm->lock);
+ return r;
+}
+
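+/*
+ * A hypothetical user-space sketch (illustrative, not part of the
+ * driver), assuming the struct nsm_raw layout consumed by the handler
+ * above:
+ *
+ *	#include <fcntl.h>
+ *	#include <stdint.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/nsm.h>
+ *
+ *	int nsm_raw_example(const void *req, uint64_t req_len,
+ *			    void *resp, uint64_t resp_len)
+ *	{
+ *		struct nsm_raw raw = {
+ *			.request  = { .addr = (uintptr_t)req,  .len = req_len },
+ *			.response = { .addr = (uintptr_t)resp, .len = resp_len },
+ *		};
+ *		int fd = open("/dev/nsm", O_RDWR), rc;
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		rc = ioctl(fd, NSM_IOCTL_RAW, &raw);
+ *		close(fd);
+ *		// on success, raw.response.len holds the response size
+ *		return rc;
+ *	}
+ */
+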
+static int nsm_device_init_vq(struct virtio_device *vdev)
+{
+ struct virtqueue *vq = virtio_find_single_vq(vdev,
+ nsm_vq_callback, "nsm.vq.0");
+ struct nsm *nsm = vdev->priv;
+
+ if (IS_ERR(vq))
+ return PTR_ERR(vq);
+
+ nsm->vq = vq;
+
+ return 0;
+}
+
+static const struct file_operations nsm_dev_fops = {
+ .unlocked_ioctl = nsm_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+/* Handler for probing the NSM device */
+static int nsm_device_probe(struct virtio_device *vdev)
+{
+ struct device *dev = &vdev->dev;
+ struct nsm *nsm;
+ int rc;
+
+ nsm = devm_kzalloc(&vdev->dev, sizeof(*nsm), GFP_KERNEL);
+ if (!nsm)
+ return -ENOMEM;
+
+ vdev->priv = nsm;
+ nsm->vdev = vdev;
+
+ rc = nsm_device_init_vq(vdev);
+ if (rc) {
+ dev_err(dev, "queue failed to initialize: %d.\n", rc);
+ goto err_init_vq;
+ }
+
+ mutex_init(&nsm->lock);
+
+ /* Register as hwrng provider */
+ nsm->hwrng = (struct hwrng) {
+ .read = nsm_rng_read,
+ .name = "nsm-hwrng",
+ .quality = 1000,
+ };
+
+ rc = hwrng_register(&nsm->hwrng);
+ if (rc) {
+ dev_err(dev, "RNG initialization error: %d.\n", rc);
+ goto err_hwrng;
+ }
+
+ /* Register /dev/nsm device node */
+ nsm->misc = (struct miscdevice) {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nsm",
+ .fops = &nsm_dev_fops,
+ .mode = 0666,
+ };
+
+ rc = misc_register(&nsm->misc);
+ if (rc) {
+ dev_err(dev, "misc device registration error: %d.\n", rc);
+ goto err_misc;
+ }
+
+ return 0;
+
+err_misc:
+ hwrng_unregister(&nsm->hwrng);
+err_hwrng:
+ vdev->config->del_vqs(vdev);
+err_init_vq:
+ return rc;
+}
+
+/* Handler for removing the NSM device */
+static void nsm_device_remove(struct virtio_device *vdev)
+{
+ struct nsm *nsm = vdev->priv;
+
+ hwrng_unregister(&nsm->hwrng);
+
+ vdev->config->del_vqs(vdev);
+ misc_deregister(&nsm->misc);
+}
+
+/* NSM device configuration structure */
+static struct virtio_driver virtio_nsm_driver = {
+ .feature_table = 0,
+ .feature_table_size = 0,
+ .feature_table_legacy = 0,
+ .feature_table_size_legacy = 0,
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = nsm_device_probe,
+ .remove = nsm_device_remove,
+};
+
+module_virtio_driver(virtio_nsm_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio NSM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index a06920b7e..36f7379b8 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(ocxl_irq_set_handler);
static irqreturn_t afu_irq_handler(int virq, void *data)
{
- struct afu_irq *irq = (struct afu_irq *) data;
+ struct afu_irq *irq = data;
trace_ocxl_afu_irq_receive(virq);
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index 7f83116ae..cded7d1ca 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(ocxl_context_alloc);
*/
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
- struct ocxl_context *ctx = (struct ocxl_context *) data;
+ struct ocxl_context *ctx = data;
mutex_lock(&ctx->xsl_error_lock);
ctx->xsl_error.addr = addr;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index c06c699c0..03402203c 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -188,7 +188,7 @@ ack:
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
- struct ocxl_link *link = (struct ocxl_link *) data;
+ struct ocxl_link *link = data;
struct spa *spa = link->spa;
u64 dsisr, dar, pe_handle;
struct pe_data *pe_data;
@@ -483,7 +483,7 @@ static void release_xsl(struct kref *ref)
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
mutex_lock(&links_list_lock);
kref_put(&link->ref, release_xsl);
@@ -540,7 +540,7 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc = 0;
@@ -630,7 +630,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc;
@@ -666,7 +666,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
struct pe_data *pe_data;
@@ -752,7 +752,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
int irq;
if (atomic_dec_if_positive(&link->irq_available) < 0)
@@ -771,7 +771,7 @@ EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
- struct ocxl_link *link = (struct ocxl_link *) link_handle;
+ struct ocxl_link *link = link_handle;
xive_native_free_irq(hw_irq);
atomic_inc(&link->irq_available);
diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c
index ef73cf35d..658974143 100644
--- a/drivers/misc/ocxl/main.c
+++ b/drivers/misc/ocxl/main.c
@@ -7,7 +7,7 @@
static int __init init_ocxl(void)
{
- int rc = 0;
+ int rc;
if (!tlbie_capable)
return -EINVAL;
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index af5190887..c38a6083f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -28,14 +28,14 @@
#define DRV_MODULE_NAME "pci-endpoint-test"
#define IRQ_TYPE_UNDEFINED -1
-#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_INTX 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
#define PCI_ENDPOINT_TEST_MAGIC 0x0
#define PCI_ENDPOINT_TEST_COMMAND 0x4
-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_INTX_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
@@ -183,8 +183,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
bool res = true;
switch (type) {
- case IRQ_TYPE_LEGACY:
- irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
+ case IRQ_TYPE_INTX:
+ irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
if (irq < 0)
dev_err(dev, "Failed to get Legacy interrupt\n");
break;
@@ -244,7 +244,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
fail:
switch (irq_type) {
- case IRQ_TYPE_LEGACY:
+ case IRQ_TYPE_INTX:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));
break;
@@ -263,6 +263,15 @@ fail:
return false;
}
+static const u32 bar_test_pattern[] = {
+ 0xA0A0A0A0,
+ 0xA1A1A1A1,
+ 0xA2A2A2A2,
+ 0xA3A3A3A3,
+ 0xA4A4A4A4,
+ 0xA5A5A5A5,
+};
+
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
@@ -280,26 +289,27 @@ static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
size = 0x4;
for (j = 0; j < size; j += 4)
- pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
+ pci_endpoint_test_bar_writel(test, barno, j,
+ bar_test_pattern[barno]);
for (j = 0; j < size; j += 4) {
val = pci_endpoint_test_bar_readl(test, barno, j);
- if (val != 0xA0A0A0A0)
+ if (val != bar_test_pattern[barno])
return false;
}
return true;
}
-static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
+static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
u32 val;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
- IRQ_TYPE_LEGACY);
+ IRQ_TYPE_INTX);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
- COMMAND_RAISE_LEGACY_IRQ);
+ COMMAND_RAISE_INTX_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
@@ -385,7 +395,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
@@ -521,7 +531,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
@@ -621,7 +631,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
if (use_dma)
flags |= FLAG_USE_DMA;
- if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
@@ -691,7 +701,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
- if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
+ if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return false;
}
@@ -737,8 +747,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
- case PCITEST_LEGACY_IRQ:
- ret = pci_endpoint_test_legacy_irq(test);
+ case PCITEST_INTX_IRQ:
+ ret = pci_endpoint_test_intx_irq(test);
break;
case PCITEST_MSI:
case PCITEST_MSIX:
@@ -801,7 +811,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->irq_type = IRQ_TYPE_UNDEFINED;
if (no_msi)
- irq_type = IRQ_TYPE_LEGACY;
+ irq_type = IRQ_TYPE_INTX;
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
@@ -860,7 +870,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, test);
- id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "Unable to get id\n");
@@ -907,7 +917,7 @@ err_kfree_test_name:
kfree(test->name);
err_ida_remove:
- ida_simple_remove(&pci_endpoint_test_ida, id);
+ ida_free(&pci_endpoint_test_ida, id);
err_iounmap:
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
@@ -943,7 +953,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
kfree(test->name);
- ida_simple_remove(&pci_endpoint_test_ida, id);
+ ida_free(&pci_endpoint_test_ida, id);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index 9715798ac..f3f2113a5 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -7,16 +7,15 @@
* Copyright (C) 2021 Oracle.
*/
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/kernel.h>
+#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
-#include <linux/slab.h>
-
-#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 689af4c28..9ad20e827 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -5,17 +5,13 @@
* Copyright (C) 2021 Oracle.
*/
-#include <linux/kernel.h>
+#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
-#include <linux/slab.h>
-
-#include <uapi/misc/pvpanic.h>
#include "pvpanic.h"
-#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index 305b367e0..df3457ce1 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -8,16 +8,20 @@
*/
#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <linux/kstrtox.h>
+#include <linux/limits.h>
+#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/panic_notifier.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
#include <uapi/misc/pvpanic.h>
diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h
index 46ffb1043..a42fa760e 100644
--- a/drivers/misc/pvpanic/pvpanic.h
+++ b/drivers/misc/pvpanic/pvpanic.h
@@ -8,6 +8,11 @@
#ifndef PVPANIC_H_
#define PVPANIC_H_
+#include <linux/compiler_types.h>
+
+struct attribute_group;
+struct device;
+
int devm_pvpanic_probe(struct device *dev, void __iomem *base);
extern const struct attribute_group *pvpanic_dev_groups[];
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index f50d22882..a0ad1f3a6 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, dg_size);
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
@@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
index de7fee7ea..681b35001 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -8,12 +8,6 @@
#include <linux/slab.h>
#include "vmci_handle_array.h"
-static size_t handle_arr_calc_size(u32 capacity)
-{
- return VMCI_HANDLE_ARRAY_HEADER_SIZE +
- capacity * sizeof(struct vmci_handle);
-}
-
struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
{
struct vmci_handle_arr *array;
@@ -25,7 +19,7 @@ struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
max_capacity);
- array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC);
if (!array)
return NULL;
@@ -51,8 +45,8 @@ int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
struct vmci_handle_arr *new_array;
u32 capacity_bump = min(array->max_capacity - array->capacity,
array->capacity);
- size_t new_size = handle_arr_calc_size(array->capacity +
- capacity_bump);
+ size_t new_size = struct_size(array, entries,
+ size_add(array->capacity, capacity_bump));
if (array->size >= array->max_capacity)
return VMCI_ERROR_NO_MEM;
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
index 96193f85b..27a38b97e 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -17,17 +17,11 @@ struct vmci_handle_arr {
u32 max_capacity;
u32 size;
u32 pad;
- struct vmci_handle entries[];
+ struct vmci_handle entries[] __counted_by(capacity);
};
-#define VMCI_HANDLE_ARRAY_HEADER_SIZE \
- offsetof(struct vmci_handle_arr, entries)
/* Select a default capacity that results in a 64 byte sized array */
#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6
-/* Make sure that the max array size can be expressed by a u32 */
-#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \
- ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \
- sizeof(struct vmci_handle))
struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 2f51db4df..cf396e8f3 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,13 +119,12 @@ void mmc_retune_enable(struct mmc_host *host)
/*
* Pause re-tuning for a small set of operations. The pause begins after the
- * next command and after first doing re-tuning.
+ * next command.
*/
void mmc_retune_pause(struct mmc_host *host)
{
if (!host->retune_paused) {
host->retune_paused = 1;
- mmc_retune_needed(host);
mmc_retune_hold(host);
}
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 8479cf3df..58ed7193a 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -136,6 +136,17 @@ static void mmc_set_erase_size(struct mmc_card *card)
mmc_init_erase(card);
}
+
+static void mmc_set_wp_grp_size(struct mmc_card *card)
+{
+ if (card->ext_csd.erase_group_def & 1)
+ card->wp_grp_size = card->ext_csd.hc_erase_size *
+ card->ext_csd.raw_hc_erase_gap_size;
+ else
+ card->wp_grp_size = card->csd.erase_size *
+ (card->csd.wp_grp_size + 1);
+}
+
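+/*
+ * A worked example of the unit handling (illustrative values): with
+ * high-capacity groups enabled, EXT_CSD HC_ERASE_GRP_SIZE = 1 gives
+ * hc_erase_size = 1 << 10 = 1024 sectors (512 KiB), and with
+ * HC_WP_GRP_SIZE = 16 the result is wp_grp_size = 1024 * 16 = 16384
+ * sectors. The wp_grp_size sysfs attribute added below reports bytes,
+ * i.e. 16384 << 9 = 8 MiB.
+ */
+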
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
@@ -186,6 +197,7 @@ static int mmc_decode_csd(struct mmc_card *card)
b = UNSTUFF_BITS(resp, 37, 5);
csd->erase_size = (a + 1) * (b + 1);
csd->erase_size <<= csd->write_blkbits - 9;
+ csd->wp_grp_size = UNSTUFF_BITS(resp, 32, 5);
}
return 0;
@@ -613,11 +625,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
-
- card->ext_csd.max_packed_writes =
- ext_csd[EXT_CSD_MAX_PACKED_WRITES];
- card->ext_csd.max_packed_reads =
- ext_csd[EXT_CSD_MAX_PACKED_READS];
} else {
card->ext_csd.data_sector_size = 512;
}
@@ -790,6 +797,7 @@ MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
+MMC_DEV_ATTR(wp_grp_size, "%u\n", card->wp_grp_size << 9);
MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
@@ -850,6 +858,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_date.attr,
&dev_attr_erase_size.attr,
&dev_attr_preferred_erase_size.attr,
+ &dev_attr_wp_grp_size.attr,
&dev_attr_fwrev.attr,
&dev_attr_ffu_capable.attr,
&dev_attr_hwrev.attr,
@@ -1766,7 +1775,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_erase_size(card);
}
}
-
+ mmc_set_wp_grp_size(card);
/*
* Ensure eMMC user default partition is enabled
*/
@@ -1824,8 +1833,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
-
- } else if (!mmc_card_hs400es(card)) {
+ } else if (mmc_card_hs400es(card)) {
+ if (host->ops->execute_hs400_tuning) {
+ err = host->ops->execute_hs400_tuning(host, card);
+ if (err)
+ goto free_card;
+ }
+ } else {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
if (err > 0 && mmc_card_hs(card)) {
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 0f6a56310..8f7f587a0 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -1904,7 +1904,7 @@ static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
}
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
- unsigned long sz)
+ unsigned long sz, int secs, int force_retuning)
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
@@ -1921,7 +1921,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
for (cnt = 0; cnt < UINT_MAX; cnt++) {
ktime_get_ts64(&ts2);
ts = timespec64_sub(ts2, ts1);
- if (ts.tv_sec >= 10)
+ if (ts.tv_sec >= secs)
break;
ea = mmc_test_rnd_num(range1);
if (ea == last_ea)
@@ -1929,6 +1929,8 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
last_ea = ea;
dev_addr = rnd_addr + test->card->pref_erase * ea +
ssz * mmc_test_rnd_num(range2);
+ if (force_retuning)
+ mmc_retune_needed(test->card->host);
ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
if (ret)
return ret;
@@ -1953,24 +1955,35 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write)
*/
if (write) {
next = rnd_next;
- ret = mmc_test_rnd_perf(test, write, 0, sz);
+ ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0);
if (ret)
return ret;
rnd_next = next;
}
- ret = mmc_test_rnd_perf(test, write, 1, sz);
+ ret = mmc_test_rnd_perf(test, write, 1, sz, 10, 0);
if (ret)
return ret;
}
sz = t->max_tfr;
if (write) {
next = rnd_next;
- ret = mmc_test_rnd_perf(test, write, 0, sz);
+ ret = mmc_test_rnd_perf(test, write, 0, sz, 10, 0);
if (ret)
return ret;
rnd_next = next;
}
- return mmc_test_rnd_perf(test, write, 1, sz);
+ return mmc_test_rnd_perf(test, write, 1, sz, 10, 0);
+}
+
+static int mmc_test_retuning(struct mmc_test_card *test)
+{
+ if (!mmc_can_retune(test->card->host)) {
+ pr_info("%s: No retuning - test skipped\n",
+ mmc_hostname(test->card->host));
+ return RESULT_UNSUP_HOST;
+ }
+
+ return mmc_test_rnd_perf(test, 0, 0, 8192, 30, 1);
}
/*
@@ -2921,6 +2934,14 @@ static const struct mmc_test_case mmc_test_cases[] = {
.run = mmc_test_cmds_during_write_cmd23_nonblock,
.cleanup = mmc_test_area_cleanup,
},
+
+ {
+ .name = "Re-tuning reliability",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_retuning,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
};
static DEFINE_MUTEX(mmc_test_lock);
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index ef38dcd3a..575ebbce3 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -178,11 +178,9 @@ static inline void sdio_uart_release_func(struct sdio_uart_port *port)
sdio_release_host(port->func);
}
-static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+static inline u8 sdio_in(struct sdio_uart_port *port, int offset)
{
- unsigned char c;
- c = sdio_readb(port->func, port->regs_offset + offset, NULL);
- return c;
+ return sdio_readb(port->func, port->regs_offset + offset, NULL);
}
static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
@@ -192,8 +190,8 @@ static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
{
- unsigned char status;
unsigned int ret;
+ u8 status;
/* FIXME: What stops this losing the delta bits and breaking
sdio_uart_check_modem_status ? */
@@ -354,15 +352,13 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
sdio_out(port, UART_IER, port->ier);
}
-static void sdio_uart_receive_chars(struct sdio_uart_port *port,
- unsigned int *status)
+static void sdio_uart_receive_chars(struct sdio_uart_port *port, u8 *status)
{
- unsigned int ch, flag;
int max_count = 256;
do {
- ch = sdio_in(port, UART_RX);
- flag = TTY_NORMAL;
+ u8 ch = sdio_in(port, UART_RX);
+ u8 flag = TTY_NORMAL;
port->icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
@@ -449,8 +445,8 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
{
- int status;
struct tty_struct *tty;
+ u8 status;
status = sdio_in(port, UART_MSR);
@@ -499,7 +495,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
static void sdio_uart_irq(struct sdio_func *func)
{
struct sdio_uart_port *port = sdio_get_drvdata(func);
- unsigned int iir, lsr;
+ u8 iir, lsr;
/*
* In a few places sdio_uart_irq() is called directly instead of
@@ -795,7 +791,7 @@ static unsigned int sdio_uart_chars_in_buffer(struct tty_struct *tty)
return kfifo_len(&port->xmit_fifo);
}
-static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+static void sdio_uart_send_xchar(struct tty_struct *tty, u8 ch)
{
struct sdio_uart_port *port = tty->driver_data;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 2a99ffb61..b8dda8160 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1157,7 +1157,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* We rely on full duplex transfers, mostly to reduce
* per-transfer overheads (by making fewer transfers).
*/
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
return -EINVAL;
/* MMC and SD specs only seem to care that sampling is on the
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e967cca7a..b790c3c3c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -273,6 +273,7 @@ static struct variant_data variant_stm32_sdmmc = {
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(12, 5),
.stm32_idmabsize_align = BIT(5),
+ .supports_sdio_irq = true,
.busy_timeout = true,
.busy_detect = true,
.busy_detect_flag = MCI_STM32_BUSYD0,
@@ -300,6 +301,7 @@ static struct variant_data variant_stm32_sdmmcv2 = {
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(16, 5),
.stm32_idmabsize_align = BIT(5),
+ .supports_sdio_irq = true,
.dma_lli = true,
.busy_timeout = true,
.busy_detect = true,
@@ -328,6 +330,7 @@ static struct variant_data variant_stm32_sdmmcv3 = {
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
.stm32_idmabsize_mask = GENMASK(16, 6),
.stm32_idmabsize_align = BIT(6),
+ .supports_sdio_irq = true,
.dma_lli = true,
.busy_timeout = true,
.busy_detect = true,
@@ -421,8 +424,9 @@ void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
*/
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
- /* Keep busy mode in DPSM if enabled */
- datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
+ /* Keep busy mode in DPSM and SDIO mask if enabled */
+ datactrl |= host->datactrl_reg & (host->variant->busy_dpsm_flag |
+ host->variant->datactrl_mask_sdio);
if (host->datactrl_reg != datactrl) {
host->datactrl_reg = datactrl;
@@ -1762,6 +1766,25 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mmci_write_sdio_irq_bit(struct mmci_host *host, int enable)
+{
+ void __iomem *base = host->base;
+ u32 mask = readl_relaxed(base + MMCIMASK0);
+
+ if (enable)
+ writel_relaxed(mask | MCI_ST_SDIOITMASK, base + MMCIMASK0);
+ else
+ writel_relaxed(mask & ~MCI_ST_SDIOITMASK, base + MMCIMASK0);
+}
+
+static void mmci_signal_sdio_irq(struct mmci_host *host, u32 status)
+{
+ if (status & MCI_ST_SDIOIT) {
+ mmci_write_sdio_irq_bit(host, 0);
+ sdio_signal_irq(host->mmc);
+ }
+}
+
/*
* Handle completion of command and data transfers.
*/
@@ -1806,6 +1829,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
mmci_data_irq(host, host->data, status);
}
+ if (host->variant->supports_sdio_irq)
+ mmci_signal_sdio_irq(host, status);
+
/*
* Busy detection has been handled by mmci_cmd_irq() above.
* Clear the status bit to prevent polling in IRQ context.
@@ -2042,6 +2068,35 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return ret;
}
+static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ if (enable)
+ /* Keep the SDIO mode bit if SDIO irqs are enabled */
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_sdio_irq_bit(host, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!enable) {
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+ }
+}
+
+static void mmci_ack_sdio_irq(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mmci_write_sdio_irq_bit(host, 1);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.pre_req = mmci_pre_request,
@@ -2317,6 +2372,16 @@ static int mmci_probe(struct amba_device *dev,
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
}
+ if (variant->supports_sdio_irq && host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+ mmci_ops.enable_sdio_irq = mmci_enable_sdio_irq;
+ mmci_ops.ack_sdio_irq = mmci_ack_sdio_irq;
+
+ mmci_write_datactrlreg(host,
+ host->variant->datactrl_mask_sdio);
+ }
+
/* Variants with mandatory busy timeout in HW need R1B responses. */
if (variant->busy_timeout)
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
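The mmci hunks above wire up in-band SDIO IRQs in MMC_CAP2_SDIO_IRQ_NOTHREAD mode: mmci_irq() masks MCI_ST_SDIOITMASK and calls sdio_signal_irq(), the MMC core runs the card's handler from its workqueue, then calls mmci_ack_sdio_irq() to unmask the bit again. A minimal sketch of the consumer side, using the standard SDIO core API (the my_card_* names are hypothetical):

    #include <linux/mmc/sdio_func.h>
    #include <linux/mmc/sdio_ids.h>

    static void my_card_irq_handler(struct sdio_func *func)
    {
            /* read and ack the card's own interrupt status here */
    }

    static int my_card_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
    {
            int ret;

            sdio_claim_host(func);
            /* reaches mmci_enable_sdio_irq(mmc, 1) via the MMC core */
            ret = sdio_claim_irq(func, my_card_irq_handler);
            sdio_release_host(func);
            return ret;
    }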
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 34d9897c2..a5eb4ced4 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -331,6 +331,7 @@ enum mmci_busy_state {
* register.
* @opendrain: bitmask identifying the OPENDRAIN bit inside MMCIPOWER register
* @dma_lli: true if variant has dma link list feature.
+ * @supports_sdio_irq: allow SD I/O card to interrupt the host
* @stm32_idmabsize_mask: stm32 sdmmc idma buffer size.
* @dma_flow_controller: use peripheral as flow controller for DMA.
*/
@@ -377,6 +378,7 @@ struct variant_data {
u32 start_err;
u32 opendrain;
u8 dma_lli:1;
+ bool supports_sdio_irq;
u32 stm32_idmabsize_mask;
u32 stm32_idmabsize_align;
bool dma_flow_controller;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 97f7c3d4b..1634b1f5d 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -252,12 +252,16 @@
#define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */
+#define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */
+#define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */
#define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */
#define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */
#define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */
+#define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */
+#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
@@ -325,7 +329,9 @@
#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */
-#define PAD_DELAY_MAX 32 /* PAD delay cells */
+#define TUNING_REG2_FIXED_OFFEST 4
+#define PAD_DELAY_HALF 32 /* PAD delay cells */
+#define PAD_DELAY_FULL 64
/*--------------------------------------------------------------------------*/
/* Descriptor Structure */
/*--------------------------------------------------------------------------*/
@@ -461,6 +467,7 @@ struct msdc_host {
u32 hs400_ds_dly3;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
+ u32 tuning_step;
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
@@ -1149,9 +1156,11 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host)
static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
- if (host->error)
- dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
- __func__, cmd->opcode, cmd->arg, host->error);
+ if (host->error &&
+ ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) ||
+ cmd->error == -ETIMEDOUT))
+ dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
+ __func__, cmd->opcode, cmd->arg, host->error);
}
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
@@ -1615,7 +1624,7 @@ static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
}
if (cmd_err || dat_err) {
- dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x",
+ dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
cmd_err, dat_err, intsts);
}
@@ -1780,10 +1789,20 @@ static void msdc_init_hw(struct msdc_host *host)
DATA_K_VALUE_SEL);
sdr_set_bits(host->top_base + EMMC_TOP_CMD,
PAD_CMD_RD_RXDLY_SEL);
+ if (host->tuning_step > PAD_DELAY_HALF) {
+ sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY2_SEL);
+ sdr_set_bits(host->top_base + EMMC_TOP_CMD,
+ PAD_CMD_RD_RXDLY2_SEL);
+ }
} else {
sdr_set_bits(host->base + tune_reg,
MSDC_PAD_TUNE_RD_SEL |
MSDC_PAD_TUNE_CMD_SEL);
+ if (host->tuning_step > PAD_DELAY_HALF)
+ sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_RD2_SEL |
+ MSDC_PAD_TUNE_CMD2_SEL);
}
} else {
/* choose clock tune */
@@ -1925,24 +1944,24 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
msdc_set_mclk(host, ios->timing, ios->clock);
}
-static u32 test_delay_bit(u32 delay, u32 bit)
+static u64 test_delay_bit(u64 delay, u32 bit)
{
- bit %= PAD_DELAY_MAX;
- return delay & BIT(bit);
+ bit %= PAD_DELAY_FULL;
+ return delay & BIT_ULL(bit);
}
-static int get_delay_len(u32 delay, u32 start_bit)
+static int get_delay_len(u64 delay, u32 start_bit)
{
int i;
- for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
+ for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) {
if (test_delay_bit(delay, start_bit + i) == 0)
return i;
}
- return PAD_DELAY_MAX - start_bit;
+ return PAD_DELAY_FULL - start_bit;
}
-static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
+static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay)
{
int start = 0, len = 0;
int start_final = 0, len_final = 0;
@@ -1950,28 +1969,28 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
struct msdc_delay_phase delay_phase = { 0, };
if (delay == 0) {
- dev_err(host->dev, "phase error: [map:%x]\n", delay);
+ dev_err(host->dev, "phase error: [map:%016llx]\n", delay);
delay_phase.final_phase = final_phase;
return delay_phase;
}
- while (start < PAD_DELAY_MAX) {
+ while (start < PAD_DELAY_FULL) {
len = get_delay_len(delay, start);
if (len_final < len) {
start_final = start;
len_final = len;
}
start += len ? len : 1;
- if (len >= 12 && start_final < 4)
+ if (!upper_32_bits(delay) && len >= 12 && start_final < 4)
break;
}
/* The rule is to find the smallest delay cell */
if (start_final == 0)
- final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
+ final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL;
else
- final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
- dev_dbg(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
+ final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL;
+ dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n",
delay, len_final, final_phase);
delay_phase.maxlen = len_final;
@@ -1984,30 +2003,64 @@ static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->top_base)
- sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
- value);
- else
- sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
- value);
+ if (host->top_base) {
+ if (value < PAD_DELAY_HALF) {
+ sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value);
+ sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0);
+ } else {
+ sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
+ PAD_DELAY_HALF - 1);
+ sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2,
+ value - PAD_DELAY_HALF);
+ }
+ } else {
+ if (value < PAD_DELAY_HALF) {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_CMDRDLY2, 0);
+ } else {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
+ PAD_DELAY_HALF - 1);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF);
+ }
+ }
}
static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
{
u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->top_base)
- sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
- PAD_DAT_RD_RXDLY, value);
- else
- sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
- value);
+ if (host->top_base) {
+ if (value < PAD_DELAY_HALF) {
+ sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY, value);
+ sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY2, 0);
+ } else {
+ sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
+ sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
+ PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
+ }
+ } else {
+ if (value < PAD_DELAY_HALF) {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_DATRRDLY2, 0);
+ } else {
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
+ PAD_DELAY_HALF - 1);
+ sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
+ MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF);
+ }
+ }
}
static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
- u32 rise_delay = 0, fall_delay = 0;
+ u64 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
@@ -2023,7 +2076,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -2033,9 +2086,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- rise_delay |= BIT(i);
+ rise_delay |= BIT_ULL(i);
} else {
- rise_delay &= ~BIT(i);
+ rise_delay &= ~BIT_ULL(i);
break;
}
}
@@ -2047,7 +2100,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -2057,9 +2110,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
for (j = 0; j < 3; j++) {
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err) {
- fall_delay |= BIT(i);
+ fall_delay |= BIT_ULL(i);
} else {
- fall_delay &= ~BIT(i);
+ fall_delay &= ~BIT_ULL(i);
break;
}
}
@@ -2082,12 +2135,12 @@ skip_fall:
if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
- internal_delay |= BIT(i);
+ internal_delay |= BIT_ULL(i);
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
@@ -2121,7 +2174,8 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
else
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+
+ for (i = 0; i < PAD_DELAY_HALF; i++) {
sdr_set_field(host->base + PAD_CMD_TUNE,
PAD_CMD_TUNE_RX_DLY3, i);
/*
@@ -2151,7 +2205,7 @@ static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
- u32 rise_delay = 0, fall_delay = 0;
+ u64 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
@@ -2160,11 +2214,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- rise_delay |= BIT(i);
+ rise_delay |= BIT_ULL(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
@@ -2174,11 +2228,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- fall_delay |= BIT(i);
+ fall_delay |= BIT_ULL(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
@@ -2206,7 +2260,7 @@ skip_fall:
static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
- u32 rise_delay = 0, fall_delay = 0;
+ u64 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
@@ -2217,12 +2271,12 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_clr_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
- for (i = 0 ; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- rise_delay |= BIT(i);
+ rise_delay |= BIT_ULL(i);
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
@@ -2233,12 +2287,12 @@ static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
sdr_set_bits(host->base + MSDC_IOCON,
MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL);
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < host->tuning_step; i++) {
msdc_set_cmd_delay(host, i);
msdc_set_data_delay(host, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
- fall_delay |= BIT(i);
+ fall_delay |= BIT_ULL(i);
}
final_fall_delay = get_best_delay(host, fall_delay);
@@ -2346,7 +2400,7 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
}
host->hs400_tuning = true;
- for (i = 0; i < PAD_DELAY_MAX; i++) {
+ for (i = 0; i < PAD_DELAY_HALF; i++) {
if (host->top_base)
sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY1, i);
@@ -2580,6 +2634,8 @@ static const struct cqhci_host_ops msdc_cmdq_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
+
of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
&host->latch_ck);
@@ -2601,6 +2657,14 @@ static void msdc_of_property_parse(struct platform_device *pdev,
else
host->hs400_cmd_resp_sel_rising = false;
+ if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step",
+ &host->tuning_step)) {
+ if (mmc->caps2 & MMC_CAP2_NO_MMC)
+ host->tuning_step = PAD_DELAY_FULL;
+ else
+ host->tuning_step = PAD_DELAY_HALF;
+ }
+
if (of_property_read_bool(pdev->dev.of_node,
"supports-cqe"))
host->cqhci = true;
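The mtk-sd hunks above double the scannable delay window from 32 to 64 cells (PAD_DELAY_HALF to PAD_DELAY_FULL), which is why the tuning bitmaps become u64 and every BIT(i) becomes BIT_ULL(i): BIT() expands to an unsigned long shift, which is undefined for i >= 32 on 32-bit builds. A minimal standalone sketch of the scan (sample_ok() is a hypothetical stand-in for mmc_send_tuning() succeeding):

    #include <linux/bits.h>
    #include <linux/types.h>

    static bool sample_ok(unsigned int cell);   /* hypothetical probe */

    static u64 scan_delay_window(unsigned int tuning_step)
    {
            u64 map = 0;
            unsigned int i;

            /* tuning_step may now be up to PAD_DELAY_FULL == 64 */
            for (i = 0; i < tuning_step; i++)
                    if (sample_ok(i))
                            map |= BIT_ULL(i);  /* BIT(i) overflows for i >= 32 */
            return map;
    }

get_best_delay() then picks the longest run of set bits in this map.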
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 9fb8995b4..13fa8588e 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1119,10 +1119,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
host = slot->host;
- if (slot->vsd)
- gpiod_set_value(slot->vsd, power_on);
- if (slot->vio)
- gpiod_set_value(slot->vio, power_on);
+ if (power_on) {
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(1);
+ }
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(1);
+ }
+ } else {
+ if (slot->vio) {
+ gpiod_set_value(slot->vio, power_on);
+ msleep(50);
+ }
+ if (slot->vsd) {
+ gpiod_set_value(slot->vsd, power_on);
+ msleep(50);
+ }
+ }
if (slot->pdata->set_power != NULL)
slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@@ -1259,18 +1274,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->pdata = &host->pdata->slots[id];
/* Check for some optional GPIO controls */
- slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
- id, GPIOD_OUT_LOW);
+ slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vsd))
return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
- slot->vio = gpiod_get_index_optional(host->dev, "vio",
- id, GPIOD_OUT_LOW);
+ slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+ id, GPIOD_OUT_LOW);
if (IS_ERR(slot->vio))
return dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
- slot->cover = gpiod_get_index_optional(host->dev, "cover",
- id, GPIOD_IN);
+ slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+ id, GPIOD_IN);
if (IS_ERR(slot->cover))
return dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
@@ -1384,13 +1399,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
if (IS_ERR(host->virt_base))
return PTR_ERR(host->virt_base);
- host->slot_switch = gpiod_get_optional(host->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(host->slot_switch))
- return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
- "error looking up slot switch GPIO\n");
-
-
INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
@@ -1409,6 +1417,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
platform_set_drvdata(pdev, host);
+ host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(host->slot_switch))
+ return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+ "error looking up slot switch GPIO\n");
+
host->id = pdev->id;
host->irq = irq;
host->phys_base = res->start;
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 87d78432a..7dfe7c4e0 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -7,6 +7,7 @@
* Wei WANG <wei_wang@realsil.com.cn>
*/
+#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/highmem.h>
@@ -947,7 +948,7 @@ static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode)
/* send at least 74 clocks */
rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN);
- if (PCI_PID(pcr) == PID_5261) {
+ if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
/*
* If test mode is set, switch to SD Express unconditionally;
* this is only for factory testing.
@@ -1364,6 +1365,14 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
struct rtsx_pcr *pcr = host->pcr;
+ if (PCI_PID(pcr) == PID_5264) {
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_2_5GT);
+ pci_write_config_byte(pcr->pci, 0x80e, 0x02);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_5_0GT);
+ }
+
/* Set relink_time for changing to PCIe card */
relink_time = 0x8FFF;
@@ -1379,6 +1388,12 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
if (pcr->ops->disable_auto_blink)
pcr->ops->disable_auto_blink(pcr);
+ if (PCI_PID(pcr) == PID_5264) {
+ rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2,
+ RTS5264_CHIP_RST_N_SEL, RTS5264_CHIP_RST_N_SEL);
+ rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
+ }
+
/* Avoid the issue where PCIe/NVMe mode can't enter delink */
pcr->hw_param.interrupt_en &= ~(SD_INT_EN);
rtsx_pci_writel(pcr, RTSX_BIER, pcr->hw_param.interrupt_en);
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index c23251bb9..9053526fa 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -6,6 +6,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -44,8 +45,13 @@ struct brcmstb_match_priv {
static inline void enable_clock_gating(struct sdhci_host *host)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 reg;
+ if (!(priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK))
+ return;
+
reg = sdhci_readl(host, SDHCI_VENDOR);
reg |= SDHCI_VENDOR_GATE_SDCLK_EN;
sdhci_writel(host, reg, SDHCI_VENDOR);
@@ -53,14 +59,53 @@ static inline void enable_clock_gating(struct sdhci_host *host)
static void brcmstb_reset(struct sdhci_host *host, u8 mask)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
-
sdhci_and_cqhci_reset(host, mask);
/* Reset will clear this, so re-enable it */
- if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)
- enable_clock_gating(host);
+ enable_clock_gating(host);
+}
+
+static void brcmstb_sdhci_reset_cmd_data(struct sdhci_host *host, u8 mask)
+{
+ u32 new_mask = (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA)) << 24;
+ int ret;
+ u32 reg;
+
+ /*
+ * The SDHCI_CLOCK_CONTROL CARD_EN and CLOCK_INT_EN bits must be set
+ * together with the SOFTWARE_RESET RESET_CMD or RESET_DATA bits, so
+ * SDHCI_CLOCK_CONTROL is accessed as a 32-bit register
+ */
+ new_mask |= SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN;
+ reg = sdhci_readl(host, SDHCI_CLOCK_CONTROL);
+ sdhci_writel(host, reg | new_mask, SDHCI_CLOCK_CONTROL);
+
+ reg = sdhci_readb(host, SDHCI_SOFTWARE_RESET);
+
+ ret = read_poll_timeout_atomic(sdhci_readb, reg, !(reg & mask),
+ 10, 10000, false,
+ host, SDHCI_SOFTWARE_RESET);
+
+ if (ret) {
+ pr_err("%s: Reset 0x%x never completed.\n",
+ mmc_hostname(host->mmc), (int)mask);
+ sdhci_err_stats_inc(host, CTRL_TIMEOUT);
+ sdhci_dumpregs(host);
+ }
+}
+
+static void brcmstb_reset_74165b0(struct sdhci_host *host, u8 mask)
+{
+ /* take care of RESET_ALL as usual */
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_and_cqhci_reset(host, SDHCI_RESET_ALL);
+
+ /* cmd and/or data treated differently on this core */
+ if (mask & (SDHCI_RESET_CMD | SDHCI_RESET_DATA))
+ brcmstb_sdhci_reset_cmd_data(host, mask);
+
+ /* Reset will clear this, so re-enable it */
+ enable_clock_gating(host);
}
static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -162,6 +207,13 @@ static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
.set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
};
+static struct sdhci_ops sdhci_brcmstb_ops_74165b0 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = brcmstb_reset_74165b0,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+};
+
static struct brcmstb_match_priv match_priv_7425 = {
.flags = BRCMSTB_MATCH_FLAGS_NO_64BIT |
BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
@@ -179,10 +231,17 @@ static const struct brcmstb_match_priv match_priv_7216 = {
.ops = &sdhci_brcmstb_ops_7216,
};
+static struct brcmstb_match_priv match_priv_74165b0 = {
+ .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE,
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_74165b0,
+};
+
static const struct of_device_id __maybe_unused sdhci_brcm_of_match[] = {
{ .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 },
{ .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
{ .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
+ { .compatible = "brcm,bcm74165b0-sdhci", .data = &match_priv_74165b0 },
{},
};
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 668e0acee..e113b99a3 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2694,6 +2694,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2708,6 +2713,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2726,7 +2732,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
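The new host->runtime_suspended bookkeeping matters because the sdhci core checks that flag in its interrupt handler and bails out before touching registers on a suspended, possibly clock-gated, controller. Roughly what the core does (paraphrased from sdhci.c, simplified; not part of this patch):

    static irqreturn_t sdhci_irq_sketch(int irq, void *dev_id)
    {
            struct sdhci_host *host = dev_id;

            spin_lock(&host->lock);
            if (host->runtime_suspended) {
                    /* clocks may be off: do not touch registers */
                    spin_unlock(&host->lock);
                    return IRQ_NONE;
            }
            /* ... normal interrupt handling ... */
            spin_unlock(&host->lock);
            return IRQ_HANDLED;
    }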
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index a0524127c..60b42126a 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
@@ -35,6 +36,21 @@
#define DWCMSHC_CARD_IS_EMMC BIT(0)
#define DWCMSHC_ENHANCED_STROBE BIT(8)
#define DWCMSHC_EMMC_ATCTRL 0x40
+/* Tuning and auto-tuning fields in AT_CTRL_R control register */
+#define AT_CTRL_AT_EN BIT(0) /* autotuning is enabled */
+#define AT_CTRL_CI_SEL BIT(1) /* interval to drive center phase select */
+#define AT_CTRL_SWIN_TH_EN BIT(2) /* sampling window threshold enable */
+#define AT_CTRL_RPT_TUNE_ERR BIT(3) /* enable reporting framing errors */
+#define AT_CTRL_SW_TUNE_EN BIT(4) /* enable software managed tuning */
+#define AT_CTRL_WIN_EDGE_SEL_MASK GENMASK(11, 8) /* bits [11:8] */
+#define AT_CTRL_WIN_EDGE_SEL 0xf /* sampling window edge select */
+#define AT_CTRL_TUNE_CLK_STOP_EN BIT(16) /* clocks stopped during phase code change */
+#define AT_CTRL_PRE_CHANGE_DLY_MASK GENMASK(18, 17) /* bits [18:17] */
+#define AT_CTRL_PRE_CHANGE_DLY 0x1 /* 2-cycle latency */
+#define AT_CTRL_POST_CHANGE_DLY_MASK GENMASK(20, 19) /* bits [20:19] */
+#define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */
+#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */
+#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */
/* Rockchip specific Registers */
#define DWCMSHC_EMMC_DLL_CTRL 0x800
@@ -72,6 +88,82 @@
(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
#define RK35xx_MAX_CLKS 3
+/* PHY register area pointer */
+#define DWC_MSHC_PTR_PHY_R 0x300
+
+/* PHY general configuration */
+#define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00)
+#define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */
+#define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */
+#define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */
+#define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */
+#define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */
+
+/* PHY command/response pad settings */
+#define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04)
+
+/* PHY data pad settings */
+#define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06)
+
+/* PHY clock pad settings */
+#define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08)
+
+/* PHY strobe pad settings */
+#define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a)
+
+/* PHY reset pad settings */
+#define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c)
+
+/* Bitfields are common for all pad settings */
+#define PHY_PAD_RXSEL_1V8 0x1 /* Receiver type select for 1.8V */
+#define PHY_PAD_RXSEL_3V3 0x2 /* Receiver type select for 3.3V */
+
+#define PHY_PAD_WEAKPULL_MASK GENMASK(4, 3) /* bits [4:3] */
+#define PHY_PAD_WEAKPULL_PULLUP 0x1 /* Weak pull up enabled */
+#define PHY_PAD_WEAKPULL_PULLDOWN 0x2 /* Weak pull down enabled */
+
+#define PHY_PAD_TXSLEW_CTRL_P_MASK GENMASK(8, 5) /* bits [8:5] */
+#define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */
+#define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */
+#define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */
+
+/* PHY CLK delay line settings */
+#define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d)
+#define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */
+
+/* PHY CLK delay line delay code */
+#define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e)
+#define PHY_SDCLKDL_DC_INITIAL 0x40 /* initial delay code */
+#define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */
+#define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */
+
+/* PHY drift_cclk_rx delay line configuration setting */
+#define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21)
+#define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */
+#define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */
+
+/* PHY DLL control settings */
+#define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24)
+#define PHY_DLL_CTRL_DISABLE 0x0 /* PHY DLL is disabled */
+#define PHY_DLL_CTRL_ENABLE 0x1 /* PHY DLL is enabled */
+
+/* PHY DLL configuration register 1 */
+#define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25)
+#define PHY_DLL_CNFG1_SLVDLY_MASK GENMASK(5, 4) /* bits [5:4] */
+#define PHY_DLL_CNFG1_SLVDLY 0x2 /* DLL slave update delay input */
+#define PHY_DLL_CNFG1_WAITCYCLE 0x5 /* DLL wait cycle input */
+
+/* PHY DLL configuration register 2 */
+#define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26)
+#define PHY_DLL_CNFG2_JUMPSTEP 0xa /* DLL jump step input */
+
+/* PHY DLL master and slave delay line configuration settings */
+#define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28)
+#define PHY_DLLDL_CNFG_SLV_INPSEL_MASK GENMASK(6, 5) /* bits [6:5] */
+#define PHY_DLLDL_CNFG_SLV_INPSEL 0x3 /* clock source select for slave DL */
+
+#define FLAG_IO_FIXED_1V8 BIT(0)
+
#define BOUNDARY_OK(addr, len) \
((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
@@ -92,6 +184,8 @@ struct dwcmshc_priv {
struct clk *bus_clk;
int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA reg */
void *priv; /* pointer to SoC private stuff */
+ u16 delay_line;
+ u16 flags;
};
/*
@@ -157,6 +251,127 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_request(mmc, mrq);
}
+static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+
+ /* deassert phy reset & set tx drive strength */
+ val = PHY_CNFG_RSTN_DEASSERT;
+ val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
+ sdhci_writel(host, val, PHY_CNFG_R);
+
+ /* disable delay line */
+ sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
+
+ /* set delay line */
+ sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
+ sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
+
+ /* enable delay line */
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+
+ /* configure phy pads */
+ val = PHY_PAD_RXSEL_1V8;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
+
+ val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
+
+ val = PHY_PAD_RXSEL_1V8;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+
+ /* enable data strobe mode */
+ sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL),
+ PHY_DLLDL_CNFG_R);
+
+ /* enable phy dll */
+ sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
+}
+
+static void dwcmshc_phy_3_3v_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+
+ /* deassert phy reset & set tx drive strength */
+ val = PHY_CNFG_RSTN_DEASSERT;
+ val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
+ val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
+ sdhci_writel(host, val, PHY_CNFG_R);
+
+ /* disable delay line */
+ sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
+
+ /* set delay line */
+ sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
+ sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
+
+ /* enable delay line */
+ val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
+ val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
+ sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+
+ /* configure phy pads */
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
+ sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
+
+ val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
+
+ val = PHY_PAD_RXSEL_3V3;
+ val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
+ val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
+ sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+
+ /* enable phy dll */
+ sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
+}
+
+static void th1520_sdhci_set_phy(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ u16 emmc_ctrl;
+
+ /* Before power on, set PHY configs */
+ if (priv->flags & FLAG_IO_FIXED_1V8)
+ dwcmshc_phy_1_8v_init(host);
+ else
+ dwcmshc_phy_3_3v_init(host);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ emmc_ctrl |= DWCMSHC_CARD_IS_EMMC;
+ sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
+ }
+
+ sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
+ PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R);
+}
+
static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -189,9 +404,25 @@ static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
ctrl_2 |= DWCMSHC_CTRL_HS400;
}
+ if (priv->flags & FLAG_IO_FIXED_1V8)
+ ctrl_2 |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void th1520_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+ dwcmshc_set_uhs_signaling(host, timing);
+ if (timing == MMC_TIMING_MMC_HS400)
+ priv->delay_line = PHY_SDCLKDL_DC_HS400;
+ else
+ sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R);
+ th1520_sdhci_set_phy(host);
+}
+
static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -338,6 +569,80 @@ static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
sdhci_reset(host, mask);
}
+static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val = 0;
+
+ if (host->flags & SDHCI_HS400_TUNING)
+ return 0;
+
+ sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL),
+ PHY_ATDL_CNFG_R);
+ val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+
+ /*
+ * configure tuning settings:
+ * - center phase select code driven in block gap interval
+ * - disable reporting of framing errors
+ * - disable software managed tuning
+ * - disable user selection of sampling window edges,
+ * instead the tuning-calculated edges are used
+ */
+ val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN |
+ FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL));
+
+ /*
+ * configure tuning settings:
+ * - enable auto-tuning
+ * - enable sampling window threshold
+ * - stop clocks during phase code change
+ * - set max latency in cycles between tx and rx clocks
+ * - set max latency in cycles to switch output phase
+ * - set max sampling window threshold value
+ */
+ val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN;
+ val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY);
+ val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY);
+ val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL);
+
+ sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+ val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+
+ /* perform tuning */
+ sdhci_start_tuning(host);
+ host->tuning_loop_count = 128;
+ host->tuning_err = __sdhci_execute_tuning(host, opcode);
+ if (host->tuning_err) {
+ /* disable auto-tuning upon tuning error */
+ val &= ~AT_CTRL_AT_EN;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
+ dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err);
+ return -EIO;
+ }
+ sdhci_end_tuning(host);
+
+ return 0;
+}
+
+static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u16 ctrl_2;
+
+ sdhci_reset(host, mask);
+
+ if (priv->flags & FLAG_IO_FIXED_1V8) {
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) {
+ ctrl_2 |= SDHCI_CTRL_VDD_180;
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ }
+ }
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -356,6 +661,17 @@ static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
.adma_write_desc = dwcmshc_adma_write_desc,
};
+static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = th1520_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = th1520_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+ .voltage_switch = dwcmshc_phy_1_8v_init,
+ .platform_execute_tuning = &th1520_execute_tuning,
+};
+
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.ops = &sdhci_dwcmshc_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
@@ -379,6 +695,12 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = {
+ .ops = &sdhci_dwcmshc_th1520_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
@@ -447,6 +769,10 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
.compatible = "snps,dwcmshc-sdhci",
.data = &sdhci_dwcmshc_pdata,
},
+ {
+ .compatible = "thead,th1520-dwcmshc",
+ .data = &sdhci_dwcmshc_th1520_pdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
@@ -542,6 +868,30 @@ static int dwcmshc_probe(struct platform_device *pdev)
goto err_clk;
}
+ if (pltfm_data == &sdhci_dwcmshc_th1520_pdata) {
+ priv->delay_line = PHY_SDCLKDL_DC_DEFAULT;
+
+ if (device_property_read_bool(dev, "mmc-ddr-1_8v") ||
+ device_property_read_bool(dev, "mmc-hs200-1_8v") ||
+ device_property_read_bool(dev, "mmc-hs400-1_8v"))
+ priv->flags |= FLAG_IO_FIXED_1V8;
+ else
+ priv->flags &= ~FLAG_IO_FIXED_1V8;
+
+ /*
+ * start_signal_voltage_switch() will try 3.3V first
+ * then 1.8V. Use SDHCI_SIGNALING_180 rather than
+ * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V
+ * in sdhci_start_signal_voltage_switch().
+ */
+ if (priv->flags & FLAG_IO_FIXED_1V8) {
+ host->flags &= ~SDHCI_SIGNALING_330;
+ host->flags |= SDHCI_SIGNALING_180;
+ }
+
+ sdhci_enable_v4_mode(host);
+ }
+
#ifdef CONFIG_ACPI
if (pltfm_data == &sdhci_dwcmshc_bf3_pdata)
sdhci_enable_v4_mode(host);
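A worked example of the FIELD_PREP() packing used throughout the new PHY init helpers, for the 1.8V strobe-pad value composed in dwcmshc_phy_1_8v_init() above (the arithmetic in the comments is mine, not from the patch):

    #include <linux/bitfield.h>

    static u16 th1520_strobe_pad_val(void)      /* illustrative helper */
    {
            u16 val = PHY_PAD_RXSEL_1V8;                      /* 0x0001 */

            /* 0x2 into GENMASK(4, 3)  -> 0x0010 */
            val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
            /* 0x3 into GENMASK(8, 5)  -> 0x0060 */
            val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
            /* 0x3 into GENMASK(12, 9) -> 0x0600 */
            val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);

            return val;                                       /* 0x0671 */
    }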
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 0a26831b3..94076b095 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SDHCI Controller driver for TI's OMAP SoCs
*
* Copyright (C) 2017 Texas Instruments
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 25ba7aecc..0e52867f6 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -18,6 +18,8 @@
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include "sdhci-pltfm.h"
#include "sdhci-xenon.h"
@@ -422,6 +424,7 @@ static int xenon_probe_params(struct platform_device *pdev)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
u32 sdhc_id, nr_sdhc;
u32 tuning_count;
+ struct sysinfo si;
/* Disable HS200 on Armada AP806 */
if (priv->hw_version == XENON_AP806)
@@ -450,6 +453,23 @@ static int xenon_probe_params(struct platform_device *pdev)
}
priv->tuning_count = tuning_count;
+ /*
+ * AC5/X/IM hardware passes only 31 bits through the crossbar switch.
+ * With more than 2GB of memory we might hand out addresses above 2GB
+ * which cannot be properly represented. In this case, disable ADMA
+ * and 64-bit DMA and allow only SDMA. This effectively enables the
+ * bounce buffer quirk in the generic SDHCI driver, which makes sure
+ * DMA is only done from supported memory regions:
+ */
+ if (priv->hw_version == XENON_AC5) {
+ si_meminfo(&si);
+ if (si.totalram * si.mem_unit > SZ_2G) {
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
+ }
+ }
+
return xenon_phy_parse_params(dev, host);
}
@@ -562,6 +582,16 @@ static int xenon_probe(struct platform_device *pdev)
goto remove_sdhc;
pm_runtime_put_autosuspend(&pdev->dev);
+ /*
+ * If we previously detected AC5 with over 2GB of memory,
+ * then we disable ADMA and 64-bit DMA.
+ * This means the generic SDHCI driver has set the DMA mask to
+ * 32 bits. Since DDR starts at 0x2_0000_0000, we must use a
+ * 34-bit DMA mask to access this DDR memory:
+ */
+ if (priv->hw_version == XENON_AC5 &&
+ host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
return 0;
@@ -680,6 +710,7 @@ static const struct of_device_id sdhci_xenon_dt_ids[] = {
{ .compatible = "marvell,armada-ap807-sdhci", .data = (void *)XENON_AP807},
{ .compatible = "marvell,armada-cp110-sdhci", .data = (void *)XENON_CP110},
{ .compatible = "marvell,armada-3700-sdhci", .data = (void *)XENON_A3700},
+ { .compatible = "marvell,ac5-sdhci", .data = (void *)XENON_AC5},
{}
};
MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 3e9c6c908..0460d97aa 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -57,7 +57,8 @@ enum xenon_variant {
XENON_A3700,
XENON_AP806,
XENON_AP807,
- XENON_CP110
+ XENON_CP110,
+ XENON_AC5
};
struct xenon_priv {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ff41aa565..c79f73459 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2841,7 +2841,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
}
EXPORT_SYMBOL_GPL(sdhci_send_tuning);
-static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
int i;
@@ -2879,6 +2879,7 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
sdhci_reset_tuning(host);
return -EAGAIN;
}
+EXPORT_SYMBOL_GPL(__sdhci_execute_tuning);
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f219bdea8..a20864fc0 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -793,6 +793,7 @@ void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode);
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios);
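Exporting __sdhci_execute_tuning() lets platform drivers keep the core's tuning-command loop while bracketing it with vendor setup, exactly as th1520_execute_tuning() does above. A minimal sketch of such a hook (myhost_* is hypothetical):

    static int myhost_execute_tuning(struct sdhci_host *host, u32 opcode)
    {
            /* vendor-specific tuning setup would go here */

            sdhci_start_tuning(host);
            host->tuning_loop_count = 128;
            host->tuning_err = __sdhci_execute_tuning(host, opcode);
            if (host->tuning_err)
                    return -EIO;
            sdhci_end_tuning(host);
            return 0;
    }

wired up via .platform_execute_tuning in the driver's sdhci_ops.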
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 967bd2dfc..d659c5942 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -141,7 +141,6 @@ static const struct timing_data td[] = {
struct sdhci_am654_data {
struct regmap *base;
- bool legacy_otapdly;
int otap_del_sel[ARRAY_SIZE(td)];
int itap_del_sel[ARRAY_SIZE(td)];
int clkbuf_sel;
@@ -272,11 +271,7 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_set_clock(host, clock);
/* Setup DLL Output TAP delay */
- if (sdhci_am654->legacy_otapdly)
- otap_del_sel = sdhci_am654->otap_del_sel[0];
- else
- otap_del_sel = sdhci_am654->otap_del_sel[timing];
-
+ otap_del_sel = sdhci_am654->otap_del_sel[timing];
otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
@@ -314,10 +309,7 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
u32 mask, val;
/* Setup DLL Output TAP delay */
- if (sdhci_am654->legacy_otapdly)
- otap_del_sel = sdhci_am654->otap_del_sel[0];
- else
- otap_del_sel = sdhci_am654->otap_del_sel[timing];
+ otap_del_sel = sdhci_am654->otap_del_sel[timing];
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
val = (0x1 << OTAPDLYENA_SHIFT) |
@@ -577,32 +569,15 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
int i;
int ret;
- ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding,
- &sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
- if (ret) {
- /*
- * ti,otap-del-sel-legacy is mandatory, look for old binding
- * if not found.
- */
- ret = device_property_read_u32(dev, "ti,otap-del-sel",
- &sdhci_am654->otap_del_sel[0]);
- if (ret) {
- dev_err(dev, "Couldn't find otap-del-sel\n");
-
- return ret;
- }
-
- dev_info(dev, "Using legacy binding ti,otap-del-sel\n");
- sdhci_am654->legacy_otapdly = true;
-
- return 0;
- }
-
for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
ret = device_property_read_u32(dev, td[i].otap_binding,
&sdhci_am654->otap_del_sel[i]);
if (ret) {
+ if (i == MMC_TIMING_LEGACY) {
+ dev_err(dev, "Couldn't find mandatory ti,otap-del-sel-legacy\n");
+ return ret;
+ }
dev_dbg(dev, "Couldn't find %s\n",
td[i].otap_binding);
/*
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index f58cfb15d..b69dade3f 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -47,7 +47,7 @@ struct map_info uflash_map_templ = {
.bankwidth = UFLASH_BUSWIDTH,
};
-int uflash_devinit(struct platform_device *op, struct device_node *dp)
+static int uflash_devinit(struct platform_device *op, struct device_node *dp)
{
struct uflash_dev *up;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 5bc32108c..f0526dcc2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -376,10 +376,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
- if (tr->discard) {
+ if (tr->discard)
blk_queue_max_discard_sectors(new->rq, UINT_MAX);
- new->rq->limits.discard_granularity = tr->blksize;
- }
gd->queue = new->rq;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bb0759ca1..0de87bc63 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
+#include <linux/error-injection.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -620,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
+ of_node_put(mtd_dn);
break;
}
}
@@ -898,7 +900,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.name = compatible;
config.id = NVMEM_DEVID_AUTO;
config.owner = THIS_MODULE;
- config.add_legacy_fixed_of_cells = true;
+ config.add_legacy_fixed_of_cells = !mtd_type_is_nand(mtd);
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
config.ignore_wp = true;
@@ -1412,6 +1414,7 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
+ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
/*
* This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
@@ -1511,6 +1514,7 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
+ALLOW_ERROR_INJECTION(mtd_read, ERRNO);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf)
@@ -1527,6 +1531,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
+ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
/*
* In blackbox flight recorder like scenarios we want to make successful writes
@@ -2347,6 +2352,7 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
+ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
/*
* default_mtd_writev - the default writev method
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
index 959662900..968c5b674 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
@@ -85,7 +85,7 @@ MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
static struct platform_driver bcm63138_nand_driver = {
.probe = bcm63138_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "bcm63138_nand",
.pm = &brcmnand_pm_ops,
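This and the following brcmnand hunks move to .remove_new, the platform-bus remove callback that returns void (return values from .remove were silently ignored anyway). From include/linux/platform_device.h in this kernel:

    void (*remove_new)(struct platform_device *pdev);

so the shared brcmnand_remove() helper must now return void rather than int.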
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
index a06cd87f8..05b7b653b 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
@@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
static struct platform_driver bcm6368_nand_driver = {
.probe = bcm6368_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "bcm6368_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
index dd2797791..4e7e435ba 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcma_nand.c
@@ -119,7 +119,7 @@ static int brcmnand_bcma_nand_probe(struct platform_device *pdev)
static struct platform_driver brcmnand_bcma_nand_driver = {
.probe = brcmnand_bcma_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "bcma_brcmnand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 440bef477..b8e70fc64 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -625,6 +625,8 @@ enum {
/* Only for v7.2 */
#define ACC_CONTROL_ECC_EXT_SHIFT 13
+static int brcmnand_status(struct brcmnand_host *host);
+
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -1022,19 +1024,6 @@ static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
return -1;
}
-static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
-{
- struct brcmnand_controller *ctrl = host->ctrl;
- int shift = brcmnand_sector_1k_shift(ctrl);
- u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
- BRCMNAND_CS_ACC_CONTROL);
-
- if (shift < 0)
- return 0;
-
- return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
-}
-
static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
{
struct brcmnand_controller *ctrl = host->ctrl;
@@ -1061,10 +1050,11 @@ enum {
CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
};
-static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+static int bcmnand_ctrl_poll_status(struct brcmnand_host *host,
u32 mask, u32 expected_val,
unsigned long timeout_ms)
{
+ struct brcmnand_controller *ctrl = host->ctrl;
unsigned long limit;
u32 val;
@@ -1073,6 +1063,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
limit = jiffies + msecs_to_jiffies(timeout_ms);
do {
+ if (mask & INTFC_FLASH_STATUS)
+ brcmnand_status(host);
+
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
@@ -1084,6 +1077,9 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
* do a final check after time out in case the CPU was busy and the driver
* did not get enough time to perform the polling to avoid false alarms
*/
+ if (mask & INTFC_FLASH_STATUS)
+ brcmnand_status(host);
+
val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
if ((val & mask) == expected_val)
return 0;
@@ -1379,7 +1375,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
* make sure ctrl/flash ready before and after
* changing state of #WP pin
*/
- ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY |
NAND_STATUS_READY,
NAND_CTRL_RDY |
NAND_STATUS_READY, 0);
@@ -1387,9 +1383,10 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- nand_status_op(chip, NULL);
+ /* force controller operation to update internal copy of NAND chip status */
+ brcmnand_status(host);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
- ret = bcmnand_ctrl_poll_status(ctrl,
+ ret = bcmnand_ctrl_poll_status(host,
NAND_CTRL_RDY |
NAND_STATUS_READY |
NAND_STATUS_WP,
@@ -1629,13 +1626,13 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
*/
if (oops_in_progress) {
if (ctrl->cmd_pending &&
- bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
return;
} else
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
- ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
WARN_ON(ret);
mb(); /* flush previous writes */
@@ -1643,16 +1640,6 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
cmd << brcmnand_cmd_shift(ctrl));
}
-/***********************************************************************
- * NAND MTD API: read/program/erase
- ***********************************************************************/
-
-static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
- unsigned int ctrl)
-{
- /* intentionally left blank */
-}
-
static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
@@ -1664,7 +1651,7 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
if (mtd->oops_panic_write || ctrl->irq < 0) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
- sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+ sts = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
err = sts < 0;
} else {
@@ -1703,6 +1690,26 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
INTFC_FLASH_STATUS;
}
+static int brcmnand_status(struct brcmnand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ brcmnand_set_cmd_addr(mtd, 0);
+ brcmnand_send_cmd(host, CMD_STATUS_READ);
+
+ return brcmnand_waitfunc(chip);
+}
+
+static int brcmnand_reset(struct brcmnand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+
+ brcmnand_send_cmd(host, CMD_FLASH_RESET);
+
+ return brcmnand_waitfunc(chip);
+}
+
enum {
LLOP_RE = BIT(16),
LLOP_WE = BIT(17),
@@ -1752,190 +1759,6 @@ static int brcmnand_low_level_op(struct brcmnand_host *host,
return brcmnand_waitfunc(chip);
}
-static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
- int column, int page_addr)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct brcmnand_host *host = nand_get_controller_data(chip);
- struct brcmnand_controller *ctrl = host->ctrl;
- u64 addr = (u64)page_addr << chip->page_shift;
- int native_cmd = 0;
-
- if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
- command == NAND_CMD_RNDOUT)
- addr = (u64)column;
- /* Avoid propagating a negative, don't-care address */
- else if (page_addr < 0)
- addr = 0;
-
- dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
- (unsigned long long)addr);
-
- host->last_cmd = command;
- host->last_byte = 0;
- host->last_addr = addr;
-
- switch (command) {
- case NAND_CMD_RESET:
- native_cmd = CMD_FLASH_RESET;
- break;
- case NAND_CMD_STATUS:
- native_cmd = CMD_STATUS_READ;
- break;
- case NAND_CMD_READID:
- native_cmd = CMD_DEVICE_ID_READ;
- break;
- case NAND_CMD_READOOB:
- native_cmd = CMD_SPARE_AREA_READ;
- break;
- case NAND_CMD_ERASE1:
- native_cmd = CMD_BLOCK_ERASE;
- brcmnand_wp(mtd, 0);
- break;
- case NAND_CMD_PARAM:
- native_cmd = CMD_PARAMETER_READ;
- break;
- case NAND_CMD_SET_FEATURES:
- case NAND_CMD_GET_FEATURES:
- brcmnand_low_level_op(host, LL_OP_CMD, command, false);
- brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
- break;
- case NAND_CMD_RNDOUT:
- native_cmd = CMD_PARAMETER_CHANGE_COL;
- addr &= ~((u64)(FC_BYTES - 1));
- /*
- * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
- * NB: hwcfg.sector_size_1k may not be initialized yet
- */
- if (brcmnand_get_sector_size_1k(host)) {
- host->hwcfg.sector_size_1k =
- brcmnand_get_sector_size_1k(host);
- brcmnand_set_sector_size_1k(host, 0);
- }
- break;
- }
-
- if (!native_cmd)
- return;
-
- brcmnand_set_cmd_addr(mtd, addr);
- brcmnand_send_cmd(host, native_cmd);
- brcmnand_waitfunc(chip);
-
- if (native_cmd == CMD_PARAMETER_READ ||
- native_cmd == CMD_PARAMETER_CHANGE_COL) {
- /* Copy flash cache word-wise */
- u32 *flash_cache = (u32 *)ctrl->flash_cache;
- int i;
-
- brcmnand_soc_data_bus_prepare(ctrl->soc, true);
-
- /*
- * Must cache the FLASH_CACHE now, since changes in
- * SECTOR_SIZE_1K may invalidate it
- */
- for (i = 0; i < FC_WORDS; i++)
- /*
- * Flash cache is big endian for parameter pages, at
- * least on STB SoCs
- */
- flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
-
- brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
-
- /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
- if (host->hwcfg.sector_size_1k)
- brcmnand_set_sector_size_1k(host,
- host->hwcfg.sector_size_1k);
- }
-
- /* Re-enable protection is necessary only after erase */
- if (command == NAND_CMD_ERASE1)
- brcmnand_wp(mtd, 1);
-}
-
-static uint8_t brcmnand_read_byte(struct nand_chip *chip)
-{
- struct brcmnand_host *host = nand_get_controller_data(chip);
- struct brcmnand_controller *ctrl = host->ctrl;
- uint8_t ret = 0;
- int addr, offs;
-
- switch (host->last_cmd) {
- case NAND_CMD_READID:
- if (host->last_byte < 4)
- ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
- (24 - (host->last_byte << 3));
- else if (host->last_byte < 8)
- ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
- (56 - (host->last_byte << 3));
- break;
-
- case NAND_CMD_READOOB:
- ret = oob_reg_read(ctrl, host->last_byte);
- break;
-
- case NAND_CMD_STATUS:
- ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
- INTFC_FLASH_STATUS;
- if (wp_on) /* hide WP status */
- ret |= NAND_STATUS_WP;
- break;
-
- case NAND_CMD_PARAM:
- case NAND_CMD_RNDOUT:
- addr = host->last_addr + host->last_byte;
- offs = addr & (FC_BYTES - 1);
-
- /* At FC_BYTES boundary, switch to next column */
- if (host->last_byte > 0 && offs == 0)
- nand_change_read_column_op(chip, addr, NULL, 0, false);
-
- ret = ctrl->flash_cache[offs];
- break;
- case NAND_CMD_GET_FEATURES:
- if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
- ret = 0;
- } else {
- bool last = host->last_byte ==
- ONFI_SUBFEATURE_PARAM_LEN - 1;
- brcmnand_low_level_op(host, LL_OP_RD, 0, last);
- ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
- }
- }
-
- dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
- host->last_byte++;
-
- return ret;
-}
-
-static void brcmnand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
- int i;
-
- for (i = 0; i < len; i++, buf++)
- *buf = brcmnand_read_byte(chip);
-}
-
-static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- int i;
- struct brcmnand_host *host = nand_get_controller_data(chip);
-
- switch (host->last_cmd) {
- case NAND_CMD_SET_FEATURES:
- for (i = 0; i < len; i++)
- brcmnand_low_level_op(host, LL_OP_WR, buf[i],
- (i + 1) == len);
- break;
- default:
- BUG();
- break;
- }
-}
-
/*
* Kick EDU engine
*/
@@ -2345,8 +2168,9 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ host->last_addr = addr;
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
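
With the legacy page op gone, the read and write paths compute the target byte address themselves as addr = (u64)page << page_shift. For example, page 5 on a device with 4 KiB pages (page_shift = 12) lands at byte offset 0x5000; a one-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long long page = 5;
        unsigned int page_shift = 12;   /* 4 KiB pages */

        /* Same arithmetic as: u64 addr = (u64)page << chip->page_shift */
        printf("0x%llx\n", page << page_shift);   /* prints 0x5000 */
        return 0;
}
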
@@ -2359,8 +2183,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ u64 addr = (u64)page << chip->page_shift;
- nand_read_page_op(chip, page, 0, NULL, 0);
+ host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
@@ -2468,11 +2293,11 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ host->last_addr = addr;
- return nand_prog_page_end_op(chip);
+ return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2481,13 +2306,15 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ u64 addr = (u64)page << chip->page_shift;
+ int ret = 0;
- nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
- brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return nand_prog_page_end_op(chip);
+ return ret;
}
static int brcmnand_write_oob(struct nand_chip *chip, int page)
@@ -2511,6 +2338,134 @@ static int brcmnand_write_oob_raw(struct nand_chip *chip, int page)
return ret;
}
+static int brcmnand_exec_instr(struct brcmnand_host *host, int i,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr = &op->instrs[i];
+ struct brcmnand_controller *ctrl = host->ctrl;
+ const u8 *out;
+ bool last_op;
+ int ret = 0;
+ u8 *in;
+
+ /*
+ * The controller needs to be aware of the last command in the operation
+ * (WAITRDY excepted).
+ */
+ last_op = ((i == (op->ninstrs - 1)) && (instr->type != NAND_OP_WAITRDY_INSTR)) ||
+ ((i == (op->ninstrs - 2)) && (op->instrs[i + 1].type == NAND_OP_WAITRDY_INSTR));
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ brcmnand_low_level_op(host, LL_OP_CMD, instr->ctx.cmd.opcode, last_op);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ brcmnand_low_level_op(host, LL_OP_ADDR, instr->ctx.addr.addrs[i],
+ last_op && (i == (instr->ctx.addr.naddrs - 1)));
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ in = instr->ctx.data.buf.in;
+ for (i = 0; i < instr->ctx.data.len; i++) {
+ brcmnand_low_level_op(host, LL_OP_RD, 0,
+ last_op && (i == (instr->ctx.data.len - 1)));
+ in[i] = brcmnand_read_reg(host->ctrl, BRCMNAND_LL_RDATA);
+ }
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ out = instr->ctx.data.buf.out;
+ for (i = 0; i < instr->ctx.data.len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, out[i],
+ last_op && (i == (instr->ctx.data.len - 1)));
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ break;
+
+ default:
+ dev_err(ctrl->dev, "unsupported instruction type: %d\n",
+ instr->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmnand_op_is_status(const struct nand_operation *op)
+{
+ if ((op->ninstrs == 2) &&
+ (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
+ (op->instrs[0].ctx.cmd.opcode == NAND_CMD_STATUS) &&
+ (op->instrs[1].type == NAND_OP_DATA_IN_INSTR))
+ return 1;
+
+ return 0;
+}
+
+static int brcmnand_op_is_reset(const struct nand_operation *op)
+{
+ if ((op->ninstrs == 2) &&
+ (op->instrs[0].type == NAND_OP_CMD_INSTR) &&
+ (op->instrs[0].ctx.cmd.opcode == NAND_CMD_RESET) &&
+ (op->instrs[1].type == NAND_OP_WAITRDY_INSTR))
+ return 1;
+
+ return 0;
+}
+
+static int brcmnand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *status;
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ if (brcmnand_op_is_status(op)) {
+ status = op->instrs[1].ctx.data.buf.in;
+ ret = brcmnand_status(host);
+ if (ret < 0)
+ return ret;
+
+ *status = ret & 0xFF;
+
+ return 0;
+ } else if (brcmnand_op_is_reset(op)) {
+ ret = brcmnand_reset(host);
+ if (ret < 0)
+ return ret;
+
+ brcmnand_wp(mtd, 1);
+
+ return 0;
+ }
+
+ if (op->deassert_wp)
+ brcmnand_wp(mtd, 0);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = brcmnand_exec_instr(host, i, op);
+ if (ret)
+ break;
+ }
+
+ if (op->deassert_wp)
+ brcmnand_wp(mtd, 1);
+
+ return ret;
+}
+
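
In brcmnand_exec_instr() above, last_op flags the instruction that ends the controller sequence, treating a trailing WAITRDY as transparent: an instruction is last if it is the final one and not a WAITRDY, or the second to last and followed by a WAITRDY. A self-contained sketch of that predicate over a toy instruction list (enum names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

enum instr_type { OP_CMD, OP_ADDR, OP_DATA, OP_WAITRDY };

/* Mirror of the rule: instruction i is "last" if it is the final one and
 * not a WAITRDY, or if it is second to last and followed by a WAITRDY. */
static bool is_last_op(const enum instr_type *ops, int n, int i)
{
        return (i == n - 1 && ops[i] != OP_WAITRDY) ||
               (i == n - 2 && ops[i + 1] == OP_WAITRDY);
}

int main(void)
{
        enum instr_type ops[] = { OP_CMD, OP_ADDR, OP_WAITRDY };
        int n = sizeof(ops) / sizeof(ops[0]);

        for (int i = 0; i < n; i++)
                printf("instr %d last_op=%d\n", i, is_last_op(ops, n, i));
        return 0;   /* prints 0, 1, 0: the ADDR cycle closes the sequence */
}
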
/***********************************************************************
* Per-CS setup (1 NAND device)
***********************************************************************/
@@ -2821,6 +2776,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops brcmnand_controller_ops = {
.attach_chip = brcmnand_attach_chip,
+ .exec_op = brcmnand_exec_op,
};
static int brcmnand_init_cs(struct brcmnand_host *host,
@@ -2845,13 +2801,6 @@ static int brcmnand_init_cs(struct brcmnand_host *host,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl;
- chip->legacy.cmdfunc = brcmnand_cmdfunc;
- chip->legacy.waitfunc = brcmnand_waitfunc;
- chip->legacy.read_byte = brcmnand_read_byte;
- chip->legacy.read_buf = brcmnand_read_buf;
- chip->legacy.write_buf = brcmnand_write_buf;
-
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.read_page = brcmnand_read_page;
chip->ecc.write_page = brcmnand_write_page;
@@ -2863,6 +2812,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host,
chip->ecc.write_oob = brcmnand_write_oob;
chip->controller = &ctrl->controller;
+ ctrl->controller.controller_wp = 1;
/*
* The bootloader might have configured 16bit mode but
@@ -3299,7 +3249,7 @@ err:
}
EXPORT_SYMBOL_GPL(brcmnand_probe);
-int brcmnand_remove(struct platform_device *pdev)
+void brcmnand_remove(struct platform_device *pdev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
struct brcmnand_host *host;
@@ -3316,8 +3266,6 @@ int brcmnand_remove(struct platform_device *pdev)
clk_disable_unprepare(ctrl->clk);
dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
}
EXPORT_SYMBOL_GPL(brcmnand_remove);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
index f1f93d85f..928114c0b 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -88,7 +88,7 @@ static inline void brcmnand_soc_write(struct brcmnand_soc *soc, u32 val,
}
int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
-int brcmnand_remove(struct platform_device *pdev);
+void brcmnand_remove(struct platform_device *pdev);
extern const struct dev_pm_ops brcmnand_pm_ops;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
index 950923d97..558f083b9 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmstb_nand.c
@@ -23,7 +23,7 @@ static int brcmstb_nand_probe(struct platform_device *pdev)
static struct platform_driver brcmstb_nand_driver = {
.probe = brcmstb_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "brcmstb_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
index 089c70fc6..bf46c8b85 100644
--- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -134,7 +134,7 @@ MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
static struct platform_driver iproc_nand_driver = {
.probe = iproc_nand_probe,
- .remove = brcmnand_remove,
+ .remove_new = brcmnand_remove,
.driver = {
.name = "iproc_nand",
.pm = &brcmnand_pm_ops,
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5d2ddb037..8db7fc424 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1491,10 +1491,12 @@ static int __init doc_probe(unsigned long physadr)
else
numchips = doc2001_init(mtd);
- if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
- /* DBB note: i believe nand_cleanup is necessary here, as
- buffers may have been allocated in nand_base. Check with
- Thomas. FIX ME! */
+ ret = nand_scan(nand, numchips);
+ if (ret)
+ goto fail;
+
+ ret = doc->late_init(mtd);
+ if (ret) {
nand_cleanup(nand);
goto fail;
}
@@ -1552,7 +1554,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
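
The diskonchip cleanup above replaces the 0xffffffff sentinel entry with an ARRAY_SIZE()-bounded loop, so the table needs no magic terminator and the iteration can never run past it. A minimal illustration of the bounded style (address list shortened):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned long locs[] = { 0xc8000, 0xca000, 0xcc000 };

int main(void)
{
        /* Bounded iteration: no sentinel entry required in the table. */
        for (unsigned int i = 0; i < ARRAY_SIZE(locs); i++)
                printf("probe 0x%lx\n", locs[i]);
        return 0;
}
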
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index b3a881cbc..2a96a87cf 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -90,6 +90,8 @@
/* eMMC clock register, misc control */
#define CLK_SELECT_NAND BIT(31)
+#define CLK_ALWAYS_ON_NAND BIT(24)
+#define CLK_SELECT_FIX_PLL2 BIT(6)
#define NFC_CLK_CYCLE 6
@@ -509,7 +511,7 @@ static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf)
__le64 *info;
int i, count;
- for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) {
info = &meson_chip->info_buf[i];
*info |= oob_buf[count];
*info |= oob_buf[count + 1] << 8;
@@ -522,7 +524,7 @@ static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf)
__le64 *info;
int i, count;
- for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) {
+ for (i = 0, count = 0; i < nand->ecc.steps; i++, count += (2 + nand->ecc.bytes)) {
info = &meson_chip->info_buf[i];
oob_buf[count] = *info;
oob_buf[count + 1] = *info >> 8;
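
The meson_nand fix above changes the OOB stride from 2 to (2 + nand->ecc.bytes): each ECC step contributes two user bytes followed by its parity bytes, so the user bytes of step N start after the parity of step N - 1. A toy sketch of the resulting offsets (step count and parity size invented):

#include <stdio.h>

int main(void)
{
        int ecc_steps = 4, ecc_bytes = 14;

        /* Each ECC step owns 2 user bytes followed by ecc_bytes of parity,
         * so its user bytes live at count = step * (2 + ecc_bytes). */
        for (int i = 0, count = 0; i < ecc_steps; i++, count += 2 + ecc_bytes)
                printf("step %d: user bytes at OOB offsets %d,%d\n",
                       i, count, count + 1);
        return 0;
}
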
@@ -1154,7 +1156,7 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
return PTR_ERR(nfc->nand_clk);
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
- writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
+ writel(CLK_ALWAYS_ON_NAND | CLK_SELECT_NAND | CLK_SELECT_FIX_PLL2,
nfc->reg_clk);
ret = clk_prepare_enable(nfc->core_clk);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index ccbd42a42..2479fa98f 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -366,6 +366,10 @@ static int nand_check_wp(struct nand_chip *chip)
if (chip->options & NAND_BROKEN_XD)
return 0;
+ /* controller responsible for NAND write protect */
+ if (chip->controller->controller_wp)
+ return 0;
+
/* Check the WP bit */
ret = nand_status_op(chip, &status);
if (ret)
@@ -1537,7 +1541,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
if (naddrs < 0)
@@ -1960,7 +1965,8 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
- struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
+ instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index c506e92a3..1c76ee98e 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -128,7 +128,7 @@ struct pl35x_nand {
* @conf_regs: SMC configuration registers for command phase
* @io_regs: NAND data registers for data phase
* @controller: Core NAND controller structure
- * @chip: NAND chip information structure
+ * @chips: List of connected NAND chips
* @selected_chip: NAND chip currently selected by the controller
* @assigned_cs: List of assigned CS
* @ecc_buf: Temporary buffer to extract ECC bytes
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index b079605c8..b8cff9240 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2815,7 +2815,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
host->cfg0_raw & ~(7 << CW_PER_PAGE));
nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
instrs = 3;
- } else {
+ } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
return 0;
}
@@ -2830,9 +2830,8 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc_set_reg(chip, NAND_EXEC_CMD, 1);
write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
- 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
- NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == OP_BLOCK_ERASE)
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 596cf9a78..7baaef69d 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -98,7 +98,7 @@ enum nfc_type {
* @high: ECC count high bit index at register.
* @high_mask: mask bit
*/
-struct ecc_cnt_status {
+struct rk_ecc_cnt_status {
u8 err_flag_bit;
u8 low;
u8 low_mask;
@@ -108,6 +108,7 @@ struct ecc_cnt_status {
};
/**
+ * struct nfc_cfg - Rockchip NAND controller configuration
* @type: NFC version
* @ecc_strengths: ECC strengths
* @ecc_cfgs: ECC config values
@@ -144,8 +145,8 @@ struct nfc_cfg {
u32 int_st_off;
u32 oob0_off;
u32 oob1_off;
- struct ecc_cnt_status ecc0;
- struct ecc_cnt_status ecc1;
+ struct rk_ecc_cnt_status ecc0;
+ struct rk_ecc_cnt_status ecc1;
};
struct rk_nfc_nand_chip {
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 3d3d5c981..48c1d0eb6 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -105,7 +105,6 @@ struct s3c2410_nand_info;
/**
* struct s3c2410_nand_mtd - driver MTD structure
- * @mtd: The MTD instance to pass to the MTD layer.
* @chip: The NAND chip information.
* @set: The platform information supplied for this set of NAND chips.
* @info: Link back to the hardware information.
@@ -145,7 +144,6 @@ enum s3c_nand_clk_state {
* @clk_rate: The clock rate from @clk.
* @clk_state: The current clock state.
* @cpu_type: The exact type of this controller.
- * @freq_transition: CPUFreq notifier block
*/
struct s3c2410_nand_info {
/* mtd info */
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index eddcc0728..37f79c019 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -276,7 +276,7 @@ static const struct nand_controller_ops txx9ndfmc_controller_ops = {
.attach_chip = txx9ndfmc_attach_chip,
};
-static int __init txx9ndfmc_probe(struct platform_device *dev)
+static int txx9ndfmc_probe(struct platform_device *dev)
{
struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
int hold, spw;
@@ -369,13 +369,11 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
return 0;
}
-static int __exit txx9ndfmc_remove(struct platform_device *dev)
+static void txx9ndfmc_remove(struct platform_device *dev)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
int ret, i;
- if (!drvdata)
- return 0;
for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
struct mtd_info *mtd = drvdata->mtds[i];
struct nand_chip *chip;
@@ -392,7 +390,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
- return 0;
}
#ifdef CONFIG_PM
@@ -407,14 +404,14 @@ static int txx9ndfmc_resume(struct platform_device *dev)
#endif
static struct platform_driver txx9ndfmc_driver = {
- .remove = __exit_p(txx9ndfmc_remove),
+ .probe = txx9ndfmc_probe,
+ .remove_new = txx9ndfmc_remove,
.resume = txx9ndfmc_resume,
.driver = {
.name = "txx9ndfmc",
},
};
-
-module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+module_platform_driver(txx9ndfmc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 849ccfedb..e0b6715e5 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -974,7 +974,7 @@ static int spinand_manufacturer_match(struct spinand_device *spinand,
spinand->manufacturer = manufacturer;
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index e13b8d2dd..45d1153a0 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -16,12 +16,12 @@
* is to unlock the whole flash array on startup. Therefore, we have to support
* exactly this operation.
*/
-static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
-static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -37,7 +37,7 @@ static int at25fs_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return ret;
}
-static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int at25fs_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
@@ -69,7 +69,7 @@ static const struct spi_nor_fixups at25fs_nor_fixups = {
* Return: 0 on success, -error otherwise.
*/
static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
- uint64_t len, bool is_protect)
+ u64 len, bool is_protect)
{
int ret;
u8 sr;
@@ -118,20 +118,18 @@ static int atmel_nor_set_global_protection(struct spi_nor *nor, loff_t ofs,
return spi_nor_write_sr(nor, nor->bouncebuf, 1);
}
-static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+static int atmel_nor_global_protect(struct spi_nor *nor, loff_t ofs, u64 len)
{
return atmel_nor_set_global_protection(nor, ofs, len, true);
}
-static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+static int atmel_nor_global_unprotect(struct spi_nor *nor, loff_t ofs, u64 len)
{
return atmel_nor_set_global_protection(nor, ofs, len, false);
}
static int atmel_nor_is_global_protected(struct spi_nor *nor, loff_t ofs,
- uint64_t len)
+ u64 len)
{
int ret;
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 1c443fe56..4129764fa 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1060,24 +1060,32 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
}
/**
- * spi_nor_erase_chip() - Erase the entire flash memory.
+ * spi_nor_erase_die() - Erase the entire die.
* @nor: pointer to 'struct spi_nor'.
+ * @addr: address of the die.
+ * @die_size: size of the die.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_erase_chip(struct spi_nor *nor)
+static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
+ bool multi_die = nor->mtd.size != die_size;
int ret;
- dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+ dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));
if (nor->spimem) {
- struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
+ struct spi_mem_op op =
+ SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
+ nor->addr_nbytes, addr, multi_die);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
+ if (multi_die)
+ return -EOPNOTSUPP;
+
ret = spi_nor_controller_ops_write_reg(nor,
SPINOR_OP_CHIP_ERASE,
NULL, 0);
@@ -1792,6 +1800,51 @@ destroy_erase_cmd_list:
return ret;
}
+static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
+ size_t len, size_t die_size)
+{
+ unsigned long timeout;
+ int ret;
+
+ /*
+ * Scale the timeout linearly with the size of the flash, with
+ * a minimum calibrated to an old 2MB flash. We could try to
+ * pull these from CFI/SFDP, but these values should be good
+ * enough for now.
+ */
+ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+ CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+ (unsigned long)(nor->mtd.size / SZ_2M));
+
+ do {
+ ret = spi_nor_lock_device(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_write_enable(nor);
+ if (ret) {
+ spi_nor_unlock_device(nor);
+ return ret;
+ }
+
+ ret = spi_nor_erase_die(nor, addr, die_size);
+
+ spi_nor_unlock_device(nor);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ if (ret)
+ return ret;
+
+ addr += die_size;
+ len -= die_size;
+
+ } while (len);
+
+ return 0;
+}
+
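
The timeout in spi_nor_erase_dice() above scales linearly with flash size from a baseline calibrated on a 2 MiB part, clamped from below by that baseline. A sketch of the arithmetic with a placeholder base value (CHIP_ERASE_2MB_READY_WAIT_JIFFIES is not reproduced here):

#include <stdio.h>

#define SZ_2M (2UL * 1024 * 1024)
#define BASE_WAIT 40UL  /* placeholder for CHIP_ERASE_2MB_READY_WAIT_JIFFIES */

static unsigned long erase_timeout(unsigned long long flash_size)
{
        unsigned long t = BASE_WAIT * (unsigned long)(flash_size / SZ_2M);

        /* max(base, scaled): parts smaller than 2 MiB keep the baseline */
        return t > BASE_WAIT ? t : BASE_WAIT;
}

int main(void)
{
        /* A 64 MiB part waits 32x the 2 MiB baseline. */
        printf("%lu\n", erase_timeout(64ULL * 1024 * 1024));
        return 0;
}
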
/*
* Erase an address range on the nor chip. The address range may extend
* one or more erase sectors. Return an error if there is a problem erasing.
@@ -1799,8 +1852,10 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
- uint32_t rem;
+ u8 n_dice = nor->params->n_dice;
+ bool multi_die_erase = false;
+ u32 addr, len, rem;
+ size_t die_size;
int ret;
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
@@ -1815,39 +1870,22 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
addr = instr->addr;
len = instr->len;
+ if (n_dice) {
+ die_size = div_u64(mtd->size, n_dice);
+ if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
+ multi_die_erase = true;
+ } else {
+ die_size = mtd->size;
+ }
+
ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
if (ret)
return ret;
- /* whole-chip erase? */
- if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
- unsigned long timeout;
-
- ret = spi_nor_lock_device(nor);
- if (ret)
- goto erase_err;
-
- ret = spi_nor_write_enable(nor);
- if (ret) {
- spi_nor_unlock_device(nor);
- goto erase_err;
- }
-
- ret = spi_nor_erase_chip(nor);
- spi_nor_unlock_device(nor);
- if (ret)
- goto erase_err;
-
- /*
- * Scale the timeout linearly with the size of the flash, with
- * a minimum calibrated to an old 2MB flash. We could try to
- * pull these from CFI/SFDP, but these values should be good
- * enough for now.
- */
- timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
- CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
- (unsigned long)(mtd->size / SZ_2M));
- ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+ /* chip (die) erase? */
+ if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
+ multi_die_erase) {
+ ret = spi_nor_erase_dice(nor, addr, len, die_size);
if (ret)
goto erase_err;
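
The multi_die_erase test earlier in this hunk only fires when both the start address and the length are die-aligned, and the bitmask form of the test assumes die_size is a power of two, so that (x & (die_size - 1)) == 0 is equivalent to x % die_size == 0. A small sketch of that alignment check (die size invented):

#include <stdbool.h>
#include <stdio.h>

/* Power-of-two alignment test, as used for addr and len above. */
static bool die_aligned(unsigned long long addr, unsigned long long len,
                        unsigned long long die_size)
{
        return !(len & (die_size - 1)) && !(addr & (die_size - 1));
}

int main(void)
{
        unsigned long long die = 64ULL << 20;   /* 64 MiB die, invented */

        printf("%d\n", die_aligned(0, 2 * die, die));   /* 1: two whole dice */
        printf("%d\n", die_aligned(4096, die, die));    /* 0: misaligned start */
        return 0;
}
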
@@ -2146,7 +2184,7 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (is_power_of_2(page_size)) {
page_offset = addr & (page_size - 1);
} else {
- uint64_t aux = addr;
+ u64 aux = addr;
page_offset = do_div(aux, page_size);
}
@@ -2850,9 +2888,6 @@ static void spi_nor_init_flags(struct spi_nor *nor)
nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
}
- if (flags & NO_CHIP_ERASE)
- nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
-
if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
!nor->controller_ops)
nor->flags |= SNOR_F_RWW;
@@ -2897,17 +2932,22 @@ static int spi_nor_late_init_params(struct spi_nor *nor)
return ret;
}
+ /* Needed by some flashes late_init hooks. */
+ spi_nor_init_flags(nor);
+
if (nor->info->fixups && nor->info->fixups->late_init) {
ret = nor->info->fixups->late_init(nor);
if (ret)
return ret;
}
+ if (!nor->params->die_erase_opcode)
+ nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;
+
/* Default method kept for backward compatibility. */
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;
- spi_nor_init_flags(nor);
spi_nor_init_fixup_flags(nor);
/*
@@ -3145,8 +3185,20 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
struct spi_nor_flash_parameter *params = nor->params;
int ret;
+ if (enable) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ "enabling reset hack; may not recover from unexpected reboots\n");
+ }
+
ret = params->set_4byte_addr_mode(nor, enable);
- if (ret && ret != -ENOTSUPP)
+ if (ret && ret != -EOPNOTSUPP)
return ret;
if (enable) {
@@ -3193,20 +3245,8 @@ static int spi_nor_init(struct spi_nor *nor)
if (nor->addr_nbytes == 4 &&
nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
- !(nor->flags & SNOR_F_4B_OPCODES)) {
- /*
- * If the RESET# pin isn't hooked up properly, or the system
- * otherwise doesn't perform a reset command in the boot
- * sequence, it's impossible to 100% protect against unexpected
- * reboots (e.g., crashes). Warn the user (or hopefully, system
- * designer) that this is bad.
- */
- WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
- "enabling reset hack; may not recover from unexpected reboots\n");
- err = spi_nor_set_4byte_addr_mode(nor, true);
- if (err)
- return err;
- }
+ !(nor->flags & SNOR_F_4B_OPCODES))
+ return spi_nor_set_4byte_addr_mode(nor, true);
return 0;
}
@@ -3237,7 +3277,8 @@ static void spi_nor_soft_reset(struct spi_nor *nor)
ret = spi_mem_exec_op(nor->spimem, &op);
if (ret) {
- dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ if (ret != -EOPNOTSUPP)
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
return;
}
@@ -3452,9 +3493,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
{
const struct flash_info *info;
struct device *dev = nor->dev;
- struct mtd_info *mtd = &nor->mtd;
int ret;
- int i;
ret = spi_nor_check(nor);
if (ret)
@@ -3518,25 +3557,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
/* No mtd_info fields should be used up to this point. */
spi_nor_set_mtd_info(nor);
- dev_info(dev, "%s (%lld Kbytes)\n", info->name,
- (long long)mtd->size >> 10);
-
- dev_dbg(dev,
- "mtd .name = %s, .size = 0x%llx (%lldMiB), "
- ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
- mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
- mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
-
- if (mtd->numeraseregions)
- for (i = 0; i < mtd->numeraseregions; i++)
- dev_dbg(dev,
- "mtd.eraseregions[%d] = { .offset = 0x%llx, "
- ".erasesize = 0x%.8x (%uKiB), "
- ".numblocks = %d }\n",
- i, (long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].erasesize / 1024,
- mtd->eraseregions[i].numblocks);
+ dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
+ SPI_NOR_MAX_ID_LEN, nor->id);
+
return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 93cd2fc36..d36c0e072 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -85,9 +85,9 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPI_NOR_CHIP_ERASE_OP \
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), \
- SPI_MEM_OP_NO_ADDR, \
+#define SPI_NOR_DIE_ERASE_OP(opcode, addr_nbytes, addr, dice) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
+ SPI_MEM_OP_ADDR(dice ? addr_nbytes : 0, addr, 0), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
@@ -293,9 +293,9 @@ struct spi_nor_erase_map {
* @is_locked: check if a region of the SPI NOR is completely locked
*/
struct spi_nor_locking_ops {
- int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
- int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*lock)(struct spi_nor *nor, loff_t ofs, u64 len);
+ int (*unlock)(struct spi_nor *nor, loff_t ofs, u64 len);
+ int (*is_locked)(struct spi_nor *nor, loff_t ofs, u64 len);
};
/**
@@ -362,6 +362,7 @@ struct spi_nor_otp {
* command in octal DTR mode.
* @n_banks: number of banks.
* @n_dice: number of dice in the flash memory.
+ * @die_erase_opcode: die erase opcode. Defaults to SPINOR_OP_CHIP_ERASE.
* @vreg_offset: volatile register offset for each die.
* @hwcaps: describes the read and page program hardware
* capabilities.
@@ -399,6 +400,7 @@ struct spi_nor_flash_parameter {
u8 rdsr_addr_nbytes;
u8 n_banks;
u8 n_dice;
+ u8 die_erase_opcode;
u32 *vreg_offset;
struct spi_nor_hwcaps hwcaps;
@@ -463,7 +465,7 @@ struct spi_nor_id {
* struct flash_info - SPI NOR flash_info entry.
* @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
* older chips).
- * @name: the name of the flash.
+ * @name: (obsolete) the name of the flash. Do not set it for new additions.
* @size: the size of the flash in bytes.
* @sector_size: (optional) the size listed here is what works with
* SPINOR_OP_SE, which isn't necessarily called a "sector" by
@@ -487,7 +489,6 @@ struct spi_nor_id {
* Usually these will power-up in a write-protected
* state.
* SPI_NOR_NO_ERASE: no erase command needed.
- * NO_CHIP_ERASE: chip does not support chip erase.
* SPI_NOR_NO_FR: can't do fastread.
* SPI_NOR_QUAD_PP: flash supports Quad Input Page Program.
* SPI_NOR_RWW: flash supports reads while write.
@@ -537,10 +538,9 @@ struct flash_info {
#define SPI_NOR_BP3_SR_BIT6 BIT(4)
#define SPI_NOR_SWP_IS_VOLATILE BIT(5)
#define SPI_NOR_NO_ERASE BIT(6)
-#define NO_CHIP_ERASE BIT(7)
-#define SPI_NOR_NO_FR BIT(8)
-#define SPI_NOR_QUAD_PP BIT(9)
-#define SPI_NOR_RWW BIT(10)
+#define SPI_NOR_NO_FR BIT(7)
+#define SPI_NOR_QUAD_PP BIT(8)
+#define SPI_NOR_RWW BIT(9)
u8 no_sfdp_flags;
#define SPI_NOR_SKIP_SFDP BIT(0)
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index 6e163cb5b..2dbda6b69 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -138,7 +138,7 @@ static int spi_nor_params_show(struct seq_file *s, void *data)
if (!(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
string_get_size(params->size, 1, STRING_UNITS_2, buf, sizeof(buf));
- seq_printf(s, " %02x (%s)\n", SPINOR_OP_CHIP_ERASE, buf);
+ seq_printf(s, " %02x (%s)\n", nor->params->die_erase_opcode, buf);
}
seq_puts(s, "\nsector map\n");
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 8920547c1..3c6499fdb 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -11,6 +11,7 @@
/* flash_info mfr_flag. Used to read proprietary FSR register. */
#define USE_FSR BIT(0)
+#define SPINOR_OP_MT_DIE_ERASE 0xc4 /* Chip (die) erase opcode */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_MT_DTR_RD 0xfd /* Fast Read opcode in DTR mode */
@@ -192,6 +193,50 @@ static struct spi_nor_fixups mt25qu512a_fixups = {
.post_bfpt = mt25qu512a_post_bfpt_fixup,
};
+static int st_nor_four_die_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE;
+ params->n_dice = 4;
+
+ /*
+ * Unfortunately the die erase opcode does not have a 4-byte opcode
+ * equivalent for these flashes, and the SFDP 4BAIT table fails to
+ * consider the die erase too. We are forced to enter 4-byte address
+ * mode in order to benefit from the die erase.
+ */
+ return spi_nor_set_4byte_addr_mode(nor, true);
+}
+
+static int st_nor_two_die_late_init(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->die_erase_opcode = SPINOR_OP_MT_DIE_ERASE;
+ params->n_dice = 2;
+
+ /*
+ * Unfortunately the die erase opcode does not have a 4-byte opcode
+ * equivalent for these flashes, and the SFDP 4BAIT table fails to
+ * consider the die erase too. We are forced to enter 4-byte address
+ * mode in order to benefit from the die erase.
+ */
+ return spi_nor_set_4byte_addr_mode(nor, true);
+}
+
+static struct spi_nor_fixups n25q00_fixups = {
+ .late_init = st_nor_four_die_late_init,
+};
+
+static struct spi_nor_fixups mt25q01_fixups = {
+ .late_init = st_nor_two_die_late_init,
+};
+
+static struct spi_nor_fixups mt25q02_fixups = {
+ .late_init = st_nor_four_die_late_init,
+};
+
static const struct flash_info st_nor_parts[] = {
{
.name = "m25p05-nonjedec",
@@ -366,16 +411,17 @@ static const struct flash_info st_nor_parts[] = {
.name = "n25q00",
.size = SZ_128M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
- SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE,
+ SPI_NOR_BP3_SR_BIT6,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &n25q00_fixups,
}, {
.id = SNOR_ID(0x20, 0xba, 0x22),
.name = "mt25ql02g",
.size = SZ_256M,
- .flags = NO_CHIP_ERASE,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &mt25q02_fixups,
}, {
.id = SNOR_ID(0x20, 0xbb, 0x15),
.name = "n25q016a",
@@ -430,19 +476,24 @@ static const struct flash_info st_nor_parts[] = {
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
}, {
+ .id = SNOR_ID(0x20, 0xbb, 0x21, 0x10, 0x44, 0x00),
+ .name = "mt25qu01g",
+ .mfr_flags = USE_FSR,
+ .fixups = &mt25q01_fixups,
+ }, {
.id = SNOR_ID(0x20, 0xbb, 0x21),
.name = "n25q00a",
.size = SZ_128M,
- .flags = NO_CHIP_ERASE,
.no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &n25q00_fixups,
}, {
.id = SNOR_ID(0x20, 0xbb, 0x22),
.name = "mt25qu02g",
.size = SZ_256M,
- .flags = NO_CHIP_ERASE,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.mfr_flags = USE_FSR,
+ .fixups = &mt25q02_fixups,
}
};
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index b3b11dfed..57713de32 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -446,6 +446,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
u32 dword;
u16 half;
u8 erase_mask;
+ u8 wait_states, mode_clocks, opcode;
/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
@@ -631,6 +632,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
+ /* Parse 1-1-8 read instruction */
+ opcode = FIELD_GET(BFPT_DWORD17_RD_1_1_8_CMD, bfpt.dwords[SFDP_DWORD(17)]);
+ if (opcode) {
+ mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ wait_states = FIELD_GET(BFPT_DWORD17_RD_1_1_8_WAIT_STATES,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ mode_clocks, wait_states, opcode,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Parse 1-8-8 read instruction */
+ opcode = FIELD_GET(BFPT_DWORD17_RD_1_8_8_CMD, bfpt.dwords[SFDP_DWORD(17)]);
+ if (opcode) {
+ mode_clocks = FIELD_GET(BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ wait_states = FIELD_GET(BFPT_DWORD17_RD_1_8_8_WAIT_STATES,
+ bfpt.dwords[SFDP_DWORD(17)]);
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_8_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_8_8],
+ mode_clocks, wait_states, opcode,
+ SNOR_PROTO_1_8_8);
+ }
+
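
The two new blocks parse BFPT DWORD 17 with FIELD_GET() against the GENMASK() layouts added in sfdp.h: for 1-1-8 reads the opcode sits in bits 31:24, mode clocks in 23:21 and wait states in 20:16, while the 1-8-8 fields mirror that in bits 15:8, 7:5 and 4:0. A user-space equivalent with the shift-and-mask spelled out (the sample dword is invented):

#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalent of FIELD_GET(GENMASK(hi, lo), dword). */
static unsigned int field(uint32_t dword, int hi, int lo)
{
        return (dword >> lo) & ((1U << (hi - lo + 1)) - 1);
}

int main(void)
{
        uint32_t dword17 = 0x8b201cc4;  /* invented sample value */

        printf("1-1-8: cmd 0x%02x, mode clocks %u, wait states %u\n",
               field(dword17, 31, 24), field(dword17, 23, 21),
               field(dword17, 20, 16));
        printf("1-8-8: cmd 0x%02x, mode clocks %u, wait states %u\n",
               field(dword17, 15, 8), field(dword17, 7, 5),
               field(dword17, 4, 0));
        return 0;
}
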
/* 8D-8D-8D command extension. */
switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
case BFPT_DWORD18_CMD_EXT_REP:
@@ -968,6 +995,8 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
{ SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
{ SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
{ SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ { SNOR_HWCAPS_READ_1_1_8, BIT(20) },
+ { SNOR_HWCAPS_READ_1_8_8, BIT(21) },
};
static const struct sfdp_4bait programs[] = {
{ SNOR_HWCAPS_PP, BIT(6) },
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
index 6eb99e1cd..da0fe5aa9 100644
--- a/drivers/mtd/spi-nor/sfdp.h
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -118,6 +118,13 @@ struct sfdp_bfpt {
(BFPT_DWORD16_EN4B_EN4B | BFPT_DWORD16_EX4B_EX4B)
#define BFPT_DWORD16_SWRST_EN_RST BIT(12)
+#define BFPT_DWORD17_RD_1_1_8_CMD GENMASK(31, 24)
+#define BFPT_DWORD17_RD_1_1_8_MODE_CLOCKS GENMASK(23, 21)
+#define BFPT_DWORD17_RD_1_1_8_WAIT_STATES GENMASK(20, 16)
+#define BFPT_DWORD17_RD_1_8_8_CMD GENMASK(15, 8)
+#define BFPT_DWORD17_RD_1_8_8_MODE_CLOCKS GENMASK(7, 5)
+#define BFPT_DWORD17_RD_1_8_8_WAIT_STATES GENMASK(4, 0)
+
#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29)
#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */
#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 129213443..6cc237c24 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -17,6 +17,7 @@
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
+#define SPINOR_OP_CYPRESS_DIE_ERASE 0x61 /* Chip (die) erase */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_VREG 0x00800000
@@ -644,6 +645,7 @@ static int s25hx_t_late_init(struct spi_nor *nor)
params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
+ params->die_erase_opcode = SPINOR_OP_CYPRESS_DIE_ERASE;
return 0;
}
@@ -933,7 +935,6 @@ static const struct flash_info spansion_nor_parts[] = {
.id = SNOR_ID(0x34, 0x2a, 0x1c, 0x0f, 0x00, 0x90),
.name = "s25hl02gt",
.mfr_flags = USE_CLPEF,
- .flags = NO_CHIP_ERASE,
.fixups = &s25hx_t_fixups
}, {
.id = SNOR_ID(0x34, 0x2b, 0x19, 0x0f, 0x08, 0x90),
@@ -954,7 +955,6 @@ static const struct flash_info spansion_nor_parts[] = {
.id = SNOR_ID(0x34, 0x2b, 0x1c, 0x0f, 0x00, 0x90),
.name = "s25hs02gt",
.mfr_flags = USE_CLPEF,
- .flags = NO_CHIP_ERASE,
.fixups = &s25hx_t_fixups
}, {
.id = SNOR_ID(0x34, 0x5a, 0x1a),
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 44d2a546b..180b73906 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -13,12 +13,12 @@
#define SST26VF_CR_BPNV BIT(3)
-static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
-static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -38,7 +38,7 @@ static int sst26vf_nor_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return spi_nor_global_block_unlock(nor);
}
-static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int sst26vf_nor_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 585813310..e48c3cff2 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -53,7 +53,7 @@ static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
}
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
- uint64_t *len)
+ u64 *len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
@@ -90,10 +90,10 @@ static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
* (if @locked is false); false otherwise.
*/
static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
- uint64_t len, u8 sr, bool locked)
+ u64 len, u8 sr, bool locked)
{
loff_t lock_offs, lock_offs_max, offs_max;
- uint64_t lock_len;
+ u64 lock_len;
if (!len)
return true;
@@ -111,14 +111,13 @@ static bool spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
return (ofs >= lock_offs_max) || (offs_max <= lock_offs);
}
-static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
+static bool spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, u64 len, u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
-static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
- uint64_t len, u8 sr)
+static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, u64 len,
+ u8 sr)
{
return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
@@ -156,7 +155,7 @@ static bool spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs,
*
* Returns negative on errors, 0 on success.
*/
-static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, u64 len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
@@ -246,7 +245,7 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*
* Returns negative on errors, 0 on success.
*/
-static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, u64 len)
{
struct mtd_info *mtd = &nor->mtd;
u64 min_prot_len;
@@ -331,7 +330,7 @@ static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
* Returns 1 if entire region is locked, 0 if any portion is unlocked, and
* negative on errors.
*/
-static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, u64 len)
{
int ret;
@@ -353,7 +352,7 @@ void spi_nor_init_default_locking_ops(struct spi_nor *nor)
nor->params->locking_ops = &spi_nor_sr_locking_ops;
}
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
@@ -368,7 +367,7 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
@@ -383,7 +382,7 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, u64 len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 2dfdc555a..96064e4ba 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -78,6 +78,8 @@ static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
return 0;
+ if (attr == &dev_attr_partname.attr && !nor->info->name)
+ return 0;
if (attr == &dev_attr_jedec_id.attr && !nor->info->id && !nor->id)
return 0;
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 04da685c3..211f279a3 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -18,7 +18,6 @@
struct ssfdcr_record {
struct mtd_blktrans_dev mbd;
- int usecount;
unsigned char heads;
unsigned char sectors;
unsigned short cylinders;
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 2ed77b7b3..7499a5401 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -104,4 +104,13 @@ config MTD_UBI_BLOCK
If in doubt, say "N".
+config MTD_UBI_FAULT_INJECTION
+ bool "Fault injection capability of UBI device"
+ default n
+ depends on FAULT_INJECTION_DEBUG_FS
+ help
+ This option enables fault-injection support for UBI devices for
+ testing purposes.
+
+ If in doubt, say "N".
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 309a42aea..654bd7372 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -434,7 +434,7 @@ out_remove_minor:
list_del(&dev->list);
idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
- put_disk(dev->gd);
+ put_disk(gd);
out_free_tags:
blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 27168f511..d57f52bd2 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -10,7 +10,37 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/fault-inject.h>
+
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(fault_eccerr_attr);
+static DECLARE_FAULT_ATTR(fault_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_read_failure_attr);
+static DECLARE_FAULT_ATTR(fault_write_failure_attr);
+static DECLARE_FAULT_ATTR(fault_erase_failure_attr);
+static DECLARE_FAULT_ATTR(fault_power_cut_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_attr);
+static DECLARE_FAULT_ATTR(fault_io_ff_bitflips_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_attr);
+static DECLARE_FAULT_ATTR(fault_bad_hdr_ebadmsg_attr);
+
+#define FAIL_ACTION(name, fault_attr) \
+bool should_fail_##name(void) \
+{ \
+ return should_fail(&fault_attr, 1); \
+}
+FAIL_ACTION(eccerr, fault_eccerr_attr)
+FAIL_ACTION(bitflips, fault_bitflips_attr)
+FAIL_ACTION(read_failure, fault_read_failure_attr)
+FAIL_ACTION(write_failure, fault_write_failure_attr)
+FAIL_ACTION(erase_failure, fault_erase_failure_attr)
+FAIL_ACTION(power_cut, fault_power_cut_attr)
+FAIL_ACTION(io_ff, fault_io_ff_attr)
+FAIL_ACTION(io_ff_bitflips, fault_io_ff_bitflips_attr)
+FAIL_ACTION(bad_hdr, fault_bad_hdr_attr)
+FAIL_ACTION(bad_hdr_ebadmsg, fault_bad_hdr_ebadmsg_attr)
+#endif
/**
* ubi_dump_flash - dump a region of flash.
@@ -212,6 +242,52 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
*/
static struct dentry *dfs_rootdir;
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+static void dfs_create_fault_entry(struct dentry *parent)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("fault_inject", parent);
+ if (IS_ERR_OR_NULL(dir)) {
+ int err = dir ? PTR_ERR(dir) : -ENODEV;
+
+ pr_warn("UBI error: cannot create \"fault_inject\" debugfs directory, error %d\n",
+ err);
+ return;
+ }
+
+ fault_create_debugfs_attr("emulate_eccerr", dir,
+ &fault_eccerr_attr);
+
+ fault_create_debugfs_attr("emulate_read_failure", dir,
+ &fault_read_failure_attr);
+
+ fault_create_debugfs_attr("emulate_bitflips", dir,
+ &fault_bitflips_attr);
+
+ fault_create_debugfs_attr("emulate_write_failure", dir,
+ &fault_write_failure_attr);
+
+ fault_create_debugfs_attr("emulate_erase_failure", dir,
+ &fault_erase_failure_attr);
+
+ fault_create_debugfs_attr("emulate_power_cut", dir,
+ &fault_power_cut_attr);
+
+ fault_create_debugfs_attr("emulate_io_ff", dir,
+ &fault_io_ff_attr);
+
+ fault_create_debugfs_attr("emulate_io_ff_bitflips", dir,
+ &fault_io_ff_bitflips_attr);
+
+ fault_create_debugfs_attr("emulate_bad_hdr", dir,
+ &fault_bad_hdr_attr);
+
+ fault_create_debugfs_attr("emulate_bad_hdr_ebadmsg", dir,
+ &fault_bad_hdr_ebadmsg_attr);
+}
+#endif
+
/**
* ubi_debugfs_init - create UBI debugfs directory.
*
@@ -232,6 +308,10 @@ int ubi_debugfs_init(void)
return err;
}
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+ dfs_create_fault_entry(dfs_rootdir);
+#endif
+
return 0;
}
@@ -252,7 +332,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
struct dentry *dent = file->f_path.dentry;
struct ubi_device *ubi;
struct ubi_debug_info *d;
- char buf[8];
+ char buf[16];
int val;
ubi = ubi_get_device(ubi_num);
@@ -272,7 +352,12 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
val = d->emulate_bitflips;
else if (dent == d->dfs_emulate_io_failures)
val = d->emulate_io_failures;
- else if (dent == d->dfs_emulate_power_cut) {
+ else if (dent == d->dfs_emulate_failures) {
+ snprintf(buf, sizeof(buf), "0x%04x\n", d->emulate_failures);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
@@ -287,8 +372,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
count = simple_read_from_buffer(user_buf, count, ppos,
buf, strlen(buf));
goto out;
- }
- else {
+ } else {
count = -EINVAL;
goto out;
}
@@ -316,7 +400,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
struct ubi_device *ubi;
struct ubi_debug_info *d;
size_t buf_size;
- char buf[8] = {0};
+ char buf[16] = {0};
int val;
ubi = ubi_get_device(ubi_num);
@@ -330,7 +414,11 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
goto out;
}
- if (dent == d->dfs_power_cut_min) {
+ if (dent == d->dfs_emulate_failures) {
+ if (kstrtouint(buf, 0, &d->emulate_failures) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
count = -EINVAL;
goto out;
@@ -559,6 +647,12 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
(void *)ubi_num, &eraseblk_count_fops);
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+ d->dfs_emulate_failures = debugfs_create_file("emulate_failures",
+ mode, d->dfs_dir,
+ (void *)ubi_num,
+ &dfs_fops);
+#endif
return 0;
}
@@ -600,7 +694,5 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
if (ubi->dbg.power_cut_counter)
return 0;
- ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
- ubi_ro_mode(ubi);
return 1;
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 23676f32b..b2fd97548 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -53,56 +53,315 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi);
void ubi_debugfs_exit_dev(struct ubi_device *ubi);
/**
- * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * The following function is a legacy implementation of the UBI
+ * fault-injection hook. It is retained for compatibility even when the more
+ * powerful fault-injection framework below is available.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+
+static inline int ubi_dbg_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_bitflips)
+ return !get_random_u32_below(200);
+ return 0;
+}
+
+static inline int ubi_dbg_write_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(500);
+ return 0;
+}
+
+static inline int ubi_dbg_erase_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !get_random_u32_below(400);
+ return 0;
+}
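The legacy knobs above fire at a fixed rate: get_random_u32_below(n) is uniform over [0, n), so !get_random_u32_below(200) is true for roughly one call in 200 (likewise 1/500 for write failures and 1/400 for erase failures). A one-line illustration of the idiom, not part of the patch:

static inline bool one_in(u32 n)
{
	return !get_random_u32_below(n);	/* true with probability 1/n */
}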
+
+/**
+ * MASK_XXX: Mask for emulate_failures in ubi_debug_info. The mask is used to
+ * precisely control the type and location of fault injection.
+ */
+/* Emulate a power cut when writing EC/VID header */
+#define MASK_POWER_CUT_EC (1 << 0)
+#define MASK_POWER_CUT_VID (1 << 1)
+/* Emulate a power cut when writing data */
+#define MASK_POWER_CUT_DATA (1 << 2)
+/* Emulate bit-flips */
+#define MASK_BITFLIPS (1 << 3)
+/* Emulate an ECC error */
+#define MASK_ECCERR (1 << 4)
+/* Emulates -EIO during data read */
+#define MASK_READ_FAILURE (1 << 5)
+#define MASK_READ_FAILURE_EC (1 << 6)
+#define MASK_READ_FAILURE_VID (1 << 7)
+/* Emulates -EIO during data write */
+#define MASK_WRITE_FAILURE (1 << 8)
+/* Emulates -EIO while erasing a PEB */
+#define MASK_ERASE_FAILURE (1 << 9)
+/* Return UBI_IO_FF when reading EC/VID header */
+#define MASK_IO_FF_EC (1 << 10)
+#define MASK_IO_FF_VID (1 << 11)
+/* Return UBI_IO_FF_BITFLIPS when reading EC/VID header */
+#define MASK_IO_FF_BITFLIPS_EC (1 << 12)
+#define MASK_IO_FF_BITFLIPS_VID (1 << 13)
+/* Return UBI_IO_BAD_HDR when reading EC/VID header */
+#define MASK_BAD_HDR_EC (1 << 14)
+#define MASK_BAD_HDR_VID (1 << 15)
+/* Return UBI_IO_BAD_HDR_EBADMSG when reading EC/VID header */
+#define MASK_BAD_HDR_EBADMSG_EC (1 << 16)
+#define MASK_BAD_HDR_EBADMSG_VID (1 << 17)
+
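A companion userspace sketch for the emulate_failures knob wired up in the debug.c hunk above: dfs_file_write() parses the value with kstrtouint(buf, 0, ...), so hexadecimal mask combinations work directly. The debugfs path and the ubi0 device are assumptions:

#include <stdio.h>

int main(void)
{
	/* MASK_READ_FAILURE_EC | MASK_BAD_HDR_VID, spelled as raw bits */
	unsigned int mask = (1 << 6) | (1 << 15);
	FILE *f = fopen("/sys/kernel/debug/ubi/ubi0/emulate_failures", "w");

	if (!f)
		return 1;
	fprintf(f, "0x%x\n", mask);
	fclose(f);
	return 0;
}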
+#ifdef CONFIG_MTD_UBI_FAULT_INJECTION
+
+extern bool should_fail_eccerr(void);
+extern bool should_fail_bitflips(void);
+extern bool should_fail_read_failure(void);
+extern bool should_fail_write_failure(void);
+extern bool should_fail_erase_failure(void);
+extern bool should_fail_power_cut(void);
+extern bool should_fail_io_ff(void);
+extern bool should_fail_io_ff_bitflips(void);
+extern bool should_fail_bad_hdr(void);
+extern bool should_fail_bad_hdr_ebadmsg(void);
+
+static inline bool ubi_dbg_fail_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_BITFLIPS)
+ return should_fail_bitflips();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_write(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_WRITE_FAILURE)
+ return should_fail_write_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_erase(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_ERASE_FAILURE)
+ return should_fail_erase_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_power_cut(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_power_cut();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_read(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_read_failure();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_eccerr(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_failures & MASK_ECCERR)
+ return should_fail_eccerr();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_ff(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_io_ff();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_ff_bitflips(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_io_ff_bitflips();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_bad_hdr();
+ return false;
+}
+
+static inline bool ubi_dbg_fail_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ if (ubi->dbg.emulate_failures & caller)
+ return should_fail_bad_hdr_ebadmsg();
+ return false;
+}
+#else /* CONFIG_MTD_UBI_FAULT_INJECTION */
+
+#define ubi_dbg_fail_bitflip(u) false
+#define ubi_dbg_fail_write(u) false
+#define ubi_dbg_fail_erase(u) false
+#define ubi_dbg_fail_power_cut(u, c) false
+#define ubi_dbg_fail_read(u, c) false
+#define ubi_dbg_fail_eccerr(u) false
+#define ubi_dbg_fail_ff(u, c) false
+#define ubi_dbg_fail_ff_bitflips(u, c) false
+#define ubi_dbg_fail_bad_hdr(u, c) false
+#define ubi_dbg_fail_bad_hdr_ebadmsg(u, c) false
+
+#endif
+
+/**
+ * ubi_dbg_is_power_cut - if it is time to emulate a power cut.
+ * @ubi: UBI device description object
+ * @caller: fault injection location, one of MASK_POWER_CUT_*
*
- * Returns non-zero if the UBI background thread is disabled for testing
- * purposes.
+ * Returns true if power cut should be emulated, otherwise returns false.
*/
-static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_power_cut(struct ubi_device *ubi,
+ unsigned int caller)
{
- return ubi->dbg.disable_bgt;
+ if (ubi_dbg_power_cut(ubi, caller))
+ return true;
+ return ubi_dbg_fail_power_cut(ubi, caller);
}
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
* @ubi: UBI device description object
*
- * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ * Returns true if a bit-flip should be emulated, otherwise returns false.
*/
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_bitflips)
- return !get_random_u32_below(200);
- return 0;
+ if (ubi_dbg_bitflip(ubi))
+ return true;
+ return ubi_dbg_fail_bitflip(ubi);
}
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
* @ubi: UBI device description object
*
- * Returns non-zero if a write failure should be emulated, otherwise returns
- * zero.
+ * Returns true if a write failure should be emulated, otherwise returns
+ * false.
*/
-static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_io_failures)
- return !get_random_u32_below(500);
- return 0;
+ if (ubi_dbg_write_failure(ubi))
+ return true;
+ return ubi_dbg_fail_write(ubi);
}
/**
* ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
* @ubi: UBI device description object
*
- * Returns non-zero if an erase failure should be emulated, otherwise returns
- * zero.
+ * Returns true if an erase failure should be emulated, otherwise returns
+ * false.
*/
-static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+static inline bool ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi->dbg.emulate_io_failures)
- return !get_random_u32_below(400);
- return 0;
+ if (ubi_dbg_erase_failure(ubi))
+ return true;
+ return ubi_dbg_fail_erase(ubi);
+}
+
+/**
+ * ubi_dbg_is_eccerr - if it is time to emulate ECC error.
+ * @ubi: UBI device description object
+ *
+ * Returns true if an ECC error should be emulated, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_eccerr(const struct ubi_device *ubi)
+{
+ return ubi_dbg_fail_eccerr(ubi);
+}
+
+/**
+ * ubi_dbg_is_read_failure - if it is time to emulate a read failure.
+ * @ubi: UBI device description object
+ * @caller: fault injection location, one of MASK_READ_FAILURE*
+ *
+ * Returns true if a read failure should be emulated, otherwise returns
+ * false.
+ */
+static inline bool ubi_dbg_is_read_failure(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_read(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff - if it is time to emulate that the read region is all 0xFF.
+ * @ubi: UBI device description object
+ * @caller: fault injection location, one of MASK_IO_FF_*
+ *
+ * Returns true if the read region should be emulated as all 0xFF, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_ff(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_ff(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_ff_bitflips - if it is time to emulate that the read region is
+ * all 0xFF with an error reported by the MTD driver.
+ *
+ * @ubi: UBI device description object
+ * @caller: fault injection location, one of MASK_IO_FF_BITFLIPS_*
+ *
+ * Returns true if the read region should be emulated as all 0xFF with an
+ * error reported by the MTD driver, otherwise returns false.
+ */
+static inline bool ubi_dbg_is_ff_bitflips(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_ff_bitflips(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr - if it is time to emulate a bad header.
+ * @ubi: UBI device description object
+ * @caller: fault injection location, one of MASK_BAD_HDR_*
+ *
+ * Returns true if a bad header error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_bad_hdr(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bad_hdr_ebadmsg - if it is time to emulate a bad header with
+ * ECC error.
+ *
+ * @ubi: UBI device description object
+ * @caller: fault injection location, one of MASK_BAD_HDR_EBADMSG_*
+ *
+ * Returns true if a bad header with ECC error should be emulated, otherwise
+ * returns false.
+ */
+static inline bool ubi_dbg_is_bad_hdr_ebadmsg(const struct ubi_device *ubi,
+ unsigned int caller)
+{
+ return ubi_dbg_fail_bad_hdr_ebadmsg(ubi, caller);
+}
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
}
static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
@@ -125,5 +384,4 @@ static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
ubi->dbg.chk_fastmap = 1;
}
-int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 01b644861..a4999bce4 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -195,7 +195,19 @@ retry:
if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
- err = UBI_IO_BITFLIPS;
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE)) {
+ ubi_warn(ubi, "cannot read %d bytes from PEB %d:%d (emulated)",
+ len, pnum, offset);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_eccerr(ubi)) {
+ ubi_warn(ubi, "ECC error (emulated) while reading %d bytes from PEB %d:%d, read %zd bytes",
+ len, pnum, offset, read);
+ return -EBADMSG;
}
}
@@ -782,7 +794,36 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* If there was %-EBADMSG, but the header CRC is still OK, report about
* a bit-flip to force scrubbing on this PEB.
*/
- return read_err ? UBI_IO_BITFLIPS : 0;
+ if (read_err)
+ return UBI_IO_BITFLIPS;
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_EC)) {
+ ubi_warn(ubi, "cannot read EC header from PEB %d (emulated)",
+ pnum);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_ff(ubi, MASK_IO_FF_EC)) {
+ ubi_warn(ubi, "bit-all-ff (emulated)");
+ return UBI_IO_FF;
+ }
+
+ if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_EC)) {
+ ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_EC)) {
+ ubi_warn(ubi, "bad_hdr (emulated)");
+ return UBI_IO_BAD_HDR;
+ }
+
+ if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_EC)) {
+ ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ return 0;
}
/**
@@ -821,8 +862,11 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
- if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_EC)) {
+ ubi_warn(ubi, "emulating a power cut when writing EC header");
+ ubi_ro_mode(ubi);
return -EROFS;
+ }
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
@@ -1029,7 +1073,36 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
return -EINVAL;
}
- return read_err ? UBI_IO_BITFLIPS : 0;
+ if (read_err)
+ return UBI_IO_BITFLIPS;
+
+ if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_VID)) {
+ ubi_warn(ubi, "cannot read VID header from PEB %d (emulated)",
+ pnum);
+ return -EIO;
+ }
+
+ if (ubi_dbg_is_ff(ubi, MASK_IO_FF_VID)) {
+ ubi_warn(ubi, "bit-all-ff (emulated)");
+ return UBI_IO_FF;
+ }
+
+ if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_VID)) {
+ ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)");
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_VID)) {
+ ubi_warn(ubi, "bad_hdr (emulated)");
+ return UBI_IO_BAD_HDR;
+ }
+
+ if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_VID)) {
+ ubi_warn(ubi, "bad_hdr with ECC error (emulated)");
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ return 0;
}
/**
@@ -1071,8 +1144,11 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (err)
return err;
- if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_VID)) {
+ ubi_warn(ubi, "emulating a power cut when writing VID header");
+ ubi_ro_mode(ubi);
return -EROFS;
+ }
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index a5ec566df..0b42bb45d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -145,17 +145,6 @@ enum {
UBI_BAD_FASTMAP,
};
-/*
- * Flags for emulate_power_cut in ubi_debug_info
- *
- * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
- * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
- */
-enum {
- POWER_CUT_EC_WRITE = 0x01,
- POWER_CUT_VID_WRITE = 0x02,
-};
-
/**
* struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
* flash.
@@ -404,6 +393,7 @@ struct ubi_volume_desc {
* @power_cut_counter: count down for writes left until emulated power cut
* @power_cut_min: minimum number of writes before emulating a power cut
* @power_cut_max: maximum number of writes until emulating a power cut
+ * @emulate_failures: emulate failures for testing purposes
* @dfs_dir_name: name of debugfs directory containing files of this UBI device
* @dfs_dir: direntry object of the UBI device debugfs directory
* @dfs_chk_gen: debugfs knob to enable UBI general extra checks
@@ -415,6 +405,7 @@ struct ubi_volume_desc {
* @dfs_emulate_power_cut: debugfs knob to emulate power cuts
* @dfs_power_cut_min: debugfs knob for minimum writes before power cut
* @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ * @dfs_emulate_failures: debugfs entry to control the fault injection type
*/
struct ubi_debug_info {
unsigned int chk_gen:1;
@@ -427,6 +418,7 @@ struct ubi_debug_info {
unsigned int power_cut_counter;
unsigned int power_cut_min;
unsigned int power_cut_max;
+ unsigned int emulate_failures;
char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
struct dentry *dfs_dir;
struct dentry *dfs_chk_gen;
@@ -438,6 +430,7 @@ struct ubi_debug_info {
struct dentry *dfs_emulate_power_cut;
struct dentry *dfs_power_cut_min;
struct dentry *dfs_power_cut_max;
+ struct dentry *dfs_emulate_failures;
};
/**
@@ -1130,6 +1123,19 @@ static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
return vidb->hdr;
}
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn(ubi, "switch to read-only mode");
+ dump_stack();
+ }
+}
+
/*
* This function is equivalent to 'ubi_io_read()', but @offset is relative to
* the beginning of the logical eraseblock, not to the beginning of the
@@ -1151,20 +1157,13 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
int pnum, int offset, int len)
{
ubi_assert(offset >= 0);
- return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
-}
-/**
- * ubi_ro_mode - switch to read-only mode.
- * @ubi: UBI device description object
- */
-static inline void ubi_ro_mode(struct ubi_device *ubi)
-{
- if (!ubi->ro_mode) {
- ubi->ro_mode = 1;
- ubi_warn(ubi, "switch to read-only mode");
- dump_stack();
+ if (ubi_dbg_power_cut(ubi, MASK_POWER_CUT_DATA)) {
+ ubi_warn(ubi, "XXXXX emulating a power cut when writing data XXXXX");
+ ubi_ro_mode(ubi);
+ return -EROFS;
}
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
}
/**
diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c
index fd1d121a5..30a952c34 100644
--- a/drivers/mux/mmio.c
+++ b/drivers/mux/mmio.c
@@ -44,15 +44,20 @@ static int mux_mmio_probe(struct platform_device *pdev)
int ret;
int i;
- if (of_device_is_compatible(np, "mmio-mux"))
+ if (of_device_is_compatible(np, "mmio-mux")) {
regmap = syscon_node_to_regmap(np->parent);
- else
- regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- dev_err(dev, "failed to get regmap: %d\n", ret);
- return ret;
+ } else {
+ regmap = device_node_to_regmap(np);
+ /* Fallback to checking the parent node on "real" errors. */
+ if (IS_ERR(regmap) && regmap != ERR_PTR(-EPROBE_DEFER)) {
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ regmap = ERR_PTR(-ENODEV);
+ }
}
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "failed to get regmap\n");
ret = of_property_count_u32_elems(np, "mux-reg-masks");
if (ret == 0 || ret % 2)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index af0da4bb4..8ca0bc223 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -434,6 +434,7 @@ config VIRTIO_NET
tristate "Virtio network driver"
depends on VIRTIO
select NET_FAILOVER
+ select DIMLIB
help
This is the virtual network driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 8c651fdee..57f172906 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void)
module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
+MODULE_DESCRIPTION("ARCnet raw mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 8c3ccc7c8..53d10a04d 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -312,6 +312,7 @@ module_param(node, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index c09b56784..7a0a79973 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void)
module_init(capmode_module_init);
module_exit(capmode_module_exit);
+MODULE_DESCRIPTION("ARCnet CAP mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 7b5c8bb02..c5e571ec9 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,6 +61,7 @@ module_param(timeout, int, 0);
module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver");
MODULE_LICENSE("GPL");
static void led_tx_set(struct led_classdev *led_cdev,
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 06e1651b5..a0053e399 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
+MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver");
MODULE_LICENSE("GPL");
#ifdef MODULE
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index dc3253b31..75f08aa75 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -97,6 +97,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver");
MODULE_LICENSE("GPL");
/*====================================================================*/
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 37b47749f..3b463fbc6 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver");
MODULE_LICENSE("GPL");
#ifndef MODULE
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f49dae194..b3b287c16 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}
+MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver");
MODULE_LICENSE("GPL");
static int __init com90xx_init(void)
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index a7752a5b6..46519ca63 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void)
module_init(arcnet_rfc1051_init);
module_exit(arcnet_rfc1051_exit);
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module");
MODULE_LICENSE("GPL");
/* Determine a packet's protocol ID.
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index a4c856282..0edf35d97 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -35,6 +35,7 @@
#include "arcdevice.h"
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module");
MODULE_LICENSE("GPL");
static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b094c48be..cd0683bcc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5757,10 +5757,8 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct ethtool_ts_info ts_info;
- const struct ethtool_ops *ops;
struct net_device *real_dev;
bool sw_tx_support = false;
- struct phy_device *phydev;
struct list_head *iter;
struct slave *slave;
int ret = 0;
@@ -5771,29 +5769,12 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
rcu_read_unlock();
if (real_dev) {
- ops = real_dev->ethtool_ops;
- phydev = real_dev->phydev;
-
- if (phy_has_tsinfo(phydev)) {
- ret = phy_ts_info(phydev, info);
- goto out;
- } else if (ops->get_ts_info) {
- ret = ops->get_ts_info(real_dev, info);
- goto out;
- }
+ ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
- ret = -1;
- ops = slave->dev->ethtool_ops;
- phydev = slave->dev->phydev;
-
- if (phy_has_tsinfo(phydev))
- ret = phy_ts_info(phydev, &ts_info);
- else if (ops->get_ts_info)
- ret = ops->get_ts_info(slave->dev, &ts_info);
-
+ ret = ethtool_get_ts_info_by_layer(slave->dev, &ts_info);
if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
sw_tx_support = true;
continue;
@@ -5805,15 +5786,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
rcu_read_unlock();
}
- ret = 0;
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
if (sw_tx_support)
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
-
-out:
dev_put(real_dev);
return ret;
}
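The deleted lines make the behaviour of the new helper easy to infer; the following is a hypothetical sketch of ethtool_get_ts_info_by_layer() reconstructed purely from the code this hunk removes — the real net/ethtool implementation may differ:

static int example_get_ts_info_by_layer(struct net_device *dev,
					struct ethtool_ts_info *info)
{
	/* Prefer the PHY's timestamping capabilities... */
	if (phy_has_tsinfo(dev->phydev))
		return phy_ts_info(dev->phydev, info);
	/* ...then the MAC driver's... */
	if (dev->ethtool_ops->get_ts_info)
		return dev->ethtool_ops->get_ts_info(dev, info);
	/* ...else fall back to software timestamping defaults. */
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	return 0;
}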
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f44ba2600..e2ec69aa4 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -30,9 +30,9 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -259,22 +259,13 @@ static int c_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
- const struct of_device_id *match;
struct resource *mem;
int irq;
struct clk *clk;
const struct c_can_driver_data *drvdata;
struct device_node *np = pdev->dev.of_node;
- match = of_match_device(c_can_of_table, &pdev->dev);
- if (match) {
- drvdata = match->data;
- } else if (pdev->id_entry->driver_data) {
- drvdata = (struct c_can_driver_data *)
- platform_get_device_id(pdev)->driver_data;
- } else {
- return -ENODEV;
- }
+ drvdata = device_get_match_data(&pdev->dev);
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index d15f85a40..8ea7f2795 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -23,11 +23,11 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -2034,7 +2034,6 @@ MODULE_DEVICE_TABLE(platform, flexcan_id_table);
static int flexcan_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
@@ -2090,14 +2089,7 @@ static int flexcan_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- of_id = of_match_device(flexcan_of_match, &pdev->dev);
- if (of_id)
- devtype_data = of_id->data;
- else if (platform_get_device_id(pdev)->driver_data)
- devtype_data = (struct flexcan_devtype_data *)
- platform_get_device_id(pdev)->driver_data;
- else
- return -ENODEV;
+ devtype_data = device_get_match_data(&pdev->dev);
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!((devtype_data->quirks &
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 4837df6ef..5b3d69c3b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -290,7 +292,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
int irq, mscan_clksrc = 0;
int err = -ENOMEM;
- data = of_device_get_match_data(&ofdev->dev);
+ data = device_get_match_data(&ofdev->dev);
if (!data)
return -EINVAL;
@@ -351,13 +353,11 @@ exit_unmap_mem:
static void mpc5xxx_can_remove(struct platform_device *ofdev)
{
- const struct of_device_id *match;
const struct mpc5xxx_can_data *data;
struct net_device *dev = platform_get_drvdata(ofdev);
struct mscan_priv *priv = netdev_priv(dev);
- match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
- data = match ? match->data : NULL;
+ data = device_get_match_data(&ofdev->dev);
unregister_mscandev(dev);
if (data && data->put_clock)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 24ad9f593..1efa39e13 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -1143,7 +1143,7 @@ static void __exit peak_usb_exit(void)
int err;
/* last chance do send any synchronous commands here */
- err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL,
+ err = driver_for_each_device(&peak_usb_driver.driver, NULL,
NULL, peak_usb_do_device_exit);
if (err)
pr_err("%s: failed to stop all can devices (err %d)\n",
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index abe58f103..3722eaa84 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -20,8 +20,8 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -1726,8 +1726,7 @@ static int xcan_probe(struct platform_device *pdev)
struct net_device *ndev;
struct xcan_priv *priv;
struct phy *transceiver;
- const struct of_device_id *of_id;
- const struct xcan_devtype_data *devtype = &xcan_axi_data;
+ const struct xcan_devtype_data *devtype;
void __iomem *addr;
int ret;
int rx_max, tx_max;
@@ -1741,9 +1740,7 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- of_id = of_match_device(xcan_of_match, &pdev->dev);
- if (of_id && of_id->data)
- devtype = of_id->data;
+ devtype = device_get_match_data(&pdev->dev);
hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
"tx-mailbox-count" : "tx-fifo-depth";
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cadee5505..4a52ccbe3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -621,8 +621,6 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
goto err_of_node_put;
}
- priv->master_mii_dn = dn;
-
priv->user_mii_bus = mdiobus_alloc();
if (!priv->user_mii_bus) {
err = -ENOMEM;
@@ -635,7 +633,6 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->user_mii_bus->write = bcm_sf2_sw_mdio_write;
snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
index++);
- priv->user_mii_bus->dev.of_node = dn;
/* Include the pseudo-PHY address to divert reads towards our
* workaround. This is only required for 7445D0, since 7445E0
@@ -683,9 +680,11 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->user_mii_bus);
- if (err && dn)
+ if (err)
goto err_free_user_mii_bus;
+ of_node_put(dn);
+
return 0;
err_free_user_mii_bus:
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 424f896b5..f95f4880b 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -107,7 +107,6 @@ struct bcm_sf2_priv {
/* Master and slave MDIO bus controller */
unsigned int indir_phy_mask;
- struct device_node *master_mii_dn;
struct mii_bus *user_mii_bus;
struct mii_bus *master_mii_bus;
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
index 237066d30..14ca42491 100644
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ b/drivers/net/dsa/dsa_loop_bdinfo.c
@@ -32,4 +32,5 @@ static int __init dsa_loop_bdinfo_init(void)
}
arch_initcall(dsa_loop_bdinfo_init)
+MODULE_DESCRIPTION("DSA mock-up switch driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 9c185c9f0..de48b1940 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -505,27 +505,34 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
return gswip_mdio_r(priv, GSWIP_MDIO_READ);
}
-static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
+static int gswip_mdio(struct gswip_priv *priv)
{
- struct dsa_switch *ds = priv->ds;
- int err;
+ struct device_node *mdio_np, *switch_np = priv->dev->of_node;
+ struct device *dev = priv->dev;
+ struct mii_bus *bus;
+ int err = 0;
- ds->user_mii_bus = mdiobus_alloc();
- if (!ds->user_mii_bus)
- return -ENOMEM;
+ mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
+ if (!of_device_is_available(mdio_np))
+ goto out_put_node;
- ds->user_mii_bus->priv = priv;
- ds->user_mii_bus->read = gswip_mdio_rd;
- ds->user_mii_bus->write = gswip_mdio_wr;
- ds->user_mii_bus->name = "lantiq,xrx200-mdio";
- snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
- dev_name(priv->dev));
- ds->user_mii_bus->parent = priv->dev;
- ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus) {
+ err = -ENOMEM;
+ goto out_put_node;
+ }
- err = of_mdiobus_register(ds->user_mii_bus, mdio_np);
- if (err)
- mdiobus_free(ds->user_mii_bus);
+ bus->priv = priv;
+ bus->read = gswip_mdio_rd;
+ bus->write = gswip_mdio_wr;
+ bus->name = "lantiq,xrx200-mdio";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ err = devm_of_mdiobus_register(dev, bus, mdio_np);
+
+out_put_node:
+ of_node_put(mdio_np);
return err;
}
@@ -1759,7 +1766,7 @@ static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
return;
for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++)
- ethtool_sprintf(&data, "%s", gswip_rmon_cnt[i].name);
+ ethtool_puts(&data, gswip_rmon_cnt[i].name);
}
static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
@@ -2094,9 +2101,9 @@ remove_gphy:
static int gswip_probe(struct platform_device *pdev)
{
- struct gswip_priv *priv;
- struct device_node *np, *mdio_np, *gphy_fw_np;
+ struct device_node *np, *gphy_fw_np;
struct device *dev = &pdev->dev;
+ struct gswip_priv *priv;
int err;
int i;
u32 version;
@@ -2163,19 +2170,16 @@ static int gswip_probe(struct platform_device *pdev)
}
/* bring up the mdio bus */
- mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
- if (mdio_np) {
- err = gswip_mdio(priv, mdio_np);
- if (err) {
- dev_err(dev, "mdio probe failed\n");
- goto put_mdio_node;
- }
+ err = gswip_mdio(priv);
+ if (err) {
+ dev_err(dev, "mdio probe failed\n");
+ goto gphy_fw_remove;
}
err = dsa_register_switch(priv->ds);
if (err) {
dev_err(dev, "dsa switch register failed: %i\n", err);
- goto mdio_bus;
+ goto gphy_fw_remove;
}
if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
@@ -2194,13 +2198,7 @@ static int gswip_probe(struct platform_device *pdev)
disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
-mdio_bus:
- if (mdio_np) {
- mdiobus_unregister(priv->ds->user_mii_bus);
- mdiobus_free(priv->ds->user_mii_bus);
- }
-put_mdio_node:
- of_node_put(mdio_np);
+gphy_fw_remove:
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
return err;
@@ -2219,12 +2217,6 @@ static void gswip_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->ds);
- if (priv->ds->user_mii_bus) {
- mdiobus_unregister(priv->ds->user_mii_bus);
- of_node_put(priv->ds->user_mii_bus->dev.of_node);
- mdiobus_free(priv->ds->user_mii_bus);
- }
-
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
}
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 4cea811e7..1a5225264 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -56,5 +56,9 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
+void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev, int speed, int duplex,
+ bool tx_pause, bool rx_pause);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 9048d1f19..c3da97abc 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1358,6 +1358,9 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
+ if (!ksz_is_ksz87xx(dev))
+ return;
+
if (!p->interface && dev->compat_interface) {
dev_warn(dev->dev,
"Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
@@ -1391,18 +1394,29 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
- if (cpu_port) {
- if (!ksz_is_ksz88x3(dev))
- ksz8795_cpu_interface_select(dev, port);
-
+ if (cpu_port)
member = dsa_user_ports(ds);
- } else {
+ else
member = BIT(dsa_upstream_port(ds, port));
- }
ksz8_cfg_port_member(dev, port, member);
}
+static void ksz88x3_config_rmii_clk(struct ksz_device *dev)
+{
+ struct dsa_port *cpu_dp = dsa_to_port(dev->ds, dev->cpu_port);
+ bool rmii_clk_internal;
+
+ if (!ksz_is_ksz88x3(dev))
+ return;
+
+ rmii_clk_internal = of_property_read_bool(cpu_dp->dn,
+ "microchip,rmii-clk-internal");
+
+ ksz_cfg(dev, KSZ88X3_REG_FVID_AND_HOST_MODE,
+ KSZ88X3_PORT3_RMII_CLK_INTERNAL, rmii_clk_internal);
+}
+
void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -1419,6 +1433,9 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
ksz8_port_setup(dev, dev->cpu_port, true);
+ ksz8795_cpu_interface_select(dev, dev->cpu_port);
+ ksz88x3_config_rmii_clk(dev);
+
for (i = 0; i < dev->phy_port_cnt; i++) {
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
}
@@ -1439,6 +1456,127 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
}
}
+/**
+ * ksz8_phy_port_link_up - Configures ports with integrated PHYs
+ * @dev: The KSZ device instance.
+ * @port: The port number to configure.
+ * @duplex: The desired duplex mode.
+ * @tx_pause: If true, enables transmit pause.
+ * @rx_pause: If true, enables receive pause.
+ *
+ * Description:
+ * The function configures flow control settings for a given port based on the
+ * desired settings and current duplex mode.
+ *
+ * According to the KSZ8873 datasheet, the PORT_FORCE_FLOW_CTRL bit in the
+ * Port Control 2 register (0x1A for Port 1, 0x22 for Port 2, 0x32 for Port 3)
+ * determines how flow control is handled on the port:
+ * "1 = will always enable full-duplex flow control on the port, regardless
+ * of AN result.
+ * 0 = full-duplex flow control is enabled based on AN result."
+ *
+ * This means that the flow control behavior depends on the state of this bit:
+ * - If PORT_FORCE_FLOW_CTRL is set to 1, the switch will ignore AN results and
+ * force flow control on the port.
+ * - If PORT_FORCE_FLOW_CTRL is set to 0, the switch will enable or disable
+ * flow control based on the AN results.
+ *
+ * However, there is a potential limitation in this configuration. It is
+ * currently not possible to force disable flow control on a port if we still
+ * advertise pause support. While such a configuration is not currently
+ * supported by Linux, and may not make practical sense, it's important to be
+ * aware of this limitation when working with the KSZ8873 and similar devices.
+ */
+static void ksz8_phy_port_link_up(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u16 *regs = dev->info->regs;
+ u8 sctrl = 0;
+
+ /* The KSZ8795 switch differs from the KSZ8873 by supporting
+ * asymmetric pause control. However, since a single bit is used to
+ * control both RX and TX pause, we can't enforce asymmetric pause
+ * control - both TX and RX pause will be either enabled or disabled
+ * together.
+ *
+ * If auto-negotiation is enabled, we usually allow the flow control to
+ * be determined by the auto-negotiation process based on the
+ * capabilities of both link partners. However, for KSZ8873, the
+ * PORT_FORCE_FLOW_CTRL bit may be set by the hardware bootstrap,
+ * ignoring the auto-negotiation result. Thus, even in auto-negotiation
+ * mode, we need to ensure that the PORT_FORCE_FLOW_CTRL bit is
+ * properly cleared.
+ *
+ * In the absence of pause auto-negotiation, we will enforce symmetric
+ * pause control for both variants of switches - KSZ8873 and KSZ8795.
+ *
+ * Autoneg Pause Autoneg rx,tx PORT_FORCE_FLOW_CTRL
+ * 1 1 x 0
+ * 0 1 x 0 (flow control probably disabled)
+ * x 0 1 1 (flow control force enabled)
+ * 1 0 0 0 (flow control still depends on
+ * aneg result due to hardware)
+ * 0 0 0 0 (flow control probably disabled)
+ */
+ if (dev->ports[port].manual_flow && tx_pause)
+ sctrl |= PORT_FORCE_FLOW_CTRL;
+
+ ksz_prmw8(dev, port, regs[P_STP_CTRL], PORT_FORCE_FLOW_CTRL, sctrl);
+}
+
+/**
+ * ksz8_cpu_port_link_up - Configures the CPU port of the switch.
+ * @dev: The KSZ device instance.
+ * @speed: The desired link speed.
+ * @duplex: The desired duplex mode.
+ * @tx_pause: If true, enables transmit pause.
+ * @rx_pause: If true, enables receive pause.
+ *
+ * Description:
+ * The function configures flow control and speed settings for the CPU
+ * port of the switch based on the desired settings, current duplex mode, and
+ * speed.
+ */
+static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u16 *regs = dev->info->regs;
+ u8 ctrl = 0;
+
+ /* SW_FLOW_CTRL, SW_HALF_DUPLEX, and SW_10_MBIT bits are bootstrappable
+ * at least on KSZ8873. They can have different values depending on your
+ * board setup.
+ */
+ if (tx_pause || rx_pause)
+ ctrl |= SW_FLOW_CTRL;
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= SW_HALF_DUPLEX;
+
+ /* This hardware only supports SPEED_10 and SPEED_100. For SPEED_10
+ * we need to set the SW_10_MBIT bit. Otherwise, we can leave it 0.
+ */
+ if (speed == SPEED_10)
+ ctrl |= SW_10_MBIT;
+
+ ksz_rmw8(dev, regs[S_BROADCAST_CTRL], SW_HALF_DUPLEX | SW_FLOW_CTRL |
+ SW_10_MBIT, ctrl);
+}
+
+void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phydev, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ /* If the port is the CPU port, apply special handling. Only the CPU
+ * port is configured via global registers.
+ */
+ if (dev->cpu_port == port)
+ ksz8_cpu_port_link_up(dev, speed, duplex, tx_pause, rx_pause);
+ else if (dev->info->internal_phy[port])
+ ksz8_phy_port_link_up(dev, port, duplex, tx_pause, rx_pause);
+}
+
static int ksz8_handle_global_errata(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -1487,8 +1625,6 @@ int ksz8_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
- ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
-
/* Enable automatic fast aging when link changed detected. */
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 3c9dae53e..beca974e0 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -22,6 +22,9 @@
#define KSZ8863_GLOBAL_SOFTWARE_RESET BIT(4)
#define KSZ8863_PCS_RESET BIT(0)
+#define KSZ88X3_REG_FVID_AND_HOST_MODE 0xC6
+#define KSZ88X3_PORT3_RMII_CLK_INTERNAL BIT(3)
+
#define REG_SW_CTRL_0 0x02
#define SW_NEW_BACKOFF BIT(7)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index d5d7e48b7..25a49708f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -277,6 +277,7 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.mirror_add = ksz8_port_mirror_add,
.mirror_del = ksz8_port_mirror_del,
.get_caps = ksz8_get_caps,
+ .phylink_mac_link_up = ksz8_phylink_mac_link_up,
.config_cpu_port = ksz8_config_cpu_port,
.enable_stp_addr = ksz8_enable_stp_addr,
.reset = ksz8_reset_switch,
@@ -1672,15 +1673,23 @@ static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
static int ksz_check_device_id(struct ksz_device *dev)
{
- const struct ksz_chip_data *dt_chip_data;
+ const struct ksz_chip_data *expected_chip_data;
+ u32 expected_chip_id;
- dt_chip_data = of_device_get_match_data(dev->dev);
+ if (dev->pdata) {
+ expected_chip_id = dev->pdata->chip_id;
+ expected_chip_data = ksz_lookup_info(expected_chip_id);
+ if (WARN_ON(!expected_chip_data))
+ return -ENODEV;
+ } else {
+ expected_chip_data = of_device_get_match_data(dev->dev);
+ expected_chip_id = expected_chip_data->chip_id;
+ }
- /* Check for Device Tree and Chip ID */
- if (dt_chip_data->chip_id != dev->chip_id) {
+ if (expected_chip_id != dev->chip_id) {
dev_err(dev->dev,
"Device tree specifies chip %s but found %s, please fix it!\n",
- dt_chip_data->dev_name, dev->info->dev_name);
+ expected_chip_data->dev_name, dev->info->dev_name);
return -ENODEV;
}
@@ -2709,7 +2718,7 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
proto = DSA_TAG_PROTO_KSZ9477;
if (is_lan937x(dev))
- proto = DSA_TAG_PROTO_LAN937X_VALUE;
+ proto = DSA_TAG_PROTO_LAN937X;
return proto;
}
@@ -2980,8 +2989,10 @@ static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
{
struct ksz_device *dev = ds->priv;
- if (ksz_is_ksz88x3(dev))
+ if (ksz_is_ksz88x3(dev)) {
+ dev->ports[port].manual_flow = !(state->pause & MLO_PAUSE_AN);
return;
+ }
/* Internal PHYs */
if (dev->info->internal_phy[port])
@@ -3124,10 +3135,8 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct ksz_device *dev = ds->priv;
- if (dev->dev_ops->phylink_mac_link_up)
- dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
- phydev, speed, duplex,
- tx_pause, rx_pause);
+ dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, phydev,
+ speed, duplex, tx_pause, rx_pause);
}
static int ksz_switch_detect(struct ksz_device *dev)
@@ -4169,9 +4178,6 @@ int ksz_switch_register(struct ksz_device *dev)
int ret;
int i;
- if (dev->pdata)
- dev->chip_id = dev->pdata->chip_id;
-
dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(dev->reset_gpio))
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index b7e8a403a..15612101a 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/irq.h>
+#include <linux/platform_data/microchip-ksz.h>
#include "ksz_ptp.h"
@@ -134,6 +135,7 @@ struct ksz_port {
ktime_t tstamp_msg;
struct completion tstamp_msg_comp;
#endif
+ bool manual_flow;
};
struct ksz_device {
@@ -202,25 +204,6 @@ enum ksz_model {
LAN9374,
};
-enum ksz_chip_id {
- KSZ8563_CHIP_ID = 0x8563,
- KSZ8795_CHIP_ID = 0x8795,
- KSZ8794_CHIP_ID = 0x8794,
- KSZ8765_CHIP_ID = 0x8765,
- KSZ8830_CHIP_ID = 0x8830,
- KSZ9477_CHIP_ID = 0x00947700,
- KSZ9896_CHIP_ID = 0x00989600,
- KSZ9897_CHIP_ID = 0x00989700,
- KSZ9893_CHIP_ID = 0x00989300,
- KSZ9563_CHIP_ID = 0x00956300,
- KSZ9567_CHIP_ID = 0x00956700,
- LAN9370_CHIP_ID = 0x00937000,
- LAN9371_CHIP_ID = 0x00937100,
- LAN9372_CHIP_ID = 0x00937200,
- LAN9373_CHIP_ID = 0x00937300,
- LAN9374_CHIP_ID = 0x00937400,
-};
-
enum ksz_regs {
REG_SW_MAC_ADDR,
REG_IND_CTRL_0,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index e6b8bf603..e48ef9823 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -836,7 +836,7 @@ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
return;
for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
- ethtool_sprintf(&data, "%s", mt7530_mib[i].name);
+ ethtool_puts(&data, mt7530_mib[i].name);
}
static void
@@ -998,20 +998,173 @@ unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
- * must only be propagated to C-VLAN and MAC Bridge components. That means
- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
- * these frames are supposed to be processed by the CPU (software). So we make
- * the switch only forward them to the CPU port. And if received from a CPU
- * port, forward to a single port. The software is responsible of making the
- * switch conform to the latter by setting a single port as destination port on
- * the special tag.
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described; the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
*
- * This switch intellectual property cannot conform to this part of the standard
- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
- * DAs, it also includes :22-FF which the scope of propagation is not supposed
- * to be restricted for these MAC DAs.
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
+ *
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03) is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component or MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is majorly decided by ingress and egress
+ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified on the
+ * Nearest non-TPMR section, or the portion of the Nearest Customer Bridge
+ * section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because
+ * Independent VLAN Learning (IVL) is being used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, these registers cannot cover
+ * all of the reserved MAC addresses without also affecting the remaining
+ * Standard Group MAC Addresses. The REV_UN frame tag, utilised via the
+ * RGAC4 register, covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
+ *
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port on a
+ * per-Bridge basis, to conform to the filtering rules of the distinct Bridge
+ * components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port, which does not conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port, which does not conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port, which does not conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformantly as this switch
+ * intellectual property allows, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is from the ingress rules. When the
+ * reception Port has no PVID assigned in software, VLAN-untagged frames won't
+ * be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
*/
static void
mt753x_trap_frames(struct mt7530_priv *priv)
@@ -1019,35 +1172,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
* VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_BPC,
+ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_BPDU_FR |
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
- MT753X_R01_PORT_FW_MASK,
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC1,
+ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_BPDU_FR |
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
- MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC2,
+ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_BPDU_FR |
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
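
For reference, each mt7530_rmw() call above follows the same mask/value
read-modify-write shape: every field being touched is named in the mask
argument, and FIELD_PREP() shifts each new value into place. Below is a
minimal standalone sketch of that shape; the two field layouts are borrowed
from the BPC definitions, while the helper, the register content, and the
example values are illustrative only, not driver code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define PAE_EG_TAG_MASK  GENMASK(24, 22)  /* 3-bit egress tag field */
#define PAE_PORT_FW_MASK GENMASK(18, 16)  /* 3-bit port forward field */

/* Clear the named fields, then OR in the new field values. */
static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | val;
}

int main(void)
{
	uint32_t bpc = 0xffffffff;  /* pretend register content */

	bpc = rmw(bpc, PAE_EG_TAG_MASK | PAE_PORT_FW_MASK,
		  FIELD_PREP(PAE_EG_TAG_MASK, 4) |   /* e.g. egress untagged */
		  FIELD_PREP(PAE_PORT_FW_MASK, 4));  /* e.g. CPU port only */
	printf("BPC = %08" PRIx32 "\n", bpc);
	return 0;
}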
static int
@@ -1786,14 +1947,16 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
static int mt753x_mirror_port_get(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_mirror_port_set(unsigned int id, u32 val)
{
- return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
+ return (id == ID_MT7531 || id == ID_MT7988) ?
+ MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
}
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
@@ -2308,8 +2471,6 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- mt7530_pll_setup(priv);
-
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2327,6 +2488,9 @@ mt7530_setup(struct dsa_switch *ds)
priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+ mt7530_pll_setup(priv);
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2356,6 +2520,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
@@ -2464,6 +2631,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2542,18 +2712,25 @@ mt7531_setup(struct dsa_switch *ds)
priv->p5_interface = PHY_INTERFACE_MODE_NA;
priv->p6_interface = PHY_INTERFACE_MODE_NA;
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+ /* Enable Energy-Efficient Ethernet (EEE) and the PHY core PLL. Since
+ * the phy_device has not yet been created and
+ * phy_[read,write]_mmd_indirect cannot be called yet, we provide our
+ * own mt7531_ind_mmd_phy_[read,write] to complete this function.
*/
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR;
+ i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
+ }
+
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
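
The loop added above clears the Clause 45 EEE advertisement register (MMD 7,
register 60, MDIO_AN_EEE_ADV) on each internal PHY of the switch. A standalone
sketch of the same access pattern follows; phy_write_c45() merely stands in
for the driver's indirect-access helper, and the PHY base address and count
are illustrative assumptions.

#include <stdio.h>

#define MDIO_MMD_AN     7   /* auto-negotiation MMD */
#define MDIO_AN_EEE_ADV 60  /* EEE advertisement register */
#define NUM_PHYS        5   /* illustrative PHY count */

/* Stand-in for an indirect Clause 45 write; just logs the access. */
static void phy_write_c45(int addr, int devad, int reg, int val)
{
	printf("phy %d: mmd %d reg %d <- 0x%04x\n", addr, devad, reg, val);
}

int main(void)
{
	int base = 0, i;  /* illustrative base PHY address */

	/* Write 0 to kill the EEE advertisement on every internal PHY. */
	for (i = base; i < base + NUM_PHYS; i++)
		phy_write_c45(i, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
	return 0;
}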
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 75bc9043c..0ad52d3cb 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -32,6 +32,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@@ -65,6 +69,7 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
+#define MT753X_PAE_BPDU_FR BIT(25)
#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
@@ -75,20 +80,24 @@ enum mt753x_id {
/* Register for :01 and :02 MAC DA frame control */
#define MT753X_RGAC1 0x28
+#define MT753X_R02_BPDU_FR BIT(25)
#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_BPDU_FR BIT(9)
#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_BPDU_FR BIT(25)
#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_BPDU_FR BIT(9)
#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
@@ -625,6 +634,7 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index dd2cbffcf..32416d880 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
phy_interface_set_rgmii(supported);
}
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
- struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
unsigned long *supported = config->supported_interfaces;
+ int err;
+ u16 reg;
- /* Translate the default cmode */
- mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+ if (err) {
+ dev_err(chip->dev, "p%d: failed to read port status\n", port);
+ return;
+ }
+
+ switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+ case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+ case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ break;
+
+ case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ break;
+
+ default:
+ dev_err(chip->dev,
+ "p%d: invalid port mode in status register: %04x\n",
+ port, reg);
+ }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ if (!mv88e6xxx_phy_is_internal(chip, port))
+ mv88e6250_setup_supported_interfaces(chip, port, config);
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
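
mv88e6250_setup_supported_interfaces() keys off the 4-bit port-mode field of
the port status word; the MV88E6250_PORT_STS_PORTMODE_* codes it matches on
are added to port.h further below. A standalone sketch of that decode, where
the 0x0f00 mask and the two MII codes mirror those definitions and the status
value is made up:

#include <stdint.h>
#include <stdio.h>

#define PORTMODE_MASK     0x0f00
#define PORTMODE_MII_HALF 0x0800
#define PORTMODE_MII_FULL 0x0a00

int main(void)
{
	uint16_t status = 0x0a03;  /* pretend port status register content */

	switch (status & PORTMODE_MASK) {
	case PORTMODE_MII_HALF:
	case PORTMODE_MII_FULL:
		puts("supported interface: MII");
		break;
	default:
		puts("unhandled port mode");
	}
	return 0;
}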
@@ -943,76 +991,94 @@ error:
static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
+ int err;
+
if (!chip->info->ops->stats_snapshot)
return -EOPNOTSUPP;
- return chip->info->ops->stats_snapshot(chip, port);
-}
-
-static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
- { "in_good_octets", 8, 0x00, STATS_TYPE_BANK0, },
- { "in_bad_octets", 4, 0x02, STATS_TYPE_BANK0, },
- { "in_unicast", 4, 0x04, STATS_TYPE_BANK0, },
- { "in_broadcasts", 4, 0x06, STATS_TYPE_BANK0, },
- { "in_multicasts", 4, 0x07, STATS_TYPE_BANK0, },
- { "in_pause", 4, 0x16, STATS_TYPE_BANK0, },
- { "in_undersize", 4, 0x18, STATS_TYPE_BANK0, },
- { "in_fragments", 4, 0x19, STATS_TYPE_BANK0, },
- { "in_oversize", 4, 0x1a, STATS_TYPE_BANK0, },
- { "in_jabber", 4, 0x1b, STATS_TYPE_BANK0, },
- { "in_rx_error", 4, 0x1c, STATS_TYPE_BANK0, },
- { "in_fcs_error", 4, 0x1d, STATS_TYPE_BANK0, },
- { "out_octets", 8, 0x0e, STATS_TYPE_BANK0, },
- { "out_unicast", 4, 0x10, STATS_TYPE_BANK0, },
- { "out_broadcasts", 4, 0x13, STATS_TYPE_BANK0, },
- { "out_multicasts", 4, 0x12, STATS_TYPE_BANK0, },
- { "out_pause", 4, 0x15, STATS_TYPE_BANK0, },
- { "excessive", 4, 0x11, STATS_TYPE_BANK0, },
- { "collisions", 4, 0x1e, STATS_TYPE_BANK0, },
- { "deferred", 4, 0x05, STATS_TYPE_BANK0, },
- { "single", 4, 0x14, STATS_TYPE_BANK0, },
- { "multiple", 4, 0x17, STATS_TYPE_BANK0, },
- { "out_fcs_error", 4, 0x03, STATS_TYPE_BANK0, },
- { "late", 4, 0x1f, STATS_TYPE_BANK0, },
- { "hist_64bytes", 4, 0x08, STATS_TYPE_BANK0, },
- { "hist_65_127bytes", 4, 0x09, STATS_TYPE_BANK0, },
- { "hist_128_255bytes", 4, 0x0a, STATS_TYPE_BANK0, },
- { "hist_256_511bytes", 4, 0x0b, STATS_TYPE_BANK0, },
- { "hist_512_1023bytes", 4, 0x0c, STATS_TYPE_BANK0, },
- { "hist_1024_max_bytes", 4, 0x0d, STATS_TYPE_BANK0, },
- { "sw_in_discards", 4, 0x10, STATS_TYPE_PORT, },
- { "sw_in_filtered", 2, 0x12, STATS_TYPE_PORT, },
- { "sw_out_filtered", 2, 0x13, STATS_TYPE_PORT, },
- { "in_discards", 4, 0x00, STATS_TYPE_BANK1, },
- { "in_filtered", 4, 0x01, STATS_TYPE_BANK1, },
- { "in_accepted", 4, 0x02, STATS_TYPE_BANK1, },
- { "in_bad_accepted", 4, 0x03, STATS_TYPE_BANK1, },
- { "in_good_avb_class_a", 4, 0x04, STATS_TYPE_BANK1, },
- { "in_good_avb_class_b", 4, 0x05, STATS_TYPE_BANK1, },
- { "in_bad_avb_class_a", 4, 0x06, STATS_TYPE_BANK1, },
- { "in_bad_avb_class_b", 4, 0x07, STATS_TYPE_BANK1, },
- { "tcam_counter_0", 4, 0x08, STATS_TYPE_BANK1, },
- { "tcam_counter_1", 4, 0x09, STATS_TYPE_BANK1, },
- { "tcam_counter_2", 4, 0x0a, STATS_TYPE_BANK1, },
- { "tcam_counter_3", 4, 0x0b, STATS_TYPE_BANK1, },
- { "in_da_unknown", 4, 0x0e, STATS_TYPE_BANK1, },
- { "in_management", 4, 0x0f, STATS_TYPE_BANK1, },
- { "out_queue_0", 4, 0x10, STATS_TYPE_BANK1, },
- { "out_queue_1", 4, 0x11, STATS_TYPE_BANK1, },
- { "out_queue_2", 4, 0x12, STATS_TYPE_BANK1, },
- { "out_queue_3", 4, 0x13, STATS_TYPE_BANK1, },
- { "out_queue_4", 4, 0x14, STATS_TYPE_BANK1, },
- { "out_queue_5", 4, 0x15, STATS_TYPE_BANK1, },
- { "out_queue_6", 4, 0x16, STATS_TYPE_BANK1, },
- { "out_queue_7", 4, 0x17, STATS_TYPE_BANK1, },
- { "out_cut_through", 4, 0x18, STATS_TYPE_BANK1, },
- { "out_octets_a", 4, 0x1a, STATS_TYPE_BANK1, },
- { "out_octets_b", 4, 0x1b, STATS_TYPE_BANK1, },
- { "out_management", 4, 0x1f, STATS_TYPE_BANK1, },
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->stats_snapshot(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+#define MV88E6XXX_HW_STAT_MAPPER(_fn) \
+ _fn(in_good_octets, 8, 0x00, STATS_TYPE_BANK0), \
+ _fn(in_bad_octets, 4, 0x02, STATS_TYPE_BANK0), \
+ _fn(in_unicast, 4, 0x04, STATS_TYPE_BANK0), \
+ _fn(in_broadcasts, 4, 0x06, STATS_TYPE_BANK0), \
+ _fn(in_multicasts, 4, 0x07, STATS_TYPE_BANK0), \
+ _fn(in_pause, 4, 0x16, STATS_TYPE_BANK0), \
+ _fn(in_undersize, 4, 0x18, STATS_TYPE_BANK0), \
+ _fn(in_fragments, 4, 0x19, STATS_TYPE_BANK0), \
+ _fn(in_oversize, 4, 0x1a, STATS_TYPE_BANK0), \
+ _fn(in_jabber, 4, 0x1b, STATS_TYPE_BANK0), \
+ _fn(in_rx_error, 4, 0x1c, STATS_TYPE_BANK0), \
+ _fn(in_fcs_error, 4, 0x1d, STATS_TYPE_BANK0), \
+ _fn(out_octets, 8, 0x0e, STATS_TYPE_BANK0), \
+ _fn(out_unicast, 4, 0x10, STATS_TYPE_BANK0), \
+ _fn(out_broadcasts, 4, 0x13, STATS_TYPE_BANK0), \
+ _fn(out_multicasts, 4, 0x12, STATS_TYPE_BANK0), \
+ _fn(out_pause, 4, 0x15, STATS_TYPE_BANK0), \
+ _fn(excessive, 4, 0x11, STATS_TYPE_BANK0), \
+ _fn(collisions, 4, 0x1e, STATS_TYPE_BANK0), \
+ _fn(deferred, 4, 0x05, STATS_TYPE_BANK0), \
+ _fn(single, 4, 0x14, STATS_TYPE_BANK0), \
+ _fn(multiple, 4, 0x17, STATS_TYPE_BANK0), \
+ _fn(out_fcs_error, 4, 0x03, STATS_TYPE_BANK0), \
+ _fn(late, 4, 0x1f, STATS_TYPE_BANK0), \
+ _fn(hist_64bytes, 4, 0x08, STATS_TYPE_BANK0), \
+ _fn(hist_65_127bytes, 4, 0x09, STATS_TYPE_BANK0), \
+ _fn(hist_128_255bytes, 4, 0x0a, STATS_TYPE_BANK0), \
+ _fn(hist_256_511bytes, 4, 0x0b, STATS_TYPE_BANK0), \
+ _fn(hist_512_1023bytes, 4, 0x0c, STATS_TYPE_BANK0), \
+ _fn(hist_1024_max_bytes, 4, 0x0d, STATS_TYPE_BANK0), \
+ _fn(sw_in_discards, 4, 0x10, STATS_TYPE_PORT), \
+ _fn(sw_in_filtered, 2, 0x12, STATS_TYPE_PORT), \
+ _fn(sw_out_filtered, 2, 0x13, STATS_TYPE_PORT), \
+ _fn(in_discards, 4, 0x00, STATS_TYPE_BANK1), \
+ _fn(in_filtered, 4, 0x01, STATS_TYPE_BANK1), \
+ _fn(in_accepted, 4, 0x02, STATS_TYPE_BANK1), \
+ _fn(in_bad_accepted, 4, 0x03, STATS_TYPE_BANK1), \
+ _fn(in_good_avb_class_a, 4, 0x04, STATS_TYPE_BANK1), \
+ _fn(in_good_avb_class_b, 4, 0x05, STATS_TYPE_BANK1), \
+ _fn(in_bad_avb_class_a, 4, 0x06, STATS_TYPE_BANK1), \
+ _fn(in_bad_avb_class_b, 4, 0x07, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_0, 4, 0x08, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_1, 4, 0x09, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_2, 4, 0x0a, STATS_TYPE_BANK1), \
+ _fn(tcam_counter_3, 4, 0x0b, STATS_TYPE_BANK1), \
+ _fn(in_da_unknown, 4, 0x0e, STATS_TYPE_BANK1), \
+ _fn(in_management, 4, 0x0f, STATS_TYPE_BANK1), \
+ _fn(out_queue_0, 4, 0x10, STATS_TYPE_BANK1), \
+ _fn(out_queue_1, 4, 0x11, STATS_TYPE_BANK1), \
+ _fn(out_queue_2, 4, 0x12, STATS_TYPE_BANK1), \
+ _fn(out_queue_3, 4, 0x13, STATS_TYPE_BANK1), \
+ _fn(out_queue_4, 4, 0x14, STATS_TYPE_BANK1), \
+ _fn(out_queue_5, 4, 0x15, STATS_TYPE_BANK1), \
+ _fn(out_queue_6, 4, 0x16, STATS_TYPE_BANK1), \
+ _fn(out_queue_7, 4, 0x17, STATS_TYPE_BANK1), \
+ _fn(out_cut_through, 4, 0x18, STATS_TYPE_BANK1), \
+ _fn(out_octets_a, 4, 0x1a, STATS_TYPE_BANK1), \
+ _fn(out_octets_b, 4, 0x1b, STATS_TYPE_BANK1), \
+ _fn(out_management, 4, 0x1f, STATS_TYPE_BANK1), \
+ /* */
+
+#define MV88E6XXX_HW_STAT_ENTRY(_string, _size, _reg, _type) \
+ { #_string, _size, _reg, _type }
+static const struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+ MV88E6XXX_HW_STAT_MAPPER(MV88E6XXX_HW_STAT_ENTRY)
+};
+
+#define MV88E6XXX_HW_STAT_ENUM(_string, _size, _reg, _type) \
+ MV88E6XXX_HW_STAT_ID_ ## _string
+enum mv88e6xxx_hw_stat_id {
+ MV88E6XXX_HW_STAT_MAPPER(MV88E6XXX_HW_STAT_ENUM)
};
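
MV88E6XXX_HW_STAT_MAPPER above is the classic X-macro pattern: one list is
expanded twice, once into the table of counters and once into a parallel enum
of ids, so an entry can never be added to one without the other and the enum
can safely index the table. A minimal standalone sketch of the pattern, with
illustrative stat names and sizes:

#include <stdio.h>

#define STAT_MAPPER(_fn)         \
	_fn(in_good_octets, 8),  \
	_fn(in_unicast, 4),      \
	_fn(out_unicast, 4),     \
	/* */

struct hw_stat { const char *name; int size; };

/* Expansion 1: the table of counter descriptions. */
#define STAT_ENTRY(_name, _size) { #_name, _size }
static const struct hw_stat stats[] = { STAT_MAPPER(STAT_ENTRY) };

/* Expansion 2: a parallel enum of ids into that table. */
#define STAT_ENUM(_name, _size) STAT_ID_ ## _name
enum stat_id { STAT_MAPPER(STAT_ENUM) };

int main(void)
{
	printf("%s (size %d)\n", stats[STAT_ID_out_unicast].name,
	       stats[STAT_ID_out_unicast].size);
	return 0;
}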
static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_hw_stat *s,
+ const struct mv88e6xxx_hw_stat *s,
int port, u16 bank1_select,
u16 histogram)
{
@@ -1055,7 +1121,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data, int types)
{
- struct mv88e6xxx_hw_stat *stat;
+ const struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
@@ -1136,7 +1202,7 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
int types)
{
- struct mv88e6xxx_hw_stat *stat;
+ const struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
@@ -1195,59 +1261,82 @@ out:
return count;
}
-static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data, int types,
- u16 bank1_select, u16 histogram)
+static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- struct mv88e6xxx_hw_stat *stat;
- int i, j;
+ if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
+ return 0;
- for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
- stat = &mv88e6xxx_hw_stats[i];
- if (stat->type & types) {
- mv88e6xxx_reg_lock(chip);
- data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
- bank1_select,
- histogram);
- mv88e6xxx_reg_unlock(chip);
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
+ MV88E6XXX_G1_STATS_OP_HIST_RX);
+ return 1;
+}
- j++;
- }
- }
- return j;
+static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
+{
+ if (!(stat->type & STATS_TYPE_BANK0))
+ return 0;
+
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
+ MV88E6XXX_G1_STATS_OP_HIST_RX);
+ return 1;
}
-static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data,
- STATS_TYPE_BANK0 | STATS_TYPE_PORT,
- 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
+ return 0;
+
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
+ MV88E6XXX_G1_STATS_OP_HIST_RX);
+ return 1;
}
-static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
- 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
+ return 0;
+
+ *data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
+ MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
+ 0);
+ return 1;
}
-static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
- MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
- MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ int ret = 0;
+
+ if (chip->info->ops->stats_get_stat) {
+ mv88e6xxx_reg_lock(chip);
+ ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
+ mv88e6xxx_reg_unlock(chip);
+ }
+
+ return ret;
}
-static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static size_t mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
- return mv88e6xxx_stats_get_stats(chip, port, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
- MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
- 0);
+ const struct mv88e6xxx_hw_stat *stat;
+ size_t i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ j += mv88e6xxx_stats_get_stat(chip, port, stat, &data[j]);
+ }
+ return j;
}
static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
@@ -1263,10 +1352,9 @@ static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
- int count = 0;
+ size_t count;
- if (chip->info->ops->stats_get_stats)
- count = chip->info->ops->stats_get_stats(chip, port, data);
+ count = mv88e6xxx_stats_get_stats(chip, port, data);
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->serdes_get_stats) {
@@ -1284,16 +1372,90 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int ret;
- mv88e6xxx_reg_lock(chip);
+ ret = mv88e6xxx_stats_snapshot(chip, port);
+ if (ret < 0)
+ return;
+
+ mv88e6xxx_get_stats(chip, port, data);
+}
+
+static void mv88e6xxx_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret;
ret = mv88e6xxx_stats_snapshot(chip, port);
- mv88e6xxx_reg_unlock(chip);
+ if (ret < 0)
+ return;
+#define MV88E6XXX_ETH_MAC_STAT_MAP(_id, _member) \
+ mv88e6xxx_stats_get_stat(chip, port, \
+ &mv88e6xxx_hw_stats[MV88E6XXX_HW_STAT_ID_ ## _id], \
+ &mac_stats->stats._member)
+
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_unicast, FramesTransmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(single, SingleCollisionFrames);
+ MV88E6XXX_ETH_MAC_STAT_MAP(multiple, MultipleCollisionFrames);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_unicast, FramesReceivedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_fcs_error, FrameCheckSequenceErrors);
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_octets, OctetsTransmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(deferred, FramesWithDeferredXmissions);
+ MV88E6XXX_ETH_MAC_STAT_MAP(late, LateCollisions);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_good_octets, OctetsReceivedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_multicasts, MulticastFramesXmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(out_broadcasts, BroadcastFramesXmittedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(excessive, FramesWithExcessiveDeferral);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_multicasts, MulticastFramesReceivedOK);
+ MV88E6XXX_ETH_MAC_STAT_MAP(in_broadcasts, BroadcastFramesReceivedOK);
+
+#undef MV88E6XXX_ETH_MAC_STAT_MAP
+
+ mac_stats->stats.FramesTransmittedOK += mac_stats->stats.MulticastFramesXmittedOK;
+ mac_stats->stats.FramesTransmittedOK += mac_stats->stats.BroadcastFramesXmittedOK;
+ mac_stats->stats.FramesReceivedOK += mac_stats->stats.MulticastFramesReceivedOK;
+ mac_stats->stats.FramesReceivedOK += mac_stats->stats.BroadcastFramesReceivedOK;
+}
+
+static void mv88e6xxx_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ static const struct ethtool_rmon_hist_range rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 65535 },
+ {}
+ };
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret;
+
+ ret = mv88e6xxx_stats_snapshot(chip, port);
if (ret < 0)
return;
- mv88e6xxx_get_stats(chip, port, data);
+#define MV88E6XXX_RMON_STAT_MAP(_id, _member) \
+ mv88e6xxx_stats_get_stat(chip, port, \
+ &mv88e6xxx_hw_stats[MV88E6XXX_HW_STAT_ID_ ## _id], \
+ &rmon_stats->stats._member)
+
+ MV88E6XXX_RMON_STAT_MAP(in_undersize, undersize_pkts);
+ MV88E6XXX_RMON_STAT_MAP(in_oversize, oversize_pkts);
+ MV88E6XXX_RMON_STAT_MAP(in_fragments, fragments);
+ MV88E6XXX_RMON_STAT_MAP(in_jabber, jabbers);
+ MV88E6XXX_RMON_STAT_MAP(hist_64bytes, hist[0]);
+ MV88E6XXX_RMON_STAT_MAP(hist_65_127bytes, hist[1]);
+ MV88E6XXX_RMON_STAT_MAP(hist_128_255bytes, hist[2]);
+ MV88E6XXX_RMON_STAT_MAP(hist_256_511bytes, hist[3]);
+ MV88E6XXX_RMON_STAT_MAP(hist_512_1023bytes, hist[4]);
+ MV88E6XXX_RMON_STAT_MAP(hist_1024_max_bytes, hist[5]);
+#undef MV88E6XXX_RMON_STAT_MAP
+
+ *ranges = rmon_ranges;
}
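
The rmon_ranges[] table above tells ethtool which frame-length bucket each
hist[] slot covers. A standalone sketch of bucketing one frame length against
those same ranges (only the range values are taken from the code above; the
rest is illustrative):

#include <stdio.h>

struct range { unsigned int low, high; };

static const struct range ranges[] = {
	{ 64, 64 }, { 65, 127 }, { 128, 255 },
	{ 256, 511 }, { 512, 1023 }, { 1024, 65535 },
};

int main(void)
{
	unsigned int len = 300, i;  /* pretend received frame length */

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (len >= ranges[i].low && len <= ranges[i].high)
			printf("len %u -> hist[%u]\n", len, i);
	return 0;
}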
static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
@@ -3987,7 +4149,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4025,7 +4187,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
@@ -4066,7 +4228,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4108,7 +4270,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4151,7 +4313,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4200,7 +4362,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -4255,7 +4417,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4293,7 +4455,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4341,7 +4503,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4390,7 +4552,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4441,7 +4603,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4490,7 +4652,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4535,7 +4697,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4584,7 +4746,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -4642,7 +4804,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -4698,7 +4860,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -4757,7 +4919,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -4810,7 +4972,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6250_stats_get_sset_count,
.stats_get_strings = mv88e6250_stats_get_strings,
- .stats_get_stats = mv88e6250_stats_get_stats,
+ .stats_get_stat = mv88e6250_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6250_watchdog_ops,
@@ -4857,7 +5019,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -4916,7 +5078,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6320_stats_get_stats,
+ .stats_get_stat = mv88e6320_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -4963,7 +5125,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6320_stats_get_stats,
+ .stats_get_stat = mv88e6320_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -5012,7 +5174,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -5070,7 +5232,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -5116,7 +5278,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -5167,7 +5329,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
- .stats_get_stats = mv88e6095_stats_get_stats,
+ .stats_get_stat = mv88e6095_stats_get_stat,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
@@ -5229,7 +5391,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -5291,7 +5453,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
.set_cpu_port = mv88e6390_g1_set_cpu_port,
.set_egress_port = mv88e6390_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
@@ -5353,7 +5515,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
- .stats_get_stats = mv88e6390_stats_get_stats,
+ .stats_get_stat = mv88e6390_stats_get_stat,
/* .set_cpu_port is missing because this family does not support a global
* CPU port, only per port CPU port which is set via
* .port_set_upstream_port method.
@@ -5386,8 +5548,12 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.family = MV88E6XXX_FAMILY_6250,
.name = "Marvell 88E6020",
.num_databases = 64,
- .num_ports = 4,
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
.num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
.max_vid = 4095,
.port_base_addr = 0x8,
.phy_base_addr = 0x0,
@@ -6817,6 +6983,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_eth_mac_stats = mv88e6xxx_get_eth_mac_stats,
+ .get_rmon_stats = mv88e6xxx_get_rmon_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index c54d305a1..85eb29338 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -318,6 +318,17 @@ struct mv88e6xxx_mst {
struct mv88e6xxx_stu_entry stu;
};
+#define STATS_TYPE_PORT BIT(0)
+#define STATS_TYPE_BANK0 BIT(1)
+#define STATS_TYPE_BANK1 BIT(2)
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ size_t size;
+ int reg;
+ int type;
+};
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -574,8 +585,9 @@ struct mv88e6xxx_ops {
/* Return the number of strings describing statistics */
int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
int (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
- int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+ size_t (*stats_get_stat)(struct mv88e6xxx_chip *chip, int port,
+ const struct mv88e6xxx_hw_stat *stat,
+ uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int (*set_egress_port)(struct mv88e6xxx_chip *chip,
enum mv88e6xxx_egress_direction direction,
@@ -727,17 +739,6 @@ struct mv88e6xxx_pcs_ops {
};
-#define STATS_TYPE_PORT BIT(0)
-#define STATS_TYPE_BANK0 BIT(1)
-#define STATS_TYPE_BANK1 BIT(2)
-
-struct mv88e6xxx_hw_stat {
- char string[ETH_GSTRING_LEN];
- size_t size;
- int reg;
- int type;
-};
-
static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
{
return chip->info->max_sid > 0 &&
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 174c773b3..49444a72f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -462,8 +462,7 @@ int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip)
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK,
- MV88E6390_G1_CTL2_HIST_MODE_RX |
- MV88E6390_G1_CTL2_HIST_MODE_TX);
+ MV88E6390_G1_CTL2_HIST_MODE_RX);
}
int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
@@ -491,7 +490,7 @@ int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
if (err)
return err;
- val |= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
+ val |= MV88E6XXX_G1_STATS_OP_HIST_RX;
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
@@ -506,7 +505,7 @@ int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
MV88E6XXX_G1_STATS_OP_BUSY |
MV88E6XXX_G1_STATS_OP_CAPTURE_PORT |
- MV88E6XXX_G1_STATS_OP_HIST_RX_TX | port);
+ MV88E6XXX_G1_STATS_OP_HIST_RX | port);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 86deeb347..ddadeb9bf 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -25,10 +25,25 @@
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF 0x0900
#define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL 0x0a00
#define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL 0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF 0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF 0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL 0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL 0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ * ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED 0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII 0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY 0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL 0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL 0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF 0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY 0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL 0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY 0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY 0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY 0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY 0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY 0x0f00
#define MV88E6XXX_PORT_STS_LINK 0x0800
#define MV88E6XXX_PORT_STS_DUPLEX 0x0400
#define MV88E6XXX_PORT_STS_SPEED_MASK 0x0300
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 6f2a7aee7..95d78b318 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -947,46 +947,47 @@ static int
qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
+ struct device *dev = ds->dev;
struct device_node *mdio;
struct mii_bus *bus;
- int err;
+ int err = 0;
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (mdio && !of_device_is_available(mdio))
+ goto out_put_node;
- bus = devm_mdiobus_alloc(ds->dev);
+ bus = devm_mdiobus_alloc(dev);
if (!bus) {
err = -ENOMEM;
goto out_put_node;
}
+ priv->internal_mdio_bus = bus;
bus->priv = (void *)priv;
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
ds->dst->index, ds->index);
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
- ds->user_mii_bus = bus;
+ bus->parent = dev;
- /* Check if the devicetree declare the port:phy mapping */
- if (of_device_is_available(mdio)) {
+ if (mdio) {
+ /* Check if the device tree declares the port:phy mapping */
bus->name = "qca8k user mii";
bus->read = qca8k_internal_mdio_read;
bus->write = qca8k_internal_mdio_write;
- err = devm_of_mdiobus_register(priv->dev, bus, mdio);
- goto out_put_node;
+ } else {
+ /* If a mapping can't be found, the legacy mapping is used,
+ * using qca8k_port_to_phy()
+ */
+ ds->user_mii_bus = bus;
+ bus->phy_mask = ~ds->phys_mii_mask;
+ bus->name = "qca8k-legacy user mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
}
- /* If a mapping can't be found the legacy mapping is used,
- * using the qca8k_port_to_phy function
- */
- bus->name = "qca8k-legacy user mii";
- bus->read = qca8k_legacy_mdio_read;
- bus->write = qca8k_legacy_mdio_write;
-
- err = devm_mdiobus_register(priv->dev, bus);
+ err = devm_of_mdiobus_register(dev, bus, mdio);
out_put_node:
of_node_put(mdio);
-
return err;
}
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 9243eff89..2358cd399 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -487,7 +487,7 @@ void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
return;
for (i = 0; i < priv->info->mib_count; i++)
- ethtool_sprintf(&data, "%s", ar8327_mib[i].name);
+ ethtool_puts(&data, ar8327_mib[i].name);
}
void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 90e30c290..811ebeeff 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -366,7 +366,6 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
{
struct fwnode_handle *led = NULL, *leds = NULL;
struct led_init_data init_data = { };
- struct dsa_switch *ds = priv->ds;
enum led_default_state state;
struct qca8k_led *port_led;
int led_num, led_index;
@@ -429,7 +428,8 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.default_label = ":port";
init_data.fwnode = led;
init_data.devname_mandatory = true;
- init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->user_mii_bus->id,
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
+ priv->internal_mdio_bus->id,
port_num);
if (!init_data.devicename)
return -ENOMEM;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 2ac7e88f8..c8785c36c 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -454,6 +454,7 @@ struct qca8k_priv {
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
+ struct mii_bus *internal_mdio_bus;
struct dsa_switch *ds;
struct mutex reg_mutex;
struct device *dev;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 0875e4fc9..b072045eb 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1303,7 +1303,7 @@ static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
- ethtool_sprintf(&data, "%s", mib->name);
+ ethtool_puts(&data, mib->name);
}
}
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 82e267b8f..59f98d2c8 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -401,7 +401,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
return;
for (i = 0; i < priv->num_mib_counters; i++)
- ethtool_sprintf(&data, "%s", priv->mib_counters[i].name);
+ ethtool_puts(&data, priv->mib_counters[i].name);
}
EXPORT_SYMBOL_GPL(rtl8366_get_strings);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index b39b719a5..e3b6a470c 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -117,10 +118,11 @@
RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
/* CPU port control reg */
-#define RTL8368RB_CPU_CTRL_REG 0x0061
-#define RTL8368RB_CPU_PORTS_MSK 0x00FF
+#define RTL8366RB_CPU_CTRL_REG 0x0061
+#define RTL8366RB_CPU_PORTS_MSK 0x00FF
/* Disables inserting custom tag length/type 0x8899 */
-#define RTL8368RB_CPU_NO_TAG BIT(15)
+#define RTL8366RB_CPU_NO_TAG BIT(15)
+#define RTL8366RB_CPU_TAG_SIZE 4
#define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */
#define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */
@@ -912,10 +914,10 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* Enable CPU port with custom DSA tag 8899.
*
- * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this registers
+ * If you set RTL8366RB_CPU_NO_TAG (bit 15) in this register
* the custom tag is turned off.
*/
- ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_CPU_CTRL_REG,
0xFFFF,
BIT(priv->cpu_port));
if (ret)
@@ -928,15 +930,19 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Set maximum packet length to 1536 bytes */
+ /* Set default maximum packet length to 1536 bytes */
ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
return ret;
- for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
- /* layer 2 size, see rtl8366rb_change_mtu() */
- rb->max_mtu[i] = 1532;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
+ if (i == priv->cpu_port)
+ /* The CPU port also needs to accept the tag */
+ rb->max_mtu[i] = ETH_DATA_LEN + RTL8366RB_CPU_TAG_SIZE;
+ else
+ rb->max_mtu[i] = ETH_DATA_LEN;
+ }
/* Disable learning for all ports */
ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
@@ -1441,24 +1447,29 @@ static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
/* Roof out the MTU for the entire switch to the greatest
* common denominator: the biggest set for any one port will
* be the biggest MTU for the switch.
- *
- * The first setting, 1522 bytes, is max IP packet 1500 bytes,
- * plus ethernet header, 1518 bytes, plus CPU tag, 4 bytes.
- * This function should consider the parameter an SDU, so the
- * MTU passed for this setting is 1518 bytes. The same logic
- * of subtracting the DSA tag of 4 bytes apply to the other
- * settings.
*/
- max_mtu = 1518;
+ max_mtu = ETH_DATA_LEN;
for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
if (rb->max_mtu[i] > max_mtu)
max_mtu = rb->max_mtu[i];
}
- if (max_mtu <= 1518)
+
+ /* Translate to layer 2 size.
+ * Add ethernet and (possibly) VLAN headers, plus the checksum, to the size.
+ * For ETH_DATA_LEN (1500 bytes) this will add up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ if (max_mtu <= 1522)
len = RTL8366RB_SGCR_MAX_LENGTH_1522;
- else if (max_mtu > 1518 && max_mtu <= 1532)
+ else if (max_mtu > 1522 && max_mtu <= 1536)
+ /* This will be the most common default if using VLAN and
+ * CPU tagging on a port as both VLAN and CPU tag will
+ * result in 1518 + 4 + 4 = 1526 bytes.
+ */
len = RTL8366RB_SGCR_MAX_LENGTH_1536;
- else if (max_mtu > 1532 && max_mtu <= 1548)
+ else if (max_mtu > 1536 && max_mtu <= 1552)
len = RTL8366RB_SGCR_MAX_LENGTH_1552;
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
@@ -1470,10 +1481,12 @@ static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
{
- /* The max MTU is 16000 bytes, so we subtract the CPU tag
- * and the max presented to the system is 15996 bytes.
+ /* The max MTU is 16000 bytes, so we subtract the ethernet
+ * headers with VLAN and checksum and arrive at
+ * 16000 - 18 - 4 = 15978. This does not include the CPU tag
+ * since that is added to the requested MTU by the DSA framework.
*/
- return 15996;
+ return 16000 - VLAN_ETH_HLEN - ETH_FCS_LEN;
}
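
The two functions above translate between the MTU the DSA framework passes in
(a layer 3 payload size) and the layer 2 frame length that the switch register
actually limits. A standalone sketch of that arithmetic with the kernel
constants written out (ETH_DATA_LEN = 1500, VLAN_ETH_HLEN = 18,
ETH_FCS_LEN = 4):

#include <stdio.h>

#define ETH_DATA_LEN  1500  /* default MTU */
#define VLAN_ETH_HLEN 18    /* dst MAC + src MAC + VLAN tag + ethertype */
#define ETH_FCS_LEN   4     /* frame checksum */

int main(void)
{
	/* MTU -> layer 2 frame length: 1500 + 18 + 4 = 1522 bytes */
	printf("default frame length: %d\n",
	       ETH_DATA_LEN + VLAN_ETH_HLEN + ETH_FCS_LEN);

	/* 16000-byte register limit -> max MTU: 16000 - 18 - 4 = 15978 */
	printf("max MTU exposed: %d\n",
	       16000 - VLAN_ETH_HLEN - ETH_FCS_LEN);
	return 0;
}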
static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 74cee39d7..6646f7fb0 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -21,6 +21,8 @@
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
+#include <linux/units.h>
+
#include "sja1105.h"
#include "sja1105_tas.h"
@@ -2138,7 +2140,6 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
sja1105_bridge_member(ds, port, bridge, false);
}
-#define BYTES_PER_KBIT (1000LL / 8)
/* Port 0 (the uC port) does not have CBS shapers */
#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 833e55e4b..52ddb4ef2 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -94,7 +94,7 @@ int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 571f037fe..ae70eac3b 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -949,7 +949,7 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
indices[5] = ((val >> 26) & 0x1f); /* TX counter 2 */
/* The first counters is the RX octets */
- ethtool_sprintf(&buf, "RxEtherStatsOctets");
+ ethtool_puts(&buf, "RxEtherStatsOctets");
/* Each port supports recording 3 RX counters and 3 TX counters,
* figure out what counters we use in this set-up and return the
@@ -959,15 +959,15 @@ static void vsc73xx_get_strings(struct dsa_switch *ds, int port, u32 stringset,
*/
for (i = 0; i < 3; i++) {
cnt = vsc73xx_find_counter(vsc, indices[i], false);
- ethtool_sprintf(&buf, "%s", cnt ? cnt->name : "");
+ ethtool_puts(&buf, cnt ? cnt->name : "");
}
/* TX stats begins with the number of TX octets */
- ethtool_sprintf(&buf, "TxEtherStatsOctets");
+ ethtool_puts(&buf, "TxEtherStatsOctets");
for (i = 3; i < 6; i++) {
cnt = vsc73xx_find_counter(vsc, indices[i], true);
- ethtool_sprintf(&buf, "%s", cnt ? cnt->name : "");
+ ethtool_puts(&buf, cnt ? cnt->name : "");
}
}
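
ethtool_sprintf(&buf, "%s", s) and ethtool_sprintf(&buf, "literal") carry no real formatting, so they convert mechanically to ethtool_puts(), which copies the string verbatim into the next ETH_GSTRING_LEN-sized slot and advances the cursor. A userspace stand-in for those semantics, with the slot size assumed to match the kernel's 32-byte ETH_GSTRING_LEN:

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32	/* assumed to match the kernel's slot size */

static void puts_like(char **data, const char *str)
{
	strncpy(*data, str, ETH_GSTRING_LEN);	/* verbatim copy, no format parsing */
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char buf[2 * ETH_GSTRING_LEN] = { 0 };
	char *p = buf;

	puts_like(&p, "RxEtherStatsOctets");
	puts_like(&p, "TxEtherStatsOctets");
	printf("%s / %s\n", buf, buf + ETH_GSTRING_LEN);
	return 0;
}
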
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 768454aa3..946bba070 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -71,6 +71,7 @@ static int dummy_dev_init(struct net_device *dev)
if (!dev->lstats)
return -ENOMEM;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 0e0aa4016..c5636245f 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 683474205..6d429b11e 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index a09f383dd..828edca8d 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 24f49a8ff..fd9dcdc35 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 265976e3b..6cc0e190a 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index d70390e9d..c24dd4fe7 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index da3bdd302..760a9a60b 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -21,6 +21,7 @@ config ADIN1110
tristate "Analog Devices ADIN1110 MAC-PHY"
depends on SPI && NET_SWITCHDEV
select CRC8
+ select PHYLIB
help
Say yes here to build support for Analog Devices ADIN1110
Low Power 10BASE-T1L Ethernet MAC-PHY.
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index f1f752a8f..6ab615365 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 633b321d7..4db689372 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -362,7 +362,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
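
buffer_size and buffers_num are 32-bit, so their product used to wrap before being assigned to size; casting one operand widens the multiplication itself. A small demonstration of why the cast must sit on an operand rather than on the result (values are illustrative, and the wide result assumes an LP64 system where size_t is 64-bit):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	uint32_t buffer_size = 1 << 20;	/* 1 MiB per bounce buffer (illustrative) */
	uint32_t buffers_num = 1 << 13;	/* 8192 buffers (illustrative) */

	size_t wrong = (size_t)(buffer_size * buffers_num); /* 32-bit mul wraps to 0 */
	size_t right = (size_t)buffer_size * buffers_num;   /* widened before the mul */

	printf("wrong=%zu right=%zu\n", wrong, right);
	return 0;
}
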
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index d671df4b7..0cb6cc1ce 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include "ena_netdev.h"
+#include "ena_xdp.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -262,17 +263,14 @@ static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
ena_stats->name);
}
- if (!is_xdp) {
- /* RX stats, in XDP there isn't a RX queue
- * counterpart
- */
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
+ /* In XDP there isn't an RX queue counterpart */
+ if (is_xdp)
+ continue;
- ethtool_sprintf(data,
- "queue_%u_rx_%s", i,
- ena_stats->name);
- }
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name);
}
}
}
@@ -299,13 +297,13 @@ static void ena_get_strings(struct ena_adapter *adapter,
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
- ethtool_sprintf(&data, ena_stats->name);
+ ethtool_puts(&data, ena_stats->name);
}
if (eni_stats_needed) {
for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
ena_stats = &ena_stats_eni_strings[i];
- ethtool_sprintf(&data, ena_stats->name);
+ ethtool_puts(&data, ena_stats->name);
}
}
@@ -802,15 +800,15 @@ static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
return rc;
}
-static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ena_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ena_adapter *adapter = netdev_priv(netdev);
enum ena_admin_hash_functions ena_func;
u8 func;
int rc;
- rc = ena_indirection_table_get(adapter, indir);
+ rc = ena_indirection_table_get(adapter, rxfh->indir);
if (rc)
return rc;
@@ -825,7 +823,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return rc;
}
- rc = ena_com_get_hash_key(adapter->ena_dev, key);
+ rc = ena_com_get_hash_key(adapter->ena_dev, rxfh->key);
if (rc)
return rc;
@@ -842,27 +840,27 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return -EOPNOTSUPP;
}
- if (hfunc)
- *hfunc = func;
+ rxfh->hfunc = func;
return 0;
}
-static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ena_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_hash_functions func = 0;
int rc;
- if (indir) {
- rc = ena_indirection_table_set(adapter, indir);
+ if (rxfh->indir) {
+ rc = ena_indirection_table_set(adapter, rxfh->indir);
if (rc)
return rc;
}
- switch (hfunc) {
+ switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
func = ena_com_get_current_hash_function(ena_dev);
break;
@@ -874,12 +872,12 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
break;
default:
netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
- hfunc);
+ rxfh->hfunc);
return -EOPNOTSUPP;
}
- if (key || func) {
- rc = ena_com_fill_hash_function(ena_dev, func, key,
+ if (rxfh->key || func) {
+ rc = ena_com_fill_hash_function(ena_dev, func, rxfh->key,
ENA_HASH_KEY_SIZE,
0xFFFFFFFF);
if (unlikely(rc)) {
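
The RSS get/set callbacks now take one parameter block instead of separate indir/key/hfunc arguments (plus a netlink extack on the set path), which is why every use above becomes a rxfh-> access. A compilable sketch of the struct's shape, abridged to the three members this driver touches (the real struct ethtool_rxfh_param in 6.8 carries more fields):

#include <stdint.h>

struct ethtool_rxfh_param_sketch {
	uint8_t   hfunc;	/* ETH_RSS_HASH_* or ETH_RSS_HASH_NO_CHANGE */
	uint32_t *indir;	/* indirection table, NULL when untouched */
	uint8_t  *key;		/* hash key, NULL when untouched */
};
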
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4fa27c9a3..95ed32542 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -19,8 +19,8 @@
#include <net/ip.h>
#include "ena_netdev.h"
-#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
+#include "ena_xdp.h"
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
@@ -45,53 +45,6 @@ static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
- int count);
-static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count);
-static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
-static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
-static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
-static void ena_napi_disable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_napi_enable_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static int ena_up(struct ena_adapter *adapter);
-static void ena_down(struct ena_adapter *adapter);
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring);
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info);
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count);
-
-/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
-static void ena_increase_stat(u64 *statp, u64 cnt,
- struct u64_stats_sync *syncp)
-{
- u64_stats_update_begin(syncp);
- (*statp) += cnt;
- u64_stats_update_end(syncp);
-}
-
-static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
-{
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
-}
-
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -135,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static int ena_xmit_common(struct net_device *dev,
- struct ena_ring *ring,
- struct ena_tx_buffer *tx_info,
- struct ena_com_tx_ctx *ena_tx_ctx,
- u16 next_to_use,
- u32 bytes)
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes)
{
- struct ena_adapter *adapter = netdev_priv(dev);
int rc, nb_hw_desc;
if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
ena_tx_ctx))) {
- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid);
ena_ring_tx_doorbell(ring);
@@ -162,7 +114,7 @@ static int ena_xmit_common(struct net_device *dev,
* ena_com_prepare_tx() are fatal and therefore require a device reset.
*/
if (unlikely(rc)) {
- netif_err(adapter, tx_queued, dev,
+ netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp);
@@ -178,6 +130,7 @@ static int ena_xmit_common(struct net_device *dev,
u64_stats_update_end(&ring->syncp);
tx_info->tx_descs = nb_hw_desc;
+ tx_info->total_tx_size = bytes;
tx_info->last_jiffies = jiffies;
tx_info->print_once = 0;
@@ -186,467 +139,6 @@ static int ena_xmit_common(struct net_device *dev,
return 0;
}
-/* This is the XDP napi callback. XDP queues use a separate napi callback
- * than Rx/Tx queues.
- */
-static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
-{
- struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
- u32 xdp_work_done, xdp_budget;
- struct ena_ring *xdp_ring;
- int napi_comp_call = 0;
- int ret;
-
- xdp_ring = ena_napi->xdp_ring;
-
- xdp_budget = budget;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
- napi_complete_done(napi, 0);
- return 0;
- }
-
- xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
-
- /* If the device is about to reset or down, avoid unmask
- * the interrupt and return 0 so NAPI won't reschedule
- */
- if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
- napi_complete_done(napi, 0);
- ret = 0;
- } else if (xdp_budget > xdp_work_done) {
- napi_comp_call = 1;
- if (napi_complete_done(napi, xdp_work_done))
- ena_unmask_interrupt(xdp_ring, NULL);
- ena_update_ring_numa_node(xdp_ring, NULL);
- ret = xdp_work_done;
- } else {
- ret = xdp_budget;
- }
-
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.napi_comp += napi_comp_call;
- xdp_ring->tx_stats.tx_poll++;
- u64_stats_update_end(&xdp_ring->syncp);
- xdp_ring->tx_stats.last_napi_jiffies = jiffies;
-
- return ret;
-}
-
-static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
- struct ena_tx_buffer *tx_info,
- struct xdp_frame *xdpf,
- struct ena_com_tx_ctx *ena_tx_ctx)
-{
- struct ena_adapter *adapter = xdp_ring->adapter;
- struct ena_com_buf *ena_buf;
- int push_len = 0;
- dma_addr_t dma;
- void *data;
- u32 size;
-
- tx_info->xdpf = xdpf;
- data = tx_info->xdpf->data;
- size = tx_info->xdpf->len;
-
- if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* Designate part of the packet for LLQ */
- push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
-
- ena_tx_ctx->push_header = data;
-
- size -= push_len;
- data += push_len;
- }
-
- ena_tx_ctx->header_len = push_len;
-
- if (size > 0) {
- dma = dma_map_single(xdp_ring->dev,
- data,
- size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
- goto error_report_dma_error;
-
- tx_info->map_linear_data = 0;
-
- ena_buf = tx_info->bufs;
- ena_buf->paddr = dma;
- ena_buf->len = size;
-
- ena_tx_ctx->ena_bufs = ena_buf;
- ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
- }
-
- return 0;
-
-error_report_dma_error:
- ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
- &xdp_ring->syncp);
- netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
-
- return -EINVAL;
-}
-
-static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
- struct net_device *dev,
- struct xdp_frame *xdpf,
- int flags)
-{
- struct ena_com_tx_ctx ena_tx_ctx = {};
- struct ena_tx_buffer *tx_info;
- u16 next_to_use, req_id;
- int rc;
-
- next_to_use = xdp_ring->next_to_use;
- req_id = xdp_ring->free_ids[next_to_use];
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
- if (unlikely(rc))
- return rc;
-
- ena_tx_ctx.req_id = req_id;
-
- rc = ena_xmit_common(dev,
- xdp_ring,
- tx_info,
- &ena_tx_ctx,
- next_to_use,
- xdpf->len);
- if (rc)
- goto error_unmap_dma;
-
- /* trigger the dma engine. ena_ring_tx_doorbell()
- * calls a memory barrier inside it.
- */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- return rc;
-
-error_unmap_dma:
- ena_unmap_tx_buff(xdp_ring, tx_info);
- tx_info->xdpf = NULL;
- return rc;
-}
-
-static int ena_xdp_xmit(struct net_device *dev, int n,
- struct xdp_frame **frames, u32 flags)
-{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_ring *xdp_ring;
- int qid, i, nxmit = 0;
-
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
- return -EINVAL;
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
- return -ENETDOWN;
-
- /* We assume that all rings have the same XDP program */
- if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
- return -ENXIO;
-
- qid = smp_processor_id() % adapter->xdp_num_queues;
- qid += adapter->xdp_first_ring;
- xdp_ring = &adapter->tx_ring[qid];
-
- /* Other CPU ids might try to send thorugh this queue */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- for (i = 0; i < n; i++) {
- if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
- break;
- nxmit++;
- }
-
- /* Ring doorbell to make device aware of the packets */
- if (flags & XDP_XMIT_FLUSH)
- ena_ring_tx_doorbell(xdp_ring);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
-
- /* Return number of packets sent */
- return nxmit;
-}
-
-static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
-{
- u32 verdict = ENA_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ena_ring *xdp_ring;
- struct xdp_frame *xdpf;
- u64 *xdp_stat;
-
- xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
-
- if (!xdp_prog)
- goto out;
-
- verdict = bpf_prog_run_xdp(xdp_prog, xdp);
-
- switch (verdict) {
- case XDP_TX:
- xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- }
-
- /* Find xmit queue */
- xdp_ring = rx_ring->xdp_ring;
-
- /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
- spin_lock(&xdp_ring->xdp_tx_lock);
-
- if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
- XDP_XMIT_FLUSH))
- xdp_return_frame(xdpf);
-
- spin_unlock(&xdp_ring->xdp_tx_lock);
- xdp_stat = &rx_ring->rx_stats.xdp_tx;
- verdict = ENA_XDP_TX;
- break;
- case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
- xdp_stat = &rx_ring->rx_stats.xdp_redirect;
- verdict = ENA_XDP_REDIRECT;
- break;
- }
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_ABORTED:
- trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_DROP:
- xdp_stat = &rx_ring->rx_stats.xdp_drop;
- verdict = ENA_XDP_DROP;
- break;
- case XDP_PASS:
- xdp_stat = &rx_ring->rx_stats.xdp_pass;
- verdict = ENA_XDP_PASS;
- break;
- default:
- bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
- xdp_stat = &rx_ring->rx_stats.xdp_invalid;
- verdict = ENA_XDP_DROP;
- }
-
- ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
-out:
- return verdict;
-}
-
-static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
-{
- adapter->xdp_first_ring = adapter->num_io_queues;
- adapter->xdp_num_queues = adapter->num_io_queues;
-
- ena_init_io_rings(adapter,
- adapter->xdp_first_ring,
- adapter->xdp_num_queues);
-}
-
-static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
-{
- u32 xdp_first_ring = adapter->xdp_first_ring;
- u32 xdp_num_queues = adapter->xdp_num_queues;
- int rc = 0;
-
- rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
- if (rc)
- goto setup_err;
-
- rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
- if (rc)
- goto create_err;
-
- return 0;
-
-create_err:
- ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
-setup_err:
- return rc;
-}
-
-/* Provides a way for both kernel and bpf-prog to know
- * more about the RX-queue a given XDP frame arrived on.
- */
-static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
-{
- int rc;
-
- rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- goto err;
- }
-
- rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
- NULL);
-
- if (rc) {
- netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
- "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
- rx_ring->qid, rc);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- }
-
-err:
- return rc;
-}
-
-static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
-{
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-}
-
-static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
- struct bpf_prog *prog,
- int first, int count)
-{
- struct bpf_prog *old_bpf_prog;
- struct ena_ring *rx_ring;
- int i = 0;
-
- for (i = first; i < count; i++) {
- rx_ring = &adapter->rx_ring[i];
- old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
-
- if (!old_bpf_prog && prog) {
- ena_xdp_register_rxq_info(rx_ring);
- rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
- } else if (old_bpf_prog && !prog) {
- ena_xdp_unregister_rxq_info(rx_ring);
- rx_ring->rx_headroom = NET_SKB_PAD;
- }
- }
-}
-
-static void ena_xdp_exchange_program(struct ena_adapter *adapter,
- struct bpf_prog *prog)
-{
- struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
-
- ena_xdp_exchange_program_rx_in_range(adapter,
- prog,
- 0,
- adapter->num_io_queues);
-
- if (old_bpf_prog)
- bpf_prog_put(old_bpf_prog);
-}
-
-static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
-{
- bool was_up;
- int rc;
-
- was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
-
- if (was_up)
- ena_down(adapter);
-
- adapter->xdp_first_ring = 0;
- adapter->xdp_num_queues = 0;
- ena_xdp_exchange_program(adapter, NULL);
- if (was_up) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- return 0;
-}
-
-static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- struct bpf_prog *prog = bpf->prog;
- struct bpf_prog *old_bpf_prog;
- int rc, prev_mtu;
- bool is_up;
-
- is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
- rc = ena_xdp_allowed(adapter);
- if (rc == ENA_XDP_ALLOWED) {
- old_bpf_prog = adapter->xdp_bpf_prog;
- if (prog) {
- if (!is_up) {
- ena_init_all_xdp_queues(adapter);
- } else if (!old_bpf_prog) {
- ena_down(adapter);
- ena_init_all_xdp_queues(adapter);
- }
- ena_xdp_exchange_program(adapter, prog);
-
- if (is_up && !old_bpf_prog) {
- rc = ena_up(adapter);
- if (rc)
- return rc;
- }
- xdp_features_set_redirect_target(netdev, false);
- } else if (old_bpf_prog) {
- xdp_features_clear_redirect_target(netdev);
- rc = ena_destroy_and_free_all_xdp_queues(adapter);
- if (rc)
- return rc;
- }
-
- prev_mtu = netdev->max_mtu;
- netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
-
- if (!old_bpf_prog)
- netif_info(adapter, drv, adapter->netdev,
- "XDP program is set, changing the max_mtu from %d to %d",
- prev_mtu, netdev->max_mtu);
-
- } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
- netdev->mtu, ENA_XDP_MAX_MTU);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
- return -EINVAL;
- } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
- netif_err(adapter, drv, adapter->netdev,
- "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
- adapter->num_io_queues, adapter->max_num_io_queues);
- NL_SET_ERR_MSG_MOD(bpf->extack,
- "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
- * program as well as to query the current xdp program id.
- */
-static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
-{
- switch (bpf->command) {
- case XDP_SETUP_PROG:
- return ena_xdp_set(netdev, bpf);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
@@ -688,8 +180,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
u64_stats_init(&ring->syncp);
}
-static void ena_init_io_rings(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev;
struct ena_ring *txr, *rxr;
@@ -820,9 +312,8 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->push_buf_intermediate_buf = NULL;
}
-static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index,
- int count)
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i, rc = 0;
@@ -845,8 +336,8 @@ err_setup_tx:
return rc;
}
-static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
int i;
@@ -859,7 +350,7 @@ static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
*
* Free all transmit software resources
*/
-static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
ena_free_all_io_tx_resources_in_range(adapter,
0,
@@ -1169,8 +660,8 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}
-static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
- struct ena_tx_buffer *tx_info)
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
{
struct ena_com_buf *ena_buf;
u32 cnt;
@@ -1205,8 +696,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -1226,10 +720,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -1262,6 +761,7 @@ static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
for (i = 0; i < adapter->num_io_queues; i++) {
ena_qid = ENA_IO_RXQ_IDX(i);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
}
}
@@ -1272,8 +772,8 @@ static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
ena_destroy_all_rx_queues(adapter);
}
-static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
- struct ena_tx_buffer *tx_info, bool is_xdp)
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp)
{
if (tx_info)
netif_err(ring->adapter,
@@ -1305,17 +805,6 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
-static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
-{
- struct ena_tx_buffer *tx_info;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
-
- return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
-}
-
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
struct netdev_queue *txq;
@@ -1363,7 +852,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
skb);
- tx_bytes += skb->len;
+ tx_bytes += tx_info->total_tx_size;
dev_kfree_skb(skb);
tx_pkts++;
total_done += tx_info->tx_descs;
@@ -1688,6 +1177,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u
return ret;
}
+
/* ena_clean_rx_irq - Cleanup RX irq
* @rx_ring: RX ring to clean
* @napi: napi handler
@@ -1880,8 +1370,8 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
rx_ring->per_napi_packets = 0;
}
-static void ena_unmask_interrupt(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_unmask_interrupt(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
u32 rx_interval = tx_ring->smoothed_interval;
struct ena_eth_io_intr_reg intr_reg;
@@ -1913,8 +1403,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
-static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
- struct ena_ring *rx_ring)
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring)
{
int cpu = get_cpu();
int numa_node;
@@ -1949,67 +1439,6 @@ out:
put_cpu();
}
-static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
-{
- u32 total_done = 0;
- u16 next_to_clean;
- int tx_pkts = 0;
- u16 req_id;
- int rc;
-
- if (unlikely(!xdp_ring))
- return 0;
- next_to_clean = xdp_ring->next_to_clean;
-
- while (tx_pkts < budget) {
- struct ena_tx_buffer *tx_info;
- struct xdp_frame *xdpf;
-
- rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
- &req_id);
- if (rc) {
- if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(xdp_ring, req_id, NULL,
- true);
- break;
- }
-
- /* validate that the request id points to a valid xdp_frame */
- rc = validate_xdp_req_id(xdp_ring, req_id);
- if (rc)
- break;
-
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- xdpf = tx_info->xdpf;
-
- tx_info->xdpf = NULL;
- tx_info->last_jiffies = 0;
- ena_unmap_tx_buff(xdp_ring, tx_info);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
- xdpf);
-
- tx_pkts++;
- total_done += tx_info->tx_descs;
-
- xdp_return_frame(xdpf);
- xdp_ring->free_ids[next_to_clean] = req_id;
- next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
- xdp_ring->ring_size);
- }
-
- xdp_ring->next_to_clean = next_to_clean;
- ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
-
- netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
- "tx_poll: q %d done. total pkts: %d\n",
- xdp_ring->qid, tx_pkts);
-
- return tx_pkts;
-}
-
static int ena_io_poll(struct napi_struct *napi, int budget)
{
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
@@ -2326,28 +1755,36 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
netif_napi_del(&adapter->ena_napi[i].napi);
- WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
- adapter->ena_napi[i].xdp_ring);
+ WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
+ adapter->ena_napi[i].rx_ring);
}
}
static void ena_init_napi_in_range(struct ena_adapter *adapter,
int first_index, int count)
{
+ int (*napi_handler)(struct napi_struct *napi, int budget);
int i;
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
+ struct ena_ring *rx_ring, *tx_ring;
- netif_napi_add(adapter->netdev, &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
+ memset(napi, 0, sizeof(*napi));
- if (!ENA_IS_XDP_INDEX(adapter, i)) {
- napi->rx_ring = &adapter->rx_ring[i];
- napi->tx_ring = &adapter->tx_ring[i];
- } else {
- napi->xdp_ring = &adapter->tx_ring[i];
- }
+ rx_ring = &adapter->rx_ring[i];
+ tx_ring = &adapter->tx_ring[i];
+
+ napi_handler = ena_io_poll;
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ napi_handler = ena_xdp_io_poll;
+
+ netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
+
+ if (!ENA_IS_XDP_INDEX(adapter, i))
+ napi->rx_ring = rx_ring;
+
+ napi->tx_ring = tx_ring;
napi->qid = i;
}
}
@@ -2475,8 +1912,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
return rc;
}
-static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
- int first_index, int count)
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
int rc, i;
@@ -2556,12 +1993,15 @@ static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
if (rc)
goto create_err;
INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
+
+ ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
}
return 0;
create_err:
while (i--) {
+ ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
cancel_work_sync(&adapter->ena_napi[i].dim.work);
ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
}
@@ -2686,7 +2126,7 @@ err_setup_tx:
}
}
-static int ena_up(struct ena_adapter *adapter)
+int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
@@ -2748,7 +2188,7 @@ err_req_irq:
return rc;
}
-static void ena_down(struct ena_adapter *adapter)
+void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
@@ -3179,7 +2619,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
- rc = ena_xmit_common(dev,
+ rc = ena_xmit_common(adapter,
tx_ring,
tx_info,
&ena_tx_ctx,
@@ -3259,8 +2699,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
- strncpy(host_info->os_dist_str, utsname()->release,
- sizeof(host_info->os_dist_str) - 1);
+ strscpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str));
host_info->driver_version =
(DRV_MODULE_GEN_MAJOR) |
(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -3347,6 +2787,7 @@ static void ena_get_stats64(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
struct ena_ring *rx_ring, *tx_ring;
+ u64 total_xdp_rx_drops = 0;
unsigned int start;
u64 rx_drops;
u64 tx_drops;
@@ -3355,8 +2796,8 @@ static void ena_get_stats64(struct net_device *netdev,
if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
return;
- for (i = 0; i < adapter->num_io_queues; i++) {
- u64 bytes, packets;
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
+ u64 bytes, packets, xdp_rx_drops;
tx_ring = &adapter->tx_ring[i];
@@ -3369,16 +2810,22 @@ static void ena_get_stats64(struct net_device *netdev,
stats->tx_packets += packets;
stats->tx_bytes += bytes;
+ /* In XDP there isn't an RX queue counterpart */
+ if (ENA_IS_XDP_INDEX(adapter, i))
+ continue;
+
rx_ring = &adapter->rx_ring[i];
do {
start = u64_stats_fetch_begin(&rx_ring->syncp);
packets = rx_ring->rx_stats.cnt;
bytes = rx_ring->rx_stats.bytes;
+ xdp_rx_drops = rx_ring->rx_stats.xdp_drop;
} while (u64_stats_fetch_retry(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
+ total_xdp_rx_drops += xdp_rx_drops;
}
do {
@@ -3387,7 +2834,7 @@ static void ena_get_stats64(struct net_device *netdev,
tx_drops = adapter->dev_stats.tx_drops;
} while (u64_stats_fetch_retry(&adapter->syncp, start));
- stats->rx_dropped = rx_drops;
+ stats->rx_dropped = rx_drops + total_xdp_rx_drops;
stats->tx_dropped = tx_drops;
stats->multicast = 0;
@@ -3982,10 +3429,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3998,27 +3446,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
/* trigger napi schedule after 2 consecutive detections */
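
The rework above replaces a linear scan that stopped at the array end with a round-robin walk: up to budget queues are checked per timer tick, and the position persists in last_monitored_tx_qid so every queue is eventually visited. A userspace sketch of the same walk:

#include <stdio.h>

#define QUEUES 10
#define BUDGET  4

static int last_qid;	/* persists across ticks, like last_monitored_tx_qid */

static void monitor_tick(void)
{
	int budget = BUDGET < QUEUES ? BUDGET : QUEUES;	/* min_t equivalent */
	int qid = last_qid;

	while (budget--) {
		qid = (qid + 1) % QUEUES;	/* wrap instead of stopping at the end */
		printf("checking queue %d\n", qid);
	}
	last_qid = qid;
}

int main(void)
{
	monitor_tick();	/* queues 1..4 */
	monitor_tick();	/* queues 5..8 */
	monitor_tick();	/* queues 9, 0, 1, 2 */
	return 0;
}
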
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 33c923e12..6d2cc2021 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -110,19 +110,6 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
-/* The max MTU size is configured to be the ethernet frame size without
- * the overhead of the ethernet header, which can have a VLAN header, and
- * a frame check sequence (FCS).
- * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
- */
-
-#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
- VLAN_HLEN - XDP_PACKET_HEADROOM - \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-
-#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
- ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
-
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -138,13 +125,18 @@ struct ena_napi {
struct napi_struct napi;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- struct ena_ring *xdp_ring;
u32 qid;
struct dim dim;
};
struct ena_tx_buffer {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ /* XDP buffer structure which is used for sending packets in
+ * the xdp queues
+ */
+ struct xdp_frame *xdpf;
+ };
/* num of ena desc for this specific skb
* (includes data desc and metadata desc)
*/
@@ -152,16 +144,14 @@ struct ena_tx_buffer {
/* num of buffers used by this skb */
u32 num_of_bufs;
- /* XDP buffer structure which is used for sending packets in
- * the xdp queues
- */
- struct xdp_frame *xdpf;
+ /* Total size of all buffers in bytes */
+ u32 total_tx_size;
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
/* Used for detect missing tx packets to limit the number of prints */
- u32 print_once;
+ u8 print_once;
/* Save the last jiffies to detect missing tx packets
*
* sets to non zero value on ena_start_xmit and set to zero on
@@ -421,47 +411,44 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
-enum ena_xdp_errors_t {
- ENA_XDP_ALLOWED = 0,
- ENA_XDP_CURRENT_MTU_TOO_LARGE,
- ENA_XDP_NO_ENOUGH_QUEUES,
-};
+int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ struct ena_tx_buffer *tx_info, bool is_xdp);
-enum ENA_XDP_ACTIONS {
- ENA_XDP_PASS = 0,
- ENA_XDP_TX = BIT(0),
- ENA_XDP_REDIRECT = BIT(1),
- ENA_XDP_DROP = BIT(2)
-};
-
-#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
-
-static inline bool ena_xdp_present(struct ena_adapter *adapter)
-{
- return !!adapter->xdp_bpf_prog;
-}
-
-static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+static inline void ena_increase_stat(u64 *statp, u64 cnt,
+ struct u64_stats_sync *syncp)
{
- return !!ring->xdp_bpf_prog;
+ u64_stats_update_begin(syncp);
+ (*statp) += cnt;
+ u64_stats_update_end(syncp);
}
-static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
- u32 queues)
+static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
- return 2 * queues <= adapter->max_num_io_queues;
-}
-
-static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
-{
- enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
-
- if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
- rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
- else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
- rc = ENA_XDP_NO_ENOUGH_QUEUES;
-
- return rc;
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
+int ena_xmit_common(struct ena_adapter *adapter,
+ struct ena_ring *ring,
+ struct ena_tx_buffer *tx_info,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ u16 next_to_use,
+ u32 bytes);
+void ena_unmap_tx_buff(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info);
+void ena_init_io_rings(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+ int first_index, int count);
+void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
+void ena_down(struct ena_adapter *adapter);
+int ena_up(struct ena_adapter *adapter);
+void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring);
+void ena_update_ring_numa_node(struct ena_ring *tx_ring,
+ struct ena_ring *rx_ring);
#endif /* !(ENA_H) */
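
ena_increase_stat() and ena_ring_tx_doorbell() move into the header as inlines so both ena_netdev.c and the new ena_xdp.c can use them; the former is the writer half of the u64_stats seqcount pattern. The matching reader half, as ena_get_stats64() uses it earlier in this patch, looks like the following kernel-context sketch (the helper name is illustrative):

static u64 ena_read_rx_packets(struct ena_ring *rx_ring)
{
	unsigned int start;
	u64 packets;

	/* Retry the snapshot until the writer-side seqcount is stable */
	do {
		start = u64_stats_fetch_begin(&rx_ring->syncp);
		packets = rx_ring->rx_stats.cnt;
	} while (u64_stats_fetch_retry(&rx_ring->syncp, start));

	return packets;
}
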
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
new file mode 100644
index 000000000..34d73c72f
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include "ena_xdp.h"
+
+static int validate_xdp_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
+
+ return handle_invalid_req_id(tx_ring, req_id, tx_info, true);
+}
+
+static int ena_xdp_tx_map_frame(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_adapter *adapter = tx_ring->adapter;
+ struct ena_com_buf *ena_buf;
+ int push_len = 0;
+ dma_addr_t dma;
+ void *data;
+ u32 size;
+
+ tx_info->xdpf = xdpf;
+ data = tx_info->xdpf->data;
+ size = tx_info->xdpf->len;
+
+ if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ /* Designate part of the packet for LLQ */
+ push_len = min_t(u32, size, tx_ring->tx_max_header_size);
+
+ ena_tx_ctx->push_header = data;
+
+ size -= push_len;
+ data += push_len;
+ }
+
+ ena_tx_ctx->header_len = push_len;
+
+ if (size > 0) {
+ dma = dma_map_single(tx_ring->dev,
+ data,
+ size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
+ goto error_report_dma_error;
+
+ tx_info->map_linear_data = 0;
+
+ ena_buf = tx_info->bufs;
+ ena_buf->paddr = dma;
+ ena_buf->len = size;
+
+ ena_tx_ctx->ena_bufs = ena_buf;
+ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
+ }
+
+ return 0;
+
+error_report_dma_error:
+ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+ &tx_ring->syncp);
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+
+ return -EINVAL;
+}
+
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags)
+{
+ struct ena_com_tx_ctx ena_tx_ctx = {};
+ struct ena_tx_buffer *tx_info;
+ u16 next_to_use, req_id;
+ int rc;
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ rc = ena_xdp_tx_map_frame(tx_ring, tx_info, xdpf, &ena_tx_ctx);
+ if (unlikely(rc))
+ goto err;
+
+ ena_tx_ctx.req_id = req_id;
+
+ rc = ena_xmit_common(adapter,
+ tx_ring,
+ tx_info,
+ &ena_tx_ctx,
+ next_to_use,
+ xdpf->len);
+ if (rc)
+ goto error_unmap_dma;
+
+ /* trigger the dma engine. ena_ring_tx_doorbell()
+ * calls a memory barrier inside it.
+ */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ return rc;
+
+error_unmap_dma:
+ ena_unmap_tx_buff(tx_ring, tx_info);
+err:
+ tx_info->xdpf = NULL;
+
+ return rc;
+}
+
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_ring *tx_ring;
+ int qid, i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return -ENETDOWN;
+
+ /* We assume that all rings have the same XDP program */
+ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+ return -ENXIO;
+
+ qid = smp_processor_id() % adapter->xdp_num_queues;
+ qid += adapter->xdp_first_ring;
+ tx_ring = &adapter->tx_ring[qid];
+
+	/* Other CPU ids might try to send through this queue */
+ spin_lock(&tx_ring->xdp_tx_lock);
+
+ for (i = 0; i < n; i++) {
+ if (ena_xdp_xmit_frame(tx_ring, adapter, frames[i], 0))
+ break;
+ nxmit++;
+ }
+
+ /* Ring doorbell to make device aware of the packets */
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(tx_ring);
+
+ spin_unlock(&tx_ring->xdp_tx_lock);
+
+ /* Return number of packets sent */
+ return nxmit;
+}
+
+static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
+{
+ adapter->xdp_first_ring = adapter->num_io_queues;
+ adapter->xdp_num_queues = adapter->num_io_queues;
+
+ ena_init_io_rings(adapter,
+ adapter->xdp_first_ring,
+ adapter->xdp_num_queues);
+}
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
+{
+ u32 xdp_first_ring = adapter->xdp_first_ring;
+ u32 xdp_num_queues = adapter->xdp_num_queues;
+ int rc = 0;
+
+ rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto setup_err;
+
+ rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
+ if (rc)
+ goto create_err;
+
+ return 0;
+
+create_err:
+ ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
+setup_err:
+ return rc;
+}
+
+/* Provides a way for both kernel and bpf-prog to know
+ * more about the RX-queue a given XDP frame arrived on.
+ */
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
+{
+ int rc;
+
+ rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
+
+ netif_dbg(rx_ring->adapter, ifup, rx_ring->netdev, "Registering RX info for queue %d",
+ rx_ring->qid);
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ goto err;
+ }
+
+ rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
+
+ if (rc) {
+ netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
+ "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
+ rx_ring->qid, rc);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+
+err:
+ return rc;
+}
+
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
+{
+ netif_dbg(rx_ring->adapter, ifdown, rx_ring->netdev,
+ "Unregistering RX info for queue %d",
+ rx_ring->qid);
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+}
+
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count)
+{
+ struct bpf_prog *old_bpf_prog;
+ struct ena_ring *rx_ring;
+ int i = 0;
+
+ for (i = first; i < count; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+ if (!old_bpf_prog && prog) {
+ rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
+ } else if (old_bpf_prog && !prog) {
+ rx_ring->rx_headroom = NET_SKB_PAD;
+ }
+ }
+}
+
+static void ena_xdp_exchange_program(struct ena_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
+
+ ena_xdp_exchange_program_rx_in_range(adapter,
+ prog,
+ 0,
+ adapter->num_io_queues);
+
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+}
+
+static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
+{
+ bool was_up;
+ int rc;
+
+ was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+
+ if (was_up)
+ ena_down(adapter);
+
+ adapter->xdp_first_ring = 0;
+ adapter->xdp_num_queues = 0;
+ ena_xdp_exchange_program(adapter, NULL);
+ if (was_up) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ int rc, prev_mtu;
+ bool is_up;
+
+ is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ rc = ena_xdp_allowed(adapter);
+ if (rc == ENA_XDP_ALLOWED) {
+ old_bpf_prog = adapter->xdp_bpf_prog;
+ if (prog) {
+ if (!is_up) {
+ ena_init_all_xdp_queues(adapter);
+ } else if (!old_bpf_prog) {
+ ena_down(adapter);
+ ena_init_all_xdp_queues(adapter);
+ }
+ ena_xdp_exchange_program(adapter, prog);
+
+ netif_dbg(adapter, drv, adapter->netdev, "Set a new XDP program\n");
+
+ if (is_up && !old_bpf_prog) {
+ rc = ena_up(adapter);
+ if (rc)
+ return rc;
+ }
+ xdp_features_set_redirect_target(netdev, false);
+ } else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
+ netif_dbg(adapter, drv, adapter->netdev, "Removing XDP program\n");
+
+ rc = ena_destroy_and_free_all_xdp_queues(adapter);
+ if (rc)
+ return rc;
+ }
+
+ prev_mtu = netdev->max_mtu;
+ netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
+
+ if (!old_bpf_prog)
+ netif_info(adapter, drv, adapter->netdev,
+ "XDP program is set, changing the max_mtu from %d to %d",
+ prev_mtu, netdev->max_mtu);
+
+ } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
+ netdev->mtu, ENA_XDP_MAX_MTU);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
+ return -EINVAL;
+ } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
+ adapter->num_io_queues, adapter->max_num_io_queues);
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
+ * program as well as to query the current xdp program id.
+ */
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ena_xdp_set(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
+{
+ u32 total_done = 0;
+ u16 next_to_clean;
+ int tx_pkts = 0;
+ u16 req_id;
+ int rc;
+
+ if (unlikely(!tx_ring))
+ return 0;
+ next_to_clean = tx_ring->next_to_clean;
+
+ while (tx_pkts < budget) {
+ struct ena_tx_buffer *tx_info;
+ struct xdp_frame *xdpf;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
+ &req_id);
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL, true);
+ break;
+ }
+
+ /* validate that the request id points to a valid xdp_frame */
+ rc = validate_xdp_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+
+ tx_info->last_jiffies = 0;
+
+ xdpf = tx_info->xdpf;
+ tx_info->xdpf = NULL;
+ ena_unmap_tx_buff(tx_ring, tx_info);
+ xdp_return_frame(xdpf);
+
+ tx_pkts++;
+ total_done += tx_info->tx_descs;
+ tx_ring->free_ids[next_to_clean] = req_id;
+ next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
+ tx_ring->ring_size);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
+ }
+
+ tx_ring->next_to_clean = next_to_clean;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
+ ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
+
+ netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
+ "tx_poll: q %d done. total pkts: %d\n",
+ tx_ring->qid, tx_pkts);
+
+ return tx_pkts;
+}
+
+/* This is the XDP napi callback. XDP queues use a separate napi callback
+ * than Rx/Tx queues.
+ */
+int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+{
+ struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
+ struct ena_ring *tx_ring;
+ u32 work_done;
+ int ret;
+
+ tx_ring = ena_napi->tx_ring;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+ test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
+ napi_complete_done(napi, 0);
+ return 0;
+ }
+
+ work_done = ena_clean_xdp_irq(tx_ring, budget);
+
+ /* If the device is about to reset or down, avoid unmask
+ * the interrupt and return 0 so NAPI won't reschedule
+ */
+ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
+ napi_complete_done(napi, 0);
+ ret = 0;
+ } else if (budget > work_done) {
+ ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
+ &tx_ring->syncp);
+ if (napi_complete_done(napi, work_done))
+ ena_unmask_interrupt(tx_ring, NULL);
+
+ ena_update_ring_numa_node(tx_ring, NULL);
+ ret = work_done;
+ } else {
+ ret = budget;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.tx_poll++;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies;
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.h b/drivers/net/ethernet/amazon/ena/ena_xdp.h
new file mode 100644
index 000000000..cfd827284
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_XDP_H
+#define ENA_XDP_H
+
+#include "ena_netdev.h"
+#include <linux/bpf_trace.h>
+
+/* The max MTU size is configured to be the ethernet frame size without
+ * the overhead of the ethernet header, which can have a VLAN header, and
+ * a frame check sequence (FCS).
+ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
+ */
+#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
+ VLAN_HLEN - XDP_PACKET_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
+ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
+
+enum ENA_XDP_ACTIONS {
+ ENA_XDP_PASS = 0,
+ ENA_XDP_TX = BIT(0),
+ ENA_XDP_REDIRECT = BIT(1),
+ ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
+int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
+void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
+ struct bpf_prog *prog,
+ int first, int count);
+int ena_xdp_io_poll(struct napi_struct *napi, int budget);
+int ena_xdp_xmit_frame(struct ena_ring *tx_ring,
+ struct ena_adapter *adapter,
+ struct xdp_frame *xdpf,
+ int flags);
+int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
+int ena_xdp_register_rxq_info(struct ena_ring *rx_ring);
+void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring);
+
+enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+ ENA_XDP_NO_ENOUGH_QUEUES,
+};
+
+static inline bool ena_xdp_present(struct ena_adapter *adapter)
+{
+ return !!adapter->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_present_ring(struct ena_ring *ring)
+{
+ return !!ring->xdp_bpf_prog;
+}
+
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
+{
+ return 2 * queues <= adapter->max_num_io_queues;
+}
+
+static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
+{
+ enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;
+
+ if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
+ rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
+ else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ rc = ENA_XDP_NO_ENOUGH_QUEUES;
+
+ return rc;
+}
+
+static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+{
+ u32 verdict = ENA_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ struct ena_ring *xdp_ring;
+ struct xdp_frame *xdpf;
+ u64 *xdp_stat;
+
+ xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
+
+ verdict = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ switch (verdict) {
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ }
+
+ /* Find xmit queue */
+ xdp_ring = rx_ring->xdp_ring;
+
+ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
+ XDP_XMIT_FLUSH))
+ xdp_return_frame(xdpf);
+
+ spin_unlock(&xdp_ring->xdp_tx_lock);
+ xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ verdict = ENA_XDP_TX;
+ break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ verdict = ENA_XDP_REDIRECT;
+ break;
+ }
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_DROP:
+ xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ verdict = ENA_XDP_DROP;
+ break;
+ case XDP_PASS:
+ xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ verdict = ENA_XDP_PASS;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ verdict = ENA_XDP_DROP;
+ }
+
+ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
+
+ return verdict;
+}
+#endif /* ENA_XDP_H */
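
Because the ENA_XDP_* verdicts are bit flags, a caller can OR the results of ena_xdp_execute() across a whole RX batch and take the per-batch actions once at the end, with ENA_XDP_FORWARDED covering both forwarding cases. The ENA RX path itself is not in this hunk; the following only sketches how such an epilogue could look, with foo_ring_tx_doorbell() as a hypothetical helper:

static void foo_xdp_finish_batch(struct ena_ring *rx_ring, u32 verdicts)
{
	/* One xdp_do_flush() per batch covers every XDP_REDIRECT seen. */
	if (verdicts & ENA_XDP_REDIRECT)
		xdp_do_flush();

	/* Ring the XDP TX doorbell once if anything was forwarded. */
	if (verdicts & ENA_XDP_TX)
		foo_ring_tx_doorbell(rx_ring->xdp_ring);	/* hypothetical */
}
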
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 7658a7286..dd4f0965b 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -595,6 +595,16 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+void pdsc_pci_reset_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
+ struct pci_dev *pdev = pdsc->pdev;
+
+ pci_dev_get(pdev);
+ pci_reset_function(pdev);
+ pci_dev_put(pdev);
+}
+
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
u8 fw_status;
@@ -609,8 +619,8 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pdsc_reset_prepare(pdsc->pdev);
- pdsc_reset_done(pdsc->pdev);
+ /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
+ queue_work(pdsc->wq, &pdsc->pci_reset_work);
}
void pdsc_health_thread(struct work_struct *work)
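
Queueing the reset, instead of calling pdsc_reset_prepare()/pdsc_reset_done() inline, matters because pci_reset_function() invokes the driver's reset hooks, and those hooks flush the very thread that detected the bad PCI state; the get/put pair keeps the struct pci_dev alive for the duration. A generic sketch of the deferred-reset pattern, assuming a driver-private workqueue and hypothetical foo_* names:

struct foo_priv {
	struct pci_dev *pdev;
	struct workqueue_struct *wq;
	struct work_struct reset_work;
};

static void foo_reset_work(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv,
					     reset_work);

	pci_dev_get(priv->pdev);	/* hold the device across the reset */
	pci_reset_function(priv->pdev);	/* runs ->reset_prepare/->reset_done */
	pci_dev_put(priv->pdev);
}

static void foo_on_bad_pci(struct foo_priv *priv)
{
	/* Never reset synchronously from a path the reset hooks flush. */
	queue_work(priv->wq, &priv->reset_work);
}
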
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 110c4b826..401ff56eb 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -197,6 +197,7 @@ struct pdsc {
struct pdsc_qcq notifyqcq;
u64 last_eid;
struct pdsc_viftype *viftype_status;
+ struct work_struct pci_reset_work;
};
/** enum pds_core_dbell_bits - bitwise composition of dbell values.
@@ -283,9 +284,6 @@ int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
-void pdsc_reset_prepare(struct pci_dev *pdev);
-void pdsc_reset_done(struct pci_dev *pdev);
-
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
void pdsc_intr_free(struct pdsc *pdsc, int index);
@@ -315,5 +313,6 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
void pdsc_fw_down(struct pdsc *pdsc);
void pdsc_fw_up(struct pdsc *pdsc);
+void pdsc_pci_reset_thread(struct work_struct *work);
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e65a1632d..bfb79c5aa 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -229,6 +229,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc)
.reset.opcode = PDS_CORE_CMD_RESET,
};
+ if (!pdsc_is_fw_running(pdsc))
+ return 0;
+
return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
}
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index cdbf053b5..a375d612d 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -238,6 +238,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
pdsc->wq = create_singlethread_workqueue(wq_name);
INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ INIT_WORK(&pdsc->pci_reset_work, pdsc_pci_reset_thread);
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
@@ -451,6 +452,9 @@ static void pdsc_remove(struct pci_dev *pdev)
static void pdsc_stop_health_thread(struct pdsc *pdsc)
{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->health_work.func)
cancel_work_sync(&pdsc->health_work);
@@ -458,11 +462,14 @@ static void pdsc_stop_health_thread(struct pdsc *pdsc)
static void pdsc_restart_health_thread(struct pdsc *pdsc)
{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
mod_timer(&pdsc->wdtimer, jiffies + 1);
}
-void pdsc_reset_prepare(struct pci_dev *pdev)
+static void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
@@ -471,10 +478,11 @@ void pdsc_reset_prepare(struct pci_dev *pdev)
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
}
-void pdsc_reset_done(struct pci_dev *pdev)
+static void pdsc_reset_done(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
struct device *dev = pdsc->dev;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 32fab5e77..58e7e88aa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -527,47 +527,48 @@ static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
return ARRAY_SIZE(pdata->rss_table);
}
-static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int xgbe_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
- indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
- MAC_RSSDR, DMCH);
+ rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
}
- if (key)
- memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+ if (rxfh->key)
+ memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int xgbe_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int ret;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "unsupported hash function\n");
return -EOPNOTSUPP;
}
- if (indir) {
- ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (rxfh->indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
if (ret)
return ret;
}
- if (key) {
- ret = hw_if->set_rss_hash_key(pdata, key);
+ if (rxfh->key) {
+ ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
if (ret)
return ret;
}
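
The xgbe conversion above follows the tree-wide ethtool change that folds the old indir/key/hfunc arguments into a single struct ethtool_rxfh_param (and gives set_rxfh an extack). A minimal .get_rxfh in the new style, for a hypothetical foo driver with a fixed Toeplitz key, where FOO_RSS_TABLE_SIZE and the priv fields are assumed:

static int foo_get_rxfh(struct net_device *ndev,
			struct ethtool_rxfh_param *rxfh)
{
	struct foo_priv *priv = netdev_priv(ndev);
	unsigned int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	if (rxfh->indir)
		for (i = 0; i < FOO_RSS_TABLE_SIZE; i++)
			rxfh->indir[i] = priv->rss_table[i];

	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));

	return 0;
}
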
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index ad136ed49..f01a1e566 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -495,7 +495,7 @@ struct xgbe_ring {
* a DMA channel.
*/
struct xgbe_channel {
- char name[16];
+ char name[20];
/* Address of private data area for device */
struct xgbe_prv_data *pdata;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index ac4ea93bd..18a6c8d99 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -447,8 +447,8 @@ static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
return sizeof(cfg->aq_rss.hash_secret_key);
}
-static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int aq_ethtool_get_rss(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -456,21 +456,21 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
cfg = aq_nic_get_cfg(aq_nic);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
- if (indir) {
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ if (rxfh->indir) {
for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
- indir[i] = cfg->aq_rss.indirection_table[i];
+ rxfh->indir[i] = cfg->aq_rss.indirection_table[i];
}
- if (key)
- memcpy(key, cfg->aq_rss.hash_secret_key,
+ if (rxfh->key)
+ memcpy(rxfh->key, cfg->aq_rss.hash_secret_key,
sizeof(cfg->aq_rss.hash_secret_key));
return 0;
}
-static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int aq_ethtool_set_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(netdev);
struct aq_nic_cfg_s *cfg;
@@ -482,16 +482,17 @@ static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
rss_entries = cfg->aq_rss.indirection_table_size;
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < rss_entries; i++)
- cfg->aq_rss.indirection_table[i] = indir[i];
+ cfg->aq_rss.indirection_table[i] = rxfh->indir[i];
/* Fill out the rss hash key */
- if (key) {
- memcpy(cfg->aq_rss.hash_secret_key, key,
+ if (rxfh->key) {
+ memcpy(cfg->aq_rss.hash_secret_key, rxfh->key,
sizeof(cfg->aq_rss.hash_secret_key));
err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
&cfg->aq_rss);
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e551ffaed..11e8996b3 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -284,7 +284,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
ax88796c_proc_tx_hdr(&info, skb->ip_summed);
/* SOP and SEG header */
- memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.tx_overhead, TX_OVERHEAD);
/* Write SPI TXQ header */
memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
index 4a83c991d..68a09edec 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.h
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -25,7 +25,7 @@
#define AX88796C_PHY_REGDUMP_LEN 14
#define AX88796C_PHY_ID 0x10
-#define TX_OVERHEAD 8
+#define TX_OVERHEAD sizeof_field(struct tx_pkt_info, tx_overhead)
#define TX_EOP_SIZE 4
#define AX_MCAST_FILTER_SIZE 8
@@ -549,8 +549,10 @@ struct tx_eop_header {
};
struct tx_pkt_info {
- struct tx_sop_header sop;
- struct tx_segment_header seg;
+ struct_group(tx_overhead,
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ );
struct tx_eop_header eop;
u16 pkt_len;
u16 seq_num;
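
struct_group() wraps the two headers in an anonymous union so sop and seg can still be named individually while also forming one addressable member, and sizeof_field() then derives TX_OVERHEAD from that member instead of a hand-counted 8; the memcpy() in ax88796c_tx_fixup() therefore copies a single object as far as the fortified string helpers are concerned. A reduced, self-contained sketch of the idiom, assuming two 4-byte headers:

#include <linux/stddef.h>	/* struct_group(), sizeof_field() */
#include <linux/string.h>
#include <linux/types.h>

struct hdr_a { __be32 w; };
struct hdr_b { __be32 w; };

struct pkt_info {
	struct_group(both_hdrs,		/* one named span over two members */
		struct hdr_a a;
		struct hdr_b b;
	);
	u16 pkt_len;
};

#define PKT_OVERHEAD sizeof_field(struct pkt_info, both_hdrs)

static void push_headers(u8 *dst, const struct pkt_info *info)
{
	/* Copies a and b as one 8-byte object; fortified memcpy() no
	 * longer warns about reading across member boundaries.
	 */
	memcpy(dst, &info->both_hdrs, PKT_OVERHEAD);
}
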
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 9cae5a309..529172a87 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -391,7 +391,9 @@ static void umac_reset(struct bcmasp_intf *intf)
umac_wl(intf, 0x0, UMC_CMD);
umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
usleep_range(10, 100);
- umac_wl(intf, 0x0, UMC_CMD);
+ /* We hold the UniMAC in reset and only bring it out of
+ * reset once the PHY link comes up.
+ */
}
static void umac_set_hw_addr(struct bcmasp_intf *intf,
@@ -411,6 +413,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
u32 reg;
reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
if (enable)
reg |= mask;
else
@@ -429,13 +433,10 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_FRM_LEN);
umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -478,10 +479,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -656,6 +663,12 @@ static void bcmasp_adj_link(struct net_device *dev)
UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
UMC_CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
umac_wl(intf, reg, UMC_CMD);
intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
@@ -684,6 +697,8 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
DMA_FROM_DEVICE);
@@ -785,6 +800,7 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
@@ -895,6 +911,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
@@ -1061,9 +1079,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
umac_init(intf);
- /* Disable the UniMAC RX/TX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
-
umac_set_hw_addr(intf, dev->dev_addr);
intf->old_duplex = -1;
@@ -1083,9 +1098,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
bcmasp_enable_rx(intf, 1);
- /* Turn on UniMAC TX/RX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
-
intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
bcmasp_netif_start(dev);
@@ -1095,6 +1107,7 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
err_reclaim_tx:
+ netif_napi_del(&intf->tx_napi);
bcmasp_reclaim_free_all_tx(intf);
err_phy_disconnect:
if (phydev)
@@ -1321,7 +1334,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
if (intf->wolopts & WAKE_FILTER)
bcmasp_netfilt_suspend(intf);
- /* UniMAC receive needs to be turned on */
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
if (intf->parent->wol_irq > 0) {
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e..1be6d1403 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
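
Guarding the re-init with netif_running() matters because b44_halt()/b44_init_rings()/b44_init_hw() assume rings that only exist while the interface is open; when it is closed, updating the software flags is enough and the hardware picks them up at the next open. The generic shape of that fix, with hypothetical foo_* helpers:

static int foo_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *pause)
{
	struct foo_priv *priv = netdev_priv(dev);

	foo_record_pause_flags(priv, pause);	/* software state only */

	if (netif_running(dev))
		foo_apply_pause_to_hw(priv);	/* safe: rings exist now */

	return 0;
}
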
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 3e7c8671c..72df1bb10 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 9b83d5361..50b8e97a8 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6e4f36aaf..36f9bad28 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 0b21fd5bd..77425c7a3 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 448a1b90d..6ffdc4229 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb5..528441b28 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
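
The rewrite fixes two problems at once: the old snprintf() sized its write against a hard-coded 32 rather than buf_len, and writing at buf + strlen(bp->fw_ver) could start past the buffer for a long firmware string. scnprintf() returns the number of characters actually stored (excluding the NUL), so chained formatting stays bounded. An isolated illustration of the difference, not the bnx2x code itself:

static void fill_ver(char *buf, size_t buf_len,
		     const char *fw_ver, u32 bc_ver)
{
	int n;

	/* snprintf() would return the length the output *would* have
	 * had; scnprintf() returns what was written, so "buf + n" can
	 * never step past the end of the buffer.
	 */
	n = scnprintf(buf, buf_len, "%s", fw_ver);
	scnprintf(buf + n, buf_len - n, "bc %d.%d.%d",
		  (bc_ver >> 16) & 0xff, (bc_ver >> 8) & 0xff,
		  bc_ver & 0xff);
}
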
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bda3ccc28..0bc7690cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -3486,16 +3486,15 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnx2x_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
/* Get the current configuration of the RSS indirection table */
@@ -3511,13 +3510,14 @@ static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
* queue.
*/
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
- indir[i] = ind_table[i] - bp->fp->cl_id;
+ rxfh->indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnx2x_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
@@ -3525,11 +3525,12 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
@@ -3542,7 +3543,7 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
* align the received table to the Client ID of the leading RSS
* queue
*/
- bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = rxfh->indir[i] + bp->fp->cl_id;
}
if (bp->state == BNX2X_STATE_OPEN)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513f..ea310057f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need at least 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need at least 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
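
All of these formatters treat *len as the remaining capacity in the caller's buffer: they must bounds-check before storing and then decrement by the visible characters consumed, which is where the old code went wrong (it stored five bytes, four characters plus the NUL, but subtracted five, and the null formatter decremented even a zero-length buffer). A reduced sketch of the convention as this hunk implies it; the exact accounting is a reading of the hunk, not a documented API:

static int fmt_ver_u32(u32 v, u8 *str, u16 *len)
{
	if (*len < 5)		/* four payload chars plus the NUL */
		return -EINVAL;

	str[0] = v & 0xff;
	str[1] = (v >> 8) & 0xff;
	str[2] = (v >> 16) & 0xff;
	str[3] = (v >> 24) & 0xff;
	str[4] = '\0';
	*len -= 4;		/* count only the visible characters */
	return 0;
}
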
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 22c8bfb5e..e133a412a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -120,6 +120,10 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
+ [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" },
+ [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" },
[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
@@ -174,6 +178,10 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 },
+ { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 },
+ { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 },
+ { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 },
{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
@@ -254,22 +262,28 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
- writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
(db)->doorbell)
+#define BNXT_DB_NQ_P7(db, idx) \
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
+
#define BNXT_DB_CQ_ARM(db, idx) \
- writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
+ writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
- (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
+ DB_RING_IDX(db, idx), (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ BNXT_DB_NQ_P7(db, idx);
+ else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_P5(db, idx);
else
BNXT_DB_CQ(db, idx);
@@ -277,7 +291,7 @@ static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_ARM_P5(db, idx);
else
BNXT_DB_CQ_ARM(db, idx);
@@ -285,9 +299,9 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
- RING_CMP(idx), db->doorbell);
+ DB_RING_IDX(db, idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
@@ -321,7 +335,7 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
else
set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
@@ -331,16 +345,16 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
}
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- int idx)
+ u16 curr)
{
struct bnxt_napi *bnapi = txr->bnapi;
if (bnapi->tx_fault)
return;
- netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
- txr->txq_index, bnapi->tx_pkts,
- txr->tx_cons, txr->tx_prod, idx);
+ netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
+ txr->txq_index, txr->tx_hw_cons,
+ txr->tx_cons, txr->tx_prod, curr);
WARN_ON_ONCE(1);
bnapi->tx_fault = 1;
bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
@@ -381,6 +395,8 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
+ /* Sync BD data before updating doorbell */
+ wmb();
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
@@ -388,7 +404,7 @@ static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- struct tx_bd *txbd;
+ struct tx_bd *txbd, *txbd0;
struct tx_bd_ext *txbd1;
struct netdev_queue *txq;
int i;
@@ -430,11 +446,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- txbd->tx_bd_opaque = prod;
-
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->skb = skb;
tx_buf->nr_frags = last_frag;
@@ -519,12 +533,15 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
txbd->tx_bd_haddr = txr->data_mapping;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
tx_push->doorbell =
- cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+ cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
+ DB_RING_IDX(&txr->tx_db, prod));
WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1;
@@ -562,19 +579,29 @@ normal_tx:
((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) {
+ bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
u32 hdr_len;
- if (skb->encapsulation)
- hdr_len = skb_inner_tcp_all_headers(skb);
- else
+ if (skb->encapsulation) {
+ if (udp_gso)
+ hdr_len = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ hdr_len = skb_inner_tcp_all_headers(skb);
+ } else if (udp_gso) {
+ hdr_len = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ } else {
hdr_len = skb_tcp_all_headers(skb);
+ }
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -601,11 +628,12 @@ normal_tx:
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
+ txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
@@ -614,7 +642,7 @@ normal_tx:
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -632,22 +660,26 @@ normal_tx:
skb_tx_timestamp(skb);
- /* Sync BD data before updating doorbell */
- wmb();
-
prod = NEXT_TX(prod);
WRITE_ONCE(txr->tx_prod, prod);
- if (!netdev_xmit_more() || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
bnxt_txr_db_kick(bp, txr, prod);
- else
+ } else {
+ if (free_size >= bp->tx_wake_thresh)
+ txbd0->tx_bd_len_flags_type |=
+ cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
txr->kick_pending = 1;
+ }
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (netdev_xmit_more() && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push) {
+ txbd0->tx_bd_len_flags_type &=
+ cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
bnxt_txr_db_kick(bp, txr, prod);
+ }
netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
bp->tx_wake_thresh);
@@ -662,7 +694,7 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod);
@@ -670,7 +702,7 @@ tx_dma_error:
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
@@ -686,31 +718,32 @@ tx_kick_pending:
return NETDEV_TX_OK;
}
-static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
- u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
- int nr_pkts = bnapi->tx_pkts;
- int i;
+ u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
+ u16 cons = txr->tx_cons;
+ int tx_pkts = 0;
- for (i = 0; i < nr_pkts; i++) {
+ while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
int j, last;
- tx_buf = &txr->tx_buf_ring[cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
cons = NEXT_TX(cons);
skb = tx_buf->skb;
tx_buf->skb = NULL;
if (unlikely(!skb)) {
- bnxt_sched_reset_txr(bp, txr, i);
+ bnxt_sched_reset_txr(bp, txr, cons);
return;
}
+ tx_pkts++;
tx_bytes += skb->len;
if (tx_buf->is_push) {
@@ -724,7 +757,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
for (j = 0; j < last; j++) {
cons = NEXT_TX(cons);
- tx_buf = &txr->tx_buf_ring[cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -732,7 +765,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
DMA_TO_DEVICE);
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
skb = NULL;
@@ -747,14 +780,25 @@ next_tx_int:
dev_consume_skb_any(skb);
}
- bnapi->tx_pkts = 0;
WRITE_ONCE(txr->tx_cons, cons);
- __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
+ __netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
}
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
+ __bnxt_tx_int(bp, txr, budget);
+ }
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+}
+
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
@@ -803,8 +847,8 @@ static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp)
{
- struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) {
@@ -837,9 +881,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt *bp = rxr->bnapi->bp;
struct rx_bd *cons_bd, *prod_bd;
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
@@ -847,8 +892,8 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
- cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
+ cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
@@ -868,7 +913,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
- &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
struct page *page;
dma_addr_t mapping;
@@ -885,7 +930,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
__set_bit(sw_prod, rxr->rx_agg_bmap);
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
- rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
rx_agg_buf->page = page;
rx_agg_buf->offset = offset;
@@ -927,7 +972,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
bool p5_tpa = false;
u32 i;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
@@ -961,13 +1006,13 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
prod_rx_buf->mapping = cons_rx_buf->mapping;
- prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
prod_bd->rx_bd_opaque = sw_prod;
prod = NEXT_RX_AGG(prod);
- sw_prod = NEXT_RX_AGG(sw_prod);
+ sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -1094,7 +1139,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
u32 i, total_frag_len = 0;
bool p5_tpa = false;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
@@ -1249,7 +1294,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
agg_bufs = TPA_END_AGG_BUFS(tpa_end);
@@ -1290,8 +1335,39 @@ static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
return map->agg_id_tbl[agg_id];
}
+static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->cfa_code_valid = 1;
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
+ tpa_info->vlan_valid = 0;
+ if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+ tpa_info->vlan_valid = 1;
+ tpa_info->metadata =
+ le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ }
+}
+
+static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_start_cmp *tpa_start,
+ struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+ tpa_info->vlan_valid = 0;
+ if (TPA_START_VLAN_VALID(tpa_start)) {
+ u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
+ u32 vlan_proto = ETH_P_8021Q;
+
+ tpa_info->vlan_valid = 1;
+ if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
+ vlan_proto = ETH_P_8021AD;
+ tpa_info->metadata = vlan_proto << 16 |
+ TPA_START_METADATA0_TCI(tpa_start1);
+ }
+}
+
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
- struct rx_tpa_start_cmp *tpa_start,
+ u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -1300,7 +1376,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_bd *prod_bd;
dma_addr_t mapping;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
} else {
@@ -1309,7 +1385,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
- prod_rx_buf = &rxr->rx_buf_ring[prod];
+ prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons ||
@@ -1320,17 +1396,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
bnxt_sched_reset_rxr(bp, rxr);
return;
}
- /* Store cfa_code in tpa_info to use in tpa_end
- * completion processing.
- */
- tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
prod_rx_buf->data = tpa_info->data;
prod_rx_buf->data_ptr = tpa_info->data_ptr;
mapping = tpa_info->mapping;
prod_rx_buf->mapping = mapping;
- prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
@@ -1343,12 +1415,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
RX_TPA_START_CMP_LEN_SHIFT;
if (likely(TPA_START_HASH_VALID(tpa_start))) {
- u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
-
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
+ if (TPA_START_IS_IPV6(tpa_start1))
+ tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
+ else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
@@ -1358,13 +1431,16 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
- tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
+ bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1);
+ else
+ bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
- cons = NEXT_RX(cons);
- rxr->rx_next_cons = NEXT_RX(cons);
+ cons = RING_RX(bp, NEXT_RX(cons));
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -1563,7 +1639,7 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
else
payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
@@ -1594,6 +1670,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct net_device *dev = bp->dev;
u8 *data_ptr, agg_bufs;
unsigned int len;
struct bnxt_tpa_info *tpa_info;
@@ -1611,7 +1688,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_END_AGG_ID_P5(tpa_end);
agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
@@ -1658,7 +1735,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1668,7 +1745,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1684,7 +1761,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1695,19 +1772,20 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
- skb->protocol =
- eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
+ if (tpa_info->cfa_code_valid)
+ dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code);
+ skb->protocol = eth_type_trans(skb, dev);
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
+ if (tpa_info->vlan_valid &&
+ (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
__be16 vlan_proto = htons(tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT);
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1774,6 +1852,64 @@ ts_valid:
return true;
}
+static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
+ struct rx_cmp *rxcmp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ __be16 vlan_proto;
+ u16 vtag;
+
+ if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ __le32 flags2 = rxcmp1->rx_cmp_flags2;
+ u32 meta_data;
+
+ if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
+ return skb;
+
+ meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (eth_type_vlan(vlan_proto))
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ else
+ goto vlan_err;
+ } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ if (RX_CMP_VLAN_VALID(rxcmp)) {
+ u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);
+
+ if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
+ vlan_proto = htons(ETH_P_8021Q);
+ else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
+ vlan_proto = htons(ETH_P_8021AD);
+ else
+ goto vlan_err;
+ vtag = RX_CMP_METADATA0_TCI(rxcmp1);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
+ }
+ }
+ return skb;
+vlan_err:
+ dev_kfree_skb(skb);
+ return NULL;
+}
+
+static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp,
+ struct rx_cmp *rxcmp)
+{
+ u8 ext_op;
+
+ ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp);
+ switch (ext_op) {
+ case EXT_OP_INNER_4:
+ case EXT_OP_OUTER_4:
+ case EXT_OP_INNFL_3:
+ case EXT_OP_OUTFL_3:
+ return PKT_HASH_TYPE_L4;
+ default:
+ return PKT_HASH_TYPE_L3;
+ }
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1790,7 +1926,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
- u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -1827,8 +1963,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
dma_rmb();
prod = rxr->rx_prod;
- if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
- bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+ if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
+ bnxt_tpa_start(bp, rxr, cmp_type,
+ (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
*event |= BNXT_RX_EVENT;
@@ -1893,7 +2031,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err);
@@ -1913,11 +2051,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
@@ -1940,9 +2075,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -1953,60 +2086,52 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
if (RX_CMP_HASH_VALID(rxcmp)) {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
- enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+ enum pkt_hash_types type;
- /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
+ if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
+ type = bnxt_rss_ext_op(bp, rxcmp);
+ } else {
+ u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+
+ /* RSS profiles 1 and 3 with extract code 0 for inner
+ * 4-tuple
+ */
+ if (hash_type != 1 && hash_type != 3)
+ type = PKT_HASH_TYPE_L3;
+ else
+ type = PKT_HASH_TYPE_L4;
+ }
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
- cfa_code = RX_CMP_CFA_CODE(rxcmp1);
- skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
-
- if ((rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
- (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
- u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
- u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
- __be16 vlan_proto = htons(meta_data >>
- RX_CMP_FLAGS2_METADATA_TPID_SFT);
+ if (cmp_type == CMP_TYPE_RX_L2_CMP)
+ dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1));
+ skb->protocol = eth_type_trans(skb, dev);
- if (eth_type_vlan(vlan_proto)) {
- __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
- } else {
- dev_kfree_skb(skb);
+ if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) {
+ skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
+ if (!skb)
goto next_rx;
- }
}
skb_checksum_none_assert(skb);
@@ -2023,7 +2148,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) {
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u64 ns, ts;
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
@@ -2047,12 +2172,17 @@ next_rx:
next_rx_no_len:
rxr->rx_prod = NEXT_RX(prod);
- rxr->rx_next_cons = NEXT_RX(cons);
+ rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2086,7 +2216,8 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
- if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+ if (cmp_type == CMP_TYPE_RX_L2_CMP ||
+ cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
@@ -2098,7 +2229,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2146,6 +2277,10 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ return link_info->force_link_speed2;
if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
return link_info->force_pam4_link_speed;
return link_info->force_link_speed;
@@ -2153,6 +2288,28 @@ static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->req_link_speed = link_info->force_link_speed2;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ switch (link_info->req_link_speed) {
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ break;
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112;
+ break;
+ default:
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ }
+ return;
+ }
link_info->req_link_speed = link_info->force_link_speed;
link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
if (link_info->force_pam4_link_speed) {
@@ -2163,12 +2320,25 @@ static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ link_info->advertising = link_info->auto_link_speeds2;
+ return;
+ }
link_info->advertising = link_info->auto_link_speeds;
link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
}
static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->req_link_speed != link_info->force_link_speed2)
+ return true;
+ return false;
+ }
if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
link_info->req_link_speed != link_info->force_link_speed)
return true;
@@ -2180,6 +2350,13 @@ static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (link_info->advertising != link_info->auto_link_speeds2)
+ return true;
+ return false;
+ }
if (link_info->advertising != link_info->auto_link_speeds ||
link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
return true;
@@ -2430,7 +2607,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto async_event_process_exit;
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
@@ -2603,7 +2780,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
- int tx_pkts = 0;
int rx_pkts = 0;
u8 event = 0;
struct tx_cmp *txcmp;
@@ -2611,6 +2787,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 0;
cpr->had_work_done = 1;
while (1) {
+ u8 cmp_type;
int rc;
cons = RING_CMP(raw_cons);
@@ -2623,17 +2800,31 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
- tx_pkts++;
+ cmp_type = TX_CMP_TYPE(txcmp);
+ if (cmp_type == CMP_TYPE_TX_L2_CMP ||
+ cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
+ u32 opaque = txcmp->tx_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ u16 tx_freed;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ event |= BNXT_TX_CMP_EVENT;
+ if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
+ txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
+ else
+ txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque);
+ tx_freed = (txr->tx_hw_cons - txr->tx_cons) &
+ bp->tx_ring_mask;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
+ if (unlikely(tx_freed >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
cpr->has_more_work = 1;
break;
}
- } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ } else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
+ cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
else
@@ -2650,12 +2841,9 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
- } else if (unlikely((TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_DONE) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
- (TX_CMP_TYPE(txcmp) ==
- CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+ } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
+ cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) {
bnxt_hwrm_handler(bp, txcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -2670,7 +2858,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
xdp_do_flush();
if (event & BNXT_TX_EVENT) {
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
@@ -2680,7 +2868,6 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cpr->cp_raw_cons = raw_cons;
- bnapi->tx_pkts += tx_pkts;
bnapi->events |= event;
return rx_pkts;
}
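
A minimal standalone sketch (not part of the patch) of the tx_freed arithmetic above: unsigned subtraction of the 16-bit consumer indices followed by masking stays correct across index wraparound. The 512-entry ring size is hypothetical; the driver uses bp->tx_ring_mask.

#include <stdint.h>
#include <stdio.h>

#define TX_RING_MASK 0x1ff	/* hypothetical 512-entry ring */

static uint16_t tx_freed(uint16_t hw_cons, uint16_t sw_cons)
{
	/* Unsigned subtraction plus masking handles wraparound. */
	return (hw_cons - sw_cons) & TX_RING_MASK;
}

int main(void)
{
	printf("%u\n", tx_freed(105, 100));	/* 5, no wrap */
	printf("%u\n", tx_freed(3, 0x1fe));	/* 5, hw_cons wrapped past 0 */
	return 0;
}
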
@@ -2688,7 +2875,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
int budget)
{
- if (bnapi->tx_pkts && !bnapi->tx_fault)
+ if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault)
bnapi->tx_int(bp, bnapi, budget);
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
@@ -2701,7 +2888,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
}
- bnapi->events = 0;
+ bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -2841,10 +3028,10 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i, work_done = 0;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
- if (cpr2) {
+ if (cpr2->had_nqe_notify) {
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
@@ -2859,14 +3046,22 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int i;
- for (i = 0; i < 2; i++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
+ for (i = 0; i < cpr->cp_ring_count; i++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
struct bnxt_db_info *db;
- if (cpr2 && cpr2->had_work_done) {
+ if (cpr2->had_work_done) {
+ u32 tgl = 0;
+
+ if (dbr_type == DBR_TYPE_CQ_ARMALL) {
+ cpr2->had_nqe_notify = 0;
+ tgl = cpr2->toggle;
+ }
db = &cpr2->cp_db;
- bnxt_writeq(bp, db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp,
+ db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
+ DB_RING_IDX(db, cpr2->cp_raw_cons),
+ db->doorbell);
cpr2->had_work_done = 0;
}
}
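
The CQ doorbell write above now folds a toggle value and a masked ring index into the 64-bit doorbell word. A sketch of that composition follows; every field position and width here is hypothetical, as the real layout comes from the DBR_* macros in bnxt.h.

#include <stdint.h>

#define SKETCH_TGL_SFT	32		/* hypothetical toggle position */
#define SKETCH_IDX_MASK	0xffffffULL	/* hypothetical 24-bit index field */

static uint64_t mk_cq_doorbell(uint64_t db_key64, uint64_t dbr_type,
			       uint32_t tgl, uint32_t raw_cons)
{
	return db_key64 | dbr_type |
	       ((uint64_t)(tgl & 3) << SKETCH_TGL_SFT) |
	       ((uint64_t)raw_cons & SKETCH_IDX_MASK);
}
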
@@ -2893,6 +3088,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
}
while (1) {
+ u16 type;
+
cons = RING_CMP(raw_cons);
nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
@@ -2914,15 +3111,21 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
*/
dma_rmb();
- if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
+ type = le16_to_cpu(nqcmp->type);
+ if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
+ u32 cq_type = BNXT_NQ_HDL_TYPE(idx);
struct bnxt_cp_ring_info *cpr2;
/* No more budget for RX work */
- if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ if (budget && work_done >= budget &&
+ cq_type == BNXT_NQ_HDL_TYPE_RX)
break;
- cpr2 = cpr->cp_ring_arr[idx];
+ idx = BNXT_NQ_HDL_IDX(idx);
+ cpr2 = &cpr->cp_ring_arr[idx];
+ cpr2->had_nqe_notify = 1;
+ cpr2->toggle = NQE_CN_TOGGLE(type);
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
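
BNXT_NQ_HDL_TYPE() and BNXT_NQ_HDL_IDX() above split one 32-bit cq_handle into a ring type and an array index. A sketch of such a packing with a hypothetical 8/24-bit split (the real layout is defined in bnxt.h):

#include <stdint.h>

#define HDL_TYPE_SFT	24		/* hypothetical split point */
#define HDL_IDX_MASK	0xffffffu

static uint32_t set_nq_hdl(uint8_t type, uint32_t idx)
{
	return ((uint32_t)type << HDL_TYPE_SFT) | (idx & HDL_IDX_MASK);
}

static uint8_t nq_hdl_type(uint32_t hdl) { return hdl >> HDL_TYPE_SFT; }
static uint32_t nq_hdl_idx(uint32_t hdl) { return hdl & HDL_IDX_MASK; }
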
@@ -2937,8 +3140,9 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
poll_done:
- cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
- if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ cpr_rx = &cpr->cp_ring_arr[0];
+ if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
+ (bp->flags & BNXT_FLAG_DIM)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
@@ -3112,20 +3316,20 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
-static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{
- u8 init_val = mem_init->init_val;
- u16 offset = mem_init->offset;
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
u8 *p2 = p;
int i;
if (!init_val)
return;
- if (offset == BNXT_MEM_INVALID_OFFSET) {
+ if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
memset(p, init_val, len);
return;
}
- for (i = 0; i < len; i += mem_init->size)
+ for (i = 0; i < len; i += ctxm->entry_size)
*(p2 + i + offset) = init_val;
}
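
bnxt_init_ctx_mem() above either fills the whole block or writes one marker byte per entry at a fixed offset. A standalone sketch of the same pattern; the invalid-offset sentinel and the entry layout are hypothetical stand-ins for the BNXT_CTX_* definitions, and len is assumed to be a multiple of entry_size as in the driver.

#include <stdint.h>
#include <string.h>

#define SKETCH_INVALID_OFFSET 0xffff	/* stands in for BNXT_CTX_INIT_INVALID_OFFSET */

static void init_ctx_mem(uint8_t *p, int len, uint8_t init_val,
			 uint16_t offset, uint16_t entry_size)
{
	int i;

	if (!init_val)
		return;
	if (offset == SKETCH_INVALID_OFFSET) {
		memset(p, init_val, len);	/* no per-entry offset: fill all */
		return;
	}
	for (i = 0; i < len; i += entry_size)
		p[i + offset] = init_val;	/* one marker byte per entry */
}
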
@@ -3192,8 +3396,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->mem_init)
- bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ if (rmem->ctx_mem)
+ bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
@@ -3240,7 +3444,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
int i, j;
bp->max_tpa = MAX_TPA;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
@@ -3255,7 +3459,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!rxr->rx_tpa)
return -ENOMEM;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
for (j = 0; j < bp->max_tpa; j++) {
agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
@@ -3313,6 +3517,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.pool_size += bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
pp.napi = &rxr->bnapi->napi;
+ pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
@@ -3410,6 +3615,15 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
}
}
+#define BNXT_TC_TO_RING_BASE(bp, tc) \
+ ((tc) * (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC_OFF(bp, tx) \
+ ((tx) % (bp)->tx_nr_rings_per_tc)
+
+#define BNXT_RING_TO_TC(bp, tx) \
+ ((tx) / (bp)->tx_nr_rings_per_tc)
+
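
The three macros above encode a fixed grouping of TX rings by traffic class: ring index = tc * tx_nr_rings_per_tc + offset within the TC. A standalone round-trip check with a hypothetical size of 4 rings per TC:

#include <assert.h>

#define RINGS_PER_TC 4			/* hypothetical */

static int tc_to_ring_base(int tc) { return tc * RINGS_PER_TC; }
static int ring_to_tc(int tx)      { return tx / RINGS_PER_TC; }
static int ring_to_tc_off(int tx)  { return tx % RINGS_PER_TC; }

int main(void)
{
	assert(ring_to_tc(6) == 1);	/* ring 6 belongs to TC 1 */
	assert(ring_to_tc_off(6) == 2);	/* ...at offset 2 within that TC */
	assert(tc_to_ring_base(ring_to_tc(6)) + ring_to_tc_off(6) == 6);
	return 0;
}
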
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
int i, j, rc;
@@ -3465,7 +3679,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
- if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+ if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1))
j++;
}
return 0;
@@ -3548,36 +3762,33 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
bnxt_free_ring(bp, &ring->ring_mem);
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ if (!cpr->cp_ring_arr)
+ continue;
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- bnxt_free_ring(bp, &ring->ring_mem);
- bnxt_free_cp_arrays(cpr2);
- kfree(cpr2);
- cpr->cp_ring_arr[j] = NULL;
- }
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+
+ ring = &cpr2->cp_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+ bnxt_free_cp_arrays(cpr2);
}
+ kfree(cpr->cp_ring_arr);
+ cpr->cp_ring_arr = NULL;
+ cpr->cp_ring_count = 0;
}
}
-static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
+static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring_mem_info *rmem;
struct bnxt_ring_struct *ring;
- struct bnxt_cp_ring_info *cpr;
int rc;
- cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
- if (!cpr)
- return NULL;
-
rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
if (rc) {
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- return NULL;
+ return -ENOMEM;
}
ring = &cpr->cp_ring_struct;
rmem = &ring->ring_mem;
@@ -3590,23 +3801,26 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
if (rc) {
bnxt_free_ring(bp, rmem);
bnxt_free_cp_arrays(cpr);
- kfree(cpr);
- cpr = NULL;
}
- return cpr;
+ return rc;
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_base_vec, ulp_msix;
+ int tcs = bp->num_tc;
+ if (!tcs)
+ tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
ulp_base_vec = bnxt_get_ulp_msix_base(bp);
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- struct bnxt_cp_ring_info *cpr;
+ struct bnxt_cp_ring_info *cpr, *cpr2;
struct bnxt_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
if (!bnapi)
continue;
@@ -3624,35 +3838,55 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
else
ring->map_idx = i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
if (i < bp->rx_nr_rings) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
-
- cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
- cpr2->bnapi = bnapi;
+ cp_count++;
+ rx = 1;
+ }
+ if (i < bp->tx_nr_rings_xdp) {
+ cp_count++;
+ tx = 1;
+ } else if ((sh && i < bp->tx_nr_rings) ||
+ (!sh && i >= bp->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
}
- if ((sh && i < bp->tx_nr_rings) ||
- (!sh && i >= bp->rx_nr_rings)) {
- struct bnxt_cp_ring_info *cpr2 =
- bnxt_alloc_cp_sub_ring(bp);
- cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
- if (!cpr2)
- return -ENOMEM;
+ cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!cpr->cp_ring_arr)
+ return -ENOMEM;
+ cpr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr2 = &cpr->cp_ring_arr[k];
+ rc = bnxt_alloc_cp_sub_ring(bp, cpr2);
+ if (rc)
+ return rc;
cpr2->bnapi = bnapi;
+ cpr2->cp_idx = k;
+ if (!k && rx) {
+ bp->rx_ring[i].rx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNXT_TC_TO_RING_BASE(bp, tc) + j;
+ bp->tx_ring[n].tx_cpr = cpr2;
+ cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX;
+ }
}
+ if (tx)
+ j++;
}
return 0;
}
static void bnxt_init_ring_struct(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3697,18 +3931,16 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
rmem->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
- txr = bnapi->tx_ring;
- if (!txr)
- continue;
-
- ring = &txr->tx_ring_struct;
- rmem = &ring->ring_mem;
- rmem->nr_pages = bp->tx_nr_pages;
- rmem->page_size = HW_RXBD_RING_SIZE;
- rmem->pg_arr = (void **)txr->tx_desc_ring;
- rmem->dma_arr = txr->tx_desc_mapping;
- rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
- rmem->vmem = (void **)&txr->tx_buf_ring;
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
}
}
@@ -3799,6 +4031,9 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
bpf_prog_add(bp->xdp_prog, 1);
rxr->xdp_prog = bp->xdp_prog;
@@ -3829,11 +4064,10 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
ring->fw_ring_id = INVALID_HW_RING_ID;
cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
-
- if (!cpr2)
- continue;
+ if (!cpr->cp_ring_arr)
+ continue;
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
ring = &cpr2->cp_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -3876,6 +4110,11 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if (i >= bp->tx_nr_rings_xdp)
+ netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp,
+ NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
}
return 0;
@@ -3921,7 +4160,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings;
#endif
@@ -3952,13 +4191,22 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (i == 0)
+ if (!i) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ bp->toeplitz_prefix = 0;
get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
- else
+ for (k = 0; k < 8; k++) {
+ bp->toeplitz_prefix <<= 8;
+ bp->toeplitz_prefix |= key[k];
+ }
+ } else {
memcpy(vnic->rss_hash_key,
bp->vnic_info[0].rss_hash_key,
HW_HASH_KEY_SIZE);
+ }
}
}
}
@@ -4194,7 +4442,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_skip_grps;
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -4208,13 +4456,13 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
vnic_skip_grps:
- if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
/* Allocate rss table and hash key */
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
@@ -4324,7 +4572,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
- !(bp->flags & BNXT_FLAG_CHIP_P5))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
@@ -4362,7 +4610,7 @@ static void bnxt_init_stats(struct bnxt *bp)
stats = &cpr->stats;
rc = bnxt_hwrm_func_qstat_ext(bp, stats);
if (rc) {
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
mask = (1ULL << 48) - 1;
else
mask = -1ULL;
@@ -4508,7 +4756,7 @@ alloc_tx_ext_stats:
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
- int i;
+ int i, j;
if (!bp->bnapi)
return;
@@ -4525,10 +4773,10 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
cpr = &bnapi->cp_ring;
cpr->cp_raw_cons = 0;
- txr = bnapi->tx_ring;
- if (txr) {
+ bnxt_for_each_napi_tx(j, bnapi, txr) {
txr->tx_prod = 0;
txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
}
rxr = bnapi->rx_ring;
@@ -4538,12 +4786,12 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
rxr->rx_sw_agg_prod = 0;
rxr->rx_next_cons = 0;
}
+ bnapi->events = 0;
}
}
-static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
-#ifdef CONFIG_RFS_ACCEL
int i;
/* Under rtnl_lock and all our NAPIs have been disabled. It's
@@ -4555,40 +4803,73 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
struct bnxt_ntuple_filter *fltr;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
- hlist_del(&fltr->hash);
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ continue;
+ hlist_del(&fltr->base.hash);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
kfree(fltr);
}
}
- if (irq_reinit) {
- bitmap_free(bp->ntp_fltr_bmap);
- bp->ntp_fltr_bmap = NULL;
- }
+ if (!all)
+ return;
+
+ bitmap_free(bp->ntp_fltr_bmap);
+ bp->ntp_fltr_bmap = NULL;
bp->ntp_fltr_count = 0;
-#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (!(bp->flags & BNXT_FLAG_RFS))
+ if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
return 0;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
return rc;
-#else
- return 0;
-#endif
+}
+
+static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ struct bnxt_l2_filter *fltr;
+
+ head = &bp->l2_fltr_hash_tbl[i];
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+ if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ continue;
+ hlist_del(&fltr->base.hash);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+ }
+ }
+}
+
+static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
+ get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
@@ -4598,7 +4879,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
bnxt_free_all_cp_arrays(bp);
- bnxt_free_ntp_fltrs(bp, irq_re_init);
+ bnxt_free_ntp_fltrs(bp, false);
+ bnxt_free_l2_filters(bp, false);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
@@ -4641,7 +4923,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr =
&bp->bnapi[i]->cp_ring;
@@ -4659,11 +4941,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
rxr->rx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
rxr->rx_agg_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
+ } else {
+ rxr->rx_cpr = &bp->bnapi[i]->cp_ring;
}
rxr->bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
@@ -4686,22 +4970,33 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
else
j = bp->rx_nr_rings;
- for (i = 0; i < bp->tx_nr_rings; i++, j++) {
+ for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_napi *bnapi2;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
txr->tx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG;
- txr->bnapi = bp->bnapi[j];
- bp->bnapi[j]->tx_ring = txr;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) {
+ int k = j + BNXT_RING_TO_TC_OFF(bp, i);
+
+ bnapi2 = bp->bnapi[k];
txr->txq_index = i - bp->tx_nr_rings_xdp;
- bp->bnapi[j]->tx_int = bnxt_tx_int;
+ txr->tx_napi_idx =
+ BNXT_RING_TO_TC(bp, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ bnapi2->tx_int = bnxt_tx_int;
} else {
- bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
- bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+ bnapi2 = bp->bnapi[j];
+ bnapi2->flags |= BNXT_NAPI_FLAG_XDP;
+ bnapi2->tx_ring[0] = txr;
+ bnapi2->tx_int = bnxt_tx_int_xdp;
+ j++;
}
+ txr->bnapi = bnapi2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ txr->tx_cpr = &bnapi2->cp_ring;
}
rc = bnxt_alloc_stats(bp);
@@ -4913,6 +5208,8 @@ int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
return hwrm_req_send(bp, req);
}
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa);
+
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
struct hwrm_tunnel_dst_port_free_input *req;
@@ -4942,6 +5239,11 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
bp->nge_port = 0;
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
+ case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id);
+ bp->vxlan_gpe_port = 0;
+ bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID;
+ break;
default:
break;
}
@@ -4950,6 +5252,8 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
if (rc)
netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
rc);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
return rc;
}
@@ -4985,9 +5289,16 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
bp->nge_port = port;
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
+ case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE:
+ bp->vxlan_gpe_port = port;
+ bp->vxlan_gpe_fw_dst_port_id =
+ le16_to_cpu(resp->tunnel_dst_port_id);
+ break;
default:
break;
}
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
err_out:
hwrm_req_drop(bp, req);
@@ -5013,25 +5324,301 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return hwrm_req_send_silent(bp, req);
}
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ if (!atomic_dec_and_test(&fltr->refcnt))
+ return;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ if (fltr->base.flags) {
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
+ struct bnxt_l2_filter *fltr;
+
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnxt_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u32 idx)
+{
+ struct bnxt_l2_filter *fltr = NULL;
+
+ rcu_read_lock();
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ atomic_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
+#define BNXT_IPV4_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
+
+#define BNXT_IPV6_4TUPLE(bp, fkeys) \
+ (((fkeys)->basic.ip_proto == IPPROTO_TCP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \
+ ((fkeys)->basic.ip_proto == IPPROTO_UDP && \
+ (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
+
+static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
+{
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ if (BNXT_IPV4_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v4addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
+ return sizeof(fkeys->addrs.v4addrs);
+ }
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (BNXT_IPV6_4TUPLE(bp, fkeys))
+ return sizeof(fkeys->addrs.v6addrs) +
+ sizeof(fkeys->ports);
+
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
+ return sizeof(fkeys->addrs.v6addrs);
+ }
+
+ return 0;
+}
+
+static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
+ const unsigned char *key)
+{
+ u64 prefix = bp->toeplitz_prefix, hash = 0;
+ struct bnxt_ipv4_tuple tuple4;
+ struct bnxt_ipv6_tuple tuple6;
+ int i, j, len = 0;
+ u8 *four_tuple;
+
+ len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
+ if (!len)
+ return 0;
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuple4.v4addrs = fkeys->addrs.v4addrs;
+ tuple4.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple4;
+ } else {
+ tuple6.v6addrs = fkeys->addrs.v6addrs;
+ tuple6.ports = fkeys->ports;
+ four_tuple = (unsigned char *)&tuple6;
+ }
+
+ for (i = 0, j = 8; i < len; i++, j++) {
+ u8 byte = four_tuple[i];
+ int bit;
+
+ for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
+ if (byte & 0x80)
+ hash ^= prefix;
+ }
+ prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
+ }
+
+ /* The valid part of the hash is in the upper 32 bits. */
+ return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
+}
+
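
bnxt_toeplitz() above is a bit-serial software Toeplitz hash: a 64-bit window slides over the 40-byte RSS key (its first 8 bytes cached in bp->toeplitz_prefix), and each set bit of the input tuple XORs the current window into the hash. A standalone sketch of the same algorithm, without the driver's flow-key plumbing:

#include <stddef.h>
#include <stdint.h>

#define KEY_SIZE 40	/* HW_HASH_KEY_SIZE */

static uint32_t toeplitz(const uint8_t *key, const uint8_t *in, size_t len)
{
	uint64_t prefix = 0, hash = 0;
	size_t i, j;

	for (i = 0; i < 8; i++)		/* preload first 8 key bytes */
		prefix = (prefix << 8) | key[i];

	for (i = 0, j = 8; i < len; i++, j++) {
		uint8_t byte = in[i];
		int bit;

		/* XOR the key window into the hash for each set input bit. */
		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
			if (byte & 0x80)
				hash ^= prefix;
		}
		prefix |= (j < KEY_SIZE) ? key[j] : 0;	/* refill low byte */
	}
	/* The valid part of the hash is the upper 32 bits. */
	return (uint32_t)(hash >> 32);
}
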
#ifdef CONFIG_RFS_ACCEL
-static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+static struct bnxt_l2_filter *
+bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ return fltr;
+}
+#endif
+
+static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
+ struct bnxt_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNXT_FLTR_TYPE_L2;
+ if (fltr->base.flags) {
+ int bit_id;
+
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+ BNXT_MAX_FLTR, 0);
+ if (bit_id < 0)
+ return -ENOMEM;
+ fltr->base.sw_id = (u16)bit_id;
+ }
+ head = &bp->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ atomic_set(&fltr->refcnt, 1);
+ return 0;
+}
+
+static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ fltr = bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ if (rc) {
+ bnxt_del_l2_filter(bp, fltr);
+ fltr = ERR_PTR(rc);
+ }
+ return fltr;
+}
+
+static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &pf->vf[vf_idx];
+
+ return vf->fw_fid;
+#else
+ return INVALID_HW_RING_ID;
+#endif
+}
+
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ if (target_id == INVALID_HW_RING_ID)
+ return -EINVAL;
+ }
+
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->l2_filter_id = fltr->base.filter_id;
+ return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ u16 target_id = 0xffff;
+ int rc;
+
+ if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (fltr->base.vf_idx >= pf->active_vfs)
+ return -EINVAL;
+
+ target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+ }
+ rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->target_id = cpu_to_le16(target_id);
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ req->flags |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ fltr->base.filter_id = resp->l2_filter_id;
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_free_input *req;
int rc;
+ set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
if (rc)
return rc;
- req->ntuple_filter_id = fltr->filter_id;
+ req->ntuple_filter_id = fltr->base.filter_id;
return hwrm_req_send(bp, req);
}
#define BNXT_NTP_FLTR_FLAGS \
(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
- CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
@@ -5047,12 +5634,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
#define BNXT_NTP_TUNNEL_FLTR_FLAG \
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
-static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
- struct bnxt_ntuple_filter *fltr)
+void bnxt_fill_ipv6_mask(__be32 mask[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ mask[i] = cpu_to_be32(~0);
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
u32 flags = 0;
int rc;
@@ -5061,42 +5657,47 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
if (rc)
return rc;
- req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+ l2_fltr = fltr->l2_fltr;
+ req->l2_filter_id = l2_fltr->base.filter_id;
+
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->rxq);
+ req->dst_id = cpu_to_le16(fltr->base.rxq);
} else {
- vnic = &bp->vnic_info[fltr->rxq + 1];
+ vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
req->flags = cpu_to_le32(flags);
req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
- memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req->ip_protocol = keys->basic.ip_proto;
if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
- int i;
-
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ *(struct in6_addr *)&req->src_ipaddr[0] =
+ keys->addrs.v6addrs.src;
+ bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ *(struct in6_addr *)&req->dst_ipaddr[0] =
+ keys->addrs.v6addrs.dst;
+ bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
}
} else {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+ }
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5104,80 +5705,85 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ req->src_port = keys->ports.src;
+ req->src_port_mask = cpu_to_be16(0xffff);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = cpu_to_be16(0xffff);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- fltr->filter_id = resp->ntuple_filter_id;
+ fltr->base.filter_id = resp->ntuple_filter_id;
hwrm_req_drop(bp, req);
return rc;
}
-#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
const u8 *mac_addr)
{
- struct hwrm_cfa_l2_filter_alloc_output *resp;
- struct hwrm_cfa_l2_filter_alloc_input *req;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
- if (rc)
- return rc;
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
- req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
- if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
- req->flags |=
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
- req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
- req->enables =
- cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
- CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
- memcpy(req->l2_addr, mac_addr, ETH_ALEN);
- req->l2_addr_mask[0] = 0xff;
- req->l2_addr_mask[1] = 0xff;
- req->l2_addr_mask[2] = 0xff;
- req->l2_addr_mask[3] = 0xff;
- req->l2_addr_mask[4] = 0xff;
- req->l2_addr_mask[5] = 0xff;
-
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
- resp->l2_filter_id;
- hwrm_req_drop(bp, req);
+ fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
return rc;
}
-static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
- struct hwrm_cfa_l2_filter_free_input *req;
u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
- int rc;
/* Any associated ntuple filters will also be cleared by firmware. */
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 0; i < num_of_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
for (j = 0; j < vnic->uc_filter_count; j++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[j];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[j];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
vnic->uc_filter_count = 0;
}
- hwrm_req_drop(bp, req);
- return rc;
+}
+
+#define BNXT_DFLT_TUNL_TPA_BMAP \
+ (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \
+ VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6)
+
+static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
+ struct hwrm_vnic_tpa_cfg_input *req)
+{
+ u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA))
+ return;
+
+ if (bp->vxlan_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN;
+ if (bp->vxlan_gpe_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE;
+ if (bp->nge_port)
+ tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE;
+
+ req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
@@ -5226,7 +5832,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
segs = MAX_TPA_SEGS_P5;
max_aggs = bp->max_tpa;
} else {
@@ -5236,6 +5842,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
req->max_aggs = cpu_to_le16(max_aggs);
req->min_agg_len = cpu_to_le32(512);
+ bnxt_hwrm_vnic_update_tunl_tpa(bp, req);
}
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5252,35 +5859,25 @@ static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = rxr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
- }
}
static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr;
-
- cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
- return cpr->cp_ring_struct.fw_ring_id;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return txr->tx_cpr->cp_ring_struct.fw_ring_id;
+ else
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
- }
}
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
int entries;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
else
entries = HW_HASH_INDEX_SIZE;
@@ -5330,8 +5927,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (!rx_rings)
+ return 0;
+ return bnxt_calc_nr_ring_pages(rx_rings - 1,
+ BNXT_RSS_TABLE_ENTRIES_P5);
+ }
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
@@ -5376,7 +5977,7 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
else
bnxt_fill_hw_rss_tbl(bp, vnic);
@@ -5401,7 +6002,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
struct hwrm_vnic_rss_cfg_input *req;
int rc;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
@@ -5566,7 +6167,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
req->default_rx_ring_id =
@@ -5666,7 +6267,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_no_ring_grps;
/* map ring groups to this vnic */
@@ -5701,7 +6302,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
int rc;
bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
- bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
+ bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP;
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -5714,9 +6316,9 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) {
u32 flags = le32_to_cpu(resp->flags);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
- bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP;
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
@@ -5725,18 +6327,22 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly.
*/
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
- (BNXT_CHIP_P5_THOR(bp) &&
+ (BNXT_CHIP_P5(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
- bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED)
+ bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
else
- bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7;
}
+ if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
}
hwrm_req_drop(bp, req);
return rc;
@@ -5749,7 +6355,7 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
int rc;
u16 i;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
@@ -5782,7 +6388,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
struct hwrm_ring_grp_free_input *req;
u16 i;
- if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
@@ -5842,12 +6448,15 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->length = cpu_to_le32(bp->tx_ring_mask + 1);
req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req->queue_id = cpu_to_le16(ring->queue_id);
+ if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
+ req->cmpl_coal_cnt =
+ RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
break;
}
case HWRM_RING_ALLOC_RX:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 flags = 0;
/* Association of rx ring with stats context */
@@ -5862,7 +6471,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
}
break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
/* Association of agg ring with rx ring */
grp_info = &bp->grp_info[ring->grp_idx];
@@ -5880,7 +6489,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
/* Association of cp ring with nq */
grp_info = &bp->grp_info[map_index];
req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
@@ -5948,14 +6557,34 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
}
}
+static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bp->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bp->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bp->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bp->cp_ring_mask;
+ break;
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+ }
+}
+
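
bnxt_set_db_mask() above sizes the doorbell index mask per ring type and, on P7 chips, positions an epoch bit that flips on every ring wraparound. A sketch (hypothetical ring size and bit position) of deriving an epoch-tagged index from a raw producer index:

#include <stdint.h>

#define RING_MASK	0xffu		/* hypothetical 256-entry ring */
#define EPOCH_MASK	(RING_MASK + 1)	/* bit that toggles on each wrap */
#define EPOCH_SFT	24		/* hypothetical doorbell bit position */

static uint32_t db_ring_idx(uint32_t raw_idx)
{
	uint32_t epoch = !!(raw_idx & EPOCH_MASK);

	return (raw_idx & RING_MASK) | (epoch << EPOCH_SFT);
}
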
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
u32 map_idx, u32 xid)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- if (BNXT_PF(bp))
- db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
- else
- db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
@@ -5972,6 +6601,11 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
db->db_key64 |= (u64)xid << DBR_XID_SFT;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ db->db_key64 |= DBR_VALID;
+
+ db->doorbell = bp->bar1 + bp->db_offset;
} else {
db->doorbell = bp->bar1 + map_idx * 0x80;
switch (ring_type) {
@@ -5987,6 +6621,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break;
}
}
+ bnxt_set_db_mask(bp, db, ring_type);
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
@@ -5995,7 +6630,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
int i, rc = 0;
u32 type;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = HWRM_RING_ALLOC_NQ;
else
type = HWRM_RING_ALLOC_CMPL;
@@ -6031,15 +6666,13 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_ring_struct *ring;
u32 map_idx;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
struct bnxt_napi *bnapi = txr->bnapi;
- struct bnxt_cp_ring_info *cpr, *cpr2;
u32 type2 = HWRM_RING_ALLOC_CMPL;
- cpr = &bnapi->cp_ring;
- cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_TX_HDL;
+ ring->handle = BNXT_SET_NQ_HDL(cpr2);
map_idx = bnapi->index;
rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
if (rc)
@@ -6071,14 +6704,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_cp_ring_info *cpr2;
- cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_RX_HDL;
+ ring->handle = BNXT_SET_NQ_HDL(cpr2);
rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
if (rc)
goto err_out;
@@ -6186,7 +6817,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else
type = RING_FREE_REQ_RING_TYPE_RX;
@@ -6213,7 +6844,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/
bnxt_disable_int_sync(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_NQ;
else
type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
@@ -6223,18 +6854,16 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
- if (cpr2) {
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
+ ring = &cpr2->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
}
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
@@ -6246,6 +6875,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
bool shared);
@@ -6282,14 +6913,16 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
int rx = hw_resc->resv_rx_rings;
int tx = hw_resc->resv_tx_rings;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx >>= 1;
if (cp < (rx + tx)) {
- bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
+ if (rc)
+ goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@@ -6301,8 +6934,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
+get_rings_exit:
hwrm_req_drop(bp, req);
- return 0;
+ return rc;
}
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -6346,7 +6980,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
@@ -6362,16 +6996,17 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs =
- cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
bnxt_rfs_supported(bp))
req->num_rsscos_ctxs =
cpu_to_le16(ring_grps + 1);
@@ -6397,7 +7032,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= tx_rings + ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
@@ -6412,9 +7047,11 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -6508,7 +7145,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
int cp;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return bnxt_nq_rings_in_use(bp);
cp = bp->tx_nr_rings + bp->rx_nr_rings;
@@ -6565,7 +7202,8 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -6573,9 +7211,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5)))
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
return true;
return false;
@@ -6590,13 +7228,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
int grp, rx_rings, rc;
int vnic = 1, stat;
bool sh = false;
+ int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
@@ -6639,7 +7279,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx = rx_rings << 1;
- cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, tx);
+ cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
bp->tx_nr_rings = tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -6686,7 +7327,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req->flags = cpu_to_le32(flags);
@@ -6708,7 +7349,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else
@@ -6902,10 +7543,40 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
return hwrm_req_send(bp, req_rx);
}
+static int
+bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
+
+ req->ring_id = cpu_to_le16(ring_id);
+ return hwrm_req_send(bp, req);
+}
+
+static int
+bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ struct bnxt_tx_ring_info *txr;
+ int i, rc;
+
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ u16 ring_id;
+
+ ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ req->ring_id = cpu_to_le16(ring_id);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ return rc;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ }
+ return 0;
+}
+
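/*
 * Illustrative note on the two helpers above: bnxt_hwrm_set_tx_coal()
 * walks every TX ring of the NAPI, but on chips without
 * BNXT_FLAG_CHIP_P5_PLUS all TX rings share one completion ring, so it
 * returns after programming the first ring; on P5_PLUS chips each TX
 * ring has its own completion ring and the loop continues.
 */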
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
- *req;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx;
int i, rc;
rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
@@ -6926,29 +7597,19 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_coal *hw_coal;
- u16 ring_id;
- req = req_rx;
- if (!bnapi->rx_ring) {
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req = req_tx;
- } else {
- ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
- }
- req->ring_id = cpu_to_le16(ring_id);
-
- rc = hwrm_req_send(bp, req);
+ if (!bnapi->rx_ring)
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
+ else
+ rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx);
if (rc)
break;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
- if (bnapi->rx_ring && bnapi->tx_ring) {
- req = req_tx;
- ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
- req->ring_id = cpu_to_le16(ring_id);
- rc = hwrm_req_send(bp, req);
+ if (bnapi->rx_ring && bnapi->tx_ring[0]) {
+ rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx);
if (rc)
break;
}
@@ -7044,7 +7705,6 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
- u32 min_db_offset = 0;
u16 flags;
int rc;
@@ -7102,16 +7762,17 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (bp->db_size)
goto func_qcfg_exit;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+ if (BNXT_CHIP_P5(bp)) {
if (BNXT_PF(bp))
- min_db_offset = DB_PF_OFFSET_P5;
+ bp->db_offset = DB_PF_OFFSET_P5;
else
- min_db_offset = DB_VF_OFFSET_P5;
+ bp->db_offset = DB_VF_OFFSET_P5;
}
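/*
 * Illustrative reading of the change above: db_offset now defaults to
 * the firmware-reported legacy L2 doorbell size (KB converted to
 * bytes) and is only overridden with the fixed PF/VF offsets on P5
 * chips; db_size below still falls back to the full BAR 2 length when
 * the reported size is missing or not larger than the offset.
 */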
bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
1024);
if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
- bp->db_size <= min_db_offset)
+ bp->db_size <= bp->db_offset)
bp->db_size = pci_resource_len(bp->pdev, 2);
func_qcfg_exit:
@@ -7119,37 +7780,99 @@ func_qcfg_exit:
return rc;
}
-static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
- struct hwrm_func_backing_store_qcaps_output *resp)
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
{
- struct bnxt_mem_init *mem_init;
- u16 init_mask;
- u8 init_val;
- u8 *offset;
- int i;
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
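/*
 * Example (illustrative): if the firmware sets the init-mask bit for a
 * type and reports init_offset = 3, the stored byte offset is
 * 3 * 4 = 12; if the bit is clear, the offset stays
 * BNXT_CTX_INIT_INVALID_OFFSET and init_value is forced to 0 so the
 * memory is not pattern-initialized.
 */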
+
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
- init_val = resp->ctx_kind_initializer;
- init_mask = le16_to_cpu(resp->ctx_init_mask);
- offset = &resp->qp_init_offset;
- mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
- mem_init->init_val = init_val;
- mem_init->offset = BNXT_MEM_INVALID_OFFSET;
- if (!init_mask)
+ if (!ctxm->max_entries)
continue;
- if (i == BNXT_CTX_MEM_INIT_STAT)
- offset = &resp->stat_init_offset;
- if (init_mask & (1 << i))
- mem_init->offset = *offset * 4;
- else
- mem_init->init_val = 0;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
}
- ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
- ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+ return 0;
+}
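/*
 * Example (illustrative): a type with instance_bmap = 0x7 gets
 * n = hweight32(0x7) = 3 bnxt_ctx_pg_info structures, one per
 * instance; types reporting max_entries == 0 are skipped.
 */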
+
+#define BNXT_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
+{
+ struct hwrm_func_backing_store_qcaps_v2_output *resp;
+ struct hwrm_func_backing_store_qcaps_v2_input *req;
+ struct bnxt_ctx_mem_info *ctx;
+ u16 type;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+ if (rc)
+ return rc;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+
+ resp = hwrm_req_hold(bp, req);
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; ) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req->type = cpu_to_le16(type);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->flags = flags;
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNXT_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
+
+ctx_done:
+ hwrm_req_drop(bp, req);
+ return rc;
}
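/*
 * Illustrative note: the query loop above is firmware-driven; each
 * response carries next_valid_type, so the driver hops from one valid
 * backing-store type to the next until it reaches BNXT_CTX_V2_MAX,
 * ignoring entries whose TYPE_VALID flag is clear.
 */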
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
@@ -7161,6 +7884,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
return 0;
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
+
rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
if (rc)
return rc;
@@ -7168,48 +7894,84 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- int i, tqm_rings;
+ u8 init_val, init_idx = 0;
+ u16 init_mask;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = bp->ctx;
if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
- }
- ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
- ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
- ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
- ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
- ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
- ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
- ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
- ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
- ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
- ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
- ctx->vnic_max_vnic_entries =
- le16_to_cpu(resp->vnic_max_vnic_entries);
- ctx->vnic_max_ring_table_entries =
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ bp->ctx = ctx;
+ }
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries);
+ ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
+ ctxm->max_entries = ctxm->vnic_entries +
le16_to_cpu(resp->vnic_max_ring_table_entries);
- ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
- ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
- ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
- ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
- ctx->tqm_min_entries_per_ring =
- le32_to_cpu(resp->tqm_min_entries_per_ring);
- ctx->tqm_max_entries_per_ring =
- le32_to_cpu(resp->tqm_max_entries_per_ring);
- ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
- if (!ctx->tqm_entries_multiple)
- ctx->tqm_entries_multiple = 1;
- ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
- ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
- ctx->mrav_num_entries_units =
+ ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->vnic_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->stat_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctxm->entry_multiple = resp->tqm_entries_multiple;
+ if (!ctxm->entry_multiple)
+ ctxm->entry_multiple = 1;
+
+ memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctxm->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
- ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
- ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->mrav_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
- bnxt_init_ctx_initializer(ctx, resp);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
@@ -7217,16 +7979,11 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
- ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
- if (!ctx_pg) {
- kfree(ctx);
- rc = -ENOMEM;
- goto ctx_err;
- }
- for (i = 0; i < tqm_rings; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
- bp->ctx = ctx;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
+ ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
+ rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
} else {
rc = 0;
}
@@ -7265,6 +8022,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input *req;
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
void **__req = (void **)&req;
u32 req_len = sizeof(*req);
__le32 *num_entries;
@@ -7286,82 +8044,102 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
req->enables = cpu_to_le32(enables);
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
- ctx_pg = &ctx->qp_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctx_pg = ctxm->pg_info;
req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
- req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
- req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
- req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+ req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+ req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+ req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->qpc_pg_size_qpc_lvl,
&req->qpc_page_dir);
+
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)
+ req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
- ctx_pg = &ctx->srq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctx_pg = ctxm->pg_info;
req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
- req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+ req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+ req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->srq_pg_size_srq_lvl,
&req->srq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
- ctx_pg = &ctx->cq_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctx_pg = ctxm->pg_info;
req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
- req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
- req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+ req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+ req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->cq_pg_size_cq_lvl,
&req->cq_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
- ctx_pg = &ctx->vnic_mem;
- req->vnic_num_vnic_entries =
- cpu_to_le16(ctx->vnic_max_vnic_entries);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctx_pg = ctxm->pg_info;
+ req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
req->vnic_num_ring_table_entries =
- cpu_to_le16(ctx->vnic_max_ring_table_entries);
- req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+ cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+ req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->vnic_pg_size_vnic_lvl,
&req->vnic_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
- ctx_pg = &ctx->stat_mem;
- req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
- req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctx_pg = ctxm->pg_info;
+ req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
+ req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->stat_pg_size_stat_lvl,
&req->stat_page_dir);
}
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
- ctx_pg = &ctx->mrav_mem;
+ u32 units;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctx_pg = ctxm->pg_info;
req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
- if (ctx->mrav_num_entries_units)
- flags |=
- FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
- req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ units = ctxm->mrav_num_entries_units;
+ if (units) {
+ u32 num_mr, num_ah = ctxm->mrav_av_entries;
+ u32 entries;
+
+ num_mr = ctx_pg->entries - num_ah;
+ entries = ((num_mr / units) << 16) | (num_ah / units);
+ req->mrav_num_entries = cpu_to_le32(entries);
+ flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
+ }
+ req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->mrav_pg_size_mrav_lvl,
&req->mrav_page_dir);
}
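/*
 * Example (illustrative): with mrav_num_entries_units = 8,
 * ctx_pg->entries = 393216 and num_ah = mrav_av_entries = 131072,
 * num_mr = 262144 and the encoded field becomes
 * ((262144 / 8) << 16) | (131072 / 8), i.e. (32768 << 16) | 16384.
 */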
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
- ctx_pg = &ctx->tim_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctx_pg = ctxm->pg_info;
req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
- req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req->tim_pg_size_tim_lvl,
&req->tim_page_dir);
}
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
for (i = 0, num_entries = &req->tqm_sp_num_entries,
pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req->tqm_sp_page_dir,
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
+ ctx_pg = ctxm->pg_info;
i < BNXT_MAX_TQM_RINGS;
+ ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
- req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
- ctx_pg = ctx->tqm_mem[i];
+ req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
@@ -7385,7 +8163,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, struct bnxt_mem_init *mem_init)
+ u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -7423,7 +8201,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7438,7 +8216,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- rmem->mem_init = mem_init;
+ rmem->ctx_mem = ctxm;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7473,41 +8251,144 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
+static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+ return rc;
+}
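/*
 * Example (illustrative): requesting entries = 1000 for a type with
 * entry_multiple = 64, min_entries = 128 and max_entries = 65536
 * rounds up to 1024 and stays within the clamp, so each instance
 * allocates mem_size = 1024 * entry_size bytes of backing store.
 */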
+
+static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input *req;
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+ if (rc)
+ return rc;
+ hwrm_req_hold(bp, req);
+ req->type = cpu_to_le16(ctxm->type);
+ req->entry_size = cpu_to_le16(ctxm->entry_size);
+ req->subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req->instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req->num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req->page_size_pbl_level,
+ &req->page_dir);
+ if (last && j == n)
+ req->flags =
+ cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
+ rc = hwrm_req_send(bp, req);
+ }
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ struct bnxt_ctx_mem_type *ctxm;
+ u16 last_type;
+ int rc = 0;
+ u16 type;
+
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNXT_CTX_MAX - 1;
+ else
+ last_type = BNXT_CTX_L2_MAX - 1;
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
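/*
 * Illustrative note: only the final configured type carries
 * FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE (the TIM type
 * when RoCE TIM memory was enabled, otherwise the last L2 type), which
 * tells the firmware the backing-store configuration is complete.
 */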
+
void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
- int i;
+ u16 type;
if (!ctx)
return;
- if (ctx->tqm_mem[0]) {
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
- bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
- kfree(ctx->tqm_mem[0]);
- ctx->tqm_mem[0] = NULL;
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
}
- bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
- bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+ kfree(ctx);
+ bp->ctx = NULL;
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- struct bnxt_mem_init *init;
- u32 mem_size, ena, entries;
- u32 entries_sp, min;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
+ u32 fast_qpmd_qps;
u8 pg_lvl = 1;
int i, rc;
@@ -7521,120 +8402,98 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ fast_qpmd_qps = ctxm->qp_fast_qpmd_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = 65536;
- extra_srqs = 8192;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp destroy feature */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (fast_qpmd_qps)
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
- ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
- extra_qps;
- if (ctx->qp_entry_size) {
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->srq_mem;
- ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- if (ctx->srq_entry_size) {
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->cq_mem;
- ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- if (ctx->cq_entry_size) {
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->vnic_mem;
- ctx_pg->entries = ctx->vnic_max_vnic_entries +
- ctx->vnic_max_ring_table_entries;
- if (ctx->vnic_entry_size) {
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ctx_pg = &ctx->stat_mem;
- ctx_pg->entries = ctx->stat_max_entries;
- if (ctx->stat_entry_size) {
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+ if (rc)
+ return rc;
- ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
goto skip_rdma;
- ctx_pg = &ctx->mrav_mem;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
/* 128K extra is needed to accommodate static AH context
* allocation by f/w.
*/
- num_mr = 1024 * 256;
- num_ah = 1024 * 128;
- ctx_pg->entries = num_mr + num_ah;
- if (ctx->mrav_entry_size) {
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
- if (rc)
- return rc;
- }
- ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
- if (ctx->mrav_num_entries_units)
- ctx_pg->entries =
- ((num_mr / ctx->mrav_num_entries_units) << 16) |
- (num_ah / ctx->mrav_num_entries_units);
-
- ctx_pg = &ctx->tim_mem;
- ctx_pg->entries = ctx->qp_mem.entries;
- if (ctx->tim_entry_size) {
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
- if (rc)
- return rc;
- }
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
+ if (rc)
+ return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+ if (rc)
+ return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
- min = ctx->tqm_min_entries_per_ring;
- entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
- 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
- entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
- entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
- entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
- for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
- ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = i ? entries : entries_sp;
- if (ctx->tqm_entry_size) {
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
- NULL);
- if (rc)
- return rc;
- }
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
+ if (rc)
+ return rc;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
- }
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
- rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+
+ if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ rc = bnxt_backing_store_cfg_v2(bp, ena);
+ else
+ rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
rc);
@@ -7682,7 +8541,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 max_msix = le16_to_cpu(resp->max_msix);
hw_resc->max_nqs = max_msix;
@@ -7711,7 +8570,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -7743,7 +8602,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
@@ -7815,10 +8674,16 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
+ bp->flags |= BNXT_FLAG_TX_COAL_CMPL;
flags_ext2 = le32_to_cpu(resp->flags_ext2);
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
+ bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8033,7 +8898,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -8450,7 +9315,7 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp)
int i;
/* Chip bug. Counter intermittently becomes 0. */
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ignore_zero = true;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8644,7 +9509,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
return;
bnxt_hwrm_clear_vnic_filter(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
/* clear all RSS setting before free vnic ctx */
bnxt_hwrm_clear_vnic_rss(bp);
bnxt_hwrm_vnic_ctx_free(bp);
@@ -8653,7 +9518,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, false);
bnxt_hwrm_vnic_free(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_hwrm_vnic_ctx_free(bp);
}
@@ -8810,7 +9675,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return __bnxt_setup_vnic_p5(bp, vnic_id);
else
return __bnxt_setup_vnic(bp, vnic_id);
@@ -8818,10 +9683,9 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -8834,7 +9698,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic = &bp->vnic_info[vnic_id];
vnic->flags |= BNXT_VNIC_RFS_FLAG;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
@@ -8847,9 +9711,6 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
break;
}
return rc;
-#else
- return 0;
-#endif
}
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -8928,7 +9789,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bnxt_hwrm_update_rss_hash_cfg(bp);
if (bp->flags & BNXT_FLAG_RFS) {
@@ -9046,8 +9907,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
return rc;
}
-static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
- bool shared)
+static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared)
{
int _rx = *rx, _tx = *tx;
@@ -9070,19 +9931,59 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
return 0;
}
+static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
+{
+ return (tx - tx_xdp) / tx_sets + tx_xdp;
+}
+
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
+{
+ int tcs = bp->num_tc;
+
+ if (!tcs)
+ tcs = 1;
+ return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp);
+}
+
+static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
+{
+ int tcs = bp->num_tc;
+
+ return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
+ bp->tx_nr_rings_xdp;
+}
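/*
 * Example (illustrative): with bp->num_tc = 3 and tx_nr_rings_xdp = 2,
 * tx = 14 TX rings map to tx_cp = (14 - 2) / 3 + 2 = 6 completion
 * rings, and bnxt_num_cp_to_tx() inverts this: (6 - 2) * 3 + 2 = 14.
 */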
+
+static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool sh)
+{
+ int tx_cp = bnxt_num_tx_to_cp(bp, *tx);
+
+ if (tx_cp != *tx) {
+ int tx_saved = tx_cp, rc;
+
+ rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh);
+ if (rc)
+ return rc;
+ if (tx_cp != tx_saved)
+ *tx = bnxt_num_cp_to_tx(bp, tx_cp);
+ return 0;
+ }
+ return __bnxt_trim_rings(bp, rx, tx, max, sh);
+}
+
static void bnxt_setup_msix(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
struct net_device *dev = bp->dev;
int tcs, i;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
if (tcs) {
int i, off, count;
for (i = 0; i < tcs; i++) {
count = bp->tx_nr_rings_per_tc;
- off = i * count;
+ off = BNXT_TC_TO_RING_BASE(bp, i);
netdev_set_tc_queue(dev, i, count, off);
}
}
@@ -9108,8 +10009,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
- if (netdev_get_num_tc(bp->dev))
+ if (bp->num_tc) {
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
+ }
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@@ -9137,7 +10040,6 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
return bp->hw_resc.max_rsscos_ctxs;
@@ -9147,7 +10049,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
return bp->hw_resc.max_vnics;
}
-#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
@@ -9163,7 +10064,7 @@ static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
unsigned int cp = bp->hw_resc.max_cp_rings;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
cp -= bnxt_get_ulp_msix_num(bp);
return cp;
@@ -9173,7 +10074,7 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
@@ -9189,7 +10090,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
unsigned int cp;
cp = bnxt_get_max_func_cp_rings_for_en(bp);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return cp - bp->rx_nr_rings - bp->tx_nr_rings;
else
return cp - bp->cp_nr_rings;
@@ -9208,7 +10109,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
int max_idx, avail_msix;
max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
if (!BNXT_NEW_RM(bp) || avail_msix >= num)
@@ -9232,7 +10133,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
static int bnxt_init_msix(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
struct msix_entry *msix_ent;
total_vecs = bnxt_get_num_msix(bp);
@@ -9274,9 +10175,10 @@ static int bnxt_init_msix(struct bnxt *bp)
if (rc)
goto msix_setup_exit;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
bp->cp_nr_rings = (min == 1) ?
- max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
} else {
rc = -ENOMEM;
@@ -9336,8 +10238,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
- int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
+ int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@@ -9363,6 +10265,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@@ -9439,6 +10342,7 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
+ netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
@@ -9466,6 +10370,11 @@ static void bnxt_del_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++)
+ netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -9486,7 +10395,7 @@ static void bnxt_init_napi(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_USING_MSIX) {
int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
@@ -9542,8 +10451,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
bnapi->in_reset = false;
- bnapi->tx_pkts = 0;
-
if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
@@ -9647,7 +10554,10 @@ void bnxt_report_link(struct bnxt *bp)
signal = "(NRZ) ";
break;
case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
- signal = "(PAM4) ";
+ signal = "(PAM4 56Gbps) ";
+ break;
+ case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112:
+ signal = "(PAM4 112Gbps) ";
break;
default:
break;
@@ -9675,7 +10585,9 @@ static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
if (!resp->supported_speeds_auto_mode &&
!resp->supported_speeds_force_mode &&
!resp->supported_pam4_speeds_auto_mode &&
- !resp->supported_pam4_speeds_force_mode)
+ !resp->supported_pam4_speeds_force_mode &&
+ !resp->supported_speeds2_auto_mode &&
+ !resp->supported_speeds2_force_mode)
return true;
return false;
}
@@ -9721,6 +10633,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
/* Phy re-enabled, reprobe the speeds */
link_info->support_auto_speeds = 0;
link_info->support_pam4_auto_speeds = 0;
+ link_info->support_auto_speeds2 = 0;
}
}
if (resp->supported_speeds_auto_mode)
@@ -9729,6 +10642,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (resp->supported_pam4_speeds_auto_mode)
link_info->support_pam4_auto_speeds =
le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
+ if (resp->supported_speeds2_auto_mode)
+ link_info->support_auto_speeds2 =
+ le16_to_cpu(resp->supported_speeds2_auto_mode);
bp->port_count = resp->port_cnt;
@@ -9746,9 +10662,19 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported)
static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
/* Check if any advertised speeds are no longer supported. The caller
* holds the link_lock mutex, so we can modify link_info settings.
*/
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds2)) {
+ link_info->advertising = link_info->support_auto_speeds2;
+ return true;
+ }
+ return false;
+ }
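/*
 * Illustrative note (inferred from the field names): on PHYs flagged
 * BNXT_PHY_FL_SPEEDS2 the advertised mask is validated only against
 * support_auto_speeds2; the legacy NRZ and PAM4 checks below are
 * skipped for such devices.
 */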
if (bnxt_support_dropped(link_info->advertising,
link_info->support_auto_speeds)) {
link_info->advertising = link_info->support_auto_speeds;
@@ -9797,18 +10723,25 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
link_info->duplex_setting = resp->duplex_cfg;
- if (link_info->phy_link_status == BNXT_LINK_LINK)
+ if (link_info->phy_link_status == BNXT_LINK_LINK) {
link_info->link_speed = le16_to_cpu(resp->link_speed);
- else
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2)
+ link_info->active_lanes = resp->active_lanes;
+ } else {
link_info->link_speed = 0;
+ link_info->active_lanes = 0;
+ }
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
link_info->force_pam4_link_speed =
le16_to_cpu(resp->force_pam4_link_speed);
+ link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
+ link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->auto_pam4_link_speeds =
le16_to_cpu(resp->auto_pam4_link_speed_mask);
+ link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
link_info->lp_auto_pam4_link_speeds =
@@ -9947,7 +10880,11 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
{
if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
- if (bp->link_info.advertising) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK);
+ req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising);
+ } else if (bp->link_info.advertising) {
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
}
@@ -9961,7 +10898,12 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
- if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2);
+ netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n",
+ (u32)bp->link_info.req_link_speed);
+ } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
} else {
@@ -10226,8 +11168,6 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -10578,6 +11518,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
@@ -11115,7 +12057,6 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- struct hwrm_cfa_l2_filter_free_input *req;
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -11127,16 +12068,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
if (!uc_update)
goto skip_uc;
- rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
- if (rc)
- return rc;
- hwrm_req_hold(bp, req);
for (i = 1; i < vnic->uc_filter_count; i++) {
- req->l2_filter_id = vnic->fw_l2_filter_id[i];
+ struct bnxt_l2_filter *fltr = vnic->l2_filters[i];
- rc = hwrm_req_send(bp, req);
+ bnxt_hwrm_l2_filter_free(bp, fltr);
+ bnxt_del_l2_filter(bp, fltr);
}
- hwrm_req_drop(bp, req);
vnic->uc_filter_count = 1;
@@ -11213,7 +12150,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
return true;
return false;
@@ -11223,7 +12160,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
return true;
return false;
}
@@ -11231,10 +12168,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
-#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
@@ -11244,7 +12180,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
/* RSS contexts not a limiting factor */
- if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
@@ -11267,9 +12203,6 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
return false;
-#else
- return false;
-#endif
}
static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -11336,7 +12269,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
(flags & BNXT_FLAG_TPA) == 0 ||
- (bp->flags & BNXT_FLAG_CHIP_P5))
+ (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
re_init = true;
}
@@ -11445,9 +12378,10 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
struct udphdr *uh = udp_hdr(skb);
__be16 udp_port = uh->dest;
- if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port &&
+ udp_port != bp->vxlan_gpe_port)
return false;
- if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ if (skb->inner_protocol == htons(ETH_P_TEB)) {
struct ethhdr *eh = inner_eth_hdr(skb);
switch (eh->h_proto) {
@@ -11458,6 +12392,11 @@ static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
skb_inner_network_offset(skb),
NULL);
}
+ } else if (skb->inner_protocol == htons(ETH_P_IP)) {
+ return true;
+ } else if (skb->inner_protocol == htons(ETH_P_IPV6)) {
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
}
return false;
}
@@ -11578,15 +12517,13 @@ static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
- int i = bnapi->index;
-
- if (!txr)
- return;
+ struct bnxt_tx_ring_info *txr;
+ int i = bnapi->index, j;
- netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
- i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
- txr->tx_cons);
+ bnxt_for_each_napi_tx(j, bnapi, txr)
+ netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+ i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
+ txr->tx_cons);
}
static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
@@ -11749,8 +12686,7 @@ static void bnxt_timer(struct timer_list *t)
if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
- if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
- netif_carrier_ok(dev))
+ if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
bnxt_restart_timer:
@@ -11832,6 +12768,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
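/*
 * Illustrative note: this helper factors out the quiesce sequence that
 * bnxt_fw_reset_close() open-coded until now (replaced in the next
 * hunk), so fatal-error handling can stop TX, NAPI and interrupts and
 * disable the PCI device without duplicating the steps.
 */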
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -11845,12 +12791,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -11859,8 +12800,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -12003,7 +12942,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -12016,12 +12955,11 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
continue;
cpr = &bnapi->cp_ring;
- for (j = 0; j < 2; j++) {
- struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
u32 val[2];
- if (!cpr2 || cpr2->has_more_work ||
- !bnxt_has_work(bp, cpr2))
+ if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2))
continue;
if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
@@ -12182,36 +13120,42 @@ static void bnxt_sp_task(struct work_struct *work)
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
+static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ int *max_cp);
+
/* Under rtnl_lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
- int max_rx, max_tx, tx_sets = 1;
+ int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
int tx_rings_needed, stats;
int rx_rings = rx;
- int cp, vnics, rc;
+ int cp, vnics;
if (tcs)
tx_sets = tcs;
- rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
- if (rc)
- return rc;
+ _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp);
- if (max_rx < rx)
+ if (max_rx < rx_rings)
return -ENOMEM;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+
tx_rings_needed = tx * tx_sets + tx_xdp;
if (max_tx < tx_rings_needed)
return -ENOMEM;
vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
- vnics += rx_rings;
+ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
+ BNXT_FLAG_RFS)
+ vnics += rx;
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx_rings <<= 1;
- cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+ tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
+ cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < cp)
+ return -ENOMEM;
stats = cp;
if (BNXT_NEW_RM(bp)) {
cp += bnxt_get_ulp_msix_num(bp);
@@ -12286,10 +13230,10 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
{
u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
return true;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
return true;
return false;
@@ -12372,15 +13316,15 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
{
- bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bp->rss_hash_delta = bp->rss_hash_cfg;
if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
@@ -12850,7 +13794,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
struct bnxt *bp = netdev_priv(dev);
bool sh = false;
- int rc;
+ int rc, tx_cp;
if (tc > bp->max_tc) {
netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
@@ -12858,7 +13802,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
- if (netdev_get_num_tc(dev) == tc)
+ if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -12876,13 +13820,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
+ bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
+ bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
@@ -12932,38 +13879,102 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-#ifdef CONFIG_RFS_ACCEL
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb)
+{
+ struct bnxt_vnic_info *vnic;
+
+ if (skb)
+ return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+
+ vnic = &bp->vnic_info[0];
+ return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
+}
+
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx)
+{
+ struct hlist_head *head;
+ int bit_id;
+
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ if (bit_id < 0) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return -ENOMEM;
+ }
+
+ fltr->base.sw_id = (u16)bit_id;
+ fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
+ fltr->base.flags |= BNXT_ACT_RING_DST;
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bp->ntp_fltr_count++;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return 0;
+}
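/*
 * Illustrative note: the sw_id is a free bit claimed from
 * ntp_fltr_bmap under ntp_fltr_lock, and the filter is published with
 * hlist_add_head_rcu() so the RX flow-steering path can look it up
 * locklessly under rcu_read_lock().
 */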
+
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
+ if (f1->ntuple_flags != f2->ntuple_flags)
+ return false;
+
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+ keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
return false;
} else {
- if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src)) ||
- memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst)))
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+ memcmp(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src,
+ sizeof(keys1->addrs.v6addrs.src))) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+ memcmp(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst,
+ sizeof(keys1->addrs.v6addrs.dst))))
return false;
}
- if (keys1->ports.ports == keys2->ports.ports &&
- keys1->control.flags == keys2->control.flags &&
- ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
- ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
+ if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
+ keys1->ports.src != keys2->ports.src) ||
+ ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
+ keys1->ports.dst != keys2->ports.dst))
+ return false;
+
+ if (keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr)
return true;
return false;
}
+struct bnxt_ntuple_filter *
+bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx)
+{
+ struct bnxt_ntuple_filter *f;
+ struct hlist_head *head;
+
+ head = &bp->ntp_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(f, head, base.hash) {
+ if (bnxt_fltr_match(f, fltr))
+ return f;
+ }
+ return NULL;
+}
+
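/* A minimal caller sketch for the lookup/insert pair above, mirroring
 * bnxt_rx_flow_steer() below; "fltr" is a hypothetical, fully populated
 * bnxt_ntuple_filter. The duplicate probe runs under RCU, then the
 * spinlocked insert path allocates a sw_id from the shared filter bitmap.
 */
	u32 idx = bnxt_get_ntp_filter_idx(bp, &fltr->fkeys, NULL);

	rcu_read_lock();
	if (bnxt_lookup_ntp_filter_from_idx(bp, fltr, idx)) {
		rcu_read_unlock();
		return -EEXIST;		/* an equivalent filter already exists */
	}
	rcu_read_unlock();
	return bnxt_insert_ntp_filter(bp, fltr, idx);	/* 0 or -ENOMEM */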
+#ifdef CONFIG_RFS_ACCEL
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
@@ -12971,29 +13982,31 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct bnxt_ntuple_filter *fltr, *new_fltr;
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
- int rc = 0, idx, bit_id, l2_idx = 0;
- struct hlist_head *head;
+ struct bnxt_l2_filter *l2_fltr;
+ int rc = 0, idx;
u32 flags;
- if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- int off = 0, j;
+ if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+ l2_fltr = bp->vnic_info[0].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ } else {
+ struct bnxt_l2_key key;
- netif_addr_lock_bh(dev);
- for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
- if (ether_addr_equal(eth->h_dest,
- vnic->uc_list + off)) {
- l2_idx = j + 1;
- break;
- }
- }
- netif_addr_unlock_bh(dev);
- if (!l2_idx)
+ ether_addr_copy(key.dst_mac_addr, eth->h_dest);
+ key.vlan = 0;
+ l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
+ if (!l2_fltr)
return -EINVAL;
+ if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return -EINVAL;
+ }
}
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
- if (!new_fltr)
+ if (!new_fltr) {
+ bnxt_del_l2_filter(bp, l2_fltr);
return -ENOMEM;
+ }
fkeys = &new_fltr->fkeys;
if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
@@ -13020,49 +14033,52 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
- memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
- memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+ new_fltr->l2_fltr = l2_fltr;
+ new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
- idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- head = &bp->ntp_fltr_hash_tbl[idx];
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (bnxt_fltr_match(fltr, new_fltr)) {
- rc = fltr->sw_id;
- rcu_read_unlock();
- goto err_free;
- }
- }
- rcu_read_unlock();
-
- spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_NTP_FLTR_MAX_FLTR, 0);
- if (bit_id < 0) {
- spin_unlock_bh(&bp->ntp_fltr_lock);
- rc = -ENOMEM;
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rc = fltr->base.sw_id;
+ rcu_read_unlock();
goto err_free;
}
+ rcu_read_unlock();
- new_fltr->sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
- new_fltr->l2_fltr_idx = l2_idx;
- new_fltr->rxq = rxq_index;
- hlist_add_head_rcu(&new_fltr->hash, head);
- bp->ntp_fltr_count++;
- spin_unlock_bh(&bp->ntp_fltr_lock);
-
- bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
-
- return new_fltr->sw_id;
+ new_fltr->base.rxq = rxq_index;
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
+ return new_fltr->base.sw_id;
+ }
err_free:
+ bnxt_del_l2_filter(bp, l2_fltr);
kfree(new_fltr);
return rc;
}
+#endif
+
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
+{
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return;
+ }
+ hlist_del_rcu(&fltr->base.hash);
+ bp->ntp_fltr_count--;
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr->l2_fltr);
+ clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+ kfree_rcu(fltr, base.rcu);
+}
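/* Reader side that the teardown above pairs with (a sketch; "idx" is a
 * hypothetical bucket index): traversal under rcu_read_lock() stays safe
 * across a concurrent bnxt_del_ntp_filter() because hlist_del_rcu()
 * unlinks without breaking the reader's forward pointer and kfree_rcu()
 * defers the free until the grace period ends; BNXT_FLTR_INSERTED makes
 * repeat deletes idempotent.
 */
	struct hlist_head *head = &bp->ntp_fltr_hash_tbl[idx];
	struct bnxt_ntuple_filter *fltr;

	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, base.hash) {
		/* fltr may already be unlinked from the list here, but its
		 * memory stays valid until the grace period expires
		 */
	}
	rcu_read_unlock();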
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
+#ifdef CONFIG_RFS_ACCEL
int i;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
@@ -13072,13 +14088,15 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
int rc;
head = &bp->ntp_fltr_hash_tbl[i];
- hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+ hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bool del = false;
- if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
- if (rps_may_expire_flow(bp->dev, fltr->rxq,
+ if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
+ if (fltr->base.flags & BNXT_ACT_NO_AGING)
+ continue;
+ if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
fltr->flow_id,
- fltr->sw_id)) {
+ fltr->base.sw_id)) {
bnxt_hwrm_cfa_ntuple_filter_free(bp,
fltr);
del = true;
@@ -13089,30 +14107,16 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
if (rc)
del = true;
else
- set_bit(BNXT_FLTR_VALID, &fltr->state);
+ set_bit(BNXT_FLTR_VALID, &fltr->base.state);
}
- if (del) {
- spin_lock_bh(&bp->ntp_fltr_lock);
- hlist_del_rcu(&fltr->hash);
- bp->ntp_fltr_count--;
- spin_unlock_bh(&bp->ntp_fltr_lock);
- synchronize_rcu();
- clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
- kfree(fltr);
- }
+ if (del)
+ bnxt_del_ntp_filter(bp, fltr);
}
}
+#endif
}
-#else
-
-static void bnxt_cfg_ntp_filters(struct bnxt *bp)
-{
-}
-
-#endif /* CONFIG_RFS_ACCEL */
-
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
@@ -13120,9 +14124,11 @@ static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int tabl
unsigned int cmd;
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
- cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
else
- cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}
@@ -13135,8 +14141,10 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- else
+ else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+ else
+ cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
@@ -13150,6 +14158,16 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
+}, bnxt_udp_tunnels_p7 = {
+ .set_port = bnxt_udp_tunnel_set_port,
+ .unset_port = bnxt_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
+ },
};
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -13257,6 +14275,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_free_l2_filters(bp, true);
+ bnxt_free_ntp_fltrs(bp, true);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -13279,8 +14299,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -13349,7 +14367,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
bnxt_get_ulp_msix_num(bp),
hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -13358,8 +14376,14 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
}
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rc;
+
+ rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ if (rc) {
+ *max_rx = 0;
+ *max_tx = 0;
+ }
/* On P5 and newer chips, the max_cp output param should be the number of available NQs */
*max_cp = max_irq;
}
@@ -13660,7 +14684,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
max_irqs = bnxt_get_max_irq(pdev);
- dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+ dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE,
+ max_irqs);
if (!dev)
return -ENOMEM;
@@ -13702,10 +14727,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
bnxt_vpd_read_info(bp);
- if (BNXT_CHIP_P5(bp)) {
- bp->flags |= BNXT_FLAG_CHIP_P5;
- if (BNXT_CHIP_SR2(bp))
- bp->flags |= BNXT_FLAG_CHIP_SR2;
+ if (BNXT_CHIP_P5_PLUS(bp)) {
+ bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
+ if (BNXT_CHIP_P7(bp))
+ bp->flags |= BNXT_FLAG_CHIP_P7;
}
rc = bnxt_alloc_rss_indir_tbl(bp);
@@ -13730,6 +14755,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_GRO;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_features |= NETIF_F_GSO_UDP_L4;
if (BNXT_SUPPORTS_TPA(bp))
dev->hw_features |= NETIF_F_LRO;
@@ -13740,7 +14767,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
- dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+ if (bp->flags & BNXT_FLAG_UDP_GSO_CAP)
+ dev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7;
+ else
+ dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
@@ -13768,7 +14800,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
- else if (BNXT_CHIP_P5(bp))
+ else if (BNXT_CHIP_P5_PLUS(bp))
bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
@@ -13794,6 +14826,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
@@ -13876,8 +14909,6 @@ init_err_pci_clean:
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -13930,8 +14961,6 @@ static int bnxt_suspend(struct device *device)
bnxt_hwrm_func_drv_unrgtr(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
rtnl_unlock();
return rc;
}
@@ -14008,6 +15037,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@@ -14016,22 +15046,31 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- if (state == pci_channel_io_perm_failure) {
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
+
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* The link is not reliable anymore if the state is pci_channel_io_frozen,
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
rtnl_unlock();
/* Request a slot reset. */
@@ -14111,6 +15150,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a7d7b09ea..47338b48c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 2
+#define DRV_VER_UPD 3
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -61,6 +61,24 @@ struct tx_bd {
__le64 tx_bd_haddr;
} __packed;
+#define TX_OPAQUE_IDX_MASK 0x0000ffff
+#define TX_OPAQUE_BDS_MASK 0x00ff0000
+#define TX_OPAQUE_BDS_SHIFT 16
+#define TX_OPAQUE_RING_MASK 0xff000000
+#define TX_OPAQUE_RING_SHIFT 24
+
+#define SET_TX_OPAQUE(bp, txr, idx, bds) \
+ (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \
+ ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bp)->tx_ring_mask))
+
+#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK)
+#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \
+ TX_OPAQUE_RING_SHIFT)
+#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \
+ TX_OPAQUE_BDS_SHIFT)
+#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
+ (bp)->tx_ring_mask)
+
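/* Worked example of the tx_bd opaque layout above (values assumed for
 * illustration): a 256-entry ring (bp->tx_ring_mask == 0xff), NAPI TX
 * index 2, producer index 0xfe, packet using 3 BDs:
 *
 *	opaque = SET_TX_OPAQUE(bp, txr, 0xfe, 3)
 *	       = (2 << 24) | (3 << 16) | 0xfe = 0x020300fe
 *
 *	TX_OPAQUE_RING(opaque)     == 2
 *	TX_OPAQUE_BDS(opaque)      == 3
 *	TX_OPAQUE_IDX(opaque)      == 0xfe
 *	TX_OPAQUE_PROD(bp, opaque) == (0xfe + 3) & 0xff == 0x01
 *
 * i.e. the completion alone identifies the ring, the BD count and the
 * producer index just past the packet, with the ring wrap applied.
 */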
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -121,11 +139,15 @@ struct tx_cmp {
__le32 tx_cmp_flags_type;
#define CMP_TYPE (0x3f << 0)
#define CMP_TYPE_TX_L2_CMP 0
+ #define CMP_TYPE_TX_L2_COAL_CMP 2
+ #define CMP_TYPE_TX_L2_PKT_TS_CMP 4
#define CMP_TYPE_RX_L2_CMP 17
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
#define CMP_TYPE_RX_TPA_AGG_CMP 22
+ #define CMP_TYPE_RX_L2_V3_CMP 23
+ #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -152,9 +174,13 @@ struct tx_cmp {
#define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
#define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)
- __le32 tx_cmp_unsed_3;
+ __le32 sq_cons_idx;
+ #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff
};
+#define TX_CMP_SQ_CONS_IDX(txcmp) \
+ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -182,8 +208,20 @@ struct rx_cmp {
#define RX_CMP_AGG_BUFS_SHIFT 1
#define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY (0xf << 12)
+ #define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12
+ #define RX_CMP_V3_RSS_EXT_OP_NEW (0xf << 8)
+ #define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8
#define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16
+ #define RX_CMP_SUB_NS_TS (0xf << 16)
+ #define RX_CMP_SUB_NS_TS_SHIFT 16
+ #define RX_CMP_METADATA1 (0xf << 28)
+ #define RX_CMP_METADATA1_SHIFT 28
+ #define RX_CMP_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_CMP_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_CMP_METADATA1_VALID (0x8 << 28)
__le32 rx_cmp_rss_hash;
};
@@ -203,6 +241,30 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
+ RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \
+ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\
+ RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT)
+
+#define RX_CMP_V3_HASH_TYPE(bp, rxcmp) \
+ (((bp)->rss_cap & BNXT_RSS_CAP_RSS_TCAM) ? \
+ RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \
+ RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp))
+
+#define EXT_OP_INNER_4 0x0
+#define EXT_OP_OUTER_4 0x2
+#define EXT_OP_INNFL_3 0x8
+#define EXT_OP_OUTFL_3 0xa
+
+#define RX_CMP_VLAN_VALID(rxcmp) \
+ ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID))
+
+#define RX_CMP_VLAN_TPID_SEL(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL)
+
struct rx_cmp_ext {
__le32 rx_cmp_flags2;
#define RX_CMP_FLAGS2_IP_CS_CALC 0x1
@@ -250,6 +312,9 @@ struct rx_cmp_ext {
#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
#define RX_CMPL_CFA_CODE_SFT 16
+ #define RX_CMPL_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_CMPL_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_CMPL_METADATA0_SFT 16
__le32 rx_cmp_timestamp;
};
@@ -275,6 +340,10 @@ struct rx_cmp_ext {
((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+#define RX_CMP_METADATA0_TCI(rxcmp1) \
+ ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT)
+
struct rx_agg_cmp {
__le32 rx_agg_cmp_len_flags_type;
#define RX_AGG_CMP_TYPE (0x3f << 0)
@@ -317,10 +386,18 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V1 (0x1 << 0)
#define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE (0x1ff << 7)
+ #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
#define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
+ #define RX_TPA_START_CMP_METADATA1 (0xf << 28)
+ #define RX_TPA_START_CMP_METADATA1_SHIFT 28
+ #define RX_TPA_START_METADATA1_TPID_SEL (0x7 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021Q (0x1 << 28)
+ #define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28)
+ #define RX_TPA_START_METADATA1_VALID (0x8 << 28)
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -334,6 +411,11 @@ struct rx_tpa_start_cmp {
RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \
+ (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \
+ RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+
#define TPA_START_AGG_ID(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
@@ -346,6 +428,14 @@ struct rx_tpa_start_cmp {
((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+#define TPA_START_VLAN_VALID(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \
+ cpu_to_le32(RX_TPA_START_METADATA1_VALID))
+
+#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \
+ (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_METADATA1_TPID_SEL)
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -356,6 +446,8 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE (0x1 << 10)
+ #define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
@@ -369,6 +461,9 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
+ #define RX_TPA_START_CMP_METADATA0_TCI_MASK (0xffff << 16)
+ #define RX_TPA_START_CMP_METADATA0_VID_MASK (0x0fff << 16)
+ #define RX_TPA_START_CMP_METADATA0_SFT 16
__le32 rx_tpa_start_cmp_hdr_info;
};
@@ -385,6 +480,11 @@ struct rx_tpa_start_cmp_ext {
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+#define TPA_START_METADATA0_TCI(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \
+ RX_TPA_START_CMP_METADATA0_SFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -529,6 +629,8 @@ struct nqe_cn {
#define NQ_CN_TYPE_SFT 0
#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
#define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
+ #define NQ_CN_TOGGLE_MASK 0xc0UL
+ #define NQ_CN_TOGGLE_SFT 6
__le16 reserved16;
__le32 cq_handle_low;
__le32 v;
@@ -536,6 +638,23 @@ struct nqe_cn {
__le32 cq_handle_high;
};
+#define BNXT_NQ_HDL_IDX_MASK 0x00ffffff
+#define BNXT_NQ_HDL_TYPE_MASK 0xff000000
+#define BNXT_NQ_HDL_TYPE_SHIFT 24
+#define BNXT_NQ_HDL_TYPE_RX 0x00
+#define BNXT_NQ_HDL_TYPE_TX 0x01
+
+#define BNXT_NQ_HDL_IDX(hdl) ((hdl) & BNXT_NQ_HDL_IDX_MASK)
+#define BNXT_NQ_HDL_TYPE(hdl) (((hdl) & BNXT_NQ_HDL_TYPE_MASK) >> \
+ BNXT_NQ_HDL_TYPE_SHIFT)
+
+#define BNXT_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNXT_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
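/* Decoding sketch for a CQ-notification NQE under the new scheme (field
 * and variable names assumed; "nqcmp" is a struct nqe_cn pointer): the
 * toggle comes from the NQE type word, and the cp_ring_arr index from the
 * handle packed by BNXT_SET_NQ_HDL().
 */
	struct bnxt_cp_ring_info *cpr;
	u16 type = le16_to_cpu(nqcmp->type);
	u32 hdl = le32_to_cpu(nqcmp->cq_handle_low);

	if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
		cpr = &bnapi->cp_ring.cp_ring_arr[BNXT_NQ_HDL_IDX(hdl)];
		cpr->toggle = NQE_CN_TOGGLE(type);	/* latched for CQ re-arm */
		cpr->had_nqe_notify = 1;
	}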
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -551,9 +670,14 @@ struct nqe_cn {
/* 64-bit doorbell */
#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_EPOCH_MASK 0x01000000UL
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_MASK 0x06000000UL
+#define DBR_TOGGLE_SFT 25
#define DBR_XID_MASK 0x000fffff00000000ULL
#define DBR_XID_SFT 32
#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
#define DBR_TYPE_SQ (0x0ULL << 60)
#define DBR_TYPE_RQ (0x1ULL << 60)
#define DBR_TYPE_SRQ (0x2ULL << 60)
@@ -566,6 +690,7 @@ struct nqe_cn {
#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
#define DBR_TYPE_NQ (0xaULL << 60)
#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
#define DBR_TYPE_NULL (0xfULL << 60)
#define DB_PF_OFFSET_P5 0x10000
@@ -661,10 +786,12 @@ struct nqe_cn {
*/
#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
-#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_RING(bp, x) (((x) & (bp)->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bp, x) (((x) & (bp)->rx_agg_ring_mask) >> \
+ (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
-#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_RING(bp, x) (((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
@@ -691,11 +818,14 @@ struct nqe_cn {
#define RX_CMP_TYPE(rxcmp) \
(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
-#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
+#define RING_RX(bp, idx) ((idx) & (bp)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
-#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
+#define RING_RX_AGG(bp, idx) ((idx) & (bp)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
-#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
+#define RING_TX(bp, idx) ((idx) & (bp)->tx_ring_mask)
+#define NEXT_TX(idx) ((idx) + 1)
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
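/* Sketch of the free-running index convention introduced here (assuming a
 * 256-entry TX ring, bp->tx_ring_mask == 0xff): NEXT_TX() no longer wraps,
 * so raw producer/consumer counts keep incrementing, and RING_TX()/TX_RING()
 * apply the mask only where the descriptor array is actually indexed.
 */
	u16 prod = 0xff;		/* last slot of the ring */

	prod = NEXT_TX(prod);		/* 0x100: past the mask, not wrapped */
	txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
	/* TX_RING()/TX_IDX() see prod & 0xff == 0, while the untruncated
	 * value is what DB_RING_IDX() below uses to derive the epoch bit
	 */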
@@ -708,6 +838,7 @@ struct nqe_cn {
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
#define BNXT_REDIRECT_EVENT 8
+#define BNXT_TX_CMP_EVENT 0x10
struct bnxt_sw_tx_bd {
union {
@@ -736,13 +867,6 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
-struct bnxt_mem_init {
- u8 init_val;
- u16 offset;
-#define BNXT_MEM_INVALID_OFFSET 0xffff
- u16 size;
-};
-
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -752,7 +876,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- struct bnxt_mem_init *mem_init;
+ struct bnxt_ctx_mem_type *ctx_mem;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -794,13 +918,27 @@ struct bnxt_db_info {
u64 db_key64;
u32 db_key32;
};
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
};
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
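/* Worked example for DB_EPOCH()/DB_RING_IDX() (values assumed: a 256-entry
 * ring with db_ring_mask == 0xff, db_epoch_mask == 0x100 and
 * db_epoch_shift == DBR_EPOCH_SFT - 8 == 16, so the ring-size bit of a
 * free-running index lands on DBR_EPOCH):
 *
 *	idx = 0x0ff: DB_RING_IDX(db, idx) == 0x0ff, epoch bit clear
 *	idx = 0x100: (0x100 & 0xff) == 0 and (0x100 & 0x100) << 16 ==
 *		     DBR_EPOCH, i.e. doorbell index 0 with the epoch set
 *	idx = 0x200: index 0 again, epoch bit clear
 *
 * The epoch bit therefore toggles on every ring wrap, which presumably
 * lets the hardware tell a wrapped index apart from a stale doorbell.
 */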
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *tx_cpr;
u16 tx_prod;
u16 tx_cons;
+ u16 tx_hw_cons;
u16 txq_index;
+ u8 tx_napi_idx;
u8 kick_pending;
struct bnxt_db_info tx_db;
@@ -895,6 +1033,8 @@ struct bnxt_tpa_info {
u16 cfa_code; /* cfa_code in TPA start compl */
u8 agg_count;
+ u8 vlan_valid:1;
+ u8 cfa_code_valid:1;
struct rx_agg_cmp *agg_arr;
};
@@ -907,6 +1047,7 @@ struct bnxt_tpa_idx_map {
struct bnxt_rx_ring_info {
struct bnxt_napi *bnapi;
+ struct bnxt_cp_ring_info *rx_cpr;
u16 rx_prod;
u16 rx_agg_prod;
u16 rx_sw_agg_prod;
@@ -986,6 +1127,11 @@ struct bnxt_cp_ring_info {
u8 had_work_done:1;
u8 has_more_work:1;
+ u8 had_nqe_notify:1;
+ u8 toggle;
+
+ u8 cp_ring_type;
+ u8 cp_idx;
u32 last_cp_raw_cons;
@@ -1010,11 +1156,18 @@ struct bnxt_cp_ring_info {
struct bnxt_ring_struct cp_ring_struct;
- struct bnxt_cp_ring_info *cp_ring_arr[2];
-#define BNXT_RX_HDL 0
-#define BNXT_TX_HDL 1
+ int cp_ring_count;
+ struct bnxt_cp_ring_info *cp_ring_arr;
};
+#define BNXT_MAX_QUEUE 8
+#define BNXT_MAX_TXR_PER_NAPI BNXT_MAX_QUEUE
+
+#define bnxt_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
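/* Usage sketch for bnxt_for_each_napi_tx() (hypothetical accounting over a
 * given bnapi): the iterator walks bnapi->tx_ring[] until the first NULL
 * slot or BNXT_MAX_TXR_PER_NAPI entries, whichever comes first.
 */
	struct bnxt_tx_ring_info *txr;
	int i, pending = 0;

	bnxt_for_each_napi_tx(i, bnapi, txr)
		pending += (u16)(txr->tx_prod - txr->tx_cons);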
struct bnxt_napi {
struct napi_struct napi;
struct bnxt *bp;
@@ -1022,11 +1175,10 @@ struct bnxt_napi {
int index;
struct bnxt_cp_ring_info cp_ring;
struct bnxt_rx_ring_info *rx_ring;
- struct bnxt_tx_ring_info *tx_ring;
+ struct bnxt_tx_ring_info *tx_ring[BNXT_MAX_TXR_PER_NAPI];
void (*tx_int)(struct bnxt *, struct bnxt_napi *,
int budget);
- int tx_pkts;
u8 events;
u8 tx_fault:1;
@@ -1067,7 +1219,7 @@ struct bnxt_vnic_info {
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
- __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+ struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
u16 uc_filter_count;
u8 *uc_list;
@@ -1180,19 +1332,71 @@ struct bnxt_pf_info {
struct bnxt_vf_info *vf;
};
-struct bnxt_ntuple_filter {
+struct bnxt_filter_base {
struct hlist_node hash;
- u8 dst_mac_addr[ETH_ALEN];
- u8 src_mac_addr[ETH_ALEN];
- struct flow_keys fkeys;
__le64 filter_id;
+ u8 type;
+#define BNXT_FLTR_TYPE_NTUPLE 1
+#define BNXT_FLTR_TYPE_L2 2
+ u8 flags;
+#define BNXT_ACT_DROP 1
+#define BNXT_ACT_RING_DST 2
+#define BNXT_ACT_FUNC_DST 4
+#define BNXT_ACT_NO_AGING 8
u16 sw_id;
- u8 l2_fltr_idx;
u16 rxq;
- u32 flow_id;
+ u16 fw_vnic_id;
+ u16 vf_idx;
unsigned long state;
#define BNXT_FLTR_VALID 0
-#define BNXT_FLTR_UPDATE 1
+#define BNXT_FLTR_INSERTED 1
+#define BNXT_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnxt_ntuple_filter {
+ struct bnxt_filter_base base;
+ struct flow_keys fkeys;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 ntuple_flags;
+#define BNXT_NTUPLE_MATCH_SRC_IP 1
+#define BNXT_NTUPLE_MATCH_DST_IP 2
+#define BNXT_NTUPLE_MATCH_SRC_PORT 4
+#define BNXT_NTUPLE_MATCH_DST_PORT 8
+#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
+ BNXT_NTUPLE_MATCH_DST_IP | \
+ BNXT_NTUPLE_MATCH_SRC_PORT | \
+ BNXT_NTUPLE_MATCH_DST_PORT)
+ u32 flow_id;
+};
+
+struct bnxt_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+struct bnxt_ipv4_tuple {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+struct bnxt_ipv6_tuple {
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_ports ports;
+};
+
+#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
+
+struct bnxt_l2_filter {
+ struct bnxt_filter_base base;
+ struct bnxt_l2_key l2_key;
+ atomic_t refcnt;
};
struct bnxt_link_info {
@@ -1214,6 +1418,7 @@ struct bnxt_link_info {
#define BNXT_LINK_STATE_DOWN 1
#define BNXT_LINK_STATE_UP 2
#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
+ u8 active_lanes;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1248,8 +1453,11 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
+#define BNXT_LINK_SPEED_400GB PORT_PHY_QCFG_RESP_LINK_SPEED_400GB
u16 support_speeds;
u16 support_pam4_speeds;
+ u16 support_speeds2;
+
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1265,12 +1473,52 @@ struct bnxt_link_info {
#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
+ u16 auto_link_speeds2;
+#define BNXT_LINK_SPEEDS2_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB
+#define BNXT_LINK_SPEEDS2_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB
+#define BNXT_LINK_SPEEDS2_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB
+#define BNXT_LINK_SPEEDS2_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB
+#define BNXT_LINK_SPEEDS2_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB
+#define BNXT_LINK_SPEEDS2_MSK_50GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112 \
+ PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112
+
u16 support_auto_speeds;
u16 support_pam4_auto_speeds;
+ u16 support_auto_speeds2;
+
u16 lp_auto_link_speeds;
u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
u16 force_pam4_link_speed;
+ u16 force_link_speed2;
+#define BNXT_LINK_SPEED_50GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56
+#define BNXT_LINK_SPEED_200GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56
+#define BNXT_LINK_SPEED_400GB_PAM4 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56
+#define BNXT_LINK_SPEED_100GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112
+#define BNXT_LINK_SPEED_200GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112
+#define BNXT_LINK_SPEED_400GB_PAM4_112 \
+ PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+
u32 preemphasis;
u8 module_status;
u8 active_fec_sig_mode;
@@ -1301,6 +1549,7 @@ struct bnxt_link_info {
u8 req_signal_mode;
#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+#define BNXT_SIG_MODE_PAM4_112 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1)
u8 req_duplex;
u8 req_flow_ctrl;
@@ -1361,8 +1610,6 @@ struct bnxt_link_info {
(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
BNXT_FEC_RS_OFF(link_info))
-#define BNXT_MAX_QUEUE 8
-
struct bnxt_queue_info {
u8 queue_id;
u8 queue_profile;
@@ -1391,7 +1638,7 @@ struct bnxt_test_info {
};
#define CHIMP_REG_VIEW_ADDR \
- ((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+ ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 0x80000000 : 0xb1000000)
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
@@ -1515,53 +1762,73 @@ do { \
attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
} while (0)
+struct bnxt_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ u32 qp_fast_qpmd_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNXT_MAX_SPLIT_ENTRY];
+ };
+ struct bnxt_ctx_pg_info *pg_info;
+};
+
+#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY 0
+
+#define BNXT_CTX_QP FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNXT_CTX_SRQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNXT_CTX_CQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNXT_CTX_VNIC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNXT_CTX_STAT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNXT_CTX_STQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
+#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
+#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
+#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
+#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+
+#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
+#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_INV ((u16)-1)
+
struct bnxt_ctx_mem_info {
- u32 qp_max_entries;
- u16 qp_min_qp1_entries;
- u16 qp_max_l2_entries;
- u16 qp_entry_size;
- u16 srq_max_l2_entries;
- u32 srq_max_entries;
- u16 srq_entry_size;
- u16 cq_max_l2_entries;
- u32 cq_max_entries;
- u16 cq_entry_size;
- u16 vnic_max_vnic_entries;
- u16 vnic_max_ring_table_entries;
- u16 vnic_entry_size;
- u32 stat_max_entries;
- u16 stat_entry_size;
- u16 tqm_entry_size;
- u32 tqm_min_entries_per_ring;
- u32 tqm_max_entries_per_ring;
- u32 mrav_max_entries;
- u16 mrav_entry_size;
- u16 tim_entry_size;
- u32 tim_max_entries;
- u16 mrav_num_entries_units;
- u8 tqm_entries_multiple;
u8 tqm_fp_rings_count;
u32 flags;
#define BNXT_CTX_FLAG_INITED 0x01
-
- struct bnxt_ctx_pg_info qp_mem;
- struct bnxt_ctx_pg_info srq_mem;
- struct bnxt_ctx_pg_info cq_mem;
- struct bnxt_ctx_pg_info vnic_mem;
- struct bnxt_ctx_pg_info stat_mem;
- struct bnxt_ctx_pg_info mrav_mem;
- struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
-
-#define BNXT_CTX_MEM_INIT_QP 0
-#define BNXT_CTX_MEM_INIT_SRQ 1
-#define BNXT_CTX_MEM_INIT_CQ 2
-#define BNXT_CTX_MEM_INIT_VNIC 3
-#define BNXT_CTX_MEM_INIT_STAT 4
-#define BNXT_CTX_MEM_INIT_MRAV 5
-#define BNXT_CTX_MEM_INIT_MAX 6
- struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
+ struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX];
};
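/* Lookup sketch for the restructured backing store (assumed usage): each
 * context type is now a self-describing entry in ctx_arr[], indexed by its
 * BNXT_CTX_* firmware type, instead of a dedicated struct field per type.
 */
	struct bnxt_ctx_mem_type *ctxm = &bp->ctx->ctx_arr[BNXT_CTX_QP];
	u32 max_qp_entries = 0;		/* hypothetical consumer */

	if (ctxm->flags & BNXT_CTX_MEM_TYPE_VALID)
		max_qp_entries = ctxm->max_entries;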
enum bnxt_health_severity {
@@ -1697,6 +1964,10 @@ enum board_idx {
BCM57508_NPAR,
BCM57504_NPAR,
BCM57502_NPAR,
+ BCM57608,
+ BCM57604,
+ BCM57602,
+ BCM57601,
BCM58802,
BCM58804,
BCM58808,
@@ -1744,14 +2015,14 @@ struct bnxt {
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_57608 0x1760
+
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
u8 chip_rev;
-#define CHIP_NUM_58818 0xd818
-
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1801,7 +2072,7 @@ struct bnxt {
atomic_t intr_sem;
u32 flags;
- #define BNXT_FLAG_CHIP_P5 0x1
+ #define BNXT_FLAG_CHIP_P5_PLUS 0x1
#define BNXT_FLAG_VF 0x2
#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
@@ -1820,8 +2091,6 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
- #define BNXT_FLAG_UDP_RSS_CAP 0x800
- #define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
@@ -1829,13 +2098,15 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
- #define BNXT_FLAG_CHIP_SR2 0x80000
+ #define BNXT_FLAG_CHIP_P7 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
+ #define BNXT_FLAG_UDP_GSO_CAP 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
+ #define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1855,21 +2126,21 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ||\
(bp)->max_tpa_v2) && !is_kdump_kernel())
#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
-#define BNXT_CHIP_SR2(bp) \
- ((bp)->chip_num == CHIP_NUM_58818)
+#define BNXT_CHIP_P7(bp) \
+ ((bp)->chip_num == CHIP_NUM_57608)
-#define BNXT_CHIP_P5_THOR(bp) \
+#define BNXT_CHIP_P5(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp) \
- (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+#define BNXT_CHIP_P5_PLUS(bp) \
+ (BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp))
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1880,7 +2151,7 @@ struct bnxt {
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
#define BNXT_CHIP_P4_PLUS(bp) \
- (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -1941,6 +2212,11 @@ struct bnxt {
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
+ u32 rss_cap;
+#define BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA BIT(0)
+#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
+#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
+#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
u16 max_mtu;
u8 max_tc;
@@ -1949,6 +2225,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2006,7 +2283,6 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
- #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2023,6 +2299,8 @@ struct bnxt {
#define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33)
#define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34)
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
+ #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
+ #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
u32 fw_dbg_cap;
@@ -2067,8 +2345,10 @@ struct bnxt {
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
+ u16 vxlan_gpe_fw_dst_port_id;
__be16 vxlan_port;
__be16 nge_port;
+ __be16 vxlan_gpe_port;
u8 port_partition_type;
u8 port_count;
u16 br_mode;
@@ -2136,9 +2416,11 @@ struct bnxt {
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
+ int db_offset; /* db_offset within db_size */
int db_size;
#define BNXT_NTP_FLTR_MAX_FLTR 4096
+#define BNXT_MAX_FLTR (BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR)
#define BNXT_NTP_FLTR_HASH_SIZE 512
#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
@@ -2147,6 +2429,14 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+#define BNXT_L2_FLTR_MAX_FLTR 1024
+#define BNXT_L2_FLTR_HASH_SIZE 32
+#define BNXT_L2_FLTR_HASH_MASK (BNXT_L2_FLTR_HASH_SIZE - 1)
+ struct hlist_head l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE];
+
+ u32 hash_seed;
+ u64 toeplitz_prefix;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
@@ -2169,6 +2459,7 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
+#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2212,15 +2503,15 @@ struct bnxt {
#define BNXT_NUM_TX_RING_STATS 8
#define BNXT_NUM_TPA_RING_STATS 4
#define BNXT_NUM_TPA_RING_STATS_P5 5
-#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+#define BNXT_NUM_TPA_RING_STATS_P7 6
#define BNXT_RING_STATS_SIZE_P5 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
BNXT_NUM_TPA_RING_STATS_P5) * 8)
-#define BNXT_RING_STATS_SIZE_P5_SR2 \
+#define BNXT_RING_STATS_SIZE_P7 \
((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
- BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+ BNXT_NUM_TPA_RING_STATS_P7) * 8)
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
@@ -2303,10 +2594,11 @@ static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq_relaxed(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel_relaxed(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2318,10 +2610,11 @@ static inline void bnxt_db_write_relaxed(struct bnxt *bp,
static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ bnxt_writeq(bp, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
} else {
- u32 db_val = db->db_key32 | idx;
+ u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);
writel(db_val, db->doorbell);
if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2351,12 +2644,21 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr);
+void bnxt_fill_ipv6_mask(__be32 mask[4]);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
void bnxt_free_ctx_mem(struct bnxt *bp);
+int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
@@ -2366,7 +2668,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- int idx);
+ u16 curr);
void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
@@ -2393,6 +2695,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_fw_init_one(struct bnxt *bp);
bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
+struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+ struct bnxt_ntuple_filter *fltr, u32 idx);
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+ const struct sk_buff *skb);
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+ u32 idx);
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 63e067038..0dbb880a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
- int tc = netdev_get_num_tc(bp->dev);
+ int tc = bp->num_tc;
if (!tc)
tc = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 89809f1b1..ae4529c04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -462,8 +462,6 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
}
bnxt_cancel_reservations(bp, false);
bnxt_free_ctx_mem(bp);
- kfree(bp->ctx);
- bp->ctx = NULL;
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -734,7 +732,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
}
/* earlier devices present as an array of raw bytes */
- if (!BNXT_CHIP_P5(bp)) {
+ if (!BNXT_CHIP_P5_PLUS(bp)) {
dim = 0;
i = 0;
bits *= 3; /* array of 3 version components */
@@ -754,7 +752,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
goto exit;
bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
*nvm_cfg_ver <<= 8;
*nvm_cfg_ver |= ver.vu8;
} else {
@@ -774,7 +772,7 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
if (!strlen(buf))
return 0;
- if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
return 0;
@@ -1000,7 +998,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (BNXT_CHIP_P5(bp)) {
+ if (BNXT_CHIP_P5_PLUS(bp)) {
rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 5f67a7f94..dc4ca706b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -460,6 +460,7 @@ static const struct {
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};
static const struct {
@@ -510,9 +511,9 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
if (bp->max_tpa_v2) {
- if (BNXT_CHIP_P5_THOR(bp))
+ if (BNXT_CHIP_P5(bp))
return BNXT_NUM_TPA_RING_STATS_P5;
- return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ return BNXT_NUM_TPA_RING_STATS_P7;
}
return BNXT_NUM_TPA_RING_STATS;
}
@@ -527,7 +528,8 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp)
bnxt_get_num_tpa_ring_stats(bp);
tx = NUM_RING_TX_HW_STATS;
cmn = NUM_RING_CMN_SW_STATS;
- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ return rx * bp->rx_nr_rings +
+ tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
cmn * bp->cp_nr_rings;
}
@@ -882,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@@ -922,6 +924,7 @@ static int bnxt_set_channels(struct net_device *dev,
bool sh = false;
int tx_xdp = 0;
int rc = 0;
+ int tx_cp;
if (channel->other_count)
return -EINVAL;
@@ -941,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -988,8 +991,9 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
- bp->tx_nr_rings + bp->rx_nr_rings;
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
+ tx_cp + bp->rx_nr_rings;
/* After changing number of rx channels, update NTUPLE feature. */
netdev_update_features(dev);
@@ -1007,29 +1011,60 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
-#ifdef CONFIG_RFS_ACCEL
-static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
+ int tbl_size, u32 *ids, u32 start,
+ u32 id_cnt)
{
- int i, j = 0;
+ int i, j = start;
- cmd->data = bp->ntp_fltr_count;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+ if (j >= id_cnt)
+ return j;
+ for (i = 0; i < tbl_size; i++) {
struct hlist_head *head;
- struct bnxt_ntuple_filter *fltr;
+ struct bnxt_filter_base *fltr;
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
+ head = &tbl[i];
hlist_for_each_entry_rcu(fltr, head, hash) {
- if (j == cmd->rule_cnt)
- break;
- rule_locs[j++] = fltr->sw_id;
+ if (!fltr->flags ||
+ test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
+ continue;
+ ids[j++] = fltr->sw_id;
+ if (j == id_cnt)
+ return j;
+ }
+ }
+ return j;
+}
+
+static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
+ struct hlist_head tbl[],
+ int tbl_size, u32 id)
+{
+ int i;
+
+ for (i = 0; i < tbl_size; i++) {
+ struct hlist_head *head;
+ struct bnxt_filter_base *fltr;
+
+ head = &tbl[i];
+ hlist_for_each_entry_rcu(fltr, head, hash) {
+ if (fltr->flags && fltr->sw_id == id)
+ return fltr;
}
- rcu_read_unlock();
- if (j == cmd->rule_cnt)
- break;
}
- cmd->rule_cnt = j;
+ return NULL;
+}
+
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ cmd->data = bp->ntp_fltr_count;
+ rcu_read_lock();
+ cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ rule_locs, 0, cmd->rule_cnt);
+ rcu_read_unlock();
+
return 0;
}
@@ -1037,27 +1072,24 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fs =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
struct flow_keys *fkeys;
- int i, rc = -EINVAL;
+ int rc = -EINVAL;
if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
return rc;
- for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
- struct hlist_head *head;
-
- head = &bp->ntp_fltr_hash_tbl[i];
- rcu_read_lock();
- hlist_for_each_entry_rcu(fltr, head, hash) {
- if (fltr->sw_id == fs->location)
- goto fltr_found;
- }
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
rcu_read_unlock();
+ return rc;
}
- return rc;
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
-fltr_found:
fkeys = &fltr->fkeys;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
if (fkeys->basic.ip_proto == IPPROTO_TCP)
@@ -1067,20 +1099,23 @@ fltr_found:
else
goto fltr_err;
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
-
- fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ }
} else {
- int i;
-
if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V6_FLOW;
else if (fkeys->basic.ip_proto == IPPROTO_UDP)
@@ -1088,22 +1123,27 @@ fltr_found:
else
goto fltr_err;
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- for (i = 0; i < 4; i++) {
- fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
- fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+ fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+ fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
+ }
+ if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+ fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
-
- fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
- fs->ring_cookie = fltr->rxq;
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
@@ -1111,7 +1151,221 @@ fltr_err:
return rc;
}
-#endif
+
+#define IPV4_ALL_MASK ((__force __be32)~0)
+#define L4_PORT_ALL_MASK ((__force __be16)~0)
+
+static bool ipv6_mask_is_full(__be32 mask[4])
+{
+ return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+}
+
+static bool ipv6_mask_is_zero(__be32 mask[4])
+{
+ return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
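/* The helpers above enforce all-or-nothing IPv6 masks for user ntuple
 * rules (a sketch with assumed masks): a full /128 mask selects the field,
 * an all-zero mask wildcards it, and anything in between (e.g. a /64
 * prefix) takes the ntuple_err path in bnxt_add_ntuple_cls_rule() below.
 */
	__be32 full[4] = { IPV4_ALL_MASK, IPV4_ALL_MASK,
			   IPV4_ALL_MASK, IPV4_ALL_MASK };
	__be32 pfx64[4] = { IPV4_ALL_MASK, IPV4_ALL_MASK, 0, 0 };

	ipv6_mask_is_full(full);	/* true: match the whole address */
	ipv6_mask_is_full(pfx64);	/* false */
	ipv6_mask_is_zero(pfx64);	/* false: partial mask -> rejected */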
+static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct bnxt_ntuple_filter *new_fltr, *fltr;
+ struct bnxt_l2_filter *l2_fltr;
+ u32 flow_type = fs->flow_type;
+ struct flow_keys *fkeys;
+ u32 idx;
+ int rc;
+
+ if (!bp->vnic_info)
+ return -EAGAIN;
+
+ if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ return -EOPNOTSUPP;
+
+ new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
+ if (!new_fltr)
+ return -ENOMEM;
+
+ l2_fltr = bp->vnic_info[0].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ new_fltr->l2_fltr = l2_fltr;
+ fkeys = &new_fltr->fkeys;
+
+ rc = -EOPNOTSUPP;
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V4_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+
+ if (ip_mask->ip4src == IPV4_ALL_MASK) {
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+ } else if (ip_mask->ip4src) {
+ goto ntuple_err;
+ }
+ if (ip_mask->ip4dst == IPV4_ALL_MASK) {
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+ } else if (ip_mask->ip4dst) {
+ goto ntuple_err;
+ }
+
+ if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+ fkeys->ports.src = ip_spec->psrc;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+ } else if (ip_mask->psrc) {
+ goto ntuple_err;
+ }
+ if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+ fkeys->ports.dst = ip_spec->pdst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+ } else if (ip_mask->pdst) {
+ goto ntuple_err;
+ }
+ break;
+ }
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW: {
+ struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
+
+ fkeys->basic.ip_proto = IPPROTO_TCP;
+ if (flow_type == UDP_V6_FLOW)
+ fkeys->basic.ip_proto = IPPROTO_UDP;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+
+ if (ipv6_mask_is_full(ip_mask->ip6src)) {
+ fkeys->addrs.v6addrs.src =
+ *(struct in6_addr *)&ip_spec->ip6src;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+ } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
+ goto ntuple_err;
+ }
+ if (ipv6_mask_is_full(ip_mask->ip6dst)) {
+ fkeys->addrs.v6addrs.dst =
+ *(struct in6_addr *)&ip_spec->ip6dst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+ } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
+ goto ntuple_err;
+ }
+
+ if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+ fkeys->ports.src = ip_spec->psrc;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+ } else if (ip_mask->psrc) {
+ goto ntuple_err;
+ }
+ if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+ fkeys->ports.dst = ip_spec->pdst;
+ new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+ } else if (ip_mask->pdst) {
+ goto ntuple_err;
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ goto ntuple_err;
+ }
+ if (!new_fltr->ntuple_flags)
+ goto ntuple_err;
+
+ idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
+ rcu_read_lock();
+ fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+ if (fltr) {
+ rcu_read_unlock();
+ rc = -EEXIST;
+ goto ntuple_err;
+ }
+ rcu_read_unlock();
+
+ new_fltr->base.rxq = ring;
+ new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
+ rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+ if (!rc) {
+ rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
+ if (rc) {
+ bnxt_del_ntp_filter(bp, new_fltr);
+ return rc;
+ }
+ fs->location = new_fltr->base.sw_id;
+ return 0;
+ }
+
+ntuple_err:
+ atomic_dec(&l2_fltr->refcnt);
+ kfree(new_fltr);
+ return rc;
+}
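   [ Note: bnxt_add_ntuple_cls_rule() above follows a reserve-then-commit
     ordering — the filter is published to the software hash table first (so
     a duplicate insert fails fast with -EEXIST), then committed to firmware,
     and the software insert is rolled back if the HWRM call fails. A sketch
     of that pattern with hypothetical helper names:

	int add_filter(struct tbl *t, struct fw *fw, struct fltr *f)
	{
		int rc = sw_insert(t, f);   /* lookups can see it from here */
		if (rc)
			return rc;
		rc = fw_alloc(fw, f);       /* HWRM_CFA_NTUPLE_FILTER_ALLOC */
		if (rc)
			sw_remove(t, f);    /* undo the software publish */
		return rc;
	}
   ]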
+
+static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ u32 ring, flow_type;
+ int rc;
+ u8 vf;
+
+ if (!netif_running(bp->dev))
+ return -EAGAIN;
+ if (!(bp->flags & BNXT_FLAG_RFS))
+ return -EPERM;
+ if (fs->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ if (BNXT_VF(bp) && vf)
+ return -EINVAL;
+ if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
+ return -EINVAL;
+ if (!vf && ring >= bp->rx_nr_rings)
+ return -EINVAL;
+
+ flow_type = fs->flow_type;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+ if (flow_type == ETHER_FLOW)
+ rc = -EOPNOTSUPP;
+ else
+ rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ return rc;
+}
+
+static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct bnxt_filter_base *fltr_base;
+ struct bnxt_ntuple_filter *fltr;
+
+ rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+ BNXT_NTP_FLTR_HASH_SIZE,
+ fs->location);
+ if (!fltr_base) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
+ if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
+ bnxt_del_ntp_filter(bp, fltr);
+ return 0;
+}
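   [ Note: from user space these two handlers are driven by ethtool's ntuple
     interface — ETHTOOL_SRXCLSRLINS maps to rule insertion and
     ETHTOOL_SRXCLSRLDEL to deletion. A hedged example with placeholder
     device and ring index; the driver requires the rule location to be left
     unspecified, which works because RX_CLS_LOC_SPECIAL is advertised in
     the GRXCLSRLCNT reply elsewhere in this patch:

	ethtool -N eth0 flow-type tcp4 dst-ip 10.0.0.1 dst-port 443 action 2
	ethtool -N eth0 delete 15     # sw_id reported back in fs->location
	ethtool -n eth0               # list rules (GRXCLSRLALL/GRXCLSRULE)
   ]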
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
@@ -1194,7 +1448,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
} else if (cmd->flow_type == UDP_V4_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
if (tuple == 4)
@@ -1204,7 +1458,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
} else if (cmd->flow_type == UDP_V6_FLOW) {
- if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
+ if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
return -EINVAL;
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
@@ -1244,7 +1498,7 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg == rss_hash_cfg)
return 0;
- if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
bp->rss_hash_cfg = rss_hash_cfg;
if (netif_running(bp->dev)) {
@@ -1261,14 +1515,13 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int rc = 0;
switch (cmd->cmd) {
-#ifdef CONFIG_RFS_ACCEL
case ETHTOOL_GRXRINGS:
cmd->data = bp->rx_nr_rings;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+ cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1278,7 +1531,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRULE:
rc = bnxt_grxclsrule(bp, cmd);
break;
-#endif
case ETHTOOL_GRXFH:
rc = bnxt_grxfh(bp, cmd);
@@ -1302,6 +1554,14 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
rc = bnxt_srxfh(bp, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ rc = bnxt_srxclsrlins(bp, cmd);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ rc = bnxt_srxclsrldel(bp, cmd);
+ break;
+
default:
rc = -EOPNOTSUPP;
break;
@@ -1313,8 +1573,9 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}
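   [ Note: rough sizing arithmetic for the P5_PLUS branch, assuming
     BNXT_RSS_TABLE_ENTRIES_P5 is 64 and bnxt_get_nr_rss_ctxs() rounds the
     ring count up to that granularity:

	10 RX rings -> 1 RSS context  -> 64-entry indirection table
	80 RX rings -> 2 RSS contexts -> 128-entry indirection table

     Older chips keep the fixed HW_HASH_INDEX_SIZE table. ]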
@@ -1323,49 +1584,49 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
return HW_HASH_KEY_SIZE;
}
-static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int bnxt_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (!bp->vnic_info)
return 0;
vnic = &bp->vnic_info[0];
- if (indir && bp->rss_indir_tbl) {
+ if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = bp->rss_indir_tbl[i];
}
- if (key && vnic->rss_hash_key)
- memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ if (rxfh->key && vnic->rss_hash_key)
+ memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
return 0;
}
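   [ Note: this hunk tracks the 6.8 ethtool core conversion that bundles the
     RSS parameters into struct ethtool_rxfh_param. The ops change shape
     roughly as follows (per include/linux/ethtool.h in this kernel):

	int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
	int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
			struct netlink_ext_ack *extack);

     so the set path gains extack-based error reporting. ]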
-static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int bnxt_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (key)
+ if (rxfh->key)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = indir[i];
+ bp->rss_indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
@@ -1568,6 +1829,22 @@ static const enum bnxt_media_type bnxt_phy_types[] = {
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
};
static enum bnxt_media_type
@@ -1595,6 +1872,7 @@ enum bnxt_link_speed_indices {
BNXT_LINK_SPEED_50GB_IDX,
BNXT_LINK_SPEED_100GB_IDX,
BNXT_LINK_SPEED_200GB_IDX,
+ BNXT_LINK_SPEED_400GB_IDX,
__BNXT_LINK_SPEED_END
};
@@ -1606,9 +1884,21 @@ static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
- case BNXT_LINK_SPEED_50GB: return BNXT_LINK_SPEED_50GB_IDX;
- case BNXT_LINK_SPEED_100GB: return BNXT_LINK_SPEED_100GB_IDX;
- case BNXT_LINK_SPEED_200GB: return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
+ return BNXT_LINK_SPEED_50GB_IDX;
+ case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
+ return BNXT_LINK_SPEED_100GB_IDX;
+ case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
+ return BNXT_LINK_SPEED_200GB_IDX;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return BNXT_LINK_SPEED_400GB_IDX;
default: return BNXT_LINK_SPEED_UNKNOWN;
}
}
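   [ Note: the firmware encodes link speed in 100 Mb/s units, which is why
     several values fold into one index here; judging from the
     force_link_speeds2 values later in this patch, the PAM4 variants add
     small offsets to the same base yet map to the same ethtool speed:

	0x1f4 = 500  -> 50G  NRZ       -> BNXT_LINK_SPEED_50GB_IDX
	0x1f5 = 501  -> 50G  PAM4-56   -> BNXT_LINK_SPEED_50GB_IDX
	0x3ea = 1002 -> 100G PAM4-112  -> BNXT_LINK_SPEED_100GB_IDX
   ]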
@@ -1681,6 +1971,12 @@ bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
},
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ },
},
[BNXT_LINK_SPEED_200GB_IDX] = {
[BNXT_SIG_MODE_PAM4] = {
@@ -1689,6 +1985,26 @@ bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
},
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_400GB_IDX] = {
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4_112] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ },
},
};
@@ -1753,7 +2069,8 @@ static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.supported);
}
- if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds)
+ if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
+ link_info->support_pam4_auto_speeds)
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
lk_ksettings->link_modes.supported);
@@ -1789,22 +2106,60 @@ static const u16 bnxt_pam4_speed_masks[] = {
[BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
[BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
[BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_nrz_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
+ [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
+ [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
+ [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_pam4_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
+};
+
+static const u16 bnxt_pam4_112_speeds2_masks[] = {
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
+ [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
};
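   [ Note: the explicit last-index initializer on the first two tables is a
     sizing trick — a designated initializer at [__BNXT_LINK_SPEED_END - 1]
     stretches ARRAY_SIZE() to cover every speed index, so the lookup in
     bnxt_encoding_speed_idx() below stays in bounds for any legal speed.
     A standalone illustration:

	static const u16 demo[] = {
		[2] = 0x4,
		[7] = 0,	/* sentinel: ARRAY_SIZE(demo) == 8, not 3 */
	};
   ]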
static enum bnxt_link_speed_indices
-bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
+bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
{
const u16 *speeds;
int idx, len;
switch (sig_mode) {
case BNXT_SIG_MODE_NRZ:
- speeds = bnxt_nrz_speed_masks;
- len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_nrz_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
+ } else {
+ speeds = bnxt_nrz_speed_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ }
break;
case BNXT_SIG_MODE_PAM4:
- speeds = bnxt_pam4_speed_masks;
- len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ speeds = bnxt_pam4_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
+ } else {
+ speeds = bnxt_pam4_speed_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ }
+ break;
+ case BNXT_SIG_MODE_PAM4_112:
+ speeds = bnxt_pam4_112_speeds2_masks;
+ len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
break;
default:
return BNXT_LINK_SPEED_UNKNOWN;
@@ -1822,14 +2177,14 @@ bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
static void
__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
- u8 sig_mode, unsigned long *et_mask)
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
enum ethtool_link_mode_bit_indices link_mode;
enum bnxt_link_speed_indices speed;
u8 bit;
for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
- speed = bnxt_encoding_speed_idx(sig_mode, 1 << bit);
+ speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
if (!speed)
continue;
@@ -1843,16 +2198,83 @@ __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
static void
bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
- u8 sig_mode, unsigned long *et_mask)
+ u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
{
if (media) {
- __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
return;
}
/* list speeds for all media if unknown */
for (media = 1; media < __BNXT_MEDIA_END; media++)
- __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
+ et_mask);
+}
+
+static void
+bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_nrz = link_info->support_speeds2;
+ sp_pam4 = link_info->support_speeds2;
+ sp_pam4_112 = link_info->support_speeds2;
+ } else {
+ sp_nrz = link_info->support_speeds;
+ sp_pam4 = link_info->support_pam4_speeds;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.supported);
+}
+
+static void
+bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
+ u16 phy_flags = bp->phy_flags;
+
+ sp_nrz = link_info->advertising;
+ if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ sp_pam4 = link_info->advertising;
+ sp_pam4_112 = link_info->advertising;
+ } else {
+ sp_pam4 = link_info->advertising_pam4;
+ }
+ bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
+ phy_flags, lk_ksettings->link_modes.advertising);
+}
+
+static void
+bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
+ enum bnxt_media_type media,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 phy_flags = bp->phy_flags;
+
+ bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
+ BNXT_SIG_MODE_NRZ, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
+ bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
+ BNXT_SIG_MODE_PAM4, phy_flags,
+ lk_ksettings->link_modes.lp_advertising);
}
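   [ Note: on BNXT_PHY_FL_SPEEDS2 hardware a single 16-bit firmware word
     carries all three signaling modes, so the same value is deliberately
     fed through each per-mode mask table above. Taking 100G as an example
     from the support_speeds2 bits defined later in this patch:

	0x20  (bit 5)  -> 100G NRZ
	0x80  (bit 7)  -> 100G PAM4-56
	0x400 (bit 10) -> 100G PAM4-112
   ]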
static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
@@ -1881,22 +2303,42 @@ static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
const unsigned long *et_mask)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+ u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
enum bnxt_media_type media = bnxt_get_media(link_info);
+ u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
+ u32 delta_pam4_112 = 0;
u32 delta_pam4 = 0;
u32 delta_nrz = 0;
int i, m;
+ adv = &link_info->advertising;
+ if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
+ adv_pam4 = &link_info->advertising;
+ adv_pam4_112 = &link_info->advertising;
+ sp_msks = bnxt_nrz_speeds2_masks;
+ sp_pam4_msks = bnxt_pam4_speeds2_masks;
+ sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
+ } else {
+ adv_pam4 = &link_info->advertising_pam4;
+ sp_msks = bnxt_nrz_speed_masks;
+ sp_pam4_msks = bnxt_pam4_speed_masks;
+ }
for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
/* accept any legal media from user */
for (m = 1; m < __BNXT_MEDIA_END; m++) {
bnxt_update_speed(&delta_nrz, m == media,
- &link_info->advertising,
- bnxt_nrz_speed_masks[i], et_mask,
+ adv, sp_msks[i], et_mask,
bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
bnxt_update_speed(&delta_pam4, m == media,
- &link_info->advertising_pam4,
- bnxt_pam4_speed_masks[i], et_mask,
+ adv_pam4, sp_pam4_msks[i], et_mask,
bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
+ if (!adv_pam4_112)
+ continue;
+
+ bnxt_update_speed(&delta_pam4_112, m == media,
+ adv_pam4_112, sp_pam4_112_msks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
}
}
}
@@ -1961,11 +2403,20 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
case BNXT_LINK_SPEED_40GB:
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
+ case BNXT_LINK_SPEED_50GB_PAM4:
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
+ case BNXT_LINK_SPEED_100GB_PAM4:
+ case BNXT_LINK_SPEED_100GB_PAM4_112:
return SPEED_100000;
case BNXT_LINK_SPEED_200GB:
+ case BNXT_LINK_SPEED_200GB_PAM4:
+ case BNXT_LINK_SPEED_200GB_PAM4_112:
return SPEED_200000;
+ case BNXT_LINK_SPEED_400GB:
+ case BNXT_LINK_SPEED_400GB_PAM4:
+ case BNXT_LINK_SPEED_400GB_PAM4_112:
+ return SPEED_400000;
default:
return SPEED_UNKNOWN;
}
@@ -1981,6 +2432,7 @@ static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
base->duplex = DUPLEX_HALF;
if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
base->duplex = DUPLEX_FULL;
+ lk_ksettings->lanes = link_info->active_lanes;
} else if (!link_info->autoneg) {
base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
base->duplex = DUPLEX_HALF;
@@ -2008,12 +2460,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
bnxt_get_ethtool_modes(link_info, lk_ksettings);
media = bnxt_get_media(link_info);
- bnxt_get_ethtool_speeds(link_info->support_speeds,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.supported);
- bnxt_get_ethtool_speeds(link_info->support_pam4_speeds,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.supported);
+ bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
link_mode = bnxt_get_link_mode(link_info);
if (link_mode != BNXT_LINK_MODE_UNKNOWN)
@@ -2026,20 +2473,10 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
lk_ksettings->link_modes.advertising);
base->autoneg = AUTONEG_ENABLE;
- bnxt_get_ethtool_speeds(link_info->advertising,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.advertising);
- bnxt_get_ethtool_speeds(link_info->advertising_pam4,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.advertising);
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds,
- media, BNXT_SIG_MODE_NRZ,
- lk_ksettings->link_modes.lp_advertising);
- bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds,
- media, BNXT_SIG_MODE_PAM4,
- lk_ksettings->link_modes.lp_advertising);
- }
+ bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ bnxt_get_all_ethtool_lp_speeds(link_info, media,
+ lk_ksettings);
} else {
base->autoneg = AUTONEG_DISABLE;
}
@@ -2074,6 +2511,7 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
u16 support_pam4_spds = link_info->support_pam4_speeds;
+ u16 support_spds2 = link_info->support_speeds2;
u16 support_spds = link_info->support_speeds;
u8 sig_mode = BNXT_SIG_MODE_NRZ;
u32 lanes_needed = 1;
@@ -2085,7 +2523,8 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
- if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
@@ -2093,7 +2532,8 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
- if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
@@ -2103,26 +2543,34 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
}
break;
case SPEED_25000:
- if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
- if (support_spds & BNXT_LINK_SPEED_MSK_40GB) {
+ if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
lanes_needed = 4;
}
break;
case SPEED_50000:
- if ((support_spds & BNXT_LINK_SPEED_MSK_50GB) && lanes != 1) {
+ if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
+ lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
lanes_needed = 2;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
+ fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
}
break;
case SPEED_100000:
- if ((support_spds & BNXT_LINK_SPEED_MSK_100GB) &&
+ if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
+ (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
lanes != 2 && lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
lanes_needed = 4;
@@ -2130,6 +2578,14 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
sig_mode = BNXT_SIG_MODE_PAM4;
lanes_needed = 2;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
+ lanes != 1) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 2;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
}
break;
case SPEED_200000:
@@ -2137,6 +2593,27 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
sig_mode = BNXT_SIG_MODE_PAM4;
lanes_needed = 4;
+ } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
+ lanes != 2) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 4;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 2;
+ }
+ break;
+ case SPEED_400000:
+ if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
+ lanes != 4) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 8;
+ } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
+ fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
+ sig_mode = BNXT_SIG_MODE_PAM4_112;
+ lanes_needed = 4;
}
break;
}
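   [ Note: the lanes argument is what disambiguates encodings that share one
     ethtool speed; summarizing the cases above:

	100G: lanes 4 -> NRZ (25G/lane), lanes 2 -> PAM4-56 (50G/lane),
	      lanes 1 -> PAM4-112 (100G/lane)
	400G: lanes 8 -> PAM4-56, lanes 4 -> PAM4-112

     From user space this reaches the driver via, e.g. (placeholder device):

	ethtool -s eth0 speed 400000 lanes 4 autoneg off
   ]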
@@ -3910,7 +4387,8 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
* reading any further.
*/
dma_rmb();
- if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
+ TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
raw_cons = NEXT_RAW_CMP(raw_cons);
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -3934,8 +4412,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
int rc;
cpr = &rxr->bnapi->cp_ring;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
- cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ cpr = rxr->rx_cpr;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index d5fad5a3c..e957abd70 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -40,6 +40,8 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
@@ -196,6 +198,9 @@ struct cmd_nums {
#define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
#define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
#define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -214,6 +219,7 @@ struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
#define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
#define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
#define HWRM_STAT_CTX_ENG_QUERY 0xafUL
#define HWRM_STAT_CTX_ALLOC 0xb0UL
#define HWRM_STAT_CTX_FREE 0xb1UL
@@ -261,6 +267,7 @@ struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG 0xdbUL
#define HWRM_PORT_CFG 0xdcUL
#define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
@@ -392,6 +399,10 @@ struct cmd_nums {
#define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
#define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
#define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -406,9 +417,9 @@ struct cmd_nums {
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
#define HWRM_MFG_SOC_IMAGE 0x20cUL
#define HWRM_MFG_SOC_QSTATUS 0x20dUL
- #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
- #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
#define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
@@ -418,6 +429,16 @@ struct cmd_nums {
#define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -582,9 +603,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 171
-#define HWRM_VERSION_STR "1.10.2.171"
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 15
+#define HWRM_VERSION_STR "1.10.3.15"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -816,7 +837,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1632,7 +1654,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:896b/112B) */
+/* hwrm_func_qcaps_output (size:1088b/136B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1736,21 +1758,29 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
__le16 max_key_ctxs_alloc;
__le32 flags_ext2;
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1760,15 +1790,21 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
- u8 key_xid_partition_cap;
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL
- u8 unused_1;
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[5];
+ u8 unused_2[2];
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ u8 unused_3[3];
u8 valid;
};
@@ -1783,7 +1819,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1024b/128B) */
+/* hwrm_func_qcfg_output (size:1280b/160B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1892,7 +1928,7 @@ struct hwrm_func_qcfg_output {
__le16 alloc_msix;
__le16 registered_vfs;
__le16 l2_doorbell_bar_size_kb;
- u8 unused_1;
+ u8 active_endpoints;
u8 always_1;
__le32 reset_addr_poll;
__le16 legacy_l2_db_size_kb;
@@ -1952,15 +1988,26 @@ struct hwrm_func_qcfg_output {
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
u8 unused_5[2];
- __le32 alloc_tx_key_ctxs;
- __le32 alloc_rx_key_ctxs;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
u8 lag_id;
u8 parif;
- u8 unused_6[5];
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ u8 unused_7;
u8 valid;
};
-/* hwrm_func_cfg_input (size:1088b/136B) */
+/* hwrm_func_cfg_input (size:1280b/160B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1996,7 +2043,6 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
- #define FUNC_CFG_REQ_FLAGS_KEY_CTX_ASSETS_TEST 0x80000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2028,8 +2074,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
#define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
#define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
- #define FUNC_CFG_REQ_ENABLES_TX_KEY_CTXS 0x40000000UL
- #define FUNC_CFG_REQ_ENABLES_RX_KEY_CTXS 0x80000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
__le16 admin_mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -2139,10 +2185,21 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
- u8 unused_0[4];
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
__le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
@@ -2165,7 +2222,18 @@ struct hwrm_func_cfg_input {
__le32 num_ktls_rx_key_ctxs;
__le32 num_quic_tx_key_ctxs;
__le32 num_quic_rx_key_ctxs;
- __le32 unused_2;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
+ __le16 unused_2;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2604,7 +2672,7 @@ struct hwrm_func_vf_resource_cfg_input {
__le32 max_quic_rx_key_ctxs;
};
-/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
struct hwrm_func_vf_resource_cfg_output {
__le16 error_code;
__le16 req_type;
@@ -2618,8 +2686,10 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- __le32 reserved_tx_key_ctxs;
- __le32 reserved_rx_key_ctxs;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
u8 unused_0[7];
u8 valid;
};
@@ -3441,7 +3511,8 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
#define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
- #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
u8 unused_0[3];
__le32 ptp_freq_adj_ext_period;
__le32 ptp_freq_adj_ext_up;
@@ -3627,28 +3698,28 @@ struct hwrm_func_backing_store_qcfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
u8 rsvd[4];
};
@@ -3744,6 +3815,15 @@ struct mrav_split_entries {
__le32 rsvd2[2];
};
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 rsvd;
+ __le32 rsvd2[2];
+};
+
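   [ Note: size check for the (size:128b/16B) annotation on ts_split_entries:

	__le32 region_num_entries           4 B
	u8     tsid                         1 B
	u8     lkup_static_bkt_cnt_exp[2]   2 B
	u8     rsvd                         1 B
	__le32 rsvd2[2]                     8 B
	                           total   16 B = 128 b
   ]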
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3761,8 +3841,8 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
@@ -3793,8 +3873,8 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
@@ -3838,56 +3918,55 @@ struct hwrm_func_backing_store_qcaps_v2_output {
/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
struct hwrm_func_dbr_pacing_qcfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
};
/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
struct hwrm_func_dbr_pacing_qcfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
-#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
- u8 unused_0[7];
- __le32 dbr_stat_db_fifo_reg;
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \
- FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
- __le32 dbr_stat_db_fifo_reg_watermark_mask;
- u8 dbr_stat_db_fifo_reg_watermark_shift;
- u8 unused_1[3];
- __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
- u8 dbr_stat_db_fifo_reg_fifo_room_shift;
- u8 unused_2[3];
- __le32 dbr_throttling_aeq_arm_reg;
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \
- FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
-#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
- u8 dbr_throttling_aeq_arm_reg_val;
- u8 unused_3[7];
- __le32 primary_nq_id;
- __le32 pacing_threshold;
- u8 unused_4[7];
- u8 valid;
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
};
/* hwrm_func_drv_if_change_input (size:192b/24B) */
@@ -3915,7 +3994,7 @@ struct hwrm_func_drv_if_change_output {
u8 valid;
};
-/* hwrm_port_phy_cfg_input (size:448b/56B) */
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3960,6 +4039,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -3990,7 +4071,9 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
- u8 unused_0;
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
__le16 auto_link_speed;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
@@ -4054,7 +4137,36 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
- u8 unused_2[2];
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ u8 unused_2[6];
};
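   [ Note: the force_link_speeds2 values are the link speed in 100 Mb/s
     units with a small offset naming the signaling mode — +0 for NRZ,
     +1 for PAM4-56, +2 for PAM4-112. Working through the 100G family:

	0x3e8 = 1000 -> 100G NRZ
	0x3e9 = 1001 -> 100G PAM4-56
	0x3ea = 1002 -> 100G PAM4-112

     and likewise 0xfa1/0xfa2 (4001/4002) for the 400G PAM4 variants. ]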
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -4104,7 +4216,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
#define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
#define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
@@ -4127,6 +4240,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4270,7 +4384,23 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4366,6 +4496,7 @@ struct hwrm_port_phy_qcfg_output {
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -4387,7 +4518,53 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
u8 link_down_reason;
#define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
- u8 unused_0[7];
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
u8 valid;
};
@@ -4426,6 +4603,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
#define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -4459,7 +4637,12 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- u8 unused_1[4];
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
__le64 ptp_adj_phase;
};
@@ -4504,6 +4687,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4968,7 +5152,7 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:256b/32B) */
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -5051,7 +5235,40 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
#define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
u8 valid;
};
@@ -5472,6 +5689,30 @@ struct hwrm_port_led_qcaps_output {
u8 valid;
};
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -7488,7 +7729,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
__le16 dst_id;
- __le16 mirror_vnic_id;
+ __le16 rfs_ring_tbl_idx;
u8 tunnel_type;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
@@ -8201,6 +8442,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
u8 unused_0[3];
u8 valid;
};
@@ -8223,7 +8465,8 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
u8 unused_0[6];
};
@@ -8245,7 +8488,10 @@ struct hwrm_tunnel_dst_port_query_output {
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
#define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
- u8 unused_0[2];
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
u8 valid;
};
@@ -8267,7 +8513,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
u8 unused_0[4];
@@ -8284,7 +8531,8 @@ struct hwrm_tunnel_dst_port_alloc_output {
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
#define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
- #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
u8 upar_in_use;
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
@@ -8316,7 +8564,8 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
u8 unused_0[4];
@@ -9717,7 +9966,7 @@ struct hwrm_nvm_get_dev_info_input {
__le64 resp_addr;
};
-/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
+/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -9747,6 +9996,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 roce_fw_minor;
__le16 roce_fw_build;
__le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
u8 unused_0[7];
u8 valid;
};
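
A pattern worth noting in the FORCE_LINK_SPEEDS2 constants above: each value is the link speed in units of 100 Mb/s, and the least-significant decimal digit doubles as a signalling-mode tag (0 = NRZ, 1 = PAM4-56, 2 = PAM4-112), so 0x1f4 is 500 (50Gb NRZ), 0x3e9 is 1001 (100Gb PAM4-56) and 0x1f42 is 8002 (800Gb PAM4-112). A minimal user-space decoder built on that observation (not a helper that exists in the driver):

    #include <stdio.h>

    /* Decode a FORCE_LINK_SPEEDS2 value: speed in 100 Mb/s units, with
     * the last decimal digit as a signalling-mode tag. */
    static void decode_force_speed2(unsigned int val)
    {
        static const char * const sig[] = { "NRZ", "PAM4-56", "PAM4-112" };
        unsigned int tag = val % 10;

        printf("0x%x -> %u Mb/s, %s\n", val, (val - tag) * 100,
               tag < 3 ? sig[tag] : "unknown");
    }

    int main(void)
    {
        decode_force_speed2(0x1f4);  /* 50000 Mb/s, NRZ */
        decode_force_speed2(0x3e9);  /* 100000 Mb/s, PAM4-56 */
        decode_force_speed2(0x1f42); /* 800000 Mb/s, PAM4-112 */
        return 0;
    }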
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 6e3da3362..cc0766033 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -129,7 +129,7 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
}
resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
if (!rc)
*ts = le64_to_cpu(resp->ptp_msg_ts);
hwrm_req_drop(bp, req);
@@ -319,15 +319,17 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
-void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct hwrm_port_mac_cfg_input *req;
+ int rc;
if (!ptp || !ptp->tstamp_filters)
- return;
+ return -EIO;
- if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
goto out;
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
@@ -342,15 +344,17 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- if (!hwrm_req_send(bp, req)) {
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
- return;
+ return 0;
}
ptp->tstamp_filters = 0;
out:
bp->ptp_all_rx_tstamp = 0;
netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+ return rc;
}
void bnxt_ptp_reapply_pps(struct bnxt *bp)
@@ -494,7 +498,6 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 flags = 0;
- int rc = 0;
switch (ptp->rx_filter) {
case HWTSTAMP_FILTER_ALL:
@@ -519,18 +522,7 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
ptp->tstamp_filters = flags;
- if (netif_running(bp->dev)) {
- if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
- bnxt_close_nic(bp, false, false);
- rc = bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ptp_cfg_tstamp_filters(bp);
- }
- if (!rc && !ptp->tstamp_filters)
- rc = -EIO;
- }
-
- return rc;
+ return bnxt_ptp_cfg_tstamp_filters(bp);
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -649,7 +641,7 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
int rc, i;
reg_arr = ptp->refclk_regs;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
if (rc)
return rc;
@@ -692,8 +684,8 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
- netdev_err(bp->dev, "TS query for TX timer failed rc = %x\n",
- rc);
+ netdev_warn_once(bp->dev,
+ "TS query for TX timer failed rc = %x\n", rc);
}
dev_kfree_skb_any(ptp->tx_skb);
@@ -966,7 +958,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (BNXT_CHIP_P5(bp)) {
spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
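
The hold/send/drop sequence in bnxt_hwrm_port_ts_query() above is the standard HWRM request lifecycle in this driver: hwrm_req_hold() keeps the response buffer valid across the send, and the _silent variant suppresses warning logs for probes that are expected to fail. A sketch of the same pattern around the hwrm_port_mac_qcaps message added earlier in this patch -- the helper name and the port_id plumbing are illustrative, not driver code:

    static int bnxt_query_mac_qcaps_sketch(struct bnxt *bp, u8 *flags)
    {
        struct hwrm_port_mac_qcaps_output *resp;
        struct hwrm_port_mac_qcaps_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
        if (rc)
            return rc;
        req->port_id = cpu_to_le16(bp->pf.port_id);
        /* hold keeps the response buffer valid until drop */
        resp = hwrm_req_hold(bp, req);
        /* _silent: a missing-capability error is not worth a log line */
        rc = hwrm_req_send_silent(bp, req);
        if (!rc)
            *flags = resp->flags;
        hwrm_req_drop(bp, req);
        return rc;
    }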
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 34162e07a..fce8dc39a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -137,7 +137,7 @@ do { \
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
-void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
+int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c722b3b41..175192eba 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -536,7 +536,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
if (rc)
return rc;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
vf_ring_grps = 0;
} else {
@@ -565,7 +565,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->min_l2_ctxs = cpu_to_le16(min);
req->min_vnics = cpu_to_le16(min);
req->min_stat_ctx = cpu_to_le16(min);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
req->min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
@@ -602,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->max_rsscos_ctx = cpu_to_le16(vf_rss);
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
req->max_msix = cpu_to_le16(vf_msix / num_vfs);
hwrm_req_hold(bp, req);
@@ -630,7 +630,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
le16_to_cpu(req->min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 6ba2b9398..195c02dc0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -42,13 +42,10 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
- if (bp->flags & BNXT_FLAG_CHIP_P5) {
- ent[i].db_offset = DB_PF_OFFSET_P5;
- if (BNXT_VF(bp))
- ent[i].db_offset = DB_VF_OFFSET_P5;
- } else {
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ ent[i].db_offset = bp->db_offset;
+ else
ent[i].db_offset = (idx + i) * 0x80;
- }
}
}
@@ -213,6 +210,9 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
@@ -333,6 +333,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pdev = bp->pdev;
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
+ edev->l2_db_offset = bp->db_offset;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -394,12 +395,13 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 6ff77f082..b9e73de14 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -73,6 +73,10 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ int l2_db_offset; /* Doorbell offset in
+ * bytes within
+ * l2_db_size_nc.
+ */
u16 chip_num;
u16 hw_ring_stats_size;
u16 pf_port_id;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 8cb9a9915..4079538bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -42,17 +42,17 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */
prod = txr->tx_prod;
- tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->nr_frags = num_frags;
if (xdp)
tx_buf->page = virt_to_head_page(xdp->data);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) |
((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
- txbd->tx_bd_opaque = prod;
+ txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
/* now let us fill up the frags into the next buffers */
@@ -66,10 +66,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
WRITE_ONCE(txr->tx_prod, prod);
/* first fill up the first buffer */
- frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
frag_tx_buf->page = skb_frag_page(frag);
- txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+ txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
flags = frag_len << TX_BD_LEN_SHIFT;
@@ -120,20 +120,20 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
- struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+ struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ u16 tx_hw_cons = txr->tx_hw_cons;
bool rx_doorbell_needed = false;
- int nr_pkts = bnapi->tx_pkts;
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i, j, frags;
+ int j, frags;
if (!budget)
return;
- for (i = 0; i < nr_pkts; i++) {
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ while (RING_TX(bp, tx_cons) != tx_hw_cons) {
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev;
@@ -153,20 +153,20 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
frags = tx_buf->nr_frags;
for (j = 0; j < frags; j++) {
tx_cons = NEXT_TX(tx_cons);
- tx_buf = &txr->tx_buf_ring[tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
}
} else {
- bnxt_sched_reset_txr(bp, txr, i);
+ bnxt_sched_reset_txr(bp, txr, tx_cons);
return;
}
tx_cons = NEXT_TX(tx_cons);
}
- bnapi->tx_pkts = 0;
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
WRITE_ONCE(txr->tx_cons, tx_cons);
if (rx_doorbell_needed) {
- tx_buf = &txr->tx_buf_ring[last_tx_cons];
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
}
@@ -242,7 +242,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
pdev = bp->pdev;
offset = bp->rx_offset;
- txr = rxr->bnapi->tx_ring;
+ txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
orig_data = xdp.data;
@@ -268,7 +268,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
case XDP_TX:
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
- *event = 0;
+ *event &= BNXT_TX_CMP_EVENT;
if (unlikely(xdp_buff_has_frags(&xdp))) {
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
@@ -391,7 +391,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
- int tx_xdp = 0, rc, tc;
+ int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
if (prog && !prog->aux->xdp_has_frags &&
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
- tc = netdev_get_num_tc(dev);
+ tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
@@ -439,7 +439,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
- bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+ bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
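
The RING_TX()/SET_TX_OPAQUE() conversions above reflect the multi-ring rework: producer/consumer counters run free and are masked into a slot per ring, and the opaque word written to the BD carries enough state (ring position plus BD count) for the completion path to locate and size each packet. The general pattern, with made-up macros -- the real bnxt encodings live in bnxt.h and differ in detail:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE   256                 /* power of two */
    #define RING_MASK   (RING_SIZE - 1)
    #define RING_IDX(i) ((i) & RING_MASK)   /* free-running counter -> slot */
    /* fold BD count into the high bits, slot into the low bits */
    #define SET_OPAQUE(prod, bds) (((uint32_t)(bds) << 24) | RING_IDX(prod))
    #define OPAQUE_BDS(op)        ((op) >> 24)
    #define OPAQUE_IDX(op)        ((op) & RING_MASK)

    int main(void)
    {
        uint16_t prod = 0xFFFE;             /* counter about to wrap */
        uint32_t opaque = SET_OPAQUE(prod, 3);

        printf("slot=%u bds=%u\n", OPAQUE_IDX(opaque), OPAQUE_BDS(opaque));
        return 0;
    }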
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f52830dfb..04964bbe0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12745,24 +12745,23 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
{
struct tg3 *tp = netdev_priv(dev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- indir[i] = tp->rss_ind_tbl[i];
+ rxfh->indir[i] = tp->rss_ind_tbl[i];
return 0;
}
-static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -12770,15 +12769,16 @@ static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
- tp->rss_ind_tbl[i] = indir[i];
+ tp->rss_ind_tbl[i] = rxfh->indir[i];
if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
return 0;
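
The tg3 conversion above is representative of the ethtool RSS refactor repeated across drivers in this patch: the (indir, key, hfunc) pointer triple becomes one struct ethtool_rxfh_param, and set_rxfh() gains a netlink_ext_ack. A skeletal pair under hypothetical foo_* names, touching only the rxfh fields this patch uses (indir, key, hfunc):

    static int foo_push_rss(struct foo_priv *priv);   /* hw programming elided */

    static int foo_get_rxfh(struct net_device *dev,
                            struct ethtool_rxfh_param *rxfh)
    {
        struct foo_priv *priv = netdev_priv(dev);
        int i;

        rxfh->hfunc = ETH_RSS_HASH_TOP;    /* always reported; no NULL check now */
        if (rxfh->indir)
            for (i = 0; i < FOO_INDIR_SIZE; i++)
                rxfh->indir[i] = priv->ind_tbl[i];
        if (rxfh->key)
            memcpy(rxfh->key, priv->rss_key, FOO_KEY_LEN);
        return 0;
    }

    static int foo_set_rxfh(struct net_device *dev,
                            struct ethtool_rxfh_param *rxfh,
                            struct netlink_ext_ack *extack)
    {
        struct foo_priv *priv = netdev_priv(dev);
        int i;

        /* refuse changes to anything the hardware cannot honour */
        if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
            rxfh->hfunc != ETH_RSS_HASH_TOP)
            return -EOPNOTSUPP;
        if (rxfh->indir)
            for (i = 0; i < FOO_INDIR_SIZE; i++)
                priv->ind_tbl[i] = rxfh->indir[i];
        if (rxfh->key)
            memcpy(priv->rss_key, rxfh->key, FOO_KEY_LEN);
        return foo_push_rss(priv);
    }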
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 31191b520..c32174484 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
*/
static void
-bnad_tx_cleanup(struct delayed_work *work)
+bnad_tx_cleanup(struct work_struct *work)
{
struct bnad_tx_info *tx_info =
- container_of(work, struct bnad_tx_info, tx_cleanup_work);
+ container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
struct bnad *bnad = NULL;
struct bna_tcb *tcb;
unsigned long flags;
@@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
* Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
*/
static void
-bnad_rx_cleanup(void *work)
+bnad_rx_cleanup(struct work_struct *work)
{
struct bnad_rx_info *rx_info =
container_of(work, struct bnad_rx_info, rx_cleanup_work);
@@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
}
tx_info->tx = tx;
- INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
- (work_func_t)bnad_tx_cleanup);
+ INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
/* Register ISR for the Tx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
rx_info->rx = rx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- INIT_WORK(&rx_info->rx_cleanup_work,
- (work_func_t)(bnad_rx_cleanup));
+ INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
/*
* Init NAPI, so that state is set to NAPI_STATE_SCHED,
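
The bnad change above fixes the handler prototypes instead of casting through work_func_t: a delayed-work callback receives the embedded work_struct, so the wrapper must be recovered through the ".work" member -- the old casts only happened to work and defeat control-flow-integrity checking. A self-contained user-space illustration of the container_of arithmetic:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };
    struct delayed_work { struct work_struct work; };

    struct tx_info {
        int id;
        struct delayed_work tx_cleanup_work;
    };

    /* The handler gets the inner work_struct, so walk back through the
     * nested ".work" member to reach the wrapper. */
    static void tx_cleanup(struct work_struct *work)
    {
        struct tx_info *ti =
            container_of(work, struct tx_info, tx_cleanup_work.work);

        printf("cleaning tx %d\n", ti->id);
    }

    int main(void)
    {
        struct tx_info ti = { .id = 7 };

        tx_cleanup(&ti.tx_cleanup_work.work);
        return 0;
    }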
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index df10edff5..d1ad6c9f8 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -608,7 +608,7 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN));
- ethtool_sprintf(&string, bnad_net_stats_strings[i]);
+ ethtool_puts(&string, bnad_net_stats_strings[i]);
}
bmap = bna_tx_rid_mask(&bnad->bna);
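
ethtool_puts() above is the fixed-string counterpart of ethtool_sprintf(): it copies one string into the current ETH_GSTRING_LEN slot and advances the cursor, skipping the format-string pass. Roughly, in user-space terms (the kernel version uses strscpy rather than snprintf):

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    static void ethtool_puts(char **data, const char *str)
    {
        /* copy into the fixed-size slot, then step to the next one */
        snprintf(*data, ETH_GSTRING_LEN, "%s", str);
        *data += ETH_GSTRING_LEN;
    }

    int main(void)
    {
        char strings[2][ETH_GSTRING_LEN];
        char *p = &strings[0][0];

        ethtool_puts(&p, "rx_packets");
        ethtool_puts(&p, "tx_packets");
        printf("%s / %s\n", strings[0], strings[1]);
        return 0;
    }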
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 78c972bb1..aa5700ac9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1165,9 +1165,10 @@ struct macb_ptp_info {
int (*get_ts_info)(struct net_device *dev,
struct ethtool_ts_info *info);
int (*get_hwtst)(struct net_device *netdev,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *tstamp_config);
int (*set_hwtst)(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack);
};
struct macb_pm_data {
@@ -1314,7 +1315,7 @@ struct macb {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct tsu_incr tsu_incr;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
/* RX queue filer rule set*/
struct ethtool_rx_fs_list rx_fs_list;
@@ -1363,8 +1364,12 @@ static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, stru
gem_ptp_rxstamp(bp, skb, desc);
}
-int gem_get_hwtst(struct net_device *dev, struct ifreq *rq);
-int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+int gem_get_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config);
+int gem_set_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack);
#else
static inline void gem_ptp_init(struct net_device *ndev) { }
static inline void gem_ptp_remove(struct net_device *ndev) { }
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index cebae0f41..898debfd4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3773,18 +3773,38 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (bp->ptp_info) {
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bp->ptp_info->set_hwtst(dev, rq, cmd);
- case SIOCGHWTSTAMP:
- return bp->ptp_info->get_hwtst(dev, rq);
- }
- }
-
return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
+static int macb_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!bp->ptp_info)
+ return -EOPNOTSUPP;
+
+ return bp->ptp_info->get_hwtst(dev, cfg);
+}
+
+static int macb_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!bp->ptp_info)
+ return -EOPNOTSUPP;
+
+ return bp->ptp_info->set_hwtst(dev, cfg, extack);
+}
+
static inline void macb_set_txcsum_feature(struct macb *bp,
netdev_features_t features)
{
@@ -3884,6 +3904,8 @@ static const struct net_device_ops macb_netdev_ops = {
#endif
.ndo_set_features = macb_set_features,
.ndo_features_check = macb_features_check,
+ .ndo_hwtstamp_set = macb_hwtstamp_set,
+ .ndo_hwtstamp_get = macb_hwtstamp_get,
};
/* Configure peripheral capabilities according to device tree
@@ -4539,6 +4561,8 @@ static const struct net_device_ops at91ether_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
#endif
+ .ndo_hwtstamp_set = macb_hwtstamp_set,
+ .ndo_hwtstamp_get = macb_hwtstamp_get,
};
static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 51d26fa19..a63bf29c4 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -374,19 +374,16 @@ static int gem_ptp_set_ts_mode(struct macb *bp,
return 0;
}
-int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
+int gem_get_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config)
{
- struct hwtstamp_config *tstamp_config;
struct macb *bp = netdev_priv(dev);
- tstamp_config = &bp->tstamp_config;
+ *tstamp_config = bp->tstamp_config;
if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
return -EOPNOTSUPP;
- if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
@@ -401,22 +398,18 @@ static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
}
-int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
+int gem_set_hwtst(struct net_device *dev,
+ struct kernel_hwtstamp_config *tstamp_config,
+ struct netlink_ext_ack *extack)
{
enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
- struct hwtstamp_config *tstamp_config;
struct macb *bp = netdev_priv(dev);
u32 regval;
- tstamp_config = &bp->tstamp_config;
if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
return -EOPNOTSUPP;
- if (copy_from_user(tstamp_config, ifr->ifr_data,
- sizeof(*tstamp_config)))
- return -EFAULT;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
@@ -463,12 +456,11 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ERANGE;
}
+ bp->tstamp_config = *tstamp_config;
+
if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
return -ERANGE;
- if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
- return -EFAULT;
- else
- return 0;
+ return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 068ed52b6..b3c81a2e9 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1490,7 +1490,7 @@ int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
- mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+ mbox_cmd.fn = cn23xx_get_vf_stats_callback;
ctx.stats = stats;
atomic_set(&ctx.status, 0);
mbox_cmd.fn_arg = (void *)&ctx;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
index dd5d80fee..d2fcb3da4 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -429,7 +429,7 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
mbox_cmd.q_no = 0;
mbox_cmd.recv_len = 0;
mbox_cmd.recv_status = 0;
- mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+ mbox_cmd.fn = octeon_pfvf_hs_callback;
mbox_cmd.fn_arg = &status;
octeon_mbox_write(oct, &mbox_cmd);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 9cc6303c8..f38d31bfa 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -27,6 +27,7 @@
#include "octeon_network.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* OOM task polling interval */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index d92bd7e16..9ac85d22c 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -57,7 +57,10 @@ union octeon_mbox_message {
} s;
};
-typedef void (*octeon_mbox_callback_t)(void *, void *, void *);
+struct octeon_mbox_cmd;
+
+typedef void (*octeon_mbox_callback_t)(struct octeon_device *,
+ struct octeon_mbox_cmd *, void *);
struct octeon_mbox_cmd {
union octeon_mbox_message msg;
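
The typed octeon_mbox_callback_t above (with struct octeon_mbox_cmd forward-declared) lets the callers earlier in this patch drop their function-pointer casts; calling through a pointer whose type differs from the function's actual prototype is undefined behaviour, and kCFI builds trap it at runtime. A compilable illustration of the difference:

    #include <stdio.h>

    struct octeon_device;
    struct octeon_mbox_cmd;

    typedef void (*octeon_mbox_callback_t)(struct octeon_device *,
                                           struct octeon_mbox_cmd *, void *);

    static void good_cb(struct octeon_device *oct,
                        struct octeon_mbox_cmd *cmd, void *arg)
    {
        (void)oct; (void)cmd;
        printf("status=%d\n", *(int *)arg);
    }

    int main(void)
    {
        octeon_mbox_callback_t fn = good_cb;   /* OK: prototypes match */
        /* octeon_mbox_callback_t bad = main; -- now a compile error,
         * where the old "(octeon_mbox_callback_t)fn" cast hid it */
        int status = 5;

        fn(NULL, NULL, &status);
        return 0;
    }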
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d8d71bf97..34125b8cd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -653,35 +653,36 @@ static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
return nic->rss_info.rss_size;
}
-static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int nicvf_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if (indir) {
+ if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
+ rxfh->indir[idx] = rss->ind_tbl[idx];
}
- if (hkey)
- memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int nicvf_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
int idx;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!rss->enable) {
@@ -690,13 +691,13 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
- if (indir) {
+ if (rxfh->indir) {
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss->ind_tbl[idx] = rxfh->indir[idx];
}
- if (hkey) {
- memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ if (rxfh->key) {
+ memcpy(rss->key, rxfh->key, RSS_HASH_KEY_SIZE * sizeof(u64));
nicvf_set_rss_key(nic);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 6d682b7c7..9d11e5598 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -237,7 +237,7 @@ struct adapter {
int msix_nvectors;
struct {
unsigned short vec;
- char desc[22];
+ char desc[IFNAMSIZ + 1 + 12]; /* Needs space for "%s-%d" */
} msix_info[SGE_QSETS + 1];
/* T3 modules */
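
The new desc size is worst-case arithmetic for "%s-%d": an interface name is at most IFNAMSIZ - 1 = 15 visible characters, plus one separator, up to 11 characters for a negative 32-bit int, plus the NUL -- 28 bytes, within the 29 provided, where the old desc[22] could silently truncate long names. A quick user-space check:

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    #define IFNAMSIZ 16   /* kernel value; the 16 bytes include the NUL */

    int main(void)
    {
        char desc[IFNAMSIZ + 1 + 12];
        char name[IFNAMSIZ] = "verylongifname0";   /* 15 chars: the maximum */

        snprintf(desc, sizeof(desc), "%s-%d", name, INT_MIN);
        printf("\"%s\" uses %zu of %zu bytes\n",
               desc, strlen(desc) + 1, sizeof(desc));   /* 28 of 29 */
        return 0;
    }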
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index d117022d1..2236f1d35 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -380,19 +380,18 @@ static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
*/
static void name_msix_vecs(struct adapter *adap)
{
- int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
+ int i, j, msi_idx = 1;
- snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
- adap->msix_info[0].desc[n] = 0;
+ strscpy(adap->msix_info[0].desc, adap->name, sizeof(adap->msix_info[0].desc));
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
for (i = 0; i < pi->nqsets; i++, msi_idx++) {
- snprintf(adap->msix_info[msi_idx].desc, n,
+ snprintf(adap->msix_info[msi_idx].desc,
+ sizeof(adap->msix_info[0].desc),
"%s-%d", d->name, pi->first_qset + i);
- adap->msix_info[msi_idx].desc[n] = 0;
}
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 8477a93ce..47eecde36 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1588,22 +1588,23 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
+static int get_rss_table(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!p)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
while (n--)
- p[n] = pi->rss[n];
+ rxfh->indir[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
- const u8 hfunc)
+static int set_rss_table(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@@ -1611,16 +1612,17 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
/* We require at least one supported parameter to be changed and no
* change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!p)
+ if (!rxfh->indir)
return 0;
/* Interface must be brought up at least once */
if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
for (i = 0; i < pi->rss_size; i++)
- pi->rss[i] = p[i];
+ pi->rss[i] = rxfh->indir[i];
return cxgb4_write_rss(pi, pi->rss);
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1c2a540db..1f495cfd7 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
module_platform_driver(ep93xx_eth_driver);
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 08b7cc0a1..241906697 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -568,31 +568,32 @@ static u32 enic_get_rxfh_key_size(struct net_device *netdev)
return ENIC_RSS_LEN;
}
-static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int enic_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct enic *enic = netdev_priv(netdev);
- if (hkey)
- memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int enic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
- indir)
+ if (rxfh->indir ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EINVAL;
- if (hkey)
- memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
+ if (rxfh->key)
+ memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
return __enic_set_rsskey(enic);
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 20fcb20b4..66b577835 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
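
unsafe_memcpy() above is an annotation rather than a behaviour change: the destination is a flexible-array member (inside a structure that is itself dynamically sized), so a fortified memcpy() generally has no compile-time bound to check against, and the macro records that the length is validated by other means. The shape of the problem in plain C, with a hypothetical TLV struct rather than the vic_provinfo layout:

    #include <stdlib.h>
    #include <string.h>

    struct tlv {
        unsigned short type, length;
        unsigned char value[];   /* flexible array: size unknown to the compiler */
    };

    static struct tlv *tlv_new(unsigned short type,
                               const void *value, unsigned short length)
    {
        struct tlv *t = malloc(sizeof(*t) + length);

        if (!t)
            return NULL;
        t->type = type;
        t->length = length;
        /* bounded by the allocation above, not by anything the type says */
        memcpy(t->value, value, length);
        return t;
    }

    int main(void)
    {
        struct tlv *t = tlv_new(1, "abc", 3);

        free(t);
        return 0;
    }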
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 78287cfcb..705c3eb19 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -79,8 +79,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -1143,23 +1142,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
- unsigned short mtu;
void *buffer;
int ret;
- mtu = ETH_HLEN;
- mtu += netdev->mtu;
- if (skb->protocol == htons(ETH_P_8021Q))
- mtu += VLAN_HLEN;
-
+ /* TODO: implement proper TSO using MTU in word3 */
word1 = skb->len;
word3 = SOF_BIT;
- if (word1 > mtu) {
- word1 |= TSS_MTU_ENABLE_BIT;
- word3 |= mtu;
- }
-
if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
* bigger than 1514 bytes. A hypothesis about this is that the
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index db6615aa9..7bfeae04b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -565,8 +565,7 @@ static void rio_hw_init(struct net_device *dev)
* too. However, it doesn't work on IP1000A so we use 16-bit access.
*/
for (i = 0; i < 3; i++)
- dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
+ dw16(StationAddr0 + 2 * i, get_unaligned_le16(&dev->dev_addr[2 * i]));
set_multicast (dev);
if (np->coalesce) {
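
get_unaligned_le16() above replaces a u16 * cast of the byte-array MAC address; the helper is alignment-safe and yields the same value on any host endianness. A portable user-space equivalent:

    #include <stdint.h>
    #include <stdio.h>

    /* Build the value byte by byte instead of casting to uint16_t *,
     * which is undefined for unaligned pointers and host-order anyway. */
    static uint16_t get_unaligned_le16(const void *p)
    {
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x1b, 0x11, 0x22, 0x33, 0x44 };
        int i;

        for (i = 0; i < 3; i++)
            printf("word %d: 0x%04x\n", i, get_unaligned_le16(&mac[2 * i]));
        return 0;
    }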
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index a29de29bd..f001a649f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1271,43 +1271,45 @@ static u32 be_get_rxfh_key_size(struct net_device *netdev)
return RSS_HASH_KEY_LEN;
}
-static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
- u8 *hfunc)
+static int be_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i;
struct rss_info *rss = &adapter->rss_info;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
- indir[i] = rss->rss_queue[i];
+ rxfh->indir[i] = rss->rss_queue[i];
}
- if (hkey)
- memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->rss_hkey, RSS_HASH_KEY_LEN);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+static int be_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
int rc = 0, i, j;
struct be_adapter *adapter = netdev_priv(netdev);
+ u8 *hkey = rxfh->key;
u8 rsstable[RSS_INDIR_TABLE_LEN];
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
struct be_rx_obj *rxo;
for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
- j = indir[i];
+ j = rxfh->indir[i];
rxo = &adapter->rx_obj[j];
rsstable[i] = rxo->rss_id;
adapter->rss_info.rss_queue[i] = j;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 4d7184d46..9ebe751c1 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -633,7 +633,7 @@ out_netdev:
return err;
}
-static s32 nps_enet_remove(struct platform_device *pdev)
+static void nps_enet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct nps_enet_priv *priv = netdev_priv(ndev);
@@ -641,8 +641,6 @@ static s32 nps_enet_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id nps_enet_dt_ids[] = {
@@ -653,7 +651,7 @@ MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
- .remove = nps_enet_remove,
+ .remove_new = nps_enet_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = nps_enet_dt_ids,
@@ -663,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");
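
The .remove_new conversion above is mechanical: the platform core always ignored the int returned by .remove (the device is unbound regardless), so the newer callback type returns void and the dead "return 0" goes away. The resulting shape, with hypothetical foo_* names:

    static int foo_probe(struct platform_device *pdev);   /* elided */

    static void foo_remove(struct platform_device *pdev)
    {
        struct net_device *ndev = platform_get_drvdata(pdev);

        unregister_netdev(ndev);
        free_netdev(ndev);   /* nothing useful to return: unbind proceeds anyway */
    }

    static struct platform_driver foo_driver = {
        .probe      = foo_probe,
        .remove_new = foo_remove,
        .driver     = { .name = "foo" },
    };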
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index e01a24612..f3543a2df 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -289,7 +289,7 @@ static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
int err;
if (port_priv->vlans[vid]) {
- netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ netdev_err(netdev, "VLAN %d already configured\n", vid);
return -EEXIST;
}
@@ -1509,9 +1509,9 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
struct device *dev = (struct device *)arg;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct ethsw_port_priv *port_priv;
- u32 status = ~0;
int err, if_id;
bool had_mac;
+ u32 status;
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
@@ -1523,12 +1523,11 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
if_id = (status & 0xFFFF0000) >> 16;
port_priv = ethsw->ports[if_id];
- if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
dpaa2_switch_port_link_state_update(port_priv->netdev);
- dpaa2_switch_port_set_mac_addr(port_priv);
- }
if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ dpaa2_switch_port_set_mac_addr(port_priv);
/* We can avoid locking because the "endpoint changed" IRQ
* handler is the only one who changes priv->mac at runtime,
* so we are not racing with anyone.
@@ -1540,20 +1539,20 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
dpaa2_switch_port_connect_mac(port_priv);
}
-out:
err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, status);
if (err)
dev_err(dev, "Can't clear irq status (err %d)\n", err);
+out:
return IRQ_HANDLED;
}
static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED | DPSW_IRQ_EVENT_ENDPOINT_CHANGED;
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
- u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
struct fsl_mc_device_irq *irq;
int err;
@@ -1775,8 +1774,10 @@ int dpaa2_switch_port_vlans_add(struct net_device *netdev,
/* Make sure that the VLAN is not already configured
* on the switch port
*/
- if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
+ if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER) {
+ netdev_err(netdev, "VLAN %d already configured\n", vlan->vid);
return -EEXIST;
+ }
/* Check if there is space for a new VLAN */
err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
@@ -2003,25 +2004,11 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- struct ethsw_port_priv *other_port_priv;
- struct net_device *other_dev;
- struct list_head *iter;
bool learn_ena;
int err;
- netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
- if (!dpaa2_switch_port_dev_check(other_dev))
- continue;
-
- other_port_priv = netdev_priv(other_dev);
- if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
- NL_SET_ERR_MSG_MOD(extack,
- "Interface from a different DPSW is in the bridge already");
- return -EINVAL;
- }
- }
-
/* Delete the previously manually installed VLAN 1 */
err = dpaa2_switch_port_del_vlan(port_priv, 1);
if (err)
@@ -2039,6 +2026,11 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
if (err)
goto err_egress_flood;
+ /* Recreate the egress flood domain of the FDB that we just left. */
+ err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
+ if (err)
+ goto err_egress_flood;
+
err = switchdev_bridge_port_offload(netdev, netdev, NULL,
NULL, NULL, false, extack);
if (err)
@@ -2155,6 +2147,10 @@ dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
struct net_device *upper_dev,
struct netlink_ext_ack *extack)
{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct ethsw_port_priv *other_port_priv;
+ struct net_device *other_dev;
+ struct list_head *iter;
int err;
if (!br_vlan_enabled(upper_dev)) {
@@ -2169,54 +2165,93 @@ dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
return 0;
}
+ netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
+ if (!dpaa2_switch_port_dev_check(other_dev))
+ continue;
+
+ other_port_priv = netdev_priv(other_dev);
+ if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Interface from a different DPSW is in the bridge already");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
-static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dpaa2_switch_port_prechangeupper(struct net_device *netdev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
- int err = 0;
+ int err;
if (!dpaa2_switch_port_dev_check(netdev))
- return NOTIFY_DONE;
+ return 0;
extack = netdev_notifier_info_to_extack(&info->info);
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev))
- break;
-
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
upper_dev,
extack);
if (err)
- goto out;
+ return err;
if (!info->linking)
dpaa2_switch_port_pre_bridge_leave(netdev);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_changeupper(struct net_device *netdev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!dpaa2_switch_port_dev_check(netdev))
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ return dpaa2_switch_port_bridge_join(netdev,
+ upper_dev,
+ extack);
+ else
+ return dpaa2_switch_port_bridge_leave(netdev);
+ }
+
+ return 0;
+}
+
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = dpaa2_switch_port_prechangeupper(netdev, ptr);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_CHANGEUPPER:
- upper_dev = info->upper_dev;
- if (netif_is_bridge_master(upper_dev)) {
- if (info->linking)
- err = dpaa2_switch_port_bridge_join(netdev,
- upper_dev,
- extack);
- else
- err = dpaa2_switch_port_bridge_leave(netdev);
- }
+ err = dpaa2_switch_port_changeupper(netdev, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
break;
}
-out:
- return notifier_from_errno(err);
+ return NOTIFY_DONE;
}
struct ethsw_switchdev_event_work {
@@ -3294,6 +3329,7 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_FILTER |
NETIF_F_HW_TC;
+ port_netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
err = dpaa2_switch_port_init(port_priv, port_idx);
if (err)
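The hunk above splits one long notifier callback into per-event helpers that return 0 or -errno, leaving the switch statement as a thin dispatcher. A minimal sketch of that shape, assuming hypothetical foo_prechangeupper()/foo_changeupper() helpers with the same return contract:

    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int foo_netdevice_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
    {
            struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
            int err;

            switch (event) {
            case NETDEV_PRECHANGEUPPER:
                    err = foo_prechangeupper(netdev, ptr);  /* hypothetical */
                    if (err)
                            return notifier_from_errno(err);
                    break;
            case NETDEV_CHANGEUPPER:
                    err = foo_changeupper(netdev, ptr);     /* hypothetical */
                    if (err)
                            return notifier_from_errno(err);
                    break;
            }

            /* success and unhandled events look the same to the chain */
            return NOTIFY_DONE;
    }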
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index cffbf27c4..bfdbdab44 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index e993ed04a..f7753ea5b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -690,25 +690,26 @@ static u32 enetc_get_rxfh_indir_size(struct net_device *ndev)
return priv->si->num_rss;
}
-static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int enetc_get_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0, i;
/* return hash function */
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
/* return hash key */
- if (key && hw->port)
+ if (rxfh->key && hw->port)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
- ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i));
+ ((u32 *)rxfh->key)[i] = enetc_port_rd(hw,
+ ENETC_PRSSK(i));
/* return RSS table */
- if (indir)
- err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss);
+ if (rxfh->indir)
+ err = enetc_get_rss_table(priv->si, rxfh->indir,
+ priv->si->num_rss);
return err;
}
@@ -722,20 +723,22 @@ void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
}
EXPORT_SYMBOL_GPL(enetc_set_rss_key);
-static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int enetc_set_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
int err = 0;
/* set hash key, if PF */
- if (key && hw->port)
- enetc_set_rss_key(hw, key);
+ if (rxfh->key && hw->port)
+ enetc_set_rss_key(hw, rxfh->key);
/* set RSS table */
- if (indir)
- err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss);
+ if (rxfh->indir)
+ err = enetc_set_rss_table(priv->si, rxfh->indir,
+ priv->si->num_rss);
return err;
}
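This is the first of several drivers in this series converted to the consolidated ethtool RSS interface, where the old (indir, key, hfunc) argument triples collapse into one struct ethtool_rxfh_param and set_rxfh additionally receives an extack. A minimal sketch of the new get-side shape, with a hypothetical mydev_priv layout:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    struct mydev_priv {                     /* hypothetical private state */
            u32 indir[128];
            u8 key[40];
    };

    static int mydev_get_rxfh(struct net_device *dev,
                              struct ethtool_rxfh_param *rxfh)
    {
            struct mydev_priv *priv = netdev_priv(dev);

            rxfh->hfunc = ETH_RSS_HASH_TOP;         /* always reported */
            if (rxfh->indir)                        /* filled on request */
                    memcpy(rxfh->indir, priv->indir, sizeof(priv->indir));
            if (rxfh->key)
                    memcpy(rxfh->key, priv->key, sizeof(priv->key));
            return 0;
    }

The same mechanical conversion repeats below for funeth, hns, hns3 and hinic.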
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c153dc083..11b145558 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -920,6 +920,7 @@ static void enetc_imdio_remove(struct enetc_pf *pf)
static bool enetc_port_has_pcs(struct enetc_pf *pf)
{
return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_1000BASEX ||
pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
}
@@ -1116,6 +1117,8 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
pf->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pf->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
pf->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_USXGMII,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c10768098..8decb1b07 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2406,8 +2406,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = true;
-
phy_attached_info(phy_dev);
return 0;
@@ -2419,10 +2417,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
bool suppress_preamble = false;
+ struct phy_device *phydev;
struct device_node *node;
int err = -ENXIO;
u32 mii_speed, holdtime;
u32 bus_freq;
+ int addr;
/*
* The i.MX28 dual fec interfaces are not equal.
@@ -2536,6 +2536,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
goto err_out_free_mdiobus;
of_node_put(node);
+ /* find all the PHY devices on the bus and set mac_managed_pm to true */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phydev = mdiobus_get_phy(fep->mii_bus, addr);
+ if (phydev)
+ phydev->mac_managed_pm = true;
+ }
+
mii_cnt++;
/* save fec0 mii_bus */
@@ -2934,10 +2941,10 @@ static void fec_enet_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
- ethtool_sprintf(&data, "%s", fec_stats[i].name);
+ ethtool_puts(&data, fec_stats[i].name);
}
for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
- ethtool_sprintf(&data, "%s", fec_xdp_stat_strs[i]);
+ ethtool_puts(&data, fec_xdp_stat_strs[i]);
}
page_pool_ethtool_stats_get_strings(data);
@@ -4771,4 +4778,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");
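The fec change above moves the mac_managed_pm assignment from fec_enet_mii_probe() to MDIO bus init, so the flag is set on every PHY before the PHY core makes any suspend/resume decisions for it. The pattern, as a sketch over an already registered bus:

    #include <linux/phy.h>

    /* Mark every PHY on the bus as MAC-managed for power management so
     * the PHY core leaves suspend/resume to the MAC driver. */
    static void mark_phys_mac_managed(struct mii_bus *bus)
    {
            struct phy_device *phydev;
            int addr;

            for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                    phydev = mdiobus_get_phy(bus, addr);
                    if (phydev)
                            phydev->mac_managed_pm = true;
            }
    }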
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 70dd982a5..026f7270a 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
module_platform_driver(fsl_pq_mdio_driver);
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 31aa185f4..4edd0adfc 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -655,7 +655,7 @@ static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
i);
}
for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
- ethtool_sprintf(&p, txq_stat_names[j]);
+ ethtool_puts(&p, txq_stat_names[j]);
for (i = 0; i < fp->num_xdpqs; i++) {
for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
@@ -663,7 +663,7 @@ static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
xdpq_stat_names[j], i);
}
for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
- ethtool_sprintf(&p, xdpq_stat_names[j]);
+ ethtool_puts(&p, xdpq_stat_names[j]);
for (i = 0; i < netdev->real_num_rx_queues; i++) {
for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
@@ -671,10 +671,10 @@ static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
i);
}
for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
- ethtool_sprintf(&p, rxq_stat_names[j]);
+ ethtool_puts(&p, rxq_stat_names[j]);
for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
- ethtool_sprintf(&p, tls_stat_names[j]);
+ ethtool_puts(&p, tls_stat_names[j]);
break;
default:
break;
@@ -977,44 +977,44 @@ static u32 fun_get_rxfh_key_size(struct net_device *netdev)
return sizeof(fp->rss_key);
}
-static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int fun_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
const struct funeth_priv *fp = netdev_priv(netdev);
if (!fp->rss_cfg)
return -EOPNOTSUPP;
- if (indir)
- memcpy(indir, fp->indir_table,
+ if (rxfh->indir)
+ memcpy(rxfh->indir, fp->indir_table,
sizeof(u32) * fp->indir_table_nentries);
- if (key)
- memcpy(key, fp->rss_key, sizeof(fp->rss_key));
+ if (rxfh->key)
+ memcpy(rxfh->key, fp->rss_key, sizeof(fp->rss_key));
- if (hfunc)
- *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
- ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
+ rxfh->hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
return 0;
}
-static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int fun_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct funeth_priv *fp = netdev_priv(netdev);
- const u32 *rss_indir = indir ? indir : fp->indir_table;
- const u8 *rss_key = key ? key : fp->rss_key;
+ const u32 *rss_indir = rxfh->indir ? rxfh->indir : fp->indir_table;
+ const u8 *rss_key = rxfh->key ? rxfh->key : fp->rss_key;
enum fun_eth_hash_alg algo;
if (!fp->rss_cfg)
return -EOPNOTSUPP;
- if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
algo = fp->hash_algo;
- else if (hfunc == ETH_RSS_HASH_CRC32)
+ else if (rxfh->hfunc == ETH_RSS_HASH_CRC32)
algo = FUN_ETH_RSS_ALG_CRC32;
- else if (hfunc == ETH_RSS_HASH_TOP)
+ else if (rxfh->hfunc == ETH_RSS_HASH_TOP)
algo = FUN_ETH_RSS_ALG_TOEPLITZ;
else
return -EINVAL;
@@ -1031,10 +1031,10 @@ static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
}
fp->hash_algo = algo;
- if (key)
- memcpy(fp->rss_key, key, sizeof(fp->rss_key));
- if (indir)
- memcpy(fp->indir_table, indir,
+ if (rxfh->key)
+ memcpy(fp->rss_key, rxfh->key, sizeof(fp->rss_key));
+ if (rxfh->indir)
+ memcpy(fp->indir_table, rxfh->indir,
sizeof(u32) * fp->indir_table_nentries);
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 0d1e681be..b80349154 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -8,6 +8,7 @@
#define _GVE_H_
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -41,12 +42,16 @@
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4
+#define GVE_ADMINQ_BUFFER_SIZE 4096
+
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
@@ -672,6 +677,7 @@ struct gve_priv {
/* Admin queue - see gve_adminq.h*/
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
+ struct dma_pool *adminq_pool;
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
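The gve changes below replace a PAGE_SIZE coherent allocation with a dma_pool of fixed 4096-byte buffers, which keeps the admin queue layout stable on architectures where PAGE_SIZE is larger than 4 KB. A sketch of the pool lifecycle involved, with illustrative names:

    #include <linux/dmapool.h>

    static int adminq_buffer_example(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t bus;
            void *vaddr;

            pool = dma_pool_create("adminq_pool", dev, 4096, 0, 0);
            if (!pool)
                    return -ENOMEM;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &bus); /* one 4 KB buffer */
            if (!vaddr) {
                    dma_pool_destroy(pool);
                    return -ENOMEM;
            }

            /* ... hand "bus" to the device, use "vaddr" from the CPU ... */

            dma_pool_free(pool, vaddr, bus);
            dma_pool_destroy(pool);
            return 0;
    }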
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 79db7a6d4..12fbd723e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -194,12 +194,19 @@ gve_process_device_options(struct gve_priv *priv,
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
- priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
- &priv->adminq_bus_addr, GFP_KERNEL);
- if (unlikely(!priv->adminq))
+ priv->adminq_pool = dma_pool_create("adminq_pool", dev,
+ GVE_ADMINQ_BUFFER_SIZE, 0, 0);
+ if (unlikely(!priv->adminq_pool))
return -ENOMEM;
+ priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
+ &priv->adminq_bus_addr);
+ if (unlikely(!priv->adminq)) {
+ dma_pool_destroy(priv->adminq_pool);
+ return -ENOMEM;
+ }
- priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
+ priv->adminq_mask =
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
priv->adminq_prod_cnt = 0;
priv->adminq_cmd_fail = 0;
priv->adminq_timeouts = 0;
@@ -218,9 +225,20 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_get_ptype_map_cnt = 0;
/* Setup Admin queue with the device */
- iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
- &priv->reg_bar0->adminq_pfn);
-
+ if (priv->pdev->revision < 0x1) {
+ iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
+ &priv->reg_bar0->adminq_pfn);
+ } else {
+ iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
+ &priv->reg_bar0->adminq_length);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ iowrite32be(priv->adminq_bus_addr >> 32,
+ &priv->reg_bar0->adminq_base_address_hi);
+#endif
+ iowrite32be(priv->adminq_bus_addr,
+ &priv->reg_bar0->adminq_base_address_lo);
+ iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
+ }
gve_set_admin_queue_ok(priv);
return 0;
}
@@ -230,16 +248,27 @@ void gve_adminq_release(struct gve_priv *priv)
int i = 0;
/* Tell the device the adminq is leaving */
- iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
- while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
- /* If this is reached the device is unrecoverable and still
- * holding memory. Continue looping to avoid memory corruption,
- * but WARN so it is visible what is going on.
- */
- if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
- WARN(1, "Unrecoverable platform error!");
- i++;
- msleep(GVE_ADMINQ_SLEEP_LEN);
+ if (priv->pdev->revision < 0x1) {
+ iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
+ while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
+ /* If this is reached the device is unrecoverable and still
+ * holding memory. Continue looping to avoid memory corruption,
+ * but WARN so it is visible what is going on.
+ */
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
+ } else {
+ iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
+ while (!(ioread32be(&priv->reg_bar0->device_status)
+ & GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
}
gve_clear_device_rings_ok(priv);
gve_clear_device_resources_ok(priv);
@@ -251,7 +280,8 @@ void gve_adminq_free(struct device *dev, struct gve_priv *priv)
if (!gve_get_admin_queue_ok(priv))
return;
gve_adminq_release(priv);
- dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
+ dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
+ dma_pool_destroy(priv->adminq_pool);
gve_clear_admin_queue_ok(priv);
}
@@ -697,18 +727,7 @@ static int gve_set_desc_cnt(struct gve_priv *priv,
struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
- return -EINVAL;
- }
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
- < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
- return -EINVAL;
- }
return 0;
}
@@ -778,8 +797,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
u16 mtu;
memset(&cmd, 0, sizeof(cmd));
- descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
- &descriptor_bus, GFP_KERNEL);
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
+ &descriptor_bus);
if (!descriptor)
return -ENOMEM;
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
@@ -787,7 +806,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
cpu_to_be64(descriptor_bus);
cmd.describe_device.device_descriptor_version =
cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
- cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);
+ cmd.describe_device.available_length =
+ cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);
err = gve_adminq_execute_cmd(priv, &cmd);
if (err)
@@ -868,8 +888,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_op_jumbo_frames, dev_op_dqo_qpl);
free_device_descriptor:
- dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
- descriptor_bus);
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
return err;
}
@@ -898,6 +917,7 @@ int gve_adminq_register_page_list(struct gve_priv *priv,
.page_list_id = cpu_to_be32(qpl->id),
.num_pages = cpu_to_be32(num_entries),
.page_address_list_addr = cpu_to_be64(page_list_bus),
+ .page_size = cpu_to_be64(PAGE_SIZE),
};
err = gve_adminq_execute_cmd(priv, &cmd);
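The adminq_mask computed above follows from the buffer now being a fixed 4096 bytes rather than PAGE_SIZE: with a power-of-two slot count, a free-running producer counter indexes the ring with a single AND. A sketch, assuming the 64-byte command size the mask arithmetic implies:

    #define SLOT_SIZE   64                          /* assumed command size */
    #define RING_BYTES  4096
    #define RING_SLOTS  (RING_BYTES / SLOT_SIZE)    /* 64, a power of two */
    #define RING_MASK   (RING_SLOTS - 1)

    static inline unsigned int ring_slot(unsigned int prod_cnt)
    {
            return prod_cnt & RING_MASK;    /* 63 -> 63, 64 -> 0, 65 -> 1 */
    }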
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 38a22279e..5865ccdcc 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -219,9 +219,10 @@ struct gve_adminq_register_page_list {
__be32 page_list_id;
__be32 num_pages;
__be64 page_address_list_addr;
+ __be64 page_size;
};
-static_assert(sizeof(struct gve_adminq_register_page_list) == 16);
+static_assert(sizeof(struct gve_adminq_register_page_list) == 24);
struct gve_adminq_unregister_page_list {
__be32 page_list_id;
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 1eb4d5fd8..c36b93f0d 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -33,6 +33,9 @@
#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 233e59469..e5397aa1e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
{
u32 max_copybreak = gve_is_gqi(priv) ?
- (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+ GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
len = *(u32 *)value;
if (len > max_copybreak)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 2d42e7338..619bf63ec 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -79,6 +79,18 @@ static int gve_verify_driver_compatibility(struct gve_priv *priv)
return err;
}
+static netdev_features_t gve_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (!gve_is_gqi(priv))
+ return gve_features_check_dqo(skb, dev, features);
+
+ return features;
+}
+
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -1316,7 +1328,7 @@ static int gve_open(struct net_device *dev)
/* Hard code this for now. This may be tuned in the future for
* performance.
*/
- priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
}
err = gve_create_rings(priv);
if (err)
@@ -1652,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1879,6 +1891,7 @@ err:
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
+ .ndo_features_check = gve_features_check,
.ndo_open = gve_open,
.ndo_stop = gve_close,
.ndo_get_stats64 = gve_get_stats,
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index fb655463c..8e72b9700 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -18,11 +18,20 @@ struct gve_registers {
__be32 adminq_event_counter;
u8 reserved[3];
u8 driver_version;
+ __be32 adminq_base_address_hi;
+ __be32 adminq_base_address_lo;
+ __be16 adminq_length;
};
enum gve_device_status_flags {
GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3),
+ GVE_DEVICE_STATUS_DEVICE_IS_RESET = BIT(4),
+};
+
+enum gve_driver_status_flags {
+ GVE_DRIVER_STATUS_RUN_MASK = BIT(0),
+ GVE_DRIVER_STATUS_RESET_MASK = BIT(1),
};
#endif /* _GVE_REGISTER_H_ */
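The new adminq_base_address_{hi,lo} register pair above takes a 64-bit DMA address as two big-endian 32-bit writes. A sketch of that split using the kernel's upper_32_bits()/lower_32_bits() helpers, an equivalent alternative to the explicit shift guarded by CONFIG_ARCH_DMA_ADDR_T_64BIT in the hunk:

    #include <linux/io.h>
    #include <linux/kernel.h>

    static void write_base_address(void __iomem *hi, void __iomem *lo,
                                   dma_addr_t addr)
    {
            iowrite32be(upper_32_bits(addr), hi);   /* high word first */
            iowrite32be(lower_32_bits(addr), lo);
    }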
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 93ff7c8ec..76615d47e 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -211,9 +211,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
- u32 slots, npages;
int filled_pages;
size_t bytes;
+ u32 slots;
int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
@@ -270,12 +270,6 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* alloc rx desc ring */
bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
- npages = bytes / PAGE_SIZE;
- if (npages * PAGE_SIZE != bytes) {
- err = -EIO;
- goto abort_with_q_resources;
- }
-
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL);
if (!rx->desc.desc_ring) {
@@ -289,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* Allocating half-page buffers allows page-flipping which is faster
* than copying or allocating new pages.
*/
- rx->packet_buffer_size = PAGE_SIZE / 2;
+ rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
@@ -405,10 +399,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
- const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+ const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);
/* "flip" to other packet buffer on this page */
- page_info->page_offset ^= PAGE_SIZE / 2;
+ page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
*(slot_addr) ^= offset;
}
@@ -513,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
return NULL;
gve_dec_pagecnt_bias(copy_page_info);
- copy_page_info->page_offset += rx->packet_buffer_size;
- copy_page_info->page_offset &= (PAGE_SIZE - 1);
+ copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
if (copy_page_info->can_flip) {
/* We have used both halves of this copy page, it
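The flip above works because each 4 KB page is treated as two 2 KB buffers: XOR-ing the offset with 2048 toggles between the halves, and applying the same XOR to the big-endian slot address keeps the device's view in sync. A sketch (field widths illustrative):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define RX_BUF_OFFSET 2048

    static void flip_buffer(u32 *page_offset, __be64 *slot_addr)
    {
            *page_offset ^= RX_BUF_OFFSET;              /* 0 <-> 2048 */
            *slot_addr ^= cpu_to_be64(RX_BUF_OFFSET);   /* device sees it too */
    }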
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 9f6ffc4a5..07ba12478 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
}
-#define GVE_TX_START_THRESH PAGE_SIZE
+#define GVE_TX_START_THRESH 4096
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake)
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 1e19b834a..f59c4710f 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -843,6 +843,16 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
return true;
}
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_is_gso(skb) && !gve_can_send_tso(skb))
+ return features & ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
/* Attempt to transmit specified SKB.
*
* Returns 0 if the SKB was transmitted or dropped.
@@ -854,11 +864,10 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
- if (tx->dqo.qpl) {
- if (skb_is_gso(skb))
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
+ if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto drop;
+ if (tx->dqo.qpl) {
/* We do not need to verify the number of buffers used per
* packet or per segment in case of TSO as with 2K size buffers
* none of the TX packet rules would be violated.
@@ -868,24 +877,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
*/
num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
} else {
- if (skb_is_gso(skb)) {
- /* If TSO doesn't meet HW requirements, attempt to linearize the
- * packet.
- */
- if (unlikely(!gve_can_send_tso(skb) &&
- skb_linearize(skb) < 0)) {
- net_err_ratelimited("%s: Failed to transmit TSO packet\n",
- priv->dev->name);
- goto drop;
- }
-
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
-
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
- } else {
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
-
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ if (!skb_is_gso(skb)) {
if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
if (unlikely(skb_linearize(skb) < 0))
goto drop;
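The rework above replaces the old linearize-or-drop handling of oversized TSO packets: gve_features_check_dqo() clears the GSO feature bits for a packet the hardware cannot segment, which makes the core segment it in software instead of the driver failing it. The general shape, with a hypothetical capability check:

    #include <linux/netdevice.h>

    static netdev_features_t foo_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
                                                netdev_features_t features)
    {
            /* can_offload_tso() is a placeholder for the per-hardware
             * rules; clearing the GSO bits rejects only the offload,
             * not the packet. */
            if (skb_is_gso(skb) && !can_offload_tso(skb))
                    return features & ~NETIF_F_GSO_MASK;

            return features;
    }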
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 8f391e2ad..bdb7afaab 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -678,7 +678,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++)
- ethtool_sprintf(&buff, g_gmac_stats_string[i].desc);
+ ethtool_puts(&buff, g_gmac_stats_string[i].desc);
}
static int hns_gmac_get_sset_count(int stringset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index fc26ffaae..c58833eb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -752,7 +752,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data)
return;
for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++)
- ethtool_sprintf(&buff, g_xgmac_stats_string[i].desc);
+ ethtool_puts(&buff, g_xgmac_stats_string[i].desc);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index b54f3706f..a5bb306b2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -912,42 +912,41 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
if (stringset == ETH_SS_TEST) {
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
- ethtool_sprintf(&buff,
- hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
- ethtool_sprintf(&buff,
- hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
+ ethtool_puts(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_MAC]);
+ ethtool_puts(&buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES]);
if ((netdev->phydev) && (!netdev->phydev->is_c45))
- ethtool_sprintf(&buff,
- hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
+ ethtool_puts(&buff,
+ hns_nic_test_strs[MAC_INTERNALLOOP_PHY]);
} else {
- ethtool_sprintf(&buff, "rx_packets");
- ethtool_sprintf(&buff, "tx_packets");
- ethtool_sprintf(&buff, "rx_bytes");
- ethtool_sprintf(&buff, "tx_bytes");
- ethtool_sprintf(&buff, "rx_errors");
- ethtool_sprintf(&buff, "tx_errors");
- ethtool_sprintf(&buff, "rx_dropped");
- ethtool_sprintf(&buff, "tx_dropped");
- ethtool_sprintf(&buff, "multicast");
- ethtool_sprintf(&buff, "collisions");
- ethtool_sprintf(&buff, "rx_over_errors");
- ethtool_sprintf(&buff, "rx_crc_errors");
- ethtool_sprintf(&buff, "rx_frame_errors");
- ethtool_sprintf(&buff, "rx_fifo_errors");
- ethtool_sprintf(&buff, "rx_missed_errors");
- ethtool_sprintf(&buff, "tx_aborted_errors");
- ethtool_sprintf(&buff, "tx_carrier_errors");
- ethtool_sprintf(&buff, "tx_fifo_errors");
- ethtool_sprintf(&buff, "tx_heartbeat_errors");
- ethtool_sprintf(&buff, "rx_length_errors");
- ethtool_sprintf(&buff, "tx_window_errors");
- ethtool_sprintf(&buff, "rx_compressed");
- ethtool_sprintf(&buff, "tx_compressed");
- ethtool_sprintf(&buff, "netdev_rx_dropped");
- ethtool_sprintf(&buff, "netdev_tx_dropped");
-
- ethtool_sprintf(&buff, "netdev_tx_timeout");
+ ethtool_puts(&buff, "rx_packets");
+ ethtool_puts(&buff, "tx_packets");
+ ethtool_puts(&buff, "rx_bytes");
+ ethtool_puts(&buff, "tx_bytes");
+ ethtool_puts(&buff, "rx_errors");
+ ethtool_puts(&buff, "tx_errors");
+ ethtool_puts(&buff, "rx_dropped");
+ ethtool_puts(&buff, "tx_dropped");
+ ethtool_puts(&buff, "multicast");
+ ethtool_puts(&buff, "collisions");
+ ethtool_puts(&buff, "rx_over_errors");
+ ethtool_puts(&buff, "rx_crc_errors");
+ ethtool_puts(&buff, "rx_frame_errors");
+ ethtool_puts(&buff, "rx_fifo_errors");
+ ethtool_puts(&buff, "rx_missed_errors");
+ ethtool_puts(&buff, "tx_aborted_errors");
+ ethtool_puts(&buff, "tx_carrier_errors");
+ ethtool_puts(&buff, "tx_fifo_errors");
+ ethtool_puts(&buff, "tx_heartbeat_errors");
+ ethtool_puts(&buff, "rx_length_errors");
+ ethtool_puts(&buff, "tx_window_errors");
+ ethtool_puts(&buff, "rx_compressed");
+ ethtool_puts(&buff, "tx_compressed");
+ ethtool_puts(&buff, "netdev_rx_dropped");
+ ethtool_puts(&buff, "netdev_tx_dropped");
+
+ ethtool_puts(&buff, "netdev_tx_timeout");
h->dev->ops->get_strings(h, stringset, buff);
}
@@ -1187,7 +1186,7 @@ hns_get_rss_indir_size(struct net_device *netdev)
}
static int
-hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+hns_get_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
@@ -1200,15 +1199,16 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
ops = priv->ae_handle->dev->ops;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- return ops->get_rss(priv->ae_handle, indir, key, hfunc);
+ return ops->get_rss(priv->ae_handle,
+ rxfh->indir, rxfh->key, &rxfh->hfunc);
}
static int
-hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+hns_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
@@ -1221,12 +1221,14 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
ops = priv->ae_handle->dev->ops;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
netdev_err(netdev, "Invalid hfunc!\n");
return -EOPNOTSUPP;
}
- return ops->set_rss(priv->ae_handle, indir, key, hfunc);
+ return ops->set_rss(priv->ae_handle,
+ rxfh->indir, rxfh->key, rxfh->hfunc);
}
static int hns_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index f3c9395d8..618f66d95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -85,7 +85,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
true);
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ desc.data[0] = cpu_to_le32(tqp->index);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b618797a7..f1695c889 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1041,7 +1041,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
return;
order = get_order(alloc_size);
- if (order > MAX_ORDER) {
+ if (order > MAX_PAGE_ORDER) {
if (net_ratelimit())
dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
return;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 682239f33..941cb529d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -78,6 +78,9 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+#define HNS3_NIC_LB_TEST_UNEXECUTED 4
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
@@ -418,18 +421,26 @@ static void hns3_do_external_lb(struct net_device *ndev,
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
+ int cnt = hns3_get_sset_count(ndev, ETH_SS_TEST);
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
+ int i;
+
+ /* initialize the loopback test result, avoid marking an unexecuted
+ * loopback test as PASS.
+ */
+ for (i = 0; i < cnt; i++)
+ data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
netdev_err(ndev, "dev resetting!");
- return;
+ goto failure;
}
if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
- return;
+ goto failure;
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test start\n");
@@ -451,6 +462,10 @@ static void hns3_self_test(struct net_device *ndev,
if (netif_msg_ifdown(h))
netdev_info(ndev, "self test end\n");
+ return;
+
+failure:
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -941,19 +956,21 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
return ae_dev->dev_specs.rss_ind_tbl_size;
}
-static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int hns3_get_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
- return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
+ return h->ae_algo->ops->get_rss(h, rxfh->indir, rxfh->key,
+ &rxfh->hfunc);
}
-static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int hns3_set_rss(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
@@ -962,19 +979,22 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
return -EOPNOTSUPP;
if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
- hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
- hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ rxfh->hfunc != ETH_RSS_HASH_TOP) ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP &&
+ rxfh->hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n");
return -EOPNOTSUPP;
}
- if (!indir) {
+ if (!rxfh->indir) {
netdev_err(netdev,
"set rss failed for indir is empty\n");
return -EOPNOTSUPP;
}
- return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
+ return h->ae_algo->ops->set_rss(h, rxfh->indir, rxfh->key,
+ rxfh->hfunc);
}
static int hns3_get_rxnfc(struct net_device *netdev,
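The hns3_self_test() fix above is a defensive-initialization pattern worth noting: every result slot is prefilled with a sentinel so a test that never runs cannot read back as PASS, and early exits now flag the whole run as failed. Reduced to its essentials:

    #include <linux/ethtool.h>

    #define LB_TEST_UNEXECUTED 4    /* sentinel, mirrors the hunk above */

    static void self_test_sketch(struct ethtool_test *etest, u64 *data, int cnt)
    {
            int i;

            for (i = 0; i < cnt; i++)
                    data[i] = LB_TEST_UNEXECUTED;

            if (!(etest->flags & ETH_TEST_FL_OFFLINE)) {
                    etest->flags |= ETH_TEST_FL_FAILED;
                    return;         /* results still say "not executed" */
            }

            /* ... run the tests, overwriting data[i] with 0 on pass ... */
    }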
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index ff3f8f424..9ec471ced 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -981,19 +981,24 @@ static const struct hclge_dbg_item tm_pri_items[] = {
static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
{
- char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
char content[HCLGE_DBG_TM_INFO_LEN];
u8 pri_num, sch_mode, weight, i, j;
+ char *data_str;
int pos, ret;
ret = hclge_tm_get_pri_num(hdev, &pri_num);
if (ret)
return ret;
+ data_str = kcalloc(ARRAY_SIZE(tm_pri_items), HCLGE_DBG_DATA_STR_LEN,
+ GFP_KERNEL);
+ if (!data_str)
+ return -ENOMEM;
+
for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
- result[i] = &data_str[i][0];
+ result[i] = &data_str[i * HCLGE_DBG_DATA_STR_LEN];
hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
NULL, ARRAY_SIZE(tm_pri_items));
@@ -1002,23 +1007,23 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
for (i = 0; i < pri_num; i++) {
ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
if (ret)
- return ret;
+ goto out;
ret = hclge_tm_get_pri_weight(hdev, i, &weight);
if (ret)
- return ret;
+ goto out;
ret = hclge_tm_get_pri_shaper(hdev, i,
HCLGE_OPC_TM_PRI_C_SHAPPING,
&c_shaper_para);
if (ret)
- return ret;
+ goto out;
ret = hclge_tm_get_pri_shaper(hdev, i,
HCLGE_OPC_TM_PRI_P_SHAPPING,
&p_shaper_para);
if (ret)
- return ret;
+ goto out;
sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
"sp";
@@ -1035,7 +1040,9 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "%s", content);
}
- return 0;
+out:
+ kfree(data_str);
+ return ret;
}
static const struct hclge_dbg_item tm_qset_items[] = {
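The debugfs fix above swaps a large on-stack char matrix for one heap block; the two-dimensional indexing survives as manual row arithmetic. A sketch with illustrative sizes:

    #include <linux/slab.h>

    static int fill_rows_example(void)
    {
            enum { ROWS = 32, ROW_LEN = 32 };       /* illustrative sizes */
            char *data_str, *row;
            int i;

            data_str = kcalloc(ROWS, ROW_LEN, GFP_KERNEL);
            if (!data_str)
                    return -ENOMEM;

            for (i = 0; i < ROWS; i++) {
                    row = &data_str[i * ROW_LEN];   /* was &data_str[i][0] */
                    snprintf(row, ROW_LEN, "row %d", i);
            }

            kfree(data_str);
            return 0;
    }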
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 609d3799d..a3b7723a9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11613,6 +11613,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_pci_uninit;
+ devl_lock(hdev->devlink);
+
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
@@ -11792,6 +11794,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+ devl_unlock(hdev->devlink);
return 0;
err_mdiobus_unreg:
@@ -11804,6 +11807,7 @@ err_msi_uninit:
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
+ devl_unlock(hdev->devlink);
hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
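The hclge change above holds the devlink instance lock across the whole init sequence and releases it on both the success and error paths. The skeleton of that pattern, with a hypothetical setup step standing in for the firmware/MSI/MDIO bring-up:

    #include <net/devlink.h>

    static int init_under_devlink_lock(struct devlink *devlink)
    {
            int err;

            devl_lock(devlink);
            err = setup_hw();       /* hypothetical bring-up step */
            devl_unlock(devlink);

            return err;
    }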
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index f4b680286..0304f03d4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1137,7 +1137,7 @@ static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
static int hinic_get_rxfh(struct net_device *netdev,
- u32 *indir, u8 *key, u8 *hfunc)
+ struct ethtool_rxfh_param *rxfh)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u8 hash_engine_type = 0;
@@ -1146,32 +1146,33 @@ static int hinic_get_rxfh(struct net_device *netdev,
if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- if (hfunc) {
- err = hinic_rss_get_hash_engine(nic_dev,
- nic_dev->rss_tmpl_idx,
- &hash_engine_type);
- if (err)
- return -EFAULT;
+ err = hinic_rss_get_hash_engine(nic_dev,
+ nic_dev->rss_tmpl_idx,
+ &hash_engine_type);
+ if (err)
+ return -EFAULT;
- *hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
- }
+ rxfh->hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
- if (indir) {
+ if (rxfh->indir) {
err = hinic_rss_get_indir_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, indir);
+ nic_dev->rss_tmpl_idx,
+ rxfh->indir);
if (err)
return -EFAULT;
}
- if (key)
+ if (rxfh->key)
err = hinic_rss_get_template_tbl(nic_dev,
- nic_dev->rss_tmpl_idx, key);
+ nic_dev->rss_tmpl_idx,
+ rxfh->key);
return err;
}
-static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int hinic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
int err = 0;
@@ -1179,11 +1180,12 @@ static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
- if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ if (rxfh->hfunc != ETH_RSS_HASH_TOP &&
+ rxfh->hfunc != ETH_RSS_HASH_XOR)
return -EOPNOTSUPP;
- nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
+ nic_dev->rss_hash_engine = (rxfh->hfunc == ETH_RSS_HASH_XOR) ?
HINIC_RSS_HASH_ENGINE_TYPE_XOR :
HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
err = hinic_rss_set_hash_engine
@@ -1193,7 +1195,7 @@ static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
return -EFAULT;
}
- err = __set_rss_rxfh(netdev, indir, key);
+ err = __set_rss_rxfh(netdev, rxfh->indir, rxfh->key);
return err;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4e18b4cef..94ac36b14 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -48,7 +48,7 @@
* of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
* some padding.
*
- * But the size of a single DMA region is limited by MAX_ORDER in the
+ * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
* kernel (about 16MB currently). To support say 4K Jumbo frames, we
* use a set of LTBs (struct ltb_set) per pool.
*
@@ -75,7 +75,7 @@
* pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
* plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
*/
-#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_ORDER) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE (38 << 20)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 06ddd7147..d55638ad8 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -299,6 +299,17 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_HWMON
+ bool "Intel(R) Ethernet Connection E800 Series Support HWMON support"
+ default y
+ depends on ICE && HWMON && !(ICE=y && HWMON=m)
+ help
+ Say Y if you want to expose thermal sensor data on Intel devices.
+
+ Some of these devices contain internal thermal sensors. Their
+ readings are exposed to userspace as onboard temperature sensors
+ through the hwmon sysfs interface.
+
config ICE_SWITCHDEV
bool "Switchdev Support"
default y
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4576511c9..f9328f2e6 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3261,8 +3261,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->mdix_mode =
- (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
- IGP01E1000_PSSR_MDIX_SHIFT);
+ (e1000_auto_x_mode)FIELD_GET(IGP01E1000_PSSR_MDIX, phy_data);
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -3273,11 +3272,11 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
/* Get cable length */
@@ -3327,14 +3326,12 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->extended_10bt_distance =
- ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
- M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ FIELD_GET(M88E1000_PSCR_10BT_EXT_DIST_ENABLE, phy_data) ?
e1000_10bt_ext_dist_enable_lower :
e1000_10bt_ext_dist_enable_normal;
phy_info->polarity_correction =
- ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
- M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ FIELD_GET(M88E1000_PSCR_POLARITY_REVERSAL, phy_data) ?
e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
/* Check polarity status */
@@ -3348,27 +3345,25 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
return ret_val;
phy_info->mdix_mode =
- (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
- M88E1000_PSSR_MDIX_SHIFT);
+ (e1000_auto_x_mode)FIELD_GET(M88E1000_PSSR_MDIX, phy_data);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
/* Cable Length Estimation and Local/Remote Receiver Information
* are only valid at 1000 Mbps.
*/
phy_info->cable_length =
- (e1000_cable_length) ((phy_data &
- M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ (e1000_cable_length)FIELD_GET(M88E1000_PSSR_CABLE_LENGTH,
+ phy_data);
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ phy_info->local_rx = FIELD_GET(SR_1000T_LOCAL_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ phy_info->remote_rx = FIELD_GET(SR_1000T_REMOTE_RX_STATUS,
+ phy_data) ?
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
}
@@ -3516,7 +3511,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
if (ret_val)
return ret_val;
eeprom_size =
- (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ FIELD_GET(EEPROM_SIZE_MASK, eeprom_size);
/* 256B eeprom size was not supported in earlier hardware, so we
* bump eeprom_size up one to ensure that "1" (which maps to
* 256B) is never the result used in the shifting logic below.
@@ -4892,8 +4887,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
&phy_data);
if (ret_val)
return ret_val;
- cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ cable_length = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
/* Convert the enum value to ranged values */
switch (cable_length) {
@@ -5002,8 +4996,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
&phy_data);
if (ret_val)
return ret_val;
- *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
- M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ *polarity = FIELD_GET(M88E1000_PSSR_REV_POLARITY, phy_data) ?
e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
} else if (hw->phy_type == e1000_phy_igp) {
@@ -5073,8 +5066,8 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
- M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ hw->speed_downgraded = FIELD_GET(M88E1000_PSSR_DOWNSHIFT,
+ phy_data);
}
return E1000_SUCCESS;
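The e1000 conversions above, and the e1000e ones that follow, all lean on <linux/bitfield.h>: FIELD_GET() masks a value and shifts it down by the mask's lowest set bit, FIELD_PREP() does the inverse, and both derive the shift at compile time from the constant mask alone, which is what lets the paired *_MASK/*_SHIFT defines disappear. For example (mask value illustrative):

    #include <linux/bitfield.h>

    #define CABLE_LENGTH_MASK 0x0380        /* bits 9:7, illustrative */

    static u16 cable_length_demo(u16 phy_data)
    {
            /* equivalent to (phy_data & 0x0380) >> 7 */
            return FIELD_GET(CABLE_LENGTH_MASK, phy_data);
    }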
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index be9c695dd..4eb1ceaf8 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -92,8 +92,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -1035,17 +1034,18 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
+ /* these next three accesses were always meant to use page 0x34 using
+ * GG82563_REG(0x34, N) but never did, so we've just corrected the call
+ * to not drop bits
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 4, 0xFFFF);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 9, &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 9, reg_data);
if (ret_val)
return ret_val;
ret_val =
@@ -1209,8 +1209,8 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
if (ret_val)
return ret_val;
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) |
+ E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1244,8 +1244,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
if (ret_val)
return ret_val;
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b1e890dd..969f855a7 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -157,8 +157,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
fallthrough;
default:
nvm->type = e1000_nvm_eeprom_spi;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = (u16)FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 63c3c7938..23a58cada 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -678,11 +678,8 @@
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCIE_LINK_STATUS 0x12
#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-#define PCIE_LINK_WIDTH_MASK 0x3F0
-#define PCIE_LINK_WIDTH_SHIFT 4
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9835e6a90..fc0f98ea6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -654,8 +654,8 @@ static void e1000_get_drvinfo(struct net_device *netdev,
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
+ FIELD_GET(0xF000, adapter->eeprom_vers),
+ FIELD_GET(0x0FF0, adapter->eeprom_vers),
(adapter->eeprom_vers & 0x000F));
strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
@@ -925,8 +925,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
if (mac->type >= e1000_pch_lpt)
- wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
- E1000_FWSM_WLOCK_MAC_SHIFT;
+ wlock_mac = FIELD_GET(E1000_FWSM_WLOCK_MAC_MASK, er32(FWSM));
for (i = 0; i < mac->rar_entry_count; i++) {
if (mac->type >= e1000_pch_lpt) {
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1fef6bb5a..4b6e75361 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -628,6 +628,7 @@ struct e1000_phy_info {
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
+ u32 retry_count;
enum e1000_media_type media_type;
@@ -644,6 +645,7 @@ struct e1000_phy_info {
bool polarity_correction;
bool speed_downgraded;
bool autoneg_wait_to_complete;
+ bool retry_enabled;
};
struct e1000_nvm_info {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 39e9fc601..f9e94be36 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -222,11 +222,18 @@ out:
if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -310,6 +317,11 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
goto out;
}
+ /* There is no guarantee that the PHY is accessible at this time
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
@@ -380,6 +392,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
+ e1000e_enable_phy_retry(hw);
+
hw->phy.ops.release(hw);
if (!ret_val) {
@@ -449,6 +463,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->id = e1000_phy_unknown;
+ if (hw->mac.type == e1000_pch_mtp) {
+ phy->retry_count = 2;
+ e1000e_enable_phy_retry(hw);
+ }
+
ret_val = e1000_init_phy_workarounds_pchlan(hw);
if (ret_val)
return ret_val;
@@ -1072,13 +1091,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
- ((lat_enc & E1000_LTRV_SCALE_MASK)
- >> E1000_LTRV_SCALE_SHIFT)));
+ FIELD_GET(E1000_LTRV_SCALE_MASK, lat_enc)));
max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
- (1U << (E1000_LTRV_SCALE_FACTOR *
- ((max_ltr_enc & E1000_LTRV_SCALE_MASK)
- >> E1000_LTRV_SCALE_SHIFT)));
+ (1U << (E1000_LTRV_SCALE_FACTOR *
+ FIELD_GET(E1000_LTRV_SCALE_MASK, max_ltr_enc)));
if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc;
@@ -1148,18 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
- /* Force SMBus mode in PHY */
- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
- if (ret_val)
- goto release;
- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Force SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
-
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1315,6 +1320,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
/* Toggle LANPHYPC Value bit */
e1000_toggle_lanphypc_pch_lpt(hw);
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
/* Unforce SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val) {
@@ -1335,6 +1345,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+ e1000e_enable_phy_retry(hw);
+
/* Unforce SMBus mode in MAC */
mac_reg = er32(CTRL_EXT);
mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
@@ -2075,8 +2087,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
u16 phy_data;
u32 strap = er32(STRAP);
- u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
- E1000_STRAP_SMT_FREQ_SHIFT;
+ u32 freq = FIELD_GET(E1000_STRAP_SMT_FREQ_MASK, strap);
s32 ret_val;
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -2562,8 +2573,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- (u16)((mac_reg & E1000_RAH_AV)
- >> 16));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
@@ -3205,7 +3215,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
&nvm_dword);
if (ret_val)
return ret_val;
- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ sig_byte = FIELD_GET(0xFF00, nvm_dword);
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 0;
@@ -3218,7 +3228,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
&nvm_dword);
if (ret_val)
return ret_val;
- sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ sig_byte = FIELD_GET(0xFF00, nvm_dword);
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 1;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 5df7ad93f..d7df2a0ed 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
+
#include "e1000.h"
/**
@@ -13,21 +15,17 @@
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct pci_dev *pdev = hw->adapter->pdev;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
- struct e1000_adapter *adapter = hw->adapter;
- u16 pcie_link_status, cap_offset;
+ u16 pcie_link_status;
- cap_offset = adapter->pdev->pcie_cap;
- if (!cap_offset) {
+ if (!pci_pcie_cap(pdev)) {
bus->width = e1000_bus_width_unknown;
} else {
- pci_read_config_word(adapter->pdev,
- cap_offset + PCIE_LINK_STATUS,
- &pcie_link_status);
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCIE_LINK_WIDTH_MASK) >>
- PCIE_LINK_WIDTH_SHIFT);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &pcie_link_status);
+ bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
+ pcie_link_status);
}
mac->ops.set_lan_id(hw);
@@ -52,7 +50,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
* for the device regardless of function swap state.
*/
reg = er32(STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
}
/**
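
The rewritten e1000e_get_bus_info_pcie() above swaps a manual config-space read at a hand-tracked capability offset for the PCI core helpers, which locate the PCIe capability themselves. A standalone sketch of the same pattern; pdev is assumed to be a bound struct pci_dev:

#include <linux/pci.h>
#include <linux/bitfield.h>

static u16 example_pcie_link_width(struct pci_dev *pdev)
{
	u16 lnksta = 0;

	/* pci_pcie_cap() returns 0 when the device has no PCIe capability. */
	if (!pci_pcie_cap(pdev))
		return 0;

	/* The helper resolves the capability offset internally, replacing
	 * the manual pci_read_config_word() at pdev->pcie_cap + offset.
	 */
	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);

	/* Negotiated link width lives in bits 9:4 of Link Status. */
	return FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
}
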
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f536c8567..3692fce20 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1788,8 +1788,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
@@ -1868,8 +1867,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
@@ -5031,8 +5029,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
- (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
- E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ FIELD_GET(E1000_PBECCSTS_UNCORR_ERR_CNT_MASK, pbeccsts);
}
}
@@ -6249,7 +6246,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg |= BM_RCTL_MPE;
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
- phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+ phy_reg |= (FIELD_GET(E1000_RCTL_MO_3, mac_reg)
<< BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
@@ -6626,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6691,14 +6689,31 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+ if (retval)
+ return retval;
+ }
+
+ /* Force SMBUS to allow WOL */
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
- if (retval)
- return retval;
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 08c3d477d..93544f1cc 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -107,6 +107,16 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
}
+void e1000e_disable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = false;
+}
+
+void e1000e_enable_phy_retry(struct e1000_hw *hw)
+{
+ hw->phy.retry_enabled = true;
+}
+
/**
* e1000e_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
@@ -118,57 +128,73 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
**/
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
- e_dbg("MDI Read offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
- return -E1000_ERR_PHY;
- }
- *data = (u16)mdic;
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usleep_range(50, 60);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usleep_range(100, 150);
+
+ if (success) {
+ *data = (u16)mdic;
+ return 0;
+ }
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
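
The reshaped MDIC read keeps the original write/poll/validate sequence intact but nests it inside a bounded retry loop, and swaps udelay() for usleep_range() now that the path tolerates sleeping. Reduced to its control flow, the pattern looks like the sketch below; attempt_once and the error code are illustrative stand-ins, not driver symbols:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* attempt_once stands in for one full MDIC transaction: program the
 * register, poll for READY, then validate ERROR and the echoed offset.
 */
static int example_retry_transaction(bool (*attempt_once)(void), u32 retry_max)
{
	u32 retry_counter;

	for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
		if (attempt_once())
			return 0;	/* success: stop retrying at once */

		/* Back off before the next attempt, mirroring the 10 ms
		 * delay in e1000e_read_phy_reg_mdic(); skip it after the
		 * final failure.
		 */
		if (retry_counter != retry_max)
			mdelay(10);
	}

	return -EIO;	/* the driver itself returns -E1000_ERR_PHY here */
}

With retry_max forced to 0 (retry disabled) the loop body runs exactly once, which is why e1000e_disable_phy_retry() makes these accessors behave like the pre-patch single-shot code.
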
/**
@@ -181,57 +207,72 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
**/
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
+ u32 i, mdic = 0, retry_counter, retry_max;
struct e1000_phy_info *phy = &hw->phy;
- u32 i, mdic = 0;
+ bool success;
if (offset > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
+ retry_max = phy->retry_enabled ? phy->retry_count : 0;
+
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = (((u32)data) |
- (offset << E1000_MDIC_REG_SHIFT) |
- (phy->addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ for (retry_counter = 0; retry_counter <= retry_max; retry_counter++) {
+ success = true;
- ew32(MDIC, mdic);
+ mdic = (((u32)data) |
+ (offset << E1000_MDIC_REG_SHIFT) |
+ (phy->addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
- /* Poll the ready bit to see if the MDI read completed
- * Increasing the time out as testing showed failures with
- * the lower time out
- */
- for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Write PHY Red Address %d Error\n", offset);
- return -E1000_ERR_PHY;
- }
- if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
- e_dbg("MDI Write offset error - requested %d, returned %d\n",
- offset,
- (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
- return -E1000_ERR_PHY;
- }
+ ew32(MDIC, mdic);
- /* Allow some time after each MDIC transaction to avoid
- * reading duplicate data in the next MDIC transaction.
- */
- if (hw->mac.type == e1000_pch2lan)
- udelay(100);
+ /* Poll the ready bit to see if the MDI read completed
+ * Increasing the time out as testing showed failures with
+ * the lower time out
+ */
+ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+ usleep_range(50, 60);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n",
+ offset);
+ success = false;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Write PHY Reg Address %d Error\n", offset);
+ success = false;
+ }
+ if (FIELD_GET(E1000_MDIC_REG_MASK, mdic) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset, FIELD_GET(E1000_MDIC_REG_MASK, mdic));
+ success = false;
+ }
- return 0;
+ /* Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ usleep_range(100, 150);
+
+ if (success)
+ return 0;
+
+ if (retry_counter != retry_max) {
+ e_dbg("Perform retry on PHY transaction...\n");
+ mdelay(10);
+ }
+ }
+
+ return -E1000_ERR_PHY;
}
/**
@@ -463,8 +504,8 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) |
+ E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -536,8 +577,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ kmrnctrlsta = FIELD_PREP(E1000_KMRNCTRLSTA_OFFSET, offset) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
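
FIELD_PREP() is the write-side twin of FIELD_GET(): it shifts a value into position under a mask, replacing open-coded "(offset << SHIFT) & MASK" as in the KMRNCTRLSTA lines above. A sketch with an illustrative layout:

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

/* Illustrative control word: an offset field in bits 25:16 plus a
 * read-enable flag bit, loosely modelled on the KMRN register shape.
 */
#define EXAMPLE_OFFSET_MASK	GENMASK(25, 16)
#define EXAMPLE_READ_ENABLE	BIT(26)

static u32 example_build_ctrl(u32 offset)
{
	/* FIELD_PREP() masks and shifts in one step and, when given a
	 * constant value, triggers a compile-time check that it fits.
	 */
	return FIELD_PREP(EXAMPLE_OFFSET_MASK, offset) | EXAMPLE_READ_ENABLE;
}
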
@@ -1793,8 +1833,7 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@@ -3234,8 +3273,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+ length = FIELD_GET(I82577_DSTATUS_CABLE_LENGTH, phy_data);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index c48777d09..049bb325b 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -51,6 +51,8 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
void e1000_power_up_phy_copper(struct e1000_hw *hw);
void e1000_power_down_phy_copper(struct e1000_hw *hw);
+void e1000e_disable_phy_retry(struct e1000_hw *hw);
+void e1000e_enable_phy_retry(struct e1000_hw *hw);
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 13a05604d..1bc5b6c0b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1057,16 +1057,16 @@ static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}
-static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int fm10k_get_rssh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
+ u8 *key = rxfh->key;
int i, err;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- err = fm10k_get_reta(netdev, indir);
+ err = fm10k_get_reta(netdev, rxfh->indir);
if (err || !key)
return err;
@@ -1076,23 +1076,25 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
return 0;
}
-static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int fm10k_set_rssh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_hw *hw = &interface->hw;
int i, err;
/* We do not allow change in unsupported parameters */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- err = fm10k_set_reta(netdev, indir);
- if (err || !key)
+ err = fm10k_set_reta(netdev, rxfh->indir);
+ if (err || !rxfh->key)
return err;
- for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
- u32 rssrk = le32_to_cpu(*(__le32 *)key);
+ for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) {
+ u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key);
if (interface->rssrk[i] == rssrk)
continue;
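
The fm10k changes track an ethtool core rework that bundles the RSS indirection table, hash key, and hash function into a single struct ethtool_rxfh_param instead of three loose arguments. A minimal .get_rxfh handler under the new signature might look like the sketch below; the two fill helpers are hypothetical placeholders for driver-specific table reads:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical helpers standing in for driver-specific logic. */
static void example_fill_indir(struct net_device *netdev, u32 *indir)
{
	indir[0] = 0;	/* placeholder */
}

static void example_fill_key(struct net_device *netdev, u8 *key)
{
	key[0] = 0;	/* placeholder */
}

static int example_get_rxfh(struct net_device *netdev,
			    struct ethtool_rxfh_param *rxfh)
{
	/* All outputs now travel in one parameter block; either pointer
	 * may be NULL when userspace did not request that piece.
	 */
	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->indir)
		example_fill_indir(netdev, rxfh->indir);
	if (rxfh->key)
		example_fill_key(netdev, rxfh->key);

	return 0;
}
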
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index ae700a180..98861cc6d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -866,8 +866,7 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
* register is RO from the VF, so the PF must do this even in the
* case of notifying the VF of a new VID via the mailbox.
*/
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
+ txqctl = FIELD_PREP(FM10K_TXQCTL_VID_MASK, vf_vid);
txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
@@ -1576,8 +1575,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
if (func & FM10K_FAULT_FUNC_PF)
fault->func = 0;
else
- fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
- FM10K_FAULT_FUNC_VF_SHIFT);
+ fault->func = 1 + FIELD_GET(FM10K_FAULT_FUNC_VF_MASK, func);
/* record fault type */
fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index c50928ec1..7fb1961f2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -127,15 +127,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
hw->mac.max_queues = i;
/* fetch default VLAN and ITR scale */
- hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) &
- FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
+ hw->mac.default_vid = FIELD_GET(FM10K_TXQCTL_VID_MASK,
+ fm10k_read_reg(hw, FM10K_TXQCTL(0)));
/* Read the ITR scale from TDLEN. See the definition of
* FM10K_TDLEN_ITR_SCALE_SHIFT for more information about how TDLEN is
* used here.
*/
- hw->mac.itr_scale = (fm10k_read_reg(hw, FM10K_TDLEN(0)) &
- FM10K_TDLEN_ITR_SCALE_MASK) >>
- FM10K_TDLEN_ITR_SCALE_SHIFT;
+ hw->mac.itr_scale = FIELD_GET(FM10K_TDLEN_ITR_SCALE_MASK,
+ fm10k_read_reg(hw, FM10K_TDLEN(0)));
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1bf424ac3..4d2b05de6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -24,6 +24,7 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_NUM_DESCRIPTORS_XL710 8160
#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
@@ -33,11 +34,11 @@
#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
- (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
+ (test_bit(I40E_HW_CAP_RSS_AQ, (pf)->hw.caps) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_MAX_VF_QUEUES 16
#define i40e_pf_get_max_q_per_tc(pf) \
- (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
+ (test_bit(I40E_HW_CAP_128_QP_RSS, (pf)->hw.caps) ? 128 : 64)
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256
@@ -78,7 +79,7 @@
#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
/* driver state flags */
-enum i40e_state_t {
+enum i40e_state {
__I40E_TESTING,
__I40E_CONFIG_BUSY,
__I40E_CONFIG_DONE,
@@ -126,7 +127,7 @@ enum i40e_state_t {
BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
-enum i40e_vsi_state_t {
+enum i40e_vsi_state {
__I40E_VSI_DOWN,
__I40E_VSI_NEEDS_RESTART,
__I40E_VSI_SYNCING_FILTERS,
@@ -138,6 +139,60 @@ enum i40e_vsi_state_t {
__I40E_VSI_STATE_SIZE__,
};
+enum i40e_pf_flags {
+ I40E_FLAG_MSI_ENA,
+ I40E_FLAG_MSIX_ENA,
+ I40E_FLAG_RSS_ENA,
+ I40E_FLAG_VMDQ_ENA,
+ I40E_FLAG_SRIOV_ENA,
+ I40E_FLAG_DCB_CAPABLE,
+ I40E_FLAG_DCB_ENA,
+ I40E_FLAG_FD_SB_ENA,
+ I40E_FLAG_FD_ATR_ENA,
+ I40E_FLAG_MFP_ENA,
+ I40E_FLAG_HW_ATR_EVICT_ENA,
+ I40E_FLAG_VEB_MODE_ENA,
+ I40E_FLAG_VEB_STATS_ENA,
+ I40E_FLAG_LINK_POLLING_ENA,
+ I40E_FLAG_TRUE_PROMISC_ENA,
+ I40E_FLAG_LEGACY_RX_ENA,
+ I40E_FLAG_PTP_ENA,
+ I40E_FLAG_IWARP_ENA,
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ I40E_FLAG_SOURCE_PRUNING_DIS,
+ I40E_FLAG_TC_MQPRIO_ENA,
+ I40E_FLAG_FD_SB_INACTIVE,
+ I40E_FLAG_FD_SB_TO_CLOUD_FILTER,
+ I40E_FLAG_FW_LLDP_DIS,
+ I40E_FLAG_RS_FEC,
+ I40E_FLAG_BASE_R_FEC,
+ /* TOTAL_PORT_SHUTDOWN_ENA
+ * Allows physically disabling the link on the NIC's port.
+ * If enabled, after a link-down request from the OS,
+ * no link, traffic, or LED activity is possible on that port.
+ *
+ * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA is set,
+ * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA is forced on and cannot be
+ * disabled by the system admin while it is active.
+ * The two features are exclusive in terms of configuration, but
+ * they behave similarly (both disable the physical link of the
+ * port), with the following differences:
+ * - LINK_DOWN_ON_CLOSE_ENA is configurable at host OS run-time and
+ * is supported by the whole family of 7xx Intel Ethernet Controllers
+ * - TOTAL_PORT_SHUTDOWN_ENA may be enabled only before the OS loads
+ * (in BIOS), and only if the motherboard's BIOS and the NIC's FW
+ * support it
+ * - when LINK_DOWN_ON_CLOSE_ENA is used, the link is brought down
+ * by sending phy_type=0 to the NIC's FW
+ * - when TOTAL_PORT_SHUTDOWN_ENA is used, phy_type is not altered;
+ * instead the link is brought down by clearing the
+ * I40E_AQ_PHY_ENABLE_LINK bit in the abilities field of the
+ * i40e_aq_set_phy_config structure
+ */
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
+ I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_PF_FLAGS_NBITS, /* must be last */
+};
+
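
Moving the PF flags from a u32 of BIT() defines to an enum plus DECLARE_BITMAP() lifts the 32-flag ceiling and routes every access through test_bit()/set_bit()/clear_bit(). The pattern in isolation, with illustrative names rather than the i40e ones:

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum example_flags {
	EXAMPLE_FLAG_A,
	EXAMPLE_FLAG_B,
	EXAMPLE_FLAGS_NBITS,		/* must be last: sizes the bitmap */
};

struct example_pf {
	DECLARE_BITMAP(flags, EXAMPLE_FLAGS_NBITS);
};

static bool example_flag_usage(struct example_pf *pf)
{
	set_bit(EXAMPLE_FLAG_A, pf->flags);	/* was: flags |= BIT(n)   */
	clear_bit(EXAMPLE_FLAG_B, pf->flags);	/* was: flags &= ~BIT(n)  */

	/* was: !!(flags & BIT(n)) */
	return test_bit(EXAMPLE_FLAG_A, pf->flags);
}

Because the entry before _NBITS fixes the bitmap size, adding a new flag never again requires widening a storage type.
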
enum i40e_interrupt_policy {
I40E_INTERRUPT_BEST_CASE,
I40E_INTERRUPT_MEDIUM,
@@ -413,9 +468,7 @@ struct i40e_pf {
struct i40e_hw hw;
DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
- bool fc_autoneg_status;
- u16 eeprom_version;
u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
@@ -431,7 +484,6 @@ struct i40e_pf {
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u16 num_alloc_vsi; /* num VSIs this driver supports */
- u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
@@ -469,89 +521,16 @@ struct i40e_pf {
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
- enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
u32 msg_enable;
char int_name[I40E_INT_NAME_STR_LEN];
- u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
struct work_struct service_task;
- u32 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
-#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
-#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
-#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
-#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
-#define I40E_HW_NO_DCB_SUPPORT BIT(7)
-#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
-#define I40E_HW_PTP_L4_CAPABLE BIT(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
-#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
-#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
-#define I40E_HW_STOP_FW_LLDP BIT(16)
-#define I40E_HW_PORT_ID_VALID BIT(17)
-#define I40E_HW_RESTART_AUTONEG BIT(18)
-
- u32 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40E_FLAG_MSI_ENABLED BIT(1)
-#define I40E_FLAG_MSIX_ENABLED BIT(2)
-#define I40E_FLAG_RSS_ENABLED BIT(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT(4)
-#define I40E_FLAG_SRIOV_ENABLED BIT(5)
-#define I40E_FLAG_DCB_CAPABLE BIT(6)
-#define I40E_FLAG_DCB_ENABLED BIT(7)
-#define I40E_FLAG_FD_SB_ENABLED BIT(8)
-#define I40E_FLAG_FD_ATR_ENABLED BIT(9)
-#define I40E_FLAG_MFP_ENABLED BIT(10)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
-#define I40E_FLAG_LEGACY_RX BIT(16)
-#define I40E_FLAG_PTP BIT(17)
-#define I40E_FLAG_IWARP_ENABLED BIT(18)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
-#define I40E_FLAG_TC_MQPRIO BIT(21)
-#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
-#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
-#define I40E_FLAG_RS_FEC BIT(25)
-#define I40E_FLAG_BASE_R_FEC BIT(26)
-/* TOTAL_PORT_SHUTDOWN
- * Allows to physically disable the link on the NIC's port.
- * If enabled, (after link down request from the OS)
- * no link, traffic or led activity is possible on that port.
- *
- * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the
- * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true
- * and cannot be disabled by system admin at that time.
- * The functionalities are exclusive in terms of configuration, but they also
- * have similar behavior (allowing to disable physical link of the port),
- * with following differences:
- * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
- * supported by whole family of 7xx Intel Ethernet Controllers
- * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS)
- * only if motherboard's BIOS and NIC's FW has support of it
- * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down
- * by sending phy_type=0 to NIC's FW
- * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead
- * the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK)
- * in abilities field of i40e_aq_set_phy_config structure
- */
-#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
-#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
-
+ DECLARE_BITMAP(flags, I40E_PF_FLAGS_NBITS);
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -559,7 +538,6 @@ struct i40e_pf {
u32 tx_timeout_count;
u32 tx_timeout_recovery_level;
unsigned long tx_timeout_last_recovery;
- u32 tx_sluggish_count;
u32 hw_csum_rx_error;
u32 led_status;
u16 corer_count; /* Core reset count */
@@ -581,17 +559,13 @@ struct i40e_pf {
struct i40e_lump_tracking *irq_pile;
/* switch config info */
- u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
bool cur_promisc;
- u16 instance; /* A unique number per i40e_pf instance in the system */
-
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -685,9 +659,7 @@ struct i40e_pf {
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
struct timespec64 ptp_prev_hw_time;
- struct work_struct ptp_pps_work;
struct work_struct ptp_extts0_work;
- struct work_struct ptp_extts1_work;
ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
@@ -695,10 +667,7 @@ struct i40e_pf {
u32 tx_hwtstamp_skipped;
u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
- u64 ptp_pps_start;
- u32 pps_delay;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
- struct ptp_pin_desc ptp_pin[3];
unsigned long latch_events[4];
bool ptp_tx;
bool ptp_rx;
@@ -711,7 +680,6 @@ struct i40e_pf {
u32 fd_inv;
u16 phy_led_val;
- u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
/* List to keep previous DDP profiles to be rolled back in the future */
@@ -940,6 +908,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ bool in_busy_poll;
int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
@@ -1267,7 +1236,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
{
- return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+ return test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
}
#ifdef CONFIG_I40E_DCB
@@ -1301,7 +1270,7 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf);
int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
@@ -1321,13 +1290,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
* i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
* @pf: pointer to a pf.
*
- * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ * Check and return the state of the I40E_FLAG_TC_MQPRIO_ENA flag.
*
- * Return: I40E_FLAG_TC_MQPRIO set state.
+ * Return: true if I40E_FLAG_TC_MQPRIO_ENA is set, false otherwise.
**/
-static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+static inline bool i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
{
- return pf->flags & I40E_FLAG_TC_MQPRIO;
+ return test_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9ce6e633c..f73f5930f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -9,40 +9,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
-{
- /* set head and tail registers in our local struct */
- if (i40e_is_vf(hw)) {
- hw->aq.asq.tail = I40E_VF_ATQT1;
- hw->aq.asq.head = I40E_VF_ATQH1;
- hw->aq.asq.len = I40E_VF_ATQLEN1;
- hw->aq.asq.bal = I40E_VF_ATQBAL1;
- hw->aq.asq.bah = I40E_VF_ATQBAH1;
- hw->aq.arq.tail = I40E_VF_ARQT1;
- hw->aq.arq.head = I40E_VF_ARQH1;
- hw->aq.arq.len = I40E_VF_ARQLEN1;
- hw->aq.arq.bal = I40E_VF_ARQBAL1;
- hw->aq.arq.bah = I40E_VF_ARQBAH1;
- } else {
- hw->aq.asq.tail = I40E_PF_ATQT;
- hw->aq.asq.head = I40E_PF_ATQH;
- hw->aq.asq.len = I40E_PF_ATQLEN;
- hw->aq.asq.bal = I40E_PF_ATQBAL;
- hw->aq.asq.bah = I40E_PF_ATQBAH;
- hw->aq.arq.tail = I40E_PF_ARQT;
- hw->aq.arq.head = I40E_PF_ARQH;
- hw->aq.arq.len = I40E_PF_ARQLEN;
- hw->aq.arq.bal = I40E_PF_ARQBAL;
- hw->aq.arq.bah = I40E_PF_ARQBAH;
- }
-}
-
-/**
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
@@ -267,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, I40E_PF_ATQBAL);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = -EIO;
@@ -296,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, I40E_PF_ARQBAL);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = -EIO;
@@ -452,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
+ wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, I40E_PF_ATQBAL, 0);
+ wr32(hw, I40E_PF_ATQBAH, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -486,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
+ wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, I40E_PF_ARQBAL, 0);
+ wr32(hw, I40E_PF_ARQBAH, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -503,44 +469,76 @@ shutdown_arq_out:
}
/**
- * i40e_set_hw_flags - set HW flags
+ * i40e_set_hw_caps - set HW flags
* @hw: pointer to the hardware structure
**/
-static void i40e_set_hw_flags(struct i40e_hw *hw)
+static void i40e_set_hw_caps(struct i40e_hw *hw)
{
- struct i40e_adminq_info *aq = &hw->aq;
-
- hw->flags = 0;
+ bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);
switch (hw->mac.type) {
case I40E_MAC_XL710:
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
+ set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
/* The ability to RX (not drop) 802.1ad frames */
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+ }
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
+ /* Supported in FW API version higher than 1.4 */
+ set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+ }
+ if (i40e_is_fw_ver_lt(hw, 4, 33)) {
+ set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
+ /* No DCB support for FW < v4.33 */
+ set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
+ }
+ if (i40e_is_fw_ver_lt(hw, 4, 3)) {
+ /* Disable FW LLDP if FW < v4.3 */
+ set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
+ }
+ if (i40e_is_fw_ver_ge(hw, 4, 40)) {
+ /* Use the FW Set LLDP MIB API if FW >= v4.40 */
+ set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+ }
+ if (i40e_is_fw_ver_ge(hw, 6, 0)) {
+ /* Enable PTP L4 if FW >= v6.0 */
+ set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
}
break;
case I40E_MAC_X722:
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+ set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
+ set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+ set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
+ set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
+ set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+ set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
+ set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
+ set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
+ set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
+ set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
+ set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
+ set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
+ set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);
+
+ if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
+ I40E_FDEVICT_PCTYPE_DEFAULT) {
+ hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
+ clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
+ }
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_GET_LINK_INFO_X722))
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
- hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+ if (i40e_is_aq_api_ver_ge(hw, 1,
+ I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);
fallthrough;
default:
@@ -548,22 +546,18 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
}
/* Newer versions of firmware require lock when reading the NVM */
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 5))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 8)) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
- hw->flags |= I40E_HW_FLAG_DROP_MODE;
- }
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
+ set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
+
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if (i40e_is_aq_api_ver_ge(hw, 1, 7))
+ set_bit(I40E_HW_CAP_802_1AD, hw->caps);
+
+ if (i40e_is_aq_api_ver_ge(hw, 1, 8))
+ set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);
- if (aq->api_maj_ver > 1 ||
- (aq->api_maj_ver == 1 &&
- aq->api_min_ver >= 9))
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+ if (i40e_is_aq_api_ver_ge(hw, 1, 9))
+ set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
}
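
All of the repeated "maj > X || (maj == X && min >= Y)" comparisons above collapse into the i40e_is_aq_api_ver_ge()/_lt() and i40e_is_fw_ver_ge()/_lt() helpers. Their core is the two-component comparison visible in the static helper removed from i40e_common.c later in this diff; a standalone sketch of that predicate, with illustrative parameter names:

#include <linux/types.h>

/* Greater-or-equal on a (major, minor) version pair; the _lt() flavor
 * is simply the negation of this result.
 */
static bool example_ver_ge(u16 cur_maj, u16 cur_min, u16 want_maj, u16 want_min)
{
	return cur_maj > want_maj ||
	       (cur_maj == want_maj && cur_min >= want_min);
}
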
/**
@@ -593,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* Set up register offsets */
- i40e_adminq_init_regs(hw);
-
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
@@ -633,7 +624,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
/* Some features were introduced in different FW API version
* for different MAC type.
*/
- i40e_set_hw_flags(hw);
+ i40e_set_hw_caps(hw);
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
@@ -648,25 +639,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
- if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
- }
- if (hw->mac.type == I40E_MAC_X722 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
- hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
- }
-
- /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
- if (hw->aq.api_maj_ver > 1 ||
- (hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 7))
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
- if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
ret_code = -EIO;
goto init_adminq_free_arq;
}
@@ -723,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, I40E_PF_ATQH) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
@@ -759,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
}
@@ -800,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
hw->aq.asq_last_status = I40E_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, I40E_PF_ATQH);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -892,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -952,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = -EIO;
@@ -1106,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = -EALREADY;
@@ -1154,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, I40E_PF_ARQT, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 80125bea8..ee86d2c53 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -29,13 +29,6 @@ struct i40e_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
};
/* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 18a1c3b6d..c8f35d4de 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -5,6 +5,7 @@
#define _I40E_ADMINQ_CMD_H_
#include <linux/bits.h>
+#include <linux/types.h>
/* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 3eb6564c1..de6ca6295 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -196,11 +196,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- I40E_PF_ATQLEN_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
return false;
+
+ return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}
/**
@@ -249,6 +249,7 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
int status;
+ u16 flags;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -261,23 +262,18 @@ static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+ vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(I40E_AQC_SET_RSS_LUT_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+ cmd_resp->flags = cpu_to_le16(flags);
status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
@@ -347,11 +343,9 @@ static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+ vsi_id = FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(I40E_AQC_SET_RSS_KEY_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
@@ -670,11 +664,11 @@ int i40e_init_shared_code(struct i40e_hw *hw)
hw->phy.get_link_info = true;
/* Determine port number and PF number*/
- port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
- >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+ port = FIELD_GET(I40E_PFGEN_PORTNUM_PORT_NUM_MASK,
+ rd32(hw, I40E_PFGEN_PORTNUM));
hw->port = (u8)port;
- ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
- I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+ ari = FIELD_GET(I40E_GLPCI_CAPSUP_ARI_EN_MASK,
+ rd32(hw, I40E_GLPCI_CAPSUP));
func_rid = rd32(hw, I40E_PF_FUNC_RID);
if (ari)
hw->pf_id = (u8)(func_rid & 0xff);
@@ -992,9 +986,8 @@ int i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
- I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
- I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = FIELD_GET(I40E_GLGEN_RSTCTL_GRSTDEL_MASK,
+ rd32(hw, I40E_GLGEN_RSTCTL));
/* It can take upto 15 secs for GRST steady state.
* Bump it to 16 secs max to be safe.
@@ -1086,26 +1079,20 @@ void i40e_clear_hw(struct i40e_hw *hw)
/* get number of interrupts, queues, and VFs */
val = rd32(hw, I40E_GLPCI_CNF2);
- num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
- I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
- num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
- I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+ num_pf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_PF_N_MASK, val);
+ num_vf_int = FIELD_GET(I40E_GLPCI_CNF2_MSI_X_VF_N_MASK, val);
val = rd32(hw, I40E_PFLAN_QALLOC);
- base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
- I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
- j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
- I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+ base_queue = FIELD_GET(I40E_PFLAN_QALLOC_FIRSTQ_MASK, val);
+ j = FIELD_GET(I40E_PFLAN_QALLOC_LASTQ_MASK, val);
if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
num_queues = (j - base_queue) + 1;
else
num_queues = 0;
val = rd32(hw, I40E_PF_VT_PFALLOC);
- i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
- I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
- j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
- I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+ i = FIELD_GET(I40E_PF_VT_PFALLOC_FIRSTVF_MASK, val);
+ j = FIELD_GET(I40E_PF_VT_PFALLOC_LASTVF_MASK, val);
if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
num_vfs = (j - i) + 1;
else
@@ -1200,8 +1187,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
!hw->func_caps.led[idx])
return 0;
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
- I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ port = FIELD_GET(I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK, gpio_val);
/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
* if it is not our port then ignore
@@ -1245,8 +1231,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
- I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ mode = FIELD_GET(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK, gpio_val);
break;
}
@@ -1289,14 +1274,14 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
pin_func = I40E_PIN_FUNC_LED;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK;
- gpio_val |= ((pin_func <<
- I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) &
- I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK);
+ gpio_val |=
+ FIELD_PREP(I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK,
+ pin_func);
}
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
- gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+ gpio_val |= FIELD_PREP(I40E_GLGEN_GPIO_CTL_LED_MODE_MASK,
+ mode);
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
@@ -1375,8 +1360,8 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+ I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
} else {
hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
@@ -1646,12 +1631,11 @@ int i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = false;
- if ((hw->mac.type == I40E_MAC_XL710) &&
- (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
- hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 40) &&
+ hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps) &&
hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
@@ -1751,21 +1735,6 @@ int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
}
/**
- * i40e_is_aq_api_ver_ge
- * @aq: pointer to AdminQ info containing HW API version to compare
- * @maj: API major value
- * @min: API minor value
- *
- * Assert whether current HW API version is greater/equal than provided.
- **/
-static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
- u16 min)
-{
- return (aq->api_maj_ver > maj ||
- (aq->api_maj_ver == maj && aq->api_min_ver >= min));
-}
-
-/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -1891,14 +1860,14 @@ int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (rx_only_promisc && i40e_is_aq_api_ver_ge(hw, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
@@ -2001,13 +1970,13 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
if (enable) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
}
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
- if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+ if (i40e_is_aq_api_ver_ge(hw, 1, 5))
cmd->valid_flags |=
cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
cmd->seid = cpu_to_le16(seid);
@@ -2254,7 +2223,7 @@ int i40e_aq_set_switch_config(struct i40e_hw *hw,
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
scfg->mode = mode;
- if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_802_1AD, hw->caps)) {
scfg->switch_tag = cpu_to_le16(hw->switch_tag);
scfg->first_tag = cpu_to_le16(hw->first_tag);
scfg->second_tag = cpu_to_le16(hw->second_tag);
@@ -3531,8 +3500,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
- cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ cmd->type |= FIELD_PREP(I40E_AQ_LLDP_BRIDGE_TYPE_MASK, bridge_type);
desc.datalen = cpu_to_le16(buff_size);
@@ -3638,7 +3606,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
int status;
- if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
i40e_debug(hw, I40E_DEBUG_ALL,
"Restore LLDP not supported by current FW version.\n");
return -ENODEV;
@@ -3681,7 +3649,7 @@ int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
if (persist) {
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
else
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3714,7 +3682,7 @@ int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
cmd->command = I40E_AQ_LLDP_AGENT_START;
if (persist) {
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps))
cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
else
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3742,7 +3710,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
int status;
- if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ if (!test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps))
return -ENODEV;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -4213,8 +4181,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
val = rd32(hw, I40E_GLHMC_FCOEFMAX);
- fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
- >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ fcoe_fmax = FIELD_GET(I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK, val);
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return -EINVAL;
@@ -4250,30 +4217,25 @@ int i40e_set_filter_control(struct i40e_hw *hw,
/* Program required PE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
- val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PEHSIZE_MASK, settings->pe_filt_num);
/* Program required PE contexts for the PF */
val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
- val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PEDSIZE_MASK, settings->pe_cntx_num);
/* Program required FCoE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
- val |= ((u32)settings->fcoe_filt_num <<
- I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCHSIZE_MASK,
+ settings->fcoe_filt_num);
/* Program required FCoE DDP contexts for the PF */
val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
- val |= ((u32)settings->fcoe_cntx_num <<
- I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
- I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_PFFCDSIZE_MASK,
+ settings->fcoe_cntx_num);
/* Program Hash LUT size for the PF */
val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
hash_lut_size = 1;
- val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
- I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ val |= FIELD_PREP(I40E_PFQF_CTL_0_HASHLUTSIZE_MASK, hash_lut_size);
/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
if (settings->enable_fdir)
@@ -4674,8 +4636,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw,
"PHY: Can't write command to external PHY.\n");
} else {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
- *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
- I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
}
return status;
@@ -4784,8 +4745,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
if (!status) {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
- *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
- I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ *value = FIELD_GET(I40E_GLGEN_MSRWD_MDIRDDATA_MASK, command);
} else {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't read register value from external PHY.\n");
@@ -5044,7 +5004,7 @@ static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
u32 i;
*reg_val = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5077,7 +5037,7 @@ static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
int status;
u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5116,7 +5076,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u8 port_num;
u32 i;
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
status =
i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
@@ -5239,14 +5199,14 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- bool use_register;
+ bool use_register = false;
int status = 0;
int retry = 5;
u32 val = 0;
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
+ if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+ use_register = true;
+
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5301,13 +5261,13 @@ int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- bool use_register;
+ bool use_register = false;
int status = 0;
int retry = 5;
- use_register = (((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver < 5)) ||
- (hw->mac.type == I40E_MAC_X722));
+ if (i40e_is_aq_api_ver_lt(hw, 1, 5) || hw->mac.type == I40E_MAC_X722)
+ use_register = true;
+
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -5335,16 +5295,17 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
u8 mdio_num,
struct i40e_aqc_phy_register_access *cmd)
{
- if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
- if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
- cmd->cmd_flags |=
- I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
- ((mdio_num <<
- I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
- I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
- else
- i40e_debug(hw, I40E_DEBUG_PHY,
- "MDIO I/F number selection not supported by current FW version.\n");
+ if (!set_mdio ||
+ cmd->phy_interface != I40E_AQ_PHY_REG_ACCESS_EXTERNAL)
+ return;
+
+ if (test_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps)) {
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ FIELD_PREP(I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK,
+ mdio_num);
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY, "MDIO I/F number selection not supported by current FW version.\n");
}
}
@@ -5929,9 +5890,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(filters[i].element.flags) &
- I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
- I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ tnl_type = le16_get_bits(filters[i].element.flags,
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
@@ -6023,9 +5983,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(filters[i].element.flags) &
- I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
- I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ tnl_type = le16_get_bits(filters[i].element.flags,
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK);
/* Due to hardware eccentricities, the VNI for Geneve is shifted
* one more byte further than normally used for Tenant ID in
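The dominant change in the hunks above swaps open-coded (val & MASK) >> SHIFT reads and (val << SHIFT) & MASK writes for the FIELD_GET()/FIELD_PREP() helpers, which derive the shift from the mask. A rough userspace sketch of the semantics follows; it is an assumption for illustration, not the kernel implementation. The real macros in <linux/bitfield.h> also validate the mask at compile time, le16_get_bits() additionally folds in the le16_to_cpu() conversion, and the mask value below is a stand-in.
/* Minimal userspace approximation of FIELD_GET()/FIELD_PREP(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	const uint32_t mask = 0x00FF0000;	/* stand-in register field */
	uint32_t reg = 0x12AB0000;

	/* The open-coded form and FIELD_GET() agree. */
	assert(((reg & mask) >> 16) == FIELD_GET(mask, reg));

	/* Read-modify-write of one field, the pattern used above in
	 * i40e_set_filter_control().
	 */
	reg &= ~mask;
	reg |= FIELD_PREP(mask, 0x42);
	printf("field=0x%02x reg=0x%08x\n",
	       (unsigned)FIELD_GET(mask, reg), (unsigned)reg);
	return 0;
}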
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index d57dd30b0..8db1eb0c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -22,8 +22,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
return -EINVAL;
reg = rd32(hw, I40E_PRTDCB_GENS);
- *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
- I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+ *status = FIELD_GET(I40E_PRTDCB_GENS_DCBX_STATUS_MASK, reg);
return 0;
}
@@ -52,12 +51,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
- etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
- I40E_IEEE_ETS_WILLING_SHIFT);
- etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
- I40E_IEEE_ETS_CBS_SHIFT);
- etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
- I40E_IEEE_ETS_MAXTC_SHIFT);
+ etscfg->willing = FIELD_GET(I40E_IEEE_ETS_WILLING_MASK, buf[offset]);
+ etscfg->cbs = FIELD_GET(I40E_IEEE_ETS_CBS_MASK, buf[offset]);
+ etscfg->maxtcs = FIELD_GET(I40E_IEEE_ETS_MAXTC_MASK, buf[offset]);
/* Move offset to Priority Assignment Table */
offset++;
@@ -71,11 +67,9 @@ static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
@@ -126,12 +120,10 @@ static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
- I40E_IEEE_ETS_PRIO_1_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2] = priority;
- priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
- I40E_IEEE_ETS_PRIO_0_SHIFT);
- dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_1_MASK, buf[offset]);
+ dcbcfg->etsrec.prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_IEEE_ETS_PRIO_0_MASK, buf[offset]);
+ dcbcfg->etsrec.prioritytable[(i * 2) + 1] = priority;
offset++;
}
@@ -172,12 +164,9 @@ static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
- dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
- I40E_IEEE_PFC_WILLING_SHIFT);
- dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
- I40E_IEEE_PFC_MBC_SHIFT);
- dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
- I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.willing = FIELD_GET(I40E_IEEE_PFC_WILLING_MASK, buf[0]);
+ dcbcfg->pfc.mbc = FIELD_GET(I40E_IEEE_PFC_MBC_MASK, buf[0]);
+ dcbcfg->pfc.pfccap = FIELD_GET(I40E_IEEE_PFC_CAP_MASK, buf[0]);
dcbcfg->pfc.pfcenable = buf[1];
}
@@ -198,8 +187,7 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
u8 *buf;
typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
buf = tlv->tlvinfo;
/* The App priority table starts 5 octets after TLV header */
@@ -217,12 +205,10 @@ static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
* -----------------------------------------
*/
while (offset < length) {
- dcbcfg->app[i].priority = (u8)((buf[offset] &
- I40E_IEEE_APP_PRIO_MASK) >>
- I40E_IEEE_APP_PRIO_SHIFT);
- dcbcfg->app[i].selector = (u8)((buf[offset] &
- I40E_IEEE_APP_SEL_MASK) >>
- I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].priority = FIELD_GET(I40E_IEEE_APP_PRIO_MASK,
+ buf[offset]);
+ dcbcfg->app[i].selector = FIELD_GET(I40E_IEEE_APP_SEL_MASK,
+ buf[offset]);
dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
@@ -250,8 +236,7 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype);
switch (subtype) {
case I40E_IEEE_SUBTYPE_ETS_CFG:
i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
@@ -301,11 +286,9 @@ static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
- priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
- etscfg->prioritytable[i * 2] = priority;
- priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
+ priority = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK, buf[offset]);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK, buf[offset]);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
@@ -362,8 +345,7 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
u8 i;
typelength = ntohs(tlv->hdr.typelen);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
dcbcfg->numapps = length / sizeof(*app);
@@ -419,15 +401,13 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
- I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ subtype = FIELD_GET(I40E_LLDP_TLV_SUBTYPE_MASK, ouisubtype);
/* Return if not CEE DCBX */
if (subtype != I40E_CEE_DCBX_TYPE)
return;
typelength = ntohs(tlv->typelength);
- tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ tlvlen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
sizeof(struct i40e_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
@@ -437,11 +417,8 @@ static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
typelength = ntohs(sub_tlv->hdr.typelen);
- sublen = (u16)((typelength &
- I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
- subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
+ sublen = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
+ subtype = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength);
switch (subtype) {
case I40E_CEE_SUBTYPE_PG_CFG:
i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
@@ -478,8 +455,7 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
- oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
- I40E_LLDP_TLV_OUI_SHIFT);
+ oui = FIELD_GET(I40E_LLDP_TLV_OUI_MASK, ouisubtype);
switch (oui) {
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
@@ -517,10 +493,8 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib,
tlv = (struct i40e_lldp_org_tlv *)lldpmib;
while (1) {
typelength = ntohs(tlv->typelength);
- type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
- I40E_LLDP_TLV_TYPE_SHIFT);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ type = FIELD_GET(I40E_LLDP_TLV_TYPE_MASK, typelength);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
offset += sizeof(typelength) + length;
/* END TLV or beyond LLDPDU size */
@@ -594,7 +568,7 @@ static void i40e_cee_to_dcb_v1_config(
{
u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err;
+ u8 i, err;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -603,13 +577,13 @@ static void i40e_cee_to_dcb_v1_config(
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i * 2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
+ u8 tc;
+
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK,
+ cee_cfg->oper_prio_tc[i]);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
}
@@ -631,8 +605,7 @@ static void i40e_cee_to_dcb_v1_config(
dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
- status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
- I40E_AQC_CEE_APP_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_APP_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
/* Add APPs if Error is False */
if (!err) {
@@ -641,22 +614,19 @@ static void i40e_cee_to_dcb_v1_config(
/* FCoE APP */
dcbcfg->app[0].priority =
- (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
- I40E_AQC_CEE_APP_FCOE_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio);
dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
/* iSCSI APP */
dcbcfg->app[1].priority =
- (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
- I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio);
dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
/* FIP APP */
dcbcfg->app[2].priority =
- (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
- I40E_AQC_CEE_APP_FIP_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio);
dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
}
@@ -675,7 +645,7 @@ static void i40e_cee_to_dcb_config(
{
u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err, sync, oper;
+ u8 i, err, sync, oper;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -684,13 +654,13 @@ static void i40e_cee_to_dcb_config(
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_0_MASK) >>
- I40E_CEE_PGID_PRIO_0_SHIFT);
- dcbcfg->etscfg.prioritytable[i * 2] = tc;
- tc = (u8)((cee_cfg->oper_prio_tc[i] &
- I40E_CEE_PGID_PRIO_1_MASK) >>
- I40E_CEE_PGID_PRIO_1_SHIFT);
+ u8 tc;
+
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_0_MASK,
+ cee_cfg->oper_prio_tc[i]);
+ dcbcfg->etscfg.prioritytable[i * 2] = tc;
+ tc = FIELD_GET(I40E_CEE_PGID_PRIO_1_MASK,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
}
@@ -713,8 +683,7 @@ static void i40e_cee_to_dcb_config(
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
i = 0;
- status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
- I40E_AQC_CEE_FCOE_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_FCOE_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -722,15 +691,13 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* FCoE APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
- I40E_AQC_CEE_APP_FCOE_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FCOE_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
i++;
}
- status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
- I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_ISCSI_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -738,15 +705,13 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* iSCSI APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
- I40E_AQC_CEE_APP_ISCSI_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_ISCSI_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
i++;
}
- status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
- I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ status = FIELD_GET(I40E_AQC_CEE_FIP_STATUS_MASK, tlv_status);
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
@@ -754,8 +719,7 @@ static void i40e_cee_to_dcb_config(
if (!err && sync && oper) {
/* FIP APP */
dcbcfg->app[i].priority =
- (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
- I40E_AQC_CEE_APP_FIP_SHIFT;
+ FIELD_GET(I40E_AQC_CEE_APP_FIP_MASK, app_prio);
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
i++;
@@ -806,14 +770,11 @@ int i40e_get_dcb_config(struct i40e_hw *hw)
int ret = 0;
/* If Firmware version < v4.33 on X710/XL710, IEEE only */
- if ((hw->mac.type == I40E_MAC_XL710) &&
- (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)))
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_lt(hw, 4, 33))
return i40e_get_ieee_dcb_config(hw);
/* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
- if ((hw->mac.type == I40E_MAC_XL710) &&
- ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
+ if (hw->mac.type == I40E_MAC_XL710 && i40e_is_fw_ver_eq(hw, 4, 33)) {
ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
sizeof(cee_v1_cfg), NULL);
if (!ret) {
@@ -879,7 +840,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return -EOPNOTSUPP;
/* Read LLDP NVM area */
- if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ if (test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
u8 offset = 0;
if (hw->mac.type == I40E_MAC_XL710)
@@ -1191,7 +1152,7 @@ static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
selector = dcbcfg->app[i].selector & 0x7;
buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
- buf[offset + 2] =  dcbcfg->app[i].protocolid & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
/* Move to next app */
offset += 3;
i++;
@@ -1287,8 +1248,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
do {
i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
typelength = ntohs(tlv->typelength);
- length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
- I40E_LLDP_TLV_LEN_SHIFT);
+ length = FIELD_GET(I40E_LLDP_TLV_LEN_MASK, typelength);
if (length)
offset += length + I40E_IEEE_TLV_HEADER_LENGTH;
/* END TLV or beyond LLDPDU size */
@@ -1323,20 +1283,16 @@ void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
u32 reg = rd32(hw, I40E_PRTDCB_RETSC);
reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK;
- reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) &
- I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MODE_MASK, ets_mode);
reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
- reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) &
- I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK, non_ets_mode);
reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
- reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) &
- I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK, max_exponent);
reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK;
- reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) &
- I40E_PRTDCB_RETSC_LLTC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSC_LLTC_MASK, lltc_map);
wr32(hw, I40E_PRTDCB_RETSC, reg);
}
@@ -1391,14 +1347,12 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
*/
reg = rd32(hw, I40E_PRT_SWR_PM_THR);
reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
- reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) &
- I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ reg |= FIELD_PREP(I40E_PRT_SWR_PM_THR_THRESHOLD_MASK, threshold);
wr32(hw, I40E_PRT_SWR_PM_THR, reg);
reg = rd32(hw, I40E_PRTDCB_RPPMC);
reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
- reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) &
- I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK, fifo_size);
wr32(hw, I40E_PRTDCB_RPPMC, reg);
}
@@ -1440,19 +1394,17 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK;
if (pfc_en) {
- reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) &
- I40E_PRTDCB_MFLCN_RPFCM_MASK;
- reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) &
- I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCM_MASK, 1);
+ reg |= FIELD_PREP(I40E_PRTDCB_MFLCN_RPFCE_MASK,
+ pfc_en);
}
wr32(hw, I40E_PRTDCB_MFLCN, reg);
reg = rd32(hw, I40E_PRTDCB_FCCFG);
reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK;
if (pfc_en)
- reg |= (I40E_DCB_PFC_ENABLED <<
- I40E_PRTDCB_FCCFG_TFCE_SHIFT) &
- I40E_PRTDCB_FCCFG_TFCE_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_FCCFG_TFCE_MASK,
+ I40E_DCB_PFC_ENABLED);
wr32(hw, I40E_PRTDCB_FCCFG, reg);
/* FCTTV and FCRTV to be set by default */
@@ -1470,25 +1422,22 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE);
reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
- reg |= ((u32)pfc_en <<
- I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK,
+ pfc_en);
wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg);
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE);
reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
- reg |= ((u32)pfc_en <<
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK,
+ pfc_en);
wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg);
for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) {
reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i));
reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
if (pfc_en) {
- reg |= ((u32)refresh_time <<
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) &
- I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ reg |= FIELD_PREP(I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK,
+ refresh_time);
}
wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg);
}
@@ -1500,14 +1449,12 @@ void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
reg = rd32(hw, I40E_PRTDCB_TC2PFC);
reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
- reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) &
- I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_TC2PFC_TC2PFC_MASK, tc2pfc);
wr32(hw, I40E_PRTDCB_TC2PFC, reg);
reg = rd32(hw, I40E_PRTDCB_RUP);
reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK;
- reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) &
- I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_RUP_NOVLANUP_MASK, first_pfc_prio);
wr32(hw, I40E_PRTDCB_RUP, reg);
reg = rd32(hw, I40E_PRTDCB_TDPMC);
@@ -1539,8 +1486,7 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
u32 reg = rd32(hw, I40E_PRTDCB_GENC);
reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK;
- reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) &
- I40E_PRTDCB_GENC_NUMTC_MASK;
+ reg |= FIELD_PREP(I40E_PRTDCB_GENC_NUMTC_MASK, num_tc);
wr32(hw, I40E_PRTDCB_GENC, reg);
}
@@ -1554,8 +1500,7 @@ u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
{
u32 reg = rd32(hw, I40E_PRTDCB_GENC);
- return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
- I40E_PRTDCB_GENC_NUMTC_SHIFT);
+ return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
}
/**
@@ -1578,13 +1523,13 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
- I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
- reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
- I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
- reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
- I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
- reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
- I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK,
+ bw_share[i]);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
+ mode[i]);
+ reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_ETSTC_MASK,
+ prio_type[i]);
wr32(hw, I40E_PRTDCB_RETSTCC(i), reg);
}
}
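The RETSTCC hunk above is more than a mechanical conversion: the old clear used I40E_PRTDCB_RETSTCC_ETSTC_SHIFT, a bit position, where the mask was intended, so stale ETSTC bits could survive the read-modify-write. A small sketch of that failure mode, with stand-in constants rather than the real I40E register layout:
/* Clearing with a shift count instead of a mask hits the wrong bits. */
#include <assert.h>
#include <stdint.h>

#define ETSTC_SHIFT 30
#define ETSTC_MASK  (0x1u << ETSTC_SHIFT)

int main(void)
{
	uint32_t reg = ETSTC_MASK;			/* field bit currently set */
	uint32_t buggy = reg & ~(uint32_t)ETSTC_SHIFT;	/* 30 = 0b11110: clears bits 1-4 */
	uint32_t fixed = reg & ~ETSTC_MASK;

	assert(buggy == ETSTC_MASK);	/* the stale field bit survives */
	assert(fixed == 0);
	return 0;
}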
@@ -1724,8 +1669,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SLW);
reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
- reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
- I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SLW, reg);
}
@@ -1738,8 +1682,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SLT(i));
reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
- I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SLT(i), reg);
}
@@ -1748,8 +1692,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_DLW(i));
reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
- I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DLW(i), reg);
}
}
@@ -1760,8 +1704,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SHW);
reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
- reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
- I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SHW, reg);
}
@@ -1774,8 +1717,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_SHT(i));
reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
- I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SHT(i), reg);
}
@@ -1784,8 +1727,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val < old_val) {
reg = rd32(hw, I40E_PRTRPB_DHW(i));
reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
- I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DHW(i), reg);
}
}
@@ -1795,8 +1738,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
new_val = new_pb_cfg->tc_pool_size[i];
reg = rd32(hw, I40E_PRTRPB_DPS(i));
reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) &
- I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DPS_DPS_TCN_MASK, new_val);
wr32(hw, I40E_PRTRPB_DPS(i), reg);
}
@@ -1804,8 +1746,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
new_val = new_pb_cfg->shared_pool_size;
reg = rd32(hw, I40E_PRTRPB_SPS);
reg &= ~I40E_PRTRPB_SPS_SPS_MASK;
- reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) &
- I40E_PRTRPB_SPS_SPS_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SPS_SPS_MASK, new_val);
wr32(hw, I40E_PRTRPB_SPS, reg);
/* Program the shared pool low water mark per port if increasing */
@@ -1814,8 +1755,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SLW);
reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
- reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
- I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLW_SLW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SLW, reg);
}
@@ -1828,8 +1768,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SLT(i));
reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
- I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SLT_SLT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SLT(i), reg);
}
@@ -1838,8 +1778,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_DLW(i));
reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
- I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DLW_DLW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DLW(i), reg);
}
}
@@ -1850,8 +1790,7 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SHW);
reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
- reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
- I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHW_SHW_MASK, new_val);
wr32(hw, I40E_PRTRPB_SHW, reg);
}
@@ -1864,8 +1803,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_SHT(i));
reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
- I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_SHT_SHT_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_SHT(i), reg);
}
@@ -1874,8 +1813,8 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
if (new_val > old_val) {
reg = rd32(hw, I40E_PRTRPB_DHW(i));
reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
- reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
- I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= FIELD_PREP(I40E_PRTRPB_DHW_DHW_TCN_MASK,
+ new_val);
wr32(hw, I40E_PRTRPB_DHW(i), reg);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 6b60dc9b7..d76497566 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -43,7 +43,7 @@
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
-#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
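The one-character OUI-mask change above is a correctness fix: 0xFFFFFF has type signed int, and shifting it left by 8 overflows into the sign bit, which is undefined behaviour in C. The U suffix makes the whole shift unsigned and well defined, as this minimal sketch shows:
/* 0xFFFFFF << 8 overflows signed int (undefined behaviour);
 * 0xFFFFFFU << 8 is a well-defined unsigned shift.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0xFFFFFFU << 8;	/* well defined: 0xffffff00 */

	printf("mask=0x%08x\n", (unsigned)mask);
	return 0;
}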
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 077a95dad..b96a92187 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -21,8 +21,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
- I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+ *delay = FIELD_GET(I40E_PRTDCB_GENC_PFCLDA_MASK, val);
}
/**
@@ -310,8 +309,8 @@ static u8 i40e_dcbnl_getstate(struct net_device *netdev)
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
- !!(pf->flags & I40E_FLAG_DCB_ENABLED));
- return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+ test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
+ return test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0;
}
/**
@@ -331,19 +330,19 @@ static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
return ret;
dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
- state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+ state, test_bit(I40E_FLAG_DCB_ENA, pf->flags) ? 1 : 0);
/* Nothing to do */
- if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!state == !test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return ret;
if (i40e_is_sw_dcb(pf)) {
if (state) {
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
memcpy(&pf->hw.desired_dcbx_config,
&pf->hw.local_dcbx_config,
sizeof(struct i40e_dcbx_config));
} else {
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
}
} else {
/* Cannot directly manipulate FW LLDP Agent */
@@ -653,7 +652,7 @@ static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
{
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return I40E_DCBNL_STATUS_ERROR;
switch (capid) {
@@ -693,7 +692,7 @@ static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return -EINVAL;
*num = I40E_MAX_TRAFFIC_CLASS;
@@ -827,15 +826,12 @@ static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
u8 *perm_addr)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
- int i, j;
+ int i;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
for (i = 0; i < dev->addr_len; i++)
perm_addr[i] = pf->hw.mac.perm_addr[i];
-
- for (j = 0; j < dev->addr_len; j++, i++)
- perm_addr[i] = pf->hw.mac.san_addr[j];
}
static const struct dcbnl_rtnl_ops dcbnl_ops = {
@@ -891,11 +887,11 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
return;
/* DCB not enabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(hw->func_caps.iscsi))
return;
dcbxcfg = &hw->local_dcbx_config;
@@ -1002,7 +998,7 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
int i;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && !(pf->hw.func_caps.iscsi))
return;
for (i = 0; i < old_cfg->numapps; i++) {
@@ -1025,7 +1021,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* Not DCB capable */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return;
dev->dcbnl_ops = &dcbnl_ops;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index cf25bfc5d..2f53f0f53 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -81,8 +81,8 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
struct i40e_profile_info *old)
{
- unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
- unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_old = FIELD_GET(0x00FF0000, old->track_id);
+ unsigned int group_id_new = FIELD_GET(0x00FF0000, new->track_id);
/* 0x00 group must be only the first */
if (group_id_new == 0)
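FIELD_GET() also accepts a literal mask, as in the track_id group extraction above, which pulls bits 23:16 out of the DDP profile's track_id. A short sketch reusing the same stand-in macro from earlier (the track_id value is hypothetical):
/* Stand-in sketch of the DDP group-id extraction: bits 23:16 of track_id. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

int main(void)
{
	uint32_t track_id = 0x80AB1234;	/* hypothetical profile track_id */

	printf("group=0x%02x\n",
	       (unsigned)FIELD_GET(0x00FF0000u, track_id));	/* 0xab */
	return 0;
}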
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h
index 27ebc72d8..e9871dfb3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debug.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h
@@ -37,6 +37,7 @@ struct i40e_hw;
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A)
+#define hw_warn(hw, S, A...) dev_warn(i40e_hw_to_dev(hw), S, ##A)
#define i40e_debug(h, m, s, ...) \
do { \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 999c9708d..ef70ddbe9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" state[%d] = %08lx\n",
i, vsi->state[i]);
if (vsi == pf->vsi[pf->lan_vsi])
- dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
- pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev,
@@ -820,8 +819,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
/* By default we are in VEPA mode, if this is the first VF/VMDq
* VSI to be added switch to VEB mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
@@ -1029,9 +1028,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
"emp reset count: %d\n", pf->empr_count);
dev_info(&pf->pdev->dev,
"pf reset count: %d\n", pf->pfr_count);
- dev_info(&pf->pdev->dev,
- "pf tx sluggish count: %d\n",
- pf->tx_sluggish_count);
} else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
struct i40e_aqc_query_port_ets_config_resp *bw_data;
struct i40e_dcbx_config *cfg =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index ece3a6b9a..ab20202a3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -4,6 +4,7 @@
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
+#include <linux/types.h>
#include "i40e_adminq_cmd.h"
/* forward-declare the HW struct for the compiler */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index fd7163128..c84177971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -430,35 +430,35 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
struct i40e_priv_flags {
char flag_string[ETH_GSTRING_LEN];
- u64 flag;
+ u8 bitno;
bool read_only;
};
-#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+#define I40E_PRIV_FLAG(_name, _bitno, _read_only) { \
.flag_string = _name, \
- .flag = _flag, \
+ .bitno = _bitno, \
.read_only = _read_only, \
}
static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* NOTE: MFP setting cannot be changed */
- I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENA, 1),
I40E_PRIV_FLAG("total-port-shutdown",
- I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
- I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
- I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
- I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
- I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
+ I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, 1),
+ I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENA, 0),
+ I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENA, 0),
+ I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENA, 0),
+ I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENA, 0),
I40E_PRIV_FLAG("link-down-on-close",
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
- I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, 0),
+ I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX_ENA, 0),
I40E_PRIV_FLAG("disable-source-pruning",
- I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
- I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
+ I40E_FLAG_SOURCE_PRUNING_DIS, 0),
+ I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
- I40E_FLAG_VF_VLAN_PRUNING, 0),
+ I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -466,7 +466,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
/* Private flags with a global effect, restricted to PF 0 */
static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("vf-true-promisc-support",
- I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
+ I40E_FLAG_TRUE_PROMISC_ENA, 0),
};
#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)
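The priv-flags table above now stores a bit number rather than a u64 mask, because pf->flags has become a bitmap queried with test_bit(), as the surrounding hunks show. A userspace sketch of that idea, with stand-in flag names in place of the driver's I40E_FLAG_* enum and simplified non-atomic bit helpers:
/* Flags as a bitmap indexed by enum bit numbers instead of a u64 of masks. */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

enum { FLAG_MFP_ENA, FLAG_DCB_ENA, FLAGS_NBITS };	/* stand-in flags */

static void set_bit(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_bit(unsigned int bit, const unsigned long *map)
{
	return !!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
}

int main(void)
{
	DECLARE_BITMAP(flags, FLAGS_NBITS) = { 0 };

	set_bit(FLAG_DCB_ENA, flags);
	printf("mfp=%d dcb=%d\n",
	       test_bit(FLAG_MFP_ENA, flags), test_bit(FLAG_DCB_ENA, flags));
	return 0;
}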
@@ -502,7 +502,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
- if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -601,7 +601,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
10000baseKX4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
- !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseKR_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
@@ -609,7 +609,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
10000baseKR_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
- !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseKX_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
@@ -917,7 +917,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
- if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
if (hw_link_info->requested_speeds &
@@ -1488,12 +1488,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int status = 0;
- u32 flags = 0;
int err = 0;
- flags = READ_ONCE(pf->flags);
- i40e_set_fec_in_flags(fec_cfg, &flags);
-
/* Get the current phy config */
memset(&abilities, 0, sizeof(abilities));
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
@@ -1525,7 +1521,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
err = -EAGAIN;
goto done;
}
- pf->flags = flags;
+ i40e_set_fec_in_flags(fec_cfg, pf->flags);
status = i40e_update_link_info(hw);
if (status)
/* debug level message only due to relation to the link
@@ -1599,7 +1595,7 @@ static int i40e_set_fec_param(struct net_device *netdev,
return -EPERM;
if (hw->mac.type == I40E_MAC_X722 &&
- !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) {
netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
return -EOPNOTSUPP;
}
@@ -1915,7 +1911,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
last = true;
}
- offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i);
ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
last, NULL);
@@ -1956,9 +1952,8 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
val = X722_EEPROM_SCOPE_LIMIT + 1;
return val;
}
- val = (rd32(hw, I40E_GLPCI_LBARCTRL)
- & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
- >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ val = FIELD_GET(I40E_GLPCI_LBARCTRL_FL_SIZE_MASK,
+ rd32(hw, I40E_GLPCI_LBARCTRL));
/* register returns value in power of 2, 64Kbyte chunks. */
val = (64 * 1024) * BIT(val);
return val;
@@ -2015,6 +2010,18 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
+static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ return I40E_MAX_NUM_DESCRIPTORS_XL710;
+ default:
+ return I40E_MAX_NUM_DESCRIPTORS;
+ }
+}
+
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2024,8 +2031,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
- ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
+ ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = vsi->rx_rings[0]->count;
@@ -2050,12 +2057,12 @@ static int i40e_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
+ u32 new_rx_count, new_tx_count, max_num_descriptors;
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u32 new_rx_count, new_tx_count;
u16 tx_alloc_queue_pairs;
int timeout = 50;
int i, err = 0;
@@ -2063,14 +2070,15 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ max_num_descriptors = i40e_get_max_num_descriptors(pf);
+ if (ring->tx_pending > max_num_descriptors ||
ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
- ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending > max_num_descriptors ||
ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
netdev_info(netdev,
"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
ring->tx_pending, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors);
return -EINVAL;
}
@@ -2419,7 +2427,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
(pf->lan_veb < I40E_MAX_VEB) &&
- (pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags));
if (veb_stats) {
veb = pf->veb[pf->lan_veb];
@@ -2514,13 +2522,11 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
u8 *p = data;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, "%s",
- i40e_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&p, i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id != 0)
return;
for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, "%s",
- i40e_gl_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -2548,7 +2554,7 @@ static int i40e_get_ts_info(struct net_device *dev,
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
/* only report HW timestamping if PTP is enabled */
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -2570,7 +2576,7 @@ static int i40e_get_ts_info(struct net_device *dev,
BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
- if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE)
+ if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
@@ -2819,10 +2825,10 @@ static int i40e_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) {
pf->led_status = i40e_led_get(hw);
} else {
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
NULL);
ret = i40e_led_get_phy(hw, &temp_status,
@@ -2831,25 +2837,25 @@ static int i40e_set_phys_id(struct net_device *netdev,
}
return blink_freq;
case ETHTOOL_ID_ON:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS))
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps))
i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
+ if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) {
i40e_led_set(hw, pf->led_status, false);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
I40E_PHY_LED_MODE_ORIG));
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps))
i40e_aq_set_phy_debug(hw, 0, NULL);
}
break;
@@ -2886,7 +2892,6 @@ static int __i40e_get_coalesce(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
- ec->rx_max_coalesced_frames_irq = vsi->work_limit;
/* rx and tx usecs has per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
@@ -3020,7 +3025,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ if (ec->tx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
if (queue < 0) {
@@ -3278,7 +3283,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
} else if (valid) {
data->flex_word = value & I40E_USERDEF_FLEX_WORD;
data->flex_offset =
- (value & I40E_USERDEF_FLEX_OFFSET) >> 16;
+ FIELD_GET(I40E_USERDEF_FLEX_OFFSET, value);
data->flex_filter = true;
}
@@ -3628,7 +3633,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
return -EOPNOTSUPP;
@@ -3644,19 +3649,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps))
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps))
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps)) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
@@ -3666,7 +3674,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
break;
case UDP_V6_FLOW:
set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ pf->hw.caps)) {
set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
@@ -4644,7 +4653,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* main port cannot change them when in MFP mode as this would impact
* any filters on the other ports.
*/
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n");
return -EOPNOTSUPP;
}
@@ -4804,7 +4813,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -EINVAL;
pf = vsi->back;
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return -EOPNOTSUPP;
if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
@@ -5001,7 +5010,7 @@ static void i40e_get_channels(struct net_device *dev,
ch->max_combined = i40e_max_channels(vsi);
/* report info for other vector */
- ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0;
ch->max_other = ch->other_count;
/* Note: This code assumes DCB is disabled for now. */
@@ -5044,7 +5053,7 @@ static int i40e_set_channels(struct net_device *dev,
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0))
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
@@ -5109,15 +5118,13 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
/**
* i40e_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Returns 0 on
* success.
**/
-static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int i40e_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5125,13 +5132,12 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
int ret;
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- seed = key;
+ seed = rxfh->key;
lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -5139,7 +5145,7 @@ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (ret)
goto out;
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)(lut[i]);
+ rxfh->indir[i] = (u32)(lut[i]);
out:
kfree(lut);
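The rxfh hunks above and below move get_rxfh()/set_rxfh() to a single parameter struct carrying indir, key and hfunc together instead of three separate arguments. A minimal sketch of that consolidated-parameter pattern; the struct below mirrors only the field names visible in the hunks, not the exact kernel definition of struct ethtool_rxfh_param:
/* Consolidated-parameter pattern for the RSS hash accessors. */
#include <stdint.h>
#include <stdio.h>

struct rxfh_param {		/* illustrative stand-in */
	uint8_t  hfunc;
	uint32_t *indir;
	uint8_t  *key;
};

static int get_rxfh(struct rxfh_param *rxfh)
{
	rxfh->hfunc = 1;	/* e.g. ETH_RSS_HASH_TOP */
	if (!rxfh->indir)	/* caller may ask for hfunc only */
		return 0;
	rxfh->indir[0] = 42;
	return 0;
}

int main(void)
{
	uint32_t indir[1];
	struct rxfh_param p = { .indir = indir };

	get_rxfh(&p);
	printf("hfunc=%u indir[0]=%u\n", p.hfunc, (unsigned)p.indir[0]);
	return 0;
}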
@@ -5150,15 +5156,15 @@ out:
/**
* i40e_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int i40e_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5166,17 +5172,18 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
u8 *seed = NULL;
u16 i;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (key) {
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
GFP_KERNEL);
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE);
seed = vsi->rss_hkey_user;
}
if (!vsi->rss_lut_user) {
@@ -5186,9 +5193,9 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
else
i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
vsi->rss_size);
@@ -5215,11 +5222,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
u32 i, j, ret_flags = 0;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
- priv_flags = &i40e_gstrings_priv_flags[i];
+ priv_flag = &i40e_gstrings_priv_flags[i];
- if (priv_flags->flag & pf->flags)
+ if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i);
}
@@ -5227,11 +5234,11 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
return ret_flags;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
- priv_flags = &i40e_gl_gstrings_priv_flags[j];
+ priv_flag = &i40e_gl_gstrings_priv_flags[j];
- if (priv_flags->flag & pf->flags)
+ if (test_bit(priv_flag->bitno, pf->flags))
ret_flags |= BIT(i + j);
}
@@ -5245,8 +5252,10 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
**/
static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
{
+ DECLARE_BITMAP(changed_flags, I40E_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(orig_flags, I40E_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(new_flags, I40E_PF_FLAGS_NBITS);
struct i40e_netdev_priv *np = netdev_priv(dev);
- u64 orig_flags, new_flags, changed_flags;
enum i40e_admin_queue_err adq_err;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
@@ -5254,51 +5263,57 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
int status;
u32 i, j;
- orig_flags = READ_ONCE(pf->flags);
- new_flags = orig_flags;
+ bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS);
+ bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS);
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- const struct i40e_priv_flags *priv_flags;
-
- priv_flags = &i40e_gstrings_priv_flags[i];
+ const struct i40e_priv_flags *priv_flag;
+ bool new_val;
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
+ priv_flag = &i40e_gstrings_priv_flags[i];
+ new_val = (flags & BIT(i)) ? true : false;
/* If this is a read-only flag, it can't be changed */
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
+ if (priv_flag->read_only &&
+ test_bit(priv_flag->bitno, orig_flags) != new_val)
return -EOPNOTSUPP;
+
+ if (new_val)
+ set_bit(priv_flag->bitno, new_flags);
+ else
+ clear_bit(priv_flag->bitno, new_flags);
}
if (pf->hw.pf_id != 0)
goto flags_complete;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
- const struct i40e_priv_flags *priv_flags;
+ const struct i40e_priv_flags *priv_flag;
+ bool new_val;
- priv_flags = &i40e_gl_gstrings_priv_flags[j];
-
- if (flags & BIT(i + j))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
+ priv_flag = &i40e_gl_gstrings_priv_flags[j];
+ new_val = (flags & BIT(i + j)) ? true : false;
/* If this is a read-only flag, it can't be changed */
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
+ if (priv_flag->read_only &&
+ test_bit(priv_flag->bitno, orig_flags) != new_val)
return -EOPNOTSUPP;
+
+ if (new_val)
+ set_bit(priv_flag->bitno, new_flags);
+ else
+ clear_bit(priv_flag->bitno, new_flags);
}
flags_complete:
- changed_flags = orig_flags ^ new_flags;
+ bitmap_xor(changed_flags, new_flags, orig_flags, I40E_PF_FLAGS_NBITS);
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags))
reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
- if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
+
+ if (test_bit(I40E_FLAG_VEB_STATS_ENA, changed_flags) ||
+ test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) ||
+ test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, changed_flags))
reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
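The flags rework above turns a u64 mask word into a fixed-size bitmap, so every `flags & FOO` test becomes test_bit() and every update becomes set_bit()/clear_bit(). A condensed sketch of the change-detection idiom, using the standard <linux/bitmap.h> helpers with the flag names from this diff:

	DECLARE_BITMAP(orig, I40E_PF_FLAGS_NBITS);
	DECLARE_BITMAP(new, I40E_PF_FLAGS_NBITS);
	DECLARE_BITMAP(changed, I40E_PF_FLAGS_NBITS);

	bitmap_copy(orig, pf->flags, I40E_PF_FLAGS_NBITS);	/* snapshot */
	bitmap_copy(new, pf->flags, I40E_PF_FLAGS_NBITS);
	set_bit(I40E_FLAG_FD_ATR_ENA, new);			/* requested change */

	/* changed = new XOR orig: exactly the bits that differ */
	bitmap_xor(changed, new, orig, I40E_PF_FLAGS_NBITS);
	if (test_bit(I40E_FLAG_FD_ATR_ENA, changed))
		; /* react to the transition */

Note that the XOR has to compare the proposed bitmap against the snapshot; XOR-ing pf->flags against its own unmodified copy would always yield an empty set and no change would ever be detected.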
/* Before we finalize any flag changes, we need to perform some
@@ -5306,8 +5321,8 @@ flags_complete:
*/
/* ATR eviction is not supported on all devices */
- if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) &&
- !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, new_flags) &&
+ !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
@@ -5318,15 +5333,14 @@ flags_complete:
* disable LLDP, however we _must_ not allow the user to enable/disable
* LLDP with this flag on unsupported FW versions.
*/
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
- dev_warn(&pf->pdev->dev,
- "Device does not support changing FW LLDP\n");
- return -EOPNOTSUPP;
- }
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags) &&
+ !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FW LLDP\n");
+ return -EOPNOTSUPP;
}
- if (changed_flags & I40E_FLAG_RS_FEC &&
+ if (test_bit(I40E_FLAG_RS_FEC, changed_flags) &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B) {
dev_warn(&pf->pdev->dev,
@@ -5334,7 +5348,7 @@ flags_complete:
return -EOPNOTSUPP;
}
- if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ if (test_bit(I40E_FLAG_BASE_R_FEC, changed_flags) &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B &&
pf->hw.device_id != I40E_DEV_ID_KX_X722) {
@@ -5349,17 +5363,17 @@ flags_complete:
*/
/* Flush current ATR settings if ATR was disabled */
- if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
- !(new_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, changed_flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA, new_flags)) {
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
- if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
+ if (test_bit(I40E_FLAG_TRUE_PROMISC_ENA, changed_flags)) {
u16 sw_flags = 0, valid_flags = 0;
int ret;
- if (!(new_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ if (!test_bit(I40E_FLAG_TRUE_PROMISC_ENA, new_flags))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
@@ -5374,17 +5388,17 @@ flags_complete:
}
}
- if ((changed_flags & I40E_FLAG_RS_FEC) ||
- (changed_flags & I40E_FLAG_BASE_R_FEC)) {
+ if (test_bit(I40E_FLAG_RS_FEC, changed_flags) ||
+ test_bit(I40E_FLAG_BASE_R_FEC, changed_flags)) {
u8 fec_cfg = 0;
- if (new_flags & I40E_FLAG_RS_FEC &&
- new_flags & I40E_FLAG_BASE_R_FEC) {
+ if (test_bit(I40E_FLAG_RS_FEC, new_flags) &&
+ test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) {
fec_cfg = I40E_AQ_SET_FEC_AUTO;
- } else if (new_flags & I40E_FLAG_RS_FEC) {
+ } else if (test_bit(I40E_FLAG_RS_FEC, new_flags)) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS |
I40E_AQ_SET_FEC_ABILITY_RS);
- } else if (new_flags & I40E_FLAG_BASE_R_FEC) {
+ } else if (test_bit(I40E_FLAG_BASE_R_FEC, new_flags)) {
fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR |
I40E_AQ_SET_FEC_ABILITY_KR);
}
@@ -5392,35 +5406,35 @@ flags_complete:
dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
}
- if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
+ if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) &&
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, orig_flags)) {
dev_err(&pf->pdev->dev,
"Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ if (test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, changed_flags) &&
pf->num_alloc_vfs) {
dev_warn(&pf->pdev->dev,
"Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & I40E_FLAG_LEGACY_RX) &&
+ if (test_bit(I40E_FLAG_LEGACY_RX_ENA, changed_flags) &&
I40E_2K_TOO_SMALL_WITH_PADDING) {
dev_warn(&pf->pdev->dev,
"2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
return -EOPNOTSUPP;
}
- if ((changed_flags & new_flags &
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
- (new_flags & I40E_FLAG_MFP_ENABLED))
+ if (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, changed_flags) &&
+ test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, new_flags) &&
+ test_bit(I40E_FLAG_MFP_ENA, new_flags))
dev_warn(&pf->pdev->dev,
"Turning on link-down-on-close flag may affect other partitions\n");
- if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, changed_flags)) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, new_flags)) {
#ifdef CONFIG_I40E_DCB
i40e_dcb_sw_default_config(pf);
#endif /* CONFIG_I40E_DCB */
@@ -5461,7 +5475,7 @@ flags_complete:
* initialization or (b) while holding the RTNL lock, we don't need
* anything fancy here.
*/
- pf->flags = new_flags;
+ bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS);
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
@@ -5491,7 +5505,7 @@ static int i40e_get_module_info(struct net_device *netdev,
int status;
/* Check if firmware supports reading module EEPROM. */
- if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) {
netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
return -EINVAL;
}
@@ -5571,8 +5585,8 @@ static int i40e_get_module_info(struct net_device *netdev,
modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
break;
default:
- netdev_err(vsi->netdev, "Module type unrecognized\n");
- return -EINVAL;
+ netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n");
+ return -EOPNOTSUPP;
}
return 0;
}
@@ -5768,7 +5782,7 @@ static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
static const struct ethtool_ops i40e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH |
ETHTOOL_COALESCE_TX_USECS_HIGH,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b0dc0fc6b..f3c1df46f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -377,7 +377,7 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
val = rd32(&pf->hw,
I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
tx_ring->vsi->base_vector - 1));
@@ -1203,11 +1203,9 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);
nsd->rx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ FIELD_GET(I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK, val);
i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
pf->stat_offsets_loaded,
&osd->tx_lpi_count, &nsd->tx_lpi_count);
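The open-coded mask-and-shift extractions removed here give way to FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask itself at compile time (the mask must be a compile-time constant). The equivalence, as a sketch:

	#include <linux/bitfield.h>

	u32 val = rd32(hw, I40E_PRTPM_EEE_STAT);

	/* identical result to:
	 * (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
	 *		I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT
	 */
	u32 lpi = FIELD_GET(I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK, val);

The same conversion repeats throughout the hunks below for reset type, queue indices, filter counts, and MDD event decoding.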
@@ -1215,13 +1213,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->rx_lpi_count, &nsd->rx_lpi_count);
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
!test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
!test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
nsd->fd_atr_status = true;
else
@@ -1259,8 +1257,11 @@ int i40e_count_filters(struct i40e_vsi *vsi)
int bkt;
int cnt = 0;
- hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
- ++cnt;
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (f->state == I40E_FILTER_NEW ||
+ f->state == I40E_FILTER_ACTIVE)
+ ++cnt;
+ }
return cnt;
}
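i40e_count_filters() previously counted every entry in the MAC filter hash, including ones queued for deletion or rejected by firmware; it now counts only filters that are, or are about to be, programmed. A hedged sketch of the state set involved, assuming the usual i40e filter-state enum in i40e.h:

	/* illustrative; see enum i40e_filter_state */
	I40E_FILTER_NEW		/* queued for addition   - counted */
	I40E_FILTER_ACTIVE	/* programmed in hardware - counted */
	I40E_FILTER_FAILED	/* add rejected by FW     - skipped */
	I40E_FILTER_REMOVE	/* queued for deletion    - skipped */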
@@ -1491,7 +1492,7 @@ static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
return pvid;
is_any = (trusted ||
- !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+ !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));
if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
(!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
@@ -1896,7 +1897,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
u8 *lut;
int ret;
- if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return 0;
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
@@ -2051,7 +2052,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
- else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_queue_pairs = pf->num_lan_msix;
else
vsi->num_queue_pairs = 1;
@@ -2064,7 +2065,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
else
num_tc_qps = vsi->alloc_queue_pairs;
- if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
/* Find numtc from enabled TC bitmap */
for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i)) /* TC is enabled */
@@ -2083,7 +2084,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Do not allow use more TC queue pairs than MSI-X vectors exist */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2095,8 +2096,10 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED)) ||
+ if ((!test_bit(I40E_FLAG_FD_SB_ENA,
+ pf->flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA,
+ pf->flags)) ||
vsi->tc_config.enabled_tc != 1) {
qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps);
@@ -2482,7 +2485,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
* promisc behavior but will not get VF or VMDq traffic
@@ -2913,7 +2916,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
*/
static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
{
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags))
return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
@@ -3468,8 +3471,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
- ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
+ ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
ring->atr_count = 0;
} else {
ring->atr_sample_rate = 0;
@@ -3484,9 +3487,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED));
- tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags))
+ tx_ctx.fd_ena = 1;
+ if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags))
+ tx_ctx.timesync_ena = 1;
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
tx_ctx.head_wb_ena = 1;
@@ -3538,21 +3543,19 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
else
return -EINVAL;
- qtx_ctl |= (ring->ch->vsi_number <<
- I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ ring->ch->vsi_number);
} else {
if (vsi->type == I40E_VSI_VMDQ2) {
qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
- qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ vsi->id);
} else {
qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
}
}
- qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
- I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
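FIELD_PREP() is the composition-side mirror of FIELD_GET(): it shifts a value into the position described by a constant mask, and for compile-time values it also build-checks that the value fits the field. A short equivalence sketch based on the hunk above:

	/* old: qtx_ctl |= (id << SHIFT) & MASK;  new: */
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK, vsi->id);
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);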
@@ -3684,7 +3687,7 @@ skip:
}
/* configure Rx buffer alignment */
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) {
if (I40E_2K_TOO_SMALL_WITH_PADDING) {
dev_info(&vsi->back->pdev->dev,
"2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
@@ -3782,7 +3785,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
for (i = 0; i < vsi->num_queue_pairs; i++) {
rx_ring = vsi->rx_rings[i];
@@ -3849,7 +3852,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return;
/* Reset FDir counters as we're replaying all existing filters */
@@ -3915,6 +3918,12 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
q_vector->tx.target_itr >> 1);
q_vector->tx.current_itr = q_vector->tx.target_itr;
+ /* Set ITR for software interrupts triggered after exiting
+ * busy-loop polling.
+ */
+ wr32(hw, I40E_PFINT_ITRN(I40E_SW_ITR, vector - 1),
+ I40E_ITR_20K);
+
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
@@ -3987,10 +3996,10 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, val);
@@ -4226,7 +4235,7 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
}
/* disable each interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = vsi->base_vector;
i < (vsi->num_q_vectors + vsi->base_vector); i++)
wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
@@ -4252,7 +4261,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
int i;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
@@ -4273,7 +4282,7 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
i40e_flush(&pf->hw);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4308,7 +4317,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
(icr0 & I40E_PFINT_ICR0_SWINT_MASK))
pf->sw_int_count++;
- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
(icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
@@ -4359,8 +4368,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
val = rd32(hw, I40E_GLGEN_RSTAT);
- val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
- >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ val = FIELD_GET(I40E_GLGEN_RSTAT_RESET_TYPE_MASK, val);
if (val == I40E_RESET_CORER) {
pf->corer_count++;
} else if (val == I40E_RESET_GLOBR) {
@@ -4501,7 +4509,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags))
i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
return budget > 0;
@@ -4614,9 +4622,9 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
struct i40e_pf *pf = vsi->back;
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
err = i40e_vsi_request_irq_msix(vsi, basename);
- else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags))
err = request_irq(pf->pdev->irq, i40e_intr, 0,
pf->int_name, pf);
else
@@ -4648,7 +4656,7 @@ static void i40e_netpoll(struct net_device *netdev)
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
@@ -4927,27 +4935,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int pf_q, err, q_end;
+ u32 pf_q, tx_q_end, rx_q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- q_end = vsi->base_queue + vsi->num_queue_pairs;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
- i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+ tx_q_end = vsi->base_queue +
+ vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
- err = i40e_control_wait_rx_q(pf, pf_q, false);
- if (err)
- dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d disable timeout\n",
- vsi->seid, pf_q);
- }
+ rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
+ i40e_control_rx_q(pf, pf_q, false);
msleep(I40E_DISABLE_TX_GAP_MSEC);
- pf_q = vsi->base_queue;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
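The split into tx_q_end and rx_q_end matters because an XDP-enabled VSI owns a second bank of Tx queues stacked directly after the regular ones, while the Rx side is unaffected. Queue layout sketch (B = base_queue, N = alloc_queue_pairs):

	/* Tx:	[B .. B+N-1]		regular rings
	 *	[B+N .. B+2N-1]		XDP rings (only with XDP enabled)
	 * Rx:	[B .. B+num_queue_pairs-1]
	 */

Stopping only num_queue_pairs Tx queues, as the old loop did, would leave the XDP rings enabled.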
@@ -4988,7 +4992,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u32 val, qp;
int i;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
if (!vsi->q_vectors)
return;
@@ -5022,8 +5026,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
* next_q field of the registers.
*/
val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
- qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
- >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK,
+ val);
val |= I40E_QUEUE_END_OF_LIST
<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
@@ -5045,8 +5049,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
val = rd32(hw, I40E_QINT_TQCTL(qp));
- next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
- >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+ next = FIELD_GET(I40E_QINT_TQCTL_NEXTQ_INDX_MASK,
+ val);
val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
I40E_QINT_TQCTL_MSIX0_INDX_MASK |
@@ -5064,8 +5068,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
free_irq(pf->pdev->irq, pf);
val = rd32(hw, I40E_PFINT_LNKLST0);
- qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
- >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ qp = FIELD_GET(I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK, val);
val |= I40E_QUEUE_END_OF_LIST
<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
wr32(hw, I40E_PFINT_LNKLST0, val);
@@ -5150,16 +5153,17 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
pci_disable_msix(pf->pdev);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
kfree(pf->irq_pile);
pf->irq_pile = NULL;
- } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
pci_disable_msi(pf->pdev);
}
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+ clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
}
/**
@@ -5499,11 +5503,11 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return 1;
/* SFP mode will be enabled for all TCs on port */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
return i40e_dcb_get_num_tc(dcbcfg);
/* MFP mode return count of enabled TCs for this PF */
@@ -5533,11 +5537,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
/* If neither MQPRIO nor DCB is enabled for this PF then just return
* default TC
*/
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
/* MFP enabled and iSCSI PF type */
@@ -5626,7 +5630,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (i40e_is_tc_mqprio_enabled(pf))
return 0;
- if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
@@ -5879,7 +5883,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
}
vsi->reconfig_rss = false;
}
- if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
@@ -6292,7 +6296,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
if (ch->type == I40E_VSI_VMDQ2)
ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id =
@@ -6597,8 +6601,8 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
* VSI to be added switch to VEB mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
if (vsi->type == I40E_VSI_MAIN) {
if (i40e_is_tc_mqprio_enabled(pf))
@@ -7009,9 +7013,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
if (need_reconfig) {
/* Enable DCB tagging only when more than one TC */
if (new_numtc > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
@@ -7101,7 +7105,7 @@ out:
set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
/* registers are set, lets apply */
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps))
ret = i40e_hw_set_dcb_config(pf, new_cfg);
}
@@ -7122,7 +7126,7 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err;
- if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
+ if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) {
/* Update the local cached instance with TC0 ETS */
memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
@@ -7183,12 +7187,12 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
* Also do not enable DCBx if FW LLDP agent is disabled
*/
- if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
+ if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) {
dev_info(&pf->pdev->dev, "DCB is not supported.\n");
err = -EOPNOTSUPP;
goto out;
}
- if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) {
dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
err = i40e_dcb_sw_default_config(pf);
if (err) {
@@ -7199,8 +7203,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
/* at init capable but disabled */
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
goto out;
}
err = i40e_init_dcb(hw, true);
@@ -7215,20 +7219,20 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Enable DCB tagging only when more than one TC
* or explicitly disable if only one TC
*/
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
- pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Query for DCB configuration failed, err %pe aq_err %s\n",
@@ -7388,7 +7392,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
i40e_vsi_configure_msix(vsi);
else
i40e_configure_msi_and_legacy(vsi);
@@ -7492,7 +7496,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
* and its speed values are OK, no need for a flap
* if non_zero_phy_type was set, still need to force up
*/
- if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+ if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags))
non_zero_phy_type = true;
else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
return 0;
@@ -7508,7 +7512,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
/* Copy the old settings, except of phy_type */
config.abilities = abilities.abilities;
- if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+ if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
if (is_up)
config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
else
@@ -7558,8 +7562,8 @@ int i40e_up(struct i40e_vsi *vsi)
int err;
if (vsi->type == I40E_VSI_MAIN &&
- (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
- vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
i40e_force_link_state(vsi->back, true);
err = i40e_vsi_configure(vsi);
@@ -7587,8 +7591,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi);
i40e_vsi_stop_rings(vsi);
if (vsi->type == I40E_VSI_MAIN &&
- (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
- vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+ (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) ||
+ test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags)))
i40e_force_link_state(vsi->back, false);
i40e_napi_disable_all(vsi);
@@ -7994,7 +7998,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
struct i40e_fwd_adapter *fwd;
int avail_macvlan, ret;
- if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
@@ -8189,23 +8193,23 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;
if (!hw) {
- pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
goto config_tc;
}
/* Check if MFP enabled */
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
netdev_info(netdev,
"Configuring TC not supported in MFP mode\n");
return ret;
}
switch (mode) {
case TC_MQPRIO_MODE_DCB:
- pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
/* Check if DCB enabled to continue */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev,
"DCB is not enabled for adapter\n");
return ret;
@@ -8219,20 +8223,20 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
}
break;
case TC_MQPRIO_MODE_CHANNEL:
- if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+ if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) {
netdev_info(netdev,
"Full offload of TC Mqprio options is not supported when DCB is enabled\n");
return ret;
}
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return ret;
ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
if (ret)
return ret;
memcpy(&vsi->mqprio_qopt, mqprio_qopt,
sizeof(*mqprio_qopt));
- pf->flags |= I40E_FLAG_TC_MQPRIO;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
break;
default:
return -EINVAL;
@@ -8816,11 +8820,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EINVAL;
}
- if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
dev_err(&vsi->back->pdev->dev,
"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
- vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
+ set_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
}
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
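Configuring a tc-flower cloud filter parks Flow Director sideband support rather than dropping it: FD_SB_ENA is cleared and FD_SB_TO_CLOUD_FILTER is set as a marker, so that removing the last cloud filter can restore sideband (see i40e_delete_clsflower() and i40e_cloud_filter_exit() below). The handshake, as a sketch:

	/* entering cloud-filter mode */
	clear_bit(I40E_FLAG_FD_SB_ENA, flags);
	set_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, flags);

	/* last cloud filter removed */
	if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, flags) &&
	    !test_bit(I40E_FLAG_FD_SB_INACTIVE, flags)) {
		set_bit(I40E_FLAG_FD_SB_ENA, flags);
		clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, flags);
	}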
@@ -8916,11 +8920,11 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
pf->num_cloud_filters--;
if (!pf->num_cloud_filters)
- if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
- !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
return 0;
}
@@ -9221,11 +9225,11 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf)
}
pf->num_cloud_filters = 0;
- if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
- !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
}
@@ -9313,7 +9317,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, true, lock_acquired);
dev_info(&pf->pdev->dev,
- pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
@@ -9428,12 +9432,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
(hw->phy.link_info.link_speed &
~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
- !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
/* let firmware decide if the DCB should be disabled */
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -9469,7 +9473,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
} else {
dev_info(&pf->pdev->dev,
"Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
@@ -9497,9 +9501,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Enable DCB tagging only when more than one TC */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Reconfiguration needed quiesce all VSIs */
@@ -9567,18 +9571,18 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
queue, qtx_ctl);
+ if (FIELD_GET(I40E_QTX_CTL_PFVF_Q_MASK, qtx_ctl) !=
+ I40E_QTX_CTL_VF_QUEUE)
+ return;
+
/* Queue belongs to VF, find the VF and issue VF reset */
- if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
- >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
- vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
- >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
- vf_id -= hw->func_caps.vf_base_id;
- vf = &pf->vf[vf_id];
- i40e_vc_notify_vf_reset(vf);
- /* Allow VF to process pending reset notification */
- msleep(20);
- i40e_reset_vf(vf, false);
- }
+ vf_id = FIELD_GET(I40E_QTX_CTL_VFVM_INDX_MASK, qtx_ctl);
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
}
/**
@@ -9604,8 +9608,7 @@ u32 i40e_get_current_fd_count(struct i40e_pf *pf)
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
- ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
- I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ FIELD_GET(I40E_PFQF_FDSTAT_BEST_CNT_MASK, val);
return fcnt_prog;
}
@@ -9619,8 +9622,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
- ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
- I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ FIELD_GET(I40E_GLQF_FDCNT_0_BESTCNT_MASK, val);
return fcnt_prog;
}
@@ -9631,7 +9633,7 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
@@ -9652,7 +9654,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
@@ -9967,7 +9969,7 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
i40e_ptp_set_increment(pf);
#ifdef CONFIG_I40E_DCB
if (new_link == old_link)
@@ -9984,13 +9986,13 @@ static void i40e_link_event(struct i40e_pf *pf)
memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
err = i40e_dcb_sw_default_config(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
} else {
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_CAPABLE;
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
}
}
#endif /* CONFIG_I40E_DCB */
@@ -10015,7 +10017,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
- if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
+ if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) ||
test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
i40e_link_event(pf);
@@ -10026,7 +10028,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
- if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+ if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
@@ -10115,7 +10117,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)) &&
- (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+ (!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) {
dev_err(&pf->pdev->dev,
"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
dev_err(&pf->pdev->dev,
@@ -10143,7 +10145,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
/* check for error indications */
- val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ val = rd32(&pf->hw, I40E_PF_ARQLEN);
oldval = val;
if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
if (hw->debug_mask & I40E_DEBUG_AQ)
@@ -10162,9 +10164,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
}
if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.arq.len, val);
+ wr32(&pf->hw, I40E_PF_ARQLEN, val);
- val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ val = rd32(&pf->hw, I40E_PF_ATQLEN);
oldval = val;
if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
if (pf->hw.debug_mask & I40E_DEBUG_AQ)
@@ -10182,7 +10184,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
}
if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.asq.len, val);
+ wr32(&pf->hw, I40E_PF_ATQLEN, val);
event.buf_len = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
@@ -10242,9 +10244,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
opcode);
break;
}
- } while (i++ < pf->adminq_work_limit);
+ } while (i++ < I40E_AQ_WORK_LIMIT);
- if (i < pf->adminq_work_limit)
+ if (i < I40E_AQ_WORK_LIMIT)
clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
/* re-enable Admin queue interrupt cause */
@@ -10421,7 +10423,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
veb->bridge_mode = BRIDGE_MODE_VEB;
else
veb->bridge_mode = BRIDGE_MODE_VEPA;
@@ -10566,7 +10568,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
}
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
return;
/* find existing VSI and see if it needs configuring */
@@ -10578,8 +10580,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
return;
}
}
@@ -10957,14 +10959,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_aq_set_dcb_parameters(hw, false, NULL);
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
} else {
i40e_aq_set_dcb_parameters(hw, true, NULL);
ret = i40e_init_pf_dcb(pf);
if (ret) {
dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
ret);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
/* Continue without DCB enabled */
}
}
@@ -11085,7 +11087,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
wr32(hw, I40E_REG_MSS, val);
}
- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
@@ -11095,7 +11097,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
ret = i40e_setup_misc_vector(pf);
if (ret)
goto end_unlock;
@@ -11202,14 +11204,10 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
- u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
- I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
- I40E_GL_MDET_TX_VF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
- I40E_GL_MDET_TX_EVENT_SHIFT;
- u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT) -
+ u8 pf_num = FIELD_GET(I40E_GL_MDET_TX_PF_NUM_MASK, reg);
+ u16 vf_num = FIELD_GET(I40E_GL_MDET_TX_VF_NUM_MASK, reg);
+ u8 event = FIELD_GET(I40E_GL_MDET_TX_EVENT_MASK, reg);
+ u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
@@ -11219,12 +11217,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
}
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
- u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
- I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
- I40E_GL_MDET_RX_EVENT_SHIFT;
- u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT) -
+ u8 func = FIELD_GET(I40E_GL_MDET_RX_FUNCTION_MASK, reg);
+ u8 event = FIELD_GET(I40E_GL_MDET_RX_EVENT_MASK, reg);
+ u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
if (netif_msg_rx_err(pf))
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
@@ -11370,7 +11365,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_q_vectors = pf->num_lan_msix;
else
vsi->num_q_vectors = 1;
@@ -11688,7 +11683,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
WRITE_ONCE(vsi->tx_rings[i], ring++);
@@ -11705,7 +11700,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->size = 0;
ring->dcb_tc = 0;
- if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps))
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
@@ -11769,7 +11764,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
int v_actual;
int iwarp_requested = 0;
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return -ENODEV;
/* The number of vectors we'll request will be comprised of:
@@ -11808,7 +11803,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
vectors_left -= pf->num_lan_msix;
/* reserve one vector for sideband flow director */
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
if (vectors_left) {
pf->num_fdsb_msix = 1;
v_budget++;
@@ -11819,7 +11814,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/* can we reserve enough for iWARP? */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
iwarp_requested = pf->num_iwarp_msix;
if (!vectors_left)
@@ -11831,7 +11826,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
/* any vectors left over go for VMDq support */
- if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) {
if (!vectors_left) {
pf->num_vmdq_msix = 0;
pf->num_vmdq_qps = 0;
@@ -11888,7 +11883,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
v_actual = i40e_reserve_msix_vectors(pf, v_budget);
if (v_actual < I40E_MIN_MSIX) {
- pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
pci_disable_msix(pf->pdev);
@@ -11926,7 +11921,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->num_lan_msix = 1;
pf->num_iwarp_msix = 1;
} else {
@@ -11934,7 +11929,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
break;
default:
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->num_iwarp_msix = min_t(int, (vec / 3),
iwarp_requested);
pf->num_vmdq_vsis = min_t(int, (vec / 3),
@@ -11943,7 +11938,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_vsis = min_t(int, (vec / 2),
I40E_DEFAULT_NUM_VMDQ_VSI);
}
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
pf->num_fdsb_msix = 1;
vec--;
}
@@ -11955,22 +11950,20 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- (pf->num_fdsb_msix == 0)) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) {
dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
- (pf->num_vmdq_msix == 0)) {
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
}
- if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
- (pf->num_iwarp_msix == 0)) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) &&
+ pf->num_iwarp_msix == 0) {
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
}
i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
@@ -12024,7 +12017,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_q_vectors = vsi->num_q_vectors;
else if (vsi == pf->vsi[pf->lan_vsi])
num_q_vectors = 1;
@@ -12055,38 +12048,39 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
int vectors = 0;
ssize_t size;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
vectors = i40e_init_msix(pf);
if (vectors < 0) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_MSIX_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
}
}
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSI_ENA, pf->flags)) {
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
vectors = pci_enable_msi(pf->pdev);
if (vectors < 0) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
vectors);
- pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ clear_bit(I40E_FLAG_MSI_ENA, pf->flags);
}
vectors = 1; /* one MSI or Legacy vector */
}
- if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* set up vector assignment tracking */
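Taken together, i40e_init_interrupt_scheme() walks a three-step ladder, clearing the corresponding capability bit at each failure:

	/* MSI-X:  i40e_init_msix(); on failure clear MSIX_ENA plus the
	 *	   features that need multiple vectors, re-plan queues
	 * MSI:	   pci_enable_msi(); on failure clear MSI_ENA
	 * legacy: single shared IRQ, nothing left to clear
	 */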
@@ -12119,7 +12113,8 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
* scheme. We need to re-enabled them here in order to attempt to
* re-acquire the MSI or MSI-X vectors
*/
- pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+ set_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
err = i40e_init_interrupt_scheme(pf);
if (err)
@@ -12141,7 +12136,7 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
if (err)
goto err_unwind;
- if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags))
i40e_client_update_msix_info(pf);
return 0;
@@ -12169,7 +12164,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
int err;
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
err = i40e_setup_misc_vector(pf);
if (err) {
@@ -12179,7 +12174,7 @@ static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
return err;
}
} else {
- u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+ u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED;
err = request_irq(pf->pdev->irq, i40e_intr, flags,
pf->int_name, pf);
@@ -12383,7 +12378,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct i40e_pf *pf = vsi->back;
- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return i40e_config_rss_aq(vsi, seed, lut, lut_size);
else
return i40e_config_rss_reg(vsi, seed, lut, lut_size);
@@ -12402,7 +12397,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
struct i40e_pf *pf = vsi->back;
- if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
return i40e_get_rss_aq(vsi, seed, lut, lut_size);
else
return i40e_get_rss_reg(vsi, seed, lut, lut_size);
@@ -12505,7 +12500,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int new_rss_size;
- if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
return 0;
queue_count = min_t(int, queue_count, num_online_cpus());
@@ -12738,9 +12733,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
u16 pow;
/* Set default capability flags */
- pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
- I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_MSIX_ENABLED;
+ bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS);
+ set_bit(I40E_FLAG_MSI_ENA, pf->flags);
+ set_bit(I40E_FLAG_MSIX_ENA, pf->flags);
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_RX_DEF;
@@ -12760,14 +12755,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
if (pf->hw.func_caps.rss) {
- pf->flags |= I40E_FLAG_RSS_ENABLED;
+ set_bit(I40E_FLAG_RSS_ENA, pf->flags);
pf->alloc_rss_size = min_t(int, pf->rss_size_max,
num_online_cpus());
}
/* MFP mode enabled */
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
+ set_bit(I40E_FLAG_MFP_ENA, pf->flags);
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
if (i40e_get_partition_bw_setting(pf)) {
dev_warn(&pf->pdev->dev,
@@ -12784,84 +12779,31 @@ static int i40e_sw_init(struct i40e_pf *pf)
if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+ set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
pf->hw.num_partitions > 1)
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
else
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
pf->fdir_pf_filter_count =
pf->hw.func_caps.fd_filters_guaranteed;
pf->hw.fdir_shared_filter_count =
pf->hw.func_caps.fd_filters_best_effort;
}
- if (pf->hw.mac.type == I40E_MAC_X722) {
- pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
- I40E_HW_128_QP_RSS_CAPABLE |
- I40E_HW_ATR_EVICT_CAPABLE |
- I40E_HW_WB_ON_ITR_CAPABLE |
- I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
- I40E_HW_NO_PCI_LINK_CHECK |
- I40E_HW_USE_SET_LLDP_MIB |
- I40E_HW_GENEVE_OFFLOAD_CAPABLE |
- I40E_HW_PTP_L4_CAPABLE |
- I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
- I40E_HW_OUTER_UDP_CSUM_CAPABLE);
-
-#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
- if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
- I40E_FDEVICT_PCTYPE_DEFAULT) {
- dev_warn(&pf->pdev->dev,
- "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
- pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
- }
- } else if ((pf->hw.aq.api_maj_ver > 1) ||
- ((pf->hw.aq.api_maj_ver == 1) &&
- (pf->hw.aq.api_min_ver > 4))) {
- /* Supported in FW API version higher than 1.4 */
- pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
- }
-
/* Enable HW ATR eviction if possible */
- if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
- pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
-
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
- (pf->hw.aq.fw_maj_ver < 4))) {
- pf->hw_features |= I40E_HW_RESTART_AUTONEG;
- /* No DCB support for FW < v4.33 */
- pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
- }
-
- /* Disable FW LLDP if FW < v4.3 */
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
- (pf->hw.aq.fw_maj_ver < 4)))
- pf->hw_features |= I40E_HW_STOP_FW_LLDP;
-
- /* Use the FW Set LLDP MIB API if FW > v4.40 */
- if ((pf->hw.mac.type == I40E_MAC_XL710) &&
- (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
- (pf->hw.aq.fw_maj_ver >= 5)))
- pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
-
- /* Enable PTP L4 if FW > v6.0 */
- if (pf->hw.mac.type == I40E_MAC_XL710 &&
- pf->hw.aq.fw_maj_ver >= 6)
- pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+ if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps))
+ set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags);
if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
- pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ set_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
- pf->flags |= I40E_FLAG_IWARP_ENABLED;
+ set_bit(I40E_FLAG_IWARP_ENA, pf->flags);
/* IWARP needs one extra vector for CQP just like MISC. */
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
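
The one-off hardware capability bits get the same treatment in this series: the old pf->hw_features word becomes a capability bitmap hanging off struct i40e_hw. A hedged sketch of the query side, using names as they appear in this patch:

static bool demo_use_rss_aq(struct i40e_pf *pf)
{
	/* old: pf->hw_features & I40E_HW_RSS_AQ_CAPABLE */
	return test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps);
}
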
@@ -12871,25 +12813,23 @@ static int i40e_sw_init(struct i40e_pf *pf)
* if NPAR is functioning so unset this hw flag in this case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- pf->hw.func_caps.npar_enable &&
- (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
- pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ pf->hw.func_caps.npar_enable)
+ clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
- pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ set_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
- pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
/* By default FW has this off for performance reasons */
- pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+ clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
/* set up queue assignment tracking */
size = sizeof(struct i40e_lump_tracking)
@@ -12908,8 +12848,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Link down on close must be on when total port shutdown
* is enabled for a given port
*/
- pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
- I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+ set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+ set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
dev_info(&pf->pdev->dev,
"total-port-shutdown was enabled, link-down-on-close is forced on\n");
}
@@ -12935,31 +12875,31 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
*/
if (features & NETIF_F_NTUPLE) {
/* Enable filters and mark for reset */
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
need_reset = true;
/* enable FD_SB only if there is MSI-X vector and no cloud
* filters exist
*/
if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
}
} else {
/* turn off filters, mark for reset and clear SW filter list */
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
/* reset fd counters */
pf->fd_add_err = 0;
pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
}
@@ -13108,7 +13048,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
+ if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps))
return -EOPNOTSUPP;
ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
@@ -13137,7 +13077,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct i40e_pf *pf = np->vsi->back;
int err = 0;
- if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags))
return -EOPNOTSUPP;
if (vid) {
@@ -13237,9 +13177,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
veb->bridge_mode = mode;
/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
if (mode == BRIDGE_MODE_VEB)
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
else
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
break;
}
@@ -13573,7 +13513,7 @@ static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
struct i40e_hw *hw = &pf->hw;
/* All rings in a qp belong to the same qvector. */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
else
i40e_irq_dynamic_enable_icr0(pf);
@@ -13598,7 +13538,7 @@ static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
*
* All rings in a qp belong to the same qvector.
*/
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
@@ -13783,7 +13723,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_RXCSUM |
0;
- if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
@@ -13819,7 +13759,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
@@ -14010,7 +13950,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* negative logic - if it's set, we need to fiddle with
* the VSI to disable source pruning.
*/
- if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+ if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) {
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
@@ -14032,7 +13972,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
/* MFP mode setup queue map and update VSI */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
!(pf->hw.func_caps.iscsi)) { /* NIC type PF */
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->main_vsi_seid;
@@ -14080,7 +14020,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) &&
(i40e_is_vsi_uplink_mode_veb(vsi))) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -14128,7 +14068,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
- if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
@@ -14344,7 +14284,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
/* In Legacy mode, we do not have to get any other vector since we
* piggyback on the misc/ICR0 for queue interrupts.
*/
- if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
return ret;
if (vsi->num_q_vectors)
vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
@@ -14511,9 +14451,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
* already enabled, in which case we can't force VEPA
* mode.
*/
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
veb->bridge_mode = BRIDGE_MODE_VEPA;
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
}
i40e_config_bridge_mode(veb);
}
@@ -14609,12 +14549,16 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
break;
}
- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
- (vsi->type == I40E_VSI_VMDQ2)) {
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
+ vsi->type == I40E_VSI_VMDQ2) {
ret = i40e_vsi_config_rss(vsi);
+ if (ret)
+ goto err_config;
}
return vsi;
+err_config:
+ i40e_vsi_clear_rings(vsi);
err_rings:
i40e_vsi_free_q_vectors(vsi);
err_msix:
@@ -14852,7 +14796,7 @@ void i40e_veb_release(struct i40e_veb *veb)
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
+ bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
@@ -15041,12 +14985,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
* the PF's VSI
*/
pf->mac_seid = uplink_seid;
- pf->pf_seid = downlink_seid;
pf->main_vsi_seid = seid;
if (printconfig)
dev_info(&pf->pdev->dev,
"pf_seid=%d main_vsi_seid=%d\n",
- pf->pf_seid, pf->main_vsi_seid);
+ downlink_seid, pf->main_vsi_seid);
break;
case I40E_SWITCH_ELEMENT_TYPE_PF:
case I40E_SWITCH_ELEMENT_TYPE_VF:
@@ -15152,7 +15095,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
*/
if ((pf->hw.pf_id == 0) &&
- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) {
flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
pf->last_sw_conf_flags = flags;
}
@@ -15219,16 +15162,12 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
/* enable RSS in the HW, even for only one queue, as the stack can use
* the hash
*/
- if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+ if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i40e_pf_config_rss(pf);
/* fill in link information and enable LSE reporting */
i40e_link_event(pf);
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
i40e_ptp_init(pf);
if (!lock_acquired)
@@ -15261,42 +15200,42 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->alloc_rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
- } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_CAPABLE))) {
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
+ } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
+ !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) {
/* one qp for PF */
pf->alloc_rss_size = pf->num_lan_qps = 1;
queues_left -= pf->num_lan_qps;
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_IWARP_ENABLED |
- I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_RSS_ENA, pf->flags);
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
+ clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
} else {
/* Not enough queues for all TCs */
- if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
- (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
- I40E_FLAG_DCB_ENABLED);
+ if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) &&
+ queues_left < I40E_MAX_TRAFFIC_CLASS) {
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
@@ -15309,24 +15248,24 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
} else {
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+ clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
+ set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
}
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
pf->num_req_vfs = min_t(int, pf->num_req_vfs,
(queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
- if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) &&
pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
(queues_left / pf->num_vmdq_qps));
@@ -15337,7 +15276,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev,
"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
pf->hw.func_caps.num_tx_qp,
- !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+ !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags),
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left);
@@ -15361,7 +15300,8 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ||
+ test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -15393,21 +15333,21 @@ static void i40e_print_features(struct i40e_pf *pf)
i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
pf->vsi[pf->lan_vsi]->num_queue_pairs);
- if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " RSS");
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) {
i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
}
- if (pf->flags & I40E_FLAG_DCB_CAPABLE)
+ if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " DCB");
i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
i += scnprintf(&buf[i], REMAIN(i), " Geneve");
- if (pf->flags & I40E_FLAG_PTP)
+ if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " PTP");
- if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " VEB");
else
i += scnprintf(&buf[i], REMAIN(i), " VEPA");
@@ -15438,22 +15378,26 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
* @fec_cfg: FEC option to set in flags
* @flags: ptr to flags in which we set FEC option
**/
-void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
+void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags)
{
- if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
- *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
+ if (fec_cfg & I40E_AQ_SET_FEC_AUTO) {
+ set_bit(I40E_FLAG_RS_FEC, flags);
+ set_bit(I40E_FLAG_BASE_R_FEC, flags);
+ }
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
- *flags |= I40E_FLAG_RS_FEC;
- *flags &= ~I40E_FLAG_BASE_R_FEC;
+ set_bit(I40E_FLAG_RS_FEC, flags);
+ clear_bit(I40E_FLAG_BASE_R_FEC, flags);
}
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
- *flags |= I40E_FLAG_BASE_R_FEC;
- *flags &= ~I40E_FLAG_RS_FEC;
+ set_bit(I40E_FLAG_BASE_R_FEC, flags);
+ clear_bit(I40E_FLAG_RS_FEC, flags);
+ }
+ if (fec_cfg == 0) {
+ clear_bit(I40E_FLAG_RS_FEC, flags);
+ clear_bit(I40E_FLAG_BASE_R_FEC, flags);
}
- if (fec_cfg == 0)
- *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
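
A hedged usage sketch of the rewritten helper (flag and AQ bit names as they appear in this patch): AUTO sets both FEC flags, a concrete RS or KR request wins over the other coding, and 0 clears both.

DECLARE_BITMAP(fec_flags, I40E_PF_FLAGS_NBITS) = { 0 };

i40e_set_fec_in_flags(I40E_AQ_SET_FEC_AUTO, fec_flags);
/* I40E_FLAG_RS_FEC and I40E_FLAG_BASE_R_FEC are now both set */

i40e_set_fec_in_flags(I40E_AQ_SET_FEC_REQUEST_RS, fec_flags);
/* I40E_FLAG_RS_FEC stays set, I40E_FLAG_BASE_R_FEC is cleared */

i40e_set_fec_in_flags(0, fec_flags);
/* both FEC flags are cleared */
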
/**
@@ -15697,7 +15641,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
- static u16 pfs_found;
u16 wol_nvm_bits;
char nvm_ver[32];
u16 link_status;
@@ -15775,7 +15718,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
hw->bus.bus_id = pdev->bus->number;
- pf->instance = pfs_found;
/* Select something other than the 802.1ad ethertype for the
* switch to use internally and drop on ingress.
@@ -15837,7 +15779,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
- pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
snprintf(pf->int_name, sizeof(pf->int_name) - 1,
"%s-%s:misc",
@@ -15879,15 +15820,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
hw->subsystem_device_id);
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
+ if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw) + 1))
dev_dbg(&pdev->dev,
"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,
I40E_FW_MINOR_VERSION(hw));
- else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
+ else if (i40e_is_aq_api_ver_lt(hw, 1, 4))
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
hw->aq.api_maj_ver,
@@ -15934,7 +15875,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* Ignore error return codes because this will fail if the agent
* was already disabled via hardware settings
*/
- if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
+ if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
i40e_aq_stop_lldp(hw, true, false, NULL);
}
@@ -15951,7 +15892,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
- pf->hw_features |= I40E_HW_PORT_ID_VALID;
+ set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps);
i40e_ptp_alloc_pins(pf);
pci_set_drvdata(pdev, pf);
@@ -15961,10 +15902,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
(!status &&
lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
- (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
- (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
+ (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) :
+ (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags));
dev_info(&pdev->dev,
- (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
+ test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
@@ -15974,7 +15915,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
+ clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -16042,11 +15984,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
!test_bit(__I40E_BAD_EEPROM, pf->state)) {
if (pci_num_vf(pdev))
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
}
#endif
err = i40e_setup_pf_switch(pf, false, false);
@@ -16087,7 +16029,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
wr32(hw, I40E_REG_MSS, val);
}
- if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
+ if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
@@ -16107,7 +16049,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open.
*/
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
err = i40e_setup_misc_vector(pf);
if (err) {
dev_info(&pdev->dev,
@@ -16120,8 +16062,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
- if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) &&
+ test_bit(I40E_FLAG_MSIX_ENA, pf->flags) &&
!test_bit(__I40E_BAD_EEPROM, pf->state)) {
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
@@ -16141,7 +16083,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif /* CONFIG_PCI_IOV */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
pf->num_iwarp_msix,
I40E_IWARP_IRQ_PILE_ID);
@@ -16149,7 +16091,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"failed to get tracking for %d vectors for IWARP err=%d\n",
pf->num_iwarp_msix, pf->iwarp_base_vector);
- pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ clear_bit(I40E_FLAG_IWARP_ENA, pf->flags);
}
}
@@ -16163,7 +16105,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
round_jiffies(jiffies + pf->service_timer_period));
/* add this PF to client device list and launch a client service task */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
err = i40e_lan_add_device(pf);
if (err)
dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
@@ -16176,7 +16118,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* and will report PCI Gen 1 x 1 by default so don't bother
* checking them.
*/
- if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
+ if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) {
char speed[PCI_SPEED_SIZE] = "Unknown";
char width[PCI_WIDTH_SIZE] = "Unknown";
@@ -16230,7 +16172,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
- i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
+ i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags);
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
@@ -16241,11 +16183,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
- I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+ val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
+ rd32(&pf->hw, I40E_PRTGL_SAH));
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16257,10 +16199,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
- (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
- pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps);
if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
- pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
+ set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps);
/* print a string summarizing features */
i40e_print_features(pf);
@@ -16329,10 +16271,10 @@ static void i40e_remove(struct pci_dev *pdev)
usleep_range(1000, 2000);
set_bit(__I40E_IN_REMOVE, pf->state);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags);
}
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
@@ -16387,7 +16329,7 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_cloud_filter_exit(pf);
/* remove attached clients */
- if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) {
ret_code = i40e_lan_del_device(pf);
if (ret_code)
dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
@@ -16406,7 +16348,7 @@ static void i40e_remove(struct pci_dev *pdev)
unmap:
/* Free MSI/legacy interrupt 0 when in recovery mode. */
if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
free_irq(pf->pdev->irq, pf);
/* shutdown the adminq */
@@ -16625,7 +16567,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
i40e_enable_mc_magic_wake(pf);
i40e_prep_for_reset(pf);
@@ -16637,7 +16580,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Free MSI/legacy interrupt 0 when in recovery mode. */
if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ !test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
free_irq(pf->pdev->irq, pf);
/* Since we're going to destroy queues during the
@@ -16678,7 +16621,8 @@ static int __maybe_unused i40e_suspend(struct device *dev)
*/
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
- if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
i40e_enable_mc_magic_wake(pf);
/* Since we're going to destroy queues during the
@@ -16789,7 +16733,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index e5aec09d5..605fd82f5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -27,8 +27,7 @@ int i40e_init_nvm(struct i40e_hw *hw)
* as the blank mode may be used in the factory line.
*/
gens = rd32(hw, I40E_GLNVM_GENS);
- sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
- I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+ sr_size = FIELD_GET(I40E_GLNVM_GENS_SR_SIZE_MASK, gens);
/* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
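
The FIELD_GET() conversions in this file follow one rule: the mask encodes its own shift, so the macro replaces the open-coded "(reg & MASK) >> SHIFT". A minimal sketch with an assumed mask, not the real register layout:

#include <linux/bitfield.h>

#define DEMO_SR_SIZE_MASK	GENMASK(7, 5)	/* hypothetical field */

static u32 demo_sr_size(u32 gens)
{
	/* old: (gens & DEMO_SR_SIZE_MASK) >> 5 */
	return FIELD_GET(DEMO_SR_SIZE_MASK, gens);
}
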
@@ -194,9 +193,8 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
- *data = (u16)((sr_reg &
- I40E_GLNVM_SRDATA_RDDATA_MASK)
- >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ *data = FIELD_GET(I40E_GLNVM_SRDATA_RDDATA_MASK,
+ sr_reg);
}
}
if (ret_code)
@@ -292,7 +290,7 @@ static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
static int __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset, u16 *data)
{
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data);
@@ -311,14 +309,14 @@ int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
int ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ if (test_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps))
i40e_release_nvm(hw);
return ret_code;
@@ -500,7 +498,7 @@ static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset, u16 *words,
u16 *data)
{
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps))
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
@@ -522,7 +520,7 @@ int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
int ret_code = 0;
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ if (test_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps)) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
@@ -772,13 +770,12 @@ static inline u8 i40e_nvmupd_get_module(u32 val)
}
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
- return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+ return FIELD_GET(I40E_NVM_TRANS_MASK, val);
}
static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
- return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
- I40E_NVM_PRESERVATION_FLAGS_SHIFT);
+ return FIELD_GET(I40E_NVM_PRESERVATION_FLAGS_MASK, val);
}
static const char * const i40e_nvm_update_state_str[] = {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 001162042..ce1f11b8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -501,4 +501,73 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+/* Firmware and AdminQ version check helpers */
+
+/**
+ * i40e_is_aq_api_ver_ge
+ * @hw: pointer to i40e_hw structure
+ * @maj: API major value to compare
+ * @min: API minor value to compare
+ *
+ * Check whether the current HW API version is at least the provided one.
+ **/
+static inline bool i40e_is_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.api_maj_ver > maj ||
+ (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min));
+}
+
+/**
+ * i40e_is_aq_api_ver_lt
+ * @hw: pointer to i40e_hw structure
+ * @maj: API major value to compare
+ * @min: API minor value to compare
+ *
+ * Check whether the current HW API version is less than the provided one.
+ **/
+static inline bool i40e_is_aq_api_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return !i40e_is_aq_api_ver_ge(hw, maj, min);
+}
+
+/**
+ * i40e_is_fw_ver_ge
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Check whether the current firmware version is at least the provided one.
+ **/
+static inline bool i40e_is_fw_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.fw_maj_ver > maj ||
+ (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver >= min));
+}
+
+/**
+ * i40e_is_fw_ver_lt
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Check whether the current firmware version is less than the provided one.
+ **/
+static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return !i40e_is_fw_ver_ge(hw, maj, min);
+}
+
+/**
+ * i40e_is_fw_ver_eq
+ * @hw: pointer to i40e_hw structure
+ * @maj: firmware major value to compare
+ * @min: firmware minor value to compare
+ *
+ * Check whether the current firmware version is equal to the provided one.
+ **/
+static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
+{
+ return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
+}
+
#endif /* _I40E_PROTOTYPE_H_ */
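
A hedged usage sketch tying the new helpers back to the i40e_probe() conversion earlier in this patch; the "+ 1" expresses the old strict "minor greater than" test in greater-or-equal form:

static bool demo_nvm_newer_than_driver(struct i40e_hw *hw)
{
	/* old: api_maj_ver == MAJOR && api_min_ver > MINOR(hw) */
	return i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR,
				     I40E_FW_MINOR_VERSION(hw) + 1);
}
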
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 20b77398f..e7ebcb09f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -35,7 +35,7 @@ enum i40e_ptp_pin {
GPIO_4
};
-enum i40e_can_set_pins_t {
+enum i40e_can_set_pins {
CANT_DO_PINS = -1,
CAN_SET_PINS,
CAN_DO_PINS
@@ -193,7 +193,7 @@ static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw)
* return CAN_DO_PINS if pins can be manipulated within a NIC or
* return CANT_DO_PINS otherwise.
**/
-static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf)
+static enum i40e_can_set_pins i40e_can_set_pins(struct i40e_pf *pf)
{
if (!i40e_is_ptp_pin_dev(&pf->hw)) {
dev_warn(&pf->pdev->dev,
@@ -680,7 +680,7 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf)
* configured. We don't want to spuriously warn about Rx timestamp
* hangs if we don't care about the timestamps.
*/
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx)
return;
spin_lock_bh(&pf->ptp_rx_lock);
@@ -733,7 +733,7 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf)
{
struct sk_buff *skb;
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx)
return;
/* Nothing to do if we're not already waiting for a timestamp */
@@ -771,7 +771,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
u32 hi, lo;
u64 ns;
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_tx)
return;
/* don't attempt to timestamp if we don't have an skb */
@@ -818,7 +818,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
/* Since we cannot turn off the Rx timestamp logic if the device is
* doing Tx timestamping, check if Rx timestamping is configured.
*/
- if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags) || !pf->ptp_rx)
return;
hw = &pf->hw;
@@ -924,7 +924,7 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config = &pf->tstamp_config;
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
@@ -1071,7 +1071,7 @@ static void i40e_ptp_set_pins_hw(struct i40e_pf *pf)
static int i40e_ptp_set_pins(struct i40e_pf *pf,
struct i40e_ptp_pins_settings *pins)
{
- enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf);
+ enum i40e_can_set_pins pin_caps = i40e_can_set_pins(pf);
int i = 0;
if (pin_caps == CANT_DO_PINS)
@@ -1211,7 +1211,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
return -ERANGE;
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
@@ -1225,7 +1225,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
+ if (!test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps))
return -ERANGE;
fallthrough;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1234,7 +1234,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
pf->ptp_rx = true;
tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
I40E_PRTTSYN_CTL1_TSYNTYPE_V2;
- if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) {
tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
} else {
@@ -1308,7 +1308,7 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -1426,7 +1426,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
void i40e_ptp_save_hw_time(struct i40e_pf *pf)
{
/* don't try to access the PTP clock if it's not enabled */
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return;
i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
@@ -1480,10 +1480,10 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* Only one PF is assigned to control 1588 logic per port. Do not
* enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID
*/
- pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
- I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ pf_id = FIELD_GET(I40E_PRTTSYN_CTL0_PF_ID_MASK,
+ rd32(hw, I40E_PRTTSYN_CTL0));
if (hw->pf_id != pf_id) {
- pf->flags &= ~I40E_FLAG_PTP;
+ clear_bit(I40E_FLAG_PTP_ENA, pf->flags);
dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n",
__func__,
netdev->name);
@@ -1504,7 +1504,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
dev_info(&pf->pdev->dev, "PHC enabled\n");
- pf->flags |= I40E_FLAG_PTP;
+ set_bit(I40E_FLAG_PTP_ENA, pf->flags);
/* Ensure the clocks are running. */
regval = rd32(hw, I40E_PRTTSYN_CTL0);
@@ -1539,7 +1539,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
u32 regval;
- pf->flags &= ~I40E_FLAG_PTP;
+ clear_bit(I40E_FLAG_PTP_ENA, pf->flags);
pf->ptp_tx = false;
pf->ptp_rx = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index f6671ac79..432afbb64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -333,8 +333,11 @@
#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
@@ -863,16 +866,6 @@
#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
#define I40E_PFPM_WUFC_MAG_SHIFT 1
#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VFQF_HLUT_MAX_INDEX 15
@@ -899,6 +892,7 @@
#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
/* Redefined for X722 family */
#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 071ef309a..1a12b7328 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -33,19 +33,16 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
- (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+ flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index);
- flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
- (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK,
+ fdata->flex_off);
- flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
- (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
/* Use LAN VSI Id if not programmed by user */
- flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
- ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
+ fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
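
FIELD_PREP() is the write-side dual used in this function: it shifts a value into the mask's position, replacing "MASK & (val << SHIFT)". A minimal sketch with an assumed mask:

#include <linux/bitfield.h>

#define DEMO_QINDEX_MASK	GENMASK(10, 0)	/* hypothetical field */

static u32 demo_build_qw0(u16 q_index)
{
	/* old: DEMO_QINDEX_MASK & ((u32)q_index << 0) */
	return FIELD_PREP(DEMO_QINDEX_MASK, q_index);
}
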
@@ -55,17 +52,15 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
- (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl);
- dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
- (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK,
+ fdata->fd_status);
if (fdata->cnt_index) {
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
- ((u32)fdata->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ fdata->cnt_index);
}
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -464,7 +459,7 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
&pf->fd_tcp6_filter_cnt);
if (add) {
- if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
@@ -691,8 +686,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
u32 error;
qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
- error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
- I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+ error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1);
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
@@ -734,7 +728,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
- if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
!test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
pf->state))
if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -1071,7 +1065,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
if (q_vector->arm_wb_state)
return;
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
@@ -1095,7 +1089,7 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
**/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
- if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
@@ -1403,8 +1397,7 @@ void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
u8 id;
- id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
- I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+ id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1);
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
@@ -1755,11 +1748,9 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
+ rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
@@ -1892,13 +1883,10 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
- u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
+ u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status);
+ u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -2551,8 +2539,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
continue;
}
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
if (!size)
break;
@@ -2643,7 +2630,22 @@ process_next:
return failure ? budget : (int)total_rx_packets;
}
-static inline u32 i40e_buildreg_itr(const int type, u16 itr)
+/**
+ * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
+ * @itr_idx: interrupt throttling index
+ * @interval: interrupt throttling interval value in usecs
+ * @force_swint: force software interrupt
+ *
+ * The function builds a value for the I40E_PFINT_DYN_CTLN register that
+ * updates the interrupt throttling interval for the specified ITR index
+ * and optionally enforces a software interrupt. If @itr_idx is equal
+ * to I40E_ITR_NONE then no interval change is applied and only the
+ * @force_swint parameter is taken into account. If neither an interval
+ * change nor an enforced software interrupt is requested then the built
+ * value just enables the appropriate vector interrupt.
+ **/
+static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
+ bool force_swint)
{
u32 val;
@@ -2657,23 +2659,33 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
* an event in the PBA anyway so we need to rely on the automask
* to hold pending events for us until the interrupt is re-enabled
*
- * The itr value is reported in microseconds, and the register
- * value is recorded in 2 microsecond units. For this reason we
- * only need to shift by the interval shift - 1 instead of the
- * full value.
+ * We have to shift the given value as it is reported in microseconds
+ * and the register value is recorded in 2 microsecond units.
*/
- itr &= I40E_ITR_MASK;
+ interval >>= 1;
+ /* 1. Enable vector interrupt
+ * 2. Update the interval for the specified ITR index
+ * (I40E_ITR_NONE in the register is used to indicate that
+ * no interval update is requested)
+ */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
+
+ /* 3. Enforce software interrupt trigger if requested
+ * (The rate of these software interrupts is limited by ITR2, which
+ * is set to 20K interrupts per second)
+ */
+ if (force_swint)
+ val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
+ I40E_SW_ITR);
return val;
}
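
The three call patterns the new signature supports, as an illustrative fragment (interval values are arbitrary):

u32 v;

/* update the Rx ITR to 50 usecs (written back as 25 in 2-usec units) */
v = i40e_buildreg_itr(I40E_RX_ITR, 50, false);

/* no interval change, just re-enable the vector interrupt */
v = i40e_buildreg_itr(I40E_ITR_NONE, 0, false);

/* no interval change, but enforce a software interrupt on ITR2 */
v = i40e_buildreg_itr(I40E_ITR_NONE, 0, true);
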
-/* a small macro to shorten up some long lines */
-#define INTREG I40E_PFINT_DYN_CTLN
-
/* The act of updating the ITR will cause it to immediately trigger. In order
* to prevent this from throwing off adaptive update statistics we defer the
* update so that it can only happen so often. So after either Tx or Rx are
@@ -2692,11 +2704,13 @@ static inline u32 i40e_buildreg_itr(const int type, u16 itr)
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
+ enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
struct i40e_hw *hw = &vsi->back->hw;
- u32 intval;
+ u16 interval = 0;
+ u32 itr_val;
/* If we don't have MSIX, then we only need to re-enable icr0 */
- if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
+ if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
i40e_irq_dynamic_enable_icr0(vsi->back);
return;
}
@@ -2715,8 +2729,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
@@ -2725,25 +2739,36 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- intval = i40e_buildreg_itr(I40E_TX_ITR,
- q_vector->tx.target_itr);
+ itr_idx = I40E_TX_ITR;
+ interval = q_vector->tx.target_itr;
q_vector->tx.current_itr = q_vector->tx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
/* Rx ITR needs to be increased, third priority */
- intval = i40e_buildreg_itr(I40E_RX_ITR,
- q_vector->rx.target_itr);
+ itr_idx = I40E_RX_ITR;
+ interval = q_vector->rx.target_itr;
q_vector->rx.current_itr = q_vector->rx.target_itr;
q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* No ITR update, lowest priority */
- intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown)
q_vector->itr_countdown--;
}
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), intval);
+ /* Do not update interrupt control register if VSI is down */
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ /* Update ITR interval if necessary and enforce software interrupt
+ * if we are exiting busy poll.
+ */
+ if (q_vector->in_busy_poll) {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, true);
+ q_vector->in_busy_poll = false;
+ } else {
+ itr_val = i40e_buildreg_itr(itr_idx, interval, false);
+ }
+ wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
}
/**
@@ -2858,6 +2883,8 @@ tx_only:
*/
if (likely(napi_complete_done(napi, work_done)))
i40e_update_enable_itr(vsi, q_vector);
+ else
+ q_vector->in_busy_poll = true;
return min(work_done, budget - 1);
}
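
A hedged skeleton of the busy-poll bookkeeping added here (demo_* helpers are hypothetical): napi_complete_done() returns false while the stack keeps busy polling, and the flag makes the next interrupt-enable path enforce a software interrupt so pending work is not starved.

struct demo_q_vector {
	struct napi_struct napi;
	bool in_busy_poll;
};

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_q_vector *q =
		container_of(napi, struct demo_q_vector, napi);
	int work_done = demo_clean_rings(q, budget);	/* hypothetical */

	if (work_done < budget && likely(napi_complete_done(napi, work_done)))
		demo_update_enable_itr(q);	/* hypothetical irq re-enable */
	else
		q->in_busy_poll = true;		/* enforce swint on next enable */

	return min(work_done, budget - 1);
}
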
@@ -2885,7 +2912,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
return;
if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
@@ -2930,7 +2957,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2956,8 +2983,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
+ tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
@@ -2983,16 +3010,14 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
dtype_cmd |=
- ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ I40E_FD_ATR_STAT_IDX(pf->hw.pf_id));
else
dtype_cmd |=
- ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
+ I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id));
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
+ if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags))
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -3050,7 +3075,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}
- if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
goto out;
/* Insert 802.1p priority into VLAN header */
@@ -3226,7 +3251,7 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* we are not already transmitting a packet to be timestamped
*/
pf = i40e_netdev_to_pf(tx_ring->netdev);
- if (!(pf->flags & I40E_FLAG_PTP))
+ if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
return 0;
if (pf->ptp_tx &&
@@ -3598,8 +3623,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
- I40E_TX_FLAGS_VLAN_SHIFT;
+ td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags);
}
first->tx_flags = tx_flags;
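
A large share of the hunks in this series mechanically replace open-coded shift-and-mask arithmetic with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>; the macros derive the shift from the mask itself, so a shift constant can no longer drift out of sync with its mask. A simplified userspace rendering of the semantics (the kernel versions additionally enforce compile-time constant masks and type checks):

/* field.c - simplified FIELD_PREP/FIELD_GET semantics (assumes the
 * mask is a non-zero compile-time constant, as the kernel requires).
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT(mask)     __builtin_ctzll(mask)
#define FIELD_PREP(mask, val) (((uint64_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)  (((uint64_t)(reg) & (mask)) >> FIELD_SHIFT(mask))

#define QINDEX_MASK 0x00000FFCu	/* hypothetical 10-bit field at bit 2 */

int main(void)
{
	uint32_t reg = FIELD_PREP(QINDEX_MASK, 37);

	/* round-trips without anyone spelling out the shift */
	printf("reg=0x%08x qindex=%llu\n", reg,
	       (unsigned long long)FIELD_GET(QINDEX_MASK, reg));
	return 0;
}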
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 421fe5675..2cdc7de63 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -58,7 +58,7 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
* mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
* register but instead is a special value meaning "don't update" ITR0/1/2.
*/
-enum i40e_dyn_idx_t {
+enum i40e_dyn_idx {
I40E_IDX_ITR0 = 0,
I40E_IDX_ITR1 = 1,
I40E_IDX_ITR2 = 2,
@@ -68,6 +68,7 @@ enum i40e_dyn_idx_t {
/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
@@ -92,8 +93,8 @@ enum i40e_dyn_idx_t {
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
#define i40e_pf_get_default_rss_hena(pf) \
- (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
+ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
@@ -306,7 +307,7 @@ struct i40e_rx_queue_stats {
u64 page_busy_count;
};
-enum i40e_ring_state_t {
+enum i40e_ring_state {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
__I40E_RING_STATE_NBITS /* must be last */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f95bc2a4a..d90314996 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -64,9 +64,7 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
I40E_MAC_XL710,
- I40E_MAC_VF,
I40E_MAC_X722,
- I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -272,9 +270,7 @@ struct i40e_mac_info {
enum i40e_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
- u8 san_addr[ETH_ALEN];
u8 port_addr[ETH_ALEN];
- u16 max_fcoeq;
};
enum i40e_aq_resources_ids {
@@ -482,6 +478,36 @@ struct i40e_dcbx_config {
struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
};
+enum i40e_hw_flags {
+ I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE,
+ I40E_HW_CAP_802_1AD,
+ I40E_HW_CAP_AQ_PHY_ACCESS,
+ I40E_HW_CAP_NVM_READ_REQUIRES_LOCK,
+ I40E_HW_CAP_FW_LLDP_STOPPABLE,
+ I40E_HW_CAP_FW_LLDP_PERSISTENT,
+ I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED,
+ I40E_HW_CAP_X722_FEC_REQUEST,
+ I40E_HW_CAP_RSS_AQ,
+ I40E_HW_CAP_128_QP_RSS,
+ I40E_HW_CAP_ATR_EVICT,
+ I40E_HW_CAP_WB_ON_ITR,
+ I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
+ I40E_HW_CAP_NO_PCI_LINK_CHECK,
+ I40E_HW_CAP_100M_SGMII,
+ I40E_HW_CAP_NO_DCB_SUPPORT,
+ I40E_HW_CAP_USE_SET_LLDP_MIB,
+ I40E_HW_CAP_GENEVE_OFFLOAD,
+ I40E_HW_CAP_PTP_L4,
+ I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE,
+ I40E_HW_CAP_CRT_RETIMER,
+ I40E_HW_CAP_OUTER_UDP_CSUM,
+ I40E_HW_CAP_PHY_CONTROLS_LEDS,
+ I40E_HW_CAP_STOP_FW_LLDP,
+ I40E_HW_CAP_PORT_ID_VALID,
+ I40E_HW_CAP_RESTART_AUTONEG,
+ I40E_HW_CAPS_NBITS,
+};
+
/* Port hardware description */
struct i40e_hw {
u8 __iomem *hw_addr;
@@ -546,16 +572,7 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
-#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
-#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
-#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
-#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
-#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
-#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
-#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
- u64 flags;
+ DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS);
/* Used in set switch config AQ command */
u16 switch_tag;
@@ -567,12 +584,6 @@ struct i40e_hw {
char err_str[16];
};
-static inline bool i40e_is_vf(struct i40e_hw *hw)
-{
- return (hw->mac.type == I40E_MAC_VF ||
- hw->mac.type == I40E_MAC_X722_VF);
-}
-
struct i40e_driver_version {
u8 major_version;
u8 minor_version;
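
Converting struct i40e_hw's u64 flags word into DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS) lifts the 64-flag ceiling: the map is sized from the enum's terminating _NBITS entry, so growing the capability set is just another enumerator, and set_bit()/test_bit() accessors replace the BIT_ULL() masks seen throughout the rest of the diff. A self-contained sketch of the primitives involved (the kernel's variants in <linux/bitmap.h> also come in atomic flavors):

/* caps.c - sketch of DECLARE_BITMAP/set_bit/test_bit semantics */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

enum hw_caps { CAP_RSS_AQ, CAP_WB_ON_ITR, CAP_OUTER_UDP_CSUM, CAPS_NBITS };

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(unsigned int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	DECLARE_BITMAP(caps, CAPS_NBITS) = { 0 };

	set_bit(CAP_WB_ON_ITR, caps);
	printf("wb_on_itr=%d rss_aq=%d\n",
	       test_bit(CAP_WB_ON_ITR, caps), test_bit(CAP_RSS_AQ, caps));
	return 0;
}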
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3d8a23d33..37e77163d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -500,10 +500,10 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
*/
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
- next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
- >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
- next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
- >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+ next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
+ reg);
+ next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
+ reg);
reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
reg = (next_q_index &
@@ -581,10 +581,10 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
* queue on top. Also link it with the new queue in CEQCTL.
*/
reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
- next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
- next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
- I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
+ reg);
+ next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
+ reg);
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
@@ -685,11 +685,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
- qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
- << I40E_QTX_CTL_VFVM_INDX_SHIFT)
- & I40E_QTX_CTL_VFVM_INDX_MASK);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
+ qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
+ vf->vf_id + hw->func_caps.vf_base_id);
wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
i40e_flush(hw);
@@ -1630,8 +1628,8 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
- int i, v;
u32 reg;
+ int i;
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
@@ -1642,11 +1640,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is being reset no need to trigger reset again */
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ i40e_trigger_vf_reset(vf, flr);
}
/* HW requires some time to make sure it can flush the FIFO for a VF
@@ -1655,14 +1652,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
* the VFs using a simple iterator that increments once that VF has
* finished resetting.
*/
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
+ for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
usleep_range(10000, 20000);
/* Check each VF in sequence, beginning with the VF to fail
* the previous check.
*/
- while (v < pf->num_alloc_vfs) {
- vf = &pf->vf[v];
+ while (vf < &pf->vf[pf->num_alloc_vfs]) {
if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
@@ -1672,7 +1668,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
*/
- v++;
+ ++vf;
}
}
@@ -1682,39 +1678,39 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* Display a warning if at least one VF didn't manage to reset in
* time, but continue on with the operation.
*/
- if (v < pf->num_alloc_vfs)
+ if (vf < &pf->vf[pf->num_alloc_vfs])
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
- pf->vf[v].vf_id);
+ vf->vf_id);
usleep_range(10000, 20000);
/* Begin disabling all the rings associated with VFs, but do not wait
* between each VF.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
}
/* Now that we've notified HW to disable all of the VF rings, wait
* until they finish.
*/
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* On initial reset, we don't have any queues to disable */
- if (pf->vf[v].lan_vsi_idx == 0)
+ if (vf->lan_vsi_idx == 0)
continue;
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
+ i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
}
/* Hw may need up to 50ms to finish disabling the RX queues. We
@@ -1723,12 +1719,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
+ for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
/* If VF is reset in another thread just continue */
if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
continue;
- i40e_cleanup_reset_vf(&pf->vf[v]);
+ i40e_cleanup_reset_vf(vf);
}
i40e_flush(hw);
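
The i40e_reset_all_vfs() hunks swap the integer index v for a pointer cursor walking the half-open range [&pf->vf[0], &pf->vf[num_alloc_vfs]), which drops the repeated &pf->vf[v] indexing and lets the timeout report reuse the cursor directly. The idiom in isolation:

/* iter.c - pointer-range iteration over an array of structs */
#include <stdio.h>

struct vf { int id; };

int main(void)
{
	struct vf vfs[4] = { {0}, {1}, {2}, {3} };
	int num_alloc = 4;
	struct vf *vf;

	for (vf = &vfs[0]; vf < &vfs[num_alloc]; ++vf)
		printf("resetting VF %d\n", vf->id);

	/* had the loop broken early, `vf` would still name the culprit */
	return 0;
}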
@@ -1834,7 +1830,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
if (ret) {
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
pf->num_alloc_vfs = 0;
goto err_iov;
}
@@ -1945,8 +1941,8 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
}
if (num_vfs) {
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
+ set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
@@ -1955,7 +1951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+ clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
@@ -2163,14 +2159,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
+ if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
else
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |=
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
@@ -2179,12 +2175,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
- if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
+ if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
vf->vf_id);
@@ -2194,7 +2190,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
}
- if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
+ if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
vfres->vf_cap_flags |=
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
@@ -3145,11 +3141,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
/* Allow to delete VF primary MAC only if it was not set
* administratively by PF or if VF is trusted.
*/
- if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
- i40e_can_vf_change_mac(vf))
- was_unimac_deleted = true;
- else
- continue;
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+ }
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
@@ -4737,9 +4734,8 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->max_tx_rate = vf->tx_rate;
ivi->min_tx_rate = 0;
- ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
- ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
- I40E_VLAN_PRIORITY_SHIFT;
+ ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
+ ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
if (vf->link_forced == false)
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->link_up == true)
@@ -4929,7 +4925,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
}
- if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
ret = -EINVAL;
goto out;
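
le16_get_bits(), used above for the PVID, fuses the le16_to_cpu() conversion with the field extraction so a little-endian register image can never be masked in the wrong byte order. A rough userspace equivalent, with 802.1Q-style masks for illustration:

/* le16bits.c - sketch of le16_get_bits() on a little-endian value */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_cpu(uint16_t le)	/* no-op on LE hosts */
{
	const uint8_t *p = (const uint8_t *)&le;
	return (uint16_t)(p[0] | p[1] << 8);
}

#define FIELD_SHIFT(m) __builtin_ctz(m)
#define le16_get_bits(le, mask) \
	((le16_to_cpu(le) & (mask)) >> FIELD_SHIFT(mask))

#define VLAN_MASK     0x0FFFu	/* VLAN id, bits 11:0 */
#define PRIORITY_MASK 0xE000u	/* 802.1p PCP, bits 15:13 */

int main(void)
{
	uint16_t pvid = 0x6005;	/* on-wire LE image; assumes an LE host */

	printf("vlan=%u qos=%u\n",
	       (unsigned)le16_get_bits(pvid, VLAN_MASK),
	       (unsigned)le16_get_bits(pvid, PRIORITY_MASK));
	return 0;
}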
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 65f38a57b..11500003a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -477,8 +477,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
continue;
}
- size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
if (!size)
break;
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 63b45c61c..db8188c7a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -313,7 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
-#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
+#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(16)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@@ -415,6 +416,7 @@ struct iavf_adapter {
struct iavf_vsi vsi;
u32 aq_wait_count;
/* RSS stuff */
+ enum virtchnl_rss_algorithm hfunc;
u64 hena;
u16 rss_key_size;
u16 rss_lut_size;
@@ -540,6 +542,7 @@ void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
+void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
void iavf_enable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9ffbd24d8..82fcd18ad 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -8,27 +8,6 @@
#include "iavf_prototype.h"
/**
- * iavf_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void iavf_adminq_init_regs(struct iavf_hw *hw)
-{
- /* set head and tail registers in our local struct */
- hw->aq.asq.tail = IAVF_VF_ATQT1;
- hw->aq.asq.head = IAVF_VF_ATQH1;
- hw->aq.asq.len = IAVF_VF_ATQLEN1;
- hw->aq.asq.bal = IAVF_VF_ATQBAL1;
- hw->aq.asq.bah = IAVF_VF_ATQBAH1;
- hw->aq.arq.tail = IAVF_VF_ARQT1;
- hw->aq.arq.head = IAVF_VF_ARQH1;
- hw->aq.arq.len = IAVF_VF_ARQLEN1;
- hw->aq.arq.bal = IAVF_VF_ARQBAL1;
- hw->aq.arq.bah = IAVF_VF_ARQBAH1;
-}
-
-/**
* iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
@@ -259,17 +238,17 @@ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
/* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
IAVF_VF_ATQLEN1_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, IAVF_VF_ATQBAL1);
if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -288,20 +267,20 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
u32 reg = 0;
/* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
/* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
IAVF_VF_ARQLEN1_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));
/* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);
/* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, IAVF_VF_ARQBAL1);
if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -455,11 +434,11 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
+ wr32(hw, IAVF_VF_ATQLEN1, 0);
+ wr32(hw, IAVF_VF_ATQBAL1, 0);
+ wr32(hw, IAVF_VF_ATQBAH1, 0);
hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -489,11 +468,11 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
}
/* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
+ wr32(hw, IAVF_VF_ARQLEN1, 0);
+ wr32(hw, IAVF_VF_ARQBAL1, 0);
+ wr32(hw, IAVF_VF_ARQBAH1, 0);
hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -529,9 +508,6 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
goto init_adminq_exit;
}
- /* Set up register offsets */
- iavf_adminq_init_regs(hw);
-
/* setup ASQ command write back timeout */
hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
@@ -587,9 +563,9 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
desc = IAVF_ADMINQ_DESC(*asq, ntc);
details = IAVF_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));
if (details->callback) {
IAVF_ADMINQ_CALLBACK cb_func =
@@ -624,7 +600,7 @@ bool iavf_asq_done(struct iavf_hw *hw)
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
}
/**
@@ -663,7 +639,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
hw->aq.asq_last_status = IAVF_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, IAVF_VF_ATQH1);
if (val >= hw->aq.num_asq_entries) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
@@ -755,7 +731,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);
/* if cmd_details are not defined or async flag is not set,
* we need to wait for desc write back
@@ -810,7 +786,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
@@ -878,7 +854,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
}
/* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
@@ -926,7 +902,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
/* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, IAVF_VF_ARQT1, ntc);
/* ntc is updated to tail + 1 */
ntc++;
if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 1f60518eb..406506f64 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -29,13 +29,6 @@ struct iavf_adminq_ring {
/* used for interrupt processing */
u16 next_to_use;
u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
};
/* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
index 6edbf134b..a9e1da35e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -95,17 +95,21 @@ iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
* @rss_cfg: the virtchnl message to be filled with RSS configuration setting
* @packet_hdrs: the RSS configuration protocol header types
* @hash_flds: the RSS configuration protocol hash fields
+ * @symm: if true, symmetric hash is required
*
* Returns 0 if the RSS configuration virtchnl message is filled successfully
*/
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
- u32 packet_hdrs, u64 hash_flds)
+ u32 packet_hdrs, u64 hash_flds, bool symm)
{
struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
struct virtchnl_proto_hdr *hdr;
- rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ if (symm)
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ else
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
proto_hdrs->tunnel_level = 0; /* always outer layer */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
index 4d3be11af..e31eb2afe 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -80,13 +80,14 @@ struct iavf_adv_rss {
u32 packet_hdrs;
u64 hash_flds;
+ bool symm;
struct virtchnl_rss_cfg cfg_msg;
};
int
iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
- u32 packet_hdrs, u64 hash_flds);
+ u32 packet_hdrs, u64 hash_flds, bool symm);
struct iavf_adv_rss *
iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
void
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 6a10c0ecf..5a25233a8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -280,11 +280,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
**/
bool iavf_check_asq_alive(struct iavf_hw *hw)
{
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- IAVF_VF_ATQLEN1_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
return false;
+
+ return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK);
}
/**
@@ -331,6 +331,7 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
struct iavf_aq_desc desc;
struct iavf_aqc_get_set_rss_lut *cmd_resp =
(struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
+ u16 flags;
if (set)
iavf_fill_default_direct_cmd_desc(&desc,
@@ -343,22 +344,18 @@ static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- IAVF_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_LUT_VSI_VALID);
+ vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
if (pf_lut)
- cmd_resp->flags |= cpu_to_le16((u16)
- ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
else
- cmd_resp->flags |= cpu_to_le16((u16)
- ((IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
- IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
+ IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);
+
+ cmd_resp->flags = cpu_to_le16(flags);
status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
@@ -412,11 +409,9 @@ iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);
- cmd_resp->vsi_id =
- cpu_to_le16((u16)((vsi_id <<
- IAVF_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
- IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK));
- cmd_resp->vsi_id |= cpu_to_le16((u16)IAVF_AQC_SET_RSS_KEY_VSI_VALID);
+ vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
+ FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
+ cmd_resp->vsi_id = cpu_to_le16(vsi_id);
status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 25ba5653a..378c3e9dd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -397,8 +397,7 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
unsigned int i;
for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&data, "%s",
- iavf_gstrings_priv_flags[i].flag_string);
+ ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string);
}
/**
@@ -1018,8 +1017,7 @@ iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
flex = &fltr->flex_words[cnt++];
flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
- flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
- IAVF_USERDEF_FLEX_OFFS_S;
+ flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
}
@@ -1437,16 +1435,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
- if (adapter->link_up) {
+
+ if (adapter->link_up)
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
- } else {
+ else
fltr->state = IAVF_FDIR_FLTR_INACTIVE;
- }
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (adapter->link_up)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
ret:
if (err && fltr)
kfree(fltr);
@@ -1476,7 +1473,6 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fltr) {
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
list_del(&fltr->list);
kfree(fltr);
@@ -1491,7 +1487,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
return err;
}
@@ -1542,11 +1538,12 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
/**
* iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
* @cmd: ethtool rxnfc command
+ * @symm: true if Symmetric Toeplitz is set
*
* This function parses the rxnfc command and returns intended hash fields for
* RSS configuration
*/
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
+static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1618,17 +1615,20 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
int count = 50, err = 0;
+ bool symm = false;
u64 hash_flds;
u32 hdrs;
if (!ADV_RSS_SUPPORT(adapter))
return -EOPNOTSUPP;
+ symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
+
hdrs = iavf_adv_rss_parse_hdrs(cmd);
if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
return -EINVAL;
- hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
+ hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
return -EINVAL;
@@ -1636,7 +1636,8 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (!rss_new)
return -ENOMEM;
- if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
+ if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
+ symm)) {
kfree(rss_new);
return -EINVAL;
}
@@ -1655,12 +1656,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
if (rss_old) {
if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
err = -EBUSY;
- } else if (rss_old->hash_flds != hash_flds) {
+ } else if (rss_old->hash_flds != hash_flds ||
+ rss_old->symm != symm) {
rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_old->hash_flds = hash_flds;
+ rss_old->symm = symm;
memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
sizeof(rss_new->cfg_msg));
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
} else {
err = -EEXIST;
}
@@ -1669,13 +1671,13 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
rss_new->packet_hdrs = hdrs;
rss_new->hash_flds = hash_flds;
+ rss_new->symm = symm;
list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
}
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
mutex_unlock(&adapter->crit_lock);
@@ -1909,27 +1911,27 @@ static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
/**
* iavf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
**/
-static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int iavf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (key)
- memcpy(key, adapter->rss_key, adapter->rss_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
+
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
- if (indir)
+ if (rxfh->indir)
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
+ rxfh->indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
@@ -1937,33 +1939,46 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
/**
* iavf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
-static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int iavf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
/* Only support toeplitz hash function */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!key && !indir)
+ if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
+ adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
+ adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ }
+
+ if (!rxfh->key && !rxfh->indir)
return 0;
- if (key)
- memcpy(adapter->rss_key, key, adapter->rss_key_size);
+ if (rxfh->key)
+ memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
}
return iavf_config_rss(adapter);
@@ -1972,6 +1987,7 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .cap_rss_sym_xor_supported = true,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
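
On the ethtool side, the rxfh callbacks now take struct ethtool_rxfh_param, whose input_xfrm field carries RXH_XFRM_SYM_XOR: the device is asked to XOR the corresponding source and destination fields of the hash input before running Toeplitz, making the hash direction-independent so both halves of a flow land on the same queue (the iavf hunks relay this to the PF as VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC). The symmetry is pure XOR commutativity; a sketch with a stand-in mixer in place of Toeplitz:

/* symxor.c - why XOR-folding the tuple symmetrizes any RSS hash */
#include <stdint.h>
#include <stdio.h>

struct tuple { uint32_t sip, dip; uint16_t sport, dport; };

static uint64_t fold(const struct tuple *t)
{
	/* src^dst is identical for both directions of a flow */
	return ((uint64_t)(t->sip ^ t->dip) << 16) | (t->sport ^ t->dport);
}

static uint32_t hash(uint64_t x)	/* stand-in for Toeplitz */
{
	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL; x ^= x >> 33;
	return (uint32_t)x;
}

int main(void)
{
	struct tuple a2b = { 0x0a000001, 0x0a000002, 1234, 80 };
	struct tuple b2a = { 0x0a000002, 0x0a000001, 80, 1234 };

	printf("%08x == %08x\n", hash(fold(&a2b)), hash(fold(&b2a)));
	return 0;
}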
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 65ddcd81c..2d47b0b46 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -358,7 +358,7 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
if (fltr->ip_mask.tclass == U8_MAX) {
iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
- iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
+ iph->flow_lbl[0] = FIELD_PREP(0xF0, fltr->ip_data.tclass);
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e8d5b889a..1ff381361 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1038,13 +1038,12 @@ static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
*/
new_f->is_primary = true;
new_f->add = true;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
ether_addr_copy(hw->mac.addr, new_mac);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* schedule the watchdog task to immediately process the request */
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
return 0;
}
@@ -1263,8 +1262,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
iavf_napi_enable_all(adapter);
- adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
}
/**
@@ -1420,8 +1418,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
}
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
}
/**
@@ -2150,6 +2147,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_set_rss_lut(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HFUNC) {
+ iavf_set_rss_hfunc(adapter);
+ return 0;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
iavf_set_promiscuous(adapter);
@@ -2318,10 +2319,8 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
}
}
- if (aq_required) {
- adapter->aq_required |= aq_required;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
- }
+ if (aq_required)
+ iavf_schedule_aq_request(adapter, aq_required);
}
/**
@@ -3234,7 +3233,7 @@ static void iavf_adminq_task(struct work_struct *work)
goto freedom;
/* check for error indications */
- val = rd32(hw, hw->aq.arq.len);
+ val = rd32(hw, IAVF_VF_ARQLEN1);
if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
goto freedom;
oldval = val;
@@ -3251,9 +3250,9 @@ static void iavf_adminq_task(struct work_struct *work)
val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
}
if (oldval != val)
- wr32(hw, hw->aq.arq.len, val);
+ wr32(hw, IAVF_VF_ARQLEN1, val);
- val = rd32(hw, hw->aq.asq.len);
+ val = rd32(hw, IAVF_VF_ATQLEN1);
oldval = val;
if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
@@ -3268,7 +3267,7 @@ static void iavf_adminq_task(struct work_struct *work)
val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
}
if (oldval != val)
- wr32(hw, hw->aq.asq.len, val);
+ wr32(hw, IAVF_VF_ATQLEN1, val);
freedom:
kfree(event.msg_buf);
@@ -3513,6 +3512,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if configuration is same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3569,7 +3596,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fb7edba9c..b71484c87 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -989,11 +989,9 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
u64 qword;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
- rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
- IAVF_RXD_QW1_ERROR_SHIFT;
- rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
- IAVF_RXD_QW1_STATUS_SHIFT;
+ ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
+ rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
@@ -1534,8 +1532,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
break;
- size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+ size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = iavf_get_rx_buffer(rx_ring, size);
@@ -1582,8 +1579,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
- IAVF_RXD_QW1_PTYPE_SHIFT;
+ rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
/* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
@@ -2291,8 +2287,7 @@ static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
- IAVF_TX_FLAGS_VLAN_SHIFT;
+ td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
}
first->tx_flags = tx_flags;
@@ -2468,8 +2463,7 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
IAVF_TXD_CTX_QW1_CMD_SHIFT;
- cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
- IAVF_TX_FLAGS_VLAN_SHIFT;
+ cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
}
/* obtain protocol of skb */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 2d9366be0..22f2df7c4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1142,6 +1142,34 @@ void iavf_set_rss_lut(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_rss_hfunc
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS Hash function
+ **/
+void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_hfunc *vrh;
+ int len = sizeof(*vrh);
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh)
+ return;
+ vrh->vsi_id = adapter->vsi.id;
+ vrh->rss_algorithm = adapter->hfunc;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
+ kfree(vrh);
+}
+
+/**
* iavf_enable_vlan_stripping
* @adapter: adapter structure
*
@@ -2190,6 +2218,19 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
+
+ if (adapter->hfunc ==
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ adapter->hfunc =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ else
+ adapter->hfunc =
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 067990798..cddd82d4c 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -34,7 +34,9 @@ ice-y := ice_main.o \
ice_lag.o \
ice_ethtool.o \
ice_repr.o \
- ice_tc_lib.o
+ ice_tc_lib.o \
+ ice_fwlog.o \
+ ice_debugfs.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
ice_virtchnl.o \
@@ -49,3 +51,4 @@ ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
+ice-$(CONFIG_ICE_HWMON) += ice_hwmon.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 351e0d36d..367b613d9 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -360,6 +360,7 @@ struct ice_vsi {
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
+ u8 rss_hfunc; /* User configured hash type */
u8 *rss_hkey_user; /* User configured hash keys */
u8 *rss_lut_user; /* User configured lookup table entries */
u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
@@ -517,16 +518,22 @@ enum ice_pf_flags {
};
enum ice_misc_thread_tasks {
- ICE_MISC_THREAD_EXTTS_EVENT,
ICE_MISC_THREAD_TX_TSTAMP,
ICE_MISC_THREAD_NBITS /* must be last */
};
-struct ice_switchdev_info {
+struct ice_eswitch {
struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
struct ice_esw_br_offloads *br_offloads;
+ struct xarray reprs;
bool is_running;
+ /* struct to allow cp queues management optimization */
+ struct {
+ int to_reach;
+ int value;
+ bool is_reaching;
+ } qs;
};
struct ice_agg_node {
@@ -563,6 +570,10 @@ struct ice_pf {
struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
+ struct dentry *ice_debugfs_pf;
+ struct dentry *ice_debugfs_pf_fwlog;
+ /* keep track of all the dentrys for FW log modules */
+ struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -597,6 +608,7 @@ struct ice_pf {
u32 hw_csum_rx_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
+ struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
u16 num_lan_msix; /* Total MSIX vectors for base driver */
@@ -621,6 +633,7 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
@@ -637,7 +650,7 @@ struct ice_pf {
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
- struct ice_switchdev_info switchdev;
+ struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
#define ICE_INVALID_AGG_NODE_ID 0
@@ -648,6 +661,7 @@ struct ice_pf {
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
+ struct device *hwmon_dev;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -846,7 +860,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
*/
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
- return pf->switchdev.is_running;
+ return pf->eswitch.is_running;
}
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
@@ -881,6 +895,11 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
+void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_init(void);
+void ice_debugfs_exit(void);
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
+
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
@@ -912,6 +931,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
@@ -989,4 +1009,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
+
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index b8437c36f..1f3e7a690 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,6 +117,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_SENSOR_READING 0x0067
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
@@ -592,8 +593,9 @@ struct ice_aqc_recipe_data_elem {
struct ice_aqc_recipe_to_profile {
__le16 profile_id;
u8 rsvd[6];
- DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+ __le64 recipe_assoc;
};
+static_assert(sizeof(struct ice_aqc_recipe_to_profile) == 16);
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
@@ -1414,6 +1416,30 @@ struct ice_aqc_get_phy_rec_clk_out {
__le16 node_handle;
};
+/* Get sensor reading (direct 0x0632) */
+struct ice_aqc_get_sensor_reading {
+ u8 sensor;
+ u8 format;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get sensor reading response (direct 0x0632) */
+struct ice_aqc_get_sensor_reading_resp {
+ union {
+ u8 raw[8];
+ /* Output data for sensor 0x00, format 0x00 */
+ struct _packed {
+ s8 temp;
+ u8 temp_warning_threshold;
+ u8 temp_critical_threshold;
+ u8 temp_fatal_threshold;
+ u8 reserved[4];
+ } s0f0;
+ } data;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
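
The new direct command 0x0632 is what the ice_hwmon.o line in the Makefile builds on: sensor 0, format 0 returns a signed on-die temperature and three alarm thresholds in a single 8-byte response. (In passing, `struct _packed` above declares a struct tag named _packed rather than applying the __packed attribute; that is harmless here only because every member is a single byte.) A hedged sketch of decoding that payload the way an hwmon callback might, assuming the readings are whole degrees Celsius:

/* sensor.c - decode the assumed s0f0 payload into hwmon millidegrees */
#include <stdint.h>
#include <stdio.h>

struct sensor_s0f0 {		/* mirrors the response layout above */
	int8_t  temp;		/* current reading, degrees C (assumed) */
	uint8_t temp_warning_threshold;
	uint8_t temp_critical_threshold;
	uint8_t temp_fatal_threshold;
	uint8_t reserved[4];
};

int main(void)
{
	struct sensor_s0f0 rsp = { 47, 85, 105, 115, {0} };

	/* hwmon reports temperatures in millidegrees Celsius */
	printf("temp1_input=%d\n", rsp.temp * 1000);
	printf("temp1_max=%d temp1_crit=%d temp1_emergency=%d\n",
	       rsp.temp_warning_threshold * 1000,
	       rsp.temp_critical_threshold * 1000,
	       rsp.temp_fatal_threshold * 1000);
	return 0;
}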
@@ -2070,78 +2096,6 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
-/* Configure Firmware Logging Command (indirect 0xFF09)
- * Logging Information Read Response (indirect 0xFF10)
- * Note: The 0xFF10 command has no input parameters.
- */
-struct ice_aqc_fw_logging {
- u8 log_ctrl;
-#define ICE_AQC_FW_LOG_AQ_EN BIT(0)
-#define ICE_AQC_FW_LOG_UART_EN BIT(1)
- u8 rsvd0;
- u8 log_ctrl_valid; /* Not used by 0xFF10 Response */
-#define ICE_AQC_FW_LOG_AQ_VALID BIT(0)
-#define ICE_AQC_FW_LOG_UART_VALID BIT(1)
- u8 rsvd1[5];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_NETPROXY,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Defines for both above FW logging command/response buffers */
-#define ICE_AQC_FW_LOG_ID_S 0
-#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S)
-
-#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */
-#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */
-
-#define ICE_AQC_FW_LOG_EN_S 12
-#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S)
-#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */
-#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */
-#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */
-#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */
-
-/* Get/Clear FW Log (indirect 0xFF11) */
-struct ice_aqc_get_clear_fw_log {
- u8 flags;
-#define ICE_AQC_FW_LOG_CLEAR BIT(0)
-#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1)
- u8 rsvd1[7];
- __le32 addr_high;
- __le32 addr_low;
-};
-
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2404,6 +2358,84 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
+enum ice_aqc_fw_logging_mod {
+ ICE_AQC_FW_LOG_ID_GENERAL = 0,
+ ICE_AQC_FW_LOG_ID_CTRL,
+ ICE_AQC_FW_LOG_ID_LINK,
+ ICE_AQC_FW_LOG_ID_LINK_TOPO,
+ ICE_AQC_FW_LOG_ID_DNL,
+ ICE_AQC_FW_LOG_ID_I2C,
+ ICE_AQC_FW_LOG_ID_SDP,
+ ICE_AQC_FW_LOG_ID_MDIO,
+ ICE_AQC_FW_LOG_ID_ADMINQ,
+ ICE_AQC_FW_LOG_ID_HDMA,
+ ICE_AQC_FW_LOG_ID_LLDP,
+ ICE_AQC_FW_LOG_ID_DCBX,
+ ICE_AQC_FW_LOG_ID_DCB,
+ ICE_AQC_FW_LOG_ID_XLR,
+ ICE_AQC_FW_LOG_ID_NVM,
+ ICE_AQC_FW_LOG_ID_AUTH,
+ ICE_AQC_FW_LOG_ID_VPD,
+ ICE_AQC_FW_LOG_ID_IOSF,
+ ICE_AQC_FW_LOG_ID_PARSER,
+ ICE_AQC_FW_LOG_ID_SW,
+ ICE_AQC_FW_LOG_ID_SCHEDULER,
+ ICE_AQC_FW_LOG_ID_TXQ,
+ ICE_AQC_FW_LOG_ID_RSVD,
+ ICE_AQC_FW_LOG_ID_POST,
+ ICE_AQC_FW_LOG_ID_WATCHDOG,
+ ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ ICE_AQC_FW_LOG_ID_MNG,
+ ICE_AQC_FW_LOG_ID_SYNCE,
+ ICE_AQC_FW_LOG_ID_HEALTH,
+ ICE_AQC_FW_LOG_ID_TSDRV,
+ ICE_AQC_FW_LOG_ID_PFREG,
+ ICE_AQC_FW_LOG_ID_MDLVER,
+ ICE_AQC_FW_LOG_ID_MAX,
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+struct ice_aqc_fw_log {
+ u8 cmd_flags;
+#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
+#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
+
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ice_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
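A minimal sketch (not part of the patch) of how a Set FW Logging
Configuration (0xFF30) command might be assembled from the structures
above. ice_fill_dflt_direct_cmd_desc() and ice_aq_send_cmd() are the
driver's existing AQ helpers used elsewhere in this series; treating the
indirect buffer as an array of struct ice_aqc_fw_log_cfg_resp
module/level pairs is an assumption based on the comment block above.

static int
ice_aq_fwlog_set_sketch(struct ice_hw *hw,
			struct ice_aqc_fw_log_cfg_resp *entries,
			u16 num_entries, u16 log_resolution)
{
	struct ice_aqc_fw_log *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);	/* indirect write */

	cmd = &desc.params.fw_log;
	cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID |
			 ICE_AQC_FW_LOG_CONF_AQ_EN;
	cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
	cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, entries,
			       sizeof(*entries) * num_entries, NULL);
}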
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -2444,6 +2476,8 @@ struct ice_aq_desc {
struct ice_aqc_restart_an restart_an;
struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
+ struct ice_aqc_get_sensor_reading get_sensor_reading;
+ struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2481,8 +2515,6 @@ struct ice_aq_desc {
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_fw_logging fw_logging;
- struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_cgu_input_config set_cgu_input_config;
struct ice_aqc_get_cgu_input_config get_cgu_input_config;
@@ -2494,6 +2526,7 @@ struct ice_aq_desc {
struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
+ struct ice_aqc_fw_log fw_log;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
@@ -2619,6 +2652,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
+ ice_aqc_opc_get_sensor_reading = 0x0632,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2691,9 +2725,11 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
- /* debug commands */
- ice_aqc_opc_fw_logging = 0xFF09,
- ice_aqc_opc_fw_logging_info = 0xFF10,
+ /* FW Logging Commands */
+ ice_aqc_opc_fw_logs_config = 0xFF30,
+ ice_aqc_opc_fw_logs_register = 0xFF31,
+ ice_aqc_opc_fw_logs_query = 0xFF32,
+ ice_aqc_opc_fw_logs_event = 0xFF33,
};
#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 4f3e65b47..c979192e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -189,10 +189,16 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ }
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
+ }
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -224,24 +230,16 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
/* no need to update global register if ITR gran is already set */
if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
- (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
- GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
- GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
- GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
- (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
- GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_200_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_100_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_50_M, regval) == ICE_ITR_GRAN_US) &&
+ (FIELD_GET(GLINT_CTL_ITR_GRAN_25_M, regval) == ICE_ITR_GRAN_US))
return;
- regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
- GLINT_CTL_ITR_GRAN_200_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
- GLINT_CTL_ITR_GRAN_100_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
- GLINT_CTL_ITR_GRAN_50_M) |
- ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
- GLINT_CTL_ITR_GRAN_25_M);
+ regval = FIELD_PREP(GLINT_CTL_ITR_GRAN_200_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_100_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_50_M, ICE_ITR_GRAN_US) |
+ FIELD_PREP(GLINT_CTL_ITR_GRAN_25_M, ICE_ITR_GRAN_US);
wr32(hw, GLINT_CTL, regval);
}
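The recurring conversion in this series replaces open-coded
shift-and-mask arithmetic with FIELD_PREP()/FIELD_GET() from
<linux/bitfield.h>, which derive the shift from the mask at compile
time. A standalone sketch with a hypothetical register field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_FIELD_M	GENMASK(7, 4)	/* hypothetical 4-bit field */

static u32 demo_pack_unpack(u32 regval, u32 val)
{
	/* FIELD_GET(M, r) expands to (r & M) >> (shift implied by M) */
	u32 cur = FIELD_GET(DEMO_FIELD_M, regval);

	if (cur == val)
		return regval;

	regval &= ~DEMO_FIELD_M;
	/* FIELD_PREP(M, v) expands to (v << shift) & M */
	regval |= FIELD_PREP(DEMO_FIELD_M, val);

	return regval;
}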
@@ -278,7 +276,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
*/
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
- struct ice_vsi *vsi = ring->vsi;
+ const struct ice_vsi *vsi = ring->vsi;
int i;
ice_for_each_txq(vsi, i) {
@@ -519,6 +517,19 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
+static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+{
+ void *ctx_ptr = &ring->pkt_ctx;
+ struct xsk_cb_desc desc = {};
+
+ XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
+ desc.src = &ctx_ptr;
+ desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
+ sizeof(struct xdp_buff);
+ desc.bytes = sizeof(ctx_ptr);
+ xsk_pool_fill_cb(ring->xsk_pool, &desc);
+}
+
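The offsetof() arithmetic in ice_xsk_pool_fill_cb() above tells the XSK
pool to write the pkt_ctx pointer directly behind the shared xdp_buff in
every buffer it hands out. A sketch of the layout this assumes (the real
struct ice_xdp_buff lives in the driver's headers and may carry more
members):

struct ice_xdp_buff {
	struct xdp_buff xdp_buff;		/* must stay first */
	const struct ice_pkt_ctx *pkt_ctx;	/* filled via xsk_pool_fill_cb() */
};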
/**
* ice_vsi_cfg_rxq - Configure an Rx queue
* @ring: the ring being configured
@@ -561,6 +572,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -584,6 +596,7 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
ring->xdp.data = NULL;
+ ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
@@ -922,10 +935,10 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
struct ice_hw *hw = &pf->hw;
u32 val;
- itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
+ itr_idx = FIELD_PREP(QINT_TQCTL_ITR_INDX_M, itr_idx);
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
+ FIELD_PREP(QINT_TQCTL_MSIX_INDX_M, msix_idx);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -954,10 +967,10 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
struct ice_hw *hw = &pf->hw;
u32 val;
- itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
+ itr_idx = FIELD_PREP(QINT_RQCTL_ITR_INDX_M, itr_idx);
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
- ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
+ FIELD_PREP(QINT_RQCTL_MSIX_INDX_M, msix_idx);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
@@ -969,7 +982,7 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
* @hw: pointer to the HW structure
* @q_vector: interrupt vector to trigger the software interrupt for
*/
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
+void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector)
{
wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
(ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
@@ -1044,7 +1057,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_channel *ch = ring->ch;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b67dca417..17321ba75 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -22,12 +22,12 @@ void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
-void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
+void ice_trigger_sw_intr(struct ice_hw *hw, const struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index edac34c79..10c32cd80 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -934,216 +934,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
/**
- * ice_get_fw_log_cfg - get FW logging configuration
- * @hw: pointer to the HW struct
- */
-static int ice_get_fw_log_cfg(struct ice_hw *hw)
-{
- struct ice_aq_desc desc;
- __le16 *config;
- int status;
- u16 size;
-
- size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
- config = kzalloc(size, GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
-
- status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
- if (!status) {
- u16 i;
-
- /* Save FW logging information into the HW structure */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 v, m, flgs;
-
- v = le16_to_cpu(config[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
-
- if (m < ICE_AQC_FW_LOG_ID_MAX)
- hw->fw_log.evnts[m].cur = flgs;
- }
- }
-
- kfree(config);
-
- return status;
-}
-
-/**
- * ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the HW struct
- * @enable: enable certain FW logging events if true, disable all if false
- *
- * This function enables/disables the FW logging via Rx CQ events and a UART
- * port based on predetermined configurations. FW logging via the Rx CQ can be
- * enabled/disabled for individual PF's. However, FW logging via the UART can
- * only be enabled/disabled for all PFs on the same device.
- *
- * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
- * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
- * before initializing the device.
- *
- * When re/configuring FW logging, callers need to update the "cfg" elements of
- * the hw->fw_log.evnts array with the desired logging event configurations for
- * modules of interest. When disabling FW logging completely, the callers can
- * just pass false in the "enable" parameter. On completion, the function will
- * update the "cur" element of the hw->fw_log.evnts array with the resulting
- * logging event configurations of the modules that are being re/configured. FW
- * logging modules that are not part of a reconfiguration operation retain their
- * previous states.
- *
- * Before resetting the device, it is recommended that the driver disables FW
- * logging before shutting down the control queue. When disabling FW logging
- * ("enable" = false), the latest configurations of FW logging events stored in
- * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
- * a device reset.
- *
- * When enabling FW logging to emit log messages via the Rx CQ during the
- * device's initialization phase, a mechanism alternative to interrupt handlers
- * needs to be used to extract FW log messages from the Rx CQ periodically and
- * to prevent the Rx CQ from being full and stalling other types of control
- * messages from FW to SW. Interrupts are typically disabled during the device's
- * initialization phase.
- */
-static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
-{
- struct ice_aqc_fw_logging *cmd;
- u16 i, chgs = 0, len = 0;
- struct ice_aq_desc desc;
- __le16 *data = NULL;
- u8 actv_evnts = 0;
- void *buf = NULL;
- int status = 0;
-
- if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
- return 0;
-
- /* Disable FW logging only when the control queue is still responsive */
- if (!enable &&
- (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
- return 0;
-
- /* Get current FW log settings */
- status = ice_get_fw_log_cfg(hw);
- if (status)
- return status;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
- cmd = &desc.params.fw_logging;
-
- /* Indicate which controls are valid */
- if (hw->fw_log.cq_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
-
- if (enable) {
- /* Fill in an array of entries with FW logging modules and
- * logging events being reconfigured.
- */
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- u16 val;
-
- /* Keep track of enabled event types */
- actv_evnts |= hw->fw_log.evnts[i].cfg;
-
- if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
- continue;
-
- if (!data) {
- data = devm_kcalloc(ice_hw_to_dev(hw),
- ICE_AQC_FW_LOG_ID_MAX,
- sizeof(*data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- }
-
- val = i << ICE_AQC_FW_LOG_ID_S;
- val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
- data[chgs++] = cpu_to_le16(val);
- }
-
- /* Only enable FW logging if at least one module is specified.
- * If FW logging is currently enabled but all modules are not
- * enabled to emit log messages, disable FW logging altogether.
- */
- if (actv_evnts) {
- /* Leave if there is effectively no change */
- if (!chgs)
- goto out;
-
- if (hw->fw_log.cq_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
-
- if (hw->fw_log.uart_en)
- cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
-
- buf = data;
- len = sizeof(*data) * chgs;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- }
- }
-
- status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
- if (!status) {
- /* Update the current configuration to reflect events enabled.
- * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
- * logging mode is enabled for the device. They do not reflect
- * actual modules being enabled to emit log messages. So, their
- * values remain unchanged even when all modules are disabled.
- */
- u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
-
- hw->fw_log.actv_evnts = actv_evnts;
- for (i = 0; i < cnt; i++) {
- u16 v, m;
-
- if (!enable) {
- /* When disabling all FW logging events as part
- * of device's de-initialization, the original
- * configurations are retained, and can be used
- * to reconfigure FW logging later if the device
- * is re-initialized.
- */
- hw->fw_log.evnts[i].cur = 0;
- continue;
- }
-
- v = le16_to_cpu(data[i]);
- m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
- hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
- }
- }
-
-out:
- devm_kfree(ice_hw_to_dev(hw), data);
-
- return status;
-}
-
-/**
- * ice_output_fw_log
- * @hw: pointer to the HW struct
- * @desc: pointer to the AQ message descriptor
- * @buf: pointer to the buffer accompanying the AQ message
- *
- * Formats a FW Log message and outputs it via the standard driver logs.
- */
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
-{
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
- ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
- le16_to_cpu(desc->datalen));
- ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
-}
-
-/**
* ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
@@ -1152,9 +942,8 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
- u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
- GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
- GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+ u8 max_agg_bw = FIELD_GET(GL_PWR_MODE_CTL_CAR_MAX_BW_M,
+ rd32(hw, GL_PWR_MODE_CTL));
switch (max_agg_bw) {
case ICE_MAX_AGG_BW_200G:
@@ -1186,9 +975,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
- PF_FUNC_RID_FUNC_NUM_M) >>
- PF_FUNC_RID_FUNC_NUM_S;
+ hw->pf_id = FIELD_GET(PF_FUNC_RID_FUNC_NUM_M, rd32(hw, PF_FUNC_RID));
status = ice_reset(hw, ICE_RESET_PFR);
if (status)
@@ -1200,10 +987,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- /* Enable FW logging. Not fatal if this fails. */
- status = ice_cfg_fw_log(hw, true);
+ status = ice_fwlog_init(hw);
if (status)
- ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
+ ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
+ status);
status = ice_clear_pf_cfg(hw);
if (status)
@@ -1354,8 +1141,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- /* Attempt to disable FW logging before shutting down control queues */
- ice_cfg_fw_log(hw, false);
+ ice_fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1374,8 +1160,8 @@ int ice_check_reset(struct ice_hw *hw)
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = FIELD_GET(GLGEN_RSTCTL_GRSTDEL_M,
+ rd32(hw, GLGEN_RSTCTL)) + 10;
for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
@@ -2459,7 +2245,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
@@ -2660,11 +2446,12 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
- info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+ info->tmr1_owner = FIELD_GET(ICE_TS_TMR1_OWNR_M, number);
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+ info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2685,6 +2472,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_ena);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
info->ts_ll_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
+ info->ts_ll_int_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2711,6 +2500,26 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
+ * enabled sensors.
+ */
+static void
+ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->supported_sensors = le32_to_cpu(cap->number);
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "dev caps: supported sensors (bitmap) = 0x%x\n",
+ dev_p->supported_sensors);
+}
+
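A hedged sketch of how a consumer might test the bitmap parsed above;
the bit assignment is an assumption here, since this patch only stores
the raw le32 from the capability element:

#define ICE_SENSOR_INTERNAL_TEMP_BIT	BIT(0)	/* assumed bit layout */

static bool ice_internal_temp_supported(const struct ice_hw_dev_caps *caps)
{
	return caps->supported_sensors & ICE_SENSOR_INTERNAL_TEMP_BIT;
}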
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2755,9 +2564,12 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_SENSOR_READING:
+ ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -4072,6 +3884,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
+ u16 i2c_bus_addr;
int status;
if (!data || (mem_addr & 0xff00))
@@ -4082,15 +3895,13 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
- cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
- ICE_AQC_SFF_I2CBUS_7BIT_M) |
- ((set_page <<
- ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
- ICE_AQC_SFF_SET_EEPROM_PAGE_M));
- cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
- cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
+ FIELD_PREP(ICE_AQC_SFF_SET_EEPROM_PAGE_M, set_page);
if (write)
- cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
+ i2c_bus_addr |= ICE_AQC_SFF_IS_WRITE;
+ cmd->i2c_bus_addr = cpu_to_le16(i2c_bus_addr);
+ cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
+ cmd->eeprom_page = le16_encode_bits(page, ICE_AQC_SFF_EEPROM_PAGE_M);
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
return status;
@@ -4345,6 +4156,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
+ u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
@@ -4360,27 +4172,26 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
cmd->num_entries = num_qgrps;
- cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
- ICE_AQC_Q_DIS_TIMEOUT_M);
+ vmvf_and_timeout = FIELD_PREP(ICE_AQC_Q_DIS_TIMEOUT_M, 5);
switch (rst_src) {
case ICE_VM_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
- cmd->vmvf_and_timeout |=
- cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ vmvf_and_timeout |= vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M;
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
/* In this case, FW expects vmvf_num to be absolute VF ID */
- cmd->vmvf_and_timeout |=
- cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
- ICE_AQC_Q_DIS_VMVF_NUM_M);
+ vmvf_and_timeout |= (vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M;
break;
case ICE_NO_RESET:
default:
break;
}
+ cmd->vmvf_and_timeout = cpu_to_le16(vmvf_and_timeout);
+
/* flush pipe on time out */
cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
/* If no queue group info, we are in a reset flow. Issue the AQ */
@@ -4455,10 +4266,8 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
- cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
- ICE_AQC_Q_CFG_DST_PRT_M;
- cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
- ICE_AQC_Q_CFG_TIMEOUT_M;
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -5539,6 +5348,35 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
}
/**
+ * ice_aq_get_sensor_reading
+ * @hw: pointer to the HW struct
+ * @data: pointer to data to be read from the sensor
+ *
+ * Get sensor reading (0x0632)
+ */
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data)
+{
+ struct ice_aqc_get_sensor_reading *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
+ cmd = &desc.params.get_sensor_reading;
+#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
+#define ICE_INTERNAL_TEMP_SENSOR 0
+ cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
+ cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ memcpy(data, &desc.params.get_sensor_reading_resp,
+ sizeof(*data));
+
+ return status;
+}
+
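Usage sketch for the new command, along the lines of what a hwmon
consumer would do; the s0f0.temp field name follows the response layout
added to ice_adminq_cmd.h and is an assumption as far as this hunk is
concerned:

static int ice_read_internal_temp(struct ice_hw *hw, long *val)
{
	struct ice_aqc_get_sensor_reading_resp resp;
	int err;

	err = ice_aq_get_sensor_reading(hw, &resp);
	if (err)
		return err;

	/* sensor 0, format 0: temperature in degrees Celsius */
	*val = resp.data.s0f0.temp;
	return 0;
}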
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
@@ -5933,7 +5771,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
- ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->options = FIELD_GET(ICE_LINK_OVERRIDE_OPT_M, buf);
ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
ICE_LINK_OVERRIDE_PHY_CFG_S;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 31fdcac33..3e933f75e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
+#include "ice.h"
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
@@ -199,7 +200,6 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
-void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
@@ -241,6 +241,8 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle);
+int ice_aq_get_sensor_reading(struct ice_hw *hw,
+ struct ice_aqc_get_sensor_reading_resp *data);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 396e55502..74418c445 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -35,8 +35,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
- cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M;
+ cmd->type |= FIELD_PREP(ICE_AQ_LLDP_BRID_TYPE_M, bridge_type);
desc.datalen = cpu_to_le16(buf_size);
@@ -147,8 +146,7 @@ static u8 ice_get_dcbx_status(struct ice_hw *hw)
u32 reg;
reg = rd32(hw, PRTDCB_GENS);
- return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
- PRTDCB_GENS_DCBX_STATUS_S);
+ return FIELD_GET(PRTDCB_GENS_DCBX_STATUS_M, reg);
}
/**
@@ -174,11 +172,9 @@ ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
*/
for (i = 0; i < 4; i++) {
ets_cfg->prio_table[i * 2] =
- ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
- ICE_IEEE_ETS_PRIO_1_S);
+ FIELD_GET(ICE_IEEE_ETS_PRIO_1_M, buf[offset]);
ets_cfg->prio_table[i * 2 + 1] =
- ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
- ICE_IEEE_ETS_PRIO_0_S);
+ FIELD_GET(ICE_IEEE_ETS_PRIO_0_M, buf[offset]);
offset++;
}
@@ -222,11 +218,9 @@ ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
* |1bit | 1bit|3 bits|3bits|
*/
etscfg = &dcbcfg->etscfg;
- etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
- ICE_IEEE_ETS_WILLING_S);
- etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
- etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
- ICE_IEEE_ETS_MAXTC_S);
+ etscfg->willing = FIELD_GET(ICE_IEEE_ETS_WILLING_M, buf[0]);
+ etscfg->cbs = FIELD_GET(ICE_IEEE_ETS_CBS_M, buf[0]);
+ etscfg->maxtcs = FIELD_GET(ICE_IEEE_ETS_MAXTC_M, buf[0]);
/* Begin parsing at Priority Assignment Table (offset 1 in buf) */
ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
@@ -268,11 +262,9 @@ ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
* -----------------------------------------
* |1bit | 1bit|2 bits|4bits| 1 octet |
*/
- dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
- ICE_IEEE_PFC_WILLING_S);
- dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
- dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
- ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.willing = FIELD_GET(ICE_IEEE_PFC_WILLING_M, buf[0]);
+ dcbcfg->pfc.mbc = FIELD_GET(ICE_IEEE_PFC_MBC_M, buf[0]);
+ dcbcfg->pfc.pfccap = FIELD_GET(ICE_IEEE_PFC_CAP_M, buf[0]);
dcbcfg->pfc.pfcena = buf[1];
}
@@ -294,7 +286,7 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
u8 *buf;
typelen = ntohs(tlv->typelen);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
buf = tlv->tlvinfo;
/* Removing sizeof(ouisubtype) and reserved byte from len.
@@ -314,12 +306,10 @@ ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
* -----------------------------------------
*/
while (offset < len) {
- dcbcfg->app[i].priority = ((buf[offset] &
- ICE_IEEE_APP_PRIO_M) >>
- ICE_IEEE_APP_PRIO_S);
- dcbcfg->app[i].selector = ((buf[offset] &
- ICE_IEEE_APP_SEL_M) >>
- ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].priority = FIELD_GET(ICE_IEEE_APP_PRIO_M,
+ buf[offset]);
+ dcbcfg->app[i].selector = FIELD_GET(ICE_IEEE_APP_SEL_M,
+ buf[offset]);
dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
buf[offset + 2];
/* Move to next app */
@@ -347,8 +337,7 @@ ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u8 subtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
- ICE_LLDP_TLV_SUBTYPE_S);
+ subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype);
switch (subtype) {
case ICE_IEEE_SUBTYPE_ETS_CFG:
ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
@@ -399,11 +388,9 @@ ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
*/
for (i = 0; i < 4; i++) {
etscfg->prio_table[i * 2] =
- ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
- ICE_CEE_PGID_PRIO_1_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_1_M, buf[offset]);
etscfg->prio_table[i * 2 + 1] =
- ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
- ICE_CEE_PGID_PRIO_0_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_0_M, buf[offset]);
offset++;
}
@@ -466,7 +453,7 @@ ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u8 i;
typelen = ntohs(tlv->hdr.typelen);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
dcbcfg->numapps = len / sizeof(*app);
if (!dcbcfg->numapps)
@@ -521,14 +508,13 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
- subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
- ICE_LLDP_TLV_SUBTYPE_S);
+ subtype = FIELD_GET(ICE_LLDP_TLV_SUBTYPE_M, ouisubtype);
/* Return if not CEE DCBX */
if (subtype != ICE_CEE_DCBX_TYPE)
return;
typelen = ntohs(tlv->typelen);
- tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ tlvlen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
sizeof(struct ice_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
@@ -540,9 +526,8 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u16 sublen;
typelen = ntohs(sub_tlv->hdr.typelen);
- sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
- subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
- ICE_LLDP_TLV_TYPE_S);
+ sublen = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
+ subtype = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen);
switch (subtype) {
case ICE_CEE_SUBTYPE_PG_CFG:
ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
@@ -579,7 +564,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
u32 oui;
ouisubtype = ntohl(tlv->ouisubtype);
- oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ oui = FIELD_GET(ICE_LLDP_TLV_OUI_M, ouisubtype);
switch (oui) {
case ICE_IEEE_8021QAZ_OUI:
ice_parse_ieee_tlv(tlv, dcbcfg);
@@ -616,8 +601,8 @@ static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
tlv = (struct ice_lldp_org_tlv *)lldpmib;
while (1) {
typelen = ntohs(tlv->typelen);
- type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
- len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ type = FIELD_GET(ICE_LLDP_TLV_TYPE_M, typelen);
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += sizeof(typelen) + len;
/* END TLV or beyond LLDPDU size */
@@ -806,11 +791,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*/
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
dcbcfg->etscfg.prio_table[i * 2] =
- ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
- ICE_CEE_PGID_PRIO_0_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_0_M,
+ cee_cfg->oper_prio_tc[i]);
dcbcfg->etscfg.prio_table[i * 2 + 1] =
- ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
- ICE_CEE_PGID_PRIO_1_S);
+ FIELD_GET(ICE_CEE_PGID_PRIO_1_M,
+ cee_cfg->oper_prio_tc[i]);
}
ice_for_each_traffic_class(i) {
@@ -982,7 +967,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
- change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
+ change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
@@ -1483,7 +1468,7 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
while (1) {
ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
typelen = ntohs(tlv->typelen);
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
if (len)
offset += len + 2;
/* END TLV or beyond LLDPDU size */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 850db8e0e..6e20ee610 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -934,7 +934,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
skb->priority != TC_PRIO_CONTROL) {
first->vid &= ~VLAN_PRIO_MASK;
/* Mask the lower 3 bits to set the 802.1p priority */
- first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+ first->vid |= FIELD_PREP(VLAN_PRIO_MASK, skb->priority);
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index e1fbc6de4..6d50b90a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -227,7 +227,7 @@ static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, PRTDCB_GENC);
- *delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
+ *delay = FIELD_GET(PRTDCB_GENC_PFCLDA_M, val);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
new file mode 100644
index 000000000..c2bfba6b9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include "ice.h"
+
+static struct dentry *ice_debugfs_root;
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
+ */
+static const char * const ice_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in ice_fwlog_level
+ */
+static const char * const ice_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+/* the order in this array is important. it matches the ring-size
+ * selectors in the FW, so the index is the same value stored in
+ * hw->fwlog_ring.index and passed to ice_fwlog_realloc_rings()
+ */
+static const char * const ice_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+/**
+ * ice_fwlog_print_module_cfg - print current FW logging module configuration
+ * @hw: pointer to the HW structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
+{
+ struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
+ struct ice_fwlog_module_entry *entry;
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ ice_fwlog_module_string[entry->module_id],
+ ice_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * ice_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int ice_debugfs_module_show(struct seq_file *s, void *v)
+{
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ struct ice_pf *pf;
+ int module;
+
+ dentry = file_dentry(filp);
+ pf = s->private;
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(ice_pf_to_dev(pf), "unknown module\n");
+ return -EINVAL;
+ }
+
+ ice_fwlog_print_module_cfg(&pf->hw, module, s);
+
+ return 0;
+}
+
+static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, ice_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * ice_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = ice_pf_to_dev(pf);
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ module = ice_find_module_by_dentry(pf, dentry);
+ if (module < 0) {
+ dev_info(dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ cnt = sscanf(cmd_buf, "%s", user_val);
+ if (cnt != 1)
+ return -EINVAL;
+
+ log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != ICE_AQC_FW_LOG_ID_MAX) {
+ ice_pf_fwlog_update_module(pf, log_level, module);
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
+ ice_pf_fwlog_update_module(pf, log_level, i);
+ }
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = ice_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = ice_debugfs_module_write,
+};
+
+/**
+ * ice_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ hw->fwlog_cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
+ ICE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ hw->fwlog_cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations ice_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_nr_messages_read,
+ .write = ice_debugfs_nr_messages_write,
+};
+
+/**
+ * ice_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+
+ /* report the ICE_FWLOG_OPTION_IS_REGISTERED bit (BIT(3)) as 1/0 */
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(hw->fwlog_cfg.options &
+ ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = ice_fwlog_register(hw);
+ else
+ ret = ice_fwlog_unregister(hw);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_enable_read,
+ .write = ice_debugfs_enable_write,
+};
+
+/**
+ * ice_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ char buff[32] = {};
+ int index;
+
+ index = hw->fwlog_ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * ice_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+ ret = sscanf(cmd_buf, "%s", user_val);
+ kfree(cmd_buf);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = sysfs_match_string(ice_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ ice_fwlog_realloc_rings(hw, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_log_size_read,
+ .write = ice_debugfs_log_size_write,
+};
+
+/**
+ * ice_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct ice_hw *hw = &pf->hw;
+ int data_copied = 0;
+ bool done = false;
+
+ if (ice_fwlog_ring_empty(&hw->fwlog_ring))
+ return 0;
+
+ while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
+ struct ice_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+
+ return data_copied;
+}
+
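The read loop above drains a head/tail ring owned by ice_fwlog.c; a
sketch of the increment helper it leans on, assuming the ring size is a
power of two:

static void ice_fwlog_ring_increment(u16 *item, u16 size)
{
	*item = (*item + 1) & (size - 1);	/* wrap without a modulo */
}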
+/**
+ * ice_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct ice_pf *pf = filp->private_data;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations ice_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ice_debugfs_data_read,
+ .write = ice_debugfs_data_write,
+};
+
+/**
+ * ice_debugfs_fwlog_init - setup the debugfs directory
+ * @pf: the ice PF that is starting up
+ */
+void ice_debugfs_fwlog_init(struct ice_pf *pf)
+{
+ const char *name = pci_name(pf->pdev);
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* only support fw log commands on PF 0 */
+ if (pf->hw.bus.func)
+ return;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
+ if (IS_ERR(pf->ice_debugfs_pf))
+ goto err_create_module_files;
+
+ pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
+ pf->ice_debugfs_pf);
+ if (IS_ERR(pf->ice_debugfs_pf_fwlog))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules",
+ pf->ice_debugfs_pf_fwlog);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
+ 0600, fw_modules_dir, pf,
+ &ice_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600,
+ pf->ice_debugfs_pf_fwlog, pf,
+ &ice_debugfs_nr_messages_fops);
+
+ pf->ice_debugfs_pf_fwlog_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
+ pf, &ice_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
+ kfree(fw_modules);
+}
+
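The resulting debugfs layout, assuming a PF at PCI address 0000:17:00.0
(the "ice" root comes from KBUILD_MODNAME in ice_debugfs_init() below;
the address is illustrative):

/sys/kernel/debug/ice/0000:17:00.0/fwlog/
	enable		- register/unregister FW logging
	log_size	- one of 128K, 256K, 512K, 1M, 2M
	nr_messages	- events per AQ message, 1 to 128
	data		- read log contents; write anything to clear
	modules/	- one file per module (general, link, ..., all)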
+/**
+ * ice_debugfs_init - create root directory for debugfs entries
+ */
+void ice_debugfs_init(void)
+{
+ ice_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(ice_debugfs_root))
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * ice_debugfs_exit - remove debugfs entries
+ */
+void ice_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ice_debugfs_root);
+ ice_debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 80dc5445b..65be56f2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -193,6 +193,24 @@ ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
}
+static void ice_info_cgu_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ u32 id, cfg_ver, fw_ver;
+
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ if (ice_aq_get_cgu_info(&pf->hw, &id, &cfg_ver, &fw_ver))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", id, cfg_ver, fw_ver);
+}
+
+static void ice_info_cgu_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ if (!ice_is_feature_supported(pf, ICE_F_CGU))
+ return;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u", pf->hw.cgu_part_number);
+}
+
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
@@ -235,6 +253,8 @@ static const struct ice_devlink_version {
running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
+ fixed("cgu.id", ice_info_cgu_id),
+ running("fw.cgu", ice_info_cgu_fw_build),
};
/**
@@ -810,6 +830,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_vf *vf;
int i;
+ if (node->rate_node)
+ /* already added, skip to the next */
+ goto traverse_children;
+
if (node->parent == tc_node) {
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
@@ -831,6 +855,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
if (rate_node && !IS_ERR(rate_node))
node->rate_node = rate_node;
+traverse_children:
for (i = 0; i < node->num_children; i++)
ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
}
@@ -861,6 +886,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
return 0;
}
+static void ice_clear_rate_nodes(struct ice_sched_node *node)
+{
+ node->rate_node = NULL;
+
+ for (int i = 0; i < node->num_children; i++)
+ ice_clear_rate_nodes(node->children[i]);
+}
+
+/**
+ * ice_devlink_rate_clear_tx_topology - clear node->rate_node
+ * @vsi: main vsi struct
+ *
+ * Clear rate_node to cleanup creation of Tx topology.
+ *
+ */
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
+{
+ struct ice_port_info *pi = vsi->port_info;
+
+ mutex_lock(&pi->sched_lock);
+ ice_clear_rate_nodes(pi->root->children[0]);
+ mutex_unlock(&pi->sched_lock);
+}
+
/**
* ice_set_object_tx_share - sets node scheduling parameter
* @pi: devlink struct instance
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index 6ec96779f..d291c0e2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf);
int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
void ice_tear_down_devlink_rate_tree(struct ice_pf *pf);
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi);
#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 68b894bb6..bd9b1fed7 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -552,31 +552,6 @@ ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
- * ice_dpll_mode_supported - check if dpll's working mode is supported
- * @dpll: registered dpll pointer
- * @dpll_priv: private data pointer passed on dpll registration
- * @mode: mode to be checked for support
- * @extack: error reporting
- *
- * Dpll subsystem callback. Provides information if working mode is supported
- * by dpll.
- *
- * Return:
- * * true - mode is supported
- * * false - mode is not supported
- */
-static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
- void *dpll_priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- if (mode == DPLL_MODE_AUTOMATIC)
- return true;
-
- return false;
-}
-
-/**
* ice_dpll_mode_get - get dpll's working mode
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
@@ -1259,7 +1234,6 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
static const struct dpll_device_ops ice_dpll_ops = {
.lock_status_get = ice_dpll_lock_status_get,
- .mode_supported = ice_dpll_mode_supported,
.mode_get = ice_dpll_mode_get,
};
@@ -1623,7 +1597,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
- netdev_dpll_pin_clear(vsi->netdev);
+ dpll_netdev_pin_clear(vsi->netdev);
dpll_pin_put(rclk->pin);
}
@@ -1667,7 +1641,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (WARN_ON((!vsi || !vsi->netdev)))
return -EINVAL;
- netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index a655d499a..9069725c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -11,17 +11,34 @@
#include "ice_tc_lib.h"
/**
- * ice_eswitch_add_vf_sp_rule - add adv rule with VF's VSI index
+ * ice_eswitch_del_sp_rules - delete adv rules added on PRs
+ * @pf: pointer to the PF struct
+ *
+ * Delete all advanced rules that were used to forward packets with the
+ * device's VSI index to the corresponding eswitch ctrl VSI queue.
+ */
+static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&pf->eswitch.reprs, id, repr) {
+ if (repr->sp_rule.rid)
+ ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
+ }
+}
+
+/**
+ * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
* @pf: pointer to PF struct
- * @vf: pointer to VF struct
+ * @repr: pointer to the repr struct
*
* This function adds advanced rule that forwards packets with
- * VF's VSI index to the corresponding switchdev ctrl VSI queue.
+ * device's VSI index to the corresponding eswitch ctrl VSI queue.
*/
-static int
-ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
+static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_adv_lkup_elem *list;
struct ice_hw *hw = &pf->hw;
@@ -38,39 +55,42 @@ ice_eswitch_add_vf_sp_rule(struct ice_pf *pf, struct ice_vf *vf)
rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
- ctrl_vsi->rxq_map[vf->vf_id];
+ ctrl_vsi->rxq_map[repr->q_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
- rule_info.src_vsi = vf->lan_vsi_idx;
+ rule_info.src_vsi = repr->src_vsi->idx;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- &vf->repr->sp_rule);
+ &repr->sp_rule);
if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add VF slow-path rule in switchdev mode for VF %d",
- vf->vf_id);
+ dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
+ repr->id);
kfree(list);
return err;
}
-/**
- * ice_eswitch_del_vf_sp_rule - delete adv rule with VF's VSI index
- * @vf: pointer to the VF struct
- *
- * Delete the advanced rule that was used to forward packets with the VF's VSI
- * index to the corresponding switchdev ctrl VSI queue.
- */
-static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
+static int
+ice_eswitch_add_sp_rules(struct ice_pf *pf)
{
- if (!vf->repr)
- return;
+ struct ice_repr *repr;
+ unsigned long id;
+ int err;
+
+ xa_for_each(&pf->eswitch.reprs, id, repr) {
+ err = ice_eswitch_add_sp_rule(pf, repr);
+ if (err) {
+ ice_eswitch_del_sp_rules(pf);
+ return err;
+ }
+ }
- ice_rem_adv_rule_by_id(&vf->pf->hw, &vf->repr->sp_rule);
+ return 0;
}
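
The bulk-add helper above is all-or-nothing: on the first failure it calls
ice_eswitch_del_sp_rules(), which is safe to run against representors whose
rule was never installed because it checks sp_rule.rid first. The same
rollback shape in isolation, with illustrative names:

/* Sketch; struct item, add_one() and del_all() are assumptions. */
static int add_all(struct xarray *items)
{
	struct item *it;
	unsigned long id;
	int err;

	xa_for_each(items, id, it) {
		err = add_one(it);
		if (err) {
			del_all(items);	/* tolerates never-added items */
			return err;
		}
	}
	return 0;
}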
/**
- * ice_eswitch_setup_env - configure switchdev HW filters
+ * ice_eswitch_setup_env - configure eswitch HW filters
* @pf: pointer to PF struct
*
* This function adds HW filter configuration specific to switchdev
@@ -78,18 +98,18 @@ static void ice_eswitch_del_vf_sp_rule(struct ice_vf *vf)
*/
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
- struct net_device *uplink_netdev = uplink_vsi->netdev;
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct net_device *netdev = uplink_vsi->netdev;
struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
- netif_addr_lock_bh(uplink_netdev);
- __dev_uc_unsync(uplink_netdev, NULL);
- __dev_mc_unsync(uplink_netdev, NULL);
- netif_addr_unlock_bh(uplink_netdev);
+ netif_addr_lock_bh(netdev);
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+ netif_addr_unlock_bh(netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
@@ -132,19 +152,20 @@ err_def_rx:
}
/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
- * @pf: pointer to PF struct
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
+ * @eswitch: pointer to eswitch struct
*
- * In switchdev number of allocated Tx/Rx rings is equal.
+ * In eswitch mode the numbers of allocated Tx and Rx rings are equal.
*
* This function fills the q_vector structures associated with each
* representor and moves each ring pair to the port representor netdev. Each
* port representor gets one dedicated Tx/Rx ring pair, so the number of ring
* pairs equals the number of VFs.
*/
-static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
{
- struct ice_vsi *vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *vsi = eswitch->control_vsi;
+ unsigned long repr_id = 0;
int q_id;
ice_for_each_txq(vsi, q_id) {
@@ -152,13 +173,14 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
struct ice_repr *repr;
- struct ice_vf *vf;
- vf = ice_get_vf_by_id(pf, q_id);
- if (WARN_ON(!vf))
- continue;
+ repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
+ XA_PRESENT);
+ if (!repr)
+ break;
- repr = vf->repr;
+ repr_id += 1;
+ repr->q_id = q_id;
q_vector = repr->q_vector;
tx_ring = vsi->tx_rings[q_id];
rx_ring = vsi->rx_rings[q_id];
@@ -181,136 +203,96 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
rx_ring->q_vector = q_vector;
rx_ring->next = NULL;
rx_ring->netdev = repr->netdev;
-
- ice_put_vf(vf);
}
}
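
One subtlety in the remap loop above: xa_find() returns the entry at the
lowest index greater than or equal to *repr_id and writes that index back
through the pointer, so the caller must bump the cursor itself (repr_id += 1)
or it would keep fetching the same representor. The cursor walk as a
stand-alone sketch (xa, struct item and use() are illustrative):

unsigned long cursor = 0;
struct item *it;

while ((it = xa_find(&xa, &cursor, ULONG_MAX, XA_PRESENT))) {
	use(it, cursor);
	cursor++;	/* step past the entry just returned */
}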
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * ice_eswitch_release_repr - clear PR VSI configuration
* @pf: pointer to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
+ * @repr: pointer to PR
*/
static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ struct ice_vsi *vsi = repr->src_vsi;
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_vsi *vsi = vf->repr->src_vsi;
-
- /* Skip VFs that aren't configured */
- if (!vf->repr->dst)
- continue;
+ /* Skip representors that aren't configured */
+ if (!repr->dst)
+ return;
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- ice_eswitch_del_vf_sp_rule(vf);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(repr->dst);
+ repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
+ ICE_FWD_TO_VSI);
- netif_napi_del(&vf->repr->q_vector->napi);
- }
+ netif_napi_del(&repr->q_vector->napi);
}
/**
- * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * ice_eswitch_setup_repr - configure PR to run in switchdev mode
* @pf: pointer to PF struct
+ * @repr: pointer to PR struct
*/
-static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- int max_vsi_num = 0;
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_vsi *vsi = vf->repr->src_vsi;
-
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
- vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
- GFP_KERNEL);
- if (!vf->repr->dst) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- goto err;
- }
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
- if (ice_eswitch_add_vf_sp_rule(pf, vf)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- goto err;
- }
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!repr->dst)
+ goto err_add_mac_fltr;
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- ice_eswitch_del_vf_sp_rule(vf);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- goto err;
- }
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
+ goto err_dst_free;
- if (ice_vsi_add_vlan_zero(vsi)) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- ice_eswitch_del_vf_sp_rule(vf);
- metadata_dst_free(vf->repr->dst);
- vf->repr->dst = NULL;
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- goto err;
- }
-
- if (max_vsi_num < vsi->vsi_num)
- max_vsi_num = vsi->vsi_num;
+ if (ice_vsi_add_vlan_zero(vsi))
+ goto err_update_security;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
- ice_napi_poll);
+ netif_napi_add(repr->netdev, &repr->q_vector->napi,
+ ice_napi_poll);
- netif_keep_dst(vf->repr->netdev);
- }
+ netif_keep_dst(repr->netdev);
- ice_for_each_vf(pf, bkt, vf) {
- struct ice_repr *repr = vf->repr;
- struct ice_vsi *vsi = repr->src_vsi;
- struct metadata_dst *dst;
-
- dst = repr->dst;
- dst->u.port_info.port_id = vsi->vsi_num;
- dst->u.port_info.lower_dev = repr->netdev;
- ice_repr_set_traffic_vsi(repr, ctrl_vsi);
- }
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = repr->netdev;
+ ice_repr_set_traffic_vsi(repr, ctrl_vsi);
return 0;
-err:
- ice_eswitch_release_reprs(pf, ctrl_vsi);
+err_update_security:
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+err_dst_free:
+ metadata_dst_free(repr->dst);
+ repr->dst = NULL;
+err_add_mac_fltr:
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
return -ENODEV;
}
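
The rewritten ice_eswitch_setup_repr() replaces four copies of hand-rolled
cleanup with the kernel's usual goto unwind ladder: each failure jumps to
the label that undoes exactly the steps completed so far, in reverse order.
The canonical shape, as an illustrative sketch:

static int setup(void)
{
	if (step_a())
		goto err;		/* nothing to undo yet */
	if (step_b())
		goto err_undo_a;
	if (step_c())
		goto err_undo_b;
	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
err:
	return -ENODEV;
}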
/**
- * ice_eswitch_update_repr - reconfigure VF port representor
- * @vsi: VF VSI for which port representor is configured
+ * ice_eswitch_update_repr - reconfigure port representor
+ * @repr_id: representor ID
+ * @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- struct ice_vf *vf;
int ret;
if (!ice_is_switchdev_running(pf))
return;
- vf = vsi->vf;
- repr = vf->repr;
+ repr = xa_load(&pf->eswitch.reprs, repr_id);
+ if (!repr)
+ return;
+
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -319,9 +301,10 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
- vsi->vf->vf_id);
+ ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
+ ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
+ repr->id);
}
}
@@ -353,13 +336,13 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_dst_drop(skb);
dst_hold((struct dst_entry *)repr->dst);
skb_dst_set(skb, (struct dst_entry *)repr->dst);
- skb->queue_mapping = repr->vf->vf_id;
+ skb->queue_mapping = repr->q_id;
return ice_start_xmit(skb, netdev);
}
/**
- * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
* @skb: pointer to send buffer
* @off: pointer to offload struct
*/
@@ -375,14 +358,14 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
- dst_vsi = ((u64)dst->u.port_info.port_id <<
- ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
+ dst->u.port_info.port_id);
off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
}
}
/**
- * ice_eswitch_release_env - clear switchdev HW filters
+ * ice_eswitch_release_env - clear eswitch HW filters
* @pf: pointer to PF struct
*
* This function removes HW filter configuration specific to switchdev
@@ -390,8 +373,8 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
*/
static void ice_eswitch_release_env(struct ice_pf *pf)
{
- struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
@@ -407,7 +390,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
}
/**
- * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * ice_eswitch_vsi_setup - configure eswitch control VSI
* @pf: pointer to PF structure
* @pi: pointer to port_info structure
*/
@@ -424,48 +407,29 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
- * ice_eswitch_napi_del - remove NAPI handle for all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_napi_del(struct ice_pf *pf)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- netif_napi_del(&vf->repr->q_vector->napi);
-}
-
-/**
* ice_eswitch_napi_enable - enable NAPI for all port representors
- * @pf: pointer to PF structure
+ * @reprs: xarray of reprs
*/
-static void ice_eswitch_napi_enable(struct ice_pf *pf)
+static void ice_eswitch_napi_enable(struct xarray *reprs)
{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
+ struct ice_repr *repr;
+ unsigned long id;
- ice_for_each_vf(pf, bkt, vf)
- napi_enable(&vf->repr->q_vector->napi);
+ xa_for_each(reprs, id, repr)
+ napi_enable(&repr->q_vector->napi);
}
/**
* ice_eswitch_napi_disable - disable NAPI for all port representors
- * @pf: pointer to PF structure
+ * @reprs: xarray of reprs
*/
-static void ice_eswitch_napi_disable(struct ice_pf *pf)
+static void ice_eswitch_napi_disable(struct xarray *reprs)
{
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
+ struct ice_repr *repr;
+ unsigned long id;
- ice_for_each_vf(pf, bkt, vf)
- napi_disable(&vf->repr->q_vector->napi);
+ xa_for_each(reprs, id, repr)
+ napi_disable(&repr->q_vector->napi);
}
/**
@@ -486,39 +450,26 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
return -EINVAL;
}
- pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
- if (!pf->switchdev.control_vsi)
+ pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+ if (!pf->eswitch.control_vsi)
return -ENODEV;
- ctrl_vsi = pf->switchdev.control_vsi;
- pf->switchdev.uplink_vsi = uplink_vsi;
+ ctrl_vsi = pf->eswitch.control_vsi;
+ /* cp VSI is created with 1 queue by default */
+ pf->eswitch.qs.value = 1;
+ pf->eswitch.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
goto err_vsi;
- if (ice_repr_add_for_all_vfs(pf))
- goto err_repr_add;
-
- if (ice_eswitch_setup_reprs(pf))
- goto err_setup_reprs;
-
- ice_eswitch_remap_rings_to_vectors(pf);
-
- if (ice_vsi_open(ctrl_vsi))
- goto err_setup_reprs;
-
if (ice_eswitch_br_offloads_init(pf))
goto err_br_offloads;
- ice_eswitch_napi_enable(pf);
+ pf->eswitch.is_running = true;
return 0;
err_br_offloads:
- ice_vsi_close(ctrl_vsi);
-err_setup_reprs:
- ice_repr_rem_from_all_vfs(pf);
-err_repr_add:
ice_eswitch_release_env(pf);
err_vsi:
ice_vsi_release(ctrl_vsi);
@@ -526,19 +477,19 @@ err_vsi:
}
/**
- * ice_eswitch_disable_switchdev - disable switchdev resources
+ * ice_eswitch_disable_switchdev - disable eswitch resources
* @pf: pointer to PF structure
*/
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
- ice_eswitch_napi_disable(pf);
ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
- ice_repr_rem_from_all_vfs(pf);
+
+ pf->eswitch.is_running = false;
+ pf->eswitch.qs.is_reaching = false;
}
/**
@@ -566,6 +517,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
case DEVLINK_ESWITCH_MODE_LEGACY:
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
pf->hw.pf_id);
+ xa_destroy(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
@@ -578,6 +530,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
+ xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
break;
}
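
Mode switching now also owns the representor xarray's lifecycle: entering
switchdev initializes it with XA_FLAGS_ALLOC so xa_alloc() can hand out
IDs, and returning to legacy destroys it. The attach path further down
allocates from XA_LIMIT(1, INT_MAX), leaving ID 0 unused. A minimal sketch
of that lifecycle (my_repr stands in for the stored entry):

struct xarray reprs;
u32 id;

xa_init_flags(&reprs, XA_FLAGS_ALLOC);
/* store my_repr under the lowest free ID in [1, INT_MAX] */
if (!xa_alloc(&reprs, &id, my_repr, XA_LIMIT(1, INT_MAX), GFP_KERNEL))
	pr_debug("representor got id %u\n", id);
xa_erase(&reprs, id);
xa_destroy(&reprs);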
@@ -616,74 +569,168 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
}
/**
- * ice_eswitch_release - cleanup eswitch
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
* @pf: pointer to PF structure
*/
-void ice_eswitch_release(struct ice_pf *pf)
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ struct ice_repr *repr;
+ unsigned long id;
+
+ if (test_bit(ICE_DOWN, pf->state))
return;
- ice_eswitch_disable_switchdev(pf);
- pf->switchdev.is_running = false;
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_repr_start_tx_queues(repr);
}
/**
- * ice_eswitch_configure - configure eswitch
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
* @pf: pointer to PF structure
*/
-int ice_eswitch_configure(struct ice_pf *pf)
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- int status;
-
- if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
- return 0;
+ struct ice_repr *repr;
+ unsigned long id;
- status = ice_eswitch_enable_switchdev(pf);
- if (status)
- return status;
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
- pf->switchdev.is_running = true;
- return 0;
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_repr_stop_tx_queues(repr);
}
-/**
- * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
- * @pf: pointer to PF structure
- */
-static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ ice_eswitch_del_sp_rules(pf);
+ ice_eswitch_stop_all_tx_queues(pf);
+ ice_eswitch_napi_disable(&pf->eswitch.reprs);
+}
- lockdep_assert_held(&pf->vfs.table_lock);
+static void ice_eswitch_start_reprs(struct ice_pf *pf)
+{
+ ice_eswitch_napi_enable(&pf->eswitch.reprs);
+ ice_eswitch_start_all_tx_queues(pf);
+ ice_eswitch_add_sp_rules(pf);
+}
- if (test_bit(ICE_DOWN, pf->state))
- return;
+static void
+ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
+{
+ struct ice_vsi *cp = eswitch->control_vsi;
+ int queues = 0;
+
+ if (eswitch->qs.is_reaching) {
+ if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
+ queues = eswitch->qs.to_reach;
+ eswitch->qs.is_reaching = false;
+ } else {
+ queues = 0;
+ }
+ } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
+ change < 0) {
+ queues = cp->alloc_txq + change;
+ }
- ice_for_each_vf(pf, bkt, vf) {
- if (vf->repr)
- ice_repr_start_tx_queues(vf->repr);
+ if (queues) {
+ cp->req_txq = queues;
+ cp->req_rxq = queues;
+ ice_vsi_close(cp);
+ ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
+ ice_vsi_open(cp);
+ } else if (!change) {
+ /* change == 0 means that VSI wasn't open, open it here */
+ ice_vsi_open(cp);
}
+
+ eswitch->qs.value += change;
+ ice_eswitch_remap_rings_to_vectors(eswitch);
}
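
A worked example of the "reaching" logic above: suppose qs.value is 1 and a
caller has reserved three more queues, so to_reach is 4 and is_reaching is
set. The next attach passes change = 1; because to_reach (4) >= value +
change (2), the control-plane VSI is rebuilt once with 4 queues instead of
growing by one on each of the three attaches, and is_reaching is cleared.
Later attaches then only bump qs.value until it catches up with
cp->alloc_txq.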
-/**
- * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
- * @pf: pointer to PF structure
- */
-void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_vf *vf;
- unsigned int bkt;
+ struct ice_repr *repr;
+ int change = 1;
+ int err;
- lockdep_assert_held(&pf->vfs.table_lock);
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return 0;
- if (test_bit(ICE_DOWN, pf->state))
+ if (xa_empty(&pf->eswitch.reprs)) {
+ err = ice_eswitch_enable_switchdev(pf);
+ if (err)
+ return err;
+ /* Control plane VSI is created with 1 queue by default */
+ pf->eswitch.qs.to_reach -= 1;
+ change = 0;
+ }
+
+ ice_eswitch_stop_reprs(pf);
+
+ repr = ice_repr_add_vf(vf);
+ if (IS_ERR(repr)) {
+ err = PTR_ERR(repr);
+ goto err_create_repr;
+ }
+
+ err = ice_eswitch_setup_repr(pf, repr);
+ if (err)
+ goto err_setup_repr;
+
+ err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
+ XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ if (err)
+ goto err_xa_alloc;
+
+ vf->repr_id = repr->id;
+
+ ice_eswitch_cp_change_queues(&pf->eswitch, change);
+ ice_eswitch_start_reprs(pf);
+
+ return 0;
+
+err_xa_alloc:
+ ice_eswitch_release_repr(pf, repr);
+err_setup_repr:
+ ice_repr_rem_vf(repr);
+err_create_repr:
+ if (xa_empty(&pf->eswitch.reprs))
+ ice_eswitch_disable_switchdev(pf);
+ ice_eswitch_start_reprs(pf);
+
+ return err;
+}
+
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ if (!repr)
return;
- ice_for_each_vf(pf, bkt, vf) {
- if (vf->repr)
- ice_repr_stop_tx_queues(vf->repr);
+ ice_eswitch_stop_reprs(pf);
+ xa_erase(&pf->eswitch.reprs, repr->id);
+
+ if (xa_empty(&pf->eswitch.reprs))
+ ice_eswitch_disable_switchdev(pf);
+ else
+ ice_eswitch_cp_change_queues(&pf->eswitch, -1);
+
+ ice_eswitch_release_repr(pf, repr);
+ ice_repr_rem_vf(repr);
+
+ if (xa_empty(&pf->eswitch.reprs)) {
+ /* since all port representors are destroyed, there is
+ * no point in keeping the nodes
+ */
+ ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
+ devl_lock(devlink);
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
+ } else {
+ ice_eswitch_start_reprs(pf);
}
}
@@ -693,30 +740,35 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
*/
int ice_eswitch_rebuild(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- int status;
-
- ice_eswitch_napi_disable(pf);
- ice_eswitch_napi_del(pf);
-
- status = ice_eswitch_setup_env(pf);
- if (status)
- return status;
+ struct ice_repr *repr;
+ unsigned long id;
+ int err;
- status = ice_eswitch_setup_reprs(pf);
- if (status)
- return status;
+ if (!ice_is_switchdev_running(pf))
+ return 0;
- ice_eswitch_remap_rings_to_vectors(pf);
+ err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
+ if (err)
+ return err;
- ice_replay_tc_fltrs(pf);
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ ice_eswitch_detach(pf, repr->vf);
- status = ice_vsi_open(ctrl_vsi);
- if (status)
- return status;
+ return 0;
+}
- ice_eswitch_napi_enable(pf);
- ice_eswitch_start_all_tx_queues(pf);
+/**
+ * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
+ * @pf: pointer to PF structure
+ * @change: how many more (or fewer) queues are needed
+ *
+ * Remember to call ice_eswitch_attach/detach() "change" times.
+ */
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
+{
+ if (pf->eswitch.qs.value + change < 0)
+ return;
- return 0;
+ pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
+ pf->eswitch.qs.is_reaching = true;
}
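
The reservation helper and attach are meant to be used as a pair, per the
kernel-doc above; a sketch of the intended calling pattern (num_vfs and the
vfs array are illustrative, not driver code):

/* grow the control-plane VSI once, not once per VF */
ice_eswitch_reserve_cp_queues(pf, num_vfs);
for (i = 0; i < num_vfs; i++) {
	err = ice_eswitch_attach(pf, vfs[i]);
	if (err)
		break;
}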
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index b18bf83a2..1a288a03a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -7,8 +7,9 @@
#include <net/devlink.h>
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_release(struct ice_pf *pf);
-int ice_eswitch_configure(struct ice_pf *pf);
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
int ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
@@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -25,8 +26,15 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_release(struct ice_pf *pf) { }
+static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
@@ -34,7 +42,8 @@ static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
-static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+static inline void
+ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
@@ -68,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
return NETDEV_TX_BUSY;
}
+
+static inline void
+ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index 6ae0269bd..ac5beecd0 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
}
- if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+ if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
- else if (vsi->vf && vsi->vf->repr)
- vsi->vf->repr->br_port = NULL;
+ } else {
+ struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+
+ if (repr)
+ repr->br_port = NULL;
+ }
xa_erase(&bridge->ports, br_port->vsi_idx);
ice_eswitch_br_port_vlans_flush(br_port);
@@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
- struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
struct ice_esw_br_port *br_port;
int err;
@@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb,
static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
- struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+ struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;
ASSERT_RTNL();
@@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
- pf->switchdev.br_offloads = NULL;
+ pf->eswitch.br_offloads = NULL;
kfree(br_offloads);
}
@@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
ASSERT_RTNL();
- if (pf->switchdev.br_offloads)
+ if (pf->eswitch.br_offloads)
return ERR_PTR(-EEXIST);
br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
- pf->switchdev.br_offloads = br_offloads;
+ pf->eswitch.br_offloads = br_offloads;
br_offloads->pf = pf;
return br_offloads;
@@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads;
- br_offloads = pf->switchdev.br_offloads;
+ br_offloads = pf->eswitch.br_offloads;
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bde9bc74f..a19b06f18 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,7 +129,6 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
- ICE_PF_STAT("rx_length_errors.nic", stats.rx_len_errors),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -1142,8 +1141,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_vsi_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);
if (ice_is_port_repr_netdev(netdev))
return;
@@ -1162,8 +1160,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_pf_stats[i].stat_string);
+ ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
@@ -1179,8 +1176,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
- ethtool_sprintf(&p, "%s",
- ice_gstrings_priv_flags[i].name);
+ ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
break;
default:
break;
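
ethtool_puts() uses the same advancing string cursor as ethtool_sprintf()
but copies a literal string with no format pass, which is why the "%s"
conversions above become straight calls. Side by side, as a sketch (p, data
and i are illustrative):

u8 *p = data;

ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);	/* formatted */
ethtool_puts(&p, "rx_csum_bad.nic");			/* plain string */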
@@ -2505,27 +2501,15 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
-#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
-#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
-#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
-#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
-#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
-
/**
* ice_parse_hash_flds - parses hash fields from RSS hash input
* @nfc: ethtool rxnfc command
+ * @symm: true if symmetric Toeplitz hashing is set
*
* This function parses the rxnfc command and returns intended
* hash fields for RSS configuration
*/
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
+static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2594,9 +2578,11 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
+ struct ice_rss_hash_cfg cfg;
struct device *dev;
u64 hashed_flds;
int status;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2606,7 +2592,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- hashed_flds = ice_parse_hash_flds(nfc);
+ symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ hashed_flds = ice_parse_hash_flds(nfc, symm);
if (hashed_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
vsi->vsi_num);
@@ -2620,7 +2607,12 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
+ cfg.hash_flds = hashed_flds;
+ cfg.addl_hdrs = hdrs;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ cfg.symm = symm;
+
+ status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
@@ -2641,6 +2633,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
+ bool symm;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2659,7 +2652,7 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return;
}
- hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
+ hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
@@ -3198,11 +3191,18 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
+/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ *
+ * Reads the indirection table directly from the hardware.
+ */
static int
-ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
@@ -3233,17 +3233,18 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
vsi = vsi->tc_map_vsi[rss_context];
}
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
- if (!indir)
+ if (!rxfh->indir)
return 0;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
- err = ice_get_rss_key(vsi, key);
+ err = ice_get_rss_key(vsi, rxfh->key);
if (err)
goto out;
@@ -3253,12 +3254,12 @@ ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
if (ice_is_adq_active(pf)) {
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = offset + lut[i] % qcount;
+ rxfh->indir[i] = offset + lut[i] % qcount;
goto out;
}
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = lut[i];
+ rxfh->indir[i] = lut[i];
out:
kfree(lut);
@@ -3266,42 +3267,31 @@ out:
}
/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
-static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
-{
- return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
static int
-ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
- const u8 hfunc)
+ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
return -EOPNOTSUPP;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3315,7 +3305,15 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EOPNOTSUPP;
}
- if (key) {
+ /* Update the VSI's hash function */
+ if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ err = ice_set_rss_hfunc(vsi, hfunc);
+ if (err)
+ return err;
+
+ if (rxfh->key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
@@ -3323,7 +3321,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!vsi->rss_hkey_user)
return -ENOMEM;
}
- memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
+ memcpy(vsi->rss_hkey_user, rxfh->key,
+ ICE_VSIQF_HKEY_ARRAY_SIZE);
err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
if (err)
@@ -3338,11 +3337,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < vsi->rss_table_size; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
} else {
ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
vsi->rss_size);
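
The removal of .get_rxfh_context below follows from this API change: struct
ethtool_rxfh_param now carries rss_context alongside indir, key, hfunc and
input_xfrm, so a single .get_rxfh callback serves the default and the extra
RSS contexts. A minimal skeleton of the new callback shape (my_get_rxfh and
my_is_symmetric are assumptions, not driver code):

static int my_get_rxfh(struct net_device *dev,
		       struct ethtool_rxfh_param *rxfh)
{
	/* rxfh->rss_context selects the context; 0 is the default */
	rxfh->hfunc = ETH_RSS_HASH_TOP;
	if (my_is_symmetric(dev))
		rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
	/* fill rxfh->indir and rxfh->key when they are non-NULL */
	return 0;
}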
@@ -4220,9 +4219,11 @@ ice_get_module_eeprom(struct net_device *netdev,
}
static const struct ethtool_ops ice_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
+ .cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_drvinfo = ice_get_drvinfo,
@@ -4253,7 +4254,6 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
- .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d151e5bac..9a1a04f5f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -302,9 +302,7 @@ void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
continue;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
-
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ u64 prof_id = prof->prof_id[tun];
for (i = 0; i < prof->cnt; i++) {
if (prof->vsi_h[i] != vsi_idx)
@@ -362,10 +360,9 @@ ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
return;
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
- u64 prof_id;
+ u64 prof_id = prof->prof_id[tun];
int j;
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
for (j = 0; j < prof->cnt; j++) {
u16 vsi_num;
@@ -439,14 +436,12 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
struct ice_flow_prof *hw_prof;
struct ice_fd_hw_prof *prof;
- u64 prof_id;
int j;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX,
prof->fdir_seg[tun], TNL_SEG_CNT(tun),
- &hw_prof);
+ false, &hw_prof);
for (j = 0; j < prof->cnt; j++) {
enum ice_flow_priority prio;
u64 entry_h = 0;
@@ -454,7 +449,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
prio = ICE_FLOW_PRIO_NORMAL;
err = ice_flow_add_entry(hw, ICE_BLK_FD,
- prof_id,
+ hw_prof->id,
prof->vsi_h[0],
prof->vsi_h[j],
prio, prof->fdir_seg,
@@ -464,6 +459,7 @@ void ice_fdir_replay_flows(struct ice_hw *hw)
flow);
continue;
}
+ prof->prof_id[tun] = hw_prof->id;
prof->entry_h[j][tun] = entry_h;
}
}
@@ -507,8 +503,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
return -EINVAL;
data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
- data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
- ICE_USERDEF_FLEX_OFFS_S;
+ data->flex_offset = FIELD_GET(ICE_USERDEF_FLEX_OFFS_M, value);
if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
@@ -638,7 +633,6 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
u64 entry1_h = 0;
u64 entry2_h = 0;
bool del_last;
- u64 prof_id;
int err;
int idx;
@@ -668,7 +662,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* then return error.
*/
if (hw->fdir_fltr_cnt[flow]) {
- dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return -EINVAL;
}
@@ -686,23 +680,23 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* That is the final parameters are 1 header (segment), no
* actions (NULL) and zero actions 0.
*/
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ TNL_SEG_CNT(tun), false, &prof);
if (err)
return err;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (err)
goto err_prof;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (err)
goto err_entry;
hw_prof->fdir_seg[tun] = seg;
+ hw_prof->prof_id[tun] = prof->id;
hw_prof->entry_h[0][tun] = entry1_h;
hw_prof->entry_h[1][tun] = entry2_h;
hw_prof->vsi_h[0] = main_vsi->idx;
@@ -719,7 +713,7 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
entry1_h = 0;
vsi_h = main_vsi->tc_map_vsi[idx]->idx;
- err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id,
main_vsi->idx, vsi_h,
ICE_FLOW_PRIO_NORMAL, seg,
&entry1_h);
@@ -756,7 +750,7 @@ err_unroll:
if (!hw_prof->entry_h[idx][tun])
continue;
- ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
hw_prof->entry_h[idx][tun] = 0;
if (del_last)
@@ -766,11 +760,11 @@ err_unroll:
hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, main_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
- dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
+ dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
return err;
}
@@ -1853,6 +1847,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
struct ice_pf *pf;
struct ice_hw *hw;
int fltrs_needed;
+ u32 max_location;
u16 tunnel_port;
int ret;
@@ -1884,8 +1879,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
if (ret)
return ret;
- if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
- dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
+ max_location = ice_get_fdir_cnt_all(hw);
+ if (fsp->location >= max_location) {
+ dev_err(dev, "Failed to add filter. The number of ntuple filters or the provided location exceeds max %d.\n",
+ max_location);
return -ENOSPC;
}
@@ -1893,7 +1890,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
- dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
+ dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
return -ENOSPC;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index ae089d32e..5840c3e04 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -604,55 +604,32 @@ ice_set_fd_desc_val(struct ice_fd_fltr_desc_ctx *ctx,
u64 qword;
/* prep QW0 of FD filter programming desc */
- qword = ((u64)ctx->qindex << ICE_FXD_FLTR_QW0_QINDEX_S) &
- ICE_FXD_FLTR_QW0_QINDEX_M;
- qword |= ((u64)ctx->comp_q << ICE_FXD_FLTR_QW0_COMP_Q_S) &
- ICE_FXD_FLTR_QW0_COMP_Q_M;
- qword |= ((u64)ctx->comp_report << ICE_FXD_FLTR_QW0_COMP_REPORT_S) &
- ICE_FXD_FLTR_QW0_COMP_REPORT_M;
- qword |= ((u64)ctx->fd_space << ICE_FXD_FLTR_QW0_FD_SPACE_S) &
- ICE_FXD_FLTR_QW0_FD_SPACE_M;
- qword |= ((u64)ctx->cnt_index << ICE_FXD_FLTR_QW0_STAT_CNT_S) &
- ICE_FXD_FLTR_QW0_STAT_CNT_M;
- qword |= ((u64)ctx->cnt_ena << ICE_FXD_FLTR_QW0_STAT_ENA_S) &
- ICE_FXD_FLTR_QW0_STAT_ENA_M;
- qword |= ((u64)ctx->evict_ena << ICE_FXD_FLTR_QW0_EVICT_ENA_S) &
- ICE_FXD_FLTR_QW0_EVICT_ENA_M;
- qword |= ((u64)ctx->toq << ICE_FXD_FLTR_QW0_TO_Q_S) &
- ICE_FXD_FLTR_QW0_TO_Q_M;
- qword |= ((u64)ctx->toq_prio << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) &
- ICE_FXD_FLTR_QW0_TO_Q_PRI_M;
- qword |= ((u64)ctx->dpu_recipe << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) &
- ICE_FXD_FLTR_QW0_DPU_RECIPE_M;
- qword |= ((u64)ctx->drop << ICE_FXD_FLTR_QW0_DROP_S) &
- ICE_FXD_FLTR_QW0_DROP_M;
- qword |= ((u64)ctx->flex_prio << ICE_FXD_FLTR_QW0_FLEX_PRI_S) &
- ICE_FXD_FLTR_QW0_FLEX_PRI_M;
- qword |= ((u64)ctx->flex_mdid << ICE_FXD_FLTR_QW0_FLEX_MDID_S) &
- ICE_FXD_FLTR_QW0_FLEX_MDID_M;
- qword |= ((u64)ctx->flex_val << ICE_FXD_FLTR_QW0_FLEX_VAL_S) &
- ICE_FXD_FLTR_QW0_FLEX_VAL_M;
+ qword = FIELD_PREP(ICE_FXD_FLTR_QW0_QINDEX_M, ctx->qindex);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_Q_M, ctx->comp_q);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_COMP_REPORT_M, ctx->comp_report);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FD_SPACE_M, ctx->fd_space);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_CNT_M, ctx->cnt_index);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_STAT_ENA_M, ctx->cnt_ena);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_EVICT_ENA_M, ctx->evict_ena);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_M, ctx->toq);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_TO_Q_PRI_M, ctx->toq_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DPU_RECIPE_M, ctx->dpu_recipe);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_DROP_M, ctx->drop);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_PRI_M, ctx->flex_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_MDID_M, ctx->flex_mdid);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW0_FLEX_VAL_M, ctx->flex_val);
fdir_desc->qidx_compq_space_stat = cpu_to_le64(qword);
/* prep QW1 of FD filter programming desc */
- qword = ((u64)ctx->dtype << ICE_FXD_FLTR_QW1_DTYPE_S) &
- ICE_FXD_FLTR_QW1_DTYPE_M;
- qword |= ((u64)ctx->pcmd << ICE_FXD_FLTR_QW1_PCMD_S) &
- ICE_FXD_FLTR_QW1_PCMD_M;
- qword |= ((u64)ctx->desc_prof_prio << ICE_FXD_FLTR_QW1_PROF_PRI_S) &
- ICE_FXD_FLTR_QW1_PROF_PRI_M;
- qword |= ((u64)ctx->desc_prof << ICE_FXD_FLTR_QW1_PROF_S) &
- ICE_FXD_FLTR_QW1_PROF_M;
- qword |= ((u64)ctx->fd_vsi << ICE_FXD_FLTR_QW1_FD_VSI_S) &
- ICE_FXD_FLTR_QW1_FD_VSI_M;
- qword |= ((u64)ctx->swap << ICE_FXD_FLTR_QW1_SWAP_S) &
- ICE_FXD_FLTR_QW1_SWAP_M;
- qword |= ((u64)ctx->fdid_prio << ICE_FXD_FLTR_QW1_FDID_PRI_S) &
- ICE_FXD_FLTR_QW1_FDID_PRI_M;
- qword |= ((u64)ctx->fdid_mdid << ICE_FXD_FLTR_QW1_FDID_MDID_S) &
- ICE_FXD_FLTR_QW1_FDID_MDID_M;
- qword |= ((u64)ctx->fdid << ICE_FXD_FLTR_QW1_FDID_S) &
- ICE_FXD_FLTR_QW1_FDID_M;
+ qword = FIELD_PREP(ICE_FXD_FLTR_QW1_DTYPE_M, ctx->dtype);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PCMD_M, ctx->pcmd);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_PRI_M, ctx->desc_prof_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_PROF_M, ctx->desc_prof);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FD_VSI_M, ctx->fd_vsi);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_SWAP_M, ctx->swap);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_PRI_M, ctx->fdid_prio);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_MDID_M, ctx->fdid_mdid);
+ qword |= FIELD_PREP(ICE_FXD_FLTR_QW1_FDID_M, ctx->fdid);
fdir_desc->dtype_cmd_vsi_fdid = cpu_to_le64(qword);
}
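
FIELD_PREP() from <linux/bitfield.h> derives the shift from the mask at
compile time, which is why every "(val << FOO_S) & FOO_M" pair above
collapses to one line and the _S shift defines are no longer needed here.
A small illustration with a made-up mask:

#include <linux/bitfield.h>

#define MY_QINDEX_M	GENMASK_ULL(10, 0)	/* illustrative mask */

u16 qindex = 5;
u64 qw = FIELD_PREP(MY_QINDEX_M, qindex);	/* place value in field */
u16 back = FIELD_GET(MY_QINDEX_M, qw);		/* reads back 5 */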
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 5ce413965..20d5db88c 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1218,11 +1218,13 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @blk: HW block
* @fv: field vector to search for
* @masks: masks for FV
+ * @symm: symmetric setting for RSS flows
* @prof_id: receives the profile ID
*/
static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
- struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
+ struct ice_fv_word *fv, u16 *masks, bool symm,
+ u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u8 i;
@@ -1236,6 +1238,9 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
+ if (blk == ICE_BLK_RSS && es->symm[i] != symm)
+ continue;
+
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
@@ -1409,13 +1414,13 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
switch (blk) {
case ICE_BLK_RSS:
offset = GLQF_HMASK(mask_idx);
- val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
+ val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx);
+ val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask);
break;
case ICE_BLK_FD:
offset = GLQF_FDMASK(mask_idx);
- val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
- val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
+ val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx);
+ val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask);
break;
default:
ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
@@ -1716,15 +1721,16 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
}
/**
- * ice_write_es - write an extraction sequence to hardware
+ * ice_write_es - write an extraction sequence and symmetric setting to hardware
* @hw: pointer to the HW struct
* @blk: the block in which to write the extraction sequence
* @prof_id: the profile ID to write
* @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ * @symm: symmetric setting for RSS profiles
*/
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
- struct ice_fv_word *fv)
+ struct ice_fv_word *fv, bool symm)
{
u16 off;
@@ -1737,6 +1743,9 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
memcpy(&hw->blk[blk].es.t[off], fv,
hw->blk[blk].es.fvw * sizeof(*fv));
}
+
+ if (blk == ICE_BLK_RSS)
+ hw->blk[blk].es.symm[prof_id] = symm;
}
/**
@@ -1753,7 +1762,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
- ice_write_es(hw, blk, prof_id, NULL);
+ ice_write_es(hw, blk, prof_id, NULL, false);
ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
@@ -2116,8 +2125,10 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2150,6 +2161,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2178,8 +2190,11 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
+ memset(es->symm, 0, es->count * sizeof(*es->symm));
memset(es->written, 0, es->count * sizeof(*es->written));
memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
+
+ memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
}
}
@@ -2196,6 +2211,7 @@ int ice_init_hw_tbls(struct ice_hw *hw)
ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
@@ -2292,6 +2308,11 @@ int ice_init_hw_tbls(struct ice_hw *hw)
if (!es->ref_count)
goto err;
+ es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->symm), GFP_KERNEL);
+ if (!es->symm)
+ goto err;
+
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
@@ -2301,6 +2322,12 @@ int ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->mask_ena), GFP_KERNEL);
if (!es->mask_ena)
goto err;
+
+ prof_id->count = blk_sizes[i].prof_id;
+ prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
+ sizeof(*prof_id->id), GFP_KERNEL);
+ if (!prof_id->id)
+ goto err;
}
return 0;
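
The prof_id->id array allocated above is used as a bitmap by the flow code:
ice_flow_add_prof_sync() further down picks the lowest clear bit as the next
free profile ID. That allocator pattern in isolation (illustrative):

static int my_alloc_id(unsigned long *ids, unsigned int count)
{
	unsigned long id = find_first_zero_bit(ids, count);

	if (id >= count)
		return -ENOSPC;		/* all IDs in use */
	set_bit(id, ids);		/* claim it */
	return id;
}
/* release later with: clear_bit(id, ids); */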
@@ -2963,6 +2990,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
+ * @symm: symmetric setting for RSS profiles
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -2972,7 +3000,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks)
+ struct ice_fv_word *es, u16 *masks, bool symm)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -2986,7 +3014,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
- status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
+ status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3009,7 +3037,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
goto err_ice_add_prof;
/* and write new es */
- ice_write_es(hw, blk, prof_id, es);
+ ice_write_es(hw, blk, prof_id, es, symm);
}
ice_prof_inc_ref(hw, blk, prof_id);
@@ -3097,7 +3125,7 @@ err_ice_add_prof:
* This will search for a profile tracking ID which was previously added.
* The profile map lock should be held before calling this function.
*/
-static struct ice_prof_map *
+struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 7af7c8e9a..b39d7cdc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -42,7 +42,9 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks);
+ struct ice_fv_word *es, u16 *masks, bool symm);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 4f42e14ed..d427a79d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -146,6 +146,7 @@ struct ice_es {
u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
+ u8 *symm; /* symmetric setting per profile (RSS blk) */
struct mutex prof_map_lock; /* protect access to profiles list */
u8 *written;
u8 reverse; /* set to true to reverse FV order */
@@ -304,10 +305,16 @@ struct ice_masks {
struct ice_mask masks[ICE_PROF_MASK_COUNT];
};
+struct ice_prof_id {
+ unsigned long *id;
+ int count;
+};
+
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
struct ice_xlt2 xlt2;
+ struct ice_prof_id prof_id;
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fb8b925aa..fc2b58f56 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1235,6 +1235,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+#define ICE_FLOW_FIND_PROF_CHK_SYMM 0x00000008
/**
* ice_flow_find_prof_conds - Find a profile matching headers and conditions
@@ -1243,13 +1244,14 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
* @dir: flow direction
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
* @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
*/
static struct ice_flow_prof *
ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
- u8 segs_cnt, u16 vsi_handle, u32 conds)
+ u8 segs_cnt, bool symm, u16 vsi_handle, u32 conds)
{
struct ice_flow_prof *p, *prof = NULL;
@@ -1265,6 +1267,11 @@ ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
!test_bit(vsi_handle, p->vsis))
continue;
+ /* Check for symmetric settings */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_SYMM) &&
+ p->symm != symm)
+ continue;
+
/* Protocol headers must be checked. Matched fields are
* checked if specified.
*/
@@ -1328,26 +1335,33 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*
* Assumption: the caller has acquired the lock to the profile list
*/
static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
- enum ice_flow_dir dir, u64 prof_id,
+ enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ bool symm, struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
+ struct ice_prof_id *ids;
int status;
+ u64 prof_id;
u8 i;
if (!prof)
return -EINVAL;
+ ids = &hw->blk[blk].prof_id;
+ prof_id = find_first_zero_bit(ids->id, ids->count);
+ if (prof_id >= ids->count)
+ return -ENOSPC;
+
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -1369,6 +1383,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
params->prof->id = prof_id;
params->prof->dir = dir;
params->prof->segs_cnt = segs_cnt;
+ params->prof->symm = symm;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
@@ -1385,7 +1400,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask);
+ params->mask, symm);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1393,6 +1408,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
INIT_LIST_HEAD(&params->prof->entries);
mutex_init(&params->prof->entries_lock);
+ set_bit(prof_id, ids->id);
*prof = params->prof;
out:
@@ -1436,6 +1452,7 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
+ clear_bit(prof->id, hw->blk[blk].prof_id.id);
list_del(&prof->l_entry);
mutex_destroy(&prof->entries_lock);
devm_kfree(ice_hw_to_dev(hw), prof);
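Worth a note between these hunks: profile IDs for each block now come from a plain bitmap (allocated in ice_init_hw_tbls() earlier in this patch), claimed with find_first_zero_bit()/set_bit() in ice_flow_add_prof_sync() and released with clear_bit() here, replacing the encoded 64-bit ICE_FLOW_GEN_PROFID scheme removed further down. A minimal userspace sketch of that allocate/free pattern, with a byte-array bitmap standing in for the kernel bitmap API (names are hypothetical):

```c
#include <stdio.h>

#define ID_COUNT 128

static unsigned char ids[ID_COUNT / 8];

static int alloc_id(void)
{
	for (int i = 0; i < ID_COUNT; i++)
		if (!(ids[i / 8] & (1u << (i % 8)))) {
			ids[i / 8] |= 1u << (i % 8);	/* set_bit()  */
			return i;			/* first free */
		}
	return -1;				/* -ENOSPC analogue */
}

static void free_id(int i)
{
	ids[i / 8] &= ~(1u << (i % 8));		/* clear_bit() */
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	free_id(a);
	printf("a=%d b=%d reused=%d\n", a, b, alloc_id()); /* 0 1 0 */
	return 0;
}
```

The kernel helpers do the same scan a word at a time; the sketch trades that for clarity.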
@@ -1511,15 +1528,15 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @hw: pointer to the HW struct
* @blk: classification stage
* @dir: flow direction
- * @prof_id: unique ID to identify this flow profile
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
+ * @symm: symmetric setting for RSS profiles
* @prof: stores the returned flow profile added
*/
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof)
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof)
{
int status;
@@ -1538,8 +1555,8 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
mutex_lock(&hw->fl_profs_locks[blk]);
- status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
- prof);
+ status = ice_flow_add_prof_sync(hw, blk, dir, segs, segs_cnt,
+ symm, prof);
if (!status)
list_add(&(*prof)->l_entry, &hw->fl_profs[blk]);
@@ -1855,37 +1872,49 @@ int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
/**
* ice_flow_set_rss_seg_info - setup packet segments for RSS
* @segs: pointer to the flow field segment(s)
- * @hash_fields: fields to be hashed on for the segment(s)
- * @flow_hdr: protocol header fields within a packet segment
+ * @seg_cnt: segment count
+ * @cfg: configure parameters
*
* Helper function to extract fields from hash bitmap and use flow
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
static int
-ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
- u32 flow_hdr)
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_flow_seg_info *seg;
u64 val;
- u8 i;
+ u16 i;
- for_each_set_bit(i, (unsigned long *)&hash_fields,
- ICE_FLOW_FIELD_IDX_MAX)
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ /* set inner most segment */
+ seg = &segs[seg_cnt - 1];
+
+ for_each_set_bit(i, (const unsigned long *)&cfg->hash_flds,
+ (u16)ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
- ICE_FLOW_SET_HDRS(segs, flow_hdr);
+ ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
+
+ /* set outer most header */
+ if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
+ else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
+ segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER;
- if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
+ if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return -EINVAL;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
- val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
return -EIO;
@@ -1956,6 +1985,39 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
}
/**
+ * ice_get_rss_hdr_type - get an RSS profile's header type
+ * @prof: RSS flow profile
+ */
+static enum ice_rss_cfg_hdr_type
+ice_get_rss_hdr_type(struct ice_flow_prof *prof)
+{
+ if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
+ return ICE_RSS_OUTER_HEADERS;
+ } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
+ const struct ice_flow_seg_info *s;
+
+ s = &prof->segs[ICE_RSS_OUTER_HEADERS];
+ if (s->hdrs == ICE_FLOW_SEG_HDR_NONE)
+ return ICE_RSS_INNER_HEADERS;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ if (s->hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ return ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ }
+
+ return ICE_RSS_ANY_HEADERS;
+}
+
+static bool
+ice_rss_match_prof(struct ice_rss_cfg *r, struct ice_flow_prof *prof,
+ enum ice_rss_cfg_hdr_type hdr_type)
+{
+ return (r->hash.hdr_type == hdr_type &&
+ r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs);
+}
+
+/**
* ice_rem_rss_list - remove RSS configuration from list
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -1966,15 +2028,16 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
* remove from the RSS entry list of the VSI context and delete entry.
*/
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
clear_bit(vsi_handle, r->vsis);
if (bitmap_empty(r->vsis, ICE_MAX_VSI)) {
list_del(&r->l_entry);
@@ -1995,11 +2058,12 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
+ enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
+ hdr_type = ice_get_rss_hdr_type(prof);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
- if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
- r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ if (ice_rss_match_prof(r, prof, hdr_type)) {
set_bit(vsi_handle, r->vsis);
return 0;
}
@@ -2009,8 +2073,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return -ENOMEM;
- rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
- rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
+ rss_cfg->hash.hdr_type = hdr_type;
+ rss_cfg->hash.symm = prof->symm;
set_bit(vsi_handle, rss_cfg->vsis);
list_add_tail(&rss_cfg->l_entry, &hw->rss_list_head);
@@ -2018,65 +2084,177 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
return 0;
}
-#define ICE_FLOW_PROF_HASH_S 0
-#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
-#define ICE_FLOW_PROF_HDR_S 32
-#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
-#define ICE_FLOW_PROF_ENCAP_S 63
-#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+/**
+ * ice_rss_config_xor_word - set the HSYMM registers for one input set word
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ *
+ * Write to the HSYMM register with the index of @src FV the value of the @dst
+ * FV index. This will tell the hardware to XOR HSYMM[src] with INSET[dst]
+ * while calculating the RSS input set.
+ */
+static void
+ice_rss_config_xor_word(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst)
+{
+ u32 val, reg, bits_shift;
+ u8 reg_idx;
+
+ reg_idx = src / GLQF_HSYMM_REG_SIZE;
+ bits_shift = ((src % GLQF_HSYMM_REG_SIZE) << 3);
+ val = dst | GLQF_HSYMM_ENABLE_BIT;
+
+ reg = rd32(hw, GLQF_HSYMM(prof_id, reg_idx));
+ reg = (reg & ~(0xff << bits_shift)) | (val << bits_shift);
+ wr32(hw, GLQF_HSYMM(prof_id, reg_idx), reg);
+}
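A userspace sketch of the byte-lane read-modify-write performed above, with a plain array standing in for rd32()/wr32(): each 32-bit GLQF_HSYMM register carries four 8-bit entries (GLQF_HSYMM_REG_SIZE), so entry src lives in register src / 4 at byte lane src % 4, with bit 7 (GLQF_HSYMM_ENABLE_BIT) flagging the entry valid:

```c
#include <stdint.h>
#include <stdio.h>

#define REG_SIZE   4		/* byte lanes per 32-bit register */
#define ENABLE_BIT 0x80u	/* GLQF_HSYMM_ENABLE_BIT analogue */

static uint32_t hsymm[6];	/* fake register file, one profile */

static void config_xor_word(uint8_t src, uint8_t dst)
{
	uint32_t reg_idx = src / REG_SIZE;
	uint32_t shift = (src % REG_SIZE) << 3;	/* byte lane -> bits */
	uint32_t val = dst | ENABLE_BIT;
	uint32_t reg = hsymm[reg_idx];		/* rd32() */

	reg = (reg & ~(0xffu << shift)) | (val << shift);
	hsymm[reg_idx] = reg;			/* wr32() */
}

int main(void)
{
	config_xor_word(5, 9);	/* entry 5 -> register 1, lane 1 */
	printf("hsymm[1] = 0x%08x\n", hsymm[1]);	/* 0x00008900 */
	return 0;
}
```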
-#define ICE_RSS_OUTER_HEADERS 1
-#define ICE_RSS_INNER_HEADERS 2
+/**
+ * ice_rss_config_xor - set the symmetric registers for a profile's protocol
+ * @hw: pointer to the hardware structure
+ * @prof_id: RSS hardware profile id
+ * @src: the FV index used by the protocol's source field
+ * @dst: the FV index used by the protocol's destination field
+ * @len: length of the source/destination fields in words
+ */
+static void
+ice_rss_config_xor(struct ice_hw *hw, u8 prof_id, u8 src, u8 dst, u8 len)
+{
+ int fv_last_word =
+ ICE_FLOW_SW_FIELD_VECTOR_MAX / ICE_FLOW_FV_EXTRACT_SZ - 1;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ ice_rss_config_xor_word(hw, prof_id,
+ /* Yes, field vector in GLQF_HSYMM and
+ * GLQF_HINSET is inverted!
+ */
+ fv_last_word - (src + i),
+ fv_last_word - (dst + i));
+ ice_rss_config_xor_word(hw, prof_id,
+ fv_last_word - (dst + i),
+ fv_last_word - (src + i));
+ }
+}
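The inversion noted in the comment above is easier to follow with numbers. Using ICE_FLOW_SW_FIELD_VECTOR_MAX = 48 and ICE_FLOW_FV_EXTRACT_SZ = 2 from ice_flow.h below, fv_last_word is 48 / 2 - 1 = 23; this sketch prints the HSYMM entry pairs written for a hypothetical two-word field pair at FV indexes 10 and 12:

```c
#include <stdio.h>

int main(void)
{
	const int fv_last_word = 48 / 2 - 1;	/* = 23 */
	int src = 10, dst = 12, len = 2;	/* e.g. an IPv4 SA/DA pair */

	for (int i = 0; i < len; i++) {
		printf("HSYMM[%d] <- FV %d\n",
		       fv_last_word - (src + i), fv_last_word - (dst + i));
		printf("HSYMM[%d] <- FV %d\n",
		       fv_last_word - (dst + i), fv_last_word - (src + i));
	}
	return 0;	/* entries 13<->11 and 12<->10: back-to-front */
}
```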
-/* Flow profile ID format:
- * [0:31] - Packet match fields
- * [32:62] - Protocol header
- * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+/**
+ * ice_rss_set_symm - set the symmetric settings for an RSS profile
+ * @hw: pointer to the hardware structure
+ * @prof: pointer to flow profile
+ *
+ * The symmetric hash will result from XORing the protocol's fields with
+ * indexes in GLQF_HSYMM and GLQF_HINSET. This function configures the profile's
+ * GLQF_HSYMM registers.
*/
-#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
- ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
- (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
- ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0)))
+static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, prof->id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+
+ if (!map)
+ return;
+
+ /* clear to default */
+ for (m = 0; m < GLQF_HSYMM_REG_PER_PROF; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (prof->symm) {
+ struct ice_flow_seg_xtrct *ipv4_src, *ipv4_dst;
+ struct ice_flow_seg_xtrct *ipv6_src, *ipv6_dst;
+ struct ice_flow_seg_xtrct *sctp_src, *sctp_dst;
+ struct ice_flow_seg_xtrct *tcp_src, *tcp_dst;
+ struct ice_flow_seg_xtrct *udp_src, *udp_dst;
+ struct ice_flow_seg_info *seg;
+
+ seg = &prof->segs[prof->segs_cnt - 1];
+
+ ipv4_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_SA].xtrct;
+ ipv4_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV4_DA].xtrct;
+
+ ipv6_src = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_SA].xtrct;
+ ipv6_dst = &seg->fields[ICE_FLOW_FIELD_IDX_IPV6_DA].xtrct;
+
+ tcp_src = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_SRC_PORT].xtrct;
+ tcp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_TCP_DST_PORT].xtrct;
+
+ udp_src = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_SRC_PORT].xtrct;
+ udp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_UDP_DST_PORT].xtrct;
+
+ sctp_src = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT].xtrct;
+ sctp_dst = &seg->fields[ICE_FLOW_FIELD_IDX_SCTP_DST_PORT].xtrct;
+
+ /* xor IPv4 */
+ if (ipv4_src->prot_id != 0 && ipv4_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv4_src->idx, ipv4_dst->idx, 2);
+
+ /* xor IPv6 */
+ if (ipv6_src->prot_id != 0 && ipv6_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ ipv6_src->idx, ipv6_dst->idx, 8);
+
+ /* xor TCP */
+ if (tcp_src->prot_id != 0 && tcp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ tcp_src->idx, tcp_dst->idx, 1);
+
+ /* xor UDP */
+ if (udp_src->prot_id != 0 && udp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ udp_src->idx, udp_dst->idx, 1);
+
+ /* xor SCTP */
+ if (sctp_src->prot_id != 0 && sctp_dst->prot_id != 0)
+ ice_rss_config_xor(hw, prof_id,
+ sctp_src->idx, sctp_dst->idx, 1);
+ }
+}
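The kernel-doc above states the mechanism; why it yields a symmetric hash deserves one more line: once each source field is XORed with its destination counterpart before hashing, both directions of the same flow present identical input to the hash function and therefore land on the same queue. A trivial standalone illustration (addresses and ports are arbitrary examples):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_ip = 0xC0A80001, dst_ip = 0xC0A80002;
	uint16_t sport = 12345, dport = 443;

	/* XOR is commutative, so the "reversed" flow is identical */
	uint32_t fwd_ip = src_ip ^ dst_ip, rev_ip = dst_ip ^ src_ip;
	uint16_t fwd_pt = sport ^ dport,  rev_pt = dport ^ sport;

	printf("ip:   %08x == %08x\n", fwd_ip, rev_ip);
	printf("port: %04x == %04x\n", fwd_pt, rev_pt);
	return 0;
}
```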
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
- * @segs_cnt: packet segment count
+ * @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
+ u8 segs_cnt;
int status;
- if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return -EINVAL;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto exit;
- /* Search for a flow profile that has matching headers, hash fields
- * and has the input VSI associated to it. If found, no further
+ /* Search for a flow profile that has matching headers, hash fields,
+ * symm and has the input VSI associated to it. If found, no further
* operations required and exit.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof)
goto exit;
@@ -2087,7 +2265,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* the protocol header and new hash field configuration.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_VSI);
if (prof) {
status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
if (!status)
@@ -2103,11 +2282,12 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
}
}
- /* Search for a profile that has same match fields only. If this
- * exists then associate the VSI to this profile.
+ /* Search for a profile that has the same match fields and symmetric
+ * setting. If this exists then associate the VSI to this profile.
*/
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_SYMM |
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (prof) {
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
@@ -2116,17 +2296,14 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
goto exit;
}
- /* Create a new flow profile with generated profile and packet
- * segment information.
- */
+ /* Create a new flow profile with packet segment information. */
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
- ICE_FLOW_GEN_PROFID(hashed_flds,
- segs[segs_cnt - 1].hdrs,
- segs_cnt),
- segs, segs_cnt, &prof);
+ segs, segs_cnt, cfg->symm, &prof);
if (status)
goto exit;
+ prof->symm = cfg->symm;
+ ice_rss_set_symm(hw, prof);
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
/* If association to a new flow profile failed then this profile can
* be removed.
@@ -2146,30 +2323,43 @@ exit:
/**
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
- * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
- * @addl_hdrs: protocol header fields
+ * @vsi: VSI to add the RSS configuration to
+ * @cfg: configure parameters
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
+ u16 vsi_handle;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2179,33 +2369,33 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
- * @segs_cnt: packet segment count
+ * @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static int
-ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs, u8 segs_cnt)
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
+ u8 segs_cnt;
int status;
+ segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
+ ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
- status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
- addl_hdrs);
+ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto out;
prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
- vsi_handle,
+ cfg->symm, vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = -ENOENT;
@@ -2233,31 +2423,39 @@ out:
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
- * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
- * @addl_hdrs: Protocol header fields within a packet segment
+ * @cfg: configure parameters
*
* This function will lookup the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
* that profile and find entry associated with input VSI to be
- * removed. Calls are made to underlying flow s which will APIs
+ * removed. Calls are made to underlying flow APIs which will in
+ * turn build or update buffers for the RSS XLT1 section.
*/
-int __maybe_unused
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs)
+int
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg)
{
+ struct ice_rss_hash_cfg local_cfg;
int status;
- if (hashed_flds == ICE_HASH_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
+ cfg->hash_flds == ICE_HASH_INVALID)
return -EINVAL;
mutex_lock(&hw->rss_locks);
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
- ICE_RSS_OUTER_HEADERS);
- if (!status)
- status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
- addl_hdrs, ICE_RSS_INNER_HEADERS);
+ local_cfg = *cfg;
+ if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ } else {
+ local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
+ if (!status) {
+ local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle,
+ &local_cfg);
+ }
+ }
mutex_unlock(&hw->rss_locks);
return status;
@@ -2298,18 +2496,24 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
- * @vsi_handle: software VSI handle
+ * @vsi: VF's VSI
* @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
{
+ struct ice_rss_hash_cfg hcfg;
+ u16 vsi_handle;
int status = 0;
u64 hash_flds;
+ if (!vsi)
+ return -EINVAL;
+
+ vsi_handle = vsi->idx;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
@@ -2379,8 +2583,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (rss_hash == ICE_HASH_INVALID)
return -EIO;
- status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
- ICE_FLOW_SEG_HDR_NONE);
+ hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ hcfg.hash_flds = rss_hash;
+ hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
+ hcfg.symm = false;
+ status = ice_add_rss_cfg(hw, vsi, &hcfg);
if (status)
break;
}
@@ -2388,6 +2595,54 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
return status;
}
+static bool rss_cfg_symm_valid(u64 hfld)
+{
+ return !((!!(hfld & ICE_FLOW_HASH_FLD_IPV4_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV4_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_IPV6_SA) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_IPV6_DA)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_TCP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_TCP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_UDP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_UDP_DST_PORT)) ||
+ (!!(hfld & ICE_FLOW_HASH_FLD_SCTP_SRC_PORT) ^
+ !!(hfld & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)));
+}
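rss_cfg_symm_valid() encodes a pairing rule: a field set is eligible for symmetric hashing only if each source/destination pair is either fully present or fully absent, since XORing an unpaired field would corrupt the input set. A small sketch of the same check with illustrative bit positions (not the real ICE_FLOW_FIELD_IDX_* values):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLD_IPV4_SA (1ull << 0)
#define FLD_IPV4_DA (1ull << 1)
#define FLD_TCP_SRC (1ull << 2)
#define FLD_TCP_DST (1ull << 3)

static bool pair_ok(uint64_t hfld, uint64_t a, uint64_t b)
{
	return !(!!(hfld & a) ^ !!(hfld & b)); /* both or neither */
}

static bool symm_valid(uint64_t hfld)
{
	return pair_ok(hfld, FLD_IPV4_SA, FLD_IPV4_DA) &&
	       pair_ok(hfld, FLD_TCP_SRC, FLD_TCP_DST);
}

int main(void)
{
	printf("%d\n", symm_valid(FLD_IPV4_SA | FLD_IPV4_DA)); /* 1 */
	printf("%d\n", symm_valid(FLD_IPV4_SA));               /* 0 */
	return 0;
}
```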
+
+/**
+ * ice_set_rss_cfg_symm - set symmetry for all of a VSI's RSS configurations
+ * @hw: pointer to the hardware structure
+ * @vsi: VSI to set/unset Symmetric RSS
+ * @symm: true to set Symmetric RSS hashing
+ */
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm)
+{
+ struct ice_rss_hash_cfg local;
+ struct ice_rss_cfg *r, *tmp;
+ u16 vsi_handle = vsi->idx;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ mutex_lock(&hw->rss_locks);
+ list_for_each_entry_safe(r, tmp, &hw->rss_list_head, l_entry) {
+ if (test_bit(vsi_handle, r->vsis) && r->hash.symm != symm) {
+ local = r->hash;
+ local.symm = symm;
+ if (symm && !rss_cfg_symm_valid(r->hash.hash_flds))
+ continue;
+
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &local);
+ if (status)
+ break;
+ }
+ }
+ mutex_unlock(&hw->rss_locks);
+
+ return status;
+}
+
/**
* ice_replay_rss_cfg - replay RSS configurations associated with VSI
* @hw: pointer to the hardware structure
@@ -2404,16 +2659,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
if (test_bit(vsi_handle, r->vsis)) {
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_OUTER_HEADERS);
- if (status)
- break;
- status = ice_add_rss_cfg_sync(hw, vsi_handle,
- r->hashed_flds,
- r->packet_hdr,
- ICE_RSS_INNER_HEADERS);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
if (status)
break;
}
@@ -2428,11 +2674,12 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hdrs: protocol header type
+ * @symm: whether the RSS is symmetric (bool, output)
*
* This function will return the match fields of the first instance of flow
* profile having the given header types and containing input VSI
*/
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm)
{
u64 rss_hash = ICE_HASH_INVALID;
struct ice_rss_cfg *r;
@@ -2444,8 +2691,9 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
- r->packet_hdr == hdrs) {
- rss_hash = r->hashed_flds;
+ r->hash.addl_hdrs == hdrs) {
+ rss_hash = r->hash.hash_flds;
+ *symm = r->hash.symm;
break;
}
mutex_unlock(&hw->rss_locks);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 96923ef0a..ff82915ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -34,6 +34,8 @@
#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_FLOW_HASH_GTP_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@@ -227,6 +229,19 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_MAX
};
+#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
+#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
+#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
+#define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
+#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
@@ -279,6 +294,24 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+enum ice_rss_cfg_hdr_type {
+ ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
+ ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
+ /* take inner headers as inputset for packet with outer ipv4. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
+ /* take inner headers as inputset for packet with outer ipv6. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
+ /* take outer headers first then inner headers as inputset */
+ ICE_RSS_ANY_HEADERS
+};
+
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs; /* protocol header fields */
+ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
+ enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
+ bool symm; /* symmetric or asymmetric hash */
+};
+
enum ice_flow_dir {
ICE_FLOW_RX = 0x02,
};
@@ -289,8 +322,10 @@ enum ice_flow_priority {
ICE_FLOW_PRIO_HIGH
};
+#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_FV_EXTRACT_SZ 2
#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
@@ -372,20 +407,21 @@ struct ice_flow_prof {
/* software VSI handles referenced by this flow profile */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
+
+ bool symm; /* Symmetric Hash for RSS */
};
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
DECLARE_BITMAP(vsis, ICE_MAX_VSI);
- u64 hashed_flds;
- u32 packet_hdr;
+ struct ice_rss_hash_cfg hash;
};
int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_prof **prof);
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
@@ -401,13 +437,13 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ u64 hashed_flds);
int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-int
-ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-int
-ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
- u32 addl_hdrs);
-u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
+int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
+ const struct ice_rss_hash_cfg *cfg);
+int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
+ const struct ice_rss_hash_cfg *cfg);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
new file mode 100644
index 000000000..92b5dac48
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+#include "ice.h"
+#include "ice_common.h"
+#include "ice_fwlog.h"
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
+
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+void ice_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
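The three ring helpers above assume the ring size is a power of two (256 by default, 512 max per ice_fwlog.h later in this patch), which lets the increment mask rather than divide. A standalone sketch of the same index arithmetic:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8	/* must be a power of two for the mask trick */

static uint16_t head, tail;

static void increment(uint16_t *item)
{
	*item = (*item + 1) & (RING_SIZE - 1);	/* cheap modulo */
}

static bool empty(void) { return head == tail; }

static bool full(void)
{
	return (head < tail && tail - head == RING_SIZE - 1) ||
	       (head > tail && tail == head - 1);
}

int main(void)
{
	for (int i = 0; i < RING_SIZE - 1; i++)
		increment(&tail);		/* produce 7 entries */
	printf("full=%d empty=%d\n", full(), empty()); /* full=1 empty=0 */
	increment(&head);			/* consume one entry */
	printf("full=%d tail=%u\n", full(), tail);     /* full=0 tail=7 */
	return 0;
}
```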
+
+static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = ICE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += ICE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct ice_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * ice_fwlog_realloc_rings - reallocate the FW log rings
+ * @hw: pointer to the HW structure
+ * @index: the new index to use to allocate memory for the log data
+ * Reallocate the FW log ring and its data buffers to the size implied
+ * by @index.
+ */
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
+{
+ struct ice_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
+ if (ring_size == hw->fwlog_ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = ice_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+
+ hw->fwlog_ring.rings = ring.rings;
+ hw->fwlog_ring.size = ring.size;
+ hw->fwlog_ring.index = index;
+ hw->fwlog_ring.head = 0;
+ hw->fwlog_ring.tail = 0;
+}
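Assuming ICE_AQ_MAX_BUF_LEN is the driver's 4 KiB admin queue buffer (consistent with ice_fwlog.h below, where default index 3 maps to ICE_FWLOG_RING_SIZE_DFLT = 256 buffers), the index-to-size conversion works out as:

```c
#include <stdio.h>

#define AQ_MAX_BUF_LEN    4096			/* assumed buffer size */
#define INDEX_TO_BYTES(n) ((128 * 1024) << (n))

int main(void)
{
	for (int index = 0; index <= 4; index++)
		printf("index %d: %5d KiB -> %3d buffers\n", index,
		       INDEX_TO_BYTES(index) / 1024,
		       INDEX_TO_BYTES(index) / AQ_MAX_BUF_LEN);
	return 0;
	/* index 3 prints 1024 KiB -> 256 buffers, matching the
	 * ICE_FWLOG_RING_SIZE_DFLT / INDEX_DFLT pair; index 4 gives
	 * 512, the ICE_FWLOG_RING_SIZE_MAX.
	 */
}
```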
+
+/**
+ * ice_fwlog_init - Initialize FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called on driver initialization during
+ * ice_init_hw().
+ */
+int ice_fwlog_init(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ ice_fwlog_set_supported(hw);
+
+ if (ice_fwlog_supported(hw)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = ice_fwlog_get(hw, &hw->fwlog_cfg);
+ if (status)
+ return status;
+
+ hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*hw->fwlog_ring.rings),
+ GFP_KERNEL);
+ if (!hw->fwlog_ring.rings) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
+ hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
+ if (status) {
+ dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ return status;
+ }
+
+ ice_debugfs_fwlog_init(hw->back);
+ } else {
+ dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fwlog_deinit - unroll FW logging configuration
+ * @hw: pointer to the HW structure
+ *
+ * This function should be called in ice_deinit_hw().
+ */
+void ice_fwlog_deinit(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ int status;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
+ status = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(pf->ice_debugfs_pf_fwlog_modules);
+
+ pf->ice_debugfs_pf_fwlog_modules = NULL;
+
+ status = ice_fwlog_unregister(hw);
+ if (status)
+ dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (hw->fwlog_ring.rings) {
+ ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
+ kfree(hw->fwlog_ring.rings);
+ }
+}
+
+/**
+ * ice_fwlog_supported - Return cached state of whether FW supports FW logging
+ * @hw: pointer to the HW structure
+ *
+ * This will always return false if called before ice_init_hw(), so it must be
+ * called after ice_init_hw().
+ */
+bool ice_fwlog_supported(struct ice_hw *hw)
+{
+ return hw->fwlog_supported;
+}
+
+/**
+ * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @hw: pointer to the HW structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from ice_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
+ u16 num_entries, u16 options, u16 log_resolution)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & ICE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & ICE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = ice_aq_send_cmd(hw, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries,
+ NULL);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set - Set the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
+ * for init.
+ */
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_set(hw, cfg->module_entries,
+ ICE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @hw: pointer to the HW structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ struct ice_aqc_fw_log_cfg_resp *fw_modules;
+ struct ice_aqc_fw_log *cmd;
+ struct ice_aq_desc desc;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
+ cmd = &desc.params.fw_log;
+
+ cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
+
+ status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
+ ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ ICE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * ice_fwlog_get - Get the firmware logging settings
+ * @hw: pointer to the HW structure
+ * @cfg: config to populate based on current firmware logging settings
+ */
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
+{
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ return ice_aq_fwlog_get(hw, cfg);
+}
+
+/**
+ * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @hw: pointer to the HW structure
+ * @reg: true to register and false to unregister
+ */
+static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
+
+ if (reg)
+ desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_fwlog_register - Register the PF for firmware logging
+ * @hw: pointer to the HW structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in ice_fwlog_set.
+ */
+int ice_fwlog_register(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, true);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_unregister - Unregister the PF from firmware logging
+ * @hw: pointer to the HW structure
+ */
+int ice_fwlog_unregister(struct ice_hw *hw)
+{
+ int status;
+
+ if (!ice_fwlog_supported(hw))
+ return -EOPNOTSUPP;
+
+ status = ice_aq_fwlog_register(hw, false);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * ice_fwlog_set_supported - Set if FW logging is supported by FW
+ * @hw: pointer to the HW struct
+ *
+ * If FW returns success to the ice_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine if
+ * the FW supports FW logging.
+ */
+void ice_fwlog_set_supported(struct ice_hw *hw)
+{
+ struct ice_fwlog_cfg *cfg;
+ int status;
+
+ hw->fwlog_supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ /* don't call ice_fwlog_get() because that would check to see if FW
+ * logging is supported which is what the driver is determining now
+ */
+ status = ice_aq_fwlog_get(hw, cfg);
+ if (status)
+ ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ hw->fwlog_supported = true;
+
+ kfree(cfg);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
new file mode 100644
index 000000000..287e71fa4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_FWLOG_H_
+#define _ICE_FWLOG_H_
+#include "ice_adminq_cmd.h"
+
+struct ice_hw;
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all
+ * other log levels are included (except ICE_FW_LOG_LEVEL_NONE)
+ */
+enum ice_fwlog_level {
+ ICE_FWLOG_LEVEL_NONE = 0,
+ ICE_FWLOG_LEVEL_ERROR = 1,
+ ICE_FWLOG_LEVEL_WARNING = 2,
+ ICE_FWLOG_LEVEL_NORMAL = 3,
+ ICE_FWLOG_LEVEL_VERBOSE = 4,
+ ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct ice_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ice_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ice_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ice_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct ice_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ice_fwlog_ring {
+ struct ice_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define ICE_FWLOG_RING_SIZE_DFLT 256
+#define ICE_FWLOG_RING_SIZE_MAX 512
+
+bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
+bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
+void ice_fwlog_ring_increment(u16 *item, u16 size);
+void ice_fwlog_set_supported(struct ice_hw *hw);
+bool ice_fwlog_supported(struct ice_hw *hw);
+int ice_fwlog_init(struct ice_hw *hw);
+void ice_fwlog_deinit(struct ice_hw *hw);
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_register(struct ice_hw *hw);
+int ice_fwlog_unregister(struct ice_hw *hw);
+void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
+#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 86936b758..cfac1d432 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -200,6 +200,8 @@
#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12)
#define GLINT_VECT2FUNC_IS_PF_S 16
#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
+#define PFINT_ALLOC 0x001D2600
+#define PFINT_ALLOC_FIRST ICE_M(0x7FF, 0)
#define PFINT_FW_CTL 0x0016C800
#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_FW_CTL_ITR_INDX_S 11
@@ -404,6 +406,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512))
+#define GLQF_HSYMM_REG_SIZE 4
+#define GLQF_HSYMM_REG_PER_PROF 6
+#define GLQF_HSYMM_ENABLE_BIT BIT(7)
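A worked reading of the address layout the GLQF_HSYMM macro implies (sketch only): the six per-profile words are not contiguous; word _j of every profile sits in its own 512-byte plane, with profiles packed at a 4-byte stride inside each plane:

```c
#include <stdio.h>

#define GLQF_HSYMM(i, j) (0x0040F000 + ((i) * 4 + (j) * 512))

int main(void)
{
	for (int j = 0; j < 6; j++)	/* GLQF_HSYMM_REG_PER_PROF */
		printf("profile 5, word %d -> 0x%08X\n", j,
		       GLQF_HSYMM(5, j));
	/* 0x0040F014, 0x0040F214, 0x0040F414, ...: +0x200 per word */
	return 0;
}
```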
#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
new file mode 100644
index 000000000..e4c2c1bff
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_hwmon.h"
+#include "ice_adminq_cmd.h"
+
+#include <linux/hwmon.h>
+
+#define TEMP_FROM_REG(reg) ((reg) * 1000)
+
+static const struct hwmon_channel_info *ice_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_EMERGENCY),
+ NULL
+};
+
+static int ice_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct ice_aqc_get_sensor_reading_resp resp;
+ struct ice_pf *pf = dev_get_drvdata(dev);
+ int ret;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ ret = ice_aq_get_sensor_reading(&pf->hw, &resp);
+ if (ret) {
+ dev_warn_ratelimited(dev,
+ "%s HW read failure (%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp);
+ break;
+ case hwmon_temp_max:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_warning_threshold);
+ break;
+ case hwmon_temp_crit:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_critical_threshold);
+ break;
+ case hwmon_temp_emergency:
+ *val = TEMP_FROM_REG(resp.data.s0f0.temp_fatal_threshold);
+ break;
+ default:
+ dev_dbg(dev, "%s unsupported attribute (%d)\n",
+ __func__, attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t ice_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit:
+ case hwmon_temp_max:
+ case hwmon_temp_emergency:
+ return 0444;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops ice_hwmon_ops = {
+ .is_visible = ice_hwmon_is_visible,
+ .read = ice_hwmon_read
+};
+
+static const struct hwmon_chip_info ice_chip_info = {
+ .ops = &ice_hwmon_ops,
+ .info = ice_hwmon_info
+};
+
+static bool ice_is_internal_reading_supported(struct ice_pf *pf)
+{
+ /* Only the first PF will report temperature for a chip.
+ * Note that internal temp reading is not supported
+ * for older FW (< v4.30).
+ */
+ if (pf->hw.pf_id)
+ return false;
+
+ unsigned long sensors = pf->hw.dev_caps.supported_sensors;
+
+ return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+}
+
+void ice_hwmon_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct device *hdev;
+
+ if (!ice_is_internal_reading_supported(pf))
+ return;
+
+ hdev = hwmon_device_register_with_info(dev, "ice", pf, &ice_chip_info,
+ NULL);
+ if (IS_ERR(hdev)) {
+ dev_warn(dev,
+ "hwmon_device_register_with_info returns error (%ld)",
+ PTR_ERR(hdev));
+ return;
+ }
+ pf->hwmon_dev = hdev;
+}
+
+void ice_hwmon_exit(struct ice_pf *pf)
+{
+ if (!pf->hwmon_dev)
+ return;
+ hwmon_device_unregister(pf->hwmon_dev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.h b/drivers/net/ethernet/intel/ice/ice_hwmon.h
new file mode 100644
index 000000000..d66d40354
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_HWMON_H_
+#define _ICE_HWMON_H_
+
+#ifdef CONFIG_ICE_HWMON
+void ice_hwmon_init(struct ice_pf *pf);
+void ice_hwmon_exit(struct ice_pf *pf);
+#else /* CONFIG_ICE_HWMON */
+static inline void ice_hwmon_init(struct ice_pf *pf) { }
+static inline void ice_hwmon_exit(struct ice_pf *pf) { }
+#endif /* CONFIG_ICE_HWMON */
+
+#endif /* _ICE_HWMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index b47cd43ae..a7a342809 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
}
/**
+ * ice_pkg_has_lport_extract - check if lport extraction is supported
+ * @hw: HW struct
+ */
+static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
+ u16 offset;
+ u8 fv_prot;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
+ &fv_prot, &offset);
+ if (fv_prot == ICE_FV_PROT_MDID &&
+ offset == ICE_LP_EXT_BUF_OFFSET)
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_lag_find_primary - returns pointer to primary interfaces lag struct
* @lag: local interfaces lag struct
*/
@@ -208,8 +229,7 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
eth_hdr = s_rule->hdr_data;
ice_fill_eth_hdr(eth_hdr);
- act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);
s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
s_rule->recipe_id = cpu_to_le16(recipe_id);
@@ -754,9 +774,7 @@ ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
ICE_SINGLE_ACT_LAN_ENABLE |
ICE_SINGLE_ACT_VALID_BIT |
- ((vsi->vsi_num <<
- ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M));
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
opc = ice_aqc_opc_add_sw_rules;
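The two conversions in this file swap open-coded shift-and-mask for FIELD_PREP() from <linux/bitfield.h>, which derives the shift from the mask at compile time so the separate _S define can go away. A userspace sketch of the equivalence, using an illustrative mask (not the real ICE_SINGLE_ACT_VSI_ID_M) and a GCC/Clang builtin in place of the kernel's compile-time machinery:

```c
#include <stdint.h>
#include <stdio.h>

#define VSI_ID_M 0x0000ff00u	/* illustrative: 8-bit field at bit 8 */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	/* shift = position of the mask's lowest set bit */
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t open_coded = (42u << 8) & VSI_ID_M; /* old style */

	printf("%s\n", field_prep(VSI_ID_M, 42) == open_coded ?
	       "equal" : "mismatch");		/* prints "equal" */
	return 0;
}
```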
@@ -1209,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
}
/**
- * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
* @pf: PF struct
*/
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
@@ -1222,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
else
ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
- if (caps->sriov_lag)
+ if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
@@ -2023,7 +2041,7 @@ int ice_init_lag(struct ice_pf *pf)
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ &recipe_bits, NULL);
if (err)
continue;
@@ -2031,7 +2049,7 @@ int ice_init_lag(struct ice_pf *pf)
recipe_bits |= BIT(lag->pf_recipe) |
BIT(lag->lport_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
- (u8 *)&recipe_bits, NULL);
+ recipe_bits, NULL);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index ede833dfa..183b38792 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -17,6 +17,9 @@ enum ice_lag_role {
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
+#define ICE_SW_DEFAULT_PROFILE 0
+#define ICE_FV_PROT_MDID 255
+#define ICE_LP_EXT_BUF_OFFSET 32
struct ice_pf;
struct ice_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 89f986a75..d384ddfcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -673,6 +673,212 @@ struct ice_tlan_ctx {
* Use the enum ice_rx_l2_ptype to decode the packet type
* ENDIF
*/
+#define ICE_PTYPES \
+ /* L2 Packet types */ \
+ ICE_PTT_UNUSED_ENTRY(0), \
+ ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
+ ICE_PTT_UNUSED_ENTRY(2), \
+ ICE_PTT_UNUSED_ENTRY(3), \
+ ICE_PTT_UNUSED_ENTRY(4), \
+ ICE_PTT_UNUSED_ENTRY(5), \
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT_UNUSED_ENTRY(8), \
+ ICE_PTT_UNUSED_ENTRY(9), \
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
+ ICE_PTT_UNUSED_ENTRY(12), \
+ ICE_PTT_UNUSED_ENTRY(13), \
+ ICE_PTT_UNUSED_ENTRY(14), \
+ ICE_PTT_UNUSED_ENTRY(15), \
+ ICE_PTT_UNUSED_ENTRY(16), \
+ ICE_PTT_UNUSED_ENTRY(17), \
+ ICE_PTT_UNUSED_ENTRY(18), \
+ ICE_PTT_UNUSED_ENTRY(19), \
+ ICE_PTT_UNUSED_ENTRY(20), \
+ ICE_PTT_UNUSED_ENTRY(21), \
+ \
+ /* Non Tunneled IPv4 */ \
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(25), \
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> IPv4 */ \
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(32), \
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> IPv6 */ \
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(39), \
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT */ \
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 --> GRE/NAT --> IPv4 */ \
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(47), \
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> IPv6 */ \
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(54), \
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC */ \
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(62), \
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(69), \
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */ \
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(77), \
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(84), \
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* Non Tunneled IPv6 */ \
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(91), \
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> IPv4 */ \
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(98), \
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> IPv6 */ \
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(105), \
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT */ \
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> IPv4 */ \
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(113), \
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> IPv6 */ \
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(120), \
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC */ \
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(128), \
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(135), \
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */ \
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(143), \
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
+ \
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
+ ICE_PTT_UNUSED_ENTRY(150), \
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+#define ICE_NUM_DEFINED_PTYPES 154
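Hoisting the table body into ICE_PTYPES (plus an ICE_NUM_DEFINED_PTYPES count) lets the same entries seed any lookup table that defines its own ICE_PTT()/ICE_PTT_UNUSED_ENTRY() before expanding the macro. A reduced model of the trick (the entry layout here is illustrative, not the driver's):

    /* Each PTT(idx, ...) expands to a designated initializer, so one
     * PTYPES macro can populate differently-typed tables. */
    #define PTYPES          \
            PTT_UNUSED(0),  \
            PTT(1, 2),      \
            PTT_UNUSED(2),

    struct decoded { unsigned int known : 1; unsigned int payload : 3; };

    #define PTT(idx, pay)   [idx] = { .known = 1, .payload = (pay) }
    #define PTT_UNUSED(idx) [idx] = { .known = 0 }

    static const struct decoded lkup[4] = { PTYPES };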
/* macro to make the table lines short, use explicit indexing with [PTYPE] */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
@@ -695,212 +901,10 @@ struct ice_tlan_ctx {
/* Lookup table mapping the 10-bit HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- /* L2 Packet types */
- ICE_PTT_UNUSED_ENTRY(0),
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- ICE_PTT_UNUSED_ENTRY(2),
- ICE_PTT_UNUSED_ENTRY(3),
- ICE_PTT_UNUSED_ENTRY(4),
- ICE_PTT_UNUSED_ENTRY(5),
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(8),
- ICE_PTT_UNUSED_ENTRY(9),
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- ICE_PTT_UNUSED_ENTRY(12),
- ICE_PTT_UNUSED_ENTRY(13),
- ICE_PTT_UNUSED_ENTRY(14),
- ICE_PTT_UNUSED_ENTRY(15),
- ICE_PTT_UNUSED_ENTRY(16),
- ICE_PTT_UNUSED_ENTRY(17),
- ICE_PTT_UNUSED_ENTRY(18),
- ICE_PTT_UNUSED_ENTRY(19),
- ICE_PTT_UNUSED_ENTRY(20),
- ICE_PTT_UNUSED_ENTRY(21),
-
- /* Non Tunneled IPv4 */
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(25),
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(32),
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(39),
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(47),
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(54),
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(62),
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(69),
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(77),
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(84),
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(91),
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(98),
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(105),
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(113),
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(120),
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(128),
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(135),
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(143),
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- ICE_PTT_UNUSED_ENTRY(150),
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+ ICE_PTYPES
/* unused entries */
- [154 ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+ [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
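The `[ICE_NUM_DEFINED_PTYPES ... 1023]` entry uses the GCC/Clang range-designator extension to initialize the whole unused tail at once; a static const table would be zeroed anyway, so the explicit form mainly documents intent and ties the bound to the new macro. A small standalone illustration (GNU C extension, assumed available since the kernel requires it):

    #define N_DEFINED 3

    static const int tbl[8] = {
            [0] = 10, [1] = 11, [2] = 12,
            [N_DEFINED ... 7] = 0,   /* one initializer covers the tail */
    };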
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index c01950de4..cfc20684f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -212,11 +212,18 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq));
break;
case ICE_VSI_SWITCHDEV_CTRL:
- /* The number of queues for ctrl VSI is equal to number of VFs.
+	/* The number of queues for ctrl VSI is equal to the number of PRs.
 	 * Each ring is associated with the corresponding VF_PR netdev.
+	 * Tx and Rx ring counts are always equal.
*/
- vsi->alloc_txq = ice_get_num_vfs(pf);
- vsi->alloc_rxq = vsi->alloc_txq;
+ if (vsi->req_txq && vsi->req_rxq) {
+ vsi->alloc_txq = vsi->req_txq;
+ vsi->alloc_rxq = vsi->req_rxq;
+ } else {
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ }
+
vsi->num_q_vectors = 1;
break;
case ICE_VSI_VF:
@@ -519,16 +526,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
struct ice_pf *pf = q_vector->vsi->back;
- struct ice_vf *vf;
- unsigned int bkt;
+ struct ice_repr *repr;
+ unsigned long id;
if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf)
- napi_schedule(&vf->repr->q_vector->napi);
- rcu_read_unlock();
+ xa_for_each(&pf->eswitch.reprs, id, repr)
+ napi_schedule(&repr->q_vector->napi);
return IRQ_HANDLED;
}
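The rewritten handler iterates representors from an xarray instead of walking the VF hash under RCU by hand; xa_for_each() yields every present (index, entry) pair and takes the RCU read lock internally. The traversal in isolation (struct layout assumed from the hunk above):

    #include <linux/xarray.h>
    #include <linux/netdevice.h>

    /* Kick NAPI on every registered representor; xa_for_each() handles
     * RCU internally, so no explicit rcu_read_lock() is needed. */
    static void kick_all_reprs(struct xarray *reprs)
    {
            struct ice_repr *repr;
            unsigned long id;

            xa_for_each(reprs, id, repr)
                    napi_schedule(&repr->q_vector->napi);
    }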
@@ -969,9 +974,8 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
/* allow all untagged/tagged packets by default on Tx */
- ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
- ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
- ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
* results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
*
@@ -982,13 +986,11 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
ctxt->info.outer_vlan_flags =
- (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
ctxt->info.outer_vlan_flags |=
- (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
- ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
+ ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
ctxt->info.outer_vlan_flags |=
FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
@@ -1067,10 +1069,8 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
offset += num_rxq_per_tc;
tx_count += num_txq_per_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
@@ -1153,18 +1153,14 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.max_fd_fltr_shared =
cpu_to_le16(vsi->num_bfltr);
/* default queue index within the VSI of the default FD */
- val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
- ICE_AQ_VSI_FD_DEF_Q_M);
+ val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
/* target queue or queue group to the FD filter */
- val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
- ICE_AQ_VSI_FD_DEF_GRP_M);
+ val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
ctxt->info.fd_def_q = cpu_to_le16(val);
/* queue index on which FD filter completion is reported */
- val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
- ICE_AQ_VSI_FD_REPORT_Q_M);
+ val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
/* priority of the default qindex action */
- val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
- ICE_AQ_VSI_FD_DEF_PRIORITY_M);
+ val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
ctxt->info.fd_report_opt = cpu_to_le16(val);
}
@@ -1187,12 +1183,10 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
break;
case ICE_VSI_VF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
break;
default:
dev_dbg(dev, "Unsupported VSI type %s\n",
@@ -1200,9 +1194,12 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
return;
}
- ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ vsi->rss_hfunc = hash_type;
+
+ ctxt->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}
static void
@@ -1216,10 +1213,8 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
pow = order_base_2(qcount);
- qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
- ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
@@ -1601,12 +1596,44 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
}
+static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
+ /* configure RSS for IPv4 with input set IP src/dst */
+ {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for IPv6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp4 with input set IP src/dst - only support
+ * RSS on SCTPv4 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
+ {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
+ {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
+ /* configure RSS for sctp6 with input set IPv6 src/dst - only support
+ * RSS on SCTPv6 on outer headers (non-tunneled)
+ */
+ {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
+ {ICE_FLOW_SEG_HDR_ESP,
+ ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+};
+
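Collapsing the per-flow calls into default_rss_cfgs[] means enabling RSS for another flow type becomes one new table row instead of another call-plus-log stanza; the loop added further down in ice_vsi_set_rss_flow_fld() applies every row uniformly. A reduced model of the table-plus-loop shape (values and stub are illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    /* Table-driven configuration: parameters live in data, the code
     * path is written once. */
    struct rss_cfg { unsigned int hdrs; unsigned long long flds; };

    static int add_rss_cfg(const struct rss_cfg *cfg)
    {
            return 0;               /* stub standing in for the HW call */
    }

    static void apply_default_cfgs(void)
    {
            static const struct rss_cfg cfgs[] = {
                    { 0x01, 0x003 },        /* e.g. IPv4 src/dst */
                    { 0x02, 0x00c },        /* e.g. IPv6 src/dst */
            };

            for (size_t i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
                    if (add_rss_cfg(&cfgs[i]))
                            fprintf(stderr, "cfg %zu failed\n", i);
    }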
/**
* ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
* @vsi: VSI to be configured
@@ -1620,11 +1647,12 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
*/
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
- u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
+ u16 vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct device *dev;
int status;
+ u32 i;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1632,67 +1660,15 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
vsi_num);
return;
}
- /* configure RSS for IPv4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for IPv6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp4 with input set IP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
- ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
+ const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
- /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
- ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- /* configure RSS for sctp6 with input set IPv6 src/dst */
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
- ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
- vsi_num, status);
-
- status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
- ICE_FLOW_SEG_HDR_ESP);
- if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
- vsi_num, status);
+ status = ice_add_rss_cfg(hw, vsi, cfg);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
+ cfg->addl_hdrs, cfg->hash_flds,
+ cfg->hdr_type, cfg->symm);
+ }
}
/**
@@ -1809,11 +1785,8 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
QRXFLXP_CNTXT_RXDID_PRIO_M |
QRXFLXP_CNTXT_TS_M);
- regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
- QRXFLXP_CNTXT_RXDID_IDX_M;
-
- regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
- QRXFLXP_CNTXT_RXDID_PRIO_M;
+ regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
+ regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);
if (ena_ts)
/* Enable TimeSync on this queue */
@@ -2451,6 +2424,10 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
vsi->stat_offsets_loaded = false;
if (ice_is_xdp_ena_vsi(vsi)) {
@@ -2927,6 +2904,123 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
+ * __ice_queue_set_napi - Set the napi instance for the queue
+ * @dev: device to which NAPI and queue belong
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ * @locked: is the rtnl_lock already held
+ *
+ * Set the napi instance for the queue. Caller indicates the lock status.
+ */
+static void
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
+{
+ if (!locked)
+ rtnl_lock();
+ netif_queue_set_napi(dev, queue_index, type, napi);
+ if (!locked)
+ rtnl_unlock();
+}
+
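Passing a `locked` flag down is the standard way to make one helper usable both from paths that already hold rtnl (for example ndo callbacks) and from ones that do not (service task, reset): rtnl_lock() is not recursive, so taking it again on an already-held path would deadlock. The shape in isolation (kernel style, illustrative body):

    /* Caller states the rtnl lock status; the helper only takes the
     * lock when the caller does not already hold it. */
    static void do_rtnl_op(struct net_device *dev, bool rtnl_held)
    {
            if (!rtnl_held)
                    rtnl_lock();

            /* ... work that must run under rtnl ... */

            if (!rtnl_held)
                    rtnl_unlock();
    }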
+/**
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (!vsi->netdev)
+ return;
+
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ * @locked: is the rtnl_lock already held
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
+ * Associate the q_vector napi with all the queue[s] on the vector
+ */
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_vsi_set_napi_queues - Associate queues with NAPI for all vectors
+ * @vsi: VSI pointer
+ *
+ * Associate queue[s] with napi for all vectors
+ */
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3071,27 +3165,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
/**
- * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures or allocates new ones
* @vsi: VSI pointer
- * @prev_txq: Number of Tx rings before ring reallocation
- * @prev_rxq: Number of Rx rings before ring reallocation
*/
-static void
-ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
{
+ u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
+ u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
struct ice_vsi_stats *vsi_stat;
struct ice_pf *pf = vsi->back;
+ u16 prev_txq = vsi->alloc_txq;
+ u16 prev_rxq = vsi->alloc_rxq;
int i;
- if (!prev_txq || !prev_rxq)
- return;
- if (vsi->type == ICE_VSI_CHNL)
- return;
-
vsi_stat = pf->vsi_stats[vsi->idx];
- if (vsi->num_txq < prev_txq) {
- for (i = vsi->num_txq; i < prev_txq; i++) {
+ if (req_txq < prev_txq) {
+ for (i = req_txq; i < prev_txq; i++) {
if (vsi_stat->tx_ring_stats[i]) {
kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
@@ -3099,14 +3192,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
}
}
- if (vsi->num_rxq < prev_rxq) {
- for (i = vsi->num_rxq; i < prev_rxq; i++) {
+ tx_ring_stats = vsi_stat->tx_ring_stats;
+ vsi_stat->tx_ring_stats =
+ krealloc_array(vsi_stat->tx_ring_stats, req_txq,
+ sizeof(*vsi_stat->tx_ring_stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vsi_stat->tx_ring_stats) {
+ vsi_stat->tx_ring_stats = tx_ring_stats;
+ return -ENOMEM;
+ }
+
+ if (req_rxq < prev_rxq) {
+ for (i = req_rxq; i < prev_rxq; i++) {
if (vsi_stat->rx_ring_stats[i]) {
kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
}
}
}
+
+ rx_ring_stats = vsi_stat->rx_ring_stats;
+ vsi_stat->rx_ring_stats =
+ krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
+ sizeof(*vsi_stat->rx_ring_stats),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vsi_stat->rx_ring_stats) {
+ vsi_stat->rx_ring_stats = rx_ring_stats;
+ return -ENOMEM;
+ }
+
+ return 0;
}
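krealloc_array() returns NULL on failure but leaves the original allocation valid, which is why both branches above stash the old pointer and restore it on -ENOMEM: the stats arrays stay consistent either way. The idiom in isolation (a sketch, not the driver's helper):

    #include <linux/slab.h>

    /* Resize a pointer array; on failure the caller keeps the old,
     * still-valid buffer (krealloc never frees it on error). */
    static int resize_ptr_array(void ***slot, size_t count, size_t elem)
    {
            void **resized;

            resized = krealloc_array(*slot, count, elem,
                                     GFP_KERNEL | __GFP_ZERO);
            if (!resized)
                    return -ENOMEM;         /* *slot left untouched */

            *slot = resized;
            return 0;
    }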
/**
@@ -3123,9 +3238,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int ret, prev_txq, prev_rxq;
- int prev_num_q_vectors = 0;
+ int prev_num_q_vectors;
struct ice_pf *pf;
+ int ret;
if (!vsi)
return -EINVAL;
@@ -3137,6 +3252,15 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
+ ret = ice_vsi_realloc_stat_arrays(vsi);
+ if (ret)
+ goto err_vsi_cfg;
+
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
+ if (ret)
+ goto err_vsi_cfg;
+
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
if (!coalesce)
@@ -3144,14 +3268,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
- prev_txq = vsi->num_txq;
- prev_rxq = vsi->num_rxq;
-
- ice_vsi_decfg(vsi);
- ret = ice_vsi_cfg_def(vsi, &params);
- if (ret)
- goto err_vsi_cfg;
-
ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
if (vsi_flags & ICE_VSI_FLAG_INIT) {
@@ -3163,8 +3279,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
- ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
-
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
@@ -3172,8 +3286,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
err_vsi_cfg_tc_lan:
ice_vsi_decfg(vsi);
-err_vsi_cfg:
kfree(coalesce);
+err_vsi_cfg:
return ret;
}
@@ -3316,9 +3430,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
pow = order_base_2(tc0_qcount);
- qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
- ICE_AQ_VSI_TC_Q_OFFSET_M) |
- ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+ qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
+ qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index f24f5d1e6..bfcfc582a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -91,6 +91,16 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index dabf33cec..6d256dbcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,6 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
+#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
* ice driver.
@@ -979,7 +980,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
* Octets 13 - 20 are TSA values - leave as zeros
*/
buf[5] = 0x64;
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += len + 2;
tlv = (struct ice_lldp_org_tlv *)
((char *)tlv + sizeof(tlv->typelen) + len);
@@ -1013,7 +1014,7 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
/* Octet 1 left as all zeros - PFC disabled */
buf[0] = 0x08;
- len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
offset += len + 2;
if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
@@ -1252,6 +1253,32 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
+ * ice_get_fwlog_data - copy the FW log data from ARQ event
+ * @pf: PF that the FW log event is associated with
+ * @event: event structure containing FW log data
+ */
+static void
+ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_fwlog_data *fwlog;
+ struct ice_hw *hw = &pf->hw;
+
+ fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
+
+ memset(fwlog->data, 0, PAGE_SIZE);
+ fwlog->data_size = le16_to_cpu(event->desc.datalen);
+
+ memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
+ ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
+
+ if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
+ /* the rings are full so bump the head to create room */
+ ice_fwlog_ring_increment(&hw->fwlog_ring.head,
+ hw->fwlog_ring.size);
+ }
+}
+
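FW log pages land in a fixed-size ring: the tail advances per event, and when the ring fills the head is bumped too, so the oldest page is overwritten rather than blocking new data. A minimal model of that drop-oldest ring (assuming the ice_fwlog_ring_* helpers behave like this):

    #include <stdbool.h>
    #include <stdint.h>

    struct log_ring { uint16_t head, tail, size; };

    static void ring_increment(uint16_t *idx, uint16_t size)
    {
            *idx = (*idx + 1) % size;
    }

    static bool ring_full(const struct log_ring *r)
    {
            return ((r->tail + 1) % r->size) == r->head;
    }

    /* Record one entry; drop the oldest when there is no room left. */
    static void ring_push(struct log_ring *r)
    {
            ring_increment(&r->tail, r->size);
            if (ring_full(r))
                    ring_increment(&r->head, r->size);
    }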
+/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1532,8 +1559,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vc_process_vf_msg(pf, &event, &data);
break;
- case ice_aqc_opc_fw_logging:
- ice_output_fw_log(hw, &event.desc, event.msg_buf);
+ case ice_aqc_opc_fw_logs_event:
+ ice_get_fwlog_data(pf, &event);
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
@@ -1744,14 +1771,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* find what triggered an MDD event */
reg = rd32(hw, GL_MDET_TX_PQM);
if (reg & GL_MDET_TX_PQM_VALID_M) {
- u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
- GL_MDET_TX_PQM_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
- GL_MDET_TX_PQM_VF_NUM_S;
- u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
- GL_MDET_TX_PQM_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
- GL_MDET_TX_PQM_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
@@ -1761,14 +1784,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & GL_MDET_TX_TCLAN_VALID_M) {
- u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
- GL_MDET_TX_TCLAN_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
- GL_MDET_TX_TCLAN_VF_NUM_S;
- u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
- GL_MDET_TX_TCLAN_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
- GL_MDET_TX_TCLAN_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
@@ -1778,14 +1797,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, GL_MDET_RX);
if (reg & GL_MDET_RX_VALID_M) {
- u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
- GL_MDET_RX_PF_NUM_S;
- u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
- GL_MDET_RX_VF_NUM_S;
- u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
- GL_MDET_RX_MAL_TYPE_S;
- u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
- GL_MDET_RX_QNUM_S);
+ u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg);
+ u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg);
+ u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg);
+ u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
@@ -3031,6 +3046,7 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
static void ice_ena_misc_vector(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ u32 pf_intr_start_offset;
u32 val;
/* Disable anti-spoof detection interrupt to prevent spurious event
@@ -3059,6 +3075,47 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
/* SW_ITR_IDX = 0, but don't change INTENA */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+
+ if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ return;
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
+ GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+}
+
+/**
+ * ice_ll_ts_intr - ll_ts interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
+{
+ struct ice_pf *pf = data;
+ u32 pf_intr_start_offset;
+ struct ice_ptp_tx *tx;
+ unsigned long flags;
+ struct ice_hw *hw;
+ u32 val;
+ u8 idx;
+
+ hw = &pf->hw;
+ tx = &pf->ptp.port.tx;
+ spin_lock_irqsave(&tx->lock, flags);
+ ice_ptp_complete_tx_single_tstamp(tx);
+
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
+ val);
+
+ return IRQ_HANDLED;
}
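find_next_bit_wrap(map, size, offset) scans the bitmap from offset to the end, wraps to the start, and returns `size` when no bit is set; starting just past last_ll_ts_idx_read makes the timestamp slots round-robin so no index is starved. A userspace model of the wrapped scan (single-word bitmap, size no larger than the word width):

    /* Model of the kernel's find_next_bit_wrap() convention: returns
     * 'size' when no bit is set. */
    static unsigned int next_bit_wrap(unsigned long map, unsigned int size,
                                      unsigned int offset)
    {
            for (unsigned int n = 0; n < size; n++) {
                    unsigned int i = (offset + n) % size;

                    if (map & (1UL << i))
                            return i;
            }
            return size;
    }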
/**
@@ -3069,6 +3126,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
struct ice_pf *pf = (struct ice_pf *)data;
+ irqreturn_t ret = IRQ_HANDLED;
struct ice_hw *hw = &pf->hw;
struct device *dev;
u32 oicr, ena_mask;
@@ -3108,8 +3166,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* we have a reset warning */
ena_mask &= ~PFINT_OICR_GRST_M;
- reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
- GLGEN_RSTAT_RESET_TYPE_S;
+ reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M,
+ rd32(hw, GLGEN_RSTAT));
if (reset == ICE_RESET_CORER)
pf->corer_count++;
@@ -3150,8 +3208,22 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
+ if (ice_pf_state_is_nominal(pf) &&
+ pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ unsigned long flags;
+ u8 idx;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock_irqrestore(&tx->lock, flags);
+ } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ ret = IRQ_WAKE_THREAD;
+ }
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3167,7 +3239,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
GLTSYN_STAT_EVENT1_M |
GLTSYN_STAT_EVENT2_M);
- set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
+ ice_ptp_extts_event(pf);
}
}
@@ -3190,8 +3262,11 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(ICE_PFR_REQ, pf->state);
}
}
+ ice_service_task_schedule(pf);
+ if (ret == IRQ_HANDLED)
+ ice_irq_dynamic_ena(hw, NULL, NULL);
- return IRQ_WAKE_THREAD;
+ return ret;
}
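With devm_request_threaded_irq(), the thread function runs only when the hard handler returns IRQ_WAKE_THREAD; returning IRQ_HANDLED, as the hunk now does when all work finished in hard context, skips the thread entirely, so the interrupt must also be re-enabled on that path in the hard handler. The split in outline (illustrative names, not the driver's):

    /* Hard handler does the cheap work; the thread runs only when the
     * hard handler returns IRQ_WAKE_THREAD. */
    static irqreturn_t foo_hardirq(int irq, void *data)
    {
            struct foo_priv *priv = data;

            if (foo_handled_inline(priv)) {
                    foo_reenable_irq(priv); /* thread fn will not run */
                    return IRQ_HANDLED;
            }

            set_bit(FOO_WORK_PENDING, priv->flags);
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t foo_thread_fn(int irq, void *data)
    {
            struct foo_priv *priv = data;

            if (test_and_clear_bit(FOO_WORK_PENDING, priv->flags))
                    foo_do_slow_work(priv);

            foo_reenable_irq(priv);
            return IRQ_HANDLED;
    }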
/**
@@ -3207,12 +3282,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
hw = &pf->hw;
if (ice_is_reset_in_progress(pf->state))
- return IRQ_HANDLED;
-
- ice_service_task_schedule(pf);
-
- if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
- ice_ptp_extts_event(pf);
+ goto skip_irq;
if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
/* Process outstanding Tx timestamps. If there is more work,
@@ -3224,6 +3294,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
}
}
+skip_irq:
ice_irq_dynamic_ena(hw, NULL, NULL);
return IRQ_HANDLED;
@@ -3254,6 +3325,20 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
}
/**
+ * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup
+ * @pf: board private structure
+ */
+static void ice_free_irq_msix_ll_ts(struct ice_pf *pf)
+{
+ int irq_num = pf->ll_ts_irq.virq;
+
+ synchronize_irq(irq_num);
+ devm_free_irq(ice_pf_to_dev(pf), irq_num, pf);
+
+ ice_free_irq(pf, pf->ll_ts_irq);
+}
+
+/**
* ice_free_irq_msix_misc - Unroll misc vector setup
* @pf: board private structure
*/
@@ -3272,6 +3357,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf);
ice_free_irq(pf, pf->oicr_irq);
+ if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ ice_free_irq_msix_ll_ts(pf);
}
/**
@@ -3297,10 +3384,12 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
- /* This enables Sideband queue Interrupt causes */
- val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
- PFINT_SB_CTL_CAUSE_ENA_M);
- wr32(hw, PFINT_SB_CTL, val);
+ if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ /* enable Sideband queue Interrupt causes */
+ val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
+ PFINT_SB_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL, val);
+ }
ice_flush(hw);
}
@@ -3317,13 +3406,17 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- struct msi_map oicr_irq;
+ u32 pf_intr_start_offset;
+ struct msi_map irq;
int err = 0;
if (!pf->int_name[0])
snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
dev_driver_string(dev), dev_name(dev));
+ if (!pf->int_name_ll_ts[0])
+ snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
+ "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));
/* Do not request IRQ but do enable OICR interrupt since settings are
* lost during reset. Note that this function is called only during
* rebuild path and not while reset is in progress.
@@ -3332,11 +3425,11 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
goto skip_req_irq;
/* reserve one vector in irq_tracker for misc interrupts */
- oicr_irq = ice_alloc_irq(pf, false);
- if (oicr_irq.index < 0)
- return oicr_irq.index;
+ irq = ice_alloc_irq(pf, false);
+ if (irq.index < 0)
+ return irq.index;
- pf->oicr_irq = oicr_irq;
+ pf->oicr_irq = irq;
err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,
ice_misc_intr_thread_fn, 0,
pf->int_name, pf);
@@ -3347,10 +3440,34 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return err;
}
+ /* reserve one vector in irq_tracker for ll_ts interrupt */
+ if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ goto skip_req_irq;
+
+ irq = ice_alloc_irq(pf, false);
+ if (irq.index < 0)
+ return irq.index;
+
+ pf->ll_ts_irq = irq;
+ err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
+ pf->int_name_ll_ts, pf);
+ if (err) {
+ dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ pf->int_name_ll_ts, err);
+ ice_free_irq(pf, pf->ll_ts_irq);
+ return err;
+ }
+
skip_req_irq:
ice_ena_misc_vector(pf);
ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);
+ /* This enables LL TS interrupt */
+ pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST;
+ if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
+ wr32(hw, PFINT_SB_CTL,
+ ((pf->ll_ts_irq.index + pf_intr_start_offset) &
+ PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M);
wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
@@ -3375,9 +3492,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- ice_for_each_q_vector(vsi, v_idx)
+ ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ }
}
/**
@@ -3397,6 +3516,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
+ netdev->xdp_metadata_ops = &ice_xdp_md_ops;
ice_set_ethtool_ops(netdev);
if (vsi->type != ICE_VSI_PF)
@@ -4361,6 +4481,19 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
+ * ice_pf_fwlog_update_module - update the log level for one module
+ * @pf: pointer to the PF struct
+ * @log_level: log_level to use for the @module
+ * @module: module to update
+ */
+void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ hw->fwlog_cfg.module_entries[module].log_level = log_level;
+}
+
+/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4685,6 +4818,8 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_init_lag(pf))
dev_warn(dev, "Failed to init link aggregation support\n");
+
+ ice_hwmon_init(pf);
}
static void ice_deinit_features(struct ice_pf *pf)
@@ -4702,6 +4837,8 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_ptp_release(pf);
if (test_bit(ICE_FLAG_DPLL, pf->flags))
ice_dpll_deinit(pf);
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ xa_destroy(&pf->eswitch.reprs);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5203,11 +5340,15 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ ice_debugfs_exit();
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
+ ice_hwmon_exit(pf);
+
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
set_bit(ICE_DOWN, pf->state);
@@ -5306,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
@@ -5672,6 +5814,8 @@ static int __init ice_module_init(void)
goto err_dest_wq;
}
+ ice_debugfs_init();
+
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
@@ -5682,6 +5826,7 @@ static int __init ice_module_init(void)
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
+ ice_debugfs_exit();
err_dest_wq:
destroy_workqueue(ice_wq);
return status;
@@ -6041,6 +6186,23 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
}
/**
+ * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
+ * @vsi: PF's VSI
+ * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order
+ *
+ * Store current stripped VLAN proto in ring packet context,
+ * so it can be accessed more efficiently by packet processing code.
+ */
+static void
+ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype)
+{
+ u16 i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype;
+}
+
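A hedged sketch of the consumer side: with the ethertype cached per ring, hot-path Rx code can tag the skb without re-deriving VSI state per packet. Only pkt_ctx.vlan_proto comes from the hunk above; the helper name and the vlan_tci source are illustrative.

    #include <linux/if_vlan.h>

    static void demo_fill_vlan_tag(struct sk_buff *skb,
                                   const struct ice_pkt_ctx *pkt_ctx,
                                   u16 vlan_tci)
    {
            /* zero means stripping is disabled: leave the skb untagged */
            if (pkt_ctx->vlan_proto)
                    __vlan_hwaccel_put_tag(skb, pkt_ctx->vlan_proto,
                                           vlan_tci);
    }
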
+/**
* ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
* @vsi: PF's VSI
* @features: features used to determine VLAN offload settings
@@ -6082,6 +6244,9 @@ ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
if (strip_err || insert_err)
return -EIO;
+ ice_set_rx_rings_vlan_proto(vsi, enable_stripping ?
+ htons(vlan_ethertype) : 0);
+
return 0;
}
@@ -6668,13 +6833,11 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_crc_errors = pf->stats.crc_errors;
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes +
- pf->stats.rx_len_errors +
pf->stats.rx_undersize +
pf->hw_csum_rx_error +
pf->stats.rx_jabber +
pf->stats.rx_fragments +
pf->stats.rx_oversize;
- cur_ns->rx_length_errors = pf->stats.rx_len_errors;
/* record drops from the port level */
cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
}
@@ -6814,9 +6977,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
- ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
- &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
-
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
@@ -7399,9 +7559,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+ err = ice_eswitch_rebuild(pf);
if (err) {
- dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+ dev_err(dev, "Switchdev rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
@@ -7702,6 +7862,59 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
}
/**
+ * ice_set_rss_hfunc - Set RSS HASH function
+ * @vsi: Pointer to VSI structure
+ * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctx;
+ bool symm;
+ int err;
+
+ if (hfunc == vsi->rss_hfunc)
+ return 0;
+
+ if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ &&
+ hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
+ return -EOPNOTSUPP;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss;
+ ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+ ctx->info.q_opt_rss |=
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ err = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n",
+ vsi->vsi_num, err);
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ vsi->rss_hfunc = hfunc;
+ netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n",
+ hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ?
+ "Symmetric " : "");
+ }
+ kfree(ctx);
+ if (err)
+ return err;
+
+ /* Fix the symmetry setting for all existing RSS configurations */
+ symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
+ return ice_set_rss_cfg_symm(hw, vsi, symm);
+}
+
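A hedged sketch of a caller, e.g. an ethtool .set_rxfh handler mapping the generic hash ID onto the new helper. The ETH_RSS_HASH_TOP-to-Toeplitz mapping follows from the hunk; everything else here is illustrative.

    static int demo_set_hfunc(struct ice_vsi *vsi, u8 ethtool_hfunc)
    {
            u8 hfunc;

            switch (ethtool_hfunc) {
            case ETH_RSS_HASH_TOP:
                    hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
                    break;
            default:
                    return -EOPNOTSUPP;
            }
            return ice_set_rss_hfunc(vsi, hfunc);
    }
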
+/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
* @pid: process ID
@@ -7889,8 +8102,8 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct ice_hw *hw = &pf->hw;
u32 head, val = 0;
- head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
- QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
+ head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
+ rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
@@ -8138,13 +8351,12 @@ static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
enum ice_flow_priority prio;
- u64 prof_id;
/* add this VSI to FDir profile for this flow */
prio = ICE_FLOW_PRIO_NORMAL;
prof = hw->fdir_prof[flow];
- prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ status = ice_flow_add_entry(hw, ICE_BLK_FD,
+ prof->prof_id[tun],
prof->vsi_h[0], vsi->idx,
prio, prof->fdir_seg[tun],
&entry_h);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index f6f52a248..d4e05d2cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -571,8 +571,8 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
return status;
}
- nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+ nvm->major = FIELD_GET(ICE_NVM_VER_HI_MASK, ver);
+ nvm->minor = FIELD_GET(ICE_NVM_VER_LO_MASK, ver);
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
if (status) {
@@ -706,9 +706,9 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
combo_ver = le32_to_cpu(civd.combo_ver);
- orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
- orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
- orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT);
+ orom->major = FIELD_GET(ICE_OROM_VER_MASK, combo_ver);
+ orom->patch = FIELD_GET(ICE_OROM_VER_PATCH_MASK, combo_ver);
+ orom->build = FIELD_GET(ICE_OROM_VER_BUILD_MASK, combo_ver);
return 0;
}
@@ -950,7 +950,8 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/* Check that the control word indicates validity */
- if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
+ if (FIELD_GET(ICE_SR_CTRL_WORD_1_M, ctrl_word) !=
+ ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
return -EIO;
}
@@ -1027,7 +1028,7 @@ int ice_init_nvm(struct ice_hw *hw)
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
- sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
/* Switching to words (sr_size contains power of 2) */
flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
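
A hedged illustration of the conversion pattern applied throughout this file (and in ice_main.c above): FIELD_GET() derives the shift from a constant mask at compile time, replacing open-coded "(reg & MASK) >> SHIFT" pairs and letting the separate _S shift macros be dropped. The mask below is illustrative, not a device register.

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define DEMO_VER_HI_M GENMASK(31, 24)   /* illustrative field mask */

    static inline u8 demo_major(u32 ver)
    {
            /* equivalent to: (ver & DEMO_VER_HI_M) >> 24 */
            return FIELD_GET(DEMO_VER_HI_M, ver);
    }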
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index 82bc54fec..a2562f042 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -24,7 +24,7 @@
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
-#define ICE_M(m, s) ((m) << (s))
+#define ICE_M(m, s) ((m ## U) << (s))
struct ice_dma_mem {
void *va;
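
A hedged note on the ICE_M change: pasting the "U" suffix onto the mask literal keeps the shifted value unsigned. With a plain int literal, shifting a mask into bit 31 is signed-overflow undefined behaviour, and the negative result sign-extends when widened to u64. The macros are restated below for illustration; note that ## token-pasting means ICE_M's first argument must now be a plain numeric literal.

    #define DEMO_M_OLD(m, s) ((m) << (s))
    #define DEMO_M_NEW(m, s) ((m ## U) << (s))

    /* DEMO_M_OLD(0xFF, 24): 0xFF is int, shifting into bit 31 is UB,
     * and (u64)(int)0xFF000000 sign-extends to 0xFFFFFFFFFF000000.
     * DEMO_M_NEW(0xFF, 24): 0xFFU stays unsigned, yielding 0xFF000000. */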
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index e6b1ce76c..3b6605c85 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,7 +7,7 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E822 0x100000000ULL
+#define UNKNOWN_INCVAL_E82X 0x100000000ULL
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
@@ -525,6 +525,119 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_req_tx_single_tstamp - Request Tx timestamp for a port from FW
+ * @tx: the PTP Tx timestamp tracker
+ * @idx: index of the timestamp to request
+ */
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{
+ struct ice_ptp_port *ptp_port;
+ struct sk_buff *skb;
+ struct ice_pf *pf;
+
+ if (!tx->init)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ /* Drop packets which have waited for more than 2 seconds */
+ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
+ /* Count the number of Tx timestamps that timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ /* Write the TS index to read into the PF register so the FW can read it */
+ wr32(&pf->hw, PF_SB_ATQBAL,
+ TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
+ TS_LL_READ_TS);
+ tx->last_ll_ts_idx_read = idx;
+}
+
+/**
+ * ice_ptp_complete_tx_single_tstamp - Complete Tx timestamp for a port
+ * @tx: the PTP Tx timestamp tracker
+ */
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
+{
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_ptp_port *ptp_port;
+ u64 raw_tstamp, tstamp;
+ bool drop_ts = false;
+ struct sk_buff *skb;
+ struct ice_pf *pf;
+ u32 val;
+
+ if (!tx->init || tx->last_ll_ts_idx_read < 0)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ val = rd32(&pf->hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (val & TS_LL_READ_TS) {
+ dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
+ return;
+ }
+
+ /* The high 8 bits of the TS are in bits 16:23 */
+ raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ raw_tstamp <<= 32;
+
+ /* Read the low 32 bit value */
+ raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+
+ /* For PHYs which don't implement a proper timestamp ready bitmap,
+ * verify that the timestamp value is different from the last cached
+ * timestamp. If it is not, skip this for now assuming it hasn't yet
+ * been captured by hardware.
+ */
+ if (!drop_ts && tx->verify_cached &&
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ return;
+
+ if (tx->verify_cached && raw_tstamp)
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ if (test_and_clear_bit(idx, tx->stale))
+ drop_ts = true;
+
+ if (!skb)
+ return;
+
+ if (drop_ts) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
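A hedged sketch of the 40-bit timestamp assembly performed above: the top 8 bits arrive in PF_SB_ATQBAL bits 16:23 (the TS_LL_READ_TS_HIGH field) and the low 32 bits in PF_SB_ATQBAH. The standalone helper is illustrative.

    static u64 demo_assemble_ll_ts(u32 atqbal, u32 atqbah)
    {
            u64 ts = FIELD_GET(TS_LL_READ_TS_HIGH, atqbal); /* bits 39:32 */

            return (ts << 32) | atqbah;                     /* bits 31:0 */
    }
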
+/**
* ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
* @tx: the PTP Tx timestamp tracker
*
@@ -575,6 +688,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -646,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
drop_ts = true;
skip_ts_read:
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
if (tx->verify_cached && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
@@ -654,7 +768,7 @@ skip_ts_read:
tx->tstamps[idx].skb = NULL;
if (test_and_clear_bit(idx, tx->stale))
drop_ts = true;
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* It is unlikely but possible that the SKB will have been
* flushed at this point due to link change or teardown.
@@ -705,7 +819,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
/* Read the Tx ready status first */
err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (err || tstamp_ready)
+ if (err)
+ break;
+ else if (tstamp_ready)
return ICE_TX_TSTAMP_WORK_PENDING;
}
@@ -722,6 +838,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
bool more_timestamps;
+ unsigned long flags;
if (!tx->init)
return ICE_TX_TSTAMP_WORK_DONE;
@@ -730,9 +847,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
ice_ptp_process_tx_tstamp(tx);
/* Check if there are outstanding Tx timestamps */
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
if (more_timestamps)
return ICE_TX_TSTAMP_WORK_PENDING;
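
A hedged note on the spin_lock_irqsave() conversions in these hunks: the LL TS vector registered in ice_main.c above is serviced by a hard-IRQ handler (ice_ll_ts_intr), so the tracker lock now appears to be taken from hard interrupt context, and every process-context holder of tx->lock must block local interrupts to avoid a self-deadlock. The canonical pattern, as used throughout:

    unsigned long flags;

    spin_lock_irqsave(&tx->lock, flags);    /* safe vs. the LL TS IRQ */
    /* ... manipulate tx->in_use / tx->stale / tx->tstamps ... */
    spin_unlock_irqrestore(&tx->lock, flags);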
@@ -769,6 +886,7 @@ ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
tx->in_use = in_use;
tx->stale = stale;
tx->init = 1;
+ tx->last_ll_ts_idx_read = -1;
spin_lock_init(&tx->lock);
@@ -786,6 +904,7 @@ static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
struct ice_hw *hw = &pf->hw;
+ unsigned long flags;
u64 tstamp_ready;
int err;
u8 idx;
@@ -809,12 +928,12 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
ice_clear_phy_tstamp(hw, tx->block, phy_idx);
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
clear_bit(idx, tx->in_use);
clear_bit(idx, tx->stale);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* Count the number of Tx timestamps flushed */
pf->ptp.tx_hwtstamp_flushed++;
@@ -838,9 +957,11 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
- spin_lock(&tx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
}
/**
@@ -853,9 +974,11 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
- spin_lock(&tx->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
tx->init = 0;
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* wait for potentially outstanding interrupt to complete */
synchronize_irq(pf->oicr_irq.virq);
@@ -875,7 +998,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
* @port: the port this structure tracks
@@ -886,11 +1009,11 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
* registers into chunks based on the port number.
*/
static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
tx->block = port / ICE_PORTS_PER_QUAD;
- tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
- tx->len = INDEX_PER_PORT_E822;
+ tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
+ tx->len = INDEX_PER_PORT_E82X;
tx->verify_cached = 0;
return ice_ptp_alloc_tx_tracker(tx);
@@ -1093,10 +1216,10 @@ static u64 ice_base_incval(struct ice_pf *pf)
if (ice_is_e810(hw))
incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
else
- incval = UNKNOWN_INCVAL_E822;
+ incval = UNKNOWN_INCVAL_E82X;
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1125,10 +1248,10 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
/* need to read FIFO state */
if (offs == 0 || offs == 1)
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO01_STATUS,
&val);
else
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_FIFO23_STATUS,
&val);
if (err) {
@@ -1138,9 +1261,9 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
}
if (offs & 0x1)
- phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ phy_sts = FIELD_GET(Q_REG_FIFO13_M, val);
else
- phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+ phy_sts = FIELD_GET(Q_REG_FIFO02_M, val);
if (phy_sts & FIFO_EMPTY) {
port->tx_fifo_busy_cnt = FIFO_OK;
@@ -1156,7 +1279,7 @@ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
dev_dbg(ice_pf_to_dev(pf),
"Port %d Tx FIFO still not empty; resetting quad %d\n",
port->port_num, quad);
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
port->tx_fifo_busy_cnt = FIFO_OK;
return 0;
}
@@ -1201,8 +1324,8 @@ static void ice_ptp_wait_for_offsets(struct kthread_work *work)
tx_err = ice_ptp_check_tx_fifo(port);
if (!tx_err)
- tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num);
- rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num);
+ tx_err = ice_phy_cfg_tx_offset_e82x(hw, port->port_num);
+ rx_err = ice_phy_cfg_rx_offset_e82x(hw, port->port_num);
if (tx_err || rx_err) {
/* Tx and/or Rx offset not yet configured, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1231,7 +1354,7 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e822(hw, port, true);
+ err = ice_stop_phy_timer_e82x(hw, port, true);
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1255,6 +1378,7 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
struct ice_pf *pf = ptp_port_to_pf(ptp_port);
u8 port = ptp_port->port_num;
struct ice_hw *hw = &pf->hw;
+ unsigned long flags;
int err;
if (ice_is_e810(hw))
@@ -1268,20 +1392,20 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
/* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock(&ptp_port->tx.lock);
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = true;
- spin_unlock(&ptp_port->tx.lock);
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
ptp_port->tx_fifo_busy_cnt = 0;
/* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e822(hw, port);
+ err = ice_start_phy_timer_e82x(hw, port);
if (err)
goto out_unlock;
/* Enable Tx timestamps right away */
- spin_lock(&ptp_port->tx.lock);
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
ptp_port->tx.calibrating = false;
- spin_unlock(&ptp_port->tx.lock);
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
@@ -1323,7 +1447,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1349,7 +1473,7 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
&val);
if (err)
break;
@@ -1357,13 +1481,13 @@ static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
if (ena) {
val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
- Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
+ threshold);
} else {
val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
}
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
val);
if (err)
break;
@@ -1503,8 +1627,7 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
* + num_in_channels * tmr_idx
*/
func = 1 + chan + (tmr_idx * 3);
- gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
- GLGEN_GPIO_CTL_PIN_FUNC_M);
+ gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
pf->ptp.ext_ts_chan |= (1 << chan);
} else {
/* clear the values we set to reset defaults */
@@ -1601,7 +1724,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
else
- start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
+ start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1614,7 +1737,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
/* 4. write GPIO CTL reg */
func = 8 + chan + (tmr_idx * 4);
val = GLGEN_GPIO_CTL_PIN_DIR_M |
- ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
+ FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func);
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
/* Store the value if requested */
@@ -1840,7 +1963,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E822)
+ if (hw->phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2127,30 +2250,26 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
}
/**
- * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
- * @rx_ring: Ring to get the VSI info
+ * ice_ptp_get_rx_hwts - Get packet Rx timestamp in ns
* @rx_desc: Receive descriptor
- * @skb: Particular skb to send timestamp with
+ * @pkt_ctx: Packet context to get the cached time
*
* The driver receives a notification in the receive descriptor with timestamp.
- * The timestamp is in ns, so we must convert the result first.
*/
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
{
- struct skb_shared_hwtstamps *hwtstamps;
u64 ts_ns, cached_time;
u32 ts_high;
if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
- return;
+ return 0;
- cached_time = READ_ONCE(rx_ring->cached_phctime);
+ cached_time = READ_ONCE(pkt_ctx->cached_phctime);
/* Do not report a timestamp if we don't have a cached PHC time */
if (!cached_time)
- return;
+ return 0;
/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
* PHC value, rather than accessing the PF. This also allows us to
@@ -2161,9 +2280,7 @@ ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
+ return ts_ns;
}
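
A hedged sketch of the extension step the new helper delegates to ice_ptp_extend_32b_ts(): anchor the 32-bit hardware value to the cached 64-bit PHC time, treating deltas beyond half the 32-bit range as timestamps just before the cached time. The rollover policy shown is an assumption based on the surrounding code, not a restatement of the exact function.

    #include <linux/limits.h>

    static u64 demo_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
    {
            u32 phc_lo = (u32)cached_phc_time;
            u32 delta = in_tstamp - phc_lo;     /* wraps mod 2^32 */

            if (delta > (U32_MAX / 2))          /* hw value is in the past */
                    return cached_phc_time - (phc_lo - in_tstamp);
            return cached_phc_time + delta;
    }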
/**
@@ -2378,18 +2495,23 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
*/
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
+ unsigned long flags;
u8 idx;
- spin_lock(&tx->lock);
+ spin_lock_irqsave(&tx->lock, flags);
/* Check that this tracker is accepting new timestamp requests */
if (!ice_ptp_is_tx_tracker_up(tx)) {
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
return -1;
}
/* Find and set the first available index */
- idx = find_first_zero_bit(tx->in_use, tx->len);
+ idx = find_next_zero_bit(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx == tx->len)
+ idx = find_first_zero_bit(tx->in_use, tx->len);
+
if (idx < tx->len) {
/* We got a valid index that no other thread could have set. Store
* a reference to the skb and the start time to allow discarding old
@@ -2403,7 +2525,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
ice_trace(tx_tstamp_request, skb, idx);
}
- spin_unlock(&tx->lock);
+ spin_unlock_irqrestore(&tx->lock, flags);
/* return the appropriate PHY timestamp register index, -1 if no
* indexes were available.
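
A hedged sketch of the round-robin slot search added above: start scanning just past the index the firmware last serviced so a just-read slot is not immediately reused, and wrap to the front of the bitmap when the tail is full. The standalone helper is illustrative.

    static u8 demo_pick_ts_index(unsigned long *in_use, u8 len, s8 last_read)
    {
            u8 idx = find_next_zero_bit(in_use, len, last_read + 1);

            if (idx == len)                     /* tail full: wrap around */
                    idx = find_first_zero_bit(in_use, len);
            return idx;                         /* == len if all slots busy */
    }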
@@ -2440,6 +2562,54 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
}
+/**
+ * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
+ * @pf: Board private structure
+ *
+ * The device PHY issues Tx timestamp interrupts to the driver for processing
+ * timestamp data from the PHY. It will not interrupt again until all
+ * current timestamp data is read. In rare circumstances, it is possible that
+ * the driver fails to read all outstanding data.
+ *
+ * To avoid getting permanently stuck, periodically check if the PHY has
+ * outstanding timestamp data. If so, trigger an interrupt from software to
+ * process this data.
+ */
+static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool trigger_oicr = false;
+ unsigned int i;
+
+ if (ice_is_e810(hw))
+ return;
+
+ if (!ice_pf_src_tmr_owned(pf))
+ return;
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (!err && tstamp_ready) {
+ trigger_oicr = true;
+ break;
+ }
+ }
+
+ if (trigger_oicr) {
+ /* Trigger a software interrupt, to ensure this data
+ * gets processed.
+ */
+ dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
+
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+}
+
static void ice_ptp_periodic_work(struct kthread_work *work)
{
struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
@@ -2451,6 +2621,8 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
err = ice_ptp_update_cached_phctime(pf);
+ ice_ptp_maybe_trigger_tx_interrupt(pf);
+
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
msecs_to_jiffies(err ? 10 : 500));
@@ -2468,12 +2640,10 @@ void ice_ptp_reset(struct ice_pf *pf)
int err, itr = 1;
u64 time_diff;
- if (test_bit(ICE_PFR_REQ, pf->state))
+ if (test_bit(ICE_PFR_REQ, pf->state) ||
+ !ice_pf_src_tmr_owned(pf))
goto pfr;
- if (!ice_pf_src_tmr_owned(pf))
- goto reset_ts;
-
err = ice_ptp_init_phc(hw);
if (err)
goto err;
@@ -2517,10 +2687,6 @@ void ice_ptp_reset(struct ice_pf *pf)
goto err;
}
-reset_ts:
- /* Restart the PHY timestamping block */
- ice_ptp_reset_phy_timestamping(pf);
-
pfr:
/* Init Tx structures */
if (ice_is_e810(&pf->hw)) {
@@ -2528,7 +2694,7 @@ pfr:
} else {
kthread_init_delayed_work(&ptp->port.ov_work,
ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
ptp->port.port_num);
}
if (err)
@@ -2536,6 +2702,11 @@ pfr:
set_bit(ICE_FLAG_PTP, pf->flags);
+ /* Restart the PHY timestamping block */
+ if (!test_bit(ICE_PFR_REQ, pf->state) &&
+ ice_pf_src_tmr_owned(pf))
+ ice_ptp_restart_all_phy(pf);
+
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2898,11 +3069,11 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
return -ENODEV;
@@ -2991,7 +3162,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
switch (pf->hw.phy_model) {
- case ICE_PHY_E822:
+ case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 06a330867..087dd32d8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -131,6 +131,7 @@ enum ice_tx_tstamp_work {
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
* @verify_cached: if true, verify new timestamp differs from last read value
+ * @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
spinlock_t lock; /* lock protecting in_use bitmap */
@@ -143,11 +144,12 @@ struct ice_ptp_tx {
u8 init : 1;
u8 calibrating : 1;
u8 verify_cached : 1;
+ s8 last_ll_ts_idx_read;
};
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
-#define INDEX_PER_PORT_E822 16
+#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
/**
@@ -296,11 +298,12 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
+void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
-void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx);
void ice_ptp_reset(struct ice_pf *pf);
void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
@@ -325,13 +328,23 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
+static inline void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
+{ }
+
+static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { }
+
static inline bool ice_ptp_process_ts(struct ice_pf *pf)
{
return true;
}
-static inline void
-ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+
+static inline u64
+ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
+ const struct ice_pkt_ctx *pkt_ctx)
+{
+ return 0;
+}
+
static inline void ice_ptp_reset(struct ice_pf *pf) { }
static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 4109aa3b2..2c4dab0c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,17 +9,17 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
-/* struct ice_time_ref_info_e822
+/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
* This lookup table defines several constants that depend on the current time
- * reference. See the struct ice_time_ref_info_e822 for information about the
+ * reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
@@ -81,7 +81,7 @@ const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
},
};
-const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
/* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
{
/* refclk_pre_div */
@@ -155,7 +155,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
-/* struct ice_vernier_info_e822
+/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
* actual packet transmission or reception during the initialization of the
@@ -168,7 +168,7 @@ const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
* used by this link speed, and that the register should be cleared by writing
* 0. Other values specify the clock frequency in Hz.
*/
-const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* ICE_PTP_LNK_SPD_1G */
{
/* tx_par_clk */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index a00b55e14..187ce9b54 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -284,19 +284,19 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
- * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * ice_fill_phy_msg_e82x - Fill message data for a PHY register access
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
static void
-ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E822;
- phy = port / ICE_PORTS_PER_PHY_E822;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
+ phy_port = port % ICE_PORTS_PER_PHY_E82X;
+ phy = port / ICE_PORTS_PER_PHY_E82X;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -315,7 +315,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
}
/**
- * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * ice_is_64b_phy_reg_e82x - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
*
@@ -323,7 +323,7 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
* represented as two 32bit registers. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_64b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_PAR_PCS_TX_OFFSET_L:
@@ -368,7 +368,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * ice_is_40b_phy_reg_e82x - Check if this is a 40bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 40bit value
*
@@ -377,7 +377,7 @@ static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* upper 32 bits in the high register. If it is, return the appropriate high
* register offset to use.
*/
-static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+static bool ice_is_40b_phy_reg_e82x(u16 low_addr, u16 *high_addr)
{
switch (low_addr) {
case P_REG_TIMETUS_L:
@@ -413,7 +413,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
}
/**
- * ice_read_phy_reg_e822 - Read a PHY register
+ * ice_read_phy_reg_e82x - Read a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @offset: PHY register offset to read
@@ -422,12 +422,12 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
* Read a PHY register for the given port over the device sideband queue.
*/
static int
-ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -443,7 +443,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
}
/**
- * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * ice_read_64b_phy_reg_e82x - Read a 64bit value from PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -455,7 +455,7 @@ ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
* known to be two parts of a 64bit value.
*/
static int
-ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+ice_read_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
{
u32 low, high;
u16 high_addr;
@@ -464,20 +464,20 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
}
- err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ err = ice_read_phy_reg_e82x(hw, port, low_addr, &low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ err = ice_read_phy_reg_e82x(hw, port, high_addr, &high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
high_addr, err);
@@ -490,7 +490,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
}
/**
- * ice_write_phy_reg_e822 - Write a PHY register
+ * ice_write_phy_reg_e82x - Write a PHY register
* @hw: pointer to the HW struct
* @port: PHY port to write to
* @offset: PHY register offset to write
@@ -499,12 +499,12 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
* Write a PHY register for the given port over the device sideband queue.
*/
static int
-ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e822(&msg, port, offset);
+ ice_fill_phy_msg_e82x(&msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -519,7 +519,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
}
/**
- * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * ice_write_40b_phy_reg_e82x - Write a 40b value to the PHY
* @hw: pointer to the HW struct
* @port: port to write to
* @low_addr: offset of the low register
@@ -529,7 +529,7 @@ ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
* it up into two chunks, the lower 8 bits and the upper 32 bits.
*/
static int
-ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -538,7 +538,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into a lower 8 bit
* register and an upper 32 bit register.
*/
- if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_40b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -547,14 +547,14 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = (u32)(val & P_REG_40B_LOW_M);
high = (u32)(val >> P_REG_40B_HIGH_S);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -565,7 +565,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * ice_write_64b_phy_reg_e82x - Write a 64bit value to PHY registers
* @hw: pointer to the HW struct
* @port: PHY port to read from
* @low_addr: offset of the lower register to read from
@@ -577,7 +577,7 @@ ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* a 64bit value.
*/
static int
-ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
u32 low, high;
u16 high_addr;
@@ -586,7 +586,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/* Only operate on registers known to be split into two 32bit
* registers.
*/
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ if (!ice_is_64b_phy_reg_e82x(low_addr, &high_addr)) {
ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
low_addr);
return -EINVAL;
@@ -595,14 +595,14 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low = lower_32_bits(val);
high = upper_32_bits(val);
- err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
low_addr, err);
return err;
}
- err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ err = ice_write_phy_reg_e82x(hw, port, high_addr, high);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
high_addr, err);
@@ -613,7 +613,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
}
/**
- * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * ice_fill_quad_msg_e82x - Fill message data for quad register access
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
@@ -622,7 +622,7 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* multiple PHYs.
*/
static int
-ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
@@ -631,7 +631,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -643,7 +643,7 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
}
/**
- * ice_read_quad_reg_e822 - Read a PHY quad register
+ * ice_read_quad_reg_e82x - Read a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to read from
* @offset: quad register offset to read
@@ -653,12 +653,12 @@ ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
* shared between multiple PHYs.
*/
int
-ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -677,7 +677,7 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
}
/**
- * ice_write_quad_reg_e822 - Write a PHY quad register
+ * ice_write_quad_reg_e82x - Write a PHY quad register
* @hw: pointer to the HW struct
* @quad: quad to write to
* @offset: quad register offset to write
@@ -687,12 +687,12 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
* shared between multiple PHYs.
*/
int
-ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(&msg, quad, offset);
if (err)
return err;
@@ -710,7 +710,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
}
/**
- * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * ice_read_phy_tstamp_e82x - Read a PHY timestamp out of the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to read
@@ -721,7 +721,7 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
* family of devices.
*/
static int
-ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
{
u16 lo_addr, hi_addr;
u32 lo, hi;
@@ -730,14 +730,14 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
- err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, lo_addr, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, hi_addr, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
@@ -754,7 +754,7 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
}
/**
- * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * ice_clear_phy_tstamp_e82x - Clear a timestamp from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
* @idx: the timestamp index to reset
@@ -770,18 +770,18 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
*
* To directly clear the contents of the timestamp block entirely, discarding
* all timestamp data at once, software should instead use
- * ice_ptp_reset_ts_memory_quad_e822().
+ * ice_ptp_reset_ts_memory_quad_e82x().
*
* This function should only be called on an idx whose bit is set according to
* ice_get_phy_tx_tstamp_ready().
*/
static int
-ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+ice_clear_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx)
{
u64 unused_tstamp;
int err;
- err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
+ err = ice_read_phy_tstamp_e82x(hw, quad, idx, &unused_tstamp);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
quad, idx, err);
@@ -792,33 +792,33 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
}
/**
- * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
+ * ice_ptp_reset_ts_memory_quad_e82x - Clear all timestamps from the quad block
* @hw: pointer to the HW struct
* @quad: the quad to read from
*
* Clear all timestamps from the PHY quad block that is shared between the
* internal PHYs on the E822 devices.
*/
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad)
{
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
- ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e82x(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
}
/**
- * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
+ * ice_ptp_reset_ts_memory_e82x - Clear all timestamps from all quad blocks
* @hw: pointer to the HW struct
*/
-static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
+static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
for (quad = 0; quad < ICE_MAX_QUAD; quad++)
- ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e822 - Read a CGU register
+ * ice_read_cgu_reg_e82x - Read a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to read
* @val: storage for register value read
@@ -827,7 +827,7 @@ static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
* applicable to E822 devices.
*/
static int
-ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -850,7 +850,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
}
/**
- * ice_write_cgu_reg_e822 - Write a CGU register
+ * ice_write_cgu_reg_e82x - Write a CGU register
* @hw: pointer to the HW struct
* @addr: Register address to write
* @val: value to write into the register
@@ -859,7 +859,7 @@ ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
* applicable to E822 devices.
*/
static int
-ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
int err;
@@ -925,7 +925,7 @@ static const char *ice_clk_src_str(u8 clk_src)
}
/**
- * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
* @hw: pointer to the HW struct
* @clk_freq: Clock frequency to program
* @clk_src: Clock source to select (TIME_REF, or TCX0)
@@ -934,7 +934,7 @@ static const char *ice_clk_src_str(u8 clk_src)
* time reference, enabling the PLL which drives the PTP hardware clock.
*/
static int
-ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
enum ice_clk_src clk_src)
{
union tspll_ro_bwm_lf bwm_lf;
@@ -963,15 +963,15 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
return -EINVAL;
}
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -986,43 +986,43 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
if (dw24.field.ts_pll_enable) {
dw24.field.ts_pll_enable = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
}
/* Set the frequency */
dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
if (err)
return err;
/* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
if (err)
return err;
dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
dw19.field.tspll_ndivratio = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
if (err)
return err;
/* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
if (err)
return err;
dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
dw22.field.time1588clk_sel_div2 = 0;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
if (err)
return err;
/* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
if (err)
return err;
@@ -1030,21 +1030,21 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
dw24.field.time_ref_sel = clk_src;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Finally, enable the PLL */
dw24.field.ts_pll_enable = 1;
- err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
if (err)
return err;
/* Wait to verify if the PLL locks */
usleep_range(1000, 5000);
- err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
if (err)
return err;
@@ -1064,18 +1064,18 @@ ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
}
/**
- * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
* @hw: pointer to the HW structure
*
* Initialize the Clock Generation Unit of the E822 device.
*/
-static int ice_init_cgu_e822(struct ice_hw *hw)
+static int ice_init_cgu_e82x(struct ice_hw *hw)
{
struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
union tspll_cntr_bist_settings cntr_bist;
int err;
- err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
&cntr_bist.val);
if (err)
return err;
@@ -1084,7 +1084,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
cntr_bist.field.i_plllock_sel_0 = 0;
cntr_bist.field.i_plllock_sel_1 = 0;
- err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
cntr_bist.val);
if (err)
return err;
@@ -1092,7 +1092,7 @@ static int ice_init_cgu_e822(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
if (err)
return err;
@@ -1113,7 +1113,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
PTP_VERNIER_WL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
@@ -1126,12 +1126,12 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * ice_ptp_init_phc_e82x - Perform E822 specific PHC initialization
* @hw: pointer to HW struct
*
* Perform PHC initialization steps specific to E822 devices.
*/
-static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
u32 regval;
@@ -1145,7 +1145,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
wr32(hw, PF_SB_REM_DEV_CTL, regval);
/* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e822(hw);
+ err = ice_init_cgu_e82x(hw);
if (err)
return err;
@@ -1154,7 +1154,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
}
/**
- * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * ice_ptp_prep_phy_time_e82x - Prepare PHY port with initial time
* @hw: pointer to the HW struct
* @time: Time to initialize the PHY port clocks to
*
@@ -1164,7 +1164,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* units of nominal nanoseconds.
*/
static int
-ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
{
u64 phy_time;
u8 port;
@@ -1177,14 +1177,14 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
/* Tx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
phy_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_64b_phy_reg_e822(hw, port,
+ err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_RX_TIMER_INC_PRE_L,
phy_time);
if (err)
@@ -1201,7 +1201,7 @@ exit_err:
}
/**
- * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * ice_ptp_prep_port_adj_e82x - Prepare a single port for time adjust
* @hw: pointer to HW struct
* @port: Port number to be programmed
* @time: time in cycles to adjust the port Tx and Rx clocks
@@ -1216,7 +1216,7 @@ exit_err:
* Negative adjustments are supported using 2s complement arithmetic.
*/
static int
-ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+ice_ptp_prep_port_adj_e82x(struct ice_hw *hw, u8 port, s64 time)
{
u32 l_time, u_time;
int err;
@@ -1225,23 +1225,23 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
u_time = upper_32_bits(time);
/* Tx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
/* Rx case */
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_L,
l_time);
if (err)
goto exit_err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TIMER_INC_PRE_U,
u_time);
if (err)
goto exit_err;
@@ -1255,7 +1255,7 @@ exit_err:
}
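An aside for readers tracing the adjustment path: the port adjustment above splits a signed 64-bit cycle count into two 32-bit register writes, and two's complement is what lets negative values survive the split, as the comment notes. A minimal userspace sketch (the macros mirror the kernel helpers; the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Userspace mirrors of the kernel's lower_32_bits()/upper_32_bits() */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))
#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

int main(void)
{
	int64_t time = -1000;			/* negative adjustment */
	uint32_t l_time = lower_32_bits(time);	/* 0xfffffc18 */
	uint32_t u_time = upper_32_bits(time);	/* 0xffffffff */

	/* The PHY reassembles {u_time, l_time} as one two's complement
	 * 64-bit value, so the negative adjustment is preserved.
	 */
	printf("l_time=0x%08x u_time=0x%08x\n", l_time, u_time);
	return 0;
}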
/**
- * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * ice_ptp_prep_phy_adj_e82x - Prep PHY ports for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment in nanoseconds
*
@@ -1264,7 +1264,7 @@ exit_err:
* ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
-ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
{
s64 cycles;
u8 port;
@@ -1281,7 +1281,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
if (err)
return err;
}
@@ -1290,7 +1290,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
}
/**
- * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * ice_ptp_prep_phy_incval_e82x - Prepare PHY ports for a new increment value
* @hw: pointer to HW struct
* @incval: new increment value to prepare
*
@@ -1299,13 +1299,13 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
* issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
-ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
{
int err;
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
goto exit_err;
@@ -1337,7 +1337,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
int err;
/* Tx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
err);
@@ -1348,7 +1348,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
(unsigned long long)*tx_ts);
/* Rx case */
- err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ err = ice_read_64b_phy_reg_e82x(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
err);
@@ -1362,7 +1362,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e82x - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
@@ -1372,8 +1372,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
* Do not use this function directly. If you want to configure exactly one
* port, use ice_ptp_one_port_cmd() instead.
*/
-static int
-ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1403,7 +1403,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Tx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
err);
@@ -1414,7 +1414,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
err);
@@ -1423,7 +1423,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
/* Rx case */
/* Read, modify, write */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
err);
@@ -1434,7 +1434,7 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
val &= ~TS_CMD_MASK;
val |= cmd_val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1469,7 +1469,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
else
cmd = ICE_PTP_NOP;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1478,7 +1478,7 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
}
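The Tx and Rx branches above repeat the same read-modify-write of the timer command field. A hypothetical consolidation of that pattern, for illustration only (TS_CMD_MASK and the e82x register accessors are the driver's; the wrapper name and its u16 offset parameter are assumptions):

static int example_rmw_tmr_cmd(struct ice_hw *hw, u8 port, u16 reg,
			       u32 cmd_val)
{
	u32 val;
	int err;

	err = ice_read_phy_reg_e82x(hw, port, reg, &val);
	if (err)
		return err;

	val &= ~TS_CMD_MASK;	/* drop the previous command bits */
	val |= cmd_val;		/* install the new command */

	return ice_write_phy_reg_e82x(hw, port, reg, val);
}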
/**
- * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
*
@@ -1486,14 +1486,14 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
* command.
*/
static int
-ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u8 port;
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
if (err)
return err;
}
@@ -1509,7 +1509,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
/**
- * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * ice_phy_get_speed_and_fec_e82x - Get link speed and FEC based on serdes mode
* @hw: pointer to HW struct
* @port: the port to read from
* @link_out: if non-NULL, holds link speed on success
@@ -1519,7 +1519,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
* algorithm.
*/
static int
-ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ice_phy_get_speed_and_fec_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd *link_out,
enum ice_ptp_fec_mode *fec_out)
{
@@ -1528,7 +1528,7 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
u32 serdes;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_LINK_SPEED, &serdes);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
return err;
@@ -1585,18 +1585,18 @@ ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * ice_phy_cfg_lane_e82x - Configure PHY quad for single/multi-lane timestamp
* @hw: pointer to HW struct
* @port: to configure the quad for
*/
-static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
int err;
u32 val;
u8 quad;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, NULL);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
err);
@@ -1605,7 +1605,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
quad = port / ICE_PORTS_PER_QUAD;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
err);
@@ -1617,7 +1617,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
else
val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
- err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
err);
@@ -1626,7 +1626,7 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * ice_phy_cfg_uix_e82x - Configure Serdes UI to TU conversion for E822
* @hw: pointer to the HW structure
* @port: the port to configure
*
@@ -1671,12 +1671,12 @@ static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
* a divide by 390,625,000. This does lose some precision, but avoids
* miscalculation due to arithmetic overflow.
*/
-static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_uix_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, uix;
int err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second divided by 256 */
@@ -1688,7 +1688,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 10Gb/40Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_10G_40G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
@@ -1699,7 +1699,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
/* Program the 25Gb/100Gb conversion ratio */
uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_UIX66_25G_100G_L,
uix);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
@@ -1711,7 +1711,7 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
}
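The fixed divisor trades a little precision for overflow safety, as the comment above explains. A standalone sketch of the arithmetic with illustrative inputs (the PLL frequency, increment value, and line-UI scaling factor below are assumptions, not the driver's tables):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's div_u64() */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t cur_freq = 812500000ULL;	/* assumed PLL frequency */
	uint64_t clk_incval = 0x13b13b13bULL;	/* assumed increment value */
	uint64_t line_ui = 640;			/* assumed UI scaling factor */

	/* TUs per second, pre-divided by 256 so the product below stays
	 * inside 64 bits, exactly as described above.
	 */
	uint64_t tu_per_sec = (cur_freq * clk_incval) >> 8;
	uint64_t uix = div_u64(tu_per_sec * line_ui, 390625000);

	printf("uix = %llu\n", (unsigned long long)uix);
	return 0;
}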
/**
- * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * ice_phy_cfg_parpcs_e82x - Configure TUs per PAR/PCS clock cycle
* @hw: pointer to the HW struct
* @port: port to configure
*
@@ -1753,18 +1753,18 @@ static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
* frequency is ~29 bits, so multiplying them together should fit within the
* 64 bit arithmetic.
*/
-static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+static int ice_phy_cfg_parpcs_e82x(struct ice_hw *hw, u8 port)
{
u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
int err;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per cycle of the PHC clock */
@@ -1784,7 +1784,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1796,7 +1796,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1808,7 +1808,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1820,7 +1820,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_PCS_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1832,7 +1832,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1844,7 +1844,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PAR_RX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1856,7 +1856,7 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_TX_TUS_L,
phy_tus);
if (err)
return err;
@@ -1868,23 +1868,23 @@ static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
else
phy_tus = 0;
- return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ return ice_write_40b_phy_reg_e82x(hw, port, P_REG_DESK_PCS_RX_TUS_L,
phy_tus);
}
/**
- * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port
+ * ice_calc_fixed_tx_offset_e82x - Calculate the fixed Tx offset for a port
* @hw: pointer to the HW struct
* @link_spd: the Link speed to calculate for
*
* Calculate the fixed offset due to known static latency data.
*/
static u64
-ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_tx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -1904,7 +1904,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * ice_phy_cfg_tx_offset_e82x - Configure total Tx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -1926,7 +1926,7 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -1935,7 +1935,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
port, err);
@@ -1945,7 +1945,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -1955,11 +1955,11 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_TX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_tx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
@@ -1970,7 +1970,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_25G_RS ||
link_spd == ICE_PTP_LNK_SPD_40G ||
link_spd == ICE_PTP_LNK_SPD_50G) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_TX_OFFSET_L,
&val);
if (err)
@@ -1985,7 +1985,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
*/
if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_TX_TIME_L,
&val);
if (err)
@@ -1998,12 +1998,12 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Tx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_TX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 1);
if (err)
return err;
@@ -2014,7 +2014,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
}
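Since the function returns -EBUSY until the hardware finishes its vernier measurements, callers are expected to retry rather than treat the error as fatal. A hypothetical caller-side sketch (only ice_phy_cfg_tx_offset_e82x() and ice_debug() are real driver symbols here):

static void example_try_tx_offset(struct ice_hw *hw, u8 port)
{
	int err = ice_phy_cfg_tx_offset_e82x(hw, port);

	if (err == -EBUSY)
		return;		/* calibration pending; retry later */

	if (err)
		ice_debug(hw, ICE_DBG_PTP,
			  "Tx offset programming failed for port %u, err %d\n",
			  port, err);
}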
/**
- * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * ice_phy_calc_pmd_adj_e82x - Calculate PMD adjustment for Rx
* @hw: pointer to the HW struct
* @port: the PHY port to adjust for
* @link_spd: the current link speed of the PHY
@@ -2026,7 +2026,7 @@ int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
* various delays caused when receiving a packet.
*/
static int
-ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ice_phy_calc_pmd_adj_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_link_spd link_spd,
enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
@@ -2035,7 +2035,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u32 val;
int err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PMD_ALIGNMENT, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
err);
@@ -2044,7 +2044,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
pmd_align = (u8)val;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2123,7 +2123,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_40_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
@@ -2145,7 +2145,7 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
u64 cycle_adj;
u8 rx_cycle;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_80_TO_160_CNT,
&val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
@@ -2172,18 +2172,18 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
}
/**
- * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port
+ * ice_calc_fixed_rx_offset_e82x - Calculate the fixed Rx offset for a port
* @hw: pointer to HW struct
* @link_spd: The Link speed to calculate for
*
* Determine the fixed Rx latency for a given link speed.
*/
static u64
-ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+ice_calc_fixed_rx_offset_e82x(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
- cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ cur_freq = ice_e82x_pll_freq(ice_e82x_time_ref(hw));
clk_incval = ice_ptp_read_src_incval(hw);
/* Calculate TUs per second */
@@ -2203,7 +2203,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
}
/**
- * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * ice_phy_cfg_rx_offset_e82x - Configure total Rx timestamp offset
* @hw: pointer to the HW struct
* @port: the PHY port to configure
*
@@ -2229,7 +2229,7 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* Returns zero on success, -EBUSY if the hardware vernier offset
* calibration has not completed, or another error code on failure.
*/
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
{
enum ice_ptp_link_spd link_spd;
enum ice_ptp_fec_mode fec_mode;
@@ -2238,7 +2238,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
u32 reg;
/* Nothing to do if we've already programmed the offset */
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OR, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
port, err);
@@ -2248,7 +2248,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (reg)
return 0;
- err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_OV_STATUS, &reg);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
port, err);
@@ -2258,16 +2258,16 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
if (!(reg & P_REG_RX_OV_STATUS_OV_M))
return -EBUSY;
- err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ err = ice_phy_get_speed_and_fec_e82x(hw, port, &link_spd, &fec_mode);
if (err)
return err;
- total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+ total_offset = ice_calc_fixed_rx_offset_e82x(hw, link_spd);
/* Read the first Vernier offset from the PHY register and add it to
* the total offset.
*/
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_PCS_RX_OFFSET_L,
&val);
if (err)
@@ -2282,7 +2282,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
link_spd == ICE_PTP_LNK_SPD_50G ||
link_spd == ICE_PTP_LNK_SPD_50G_RS ||
link_spd == ICE_PTP_LNK_SPD_100G_RS) {
- err = ice_read_64b_phy_reg_e822(hw, port,
+ err = ice_read_64b_phy_reg_e82x(hw, port,
P_REG_PAR_RX_TIME_L,
&val);
if (err)
@@ -2292,7 +2292,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/* In addition, Rx must account for the PMD alignment */
- err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ err = ice_phy_calc_pmd_adj_e82x(hw, port, link_spd, fec_mode, &pmd);
if (err)
return err;
@@ -2308,12 +2308,12 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* PHY and indicate that the Rx offset is ready. After this,
* timestamps will be enabled.
*/
- err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TOTAL_RX_OFFSET_L,
total_offset);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 1);
if (err)
return err;
@@ -2324,7 +2324,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
* @phy_time: on return, the 64bit PHY timer value
@@ -2334,7 +2334,7 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* and PHC timer values.
*/
static int
-ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ice_read_phy_and_phc_time_e82x(struct ice_hw *hw, u8 port, u64 *phy_time,
u64 *phc_time)
{
u64 tx_time, rx_time;
@@ -2381,7 +2381,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
}
/**
- * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * ice_sync_phy_timer_e82x - Synchronize the PHY timer with PHC timer
* @hw: pointer to the HW struct
* @port: the PHY port to synchronize
*
@@ -2392,7 +2392,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* to the PHY timer in order to ensure it reads the same value as the
* primary PHC timer.
*/
-static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+static int ice_sync_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u64 phc_time, phy_time, difference;
int err;
@@ -2402,7 +2402,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
return -EBUSY;
}
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2416,7 +2416,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
*/
difference = phc_time - phy_time;
- err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ err = ice_ptp_prep_port_adj_e82x(hw, port, (s64)difference);
if (err)
goto err_unlock;
@@ -2433,7 +2433,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
/* Re-capture the timer values to flush the command registers and
* verify that the time was properly adjusted.
*/
- err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ err = ice_read_phy_and_phc_time_e82x(hw, port, &phy_time, &phc_time);
if (err)
goto err_unlock;
@@ -2452,7 +2452,7 @@ err_unlock:
}
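Note that `difference` above is unsigned; the subsequent cast to s64 relies on two's complement to produce a negative adjustment when the PHY timer is ahead of the PHC. A tiny standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phc_time = 1000;
	uint64_t phy_time = 1500;	/* PHY ahead of the PHC */

	/* Unsigned subtraction wraps, and the signed cast recovers the
	 * intended negative adjustment.
	 */
	int64_t adj = (int64_t)(phc_time - phy_time);

	printf("adjustment = %lld\n", (long long)adj);	/* prints -500 */
	return 0;
}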
/**
- * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * ice_stop_phy_timer_e82x - Stop the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to stop
* @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
@@ -2462,36 +2462,36 @@ err_unlock:
* initialized or when link speed changes.
*/
int
-ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset)
{
int err;
u32 val;
- err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
if (err)
return err;
- err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
if (err)
return err;
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val &= ~P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
if (soft_reset) {
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
}
@@ -2502,7 +2502,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
}
/**
- * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * ice_start_phy_timer_e82x - Start the PHY clock timer
* @hw: pointer to the HW struct
* @port: the PHY port to start
*
@@ -2512,7 +2512,7 @@ ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
*
* Hardware will take Vernier measurements on Tx or Rx of packets.
*/
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port)
{
u32 lo, hi, val;
u64 incval;
@@ -2521,17 +2521,17 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
tmr_idx = ice_get_ptp_src_clock_index(hw);
- err = ice_stop_phy_timer_e822(hw, port, false);
+ err = ice_stop_phy_timer_e82x(hw, port, false);
if (err)
return err;
- ice_phy_cfg_lane_e822(hw, port);
+ ice_phy_cfg_lane_e82x(hw, port);
- err = ice_phy_cfg_uix_e822(hw, port);
+ err = ice_phy_cfg_uix_e82x(hw, port);
if (err)
return err;
- err = ice_phy_cfg_parpcs_e822(hw, port);
+ err = ice_phy_cfg_parpcs_e82x(hw, port);
if (err)
return err;
@@ -2539,7 +2539,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
incval = (u64)hi << 32 | lo;
- err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval);
if (err)
return err;
@@ -2552,22 +2552,22 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
- err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ err = ice_read_phy_reg_e82x(hw, port, P_REG_PS, &val);
if (err)
return err;
val |= P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_START_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val &= ~P_REG_PS_SFT_RESET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
@@ -2578,18 +2578,18 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
ice_ptp_exec_tmr_cmd(hw);
val |= P_REG_PS_ENA_CLK_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
val |= P_REG_PS_LOAD_OFFSET_M;
- err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_PS, val);
if (err)
return err;
ice_ptp_exec_tmr_cmd(hw);
- err = ice_sync_phy_timer_e822(hw, port);
+ err = ice_sync_phy_timer_e82x(hw, port);
if (err)
return err;
@@ -2599,7 +2599,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
}
/**
- * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
+ * ice_get_phy_tx_tstamp_ready_e82x - Read Tx memory status register
* @hw: pointer to the HW struct
* @quad: the timestamp quad to read from
* @tstamp_ready: contents of the Tx memory status register
@@ -2609,19 +2609,19 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
* ready to be captured from the PHY timestamp block.
*/
static int
-ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
+ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
{
u32 hi, lo;
int err;
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
quad, err);
return err;
}
- err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
quad, err);
@@ -3307,7 +3307,7 @@ void ice_ptp_init_phy_model(struct ice_hw *hw)
if (ice_is_e810(hw))
hw->phy_model = ICE_PHY_E810;
else
- hw->phy_model = ICE_PHY_E822;
+ hw->phy_model = ICE_PHY_E82X;
}
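This is the crux of the rename: a single ICE_PHY_E82X model now stands in for the E822/E823 family (the E823C device IDs later in this patch reuse the E82X constants), while E810 keeps its own path. A minimal sketch of the dispatch idiom used by the wrappers that follow (the wrapper name is illustrative; the body mirrors ice_ptp_init_phc() below):

static int example_init_phc_dispatch(struct ice_hw *hw)
{
	switch (hw->phy_model) {
	case ICE_PHY_E810:
		return ice_ptp_init_phc_e810(hw);
	case ICE_PHY_E82X:
		return ice_ptp_init_phc_e82x(hw);
	default:
		return -EOPNOTSUPP;	/* unknown PHY model */
	}
}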
/**
@@ -3332,8 +3332,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
break;
- case ICE_PHY_E822:
- err = ice_ptp_port_cmd_e822(hw, cmd);
+ case ICE_PHY_E82X:
+ err = ice_ptp_port_cmd_e82x(hw, cmd);
break;
default:
err = -EOPNOTSUPP;
@@ -3384,8 +3384,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
default:
err = -EOPNOTSUPP;
@@ -3426,8 +3426,8 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
default:
err = -EOPNOTSUPP;
@@ -3492,8 +3492,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E822:
- err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ case ICE_PHY_E82X:
+ err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
default:
err = -EOPNOTSUPP;
@@ -3521,8 +3521,8 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E822:
- return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ case ICE_PHY_E82X:
+ return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -3549,8 +3549,8 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E822:
- return ice_clear_phy_tstamp_e822(hw, block, idx);
+ case ICE_PHY_E82X:
+ return ice_clear_phy_tstamp_e82x(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -3608,8 +3608,8 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
switch (hw->phy_model) {
- case ICE_PHY_E822:
- ice_ptp_reset_ts_memory_e822(hw);
+ case ICE_PHY_E82X:
+ ice_ptp_reset_ts_memory_e82x(hw);
break;
case ICE_PHY_E810:
default:
@@ -3636,8 +3636,8 @@ int ice_ptp_init_phc(struct ice_hw *hw)
switch (hw->phy_model) {
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E822:
- return ice_ptp_init_phc_e822(hw);
+ case ICE_PHY_E82X:
+ return ice_ptp_init_phc_e82x(hw);
default:
return -EOPNOTSUPP;
}
@@ -3660,8 +3660,8 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E822:
- return ice_get_phy_tx_tstamp_ready_e822(hw, block,
+ case ICE_PHY_E82X:
+ return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
break;
default:
@@ -3942,7 +3942,7 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_SGMII:
- *pin_num = ICE_E822_RCLK_PINS_NUM;
+ *pin_num = ICE_E82X_RCLK_PINS_NUM;
ret = 0;
if (hw->cgu_part_number ==
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index cf7670156..1f3e03124 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -42,7 +42,7 @@ enum ice_ptp_fec_mode {
};
/**
- * struct ice_time_ref_info_e822
+ * struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
* @pps_delay: propagation delay of the PPS output signal
@@ -50,14 +50,14 @@ enum ice_ptp_fec_mode {
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
*/
-struct ice_time_ref_info_e822 {
+struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
u8 pps_delay;
};
/**
- * struct ice_vernier_info_e822
+ * struct ice_vernier_info_e82x
* @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
* @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
* @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
@@ -80,7 +80,7 @@ struct ice_time_ref_info_e822 {
* different link speeds, either the deskew marker for multi-lane link speeds
* or the Reed Solomon gearbox marker for RS-FEC.
*/
-struct ice_vernier_info_e822 {
+struct ice_vernier_info_e82x {
u32 tx_par_clk;
u32 rx_par_clk;
u32 tx_pcs_clk;
@@ -95,7 +95,7 @@ struct ice_vernier_info_e822 {
};
/**
- * struct ice_cgu_pll_params_e822
+ * struct ice_cgu_pll_params_e82x
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -104,7 +104,7 @@ struct ice_vernier_info_e822 {
* Clock Generation Unit parameters used to program the PLL based on the
* selected TIME_REF frequency.
*/
-struct ice_cgu_pll_params_e822 {
+struct ice_cgu_pll_params_e82x {
u32 refclk_pre_div;
u32 feedback_div;
u32 frac_n_div;
@@ -124,7 +124,7 @@ enum ice_phy_rclk_pins {
};
#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
-#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define ICE_E82X_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
(_pin) + ZL_REF1P)
@@ -183,16 +183,16 @@ struct ice_cgu_pin_desc {
};
extern const struct
-ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
/* Table of constants for Vernier calibration on E822 */
-extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
@@ -215,23 +215,23 @@ int ice_ptp_init_phc(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
/* E822 family functions */
-int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
-int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
+int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
/**
- * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * ice_e82x_time_ref - Get the current TIME_REF from capabilities
* @hw: pointer to the HW structure
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
/**
- * ice_set_e822_time_ref - Set new TIME_REF
+ * ice_set_e82x_time_ref - Set new TIME_REF
* @hw: pointer to the HW structure
* @time_ref: new TIME_REF to set
*
@@ -239,31 +239,31 @@ static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
{
return e822_time_ref[time_ref].pps_delay;
}
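Usage sketch for the accessors above: everything keys off the TIME_REF frequency reported in the function capabilities, so callers never hard-code PLL parameters (the wrapper name is illustrative):

static u64 example_nominal_incval(struct ice_hw *hw)
{
	enum ice_time_ref_freq time_ref = ice_e82x_time_ref(hw);

	/* Indexes the shared e822_time_ref[] table for this TIME_REF */
	return ice_e82x_nominal_incval(time_ref);
}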
/* E822 Vernier calibration functions */
-int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
-int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port);
-int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);
+int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
@@ -509,6 +509,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define TS_LL_READ_RETRIES 200
#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS_INTR BIT(30)
#define TS_LL_READ_TS BIT(31)
/* Internal PHY timestamp address */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index c686ac093..5f30fb131 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -14,7 +14,7 @@
*/
static int ice_repr_get_sw_port_id(struct ice_repr *repr)
{
- return repr->vf->pf->hw.port_info->lport;
+ return repr->src_vsi->back->hw.port_info->lport;
}
/**
@@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
return -EOPNOTSUPP;
res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
- repr->vf->vf_id);
+ repr->id);
if (res <= 0)
return -EOPNOTSUPP;
return 0;
@@ -278,25 +278,67 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
+static void ice_repr_remove_node(struct devlink_port *devlink_port)
+{
+ devl_lock(devlink_port->devlink);
+ devl_rate_leaf_destroy(devlink_port);
+ devl_unlock(devlink_port->devlink);
+}
+
/**
- * ice_repr_add - add representor for VF
- * @vf: pointer to VF structure
+ * ice_repr_rem - free memory allocated for a representor
+ * @repr: pointer to representor structure
*/
-static int ice_repr_add(struct ice_vf *vf)
+static void ice_repr_rem(struct ice_repr *repr)
+{
+ kfree(repr->q_vector);
+ free_netdev(repr->netdev);
+ kfree(repr);
+}
+
+/**
+ * ice_repr_rem_vf - remove representor from VF
+ * @repr: pointer to representor structure
+ */
+void ice_repr_rem_vf(struct ice_repr *repr)
+{
+ ice_repr_remove_node(&repr->vf->devlink_port);
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_vf_port(repr->vf);
+ ice_virtchnl_set_dflt_ops(repr->vf);
+ ice_repr_rem(repr);
+}
+
+static void ice_repr_set_tx_topology(struct ice_pf *pf)
+{
+ struct devlink *devlink;
+
+	/* only export if ADQ and DCB disabled and eswitch enabled */
+ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
+ !ice_is_switchdev_running(pf))
+ return;
+
+ devlink = priv_to_devlink(pf);
+ ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
+}
+
+/**
+ * ice_repr_add - add representor for a generic VSI
+ * @pf: pointer to PF structure
+ * @src_vsi: pointer to VSI structure of device to represent
+ * @parent_mac: device MAC address
+ */
+static struct ice_repr *
+ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
- struct ice_vsi *vsi;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
@@ -304,9 +346,7 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc;
}
- repr->src_vsi = vsi;
- repr->vf = vf;
- vf->repr = repr;
+ repr->src_vsi = src_vsi;
np = netdev_priv(repr->netdev);
np->repr = repr;
@@ -316,10 +356,40 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_alloc_q_vector;
}
repr->q_vector = q_vector;
+ repr->q_id = repr->id;
+
+ ether_addr_copy(repr->parent_mac, parent_mac);
+
+ return repr;
+
+err_alloc_q_vector:
+ free_netdev(repr->netdev);
+err_alloc:
+ kfree(repr);
+ return ERR_PTR(err);
+}
+
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+{
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+ int err;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return ERR_PTR(-ENOENT);
err = ice_devlink_create_vf_port(vf);
if (err)
- goto err_devlink;
+ return ERR_PTR(err);
+
+ repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
+ if (IS_ERR(repr)) {
+ err = PTR_ERR(repr);
+ goto err_repr_add;
+ }
+
+ repr->vf = vf;
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
@@ -331,100 +401,23 @@ static int ice_repr_add(struct ice_vf *vf)
goto err_netdev;
ice_virtchnl_set_repr_ops(vf);
+ ice_repr_set_tx_topology(vf->pf);
- return 0;
+ return repr;
err_netdev:
+ ice_repr_rem(repr);
+err_repr_add:
ice_devlink_destroy_vf_port(vf);
-err_devlink:
- kfree(repr->q_vector);
- vf->repr->q_vector = NULL;
-err_alloc_q_vector:
- free_netdev(repr->netdev);
- repr->netdev = NULL;
-err_alloc:
- kfree(repr);
- vf->repr = NULL;
- return err;
-}
-
-/**
- * ice_repr_rem - remove representor from VF
- * @vf: pointer to VF structure
- */
-static void ice_repr_rem(struct ice_vf *vf)
-{
- if (!vf->repr)
- return;
-
- kfree(vf->repr->q_vector);
- vf->repr->q_vector = NULL;
- unregister_netdev(vf->repr->netdev);
- ice_devlink_destroy_vf_port(vf);
- free_netdev(vf->repr->netdev);
- vf->repr->netdev = NULL;
- kfree(vf->repr);
- vf->repr = NULL;
-
- ice_virtchnl_set_dflt_ops(vf);
-}
-
-/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- struct devlink *devlink;
- struct ice_vf *vf;
- unsigned int bkt;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf)
- ice_repr_rem(vf);
-
- /* since all port representors are destroyed, there is
- * no point in keeping the nodes
- */
- devlink = priv_to_devlink(pf);
- devl_lock(devlink);
- devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
+ return ERR_PTR(err);
}
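The constructor now follows the kernel's ERR_PTR convention, returning either a valid representor or an encoded errno instead of an int. A hypothetical caller unwraps it like so:

static int example_attach_vf_repr(struct ice_vf *vf)
{
	struct ice_repr *repr = ice_repr_add_vf(vf);

	if (IS_ERR(repr))
		return PTR_ERR(repr);	/* e.g. -ENOENT or -ENOMEM */

	/* success: repr is a live pointer owned by the eswitch */
	return 0;
}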
-/**
- * ice_repr_add_for_all_vfs - add port representor for all VFs
- * @pf: pointer to PF structure
- */
-int ice_repr_add_for_all_vfs(struct ice_pf *pf)
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
{
- struct devlink *devlink;
- struct ice_vf *vf;
- unsigned int bkt;
- int err;
-
- lockdep_assert_held(&pf->vfs.table_lock);
-
- ice_for_each_vf(pf, bkt, vf) {
- err = ice_repr_add(vf);
- if (err)
- goto err;
- }
-
- /* only export if ADQ and DCB disabled */
- if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
- return 0;
-
- devlink = priv_to_devlink(pf);
- ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
-
- return 0;
-
-err:
- ice_repr_rem_from_all_vfs(pf);
+ if (!vsi->vf)
+ return NULL;
- return err;
+ return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
}
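Representors now live in an xarray on the PF (pf->eswitch.reprs) keyed by repr_id, so resolving a VSI to its representor is a single xa_load(), as above. A hypothetical caller, for illustration:

static bool example_vsi_has_repr(struct ice_vsi *vsi)
{
	/* NULL for non-VF VSIs or when no representor is registered */
	return ice_repr_get_by_vsi(vsi) != NULL;
}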
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index e1ee2d2c1..f9aede315 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -13,14 +13,17 @@ struct ice_repr {
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
+ int q_id;
+ u32 id;
+ u8 parent_mac[ETH_ALEN];
#ifdef CONFIG_ICE_SWITCHDEV
/* info about slow path rule */
struct ice_rule_query_data sp_rule;
#endif
};
-int ice_repr_add_for_all_vfs(struct ice_pf *pf);
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
+void ice_repr_rem_vf(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
@@ -29,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
+
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2f4a62125..d174a4eeb 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1387,8 +1387,7 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
u32 val, clk_src;
val = rd32(hw, GLGEN_CLKSTAT_SRC);
- clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
- GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
+ clk_src = FIELD_GET(GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M, val);
#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
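FIELD_GET() from <linux/bitfield.h> derives the shift from the mask at compile time, which is why the open-coded "(val & _M) >> _S" pairs and their _S shift macros can be dropped throughout. A self-contained sketch with a hypothetical two-bit field (the mask below is an assumption, not the real GLGEN_CLKSTAT_SRC layout):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_CLK_SRC_M	GENMASK(4, 3)	/* hypothetical field */

static inline u32 example_get_clk_src(u32 val)
{
	/* equivalent to (val & EXAMPLE_CLK_SRC_M) >> 3 */
	return FIELD_GET(EXAMPLE_CLK_SRC_M, val);
}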
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index cd6192870..b0f78c2f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -106,10 +106,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
for (v = first; v <= last; v++) {
u32 reg;
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
+ reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
+ FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
@@ -172,13 +170,14 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- mutex_lock(&vfs->table_lock);
+ ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
- ice_eswitch_release(pf);
+ mutex_lock(&vfs->table_lock);
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
+ ice_eswitch_detach(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -274,24 +273,20 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
(device_based_first_msix + vf->num_msix) - 1;
device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
+ FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
+ VPINT_ALLOC_VALID_M;
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
+ FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
+ VPINT_ALLOC_PCI_VALID_M;
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
+ reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
+ FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
@@ -324,10 +319,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
*/
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
+ reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
+ FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
} else {
dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
@@ -342,10 +335,8 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
* VFNUMQ value should be set to (number of queues - 1). A value
* of 0 means 1 queue and a value of 255 means 256 queues
*/
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
+ reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
+ FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
} else {
dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
@@ -609,6 +600,14 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
+ retval = ice_eswitch_attach(pf, vf);
+ if (retval) {
+			dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d\n",
+ vf->vf_id, retval);
+ ice_vf_vsi_release(vf);
+ goto teardown;
+ }
+
set_bit(ICE_VF_STATE_INIT, vf->vf_states);
ice_ena_vf_mappings(vf);
wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
@@ -899,6 +898,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
goto err_unroll_sriov;
}
+ ice_eswitch_reserve_cp_queues(pf, num_vfs);
ret = ice_start_vfs(pf);
if (ret) {
dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
@@ -908,12 +908,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
clear_bit(ICE_VF_DIS, pf->state);
- ret = ice_eswitch_configure(pf);
- if (ret) {
- dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
- goto err_unroll_sriov;
- }
-
/* rearm global interrupts */
if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1311,8 +1305,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
+ queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);
vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
if (!vf)
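The converse helper, FIELD_PREP(), packs a value into a masked field and replaces the "(v << _S) & _M" idiom in the conversions above. A sketch with hypothetical queue-base fields (the masks are assumptions; note the "number of queues - 1" encoding described in the comments above):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_VFFIRSTQ_M	GENMASK(13, 0)	/* hypothetical masks */
#define EXAMPLE_VFNUMQ_M	GENMASK(23, 16)

static inline u32 example_pack_qbase(u16 first_q, u16 num_q)
{
	/* VFNUMQ encodes "number of queues - 1": 0 means one queue */
	return FIELD_PREP(EXAMPLE_VFFIRSTQ_M, first_q) |
	       FIELD_PREP(EXAMPLE_VFNUMQ_M, num_q - 1);
}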
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index ee19f3aa3..ba0ef91e4 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2025,12 +2025,12 @@ error_out:
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: recipe bitmap to associate with the given profile
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2042,7 +2042,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
/* Set the recipe ID bit in the bitmask to let the device know which
* profile we are associating the recipe to
*/
- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+ cmd->recipe_assoc = cpu_to_le64(r_assoc);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -2051,12 +2051,12 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* ice_aq_get_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
- * @r_bitmap: Recipe bitmap filled in and need to be returned as response
+ * @r_assoc: recipe bitmap filled in by firmware and returned as the response
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
@@ -2069,7 +2069,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (!status)
- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+ *r_assoc = le64_to_cpu(cmd->recipe_assoc);
return status;
}
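The association field is now treated as what it is on the wire, a little-endian u64, so the driver converts it explicitly instead of memcpy()ing into a byte array; the bitmap view is recovered with bitmap_from_arr64() in the hunk below. A small sketch of both directions (kernel context assumed; the names and the 64-recipe bound are illustrative):

#include <linux/bitmap.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_MAX_RECIPES	64	/* the field is a single u64 */

/* driver -> firmware: fixed width, explicit endianness */
static __le64 example_pack_assoc(u64 r_assoc)
{
	return cpu_to_le64(r_assoc);
}

/* firmware -> driver: convert, then expand into a bitmap suitable
 * for for_each_set_bit() iteration
 */
static void example_unpack_assoc(__le64 wire, unsigned long *r_bitmap)
{
	u64 r_assoc = le64_to_cpu(wire);

	bitmap_from_arr64(r_bitmap, &r_assoc, EXAMPLE_MAX_RECIPES);
}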
@@ -2108,6 +2108,7 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
{
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 i;
for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
@@ -2115,8 +2116,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))
continue;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_copy(profile_to_recipe[i], r_bitmap,
ICE_MAX_NUM_RECIPES);
for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
@@ -2492,25 +2494,24 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
switch (f_info->fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
- ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ f_info->fwd_id.hw_vsi_id);
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_VSI_LIST:
act |= ICE_SINGLE_ACT_VSI_LIST;
- act |= (f_info->fwd_id.vsi_list_id <<
- ICE_SINGLE_ACT_VSI_LIST_ID_S) &
- ICE_SINGLE_ACT_VSI_LIST_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_LIST_ID_M,
+ f_info->fwd_id.vsi_list_id);
if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
act |= ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_Q:
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ f_info->fwd_id.q_id);
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
@@ -2520,10 +2521,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
q_rgn = f_info->qgrp_size > 0 ?
(u8)ilog2(f_info->qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ f_info->fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
break;
default:
return;
@@ -2649,7 +2649,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= FIELD_PREP(ICE_LG_ACT_VSI_LIST_ID_M, id);
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->act[0] = cpu_to_le32(act);
@@ -2657,16 +2657,15 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
- act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, 1);
lg_act->act[1] = cpu_to_le32(act);
- act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
- ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
+ act = FIELD_PREP(ICE_LG_ACT_GENERIC_OFFSET_M,
+ ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX);
/* Third action Marker value */
act |= ICE_LG_ACT_GENERIC;
- act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
- ICE_LG_ACT_GENERIC_VALUE_M;
+ act |= FIELD_PREP(ICE_LG_ACT_GENERIC_VALUE_M, sw_marker);
lg_act->act[2] = cpu_to_le32(act);
@@ -2675,9 +2674,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
- rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
- ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
- ICE_SINGLE_ACT_PTR_VAL_M));
+ act = ICE_SINGLE_ACT_PTR;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_PTR_VAL_M, l_id);
+ rx_tx->act = cpu_to_le32(act);
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
@@ -4426,8 +4425,8 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int status;
buf->num_elems = cpu_to_le16(num_items);
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
+ alloc_shared);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
@@ -4454,8 +4453,8 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int status;
buf->num_elems = cpu_to_le16(num_items);
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->res_type = cpu_to_le16(FIELD_PREP(ICE_AQC_RES_TYPE_M, type) |
+ alloc_shared);
buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
@@ -4481,18 +4480,15 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
+ u16 res_type;
int status;
buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, type);
if (shared)
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
- else
- buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
- ICE_AQC_RES_TYPE_M) &
- ~ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ buf->res_type = cpu_to_le16(res_type);
buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
status = ice_aq_alloc_free_res(hw, buf, buf_len,
ice_aqc_opc_share_res);
@@ -5024,8 +5020,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
entry->chain_idx = chain_idx;
content->result_indx =
ICE_AQ_RECIPE_RESULT_EN |
- ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
- ICE_AQ_RECIPE_RESULT_DATA_M);
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ chain_idx);
clear_bit(chain_idx, result_idx_bm);
chain_idx = find_first_bit(result_idx_bm,
ICE_MAX_FV_WORDS);
@@ -5396,22 +5392,24 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
list_for_each_entry(fvit, &rm->fv_list, list_entry) {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u64 recp_assoc;
u16 j;
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap, NULL);
+ &recp_assoc, NULL);
if (status)
goto err_unroll;
+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
goto err_unroll;
+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
- (u8 *)r_bitmap,
- NULL);
+ recp_assoc, NULL);
ice_release_change_lock(hw);
if (status)
@@ -6065,6 +6063,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
rinfo->sw_act.fltr_act == ICE_NOP)) {
status = -EIO;
goto free_pkt_profile;
@@ -6077,9 +6076,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
- rinfo->sw_act.fltr_act == ICE_NOP)
+ rinfo->sw_act.fltr_act == ICE_MIRROR_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP) {
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
+ }
if (rinfo->src_vsi)
rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
@@ -6115,38 +6116,45 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = -ENOMEM;
goto free_pkt_profile;
}
- if (!rinfo->flags_info.act_valid) {
- act |= ICE_SINGLE_ACT_LAN_ENABLE;
- act |= ICE_SINGLE_ACT_LB_ENABLE;
- } else {
- act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_LB_ENABLE);
+
+ if (rinfo->sw_act.fltr_act != ICE_MIRROR_PACKET) {
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
}
switch (rinfo->sw_act.fltr_act) {
case ICE_FWD_TO_VSI:
- act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
- ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
break;
case ICE_FWD_TO_Q:
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
break;
case ICE_FWD_TO_QGRP:
q_rgn = rinfo->sw_act.qgrp_size > 0 ?
(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
act |= ICE_SINGLE_ACT_TO_Q;
- act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
- ICE_SINGLE_ACT_Q_INDEX_M;
- act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
- ICE_SINGLE_ACT_Q_REGION_M;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_INDEX_M,
+ rinfo->sw_act.fwd_id.q_id);
+ act |= FIELD_PREP(ICE_SINGLE_ACT_Q_REGION_M, q_rgn);
break;
case ICE_DROP_PACKET:
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
+ case ICE_MIRROR_PACKET:
+ act |= ICE_SINGLE_ACT_OTHER_ACTS;
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ break;
case ICE_NOP:
act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
rinfo->sw_act.fwd_id.hw_vsi_id);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index db7e501b7..89ffa1b51 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -424,10 +424,10 @@ int ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd);
int
-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd);
int
-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index dd03cb69a..688ccb061 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -28,6 +28,8 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
* - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
* - Tunnel flag (present if tunnel)
*/
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -363,6 +365,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
/* Always add direction metadata */
ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ ice_rule_add_src_vsi_metadata(&list[i]);
+ i++;
+ }
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
@@ -653,7 +660,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
ice_tc_is_dev_uplink(target_dev)) {
repr = ice_netdev_to_repr(filter_dev);
- fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+ fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
ice_is_port_repr_netdev(target_dev)) {
@@ -689,6 +696,41 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
return 0;
}
+static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev)
+{
+ struct ice_repr *repr;
+
+ fltr->action.fltr_act = ICE_MIRROR_PACKET;
+
+ if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_tc_is_dev_uplink(target_dev)) {
+ repr = ice_netdev_to_repr(filter_dev);
+
+ fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
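
ice_tc_setup_mirror_action() parallels the redirect handler above but resolves FLOW_ACTION_MIRRED instead, so the three supported netdev pairings (representor to representor, representor to uplink, uplink to representor) pick the destination VSI and filter direction the same way. In switchdev mode this path would be reached from a rule along the lines of `tc filter add dev $REPR ingress protocol ip flower skip_sw action mirred egress mirror dev $OTHER_REPR` (interface names hypothetical).
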
static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
struct ice_tc_flower_fltr *fltr,
struct flow_action_entry *act)
@@ -710,6 +752,12 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
break;
+ case FLOW_ACTION_MIRRED:
+ err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
+ if (err)
+ return err;
+ break;
+
default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
return -EINVAL;
@@ -731,7 +779,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
@@ -765,7 +813,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
- fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+ fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
@@ -779,6 +827,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
+ rule_info.src_vsi = vsi->idx;
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
@@ -1440,7 +1489,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9170a3e8f..97d41d6eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -552,13 +552,14 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
* @rx_buf: Rx buffer to store the XDP action
+ * @eop_desc: Last descriptor in packet to read metadata from
*
 * The resulting verdict (any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}) is
 * stored in rx_buf->act.
*/
static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf)
+ struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -566,6 +567,8 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
+ ice_xdp_meta_set_desc(xdp, eop_desc);
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
@@ -1176,8 +1179,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -1237,7 +1239,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
@@ -1275,7 +1277,7 @@ construct_skb:
continue;
}
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
+ vlan_tci = ice_get_vlan_tci(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
if (eth_skb_pad(skb))
@@ -1285,14 +1287,11 @@ construct_skb:
total_rx_bytes += skb->len;
/* populate checksum, VLAN, and protocol */
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
-
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
/* update budget accounting */
total_rx_pkts++;
@@ -1493,9 +1492,9 @@ static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
* be static in non-adaptive mode (user configured)
*/
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
- GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
- GLINT_DYN_CTL_WB_ON_ITR_M);
+ FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
+ FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
+ FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));
q_vector->wb_on_itr = true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b28b9826b..af955b0e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -257,6 +257,20 @@ enum ice_rx_dtype {
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
+struct ice_pkt_ctx {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+};
+
+struct ice_xdp_buff {
+ struct xdp_buff xdp_buff;
+ const union ice_32b_rx_flex_desc *eop_desc;
+ const struct ice_pkt_ctx *pkt_ctx;
+};
+
+/* Required for compatibility with xdp_buffs from xsk_pool */
+static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
+
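
Keeping struct xdp_buff as the first member means a generic xdp_buff pointer (or the struct xdp_md view that the metadata kfuncs receive) aliases the extended buffer directly, and the static_assert pins that down because xsk_buff_pool hands out plain xdp_buff objects. A hypothetical helper making the cast the hint handlers later in this patch rely on explicit:

    /* Sketch only: valid precisely because
     * offsetof(struct ice_xdp_buff, xdp_buff) == 0, as asserted above. */
    static inline struct ice_xdp_buff *
    ice_xdp_buff_from_ctx(const struct xdp_md *ctx)
    {
            return (struct ice_xdp_buff *)ctx;
    }
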
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -298,7 +312,6 @@ enum ice_dynamic_itr {
/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -310,13 +323,24 @@ struct ice_rx_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 next_to_alloc;
- /* CL2 - 2nd cacheline starts here */
+
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- struct xdp_buff xdp;
+ /* CL2 - 2nd cacheline starts here */
+ union {
+ struct ice_xdp_buff xdp_ext;
+ struct xdp_buff xdp;
+ };
/* CL3 - 3rd cacheline starts here */
+ union {
+ struct ice_pkt_ctx pkt_ctx;
+ struct {
+ u64 cached_phctime;
+ __be16 vlan_proto;
+ };
+ };
struct bpf_prog *xdp_prog;
u16 rx_offset;
@@ -332,10 +356,10 @@ struct ice_rx_ring {
/* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
struct ice_tx_ring *xdp_ring;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
- u64 cached_phctime;
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 7e06373e1..839e5da24 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -63,28 +63,42 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
}
/**
- * ice_rx_hash - set the hash value in the skb
+ * ice_get_rx_hash - get RX hash value from descriptor
+ * @rx_desc: specific descriptor
+ *
+ * Returns the RX hash if present, 0 otherwise.
+ */
+static u32 ice_get_rx_hash(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *nic_mdid;
+
+ if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC))
+ return 0;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ return le32_to_cpu(nic_mdid->rss_hash);
+}
+
+/**
+ * ice_rx_hash_to_skb - set the hash value in the skb
* @rx_ring: descriptor ring
* @rx_desc: specific descriptor
* @skb: pointer to current skb
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 rx_ptype)
+ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u16 rx_ptype)
{
- struct ice_32b_rx_flex_desc_nic *nic_mdid;
u32 hash;
if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
return;
- if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
- return;
-
- nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
- hash = le32_to_cpu(nic_mdid->rss_hash);
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ hash = ice_get_rx_hash(rx_desc);
+ if (likely(hash))
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
/**
@@ -171,11 +185,38 @@ checksum_fail:
}
/**
+ * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb
+ * @rx_ring: Ring to get the VSI info
+ * @rx_desc: Receive descriptor
+ * @skb: Particular skb to send timestamp with
+ *
+ * The timestamp is in ns, so it must be converted to ktime_t before it is
+ * stored in the skb.
+ */
+static void
+ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ts_ns);
+}
+
+/**
+ * ice_get_ptype - Read HW packet type from the descriptor
+ * @rx_desc: RX descriptor
+ */
+static u16 ice_get_ptype(const union ice_32b_rx_flex_desc *rx_desc)
+{
+ return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+}
+
+/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -184,9 +225,11 @@ checksum_fail:
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype)
+ struct sk_buff *skb)
{
- ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+ u16 ptype = ice_get_ptype(rx_desc);
+
+ ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -194,28 +237,24 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
if (rx_ring->ptp_rx)
- ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
}
/**
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: VLAN tag for packet
+ * @vlan_tci: VLAN TCI for packet
*
 * This function sends the completed packet (via skb) up the stack using
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
{
- netdev_features_t features = rx_ring->netdev->features;
- bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+ if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
+ __vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
+ vlan_tci);
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -464,3 +503,125 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
spin_unlock(&xdp_ring->tx_lock);
}
}
+
+/**
+ * ice_xdp_rx_hw_ts - HW timestamp XDP hint handler
+ * @ctx: XDP buff pointer
+ * @ts_ns: destination address
+ *
+ * Copy HW timestamp (if available) to the destination address.
+ */
+static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
+ xdp_ext->pkt_ctx);
+ if (!*ts_ns)
+ return -ENODATA;
+
+ return 0;
+}
+
+/* Define a ptype index -> XDP hash type lookup table.
+ * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
+ * avoiding possible copy-paste errors.
+ */
+#undef ICE_PTT
+#undef ICE_PTT_UNUSED_ENTRY
+
+#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
+
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
+
+/* A few supplementary definitions for when XDP hash types do not coincide
+ * with what can be generated from ptype definitions
+ * by means of preprocessor concatenation.
+ */
+#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
+#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
+
+static const enum xdp_rss_hash_type
+ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
+ ICE_PTYPES
+};
+
+#undef XDP_RSS_L3_NONE
+#undef XDP_RSS_L4_NONE
+#undef XDP_RSS_TYPE_PAY2
+#undef XDP_RSS_TYPE_PAY3
+#undef XDP_RSS_TYPE_PAY4
+
+#undef ICE_PTT
+#undef ICE_PTT_UNUSED_ENTRY
+
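
The table above uses the X-macro pattern: ICE_PTYPES is a single list of ICE_PTT()/ICE_PTT_UNUSED_ENTRY() invocations, and redefining those two macros before expanding the list yields a different array from the same underlying data. A standalone illustration of the pattern (all names here are invented, not the driver's):

    #include <stdio.h>

    /* One list, two expansions: the same entries produce both a flags
     * table and a name table. */
    #define PTYPE_LIST(PTT)      \
            PTT(0, L2, PAY2)     \
            PTT(1, IPV4, PAY4)   \
            PTT(2, IPV6, PAY4)

    enum { HASH_L2 = 1, HASH_IPV4 = 2, HASH_IPV6 = 4 };

    #define PTT(idx, l3, pl) [idx] = HASH_##l3,
    static const int ptype_to_hash[] = { PTYPE_LIST(PTT) };
    #undef PTT

    #define PTT(idx, l3, pl) [idx] = #l3 "/" #pl,
    static const char *ptype_to_name[] = { PTYPE_LIST(PTT) };
    #undef PTT

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    printf("%d: %s -> %d\n", i, ptype_to_name[i],
                           ptype_to_hash[i]);
            return 0;
    }
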
+/**
+ * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
+ * @eop_desc: End of Packet descriptor
+ */
+static enum xdp_rss_hash_type
+ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
+{
+ u16 ptype = ice_get_ptype(eop_desc);
+
+ if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
+ return 0;
+
+ return ice_ptype_to_xdp_hash[ptype];
+}
+
+/**
+ * ice_xdp_rx_hash - RX hash XDP hint handler
+ * @ctx: XDP buff pointer
+ * @hash: hash destination address
+ * @rss_type: XDP hash type destination address
+ *
+ * Copy RX hash (if available) and its type to the destination address.
+ */
+static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *hash = ice_get_rx_hash(xdp_ext->eop_desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ if (unlikely(!*hash))
+ return -ENODATA;
+
+ return 0;
+}
+
+/**
+ * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
+ * @ctx: XDP buff pointer
+ * @vlan_proto: destination address for VLAN protocol
+ * @vlan_tci: destination address for VLAN TCI
+ *
+ * Copy the VLAN tag (if it was stripped) and the corresponding protocol
+ * to the destination address.
+ */
+static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+
+ *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ if (!*vlan_proto)
+ return -ENODATA;
+
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ if (!*vlan_tci)
+ return -ENODATA;
+
+ return 0;
+}
+
+const struct xdp_metadata_ops ice_xdp_md_ops = {
+ .xmo_rx_timestamp = ice_xdp_rx_hw_ts,
+ .xmo_rx_hash = ice_xdp_rx_hash,
+ .xmo_rx_vlan_tag = ice_xdp_rx_vlan_tag,
+};
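
These xmo_* callbacks back the XDP RX metadata kfuncs, so a program attached to this device can pull the hash, timestamp, and VLAN hints without parsing descriptors itself. A minimal sketch of the consumer side, assuming a vmlinux.h-based build and a kernel where these kfuncs are exposed:

    /* SPDX-License-Identifier: GPL-2.0 */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
                                        enum xdp_rss_hash_type *rss_type) __ksym;
    extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
                                            __be16 *vlan_proto,
                                            __u16 *vlan_tci) __ksym;

    SEC("xdp")
    int rx_hints(struct xdp_md *ctx)
    {
            enum xdp_rss_hash_type rss_type;
            __u16 vlan_tci;
            __be16 proto;
            __u32 hash;

            if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
                    bpf_printk("hash 0x%x type %u", hash, rss_type);
            if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &proto, &vlan_tci))
                    bpf_printk("vlan tci %u", vlan_tci);

            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
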
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index b0e56675f..afcead4ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -97,7 +97,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
* The OS and current PF implementation only support stripping a single VLAN tag
@@ -105,7 +105,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
* one is found return the tag, else return 0 to mean no VLAN tag was found.
*/
static inline u16
-ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+ice_get_vlan_tci(const union ice_32b_rx_flex_desc *rx_desc)
{
u16 stat_err_bits;
@@ -161,7 +161,17 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u16 ptype);
+ struct sk_buff *skb);
void
-ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
+
+static inline void
+ice_xdp_meta_set_desc(struct xdp_buff *xdp,
+ union ice_32b_rx_flex_desc *eop_desc)
+{
+ struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
+ xdp_buff);
+
+ xdp_ext->eop_desc = eop_desc;
+}
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a18ca0ff8..a508e917c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,6 +17,7 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
+#include "ice_fwlog.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -246,6 +247,7 @@ struct ice_fd_hw_prof {
int cnt;
u64 entry_h[ICE_MAX_FDIR_VSI_PER_FILTER][ICE_FD_HW_SEG_MAX];
u16 vsi_h[ICE_MAX_FDIR_VSI_PER_FILTER];
+ u64 prof_id[ICE_FD_HW_SEG_MAX];
};
/* Common HW capabilities for SW use */
@@ -351,6 +353,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
+#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
struct ice_ts_dev_info {
/* Device specific info */
@@ -364,6 +367,7 @@ struct ice_ts_dev_info {
u8 tmr0_ena;
u8 tmr1_ena;
u8 ts_ll_read;
+ u8 ts_ll_int_read;
};
/* Function specific capabilities */
@@ -377,6 +381,8 @@ struct ice_hw_func_caps {
struct ice_ts_func_info ts_func_info;
};
+#define ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT 0
+
/* Device wide capabilities */
struct ice_hw_dev_caps {
struct ice_hw_common_caps common_cap;
@@ -385,6 +391,11 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ /* bitmap of supported sensors
+ * bit 0 - internal temperature sensor
+ * bits 31:1 - reserved
+ */
+ u32 supported_sensors;
};
/* MAC info */
@@ -730,24 +741,6 @@ struct ice_switch_info {
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
-/* FW logging configuration */
-struct ice_fw_log_evnt {
- u8 cfg : 4; /* New event enables to configure */
- u8 cur : 4; /* Current/active event enables */
-};
-
-struct ice_fw_log_cfg {
- u8 cq_en : 1; /* FW logging is enabled via the control queue */
- u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */
- u8 actv_evnts; /* Cumulation of currently enabled log events */
-
-#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S)
-#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S)
- struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX];
-};
-
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@@ -827,7 +820,7 @@ struct ice_mbx_data {
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
- ICE_PHY_E822,
+ ICE_PHY_E82X,
};
/* Port hardware description */
@@ -889,7 +882,9 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fw_log_cfg fw_log;
+ struct ice_fwlog_cfg fwlog_cfg;
+ bool fwlog_supported; /* does hardware support FW logging? */
+ struct ice_fwlog_ring fwlog_ring;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -910,10 +905,9 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC_E822 1
#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E822 2
-#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_QUADS_PER_PHY_E82X 2
+#define ICE_PORTS_PER_PHY_E82X 8
#define ICE_PORTS_PER_QUAD 4
#define ICE_PORTS_PER_PHY_E810 4
#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
@@ -1009,7 +1003,6 @@ struct ice_hw_port_stats {
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
- u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
@@ -1048,6 +1041,7 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_MIRROR_PACKET,
ICE_NOP,
ICE_INVAL_ACT
};
@@ -1078,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
-#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 88e3cd09f..15ade19de 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -775,6 +775,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
+ ice_eswitch_detach(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -790,13 +791,11 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
+ ice_eswitch_attach(pf, vf);
+
mutex_unlock(&vf->cfg_lock);
}
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -864,6 +863,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
if (lag && lag->bonded && lag->primary) {
@@ -875,11 +879,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
act_prt = ICE_LAG_INVALID_PORT;
}
- if (flags & ICE_VF_RESET_LOCK)
- mutex_lock(&vf->cfg_lock);
- else
- lockdep_assert_held(&vf->cfg_lock);
-
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
@@ -958,20 +957,20 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vsi);
+ ice_eswitch_update_repr(vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
out_unlock:
- if (flags & ICE_VF_RESET_LOCK)
- mutex_unlock(&vf->cfg_lock);
-
if (lag && lag->bonded && lag->primary &&
act_prt != ICE_LAG_INVALID_PORT)
ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
mutex_unlock(&pf->lag_mutex);
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
return err;
}
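
The reordering above makes vf->cfg_lock strictly outer to pf->lag_mutex on the reset path (taken first, released last), so every path that ever holds both acquires them in a single global order; the previous interleaving left room for an ABBA deadlock against a path that takes cfg_lock before lag_mutex. The rule in isolation, with illustrative pthread locks standing in for the kernel mutexes:

    #include <pthread.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t lag_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

    static void reset_path(void)
    {
            pthread_mutex_lock(&cfg_lock);   /* always acquired first... */
            pthread_mutex_lock(&lag_lock);
            /* ... work that needs both locks ... */
            pthread_mutex_unlock(&lag_lock);
            pthread_mutex_unlock(&cfg_lock); /* ...always released last */
    }
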
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 6b41e0f3d..0cc903406 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -129,7 +129,7 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
- struct ice_repr *repr;
+ unsigned long repr_id;
const struct ice_virtchnl_ops *virtchnl_ops;
const struct ice_vf_ops *vf_ops;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 80dc4bcdd..b3e1bdcb8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -26,24 +26,22 @@ static void ice_port_vlan_on(struct ice_vsi *vsi)
struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vsi->back;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
-
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
+ if (ice_is_dvm_ena(&pf->hw)) {
vlan_ops->add_vlan = noop_vlan_arg;
vlan_ops->del_vlan = noop_vlan_arg;
vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ /* setup outer VLAN ops */
+ vlan_ops = &vsi->outer_vlan_ops;
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+ } else {
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index d6348f208..7b550d7d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -499,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -545,12 +545,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
@@ -682,9 +677,7 @@ out:
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
* @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @hash_cfg: pointer to the HW hash configuration
*
* Return true if all the protocol header and hash fields in the RSS cfg could
* be parsed, else return false
@@ -692,13 +685,23 @@ out:
* This function parses the virtchnl RSS cfg to be the intended
* hash fields and the intended header for RSS configuration
*/
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
- u32 *addl_hdrs, u64 *hash_flds)
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
{
const struct ice_vc_hash_field_match_type *hf_list;
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
hf_list = ice_vc_hash_field_list;
hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
@@ -849,18 +852,24 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
kfree(ctx);
} else {
- u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- u64 hash_flds = ICE_HASH_INVALID;
+ struct ice_rss_hash_cfg cfg;
+
+ /* Only check the non-raw pattern case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
- &hash_flds)) {
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add) {
- if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs)) {
+ if (ice_add_rss_cfg(hw, vsi, &cfg)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
vsi->vsi_num, v_ret);
@@ -868,8 +877,7 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
} else {
int status;
- status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs);
+ status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
/* We just ignore -ENOENT, because if two configurations
* share the same profile remove one of them actually
* removes both, since the profile is deleted.
@@ -980,6 +988,51 @@ error_param:
}
/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2625,7 +2678,7 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
}
if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
v_ret = ice_err_to_virt_err(status);
}
@@ -3037,7 +3090,7 @@ static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
{
struct ice_vlan vlan = { 0 };
- vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.prio = FIELD_GET(VLAN_PRIO_MASK, vc_vlan->tci);
vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
vlan.tpid = vc_vlan->tpid;
@@ -3746,6 +3799,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -3875,6 +3929,7 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
.config_rss_key = ice_vc_config_rss_key,
.config_rss_lut = ice_vc_config_rss_lut,
+ .config_rss_hfunc = ice_vc_config_rss_hfunc,
.get_stats_msg = ice_vc_get_stats_msg,
.cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
.add_vlan_msg = ice_vc_add_vlan_msg,
@@ -4057,6 +4112,9 @@ error_handler:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
err = ops->config_rss_lut(vf, msg);
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ err = ops->config_rss_hfunc(vf, msg);
+ break;
case VIRTCHNL_OP_GET_STATS:
err = ops->get_stats_msg(vf, msg);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index cd747718d..3a4115869 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
@@ -32,6 +41,7 @@ struct ice_virtchnl_ops {
int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_hfunc)(struct ice_vf *vf, u8 *msg);
int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 588b77f1a..d796dbd2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -66,6 +66,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 24b23b7ef..f001553e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -10,19 +10,6 @@
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
-#define ICE_FLOW_PROF_TYPE_S 0
-#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
-#define ICE_FLOW_PROF_VSI_S 32
-#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
-
-/* Flow profile ID format:
- * [0:31] - flow type, flow + tun_offs
- * [32:63] - VSI index
- */
-#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
- ((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
- (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
-
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
@@ -493,6 +480,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
return;
vf_prof = fdir->fdir_prof[flow];
+ prof_id = vf_prof->prof_id[tun];
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
@@ -503,9 +491,6 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
if (!fdir->prof_entry_cnt[flow][tun])
return;
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
- flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
-
for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
if (vf_prof->entry_h[i][tun]) {
u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
@@ -647,7 +632,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0;
- u64 prof_id;
int ret;
pf = vf->pf;
@@ -681,18 +665,15 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
ice_vc_fdir_rem_prof(vf, flow, tun);
}
- prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
- tun ? ICE_FLTR_PTYPE_MAX : 0);
-
- ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, seg,
+ tun + 1, false, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (ret) {
@@ -701,7 +682,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
goto err_prof;
}
- ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof->id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (ret) {
@@ -725,14 +706,16 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
+ vf_prof->prof_id[tun] = prof->id;
+
return 0;
err_entry_1:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
- ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
+ ice_get_hw_vsi_num(hw, vf_vsi->idx), prof->id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
- ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
+ ice_flow_rem_prof(hw, ICE_BLK_FD, prof->id);
err_exit:
return ret;
}
@@ -1480,16 +1463,15 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
int ret;
stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
- if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
- ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
+ if (FIELD_GET(ICE_FXD_FLTR_WB_QW1_DD_M, stat_err) !=
+ ICE_FXD_FLTR_WB_QW1_DD_YES) {
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
ret = -EINVAL;
goto err_exit;
}
- prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
- ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
+ prog_id = FIELD_GET(ICE_FXD_FLTR_WB_QW1_PROG_ID_M, stat_err);
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show add, but ctx not",
@@ -1508,8 +1490,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
goto err_exit;
}
- error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
- ICE_FXD_FLTR_WB_QW1_FAIL_S;
+ error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_M, stat_err);
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
@@ -1524,8 +1505,7 @@ ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
goto err_exit;
}
- error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
- ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
+ error = FIELD_GET(ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M, stat_err);
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 830790211..2e9ad27cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -487,10 +487,11 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |=
- ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
- ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+ /* ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH is zero, so clearing the
+ * EMODE bits above already selects it; only the tag type needs to
+ * be set here
+ */
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -595,11 +596,9 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
ICE_AQ_VSI_OUTER_TAG_TYPE_M);
ctxt->info.outer_vlan_flags |=
- ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL) |
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -648,9 +647,8 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
ctxt->info.outer_vlan_flags |=
ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
- ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
- ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
@@ -708,8 +706,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
ctxt->info.outer_vlan_flags =
(ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
- ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
- ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, tag_type) |
ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 0fd5551b1..2eecd0f39 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -459,6 +459,11 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
+ /* Put private info that changes on a per-packet basis
+ * into xdp_buff_xsk->cb.
+ */
+ ice_xdp_meta_set_desc(*xdp, rx_desc);
+
rx_desc++;
xdp++;
}
@@ -865,8 +870,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
- u16 vlan_tag = 0;
- u16 rx_ptype;
+ u16 vlan_tci;
rx_desc = ICE_RX_DESC(rx_ring, ntc);
@@ -943,13 +947,10 @@ construct_skb:
total_rx_bytes += skb->len;
total_rx_packets++;
- vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
-
- rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
- ICE_RX_FLEX_DESC_PTYPE_M;
+ vlan_tci = ice_get_vlan_tci(rx_desc);
- ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- ice_receive_skb(rx_ring, skb, vlan_tag);
+ ice_process_skb_fields(rx_ring, rx_desc, skb);
+ ice_receive_skb(rx_ring, skb, vlan_tci);
}
rx_ring->next_to_clean = ntc;
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index bee73353b..0acc125de 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -15,7 +15,7 @@ struct idpf_vport_max_q;
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
-#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <net/gro.h>
#include <linux/dim.h>
@@ -418,11 +418,13 @@ struct idpf_vport {
/**
* enum idpf_user_flags
+ * @__IDPF_USER_FLAG_HSPLIT: header split state
* @__IDPF_PROMISC_UC: Unicast promiscuous mode
* @__IDPF_PROMISC_MC: Multicast promiscuous mode
* @__IDPF_USER_FLAGS_NBITS: Must be last
*/
enum idpf_user_flags {
+ __IDPF_USER_FLAG_HSPLIT = 0U,
__IDPF_PROMISC_UC = 32,
__IDPF_PROMISC_MC,
@@ -965,4 +967,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val);
+
#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 52ea38669..986d429d1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -75,14 +75,12 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
/**
* idpf_get_rxfh - get the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function in use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
*
* Reads the indirection table directly from the hardware. Always returns 0.
*/
-static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int idpf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@@ -103,15 +101,14 @@ static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (key)
- memcpy(key, rss_data->rss_key, rss_data->rss_key_size);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
- indir[i] = rss_data->rss_lut[i];
+ rxfh->indir[i] = rss_data->rss_lut[i];
}
unlock_mutex:
@@ -123,15 +120,15 @@ unlock_mutex:
/**
* idpf_set_rxfh - set the rx flow hash indirection table
* @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function to use
+ * @rxfh: pointer to param struct (indir, key, hfunc)
+ * @extack: extended ACK from the Netlink message
*
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
-static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int idpf_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
@@ -154,17 +151,18 @@ static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
err = -EOPNOTSUPP;
goto unlock_mutex;
}
- if (key)
- memcpy(rss_data->rss_key, key, rss_data->rss_key_size);
+ if (rxfh->key)
+ memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
- if (indir) {
+ if (rxfh->indir) {
for (lut = 0; lut < rss_data->rss_lut_size; lut++)
- rss_data->rss_lut[lut] = indir[lut];
+ rss_data->rss_lut[lut] = rxfh->indir[lut];
}
err = idpf_config_rss(vport);
@@ -320,6 +318,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_pending = vport->rxq_desc_count;
ring->tx_pending = vport->txq_desc_count;
+ kring->tcp_data_split = idpf_vport_get_hsplit(vport);
+
idpf_vport_ctrl_unlock(netdev);
}
@@ -379,6 +379,14 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_rx_count == vport->rxq_desc_count)
goto unlock_mutex;
+ if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
+ NL_SET_ERR_MSG_MOD(ext_ack,
+ "setting TCP data split is not supported");
+ err = -EOPNOTSUPP;
+
+ goto unlock_mutex;
+ }
+
config_data = &vport->adapter->vport_config[idx]->user_config;
config_data->num_req_txq_desc = new_tx_count;
config_data->num_req_rxq_desc = new_rx_count;
@@ -532,7 +540,7 @@ static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
unsigned int i;
for (i = 0; i < size; i++)
- ethtool_sprintf(p, "%s", stats[i].stat_string);
+ ethtool_puts(p, stats[i].stat_string);
}
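The conversion above swaps ethtool_sprintf(p, "%s", str) for ethtool_puts(p, str), which copies the string and advances the cursor without going through format parsing. A minimal sketch of the helper's use; the function name is illustrative:

static void example_add_strings(u8 **p, const char * const names[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		ethtool_puts(p, names[i]);	/* copy string, advance *p */
}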
/**
@@ -1334,6 +1342,7 @@ static int idpf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_msglevel = idpf_get_msglevel,
.set_msglevel = idpf_set_msglevel,
.get_link = ethtool_op_get_link,
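For context on the signature change in this file: the ethtool core now hands drivers a single struct ethtool_rxfh_param carrying indir, key and hfunc, plus a netlink extack on the set side. A minimal sketch of the new-style getter, where example_priv and its fields are illustrative assumptions:

static int example_get_rxfh(struct net_device *netdev,
			    struct ethtool_rxfh_param *rxfh)
{
	struct example_priv *priv = netdev_priv(netdev);
	u32 i;

	/* hfunc is a struct field now, so no NULL check is needed */
	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key)
		memcpy(rxfh->key, priv->rss_key, sizeof(priv->rss_key));

	if (rxfh->indir)
		for (i = 0; i < ARRAY_SIZE(priv->rss_lut); i++)
			rxfh->indir[i] = priv->rss_lut[i];

	return 0;
}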
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 0241e498c..58179bd73 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1060,6 +1060,71 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
}
/**
+ * idpf_is_hsplit_supported - check whether header split is supported
+ * @vport: virtual port to check the capability for
+ *
+ * Return: true if it's supported by the HW/FW, false if not.
+ */
+static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
+{
+ return idpf_is_queue_model_split(vport->rxq_model) &&
+ idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT);
+}
+
+/**
+ * idpf_vport_get_hsplit - get the current header split feature state
+ * @vport: virtual port to query the state for
+ *
+ * Return: ``ETHTOOL_TCP_DATA_SPLIT_UNKNOWN`` if not supported,
+ * ``ETHTOOL_TCP_DATA_SPLIT_DISABLED`` if disabled,
+ * ``ETHTOOL_TCP_DATA_SPLIT_ENABLED`` if active.
+ */
+u8 idpf_vport_get_hsplit(const struct idpf_vport *vport)
+{
+ const struct idpf_vport_user_config_data *config;
+
+ if (!idpf_is_hsplit_supported(vport))
+ return ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+ config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+ return test_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+/**
+ * idpf_vport_set_hsplit - enable or disable header split on a given vport
+ * @vport: virtual port to configure
+ * @val: Ethtool flag controlling the header split state
+ *
+ * Return: true on success, false if not supported by the HW.
+ */
+bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val)
+{
+ struct idpf_vport_user_config_data *config;
+
+ if (!idpf_is_hsplit_supported(vport))
+ return val == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+
+ config = &vport->adapter->vport_config[vport->idx]->user_config;
+
+ switch (val) {
+ case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:
+ /* Default is to enable */
+ case ETHTOOL_TCP_DATA_SPLIT_ENABLED:
+ __set_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+ return true;
+ case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
+ __clear_bit(__IDPF_USER_FLAG_HSPLIT, config->user_flags);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* idpf_vport_alloc - Allocates the next available struct vport in the adapter
* @adapter: board private structure
* @max_q: vport max queue info
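Taken together with the ETHTOOL_RING_USE_TCP_DATA_SPLIT advertisement and the ringparam hooks in idpf_ethtool.c above, these helpers let userspace drive header split through the standard ring interface; recent ethtool binaries expose it as something like "ethtool -G <iface> tcp-data-split on" (exact syntax depends on the ethtool version). A hedged sketch of the set-side flow, mirroring the idpf hunk rather than reproducing it exactly:

static int example_apply_hsplit(struct idpf_vport *vport,
				const struct kernel_ethtool_ringparam *kring,
				struct netlink_ext_ack *extack)
{
	/* UNKNOWN, ENABLED and DISABLED are the only valid tri-state
	 * inputs; anything else is rejected by idpf_vport_set_hsplit().
	 */
	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}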
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 20c4b3a64..27b93592c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -328,10 +328,9 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
if (offload->tso_segs) {
qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
- qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
- IDPF_TXD_CTX_QW1_TSO_LEN_M;
- qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) &
- IDPF_TXD_CTX_QW1_MSS_M;
+ qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_TSO_LEN_M,
+ offload->tso_len);
+ qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.tx.lso_pkts);
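The FIELD_PREP() conversions in this file rely on <linux/bitfield.h> deriving the shift from the mask at compile time, so the *_S shift constants become dead weight. A minimal sketch with illustrative masks, not the driver's real descriptor layout:

#include <linux/bitfield.h>

#define EX_QW1_TSO_LEN_M	GENMASK_ULL(47, 30)	/* illustrative */
#define EX_QW1_MSS_M		GENMASK_ULL(63, 50)	/* illustrative */

static u64 example_build_qw1(u32 tso_len, u16 mss)
{
	/* FIELD_PREP() masks the value and shifts it into position */
	return FIELD_PREP(EX_QW1_TSO_LEN_M, tso_len) |
	       FIELD_PREP(EX_QW1_MSS_M, mss);
}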
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 9e942e5ba..017a081d8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -505,9 +505,9 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
- (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
- IDPF_RX_BI_GEN_S);
+ FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RX_BI_GEN_M,
+ test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
@@ -1240,12 +1240,15 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_adapter *adapter = vport->adapter;
struct idpf_queue *q;
int i, k, err = 0;
+ bool hs;
vport->rxq_grps = kcalloc(vport->num_rxq_grp,
sizeof(struct idpf_rxq_group), GFP_KERNEL);
if (!vport->rxq_grps)
return -ENOMEM;
+ hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
int j;
@@ -1298,9 +1301,8 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
- IDPF_CAP_HSPLIT) &&
- idpf_is_queue_model_split(vport->rxq_model)) {
+
+ if (hs) {
q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
}
@@ -1344,9 +1346,7 @@ skip_splitq_rx_init:
rx_qgrp->splitq.rxq_sets[j]->refillq1 =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
- IDPF_CAP_HSPLIT) &&
- idpf_is_queue_model_split(vport->rxq_model)) {
+ if (hs) {
q->rx_hsplit_en = true;
q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
}
@@ -1825,14 +1825,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+ gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+ rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
dev_err(&complq->vport->adapter->pdev->dev,
@@ -1842,9 +1842,8 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
- IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
@@ -1945,11 +1944,10 @@ void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
u16 td_cmd, u16 size)
{
desc->q.qw1.cmd_dtype =
- cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
+ le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
desc->q.qw1.cmd_dtype |=
- cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
- IDPF_FLEX_TXD_QW1_CMD_M);
- desc->q.qw1.buf_size = cpu_to_le16((u16)size);
+ le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
+ desc->q.qw1.buf_size = cpu_to_le16(size);
desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
}
@@ -2843,8 +2841,9 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
qword1);
csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
- le16_to_cpu(rx_desc->ptype_err_fflags0));
+ csum->raw_csum_inv =
+ le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
}
@@ -2938,8 +2937,10 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
struct idpf_rx_ptype_decoded decoded;
u16 rx_ptype;
- rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
- le16_to_cpu(rx_desc->ptype_err_fflags0));
+ rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
+
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
/* If we don't know the ptype we can't do anything else with it. Just
@@ -2951,10 +2952,8 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
/* process RSS/hash */
idpf_rx_hash(rxq, skb, rx_desc, &decoded);
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
- if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
- le16_to_cpu(rx_desc->hdrlen_flags)))
+ if (le16_get_bits(rx_desc->hdrlen_flags,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
@@ -3148,8 +3147,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
dma_rmb();
/* if the descriptor isn't done, no work yet to do */
- gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
+ gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
break;
@@ -3164,9 +3163,8 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
continue;
}
- pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
- pkt_len);
+ pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
rx_desc->status_err0_qw1);
@@ -3183,14 +3181,12 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
goto bypass_hsplit;
}
- hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
- hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
- hdr_len);
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
bypass_hsplit:
- bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
- bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
- bufq_id);
+ bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
if (!bufq_id)
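The le16_get_bits()/le16_encode_bits() pattern used throughout this file folds the endianness conversion into the field access, replacing the two-step le16_to_cpu() plus FIELD_GET() dance. A minimal sketch with an illustrative mask:

#include <linux/bitfield.h>

#define EX_LEN_PBUF_M	GENMASK(13, 0)	/* illustrative */

static u16 example_pkt_len(__le16 pktlen_gen_bufq_id)
{
	/* convert from little endian and extract in one step;
	 * le16_encode_bits() is the write-side inverse
	 */
	return le16_get_bits(pktlen_gen_bufq_id, EX_LEN_PBUF_M);
}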
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index b0c52f178..390977a76 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -3287,6 +3287,8 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
+ idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
+
idpf_vport_init_num_qs(vport, vport_msg);
idpf_vport_calc_num_q_desc(vport);
idpf_vport_calc_num_q_groups(vport);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 8d6e44ee1..64dfc362d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -222,8 +222,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
}
/* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ hw->bus.func = FIELD_GET(E1000_STATUS_FUNC_MASK, rd32(E1000_STATUS));
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
@@ -262,8 +261,8 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
if (ret_val)
goto out;
- data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
- E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ data = FIELD_GET(E1000_M88E1112_MAC_CTRL_1_MODE_MASK,
+ data);
if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
data == E1000_M88E1112_AUTO_COPPER_BASEX)
hw->mac.ops.check_for_link =
@@ -330,8 +329,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
u32 eecd = rd32(E1000_EECD);
u16 size;
- size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ size = FIELD_GET(E1000_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -2798,7 +2796,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
@@ -2808,10 +2806,8 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
for (i = 1; i < num_sensors; i++) {
hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
+ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
if (sensor_location != 0)
hw->phy.ops.read_i2c_byte(hw,
@@ -2859,20 +2855,17 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
- if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ if (FIELD_GET(NVM_ETS_TYPE_MASK, ets_cfg)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
- low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
- NVM_ETS_LTHRES_DELTA_SHIFT);
+ low_thresh_delta = FIELD_GET(NVM_ETS_LTHRES_DELTA_MASK, ets_cfg);
num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
for (i = 1; i <= num_sensors; i++) {
hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
- sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
- NVM_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
- NVM_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(NVM_ETS_DATA_INDEX_MASK, ets_sensor);
+ sensor_location = FIELD_GET(NVM_ETS_DATA_LOC_MASK, ets_sensor);
therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
hw->phy.ops.write_i2c_byte(hw,
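The FIELD_GET() conversions across the igb files follow the same rule in the read direction: the mask alone determines both the AND and the shift. A minimal sketch with an illustrative mask:

#include <linux/bitfield.h>

#define EX_STATUS_FUNC_MASK	GENMASK(3, 2)	/* illustrative */

static u8 example_lan_func(u32 status)
{
	/* equivalent to (status & mask) >> shift, without the
	 * separate *_SHIFT constant
	 */
	return FIELD_GET(EX_STATUS_FUNC_MASK, status);
}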
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 53b396fd1..503b23986 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -473,7 +473,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
status = 0;
break;
}
@@ -483,8 +483,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
(i != 1))) {
- version = (*next_record & E1000_INVM_VER_FIELD_TWO)
- >> 13;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_TWO,
+ *next_record);
status = 0;
break;
}
@@ -493,15 +493,15 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
*/
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ version = FIELD_GET(E1000_INVM_VER_FIELD_ONE, *record);
status = 0;
break;
}
}
if (!status) {
- invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
- >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_major = FIELD_GET(E1000_INVM_MAJOR_MASK,
+ version);
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
}
/* Read Image Type */
@@ -520,7 +520,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
- (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ FIELD_GET(E1000_INVM_IMGTYPE_FIELD,
+ *next_record);
status = 0;
break;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index caf91c6f5..fa3dfafd2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -50,13 +51,12 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
break;
}
- bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT);
+ bus->width = (enum e1000_bus_width)FIELD_GET(PCI_EXP_LNKSTA_NLW,
+ pcie_link_status);
}
reg = rd32(E1000_STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ bus->func = FIELD_GET(E1000_STATUS_FUNC_MASK, reg);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 0da57e895..2dcd64d6d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -708,10 +708,10 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
*/
if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
+ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK,
+ fw_version);
+ fw_vers->eep_minor = FIELD_GET(NVM_MINOR_MASK,
+ fw_version);
fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
goto etrack_id;
}
@@ -753,15 +753,13 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
return;
}
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
- >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_major = FIELD_GET(NVM_MAJOR_MASK, fw_version);
/* check for old style version format in newer images*/
if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
} else {
- eeprom_verl = (fw_version & NVM_MINOR_MASK)
- >> NVM_MINOR_SHIFT;
+ eeprom_verl = FIELD_GET(NVM_MINOR_MASK, fw_version);
}
/* Convert minor value to hex before assigning to output struct
* Val to be converted will not be higher than 99, per tool output
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3c1b562a3..cd65008c7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -255,7 +255,7 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
}
/* Need to byte-swap the 16-bit value. */
- *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+ *data = ((i2ccmd >> 8) & 0x00FF) | FIELD_PREP(0xFF00, i2ccmd);
return 0;
}
@@ -282,7 +282,7 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
}
/* Swap the data bytes for the I2C interface */
- phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+ phy_data_swapped = ((data >> 8) & 0x00FF) | FIELD_PREP(0xFF00, data);
/* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
@@ -1682,8 +1682,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -1796,8 +1795,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
if (ret_val)
goto out;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = FIELD_GET(M88E1000_PSSR_CABLE_LENGTH, phy_data);
if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
ret_val = -E1000_ERR_PHY;
goto out;
@@ -2578,8 +2576,7 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = FIELD_GET(I82580_DSTATUS_CABLE_LENGTH, phy_data);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
ret_val = -E1000_ERR_PHY;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index a2b759531..3c2dc7bde 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -637,7 +637,7 @@ struct igb_adapter {
struct timespec64 period;
} perout[IGB_N_PEROUT];
- char fw_version[32];
+ char fw_version[48];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
bool ets;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 16d2a55d5..b66199c9b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2356,11 +2356,9 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igb_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, igb_gstrings_stats[i].stat_string);
for (i = 0; i < IGB_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igb_gstrings_net_stats[i].stat_string);
+ ethtool_puts(&p, igb_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -2434,7 +2432,7 @@ static int igb_get_ts_info(struct net_device *dev,
}
}
-#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(FIELD_MAX(U16_MAX))
static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@@ -2713,8 +2711,7 @@ static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter,
etqf |= (etype & E1000_ETQF_ETYPE_MASK);
etqf &= ~E1000_ETQF_QUEUE_MASK;
- etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT)
- & E1000_ETQF_QUEUE_MASK);
+ etqf |= FIELD_PREP(E1000_ETQF_QUEUE_MASK, input->action);
etqf |= E1000_ETQF_QUEUE_ENABLE;
wr32(E1000_ETQF(i), etqf);
@@ -2733,8 +2730,8 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter,
u32 vlapqf;
vlapqf = rd32(E1000_VLAPQF);
- vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
- >> VLAN_PRIO_SHIFT;
+ vlan_priority = FIELD_GET(VLAN_PRIO_MASK,
+ ntohs(input->filter.vlan_tci));
queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK;
/* check whether this vlan prio is already set */
@@ -2817,7 +2814,7 @@ static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter,
u8 vlan_priority;
u32 vlapqf;
- vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan_priority = FIELD_GET(VLAN_PRIO_MASK, vlan_tci);
vlapqf = rd32(E1000_VLAPQF);
vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority);
@@ -3282,18 +3279,17 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igb_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < IGB_RETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
@@ -3333,8 +3329,9 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igb_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3342,10 +3339,11 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
u32 num_queues;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
@@ -3362,12 +3360,12 @@ static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < IGB_RETA_SIZE; i++)
- if (indir[i] >= num_queues)
+ if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGB_RETA_SIZE; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
igb_write_rss_indir_tbl(adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ada42ba63..7662c42e3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
/* if option rom is valid, display its version too */
if (fw.or_valid) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id, fw.or_major, fw.or_build,
- fw.or_patch);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
- fw.eep_minor, fw.eep_build);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
-
- /* the truncate happens here if it doesn't fit */
- strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
- kfree(lbuf);
}
/**
@@ -7282,7 +7283,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
int i;
@@ -7542,7 +7543,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
int ret;
@@ -9797,8 +9798,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int);
bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
@@ -9987,8 +9987,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
hwm = 64 * (pba - 6);
reg = rd32(E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
- reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
- & E1000_FCRTC_RTH_COAL_MASK);
+ reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm);
wr32(E1000_FCRTC, reg);
/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
@@ -9997,8 +9996,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
- & E1000_DMACR_DMACTHR_MASK);
+ reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr);
/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index a3cd7ac48..d15282ee5 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/bitfield.h>
#include "mbx.h"
/**
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e6c1fbee0..a4d4f00e6 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -273,9 +273,8 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
* that case, it fills the header buffer and spills the rest
* into the page.
*/
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
- & E1000_RXDADV_HDRBUFLEN_MASK) >>
- E1000_RXDADV_HDRBUFLEN_SHIFT;
+ hlen = le16_get_bits(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info,
+ E1000_RXDADV_HDRBUFLEN_MASK);
if (hlen > adapter->rx_ps_hdr_size)
hlen = adapter->rx_ps_hdr_size;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 85cc16396..45430e246 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -81,6 +81,21 @@ struct igc_tx_timestamp_request {
u32 flags; /* flags that should be added to the tx_buffer */
};
+struct igc_inline_rx_tstamps {
+ /* Timestamps are saved in little endian at the beginning of the packet
+ * buffer following the layout:
+ *
+ * DWORD: | 0 | 1 | 2 | 3 |
+ * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+ *
+ * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+ * part of the timestamp.
+ */
+ __le32 timer1[2];
+ __le32 timer0[2];
+};
+
struct igc_ring_container {
struct igc_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -261,6 +276,8 @@ struct igc_adapter {
unsigned int ptp_flags;
/* System time value lock */
spinlock_t tmreg_lock;
+ /* Free-running timer lock */
+ spinlock_t free_timer_lock;
struct cyclecounter cc;
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
@@ -469,6 +486,8 @@ enum igc_tx_flags {
IGC_TX_FLAGS_TSTAMP_1 = 0x100,
IGC_TX_FLAGS_TSTAMP_2 = 0x200,
IGC_TX_FLAGS_TSTAMP_3 = 0x400,
+
+ IGC_TX_FLAGS_TSTAMP_TIMER_1 = 0x800,
};
enum igc_boards {
@@ -531,7 +550,7 @@ struct igc_rx_buffer {
struct igc_xdp_buff {
struct xdp_buff xdp;
union igc_adv_rx_desc *rx_desc;
- ktime_t rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
+ struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
};
struct igc_q_vector {
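With rx_ts now pointing at the raw inline area instead of a precomputed ktime_t, conversion is deferred until somebody actually asks for the timestamp. A hedged sketch of how the 16-byte prepended block decodes, assuming the caller already checked IGC_RXDADV_STAT_TSIP; the helper name is illustrative:

static void example_peek_tstamps(void *pktbuf, u64 *t1_ns, u64 *t0_ns)
{
	const struct igc_inline_rx_tstamps *ts = pktbuf;

	/* index 0 is SYSTIML (nanoseconds), index 1 SYSTIMH (seconds) */
	*t1_ns = (u64)le32_to_cpu(ts->timer1[1]) * NSEC_PER_SEC +
		 le32_to_cpu(ts->timer1[0]);
	*t0_ns = (u64)le32_to_cpu(ts->timer0[1]) * NSEC_PER_SEC +
		 le32_to_cpu(ts->timer0[0]);
}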
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index a1d815af5..9fae8bdec 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -68,8 +68,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw)
u32 eecd = rd32(IGC_EECD);
u16 size;
- size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
- IGC_EECD_SIZE_EX_SHIFT);
+ size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -162,8 +161,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->reset_delay_us = 100;
/* set lan id */
- hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
- IGC_STATUS_FUNC_SHIFT;
+ hw->bus.func = FIELD_GET(IGC_STATUS_FUNC_MASK, rd32(IGC_STATUS));
/* Make sure the PHY is in a good state. Several people have reported
* firmware leaving the PHY's page select register set to something
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index f7d6491d4..bf8cdfbba 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -37,6 +37,10 @@ struct igc_adv_tx_context_desc {
#define IGC_ADVTXD_TSTAMP_REG_1 0x00010000 /* Select register 1 for timestamp */
#define IGC_ADVTXD_TSTAMP_REG_2 0x00020000 /* Select register 2 for timestamp */
#define IGC_ADVTXD_TSTAMP_REG_3 0x00030000 /* Select register 3 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_1 0x00010000 /* Select timer 1 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_2 0x00020000 /* Select timer 2 for timestamp */
+#define IGC_ADVTXD_TSTAMP_TIMER_3 0x00030000 /* Select timer 3 for timestamp */
+
#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index b3037016f..5f92b3c7c 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -317,6 +317,8 @@
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
+#define IGC_TXD_PTP2_TIMER_1 0x00000020
+
/* IPSec Encrypt Enable */
#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 859b2636f..b95d2c86e 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -773,11 +773,9 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
break;
case ETH_SS_STATS:
for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igc_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, igc_gstrings_stats[i].stat_string);
for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- igc_gstrings_net_stats[i].stat_string);
+ ethtool_puts(&p, igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -1464,45 +1462,46 @@ static u32 igc_ethtool_get_rxfh_indir_size(struct net_device *netdev)
return IGC_RETA_SIZE;
}
-static int igc_ethtool_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int igc_ethtool_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct igc_adapter *adapter = netdev_priv(netdev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
for (i = 0; i < IGC_RETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
return 0;
}
-static int igc_ethtool_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int igc_ethtool_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
u32 num_queues;
int i;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
num_queues = adapter->rss_queues;
/* Verify user input. */
for (i = 0; i < IGC_RETA_SIZE; i++)
- if (indir[i] >= num_queues)
+ if (rxfh->indir[i] >= num_queues)
return -EINVAL;
for (i = 0; i < IGC_RETA_SIZE; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
igc_write_rss_indir_tbl(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index d2562c8e8..0dd61719f 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -579,9 +579,8 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
/* Calculate tw_system (nsec). */
if (speed == SPEED_100) {
- tw_system = ((rd32(IGC_EEE_SU) &
- IGC_TW_SYSTEM_100_MASK) >>
- IGC_TW_SYSTEM_100_SHIFT) * 500;
+ tw_system = FIELD_GET(IGC_TW_SYSTEM_100_MASK,
+ rd32(IGC_EEE_SU)) * 500;
} else {
tw_system = (rd32(IGC_EEE_SU) &
IGC_TW_SYSTEM_1000_MASK) * 500;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 45716d271..23bed58a9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1299,14 +1299,16 @@ static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
/* insert L4 checksum */
- olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
- ((IGC_TXD_POPTS_TXSM << 8) /
- IGC_TX_FLAGS_CSUM);
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM,
+ (IGC_TXD_POPTS_TXSM << 8));
/* insert IPv4 checksum */
- olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
- (((IGC_TXD_POPTS_IXSM << 8)) /
- IGC_TX_FLAGS_IPV4);
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4,
+ (IGC_TXD_POPTS_IXSM << 8));
+
+ /* Use the second timer (typically the free-running clock) for the timestamp */
+ olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1,
+ IGC_TXD_PTP2_TIMER_1);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
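The IGC_SET_FLAG() rewrite above replaces the old multiply/divide trick with a named helper; igc_main.c already defines the macro roughly as sketched below (hedged, reproduced by shape rather than verbatim). It yields _result when _flag is set in _input and 0 otherwise, without a branch:

#define EX_SET_FLAG(_input, _flag, _result)				\
	((_flag) <= (_result) ?						\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :		\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))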
@@ -1640,10 +1642,6 @@ done:
if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- /* FIXME: add support for retrieving timestamps from
- * the other timer registers before skipping the
- * timestamping request.
- */
unsigned long flags;
u32 tstamp_flags;
@@ -1651,6 +1649,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
}
@@ -1963,9 +1963,9 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
@@ -1982,8 +1982,10 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
/* Determine available headroom for copy */
headlen = size;
@@ -2583,11 +2585,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
int xdp_status = 0, rx_buffer_pgcnt;
while (likely(total_packets < budget)) {
- union igc_adv_rx_desc *rx_desc;
+ struct igc_xdp_buff ctx = { .rx_ts = NULL };
struct igc_rx_buffer *rx_buffer;
+ union igc_adv_rx_desc *rx_desc;
unsigned int size, truesize;
- struct igc_xdp_buff ctx;
- ktime_t timestamp = 0;
int pkt_offset = 0;
void *pktbuf;
@@ -2614,9 +2615,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
- timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
- pktbuf);
- ctx.rx_ts = timestamp;
+ ctx.rx_ts = pktbuf;
pkt_offset = IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
@@ -2653,8 +2652,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
else if (ring_uses_build_skb(rx_ring))
skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
else
- skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
- timestamp);
+ skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -2803,9 +2801,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
ctx->rx_desc = desc;
if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
- timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
- bi->xdp->data);
- ctx->rx_ts = timestamp;
+ ctx->rx_ts = bi->xdp->data;
bi->xdp->data += IGC_TS_HDR_LEN;
@@ -3452,8 +3448,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
/* Configure filter */
queuing = input->length & IGC_FHFT_LENGTH_MASK;
- queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
- queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
+ queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue);
+ queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio);
if (input->immediate_irq)
queuing |= IGC_FHFT_IMM_INT;
@@ -3712,8 +3708,7 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
if (err)
@@ -3735,8 +3730,7 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
igc_del_etype_filter(adapter, rule->filter.etype);
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
- int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
igc_del_vlan_prio_filter(adapter, prio);
}
@@ -6551,6 +6545,24 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return 0;
}
+static ktime_t igc_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ struct igc_inline_rx_tstamps *tstamp;
+ ktime_t timestamp;
+
+ tstamp = hwtstamps->netdev_data;
+
+ if (cycles)
+ timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1);
+ else
+ timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
+
+ return timestamp;
+}
+
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -6568,6 +6580,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
.ndo_xsk_wakeup = igc_xsk_wakeup,
+ .ndo_get_tstamp = igc_get_tstamp,
};
/* PCIe configuration access */
@@ -6671,9 +6684,11 @@ static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct igc_xdp_buff *ctx = (void *)_ctx;
+ struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
+ struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts;
if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
- *timestamp = ctx->rx_ts;
+ *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
return 0;
}
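For context on ndo_get_tstamp: the skb leaves the driver carrying only the netdev_data pointer set earlier, and the core calls back into the driver to materialize the value, passing cycles=true when the socket bound a PHC (SOF_TIMESTAMPING_BIND_PHC) so that timer 1, the free-running clock, is used. A hedged sketch of the consumer side, not the exact core code:

static ktime_t example_resolve_rx_tstamp(struct net_device *dev,
					 struct sk_buff *skb, bool cycles)
{
	const struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV) &&
	    dev->netdev_ops->ndo_get_tstamp)
		return dev->netdev_ops->ndo_get_tstamp(dev, hwts, cycles);

	return hwts->hwtstamp;
}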
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index d0d9e7170..861f37076 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* Temporary workaround - should be removed when PHY will implement
- * IEEE registers as properly
- */
- /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
usleep_range(1000, 2000);
}
@@ -727,7 +723,7 @@ static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
*/
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
{
- u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset);
s32 ret_val;
offset = offset & GPY_REG_MASK;
@@ -758,7 +754,7 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
*/
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
{
- u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
+ u8 dev_addr = FIELD_GET(GPY_MMD_MASK, offset);
s32 ret_val;
offset = offset & GPY_REG_MASK;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 928f38792..885faaa7b 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -459,12 +459,10 @@ static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
/**
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @adapter: Pointer to adapter the packet buffer belongs to
- * @buf: Pointer to packet buffer
+ * @buf: Pointer to start of timestamp in HW format (2 32-bit words)
*
- * This function retrieves the timestamp saved in the beginning of packet
- * buffer. While two timestamps are available, one in timer0 reference and the
- * other in timer1 reference, this function considers only the timestamp in
- * timer0 reference.
+ * This function retrieves and converts the timestamp stored at @buf
+ * to ktime_t, adjusting for hardware latencies.
*
* Returns timestamp value.
*/
@@ -474,17 +472,8 @@ ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf)
u32 secs, nsecs;
int adjust;
- /* Timestamps are saved in little endian at the beginning of the packet
- * buffer following the layout:
- *
- * DWORD: | 0 | 1 | 2 | 3 |
- * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
- *
- * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
- * part of the timestamp.
- */
- nsecs = le32_to_cpu(buf[2]);
- secs = le32_to_cpu(buf[3]);
+ nsecs = le32_to_cpu(buf[0]);
+ secs = le32_to_cpu(buf[1]);
timestamp = ktime_set(secs, nsecs);
@@ -542,10 +531,11 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
val = rd32(IGC_SRRCTL(i));
- /* FIXME: For now, only support retrieving RX timestamps from
- * timer 0.
+ /* Enable retrieving timestamps from timer 0, the
+ * "adjustable clock", and from timer 1, the "free
+ * running clock".
*/
- val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+ val |= IGC_SRRCTL_TIMER1SEL(1) | IGC_SRRCTL_TIMER0SEL(0) |
IGC_SRRCTL_TIMESTAMP;
wr32(IGC_SRRCTL(i), val);
}
@@ -1035,6 +1025,26 @@ static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp,
adapter, &adapter->snapshot, cts);
}
+static int igc_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igc_adapter *igc = container_of(ptp, struct igc_adapter, ptp_caps);
+ struct igc_hw *hw = &igc->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igc->free_timer_lock, flags);
+
+ ptp_read_system_prets(sts);
+ ts->tv_nsec = rd32(IGC_SYSTIML_1);
+ ts->tv_sec = rd32(IGC_SYSTIMH_1);
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&igc->free_timer_lock, flags);
+
+ return 0;
+}
+
/**
* igc_ptp_init - Initialize PTP functionality
* @adapter: Board private structure
@@ -1088,6 +1098,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->ptp_caps.adjfine = igc_ptp_adjfine_i225;
adapter->ptp_caps.adjtime = igc_ptp_adjtime_i225;
adapter->ptp_caps.gettimex64 = igc_ptp_gettimex64_i225;
+ adapter->ptp_caps.getcyclesx64 = igc_ptp_getcyclesx64;
adapter->ptp_caps.settime64 = igc_ptp_settime_i225;
adapter->ptp_caps.enable = igc_ptp_feature_enable_i225;
adapter->ptp_caps.pps = 1;
@@ -1108,6 +1119,7 @@ void igc_ptp_init(struct igc_adapter *adapter)
}
spin_lock_init(&adapter->ptp_tx_lock);
+ spin_lock_init(&adapter->free_timer_lock);
spin_lock_init(&adapter->tmreg_lock);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
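One detail worth spelling out in igc_ptp_getcyclesx64(): SYSTIML_1 is read before SYSTIMH_1 on the assumption that reading the low half latches the high half, the same convention the timer-0 gettimex64 path relies on. A minimal sketch of that latched read; the helper is illustrative, not driver code:

static u64 example_read_timer1_ns(struct igc_hw *hw)
{
	u32 nsec = rd32(IGC_SYSTIML_1);	/* latches the seconds half */
	u32 sec = rd32(IGC_SYSTIMH_1);

	return (u64)sec * NSEC_PER_SEC + nsec;
}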
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 20e17f5fb..d38c87d7e 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -243,6 +243,11 @@
#define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */
#define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define IGC_SYSTIML_1 0x0B688 /* System time register Low - RO (timer 1) */
+#define IGC_SYSTIMH_1 0x0B68C /* System time register High - RO (timer 1) */
+#define IGC_SYSTIMR_1 0x0B684 /* System time register Residue (timer 1) */
+#define IGC_TIMINCA_1 0x0B690 /* Increment attributes register - RW (timer 1) */
+
/* TX Timestamp Low */
#define IGC_TXSTMPL_0 0x0B618
#define IGC_TXSTMPL_1 0x0B698
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 3d56481e1..6835d5f18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -794,7 +794,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
rar_high &= ~IXGBE_RAH_VIND_MASK;
- rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+ rar_high |= FIELD_PREP(IXGBE_RAH_VIND_MASK, vmdq);
IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b2a0f2aaa..2e6e03651 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -684,7 +684,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
- bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+ bus->func = FIELD_GET(IXGBE_STATUS_LAN_ID, reg);
bus->lan_id = bus->func;
/* check for a port swap */
@@ -695,8 +695,8 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
/* Get MAC instance from EEPROM for configuring CS4227 */
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
- bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
- IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ bus->instance_id = FIELD_GET(IXGBE_EE_CTRL_4_INST_ID,
+ ee_ctrl_4);
}
}
@@ -870,10 +870,9 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* SPI EEPROM is assumed here. This code would need to
* change if a future EEPROM is not SPI.
*/
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -3935,10 +3934,10 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
if (status)
return status;
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
+ ets_sensor);
+ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
+ ets_sensor);
if (sensor_location != 0) {
status = hw->phy.ops.read_i2c_byte(hw,
@@ -3982,8 +3981,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
if (status)
return status;
- low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
- IXGBE_ETS_LTHRES_DELTA_SHIFT);
+ low_thresh_delta = FIELD_GET(IXGBE_ETS_LTHRES_DELTA_MASK, ets_cfg);
num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
if (num_sensors > IXGBE_MAX_SENSORS)
num_sensors = IXGBE_MAX_SENSORS;
@@ -3997,10 +3995,10 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
ets_offset + 1 + i);
continue;
}
- sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
- IXGBE_ETS_DATA_INDEX_SHIFT);
- sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
- IXGBE_ETS_DATA_LOC_SHIFT);
+ sensor_index = FIELD_GET(IXGBE_ETS_DATA_INDEX_MASK,
+ ets_sensor);
+ sensor_location = FIELD_GET(IXGBE_ETS_DATA_LOC_MASK,
+ ets_sensor);
therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
hw->phy.ops.write_i2c_byte(hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e47461f3e..9a6345771 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1413,12 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < IXGBE_TEST_LEN; i++)
- ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]);
+ ethtool_puts(&p, ixgbe_gstrings_test[i]);
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, "%s",
- ixgbe_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -3108,35 +3107,37 @@ static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
-static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ixgbe_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- ixgbe_get_reta(adapter, indir);
+ if (rxfh->indir)
+ ixgbe_get_reta(adapter, rxfh->indir);
- if (key)
- memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key,
+ ixgbe_get_rxfh_key_size(netdev));
return 0;
}
-static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ixgbe_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
/* Fill out the redirection table */
- if (indir) {
+ if (rxfh->indir) {
int max_queues = min_t(int, adapter->num_rx_queues,
ixgbe_rss_indir_tbl_max(adapter));
@@ -3147,18 +3148,19 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
/* Verify user input. */
for (i = 0; i < reta_entries; i++)
- if (indir[i] >= max_queues)
+ if (rxfh->indir[i] >= max_queues)
return -EINVAL;
for (i = 0; i < reta_entries; i++)
- adapter->rss_indir_tbl[i] = indir[i];
+ adapter->rss_indir_tbl[i] = rxfh->indir[i];
ixgbe_store_reta(adapter);
}
/* Fill out the rss hash key */
- if (key) {
- memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+ if (rxfh->key) {
+ memcpy(adapter->rss_key, rxfh->key,
+ ixgbe_get_rxfh_key_size(netdev));
ixgbe_store_key(adapter);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7311bd545..18d63c8c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -670,8 +670,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
fcoe->indices);
fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
- fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
- IXGBE_FCRETA_ENTRY_HIGH_MASK;
+ fcoe_q_h = FIELD_PREP(IXGBE_FCRETA_ENTRY_HIGH_MASK,
+ fcoe_q_h);
}
fcoe_i = fcoe->offset + (i % fcoe->indices);
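
FIELD_PREP() is the write-side counterpart used here: shift the value to
the mask's position, then mask. Reusing the sketch macros from the
ixgbe_common example above (0x00FF0000 is a stand-in mask, not the real
IXGBE_FCRETA_ENTRY_HIGH_MASK value):

uint32_t q  = 0x2a;                      /* made-up queue index */
uint32_t hi = FIELD_PREP(0x00FF0000, q); /* == ((uint64_t)q << 16) & 0x00FF0000 */
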
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 13a6fca31..866024f2b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -914,7 +914,13 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs = kzalloc(sizeof(*xs), GFP_KERNEL);
+ algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
+ if (unlikely(!algo)) {
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ xs = kzalloc(sizeof(*xs), GFP_ATOMIC);
if (unlikely(!xs)) {
err = -ENOMEM;
goto err_out;
@@ -930,14 +936,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
xs->xso.dev = adapter->netdev;
- algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
- if (unlikely(!algo)) {
- err = -ENOENT;
- goto err_xs;
- }
-
aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
- xs->aead = kzalloc(aead_len, GFP_KERNEL);
+ xs->aead = kzalloc(aead_len, GFP_ATOMIC);
if (unlikely(!xs->aead)) {
err = -ENOMEM;
goto err_xs;
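
Two things happen in this hunk: the AEAD algorithm lookup moves ahead of
any allocation (so a lookup failure no longer has anything to unwind), and
both allocations switch to GFP_ATOMIC because the VF mailbox path can run
in atomic context, where a GFP_KERNEL allocation might sleep. A
hypothetical illustration of the rule (demo_lock, struct foo and
demo_alloc() are made up, not ixgbe code):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo { int dummy; };

static DEFINE_SPINLOCK(demo_lock);

static struct foo *demo_alloc(void)
{
        struct foo *obj;

        spin_lock_bh(&demo_lock);
        obj = kzalloc(sizeof(*obj), GFP_ATOMIC); /* never GFP_KERNEL here */
        spin_unlock_bh(&demo_lock);
        return obj;
}
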
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ce234e76e..99876b765 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11409,7 +11409,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if ((pf_func & 1) == (pdev->devfn & 1)) {
unsigned int device_id;
- vf = (req_id & 0x7F) >> 1;
+ vf = FIELD_GET(GENMASK(6, 1), req_id);
e_dev_err("VF %d has caused a PCIe error\n", vf);
e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
"%8.8x\tdw3: %8.8x\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 930dc5071..f28140a05 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -276,9 +276,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
if (hw->phy.nw_mng_if_sel) {
- phy_addr = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ phy_addr = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+ hw->phy.nw_mng_if_sel);
if (ixgbe_probe_phy(hw, phy_addr))
return 0;
else
@@ -1447,8 +1446,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
if (ret_val)
goto err_eeprom;
- control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ control = FIELD_GET(IXGBE_CONTROL_MASK_NL, eword);
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 9379069c5..7299a830f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -363,8 +363,7 @@ int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
+ int entries = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
@@ -969,7 +968,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
- u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+ u32 add = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = adapter->hw_tcs;
@@ -992,8 +991,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
- int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
+ int index = FIELD_GET(IXGBE_VT_MSGINFO_MASK, msgbuf[0]);
int err;
if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
@@ -1849,5 +1847,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
ivi->trusted = adapter->vfinfo[vf].trusted;
+ ivi->linkstate = adapter->vfinfo[vf].link_state;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 15325c549..57a912e46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -187,16 +187,16 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ u16 eeprom_size;
+ u32 eec;
+
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cdc912bba..c1adc94a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -630,16 +630,16 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
- u32 eec;
- u16 eeprom_size;
if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ u16 eeprom_size;
+ u32 eec;
+
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
- eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ eeprom_size = FIELD_GET(IXGBE_EEC_SIZE, eec);
eeprom->word_size = BIT(eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
@@ -714,8 +714,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to read, error %x\n", error);
ret = -EIO;
goto out;
@@ -1415,8 +1414,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to write, error %x\n", error);
return -EIO;
}
@@ -3229,9 +3227,8 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
- hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ hw->phy.mdio.prtad = FIELD_GET(IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD,
+ hw->phy.nw_mng_if_sel);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 296915414..7ac53171b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -897,40 +897,41 @@ static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
return IXGBEVF_RSS_HASH_KEY_SIZE;
}
-static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ixgbevf_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
- if (key)
- memcpy(key, adapter->rss_key,
+ if (rxfh->key)
+ memcpy(rxfh->key, adapter->rss_key,
ixgbevf_get_rxfh_key_size(netdev));
- if (indir) {
+ if (rxfh->indir) {
int i;
for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ rxfh->indir[i] = adapter->rss_indir_tbl[i];
}
} else {
/* If neither indirection table nor hash key was requested
* - just return a success avoiding taking any locks.
*/
- if (!indir && !key)
+ if (!rxfh->indir && !rxfh->key)
return 0;
spin_lock_bh(&adapter->mbx_lock);
- if (indir)
- err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+ if (rxfh->indir)
+ err = ixgbevf_get_reta_locked(&adapter->hw,
+ rxfh->indir,
adapter->num_rx_queues);
- if (!err && key)
- err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+ if (!err && rxfh->key)
+ err = ixgbevf_get_rss_key_locked(&adapter->hw,
+ rxfh->key);
spin_unlock_bh(&adapter->mbx_lock);
}
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index 5182fe737..ff54fbe41 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
module_platform_driver(liteeth_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 5f66f779e..9190eff6c 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -53,6 +53,13 @@
#define MVMDIO_XSMI_BUSY BIT(30)
#define MVMDIO_XSMI_ADDR_REG 0x8
+#define MVMDIO_XSMI_CFG_REG 0xc
+#define MVMDIO_XSMI_CLKDIV_MASK 0x3
+#define MVMDIO_XSMI_CLKDIV_256 0x0
+#define MVMDIO_XSMI_CLKDIV_64 0x1
+#define MVMDIO_XSMI_CLKDIV_32 0x2
+#define MVMDIO_XSMI_CLKDIV_8 0x3
+
/*
* SMI Timeout measurements:
* - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt)
@@ -225,6 +232,40 @@ static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
return 0;
}
+static void orion_mdio_xsmi_set_mdc_freq(struct mii_bus *bus)
+{
+ struct orion_mdio_dev *dev = bus->priv;
+ struct clk *mg_core;
+ u32 div, freq, cfg;
+
+ if (device_property_read_u32(bus->parent, "clock-frequency", &freq))
+ return;
+
+ mg_core = of_clk_get_by_name(bus->parent->of_node, "mg_core_clk");
+ if (IS_ERR(mg_core)) {
+ dev_err(bus->parent,
+ "MG core clock unknown, not changing MDC frequency");
+ return;
+ }
+
+ div = clk_get_rate(mg_core) / (freq + 1) + 1;
+ clk_put(mg_core);
+
+ if (div <= 8)
+ div = MVMDIO_XSMI_CLKDIV_8;
+ else if (div <= 32)
+ div = MVMDIO_XSMI_CLKDIV_32;
+ else if (div <= 64)
+ div = MVMDIO_XSMI_CLKDIV_64;
+ else
+ div = MVMDIO_XSMI_CLKDIV_256;
+
+ cfg = readl(dev->regs + MVMDIO_XSMI_CFG_REG);
+ cfg &= ~MVMDIO_XSMI_CLKDIV_MASK;
+ cfg |= div;
+ writel(cfg, dev->regs + MVMDIO_XSMI_CFG_REG);
+}
+
static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
{
struct orion_mdio_dev *dev = dev_id;
@@ -303,6 +344,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"unsupported number of clocks, limiting to the first "
__stringify(ARRAY_SIZE(dev->clk)) "\n");
+
+ if (type == BUS_TYPE_XSMI)
+ orion_mdio_xsmi_set_mdc_freq(bus);
} else {
dev->clk[0] = clk_get(&pdev->dev, NULL);
if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
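
The divider selection above works from the ratio of the MG core clock to
the requested MDC frequency (the two +1 terms guard against division by
zero and against rounding the divider down, which would overclock MDC),
then picks the smallest hardware divider that is still slow enough,
defaulting to the largest (/256). A userspace restatement with assumed
rates (400 MHz core clock, 2.5 MHz "clock-frequency" from DT):

#include <stdio.h>

int main(void)
{
        unsigned long mg_core = 400000000UL; /* 400 MHz, hypothetical */
        unsigned long freq    = 2500000UL;   /* 2.5 MHz, hypothetical */
        unsigned long div     = mg_core / (freq + 1) + 1;

        const char *sel = div <= 8  ? "CLKDIV_8"  :
                          div <= 32 ? "CLKDIV_32" :
                          div <= 64 ? "CLKDIV_64" : "CLKDIV_256";

        printf("div=%lu -> %s\n", div, sel); /* div=160 -> CLKDIV_256 */
        return 0;
}
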
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 29aac3275..a641b3534 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5030,8 +5030,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
return 0;
}
-static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mvneta_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5042,20 +5043,21 @@ static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
/* We require at least one supported parameter to be changed
* and no change in any of the unsupported parameters
*/
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
+ memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE);
return mvneta_config_rss(pp);
}
-static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mvneta_ethtool_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -5063,13 +5065,12 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
if (pp->neta_armada3700)
return -EOPNOTSUPP;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
+ memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 065f07392..23adf53c2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1538,10 +1538,21 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
- if (port->gop_id == 2)
+ if (port->gop_id == 2) {
val |= GENCONF_CTRL0_PORT2_RGMII;
- else if (port->gop_id == 3)
+ } else if (port->gop_id == 3) {
val |= GENCONF_CTRL0_PORT3_RGMII_MII;
+
+ /* According to the specification, GENCONF_CTRL0_PORT3_RGMII
+ * should be set to 1 for RGMII and 0 for MII. However, tests
+ * show that it is the other way around. This is also what
+ * U-Boot does for mvpp2, so it is assumed to be correct.
+ */
+ if (port->phy_interface == PHY_INTERFACE_MODE_MII)
+ val |= GENCONF_CTRL0_PORT3_RGMII;
+ else
+ val &= ~GENCONF_CTRL0_PORT3_RGMII;
+ }
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
@@ -1640,6 +1651,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
return 0;
switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -5659,49 +5671,11 @@ static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
-static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
-
- if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
-
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
-
- return ret;
-}
-
-static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
-
- if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
-
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
-
- if (key)
- return -EOPNOTSUPP;
-
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
-
- return ret;
-}
-
-static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mvpp2_port *port = netdev_priv(dev);
+ u32 rss_context = rxfh->rss_context;
int ret = 0;
if (!mvpp22_rss_is_supported(port))
@@ -5709,33 +5683,34 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_CRC32;
+ rxfh->hfunc = ETH_RSS_HASH_CRC32;
- if (indir)
- ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
+ rxfh->indir);
return ret;
}
-static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret;
+ u32 *rss_context = &rxfh->rss_context;
+ int ret = 0;
if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_CRC32)
return -EOPNOTSUPP;
- if (key)
+ if (rxfh->key)
return -EOPNOTSUPP;
- if (delete)
+ if (*rss_context && rxfh->rss_delete)
return mvpp22_port_rss_ctx_delete(port, *rss_context);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
@@ -5744,8 +5719,13 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
return ret;
}
- return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
+ rxfh->indir);
+
+ return ret;
}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5765,6 +5745,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5787,8 +5768,6 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
- .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
- .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
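
With .get_rxfh_context/.set_rxfh_context folded away and
cap_rss_ctx_supported set, the single .set_rxfh callback now multiplexes
create, modify and delete through rxfh->rss_context and rxfh->rss_delete.
A condensed sketch of the dispatch order, as it appears in the driver
above (ctx_create/ctx_delete/ctx_indir_set are placeholder names, not
mvpp2 functions):

/* delete an existing context */
if (rxfh->rss_context && rxfh->rss_delete)
        return ctx_delete(rxfh->rss_context);

/* allocate a new context; the chosen id is returned to user space */
if (rxfh->rss_context == ETH_RXFH_CONTEXT_ALLOC)
        ret = ctx_create(&rxfh->rss_context);

/* modify the (possibly just created) context */
if (rxfh->indir)
        ret = ctx_indir_set(rxfh->rss_context, rxfh->indir);
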
@@ -6923,7 +6902,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->min_mtu = ETH_MIN_MTU;
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
- dev->dev.of_node = port_node;
+ device_set_node(&dev->dev, port_fwnode);
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
@@ -6973,8 +6952,11 @@ static int mvpp2_port_probe(struct platform_device *pdev,
MAC_10000FD;
}
- if (mvpp2_port_supports_rgmii(port))
+ if (mvpp2_port_supports_rgmii(port)) {
phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ }
if (comphy) {
/* If a COMPHY is present, we can support any of the
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
index 2026c8118..62162ed63 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/Makefile
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
- octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o \
+ octep_pfvf_mbox.o octep_cnxk_pf.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index d4ee24546..b58059694 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -216,16 +216,21 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
- conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
- conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
- conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF) {
+ conf->pf_ring_cfg.srn = CN98_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN98_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ } else {
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ }
dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
conf->iq.instr_type = OCTEP_64BYTE_INSTR;
- conf->iq.pkind = 0;
conf->iq.db_min = OCTEP_DB_MIN;
conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
@@ -357,16 +362,55 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
{
struct octep_mbox *mbox = oct->mbox[q_no];
- mbox->q_no = q_no;
-
- /* PF mbox interrupt reg */
- mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
-
/* PF to VF DATA reg. PF writes into this reg */
- mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+ mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CN93_SDP_MBOX_PF_VF_DATA(q_no);
/* VF to PF DATA reg. PF reads from this reg */
- mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+ mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CN93_SDP_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Poll for mailbox messages from VF */
+static void octep_poll_pfvf_mailbox(struct octep_device *oct)
+{
+ u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
+ u64 reg0, reg1;
+
+ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0));
+ reg1 = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(1));
+ if (reg0 || reg1) {
+ active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
+ active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
+ for (vf = 0; vf < active_vfs; vf++) {
+ vf_mbox_queue = vf * active_rings_per_vf;
+
+ if (vf_mbox_queue < 64) {
+ if (!(reg0 & (0x1UL << vf_mbox_queue)))
+ continue;
+ } else {
+ if (!(reg1 & (0x1UL << (vf_mbox_queue - 64))))
+ continue;
+ }
+
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
+ continue;
+ }
+ schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
+ }
+ if (reg0)
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0), reg0);
+ if (reg1)
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT(1), reg1);
+ }
+}
+
+/* PF-VF mailbox interrupt handler */
+static irqreturn_t octep_pfvf_mbox_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_pfvf_mailbox(oct);
+ return IRQ_HANDLED;
}
/* Poll OEI events like heartbeat */
@@ -398,6 +442,7 @@ static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
*/
static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
{
+ octep_poll_pfvf_mailbox(oct);
octep_poll_oei_cn93_pf(oct);
}
@@ -578,6 +623,13 @@ static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
return IRQ_HANDLED;
}
+/* soft reset of 98xx */
+static int octep_soft_reset_cn98_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN98XX: skip soft reset\n");
+ return 0;
+}
+
/* soft reset of 93xx */
static int octep_soft_reset_cn93_pf(struct octep_device *oct)
{
@@ -634,6 +686,8 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1S(1), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
@@ -660,6 +714,8 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MBOX_RINT_ENA_W1C(1), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
@@ -795,6 +851,7 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+ oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cn93_pf;
oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
@@ -806,7 +863,10 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
- oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ if (oct->chip_id == OCTEP_PCI_DEVICE_ID_CN98_PF)
+ oct->hw_ops.soft_reset = octep_soft_reset_cn98_pf;
+ else
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
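
In octep_poll_pfvf_mailbox() above, each VF's mailbox interrupt lands on
bit (vf * rings_per_vf) of a 128-bit pending window split across the two
MBOX_RINT registers. A small userspace model of the lookup (the VF and
ring counts are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool vf_mbox_pending(uint64_t reg0, uint64_t reg1,
                            unsigned int vf, unsigned int rings_per_vf)
{
        unsigned int q = vf * rings_per_vf;

        return q < 64 ? (reg0 >> q) & 1 : (reg1 >> (q - 64)) & 1;
}

int main(void)
{
        uint64_t reg0 = 1ULL << 8, reg1 = 1ULL << 0; /* queues 8 and 64 pending */
        unsigned int vf;

        for (vf = 0; vf < 16; vf++)                  /* 16 VFs x 8 rings each */
                if (vf_mbox_pending(reg0, reg1, vf, 8))
                        printf("VF %u has a pending mailbox\n", vf); /* VF 1, VF 8 */
        return 0;
}
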
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
new file mode 100644
index 000000000..5de0b5ecb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cnxk_pf.h"
+
+/* We will support 128 PFs in control mbox */
+#define CTRL_MBOX_MAX_PF 128
+#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cnxk_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint",
+ "epf_rsvd0",
+ "epf_vfore_rint",
+ "epf_rsvd1",
+ "epf_mbox_rint",
+ "epf_rsvd2_0",
+ "epf_rsvd2_1",
+ "epf_dma_rint",
+ "epf_dma_vf_rint",
+ "epf_rsvd3",
+ "epf_pp_vf_rint",
+ "epf_rsvd3",
+ "epf_misc_rint",
+ "epf_rsvd5",
+ /* Next 16 are for OEI_RINT */
+ "epf_oei_rint0",
+ "epf_oei_rint1",
+ "epf_oei_rint2",
+ "epf_oei_rint3",
+ "epf_oei_rint4",
+ "epf_oei_rint5",
+ "epf_oei_rint6",
+ "epf_oei_rint7",
+ "epf_oei_rint8",
+ "epf_oei_rint9",
+ "epf_oei_rint10",
+ "epf_oei_rint11",
+ "epf_oei_rint12",
+ "epf_oei_rint13",
+ "epf_oei_rint14",
+ "epf_oei_rint15",
+ /* IOQ interrupt */
+ "octeon_ep"
+};
+
+/* Dump useful hardware CSRs for debug purpose */
+static void cnxk_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cnxk_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(q_no), val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_reset_iq(oct, q);
+ cnxk_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cnxk_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cnxk_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CNXK_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CNXK_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_init_config_cnxk_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u8 link = 0;
+ u64 val;
+ int pos;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CNXK_SDP_EPF_RINFO);
+ dev_info(&pdev->dev, "SDP_EPF_RINFO[0x%x]:0x%llx\n", CNXK_SDP_EPF_RINFO, val);
+ conf->sriov_cfg.max_rings_per_vf = CNXK_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CNXK_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CNXK_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CNXK_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ dev_info(&pdev->dev, "SDP_MAC_PF_RING_CTL[%d]:0x%llx\n", oct->pcie_port, val);
+ conf->pf_ring_cfg.srn = CNXK_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CNXK_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_OQ_WMARK_MIN;
+
+ conf->msix_cfg.non_ioq_msix = CNXK_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cnxk_non_ioq_msix_names;
+
+ pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_byte(oct->pdev,
+ pos + PCI_SRIOV_FUNC_LINK,
+ &link);
+ link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
+ }
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
+ CNXK_PEM_BAR4_INDEX_OFFSET +
+ (link * CTRL_MBOX_SZ);
+
+ conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
+ conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CNXK_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CNXK_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to the max (0xFFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CNXK_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
+
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~0x7fffffULL;
+
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & 0xffff);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CNXK_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~0xFFFFFFFFULL;
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cnxk_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_VF_PF_DATA(q_no);
+}
+
+static void octep_poll_pfvf_mailbox_cnxk_pf(struct octep_device *oct)
+{
+ u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
+ u64 reg0;
+
+ reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0));
+ if (reg0) {
+ active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
+ active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
+ for (vf = 0; vf < active_vfs; vf++) {
+ vf_mbox_queue = vf * active_rings_per_vf;
+ if (!(reg0 & (0x1UL << vf_mbox_queue)))
+ continue;
+
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
+ continue;
+ }
+ schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
+ }
+ if (reg0)
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0), reg0);
+ }
+}
+
+static irqreturn_t octep_pfvf_mbox_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_pfvf_mailbox_cnxk_pf(oct);
+ return IRQ_HANDLED;
+}
+
+/* Poll OEI events like heartbeat */
+static void octep_poll_oei_cnxk_pf(struct octep_device *oct)
+{
+ u64 reg0;
+
+ /* Check for OEI INTR */
+ reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_OEI_RINT);
+ if (reg0) {
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT, reg0);
+ if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ atomic_set(&oct->hb_miss_cnt, 0);
+ }
+}
+
+/* OEI interrupt handler */
+static irqreturn_t octep_oei_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_oei_cnxk_pf(oct);
+ return IRQ_HANDLED;
+}
+
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ * MBOX_RINT is needed for pfvf mailbox
+ */
+static void octep_poll_non_ioq_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ octep_poll_pfvf_mailbox_cnxk_pf(oct);
+ octep_poll_oei_cnxk_pf(oct);
+}
+
+/* Interrupt handler for input ring error interrupts. */
+static irqreturn_t octep_ire_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CNXK_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for output ring error interrupts. */
+static irqreturn_t octep_ore_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf input ring error interrupts. */
+static irqreturn_t octep_vfire_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf output ring error interrupts. */
+static irqreturn_t octep_vfore_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma related interrupts. */
+static irqreturn_t octep_dma_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ u64 reg_val = 0;
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_RINT);
+ if (reg_val)
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT, reg_val);
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma transaction error interrupts for VFs */
+static irqreturn_t octep_dma_vf_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for pp transaction error interrupts for VFs */
+static irqreturn_t octep_pp_vf_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0), reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for mac related interrupts. */
+static irqreturn_t octep_misc_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT, reg_val);
+ }
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for all reserved interrupts. */
+static irqreturn_t octep_rsvd_intr_handler_cnxk_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cnxk_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset */
+static int octep_soft_reset_cnxk_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CNXKXX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to a hw bug, it is not.
+ * Set it to RUNNING right before reset so that it is not
+ * left in READY (1) state after a reset. This is required
+ * in addition to the early setting to handle the case where
+ * the OcteonTX is unexpectedly reset, reboots, and then
+ * the module is removed.
+ */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
+
+ /* Set chip domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1);
+ /* Wait till Octeon resets. */
+ mdelay(10);
+ /* restore the reset value */
+ octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cnxk_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);
+
+ octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cnxk_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cnxk_pf(oct, q);
+ octep_enable_oq_cnxk_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cnxk_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cnxk_pf(oct, q);
+ octep_disable_oq_cnxk_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cnxk_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cnxk_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cnxk_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cnxk_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cnxk_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cnxk_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cnxk_pf;
+
+ oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cnxk_pf;
+ oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cnxk_pf;
+ oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cnxk_pf;
+ oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cnxk_pf;
+ oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cnxk_pf;
+ oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cnxk_pf;
+ oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cnxk_pf;
+ oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cnxk_pf;
+ oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cnxk_pf;
+ oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cnxk_pf;
+ oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cnxk_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cnxk_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cnxk_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cnxk_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cnxk_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cnxk_pf;
+ oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cnxk_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cnxk_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cnxk_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cnxk_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cnxk_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cnxk_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cnxk_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cnxk_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cnxk_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cnxk_pf;
+
+ octep_setup_pci_window_regs_cnxk_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CNXK_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cnxk_pf(oct);
+ octep_configure_ring_mapping_cnxk_pf(oct);
+
+ /* Firmware status CSR is supposed to be cleared by
+ * core domain reset, but due to IPBUPEM-38842, it is not.
+ * Set it to RUNNING early in boot, so that unexpected resets
+ * leave it in a state that is not READY (1).
+ */
+ OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
+ FW_STATUS_RUNNING);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index 1622a6ebf..162766017 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -13,12 +13,16 @@
#define OCTEP_64BYTE_INSTR 64
/* Tx Queue: maximum descriptors per ring */
+/* This needs to be a power of 2 */
#define OCTEP_IQ_MAX_DESCRIPTORS 1024
/* Minimum input (Tx) requests to be enqueued to ring doorbell */
-#define OCTEP_DB_MIN 1
+#define OCTEP_DB_MIN 8
/* Packet threshold for Tx queue interrupt */
#define OCTEP_IQ_INTR_THRESHOLD 0x0
+/* Minimum watermark for backpressure */
+#define OCTEP_OQ_WMARK_MIN 256
+
/* Rx Queue: maximum descriptors per ring */
#define OCTEP_OQ_MAX_DESCRIPTORS 1024
@@ -44,8 +48,6 @@
/* Minimum MTU supported by Octeon network interface */
#define OCTEP_MIN_MTU ETH_MIN_MTU
-/* Maximum MTU supported by Octeon interface*/
-#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
/* Default MTU */
#define OCTEP_DEFAULT_MTU 1500
@@ -58,7 +60,6 @@
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
-#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
@@ -68,12 +69,12 @@
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
-#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
@@ -97,9 +98,6 @@ struct octep_iq_config {
/* Command size - 32 or 64 bytes */
u16 instr_type;
- /* pkind for packets sent to Octeon */
- u16 pkind;
-
/* Minimum number of commands pending to be posted to Octeon before driver
* hits the Input queue doorbell.
*/
@@ -137,6 +135,12 @@ struct octep_oq_config {
* default. The time is specified in microseconds.
*/
u32 oq_intr_time;
+
+ /* Water mark for backpressure.
+ * Output queue sends backpressure signal to source when
+ * free buffer count falls below wmark.
+ */
+ u32 wmark;
};
/* Tx/Rx configuration */
@@ -189,11 +193,37 @@ struct octep_ctrl_mbox_config {
/* Info from firmware */
struct octep_fw_info {
/* interface pkind */
- u16 pkind;
+ u8 pkind;
+
+ /* front data size in bytes */
+ u8 fsz;
+
/* heartbeat interval in milliseconds */
u16 hb_interval;
+
/* heartbeat miss count */
u16 hb_miss_count;
+
+ /* reserved */
+ u16 reserved1;
+
+ /* supported rx offloads OCTEP_ETH_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+
+ /* supported tx offloads OCTEP_ETH_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+
+ /* reserved */
+ u32 reserved_offloads;
+
+ /* extra offload flags */
+ u64 ext_ol_flags;
+
+ /* supported features */
+ u64 features[2];
+
+ /* reserved */
+ u64 reserved2[3];
};
/* Data Structure to hold configuration limits and active config */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
index 7f8135788..6da32d40f 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -16,10 +16,12 @@
* |reserved (4 bytes) |
* |-------------------------------------------|
* |host version (8 bytes) |
+ * | low 32 bits |
* |host status (8 bytes) |
* |host reserved (104 bytes) |
* |-------------------------------------------|
- * |fw version (8 bytes) |
+ * |fw versions (8 bytes) |
+ * | min=high 32 bits, max=low 32 bits |
* |fw status (8 bytes) |
* |fw reserved (104 bytes) |
* |===========================================|
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 0594607a2..01b7be154 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -13,6 +13,7 @@
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#include "octep_pfvf_mbox.h"
/* Control plane version */
#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0)
@@ -22,12 +23,15 @@ static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
+static const u32 offloads_sz = sizeof(struct octep_ctrl_net_offloads);
static atomic_t ctrl_net_msg_id;
/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
- [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] =
- OCTEP_CP_VERSION(1, 0, 0)
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE] =
+ OCTEP_CP_VERSION(1, 0, 0),
+ [OCTEP_CTRL_NET_H2F_CMD_OFFLOADS] = OCTEP_CP_VERSION(1, 0, 1)
};
/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */
@@ -122,7 +126,7 @@ int octep_ctrl_net_init(struct octep_device *oct)
int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
@@ -139,7 +143,7 @@ int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, state_sz, vfid);
@@ -154,7 +158,7 @@ int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, state_sz, vfid);
@@ -168,7 +172,7 @@ int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
int err;
@@ -187,7 +191,7 @@ int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, mac_sz, vfid);
@@ -198,10 +202,28 @@ int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
return octep_send_mbox_req(oct, &d, wait_for_response);
}
+int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+ int err;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, mtu_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req->mtu.cmd = OCTEP_CTRL_NET_CMD_GET;
+
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ return d.data.resp.mtu.val;
+}
+
int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, mtu_sz, vfid);
@@ -216,7 +238,7 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
struct octep_iface_rx_stats *rx_stats,
struct octep_iface_tx_stats *tx_stats)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
int err;
@@ -236,7 +258,7 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
struct octep_ctrl_net_h2f_resp *resp;
int err;
@@ -262,7 +284,7 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info,
bool wait_for_response)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_req *req = &d.data.req;
init_send_req(&d.msg, req, link_info_sz, vfid);
@@ -308,6 +330,11 @@ static int process_mbox_notify(struct octep_device *oct,
octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT)
return -EOPNOTSUPP;
+ if (msg->hdr.s.is_vf) {
+ octep_pfvf_notify(oct, msg);
+ return 0;
+ }
+
switch (cmd) {
case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
if (netif_running(netdev)) {
@@ -331,8 +358,8 @@ static int process_mbox_notify(struct octep_device *oct,
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
{
static u16 msg_sz = sizeof(union octep_ctrl_net_max_data);
- union octep_ctrl_net_max_data data = {0};
- struct octep_ctrl_mbox_msg msg = {0};
+ union octep_ctrl_net_max_data data = {};
+ struct octep_ctrl_mbox_msg msg = {};
int ret;
msg.hdr.s.sz = msg_sz;
@@ -356,7 +383,7 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
struct octep_fw_info *info)
{
- struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_wait_data d = {};
struct octep_ctrl_net_h2f_resp *resp;
struct octep_ctrl_net_h2f_req *req;
int err;
@@ -375,10 +402,41 @@ int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
return 0;
}
+int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+
+ req = &d.data.req;
+ dev_dbg(&oct->pdev->dev, "Sending dev_unload msg to fw\n");
+ init_send_req(&d.msg, req, sizeof(int), vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE;
+
+ return octep_send_mbox_req(oct, &d, false);
+}
+
+int octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid,
+ struct octep_ctrl_net_offloads *offloads,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {};
+ struct octep_ctrl_net_h2f_req *req;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, offloads_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_OFFLOADS;
+ req->offloads.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->offloads.offloads = *offloads;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
int octep_ctrl_net_uninit(struct octep_device *oct)
{
struct octep_ctrl_net_wait_data *pos, *n;
+ octep_ctrl_net_dev_remove(oct, OCTEP_CTRL_NET_INVALID_VFID);
+
list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list)
pos->done = 1;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index b330f3701..0b823bea9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -42,6 +42,8 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
OCTEP_CTRL_NET_H2F_CMD_GET_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_DEV_REMOVE,
+ OCTEP_CTRL_NET_H2F_CMD_OFFLOADS,
OCTEP_CTRL_NET_H2F_CMD_MAX
};
@@ -112,6 +114,26 @@ struct octep_ctrl_net_h2f_req_cmd_link_info {
struct octep_ctrl_net_link_info info;
};
+/* offloads */
+struct octep_ctrl_net_offloads {
+ /* supported rx offloads OCTEP_RX_OFFLOAD_* */
+ u16 rx_offloads;
+ /* supported tx offloads OCTEP_TX_OFFLOAD_* */
+ u16 tx_offloads;
+ /* reserved */
+ u32 reserved_offloads;
+ /* extra offloads */
+ u64 ext_offloads;
+};
+
+/* get/set offloads */
+struct octep_ctrl_net_h2f_req_cmd_offloads {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_offloads */
+ struct octep_ctrl_net_offloads offloads;
+};
+
/* Host to fw request data */
struct octep_ctrl_net_h2f_req {
union octep_ctrl_net_req_hdr hdr;
@@ -121,6 +143,7 @@ struct octep_ctrl_net_h2f_req {
struct octep_ctrl_net_h2f_req_cmd_state link;
struct octep_ctrl_net_h2f_req_cmd_state rx;
struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ struct octep_ctrl_net_h2f_req_cmd_offloads offloads;
};
} __packed;
@@ -178,6 +201,7 @@ struct octep_ctrl_net_h2f_resp {
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
struct octep_ctrl_net_h2f_resp_cmd_get_info info;
+ struct octep_ctrl_net_offloads offloads;
};
} __packed;
@@ -218,87 +242,105 @@ struct octep_ctrl_net_wait_data {
} data;
};
-/** Initialize data for ctrl net.
+/**
+ * octep_ctrl_net_init() - Initialize data for ctrl net.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*
* return value: 0 on success, -errno on error.
*/
int octep_ctrl_net_init(struct octep_device *oct);
-/** Get link status from firmware.
+/**
+ * octep_ctrl_net_get_link_status() - Get link status from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
*
* return value: link status 0=down, 1=up.
*/
int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
-/** Set link status in firmware.
+/**
+ * octep_ctrl_net_set_link_status() - Set link status in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param up: boolean status.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @up: boolean status.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure
*/
int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
bool wait_for_response);
-/** Set rx state in firmware.
+/**
+ * octep_ctrl_net_set_rx_state() - Set rx state in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param up: boolean status.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @up: boolean status.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
bool wait_for_response);
-/** Get mac address from firmware.
+/**
+ * octep_ctrl_net_get_mac_addr() - Get mac address from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param addr: non-null pointer to mac address.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @addr: non-null pointer to mac address.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr);
-/** Set mac address in firmware.
+/**
+ * octep_ctrl_net_set_mac_addr() - Set mac address in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param addr: non-null pointer to mac address.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @addr: non-null pointer to mac address.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
bool wait_for_response);
-/** Set mtu in firmware.
+/**
+ * octep_ctrl_net_get_mtu() - Get max MTU from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param mtu: mtu.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ *
+ * return value: mtu on success, -errno on failure.
+ */
+int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid);
+
+/**
+ * octep_ctrl_net_set_mtu() - Set mtu in firmware.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @mtu: mtu.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
bool wait_for_response);
-/** Get interface statistics from firmware.
+/**
+ * octep_ctrl_net_get_if_stats() - Get interface statistics from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param rx_stats: non-null pointer struct octep_iface_rx_stats.
- * @param tx_stats: non-null pointer struct octep_iface_tx_stats.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @rx_stats: non-null pointer struct octep_iface_rx_stats.
+ * @tx_stats: non-null pointer struct octep_iface_tx_stats.
*
* return value: 0 on success, -errno on failure.
*/
@@ -306,23 +348,25 @@ int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
struct octep_iface_rx_stats *rx_stats,
struct octep_iface_tx_stats *tx_stats);
-/** Get link info from firmware.
+/**
+ * octep_ctrl_net_get_link_info() - Get link info from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param link_info: non-null pointer to struct octep_iface_link_info.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @link_info: non-null pointer to struct octep_iface_link_info.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
struct octep_iface_link_info *link_info);
-/** Set link info in firmware.
+/**
+ * octep_ctrl_net_set_link_info() - Set link info in firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param link_info: non-null pointer to struct octep_iface_link_info.
- * @param wait_for_response: poll for response.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @link_info: non-null pointer to struct octep_iface_link_info.
+ * @wait_for_response: poll for response.
*
* return value: 0 on success, -errno on failure.
*/
@@ -331,26 +375,53 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct,
struct octep_iface_link_info *link_info,
bool wait_for_response);
-/** Poll for firmware messages and process them.
+/**
+ * octep_ctrl_net_recv_fw_messages() - Poll for firmware messages and process them.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*/
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
-/** Get info from firmware.
+/**
+ * octep_ctrl_net_get_info() - Get info from firmware.
*
- * @param oct: non-null pointer to struct octep_device.
- * @param vfid: Index of virtual function.
- * @param info: non-null pointer to struct octep_fw_info.
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @info: non-null pointer to struct octep_fw_info.
*
* return value: 0 on success, -errno on failure.
*/
int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
struct octep_fw_info *info);
-/** Uninitialize data for ctrl net.
+/**
+ * octep_ctrl_net_dev_remove() - Indicate to firmware that a device unload has happened.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid);
+
+/**
+ * octep_ctrl_net_set_offloads() - Set offloads in firmware.
+ *
+ * @oct: non-null pointer to struct octep_device.
+ * @vfid: Index of virtual function.
+ * @offloads: non-null pointer to struct octep_ctrl_net_offloads.
+ * @wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_offloads(struct octep_device *oct, int vfid,
+ struct octep_ctrl_net_offloads *offloads,
+ bool wait_for_response);
+
+/**
+ * octep_ctrl_net_uninit() - Uninitialize data for ctrl net.
*
- * @param oct: non-null pointer to struct octep_device.
+ * @oct: non-null pointer to struct octep_device.
*
* return value: 0 on success, -errno on error.
*/
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index a9bdf3283..7c9faa714 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -16,14 +16,20 @@
#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"
+#include "octep_pfvf_mbox.h"
#define OCTEP_INTR_POLL_TIME_MSECS 100
struct workqueue_struct *octep_wq;
/* Supported Devices */
static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_PF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_PF)},
{0, },
};
MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
@@ -155,6 +161,21 @@ static void octep_disable_msix(struct octep_device *oct)
}
/**
+ * octep_mbox_intr_handler() - common handler for pfvf mbox interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for PF-VF mailbox interrupts.
+ */
+static irqreturn_t octep_mbox_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.mbox_intr_handler(oct);
+}
+
+/**
* octep_oei_intr_handler() - common handler for output endpoint interrupts.
*
* @irq: Interrupt number.
@@ -357,8 +378,12 @@ static int octep_request_irqs(struct octep_device *oct)
snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
"%s-%s", netdev->name, non_ioq_msix_names[i]);
- if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
- strlen("epf_oei_rint"))) {
+ if (!strncmp(non_ioq_msix_names[i], "epf_mbox_rint", strlen("epf_mbox_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_mbox_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
+ strlen("epf_oei_rint"))) {
ret = request_irq(msix_entry->vector,
octep_oei_intr_handler, 0,
irq_name, oct);
@@ -777,17 +802,24 @@ static int octep_stop(struct net_device *netdev)
*/
static inline int octep_iq_full_check(struct octep_iq *iq)
{
- if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ if (likely((IQ_INSTR_SPACE(iq)) >
OCTEP_WAKE_QUEUE_THRESHOLD))
return 0;
/* Stop the queue if unable to send */
netif_stop_subqueue(iq->netdev, iq->q_no);
+ /* Allow pending index updates from
+ * octep_iq_process_completions() on other CPUs
+ * to become visible, in case the queue has
+ * gained free entries.
+ */
+ smp_mb();
+
/* check again and restart the queue, in case NAPI has just freed
* enough Tx ring entries.
*/
- if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ if (unlikely(IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
iq->stats.restart_cnt++;
@@ -810,6 +842,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct octep_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
struct octep_tx_sglist_desc *sglist;
struct octep_tx_buffer *tx_buffer;
struct octep_tx_desc_hw *hw_desc;
@@ -818,8 +851,12 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
struct octep_iq *iq;
skb_frag_t *frag;
u16 nr_frags, si;
+ int xmit_more;
u16 q_no, wi;
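+ /* pad runt frames to the 60-byte minimum; skb_put_padto() frees the skb on failure */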
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
q_no = skb_get_queue_mapping(skb);
if (q_no >= oct->num_iqs) {
netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
@@ -827,10 +864,6 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
}
iq = oct->iq[q_no];
- if (octep_iq_full_check(iq)) {
- iq->stats.tx_busy++;
- return NETDEV_TX_BUSY;
- }
shinfo = skb_shinfo(skb);
nr_frags = shinfo->nr_frags;
@@ -843,8 +876,9 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
tx_buffer->skb = skb;
ih = &hw_desc->ih;
- ih->tlen = skb->len;
- ih->pkind = oct->pkind;
+ ih->pkind = oct->conf->fw_info.pkind;
+ ih->fsz = oct->conf->fw_info.fsz;
+ ih->tlen = skb->len + ih->fsz;
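+ /* total length reported to hw includes the fsz bytes of front data */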
if (!nr_frags) {
tx_buffer->gather = 0;
@@ -869,9 +903,6 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
if (dma_mapping_error(iq->dev, dma))
goto dma_map_err;
- dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
- OCTEP_SGLIST_SIZE_PER_PKT,
- DMA_TO_DEVICE);
memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
sglist[0].len[3] = len;
sglist[0].dma_ptr[0] = dma;
@@ -891,26 +922,46 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
frag++;
si++;
}
- dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
- OCTEP_SGLIST_SIZE_PER_PKT,
- DMA_TO_DEVICE);
-
hw_desc->dptr = tx_buffer->sglist_dma;
}
- netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ if (oct->conf->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_TX_OFFLOAD_CKSUM;
+ }
+ /* due to ESR, hw byte-swaps txm; pre-swap it to big-endian here */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ __netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more);
+
skb_tx_timestamp(skb);
- atomic_inc(&iq->instr_pending);
+ iq->fill_cnt++;
wi++;
- if (wi == iq->max_count)
- wi = 0;
- iq->host_write_index = wi;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+ /* If inserting the current packet has filled
+ * the queue, octep_iq_full_check() stops it and
+ * returns true; in that case go ahead and ring
+ * the doorbell.
+ */
+ if (!octep_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
/* Flush the hw descriptor before writing to doorbell */
wmb();
-
- /* Ring Doorbell to notify the NIC there is a new packet */
- writel(1, iq->doorbell_reg);
- iq->stats.instr_posted++;
+ /* Ring Doorbell to notify the NIC of new packets */
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
return NETDEV_TX_OK;
dma_map_sg_err:
@@ -1051,6 +1102,41 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
+static int octep_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct octep_ctrl_net_offloads offloads = { 0 };
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & dev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ offloads.tx_offloads |= OCTEP_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ offloads.rx_offloads |= OCTEP_RX_OFFLOAD_CKSUM;
+
+ err = octep_ctrl_net_set_offloads(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ &offloads,
+ true);
+ if (!err)
+ dev->features = features;
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1059,6 +1145,7 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_tx_timeout = octep_tx_timeout,
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
+ .ndo_set_features = octep_set_features,
};
/**
@@ -1132,10 +1219,20 @@ static void octep_ctrl_mbox_task(struct work_struct *work)
static const char *octep_devid_to_str(struct octep_device *oct)
{
switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN98_PF:
+ return "CN98XX";
case OCTEP_PCI_DEVICE_ID_CN93_PF:
return "CN93XX";
case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
+ return "CN10KB";
default:
return "Unsupported";
}
@@ -1174,6 +1271,7 @@ int octep_device_setup(struct octep_device *oct)
dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN98_PF:
case OCTEP_PCI_DEVICE_ID_CN93_PF:
case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
@@ -1181,13 +1279,20 @@ int octep_device_setup(struct octep_device *oct)
OCTEP_MINOR_REV(oct));
octep_device_setup_cn93_pf(oct);
break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_PF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_PF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_PF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_PF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
+ octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct));
+ octep_device_setup_cnxk_pf(oct);
+ break;
default:
dev_err(&pdev->dev,
"%s: unsupported device\n", __func__);
goto unsupported_dev;
}
- oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
ret = octep_ctrl_net_init(oct);
if (ret)
@@ -1237,6 +1342,7 @@ static void octep_device_cleanup(struct octep_device *oct)
oct->mbox[i] = NULL;
}
+ octep_delete_pfvf_mbox(oct);
octep_ctrl_net_uninit(oct);
cancel_delayed_work_sync(&oct->hb_task);
@@ -1284,6 +1390,7 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct octep_device *octep_dev = NULL;
struct net_device *netdev;
+ int max_rx_pktlen;
int err;
err = pci_enable_device(pdev);
@@ -1333,6 +1440,12 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_octep_config;
}
+ err = octep_setup_pfvf_mbox(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "PF-VF mailbox setup failed\n");
+ goto register_dev_err;
+ }
+
err = octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
&octep_dev->conf->fw_info);
if (err) {
@@ -1350,11 +1463,29 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_carrier_off(netdev);
netdev->hw_features = NETIF_F_SG;
- netdev->features |= netdev->hw_features;
+ if (OCTEP_TX_IP_CSUM(octep_dev->conf->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_RX_IP_CSUM(octep_dev->conf->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ max_rx_pktlen = octep_ctrl_net_get_mtu(octep_dev, OCTEP_CTRL_NET_INVALID_VFID);
+ if (max_rx_pktlen < 0) {
+ dev_err(&octep_dev->pdev->dev,
+ "Failed to get max receive packet size; err = %d\n", max_rx_pktlen);
+ err = max_rx_pktlen;
+ goto register_dev_err;
+ }
netdev->min_mtu = OCTEP_MIN_MTU;
- netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->max_mtu = max_rx_pktlen - (ETH_HLEN + ETH_FCS_LEN);
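+ /* max MTU is firmware-derived: max receive packet length minus L2 header and FCS */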
netdev->mtu = OCTEP_DEFAULT_MTU;
+ if (OCTEP_TX_TSO(octep_dev->conf->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
octep_dev->mac_addr);
if (err) {
@@ -1383,6 +1514,21 @@ err_dma_mask:
return err;
}
+static int octep_sriov_disable(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ if (pci_vfs_assigned(oct->pdev)) {
+ dev_warn(&pdev->dev, "Can't disable SRIOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ pci_disable_sriov(pdev);
+ CFG_GET_ACTIVE_VFS(oct->conf) = 0;
+
+ return 0;
+}
+
/**
* octep_remove() - Remove Octeon PCI device from driver control.
*
@@ -1400,6 +1546,7 @@ static void octep_remove(struct pci_dev *pdev)
return;
netdev = oct->netdev;
+ octep_sriov_disable(oct);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
@@ -1410,11 +1557,47 @@ static void octep_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static int octep_sriov_enable(struct octep_device *oct, int num_vfs)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int err;
+
+ CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs;
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err);
+ CFG_GET_ACTIVE_VFS(oct->conf) = 0;
+ return err;
+ }
+
+ return num_vfs;
+}
+
+static int octep_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ int max_nvfs;
+
+ if (num_vfs == 0)
+ return octep_sriov_disable(oct);
+
+ max_nvfs = CFG_GET_MAX_VFS(oct->conf);
+
+ if (num_vfs > max_nvfs) {
+ dev_err(&pdev->dev, "Invalid VF count Max supported VFs = %d\n",
+ max_nvfs);
+ return -EINVAL;
+ }
+
+ return octep_sriov_enable(oct, num_vfs);
+}
+
static struct pci_driver octep_driver = {
.name = OCTEP_DRV_NAME,
.id_table = octep_pci_id_tbl,
.probe = octep_probe,
.remove = octep_remove,
+ .sriov_configure = octep_sriov_configure,
};
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index 6df902ebb..fee59e0e0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -18,11 +18,17 @@
#define OCTEP_PCIID_CN93_PF 0xB200177d
#define OCTEP_PCIID_CN93_VF 0xB203177d
+#define OCTEP_PCI_DEVICE_ID_CN98_PF 0xB100
#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 //95N PF
+#define OCTEP_PCI_DEVICE_ID_CN10KA_PF 0xB900 //CN10KA PF
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_PF 0xBA00 //CNF10KA PF
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_PF 0xBC00 //CNF10KB PF
+#define OCTEP_PCI_DEVICE_ID_CN10KB_PF 0xBD00 //CN10KB PF
+
#define OCTEP_MAX_QUEUES 63
#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
@@ -40,6 +46,15 @@
#define OCTEP_OQ_INTR_RESEND_BIT 59
#define OCTEP_MMIO_REGIONS 3
+
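+/* Ring accounting: pending entries are the wrap-safe distance from
+ * flush_index to host_write_index (ring sizes are powers of 2);
+ * free space is whatever remains of max_count.
+ */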
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
/* PCI address space mapping information.
* Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
* Octeon gets mapped to different physical address spaces in
@@ -65,6 +80,7 @@ struct octep_hw_ops {
void (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+ irqreturn_t (*mbox_intr_handler)(void *ioq_vector);
irqreturn_t (*oei_intr_handler)(void *ioq_vector);
irqreturn_t (*ire_intr_handler)(void *ioq_vector);
irqreturn_t (*ore_intr_handler)(void *ioq_vector);
@@ -103,28 +119,27 @@ struct octep_mbox_data {
u64 *data;
};
+#define MAX_VF_PF_MBOX_DATA_SIZE 384
+/* wrappers around work structs */
+struct octep_pfvf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+ u64 ctxul;
+};
+
/* Octeon device mailbox */
struct octep_mbox {
- /* A spinlock to protect access to this q_mbox. */
- spinlock_t lock;
-
- u32 q_no;
- u32 state;
-
- /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
- u8 __iomem *mbox_int_reg;
-
- /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
- * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
- */
- u8 __iomem *mbox_write_reg;
-
- /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
- * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
- */
- u8 __iomem *mbox_read_reg;
-
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+ u32 vf_id;
+ u32 config_data_index;
+ u32 message_len;
+ u8 __iomem *pf_vf_data_reg;
+ u8 __iomem *vf_pf_data_reg;
+ struct octep_pfvf_mbox_wk wk;
+ struct octep_device *oct;
struct octep_mbox_data mbox_data;
+ u8 config_data[MAX_VF_PF_MBOX_DATA_SIZE];
};
/* Tx/Rx queue vector per interrupt. */
@@ -202,6 +217,12 @@ struct octep_iface_link_info {
u8 oper_up;
};
+/* The Octeon VF device specific info data structure. */
+struct octep_pfvf_info {
+ u8 mac_addr[ETH_ALEN];
+ u32 mbox_version;
+};
+
/* The Octeon device specific private data structure.
* Each Octeon device has this structure to represent all its components.
*/
@@ -232,8 +253,7 @@ struct octep_device {
/* Tx queues (IQ: Instruction Queue) */
u16 num_iqs;
- /* pkind value to be used in every Tx hardware descriptor */
- u8 pkind;
+
/* Pointers to Octeon Tx queues */
struct octep_iq *iq[OCTEP_MAX_IQ];
@@ -268,6 +288,8 @@ struct octep_device {
/* Mailbox to talk to VFs */
struct octep_mbox *mbox[OCTEP_MAX_VF];
+ /* VFs info */
+ struct octep_pfvf_info vf_info[OCTEP_MAX_VF];
/* Work entry to handle Tx timeout */
struct work_struct tx_timeout_task;
@@ -377,6 +399,7 @@ int octep_setup_oqs(struct octep_device *oct);
void octep_free_oqs(struct octep_device *oct);
void octep_oq_dbell_init(struct octep_device *oct);
void octep_device_setup_cn93_pf(struct octep_device *oct);
+void octep_device_setup_cnxk_pf(struct octep_device *oct);
int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
int octep_oq_process_rx(struct octep_oq *oq, int budget);
void octep_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
new file mode 100644
index 000000000..2e2c3be8a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_pfvf_mbox.h"
+#include "octep_ctrl_net.h"
+
+/* When a new command is implemented, the below table should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+static void octep_pfvf_validate_version(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ u32 vf_version = (u32)cmd.s_version.version;
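+ /* negotiate down to the lower of the VF and PF mbox versions */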
+
+ dev_dbg(&oct->pdev->dev, "VF id:%d VF version:%d PF version:%d\n",
+ vf_id, vf_version, OCTEP_PFVF_MBOX_VERSION_CURRENT);
+ if (vf_version < OCTEP_PFVF_MBOX_VERSION_CURRENT)
+ rsp->s_version.version = vf_version;
+ else
+ rsp->s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+
+ oct->vf_info[vf_id].mbox_version = rsp->s_version.version;
+ dev_dbg(&oct->pdev->dev, "VF id:%d negotiated VF version:%d\n",
+ vf_id, oct->vf_info[vf_id].mbox_version);
+
+ rsp->s_version.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_link_status(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int status;
+
+ status = octep_ctrl_net_get_link_status(oct, vf_id);
+ if (status < 0) {
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF link status failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ rsp->s_link_status.status = status;
+}
+
+static void octep_pfvf_set_link_status(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_link_status(oct, vf_id, cmd.s_link_status.status, true);
+ if (err) {
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF link status failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_status.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_set_rx_state(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_rx_state(oct, vf_id, cmd.s_link_state.state, true);
+ if (err) {
+ rsp->s_link_state.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF Rx link state failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_link_state.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static int octep_send_notification(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd)
+{
+ u32 max_rings_per_vf, vf_mbox_queue;
+ struct octep_mbox *mbox;
+
+ /* check if the VF's negotiated mbox version supports this notification */
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->vf_info[vf_id].mbox_version) {
+ dev_dbg(&oct->pdev->dev, "VF Mbox doesn't support Notification:%d on VF ver:%d\n",
+ cmd.s.opcode, oct->vf_info[vf_id].mbox_version);
+ return -EOPNOTSUPP;
+ }
+
+ max_rings_per_vf = CFG_GET_MAX_RPVF(oct->conf);
+ vf_mbox_queue = vf_id * max_rings_per_vf;
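+ /* each VF's mailbox is attached to its first ring */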
+ if (!oct->mbox[vf_mbox_queue]) {
+ dev_err(&oct->pdev->dev, "Notif obtained for bad mbox vf %d\n", vf_id);
+ return -EINVAL;
+ }
+ mbox = oct->mbox[vf_mbox_queue];
+
+ mutex_lock(&mbox->lock);
+ writeq(cmd.u64, mbox->pf_vf_data_reg);
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+}
+
+static void octep_pfvf_set_mtu(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_mtu(oct, vf_id, cmd.s_set_mtu.mtu, true);
+ if (err) {
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF MTU failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_mtu(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int max_rx_pktlen = oct->netdev->max_mtu + (ETH_HLEN + ETH_FCS_LEN);
+
+ rsp->s_set_mtu.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ rsp->s_get_mtu.mtu = max_rx_pktlen;
+}
+
+static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
+ if (err) {
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
+ if (err) {
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int err;
+
+ err = octep_ctrl_net_dev_remove(oct, vf_id);
+ if (err) {
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Failed to acknowledge fw of vf %d removal\n",
+ vf_id);
+ return;
+ }
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_get_fw_info(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_fw_info fw_info;
+ int err;
+
+ err = octep_ctrl_net_get_info(oct, vf_id, &fw_info);
+ if (err) {
+ rsp->s_fw_info.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Get VF info failed via host control Mbox\n");
+ return;
+ }
+
+ rsp->s_fw_info.pkind = fw_info.pkind;
+ rsp->s_fw_info.fsz = fw_info.fsz;
+ rsp->s_fw_info.rx_ol_flags = fw_info.rx_ol_flags;
+ rsp->s_fw_info.tx_ol_flags = fw_info.tx_ol_flags;
+
+ rsp->s_fw_info.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+static void octep_pfvf_set_offloads(struct octep_device *oct, u32 vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_ctrl_net_offloads offloads = {
+ .rx_offloads = cmd.s_offloads.rx_ol_flags,
+ .tx_offloads = cmd.s_offloads.tx_ol_flags
+ };
+ int err;
+
+ err = octep_ctrl_net_set_offloads(oct, vf_id, &offloads, true);
+ if (err) {
+ rsp->s_offloads.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ dev_err(&oct->pdev->dev, "Set VF offloads failed via host control Mbox\n");
+ return;
+ }
+ rsp->s_offloads.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+}
+
+int octep_setup_pfvf_mbox(struct octep_device *oct)
+{
+ int i = 0, num_vfs = 0, rings_per_vf = 0;
+ int ring = 0;
+
+ num_vfs = oct->conf->sriov_cfg.active_vfs;
+ rings_per_vf = oct->conf->sriov_cfg.max_rings_per_vf;
+
+ for (i = 0; i < num_vfs; i++) {
+ ring = rings_per_vf * i;
+ oct->mbox[ring] = vzalloc(sizeof(*oct->mbox[ring]));
+
+ if (!oct->mbox[ring])
+ goto free_mbox;
+
+ memset(&oct->vf_info[i], 0, sizeof(struct octep_pfvf_info));
+ mutex_init(&oct->mbox[ring]->lock);
+ INIT_WORK(&oct->mbox[ring]->wk.work, octep_pfvf_mbox_work);
+ oct->mbox[ring]->wk.ctxptr = oct->mbox[ring];
+ oct->mbox[ring]->oct = oct;
+ oct->mbox[ring]->vf_id = i;
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ }
+ return 0;
+
+free_mbox:
+ while (i) {
+ i--;
+ ring = rings_per_vf * i;
+ cancel_work_sync(&oct->mbox[ring]->wk.work);
+ mutex_destroy(&oct->mbox[ring]->lock);
+ vfree(oct->mbox[ring]);
+ oct->mbox[ring] = NULL;
+ }
+ return -ENOMEM;
+}
+
+void octep_delete_pfvf_mbox(struct octep_device *oct)
+{
+ int rings_per_vf = oct->conf->sriov_cfg.max_rings_per_vf;
+ int num_vfs = oct->conf->sriov_cfg.active_vfs;
+ int i = 0, ring = 0, vf_srn = 0;
+
+ for (i = 0; i < num_vfs; i++) {
+ ring = vf_srn + rings_per_vf * i;
+ if (!oct->mbox[ring])
+ continue;
+
+ if (work_pending(&oct->mbox[ring]->wk.work))
+ cancel_work_sync(&oct->mbox[ring]->wk.work);
+
+ mutex_destroy(&oct->mbox[ring]->lock);
+ vfree(oct->mbox[ring]);
+ oct->mbox[ring] = NULL;
+ }
+}
+
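+/* For responses larger than the 6-byte mbox data word (link info, stats):
+ * a first request without the frag flag caches the full payload in
+ * mbox->config_data and returns its length; follow-up requests carrying
+ * OCTEP_PFVF_MBOX_MORE_FRAG_FLAG stream it out 6 bytes at a time.
+ */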
+static void octep_pfvf_pf_get_data(struct octep_device *oct,
+ struct octep_mbox *mbox, int vf_id,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ int length = 0;
+ int i = 0;
+ int err;
+ struct octep_iface_link_info link_info;
+ struct octep_iface_rx_stats rx_stats;
+ struct octep_iface_tx_stats tx_stats;
+
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+
+ if (cmd.s_data.frag != OCTEP_PFVF_MBOX_MORE_FRAG_FLAG) {
+ mbox->config_data_index = 0;
+ memset(mbox->config_data, 0, MAX_VF_PF_MBOX_DATA_SIZE);
+ /* Based on the OPCODE CMD the PF driver
+ * specific API should be called to fetch
+ * the requested data
+ */
+ switch (cmd.s.opcode) {
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO:
+ memset(&link_info, 0, sizeof(link_info));
+ err = octep_ctrl_net_get_link_info(oct, vf_id, &link_info);
+ if (!err) {
+ mbox->message_len = sizeof(link_info);
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ memcpy(mbox->config_data, (u8 *)&link_info, sizeof(link_info));
+ } else {
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_STATS:
+ memset(&rx_stats, 0, sizeof(rx_stats));
+ memset(&tx_stats, 0, sizeof(tx_stats));
+ err = octep_ctrl_net_get_if_stats(oct, vf_id, &rx_stats, &tx_stats);
+ if (!err) {
+ mbox->message_len = sizeof(rx_stats) + sizeof(tx_stats);
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ memcpy(mbox->config_data, (u8 *)&rx_stats, sizeof(rx_stats));
+ memcpy(mbox->config_data + sizeof(rx_stats), (u8 *)&tx_stats,
+ sizeof(tx_stats));
+
+ } else {
+ rsp->s_data.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+ break;
+ }
+ *((int32_t *)rsp->s_data.data) = mbox->message_len;
+ return;
+ }
+
+ if (mbox->message_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE)
+ length = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ else
+ length = mbox->message_len;
+
+ mbox->message_len -= length;
+
+ for (i = 0; i < length; i++) {
+ rsp->s_data.data[i] =
+ mbox->config_data[mbox->config_data_index];
+ mbox->config_data_index++;
+ }
+}
+
+void octep_pfvf_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg)
+{
+ union octep_pfvf_mbox_word notif = { 0 };
+ struct octep_ctrl_net_f2h_req *req;
+
+ req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
+ switch (req->hdr.s.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ notif.s_link_status.opcode = OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS;
+ notif.s_link_status.status = req->link.state;
+ break;
+ default:
+ pr_info("Unknown mbox notif for vf: %u\n",
+ req->hdr.s.cmd);
+ return;
+ }
+
+ notif.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ octep_send_notification(oct, msg->hdr.s.vf_idx, notif);
+}
+
+void octep_pfvf_mbox_work(struct work_struct *work)
+{
+ struct octep_pfvf_mbox_wk *wk = container_of(work, struct octep_pfvf_mbox_wk, work);
+ union octep_pfvf_mbox_word cmd = { 0 };
+ union octep_pfvf_mbox_word rsp = { 0 };
+ struct octep_mbox *mbox = NULL;
+ struct octep_device *oct = NULL;
+ int vf_id;
+
+ mbox = (struct octep_mbox *)wk->ctxptr;
+ oct = (struct octep_device *)mbox->oct;
+ vf_id = mbox->vf_id;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = readq(mbox->vf_pf_data_reg);
+ rsp.u64 = 0;
+
+ switch (cmd.s.opcode) {
+ case OCTEP_PFVF_MBOX_CMD_VERSION:
+ octep_pfvf_validate_version(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS:
+ octep_pfvf_get_link_status(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS:
+ octep_pfvf_set_link_status(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_RX_STATE:
+ octep_pfvf_set_rx_state(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_MTU:
+ octep_pfvf_set_mtu(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR:
+ octep_pfvf_set_mac_addr(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR:
+ octep_pfvf_get_mac_addr(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO:
+ case OCTEP_PFVF_MBOX_CMD_GET_STATS:
+ octep_pfvf_pf_get_data(oct, mbox, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_MTU:
+ octep_pfvf_get_mtu(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_DEV_REMOVE:
+ octep_pfvf_dev_remove(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_GET_FW_INFO:
+ octep_pfvf_get_fw_info(oct, vf_id, cmd, &rsp);
+ break;
+ case OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS:
+ octep_pfvf_set_offloads(oct, vf_id, cmd, &rsp);
+ break;
+ default:
+ dev_err(&oct->pdev->dev, "PF-VF mailbox: invalid opcode %d\n", cmd.s.opcode);
+ rsp.s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ break;
+ }
+ writeq(rsp.u64, mbox->vf_pf_data_reg);
+ mutex_unlock(&mbox->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
new file mode 100644
index 000000000..0dc6eead2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_PFVF_MBOX_H_
+#define _OCTEP_PFVF_MBOX_H_
+
+/* VF flags */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
+#define OCTEON_SDP_16K_HW_FRS 16380UL
+#define OCTEON_SDP_64K_HW_FRS 65531UL
+
+/* When a new command is implemented, the PF mbox version should be bumped.
+ */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2,
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4
+};
+
+enum octep_pfvf_mbox_state {
+ OCTEP_PFVF_MBOX_STATE_IDLE = 0,
+ OCTEP_PFVF_MBOX_STATE_BUSY = 1,
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_MS 500
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
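+/* one 64-bit mbox word carries at most 6 bytes of payload (see s_data.data below) */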
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+#define OCTEP_PFVF_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_get_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
+
+void octep_pfvf_mbox_work(struct work_struct *work);
+int octep_setup_pfvf_mbox(struct octep_device *oct);
+void octep_delete_pfvf_mbox(struct octep_device *oct);
+void octep_pfvf_notify(struct octep_device *oct, struct octep_ctrl_mbox_msg *msg);
+#endif
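For illustration, a minimal user-space sketch (not part of the patch) of how a VF could pack a SET_MTU command into the 64-bit mailbox word, mirroring the union octep_pfvf_mbox_word.s layout above (opcode:8, type:2, rsvd:6, data:48). The bit positions assume the kernel's little-endian bitfield ordering, and the sample MTU is invented:

#include <stdint.h>
#include <stdio.h>

#define MBOX_OPCODE_SET_MTU	1ULL	/* OCTEP_PFVF_MBOX_CMD_SET_MTU */
#define MBOX_TYPE_CMD		0ULL	/* OCTEP_PFVF_MBOX_TYPE_CMD */

/* opcode occupies bits 7:0, type bits 9:8, data bits 63:16 */
static uint64_t mbox_pack_set_mtu(uint64_t mtu)
{
	return (MBOX_OPCODE_SET_MTU & 0xffULL) |
	       ((MBOX_TYPE_CMD & 0x3ULL) << 8) |
	       ((mtu & 0xffffffffffffULL) << 16);
}

int main(void)
{
	printf("mbox word: 0x%016llx\n",
	       (unsigned long long)mbox_pack_set_mtu(9000));
	return 0;
}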
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index 0a43983e9..ca473502d 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -208,6 +208,9 @@
#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+#define CN93_SDP_MBOX_VF_PF_DATA_START 0x24000
+#define CN93_SDP_MBOX_PF_VF_DATA_START 0x22000
+
#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
(CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
@@ -217,6 +220,12 @@
#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
(CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+#define CN93_SDP_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_MBOX_VF_PF_DATA_START + ((ring) * CN93_EPVF_RING_OFFSET))
+
+#define CN93_SDP_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_MBOX_PF_VF_DATA_START + ((ring) * CN93_EPVF_RING_OFFSET))
+
/* ##################### Interrupt Registers ########################## */
#define CN93_SDP_R_ERR_TYPE_START 0x10400
@@ -362,6 +371,10 @@
#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+#define CN98_SDP_MAC_PF_RING_CTL_NPFS(val) (((val) >> 48) & 0xF)
+#define CN98_SDP_MAC_PF_RING_CTL_SRN(val) ((val) & 0xFF)
+#define CN98_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 32) & 0x3F)
+
/* Number of non-queue interrupts in CN93xx */
#define CN93_NUM_NON_IOQ_INTR 16
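To make the layout difference concrete, here is a standalone sketch (not driver code) decoding the starting ring number (SRN) and rings-per-PF (RPPF) from the same MAC_PF_RING_CTL value with the CN93 and CN98 extractors above; the sample register value is invented:

#include <stdint.h>
#include <stdio.h>

#define CN93_SRN(val)	(((val) >> 8) & 0xFF)
#define CN93_RPPF(val)	(((val) >> 16) & 0x3F)
#define CN98_SRN(val)	((val) & 0xFF)
#define CN98_RPPF(val)	(((val) >> 32) & 0x3F)

int main(void)
{
	uint64_t ctl = 0x0000000800000010ULL;	/* hypothetical value */

	printf("CN93 decode: srn=%llu rppf=%llu\n",
	       (unsigned long long)CN93_SRN(ctl),
	       (unsigned long long)CN93_RPPF(ctl));
	printf("CN98 decode: srn=%llu rppf=%llu\n",
	       (unsigned long long)CN98_SRN(ctl),
	       (unsigned long long)CN98_RPPF(ctl));
	return 0;
}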
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
new file mode 100644
index 000000000..e637d7c82
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cnxk_pf.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CNXK_PF_H_
+#define _OCTEP_REGS_CNXK_PF_H_
+
+/* ############################ RST ######################### */
+#define CNXK_RST_BOOT 0x000087E006001600ULL
+#define CNXK_RST_CHIP_DOMAIN_W1S 0x000087E006001810ULL
+#define CNXK_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CNXK_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CNXK_CONFIG_XPANSION_BAR 0x38
+#define CNXK_CONFIG_PCIE_CAP 0x70
+#define CNXK_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CNXK_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CNXK_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CNXK_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CNXK_RING_OFFSET (0x1ULL << 17)
+#define CNXK_EPF_OFFSET (0x1ULL << 25)
+#define CNXK_MAC_OFFSET (0x1ULL << 4)
+#define CNXK_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CNXK_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CNXK_SDP_EPF_SCRATCH 0x209E0
+
+/* ################# Window Registers ######################### */
+#define CNXK_SDP_WIN_WR_ADDR64 0x20000
+#define CNXK_SDP_WIN_RD_ADDR64 0x20010
+#define CNXK_SDP_WIN_WR_DATA64 0x20020
+#define CNXK_SDP_WIN_WR_MASK_REG 0x20030
+#define CNXK_SDP_WIN_RD_DATA64 0x20040
+
+#define CNXK_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CNXK_SDP_EPF_RINFO 0x209F0
+
+#define CNXK_SDP_EPF_RINFO_SRN(val) ((val) & 0x7F)
+#define CNXK_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CNXK_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0x7F)
+
+/* SDP Function select */
+#define CNXK_SDP_FUNC_SEL_EPF_BIT_POS 7
+#define CNXK_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CNXK_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CNXK_SDP_R_IN_CONTROL(ring) \
+ (CNXK_SDP_R_IN_CONTROL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_ENABLE(ring) \
+ (CNXK_SDP_R_IN_ENABLE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS(ring) \
+ (CNXK_SDP_R_IN_CNTS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CNXK_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CNXK_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_R_IN_CTL_MASK (CNXK_R_IN_CTL_RDSIZE | CNXK_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CNXK_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_WMARK(ring) \
+ (CNXK_SDP_R_OUT_WMARK_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS(ring) \
+ (CNXK_SDP_R_OUT_CNTS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CNXK_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CNXK_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CNXK_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CNXK_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CNXK_SDP_R_MBOX_ISM_START 0x10500
+#define CNXK_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CNXK_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CNXK_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CNXK_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_ISM(ring) \
+ (CNXK_SDP_R_MBOX_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_SDP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS_ISM(ring) \
+ (CNXK_SDP_R_IN_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* Interrupt register for the VF. When a mailbox write from the PF happens
+ * for a VF, the corresponding bit is set in this register as well as in
+ * the PF_VF_INT register.
+ *
+ * This is a read-only register; the interrupt is cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* PF_VF_DATA and PF_VF_INT below are PF to VF; VF_PF_DATA is VF to PF */
+#define CNXK_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CNXK_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CNXK_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_SDP_MBOX_VF_PF_DATA_START 0x24000
+#define CNXK_SDP_MBOX_PF_VF_DATA_START 0x22000
+
+#define CNXK_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_MBOX_VF_PF_DATA(ring) \
+ (CNXK_SDP_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+
+#define CNXK_SDP_MBOX_PF_VF_DATA(ring) \
+ (CNXK_SDP_MBOX_PF_VF_DATA_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CNXK_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_SDP_R_ERR_TYPE(ring) \
+ (CNXK_SDP_R_ERR_TYPE_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_MBOX_ISM_START 0x10500
+#define CNXK_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CNXK_SDP_R_MBOX_ISM(ring) \
+ (CNXK_SDP_R_MBOX_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_SDP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_R_IN_CNTS_ISM(ring) \
+ (CNXK_SDP_R_IN_CNTS_ISM_START + ((ring) * CNXK_RING_OFFSET))
+
+#define CNXK_SDP_EPF_MBOX_RINT_START 0x20100
+#define CNXK_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CNXK_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CNXK_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CNXK_SDP_EPF_IRERR_RINT 0x20200
+#define CNXK_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CNXK_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CNXK_SDP_EPF_VFORE_RINT_START 0x20240
+#define CNXK_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CNXK_SDP_EPF_ORERR_RINT 0x20320
+#define CNXK_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CNXK_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CNXK_SDP_EPF_OEI_RINT 0x20400
+#define CNXK_SDP_EPF_OEI_RINT_W1S 0x20500
+#define CNXK_SDP_EPF_OEI_RINT_ENA_W1C 0x20600
+#define CNXK_SDP_EPF_OEI_RINT_ENA_W1S 0x20700
+
+#define CNXK_SDP_EPF_DMA_RINT 0x20800
+#define CNXK_SDP_EPF_DMA_RINT_W1S 0x20810
+#define CNXK_SDP_EPF_DMA_RINT_ENA_W1C 0x20820
+#define CNXK_SDP_EPF_DMA_RINT_ENA_W1S 0x20830
+
+#define CNXK_SDP_EPF_DMA_INT_LEVEL_START 0x20840
+#define CNXK_SDP_EPF_DMA_CNT_START 0x20860
+#define CNXK_SDP_EPF_DMA_TIM_START 0x20880
+
+#define CNXK_SDP_EPF_MISC_RINT 0x208A0
+#define CNXK_SDP_EPF_MISC_RINT_W1S 0x208B0
+#define CNXK_SDP_EPF_MISC_RINT_ENA_W1C 0x208C0
+#define CNXK_SDP_EPF_MISC_RINT_ENA_W1S 0x208D0
+
+#define CNXK_SDP_EPF_DMA_VF_RINT_START 0x208E0
+#define CNXK_SDP_EPF_DMA_VF_RINT_W1S_START 0x20900
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20920
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20940
+
+#define CNXK_SDP_EPF_PP_VF_RINT_START 0x20960
+#define CNXK_SDP_EPF_PP_VF_RINT_W1S_START 0x20980
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x209A0
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x209C0
+
+#define CNXK_SDP_EPF_MBOX_RINT(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_VFIRE_RINT(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_VFORE_RINT(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_DMA_VF_RINT(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+#define CNXK_SDP_EPF_PP_VF_RINT(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+#define CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CNXK_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CNXK_INTR_R_SEND_ISM BIT_ULL(63)
+#define CNXK_INTR_R_OUT_INT BIT_ULL(62)
+#define CNXK_INTR_R_IN_INT BIT_ULL(61)
+#define CNXK_INTR_R_MBOX_INT BIT_ULL(60)
+#define CNXK_INTR_R_RESEND BIT_ULL(59)
+#define CNXK_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CNXK_SDP_EPVF_RING_START 0x26000
+#define CNXK_SDP_IN_RING_TB_MAP_START 0x28000
+#define CNXK_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CNXK_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CNXK_SDP_EPVF_RING(ring) \
+ (CNXK_SDP_EPVF_RING_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_IN_RING_TB_MAP(ring) \
+ (CNXK_SDP_IN_RING_TB_MAP_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_IN_RATE_LIMIT(ring) \
+ (CNXK_SDP_IN_RATE_LIMIT_START + ((ring) * CNXK_EPVF_RING_OFFSET))
+#define CNXK_SDP_MAC_PF_RING_CTL(mac) \
+ (CNXK_SDP_MAC_PF_RING_CTL_START + ((mac) * CNXK_MAC_OFFSET))
+
+#define CNXK_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0x3)
+#define CNXK_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0x7F)
+#define CNXK_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CNXKxx */
+#define CNXK_NUM_NON_IOQ_INTR 32
+
+/* bit 0 for control mbox interrupt */
+#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
+/* bit 1 for firmware heartbeat interrupt */
+#define CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define FW_STATUS_RUNNING 2ULL
+#define CNXK_PEMX_PFX_CSX_PFCFGX(pem, pf, offset) ({ typeof(offset) _off = (offset); \
+ ((0x8e0000008000ULL | \
+ (uint64_t)(pem) << 36 | \
+ (pf) << 18 | \
+ ((_off >> 16) & 1) << 16 | \
+ (_off >> 3) << 3) + \
+ (((_off >> 2) & 1) << 2)); \
+ })
+
+/* Register defines for use with CNXK_PEMX_PFX_CSX_PFCFGX */
+#define CNXK_PCIEEP_VSECST_CTL 0x418
+
+#define CNXK_PEM_BAR4_INDEX 7
+#define CNXK_PEM_BAR4_INDEX_SIZE 0x400000ULL
+#define CNXK_PEM_BAR4_INDEX_OFFSET (CNXK_PEM_BAR4_INDEX * CNXK_PEM_BAR4_INDEX_SIZE)
+
+#endif /* _OCTEP_REGS_CNXK_PF_H_ */
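All per-ring registers in this header follow the same addressing scheme: a fixed START offset plus ring * CNXK_RING_OFFSET (a 128 KB stride). A standalone sketch of the computation, with values copied from the macros above and output purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define RING_OFFSET		(1ULL << 17)	/* CNXK_RING_OFFSET */
#define R_IN_INSTR_DBELL(r)	(0x10040ULL + (r) * RING_OFFSET)

int main(void)
{
	unsigned int ring;

	for (ring = 0; ring < 4; ring++)
		printf("ring %u doorbell at BAR offset 0x%llx\n", ring,
		       (unsigned long long)R_IN_INSTR_DBELL(ring));
	return 0;
}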
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 3c43f8078..4746a6b25 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -143,7 +143,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
 * additional header is filled in by Octeon after the length field in
 * Rx packets. This header contains additional packet information.
*/
- if (oct->caps_enabled)
+ if (oct->conf->fw_info.rx_ol_flags)
oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
@@ -353,11 +353,13 @@ static int __octep_oq_process_rx(struct octep_device *oct,
struct octep_oq *oq, u16 pkts_to_process)
{
struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
struct octep_rx_buffer *buff_info;
struct octep_oq_resp_hw *resp_hw;
u32 pkt, rx_bytes, desc_used;
struct sk_buff *skb;
u16 data_offset;
+ u16 rx_ol_flags;
u32 read_idx;
read_idx = oq->host_read_idx;
@@ -372,7 +374,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
/* Swap the length field that is in Big-Endian to CPU */
buff_info->len = be64_to_cpu(resp_hw->length);
- if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ if (oct->conf->fw_info.rx_ol_flags) {
/* Extended response header is immediately after
* response header (resp_hw)
*/
@@ -384,11 +386,13 @@ static int __octep_oq_process_rx(struct octep_device *oct,
*/
data_offset = OCTEP_OQ_RESP_HW_SIZE +
OCTEP_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
} else {
/* Data is immediately after
* Hardware Rx response header.
*/
data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
}
rx_bytes += buff_info->len;
@@ -444,8 +448,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
skb->dev = oq->netdev;
skb->protocol = eth_type_trans(skb, skb->dev);
- if (resp_hw_ext &&
- resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_RX_CSUM_VERIFIED(rx_ol_flags))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 49feae80d..3b08e2d56 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -20,13 +20,33 @@ struct octep_oq_desc_hw {
dma_addr_t buffer_ptr;
u64 info_ptr;
};
+
static_assert(sizeof(struct octep_oq_desc_hw) == 16);
#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
-#define OCTEP_CSUM_L4_VERIFIED 0x1
-#define OCTEP_CSUM_IP_VERIFIED 0x2
-#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+/* Rx offload flags */
+#define OCTEP_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_RX_OFFLOAD_CKSUM (OCTEP_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_RX_CSUM_L4_VERIFIED | \
+ OCTEP_RX_CSUM_IP_VERIFIED))
/* Extended Response Header in packet data received from Hardware.
* Includes metadata like checksum status.
@@ -35,11 +55,12 @@ static_assert(sizeof(struct octep_oq_desc_hw) == 16);
*/
struct octep_oq_resp_hw_ext {
/* Reserved. */
- u64 reserved:62;
+ u64 rsvd:48;
- /* checksum verified. */
- u64 csum_verified:2;
+ /* offload flags */
+ u16 rx_ol_flags;
};
+
static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8);
#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
@@ -52,6 +73,7 @@ struct octep_oq_resp_hw {
/* The Length of the packet. */
__be64 length;
};
+
static_assert(sizeof(struct octep_oq_resp_hw) == 8);
#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
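A standalone sketch (not driver code) of the Rx checksum decision that __octep_oq_process_rx() now makes: the packet is marked CHECKSUM_UNNECESSARY only when the netdev has NETIF_F_RXCSUM enabled and the hardware set a verification bit in rx_ol_flags. Flag values mirror the header above; everything else is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_CSUM_IP_VERIFIED	(1u << 1)	/* OCTEP_RX_CSUM_IP_VERIFIED */
#define RX_CSUM_L4_VERIFIED	(1u << 2)	/* OCTEP_RX_CSUM_L4_VERIFIED */

static bool rx_csum_trusted(uint16_t rx_ol_flags, bool netdev_rxcsum)
{
	return netdev_rxcsum &&
	       (rx_ol_flags & (RX_CSUM_L4_VERIFIED | RX_CSUM_IP_VERIFIED));
}

int main(void)
{
	printf("verified, rxcsum on:  %d\n",
	       rx_csum_trusted(RX_CSUM_L4_VERIFIED | RX_CSUM_IP_VERIFIED, true));
	printf("verified, rxcsum off: %d\n",
	       rx_csum_trusted(RX_CSUM_L4_VERIFIED | RX_CSUM_IP_VERIFIED, false));
	return 0;
}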
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index d0adb82d6..06851b78a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -21,7 +21,6 @@ static void octep_iq_reset_indices(struct octep_iq *iq)
iq->flush_index = 0;
iq->pkts_processed = 0;
iq->pkt_in_done = 0;
- atomic_set(&iq->instr_pending, 0);
}
/**
@@ -82,7 +81,6 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
}
iq->pkts_processed += compl_pkts;
- atomic_sub(compl_pkts, &iq->instr_pending);
iq->stats.instr_completed += compl_pkts;
iq->stats.bytes_sent += compl_bytes;
iq->stats.sgentry_sent += compl_sg;
@@ -91,7 +89,7 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
- ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ (IQ_INSTR_SPACE(iq) >
OCTEP_WAKE_QUEUE_THRESHOLD))
netif_wake_subqueue(iq->netdev, iq->q_no);
return !budget;
@@ -144,7 +142,6 @@ static void octep_iq_free_pending(struct octep_iq *iq)
dev_kfree_skb_any(skb);
}
- atomic_set(&iq->instr_pending, 0);
iq->flush_index = fi;
netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}
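The patch drops the atomic instr_pending counter; free descriptor space is instead derived from the ring indices via IQ_INSTR_SPACE(), whose definition is not shown in this hunk. A standalone sketch of its assumed shape (pending = write - flush modulo a power-of-two ring size, space = size - pending); the macro's real definition may differ:

#include <stdint.h>
#include <stdio.h>

struct iq_sketch {
	uint32_t host_write_index;
	uint32_t flush_index;
	uint32_t max_count;	/* assumed to be a power of two */
};

static uint32_t iq_instr_pending(const struct iq_sketch *iq)
{
	return (iq->host_write_index - iq->flush_index) & (iq->max_count - 1);
}

static uint32_t iq_instr_space(const struct iq_sketch *iq)
{
	return iq->max_count - iq_instr_pending(iq);
}

int main(void)
{
	struct iq_sketch iq = {
		.host_write_index = 70, .flush_index = 6, .max_count = 1024,
	};

	printf("pending=%u space=%u\n",
	       iq_instr_pending(&iq), iq_instr_space(&iq));
	return 0;
}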
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 86c98b13f..875a2c340 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -36,6 +36,7 @@ struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
};
+
static_assert(sizeof(struct octep_tx_sglist_desc) == 40);
/* Each Scatter/Gather entry sent to hardware holds four pointers.
@@ -60,6 +61,18 @@ struct octep_tx_buffer {
/* Hardware interface Tx statistics */
struct octep_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
/* Packets dropped due to excessive collisions */
u64 xscol;
@@ -76,12 +89,6 @@ struct octep_iface_tx_stats {
*/
u64 scol;
- /* Total octets sent on the interface */
- u64 octs;
-
- /* Total frames sent on the interface */
- u64 pkts;
-
/* Packets sent with an octet count < 64 */
u64 hist_lt64;
@@ -106,12 +113,6 @@ struct octep_iface_tx_stats {
/* Packets sent with an octet count of > 1518 */
u64 hist_gt1518;
- /* Packets sent to a broadcast DMAC */
- u64 bcst;
-
- /* Packets sent to the multicast DMAC */
- u64 mcst;
-
/* Packets sent that experienced a transmit underflow and were
* truncated
*/
@@ -172,9 +173,6 @@ struct octep_iq {
/* Statistics for this input queue. */
struct octep_iq_stats stats;
- /* This field keeps track of the instructions pending in this queue. */
- atomic_t instr_pending;
-
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_tx_desc_hw *desc_ring;
@@ -240,32 +238,53 @@ struct octep_instr_hdr {
/* Reserved3 */
u64 reserved3:1;
};
+
static_assert(sizeof(struct octep_instr_hdr) == 8);
-/* Hardware Tx completion response header */
-struct octep_instr_resp_hdr {
- /* Request ID */
- u64 rid:16;
+/* Tx offload flags */
+#define OCTEP_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_TX_OFFLOAD_CKSUM (OCTEP_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_TX_OFFLOAD_TSO (OCTEP_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_TX_OFFLOAD_UDP_CKSUM))
- /* PCIe port to use for response */
- u64 pcie_port:3;
+#define OCTEP_TX_TSO(flags) ((flags) & \
+ (OCTEP_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_TX_OFFLOAD_UDP_TSO))
- /* Scatter indicator 1=scatter */
- u64 scatter:1;
+struct tx_mdata {
- /* Size of Expected result OR no. of entries in scatter list */
- u64 rlenssz:14;
+ /* offload flags */
+ u16 ol_flags;
- /* Desired destination port for result */
- u64 dport:6;
+ /* gso size */
+ u16 gso_size;
- /* Opcode Specific parameters */
- u64 param:8;
+ /* number of gso segments */
+ u16 gso_segs;
- /* Opcode for the return packet */
- u64 opcode:16;
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
};
-static_assert(sizeof(struct octep_instr_hdr) == 8);
+
+static_assert(sizeof(struct tx_mdata) == 16);
/* 64-byte Tx instruction format.
* Format of instruction for a 64-byte mode input queue.
@@ -284,18 +303,14 @@ struct octep_tx_desc_hw {
struct octep_instr_hdr ih;
u64 ih64;
};
-
- /* Pointer where the response for a RAW mode packet will be written
- * by Octeon.
- */
- u64 rptr;
-
- /* Input Instruction Response Header. */
- struct octep_instr_resp_hdr irh;
-
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
/* Additional headers available in a 64-byte instruction. */
- u64 exhdr[4];
+ u64 exthdr[4];
};
+
static_assert(sizeof(struct octep_tx_desc_hw) == 64);
#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
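A standalone sketch (not driver code) of how a transmit path might fill the new tx_mdata block for a TSO frame, using the offload flag values defined above; the skb-like numbers are invented:

#include <stdint.h>
#include <stdio.h>

#define TX_OFFLOAD_TCP_CKSUM	(1u << 3)	/* OCTEP_TX_OFFLOAD_TCP_CKSUM */
#define TX_OFFLOAD_TCP_TSO	(1u << 5)	/* OCTEP_TX_OFFLOAD_TCP_TSO */

struct tx_mdata_sketch {
	uint16_t ol_flags;
	uint16_t gso_size;
	uint16_t gso_segs;
};

int main(void)
{
	struct tx_mdata_sketch txm = {
		.ol_flags = TX_OFFLOAD_TCP_CKSUM | TX_OFFLOAD_TCP_TSO,
		.gso_size = 1448,	/* MSS */
		.gso_segs = 46,		/* ceil(64 KB / MSS) */
	};

	printf("ol_flags=0x%x gso_size=%u gso_segs=%u\n",
	       txm.ol_flags, txm.gso_size, txm.gso_segs);
	return 0;
}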
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 3c0f55b3e..b86f3224f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -808,6 +808,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 7d741e3ba..1e5aa5397 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -452,4 +452,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index bd4b9661e..98e203a0e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -310,6 +310,13 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
+M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
+ nix_mcast_grp_create_rsp) \
+M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
+ msg_rsp) \
+M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
+ nix_mcast_grp_update_req, \
+ nix_mcast_grp_update_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -836,6 +843,9 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_MCAST_GRP = -436,
+ NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
+ NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
};
/* For NIX RX vtag action */
@@ -1210,6 +1220,68 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+struct nix_mcast_grp_create_req {
+ struct mbox_msghdr hdr;
+#define NIX_MCAST_INGRESS 0
+#define NIX_MCAST_EGRESS 1
+ u8 dir;
+ /* Reserved for future use */
+ u8 reserved[11];
+};
+
+struct nix_mcast_grp_create_rsp {
+ struct mbox_msghdr hdr;
+ /* This mcast_grp_idx should be passed during the MCAM
+ * entry write for multicast. The AF identifies the
+ * multicast table index associated with the group id
+ * and programs it into the MCAM entry. This group id
+ * is also needed for group delete and update requests.
+ */
+ u32 mcast_grp_idx;
+};
+
+struct nix_mcast_grp_destroy_req {
+ struct mbox_msghdr hdr;
+ /* Group id returned by nix_mcast_grp_create_rsp */
+ u32 mcast_grp_idx;
+ /* Set to '1' when the AF itself requests the destroy;
+ * otherwise keep it '0'.
+ */
+ u8 is_af;
+};
+
+struct nix_mcast_grp_update_req {
+ struct mbox_msghdr hdr;
+ /* Group id returned by nix_mcast_grp_create_rsp */
+ u32 mcast_grp_idx;
+ /* Number of multicast/mirror entries requested */
+ u32 num_mce_entry;
+#define NIX_MCE_ENTRY_MAX 64
+#define NIX_RX_RQ 0
+#define NIX_RX_RSS 1
+ /* Receive queue or RSS index within pf_func */
+ u32 rq_rss_index[NIX_MCE_ENTRY_MAX];
+ /* pcifunc is required for both ingress and egress multicast */
+ u16 pcifunc[NIX_MCE_ENTRY_MAX];
+ /* channel is required for egress multicast */
+ u16 channel[NIX_MCE_ENTRY_MAX];
+#define NIX_MCAST_OP_ADD_ENTRY 0
+#define NIX_MCAST_OP_DEL_ENTRY 1
+ /* Destination type. 0: receive queue, 1: RSS */
+ u8 dest_type[NIX_MCE_ENTRY_MAX];
+ u8 op;
+ /* Set to '1' when the AF itself requests the update;
+ * otherwise keep it '0'.
+ */
+ u8 is_af;
+};
+
+struct nix_mcast_grp_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 mce_start_index;
+};
+
/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
@@ -1485,6 +1557,8 @@ struct flow_msg {
#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
u32 mpls_lse[4];
+ u8 icmp_type;
+ u8 icmp_code;
};
struct npc_install_flow_req {
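A standalone sketch (not driver code) of how a requester might populate the new multicast group update request to add a single receive-queue destination; the struct is trimmed to the fields exercised here, the pcifunc value is hypothetical, and mbox transport is out of scope:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NIX_MCAST_OP_ADD_ENTRY	0
#define NIX_RX_RQ		0
#define NIX_MCE_ENTRY_MAX	64

struct mcast_grp_update_sketch {
	uint32_t mcast_grp_idx;
	uint32_t num_mce_entry;
	uint32_t rq_rss_index[NIX_MCE_ENTRY_MAX];
	uint16_t pcifunc[NIX_MCE_ENTRY_MAX];
	uint8_t dest_type[NIX_MCE_ENTRY_MAX];
	uint8_t op;
};

int main(void)
{
	struct mcast_grp_update_sketch req;

	memset(&req, 0, sizeof(req));
	req.mcast_grp_idx = 3;		/* from nix_mcast_grp_create_rsp */
	req.num_mce_entry = 1;
	req.pcifunc[0] = 0x400;		/* hypothetical PF/VF id */
	req.rq_rss_index[0] = 0;	/* deliver to RQ 0 */
	req.dest_type[0] = NIX_RX_RQ;
	req.op = NIX_MCAST_OP_ADD_ENTRY;

	printf("grp %u: add %u entry, pcifunc 0x%x\n",
	       req.mcast_grp_idx, req.num_mce_entry, req.pcifunc[0]);
	return 0;
}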
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 8c0732c9a..b0b4dea54 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -214,6 +214,8 @@ enum key_fields {
NPC_MPLS3_TTL,
NPC_MPLS4_LBTCBOS,
NPC_MPLS4_TTL,
+ NPC_TYPE_ICMP,
+ NPC_CODE_ICMP,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 32645aefd..6a911ea0c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
return start;
}
-static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
+void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
if (!rsrc->bmap)
return;
@@ -935,6 +935,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
hw->total_vfs = (cfg >> 20) & 0xFFF;
hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
+ if (!is_rvu_otx2(rvu))
+ rvu_apr_block_cn10k_init(rvu);
+
/* Init NPA LF's bitmap */
block = &hw->block[BLKADDR_NPA];
if (!block->implemented)
@@ -2633,6 +2636,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 2. Flush and reset SSO/SSOW
* 3. Cleanup pools (NPA)
*/
+
+ /* Free multicast/mirror node associated with the 'pcifunc' */
+ rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
+
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 185c296ea..d44a400e1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -116,11 +116,12 @@ struct rvu_block {
};
struct nix_mcast {
- struct qmem *mce_ctx;
- struct qmem *mcast_buf;
- int replay_pkind;
- int next_free_mce;
- struct mutex mce_lock; /* Serialize MCE updates */
+ struct qmem *mce_ctx;
+ struct qmem *mcast_buf;
+ int replay_pkind;
+ struct rsrc_bmap mce_counter[2];
+ /* Counters for both ingress and egress mcast lists */
+ struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@@ -129,6 +130,23 @@ struct nix_mce_list {
int max;
};
+struct nix_mcast_grp_elem {
+ struct nix_mce_list mcast_mce_list;
+ u32 mcast_grp_idx;
+ u32 pcifunc;
+ int mcam_index;
+ int mce_start_index;
+ struct list_head list;
+ u8 dir;
+};
+
+struct nix_mcast_grp {
+ struct list_head mcast_grp_head;
+ int count;
+ int next_grp_index;
+ struct mutex mcast_grp_lock; /* Serialize MCE updates */
+};
+
/* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata {
u8 lid;
@@ -339,6 +357,7 @@ struct nix_hw {
struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
+ struct nix_mcast_grp mcast_grp;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
@@ -744,6 +763,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
+void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
@@ -850,6 +870,11 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
struct nix_txsch *txsch, bool enable);
+void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc);
+int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx);
+int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx, u16 mcam_index);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
@@ -898,6 +923,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, bool enable);
+u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index);
+void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u64 cfg);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
@@ -923,6 +952,8 @@ int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
u64 bcast_mcast_val, u64 bcast_mcast_mask);
void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
+int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr);
+void npc_mcam_rsrcs_deinit(struct rvu *rvu);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
@@ -944,6 +975,7 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+void rvu_apr_block_cn10k_init(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 72e060cf6..e9bf9231b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ if (iter >= MAX_LMAC_COUNT)
+ continue;
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 0e74c5a22..7fa98aeb3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -559,3 +559,12 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
cfg |= BIT_ULL(1) | BIT_ULL(2);
rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}
+
+void rvu_apr_block_cn10k_init(struct rvu *rvu)
+{
+ u64 reg;
+
+ reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index bd817ee88..e6d7914ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1825,6 +1825,8 @@ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
@@ -1836,6 +1838,16 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
cq_ctx->bpid, cq_ctx->bp_ena);
+ if (!is_rvu_otx2(rvu)) {
+ seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
+ seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
+ seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
+ seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
+ cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
+ cq_ctx->lbpid_low);
+ seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
+ }
+
seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
cq_ctx->update_time, cq_ctx->avg_level);
seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
@@ -1847,6 +1859,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
cq_ctx->qsize, cq_ctx->caching);
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
+ if (!is_rvu_otx2(rvu)) {
+ seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
+ seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
+ cq_ctx->cpt_drop_err_en);
+ }
seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
cq_ctx->drop_ena, cq_ctx->drop);
seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
@@ -2889,6 +2906,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
rule->mask.mpls_lse[3]);
break;
+ case NPC_TYPE_ICMP:
+ seq_printf(s, "%d ", rule->packet.icmp_type);
+ seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
+ break;
+ case NPC_CODE_ICMP:
+ seq_printf(s, "%d ", rule->packet.icmp_code);
+ seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 21b5d71c1..96c04f7d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -5,7 +5,7 @@
*
*/
-#include<linux/bitfield.h>
+#include <linux/bitfield.h>
#include "rvu.h"
#include "rvu_reg.h"
@@ -1235,8 +1235,9 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
- RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
@@ -1354,12 +1355,97 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
return 0;
}
+static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu);
+
+ return 0;
+}
+
+static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ npc_mcam_rsrcs_deinit(rvu);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ block->lf.max = ctx->val.vu16;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_mcam_rsrcs_init(rvu, blkaddr);
+
+ return 0;
+}
+
+static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u16 max_nix0_lf, max_nix1_lf;
+ struct npc_mcam *mcam;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
+ max_nix0_lf = cfg & 0xFFF;
+ cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
+ max_nix1_lf = cfg & 0xFFF;
+
+ /* Do not allow the user to modify the maximum NIX LF count once MCAM
+ * entries have been assigned.
+ */
+ mcam = &rvu->hw->mcam;
+ if (mcam->bmap_fcnt < mcam->bmap_entries) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "mcam entries have already been assigned, can't resize");
+ return -EPERM;
+ }
+
+ if (max_nix0_lf && val.vu16 > max_nix0_lf) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "requested nixlf is greater than the max supported nix0_lf");
+ return -EINVAL;
+ }
+
+ if (max_nix1_lf && val.vu16 > max_nix1_lf) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "requested nixlf is greater than the max supported nix1_lf");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_npc_mcam_high_zone_percent_get,
+ rvu_af_dl_npc_mcam_high_zone_percent_set,
+ rvu_af_dl_npc_mcam_high_zone_percent_validate),
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
+ "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_dl_nix_maxlf_get,
+ rvu_af_dl_nix_maxlf_set,
+ rvu_af_dl_nix_maxlf_validate),
};
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
@@ -1369,12 +1455,6 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
rvu_af_npc_exact_feature_get,
rvu_af_npc_exact_feature_disable,
rvu_af_npc_exact_feature_validate),
- DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
- "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- rvu_af_dl_npc_mcam_high_zone_percent_get,
- rvu_af_dl_npc_mcam_high_zone_percent_set,
- rvu_af_dl_npc_mcam_high_zone_percent_validate),
};
/* Devlink switch mode */
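Assuming the standard devlink parameter interface, the new runtime knob would be driven from user space along the lines of: devlink dev param set pci/0002:01:00.0 name nix_maxlf value 16 cmode runtime (the PCI handle here is hypothetical). The validate hook above rejects the write once any MCAM entries are in use, or when the requested count exceeds what NIX0/NIX1 report in NIX_AF_CONST2.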
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 58744313f..42db213fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -72,12 +72,19 @@ enum nix_makr_fmt_indexes {
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
-#define MC_TBL_SIZE MC_TBL_SZ_512
-#define MC_BUF_CNT MC_BUF_CNT_128
+#define MC_TBL_SIZE MC_TBL_SZ_2K
+#define MC_BUF_CNT MC_BUF_CNT_1024
+
+#define MC_TX_MAX 2048
struct mce {
struct hlist_node node;
+ u32 rq_rss_index;
u16 pcifunc;
+ u16 channel;
+ u8 dest_type;
+ u8 is_active;
+ u8 reserved[2];
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
@@ -165,18 +172,33 @@ static void nix_mce_list_init(struct nix_mce_list *list, int max)
list->max = max;
}
-static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
+static int nix_alloc_mce_list(struct nix_mcast *mcast, int count, u8 dir)
{
+ struct rsrc_bmap *mce_counter;
int idx;
if (!mcast)
- return 0;
+ return -EINVAL;
- idx = mcast->next_free_mce;
- mcast->next_free_mce += count;
+ mce_counter = &mcast->mce_counter[dir];
+ if (!rvu_rsrc_check_contig(mce_counter, count))
+ return -ENOSPC;
+
+ idx = rvu_alloc_rsrc_contig(mce_counter, count);
return idx;
}
+static void nix_free_mce_list(struct nix_mcast *mcast, int count, int start, u8 dir)
+{
+ struct rsrc_bmap *mce_counter;
+
+ if (!mcast)
+ return;
+
+ mce_counter = &mcast->mce_counter[dir];
+ rvu_free_rsrc_contig(mce_counter, count, start);
+}
+
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
int nix_blkaddr = 0, i = 0;
@@ -2956,7 +2978,8 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
- int mce, u8 op, u16 pcifunc, int next, bool eol)
+ int mce, u8 op, u16 pcifunc, int next,
+ int index, u8 mce_op, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -2967,8 +2990,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
aq_req.qidx = mce;
/* Use RSS with RSS index 0 */
- aq_req.mce.op = 1;
- aq_req.mce.index = 0;
+ aq_req.mce.op = mce_op;
+ aq_req.mce.index = index;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
aq_req.mce.next = next;
@@ -2985,6 +3008,206 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static void nix_delete_mcast_mce_list(struct nix_mce_list *mce_list)
+{
+ struct hlist_node *tmp;
+ struct mce *mce;
+
+ /* Scan through the current list */
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ }
+
+ mce_list->count = 0;
+ mce_list->max = 0;
+}
+
+static int nix_get_last_mce_list_index(struct nix_mcast_grp_elem *elem)
+{
+ return elem->mce_start_index + elem->mcast_mce_list.count - 1;
+}
+
+static int nix_update_ingress_mce_list_hw(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem)
+{
+ int idx, last_idx, next_idx, err;
+ struct nix_mce_list *mce_list;
+ struct mce *mce, *prev_mce;
+
+ mce_list = &elem->mcast_mce_list;
+ idx = elem->mce_start_index;
+ last_idx = nix_get_last_mce_list_index(elem);
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ if (!mce->is_active) {
+ if (idx == elem->mce_start_index) {
+ idx++;
+ prev_mce = mce;
+ elem->mce_start_index = idx;
+ continue;
+ } else if (idx == last_idx) {
+ err = nix_blk_setup_mce(rvu, nix_hw, idx - 1, NIX_AQ_INSTOP_WRITE,
+ prev_mce->pcifunc, next_idx,
+ prev_mce->rq_rss_index,
+ prev_mce->dest_type,
+ false);
+ if (err)
+ return err;
+
+ break;
+ }
+ }
+
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ mce->rq_rss_index, mce->dest_type,
+ (next_idx > last_idx) ? true : false);
+ if (err)
+ return err;
+
+ idx++;
+ prev_mce = mce;
+ }
+
+ return 0;
+}
+
+static void nix_update_egress_mce_list_hw(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem)
+{
+ struct nix_mce_list *mce_list;
+ int idx, last_idx, next_idx;
+ struct mce *mce, *prev_mce;
+ u64 regval;
+ u8 eol;
+
+ mce_list = &elem->mcast_mce_list;
+ idx = elem->mce_start_index;
+ last_idx = nix_get_last_mce_list_index(elem);
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (idx > last_idx)
+ break;
+
+ if (!mce->is_active) {
+ if (idx == elem->mce_start_index) {
+ idx++;
+ prev_mce = mce;
+ elem->mce_start_index = idx;
+ continue;
+ } else if (idx == last_idx) {
+ regval = (next_idx << 16) | (1 << 12) | prev_mce->channel;
+ rvu_write64(rvu, nix_hw->blkaddr,
+ NIX_AF_TX_MCASTX(idx - 1),
+ regval);
+ break;
+ }
+ }
+
+ eol = 0;
+ next_idx = idx + 1;
+ /* EOL should be set in last MCE */
+ if (next_idx > last_idx)
+ eol = 1;
+
+ regval = (next_idx << 16) | (eol << 12) | mce->channel;
+ rvu_write64(rvu, nix_hw->blkaddr,
+ NIX_AF_TX_MCASTX(idx),
+ regval);
+ idx++;
+ prev_mce = mce;
+ }
+}
+
+static int nix_del_mce_list_entry(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem,
+ struct nix_mcast_grp_update_req *req)
+{
+ u32 num_entry = req->num_mce_entry;
+ struct nix_mce_list *mce_list;
+ struct mce *mce;
+ bool is_found;
+ int i;
+
+ mce_list = &elem->mcast_mce_list;
+ for (i = 0; i < num_entry; i++) {
+ is_found = false;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ /* If already exists, then delete */
+ if (mce->pcifunc == req->pcifunc[i]) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ is_found = true;
+ break;
+ }
+ }
+
+ if (!is_found)
+ return NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+ }
+
+ mce_list->max = mce_list->count;
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+ return 0;
+}
+
+static int nix_add_mce_list_entry(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_mcast_grp_elem *elem,
+ struct nix_mcast_grp_update_req *req)
+{
+ u32 num_entry = req->num_mce_entry;
+ struct nix_mce_list *mce_list;
+ struct hlist_node *tmp;
+ struct mce *mce;
+ int i;
+
+ mce_list = &elem->mcast_mce_list;
+ for (i = 0; i < num_entry; i++) {
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
+ if (!mce)
+ goto free_mce;
+
+ mce->pcifunc = req->pcifunc[i];
+ mce->channel = req->channel[i];
+ mce->rq_rss_index = req->rq_rss_index[i];
+ mce->dest_type = req->dest_type[i];
+ mce->is_active = 1;
+ hlist_add_head(&mce->node, &mce_list->head);
+ mce_list->count++;
+ }
+
+ mce_list->max += num_entry;
+
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ return nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+ return 0;
+
+free_mce:
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ hlist_del(&mce->node);
+ kfree(mce);
+ mce_list->count--;
+ }
+
+ return -ENOMEM;
+}
+
static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
u16 pcifunc, bool add)
{
@@ -3080,6 +3303,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
/* EOL should be set in last MCE */
err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
mce->pcifunc, next_idx,
+ 0, 1,
(next_idx > last_idx) ? true : false);
if (err)
goto end;
@@ -3160,6 +3384,16 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
return err;
}
+static void nix_setup_mcast_grp(struct nix_hw *nix_hw)
+{
+ struct nix_mcast_grp *mcast_grp = &nix_hw->mcast_grp;
+
+ INIT_LIST_HEAD(&mcast_grp->mcast_grp_head);
+ mutex_init(&mcast_grp->mcast_grp_lock);
+ mcast_grp->next_grp_index = 1;
+ mcast_grp->count = 0;
+}
+
static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
@@ -3184,15 +3418,15 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
continue;
/* save start idx of broadcast mce list */
- pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
/* save start idx of multicast mce list */
- pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
/* save the start idx of promisc mce list */
- pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1, NIX_MCAST_INGRESS);
nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
for (idx = 0; idx < (numvfs + 1); idx++) {
@@ -3207,7 +3441,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->bcast_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
@@ -3215,7 +3449,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->mcast_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
@@ -3223,7 +3457,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
err = nix_blk_setup_mce(rvu, nix_hw,
pfvf->promisc_mce_idx + idx,
NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ pcifunc, 0, 0, 1, true);
if (err)
return err;
}
@@ -3238,13 +3472,30 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
int err, size;
size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
- size = (1ULL << size);
+ size = BIT_ULL(size);
+
+ /* Allocate bitmap for rx mce entries */
+ mcast->mce_counter[NIX_MCAST_INGRESS].max = 256UL << MC_TBL_SIZE;
+ err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ if (err)
+ return -ENOMEM;
+
+ /* Allocate bitmap for tx mce entries */
+ mcast->mce_counter[NIX_MCAST_EGRESS].max = MC_TX_MAX;
+ err = rvu_alloc_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ return -ENOMEM;
+ }
/* Alloc memory for multicast/mirror replication entries */
err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
- (256UL << MC_TBL_SIZE), size);
- if (err)
+ mcast->mce_counter[NIX_MCAST_INGRESS].max, size);
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
return -ENOMEM;
+ }
rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
(u64)mcast->mce_ctx->iova);
@@ -3257,8 +3508,11 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
(8UL << MC_BUF_CNT), size);
- if (err)
+ if (err) {
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_INGRESS]);
+ rvu_free_bitmap(&mcast->mce_counter[NIX_MCAST_EGRESS]);
return -ENOMEM;
+ }
rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
(u64)mcast->mcast_buf->iova);
@@ -3272,6 +3526,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
+ nix_setup_mcast_grp(nix_hw);
+
return nix_setup_mce_tables(rvu, nix_hw);
}
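
To make the sizing above concrete, a worked example with purely illustrative values; MC_TBL_SIZE and the NIX_AF_CONST3 size field are config/hardware dependent and are assumptions here.

```c
/* Assume MC_TBL_SIZE = 4 and NIX_AF_CONST3[19:16] = 4 (illustrative):
 *
 *   ingress entries = 256UL << 4 = 4096
 *   entry size      = BIT_ULL(4) = 16 bytes
 *   mce_ctx memory  = 4096 * 16  = 64 KiB
 *
 * In the code shown here, only the MC_TX_MAX-entry allocation bitmap
 * is set up for the egress direction; no egress qmem is allocated in
 * this function.
 */
```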
@@ -4465,18 +4721,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
@@ -4698,6 +4954,74 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
+static void nix_mcast_update_action(struct rvu *rvu,
+ struct nix_mcast_grp_elem *elem)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_rx_action rx_action = { 0 };
+ struct nix_tx_action tx_action = { 0 };
+ int npc_blkaddr;
+
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (elem->dir == NIX_MCAST_INGRESS) {
+ *(u64 *)&rx_action = npc_get_mcam_action(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index);
+ rx_action.index = elem->mce_start_index;
+ npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+ *(u64 *)&rx_action);
+ } else {
+ *(u64 *)&tx_action = npc_get_mcam_action(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index);
+ tx_action.index = elem->mce_start_index;
+ npc_set_mcam_action(rvu, mcam, npc_blkaddr, elem->mcam_index,
+ *(u64 *)&tx_action);
+ }
+}
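
The `*(u64 *)&rx_action` casts above overlay a bitfield struct on the raw 64-bit MCAM action word so that only the MCE index field is rewritten. A standalone sketch of that punning technique follows; the field layout is illustrative, not the real nix_rx_action definition.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; the real nix_rx_action bitfields differ. */
struct demo_rx_action {
	uint64_t op       : 4;
	uint64_t index    : 20;
	uint64_t match_id : 16;
	uint64_t rsvd     : 24;
};

static uint64_t demo_patch_mce_index(uint64_t action_word, uint32_t mce_idx)
{
	struct demo_rx_action act;

	*(uint64_t *)&act = action_word; /* overlay the register word */
	act.index = mce_idx;             /* rewrite only the index field */
	return *(uint64_t *)&act;        /* convert back to a raw word */
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)demo_patch_mce_index(0x5, 42));
	return 0;
}
```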
+
+static void nix_mcast_update_mce_entry(struct rvu *rvu, u16 pcifunc, u8 is_active)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_for_each_entry(elem, &mcast_grp->mcast_grp_head, list) {
+ struct nix_mce_list *mce_list;
+ struct mce *mce;
+
+ /* Iterate over the group elements and update the active state
+ * of the entry that matches the given pcifunc.
+ */
+ mce_list = &elem->mcast_mce_list;
+ hlist_for_each_entry(mce, &mce_list->head, node) {
+ if (mce->pcifunc == pcifunc) {
+ mce->is_active = is_active;
+ break;
+ }
+ }
+
+ /* Dump the updated list to HW */
+ if (elem->dir == NIX_MCAST_INGRESS)
+ nix_update_ingress_mce_list_hw(rvu, nix_hw, elem);
+ else
+ nix_update_egress_mce_list_hw(rvu, nix_hw, elem);
+
+ /* Update the multicast index in NPC rule */
+ nix_mcast_update_action(rvu, elem);
+ }
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -4709,6 +5033,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ /* Enable the interface if it is in any multicast list */
+ nix_mcast_update_mce_entry(rvu, pcifunc, 1);
+
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
npc_mcam_enable_flows(rvu, pcifunc);
@@ -4733,6 +5060,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ /* Disable the interface if it is in any multicast list */
+ nix_mcast_update_mce_entry(rvu, pcifunc, 0);
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
@@ -5707,3 +6037,361 @@ int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *re
return 0;
}
+
+static struct nix_mcast_grp_elem *rvu_nix_mcast_find_grp_elem(struct nix_mcast_grp *mcast_grp,
+ u32 mcast_grp_idx)
+{
+ struct nix_mcast_grp_elem *iter;
+ bool is_found = false;
+
+ list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
+ if (iter->mcast_grp_idx == mcast_grp_idx) {
+ is_found = true;
+ break;
+ }
+ }
+
+ if (is_found)
+ return iter;
+
+ return NULL;
+}
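
The lookup above is correct; for comparison, the same function in the more common kernel idiom, returning from inside the iteration (a sketch with identical behavior):

```c
static struct nix_mcast_grp_elem *
find_grp_elem(struct nix_mcast_grp *mcast_grp, u32 mcast_grp_idx)
{
	struct nix_mcast_grp_elem *iter;

	/* caller must hold mcast_grp->mcast_grp_lock */
	list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list)
		if (iter->mcast_grp_idx == mcast_grp_idx)
			return iter;

	return NULL;
}
```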
+
+int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, ret;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
+ if (!elem)
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ else
+ ret = elem->mce_start_index;
+
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+ return ret;
+}
+
+void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_mcast_grp_destroy_req dreq = { 0 };
+ struct nix_mcast_grp_update_req ureq = { 0 };
+ struct nix_mcast_grp_update_rsp ursp = { 0 };
+ struct nix_mcast_grp_elem *elem, *tmp;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_for_each_entry_safe(elem, tmp, &mcast_grp->mcast_grp_head, list) {
+ struct nix_mce_list *mce_list;
+ struct hlist_node *tmp;
+ struct mce *mce;
+
+ /* If the pcifunc which created the multicast/mirror
+ * group received an FLR, then delete the entire group.
+ */
+ if (elem->pcifunc == pcifunc) {
+ /* Delete group */
+ dreq.hdr.pcifunc = elem->pcifunc;
+ dreq.mcast_grp_idx = elem->mcast_grp_idx;
+ dreq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+ continue;
+ }
+
+ /* Iterate over the group elements and delete the entry whose
+ * pcifunc received the FLR.
+ */
+ mce_list = &elem->mcast_mce_list;
+ hlist_for_each_entry_safe(mce, tmp, &mce_list->head, node) {
+ if (mce->pcifunc == pcifunc) {
+ ureq.hdr.pcifunc = pcifunc;
+ ureq.num_mce_entry = 1;
+ ureq.mcast_grp_idx = elem->mcast_grp_idx;
+ ureq.op = NIX_MCAST_OP_DEL_ENTRY;
+ ureq.pcifunc[0] = pcifunc;
+ ureq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_update(rvu, &ureq, &ursp);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+}
+
+int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
+ u32 mcast_grp_idx, u16 mcam_index)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, ret = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, mcast_grp_idx);
+ if (!elem)
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ else
+ elem->mcam_index = mcam_index;
+
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+ return ret;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_create(struct rvu *rvu,
+ struct nix_mcast_grp_create_req *req,
+ struct nix_mcast_grp_create_rsp *rsp)
+{
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ struct nix_hw *nix_hw;
+ int blkaddr, err;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return -ENOMEM;
+
+ INIT_HLIST_HEAD(&elem->mcast_mce_list.head);
+ elem->mcam_index = -1;
+ elem->mce_start_index = -1;
+ elem->pcifunc = req->hdr.pcifunc;
+ elem->dir = req->dir;
+ elem->mcast_grp_idx = mcast_grp->next_grp_index++;
+
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+ list_add_tail(&elem->list, &mcast_grp->mcast_grp_head);
+ mcast_grp->count++;
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ rsp->mcast_grp_idx = elem->mcast_grp_idx;
+ return 0;
+}
+
+int rvu_mbox_handler_nix_mcast_grp_destroy(struct rvu *rvu,
+ struct nix_mcast_grp_destroy_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_delete_flow_req uninstall_req = { 0 };
+ struct npc_delete_flow_rsp uninstall_rsp = { 0 };
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ int blkaddr, err, ret = 0;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ /* If AF itself is requesting the deletion,
+ * it already holds the group lock.
+ */
+ if (!req->is_af)
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+ if (!elem) {
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ goto unlock_grp;
+ }
+
+ /* If no mce entries are associated with the group
+ * then just remove it from the global list.
+ */
+ if (!elem->mcast_mce_list.count)
+ goto delete_grp;
+
+ /* Delete the associated mcam entry and
+ * remove all mce entries from the group
+ */
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+ if (elem->mcam_index != -1) {
+ uninstall_req.hdr.pcifunc = req->hdr.pcifunc;
+ uninstall_req.entry = elem->mcam_index;
+ rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
+ }
+
+ nix_free_mce_list(mcast, elem->mcast_mce_list.count,
+ elem->mce_start_index, elem->dir);
+ nix_delete_mcast_mce_list(&elem->mcast_mce_list);
+ mutex_unlock(&mcast->mce_lock);
+
+delete_grp:
+ list_del(&elem->list);
+ kfree(elem);
+ mcast_grp->count--;
+
+unlock_grp:
+ if (!req->is_af)
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ return ret;
+}
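
Worth noting the locking discipline these handlers establish; this is an observation from the code above, not a rule documented in the patch.

```c
/* Lock order used by the group handlers:
 *
 *   mcast_grp->mcast_grp_lock   protects the group list
 *     -> mcast->mce_lock        protects the per-direction MCE tables
 *
 * The req->is_af flag lets AF-internal callers (FLR cleanup, a group
 * update that ends up deleting a whole group) re-enter these handlers
 * while the group lock is already held, without deadlocking on it.
 */
```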
+
+int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
+ struct nix_mcast_grp_update_req *req,
+ struct nix_mcast_grp_update_rsp *rsp)
+{
+ struct nix_mcast_grp_destroy_req dreq = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct nix_mcast_grp_elem *elem;
+ struct nix_mcast_grp *mcast_grp;
+ int blkaddr, err, npc_blkaddr;
+ u16 prev_count, new_count;
+ struct nix_mcast *mcast;
+ struct nix_hw *nix_hw;
+ int i, ret;
+
+ if (!req->num_mce_entry)
+ return 0;
+
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast_grp = &nix_hw->mcast_grp;
+
+ /* If AF itself is requesting the update,
+ * it already holds the group lock.
+ */
+ if (!req->is_af)
+ mutex_lock(&mcast_grp->mcast_grp_lock);
+
+ elem = rvu_nix_mcast_find_grp_elem(mcast_grp, req->mcast_grp_idx);
+ if (!elem) {
+ ret = NIX_AF_ERR_INVALID_MCAST_GRP;
+ goto unlock_grp;
+ }
+
+ /* If any pcifunc matches the group's pcifunc, then we can
+ * delete the entire group.
+ */
+ if (req->op == NIX_MCAST_OP_DEL_ENTRY) {
+ for (i = 0; i < req->num_mce_entry; i++) {
+ if (elem->pcifunc == req->pcifunc[i]) {
+ /* Delete group */
+ dreq.hdr.pcifunc = elem->pcifunc;
+ dreq.mcast_grp_idx = elem->mcast_grp_idx;
+ dreq.is_af = 1;
+ rvu_mbox_handler_nix_mcast_grp_destroy(rvu, &dreq, NULL);
+ ret = 0;
+ goto unlock_grp;
+ }
+ }
+ }
+
+ mcast = &nix_hw->mcast;
+ mutex_lock(&mcast->mce_lock);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, false);
+
+ prev_count = elem->mcast_mce_list.count;
+ if (req->op == NIX_MCAST_OP_ADD_ENTRY) {
+ new_count = prev_count + req->num_mce_entry;
+ if (prev_count)
+ nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+
+ elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+
+ /* The allocator may fail to find a contiguous run of entries */
+ if (elem->mce_start_index < 0) {
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+
+ ret = NIX_AF_ERR_NON_CONTIG_MCE_LIST;
+ goto unlock_mce;
+ }
+
+ ret = nix_add_mce_list_entry(rvu, nix_hw, elem, req);
+ if (ret) {
+ nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+ if (prev_count)
+ elem->mce_start_index = nix_alloc_mce_list(mcast,
+ prev_count,
+ elem->dir);
+
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+
+ goto unlock_mce;
+ }
+ } else {
+ if (!prev_count || prev_count < req->num_mce_entry) {
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr,
+ elem->mcam_index, true);
+ ret = NIX_AF_ERR_INVALID_MCAST_DEL_REQ;
+ goto unlock_mce;
+ }
+
+ nix_free_mce_list(mcast, prev_count, elem->mce_start_index, elem->dir);
+ new_count = prev_count - req->num_mce_entry;
+ elem->mce_start_index = nix_alloc_mce_list(mcast, new_count, elem->dir);
+ ret = nix_del_mce_list_entry(rvu, nix_hw, elem, req);
+ if (ret) {
+ nix_free_mce_list(mcast, new_count, elem->mce_start_index, elem->dir);
+ elem->mce_start_index = nix_alloc_mce_list(mcast, prev_count, elem->dir);
+ if (elem->mcam_index != -1)
+ npc_enable_mcam_entry(rvu, mcam,
+ npc_blkaddr,
+ elem->mcam_index,
+ true);
+
+ goto unlock_mce;
+ }
+ }
+
+ if (elem->mcam_index == -1) {
+ rsp->mce_start_index = elem->mce_start_index;
+ ret = 0;
+ goto unlock_mce;
+ }
+
+ nix_mcast_update_action(rvu, elem);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, elem->mcam_index, true);
+ rsp->mce_start_index = elem->mce_start_index;
+ ret = 0;
+
+unlock_mce:
+ mutex_unlock(&mcast->mce_lock);
+
+unlock_grp:
+ if (!req->is_af)
+ mutex_unlock(&mcast_grp->mcast_grp_lock);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 55639c133..d94b7b88e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
-static int npc_mcam_verify_pf_func(struct rvu *rvu,
- struct mcam_entry *entry_data, u8 intf,
- u16 pcifunc)
-{
- u16 pf_func, pf_func_mask;
-
- if (is_npc_intf_rx(intf))
- return 0;
-
- pf_func_mask = (entry_data->kw_mask[0] >> 32) &
- NPC_KEX_PF_FUNC_MASK;
- pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
-
- pf_func = be16_to_cpu((__force __be16)pf_func);
- if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
- ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
- (pcifunc & ~RVU_PFVF_FUNC_MASK)))
- return -EINVAL;
-
- return 0;
-}
-
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -599,8 +577,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}
-static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index)
+u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
@@ -609,6 +587,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}
+void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, u64 cfg)
+{
+ int bank = npc_get_bank(mcam, index);
+
+ index &= (mcam->banksize - 1);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg);
+}
+
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
@@ -1669,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
struct npc_coalesced_kpu_prfl *img_data = NULL;
int i = 0, rc = -EINVAL;
void __iomem *kpu_prfl_addr;
- u16 offset;
+ u32 offset;
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
@@ -1840,7 +1828,21 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
-static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
+void npc_mcam_rsrcs_deinit(struct rvu *rvu)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ bitmap_free(mcam->bmap);
+ bitmap_free(mcam->bmap_reverse);
+ kfree(mcam->entry2pfvf_map);
+ kfree(mcam->cntr2pfvf_map);
+ kfree(mcam->entry2cntr_map);
+ kfree(mcam->cntr_refcnt);
+ kfree(mcam->entry2target_pffunc);
+ kfree(mcam->counters.bmap);
+}
+
+int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -1884,24 +1886,22 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
/* Allocate bitmaps for managing MCAM entries */
- mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap)
return -ENOMEM;
- mcam->bmap_reverse = devm_kcalloc(rvu->dev,
- BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap_reverse)
- return -ENOMEM;
+ goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
+
if (!mcam->entry2pfvf_map)
- return -ENOMEM;
+ goto free_bmap_reverse;
/* Reserve 1/8th of MCAM entries at the bottom for low priority
* allocations and another 1/8th at the top for high priority
@@ -1920,31 +1920,31 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
*/
err = rvu_alloc_bitmap(&mcam->counters);
if (err)
- return err;
+ goto free_entry_map;
- mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr2pfvf_map)
- goto free_mem;
+ goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking
* counter's reference count.
*/
- mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->entry2cntr_map)
- goto free_mem;
+ goto free_cntr_map;
- mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr_refcnt)
- goto free_mem;
+ goto free_entry_cntr_map;
/* Alloc memory for saving target device of mcam rule */
- mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2target_pffunc = kmalloc_array(mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
if (!mcam->entry2target_pffunc)
- goto free_mem;
+ goto free_cntr_refcnt;
for (index = 0; index < mcam->bmap_entries; index++) {
mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
@@ -1958,8 +1958,21 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
return 0;
-free_mem:
+free_cntr_refcnt:
+ kfree(mcam->cntr_refcnt);
+free_entry_cntr_map:
+ kfree(mcam->entry2cntr_map);
+free_cntr_map:
+ kfree(mcam->cntr2pfvf_map);
+free_cntr_bmap:
kfree(mcam->counters.bmap);
+free_entry_map:
+ kfree(mcam->entry2pfvf_map);
+free_bmap_reverse:
+ bitmap_free(mcam->bmap_reverse);
+free_bmap:
+ bitmap_free(mcam->bmap);
+
return -ENOMEM;
}
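
The conversion from devm_* allocations to plain kcalloc/bitmap_zalloc is what forces the unwind ladder above: nothing is freed automatically anymore, so each failure jumps to the label that releases everything allocated so far, in reverse order. A generic, self-contained sketch of that shape (not driver code):

```c
#include <stdlib.h>

/* Allocate in order, free in reverse order, one label per resource. */
static int init_resources(void **a, void **b, void **c)
{
	*a = malloc(32);
	if (!*a)
		return -1;

	*b = malloc(32);
	if (!*b)
		goto free_a;

	*c = malloc(32);
	if (!*c)
		goto free_b;

	return 0;

free_b:
	free(*b);
free_a:
	free(*a);
	return -1;
}
```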
@@ -2167,7 +2180,7 @@ void rvu_npc_freemem(struct rvu *rvu)
struct npc_mcam *mcam = &rvu->hw->mcam;
kfree(pkind->rsrc.bmap);
- kfree(mcam->counters.bmap);
+ npc_mcam_rsrcs_deinit(rvu);
if (rvu->kpu_prfl_addr)
iounmap(rvu->kpu_prfl_addr);
else
@@ -2819,12 +2832,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (!is_pffunc_af(pcifunc) &&
- npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
- rc = NPC_MCAM_INVALID_REQ;
- goto exit;
- }
-
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@@ -3176,10 +3183,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- req->hdr.pcifunc))
- return NPC_MCAM_INVALID_REQ;
-
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 114e4ec21..c75669c8f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -51,6 +51,8 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS3_TTL] = "lse depth 3 ttl",
[NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
[NPC_MPLS4_TTL] = "lse depth 4",
+ [NPC_TYPE_ICMP] = "icmp type",
+ [NPC_CODE_ICMP] = "icmp code",
[NPC_UNKNOWN] = "unknown",
};
@@ -526,6 +528,8 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
+ NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -555,7 +559,7 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
- u64 tcp_udp_sctp;
+ u64 proto_flags;
int hdr;
if (is_npc_intf_tx(intf))
@@ -566,18 +570,20 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
*features |= BIT_ULL(hdr);
}
- tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ proto_flags = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
- BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp) {
+ if (*features & proto_flags) {
if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
- *features &= ~tcp_udp_sctp;
+ *features &= ~proto_flags;
else
*features |= BIT_ULL(NPC_IPPROTO_TCP) |
BIT_ULL(NPC_IPPROTO_UDP) |
- BIT_ULL(NPC_IPPROTO_SCTP);
+ BIT_ULL(NPC_IPPROTO_SCTP) |
+ BIT_ULL(NPC_IPPROTO_ICMP);
}
/* for AH/ICMP/ICMPv6/, check if corresponding layer type is present in the key */
@@ -971,6 +978,10 @@ do { \
ntohs(mask->sport), 0);
NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_TYPE_ICMP, icmp_type, pkt->icmp_type, 0,
+ mask->icmp_type, 0);
+ NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
+ mask->icmp_code, 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
@@ -1106,13 +1117,40 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
}
}
-static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
- struct mcam_entry *entry,
- struct npc_install_flow_req *req,
- u16 target, bool pf_set_vfs_mac)
+static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
+ u64 op, void *action)
+{
+ int mce_index;
+
+ /* If a PF/VF is installing a multicast rule, it must already
+ * have created a group for the multicast/mirror list; otherwise
+ * reject the configuration.
+ * In this case req->index carries the multicast/mirror group
+ * index, which is translated to the MCE start index here.
+ */
+ if (req->hdr.pcifunc &&
+ (op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
+ mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index);
+ if (mce_index < 0)
+ return mce_index;
+
+ if (op == NIX_RX_ACTIONOP_MCAST)
+ ((struct nix_rx_action *)action)->index = mce_index;
+ else
+ ((struct nix_tx_action *)action)->index = mce_index;
+ }
+
+ return 0;
+}
+
+static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req,
+ u16 target, bool pf_set_vfs_mac)
{
struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
+ int ret;
if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
req->chan_mask = 0x0; /* Do not care channel */
@@ -1124,6 +1162,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = req->op;
action.index = req->index;
+
+ ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
+ if (ret)
+ return ret;
+
action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg;
@@ -1155,14 +1198,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+
+ return 0;
}
-static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
- struct mcam_entry *entry,
- struct npc_install_flow_req *req, u16 target)
+static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
u64 mask = ~0ULL;
+ int ret;
/* If AF is installing then do not care about
* PF_FUNC in Send Descriptor
@@ -1176,6 +1222,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
*(u64 *)&action = 0x00;
action.op = req->op;
action.index = req->index;
+
+ ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
+ if (ret)
+ return ret;
+
action.match_id = req->match_id;
entry->action = *(u64 *)&action;
@@ -1191,6 +1242,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+
+ return 0;
}
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
@@ -1220,10 +1273,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
req->intf, blkaddr);
- if (is_npc_intf_rx(req->intf))
- npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
- else
- npc_update_tx_entry(rvu, pfvf, entry, req, target);
+ if (is_npc_intf_rx(req->intf)) {
+ err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
+ if (err)
+ return err;
+ } else {
+ err = npc_update_tx_entry(rvu, pfvf, entry, req, target);
+ if (err)
+ return err;
+ }
/* Default unicast rules do not exist for TX */
if (is_npc_intf_tx(req->intf))
@@ -1340,6 +1398,10 @@ find_rule:
return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
req->index, req->match_id);
+ if (owner && req->op == NIX_RX_ACTIONOP_MCAST)
+ return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc,
+ req->index, entry_index);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 18c1c9f36..6f73ad980 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -734,5 +734,7 @@
#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
+#define LMTST_THROTTLE_MASK GENMASK_ULL(38, 35)
+#define LMTST_WR_PEND_MAX 15
#endif /* RVU_REG_H */
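
The throttle mask is new in this hunk and its consumers are not shown here, so the helpers below are a hedged sketch of how a GENMASK_ULL field like this is normally packed and read with the standard bitfield macros.

```c
#include <linux/bitfield.h>

/* Illustrative helpers; the real users of these defines live in
 * other files of this patch.
 */
static inline u64 lmtst_set_throttle(u64 reg)
{
	reg &= ~LMTST_THROTTLE_MASK;
	return reg | FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
}

static inline u64 lmtst_get_throttle(u64 reg)
{
	return FIELD_GET(LMTST_THROTTLE_MASK, reg);
}
```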
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index edc9367b1..5ef406c7e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -340,11 +340,12 @@ struct nix_aq_res_s {
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
- u64 rsvd_64_67 : 4;
+ u64 lbp_ena : 1;
+ u64 lbpid_low : 3;
u64 bp_ena : 1;
- u64 rsvd_69_71 : 3;
+ u64 lbpid_med : 3;
u64 bpid : 9;
- u64 rsvd_81_83 : 3;
+ u64 lbpid_high : 3;
u64 qint_idx : 7;
u64 cq_err : 1;
u64 cint_idx : 7;
@@ -358,10 +359,14 @@ struct nix_cq_ctx_s {
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
- u64 rsvd_210_211 : 2;
- u64 substream : 20;
+ u64 cpt_drop_err_en : 1;
+ u64 rsvd_211 : 1;
+ u64 substream : 12;
+ u64 stash_thresh : 4;
+ u64 lbp_frac : 4;
u64 caching : 1;
- u64 rsvd_233_235 : 3;
+ u64 stashing : 1;
+ u64 rsvd_234_235 : 2;
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
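
A quick width check of the repacked context words, derived entirely from the fields shown above:

```c
/* rsvd_64_67 (4)    -> lbp_ena (1) + lbpid_low (3)
 * rsvd_69_71 (3)    -> lbpid_med (3)
 * rsvd_81_83 (3)    -> lbpid_high (3)
 * rsvd_210_211 (2)  -> cpt_drop_err_en (1) + rsvd_211 (1)
 * substream (20)    -> substream (12) + stash_thresh (4) + lbp_frac (4)
 * rsvd_233_235 (3)  -> stashing (1) + rsvd_234_235 (2)
 *
 * Every split preserves the original bit count, so the offsets of the
 * unchanged fields stay where the hardware expects them.
 */
```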
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 8b7fc0af9..7f786de61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -834,21 +834,26 @@ static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
return 0;
}
-/* RSS context configuration */
-static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc,
- u32 *rss_context, bool delete)
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
int ret, idx;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
- *rss_context >= MAX_RSS_GROUPS)
+ if (rxfh->rss_context)
+ rss_context = rxfh->rss_context;
+
+ if (rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ rss_context >= MAX_RSS_GROUPS)
return -EINVAL;
rss = &pfvf->hw.rss_info;
@@ -858,40 +863,45 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
return -EIO;
}
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
+ if (rxfh->key) {
+ memcpy(rss->key, rxfh->key, sizeof(rss->key));
otx2_set_rss_key(pfvf);
}
- if (delete)
- return otx2_rss_ctx_delete(pfvf, *rss_context);
+ if (rxfh->rss_delete)
+ return otx2_rss_ctx_delete(pfvf, rss_context);
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, &rss_context);
+ rxfh->rss_context = rss_context;
if (ret)
return ret;
}
- if (indir) {
- rss_ctx = rss->rss_ctx[*rss_context];
+ if (rxfh->indir) {
+ rss_ctx = rss->rss_ctx[rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss_ctx->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = rxfh->indir[idx];
}
- otx2_set_rss_table(pfvf, *rss_context);
+ otx2_set_rss_table(pfvf, rss_context);
return 0;
}
-static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc, u32 rss_context)
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
struct otx2_nic *pfvf = netdev_priv(dev);
struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
+ u32 *indir = rxfh->indir;
int idx, rx_queues;
rss = &pfvf->hw.rss_info;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->rss_context)
+ rss_context = rxfh->rss_context;
if (!indir)
return 0;
@@ -913,30 +923,12 @@ static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
for (idx = 0; idx < rss->rss_size; idx++)
indir[idx] = rss_ctx->ind_tbl[idx];
}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+ if (rxfh->key)
+ memcpy(rxfh->key, rss->key, sizeof(rss->key));
return 0;
}
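
For readers new to the reworked ethtool API: both callbacks now receive a single struct ethtool_rxfh_param instead of loose indir/key/hfunc pointers. A minimal sketch of a .set_rxfh handler under this API, reduced from the driver logic above; the bodies are placeholders, not otx2 code.

```c
static int demo_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (rxfh->key) {
		/* program rxfh->key into the NIC hash key */
	}

	if (rxfh->rss_delete) {
		/* tear down context rxfh->rss_context */
		return 0;
	}

	if (rxfh->indir) {
		/* program rxfh->indir[] into the indirection table */
	}

	return 0;
}
```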
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
-{
- return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
- DEFAULT_RSS_CONTEXT_GROUP);
-}
-
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
-{
-
- u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
-
- return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
-}
-
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -1317,6 +1309,7 @@ static void otx2_get_fec_stats(struct net_device *netdev,
}
static const struct ethtool_ops otx2_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1339,8 +1332,6 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
- .get_rxfh_context = otx2_get_rxfh_context,
- .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1440,6 +1431,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
}
static const struct ethtool_ops otx2vf_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1458,8 +1450,6 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
- .get_rxfh_context = otx2_get_rxfh_context,
- .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b40bd0e46..3f46d5e0f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1933,7 +1933,7 @@ int otx2_open(struct net_device *netdev)
* mcam entries are enabled to receive the packets. Hence disable the
* packet I/O.
*/
- if (err == EIO)
+ if (err == -EIO)
goto err_disable_rxtx;
else if (err)
goto err_tx_stop_queues;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index db1e0e0e8..60ee7ae2c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -29,6 +29,8 @@
#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+#define MCAST_INVALID_GRP (-1U)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -47,6 +49,7 @@ struct otx2_tc_flow {
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
+ u32 mcast_grp_idx;
u64 rate;
u32 burst;
bool is_pps;
@@ -355,22 +358,96 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc;
}
+static int otx2_tc_update_mcast(struct otx2_nic *nic,
+ struct npc_install_flow_req *req,
+ struct netlink_ext_ack *extack,
+ struct otx2_tc_flow *node,
+ struct nix_mcast_grp_update_req *ureq,
+ u8 num_intf)
+{
+ struct nix_mcast_grp_update_req *grp_update_req;
+ struct nix_mcast_grp_create_req *creq;
+ struct nix_mcast_grp_create_rsp *crsp;
+ u32 grp_index;
+ int rc;
+
+ mutex_lock(&nic->mbox.lock);
+ creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
+ if (!creq) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ creq->dir = NIX_MCAST_INGRESS;
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
+ goto error;
+ }
+
+ crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0,
+ &creq->hdr);
+ if (IS_ERR(crsp)) {
+ rc = PTR_ERR(crsp);
+ goto error;
+ }
+
+ grp_index = crsp->mcast_grp_idx;
+ grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
+ if (!grp_update_req) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ ureq->op = NIX_MCAST_OP_ADD_ENTRY;
+ ureq->mcast_grp_idx = grp_index;
+ ureq->num_mce_entry = num_intf;
+ ureq->pcifunc[0] = nic->pcifunc;
+ ureq->channel[0] = nic->hw.tx_chan_base;
+
+ ureq->dest_type[0] = NIX_RX_RSS;
+ ureq->rq_rss_index[0] = 0;
+ memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));
+
+ /* Send message to AF */
+ rc = otx2_sync_mbox_msg(&nic->mbox);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
+ goto error;
+ }
+
+ mutex_unlock(&nic->mbox.lock);
+ req->op = NIX_RX_ACTIONOP_MCAST;
+ req->index = grp_index;
+ node->mcast_grp_idx = grp_index;
+ return 0;
+
+error:
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
struct npc_install_flow_req *req,
struct flow_cls_offload *f,
struct otx2_tc_flow *node)
{
+ struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
struct netlink_ext_ack *extack = f->common.extack;
+ bool pps = false, mcast = false;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps = false;
+ u8 num_intf = 1;
+ int err, i;
u64 rate;
- int err;
- int i;
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
@@ -442,11 +519,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
req->index = act->rx_queue;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
+ target = act->dev;
+ priv = netdev_priv(target);
+ dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
+ dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
+ dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
+ dummy_grp_update_req.rq_rss_index[num_intf] = 0;
+ mcast = true;
+ num_intf++;
+ break;
+
default:
return -EOPNOTSUPP;
}
}
+ if (mcast) {
+ err = otx2_tc_update_mcast(nic, req, extack, node,
+ &dummy_grp_update_req,
+ num_intf);
+ if (err)
+ return err;
+ }
+
if (nr_police > 1) {
NL_SET_ERR_MSG_MOD(extack,
"rate limit police offload requires a single action");
@@ -541,6 +637,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -591,6 +688,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
+ u32 val;
flow_rule_match_control(rule, &match);
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -599,12 +697,14 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
if (ntohs(flow_spec->etype) == ETH_P_IP) {
- flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
flow_mask->ip_flag = IPV4_FLAG_MORE;
req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
- flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_spec->next_header = val ?
+ IPPROTO_FRAGMENT : 0;
flow_mask->next_header = 0xff;
req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
} else {
@@ -815,6 +915,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(rule, &match);
+
+ flow_spec->icmp_type = match.key->type;
+ flow_mask->icmp_type = match.mask->type;
+ req->features |= BIT_ULL(NPC_TYPE_ICMP);
+
+ flow_spec->icmp_code = match.key->code;
+ flow_mask->icmp_code = match.mask->code;
+ req->features |= BIT_ULL(NPC_CODE_ICMP);
+ }
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
@@ -1052,6 +1165,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct nix_mcast_grp_destroy_req *grp_destroy_req;
struct otx2_tc_flow *flow_node;
int err;
@@ -1085,6 +1199,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock);
}
+ /* Remove the multicast/mirror related nodes */
+ if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
+ mutex_lock(&nic->mbox.lock);
+ grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
+ grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
+ otx2_sync_mbox_msg(&nic->mbox);
+ mutex_unlock(&nic->mbox.lock);
+ }
+
free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
@@ -1124,6 +1247,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
new_node->prio = tc_flow_cmd->common.prio;
+ new_node->mcast_grp_idx = MCAST_INVALID_GRP;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1e77bbf5d..1723e9912 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -382,6 +382,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
otx2_qos_read_txschq_cfg_tl(node, cfg);
cnt = cfg->static_node_pos[node->level];
cfg->schq_contig_list[node->level][cnt] = node->schq;
+ cfg->schq_index_used[node->level][cnt] = true;
cfg->schq_contig[node->level]++;
cfg->static_node_pos[node->level]++;
otx2_qos_read_txschq_cfg_schq(node, cfg);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a1231203e..caa13b9ce 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1158,15 +1158,18 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
+ dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
struct mtk_tx_dma_v2 *txd;
txd = eth->scratch_ring + i * soc->txrx.txd_size;
- txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ txd->txd1 = addr;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
(i + 1) * soc->txrx.txd_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
txd->txd4 = 0;
if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
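
The new TX_DMA_PREP_ADDR64() fold-in only matters on MTK_36BIT_DMA parts, where a buffer address no longer fits in the 32-bit txd1 field. An illustrative split follows; the real macro lives in the mtk headers, and the 4-bit high field is an assumption based on the 36-bit capability name.

```c
static inline u32 demo_addr_lo(dma_addr_t addr)
{
	return lower_32_bits(addr);		/* bits 31:0 -> txd1 */
}

static inline u32 demo_addr_hi(dma_addr_t addr)
{
	return upper_32_bits(addr) & 0xf;	/* bits 35:32 -> txd3 field */
}
```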
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 9a6744c0d..61334a710 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -670,7 +670,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
void *buf;
int s;
- page = __dev_alloc_pages(GFP_KERNEL, 0);
+ page = __dev_alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
@@ -691,10 +691,11 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
struct mtk_wdma_desc *desc = desc_ptr;
+ u32 ctrl;
desc->buf0 = cpu_to_le32(buf_phys);
if (!mtk_wed_is_v3_or_greater(dev->hw)) {
- u32 txd_size, ctrl;
+ u32 txd_size;
txd_size = dev->wlan.init_buf(buf, buf_phys,
token++);
@@ -708,11 +709,11 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
MTK_WED_BUF_SIZE - txd_size);
- desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
} else {
- desc->ctrl = cpu_to_le32(token << 16);
+ ctrl = token << 16 | TX_DMA_PREP_ADDR64(buf_phys);
}
+ desc->ctrl = cpu_to_le32(ctrl);
desc_ptr += desc_size;
buf += MTK_WED_BUF_SIZE;
@@ -811,6 +812,7 @@ mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
buf_phys = page_phys;
for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
desc->buf0 = cpu_to_le32(buf_phys);
+ desc->token = cpu_to_le32(RX_DMA_PREP_ADDR64(buf_phys));
buf_phys += MTK_WED_PAGE_BUF_SIZE;
desc++;
}
@@ -1072,13 +1074,13 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
- wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (!mtk_wed_get_rx_capa(dev))
return;
@@ -1091,7 +1093,6 @@ static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
- mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
@@ -2603,9 +2604,6 @@ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
- if (!dev->running)
- return;
-
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index ae44ad5f8..d58b07e7e 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -142,7 +142,8 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
dma_addr_t addr;
void *buf;
- buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
+ buf = page_frag_alloc(&q->cache, q->buf_size,
+ GFP_ATOMIC | GFP_DMA32);
if (!buf)
break;
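
GFP_DMA32 pins the refill buffers below 4 GB. The hunk does not say why, but the usual reason is a consumer (here, presumably the WO MCU) that only handles 32-bit buffer addresses; the standard way to express the same constraint at the device level is shown below, as an assumption rather than something this patch does.

```c
/* assumption: 32-bit addressing limit on the consumer of these buffers */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err)
	return err;
```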
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 164a13272..619e1c3ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1258,8 +1258,8 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
return -EINVAL;
}
-static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
- u8 *hfunc)
+static int mlx4_en_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
@@ -1269,19 +1269,19 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
rss_rings = rounddown_pow_of_two(rss_rings);
for (i = 0; i < n; i++) {
- if (!ring_index)
+ if (!rxfh->indir)
break;
- ring_index[i] = i % rss_rings;
+ rxfh->indir[i] = i % rss_rings;
}
- if (key)
- memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
- if (hfunc)
- *hfunc = priv->rss_hash_fn;
+ if (rxfh->key)
+ memcpy(rxfh->key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
+ rxfh->hfunc = priv->rss_hash_fn;
return 0;
}
-static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
- const u8 *key, const u8 hfunc)
+static int mlx4_en_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
@@ -1295,12 +1295,12 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
* between rings
*/
for (i = 0; i < n; i++) {
- if (!ring_index)
+ if (!rxfh->indir)
break;
- if (i > 0 && !ring_index[i] && !rss_rings)
+ if (i > 0 && !rxfh->indir[i] && !rss_rings)
rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: n)))
+ if (rxfh->indir[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
@@ -1311,8 +1311,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
if (!is_power_of_2(rss_rings))
return -EINVAL;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
- err = mlx4_en_check_rxfh_func(dev, hfunc);
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE) {
+ err = mlx4_en_check_rxfh_func(dev, rxfh->hfunc);
if (err)
return err;
}
@@ -1323,12 +1323,12 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
mlx4_en_stop_port(dev, 1);
}
- if (ring_index)
+ if (rxfh->indir)
priv->prof->rss_rings = rss_rings;
- if (key)
- memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE)
- priv->rss_hash_fn = hfunc;
+ if (rxfh->key)
+ memcpy(priv->rss_key, rxfh->key, MLX4_EN_RSS_KEY_SIZE);
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->rss_hash_fn = rxfh->hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
index 28d02749d..7659ad21e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/crdump.c
@@ -55,7 +55,10 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data)
ret = mlx5_vsc_sem_set_space(dev, MLX5_SEMAPHORE_SW_RESET,
MLX5_VSC_LOCK);
if (ret) {
- mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
+ if (ret == -EBUSY)
+ mlx5_core_info(dev, "SW reset semaphore is already in use\n");
+ else
+ mlx5_core_warn(dev, "Failed to lock SW reset semaphore\n");
goto unlock_gw;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 8ce5c8bcd..d74a5aaf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -36,11 +36,17 @@ static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
return 0;
}
+struct mlx5_dpll_synce_status {
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ bool ho_acq;
+ bool oper_freq_measure;
+ s32 frequency_diff;
+};
+
static int
mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
- enum mlx5_msees_admin_status *admin_status,
- enum mlx5_msees_oper_status *oper_status,
- bool *ho_acq)
+ struct mlx5_dpll_synce_status *synce_status)
{
u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
@@ -50,11 +56,11 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
MLX5_REG_MSEES, 0, 0);
if (err)
return err;
- if (admin_status)
- *admin_status = MLX5_GET(msees_reg, out, admin_status);
- *oper_status = MLX5_GET(msees_reg, out, oper_status);
- if (ho_acq)
- *ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status);
+ synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
+ synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -67,21 +73,23 @@ mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
MLX5_SET(msees_reg, in, field_select,
MLX5_MSEES_FIELD_SELECT_ENABLE |
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE |
MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
MLX5_SET(msees_reg, in, admin_status, admin_status);
+ MLX5_SET(msees_reg, in, admin_freq_measure, true);
return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MSEES, 0, 1);
}
static enum dpll_lock_status
-mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
+mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
{
- switch (oper_status) {
+ switch (synce_status->oper_status) {
case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
fallthrough;
case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
- return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
- DPLL_LOCK_STATUS_LOCKED;
+ return synce_status->ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
+ DPLL_LOCK_STATUS_LOCKED;
case MLX5_MSEES_OPER_STATUS_HOLDOVER:
fallthrough;
case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
@@ -92,31 +100,37 @@ mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
}
static enum dpll_pin_state
-mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status,
- enum mlx5_msees_oper_status oper_status)
+mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
- return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
- (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
- oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
+ return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
+ (synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
+ synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
}
+static int
+mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
+ s64 *ffo)
+{
+ if (!synce_status->oper_freq_measure)
+ return -ENODATA;
+ *ffo = synce_status->frequency_diff;
+ return 0;
+}
+
static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
void *priv,
enum dpll_lock_status *status,
struct netlink_ext_ack *extack)
{
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
- bool ho_acq;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL,
- &oper_status, &ho_acq);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
return err;
-
- *status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ *status = mlx5_dpll_lock_status_get(&synce_status);
return 0;
}
@@ -128,18 +142,9 @@ static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
return 0;
}
-static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll,
- void *priv,
- enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- return mode == DPLL_MODE_MANUAL;
-}
-
static const struct dpll_device_ops mlx5_dpll_device_ops = {
.lock_status_get = mlx5_dpll_device_lock_status_get,
.mode_get = mlx5_dpll_device_mode_get,
- .mode_supported = mlx5_dpll_device_mode_supported,
};
static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
@@ -160,16 +165,14 @@ static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
enum dpll_pin_state *state,
struct netlink_ext_ack *extack)
{
- enum mlx5_msees_admin_status admin_status;
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = pin_priv;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
- &oper_status, NULL);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
return err;
- *state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ *state = mlx5_dpll_pin_state_get(&synce_status);
return 0;
}
@@ -188,10 +191,25 @@ static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
}
+static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll_synce_status synce_status;
+ struct mlx5_dpll *mdpll = pin_priv;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
+ if (err)
+ return err;
+ return mlx5_dpll_pin_ffo_get(&synce_status, ffo);
+}
+
static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
.direction_get = mlx5_dpll_pin_direction_get,
.state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
.state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
+ .ffo_get = mlx5_dpll_ffo_get,
};
static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
@@ -211,19 +229,16 @@ static void mlx5_dpll_periodic_work(struct work_struct *work)
{
struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
work.work);
- enum mlx5_msees_admin_status admin_status;
- enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll_synce_status synce_status;
enum dpll_lock_status lock_status;
enum dpll_pin_state pin_state;
- bool ho_acq;
int err;
- err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
- &oper_status, &ho_acq);
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
if (err)
goto err_out;
- lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
- pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ lock_status = mlx5_dpll_lock_status_get(&synce_status);
+ pin_state = mlx5_dpll_pin_state_get(&synce_status);
if (!mdpll->last.valid)
goto invalid_out;
@@ -246,7 +261,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
{
if (mdpll->tracking_netdev)
return;
- netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
mdpll->tracking_netdev = netdev;
}
@@ -254,7 +269,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
if (!mdpll->tracking_netdev)
return;
- netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ dpll_netdev_pin_clear(mdpll->tracking_netdev);
mdpll->tracking_netdev = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 729a11b5f..55c6ace0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -72,7 +72,6 @@ struct page_pool;
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
-#define MLX5E_MAX_NUM_TC 8
#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE
#define MLX5_RX_HEADROOM NET_SKB_PAD
@@ -364,7 +363,7 @@ struct mlx5e_cq {
/* control */
struct net_device *netdev;
struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
+ struct workqueue_struct *workqueue;
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
@@ -484,10 +483,12 @@ struct mlx5e_xdp_info_fifo {
struct mlx5e_xdpsq;
struct mlx5e_xmit_data;
+struct xsk_tx_metadata;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- int);
+ int,
+ struct xsk_tx_metadata *);
struct mlx5e_xdpsq {
/* data path */
@@ -756,7 +757,7 @@ struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_xdpsq rq_xdpsq;
- struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq sq[MLX5_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
bool xdp;
@@ -806,7 +807,7 @@ struct mlx5e_channels {
struct mlx5e_channel_stats {
struct mlx5e_ch_stats ch;
- struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
struct mlx5e_rq_stats xskrq;
struct mlx5e_xdpsq_stats rq_xdpsq;
@@ -816,8 +817,8 @@ struct mlx5e_channel_stats {
struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
- struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
- struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq_stats sq[MLX5_MAX_NUM_TC];
+ struct mlx5e_ptp_cq_stats cq[MLX5_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
@@ -885,7 +886,6 @@ struct mlx5e_priv {
struct mlx5e_rq drop_rq;
struct mlx5e_channels channels;
- u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
@@ -983,6 +983,8 @@ struct mlx5e_profile {
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch_limit)(struct mlx5_core_dev *mdev);
+ u32 (*get_tisn)(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv,
+ u8 lag_port, u8 tc);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
@@ -990,6 +992,11 @@ struct mlx5e_profile {
u32 features;
};
+u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
+ u8 lag_port, u8 tc);
+
#define mlx5e_profile_feature_cap(profile, feature) \
((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
@@ -1037,6 +1044,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
struct mlx5e_create_cq_param {
+ struct net_device *netdev;
+ struct workqueue_struct *wq;
struct napi_struct *napi;
struct mlx5e_ch_stats *ch_stats;
int node;
@@ -1044,7 +1053,7 @@ struct mlx5e_create_cq_param {
};
struct mlx5e_cq_param;
-int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
@@ -1115,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);
@@ -1131,8 +1140,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
-int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
@@ -1174,9 +1181,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
- const u8 hfunc);
+int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
+int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 254c84739..40c8df111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -36,7 +36,7 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
index e1ac4b3d2..6beba7f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
@@ -7,6 +7,5 @@
int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
-void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);
#endif /* __MLX5_MONITOR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 30507b7c2..5d213a988 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -669,6 +669,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
*ccp = (struct mlx5e_create_cq_param) {
+ .netdev = c->netdev,
+ .wq = c->priv->wq,
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 15d97c685..ca05b3252 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -518,9 +518,11 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
for (tc = 0; tc < num_tc; tc++) {
int txq_ix = ix_base + tc;
+ u32 tisn;
- err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- cparams, tc, &c->ptpsq[tc]);
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, tc);
+ err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]);
if (err)
goto close_txqsq;
}
@@ -555,6 +557,8 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
num_tc = mlx5e_get_dcb_num_tc(params);
+ ccp.netdev = c->netdev;
+ ccp.wq = c->priv->wq;
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -565,7 +569,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
- err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
if (err)
goto out_err_txqsq_cq;
}
@@ -574,7 +578,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
- err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
if (err)
goto out_err_ts_cq;
@@ -602,6 +606,8 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
struct mlx5e_cq_param *cq_param;
struct mlx5e_cq *cq = &c->rq.cq;
+ ccp.netdev = c->netdev;
+ ccp.wq = c->priv->wq;
ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
@@ -609,7 +615,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
cq_param = &cparams->rq_param.cqp;
- return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
+ return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
}
static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 7b700d0f9..883c04485 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -49,7 +49,7 @@ enum {
struct mlx5e_ptp {
/* data path */
- struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_ptpsq ptpsq[MLX5_MAX_NUM_TC];
struct mlx5e_rq rq;
struct napi_struct napi;
struct device *pdev;
@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
}
static inline u8
+mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc];
+}
+
+static inline void
mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
{
- return fifo->data[fifo->mask & fifo->cc++];
+ fifo->cc++;
}
static inline void
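
Splitting the metadata FIFO pop into peek plus a separate pop lets the TX path read the next PTP metadata slot while building the WQE and consume it only once the send is committed, so a dropped packet no longer loses the freelist entry. A small self-contained analogue of that ring freelist, assuming a power-of-two capacity (identifiers are illustrative):

#include <stdint.h>
#include <stdio.h>

struct fifo {
	uint8_t data[8];   /* power-of-two capacity */
	uint8_t mask;      /* capacity - 1 */
	uint16_t pc, cc;   /* producer/consumer counters */
};

static void fifo_push(struct fifo *f, uint8_t v) { f->data[f->mask & f->pc++] = v; }
/* Read the head without consuming it. */
static uint8_t fifo_peek(const struct fifo *f) { return f->data[f->mask & f->cc]; }
/* Consume the head; called only after the operation using it succeeds. */
static void fifo_pop(struct fifo *f) { f->cc++; }

int main(void)
{
	struct fifo f = { .mask = 7 };

	fifo_push(&f, 42);
	uint8_t slot = fifo_peek(&f);   /* build the descriptor with slot */
	int send_ok = 1;                /* pretend the send succeeded */
	if (send_ok)
		fifo_pop(&f);           /* commit: entry leaves the freelist */
	printf("used slot %u\n", slot);
	return 0;
}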
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 244bc15a4..922bc5b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -77,29 +77,31 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
struct mlx5e_params *params;
struct mlx5e_channel *c;
struct mlx5e_txqsq *sq;
+ u32 tisn;
params = &chs->params;
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node_qid > priv->htb_max_qos_sqs);
- if (node_qid == priv->htb_max_qos_sqs) {
- struct mlx5e_sq_stats *stats, **stats_list = NULL;
+ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+ if (!priv->htb_qos_sq_stats) {
+ struct mlx5e_sq_stats **stats_list;
+
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list), GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ }
+
+ if (!priv->htb_qos_sq_stats[node_qid]) {
+ struct mlx5e_sq_stats *stats;
- if (priv->htb_max_qos_sqs == 0) {
- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
- sizeof(*stats_list),
- GFP_KERNEL);
- if (!stats_list)
- return -ENOMEM;
- }
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- kvfree(stats_list);
+ if (!stats)
return -ENOMEM;
- }
- if (stats_list)
- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
@@ -123,11 +125,13 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_cq, 0, sizeof(param_cq));
mlx5e_build_sq_param(priv->mdev, params, &param_sq);
mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
- err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
- err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, hw_id,
+
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, 0);
+ err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
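
The qos.c rework allocates the per-node stats array on first use and publishes it with WRITE_ONCE(), relying on the ordering spelled out in the in-tree comment ("Pairs with smp_load_acquire in en_stats.c"). A userspace analogue of that publish/consume ordering, using C11 atomics rather than the kernel primitives and assuming node ids are handed out in order (a sketch, not the driver code):

#include <stdatomic.h>
#include <stdlib.h>

struct sq_stats { unsigned long packets; };

static _Atomic(struct sq_stats **) stats_list;  /* lazily published array */
static atomic_int max_sqs;                      /* readers bound loops on this */

static int open_qos_sq(int node_qid, int max_nodes)
{
	struct sq_stats **list = atomic_load_explicit(&stats_list, memory_order_relaxed);

	if (!list) {            /* first SQ: allocate and publish the array once */
		list = calloc(max_nodes, sizeof(*list));
		if (!list)
			return -1;
		atomic_store_explicit(&stats_list, list, memory_order_relaxed);
	}
	if (!list[node_qid]) {
		list[node_qid] = calloc(1, sizeof(struct sq_stats));
		if (!list[node_qid])
			return -1;
		/* release: array and entry are visible before the count grows */
		atomic_store_explicit(&max_sqs, node_qid + 1, memory_order_release);
	}
	return 0;
}

static int read_stats(unsigned long *sum)
{
	/* acquire pairs with the release store above */
	int n = atomic_load_explicit(&max_sqs, memory_order_acquire);
	struct sq_stats **list = atomic_load_explicit(&stats_list, memory_order_relaxed);

	for (int i = 0; i < n; i++)
		*sum += list[i]->packets;
	return n;
}

int main(void)
{
	unsigned long sum = 0;

	if (open_qos_sq(0, 4) || open_qos_sq(1, 4))
		return 1;
	return read_stats(&sum) == 2 ? 0 : 1;
}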
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b12fe3c5a..a55452c69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -147,6 +147,20 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
}
}
+static void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ u64 dbytes;
+ u64 dpkts;
+
+ dpkts = priv->stats.rep_stats.vport_rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ dbytes = priv->stats.rep_stats.vport_rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, &priv->stats.rep_stats);
+ flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
+ FLOW_ACTION_HW_STATS_DELAYED);
+}
+
static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
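
The matchall stats helper moved into rep/tc.c keeps the usual delta pattern: hold the previous counter snapshot, report only what accumulated since the last query, then re-snapshot. Reduced to a standalone sketch (stand-in counters, not the vport registers):

#include <stdio.h>

struct snap { unsigned long packets, bytes; };

static struct snap hw = { 1000, 64000 };  /* stand-in for hardware counters */
static struct snap prev;                  /* previous snapshot */

/* Report the delta since the last call, then advance the snapshot. */
static void stats_matchall(unsigned long *dpkts, unsigned long *dbytes)
{
	*dpkts  = hw.packets - prev.packets;
	*dbytes = hw.bytes - prev.bytes;
	prev = hw;
}

int main(void)
{
	unsigned long dp, db;

	stats_matchall(&dp, &db);       /* first delta absorbs the baseline */
	hw.packets += 10;
	hw.bytes += 640;
	stats_matchall(&dp, &db);
	printf("delta: %lu pkts, %lu bytes\n", dp, db);  /* 10 / 640 */
	return 0;
}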
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index f675b1926..f66bbc846 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
+ mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
+ mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 368a95fa7..b14cd62ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -48,7 +48,8 @@ mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
- u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? TCA_PEDIT_KEY_EX_CMD_SET :
+ TCA_PEDIT_KEY_EX_CMD_ADD;
u8 htype = act->mangle.htype;
int err = -EOPNOTSUPP;
u32 mask, val, offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 5620d9f97..ac458a8d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -68,11 +68,13 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
node = dev_to_node(mdev->device);
+ ccp.netdev = priv->netdev;
+ ccp.wq = priv->wq;
ccp.node = node;
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
- err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
+ err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 13c7ed1bb..82b5ca1be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -103,7 +103,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
return false;
/* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
@@ -145,7 +145,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdptxd->dma_addr = dma_addr;
if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
return false;
/* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
@@ -256,9 +256,55 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+ const struct mlx5_cqe64 *cqe = _ctx->cqe;
+
+ if (!cqe_has_vlan(cqe))
+ return -ENODATA;
+
+ *vlan_proto = htons(ETH_P_8021Q);
+ *vlan_tci = be16_to_cpu(cqe->vlan_info);
+ return 0;
+}
+
const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
.xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
.xmo_rx_hash = mlx5e_xdp_rx_hash,
+ .xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag,
+};
+
+struct mlx5e_xsk_tx_complete {
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_cq *cq;
+};
+
+static u64 mlx5e_xsk_fill_timestamp(void *_priv)
+{
+ struct mlx5e_xsk_tx_complete *priv = _priv;
+ u64 ts;
+
+ ts = get_cqe_ts(priv->cqe);
+
+ if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
+ return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
+
+ return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
+}
+
+static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
+{
+ struct mlx5_wqe_eth_seg *eseg = priv;
+
+ /* HW/FW is doing parsing, so offsets are largely ignored. */
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+}
+
+const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = {
+ .tmo_fill_timestamp = mlx5e_xsk_fill_timestamp,
+ .tmo_request_checksum = mlx5e_xsk_request_checksum,
};
/* returns true if packet was consumed by xdp */
@@ -398,11 +444,11 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result);
+ int check_result, struct xsk_tx_metadata *meta);
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result)
+ int check_result, struct xsk_tx_metadata *meta)
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -420,7 +466,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
*/
if (unlikely(sq->mpwqe.wqe))
mlx5e_xdp_mpwqe_complete(sq);
- return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
+ return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta);
}
if (!xdptxd->len) {
skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
@@ -450,6 +496,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
* and it's safe to complete it at any time.
*/
mlx5e_xdp_mpwqe_session_start(sq);
+ xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth);
}
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
@@ -480,7 +527,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
- int check_result)
+ int check_result, struct xsk_tx_metadata *meta)
{
struct mlx5e_xmit_data_frags *xdptxdf =
container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
@@ -601,6 +648,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
sq->pc++;
}
+ xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
+
sq->doorbell_cseg = cseg;
stats->xmit++;
@@ -610,7 +659,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- struct xdp_frame_bulk *bq)
+ struct xdp_frame_bulk *bq,
+ struct mlx5e_cq *cq,
+ struct mlx5_cqe64 *cqe)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
@@ -670,10 +721,24 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
break;
}
- case MLX5E_XDP_XMIT_MODE_XSK:
+ case MLX5E_XDP_XMIT_MODE_XSK: {
/* AF_XDP send */
+ struct xsk_tx_metadata_compl *compl = NULL;
+ struct mlx5e_xsk_tx_complete priv = {
+ .cqe = cqe,
+ .cq = cq,
+ };
+
+ if (xp_tx_metadata_enabled(sq->xsk_pool)) {
+ xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+ compl = &xdpi.xsk_meta;
+
+ xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv);
+ }
+
(*xsk_frames)++;
break;
+ }
default:
WARN_ON_ONCE(true);
}
@@ -722,7 +787,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -769,7 +834,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL);
}
xdp_flush_frame_bulk(&bq);
@@ -842,7 +907,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
- mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
+ mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL);
if (unlikely(!ret)) {
int j;
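
The xdp.c changes thread an optional struct xsk_tx_metadata pointer through every xmit variant: xsk_tx_metadata_request() stamps the WQE at submit time, and xsk_tx_metadata_complete() runs against the CQE on completion with the cookie carried through the xdpi FIFO. The request/complete split, reduced to a standalone ops-table sketch (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

struct tx_meta_ops {
	uint64_t (*fill_timestamp)(void *priv);
	void (*request_checksum)(void *priv);
};

struct meta { int want_ts, want_csum; uint64_t ts; };

/* Submit side: mark the descriptor with what the app asked for. */
static void meta_request(struct meta *m, const struct tx_meta_ops *ops, void *priv)
{
	if (m && m->want_csum)
		ops->request_checksum(priv);    /* e.g. set cs_flags in the eseg */
}

/* Completion side: fill results back from the completion entry. */
static void meta_complete(struct meta *m, const struct tx_meta_ops *ops, void *priv)
{
	if (m && m->want_ts)
		m->ts = ops->fill_timestamp(priv);  /* e.g. the CQE timestamp */
}

static uint64_t fake_ts(void *priv)   { (void)priv; return 123456789ULL; }
static void fake_csum(void *priv)     { (void)priv; puts("csum offload armed"); }

int main(void)
{
	const struct tx_meta_ops ops = { fake_ts, fake_csum };
	struct meta m = { .want_ts = 1, .want_csum = 1 };

	meta_request(&m, &ops, NULL);   /* while building the WQE */
	meta_complete(&m, &ops, NULL);  /* while polling the CQ */
	printf("tx timestamp %llu\n", (unsigned long long)m.ts);
	return 0;
}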
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index ecfe93a47..e054db1e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -33,6 +33,7 @@
#define __MLX5_EN_XDP_H__
#include <linux/indirect_call_wrapper.h>
+#include <net/xdp_sock.h>
#include "en.h"
#include "en/txrx.h"
@@ -82,7 +83,7 @@ enum mlx5e_xdp_xmit_mode {
* num, page_1, page_2, ... , page_num.
*
* MLX5E_XDP_XMIT_MODE_XSK:
- * none.
+ * frame.xsk_meta.
*/
#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4
@@ -97,6 +98,7 @@ union mlx5e_xdp_info {
u8 num;
struct page *page;
} page;
+ struct xsk_tx_metadata_compl xsk_meta;
};
struct mlx5e_xsk_param;
@@ -112,13 +114,16 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+extern const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops;
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- int check_result));
+ int check_result,
+ struct xsk_tx_metadata *meta));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
- int check_result));
+ int check_result,
+ struct xsk_tx_metadata *meta));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 36826b582..82e6abbc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -127,7 +127,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
- err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
if (unlikely(err))
goto err_free_cparam;
@@ -136,7 +136,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 597f319d4..a59199ed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -55,12 +55,16 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, *xdpi);
+ if (xp_tx_metadata_enabled(sq->xsk_pool))
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info) { .xsk_meta = {} });
sq->doorbell_cseg = &nopwqe->ctrl;
}
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
struct xsk_buff_pool *pool = sq->xsk_pool;
+ struct xsk_tx_metadata *meta = NULL;
union mlx5e_xdp_info xdpi;
bool work_done = true;
bool flush = false;
@@ -93,12 +97,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len;
+ meta = xsk_buff_get_metadata(pool, desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd,
- check_result);
+ check_result, meta);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
@@ -106,6 +111,16 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xsk_tx_post_err(sq, &xdpi);
} else {
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ if (xp_tx_metadata_enabled(sq->xsk_pool)) {
+ struct xsk_tx_metadata_compl compl;
+
+ xsk_tx_metadata_to_compl(meta, &compl);
+ XSK_TX_COMPL_FITS(void *);
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
+ (union mlx5e_xdp_info)
+ { .xsk_meta = compl });
+ }
}
flush = true;
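
When metadata is enabled, the xsk/tx.c hunks push a second FIFO entry per frame (the completion cookie), including an empty one on the NOP error path, so that the completion side pops in lockstep with the submit side. The invariant in miniature, with both sides keyed off the same predicate (illustrative sketch):

#include <stdio.h>

#define META_ENABLED 1  /* both sides must agree on this predicate */

struct entry { int kind; int payload; };
static struct entry fifo[16];
static unsigned pc, cc;

static void push(int kind, int payload) { fifo[pc++ & 15] = (struct entry){ kind, payload }; }
static struct entry pop(void) { return fifo[cc++ & 15]; }

static void xmit_frame(int frame)
{
	push(0, frame);         /* the frame entry, always */
	if (META_ENABLED)
		push(1, frame); /* paired metadata entry, even if empty */
}

static void complete_frame(void)
{
	struct entry e = pop();

	printf("frame %d done\n", e.payload);
	if (META_ENABLED) {
		struct entry m = pop(); /* consumed in lockstep with the frame */

		printf("meta  %d done\n", m.payload);
	}
}

int main(void)
{
	xmit_frame(7);
	complete_frame();
	return 0;
}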
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index b2cabd6ab..cc9bcc420 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index e66f486fa..415fec776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -45,6 +45,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
+enum {
+ MLX5E_ARFS_STATE_ENABLED,
+};
+
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@@ -59,6 +63,7 @@ struct mlx5e_arfs_tables {
spinlock_t arfs_lock;
int last_filter_id;
struct workqueue_struct *wq;
+ unsigned long state;
};
struct arfs_tuple {
@@ -169,6 +174,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
+ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
return 0;
}
@@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
+ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@@ -626,17 +635,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&arfs->arfs_lock);
- hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&arfs->arfs_lock);
-
- mutex_unlock(&priv->state_lock);
- kfree(arfs_rule);
- goto out;
- }
- mutex_unlock(&priv->state_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+ return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@@ -752,6 +752,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -EPERM;
+ }
+
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
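
The en_arfs.c hunks drop the priv->state_lock/MLX5E_STATE_OPENED dance in the work handler in favour of a dedicated MLX5E_ARFS_STATE_ENABLED bit: set on enable, cleared before the rules are torn down, and re-checked under the table spinlock before any insertion. The shape of that gate as a compact pthread sketch (names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool arfs_enabled;
static int nrules;

static void arfs_enable(void) { atomic_store(&arfs_enabled, true); }

static void arfs_disable(void)
{
	/* clear first so late workers bail out ... */
	atomic_store(&arfs_enabled, false);
	pthread_mutex_lock(&table_lock);
	nrules = 0;             /* ... then flush under the table lock */
	pthread_mutex_unlock(&table_lock);
}

/* flow-steer path: insertion is refused once teardown has begun */
static int arfs_add_rule(void)
{
	pthread_mutex_lock(&table_lock);
	if (!atomic_load(&arfs_enabled)) {
		pthread_mutex_unlock(&table_lock);
		return -1;      /* -EPERM in the driver */
	}
	nrules++;
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	arfs_enable();
	if (arfs_add_rule())
		return 1;
	arfs_disable();
	return arfs_add_rule() == -1 ? 0 : 1;   /* rejected after teardown */
}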
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 41c396e76..6ed3a32b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -74,7 +74,73 @@ int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
return err;
}
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
+{
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+
+ if (mlx5_lag_is_lacp_owner(mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
+{
+ mlx5_core_destroy_tis(mdev, tisn);
+}
+
+static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
+{
+ int tc, i;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
+ for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
+ mlx5e_destroy_tis(mdev, tisn[i][tc]);
+}
+
+static bool mlx5_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
+}
+
+static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC])
+{
+ int tc, i;
+ int err;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
+ for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
+
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(tisc, tisc, prio, tc << 1);
+
+ if (mlx5_lag_should_assign_affinity(mdev))
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+ err = mlx5e_create_tis(mdev, in, &tisn[i][tc]);
+ if (err)
+ goto err_close_tises;
+ }
+ }
+
+ return 0;
+
+err_close_tises:
+ for (; i >= 0; i--) {
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(mdev, tisn[i][tc]);
+ tc = MLX5_MAX_NUM_TC;
+ }
+
+ return err;
+}
+
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@@ -103,6 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
+ if (create_tises) {
+ err = mlx5e_create_tises(mdev, res->tisn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+ goto err_destroy_bfreg;
+ }
+ res->tisn_valid = true;
+ }
+
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@@ -115,6 +190,8 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
return 0;
+err_destroy_bfreg:
+ mlx5_free_bfreg(mdev, &res->bfreg);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -130,6 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
+ if (res->tisn_valid)
+ mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
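
mlx5e_create_tises() lands in en_common.c with the familiar two-level unwind: on failure, finish destroying the partially built row, then sweep every completed row below it. A standalone rendering of that unwind pattern with hypothetical create/destroy stubs, plus a leak check in main():

#include <stdio.h>

#define NPORTS 2
#define NTC    8

static int created[NPORTS][NTC];

static int create_tis(int i, int tc)
{
	if (i == 1 && tc == 3)          /* injected mid-grid failure */
		return -1;
	created[i][tc] = 1;
	return 0;
}

static void destroy_tis(int i, int tc) { created[i][tc] = 0; }

static int create_tises(void)
{
	int i, tc, err;

	for (i = 0; i < NPORTS; i++) {
		for (tc = 0; tc < NTC; tc++) {
			err = create_tis(i, tc);
			if (err)
				goto err_unwind;
		}
	}
	return 0;

err_unwind:
	/* tc indexes the failed slot: destroy tc-1..0 of this row,
	 * then every slot of each fully built row below it.
	 */
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			destroy_tis(i, tc);
		tc = NTC;
	}
	return err;
}

int main(void)
{
	if (create_tises())
		for (int i = 0; i < NPORTS; i++)
			for (int tc = 0; tc < NTC; tc++)
				if (created[i][tc])
					return printf("leak at %d/%d\n", i, tc), 1;
	puts("unwind clean");
	return 0;
}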
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c7c1b667b..93461b0c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -451,6 +451,23 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ /* If RXFH is configured, changing the channels number is allowed only if
+ * it does not require resizing the RSS table. This is because the previous
+ * configuration may no longer be compatible with the new RSS table.
+ */
+ if (netif_is_rxfh_configured(priv->netdev)) {
+ int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
+ int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
+
+ if (new_rqt_size != cur_rqt_size) {
+ err = -EINVAL;
+ netdev_err(priv->netdev,
+ "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
+ __func__, new_rqt_size, cur_rqt_size);
+ goto out;
+ }
+ }
+
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
@@ -1262,27 +1279,29 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u32 rss_context = rxfh->rss_context;
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
+ err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
+ rxfh->indir, rxfh->key, &rxfh->hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
-static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete)
+int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 *rss_context = &rxfh->rss_context;
+ u8 hfunc = rxfh->hfunc;
int err;
mutex_lock(&priv->state_lock);
- if (delete) {
+ if (*rss_context && rxfh->rss_delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
@@ -1295,7 +1314,8 @@ static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
goto unlock;
}
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
+ err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
+ rxfh->indir, rxfh->key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
unlock:
@@ -1303,25 +1323,6 @@ unlock:
return err;
}
-int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
-}
-
-int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int err;
-
- mutex_lock(&priv->state_lock);
- err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
- mutex_unlock(&priv->state_lock);
- return err;
-}
-
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
@@ -2398,6 +2399,7 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
@@ -2420,8 +2422,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
- .get_rxfh_context = mlx5e_get_rxfh_context,
- .set_rxfh_context = mlx5e_set_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
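
With the ethtool core now routing RSS context operations through get_rxfh/set_rxfh, the driver advertises cap_rss_ctx_supported and reads rss_context/rss_delete out of struct ethtool_rxfh_param instead of implementing the removed *_rxfh_context hooks. A compressed, runnable view of the resulting control flow, against a local mirror of the fields this diff touches (not a buildable driver):

#include <stdio.h>

/* Local mirror of the ethtool_rxfh_param fields used in this diff. */
struct rxfh_param {
	unsigned int rss_context;       /* 0 = default context */
	int rss_delete;                 /* destroy the context instead of updating */
	unsigned int *indir;
	unsigned char *key;
	unsigned char hfunc;
};

static int rss_destroy(unsigned int ctx) { printf("destroy ctx %u\n", ctx); return 0; }
static int rss_set(unsigned int ctx)     { printf("program ctx %u\n", ctx); return 0; }

/* One entry point now covers both the default and additional contexts. */
static int set_rxfh(struct rxfh_param *p)
{
	if (p->rss_context && p->rss_delete)
		return rss_destroy(p->rss_context);
	return rss_set(p->rss_context);
}

int main(void)
{
	struct rxfh_param def = { 0 };
	struct rxfh_param extra = { .rss_context = 3, .rss_delete = 1 };

	set_rxfh(&def);                 /* default context update */
	return set_rxfh(&extra);        /* context teardown */
}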
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0c87ddb8a..952f1f981 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -902,6 +902,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.nid = node;
pp_params.dev = rq->pdev;
pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
@@ -1351,6 +1352,17 @@ void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_free_rq(rq);
}
+u32 mlx5e_profile_get_tisn(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
+ u8 lag_port, u8 tc)
+{
+ if (profile->get_tisn)
+ return profile->get_tisn(mdev, priv, lag_port, tc);
+
+ return mdev->mlx5e_res.hw_objs.tisn[lag_port][tc];
+}
+
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
kvfree(sq->db.xdpi_fifo.xi);
@@ -1919,7 +1931,8 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
return err;
csp.tis_lst_sz = 1;
- csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
+ csp.tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, 0); /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
@@ -1981,11 +1994,12 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
mlx5e_free_xdpsq(sq);
}
-static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
+static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ struct workqueue_struct *workqueue,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int err;
u32 i;
@@ -2012,13 +2026,13 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
}
cq->mdev = mdev;
- cq->netdev = priv->netdev;
- cq->priv = priv;
+ cq->netdev = netdev;
+ cq->workqueue = workqueue;
return 0;
}
-static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
+static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
@@ -2029,7 +2043,7 @@ static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(priv, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2095,14 +2109,13 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
-int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
+int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int err;
- err = mlx5e_alloc_cq(priv, param, ccp, cq);
+ err = mlx5e_alloc_cq(mdev, param, ccp, cq);
if (err)
return err;
@@ -2135,7 +2148,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->txq_sq.cqp,
ccp, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
@@ -2203,12 +2216,15 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
u32 qos_queue_group_id;
+ u32 tisn;
+ tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
+ c->lag_port, tc);
err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
if (err)
goto err_close_sqs;
- err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
+ err = mlx5e_open_txqsq(c, tisn, txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
&c->priv->channel_stats[c->ix]->sq[tc]);
@@ -2336,12 +2352,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err)
return err;
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
@@ -2350,17 +2366,17 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_icosq_cq;
- err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
- err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
+ err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
goto err_close_xdp_tx_cqs;
- err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
+ err = c->xdp ? mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
@@ -3307,7 +3323,7 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -3363,75 +3379,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
mlx5e_free_cq(&drop_rq->cq);
}
-int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
-{
- void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
-
- if (MLX5_GET(tisc, tisc, tls_en))
- MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
-
- if (mlx5_lag_is_lacp_owner(mdev))
- MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
-
- return mlx5_core_create_tis(mdev, in, tisn);
-}
-
-void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
-{
- mlx5_core_destroy_tis(mdev, tisn);
-}
-
-void mlx5e_destroy_tises(struct mlx5e_priv *priv)
-{
- int tc, i;
-
- for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
-}
-
-static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
-}
-
-int mlx5e_create_tises(struct mlx5e_priv *priv)
-{
- int tc, i;
- int err;
-
- for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
- for (tc = 0; tc < priv->profile->max_tc; tc++) {
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
-
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
- MLX5_SET(tisc, tisc, prio, tc << 1);
-
- if (mlx5e_lag_should_assign_affinity(priv->mdev))
- MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
-
- err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
- if (err)
- goto err_close_tises;
- }
- }
-
- return 0;
-
-err_close_tises:
- for (; i >= 0; i--) {
- for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
- tc = priv->profile->max_tc;
- }
-
- return err;
-}
-
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
if (priv->mqprio_rl) {
@@ -3440,7 +3387,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
priv->mqprio_rl = NULL;
}
mlx5e_accel_cleanup_tx(priv);
- mlx5e_destroy_tises(priv);
}
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
@@ -3542,7 +3488,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- if (tc && tc != MLX5E_MAX_NUM_TC)
+ if (tc && tc != MLX5_MAX_NUM_TC)
return -EINVAL;
new_params = priv->channels.params;
@@ -5185,6 +5131,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
@@ -5265,7 +5212,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
- netdev->features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
@@ -5505,23 +5451,13 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
- err = mlx5e_create_tises(priv);
- if (err) {
- mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
- return err;
- }
-
err = mlx5e_accel_init_tx(priv);
if (err)
- goto err_destroy_tises;
+ return err;
mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
-
-err_destroy_tises:
- mlx5e_destroy_tises(priv);
- return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5616,7 +5552,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
- .max_tc = MLX5E_MAX_NUM_TC,
+ .max_tc = MLX5_MAX_NUM_TC,
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
@@ -5759,9 +5695,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
- mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
- mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
@@ -6056,7 +5990,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;
@@ -6069,7 +6003,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+static int _mlx5e_suspend(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6087,15 +6021,18 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
return 0;
}
-static int mlx5e_probe(struct auxiliary_device *adev,
- const struct auxiliary_device_id *id)
+static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ return _mlx5e_suspend(adev);
+}
+
+static int _mlx5e_probe(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5e_dev *mlx5e_dev;
struct net_device *netdev;
- pm_message_t state = {};
struct mlx5e_priv *priv;
int err;
@@ -6150,7 +6087,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
return 0;
err_resume:
- mlx5e_suspend(adev, state);
+ _mlx5e_suspend(adev);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6162,16 +6099,21 @@ err_devlink_unregister:
return err;
}
+static int mlx5e_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ return _mlx5e_probe(adev);
+}
+
static void mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
- pm_message_t state = {};
mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- mlx5e_suspend(adev, state);
+ _mlx5e_suspend(adev);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e92d4f835..05527418f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -112,8 +112,18 @@ static const struct counter_desc vport_rep_stats_desc[] = {
tx_vport_rdma_multicast_bytes) },
};
+static const struct counter_desc vport_rep_loopback_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ vport_loopback_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ vport_loopback_bytes) },
+};
+
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
+#define NUM_VPORT_REP_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
+ ARRAY_SIZE(vport_rep_loopback_stats_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
@@ -157,7 +167,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
- return NUM_VPORT_REP_HW_COUNTERS;
+ return NUM_VPORT_REP_HW_COUNTERS +
+ NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
@@ -166,6 +177,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_rep_loopback_stats_desc[i].format);
return idx;
}
@@ -176,6 +190,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
+ for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_loopback_stats_desc, i);
return idx;
}
@@ -247,6 +264,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
rep_stats->tx_vport_rdma_multicast_bytes =
MLX5_GET_CTR(out, received_ib_multicast.octets);
+ if (MLX5_CAP_GEN(priv->mdev, vport_counter_local_loopback)) {
+ rep_stats->vport_loopback_packets =
+ MLX5_GET_CTR(out, local_loopback.packets);
+ rep_stats->vport_loopback_bytes =
+ MLX5_GET_CTR(out, local_loopback.octets);
+ }
+
out:
kvfree(out);
}
@@ -1156,12 +1180,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
int err;
- err = mlx5e_create_tises(priv);
- if (err) {
- mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
- return err;
- }
-
err = mlx5e_rep_neigh_init(rpriv);
if (err)
goto err_neigh_init;
@@ -1184,7 +1202,6 @@ err_ht_init:
err_init_tx:
mlx5e_rep_neigh_cleanup(rpriv);
err_neigh_init:
- mlx5e_destroy_tises(priv);
return err;
}
@@ -1198,7 +1215,6 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
mlx5e_cleanup_uplink_rep_tx(rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
- mlx5e_destroy_tises(priv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -1428,7 +1444,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
- .max_tc = MLX5E_MAX_NUM_TC,
+ .max_tc = MLX5_MAX_NUM_TC,
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8d9743a5e..d601b5faa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -298,8 +298,8 @@ static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
struct page *page = frag_page->page;
- if (page_pool_defrag_page(page, drain_count) == 0)
- page_pool_put_defragged_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_page(page, drain_count) == 0)
+ page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
}
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -1039,7 +1039,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(cq->priv->wq, &sq->recover_work);
+ queue_work(cq->workqueue, &sq->recover_work);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 477c547dc..12b3607af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -476,6 +476,8 @@ struct mlx5e_rep_stats {
u64 tx_vport_rdma_multicast_packets;
u64 rx_vport_rdma_multicast_bytes;
u64 tx_vport_rdma_multicast_bytes;
+ u64 vport_loopback_packets;
+ u64 vport_loopback_bytes;
};
struct mlx5e_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 404dd1d9b..9fb2c057b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3210,10 +3210,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
- set_masks = &hdrs[0].masks;
- add_masks = &hdrs[1].masks;
- set_vals = &hdrs[0].vals;
- add_vals = &hdrs[1].vals;
+ set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks;
+ add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks;
+ set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals;
+ add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -5031,22 +5031,6 @@ int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
return apply_police_params(priv, 0, extack);
}
-void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct rtnl_link_stats64 cur_stats;
- u64 dbytes;
- u64 dpkts;
-
- mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
- dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
- dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
- rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
- FLOW_ACTION_HW_STATS_DELAYED);
-}
-
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index adb39e30f..c24bda56b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -203,8 +203,6 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
-void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
- struct tc_cls_matchall_offload *ma);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1ead69c5f..e21a3b412 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+ mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
+
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
- if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
- mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
- be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}
@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
eseg->flow_table_metadata =
- cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
@@ -863,7 +862,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->priv->wq, &sq->recover_work);
+ queue_work(cq->workqueue, &sq->recover_work);
}
stats->cqe_err++;
}
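
The en_tx.c hunks above change the PTP metadata free-list handling from pop-early/push-back-on-error to peek-when-building/pop-on-success, so the error path needs no undo. A userspace sketch of that reservation pattern; the fifo type and names are illustrative, not the driver's:

#include <stdio.h>

struct fifo { unsigned int data[8], head, tail; };

static unsigned int fifo_peek(const struct fifo *f) { return f->data[f->head % 8]; }
static void fifo_pop(struct fifo *f) { f->head++; }

static int try_send(unsigned int slot) { return slot != 3; /* pretend slot 3 fails */ }

int main(void)
{
	struct fifo free_list = { .data = { 0, 1, 2, 3, 4, 5, 6, 7 }, .tail = 8 };

	for (int i = 0; i < 4; i++) {
		unsigned int slot = fifo_peek(&free_list); /* reserve only */

		if (!try_send(slot)) {
			printf("send of slot %u failed, nothing to undo\n", slot);
			continue; /* error path: the slot was never consumed */
		}
		fifo_pop(&free_list); /* consume once failure is impossible */
		printf("sent slot %u\n", slot);
	}
	return 0;
}
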
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 3047d7015..1789800fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_esw_vport_match_metadata_supported(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
- dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
reps_err:
mlx5_esw_vports_cleanup(esw);
+ dev->priv.eswitch = NULL;
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
- esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
+ esw->dev->priv.eswitch = NULL;
mlx5_esw_vports_cleanup(esw);
debugfs_remove_recursive(esw->debugfs_root);
devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
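
The eswitch.c hunks above publish dev->priv.eswitch just before esw_offloads_init() and clear it at the mirrored points, in the new reps_err unwind and at the tail of cleanup, so the pointer is valid exactly while offloads code can observe it. A schematic of that publish/unwind symmetry, with all names illustrative:

#include <stddef.h>

struct esw { int dummy; };
struct dev { struct esw *eswitch; };

static struct esw E;

static int offloads_init(struct esw *e) { (void)e; return 0; }
static void offloads_cleanup(struct esw *e) { (void)e; }

static int esw_init(struct dev *d, struct esw *e)
{
	int err;

	d->eswitch = e;            /* publish before the stage that reads it */
	err = offloads_init(e);
	if (err)
		d->eswitch = NULL; /* error unwind mirrors the publish */
	return err;
}

static void esw_cleanup(struct dev *d, struct esw *e)
{
	offloads_cleanup(e);       /* may still dereference d->eswitch */
	d->eswitch = NULL;         /* unpublish only after the last user */
}

int main(void)
{
	struct dev d = { 0 };

	if (!esw_init(&d, &E))
		esw_cleanup(&d, &E);
	return 0;
}
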
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b4eb17141..349e28a6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -618,13 +618,6 @@ static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}
-/* The returned number is valid only when the dev is eswitch manager. */
-static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
-{
- return mlx5_core_is_ecpf_esw_manager(dev) ?
- MLX5_VPORT_ECPF : MLX5_VPORT_PF;
-}
-
static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index baaae628b..e3cce110e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2476,6 +2476,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
+ if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+ mlx5_esw_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index b29299c49..9b8599c20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -1146,3 +1146,37 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
return mlx5_fs_cmd_get_stub_cmds();
}
}
+
+int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {};
+
+ if (silent_mode && !MLX5_CAP_GEN(dev, silent_mode))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
+ MLX5_SET(set_l2_table_entry_in, in, silent_mode_valid, 1);
+ MLX5_SET(set_l2_table_entry_in, in, silent_mode, silent_mode);
+
+ return mlx5_cmd_exec_in(dev, set_l2_table_entry, in);
+}
+
+int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect)
+{
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+
+ if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+ MLX5_SET(set_flow_table_root_in, in, table_type,
+ FS_FT_NIC_TX);
+ if (disconnect)
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ else
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 7790ae553..53e0e5137 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -122,4 +122,6 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
+int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
+int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e6bfa7e4f..cf085a478 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
return err;
}
+static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
+ struct mlx5_pkt_reformat *p2)
+{
+ return p1->owner == p2->owner &&
+ (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
+ p1->id == p2->id :
+ mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
+ mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+}
+
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
(d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
- (d1->vport.pkt_reformat->id ==
- d2->vport.pkt_reformat->id) : true)) ||
+ mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
+ d2->vport.pkt_reformat) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
+ /* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+ if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
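
The add_rule_fg hunk above switches the "not yet linked" test from refcount_read() == 1 to node.parent == NULL, as the new comment says. A small standalone sketch of why the parent pointer is the more direct predicate; the types are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *parent; int refcount; };

static bool is_linked(const struct node *n)
{
	/* Linkage is a property of the tree, not of the refcount: another
	 * path may already hold a reference (refcount > 1) on a node that
	 * was never attached. */
	return n->parent != NULL;
}

int main(void)
{
	struct node parent = { 0 }, child = { .refcount = 2 };

	printf("linked before: %d\n", is_linked(&child)); /* 0, despite refcount 2 */
	child.parent = &parent;
	printf("linked after: %d\n", is_linked(&child));  /* 1 */
	return 0;
}
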
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 4aed1768b..78eb6b709 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -181,7 +181,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_handle {
int num_rules;
- struct mlx5_flow_rule *rule[];
+ struct mlx5_flow_rule *rule[] __counted_by(num_rules);
};
/* Type of children is mlx5_flow_group */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 17fe30a4c..0c26d707e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -539,7 +539,7 @@ struct mlx5_fc_bulk {
u32 base_id;
int bulk_len;
unsigned long *bitmask;
- struct mlx5_fc fcs[];
+ struct mlx5_fc fcs[] __counted_by(bulk_len);
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
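
The two __counted_by annotations above (fs_core.h and fs_counters.c) tie the flexible array's bounds to the named counter member, letting FORTIFY_SOURCE and UBSAN bounds-check accesses at runtime. A standalone approximation of the pattern; the fallback macro is only for compilers without the attribute, and the struct is illustrative:

#include <stdio.h>
#include <stdlib.h>

#ifdef __has_attribute
# if __has_attribute(counted_by)
#  define __counted_by(m) __attribute__((counted_by(m)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(m)
#endif

struct bulk {
	int len;
	int vals[] __counted_by(len); /* bounds tied to 'len' */
};

int main(void)
{
	int n = 4;
	struct bulk *b = malloc(sizeof(*b) + n * sizeof(b->vals[0]));

	if (!b)
		return 1;
	b->len = n; /* the counter must be valid before vals[] is touched */
	for (int i = 0; i < b->len; i++)
		b->vals[i] = i;
	printf("last=%d\n", b->vals[b->len - 1]);
	free(b);
	return 0;
}
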
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 3a9cdf794..2911aa34a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -348,6 +348,25 @@ static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
}
#endif
+static const struct pci_device_id mgt_ifc_device_ids[] = {
+ { PCI_VDEVICE(MELLANOX, 0xc2d2) }, /* BlueField1 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d3) }, /* BlueField2 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d4) }, /* BlueField3-Lx MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d5) }, /* BlueField3 MGT interface device ID */
+ { PCI_VDEVICE(MELLANOX, 0xc2d6) }, /* BlueField4 MGT interface device ID */
+};
+
+static bool mlx5_is_mgt_ifc_pci_device(struct mlx5_core_dev *dev, u16 dev_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mgt_ifc_device_ids); ++i)
+ if (mgt_ifc_device_ids[i].device == dev_id)
+ return true;
+
+ return false;
+}
+
static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -362,10 +381,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
if (err)
return pcibios_err_to_errno(err);
- if (sdev_id != dev_id) {
- mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
- return -EPERM;
- }
+
+ if (sdev_id == dev_id)
+ continue;
+
+ if (mlx5_is_mgt_ifc_pci_device(dev, sdev_id))
+ continue;
+
+ mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
+ return -EPERM;
}
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 2bf77a525..d77be1b4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -339,7 +339,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &ipriv->tisn);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -356,7 +356,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
+ mlx5e_destroy_tis(priv->mdev, ipriv->tisn);
mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}
@@ -483,6 +483,18 @@ static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5i_stats_grps);
}
+u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ if (WARN(lag_port || tc,
+ "IPoIB unexpected non-zero value: lag_port (%u), tc (%u)\n",
+ lag_port, tc))
+ return 0;
+
+ return ipriv->tisn;
+}
+
static const struct mlx5e_profile mlx5i_nic_profile = {
.init = mlx5i_init,
.cleanup = mlx5i_cleanup,
@@ -499,6 +511,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.max_tc = MLX5I_MAX_NUM_TC,
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
+ .get_tisn = mlx5i_get_tisn,
};
/* mlx5i netdev NDOs */
@@ -770,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}
@@ -829,7 +842,7 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
*params = (struct rdma_netdev_alloc_params){
.sizeof_priv = sizeof(struct mlx5i_priv) +
sizeof(struct mlx5e_priv),
- .txqs = nch * MLX5E_MAX_NUM_TC,
+ .txqs = nch * MLX5_MAX_NUM_TC,
.rxqs = nch,
.param = mdev,
.initialize_rdma_netdev = mlx5_rdma_setup_rn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index f3f2af972..2ab6437a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -53,6 +53,7 @@ extern const struct mlx5e_rx_handlers mlx5i_rx_handlers;
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
u32 qpn;
+ u32 tisn;
bool sub_interface;
u32 num_sub_interfaces;
u32 qkey;
@@ -63,6 +64,7 @@ struct mlx5i_priv {
};
int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
+u32 mlx5i_get_tisn(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, u8 lag_port, u8 tc);
/* Underlay QP create/destroy functions */
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 03e681297..f87471306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -218,7 +218,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_unint_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qpn, &epriv->tisn[0][0]);
+ err = mlx5i_create_tis(mdev, ipriv->qpn, &ipriv->tisn);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_uderlay_qp;
@@ -240,7 +240,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
err_close_channels:
mlx5e_close_channels(&epriv->channels);
err_clear_state_opened_flag:
- mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
+ mlx5e_destroy_tis(mdev, ipriv->tisn);
err_remove_rx_uderlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_unint_underlay_qp:
@@ -269,7 +269,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
- mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
+ mlx5e_destroy_tis(mdev, ipriv->tisn);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
@@ -361,6 +361,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.update_stats = NULL,
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
+ .get_tisn = mlx5i_get_tisn,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d14459e5c..69d482f7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -703,8 +703,10 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
+ ldev->buckets = 1;
+ }
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0c83ef174..036174163 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -266,9 +266,6 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
return -EINVAL;
@@ -286,12 +283,15 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_settime_real_time(mdev, ts);
- if (err)
- return err;
+
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_settime_real_time(mdev, ts);
+
+ if (err)
+ return err;
+ }
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
@@ -341,9 +341,6 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
/* HW time adjustment range is checked. If out of range, settime instead */
if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
struct timespec64 ts;
@@ -367,13 +364,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjtime_real_time(mdev, delta);
- if (err)
- return err;
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_adjtime_real_time(mdev, delta);
+
+ if (err)
+ return err;
+ }
+
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&timer->tc, delta);
mlx5_update_clock_info_page(mdev);
@@ -396,15 +396,14 @@ static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_p
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
- if (!mlx5_modify_mtutc_allowed(mdev))
- return 0;
-
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
- if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units) &&
+ scaled_ppm <= S32_MAX && scaled_ppm >= S32_MIN) {
+ /* HW support for scaled_ppm on mlx5 devices is limited to a 32-bit value */
MLX5_SET(mtutc_reg, in, freq_adj_units,
MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
- MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, (s32)scaled_ppm);
} else {
MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
@@ -420,13 +419,15 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct mlx5_core_dev *mdev;
unsigned long flags;
u32 mult;
- int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
- if (err)
- return err;
+ if (mlx5_modify_mtutc_allowed(mdev)) {
+ int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
+
+ if (err)
+ return err;
+ }
mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
@@ -1004,14 +1005,38 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
info->frac = timer->tc.frac;
}
+static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+ u8 log_max_freq_adjustment = 0;
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTUTC, 0, 0);
+ if (!err)
+ log_max_freq_adjustment =
+ MLX5_GET(mtutc_reg, out, log_max_freq_adjustment);
+
+ if (log_max_freq_adjustment)
+ clock->ptp_info.max_adj =
+ min(S32_MAX, 1 << log_max_freq_adjustment);
+}
+
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
+ /* Configure the PHC */
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ if (MLX5_CAP_MCAM_REG(mdev, mtutc))
+ mlx5_init_timer_max_freq_adjustment(mdev);
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
- clock->ptp_info = mlx5_ptp_clock_info;
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
@@ -1042,11 +1067,10 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
}
seqlock_init(&clock->lock);
- mlx5_init_timer_clock(mdev);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
- /* Configure the PHC */
- clock->ptp_info = mlx5_ptp_clock_info;
+ /* Initialize the device clock */
+ mlx5_init_timer_clock(mdev);
/* Initialize 1PPS data structures */
mlx5_init_pps(mdev);
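
The clock.c hunks above hoist the mlx5_modify_mtutc_allowed() check out of the real-time helpers and into their callers, so calling a helper now always means touching hardware, and the software timecounter path runs either way. The shape of that refactor, reduced to a standalone sketch with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool hw_clock_writable; /* stand-in for mlx5_modify_mtutc_allowed() */

static int hw_settime(long ns)
{
	/* After the refactor this helper unconditionally programs hardware;
	 * it no longer hides a silent "return 0" when writes are disallowed. */
	printf("hw set to %ld\n", ns);
	return 0;
}

static int settime(long ns)
{
	if (hw_clock_writable) { /* guard hoisted to the caller */
		int err = hw_settime(ns);

		if (err)
			return err;
	}
	printf("sw timecounter set to %ld\n", ns); /* always runs */
	return 0;
}

int main(void)
{
	settime(1000);
	hw_clock_writable = true;
	settime(2000);
	return 0;
}
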
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index e8e50563e..e7d59cfa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -256,6 +256,13 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
devcom_free_comp_dev(devcom);
}
+int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
+{
+ struct mlx5_devcom_comp *comp = devcom->comp;
+
+ return kref_read(&comp->ref);
+}
+
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index fc23bbef8..ec32b686f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -31,6 +31,7 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int event, int rollback_event,
void *event_data);
+int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom);
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready);
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 9482e51ac..7c5516b0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -13,11 +13,13 @@ struct mlx5_dm {
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
+ unsigned long *header_encap_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
u64 header_modify_pattern_icm_blocks = 0;
+ u64 header_sw_encap_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
@@ -54,6 +56,17 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
goto err_modify_hdr;
}
+ if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) {
+ header_sw_encap_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_encap_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL);
+ if (!dm->header_encap_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
@@ -66,11 +79,14 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
dm->header_modify_pattern_sw_icm_alloc_blocks =
bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
- goto err_pattern;
+ goto err_sw_encap;
}
return dm;
+err_sw_encap:
+ bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+
err_pattern:
bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
@@ -105,6 +121,14 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
}
+ if (dm->header_encap_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_indirect_encap_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+ }
+
if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev,
@@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_pattern_sw_icm_size);
block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_SW_ENCAP:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ indirect_encap_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_indirect_encap_sw_icm_size);
+ block_map = dm->header_encap_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
header_modify_pattern_sw_icm_start_address);
block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_SW_ENCAP:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ indirect_encap_sw_icm_start_address);
+ block_map = dm->header_encap_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a17152c1c..e285823bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -219,7 +219,6 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
driver_version);
u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
- int remaining_size = driver_ver_sz;
char *string;
if (!MLX5_CAP_GEN(dev, driver_version))
@@ -227,22 +226,9 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
- strncpy(string, "Linux", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, ",", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, KBUILD_MODNAME, remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
- strncat(string, ",", remaining_size);
-
- remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-
- snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL);
+ snprintf(string, driver_ver_sz, "Linux,%s,%u.%u.%u",
+ KBUILD_MODNAME, LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL);
/* Send the command */
MLX5_SET(set_driver_version_in, in, opcode,
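
The hunk above collapses a strncpy/strncat chain into a single bounded snprintf. A standalone demo of the equivalence; the module name and version numbers below are placeholders, not values read from the kernel:

#include <stdio.h>

int main(void)
{
	char buf[64];

	/* One bounded write replaces the strncpy/strncat chain; snprintf
	 * always NUL-terminates and reports the would-be length. */
	int n = snprintf(buf, sizeof(buf), "Linux,%s,%u.%u.%u",
			 "mlx5_core", 6, 8, 9);

	if (n >= (int)sizeof(buf))
		puts("truncated");
	puts(buf);
	return 0;
}
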
@@ -1494,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
if (err)
goto err_register;
+ err = mlx5_crdump_enable(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ err = mlx5_hwmon_dev_register(dev);
+ if (err)
+ mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+
mutex_unlock(&dev->intf_state_mutex);
return 0;
@@ -1519,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
int err;
devl_lock(devlink);
+ devl_register(devlink);
err = mlx5_init_one_devl_locked(dev);
+ if (err)
+ devl_unregister(devlink);
devl_unlock(devlink);
return err;
}
@@ -1531,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
+ mlx5_hwmon_dev_unregister(dev);
+ mlx5_crdump_disable(dev);
mlx5_unregister_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1548,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unregister(devlink);
devl_unlock(devlink);
}
@@ -1694,16 +1694,23 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
}
devl_lock(devlink);
+ devl_register(devlink);
+
err = mlx5_devlink_params_register(priv_to_devlink(dev));
- devl_unlock(devlink);
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto query_hca_caps_err;
+ goto params_reg_err;
}
+ devl_unlock(devlink);
return 0;
+params_reg_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
query_hca_caps_err:
+ devl_unregister(devlink);
+ devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
@@ -1716,6 +1723,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
devl_lock(devlink);
mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ devl_unregister(devlink);
devl_unlock(devlink);
if (dev->state != MLX5_DEVICE_STATE_UP)
return;
@@ -1957,16 +1965,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
- err = mlx5_crdump_enable(dev);
- if (err)
- dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
-
- err = mlx5_hwmon_dev_register(dev);
- if (err)
- mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
-
pci_save_state(pdev);
- devlink_register(devlink);
return 0;
err_init_one:
@@ -1987,16 +1986,9 @@ static void remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(dev);
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
- /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
- * devlink notify APIs.
- * Hence, we must drain them before unregistering the devlink.
- */
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
- devlink_unregister(devlink);
mlx5_sriov_disable(pdev, false);
- mlx5_hwmon_dev_unregister(dev);
- mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
mlx5_pci_close(dev);
mlx5_mdev_uninit(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6b14e347d..a79b79593 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -243,6 +243,7 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
+int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 4dcf995cb..6bac8ad70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -19,6 +19,7 @@
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
return;
}
+ vecidx -= MLX5_IRQ_VEC_COMP_BASE;
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
struct irq_affinity_desc af_desc;
- int offset = 1;
+ int offset = MLX5_IRQ_VEC_COMP_BASE;
if (!pool->xa_num_irqs.max)
offset = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7d8c73281..7fba1c46e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1206,3 +1206,13 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
*speed = max_speed;
return 0;
}
+
+int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir)
+{
+ u32 in[MLX5_ST_SZ_DW(mpir_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(mpir_reg);
+
+ MLX5_SET(mpir_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sz, mpir, sz, MLX5_REG_MPIR, 0, 0);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 169c2c68e..7ebe71280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -75,7 +75,6 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto peer_devlink_set_err;
}
- devlink_register(devlink);
return 0;
peer_devlink_set_err:
@@ -95,24 +94,28 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+ struct devlink *devlink;
- mlx5_drain_health_wq(sf_dev->mdev);
- devlink_unregister(devlink);
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
+ devlink = priv_to_devlink(mdev);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
+ if (mlx5_dev_is_lightweight(mdev))
+ mlx5_uninit_one_light(mdev);
else
- mlx5_uninit_one(sf_dev->mdev);
- iounmap(sf_dev->mdev->iseg);
- mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_uninit_one(mdev);
+ iounmap(mdev->iseg);
+ mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
- mlx5_unload_one(sf_dev->mdev, false);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index d2b65a0ce..2ebb61ef3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1177,7 +1177,6 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
- struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
@@ -1256,11 +1255,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 * one that is done in the TX.
 * So, if one of the ft targets is wire, put it at the end of the dest list.
*/
- if (is_ft_wire && num_dst_ft > 1) {
- tmp_hw_dest = hw_dests[last_dest];
- hw_dests[last_dest] = hw_dests[num_of_dests - 1];
- hw_dests[num_of_dests - 1] = tmp_hw_dest;
- }
+ if (is_ft_wire && num_dst_ft > 1)
+ swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]);
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21753f327..1005bb693 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
+
+ *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.sd_group);
+out:
+ kvfree(out);
+ return err;
+}
+
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3d09fa545..ba3038686 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
@@ -139,13 +140,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
control |= MLXBF_GIGE_CONTROL_PORT_EN;
writeq(control, priv->base + MLXBF_GIGE_CONTROL);
- err = mlxbf_gige_request_irqs(priv);
- if (err)
- return err;
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
if (err)
- goto free_irqs;
+ return err;
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
@@ -157,7 +155,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_tx_init(priv);
if (err)
- goto free_irqs;
+ goto phy_deinit;
err = mlxbf_gige_rx_init(priv);
if (err)
goto tx_deinit;
@@ -166,6 +164,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
napi_enable(&priv->napi);
netif_start_queue(netdev);
+ err = mlxbf_gige_request_irqs(priv);
+ if (err)
+ goto napi_deinit;
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -182,11 +184,17 @@ static int mlxbf_gige_open(struct net_device *netdev)
return 0;
+napi_deinit:
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ mlxbf_gige_rx_deinit(priv);
+
tx_deinit:
mlxbf_gige_tx_deinit(priv);
-free_irqs:
- mlxbf_gige_free_irqs(priv);
+phy_deinit:
+ phy_stop(phydev);
return err;
}
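
The mlxbf_gige_open() hunks above move mlxbf_gige_request_irqs() to the end of the bring-up sequence, so an interrupt cannot fire before NAPI and the rings exist, and add a napi_deinit label so the unwind still runs in exact reverse order. The general goto-ladder shape, as a standalone sketch with illustrative stage names:

#include <stdio.h>

static int init_rings(void) { return 0; }
static void fini_rings(void) { puts("rings down"); }
static int init_napi(void) { return 0; }
static void fini_napi(void) { puts("napi down"); }
static int request_irqs(void) { return -1; /* simulate failure */ }

static int open_dev(void)
{
	int err;

	err = init_rings();
	if (err)
		return err;
	err = init_napi();
	if (err)
		goto err_rings;
	err = request_irqs(); /* last: every consumer of the IRQ is ready */
	if (err)
		goto err_napi;
	return 0;

err_napi:
	fini_napi();
err_rings:
	fini_rings();
	return err;
}

int main(void)
{
	printf("open_dev: %d\n", open_dev());
	return 0;
}
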
@@ -485,8 +493,13 @@ static void mlxbf_gige_shutdown(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
- writeq(0, priv->base + MLXBF_GIGE_INT_EN);
- mlxbf_gige_clean_port(priv);
+ rtnl_lock();
+ netif_device_detach(priv->netdev);
+
+ if (netif_running(priv->netdev))
+ dev_close(priv->netdev);
+
+ rtnl_unlock();
}
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index e827c78be..e3271c845 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -282,6 +282,12 @@ MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, lag_mode_support, 0x18, 1, 1);
+/* cmd_mbox_query_fw_cff_support
+ * 0: CONFIG_PROFILE.flood_mode = 5 (CFF) is not supported by FW
+ * 1: CONFIG_PROFILE.flood_mode = 5 (CFF) is supported by FW
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cff_support, 0x18, 2, 1);
+
/* cmd_mbox_query_fw_clr_int_base_offset
* Clear Interrupt register's offset from clr_int_bar register
* in PCI address space.
@@ -779,6 +785,11 @@ enum mlxsw_cmd_mbox_config_profile_flood_mode {
* used.
*/
MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+ /* CFF - Compressed FID Flood (CFF) mode.
+ * Reserved when legacy bridge model is used.
+ * Supported only by Spectrum-2+.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF = 5,
};
/* cmd_mbox_config_profile_flood_mode
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f23421f03..4a79c0d7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -211,6 +211,13 @@ mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_lag_mode);
+enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->flood_mode(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_flood_mode);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
@@ -842,7 +849,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 764d14bd5..6d1122559 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -38,6 +38,8 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
enum mlxsw_cmd_mbox_config_profile_lag_mode
mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core);
+enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_core_flood_mode(struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
@@ -322,7 +324,12 @@ struct mlxsw_config_profile {
u16 max_regions;
u8 max_flood_tables;
u8 max_vid_flood_tables;
+
+ /* Flood mode to use if used_flood_mode is set. If flood_mode_prefer_cff
+ * is set, this is the fallback flood mode used when CFF is unsupported.
+ */
u8 flood_mode;
+
u8 max_fid_offset_flood_tables;
u16 fid_offset_flood_table_size;
u8 max_fid_flood_tables;
@@ -338,6 +345,7 @@ struct mlxsw_config_profile {
u8 kvd_hash_double_parts;
u8 cqe_time_stamp_type;
bool lag_mode_prefer_sw;
+ bool flood_mode_prefer_cff;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -489,6 +497,7 @@ struct mlxsw_bus {
u32 (*read_utc_sec)(void *bus_priv);
u32 (*read_utc_nsec)(void *bus_priv);
enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv);
+ enum mlxsw_cmd_mbox_config_profile_flood_mode (*flood_mode)(void *priv);
u8 features;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index faa63ea9b..1915fa41c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -95,7 +95,7 @@ struct mlxsw_afa_set {
*/
has_trap:1,
has_police:1;
- unsigned int ref_count;
+ refcount_t ref_count;
struct mlxsw_afa_set *next; /* Pointer to the next set. */
struct mlxsw_afa_set *prev; /* Pointer to the previous set,
* note that set may have multiple
@@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
struct rhash_head ht_node;
struct mlxsw_afa_fwd_entry_ht_key ht_key;
u32 kvdl_index;
- unsigned int ref_count;
+ refcount_t ref_count;
};
static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
@@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
/* Need to initialize the set to pass by default */
mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
set->ht_key.is_first = is_first;
- set->ref_count = 1;
+ refcount_set(&set->ref_count, 1);
return set;
}
@@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_set *set)
{
- if (--set->ref_count)
+ if (!refcount_dec_and_test(&set->ref_count))
return;
if (set->shared)
mlxsw_afa_set_unshare(mlxsw_afa, set);
@@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
mlxsw_afa_set_ht_params);
if (set) {
- set->ref_count++;
+ refcount_inc(&set->ref_count);
mlxsw_afa_set_put(mlxsw_afa, orig_set);
} else {
set = orig_set;
@@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
if (!fwd_entry)
return ERR_PTR(-ENOMEM);
fwd_entry->ht_key.local_port = local_port;
- fwd_entry->ref_count = 1;
+ refcount_set(&fwd_entry->ref_count, 1);
err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
&fwd_entry->ht_node,
@@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
mlxsw_afa_fwd_entry_ht_params);
if (fwd_entry) {
- fwd_entry->ref_count++;
+ refcount_inc(&fwd_entry->ref_count);
return fwd_entry;
}
return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
@@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_fwd_entry *fwd_entry)
{
- if (--fwd_entry->ref_count)
+ if (!refcount_dec_and_test(&fwd_entry->ref_count))
return;
mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}
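
The conversion above (and the matching one in core_acl_flex_keys.c below) replaces open-coded unsigned int reference counts with refcount_t, whose kernel implementation saturates on overflow and warns on increment-from-zero. A userspace approximation of the get/put discipline; the saturation checks of the real refcount_t are omitted here:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_uint ref; };

static struct obj *obj_create(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_store(&o->ref, 1); /* refcount_set(&ref, 1) */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->ref, 1); /* refcount_inc() */
}

static void obj_put(struct obj *o)
{
	/* refcount_dec_and_test(): only the final put frees */
	if (atomic_fetch_sub(&o->ref, 1) == 1) {
		puts("freeing");
		free(o);
	}
}

int main(void)
{
	struct obj *o = obj_create();

	if (!o)
		return 1;
	obj_get(o);
	obj_put(o); /* still referenced */
	obj_put(o); /* last put frees */
	return 0;
}
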
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 0d5e6f9b4..947500f8e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include "item.h"
#include "core_acl_flex_keys.h"
@@ -107,7 +108,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
struct mlxsw_afk_key_info {
struct list_head list;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int blocks_count;
int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
* is index inside "blocks"
@@ -334,7 +335,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
if (err)
goto err_picker;
list_add(&key_info->list, &mlxsw_afk->key_info_list);
- key_info->ref_count = 1;
+ refcount_set(&key_info->ref_count, 1);
return key_info;
err_picker:
@@ -356,7 +357,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
if (key_info) {
- key_info->ref_count++;
+ refcount_inc(&key_info->ref_count);
return key_info;
}
return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
@@ -365,7 +366,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
{
- if (--key_info->ref_count)
+ if (!refcount_dec_and_test(&key_info->ref_count))
return;
mlxsw_afk_key_info_destroy(key_info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 53b150b7a..6c06b0592 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
.got_inactive = mlxsw_env_got_inactive,
};
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
{
char mcam_pl[MLXSW_REG_MCAM_LEN];
- bool mcia_128b_supported;
+ bool mcia_128b_supported = false;
int err;
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
- &mcia_128b_supported);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+ &mcia_128b_supported);
mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
- return 0;
}
int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_type_set;
- err = mlxsw_env_max_module_eeprom_len_query(env);
- if (err)
- goto err_eeprom_len_query;
-
+ mlxsw_env_max_module_eeprom_len_query(env);
env->line_cards[0]->active = true;
return 0;
-err_eeprom_len_query:
err_type_set:
mlxsw_env_module_event_disable(env, 0);
err_mlxsw_env_module_event_enable:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e4b25e187..f42a1b1c9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -106,7 +106,9 @@ struct mlxsw_pci {
u64 utc_sec_offset;
u64 utc_nsec_offset;
bool lag_mode_support;
+ bool cff_support;
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
+ enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -130,6 +132,7 @@ struct mlxsw_pci {
const struct pci_device_id *id;
enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
u8 num_sdq_cqs; /* Number of CQs used for SDQs */
+ bool skip_reset;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -1245,11 +1248,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
mbox, profile->fid_flood_table_size);
}
- if (profile->used_flood_mode) {
+ if (profile->flood_mode_prefer_cff && mlxsw_pci->cff_support) {
+ enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode =
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;
+
+ mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, flood_mode);
+ mlxsw_pci->flood_mode = flood_mode;
+ } else if (profile->used_flood_mode) {
mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_flood_mode_set(
mbox, profile->flood_mode);
+ mlxsw_pci->flood_mode = profile->flood_mode;
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
}
if (profile->used_max_ib_mc) {
mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
@@ -1476,11 +1490,47 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
- const struct pci_device_id *id)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ int err;
+
+ mlxsw_reg_mrsr_pack(mrsr_pl,
+ MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
+ err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ if (err)
+ return err;
+
+ device_lock_assert(&pdev->dev);
+
+ pci_cfg_access_lock(pdev);
+ pci_save_state(pdev);
+
+ err = __pci_reset_function_locked(pdev);
+ if (err)
+ pci_err(pdev, "PCI function reset failed with %d\n", err);
+
+ pci_restore_state(pdev);
+ pci_cfg_access_unlock(pdev);
+
+ return err;
+}
+
+static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
+{
+ char mrsr_pl[MLXSW_REG_MRSR_LEN];
+
+ mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
+ return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+}
+
+static int
+mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
+{
+ struct pci_dev *pdev = mlxsw_pci->pdev;
+ char mcam_pl[MLXSW_REG_MCAM_LEN];
+ bool pci_reset_supported = false;
u32 sys_status;
int err;
@@ -1491,8 +1541,24 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return err;
}
- mlxsw_reg_mrsr_pack(mrsr_pl);
- err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+ /* PCI core already issued a PCI reset, do not issue another reset. */
+ if (mlxsw_pci->skip_reset)
+ return 0;
+
+ mlxsw_reg_mcam_pack(mcam_pl,
+ MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
+ err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
+ if (!err)
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+ &pci_reset_supported);
+
+ if (pci_reset_supported) {
+ pci_dbg(pdev, "Starting PCI reset flow\n");
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ } else {
+ pci_dbg(pdev, "Starting software reset flow\n");
+ err = mlxsw_pci_reset_sw(mlxsw_pci);
+ }
if (err)
return err;
@@ -1537,9 +1603,9 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (!mbox)
return -ENOMEM;
- err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
+ err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
if (err)
- goto err_sw_reset;
+ goto err_reset;
err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
if (err < 0) {
@@ -1601,6 +1667,9 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->lag_mode_support =
mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
+ mlxsw_pci->cff_support =
+ mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1672,7 +1741,7 @@ err_iface_rev:
err_query_fw:
mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
-err_sw_reset:
+err_reset:
mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
@@ -1917,6 +1986,14 @@ mlxsw_pci_lag_mode(void *bus_priv)
return mlxsw_pci->lag_mode;
}
+static enum mlxsw_cmd_mbox_config_profile_flood_mode
+mlxsw_pci_flood_mode(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci->flood_mode;
+}
+
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
@@ -1929,6 +2006,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.read_utc_sec = mlxsw_pci_read_utc_sec,
.read_utc_nsec = mlxsw_pci_read_utc_nsec,
.lag_mode = mlxsw_pci_lag_mode,
+ .flood_mode = mlxsw_pci_flood_mode,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
@@ -2059,11 +2137,34 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
kfree(mlxsw_pci);
}
+static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+}
+
+static void mlxsw_pci_reset_done(struct pci_dev *pdev)
+{
+ struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+ mlxsw_pci->skip_reset = true;
+ mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
+ mlxsw_pci, false, NULL, NULL);
+ mlxsw_pci->skip_reset = false;
+}
+
+static const struct pci_error_handlers mlxsw_pci_err_handler = {
+ .reset_prepare = mlxsw_pci_reset_prepare,
+ .reset_done = mlxsw_pci_reset_done,
+};
+
int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
pci_driver->probe = mlxsw_pci_probe;
pci_driver->remove = mlxsw_pci_remove;
pci_driver->shutdown = mlxsw_pci_remove;
+ pci_driver->err_handler = &mlxsw_pci_err_handler;
return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
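
The new reset_prepare/reset_done callbacks above hook the driver into PCI-initiated function resets: the device is unregistered before the core resets the function and re-registered afterwards, with skip_reset telling the re-init path that the function was already reset. The minimal wiring, using the real struct pci_error_handlers fields but with illustrative driver bodies; a driver opts in by pointing its struct pci_driver::err_handler at the table, as mlxsw_pci_driver_register() now does:

#include <linux/pci.h>

/* Illustrative driver state; only skip_reset matters for the pattern. */
struct my_priv { bool skip_reset; };

static void my_reset_prepare(struct pci_dev *pdev)
{
	/* Quiesce and unregister: the PCI core is about to reset us. */
}

static void my_reset_done(struct pci_dev *pdev)
{
	struct my_priv *priv = pci_get_drvdata(pdev);

	priv->skip_reset = true;  /* re-init must not issue its own reset */
	/* ... re-register the device here ... */
	priv->skip_reset = false;
}

static const struct pci_error_handlers my_err_handler = {
	.reset_prepare = my_reset_prepare,
	.reset_done    = my_reset_done,
};
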
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 25b294fde..8892654c6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1024,6 +1024,8 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
* ------------------------------------------
* The following register controls the association of flooding tables and MIDs
* to packet types used for flooding.
+ *
+ * Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_LEN 0x14
@@ -1862,6 +1864,7 @@ MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
* Access: RW
*
* Note: Reserved when legacy bridge model is used.
+ * Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
@@ -1872,6 +1875,7 @@ MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
* Access: RW
*
* Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ * Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
@@ -1880,6 +1884,8 @@ MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
* Used to point into the flooding table selected by SFGC register if
* the table is of type FID-Offset. Otherwise, this field is reserved.
* Access: RW
+ *
+ * Note: Reserved when CONFIG_PROFILE.flood_mode = CFF.
*/
MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
@@ -1938,6 +1944,35 @@ MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
*/
MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+/* reg_sfmr_cff_mid_base
+ * Pointer to PGT table.
+ * Range: 0..(cap_max_pgt-1)
+ * Access: RW
+ *
+ * Note: Reserved on SwitchX/-2 and Spectrum-1.
+ * Supported when CONFIG_PROFILE.flood_mode = CFF.
+ */
+MLXSW_ITEM32(reg, sfmr, cff_mid_base, 0x20, 0, 16);
+
+/* reg_sfmr_nve_flood_prf_id
+ * FID flooding profile_id for NVE Encap
+ * Range 0..(max_cap_nve_flood_prf-1)
+ * Access: RW
+ *
+ * Note: Reserved on SwitchX/-2 and Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, nve_flood_prf_id, 0x24, 8, 2);
+
+/* reg_sfmr_cff_prf_id
+ * Compressed Fid Flooding profile_id
+ * Range 0..(max_cap_nve_flood_prf-1)
+ * Access: RW
+ *
+ * Note: Reserved on SwitchX/-2 and Spectrum-1.
+ * Supported only when CONFIG_PROFILE.flood_mode = CFF.
+ */
+MLXSW_ITEM32(reg, sfmr, cff_prf_id, 0x24, 0, 2);
+
/* reg_sfmr_smpe_valid
* SMPE is valid.
* Access: RW
@@ -1959,18 +1994,11 @@ MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset, bool flood_rsp,
- enum mlxsw_reg_bridge_type bridge_type,
bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
mlxsw_reg_sfmr_fid_set(payload, fid);
- mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
- mlxsw_reg_sfmr_vtfp_set(payload, false);
- mlxsw_reg_sfmr_vv_set(payload, false);
- mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
- mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
@@ -2168,6 +2196,50 @@ static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1,
mlxsw_reg_spvc_et0_set(payload, et0);
}
+/* SFFP - Switch FID Flooding Profiles Register
+ * --------------------------------------------
+ * The SFFP register populates the fid flooding profile tables used for the NVE
+ * flooding and Compressed-FID Flooding (CFF).
+ *
+ * Reserved on Spectrum-1.
+ */
+#define MLXSW_REG_SFFP_ID 0x2029
+#define MLXSW_REG_SFFP_LEN 0x0C
+
+MLXSW_REG_DEFINE(sffp, MLXSW_REG_SFFP_ID, MLXSW_REG_SFFP_LEN);
+
+/* reg_sffp_profile_id
+ * Profile ID, a.k.a. SFMR.nve_flood_prf_id or SFMR.cff_prf_id.
+ * Range: 0..(max_cap_nve_flood_prf-1)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sffp, profile_id, 0x00, 16, 2);
+
+/* reg_sffp_type
+ * The traffic type to reach the flooding table.
+ * Same as SFGC.type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sffp, type, 0x00, 0, 4);
+
+/* reg_sffp_flood_offset
+ * Flood offset. Offset to add to SFMR.cff_mid_base to get the final PGT address
+ * for FID flooding; or offset to add to SFMR.nve_tunnel_flood_ptr to get the
+ * KVD pointer for the NVE underlay.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sffp, flood_offset, 0x04, 0, 3);
+
+static inline void mlxsw_reg_sffp_pack(char *payload, u8 profile_id,
+ enum mlxsw_reg_sfgc_type type,
+ u8 flood_offset)
+{
+ MLXSW_REG_ZERO(sffp, payload);
+ mlxsw_reg_sffp_profile_id_set(payload, profile_id);
+ mlxsw_reg_sffp_type_set(payload, type);
+ mlxsw_reg_sffp_flood_offset_set(payload, flood_offset);
+}
+
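For orientation, a minimal usage sketch of the new SFFP helper (not part of this patch; 'mlxsw_sp' and 'err' are hypothetical caller state, and the bridge profile ID constant is only introduced later in this patch, in spectrum_fid.c). In CFF mode the device resolves the final PGT row as SFMR.cff_mid_base + SFFP.flood_offset:

char sffp_pl[MLXSW_REG_SFFP_LEN];

/* Bind flood offset 1 of the bridge profile to unregistered IPv4
 * multicast traffic.
 */
mlxsw_reg_sffp_pack(sffp_pl, MLXSW_SP_FID_FLOOD_PROFILE_ID_BRIDGE,
		    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, 1);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sffp), sffp_pl);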
/* SPEVET - Switch Port Egress VLAN EtherType
* ------------------------------------------
* The switch port egress VLAN EtherType configures which EtherType to push at
@@ -10122,6 +10194,15 @@ mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid,
MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN);
+enum mlxsw_reg_mrsr_command {
+ /* Switch soft reset, does not reset PCI firmware. */
+ MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET = 1,
+	/* Reset will be done when the PCI link is disabled.
+	 * This command also resets the PCI firmware.
+ */
+ MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE = 6,
+};
+
/* reg_mrsr_command
* Reset/shutdown command
* 0 - do nothing
@@ -10130,10 +10211,11 @@ MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN);
*/
MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4);
-static inline void mlxsw_reg_mrsr_pack(char *payload)
+static inline void mlxsw_reg_mrsr_pack(char *payload,
+ enum mlxsw_reg_mrsr_command command)
{
MLXSW_REG_ZERO(mrsr, payload);
- mlxsw_reg_mrsr_command_set(payload, 1);
+ mlxsw_reg_mrsr_command_set(payload, command);
}
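A hedged sketch of how a caller might use the widened pack helper, gating the new command on the firmware capability bit added further below ('mlxsw_core', 'err' and 'pci_reset_supported' are hypothetical caller state, not names from this patch):

char mrsr_pl[MLXSW_REG_MRSR_LEN];
enum mlxsw_reg_mrsr_command cmd;

/* Fall back to the legacy software reset when the firmware does not
 * advertise MRSR.command=6 support (MLXSW_REG_MCAM_PCI_RESET).
 */
cmd = pci_reset_supported ? MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE :
			    MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET;
mlxsw_reg_mrsr_pack(mrsr_pl, cmd);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mrsr), mrsr_pl);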
/* MLCR - Management LED Control Register
@@ -10584,6 +10666,8 @@ MLXSW_ITEM32(reg, mcam, feature_group, 0x00, 16, 8);
enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
/* If set, MCIA supports 128 bytes payloads. Otherwise, 48 bytes. */
MLXSW_REG_MCAM_MCIA_128B = 34,
+ /* If set, MRSR.command=6 is supported. */
+ MLXSW_REG_MCAM_PCI_RESET = 48,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
@@ -12934,6 +13018,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spfsr),
MLXSW_REG(spvc),
+ MLXSW_REG(sffp),
MLXSW_REG(spevet),
MLXSW_REG(smpe),
MLXSW_REG(smid2),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 89dd2777e..9d7977ebe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -27,6 +27,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_FID,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
+ MLXSW_RES_ID_MAX_NVE_FLOOD_PRF,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -88,6 +89,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_FID] = 0x2512,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
+ [MLXSW_RES_ID_MAX_NVE_FLOOD_PRF] = 0x2522,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index cec72d99d..5d3413636 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3190,10 +3190,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_lag_init;
}
- err = mlxsw_sp_fids_init(mlxsw_sp);
+ err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
- goto err_fids_init;
+ goto err_fid_core_init;
}
err = mlxsw_sp_policers_init(mlxsw_sp);
@@ -3379,8 +3379,8 @@ err_devlink_traps_init:
err_traps_init:
mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
- mlxsw_sp_fids_fini(mlxsw_sp);
-err_fids_init:
+ mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
+err_fid_core_init:
mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_pgt_fini(mlxsw_sp);
@@ -3416,7 +3416,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
mlxsw_sp->pgt_smpe_index_valid = true;
@@ -3450,7 +3450,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
mlxsw_sp->pgt_smpe_index_valid = false;
@@ -3484,7 +3484,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
mlxsw_sp->pgt_smpe_index_valid = false;
@@ -3518,7 +3518,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
- mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
+ mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
mlxsw_sp->pgt_smpe_index_valid = false;
@@ -3552,7 +3552,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_devlink_traps_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
- mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -3598,6 +3598,7 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_cqe_time_stamp_type = 1,
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
.lag_mode_prefer_sw = true,
+ .flood_mode_prefer_cff = true,
};
/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
@@ -3626,6 +3627,7 @@ static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
.used_cqe_time_stamp_type = 1,
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
.lag_mode_prefer_sw = true,
+ .flood_mode_prefer_cff = true,
};
static void
@@ -4515,6 +4517,10 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
+ err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
+ if (err)
+ goto err_fid_port_join_lag;
+
/* Port is no longer usable as a router interface */
if (mlxsw_sp_port->default_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
@@ -4534,6 +4540,8 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
err_replay:
mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
+ mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
+err_fid_port_join_lag:
lag->ref_count--;
mlxsw_sp_port->lagged = 0;
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
@@ -4569,6 +4577,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
*/
mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
+ mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
+
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index c70333b46..a0c9775fa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -205,7 +205,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
- const struct mlxsw_sp_fid_family **fid_family_arr;
+ const struct mlxsw_sp_fid_core_ops *fid_core_ops;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
@@ -252,6 +252,11 @@ struct mlxsw_sp_ptp_ops {
const struct mlxsw_tx_info *tx_info);
};
+struct mlxsw_sp_fid_core_ops {
+ int (*init)(struct mlxsw_sp *mlxsw_sp);
+ void (*fini)(struct mlxsw_sp *mlxsw_sp);
+};
+
static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
@@ -508,6 +513,10 @@ enum mlxsw_sp_flood_type {
MLXSW_SP_FLOOD_TYPE_UC,
MLXSW_SP_FLOOD_TYPE_BC,
MLXSW_SP_FLOOD_TYPE_MC,
+ /* For RSP FIDs in CFF mode. */
+ MLXSW_SP_FLOOD_TYPE_NOT_UC,
+ /* For NVE traffic. */
+ MLXSW_SP_FLOOD_TYPE_ANY,
};
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
@@ -753,6 +762,8 @@ union mlxsw_sp_l3addr {
};
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
+int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
+ u16 *port, bool *is_lag);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1319,11 +1330,11 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_fid_port_join_lag(const struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_fid_port_leave_lag(const struct mlxsw_sp_port *mlxsw_sp_port);
-extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
-extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+extern const struct mlxsw_sp_fid_core_ops mlxsw_sp1_fid_core_ops;
+extern const struct mlxsw_sp_fid_core_ops mlxsw_sp2_fid_core_ops;
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 7c59c8a13..b01b000bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
struct rhash_head ht_node; /* Member of acl HT */
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int min_prio;
unsigned int max_prio;
unsigned long priv[];
@@ -99,7 +100,7 @@ static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
/* We hold a reference on ruleset ourselves */
- return ruleset->ref_count == 2;
+ return refcount_read(&ruleset->ref_count) == 2;
}
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
@@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
ruleset = kzalloc(alloc_size, GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
- ruleset->ref_count = 1;
+ refcount_set(&ruleset->ref_count, 1);
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
@@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
- ruleset->ref_count++;
+ refcount_inc(&ruleset->ref_count);
}
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
- if (--ruleset->ref_count)
+ if (!refcount_dec_and_test(&ruleset->ref_count))
return;
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
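The hunks above convert the ruleset reference count from a plain unsigned int to refcount_t, which saturates and WARNs on overflow and underflow instead of silently wrapping. A minimal self-contained sketch of the same hold/put pattern, using a hypothetical 'struct obj' rather than driver types:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref_count;	/* set to 1 with refcount_set() at creation */
};

static void obj_hold(struct obj *o)
{
	refcount_inc(&o->ref_count);	/* WARNs if the count was 0 */
}

static void obj_put(struct obj *o)
{
	if (!refcount_dec_and_test(&o->ref_count))
		return;
	kfree(o);	/* last reference dropped */
}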
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50ea1eff0..92a406f02 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,8 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -57,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
- if (id < tcam->max_regions) {
- __set_bit(id, tcam->used_regions);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_regions);
+ ida_free(&tcam->used_regions, id);
}
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
u16 *p_id)
{
- u16 id;
+ int id;
- id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
- if (id < tcam->max_groups) {
- __set_bit(id, tcam->used_groups);
- *p_id = id;
- return 0;
- }
- return -ENOBUFS;
+ id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ *p_id = id;
+
+ return 0;
}
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
u16 id)
{
- __clear_bit(id, tcam->used_groups);
+ ida_free(&tcam->used_groups, id);
}
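The conversion above replaces statically sized bitmaps with IDAs, so the ID space is allocated on demand and no upfront bitmap_zalloc() can fail. A minimal lifecycle sketch of the IDA calls as used here, with a hypothetical 'max_ids' bound:

#include <linux/idr.h>
#include <linux/gfp.h>

static int example_ida_lifecycle(unsigned int max_ids)
{
	struct ida ida;
	int id;

	ida_init(&ida);
	/* Lowest free ID in [0, max_ids - 1]; negative errno on failure. */
	id = ida_alloc_max(&ida, max_ids - 1, GFP_KERNEL);
	if (id < 0)
		return id;
	ida_free(&ida, id);
	ida_destroy(&ida);
	return 0;
}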
struct mlxsw_sp_acl_tcam_pattern {
@@ -155,7 +159,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@@ -176,7 +180,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
@@ -714,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -725,6 +731,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+	 * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -746,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -769,7 +786,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
- vregion->ref_count = 1;
+ refcount_set(&vregion->ref_count, 1);
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@@ -819,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -856,7 +877,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
*/
return ERR_PTR(-EOPNOTSUPP);
}
- vregion->ref_count++;
+ refcount_inc(&vregion->ref_count);
return vregion;
}
@@ -871,7 +892,7 @@ static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- if (--vregion->ref_count)
+ if (!refcount_dec_and_test(&vregion->ref_count))
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
@@ -924,7 +945,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->vgroup = vgroup;
- vchunk->ref_count = 1;
+ refcount_set(&vchunk->ref_count, 1);
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
@@ -1008,7 +1029,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
- vchunk->ref_count++;
+ refcount_inc(&vchunk->ref_count);
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
@@ -1019,7 +1040,7 @@ static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- if (--vchunk->ref_count)
+ if (!refcount_dec_and_test(&vchunk->ref_count))
return;
mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
@@ -1153,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1188,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1206,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1229,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1238,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1278,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1291,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1319,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1339,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 			/* Let the rollback be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1388,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1440,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
@@ -1548,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions) {
- err = -ENOMEM;
- goto err_alloc_used_regions;
- }
+ ida_init(&tcam->used_regions);
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
+ ida_init(&tcam->used_groups);
tcam->max_groups = max_groups;
tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_MAX_GROUP_SIZE);
@@ -1574,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
err_rehash_params_register:
mutex_destroy(&tcam->lock);
@@ -1590,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
+ ida_destroy(&tcam->used_groups);
+ ida_destroy(&tcam->used_regions);
mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
mutex_destroy(&tcam->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 462bf4484..79a1d8606 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -6,15 +6,16 @@
#include <linux/list.h>
#include <linux/parman.h>
+#include <linux/idr.h>
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
struct mlxsw_sp_acl_tcam {
- unsigned long *used_regions; /* bit array */
+ struct ida used_regions;
unsigned int max_regions;
- unsigned long *used_groups; /* bit array */
+ struct ida used_groups;
unsigned int max_groups;
unsigned int max_group_size;
struct mutex lock; /* guards vregion list */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index e954b8cd2..65562ab20 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -11,6 +11,7 @@
#include <linux/refcount.h>
#include "spectrum.h"
+#include "spectrum_router.h"
#include "reg.h"
struct mlxsw_sp_fid_family;
@@ -71,12 +72,12 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_flood_table_type table_type;
+ enum mlxsw_flood_table_type table_type; /* For flood_mode!=CFF. */
int table_index;
};
struct mlxsw_sp_fid_ops {
- void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
+ int (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
int (*configure)(struct mlxsw_sp_fid *fid);
void (*deconfigure)(struct mlxsw_sp_fid *fid);
int (*index_alloc)(struct mlxsw_sp_fid *fid, const void *arg,
@@ -95,6 +96,34 @@ struct mlxsw_sp_fid_ops {
const struct net_device *nve_dev);
int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_rif *rif);
+ int (*flood_table_init)(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table);
+ int (*pgt_size)(const struct mlxsw_sp_fid_family *fid_family,
+ u16 *p_pgt_size);
+ u16 (*fid_mid)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_flood_table *flood_table);
+ void (*fid_pack)(char *sfmr_pl, const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op);
+
+	/* These are specific to RFID families and are assumed to be
+	 * implemented only by RFID families, if at all.
+ */
+ int (*fid_port_init)(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port);
+ void (*fid_port_fini)(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port);
+};
+
+enum mlxsw_sp_fid_flood_profile_id {
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_BRIDGE = 1,
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_RSP,
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_NVE,
+};
+
+struct mlxsw_sp_fid_flood_profile {
+ const struct mlxsw_sp_flood_table *flood_tables;
+ int nr_flood_tables;
+ const enum mlxsw_sp_fid_flood_profile_id profile_id; /* For CFF mode. */
};
struct mlxsw_sp_fid_family {
@@ -104,12 +133,11 @@ struct mlxsw_sp_fid_family {
u16 end_index;
struct list_head fids_list;
unsigned long *fids_bitmap;
- const struct mlxsw_sp_flood_table *flood_tables;
- int nr_flood_tables;
+ const struct mlxsw_sp_fid_flood_profile *flood_profile;
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- bool flood_rsp;
+ bool flood_rsp; /* For flood_mode!=CFF. */
enum mlxsw_reg_bridge_type bridge_type;
u16 pgt_base;
bool smpe_index_valid;
@@ -131,10 +159,31 @@ static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
};
+static const int mlxsw_sp_sfgc_not_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
+static const int mlxsw_sp_sfgc_any_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
+ [MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
+ [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
+};
+
static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_UC] = mlxsw_sp_sfgc_uc_packet_types,
[MLXSW_SP_FLOOD_TYPE_BC] = mlxsw_sp_sfgc_bc_packet_types,
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_NOT_UC] = mlxsw_sp_sfgc_not_uc_packet_types,
+ [MLXSW_SP_FLOOD_TYPE_ANY] = mlxsw_sp_sfgc_any_packet_types,
};
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
@@ -305,10 +354,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
int i;
- for (i = 0; i < fid_family->nr_flood_tables; i++) {
- if (fid_family->flood_tables[i].packet_type != packet_type)
+ for (i = 0; i < fid_family->flood_profile->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_profile->flood_tables[i];
+ if (flood_table->packet_type != packet_type)
continue;
- return &fid_family->flood_tables[i];
+ return flood_table;
}
return NULL;
@@ -320,24 +372,62 @@ mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
return fid_family->end_index - fid_family->start_index + 1;
}
-static u16
-mlxsw_sp_fid_family_pgt_size(const struct mlxsw_sp_fid_family *fid_family)
+static int
+mlxsw_sp_fid_8021d_pgt_size(const struct mlxsw_sp_fid_family *fid_family,
+ u16 *p_pgt_size)
{
u16 num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
- return num_fids * fid_family->nr_flood_tables;
+ *p_pgt_size = num_fids * fid_family->flood_profile->nr_flood_tables;
+ return 0;
+}
+
+static unsigned int mlxsw_sp_fid_rfid_port_offset_cff(unsigned int local_port)
+{
+ /* Port 0 is the CPU port. Since we never create RIFs based off that
+ * port, we don't need to count it.
+ */
+ return WARN_ON_ONCE(!local_port) ? 0 : local_port - 1;
+}
+
+static int
+mlxsw_sp_fid_rfid_pgt_size_cff(const struct mlxsw_sp_fid_family *fid_family,
+ u16 *p_pgt_size)
+{
+ struct mlxsw_core *core = fid_family->mlxsw_sp->core;
+ unsigned int max_ports;
+ u16 pgt_size;
+ u16 max_lags;
+ int err;
+
+ max_ports = mlxsw_core_max_ports(core);
+
+ err = mlxsw_core_max_lag(core, &max_lags);
+ if (err)
+ return err;
+
+ pgt_size = (mlxsw_sp_fid_rfid_port_offset_cff(max_ports) + max_lags) *
+ fid_family->flood_profile->nr_flood_tables;
+ *p_pgt_size = pgt_size;
+ return 0;
}
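A worked sizing example for the helper above, under purely hypothetical capacities (66 local ports including CPU port 0, 128 LAGs, and the two-table RSP flood profile defined later in this patch):

static u16 example_rfid_pgt_size_cff(void)
{
	unsigned int max_ports = 66;	/* hypothetical; includes CPU port 0 */
	u16 max_lags = 128;		/* hypothetical */
	int nr_flood_tables = 2;	/* RSP profile: UC + NOT_UC */

	/* Port 0 never gets a RIF, so only max_ports - 1 port slots are
	 * needed; LAG slots follow the port slots:
	 * (65 + 128) * 2 = 386 PGT entries.
	 */
	return (max_ports - 1 + max_lags) * nr_flood_tables;
}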
static u16
-mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
- const struct mlxsw_sp_flood_table *flood_table,
- u16 fid_offset)
+mlxsw_sp_fid_pgt_base_ctl(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
{
u16 num_fids;
num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
- return fid_family->pgt_base + num_fids * flood_table->table_index +
- fid_offset;
+ return fid_family->pgt_base + num_fids * flood_table->table_index;
+}
+
+static u16
+mlxsw_sp_fid_fid_mid_ctl(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ return mlxsw_sp_fid_pgt_base_ctl(fid->fid_family, flood_table) +
+ fid->fid_offset;
}
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
@@ -348,15 +438,14 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_flood_table *flood_table;
u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables))
+ if (WARN_ON(!fid_family->flood_profile))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
- fid->fid_offset);
+ mid_index = fid_family->ops->fid_mid(fid, flood_table);
return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
fid->fid_index, local_port, member);
}
@@ -410,12 +499,13 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid)
return mlxsw_sp_fid_8021q_fid(fid)->vid;
}
-static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
+ return 0;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -424,18 +514,76 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
+static void mlxsw_sp_fid_pack(char *sfmr_pl,
+ const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
u16 smpe;
smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
- fid->fid_offset, fid->fid_family->flood_rsp,
- fid->fid_family->bridge_type,
+ mlxsw_reg_sfmr_pack(sfmr_pl, op, fid->fid_index,
fid->fid_family->smpe_index_valid, smpe);
+}
+
+static void mlxsw_sp_fid_pack_ctl(char *sfmr_pl,
+ const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op)
+{
+ mlxsw_sp_fid_pack(sfmr_pl, fid, op);
+ mlxsw_reg_sfmr_fid_offset_set(sfmr_pl, fid->fid_offset);
+ mlxsw_reg_sfmr_flood_rsp_set(sfmr_pl, fid->fid_family->flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(sfmr_pl,
+ fid->fid_family->bridge_type);
+}
+
+static u16
+mlxsw_sp_fid_off_pgt_base_cff(const struct mlxsw_sp_fid_family *fid_family,
+ u16 fid_offset)
+{
+ return fid_family->pgt_base +
+ fid_offset * fid_family->flood_profile->nr_flood_tables;
+}
+
+static u16 mlxsw_sp_fid_pgt_base_cff(const struct mlxsw_sp_fid *fid)
+{
+ return mlxsw_sp_fid_off_pgt_base_cff(fid->fid_family, fid->fid_offset);
+}
+
+static void mlxsw_sp_fid_fid_pack_cff(char *sfmr_pl,
+ const struct mlxsw_sp_fid *fid,
+ enum mlxsw_reg_sfmr_op op)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ u16 pgt_base = mlxsw_sp_fid_pgt_base_cff(fid);
+
+ mlxsw_sp_fid_pack(sfmr_pl, fid, op);
+ mlxsw_reg_sfmr_cff_mid_base_set(sfmr_pl, pgt_base);
+ mlxsw_reg_sfmr_cff_prf_id_set(sfmr_pl,
+ fid_family->flood_profile->profile_id);
+ mlxsw_reg_sfmr_nve_flood_prf_id_set(sfmr_pl,
+ MLXSW_SP_FID_FLOOD_PROFILE_ID_NVE);
+}
+
+static u16 mlxsw_sp_fid_rfid_fid_offset_cff(struct mlxsw_sp *mlxsw_sp,
+ u16 port_lag_id, bool is_lag)
+{
+ u16 max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ if (is_lag)
+ return mlxsw_sp_fid_rfid_port_offset_cff(max_ports) +
+ port_lag_id;
+ else
+ return mlxsw_sp_fid_rfid_port_offset_cff(port_lag_id);
+}
+
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ fid->fid_family->ops->fid_pack(sfmr_pl, fid,
+ mlxsw_sp_sfmr_op(valid));
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
@@ -444,15 +592,10 @@ static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
{
struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
- u16 smpe;
- smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+ fid->fid_family->ops->fid_pack(sfmr_pl, fid,
+ MLXSW_REG_SFMR_OP_CREATE_FID);
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
- fid->fid_index, fid->fid_offset,
- fid->fid_family->flood_rsp,
- fid->fid_family->bridge_type,
- fid->fid_family->smpe_index_valid, smpe);
mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
@@ -768,12 +911,13 @@ mlxsw_sp_fid_8021d_fid(const struct mlxsw_sp_fid *fid)
return container_of(fid, struct mlxsw_sp_fid_8021d, common);
}
-static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
+ return 0;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
@@ -1058,7 +1202,37 @@ mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
return 0;
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
+static int
+mlxsw_sp_fid_flood_table_init_ctl(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ const int *sfgc_packet_types;
+ u16 mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_pgt_base_ctl(fid_family, flood_table);
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
+ if (!sfgc_packet_types[i])
+ continue;
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops_ctl = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
@@ -1072,6 +1246,36 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
.vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
+ .flood_table_init = mlxsw_sp_fid_flood_table_init_ctl,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_ctl,
+ .fid_pack = mlxsw_sp_fid_pack_ctl,
+};
+
+static u16
+mlxsw_sp_fid_fid_mid_cff(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ return mlxsw_sp_fid_pgt_base_cff(fid) + flood_table->table_index;
+}
+
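A worked example of the CFF MID computation above, with hypothetical numbers; the device reaches the same PGT row by adding SFFP.flood_offset to SFMR.cff_mid_base:

static u16 example_fid_mid_cff(void)
{
	u16 pgt_base = 100;		/* hypothetical family PGT base */
	u16 fid_offset = 5;		/* hypothetical FID offset */
	int nr_flood_tables = 3;	/* assumed size of the flood profile */
	int table_index = 2;		/* one of the profile's tables */

	/* Per-FID block base: 100 + 5 * 3 = 115; this table's MID:
	 * 115 + 2 = 117.
	 */
	return pgt_base + fid_offset * nr_flood_tables + table_index;
}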
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops_cff = {
+ .setup = mlxsw_sp_fid_8021d_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021d_compare,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_cff,
+ .fid_pack = mlxsw_sp_fid_fid_pack_cff,
};
#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
@@ -1095,6 +1299,45 @@ static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
},
};
+static const
+struct mlxsw_sp_fid_flood_profile mlxsw_sp_fid_8021d_flood_profile = {
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .profile_id = MLXSW_SP_FID_FLOOD_PROFILE_ID_BRIDGE,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_rsp_flood_tables_cff[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_UC,
+ .table_index = 0,
+ },
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_NOT_UC,
+ .table_index = 1,
+ },
+};
+
+static const
+struct mlxsw_sp_fid_flood_profile mlxsw_sp_fid_rsp_flood_profile_cff = {
+ .flood_tables = mlxsw_sp_fid_rsp_flood_tables_cff,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_rsp_flood_tables_cff),
+ .profile_id = MLXSW_SP_FID_FLOOD_PROFILE_ID_RSP,
+};
+
+static const struct mlxsw_sp_flood_table mlxsw_sp_fid_nve_flood_tables_cff[] = {
+ {
+ .packet_type = MLXSW_SP_FLOOD_TYPE_ANY,
+ .table_index = 0,
+ },
+};
+
+static const
+struct mlxsw_sp_fid_flood_profile mlxsw_sp_fid_nve_flood_profile_cff = {
+ .flood_tables = mlxsw_sp_fid_nve_flood_tables_cff,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_nve_flood_tables_cff),
+ .profile_id = MLXSW_SP_FID_FLOOD_PROFILE_ID_NVE,
+};
+
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -1110,9 +1353,35 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_rfid_setup_ctl(struct mlxsw_sp_fid *fid,
+ const void *arg)
{
+ /* In controlled mode, the FW takes care of FID placement. */
fid->fid_offset = 0;
+ return 0;
+}
+
+static int mlxsw_sp_fid_rfid_setup_cff(struct mlxsw_sp_fid *fid,
+ const void *arg)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = *(const u16 *)arg;
+ struct mlxsw_sp_rif *rif;
+ bool is_lag;
+ u16 port;
+ int err;
+
+ rif = mlxsw_sp_rif_by_index(mlxsw_sp, rif_index);
+ if (!rif)
+ return -ENOENT;
+
+ err = mlxsw_sp_rif_subport_port(rif, &port, &is_lag);
+ if (err)
+ return err;
+
+ fid->fid_offset = mlxsw_sp_fid_rfid_fid_offset_cff(mlxsw_sp, port,
+ is_lag);
+ return 0;
}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
@@ -1238,8 +1507,8 @@ mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
return 0;
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
- .setup = mlxsw_sp_fid_rfid_setup,
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops_ctl = {
+ .setup = mlxsw_sp_fid_rfid_setup_ctl,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
@@ -1251,11 +1520,146 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
.nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
.vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
+ .fid_pack = mlxsw_sp_fid_pack_ctl,
+};
+
+static int
+mlxsw_sp_fid_rfid_port_add_cff(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 pgt_addr, u16 smpe, unsigned int local_port)
+{
+ int err;
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe,
+ local_port, true);
+ if (err)
+ return err;
+
+ if (flood_table->packet_type == MLXSW_SP_FLOOD_TYPE_NOT_UC) {
+ u16 router_port = mlxsw_sp_router_port(mlxsw_sp);
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe,
+ router_port, true);
+ if (err)
+ goto err_entry_port_set;
+ }
+
+ return 0;
+
+err_entry_port_set:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe, local_port,
+ false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_rfid_port_del_cff(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 pgt_addr, u16 smpe, u16 local_port)
+{
+ if (flood_table->packet_type == MLXSW_SP_FLOOD_TYPE_NOT_UC) {
+ u16 router_port = mlxsw_sp_router_port(mlxsw_sp);
+
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe,
+ router_port, false);
+ }
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, pgt_addr, smpe, local_port,
+ false);
+}
+
+static int
+mlxsw_sp_fid_rfid_port_memb_ft_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ const struct mlxsw_sp_port *mlxsw_sp_port,
+ bool member)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ u16 fid_pgt_base;
+ u16 fid_offset;
+ u16 pgt_addr;
+ u16 smpe;
+ u16 port;
+
+	/* In-PGT SMPE is only valid on Spectrum-1, while CFF mode is only
+	 * used on Spectrum-2 and later, so no SMPE index is needed here.
+	 */
+ smpe = 0;
+
+ port = mlxsw_sp_port->lagged ? mlxsw_sp_port->lag_id : local_port;
+ fid_offset = mlxsw_sp_fid_rfid_fid_offset_cff(mlxsw_sp, port,
+ mlxsw_sp_port->lagged);
+ fid_pgt_base = mlxsw_sp_fid_off_pgt_base_cff(fid_family, fid_offset);
+ pgt_addr = fid_pgt_base + flood_table->table_index;
+
+ if (member)
+ return mlxsw_sp_fid_rfid_port_add_cff(mlxsw_sp, flood_table,
+ pgt_addr, smpe,
+ local_port);
+
+ mlxsw_sp_fid_rfid_port_del_cff(mlxsw_sp, flood_table, pgt_addr, smpe,
+ local_port);
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_rfid_port_memb_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port,
+ bool member)
+{
+ int i;
+
+ for (i = 0; i < fid_family->flood_profile->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table =
+ &fid_family->flood_profile->flood_tables[i];
+ int err;
+
+ err = mlxsw_sp_fid_rfid_port_memb_ft_cff(fid_family,
+ flood_table,
+ mlxsw_sp_port, member);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp_fid_rfid_port_init_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_fid_rfid_port_memb_cff(fid_family, mlxsw_sp_port, true);
+}
+
+static void
+mlxsw_sp_fid_rfid_port_fini_cff(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_fid_rfid_port_memb_cff(fid_family, mlxsw_sp_port, false);
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops_cff = {
+ .setup = mlxsw_sp_fid_rfid_setup_cff,
+ .configure = mlxsw_sp_fid_rfid_configure,
+ .deconfigure = mlxsw_sp_fid_rfid_deconfigure,
+ .index_alloc = mlxsw_sp_fid_rfid_index_alloc,
+ .compare = mlxsw_sp_fid_rfid_compare,
+ .port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
+ .pgt_size = mlxsw_sp_fid_rfid_pgt_size_cff,
+ .fid_port_init = mlxsw_sp_fid_rfid_port_init_cff,
+ .fid_port_fini = mlxsw_sp_fid_rfid_port_fini_cff,
+ .fid_mid = mlxsw_sp_fid_fid_mid_cff,
+ .fid_pack = mlxsw_sp_fid_fid_pack_cff,
};
-static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+static int mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
{
fid->fid_offset = 0;
+ return 0;
}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
@@ -1312,6 +1716,7 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
.vni_clear = mlxsw_sp_fid_dummy_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+ .fid_pack = mlxsw_sp_fid_pack,
};
static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
@@ -1395,7 +1800,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
__mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops_ctl = {
.setup = mlxsw_sp_fid_8021q_setup,
.configure = mlxsw_sp_fid_8021q_configure,
.deconfigure = mlxsw_sp_fid_8021q_deconfigure,
@@ -1409,6 +1814,29 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
.vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+ .flood_table_init = mlxsw_sp_fid_flood_table_init_ctl,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_ctl,
+ .fid_pack = mlxsw_sp_fid_pack_ctl,
+};
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops_cff = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+ .pgt_size = mlxsw_sp_fid_8021d_pgt_size,
+ .fid_mid = mlxsw_sp_fid_fid_mid_cff,
+ .fid_pack = mlxsw_sp_fid_fid_pack_cff,
};
/* There are 4K-2 802.1Q FIDs */
@@ -1434,10 +1862,9 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_START,
.end_index = MLXSW_SP_FID_8021Q_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_ops,
+ .ops = &mlxsw_sp_fid_8021q_ops_ctl,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
.smpe_index_valid = false,
@@ -1448,10 +1875,9 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = MLXSW_SP_FID_8021D_START,
.end_index = MLXSW_SP_FID_8021D_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
+ .ops = &mlxsw_sp_fid_8021d_ops_ctl,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.smpe_index_valid = false,
};
@@ -1465,47 +1891,45 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
.smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family_ctl = {
.type = MLXSW_SP_FID_TYPE_RFID,
.fid_size = sizeof(struct mlxsw_sp_fid),
.start_index = MLXSW_SP_RFID_START,
.end_index = MLXSW_SP_RFID_END,
.rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
+ .ops = &mlxsw_sp_fid_rfid_ops_ctl,
.flood_rsp = true,
.smpe_index_valid = false,
};
-const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+static const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
[MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
- [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family_ctl,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family_ctl = {
.type = MLXSW_SP_FID_TYPE_8021Q,
.fid_size = sizeof(struct mlxsw_sp_fid_8021q),
.start_index = MLXSW_SP_FID_8021Q_START,
.end_index = MLXSW_SP_FID_8021Q_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_ops,
+ .ops = &mlxsw_sp_fid_8021q_ops_ctl,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
.smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family_ctl = {
.type = MLXSW_SP_FID_TYPE_8021D,
.fid_size = sizeof(struct mlxsw_sp_fid_8021d),
.start_index = MLXSW_SP_FID_8021D_START,
.end_index = MLXSW_SP_FID_8021D_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
.rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
+ .ops = &mlxsw_sp_fid_8021d_ops_ctl,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
.smpe_index_valid = true,
};
@@ -1519,11 +1943,51 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.smpe_index_valid = false,
};
-const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+static const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr_ctl[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family_ctl,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family_ctl,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
- [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family_ctl,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family_cff = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops_cff,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family_cff = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_profile = &mlxsw_sp_fid_8021d_flood_profile,
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops_cff,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family_cff = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .flood_profile = &mlxsw_sp_fid_rsp_flood_profile_cff,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops_cff,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr_cff[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family_cff,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family_cff,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family_cff,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -1571,7 +2035,9 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- fid->fid_family->ops->setup(fid, arg);
+ err = fid->fid_family->ops->setup(fid, arg);
+ if (err)
+ goto err_setup;
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -1589,6 +2055,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
err_rhashtable_insert:
fid->fid_family->ops->deconfigure(fid);
err_configure:
+err_setup:
__clear_bit(fid_index - fid_family->start_index,
fid_family->fids_bitmap);
err_index_alloc:
@@ -1650,36 +2117,6 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp)
}
static int
-mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
- const struct mlxsw_sp_flood_table *flood_table)
-{
- enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- const int *sfgc_packet_types;
- u16 mid_base;
- int err, i;
-
- mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
-
- sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
- for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- char sfgc_pl[MLXSW_REG_SFGC_LEN];
-
- if (!sfgc_packet_types[i])
- continue;
-
- mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
- flood_table->table_type, 0, mid_base);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
{
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
@@ -1687,22 +2124,28 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
int err;
int i;
- if (!fid_family->nr_flood_tables)
- return 0;
+ err = fid_family->ops->pgt_size(fid_family, &pgt_size);
+ if (err)
+ return err;
- pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family);
err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &fid_family->pgt_base,
pgt_size);
if (err)
return err;
- for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ if (!fid_family->flood_profile)
+ return 0;
+
+ for (i = 0; i < fid_family->flood_profile->nr_flood_tables; i++) {
const struct mlxsw_sp_flood_table *flood_table;
- flood_table = &fid_family->flood_tables[i];
- err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
- if (err)
- goto err_flood_table_init;
+ flood_table = &fid_family->flood_profile->flood_tables[i];
+ if (fid_family->ops->flood_table_init) {
+ err = fid_family->ops->flood_table_init(fid_family,
+ flood_table);
+ if (err)
+ goto err_flood_table_init;
+ }
}
return 0;
@@ -1717,11 +2160,12 @@ mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
{
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
u16 pgt_size;
+ int err;
- if (!fid_family->nr_flood_tables)
+ err = fid_family->ops->pgt_size(fid_family, &pgt_size);
+ if (WARN_ON_ONCE(err))
return;
- pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family);
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size);
}
@@ -1744,7 +2188,7 @@ static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
goto err_alloc_fids_bitmap;
}
- if (fid_family->flood_tables) {
+ if (fid_family->flood_profile) {
err = mlxsw_sp_fid_flood_tables_init(fid_family);
if (err)
goto err_fid_flood_tables_init;
@@ -1767,7 +2211,7 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
- if (fid_family->flood_tables)
+ if (fid_family->flood_profile)
mlxsw_sp_fid_flood_tables_fini(fid_family);
bitmap_free(fid_family->fids_bitmap);
@@ -1775,9 +2219,34 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
kfree(fid_family);
}
+static int mlxsw_sp_fid_port_init(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ const enum mlxsw_sp_fid_type type_rfid = MLXSW_SP_FID_TYPE_RFID;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fid_family *rfid_family;
+
+ rfid_family = mlxsw_sp->fid_core->fid_family_arr[type_rfid];
+ if (rfid_family->ops->fid_port_init)
+ return rfid_family->ops->fid_port_init(rfid_family,
+ mlxsw_sp_port);
+ return 0;
+}
+
+static void mlxsw_sp_fid_port_fini(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ const enum mlxsw_sp_fid_type type_rfid = MLXSW_SP_FID_TYPE_RFID;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fid_family *rfid_family;
+
+ rfid_family = mlxsw_sp->fid_core->fid_family_arr[type_rfid];
+ if (rfid_family->ops->fid_port_fini)
+ rfid_family->ops->fid_port_fini(rfid_family, mlxsw_sp_port);
+}
+
int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
/* Track number of FIDs configured on the port with mapping type
* PORT_VID_TO_FID, so that we know when to transition the port
@@ -1785,17 +2254,42 @@ int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port)
*/
mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
- return mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ err = mlxsw_sp_fid_port_init(mlxsw_sp_port);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+ if (err)
+ goto err_vp_mode_set;
+
+ return 0;
+
+err_vp_mode_set:
+ mlxsw_sp_fid_port_fini(mlxsw_sp_port);
+ return err;
}
void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ mlxsw_sp_fid_port_fini(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[mlxsw_sp_port->local_port] = 0;
}
-int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
+int mlxsw_sp_fid_port_join_lag(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_fid_port_init(mlxsw_sp_port);
+}
+
+void mlxsw_sp_fid_port_leave_lag(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_fid_port_fini(mlxsw_sp_port);
+}
+
+static int
+mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_family *fid_family_arr[])
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_fid_core *fid_core;
@@ -1822,8 +2316,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
}
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
- err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp->fid_family_arr[i]);
+ err = mlxsw_sp_fid_family_register(mlxsw_sp, fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
@@ -1848,7 +2341,7 @@ err_rhashtable_fid_init:
return err;
}
-void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_fid_core *fid_core = mlxsw_sp->fid_core;
int i;
@@ -1861,3 +2354,143 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&fid_core->fid_ht);
kfree(fid_core);
}
+
+static int mlxsw_sp1_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fids_init(mlxsw_sp, mlxsw_sp1_fid_family_arr);
+}
+
+const struct mlxsw_sp_fid_core_ops mlxsw_sp1_fid_core_ops = {
+ .init = mlxsw_sp1_fids_init,
+ .fini = mlxsw_sp_fids_fini,
+};
+
+static int mlxsw_sp_fid_check_flood_profile_id(struct mlxsw_sp *mlxsw_sp,
+ int profile_id)
+{
+ u32 max_profiles;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_FLOOD_PRF))
+ return -EIO;
+
+ max_profiles = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_FLOOD_PRF);
+ if (WARN_ON_ONCE(!profile_id) ||
+ WARN_ON_ONCE(profile_id >= max_profiles))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_fids_init_flood_table(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_fid_flood_profile_id profile_id,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ const int *sfgc_packet_types;
+ int err;
+ int i;
+
+ sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
+ for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
+ char sffp_pl[MLXSW_REG_SFFP_LEN];
+
+ if (!sfgc_packet_types[i])
+ continue;
+
+ mlxsw_reg_sffp_pack(sffp_pl, profile_id, i,
+ flood_table->table_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sffp), sffp_pl);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_sp2_fids_init_flood_profile(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid_flood_profile *
+ flood_profile)
+{
+ int err;
+ int i;
+
+ err = mlxsw_sp_fid_check_flood_profile_id(mlxsw_sp,
+ flood_profile->profile_id);
+ if (err)
+ return err;
+
+ for (i = 0; i < flood_profile->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &flood_profile->flood_tables[i];
+ err = mlxsw_sp2_fids_init_flood_table(mlxsw_sp,
+ flood_profile->profile_id,
+ flood_table);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const
+struct mlxsw_sp_fid_flood_profile *mlxsw_sp_fid_flood_profiles[] = {
+ &mlxsw_sp_fid_8021d_flood_profile,
+ &mlxsw_sp_fid_rsp_flood_profile_cff,
+ &mlxsw_sp_fid_nve_flood_profile_cff,
+};
+
+static int
+mlxsw_sp2_fids_init_flood_profiles(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_fid_flood_profiles); i++) {
+ const struct mlxsw_sp_fid_flood_profile *flood_profile;
+
+ flood_profile = mlxsw_sp_fid_flood_profiles[i];
+ err = mlxsw_sp2_fids_init_flood_profile(mlxsw_sp,
+ flood_profile);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp2_fids_init_ctl(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_fids_init(mlxsw_sp, mlxsw_sp2_fid_family_arr_ctl);
+}
+
+static int mlxsw_sp2_fids_init_cff(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = mlxsw_sp2_fids_init_flood_profiles(mlxsw_sp);
+ if (err)
+ return err;
+
+ return mlxsw_sp_fids_init(mlxsw_sp, mlxsw_sp2_fid_family_arr_cff);
+}
+
+static int mlxsw_sp2_fids_init(struct mlxsw_sp *mlxsw_sp)
+{
+ switch (mlxsw_core_flood_mode(mlxsw_sp->core)) {
+ case MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED:
+ return mlxsw_sp2_fids_init_ctl(mlxsw_sp);
+ case MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF:
+ return mlxsw_sp2_fids_init_cff(mlxsw_sp);
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+}
+
+const struct mlxsw_sp_fid_core_ops mlxsw_sp2_fid_core_ops = {
+ .init = mlxsw_sp2_fids_init,
+ .fini = mlxsw_sp_fids_fini,
+};
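
A note on the pattern above: mlxsw now hides FID setup behind a per-ASIC mlxsw_sp_fid_core_ops vtable, and Spectrum-2 additionally dispatches on the firmware-configured flood mode (controlled vs. CFF). Below is a minimal sketch of this mode-keyed ops selection; every name is hypothetical, only the pattern itself is taken from the patch.

enum ex_flood_mode { EX_FLOOD_MODE_CTL, EX_FLOOD_MODE_CFF };

struct ex_fid_core_ops {
        int (*init)(void *priv);
        void (*fini)(void *priv);
};

static int ex_fids_init_ctl(void *priv) { return 0; }  /* legacy controlled mode */
static int ex_fids_init_cff(void *priv) { return 0; }  /* compressed FID flooding */
static void ex_fids_fini(void *priv) { }

static const struct ex_fid_core_ops ex_ctl_ops = {
        .init = ex_fids_init_ctl, .fini = ex_fids_fini,
};
static const struct ex_fid_core_ops ex_cff_ops = {
        .init = ex_fids_init_cff, .fini = ex_fids_fini,
};

/* Pick the ops table once at probe time from the firmware-reported mode;
 * everything downstream calls through the pointer and never re-checks.
 */
static const struct ex_fid_core_ops *ex_fid_ops_get(enum ex_flood_mode mode)
{
        switch (mode) {
        case EX_FLOOD_MODE_CTL:
                return &ex_ctl_ops;
        case EX_FLOOD_MODE_CFF:
                return &ex_cff_ops;
        default:
                return NULL;    /* caller maps this to -EINVAL */
        }
}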
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 39e6d941f..87617df69 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -501,7 +501,7 @@ struct mlxsw_sp_rt6 {
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
- unsigned int ref_count;
+ refcount_t ref_count;
enum mlxsw_sp_l3proto proto;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
@@ -578,7 +578,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count == 0)
+ if (refcount_read(&lpm_tree->ref_count) == 0)
return lpm_tree;
}
return NULL;
@@ -654,7 +654,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
sizeof(lpm_tree->prefix_usage));
memset(&lpm_tree->prefix_ref_count, 0,
sizeof(lpm_tree->prefix_ref_count));
- lpm_tree->ref_count = 1;
+ refcount_set(&lpm_tree->ref_count, 1);
return lpm_tree;
err_left_struct_set:
@@ -678,7 +678,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count != 0 &&
+ if (refcount_read(&lpm_tree->ref_count) &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage)) {
@@ -691,14 +691,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
- lpm_tree->ref_count++;
+ refcount_inc(&lpm_tree->ref_count);
}
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ if (!refcount_dec_and_test(&lpm_tree->ref_count))
+ return;
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
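
The ref_count conversions in this file swap a bare unsigned int for refcount_t, which saturates rather than wrapping on overflow and warns on increment-from-zero or a double put. A minimal sketch of the resulting get/put discipline, assuming a simple kzalloc'd object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct ex_obj {
        refcount_t ref_count;
};

static struct ex_obj *ex_obj_create(void)
{
        struct ex_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                refcount_set(&o->ref_count, 1); /* creator holds one reference */
        return o;
}

static void ex_obj_hold(struct ex_obj *o)
{
        refcount_inc(&o->ref_count);    /* WARNs if the count was already 0 */
}

static void ex_obj_put(struct ex_obj *o)
{
        if (!refcount_dec_and_test(&o->ref_count))
                return;
        kfree(o);                       /* last reference dropped */
}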
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -8419,6 +8420,9 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->ops = ops;
rif->rif_entries = rif_entries;
+ if (ops->setup)
+ ops->setup(rif, params);
+
if (ops->fid_get) {
fid = ops->fid_get(rif, params, extack);
if (IS_ERR(fid)) {
@@ -8428,9 +8432,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
rif->fid = fid;
}
- if (ops->setup)
- ops->setup(rif, params);
-
err = ops->configure(rif, extack);
if (err)
goto err_configure;
@@ -8660,6 +8661,20 @@ mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
return container_of(rif, struct mlxsw_sp_rif_subport, common);
}
+int mlxsw_sp_rif_subport_port(const struct mlxsw_sp_rif *rif,
+ u16 *port, bool *is_lag)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ if (WARN_ON(rif->ops->type != MLXSW_SP_RIF_TYPE_SUBPORT))
+ return -EINVAL;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ *is_lag = rif_subport->lag;
+ *port = *is_lag ? rif_subport->lag_id : rif_subport->system_port;
+ return 0;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6c749c148..6397ff0dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
- unsigned int ref_count;
+ refcount_t ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
@@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
- bridge_port->ref_count = 1;
+ refcount_set(&bridge_port->ref_count, 1);
err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
NULL, NULL, NULL, false, extack);
@@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
- bridge_port->ref_count++;
+ refcount_inc(&bridge_port->ref_count);
return bridge_port;
}
@@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- if (--bridge_port->ref_count != 0)
+ if (!refcount_dec_and_test(&bridge_port->ref_count))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e5ec0a363..31f75b4a6 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
* @rdfifo: FIFO read callback
* @wrfifo: FIFO write callback
* @start_xmit: start_xmit() implementation callback
- * @rx_skb: rx_skb() implementation callback
* @flush_tx_work: flush_tx_work() implementation callback
*
* The @statelock is used to protect information in the structure which may
@@ -423,8 +422,6 @@ struct ks8851_net {
struct sk_buff *txp, bool irq);
netdev_tx_t (*start_xmit)(struct sk_buff *skb,
struct net_device *dev);
- void (*rx_skb)(struct ks8851_net *ks,
- struct sk_buff *skb);
void (*flush_tx_work)(struct ks8851_net *ks);
};
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 0bf13b38b..d4cdf3d4f 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -232,16 +232,6 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
- * ks8851_rx_skb - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
-{
- ks->rx_skb(ks, skb);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
*
@@ -309,7 +299,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- ks8851_rx_skb(ks, skb);
+ __netif_rx(skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -340,6 +330,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
unsigned long flags;
unsigned int status;
+ local_bh_disable();
+
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
@@ -416,6 +408,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
+ local_bh_enable();
+
return IRQ_HANDLED;
}
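
With the per-bus rx_skb indirection removed, both ks8851 variants now feed the stack through __netif_rx(), which must be called with bottom halves disabled; hence the local_bh_disable()/local_bh_enable() pair newly bracketing the threaded IRQ handler. A condensed sketch of that shape (example_fetch_rx_packet() is a hypothetical stand-in for the FIFO read path):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical helper: pulls the next completed RX frame off the chip. */
struct sk_buff *example_fetch_rx_packet(void *priv);

static irqreturn_t example_irq_thread(int irq, void *priv)
{
        struct sk_buff *skb;

        local_bh_disable();             /* __netif_rx() requires BHs off */

        while ((skb = example_fetch_rx_packet(priv)))
                __netif_rx(skb);        /* queue for the NET_RX softirq */

        local_bh_enable();              /* pending softirqs run here */
        return IRQ_HANDLED;
}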
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2a7f29854..381b9cd28 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
}
-/**
- * ks8851_rx_skb_par - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
{
return ks8851_rdreg16_par(ks, KS_TXQCR);
@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
ks->rdfifo = ks8851_rdfifo_par;
ks->wrfifo = ks8851_wrfifo_par;
ks->start_xmit = ks8851_start_xmit_par;
- ks->rx_skb = ks8851_rx_skb_par;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
IRQ_RXI | /* RX done */ \
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 88e26c120..55f6f9f6d 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -156,7 +156,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
txb[0] = cpu_to_le16(op | KS_SPIOP_RD);
- if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
msg = &kss->spi_msg2;
xfer = kss->spi_xfer2;
@@ -180,7 +180,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned int op,
ret = spi_sync(kss->spidev, msg);
if (ret < 0)
netdev_err(ks->netdev, "read: spi_sync() failed\n");
- else if (kss->spidev->master->flags & SPI_MASTER_HALF_DUPLEX)
+ else if (kss->spidev->master->flags & SPI_CONTROLLER_HALF_DUPLEX)
memcpy(rxb, trx, rxl);
else
memcpy(rxb, trx + 2, rxl);
@@ -299,16 +299,6 @@ static unsigned int calc_txlen(unsigned int len)
}
/**
- * ks8851_rx_skb_spi - receive skbuff
- * @ks: The device state
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
-/**
* ks8851_tx_work - process tx packet(s)
 * @work: The work structure that was scheduled.
*
@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
ks->rdfifo = ks8851_rdfifo_spi;
ks->wrfifo = ks8851_wrfifo_spi;
ks->start_xmit = ks8851_start_xmit_spi;
- ks->rx_skb = ks8851_rx_skb_spi;
ks->flush_tx_work = ks8851_flush_tx_work_spi;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 6961cfc55..a2b3f4433 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -934,11 +934,11 @@ static u32 lan743x_ethtool_get_rxfh_indir_size(struct net_device *netdev)
}
static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
- u32 *indir, u8 *key, u8 *hfunc)
+ struct ethtool_rxfh_param *rxfh)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- if (indir) {
+ if (rxfh->indir) {
int dw_index;
int byte_index = 0;
@@ -947,17 +947,17 @@ static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
lan743x_csr_read(adapter, RFE_INDX(dw_index));
byte_index = dw_index << 2;
- indir[byte_index + 0] =
+ rxfh->indir[byte_index + 0] =
((four_entries >> 0) & 0x000000FF);
- indir[byte_index + 1] =
+ rxfh->indir[byte_index + 1] =
((four_entries >> 8) & 0x000000FF);
- indir[byte_index + 2] =
+ rxfh->indir[byte_index + 2] =
((four_entries >> 16) & 0x000000FF);
- indir[byte_index + 3] =
+ rxfh->indir[byte_index + 3] =
((four_entries >> 24) & 0x000000FF);
}
}
- if (key) {
+ if (rxfh->key) {
int dword_index;
int byte_index = 0;
@@ -967,28 +967,30 @@ static int lan743x_ethtool_get_rxfh(struct net_device *netdev,
RFE_HASH_KEY(dword_index));
byte_index = dword_index << 2;
- key[byte_index + 0] =
+ rxfh->key[byte_index + 0] =
((four_entries >> 0) & 0x000000FF);
- key[byte_index + 1] =
+ rxfh->key[byte_index + 1] =
((four_entries >> 8) & 0x000000FF);
- key[byte_index + 2] =
+ rxfh->key[byte_index + 2] =
((four_entries >> 16) & 0x000000FF);
- key[byte_index + 3] =
+ rxfh->key[byte_index + 3] =
((four_entries >> 24) & 0x000000FF);
}
}
- if (hfunc)
- (*hfunc) = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
- const u32 *indir, const u8 *key,
- const u8 hfunc)
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 *indir = rxfh->indir;
+ u8 *key = rxfh->key;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
@@ -1075,7 +1077,6 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
buf = lan743x_csr_read(adapter, MAC_CR);
if (buf & MAC_CR_EEE_EN_) {
eee->eee_enabled = true;
- eee->eee_active = !!(eee->advertised & eee->lp_advertised);
eee->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
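
The get_rxfh/set_rxfh rewrites here (and in mana_ethtool.c further below) follow the 6.8 ethtool API change that folds indir, key and hfunc into struct ethtool_rxfh_param and threads an extack through the setter. A minimal pair of handlers under that contract, with hypothetical sizes and private state:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define EX_INDIR_SIZE 128       /* hypothetical table and key sizes */
#define EX_KEY_SIZE 40

struct ex_priv {
        u32 indir[EX_INDIR_SIZE];
        u8 key[EX_KEY_SIZE];
};

static int ex_get_rxfh(struct net_device *ndev, struct ethtool_rxfh_param *rxfh)
{
        struct ex_priv *p = netdev_priv(ndev);

        rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
        if (rxfh->indir)
                memcpy(rxfh->indir, p->indir, sizeof(p->indir));
        if (rxfh->key)
                memcpy(rxfh->key, p->key, sizeof(p->key));
        return 0;
}

static int ex_set_rxfh(struct net_device *ndev, struct ethtool_rxfh_param *rxfh,
                       struct netlink_ext_ack *extack)
{
        struct ex_priv *p = netdev_priv(ndev);

        if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
            rxfh->hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
        if (rxfh->indir)
                memcpy(p->indir, rxfh->indir, sizeof(p->indir));
        if (rxfh->key)
                memcpy(p->key, rxfh->key, sizeof(p->key));
        return 0;       /* a real driver would reprogram the hardware here */
}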
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 45e209a7d..6bf336740 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -25,6 +25,8 @@
#define PCS_POWER_STATE_DOWN 0x6
#define PCS_POWER_STATE_UP 0x4
+#define RFE_RD_FIFO_TH_3_DWORDS 0x3
+
static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
{
u32 chip_rev;
@@ -3272,6 +3274,21 @@ static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
lan743x_pci_cleanup(adapter);
}
+static void pci11x1x_set_rfe_rd_fifo_threshold(struct lan743x_adapter *adapter)
+{
+ u16 rev = adapter->csr.id_rev & ID_REV_CHIP_REV_MASK_;
+
+ if (rev == ID_REV_CHIP_REV_PCI11X1X_B0_) {
+ u32 misc_ctl;
+
+ misc_ctl = lan743x_csr_read(adapter, MISC_CTL_0);
+ misc_ctl &= ~MISC_CTL_0_RFE_READ_FIFO_MASK_;
+ misc_ctl |= FIELD_PREP(MISC_CTL_0_RFE_READ_FIFO_MASK_,
+ RFE_RD_FIFO_TH_3_DWORDS);
+ lan743x_csr_write(adapter, MISC_CTL_0, misc_ctl);
+ }
+}
+
static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
@@ -3287,6 +3304,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
pci11x1x_strap_get_status(adapter);
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
+ pci11x1x_set_rfe_rd_fifo_threshold(adapter);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index b64846178..645bc048e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -26,6 +26,7 @@
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
+#define ID_REV_CHIP_REV_PCI11X1X_B0_ (0x000000B0)
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
@@ -311,6 +312,9 @@
#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+#define MISC_CTL_0 (0x920)
+#define MISC_CTL_0_RFE_READ_FIFO_MASK_ GENMASK(6, 4)
+
/* Vendor Specific SGMII MMD details */
#define SR_VSMMD_PCS_ID1 0x0004
#define SR_VSMMD_PCS_ID2 0x0005
@@ -1075,7 +1079,7 @@ struct lan743x_adapter {
#define DMA_DESCRIPTOR_SPACING_32 (32)
#define DMA_DESCRIPTOR_SPACING_64 (64)
#define DMA_DESCRIPTOR_SPACING_128 (128)
-#define DEFAULT_DMA_DESCRIPTOR_SPACING (L1_CACHE_BYTES)
+#define DEFAULT_DMA_DESCRIPTOR_SPACING (DMA_DESCRIPTOR_SPACING_16)
#define DMAC_CHANNEL_STATE_SET(start_bit, stop_bit) \
(((start_bit) ? 2 : 0) | ((stop_bit) ? 1 : 0))
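
pci11x1x_set_rfe_rd_fifo_threshold() above is a standard masked read-modify-write: clear the GENMASK-defined field, then insert the new value with FIELD_PREP so it lands at the field's shift automatically. The same arithmetic in isolation, as a sketch:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_FIFO_TH_MASK GENMASK(6, 4)   /* 3-bit field at bits 6:4 */

static u32 ex_set_fifo_threshold(u32 reg, u32 dwords)
{
        reg &= ~EX_FIFO_TH_MASK;                        /* clear the old value */
        reg |= FIELD_PREP(EX_FIFO_TH_MASK, dwords);     /* e.g. 0x3 becomes 0x30 */
        return reg;
}

/* FIELD_GET recovers the value: FIELD_GET(EX_FIFO_TH_MASK, 0x30) == 0x3 */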
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 37d2584b4..a06dc5a9b 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1012,7 +1012,7 @@ static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
return;
for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
- ethtool_sprintf(&data, "%s", sparx5->stats_layout[idx]);
+ ethtool_puts(&data, sparx5->stats_layout[idx]);
}
static void sparx5_get_sset_data(struct net_device *ndev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 3a1b1a1f5..60dd2fd60 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -731,7 +731,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
bool sgmii = false, inband_aneg = false;
int err;
- if (port->conf.inband) {
+ if (conf->inband) {
if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
conf->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
@@ -948,7 +948,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
- if (port->conf.inband) {
+ if (conf->inband) {
/* Enable/disable 1G counters in ASM */
spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
ASM_PORT_CFG_CSC_STAT_DIS,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 523e0c470..55f255a3c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -36,6 +36,27 @@ struct sparx5_tc_flower_template {
u16 l3_proto; /* protocol specified in the template */
};
+/* SparX-5 VCAP fragment types:
+ * 0 = no fragment, 1 = initial fragment,
+ * 2 = suspicious fragment, 3 = valid follow-up fragment
+ */
+enum { /* key / mask */
+ FRAG_NOT = 0x03, /* 0 / 3 */
+ FRAG_SOME = 0x11, /* 1 / 1 */
+ FRAG_FIRST = 0x13, /* 1 / 3 */
+ FRAG_LATER = 0x33, /* 3 / 3 */
+ FRAG_INVAL = 0xff, /* invalid */
+};
+
+/* Flower fragment flag to VCAP fragment type mapping */
+static const u8 sparx5_vcap_frag_map[4][4] = { /* is_frag */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST }, /* 0/0 */
+ { FRAG_NOT, FRAG_NOT, FRAG_INVAL, FRAG_INVAL }, /* 0/1 */
+ { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL }, /* 1/0 */
+ { FRAG_SOME, FRAG_LATER, FRAG_INVAL, FRAG_FIRST } /* 1/1 */
+ /* 0/0 0/1 1/0 1/1 <-- first_frag */
+};
+
static int
sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
@@ -145,29 +166,27 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
flow_rule_match_control(st->frule, &mt);
if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
+ u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
+ u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
+
+ u8 first_frag_key = !!(mt.key->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_mask = !!(mt.mask->flags & FLOW_DIS_FIRST_FRAG);
+ u8 first_frag_idx = (first_frag_key << 1) | first_frag_mask;
+
+ /* Lookup verdict based on the 2 + 2 input bits */
+ u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
+
+ if (vdt == FRAG_INVAL) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Match on invalid fragment flag combination");
+ return -EINVAL;
}
+ /* Extract VCAP fragment key and mask from verdict */
+ value = (vdt >> 4) & 0x3;
+ mask = vdt & 0x3;
+
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
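
The replacement fragment logic packs a VCAP (key, mask) pair into one byte (key in the high nibble, mask in the low) and indexes the 4x4 table with two (key << 1 | mask) values built from the flower flags. A standalone C sketch of the decode using the constants introduced above; matching is_frag 1/1 and first_frag 1/1 yields FRAG_FIRST (0x13), i.e. key 1 with mask 3:

#include <stdint.h>
#include <stdio.h>

enum { FRAG_NOT = 0x03, FRAG_SOME = 0x11, FRAG_FIRST = 0x13,
       FRAG_LATER = 0x33, FRAG_INVAL = 0xff };

static const uint8_t frag_map[4][4] = {        /* [is_frag_idx][first_frag_idx] */
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_FIRST },
        { FRAG_NOT,   FRAG_NOT,   FRAG_INVAL, FRAG_INVAL },
        { FRAG_INVAL, FRAG_INVAL, FRAG_INVAL, FRAG_INVAL },
        { FRAG_SOME,  FRAG_LATER, FRAG_INVAL, FRAG_FIRST },
};

int main(void)
{
        /* flower matched is_frag=1/1 and first_frag=1/1: an initial fragment */
        uint8_t vdt = frag_map[(1 << 1) | 1][(1 << 1) | 1];

        printf("value=%u mask=%u\n", (vdt >> 4) & 0x3, vdt & 0x3); /* 1, 3 */
        return 0;
}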
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 6367de0c2..d33b27214 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -158,6 +158,9 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
if (dev_type == GDMA_DEVICE_MANA) {
gc->mana.gdma_context = gc;
gc->mana.dev_id = dev;
+ } else if (dev_type == GDMA_DEVICE_MANA_IB) {
+ gc->mana_ib.dev_id = dev;
+ gc->mana_ib.gdma_context = gc;
}
}
@@ -414,8 +417,12 @@ static void mana_gd_process_eq_events(void *arg)
old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
/* No more entries */
- if (owner_bits == old_bits)
+ if (owner_bits == old_bits) {
+ /* return here without ringing the doorbell */
+ if (i == 0)
+ return;
break;
+ }
new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
if (owner_bits != new_bits) {
@@ -445,42 +452,29 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
- struct gdma_resource *r;
unsigned int msi_index;
unsigned long flags;
struct device *dev;
int err = 0;
gc = gd->gdma_context;
- r = &gc->msix_resource;
dev = gc->dev;
+ msi_index = spec->eq.msix_index;
- spin_lock_irqsave(&r->lock, flags);
-
- msi_index = find_first_zero_bit(r->map, r->size);
- if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
+ if (msi_index >= gc->num_msix_usable) {
err = -ENOSPC;
- } else {
- bitmap_set(r->map, msi_index, 1);
- queue->eq.msix_index = msi_index;
- }
-
- spin_unlock_irqrestore(&r->lock, flags);
-
- if (err) {
- dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
- err, msi_index, r->size, gc->num_msix_usable);
+ dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
+ err, msi_index, gc->num_msix_usable);
return err;
}
+ queue->eq.msix_index = msi_index;
gic = &gc->irq_contexts[msi_index];
- WARN_ON(gic->handler || gic->arg);
-
- gic->arg = queue;
-
- gic->handler = mana_gd_process_eq_events;
+ spin_lock_irqsave(&gic->lock, flags);
+ list_add_rcu(&queue->entry, &gic->eq_list);
+ spin_unlock_irqrestore(&gic->lock, flags);
return 0;
}
@@ -490,12 +484,11 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
struct gdma_context *gc;
- struct gdma_resource *r;
unsigned int msix_index;
unsigned long flags;
+ struct gdma_queue *eq;
gc = gd->gdma_context;
- r = &gc->msix_resource;
/* At most num_online_cpus() + 1 interrupts are used. */
msix_index = queue->eq.msix_index;
@@ -503,14 +496,17 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
return;
gic = &gc->irq_contexts[msix_index];
- gic->handler = NULL;
- gic->arg = NULL;
-
- spin_lock_irqsave(&r->lock, flags);
- bitmap_clear(r->map, msix_index, 1);
- spin_unlock_irqrestore(&r->lock, flags);
+ spin_lock_irqsave(&gic->lock, flags);
+ list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
+ if (queue == eq) {
+ list_del_rcu(&eq->entry);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gic->lock, flags);
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ synchronize_rcu();
}
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
@@ -588,6 +584,7 @@ static int mana_gd_create_eq(struct gdma_dev *gd,
int err;
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
+ queue->id = INVALID_QUEUE_ID;
log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
@@ -819,6 +816,7 @@ free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, NET_MANA);
int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
const struct gdma_queue_spec *spec,
@@ -895,6 +893,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_free_memory(gmi);
kfree(queue);
}
+EXPORT_SYMBOL_NS(mana_gd_destroy_queue, NET_MANA);
int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
@@ -971,6 +970,7 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_register_device, NET_MANA);
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -1001,6 +1001,7 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_deregister_device, NET_MANA);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1217,9 +1218,14 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
struct gdma_irq_context *gic = arg;
+ struct list_head *eq_list = &gic->eq_list;
+ struct gdma_queue *eq;
- if (gic->handler)
- gic->handler(gic->arg);
+ rcu_read_lock();
+ list_for_each_entry_rcu(eq, eq_list, entry) {
+ gic->handler(eq);
+ }
+ rcu_read_unlock();
return IRQ_HANDLED;
}
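
The MANA interrupt rework replaces the one-EQ-per-vector (handler, arg) pair with an RCU-protected list, letting several event queues share one MSI-X vector: writers manipulate the list under a spinlock, the hard-IRQ path walks it locklessly, and synchronize_rcu() after unlinking guarantees no in-flight handler still references a departing queue. A minimal sketch of the pattern with hypothetical types:

#include <linux/interrupt.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct ex_irq_ctx {
        spinlock_t lock;                /* serializes list writers */
        struct list_head eq_list;       /* walked under RCU in hard IRQ */
};

struct ex_eq {
        struct list_head entry;
};

/* Hypothetical helper: drains pending events on one queue. */
void ex_process_eq(struct ex_eq *eq);

static void ex_irq_ctx_init(struct ex_irq_ctx *gic)
{
        spin_lock_init(&gic->lock);
        INIT_LIST_HEAD(&gic->eq_list);
}

static void ex_eq_attach(struct ex_irq_ctx *gic, struct ex_eq *eq)
{
        unsigned long flags;

        spin_lock_irqsave(&gic->lock, flags);
        list_add_rcu(&eq->entry, &gic->eq_list);
        spin_unlock_irqrestore(&gic->lock, flags);
}

static void ex_eq_detach(struct ex_irq_ctx *gic, struct ex_eq *eq)
{
        unsigned long flags;

        spin_lock_irqsave(&gic->lock, flags);
        list_del_rcu(&eq->entry);
        spin_unlock_irqrestore(&gic->lock, flags);
        synchronize_rcu();      /* wait out any handler still walking the list */
}

static irqreturn_t ex_intr(int irq, void *arg)
{
        struct ex_irq_ctx *gic = arg;
        struct ex_eq *eq;

        rcu_read_lock();
        list_for_each_entry_rcu(eq, &gic->eq_list, entry)
                ex_process_eq(eq);
        rcu_read_unlock();
        return IRQ_HANDLED;
}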
@@ -1271,8 +1277,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
- gic->handler = NULL;
- gic->arg = NULL;
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
if (!i)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
@@ -1295,10 +1302,6 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
- if (err)
- goto free_irq;
-
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
@@ -1329,8 +1332,6 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (gc->max_num_msix < 1)
return;
- mana_gd_free_res_map(&gc->msix_resource);
-
for (i = 0; i < gc->max_num_msix; i++) {
irq = pci_irq_vector(pdev, i);
if (irq < 0)
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 9d1cd3bfc..2729a2c5a 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -300,6 +300,7 @@ static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
spec.eq.context = ctx;
spec.eq.callback = cb;
spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
+ spec.eq.msix_index = 0;
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index fc3d2903a..d8af5e7e1 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -601,7 +601,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
- *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+ *datasize = mtu + ETH_HLEN;
}
static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
@@ -1244,6 +1244,7 @@ static int mana_create_eq(struct mana_context *ac)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
for (i = 0; i < gc->max_num_queues; i++) {
+ spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
@@ -2137,6 +2138,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.pool_size = RX_BUFFERS_PER_QUEUE;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
+ pprm.netdev = rxq->ndev;
rxq->page_pool = page_pool_create(&pprm);
@@ -2385,13 +2387,33 @@ void mana_query_gf_stats(struct mana_port_context *apc)
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
sizeof(req), sizeof(resp));
- req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
+ req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
+ STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
+ STATISTICS_FLAGS_HC_RX_BYTES |
+ STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
+ STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
+ STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
+ STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
+ STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
+ STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
+ STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
+ STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
+ STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
+ STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
+ STATISTICS_FLAGS_HC_TX_BYTES |
STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
- STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
+ STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
+ STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
@@ -2407,6 +2429,30 @@ void mana_query_gf_stats(struct mana_port_context *apc)
return;
}
+ apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ resp.tx_err_inval_vport_offset_pkt;
+ apc->eth_stats.hc_tx_err_vlan_enforcement =
+ resp.tx_err_vlan_enforcement;
+ apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ resp.tx_err_ethtype_enforcement;
+ apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ resp.tx_err_SQPDID_enforcement;
+ apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ resp.tx_err_CQPDID_enforcement;
+ apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
@@ -2414,6 +2460,7 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
static int mana_init_port(struct net_device *ndev)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 607150165..ab2413d71 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,46 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ hc_rx_discards_no_wqe)},
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ hc_rx_err_vport_disabled)},
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_ucast_pkts)},
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_ucast_bytes)},
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_bcast_pkts)},
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_bcast_bytes)},
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_rx_mcast_bytes)},
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_gf_disabled)},
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_vport_disabled)},
+ {"hc_tx_err_inval_vportoffset_pkt",
+ offsetof(struct mana_ethtool_stats,
+ hc_tx_err_inval_vportoffset_pkt)},
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_vlan_enforcement)},
+ {"hc_tx_err_eth_type_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_sa_enforcement)},
+ {"hc_tx_err_sqpdid_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ {"hc_tx_err_cqpdid_enforcement",
+ offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_mtu_violation)},
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_inval_oob)},
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ hc_tx_err_gdma)},
{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
hc_tx_ucast_pkts)},
@@ -208,28 +248,28 @@ static u32 mana_rss_indir_size(struct net_device *ndev)
return MANA_INDIRECT_TABLE_SIZE;
}
-static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int mana_get_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh)
{
struct mana_port_context *apc = netdev_priv(ndev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- indir[i] = apc->indir_table[i];
+ rxfh->indir[i] = apc->indir_table[i];
}
- if (key)
- memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);
+ if (rxfh->key)
+ memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);
return 0;
}
-static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int mana_set_rxfh(struct net_device *ndev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
@@ -240,25 +280,26 @@ static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
if (!apc->port_is_up)
return -EOPNOTSUPP;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- if (indir[i] >= apc->num_queues)
+ if (rxfh->indir[i] >= apc->num_queues)
return -EINVAL;
update_table = true;
for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
save_table[i] = apc->indir_table[i];
- apc->indir_table[i] = indir[i];
+ apc->indir_table[i] = rxfh->indir[i];
}
}
- if (key) {
+ if (rxfh->key) {
update_hash = true;
memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
- memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
+ memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
}
err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 61d8bfd12..55408f16f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -414,6 +414,7 @@ static const u64 fix_mac[] = {
END_SIGN
};
+MODULE_DESCRIPTION("Neterion 10GbE driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 86db8e814..2c7bd6e80 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -76,7 +76,7 @@ struct nfp_fl_lag_group {
/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID 0
#define NFP_FL_LAG_GROUP_MIN 1 /* ID 0 reserved */
-#define NFP_FL_LAG_GROUP_MAX 32 /* IDs 1 to 31 are valid */
+#define NFP_FL_LAG_GROUP_MAX 31 /* IDs 1 to 31 are valid */
/* wait for more config */
#define NFP_FL_LAG_DELAY (msecs_to_jiffies(2))
@@ -111,8 +111,8 @@ nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
- id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
- NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
+ id = ida_alloc_range(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
+ NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
if (id < 0) {
nfp_flower_cmsg_warn(priv->app,
"No more bonding groups available\n");
@@ -121,7 +121,7 @@ nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
group = kmalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
- ida_simple_remove(&lag->ida_handle, id);
+ ida_free(&lag->ida_handle, id);
return ERR_PTR(-ENOMEM);
}
@@ -328,8 +328,7 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
}
if (entry->to_destroy) {
- ida_simple_remove(&lag->ida_handle,
- entry->group_id);
+ ida_free(&lag->ida_handle, entry->group_id);
list_del(&entry->list);
kfree(entry);
}
@@ -420,7 +419,7 @@ nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
struct nfp_flower_cmsg_lag_config *cmsg_payload;
cmsg_payload = nfp_flower_cmsg_get_data(skb);
- if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
+ if (be32_to_cpu(cmsg_payload->group_id) > NFP_FL_LAG_GROUP_MAX)
return -EINVAL;
/* Drop cmsg retrans if storage limit is exceeded to prevent
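
The ida_simple_get() to ida_alloc_range() conversion above also corrects an off-by-one: ida_simple_get() treats its end argument as exclusive, while ida_alloc_range() treats max as inclusive; that is why NFP_FL_LAG_GROUP_MAX shrinks from 32 to 31 and the group-ID bound check relaxes from >= to >. The equivalence, in a sketch:

#include <linux/idr.h>

static DEFINE_IDA(ex_ida);

static int ex_alloc_group_id(void)
{
        /* Old: ida_simple_get(&ex_ida, 1, 32, GFP_KERNEL), end exclusive.
         * New: max is inclusive, so the same ID space 1..31 is written as:
         */
        return ida_alloc_range(&ex_ida, 1, 31, GFP_KERNEL);
}

static void ex_free_group_id(int id)
{
        ida_free(&ex_ida, id);
}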
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 17381bfc1..d215efc6c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -74,7 +74,7 @@ static void
nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
{
- u32 l3_offset, l4_offset, hdrlen;
+ u32 l3_offset, l4_offset, hdrlen, l4_hdrlen;
u16 mss;
if (!skb_is_gso(skb))
@@ -83,13 +83,16 @@ nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : tcp_hdrlen(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : inner_tcp_hdrlen(skb);
}
+ hdrlen = l4_offset + l4_hdrlen;
txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
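
skb_tcp_all_headers() bakes in a TCP transport header, so it miscounts for UDP segmentation offload (SKB_GSO_UDP_L4), where the L4 header is a fixed-size udphdr; the hunk above and its twin in nfdk/dp.c rebuild the per-segment header length as the transport offset plus a protocol-appropriate L4 header size. The same computation expressed as a helper (non-encapsulated case), as a sketch:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* Total header length prepended to every segment of a GSO skb;
 * mirrors the logic in the hunks above.
 */
static u32 ex_gso_hdrlen(const struct sk_buff *skb)
{
        u32 l4_hdrlen;

        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                l4_hdrlen = sizeof(struct udphdr);      /* fixed 8 bytes */
        else
                l4_hdrlen = tcp_hdrlen(skb);            /* includes TCP options */

        return skb_transport_offset(skb) + l4_hdrlen;
}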
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 8d78c6fae..dae5af7d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -40,20 +40,23 @@ static __le64
nfp_nfdk_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfdk_tx_buf *txbuf,
struct sk_buff *skb)
{
- u32 segs, hdrlen, l3_offset, l4_offset;
+ u32 segs, hdrlen, l3_offset, l4_offset, l4_hdrlen;
struct nfp_nfdk_tx_desc txd;
u16 mss;
if (!skb->encapsulation) {
l3_offset = skb_network_offset(skb);
l4_offset = skb_transport_offset(skb);
- hdrlen = skb_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : tcp_hdrlen(skb);
} else {
l3_offset = skb_inner_network_offset(skb);
l4_offset = skb_inner_transport_offset(skb);
- hdrlen = skb_inner_tcp_all_headers(skb);
+ l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+ sizeof(struct udphdr) : inner_tcp_hdrlen(skb);
}
+ hdrlen = l4_offset + l4_hdrlen;
segs = skb_shinfo(skb)->gso_segs;
mss = skb_shinfo(skb)->gso_size & NFDK_DESC_TX_MSS_MASK;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 8c6954c58..635d33c0d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -75,8 +75,10 @@ nfp_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
if (ret)
return ret;
- if (eth_port.port_lanes % count)
+ if (eth_port.port_lanes % count) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid count");
return -EINVAL;
+ }
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
@@ -101,8 +103,10 @@ nfp_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
if (ret)
return ret;
- if (!eth_port.is_split)
+ if (!eth_port.is_split) {
+ NL_SET_ERR_MSG_MOD(extack, "port is not split");
return -EINVAL;
+ }
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 939cfce15..46764aecc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -621,6 +621,9 @@ struct nfp_net_dp {
* @mbox_amsg.lock: Protect message list
 * @mbox_amsg.list: List of messages to process
 * @mbox_amsg.work: Work to process messages asynchronously
+ * @fs: Flow steering
+ * @fs.count: Flow count
+ * @fs.list: List of flows
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -728,9 +731,39 @@ struct nfp_net {
struct work_struct work;
} mbox_amsg;
+ struct {
+ u16 count;
+ struct list_head list;
+ } fs;
+
void *app_priv;
};
+struct nfp_fs_entry {
+ struct list_head node;
+ u32 flow_type;
+ u32 loc;
+ struct {
+ union {
+ struct {
+ __be32 sip4;
+ __be32 dip4;
+ };
+ struct {
+ __be32 sip6[4];
+ __be32 dip6[4];
+ };
+ };
+ union {
+ __be16 l3_proto;
+ u8 l4_proto;
+ };
+ __be16 sport;
+ __be16 dport;
+ } key, msk;
+ u64 action;
+};
+
struct nfp_mbox_amsg_entry {
struct list_head list;
int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
@@ -933,9 +966,9 @@ static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
-static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
+static inline int nfp_net_coalesce_para_check(u32 param)
{
- if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1)))
+ if (param >= ((1 << 16) - 1))
return -EINVAL;
return 0;
@@ -987,6 +1020,9 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
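
The nfp_fs_entry added above is essentially an ethtool n-tuple rule: key/mask pairs for addresses, protocol and ports, plus an action that is either RX_CLS_FLOW_DISC (drop) or a destination queue index. A hedged sketch of how a "drop TCPv4 from 192.0.2.1" rule might be populated before handing it to nfp_net_fs_add_hw(); the helper name is hypothetical:

#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void ex_build_drop_rule(struct nfp_fs_entry *entry, u32 loc)
{
        memset(entry, 0, sizeof(*entry));
        entry->flow_type = TCP_V4_FLOW;
        entry->loc = loc;                       /* rule slot chosen via ethtool */

        entry->key.l4_proto = IPPROTO_TCP;
        entry->msk.l4_proto = 0xff;             /* match the protocol exactly */

        entry->key.sip4 = cpu_to_be32(0xc0000201);      /* 192.0.2.1 */
        entry->msk.sip4 = cpu_to_be32(0xffffffff);      /* full /32 match */

        entry->action = RX_CLS_FLOW_DISC;       /* drop, not a queue index */
}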
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f2085340a..f28e769e6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1176,7 +1176,8 @@ static void nfp_net_rx_dim_work(struct work_struct *work)
* count.
*/
factor = nn->tlv_caps.me_freq_mhz / 16;
- if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ if (nfp_net_coalesce_para_check(factor * moder.usec) ||
+ nfp_net_coalesce_para_check(moder.pkts))
return;
/* copy RX interrupt coalesce parameters */
@@ -1205,7 +1206,8 @@ static void nfp_net_tx_dim_work(struct work_struct *work)
* count.
*/
factor = nn->tlv_caps.me_freq_mhz / 16;
- if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
+ if (nfp_net_coalesce_para_check(factor * moder.usec) ||
+ nfp_net_coalesce_para_check(moder.pkts))
return;
/* copy TX interrupt coalesce parameters */
@@ -1763,6 +1765,186 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
+static void
+nfp_net_fs_fill_v4(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip, m_sip, k_dip, m_dip;
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[7];
+ } v4_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v4_rule.loc = cpu_to_be16(entry->loc);
+ v4_rule.k_proto = entry->key.l4_proto;
+ v4_rule.m_proto = entry->msk.l4_proto;
+ v4_rule.k_sip = entry->key.sip4;
+ v4_rule.m_sip = entry->msk.sip4;
+ v4_rule.k_dip = entry->key.dip4;
+ v4_rule.m_dip = entry->msk.dip4;
+ v4_rule.k_sport = entry->key.sport;
+ v4_rule.m_sport = entry->msk.sport;
+ v4_rule.k_dport = entry->key.dport;
+ v4_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v4_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v4_rule.val[i]));
+}
+
+static void
+nfp_net_fs_fill_v6(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[19];
+ } v6_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v6_rule.loc = cpu_to_be16(entry->loc);
+ v6_rule.k_proto = entry->key.l4_proto;
+ v6_rule.m_proto = entry->msk.l4_proto;
+ for (i = 0; i < 4; i++) {
+ v6_rule.k_sip[i] = entry->key.sip6[i];
+ v6_rule.m_sip[i] = entry->msk.sip6[i];
+ v6_rule.k_dip[i] = entry->key.dip6[i];
+ v6_rule.m_dip[i] = entry->msk.dip6[i];
+ }
+ v6_rule.k_sport = entry->key.sport;
+ v6_rule.m_sport = entry->msk.sport;
+ v6_rule.k_dport = entry->key.dport;
+ v6_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v6_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v6_rule.val[i]));
+}
+
+#define NFP_FS_QUEUE_ID GENMASK(22, 16)
+#define NFP_FS_ACT GENMASK(15, 0)
+#define NFP_FS_ACT_DROP BIT(0)
+#define NFP_FS_ACT_Q BIT(1)
+static void
+nfp_net_fs_fill_act(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 addr)
+{
+ u32 action = 0; /* 0 means default passthrough */
+
+ if (entry->action == RX_CLS_FLOW_DISC)
+ action = NFP_FS_ACT_DROP;
+ else if (!(entry->flow_type & FLOW_RSS))
+ action = FIELD_PREP(NFP_FS_QUEUE_ID, entry->action) | NFP_FS_ACT_Q;
+
+ nn_writel(nn, addr, action);
+}
+
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Add new fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Delete fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void nfp_net_fs_clean(struct nfp_net *nn)
+{
+ struct nfp_fs_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
+ nfp_net_fs_del_hw(nn, entry);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -1934,7 +2116,10 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
if (skb_is_gso(skb)) {
u32 hdrlen;
- hdrlen = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ hdrlen = skb_inner_transport_offset(skb) + sizeof(struct udphdr);
+ else
+ hdrlen = skb_inner_tcp_all_headers(skb);
/* Assume worst case scenario of having longest possible
* metadata prepend - 8B
@@ -2237,7 +2422,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2266,6 +2451,7 @@ void nfp_net_info(struct nfp_net *nn)
"RXCSUM_COMPLETE " : "",
nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER ? "MULTICAST_FILTER " : "",
+ nn->cap_w1 & NFP_NET_CFG_CTRL_USO ? "USO " : "",
nfp_app_extra_cap(nn->app, nn));
}
@@ -2514,6 +2700,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
nn->cap & NFP_NET_CFG_CTRL_LSO2) {
netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (nn->cap_w1 & NFP_NET_CFG_CTRL_USO)
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
NFP_NET_CFG_CTRL_LSO;
}
@@ -2741,6 +2929,8 @@ int nfp_net_init(struct nfp_net *nn)
INIT_LIST_HEAD(&nn->mbox_amsg.list);
INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
+ INIT_LIST_HEAD(&nn->fs.list);
+
return register_netdev(nn->dp.netdev);
err_clean_mbox:
@@ -2760,6 +2950,7 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
+ nfp_net_fs_clean(nn);
flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3e63f6d6a..634c63c7f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -269,6 +269,8 @@
#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */
+#define NFP_NET_CFG_CTRL_FLOW_STEER (0x1 << 8) /* Flow steering */
+#define NFP_NET_CFG_CTRL_USO (0x1 << 16) /* UDP segmentation offload */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
@@ -418,6 +420,8 @@
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
+#define NFP_NET_CFG_MBOX_CMD_FLOW_STEER 10
+
/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
@@ -440,6 +444,18 @@
#define NFP_NET_CFG_MULTICAST_MAC_LO (NFP_NET_CFG_MULTICAST + 6)
#define NFP_NET_CFG_MULTICAST_SZ 0x0006
+/* Max size of FS rules in bytes */
+#define NFP_NET_CFG_FS_SZ 0x0054
+/* Sub commands for FS */
+enum {
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE,
+};
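
The sub-command enum relies on implicit sequential values starting at 0, so every _DEL_ opcode is its _ADD_ counterpart plus one and the V4/V6/ethtype pairs stay in protocol order. A small standalone check (constants renamed for brevity; the real names live in nfp_net_ctrl.h):

#include <stdio.h>

/* Mirrors the driver's sub-command enum: values are implicit,
 * starting at 0, so every _DEL_* is the matching _ADD_* + 1. */
enum {
	FS_ADD_V4, FS_DEL_V4,
	FS_ADD_V6, FS_DEL_V6,
	FS_ADD_ETHTYPE, FS_DEL_ETHTYPE,
};

int main(void)
{
	printf("ADD_V4=%d DEL_V4=%d\n", FS_ADD_V4, FS_DEL_V4);	/* 0 1 */
	printf("ADD_V6=%d DEL_V6=%d\n", FS_ADD_V6, FS_DEL_V6);	/* 2 3 */
	printf("ADD_ETHTYPE=%d DEL_ETHTYPE=%d\n",
	       FS_ADD_ETHTYPE, FS_DEL_ETHTYPE);			/* 4 5 */
	return 0;
}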
+
/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e75cbb287..fbca8d0ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -633,7 +633,8 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->tx_pending = nn->dp.txd_cnt;
}
-static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt,
+ struct netlink_ext_ack *extack)
{
struct nfp_net_dp *dp;
@@ -644,7 +645,7 @@ static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
dp->rxd_cnt = rxd_cnt;
dp->txd_cnt = txd_cnt;
- return nfp_net_ring_reconfig(nn, dp, NULL);
+ return nfp_net_ring_reconfig(nn, dp, extack);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
@@ -657,7 +658,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
- return -EINVAL;
+ return -EOPNOTSUPP;
qc_min = nn->dev_info->min_qc_size;
qc_max = nn->dev_info->max_qc_size;
@@ -666,9 +667,15 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- if (rxd_cnt < qc_min || rxd_cnt > qc_max ||
- txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp)
+ if (rxd_cnt < qc_min || rxd_cnt > qc_max) {
+ NL_SET_ERR_MSG_MOD(extack, "rx parameter out of bounds");
return -EINVAL;
+ }
+
+ if (txd_cnt < qc_min / tx_dpp || txd_cnt > qc_max / tx_dpp) {
+ NL_SET_ERR_MSG_MOD(extack, "tx parameter out of bounds");
+ return -EINVAL;
+ }
if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
return 0;
@@ -676,7 +683,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
- return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
+ return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt, extack);
}
static int nfp_test_link(struct net_device *netdev)
@@ -800,7 +807,7 @@ static void nfp_get_self_test_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < NFP_TEST_TOTAL_NUM; i++)
if (nfp_self_test[i].is_supported(netdev))
- ethtool_sprintf(&data, nfp_self_test[i].name);
+ ethtool_puts(&data, nfp_self_test[i].name);
}
static int nfp_get_self_test_count(struct net_device *netdev)
@@ -852,24 +859,24 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
ethtool_sprintf(&data, "rvec_%u_tx_busy", i);
}
- ethtool_sprintf(&data, "hw_rx_csum_ok");
- ethtool_sprintf(&data, "hw_rx_csum_inner_ok");
- ethtool_sprintf(&data, "hw_rx_csum_complete");
- ethtool_sprintf(&data, "hw_rx_csum_err");
- ethtool_sprintf(&data, "rx_replace_buf_alloc_fail");
- ethtool_sprintf(&data, "rx_tls_decrypted_packets");
- ethtool_sprintf(&data, "hw_tx_csum");
- ethtool_sprintf(&data, "hw_tx_inner_csum");
- ethtool_sprintf(&data, "tx_gather");
- ethtool_sprintf(&data, "tx_lso");
- ethtool_sprintf(&data, "tx_tls_encrypted_packets");
- ethtool_sprintf(&data, "tx_tls_ooo");
- ethtool_sprintf(&data, "tx_tls_drop_no_sync_data");
-
- ethtool_sprintf(&data, "hw_tls_no_space");
- ethtool_sprintf(&data, "rx_tls_resync_req_ok");
- ethtool_sprintf(&data, "rx_tls_resync_req_ign");
- ethtool_sprintf(&data, "rx_tls_resync_sent");
+ ethtool_puts(&data, "hw_rx_csum_ok");
+ ethtool_puts(&data, "hw_rx_csum_inner_ok");
+ ethtool_puts(&data, "hw_rx_csum_complete");
+ ethtool_puts(&data, "hw_rx_csum_err");
+ ethtool_puts(&data, "rx_replace_buf_alloc_fail");
+ ethtool_puts(&data, "rx_tls_decrypted_packets");
+ ethtool_puts(&data, "hw_tx_csum");
+ ethtool_puts(&data, "hw_tx_inner_csum");
+ ethtool_puts(&data, "tx_gather");
+ ethtool_puts(&data, "tx_lso");
+ ethtool_puts(&data, "tx_tls_encrypted_packets");
+ ethtool_puts(&data, "tx_tls_ooo");
+ ethtool_puts(&data, "tx_tls_drop_no_sync_data");
+
+ ethtool_puts(&data, "hw_tls_no_space");
+ ethtool_puts(&data, "rx_tls_resync_req_ok");
+ ethtool_puts(&data, "rx_tls_resync_req_ign");
+ ethtool_puts(&data, "rx_tls_resync_sent");
return data;
}
@@ -943,13 +950,13 @@ nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
swap_off = repr * NN_ET_SWITCH_STATS_LEN;
for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
- ethtool_sprintf(&data, nfp_net_et_stats[i + swap_off].name);
+ ethtool_puts(&data, nfp_net_et_stats[i + swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
- ethtool_sprintf(&data, nfp_net_et_stats[i - swap_off].name);
+ ethtool_puts(&data, nfp_net_et_stats[i - swap_off].name);
for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&data, nfp_net_et_stats[i].name);
+ ethtool_puts(&data, nfp_net_et_stats[i].name);
for (i = 0; i < num_vecs; i++) {
ethtool_sprintf(&data, "rxq_%u_pkts", i);
@@ -1317,6 +1324,116 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
return 0;
}
+#define NFP_FS_MAX_ENTRY 1024
+
+static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ unsigned int i;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.tcp_ip4_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip4_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.tcp_ip4_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip4_spec.pdst = entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.usr_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.usr_ip4_spec.proto = entry->key.l4_proto;
+ fs->m_u.usr_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.usr_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.usr_ip4_spec.proto = entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.usr_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.usr_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.usr_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.usr_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.usr_ip6_spec.l4_proto = entry->key.l4_proto;
+ fs->m_u.usr_ip6_spec.l4_proto = entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fs->h_u.ether_spec.h_proto = entry->key.l3_proto;
+ fs->m_u.ether_spec.h_proto = entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fs->flow_type = entry->flow_type;
+ fs->ring_cookie = entry->action;
+
+ if (fs->flow_type & FLOW_RSS) {
+ /* Only rss_context of 0 is supported. */
+ cmd->rss_context = 0;
+ /* RSS is used, mask the ring. */
+ fs->ring_cookie |= ETHTOOL_RX_FLOW_SPEC_RING;
+ }
+
+ return 0;
+}
+
+static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location)
+ return nfp_net_fs_to_ethtool(entry, cmd);
+
+ if (entry->loc > cmd->fs.location)
+ /* no need to continue */
+ return -ENOENT;
+ }
+
+ return -ENOENT;
+}
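
The lookup above can bail out at the first entry whose location exceeds the requested one because the add path further below keeps the list sorted by location in ascending order. A standalone sketch of the same early-exit search over a sorted collection, with hypothetical types and -1 standing in for -ENOENT:

#include <stdio.h>

struct rule { unsigned int loc; };

/* Early-exit lookup over a location-sorted array. */
static int find_rule(const struct rule *rules, int n, unsigned int loc)
{
	for (int i = 0; i < n; i++) {
		if (rules[i].loc == loc)
			return i;
		if (rules[i].loc > loc)
			return -1;	/* sorted: no need to continue */
	}
	return -1;
}

int main(void)
{
	struct rule rules[] = { {1}, {4}, {9} };

	printf("loc 4 -> index %d\n", find_rule(rules, 3, 4));	/* 1 */
	printf("loc 5 -> index %d\n", find_rule(rules, 3, 5));	/* -1 */
	return 0;
}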
+
+static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
+{
+ struct nfp_fs_entry *entry;
+ u32 count = 0;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(entry, &nn->fs.list, node)
+ rule_locs[count++] = entry->loc;
+
+ return 0;
+}
+
static int nfp_net_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
@@ -1326,6 +1443,14 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nn->dp.num_rx_rings;
return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = nn->fs.count;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return nfp_net_get_fs_rule(nn, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = NFP_FS_MAX_ENTRY;
+ return nfp_net_get_fs_loc(nn, rule_locs);
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
default:
@@ -1385,6 +1510,253 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return 0;
}
+static int nfp_net_fs_from_ethtool(struct nfp_fs_entry *entry, struct ethtool_rx_flow_spec *fs)
+{
+ unsigned int i;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ entry->msk.sip4 = fs->m_u.tcp_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.tcp_ip4_spec.ip4dst;
+ entry->msk.sport = fs->m_u.tcp_ip4_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip4_spec.pdst;
+ entry->key.sip4 = fs->h_u.tcp_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.tcp_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.sport = fs->h_u.tcp_ip4_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip4_spec.pdst & entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.sport = fs->m_u.tcp_ip6_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip6_spec.pdst;
+ entry->key.sport = fs->h_u.tcp_ip6_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip6_spec.pdst & entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ entry->msk.sip4 = fs->m_u.usr_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.usr_ip4_spec.ip4dst;
+ entry->msk.l4_proto = fs->m_u.usr_ip4_spec.proto;
+ entry->key.sip4 = fs->h_u.usr_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.usr_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.l4_proto = fs->h_u.usr_ip4_spec.proto & entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.usr_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.usr_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.usr_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.usr_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.l4_proto = fs->m_u.usr_ip6_spec.l4_proto;
+ entry->key.l4_proto = fs->h_u.usr_ip6_spec.l4_proto & entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ entry->msk.l3_proto = fs->m_u.ether_spec.h_proto;
+ entry->key.l3_proto = fs->h_u.ether_spec.h_proto & entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_TCP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_UDP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_SCTP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ }
+
+ entry->flow_type = fs->flow_type;
+ entry->action = fs->ring_cookie;
+ entry->loc = fs->location;
+
+ return 0;
+}
+
+static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new)
+{
+ struct nfp_fs_entry *entry;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (new->loc != entry->loc &&
+ !((new->flow_type ^ entry->flow_type) & ~FLOW_RSS) &&
+ !memcmp(&new->key, &entry->key, sizeof(new->key)) &&
+ !memcmp(&new->msk, &entry->msk, sizeof(new->msk)))
+ return entry->loc;
+ }
+
+ /* -1 means no duplicates */
+ return -1;
+}
+
+static int nfp_net_fs_add(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct nfp_fs_entry *new, *entry;
+ bool unsupp_mask;
+ int err, id;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ /* Only default RSS context(0) is supported. */
+ if ((fs->flow_type & FLOW_RSS) && cmd->rss_context)
+ return -EOPNOTSUPP;
+
+ if (fs->location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= nn->dp.num_rx_rings)
+ return -EINVAL;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV4_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip4_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip4_spec.tos ||
+ !!fs->m_u.usr_ip4_spec.ip_ver;
+ /* ip_ver must be ETH_RX_NFC_IP4. */
+ unsupp_mask |= fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4;
+ break;
+ case IPV6_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip6_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip6_spec.tclass;
+ break;
+ case ETHER_FLOW:
+ if (fs->h_u.ether_spec.h_proto == htons(ETH_P_IP) ||
+ fs->h_u.ether_spec.h_proto == htons(ETH_P_IPV6)) {
+ nn_err(nn, "Please use ip4/ip6 flow type instead.\n");
+ return -EOPNOTSUPP;
+ }
+ /* Only unmasked ethtype is supported. */
+ unsupp_mask = !is_zero_ether_addr(fs->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fs->m_u.ether_spec.h_source) ||
+ (fs->m_u.ether_spec.h_proto != htons(0xffff));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (unsupp_mask)
+ return -EOPNOTSUPP;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ nfp_net_fs_from_ethtool(new, fs);
+
+ id = nfp_net_fs_check_existing(nn, new);
+ if (id >= 0) {
+ nn_err(nn, "Identical rule is existing in %d.\n", id);
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Insert to list in ascending order of location. */
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == fs->location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ goto err;
+
+ nn->fs.count--;
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ nn->fs.count++;
+ list_replace(&entry->node, &new->node);
+ kfree(entry);
+
+ return 0;
+ }
+
+ if (entry->loc > fs->location)
+ break;
+ }
+
+ if (nn->fs.count == NFP_FS_MAX_ENTRY) {
+ err = -ENOSPC;
+ goto err;
+ }
+
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ list_add_tail(&new->node, &entry->node);
+ nn->fs.count++;
+
+ return 0;
+
+err:
+ kfree(new);
+ return err;
+}
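
nfp_net_fs_add() thus has three outcomes: replace the entry already holding the location (the old hardware rule is deleted and the new one programmed before the list node is swapped), insert before the first entry with a larger location, or append at the tail. The single list_add_tail(&new->node, &entry->node) after the loop covers the last two cases because, after a full traversal without a break, entry refers back to the list head. A simplified userspace analogue of the ordered insert-or-replace (no hardware step, hypothetical types):

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int loc;
	struct rule *next;
};

/* Ordered insert-or-replace keyed by location: an existing entry at
 * the same loc is swapped out, otherwise the new node lands before
 * the first entry with a larger loc (or at the tail). */
static void insert_rule(struct rule **head, struct rule *new)
{
	struct rule **pp = head;

	while (*pp && (*pp)->loc < new->loc)
		pp = &(*pp)->next;

	if (*pp && (*pp)->loc == new->loc) {	/* replace in place */
		struct rule *old = *pp;

		new->next = old->next;
		*pp = new;
		free(old);
		return;
	}
	new->next = *pp;			/* sorted insert/append */
	*pp = new;
}

int main(void)
{
	struct rule *head = NULL;
	unsigned int locs[] = { 5, 1, 5, 3 };

	for (int i = 0; i < 4; i++) {
		struct rule *r = malloc(sizeof(*r));
		r->loc = locs[i];
		r->next = NULL;
		insert_rule(&head, r);
	}
	for (struct rule *r = head; r; r = r->next)
		printf("%u ", r->loc);		/* 1 3 5 */
	printf("\n");
	return 0;
}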
+
+static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+ int err;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ return err;
+
+ list_del(&entry->node);
+ kfree(entry);
+ nn->fs.count--;
+
+ return 0;
+ } else if (entry->loc > cmd->fs.location) {
+ /* no need to continue */
+ break;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int nfp_net_set_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd)
{
@@ -1393,6 +1765,10 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
return nfp_net_set_rss_hash_opt(nn, cmd);
+ case ETHTOOL_SRXCLSRLINS:
+ return nfp_net_fs_add(nn, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return nfp_net_fs_del(nn, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1418,8 +1794,8 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
return nfp_net_rss_key_sz(nn);
}
-static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int nfp_net_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
@@ -1427,41 +1803,41 @@ static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
return -EOPNOTSUPP;
- if (indir)
+ if (rxfh->indir)
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
- indir[i] = nn->rss_itbl[i];
- if (key)
- memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
- if (hfunc) {
- *hfunc = nn->rss_hfunc;
- if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- }
+ rxfh->indir[i] = nn->rss_itbl[i];
+ if (rxfh->key)
+ memcpy(rxfh->key, nn->rss_key, nfp_net_rss_key_sz(nn));
+
+ rxfh->hfunc = nn->rss_hfunc;
+ if (rxfh->hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
+ rxfh->hfunc = ETH_RSS_HASH_UNKNOWN;
return 0;
}
static int nfp_net_set_rxfh(struct net_device *netdev,
- const u32 *indir, const u8 *key,
- const u8 hfunc)
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
- !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
+ !(rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE ||
+ rxfh->hfunc == nn->rss_hfunc))
return -EOPNOTSUPP;
- if (!key && !indir)
+ if (!rxfh->key && !rxfh->indir)
return 0;
- if (key) {
- memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
+ if (rxfh->key) {
+ memcpy(nn->rss_key, rxfh->key, nfp_net_rss_key_sz(nn));
nfp_net_rss_write_key(nn);
}
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
- nn->rss_itbl[i] = indir[i];
+ nn->rss_itbl[i] = rxfh->indir[i];
nfp_net_rss_write_itbl(nn);
}
@@ -1497,7 +1873,7 @@ static int nfp_net_get_coalesce(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
- return -EINVAL;
+ return -EOPNOTSUPP;
ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on;
ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on;
@@ -1776,22 +2152,40 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
*/
if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* ensure valid configuration */
- if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
+ if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rx-usecs and rx-frames cannot both be zero");
return -EINVAL;
+ }
- if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
+ if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "tx-usecs and tx-frames cannot both be zero");
return -EINVAL;
+ }
- if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor,
- ec->rx_max_coalesced_frames))
+ if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx-usecs too large");
return -EINVAL;
+ }
- if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor,
- ec->tx_max_coalesced_frames))
+ if (nfp_net_coalesce_para_check(ec->rx_max_coalesced_frames)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx-frames too large");
return -EINVAL;
+ }
+
+ if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx-usecs too large");
+ return -EINVAL;
+ }
+
+ if (nfp_net_coalesce_para_check(ec->tx_max_coalesced_frames)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx-frames too large");
+ return -EINVAL;
+ }
/* configuration is valid */
nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce;
@@ -1866,6 +2260,30 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static int nfp_port_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg != AUTONEG_DISABLE)
+ return -EOPNOTSUPP;
+
+ err = nfp_eth_set_pauseparam(port->app->cpp, eth_port->index,
+ pause->tx_pause, pause->rx_pause);
+ if (!err)
+ /* Only refresh if we did something */
+ nfp_net_refresh_port_table(port);
+
+ return err < 0 ? err : 0;
+}
+
static void nfp_port_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1877,10 +2295,10 @@ static void nfp_port_get_pauseparam(struct net_device *netdev,
if (!eth_port)
return;
- /* Currently pause frame support is fixed */
+ /* Currently pause frame autoneg is fixed */
pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = 1;
- pause->tx_pause = 1;
+ pause->rx_pause = eth_port->rx_pause;
+ pause->tx_pause = eth_port->tx_pause;
}
static int nfp_net_set_phys_id(struct net_device *netdev,
@@ -2106,8 +2524,10 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .set_pauseparam = nfp_port_set_pauseparam,
.get_pauseparam = nfp_port_get_pauseparam,
.set_phys_id = nfp_net_set_phys_id,
+ .get_ts_info = ethtool_op_get_ts_info,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -2130,6 +2550,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .set_pauseparam = nfp_port_set_pauseparam,
.get_pauseparam = nfp_port_get_pauseparam,
.set_phys_id = nfp_net_set_phys_id,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 00264af13..dc0e405c1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -189,6 +189,8 @@ enum nfp_ethtool_link_mode_list {
* @ports.enabled: is enabled?
* @ports.tx_enabled: is TX enabled?
* @ports.rx_enabled: is RX enabled?
+ * @ports.rx_pause: is RX pause frame enabled?
+ * @ports.tx_pause: is TX pause frame enabled?
* @ports.override_changed: is media reconfig pending?
*
* @ports.port_type: one of %PORT_* defines for ethtool
@@ -227,6 +229,8 @@ struct nfp_eth_table {
bool tx_enabled;
bool rx_enabled;
bool supp_aneg;
+ bool rx_pause;
+ bool tx_pause;
bool override_changed;
@@ -255,6 +259,8 @@ int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+int nfp_eth_set_pauseparam(struct nfp_cpp *cpp, unsigned int idx,
+ unsigned int tx_pause, unsigned int rx_pause);
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 9d62085d7..5cfddc9a5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -42,6 +42,8 @@
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28)
+#define NSP_ETH_STATE_TX_PAUSE BIT_ULL(31)
+#define NSP_ETH_STATE_RX_PAUSE BIT_ULL(32)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -52,6 +54,8 @@
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
+#define NSP_ETH_CTRL_SET_TX_PAUSE BIT_ULL(10)
+#define NSP_ETH_CTRL_SET_RX_PAUSE BIT_ULL(11)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -180,6 +184,15 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state);
dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 37) {
+ dst->tx_pause = true;
+ dst->rx_pause = true;
+ return;
+ }
+
+ dst->tx_pause = FIELD_GET(NSP_ETH_STATE_TX_PAUSE, state);
+ dst->rx_pause = FIELD_GET(NSP_ETH_STATE_RX_PAUSE, state);
}
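
Pause state lives in bits 31 (TX) and 32 (RX) of the 64-bit port state word, and only NSP ABI minor 37 or later reports them; older firmware predates the fields, so the driver assumes both directions on, matching its previous fixed behaviour. A standalone decode of that version gate (masks copied from the defines above):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define STATE_TX_PAUSE (1ULL << 31)
#define STATE_RX_PAUSE (1ULL << 32)

/* Version-gated decode: pre-ABI-37 firmware never wrote these bits,
 * so report the historical fixed behaviour (both directions on). */
static void decode_pause(uint64_t state, unsigned int abi_minor,
			 bool *tx, bool *rx)
{
	if (abi_minor < 37) {
		*tx = true;
		*rx = true;
		return;
	}
	*tx = !!(state & STATE_TX_PAUSE);
	*rx = !!(state & STATE_RX_PAUSE);
}

int main(void)
{
	bool tx, rx;

	decode_pause(STATE_RX_PAUSE, 37, &tx, &rx);
	printf("ABI 37: tx=%d rx=%d\n", tx, rx);	/* tx=0 rx=1 */
	decode_pause(0, 36, &tx, &rx);
	printf("ABI 36: tx=%d rx=%d\n", tx, rx);	/* tx=1 rx=1 */
	return 0;
}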
static void
@@ -497,7 +510,7 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
static int
nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
const u64 mask, const unsigned int shift,
- unsigned int val, const u64 ctrl_bit)
+ u64 val, const u64 ctrl_bit)
{
union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
unsigned int idx = nfp_nsp_config_idx(nsp);
@@ -630,6 +643,81 @@ nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
}
/**
+ * __nfp_eth_set_txpause() - set tx pause control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @tx_pause: TX pause switch
+ *
+ * Set TX pause switch.
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_txpause(struct nfp_nsp *nsp, unsigned int tx_pause)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_TX_PAUSE,
+ tx_pause, NSP_ETH_CTRL_SET_TX_PAUSE);
+}
+
+/**
+ * __nfp_eth_set_rxpause() - set rx pause control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @rx_pause: RX pause switch
+ *
+ * Set RX pause switch.
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_rxpause(struct nfp_nsp *nsp, unsigned int rx_pause)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_RX_PAUSE,
+ rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
+}
+
+/**
+ * nfp_eth_set_pauseparam() - Set TX/RX pause switch.
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @tx_pause: TX pause switch
+ * @rx_pause: RX pause switch
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_pauseparam(struct nfp_cpp *cpp, unsigned int idx,
+ unsigned int tx_pause, unsigned int rx_pause)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 37) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set pause parameter operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ err = __nfp_eth_set_txpause(nsp, tx_pause);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ err = __nfp_eth_set_rxpause(nsp, rx_pause);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
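
Note the transaction shape here: nfp_eth_config_start() opens a staging session, the two setters above only stage control bits, and nothing reaches the device until nfp_eth_config_commit_end(); every error path must unwind via nfp_eth_config_cleanup_end(). A runnable sketch of the same begin/stage/commit-or-abort flow, with stub steps standing in for the NSP calls:

#include <stdio.h>

/* Stub staging steps standing in for __nfp_eth_set_txpause() and
 * __nfp_eth_set_rxpause(); return 0 on success. */
static int stage_tx(int on) { printf("stage tx=%d\n", on); return 0; }
static int stage_rx(int on) { printf("stage rx=%d\n", on); return 0; }

static void cleanup(void) { printf("abort, nothing applied\n"); }
static int commit(void)   { printf("commit to device\n"); return 0; }

/* Begin/stage/commit-or-abort, mirroring nfp_eth_set_pauseparam(). */
static int set_pauseparam(int tx, int rx)
{
	int err;

	err = stage_tx(tx);
	if (err) {
		cleanup();
		return err;
	}
	err = stage_rx(rx);
	if (err) {
		cleanup();
		return err;
	}
	return commit();
}

int main(void)
{
	return set_pauseparam(1, 0);
}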
+
+/**
* __nfp_eth_set_speed() - set interface speed/rate
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
* @speed: Desired speed (per lane)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 2453a40f6..9ffef2e06 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -91,6 +91,4 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
-const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr);
-
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index fa4237c27..6ba8d4aca 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
bars[i].len = 0;
}
}
+ ionic->num_bars = 0;
}
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,9 +216,18 @@ out:
static void ionic_clear_pci(struct ionic *ionic)
{
- ionic_unmap_bars(ionic);
- pci_release_regions(ionic->pdev);
- pci_disable_device(ionic->pdev);
+ if (ionic->num_bars) {
+ ionic->idev.dev_info_regs = NULL;
+ ionic->idev.dev_cmd_regs = NULL;
+ ionic->idev.intr_status = NULL;
+ ionic->idev.intr_ctrl = NULL;
+
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ }
+
+ if (pci_is_enabled(ionic->pdev))
+ pci_disable_device(ionic->pdev);
}
static int ionic_setup_one(struct ionic *ionic)
@@ -389,7 +399,7 @@ static void ionic_remove(struct pci_dev *pdev)
{
struct ionic *ionic = pci_get_drvdata(pdev);
- del_timer_sync(&ionic->watchdog_timer);
+ timer_shutdown_sync(&ionic->watchdog_timer);
if (ionic->lif) {
/* prevent adminq cmds if already known as down */
@@ -420,6 +430,8 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
dev_dbg(ionic->dev, "%s: device stopping\n", __func__);
+ set_bit(IONIC_LIF_F_FW_RESET, lif->state);
+
del_timer_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
@@ -428,6 +440,7 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
ionic_txrx_free(lif);
ionic_lif_deinit(lif);
ionic_qcqs_free(lif);
+ ionic_debugfs_del_lif(lif);
mutex_unlock(&lif->queue_lock);
ionic_dev_teardown(ionic);
@@ -459,10 +472,35 @@ err_out:
__func__, err ? "failed" : "done");
}
+static pci_ers_result_t ionic_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ ionic_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void ionic_pci_error_resume(struct pci_dev *pdev)
+{
+ struct ionic *ionic = pci_get_drvdata(pdev);
+ struct ionic_lif *lif = ionic->lif;
+
+ if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ pci_reset_function_locked(pdev);
+}
+
static const struct pci_error_handlers ionic_err_handler = {
/* FLR handling */
.reset_prepare = ionic_reset_prepare,
.reset_done = ionic_reset_done,
+
+ /* PCI bus error detected on this device */
+ .error_detected = ionic_pci_error_detected,
+ .resume = ionic_pci_error_resume,
+
};
static struct pci_driver ionic_driver = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index c58217027..91327ef67 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -287,6 +287,9 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif)
void ionic_debugfs_del_lif(struct ionic_lif *lif)
{
+ if (!lif->dentry)
+ return;
+
debugfs_remove_recursive(lif->dentry);
lif->dentry = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 22ab0a44f..746072b4d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -165,9 +165,19 @@ void ionic_dev_teardown(struct ionic *ionic)
}
/* Devcmd Interface */
-bool ionic_is_fw_running(struct ionic_dev *idev)
+static bool __ionic_is_fw_running(struct ionic_dev *idev, u8 *status_ptr)
{
- u8 fw_status = ioread8(&idev->dev_info_regs->fw_status);
+ u8 fw_status;
+
+ if (!idev->dev_info_regs) {
+ if (status_ptr)
+ *status_ptr = 0xff;
+ return false;
+ }
+
+ fw_status = ioread8(&idev->dev_info_regs->fw_status);
+ if (status_ptr)
+ *status_ptr = fw_status;
/* firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
@@ -175,6 +185,11 @@ bool ionic_is_fw_running(struct ionic_dev *idev)
return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
}
+bool ionic_is_fw_running(struct ionic_dev *idev)
+{
+ return __ionic_is_fw_running(idev, NULL);
+}
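
A status byte of 0xff is what a failed PCI read returns, so it can never be trusted as running firmware; the new helper also synthesizes 0xff once the BARs are unmapped, so callers see the same "bad read" signal either way. A tiny standalone check (the running flag is assumed to be bit 0 here, mirroring IONIC_FW_STS_F_RUNNING):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FW_RUNNING_BIT 0x01	/* assumed running flag, lowest bit */

/* 0xff means the PCI read itself failed (or regs are unmapped),
 * so a status of 0xff can never count as "running". */
static bool fw_running(uint8_t fw_status)
{
	return fw_status != 0xff && (fw_status & FW_RUNNING_BIT);
}

int main(void)
{
	printf("%d %d %d\n", fw_running(0x01), fw_running(0xff),
	       fw_running(0x00));	/* 1 0 0 */
	return 0;
}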
+
int ionic_heartbeat_check(struct ionic *ionic)
{
unsigned long check_time, last_check_time;
@@ -199,10 +214,8 @@ do_check_time:
goto do_check_time;
}
- fw_status = ioread8(&idev->dev_info_regs->fw_status);
-
/* If fw_status is not ready don't bother with the generation */
- if (!ionic_is_fw_running(idev)) {
+ if (!__ionic_is_fw_running(idev, &fw_status)) {
fw_status_ready = false;
} else {
fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
@@ -306,22 +319,32 @@ do_check_time:
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return (u8)PCI_ERROR_RESPONSE;
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return false;
return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
+ if (!idev->dev_cmd_regs)
+ return;
memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
idev->opcode = cmd->cmd.opcode;
+
+ if (!idev->dev_cmd_regs)
+ return;
+
memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
iowrite32(0, &idev->dev_cmd_regs->done);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
@@ -470,46 +493,6 @@ int ionic_set_vf_config(struct ionic *ionic, int vf,
return err;
}
-int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
- struct ionic_vf_getattr_comp *comp)
-{
- union ionic_dev_cmd cmd = {
- .vf_getattr.opcode = IONIC_CMD_VF_GETATTR,
- .vf_getattr.attr = attr,
- .vf_getattr.vf_index = cpu_to_le16(vf),
- };
- int err;
-
- if (vf >= ionic->num_vfs)
- return -EINVAL;
-
- switch (attr) {
- case IONIC_VF_ATTR_SPOOFCHK:
- case IONIC_VF_ATTR_TRUST:
- case IONIC_VF_ATTR_LINKSTATE:
- case IONIC_VF_ATTR_MAC:
- case IONIC_VF_ATTR_VLAN:
- case IONIC_VF_ATTR_RATE:
- break;
- case IONIC_VF_ATTR_STATSADDR:
- default:
- return -EINVAL;
- }
-
- mutex_lock(&ionic->dev_cmd_lock);
- ionic_dev_cmd_go(&ionic->idev, &cmd);
- err = ionic_dev_cmd_wait_nomsg(ionic, DEVCMD_TIMEOUT);
- memcpy_fromio(comp, &ionic->idev.dev_cmd_regs->comp.vf_getattr,
- sizeof(*comp));
- mutex_unlock(&ionic->dev_cmd_lock);
-
- if (err && comp->status != IONIC_RC_ENOSUPP)
- ionic_dev_cmd_dev_err_print(ionic, cmd.vf_getattr.opcode,
- comp->status, err);
-
- return err;
-}
-
void ionic_vf_start(struct ionic *ionic)
{
union ionic_dev_cmd cmd = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index fd112bee4..2667e1cde 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -270,12 +270,12 @@ struct ionic_queue {
struct ionic_intr_info {
char name[IONIC_INTR_NAME_MAX_SZ];
+ u64 rearm_count;
unsigned int index;
unsigned int vector;
- u64 rearm_count;
unsigned int cpu;
- cpumask_t affinity_mask;
u32 dim_coal_hw;
+ cpumask_t affinity_mask;
};
struct ionic_cq {
@@ -342,8 +342,7 @@ void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type);
int ionic_set_vf_config(struct ionic *ionic, int vf,
struct ionic_vf_setattr_cmd *vfc);
-int ionic_dev_cmd_vf_getattr(struct ionic *ionic, int vf, u8 attr,
- struct ionic_vf_getattr_comp *comp);
+
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
u16 lif_type, u8 qtype, u8 qver);
void ionic_vf_start(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3a6b0a9bc..0ffc9c490 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev;
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ idev = &lif->ionic->idev;
+ if (!idev->dev_info_regs)
+ return;
+
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}
static void ionic_get_link_ext_stats(struct net_device *netdev,
@@ -823,36 +828,38 @@ static u32 ionic_get_rxfh_key_size(struct net_device *netdev)
return IONIC_RSS_HASH_KEY_SIZE;
}
-static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ionic_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ionic_lif *lif = netdev_priv(netdev);
unsigned int i, tbl_sz;
- if (indir) {
+ if (rxfh->indir) {
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
for (i = 0; i < tbl_sz; i++)
- indir[i] = lif->rss_ind_tbl[i];
+ rxfh->indir[i] = lif->rss_ind_tbl[i];
}
- if (key)
- memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
+ if (rxfh->key)
+ memcpy(rxfh->key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ionic_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- return ionic_lif_rss_config(lif, lif->rss_types, key, indir);
+ return ionic_lif_rss_config(lif, lif->rss_types,
+ rxfh->key, rxfh->indir);
}
static int ionic_set_tunable(struct net_device *dev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index 5f40324cd..3c209c1a2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
dl = priv_to_devlink(ionic);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ if (!idev->dev_cmd_regs) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 075e0e3fc..44d30115d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -424,14 +424,10 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_qcq_intr_free(lif, qcq);
- if (qcq->cq.info) {
- vfree(qcq->cq.info);
- qcq->cq.info = NULL;
- }
- if (qcq->q.info) {
- vfree(qcq->q.info);
- qcq->q.info = NULL;
- }
+ vfree(qcq->cq.info);
+ qcq->cq.info = NULL;
+ vfree(qcq->q.info);
+ qcq->q.info = NULL;
}
void ionic_qcqs_free(struct ionic_lif *lif)
@@ -2332,82 +2328,11 @@ static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd
}
}
-static int ionic_get_fw_vf_config(struct ionic *ionic, int vf, struct ionic_vf *vfdata)
-{
- struct ionic_vf_getattr_comp comp = { 0 };
- int err;
- u8 attr;
-
- attr = IONIC_VF_ATTR_VLAN;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->vlanid = comp.vlanid;
-
- attr = IONIC_VF_ATTR_SPOOFCHK;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->spoofchk = comp.spoofchk;
-
- attr = IONIC_VF_ATTR_LINKSTATE;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err) {
- switch (comp.linkstate) {
- case IONIC_VF_LINK_STATUS_UP:
- vfdata->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- break;
- case IONIC_VF_LINK_STATUS_DOWN:
- vfdata->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- break;
- case IONIC_VF_LINK_STATUS_AUTO:
- vfdata->linkstate = IFLA_VF_LINK_STATE_AUTO;
- break;
- default:
- dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
- break;
- }
- }
-
- attr = IONIC_VF_ATTR_RATE;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->maxrate = comp.maxrate;
-
- attr = IONIC_VF_ATTR_TRUST;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- vfdata->trusted = comp.trust;
-
- attr = IONIC_VF_ATTR_MAC;
- err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
- if (err && comp.status != IONIC_RC_ENOSUPP)
- goto err_out;
- if (!err)
- ether_addr_copy(vfdata->macaddr, comp.macaddr);
-
-err_out:
- if (err)
- dev_err(ionic->dev, "Failed to get %s for VF %d\n",
- ionic_vf_attr_to_str(attr), vf);
-
- return err;
-}
-
static int ionic_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivf)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
- struct ionic_vf vfdata = { 0 };
int ret = 0;
if (!netif_device_present(netdev))
@@ -2418,18 +2343,16 @@ static int ionic_get_vf_config(struct net_device *netdev,
if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
ret = -EINVAL;
} else {
- ivf->vf = vf;
- ivf->qos = 0;
-
- ret = ionic_get_fw_vf_config(ionic, vf, &vfdata);
- if (!ret) {
- ivf->vlan = le16_to_cpu(vfdata.vlanid);
- ivf->spoofchk = vfdata.spoofchk;
- ivf->linkstate = vfdata.linkstate;
- ivf->max_tx_rate = le32_to_cpu(vfdata.maxrate);
- ivf->trusted = vfdata.trusted;
- ether_addr_copy(ivf->mac, vfdata.macaddr);
- }
+ struct ionic_vf *vfdata = &ionic->vfs[vf];
+
+ ivf->vf = vf;
+ ivf->qos = 0;
+ ivf->vlan = le16_to_cpu(vfdata->vlanid);
+ ivf->spoofchk = vfdata->spoofchk;
+ ivf->linkstate = vfdata->linkstate;
+ ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate);
+ ivf->trusted = vfdata->trusted;
+ ether_addr_copy(ivf->mac, vfdata->macaddr);
}
up_read(&ionic->vf_op_lock);
@@ -3127,6 +3050,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif = netdev_priv(netdev);
lif->netdev = netdev;
ionic->lif = lif;
+ lif->ionic = ionic;
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
@@ -3149,7 +3073,6 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
- lif->ionic = ionic;
lif->index = 0;
if (is_kdump_kernel()) {
@@ -3468,9 +3391,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
napi_enable(&qcq->napi);
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3636,7 +3562,10 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
}
- err = ionic_init_nic_features(lif);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ err = ionic_set_nic_features(lif, lif->netdev->features);
+ else
+ err = ionic_init_nic_features(lif);
if (err)
goto err_out_notifyq_deinit;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 457c24195..61548b3ee 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -312,6 +312,11 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
return (usecs * mult) / div;
}
+static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
+{
+ return unlikely(q->features & IONIC_TXQ_F_HWSTAMP);
+}
+
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 83c413a10..2f479de32 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -188,28 +188,6 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
}
}
-const char *ionic_vf_attr_to_str(enum ionic_vf_attr attr)
-{
- switch (attr) {
- case IONIC_VF_ATTR_SPOOFCHK:
- return "IONIC_VF_ATTR_SPOOFCHK";
- case IONIC_VF_ATTR_TRUST:
- return "IONIC_VF_ATTR_TRUST";
- case IONIC_VF_ATTR_LINKSTATE:
- return "IONIC_VF_ATTR_LINKSTATE";
- case IONIC_VF_ATTR_MAC:
- return "IONIC_VF_ATTR_MAC";
- case IONIC_VF_ATTR_VLAN:
- return "IONIC_VF_ATTR_VLAN";
- case IONIC_VF_ATTR_RATE:
- return "IONIC_VF_ATTR_RATE";
- case IONIC_VF_ATTR_STATSADDR:
- return "IONIC_VF_ATTR_STATSADDR";
- default:
- return "IONIC_VF_ATTR_UNKNOWN";
- }
-}
-
static void ionic_adminq_flush(struct ionic_lif *lif)
{
struct ionic_desc_info *desc_info;
@@ -438,6 +416,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
+ if (!idev->dev_cmd_regs)
+ return;
+
iowrite32(0, &idev->dev_cmd_regs->doorbell);
memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 9859a4432..1f6022fb7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -258,10 +258,10 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
int i, q_num;
for (i = 0; i < IONIC_NUM_LIF_STATS; i++)
- ethtool_sprintf(buf, ionic_lif_stats_desc[i].name);
+ ethtool_puts(buf, ionic_lif_stats_desc[i].name);
for (i = 0; i < IONIC_NUM_PORT_STATS; i++)
- ethtool_sprintf(buf, ionic_port_stats_desc[i].name);
+ ethtool_puts(buf, ionic_port_stats_desc[i].name);
for (q_num = 0; q_num < MAX_Q(lif); q_num++)
ionic_sw_stats_get_tx_strings(lif, buf, q_num);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ccc1b1d40..6f4776759 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -579,6 +579,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done = ionic_cq_service(cq, budget,
ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
+
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -607,6 +610,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
+ if (unlikely(!budget))
+ return budget;
+
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
@@ -656,6 +662,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
+
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
@@ -803,7 +812,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
qi = skb_get_queue_mapping(skb);
- if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
+ if (ionic_txq_hwstamp_enabled(q)) {
if (cq_info) {
struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp;
@@ -870,7 +879,7 @@ bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
- if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
return true;
@@ -908,7 +917,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info->cb_arg = NULL;
}
- if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}
@@ -986,7 +995,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
if (start) {
skb_tx_timestamp(skb);
- if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (!ionic_txq_hwstamp_enabled(q))
netdev_tx_sent_queue(q_to_ndq(q), skb->len);
ionic_txq_post(q, false, ionic_tx_clean, skb);
} else {
@@ -1233,7 +1242,7 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
+ if (!ionic_txq_hwstamp_enabled(q))
netdev_tx_sent_queue(q_to_ndq(q), skb->len);
ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b6b849a07..0e240b5ab 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1370,28 +1370,29 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev)
return sizeof(edev->rss_key);
}
-static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int qede_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct qede_dev *edev = netdev_priv(dev);
int i;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
- indir[i] = edev->rss_ind_table[i];
+ rxfh->indir[i] = edev->rss_ind_table[i];
- if (key)
- memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
+ if (rxfh->key)
+ memcpy(rxfh->key, edev->rss_key, qede_get_rxfh_key_size(dev));
return 0;
}
-static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int qede_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
struct qede_dev *edev = netdev_priv(dev);
@@ -1403,20 +1404,21 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
return -EOPNOTSUPP;
}
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir && !key)
+ if (!rxfh->indir && !rxfh->key)
return 0;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
- edev->rss_ind_table[i] = indir[i];
+ edev->rss_ind_table[i] = rxfh->indir[i];
edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
}
- if (key) {
- memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
+ if (rxfh->key) {
+ memcpy(&edev->rss_key, rxfh->key, qede_get_rxfh_key_size(dev));
edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index c95d56e56..b733374b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2092,8 +2092,8 @@ static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
- QLC_FW_FILE_NAME_LEN);
+ strscpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
+ sizeof(fw_info->fw_file_name));
ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
if (ret) {
@@ -2396,12 +2396,12 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
switch (pdev->device) {
case PCI_DEVICE_ID_QLOGIC_QLE834X:
case PCI_DEVICE_ID_QLOGIC_QLE8830:
- strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
- QLC_FW_FILE_NAME_LEN);
+ strscpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ sizeof(fw_info->fw_file_name));
break;
case PCI_DEVICE_ID_QLOGIC_QLE844X:
- strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
- QLC_FW_FILE_NAME_LEN);
+ strscpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ sizeof(fw_info->fw_file_name));
break;
default:
dev_err(&pdev->dev, "%s: Invalid device id\n",
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 9adec91f3..223321897 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -58,9 +58,8 @@ struct qcauart {
unsigned char *tx_buffer;
};
-static int
-qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
- size_t count)
+static ssize_t
+qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
{
struct qcauart *qca = serdev_device_get_drvdata(serdev);
struct net_device *netdev = qca->net_dev;
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 93d9df55b..03015b665 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -113,4 +113,11 @@ config R8169
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
+config R8169_LEDS
+ def_bool R8169 && LEDS_TRIGGER_NETDEV
+ depends on !(R8169=y && LEDS_CLASS=m)
+ help
+ Optional support for controlling the NIC LEDs with the netdev
+ LED trigger.
+
endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index 2e1d78b10..635491d88 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,5 +6,6 @@
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ATP) += atp.o
-r8169-objs += r8169_main.o r8169_firmware.o r8169_phy_config.o
+r8169-y += r8169_main.o r8169_firmware.o r8169_phy_config.o
+r8169-$(CONFIG_R8169_LEDS) += r8169_leds.o
obj-$(CONFIG_R8169) += r8169.o
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 55ef8251f..1ef399287 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -8,6 +8,7 @@
* See MAINTAINERS file for support contact information.
*/
+#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/phy.h>
@@ -71,9 +72,17 @@ enum mac_version {
};
struct rtl8169_private;
+struct r8169_led_classdev;
void r8169_apply_firmware(struct rtl8169_private *tp);
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp);
u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr);
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver);
+
+void r8169_get_led_name(struct rtl8169_private *tp, int idx,
+ char *buf, int buf_len);
+int rtl8168_get_led_mode(struct rtl8169_private *tp);
+int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev);
+void r8169_remove_leds(struct r8169_led_classdev *leds);
diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c
index cbc6b846d..ed6e721b1 100644
--- a/drivers/net/ethernet/realtek/r8169_firmware.c
+++ b/drivers/net/ethernet/realtek/r8169_firmware.c
@@ -151,9 +151,6 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
u32 regno = (action & 0x0fff0000) >> 16;
enum rtl_fw_opcode opcode = action >> 28;
- if (!action)
- break;
-
switch (opcode) {
case PHY_READ:
predata = fw_read(tp, regno);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
new file mode 100644
index 000000000..1c97f3cca
--- /dev/null
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* r8169_leds.c: Realtek 8169/8168/8101/8125 ethernet driver.
+ *
+ * Copyright (c) 2023 Heiner Kallweit <hkallweit1@gmail.com>
+ *
+ * See MAINTAINERS file for support contact information.
+ */
+
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <uapi/linux/uleds.h>
+
+#include "r8169.h"
+
+#define RTL8168_LED_CTRL_OPTION2 BIT(15)
+#define RTL8168_LED_CTRL_ACT BIT(3)
+#define RTL8168_LED_CTRL_LINK_1000 BIT(2)
+#define RTL8168_LED_CTRL_LINK_100 BIT(1)
+#define RTL8168_LED_CTRL_LINK_10 BIT(0)
+
+#define RTL8168_NUM_LEDS 3
+
+#define RTL8168_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \
+ BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \
+ BIT(TRIGGER_NETDEV_TX))
+
+struct r8169_led_classdev {
+ struct led_classdev led;
+ struct net_device *ndev;
+ int index;
+};
+
+#define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led)
+
+static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int shift = ldev->index * 4;
+ bool rx, tx;
+
+ if (flags & ~RTL8168_SUPPORTED_MODES)
+ goto nosupp;
+
+ rx = flags & BIT(TRIGGER_NETDEV_RX);
+ tx = flags & BIT(TRIGGER_NETDEV_TX);
+ if (rx != tx)
+ goto nosupp;
+
+ return 0;
+
+nosupp:
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
+ return -EOPNOTSUPP;
+}
+
+static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int shift = ldev->index * 4;
+ u16 mode = 0;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode |= RTL8168_LED_CTRL_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode |= RTL8168_LED_CTRL_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode |= RTL8168_LED_CTRL_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_TX))
+ mode |= RTL8168_LED_CTRL_ACT;
+
+ return rtl8168_led_mod_ctrl(tp, 0x000f << shift, mode << shift);
+}
+
+static int rtl8168_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int shift = ldev->index * 4;
+ int mode;
+
+ mode = rtl8168_get_led_mode(tp);
+ if (mode < 0)
+ return mode;
+
+ if (mode & RTL8168_LED_CTRL_OPTION2) {
+ rtl8168_led_mod_ctrl(tp, RTL8168_LED_CTRL_OPTION2, 0);
+ netdev_notice(ldev->ndev, "Deactivating unsupported Option2 LED mode\n");
+ }
+
+ mode = (mode >> shift) & 0x000f;
+
+ if (mode & RTL8168_LED_CTRL_ACT)
+ *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+
+ if (mode & RTL8168_LED_CTRL_LINK_10)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_10);
+ if (mode & RTL8168_LED_CTRL_LINK_100)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_100);
+ if (mode & RTL8168_LED_CTRL_LINK_1000)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ return 0;
+}
+
+static struct device *
+ r8169_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+
+ return &ldev->ndev->dev;
+}
+
+static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
+ struct net_device *ndev, int index)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->ndev = ndev;
+ ldev->index = index;
+
+ r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->default_trigger = "netdev";
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported;
+ led_cdev->hw_control_set = rtl8168_led_hw_control_set;
+ led_cdev->hw_control_get = rtl8168_led_hw_control_get;
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+ led_classdev_register(&ndev->dev, led_cdev);
+}
+
+struct r8169_led_classdev *rtl8168_init_leds(struct net_device *ndev)
+{
+ struct r8169_led_classdev *leds;
+ int i;
+
+ leds = kcalloc(RTL8168_NUM_LEDS + 1, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return NULL;
+
+ for (i = 0; i < RTL8168_NUM_LEDS; i++)
+ rtl8168_setup_ldev(leds + i, ndev, i);
+
+ return leds;
+}
+
+void r8169_remove_leds(struct r8169_led_classdev *leds)
+{
+ if (!leds)
+ return;
+
+ for (struct r8169_led_classdev *l = leds; l->ndev; l++)
+ led_classdev_unregister(&l->led);
+
+ kfree(leds);
+}
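To make the shift arithmetic in this file concrete, a worked example (illustrative, using the masks defined above): each LED owns one 4-bit nibble of the LED_CTRL register, with LED n occupying bits [4n+3:4n]. Programming LED 1 to indicate a 1000 Mbit/s link plus activity therefore comes down to:

	u16 mode = RTL8168_LED_CTRL_LINK_1000 | RTL8168_LED_CTRL_ACT;	/* 0x4 | 0x8 = 0xc */
	int shift = 1 * 4;						/* LED 1 -> bits 7:4 */

	rtl8168_led_mod_ctrl(tp, 0x000f << shift, mode << shift);	/* mask 0x00f0, val 0x00c0 */

Note also why rtl8168_init_leds() allocates RTL8168_NUM_LEDS + 1 entries: kcalloc() zeroes the array, so the extra element serves as an ndev == NULL sentinel that terminates the unregister loop in r8169_remove_leds().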
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 81fd31f6f..32b73f398 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -56,10 +56,6 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
-/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
- The RTL chips use a 64 element hash table based on the Ethernet CRC. */
-#define MC_FILTER_LIMIT 32
-
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -289,6 +285,7 @@ enum rtl8168_8101_registers {
};
enum rtl8168_registers {
+ LED_CTRL = 0x18,
LED_FREQ = 0x1a,
EEE_LED = 0x1b,
ERIDR = 0x70,
@@ -620,6 +617,7 @@ struct rtl8169_private {
raw_spinlock_t config25_lock;
raw_spinlock_t mac_ocp_lock;
+ struct mutex led_lock; /* serialize LED ctrl RMW access */
raw_spinlock_t cfg9346_usage_lock;
int cfg9346_usage_count;
@@ -636,6 +634,8 @@ struct rtl8169_private {
const char *fw_name;
struct rtl_fw *rtl_fw;
+ struct r8169_led_classdev *leds;
+
u32 ocp_base;
};
@@ -792,6 +792,62 @@ static const struct rtl_cond name = { \
\
static bool name ## _check(struct rtl8169_private *tp)
+int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val)
+{
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->led_lock);
+ RTL_W16(tp, LED_CTRL, (RTL_R16(tp, LED_CTRL) & ~mask) | val);
+ mutex_unlock(&tp->led_lock);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int rtl8168_get_led_mode(struct rtl8169_private *tp)
+{
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = RTL_R16(tp, LED_CTRL);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
+void r8169_get_led_name(struct rtl8169_private *tp, int idx,
+ char *buf, int buf_len)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ char pdom[8], pfun[8];
+ int domain;
+
+ domain = pci_domain_nr(pdev->bus);
+ if (domain)
+ snprintf(pdom, sizeof(pdom), "P%d", domain);
+ else
+ pdom[0] = '\0';
+
+ if (pdev->multifunction)
+ snprintf(pfun, sizeof(pfun), "f%d", PCI_FUNC(pdev->devfn));
+ else
+ pfun[0] = '\0';
+
+ snprintf(buf, buf_len, "en%sp%ds%d%s-%d::lan", pdom, pdev->bus->number,
+ PCI_SLOT(pdev->devfn), pfun, idx);
+}
+
static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
@@ -1201,17 +1257,40 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
+static void rtl_dash_loop_wait(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long usecs, int n, bool high)
+{
+ if (!tp->dash_enabled)
+ return;
+ rtl_loop_wait(tp, c, usecs, n, high);
+}
+
+static void rtl_dash_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, true);
+}
+
+static void rtl_dash_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned long d, int n)
+{
+ rtl_dash_loop_wait(tp, c, d, n, false);
+}
+
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
+ rtl_dash_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)
@@ -1225,7 +1304,7 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
@@ -1233,7 +1312,7 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl8168ep_stop_cmac(tp);
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
- rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
+ rtl_dash_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
@@ -2233,6 +2312,9 @@ u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
+ if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ return;
+
set_bit(flag, tp->wk.flags);
schedule_work(&tp->wk.work);
}
@@ -2603,8 +2685,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode |= AcceptAllPhys;
} else if (!(dev->flags & IFF_MULTICAST)) {
rx_mode &= ~AcceptMulticast;
- } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
- dev->flags & IFF_ALLMULTI ||
+ } else if (dev->flags & IFF_ALLMULTI ||
tp->mac_version == RTL_GIGA_MAC_VER_35) {
/* accept all multicasts */
} else if (netdev_mc_empty(dev)) {
@@ -3106,6 +3187,33 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_ephy_init(tp, e_info_8168g_2);
}
+static void rtl8411b_fix_phy_down(struct rtl8169_private *tp)
+{
+ static const u16 fix_data[] = {
+/* 0xf800 */ 0xe008, 0xe00a, 0xe00c, 0xe00e, 0xe027, 0xe04f, 0xe05e, 0xe065,
+/* 0xf810 */ 0xc602, 0xbe00, 0x0000, 0xc502, 0xbd00, 0x074c, 0xc302, 0xbb00,
+/* 0xf820 */ 0x080a, 0x6420, 0x48c2, 0x8c20, 0xc516, 0x64a4, 0x49c0, 0xf009,
+/* 0xf830 */ 0x74a2, 0x8ca5, 0x74a0, 0xc50e, 0x9ca2, 0x1c11, 0x9ca0, 0xe006,
+/* 0xf840 */ 0x74f8, 0x48c4, 0x8cf8, 0xc404, 0xbc00, 0xc403, 0xbc00, 0x0bf2,
+/* 0xf850 */ 0x0c0a, 0xe434, 0xd3c0, 0x49d9, 0xf01f, 0xc526, 0x64a5, 0x1400,
+/* 0xf860 */ 0xf007, 0x0c01, 0x8ca5, 0x1c15, 0xc51b, 0x9ca0, 0xe013, 0xc519,
+/* 0xf870 */ 0x74a0, 0x48c4, 0x8ca0, 0xc516, 0x74a4, 0x48c8, 0x48ca, 0x9ca4,
+/* 0xf880 */ 0xc512, 0x1b00, 0x9ba0, 0x1b1c, 0x483f, 0x9ba2, 0x1b04, 0xc508,
+/* 0xf890 */ 0x9ba0, 0xc505, 0xbd00, 0xc502, 0xbd00, 0x0300, 0x051e, 0xe434,
+/* 0xf8a0 */ 0xe018, 0xe092, 0xde20, 0xd3c0, 0xc50f, 0x76a4, 0x49e3, 0xf007,
+/* 0xf8b0 */ 0x49c0, 0xf103, 0xc607, 0xbe00, 0xc606, 0xbe00, 0xc602, 0xbe00,
+/* 0xf8c0 */ 0x0c4c, 0x0c28, 0x0c2c, 0xdc00, 0xc707, 0x1d00, 0x8de2, 0x48c1,
+/* 0xf8d0 */ 0xc502, 0xbd00, 0x00aa, 0xe0c0, 0xc502, 0xbd00, 0x0132
+ };
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(fix_data); i++)
+ __r8168_mac_ocp_write(tp, 0xf800 + 2 * i, fix_data[i]);
+ raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+}
+
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8411_2[] = {
@@ -3139,117 +3247,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
mdelay(3);
r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
- r8168_mac_ocp_write(tp, 0xF800, 0xE008);
- r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
- r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
- r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
- r8168_mac_ocp_write(tp, 0xF808, 0xE027);
- r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
- r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
- r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
- r8168_mac_ocp_write(tp, 0xF810, 0xC602);
- r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF814, 0x0000);
- r8168_mac_ocp_write(tp, 0xF816, 0xC502);
- r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
- r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
- r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
- r8168_mac_ocp_write(tp, 0xF820, 0x080A);
- r8168_mac_ocp_write(tp, 0xF822, 0x6420);
- r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
- r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
- r8168_mac_ocp_write(tp, 0xF828, 0xC516);
- r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
- r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
- r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
- r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
- r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
- r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
- r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
- r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
- r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
- r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
- r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
- r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
- r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
- r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
- r8168_mac_ocp_write(tp, 0xF846, 0xC404);
- r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
- r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
- r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
- r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
- r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
- r8168_mac_ocp_write(tp, 0xF852, 0xE434);
- r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
- r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
- r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
- r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
- r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
- r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
- r8168_mac_ocp_write(tp, 0xF860, 0xF007);
- r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
- r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
- r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
- r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
- r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
- r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
- r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
- r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
- r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
- r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
- r8168_mac_ocp_write(tp, 0xF876, 0xC516);
- r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
- r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
- r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
- r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
- r8168_mac_ocp_write(tp, 0xF880, 0xC512);
- r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
- r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
- r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
- r8168_mac_ocp_write(tp, 0xF888, 0x483F);
- r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
- r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
- r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
- r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
- r8168_mac_ocp_write(tp, 0xF892, 0xC505);
- r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF896, 0xC502);
- r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
- r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
- r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
- r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
- r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
- r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
- r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
- r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
- r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
- r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
- r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
- r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
- r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
- r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
- r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
- r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
- r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
- r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
- r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
- r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
- r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
- r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
- r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
- r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
- r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
- r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
- r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
- r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
- r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
- r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
- r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
+ rtl8411b_fix_phy_down(tp);
r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
@@ -4561,8 +4559,7 @@ static void rtl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(tp->dev) ||
- !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
+ if (!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
@@ -4935,6 +4932,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ if (IS_ENABLED(CONFIG_R8169_LEDS))
+ r8169_remove_leds(tp->leds);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
@@ -5055,6 +5055,15 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
struct mii_bus *new_bus;
int ret;
+ /* On some boards with this chip version the BIOS is buggy and fails
+ * to reset the PHY page selector. The PHY ID read then accesses
+ * registers on a different page and returns a more or less random
+ * value. Fix this by resetting the page selector first.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_26)
+ r8169_mdio_write(tp, 0x1f, 0);
+
new_bus = devm_mdiobus_alloc(&pdev->dev);
if (!new_bus)
return -ENOMEM;
@@ -5227,6 +5236,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
raw_spin_lock_init(&tp->cfg9346_usage_lock);
raw_spin_lock_init(&tp->config25_lock);
raw_spin_lock_init(&tp->mac_ocp_lock);
+ mutex_init(&tp->led_lock);
dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
struct pcpu_sw_netstats);
@@ -5383,6 +5393,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ if (IS_ENABLED(CONFIG_R8169_LEDS) &&
+ tp->mac_version > RTL_GIGA_MAC_VER_06 &&
+ tp->mac_version < RTL_GIGA_MAC_VER_61)
+ tp->leds = rtl8168_init_leds(dev);
+
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
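For reference, two names the format string in r8169_get_led_name() produces for LED index 0 (worked from the code above, not from the patch description): a single-function NIC in PCI domain 0, bus 3, slot 0 gets "enp3s0-0::lan"; function 1 of a multifunction device in domain 1 gets "enP1p3s0f1-0::lan". The result follows the LED class "devicename:color:function" convention, with the PCI-derived interface-style name plus LED index as the device part, an empty color, and "lan" as the function.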
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 8ef5b0241..d6136fe5c 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -40,11 +40,21 @@ config RAVB
config RENESAS_ETHER_SWITCH
tristate "Renesas Ethernet Switch support"
depends on ARCH_RENESAS || COMPILE_TEST
- depends on PTP_1588_CLOCK_OPTIONAL
+ depends on PTP_1588_CLOCK
select CRC32
select MII
select PHYLINK
+ select RENESAS_GEN4_PTP
help
Renesas Ethernet Switch device driver.
+config RENESAS_GEN4_PTP
+ tristate "Renesas R-Car Gen4 gPTP support" if COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select MII
+ select PHYLIB
+ help
+ Renesas R-Car Gen4 gPTP device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index e8fd85b5f..9070acfd6 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
-rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o
-obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o
+obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
+
+obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cf..fd59155a7 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1060,8 +1060,10 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
- struct ravb_rx_desc *gbeth_rx_ring;
- struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ union {
+ struct ravb_rx_desc *desc;
+ struct ravb_ex_rx_desc *ex_desc;
+ } rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
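A brief rationale for the union (an editorial gloss): any given SoC uses exactly one RX descriptor layout, the plain struct ravb_rx_desc on GbEth-style hardware and the extended, timestamp-carrying struct ravb_ex_rx_desc on R-Car, so the former gbeth_rx_ring pointer and the old per-queue rx_ring array never coexist. Overlaying them in a per-queue union loses nothing and lets both code paths index rx_ring[q] uniformly.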
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0e3731f50..853c2a0d4 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -250,11 +250,11 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
unsigned int ring_size;
unsigned int i;
- if (!priv->gbeth_rx_ring)
+ if (!priv->rx_ring[q].desc)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+ struct ravb_rx_desc *desc = &priv->rx_ring[q].desc[i];
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
@@ -264,9 +264,9 @@ static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].desc,
priv->rx_desc_dma[q]);
- priv->gbeth_rx_ring = NULL;
+ priv->rx_ring[q].desc = NULL;
}
static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
@@ -275,11 +275,11 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
unsigned int ring_size;
unsigned int i;
- if (!priv->rx_ring[q])
+ if (!priv->rx_ring[q].ex_desc)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q].ex_desc[i];
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
@@ -290,9 +290,9 @@ static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].ex_desc,
priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ priv->rx_ring[q].ex_desc = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
@@ -344,11 +344,11 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
unsigned int i;
rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+ memset(priv->rx_ring[q].desc, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
- rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc = &priv->rx_ring[q].desc[i];
rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
GBETH_RX_BUFF_MAX,
@@ -361,7 +361,7 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc = &priv->rx_ring[q].desc[i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@@ -374,11 +374,11 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
dma_addr_t dma_addr;
unsigned int i;
- memset(priv->rx_ring[q], 0, rx_ring_size);
+ memset(priv->rx_ring[q].ex_desc, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
- rx_desc = &priv->rx_ring[q][i];
+ rx_desc = &priv->rx_ring[q].ex_desc[i];
rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
RX_BUF_SZ,
@@ -391,7 +391,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->rx_ring[q][i];
+ rx_desc = &priv->rx_ring[q].ex_desc[i];
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@@ -446,10 +446,10 @@ static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->gbeth_rx_ring;
+ priv->rx_ring[q].desc = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->rx_ring[q].desc;
}
static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
@@ -459,10 +459,11 @@ static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->rx_ring[q];
+ priv->rx_ring[q].ex_desc = dma_alloc_coherent(ndev->dev.parent,
+ ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->rx_ring[q].ex_desc;
}
/* Init skb and descriptor buffer for Ethernet AVB */
@@ -772,29 +773,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
struct ravb_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ int rx_packets = 0;
u8 desc_status;
- int boguscnt;
u16 pkt_len;
u8 die_dt;
int entry;
int limit;
+ int i;
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->gbeth_rx_ring[entry];
- while (desc->die_dt != DT_FEMPTY) {
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
+
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -820,7 +820,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
case DT_FSTART:
@@ -848,20 +848,17 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
eth_type_trans(priv->rx_1st_skb, ndev);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
}
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
if (!priv->rx_skb[q][entry]) {
@@ -887,9 +884,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -897,30 +894,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
- priv->cur_rx[q];
struct net_device_stats *stats = &priv->stats[q];
struct ravb_ex_rx_desc *desc;
+ unsigned int limit, i;
struct sk_buff *skb;
dma_addr_t dma_addr;
struct timespec64 ts;
+ int rx_packets = 0;
u8 desc_status;
u16 pkt_len;
- int limit;
+ int entry;
+
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ break;
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->rx_ring[q][entry];
- while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -966,18 +962,15 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
if (ndev->features & NETIF_F_RXCSUM)
ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
}
-
- entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
@@ -1002,9 +995,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -1288,25 +1281,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- bool gptp = info->gptp || info->ccc_gac;
- struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- unsigned int entry;
+ bool unmask;
- if (!gptp) {
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (gptp || desc->die_dt != DT_FEMPTY) {
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
+ unmask = !ravb_rx(ndev, &quota, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1316,6 +1300,18 @@ static int ravb_poll(struct napi_struct *napi, int budget)
netif_wake_subqueue(ndev, q);
spin_unlock_irqrestore(&priv->lock, flags);
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+
+ if (!unmask)
+ goto out;
+
napi_complete(napi);
/* Re-enable RX/TX interrupts */
@@ -1329,14 +1325,6 @@ static int ravb_poll(struct napi_struct *napi, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
- /* Receive error message handling */
- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- if (info->nc_queues)
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors)
- ndev->stats.rx_over_errors = priv->rx_over_errors;
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
- ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
return budget - quota;
}
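Note the reworked return contract (a reading of the hunks above): ravb_rx_gbeth() and ravb_rx_rcar() now return *quota == 0, true exactly when the NAPI budget was exhausted. ravb_poll() inverts this into unmask, so RX/TX interrupts are re-enabled only after a poll that finished with budget to spare, and the rx_over_errors/rx_fifo_errors bookkeeping now runs on every poll instead of only on the early-completion path.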
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index c007e33c4..72e7fcc56 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -14,7 +14,7 @@
#include "rcar_gen4_ptp.h"
#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset s4_offs = {
+static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
.enable = PTPTMEC,
.disable = PTPTMDC,
.increment = PTPTIVC0,
@@ -130,25 +130,42 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static void rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
+static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
+ enum rcar_gen4_ptp_reg_layout layout)
{
- WARN_ON(layout != RCAR_GEN4_PTP_REG_LAYOUT_S4);
+ if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
+ return -EINVAL;
- ptp_priv->offs = &s4_offs;
+ ptp_priv->offs = &gen4_offs;
+
+ return 0;
+}
+
+static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
+{
+ /* Timer increment in ns.
+ * bit[31:27] - integer
+ * bit[26:0] - fractional
+ * increment[ns] = period[ns] * 2^27 => (1ns * 2^27) / rate[Hz]
+ */
+ return div_s64(1000000000LL << 27, rate);
}
int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 clock)
+ enum rcar_gen4_ptp_reg_layout layout, u32 rate)
{
+ int ret;
+
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- rcar_gen4_ptp_set_offs(ptp_priv, layout);
+ ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
+ if (ret)
+ return ret;
- ptp_priv->default_addend = clock;
+ ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
@@ -159,6 +176,7 @@ int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
return 0;
}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
@@ -166,6 +184,7 @@ int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
return ptp_clock_unregister(ptp_priv->clock);
}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_unregister);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
{
@@ -179,3 +198,8 @@ struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev)
return ptp;
}
+EXPORT_SYMBOL_GPL(rcar_gen4_ptp_alloc);
+
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("Renesas R-Car Gen4 gPTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index b1bbea8d3..e22da5acd 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -9,13 +9,10 @@
#include <linux/ptp_clock_kernel.h>
-#define PTPTIVC_INIT 0x19000000 /* 320MHz */
-#define RCAR_GEN4_PTP_CLOCK_S4 PTPTIVC_INIT
#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-/* for rcar_gen4_ptp_init */
enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT_S4
+ RCAR_GEN4_PTP_REG_LAYOUT
};
/* driver's definitions */
@@ -28,7 +25,7 @@ enum rcar_gen4_ptp_reg_layout {
#define PTPRO 0
-enum rcar_gen4_ptp_reg_s4 {
+enum rcar_gen4_ptp_reg {
PTPTMEC = PTPRO + 0x0010,
PTPTMDC = PTPRO + 0x0014,
PTPTIVC0 = PTPRO + 0x0020,
@@ -65,7 +62,7 @@ struct rcar_gen4_ptp_private {
};
int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 clock);
+ enum rcar_gen4_ptp_reg_layout layout, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index e77c6ff93..dcab638c5 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -56,7 +56,8 @@ static void rswitch_clock_disable(struct rswitch_private *priv)
iowrite32(RCDC_RCD, priv->addr + RCDC);
}
-static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
+static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
+ unsigned int port)
{
u32 val = ioread32(coma_addr + RCEC);
@@ -66,7 +67,8 @@ static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
return false;
}
-static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
+static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
+ int enable)
{
u32 val;
@@ -100,7 +102,7 @@ static void rswitch_coma_init(struct rswitch_private *priv)
/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
@@ -109,7 +111,7 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
/* For ETHA */
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
@@ -166,7 +168,7 @@ static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
if (dis[i] & mask[i])
@@ -178,7 +180,7 @@ static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool
static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
dis[i] = ioread32(priv->addr + GWDIS(i));
@@ -186,23 +188,26 @@ static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
}
}
-static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
+static void rswitch_enadis_data_irq(struct rswitch_private *priv,
+ unsigned int index, bool enable)
{
u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
iowrite32(BIT(index % 32), priv->addr + offs);
}
-static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
+static void rswitch_ack_data_irq(struct rswitch_private *priv,
+ unsigned int index)
{
u32 offs = GWDIS(index / 32);
iowrite32(BIT(index % 32), priv->addr + offs);
}
-static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
+static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
+ bool cur, unsigned int num)
{
- int index = cur ? gq->cur : gq->dirty;
+ unsigned int index = cur ? gq->cur : gq->dirty;
if (index + num >= gq->ring_size)
index = (index + num) % gq->ring_size;
@@ -212,7 +217,7 @@ static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int
return index;
}
-static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
+static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
if (gq->cur >= gq->dirty)
return gq->cur - gq->dirty;
@@ -230,28 +235,28 @@ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
return false;
}
-static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
- int start_index, int num)
+static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
+ unsigned int start_index,
+ unsigned int num)
{
- int i, index;
+ unsigned int i, index;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
- if (gq->skbs[index])
+ if (gq->rx_bufs[index])
continue;
- gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
- PKT_BUF_SZ + RSWITCH_ALIGN - 1);
- if (!gq->skbs[index])
+ gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
+ if (!gq->rx_bufs[index])
goto err;
}
return 0;
err:
- for (i--; i >= 0; i--) {
+ for (; i-- > 0; ) {
index = (i + start_index) % gq->ring_size;
- dev_kfree_skb(gq->skbs[index]);
- gq->skbs[index] = NULL;
+ skb_free_frag(gq->rx_bufs[index]);
+ gq->rx_bufs[index] = NULL;
}
return -ENOMEM;
@@ -260,7 +265,7 @@ err:
static void rswitch_gwca_queue_free(struct net_device *ndev,
struct rswitch_gwca_queue *gq)
{
- int i;
+ unsigned int i;
if (!gq->dir_tx) {
dma_free_coherent(ndev->dev.parent,
@@ -269,16 +274,19 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
gq->rx_ring = NULL;
for (i = 0; i < gq->ring_size; i++)
- dev_kfree_skb(gq->skbs[i]);
+ skb_free_frag(gq->rx_bufs[i]);
+ kfree(gq->rx_bufs);
+ gq->rx_bufs = NULL;
} else {
dma_free_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_desc) *
(gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
gq->tx_ring = NULL;
+ kfree(gq->skbs);
+ gq->skbs = NULL;
+ kfree(gq->unmap_addrs);
+ gq->unmap_addrs = NULL;
}
-
- kfree(gq->skbs);
- gq->skbs = NULL;
}
static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
@@ -294,25 +302,31 @@ static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq,
- bool dir_tx, int ring_size)
+ bool dir_tx, unsigned int ring_size)
{
- int i, bit;
+ unsigned int i, bit;
gq->dir_tx = dir_tx;
gq->ring_size = ring_size;
gq->ndev = ndev;
- gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
- if (!gq->skbs)
- return -ENOMEM;
-
if (!dir_tx) {
- rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+ gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
+ if (!gq->rx_bufs)
+ return -ENOMEM;
+ if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
+ goto out;
gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
} else {
+ gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+ if (!gq->skbs)
+ return -ENOMEM;
+ gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
+ if (!gq->unmap_addrs)
+ goto out;
gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
@@ -351,22 +365,23 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
struct rswitch_ext_desc *desc;
struct rswitch_desc *linkfix;
dma_addr_t dma_addr;
- int i;
+ unsigned int i;
memset(gq->tx_ring, 0, ring_size);
for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
- gq->skbs[i]->data, PKT_BUF_SZ,
+ gq->rx_bufs[i] + RSWITCH_HEADROOM,
+ RSWITCH_MAP_BUF_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto err;
- desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+ desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
rswitch_desc_set_dptr(&desc->desc, dma_addr);
desc->desc.die_dt = DT_FEMPTY | DIE;
} else {
@@ -387,10 +402,10 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
err:
if (!gq->dir_tx) {
- for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
+ for (desc = gq->tx_ring; i-- > 0; desc++) {
dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
}
}
@@ -398,11 +413,12 @@ err:
}
static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
- int start_index, int num)
+ unsigned int start_index,
+ unsigned int num)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
struct rswitch_ts_desc *desc;
- int i, index;
+ unsigned int i, index;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
@@ -413,24 +429,26 @@ static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
struct rswitch_gwca_queue *gq,
- int start_index, int num)
+ unsigned int start_index,
+ unsigned int num)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_ext_ts_desc *desc;
+ unsigned int i, index;
dma_addr_t dma_addr;
- int i, index;
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
desc = &gq->rx_ring[index];
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
- gq->skbs[index]->data, PKT_BUF_SZ,
+ gq->rx_bufs[index] + RSWITCH_HEADROOM,
+ RSWITCH_MAP_BUF_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
goto err;
- desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+ desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
rswitch_desc_set_dptr(&desc->desc, dma_addr);
dma_wmb();
desc->desc.die_dt = DT_FEMPTY | DIE;
@@ -444,12 +462,12 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
err:
if (!gq->dir_tx) {
- for (i--; i >= 0; i--) {
+ for (; i-- > 0; ) {
index = (i + start_index) % gq->ring_size;
desc = &gq->rx_ring[index];
dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
- DMA_FROM_DEVICE);
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
}
}
@@ -460,7 +478,7 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
struct rswitch_ext_ts_desc *desc;
struct rswitch_desc *linkfix;
int err;
@@ -487,7 +505,7 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
- int i, num_queues = priv->gwca.num_queues;
+ unsigned int i, num_queues = priv->gwca.num_queues;
struct rswitch_gwca *gwca = &priv->gwca;
struct device *dev = &priv->pdev->dev;
@@ -537,7 +555,7 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq;
- int index;
+ unsigned int index;
index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
if (index >= priv->gwca.num_queues)
@@ -583,7 +601,7 @@ static void rswitch_txdmac_free(struct net_device *ndev)
rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}
-static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
+static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_device *rdev = priv->rdev[index];
@@ -617,7 +635,7 @@ static void rswitch_rxdmac_free(struct net_device *ndev)
rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}
-static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
+static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
@@ -627,7 +645,8 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
- int i, err;
+ unsigned int i;
+ int err;
err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
if (err < 0)
@@ -649,6 +668,8 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
+ iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
+ priv->addr + GWMDNC);
iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);
@@ -693,15 +714,88 @@ static int rswitch_gwca_halt(struct rswitch_private *priv)
return err;
}
+static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq,
+ struct rswitch_ext_ts_desc *desc)
+{
+ dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
+ u8 die_dt = desc->desc.die_dt & DT_MASK;
+ struct sk_buff *skb = NULL;
+
+ dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ /* The RX descriptor order will be one of the following:
+ * - FSINGLE
+ * - FSTART -> FEND
+ * - FSTART -> FMID -> FEND
+ */
+
+ /* Check whether the descriptors arrive in an unexpected order */
+ switch (die_dt) {
+ case DT_FSTART:
+ case DT_FSINGLE:
+ if (gq->skb_fstart) {
+ dev_kfree_skb_any(gq->skb_fstart);
+ gq->skb_fstart = NULL;
+ ndev->stats.rx_dropped++;
+ }
+ break;
+ case DT_FMID:
+ case DT_FEND:
+ if (!gq->skb_fstart) {
+ ndev->stats.rx_dropped++;
+ return NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Handle the descriptor */
+ switch (die_dt) {
+ case DT_FSTART:
+ case DT_FSINGLE:
+ skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
+ if (skb) {
+ skb_reserve(skb, RSWITCH_HEADROOM);
+ skb_put(skb, pkt_len);
+ gq->pkt_len = pkt_len;
+ if (die_dt == DT_FSTART) {
+ gq->skb_fstart = skb;
+ skb = NULL;
+ }
+ }
+ break;
+ case DT_FMID:
+ case DT_FEND:
+ skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
+ virt_to_page(gq->rx_bufs[gq->cur]),
+ offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
+ pkt_len, RSWITCH_BUF_SIZE);
+ if (die_dt == DT_FEND) {
+ skb = gq->skb_fstart;
+ gq->skb_fstart = NULL;
+ }
+ gq->pkt_len += pkt_len;
+ break;
+ default:
+ netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
+ break;
+ }
+
+ return skb;
+}
+
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->rx_queue;
struct rswitch_ext_ts_desc *desc;
- int limit, boguscnt, num, ret;
+ int limit, boguscnt, ret;
struct sk_buff *skb;
- dma_addr_t dma_addr;
- u16 pkt_len;
+ unsigned int num;
u32 get_ts;
if (*quota <= 0)
@@ -713,11 +807,10 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
dma_rmb();
- pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
- skb = gq->skbs[gq->cur];
- gq->skbs[gq->cur] = NULL;
- dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ skb = rswitch_rx_handle_desc(ndev, gq, desc);
+ if (!skb)
+ goto out;
+
get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
if (get_ts) {
struct skb_shared_hwtstamps *shhwtstamps;
@@ -729,12 +822,13 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
}
- skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&rdev->napi, skb);
rdev->ndev->stats.rx_packets++;
- rdev->ndev->stats.rx_bytes += pkt_len;
+ rdev->ndev->stats.rx_bytes += gq->pkt_len;
+out:
+ gq->rx_bufs[gq->cur] = NULL;
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->rx_ring[gq->cur];
@@ -743,7 +837,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
}
num = rswitch_get_num_cur_queues(gq);
- ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+ ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
if (ret < 0)
goto err;
ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
@@ -761,39 +855,32 @@ err:
return 0;
}
-static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
+static void rswitch_tx_free(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->tx_queue;
struct rswitch_ext_desc *desc;
- dma_addr_t dma_addr;
struct sk_buff *skb;
- int free_num = 0;
- int size;
for (; rswitch_get_num_cur_queues(gq) > 0;
gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
desc = &gq->tx_ring[gq->dirty];
- if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+ if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
break;
dma_rmb();
- size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
skb = gq->skbs[gq->dirty];
if (skb) {
- dma_addr = rswitch_desc_get_dptr(&desc->desc);
- dma_unmap_single(ndev->dev.parent, dma_addr,
- size, DMA_TO_DEVICE);
+ dma_unmap_single(ndev->dev.parent,
+ gq->unmap_addrs[gq->dirty],
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
- free_num++;
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += skb->len;
}
desc->desc.die_dt = DT_EEMPTY;
- rdev->ndev->stats.tx_packets++;
- rdev->ndev->stats.tx_bytes += size;
}
-
- return free_num;
}
static int rswitch_poll(struct napi_struct *napi, int budget)
@@ -808,7 +895,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
priv = rdev->priv;
retry:
- rswitch_tx_free(ndev, true);
+ rswitch_tx_free(ndev);
if (rswitch_rx(ndev, &quota))
goto out;
@@ -851,7 +938,7 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
struct rswitch_gwca_queue *gq;
- int i, index, bit;
+ unsigned int i, index, bit;
for (i = 0; i < priv->gwca.num_queues; i++) {
gq = &priv->gwca.queues[i];
@@ -918,8 +1005,8 @@ static void rswitch_ts(struct rswitch_private *priv)
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
struct timespec64 ts;
+ unsigned int num;
u32 tag, port;
- int num;
desc = &gq->ts_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
@@ -1438,7 +1525,7 @@ err_init_one:
static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
phy_exit(priv->rdev[i]->serdes);
@@ -1500,31 +1587,10 @@ static int rswitch_stop(struct net_device *ndev)
return 0;
};
-static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
+ struct sk_buff *skb,
+ struct rswitch_ext_desc *desc)
{
- struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_queue *gq = rdev->tx_queue;
- netdev_tx_t ret = NETDEV_TX_OK;
- struct rswitch_ext_desc *desc;
- dma_addr_t dma_addr;
-
- if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
- netif_stop_subqueue(ndev, 0);
- return NETDEV_TX_BUSY;
- }
-
- if (skb_put_padto(skb, ETH_ZLEN))
- return ret;
-
- dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- goto err_kfree;
-
- gq->skbs[gq->cur] = skb;
- desc = &gq->tx_ring[gq->cur];
- rswitch_desc_set_dptr(&desc->desc, dma_addr);
- desc->desc.info_ds = cpu_to_le16(skb->len);
-
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
@@ -1532,7 +1598,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
if (!ts_info)
- goto err_unmap;
+ return false;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
@@ -1546,18 +1612,97 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
skb_tx_timestamp(skb);
}
+ return true;
+}
+
+static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
+ struct sk_buff *skb,
+ struct rswitch_ext_desc *desc,
+ dma_addr_t dma_addr, u16 len, u8 die_dt)
+{
+ rswitch_desc_set_dptr(&desc->desc, dma_addr);
+ desc->desc.info_ds = cpu_to_le16(len);
+ if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
+ return false;
+
dma_wmb();
- desc->desc.die_dt = DT_FSINGLE | DIE;
+ desc->desc.die_dt = die_dt;
+
+ return true;
+}
+
+static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
+{
+ if (nr_desc == 1)
+ return DT_FSINGLE | DIE;
+ if (index == 0)
+ return DT_FSTART;
+ if (nr_desc - 1 == index)
+ return DT_FEND | DIE;
+ return DT_FMID;
+}
+
+static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
+{
+ switch (die_dt & DT_MASK) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
+ case DT_FSTART:
+ case DT_FMID:
+ return RSWITCH_DESC_BUF_SIZE;
+ default:
+ return 0;
+ }
+}
+
+static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_queue *gq = rdev->tx_queue;
+ dma_addr_t dma_addr, dma_addr_orig;
+ netdev_tx_t ret = NETDEV_TX_OK;
+ struct rswitch_ext_desc *desc;
+ unsigned int i, nr_desc;
+ u8 die_dt;
+ u16 len;
+
+ nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
+ if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
+ netif_stop_subqueue(ndev, 0);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return ret;
+
+ dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
+ goto err_kfree;
+
+ gq->skbs[gq->cur] = skb;
+ gq->unmap_addrs[gq->cur] = dma_addr_orig;
+
+ /* DT_FSTART must be written last, so fill the descriptors in reverse order. */
+ for (i = nr_desc; i-- > 0; ) {
+ desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
+ die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
+ dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
+ len = rswitch_ext_desc_get_len(die_dt, skb->len);
+ if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
+ goto err_unmap;
+ }
+
wmb(); /* gq->cur must be incremented after die_dt was set */
- gq->cur = rswitch_next_queue_index(gq, true, 1);
+ gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
err_unmap:
- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+ dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree:
dev_kfree_skb_any(skb);
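
The rewritten xmit path above splits one skb across nr_desc = DIV_ROUND_UP(skb->len, RSWITCH_DESC_BUF_SIZE) chained descriptors: index 0 gets DT_FSTART, the last gets DT_FEND | DIE, anything in between gets DT_FMID, and a frame that fits one buffer gets DT_FSINGLE | DIE. rswitch_ext_desc_set() publishes the descriptor body with dma_wmb() before writing die_dt (the field the hardware polls), and the chain is filled in reverse so DT_FSTART appears last. A minimal standalone sketch of the split arithmetic (userspace C; the buffer size is copied from the rswitch.h hunk further down, and the 5000-byte length is only an example):

#include <stdio.h>

#define RSWITCH_DESC_BUF_SIZE 2048

int main(void)
{
	unsigned int skb_len = 5000;	/* example frame length */
	unsigned int nr_desc = (skb_len - 1) / RSWITCH_DESC_BUF_SIZE + 1;

	for (unsigned int i = 0; i < nr_desc; i++) {
		const char *type =
			nr_desc == 1     ? "FSINGLE|DIE" :
			i == 0           ? "FSTART" :
			i == nr_desc - 1 ? "FEND|DIE" : "FMID";
		/* Only the closing descriptor carries the tail length. */
		unsigned int len = (i < nr_desc - 1) ? RSWITCH_DESC_BUF_SIZE :
			(skb_len % RSWITCH_DESC_BUF_SIZE ?
			 skb_len % RSWITCH_DESC_BUF_SIZE : RSWITCH_DESC_BUF_SIZE);

		printf("desc %u: %-11s offset %u len %u\n",
		       i, type, i * RSWITCH_DESC_BUF_SIZE, len);
	}
	return 0;	/* prints lengths 2048, 2048, 904 for this example */
}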
@@ -1693,7 +1838,7 @@ static const struct of_device_id renesas_eth_sw_of_table[] = {
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
-static void rswitch_etha_init(struct rswitch_private *priv, int index)
+static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_etha *etha = &priv->etha[index];
@@ -1709,7 +1854,7 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index)
etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
-static int rswitch_device_alloc(struct rswitch_private *priv, int index)
+static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
struct platform_device *pdev = priv->pdev;
struct rswitch_device *rdev;
@@ -1738,6 +1883,8 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
ndev->netdev_ops = &rswitch_netdev_ops;
ndev->ethtool_ops = &rswitch_ethtool_ops;
+ ndev->max_mtu = RSWITCH_MAX_MTU;
+ ndev->min_mtu = ETH_MIN_MTU;
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
@@ -1780,7 +1927,7 @@ out_get_params:
return err;
}
-static void rswitch_device_free(struct rswitch_private *priv, int index)
+static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
@@ -1832,8 +1979,8 @@ static int rswitch_init(struct rswitch_private *priv)
rswitch_fwd_init(priv);
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
- RCAR_GEN4_PTP_CLOCK_S4);
+ err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
+ clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 27c9d3872..72e3ff596 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -26,11 +26,17 @@
else
#define TX_RING_SIZE 1024
-#define RX_RING_SIZE 1024
+#define RX_RING_SIZE 4096
#define TS_RING_SIZE (TX_RING_SIZE * RSWITCH_NUM_PORTS)
-#define PKT_BUF_SZ 1584
+#define RSWITCH_MAX_MTU 9600
+#define RSWITCH_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define RSWITCH_DESC_BUF_SIZE 2048
+#define RSWITCH_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define RSWITCH_ALIGN 128
+#define RSWITCH_BUF_SIZE (RSWITCH_HEADROOM + RSWITCH_DESC_BUF_SIZE + \
+ RSWITCH_TAILROOM + RSWITCH_ALIGN)
+#define RSWITCH_MAP_BUF_SIZE (RSWITCH_BUF_SIZE - RSWITCH_HEADROOM)
#define RSWITCH_MAX_CTAG_PCP 7
#define RSWITCH_TIMEOUT_US 100000
@@ -768,6 +774,10 @@ enum rswitch_gwca_mode {
#define GWARIRM_ARIOG BIT(0)
#define GWARIRM_ARR BIT(1)
+#define GWMDNC_TSDMN(num) (((num) << 16) & GENMASK(17, 16))
+#define GWMDNC_TXDMN(num) (((num) << 8) & GENMASK(12, 8))
+#define GWMDNC_RXDMN(num) ((num) & GENMASK(4, 0))
+
#define GWDCC_BALR BIT(24)
#define GWDCC_DCP_MASK GENMASK(18, 16)
#define GWDCC_DCP(prio) FIELD_PREP(GWDCC_DCP_MASK, (prio))
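
The new GWMDNC field helpers above pack the per-type maximum descriptor counts (timestamp, TX, RX) into a single register value. A standalone sketch with the GENMASK() widths expanded by hand; the counts here are illustrative only, since the values the driver actually programs are hardware-specific and not part of this hunk:

#include <stdint.h>
#include <stdio.h>

/* Standalone copies of the helpers; the masks match GENMASK(17, 16),
 * GENMASK(12, 8) and GENMASK(4, 0) from the hunk above. */
#define GWMDNC_TSDMN(num)	(((num) << 16) & 0x00030000u)
#define GWMDNC_TXDMN(num)	(((num) << 8) & 0x00001f00u)
#define GWMDNC_RXDMN(num)	((num) & 0x0000001fu)

int main(void)
{
	uint32_t gwmdnc = GWMDNC_TSDMN(1) | GWMDNC_TXDMN(8) | GWMDNC_RXDMN(31);

	printf("GWMDNC = 0x%08x\n", gwmdnc);	/* 0x0001081f */
	return 0;
}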
@@ -909,7 +919,7 @@ struct rswitch_ext_ts_desc {
} __packed;
struct rswitch_etha {
- int index;
+ unsigned int index;
void __iomem *addr;
void __iomem *coma_addr;
bool external_phy;
@@ -938,15 +948,28 @@ struct rswitch_gwca_queue {
/* Common */
dma_addr_t ring_dma;
- int ring_size;
- int cur;
- int dirty;
+ unsigned int ring_size;
+ unsigned int cur;
+ unsigned int dirty;
- /* For [rt]_ring */
- int index;
+ /* For [rt]x_ring */
+ unsigned int index;
bool dir_tx;
- struct sk_buff **skbs;
struct net_device *ndev; /* queue to ndev for irq */
+
+ union {
+ /* For TX */
+ struct {
+ struct sk_buff **skbs;
+ dma_addr_t *unmap_addrs;
+ };
+ /* For RX */
+ struct {
+ void **rx_bufs;
+ struct sk_buff *skb_fstart;
+ u16 pkt_len;
+ };
+ };
};
struct rswitch_gwca_ts_info {
@@ -959,7 +982,7 @@ struct rswitch_gwca_ts_info {
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
- int index;
+ unsigned int index;
struct rswitch_desc *linkfix_table;
dma_addr_t linkfix_table_dma;
u32 linkfix_table_size;
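
A queue is either TX or RX (dir_tx above), so the reworked struct overlays the TX bookkeeping (skbs, unmap_addrs) and the RX state (rx_bufs plus the skb_fstart/pkt_len reassembly fields for multi-descriptor frames) in an anonymous union instead of carrying both sets of pointers. A trimmed-down C11 sketch of the pattern, not the driver's real layout:

#include <stdbool.h>
#include <stdint.h>

struct demo_queue {
	bool dir_tx;
	union {
		struct {			/* valid when dir_tx */
			void **skbs;
			uint64_t *unmap_addrs;
		};
		struct {			/* valid when !dir_tx */
			void **rx_bufs;
			void *skb_fstart;
			uint16_t pkt_len;
		};
	};
};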
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 475e1e8c1..0786eb0da 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -50,7 +50,7 @@
* the macros available to do this only define GCC 8.
*/
__diag_push();
-__diag_ignore(GCC, 8, "-Woverride-init",
+__diag_ignore_all("-Woverride-init",
"logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 6dfa062fe..8fa6c0e91 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3706,13 +3706,13 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
}
static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
- struct hwtstamp_config *init)
+ struct kernel_hwtstamp_config *init)
{
return -EOPNOTSUPP;
}
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
- struct hwtstamp_config *init)
+ struct kernel_hwtstamp_config *init)
{
int rc;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 702abbe59..cf55202b3 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -37,6 +37,7 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@@ -60,8 +61,6 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
- .get_rxfh_context = efx_ethtool_get_rxfh_context,
- .set_rxfh_context = efx_ethtool_set_rxfh_context,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 19f4b4d0b..e9d9de8e6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -495,11 +495,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = efx_netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
- if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_set_ts_config(efx, ifr);
- if (cmd == SIOCGHWTSTAMP)
- return efx_ptp_get_ts_config(efx, ifr);
-
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
@@ -581,6 +576,23 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
+static int efx_hwtstamp_set(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return efx_ptp_set_ts_config(efx, config, extack);
+}
+
+static int efx_hwtstamp_get(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ return efx_ptp_get_ts_config(efx, config);
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -596,6 +608,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_features_check = efx_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = efx_hwtstamp_set,
+ .ndo_hwtstamp_get = efx_hwtstamp_get,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 364323599..37c69c8d9 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -240,6 +240,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -269,8 +270,6 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
- .get_rxfh_context = efx_ethtool_get_rxfh_context,
- .set_rxfh_context = efx_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index a8cbceeb3..7d5e5db4e 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -1163,48 +1163,8 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- int rc;
-
- rc = efx->type->rx_pull_rss_config(efx);
- if (rc)
- return rc;
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, efx->rss_context.rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key)
- memcpy(key, efx->rss_context.rx_hash_key,
- efx->type->rx_hash_key_size);
- return 0;
-}
-
-int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
-
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
- if (!indir && !key)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
- if (!indir)
- indir = efx->rss_context.rx_indir_table;
-
- return efx->type->rx_push_rss_config(efx, true, indir, key);
-}
-
-int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int efx_ethtool_get_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
struct efx_rss_context *ctx;
@@ -1214,7 +1174,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
- ctx = efx_find_rss_context_entry(efx, rss_context);
+ ctx = efx_find_rss_context_entry(efx, rxfh->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -1223,37 +1183,60 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
if (rc)
goto out_unlock;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
- if (key)
- memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, ctx->rx_indir_table,
+ sizeof(ctx->rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, ctx->rx_hash_key,
+ efx->type->rx_hash_key_size);
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
}
-int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+int efx_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ int rc;
+
+ if (rxfh->rss_context)
+ return efx_ethtool_get_rxfh_context(net_dev, rxfh);
+
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, efx->rss_context.rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, efx->rss_context.rx_hash_key,
+ efx->type->rx_hash_key_size);
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 *rss_context = &rxfh->rss_context;
struct efx_rss_context *ctx;
+ u32 *indir = rxfh->indir;
bool allocated = false;
+ u8 *key = rxfh->key;
int rc;
if (!efx->type->rx_push_rss_context_config)
return -EOPNOTSUPP;
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (delete) {
+ if (rxfh->rss_delete) {
/* alloc + delete == Nothing to do */
rc = -EINVAL;
goto out_unlock;
@@ -1276,7 +1259,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
}
}
- if (delete) {
+ if (rxfh->rss_delete) {
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
@@ -1299,6 +1282,33 @@ out_unlock:
return rc;
}
+int efx_ethtool_set_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 *indir = rxfh->indir;
+ u8 *key = rxfh->key;
+
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
+ return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+
+ if (!indir && !key)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+ if (!indir)
+ indir = efx->rss_context.rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
+
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
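
The conversion above collapses the old four-op RSS interface into two: struct ethtool_rxfh_param bundles indir, key and hfunc with the rss_context id and an rss_delete flag (replacing the old delete argument), and a non-zero rxfh->rss_context routes the call to the per-context helper inside the same callback. A hedged sketch of that dispatch shape for a hypothetical driver; the foo_* names are placeholders:

static int foo_get_rxfh(struct net_device *dev,
			struct ethtool_rxfh_param *rxfh)
{
	rxfh->hfunc = ETH_RSS_HASH_TOP;	/* fixed Toeplitz, like sfc */

	if (rxfh->rss_context)		/* non-default RSS context */
		return foo_get_rxfh_context(dev, rxfh);

	if (rxfh->indir)
		foo_copy_indir(dev, rxfh->indir);
	if (rxfh->key)
		foo_copy_key(dev, rxfh->key);
	return 0;
}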
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 659491932..a680e5980 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -44,16 +44,11 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev);
-int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc);
+int efx_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh);
int efx_ethtool_set_rxfh(struct net_device *net_dev,
- const u32 *indir, const u8 *key, const u8 hfunc);
-int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context);
-int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete);
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 3976a333f..f4db683b8 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -1257,31 +1257,33 @@ static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int ef4_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct ef4_nic *efx = netdev_priv(net_dev);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, efx->rx_indir_table,
+ sizeof(efx->rx_indir_table));
return 0;
}
-static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int ef4_ethtool_set_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!indir)
+ if (!rxfh->indir)
return 0;
- return efx->type->rx_push_rss_config(efx, true, indir);
+ return efx->type->rx_push_rss_config(efx, true, rxfh->indir);
}
static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 27d86e90a..f2dd7feb0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1473,7 +1473,7 @@ struct efx_nic_type {
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
- struct hwtstamp_config *init);
+ struct kernel_hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b04fdbb8a..c3bffbf0b 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -301,7 +301,7 @@ struct efx_ptp_data {
bool reset_required;
struct list_head rxfilters_mcast;
struct list_head rxfilters_ucast;
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
bool enabled;
unsigned int mode;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
@@ -1848,7 +1848,7 @@ int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
return 0;
}
-static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *init)
{
int rc;
@@ -1895,33 +1895,25 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack __always_unused *extack)
{
- struct hwtstamp_config config;
- int rc;
-
/* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- rc = efx_ptp_ts_init(efx, &config);
- if (rc != 0)
- return rc;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config))
- ? -EFAULT : 0;
+ return efx_ptp_ts_init(efx, config);
}
-int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config)
{
+ /* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
-
- return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
- sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+ *config = efx->ptp_data->config;
+ return 0;
}
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 7b1ef7002..2f30dbb49 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -18,8 +18,11 @@ void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
void efx_ptp_update_channel(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_remove(struct efx_nic *efx);
-int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
-int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int efx_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config);
void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_ptp_get_mode(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 8c557f6a1..59d3a6043 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -495,11 +495,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr);
- if (cmd == SIOCSHWTSTAMP)
- return efx_siena_ptp_set_ts_config(efx, ifr);
- if (cmd == SIOCGHWTSTAMP)
- return efx_siena_ptp_get_ts_config(efx, ifr);
-
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400)
@@ -579,6 +574,23 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
+static int efx_siena_hwtstamp_set(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx_siena_ptp_set_ts_config(efx, config, extack);
+}
+
+static int efx_siena_hwtstamp_get(struct net_device *net_dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx_siena_ptp_get_ts_config(efx, config);
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -594,6 +606,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_features_check = efx_siena_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = efx_siena_hwtstamp_set,
+ .ndo_hwtstamp_get = efx_siena_hwtstamp_get,
#ifdef CONFIG_SFC_SIENA_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index e4ec58921..14dd3893b 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -240,6 +240,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_siena_ethtool_ops = {
+ .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -269,8 +270,6 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
- .get_rxfh_context = efx_siena_ethtool_get_rxfh_context,
- .set_rxfh_context = efx_siena_ethtool_set_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index f590e87e5..5f0a8127e 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -1164,48 +1164,8 @@ u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- int rc;
-
- rc = efx->type->rx_pull_rss_config(efx);
- if (rc)
- return rc;
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, efx->rss_context.rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- if (key)
- memcpy(key, efx->rss_context.rx_hash_key,
- efx->type->rx_hash_key_size);
- return 0;
-}
-
-int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
- if (!indir && !key)
- return 0;
-
- if (!key)
- key = efx->rss_context.rx_hash_key;
- if (!indir)
- indir = efx->rss_context.rx_indir_table;
-
- return efx->type->rx_push_rss_config(efx, true, indir, key);
-}
-
-int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context)
+static int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_rss_context *ctx;
@@ -1215,7 +1175,7 @@ int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
- ctx = efx_siena_find_rss_context_entry(efx, rss_context);
+ ctx = efx_siena_find_rss_context_entry(efx, rxfh->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -1224,37 +1184,60 @@ int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
if (rc)
goto out_unlock;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (indir)
- memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
- if (key)
- memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, ctx->rx_indir_table,
+ sizeof(ctx->rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, ctx->rx_hash_key,
+ efx->type->rx_hash_key_size);
out_unlock:
mutex_unlock(&efx->rss_lock);
return rc;
}
-int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete)
+int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ if (rxfh->rss_context)
+ return efx_siena_ethtool_get_rxfh_context(net_dev, rxfh);
+
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->indir)
+ memcpy(rxfh->indir, efx->rss_context.rx_indir_table,
+ sizeof(efx->rss_context.rx_indir_table));
+ if (rxfh->key)
+ memcpy(rxfh->key, efx->rss_context.rx_hash_key,
+ efx->type->rx_hash_key_size);
+ return 0;
+}
+
+static int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ u32 *rss_context = &rxfh->rss_context;
struct efx_rss_context *ctx;
+ u32 *indir = rxfh->indir;
bool allocated = false;
+ u8 *key = rxfh->key;
int rc;
if (!efx->type->rx_push_rss_context_config)
return -EOPNOTSUPP;
- /* Hash function is Toeplitz, cannot be changed */
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
- return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (delete) {
+ if (rxfh->rss_delete) {
/* alloc + delete == Nothing to do */
rc = -EINVAL;
goto out_unlock;
@@ -1277,7 +1260,7 @@ int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
}
}
- if (delete) {
+ if (rxfh->rss_delete) {
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
@@ -1300,6 +1283,33 @@ out_unlock:
return rc;
}
+int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 *indir = rxfh->indir;
+ u8 *key = rxfh->key;
+
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (rxfh->rss_context)
+ return efx_siena_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+
+ if (!indir && !key)
+ return 0;
+
+ if (!key)
+ key = efx->rss_context.rx_hash_key;
+ if (!indir)
+ indir = efx->rss_context.rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
+
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index 04b375dc6..d674bab0f 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -41,16 +41,11 @@ int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info);
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev);
-int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
- u8 *hfunc);
+int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
+ struct ethtool_rxfh_param *rxfh);
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
- const u32 *indir, const u8 *key, const u8 hfunc);
-int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
- u8 *key, u8 *hfunc, u32 rss_context);
-int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
- const u32 *indir, const u8 *key,
- const u8 hfunc, u32 *rss_context,
- bool delete);
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index ff7bbc325..94152f595 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -1424,7 +1424,7 @@ struct efx_nic_type {
void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
- struct hwtstamp_config *init);
+ struct kernel_hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 38e666561..4b5e2f0ba 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -297,7 +297,7 @@ struct efx_ptp_data {
u32 rxfilter_event;
u32 rxfilter_general;
bool rxfilter_installed;
- struct hwtstamp_config config;
+ struct kernel_hwtstamp_config config;
bool enabled;
unsigned int mode;
void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
@@ -1762,7 +1762,8 @@ int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
return 0;
}
-static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+static int efx_ptp_ts_init(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *init)
{
int rc;
@@ -1799,33 +1800,26 @@ void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
}
-int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack __always_unused *extack)
{
- struct hwtstamp_config config;
- int rc;
-
/* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- rc = efx_ptp_ts_init(efx, &config);
- if (rc != 0)
- return rc;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config))
- ? -EFAULT : 0;
+ return efx_ptp_ts_init(efx, config);
}
-int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config)
{
+ /* Not a PTP enabled port */
if (!efx->ptp_data)
return -EOPNOTSUPP;
- return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
- sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+ *config = efx->ptp_data->config;
+ return 0;
}
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index 4172f90e9..6352f8442 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -15,8 +15,11 @@
struct ethtool_ts_info;
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
-int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
-int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
+ struct kernel_hwtstamp_config *config);
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
struct ethtool_ts_info *ts_info);
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c
index a44c8fa25..ca33dc08e 100644
--- a/drivers/net/ethernet/sfc/siena/siena.c
+++ b/drivers/net/ethernet/sfc/siena/siena.c
@@ -136,7 +136,7 @@ static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
}
static int siena_ptp_set_ts_config(struct efx_nic *efx,
- struct hwtstamp_config *init)
+ struct kernel_hwtstamp_config *init)
{
int rc;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 0891e9e49..5ab8b81b8 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1302,6 +1302,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = NETSEC_RXBUF_HEADROOM,
.max_len = NETSEC_RX_BUF_SIZE,
+ .napi = &priv->napi,
+ .netdev = priv->ndev,
};
int i, err;
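
Filling pp_params.napi and pp_params.netdev as above opts the pool into NAPI-aware direct recycling and associates it with the netdev for accounting. A hedged sketch of the resulting construction, assuming netsec's existing priv fields; the ring depth is illustrative:

struct page_pool_params pp_params = {
	.order		= 0,
	.pool_size	= 256,			/* illustrative ring depth */
	.nid		= NUMA_NO_NODE,
	.dev		= priv->dev,
	.dma_dir	= DMA_FROM_DEVICE,
	.offset		= NETSEC_RXBUF_HEADROOM,
	.max_len	= NETSEC_RX_BUF_SIZE,
	.napi		= &priv->napi,	/* in-softirq direct recycling */
	.netdev		= priv->ndev,	/* pool<->netdev association */
};
struct page_pool *pool = page_pool_create(&pp_params);

if (IS_ERR(pool))
	return PTR_ERR(pool);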
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 85dcda51d..4ec61f1ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -165,9 +165,9 @@ config DWMAC_STARFIVE
help
Support for ethernet controllers on StarFive RISC-V SoCs
- This selects the StarFive platform specific glue layer support for
- the stmmac device driver. This driver is used for StarFive JH7110
- ethernet controller.
+ This selects the StarFive platform specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ StarFive JH7100 and JH7110 ethernet controllers.
config DWMAC_STI
tristate "STi GMAC support"
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 80e598bd4..26cad4344 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,7 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
- stmmac_xdp.o \
+ stmmac_xdp.o stmmac_est.o \
$(stmmac-y)
stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index c75c64a93..5a1d46dcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -550,6 +550,7 @@ extern const struct stmmac_hwtimestamp stmmac_ptp;
extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
+ u32 caps;
u32 speed_mask;
u32 speed10;
u32 speed100;
@@ -588,6 +589,7 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
+ const struct stmmac_est_ops *est;
struct dw_xpcs *xpcs;
struct phylink_pcs *lynx_pcs; /* Lynx external PCS */
struct mii_regs mii; /* MII register Addresses */
@@ -605,6 +607,7 @@ struct mac_device_info {
u32 vlan_filter[32];
bool vlan_fail_q_en;
u8 vlan_fail_q;
+ bool hw_vlan_en;
};
struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 8f730ada7..6b65420e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+ /* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
+ for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 5d630affb..4e1076fae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,13 +15,20 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
-#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+
+#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
+
+struct starfive_dwmac_data {
+ unsigned int gtxclk_dlychain;
+};
struct starfive_dwmac {
struct device *dev;
struct clk *clk_tx;
+ const struct starfive_dwmac_data *data;
};
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
@@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
break;
@@ -89,6 +98,14 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+ if (dwmac->data) {
+ err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN,
+ dwmac->data->gtxclk_dlychain);
+ if (err)
+ return dev_err_probe(dwmac->dev, err,
+ "error selecting gtxclk delay chain\n");
+ }
+
return 0;
}
@@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
+ dwmac->data = device_get_match_data(&pdev->dev);
+
dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
if (IS_ERR(dwmac->clk_tx))
return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
@@ -144,8 +163,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+static const struct starfive_dwmac_data jh7100_data = {
+ .gtxclk_dlychain = 4,
+};
+
static const struct of_device_id starfive_dwmac_match[] = {
- { .compatible = "starfive,jh7110-dwmac" },
+ { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data },
+ { .compatible = "starfive,jh7110-dwmac" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
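
JH7100 support above is keyed off of_device_id .data: device_get_match_data() returns the per-compatible starfive_dwmac_data, so only the JH7100 entry triggers the gtxclk delay-chain write. A generic sketch of the pattern with hypothetical names (soc_quirks, apply_quirks and the compatibles are placeholders):

struct soc_quirks { unsigned int delay_chain; };

static const struct soc_quirks soc_a_quirks = { .delay_chain = 4 };

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,soc-a", .data = &soc_a_quirks },
	{ .compatible = "vendor,soc-b" },	/* no .data: quirks == NULL */
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct soc_quirks *quirks = device_get_match_data(&pdev->dev);

	if (quirks)	/* only for compatibles that declared .data */
		apply_quirks(pdev, quirks);
	return 0;
}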
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index b21d99faa..e1b761dcf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1096,6 +1096,8 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
/* The loopback bit seems to be re-set when link change
* Simply mask it each time
* Speed 10/100/1000 are set in BIT(2)/BIT(3)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 3927609ab..855529944 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -539,6 +539,8 @@ int dwmac1000_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
mac->link.duplex = GMAC_CONTROL_DM;
mac->link.speed10 = GMAC_CONTROL_PS;
mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index a6e8d7bd9..7667d103c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -175,6 +175,8 @@ int dwmac100_setup(struct stmmac_priv *priv)
dev_info(priv->device, "\tDWMAC100\n");
mac->pcsr = priv->ioaddr;
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
mac->link.duplex = MAC_CONTROL_F;
mac->link.speed10 = 0;
mac->link.speed100 = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c6ff1fa0e..a38226d7c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -70,7 +70,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
{
- priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
@@ -92,19 +95,41 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 prio, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 base_register;
- u32 value;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
- if (queue >= 4)
- queue -= 4;
+ ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);
- value = readl(ioaddr + base_register);
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
+ GMAC_RXQCTRL_PSRQX_MASK(i));
- value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
- value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+ * clear them from the other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
GMAC_RXQCTRL_PSRQX_MASK(queue);
- writel(value, ioaddr + base_register);
+
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ } else {
+ queue -= 4;
+
+ ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+
+ writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
+ }
}
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
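
The remap above (and its dwxgmac2 twin later in this patch) first builds a clear mask spanning every queue's byte lane in both RXQ_CTRL2 and RXQ_CTRL3, so a priority bit can never stay mapped to two RX queues at once, and then writes the register holding the new mapping first. A standalone sketch of the clear-mask construction with the GENMASK-based macros expanded by hand:

#include <stdint.h>
#include <stdio.h>

#define PSRQX_SHIFT(x)	((x) * 8)
#define PSRQX_MASK(x)	(0xffu << PSRQX_SHIFT(x))	/* GENMASK(8x+7, 8x) */

int main(void)
{
	uint32_t prio = 0x5;	/* example: priority bits 0 and 2 */
	uint32_t clear_mask = 0;
	int i;

	for (i = 0; i < 4; i++)
		clear_mask |= (prio << PSRQX_SHIFT(i)) & PSRQX_MASK(i);

	/* These lanes are cleared in both control registers before the
	 * target queue's lane is re-set. */
	printf("clear_mask = 0x%08x\n", clear_mask);	/* 0x05050505 */
	return 0;
}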
@@ -1134,6 +1159,35 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
return 0;
}
+static void dwmac4_rx_hw_vlan(struct mac_device_info *hw,
+ struct dma_desc *rx_desc, struct sk_buff *skb)
+{
+ if (hw->desc->get_rx_vlan_valid(rx_desc)) {
+ u16 vid = hw->desc->get_rx_vlan_tci(rx_desc);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
+static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_VLAN_TAG);
+
+ value &= ~GMAC_VLAN_TAG_CTRL_EVLS_MASK;
+
+ if (hw->hw_vlan_en)
+ /* Always strip VLAN on Receive */
+ value |= GMAC_VLAN_TAG_STRIP_ALL;
+ else
+ /* Do not strip VLAN on Receive */
+ value |= GMAC_VLAN_TAG_STRIP_NONE;
+
+ /* Enable outer VLAN Tag in Rx DMA descriptor */
+ value |= GMAC_VLAN_TAG_CTRL_EVLRXS;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+}
+
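dwmac4_set_hw_vlan_mode() above selects between the EVLS strip-all and strip-none encodings based on the new hw_vlan_en flag (added to mac_device_info in the common.h hunk earlier) and sets EVLRXS so the stripped TCI is written back into the RX descriptor; dwmac4_rx_hw_vlan() then lifts that TCI into the skb. A short hedged sketch of the receive-side effect in plain kernel API terms; the two accessors are placeholders, not stmmac's real call site:

if (hw_stripped_tag_valid(rx_desc)) {		/* placeholder predicate */
	u16 vid = hw_stripped_tag_tci(rx_desc);	/* placeholder accessor */

	/* Marks the skb as tagged (skb->vlan_proto/vlan_tci) even
	 * though the 4 tag bytes are gone from the payload. */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
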
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
.phylink_get_caps = dwmac4_phylink_get_caps,
@@ -1175,6 +1229,8 @@ const struct stmmac_ops dwmac4_ops = {
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac410_ops = {
@@ -1216,14 +1272,14 @@ const struct stmmac_ops dwmac410_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .est_configure = dwmac5_est_configure,
- .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
const struct stmmac_ops dwmac510_ops = {
@@ -1269,14 +1325,14 @@ const struct stmmac_ops dwmac510_ops = {
.set_arp_offload = dwmac4_set_arp_offload,
.config_l3_filter = dwmac4_config_l3_filter,
.config_l4_filter = dwmac4_config_l4_filter,
- .est_configure = dwmac5_est_configure,
- .est_irq_status = dwmac5_est_irq_status,
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+ .rx_hw_vlan = dwmac4_rx_hw_vlan,
+ .set_hw_vlan_mode = dwmac4_set_hw_vlan_mode,
};
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
@@ -1325,6 +1381,8 @@ int dwmac4_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
mac->link.duplex = GMAC_CONFIG_DM;
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 89a14084c..1c5802e0d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -198,6 +198,17 @@ static int dwmac4_get_tx_ls(struct dma_desc *p)
>> TDES3_LAST_DESCRIPTOR_SHIFT;
}
+static u16 dwmac4_wrback_get_rx_vlan_tci(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & RDES0_VLAN_TAG_MASK);
+}
+
+static bool dwmac4_wrback_get_rx_vlan_valid(struct dma_desc *p)
+{
+ return ((le32_to_cpu(p->des3) & RDES3_LAST_DESCRIPTOR) &&
+ (le32_to_cpu(p->des3) & RDES3_RDES0_VALID));
+}
+
static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
@@ -551,6 +562,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.set_tx_owner = dwmac4_set_tx_owner,
.set_rx_owner = dwmac4_set_rx_owner,
.get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_vlan_tci = dwmac4_wrback_get_rx_vlan_tci,
+ .get_rx_vlan_valid = dwmac4_wrback_get_rx_vlan_valid,
.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 8fd167501..e02cebc3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -573,143 +573,6 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
-static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
-{
- u32 ctrl;
-
- writel(val, ioaddr + MTL_EST_GCL_DATA);
-
- ctrl = (reg << ADDR_SHIFT);
- ctrl |= gcl ? 0 : GCRR;
-
- writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
-
- ctrl |= SRWO;
- writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
-
- return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
- ctrl, !(ctrl & SRWO), 100, 5000);
-}
-
-int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate)
-{
- int i, ret = 0x0;
- u32 ctrl;
-
- ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
- ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
- ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
- ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
- ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
- ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
- if (ret)
- return ret;
-
- for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
- if (ret)
- return ret;
- }
-
- ctrl = readl(ioaddr + MTL_EST_CONTROL);
- ctrl &= ~PTOV;
- ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
- if (cfg->enable)
- ctrl |= EEST | SSWL;
- else
- ctrl &= ~EEST;
-
- writel(ctrl, ioaddr + MTL_EST_CONTROL);
-
- /* Configure EST interrupt */
- if (cfg->enable)
- ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
- else
- ctrl = 0;
-
- writel(ctrl, ioaddr + MTL_EST_INT_EN);
-
- return 0;
-}
-
-void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt)
-{
- u32 status, value, feqn, hbfq, hbfs, btrl;
- u32 txqcnt_mask = (1 << txqcnt) - 1;
-
- status = readl(ioaddr + MTL_EST_STATUS);
-
- value = (CGCE | HLBS | HLBF | BTRE | SWLC);
-
- /* Return if there is no error */
- if (!(status & value))
- return;
-
- if (status & CGCE) {
- /* Clear Interrupt */
- writel(CGCE, ioaddr + MTL_EST_STATUS);
-
- x->mtl_est_cgce++;
- }
-
- if (status & HLBS) {
- value = readl(ioaddr + MTL_EST_SCH_ERR);
- value &= txqcnt_mask;
-
- x->mtl_est_hlbs++;
-
- /* Clear Interrupt */
- writel(value, ioaddr + MTL_EST_SCH_ERR);
-
- /* Collecting info to shows all the queues that has HLBS
- * issue. The only way to clear this is to clear the
- * statistic
- */
- if (net_ratelimit())
- netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
- }
-
- if (status & HLBF) {
- value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
- feqn = value & txqcnt_mask;
-
- value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
- hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
- hbfs = value & SZ_CAP_HBFS_MASK;
-
- x->mtl_est_hlbf++;
-
- /* Clear Interrupt */
- writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);
-
- if (net_ratelimit())
- netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
- hbfq, hbfs);
- }
-
- if (status & BTRE) {
- if ((status & BTRL) == BTRL_MAX)
- x->mtl_est_btrlm++;
- else
- x->mtl_est_btre++;
-
- btrl = (status & BTRL) >> BTRL_SHIFT;
-
- if (net_ratelimit())
- netdev_info(dev, "EST: BTR Error Loop Count %u\n",
- btrl);
-
- writel(BTRE, ioaddr + MTL_EST_STATUS);
- }
-
- if (status & SWLC) {
- writel(SWLC, ioaddr + MTL_EST_STATUS);
- netdev_info(dev, "EST: SWOL has been switched\n");
- }
-}
-
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 34e620790..bf33a51d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,53 +39,6 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
-#define MTL_EST_CONTROL 0x00000c50
-#define PTOV GENMASK(31, 24)
-#define PTOV_SHIFT 24
-#define SSWL BIT(1)
-#define EEST BIT(0)
-
-#define MTL_EST_STATUS 0x00000c58
-#define BTRL GENMASK(11, 8)
-#define BTRL_SHIFT 8
-#define BTRL_MAX (0xF << BTRL_SHIFT)
-#define SWOL BIT(7)
-#define SWOL_SHIFT 7
-#define CGCE BIT(4)
-#define HLBS BIT(3)
-#define HLBF BIT(2)
-#define BTRE BIT(1)
-#define SWLC BIT(0)
-
-#define MTL_EST_SCH_ERR 0x00000c60
-#define MTL_EST_FRM_SZ_ERR 0x00000c64
-#define MTL_EST_FRM_SZ_CAP 0x00000c68
-#define SZ_CAP_HBFS_MASK GENMASK(14, 0)
-#define SZ_CAP_HBFQ_SHIFT 16
-#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \
- ((val) > 4 ? GENMASK(18, 16) : \
- (val) > 2 ? GENMASK(17, 16) : \
- BIT(16)); })
-
-#define MTL_EST_INT_EN 0x00000c70
-#define IECGCE CGCE
-#define IEHS HLBS
-#define IEHF HLBF
-#define IEBE BTRE
-#define IECC SWLC
-
-#define MTL_EST_GCL_CONTROL 0x00000c80
-#define BTR_LOW 0x0
-#define BTR_HIGH 0x1
-#define CTR_LOW 0x2
-#define CTR_HIGH 0x3
-#define TER 0x4
-#define LLR 0x5
-#define ADDR_SHIFT 8
-#define GCRR BIT(2)
-#define SRWO BIT(0)
-#define MTL_EST_GCL_DATA 0x00000c84
-
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -149,10 +102,6 @@ int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
struct stmmac_pps_cfg *cfg, bool enable,
u32 sub_second_inc, u32 systime_flags);
-int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate);
-void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 173948474..6a2c7d22d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -284,22 +284,6 @@
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
-#define XGMAC_MTL_EST_CONTROL 0x00001050
-#define XGMAC_PTOV GENMASK(31, 23)
-#define XGMAC_PTOV_SHIFT 23
-#define XGMAC_SSWL BIT(1)
-#define XGMAC_EEST BIT(0)
-#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
-#define XGMAC_BTR_LOW 0x0
-#define XGMAC_BTR_HIGH 0x1
-#define XGMAC_CTR_LOW 0x2
-#define XGMAC_CTR_HIGH 0x3
-#define XGMAC_TER 0x4
-#define XGMAC_LLR 0x5
-#define XGMAC_ADDR_SHIFT 8
-#define XGMAC_GCRR BIT(2)
-#define XGMAC_SRWO BIT(0)
-#define XGMAC_MTL_EST_GCL_DATA 0x00001084
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index b5509f244..f8e7775bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -47,14 +47,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
-static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
-{
- priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
- MAC_100000FD;
-}
-
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -105,17 +97,41 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value, reg;
+ u32 clear_mask = 0;
+ u32 ctrl2, ctrl3;
+ int i;
- reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
- if (queue >= 4)
+ ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
+ ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
+
+ /* The software must ensure that the same priority
+ * is not mapped to multiple Rx queues
+ */
+ for (i = 0; i < 4; i++)
+ clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
+ XGMAC_PSRQ(i));
+
+ ctrl2 &= ~clear_mask;
+ ctrl3 &= ~clear_mask;
+
+ /* First assign new priorities to a queue, then
+ * clear them from the other queues
+ */
+ if (queue < 4) {
+ ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
+
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ } else {
queue -= 4;
- value = readl(ioaddr + reg);
- value &= ~XGMAC_PSRQ(queue);
- value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+ ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
+ XGMAC_PSRQ(queue);
- writel(value, ioaddr + reg);
+ writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
+ writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
+ }
}
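
Note on the hunk above: the rewrite clears the priority bits from every per-queue field in both RXQ_CTRL2 (queues 0-3) and RXQ_CTRL3 (queues 4-7) before setting them for the target queue, so a priority can never stay mapped to two queues at once. A minimal standalone sketch of that pattern, assuming the 8-bit PSRQ fields implied by XGMAC_PSRQ_SHIFT (names here are illustrative, not driver API):

/* Sketch: map priority bits 'prio' to queue 'q', clearing them from
 * every other queue field first. Mirrors the driver logic above.
 */
#include <stdint.h>

static void map_prio(uint32_t *ctrl2, uint32_t *ctrl3,
		     uint32_t prio, unsigned int q)
{
	uint32_t clear = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)		/* same clear mask fits both regs */
		clear |= (prio << (i * 8)) & (0xffu << (i * 8));

	*ctrl2 &= ~clear;
	*ctrl3 &= ~clear;

	if (q < 4)
		*ctrl2 |= (prio << (q * 8)) & (0xffu << (q * 8));
	else
		*ctrl3 |= (prio << ((q - 4) * 8)) & (0xffu << ((q - 4) * 8));
}
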
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
@@ -1489,57 +1505,6 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
-{
- u32 ctrl;
-
- writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
-
- ctrl = (reg << XGMAC_ADDR_SHIFT);
- ctrl |= gcl ? 0 : XGMAC_GCRR;
-
- writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
-
- ctrl |= XGMAC_SRWO;
- writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
-
- return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
- ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
-}
-
-static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate)
-{
- int i, ret = 0x0;
- u32 ctrl;
-
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
- ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
- if (ret)
- return ret;
-
- for (i = 0; i < cfg->gcl_size; i++) {
- ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
- if (ret)
- return ret;
- }
-
- ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
- ctrl &= ~XGMAC_PTOV;
- ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
- if (cfg->enable)
- ctrl |= XGMAC_EEST | XGMAC_SSWL;
- else
- ctrl &= ~XGMAC_EEST;
-
- writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
- return 0;
-}
-
static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq,
u32 num_rxq, bool enable)
@@ -1567,7 +1532,6 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1609,7 +1573,6 @@ const struct stmmac_ops dwxgmac210_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure,
};
@@ -1629,7 +1592,6 @@ static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
const struct stmmac_ops dwxlgmac2_ops = {
.core_init = dwxgmac2_core_init,
- .phylink_get_caps = xgmac_phylink_get_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxlgmac2_rx_queue_enable,
@@ -1671,7 +1633,6 @@ const struct stmmac_ops dwxlgmac2_ops = {
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
- .est_configure = dwxgmac3_est_configure,
.fpe_configure = dwxgmac3_fpe_configure,
};
@@ -1690,6 +1651,9 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
@@ -1727,6 +1691,11 @@ int dwxlgmac2_setup(struct stmmac_priv *priv)
if (mac->multicast_filter_bins)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
mac->link.duplex = 0;
mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index b8ba8f2d8..29367105d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -7,6 +7,7 @@
#include "common.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+#include "stmmac_est.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -114,6 +115,7 @@ static const struct stmmac_hwif_entry {
const void *mode;
const void *tc;
const void *mmc;
+ const void *est;
int (*setup)(struct stmmac_priv *priv);
int (*quirks)(struct stmmac_priv *priv);
} stmmac_hw[] = {
@@ -162,6 +164,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
@@ -170,6 +173,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = stmmac_dwmac4_quirks,
}, {
@@ -180,6 +184,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac4_dma_ops,
@@ -188,6 +193,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -198,6 +204,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
@@ -206,6 +213,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -216,6 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
@@ -224,6 +233,7 @@ static const struct stmmac_hwif_entry {
.mode = &dwmac4_ring_mode_ops,
.tc = &dwmac510_tc_ops,
.mmc = &dwmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwmac4_setup,
.quirks = NULL,
}, {
@@ -235,6 +245,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
+ .est_off = EST_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -243,6 +254,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
}, {
@@ -254,6 +266,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = MMC_XGMAC_OFFSET,
+ .est_off = EST_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -262,6 +275,7 @@ static const struct stmmac_hwif_entry {
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
+ .est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
.quirks = stmmac_dwxlgmac_quirks,
},
@@ -296,6 +310,10 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
(needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
priv->mmcaddr = priv->ioaddr +
(needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+ if (needs_gmac4)
+ priv->estaddr = priv->ioaddr + EST_GMAC4_OFFSET;
+ else if (needs_xgmac)
+ priv->estaddr = priv->ioaddr + EST_XGMAC_OFFSET;
/* Check for HW specific setup first */
if (priv->plat->setup) {
@@ -332,10 +350,13 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
mac->mode = mac->mode ? : entry->mode;
mac->tc = mac->tc ? : entry->tc;
mac->mmc = mac->mmc ? : entry->mmc;
+ mac->est = mac->est ? : entry->est;
priv->hw = mac;
priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+ if (entry->est)
+ priv->estaddr = priv->ioaddr + entry->regs.est_off;
/* Entry found */
if (needs_setup) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 68aa2d5ca..7be04b547 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -56,6 +56,10 @@ struct stmmac_desc_ops {
void (*set_tx_ic)(struct dma_desc *p);
/* Last tx segment reports the transmit status */
int (*get_tx_ls)(struct dma_desc *p);
+ /* Get the tag of the descriptor */
+ u16 (*get_rx_vlan_tci)(struct dma_desc *p);
+ /* Get the valid status of descriptor */
+ bool (*get_rx_vlan_valid)(struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
int (*tx_status)(struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr);
@@ -117,6 +121,10 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_tx_ic, __args)
#define stmmac_get_tx_ls(__priv, __args...) \
stmmac_do_callback(__priv, desc, get_tx_ls, __args)
+#define stmmac_get_rx_vlan_tci(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_vlan_tci, __args)
+#define stmmac_get_rx_vlan_valid(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_vlan_valid, __args)
#define stmmac_tx_status(__priv, __args...) \
stmmac_do_callback(__priv, desc, tx_status, __args)
#define stmmac_get_tx_len(__priv, __args...) \
@@ -388,6 +396,9 @@ struct stmmac_ops {
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
+ struct sk_buff *skb);
+ void (*set_hw_vlan_mode)(struct mac_device_info *hw);
int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
struct mac_device_info *hw,
__be16 proto, u16 vid);
@@ -408,10 +419,6 @@ struct stmmac_ops {
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
- int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
- unsigned int ptp_rate);
- void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
- struct stmmac_extra_stats *x, u32 txqcnt);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
bool enable);
@@ -499,6 +506,10 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
#define stmmac_enable_vlan(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_rx_hw_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_hw_vlan, __args)
+#define stmmac_set_hw_vlan_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_hw_vlan_mode, __args)
#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
@@ -515,10 +526,6 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
-#define stmmac_est_configure(__priv, __args...) \
- stmmac_do_callback(__priv, mac, est_configure, __args)
-#define stmmac_est_irq_status(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, est_irq_status, __args)
#define stmmac_fpe_configure(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
#define stmmac_fpe_send_mpacket(__priv, __args...) \
@@ -644,9 +651,22 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
+struct stmmac_est_ops {
+ int (*configure)(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*irq_status)(struct stmmac_priv *priv, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+};
+
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, est, configure, __args)
+#define stmmac_est_irq_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, est, irq_status, __args)
+
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
+ u32 est_off;
};
extern const struct stmmac_ops dwmac100_ops;
@@ -665,6 +685,7 @@ extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
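
The new rx_hw_vlan/set_hw_vlan_mode MAC callbacks and the get_rx_vlan_tci/get_rx_vlan_valid descriptor callbacks above together implement MAC-level VLAN stripping: the MAC removes the tag and latches it into the Rx descriptor, and the driver reattaches it to the skb. A hypothetical sketch of what an rx_hw_vlan implementation looks like, assuming mac_device_info carries the desc ops and the usual kernel VLAN helpers (this is not copied from the actual dwmac4 code):

/* Hypothetical rx_hw_vlan sketch; needs linux/if_vlan.h in practice. */
static void sketch_rx_hw_vlan(struct mac_device_info *hw,
			      struct dma_desc *rx_desc, struct sk_buff *skb)
{
	/* Tag was stripped by the MAC and latched into the descriptor */
	if (hw->desc->get_rx_vlan_valid(rx_desc)) {
		u16 tci = hw->desc->get_rx_vlan_tci(rx_desc);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}
}
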
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a0c059258..14c9d2637 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -52,6 +52,8 @@ struct stmmac_counters {
unsigned int mmc_tx_excessdef;
unsigned int mmc_tx_pause_frame;
unsigned int mmc_tx_vlan_frame_g;
+ unsigned int mmc_tx_lpi_usec;
+ unsigned int mmc_tx_lpi_tran;
/* MMC RX counter registers */
unsigned int mmc_rx_framecount_gb;
@@ -78,9 +80,16 @@ struct stmmac_counters {
unsigned int mmc_rx_fifo_overflow;
unsigned int mmc_rx_vlan_frames_gb;
unsigned int mmc_rx_watchdog_error;
+ unsigned int mmc_rx_lpi_usec;
+ unsigned int mmc_rx_lpi_tran;
+ unsigned int mmc_rx_discard_frames_gb;
+ unsigned int mmc_rx_discard_octets_gb;
+ unsigned int mmc_rx_align_err_frames;
+
/* IPC */
unsigned int mmc_rx_ipc_intr_mask;
unsigned int mmc_rx_ipc_intr;
+
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
@@ -118,9 +127,14 @@ struct stmmac_counters {
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+ /* Stream-Gate Filter */
+ unsigned int mmc_sgf_pass_fragment_cntr;
+ unsigned int mmc_sgf_fail_fragment_cntr;
+
/* FPE */
unsigned int mmc_tx_fpe_fragment_cntr;
unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_tx_gate_overrun_cntr;
unsigned int mmc_rx_packet_assembly_err_cntr;
unsigned int mmc_rx_packet_smd_err_cntr;
unsigned int mmc_rx_packet_assembly_ok_cntr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 6a7c1d325..8597c6aba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -177,9 +177,12 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+#define MMC_XGMAC_SGF_PASS_PKT 0x1f0
+#define MMC_XGMAC_SGF_FAIL_PKT 0x1f4
#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_TX_GATE_OVERRUN 0x210
#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
@@ -187,6 +190,40 @@
#define MMC_XGMAC_RX_FPE_FRAG 0x234
#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
+#define MMC_XGMAC_RX_IPV4_GD 0x264
+#define MMC_XGMAC_RX_IPV4_HDERR 0x26c
+#define MMC_XGMAC_RX_IPV4_NOPAY 0x274
+#define MMC_XGMAC_RX_IPV4_FRAG 0x27c
+#define MMC_XGMAC_RX_IPV4_UDSBL 0x284
+
+#define MMC_XGMAC_RX_IPV6_GD 0x28c
+#define MMC_XGMAC_RX_IPV6_HDERR 0x294
+#define MMC_XGMAC_RX_IPV6_NOPAY 0x29c
+
+#define MMC_XGMAC_RX_UDP_GD 0x2a4
+#define MMC_XGMAC_RX_UDP_ERR 0x2ac
+#define MMC_XGMAC_RX_TCP_GD 0x2b4
+#define MMC_XGMAC_RX_TCP_ERR 0x2bc
+#define MMC_XGMAC_RX_ICMP_GD 0x2c4
+#define MMC_XGMAC_RX_ICMP_ERR 0x2cc
+
+#define MMC_XGMAC_RX_IPV4_GD_OCTETS 0x2d4
+#define MMC_XGMAC_RX_IPV4_HDERR_OCTETS 0x2dc
+#define MMC_XGMAC_RX_IPV4_NOPAY_OCTETS 0x2e4
+#define MMC_XGMAC_RX_IPV4_FRAG_OCTETS 0x2ec
+#define MMC_XGMAC_RX_IPV4_UDSBL_OCTETS 0x2f4
+
+#define MMC_XGMAC_RX_IPV6_GD_OCTETS 0x2fc
+#define MMC_XGMAC_RX_IPV6_HDERR_OCTETS 0x304
+#define MMC_XGMAC_RX_IPV6_NOPAY_OCTETS 0x30c
+
+#define MMC_XGMAC_RX_UDP_GD_OCTETS 0x314
+#define MMC_XGMAC_RX_UDP_ERR_OCTETS 0x31c
+#define MMC_XGMAC_RX_TCP_GD_OCTETS 0x324
+#define MMC_XGMAC_RX_TCP_ERR_OCTETS 0x32c
+#define MMC_XGMAC_RX_ICMP_GD_OCTETS 0x334
+#define MMC_XGMAC_RX_ICMP_ERR_OCTETS 0x33c
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -414,6 +451,8 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
&mmc->mmc_tx_pause_frame);
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
&mmc->mmc_tx_vlan_frame_g);
+ mmc->mmc_tx_lpi_usec += readl(mmcaddr + MMC_XGMAC_TX_LPI_USEC);
+ mmc->mmc_tx_lpi_tran += readl(mmcaddr + MMC_XGMAC_TX_LPI_TRAN);
/* MMC RX counter registers */
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
@@ -459,9 +498,23 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
&mmc->mmc_rx_vlan_frames_gb);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
-
+ mmc->mmc_rx_lpi_usec += readl(mmcaddr + MMC_XGMAC_RX_LPI_USEC);
+ mmc->mmc_rx_lpi_tran += readl(mmcaddr + MMC_XGMAC_RX_LPI_TRAN);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_DISCARD_PKT_GB,
+ &mmc->mmc_rx_discard_frames_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_DISCARD_OCT_GB,
+ &mmc->mmc_rx_discard_octets_gb);
+ mmc->mmc_rx_align_err_frames +=
+ readl(mmcaddr + MMC_XGMAC_RX_ALIGN_ERR_PKT);
+
+ mmc->mmc_sgf_pass_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_SGF_PASS_PKT);
+ mmc->mmc_sgf_fail_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_SGF_FAIL_PKT);
mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_GATE_OVERRUN,
+ &mmc->mmc_tx_gate_overrun_cntr);
mmc->mmc_rx_packet_assembly_err_cntr +=
readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
mmc->mmc_rx_packet_smd_err_cntr +=
@@ -470,6 +523,68 @@ static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
mmc->mmc_rx_fpe_fragment_cntr +=
readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_GD,
+ &mmc->mmc_rx_ipv4_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_HDERR,
+ &mmc->mmc_rx_ipv4_hderr);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_NOPAY,
+ &mmc->mmc_rx_ipv4_nopay);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_FRAG,
+ &mmc->mmc_rx_ipv4_frag);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_UDSBL,
+ &mmc->mmc_rx_ipv4_udsbl);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_GD,
+ &mmc->mmc_rx_ipv6_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_HDERR,
+ &mmc->mmc_rx_ipv6_hderr);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_NOPAY,
+ &mmc->mmc_rx_ipv6_nopay);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_GD,
+ &mmc->mmc_rx_udp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_ERR,
+ &mmc->mmc_rx_udp_err);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_GD,
+ &mmc->mmc_rx_tcp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_ERR,
+ &mmc->mmc_rx_tcp_err);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_GD,
+ &mmc->mmc_rx_icmp_gd);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_ERR,
+ &mmc->mmc_rx_icmp_err);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_GD_OCTETS,
+ &mmc->mmc_rx_ipv4_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_HDERR_OCTETS,
+ &mmc->mmc_rx_ipv4_hderr_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_NOPAY_OCTETS,
+ &mmc->mmc_rx_ipv4_nopay_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_FRAG_OCTETS,
+ &mmc->mmc_rx_ipv4_frag_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV4_UDSBL_OCTETS,
+ &mmc->mmc_rx_ipv4_udsbl_octets);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_GD_OCTETS,
+ &mmc->mmc_rx_ipv6_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_HDERR_OCTETS,
+ &mmc->mmc_rx_ipv6_hderr_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_IPV6_NOPAY_OCTETS,
+ &mmc->mmc_rx_ipv6_nopay_octets);
+
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_GD_OCTETS,
+ &mmc->mmc_rx_udp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UDP_ERR_OCTETS,
+ &mmc->mmc_rx_udp_err_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_GD_OCTETS,
+ &mmc->mmc_rx_tcp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_TCP_ERR_OCTETS,
+ &mmc->mmc_rx_tcp_err_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_GD_OCTETS,
+ &mmc->mmc_rx_icmp_gd_octets);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_ICMP_ERR_OCTETS,
+ &mmc->mmc_rx_icmp_err_octets);
}
const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b8c93b881..f155e4841 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -51,6 +51,7 @@ struct stmmac_tx_info {
bool last_segment;
bool is_jumbo;
enum stmmac_txbuf_type buf_type;
+ struct xsk_tx_metadata_compl xsk_meta;
};
#define STMMAC_TBS_AVAIL BIT(0)
@@ -100,6 +101,17 @@ struct stmmac_xdp_buff {
struct dma_desc *ndesc;
};
+struct stmmac_metadata_request {
+ struct stmmac_priv *priv;
+ struct dma_desc *tx_desc;
+ bool *set_ic;
+};
+
+struct stmmac_xsk_tx_complete {
+ struct stmmac_priv *priv;
+ struct dma_desc *desc;
+};
+
struct stmmac_rx_queue {
u32 rx_count_frames;
u32 queue_index;
@@ -284,6 +296,7 @@ struct stmmac_priv {
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
int sfty_ce_irq;
int sfty_ue_irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
new file mode 100644
index 000000000..4da6ccc17
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, Intel Corporation
+ * stmmac EST(802.3 Qbv) handling
+ */
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include "stmmac.h"
+#include "stmmac_est.h"
+
+static int est_write(void __iomem *est_addr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, est_addr + EST_GCL_DATA);
+
+ ctrl = (reg << EST_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : EST_GCRR;
+ writel(ctrl, est_addr + EST_GCL_CONTROL);
+
+ ctrl |= EST_SRWO;
+ writel(ctrl, est_addr + EST_GCL_CONTROL);
+
+ return readl_poll_timeout(est_addr + EST_GCL_CONTROL, ctrl,
+ !(ctrl & EST_SRWO), 100, 5000);
+}
+
+static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ void __iomem *est_addr = priv->estaddr;
+ int i, ret = 0;
+ u32 ctrl;
+
+ ret |= est_write(est_addr, EST_BTR_LOW, cfg->btr[0], false);
+ ret |= est_write(est_addr, EST_BTR_HIGH, cfg->btr[1], false);
+ ret |= est_write(est_addr, EST_TER, cfg->ter, false);
+ ret |= est_write(est_addr, EST_LLR, cfg->gcl_size, false);
+ ret |= est_write(est_addr, EST_CTR_LOW, cfg->ctr[0], false);
+ ret |= est_write(est_addr, EST_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = est_write(est_addr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(est_addr + EST_CONTROL);
+ if (priv->plat->has_xgmac) {
+ ctrl &= ~EST_XGMAC_PTOV;
+ ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) <<
+ EST_XGMAC_PTOV_SHIFT;
+ } else {
+ ctrl &= ~EST_GMAC5_PTOV;
+ ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_GMAC5_PTOV_MUL) <<
+ EST_GMAC5_PTOV_SHIFT;
+ }
+ if (cfg->enable)
+ ctrl |= EST_EEST | EST_SSWL;
+ else
+ ctrl &= ~EST_EEST;
+
+ writel(ctrl, est_addr + EST_CONTROL);
+
+ /* Configure EST interrupt */
+ if (cfg->enable)
+ ctrl = EST_IECGCE | EST_IEHS | EST_IEHF | EST_IEBE | EST_IECC;
+ else
+ ctrl = 0;
+
+ writel(ctrl, est_addr + EST_INT_EN);
+
+ return 0;
+}
+
+static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt)
+{
+ u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
+ void __iomem *est_addr = priv->estaddr;
+ u32 txqcnt_mask = BIT(txqcnt) - 1;
+
+ status = readl(est_addr + EST_STATUS);
+
+ value = EST_CGCE | EST_HLBS | EST_HLBF | EST_BTRE | EST_SWLC;
+
+ /* Return if there is no error */
+ if (!(status & value))
+ return;
+
+ if (status & EST_CGCE) {
+ /* Clear Interrupt */
+ writel(EST_CGCE, est_addr + EST_STATUS);
+
+ x->mtl_est_cgce++;
+ }
+
+ if (status & EST_HLBS) {
+ value = readl(est_addr + EST_SCH_ERR);
+ value &= txqcnt_mask;
+
+ x->mtl_est_hlbs++;
+
+ /* Clear Interrupt */
+ writel(value, est_addr + EST_SCH_ERR);
+
+		/* Collect info to show all the queues that have the HLBS
+		 * issue. The only way to clear this is to clear the
+		 * statistic.
+ */
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
+ }
+
+ if (status & EST_HLBF) {
+ value = readl(est_addr + EST_FRM_SZ_ERR);
+ feqn = value & txqcnt_mask;
+
+ value = readl(est_addr + EST_FRM_SZ_CAP);
+ hbfq = (value & EST_SZ_CAP_HBFQ_MASK(txqcnt)) >>
+ EST_SZ_CAP_HBFQ_SHIFT;
+ hbfs = value & EST_SZ_CAP_HBFS_MASK;
+
+ x->mtl_est_hlbf++;
+
+ /* Clear Interrupt */
+ writel(feqn, est_addr + EST_FRM_SZ_ERR);
+
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
+ hbfq, hbfs);
+ }
+
+ if (status & EST_BTRE) {
+ if (priv->plat->has_xgmac) {
+ btrl = FIELD_GET(EST_XGMAC_BTRL, status);
+ btrl_max = FIELD_MAX(EST_XGMAC_BTRL);
+ } else {
+ btrl = FIELD_GET(EST_GMAC5_BTRL, status);
+ btrl_max = FIELD_MAX(EST_GMAC5_BTRL);
+ }
+ if (btrl == btrl_max)
+ x->mtl_est_btrlm++;
+ else
+ x->mtl_est_btre++;
+
+ if (net_ratelimit())
+ netdev_info(dev, "EST: BTR Error Loop Count %u\n",
+ btrl);
+
+ writel(EST_BTRE, est_addr + EST_STATUS);
+ }
+
+ if (status & EST_SWLC) {
+ writel(EST_SWLC, est_addr + EST_STATUS);
+ netdev_info(dev, "EST: SWOL has been switched\n");
+ }
+}
+
+const struct stmmac_est_ops dwmac510_est_ops = {
+ .configure = est_configure,
+ .irq_status = est_irq_status,
+};
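
A worked example of the PTOV arithmetic in est_configure() above (a sketch, not driver code): PTOV holds the PTP clock period overhead in clock cycles, and the per-IP multiplier comes from EST_XGMAC_PTOV_MUL (9) or EST_GMAC5_PTOV_MUL (6) in stmmac_est.h below. With an assumed 250 MHz PTP clock:

#include <stdio.h>

int main(void)
{
	unsigned long ptp_rate = 250000000UL;	/* assumed clock, Hz */
	unsigned long period_ns = 1000000000UL / ptp_rate;	/* 4 ns */

	printf("xgmac PTOV = %lu cycles\n", period_ns * 9);	/* 36 */
	printf("gmac5 PTOV = %lu cycles\n", period_ns * 6);	/* 24 */
	return 0;
}
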
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
new file mode 100644
index 000000000..7a858c566
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ * stmmac EST (IEEE 802.1Qbv) handling
+ */
+
+#define EST_GMAC4_OFFSET 0x00000c50
+#define EST_XGMAC_OFFSET 0x00001050
+
+#define EST_CONTROL 0x00000000
+#define EST_GMAC5_PTOV GENMASK(31, 24)
+#define EST_GMAC5_PTOV_SHIFT 24
+#define EST_GMAC5_PTOV_MUL 6
+#define EST_XGMAC_PTOV GENMASK(31, 23)
+#define EST_XGMAC_PTOV_SHIFT 23
+#define EST_XGMAC_PTOV_MUL 9
+#define EST_SSWL BIT(1)
+#define EST_EEST BIT(0)
+
+#define EST_STATUS 0x00000008
+#define EST_GMAC5_BTRL GENMASK(11, 8)
+#define EST_XGMAC_BTRL GENMASK(15, 8)
+#define EST_SWOL BIT(7)
+#define EST_SWOL_SHIFT 7
+#define EST_CGCE BIT(4)
+#define EST_HLBS BIT(3)
+#define EST_HLBF BIT(2)
+#define EST_BTRE BIT(1)
+#define EST_SWLC BIT(0)
+
+#define EST_SCH_ERR 0x00000010
+
+#define EST_FRM_SZ_ERR 0x00000014
+
+#define EST_FRM_SZ_CAP 0x00000018
+#define EST_SZ_CAP_HBFS_MASK GENMASK(14, 0)
+#define EST_SZ_CAP_HBFQ_SHIFT 16
+#define EST_SZ_CAP_HBFQ_MASK(val) \
+ ({ \
+ typeof(val) _val = (val); \
+ (_val > 4 ? GENMASK(18, 16) : \
+ _val > 2 ? GENMASK(17, 16) : \
+ BIT(16)); \
+ })
+
+#define EST_INT_EN 0x00000020
+#define EST_IECGCE EST_CGCE
+#define EST_IEHS EST_HLBS
+#define EST_IEHF EST_HLBF
+#define EST_IEBE EST_BTRE
+#define EST_IECC EST_SWLC
+
+#define EST_GCL_CONTROL 0x00000030
+#define EST_BTR_LOW 0x0
+#define EST_BTR_HIGH 0x1
+#define EST_CTR_LOW 0x2
+#define EST_CTR_HIGH 0x3
+#define EST_TER 0x4
+#define EST_LLR 0x5
+#define EST_ADDR_SHIFT 8
+#define EST_GCRR BIT(2)
+#define EST_SRWO BIT(0)
+
+#define EST_GCL_DATA 0x00000034
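
EST_SZ_CAP_HBFQ_MASK() above sizes the queue-number field of EST_FRM_SZ_CAP to the active Tx queue count; a sketch of the width selection it encodes (illustrative helper, not part of the header):

/* Bits of the HBFQ field above shift 16: 1 bit for up to 2 queues,
 * 2 bits for 3-4 queues, 3 bits for 5-8 queues.
 */
static unsigned int hbfq_field_width(unsigned int txqcnt)
{
	return txqcnt > 4 ? 3 : txqcnt > 2 ? 2 : 1;
}
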
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f9c1f3f4a..ec44becf0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -212,6 +212,8 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_excessdef),
STMMAC_MMC_STAT(mmc_tx_pause_frame),
STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_tx_lpi_usec),
+ STMMAC_MMC_STAT(mmc_tx_lpi_tran),
STMMAC_MMC_STAT(mmc_rx_framecount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
@@ -236,6 +238,11 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_lpi_usec),
+ STMMAC_MMC_STAT(mmc_rx_lpi_tran),
+ STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_align_err_frames),
STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
@@ -266,8 +273,11 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_sgf_pass_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_sgf_fail_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_tx_gate_overrun_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
@@ -1125,41 +1135,42 @@ static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
return ARRAY_SIZE(priv->rss.table);
}
-static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int stmmac_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
- indir[i] = priv->rss.table[i];
+ rxfh->indir[i] = priv->rss.table[i];
}
- if (key)
- memcpy(key, priv->rss.key, sizeof(priv->rss.key));
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->key)
+ memcpy(rxfh->key, priv->rss.key, sizeof(priv->rss.key));
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int stmmac_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(dev);
int i;
- if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
- priv->rss.table[i] = indir[i];
+ priv->rss.table[i] = rxfh->indir[i];
}
- if (key)
- memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+ if (rxfh->key)
+ memcpy(priv->rss.key, rxfh->key, sizeof(priv->rss.key));
return stmmac_rss_configure(priv, priv->hw, &priv->rss,
priv->plat->rx_queues_to_use);
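
The two rxfh hunks above track the kernel-wide switch from loose (indir, key, hfunc) arguments to a single struct ethtool_rxfh_param. Only the members this driver consumes are sketched here; the real definition in include/linux/ethtool.h carries more fields:

/* Reduced sketch of struct ethtool_rxfh_param - not the full kernel
 * definition, just the members the handlers above read and write
 * (u8/u32 as in linux/types.h).
 */
struct ethtool_rxfh_param_sketch {
	u8	hfunc;	/* ETH_RSS_HASH_TOP, ETH_RSS_HASH_NO_CHANGE, ... */
	u32	*indir;	/* RSS indirection table, NULL if untouched */
	u8	*key;	/* RSS hash key, NULL if untouched */
};
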
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index de4d76919..83b732c30 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1198,17 +1198,6 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
-static void stmmac_set_half_duplex(struct stmmac_priv *priv)
-{
- /* Half-Duplex can only work with single tx queue */
- if (priv->plat->tx_queues_to_use > 1)
- priv->phylink_config.mac_capabilities &=
- ~(MAC_10HD | MAC_100HD | MAC_1000HD);
- else
- priv->phylink_config.mac_capabilities |=
- (MAC_10HD | MAC_100HD | MAC_1000HD);
-}
-
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1236,15 +1225,11 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
- priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10FD | MAC_100FD |
- MAC_1000FD;
-
- stmmac_set_half_duplex(priv);
-
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
max_speed = priv->plat->max_speed;
if (max_speed)
phylink_limit_mac_speed(&priv->phylink_config, max_speed);
@@ -2431,6 +2416,46 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
}
}
+static void stmmac_xsk_request_timestamp(void *_priv)
+{
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
+ *meta_req->set_ic = true;
+}
+
+static u64 stmmac_xsk_fill_timestamp(void *_priv)
+{
+ struct stmmac_xsk_tx_complete *tx_compl = _priv;
+ struct stmmac_priv *priv = tx_compl->priv;
+ struct dma_desc *desc = tx_compl->desc;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return 0;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, desc)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+ return ns_to_ktime(ns);
+ }
+
+ return 0;
+}
+
+static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = stmmac_xsk_request_timestamp,
+ .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+};
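
The callbacks above hook stmmac into the AF_XDP Tx-metadata path: xsk_tx_metadata_request() runs at submit time (requesting a hardware timestamp and forcing a completion interrupt via set_ic), and xsk_tx_metadata_complete() fills the timestamp back at clean time. A minimal sketch of the userspace side, assuming the uapi layout from <linux/if_xdp.h>; ring setup and the tx_metadata_len offset handling are omitted:

/* Userspace sketch: ask for a Tx timestamp on one AF_XDP descriptor.
 * 'meta' must sit tx_metadata_len bytes before the packet data.
 */
#include <linux/if_xdp.h>

static void request_tx_timestamp(struct xsk_tx_metadata *meta)
{
	meta->flags = XDP_TXMD_FLAGS_TIMESTAMP;
}

static __u64 read_tx_timestamp(const struct xsk_tx_metadata *meta)
{
	return meta->completion.tx_timestamp;	/* filled on completion */
}
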
+
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
@@ -2449,6 +2474,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
budget = min(budget, stmmac_tx_avail(priv, queue));
while (budget-- > 0) {
+ struct stmmac_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
dma_addr_t dma_addr;
bool set_ic;
@@ -2472,6 +2499,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_desc = tx_q->dma_tx + entry;
dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
@@ -2499,6 +2527,11 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
else
set_ic = false;
+ meta_req.priv = priv;
+ meta_req.tx_desc = tx_desc;
+ meta_req.set_ic = &set_ic;
+ xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
+ &meta_req);
if (set_ic) {
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
@@ -2511,6 +2544,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ xsk_tx_metadata_to_compl(meta,
+ &tx_q->tx_skbuff_dma[entry].xsk_meta);
+
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
@@ -2619,8 +2655,19 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
} else {
tx_packets++;
}
- if (skb)
+ if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
+ struct stmmac_xsk_tx_complete tx_compl = {
+ .priv = priv,
+ .desc = p,
+ };
+
+ xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
+ &stmmac_xsk_tx_metadata_ops,
+ &tx_compl);
+ }
}
if (likely(tx_q->tx_skbuff_dma[entry].buf &&
@@ -3468,6 +3515,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+
if (priv->dma_cap.fpesel) {
stmmac_fpe_start_wq(priv);
@@ -5020,7 +5069,12 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
}
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
@@ -5534,7 +5588,14 @@ drain_data:
/* Got entire packet into SKB. Finish it. */
stmmac_get_rx_hwtstamp(priv, p, np, skb);
- stmmac_rx_vlan(priv->dev, skb);
+
+ if (priv->hw->hw_vlan_en)
+ /* MAC level stripping. */
+ stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
+ else
+ /* Driver level stripping. */
+ stmmac_rx_vlan(priv->dev, skb);
+
skb->protocol = eth_type_trans(skb, priv->dev);
if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
@@ -5840,6 +5901,13 @@ static int stmmac_set_features(struct net_device *netdev,
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
}
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ priv->hw->hw_vlan_en = true;
+ else
+ priv->hw->hw_vlan_en = false;
+
+ stmmac_set_hw_vlan_mode(priv, priv->hw);
+
return 0;
}
@@ -5901,7 +5969,7 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
pm_wakeup_event(priv->device, 0);
if (priv->dma_cap.estsel)
- stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ stmmac_est_irq_status(priv, priv, priv->dev,
&priv->xstats, tx_cnt);
if (priv->dma_cap.fpesel) {
@@ -6183,30 +6251,23 @@ static struct dentry *stmmac_fs_dir;
static void sysfs_display_ring(void *head, int size, int extend_desc,
struct seq_file *seq, dma_addr_t dma_phy_addr)
{
- int i;
struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
struct dma_desc *p = (struct dma_desc *)head;
+ unsigned int desc_size;
dma_addr_t dma_addr;
+ int i;
+ desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
for (i = 0; i < size; i++) {
- if (extend_desc) {
- dma_addr = dma_phy_addr + i * sizeof(*ep);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(ep->basic.des0),
- le32_to_cpu(ep->basic.des1),
- le32_to_cpu(ep->basic.des2),
- le32_to_cpu(ep->basic.des3));
- ep++;
- } else {
- dma_addr = dma_phy_addr + i * sizeof(*p);
- seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
- i, &dma_addr,
- le32_to_cpu(p->des0), le32_to_cpu(p->des1),
- le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ dma_addr = dma_phy_addr + i * desc_size;
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ if (extend_desc)
+ p = &(++ep)->basic;
+ else
p++;
- }
- seq_printf(seq, "\n");
}
}
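
The sysfs_display_ring() rewrite above walks both layouts with a single struct dma_desc pointer: for extended rings it advances the outer struct dma_extended_desc and re-takes its embedded basic member (p = &(++ep)->basic). A reduced sketch of the pattern with hypothetical types:

struct basic_d { unsigned int des0; };
struct ext_d { struct basic_d basic; unsigned int ext0; };

static void walk_ring(void *head, int n, int extended)
{
	struct ext_d *ep = head;
	struct basic_d *p = head;	/* 'basic' sits at offset 0 */
	int i;

	for (i = 0; i < n; i++) {
		/* print p->des0 here ... */
		if (extended)
			p = &(++ep)->basic;
		else
			p++;
	}
}
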
@@ -7210,6 +7271,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
+ int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@@ -7223,7 +7285,14 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_set_half_duplex(priv);
+ stmmac_mac_phylink_get_caps(priv);
+
+ priv->phylink_config.mac_capabilities = priv->hw->link.caps;
+
+ max_speed = priv->plat->max_speed;
+ if (max_speed)
+ phylink_limit_mac_speed(&priv->phylink_config, max_speed);
+
stmmac_napi_add(dev);
if (netif_running(dev))
@@ -7469,6 +7538,7 @@ int stmmac_dvr_probe(struct device *device,
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
+ ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
@@ -7535,6 +7605,9 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1ffde555d..70eadc83c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -296,62 +296,80 @@ out:
}
/**
- * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
- * @plat: driver data platform structure
- * @np: device tree node
- * @dev: device pointer
- * Description:
- * The mdio bus will be allocated in case of a phy transceiver is on board;
- * it will be NULL if the fixed-link is configured.
- * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
- * in any case (for DSA, mdio must be registered even if fixed-link).
- * The table below sums the supported configurations:
- * -------------------------------
- * snps,phy-addr | Y
- * -------------------------------
- * phy-handle | Y
- * -------------------------------
- * fixed-link | N
- * -------------------------------
- * snps,dwmac-mdio |
- * even if | Y
- * fixed-link |
- * -------------------------------
+ * stmmac_of_get_mdio() - Gets the MDIO bus from the devicetree.
+ * @np: devicetree node
*
- * It returns 0 in case of success otherwise -ENODEV.
+ * The MDIO bus will be searched for in the following ways:
+ * 1. The compatible is "snps,dwc-qos-ethernet-4.10" && a "mdio" named
+ * child node exists
+ * 2. A child node with the "snps,dwmac-mdio" compatible is present
+ *
+ * Return: The MDIO node if present otherwise NULL
*/
-static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
- struct device_node *np, struct device *dev)
+static struct device_node *stmmac_of_get_mdio(struct device_node *np)
{
- bool mdio = !of_phy_is_fixed_link(np);
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
{},
};
+ struct device_node *mdio_node = NULL;
if (of_match_node(need_mdio_ids, np)) {
- plat->mdio_node = of_get_child_by_name(np, "mdio");
+ mdio_node = of_get_child_by_name(np, "mdio");
} else {
/**
* If snps,dwmac-mdio is passed from DT, always register
* the MDIO
*/
- for_each_child_of_node(np, plat->mdio_node) {
- if (of_device_is_compatible(plat->mdio_node,
+ for_each_child_of_node(np, mdio_node) {
+ if (of_device_is_compatible(mdio_node,
"snps,dwmac-mdio"))
break;
}
}
- if (plat->mdio_node) {
+ return mdio_node;
+}
+
+/**
+ * stmmac_mdio_setup() - Populate platform related MDIO structures.
+ * @plat: driver data platform structure
+ * @np: devicetree node
+ * @dev: device pointer
+ *
+ * This searches for MDIO information from the devicetree.
+ * If an MDIO node is found, it's assigned to plat->mdio_node and
+ * plat->mdio_bus_data is allocated.
+ * If no connection can be determined, just plat->mdio_bus_data is allocated
+ * to indicate a bus should be created and scanned for a phy.
+ * If it's determined there's no MDIO bus needed, both are left NULL.
+ *
+ * This expects that plat->phy_node has already been searched for.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+static int stmmac_mdio_setup(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+{
+ bool legacy_mdio;
+
+ plat->mdio_node = stmmac_of_get_mdio(np);
+ if (plat->mdio_node)
dev_dbg(dev, "Found MDIO subnode\n");
- mdio = true;
- }
- if (mdio) {
- plat->mdio_bus_data =
- devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
- GFP_KERNEL);
+	/* Legacy devicetrees allowed the MDIO bus description to be omitted
+	 * and expected the bus to be scanned for devices. If there's no phy
+	 * or fixed-link described, assume this is the case, since there must
+	 * be something connected to the MAC.
+ */
+ legacy_mdio = !of_phy_is_fixed_link(np) && !plat->phy_node;
+ if (legacy_mdio)
+ dev_info(dev, "Deprecated MDIO bus assumption used\n");
+
+ if (plat->mdio_node || legacy_mdio) {
+ plat->mdio_bus_data = devm_kzalloc(dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
if (!plat->mdio_bus_data)
return -ENOMEM;
@@ -471,8 +489,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- /* To Configure PHY by using all device-tree supported properties */
- rc = stmmac_dt_phy(plat, np, &pdev->dev);
+ rc = stmmac_mdio_setup(plat, np, &pdev->dev);
if (rc)
return ERR_PTR(rc);
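
To summarise the split above: stmmac_of_get_mdio() only finds the node, and stmmac_mdio_setup() turns that plus the phy/fixed-link state into one of three outcomes. A compact restatement of the code paths (annotation only, mirrors the functions above):

/* Outcomes of stmmac_mdio_setup(), schematically:
 *   DT mdio node found            -> mdio_node set, mdio_bus_data allocated
 *   no node, no phy/fixed-link    -> legacy scan: mdio_bus_data only
 *   fixed-link or phy-handle only -> both left NULL, no MDIO bus created
 */
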
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index bffa5c017..e04830a3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -72,7 +72,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
est_rst = true;
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
priv->plat->est->enable = true;
- ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6ad3e0a11..26fa33e5e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -975,6 +975,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
+ if (qopt->cycle_time_extension >= BIT(wid + 7))
+ return -ERANGE;
if (!plat->est) {
plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
@@ -1041,6 +1043,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
+ priv->plat->est->ter = qopt->cycle_time_extension;
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1051,7 +1055,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
*/
priv->plat->fpe_cfg->enable = fpe;
- ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret) {
@@ -1072,7 +1076,7 @@ disable:
if (priv->plat->est) {
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
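
The new -ERANGE check above rejects cycle-time extensions that cannot be programmed: wid is the hardware time-interval width, and the EST time-extension field is wid + 7 bits wide, so anything at or above BIT(wid + 7) overflows it. A quick arithmetic sketch with an assumed width:

#include <stdio.h>

int main(void)
{
	unsigned int wid = 24;	/* assumed estwid decode; varies by core */
	unsigned long long limit = 1ULL << (wid + 7);	/* BIT(wid + 7) */

	printf("max cycle_time_extension: %llu ns\n", limit - 1);
	return 0;
}
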
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index e60b557d5..1530d1398 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -134,14 +134,16 @@ config TI_K3_AM65_CPTS
protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
and PCIe Subsystem Precision Time Measurement (PTM).
-config TI_AM65_CPSW_TAS
- bool "Enable TAS offload in AM65 CPSW"
+config TI_AM65_CPSW_QOS
+ bool "Enable QoS offload features in AM65 CPSW"
depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
help
- Say y here to support Time Aware Shaper(TAS) offload in AM65 CPSW.
- AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
- defined in IEEE 802.1Q 2018. The EST scheduler runs on CPTS and the
- TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
+ This option enables QoS offload features in AM65 CPSW like
+ Time Aware Shaper (TAS) / Enhanced Scheduled Traffic (EST),
+ MQPRIO qdisc offload and Frame-Preemption MAC Merge / Interspersing
+ Express Traffic (IET).
+ The EST scheduler runs on CPTS and the TAS/EST schedule is
+ updated in the Fetch RAM memory of the CPSW.
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
@@ -187,6 +189,7 @@ config TI_ICSSG_PRUETH
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 27de1d697..d8590304f 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -26,7 +26,8 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o
+ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index c51e2af91..35fceba01 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -11,6 +11,7 @@
#include <linux/pm_runtime.h>
#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
#include "cpsw_ale.h"
#include "am65-cpts.h"
@@ -662,6 +663,34 @@ static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
hw_stats[i].offset);
}
+static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_stats_regs __iomem *stats;
+
+ stats = port->stat_base;
+
+ if (s->src != ETHTOOL_MAC_STATS_SRC_AGGREGATE)
+ return;
+
+ s->FramesTransmittedOK = readl_relaxed(&stats->tx_good_frames);
+ s->SingleCollisionFrames = readl_relaxed(&stats->tx_single_coll_frames);
+ s->MultipleCollisionFrames = readl_relaxed(&stats->tx_mult_coll_frames);
+ s->FramesReceivedOK = readl_relaxed(&stats->rx_good_frames);
+ s->FrameCheckSequenceErrors = readl_relaxed(&stats->rx_crc_errors);
+ s->AlignmentErrors = readl_relaxed(&stats->rx_align_code_errors);
+ s->OctetsTransmittedOK = readl_relaxed(&stats->tx_octets);
+ s->FramesWithDeferredXmissions = readl_relaxed(&stats->tx_deferred_frames);
+ s->LateCollisions = readl_relaxed(&stats->tx_late_collisions);
+ s->CarrierSenseErrors = readl_relaxed(&stats->tx_carrier_sense_errors);
+ s->OctetsReceivedOK = readl_relaxed(&stats->rx_octets);
+ s->MulticastFramesXmittedOK = readl_relaxed(&stats->tx_multicast_frames);
+ s->BroadcastFramesXmittedOK = readl_relaxed(&stats->tx_broadcast_frames);
+ s->MulticastFramesReceivedOK = readl_relaxed(&stats->rx_multicast_frames);
+ s->BroadcastFramesReceivedOK = readl_relaxed(&stats->rx_broadcast_frames);
+}
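
am65_cpsw_get_eth_mac_stats() above implements only the aggregate statistics source; a hypothetical pMAC-aware variant would have to switch on s->src instead of bailing out (sketch only, per-MAC counter banks are not exposed by this hardware):

switch (s->src) {
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
	/* combined eMAC+pMAC counters - what the driver reads today */
	break;
case ETHTOOL_MAC_STATS_SRC_EMAC:
case ETHTOOL_MAC_STATS_SRC_PMAC:
	/* would require separate counter banks; not available here */
	break;
}
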
+
static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
@@ -715,6 +744,240 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
return 0;
}
+static void am65_cpsw_port_iet_rx_enable(struct am65_cpsw_port *port, bool enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_IET_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_IET_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+ am65_cpsw_iet_common_enable(port->common);
+}
+
+static void am65_cpsw_port_iet_tx_enable(struct am65_cpsw_port *port, bool enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ if (enable)
+ val |= AM65_CPSW_PN_IET_MAC_PENABLE;
+ else
+ val &= ~AM65_CPSW_PN_IET_MAC_PENABLE;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+}
+
+static int am65_cpsw_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_ndev_priv *priv = netdev_priv(ndev);
+ u32 port_ctrl, iet_ctrl, iet_status;
+ u32 add_frag_size;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ iet_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ port_ctrl = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+
+ state->tx_enabled = !!(iet_ctrl & AM65_CPSW_PN_IET_MAC_PENABLE);
+ state->pmac_enabled = !!(port_ctrl & AM65_CPSW_PN_CTL_IET_PORT_EN);
+
+ iet_status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
+
+ if (iet_ctrl & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ else if (iet_status & AM65_CPSW_PN_MAC_VERIFIED)
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ else if (iet_status & AM65_CPSW_PN_MAC_VERIFY_FAIL)
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ else
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+
+ add_frag_size = AM65_CPSW_PN_IET_MAC_GET_ADDFRAGSIZE(iet_ctrl);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);
+
+ /* Errata i2208: RX min fragment size cannot be less than 124 */
+ state->rx_min_frag_size = 124;
+
+	/* FPE is active if tx is enabled and verification either succeeded
+	 * or is disabled (forced)
+	 */
+ state->tx_active = state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
+ state->verify_enabled = !(iet_ctrl & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY);
+
+ state->verify_time = port->qos.iet.verify_time_ms;
+
+	/* IEEE 802.3-2018 clause 30.14.1.6 says that the aMACMergeVerifyTime
+ * variable has a range between 1 and 128 ms inclusive. Limit to that.
+ */
+ state->max_verify_time = 128;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int am65_cpsw_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_ndev_priv *priv = netdev_priv(ndev);
+ struct am65_cpsw_iet *iet = &port->qos.iet;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size, &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ if (cfg->pmac_enabled) {
+ /* change TX & RX FIFO MAX_BLKS as per TRM recommendation */
+ if (!iet->original_max_blks)
+ iet->original_max_blks = readl(port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+
+ writel(AM65_CPSW_PN_TX_RX_MAX_BLKS_IET,
+ port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+ } else if (iet->original_max_blks) {
+ /* restore RX & TX FIFO MAX_BLKS */
+ writel(iet->original_max_blks,
+ port->port_base + AM65_CPSW_PN_REG_MAX_BLKS);
+ }
+
+ am65_cpsw_port_iet_rx_enable(port, cfg->pmac_enabled);
+ am65_cpsw_port_iet_tx_enable(port, cfg->tx_enabled);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ if (cfg->verify_enabled) {
+ val &= ~AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
+ /* Reset Verify state machine. Verification won't start here.
+		 * Verification will be done once the link is up.
+ */
+ val |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ } else {
+ val |= AM65_CPSW_PN_IET_MAC_DISABLEVERIFY;
+ /* Clear LINKFAIL to allow verify/response packets */
+ val &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ }
+
+ val &= ~AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK;
+ val |= AM65_CPSW_PN_IET_MAC_SET_ADDFRAGSIZE(add_frag_size);
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ /* verify_timeout_count can only be set at valid link */
+ port->qos.iet.verify_time_ms = cfg->verify_time;
+
+ /* enable/disable preemption based on link status */
+ am65_cpsw_iet_commit_preemptible_tcs(port);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static void am65_cpsw_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ void __iomem *base = port->stat_base;
+
+ s->MACMergeFrameAssOkCount = readl(base + AM65_CPSW_STATN_IET_RX_ASSEMBLY_OK);
+ s->MACMergeFrameAssErrorCount = readl(base + AM65_CPSW_STATN_IET_RX_ASSEMBLY_ERROR);
+ s->MACMergeFrameSmdErrorCount = readl(base + AM65_CPSW_STATN_IET_RX_SMD_ERROR);
+ /* CPSW Functional Spec states:
+ * "The IET stat aMACMergeFragCountRx is derived by adding the
+ * Receive Assembly Error count to this value. i.e. AM65_CPSW_STATN_IET_RX_FRAG"
+ */
+ s->MACMergeFragCountRx = readl(base + AM65_CPSW_STATN_IET_RX_FRAG) + s->MACMergeFrameAssErrorCount;
+ s->MACMergeFragCountTx = readl(base + AM65_CPSW_STATN_IET_TX_FRAG);
+ s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
+}
+
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ tx_chn = &common->tx_chns[0];
+
+ coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &common->tx_chns[queue];
+
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ tx_chn = &common->tx_chns[0];
+
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ return -EINVAL;
+
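+ /* pace timeouts are stored in ns; note the global op programs TX
+ * pacing on channel 0 only, the per-queue op handles the rest
+ */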
+ common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+ tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+
+ if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &common->tx_chns[queue];
+
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
+ dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
+ queue);
+ coal->tx_coalesce_usecs = 20;
+ }
+
+ tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
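+
+/* Illustrative userspace usage of the pacing knobs added above:
+ * ethtool -C eth0 rx-usecs 50 tx-usecs 50
+ * ethtool -Q eth0 queue_mask 0x2 --coalesce tx-usecs 100
+ */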
+
const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
@@ -729,9 +992,15 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_sset_count = am65_cpsw_get_sset_count,
.get_strings = am65_cpsw_get_strings,
.get_ethtool_stats = am65_cpsw_get_ethtool_stats,
+ .get_eth_mac_stats = am65_cpsw_get_eth_mac_stats,
.get_ts_info = am65_cpsw_get_ethtool_ts_info,
.get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
.set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_coalesce = am65_cpsw_get_coalesce,
+ .set_coalesce = am65_cpsw_set_coalesce,
+ .get_per_queue_coalesce = am65_cpsw_get_per_queue_coalesce,
+ .set_per_queue_coalesce = am65_cpsw_set_per_queue_coalesce,
.get_link = ethtool_op_get_link,
.get_link_ksettings = am65_cpsw_get_link_ksettings,
@@ -743,4 +1012,7 @@ const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.get_eee = am65_cpsw_get_eee,
.set_eee = am65_cpsw_set_eee,
.nway_reset = am65_cpsw_nway_reset,
+ .get_mm = am65_cpsw_get_mm,
+ .set_mm = am65_cpsw_set_mm,
+ .get_mm_stats = am65_cpsw_get_mm_stats,
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index f7a6d720e..1d00e2180 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -136,6 +136,8 @@
NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
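+/* default to the maximum of 8 TX DMA channels, presumably so multi-queue
+ * features (mqprio/taprio) work without manual queue reconfiguration
+ */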
+#define AM65_CPSW_DEFAULT_TX_CHNS 8
+
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
{
@@ -292,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
txqueue,
netif_tx_queue_stopped(netif_txq),
jiffies_to_msecs(jiffies - trans_start),
- dql_avail(&netif_txq->dql),
+ netdev_queue_dql_avail(netif_txq),
k3_cppi_desc_pool_avail(tx_chn->desc_pool));
if (netif_tx_queue_stopped(netif_txq)) {
@@ -367,10 +369,81 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
- int port_idx, i, ret;
+ int port_idx, i, ret, tx;
struct sk_buff *skb;
u32 val, port_mask;
@@ -437,8 +510,12 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
AM65_CPSW_MAX_PACKET_SIZE,
GFP_KERNEL);
if (!skb) {
+ ret = -ENOMEM;
dev_err(common->dev, "cannot allocate skb\n");
- return -ENOMEM;
+ if (i)
+ goto fail_rx;
+
+ return ret;
}
ret = am65_cpsw_nuss_rx_push(common, skb);
@@ -447,17 +524,28 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
"cannot submit skb to channel rx, error %d\n",
ret);
kfree_skb(skb);
+ if (i)
+ goto fail_rx;
+
return ret;
}
- kmemleak_not_leak(skb);
}
- k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
- for (i = 0; i < common->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
- if (ret)
- return ret;
- napi_enable(&common->tx_chns[i].napi_tx);
+ ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+ goto fail_rx;
+ }
+
+ for (tx = 0; tx < common->tx_ch_num; tx++) {
+ ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
+ if (ret) {
+ dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
+ tx, ret);
+ tx--;
+ goto fail_tx;
+ }
+ napi_enable(&common->tx_chns[tx].napi_tx);
}
napi_enable(&common->napi_rx);
@@ -468,10 +556,22 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
-}
-static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
-static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+fail_tx:
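+ /* unwind the TX channels enabled so far, newest first */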
+ while (tx >= 0) {
+ napi_disable(&common->tx_chns[tx].napi_tx);
+ k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
+ tx--;
+ }
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+
+fail_rx:
+ k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
+ &common->rx_chns,
+ am65_cpsw_nuss_rx_cleanup, 0);
+ return ret;
+}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
@@ -496,8 +596,10 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "tx timeout\n");
- for (i = 0; i < common->tx_ch_num; i++)
+ for (i = 0; i < common->tx_ch_num; i++) {
napi_disable(&common->tx_chns[i].napi_tx);
+ hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
+ }
for (i = 0; i < common->tx_ch_num; i++) {
k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
@@ -516,6 +618,7 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
}
napi_disable(&common->napi_rx);
+ hrtimer_cancel(&common->rx_hrtimer);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
@@ -646,27 +749,6 @@ runtime_put:
return ret;
}
-static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct am65_cpsw_rx_chn *rx_chn = data;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
-}
-
static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
{
struct skb_shared_hwtstamps *ssh;
@@ -806,6 +888,15 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
return ret;
}
+static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
+{
+ struct am65_cpsw_common *common =
+ container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+
+ enable_irq(common->rx_chns.irq);
+ return HRTIMER_NORESTART;
+}
+
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
@@ -833,63 +924,19 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (common->rx_irq_disabled) {
common->rx_irq_disabled = false;
- enable_irq(common->rx_chns.irq);
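+ /* rx-usecs pacing: defer the IRQ re-enable with an hrtimer */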
+ if (unlikely(common->rx_pace_timeout)) {
+ hrtimer_start(&common->rx_hrtimer,
+ ns_to_ktime(common->rx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(common->rx_chns.irq);
+ }
}
}
return num_rx;
}
-static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct cppi5_host_desc_t *desc)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc;
- dma_addr_t buf_dma, next_desc_dma;
- u32 buf_dma_len;
-
- first_desc = desc;
- next_desc = first_desc;
-
- cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
- while (next_desc_dma) {
- next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- next_desc_dma);
- cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- }
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
-}
-
-static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct am65_cpsw_tx_chn *tx_chn = data;
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- dev_kfree_skb_any(skb);
-}
-
static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
dma_addr_t desc_dma)
@@ -939,7 +986,7 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
}
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
- int chn, unsigned int budget)
+ int chn, unsigned int budget, bool *tdown)
{
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
@@ -962,6 +1009,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
+ *tdown = true;
break;
}
@@ -984,7 +1032,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
}
static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget)
+ int chn, unsigned int budget, bool *tdown)
{
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
@@ -1005,6 +1053,7 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
+ *tdown = true;
break;
}
@@ -1030,21 +1079,40 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
return num_tx;
}
+static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
+{
+ struct am65_cpsw_tx_chn *tx_chns =
+ container_of(timer, struct am65_cpsw_tx_chn, tx_hrtimer);
+
+ enable_irq(tx_chns->irq);
+ return HRTIMER_NORESTART;
+}
+
static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
+ bool tdown = false;
int num_tx;
if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
+ num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
+ budget, &tdown);
else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
- if (napi_complete_done(napi_tx, num_tx))
- enable_irq(tx_chn->irq);
+ if (napi_complete_done(napi_tx, num_tx)) {
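+ /* don't pace the IRQ re-enable after a teardown event, so the
+ * tdown completion isn't delayed
+ */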
+ if (unlikely(tx_chn->tx_pace_timeout && !tdown)) {
+ hrtimer_start(&tx_chn->tx_hrtimer,
+ ns_to_ktime(tx_chn->tx_pace_timeout),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(tx_chn->irq);
+ }
+ }
return 0;
}
@@ -1676,6 +1744,8 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll);
+ hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -1901,6 +1971,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
netif_napi_add(common->dma_ndev, &common->napi_rx,
am65_cpsw_nuss_rx_poll);
+ hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
ret = devm_request_irq(dev, rx_chn->irq,
am65_cpsw_nuss_rx_irq,
@@ -2098,6 +2170,9 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
dev_err(dev, "Use random MAC address\n");
}
}
+
+ /* Reset all Queue priorities to 0 */
+ writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
}
of_node_put(node);
@@ -2162,6 +2237,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
ndev_priv = netdev_priv(port->ndev);
ndev_priv->port = port;
ndev_priv->msg_enable = AM65_CPSW_DEBUG;
+ mutex_init(&ndev_priv->mm_lock);
+ port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -2716,6 +2793,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret = 0, i;
@@ -2728,6 +2807,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
if (ret)
return ret;
+ /* The DMA Channels are not guaranteed to be in a clean state.
+ * Reset and disable them to ensure that they are back to the
+ * clean state and ready to be used.
+ */
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
@@ -2898,7 +2993,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
- common->tx_ch_num = 1;
+ common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->pf_p0_rx_ptype_rrobin = false;
common->default_vlan = 1;
@@ -3000,7 +3095,7 @@ err_pm_clear:
return ret;
}
-static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+static void am65_cpsw_nuss_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct am65_cpsw_common *common;
@@ -3009,8 +3104,14 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
common = dev_get_drvdata(dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ /* Note, if this error path is taken, we're leaking some
+ * resources.
+ */
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
@@ -3028,7 +3129,6 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int am65_cpsw_nuss_suspend(struct device *dev)
@@ -3128,7 +3228,7 @@ static struct platform_driver am65_cpsw_nuss_driver = {
.pm = &am65_cpsw_nuss_dev_pm_ops,
},
.probe = am65_cpsw_nuss_probe,
- .remove = am65_cpsw_nuss_remove,
+ .remove_new = am65_cpsw_nuss_remove,
};
module_platform_driver(am65_cpsw_nuss_driver);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index f3dad2ab9..7da0492dc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -75,6 +75,8 @@ struct am65_cpsw_tx_chn {
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_tx_channel *tx_chn;
spinlock_t lock; /* protect TX rings in multi-port mode */
+ struct hrtimer tx_hrtimer;
+ unsigned long tx_pace_timeout;
int irq;
u32 id;
u32 descs_num;
@@ -138,6 +140,8 @@ struct am65_cpsw_common {
struct napi_struct napi_rx;
bool rx_irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -145,6 +149,7 @@ struct am65_cpsw_common {
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
+ bool iet_enabled;
bool is_emac_mode;
u16 br_members;
@@ -170,6 +175,10 @@ struct am65_cpsw_ndev_priv {
struct am65_cpsw_port *port;
struct am65_cpsw_ndev_stats __percpu *stats;
bool offload_fwd_mark;
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
};
#define am65_ndev_to_priv(ndev) \
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 9ac2ff05d..816e73a3d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -4,10 +4,13 @@
*
* quality of service module includes:
* Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
+ * Interspersed Express Traffic (IET - P802.3br/D2.0)
*/
#include <linux/pm_runtime.h>
+#include <linux/math.h>
#include <linux/time.h>
+#include <linux/units.h>
#include <net/pkt_cls.h>
#include "am65-cpsw-nuss.h"
@@ -15,40 +18,7 @@
#include "am65-cpts.h"
#include "cpsw_ale.h"
-#define AM65_CPSW_REG_CTL 0x004
-#define AM65_CPSW_PN_REG_CTL 0x004
-#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
-#define AM65_CPSW_PN_REG_EST_CTL 0x060
-#define AM65_CPSW_PN_REG_PRI_CIR(pri) (0x140 + 4 * (pri))
-
-/* AM65_CPSW_REG_CTL register fields */
-#define AM65_CPSW_CTL_EST_EN BIT(18)
-
-/* AM65_CPSW_PN_REG_CTL register fields */
-#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
-
-/* AM65_CPSW_PN_REG_EST_CTL register fields */
-#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
-#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
-#define AM65_CPSW_PN_EST_TS_EN BIT(2)
-#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
-#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
-#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
-
-/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
-#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
-#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
-#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
-#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
-#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
-
-/* EST FETCH COMMAND RAM */
-#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
-#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
-#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
-#define AM65_CPSW_FETCH_CNT_OFFSET 8
-#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
-#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
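+/* tc passes rates in bytes/s; convert to Mbit/s, rounding up */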
+#define TO_MBPS(x) DIV_ROUND_UP((x), BYTES_PER_MBIT)
enum timer_act {
TACT_PROG, /* need program timer */
@@ -56,6 +26,412 @@ enum timer_act {
TACT_SKIP_PROG, /* just buffer can be updated */
};
+static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);
+
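+/* Scale an Mbps rate to the port shaper's CIR/EIR register units; the
+ * register granularity appears to be bus_freq / 32768, hence the factor.
+ */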
+static u32
+am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
+{
+ u32 ir;
+
+ bus_freq /= 1000000;
+ ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
+ return ir;
+}
+
+static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
+{
+ int prio;
+
+ for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
+ writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
+ writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
+ }
+}
+
+static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct am65_cpsw_common *common = port->common;
+ struct tc_mqprio_qopt_offload *mqprio;
+ bool enable, shaper_susp = false;
+ u32 rate_mbps;
+ int tc, prio;
+
+ mqprio = &p_mqprio->mqprio_hw;
+ /* takes care of no link case as well */
+ if (p_mqprio->max_rate_total > port->qos.link_speed)
+ shaper_susp = true;
+
+ am65_cpsw_tx_pn_shaper_reset(port);
+
+ enable = p_mqprio->shaper_en && !shaper_susp;
+ if (!enable)
+ return;
+
+ /* Rate limit is specified per Traffic Class but
+ * for CPSW, rate limit can be applied per priority
+ * at port FIFO.
+ *
+ * We have assigned the same priority (TCn) to all queues
+ * of a Traffic Class so they share the same shaper
+ * bandwidth.
+ */
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ prio = tc;
+
+ rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
+ rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+ common->bus_freq);
+ writel(rate_mbps,
+ port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
+
+ rate_mbps = 0;
+
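+ /* EIR programs the excess rate above CIR, hence max - min */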
+ if (mqprio->max_rate[tc]) {
+ rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
+ rate_mbps = TO_MBPS(rate_mbps);
+ rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
+ common->bus_freq);
+ }
+
+ writel(rate_mbps,
+ port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
+ }
+}
+
+static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct netlink_ext_ack *extack = mqprio->extack;
+ u64 min_rate_total = 0, max_rate_total = 0;
+ u32 min_rate_msk = 0, max_rate_msk = 0;
+ bool has_min_rate, has_max_rate;
+ int num_tc, i;
+
+ if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
+ return 0;
+
+ if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+ return 0;
+
+ has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
+ has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
+
+ if (!has_min_rate && has_max_rate) {
+ NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
+ return -EOPNOTSUPP;
+ }
+
+ if (!has_min_rate)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
+
+ for (i = num_tc - 1; i >= 0; i--) {
+ u32 ch_msk;
+
+ if (mqprio->min_rate[i])
+ min_rate_msk |= BIT(i);
+ min_rate_total += mqprio->min_rate[i];
+
+ if (has_max_rate) {
+ if (mqprio->max_rate[i])
+ max_rate_msk |= BIT(i);
+ max_rate_total += mqprio->max_rate[i];
+
+ if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TX tc%d rate max>0 but min=0",
+ i);
+ return -EINVAL;
+ }
+
+ if (mqprio->max_rate[i] &&
+ mqprio->max_rate[i] < mqprio->min_rate[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TX tc%d rate min(%llu)>max(%llu)",
+ i, mqprio->min_rate[i],
+ mqprio->max_rate[i]);
+ return -EINVAL;
+ }
+ }
+
+ ch_msk = GENMASK(num_tc - 1, i);
+ if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Min rate must be set sequentially hi->lo tx_rate_msk%x",
+ min_rate_msk);
+ return -EINVAL;
+ }
+
+ if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Max rate must be set sequentially hi->lo tx_rate_msk%x",
+ max_rate_msk);
+ return -EINVAL;
+ }
+ }
+
+ min_rate_total = TO_MBPS(min_rate_total);
+ max_rate_total = TO_MBPS(max_rate_total);
+
+ p_mqprio->shaper_en = true;
+ p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);
+
+ return 0;
+}
+
+static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+
+ p_mqprio->shaper_en = false;
+ p_mqprio->max_rate_total = 0;
+
+ am65_cpsw_tx_pn_shaper_reset(port);
+ netdev_reset_tc(ndev);
+
+ /* Reset all Queue priorities to 0 */
+ writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
+
+ am65_cpsw_iet_change_preemptible_tcs(port, 0);
+}
+
+static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct am65_cpsw_common *common = port->common;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ int i, tc, offset, count, prio, ret;
+ u8 num_tc = qopt->num_tc;
+ u32 tx_prio_map = 0;
+
+ memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));
+
+ ret = pm_runtime_get_sync(common->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(common->dev);
+ return ret;
+ }
+
+ if (!num_tc) {
+ am65_cpsw_reset_tc_mqprio(ndev);
+ ret = 0;
+ goto exit_put;
+ }
+
+ ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
+ if (ret)
+ goto exit_put;
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ /* Multiple Linux priorities can map to a Traffic Class
+ * A Traffic Class can have multiple contiguous Queues,
+ * Queues get mapped to Channels (thread_id),
+ * if not VLAN tagged, thread_id is used as packet_priority;
+ * if VLAN tagged, the VLAN priority is used as packet_priority.
+ * packet_priority gets mapped to header_priority in p0_rx_pri_map,
+ * header_priority gets mapped to switch_priority in pn_tx_pri_map.
+ * As p0_rx_pri_map is left at defaults (0x76543210), we can
+ * assume that Queue_n gets mapped to header_priority_n. We can then
+ * set the switch priority in pn_tx_pri_map.
+ */
+
+ for (tc = 0; tc < num_tc; tc++) {
+ prio = tc;
+
+ /* For simplicity we assign the same priority (TCn) to
+ * all queues of a Traffic Class.
+ */
+ for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
+ tx_prio_map |= prio << (4 * i);
+
+ count = qopt->count[tc];
+ offset = qopt->offset[tc];
+ netdev_set_tc_queue(ndev, tc, count, offset);
+ }
+
+ writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);
+
+ am65_cpsw_tx_pn_shaper_apply(port);
+ am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);
+
+exit_put:
+ pm_runtime_put(common->dev);
+
+ return ret;
+}
+
+static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
+{
+ int verify_time_ms = port->qos.iet.verify_time_ms;
+ u32 val;
+
+ /* The number of wireside clocks contained in the verify
+ * timeout counter. The default is 0x1312d0
+ * (10 ms at 125 MHz in 1G mode).
+ */
+ val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */
+
+ val /= MILLIHZ_PER_HZ; /* count per ms timeout */
+ val *= verify_time_ms; /* count for timeout ms */
+
+ if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
+ return -EINVAL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);
+
+ return 0;
+}
+
+static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
+{
+ u32 ctrl, status;
+ int try;
+
+ try = 20;
+ do {
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ /* Clear MAC_LINKFAIL bit to start Verify. */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ msleep(port->qos.iet.verify_time_ms);
+
+ status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
+ if (status & AM65_CPSW_PN_MAC_VERIFIED)
+ return 0;
+
+ if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
+ netdev_dbg(port->ndev,
+ "MAC Merge verify failed, trying again\n");
+ continue;
+ }
+
+ if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
+ netdev_dbg(port->ndev, "MAC Merge respond error\n");
+ return -ENODEV;
+ }
+
+ if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
+ netdev_dbg(port->ndev, "MAC Merge verify error\n");
+ return -ENODEV;
+ }
+ } while (try-- > 0);
+
+ netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
+ val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
+ writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+}
+
+/* Enable the common IET_ENABLE bit only if at least one port has RX IET
+ * enabled; the UAPI doesn't allow enabling TX without RX.
+ */
+void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ bool rx_enable = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
+ if (rx_enable)
+ break;
+ }
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (rx_enable)
+ val |= AM65_CPSW_CTL_IET_EN;
+ else
+ val &= ~AM65_CPSW_CTL_IET_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->iet_enabled = rx_enable;
+}
+
+/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
+ * (active/inactive), but the preemptible traffic classes should only be
+ * committed to hardware once TX is active. Resort to polling.
+ */
+void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
+{
+ u8 preemptible_tcs;
+ int err;
+ u32 val;
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
+ return;
+
+ /* update common IET enable */
+ am65_cpsw_iet_common_enable(port->common);
+
+ /* update verify count */
+ err = am65_cpsw_iet_set_verify_timeout_count(port);
+ if (err) {
+ netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
+ return;
+ }
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
+ err = am65_cpsw_iet_verify_wait(port);
+ if (err)
+ return;
+ }
+
+ preemptible_tcs = port->qos.iet.preemptible_tcs;
+ am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
+}
+
+static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+
+ port->qos.iet.preemptible_tcs = preemptible_tcs;
+ mutex_lock(&priv->mm_lock);
+ am65_cpsw_iet_commit_preemptible_tcs(port);
+ mutex_unlock(&priv->mm_lock);
+}
+
+static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ mutex_lock(&priv->mm_lock);
+ am65_cpsw_iet_commit_preemptible_tcs(port);
+ mutex_unlock(&priv->mm_lock);
+}
+
static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
return port->qos.est_oper || port->qos.est_admin;
@@ -428,7 +804,7 @@ static void am65_cpsw_stop_est(struct net_device *ndev)
am65_cpsw_timer_stop(ndev);
}
-static void am65_cpsw_purge_est(struct net_device *ndev)
+static void am65_cpsw_taprio_destroy(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
@@ -439,31 +815,74 @@ static void am65_cpsw_purge_est(struct net_device *ndev)
port->qos.est_oper = NULL;
port->qos.est_admin = NULL;
+
+ am65_cpsw_reset_tc_mqprio(ndev);
}
-static int am65_cpsw_configure_taprio(struct net_device *ndev,
- struct am65_cpsw_est *est_new)
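+/* struct-copy the offload, then copy the trailing flexible array of entries */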
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_taprio_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct netlink_ext_ack *extack = taprio->mqprio.extack;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = common->cpts;
- int ret = 0, tact = TACT_PROG;
+ struct am65_cpsw_est *est_new;
+ int ret, tact;
- am65_cpsw_est_update_state(ndev);
+ if (!netif_running(ndev)) {
+ NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
+ return -ENETDOWN;
+ }
- if (est_new->taprio.cmd == TAPRIO_CMD_DESTROY) {
- am65_cpsw_stop_est(ndev);
- return ret;
+ if (common->pf_p0_rx_ptype_rrobin) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
+ return -EINVAL;
}
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ if (taprio->cycle_time_extension) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "cycle time extension not supported");
+ return -EOPNOTSUPP;
+ }
+
+ est_new = devm_kzalloc(&ndev->dev,
+ struct_size(est_new, taprio.entries, taprio->num_entries),
+ GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
+ if (ret)
+ return ret;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+
+ am65_cpsw_est_update_state(ndev);
+
ret = am65_cpsw_est_check_scheds(ndev, est_new);
if (ret < 0)
- return ret;
+ goto fail;
tact = am65_cpsw_timer_act(ndev, est_new);
if (tact == TACT_NEED_STOP) {
- dev_err(&ndev->dev,
- "Can't toggle estf timer, stop taprio first");
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't toggle estf timer, stop taprio first");
+ ret = -EINVAL;
+ goto fail;
}
if (tact == TACT_PROG)
@@ -476,62 +895,26 @@ static int am65_cpsw_configure_taprio(struct net_device *ndev,
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
- am65_cpsw_est_set(ndev, est_new->taprio.cmd == TAPRIO_CMD_REPLACE);
+ am65_cpsw_est_set(ndev, 1);
if (tact == TACT_PROG) {
ret = am65_cpsw_timer_set(ndev, est_new);
if (ret) {
- dev_err(&ndev->dev, "Failed to set cycle time");
- return ret;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set cycle time");
+ goto fail;
}
}
- return ret;
-}
-
-static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
- struct tc_taprio_qopt_offload *to)
-{
- int i;
-
- *to = *from;
- for (i = 0; i < from->num_entries; i++)
- to->entries[i] = from->entries[i];
-}
-
-static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- struct tc_taprio_qopt_offload *taprio = type_data;
- struct am65_cpsw_est *est_new;
- int ret = 0;
-
- if (taprio->cycle_time_extension) {
- dev_err(&ndev->dev, "Failed to set cycle time extension");
- return -EOPNOTSUPP;
- }
-
- est_new = devm_kzalloc(&ndev->dev,
- struct_size(est_new, taprio.entries, taprio->num_entries),
- GFP_KERNEL);
- if (!est_new)
- return -ENOMEM;
-
- am65_cpsw_cp_taprio(taprio, &est_new->taprio);
- ret = am65_cpsw_configure_taprio(ndev, est_new);
- if (!ret) {
- if (taprio->cmd == TAPRIO_CMD_REPLACE) {
- devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+ port->qos.est_admin = est_new;
+ am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);
- port->qos.est_admin = est_new;
- } else {
- devm_kfree(&ndev->dev, est_new);
- am65_cpsw_purge_est(ndev);
- }
- } else {
- devm_kfree(&ndev->dev, est_new);
- }
+ return 0;
+fail:
+ am65_cpsw_reset_tc_mqprio(ndev);
+ devm_kfree(&ndev->dev, est_new);
return ret;
}
@@ -541,7 +924,6 @@ static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
ktime_t cur_time;
s64 delta;
- port->qos.link_speed = link_speed;
if (!am65_cpsw_port_est_enabled(port))
return;
@@ -558,37 +940,26 @@ static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
return;
purge_est:
- am65_cpsw_purge_est(ndev);
+ am65_cpsw_taprio_destroy(ndev);
}
static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct tc_taprio_qopt_offload *taprio = type_data;
- struct am65_cpsw_common *common = port->common;
-
- if (taprio->cmd != TAPRIO_CMD_REPLACE &&
- taprio->cmd != TAPRIO_CMD_DESTROY)
- return -EOPNOTSUPP;
-
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return -ENODEV;
-
- if (!netif_running(ndev)) {
- dev_err(&ndev->dev, "interface is down, link speed unknown\n");
- return -ENETDOWN;
- }
-
- if (common->pf_p0_rx_ptype_rrobin) {
- dev_err(&ndev->dev,
- "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
- return -EINVAL;
+ int err = 0;
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = am65_cpsw_taprio_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ am65_cpsw_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
}
- if (port->qos.link_speed == SPEED_UNKNOWN)
- return -ENOLINK;
-
- return am65_cpsw_set_taprio(ndev, type_data);
+ return err;
}
static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
@@ -596,12 +967,17 @@ static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
struct tc_query_caps_base *base = type_data;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
+
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return -EOPNOTSUPP;
-
caps->gate_mask_per_txq = true;
return 0;
@@ -787,55 +1163,6 @@ static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_blo
port, port, true);
}
-int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_QUERY_CAPS:
- return am65_cpsw_tc_query_caps(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return am65_cpsw_setup_taprio(ndev, type_data);
- case TC_SETUP_BLOCK:
- return am65_cpsw_qos_setup_tc_block(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
-
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return;
-
- am65_cpsw_est_link_up(ndev, link_speed);
- port->qos.link_down_time = 0;
-}
-
-void am65_cpsw_qos_link_down(struct net_device *ndev)
-{
- struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
-
- if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
- return;
-
- if (!port->qos.link_down_time)
- port->qos.link_down_time = ktime_get();
-
- port->qos.link_speed = SPEED_UNKNOWN;
-}
-
-static u32
-am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
-{
- u32 ir;
-
- bus_freq /= 1000000;
- ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);
- return ir;
-}
-
static void
am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
int tx_ch, u32 rate_mbps)
@@ -937,3 +1264,44 @@ void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
}
}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return am65_cpsw_tc_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return am65_cpsw_setup_mqprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ port->qos.link_speed = link_speed;
+ am65_cpsw_tx_pn_shaper_apply(port);
+ am65_cpsw_iet_link_state_update(ndev);
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+ am65_cpsw_tx_pn_shaper_apply(port);
+ am65_cpsw_iet_link_state_update(ndev);
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
index 0cc2a3b3d..b328e56c5 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -9,6 +9,7 @@
#include <net/pkt_sched.h>
struct am65_cpsw_common;
+struct am65_cpsw_port;
struct am65_cpsw_est {
int buf;
@@ -16,6 +17,18 @@ struct am65_cpsw_est {
struct tc_taprio_qopt_offload taprio;
};
+struct am65_cpsw_mqprio {
+ struct tc_mqprio_qopt_offload mqprio_hw;
+ u64 max_rate_total;
+ bool shaper_en;
+};
+
+struct am65_cpsw_iet {
+ u8 preemptible_tcs;
+ u32 original_max_blks;
+ int verify_time_ms;
+};
+
struct am65_cpsw_ale_ratelimit {
unsigned long cookie;
u64 rate_packet_ps;
@@ -26,16 +39,189 @@ struct am65_cpsw_qos {
struct am65_cpsw_est *est_oper;
ktime_t link_down_time;
int link_speed;
+ struct am65_cpsw_mqprio mqprio;
+ struct am65_cpsw_iet iet;
struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
};
+
+#if IS_ENABLED(CONFIG_TI_AM65_CPSW_QOS)
int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
void am65_cpsw_qos_link_down(struct net_device *ndev);
int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev, int queue, u32 rate_mbps);
void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common);
+void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port);
+void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common);
+#else
+static inline int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void am65_cpsw_qos_link_up(struct net_device *ndev,
+ int link_speed)
+{ }
+
+static inline void am65_cpsw_qos_link_down(struct net_device *ndev)
+{ }
+
+static inline int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
+ int queue,
+ u32 rate_mbps)
+{
+ return 0;
+}
+
+static inline void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
+{ }
+static inline void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
+{ }
+static inline void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
+{ }
+#endif
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_MAX_BLKS 0x008
+#define AM65_CPSW_PN_REG_TX_PRI_MAP 0x018
+#define AM65_CPSW_PN_REG_RX_PRI_MAP 0x020
+#define AM65_CPSW_PN_REG_IET_CTRL 0x040
+#define AM65_CPSW_PN_REG_IET_STATUS 0x044
+#define AM65_CPSW_PN_REG_IET_VERIFY 0x048
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+#define AM65_CPSW_PN_REG_PRI_CIR(pri) (0x140 + 4 * (pri))
+#define AM65_CPSW_PN_REG_PRI_EIR(pri) (0x160 + 4 * (pri))
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_IET_EN BIT(17)
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_IET_PORT_EN BIT(16)
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_IET_CTRL register fields */
+#define AM65_CPSW_PN_IET_MAC_PENABLE BIT(0)
+#define AM65_CPSW_PN_IET_MAC_DISABLEVERIFY BIT(2)
+#define AM65_CPSW_PN_IET_MAC_LINKFAIL BIT(3)
+#define AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK GENMASK(10, 8)
+#define AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET 8
+#define AM65_CPSW_PN_IET_MAC_PREMPT_MASK GENMASK(23, 16)
+#define AM65_CPSW_PN_IET_MAC_PREMPT_OFFSET 16
+
+#define AM65_CPSW_PN_IET_MAC_SET_ADDFRAGSIZE(n) (((n) << AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET) & \
+ AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK)
+#define AM65_CPSW_PN_IET_MAC_GET_ADDFRAGSIZE(n) (((n) & AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_MASK) >> \
+ AM65_CPSW_PN_IET_MAC_MAC_ADDFRAGSIZE_OFFSET)
+#define AM65_CPSW_PN_IET_MAC_SET_PREEMPT(n) (((n) << AM65_CPSW_PN_IET_MAC_PREMPT_OFFSET) & \
+ AM65_CPSW_PN_IET_MAC_PREMPT_MASK)
+#define AM65_CPSW_PN_IET_MAC_GET_PREEMPT(n) (((n) & AM65_CPSW_PN_IET_MAC_PREMPT_MASK) >> \
+ AM65_CPSW_PN_IET_MAC_PREMPT_OFFSET)
+
+/* AM65_CPSW_PN_REG_IET_STATUS register fields */
+#define AM65_CPSW_PN_MAC_STATUS GENMASK(3, 0)
+#define AM65_CPSW_PN_MAC_VERIFIED BIT(0)
+#define AM65_CPSW_PN_MAC_VERIFY_FAIL BIT(1)
+#define AM65_CPSW_PN_MAC_RESPOND_ERR BIT(2)
+#define AM65_CPSW_PN_MAC_VERIFY_ERR BIT(3)
+
+/* AM65_CPSW_PN_REG_IET_VERIFY register fields */
+#define AM65_CPSW_PN_MAC_VERIFY_CNT_MASK GENMASK(23, 0)
+#define AM65_CPSW_PN_MAC_GET_VERIFY_CNT(n) ((n) & AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
+/* 10 msec converted to NSEC */
+#define AM65_CPSW_IET_VERIFY_CNT_MS (10)
+#define AM65_CPSW_IET_VERIFY_CNT_NS (AM65_CPSW_IET_VERIFY_CNT_MS * \
+ NSEC_PER_MSEC)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+/* AM65_CPSW_PN_REG_MAX_BLKS fields for IET and No IET cases */
+/* 7 blocks for pn_rx_max_blks, 13 for pn_tx_max_blks */
+#define AM65_CPSW_PN_TX_RX_MAX_BLKS_IET 0xD07
+
+/* Slave IET Stats. register offsets */
+#define AM65_CPSW_STATN_IET_RX_ASSEMBLY_ERROR 0x140
+#define AM65_CPSW_STATN_IET_RX_ASSEMBLY_OK 0x144
+#define AM65_CPSW_STATN_IET_RX_SMD_ERROR 0x148
+#define AM65_CPSW_STATN_IET_RX_FRAG 0x14c
+#define AM65_CPSW_STATN_IET_TX_HOLD 0x150
+#define AM65_CPSW_STATN_IET_TX_FRAG 0x154
+
+/* number of priority queues per port FIFO */
+#define AM65_CPSW_PN_FIFO_PRIO_NUM 8
#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c66618d91..f89716b1c 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
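+ /* PTPv1 events can carry a wrong message type from hardware, so match
+ * on the sequence id alone and assume the event belongs to this skb
+ */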
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 2ed165dcd..c0a5abd8d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1724,14 +1724,20 @@ clean_runtime_disable_ret:
return ret;
}
-static int cpsw_remove(struct platform_device *pdev)
+static void cpsw_remove(struct platform_device *pdev)
{
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int i, ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ /* Note, if this error path is taken, we're leaking some
+ * resources.
+ */
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
for (i = 0; i < cpsw->data.slaves; i++)
if (cpsw->slaves[i].ndev)
@@ -1742,7 +1748,6 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1797,7 +1802,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove = cpsw_remove,
+ .remove_new = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 9061dca97..087dcb675 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -2040,14 +2040,20 @@ clean_dt_ret:
return ret;
}
-static int cpsw_remove(struct platform_device *pdev)
+static void cpsw_remove(struct platform_device *pdev)
{
struct cpsw_common *cpsw = platform_get_drvdata(pdev);
int ret;
ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ /* Note, if this error path is taken, we're leaking some
+ * resources.
+ */
+ dev_err(&pdev->dev, "Failed to resume device (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
cpsw_unregister_notifiers(cpsw);
cpsw_unregister_devlink(cpsw);
@@ -2058,7 +2064,6 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_remove_dt(cpsw);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused cpsw_suspend(struct device *dev)
@@ -2119,7 +2124,7 @@ static struct platform_driver cpsw_driver = {
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
- .remove = cpsw_remove,
+ .remove_new = cpsw_remove,
};
module_platform_driver(cpsw_driver);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index bcccf43d3..dbbea9146 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts)
freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
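+/* typed wrappers so devm_add_action_or_reset() gets a proper
+ * void (*)(void *) callback; casting the helpers directly defeats type
+ * checking (and control-flow-integrity schemes)
+ */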
+static void cpts_clk_unregister(void *clk)
+{
+ clk_hw_unregister_mux(clk);
+}
+
+static void cpts_clk_del_provider(void *np)
+{
+ of_clk_del_provider(np);
+}
+
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
struct device_node *refclk_np;
@@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))clk_hw_unregister_mux,
- clk_hw);
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
if (ret) {
dev_err(cpts->dev, "add clkmux unreg action %d", ret);
goto mux_fail;
@@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
if (ret)
goto mux_fail;
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))of_clk_del_provider,
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
refclk_np);
if (ret) {
dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 628c87dc1..8e07d4a1b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -511,16 +511,12 @@ static const struct k3_mdio_soc_data am65_mdio_soc_data = {
};
static const struct soc_device_attribute k3_mdio_socinfo[] = {
- { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
- { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
- { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { .family = "AM62X", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .data = &am65_mdio_soc_data },
{ /* sentinel */ },
};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 411898a4f..4a78e8a1c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
if (!i)
fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq[i] <= 0) {
- ret = rx_chn->irq[i];
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret <= 0) {
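+ /* a zero return means no IRQ was found; map it to -ENXIO */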
+ if (!ret)
+ ret = -ENXIO;
netdev_err(ndev, "Failed to get rx dma irq");
goto fail;
}
+ rx_chn->irq[i] = ret;
}
return 0;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9d535ae59..c1b0d35c8 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -93,12 +93,13 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
* gelic_descr_get_status -- returns the status of a descriptor
* @descr: descriptor to look at
*
- * returns the status as in the dmac_cmd_status field of the descriptor
+ * returns the status as in the hw_regs.dmac_cmd_status field of the descriptor
*/
static enum gelic_descr_dma_status
gelic_descr_get_status(struct gelic_descr *descr)
{
- return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+ return be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
+ GELIC_DESCR_DMA_STAT_MASK;
}
static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
@@ -152,15 +153,15 @@ static void gelic_card_enable_rxdmac(struct gelic_card *card)
if (gelic_descr_get_status(card->rx_chain.head) !=
GELIC_DESCR_DMA_CARDOWNED) {
printk(KERN_ERR "%s: status=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
+ be32_to_cpu(card->rx_chain.head->hw_regs.dmac_cmd_status));
printk(KERN_ERR "%s: nextphy=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->next_descr_addr));
+ be32_to_cpu(card->rx_chain.head->hw_regs.next_descr_addr));
printk(KERN_ERR "%s: head=%p\n", __func__,
card->rx_chain.head);
}
#endif
status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
- card->rx_chain.head->bus_addr, 0);
+ card->rx_chain.head->link.cpu_addr, 0);
if (status)
dev_info(ctodev(card),
"lv1_net_start_rx_dma failed, status=%d\n", status);
@@ -195,8 +196,8 @@ static void gelic_card_disable_rxdmac(struct gelic_card *card)
static void gelic_descr_set_status(struct gelic_descr *descr,
enum gelic_descr_dma_status status)
{
- descr->dmac_cmd_status = cpu_to_be32(status |
- (be32_to_cpu(descr->dmac_cmd_status) &
+ descr->hw_regs.dmac_cmd_status = cpu_to_be32(status |
+ (be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
~GELIC_DESCR_DMA_STAT_MASK));
/*
* dma_cmd_status field is used to indicate whether the descriptor
@@ -224,13 +225,14 @@ static void gelic_card_reset_chain(struct gelic_card *card,
for (descr = start_descr; start_descr != descr->next; descr++) {
gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
- descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ descr->hw_regs.next_descr_addr
+ = cpu_to_be32(descr->next->link.cpu_addr);
}
chain->head = start_descr;
chain->tail = (descr - 1);
- (descr - 1)->next_descr_addr = 0;
+ (descr - 1)->hw_regs.next_descr_addr = 0;
}
void gelic_card_up(struct gelic_card *card)
@@ -286,10 +288,12 @@ static void gelic_card_free_chain(struct gelic_card *card,
{
struct gelic_descr *descr;
- for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) {
- dma_unmap_single(ctodev(card), descr->bus_addr,
- GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
- descr->bus_addr = 0;
+ for (descr = descr_in; descr && descr->link.cpu_addr;
+ descr = descr->next) {
+ dma_unmap_single(ctodev(card), descr->link.cpu_addr,
+ descr->link.size, DMA_BIDIRECTIONAL);
+ descr->link.cpu_addr = 0;
+ descr->link.size = 0;
}
}
@@ -317,17 +321,21 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
- dma_addr_t cpu_addr;
-
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- cpu_addr = dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+ descr->link.size = sizeof(struct gelic_hw_regs);
+ descr->link.cpu_addr = dma_map_single(ctodev(card), descr,
+ descr->link.size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ctodev(card), cpu_addr))
- goto iommu_error;
+ if (dma_mapping_error(ctodev(card), descr->link.cpu_addr)) {
+ for (i--, descr--; 0 <= i; i--, descr--) {
+ dma_unmap_single(ctodev(card),
+ descr->link.cpu_addr, descr->link.size,
+ DMA_BIDIRECTIONAL);
+ }
+ return -ENOMEM;
+ }
- descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -338,24 +346,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
/* chain bus addr of hw descriptor */
descr = start_descr;
for (i = 0; i < no; i++, descr++) {
- descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ descr->hw_regs.next_descr_addr =
+ cpu_to_be32(descr->next->link.cpu_addr);
}
chain->head = start_descr;
chain->tail = start_descr;
/* do not chain last hw descriptor */
- (descr - 1)->next_descr_addr = 0;
+ (descr - 1)->hw_regs.next_descr_addr = 0;
return 0;
-
-iommu_error:
- for (i--, descr--; 0 <= i; i--, descr--)
- if (descr->bus_addr)
- dma_unmap_single(ctodev(card), descr->bus_addr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);
- return -ENOMEM;
}
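Folding the removed iommu_error: label into the mapping loop keeps the unwind next to the failure it handles: on dma_mapping_error(), walk back over the descriptors already mapped and unmap them in reverse before returning -ENOMEM. The bare idiom, hedged, with hypothetical map_one()/unmap_one() in place of the dma_map_single()/dma_unmap_single() pairs above:

#include <linux/errno.h>

int map_one(struct gelic_descr *descr);		/* hypothetical */
void unmap_one(struct gelic_descr *descr);	/* hypothetical */

static int map_all(struct gelic_descr *descr, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (map_one(&descr[i]) < 0) {
			while (--i >= 0)	/* undo only what succeeded */
				unmap_one(&descr[i]);
			return -ENOMEM;
		}
	}

	return 0;
}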
/**
@@ -383,16 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
+ descr->hw_regs.dmac_cmd_status = 0;
+ descr->hw_regs.result_size = 0;
+ descr->hw_regs.valid_size = 0;
+ descr->hw_regs.data_error = 0;
+ descr->hw_regs.payload.dev_addr = 0;
+ descr->hw_regs.payload.size = 0;
+
descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
- descr->buf_addr = 0; /* tell DMAC don't touch memory */
+ descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(rx_skb_size);
- descr->dmac_cmd_status = 0;
- descr->result_size = 0;
- descr->valid_size = 0;
- descr->data_error = 0;
offset = ((unsigned long)descr->skb->data) &
(GELIC_NET_RXBUF_ALIGN - 1);
@@ -401,7 +404,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
/* io-mmu-map the skb */
cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
- descr->buf_addr = cpu_to_be32(cpu_addr);
+ descr->hw_regs.payload.dev_addr = cpu_to_be32(cpu_addr);
if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
@@ -409,10 +412,14 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
"%s:Could not iommu-map rx buffer\n", __func__);
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
return -ENOMEM;
- } else {
- gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
- return 0;
}
+
+ descr->hw_regs.payload.size = cpu_to_be32(GELIC_NET_MAX_FRAME);
+ descr->hw_regs.payload.dev_addr = cpu_to_be32(cpu_addr);
+
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
+
+ return 0;
}
/**
@@ -427,14 +434,15 @@ static void gelic_card_release_rx_chain(struct gelic_card *card)
do {
if (descr->skb) {
dma_unmap_single(ctodev(card),
- be32_to_cpu(descr->buf_addr),
- descr->skb->len,
- DMA_FROM_DEVICE);
- descr->buf_addr = 0;
+ be32_to_cpu(descr->hw_regs.payload.dev_addr),
+ descr->skb->len,
+ DMA_FROM_DEVICE);
+ descr->hw_regs.payload.dev_addr = 0;
+ descr->hw_regs.payload.size = 0;
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
gelic_descr_set_status(descr,
- GELIC_DESCR_DMA_NOT_IN_USE);
+ GELIC_DESCR_DMA_NOT_IN_USE);
}
descr = descr->next;
} while (descr != card->rx_chain.head);
@@ -496,19 +504,20 @@ static void gelic_descr_release_tx(struct gelic_card *card,
{
struct sk_buff *skb = descr->skb;
- BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL));
+ BUG_ON(!(be32_to_cpu(descr->hw_regs.data_status) & GELIC_DESCR_TX_TAIL));
- dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_single(ctodev(card),
+ be32_to_cpu(descr->hw_regs.payload.dev_addr), skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
- descr->buf_addr = 0;
- descr->buf_size = 0;
- descr->next_descr_addr = 0;
- descr->result_size = 0;
- descr->valid_size = 0;
- descr->data_status = 0;
- descr->data_error = 0;
+ descr->hw_regs.payload.dev_addr = 0;
+ descr->hw_regs.payload.size = 0;
+ descr->hw_regs.next_descr_addr = 0;
+ descr->hw_regs.result_size = 0;
+ descr->hw_regs.valid_size = 0;
+ descr->hw_regs.data_status = 0;
+ descr->hw_regs.data_error = 0;
descr->skb = NULL;
/* set descr status */
@@ -701,7 +710,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
struct sk_buff *skb)
{
if (skb->ip_summed != CHECKSUM_PARTIAL)
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
else {
@@ -709,19 +718,19 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
* if yes: tcp? udp? */
if (skb->protocol == htons(ETH_P_IP)) {
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
else /*
* the stack should checksum non-tcp and non-udp
* packets on its own: NETIF_F_IP_CSUM
*/
- descr->dmac_cmd_status =
+ descr->hw_regs.dmac_cmd_status =
cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL);
}
@@ -789,11 +798,11 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
return -ENOMEM;
}
- descr->buf_addr = cpu_to_be32(buf);
- descr->buf_size = cpu_to_be32(skb->len);
+ descr->hw_regs.payload.dev_addr = cpu_to_be32(buf);
+ descr->hw_regs.payload.size = cpu_to_be32(skb->len);
descr->skb = skb;
- descr->data_status = 0;
- descr->next_descr_addr = 0; /* terminate hw descr */
+ descr->hw_regs.data_status = 0;
+ descr->hw_regs.next_descr_addr = 0; /* terminate hw descr */
gelic_descr_set_tx_cmdstat(descr, skb);
/* bump free descriptor pointer */
@@ -818,7 +827,7 @@ static int gelic_card_kick_txdma(struct gelic_card *card,
if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) {
card->tx_dma_progress = 1;
status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
- descr->bus_addr, 0);
+ descr->link.cpu_addr, 0);
if (status) {
card->tx_dma_progress = 0;
dev_info(ctodev(card), "lv1_net_start_txdma failed," \
@@ -871,7 +880,8 @@ netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* link this prepared descriptor to previous one
* to achieve high performance
*/
- descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
+ descr->prev->hw_regs.next_descr_addr =
+ cpu_to_be32(descr->link.cpu_addr);
/*
* as hardware descriptor is modified in the above lines,
* ensure that the hardware sees it
@@ -884,12 +894,12 @@ netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
*/
netdev->stats.tx_dropped++;
/* don't trigger BUG_ON() in gelic_descr_release_tx */
- descr->data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
+ descr->hw_regs.data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
gelic_descr_release_tx(card, descr);
/* reset head */
card->tx_chain.head = descr;
/* reset hw termination */
- descr->prev->next_descr_addr = 0;
+ descr->prev->hw_regs.next_descr_addr = 0;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
}
@@ -914,21 +924,21 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
struct sk_buff *skb = descr->skb;
u32 data_status, data_error;
- data_status = be32_to_cpu(descr->data_status);
- data_error = be32_to_cpu(descr->data_error);
+ data_status = be32_to_cpu(descr->hw_regs.data_status);
+ data_error = be32_to_cpu(descr->hw_regs.data_error);
/* unmap skb buffer */
- dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- skb_put(skb, be32_to_cpu(descr->valid_size)?
- be32_to_cpu(descr->valid_size) :
- be32_to_cpu(descr->result_size));
- if (!descr->valid_size)
+ dma_unmap_single(ctodev(card),
+ be32_to_cpu(descr->hw_regs.payload.dev_addr),
+ be32_to_cpu(descr->hw_regs.payload.size), DMA_FROM_DEVICE);
+
+ skb_put(skb, be32_to_cpu(descr->hw_regs.valid_size)?
+ be32_to_cpu(descr->hw_regs.valid_size) :
+ be32_to_cpu(descr->hw_regs.result_size));
+ if (!descr->hw_regs.valid_size)
dev_info(ctodev(card), "buffer full %x %x %x\n",
- be32_to_cpu(descr->result_size),
- be32_to_cpu(descr->buf_size),
- be32_to_cpu(descr->dmac_cmd_status));
+ be32_to_cpu(descr->hw_regs.result_size),
+ be32_to_cpu(descr->hw_regs.payload.size),
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status));
descr->skb = NULL;
/*
@@ -1039,14 +1049,14 @@ refill:
/* is the current descriptor terminated with next_descr == NULL? */
dmac_chain_ended =
- be32_to_cpu(descr->dmac_cmd_status) &
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
GELIC_DESCR_RX_DMA_CHAIN_END;
/*
* So that the DMAC can always see the end
* of the descriptor chain and avoid
* an unwanted DMAC overrun.
*/
- descr->next_descr_addr = 0;
+ descr->hw_regs.next_descr_addr = 0;
/* change the descriptor state: */
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
@@ -1063,7 +1073,8 @@ refill:
/*
* Set this descriptor the end of the chain.
*/
- descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
+ descr->prev->hw_regs.next_descr_addr =
+ cpu_to_be32(descr->link.cpu_addr);
/*
* If dmac chain was met, DMAC stopped.
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 0d98defb0..f7d7931e5 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -221,29 +221,35 @@ enum gelic_lv1_phy {
GELIC_LV1_PHY_ETHERNET_0 = 0x0000000000000002L,
};
-/* size of hardware part of gelic descriptor */
-#define GELIC_DESCR_SIZE (32)
-
enum gelic_port_type {
GELIC_PORT_ETHERNET_0 = 0,
GELIC_PORT_WIRELESS = 1,
GELIC_PORT_MAX
};
-struct gelic_descr {
- /* as defined by the hardware */
- __be32 buf_addr;
- __be32 buf_size;
+/* As defined by the gelic hardware device. */
+struct gelic_hw_regs {
+ struct {
+ __be32 dev_addr;
+ __be32 size;
+ } __packed payload;
__be32 next_descr_addr;
__be32 dmac_cmd_status;
__be32 result_size;
__be32 valid_size; /* all zeroes for tx */
__be32 data_status;
__be32 data_error; /* all zeroes for tx */
+} __packed;
+
+struct gelic_chain_link {
+ dma_addr_t cpu_addr;
+ unsigned int size;
+};
- /* used in the driver */
+struct gelic_descr {
+ struct gelic_hw_regs hw_regs;
+ struct gelic_chain_link link;
struct sk_buff *skb;
- dma_addr_t bus_addr;
struct gelic_descr *next;
struct gelic_descr *prev;
} __attribute__((aligned(32)));
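The restructured descriptor keeps everything the DMAC dereferences in gelic_hw_regs at offset zero (packed, 32 bytes, matching the old GELIC_DESCR_SIZE) and moves driver-only bookkeeping into gelic_chain_link and the trailing fields. A hypothetical compile-time check, not part of the patch, of the layout assumptions the mapping code relies on:

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* The address handed to the DMAC is the descriptor's own mapping, so
 * hw_regs must sit at offset 0 and keep the 32-byte size the hardware
 * expects.
 */
static inline void gelic_descr_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct gelic_descr, hw_regs) != 0);
	BUILD_BUG_ON(sizeof(struct gelic_hw_regs) != 32);
}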
@@ -346,12 +352,6 @@ static inline void *port_priv(struct gelic_port *port)
return port->priv;
}
-#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-void udbg_shutdown_ps3gelic(void);
-#else
-static inline void udbg_shutdown_ps3gelic(void) {}
-#endif
-
int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
void gelic_card_up(struct gelic_card *card);
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 23cd610bd..85cdbdd44 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -26,7 +26,7 @@ config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
select LIBWX
- select PHYLIB
+ select PHYLINK
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index ddc5f6d20..cc3bec42e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -8,6 +8,7 @@
#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
+#include "wx_lib.h"
struct wx_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -75,7 +76,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string);
+ ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -185,3 +186,238 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
}
}
EXPORT_SYMBOL(wx_get_drvinfo);
+
+int wx_nway_reset(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_nway_reset(wx->phylink);
+}
+EXPORT_SYMBOL(wx_nway_reset);
+
+int wx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(wx->phylink, cmd);
+}
+EXPORT_SYMBOL(wx_get_link_ksettings);
+
+int wx_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(wx->phylink, cmd);
+}
+EXPORT_SYMBOL(wx_set_link_ksettings);
+
+void wx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ phylink_ethtool_get_pauseparam(wx->phylink, pause);
+}
+EXPORT_SYMBOL(wx_get_pauseparam);
+
+int wx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return phylink_ethtool_set_pauseparam(wx->phylink, pause);
+}
+EXPORT_SYMBOL(wx_set_pauseparam);
+
+void wx_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ring->rx_max_pending = WX_MAX_RXD;
+ ring->tx_max_pending = WX_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = wx->rx_ring_count;
+ ring->tx_pending = wx->tx_ring_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+EXPORT_SYMBOL(wx_get_ringparam);
+
+int wx_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
+ /* only valid if in constant ITR mode */
+ if (wx->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = wx->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (wx->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = wx->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_coalesce);
+
+int wx_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u16 tx_itr_param, rx_itr_param;
+ struct wx_q_vector *q_vector;
+ u16 max_eitr;
+ int i;
+
+ if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EOPNOTSUPP;
+ }
+
+ if (ec->tx_max_coalesced_frames_irq)
+ wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if (wx->mac.type == wx_mac_sp)
+ max_eitr = WX_SP_MAX_EITR;
+ else
+ max_eitr = WX_EM_MAX_EITR;
+
+ if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
+ (ec->tx_coalesce_usecs > (max_eitr >> 2)))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs > 1)
+ wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ wx->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (wx->rx_itr_setting == 1)
+ rx_itr_param = WX_20K_ITR;
+ else
+ rx_itr_param = wx->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ wx->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (wx->tx_itr_setting == 1) {
+ if (wx->mac.type == wx_mac_sp)
+ tx_itr_param = WX_12K_ITR;
+ else
+ tx_itr_param = WX_20K_ITR;
+ } else {
+ tx_itr_param = wx->tx_itr_setting;
+ }
+
+ /* mixed Rx/Tx */
+ if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
+ wx->tx_itr_setting = wx->rx_itr_setting;
+
+ for (i = 0; i < wx->num_q_vectors; i++) {
+ q_vector = wx->q_vector[i];
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ wx_write_eitr(q_vector);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_coalesce);
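Both coalesce handlers share one encoding for the stored ITR settings: 0 means throttling off, 1 means dynamic ITR, and anything larger is the user's microsecond value pre-shifted left by two; reporting reverses the shift. A hedged round-trip sketch of that encoding (not a driver API):

#include <linux/types.h>

static u32 itr_store(u32 usecs)
{
	return usecs > 1 ? usecs << 2 : usecs;	/* keep 0/1 as mode flags */
}

static u32 itr_report(u32 stored)
{
	return stored <= 1 ? stored : stored >> 2;
}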
+
+static unsigned int wx_max_channels(struct wx *wx)
+{
+ unsigned int max_combined;
+
+ if (!wx->msix_q_entries) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else {
+ /* support up to max allowed queues with RSS */
+ if (wx->mac.type == wx_mac_sp)
+ max_combined = 63;
+ else
+ max_combined = 8;
+ }
+
+ return max_combined;
+}
+
+void wx_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = wx_max_channels(wx);
+
+ /* report info for other vector */
+ if (wx->msix_q_entries) {
+ ch->max_other = 1;
+ ch->other_count = 1;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
+}
+EXPORT_SYMBOL(wx_get_channels);
+
+int wx_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ unsigned int count = ch->combined_count;
+ struct wx *wx = netdev_priv(dev);
+
+ /* verify other_count has not changed */
+ if (ch->other_count != 1)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > wx_max_channels(wx))
+ return -EINVAL;
+
+ wx->ring_feature[RING_F_RSS].limit = count;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_channels);
+
+u32 wx_get_msglevel(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return wx->msg_enable;
+}
+EXPORT_SYMBOL(wx_get_msglevel);
+
+void wx_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ wx->msg_enable = data;
+}
+EXPORT_SYMBOL(wx_set_msglevel);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 16d1a0936..600c3b597 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -13,4 +13,31 @@ void wx_get_mac_stats(struct net_device *netdev,
void wx_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
+int wx_nway_reset(struct net_device *netdev);
+int wx_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
+int wx_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd);
+void wx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int wx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+void wx_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack);
+int wx_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+int wx_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+void wx_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch);
+int wx_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch);
+u32 wx_get_msglevel(struct net_device *netdev);
+void wx_set_msglevel(struct net_device *netdev, u32 data);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 533e912af..1db754615 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx)
int vector;
for (vector = 0; vector < wx->num_q_vectors; vector++)
- synchronize_irq(wx->msix_entries[vector].vector);
+ synchronize_irq(wx->msix_q_entries[vector].vector);
- synchronize_irq(wx->msix_entries[vector].vector);
+ synchronize_irq(wx->msix_entry->vector);
} else {
synchronize_irq(pdev->irq);
}
@@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx)
wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}
+#define WX_ETH_FRAMING 20
+
+/**
+ * wx_hpbthresh - calculate high water mark for flow control
+ *
+ * @wx: board private structure to calculate for
+ **/
+static int wx_hpbthresh(struct wx *wx)
+{
+ struct net_device *dev = wx->netdev;
+ int link, tc, kb, marker;
+ u32 dv_id, rx_pba;
+
+ /* Calculate max LAN frame size */
+ link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
+ tc = link;
+
+ /* Calculate delay value for device */
+ dv_id = WX_DV(link, tc);
+
+ * Delay value is calculated in bit times; convert to KB
+ kb = WX_BT2KB(dv_id);
+ rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;
+
+ marker = rx_pba - kb;
+
+ /* It is possible that the packet buffer is not large enough
+ * to provide the required headroom. In this case, warn
+ * the user and do the best we can.
+ */
+ if (marker < 0) {
+ dev_warn(&wx->pdev->dev,
+ "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
+ marker = tc + 1;
+ }
+
+ return marker;
+}
+
+/**
+ * wx_lpbthresh - calculate low water mark for flow control
+ *
+ * @wx: board private structure to calculate for
+ **/
+static int wx_lpbthresh(struct wx *wx)
+{
+ struct net_device *dev = wx->netdev;
+ u32 dv_id;
+ int tc;
+
+ /* Calculate max LAN frame size */
+ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Calculate delay value for device */
+ dv_id = WX_LOW_DV(tc);
+
+ * Delay value is calculated in bit times; convert to KB
+ return WX_BT2KB(dv_id);
+}
+
+/**
+ * wx_pbthresh_setup - calculate and setup high low water marks
+ *
+ * @wx: board private structure to calculate for
+ **/
+static void wx_pbthresh_setup(struct wx *wx)
+{
+ wx->fc.high_water = wx_hpbthresh(wx);
+ wx->fc.low_water = wx_lpbthresh(wx);
+
+ /* Low water marks must not be larger than high water marks */
+ if (wx->fc.low_water > wx->fc.high_water)
+ wx->fc.low_water = 0;
+}
+
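For a feel of the scale: plugging a standard 1500-byte MTU into the WX_DV/WX_B2BT/WX_BT2KB macros (defined in the wx_type.h hunk further below) yields roughly 15 KB of required headroom. The program below is this edit's own recomputation under those macro definitions, not output captured from the driver:

#include <stdio.h>

#define B2BT(x)   ((x) * 8)
#define BT2KB(bt) (((bt) + 8 * 1024 - 1) / (8 * 1024))
#define PFC_D     672				/* PFC response delay */
#define CABLE_DC  5556				/* copper cable delay */
#define HD        6144				/* higher-layer delay */
#define ID        (4096 + 2 * 1024 + 12800)	/* MAC + XAUI + PHY */
#define DV(l, t) \
	((36 * (B2BT(l) + PFC_D + 2 * CABLE_DC + 2 * ID + HD) / 25 + 1) + \
	 2 * B2BT(t))

int main(void)
{
	int link = 1500 + 14 + 4 + 20;	/* MTU + ETH_HLEN + FCS + framing */

	printf("headroom: %d KB\n", BT2KB(DV(link, link)));	/* prints 15 */
	return 0;
}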
static void wx_configure_port(struct wx *wx)
{
u32 value, i;
@@ -1522,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
+static void wx_store_reta(struct wx *wx)
+{
+ u8 *indir_tbl = wx->rss_indir_tbl;
+ u32 reta = 0;
+ u32 i;
+
+ /* Fill out the redirection table as follows:
+ * - 8 bit wide entries containing 4 bit RSS index
+ */
+ for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
+ reta |= indir_tbl[i] << (i & 0x3) * 8;
+ if ((i & 3) == 3) {
+ wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
+ reta = 0;
+ }
+ }
+}
+
+static void wx_setup_reta(struct wx *wx)
+{
+ u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 random_key_size = WX_RSS_KEY_SIZE / 4;
+ u32 i, j;
+
+ /* Fill out hash function seeds */
+ for (i = 0; i < random_key_size; i++)
+ wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
+
+ /* Fill out redirection table */
+ memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+
+ for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
+ if (j == rss_i)
+ j = 0;
+
+ wx->rss_indir_tbl[i] = j;
+ }
+
+ wx_store_reta(wx);
+}
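wx_setup_reta() fills the 128-entry indirection table round-robin over the active RSS queues, and wx_store_reta() packs four 8-bit entries into each 32-bit WX_RDB_RSSTBL register, 32 registers in all. The packing step in isolation, hedged, with write_rsstbl() standing in for the wr32(wx, WX_RDB_RSSTBL(i >> 2), ...) call:

#include <linux/types.h>

static void store_reta_sketch(const u8 *tbl,
			      void (*write_rsstbl)(u32 idx, u32 val))
{
	u32 reta = 0;
	u32 i;

	for (i = 0; i < 128; i++) {
		reta |= (u32)tbl[i] << ((i & 0x3) * 8);	/* byte lane i % 4 */
		if ((i & 3) == 3) {		/* four entries gathered */
			write_rsstbl(i >> 2, reta);	/* register i / 4 */
			reta = 0;
		}
	}
}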
+
+static void wx_setup_mrqc(struct wx *wx)
+{
+ u32 rss_field = 0;
+
+ /* Disable indicating checksum in descriptor, enables RSS hash */
+ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
+
+ /* Perform hash on these packet types */
+ rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
+ WX_RDB_RA_CTL_RSS_IPV4_TCP |
+ WX_RDB_RA_CTL_RSS_IPV4_UDP |
+ WX_RDB_RA_CTL_RSS_IPV6 |
+ WX_RDB_RA_CTL_RSS_IPV6_TCP |
+ WX_RDB_RA_CTL_RSS_IPV6_UDP;
+
+ netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+
+ wx_setup_reta(wx);
+
+ if (wx->rss_enabled)
+ rss_field |= WX_RDB_RA_CTL_RSS_EN;
+
+ wr32(wx, WX_RDB_RA_CTL, rss_field);
+}
+
/**
* wx_configure_rx - Configure Receive Unit after Reset
* @wx: pointer to private structure
@@ -1554,6 +1695,8 @@ void wx_configure_rx(struct wx *wx)
wr32(wx, WX_PSR_CTL, psrctl);
}
+ wx_setup_mrqc(wx);
+
/* set_rx_buffer_len must be called before ring initialization */
wx_set_rx_buffer_len(wx);
@@ -1584,6 +1727,7 @@ static void wx_configure_isb(struct wx *wx)
void wx_configure(struct wx *wx)
{
wx_set_rxpba(wx);
+ wx_pbthresh_setup(wx);
wx_configure_port(wx);
wx_set_rx_mode(wx->netdev);
@@ -1750,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);
+/**
+ * wx_init_rss_key - Initialize wx RSS key
+ * @wx: device handle
+ *
+ * Allocates and initializes the RSS key if it is not allocated.
+ **/
+static int wx_init_rss_key(struct wx *wx)
+{
+ u32 *rss_key;
+
+ if (!wx->rss_key) {
+ rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
+ if (unlikely(!rss_key))
+ return -ENOMEM;
+
+ netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
+ wx->rss_key = rss_key;
+ }
+
+ return 0;
+}
+
int wx_sw_init(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
@@ -1777,14 +1943,23 @@ int wx_sw_init(struct wx *wx)
wx->subsystem_device_id = swab16((u16)ssid);
}
+ err = wx_init_rss_key(wx);
+ if (err < 0) {
+ wx_err(wx, "rss key allocation failed\n");
+ return err;
+ }
+
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr),
GFP_KERNEL);
if (!wx->mac_table) {
wx_err(wx, "mac_table allocation failed\n");
+ kfree(wx->rss_key);
return -ENOMEM;
}
+ wx->msix_in_use = false;
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
@@ -2003,6 +2178,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
+static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl |= WX_PX_RR_CFG_DROP_EN;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl &= ~WX_PX_RR_CFG_DROP_EN;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
+{
+ u16 pause_time = WX_DEFAULT_FCPAUSE;
+ u32 mflcn_reg, fccfg_reg, reg;
+ u32 fcrtl, fcrth;
+ int i;
+
+ /* Low water mark of zero causes XOFF floods */
+ if (tx_pause && wx->fc.high_water) {
+ if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
+ wx_err(wx, "Invalid water mark configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Disable any previous flow control settings */
+ mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
+ mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;
+
+ fccfg_reg = rd32(wx, WX_RDB_RFCC);
+ fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;
+
+ if (rx_pause)
+ mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
+ if (tx_pause)
+ fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;
+
+ /* Set 802.3x based flow control settings. */
+ wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
+ wr32(wx, WX_RDB_RFCC, fccfg_reg);
+
+ /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+ if (tx_pause && wx->fc.high_water) {
+ fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
+ wr32(wx, WX_RDB_RFCL, fcrtl);
+ fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
+ } else {
+ wr32(wx, WX_RDB_RFCL, 0);
+ /* In order to prevent Tx hangs when the internal Tx
+ * switch is enabled, we must set the high water mark
+ * to the Rx packet buffer size - 24KB. This allows
+ * the Tx switch to function even under heavy Rx
+ * workloads.
+ */
+ fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
+ }
+
+ wr32(wx, WX_RDB_RFCH, fcrth);
+
+ /* Configure pause time */
+ reg = pause_time * 0x00010001;
+ wr32(wx, WX_RDB_RFCV, reg);
+
+ /* Configure flow control refresh threshold value */
+ wr32(wx, WX_RDB_RFCRT, pause_time / 2);
+
+ /* We should set the drop enable bit if:
+ * Number of Rx queues > 1 and flow control is disabled
+ *
+ * This allows us to avoid head of line blocking for security
+ * and performance reasons.
+ */
+ if (wx->num_rx_queues > 1 && !tx_pause) {
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_enable_rx_drop(wx, wx->rx_ring[i]);
+ } else {
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_disable_rx_drop(wx, wx->rx_ring[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_fc_enable);
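One detail worth calling out in wx_fc_enable(): multiplying the 16-bit pause time by 0x00010001 replicates it into both halves of the 32-bit WX_RDB_RFCV register (0xFFFF * 0x00010001 == 0xFFFFFFFF). An equivalent, hedged formulation:

#include <linux/types.h>

/* Duplicate a 16-bit pause-quanta value into both register halves. */
static u32 pack_pause_time(u16 pause_time)
{
	return ((u32)pause_time << 16) | pause_time;
}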
+
/**
* wx_update_stats - Update the board statistics counters.
* @wx: board private structure
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 12c20a7c3..9e219fa71 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
void wx_update_stats(struct wx *wx);
void wx_clear_hw_cntrs(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 347d3cec0..08d3e4069 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all);
**/
static void wx_set_rss_queues(struct wx *wx)
{
- wx->num_rx_queues = wx->mac.max_rx_queues;
- wx->num_tx_queues = wx->mac.max_tx_queues;
+ struct wx_ring_feature *f;
+
+ /* cap RSS indices at the configured limit */
+ f = &wx->ring_feature[RING_F_RSS];
+ f->indices = f->limit;
+
+ wx->num_rx_queues = f->limit;
+ wx->num_tx_queues = f->limit;
}
static void wx_set_num_queues(struct wx *wx)
@@ -1592,38 +1598,54 @@ static void wx_set_num_queues(struct wx *wx)
*/
static int wx_acquire_msix_vectors(struct wx *wx)
{
- struct irq_affinity affd = {0, };
+ struct irq_affinity affd = { .pre_vectors = 1 };
int nvecs, i;
- nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
+ /* We start by asking for one vector per queue pair */
+ nvecs = max(wx->num_rx_queues, wx->num_tx_queues);
+ nvecs = min_t(int, nvecs, num_online_cpus());
+ nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors);
- wx->msix_entries = kcalloc(nvecs,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entries)
+ wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_q_entries)
return -ENOMEM;
+ /* One for non-queue interrupts */
+ nvecs += 1;
+
+ if (!wx->msix_in_use) {
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
+ }
+ }
+
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
nvecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&affd);
if (nvecs < 0) {
wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
- kfree(wx->msix_entries);
- wx->msix_entries = NULL;
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
return nvecs;
}
+ wx->msix_entry->entry = 0;
+ wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
+ nvecs -= 1;
for (i = 0; i < nvecs; i++) {
- wx->msix_entries[i].entry = i;
- wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
+ wx->msix_q_entries[i].entry = i;
+ wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1);
}
- /* one for msix_other */
- nvecs -= 1;
wx->num_q_vectors = nvecs;
- wx->num_rx_queues = nvecs;
- wx->num_tx_queues = nvecs;
return 0;
}
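After this rework, vector 0 is permanently the misc/"other" interrupt and queue vectors start at index 1; hence .pre_vectors = 1 excludes it from affinity spreading, wx_set_ivar() below gains msix_vector += 1, and WX_INTR_Q(i) becomes BIT((i) + 1) in the wx_type.h hunk. A hedged sketch of the allocation call (misc_irq/queue_irq are hypothetical storage for the illustration):

#include <linux/pci.h>

static int alloc_vectors_sketch(struct pci_dev *pdev, int nqueues,
				int *misc_irq, int *queue_irq)
{
	struct irq_affinity affd = { .pre_vectors = 1 };	/* skip vector 0 */
	int nvecs, i;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, nqueues + 1, nqueues + 1,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0)
		return nvecs;

	*misc_irq = pci_irq_vector(pdev, 0);	/* non-queue events */
	for (i = 0; i < nvecs - 1; i++)
		queue_irq[i] = pci_irq_vector(pdev, i + 1);	/* queue i */

	return nvecs - 1;	/* usable queue vectors */
}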
@@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx)
if (ret == 0 || (ret == -ENOMEM))
return ret;
- wx->num_rx_queues = 1;
- wx->num_tx_queues = 1;
- wx->num_q_vectors = 1;
+ /* Disable RSS */
+ dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
+ wx->ring_feature[RING_F_RSS].limit = 1;
+
+ wx_set_num_queues(wx);
/* minimum one for queue, one for misc */
nvecs = 1;
@@ -1905,8 +1929,12 @@ void wx_reset_interrupt_capability(struct wx *wx)
return;
if (pdev->msix_enabled) {
- kfree(wx->msix_entries);
- wx->msix_entries = NULL;
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ if (!wx->msix_in_use) {
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
+ }
}
pci_free_irq_vectors(wx->pdev);
}
@@ -1978,7 +2006,7 @@ void wx_free_irq(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_entries[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
/* free only the irqs that were actually requested */
if (!q_vector->rx.ring && !q_vector->tx.ring)
@@ -1988,7 +2016,7 @@ void wx_free_irq(struct wx *wx)
}
if (wx->mac.type == wx_mac_em)
- free_irq(wx->msix_entries[vector].vector, wx);
+ free_irq(wx->msix_entry->vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);
@@ -2065,6 +2093,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
wr32(wx, WX_PX_MISC_IVAR, ivar);
} else {
/* tx or rx causes */
+ msix_vector += 1; /* offset for queue vectors */
msix_vector |= WX_PX_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction));
ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
@@ -2082,7 +2111,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction,
* when it needs to update EITR registers at runtime. Hardware
* specific quirks/differences are taken care of here.
*/
-static void wx_write_eitr(struct wx_q_vector *q_vector)
+void wx_write_eitr(struct wx_q_vector *q_vector)
{
struct wx *wx = q_vector->wx;
int v_idx = q_vector->v_idx;
@@ -2095,7 +2124,7 @@ static void wx_write_eitr(struct wx_q_vector *q_vector)
itr_reg |= WX_PX_ITR_CNT_WDIS;
- wr32(wx, WX_PX_ITR(v_idx), itr_reg);
+ wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg);
}
/**
@@ -2141,9 +2170,9 @@ void wx_configure_vectors(struct wx *wx)
wx_write_eitr(q_vector);
}
- wx_set_ivar(wx, -1, 0, v_idx);
+ wx_set_ivar(wx, -1, 0, 0);
if (pdev->msix_enabled)
- wr32(wx, WX_PX_ITR(v_idx), 1950);
+ wr32(wx, WX_PX_ITR(0), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);
@@ -2656,11 +2685,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev);
- if (changed & NETIF_F_RXHASH)
+ if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
WX_RDB_RA_CTL_RSS_EN);
- else
+ wx->rss_enabled = true;
+ } else {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
+ wx->rss_enabled = false;
+ }
if (changed &
(NETIF_F_HW_VLAN_CTAG_RX |
@@ -2671,4 +2703,71 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
}
EXPORT_SYMBOL(wx_set_features);
+void wx_set_ring(struct wx *wx, u32 new_tx_count,
+ u32 new_rx_count, struct wx_ring *temp_ring)
+{
+ int i, err = 0;
+
+ /* Setup new Tx resources and free the old Tx resources in that order.
+ * We can then assign the new resources to the rings via a memcpy.
+ * The advantage to this approach is that we are guaranteed to still
+ * have resources even in the case of an allocation failure.
+ */
+ if (new_tx_count != wx->tx_ring_count) {
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], wx->tx_ring[i],
+ sizeof(struct wx_ring));
+
+ temp_ring[i].count = new_tx_count;
+ err = wx_setup_tx_resources(&temp_ring[i]);
+ if (err) {
+ wx_err(wx, "setup new tx resources failed, keep using the old config\n");
+ while (i) {
+ i--;
+ wx_free_tx_resources(&temp_ring[i]);
+ }
+ return;
+ }
+ }
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ wx_free_tx_resources(wx->tx_ring[i]);
+
+ memcpy(wx->tx_ring[i], &temp_ring[i],
+ sizeof(struct wx_ring));
+ }
+
+ wx->tx_ring_count = new_tx_count;
+ }
+
+ /* Repeat the process for the Rx rings if needed */
+ if (new_rx_count != wx->rx_ring_count) {
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], wx->rx_ring[i],
+ sizeof(struct wx_ring));
+
+ temp_ring[i].count = new_rx_count;
+ err = wx_setup_rx_resources(&temp_ring[i]);
+ if (err) {
+ wx_err(wx, "setup new rx resources failed, keep using the old config\n");
+ while (i) {
+ i--;
+ wx_free_rx_resources(&temp_ring[i]);
+ }
+ return;
+ }
+ }
+
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ wx_free_rx_resources(wx->rx_ring[i]);
+ memcpy(wx->rx_ring[i], &temp_ring[i],
+ sizeof(struct wx_ring));
+ }
+
+ wx->rx_ring_count = new_rx_count;
+ }
+}
+EXPORT_SYMBOL(wx_set_ring);
+
+MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index df1f4a595..ec909e876 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx);
int wx_setup_isb_resources(struct wx *wx);
void wx_free_isb_resources(struct wx *wx);
u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
+void wx_write_eitr(struct wx_q_vector *q_vector);
void wx_configure_vectors(struct wx *wx);
void wx_clean_all_rx_rings(struct wx *wx);
void wx_clean_all_tx_rings(struct wx *wx);
@@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
+void wx_set_ring(struct wx *wx, u32 new_tx_count,
+ u32 new_rx_count, struct wx_ring *temp_ring);
#endif /* _NGBE_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 83f9bb7b3..b4dc4f341 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
@@ -130,6 +131,15 @@
#define WX_RDB_PFCMACDAH 0x19214
#define WX_RDB_LXOFFTXC 0x19218
#define WX_RDB_LXONTXC 0x1921C
+/* Flow Control Registers */
+#define WX_RDB_RFCV 0x19200
+#define WX_RDB_RFCL 0x19220
+#define WX_RDB_RFCL_XONE BIT(31)
+#define WX_RDB_RFCH 0x19260
+#define WX_RDB_RFCH_XOFFE BIT(31)
+#define WX_RDB_RFCRT 0x192A0
+#define WX_RDB_RFCC 0x192A4
+#define WX_RDB_RFCC_RFCE_802_3X BIT(3)
/* ring assignment */
#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
#define WX_RDB_PL_CFG_L4HDR BIT(1)
@@ -137,8 +147,16 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
+#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
+#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
+#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
+#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
+#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20)
+#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
+#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
+#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
/******************************* PSR Registers *******************************/
/* psr control */
@@ -305,6 +323,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
#define WX_7K_ITR 595
#define WX_12K_ITR 336
+#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
#define WX_EM_MAX_EITR 0x00007FFCU
@@ -330,6 +349,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31)
+#define WX_PX_RR_CFG_DROP_EN BIT(30)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
#define WX_PX_RR_CFG_RR_THER_SHIFT 16
#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
@@ -367,8 +387,46 @@ enum WX_MSCA_CMD_value {
#define WX_MAC_STATE_MODIFIED 0x2
#define WX_MAC_STATE_IN_USE 0x4
+/* BitTimes (BT) conversion */
+#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define WX_B2BT(BT) ((BT) * 8)
+
+/* Calculate Delay to respond to PFC */
+#define WX_PFC_D 672
+/* Calculate Cable Delay */
+#define WX_CABLE_DC 5556 /* Delay Copper */
+/* Calculate Delay incurred from higher layer */
+#define WX_HD 6144
+
+/* Calculate Interface Delay */
+#define WX_PHY_D 12800
+#define WX_MAC_D 4096
+#define WX_XAUI_D (2 * 1024)
+#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D)
+/* Calculate PCI Bus delay for low thresholds */
+#define WX_PCI_DELAY 10000
+
+/* Calculate delay value in bit times */
+#define WX_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \
+ (2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \
+ 2 * WX_B2BT(_max_frame_tc))
+
+/* Calculate low threshold delay values */
+#define WX_LOW_DV(_max_frame_tc) \
+ (2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1))
+
+/* flow control */
+#define WX_DEFAULT_FCPAUSE 0xFFFF
+
#define WX_MAX_RXD 8192
#define WX_MAX_TXD 8192
+#define WX_MIN_RXD 128
+#define WX_MIN_TXD 128
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
@@ -871,6 +929,19 @@ struct wx_q_vector {
struct wx_ring ring[] ____cacheline_internodealigned_in_smp;
};
+struct wx_ring_feature {
+ u16 limit; /* upper limit on feature indices */
+ u16 indices; /* current value of indices */
+ u16 mask; /* Mask used for feature to ring mapping */
+ u16 offset; /* offset to start of feature */
+};
+
+enum wx_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_RSS,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
enum wx_isb_idx {
WX_ISB_HEADER,
WX_ISB_MISC,
@@ -879,6 +950,11 @@ enum wx_isb_idx {
WX_ISB_MAX
};
+struct wx_fc_info {
+ u32 high_water; /* Flow Ctrl High-water */
+ u32 low_water; /* Flow Ctrl Low-water */
+};
+
/* Statistics counters collected by the MAC */
struct wx_hw_stats {
u64 gprc;
@@ -919,6 +995,7 @@ struct wx {
enum sp_media_type media_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
+ struct wx_fc_info fc;
struct wx_mac_addr *mac_table;
u16 device_id;
u16 vendor_id;
@@ -939,6 +1016,8 @@ struct wx {
int speed;
int duplex;
struct phy_device *phydev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
bool wol_hw_supported;
bool ncsi_enabled;
@@ -966,7 +1045,10 @@ struct wx {
struct wx_q_vector *q_vector[64];
unsigned int queues_per_pool;
- struct msix_entry *msix_entries;
+ struct msix_entry *msix_q_entries;
+ struct msix_entry *msix_entry;
+ bool msix_in_use;
+ struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
dma_addr_t isb_dma;
@@ -974,8 +1056,9 @@ struct wx {
u32 isb_tag[WX_ISB_MAX];
#define WX_MAX_RETA_ENTRIES 128
+#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
-
+ bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
u32 wol;
@@ -992,7 +1075,7 @@ struct wx {
};
#define WX_INTR_ALL (~0ULL)
-#define WX_INTR_Q(i) BIT(i)
+#define WX_INTR_Q(i) BIT((i) + 1)
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -1044,4 +1127,9 @@ rd64(struct wx *wx, u32 reg)
#define wx_dbg(wx, fmt, arg...) \
dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
+static inline struct wx *phylink_to_wx(struct phylink_config *config)
+{
+ return container_of(config, struct wx, phylink_config);
+}
+
#endif /* _WX_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index afbdf6919..786a652ae 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -7,7 +7,10 @@
#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
#include "ngbe_ethtool.h"
+#include "ngbe_type.h"
static void ngbe_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
@@ -41,12 +44,75 @@ static int ngbe_set_wol(struct net_device *netdev,
return 0;
}
+static int ngbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+ struct wx_ring *temp_ring;
+ int i;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if (new_tx_count == wx->tx_ring_count &&
+ new_rx_count == wx->rx_ring_count)
+ return 0;
+
+ if (!netif_running(wx->netdev)) {
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->count = new_rx_count;
+ wx->tx_ring_count = new_tx_count;
+ wx->rx_ring_count = new_rx_count;
+
+ return 0;
+ }
+
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
+ temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
+ if (!temp_ring)
+ return -ENOMEM;
+
+ ngbe_down(wx);
+
+ wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
+ kvfree(temp_ring);
+
+ wx_configure(wx);
+ ngbe_up(wx);
+
+ return 0;
+}
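ngbe_set_ringparam() normalizes the requested counts before comparing: clamp to the WX_MIN/WX_MAX descriptor bounds, then round up to the required multiple of eight (both sets of defines appear in the wx_type.h hunk above). The normalization in isolation, as a hedged sketch:

#include <linux/kernel.h>	/* clamp_t(), ALIGN() */

static u32 normalize_ring_count(u32 requested, u32 min, u32 max, u32 mult)
{
	u32 count = clamp_t(u32, requested, min, max);

	return ALIGN(count, mult);	/* round up to a multiple of mult */
}

/* e.g. normalize_ring_count(1001, 128, 8192, 8) == 1008 */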
+
+static int ngbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ int err;
+
+ err = wx_set_channels(dev, ch);
+ if (err < 0)
+ return err;
+
+ /* use setup TC to update any traffic class queue mapping */
+ return ngbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
static const struct ethtool_ops ngbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
- .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = wx_get_link_ksettings,
+ .set_link_ksettings = wx_set_link_ksettings,
+ .nway_reset = wx_nway_reset,
.get_wol = ngbe_get_wol,
.set_wol = ngbe_set_wol,
.get_sset_count = wx_get_sset_count,
@@ -54,6 +120,16 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats,
+ .get_pauseparam = wx_get_pauseparam,
+ .set_pauseparam = wx_set_pauseparam,
+ .get_ringparam = wx_get_ringparam,
+ .set_ringparam = ngbe_set_ringparam,
+ .get_coalesce = wx_get_coalesce,
+ .set_coalesce = wx_set_coalesce,
+ .get_channels = wx_get_channels,
+ .set_channels = ngbe_set_channels,
+ .get_msglevel = wx_get_msglevel,
+ .set_msglevel = wx_set_msglevel,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 8db804543..fdd6b4f70 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -80,28 +80,6 @@ static void ngbe_init_type_code(struct wx *wx)
}
/**
- * ngbe_init_rss_key - Initialize wx RSS key
- * @wx: device handle
- *
- * Allocates and initializes the RSS key if it is not allocated.
- **/
-static inline int ngbe_init_rss_key(struct wx *wx)
-{
- u32 *rss_key;
-
- if (!wx->rss_key) {
- rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
- if (unlikely(!rss_key))
- return -ENOMEM;
-
- netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
- wx->rss_key = rss_key;
- }
-
- return 0;
-}
-
-/**
* ngbe_sw_init - Initialize general software structures
* @wx: board private structure to initialize
**/
@@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx)
dev_err(&pdev->dev, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count;
- if (ngbe_init_rss_key(wx))
- return -ENOMEM;
+ wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES,
+ num_online_cpus());
+ wx->rss_enabled = true;
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
@@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues)
if (queues)
wx_intr_enable(wx, NGBE_INTR_ALL);
else
- wx_intr_enable(wx, NGBE_INTR_MISC(wx));
+ wx_intr_enable(wx, NGBE_INTR_MISC);
}
/**
@@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_entries[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
}
}
- err = request_irq(wx->msix_entries[vector].vector,
+ err = request_irq(wx->msix_entry->vector,
ngbe_msix_other, 0, netdev->name, wx);
if (err) {
@@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx)
free_queue_irqs:
while (vector) {
vector--;
- free_irq(wx->msix_entries[vector].vector,
+ free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
wx_reset_interrupt_capability(wx);
@@ -334,15 +313,15 @@ static void ngbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
-static void ngbe_down(struct wx *wx)
+void ngbe_down(struct wx *wx)
{
- phy_stop(wx->phydev);
+ phylink_stop(wx->phylink);
ngbe_disable_device(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
-static void ngbe_up(struct wx *wx)
+void ngbe_up(struct wx *wx)
{
wx_configure_vectors(wx);
@@ -359,7 +338,7 @@ static void ngbe_up(struct wx *wx)
if (wx->gpio_ctrl)
ngbe_sfp_modules_txrx_powerctl(wx, true);
- phy_start(wx->phydev);
+ phylink_start(wx->phylink);
}
/**
@@ -388,7 +367,7 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_free_resources;
- err = ngbe_phy_connect(wx);
+ err = phylink_connect_phy(wx->phylink, wx->phydev);
if (err)
goto err_free_irq;
@@ -404,7 +383,7 @@ static int ngbe_open(struct net_device *netdev)
return 0;
err_dis_phy:
- phy_disconnect(wx->phydev);
+ phylink_disconnect_phy(wx->phylink);
err_free_irq:
wx_free_irq(wx);
err_free_resources:
@@ -430,7 +409,7 @@ static int ngbe_close(struct net_device *netdev)
ngbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
- phy_disconnect(wx->phydev);
+ phylink_disconnect_phy(wx->phylink);
wx_control_hw(wx, false);
return 0;
@@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * ngbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ngbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ngbe_close(dev);
+
+ wx_clear_interrupt_scheme(wx);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ wx_init_interrupt_scheme(wx);
+
+ if (netif_running(dev))
+ ngbe_open(dev);
+
+ return 0;
+}
+
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
@@ -582,6 +594,7 @@ static int ngbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
@@ -680,6 +693,7 @@ static int ngbe_probe(struct pci_dev *pdev,
return 0;
err_register:
+ phylink_destroy(wx->phylink);
wx_control_hw(wx, false);
err_clear_interrupt_scheme:
wx_clear_interrupt_scheme(wx);
@@ -709,9 +723,11 @@ static void ngbe_remove(struct pci_dev *pdev)
netdev = wx->netdev;
unregister_netdev(netdev);
+ phylink_destroy(wx->phylink);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(wx->rss_key);
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index 6302ecca7..ec54b18c5 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -56,22 +56,28 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
return ret;
}
-static void ngbe_handle_link_change(struct net_device *dev)
+static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct wx *wx = netdev_priv(dev);
- struct phy_device *phydev;
+}
+
+static void ngbe_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void ngbe_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct wx *wx = phylink_to_wx(config);
u32 lan_speed, reg;
- phydev = wx->phydev;
- if (!(wx->link != phydev->link ||
- wx->speed != phydev->speed ||
- wx->duplex != phydev->duplex))
- return;
+ wx_fc_enable(wx, tx_pause, rx_pause);
- wx->link = phydev->link;
- wx->speed = phydev->speed;
- wx->duplex = phydev->duplex;
- switch (phydev->speed) {
+ switch (speed) {
case SPEED_10:
lan_speed = 0;
break;
@@ -83,54 +89,51 @@ static void ngbe_handle_link_change(struct net_device *dev)
lan_speed = 2;
break;
}
+
wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);
- if (phydev->link) {
- reg = rd32(wx, WX_MAC_TX_CFG);
- reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
- reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
- wr32(wx, WX_MAC_TX_CFG, reg);
- /* Re configure MAC RX */
- reg = rd32(wx, WX_MAC_RX_CFG);
- wr32(wx, WX_MAC_RX_CFG, reg);
- wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
- reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
- wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
- }
- phy_print_status(phydev);
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
+ reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+
+ /* Reconfigure MAC Rx */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
}
-int ngbe_phy_connect(struct wx *wx)
+static const struct phylink_mac_ops ngbe_mac_ops = {
+ .mac_config = ngbe_mac_config,
+ .mac_link_down = ngbe_mac_link_down,
+ .mac_link_up = ngbe_mac_link_up,
+};
+
+static int ngbe_phylink_init(struct wx *wx)
{
- int ret;
+ struct phylink_config *config;
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
- ret = phy_connect_direct(wx->netdev,
- wx->phydev,
- ngbe_handle_link_change,
- PHY_INTERFACE_MODE_RGMII_ID);
- if (ret) {
- wx_err(wx, "PHY connect failed.\n");
- return ret;
- }
+ config = &wx->phylink_config;
+ config->dev = &wx->netdev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD |
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ config->mac_managed_pm = true;
- return 0;
-}
+ phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
-static void ngbe_phy_fixup(struct wx *wx)
-{
- struct phy_device *phydev = wx->phydev;
- struct ethtool_eee eee;
-
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- phydev->mac_managed_pm = true;
- if (wx->mac_type != em_mac_type_mdi)
- return;
- /* disable EEE, internal phy does not support eee */
- memset(&eee, 0, sizeof(eee));
- phy_ethtool_set_eee(phydev, &eee);
+ phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ wx->phylink = phylink;
+
+ return 0;
}
int ngbe_mdio_init(struct wx *wx)
@@ -165,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx)
return -ENODEV;
phy_attached_info(wx->phydev);
- ngbe_phy_fixup(wx);
wx->link = 0;
wx->speed = 0;
wx->duplex = 0;
+ ret = ngbe_phylink_init(wx);
+ if (ret) {
+ wx_err(wx, "failed to init phylink: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
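ngbe_mac_link_up() above reaches the driver state through phylink_to_wx(), which is not defined in this file. Since ngbe_phylink_init() embeds the phylink_config inside struct wx (config = &wx->phylink_config), the helper is presumably a container_of() wrapper in the shared libwx headers; a minimal sketch under that assumption:

    static inline struct wx *phylink_to_wx(struct phylink_config *config)
    {
            return container_of(config, struct wx, phylink_config);
    }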
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
index 0a6400dd8..f610b7718 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
@@ -7,6 +7,5 @@
#ifndef _NGBE_MDIO_H_
#define _NGBE_MDIO_H_
-int ngbe_phy_connect(struct wx *wx);
int ngbe_mdio_init(struct wx *wx);
#endif /* _NGBE_MDIO_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index ff754d69b..f48ed7fc1 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -80,7 +80,7 @@
NGBE_PX_MISC_IEN_GPIO)
#define NGBE_INTR_ALL 0x1FF
-#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define NGBE_INTR_MISC BIT(0)
#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
#define NGBE_CFG_LAN_SPEED 0x14440
@@ -105,6 +105,7 @@
#define NGBE_FW_CMD_ST_FAIL 0x70657376
#define NGBE_MAX_FDIR_INDICES 7
+#define NGBE_MAX_RSS_INDICES 8
#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
@@ -130,4 +131,8 @@
extern char ngbe_driver_name[];
+void ngbe_down(struct wx *wx);
+void ngbe_up(struct wx *wx);
+int ngbe_setup_tc(struct net_device *dev, u8 tc);
+
#endif /* _NGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 3f336a088..db675512c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -7,43 +7,93 @@
#include "../libwx/wx_ethtool.h"
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_ethtool.h"
-static int txgbe_nway_reset(struct net_device *netdev)
+static int txgbe_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
- struct txgbe *txgbe = netdev_to_txgbe(netdev);
+ struct wx *wx = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+ struct wx_ring *temp_ring;
+ int i;
- return phylink_ethtool_nway_reset(txgbe->phylink);
-}
+ new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
-static int txgbe_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct txgbe *txgbe = netdev_to_txgbe(netdev);
+ new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if (new_tx_count == wx->tx_ring_count &&
+ new_rx_count == wx->rx_ring_count)
+ return 0;
+
+ if (!netif_running(wx->netdev)) {
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->count = new_tx_count;
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->count = new_rx_count;
+ wx->tx_ring_count = new_tx_count;
+ wx->rx_ring_count = new_rx_count;
+
+ return 0;
+ }
- return phylink_ethtool_ksettings_get(txgbe->phylink, cmd);
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
+ temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
+ if (!temp_ring)
+ return -ENOMEM;
+
+ txgbe_down(wx);
+
+ wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
+ kvfree(temp_ring);
+
+ txgbe_up(wx);
+
+ return 0;
}
-static int txgbe_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
+static int txgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
- struct txgbe *txgbe = netdev_to_txgbe(netdev);
+ int err;
+
+ err = wx_set_channels(dev, ch);
+ if (err < 0)
+ return err;
- return phylink_ethtool_ksettings_set(txgbe->phylink, cmd);
+ /* use setup TC to update any traffic class queue mapping */
+ return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
static const struct ethtool_ops txgbe_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
.get_drvinfo = wx_get_drvinfo,
- .nway_reset = txgbe_nway_reset,
+ .nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = txgbe_get_link_ksettings,
- .set_link_ksettings = txgbe_set_link_ksettings,
+ .get_link_ksettings = wx_get_link_ksettings,
+ .set_link_ksettings = wx_set_link_ksettings,
.get_sset_count = wx_get_sset_count,
.get_strings = wx_get_strings,
.get_ethtool_stats = wx_get_ethtool_stats,
.get_eth_mac_stats = wx_get_mac_stats,
.get_pause_stats = wx_get_pause_stats,
+ .get_pauseparam = wx_get_pauseparam,
+ .set_pauseparam = wx_set_pauseparam,
+ .get_ringparam = wx_get_ringparam,
+ .set_ringparam = txgbe_set_ringparam,
+ .get_coalesce = wx_get_coalesce,
+ .set_coalesce = wx_set_coalesce,
+ .get_channels = wx_get_channels,
+ .set_channels = txgbe_set_channels,
+ .get_msglevel = wx_get_msglevel,
+ .set_msglevel = wx_set_msglevel,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
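txgbe_set_ringparam() first normalizes the requested descriptor counts: clamp_t() bounds them to the hardware limits and ALIGN() rounds up to the required multiple. Only when the interface is running does it take the heavier path of bringing the port down, rebuilding the rings through a temporary array via wx_set_ring(), and bringing it back up. A worked example of the normalization (the multiple of 8 is illustrative, not the actual WX_REQ_RX_DESCRIPTOR_MULTIPLE value):

    /* clamp_t(u32, 1001, WX_MIN_RXD, WX_MAX_RXD) -> 1001 (within limits)
     * ALIGN(1001, 8)                             -> 1008 (rounded up)
     * ALIGN(1000, 8)                             -> 1000 (already aligned)
     */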
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 526250102..3b151c410 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues)
wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
if (queues)
wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
@@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_entries[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
if (q_vector->tx.ring && q_vector->rx.ring)
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -168,7 +168,7 @@ static int txgbe_request_msix_irqs(struct wx *wx)
free_queue_irqs:
while (vector) {
vector--;
- free_irq(wx->msix_entries[vector].vector,
+ free_irq(wx->msix_q_entries[vector].vector,
wx->q_vector[vector]);
}
wx_reset_interrupt_capability(wx);
@@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx)
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
- struct txgbe *txgbe;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- txgbe = netdev_to_txgbe(netdev);
- phylink_start(txgbe->phylink);
+ phylink_start(wx->phylink);
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -290,18 +288,22 @@ static void txgbe_disable_device(struct wx *wx)
wx_update_stats(wx);
}
-static void txgbe_down(struct wx *wx)
+void txgbe_down(struct wx *wx)
{
- struct txgbe *txgbe = netdev_to_txgbe(wx->netdev);
-
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(txgbe->phylink);
+ phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
+void txgbe_up(struct wx *wx)
+{
+ wx_configure(wx);
+ txgbe_up_complete(wx);
+}
+
/**
* txgbe_init_type_code - Initialize the shared code
* @wx: pointer to hardware structure
@@ -376,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx)
wx_err(wx, "Do not support MSI-X\n");
wx->mac.max_msix_vectors = msix_count;
+ wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES,
+ num_online_cpus());
+ wx->rss_enabled = true;
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -502,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
+/**
+ * txgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int txgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ txgbe_close(dev);
+ else
+ txgbe_reset(wx);
+
+ wx_clear_interrupt_scheme(wx);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ wx_init_interrupt_scheme(wx);
+
+ if (netif_running(dev))
+ txgbe_open(dev);
+
+ return 0;
+}
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -638,6 +679,7 @@ static int txgbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE -
@@ -775,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(wx->rss_key);
kfree(wx->mac_table);
wx_clear_interrupt_scheme(wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index b95187a08..8cddc9ddb 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -20,6 +20,8 @@
#include "txgbe_phy.h"
#include "txgbe_hw.h"
+#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
+
static int txgbe_swnodes_register(struct txgbe *txgbe)
{
struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -159,7 +161,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe)
static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config,
phy_interface_t interface)
{
- struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
+ struct txgbe *txgbe = wx->priv;
if (interface == PHY_INTERFACE_MODE_10GBASER)
return &txgbe->xpcs->pcs;
@@ -175,7 +178,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode,
static void txgbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
}
@@ -186,9 +189,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
u32 txcfg, wdg;
+ wx_fc_enable(wx, tx_pause, rx_pause);
+
txcfg = rd32(wx, WX_MAC_TX_CFG);
txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK;
@@ -217,7 +222,7 @@ static void txgbe_mac_link_up(struct phylink_config *config,
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0);
@@ -228,7 +233,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct wx *wx = netdev_priv(to_net_dev(config->dev));
+ struct wx *wx = phylink_to_wx(config);
txgbe_enable_sec_tx_path(wx);
wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
@@ -253,10 +258,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
phy_interface_t phy_mode;
struct phylink *phylink;
- config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
+ config = &wx->phylink_config;
config->dev = &wx->netdev->dev;
config->type = PHYLINK_NETDEV;
config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD |
@@ -287,7 +289,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
}
}
- txgbe->phylink = phylink;
+ wx->phylink = phylink;
return 0;
}
@@ -483,11 +485,11 @@ static void txgbe_irq_handler(struct irq_desc *desc)
TXGBE_PX_MISC_ETH_AN)) {
u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
- phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
+ phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
}
/* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -531,7 +533,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector;
+
+ /* currently only supported with MSI-X interrupts */
+ if (!wx->msix_entry)
+ return -EPERM;
+
+ girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -551,8 +558,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
- pci_dev_id(pdev));
+ snprintf(clk_name, sizeof(clk_name), "%s.%d",
+ TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
if (IS_ERR(clk))
@@ -614,7 +621,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
info.parent = &pdev->dev;
info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
- info.name = "i2c_designware";
+ info.name = TXGBE_I2C_CLK_DEV_NAME;
info.id = pci_dev_id(pdev);
info.res = &DEFINE_RES_IRQ(pdev->irq);
@@ -701,6 +708,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
int txgbe_init_phy(struct txgbe *txgbe)
{
+ struct wx *wx = txgbe->wx;
int ret;
if (txgbe->wx->media_type == sp_media_copper)
@@ -708,46 +716,48 @@ int txgbe_init_phy(struct txgbe *txgbe)
ret = txgbe_swnodes_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to register software nodes\n");
+ wx_err(wx, "failed to register software nodes\n");
return ret;
}
ret = txgbe_mdio_pcs_init(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret);
+ wx_err(wx, "failed to init mdio pcs: %d\n", ret);
goto err_unregister_swnode;
}
ret = txgbe_phylink_init(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init phylink\n");
+ wx_err(wx, "failed to init phylink\n");
goto err_destroy_xpcs;
}
ret = txgbe_gpio_init(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init gpio\n");
+ wx_err(wx, "failed to init gpio\n");
goto err_destroy_phylink;
}
ret = txgbe_clock_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to register clock: %d\n", ret);
+ wx_err(wx, "failed to register clock: %d\n", ret);
goto err_destroy_phylink;
}
ret = txgbe_i2c_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret);
+ wx_err(wx, "failed to init i2c interface: %d\n", ret);
goto err_unregister_clk;
}
ret = txgbe_sfp_register(txgbe);
if (ret) {
- wx_err(txgbe->wx, "failed to register sfp\n");
+ wx_err(wx, "failed to register sfp\n");
goto err_unregister_i2c;
}
+ wx->msix_in_use = true;
+
return 0;
err_unregister_i2c:
@@ -756,7 +766,7 @@ err_unregister_clk:
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
err_destroy_phylink:
- phylink_destroy(txgbe->phylink);
+ phylink_destroy(wx->phylink);
err_destroy_xpcs:
xpcs_destroy(txgbe->xpcs);
err_unregister_swnode:
@@ -768,8 +778,8 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
if (txgbe->wx->media_type == sp_media_copper) {
- phylink_disconnect_phy(txgbe->phylink);
- phylink_destroy(txgbe->phylink);
+ phylink_disconnect_phy(txgbe->wx->phylink);
+ phylink_destroy(txgbe->wx->phylink);
return;
}
@@ -777,7 +787,8 @@ void txgbe_remove_phy(struct txgbe *txgbe)
platform_device_unregister(txgbe->i2c_dev);
clkdev_drop(txgbe->clock);
clk_unregister(txgbe->clk);
- phylink_destroy(txgbe->phylink);
+ phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
+ txgbe->wx->msix_in_use = false;
}
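The new TXGBE_I2C_CLK_DEV_NAME define keeps two strings in lockstep: clkdev resolves a consumer by its device name "<name>.<id>", so the fixed-rate clock registered for the DesignWare I2C block must use exactly the name and id that the i2c platform device is created with. Condensed from the two hunks above:

    /* provider: clock lookup name is "<dev name>.<dev id>" */
    snprintf(clk_name, sizeof(clk_name), "%s.%d",
             TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
    clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);

    /* consumer: the platform device carries the matching name/id */
    info.name = TXGBE_I2C_CLK_DEV_NAME;
    info.id = pci_dev_id(pdev);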
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 3ba9ce43f..270a6fd9a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -98,6 +98,7 @@
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
+#define TXGBE_MAX_RSS_INDICES 63
#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
@@ -122,19 +123,16 @@
#define TXGBE_DEFAULT_RX_WORK 128
#endif
-#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
-#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
+#define TXGBE_INTR_MISC BIT(0)
+#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1)
#define TXGBE_MAX_EITR GENMASK(11, 3)
extern char txgbe_driver_name[];
-static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev)
-{
- struct wx *wx = netdev_priv(netdev);
-
- return wx->priv;
-}
+void txgbe_down(struct wx *wx);
+void txgbe_up(struct wx *wx);
+int txgbe_setup_tc(struct net_device *dev, u8 tc);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
@@ -175,7 +173,6 @@ struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
struct dw_xpcs *xpcs;
- struct phylink *phylink;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
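Fixing the misc interrupt at vector 0 (instead of at the vector after the last queue vector) makes its position independent of the queue count, which matters now that the queue vectors (msix_q_entries) can be resized via ethtool. The queue vectors then occupy bits 1..num_q_vectors, which is what the new GENMASK() expresses; a worked example:

    #define TXGBE_INTR_MISC         BIT(0)                  /* 0x01 */
    #define TXGBE_INTR_QALL(A)      GENMASK((A)->num_q_vectors, 1)

    /* with num_q_vectors == 4: GENMASK(4, 1) == 0x1e (bits 1-4 set) */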
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 0014729b8..35d96c633 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -26,6 +26,7 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
depends on HAS_IOMEM
+ depends on XILINX_DMA
select PHYLINK
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 575ff9de8..807ead678 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
+#include <linux/skbuff.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -379,6 +380,22 @@ struct axidma_bd {
#define XAE_NUM_MISC_CLOCKS 3
/**
+ * struct skbuf_dma_descriptor - skb bookkeeping for each DMA descriptor
+ * @sgl: Scatterlist for the skb and its fragments.
+ * @desc: Pointer to the DMA descriptor.
+ * @dma_address: DMA address of the sglist.
+ * @skb: Pointer to the SKB transferred using DMA.
+ * @sg_len: Number of entries in the sglist.
+ */
+struct skbuf_dma_descriptor {
+ struct scatterlist sgl[MAX_SKB_FRAGS + 1];
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t dma_address;
+ struct sk_buff *skb;
+ int sg_len;
+};
+
+/**
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
@@ -435,6 +452,15 @@ struct axidma_bd {
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
* @coalesce_usec_tx: IRQ coalesce delay for TX
+ * @use_dmaengine: flag to check dmaengine framework usage.
+ * @tx_chan: TX DMA channel.
+ * @rx_chan: RX DMA channel.
+ * @tx_skb_ring: Pointer to TX skb ring buffer array.
+ * @rx_skb_ring: Pointer to RX skb ring buffer array.
+ * @tx_ring_head: TX skb ring buffer head index.
+ * @tx_ring_tail: TX skb ring buffer tail index.
+ * @rx_ring_head: RX skb ring buffer head index.
+ * @rx_ring_tail: RX skb ring buffer tail index.
*/
struct axienet_local {
struct net_device *ndev;
@@ -499,6 +525,15 @@ struct axienet_local {
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
u32 coalesce_usec_tx;
+ u8 use_dmaengine;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct skbuf_dma_descriptor **tx_skb_ring;
+ struct skbuf_dma_descriptor **rx_skb_ring;
+ int tx_ring_head;
+ int tx_ring_tail;
+ int rx_ring_head;
+ int rx_ring_tail;
};
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index bf6e33990..aaf780fd4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -38,6 +38,11 @@
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/circ_buf.h>
+#include <net/netdev_queues.h>
#include "xilinx_axienet.h"
@@ -47,6 +52,9 @@
#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
+#define DMA_NUM_APP_WORDS 5
+#define LEN_APP 4
+#define RX_BUF_NUM_DEFAULT 128
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
@@ -55,6 +63,8 @@
#define AXIENET_REGS_N 40
+static void axienet_rx_submit_desc(struct net_device *ndev);
+
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
{ .compatible = "xlnx,axi-ethernet-1.00.a", },
@@ -120,6 +130,16 @@ static struct axienet_option axienet_options[] = {
{}
};
+static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
+{
+ return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
+}
+
+static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
+{
+ return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
+}
+
/**
* axienet_dma_in32 - Memory mapped Axi DMA register read
* @lp: Pointer to axienet local structure
@@ -589,10 +609,6 @@ static int axienet_device_reset(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
int ret;
- ret = __axienet_device_reset(lp);
- if (ret)
- return ret;
-
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
@@ -606,11 +622,17 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}
- ret = axienet_dma_bd_init(ndev);
- if (ret) {
- netdev_err(ndev, "%s: descriptor allocation failed\n",
- __func__);
- return ret;
+ if (!lp->use_dmaengine) {
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
+
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
+ return ret;
+ }
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -726,6 +748,128 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
}
/**
+ * axienet_dma_tx_cb - DMA engine callback for TX channel.
+ * @data: Pointer to the axienet_local structure.
+ * @result: error reporting through dmaengine_result.
+ *
+ * This function is called by the dmaengine driver for the TX channel
+ * to notify that the transmit is done.
+ */
+static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
+{
+ struct skbuf_dma_descriptor *skbuf_dma;
+ struct axienet_local *lp = data;
+ struct netdev_queue *txq;
+ int len;
+
+ skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
+ len = skbuf_dma->skb->len;
+ txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_bytes, len);
+ u64_stats_add(&lp->tx_packets, 1);
+ u64_stats_update_end(&lp->tx_stat_sync);
+ dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
+ dev_consume_skb_any(skbuf_dma->skb);
+ netif_txq_completed_wake(txq, 1, len,
+ CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+ 2 * MAX_SKB_FRAGS);
+}
+
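The TX completion callback above and axienet_start_xmit_dmaengine() below coordinate through free-running head/tail indices that axienet_get_tx_desc() masks into the power-of-two ring. CIRC_SPACE() comes from <linux/circ_buf.h>; a sketch of the identity it relies on:

    /* CIRC_SPACE(head, tail, size) == ((tail) - ((head) + 1)) & ((size) - 1)
     *
     * This counts the free slots while always leaving one slot unused, so
     * a full ring (space == 0) is distinguishable from an empty one
     * (space == size - 1). It requires size to be a power of two, which
     * holds for TX_BD_NUM_MAX (4096).
     */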
+/**
+ * axienet_start_xmit_dmaengine - Starts the transmission.
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK on success, and also on any error other than
+ * a lack of ring space (the skb is dropped in that case).
+ * NETDEV_TX_BUSY when no free element is available in the
+ * TX skb ring buffer.
+ *
+ * This function is invoked to initiate transmission. It maps the skb
+ * into a scatterlist, registers the DMA completion callback and
+ * submits the DMA transaction.
+ * Additionally, if checksum offloading is supported, it populates the
+ * AXI-Stream Control fields with appropriate values.
+ */
+static netdev_tx_t
+axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct dma_async_tx_descriptor *dma_tx_desc = NULL;
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
+ struct skbuf_dma_descriptor *skbuf_dma;
+ struct dma_device *dma_dev;
+ struct netdev_queue *txq;
+ u32 csum_start_off;
+ u32 csum_index_off;
+ int sg_len;
+ int ret;
+
+ dma_dev = lp->tx_chan->device;
+ sg_len = skb_shinfo(skb)->nr_frags + 1;
+ if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
+ netif_stop_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
+ if (!skbuf_dma)
+ goto xmit_error_drop_skb;
+
+ lp->tx_ring_head++;
+ sg_init_table(skbuf_dma->sgl, sg_len);
+ ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
+ if (ret < 0)
+ goto xmit_error_drop_skb;
+
+ ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
+ if (!ret)
+ goto xmit_error_drop_skb;
+
+ /* Fill up app fields for checksum */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+ /* Tx Full Checksum Offload Enabled */
+ app_metadata[0] |= 2;
+ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
+ csum_start_off = skb_transport_offset(skb);
+ csum_index_off = csum_start_off + skb->csum_offset;
+ /* Tx Partial Checksum Offload Enabled */
+ app_metadata[0] |= 1;
+ app_metadata[1] = (csum_start_off << 16) | csum_index_off;
+ }
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
+ }
+
+ dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
+ sg_len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT, (void *)app_metadata);
+ if (!dma_tx_desc)
+ goto xmit_error_unmap_sg;
+
+ skbuf_dma->skb = skb;
+ skbuf_dma->sg_len = sg_len;
+ dma_tx_desc->callback_param = lp;
+ dma_tx_desc->callback_result = axienet_dma_tx_cb;
+ dmaengine_submit(dma_tx_desc);
+ dma_async_issue_pending(lp->tx_chan);
+ txq = skb_get_tx_queue(lp->ndev, skb);
+ netdev_tx_sent_queue(txq, skb->len);
+ netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
+ MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
+
+ return NETDEV_TX_OK;
+
+xmit_error_unmap_sg:
+ dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
+xmit_error_drop_skb:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
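The checksum block in axienet_start_xmit_dmaengine() encodes the offload request into the AXI-Stream application words that travel with the descriptor (DMA_NUM_APP_WORDS of them, passed as the context argument of device_prep_slave_sg()). The encoding used above, summarized:

    /* app_metadata[0] |= 1: partial TX checksum offload;
     *     app_metadata[1] = (csum_start_off << 16) | csum_index_off
     *     tells the core where to start summing and where to insert.
     * app_metadata[0] |= 2: full TX checksum offload; the core computes
     *     and inserts the checksum itself.
     */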
+/**
* axienet_tx_poll - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @napi: Pointer to NAPI structure.
@@ -892,6 +1036,42 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/**
+ * axienet_dma_rx_cb - DMA engine callback for RX channel.
+ * @data: Pointer to the axienet_local structure.
+ * @result: error reporting through dmaengine_result.
+ *
+ * This function is called by the dmaengine driver for the RX channel
+ * to notify that a packet has been received.
+ */
+static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+{
+ struct skbuf_dma_descriptor *skbuf_dma;
+ size_t meta_len, meta_max_len, rx_len;
+ struct axienet_local *lp = data;
+ struct sk_buff *skb;
+ u32 *app_metadata;
+
+ skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
+ skb = skbuf_dma->skb;
+ app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
+ &meta_max_len);
+ dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ /* TODO: Derive app word index programmatically */
+ rx_len = (app_metadata[LEN_APP] & 0xFFFF);
+ skb_put(skb, rx_len);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ __netif_rx(skb);
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, 1);
+ u64_stats_add(&lp->rx_bytes, rx_len);
+ u64_stats_update_end(&lp->rx_stat_sync);
+ axienet_rx_submit_desc(lp->ndev);
+ dma_async_issue_pending(lp->rx_chan);
+}
+
+/**
* axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
* @napi: Pointer to NAPI structure.
* @budget: Max number of RX packets to process.
@@ -1125,40 +1305,158 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
static void axienet_dma_err_handler(struct work_struct *work);
/**
- * axienet_open - Driver open routine.
- * @ndev: Pointer to net_device structure
+ * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
+ * Allocate an skbuff, map it into a scatterlist, obtain a descriptor,
+ * then attach the callback information and submit the descriptor.
+ *
+ * @ndev: net_device pointer
+ *
+ */
+static void axienet_rx_submit_desc(struct net_device *ndev)
+{
+ struct dma_async_tx_descriptor *dma_rx_desc = NULL;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct skbuf_dma_descriptor *skbuf_dma;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
+ if (!skbuf_dma)
+ return;
+
+ lp->rx_ring_head++;
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ return;
+
+ sg_init_table(skbuf_dma->sgl, 1);
+ addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lp->dev, addr))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "DMA mapping error\n");
+ goto rx_submit_err_free_skb;
+ }
+ sg_dma_address(skbuf_dma->sgl) = addr;
+ sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
+ dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
+ 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma_rx_desc)
+ goto rx_submit_err_unmap_skb;
+
+ skbuf_dma->skb = skb;
+ skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
+ skbuf_dma->desc = dma_rx_desc;
+ dma_rx_desc->callback_param = lp;
+ dma_rx_desc->callback_result = axienet_dma_rx_cb;
+ dmaengine_submit(dma_rx_desc);
+
+ return;
+
+rx_submit_err_unmap_skb:
+ dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
+rx_submit_err_free_skb:
+ dev_kfree_skb(skb);
+}
+
+/**
+ * axienet_init_dmaengine - init the dmaengine code.
+ * @ndev: Pointer to net_device structure
*
* Return: 0, on success.
- * non-zero error value on failure
+ * non-zero error value on failure
*
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
- * It also allocates interrupt service routines, enables the interrupt lines
- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
- * descriptors are initialized.
+ * This is the dmaengine initialization code.
*/
-static int axienet_open(struct net_device *ndev)
+static int axienet_init_dmaengine(struct net_device *ndev)
{
- int ret;
struct axienet_local *lp = netdev_priv(ndev);
+ struct skbuf_dma_descriptor *skbuf_dma;
+ int i, ret;
- dev_dbg(&ndev->dev, "axienet_open()\n");
+ lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+ if (IS_ERR(lp->tx_chan)) {
+ dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
+ return PTR_ERR(lp->tx_chan);
+ }
- /* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- axienet_lock_mii(lp);
- ret = axienet_device_reset(ndev);
- axienet_unlock_mii(lp);
+ lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
+ if (IS_ERR(lp->rx_chan)) {
+ ret = PTR_ERR(lp->rx_chan);
+ dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
+ goto err_dma_release_tx;
+ }
- ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
- if (ret) {
- dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
- return ret;
+ lp->tx_ring_tail = 0;
+ lp->tx_ring_head = 0;
+ lp->rx_ring_tail = 0;
+ lp->rx_ring_head = 0;
+ lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
+ GFP_KERNEL);
+ if (!lp->tx_skb_ring) {
+ ret = -ENOMEM;
+ goto err_dma_release_rx;
+ }
+ for (i = 0; i < TX_BD_NUM_MAX; i++) {
+ skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
+ if (!skbuf_dma) {
+ ret = -ENOMEM;
+ goto err_free_tx_skb_ring;
+ }
+ lp->tx_skb_ring[i] = skbuf_dma;
}
- phylink_start(lp->phylink);
+ lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
+ GFP_KERNEL);
+ if (!lp->rx_skb_ring) {
+ ret = -ENOMEM;
+ goto err_free_tx_skb_ring;
+ }
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
+ skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
+ if (!skbuf_dma) {
+ ret = -ENOMEM;
+ goto err_free_rx_skb_ring;
+ }
+ lp->rx_skb_ring[i] = skbuf_dma;
+ }
+ /* TODO: Instead of RX_BUF_NUM_DEFAULT use runtime support */
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+ axienet_rx_submit_desc(ndev);
+ dma_async_issue_pending(lp->rx_chan);
+
+ return 0;
+
+err_free_rx_skb_ring:
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+ kfree(lp->rx_skb_ring[i]);
+ kfree(lp->rx_skb_ring);
+err_free_tx_skb_ring:
+ for (i = 0; i < TX_BD_NUM_MAX; i++)
+ kfree(lp->tx_skb_ring[i]);
+ kfree(lp->tx_skb_ring);
+err_dma_release_rx:
+ dma_release_channel(lp->rx_chan);
+err_dma_release_tx:
+ dma_release_channel(lp->tx_chan);
+ return ret;
+}
+
+/**
+ * axienet_init_legacy_dma - init the legacy DMA code.
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ * non-zero error value on failure
+ *
+ * This is the legacy DMA initialization code. It also registers the
+ * interrupt service routines, and enables the interrupt lines and ISR
+ * handling.
+ *
+ */
+static int axienet_init_legacy_dma(struct net_device *ndev)
+{
+ int ret;
+ struct axienet_local *lp = netdev_priv(ndev);
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
@@ -1193,14 +1491,77 @@ err_rx_irq:
err_tx_irq:
napi_disable(&lp->napi_tx);
napi_disable(&lp->napi_rx);
- phylink_stop(lp->phylink);
- phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
/**
+ * axienet_open - Driver open routine.
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ * non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+ int ret;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ dev_dbg(&ndev->dev, "%s\n", __func__);
+
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ */
+ axienet_lock_mii(lp);
+ ret = axienet_device_reset(ndev);
+ axienet_unlock_mii(lp);
+
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ if (ret) {
+ dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+ return ret;
+ }
+
+ phylink_start(lp->phylink);
+
+ if (lp->use_dmaengine) {
+ /* Enable interrupts for Axi Ethernet core (if defined) */
+ if (lp->eth_irq > 0) {
+ ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret)
+ goto err_phy;
+ }
+
+ ret = axienet_init_dmaengine(ndev);
+ if (ret < 0)
+ goto err_free_eth_irq;
+ } else {
+ ret = axienet_init_legacy_dma(ndev);
+ if (ret)
+ goto err_phy;
+ }
+
+ return 0;
+
+err_free_eth_irq:
+ if (lp->eth_irq > 0)
+ free_irq(lp->eth_irq, ndev);
+err_phy:
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+ return ret;
+}
+
+/**
* axienet_stop - Driver stop routine.
* @ndev: Pointer to net_device structure
*
@@ -1213,11 +1574,14 @@ err_tx_irq:
static int axienet_stop(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
+ int i;
dev_dbg(&ndev->dev, "axienet_close()\n");
- napi_disable(&lp->napi_tx);
- napi_disable(&lp->napi_rx);
+ if (!lp->use_dmaengine) {
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+ }
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1225,18 +1589,33 @@ static int axienet_stop(struct net_device *ndev)
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- axienet_dma_stop(lp);
+ if (!lp->use_dmaengine) {
+ axienet_dma_stop(lp);
+ cancel_work_sync(&lp->dma_err_task);
+ free_irq(lp->tx_irq, ndev);
+ free_irq(lp->rx_irq, ndev);
+ axienet_dma_bd_release(ndev);
+ } else {
+ dmaengine_terminate_sync(lp->tx_chan);
+ dmaengine_synchronize(lp->tx_chan);
+ dmaengine_terminate_sync(lp->rx_chan);
+ dmaengine_synchronize(lp->rx_chan);
+
+ for (i = 0; i < TX_BD_NUM_MAX; i++)
+ kfree(lp->tx_skb_ring[i]);
+ kfree(lp->tx_skb_ring);
+ for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+ kfree(lp->rx_skb_ring[i]);
+ kfree(lp->rx_skb_ring);
+
+ dma_release_channel(lp->rx_chan);
+ dma_release_channel(lp->tx_chan);
+ }
axienet_iow(lp, XAE_IE_OFFSET, 0);
- cancel_work_sync(&lp->dma_err_task);
-
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
- free_irq(lp->tx_irq, ndev);
- free_irq(lp->rx_irq, ndev);
-
- axienet_dma_bd_release(ndev);
return 0;
}
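On the dmaengine branch of axienet_stop(), the ordering is the point: dmaengine_terminate_sync() aborts in-flight descriptors and waits for the terminate to complete, and dmaengine_synchronize() additionally ensures that no completion callback (axienet_dma_tx_cb()/axienet_dma_rx_cb()) is still running. Only then is it safe to free the skb rings and release the channels; condensed:

    dmaengine_terminate_sync(lp->tx_chan);  /* abort in-flight work */
    dmaengine_synchronize(lp->tx_chan);     /* callbacks have finished */
    /* ... same for rx_chan, then kfree() the tx/rx skb rings and
     * dma_release_channel() both channels, as above.
     */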
@@ -1333,6 +1712,18 @@ static const struct net_device_ops axienet_netdev_ops = {
#endif
};
+static const struct net_device_ops axienet_netdev_dmaengine_ops = {
+ .ndo_open = axienet_open,
+ .ndo_stop = axienet_stop,
+ .ndo_start_xmit = axienet_start_xmit_dmaengine,
+ .ndo_get_stats64 = axienet_get_stats64,
+ .ndo_change_mtu = axienet_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = axienet_ioctl,
+ .ndo_set_rx_mode = axienet_set_multicast_list,
+};
+
/**
* axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
* @ndev: Pointer to net_device structure
@@ -1412,14 +1803,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
- data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
- data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
- data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
- data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+ if (!lp->use_dmaengine) {
+ data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+ }
}
static void
@@ -1863,7 +2256,6 @@ static int axienet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
- ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
/* MTU range: 64 - 9000 */
@@ -1880,9 +2272,6 @@ static int axienet_probe(struct platform_device *pdev)
u64_stats_init(&lp->rx_stat_sync);
u64_stats_init(&lp->tx_stat_sync);
- netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
- netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
-
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2008,82 +2397,118 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
}
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (np) {
- struct resource dmares;
+ if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev,
- "unable to get DMA resource\n");
+ if (np) {
+ struct resource dmares;
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get DMA resource\n");
+ of_node_put(np);
+ goto cleanup_clk;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
+ } else {
+ /* Check for these resources directly on the Ethernet node. */
+ lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ lp->rx_irq = platform_get_irq(pdev, 1);
+ lp->tx_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
+ }
+ if (IS_ERR(lp->dma_regs)) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = PTR_ERR(lp->dma_regs);
+ goto cleanup_clk;
+ }
+ if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
+ dev_err(&pdev->dev, "could not determine irqs\n");
+ ret = -ENOMEM;
goto cleanup_clk;
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev,
- &dmares);
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
- lp->eth_irq = platform_get_irq_optional(pdev, 0);
- } else {
- /* Check for these resources directly on the Ethernet node. */
- lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
- lp->rx_irq = platform_get_irq(pdev, 1);
- lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq_optional(pdev, 2);
- }
- if (IS_ERR(lp->dma_regs)) {
- dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto cleanup_clk;
- }
- if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto cleanup_clk;
- }
- /* Reset core now that clocks are enabled, prior to accessing MDIO */
- ret = __axienet_device_reset(lp);
- if (ret)
- goto cleanup_clk;
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+ * and see if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
- /* Autodetect the need for 64-bit DMA pointers.
- * When the IP is configured for a bus width bigger than 32 bits,
- * writing the MSB registers is mandatory, even if they are all 0.
- * We can detect this case by writing all 1's to one such register
- * and see if that sticks: when the IP is configured for 32 bits
- * only, those registers are RES0.
- * Those MSB registers were introduced in IP v7.1, which we check first.
- */
- if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
- void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
- iowrite32(0x0, desc);
- if (ioread32(desc) == 0) { /* sanity check */
- iowrite32(0xffffffff, desc);
- if (ioread32(desc) > 0) {
- lp->features |= XAE_FEATURE_DMA_64BIT;
- addr_width = 64;
- dev_info(&pdev->dev,
- "autodetected 64-bit DMA range\n");
- }
iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
+ if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
}
- }
- if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
- dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
- ret = -EINVAL;
- goto cleanup_clk;
- }
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
- if (ret) {
- dev_err(&pdev->dev, "No suitable DMA available\n");
- goto cleanup_clk;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto cleanup_clk;
+ }
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+ } else {
+ struct xilinx_vdma_config cfg;
+ struct dma_chan *tx_chan;
+
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
+ if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
+ ret = lp->eth_irq;
+ goto cleanup_clk;
+ }
+ tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+ if (IS_ERR(tx_chan)) {
+ ret = PTR_ERR(tx_chan);
+ dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
+ goto cleanup_clk;
+ }
+
+ cfg.reset = 1;
+ /* Despite the VDMA name, this call also supports DMA channel reset */
+ ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Reset channel failed\n");
+ dma_release_channel(tx_chan);
+ goto cleanup_clk;
+ }
+
+ dma_release_channel(tx_chan);
+ lp->use_dmaengine = 1;
}
+ if (lp->use_dmaengine)
+ ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
+ else
+ ndev->netdev_ops = &axienet_netdev_ops;
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
@@ -2099,8 +2524,8 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
ret = axienet_mdio_setup(lp);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 2b6a607ac..a273362c9 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -153,6 +153,7 @@ static const struct pci_device_id skfddi_pci_tbl[] = {
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
+MODULE_DESCRIPTION("SysKonnect FDDI PCI driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index cd8cf0847..5fbe33a09 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1438,7 +1438,7 @@ err_out:
}
/* fjes_remove - Device Removal Routine */
-static int fjes_remove(struct platform_device *plat_dev)
+static void fjes_remove(struct platform_device *plat_dev)
{
struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
struct fjes_adapter *adapter = netdev_priv(netdev);
@@ -1462,8 +1462,6 @@ static int fjes_remove(struct platform_device *plat_dev)
netif_napi_del(&adapter->napi);
free_netdev(netdev);
-
- return 0;
}
static struct platform_driver fjes_driver = {
@@ -1471,7 +1469,7 @@ static struct platform_driver fjes_driver = {
.name = DRV_NAME,
},
.probe = fjes_probe,
- .remove = fjes_remove,
+ .remove_new = fjes_remove,
};
static acpi_status
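fjes (like fakelb and mac802154_hwsim below) moves to the platform-driver .remove_new callback. The old int-returning .remove was misleading: the driver core ignored its return value and unbound the device regardless, so removal could never really fail. An abridged sketch of the relevant part of struct platform_driver from <linux/platform_device.h>:

    struct platform_driver {
            int (*probe)(struct platform_device *);
            int (*remove)(struct platform_device *);      /* legacy, return ignored */
            void (*remove_new)(struct platform_device *); /* preferred, void */
            /* ... */
    };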
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 356da958e..7f00fca0c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -234,7 +234,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst) {
- geneve->dev->stats.rx_dropped++;
+ DEV_STATS_INC(geneve->dev, rx_dropped);
goto drop;
}
/* Update tunnel dst according to Geneve options. */
@@ -246,8 +246,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
* since we don't support any...
*/
if (gnvh->critical) {
- geneve->dev->stats.rx_frame_errors++;
- geneve->dev->stats.rx_errors++;
+ DEV_STATS_INC(geneve->dev, rx_frame_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
goto drop;
}
}
@@ -263,7 +263,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source,
geneve->dev->dev_addr)) {
- geneve->dev->stats.rx_errors++;
+ DEV_STATS_INC(geneve->dev, rx_errors);
goto drop;
}
} else {
@@ -310,8 +310,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
#endif
}
if (err > 1) {
- ++geneve->dev->stats.rx_frame_errors;
- ++geneve->dev->stats.rx_errors;
+ DEV_STATS_INC(geneve->dev, rx_frame_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
goto drop;
}
}
@@ -349,6 +349,7 @@ static int geneve_init(struct net_device *dev)
gro_cells_destroy(&geneve->gro_cells);
return err;
}
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -391,14 +392,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely((!geneve->cfg.inner_proto_inherit &&
inner_proto != htons(ETH_P_TEB)))) {
- geneve->dev->stats.rx_dropped++;
+ DEV_STATS_INC(geneve->dev, rx_dropped);
goto drop;
}
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
- geneve->dev->stats.rx_dropped++;
+ DEV_STATS_INC(geneve->dev, rx_dropped);
goto drop;
}
@@ -829,7 +830,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs4)
@@ -936,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
if (!gs6)
@@ -1021,7 +1022,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
dev_kfree_skb(skb);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
} else {
@@ -1044,11 +1045,11 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
return NETDEV_TX_OK;
}
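Two independent geneve changes sit in this file: the counters move to DEV_STATS_INC(), whose atomic update cannot lose increments when concurrent RX/TX paths race on the same field, and the xmit paths switch from pskb_inet_may_pull() to skb_vlan_inet_prepare(), which additionally accounts for a VLAN header in front of the inner IP header. A sketch of the stats macro as defined in <linux/netdevice.h> (abridged):

    /* each net_device_stats field is paired with an atomic_long_t alias,
     * so lockless updaters go through the atomic helpers:
     */
    #define DEV_STATS_INC(DEV, FIELD) \
            atomic_long_inc(&(DEV)->stats.__##FIELD)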
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 2b5357d94..63f932256 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1111,11 +1111,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
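The gtp_dellink() fix replaces an RCU iterator with the _safe variant because pdp_context_delete() unlinks the entry currently being visited; hlist_for_each_entry_safe() loads the next node before the loop body can free the current one. The generic shape of the pattern (pos/head/member/delete are illustrative):

    struct hlist_node *next;

    hlist_for_each_entry_safe(pos, next, head, member)
            delete(pos);    /* may hlist_del() and free pos */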
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9d2d66a4a..11831a1c9 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1586,10 +1586,10 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
- ethtool_sprintf(&p, netvsc_stats[i].name);
+ ethtool_puts(&p, netvsc_stats[i].name);
for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
- ethtool_sprintf(&p, vf_stats[i].name);
+ ethtool_puts(&p, vf_stats[i].name);
for (i = 0; i < nvdev->num_chn; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
@@ -1756,8 +1756,8 @@ static u32 netvsc_rss_indir_size(struct net_device *dev)
return ndc->rx_table_sz;
}
-static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
- u8 *hfunc)
+static int netvsc_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
@@ -1767,47 +1767,49 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
if (!ndev)
return -ENODEV;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
rndis_dev = ndev->extension;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
- indir[i] = ndc->rx_table[i];
+ rxfh->indir[i] = ndc->rx_table[i];
}
- if (key)
- memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
+ if (rxfh->key)
+ memcpy(rxfh->key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
return 0;
}
-static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int netvsc_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
+ u8 *key = rxfh->key;
int i;
if (!ndev)
return -ENODEV;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
rndis_dev = ndev->extension;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
- if (indir[i] >= ndev->num_chn)
+ if (rxfh->indir[i] >= ndev->num_chn)
return -EINVAL;
for (i = 0; i < ndc->rx_table_sz; i++)
- ndc->rx_table[i] = indir[i];
+ ndc->rx_table[i] = rxfh->indir[i];
}
if (!key) {
- if (!indir)
+ if (!rxfh->indir)
return 0;
key = rndis_dev->rss_key;
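netvsc is converted to the consolidated RSS ethtool API: get/set_rxfh now take a single struct ethtool_rxfh_param (plus a netlink extack on set) instead of separate indir/key/hfunc arguments. An abridged sketch of the parameter struct from <linux/ethtool.h>:

    struct ethtool_rxfh_param {
            u8      hfunc;          /* e.g. ETH_RSS_HASH_TOP */
            u32     indir_size;
            u32     *indir;         /* indirection table, or NULL */
            u8      key_size;
            u8      *key;           /* hash key, or NULL */
            /* ... */
    };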
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index af95947a8..ecc2128ca 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -21,7 +21,6 @@
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 523d13ee0..2930141d7 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -221,7 +221,7 @@ err_slave:
return err;
}
-static int fakelb_remove(struct platform_device *pdev)
+static void fakelb_remove(struct platform_device *pdev)
{
struct fakelb_phy *phy, *tmp;
@@ -229,14 +229,13 @@ static int fakelb_remove(struct platform_device *pdev)
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
mutex_unlock(&fakelb_phys_lock);
- return 0;
}
static struct platform_device *ieee802154fake_dev;
static struct platform_driver ieee802154fake_driver = {
.probe = fakelb_probe,
- .remove = fakelb_remove,
+ .remove_new = fakelb_remove,
.driver = {
.name = "ieee802154fakelb",
},
@@ -260,4 +259,5 @@ static __exit void fake_remove_module(void)
module_init(fakelb_init_module);
module_exit(fake_remove_module);
+MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 31cba9aa7..2c2483bbe 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -1035,7 +1035,7 @@ err_slave:
return err;
}
-static int hwsim_remove(struct platform_device *pdev)
+static void hwsim_remove(struct platform_device *pdev)
{
struct hwsim_phy *phy, *tmp;
@@ -1043,13 +1043,11 @@ static int hwsim_remove(struct platform_device *pdev)
list_for_each_entry_safe(phy, tmp, &hwsim_phys, list)
hwsim_del(phy);
mutex_unlock(&hwsim_phys_lock);
-
- return 0;
}
static struct platform_driver mac802154hwsim_driver = {
.probe = hwsim_probe,
- .remove = hwsim_remove,
+ .remove_new = hwsim_remove,
.driver = {
.name = "mac802154_hwsim",
},
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 7293d5cc2..d3abb3863 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -2,12 +2,12 @@
#
# Makefile for the Qualcomm IPA driver.
-IPA_REG_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11 5.0
+IPA_REG_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11 5.0 5.5
# Some IPA versions can reuse another set of GSI register definitions.
GSI_REG_VERSIONS := 3.1 3.5.1 4.0 4.5 4.9 4.11 5.0
-IPA_DATA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11 5.0
+IPA_DATA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11 5.0 5.5
obj-$(CONFIG_QCOM_IPA) += ipa.o
diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c
new file mode 100644
index 000000000..2c6390f11
--- /dev/null
+++ b/drivers/net/ipa/data/ipa_data-v5.5.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
+
+/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.5 */
+enum ipa_resource_type {
+ /* Source resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS = 0,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS,
+ IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF,
+ IPA_RESOURCE_TYPE_SRC_HPS_DMARS,
+ IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES,
+
+ /* Destination resource types; first must have value 0 */
+ IPA_RESOURCE_TYPE_DST_DATA_SECTORS = 0,
+ IPA_RESOURCE_TYPE_DST_DPS_DMARS,
+ IPA_RESOURCE_TYPE_DST_ULSO_SEGMENTS,
+};
+
+/* Resource groups used for an SoC having IPA v5.5 */
+enum ipa_rsrc_group_id {
+ /* Source resource group identifiers */
+ IPA_RSRC_GROUP_SRC_UL = 0,
+ IPA_RSRC_GROUP_SRC_DL,
+ IPA_RSRC_GROUP_SRC_UNUSED_2,
+ IPA_RSRC_GROUP_SRC_UNUSED_3,
+ IPA_RSRC_GROUP_SRC_URLLC,
+ IPA_RSRC_GROUP_SRC_U_RX_QC,
+ IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
+
+ /* Destination resource group identifiers */
+ IPA_RSRC_GROUP_DST_UL = 0,
+ IPA_RSRC_GROUP_DST_DL,
+ IPA_RSRC_GROUP_DST_UNUSED_2,
+ IPA_RSRC_GROUP_DST_UNUSED_3,
+ IPA_RSRC_GROUP_DST_UNUSED_4,
+ IPA_RSRC_GROUP_DST_UC,
+ IPA_RSRC_GROUP_DST_DRB_IP,
+ IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
+};
+
+/* QSB configuration data for an SoC having IPA v5.5 */
+static const struct ipa_qsb_data ipa_qsb_data[] = {
+ [IPA_QSB_MASTER_DDR] = {
+ .max_writes = 0, /* Unlimited */
+ .max_reads = 12,
+ .max_reads_beats = 0,
+ },
+ [IPA_QSB_MASTER_PCIE] = {
+ .max_writes = 0, /* Unlimited */
+ .max_reads = 8,
+ .max_reads_beats = 0,
+ },
+};
+
+/* Endpoint configuration data for an SoC having IPA v5.5 */
+static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
+ [IPA_ENDPOINT_AP_COMMAND_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 12,
+ .endpoint_id = 14,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 20,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .dma_mode = true,
+ .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
+ .tx = {
+ .seq_type = IPA_SEQ_DMA,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_LAN_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 13,
+ .endpoint_id = 16,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_UL,
+ .aggregation = true,
+ .status_enable = true,
+ .rx = {
+ .buffer_size = 8192,
+ .pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_TX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 11,
+ .endpoint_id = 2,
+ .toward_ipa = true,
+ .channel = {
+ .tre_count = 512,
+ .event_count = 512,
+ .tlv_count = 25,
+ },
+ .endpoint = {
+ .filter_support = true,
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_SRC_UL,
+ .checksum = true,
+ .qmap = true,
+ .status_enable = true,
+ .tx = {
+ .seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
+ .status_endpoint =
+ IPA_ENDPOINT_MODEM_AP_RX,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_AP_MODEM_RX] = {
+ .ee_id = GSI_EE_AP,
+ .channel_id = 1,
+ .endpoint_id = 23,
+ .toward_ipa = false,
+ .channel = {
+ .tre_count = 256,
+ .event_count = 256,
+ .tlv_count = 9,
+ },
+ .endpoint = {
+ .config = {
+ .resource_group = IPA_RSRC_GROUP_DST_DL,
+ .checksum = true,
+ .qmap = true,
+ .aggregation = true,
+ .rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
+ .aggr_close_eof = true,
+ },
+ },
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 0,
+ .endpoint_id = 12,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+ [IPA_ENDPOINT_MODEM_AP_RX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 7,
+ .endpoint_id = 21,
+ .toward_ipa = false,
+ },
+ [IPA_ENDPOINT_MODEM_DL_NLO_TX] = {
+ .ee_id = GSI_EE_MODEM,
+ .channel_id = 2,
+ .endpoint_id = 15,
+ .toward_ipa = true,
+ .endpoint = {
+ .filter_support = true,
+ },
+ },
+};
+
+/* Source resource configuration data for an SoC having IPA v5.5 */
+static const struct ipa_resource ipa_resource_src[] = {
+ [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 3, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 4, .max = 10,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_U_RX_QC] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_LISTS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 9, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 12, .max = 12,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 10, .max = 10,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_DESCRIPTOR_BUFF] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 9, .max = 9,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 24, .max = 24,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 20, .max = 20,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_HPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 1, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_U_RX_QC] = {
+ .min = 0, .max = 63,
+ },
+ },
+ [IPA_RESOURCE_TYPE_SRC_ACK_ENTRIES] = {
+ .limits[IPA_RSRC_GROUP_SRC_UL] = {
+ .min = 22, .max = 22,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_DL] = {
+ .min = 16, .max = 16,
+ },
+ .limits[IPA_RSRC_GROUP_SRC_URLLC] = {
+ .min = 16, .max = 16,
+ },
+ },
+};
+
+/* Destination resource configuration data for an SoC having IPA v5.5 */
+static const struct ipa_resource ipa_resource_dst[] = {
+ [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 6, .max = 6,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 5, .max = 5,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DRB_IP] = {
+ .min = 39, .max = 39,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 3,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 3,
+ },
+ },
+ [IPA_RESOURCE_TYPE_DST_ULSO_SEGMENTS] = {
+ .limits[IPA_RSRC_GROUP_DST_UL] = {
+ .min = 0, .max = 63,
+ },
+ .limits[IPA_RSRC_GROUP_DST_DL] = {
+ .min = 0, .max = 63,
+ },
+ },
+};
+
+/* Resource configuration data for an SoC having IPA v5.5 */
+static const struct ipa_resource_data ipa_resource_data = {
+ .rsrc_group_dst_count = IPA_RSRC_GROUP_DST_COUNT,
+ .rsrc_group_src_count = IPA_RSRC_GROUP_SRC_COUNT,
+ .resource_src_count = ARRAY_SIZE(ipa_resource_src),
+ .resource_src = ipa_resource_src,
+ .resource_dst_count = ARRAY_SIZE(ipa_resource_dst),
+ .resource_dst = ipa_resource_dst,
+};
+
+/* IPA-resident memory region data for an SoC having IPA v5.5 */
+static const struct ipa_mem ipa_mem_local_data[] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
+ .offset = 0x0000,
+ .size = 0x1000,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_SHARED,
+ .offset = 0x1000,
+ .size = 0x0080,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_UC_INFO,
+ .offset = 0x1080,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x1288,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_FILTER,
+ .offset = 0x1308,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
+ .offset = 0x1388,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_FILTER,
+ .offset = 0x1408,
+ .size = 0x0078,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
+ .offset = 0x1488,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V4_ROUTE,
+ .offset = 0x1528,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
+ .offset = 0x15c8,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_V6_ROUTE,
+ .offset = 0x1668,
+ .size = 0x0098,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_MODEM_HEADER,
+ .offset = 0x1708,
+ .size = 0x0240,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_HEADER,
+ .offset = 0x1948,
+ .size = 0x01e0,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
+ .offset = 0x1b40,
+ .size = 0x0b20,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
+ .offset = 0x2660,
+ .size = 0x0200,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
+ .offset = 0x2868,
+ .size = 0x0060,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
+ .offset = 0x28c8,
+ .size = 0x0048,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_TETHERING,
+ .offset = 0x2910,
+ .size = 0x03c0,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_AP_V4_FILTER,
+ .offset = 0x29b8,
+ .size = 0x0188,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_AP_V6_FILTER,
+ .offset = 0x2b40,
+ .size = 0x0228,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
+ .offset = 0x2cd0,
+ .size = 0x0ba0,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_STATS_DROP,
+ .offset = 0x3870,
+ .size = 0x0020,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_MODEM,
+ .offset = 0x3898,
+ .size = 0x0d48,
+ .canary_count = 2,
+ },
+ {
+ .id = IPA_MEM_NAT_TABLE,
+ .offset = 0x45e0,
+ .size = 0x0900,
+ .canary_count = 0,
+ },
+ {
+ .id = IPA_MEM_PDN_CONFIG,
+ .offset = 0x4ee8,
+ .size = 0x0100,
+ .canary_count = 2,
+ },
+};
+
+/* Memory configuration data for an SoC having IPA v5.5 */
+static const struct ipa_mem_data ipa_mem_data = {
+ .local_count = ARRAY_SIZE(ipa_mem_local_data),
+ .local = ipa_mem_local_data,
+ .imem_addr = 0x14688000,
+ .imem_size = 0x00002000,
+ .smem_id = 497,
+ .smem_size = 0x0000b000,
+};
+
+/* Interconnect rates are in 1000 byte/second units */
+static const struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 1900000, /* 1.9 GBps */
+ .average_bandwidth = 600000, /* 600 MBps */
+ },
+ /* Average rate is unused for the next interconnect */
+ {
+ .name = "config",
+ .peak_bandwidth = 76800, /* 76.8 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
+/* Clock and interconnect configuration data for an SoC having IPA v5.5 */
+static const struct ipa_power_data ipa_power_data = {
+ .core_clock_rate = 120 * 1000 * 1000, /* Hz */
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
+};
+
+/* Configuration data for an SoC having IPA v5.5. */
+const struct ipa_data ipa_data_v5_5 = {
+ .version = IPA_VERSION_5_5,
+ .qsb_count = ARRAY_SIZE(ipa_qsb_data),
+ .qsb_data = ipa_qsb_data,
+ .modem_route_count = 11,
+ .endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
+ .endpoint_data = ipa_gsi_endpoint_data,
+ .resource_data = &ipa_resource_data,
+ .mem_data = &ipa_mem_data,
+ .power_data = &ipa_power_data,
+};
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
index c5458e28b..106c43884 100644
--- a/drivers/net/ipa/gsi_reg.c
+++ b/drivers/net/ipa/gsi_reg.c
@@ -110,6 +110,7 @@ static const struct regs *gsi_regs(struct gsi *gsi)
return &gsi_regs_v4_11;
case IPA_VERSION_5_0:
+ case IPA_VERSION_5_5:
return &gsi_regs_v5_0;
default:
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index ce82b00fd..2a1605e67 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -250,5 +250,6 @@ extern const struct ipa_data ipa_data_v4_7;
extern const struct ipa_data ipa_data_v4_9;
extern const struct ipa_data ipa_data_v4_11;
extern const struct ipa_data ipa_data_v5_0;
+extern const struct ipa_data ipa_data_v5_5;
#endif /* _IPA_DATA_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index da853353a..00475fd7a 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -74,6 +74,7 @@
#define IPA_PAS_ID 15
/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
+/* IPA v5.5+ does not specify Qtime timestamp config for DPL */
#define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */
#define TAG_TIMESTAMP_SHIFT 14
#define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */
@@ -376,9 +377,11 @@ static void ipa_qtime_config(struct ipa *ipa)
iowrite32(0, ipa->reg_virt + reg_offset(reg));
reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
- /* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
- val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
+ if (ipa->version < IPA_VERSION_5_5) {
+ /* Set DPL time stamp resolution to use Qtime (not 1 msec) */
+ val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
+ }
/* Configure tag and NAT Qtime timestamp resolution as well */
val = reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
val = reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
@@ -688,6 +691,10 @@ static const struct of_device_id ipa_match[] = {
.compatible = "qcom,sdx65-ipa",
.data = &ipa_data_v5_0,
},
+ {
+ .compatible = "qcom,sm8550-ipa",
+ .data = &ipa_data_v5_5,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ipa_match);
@@ -936,7 +943,7 @@ err_power_exit:
return ret;
}
-static int ipa_remove(struct platform_device *pdev)
+static void ipa_remove(struct platform_device *pdev)
{
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
struct ipa_power *power = ipa->power;
@@ -959,8 +966,16 @@ static int ipa_remove(struct platform_device *pdev)
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
ret = ipa_modem_stop(ipa);
}
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * Returning here without tearing down leaks the
+ * remaining resources. Since the device is unbound
+ * regardless, this may well lead to a crash later on.
+ */
+ dev_err(dev, "Failed to stop modem (%pe), leaking resources\n",
+ ERR_PTR(ret));
+ return;
+ }
ipa_teardown(ipa);
}
@@ -978,17 +993,6 @@ out_power_put:
ipa_power_exit(power);
dev_info(dev, "IPA driver removed");
-
- return 0;
-}
-
-static void ipa_shutdown(struct platform_device *pdev)
-{
- int ret;
-
- ret = ipa_remove(pdev);
- if (ret)
- dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
}
static const struct attribute_group *ipa_attribute_groups[] = {
@@ -1001,8 +1005,8 @@ static const struct attribute_group *ipa_attribute_groups[] = {
static struct platform_driver ipa_driver = {
.probe = ipa_probe,
- .remove = ipa_remove,
- .shutdown = ipa_shutdown,
+ .remove_new = ipa_remove,
+ .shutdown = ipa_remove,
.driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
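For context: each compatible string in ipa_match[] carries a pointer to its
per-SoC configuration in the of_device_id .data field, so probing a
"qcom,sm8550-ipa" node selects ipa_data_v5_5 without any string comparison in
the driver. A sketch of the retrieval side, with the surrounding ipa_probe()
logic trimmed:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static int ipa_probe(struct platform_device *pdev)
    {
            const struct ipa_data *data;

            /* Returns the .data pointer of the matched ipa_match[] entry */
            data = of_device_get_match_data(&pdev->dev);
            if (!data)
                    return -ENODEV;

            /* data->version is IPA_VERSION_5_5 for "qcom,sm8550-ipa" */
            return 0;
    }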
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index db6ada234..694960537 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -165,7 +165,7 @@ static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
case IPA_MEM_AP_V4_FILTER:
case IPA_MEM_AP_V6_FILTER:
- if (version != IPA_VERSION_5_0)
+ if (version < IPA_VERSION_5_0)
return false;
break;
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 818a84f7c..6a3203ae6 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -44,12 +44,12 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
case DST_RSRC_GRP_45_RSRC_TYPE:
return version <= IPA_VERSION_3_1 ||
version == IPA_VERSION_4_5 ||
- version == IPA_VERSION_5_0;
+ version >= IPA_VERSION_5_0;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
return version <= IPA_VERSION_3_1 ||
- version == IPA_VERSION_5_0;
+ version >= IPA_VERSION_5_0;
case ENDP_FILTER_ROUTER_HSH_CFG:
return version < IPA_VERSION_5_0 &&
@@ -125,6 +125,8 @@ static const struct regs *ipa_regs(enum ipa_version version)
return &ipa_regs_v4_11;
case IPA_VERSION_5_0:
return &ipa_regs_v5_0;
+ case IPA_VERSION_5_5:
+ return &ipa_regs_v5_5;
default:
return NULL;
}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 3ac48dea8..2998f115f 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -240,25 +240,25 @@ enum ipa_reg_local_pkt_proc_cntxt_field_id {
/* COUNTER_CFG register */
enum ipa_reg_counter_cfg_field_id {
- EOT_COAL_GRANULARITY, /* Not v3.5+ */
+ EOT_COAL_GRANULARITY, /* Not IPA v3.5+ */
AGGR_GRANULARITY,
};
/* IPA_TX_CFG register */
enum ipa_reg_ipa_tx_cfg_field_id {
- TX0_PREFETCH_DISABLE, /* Not v4.0+ */
- TX1_PREFETCH_DISABLE, /* Not v4.0+ */
- PREFETCH_ALMOST_EMPTY_SIZE, /* Not v4.0+ */
- PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* v4.0+ */
- DMAW_SCND_OUTSD_PRED_THRESHOLD, /* v4.0+ */
- DMAW_SCND_OUTSD_PRED_EN, /* v4.0+ */
- DMAW_MAX_BEATS_256_DIS, /* v4.0+ */
- PA_MASK_EN, /* v4.0+ */
- PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* v4.0+ */
- DUAL_TX_ENABLE, /* v4.5+ */
- SSPND_PA_NO_START_STATE, /* v4,2+, not v4.5 */
- SSPND_PA_NO_BQ_STATE, /* v4.2 only */
- HOLB_STICKY_DROP_EN, /* v5.0+ */
+ TX0_PREFETCH_DISABLE, /* Not IPA v4.0+ */
+ TX1_PREFETCH_DISABLE, /* Not IPA v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE, /* Not IPA v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* IPA v4.0+ */
+ DMAW_SCND_OUTSD_PRED_THRESHOLD, /* IPA v4.0+ */
+ DMAW_SCND_OUTSD_PRED_EN, /* IPA v4.0+ */
+ DMAW_MAX_BEATS_256_DIS, /* IPA v4.0+ */
+ PA_MASK_EN, /* IPA v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* IPA v4.0+ */
+ DUAL_TX_ENABLE, /* IPA v4.5+ */
+ SSPND_PA_NO_START_STATE, /* IPA v4.2+, not IPA v4.5 */
+ SSPND_PA_NO_BQ_STATE, /* IPA v4.2 only */
+ HOLB_STICKY_DROP_EN, /* IPA v5.0+ */
};
/* FLAVOR_0 register */
@@ -277,8 +277,8 @@ enum ipa_reg_idle_indication_cfg_field_id {
/* QTIME_TIMESTAMP_CFG register */
enum ipa_reg_qtime_timestamp_cfg_field_id {
- DPL_TIMESTAMP_LSB,
- DPL_TIMESTAMP_SEL,
+ DPL_TIMESTAMP_LSB, /* Not IPA v5.5+ */
+ DPL_TIMESTAMP_SEL, /* Not IPA v5.5+ */
TAG_TIMESTAMP_LSB,
NAT_TIMESTAMP_LSB,
};
@@ -319,8 +319,8 @@ enum ipa_reg_rsrc_grp_rsrc_type_field_id {
/* ENDP_INIT_CTRL register */
enum ipa_reg_endp_init_ctrl_field_id {
- ENDP_SUSPEND, /* Not v4.0+ */
- ENDP_DELAY, /* Not v4.2+ */
+ ENDP_SUSPEND, /* Not IPA v4.0+ */
+ ENDP_DELAY, /* Not IPA v4.2+ */
};
/* ENDP_INIT_CFG register */
@@ -329,6 +329,7 @@ enum ipa_reg_endp_init_cfg_field_id {
CS_OFFLOAD_EN,
CS_METADATA_HDR_OFFSET,
CS_GEN_QMB_MASTER_SEL,
+ PIPE_REPLICATE_EN, /* IPA v5.5+ */
};
/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
@@ -359,11 +360,11 @@ enum ipa_reg_endp_init_hdr_field_id {
HDR_ADDITIONAL_CONST_LEN,
HDR_OFST_PKT_SIZE_VALID,
HDR_OFST_PKT_SIZE,
- HDR_A5_MUX, /* Not v4.9+ */
+ HDR_A5_MUX, /* Not IPA v4.9+ */
HDR_LEN_INC_DEAGG_HDR,
- HDR_METADATA_REG_VALID, /* Not v4.5+ */
- HDR_LEN_MSB, /* v4.5+ */
- HDR_OFST_METADATA_MSB, /* v4.5+ */
+ HDR_METADATA_REG_VALID, /* Not IPA v4.5+ */
+ HDR_LEN_MSB, /* IPA v4.5+ */
+ HDR_OFST_METADATA_MSB, /* IPA v4.5+ */
};
/* ENDP_INIT_HDR_EXT register */
@@ -374,23 +375,23 @@ enum ipa_reg_endp_init_hdr_ext_field_id {
HDR_PAYLOAD_LEN_INC_PADDING,
HDR_TOTAL_LEN_OR_PAD_OFFSET,
HDR_PAD_TO_ALIGNMENT,
- HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
- HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
- HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
- HDR_BYTES_TO_REMOVE_VALID, /* v5.0+ */
- HDR_BYTES_TO_REMOVE, /* v5.0+ */
+ HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* IPA v4.5+ */
+ HDR_OFST_PKT_SIZE_MSB, /* IPA v4.5+ */
+ HDR_ADDITIONAL_CONST_LEN_MSB, /* IPA v4.5+ */
+ HDR_BYTES_TO_REMOVE_VALID, /* IPA v5.0+ */
+ HDR_BYTES_TO_REMOVE, /* IPA v5.0+ */
};
/* ENDP_INIT_MODE register */
enum ipa_reg_endp_init_mode_field_id {
ENDP_MODE,
- DCPH_ENABLE, /* v4.5+ */
+ DCPH_ENABLE, /* IPA v4.5+ */
DEST_PIPE_INDEX,
BYTE_THRESHOLD,
- PIPE_REPLICATION_EN,
+ PIPE_REPLICATION_EN, /* Not IPA v5.5+ */
PAD_EN,
- HDR_FTCH_DISABLE, /* v4.5+ */
- DRBIP_ACL_ENABLE, /* v4.9+ */
+ HDR_FTCH_DISABLE, /* IPA v4.5+ */
+ DRBIP_ACL_ENABLE, /* IPA v4.9+ */
};
/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
@@ -412,6 +413,7 @@ enum ipa_reg_endp_init_aggr_field_id {
FORCE_CLOSE,
HARD_BYTE_LIMIT_EN,
AGGR_GRAN_SEL,
+ AGGR_COAL_L2, /* IPA v5.5+ */
};
/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
@@ -439,10 +441,10 @@ enum ipa_reg_endp_init_hol_block_en_field_id {
/* ENDP_INIT_HOL_BLOCK_TIMER register */
enum ipa_reg_endp_init_hol_block_timer_field_id {
- TIMER_BASE_VALUE, /* Not v4.5+ */
- TIMER_SCALE, /* v4.2 only */
- TIMER_LIMIT, /* v4.5+ */
- TIMER_GRAN_SEL, /* v4.5+ */
+ TIMER_BASE_VALUE, /* Not IPA v4.5+ */
+ TIMER_SCALE, /* IPA v4.2 only */
+ TIMER_LIMIT, /* IPA v4.5+ */
+ TIMER_GRAN_SEL, /* IPA v4.5+ */
};
/* ENDP_INIT_DEAGGR register */
@@ -463,7 +465,7 @@ enum ipa_reg_endp_init_rsrc_grp_field_id {
/* ENDP_INIT_SEQ register */
enum ipa_reg_endp_init_seq_field_id {
SEQ_TYPE,
- SEQ_REP_TYPE, /* Not v4.5+ */
+ SEQ_REP_TYPE, /* Not IPA v4.5+ */
};
/**
@@ -512,8 +514,8 @@ enum ipa_seq_rep_type {
enum ipa_reg_endp_status_field_id {
STATUS_EN,
STATUS_ENDP,
- STATUS_LOCATION, /* Not v4.5+ */
- STATUS_PKT_SUPPRESS, /* v4.0+ */
+ STATUS_LOCATION, /* Not IPA v4.5+ */
+ STATUS_PKT_SUPPRESS, /* IPA v4.0+ */
};
/* ENDP_FILTER_ROUTER_HSH_CFG register */
@@ -585,11 +587,12 @@ enum ipa_reg_endp_cache_cfg_field_id {
* @IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN: (Not currently used)
* @IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN: (Not currently used)
* @IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN: (Not currently used)
+ * @IPA_IRQ_ERROR_NON_FATAL: (Not currently used)
+ * @IPA_IRQ_ERROR_FATAL: (Not currently used)
*/
enum ipa_irq_id {
- IPA_IRQ_BAD_SNOC_ACCESS = 0x0,
- /* The next bit is not present for IPA v3.5+ */
- IPA_IRQ_EOT_COAL = 0x1,
+ IPA_IRQ_BAD_SNOC_ACCESS = 0x0, /* Not IPA v5.5+ */
+ IPA_IRQ_EOT_COAL = 0x1, /* Not IPA v3.5+ */
IPA_IRQ_UC_0 = 0x2,
IPA_IRQ_UC_1 = 0x3,
IPA_IRQ_UC_2 = 0x4,
@@ -597,11 +600,11 @@ enum ipa_irq_id {
IPA_IRQ_UC_IN_Q_NOT_EMPTY = 0x6,
IPA_IRQ_UC_RX_CMD_Q_NOT_FULL = 0x7,
IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY = 0x8,
- IPA_IRQ_RX_ERR = 0x9,
- IPA_IRQ_DEAGGR_ERR = 0xa,
- IPA_IRQ_TX_ERR = 0xb,
- IPA_IRQ_STEP_MODE = 0xc,
- IPA_IRQ_PROC_ERR = 0xd,
+ IPA_IRQ_RX_ERR = 0x9, /* Not IPA v5.5+ */
+ IPA_IRQ_DEAGGR_ERR = 0xa, /* Not IPA v5.5+ */
+ IPA_IRQ_TX_ERR = 0xb, /* Not IPA v5.5+ */
+ IPA_IRQ_STEP_MODE = 0xc, /* Not IPA v5.5+ */
+ IPA_IRQ_PROC_ERR = 0xd, /* Not IPA v5.5+ */
IPA_IRQ_TX_SUSPEND = 0xe,
IPA_IRQ_TX_HOLB_DROP = 0xf,
IPA_IRQ_BAM_GSI_IDLE = 0x10,
@@ -610,17 +613,16 @@ enum ipa_irq_id {
IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13,
IPA_IRQ_PIPE_RED_ABOVE = 0x14,
IPA_IRQ_UCP = 0x15,
- /* The next bit is not present for IPA v4.5+ */
- IPA_IRQ_DCMP = 0x16,
+ IPA_IRQ_DCMP = 0x16, /* Not IPA v4.5+ */
IPA_IRQ_GSI_EE = 0x17,
IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18,
IPA_IRQ_GSI_UC = 0x19,
- /* The next bit is present for IPA v4.5+ */
- IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a,
- /* The next three bits are present for IPA v4.9+ */
- IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN = 0x1b,
- IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN = 0x1c,
- IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN = 0x1d,
+ IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a, /* IPA v4.5-v5.2 */
+ IPA_IRQ_DRBIP_PKT_EXCEED_MAX_SIZE_EN = 0x1b, /* IPA v4.9-v5.2 */
+ IPA_IRQ_DRBIP_DATA_SCTR_CFG_ERROR_EN = 0x1c, /* IPA v4.9-v5.2 */
+ IPA_IRQ_DRBIP_IMM_CMD_NO_FLSH_HZRD_EN = 0x1d, /* IPA v4.9-v5.2 */
+ IPA_IRQ_ERROR_NON_FATAL = 0x1e, /* IPA v5.5+ */
+ IPA_IRQ_ERROR_FATAL = 0x1f, /* IPA v5.5+ */
IPA_IRQ_COUNT, /* Last; not an id */
};
@@ -637,6 +639,7 @@ extern const struct regs ipa_regs_v4_7;
extern const struct regs ipa_regs_v4_9;
extern const struct regs ipa_regs_v4_11;
extern const struct regs ipa_regs_v5_0;
+extern const struct regs ipa_regs_v5_5;
const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 06e75b8ec..38150345b 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -56,6 +56,7 @@ static inline bool ipa_version_supported(enum ipa_version version)
case IPA_VERSION_4_9:
case IPA_VERSION_4_11:
case IPA_VERSION_5_0:
+ case IPA_VERSION_5_5:
return true;
default:
return false;
diff --git a/drivers/net/ipa/reg/ipa_reg-v5.5.c b/drivers/net/ipa/reg/ipa_reg-v5.5.c
new file mode 100644
index 000000000..26ca9c9ba
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v5.5.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#include "../ipa_reg.h"
+#include "../ipa_version.h"
+
+static const u32 reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(7, 0),
+ [MAX_CONS_PIPES] = GENMASK(15, 8),
+ [MAX_PROD_PIPES] = GENMASK(23, 16),
+ [PROD_LOWEST] = GENMASK(31, 24),
+};
+
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000000);
+
+static const u32 reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ /* Bits 17-18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(27, 22),
+ /* Bits 28-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+REG_FIELDS(COMP_CFG, comp_cfg, 0x00000048);
+
+static const u32 reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000050);
+
+static const u32 reg_route_fmask[] = {
+ [ROUTE_DEF_PIPE] = GENMASK(7, 0),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(15, 8),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(25, 16),
+ [ROUTE_DEF_HDR_TABLE] = BIT(26),
+ [ROUTE_DEF_RETAIN_HDR] = BIT(27),
+ [ROUTE_DIS] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+REG_FIELDS(ROUTE, route, 0x00000054);
+
+static const u32 reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x0000005c);
+
+static const u32 reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000070);
+
+static const u32 reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000074);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x00000120, 0x0004);
+
+static const u32 reg_filt_rout_cache_flush_fmask[] = {
+ [ROUTER_CACHE] = BIT(0),
+ /* Bits 1-3 reserved */
+ [FILTER_CACHE] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+REG_FIELDS(FILT_ROUT_CACHE_FLUSH, filt_rout_cache_flush, 0x00000404);
+
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x00000478);
+
+static const u32 reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bit 19 reserved */
+ [HOLB_STICKY_DROP_EN] = BIT(20),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x00000488);
+
+static const u32 reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x000004a8);
+
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
+ /* Bits 0-7 reserved */
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x000004ac);
+
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x000004b0);
+
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ [PULSE_GRAN_3] = GENMASK(11, 9),
+ /* Bits 12-31 reserved */
+};
+
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x000004b4);
+
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000600, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000604, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000608, 0x0020);
+
+static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000060c, 0x0020);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000006b0, 0x0004);
+
+static const u32 reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ [PIPE_REPLICATE_EN] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00001008, 0x0080);
+
+static const u32 reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000100c, 0x0080);
+
+static const u32 reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00001010, 0x0080);
+
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ [HDR_BYTES_TO_REMOVE_VALID] = BIT(22),
+ /* Bit 23 reserved */
+ [HDR_BYTES_TO_REMOVE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00001014, 0x0080);
+
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00001018, 0x0080);
+
+static const u32 reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(11, 4),
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ /* Bit 28 reserved */
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00001020, 0x0080);
+
+static const u32 reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ [AGGR_COAL_L2] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00001024, 0x0080);
+
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000102c, 0x0080);
+
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = GENMASK(9, 8),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00001030, 0x0080);
+
+static const u32 reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00001034, 0x0080);
+
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00001038, 0x0080);
+
+static const u32 reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000103c, 0x0080);
+
+static const u32 reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(8, 1),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00001040, 0x0080);
+
+static const u32 reg_endp_filter_cache_cfg_fmask[] = {
+ [CACHE_MSK_SRC_ID] = BIT(0),
+ [CACHE_MSK_SRC_IP] = BIT(1),
+ [CACHE_MSK_DST_IP] = BIT(2),
+ [CACHE_MSK_SRC_PORT] = BIT(3),
+ [CACHE_MSK_DST_PORT] = BIT(4),
+ [CACHE_MSK_PROTOCOL] = BIT(5),
+ [CACHE_MSK_METADATA] = BIT(6),
+ /* Bits 7-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_FILTER_CACHE_CFG, endp_filter_cache_cfg,
+ 0x0000105c, 0x0080);
+
+static const u32 reg_endp_router_cache_cfg_fmask[] = {
+ [CACHE_MSK_SRC_ID] = BIT(0),
+ [CACHE_MSK_SRC_IP] = BIT(1),
+ [CACHE_MSK_DST_IP] = BIT(2),
+ [CACHE_MSK_SRC_PORT] = BIT(3),
+ [CACHE_MSK_DST_PORT] = BIT(4),
+ [CACHE_MSK_PROTOCOL] = BIT(5),
+ [CACHE_MSK_METADATA] = BIT(6),
+ /* Bits 7-31 reserved */
+};
+
+REG_STRIDE_FIELDS(ENDP_ROUTER_CACHE_CFG, endp_router_cache_cfg,
+ 0x00001060, 0x0080);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x0000c008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000c00c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x0000c010 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000c01c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x0000c030 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x0000c050 + 0x1000 * GSI_EE_AP, 0x0004);
+
+/* Valid bits defined by ipa->available */
+
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x0000c070 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_CACHE_FLUSH] = &reg_filt_rout_cache_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_CACHE_CFG] = &reg_endp_filter_cache_cfg,
+ [ENDP_ROUTER_CACHE_CFG] = &reg_endp_router_cache_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v5_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
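For context: each REG_FIELDS() entry above pairs a register offset with an
fmask[] array whose GENMASK()/BIT() values give the bit span of every field,
and the driver's reg_encode()/reg_bit() helpers (seen in the ipa_main.c hunk
earlier) place values into those spans. Conceptually this is FIELD_PREP() with
a table lookup in front. A sketch of the underlying encoding step, using one
mask from the QTIME_TIMESTAMP_CFG definition above:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)

    static u32 encode_tag_lsb(u32 shift)
    {
            /* Place "shift" into bits 12:8 of the register value */
            return FIELD_PREP(TAG_TIMESTAMP_LSB_FMASK, shift);
    }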
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 57c79f5f2..df7c43a10 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -387,6 +387,7 @@ static const struct header_ops ipvlan_header_ops = {
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
+ .parse_protocol = eth_header_parse_protocol,
};
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
@@ -600,15 +601,15 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
port->dev_id_start = 0x1;
/* Since L2 address is shared among all IPvlan slaves including
- * master, use unique 16 bit dev-ids to diffentiate among them.
+ * master, use unique 16 bit dev-ids to differentiate among them.
* Assign IDs between 0x1 and 0xFFFE (used by the master) to each
* slave link [see addrconf_ifid_eui48()].
*/
- err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
- GFP_KERNEL);
+ err = ida_alloc_range(&port->ida, port->dev_id_start, 0xFFFD,
+ GFP_KERNEL);
if (err < 0)
- err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
- GFP_KERNEL);
+ err = ida_alloc_range(&port->ida, 0x1, port->dev_id_start - 1,
+ GFP_KERNEL);
if (err < 0)
goto unregister_netdev;
dev->dev_id = err;
@@ -640,7 +641,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
remove_ida:
- ida_simple_remove(&port->ida, dev->dev_id);
+ ida_free(&port->ida, dev->dev_id);
unregister_netdev:
unregister_netdevice(dev);
return err;
@@ -660,7 +661,7 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
}
spin_unlock_bh(&ipvlan->addrs_lock);
- ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
+ ida_free(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
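For context: the end argument of ida_simple_get() was exclusive, while the max
argument of ida_alloc_range() is inclusive; that is why 0xFFFE becomes 0xFFFD
and the fallback bound becomes dev_id_start - 1 above. A minimal sketch of the
equivalence (foo_ida and the helper are hypothetical):

    #include <linux/idr.h>

    static DEFINE_IDA(foo_ida);

    /* Old API: ida_simple_get(&foo_ida, start, end_excl, GFP_KERNEL),
     * where end_excl was exclusive. The replacement takes an inclusive
     * maximum, hence the "- 1". */
    static int foo_get_id(unsigned int start, unsigned int end_excl)
    {
            return ida_alloc_range(&foo_ida, start, end_excl - 1,
                                   GFP_KERNEL);
    }

    /* ida_free() likewise replaces ida_simple_remove(). */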
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 60944a4be..1afc4c47b 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -237,4 +237,5 @@ static void __exit ipvtap_exit(void)
module_exit(ipvtap_exit);
MODULE_ALIAS_RTNL_LINK("ipvtap");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_DESCRIPTION("IP-VLAN based tap driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6d53e63e..f6eab66c2 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -144,6 +144,7 @@ static int loopback_dev_init(struct net_device *dev)
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 9663050a8..ac3f6801c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -93,6 +93,8 @@ struct pcpu_secy_stats {
* @secys: linked list of SecY's on the underlying device
* @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
+ * @insert_tx_tag: when offloading, the device requires an
+ *	additional tag to be inserted on transmit
*/
struct macsec_dev {
struct macsec_secy secy;
@@ -102,6 +104,7 @@ struct macsec_dev {
struct list_head secys;
struct gro_cells gro_cells;
enum macsec_offload offload;
+ bool insert_tx_tag;
};
/**
@@ -996,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1010,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- struct macsec_rx_sc *rx_sc = NULL;
+ const struct macsec_ops *ops;
- if (md_dst && md_dst->type == METADATA_MACSEC)
- rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+ ops = macsec_get_ops(macsec, NULL);
- if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1033,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
- } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+ } else if (ndev->flags & IFF_PROMISC) {
skb->dev = ndev;
skb->pkt_type = PACKET_HOST;
ret = RX_HANDLER_ANOTHER;
@@ -2583,6 +2612,33 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
return false;
}
+static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
+ const struct macsec_ops *ops)
+{
+ return macsec->offload == MACSEC_OFFLOAD_PHY &&
+ ops->mdo_insert_tx_tag;
+}
+
+static void macsec_set_head_tail_room(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+ int needed_headroom, needed_tailroom;
+ const struct macsec_ops *ops;
+
+ ops = macsec_get_ops(macsec, NULL);
+ if (ops) {
+ needed_headroom = ops->needed_headroom;
+ needed_tailroom = ops->needed_tailroom;
+ } else {
+ needed_headroom = MACSEC_NEEDED_HEADROOM;
+ needed_tailroom = MACSEC_NEEDED_TAILROOM;
+ }
+
+ dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
+ dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
+}
+
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
enum macsec_offload prev_offload;
@@ -2620,8 +2676,13 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
ctx.secy = &macsec->secy;
ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
: macsec_offload(ops->mdo_add_secy, &ctx);
- if (ret)
+ if (ret) {
macsec->offload = prev_offload;
+ return ret;
+ }
+
+ macsec_set_head_tail_room(dev);
+ macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
return ret;
}
@@ -3379,6 +3440,40 @@ static struct genl_family macsec_fam __ro_after_init = {
.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
+static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ const struct macsec_ops *ops;
+ struct phy_device *phydev;
+ struct macsec_context ctx;
+ int skb_final_len;
+ int err;
+
+ ops = macsec_get_ops(macsec, &ctx);
+ skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
+ ops->needed_tailroom;
+ if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ phydev = macsec->real_dev->phydev;
+
+ err = skb_ensure_writable_head_tail(skb, dev);
+ if (unlikely(err < 0))
+ goto cleanup;
+
+ err = ops->mdo_insert_tx_tag(phydev, skb);
+ if (unlikely(err))
+ goto cleanup;
+
+ return skb;
+cleanup:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
+
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -3393,6 +3488,15 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb_dst_drop(skb);
dst_hold(&md_dst->dst);
skb_dst_set(skb, &md_dst->dst);
+
+ if (macsec->insert_tx_tag) {
+ skb = macsec_insert_tx_tag(skb, dev);
+ if (IS_ERR(skb)) {
+ DEV_STATS_INC(dev, tx_dropped);
+ return NETDEV_TX_OK;
+ }
+ }
+
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
@@ -3454,10 +3558,7 @@ static int macsec_dev_init(struct net_device *dev)
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
- dev->needed_headroom = real_dev->needed_headroom +
- MACSEC_NEEDED_HEADROOM;
- dev->needed_tailroom = real_dev->needed_tailroom +
- MACSEC_NEEDED_TAILROOM;
+ macsec_set_head_tail_room(dev);
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, real_dev);
@@ -3604,21 +3705,19 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct sockaddr *addr = p;
+ u8 old_addr[ETH_ALEN];
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!(dev->flags & IFF_UP))
- goto out;
-
- err = dev_uc_add(real_dev, addr->sa_data);
- if (err < 0)
- return err;
-
- dev_uc_del(real_dev, dev->dev_addr);
+ if (dev->flags & IFF_UP) {
+ err = dev_uc_add(real_dev, addr->sa_data);
+ if (err < 0)
+ return err;
+ }
-out:
+ ether_addr_copy(old_addr, dev->dev_addr);
eth_hw_addr_set(dev, addr->sa_data);
/* If h/w offloading is available, propagate to the device */
@@ -3627,13 +3726,29 @@ out:
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
- if (ops) {
- ctx.secy = &macsec->secy;
- macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (!ops) {
+ err = -EOPNOTSUPP;
+ goto restore_old_addr;
}
+
+ ctx.secy = &macsec->secy;
+ err = macsec_offload(ops->mdo_upd_secy, &ctx);
+ if (err)
+ goto restore_old_addr;
}
+ if (dev->flags & IFF_UP)
+ dev_uc_del(real_dev, old_addr);
+
return 0;
+
+restore_old_addr:
+ if (dev->flags & IFF_UP)
+ dev_uc_del(real_dev, addr->sa_data);
+
+ eth_hw_addr_set(dev, old_addr);
+
+ return err;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
@@ -4126,6 +4241,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = macsec_offload(ops->mdo_add_secy, &ctx);
if (err)
goto del_dev;
+
+ macsec->insert_tx_tag =
+ macsec_needs_tx_tag(macsec, ops);
}
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index c8da94af4..a3cc66575 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -609,6 +609,7 @@ static const struct header_ops macvlan_hard_header_ops = {
.parse = eth_header_parse,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
+ .parse_protocol = eth_header_parse_protocol,
};
static int macvlan_open(struct net_device *dev)
@@ -1086,20 +1087,8 @@ static int macvlan_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct net_device *real_dev = macvlan_dev_real_dev(dev);
- const struct ethtool_ops *ops = real_dev->ethtool_ops;
- struct phy_device *phydev = real_dev->phydev;
- if (phy_has_tsinfo(phydev)) {
- return phy_ts_info(phydev, info);
- } else if (ops->get_ts_info) {
- return ops->get_ts_info(real_dev, info);
- } else {
- info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
- }
-
- return 0;
+ return ethtool_get_ts_info_by_layer(real_dev, info);
}
static netdev_features_t macvlan_fix_features(struct net_device *dev,
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index e8cd8eef3..68f8ee0ec 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -73,24 +73,19 @@ static inline void unimac_mdio_start(struct unimac_mdio_priv *priv)
unimac_mdio_writel(priv, reg, MDIO_CMD);
}
-static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv)
-{
- return unimac_mdio_readl(priv, MDIO_CMD) & MDIO_START_BUSY;
-}
-
static int unimac_mdio_poll(void *wait_func_data)
{
struct unimac_mdio_priv *priv = wait_func_data;
- unsigned int timeout = 1000;
+ u32 val;
- do {
- if (!unimac_mdio_busy(priv))
- return 0;
-
- usleep_range(1000, 2000);
- } while (--timeout);
+ /*
+ * C22 transactions should take ~25 usec, will need to adjust
+ * if C45 support is added.
+ */
+ udelay(30);
- return -ETIMEDOUT;
+ return read_poll_timeout(unimac_mdio_readl, val, !(val & MDIO_START_BUSY),
+ 2000, 100000, false, priv, MDIO_CMD);
}
static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
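read_poll_timeout() (from <linux/iopoll.h>) repeatedly evaluates op(args...) into val until cond becomes true or timeout_us expires, sleeping between reads, and returns 0 or -ETIMEDOUT. The call above is therefore roughly equivalent to this open-coded loop (approximate sketch; the real macro uses ktime and performs a final read on timeout):

	for (;;) {
		val = unimac_mdio_readl(priv, MDIO_CMD);
		if (!(val & MDIO_START_BUSY))
			return 0;			/* transaction finished */
		if (timed_out_after_100ms())		/* hypothetical helper */
			return -ETIMEDOUT;
		usleep_range(1000, 2000);		/* pause between polls */
	}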
diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 897b88c50..778db310a 100644
--- a/drivers/net/mdio/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
@@ -123,9 +123,9 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
new_bus->parent = dev;
if (bus_id != -1)
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
+ snprintf(new_bus->id, sizeof(new_bus->id), "gpio-%x", bus_id);
else
- strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
+ strscpy(new_bus->id, "gpio", sizeof(new_bus->id));
if (pdata) {
new_bus->phy_mask = pdata->phy_mask;
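Unlike strncpy(), strscpy() guarantees NUL-termination and reports truncation. A quick illustration of the difference, assuming an 8-byte destination:

	char id[8];

	strncpy(id, "gpio-bus-name", sizeof(id)); /* id is NOT NUL-terminated */
	strscpy(id, "gpio-bus-name", sizeof(id)); /* id == "gpio-bu", returns -E2BIG */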
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index a750bd4c7..1ce7d67ba 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -2,6 +2,7 @@
/*
* Copyright 2016 Broadcom
*/
+#include <linux/align.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -11,6 +12,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/sizes.h>
#define MDIO_RATE_ADJ_EXT_OFFSET 0x000
#define MDIO_RATE_ADJ_INT_OFFSET 0x004
@@ -220,12 +222,12 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(md->base))
return PTR_ERR(md->base);
- if (res->start & 0xfff) {
+ if (!IS_ALIGNED(res->start, SZ_4K)) {
/* For backward compatibility in case the
* base address is specified with an offset.
*/
dev_info(&pdev->dev, "fix base address in dt-blob\n");
- res->start &= ~0xfff;
+ res->start = ALIGN_DOWN(res->start, SZ_4K);
res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
}
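IS_ALIGNED() and ALIGN_DOWN() (from <linux/align.h>) make the 4 KiB mask self-describing and work for any power-of-two size. With illustrative addresses (not taken from a real devicetree):

	IS_ALIGNED(0x18003000, SZ_4K);	/* true:  low 12 bits are zero */
	IS_ALIGNED(0x18003200, SZ_4K);	/* false: 0x200 into the page */
	ALIGN_DOWN(0x18003200, SZ_4K);	/* 0x18003000, same as & ~0xfff */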
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index bef4cce71..fe0e46bd7 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -190,8 +190,8 @@ int mdio_mux_init(struct device *dev,
r = of_property_read_u32(child_bus_node, "reg", &v);
if (r) {
dev_err(dev,
- "Error: Failed to find reg for child %pOF\n",
- child_bus_node);
+ "Error: Failed to find reg for child %pOF: %pe\n",
+ child_bus_node, ERR_PTR(r));
continue;
}
@@ -214,8 +214,10 @@ int mdio_mux_init(struct device *dev,
snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
cb->mii_bus->name, pb->parent_id, v);
cb->mii_bus->parent = dev;
- cb->mii_bus->read = mdio_mux_read;
- cb->mii_bus->write = mdio_mux_write;
+ if (parent_bus->read)
+ cb->mii_bus->read = mdio_mux_read;
+ if (parent_bus->write)
+ cb->mii_bus->write = mdio_mux_write;
if (parent_bus->read_c45)
cb->mii_bus->read_c45 = mdio_mux_read_c45;
if (parent_bus->write_c45)
@@ -229,8 +231,8 @@ int mdio_mux_init(struct device *dev,
}
devm_kfree(dev, cb);
dev_err(dev,
- "Error: Failed to register MDIO bus for child %pOF\n",
- child_bus_node);
+ "Error: Failed to register MDIO bus for child %pOF: %pe\n",
+ child_bus_node, ERR_PTR(r));
} else {
cb->next = pb->children;
pb->children = cb;
diff --git a/drivers/net/netdevsim/macsec.c b/drivers/net/netdevsim/macsec.c
index 0d5f50430..aa007b1e4 100644
--- a/drivers/net/netdevsim/macsec.c
+++ b/drivers/net/netdevsim/macsec.c
@@ -3,11 +3,6 @@
#include <net/macsec.h>
#include "netdevsim.h"
-static inline u64 sci_to_cpu(sci_t sci)
-{
- return be64_to_cpu((__force __be64)sci);
-}
-
static int nsim_macsec_find_secy(struct netdevsim *ns, sci_t sci)
{
int i;
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 97139c071..d93f84fbb 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -505,11 +505,9 @@ disable_runtime_pm:
return ret;
}
-static int miic_remove(struct platform_device *pdev)
+static void miic_remove(struct platform_device *pdev)
{
pm_runtime_put(&pdev->dev);
-
- return 0;
}
static const struct of_device_id miic_of_mtable[] = {
@@ -525,7 +523,7 @@ static struct platform_driver miic_driver = {
.of_match_table = miic_of_mtable,
},
.probe = miic_probe,
- .remove = miic_remove,
+ .remove_new = miic_remove,
};
module_platform_driver(miic_driver);
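The .remove() platform callback returns int, but the driver core ignores the value; .remove_new() encodes the same teardown with a void return so the "cannot fail" contract is explicit. The conversion pattern, generically (foo_* names are placeholders):

	static void foo_remove(struct platform_device *pdev)
	{
		/* release resources; nothing useful can be done on failure */
	}

	static struct platform_driver foo_driver = {
		.probe		= foo_probe,
		.remove_new	= foo_remove,
	};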
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31f0beba6..03d6a6aef 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -293,7 +293,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
dev = MDIO_MMD_VEND2;
break;
default:
- return -1;
+ return -EINVAL;
}
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
@@ -891,7 +891,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 421d2b629..9e2672800 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -60,6 +60,14 @@ config FIXED_PHY
Currently tested with mpc866ads and mpc8349e-mitx.
+config RUST_PHYLIB_ABSTRACTIONS
+ bool "Rust PHYLIB abstractions support"
+ depends on RUST
+ depends on PHYLIB=y
+ help
+ Adds support needed for PHY drivers written in Rust. It provides
+ a wrapper around the C phylib core.
+
config SFP
tristate "SFP cage support"
depends on I2C && PHYLINK
@@ -96,10 +104,7 @@ config ADIN1100_PHY
Currently supports the:
- ADIN1100 - Robust, Industrial, Low Power 10BASE-T1L Ethernet PHY
-config AQUANTIA_PHY
- tristate "Aquantia PHYs"
- help
- Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+source "drivers/net/phy/aquantia/Kconfig"
config AX88796B_PHY
tristate "Asix PHYs"
@@ -107,6 +112,14 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
+config AX88796B_RUST_PHY
+ bool "Rust reference driver for Asix PHYs"
+ depends on RUST_PHYLIB_ABSTRACTIONS && AX88796B_PHY
+ help
+ Uses the Rust reference driver for Asix PHYs (ax88796b_rust.ko).
+ The features are equivalent. It supports the Asix Electronics PHY
+ found in the X-Surf 100 AX88796B package.
+
config BROADCOM_PHY
tristate "Broadcom 54XX PHYs"
select BCM_NET_PHYLIB
@@ -304,9 +317,10 @@ config NXP_CBTX_PHY
config NXP_C45_TJA11XX_PHY
tristate "NXP C45 TJA11XX PHYs"
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on MACSEC || !MACSEC
help
Enable support for NXP C45 TJA11XX PHYs.
- Currently supports the TJA1103 and TJA1120 PHYs.
+ Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
@@ -397,6 +411,19 @@ config DP83TD510_PHY
Support for the DP83TD510 Ethernet 10Base-T1L PHY. This PHY supports
a 10M single pair Ethernet connection for up to 1000 meter cable.
+config DP83TG720_PHY
+ tristate "Texas Instruments DP83TG720 Ethernet 1000Base-T1 PHY"
+ help
+ The DP83TG720S-Q1 is an automotive Ethernet physical layer
+ transceiver compliant with IEEE 802.3bp and Open Alliance
+ standards. It supports key functions necessary for
+ transmitting and receiving data over both unshielded and
+ shielded single twisted-pair cables. This device offers
+ flexible xMII interface options, including support for both
+ RGMII and SGMII MAC interfaces. It's suitable for applications
+ requiring high-speed data transmission in automotive
+ networking environments.
+
config VITESSE_PHY
tristate "Vitesse PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index c945ed9bd..6097afd44 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -35,13 +35,13 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
-aquantia-objs += aquantia_main.o
-ifdef CONFIG_HWMON
-aquantia-objs += aquantia_hwmon.o
-endif
-obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
+obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
obj-$(CONFIG_AT803X_PHY) += at803x.o
-obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
+ifdef CONFIG_AX88796B_RUST_PHY
+ obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
+else
+ obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
+endif
obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
@@ -61,6 +61,7 @@ obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_DP83869_PHY) += dp83869.o
obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83TD510_PHY) += dp83td510.o
+obj-$(CONFIG_DP83TG720_PHY) += dp83tg720.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
@@ -83,7 +84,11 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
-obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
+nxp-c45-tja-objs += nxp-c45-tja11xx.o
+ifdef CONFIG_MACSEC
+nxp-c45-tja-objs += nxp-c45-tja11xx-macsec.o
+endif
+obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 134637584..2e1a46e12 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -68,6 +68,24 @@
#define ADIN1300_EEE_CAP_REG 0x8000
#define ADIN1300_EEE_ADV_REG 0x8001
#define ADIN1300_EEE_LPABLE_REG 0x8002
+
+#define ADIN1300_FLD_EN_REG 0x8E27
+#define ADIN1300_FLD_PCS_ERR_100_EN BIT(7)
+#define ADIN1300_FLD_PCS_ERR_1000_EN BIT(6)
+#define ADIN1300_FLD_SLCR_OUT_STUCK_100_EN BIT(5)
+#define ADIN1300_FLD_SLCR_OUT_STUCK_1000_EN BIT(4)
+#define ADIN1300_FLD_SLCR_IN_ZDET_100_EN BIT(3)
+#define ADIN1300_FLD_SLCR_IN_ZDET_1000_EN BIT(2)
+#define ADIN1300_FLD_SLCR_IN_INVLD_100_EN BIT(1)
+#define ADIN1300_FLD_SLCR_IN_INVLD_1000_EN BIT(0)
+/* These bits are the ones which are enabled by default. */
+#define ADIN1300_FLD_EN_ON \
+ (ADIN1300_FLD_SLCR_OUT_STUCK_100_EN | \
+ ADIN1300_FLD_SLCR_OUT_STUCK_1000_EN | \
+ ADIN1300_FLD_SLCR_IN_ZDET_100_EN | \
+ ADIN1300_FLD_SLCR_IN_ZDET_1000_EN | \
+ ADIN1300_FLD_SLCR_IN_INVLD_1000_EN)
+
#define ADIN1300_CLOCK_STOP_REG 0x9400
#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
@@ -416,6 +434,37 @@ static int adin_set_edpd(struct phy_device *phydev, u16 tx_interval)
val);
}
+static int adin_get_fast_down(struct phy_device *phydev, u8 *msecs)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_FLD_EN_REG);
+ if (reg < 0)
+ return reg;
+
+ if (reg & ADIN1300_FLD_EN_ON)
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_ON;
+ else
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+
+ return 0;
+}
+
+static int adin_set_fast_down(struct phy_device *phydev, const u8 *msecs)
+{
+ if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_ON)
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_FLD_EN_REG,
+ ADIN1300_FLD_EN_ON);
+
+ if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_FLD_EN_REG,
+ ADIN1300_FLD_EN_ON);
+
+ return -EINVAL;
+}
+
static int adin_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
@@ -424,6 +473,8 @@ static int adin_get_tunable(struct phy_device *phydev,
return adin_get_downshift(phydev, data);
case ETHTOOL_PHY_EDPD:
return adin_get_edpd(phydev, data);
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return adin_get_fast_down(phydev, data);
default:
return -EOPNOTSUPP;
}
@@ -437,6 +488,8 @@ static int adin_set_tunable(struct phy_device *phydev,
return adin_set_downshift(phydev, *(const u8 *)data);
case ETHTOOL_PHY_EDPD:
return adin_set_edpd(phydev, *(const u16 *)data);
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return adin_set_fast_down(phydev, data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/phy/aquantia.h b/drivers/net/phy/aquantia.h
deleted file mode 100644
index c684b65c6..000000000
--- a/drivers/net/phy/aquantia.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* HWMON driver for Aquantia PHY
- *
- * Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
- * Author: Andrew Lunn <andrew@lunn.ch>
- * Author: Heiner Kallweit <hkallweit1@gmail.com>
- */
-
-#include <linux/device.h>
-#include <linux/phy.h>
-
-#if IS_REACHABLE(CONFIG_HWMON)
-int aqr_hwmon_probe(struct phy_device *phydev);
-#else
-static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
-#endif
diff --git a/drivers/net/phy/aquantia/Kconfig b/drivers/net/phy/aquantia/Kconfig
new file mode 100644
index 000000000..1a6567858
--- /dev/null
+++ b/drivers/net/phy/aquantia/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config AQUANTIA_PHY
+ tristate "Aquantia PHYs"
+ select CRC_ITU_T
+ help
+ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
diff --git a/drivers/net/phy/aquantia/Makefile b/drivers/net/phy/aquantia/Makefile
new file mode 100644
index 000000000..aa77fb63c
--- /dev/null
+++ b/drivers/net/phy/aquantia/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+aquantia-objs += aquantia_main.o aquantia_firmware.o
+ifdef CONFIG_HWMON
+aquantia-objs += aquantia_hwmon.o
+endif
+obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
new file mode 100644
index 000000000..1c19ae74a
--- /dev/null
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* HWMON driver for Aquantia PHY
+ *
+ * Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
+ * Author: Andrew Lunn <andrew@lunn.ch>
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/phy.h>
+
+/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_SC 0x0
+#define VEND1_GLOBAL_SC_SOFT_RESET BIT(15)
+#define VEND1_GLOBAL_SC_LOW_POWER BIT(11)
+
+#define VEND1_GLOBAL_FW_ID 0x0020
+#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1 0x0200
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE BIT(15)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE BIT(14)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET BIT(12)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_BUSY BIT(8)
+
+#define VEND1_GLOBAL_MAILBOX_INTERFACE2 0x0201
+#define VEND1_GLOBAL_MAILBOX_INTERFACE3 0x0202
+#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK GENMASK(15, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK, (u16)((x) >> 16))
+#define VEND1_GLOBAL_MAILBOX_INTERFACE4 0x0203
+#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK GENMASK(15, 2)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK, (u16)(x))
+
+#define VEND1_GLOBAL_MAILBOX_INTERFACE5 0x0204
+#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK GENMASK(15, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK, (u16)((x) >> 16))
+#define VEND1_GLOBAL_MAILBOX_INTERFACE6 0x0205
+#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK GENMASK(15, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK, (u16)(x))
+
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M 0x0310
+#define VEND1_GLOBAL_CFG_100M 0x031b
+#define VEND1_GLOBAL_CFG_1G 0x031c
+#define VEND1_GLOBAL_CFG_2_5G 0x031d
+#define VEND1_GLOBAL_CFG_5G 0x031e
+#define VEND1_GLOBAL_CFG_10G 0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_SERDES_MODE GENMASK(2, 0)
+#define VEND1_GLOBAL_CFG_SERDES_MODE_XFI 0
+#define VEND1_GLOBAL_CFG_SERDES_MODE_SGMII 3
+#define VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII 4
+#define VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G 6
+#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
+
+/* Vendor specific 1, MDIO_MMD_VEND2 */
+#define VEND1_GLOBAL_CONTROL2 0xc001
+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST BIT(15)
+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD BIT(6)
+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL BIT(0)
+
+#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
+#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
+#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
+#define VEND1_THERMAL_PROV_LOW_TEMP_WARN 0xc424
+#define VEND1_THERMAL_STAT1 0xc820
+#define VEND1_THERMAL_STAT2 0xc821
+#define VEND1_THERMAL_STAT2_VALID BIT(0)
+#define VEND1_GENERAL_STAT1 0xc830
+#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL BIT(14)
+#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL BIT(13)
+#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN BIT(12)
+#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN BIT(11)
+
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
+#define VEND1_GLOBAL_RSVD_STAT1 0xc885
+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
+#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+
+#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
+#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
+
+#define VEND1_GLOBAL_INT_STD_MASK 0xff00
+#define VEND1_GLOBAL_INT_STD_MASK_PMA1 BIT(15)
+#define VEND1_GLOBAL_INT_STD_MASK_PMA2 BIT(14)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS1 BIT(13)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS2 BIT(12)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS3 BIT(11)
+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1 BIT(10)
+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2 BIT(9)
+#define VEND1_GLOBAL_INT_STD_MASK_AN1 BIT(8)
+#define VEND1_GLOBAL_INT_STD_MASK_AN2 BIT(7)
+#define VEND1_GLOBAL_INT_STD_MASK_GBE BIT(6)
+#define VEND1_GLOBAL_INT_STD_MASK_ALL BIT(0)
+
+#define VEND1_GLOBAL_INT_VEND_MASK 0xff01
+#define VEND1_GLOBAL_INT_VEND_MASK_PMA BIT(15)
+#define VEND1_GLOBAL_INT_VEND_MASK_PCS BIT(14)
+#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS BIT(13)
+#define VEND1_GLOBAL_INT_VEND_MASK_AN BIT(12)
+#define VEND1_GLOBAL_INT_VEND_MASK_GBE BIT(11)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1 BIT(2)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+
+#if IS_REACHABLE(CONFIG_HWMON)
+int aqr_hwmon_probe(struct phy_device *phydev);
+#else
+static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
+#endif
+
+int aqr_firmware_load(struct phy_device *phydev);
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
new file mode 100644
index 000000000..0c9640ef1
--- /dev/null
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/of.h>
+#include <linux/firmware.h>
+#include <linux/crc-itu-t.h>
+#include <linux/nvmem-consumer.h>
+
+#include <asm/unaligned.h>
+
+#include "aquantia.h"
+
+#define UP_RESET_SLEEP 100
+
+/* addresses of memory segments in the phy */
+#define DRAM_BASE_ADDR 0x3FFE0000
+#define IRAM_BASE_ADDR 0x40000000
+
+/* firmware image format constants */
+#define VERSION_STRING_SIZE 0x40
+#define VERSION_STRING_OFFSET 0x0200
+/* primary offset is written at an offset from the start of the fw blob */
+#define PRIMARY_OFFSET_OFFSET 0x8
+/* the primary offset then needs to be added to a base offset */
+#define PRIMARY_OFFSET_SHIFT 12
+#define PRIMARY_OFFSET(x) ((x) << PRIMARY_OFFSET_SHIFT)
+#define HEADER_OFFSET 0x300
+
+struct aqr_fw_header {
+ u32 padding;
+ u8 iram_offset[3];
+ u8 iram_size[3];
+ u8 dram_offset[3];
+ u8 dram_size[3];
+} __packed;
+
+enum aqr_fw_src {
+ AQR_FW_SRC_NVMEM = 0,
+ AQR_FW_SRC_FS,
+};
+
+static const char * const aqr_fw_src_string[] = {
+ [AQR_FW_SRC_NVMEM] = "NVMEM",
+ [AQR_FW_SRC_FS] = "FS",
+};
+
+/* AQR firmware doesn't have fixed offsets for the iram and dram sections;
+ * instead it provides a header with the offsets to use when reading and
+ * parsing the firmware.
+ *
+ * The firmware can't be trusted, so every offset is validated to lie
+ * within the bounds of the firmware image itself.
+ */
+static bool aqr_fw_validate_get(size_t size, size_t offset, size_t get_size)
+{
+ return offset + get_size <= size;
+}
+
+static int aqr_fw_get_be16(const u8 *data, size_t offset, size_t size, u16 *value)
+{
+ if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
+ return -EINVAL;
+
+ *value = get_unaligned_be16(data + offset);
+
+ return 0;
+}
+
+static int aqr_fw_get_le16(const u8 *data, size_t offset, size_t size, u16 *value)
+{
+ if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
+ return -EINVAL;
+
+ *value = get_unaligned_le16(data + offset);
+
+ return 0;
+}
+
+static int aqr_fw_get_le24(const u8 *data, size_t offset, size_t size, u32 *value)
+{
+ if (!aqr_fw_validate_get(size, offset, sizeof(u8) * 3))
+ return -EINVAL;
+
+ *value = get_unaligned_le24(data + offset);
+
+ return 0;
+}
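Every multi-byte read from the blob goes through one of these bounds-checked getters, so a malformed header can never index past the image. Typical use, mirroring the boot path below (data/size are the firmware buffer and its length, hdr the validated header offset):

	u16 crc;
	u32 iram_off;

	/* trailing big-endian CRC */
	if (aqr_fw_get_be16(data, size - sizeof(u16), size, &crc))
		return -EINVAL;

	/* 24-bit little-endian section offset out of the header */
	if (aqr_fw_get_le24(data, hdr + offsetof(struct aqr_fw_header, iram_offset),
			    size, &iram_off))
		return -EINVAL;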
+
+/* load data into the phy's memory */
+static int aqr_fw_load_memory(struct phy_device *phydev, u32 addr,
+ const u8 *data, size_t len)
+{
+ u16 crc = 0, up_crc;
+ size_t pos;
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE3,
+ VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(addr));
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE4,
+ VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(addr));
+
+ /* We assume and enforce the size to be word aligned.
+	 * If a firmware image that is not word aligned is found, please report it upstream.
+ */
+ for (pos = 0; pos < len; pos += sizeof(u32)) {
+ u8 crc_data[4];
+ u32 word;
+
+ /* FW data is always stored in little-endian */
+ word = get_unaligned_le32((const u32 *)(data + pos));
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE5,
+ VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(word));
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE6,
+ VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(word));
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE |
+ VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE);
+
+ /* Word is swapped internally and MAILBOX CRC is calculated
+ * using big-endian order. Mimic what the PHY does to have a
+ * matching CRC...
+ */
+ crc_data[0] = word >> 24;
+ crc_data[1] = word >> 16;
+ crc_data[2] = word >> 8;
+ crc_data[3] = word;
+
+ /* ...calculate CRC as we load data... */
+ crc = crc_itu_t(crc, crc_data, sizeof(crc_data));
+ }
+	/* ...read the CRC from the MAILBOX once the entire section is loaded... */
+ up_crc = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE2);
+ /* ...and make sure it does match our calculated CRC */
+ if (crc != up_crc) {
+ phydev_err(phydev, "CRC mismatch: calculated 0x%04x PHY 0x%04x\n",
+ crc, up_crc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
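The byte shuffle matters because the blob stores words little-endian while the PHY's mailbox CRC runs over big-endian byte order. For a word stored in the file as 44 33 22 11:

	u32 word = get_unaligned_le32(p);	/* word == 0x11223344 */
	u8 be[4] = { word >> 24, word >> 16, word >> 8, word };
						/* { 0x11, 0x22, 0x33, 0x44 } */
	crc = crc_itu_t(crc, be, sizeof(be));	/* CRC-16/ITU-T over BE bytes */

feeding the CRC the same byte order the PHY sees, which is why the two checksums can be compared at the end of the section.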
+
+static int aqr_fw_boot(struct phy_device *phydev, const u8 *data, size_t size,
+ enum aqr_fw_src fw_src)
+{
+ u16 calculated_crc, read_crc, read_primary_offset;
+ u32 iram_offset = 0, iram_size = 0;
+ u32 dram_offset = 0, dram_size = 0;
+ char version[VERSION_STRING_SIZE];
+ u32 primary_offset = 0;
+ int ret;
+
+	/* extract the CRC saved at the end of the fw;
+	 * it is stored big-endian, as the PHY is BE
+ */
+ ret = aqr_fw_get_be16(data, size - sizeof(u16), size, &read_crc);
+ if (ret) {
+ phydev_err(phydev, "bad firmware CRC in firmware\n");
+ return ret;
+ }
+ calculated_crc = crc_itu_t(0, data, size - sizeof(u16));
+ if (read_crc != calculated_crc) {
+ phydev_err(phydev, "bad firmware CRC: file 0x%04x calculated 0x%04x\n",
+ read_crc, calculated_crc);
+ return -EINVAL;
+ }
+
+ /* Get the primary offset to extract DRAM and IRAM sections. */
+ ret = aqr_fw_get_le16(data, PRIMARY_OFFSET_OFFSET, size, &read_primary_offset);
+ if (ret) {
+ phydev_err(phydev, "bad primary offset in firmware\n");
+ return ret;
+ }
+ primary_offset = PRIMARY_OFFSET(read_primary_offset);
+
+ /* Find the DRAM and IRAM sections within the firmware file.
+ * Make sure the fw_header is correctly in the firmware.
+ */
+ if (!aqr_fw_validate_get(size, primary_offset + HEADER_OFFSET,
+ sizeof(struct aqr_fw_header))) {
+ phydev_err(phydev, "bad fw_header in firmware\n");
+ return -EINVAL;
+ }
+
+	/* offsets are stored LE and the values need converting to CPU endianness */
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, iram_offset),
+ size, &iram_offset);
+ if (ret) {
+ phydev_err(phydev, "bad iram offset in firmware\n");
+ return ret;
+ }
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, iram_size),
+ size, &iram_size);
+ if (ret) {
+ phydev_err(phydev, "invalid iram size in firmware\n");
+ return ret;
+ }
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, dram_offset),
+ size, &dram_offset);
+ if (ret) {
+ phydev_err(phydev, "bad dram offset in firmware\n");
+ return ret;
+ }
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, dram_size),
+ size, &dram_size);
+ if (ret) {
+ phydev_err(phydev, "invalid dram size in firmware\n");
+ return ret;
+ }
+
+ /* Increment the offset with the primary offset.
+ * Validate iram/dram offset and size.
+ */
+ iram_offset += primary_offset;
+ if (iram_size % sizeof(u32)) {
+ phydev_err(phydev, "iram size if not aligned to word size. Please report this upstream!\n");
+ return -EINVAL;
+ }
+ if (!aqr_fw_validate_get(size, iram_offset, iram_size)) {
+ phydev_err(phydev, "invalid iram offset for iram size\n");
+ return -EINVAL;
+ }
+
+ dram_offset += primary_offset;
+ if (dram_size % sizeof(u32)) {
+ phydev_err(phydev, "dram size if not aligned to word size. Please report this upstream!\n");
+ return -EINVAL;
+ }
+ if (!aqr_fw_validate_get(size, dram_offset, dram_size)) {
+ phydev_err(phydev, "invalid iram offset for iram size\n");
+ return -EINVAL;
+ }
+
+ phydev_dbg(phydev, "primary %d IRAM offset=%d size=%d DRAM offset=%d size=%d\n",
+ primary_offset, iram_offset, iram_size, dram_offset, dram_size);
+
+ if (!aqr_fw_validate_get(size, dram_offset + VERSION_STRING_OFFSET,
+ VERSION_STRING_SIZE)) {
+ phydev_err(phydev, "invalid version in firmware\n");
+ return -EINVAL;
+ }
+ strscpy(version, (char *)data + dram_offset + VERSION_STRING_OFFSET,
+ VERSION_STRING_SIZE);
+ if (version[0] == '\0') {
+ phydev_err(phydev, "invalid version in firmware\n");
+ return -EINVAL;
+ }
+ phydev_info(phydev, "loading firmware version '%s' from '%s'\n", version,
+ aqr_fw_src_string[fw_src]);
+
+	/* stall the microprocessor */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL | VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
+
+ phydev_dbg(phydev, "loading DRAM 0x%08x from offset=%d size=%d\n",
+ DRAM_BASE_ADDR, dram_offset, dram_size);
+ ret = aqr_fw_load_memory(phydev, DRAM_BASE_ADDR, data + dram_offset,
+ dram_size);
+ if (ret)
+ return ret;
+
+ phydev_dbg(phydev, "loading IRAM 0x%08x from offset=%d size=%d\n",
+ IRAM_BASE_ADDR, iram_offset, iram_size);
+ ret = aqr_fw_load_memory(phydev, IRAM_BASE_ADDR, data + iram_offset,
+ iram_size);
+ if (ret)
+ return ret;
+
+ /* make sure soft reset and low power mode are clear */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_SC,
+ VEND1_GLOBAL_SC_SOFT_RESET | VEND1_GLOBAL_SC_LOW_POWER);
+
+ /* Release the microprocessor. UP_RESET must be held for 100 usec. */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL |
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD |
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST);
+ usleep_range(UP_RESET_SLEEP, UP_RESET_SLEEP * 2);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
+
+ return 0;
+}
+
+static int aqr_firmware_load_nvmem(struct phy_device *phydev)
+{
+ struct nvmem_cell *cell;
+ size_t size;
+ u8 *buf;
+ int ret;
+
+ cell = nvmem_cell_get(&phydev->mdio.dev, "firmware");
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &size);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto exit;
+ }
+
+ ret = aqr_fw_boot(phydev, buf, size, AQR_FW_SRC_NVMEM);
+ if (ret)
+ phydev_err(phydev, "firmware loading failed: %d\n", ret);
+
+ kfree(buf);
+exit:
+ nvmem_cell_put(cell);
+
+ return ret;
+}
+
+static int aqr_firmware_load_fs(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ phydev_err(phydev, "failed to find FW file %s (%d)\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = aqr_fw_boot(phydev, fw->data, fw->size, AQR_FW_SRC_FS);
+ if (ret)
+ phydev_err(phydev, "firmware loading failed: %d\n", ret);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int aqr_firmware_load(struct phy_device *phydev)
+{
+ int ret;
+
+	/* Check whether firmware is already loaded by polling
+ * the current version returned by the PHY. If 0 is returned,
+ * no firmware is loaded.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (ret > 0)
+ goto exit;
+
+ ret = aqr_firmware_load_nvmem(phydev);
+ if (!ret)
+ goto exit;
+
+ ret = aqr_firmware_load_fs(phydev);
+ if (ret)
+ return ret;
+
+exit:
+ return 0;
+}
diff --git a/drivers/net/phy/aquantia_hwmon.c b/drivers/net/phy/aquantia/aquantia_hwmon.c
index 0da451e46..7b3c49c3b 100644
--- a/drivers/net/phy/aquantia_hwmon.c
+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
@@ -13,20 +13,6 @@
#include "aquantia.h"
-/* Vendor specific 1, MDIO_MMD_VEND2 */
-#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
-#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
-#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
-#define VEND1_THERMAL_PROV_LOW_TEMP_WARN 0xc424
-#define VEND1_THERMAL_STAT1 0xc820
-#define VEND1_THERMAL_STAT2 0xc821
-#define VEND1_THERMAL_STAT2_VALID BIT(0)
-#define VEND1_GENERAL_STAT1 0xc830
-#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL BIT(14)
-#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL BIT(13)
-#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN BIT(12)
-#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN BIT(11)
-
#if IS_REACHABLE(CONFIG_HWMON)
static umode_t aqr_hwmon_is_visible(const void *data,
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 334a6904c..97a2fafa1 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -91,61 +91,6 @@
#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
-/* Vendor specific 1, MDIO_MMD_VEND1 */
-#define VEND1_GLOBAL_FW_ID 0x0020
-#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
-#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
-
-#define VEND1_GLOBAL_GEN_STAT2 0xc831
-#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
-
-/* The following registers all have similar layouts; first the registers... */
-#define VEND1_GLOBAL_CFG_10M 0x0310
-#define VEND1_GLOBAL_CFG_100M 0x031b
-#define VEND1_GLOBAL_CFG_1G 0x031c
-#define VEND1_GLOBAL_CFG_2_5G 0x031d
-#define VEND1_GLOBAL_CFG_5G 0x031e
-#define VEND1_GLOBAL_CFG_10G 0x031f
-/* ...and now the fields */
-#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
-#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
-#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
-#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
-
-#define VEND1_GLOBAL_RSVD_STAT1 0xc885
-#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
-#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
-
-#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
-#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
-#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
-
-#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
-#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
-
-#define VEND1_GLOBAL_INT_STD_MASK 0xff00
-#define VEND1_GLOBAL_INT_STD_MASK_PMA1 BIT(15)
-#define VEND1_GLOBAL_INT_STD_MASK_PMA2 BIT(14)
-#define VEND1_GLOBAL_INT_STD_MASK_PCS1 BIT(13)
-#define VEND1_GLOBAL_INT_STD_MASK_PCS2 BIT(12)
-#define VEND1_GLOBAL_INT_STD_MASK_PCS3 BIT(11)
-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1 BIT(10)
-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2 BIT(9)
-#define VEND1_GLOBAL_INT_STD_MASK_AN1 BIT(8)
-#define VEND1_GLOBAL_INT_STD_MASK_AN2 BIT(7)
-#define VEND1_GLOBAL_INT_STD_MASK_GBE BIT(6)
-#define VEND1_GLOBAL_INT_STD_MASK_ALL BIT(0)
-
-#define VEND1_GLOBAL_INT_VEND_MASK 0xff01
-#define VEND1_GLOBAL_INT_VEND_MASK_PMA BIT(15)
-#define VEND1_GLOBAL_INT_VEND_MASK_PCS BIT(14)
-#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS BIT(13)
-#define VEND1_GLOBAL_INT_VEND_MASK_AN BIT(12)
-#define VEND1_GLOBAL_INT_VEND_MASK_GBE BIT(11)
-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1 BIT(2)
-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
-
/* Sleep and timeout for checking if the Processor-Intensive
* MDIO operation is finished
*/
@@ -711,13 +656,93 @@ static int aqr107_resume(struct phy_device *phydev)
return aqr107_wait_processor_intensive_op(phydev);
}
+static const u16 aqr_global_cfg_regs[] = {
+ VEND1_GLOBAL_CFG_10M,
+ VEND1_GLOBAL_CFG_100M,
+ VEND1_GLOBAL_CFG_1G,
+ VEND1_GLOBAL_CFG_2_5G,
+ VEND1_GLOBAL_CFG_5G,
+ VEND1_GLOBAL_CFG_10G
+};
+
+static int aqr107_fill_interface_modes(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+ unsigned int serdes_mode, rate_adapt;
+ phy_interface_t interface;
+ int i, val;
+
+ /* Walk the media-speed configuration registers to determine which
+ * host-side serdes modes may be used by the PHY depending on the
+ * negotiated media speed.
+ */
+ for (i = 0; i < ARRAY_SIZE(aqr_global_cfg_regs); i++) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i]);
+ if (val < 0)
+ return val;
+
+ serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
+ rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
+
+ switch (serdes_mode) {
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
+ if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
+ interface = PHY_INTERFACE_MODE_USXGMII;
+ else
+ interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
+ interface = PHY_INTERFACE_MODE_5GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
+ interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+
+ default:
+ phydev_warn(phydev, "unrecognised serdes mode %u\n",
+ serdes_mode);
+ interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ if (interface != PHY_INTERFACE_MODE_NA)
+ __set_bit(interface, possible);
+ }
+
+ return 0;
+}
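Decoding one sample register value shows the mechanics; with SERDES_MODE in bits 2:0 and RATE_ADAPT in bits 8:7, a hypothetical readout of 0x0080 means:

	u16 val = 0x0080;

	FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);	/* 0 -> XFI */
	FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);	/* 1 -> USX rate adaption */
	/* XFI + USX maps to PHY_INTERFACE_MODE_USXGMII in the switch above */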
+
+static int aqr113c_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = aqr107_config_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ return aqr107_fill_interface_modes(phydev);
+}
+
static int aqr107_probe(struct phy_device *phydev)
{
+ int ret;
+
phydev->priv = devm_kzalloc(&phydev->mdio.dev,
sizeof(struct aqr107_priv), GFP_KERNEL);
if (!phydev->priv)
return -ENOMEM;
+ ret = aqr_firmware_load(phydev);
+ if (ret)
+ return ret;
+
return aqr_hwmon_probe(phydev);
}
@@ -843,7 +868,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
.get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .config_init = aqr113c_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ef203b080..fdb89a505 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -254,6 +254,7 @@
#define QCA808X_CDT_ENABLE_TEST BIT(15)
#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
#define QCA808X_CDT_LENGTH_UNIT BIT(10)
#define QCA808X_MMD3_CDT_STATUS 0x8064
@@ -261,16 +262,44 @@
#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH GENMASK(7, 0)
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL 0
-#define QCA808X_CDT_STATUS_STAT_NORMAL 1
-#define QCA808X_CDT_STATUS_STAT_OPEN 2
-#define QCA808X_CDT_STATUS_STAT_SHORT 3
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* NORMAL is MDI with the type field set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Listed for completeness; the busy state should already be handled by wait_for_completion */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
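Each pair's 4-bit status nibble is a 2-bit fault type (bits 1:0) plus a 2-bit MDI code (bits 3:2), so the composed names decode mechanically; for example:

	/* CROSS_SHORT_WITH_MDI1_SAME_OPEN:
	 *   type = 2 (same-pair open)  -> bits 1:0 = 0b10
	 *   mdi  = 1 (shorted to MDI1) -> bits 3:2 = 0b01
	 *   value = 0b0110
	 */

Note that the BUSY pattern (BIT(1) | BIT(3) = 0b1010) aliases CROSS_SHORT_WITH_MDI2_SAME_OPEN, which is presumably why results must only be decoded after the completion poll has filtered the busy state out.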
/* QCA808X 1G chip type */
#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
@@ -295,12 +324,17 @@ struct at803x_hw_stat {
enum stat_access_type access_type;
};
-static struct at803x_hw_stat at803x_hw_stats[] = {
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
};
+struct at803x_ss_mask {
+ u16 speed_mask;
+ u8 speed_shift;
+};
+
struct at803x_priv {
int flags;
u16 clk_25m_reg;
@@ -311,7 +345,7 @@ struct at803x_priv {
bool is_1000basex;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(at803x_hw_stats)];
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
};
struct at803x_context {
@@ -457,7 +491,7 @@ static int at803x_set_wol(struct phy_device *phydev,
if (!ndev)
return -ENODEV;
- mac = (const u8 *) ndev->dev_addr;
+ mac = (const u8 *)ndev->dev_addr;
if (!is_valid_ether_addr(mac))
return -EINVAL;
@@ -466,27 +500,11 @@ static int at803x_set_wol(struct phy_device *phydev,
phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- /* Enable WOL function for 1588 */
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- if (ret)
- return ret;
- }
/* Enable WOL interrupt */
ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
if (ret)
return ret;
} else {
- /* Disable WoL function for 1588 */
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
- if (ret)
- return ret;
- }
/* Disable WOL interrupt */
ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
if (ret)
@@ -529,24 +547,24 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->wolopts |= WAKE_MAGIC;
}
-static int at803x_get_sset_count(struct phy_device *phydev)
+static int qca83xx_get_sset_count(struct phy_device *phydev)
{
- return ARRAY_SIZE(at803x_hw_stats);
+ return ARRAY_SIZE(qca83xx_hw_stats);
}
-static void at803x_get_strings(struct phy_device *phydev, u8 *data)
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
{
int i;
- for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
strscpy(data + i * ETH_GSTRING_LEN,
- at803x_hw_stats[i].string, ETH_GSTRING_LEN);
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
-static u64 at803x_get_stat(struct phy_device *phydev, int i)
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
{
- struct at803x_hw_stat stat = at803x_hw_stats[i];
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
struct at803x_priv *priv = phydev->priv;
int val;
u64 ret;
@@ -567,13 +585,13 @@ static u64 at803x_get_stat(struct phy_device *phydev, int i)
return ret;
}
-static void at803x_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
{
int i;
- for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++)
- data[i] = at803x_get_stat(phydev, i);
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
}
static int at803x_suspend(struct phy_device *phydev)
@@ -599,139 +617,6 @@ static int at803x_resume(struct phy_device *phydev)
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
-static int at803x_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
-
- if (selector)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_RGMII_1V8);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_RGMII_1V8, 0);
-}
-
-static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
- int val;
-
- val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
- if (val < 0)
- return val;
-
- return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
-}
-
-static const struct regulator_ops vddio_regulator_ops = {
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
-};
-
-static const unsigned int vddio_voltage_table[] = {
- 1500000,
- 1800000,
-};
-
-static const struct regulator_desc vddio_desc = {
- .name = "vddio",
- .of_match = of_match_ptr("vddio-regulator"),
- .n_voltages = ARRAY_SIZE(vddio_voltage_table),
- .volt_table = vddio_voltage_table,
- .ops = &vddio_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static const struct regulator_ops vddh_regulator_ops = {
-};
-
-static const struct regulator_desc vddh_desc = {
- .name = "vddh",
- .of_match = of_match_ptr("vddh-regulator"),
- .n_voltages = 1,
- .fixed_uV = 2500000,
- .ops = &vddh_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static int at8031_register_regulators(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct regulator_config config = { };
-
- config.dev = dev;
- config.driver_data = phydev;
-
- priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
- if (IS_ERR(priv->vddio_rdev)) {
- phydev_err(phydev, "failed to register VDDIO regulator\n");
- return PTR_ERR(priv->vddio_rdev);
- }
-
- priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
- if (IS_ERR(priv->vddh_rdev)) {
- phydev_err(phydev, "failed to register VDDH regulator\n");
- return PTR_ERR(priv->vddh_rdev);
- }
-
- return 0;
-}
-
-static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
-{
- struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
- /* Some modules support 10G modes as well as others we support.
- * Mask out non-supported modes so the correct interface is picked.
- */
- linkmode_and(sfp_support, phy_support, sfp_support);
-
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
-
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
-
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
- */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct sfp_upstream_ops at803x_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at803x_sfp_insert,
-};
-
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -787,23 +672,6 @@ static int at803x_parse_dt(struct phy_device *phydev)
priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
-
- /* Fixup for the AR8030/AR8035. This chip has another mask and
- * doesn't support the DSP reference. Eg. the lowest bit of the
- * mask. The upper two bits select the same frequencies. Mask
- * the lowest bit here.
- *
- * Warning:
- * There was no datasheet for the AR8030 available so this is
- * just a guess. But the AR8035 is listed as pin compatible
- * to the AR8030 so there might be a good chance it works on
- * the AR8030 too.
- */
- if (phydev->drv->phy_id == ATH8030_PHY_ID ||
- phydev->drv->phy_id == ATH8035_PHY_ID) {
- priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
- }
}
ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
@@ -825,30 +693,6 @@ static int at803x_parse_dt(struct phy_device *phydev)
}
}
- /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
- * options.
- */
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- if (of_property_read_bool(node, "qca,keep-pll-enabled"))
- priv->flags |= AT803X_KEEP_PLL_ENABLED;
-
- ret = at8031_register_regulators(phydev);
- if (ret < 0)
- return ret;
-
- ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
- "vddio");
- if (ret) {
- phydev_err(phydev, "failed to get VDDIO regulator\n");
- return ret;
- }
-
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -868,35 +712,6 @@ static int at803x_probe(struct phy_device *phydev)
if (ret)
return ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- int mode_cfg;
-
- if (ccr < 0)
- return ccr;
- mode_cfg = ccr & AT803X_MODE_CFG_MASK;
-
- switch (mode_cfg) {
- case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
- case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
- priv->is_1000basex = true;
- fallthrough;
- case AT803X_MODE_CFG_FX100_RGMII_50OHM:
- case AT803X_MODE_CFG_FX100_RGMII_75OHM:
- priv->is_fiber = true;
- break;
- }
-
- /* Disable WoL in 1588 register which is enabled
- * by default
- */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -1004,27 +819,8 @@ static int at803x_hibernation_mode_config(struct phy_device *phydev)
static int at803x_config_init(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
int ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- /* Some bootloaders leave the fiber page selected.
- * Switch to the appropriate page (fiber or copper), as otherwise we
- * read the PHY capabilities from the wrong page.
- */
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev,
- priv->is_fiber ? AT803X_PAGE_FIBER :
- AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
- return ret;
-
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
- }
-
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
@@ -1078,7 +874,6 @@ static int at803x_ack_interrupt(struct phy_device *phydev)
static int at803x_config_intr(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
int err;
int value;
@@ -1095,10 +890,6 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
value |= AT803X_INTR_ENABLE_LINK_FAIL;
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
- if (priv->is_fiber) {
- value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
- }
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
} else {
@@ -1154,9 +945,9 @@ static void at803x_link_change_notify(struct phy_device *phydev)
at803x_context_save(phydev, &context);
phy_device_reset(phydev, 1);
- msleep(1);
+ usleep_range(1000, 2000);
phy_device_reset(phydev, 0);
- msleep(1);
+ usleep_range(1000, 2000);
at803x_context_restore(phydev, &context);
@@ -1164,7 +955,8 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
-static int at803x_read_specific_status(struct phy_device *phydev)
+static int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask)
{
int ss;
@@ -1183,11 +975,8 @@ static int at803x_read_specific_status(struct phy_device *phydev)
if (sfc < 0)
return sfc;
- /* qca8081 takes the different bits for speed value from at803x */
- if (phydev->drv->phy_id == QCA8081_PHY_ID)
- speed = FIELD_GET(QCA808X_SS_SPEED_MASK, ss);
- else
- speed = FIELD_GET(AT803X_SS_SPEED_MASK, ss);
+ speed = ss & ss_mask.speed_mask;
+ speed >>= ss_mask.speed_shift;
switch (speed) {
case AT803X_SS_SPEED_10:
@@ -1231,12 +1020,9 @@ static int at803x_read_specific_status(struct phy_device *phydev)
static int at803x_read_status(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
+ struct at803x_ss_mask ss_mask = { 0 };
int err, old_link = phydev->link;
- if (priv->is_1000basex)
- return genphy_c37_read_status(phydev);
-
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
@@ -1255,7 +1041,9 @@ static int at803x_read_status(struct phy_device *phydev)
if (err < 0)
return err;
- err = at803x_read_specific_status(phydev);
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
if (err < 0)
return err;
@@ -1288,9 +1076,8 @@ static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
}
-static int at803x_config_aneg(struct phy_device *phydev)
+static int at803x_prepare_config_aneg(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
int ret;
ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
@@ -1307,33 +1094,22 @@ static int at803x_config_aneg(struct phy_device *phydev)
return ret;
}
- if (priv->is_1000basex)
- return genphy_c37_config_aneg(phydev);
-
- /* Do not restart auto-negotiation by setting ret to 0 defautly,
- * when calling __genphy_config_aneg later.
- */
- ret = 0;
-
- if (phydev->drv->phy_id == QCA8081_PHY_ID) {
- int phy_ctrl = 0;
+ return 0;
+}
- /* The reg MII_BMCR also needs to be configured for force mode, the
- * genphy_config_aneg is also needed.
- */
- if (phydev->autoneg == AUTONEG_DISABLE)
- genphy_c45_pma_setup_forced(phydev);
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
- phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
- if (ret < 0)
- return ret;
- }
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
- return __genphy_config_aneg(phydev, ret);
+ return genphy_config_aneg(phydev);
}
static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
@@ -1441,10 +1217,8 @@ static bool at803x_cdt_fault_length_valid(u16 status)
return false;
}
-static int at803x_cdt_fault_length(u16 status)
+static int at803x_cdt_fault_length(int dt)
{
- int dt;
-
/* According to the datasheet the distance to the fault is
* DELTA_TIME * 0.824 meters.
*
@@ -1460,36 +1234,19 @@ static int at803x_cdt_fault_length(u16 status)
* With a VF of 0.69 we get the factor 0.824 mentioned in the
* datasheet.
*/
- dt = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, status);
-
return (dt * 824) / 10;
}
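Worked example of the conversion: a delta time of 100 units gives (100 * 824) / 10 = 8240, i.e. 82.4 m, matching the 0.824 m-per-unit figure in the comment; the result is in the centimetre units that ethnl_cable_test_fault_length() expects.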
-static int at803x_cdt_start(struct phy_device *phydev, int pair)
+static int at803x_cdt_start(struct phy_device *phydev,
+ u32 cdt_start)
{
- u16 cdt;
-
- /* qca8081 takes the different bit 15 to enable CDT test */
- if (phydev->drv->phy_id == QCA8081_PHY_ID)
- cdt = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT |
- QCA808X_CDT_INTER_CHECK_DIS;
- else
- cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
-
- return phy_write(phydev, AT803X_CDT, cdt);
+ return phy_write(phydev, AT803X_CDT, cdt_start);
}
-static int at803x_cdt_wait_for_completion(struct phy_device *phydev)
+static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en)
{
int val, ret;
- u16 cdt_en;
-
- if (phydev->drv->phy_id == QCA8081_PHY_ID)
- cdt_en = QCA808X_CDT_ENABLE_TEST;
- else
- cdt_en = AT803X_CDT_ENABLE_TEST;
/* One test run takes about 25ms */
ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
@@ -1509,11 +1266,13 @@ static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
};
int ret, val;
- ret = at803x_cdt_start(phydev, pair);
+ val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+ ret = at803x_cdt_start(phydev, val);
if (ret)
return ret;
- ret = at803x_cdt_wait_for_completion(phydev);
+ ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
if (ret)
return ret;
@@ -1527,27 +1286,21 @@ static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
ethnl_cable_test_result(phydev, ethtool_pair[pair],
at803x_cable_test_result_trans(val));
- if (at803x_cdt_fault_length_valid(val))
+ if (at803x_cdt_fault_length_valid(val)) {
+ val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
at803x_cdt_fault_length(val));
+ }
return 1;
}
static int at803x_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
+ bool *finished, unsigned long pair_mask)
{
- unsigned long pair_mask;
int retries = 20;
int pair, ret;
- if (phydev->phy_id == ATH9331_PHY_ID ||
- phydev->phy_id == ATH8032_PHY_ID ||
- phydev->phy_id == QCA9561_PHY_ID)
- pair_mask = 0x3;
- else
- pair_mask = 0xf;
-
*finished = false;
/* According to the datasheet the CDT can be performed when
@@ -1574,7 +1327,7 @@ static int at803x_cable_test_get_status(struct phy_device *phydev,
return 0;
}
-static int at803x_cable_test_start(struct phy_device *phydev)
+static void at803x_cable_test_autoneg(struct phy_device *phydev)
{
/* Enable auto-negotiation, but advertise no capabilities, no link
* will be established. A restart of the auto-negotiation is not
@@ -1582,15 +1335,360 @@ static int at803x_cable_test_start(struct phy_device *phydev)
*/
phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
- if (phydev->phy_id != ATH9331_PHY_ID &&
- phydev->phy_id != ATH8032_PHY_ID &&
- phydev->phy_id != QCA9561_PHY_ID)
- phy_write(phydev, MII_CTRL1000, 0);
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static const struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
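
For reference, the selector-to-voltage mapping the regulator core derives from a table like vddio_voltage_table can be modelled in a few lines; this is an illustrative sketch, not kernel code.

/* Illustrative sketch (not kernel code): how a two-entry voltage table
 * maps regulator selectors to microvolts.
 */
#include <stdio.h>

static const unsigned int vddio_uv[] = { 1500000, 1800000 };

int main(void)
{
	for (unsigned int sel = 0; sel < 2; sel++)
		printf("selector %u -> %u uV\n", sel, vddio_uv[sel]);
	return 0;
}
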
+
+static const struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules advertise 10G modes in addition to the ones we support.
+ * Mask out the unsupported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
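
The masking step in at8031_sfp_insert() is plain bitwise AND over link-mode bitmaps. A toy sketch of the idea (simple unsigned masks standing in for the kernel's multi-word bitmaps and linkmode_and()):

/* Toy sketch (not the kernel bitmap API): masking the modes an SFP
 * module advertises against the modes the PHY supports.
 */
#include <stdio.h>

#define MODE_1000BASEX_FULL	(1u << 0)
#define MODE_1000BASET_FULL	(1u << 1)
#define MODE_10GBASER		(1u << 2)

int main(void)
{
	unsigned int phy_support = MODE_1000BASEX_FULL | MODE_1000BASET_FULL;
	unsigned int sfp_support = MODE_1000BASEX_FULL | MODE_10GBASER;
	unsigned int common = phy_support & sfp_support;

	if (!common)
		printf("incompatible SFP module inserted\n");
	else
		printf("common modes: 0x%x\n", common);
	return 0;
}
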
+
+static const struct sfp_upstream_ops at8031_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at8031_sfp_insert,
+};
+
+static int at8031_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+ "vddio");
+ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ return phy_sfp_probe(phydev, &at8031_sfp_ops);
+}
+
+static int at8031_probe(struct phy_device *phydev)
+{
+ struct at803x_priv *priv;
+ int mode_cfg;
+ int ccr;
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ priv = phydev->priv;
+
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ ret = at8031_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if (ccr < 0)
+ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
+ /* Disable WoL in the 1588 register, which is enabled
+ * by default
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+}
+
+static int at8031_config_init(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ return at803x_config_init(phydev);
+}
+
+static int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First set up the MAC address and enable the WoL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+
+static int at8031_config_intr(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err, value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+ priv->is_fiber) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+
+ err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+ if (err)
+ return err;
+ }
+
+ return at803x_config_intr(phydev);
+}
+/* AR8031 and AR8033 share the same read status logic */
+static int at8031_read_status(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev);
+
+ return at803x_read_status(phydev);
+}
+
+/* AR8031 and AR8035 share the same cable test get status reg */
+static int at8031_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0xf);
+}
+
+/* AR8031 and AR8035 share the same cable test start logic */
+static int at8031_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ phy_write(phydev, MII_CTRL1000, 0);
/* we do all the (time consuming) work later */
return 0;
}
+/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+static int at8032_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0x3);
+}
+
+static int at8035_parse_dt(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The mask is set by the generic at803x_parse_dt()
+ * when the property is present. Assume the property
+ * is present when the mask is non-zero.
+ */
+ if (priv->clk_25m_mask) {
+ /* Fixup for the AR8030/AR8035. This chip has a different mask and
+ * doesn't support the DSP reference, i.e. the lowest bit of the
+ * mask. The upper two bits select the same frequencies, so mask
+ * out the lowest bit here.
+ *
+ * Warning:
+ * No datasheet for the AR8030 was available, so this is
+ * just a guess. But the AR8035 is listed as pin-compatible
+ * with the AR8030, so there is a good chance it works on
+ * the AR8030 too.
+ */
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+
+ return 0;
+}
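
The clk_25m fixup boils down to clearing the selector bit the AR8030/AR8035 lack. A toy sketch (the mask value below is an assumed stand-in for the real AT8035_CLK_OUT_MASK, purely for illustration):

/* Toy sketch; EXAMPLE_CLK_OUT_MASK is an assumed stand-in that keeps
 * only the two upper frequency-select bits and drops the lowest bit.
 */
#include <stdio.h>

#define EXAMPLE_CLK_OUT_MASK	0x6u	/* keep bits 2:1, drop bit 0 */

int main(void)
{
	unsigned int clk_25m_reg = 0x7, clk_25m_mask = 0x7;

	clk_25m_reg &= EXAMPLE_CLK_OUT_MASK;
	clk_25m_mask &= EXAMPLE_CLK_OUT_MASK;
	printf("reg=0x%x mask=0x%x\n", clk_25m_reg, clk_25m_mask);
	return 0;
}
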
+
+/* AR8030 and AR8035 share the same special mask for clk_25m */
+static int at8035_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ return at8035_parse_dt(phydev);
+}
+
static int qca83xx_config_init(struct phy_device *phydev)
{
u8 switch_revision;
@@ -1616,27 +1714,26 @@ static int qca83xx_config_init(struct phy_device *phydev)
break;
}
+ /* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
/* QCA8327 requires the DAC amplitude adjustment for 100m to be set to +6%.
* Disable it on init and enable it only at 100m speed, following the
* original QCA source code.
*/
- if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
- phydev->drv->phy_id == QCA8327_B_PHY_ID)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
- /* Following original QCA sourcecode set port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
+ return qca83xx_config_init(phydev);
}
static void qca83xx_link_change_notify(struct phy_device *phydev)
{
- /* QCA8337 doesn't require DAC Amplitude adjustement */
- if (phydev->drv->phy_id == QCA8337_PHY_ID)
- return;
-
/* Set DAC Amplitude adjustment to +6% for 100m on link running */
if (phydev->state == PHY_RUNNING) {
if (phydev->speed == SPEED_100)
@@ -1672,26 +1769,13 @@ static int qca83xx_resume(struct phy_device *phydev)
if (ret)
return ret;
- msleep(1);
+ usleep_range(1000, 2000);
return 0;
}
static int qca83xx_suspend(struct phy_device *phydev)
{
- u16 mask = 0;
-
- /* Only QCA8337 support actual suspend.
- * QCA8327 cause port unreliability when phy suspend
- * is set.
- */
- if (phydev->drv->phy_id == QCA8337_PHY_ID) {
- genphy_suspend(phydev);
- } else {
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
- }
-
at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
AT803X_DEBUG_GATE_CLK_IN1000, 0);
@@ -1702,6 +1786,27 @@ static int qca83xx_suspend(struct phy_device *phydev)
return 0;
}
+static int qca8337_suspend(struct phy_device *phydev)
+{
+ /* Only QCA8337 supports actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* QCA8327 causes port unreliability when PHY suspend
+ * is set.
+ */
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
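
The mask arithmetic in qca8327_suspend() is subtle: phy_modify(phydev, MII_BMCR, mask, 0) clears every bit in mask, and mask = ~(BMCR_SPEED1000 | BMCR_FULLDPLX) therefore zeroes all of BMCR except the two speed/duplex bits. A minimal userspace model of that read-modify-write (the modify() helper is a stand-in for phy_modify(); BMCR bit values copied from include/uapi/linux/mii.h):

/* Minimal model (not kernel code) of phy_modify()'s read-modify-write. */
#include <stdio.h>
#include <stdint.h>

#define BMCR_FULLDPLX	0x0100
#define BMCR_SPEED1000	0x0040
#define BMCR_ANENABLE	0x1000

static uint16_t modify(uint16_t old, uint16_t mask, uint16_t set)
{
	return (old & ~mask) | set;
}

int main(void)
{
	uint16_t mask = (uint16_t)~(BMCR_SPEED1000 | BMCR_FULLDPLX);
	uint16_t bmcr = BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX;

	/* every bit except SPEED1000/FULLDPLX is cleared: 0x1140 -> 0x0140 */
	printf("0x%04x\n", modify(bmcr, mask, 0));
	return 0;
}
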
+
static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
{
int ret;
@@ -1712,27 +1817,27 @@ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
return ret;
phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
- QCA808X_TOP_OPTION1_DATA);
+ QCA808X_TOP_OPTION1_DATA);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
- QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
- QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
- QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
- QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
- QCA808X_MMD3_DEBUG_1_VALUE);
+ QCA808X_MMD3_DEBUG_1_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
- QCA808X_MMD3_DEBUG_4_VALUE);
+ QCA808X_MMD3_DEBUG_4_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
- QCA808X_MMD3_DEBUG_5_VALUE);
+ QCA808X_MMD3_DEBUG_5_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
- QCA808X_MMD3_DEBUG_3_VALUE);
+ QCA808X_MMD3_DEBUG_3_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
- QCA808X_MMD3_DEBUG_6_VALUE);
+ QCA808X_MMD3_DEBUG_6_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
- QCA808X_MMD3_DEBUG_2_VALUE);
+ QCA808X_MMD3_DEBUG_2_VALUE);
return 0;
}
@@ -1769,13 +1874,14 @@ static int qca808x_config_init(struct phy_device *phydev)
/* Activate ADC and VGA on 802.3az for the 1000M and 100M links */
ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
- QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
if (ret)
return ret;
/* Adjust the threshold on 802.3az for the link 1000M */
ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
- QCA808X_PHY_MMD3_AZ_TRAINING_CTRL, QCA808X_MMD3_AZ_TRAINING_VAL);
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+ QCA808X_MMD3_AZ_TRAINING_VAL);
if (ret)
return ret;
@@ -1801,11 +1907,13 @@ static int qca808x_config_init(struct phy_device *phydev)
/* Configure the ADC threshold as 100 mV for the 10M link */
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
- QCA808X_ADC_THRESHOLD_MASK, QCA808X_ADC_THRESHOLD_100MV);
+ QCA808X_ADC_THRESHOLD_MASK,
+ QCA808X_ADC_THRESHOLD_100MV);
}
static int qca808x_read_status(struct phy_device *phydev)
{
+ struct at803x_ss_mask ss_mask = { 0 };
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
@@ -1813,13 +1921,16 @@ static int qca808x_read_status(struct phy_device *phydev)
return ret;
linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
- ret & MDIO_AN_10GBT_STAT_LP2_5G);
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
ret = genphy_read_status(phydev);
if (ret)
return ret;
- ret = at803x_read_specific_status(phydev);
+ /* qca8081 uses different bits for the speed value than at803x */
+ ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+ ret = at803x_read_specific_status(phydev, ss_mask);
if (ret < 0)
return ret;
@@ -1840,7 +1951,7 @@ static int qca808x_read_status(struct phy_device *phydev)
*/
if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
- qca808x_is_prefer_master(phydev)) {
+ qca808x_is_prefer_master(phydev)) {
qca808x_phy_ms_seed_enable(phydev, false);
} else {
qca808x_phy_ms_seed_enable(phydev, true);
@@ -1868,8 +1979,17 @@ static int qca808x_soft_reset(struct phy_device *phydev)
static bool qca808x_cdt_fault_length_valid(int cdt_code)
{
switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SHORT:
- case QCA808X_CDT_STATUS_STAT_OPEN:
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
return true;
default:
return false;
@@ -1881,17 +2001,28 @@ static int qca808x_cable_test_result_trans(int cdt_code)
switch (cdt_code) {
case QCA808X_CDT_STATUS_STAT_NORMAL:
return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_OPEN:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
case QCA808X_CDT_STATUS_STAT_FAIL:
default:
return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
}
}
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair)
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
{
int val;
u32 cdt_length_reg = 0;
@@ -1917,7 +2048,12 @@ static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair)
if (val < 0)
return val;
- return (FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val) * 824) / 10;
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
+ return at803x_cdt_fault_length(val);
}
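
FIELD_GET() extracts a bit-field and shifts it down by the field's position, which is what selects the same-short or cross-short length field above. A toy reimplementation (the two mask values are invented stand-ins for the real QCA808X_CDT_DIAG_LENGTH_* definitions):

/* Toy FIELD_GET() equivalent; masks are illustrative only. */
#include <stdio.h>

#define EXAMPLE_LEN_SAME_SHORT	0x00ffu
#define EXAMPLE_LEN_CROSS_SHORT	0xff00u

static unsigned int field_get(unsigned int mask, unsigned int val)
{
	return (val & mask) / (mask & -mask); /* divide by lowest set bit */
}

int main(void)
{
	unsigned int status = 0x2a15;

	printf("same-short dt:  %u\n", field_get(EXAMPLE_LEN_SAME_SHORT, status));
	printf("cross-short dt: %u\n", field_get(EXAMPLE_LEN_CROSS_SHORT, status));
	return 0;
}
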
static int qca808x_cable_test_start(struct phy_device *phydev)
@@ -1961,18 +2097,53 @@ static int qca808x_cable_test_start(struct phy_device *phydev)
return 0;
}
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ int length, result;
+ u16 pair_code;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair, result);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
{
int ret, val;
- int pair_a, pair_b, pair_c, pair_d;
*finished = false;
- ret = at803x_cdt_start(phydev, 0);
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT;
+ ret = at803x_cdt_start(phydev, val);
if (ret)
return ret;
- ret = at803x_cdt_wait_for_completion(phydev);
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
if (ret)
return ret;
@@ -1980,32 +2151,21 @@ static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finish
if (val < 0)
return val;
- pair_a = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, val);
- pair_b = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, val);
- pair_c = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, val);
- pair_d = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, val);
-
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
- qca808x_cable_test_result_trans(pair_a));
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
- qca808x_cable_test_result_trans(pair_b));
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
- qca808x_cable_test_result_trans(pair_c));
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
- qca808x_cable_test_result_trans(pair_d));
-
- if (qca808x_cdt_fault_length_valid(pair_a))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A));
- if (qca808x_cdt_fault_length_valid(pair_b))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B));
- if (qca808x_cdt_fault_length_valid(pair_c))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C));
- if (qca808x_cdt_fault_length_valid(pair_d))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D));
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
*finished = true;
@@ -2040,14 +2200,41 @@ static int qca808x_get_features(struct phy_device *phydev)
return 0;
}
+static int qca808x_config_aneg(struct phy_device *phydev)
+{
+ int phy_ctrl = 0;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The reg MII_BMCR also needs to be configured for force mode, the
+ * genphy_config_aneg is also needed.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return __genphy_config_aneg(phydev, ret);
+}
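
Note the return-value plumbing in qca808x_config_aneg(): phy_modify_mmd_changed() returns 1 when the register content was actually updated, and that flag is handed to __genphy_config_aneg() so autonegotiation is only restarted when needed. A sketch of the pattern (modify_changed() is a stand-in, not the kernel helper):

/* Sketch of the "changed" contract: return 1 only on a real update. */
#include <stdio.h>

static int modify_changed(unsigned int *reg, unsigned int mask,
			  unsigned int set)
{
	unsigned int new_val = (*reg & ~mask) | set;

	if (new_val == *reg)
		return 0;
	*reg = new_val;
	return 1; /* caller knows it must restart autoneg */
}

int main(void)
{
	unsigned int an_ctrl = 0;

	printf("changed=%d\n", modify_changed(&an_ctrl, 0x80, 0x80)); /* 1 */
	printf("changed=%d\n", modify_changed(&an_ctrl, 0x80, 0x80)); /* 0 */
	return 0;
}
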
+
static void qca808x_link_change_notify(struct phy_device *phydev)
{
/* Assert the interface SGMII FIFO on link down, deassert it on link up;
* the interface device address is always the PHY address plus 1.
*/
mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
- MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
- QCA8081_PHY_FIFO_RSTN, phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN,
+ phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
}
static struct phy_driver at803x_driver[] = {
@@ -2056,7 +2243,7 @@ static struct phy_driver at803x_driver[] = {
PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
.name = "Qualcomm Atheros AR8035",
.flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
+ .probe = at8035_probe,
.config_aneg = at803x_config_aneg,
.config_init = at803x_config_init,
.soft_reset = genphy_soft_reset,
@@ -2070,14 +2257,14 @@ static struct phy_driver at803x_driver[] = {
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
}, {
/* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
.name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT8030_PHY_ID_MASK,
- .probe = at803x_probe,
+ .probe = at8035_probe,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
@@ -2092,24 +2279,24 @@ static struct phy_driver at803x_driver[] = {
PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
.name = "Qualcomm Atheros AR8031/AR8033",
.flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
- .config_init = at803x_config_init,
+ .probe = at8031_probe,
+ .config_init = at8031_config_init,
.config_aneg = at803x_config_aneg,
.soft_reset = genphy_soft_reset,
- .set_wol = at803x_set_wol,
+ .set_wol = at8031_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
.read_page = at803x_read_page,
.write_page = at803x_write_page,
.get_features = at803x_get_features,
- .read_status = at803x_read_status,
- .config_intr = at803x_config_intr,
+ .read_status = at8031_read_status,
+ .config_intr = at8031_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
}, {
/* Qualcomm Atheros AR8032 */
PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
@@ -2124,7 +2311,7 @@ static struct phy_driver at803x_driver[] = {
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_get_status = at8032_cable_test_get_status,
}, {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
@@ -2137,7 +2324,7 @@ static struct phy_driver at803x_driver[] = {
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_get_status = at8032_cable_test_get_status,
.read_status = at803x_read_status,
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
@@ -2153,7 +2340,7 @@ static struct phy_driver at803x_driver[] = {
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_get_status = at8032_cable_test_get_status,
.read_status = at803x_read_status,
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
@@ -2163,15 +2350,14 @@ static struct phy_driver at803x_driver[] = {
.phy_id_mask = QCA8K_PHY_ID_MASK,
.name = "Qualcomm Atheros 8337 internal PHY",
/* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
.soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
- .suspend = qca83xx_suspend,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8337_suspend,
.resume = qca83xx_resume,
}, {
/* QCA8327-A from switch QCA8327-AL1A */
@@ -2182,12 +2368,12 @@ static struct phy_driver at803x_driver[] = {
.link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
+ .config_init = qca8327_config_init,
.soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
- .suspend = qca83xx_suspend,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
.resume = qca83xx_resume,
}, {
/* QCA8327-B from switch QCA8327-BL1A */
@@ -2198,12 +2384,12 @@ static struct phy_driver at803x_driver[] = {
.link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
+ .config_init = qca8327_config_init,
.soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
- .suspend = qca83xx_suspend,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
.resume = qca83xx_resume,
}, {
/* Qualcomm QCA8081 */
@@ -2218,7 +2404,7 @@ static struct phy_driver at803x_driver[] = {
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.get_features = qca808x_get_features,
- .config_aneg = at803x_config_aneg,
+ .config_aneg = qca808x_config_aneg,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_status = qca808x_read_status,
diff --git a/drivers/net/phy/ax88796b_rust.rs b/drivers/net/phy/ax88796b_rust.rs
new file mode 100644
index 000000000..5c9257296
--- /dev/null
+++ b/drivers/net/phy/ax88796b_rust.rs
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2023 FUJITA Tomonori <fujita.tomonori@gmail.com>
+
+//! Rust Asix PHYs driver
+//!
+//! C version of this driver: [`drivers/net/phy/ax88796b.c`](./ax88796b.c)
+use kernel::{
+ c_str,
+ net::phy::{self, DeviceId, Driver},
+ prelude::*,
+ uapi,
+};
+
+kernel::module_phy_driver! {
+ drivers: [PhyAX88772A, PhyAX88772C, PhyAX88796B],
+ device_table: [
+ DeviceId::new_with_driver::<PhyAX88772A>(),
+ DeviceId::new_with_driver::<PhyAX88772C>(),
+ DeviceId::new_with_driver::<PhyAX88796B>()
+ ],
+ name: "rust_asix_phy",
+ author: "FUJITA Tomonori <fujita.tomonori@gmail.com>",
+ description: "Rust Asix PHYs driver",
+ license: "GPL",
+}
+
+const MII_BMCR: u16 = uapi::MII_BMCR as u16;
+const BMCR_SPEED100: u16 = uapi::BMCR_SPEED100 as u16;
+const BMCR_FULLDPLX: u16 = uapi::BMCR_FULLDPLX as u16;
+
+// Performs a software PHY reset using the standard
+// BMCR_RESET bit and polls for the reset bit to be cleared.
+// Toggles the BMCR_RESET bit off first to accommodate broken AX88796B PHY
+// implementations such as the one used on the Individual Computers'
+// X-Surf 100 Zorro card.
+fn asix_soft_reset(dev: &mut phy::Device) -> Result {
+ dev.write(uapi::MII_BMCR as u16, 0)?;
+ dev.genphy_soft_reset()
+}
+
+struct PhyAX88772A;
+
+#[vtable]
+impl Driver for PhyAX88772A {
+ const FLAGS: u32 = phy::flags::IS_INTERNAL;
+ const NAME: &'static CStr = c_str!("Asix Electronics AX88772A");
+ const PHY_DEVICE_ID: DeviceId = DeviceId::new_with_exact_mask(0x003b1861);
+
+ // AX88772A does not work properly with some old switches (NETGEAR EN 108TP):
+ // after autoneg is done and the link status is reported as active, the MII_LPA
+ // register is 0. This issue is not reproducible on AX88772C.
+ fn read_status(dev: &mut phy::Device) -> Result<u16> {
+ dev.genphy_update_link()?;
+ if !dev.is_link_up() {
+ return Ok(0);
+ }
+ // If MII_LPA is 0, phy_resolve_aneg_linkmode() will fail to resolve
+ // the link mode, so use MII_BMCR as the default values.
+ let ret = dev.read(MII_BMCR)?;
+
+ if ret & BMCR_SPEED100 != 0 {
+ dev.set_speed(uapi::SPEED_100);
+ } else {
+ dev.set_speed(uapi::SPEED_10);
+ }
+
+ let duplex = if ret & BMCR_FULLDPLX != 0 {
+ phy::DuplexMode::Full
+ } else {
+ phy::DuplexMode::Half
+ };
+ dev.set_duplex(duplex);
+
+ dev.genphy_read_lpa()?;
+
+ if dev.is_autoneg_enabled() && dev.is_autoneg_completed() {
+ dev.resolve_aneg_linkmode();
+ }
+
+ Ok(0)
+ }
+
+ fn suspend(dev: &mut phy::Device) -> Result {
+ dev.genphy_suspend()
+ }
+
+ fn resume(dev: &mut phy::Device) -> Result {
+ dev.genphy_resume()
+ }
+
+ fn soft_reset(dev: &mut phy::Device) -> Result {
+ asix_soft_reset(dev)
+ }
+
+ fn link_change_notify(dev: &mut phy::Device) {
+ // Reset PHY, otherwise MII_LPA will provide outdated information.
+ // This issue is reproducible only with some link partner PHYs.
+ if dev.state() == phy::DeviceState::NoLink {
+ let _ = dev.init_hw();
+ let _ = dev.start_aneg();
+ }
+ }
+}
+
+struct PhyAX88772C;
+
+#[vtable]
+impl Driver for PhyAX88772C {
+ const FLAGS: u32 = phy::flags::IS_INTERNAL;
+ const NAME: &'static CStr = c_str!("Asix Electronics AX88772C");
+ const PHY_DEVICE_ID: DeviceId = DeviceId::new_with_exact_mask(0x003b1881);
+
+ fn suspend(dev: &mut phy::Device) -> Result {
+ dev.genphy_suspend()
+ }
+
+ fn resume(dev: &mut phy::Device) -> Result {
+ dev.genphy_resume()
+ }
+
+ fn soft_reset(dev: &mut phy::Device) -> Result {
+ asix_soft_reset(dev)
+ }
+}
+
+struct PhyAX88796B;
+
+#[vtable]
+impl Driver for PhyAX88796B {
+ const NAME: &'static CStr = c_str!("Asix Electronics AX88796B");
+ const PHY_DEVICE_ID: DeviceId = DeviceId::new_with_model_mask(0x003b1841);
+
+ fn soft_reset(dev: &mut phy::Device) -> Result {
+ asix_soft_reset(dev)
+ }
+}
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index cb4b91af5..617d384d4 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -782,16 +782,13 @@ out:
}
static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
- struct ifreq *ifr)
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
- struct hwtstamp_config cfg;
u16 mode, ctrl;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->hwts_rx = false;
break;
@@ -804,14 +801,14 @@ static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
priv->hwts_rx = true;
break;
default:
return -ERANGE;
}
- priv->tx_type = cfg.tx_type;
+ priv->tx_type = cfg->tx_type;
ctrl = priv->hwts_rx ? SLICE_RX_EN : 0;
ctrl |= priv->tx_type != HWTSTAMP_TX_OFF ? SLICE_TX_EN : 0;
@@ -840,7 +837,7 @@ static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
/* purge existing data */
skb_queue_purge(&priv->tx_queue);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index d43076592..2eea3d09b 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -128,6 +128,10 @@
#define BCM54140_DEFAULT_DOWNSHIFT 5
#define BCM54140_MAX_DOWNSHIFT 9
+enum bcm54140_global_phy {
+ BCM54140_BASE_ADDR = 0,
+};
+
struct bcm54140_priv {
int port;
int base_addr;
@@ -429,11 +433,13 @@ static int bcm54140_base_read_rdb(struct phy_device *phydev, u16 rdb)
int ret;
phy_lock_mdio_bus(phydev);
- ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_ADDR, rdb);
if (ret < 0)
goto out;
- ret = __phy_package_read(phydev, MII_BCM54XX_RDB_DATA);
+ ret = __phy_package_read(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_DATA);
out:
phy_unlock_mdio_bus(phydev);
@@ -446,11 +452,13 @@ static int bcm54140_base_write_rdb(struct phy_device *phydev,
int ret;
phy_lock_mdio_bus(phydev);
- ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_ADDR, rdb);
if (ret < 0)
goto out;
- ret = __phy_package_write(phydev, MII_BCM54XX_RDB_DATA, val);
+ ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_DATA, val);
out:
phy_unlock_mdio_bus(phydev);
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 9717a1626..f1d47c264 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -29,8 +29,19 @@ static int bcm84881_wait_init(struct phy_device *phydev)
100000, 2000000, false);
}
+static void bcm84881_fill_possible_interfaces(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, possible);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, possible);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, possible);
+}
+
static int bcm84881_config_init(struct phy_device *phydev)
{
+ bcm84881_fill_possible_interfaces(phydev);
+
switch (phydev->interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_2500BASEX:
@@ -39,6 +50,7 @@ static int bcm84881_config_init(struct phy_device *phydev)
default:
return -ENODEV;
}
+
return 0;
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 3a6271056..312a8bb35 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -1135,6 +1135,8 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2657be7cc..5c42c47dc 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1207,22 +1207,20 @@ static irqreturn_t dp83640_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+static int dp83640_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct dp83640_private *dp83640 =
container_of(mii_ts, struct dp83640_private, mii_ts);
- struct hwtstamp_config cfg;
u16 txcfg0, rxcfg0;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
+ if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
return -ERANGE;
- dp83640->hwts_tx_en = cfg.tx_type;
+ dp83640->hwts_tx_en = cfg->tx_type;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
dp83640->hwts_rx_en = 0;
dp83640->layer = 0;
@@ -1234,7 +1232,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@@ -1242,7 +1240,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@@ -1250,7 +1248,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -1258,7 +1256,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
@@ -1292,7 +1290,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
mutex_unlock(&dp83640->clock->extreg_lock);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static void rx_timestamp_work(struct work_struct *work)
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index fa8c6fdcf..d7aaefb52 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
phy_ctrl_val = dp83869->mode;
if (phydev->interface == PHY_INTERFACE_MODE_MII) {
if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
- dp83869->mode == DP83869_RGMII_100_BASE) {
+ dp83869->mode == DP83869_RGMII_100_BASE ||
+ dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
phy_ctrl_val |= DP83869_OP_MODE_MII;
} else {
phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
new file mode 100644
index 000000000..326c9770a
--- /dev/null
+++ b/drivers/net/phy/dp83tg720.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for the Texas Instruments DP83TG720 PHY
+ * Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83TG720S_PHY_ID 0x2000a284
+
+/* MDIO_MMD_VEND2 registers */
+#define DP83TG720S_MII_REG_10 0x10
+#define DP83TG720S_STS_MII_INT BIT(7)
+#define DP83TG720S_LINK_STATUS BIT(0)
+
+#define DP83TG720S_PHY_RESET 0x1f
+#define DP83TG720S_HW_RESET BIT(15)
+
+#define DP83TG720S_RGMII_DELAY_CTRL 0x602
+/* In RGMII mode, enable or disable the internal delay for RXD */
+#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
+/* In RGMII mode, enable or disable the internal delay for TXD */
+#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+
+#define DP83TG720S_SQI_REG_1 0x871
+#define DP83TG720S_SQI_OUT_WORST GENMASK(7, 5)
+#define DP83TG720S_SQI_OUT GENMASK(3, 1)
+
+#define DP83TG720_SQI_MAX 7
+
+static int dp83tg720_config_aneg(struct phy_device *phydev)
+{
+ /* Autoneg is not supported and this PHY supports only one speed.
+ * We only need to care about the master/slave configuration if it
+ * was changed by the user.
+ */
+ return genphy_c45_pma_baset1_setup_master_slave(phydev);
+}
+
+static int dp83tg720_read_status(struct phy_device *phydev)
+{
+ u16 phy_sts;
+ int ret;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ /* Most of the Clause 45 registers are not present, so we can't use
+ * genphy_c45_read_status() here.
+ */
+ phy_sts = phy_read(phydev, DP83TG720S_MII_REG_10);
+ phydev->link = !!(phy_sts & DP83TG720S_LINK_STATUS);
+ if (!phydev->link) {
+ /* According to the "DP83TC81x, DP83TG72x Software
+ * Implementation Guide", the PHY needs to be reset after a
+ * link loss or if no link is created after at least 100ms.
+ *
+ * Currently we are polling with the PHY_STATE_TIME (1000ms)
+ * interval, which is still sufficient for non-automotive use cases.
+ */
+ ret = phy_init_hw(phydev);
+ if (ret)
+ return ret;
+
+ /* After a HW reset we need to restore the master/slave configuration. */
+ ret = dp83tg720_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ } else {
+ /* PMA/PMD control 1 register (Register 1.0) is present, but it
+ * doesn't contain the link speed information.
+ * So genphy_c45_read_pma() can't be used here.
+ */
+ ret = genphy_c45_pma_baset1_read_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ }
+
+ return 0;
+}
+
+static int dp83tg720_get_sqi(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->link)
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_SQI_REG_1);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(DP83TG720S_SQI_OUT, ret);
+}
+
+static int dp83tg720_get_sqi_max(struct phy_device *phydev)
+{
+ return DP83TG720_SQI_MAX;
+}
+
+static int dp83tg720_config_rgmii_delay(struct phy_device *phydev)
+{
+ u16 rgmii_delay_mask;
+ u16 rgmii_delay = 0;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rgmii_delay = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ rgmii_delay = DP83TG720S_RGMII_RX_CLK_SEL |
+ DP83TG720S_RGMII_TX_CLK_SEL;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rgmii_delay = DP83TG720S_RGMII_RX_CLK_SEL;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rgmii_delay = DP83TG720S_RGMII_TX_CLK_SEL;
+ break;
+ default:
+ return 0;
+ }
+
+ rgmii_delay_mask = DP83TG720S_RGMII_RX_CLK_SEL |
+ DP83TG720S_RGMII_TX_CLK_SEL;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_RGMII_DELAY_CTRL, rgmii_delay_mask,
+ rgmii_delay);
+}
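
The switch above maps each RGMII variant to its pair of delay-enable bits. The same mapping as a standalone lookup, for illustration only:

/* Standalone illustration of the interface-mode to delay-bit mapping. */
#include <stdio.h>

#define RX_CLK_SEL	(1u << 1)
#define TX_CLK_SEL	(1u << 0)

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

static unsigned int rgmii_delay_bits(enum rgmii_mode mode)
{
	switch (mode) {
	case RGMII_ID:
		return RX_CLK_SEL | TX_CLK_SEL;
	case RGMII_RXID:
		return RX_CLK_SEL;
	case RGMII_TXID:
		return TX_CLK_SEL;
	case RGMII:
	default:
		return 0;
	}
}

int main(void)
{
	printf("rgmii-id -> delay bits 0x%x\n", rgmii_delay_bits(RGMII_ID));
	return 0;
}
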
+
+static int dp83tg720_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* A Software Restart is not enough to recover from a link failure,
+ * so use a Hardware Reset instead.
+ */
+ ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET);
+ if (ret)
+ return ret;
+
+ /* Wait until MDC can be used again.
+ * The 1 ms wait value is documented in the "DP83TG720S-Q1 1000BASE-T1
+ * Automotive Ethernet PHY with SGMII and RGMII" datasheet.
+ */
+ usleep_range(1000, 2000);
+
+ if (phy_interface_is_rgmii(phydev))
+ return dp83tg720_config_rgmii_delay(phydev);
+
+ return 0;
+}
+
+static struct phy_driver dp83tg720_driver[] = {
+{
+ PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
+ .name = "TI DP83TG720S",
+
+ .config_aneg = dp83tg720_config_aneg,
+ .read_status = dp83tg720_read_status,
+ .get_features = genphy_c45_pma_read_ext_abilities,
+ .config_init = dp83tg720_config_init,
+ .get_sqi = dp83tg720_get_sqi,
+ .get_sqi_max = dp83tg720_get_sqi_max,
+
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+} };
+module_phy_driver(dp83tg720_driver);
+
+static struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
+ { PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, dp83tg720_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83TG720S PHY driver");
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index d4bb90d76..ad43e2809 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -141,13 +141,21 @@ enum {
MV_V2_TEMP_UNKNOWN = 0x9600, /* unknown function */
};
+struct mv3310_mactype {
+ bool valid;
+ bool fixed_interface;
+ phy_interface_t interface_10g;
+};
+
struct mv3310_chip {
bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
int (*set_mactype)(struct phy_device *phydev, int mactype);
int (*select_mactype)(unsigned long *interfaces);
- int (*init_interface)(struct phy_device *phydev, int mactype);
+
+ const struct mv3310_mactype *mactypes;
+ size_t n_mactypes;
#ifdef CONFIG_HWMON
int (*hwmon_read_temp_reg)(struct phy_device *phydev);
@@ -156,11 +164,10 @@ struct mv3310_chip {
struct mv3310_priv {
DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX);
+ const struct mv3310_mactype *mactype;
u32 firmware_ver;
bool has_downshift;
- bool rate_match;
- phy_interface_t const_interface;
struct device *hwmon_dev;
char *hwmon_name;
@@ -702,70 +709,114 @@ static int mv3310_select_mactype(unsigned long *interfaces)
return -1;
}
-static int mv2110_init_interface(struct phy_device *phydev, int mactype)
-{
- struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
-
- priv->rate_match = false;
-
- if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH)
- priv->rate_match = true;
-
- if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII)
- priv->const_interface = PHY_INTERFACE_MODE_USXGMII;
- else if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH)
- priv->const_interface = PHY_INTERFACE_MODE_10GBASER;
- else if (mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER ||
- mactype == MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER_NO_SGMII_AN)
- priv->const_interface = PHY_INTERFACE_MODE_NA;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int mv3310_init_interface(struct phy_device *phydev, int mactype)
-{
- struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+static const struct mv3310_mactype mv2110_mactypes[] = {
+ [MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_USXGMII,
+ },
+ [MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_NA,
+ },
+ [MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER_NO_SGMII_AN] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_NA,
+ },
+ [MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+};
- priv->rate_match = false;
-
- if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH ||
- mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH ||
- mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH)
- priv->rate_match = true;
-
- if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII)
- priv->const_interface = PHY_INTERFACE_MODE_USXGMII;
- else if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH ||
- mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN ||
- mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER)
- priv->const_interface = PHY_INTERFACE_MODE_10GBASER;
- else if (mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH ||
- mactype == MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI)
- priv->const_interface = PHY_INTERFACE_MODE_RXAUI;
- else if (mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH ||
- mactype == MV_V2_3310_PORT_CTRL_MACTYPE_XAUI)
- priv->const_interface = PHY_INTERFACE_MODE_XAUI;
- else
- return -EINVAL;
+static const struct mv3310_mactype mv3310_mactypes[] = {
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_RXAUI,
+ },
+ [MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_XAUI,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_RXAUI,
+ },
+ [MV_V2_3310_PORT_CTRL_MACTYPE_XAUI] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_XAUI,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_USXGMII,
+ },
+};
- return 0;
-}
+static const struct mv3310_mactype mv3340_mactypes[] = {
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_RXAUI,
+ },
+ [MV_V2_3340_PORT_CTRL_MACTYPE_RXAUI_NO_SGMII_AN] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_RXAUI,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_RXAUI,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN] = {
+ .valid = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_10GBASER,
+ },
+ [MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII] = {
+ .valid = true,
+ .fixed_interface = true,
+ .interface_10g = PHY_INTERFACE_MODE_USXGMII,
+ },
+};
-static int mv3340_init_interface(struct phy_device *phydev, int mactype)
+static void mv3310_fill_possible_interfaces(struct phy_device *phydev)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
- int err = 0;
-
- priv->rate_match = false;
+ unsigned long *possible = phydev->possible_interfaces;
+ const struct mv3310_mactype *mactype = priv->mactype;
- if (mactype == MV_V2_3340_PORT_CTRL_MACTYPE_RXAUI_NO_SGMII_AN)
- priv->const_interface = PHY_INTERFACE_MODE_RXAUI;
- else
- err = mv3310_init_interface(phydev, mactype);
+ if (mactype->interface_10g != PHY_INTERFACE_MODE_NA)
+ __set_bit(priv->mactype->interface_10g, possible);
- return err;
+ if (!mactype->fixed_interface) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, possible);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, possible);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, possible);
+ }
}
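
The table-driven rewrite replaces the per-chip init_interface() callbacks with a bounds-plus-validity lookup over the mactypes array, as seen in mv3310_config_init() below. A compact sketch of the idea (struct and names invented for illustration; only the shape matches the driver):

/* Compact sketch of the table-driven mactype lookup. */
#include <stdio.h>

struct mactype {
	int valid;
	const char *name;
};

static const struct mactype mactypes[] = {
	[1] = { 1, "usxgmii" },
	[4] = { 1, "10gbase-r" },
};

static const struct mactype *mactype_lookup(unsigned int idx)
{
	if (idx >= sizeof(mactypes) / sizeof(mactypes[0]) ||
	    !mactypes[idx].valid)
		return NULL; /* "MACTYPE configuration invalid" */
	return &mactypes[idx];
}

int main(void)
{
	const struct mactype *m = mactype_lookup(4);

	printf("%s\n", m ? m->name : "invalid");
	return 0;
}
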
static int mv3310_config_init(struct phy_device *phydev)
@@ -803,12 +854,15 @@ static int mv3310_config_init(struct phy_device *phydev)
if (mactype < 0)
return mactype;
- err = chip->init_interface(phydev, mactype);
- if (err) {
+ if (mactype >= chip->n_mactypes || !chip->mactypes[mactype].valid) {
phydev_err(phydev, "MACTYPE configuration invalid\n");
- return err;
+ return -EINVAL;
}
+ priv->mactype = &chip->mactypes[mactype];
+
+ mv3310_fill_possible_interfaces(phydev);
+
/* Enable EDPD mode - saving 600mW */
err = mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
if (err)
@@ -935,9 +989,8 @@ static void mv3310_update_interface(struct phy_device *phydev)
*
* In USXGMII mode the PHY interface mode is also fixed.
*/
- if (priv->rate_match ||
- priv->const_interface == PHY_INTERFACE_MODE_USXGMII) {
- phydev->interface = priv->const_interface;
+ if (priv->mactype->fixed_interface) {
+ phydev->interface = priv->mactype->interface_10g;
return;
}
@@ -949,7 +1002,7 @@ static void mv3310_update_interface(struct phy_device *phydev)
*/
switch (phydev->speed) {
case SPEED_10000:
- phydev->interface = priv->const_interface;
+ phydev->interface = priv->mactype->interface_10g;
break;
case SPEED_5000:
phydev->interface = PHY_INTERFACE_MODE_5GBASER;
@@ -1163,7 +1216,9 @@ static const struct mv3310_chip mv3310_type = {
.get_mactype = mv3310_get_mactype,
.set_mactype = mv3310_set_mactype,
.select_mactype = mv3310_select_mactype,
- .init_interface = mv3310_init_interface,
+
+ .mactypes = mv3310_mactypes,
+ .n_mactypes = ARRAY_SIZE(mv3310_mactypes),
#ifdef CONFIG_HWMON
.hwmon_read_temp_reg = mv3310_hwmon_read_temp_reg,
@@ -1176,7 +1231,9 @@ static const struct mv3310_chip mv3340_type = {
.get_mactype = mv3310_get_mactype,
.set_mactype = mv3310_set_mactype,
.select_mactype = mv3310_select_mactype,
- .init_interface = mv3340_init_interface,
+
+ .mactypes = mv3340_mactypes,
+ .n_mactypes = ARRAY_SIZE(mv3340_mactypes),
#ifdef CONFIG_HWMON
.hwmon_read_temp_reg = mv3310_hwmon_read_temp_reg,
@@ -1188,7 +1245,9 @@ static const struct mv3310_chip mv2110_type = {
.get_mactype = mv2110_get_mactype,
.set_mactype = mv2110_set_mactype,
.select_mactype = mv2110_select_mactype,
- .init_interface = mv2110_init_interface,
+
+ .mactypes = mv2110_mactypes,
+ .n_mactypes = ARRAY_SIZE(mv2110_mactypes),
#ifdef CONFIG_HWMON
.hwmon_read_temp_reg = mv2110_hwmon_read_temp_reg,
@@ -1200,7 +1259,9 @@ static const struct mv3310_chip mv2111_type = {
.get_mactype = mv2110_get_mactype,
.set_mactype = mv2110_set_mactype,
.select_mactype = mv2110_select_mactype,
- .init_interface = mv2110_init_interface,
+
+ .mactypes = mv2110_mactypes,
+ .n_mactypes = ARRAY_SIZE(mv2110_mactypes),
#ifdef CONFIG_HWMON
.hwmon_read_temp_reg = mv2110_hwmon_read_temp_reg,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 25dcaa49a..afbad1ad8 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -193,6 +193,10 @@ static void mdiobus_release(struct device *d)
bus->state != MDIOBUS_ALLOCATED,
"%s: not in RELEASED or ALLOCATED state\n",
bus->id);
+
+ if (bus->state == MDIOBUS_RELEASED)
+ fwnode_handle_put(dev_fwnode(d));
+
kfree(bus);
}
@@ -506,7 +510,7 @@ static int mdiobus_create_device(struct mii_bus *bus,
if (IS_ERR(mdiodev))
return -ENODEV;
- strncpy(mdiodev->modalias, bi->modalias,
+ strscpy(mdiodev->modalias, bi->modalias,
sizeof(mdiodev->modalias));
mdiodev->bus_match = mdio_device_bus_match;
mdiodev->dev.platform_data = (void *)bi->platform_data;
@@ -684,6 +688,15 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
bus->dev.groups = NULL;
dev_set_name(&bus->dev, "%s", bus->id);
+ /* If the bus state is allocated, we're registering a fresh bus
+ * that may have a fwnode associated with it. Grab a reference
+ * to the fwnode. This will be dropped when the bus is released.
+ * If the bus was set to unregistered, it means that the bus was
+ * previously registered, and we've already grabbed a reference.
+ */
+ if (bus->state == MDIOBUS_ALLOCATED)
+ fwnode_handle_get(dev_fwnode(&bus->dev));
+
/* We need to set state to MDIOBUS_UNREGISTERED to correctly release
* the device in mdiobus_free()
*
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 044828d08..73f6539b9 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -62,6 +62,7 @@ struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
mdiodev->device_remove = mdio_device_remove;
mdiodev->bus = bus;
mdiodev->addr = addr;
+ mdiodev->reset_state = -1;
dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
@@ -122,6 +123,9 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
if (!mdiodev->reset_gpio && !mdiodev->reset_ctrl)
return;
+ if (mdiodev->reset_state == value)
+ return;
+
if (mdiodev->reset_gpio)
gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
@@ -135,6 +139,8 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
if (d)
fsleep(d);
+
+ mdiodev->reset_state = value;
}
EXPORT_SYMBOL(mdio_device_reset);
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
index 69b829e6a..7fd3377db 100644
--- a/drivers/net/phy/mdio_devres.c
+++ b/drivers/net/phy/mdio_devres.c
@@ -131,4 +131,5 @@ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */
+MODULE_DESCRIPTION("Network MDIO bus devres helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 0f3a1538a..f4f9412d0 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -216,6 +216,9 @@
#define MTK_PHY_LED_ON_LINK1000 BIT(0)
#define MTK_PHY_LED_ON_LINK100 BIT(1)
#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\
+ MTK_PHY_LED_ON_LINK100 |\
+ MTK_PHY_LED_ON_LINK1000)
#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
@@ -231,6 +234,12 @@
#define MTK_PHY_LED_BLINK_100RX BIT(3)
#define MTK_PHY_LED_BLINK_10TX BIT(4)
#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\
+ MTK_PHY_LED_BLINK_100RX |\
+ MTK_PHY_LED_BLINK_1000RX)
+#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\
+ MTK_PHY_LED_BLINK_100TX |\
+ MTK_PHY_LED_BLINK_1000TX)
#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (blink < 0)
return -EIO;
- if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK10)) ||
- (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
- MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+ if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_PHY_LED_ON_LINKDOWN)) ||
+ (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
set_bit(bit_netdev, &priv->led_state);
else
clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (!rules)
return 0;
- if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+ if (on & MTK_PHY_LED_ON_LINK)
*rules |= BIT(TRIGGER_NETDEV_LINK);
if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
if (on & MTK_PHY_LED_ON_HDX)
*rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
- if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+ if (blink & MTK_PHY_LED_BLINK_RX)
*rules |= BIT(TRIGGER_NETDEV_RX);
- if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+ if (blink & MTK_PHY_LED_BLINK_TX)
*rules |= BIT(TRIGGER_NETDEV_TX);
return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
on |= MTK_PHY_LED_ON_LINK1000;
if (rules & BIT(TRIGGER_NETDEV_RX)) {
- blink |= MTK_PHY_LED_BLINK_10RX |
- MTK_PHY_LED_BLINK_100RX |
- MTK_PHY_LED_BLINK_1000RX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
+ MTK_PHY_LED_BLINK_RX;
}
if (rules & BIT(TRIGGER_NETDEV_TX)) {
- blink |= MTK_PHY_LED_BLINK_10TX |
- MTK_PHY_LED_BLINK_100TX |
- MTK_PHY_LED_BLINK_1000TX;
+ blink |= (on & MTK_PHY_LED_ON_LINK) ?
+ (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
+ ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
+ MTK_PHY_LED_BLINK_TX;
}
if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
MTK_PHY_LED0_ON_CTRL,
MTK_PHY_LED_ON_FDX |
MTK_PHY_LED_ON_HDX |
- MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 |
- MTK_PHY_LED_ON_LINK1000,
+ MTK_PHY_LED_ON_LINK,
on);
if (ret)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ca2db4adc..1f950c824 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2006,7 +2006,7 @@ static int kszphy_probe(struct phy_device *phydev)
kszphy_parse_led_mode(phydev);
- clk = devm_clk_get(&phydev->mdio.dev, "rmii-ref");
+ clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, "rmii-ref");
/* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
if (!IS_ERR_OR_NULL(clk)) {
unsigned long rate = clk_get_rate(clk);
@@ -2026,6 +2026,11 @@ static int kszphy_probe(struct phy_device *phydev)
rate);
return -EINVAL;
}
+ } else if (!clk) {
+ /* unnamed clock from the generic ethernet-phy binding */
+ clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
}
if (ksz8041_fiber_mode(phydev))
@@ -2400,24 +2405,23 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
}
-static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct kszphy_ptp_priv *ptp_priv =
container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct phy_device *phydev = ptp_priv->phydev;
struct lan8814_shared_priv *shared = phydev->shared->priv;
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
- struct hwtstamp_config config;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
+ int tx_mod;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ ptp_priv->hwts_tx_type = config->tx_type;
+ ptp_priv->rx_filter = config->rx_filter;
- ptp_priv->hwts_tx_type = config.tx_type;
- ptp_priv->rx_filter = config.rx_filter;
-
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_priv->layer = 0;
ptp_priv->version = 0;
@@ -2459,17 +2463,22 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
+ if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
+ lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
+ tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ } else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ }
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
lan8814_config_ts_intr(ptp_priv->phydev, true);
else
lan8814_config_ts_intr(ptp_priv->phydev, false);
mutex_lock(&shared->shared_lock);
- if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (config->rx_filter != HWTSTAMP_FILTER_NONE)
shared->ref++;
else
shared->ref--;
@@ -2493,7 +2502,7 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
lan8814_flush_fifo(ptp_priv->phydev, false);
lan8814_flush_fifo(ptp_priv->phydev, true);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
@@ -2519,7 +2528,7 @@ static void lan8814_txtstamp(struct mii_timestamper *mii_ts,
}
}
-static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2529,7 +2538,11 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
ptp_header = ptp_parse_header(skb, type);
skb_pull_inline(skb, ETH_HLEN);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2541,7 +2554,8 @@ static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
bool ret = false;
u16 skb_sig;
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ return ret;
/* Iterate over all RX timestamps and match it with the received skbs */
spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
@@ -2821,7 +2835,7 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
return 0;
}
-static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
+static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
u32 type;
@@ -2829,7 +2843,11 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
type = ptp_classify_raw(skb);
ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
+ return true;
}
static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
@@ -2843,7 +2861,8 @@ static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
- lan8814_get_sig_tx(skb, &skb_sig);
+ if (!lan8814_get_sig_tx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &seq_id, sizeof(seq_id)))
continue;
@@ -2897,7 +2916,8 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
spin_lock_irqsave(&ptp_priv->rx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->rx_queue, skb, skb_tmp) {
- lan8814_get_sig_rx(skb, &skb_sig);
+ if (!lan8814_get_sig_rx(skb, &skb_sig))
+ continue;
if (memcmp(&skb_sig, &rx_ts->seq_id, sizeof(rx_ts->seq_id)))
continue;
@@ -3718,21 +3738,19 @@ static void lan8841_ptp_enable_processing(struct kszphy_ptp_priv *ptp_priv,
#define LAN8841_PTP_TX_TIMESTAMP_EN 443
#define LAN8841_PTP_TX_MOD 445
-static int lan8841_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+static int lan8841_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct phy_device *phydev = ptp_priv->phydev;
- struct hwtstamp_config config;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- ptp_priv->hwts_tx_type = config.tx_type;
- ptp_priv->rx_filter = config.rx_filter;
+ ptp_priv->hwts_tx_type = config->tx_type;
+ ptp_priv->rx_filter = config->rx_filter;
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_priv->layer = 0;
ptp_priv->version = 0;
@@ -3786,13 +3804,13 @@ static int lan8841_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
/* Now enable/disable the timestamping */
lan8841_ptp_enable_processing(ptp_priv,
- config.rx_filter != HWTSTAMP_FILTER_NONE);
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
skb_queue_purge(&ptp_priv->tx_queue);
lan8841_ptp_flush_fifo(ptp_priv);
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+ return 0;
}
static bool lan8841_rxtstamp(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 7a962050a..6a3d8a754 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -416,6 +416,11 @@ struct vsc8531_private {
* gpio_lock: used for PHC operations. Common for all PHYs as the load/save GPIO
* is shared.
*/
+
+enum vsc85xx_global_phy {
+ VSC88XX_BASE_ADDR = 0,
+};
+
struct vsc85xx_shared_private {
struct mutex gpio_lock;
};
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 4171f01d3..6f74ce0ab 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -711,7 +711,7 @@ int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
dump_stack();
}
- return __phy_package_write(phydev, regnum, val);
+ return __phy_package_write(phydev, VSC88XX_BASE_ADDR, regnum, val);
}
/* phydev->bus->mdio_lock should be locked when using this function */
@@ -722,7 +722,7 @@ int phy_base_read(struct phy_device *phydev, u32 regnum)
dump_stack();
}
- return __phy_package_read(phydev, regnum);
+ return __phy_package_read(phydev, VSC88XX_BASE_ADDR, regnum);
}
u32 vsc85xx_csr_read(struct phy_device *phydev,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index cf728bfd8..eb0b032cb 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1045,19 +1045,17 @@ static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
val);
}
-static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
struct phy_device *phydev = vsc8531->ptp->phydev;
- struct hwtstamp_config cfg;
bool one_step = false;
u32 val;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_ONESTEP_SYNC:
one_step = true;
break;
@@ -1069,9 +1067,9 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
return -ERANGE;
}
- vsc8531->ptp->tx_type = cfg.tx_type;
+ vsc8531->ptp->tx_type = cfg->tx_type;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -1084,7 +1082,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
return -ERANGE;
}
- vsc8531->ptp->rx_filter = cfg.rx_filter;
+ vsc8531->ptp->rx_filter = cfg->rx_filter;
mutex_lock(&vsc8531->ts_lock);
@@ -1132,7 +1130,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
vsc8531->ptp->configured = 1;
mutex_unlock(&vsc8531->ts_lock);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/phy/nxp-c45-tja11xx-macsec.c b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
new file mode 100644
index 000000000..550ef0897
--- /dev/null
+++ b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
@@ -0,0 +1,1729 @@
+// SPDX-License-Identifier: GPL-2.0
+/* NXP C45 MACsec PHY driver interface
+ * Copyright 2023 NXP
+ * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/processor.h>
+#include <net/dst_metadata.h>
+#include <net/macsec.h>
+
+#include "nxp-c45-tja11xx.h"
+
+#define MACSEC_REG_SIZE 32
+#define TX_SC_MAX 4
+
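+/* Per-SecY bits in the 32-bit MACsec event registers are allocated
+ * MSB-first: SecY 0 owns bit 31.
+ */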
+#define TX_SC_BIT(secy_id) BIT(MACSEC_REG_SIZE - (secy_id) - 1)
+
+#define VEND1_MACSEC_BASE 0x9000
+
+#define MACSEC_CFG 0x0000
+#define MACSEC_CFG_BYPASS BIT(1)
+#define MACSEC_CFG_S0I BIT(0)
+
+#define MACSEC_TPNET 0x0044
+#define PN_WRAP_THRESHOLD 0xffffffff
+
+#define MACSEC_RXSCA 0x0080
+#define MACSEC_RXSCKA 0x0084
+
+#define MACSEC_TXSCA 0x00C0
+#define MACSEC_TXSCKA 0x00C4
+
+#define MACSEC_RXSC_SCI_1H 0x0100
+
+#define MACSEC_RXSC_CFG 0x0128
+#define MACSEC_RXSC_CFG_XPN BIT(25)
+#define MACSEC_RXSC_CFG_AES_256 BIT(24)
+#define MACSEC_RXSC_CFG_SCI_EN BIT(11)
+#define MACSEC_RXSC_CFG_RP BIT(10)
+#define MACSEC_RXSC_CFG_VF_MASK GENMASK(9, 8)
+#define MACSEC_RXSC_CFG_VF_OFF 8
+
+#define MACSEC_RPW 0x012C
+
+#define MACSEC_RXSA_A_CS 0x0180
+#define MACSEC_RXSA_A_NPN 0x0184
+#define MACSEC_RXSA_A_XNPN 0x0188
+#define MACSEC_RXSA_A_LNPN 0x018C
+#define MACSEC_RXSA_A_LXNPN 0x0190
+
+#define MACSEC_RXSA_B_CS 0x01C0
+#define MACSEC_RXSA_B_NPN 0x01C4
+#define MACSEC_RXSA_B_XNPN 0x01C8
+#define MACSEC_RXSA_B_LNPN 0x01CC
+#define MACSEC_RXSA_B_LXNPN 0x01D0
+
+#define MACSEC_RXSA_CS_AN_OFF 1
+#define MACSEC_RXSA_CS_EN BIT(0)
+
+#define MACSEC_TXSC_SCI_1H 0x0200
+#define MACSEC_TXSC_CFG 0x0228
+#define MACSEC_TXSC_CFG_XPN BIT(25)
+#define MACSEC_TXSC_CFG_AES_256 BIT(24)
+#define MACSEC_TXSC_CFG_AN_MASK GENMASK(19, 18)
+#define MACSEC_TXSC_CFG_AN_OFF 18
+#define MACSEC_TXSC_CFG_ASA BIT(17)
+#define MACSEC_TXSC_CFG_SCE BIT(16)
+#define MACSEC_TXSC_CFG_ENCRYPT BIT(4)
+#define MACSEC_TXSC_CFG_PROTECT BIT(3)
+#define MACSEC_TXSC_CFG_SEND_SCI BIT(2)
+#define MACSEC_TXSC_CFG_END_STATION BIT(1)
+#define MACSEC_TXSC_CFG_SCB BIT(0)
+
+#define MACSEC_TXSA_A_CS 0x0280
+#define MACSEC_TXSA_A_NPN 0x0284
+#define MACSEC_TXSA_A_XNPN 0x0288
+
+#define MACSEC_TXSA_B_CS 0x02C0
+#define MACSEC_TXSA_B_NPN 0x02C4
+#define MACSEC_TXSA_B_XNPN 0x02C8
+
+#define MACSEC_SA_CS_A BIT(31)
+
+#define MACSEC_EVR 0x0400
+#define MACSEC_EVER 0x0404
+
+#define MACSEC_RXSA_A_KA 0x0700
+#define MACSEC_RXSA_A_SSCI 0x0720
+#define MACSEC_RXSA_A_SALT 0x0724
+
+#define MACSEC_RXSA_B_KA 0x0740
+#define MACSEC_RXSA_B_SSCI 0x0760
+#define MACSEC_RXSA_B_SALT 0x0764
+
+#define MACSEC_TXSA_A_KA 0x0780
+#define MACSEC_TXSA_A_SSCI 0x07A0
+#define MACSEC_TXSA_A_SALT 0x07A4
+
+#define MACSEC_TXSA_B_KA 0x07C0
+#define MACSEC_TXSA_B_SSCI 0x07E0
+#define MACSEC_TXSA_B_SALT 0x07E4
+
+#define MACSEC_UPFR0D2 0x0A08
+#define MACSEC_UPFR0M1 0x0A10
+#define MACSEC_OVP BIT(12)
+
+#define MACSEC_UPFR0M2 0x0A14
+#define ETYPE_MASK 0xffff
+
+#define MACSEC_UPFR0R 0x0A18
+#define MACSEC_UPFR_EN BIT(0)
+
+#define ADPTR_CNTRL 0x0F00
+#define ADPTR_CNTRL_CONFIG_EN BIT(14)
+#define ADPTR_CNTRL_ADPTR_EN BIT(12)
+#define ADPTR_TX_TAG_CNTRL 0x0F0C
+#define ADPTR_TX_TAG_CNTRL_ENA BIT(31)
+
+#define TX_SC_FLT_BASE 0x800
+#define TX_SC_FLT_SIZE 0x10
+#define TX_FLT_BASE(flt_id) (TX_SC_FLT_BASE + \
+ TX_SC_FLT_SIZE * (flt_id))
+
+#define TX_SC_FLT_OFF_MAC_DA_SA 0x04
+#define TX_SC_FLT_OFF_MAC_SA 0x08
+#define TX_SC_FLT_OFF_MAC_CFG 0x0C
+#define TX_SC_FLT_BY_SA BIT(14)
+#define TX_SC_FLT_EN BIT(8)
+
+#define TX_SC_FLT_MAC_DA_SA(base) ((base) + TX_SC_FLT_OFF_MAC_DA_SA)
+#define TX_SC_FLT_MAC_SA(base) ((base) + TX_SC_FLT_OFF_MAC_SA)
+#define TX_SC_FLT_MAC_CFG(base) ((base) + TX_SC_FLT_OFF_MAC_CFG)
+
+#define ADAPTER_EN BIT(6)
+#define MACSEC_EN BIT(5)
+
+#define MACSEC_INOV1HS 0x0140
+#define MACSEC_INOV2HS 0x0144
+#define MACSEC_INOD1HS 0x0148
+#define MACSEC_INOD2HS 0x014C
+#define MACSEC_RXSCIPUS 0x0150
+#define MACSEC_RXSCIPDS 0x0154
+#define MACSEC_RXSCIPLS 0x0158
+#define MACSEC_RXAN0INUSS 0x0160
+#define MACSEC_RXAN0IPUSS 0x0170
+#define MACSEC_RXSA_A_IPOS 0x0194
+#define MACSEC_RXSA_A_IPIS 0x01B0
+#define MACSEC_RXSA_A_IPNVS 0x01B4
+#define MACSEC_RXSA_B_IPOS 0x01D4
+#define MACSEC_RXSA_B_IPIS 0x01F0
+#define MACSEC_RXSA_B_IPNVS 0x01F4
+#define MACSEC_OPUS 0x021C
+#define MACSEC_OPTLS 0x022C
+#define MACSEC_OOP1HS 0x0240
+#define MACSEC_OOP2HS 0x0244
+#define MACSEC_OOE1HS 0x0248
+#define MACSEC_OOE2HS 0x024C
+#define MACSEC_TXSA_A_OPPS 0x028C
+#define MACSEC_TXSA_A_OPES 0x0290
+#define MACSEC_TXSA_B_OPPS 0x02CC
+#define MACSEC_TXSA_B_OPES 0x02D0
+#define MACSEC_INPWTS 0x0630
+#define MACSEC_INPBTS 0x0638
+#define MACSEC_IPSNFS 0x063C
+
+#define TJA11XX_TLV_TX_NEEDED_HEADROOM (32)
+#define TJA11XX_TLV_NEEDED_TAILROOM (0)
+
+#define ETH_P_TJA11XX_TLV (0x4e58)
+
+enum nxp_c45_sa_type {
+ TX_SA,
+ RX_SA,
+};
+
+struct nxp_c45_sa {
+ void *sa;
+ const struct nxp_c45_sa_regs *regs;
+ enum nxp_c45_sa_type type;
+ bool is_key_a;
+ u8 an;
+ struct list_head list;
+};
+
+struct nxp_c45_secy {
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct list_head sa_list;
+ int secy_id;
+ bool rx_sc0_impl;
+ struct list_head list;
+};
+
+struct nxp_c45_macsec {
+ struct list_head secy_list;
+ DECLARE_BITMAP(secy_bitmap, TX_SC_MAX);
+ DECLARE_BITMAP(tx_sc_bitmap, TX_SC_MAX);
+};
+
+struct nxp_c45_sa_regs {
+ u16 cs;
+ u16 npn;
+ u16 xnpn;
+ u16 lnpn;
+ u16 lxnpn;
+ u16 ka;
+ u16 ssci;
+ u16 salt;
+ u16 ipis;
+ u16 ipnvs;
+ u16 ipos;
+ u16 opps;
+ u16 opes;
+};
+
+static const struct nxp_c45_sa_regs rx_sa_a_regs = {
+ .cs = MACSEC_RXSA_A_CS,
+ .npn = MACSEC_RXSA_A_NPN,
+ .xnpn = MACSEC_RXSA_A_XNPN,
+ .lnpn = MACSEC_RXSA_A_LNPN,
+ .lxnpn = MACSEC_RXSA_A_LXNPN,
+ .ka = MACSEC_RXSA_A_KA,
+ .ssci = MACSEC_RXSA_A_SSCI,
+ .salt = MACSEC_RXSA_A_SALT,
+ .ipis = MACSEC_RXSA_A_IPIS,
+ .ipnvs = MACSEC_RXSA_A_IPNVS,
+ .ipos = MACSEC_RXSA_A_IPOS,
+};
+
+static const struct nxp_c45_sa_regs rx_sa_b_regs = {
+ .cs = MACSEC_RXSA_B_CS,
+ .npn = MACSEC_RXSA_B_NPN,
+ .xnpn = MACSEC_RXSA_B_XNPN,
+ .lnpn = MACSEC_RXSA_B_LNPN,
+ .lxnpn = MACSEC_RXSA_B_LXNPN,
+ .ka = MACSEC_RXSA_B_KA,
+ .ssci = MACSEC_RXSA_B_SSCI,
+ .salt = MACSEC_RXSA_B_SALT,
+ .ipis = MACSEC_RXSA_B_IPIS,
+ .ipnvs = MACSEC_RXSA_B_IPNVS,
+ .ipos = MACSEC_RXSA_B_IPOS,
+};
+
+static const struct nxp_c45_sa_regs tx_sa_a_regs = {
+ .cs = MACSEC_TXSA_A_CS,
+ .npn = MACSEC_TXSA_A_NPN,
+ .xnpn = MACSEC_TXSA_A_XNPN,
+ .ka = MACSEC_TXSA_A_KA,
+ .ssci = MACSEC_TXSA_A_SSCI,
+ .salt = MACSEC_TXSA_A_SALT,
+ .opps = MACSEC_TXSA_A_OPPS,
+ .opes = MACSEC_TXSA_A_OPES,
+};
+
+static const struct nxp_c45_sa_regs tx_sa_b_regs = {
+ .cs = MACSEC_TXSA_B_CS,
+ .npn = MACSEC_TXSA_B_NPN,
+ .xnpn = MACSEC_TXSA_B_XNPN,
+ .ka = MACSEC_TXSA_B_KA,
+ .ssci = MACSEC_TXSA_B_SSCI,
+ .salt = MACSEC_TXSA_B_SALT,
+ .opps = MACSEC_TXSA_B_OPPS,
+ .opes = MACSEC_TXSA_B_OPES,
+};
+
+static const
+struct nxp_c45_sa_regs *nxp_c45_sa_regs_get(enum nxp_c45_sa_type sa_type,
+ bool key_a)
+{
+ if (sa_type == RX_SA) {
+ if (key_a)
+ return &rx_sa_a_regs;
+ return &rx_sa_b_regs;
+ } else if (sa_type == TX_SA) {
+ if (key_a)
+ return &tx_sa_a_regs;
+ return &tx_sa_b_regs;
+ }
+
+ return NULL;
+}
+
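+/* The MACsec block exposes 32-bit registers through pairs of consecutive
+ * 16-bit MMD registers: the byte offset is halved to get the 16-bit word
+ * address, and the low half is accessed before the high half.
+ */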
+static int nxp_c45_macsec_write(struct phy_device *phydev, u16 addr, u32 value)
+{
+ u32 lvalue = value;
+ u16 laddr;
+ int ret;
+
+ WARN_ON_ONCE(addr % 4);
+
+ phydev_dbg(phydev, "write addr 0x%x value 0x%x\n", addr, value);
+
+ laddr = VEND1_MACSEC_BASE + addr / 2;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue);
+ if (ret)
+ return ret;
+
+ laddr += 1;
+ lvalue >>= 16;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue);
+
+ return ret;
+}
+
+static int nxp_c45_macsec_read(struct phy_device *phydev, u16 addr, u32 *value)
+{
+ u32 lvalue;
+ u16 laddr;
+ int ret;
+
+ WARN_ON_ONCE(addr % 4);
+
+ laddr = VEND1_MACSEC_BASE + addr / 2;
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr);
+ if (ret < 0)
+ return ret;
+
+ laddr += 1;
+ lvalue = (u32)ret & 0xffff;
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr);
+ if (ret < 0)
+ return ret;
+
+ lvalue |= (u32)ret << 16;
+ *value = lvalue;
+
+ phydev_dbg(phydev, "read addr 0x%x value 0x%x\n", addr, *value);
+
+ return 0;
+}
+
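+/* Counter read helpers: the 32_64 variant zero-extends a single 32-bit
+ * counter into a u64, while the 64 variant combines a high/low register
+ * pair.
+ */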
+static void nxp_c45_macsec_read32_64(struct phy_device *phydev, u16 addr,
+ u64 *value)
+{
+ u32 lvalue;
+
+ nxp_c45_macsec_read(phydev, addr, &lvalue);
+ *value = lvalue;
+}
+
+static void nxp_c45_macsec_read64(struct phy_device *phydev, u16 addr,
+ u64 *value)
+{
+ u32 lvalue;
+
+ nxp_c45_macsec_read(phydev, addr, &lvalue);
+ *value = (u64)lvalue << 32;
+ nxp_c45_macsec_read(phydev, addr + 4, &lvalue);
+ *value |= lvalue;
+}
+
+static void nxp_c45_secy_irq_en(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy, bool en)
+{
+ u32 reg;
+
+ nxp_c45_macsec_read(phydev, MACSEC_EVER, &reg);
+ if (en)
+ reg |= TX_SC_BIT(phy_secy->secy_id);
+ else
+ reg &= ~TX_SC_BIT(phy_secy->secy_id);
+ nxp_c45_macsec_write(phydev, MACSEC_EVER, reg);
+}
+
+static struct nxp_c45_secy *nxp_c45_find_secy(struct list_head *secy_list,
+ sci_t sci)
+{
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, secy_list, list)
+ if (pos->secy->sci == sci)
+ return pos;
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct
+nxp_c45_secy *nxp_c45_find_secy_by_id(struct list_head *secy_list,
+ int id)
+{
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, secy_list, list)
+ if (pos->secy_id == id)
+ return pos;
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void nxp_c45_secy_free(struct nxp_c45_secy *phy_secy)
+{
+ list_del(&phy_secy->list);
+ kfree(phy_secy);
+}
+
+static struct nxp_c45_sa *nxp_c45_find_sa(struct list_head *sa_list,
+ enum nxp_c45_sa_type sa_type, u8 an)
+{
+ struct nxp_c45_sa *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, sa_list, list)
+ if (pos->an == an && pos->type == sa_type)
+ return pos;
+
+ return ERR_PTR(-EINVAL);
+}
+
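+/* The hardware provides two key slots (A and B) per SA type, so at most
+ * two SAs of a given type can be installed at once; a new SA takes the
+ * slot opposite to the one already in use.
+ */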
+static struct nxp_c45_sa *nxp_c45_sa_alloc(struct list_head *sa_list, void *sa,
+ enum nxp_c45_sa_type sa_type, u8 an)
+{
+ struct nxp_c45_sa *first = NULL, *pos, *tmp;
+ int occurrences = 0;
+
+ list_for_each_entry_safe(pos, tmp, sa_list, list) {
+ if (pos->type != sa_type)
+ continue;
+
+ if (pos->an == an)
+ return ERR_PTR(-EINVAL);
+
+ first = pos;
+ occurrences++;
+ if (occurrences >= 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+
+ if (first)
+ tmp->is_key_a = !first->is_key_a;
+ else
+ tmp->is_key_a = true;
+
+ tmp->sa = sa;
+ tmp->type = sa_type;
+ tmp->an = an;
+ tmp->regs = nxp_c45_sa_regs_get(tmp->type, tmp->is_key_a);
+ list_add_tail(&tmp->list, sa_list);
+
+ return tmp;
+}
+
+static void nxp_c45_sa_free(struct nxp_c45_sa *sa)
+{
+ list_del(&sa->list);
+ kfree(sa);
+}
+
+static void nxp_c45_sa_list_free(struct list_head *sa_list)
+{
+ struct nxp_c45_sa *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, sa_list, list)
+ nxp_c45_sa_free(pos);
+}
+
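+/* Program the next PN for an SA; for RX SAs, also derive the lowest
+ * acceptable PN from the replay window, falling back to 1 when the
+ * window covers everything received so far.
+ */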
+static void nxp_c45_sa_set_pn(struct phy_device *phydev,
+ struct nxp_c45_sa *sa, u64 pn,
+ u32 replay_window)
+{
+ const struct nxp_c45_sa_regs *sa_regs = sa->regs;
+ pn_t npn = {.full64 = pn};
+ pn_t lnpn;
+
+ nxp_c45_macsec_write(phydev, sa_regs->npn, npn.lower);
+ nxp_c45_macsec_write(phydev, sa_regs->xnpn, npn.upper);
+ if (sa->type != RX_SA)
+ return;
+
+ if (pn > replay_window)
+ lnpn.full64 = pn - replay_window;
+ else
+ lnpn.full64 = 1;
+
+ nxp_c45_macsec_write(phydev, sa_regs->lnpn, lnpn.lower);
+ nxp_c45_macsec_write(phydev, sa_regs->lxnpn, lnpn.upper);
+}
+
+static void nxp_c45_sa_set_key(struct macsec_context *ctx,
+ const struct nxp_c45_sa_regs *sa_regs,
+ u8 *salt, ssci_t ssci)
+{
+ struct phy_device *phydev = ctx->phydev;
+ u32 key_size = ctx->secy->key_len / 4;
+ u32 salt_size = MACSEC_SALT_LEN / 4;
+ u32 *key_u32 = (u32 *)ctx->sa.key;
+ u32 *salt_u32 = (u32 *)salt;
+ u32 reg, value;
+ int i;
+
+ for (i = 0; i < key_size; i++) {
+ reg = sa_regs->ka + i * 4;
+ value = (__force u32)cpu_to_be32(key_u32[i]);
+ nxp_c45_macsec_write(phydev, reg, value);
+ }
+
+ if (ctx->secy->xpn) {
+ for (i = 0; i < salt_size; i++) {
+ reg = sa_regs->salt + (2 - i) * 4;
+ value = (__force u32)cpu_to_be32(salt_u32[i]);
+ nxp_c45_macsec_write(phydev, reg, value);
+ }
+
+ value = (__force u32)cpu_to_be32((__force u32)ssci);
+ nxp_c45_macsec_write(phydev, sa_regs->ssci, value);
+ }
+
+ nxp_c45_macsec_write(phydev, sa_regs->cs, MACSEC_SA_CS_A);
+}
+
+static void nxp_c45_rx_sa_clear_stats(struct phy_device *phydev,
+ struct nxp_c45_sa *sa)
+{
+ nxp_c45_macsec_write(phydev, sa->regs->ipis, 0);
+ nxp_c45_macsec_write(phydev, sa->regs->ipnvs, 0);
+ nxp_c45_macsec_write(phydev, sa->regs->ipos, 0);
+
+ nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + sa->an * 4, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + sa->an * 4, 0);
+}
+
+static void nxp_c45_rx_sa_read_stats(struct phy_device *phydev,
+ struct nxp_c45_sa *sa,
+ struct macsec_rx_sa_stats *stats)
+{
+ nxp_c45_macsec_read(phydev, sa->regs->ipis, &stats->InPktsInvalid);
+ nxp_c45_macsec_read(phydev, sa->regs->ipnvs, &stats->InPktsNotValid);
+ nxp_c45_macsec_read(phydev, sa->regs->ipos, &stats->InPktsOK);
+}
+
+static void nxp_c45_tx_sa_clear_stats(struct phy_device *phydev,
+ struct nxp_c45_sa *sa)
+{
+ nxp_c45_macsec_write(phydev, sa->regs->opps, 0);
+ nxp_c45_macsec_write(phydev, sa->regs->opes, 0);
+}
+
+static void nxp_c45_tx_sa_read_stats(struct phy_device *phydev,
+ struct nxp_c45_sa *sa,
+ struct macsec_tx_sa_stats *stats)
+{
+ nxp_c45_macsec_read(phydev, sa->regs->opps, &stats->OutPktsProtected);
+ nxp_c45_macsec_read(phydev, sa->regs->opes, &stats->OutPktsEncrypted);
+}
+
+static void nxp_c45_rx_sa_update(struct phy_device *phydev,
+ struct nxp_c45_sa *sa, bool en)
+{
+ const struct nxp_c45_sa_regs *sa_regs = sa->regs;
+ u32 cfg;
+
+ cfg = sa->an << MACSEC_RXSA_CS_AN_OFF;
+ cfg |= en ? MACSEC_RXSA_CS_EN : 0;
+ nxp_c45_macsec_write(phydev, sa_regs->cs, cfg);
+}
+
+static void nxp_c45_tx_sa_update(struct phy_device *phydev,
+ struct nxp_c45_sa *sa, bool en)
+{
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
+
+ cfg &= ~MACSEC_TXSC_CFG_AN_MASK;
+ cfg |= sa->an << MACSEC_TXSC_CFG_AN_OFF;
+
+ if (sa->is_key_a)
+ cfg &= ~MACSEC_TXSC_CFG_ASA;
+ else
+ cfg |= MACSEC_TXSC_CFG_ASA;
+
+ if (en)
+ cfg |= MACSEC_TXSC_CFG_SCE;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_SCE;
+
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
+}
+
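+/* SCIs are stored as two 32-bit halves, most significant word first. */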
+static void nxp_c45_set_sci(struct phy_device *phydev, u16 sci_base_addr,
+ sci_t sci)
+{
+ u64 lsci = sci_to_cpu(sci);
+
+ nxp_c45_macsec_write(phydev, sci_base_addr, lsci >> 32);
+ nxp_c45_macsec_write(phydev, sci_base_addr + 4, lsci);
+}
+
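+/* The port number is the least significant 16 bits of the SCI. */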
+static bool nxp_c45_port_is_1(sci_t sci)
+{
+ u16 port = sci_to_cpu(sci);
+
+ return port == 1;
+}
+
+static void nxp_c45_select_secy(struct phy_device *phydev, u8 id)
+{
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCA, id);
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCKA, id);
+ nxp_c45_macsec_write(phydev, MACSEC_TXSCA, id);
+ nxp_c45_macsec_write(phydev, MACSEC_TXSCKA, id);
+}
+
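+/* Check the SecY against what the hardware can offload: an end-station
+ * SecY must use port 1 in its SCIs, SCB is not supported, and the
+ * implicit RX SC 0 mode is only allowed on SecY 0.
+ */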
+static bool nxp_c45_secy_valid(struct nxp_c45_secy *phy_secy,
+ bool can_rx_sc0_impl)
+{
+ bool end_station = phy_secy->secy->tx_sc.end_station;
+ bool scb = phy_secy->secy->tx_sc.scb;
+
+ phy_secy->rx_sc0_impl = false;
+
+ if (end_station) {
+ if (!nxp_c45_port_is_1(phy_secy->secy->sci))
+ return false;
+ if (!phy_secy->rx_sc)
+ return true;
+ return nxp_c45_port_is_1(phy_secy->rx_sc->sci);
+ }
+
+ if (scb)
+ return false;
+
+ if (!can_rx_sc0_impl)
+ return false;
+
+ if (phy_secy->secy_id != 0)
+ return false;
+
+ phy_secy->rx_sc0_impl = true;
+
+ return true;
+}
+
+static bool nxp_c45_rx_sc0_impl(struct nxp_c45_secy *phy_secy)
+{
+ bool end_station = phy_secy->secy->tx_sc.end_station;
+ bool send_sci = phy_secy->secy->tx_sc.send_sci;
+ bool scb = phy_secy->secy->tx_sc.scb;
+
+ return !end_station && !send_sci && !scb;
+}
+
+static bool nxp_c45_mac_addr_free(struct macsec_context *ctx)
+{
+ struct nxp_c45_phy *priv = ctx->phydev->priv;
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec->secy_list, list) {
+ if (pos->secy == ctx->secy)
+ continue;
+
+ if (memcmp(pos->secy->netdev->dev_addr,
+ ctx->secy->netdev->dev_addr, ETH_ALEN) == 0)
+ return false;
+ }
+
+ return true;
+}
+
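+/* Each SecY owns a TX filter entry that matches outgoing frames by
+ * source MAC address (the SecY netdev address) and steers them to that
+ * SecY's TX SC.
+ */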
+static void nxp_c45_tx_sc_en_flt(struct phy_device *phydev, int secy_id,
+ bool en)
+{
+ u32 tx_flt_base = TX_FLT_BASE(secy_id);
+ u32 reg = 0;
+
+ nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
+ if (en)
+ reg |= TX_SC_FLT_EN;
+ else
+ reg &= ~TX_SC_FLT_EN;
+ nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
+}
+
+static void nxp_c45_tx_sc_set_flt(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ const u8 *dev_addr = phy_secy->secy->netdev->dev_addr;
+ u32 tx_flt_base = TX_FLT_BASE(phy_secy->secy_id);
+ u32 reg;
+
+ reg = dev_addr[0] << 8 | dev_addr[1];
+ nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_DA_SA(tx_flt_base), reg);
+ reg = dev_addr[5] | dev_addr[4] << 8 | dev_addr[3] << 16 |
+ dev_addr[2] << 24;
+
+ nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_SA(tx_flt_base), reg);
+ nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
+ reg &= TX_SC_FLT_EN;
+ reg |= TX_SC_FLT_BY_SA | phy_secy->secy_id;
+ nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
+}
+
+static void nxp_c45_tx_sc_update(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
+
+ phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
+ if (phy_secy->secy->xpn)
+ cfg |= MACSEC_TXSC_CFG_XPN;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_XPN;
+
+ phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
+ if (phy_secy->secy->key_len == 32)
+ cfg |= MACSEC_TXSC_CFG_AES_256;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_AES_256;
+
+ phydev_dbg(phydev, "encryption %s\n",
+ phy_secy->secy->tx_sc.encrypt ? "on" : "off");
+ if (phy_secy->secy->tx_sc.encrypt)
+ cfg |= MACSEC_TXSC_CFG_ENCRYPT;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_ENCRYPT;
+
+ phydev_dbg(phydev, "protect frames %s\n",
+ phy_secy->secy->protect_frames ? "on" : "off");
+ if (phy_secy->secy->protect_frames)
+ cfg |= MACSEC_TXSC_CFG_PROTECT;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_PROTECT;
+
+ phydev_dbg(phydev, "send sci %s\n",
+ phy_secy->secy->tx_sc.send_sci ? "on" : "off");
+ if (phy_secy->secy->tx_sc.send_sci)
+ cfg |= MACSEC_TXSC_CFG_SEND_SCI;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_SEND_SCI;
+
+ phydev_dbg(phydev, "end station %s\n",
+ phy_secy->secy->tx_sc.end_station ? "on" : "off");
+ if (phy_secy->secy->tx_sc.end_station)
+ cfg |= MACSEC_TXSC_CFG_END_STATION;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_END_STATION;
+
+ phydev_dbg(phydev, "scb %s\n",
+ phy_secy->secy->tx_sc.scb ? "on" : "off");
+ if (phy_secy->secy->tx_sc.scb)
+ cfg |= MACSEC_TXSC_CFG_SCB;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_SCB;
+
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
+}
+
+static void nxp_c45_tx_sc_clear_stats(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ struct nxp_c45_sa *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list)
+ if (pos->type == TX_SA)
+ nxp_c45_tx_sa_clear_stats(phydev, pos);
+
+ nxp_c45_macsec_write(phydev, MACSEC_OPUS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_OPTLS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_OOP1HS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_OOP2HS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_OOE1HS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_OOE2HS, 0);
+}
+
+static void nxp_c45_set_rx_sc0_impl(struct phy_device *phydev,
+ bool enable)
+{
+ u32 reg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
+ if (enable)
+ reg |= MACSEC_CFG_S0I;
+ else
+ reg &= ~MACSEC_CFG_S0I;
+ nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
+}
+
+static bool nxp_c45_is_rx_sc0_impl(struct list_head *secy_list)
+{
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, secy_list, list)
+ if (pos->rx_sc0_impl)
+ return pos->rx_sc0_impl;
+
+ return false;
+}
+
+static void nxp_c45_rx_sc_en(struct phy_device *phydev,
+ struct macsec_rx_sc *rx_sc, bool en)
+{
+ u32 reg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &reg);
+ if (rx_sc->active && en)
+ reg |= MACSEC_RXSC_CFG_SCI_EN;
+ else
+ reg &= ~MACSEC_RXSC_CFG_SCI_EN;
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, reg);
+}
+
+static void nxp_c45_rx_sc_update(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ struct macsec_rx_sc *rx_sc = phy_secy->rx_sc;
+ struct nxp_c45_phy *priv = phydev->priv;
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &cfg);
+ cfg &= ~MACSEC_RXSC_CFG_VF_MASK;
+ cfg |= phy_secy->secy->validate_frames << MACSEC_RXSC_CFG_VF_OFF;
+
+ phydev_dbg(phydev, "validate frames %u\n",
+ phy_secy->secy->validate_frames);
+ phydev_dbg(phydev, "replay_protect %s window %u\n",
+ phy_secy->secy->replay_protect ? "on" : "off",
+ phy_secy->secy->replay_window);
+ if (phy_secy->secy->replay_protect) {
+ cfg |= MACSEC_RXSC_CFG_RP;
+ nxp_c45_macsec_write(phydev, MACSEC_RPW,
+ phy_secy->secy->replay_window);
+ } else {
+ cfg &= ~MACSEC_RXSC_CFG_RP;
+ }
+
+ phydev_dbg(phydev, "rx_sc->active %s\n",
+ rx_sc->active ? "on" : "off");
+ if (rx_sc->active &&
+ test_bit(phy_secy->secy_id, priv->macsec->secy_bitmap))
+ cfg |= MACSEC_RXSC_CFG_SCI_EN;
+ else
+ cfg &= ~MACSEC_RXSC_CFG_SCI_EN;
+
+ phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
+ if (phy_secy->secy->key_len == 32)
+ cfg |= MACSEC_RXSC_CFG_AES_256;
+ else
+ cfg &= ~MACSEC_RXSC_CFG_AES_256;
+
+ phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
+ if (phy_secy->secy->xpn)
+ cfg |= MACSEC_RXSC_CFG_XPN;
+ else
+ cfg &= ~MACSEC_RXSC_CFG_XPN;
+
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, cfg);
+}
+
+static void nxp_c45_rx_sc_clear_stats(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ struct nxp_c45_sa *pos, *tmp;
+ int i;
+
+ list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list)
+ if (pos->type == RX_SA)
+ nxp_c45_rx_sa_clear_stats(phydev, pos);
+
+ nxp_c45_macsec_write(phydev, MACSEC_INOD1HS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_INOD2HS, 0);
+
+ nxp_c45_macsec_write(phydev, MACSEC_INOV1HS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_INOV2HS, 0);
+
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCIPDS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCIPLS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCIPUS, 0);
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + i * 4, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + i * 4, 0);
+ }
+}
+
+static void nxp_c45_rx_sc_del(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ struct nxp_c45_sa *pos, *tmp;
+
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RPW, 0);
+ nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, 0);
+
+ nxp_c45_rx_sc_clear_stats(phydev, phy_secy);
+
+ list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
+ if (pos->type == RX_SA) {
+ nxp_c45_rx_sa_update(phydev, pos, false);
+ nxp_c45_sa_free(pos);
+ }
+ }
+}
+
+static void nxp_c45_clear_global_stats(struct phy_device *phydev)
+{
+ nxp_c45_macsec_write(phydev, MACSEC_INPBTS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_INPWTS, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_IPSNFS, 0);
+}
+
+static void nxp_c45_macsec_en(struct phy_device *phydev, bool en)
+{
+ u32 reg;
+
+ nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
+ if (en)
+ reg |= MACSEC_CFG_BYPASS;
+ else
+ reg &= ~MACSEC_CFG_BYPASS;
+ nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
+}
+
+static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ int any_bit_set;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, true);
+ nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl);
+ if (phy_secy->rx_sc)
+ nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
+
+ any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
+ if (any_bit_set == TX_SC_MAX)
+ nxp_c45_macsec_en(phydev, true);
+
+ set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ int any_bit_set;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, false);
+ if (phy_secy->rx_sc)
+ nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, false);
+ nxp_c45_set_rx_sc0_impl(phydev, false);
+
+ clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
+ any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
+ if (any_bit_set == TX_SC_MAX)
+ nxp_c45_macsec_en(phydev, false);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ bool can_rx_sc0_impl;
+ int idx;
+
+ phydev_dbg(phydev, "add SecY SCI %016llx\n",
+ sci_to_cpu(ctx->secy->sci));
+
+ if (!nxp_c45_mac_addr_free(ctx))
+ return -EBUSY;
+
+ if (nxp_c45_is_rx_sc0_impl(&priv->macsec->secy_list))
+ return -EBUSY;
+
+ idx = find_first_zero_bit(priv->macsec->tx_sc_bitmap, TX_SC_MAX);
+ if (idx == TX_SC_MAX)
+ return -ENOSPC;
+
+ phy_secy = kzalloc(sizeof(*phy_secy), GFP_KERNEL);
+ if (!phy_secy)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&phy_secy->sa_list);
+ phy_secy->secy = ctx->secy;
+ phy_secy->secy_id = idx;
+
+ /* Point-to-point mode (the implicit RX SC 0) can only be enabled
+ * when no SecY has been added yet.
+ */
+ can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 0;
+ if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl)) {
+ kfree(phy_secy);
+ return -EINVAL;
+ }
+
+ phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_set_sci(phydev, MACSEC_TXSC_SCI_1H, ctx->secy->sci);
+ nxp_c45_tx_sc_set_flt(phydev, phy_secy);
+ nxp_c45_tx_sc_update(phydev, phy_secy);
+ if (phy_interrupt_is_valid(phydev))
+ nxp_c45_secy_irq_en(phydev, phy_secy, true);
+
+ set_bit(idx, priv->macsec->tx_sc_bitmap);
+ list_add_tail(&phy_secy->list, &priv->macsec->secy_list);
+
+ return 0;
+}
+
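+/* Look up the TX SA programmed for the encoding AN; if none exists yet,
+ * fall back to key slot A with the requested AN.
+ */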
+static void nxp_c45_tx_sa_next(struct nxp_c45_secy *phy_secy,
+ struct nxp_c45_sa *next_sa, u8 encoding_sa)
+{
+ struct nxp_c45_sa *sa;
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, encoding_sa);
+ if (!IS_ERR(sa)) {
+ memcpy(next_sa, sa, sizeof(*sa));
+ } else {
+ next_sa->is_key_a = true;
+ next_sa->an = encoding_sa;
+ }
+}
+
+static int nxp_c45_mdo_upd_secy(struct macsec_context *ctx)
+{
+ u8 encoding_sa = ctx->secy->tx_sc.encoding_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_sa next_sa;
+ bool can_rx_sc0_impl;
+
+ phydev_dbg(phydev, "update SecY SCI %016llx\n",
+ sci_to_cpu(ctx->secy->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ if (!nxp_c45_mac_addr_free(ctx))
+ return -EBUSY;
+
+ /* Point-to-point mode can only stay enabled when the SecY being
+ * updated is the only one added.
+ */
+ can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 1;
+ if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl))
+ return -EINVAL;
+ phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_tx_sc_set_flt(phydev, phy_secy);
+ nxp_c45_tx_sc_update(phydev, phy_secy);
+ nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa);
+ nxp_c45_tx_sa_update(phydev, &next_sa, ctx->secy->operational);
+
+ nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl);
+ if (phy_secy->rx_sc)
+ nxp_c45_rx_sc_update(phydev, phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_secy(struct macsec_context *ctx)
+{
+ u8 encoding_sa = ctx->secy->tx_sc.encoding_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_sa next_sa;
+
+ phydev_dbg(phydev, "delete SecY SCI %016llx\n",
+ sci_to_cpu(ctx->secy->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_mdo_dev_stop(ctx);
+ nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa);
+ nxp_c45_tx_sa_update(phydev, &next_sa, false);
+ nxp_c45_tx_sc_clear_stats(phydev, phy_secy);
+ if (phy_secy->rx_sc)
+ nxp_c45_rx_sc_del(phydev, phy_secy);
+
+ nxp_c45_sa_list_free(&phy_secy->sa_list);
+ if (phy_interrupt_is_valid(phydev))
+ nxp_c45_secy_irq_en(phydev, phy_secy, false);
+
+ clear_bit(phy_secy->secy_id, priv->macsec->tx_sc_bitmap);
+ nxp_c45_secy_free(phy_secy);
+
+ if (list_empty(&priv->macsec->secy_list))
+ nxp_c45_clear_global_stats(phydev);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+
+ phydev_dbg(phydev, "add RX SC SCI %016llx %s\n",
+ sci_to_cpu(ctx->rx_sc->sci),
+ ctx->rx_sc->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ if (phy_secy->rx_sc)
+ return -ENOSPC;
+
+ if (phy_secy->secy->tx_sc.end_station &&
+ !nxp_c45_port_is_1(ctx->rx_sc->sci))
+ return -EINVAL;
+
+ phy_secy->rx_sc = ctx->rx_sc;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, ctx->rx_sc->sci);
+ nxp_c45_rx_sc_update(phydev, phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+
+ phydev_dbg(phydev, "update RX SC SCI %016llx %s\n",
+ sci_to_cpu(ctx->rx_sc->sci),
+ ctx->rx_sc->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_rx_sc_update(phydev, phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+
+ phydev_dbg(phydev, "delete RX SC SCI %016llx %s\n",
+ sci_to_cpu(ctx->rx_sc->sci),
+ ctx->rx_sc->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_rx_sc_del(phydev, phy_secy);
+ phy_secy->rx_sc = NULL;
+
+ return 0;
+}
+
+static int nxp_c45_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phydev_dbg(phydev, "add RX SA %u %s to RX SC SCI %016llx\n",
+ an, rx_sa->active ? "enabled" : "disabled",
+ sci_to_cpu(rx_sa->sc->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_sa_alloc(&phy_secy->sa_list, rx_sa, RX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn,
+ ctx->secy->replay_window);
+ nxp_c45_sa_set_key(ctx, sa->regs, rx_sa->key.salt.bytes, rx_sa->ssci);
+ nxp_c45_rx_sa_update(phydev, sa, rx_sa->active);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phydev_dbg(phydev, "update RX SA %u %s to RX SC SCI %016llx\n",
+ an, rx_sa->active ? "enabled" : "disabled",
+ sci_to_cpu(rx_sa->sc->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ if (ctx->sa.update_pn)
+ nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn,
+ ctx->secy->replay_window);
+ nxp_c45_rx_sa_update(phydev, sa, rx_sa->active);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phydev_dbg(phydev, "delete RX SA %u %s to RX SC SCI %016llx\n",
+ an, rx_sa->active ? "enabled" : "disabled",
+ sci_to_cpu(rx_sa->sc->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_rx_sa_update(phydev, sa, false);
+ nxp_c45_rx_sa_clear_stats(phydev, sa);
+
+ nxp_c45_sa_free(sa);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phydev_dbg(phydev, "add TX SA %u %s to TX SC %016llx\n",
+ an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
+ sci_to_cpu(ctx->secy->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_sa_alloc(&phy_secy->sa_list, tx_sa, TX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0);
+ nxp_c45_sa_set_key(ctx, sa->regs, tx_sa->key.salt.bytes, tx_sa->ssci);
+ if (ctx->secy->tx_sc.encoding_sa == sa->an)
+ nxp_c45_tx_sa_update(phydev, sa, tx_sa->active);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phydev_dbg(phydev, "update TX SA %u %s to TX SC %016llx\n",
+ an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
+ sci_to_cpu(ctx->secy->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ if (ctx->sa.update_pn)
+ nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0);
+ if (ctx->secy->tx_sc.encoding_sa == sa->an)
+ nxp_c45_tx_sa_update(phydev, sa, tx_sa->active);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phydev_dbg(phydev, "delete TX SA %u %s to TX SC %016llx\n",
+ an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
+ sci_to_cpu(ctx->secy->sci));
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ if (ctx->secy->tx_sc.encoding_sa == sa->an)
+ nxp_c45_tx_sa_update(phydev, sa, false);
+ nxp_c45_tx_sa_clear_stats(phydev, sa);
+
+ nxp_c45_sa_free(sa);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct macsec_dev_stats *dev_stats;
+ struct nxp_c45_secy *phy_secy;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ dev_stats = ctx->stats.dev_stats;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_macsec_read32_64(phydev, MACSEC_OPUS,
+ &dev_stats->OutPktsUntagged);
+ nxp_c45_macsec_read32_64(phydev, MACSEC_OPTLS,
+ &dev_stats->OutPktsTooLong);
+ nxp_c45_macsec_read32_64(phydev, MACSEC_INPBTS,
+ &dev_stats->InPktsBadTag);
+
+ if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT) {
+ nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS,
+ &dev_stats->InPktsNoTag);
+ nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS,
+ &dev_stats->InPktsNoSCI);
+ } else {
+ nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS,
+ &dev_stats->InPktsUntagged);
+ nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS,
+ &dev_stats->InPktsUnknownSCI);
+ }
+
+ /* Always 0. */
+ dev_stats->InPktsOverrun = 0;
+
+ return 0;
+}
+
+static int nxp_c45_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct macsec_tx_sa_stats tx_sa_stats;
+ struct macsec_tx_sc_stats *stats;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_sa *pos, *tmp;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ stats = ctx->stats.tx_sc_stats;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_macsec_read64(phydev, MACSEC_OOE1HS,
+ &stats->OutOctetsEncrypted);
+ nxp_c45_macsec_read64(phydev, MACSEC_OOP1HS,
+ &stats->OutOctetsProtected);
+ list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
+ if (pos->type != TX_SA)
+ continue;
+
+ memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
+ nxp_c45_tx_sa_read_stats(phydev, pos, &tx_sa_stats);
+
+ stats->OutPktsEncrypted += tx_sa_stats.OutPktsEncrypted;
+ stats->OutPktsProtected += tx_sa_stats.OutPktsProtected;
+ }
+
+ return 0;
+}
+
+static int nxp_c45_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct macsec_tx_sa_stats *stats;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ stats = ctx->stats.tx_sa_stats;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_tx_sa_read_stats(phydev, sa, stats);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct macsec_rx_sa_stats rx_sa_stats;
+ struct macsec_rx_sc_stats *stats;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_sa *pos, *tmp;
+ u32 reg = 0;
+ int i;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ if (phy_secy->rx_sc != ctx->rx_sc)
+ return -EINVAL;
+
+ stats = ctx->stats.rx_sc_stats;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
+ if (pos->type != RX_SA)
+ continue;
+
+ memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
+ nxp_c45_rx_sa_read_stats(phydev, pos, &rx_sa_stats);
+
+ stats->InPktsInvalid += rx_sa_stats.InPktsInvalid;
+ stats->InPktsNotValid += rx_sa_stats.InPktsNotValid;
+ stats->InPktsOK += rx_sa_stats.InPktsOK;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + i * 4, &reg);
+ stats->InPktsNotUsingSA += reg;
+ nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + i * 4, &reg);
+ stats->InPktsUnusedSA += reg;
+ }
+
+ nxp_c45_macsec_read64(phydev, MACSEC_INOD1HS,
+ &stats->InOctetsDecrypted);
+ nxp_c45_macsec_read64(phydev, MACSEC_INOV1HS,
+ &stats->InOctetsValidated);
+
+ nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPDS,
+ &stats->InPktsDelayed);
+ nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPLS,
+ &stats->InPktsLate);
+ nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPUS,
+ &stats->InPktsUnchecked);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct macsec_rx_sa_stats *stats;
+ struct nxp_c45_secy *phy_secy;
+ u8 an = ctx->sa.assoc_num;
+ struct nxp_c45_sa *sa;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ stats = ctx->stats.rx_sa_stats;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_rx_sa_read_stats(phydev, sa, stats);
+ nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + an * 4,
+ &stats->InPktsNotUsingSA);
+ nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + an * 4,
+ &stats->InPktsUnusedSA);
+
+ return 0;
+}
+
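+/* TX tag inserted in front of outgoing frames: the original Ethernet
+ * header is kept at the start, its EtherType is replaced with
+ * ETH_P_TJA11XX_TLV, and a subtype/length/payload TLV follows.
+ */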
+struct tja11xx_tlv_header {
+ struct ethhdr eth;
+ u8 subtype;
+ u8 len;
+ u8 payload[28];
+};
+
+static int nxp_c45_mdo_insert_tx_tag(struct phy_device *phydev,
+ struct sk_buff *skb)
+{
+ struct tja11xx_tlv_header *tlv;
+ struct ethhdr *eth;
+
+ eth = eth_hdr(skb);
+ tlv = skb_push(skb, TJA11XX_TLV_TX_NEEDED_HEADROOM);
+ memmove(tlv, eth, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ tlv->eth.h_proto = htons(ETH_P_TJA11XX_TLV);
+ tlv->subtype = 1;
+ tlv->len = sizeof(tlv->payload);
+ memset(tlv->payload, 0, sizeof(tlv->payload));
+
+ return 0;
+}
+
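For orientation, here is an editorial sketch of the frame transformation performed by nxp_c45_mdo_insert_tx_tag() above; the value of TJA11XX_TLV_TX_NEEDED_HEADROOM is defined elsewhere in the driver and is not visible in this excerpt, so the layout is an inference from the code, not a quote from the patch:

/*
 * Editorial sketch, not part of the patch.
 *
 *   before:  data -> [ dst | src | proto | L2 payload ... ]
 *   push:    data -> [ HEADROOM bytes ][ dst | src | proto | L2 payload ... ]
 *   after:   data -> [ dst | src | ETH_P_TJA11XX_TLV | subtype=1 | len=28 |
 *                      28 zeroed payload bytes | rest of original frame ... ]
 *
 * The original 14-byte Ethernet header is copied to the new front of
 * the buffer and its EtherType rewritten; any bytes of the stale copy
 * that fall inside tlv->payload are cleared by the memset().
 */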
+static const struct macsec_ops nxp_c45_macsec_ops = {
+ .mdo_dev_open = nxp_c45_mdo_dev_open,
+ .mdo_dev_stop = nxp_c45_mdo_dev_stop,
+ .mdo_add_secy = nxp_c45_mdo_add_secy,
+ .mdo_upd_secy = nxp_c45_mdo_upd_secy,
+ .mdo_del_secy = nxp_c45_mdo_del_secy,
+ .mdo_add_rxsc = nxp_c45_mdo_add_rxsc,
+ .mdo_upd_rxsc = nxp_c45_mdo_upd_rxsc,
+ .mdo_del_rxsc = nxp_c45_mdo_del_rxsc,
+ .mdo_add_rxsa = nxp_c45_mdo_add_rxsa,
+ .mdo_upd_rxsa = nxp_c45_mdo_upd_rxsa,
+ .mdo_del_rxsa = nxp_c45_mdo_del_rxsa,
+ .mdo_add_txsa = nxp_c45_mdo_add_txsa,
+ .mdo_upd_txsa = nxp_c45_mdo_upd_txsa,
+ .mdo_del_txsa = nxp_c45_mdo_del_txsa,
+ .mdo_get_dev_stats = nxp_c45_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = nxp_c45_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = nxp_c45_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = nxp_c45_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = nxp_c45_mdo_get_rx_sa_stats,
+ .mdo_insert_tx_tag = nxp_c45_mdo_insert_tx_tag,
+ .needed_headroom = TJA11XX_TLV_TX_NEEDED_HEADROOM,
+ .needed_tailroom = TJA11XX_TLV_NEEDED_TAILROOM,
+};
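These callbacks are invoked by the MACsec core when offloading is delegated to the PHY. A simplified sketch of the dispatch helper in net/macsec.c, reproduced from memory as an assumption rather than from this patch, shows why each op receives a fully populated macsec_context:

/* Simplified sketch of the caller side in net/macsec.c (assumption,
 * abridged): the core fills a macsec_context and calls the PHY op,
 * holding the PHY device lock around PHY-level offload calls.
 */
static int macsec_offload(int (*func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = func(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}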
+
+int nxp_c45_macsec_config_init(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ int ret;
+
+ if (!priv->macsec)
+ return 0;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
+ MACSEC_EN | ADAPTER_EN);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_CONFIG_EN |
+ ADPTR_CNTRL_ADPTR_EN);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, ADPTR_TX_TAG_CNTRL,
+ ADPTR_TX_TAG_CNTRL_ENA);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_ADPTR_EN);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, MACSEC_TPNET, PN_WRAP_THRESHOLD);
+ if (ret)
+ return ret;
+
+ /* Set MKA filter. */
+ ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0D2, ETH_P_PAE);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M1, MACSEC_OVP);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M2, ETYPE_MASK);
+ if (ret)
+ return ret;
+
+ ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0R, MACSEC_UPFR_EN);
+
+ return ret;
+}
+
+int nxp_c45_macsec_probe(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+
+ priv->macsec = devm_kzalloc(dev, sizeof(*priv->macsec), GFP_KERNEL);
+ if (!priv->macsec)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&priv->macsec->secy_list);
+ phydev->macsec_ops = &nxp_c45_macsec_ops;
+
+ return 0;
+}
+
+void nxp_c45_macsec_remove(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *secy_p, *secy_t;
+ struct nxp_c45_sa *sa_p, *sa_t;
+ struct list_head *secy_list;
+
+ if (!priv->macsec)
+ return;
+
+ secy_list = &priv->macsec->secy_list;
+ nxp_c45_macsec_en(phydev, false);
+
+ list_for_each_entry_safe(secy_p, secy_t, secy_list, list) {
+ list_for_each_entry_safe(sa_p, sa_t, &secy_p->sa_list, list)
+ nxp_c45_sa_free(sa_p);
+ nxp_c45_secy_free(secy_p);
+ }
+}
+
+void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
+ irqreturn_t *ret)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *secy;
+ struct nxp_c45_sa *sa;
+ u8 encoding_sa;
+ int secy_id;
+ u32 reg = 0;
+
+ if (!priv->macsec)
+ return;
+
+ do {
+ nxp_c45_macsec_read(phydev, MACSEC_EVR, &reg);
+ if (!reg)
+ return;
+
+ secy_id = MACSEC_REG_SIZE - ffs(reg);
+ secy = nxp_c45_find_secy_by_id(&priv->macsec->secy_list,
+ secy_id);
+ if (IS_ERR(secy)) {
+ WARN_ON(1);
+ goto macsec_ack_irq;
+ }
+
+ encoding_sa = secy->secy->tx_sc.encoding_sa;
+ phydev_dbg(phydev, "pn_wrapped: TX SC %d, encoding_sa %u\n",
+ secy->secy_id, encoding_sa);
+
+ sa = nxp_c45_find_sa(&secy->sa_list, TX_SA, encoding_sa);
+ if (!IS_ERR(sa))
+ macsec_pn_wrapped(secy->secy, sa->sa);
+ else
+ WARN_ON(1);
+
+macsec_ack_irq:
+ nxp_c45_macsec_write(phydev, MACSEC_EVR,
+ TX_SC_BIT(secy_id));
+ *ret = IRQ_HANDLED;
+ } while (reg);
+}
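A worked example of the event-register walk above, assuming MACSEC_REG_SIZE is 32 and TX_SC_BIT(id) expands to BIT(MACSEC_REG_SIZE - 1 - id); neither definition is visible in this excerpt, so treat the numbers as illustrative:

/* reg = 0x00000009: bits 0 and 3 set.
 * Pass 1: ffs(reg) = 1 (1-based index of the least significant set
 *         bit), so secy_id = 32 - 1 = 31; the ack write clears
 *         TX_SC_BIT(31) = BIT(0), and MACSEC_EVR re-reads as 0x00000008.
 * Pass 2: ffs(reg) = 4, secy_id = 32 - 4 = 28; the ack clears
 *         TX_SC_BIT(28) = BIT(3), and the register re-reads as zero.
 * The loop exits once MACSEC_EVR reads zero.
 */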
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 7ab080ff0..3cf614b4c 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
- * Copyright (C) 2021 NXP
+ * Copyright 2021-2023 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
@@ -14,9 +14,10 @@
#include <linux/processor.h>
#include <linux/property.h>
#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
+#include "nxp-c45-tja11xx.h"
+
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
@@ -75,9 +76,11 @@
#define PORT_CONTROL_EN BIT(14)
#define VEND1_PORT_ABILITIES 0x8046
+#define MACSEC_ABILITY BIT(5)
#define PTP_ABILITY BIT(3)
#define VEND1_PORT_FUNC_IRQ_EN 0x807A
+#define MACSEC_IRQS BIT(5)
#define PTP_IRQS BIT(3)
#define VEND1_PTP_IRQ_ACK 0x9008
@@ -148,7 +151,6 @@
#define TS_SEC_MASK GENMASK(1, 0)
-#define VEND1_PORT_FUNC_ENABLES 0x8048
#define PTP_ENABLE BIT(3)
#define PHY_TEST_ENABLE BIT(0)
@@ -281,25 +283,6 @@ struct nxp_c45_phy_data {
irqreturn_t *irq_status);
};
-struct nxp_c45_phy {
- const struct nxp_c45_phy_data *phy_data;
- struct phy_device *phydev;
- struct mii_timestamper mii_ts;
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info caps;
- struct sk_buff_head tx_queue;
- struct sk_buff_head rx_queue;
- /* used to access the PTP registers atomic */
- struct mutex ptp_lock;
- int hwts_tx;
- int hwts_rx;
- u32 tx_delay;
- u32 rx_delay;
- struct timespec64 extts_ts;
- int extts_index;
- bool extts;
-};
-
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
@@ -1022,24 +1005,21 @@ static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
}
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
- struct ifreq *ifreq)
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
mii_ts);
struct phy_device *phydev = priv->phydev;
const struct nxp_c45_phy_data *data;
- struct hwtstamp_config cfg;
-
- if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
- return -EFAULT;
- if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
+ if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
return -ERANGE;
data = nxp_c45_get_data(phydev);
- priv->hwts_tx = cfg.tx_type;
+ priv->hwts_tx = cfg->tx_type;
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
priv->hwts_rx = 0;
break;
@@ -1047,7 +1027,7 @@ static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
priv->hwts_rx = 1;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
default:
return -ERANGE;
@@ -1074,7 +1054,7 @@ static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);
nxp_c45_no_ptp_irq:
- return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
@@ -1218,12 +1198,25 @@ static int nxp_c45_start_op(struct phy_device *phydev)
static int nxp_c45_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
+ if (ret)
+ return ret;
+
return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
- else
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
+ }
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
+ if (ret)
+ return ret;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}
static int tja1103_config_intr(struct phy_device *phydev)
@@ -1289,6 +1282,7 @@ static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
}
data->nmi_handler(phydev, &ret);
+ nxp_c45_handle_macsec_interrupt(phydev, &ret);
return ret;
}
@@ -1614,6 +1608,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
nxp_c45_counters_enable(phydev);
nxp_c45_ptp_init(phydev);
+ ret = nxp_c45_macsec_config_init(phydev);
+ if (ret)
+ return ret;
return nxp_c45_start_op(phydev);
}
@@ -1629,7 +1626,9 @@ static int nxp_c45_get_features(struct phy_device *phydev)
static int nxp_c45_probe(struct phy_device *phydev)
{
struct nxp_c45_phy *priv;
- int ptp_ability;
+ bool macsec_ability;
+ int phy_abilities;
+ bool ptp_ability;
int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
@@ -1645,9 +1644,9 @@ static int nxp_c45_probe(struct phy_device *phydev)
mutex_init(&priv->ptp_lock);
- ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_PORT_ABILITIES);
- ptp_ability = !!(ptp_ability & PTP_ABILITY);
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ ptp_ability = !!(phy_abilities & PTP_ABILITY);
if (!ptp_ability) {
phydev_dbg(phydev, "the phy does not support PTP");
goto no_ptp_support;
@@ -1666,6 +1665,20 @@ static int nxp_c45_probe(struct phy_device *phydev)
}
no_ptp_support:
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ if (!macsec_ability) {
+ phydev_info(phydev, "the phy does not support MACsec\n");
+ goto no_macsec_support;
+ }
+
+ if (IS_ENABLED(CONFIG_MACSEC)) {
+ ret = nxp_c45_macsec_probe(phydev);
+ phydev_dbg(phydev, "MACsec support enabled.");
+ } else {
+ phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
+ }
+
+no_macsec_support:
return ret;
}
@@ -1679,6 +1692,7 @@ static void nxp_c45_remove(struct phy_device *phydev)
skb_queue_purge(&priv->tx_queue);
skb_queue_purge(&priv->rx_queue);
+ nxp_c45_macsec_remove(phydev);
}
static void tja1103_counters_enable(struct phy_device *phydev)
diff --git a/drivers/net/phy/nxp-c45-tja11xx.h b/drivers/net/phy/nxp-c45-tja11xx.h
new file mode 100644
index 000000000..f364fca68
--- /dev/null
+++ b/drivers/net/phy/nxp-c45-tja11xx.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* NXP C45 PHY driver header file
+ * Copyright 2023 NXP
+ * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+ */
+
+#include <linux/ptp_clock_kernel.h>
+
+#define VEND1_PORT_FUNC_ENABLES 0x8048
+
+struct nxp_c45_macsec;
+
+struct nxp_c45_phy {
+ const struct nxp_c45_phy_data *phy_data;
+ struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ /* used to access the PTP registers atomically */
+ struct mutex ptp_lock;
+ int hwts_tx;
+ int hwts_rx;
+ u32 tx_delay;
+ u32 rx_delay;
+ struct timespec64 extts_ts;
+ int extts_index;
+ bool extts;
+ struct nxp_c45_macsec *macsec;
+};
+
+#if IS_ENABLED(CONFIG_MACSEC)
+int nxp_c45_macsec_config_init(struct phy_device *phydev);
+int nxp_c45_macsec_probe(struct phy_device *phydev);
+void nxp_c45_macsec_remove(struct phy_device *phydev);
+void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
+ irqreturn_t *ret);
+#else
+static inline
+int nxp_c45_macsec_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static inline
+int nxp_c45_macsec_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static inline
+void nxp_c45_macsec_remove(struct phy_device *phydev)
+{
+}
+
+static inline
+void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
+ irqreturn_t *ret)
+{
+}
+#endif
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index a71399965..2c263ae44 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -415,7 +415,7 @@ static void tja11xx_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(tja11xx_hw_stats); i++)
- ethtool_sprintf(&data, "%s", tja11xx_hw_stats[i].string);
+ ethtool_puts(&data, tja11xx_hw_stats[i].string);
}
static void tja11xx_get_stats(struct phy_device *phydev,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 8e6fd4962..747d14bf1 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -920,6 +920,79 @@ int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_read_abilities);
/**
+ * genphy_c45_pma_read_ext_abilities - read supported link modes from PMA
+ * @phydev: target phy_device struct
+ *
+ * Read the supported link modes from the PMA/PMD extended ability register
+ * (Register 1.11).
+ */
+int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBLRM);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10GBKR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_1000BT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_1000BKX);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_100BTX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_100BTX);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10BT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported,
+ val & MDIO_PMA_EXTABLE_10BT);
+
+ if (val & MDIO_PMA_EXTABLE_NBT) {
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+ MDIO_PMA_NG_EXTABLE);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_NG_EXTABLE_2_5GBT);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported,
+ val & MDIO_PMA_NG_EXTABLE_5GBT);
+ }
+
+ if (val & MDIO_PMA_EXTABLE_BT1) {
+ val = genphy_c45_pma_baset1_read_abilities(phydev);
+ if (val < 0)
+ return val;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_pma_read_ext_abilities);
+
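Each flag test above relies on linkmode_mod_bit(), which sets or clears a single link-mode bit depending on its third argument; its definition in include/linux/linkmode.h is effectively:

static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
				    int set)
{
	if (set)
		linkmode_set_bit(nr, addr);
	else
		linkmode_clear_bit(nr, addr);
}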
+/**
* genphy_c45_pma_read_abilities - read supported link modes from PMA
* @phydev: target phy_device struct
*
@@ -962,63 +1035,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
val & MDIO_PMA_STAT2_10GBER);
if (val & MDIO_PMA_STAT2_EXTABLE) {
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
+ val = genphy_c45_pma_read_ext_abilities(phydev);
if (val < 0)
return val;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_10GBLRM);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_10GBT);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_10GBKX4);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_10GBKR);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_1000BT);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_1000BKX);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_100BTX);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_100BTX);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_10BT);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- phydev->supported,
- val & MDIO_PMA_EXTABLE_10BT);
-
- if (val & MDIO_PMA_EXTABLE_NBT) {
- val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
- MDIO_PMA_NG_EXTABLE);
- if (val < 0)
- return val;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_NG_EXTABLE_2_5GBT);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->supported,
- val & MDIO_PMA_NG_EXTABLE_5GBT);
- }
-
- if (val & MDIO_PMA_EXTABLE_BT1) {
- val = genphy_c45_pma_baset1_read_abilities(phydev);
- if (val < 0)
- return val;
- }
}
/* This is optional functionality. If not supported, we may get an error
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 966c93cbe..15f349e59 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -540,6 +540,28 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
devad | MII_MMD_CTRL_NOINCR);
}
+static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum)
+{
+ if (is_c45)
+ return __mdiobus_c45_read(bus, phy_addr, devad, regnum);
+
+ mmd_phy_indirect(bus, phy_addr, devad, regnum);
+ /* Read the content of the MMD's selected register */
+ return __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
+}
+
+static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val)
+{
+ if (is_c45)
+ return __mdiobus_c45_write(bus, phy_addr, devad, regnum, val);
+
+ mmd_phy_indirect(bus, phy_addr, devad, regnum);
+ /* Write the data into MMD's selected register */
+ return __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
+}
+
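Both helpers fall back to the Clause 45-over-Clause 22 indirect sequence of mmd_phy_indirect() (IEEE 802.3 annex 22D), whose body is only partially visible at the top of this hunk; the sketch below matches my reading of the existing helper in the same file:

/* Clause 45-over-Clause 22 indirect access, as performed before the
 * MII_MMD_DATA read or write in the helpers above.
 */
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
			     u32 regnum)
{
	/* Write the desired MMD device address */
	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	__mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);

	/* Select the function: DATA with no post increment */
	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
			devad | MII_MMD_CTRL_NOINCR);
}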
/**
* __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
@@ -551,26 +573,14 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
*/
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
{
- int val;
-
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv && phydev->drv->read_mmd) {
- val = phydev->drv->read_mmd(phydev, devad, regnum);
- } else if (phydev->is_c45) {
- val = __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
- devad, regnum);
- } else {
- struct mii_bus *bus = phydev->mdio.bus;
- int phy_addr = phydev->mdio.addr;
-
- mmd_phy_indirect(bus, phy_addr, devad, regnum);
+ if (phydev->drv && phydev->drv->read_mmd)
+ return phydev->drv->read_mmd(phydev, devad, regnum);
- /* Read the content of the MMD's selected register */
- val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
- }
- return val;
+ return mmd_phy_read(phydev->mdio.bus, phydev->mdio.addr,
+ phydev->is_c45, devad, regnum);
}
EXPORT_SYMBOL(__phy_read_mmd);
@@ -607,28 +617,14 @@ EXPORT_SYMBOL(phy_read_mmd);
*/
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
{
- int ret;
-
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv && phydev->drv->write_mmd) {
- ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
- } else if (phydev->is_c45) {
- ret = __mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr,
- devad, regnum, val);
- } else {
- struct mii_bus *bus = phydev->mdio.bus;
- int phy_addr = phydev->mdio.addr;
-
- mmd_phy_indirect(bus, phy_addr, devad, regnum);
+ if (phydev->drv && phydev->drv->write_mmd)
+ return phydev->drv->write_mmd(phydev, devad, regnum, val);
- /* Write the data into MMD's selected register */
- __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
-
- ret = 0;
- }
- return ret;
+ return mmd_phy_write(phydev->mdio.bus, phydev->mdio.addr,
+ phydev->is_c45, devad, regnum, val);
}
EXPORT_SYMBOL(__phy_write_mmd);
@@ -655,6 +651,146 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_mmd);
/**
+ * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Convenience helper for reading a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_read();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum);
+}
+EXPORT_SYMBOL(__phy_package_read_mmd);
+
+/**
+ * phy_package_read_mmd - read MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Convenience helper for reading a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for phy_read();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+ int val;
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+ val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum);
+ phy_unlock_mdio_bus(phydev);
+
+ return val;
+}
+EXPORT_SYMBOL(phy_package_read_mmd);
+
+/**
+ * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Convenience helper for writing a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_write();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum, val);
+}
+EXPORT_SYMBOL(__phy_package_write_mmd);
+
+/**
+ * phy_package_write_mmd - write MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Convenience helper for writing a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for phy_write();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+ ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL(phy_package_write_mmd);
+
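Taken together with phy_package_join() (modified later in this patch), a driver for a hypothetical quad-PHY package might use these helpers as follows; the FOO_* names are invented for illustration:

/* Hypothetical usage sketch; FOO_* identifiers are made up. */
static int foo_phy_probe(struct phy_device *phydev)
{
	int ret, val;

	/* All four PHYs pass the same base address; offsets then select
	 * the package member that exposes the global registers.
	 */
	ret = devm_phy_package_join(&phydev->mdio.dev, phydev,
				    FOO_PKG_BASE_ADDR, 0);
	if (ret)
		return ret;

	/* Read a (made-up) global register on the PHY at base + 3 */
	val = phy_package_read_mmd(phydev, 3, MDIO_MMD_VEND1,
				   FOO_GLOBAL_CFG);
	if (val < 0)
		return val;

	return 0;
}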
+/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a5fa07765..3376e58e2 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -325,9 +325,13 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_get);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii_data = if_mii(ifr);
+ struct kernel_hwtstamp_config kernel_cfg;
+ struct netlink_ext_ack extack = {};
u16 val = mii_data->val_in;
bool change_autoneg = false;
+ struct hwtstamp_config cfg;
int prtad, devad;
+ int ret;
switch (cmd) {
case SIOCGMIIPHY:
@@ -411,8 +415,21 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
return 0;
case SIOCSHWTSTAMP:
- if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp) {
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ hwtstamp_config_to_kernel(&kernel_cfg, &cfg);
+ ret = phydev->mii_ts->hwtstamp(phydev->mii_ts, &kernel_cfg, &extack);
+ if (ret)
+ return ret;
+
+ hwtstamp_config_from_kernel(&cfg, &kernel_cfg);
+ if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)))
+ return -EFAULT;
+
+ return 0;
+ }
fallthrough;
default:
@@ -469,7 +486,7 @@ int __phy_hwtstamp_get(struct phy_device *phydev,
if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(phydev, config->ifr, SIOCGHWTSTAMP);
+ return -EOPNOTSUPP;
}
/**
@@ -486,7 +503,10 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(phydev, config->ifr, SIOCSHWTSTAMP);
+ if (phydev->mii_ts && phydev->mii_ts->hwtstamp)
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, config, extack);
+
+ return -EOPNOTSUPP;
}
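With this change, a PHY timestamper implements the op directly against kernel_hwtstamp_config; the core performs the user-space copies and format conversion on its behalf. A minimal hypothetical implementation (foo_* names invented) of the new callback signature:

/* Hypothetical timestamper sketch against the new callback signature. */
static int foo_hwtstamp(struct mii_timestamper *mii_ts,
			struct kernel_hwtstamp_config *cfg,
			struct netlink_ext_ack *extack)
{
	struct foo_phy *priv = container_of(mii_ts, struct foo_phy, mii_ts);

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = false;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		priv->hwts_rx = true;
		break;
	default:
		NL_SET_ERR_MSG(extack, "rx filter not supported");
		return -ERANGE;
	}

	/* No copy_{from,to}_user() here: the core converts the ioctl or
	 * netlink request before calling in and writes back afterwards.
	 */
	return 0;
}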
/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 813b753e2..8efa2a136 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -654,6 +654,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
mdiodev->flags = MDIO_DEVICE_FLAG_PHY;
mdiodev->device_free = phy_mdio_device_free;
mdiodev->device_remove = phy_mdio_device_remove;
+ mdiodev->reset_state = -1;
dev->speed = SPEED_UNKNOWN;
dev->duplex = DUPLEX_UNKNOWN;
@@ -1235,18 +1236,19 @@ int phy_init_hw(struct phy_device *phydev)
if (phydev->drv->soft_reset) {
ret = phydev->drv->soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
/* see comment in genphy_soft_reset for an explanation */
- if (!ret)
- phydev->suspended = 0;
+ phydev->suspended = 0;
}
- if (ret < 0)
- return ret;
-
ret = phy_scan_fixups(phydev);
if (ret < 0)
return ret;
+ phy_interface_zero(phydev->possible_interfaces);
+
if (phydev->drv->config_init) {
ret = phydev->drv->config_init(phydev);
if (ret < 0)
@@ -1411,6 +1413,11 @@ int phy_sfp_probe(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_sfp_probe);
+static bool phy_drv_supports_irq(struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->handle_interrupt;
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1525,6 +1532,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->dev_flags & PHY_F_NO_IRQ)
phydev->irq = PHY_POLL;
+ if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1650,20 +1660,22 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
/**
* phy_package_join - join a common PHY group
* @phydev: target phy_device struct
- * @addr: cookie and PHY address for global register access
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
* @priv_size: if non-zero allocate this amount of bytes for private data
*
* This joins a PHY group and provides a shared storage for all phydevs in
* this group. This is intended to be used for packages which contain
* more than one PHY, for example a quad PHY transceiver.
*
- * The addr parameter serves as a cookie which has to have the same value
- * for all members of one group and as a PHY address to access generic
- * registers of a PHY package. Usually, one of the PHY addresses of the
- * different PHYs in the package provides access to these global registers.
+ * The base_addr parameter serves as a cookie which has to have the same value
+ * for all members of one group and as the base PHY address of the PHY package
+ * for offset calculation to access generic registers of a PHY package.
+ * Usually, one of the PHY addresses of the different PHYs in the package
+ * provides access to these global registers.
* The address which is given here, will be used in the phy_package_read()
- * and phy_package_write() convenience functions. If your PHY doesn't have
- * global registers you can just pick any of the PHY addresses.
+ * and phy_package_write() convenience functions as the base to which the
+ * passed offset is added.
*
* This will set the shared pointer of the phydev to the shared storage.
* If this is the first call for a this cookie the shared storage will be
@@ -1673,17 +1685,17 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
* Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
* with the same cookie but a different priv_size is an error.
*/
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
{
struct mii_bus *bus = phydev->mdio.bus;
struct phy_package_shared *shared;
int ret;
- if (addr < 0 || addr >= PHY_MAX_ADDR)
+ if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
return -EINVAL;
mutex_lock(&bus->shared_lock);
- shared = bus->shared[addr];
+ shared = bus->shared[base_addr];
if (!shared) {
ret = -ENOMEM;
shared = kzalloc(sizeof(*shared), GFP_KERNEL);
@@ -1695,9 +1707,9 @@ int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
goto err_free;
shared->priv_size = priv_size;
}
- shared->addr = addr;
+ shared->base_addr = base_addr;
refcount_set(&shared->refcnt, 1);
- bus->shared[addr] = shared;
+ bus->shared[base_addr] = shared;
} else {
ret = -EINVAL;
if (priv_size && priv_size != shared->priv_size)
@@ -1735,7 +1747,7 @@ void phy_package_leave(struct phy_device *phydev)
return;
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
- bus->shared[shared->addr] = NULL;
+ bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
kfree(shared->priv);
kfree(shared);
@@ -1754,7 +1766,8 @@ static void devm_phy_package_leave(struct device *dev, void *res)
* devm_phy_package_join - resource managed phy_package_join()
* @dev: device that is registering this PHY package
* @phydev: target phy_device struct
- * @addr: cookie and PHY address for global register access
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
* @priv_size: if non-zero allocate this amount of bytes for private data
*
* Managed phy_package_join(). Shared storage fetched by this function,
@@ -1762,7 +1775,7 @@ static void devm_phy_package_leave(struct device *dev, void *res)
* phy_package_join() for more information.
*/
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size)
+ int base_addr, size_t priv_size)
{
struct phy_device **ptr;
int ret;
@@ -1772,7 +1785,7 @@ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
if (!ptr)
return -ENOMEM;
- ret = phy_package_join(phydev, addr, priv_size);
+ ret = phy_package_join(phydev, base_addr, priv_size);
if (!ret) {
*ptr = phydev;
@@ -2987,11 +3000,6 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
-static bool phy_drv_supports_irq(struct phy_driver *phydrv)
-{
- return phydrv->config_intr && phydrv->handle_interrupt;
-}
-
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 25c19496a..ed0b4ccaa 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -121,6 +121,19 @@ do { \
})
#endif
+static const phy_interface_t phylink_sfp_interface_preference[] = {
+ PHY_INTERFACE_MODE_25GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_5GBASER,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_100BASEX,
+};
+
+static DECLARE_PHY_INTERFACE_MASK(phylink_sfp_interfaces);
+
/**
* phylink_set_port_modes() - set the port type modes in the ethtool mask
* @mask: ethtool link mode mask
@@ -689,28 +702,48 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
-static int phylink_validate_mask(struct phylink *pl, unsigned long *supported,
+static void phylink_validate_one(struct phylink *pl, struct phy_device *phy,
+ const unsigned long *supported,
+ const struct phylink_link_state *state,
+ phy_interface_t interface,
+ unsigned long *accum_supported,
+ unsigned long *accum_advertising)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_supported);
+ struct phylink_link_state tmp_state;
+
+ linkmode_copy(tmp_supported, supported);
+
+ tmp_state = *state;
+ tmp_state.interface = interface;
+
+ if (phy)
+ tmp_state.rate_matching = phy_get_rate_matching(phy, interface);
+
+ if (!phylink_validate_mac_and_pcs(pl, tmp_supported, &tmp_state)) {
+ phylink_dbg(pl, " interface %u (%s) rate match %s supports %*pbl\n",
+ interface, phy_modes(interface),
+ phy_rate_matching_to_str(tmp_state.rate_matching),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, tmp_supported);
+
+ linkmode_or(accum_supported, accum_supported, tmp_supported);
+ linkmode_or(accum_advertising, accum_advertising,
+ tmp_state.advertising);
+ }
+}
+
+static int phylink_validate_mask(struct phylink *pl, struct phy_device *phy,
+ unsigned long *supported,
struct phylink_link_state *state,
const unsigned long *interfaces)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(s);
- struct phylink_link_state t;
- int intf;
-
- for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
- if (test_bit(intf, interfaces)) {
- linkmode_copy(s, supported);
-
- t = *state;
- t.interface = intf;
- if (!phylink_validate_mac_and_pcs(pl, s, &t)) {
- linkmode_or(all_s, all_s, s);
- linkmode_or(all_adv, all_adv, t.advertising);
- }
- }
- }
+ int interface;
+
+ for_each_set_bit(interface, interfaces, PHY_INTERFACE_MODE_MAX)
+ phylink_validate_one(pl, phy, supported, state, interface,
+ all_s, all_adv);
linkmode_copy(supported, all_s);
linkmode_copy(state->advertising, all_adv);
@@ -724,7 +757,8 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
const unsigned long *interfaces = pl->config->supported_interfaces;
if (state->interface == PHY_INTERFACE_MODE_NA)
- return phylink_validate_mask(pl, supported, state, interfaces);
+ return phylink_validate_mask(pl, NULL, supported, state,
+ interfaces);
if (!test_bit(state->interface, interfaces))
return -EINVAL;
@@ -805,7 +839,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
pl->link_config.speed);
- bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_fill(pl->supported);
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
@@ -849,6 +883,7 @@ static int phylink_parse_mode(struct phylink *pl,
{
struct fwnode_handle *dn;
const char *managed;
+ unsigned long caps;
dn = fwnode_get_named_child_node(fwnode, "fixed-link");
if (dn || fwnode_property_present(fwnode, "fixed-link"))
@@ -881,80 +916,18 @@ static int phylink_parse_mode(struct phylink *pl,
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RTBI:
- phylink_set(pl->supported, 10baseT_Half);
- phylink_set(pl->supported, 10baseT_Full);
- phylink_set(pl->supported, 100baseT_Half);
- phylink_set(pl->supported, 100baseT_Full);
- phylink_set(pl->supported, 1000baseT_Half);
- phylink_set(pl->supported, 1000baseT_Full);
- break;
-
case PHY_INTERFACE_MODE_1000BASEX:
- phylink_set(pl->supported, 1000baseX_Full);
- break;
-
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(pl->supported, 2500baseX_Full);
- break;
-
case PHY_INTERFACE_MODE_5GBASER:
- phylink_set(pl->supported, 5000baseT_Full);
- break;
-
case PHY_INTERFACE_MODE_25GBASER:
- phylink_set(pl->supported, 25000baseCR_Full);
- phylink_set(pl->supported, 25000baseKR_Full);
- phylink_set(pl->supported, 25000baseSR_Full);
- fallthrough;
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
- phylink_set(pl->supported, 10baseT_Half);
- phylink_set(pl->supported, 10baseT_Full);
- phylink_set(pl->supported, 100baseT_Half);
- phylink_set(pl->supported, 100baseT_Full);
- phylink_set(pl->supported, 1000baseT_Half);
- phylink_set(pl->supported, 1000baseT_Full);
- phylink_set(pl->supported, 1000baseX_Full);
- phylink_set(pl->supported, 1000baseKX_Full);
- phylink_set(pl->supported, 2500baseT_Full);
- phylink_set(pl->supported, 2500baseX_Full);
- phylink_set(pl->supported, 5000baseT_Full);
- phylink_set(pl->supported, 10000baseT_Full);
- phylink_set(pl->supported, 10000baseKR_Full);
- phylink_set(pl->supported, 10000baseKX4_Full);
- phylink_set(pl->supported, 10000baseCR_Full);
- phylink_set(pl->supported, 10000baseSR_Full);
- phylink_set(pl->supported, 10000baseLR_Full);
- phylink_set(pl->supported, 10000baseLRM_Full);
- phylink_set(pl->supported, 10000baseER_Full);
- break;
-
case PHY_INTERFACE_MODE_XLGMII:
- phylink_set(pl->supported, 25000baseCR_Full);
- phylink_set(pl->supported, 25000baseKR_Full);
- phylink_set(pl->supported, 25000baseSR_Full);
- phylink_set(pl->supported, 40000baseKR4_Full);
- phylink_set(pl->supported, 40000baseCR4_Full);
- phylink_set(pl->supported, 40000baseSR4_Full);
- phylink_set(pl->supported, 40000baseLR4_Full);
- phylink_set(pl->supported, 50000baseCR2_Full);
- phylink_set(pl->supported, 50000baseKR2_Full);
- phylink_set(pl->supported, 50000baseSR2_Full);
- phylink_set(pl->supported, 50000baseKR_Full);
- phylink_set(pl->supported, 50000baseSR_Full);
- phylink_set(pl->supported, 50000baseCR_Full);
- phylink_set(pl->supported, 50000baseLR_ER_FR_Full);
- phylink_set(pl->supported, 50000baseDR_Full);
- phylink_set(pl->supported, 100000baseKR4_Full);
- phylink_set(pl->supported, 100000baseSR4_Full);
- phylink_set(pl->supported, 100000baseCR4_Full);
- phylink_set(pl->supported, 100000baseLR4_ER4_Full);
- phylink_set(pl->supported, 100000baseKR2_Full);
- phylink_set(pl->supported, 100000baseSR2_Full);
- phylink_set(pl->supported, 100000baseCR2_Full);
- phylink_set(pl->supported, 100000baseLR2_ER2_FR2_Full);
- phylink_set(pl->supported, 100000baseDR2_Full);
+ caps = ~(MAC_SYM_PAUSE | MAC_ASYM_PAUSE);
+ caps = phylink_get_capabilities(pl->link_config.interface, caps,
+ RATE_MATCH_NONE);
+ phylink_caps_to_linkmodes(pl->supported, caps);
break;
default:
@@ -1101,6 +1074,72 @@ static void phylink_pcs_an_restart(struct phylink *pl)
pl->pcs->ops->pcs_an_restart(pl->pcs);
}
+/**
+ * phylink_pcs_neg_mode() - helper to determine PCS inband mode
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @interface: interface mode to be used
+ * @advertising: advertisement ethtool link mode mask
+ *
+ * Determines the negotiation mode to be used by the PCS, and returns
+ * one of:
+ *
+ * - %PHYLINK_PCS_NEG_NONE: interface mode does not support inband
+ * - %PHYLINK_PCS_NEG_OUTBAND: an out of band mode (e.g. reading the PHY)
+ * will be used.
+ * - %PHYLINK_PCS_NEG_INBAND_DISABLED: inband mode selected but autoneg
+ * disabled
+ * - %PHYLINK_PCS_NEG_INBAND_ENABLED: inband mode selected and autoneg enabled
+ *
+ * Note: this is for cases where the PCS itself is involved in negotiation
+ * (e.g. Clause 37, SGMII and similar) not Clause 73.
+ */
+static unsigned int phylink_pcs_neg_mode(unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising)
+{
+ unsigned int neg_mode;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ /* These protocols are designed for use with a PHY which
+ * communicates its negotiation result back to the MAC via
+ * inband communication. Note: there exist PHYs that run
+ * with SGMII but do not send the inband data.
+ */
+ if (!phylink_autoneg_inband(mode))
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ else
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ break;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* 1000base-X is designed for use on the fibre media side,
+ * and thus the Autoneg bit needs to be taken into account.
+ * We do the same for 2500base-X, but drivers may not
+ * support this, so they may need to override it.
+ */
+ if (!phylink_autoneg_inband(mode))
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising))
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ else
+ neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
+ break;
+
+ default:
+ neg_mode = PHYLINK_PCS_NEG_NONE;
+ break;
+ }
+
+ return neg_mode;
+}
+
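On the consumer side, a PCS driver that opts in (by setting pcs->neg_mode) receives one of these PHYLINK_PCS_NEG_* values in place of the raw MLO_AN_* mode; a hypothetical sketch of such a driver, with foo_* helpers invented for illustration:

/* Hypothetical PCS sketch (foo_* invented), assuming the PCS sets
 * pcs->neg_mode = true so the neg_mode argument carries a
 * PHYLINK_PCS_NEG_* value computed by the helper above.
 */
static int foo_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			  phy_interface_t interface,
			  const unsigned long *advertising,
			  bool permit_pause_to_mac)
{
	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
		foo_enable_inband_aneg(pcs);	/* invented helper */
	else
		foo_force_link_settings(pcs);	/* invented helper */

	return 0;
}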
static void phylink_major_config(struct phylink *pl, bool restart,
const struct phylink_link_state *state)
{
@@ -1640,7 +1679,7 @@ struct phylink *phylink_create(struct phylink_config *config,
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
timer_setup(&pl->link_poll, phylink_fixed_poll, 0);
- bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_fill(pl->supported);
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
@@ -1739,31 +1778,55 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
phylink_pause_to_str(pl->phy_state.pause));
}
-static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
- phy_interface_t interface)
+static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct phylink_link_state config;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- char *irq_str;
- int ret;
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
- /*
- * This is the new way of dealing with flow control for PHYs,
- * as described by Timur Tabi in commit 529ed1275263 ("net: phy:
- * phy drivers should not set SUPPORTED_[Asym_]Pause") except
- * using our validate call to the MAC, we rely upon the MAC
- * clearing the bits from both supported and advertising fields.
+ /* If the PHY provides a bitmap of the interfaces it will be using
+ * depending on the negotiated media speeds, use this to validate
+ * which ethtool link modes can be used.
*/
- phy_support_asym_pause(phy);
+ if (!phy_interface_empty(phy->possible_interfaces)) {
+ /* We only care about the intersection of the PHY's interfaces and
+ * those which the host supports.
+ */
+ phy_interface_and(interfaces, phy->possible_interfaces,
+ pl->config->supported_interfaces);
- memset(&config, 0, sizeof(config));
- linkmode_copy(supported, phy->supported);
- linkmode_copy(config.advertising, phy->advertising);
+ if (phy_interface_empty(interfaces)) {
+ phylink_err(pl, "PHY has no common interfaces\n");
+ return -EINVAL;
+ }
+
+ if (phy_on_sfp(phy)) {
+ /* If the PHY is on a SFP, limit the interfaces to
+ * those that can be used with a SFP module.
+ */
+ phy_interface_and(interfaces, interfaces,
+ phylink_sfp_interfaces);
+
+ if (phy_interface_empty(interfaces)) {
+ phylink_err(pl, "SFP PHY's possible interfaces becomes empty\n");
+ return -EINVAL;
+ }
+ }
+
+ phylink_dbg(pl, "PHY %s uses interfaces %*pbl, validating %*pbl\n",
+ phydev_name(phy),
+ (int)PHY_INTERFACE_MODE_MAX,
+ phy->possible_interfaces,
+ (int)PHY_INTERFACE_MODE_MAX, interfaces);
+
+ return phylink_validate_mask(pl, phy, supported, state,
+ interfaces);
+ }
/* Check whether we would use rate matching for the proposed interface
* mode.
*/
- config.rate_matching = phy_get_rate_matching(phy, interface);
+ state->rate_matching = phy_get_rate_matching(phy, state->interface);
/* Clause 45 PHYs may switch their Serdes lane between, e.g. 10GBASE-R,
* 5GBASE-R, 2500BASE-X and SGMII if they are not using rate matching.
@@ -1776,15 +1839,38 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
* against all interface modes, which may lead to more ethtool link
* modes being advertised than are actually supported.
*/
- if (phy->is_c45 && config.rate_matching == RATE_MATCH_NONE &&
- interface != PHY_INTERFACE_MODE_RXAUI &&
- interface != PHY_INTERFACE_MODE_XAUI &&
- interface != PHY_INTERFACE_MODE_USXGMII)
- config.interface = PHY_INTERFACE_MODE_NA;
- else
- config.interface = interface;
+ if (phy->is_c45 && state->rate_matching == RATE_MATCH_NONE &&
+ state->interface != PHY_INTERFACE_MODE_RXAUI &&
+ state->interface != PHY_INTERFACE_MODE_XAUI &&
+ state->interface != PHY_INTERFACE_MODE_USXGMII)
+ state->interface = PHY_INTERFACE_MODE_NA;
- ret = phylink_validate(pl, supported, &config);
+ return phylink_validate(pl, supported, state);
+}
+
+static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
+{
+ struct phylink_link_state config;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ char *irq_str;
+ int ret;
+
+ /*
+ * This is the new way of dealing with flow control for PHYs,
+ * as described by Timur Tabi in commit 529ed1275263 ("net: phy:
+ * phy drivers should not set SUPPORTED_[Asym_]Pause") except
+ * using our validate call to the MAC, we rely upon the MAC
+ * clearing the bits from both supported and advertising fields.
+ */
+ phy_support_asym_pause(phy);
+
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
+ config.interface = interface;
+
+ ret = phylink_validate_phy(pl, phy, supported, &config);
if (ret) {
phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %pe\n",
phy_modes(config.interface),
@@ -3005,19 +3091,6 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static const phy_interface_t phylink_sfp_interface_preference[] = {
- PHY_INTERFACE_MODE_25GBASER,
- PHY_INTERFACE_MODE_USXGMII,
- PHY_INTERFACE_MODE_10GBASER,
- PHY_INTERFACE_MODE_5GBASER,
- PHY_INTERFACE_MODE_2500BASEX,
- PHY_INTERFACE_MODE_SGMII,
- PHY_INTERFACE_MODE_1000BASEX,
- PHY_INTERFACE_MODE_100BASEX,
-};
-
-static DECLARE_PHY_INTERFACE_MASK(phylink_sfp_interfaces);
-
static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
const unsigned long *intf)
{
@@ -3160,7 +3233,8 @@ static int phylink_sfp_config_optical(struct phylink *pl)
/* For all the interfaces that are supported, reduce the sfp_support
* mask to only those link modes that can be supported.
*/
- ret = phylink_validate_mask(pl, pl->sfp_support, &config, interfaces);
+ ret = phylink_validate_mask(pl, NULL, pl->sfp_support, &config,
+ interfaces);
if (ret) {
phylink_err(pl, "unsupported SFP module: validation with support %*pb failed\n",
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 208a9393c..db39dec7f 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -151,10 +151,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned int br_min, br_nom, br_max;
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
- phylink_set(modes, Autoneg);
- phylink_set(modes, Pause);
- phylink_set(modes, Asym_Pause);
-
/* Decode the bitrate information to MBd */
br_min = br_nom = br_max = 0;
if (id->base.br_nominal) {
@@ -328,7 +324,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* modules use 2500Mbaud rather than 3100 or 3200Mbaud for
* 2500BASE-X, so we allow some slack here.
*/
- if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
+ if (linkmode_empty(modes) && br_nom) {
if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
__set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
@@ -339,6 +335,10 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
}
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+
if (bus->sfp_quirk && bus->sfp_quirk->modes)
bus->sfp_quirk->modes(id, modes, interfaces);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 5468bd209..f75c9eb39 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -191,7 +191,7 @@ static const enum gpiod_flags gpio_flags[] = {
* R_PHY_RETRY is the number of attempts.
*/
#define T_PHY_RETRY msecs_to_jiffies(50)
-#define R_PHY_RETRY 12
+#define R_PHY_RETRY 25
/* SFP module presence detection is poor: the three MOD DEF signals are
* the same length on the PCB, which means it's possible for MOD DEF 0 to
@@ -275,6 +275,7 @@ struct sfp {
unsigned int module_power_mW;
unsigned int module_t_start_up;
unsigned int module_t_wait;
+ unsigned int phy_t_retry;
unsigned int rate_kbd;
unsigned int rs_threshold_kbd;
@@ -372,18 +373,28 @@ static void sfp_fixup_10gbaset_30m(struct sfp *sfp)
sfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SR;
}
-static void sfp_fixup_rollball_proto(struct sfp *sfp, unsigned int secs)
+static void sfp_fixup_rollball(struct sfp *sfp)
{
sfp->mdio_protocol = MDIO_I2C_ROLLBALL;
- sfp->module_t_wait = msecs_to_jiffies(secs * 1000);
+
+ /* RollBall modules may disallow access to PHY registers for up to 25
+ * seconds, and the reads return 0xffff before that. Increase the time
+ * between PHY probe retries from 50ms to 1s so that we will wait for
+ * the PHY for a sufficient amount of time.
+ */
+ sfp->phy_t_retry = msecs_to_jiffies(1000);
}
static void sfp_fixup_fs_10gt(struct sfp *sfp)
{
sfp_fixup_10gbaset_30m(sfp);
+ sfp_fixup_rollball(sfp);
- // These SFPs need 4 seconds before the PHY can be accessed
- sfp_fixup_rollball_proto(sfp, 4);
+ /* The RollBall fixup is not enough for FS modules: the AQR chip inside
+ * them does not return 0xffff for PHY ID registers in all MMDs while
+ * initializing. They need a 4 second wait before the PHY can be accessed.
+ */
+ sfp->module_t_wait = msecs_to_jiffies(4000);
}
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
@@ -395,12 +406,6 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
}
-static void sfp_fixup_rollball(struct sfp *sfp)
-{
- // Rollball SFPs need 25 seconds before the PHY can be accessed
- sfp_fixup_rollball_proto(sfp, 25);
-}
-
static void sfp_fixup_rollball_cc(struct sfp *sfp)
{
sfp_fixup_rollball(sfp);
@@ -2332,6 +2337,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
sfp->module_t_start_up = T_START_UP;
sfp->module_t_wait = T_WAIT;
+ sfp->phy_t_retry = T_PHY_RETRY;
sfp->state_ignore_mask = 0;
@@ -2629,7 +2635,11 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
ret = sfp_sm_probe_for_phy(sfp);
if (ret == -ENODEV) {
if (--sfp->sm_phy_retries) {
- sfp_sm_next(sfp, SFP_S_INIT_PHY, T_PHY_RETRY);
+ sfp_sm_next(sfp, SFP_S_INIT_PHY,
+ sfp->phy_t_retry);
+ dev_dbg(sfp->dev,
+ "no PHY detected, %u tries left\n",
+ sfp->sm_phy_retries);
break;
} else {
dev_info(sfp->dev, "no PHY detected\n");
@@ -3096,7 +3106,7 @@ static int sfp_probe(struct platform_device *pdev)
return 0;
}
-static int sfp_remove(struct platform_device *pdev)
+static void sfp_remove(struct platform_device *pdev)
{
struct sfp *sfp = platform_get_drvdata(pdev);
@@ -3106,8 +3116,6 @@ static int sfp_remove(struct platform_device *pdev)
rtnl_lock();
sfp_sm_event(sfp, SFP_E_REMOVE);
rtnl_unlock();
-
- return 0;
}
static void sfp_shutdown(struct platform_device *pdev)
@@ -3128,7 +3136,7 @@ static void sfp_shutdown(struct platform_device *pdev)
static struct platform_driver sfp_driver = {
.probe = sfp_probe,
- .remove = sfp_remove,
+ .remove_new = sfp_remove,
.shutdown = sfp_shutdown,
.driver = {
.name = "sfp",
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 1c7306a1a..150aea7c9 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -508,7 +508,7 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(smsc_hw_stats); i++)
- ethtool_sprintf(&data, "%s", smsc_hw_stats[i].string);
+ ethtool_puts(&data, smsc_hw_stats[i].string);
}
static u64 smsc_get_stat(struct phy_device *phydev, int i)
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 40ce8abe6..cc7d1113e 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1437,4 +1437,5 @@ static int __init plip_init (void)
module_init(plip_init);
module_exit(plip_cleanup_module);
+MODULE_DESCRIPTION("PLIP (parallel port) network module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index db0dc36d1..55954594e 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -1166,5 +1166,6 @@ static void __exit bsdcomp_cleanup(void)
module_init(bsdcomp_init);
module_exit(bsdcomp_cleanup);
+MODULE_DESCRIPTION("PPP BSD-Compress compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index e94a4b08f..c33c3db3c 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -87,6 +87,7 @@ struct asyncppp {
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
+MODULE_DESCRIPTION("PPP async serial channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
@@ -537,7 +538,7 @@ ppp_async_encode(struct asyncppp *ap)
proto = get_unaligned_be16(data);
/*
- * LCP packets with code values between 1 (configure-reqest)
+ * LCP packets with code values between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
* had been negotiated.
*/
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index e6d48e5c6..4d2ff63f2 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -630,6 +630,7 @@ static void __exit deflate_cleanup(void)
module_init(deflate_init);
module_exit(deflate_cleanup);
+MODULE_DESCRIPTION("PPP Deflate compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0193af2d3..3dd52bf28 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3604,6 +3604,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_DESCRIPTION("Generic PPP layer driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 52d05ce4a..45bf59ac8 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -724,5 +724,6 @@ ppp_sync_cleanup(void)
module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
+MODULE_DESCRIPTION("PPP synchronous TTY channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 8e7238e97..2ea4f4890 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
struct sk_buff *skb;
int error = 0;
- if (sk->sk_state & PPPOX_BOUND) {
- error = -EIO;
- goto end;
- }
+ if (sk->sk_state & PPPOX_BOUND)
+ return -EIO;
skb = skb_recv_datagram(sk, flags, &error);
- if (error < 0)
- goto end;
+ if (!skb)
+ return error;
- if (skb) {
- total_len = min_t(size_t, total_len, skb->len);
- error = skb_copy_datagram_msg(skb, 0, m, total_len);
- if (error == 0) {
- consume_skb(skb);
- return total_len;
- }
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_msg(skb, 0, m, total_len);
+ if (error == 0) {
+ consume_skb(skb);
+ return total_len;
}
kfree_skb(skb);
-end:
return error;
}
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index ba93bab94..18df7ca66 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -752,4 +752,5 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
+MODULE_DESCRIPTION("Compression helpers for SLIP (serial line)");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index e4280e37f..0aba3569c 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -1437,5 +1437,6 @@ out:
}
#endif
+MODULE_DESCRIPTION("SLIP (serial line) protocol module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SLIP);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8f95a562b..86515f0c2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2132,14 +2132,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+ if (net_ratelimit()) {
+ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ }
WARN_ON_ONCE(1);
return -EINVAL;
}
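
The hunk above replaces an unconditional pr_err() with the standard rate-limited, device-prefixed pattern; sketched in isolation (net_ratelimit() and netdev_err() are existing kernel helpers):

    /* Allow only a bounded burst of messages per interval, so a stream
     * of malformed GSO packets cannot flood the kernel log. */
    if (net_ratelimit())
        netdev_err(dev, "unexpected GSO type: 0x%x\n", gso_type);
    WARN_ON_ONCE(1);   /* still emit one backtrace for debugging */
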
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d837c1887..21b6c4d94 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1273,6 +1273,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(dev->net, mac);
+ if (!is_local_ether_addr(mac))
+ dev->net->addr_assign_type = NET_ADDR_PERM;
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
@@ -1315,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
+ ax88179_reset(dev);
+
return 0;
}
@@ -1452,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
- skb_trim(ax_skb, pkt_len);
-
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
+ skb_put(ax_skb, pkt_len);
+ memcpy(ax_skb->data, skb->data + 2, pkt_len);
- skb->truesize = pkt_len_plus_padd +
- SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
@@ -1693,7 +1692,6 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1706,7 +1704,6 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 83b845222..f088ea2ba 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -363,7 +363,6 @@ static int disable_net;
/* driver info */
static const char driver_name[] = "hso";
static const char tty_filename[] = "ttyHS";
-static const char *version = __FILE__ ": " MOD_AUTHOR;
/* the usb driver itself (registered in hso_init) */
static struct usb_driver hso_driver;
/* serial structures */
@@ -3228,16 +3227,8 @@ static struct usb_driver hso_driver = {
static int __init hso_init(void)
{
- int i;
int result;
- /* put it in the log */
- pr_info("%s\n", version);
-
- /* Initialise the serial table semaphore and table */
- for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++)
- serial_table[i] = NULL;
-
/* allocate our driver using the proper amount of supported minors */
tty_drv = tty_alloc_driver(HSO_SERIAL_TTY_MINORS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
@@ -3285,8 +3276,6 @@ err_free_tty:
static void __exit hso_exit(void)
{
- pr_info("unloaded\n");
-
tty_unregister_driver(tty_drv);
/* deregister the usb driver */
usb_deregister(&hso_driver);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f0fb9cd1f..d2aa2c5b1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1693,8 +1693,6 @@ static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
if (buf & MAC_CR_EEE_EN_) {
edata->eee_enabled = true;
- edata->eee_active = !!(edata->advertised &
- edata->lp_advertised);
edata->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 9bf2140fd..51fe00c2b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10069,7 +10069,7 @@ static struct usb_driver rtl8152_driver = {
.disable_hub_initiated_lpm = 1,
};
-static int rtl8152_cfgselector_probe(struct usb_device *udev)
+static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
{
struct usb_host_config *c;
int i, num_configs;
@@ -10078,7 +10078,7 @@ static int rtl8152_cfgselector_probe(struct usb_device *udev)
* driver supports it.
*/
if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
- return 0;
+ return -ENODEV;
/* The vendor mode is not always config #1, so to find it out. */
c = udev->config;
@@ -10096,19 +10096,13 @@ static int rtl8152_cfgselector_probe(struct usb_device *udev)
if (i == num_configs)
return -ENODEV;
- if (usb_set_configuration(udev, c->desc.bConfigurationValue)) {
- dev_err(&udev->dev, "Failed to set configuration %d\n",
- c->desc.bConfigurationValue);
- return -ENODEV;
- }
-
- return 0;
+ return c->desc.bConfigurationValue;
}
static struct usb_device_driver rtl8152_cfgselector_driver = {
- .name = MODULENAME "-cfgselector",
- .probe = rtl8152_cfgselector_probe,
- .id_table = rtl8152_table,
+ .name = MODULENAME "-cfgselector",
+ .choose_configuration = rtl8152_cfgselector_choose_configuration,
+ .id_table = rtl8152_table,
.generic_subclass = 1,
.supports_autosuspend = 1,
};
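
With this conversion the driver no longer calls usb_set_configuration() itself: a usb_device_driver's ->choose_configuration() callback returns the bConfigurationValue the USB core should select, or a negative errno to decline, and the core performs the actual switch. A minimal sketch of that callback shape, under this reading of the 6.8 core semantics:

    static int demo_choose_configuration(struct usb_device *udev)
    {
        /* Pick the vendor-specific configuration when the device is
         * recognized; otherwise decline and let the core choose. */
        if (!demo_device_is_supported(udev))        /* hypothetical */
            return -ENODEV;
        return demo_preferred_config_value(udev);   /* hypothetical */
    }
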
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2f3fd2873..5146aa698 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1483,6 +1483,7 @@ static void veth_free_queues(struct net_device *dev)
static int veth_dev_init(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
return veth_alloc_queues(dev);
}
@@ -1705,6 +1706,24 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci)
+{
+ const struct veth_xdp_buff *_ctx = (void *)ctx;
+ const struct sk_buff *skb = _ctx->skb;
+ int err;
+
+ if (!skb)
+ return -ENODATA;
+
+ err = __vlan_hwaccel_get_tag(skb, vlan_tci);
+ if (err)
+ return err;
+
+ *vlan_proto = skb->vlan_proto;
+ return err;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -1729,6 +1748,7 @@ static const struct net_device_ops veth_netdev_ops = {
static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
.xmo_rx_timestamp = veth_xdp_rx_timestamp,
.xmo_rx_hash = veth_xdp_rx_hash,
+ .xmo_rx_vlan_tag = veth_xdp_rx_vlan_tag,
};
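
veth thereby implements the third XDP RX metadata hint next to timestamp and hash. On the BPF side the hint is read through the matching kfunc; a sketch, assuming the kfunc name and signature from the same 6.8 metadata series:

    extern int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
                                            __be16 *vlan_proto,
                                            __u16 *vlan_tci) __ksym;

    SEC("xdp")
    int read_vlan(struct xdp_md *ctx)
    {
        __be16 proto;
        __u16 tci;

        /* -ENODATA mirrors the driver path above: no skb, no tag. */
        if (!bpf_xdp_metadata_rx_vlan_tag(ctx, &proto, &tci))
            bpf_printk("vlan proto %x tci %u", bpf_ntohs(proto), tci);
        return XDP_PASS;
    }
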
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1caf21fd5..ec14bf2a9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -19,6 +19,7 @@
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
+#include <linux/dim.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
@@ -172,6 +173,17 @@ struct receive_queue {
struct virtnet_rq_stats stats;
+ /* The number of rx notifications */
+ u16 calls;
+
+ /* Is dynamic interrupt moderation enabled? */
+ bool dim_enabled;
+
+ /* Dynamic Interrupt Moderation */
+ struct dim dim;
+
+ u32 packets_in_napi;
+
struct virtnet_interrupt_coalesce intr_coal;
/* Chain pages by the private ptr. */
@@ -305,6 +317,9 @@ struct virtnet_info {
u8 duplex;
u32 speed;
+ /* Is rx dynamic interrupt moderation enabled? */
+ bool rx_dim_enabled;
+
/* Interrupt coalescing settings */
struct virtnet_interrupt_coalesce intr_coal_tx;
struct virtnet_interrupt_coalesce intr_coal_rx;
@@ -441,7 +456,7 @@ static void virtqueue_napi_schedule(struct napi_struct *napi,
}
}
-static void virtqueue_napi_complete(struct napi_struct *napi,
+static bool virtqueue_napi_complete(struct napi_struct *napi,
struct virtqueue *vq, int processed)
{
int opaque;
@@ -450,9 +465,13 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
if (napi_complete_done(napi, processed)) {
if (unlikely(virtqueue_poll(vq, opaque)))
virtqueue_napi_schedule(napi, vq);
+ else
+ return true;
} else {
virtqueue_disable_cb(vq);
}
+
+ return false;
}
static void skb_xmit_done(struct virtqueue *vq)
@@ -2010,6 +2029,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
+ rq->calls++;
virtqueue_napi_schedule(&rq->napi, rvq);
}
@@ -2150,6 +2170,24 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
}
}
+static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ struct dim_sample cur_sample = {};
+
+ if (!rq->packets_in_napi)
+ return;
+
+ u64_stats_update_begin(&rq->stats.syncp);
+ dim_update_sample(rq->calls,
+ u64_stats_read(&rq->stats.packets),
+ u64_stats_read(&rq->stats.bytes),
+ &cur_sample);
+ u64_stats_update_end(&rq->stats.syncp);
+
+ net_dim(&rq->dim, cur_sample);
+ rq->packets_in_napi = 0;
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
@@ -2158,17 +2196,22 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
struct send_queue *sq;
unsigned int received;
unsigned int xdp_xmit = 0;
+ bool napi_complete;
virtnet_poll_cleantx(rq);
received = virtnet_receive(rq, budget, &xdp_xmit);
+ rq->packets_in_napi += received;
if (xdp_xmit & VIRTIO_XDP_REDIR)
xdp_do_flush();
/* Out of packets? */
- if (received < budget)
- virtqueue_napi_complete(napi, rq->vq, received);
+ if (received < budget) {
+ napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+ if (napi_complete && rq->dim_enabled)
+ virtnet_rx_dim_update(vi, rq);
+ }
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_get_sq(vi);
@@ -2239,8 +2282,11 @@ err_enable_qp:
disable_delayed_refill(vi);
cancel_delayed_work_sync(&vi->refill);
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
+ cancel_work_sync(&vi->rq[i].dim.work);
+ }
+
return err;
}
@@ -2402,8 +2448,10 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
qindex = rq - vi->rq;
- if (running)
+ if (running) {
napi_disable(&rq->napi);
+ cancel_work_sync(&rq->dim.work);
+ }
err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
if (err)
@@ -2650,8 +2698,10 @@ static int virtnet_close(struct net_device *dev)
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
+ cancel_work_sync(&vi->rq[i].dim.work);
+ }
return 0;
}
@@ -2858,6 +2908,58 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
&vi->node_dead);
}
+static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+ u16 vqn, u32 max_usecs, u32 max_packets)
+{
+ struct scatterlist sgs;
+
+ vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
+ vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
+ vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+ VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
+ &sgs))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+ u16 queue, u32 max_usecs,
+ u32 max_packets)
+{
+ int err;
+
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
+ max_usecs, max_packets);
+ if (err)
+ return err;
+
+ vi->rq[queue].intr_coal.max_usecs = max_usecs;
+ vi->rq[queue].intr_coal.max_packets = max_packets;
+
+ return 0;
+}
+
+static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
+ u16 queue, u32 max_usecs,
+ u32 max_packets)
+{
+ int err;
+
+ err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
+ max_usecs, max_packets);
+ if (err)
+ return err;
+
+ vi->sq[queue].intr_coal.max_usecs = max_usecs;
+ vi->sq[queue].intr_coal.max_packets = max_packets;
+
+ return 0;
+}
+
static void virtnet_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2871,9 +2973,6 @@ static void virtnet_get_ringparam(struct net_device *dev,
ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
}
-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
- u16 vqn, u32 max_usecs, u32 max_packets);
-
static int virtnet_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -2915,14 +3014,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
* through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
* did not set any TX coalescing parameters, to 0.
*/
- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
- vi->intr_coal_tx.max_usecs,
- vi->intr_coal_tx.max_packets);
+ err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
+ vi->intr_coal_tx.max_usecs,
+ vi->intr_coal_tx.max_packets);
if (err)
return err;
-
- vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
- vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
}
if (ring->rx_pending != rx_pending) {
@@ -2931,14 +3027,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;
/* The reason is same as the transmit virtqueue reset */
- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
- vi->intr_coal_rx.max_usecs,
- vi->intr_coal_rx.max_packets);
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
+ vi->intr_coal_rx.max_usecs,
+ vi->intr_coal_rx.max_packets);
if (err)
return err;
-
- vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
- vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
}
}
@@ -3275,10 +3368,10 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
return 0;
}
-static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
- struct ethtool_coalesce *ec)
+static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
{
- struct scatterlist sgs_tx, sgs_rx;
+ struct scatterlist sgs_tx;
int i;
vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
@@ -3290,7 +3383,6 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
&sgs_tx))
return -EINVAL;
- /* Save parameters */
vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3298,6 +3390,40 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
}
+ return 0;
+}
+
+static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
+{
+ bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
+ struct scatterlist sgs_rx;
+ int i;
+
+ if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return -EOPNOTSUPP;
+
+ if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
+ ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
+ return -EINVAL;
+
+ if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
+ vi->rx_dim_enabled = true;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ vi->rq[i].dim_enabled = true;
+ return 0;
+ }
+
+ if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
+ vi->rx_dim_enabled = false;
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ vi->rq[i].dim_enabled = false;
+ }
+
+ /* Since the per-queue coalescing params can be set,
+ * we need to apply the new global params even if they
+ * are not updated.
+ */
vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
@@ -3307,7 +3433,6 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
&sgs_rx))
return -EINVAL;
- /* Save parameters */
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3318,21 +3443,55 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
return 0;
}
-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
- u16 vqn, u32 max_usecs, u32 max_packets)
+static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec)
{
- struct scatterlist sgs;
+ int err;
- vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
- vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
- vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
- sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+ err = virtnet_send_tx_notf_coal_cmds(vi, ec);
+ if (err)
+ return err;
- if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
- VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
- &sgs))
+ err = virtnet_send_rx_notf_coal_cmds(vi, ec);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
+ struct ethtool_coalesce *ec,
+ u16 queue)
+{
+ bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
+ bool cur_rx_dim = vi->rq[queue].dim_enabled;
+ u32 max_usecs, max_packets;
+ int err;
+
+ max_usecs = vi->rq[queue].intr_coal.max_usecs;
+ max_packets = vi->rq[queue].intr_coal.max_packets;
+
+ if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
+ ec->rx_max_coalesced_frames != max_packets))
return -EINVAL;
+ if (rx_ctrl_dim_on && !cur_rx_dim) {
+ vi->rq[queue].dim_enabled = true;
+ return 0;
+ }
+
+ if (!rx_ctrl_dim_on && cur_rx_dim)
+ vi->rq[queue].dim_enabled = false;
+
+ /* If no params are updated, userspace ethtool will
+ * reject the modification.
+ */
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
+ ec->rx_coalesce_usecs,
+ ec->rx_max_coalesced_frames);
+ if (err)
+ return err;
+
return 0;
}
@@ -3342,27 +3501,62 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
{
int err;
- err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
- ec->rx_coalesce_usecs,
- ec->rx_max_coalesced_frames);
+ err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
if (err)
return err;
- vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
- vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-
- err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
- ec->tx_coalesce_usecs,
- ec->tx_max_coalesced_frames);
+ err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
+ ec->tx_coalesce_usecs,
+ ec->tx_max_coalesced_frames);
if (err)
return err;
- vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
- vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
-
return 0;
}
+static void virtnet_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct receive_queue *rq = container_of(dim,
+ struct receive_queue, dim);
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct net_device *dev = vi->dev;
+ struct dim_cq_moder update_moder;
+ int i, qnum, err;
+
+ if (!rtnl_trylock())
+ return;
+
+ /* Each rxq's work is queued by "net_dim()->schedule_work()"
+ * in response to NAPI traffic changes. Note that dim->profile_ix
+ * for each rxq is updated prior to the queuing action.
+ * So we only need to traverse and update profiles for all rxqs
+ * in this work item, which holds the rtnl lock.
+ */
+ for (i = 0; i < vi->curr_queue_pairs; i++) {
+ rq = &vi->rq[i];
+ dim = &rq->dim;
+ qnum = rq - vi->rq;
+
+ if (!rq->dim_enabled)
+ continue;
+
+ update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ if (update_moder.usec != rq->intr_coal.max_usecs ||
+ update_moder.pkts != rq->intr_coal.max_packets) {
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+ update_moder.usec,
+ update_moder.pkts);
+ if (err)
+ pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+ dev->name, qnum);
+ dim->state = DIM_START_MEASURE;
+ }
+ }
+
+ rtnl_unlock();
+}
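
Taken together, the DIM plumbing added in this patch follows the usual net_dim consumer contract: feed a sample when NAPI really completes, then apply the suggested moderation from the scheduled work item. Condensed to its skeleton (illustrative; the functions above are the real flow):

    /* NAPI completion path: sample only when the queue goes idle. */
    dim_update_sample(rq->calls, packets, bytes, &sample);
    net_dim(&rq->dim, sample);              /* may schedule rq->dim.work */

    /* Work item: turn the chosen profile into device coalescing. */
    moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
    demo_apply_rx_coalescing(qnum, moder.usec, moder.pkts); /* hypothetical */
    dim->state = DIM_START_MEASURE;
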
+
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
{
/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
@@ -3444,6 +3638,7 @@ static int virtnet_get_coalesce(struct net_device *dev,
ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
+ ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3501,6 +3696,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
+ ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3548,41 +3744,60 @@ static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
}
-static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+static int virtnet_get_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh)
{
struct virtnet_info *vi = netdev_priv(dev);
int i;
- if (indir) {
+ if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- indir[i] = vi->ctrl->rss.indirection_table[i];
+ rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
}
- if (key)
- memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
+ if (rxfh->key)
+ memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
return 0;
}
-static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
+static int virtnet_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct virtnet_info *vi = netdev_priv(dev);
+ bool update = false;
int i;
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (indir) {
+ if (rxfh->indir) {
+ if (!vi->has_rss)
+ return -EOPNOTSUPP;
+
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->ctrl->rss.indirection_table[i] = indir[i];
+ vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ update = true;
}
- if (key)
- memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
- virtnet_commit_rss_command(vi);
+ if (rxfh->key) {
+ /* If either _F_HASH_REPORT or _F_RSS is negotiated, the
+ * device provides hash calculation capabilities, that is,
+ * hash_key is configured.
+ */
+ if (!vi->has_rss && !vi->has_rss_hash_report)
+ return -EOPNOTSUPP;
+
+ memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ update = true;
+ }
+
+ if (update)
+ virtnet_commit_rss_command(vi);
return 0;
}
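
Both rxfh conversions here follow the 6.8 ethtool API change that bundles the old (indir, key, hfunc) triplet into struct ethtool_rxfh_param. The shape drivers now implement, sketched:

    static int demo_get_rxfh(struct net_device *dev,
                             struct ethtool_rxfh_param *rxfh)
    {
        rxfh->hfunc = ETH_RSS_HASH_TOP;     /* always reported */
        if (rxfh->indir)                    /* NULL when not requested */
            demo_fill_indir(rxfh->indir);   /* hypothetical */
        if (rxfh->key)
            demo_copy_key(rxfh->key);       /* hypothetical */
        return 0;
    }
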
@@ -3626,7 +3841,7 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
static const struct ethtool_ops virtnet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
@@ -4204,6 +4419,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
+ INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
+ vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
@@ -4484,13 +4702,15 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
vi->has_rss = true;
- if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_indir_table_size =
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 98c22d7d8..7e8008d53 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -245,20 +245,20 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
for (j = 0; j < adapter->num_tx_queues; j++) {
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
- ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc);
+ ethtool_puts(&buf, vmxnet3_tq_dev_stats[i].desc);
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
- ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc);
+ ethtool_puts(&buf, vmxnet3_tq_driver_stats[i].desc);
}
for (j = 0; j < adapter->num_rx_queues; j++) {
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
- ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc);
+ ethtool_puts(&buf, vmxnet3_rq_dev_stats[i].desc);
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
- ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc);
+ ethtool_puts(&buf, vmxnet3_rq_driver_stats[i].desc);
}
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
- ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc);
+ ethtool_puts(&buf, vmxnet3_global_stats[i].desc);
}
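
ethtool_sprintf() treats its argument as a format string, which is wasteful (and fragile should a description ever contain '%') when the strings are fixed. ethtool_puts(), new in this release, copies the string verbatim; its behavior is roughly this simplified model:

    static void demo_ethtool_puts(u8 **data, const char *str)
    {
        strscpy(*data, str, ETH_GSTRING_LEN);  /* copy and NUL-terminate */
        *data += ETH_GSTRING_LEN;              /* advance to the next slot */
    }
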
netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
@@ -1136,27 +1136,26 @@ vmxnet3_get_rss_indir_size(struct net_device *netdev)
}
static int
-vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
+vmxnet3_get_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
unsigned int n = rssConf->indTableSize;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
- if (!p)
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+ if (!rxfh->indir)
return 0;
if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
return 0;
while (n--)
- p[n] = rssConf->indTable[n];
+ rxfh->indir[n] = rssConf->indTable[n];
return 0;
}
static int
-vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
- const u8 hfunc)
+vmxnet3_set_rss(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
unsigned long flags;
@@ -1164,13 +1163,14 @@ vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
/* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ if (rxfh->key ||
+ (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
- if (!p)
+ if (!rxfh->indir)
return 0;
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = p[i];
+ rssConf->indTable[i] = rxfh->indir[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 412c3c0b6..9ec46048d 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1615,6 +1615,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ return false;
+
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
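
is_valid_ether_addr() (linux/etherdevice.h) accepts only addresses that could belong to a real station, so the new check drops frames whose inner source MAC is multicast, broadcast or all-zero before any learning happens. Its definition is essentially:

    static inline bool is_valid_ether_addr(const u8 *addr)
    {
        /* Multicast (LSB of the first octet set, which includes
         * broadcast) and all-zero addresses are never valid sources. */
        return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
    }
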
@@ -2379,7 +2383,17 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
else
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
#if IS_ENABLED(CONFIG_IPV6)
- key.label = vxlan->cfg.label;
+ switch (vxlan->cfg.label_policy) {
+ case VXLAN_LABEL_FIXED:
+ key.label = vxlan->cfg.label;
+ break;
+ case VXLAN_LABEL_INHERIT:
+ key.label = ip_tunnel_get_flowlabel(old_iph, skb);
+ break;
+ default:
+ DEBUG_NET_WARN_ON_ONCE(1);
+ goto drop;
+ }
#endif
} else {
if (!info) {
@@ -2845,6 +2859,7 @@ static int vxlan_init(struct net_device *dev)
if (err)
goto err_gro_cells_destroy;
+ netdev_lockdep_set_classes(dev);
return 0;
err_gro_cells_destroy:
@@ -3225,6 +3240,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_get = vxlan_fdb_get,
.ndo_mdb_add = vxlan_mdb_add,
.ndo_mdb_del = vxlan_mdb_del,
+ .ndo_mdb_del_bulk = vxlan_mdb_del_bulk,
.ndo_mdb_dump = vxlan_mdb_dump,
.ndo_mdb_get = vxlan_mdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
@@ -3366,6 +3382,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_DF] = { .type = NLA_U8 },
[IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -3740,6 +3757,12 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
return -EINVAL;
}
+ if (conf->label_policy && !use_ipv6) {
+ NL_SET_ERR_MSG(extack,
+ "Label policy only applies to IPv6 VXLAN devices");
+ return -EINVAL;
+ }
+
if (conf->remote_ifindex) {
struct net_device *lowerdev;
@@ -4082,6 +4105,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_VXLAN_LABEL])
conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
IPV6_FLOWLABEL_MASK;
+ if (data[IFLA_VXLAN_LABEL_POLICY])
+ conf->label_policy = nla_get_u32(data[IFLA_VXLAN_LABEL_POLICY]);
if (data[IFLA_VXLAN_LEARNING]) {
err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LEARNING,
@@ -4398,6 +4423,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
+ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LABEL_POLICY */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
@@ -4471,6 +4497,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
+ nla_put_u32(skb, IFLA_VXLAN_LABEL_POLICY, vxlan->cfg.label_policy) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index eb4c580b5..60eb95a06 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -74,6 +74,14 @@ struct vxlan_mdb_config {
u8 rt_protocol;
};
+struct vxlan_mdb_flush_desc {
+ union vxlan_addr remote_ip;
+ __be32 src_vni;
+ __be32 remote_vni;
+ __be16 remote_port;
+ u8 rt_protocol;
+};
+
static const struct rhashtable_params vxlan_mdb_rht_params = {
.head_offset = offsetof(struct vxlan_mdb_entry, rhnode),
.key_offset = offsetof(struct vxlan_mdb_entry, key),
@@ -1306,6 +1314,145 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
return err;
}
+static const struct nla_policy
+vxlan_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
+ [MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
+ [MDBE_ATTR_DST] = NLA_POLICY_RANGE(NLA_BINARY,
+ sizeof(struct in_addr),
+ sizeof(struct in6_addr)),
+ [MDBE_ATTR_DST_PORT] = { .type = NLA_U16 },
+ [MDBE_ATTR_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+ [MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+ [MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
+};
+
+static int vxlan_mdb_flush_desc_init(struct vxlan_dev *vxlan,
+ struct vxlan_mdb_flush_desc *desc,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
+ struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+ int err;
+
+ if (entry->ifindex && entry->ifindex != vxlan->dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port net device");
+ return -EINVAL;
+ }
+
+ if (entry->vid) {
+ NL_SET_ERR_MSG_MOD(extack, "VID must not be specified");
+ return -EINVAL;
+ }
+
+ if (!tb[MDBA_SET_ENTRY_ATTRS])
+ return 0;
+
+ err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+ tb[MDBA_SET_ENTRY_ATTRS],
+ vxlan_mdbe_attrs_del_bulk_pol, extack);
+ if (err)
+ return err;
+
+ if (mdbe_attrs[MDBE_ATTR_STATE_MASK]) {
+ u8 state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);
+
+ if ((state_mask & MDB_PERMANENT) && !(entry->state & MDB_PERMANENT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only permanent MDB entries are supported");
+ return -EINVAL;
+ }
+ }
+
+ if (mdbe_attrs[MDBE_ATTR_RTPROT])
+ desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);
+
+ if (mdbe_attrs[MDBE_ATTR_DST])
+ vxlan_nla_get_addr(&desc->remote_ip, mdbe_attrs[MDBE_ATTR_DST]);
+
+ if (mdbe_attrs[MDBE_ATTR_DST_PORT])
+ desc->remote_port =
+ cpu_to_be16(nla_get_u16(mdbe_attrs[MDBE_ATTR_DST_PORT]));
+
+ if (mdbe_attrs[MDBE_ATTR_VNI])
+ desc->remote_vni =
+ cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_VNI]));
+
+ if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
+ desc->src_vni =
+ cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
+
+ return 0;
+}
+
+static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
+ struct vxlan_mdb_entry *mdb_entry,
+ const struct vxlan_mdb_flush_desc *desc)
+{
+ struct vxlan_mdb_remote *remote, *tmp;
+
+ list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list) {
+ struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
+ __be32 remote_vni;
+
+ if (desc->remote_ip.sa.sa_family &&
+ !vxlan_addr_equal(&desc->remote_ip, &rd->remote_ip))
+ continue;
+
+ /* Encapsulation is performed with the source VNI if the remote
+ * VNI is not set.
+ */
+ remote_vni = rd->remote_vni ? : mdb_entry->key.vni;
+ if (desc->remote_vni && desc->remote_vni != remote_vni)
+ continue;
+
+ if (desc->remote_port && desc->remote_port != rd->remote_port)
+ continue;
+
+ if (desc->rt_protocol &&
+ desc->rt_protocol != remote->rt_protocol)
+ continue;
+
+ vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
+ }
+}
+
+static void vxlan_mdb_flush(struct vxlan_dev *vxlan,
+ const struct vxlan_mdb_flush_desc *desc)
+{
+ struct vxlan_mdb_entry *mdb_entry;
+ struct hlist_node *tmp;
+
+ /* The removal of an entry cannot trigger the removal of another entry
+ * since entries are always added to the head of the list.
+ */
+ hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
+ if (desc->src_vni && desc->src_vni != mdb_entry->key.vni)
+ continue;
+
+ vxlan_mdb_remotes_flush(vxlan, mdb_entry, desc);
+ /* Entry will only be removed if its remotes list is empty. */
+ vxlan_mdb_entry_put(vxlan, mdb_entry);
+ }
+}
+
+int vxlan_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_mdb_flush_desc desc = {};
+ int err;
+
+ ASSERT_RTNL();
+
+ err = vxlan_mdb_flush_desc_init(vxlan, &desc, tb, extack);
+ if (err)
+ return err;
+
+ vxlan_mdb_flush(vxlan, &desc);
+
+ return 0;
+}
+
static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
@@ -1575,29 +1722,6 @@ static void vxlan_mdb_check_empty(void *ptr, void *arg)
WARN_ON_ONCE(1);
}
-static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
- struct vxlan_mdb_entry *mdb_entry)
-{
- struct vxlan_mdb_remote *remote, *tmp;
-
- list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list)
- vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
-}
-
-static void vxlan_mdb_entries_flush(struct vxlan_dev *vxlan)
-{
- struct vxlan_mdb_entry *mdb_entry;
- struct hlist_node *tmp;
-
- /* The removal of an entry cannot trigger the removal of another entry
- * since entries are always added to the head of the list.
- */
- hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
- vxlan_mdb_remotes_flush(vxlan, mdb_entry);
- vxlan_mdb_entry_put(vxlan, mdb_entry);
- }
-}
-
int vxlan_mdb_init(struct vxlan_dev *vxlan)
{
int err;
@@ -1613,7 +1737,9 @@ int vxlan_mdb_init(struct vxlan_dev *vxlan)
void vxlan_mdb_fini(struct vxlan_dev *vxlan)
{
- vxlan_mdb_entries_flush(vxlan);
+ struct vxlan_mdb_flush_desc desc = {};
+
+ vxlan_mdb_flush(vxlan, &desc);
WARN_ON_ONCE(vxlan->cfg.flags & VXLAN_F_MDB);
rhashtable_free_and_destroy(&vxlan->mdb_tbl, vxlan_mdb_check_empty,
NULL);
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index db679c380..b35d96b78 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -235,6 +235,8 @@ int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
+int vxlan_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
u32 seq, struct netlink_ext_ack *extack);
struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index dcb069dde..7dda87756 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -95,6 +95,8 @@ config HDLC_X25
comment "X.25/LAPB support is disabled"
depends on HDLC && (LAPB!=m || HDLC!=m) && LAPB!=y
+source "drivers/net/wan/framer/Kconfig"
+
config PCI200SYN
tristate "Goramo PCI200SYN support"
depends on HDLC && PCI
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 5bec8fae4..8119b49d1 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
+obj-y += framer/
+
obj-$(CONFIG_FARSYNC) += farsync.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
diff --git a/drivers/net/wan/framer/Kconfig b/drivers/net/wan/framer/Kconfig
new file mode 100644
index 000000000..dc83caef9
--- /dev/null
+++ b/drivers/net/wan/framer/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# FRAMER
+#
+
+menuconfig FRAMER
+ tristate "Framer Subsystem"
+ help
+ A framer is a component in charge of an E1/T1 line interface.
+ Usually connected to a TDM bus, it converts TDM frames to/from E1/T1
+ frames. It also provides information related to the E1/T1 line.
+ Used with HDLC, it allows the network to be reached through the
+ E1/T1 line.
+
+ This framework is designed to provide a generic interface for framer
+ devices present in the kernel. This layer provides the generic
+ API by which framer drivers can register framers with the framer
+ framework and framer users can obtain references to them.
+ All users of this framework should select this config.
+
+if FRAMER
+
+config GENERIC_FRAMER
+ bool
+
+config FRAMER_PEF2256
+ tristate "Lantiq PEF2256"
+ depends on OF
+ depends on HAS_IOMEM
+ select GENERIC_FRAMER
+ select MFD_CORE
+ select REGMAP_MMIO
+ help
+ Enable support for the Lantiq PEF2256 (FALC56) framer.
+ The PEF2256 is a framer and line interface between an analog
+ E1/T1/J1 line and a digital PCM bus.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called framer-pef2256.
+
+endif # FRAMER
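
For a sense of how a framer user drives this subsystem (the API is added below in framer-core.c), a minimal consumer sketch; the function name, DT property and simplified error handling are illustrative only:

    #include <linux/framer/framer.h>

    static int demo_consumer_probe(struct device *dev)   /* hypothetical */
    {
        struct framer_status status;
        struct framer *framer;
        int ret;

        framer = devm_framer_get(dev, "fsl,framer");  /* property name illustrative */
        if (IS_ERR(framer))
            return PTR_ERR(framer);

        ret = framer_init(framer);        /* must precede framer_power_on() */
        if (ret)
            return ret;

        ret = framer_power_on(framer);
        if (ret) {
            framer_exit(framer);
            return ret;
        }

        return framer_get_status(framer, &status);    /* e.g. line state */
    }
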
diff --git a/drivers/net/wan/framer/Makefile b/drivers/net/wan/framer/Makefile
new file mode 100644
index 000000000..3403f2b14
--- /dev/null
+++ b/drivers/net/wan/framer/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the framer drivers.
+#
+
+obj-$(CONFIG_GENERIC_FRAMER) += framer-core.o
+obj-$(CONFIG_FRAMER_PEF2256) += pef2256/
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
new file mode 100644
index 000000000..c04dc88bd
--- /dev/null
+++ b/drivers/net/wan/framer/framer-core.c
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic Framer framework.
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/framer/framer.h>
+#include <linux/framer/framer-provider.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+static struct class *framer_class;
+static DEFINE_MUTEX(framer_provider_mutex);
+static LIST_HEAD(framer_provider_list);
+static DEFINE_IDA(framer_ida);
+
+#define dev_to_framer(a) (container_of((a), struct framer, dev))
+
+int framer_pm_runtime_get(struct framer *framer)
+{
+ int ret;
+
+ if (!pm_runtime_enabled(&framer->dev))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get(&framer->dev);
+ if (ret < 0 && ret != -EINPROGRESS)
+ pm_runtime_put_noidle(&framer->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_pm_runtime_get);
+
+int framer_pm_runtime_get_sync(struct framer *framer)
+{
+ int ret;
+
+ if (!pm_runtime_enabled(&framer->dev))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(&framer->dev);
+ if (ret < 0)
+ pm_runtime_put_sync(&framer->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_pm_runtime_get_sync);
+
+int framer_pm_runtime_put(struct framer *framer)
+{
+ if (!pm_runtime_enabled(&framer->dev))
+ return -EOPNOTSUPP;
+
+ return pm_runtime_put(&framer->dev);
+}
+EXPORT_SYMBOL_GPL(framer_pm_runtime_put);
+
+int framer_pm_runtime_put_sync(struct framer *framer)
+{
+ if (!pm_runtime_enabled(&framer->dev))
+ return -EOPNOTSUPP;
+
+ return pm_runtime_put_sync(&framer->dev);
+}
+EXPORT_SYMBOL_GPL(framer_pm_runtime_put_sync);
+
+/**
+ * framer_init - framer internal initialization before framer operation
+ * @framer: the framer returned by framer_get()
+ *
+ * Used to allow the framer's driver to perform internal initialization,
+ * such as PLL block powering, clock initialization or anything that is
+ * required by the framer before it starts operating.
+ * Must be called before framer_power_on().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_init(struct framer *framer)
+{
+ bool start_polling = false;
+ int ret;
+
+ ret = framer_pm_runtime_get_sync(framer);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ return ret;
+ ret = 0; /* Override possible ret == -EOPNOTSUPP */
+
+ mutex_lock(&framer->mutex);
+ if (framer->power_count > framer->init_count)
+ dev_warn(&framer->dev, "framer_power_on was called before framer init\n");
+
+ if (framer->init_count == 0) {
+ if (framer->ops->init) {
+ ret = framer->ops->init(framer);
+ if (ret < 0) {
+ dev_err(&framer->dev, "framer init failed --> %d\n", ret);
+ goto out;
+ }
+ }
+ if (framer->ops->flags & FRAMER_FLAG_POLL_STATUS)
+ start_polling = true;
+ }
+ ++framer->init_count;
+
+out:
+ mutex_unlock(&framer->mutex);
+
+ if (!ret && start_polling) {
+ ret = framer_get_status(framer, &framer->prev_status);
+ if (ret < 0) {
+ dev_warn(&framer->dev, "framer get status failed --> %d\n", ret);
+ /* Will be retried on polling_work */
+ ret = 0;
+ }
+ queue_delayed_work(system_power_efficient_wq, &framer->polling_work, 1 * HZ);
+ }
+
+ framer_pm_runtime_put(framer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_init);
+
+/**
+ * framer_exit - Framer internal un-initialization
+ * @framer: the framer returned by framer_get()
+ *
+ * Must be called after framer_power_off().
+ */
+int framer_exit(struct framer *framer)
+{
+ int ret;
+
+ ret = framer_pm_runtime_get_sync(framer);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ return ret;
+ ret = 0; /* Override possible ret == -EOPNOTSUPP */
+
+ mutex_lock(&framer->mutex);
+ --framer->init_count;
+ if (framer->init_count == 0) {
+ if (framer->ops->flags & FRAMER_FLAG_POLL_STATUS) {
+ mutex_unlock(&framer->mutex);
+ cancel_delayed_work_sync(&framer->polling_work);
+ mutex_lock(&framer->mutex);
+ }
+
+ if (framer->ops->exit)
+ framer->ops->exit(framer);
+ }
+
+ mutex_unlock(&framer->mutex);
+ framer_pm_runtime_put(framer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_exit);
+
+/**
+ * framer_power_on - Enable the framer and enter proper operation
+ * @framer: the framer returned by framer_get()
+ *
+ * Must be called after framer_init().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_power_on(struct framer *framer)
+{
+ int ret;
+
+ if (framer->pwr) {
+ ret = regulator_enable(framer->pwr);
+ if (ret)
+ return ret;
+ }
+
+ ret = framer_pm_runtime_get_sync(framer);
+ if (ret < 0 && ret != -EOPNOTSUPP)
+ goto err_pm_sync;
+
+ mutex_lock(&framer->mutex);
+ if (framer->power_count == 0 && framer->ops->power_on) {
+ ret = framer->ops->power_on(framer);
+ if (ret < 0) {
+ dev_err(&framer->dev, "framer poweron failed --> %d\n", ret);
+ goto err_pwr_on;
+ }
+ }
+ ++framer->power_count;
+ mutex_unlock(&framer->mutex);
+ return 0;
+
+err_pwr_on:
+ mutex_unlock(&framer->mutex);
+ framer_pm_runtime_put_sync(framer);
+err_pm_sync:
+ if (framer->pwr)
+ regulator_disable(framer->pwr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_power_on);
+
+/**
+ * framer_power_off - Disable the framer.
+ * @framer: the framer returned by framer_get()
+ *
+ * Must be called before framer_exit().
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_power_off(struct framer *framer)
+{
+ int ret;
+
+ mutex_lock(&framer->mutex);
+ if (framer->power_count == 1 && framer->ops->power_off) {
+ ret = framer->ops->power_off(framer);
+ if (ret < 0) {
+ dev_err(&framer->dev, "framer poweroff failed --> %d\n", ret);
+ mutex_unlock(&framer->mutex);
+ return ret;
+ }
+ }
+ --framer->power_count;
+ mutex_unlock(&framer->mutex);
+ framer_pm_runtime_put(framer);
+
+ if (framer->pwr)
+ regulator_disable(framer->pwr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(framer_power_off);
+
+/**
+ * framer_get_status() - Gets the framer status
+ * @framer: the framer returned by framer_get()
+ * @status: the status to retrieve
+ *
+ * Used to get the framer status. framer_init() must have been called
+ * on the framer.
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ int ret;
+
+ if (!framer->ops->get_status)
+ return -EOPNOTSUPP;
+
+ /* Be sure to have known values (struct padding and future extensions) */
+ memset(status, 0, sizeof(*status));
+
+ mutex_lock(&framer->mutex);
+ ret = framer->ops->get_status(framer, status);
+ mutex_unlock(&framer->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_get_status);
+
+/**
+ * framer_set_config() - Sets the framer configuration
+ * @framer: the framer returned by framer_get()
+ * @config: the configuration to set
+ *
+ * Used to set the framer configuration. framer_init() must have been called
+ * on the framer.
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ int ret;
+
+ if (!framer->ops->set_config)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&framer->mutex);
+ ret = framer->ops->set_config(framer, config);
+ mutex_unlock(&framer->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_set_config);
+
+/**
+ * framer_get_config() - Gets the framer configuration
+ * @framer: the framer returned by framer_get()
+ * @config: the configuration to retrieve
+ *
+ * Used to get the framer configuration. framer_init() must have been called
+ * on the framer.
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ int ret;
+
+ if (!framer->ops->get_config)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&framer->mutex);
+ ret = framer->ops->get_config(framer, config);
+ mutex_unlock(&framer->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(framer_get_config);
+
+static void framer_polling_work(struct work_struct *work)
+{
+ struct framer *framer = container_of(work, struct framer, polling_work.work);
+ struct framer_status status;
+ int ret;
+
+ ret = framer_get_status(framer, &status);
+ if (ret) {
+ dev_err(&framer->dev, "polling, get status failed (%d)\n", ret);
+ goto end;
+ }
+ if (memcmp(&framer->prev_status, &status, sizeof(status))) {
+ blocking_notifier_call_chain(&framer->notifier_list,
+ FRAMER_EVENT_STATUS, NULL);
+ memcpy(&framer->prev_status, &status, sizeof(status));
+ }
+
+end:
+ /* Re-schedule task in 1 sec */
+ queue_delayed_work(system_power_efficient_wq, &framer->polling_work, 1 * HZ);
+}
+
+/**
+ * framer_notifier_register() - Registers a notifier
+ * @framer: the framer returned by framer_get()
+ * @nb: the notifier block to register
+ *
+ * Used to register a notifier block on framer events. framer_init() must have
+ * been called on the framer.
+ * The available framer events are present in enum framer_events.
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_notifier_register(struct framer *framer, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&framer->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(framer_notifier_register);
+
+/**
+ * framer_notifier_unregister() - Unregisters a notifier
+ * @framer: the framer returned by framer_get()
+ * @nb: the notifier block to unregister
+ *
+ * Used to unregister a notifier block. framer_init() must have
+ * been called on the framer.
+ *
+ * Return: %0 if successful, a negative error code otherwise
+ */
+int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&framer->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(framer_notifier_unregister);
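
A consumer typically pairs these with a notifier block so it can react to line-state changes detected by the polling loop above; a sketch (handler name is hypothetical, FRAMER_EVENT_STATUS is the event used by this file):

    static int demo_framer_event(struct notifier_block *nb,
                                 unsigned long action, void *data)
    {
        if (action == FRAMER_EVENT_STATUS)
            ;   /* re-read the line state via framer_get_status() */
        return NOTIFY_DONE;
    }

    /* registered with framer_notifier_register(framer, &demo_nb)
     * after framer_init() */
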
+
+static struct framer_provider *framer_provider_of_lookup(const struct device_node *node)
+{
+ struct framer_provider *framer_provider;
+
+ list_for_each_entry(framer_provider, &framer_provider_list, list) {
+ if (device_match_of_node(framer_provider->dev, node))
+ return framer_provider;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static struct framer *framer_of_get_from_provider(struct of_phandle_args *args)
+{
+ struct framer_provider *framer_provider;
+ struct framer *framer;
+
+ mutex_lock(&framer_provider_mutex);
+ framer_provider = framer_provider_of_lookup(args->np);
+ if (IS_ERR(framer_provider) || !try_module_get(framer_provider->owner)) {
+ framer = ERR_PTR(-EPROBE_DEFER);
+ goto end;
+ }
+
+ framer = framer_provider->of_xlate(framer_provider->dev, args);
+
+ module_put(framer_provider->owner);
+
+end:
+ mutex_unlock(&framer_provider_mutex);
+
+ return framer;
+}
+
+static struct framer *framer_of_get_byphandle(struct device_node *np, const char *propname,
+ int index)
+{
+ struct of_phandle_args args;
+ struct framer *framer;
+ int ret;
+
+ ret = of_parse_phandle_with_optional_args(np, propname, "#framer-cells", index, &args);
+ if (ret)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_device_is_available(args.np)) {
+ framer = ERR_PTR(-ENODEV);
+ goto out_node_put;
+ }
+
+ framer = framer_of_get_from_provider(&args);
+
+out_node_put:
+ of_node_put(args.np);
+
+ return framer;
+}
+
+static struct framer *framer_of_get_byparent(struct device_node *np, int index)
+{
+ struct of_phandle_args args;
+ struct framer *framer;
+
+ args.np = of_get_parent(np);
+ args.args_count = 1;
+ args.args[0] = index;
+
+ while (args.np) {
+ framer = framer_of_get_from_provider(&args);
+ if (IS_ERR(framer) && PTR_ERR(framer) != -EPROBE_DEFER) {
+ args.np = of_get_next_parent(args.np);
+ continue;
+ }
+ of_node_put(args.np);
+ return framer;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/**
+ * framer_get() - lookup and obtain a reference to a framer.
+ * @dev: device that requests the framer
+ * @con_id: name of the framer from device's point of view
+ *
+ * Returns the framer, after getting a refcount to it; or
+ * -ENODEV if there is no such framer. The caller is responsible for
+ * calling framer_put() to release that count.
+ */
+struct framer *framer_get(struct device *dev, const char *con_id)
+{
+ struct framer *framer = ERR_PTR(-ENODEV);
+ struct device_link *link;
+ int ret;
+
+ if (dev->of_node) {
+ if (con_id)
+ framer = framer_of_get_byphandle(dev->of_node, con_id, 0);
+ else
+ framer = framer_of_get_byparent(dev->of_node, 0);
+ }
+
+ if (IS_ERR(framer))
+ return framer;
+
+ get_device(&framer->dev);
+
+ if (!try_module_get(framer->ops->owner)) {
+ ret = -EPROBE_DEFER;
+ goto err_put_device;
+ }
+
+ link = device_link_add(dev, &framer->dev, DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "failed to create device_link to %s\n", dev_name(&framer->dev));
+ ret = -EPROBE_DEFER;
+ goto err_module_put;
+ }
+
+ return framer;
+
+err_module_put:
+ module_put(framer->ops->owner);
+err_put_device:
+ put_device(&framer->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(framer_get);
+
+/**
+ * framer_put() - release the framer
+ * @dev: device that wants to release this framer
+ * @framer: the framer returned by framer_get()
+ *
+ * Releases a refcount the caller received from framer_get().
+ */
+void framer_put(struct device *dev, struct framer *framer)
+{
+ device_link_remove(dev, &framer->dev);
+
+ module_put(framer->ops->owner);
+ put_device(&framer->dev);
+}
+EXPORT_SYMBOL_GPL(framer_put);
+
+static void devm_framer_put(struct device *dev, void *res)
+{
+ struct framer *framer = *(struct framer **)res;
+
+ framer_put(dev, framer);
+}
+
+/**
+ * devm_framer_get() - lookup and obtain a reference to a framer.
+ * @dev: device that requests this framer
+ * @con_id: name of the framer from device's point of view
+ *
+ * Gets the framer using framer_get(), and associates a device with it using
+ * devres. On driver detach, framer_put() is invoked on the devres
+ * data and the devres data is freed.
+ */
+struct framer *devm_framer_get(struct device *dev, const char *con_id)
+{
+ struct framer **ptr, *framer;
+
+ ptr = devres_alloc(devm_framer_put, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ framer = framer_get(dev, con_id);
+ if (!IS_ERR(framer)) {
+ *ptr = framer;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ return framer;
+ }
+
+ return framer;
+}
+EXPORT_SYMBOL_GPL(devm_framer_get);
+
+/**
+ * devm_framer_optional_get() - lookup and obtain a reference to an optional
+ * framer.
+ * @dev: device that requests this framer
+ * @con_id: name of the framer from device's point of view
+ *
+ * Same as devm_framer_get() except that if the framer does not exist, it is not
+ * considered an error and -ENODEV will not be returned. Instead, NULL is
+ * returned.
+ */
+struct framer *devm_framer_optional_get(struct device *dev, const char *con_id)
+{
+ struct framer *framer = devm_framer_get(dev, con_id);
+
+ if (PTR_ERR(framer) == -ENODEV)
+ framer = NULL;
+
+ return framer;
+}
+EXPORT_SYMBOL_GPL(devm_framer_optional_get);
+
+static void framer_notify_status_work(struct work_struct *work)
+{
+ struct framer *framer = container_of(work, struct framer, notify_status_work);
+
+ blocking_notifier_call_chain(&framer->notifier_list, FRAMER_EVENT_STATUS, NULL);
+}
+
+void framer_notify_status_change(struct framer *framer)
+{
+ /* Can be called from atomic context -> just schedule a task to call
+ * blocking notifiers
+ */
+ queue_work(system_power_efficient_wq, &framer->notify_status_work);
+}
+EXPORT_SYMBOL_GPL(framer_notify_status_change);
+
+/**
+ * framer_create() - create a new framer
+ * @dev: device that is creating the new framer
+ * @node: device node of the framer; defaults to dev->of_node if NULL
+ * @ops: function pointers for performing framer operations
+ *
+ * Called to create a framer using framer framework.
+ */
+struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ struct framer *framer;
+ int ret;
+ int id;
+
+ /* get_status() is mandatory if the provider asks for polling status */
+ if (WARN_ON((ops->flags & FRAMER_FLAG_POLL_STATUS) && !ops->get_status))
+ return ERR_PTR(-EINVAL);
+
+ framer = kzalloc(sizeof(*framer), GFP_KERNEL);
+ if (!framer)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_alloc(&framer_ida, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "unable to get id\n");
+ ret = id;
+ goto free_framer;
+ }
+
+ device_initialize(&framer->dev);
+ mutex_init(&framer->mutex);
+ INIT_WORK(&framer->notify_status_work, framer_notify_status_work);
+ INIT_DELAYED_WORK(&framer->polling_work, framer_polling_work);
+ BLOCKING_INIT_NOTIFIER_HEAD(&framer->notifier_list);
+
+ framer->dev.class = framer_class;
+ framer->dev.parent = dev;
+ framer->dev.of_node = node ? node : dev->of_node;
+ framer->id = id;
+ framer->ops = ops;
+
+ ret = dev_set_name(&framer->dev, "framer-%s.%d", dev_name(dev), id);
+ if (ret)
+ goto put_dev;
+
+ /* framer-supply */
+ framer->pwr = regulator_get_optional(&framer->dev, "framer");
+ if (IS_ERR(framer->pwr)) {
+ ret = PTR_ERR(framer->pwr);
+ if (ret == -EPROBE_DEFER)
+ goto put_dev;
+
+ framer->pwr = NULL;
+ }
+
+ ret = device_add(&framer->dev);
+ if (ret)
+ goto put_dev;
+
+ if (pm_runtime_enabled(dev)) {
+ pm_runtime_enable(&framer->dev);
+ pm_runtime_no_callbacks(&framer->dev);
+ }
+
+ return framer;
+
+put_dev:
+ put_device(&framer->dev); /* calls framer_release() which frees resources */
+ return ERR_PTR(ret);
+
+free_framer:
+ kfree(framer);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(framer_create);
+
+/**
+ * framer_destroy() - destroy the framer
+ * @framer: the framer to be destroyed
+ *
+ * Called to destroy the framer.
+ */
+void framer_destroy(struct framer *framer)
+{
+ /* polling_work should already be stopped but, if framer_exit() was not
+ * called (a bug), this is the last chance to stop it.
+ */
+ cancel_delayed_work_sync(&framer->polling_work);
+ cancel_work_sync(&framer->notify_status_work);
+ pm_runtime_disable(&framer->dev);
+ device_unregister(&framer->dev); /* calls framer_release() which frees resources */
+}
+EXPORT_SYMBOL_GPL(framer_destroy);
+
+static void devm_framer_destroy(struct device *dev, void *res)
+{
+ struct framer *framer = *(struct framer **)res;
+
+ framer_destroy(framer);
+}
+
+/**
+ * devm_framer_create() - create a new framer
+ * @dev: device that is creating the new framer
+ * @node: device node of the framer
+ * @ops: function pointers for performing framer operations
+ *
+ * Creates a new framer device adding it to the framer class.
+ * While at that, it also associates the device with the framer using devres.
+ * On driver detach, the release function is invoked on the devres data and
+ * the devres data is freed.
+ */
+struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ struct framer **ptr, *framer;
+
+ ptr = devres_alloc(devm_framer_destroy, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ framer = framer_create(dev, node, ops);
+ if (!IS_ERR(framer)) {
+ *ptr = framer;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return framer;
+}
+EXPORT_SYMBOL_GPL(devm_framer_create);
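+
+/*
+ * Example (editorial sketch): a provider fills a framer_ops structure and
+ * creates the framer, usually through this devres variant. The foo_*()
+ * callbacks are hypothetical; compare pef2256_framer_ops below.
+ *
+ *	static const struct framer_ops foo_framer_ops = {
+ *		.owner      = THIS_MODULE,
+ *		.get_status = foo_get_status,
+ *		.get_config = foo_get_config,
+ *		.set_config = foo_set_config,
+ *	};
+ *
+ *	framer = devm_framer_create(dev, NULL, &foo_framer_ops);
+ *	if (IS_ERR(framer))
+ *		return PTR_ERR(framer);
+ */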
+
+/**
+ * framer_provider_simple_of_xlate() - returns the framer instance from framer provider
+ * @dev: the framer provider device
+ * @args: of_phandle_args (not used here)
+ *
+ * Intended to be used by framer providers for the common case where #framer-cells
+ * is 0. For other cases, where #framer-cells is greater than 0, the framer provider
+ * should provide a custom of_xlate function that reads the *args* and returns
+ * the appropriate framer.
+ */
+struct framer *framer_provider_simple_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct class_dev_iter iter;
+ struct framer *framer;
+
+ class_dev_iter_init(&iter, framer_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ framer = dev_to_framer(dev);
+ if (args->np != framer->dev.of_node)
+ continue;
+
+ class_dev_iter_exit(&iter);
+ return framer;
+ }
+
+ class_dev_iter_exit(&iter);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
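+
+/*
+ * Example (editorial sketch): a device tree fragment that this simple
+ * translation can resolve, i.e. a provider with #framer-cells = <0>.
+ * The node names, compatible string and consumer property are hypothetical.
+ *
+ *	framer: framer@2000 {
+ *		compatible = "vendor,foo-framer";
+ *		#framer-cells = <0>;
+ *	};
+ *
+ *	consumer@3000 {
+ *		...
+ *		framers = <&framer>;
+ *	};
+ */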
+
+/**
+ * __framer_provider_of_register() - create/register framer provider with the framework
+ * @dev: struct device of the framer provider
+ * @owner: the module owner containing of_xlate
+ * @of_xlate: function pointer to obtain framer instance from framer provider
+ *
+ * Creates struct framer_provider from dev and of_xlate function pointer.
+ * This is used on device tree (DT) based systems to find the framer instance
+ * from the framer provider.
+ */
+struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ struct framer_provider *framer_provider;
+
+ framer_provider = kzalloc(sizeof(*framer_provider), GFP_KERNEL);
+ if (!framer_provider)
+ return ERR_PTR(-ENOMEM);
+
+ framer_provider->dev = dev;
+ framer_provider->owner = owner;
+ framer_provider->of_xlate = of_xlate;
+
+ of_node_get(framer_provider->dev->of_node);
+
+ mutex_lock(&framer_provider_mutex);
+ list_add_tail(&framer_provider->list, &framer_provider_list);
+ mutex_unlock(&framer_provider_mutex);
+
+ return framer_provider;
+}
+EXPORT_SYMBOL_GPL(__framer_provider_of_register);
+
+/**
+ * framer_provider_of_unregister() - unregister framer provider from the framework
+ * @framer_provider: framer provider returned by framer_provider_of_register()
+ *
+ * Removes the framer_provider created using framer_provider_of_register().
+ */
+void framer_provider_of_unregister(struct framer_provider *framer_provider)
+{
+ mutex_lock(&framer_provider_mutex);
+ list_del(&framer_provider->list);
+ mutex_unlock(&framer_provider_mutex);
+
+ of_node_put(framer_provider->dev->of_node);
+ kfree(framer_provider);
+}
+EXPORT_SYMBOL_GPL(framer_provider_of_unregister);
+
+static void devm_framer_provider_of_unregister(struct device *dev, void *res)
+{
+ struct framer_provider *framer_provider = *(struct framer_provider **)res;
+
+ framer_provider_of_unregister(framer_provider);
+}
+
+/**
+ * __devm_framer_provider_of_register() - create/register framer provider with
+ * the framework
+ * @dev: struct device of the framer provider
+ * @owner: the module owner containing of_xlate
+ * @of_xlate: function pointer to obtain framer instance from framer provider
+ *
+ * Creates struct framer_provider from dev and of_xlate function pointer.
+ * This is used on device tree (DT) based systems to find the framer instance
+ * from the framer provider. While at that, it also associates the device with
+ * the framer provider using devres. On driver detach, the release function is
+ * invoked on the devres data and the devres data is freed.
+ */
+struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ struct framer_provider **ptr, *framer_provider;
+
+ ptr = devres_alloc(devm_framer_provider_of_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ framer_provider = __framer_provider_of_register(dev, owner, of_xlate);
+ if (!IS_ERR(framer_provider)) {
+ *ptr = framer_provider;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return framer_provider;
+}
+EXPORT_SYMBOL_GPL(__devm_framer_provider_of_register);
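+
+/*
+ * Example (editorial sketch): providers normally register through the
+ * devm_framer_provider_of_register() wrapper, as the PEF2256 driver below
+ * does with the simple translation function:
+ *
+ *	framer_provider = devm_framer_provider_of_register(dev,
+ *				framer_provider_simple_of_xlate);
+ *	if (IS_ERR(framer_provider))
+ *		return PTR_ERR(framer_provider);
+ */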
+
+/**
+ * framer_release() - release the framer
+ * @dev: the dev member within framer
+ *
+ * Called from the embedded kobject as the release method when the last
+ * reference to the device is dropped.
+ */
+static void framer_release(struct device *dev)
+{
+ struct framer *framer;
+
+ framer = dev_to_framer(dev);
+ regulator_put(framer->pwr);
+ ida_free(&framer_ida, framer->id);
+ kfree(framer);
+}
+
+static int __init framer_core_init(void)
+{
+ framer_class = class_create("framer");
+ if (IS_ERR(framer_class)) {
+ pr_err("failed to create framer class (%pe)\n", framer_class);
+ return PTR_ERR(framer_class);
+ }
+
+ framer_class->dev_release = framer_release;
+
+ return 0;
+}
+device_initcall(framer_core_init);
diff --git a/drivers/net/wan/framer/pef2256/Makefile b/drivers/net/wan/framer/pef2256/Makefile
new file mode 100644
index 000000000..f4d1208dd
--- /dev/null
+++ b/drivers/net/wan/framer/pef2256/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the pef2256 driver.
+#
+
+obj-$(CONFIG_FRAMER_PEF2256) += framer-pef2256.o
+
+framer-pef2256-objs := pef2256.o
diff --git a/drivers/net/wan/framer/pef2256/pef2256-regs.h b/drivers/net/wan/framer/pef2256/pef2256-regs.h
new file mode 100644
index 000000000..5d3183c91
--- /dev/null
+++ b/drivers/net/wan/framer/pef2256/pef2256-regs.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PEF2256 registers definition
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __PEF2256_REGS_H__
+#define __PEF2256_REGS_H__
+
+#include "linux/bitfield.h"
+
+/* Command Register */
+#define PEF2256_CMDR 0x02
+#define PEF2256_CMDR_RRES BIT(6)
+#define PEF2256_CMDR_XRES BIT(4)
+#define PEF2256_CMDR_SRES BIT(0)
+
+/* Interrupt Mask Register 0..5 */
+#define PEF2256_IMR0 0x14
+#define PEF2256_IMR1 0x15
+#define PEF2256_IMR2 0x16
+#define PEF2256_IMR3 0x17
+#define PEF2256_IMR4 0x18
+#define PEF2256_IMR5 0x19
+
+/* Framer Mode Register 0 */
+#define PEF2256_FMR0 0x1C
+#define PEF2256_FMR0_XC_MASK GENMASK(7, 6)
+#define PEF2256_FMR0_XC_NRZ FIELD_PREP_CONST(PEF2256_FMR0_XC_MASK, 0x0)
+#define PEF2256_FMR0_XC_CMI FIELD_PREP_CONST(PEF2256_FMR0_XC_MASK, 0x1)
+#define PEF2256_FMR0_XC_AMI FIELD_PREP_CONST(PEF2256_FMR0_XC_MASK, 0x2)
+#define PEF2256_FMR0_XC_HDB3 FIELD_PREP_CONST(PEF2256_FMR0_XC_MASK, 0x3)
+#define PEF2256_FMR0_RC_MASK GENMASK(5, 4)
+#define PEF2256_FMR0_RC_NRZ FIELD_PREP_CONST(PEF2256_FMR0_RC_MASK, 0x0)
+#define PEF2256_FMR0_RC_CMI FIELD_PREP_CONST(PEF2256_FMR0_RC_MASK, 0x1)
+#define PEF2256_FMR0_RC_AMI FIELD_PREP_CONST(PEF2256_FMR0_RC_MASK, 0x2)
+#define PEF2256_FMR0_RC_HDB3 FIELD_PREP_CONST(PEF2256_FMR0_RC_MASK, 0x3)
+
+/* Framer Mode Register 1 */
+#define PEF2256_FMR1 0x1D
+#define PEF2256_FMR1_XFS BIT(3)
+#define PEF2256_FMR1_ECM BIT(2)
+/* SSD is a 2-bit field. The other bit is in the SIC1 register */
+#define PEF2256_FMR1_SSD_MASK GENMASK(1, 1)
+#define PEF2256_FMR1_SSD_2048 FIELD_PREP_CONST(PEF2256_FMR1_SSD_MASK, 0x0)
+#define PEF2256_FMR1_SSD_4096 FIELD_PREP_CONST(PEF2256_FMR1_SSD_MASK, 0x1)
+#define PEF2256_FMR1_SSD_8192 FIELD_PREP_CONST(PEF2256_FMR1_SSD_MASK, 0x0)
+#define PEF2256_FMR1_SSD_16384 FIELD_PREP_CONST(PEF2256_FMR1_SSD_MASK, 0x1)
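+
+/*
+ * Editorial note: the resulting 2-bit SSD encoding, with SIC1 bit 6 as the
+ * most significant bit and FMR1 bit 1 as the least significant bit, is:
+ *
+ *	data rate (bps)		SIC1.SSD	FMR1.SSD
+ *	2048000			0		0
+ *	4096000			0		1
+ *	8192000			1		0
+ *	16384000		1		1
+ *
+ * pef2256_setup_e1_system() programs both halves consistently.
+ */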
+
+/* Framer Mode Register 2 */
+#define PEF2256_FMR2 0x1E
+#define PEF2256_FMR2_RFS_MASK GENMASK(7, 6)
+#define PEF2256_FMR2_RFS_DOUBLEFRAME FIELD_PREP_CONST(PEF2256_FMR2_RFS_MASK, 0x0)
+#define PEF2256_FMR2_RFS_CRC4_MULTIFRAME FIELD_PREP_CONST(PEF2256_FMR2_RFS_MASK, 0x2)
+#define PEF2256_FMR2_RFS_AUTO_MULTIFRAME FIELD_PREP_CONST(PEF2256_FMR2_RFS_MASK, 0x3)
+#define PEF2256_FMR2_AXRA BIT(1)
+
+/* Transmit Service Word */
+#define PEF2256_XSW 0x20
+#define PEF2256_XSW_XSIS BIT(7)
+#define PEF2256_XSW_XTM BIT(6)
+#define PEF2256_XSW_XY_MASK GENMASK(5, 0)
+#define PEF2256_XSW_XY(_v) FIELD_PREP(PEF2256_XSW_XY_MASK, _v)
+
+/* Transmit Spare Bits */
+#define PEF2256_XSP 0x21
+#define PEF2256_XSP_XSIF BIT(2)
+
+/* Transmit Control 0..1 */
+#define PEF2256_XC0 0x22
+#define PEF2256_XC1 0x23
+
+/* Receive Control 0 */
+#define PEF2256_RC0 0x24
+#define PEF2256_RC0_SWD BIT(7)
+#define PEF2256_RC0_ASY4 BIT(6)
+
+/* Receive Control 1 */
+#define PEF2256_RC1 0x25
+
+/* Transmit Pulse Mask 0..1 */
+#define PEF2256_XPM0 0x26
+#define PEF2256_XPM1 0x27
+
+/* Transmit Pulse Mask 2 */
+#define PEF2256_XPM2 0x28
+#define PEF2256_XPM2_XLT BIT(6)
+
+/* Transparent Service Word Mask */
+#define PEF2256_TSWM 0x29
+
+/* Line Interface Mode 0 */
+#define PEF2256_LIM0 0x36
+#define PEF2256_2X_LIM0_BIT3 BIT(3) /* v2.x, described as a forced '1' bit */
+#define PEF2256_LIM0_MAS BIT(0)
+
+/* Line Interface Mode 1 */
+#define PEF2256_LIM1 0x37
+#define PEF2256_12_LIM1_RIL_MASK GENMASK(6, 4)
+#define PEF2256_12_LIM1_RIL_910 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x0)
+#define PEF2256_12_LIM1_RIL_740 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x1)
+#define PEF2256_12_LIM1_RIL_590 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x2)
+#define PEF2256_12_LIM1_RIL_420 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x3)
+#define PEF2256_12_LIM1_RIL_320 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x4)
+#define PEF2256_12_LIM1_RIL_210 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x5)
+#define PEF2256_12_LIM1_RIL_160 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x6)
+#define PEF2256_12_LIM1_RIL_100 FIELD_PREP_CONST(PEF2256_12_LIM1_RIL_MASK, 0x7)
+#define PEF2256_2X_LIM1_RIL_MASK GENMASK(6, 4)
+#define PEF2256_2X_LIM1_RIL_2250 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x0)
+#define PEF2256_2X_LIM1_RIL_1100 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x1)
+#define PEF2256_2X_LIM1_RIL_600 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x2)
+#define PEF2256_2X_LIM1_RIL_350 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x3)
+#define PEF2256_2X_LIM1_RIL_210 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x4)
+#define PEF2256_2X_LIM1_RIL_140 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x5)
+#define PEF2256_2X_LIM1_RIL_100 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x6)
+#define PEF2256_2X_LIM1_RIL_50 FIELD_PREP_CONST(PEF2256_2X_LIM1_RIL_MASK, 0x7)
+
+/* Pulse Count Detection */
+#define PEF2256_PCD 0x38
+
+/* Pulse Count Recovery */
+#define PEF2256_PCR 0x39
+
+/* Line Interface Mode 2 */
+#define PEF2256_LIM2 0x3A
+#define PEF2256_LIM2_SLT_MASK GENMASK(5, 4)
+#define PEF2256_LIM2_SLT_THR55 FIELD_PREP_CONST(PEF2256_LIM2_SLT_MASK, 0x0)
+#define PEF2256_LIM2_SLT_THR67 FIELD_PREP_CONST(PEF2256_LIM2_SLT_MASK, 0x1)
+#define PEF2256_LIM2_SLT_THR50 FIELD_PREP_CONST(PEF2256_LIM2_SLT_MASK, 0x2)
+#define PEF2256_LIM2_SLT_THR45 FIELD_PREP_CONST(PEF2256_LIM2_SLT_MASK, 0x3)
+#define PEF2256_LIM2_ELT BIT(2)
+
+/* System Interface Control 1 */
+#define PEF2256_SIC1 0x3E
+#define PEF2256_SIC1_SSC_MASK (BIT(7) | BIT(3))
+#define PEF2256_SIC1_SSC_2048 (0)
+#define PEF2256_SIC1_SSC_4096 BIT(3)
+#define PEF2256_SIC1_SSC_8192 BIT(7)
+#define PEF2256_SIC1_SSC_16384 (BIT(7) | BIT(3))
+/* SSD is a 2-bit field. The other bit is in the FMR1 register */
+#define PEF2256_SIC1_SSD_MASK GENMASK(6, 6)
+#define PEF2256_SIC1_SSD_2048 FIELD_PREP_CONST(PEF2256_SIC1_SSD_MASK, 0x0)
+#define PEF2256_SIC1_SSD_4096 FIELD_PREP_CONST(PEF2256_SIC1_SSD_MASK, 0x0)
+#define PEF2256_SIC1_SSD_8192 FIELD_PREP_CONST(PEF2256_SIC1_SSD_MASK, 0x1)
+#define PEF2256_SIC1_SSD_16384 FIELD_PREP_CONST(PEF2256_SIC1_SSD_MASK, 0x1)
+#define PEF2256_SIC1_RBS_MASK GENMASK(5, 4)
+#define PEF2256_SIC1_RBS_2FRAMES FIELD_PREP_CONST(PEF2256_SIC1_RBS_MASK, 0x0)
+#define PEF2256_SIC1_RBS_1FRAME FIELD_PREP_CONST(PEF2256_SIC1_RBS_MASK, 0x1)
+#define PEF2256_SIC1_RBS_96BITS FIELD_PREP_CONST(PEF2256_SIC1_RBS_MASK, 0x2)
+#define PEF2256_SIC1_RBS_BYPASS FIELD_PREP_CONST(PEF2256_SIC1_RBS_MASK, 0x3)
+#define PEF2256_SIC1_XBS_MASK GENMASK(1, 0)
+#define PEF2256_SIC1_XBS_BYPASS FIELD_PREP_CONST(PEF2256_SIC1_XBS_MASK, 0x0)
+#define PEF2256_SIC1_XBS_1FRAME FIELD_PREP_CONST(PEF2256_SIC1_XBS_MASK, 0x1)
+#define PEF2256_SIC1_XBS_2FRAMES FIELD_PREP_CONST(PEF2256_SIC1_XBS_MASK, 0x2)
+#define PEF2256_SIC1_XBS_96BITS FIELD_PREP_CONST(PEF2256_SIC1_XBS_MASK, 0x3)
+
+/* System Interface Control 2 */
+#define PEF2256_SIC2 0x3F
+#define PEF2256_SIC2_SICS_MASK GENMASK(3, 1)
+#define PEF2256_SIC2_SICS(_v) FIELD_PREP(PEF2256_SIC2_SICS_MASK, _v)
+
+/* System Interface Control 3 */
+#define PEF2256_SIC3 0x40
+#define PEF2256_SIC3_RTRI BIT(5)
+#define PEF2256_SIC3_RESX BIT(3)
+#define PEF2256_SIC3_RESR BIT(2)
+
+/* Clock Mode Register 1 */
+#define PEF2256_CMR1 0x44
+#define PEF2256_CMR1_RS_MASK GENMASK(5, 4)
+#define PEF2256_CMR1_RS_DPLL FIELD_PREP_CONST(PEF2256_CMR1_RS_MASK, 0x0)
+#define PEF2256_CMR1_RS_DPLL_LOS_HIGH FIELD_PREP_CONST(PEF2256_CMR1_RS_MASK, 0x1)
+#define PEF2256_CMR1_RS_DCOR_2048 FIELD_PREP_CONST(PEF2256_CMR1_RS_MASK, 0x2)
+#define PEF2256_CMR1_RS_DCOR_8192 FIELD_PREP_CONST(PEF2256_CMR1_RS_MASK, 0x3)
+#define PEF2256_CMR1_DCS BIT(3)
+
+/* Clock Mode Register 2 */
+#define PEF2256_CMR2 0x45
+#define PEF2256_CMR2_DCOXC BIT(5)
+
+/* Global Configuration Register */
+#define PEF2256_GCR 0x46
+#define PEF2256_GCR_SCI BIT(6)
+#define PEF2256_GCR_ECMC BIT(4)
+
+/* Port Configuration 5 */
+#define PEF2256_PC5 0x84
+#define PEF2256_PC5_CRP BIT(0)
+
+/* Global Port Configuration 1 */
+#define PEF2256_GPC1 0x85
+#define PEF2256_GPC1_CSFP_MASK GENMASK(7, 5)
+#define PEF2256_GPC1_CSFP_SEC_IN_HIGH FIELD_PREP_CONST(PEF2256_GPC1_CSFP_MASK, 0x0)
+#define PEF2256_GPC1_CSFP_SEC_OUT_HIGH FIELD_PREP_CONST(PEF2256_GPC1_CSFP_MASK, 0x1)
+#define PEF2256_GPC1_CSFP_FSC_OUT_HIGH FIELD_PREP_CONST(PEF2256_GPC1_CSFP_MASK, 0x2)
+#define PEF2256_GPC1_CSFP_FSC_OUT_LOW FIELD_PREP_CONST(PEF2256_GPC1_CSFP_MASK, 0x3)
+
+/* Port Configuration 6 */
+#define PEF2256_PC6 0x86
+
+/* Global Counter Mode n=1..8 */
+#define PEF2256_GCM(_n) (0x92 + (_n) - 1)
+#define PEF2256_GCM1 0x92
+#define PEF2256_GCM2 0x93
+#define PEF2256_GCM3 0x94
+#define PEF2256_GCM4 0x95
+#define PEF2256_GCM5 0x96
+#define PEF2256_GCM6 0x97
+#define PEF2256_GCM7 0x98
+#define PEF2256_GCM8 0x99
+
+/* Version Status Register */
+#define PEF2256_VSTR 0x4A
+#define PEF2256_VSTR_VERSION_12 0x00
+#define PEF2256_VSTR_VERSION_21 0x10
+#define PEF2256_VSTR_VERSION_2x 0x05
+
+/* Framer Receive Status 0 */
+#define PEF2256_FRS0 0x4C
+#define PEF2256_FRS0_LOS BIT(7)
+#define PEF2256_FRS0_AIS BIT(6)
+
+/* Interrupt Status Register 0..5 */
+#define PEF2256_ISR(_n) (0x68 + (_n))
+#define PEF2256_ISR0 0x68
+#define PEF2256_ISR1 0x69
+#define PEF2256_ISR2 0x6A
+#define PEF2256_ISR3 0x6B
+#define PEF2256_ISR4 0x6C
+#define PEF2256_ISR5 0x6D
+
+/* Global Interrupt Status */
+#define PEF2256_GIS 0x6E
+#define PEF2256_GIS_ISR(_n) BIT(_n)
+
+/* Wafer Identification Register */
+#define PEF2256_WID 0xEC
+#define PEF2256_12_WID_MASK GENMASK(1, 0)
+#define PEF2256_12_WID_VERSION_12 FIELD_PREP_CONST(PEF2256_12_WID_MASK, 0x3)
+#define PEF2256_2X_WID_MASK GENMASK(7, 6)
+#define PEF2256_2X_WID_VERSION_21 FIELD_PREP_CONST(PEF2256_2X_WID_MASK, 0x0)
+#define PEF2256_2X_WID_VERSION_22 FIELD_PREP_CONST(PEF2256_2X_WID_MASK, 0x1)
+
+/* IMR2/ISR2 Interrupts common bits */
+#define PEF2256_INT2_AIS BIT(3)
+#define PEF2256_INT2_LOS BIT(2)
+
+#endif /* __PEF2256_REGS_H__ */
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
new file mode 100644
index 000000000..4f81053ee
--- /dev/null
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PEF2256 (also known as FALC56) driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/framer/pef2256.h>
+#include <linux/clk.h>
+#include <linux/framer/framer-provider.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include "pef2256-regs.h"
+
+enum pef2256_frame_type {
+ PEF2256_FRAME_E1_DOUBLEFRAME,
+ PEF2256_FRAME_E1_CRC4_MULTIFRAME,
+ PEF2256_FRAME_E1_AUTO_MULTIFRAME,
+ PEF2256_FRAME_T1J1_4FRAME,
+ PEF2256_FRAME_T1J1_12FRAME,
+ PEF2256_FRAME_T1J1_24FRAME,
+ PEF2256_FRAME_T1J1_72FRAME,
+};
+
+struct pef2256 {
+ struct device *dev;
+ struct regmap *regmap;
+ enum pef2256_version version;
+ struct clk *mclk;
+ struct clk *sclkr;
+ struct clk *sclkx;
+ struct gpio_desc *reset_gpio;
+ unsigned long sysclk_rate;
+ u32 data_rate;
+ bool is_tx_falling_edge;
+ bool is_subordinate;
+ enum pef2256_frame_type frame_type;
+ u8 channel_phase;
+ atomic_t carrier;
+ struct framer *framer;
+};
+
+static u8 pef2256_read8(struct pef2256 *pef2256, int offset)
+{
+ int val;
+
+ regmap_read(pef2256->regmap, offset, &val);
+ return val;
+}
+
+static void pef2256_write8(struct pef2256 *pef2256, int offset, u8 val)
+{
+ regmap_write(pef2256->regmap, offset, val);
+}
+
+static void pef2256_clrbits8(struct pef2256 *pef2256, int offset, u8 clr)
+{
+ regmap_clear_bits(pef2256->regmap, offset, clr);
+}
+
+static void pef2256_setbits8(struct pef2256 *pef2256, int offset, u8 set)
+{
+ regmap_set_bits(pef2256->regmap, offset, set);
+}
+
+static void pef2256_clrsetbits8(struct pef2256 *pef2256, int offset, u8 clr, u8 set)
+{
+ regmap_update_bits(pef2256->regmap, offset, clr | set, set);
+}
+
+enum pef2256_version pef2256_get_version(struct pef2256 *pef2256)
+{
+ enum pef2256_version version = PEF2256_VERSION_UNKNOWN;
+ u8 vstr, wid;
+
+ vstr = pef2256_read8(pef2256, PEF2256_VSTR);
+ wid = pef2256_read8(pef2256, PEF2256_WID);
+
+ switch (vstr) {
+ case PEF2256_VSTR_VERSION_12:
+ if ((wid & PEF2256_12_WID_MASK) == PEF2256_12_WID_VERSION_12)
+ version = PEF2256_VERSION_1_2;
+ break;
+ case PEF2256_VSTR_VERSION_2x:
+ switch (wid & PEF2256_2X_WID_MASK) {
+ case PEF2256_2X_WID_VERSION_21:
+ version = PEF2256_VERSION_2_1;
+ break;
+ case PEF2256_2X_WID_VERSION_22:
+ version = PEF2256_VERSION_2_2;
+ break;
+ }
+ break;
+ case PEF2256_VSTR_VERSION_21:
+ version = PEF2256_VERSION_2_1;
+ break;
+ }
+
+ if (version == PEF2256_VERSION_UNKNOWN)
+ dev_err(pef2256->dev, "Unknown version (0x%02x, 0x%02x)\n", vstr, wid);
+
+ return version;
+}
+EXPORT_SYMBOL_GPL(pef2256_get_version);
+
+enum pef2256_gcm_config_item {
+ PEF2256_GCM_CONFIG_1544000 = 0,
+ PEF2256_GCM_CONFIG_2048000,
+ PEF2256_GCM_CONFIG_8192000,
+ PEF2256_GCM_CONFIG_10000000,
+ PEF2256_GCM_CONFIG_12352000,
+ PEF2256_GCM_CONFIG_16384000,
+};
+
+struct pef2256_gcm_config {
+ u8 gcm_12[6];
+ u8 gcm_2x[8];
+};
+
+static const struct pef2256_gcm_config pef2256_gcm_configs[] = {
+ [PEF2256_GCM_CONFIG_1544000] = {
+ .gcm_12 = {0xF0, 0x51, 0x00, 0x80, 0x00, 0x15},
+ .gcm_2x = {0x00, 0x15, 0x00, 0x08, 0x00, 0x3F, 0x9C, 0xDF},
+ },
+ [PEF2256_GCM_CONFIG_2048000] = {
+ .gcm_12 = {0x00, 0x58, 0xD2, 0xC2, 0x00, 0x10},
+ .gcm_2x = {0x00, 0x18, 0xFB, 0x0B, 0x00, 0x2F, 0xDB, 0xDF},
+ },
+ [PEF2256_GCM_CONFIG_8192000] = {
+ .gcm_12 = {0x00, 0x58, 0xD2, 0xC2, 0x03, 0x10},
+ .gcm_2x = {0x00, 0x18, 0xFB, 0x0B, 0x00, 0x0B, 0xDB, 0xDF},
+ },
+ [PEF2256_GCM_CONFIG_10000000] = {
+ .gcm_12 = {0x90, 0x51, 0x81, 0x8F, 0x04, 0x10},
+ .gcm_2x = {0x40, 0x1B, 0x3D, 0x0A, 0x00, 0x07, 0xC9, 0xDC},
+ },
+ [PEF2256_GCM_CONFIG_12352000] = {
+ .gcm_12 = {0xF0, 0x51, 0x00, 0x80, 0x07, 0x15},
+ .gcm_2x = {0x00, 0x19, 0x00, 0x08, 0x01, 0x0A, 0x98, 0xDA},
+ },
+ [PEF2256_GCM_CONFIG_16384000] = {
+ .gcm_12 = {0x00, 0x58, 0xD2, 0xC2, 0x07, 0x10},
+ .gcm_2x = {0x00, 0x18, 0xFB, 0x0B, 0x01, 0x0B, 0xDB, 0xDF},
+ },
+};
+
+static int pef2256_setup_gcm(struct pef2256 *pef2256)
+{
+ enum pef2256_gcm_config_item item;
+ unsigned long mclk_rate;
+ const u8 *gcm;
+ int i, count;
+
+ mclk_rate = clk_get_rate(pef2256->mclk);
+ switch (mclk_rate) {
+ case 1544000:
+ item = PEF2256_GCM_CONFIG_1544000;
+ break;
+ case 2048000:
+ item = PEF2256_GCM_CONFIG_2048000;
+ break;
+ case 8192000:
+ item = PEF2256_GCM_CONFIG_8192000;
+ break;
+ case 10000000:
+ item = PEF2256_GCM_CONFIG_10000000;
+ break;
+ case 12352000:
+ item = PEF2256_GCM_CONFIG_12352000;
+ break;
+ case 16384000:
+ item = PEF2256_GCM_CONFIG_16384000;
+ break;
+ default:
+ dev_err(pef2256->dev, "Unsupported v2.x MCLK rate %lu\n", mclk_rate);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(item >= ARRAY_SIZE(pef2256_gcm_configs));
+
+ if (pef2256->version == PEF2256_VERSION_1_2) {
+ gcm = pef2256_gcm_configs[item].gcm_12;
+ count = ARRAY_SIZE(pef2256_gcm_configs[item].gcm_12);
+ } else {
+ gcm = pef2256_gcm_configs[item].gcm_2x;
+ count = ARRAY_SIZE(pef2256_gcm_configs[item].gcm_2x);
+ }
+
+ for (i = 0; i < count; i++)
+ pef2256_write8(pef2256, PEF2256_GCM(i + 1), *(gcm + i));
+
+ return 0;
+}
+
+static int pef2256_setup_e1_line(struct pef2256 *pef2256)
+{
+ u8 fmr1, fmr2;
+
+ /* RCLK output: DPLL clock, DCO-X enabled, DCO-X internal ref clock */
+ pef2256_write8(pef2256, PEF2256_CMR1, 0x00);
+
+ /* SCLKR selected, SCLKX selected,
+ * receive synchro pulse sourced by SYPR,
+ * transmit synchro pulse sourced by SYPX,
+ * DCO-X center frequency enabled
+ */
+ pef2256_write8(pef2256, PEF2256_CMR2, PEF2256_CMR2_DCOXC);
+
+ if (pef2256->is_subordinate) {
+ /* select RCLK source = 2M, disable switching from RCLK to SYNC */
+ pef2256_clrsetbits8(pef2256, PEF2256_CMR1, PEF2256_CMR1_RS_MASK,
+ PEF2256_CMR1_RS_DCOR_2048 | PEF2256_CMR1_DCS);
+ }
+
+ /* slave mode, local loop off, short-haul mode.
+ * In v2.x, bit 3 is documented as a forced '1' bit and needs to be set.
+ */
+ if (pef2256->version == PEF2256_VERSION_1_2)
+ pef2256_write8(pef2256, PEF2256_LIM0, 0x00);
+ else
+ pef2256_write8(pef2256, PEF2256_LIM0, PEF2256_2X_LIM0_BIT3);
+
+ /* "master" mode */
+ if (!pef2256->is_subordinate)
+ pef2256_setbits8(pef2256, PEF2256_LIM0, PEF2256_LIM0_MAS);
+
+ /* analog interface selected, remote loop off */
+ pef2256_write8(pef2256, PEF2256_LIM1, 0x00);
+
+ /* receive input threshold = 0,21V */
+ if (pef2256->version == PEF2256_VERSION_1_2)
+ pef2256_clrsetbits8(pef2256, PEF2256_LIM1, PEF2256_12_LIM1_RIL_MASK,
+ PEF2256_12_LIM1_RIL_210);
+ else
+ pef2256_clrsetbits8(pef2256, PEF2256_LIM1, PEF2256_2X_LIM1_RIL_MASK,
+ PEF2256_2X_LIM1_RIL_210);
+
+ /* transmit pulse mask, default value from datasheet
+ * transmit line in normal operation
+ */
+ if (pef2256->version == PEF2256_VERSION_1_2)
+ pef2256_write8(pef2256, PEF2256_XPM0, 0x7B);
+ else
+ pef2256_write8(pef2256, PEF2256_XPM0, 0x9C);
+ pef2256_write8(pef2256, PEF2256_XPM1, 0x03);
+ pef2256_write8(pef2256, PEF2256_XPM2, 0x00);
+
+ /* HDB3 coding, no alarm simulation */
+ pef2256_write8(pef2256, PEF2256_FMR0, PEF2256_FMR0_XC_HDB3 | PEF2256_FMR0_RC_HDB3);
+
+ /* E1, frame format, 2 Mbit/s system data rate, no AIS
+ * transmission to remote end or system interface, payload loop
+ * off, transmit remote alarm on
+ */
+ fmr1 = 0x00;
+ fmr2 = PEF2256_FMR2_AXRA;
+ switch (pef2256->frame_type) {
+ case PEF2256_FRAME_E1_DOUBLEFRAME:
+ fmr2 |= PEF2256_FMR2_RFS_DOUBLEFRAME;
+ break;
+ case PEF2256_FRAME_E1_CRC4_MULTIFRAME:
+ fmr1 |= PEF2256_FMR1_XFS;
+ fmr2 |= PEF2256_FMR2_RFS_CRC4_MULTIFRAME;
+ break;
+ case PEF2256_FRAME_E1_AUTO_MULTIFRAME:
+ fmr1 |= PEF2256_FMR1_XFS;
+ fmr2 |= PEF2256_FMR2_RFS_AUTO_MULTIFRAME;
+ break;
+ default:
+ dev_err(pef2256->dev, "Unsupported frame type %d\n", pef2256->frame_type);
+ return -EINVAL;
+ }
+ pef2256_clrsetbits8(pef2256, PEF2256_FMR1, PEF2256_FMR1_XFS, fmr1);
+ pef2256_write8(pef2256, PEF2256_FMR2, fmr2);
+
+ if (!pef2256->is_subordinate) {
+ /* SEC input, active high */
+ pef2256_write8(pef2256, PEF2256_GPC1, PEF2256_GPC1_CSFP_SEC_IN_HIGH);
+ } else {
+ /* FSC output, active high */
+ pef2256_write8(pef2256, PEF2256_GPC1, PEF2256_GPC1_CSFP_FSC_OUT_HIGH);
+ }
+
+ /* SCLKR, SCLKX, RCLK configured to inputs,
+ * XFMS active low, CLK1 and CLK2 pin configuration
+ */
+ pef2256_write8(pef2256, PEF2256_PC5, 0x00);
+ pef2256_write8(pef2256, PEF2256_PC6, 0x00);
+
+ /* port RCLK is output */
+ pef2256_setbits8(pef2256, PEF2256_PC5, PEF2256_PC5_CRP);
+
+ return 0;
+}
+
+static void pef2256_setup_e1_los(struct pef2256 *pef2256)
+{
+ /* detection of LOS alarm = 176 pulses (i.e. (10 + 1) * 16) */
+ pef2256_write8(pef2256, PEF2256_PCD, 10);
+ /* recovery of LOS alarm = 22 pulses (i.e. 21 + 1) */
+ pef2256_write8(pef2256, PEF2256_PCR, 21);
+ /* E1 default for the receive slicer threshold */
+ pef2256_write8(pef2256, PEF2256_LIM2, PEF2256_LIM2_SLT_THR50);
+ if (pef2256->is_subordinate) {
+ /* Loop-timed */
+ pef2256_setbits8(pef2256, PEF2256_LIM2, PEF2256_LIM2_ELT);
+ }
+}
+
+static int pef2256_setup_e1_system(struct pef2256 *pef2256)
+{
+ u8 sic1, fmr1;
+
+ /* 2.048 MHz system clocking rate, receive buffer 2 frames, transmit
+ * buffer bypass, data sampled and transmitted on the falling edge of
+ * SCLKR/X, automatic freeze signaling, data is active in the first
+ * channel phase
+ */
+ pef2256_write8(pef2256, PEF2256_SIC1, 0x00);
+ pef2256_write8(pef2256, PEF2256_SIC2, 0x00);
+ pef2256_write8(pef2256, PEF2256_SIC3, 0x00);
+
+ if (pef2256->is_subordinate) {
+ /* transmit buffer size = 2 frames, transparent mode */
+ pef2256_clrsetbits8(pef2256, PEF2256_SIC1, PEF2256_SIC1_XBS_MASK,
+ PEF2256_SIC1_XBS_2FRAMES);
+ }
+
+ if (pef2256->version != PEF2256_VERSION_1_2) {
+ /* during inactive channel phase switch RDO/RSIG into tri-state */
+ pef2256_setbits8(pef2256, PEF2256_SIC3, PEF2256_SIC3_RTRI);
+ }
+
+ if (pef2256->is_tx_falling_edge) {
+ /* falling edge sync pulse transmit, rising edge sync pulse receive */
+ pef2256_clrsetbits8(pef2256, PEF2256_SIC3, PEF2256_SIC3_RESX, PEF2256_SIC3_RESR);
+ } else {
+ /* rising edge sync pulse transmit, falling edge sync pulse receive */
+ pef2256_clrsetbits8(pef2256, PEF2256_SIC3, PEF2256_SIC3_RESR, PEF2256_SIC3_RESX);
+ }
+
+ /* transmit offset counter (XCO10..0) = 4 */
+ pef2256_write8(pef2256, PEF2256_XC0, 0);
+ pef2256_write8(pef2256, PEF2256_XC1, 4);
+ /* receive offset counter (RCO10..0) = 4 */
+ pef2256_write8(pef2256, PEF2256_RC0, 0);
+ pef2256_write8(pef2256, PEF2256_RC1, 4);
+
+ /* system clock rate */
+ switch (pef2256->sysclk_rate) {
+ case 2048000:
+ sic1 = PEF2256_SIC1_SSC_2048;
+ break;
+ case 4096000:
+ sic1 = PEF2256_SIC1_SSC_4096;
+ break;
+ case 8192000:
+ sic1 = PEF2256_SIC1_SSC_8192;
+ break;
+ case 16384000:
+ sic1 = PEF2256_SIC1_SSC_16384;
+ break;
+ default:
+ dev_err(pef2256->dev, "Unsupported sysclk rate %lu\n", pef2256->sysclk_rate);
+ return -EINVAL;
+ }
+ pef2256_clrsetbits8(pef2256, PEF2256_SIC1, PEF2256_SIC1_SSC_MASK, sic1);
+
+ /* data clock rate */
+ switch (pef2256->data_rate) {
+ case 2048000:
+ fmr1 = PEF2256_FMR1_SSD_2048;
+ sic1 = PEF2256_SIC1_SSD_2048;
+ break;
+ case 4096000:
+ fmr1 = PEF2256_FMR1_SSD_4096;
+ sic1 = PEF2256_SIC1_SSD_4096;
+ break;
+ case 8192000:
+ fmr1 = PEF2256_FMR1_SSD_8192;
+ sic1 = PEF2256_SIC1_SSD_8192;
+ break;
+ case 16384000:
+ fmr1 = PEF2256_FMR1_SSD_16384;
+ sic1 = PEF2256_SIC1_SSD_16384;
+ break;
+ default:
+ dev_err(pef2256->dev, "Unsupported data rate %u\n", pef2256->data_rate);
+ return -EINVAL;
+ }
+ pef2256_clrsetbits8(pef2256, PEF2256_FMR1, PEF2256_FMR1_SSD_MASK, fmr1);
+ pef2256_clrsetbits8(pef2256, PEF2256_SIC1, PEF2256_SIC1_SSD_MASK, sic1);
+
+ /* channel phase */
+ pef2256_clrsetbits8(pef2256, PEF2256_SIC2, PEF2256_SIC2_SICS_MASK,
+ PEF2256_SIC2_SICS(pef2256->channel_phase));
+
+ return 0;
+}
+
+static void pef2256_setup_e1_signaling(struct pef2256 *pef2256)
+{
+ /* All bits of the transmitted service word are cleared */
+ pef2256_write8(pef2256, PEF2256_XSW, PEF2256_XSW_XY(0x1F));
+
+ /* CAS disabled and clear spare bit values */
+ pef2256_write8(pef2256, PEF2256_XSP, 0x00);
+
+ if (pef2256->is_subordinate) {
+ /* transparent mode */
+ pef2256_setbits8(pef2256, PEF2256_XSW, PEF2256_XSW_XTM);
+ }
+
+ /* Si-Bit, Spare bit For International, FAS word */
+ pef2256_setbits8(pef2256, PEF2256_XSW, PEF2256_XSW_XSIS);
+ pef2256_setbits8(pef2256, PEF2256_XSP, PEF2256_XSP_XSIF);
+
+ /* no transparent mode active */
+ pef2256_write8(pef2256, PEF2256_TSWM, 0x00);
+}
+
+static void pef2256_setup_e1_errors(struct pef2256 *pef2256)
+{
+ /* error counter latched every 1s */
+ pef2256_setbits8(pef2256, PEF2256_FMR1, PEF2256_FMR1_ECM);
+
+ /* error counter mode COFA */
+ pef2256_setbits8(pef2256, PEF2256_GCR, PEF2256_GCR_ECMC);
+
+ /* errors in service words have no influence */
+ pef2256_setbits8(pef2256, PEF2256_RC0, PEF2256_RC0_SWD);
+
+ /* 4 consecutive incorrect FAS words cause loss of sync */
+ pef2256_setbits8(pef2256, PEF2256_RC0, PEF2256_RC0_ASY4);
+}
+
+static int pef2256_setup_e1(struct pef2256 *pef2256)
+{
+ int ret;
+
+ /* Setup master clocking mode (GCM8..1) */
+ ret = pef2256_setup_gcm(pef2256);
+ if (ret)
+ return ret;
+
+ /* Select E1 mode */
+ pef2256_write8(pef2256, PEF2256_FMR1, 0x00);
+
+ /* internal second timer, power on */
+ pef2256_write8(pef2256, PEF2256_GCR, 0x00);
+
+ /* Setup line interface */
+ ret = pef2256_setup_e1_line(pef2256);
+ if (ret)
+ return ret;
+
+ /* Setup Loss-of-signal detection and recovery */
+ pef2256_setup_e1_los(pef2256);
+
+ /* Setup system interface */
+ ret = pef2256_setup_e1_system(pef2256);
+ if (ret)
+ return ret;
+
+ /* Setup signaling */
+ pef2256_setup_e1_signaling(pef2256);
+
+ /* Setup errors counters and condition */
+ pef2256_setup_e1_errors(pef2256);
+
+ /* status change interrupt on both carrier up and carrier down */
+ pef2256_setbits8(pef2256, PEF2256_GCR, PEF2256_GCR_SCI);
+
+ /* Clear any ISR2 pending interrupts and unmask needed interrupts */
+ pef2256_read8(pef2256, PEF2256_ISR2);
+ pef2256_clrbits8(pef2256, PEF2256_IMR2, PEF2256_INT2_LOS | PEF2256_INT2_AIS);
+
+ /* reset lines */
+ pef2256_write8(pef2256, PEF2256_CMDR, PEF2256_CMDR_RRES | PEF2256_CMDR_XRES);
+ return 0;
+}
+
+static void pef2256_isr_default_handler(struct pef2256 *pef2256, u8 nbr, u8 isr)
+{
+ dev_warn_ratelimited(pef2256->dev, "ISR%u: 0x%02x not handled\n", nbr, isr);
+}
+
+static bool pef2256_is_carrier_on(struct pef2256 *pef2256)
+{
+ u8 frs0;
+
+ frs0 = pef2256_read8(pef2256, PEF2256_FRS0);
+ return !(frs0 & (PEF2256_FRS0_LOS | PEF2256_FRS0_AIS));
+}
+
+static void pef2256_isr2_handler(struct pef2256 *pef2256, u8 nbr, u8 isr)
+{
+ bool carrier;
+
+ if (isr & (PEF2256_INT2_LOS | PEF2256_INT2_AIS)) {
+ carrier = pef2256_is_carrier_on(pef2256);
+ if (atomic_xchg(&pef2256->carrier, carrier) != carrier)
+ framer_notify_status_change(pef2256->framer);
+ }
+}
+
+static irqreturn_t pef2256_irq_handler(int irq, void *priv)
+{
+ static void (*pef2256_isr_handler[])(struct pef2256 *, u8, u8) = {
+ [0] = pef2256_isr_default_handler,
+ [1] = pef2256_isr_default_handler,
+ [2] = pef2256_isr2_handler,
+ [3] = pef2256_isr_default_handler,
+ [4] = pef2256_isr_default_handler,
+ [5] = pef2256_isr_default_handler
+ };
+ struct pef2256 *pef2256 = (struct pef2256 *)priv;
+ u8 gis;
+ u8 isr;
+ u8 n;
+
+ gis = pef2256_read8(pef2256, PEF2256_GIS);
+
+ for (n = 0; n < ARRAY_SIZE(pef2256_isr_handler); n++) {
+ if (gis & PEF2256_GIS_ISR(n)) {
+ isr = pef2256_read8(pef2256, PEF2256_ISR(n));
+ pef2256_isr_handler[n](pef2256, n, isr);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pef2256_check_rates(struct pef2256 *pef2256, unsigned long sysclk_rate,
+ unsigned long data_rate)
+{
+ unsigned long rate;
+
+ switch (sysclk_rate) {
+ case 2048000:
+ case 4096000:
+ case 8192000:
+ case 16384000:
+ break;
+ default:
+ dev_err(pef2256->dev, "Unsupported system clock rate %lu\n", sysclk_rate);
+ return -EINVAL;
+ }
+
+ for (rate = data_rate; rate <= data_rate * 4; rate *= 2) {
+ if (rate == sysclk_rate)
+ return 0;
+ }
+ dev_err(pef2256->dev, "Unsupported data rate %lu with system clock rate %lu\n",
+ data_rate, sysclk_rate);
+ return -EINVAL;
+}
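+
+/*
+ * Editorial note: the loop above accepts a system clock equal to the data
+ * rate or 2 or 4 times the data rate. For instance, a 2048000 bps data
+ * rate pairs with a 2.048, 4.096 or 8.192 MHz system clock.
+ */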
+
+static int pef2256_of_parse(struct pef2256 *pef2256, struct device_node *np)
+{
+ int ret;
+
+ pef2256->data_rate = 2048000;
+ ret = of_property_read_u32(np, "lantiq,data-rate-bps", &pef2256->data_rate);
+ if (ret && ret != -EINVAL) {
+ dev_err(pef2256->dev, "%pOF: failed to read lantiq,data-rate-bps\n", np);
+ return ret;
+ }
+
+ ret = pef2256_check_rates(pef2256, pef2256->sysclk_rate, pef2256->data_rate);
+ if (ret)
+ return ret;
+
+ pef2256->is_tx_falling_edge = of_property_read_bool(np, "lantiq,clock-falling-edge");
+
+ pef2256->channel_phase = 0;
+ ret = of_property_read_u8(np, "lantiq,channel-phase", &pef2256->channel_phase);
+ if (ret && ret != -EINVAL) {
+ dev_err(pef2256->dev, "%pOF: failed to read lantiq,channel-phase\n",
+ np);
+ return ret;
+ }
+ if (pef2256->channel_phase >= pef2256->sysclk_rate / pef2256->data_rate) {
+ dev_err(pef2256->dev, "%pOF: Invalid lantiq,channel-phase %u\n",
+ np, pef2256->channel_phase);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config pef2256_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 8,
+ .max_register = 0xff,
+};
+
+static const struct mfd_cell pef2256_devs[] = {
+ { .name = "lantiq-pef2256-pinctrl", },
+};
+
+static int pef2256_add_audio_devices(struct pef2256 *pef2256)
+{
+ const char *compatible = "lantiq,pef2256-codec";
+ struct mfd_cell *audio_devs;
+ struct device_node *np;
+ unsigned int count = 0;
+ unsigned int i;
+ int ret;
+
+ for_each_available_child_of_node(pef2256->dev->of_node, np) {
+ if (of_device_is_compatible(np, compatible))
+ count++;
+ }
+
+ if (!count)
+ return 0;
+
+ audio_devs = kcalloc(count, sizeof(*audio_devs), GFP_KERNEL);
+ if (!audio_devs)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ audio_devs[i].name = "framer-codec";
+ audio_devs[i].of_compatible = compatible;
+ audio_devs[i].id = i;
+ }
+
+ ret = mfd_add_devices(pef2256->dev, 0, audio_devs, count, NULL, 0, NULL);
+ kfree(audio_devs);
+ return ret;
+}
+
+static int pef2256_framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ struct pef2256 *pef2256 = framer_get_drvdata(framer);
+
+ status->link_is_on = !!atomic_read(&pef2256->carrier);
+ return 0;
+}
+
+static int pef2256_framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ struct pef2256 *pef2256 = framer_get_drvdata(framer);
+
+ if (config->iface != FRAMER_IFACE_E1) {
+ dev_err(pef2256->dev, "Only E1 line is currently supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (config->clock_type) {
+ case FRAMER_CLOCK_EXT:
+ pef2256->is_subordinate = true;
+ break;
+ case FRAMER_CLOCK_INT:
+ pef2256->is_subordinate = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Apply the new settings */
+ return pef2256_setup_e1(pef2256);
+}
+
+static int pef2256_framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ struct pef2256 *pef2256 = framer_get_drvdata(framer);
+
+ config->iface = FRAMER_IFACE_E1;
+ config->clock_type = pef2256->is_subordinate ? FRAMER_CLOCK_EXT : FRAMER_CLOCK_INT;
+ config->line_clock_rate = 2048000;
+ return 0;
+}
+
+static const struct framer_ops pef2256_framer_ops = {
+ .owner = THIS_MODULE,
+ .get_status = pef2256_framer_get_status,
+ .get_config = pef2256_framer_get_config,
+ .set_config = pef2256_framer_set_config,
+};
+
+static int pef2256_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ unsigned long sclkr_rate, sclkx_rate;
+ struct framer_provider *framer_provider;
+ struct pef2256 *pef2256;
+ const char *version_txt;
+ void __iomem *iomem;
+ int ret;
+ int irq;
+
+ pef2256 = devm_kzalloc(&pdev->dev, sizeof(*pef2256), GFP_KERNEL);
+ if (!pef2256)
+ return -ENOMEM;
+
+ pef2256->dev = &pdev->dev;
+ atomic_set(&pef2256->carrier, 0);
+
+ pef2256->is_subordinate = true;
+ pef2256->frame_type = PEF2256_FRAME_E1_DOUBLEFRAME;
+
+ iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+
+ pef2256->regmap = devm_regmap_init_mmio(&pdev->dev, iomem,
+ &pef2256_regmap_config);
+ if (IS_ERR(pef2256->regmap)) {
+ dev_err(&pdev->dev, "Failed to initialise Regmap (%ld)\n",
+ PTR_ERR(pef2256->regmap));
+ return PTR_ERR(pef2256->regmap);
+ }
+
+ pef2256->mclk = devm_clk_get_enabled(&pdev->dev, "mclk");
+ if (IS_ERR(pef2256->mclk))
+ return PTR_ERR(pef2256->mclk);
+
+ pef2256->sclkr = devm_clk_get_enabled(&pdev->dev, "sclkr");
+ if (IS_ERR(pef2256->sclkr))
+ return PTR_ERR(pef2256->sclkr);
+
+ pef2256->sclkx = devm_clk_get_enabled(&pdev->dev, "sclkx");
+ if (IS_ERR(pef2256->sclkx))
+ return PTR_ERR(pef2256->sclkx);
+
+ /* Both SCLKR (receive) and SCLKX (transmit) must have the same rate,
+ * stored as sysclk_rate.
+ * The exact value is checked in pef2256_check_rates().
+ */
+ sclkr_rate = clk_get_rate(pef2256->sclkr);
+ sclkx_rate = clk_get_rate(pef2256->sclkx);
+ if (sclkr_rate != sclkx_rate) {
+ dev_err(pef2256->dev, "clk rate mismatch. sclkr %lu Hz, sclkx %lu Hz\n",
+ sclkr_rate, sclkx_rate);
+ return -EINVAL;
+ }
+ pef2256->sysclk_rate = sclkr_rate;
+
+ /* Reset the component. The MCLK clock must be active during reset */
+ pef2256->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pef2256->reset_gpio))
+ return PTR_ERR(pef2256->reset_gpio);
+ if (pef2256->reset_gpio) {
+ gpiod_set_value_cansleep(pef2256->reset_gpio, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(pef2256->reset_gpio, 0);
+ usleep_range(10, 20);
+ }
+
+ pef2256->version = pef2256_get_version(pef2256);
+ switch (pef2256->version) {
+ case PEF2256_VERSION_1_2:
+ version_txt = "1.2";
+ break;
+ case PEF2256_VERSION_2_1:
+ version_txt = "2.1";
+ break;
+ case PEF2256_VERSION_2_2:
+ version_txt = "2.2";
+ break;
+ default:
+ return -ENODEV;
+ }
+ dev_info(pef2256->dev, "Version %s detected\n", version_txt);
+
+ ret = pef2256_of_parse(pef2256, np);
+ if (ret)
+ return ret;
+
+ /* Create the framer. It may be used from interrupt handlers */
+ pef2256->framer = devm_framer_create(pef2256->dev, NULL, &pef2256_framer_ops);
+ if (IS_ERR(pef2256->framer))
+ return PTR_ERR(pef2256->framer);
+
+ framer_set_drvdata(pef2256->framer, pef2256);
+
+ /* Disable interrupts */
+ pef2256_write8(pef2256, PEF2256_IMR0, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR1, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR2, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
+
+ /* Clear any pending interrupts */
+ pef2256_read8(pef2256, PEF2256_ISR0);
+ pef2256_read8(pef2256, PEF2256_ISR1);
+ pef2256_read8(pef2256, PEF2256_ISR2);
+ pef2256_read8(pef2256, PEF2256_ISR3);
+ pef2256_read8(pef2256, PEF2256_ISR4);
+ pef2256_read8(pef2256, PEF2256_ISR5);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(pef2256->dev, irq, pef2256_irq_handler, 0, "pef2256", pef2256);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, pef2256);
+
+ ret = mfd_add_devices(pef2256->dev, 0, pef2256_devs,
+ ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(pef2256->dev, "add devices failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = pef2256_setup_e1(pef2256);
+ if (ret)
+ return ret;
+
+ framer_provider = devm_framer_provider_of_register(pef2256->dev,
+ framer_provider_simple_of_xlate);
+ if (IS_ERR(framer_provider))
+ return PTR_ERR(framer_provider);
+
+ /* Add audio devices */
+ ret = pef2256_add_audio_devices(pef2256);
+ if (ret < 0) {
+ dev_err(pef2256->dev, "add audio devices failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pef2256_remove(struct platform_device *pdev)
+{
+ struct pef2256 *pef2256 = platform_get_drvdata(pdev);
+
+ /* Disable interrupts */
+ pef2256_write8(pef2256, PEF2256_IMR0, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR1, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR2, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
+ pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
+
+ return 0;
+}
+
+static const struct of_device_id pef2256_id_table[] = {
+ { .compatible = "lantiq,pef2256" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, pef2256_id_table);
+
+static struct platform_driver pef2256_driver = {
+ .driver = {
+ .name = "lantiq-pef2256",
+ .of_match_table = pef2256_id_table,
+ },
+ .probe = pef2256_probe,
+ .remove = pef2256_remove,
+};
+module_platform_driver(pef2256_driver);
+
+struct regmap *pef2256_get_regmap(struct pef2256 *pef2256)
+{
+ return pef2256->regmap;
+}
+EXPORT_SYMBOL_GPL(pef2256_get_regmap);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("PEF2256 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index fd50bb313..605e70f7b 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1259,7 +1259,7 @@ free_uhdlc_priv:
return ret;
}
-static int ucc_hdlc_remove(struct platform_device *pdev)
+static void ucc_hdlc_remove(struct platform_device *pdev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
@@ -1277,8 +1277,6 @@ static int ucc_hdlc_remove(struct platform_device *pdev)
kfree(priv);
dev_info(&pdev->dev, "UCC based hdlc module removed\n");
-
- return 0;
}
static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
@@ -1292,7 +1290,7 @@ MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
static struct platform_driver ucc_hdlc_driver = {
.probe = ucc_hdlc_probe,
- .remove = ucc_hdlc_remove,
+ .remove_new = ucc_hdlc_remove,
.driver = {
.name = DRV_NAME,
.pm = HDLC_PM_OPS,
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index b09f4c235..931c5ca79 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1522,20 +1522,19 @@ err_plat:
return err;
}
-static int ixp4xx_hss_remove(struct platform_device *pdev)
+static void ixp4xx_hss_remove(struct platform_device *pdev)
{
struct port *port = platform_get_drvdata(pdev);
unregister_hdlc_device(port->netdev);
free_netdev(port->netdev);
npe_release(port->npe);
- return 0;
}
static struct platform_driver ixp4xx_hss_driver = {
.driver.name = DRV_NAME,
.probe = ixp4xx_hss_probe,
- .remove = ixp4xx_hss_remove,
+ .remove_new = ixp4xx_hss_remove,
};
module_platform_driver(ixp4xx_hss_driver);
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 8a51cfcff..cbb99fc5e 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -28,6 +28,7 @@
static struct spi_device *g_spi;
+MODULE_DESCRIPTION("Slic Maxim DS26522 driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhao Qiang<B45475@freescale.com>");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 7555af519..c6599594d 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -22,7 +22,6 @@ source "drivers/net/wireless/admtek/Kconfig"
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/atmel/Kconfig"
source "drivers/net/wireless/broadcom/Kconfig"
-source "drivers/net/wireless/cisco/Kconfig"
source "drivers/net/wireless/intel/Kconfig"
source "drivers/net/wireless/intersil/Kconfig"
source "drivers/net/wireless/marvell/Kconfig"
@@ -38,8 +37,6 @@ source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zydas/Kconfig"
source "drivers/net/wireless/quantenna/Kconfig"
-source "drivers/net/wireless/legacy/Kconfig"
-
source "drivers/net/wireless/virtual/Kconfig"
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 4d7374d56..e1c4141c6 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_WLAN_VENDOR_ADMTEK) += admtek/
obj-$(CONFIG_WLAN_VENDOR_ATH) += ath/
obj-$(CONFIG_WLAN_VENDOR_ATMEL) += atmel/
obj-$(CONFIG_WLAN_VENDOR_BROADCOM) += broadcom/
-obj-$(CONFIG_WLAN_VENDOR_CISCO) += cisco/
obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/
obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
@@ -23,5 +22,4 @@ obj-$(CONFIG_WLAN_VENDOR_ST) += st/
obj-$(CONFIG_WLAN_VENDOR_TI) += ti/
obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/
-obj-$(CONFIG_WLAN) += legacy/
obj-$(CONFIG_WLAN) += virtual/
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 43e0db78d..a742cec44 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1803,5 +1803,6 @@ static struct usb_driver ar5523_driver = {
module_usb_driver(ar5523_driver);
+MODULE_DESCRIPTION("Atheros AR5523 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index af6546572..9a4f8e815 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "bmi.h"
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index c27b82047..afae4a802 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hif.h"
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 6cdb225b7..0032f8aa8 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -100,6 +101,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -140,6 +142,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -181,6 +184,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -217,6 +221,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -257,6 +262,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -297,6 +303,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -337,6 +344,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -381,6 +389,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -427,6 +436,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -480,6 +490,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -530,6 +541,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -570,6 +582,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -612,6 +625,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -645,6 +659,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -692,6 +707,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = false,
.use_fw_tx_credits = true,
.delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -725,6 +741,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_restart_disconnect = true,
.use_fw_tx_credits = false,
.delay_unmap_buffer = true,
+ .mcast_frame_registration = false,
},
};
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 4b5239de4..c110d1552 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORE_H_
@@ -607,7 +608,7 @@ struct ath10k_vif {
u8 tim_bitmap[64];
u8 tim_len;
u32 ssid_len;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN] __nonstring;
bool hidden_ssid;
/* P2P_IE with NoA attribute for P2P_GO case */
u32 noa_len;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 2d1634a89..bb3a276b7 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "coredump.h"
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 437b9759f..e5ef0352e 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _COREDUMP_H_
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index ad9cf953a..b93a64bf8 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 87a336533..394bf3c32 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 5bfeecb95..a6e21ce90 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 0d180faf3..7ff665020 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -246,26 +246,12 @@ struct ath10k_htc_lookahead_bundle {
struct ath10k_htc_record {
struct ath10k_ath10k_htc_record_hdr hdr;
union {
- struct ath10k_htc_credit_report credit_report[0];
- struct ath10k_htc_lookahead_report lookahead_report[0];
- struct ath10k_htc_lookahead_bundle lookahead_bundle[0];
- u8 pauload[0];
+ DECLARE_FLEX_ARRAY(struct ath10k_htc_credit_report, credit_report);
+ DECLARE_FLEX_ARRAY(struct ath10k_htc_lookahead_report, lookahead_report);
+ DECLARE_FLEX_ARRAY(struct ath10k_htc_lookahead_bundle, lookahead_bundle);
};
} __packed __aligned(4);
-/*
- * note: the trailer offset is dynamic depending
- * on payload length. this is only a struct layout draft
- */
-struct ath10k_htc_frame {
- struct ath10k_htc_hdr hdr;
- union {
- struct ath10k_htc_msg msg;
- u8 payload[0];
- };
- struct ath10k_htc_record trailer[0];
-} __packed __aligned(4);
-
/*******************/
/* Host-side stuff */
/*******************/
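/* Aside on the DECLARE_FLEX_ARRAY() hunk above: ISO C forbids a C99
 * flexible array member as the only member of a struct or as a bare
 * union member, which is why the old GNU zero-length [0] arrays were
 * used; the macro wraps the member so the [0] form can be dropped. A
 * minimal sketch of the idea, assuming the <linux/stddef.h>-style
 * expansion that pairs the member with an empty struct:
 */
#define SKETCH_FLEX_ARRAY(TYPE, NAME)		\
	struct {				\
		struct { } __empty_##NAME;	\
		TYPE NAME[];			\
	}

union sketch_records {
	SKETCH_FLEX_ARRAY(unsigned int, words);
	SKETCH_FLEX_ARRAY(unsigned char, bytes);
};	/* the union itself stays empty; payload follows the enclosing header */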
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index c80470e88..4a9270e2a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HTT_H_
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index b261d6371..7d28ae545 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -1294,7 +1295,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
status->encoding = RX_ENC_LEGACY;
status->bw = RATE_INFO_BW_20;
- status->flag &= ~RX_FLAG_MACTIME_END;
+ status->flag &= ~RX_FLAG_MACTIME;
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
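/* Aside on the RX_FLAG_MACTIME hunk above: it tracks mac80211's rework
 * of the RX timestamp flags into one two-bit field, so clearing
 * RX_FLAG_MACTIME now drops whichever of the START/PLCP_START/END
 * variants was set, not just END. A sketch of the encoding this
 * assumes (a two-bit field at bits 6-7; values illustrative, not
 * copied from mac80211.h):
 */
enum sketch_rx_flags {
	SKETCH_RX_FLAG_MACTIME		  = 0x3 << 6,	/* mask of both bits */
	SKETCH_RX_FLAG_MACTIME_PLCP_START = 0x1 << 6,
	SKETCH_RX_FLAG_MACTIME_START	  = 0x2 << 6,
	SKETCH_RX_FLAG_MACTIME_END	  = 0x3 << 6,
};
/* status->flag &= ~SKETCH_RX_FLAG_MACTIME clears the whole field. */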
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index be4d4536a..9725feece 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/etherdevice.h>
@@ -40,7 +41,6 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_sta *arsta;
struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
- unsigned long frame_cnt;
unsigned long byte_cnt;
int idx;
u32 bit;
@@ -67,7 +67,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
bit = BIT(peer_id % 32);
idx = peer_id / 32;
- ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ ieee80211_txq_get_depth(txq, NULL, &byte_cnt);
count = ath10k_htt_tx_txq_calc_size(byte_cnt);
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
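/* Aside on the ieee80211_txq_get_depth() hunk above: the helper fills
 * each out-parameter only when the caller passes a non-NULL pointer,
 * so the now-unused frame_cnt local can simply be dropped. A hedged
 * sketch of that calling pattern (simplified, illustrative names):
 */
static void sketch_txq_get_depth(unsigned long *frame_cnt, unsigned long *byte_cnt)
{
	if (frame_cnt)
		*frame_cnt = 0;	/* would be computed from the queue */
	if (byte_cnt)
		*byte_cnt = 0;
}

static void sketch_caller(void)
{
	unsigned long bytes;

	sketch_txq_get_depth(NULL, &bytes);	/* only the byte count is wanted */
}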
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 6d32b43a4..8fafe096a 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 9643031a4..93c073091 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HW_H_
@@ -639,6 +640,9 @@ struct ath10k_hw_params {
bool use_fw_tx_credits;
bool delay_unmap_buffer;
+
+ /* The hardware supports multicast frame registrations */
+ bool mcast_frame_registration;
};
struct htt_resp;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 2cf693f3f..090bcf148 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "mac.h"
@@ -1242,7 +1243,7 @@ static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
return ar->monitor ||
(!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
ar->running_fw->fw_file.fw_features) &&
- (ar->filter_flags & FIF_OTHER_BSS)) ||
+ (ar->filter_flags & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) ||
test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}
@@ -6025,10 +6026,15 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
int ret;
+ unsigned int supported = SUPPORTED_FILTERS;
mutex_lock(&ar->conf_mutex);
- *total_flags &= SUPPORTED_FILTERS;
+ if (ar->hw_params.mcast_frame_registration)
+ supported |= FIF_MCAST_ACTION;
+
+ *total_flags &= supported;
+
ar->filter_flags = *total_flags;
ret = ath10k_monitor_recalc(ar);
@@ -6121,9 +6127,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (ieee80211_vif_is_mesh(vif)) {
/* mesh doesn't use SSID but firmware needs it */
- strncpy(arvif->u.ap.ssid, "mesh",
- sizeof(arvif->u.ap.ssid));
arvif->u.ap.ssid_len = 4;
+ memcpy(arvif->u.ap.ssid, "mesh", arvif->u.ap.ssid_len);
}
}
@@ -10118,6 +10123,10 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);
+ if (ar->hw_params.mcast_frame_registration)
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
+
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
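/* Aside on the mesh-SSID hunk above: strncpy() is replaced by a
 * length-then-copy memcpy() because the destination was annotated
 * __nonstring earlier in this diff, so no NUL terminator is wanted and
 * strncpy()'s zero-padding and truncation warnings are beside the
 * point. A minimal sketch of the pattern, with illustrative names:
 */
#include <string.h>

struct sketch_ap {
	unsigned char ssid[32];	/* raw bytes, length tracked separately */
	unsigned int ssid_len;
};

static void sketch_set_mesh_ssid(struct sketch_ap *ap)
{
	ap->ssid_len = 4;
	memcpy(ap->ssid, "mesh", ap->ssid_len);	/* no trailing NUL copied */
}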
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 2f8c78527..3de2de6d4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/pci.h>
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 480cd97ab..27bb4cf2d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _PCI_H_
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 52c1a3de8..38e939f57 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/completion.h>
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
index 1c81e454f..0e85c75d2 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/soc/qcom/qmi.h>
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
index f0db99140..9f311f3bc 100644
--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef WCN3990_QMI_SVC_V01_H
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 777e53aa6..564293df1 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _RX_DESC_H_
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 56fbcfb80..0ab5433f6 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -3,6 +3,7 @@
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index cefd97323..31c8d7fbb 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
index 48e066ba8..7e4cfbb67 100644
--- a/drivers/net/wireless/ath/ath10k/usb.h
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -3,6 +3,7 @@
* Copyright (c) 2004-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
* Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _USB_H_
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index dbb48d70f..83a8f07a6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 05fa7d4c0..88befe92f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index b112e8826..9146df98f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_H_
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 20b9aa8dd..aa7b2e703 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "mac.h"
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index ad5cc6cac..27f0523bf 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -2,7 +2,7 @@
config ATH11K
tristate "Qualcomm Technologies 802.11ax chipset support"
depends on MAC80211 && HAS_DMA
- depends on CRYPTO_MICHAEL_MIC
+ select CRYPTO_MICHAEL_MIC
select ATH_COMMON
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index f8f5e653c..7c0a23517 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1251,7 +1251,7 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab)
platform_set_drvdata(pdev, NULL);
}
-static int ath11k_ahb_remove(struct platform_device *pdev)
+static void ath11k_ahb_remove(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
@@ -1267,8 +1267,6 @@ static int ath11k_ahb_remove(struct platform_device *pdev)
qmi_fail:
ath11k_ahb_free_resources(ab);
-
- return 0;
}
static void ath11k_ahb_shutdown(struct platform_device *pdev)
@@ -1296,7 +1294,7 @@ static struct platform_driver ath11k_ahb_driver = {
.of_match_table = ath11k_ahb_of_match,
},
.probe = ath11k_ahb_probe,
- .remove = ath11k_ahb_remove,
+ .remove_new = ath11k_ahb_remove,
.shutdown = ath11k_ahb_shutdown,
};
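/* Aside on the remove-callback hunk above: ath11k_ahb_remove() now
 * returns void and is wired to .remove_new, the transitional
 * platform-bus callback for drivers whose remove path cannot usefully
 * report failure (the bus ignored the old int return anyway). A hedged
 * sketch of the conversion, with illustrative driver names:
 */
#include <linux/platform_device.h>

static void sketch_remove(struct platform_device *pdev)
{
	/* tear down resources; errors are logged here, not returned */
}

static struct platform_driver sketch_driver = {
	.driver		= { .name = "sketch" },
	.remove_new	= sketch_remove,	/* void-returning variant */
};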
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index 289d47ae9..e66e86bde 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_rx.h"
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index c0f6a0ba8..69946fc70 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CE_H
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 667d55e26..cd829ec70 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -595,7 +595,6 @@ struct ath11k {
struct ath11k_base *ab;
struct ath11k_pdev *pdev;
struct ieee80211_hw *hw;
- struct ieee80211_ops *ops;
struct ath11k_pdev_wmi *wmi;
struct ath11k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
@@ -919,6 +918,7 @@ struct ath11k_base {
* This may or may not be used during the runtime
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct cur_regulatory_info *reg_info_store;
/* Current DFS Regulatory */
enum ath11k_dfs_region dfs_region;
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index 5536e8642..fbb6e8d8a 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
index ef906c687..2f93b78a5 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.h
+++ b/drivers/net/wireless/ath/ath11k/dbring.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DBRING_H
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index f5c8a34c8..2b8544355 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index 9c52804ef..cc8934d15 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUG_H_
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 0796f4d92..a48e737ef 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 6f630b42e..a39e45863 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUGFS_H_
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 0207fc491..870e86a31 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 96219301f..476689bbd 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 8c177fba6..f56a24b6c 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
index e6c11b3a4..ace877e19 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUGFS_STA_H_
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index a7252b525..8975dc57a 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 15815af45..2f6dd69d3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 7eac93ce7..afd481f58 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index a5fa08bc6..c1072e66e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index 68a21ea9b..61be2265e 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_TX_H
diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
index 8f84fba29..4e36292a7 100644
--- a/drivers/net/wireless/ath/ath11k/fw.c
+++ b/drivers/net/wireless/ath/ath11k/fw.c
@@ -133,7 +133,7 @@ static int ath11k_fw_request_firmware_api_n(struct ath11k_base *ab,
len -= ie_len;
data += ie_len;
- };
+ }
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 23f3af8e3..c060c4b5c 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 1942d41d6..80447f488 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index d895ea878..b2fd180bd 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 41946795d..e758ee8e1 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index 472a52cf5..0fa9aef9d 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_RX_H
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index d68ed4214..877a4073f 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HIF_H_
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 2c2e425c8..23054ab29 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
index d31e501c8..86f77eaca 100644
--- a/drivers/net/wireless/ath/ath11k/htc.h
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HTC_H
@@ -150,10 +151,7 @@ struct ath11k_htc_credit_report {
struct ath11k_htc_record {
struct ath11k_htc_record_hdr hdr;
- union {
- struct ath11k_htc_credit_report credit_report[0];
- u8 pauload[0];
- };
+ struct ath11k_htc_credit_report credit_report[];
} __packed __aligned(4);
enum ath11k_htc_svc_gid {
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index d7b5ec6e6..77d8f9237 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index d51a99669..1b070747a 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 1c4613b1b..cc8031008 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4659,6 +4659,14 @@ static int ath11k_station_disassoc(struct ath11k *ar,
return 0;
}
+static u32 ath11k_mac_max_nss(const u8 *ht_mcs_mask, const u16 *vht_mcs_mask,
+ const u16 *he_mcs_mask)
+{
+ return max3(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask),
+ ath11k_mac_max_he_nss(he_mcs_mask));
+}
+
static void ath11k_sta_rc_update_wk(struct work_struct *wk)
{
struct ath11k *ar;
@@ -4704,9 +4712,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_lock(&ar->conf_mutex);
nss = max_t(u32, 1, nss);
- nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)),
- ath11k_mac_max_he_nss(he_mcs_mask)));
+ nss = min(nss, ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask));
if (changed & IEEE80211_RC_BW_CHANGED) {
/* Get the peer phymode */
@@ -8385,9 +8391,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
ath11k_warn(ar->ab,
"could not update fixed rate settings to all peers due to mcs/nss incompatibility\n");
nss = min_t(u32, ar->num_tx_chains,
- max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)),
- ath11k_mac_max_he_nss(he_mcs_mask)));
+ ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask));
/* If multiple rates across different preambles are given
* we can reconfigure this info with all peers using PEER_ASSOC
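/* Aside on the ath11k_mac_max_nss() hunks above: the helper collapses
 * the nested max(max(a, b), c) expressions into one max3() call
 * (from <linux/minmax.h>) and reuses it at both call sites. A sketch
 * of the equivalent logic, spelled out without the macro:
 */
static unsigned int sketch_max3(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a > b ? a : b;

	return m > c ? m : c;	/* what max3(a, b, c) evaluates to */
}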
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0231783ad..0dfdeed51 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index afeabd6ec..337590236 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -106,7 +106,7 @@ static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
.ch_cfg = ath11k_mhi_channels_qca6390,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index 8d9f852da..f81fba264 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_MHI_H
#define _ATH11K_MHI_H
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index e602d4130..15e2ceb22 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1c79a932d..6d0126c39 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index 9bd385d0a..3ad2f3355 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_PEER_H
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index c270dc46d..2c7cab62b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index d477e2be8..7e06d100a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 3c7debae8..d4fd3509e 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -617,25 +618,68 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
*rule_idx = i;
}
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STANDARD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VERY_LOW_POWER_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect)
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct cur_reg_rule *reg_rule;
+ struct cur_reg_rule *reg_rule, *reg_rule_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
char alpha2[3];
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
- /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
- * This can be updated after complete 6 GHz regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6ghz_ap_type ap_type;
+
+ ap_type = ath11k_reg_ap_pwr_convert(power_type);
+
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6ghz_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -682,13 +726,10 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
- k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ k < reg_6ghz_number) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
} else {
break;
@@ -757,6 +798,159 @@ ret:
return new_regd;
}
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
+
+static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ /* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy
+ * and one struct ieee80211_regdomain, so it can only store one group
+ * of reg rules. This means multi-interface concurrency on the same
+ * ath11k is not supported for the regdomain. So get the vdev type of
+ * the first entry for now. Once the regdomain gains concurrency
+ * support, this should change.
+ */
+ arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list);
+ if (arvif)
+ return arvif->vdev_type;
+
+ return WMI_VDEV_TYPE_UNSPEC;
+}
+
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd;
+ bool intersect = false;
+ int pdev_idx;
+ struct ath11k *ar;
+ enum wmi_vdev_type vdev_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list");
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * fw retains the current regd. Print a failure message
+ * and return from here.
+ */
+ ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ return -EINVAL;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ /* Avoid default reg rule updates sent during FW recovery if
+ * it is already available
+ */
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock_bh(&ab->base_lock);
+ goto retfail;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback. In either case
+ * ath11k_reg_reset_info() needs to be called to avoid
+ * a memory leak.
+ */
+ ath11k_reg_reset_info(reg_info);
+
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ return 0;
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto retfail;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e. a default regd was already set during initialization
+ * and the regd coming from this event carries valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ ar = ab->pdevs[pdev_idx].ar;
+ vdev_type = ath11k_reg_get_ar_vdev_type(ar);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi handle chan list power type %d vdev type %d intersect %d\n",
+ power_type, vdev_type, intersect);
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]);
+ ab->reg_info_store[pdev_idx] = *reg_info;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI Init is to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
+ * Free previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purpose
+ */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+fallback:
+ /* Fall back to the older reg (by sending the previous country
+ * setting again) if fw has succeeded and we failed to process here.
+ * The Regdomain should be uniform across driver and fw. Since the
+ * FW has processed the command and sent a success status, we expect
+ * this function to succeed as well. If it doesn't, CTRY needs to be
+ * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but should still be handled */
+ WARN_ON(1);
+
+retfail:
+
+ return -EINVAL;
+}
+
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
@@ -780,10 +974,36 @@ void ath11k_reg_init(struct ath11k *ar)
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info)
+{
+ int i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]);
+ }
+
+ memset(reg_info, 0, sizeof(*reg_info));
+}
+
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
+ for (i = 0; i < ab->num_radios; i++)
+ ath11k_reg_reset_info(&ab->reg_info_store[i]);
+
+ kfree(ab->reg_info_store);
+ ab->reg_info_store = NULL;
+
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
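/* Aside on ath11k_reg_reset_info() above: it leans on kfree(NULL)
 * being a no-op and then memset()s the whole structure, so the same
 * cleanup can run repeatedly - which is why both ath11k_reg_free() and
 * the WMI event path can call it on the stored per-pdev reg_info. A
 * hedged userspace sketch of the idempotent-reset pattern:
 */
#include <stdlib.h>
#include <string.h>

struct sketch_info {
	int *rules_2ghz;
	int *rules_5ghz;
};

static void sketch_reset(struct sketch_info *info)
{
	if (!info)
		return;

	free(info->rules_2ghz);	/* free(NULL) is defined to do nothing */
	free(info->rules_5ghz);
	memset(info, 0, sizeof(*info));	/* safe to reset (or free) again */
}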
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 84daa6543..64edb7942 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -29,11 +30,20 @@ enum ath11k_dfs_region {
/* ATH11K Regulatory API's */
void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect);
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 786d5f36f..2da6da727 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_RX_DESC_H
#define ATH11K_RX_DESC_H
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 0b7b7122c..79e091134 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/relay.h>
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
index 96bfa16e1..789cff7c6 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.h
+++ b/drivers/net/wireless/ath/ath11k/spectral.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_SPECTRAL_H
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c9b012f97..c29b11ab5 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
index 83cb67686..cdaf4e01d 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.h
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_THERMAL_
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 9535745fe..235ab8ea7 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 2845b4313..442afda7e 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -4749,6 +4749,14 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->pdevs[0].pdev_id = 0;
}
+ if (!soc->reg_info_store) {
+ soc->reg_info_store = kcalloc(soc->num_radios,
+ sizeof(*soc->reg_info_store),
+ GFP_ATOMIC);
+ if (!soc->reg_info_store)
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -7060,32 +7068,15 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
wake_up(&wmi->tx_ce_desc_wq);
}
-static bool ath11k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
-
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
-
- return false;
-}
-
-static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
- struct sk_buff *skb,
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb,
enum wmi_reg_chan_list_cmd_type id)
{
- struct cur_regulatory_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath11k *ar;
+ struct cur_regulatory_info *reg_info;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
- if (!reg_info) {
- ret = -ENOMEM;
- goto fallback;
- }
+ if (!reg_info)
+ return -ENOMEM;
if (id == WMI_REG_CHAN_LIST_CC_ID)
ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
@@ -7093,118 +7084,22 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
- ath11k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
-
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
- */
- ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
- goto mem_free;
- }
-
- pdev_idx = reg_info->phy_id;
-
- /* Avoid default reg rule updates sent during FW recovery if
- * it is already available
- */
- spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
- ab->default_regd[pdev_idx]) {
- spin_unlock(&ab->base_lock);
+ ath11k_warn(ab, "failed to extract regulatory info\n");
goto mem_free;
}
- spin_unlock(&ab->base_lock);
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
- goto mem_free;
- else
- goto fallback;
- }
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
- */
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
- (char *)reg_info->alpha2, 2))
+ ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath11k_warn(ab, "failed to process regulatory info %d\n", ret);
goto mem_free;
-
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath11k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath11k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath11k_warn(ab, "failed to build regd from reg_info\n");
- goto fallback;
- }
-
- spin_lock(&ab->base_lock);
- if (ab->default_regd[pdev_idx]) {
- /* The initial rules from FW after WMI Init is to build
- * the default regd. From then on, any rules updated for
- * the pdev could be due to user reg changes.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* This regd would be applied during mac registration and is
- * held constant throughout for regd intersection purpose
- */
- ab->default_regd[pdev_idx] = regd;
}
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
- goto mem_free;
+ kfree(reg_info);
+ return 0;
-fallback:
- /* Fallback to older reg (by sending previous country setting
- * again if fw has succeeded and we failed to process here.
- * The Regdomain should be uniform across driver and fw. Since the
- * FW has processed the command and sent a success status, we expect
- * this function to succeed as well. If it doesn't, CTRY needs to be
- * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
- */
- /* TODO: This is rare, but still should also be handled */
- WARN_ON(1);
mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2ghz_ptr);
- kfree(reg_info->reg_rules_5ghz_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+ ath11k_reg_reset_info(reg_info);
+ kfree(reg_info);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 100bb816b..cd2098d78 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_WMI_H
@@ -1096,25 +1096,27 @@ enum wmi_tlv_vdev_param {
};
enum wmi_tlv_peer_flags {
- WMI_TLV_PEER_AUTH = 0x00000001,
- WMI_TLV_PEER_QOS = 0x00000002,
- WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
- WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
- WMI_TLV_PEER_APSD = 0x00000800,
- WMI_TLV_PEER_HT = 0x00001000,
- WMI_TLV_PEER_40MHZ = 0x00002000,
- WMI_TLV_PEER_STBC = 0x00008000,
- WMI_TLV_PEER_LDPC = 0x00010000,
- WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
- WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
- WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
- WMI_TLV_PEER_VHT = 0x02000000,
- WMI_TLV_PEER_80MHZ = 0x04000000,
- WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_AUTH = 0x00000001,
+ WMI_PEER_QOS = 0x00000002,
+ WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_PEER_HE = 0x00000400,
+ WMI_PEER_APSD = 0x00000800,
+ WMI_PEER_HT = 0x00001000,
+ WMI_PEER_40MHZ = 0x00002000,
+ WMI_PEER_STBC = 0x00008000,
+ WMI_PEER_LDPC = 0x00010000,
+ WMI_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_PEER_TWT_REQ = 0x00400000,
+ WMI_PEER_TWT_RESP = 0x00800000,
+ WMI_PEER_VHT = 0x02000000,
+ WMI_PEER_80MHZ = 0x04000000,
+ WMI_PEER_PMF = 0x08000000,
WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
WMI_PEER_160MHZ = 0x40000000,
WMI_PEER_SAFEMODE_EN = 0x80000000,
-
};
/** Enum list of TLV Tags for each parameter structure type. */
@@ -2580,7 +2582,6 @@ struct wmi_service_available_event {
struct ath11k_pdev_wmi {
struct ath11k_wmi_base *wmi_ab;
enum ath11k_htc_ep_id eid;
- const struct wmi_peer_flags_map *peer_flags;
u32 rx_decap_mode;
wait_queue_head_t tx_ce_desc_wq;
};
@@ -4062,31 +4063,6 @@ struct wmi_unit_test_cmd {
#define MAX_SUPPORTED_RATES 128
-#define WMI_PEER_AUTH 0x00000001
-#define WMI_PEER_QOS 0x00000002
-#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
-#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
-#define WMI_PEER_HE 0x00000400
-#define WMI_PEER_APSD 0x00000800
-#define WMI_PEER_HT 0x00001000
-#define WMI_PEER_40MHZ 0x00002000
-#define WMI_PEER_STBC 0x00008000
-#define WMI_PEER_LDPC 0x00010000
-#define WMI_PEER_DYN_MIMOPS 0x00020000
-#define WMI_PEER_STATIC_MIMOPS 0x00040000
-#define WMI_PEER_SPATIAL_MUX 0x00200000
-#define WMI_PEER_TWT_REQ 0x00400000
-#define WMI_PEER_TWT_RESP 0x00800000
-#define WMI_PEER_VHT 0x02000000
-#define WMI_PEER_80MHZ 0x04000000
-#define WMI_PEER_PMF 0x08000000
-/* TODO: Place holder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
- * Need to be cleaned up
- */
-#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
-#define WMI_PEER_160MHZ 0x40000000
-#define WMI_PEER_SAFEMODE_EN 0x80000000
-
struct beacon_tmpl_params {
u8 vdev_id;
u32 tim_ie_offset;
@@ -4975,6 +4951,7 @@ struct ath11k_targ_cap {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -5754,7 +5731,6 @@ struct ath11k_wmi_base {
struct completion unified_ready;
DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
wait_queue_head_t tx_credits_wq;
- const struct wmi_peer_flags_map *peer_flags;
u32 num_mem_chunks;
u32 rx_decap_mode;
struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
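With the duplicated #define block removed above, the enum is now the single namespace for peer flags, and it picks up the WMI_PEER_HE and TWT request/response bits the defines used to carry. A hedged sketch of how peer-assoc code can build the bitmap from the unified names — the sta accessors are standard mac80211, but the helper itself is illustrative, not quoted from the driver:

	static u32 peer_assoc_flags(struct ieee80211_sta *sta, bool qos)
	{
		u32 flags = WMI_PEER_AUTH;	/* baseline for an associated peer */

		if (qos)
			flags |= WMI_PEER_QOS;
		if (sta->deflink.ht_cap.ht_supported)
			flags |= WMI_PEER_HT;
		if (sta->deflink.vht_cap.vht_supported)
			flags |= WMI_PEER_VHT;
		if (sta->deflink.he_cap.has_he)
			flags |= WMI_PEER_HE;

		return flags;
	}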
diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h
index 553ba850d..c85811e3f 100644
--- a/drivers/net/wireless/ath/ath11k/wow.h
+++ b/drivers/net/wireless/ath/ath11k/wow.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WOW_H_
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index 4f9c514c1..e135d2b1b 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -2,7 +2,7 @@
config ATH12K
tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)"
depends on MAC80211 && HAS_DMA && PCI
- depends on CRYPTO_MICHAEL_MIC
+ select CRYPTO_MICHAEL_MIC
select QCOM_QMI_HELPERS
select MHI_BUS
select QRTR
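Promoting CRYPTO_MICHAEL_MIC from "depends on" to "select" means enabling ATH12K now pulls the TKIP Michael MIC implementation in automatically, instead of silently hiding the driver until the user finds and enables the crypto option. The usual caveat with select — that it does not honor the selected symbol's own dependencies — is why it is normally reserved for small library-style symbols such as this one.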
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index d67494de4..01fb9b2ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -199,6 +199,8 @@ enum ath12k_dev_flags {
ATH12K_FLAG_REGISTERED,
ATH12K_FLAG_QMI_FAIL,
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
+ ATH12K_FLAG_CE_IRQ_ENABLED,
+ ATH12K_FLAG_EXT_IRQ_ENABLED,
};
enum ath12k_monitor_flags {
@@ -467,7 +469,6 @@ struct ath12k {
struct ath12k_base *ab;
struct ath12k_pdev *pdev;
struct ieee80211_hw *hw;
- struct ieee80211_ops *ops;
struct ath12k_wmi_pdev *wmi;
struct ath12k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 6893466f6..a6f81f2f9 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -961,9 +961,7 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
- ab->hw_params->hal_params->rx_buf_rbm,
- true);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);
}
/* TODO: Implement handler for other interrupts */
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 61f765432..1df3cdd46 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -31,7 +31,7 @@ struct dp_srng {
u32 ring_id;
};
-struct dp_rxdma_ring {
+struct dp_rxdma_mon_ring {
struct dp_srng refill_buf_ring;
struct idr bufs_idr;
/* Protects bufs_idr */
@@ -39,6 +39,11 @@ struct dp_rxdma_ring {
int bufs_max;
};
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ int bufs_max;
+};
+
#define ATH12K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
struct dp_tx_ring {
@@ -353,8 +358,8 @@ struct ath12k_dp {
struct dp_rxdma_ring rx_refill_buf_ring;
struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
- struct dp_rxdma_ring rxdma_mon_buf_ring;
- struct dp_rxdma_ring tx_mon_buf_ring;
+ struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
};
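The split separates two buffer-tracking schemes that used to share one struct: the monitor rings still hand out software buf_id cookies resolved through the idr, while the ordinary refill ring is now driven purely by pre-allocated ath12k_rx_desc_info entries whose cookies the hardware returns directly, so it needs nothing beyond the SRNG and a depth. A sketch of the monitor-side lookup the idr fields exist for — the helper name is hypothetical, only the struct fields come from the hunk above:

	static struct sk_buff *mon_ring_skb(struct dp_rxdma_mon_ring *ring, int buf_id)
	{
		struct sk_buff *skb;

		spin_lock_bh(&ring->idr_lock);
		skb = idr_find(&ring->bufs_idr, buf_id);	/* software cookie -> skb */
		spin_unlock_bh(&ring->idr_lock);

		return skb;
	}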
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index f44bc5494..be4b39f5f 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -797,7 +797,7 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
/* TODO: add msdu start parsing logic */
break;
case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
struct dp_mon_packet_info *packet_info =
(struct dp_mon_packet_info *)tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
@@ -1091,7 +1091,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -1104,6 +1104,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
+ (status->bw == RATE_INFO_BW_320) ? "320" : "",
status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->nss,
@@ -1259,7 +1260,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
}
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
- struct dp_rxdma_ring *buf_ring,
+ struct dp_rxdma_mon_ring *buf_ring,
int req_entries)
{
struct hal_mon_buf_ring *mon_buf;
@@ -1902,7 +1903,7 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
+ struct dp_rxdma_mon_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
struct dp_mon_packet_info *packet_info =
(struct dp_mon_packet_info *)tlv_data;
int buf_id = u32_get_bits(packet_info->cookie,
@@ -2067,7 +2068,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
struct ath12k_skb_rxcb *rxcb;
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
- struct dp_rxdma_ring *buf_ring;
+ struct dp_rxdma_mon_ring *buf_ring;
u64 cookie;
u32 ppdu_id;
int num_buffs_reaped = 0, srng_id, buf_id;
@@ -2480,7 +2481,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct ath12k_skb_rxcb *rxcb;
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
- struct dp_rxdma_ring *buf_ring;
+ struct dp_rxdma_mon_ring *buf_ring;
struct ath12k_sta *arsta = NULL;
struct ath12k_peer *peer;
u64 cookie;
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
index c18c38579..fb9e9c176 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.h
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_MON_H
@@ -80,7 +80,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
int mac_id, struct sk_buff *skb,
struct napi_struct *napi);
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
- struct dp_rxdma_ring *buf_ring,
+ struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
int *budget, enum dp_monitor_mode monitor_mode,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 3543fadac..1ee83f765 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -256,22 +256,20 @@ static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
}
/* Returns number of Rx buffers replenished */
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
- int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- bool hw_cc)
+ int req_entries)
{
struct ath12k_buffer_addr *desc;
struct hal_srng *srng;
struct sk_buff *skb;
int num_free;
int num_remain;
- int buf_id;
u32 cookie;
dma_addr_t paddr;
struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info *rx_desc;
+ enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;
req_entries = min(req_entries, rx_ring->bufs_max);
@@ -307,42 +305,29 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
- if (hw_cc) {
- spin_lock_bh(&dp->rx_desc_lock);
-
- /* Get desc from free list and store in used list
- * for cleanup purposes
- *
- * TODO: pass the removed descs rather than
- * add/read to optimize
- */
- rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
- struct ath12k_rx_desc_info,
- list);
- if (!rx_desc) {
- spin_unlock_bh(&dp->rx_desc_lock);
- goto fail_dma_unmap;
- }
-
- rx_desc->skb = skb;
- cookie = rx_desc->cookie;
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
+ spin_lock_bh(&dp->rx_desc_lock);
+ /* Get desc from free list and store in used list
+ * for cleanup purposes
+ *
+ * TODO: pass the removed descs rather than
+ * add/read to optimize
+ */
+ rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ struct ath12k_rx_desc_info,
+ list);
+ if (!rx_desc) {
spin_unlock_bh(&dp->rx_desc_lock);
- } else {
- spin_lock_bh(&rx_ring->idr_lock);
- buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
- rx_ring->bufs_max * 3, GFP_ATOMIC);
- spin_unlock_bh(&rx_ring->idr_lock);
- if (buf_id < 0)
- goto fail_dma_unmap;
- cookie = u32_encode_bits(mac_id,
- DP_RXDMA_BUF_COOKIE_PDEV_ID) |
- u32_encode_bits(buf_id,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
+ goto fail_dma_unmap;
}
+ rx_desc->skb = skb;
+ cookie = rx_desc->cookie;
+ list_del(&rx_desc->list);
+ list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
goto fail_buf_unassign;
@@ -361,17 +346,11 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
return req_entries - num_remain;
fail_buf_unassign:
- if (hw_cc) {
- spin_lock_bh(&dp->rx_desc_lock);
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
- rx_desc->skb = NULL;
- spin_unlock_bh(&dp->rx_desc_lock);
- } else {
- spin_lock_bh(&rx_ring->idr_lock);
- idr_remove(&rx_ring->bufs_idr, buf_id);
- spin_unlock_bh(&rx_ring->idr_lock);
- }
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_del(&rx_desc->list);
+ list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
+ rx_desc->skb = NULL;
+ spin_unlock_bh(&dp->rx_desc_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@@ -385,8 +364,8 @@ fail_free_skb:
return req_entries - num_remain;
}
-static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
- struct dp_rxdma_ring *rx_ring)
+static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring)
{
struct sk_buff *skb;
int buf_id;
@@ -411,46 +390,49 @@ static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
- ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+ ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
+
+ ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
+
+ return 0;
+}
- rx_ring = &dp->rxdma_mon_buf_ring;
- ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
+ struct dp_rxdma_mon_ring *rx_ring,
+ u32 ringtype)
+{
+ int num_entries;
- rx_ring = &dp->tx_mon_buf_ring;
- ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
return 0;
}
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
- struct dp_rxdma_ring *rx_ring,
- u32 ringtype)
+ struct dp_rxdma_ring *rx_ring)
{
int num_entries;
num_entries = rx_ring->refill_buf_ring.size /
- ath12k_hal_srng_get_entrysize(ab, ringtype);
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
rx_ring->bufs_max = num_entries;
- if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
- ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
- else
- ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
- ab->hw_params->hal_params->rx_buf_rbm,
- ringtype == HAL_RXDMA_BUF);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
+
return 0;
}
static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
int ret;
- ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
- HAL_RXDMA_BUF);
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_RXDMA_BUF\n");
@@ -458,18 +440,18 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
}
if (ab->hw_params->rxdma1_enable) {
- rx_ring = &dp->rxdma_mon_buf_ring;
- ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
- HAL_RXDMA_MONITOR_BUF);
+ ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
+ &dp->rxdma_mon_buf_ring,
+ HAL_RXDMA_MONITOR_BUF);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
- rx_ring = &dp->tx_mon_buf_ring;
- ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
- HAL_TX_MONITOR_BUF);
+ ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
+ &dp->tx_mon_buf_ring,
+ HAL_TX_MONITOR_BUF);
if (ret) {
ath12k_warn(ab,
"failed to setup HAL_TX_MONITOR_BUF\n");
@@ -1339,9 +1321,6 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
bool is_ampdu = false;
- if (!usr_stats)
- return;
-
if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
return;
@@ -2438,7 +2417,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2452,6 +2431,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
+ (status->bw == RATE_INFO_BW_320) ? "320" : "",
status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->nss,
@@ -2714,9 +2694,7 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- /* TODO: Move to implicit BM? */
- ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
- ab->hw_params->hal_params->rx_buf_rbm, true);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
@@ -3494,8 +3472,7 @@ exit:
rx_ring = &dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
- ab->hw_params->hal_params->rx_buf_rbm, true);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
return tot_n_bufs_reaped;
}
@@ -3808,8 +3785,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
- ab->hw_params->hal_params->rx_buf_rbm, true);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
@@ -4090,9 +4066,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
int i, ret;
- idr_init(&dp->rx_refill_buf_ring.bufs_idr);
- spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
-
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
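After this rewrite the refill path has exactly one bookkeeping scheme: pop an ath12k_rx_desc_info off the free list, attach the skb, move the descriptor to the used list, and program its fixed cookie into the ring entry. The mac_id/buf_id cookie encoding, the hw_cc flag, and the caller-supplied return-buffer manager all disappear — the RBM now comes from hw_params inside the function — so every call site collapses to the three-argument form:

	/* e.g. topping the refill ring back up after reaping buffers */
	ath12k_dp_rx_bufs_replenish(ab, &ab->dp.rx_refill_buf_ring, num_buffs_reaped);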
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index c955b5c85..05b3d5581 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -116,11 +116,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi,
int budget);
-int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
- int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- bool hw_cc);
+ int req_entries);
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index fcfb6c819..095216eab 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@@ -61,6 +61,7 @@ enum hal_rx_bw {
HAL_RX_BW_40MHZ,
HAL_RX_BW_80MHZ,
HAL_RX_BW_160MHZ,
+ HAL_RX_BW_320MHZ,
HAL_RX_BW_MAX,
};
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 237fb8e7c..b965fc46a 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -343,6 +343,9 @@ ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
case ATH12K_BW_160:
ret = RATE_INFO_BW_160;
break;
+ case ATH12K_BW_320:
+ ret = RATE_INFO_BW_320;
+ break;
}
return ret;
@@ -359,6 +362,8 @@ enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw b
return ATH12K_BW_80;
case RATE_INFO_BW_160:
return ATH12K_BW_160;
+ case RATE_INFO_BW_320:
+ return ATH12K_BW_320;
default:
return ATH12K_BW_20;
}
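Together with ATH12K_BW_320 in the mac.h hunk below and WMI_PEER_CHWIDTH_320MHZ in wmi.h, these two switches keep the bandwidth mapping total for EHT: 320 MHz now survives a round trip between the driver and mac80211 representations instead of collapsing to the default. Illustratively:

	/* ath12k -> mac80211 */
	ath12k_mac_bw_to_mac80211_bw(ATH12K_BW_320);		/* == RATE_INFO_BW_320 */
	/* mac80211 -> ath12k */
	ath12k_mac_mac80211_bw_to_ath12k_bw(RATE_INFO_BW_320);	/* == ATH12K_BW_320 */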
@@ -3726,6 +3731,9 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
case IEEE80211_STA_RX_BW_160:
bw = WMI_PEER_CHWIDTH_160MHZ;
break;
+ case IEEE80211_STA_RX_BW_320:
+ bw = WMI_PEER_CHWIDTH_320MHZ;
+ break;
default:
ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
sta->deflink.bandwidth, sta->addr);
@@ -4987,7 +4995,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
ret);
- ieee80211_free_txskb(ar->hw, skb);
+ ieee80211_free_txskb(hw, skb);
}
return;
}
@@ -4995,7 +5003,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
- ieee80211_free_txskb(ar->hw, skb);
+ ieee80211_free_txskb(hw, skb);
}
}
@@ -5596,7 +5604,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
- param_value = ar->hw->wiphy->rts_threshold;
+ param_value = hw->wiphy->rts_threshold;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -6258,10 +6266,11 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+ struct ieee80211_hw *hw = ar->hw;
lockdep_assert_held(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ieee80211_iterate_active_interfaces_atomic(hw,
IEEE80211_IFACE_ITER_NORMAL,
ath12k_mac_change_chanctx_cnt_iter,
&arg);
@@ -6272,7 +6281,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
if (!arg.vifs)
return;
- ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ieee80211_iterate_active_interfaces_atomic(hw,
IEEE80211_IFACE_ITER_NORMAL,
ath12k_mac_change_chanctx_fill_iter,
&arg);
@@ -6836,7 +6845,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
return ret;
}
- ieee80211_iterate_stations_atomic(ar->hw,
+ ieee80211_iterate_stations_atomic(hw,
ath12k_mac_disable_peer_fixed_rate,
arvif);
} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
@@ -6882,14 +6891,14 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
return -EINVAL;
}
- ieee80211_iterate_stations_atomic(ar->hw,
+ ieee80211_iterate_stations_atomic(hw,
ath12k_mac_disable_peer_fixed_rate,
arvif);
mutex_lock(&ar->conf_mutex);
arvif->bitrate_mask = *mask;
- ieee80211_iterate_stations_atomic(ar->hw,
+ ieee80211_iterate_stations_atomic(hw,
ath12k_mac_set_bitrate_mask_iter,
arvif);
@@ -6927,7 +6936,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
ar->state = ATH12K_STATE_ON;
- ieee80211_wake_queues(ar->hw);
+ ieee80211_wake_queues(hw);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
@@ -7151,6 +7160,7 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
u32 supported_bands)
{
+ struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
void *channels;
@@ -7176,7 +7186,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
- ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
@@ -7203,7 +7213,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -7225,7 +7235,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
@@ -7244,6 +7254,8 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct wiphy *wiphy = hw->wiphy;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
@@ -7294,8 +7306,8 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80);
- ar->hw->wiphy->iface_combinations = combinations;
- ar->hw->wiphy->n_iface_combinations = 1;
+ wiphy->iface_combinations = combinations;
+ wiphy->n_iface_combinations = 1;
return 0;
}
@@ -7339,9 +7351,12 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
static void __ath12k_mac_unregister(struct ath12k *ar)
{
+ struct ieee80211_hw *hw = ar->hw;
+ struct wiphy *wiphy = hw->wiphy;
+
cancel_work_sync(&ar->regd_update_work);
- ieee80211_unregister_hw(ar->hw);
+ ieee80211_unregister_hw(hw);
idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
@@ -7350,10 +7365,10 @@ static void __ath12k_mac_unregister(struct ath12k *ar)
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
- kfree(ar->hw->wiphy->iface_combinations[0].limits);
- kfree(ar->hw->wiphy->iface_combinations);
+ kfree(wiphy->iface_combinations[0].limits);
+ kfree(wiphy->iface_combinations);
- SET_IEEE80211_DEV(ar->hw, NULL);
+ SET_IEEE80211_DEV(hw, NULL);
}
void ath12k_mac_unregister(struct ath12k_base *ab)
@@ -7375,6 +7390,8 @@ void ath12k_mac_unregister(struct ath12k_base *ab)
static int __ath12k_mac_register(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct wiphy *wiphy = hw->wiphy;
struct ath12k_pdev_cap *cap = &ar->pdev->cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
@@ -7392,9 +7409,9 @@ static int __ath12k_mac_register(struct ath12k *ar)
ath12k_pdev_caps_update(ar);
- SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+ SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
- SET_IEEE80211_DEV(ar->hw, ab->dev);
+ SET_IEEE80211_DEV(hw, ab->dev);
ret = ath12k_mac_setup_channels_rates(ar,
cap->supported_bands);
@@ -7410,103 +7427,102 @@ static int __ath12k_mac_register(struct ath12k *ar)
goto err_free_channels;
}
- ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
- ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+ wiphy->available_antennas_rx = cap->rx_chain_mask;
+ wiphy->available_antennas_tx = cap->tx_chain_mask;
- ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes;
+ wiphy->interface_modes = ab->hw_params->interface_modes;
- if (ar->hw->wiphy->bands[NL80211_BAND_2GHZ] &&
- ar->hw->wiphy->bands[NL80211_BAND_5GHZ] &&
- ar->hw->wiphy->bands[NL80211_BAND_6GHZ])
- ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+ if (wiphy->bands[NL80211_BAND_2GHZ] &&
+ wiphy->bands[NL80211_BAND_5GHZ] &&
+ wiphy->bands[NL80211_BAND_6GHZ])
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
- ieee80211_hw_set(ar->hw, SIGNAL_DBM);
- ieee80211_hw_set(ar->hw, SUPPORTS_PS);
- ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
- ieee80211_hw_set(ar->hw, MFP_CAPABLE);
- ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
- ieee80211_hw_set(ar->hw, AP_LINK_PS);
- ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
- ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
- ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
- ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
- ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
- ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
- ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(hw, REPORTS_LOW_ACK);
if (ht_cap & WMI_HT_CAP_ENABLED) {
- ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
- ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
- ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
- ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
- ieee80211_hw_set(ar->hw, USES_RSS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, USES_RSS);
}
- ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
- ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
/* TODO: Check if HT capability advertised from firmware is different
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
- ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+ wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
- ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
- ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+ wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
- ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
+ hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
- ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
- ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
- ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->max_remain_on_channel_duration = 5000;
- ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
- ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+ wiphy->max_ap_assoc_sta = ar->max_num_stations;
- ar->hw->queues = ATH12K_HW_MAX_QUEUES;
- ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
- ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
- ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->queues = ATH12K_HW_MAX_QUEUES;
+ wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
+ hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
- ar->hw->vif_data_size = sizeof(struct ath12k_vif);
- ar->hw->sta_data_size = sizeof(struct ath12k_sta);
+ hw->vif_data_size = sizeof(struct ath12k_vif);
+ hw->sta_data_size = sizeof(struct ath12k_sta);
- wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
- wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
- ar->hw->wiphy->cipher_suites = cipher_suites;
- ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+ wiphy->cipher_suites = cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
- ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
- ar->hw->wiphy->num_iftype_ext_capab =
- ARRAY_SIZE(ath12k_iftypes_ext_capa);
+ wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
if (ar->supports_6ghz) {
- wiphy_ext_feature_set(ar->hw->wiphy,
+ wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
- wiphy_ext_feature_set(ar->hw->wiphy,
+ wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
}
- wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_PUNCT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PUNCT);
- ath12k_reg_init(ar);
+ ath12k_reg_init(hw);
if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
- ar->hw->netdev_features = NETIF_F_HW_CSUM;
- ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
- ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
}
- ret = ieee80211_register_hw(ar->hw);
+ ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
@@ -7518,7 +7534,7 @@ static int __ath12k_mac_register(struct ath12k *ar)
* while. But that time is so short and in practise it make
* a difference in real life.
*/
- ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+ wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
/* Apply the regd received during initialization */
ret = ath12k_regd_update(ar, true);
@@ -7530,11 +7546,11 @@ static int __ath12k_mac_register(struct ath12k *ar)
return 0;
err_unregister_hw:
- ieee80211_unregister_hw(ar->hw);
+ ieee80211_unregister_hw(hw);
err_free_if_combs:
- kfree(ar->hw->wiphy->iface_combinations[0].limits);
- kfree(ar->hw->wiphy->iface_combinations);
+ kfree(wiphy->iface_combinations[0].limits);
+ kfree(wiphy->iface_combinations);
err_free_channels:
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
@@ -7542,7 +7558,7 @@ err_free_channels:
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
err:
- SET_IEEE80211_DEV(ar->hw, NULL);
+ SET_IEEE80211_DEV(hw, NULL);
return ret;
}
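The long run of mechanical replacements above — caching hw and wiphy in locals instead of dereferencing ar->hw-> everywhere — is more than line-length hygiene: it concentrates every assumption that one radio owns one ieee80211_hw into a few assignments at the top of each function, plausibly groundwork for letting several radios share a single registered hw later. The ieee80211_free_txskb() changes in ath12k_mac_op_tx likewise switch to the hw the callback was actually invoked with rather than the radio's cached pointer, which is the safer choice in exactly that future.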
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 7d71ae1ab..7c63bb628 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -43,6 +43,7 @@ enum ath12k_supported_bw {
ATH12K_BW_40 = 1,
ATH12K_BW_80 = 2,
ATH12K_BW_160 = 3,
+ ATH12K_BW_320 = 4,
};
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index 27eb38b2b..d5441ddb3 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -251,6 +251,7 @@ static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci)
u32 user_base_data, base_vector;
int ret, num_vectors, i;
int *irq;
+ unsigned int msi_data;
ret = ath12k_pci_get_user_msi_assignment(ab,
"MHI", &num_vectors,
@@ -265,9 +266,15 @@ static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci)
if (!irq)
return -ENOMEM;
- for (i = 0; i < num_vectors; i++)
- irq[i] = ath12k_pci_get_msi_irq(ab->dev,
- base_vector + i);
+ msi_data = base_vector;
+ for (i = 0; i < num_vectors; i++) {
+ if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ irq[i] = ath12k_pci_get_msi_irq(ab->dev,
+ msi_data++);
+ else
+ irq[i] = ath12k_pci_get_msi_irq(ab->dev,
+ msi_data);
+ }
ab_pci->mhi_ctrl->irq = irq;
ab_pci->mhi_ctrl->nr_irqs = num_vectors;
@@ -374,6 +381,9 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
goto free_controller;
}
+ if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xffffffff;
mhi_ctrl->sbl_size = SZ_512K;
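When the fallback (introduced in pci.c below) leaves the device with a single MSI vector, every MHI event ring has to share it: the loop stops advancing msi_data, and the controller is registered with IRQF_SHARED | IRQF_NOBALANCING, since balancing a single shared line is pointless and would fight the affinity hint set in pci.c. Condensed, with multi_msi standing in for the flag test:

	for (i = 0; i < num_vectors; i++)
		irq[i] = ath12k_pci_get_msi_irq(ab->dev,
						multi_msi ? base_vector + i
							  : base_vector);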
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index a6a5f9bcf..f0d2e2d87 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -60,6 +60,17 @@ static const struct ath12k_msi_config ath12k_msi_config[] = {
},
};
+static const struct ath12k_msi_config msi_config_one_msi = {
+ .total_vectors = 1,
+ .total_users = 4,
+ .users = (struct ath12k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 1, .base_vector = 0 },
+ },
+};
+
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
"bhi",
"mhi-er0",
@@ -355,16 +366,30 @@ static void ath12k_pci_free_irq(struct ath12k_base *ab)
static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
u32 irq_idx;
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
u32 irq_idx;
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
@@ -373,6 +398,8 @@ static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
int i;
+ clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -397,20 +424,27 @@ static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
{
struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
- ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+ enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath12k_ce_pipe *ce_pipe = arg;
+ struct ath12k_base *ab = ce_pipe->ab;
+ int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
- ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
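This is the recurring shape of the single-MSI rework in this file: the hard-IRQ handler first checks a driver-global "this class of interrupts is enabled" bit so a shared vector can be ignored while the block is quiesced, then masks only its own line with disable_irq_nosync() and defers to the bottom half, which re-enables the line when processing is done. As a generic pattern, with hypothetical names:

	static irqreturn_t hard_irq(int irq, void *arg)
	{
		struct pipe *p = arg;

		if (!test_bit(MY_IRQS_ENABLED, &p->ab->dev_flags))
			return IRQ_HANDLED;	/* shared vector, currently not ours */

		disable_irq_nosync(irq);	/* mask this line until the BH runs */
		tasklet_schedule(&p->bh);	/* BH ends with enable_irq(irq) */
		return IRQ_HANDLED;
	}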
@@ -418,8 +452,15 @@ static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
int i;
+ /* In case of one MSI vector, we handle irq enable/disable
+ * in a uniform way since we only have one irq
+ */
+ if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
@@ -428,6 +469,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;
+ clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
@@ -440,8 +483,15 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
int i;
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return;
+
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
@@ -467,11 +517,13 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
napi);
struct ath12k_base *ab = irq_grp->ab;
int work_done;
+ int i;
work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ath12k_pci_ext_grp_enable(irq_grp);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
@@ -483,13 +535,19 @@ static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
struct ath12k_ext_irq_grp *irq_grp = arg;
+ struct ath12k_base *ab = irq_grp->ab;
+ int i;
+
+ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
- ath12k_pci_ext_grp_disable(irq_grp);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
@@ -498,6 +556,7 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
@@ -544,23 +603,32 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
- IRQF_SHARED,
+ ab_pci->irq_flags,
"DP_EXT_IRQ", irq_grp);
if (ret) {
ath12k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
-
- disable_irq_nosync(ab->irq_num[irq_idx]);
}
+ ath12k_pci_ext_grp_disable(irq_grp);
}
return 0;
}
+static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
+ const struct cpumask *m)
+{
+ if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ return 0;
+
+ return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+}
+
static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
struct ath12k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
@@ -589,7 +657,7 @@ static int ath12k_pci_config_irq(struct ath12k_base *ab)
tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
- IRQF_SHARED, irq_name[irq_idx],
+ ab_pci->irq_flags, irq_name[irq_idx],
ce_pipe);
if (ret) {
ath12k_err(ab, "failed to request irq %d: %d\n",
@@ -626,6 +694,8 @@ static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
{
int i;
+ set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ab->hw_params->ce_count; i++) {
if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
@@ -670,16 +740,27 @@ static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
msi_config->total_vectors,
msi_config->total_vectors,
PCI_IRQ_MSI);
- if (num_vectors != msi_config->total_vectors) {
- ath12k_err(ab, "failed to get %d MSI vectors, only %d available",
- msi_config->total_vectors, num_vectors);
- if (num_vectors >= 0)
- return -EINVAL;
- else
- return num_vectors;
+ if (num_vectors == msi_config->total_vectors) {
+ set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
+ ab_pci->irq_flags = IRQF_SHARED;
+ } else {
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ 1,
+ 1,
+ PCI_IRQ_MSI);
+ if (num_vectors < 0) {
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+ clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
+ ab_pci->msi_config = &msi_config_one_msi;
+ ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n");
}
+ ath12k_info(ab, "MSI vectors: %d\n", num_vectors);
+
ath12k_pci_msi_disable(ab_pci);
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
@@ -700,6 +781,7 @@ static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
free_msi_vector:
pci_free_irq_vectors(ab_pci->pdev);
+reset_msi_config:
return ret;
}
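The allocation becomes "all or one": ask for the full vector set, and if the platform refuses, retry for exactly one, switch the user table to msi_config_one_msi (every user on base_vector 0), and record IRQF_SHARED | IRQF_NOBALANCING so each later request_irq uses compatible flags. Stripped of the ath12k bookkeeping, the skeleton is:

	int n = pci_alloc_irq_vectors(pdev, want, want, PCI_IRQ_MSI);
	if (n != want) {
		n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (n < 0)
			return n;	/* not even one vector available */
		/* single-vector mode: shared line, no irqbalance */
	}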
@@ -708,6 +790,25 @@ static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
pci_free_irq_vectors(ab_pci->pdev);
}
+static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci)
+{
+ struct msi_desc *msi_desc;
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath12k_err(ab_pci->ab, "msi_desc is NULL!\n");
+ pci_free_irq_vectors(ab_pci->pdev);
+ return -EINVAL;
+ }
+
+ ab_pci->msi_ep_base_data = msi_desc->msg.data;
+
+ ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
+ ab_pci->msi_ep_base_data);
+
+ return 0;
+}
+
static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
struct ath12k_base *ab = ab_pci->ab;
@@ -891,11 +992,11 @@ int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
- *user_base_data = msi_config->users[idx].base_vector
- + ab_pci->msi_ep_base_data;
- *base_vector = msi_config->users[idx].base_vector;
+ *base_vector = msi_config->users[idx].base_vector;
+ *user_base_data = *base_vector + ab_pci->msi_ep_base_data;
- ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
@@ -956,6 +1057,8 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;
+ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
@@ -1000,7 +1103,10 @@ int ath12k_pci_start(struct ath12k_base *ab)
set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
- ath12k_pci_aspm_restore(ab_pci);
+ if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
+ ath12k_pci_aspm_restore(ab_pci);
+ else
+ ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
ath12k_pci_ce_irqs_enable(ab);
ath12k_ce_rx_post_buf(ab);
@@ -1262,10 +1368,16 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_pci_msi_free;
+ ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+ if (ret) {
+ ath12k_err(ab, "failed to set irq affinity %d\n", ret);
+ goto err_pci_msi_free;
+ }
+
ret = ath12k_mhi_register(ab_pci);
if (ret) {
ath12k_err(ab, "failed to register mhi: %d\n", ret);
- goto err_pci_msi_free;
+ goto err_irq_affinity_cleanup;
}
ret = ath12k_hal_srng_init(ab);
@@ -1286,6 +1398,17 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
goto err_ce_free;
}
+ /* kernel may allocate a dummy vector before request_irq and
+ * then allocate a real vector when request_irq is called.
+ * So get msi_data here again to avoid spurious interrupt
+ * as msi_data will be configured to srngs.
+ */
+ ret = ath12k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to config msi_data: %d\n", ret);
+ goto err_free_irq;
+ }
+
ret = ath12k_core_init(ab);
if (ret) {
ath12k_err(ab, "failed to init core: %d\n", ret);
@@ -1308,6 +1431,9 @@ err_mhi_unregister:
err_pci_msi_free:
ath12k_pci_msi_free(ab_pci);
+err_irq_affinity_cleanup:
+ ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
err_pci_free_region:
ath12k_pci_free_region(ab_pci);
@@ -1322,6 +1448,8 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
struct ath12k_base *ab = pci_get_drvdata(pdev);
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab);
ath12k_qmi_deinit_service(ab);
@@ -1348,7 +1476,9 @@ qmi_fail:
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
struct ath12k_base *ab = pci_get_drvdata(pdev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath12k_pci_power_down(ab);
}
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 9a17a7dcd..b2edf32ad 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -84,6 +84,7 @@ enum ath12k_pci_flags {
ATH12K_PCI_FLAG_INIT_DONE,
ATH12K_PCI_FLAG_IS_MSI_64,
ATH12K_PCI_ASPM_RESTORE,
+ ATH12K_PCI_FLAG_MULTI_MSI_VECTORS,
};
struct ath12k_pci_ops {
@@ -108,6 +109,7 @@ struct ath12k_pci {
/* enum ath12k_pci_flags */
unsigned long flags;
u16 link_ctl;
+ unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
};
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index 1dac84398..29542c46b 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -28,11 +28,11 @@ static const struct ieee80211_regdomain ath12k_world_regd = {
}
};
-static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2)
+static bool ath12k_regdom_changes(struct ieee80211_hw *hw, char *alpha2)
{
const struct ieee80211_regdomain *regd;
- regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ regd = rcu_dereference_rtnl(hw->wiphy->regd);
/* This can happen during wiphy registration where the previous
* user request is received before we update the regd received
* from firmware.
@@ -71,7 +71,7 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
return;
}
- if (!ath12k_regdom_changes(ar, request->alpha2)) {
+ if (!ath12k_regdom_changes(hw, request->alpha2)) {
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n");
return;
}
@@ -199,6 +199,7 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
+ struct ieee80211_hw *hw = ar->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
@@ -246,9 +247,9 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
}
rtnl_lock();
- wiphy_lock(ar->hw->wiphy);
- ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
- wiphy_unlock(ar->hw->wiphy);
+ wiphy_lock(hw->wiphy);
+ ret = regulatory_set_wiphy_regd_sync(hw->wiphy, regd_copy);
+ wiphy_unlock(hw->wiphy);
rtnl_unlock();
kfree(regd_copy);
@@ -729,10 +730,10 @@ void ath12k_regd_update_work(struct work_struct *work)
}
}
-void ath12k_reg_init(struct ath12k *ar)
+void ath12k_reg_init(struct ieee80211_hw *hw)
{
- ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
- ar->hw->wiphy->reg_notifier = ath12k_reg_notifier;
+ hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ hw->wiphy->reg_notifier = ath12k_reg_notifier;
}
void ath12k_reg_free(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index d4a0776e1..29c7ec326 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -89,7 +89,7 @@ enum ath12k_reg_phy_bitmap {
ATH12K_REG_PHY_BITMAP_NO11BE = BIT(6),
};
-void ath12k_reg_init(struct ath12k *ar);
+void ath12k_reg_init(struct ieee80211_hw *hw);
void ath12k_reg_free(struct ath12k_base *ab);
void ath12k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 629373d67..06e5b9b40 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -1146,25 +1146,27 @@ enum wmi_tlv_vdev_param {
};
enum wmi_tlv_peer_flags {
- WMI_TLV_PEER_AUTH = 0x00000001,
- WMI_TLV_PEER_QOS = 0x00000002,
- WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
- WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
- WMI_TLV_PEER_APSD = 0x00000800,
- WMI_TLV_PEER_HT = 0x00001000,
- WMI_TLV_PEER_40MHZ = 0x00002000,
- WMI_TLV_PEER_STBC = 0x00008000,
- WMI_TLV_PEER_LDPC = 0x00010000,
- WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
- WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
- WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
- WMI_TLV_PEER_VHT = 0x02000000,
- WMI_TLV_PEER_80MHZ = 0x04000000,
- WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_AUTH = 0x00000001,
+ WMI_PEER_QOS = 0x00000002,
+ WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_PEER_HE = 0x00000400,
+ WMI_PEER_APSD = 0x00000800,
+ WMI_PEER_HT = 0x00001000,
+ WMI_PEER_40MHZ = 0x00002000,
+ WMI_PEER_STBC = 0x00008000,
+ WMI_PEER_LDPC = 0x00010000,
+ WMI_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_PEER_TWT_REQ = 0x00400000,
+ WMI_PEER_TWT_RESP = 0x00800000,
+ WMI_PEER_VHT = 0x02000000,
+ WMI_PEER_80MHZ = 0x04000000,
+ WMI_PEER_PMF = 0x08000000,
WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
WMI_PEER_160MHZ = 0x40000000,
WMI_PEER_SAFEMODE_EN = 0x80000000,
-
};
enum wmi_tlv_peer_flags_ext {
@@ -2220,6 +2222,7 @@ enum wmi_peer_chwidth {
WMI_PEER_CHWIDTH_40MHZ = 1,
WMI_PEER_CHWIDTH_80MHZ = 2,
WMI_PEER_CHWIDTH_160MHZ = 3,
+ WMI_PEER_CHWIDTH_320MHZ = 4,
};
enum wmi_beacon_gen_mode {
@@ -3844,31 +3847,6 @@ struct wmi_unit_test_cmd {
#define MAX_SUPPORTED_RATES 128
-#define WMI_PEER_AUTH 0x00000001
-#define WMI_PEER_QOS 0x00000002
-#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
-#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
-#define WMI_PEER_HE 0x00000400
-#define WMI_PEER_APSD 0x00000800
-#define WMI_PEER_HT 0x00001000
-#define WMI_PEER_40MHZ 0x00002000
-#define WMI_PEER_STBC 0x00008000
-#define WMI_PEER_LDPC 0x00010000
-#define WMI_PEER_DYN_MIMOPS 0x00020000
-#define WMI_PEER_STATIC_MIMOPS 0x00040000
-#define WMI_PEER_SPATIAL_MUX 0x00200000
-#define WMI_PEER_TWT_REQ 0x00400000
-#define WMI_PEER_TWT_RESP 0x00800000
-#define WMI_PEER_VHT 0x02000000
-#define WMI_PEER_80MHZ 0x04000000
-#define WMI_PEER_PMF 0x08000000
-/* TODO: Place holder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
- * Need to be cleaned up
- */
-#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
-#define WMI_PEER_160MHZ 0x40000000
-#define WMI_PEER_SAFEMODE_EN 0x80000000
-
struct ath12k_wmi_vht_rate_set_params {
__le32 tlv_header;
__le32 rx_max_rate;
@@ -4770,7 +4748,6 @@ struct wmi_probe_tmpl_cmd {
struct ath12k_wmi_pdev {
struct ath12k_wmi_base *wmi_ab;
enum ath12k_htc_ep_id eid;
- const struct wmi_peer_flags_map *peer_flags;
u32 rx_decap_mode;
};
@@ -4784,7 +4761,6 @@ struct ath12k_wmi_base {
struct completion unified_ready;
DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
wait_queue_head_t tx_credits_wq;
- const struct wmi_peer_flags_map *peer_flags;
u32 num_mem_chunks;
u32 rx_decap_mode;
struct ath12k_wmi_host_mem_chunk_arg mem_chunks[WMI_MAX_MEM_REQS];
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 08bd5d3b0..f27308ccb 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -185,7 +185,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return ret;
}
-static int ath_ahb_remove(struct platform_device *pdev)
+static void ath_ahb_remove(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -193,7 +193,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
u32 reg;
if (!hw)
- return 0;
+ return;
ah = hw->priv;
@@ -215,13 +215,11 @@ static int ath_ahb_remove(struct platform_device *pdev)
ath5k_deinit_ah(ah);
iounmap(ah->iobase);
ieee80211_free_hw(hw);
-
- return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
- .remove = ath_ahb_remove,
+ .remove_new = ath_ahb_remove,
.driver = {
.name = "ar231x-wmac",
},
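This is the tree-wide platform-driver conversion at work: the driver core cannot act on an error returned from .remove — the device is going away regardless — so drivers migrate to a void-returning callback, registered through the transitional .remove_new member while .remove still carries the old int prototype. The converted shape, with hypothetical names:

	static void example_remove(struct platform_device *pdev)
	{
		/* all teardown must complete here; there is no error to return */
	}

	static struct platform_driver example_driver = {
		.probe      = example_probe,
		.remove_new = example_remove,
		.driver     = { .name = "example" },
	};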
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 693296ee9..e85b71395 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -489,7 +489,4 @@ struct ath5k_eeprom_info {
/* Spur mitigation data (fbin values for spur channels) */
u16 ee_spur_chans[AR5K_EEPROM_N_SPUR_CHANS][AR5K_EEPROM_N_FREQ_BANDS];
-
- /* Antenna raw switch tables */
- u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
};
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 988222cea..acc84e671 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
} else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
+ antcomb->rssi_lna2) {
/* set to A-B */
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 708c8969b..a5eb43f30 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -125,7 +125,7 @@ static void owl_rescan(struct pci_dev *pdev)
static void owl_fw_cb(const struct firmware *fw, void *context)
{
- struct owl_ctx *ctx = (struct owl_ctx *)context;
+ struct owl_ctx *ctx = context;
complete(&ctx->eeprom_load);
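
This hunk, and many of the ath9k hunks that follow, drop explicit casts from void *. In C a void pointer converts implicitly to any object pointer type, so the cast adds no safety and can hide a real type mismatch if the context pointer's type later changes. A small illustration with a hypothetical struct:

struct example_ctx { int id; };

static void example_cb(void *context)
{
	struct example_ctx *ctx = context;			/* implicit, preferred */
	struct example_ctx *old = (struct example_ctx *)context;	/* legal but redundant */
	(void)ctx;
	(void)old;
}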
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
index 82de0fadb..7c13a1deb 100644
--- a/drivers/net/wireless/ath/ath9k/common-init.c
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -124,7 +124,7 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
int ath9k_cmn_init_channels_rates(struct ath_common *common)
{
- struct ath_hw *ah = (struct ath_hw *)common->ah;
+ struct ath_hw *ah = common->ah;
void *channels;
BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index a5349c72c..4b27445a5 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -471,7 +471,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
- struct ath_softc *sc = (struct ath_softc *)common->priv;
+ struct ath_softc *sc = common->priv;
u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index a0376a678..d84e3ee7b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1376,7 +1376,7 @@ void ath9k_deinit_debug(struct ath_softc *sc)
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_softc *sc = common->priv;
sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
sc->hw->wiphy->debugfsdir);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 90cfe39aa..0c7841f95 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -70,7 +70,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev);
static void hif_usb_regout_cb(struct urb *urb)
{
- struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+ struct cmd_buf *cmd = urb->context;
switch (urb->status) {
case 0:
@@ -134,7 +134,7 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
static void hif_usb_mgmt_cb(struct urb *urb)
{
- struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
+ struct cmd_buf *cmd = urb->context;
struct hif_device_usb *hif_dev;
unsigned long flags;
bool txok = true;
@@ -252,7 +252,7 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
static void hif_usb_tx_cb(struct urb *urb)
{
- struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
+ struct tx_buf *tx_buf = urb->context;
struct hif_device_usb *hif_dev;
bool txok = true;
@@ -687,7 +687,7 @@ invalid_pkt:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
- struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct rx_buf *rx_buf = urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
int ret;
@@ -734,7 +734,7 @@ free:
static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
- struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
+ struct rx_buf *rx_buf = urb->context;
struct hif_device_usb *hif_dev = rx_buf->hif_dev;
struct sk_buff *skb = rx_buf->skb;
int ret;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 278ddc713..f7c6d9bc9 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -482,7 +482,7 @@ void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv)
int ath9k_htc_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME,
priv->hw->wiphy->debugfsdir);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index fc339079e..3633f9eb2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -63,12 +63,12 @@ static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
static void ath9k_htc_op_ps_wakeup(struct ath_common *common)
{
- ath9k_htc_ps_wakeup((struct ath9k_htc_priv *) common->priv);
+ ath9k_htc_ps_wakeup(common->priv);
}
static void ath9k_htc_op_ps_restore(struct ath_common *common)
{
- ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv);
+ ath9k_htc_ps_restore(common->priv);
}
static const struct ath_ps_ops ath9k_htc_ps_ops = {
@@ -235,7 +235,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
__be32 val, reg = cpu_to_be32(reg_offset);
int r;
@@ -257,7 +257,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
__be32 tmpaddr[8];
__be32 tmpval[8];
int i, ret;
@@ -282,7 +282,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
static void ath9k_regwrite_multi(struct ath_common *common)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
u32 rsp_status;
int r;
@@ -303,7 +303,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
const __be32 buf[2] = {
cpu_to_be32(reg_offset),
cpu_to_be32(val),
@@ -324,7 +324,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
mutex_lock(&priv->wmi->multi_write_mutex);
@@ -347,7 +347,7 @@ static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
if (atomic_read(&priv->wmi->mwrite_cnt))
ath9k_regwrite_buffer(hw_priv, val, reg_offset);
@@ -359,7 +359,7 @@ static void ath9k_enable_regwrite_buffer(void *hw_priv)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
atomic_inc(&priv->wmi->mwrite_cnt);
}
@@ -368,7 +368,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
atomic_dec(&priv->wmi->mwrite_cnt);
@@ -385,7 +385,7 @@ static void ath9k_reg_rmw_buffer(void *hw_priv,
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
u32 rsp_status;
int r;
@@ -423,7 +423,7 @@ static void ath9k_reg_rmw_flush(void *hw_priv)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
u32 rsp_status;
int r;
@@ -455,7 +455,7 @@ static void ath9k_enable_rmw_buffer(void *hw_priv)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
return;
@@ -468,7 +468,7 @@ static void ath9k_reg_rmw_single(void *hw_priv,
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
struct register_rmw buf, buf_ret;
int ret;
@@ -490,7 +490,7 @@ static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ struct ath9k_htc_priv *priv = common->priv;
if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
u32 val;
@@ -518,7 +518,7 @@ static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
- struct ath_hw *ah = (struct ath_hw *) common->ah;
+ struct ath_hw *ah = common->ah;
(void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
@@ -974,7 +974,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
err_init:
ath9k_stop_wmi(priv);
- hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
+ hif_dev = htc_handle->hif_dev;
ath9k_hif_usb_dealloc_urbs(hif_dev);
ath9k_destroy_wmi(priv);
err_free:
@@ -992,7 +992,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
ath9k_deinit_device(htc_handle->drv_priv);
ath9k_stop_wmi(htc_handle->drv_priv);
- ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev);
+ ath9k_hif_usb_dealloc_urbs(htc_handle->hif_dev);
ath9k_destroy_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
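
The htc_drv_init.c hunks above all touch ath9k_htc's buffered register-access path: mwrite_cnt counts nested enables, writes issued while the count is non-zero are queued, and a flush pushes the queue to the firmware as one WMI transaction instead of one round trip per register. A self-contained sketch of that counted enable/queue/flush pattern, with illustrative names (not the driver's) and without the driver's locking:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_BUFFERED 32

static atomic_int write_cnt;			/* nested-enable counter */
static unsigned int buf_reg[MAX_BUFFERED], buf_val[MAX_BUFFERED];
static int buf_len;

static void send_one(unsigned int reg, unsigned int val)
{
	printf("write reg 0x%x = 0x%x\n", reg, val);	/* stand-in for one firmware command */
}

static void send_batch(void)
{
	/* stand-in for one multi-write command carrying every queued entry */
	for (int i = 0; i < buf_len; i++)
		send_one(buf_reg[i], buf_val[i]);
	buf_len = 0;
}

static void write_buffer_enable(void)
{
	atomic_fetch_add(&write_cnt, 1);
}

static void reg_write(unsigned int reg, unsigned int val)
{
	if (atomic_load(&write_cnt)) {		/* buffering active: queue */
		buf_reg[buf_len] = reg;
		buf_val[buf_len] = val;
		if (++buf_len == MAX_BUFFERED)
			send_batch();		/* buffer full: flush early */
	} else {
		send_one(reg, val);		/* unbuffered: one command each */
	}
}

static void write_buffer_flush(void)
{
	if (atomic_fetch_sub(&write_cnt, 1) == 1)
		send_batch();			/* last disable sends the batch */
}

int main(void)
{
	write_buffer_enable();
	reg_write(0x1000, 1);
	reg_write(0x1004, 2);
	write_buffer_flush();			/* both writes leave in one batch */
	return 0;
}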
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 99667aba2..eb631fd33 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -89,7 +89,7 @@ static void htc_process_target_rdy(struct htc_target *target,
void *buf)
{
struct htc_endpoint *endpoint;
- struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
+ struct htc_ready_msg *htc_ready_msg = buf;
target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 4f00400c7..7fad7e75a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -151,12 +151,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc);
static void ath9k_op_ps_wakeup(struct ath_common *common)
{
- ath9k_ps_wakeup((struct ath_softc *) common->priv);
+ ath9k_ps_wakeup(common->priv);
}
static void ath9k_op_ps_restore(struct ath_common *common)
{
- ath9k_ps_restore((struct ath_softc *) common->priv);
+ ath9k_ps_restore(common->priv);
}
static const struct ath_ps_ops ath9k_ps_ops = {
@@ -174,7 +174,7 @@ static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_softc *sc = common->priv;
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
unsigned long flags;
@@ -189,7 +189,7 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_softc *sc = common->priv;
u32 val;
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
@@ -229,7 +229,7 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
{
struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_softc *sc = common->priv;
unsigned long flags;
u32 val;
@@ -608,7 +608,7 @@ static int ath9k_nvmem_request_eeprom(struct ath_softc *sc)
}
/* devres manages the calibration values release on shutdown */
- ah->nvmem_blob = (u16 *)devm_kmemdup(sc->dev, buf, len, GFP_KERNEL);
+ ah->nvmem_blob = devm_kmemdup(sc->dev, buf, len, GFP_KERNEL);
kfree(buf);
if (!ah->nvmem_blob)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 9d84003db..d1e5767aa 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -304,7 +304,7 @@ fail_paprd:
void ath_ani_calibrate(struct timer_list *t)
{
struct ath_common *common = from_timer(common, t, ani.timer);
- struct ath_softc *sc = (struct ath_softc *)common->priv;
+ struct ath_softc *sc = common->priv;
struct ath_hw *ah = sc->sc_ah;
bool longcal = false;
bool shortcal = false;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1494feedb..c48ff0ffb 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2383,7 +2383,22 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw,
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
+ struct ieee80211_channel *chan = chandef->chan;
+ int pos = chan->hw_value;
set_bit(ATH_OP_SCANNING, &common->op_flags);
+
+ /* Reset current survey */
+ if (!sc->cur_chan->offchannel) {
+ if (sc->cur_survey != &sc->survey[pos]) {
+ if (sc->cur_survey)
+ sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+ sc->cur_survey = &sc->survey[pos];
+ }
+
+ memset(sc->cur_survey, 0, sizeof(struct survey_info));
+ sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+ }
}
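
The new lines in ath9k_sw_scan_start reset the survey record for the operating channel when a software scan begins, so statistics accumulated before the scan do not bleed into what is reported afterwards. For context, mac80211 drivers expose these records through the get_survey op; a minimal sketch of such a callback, assuming a hypothetical driver with a per-channel array and, for brevity, a single 2 GHz band (only the op signature is mac80211's):

#include <net/mac80211.h>

struct foo_softc {
	struct survey_info survey[64];	/* one record per hardware channel */
	int n_channels;
};

/* mac80211 calls this with idx = 0, 1, ... until it returns -ENOENT */
static int foo_get_survey(struct ieee80211_hw *hw, int idx,
			  struct survey_info *survey)
{
	struct foo_softc *sc = hw->priv;

	if (idx >= sc->n_channels)
		return -ENOENT;

	*survey = sc->survey[idx];
	survey->channel = &hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[idx];
	return 0;
}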
static void ath9k_sw_scan_complete(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 0633589b8..e655cd8bb 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -781,7 +781,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
/* return bus cachesize in 4B word units */
static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_softc *sc = common->priv;
u8 u8tmp;
pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
@@ -799,7 +799,7 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
- struct ath_hw *ah = (struct ath_hw *) common->ah;
+ struct ath_hw *ah = common->ah;
common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
@@ -820,7 +820,7 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
/* Needs to be called after we discover btcoex capabilities */
static void ath_pci_aspm_init(struct ath_common *common)
{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_softc *sc = common->priv;
struct ath_hw *ah = sc->sc_ah;
struct pci_dev *pdev = to_pci_dev(sc->dev);
struct pci_dev *parent;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 2bd116317..4e6b4df85 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1644,7 +1644,7 @@ out_err:
return ret;
}
-static int wcn36xx_remove(struct platform_device *pdev)
+static void wcn36xx_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct wcn36xx *wcn = hw->priv;
@@ -1666,8 +1666,6 @@ static int wcn36xx_remove(struct platform_device *pdev)
mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);
-
- return 0;
}
static const struct of_device_id wcn36xx_of_match[] = {
@@ -1678,7 +1676,7 @@ MODULE_DEVICE_TABLE(of, wcn36xx_of_match);
static struct platform_driver wcn36xx_driver = {
.probe = wcn36xx_probe,
- .remove = wcn36xx_remove,
+ .remove_new = wcn36xx_remove,
.driver = {
.name = "wcn36xx",
.of_match_table = wcn36xx_of_match,
@@ -1687,6 +1685,7 @@ static struct platform_driver wcn36xx_driver = {
module_platform_driver(wcn36xx_driver);
+MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/atmel/Kconfig b/drivers/net/wireless/atmel/Kconfig
index bafdd57b0..7a2bb7a58 100644
--- a/drivers/net/wireless/atmel/Kconfig
+++ b/drivers/net/wireless/atmel/Kconfig
@@ -12,41 +12,6 @@ config WLAN_VENDOR_ATMEL
if WLAN_VENDOR_ATMEL
-config ATMEL
- tristate "Atmel at76c50x chipset 802.11b support"
- depends on CFG80211 && (PCI || PCMCIA) && HAS_IOPORT
- select WIRELESS_EXT
- select WEXT_PRIV
- select FW_LOADER
- select CRC32
- help
- A driver for 802.11b wireless cards based on the Atmel fast-vnet
- chips. This driver supports standard Linux wireless extensions.
-
- Many cards based on this chipset do not have flash memory
- and need their firmware loaded at start-up. If yours is
- one of these, you will need to provide a firmware image
- to be loaded into the card by the driver. The Atmel
- firmware package can be downloaded from
- <http://www.thekelleys.org.uk/atmel>
-
-config PCI_ATMEL
- tristate "Atmel at76c506 PCI cards"
- depends on ATMEL && PCI
- help
- Enable support for PCI and mini-PCI cards containing the
- Atmel at76c506 chip.
-
-config PCMCIA_ATMEL
- tristate "Atmel at76c502/at76c504 PCMCIA cards"
- depends on ATMEL && PCMCIA
- select WIRELESS_EXT
- select FW_LOADER
- select CRC32
- help
- Enable support for PCMCIA cards containing the
- Atmel at76c502 and at76c504 chips.
-
config AT76C50X_USB
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
depends on MAC80211 && USB
diff --git a/drivers/net/wireless/atmel/Makefile b/drivers/net/wireless/atmel/Makefile
index 17e628056..8338d7098 100644
--- a/drivers/net/wireless/atmel/Makefile
+++ b/drivers/net/wireless/atmel/Makefile
@@ -1,6 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ATMEL) += atmel.o
-obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o
-obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o
-
obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
deleted file mode 100644
index 461dce21d..000000000
--- a/drivers/net/wireless/atmel/atmel.c
+++ /dev/null
@@ -1,4452 +0,0 @@
-/*** -*- linux-c -*- **********************************************************
-
- Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
-
- Copyright 2000-2001 ATMEL Corporation.
- Copyright 2003-2004 Simon Kelley.
-
- This code was developed from version 2.1.1 of the Atmel drivers,
- released by Atmel corp. under the GPL in December 2002. It also
- includes code from the Linux aironet drivers (C) Benjamin Reed,
- and the Linux PCMCIA package, (C) David Hinds and the Linux wireless
- extensions, (C) Jean Tourrilhes.
-
- The firmware module for reading the MAC address of the card comes from
- net.russotto.AtmelMACFW, written by Matthew T. Russotto and copyright
- by him. net.russotto.AtmelMACFW is used under the GPL license version 2.
- This file contains the module in binary form and, under the terms
- of the GPL, in source form. The source is located at the end of the file.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This software is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, see
- <http://www.gnu.org/licenses/>.
-
- For all queries about this code, please contact the current author,
- Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
-
- Credit is due to HP UK and Cambridge Online Systems Ltd for supplying
- hardware used during development of this driver.
-
-******************************************************************************/
-
-#include <linux/interrupt.h>
-
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <linux/crc32.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/device.h>
-#include <linux/moduleparam.h>
-#include <linux/firmware.h>
-#include <linux/jiffies.h>
-#include <net/cfg80211.h>
-#include "atmel.h"
-
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 98
-
-MODULE_AUTHOR("Simon Kelley");
-MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
-MODULE_LICENSE("GPL");
-
-/* The name of the firmware file to be loaded
- overrides any automatic selection */
-static char *firmware = NULL;
-module_param(firmware, charp, 0);
-
-/* table of firmware file names */
-static struct {
- AtmelFWType fw_type;
- const char *fw_file;
- const char *fw_file_ext;
-} fw_table[] = {
- { ATMEL_FW_TYPE_502, "atmel_at76c502", "bin" },
- { ATMEL_FW_TYPE_502D, "atmel_at76c502d", "bin" },
- { ATMEL_FW_TYPE_502E, "atmel_at76c502e", "bin" },
- { ATMEL_FW_TYPE_502_3COM, "atmel_at76c502_3com", "bin" },
- { ATMEL_FW_TYPE_504, "atmel_at76c504", "bin" },
- { ATMEL_FW_TYPE_504_2958, "atmel_at76c504_2958", "bin" },
- { ATMEL_FW_TYPE_504A_2958, "atmel_at76c504a_2958", "bin" },
- { ATMEL_FW_TYPE_506, "atmel_at76c506", "bin" },
- { ATMEL_FW_TYPE_NONE, NULL, NULL }
-};
-MODULE_FIRMWARE("atmel_at76c502-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c502.bin");
-MODULE_FIRMWARE("atmel_at76c502d-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c502d.bin");
-MODULE_FIRMWARE("atmel_at76c502e-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c502e.bin");
-MODULE_FIRMWARE("atmel_at76c502_3com-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c502_3com.bin");
-MODULE_FIRMWARE("atmel_at76c504-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c504.bin");
-MODULE_FIRMWARE("atmel_at76c504_2958-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c504_2958.bin");
-MODULE_FIRMWARE("atmel_at76c504a_2958-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c504a_2958.bin");
-MODULE_FIRMWARE("atmel_at76c506-wpa.bin");
-MODULE_FIRMWARE("atmel_at76c506.bin");
-
-#define MAX_SSID_LENGTH 32
-#define MGMT_JIFFIES (256 * HZ / 100)
-
-#define MAX_BSS_ENTRIES 64
-
-/* registers */
-#define GCR 0x00 /* (SIR0) General Configuration Register */
-#define BSR 0x02 /* (SIR1) Bank Switching Select Register */
-#define AR 0x04
-#define DR 0x08
-#define MR1 0x12 /* Mirror Register 1 */
-#define MR2 0x14 /* Mirror Register 2 */
-#define MR3 0x16 /* Mirror Register 3 */
-#define MR4 0x18 /* Mirror Register 4 */
-
-#define GPR1 0x0c
-#define GPR2 0x0e
-#define GPR3 0x10
-/*
- * Constants for the GCR register.
- */
-#define GCR_REMAP 0x0400 /* Remap internal SRAM to 0 */
-#define GCR_SWRES 0x0080 /* BIU reset (ARM and PAI are NOT reset) */
-#define GCR_CORES 0x0060 /* Core Reset (ARM and PAI are reset) */
-#define GCR_ENINT 0x0002 /* Enable Interrupts */
-#define GCR_ACKINT 0x0008 /* Acknowledge Interrupts */
-
-#define BSS_SRAM 0x0200 /* AMBA module selection --> SRAM */
-#define BSS_IRAM 0x0100 /* AMBA module selection --> IRAM */
-/*
- *Constants for the MR registers.
- */
-#define MAC_INIT_COMPLETE 0x0001 /* MAC init has been completed */
-#define MAC_BOOT_COMPLETE 0x0010 /* MAC boot has been completed */
-#define MAC_INIT_OK 0x0002 /* MAC init was successful */
-
-#define MIB_MAX_DATA_BYTES 212
-#define MIB_HEADER_SIZE 4 /* first four fields */
-
-struct get_set_mib {
- u8 type;
- u8 size;
- u8 index;
- u8 reserved;
- u8 data[MIB_MAX_DATA_BYTES];
-};
-
-struct rx_desc {
- u32 Next;
- u16 MsduPos;
- u16 MsduSize;
-
- u8 State;
- u8 Status;
- u8 Rate;
- u8 Rssi;
- u8 LinkQuality;
- u8 PreambleType;
- u16 Duration;
- u32 RxTime;
-};
-
-#define RX_DESC_FLAG_VALID 0x80
-#define RX_DESC_FLAG_CONSUMED 0x40
-#define RX_DESC_FLAG_IDLE 0x00
-
-#define RX_STATUS_SUCCESS 0x00
-
-#define RX_DESC_MSDU_POS_OFFSET 4
-#define RX_DESC_MSDU_SIZE_OFFSET 6
-#define RX_DESC_FLAGS_OFFSET 8
-#define RX_DESC_STATUS_OFFSET 9
-#define RX_DESC_RSSI_OFFSET 11
-#define RX_DESC_LINK_QUALITY_OFFSET 12
-#define RX_DESC_PREAMBLE_TYPE_OFFSET 13
-#define RX_DESC_DURATION_OFFSET 14
-#define RX_DESC_RX_TIME_OFFSET 16
-
-struct tx_desc {
- u32 NextDescriptor;
- u16 TxStartOfFrame;
- u16 TxLength;
-
- u8 TxState;
- u8 TxStatus;
- u8 RetryCount;
-
- u8 TxRate;
-
- u8 KeyIndex;
- u8 CipherType;
- u8 CipherLength;
- u8 Reserved1;
-
- u8 Reserved;
- u8 PacketType;
- u16 HostTxLength;
-};
-
-#define TX_DESC_NEXT_OFFSET 0
-#define TX_DESC_POS_OFFSET 4
-#define TX_DESC_SIZE_OFFSET 6
-#define TX_DESC_FLAGS_OFFSET 8
-#define TX_DESC_STATUS_OFFSET 9
-#define TX_DESC_RETRY_OFFSET 10
-#define TX_DESC_RATE_OFFSET 11
-#define TX_DESC_KEY_INDEX_OFFSET 12
-#define TX_DESC_CIPHER_TYPE_OFFSET 13
-#define TX_DESC_CIPHER_LENGTH_OFFSET 14
-#define TX_DESC_PACKET_TYPE_OFFSET 17
-#define TX_DESC_HOST_LENGTH_OFFSET 18
-
-/*
- * Host-MAC interface
- */
-
-#define TX_STATUS_SUCCESS 0x00
-
-#define TX_FIRM_OWN 0x80
-#define TX_DONE 0x40
-
-#define TX_ERROR 0x01
-
-#define TX_PACKET_TYPE_DATA 0x01
-#define TX_PACKET_TYPE_MGMT 0x02
-
-#define ISR_EMPTY 0x00 /* no bits set in ISR */
-#define ISR_TxCOMPLETE 0x01 /* packet transmitted */
-#define ISR_RxCOMPLETE 0x02 /* packet received */
-#define ISR_RxFRAMELOST 0x04 /* Rx Frame lost */
-#define ISR_FATAL_ERROR 0x08 /* Fatal error */
-#define ISR_COMMAND_COMPLETE 0x10 /* command completed */
-#define ISR_OUT_OF_RANGE 0x20 /* station out of range */
-#define ISR_IBSS_MERGE 0x40 /* (4.1.2.30): IBSS merge */
-#define ISR_GENERIC_IRQ 0x80
-
-#define Local_Mib_Type 0x01
-#define Mac_Address_Mib_Type 0x02
-#define Mac_Mib_Type 0x03
-#define Statistics_Mib_Type 0x04
-#define Mac_Mgmt_Mib_Type 0x05
-#define Mac_Wep_Mib_Type 0x06
-#define Phy_Mib_Type 0x07
-#define Multi_Domain_MIB 0x08
-
-#define MAC_MGMT_MIB_CUR_BSSID_POS 14
-#define MAC_MIB_FRAG_THRESHOLD_POS 8
-#define MAC_MIB_RTS_THRESHOLD_POS 10
-#define MAC_MIB_SHORT_RETRY_POS 16
-#define MAC_MIB_LONG_RETRY_POS 17
-#define MAC_MIB_SHORT_RETRY_LIMIT_POS 16
-#define MAC_MGMT_MIB_BEACON_PER_POS 0
-#define MAC_MGMT_MIB_STATION_ID_POS 6
-#define MAC_MGMT_MIB_CUR_PRIVACY_POS 11
-#define MAC_MGMT_MIB_CUR_BSSID_POS 14
-#define MAC_MGMT_MIB_PS_MODE_POS 53
-#define MAC_MGMT_MIB_LISTEN_INTERVAL_POS 54
-#define MAC_MGMT_MIB_MULTI_DOMAIN_IMPLEMENTED 56
-#define MAC_MGMT_MIB_MULTI_DOMAIN_ENABLED 57
-#define PHY_MIB_CHANNEL_POS 14
-#define PHY_MIB_RATE_SET_POS 20
-#define PHY_MIB_REG_DOMAIN_POS 26
-#define LOCAL_MIB_AUTO_TX_RATE_POS 3
-#define LOCAL_MIB_SSID_SIZE 5
-#define LOCAL_MIB_TX_PROMISCUOUS_POS 6
-#define LOCAL_MIB_TX_MGMT_RATE_POS 7
-#define LOCAL_MIB_TX_CONTROL_RATE_POS 8
-#define LOCAL_MIB_PREAMBLE_TYPE 9
-#define MAC_ADDR_MIB_MAC_ADDR_POS 0
-
-#define CMD_Set_MIB_Vars 0x01
-#define CMD_Get_MIB_Vars 0x02
-#define CMD_Scan 0x03
-#define CMD_Join 0x04
-#define CMD_Start 0x05
-#define CMD_EnableRadio 0x06
-#define CMD_DisableRadio 0x07
-#define CMD_SiteSurvey 0x0B
-
-#define CMD_STATUS_IDLE 0x00
-#define CMD_STATUS_COMPLETE 0x01
-#define CMD_STATUS_UNKNOWN 0x02
-#define CMD_STATUS_INVALID_PARAMETER 0x03
-#define CMD_STATUS_FUNCTION_NOT_SUPPORTED 0x04
-#define CMD_STATUS_TIME_OUT 0x07
-#define CMD_STATUS_IN_PROGRESS 0x08
-#define CMD_STATUS_REJECTED_RADIO_OFF 0x09
-#define CMD_STATUS_HOST_ERROR 0xFF
-#define CMD_STATUS_BUSY 0xFE
-
-#define CMD_BLOCK_COMMAND_OFFSET 0
-#define CMD_BLOCK_STATUS_OFFSET 1
-#define CMD_BLOCK_PARAMETERS_OFFSET 4
-
-#define SCAN_OPTIONS_SITE_SURVEY 0x80
-
-#define MGMT_FRAME_BODY_OFFSET 24
-#define MAX_AUTHENTICATION_RETRIES 3
-#define MAX_ASSOCIATION_RETRIES 3
-
-#define AUTHENTICATION_RESPONSE_TIME_OUT 1000
-
-#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */
-#define LOOP_RETRY_LIMIT 500000
-
-#define ACTIVE_MODE 1
-#define PS_MODE 2
-
-#define MAX_ENCRYPTION_KEYS 4
-#define MAX_ENCRYPTION_KEY_SIZE 40
-
-/*
- * 802.11 related definitions
- */
-
-/*
- * Regulatory Domains
- */
-
-#define REG_DOMAIN_FCC 0x10 /* Channels 1-11 USA */
-#define REG_DOMAIN_DOC 0x20 /* Channel 1-11 Canada */
-#define REG_DOMAIN_ETSI 0x30 /* Channel 1-13 Europe (ex Spain/France) */
-#define REG_DOMAIN_SPAIN 0x31 /* Channel 10-11 Spain */
-#define REG_DOMAIN_FRANCE 0x32 /* Channel 10-13 France */
-#define REG_DOMAIN_MKK 0x40 /* Channel 14 Japan */
-#define REG_DOMAIN_MKK1 0x41 /* Channel 1-14 Japan(MKK1) */
-#define REG_DOMAIN_ISRAEL 0x50 /* Channel 3-9 ISRAEL */
-
-#define BSS_TYPE_AD_HOC 1
-#define BSS_TYPE_INFRASTRUCTURE 2
-
-#define SCAN_TYPE_ACTIVE 0
-#define SCAN_TYPE_PASSIVE 1
-
-#define LONG_PREAMBLE 0
-#define SHORT_PREAMBLE 1
-#define AUTO_PREAMBLE 2
-
-#define DATA_FRAME_WS_HEADER_SIZE 30
-
-/* promiscuous mode control */
-#define PROM_MODE_OFF 0x0
-#define PROM_MODE_UNKNOWN 0x1
-#define PROM_MODE_CRC_FAILED 0x2
-#define PROM_MODE_DUPLICATED 0x4
-#define PROM_MODE_MGMT 0x8
-#define PROM_MODE_CTRL 0x10
-#define PROM_MODE_BAD_PROTOCOL 0x20
-
-#define IFACE_INT_STATUS_OFFSET 0
-#define IFACE_INT_MASK_OFFSET 1
-#define IFACE_LOCKOUT_HOST_OFFSET 2
-#define IFACE_LOCKOUT_MAC_OFFSET 3
-#define IFACE_FUNC_CTRL_OFFSET 28
-#define IFACE_MAC_STAT_OFFSET 30
-#define IFACE_GENERIC_INT_TYPE_OFFSET 32
-
-#define CIPHER_SUITE_NONE 0
-#define CIPHER_SUITE_WEP_64 1
-#define CIPHER_SUITE_TKIP 2
-#define CIPHER_SUITE_AES 3
-#define CIPHER_SUITE_CCX 4
-#define CIPHER_SUITE_WEP_128 5
-
-/*
- * IFACE MACROS & definitions
- */
-
-/*
- * FuncCtrl field:
- */
-#define FUNC_CTRL_TxENABLE 0x10
-#define FUNC_CTRL_RxENABLE 0x20
-#define FUNC_CTRL_INIT_COMPLETE 0x01
-
-/* A stub firmware image which reads the MAC address from NVRAM on the card.
- For copyright information and source see the end of this file. */
-static u8 mac_reader[] = {
- 0x06, 0x00, 0x00, 0xea, 0x04, 0x00, 0x00, 0xea, 0x03, 0x00, 0x00, 0xea, 0x02, 0x00, 0x00, 0xea,
- 0x01, 0x00, 0x00, 0xea, 0x00, 0x00, 0x00, 0xea, 0xff, 0xff, 0xff, 0xea, 0xfe, 0xff, 0xff, 0xea,
- 0xd3, 0x00, 0xa0, 0xe3, 0x00, 0xf0, 0x21, 0xe1, 0x0e, 0x04, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3,
- 0x81, 0x11, 0xa0, 0xe1, 0x00, 0x10, 0x81, 0xe3, 0x00, 0x10, 0x80, 0xe5, 0x1c, 0x10, 0x90, 0xe5,
- 0x10, 0x10, 0xc1, 0xe3, 0x1c, 0x10, 0x80, 0xe5, 0x01, 0x10, 0xa0, 0xe3, 0x08, 0x10, 0x80, 0xe5,
- 0x02, 0x03, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3, 0xb0, 0x10, 0xc0, 0xe1, 0xb4, 0x10, 0xc0, 0xe1,
- 0xb8, 0x10, 0xc0, 0xe1, 0xbc, 0x10, 0xc0, 0xe1, 0x56, 0xdc, 0xa0, 0xe3, 0x21, 0x00, 0x00, 0xeb,
- 0x0a, 0x00, 0xa0, 0xe3, 0x1a, 0x00, 0x00, 0xeb, 0x10, 0x00, 0x00, 0xeb, 0x07, 0x00, 0x00, 0xeb,
- 0x02, 0x03, 0xa0, 0xe3, 0x02, 0x14, 0xa0, 0xe3, 0xb4, 0x10, 0xc0, 0xe1, 0x4c, 0x10, 0x9f, 0xe5,
- 0xbc, 0x10, 0xc0, 0xe1, 0x10, 0x10, 0xa0, 0xe3, 0xb8, 0x10, 0xc0, 0xe1, 0xfe, 0xff, 0xff, 0xea,
- 0x00, 0x40, 0x2d, 0xe9, 0x00, 0x20, 0xa0, 0xe3, 0x02, 0x3c, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3,
- 0x28, 0x00, 0x9f, 0xe5, 0x37, 0x00, 0x00, 0xeb, 0x00, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1,
- 0x00, 0x40, 0x2d, 0xe9, 0x12, 0x2e, 0xa0, 0xe3, 0x06, 0x30, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3,
- 0x02, 0x04, 0xa0, 0xe3, 0x2f, 0x00, 0x00, 0xeb, 0x00, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1,
- 0x00, 0x02, 0x00, 0x02, 0x80, 0x01, 0x90, 0xe0, 0x01, 0x00, 0x00, 0x0a, 0x01, 0x00, 0x50, 0xe2,
- 0xfc, 0xff, 0xff, 0xea, 0x1e, 0xff, 0x2f, 0xe1, 0x80, 0x10, 0xa0, 0xe3, 0xf3, 0x06, 0xa0, 0xe3,
- 0x00, 0x10, 0x80, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x80, 0xe5, 0x01, 0x10, 0xa0, 0xe3,
- 0x04, 0x10, 0x80, 0xe5, 0x00, 0x10, 0x80, 0xe5, 0x0e, 0x34, 0xa0, 0xe3, 0x1c, 0x10, 0x93, 0xe5,
- 0x02, 0x1a, 0x81, 0xe3, 0x1c, 0x10, 0x83, 0xe5, 0x58, 0x11, 0x9f, 0xe5, 0x30, 0x10, 0x80, 0xe5,
- 0x54, 0x11, 0x9f, 0xe5, 0x34, 0x10, 0x80, 0xe5, 0x38, 0x10, 0x80, 0xe5, 0x3c, 0x10, 0x80, 0xe5,
- 0x10, 0x10, 0x90, 0xe5, 0x08, 0x00, 0x90, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0xf3, 0x16, 0xa0, 0xe3,
- 0x08, 0x00, 0x91, 0xe5, 0x05, 0x00, 0xa0, 0xe3, 0x0c, 0x00, 0x81, 0xe5, 0x10, 0x00, 0x91, 0xe5,
- 0x02, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0xff, 0x00, 0xa0, 0xe3, 0x0c, 0x00, 0x81, 0xe5,
- 0x10, 0x00, 0x91, 0xe5, 0x02, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x00, 0x91, 0xe5,
- 0x10, 0x00, 0x91, 0xe5, 0x01, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x00, 0x91, 0xe5,
- 0xff, 0x00, 0x00, 0xe2, 0x1e, 0xff, 0x2f, 0xe1, 0x30, 0x40, 0x2d, 0xe9, 0x00, 0x50, 0xa0, 0xe1,
- 0x03, 0x40, 0xa0, 0xe1, 0xa2, 0x02, 0xa0, 0xe1, 0x08, 0x00, 0x00, 0xe2, 0x03, 0x00, 0x80, 0xe2,
- 0xd8, 0x10, 0x9f, 0xe5, 0x00, 0x00, 0xc1, 0xe5, 0x01, 0x20, 0xc1, 0xe5, 0xe2, 0xff, 0xff, 0xeb,
- 0x01, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x1a, 0x14, 0x00, 0xa0, 0xe3, 0xc4, 0xff, 0xff, 0xeb,
- 0x04, 0x20, 0xa0, 0xe1, 0x05, 0x10, 0xa0, 0xe1, 0x02, 0x00, 0xa0, 0xe3, 0x01, 0x00, 0x00, 0xeb,
- 0x30, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1, 0x70, 0x40, 0x2d, 0xe9, 0xf3, 0x46, 0xa0, 0xe3,
- 0x00, 0x30, 0xa0, 0xe3, 0x00, 0x00, 0x50, 0xe3, 0x08, 0x00, 0x00, 0x9a, 0x8c, 0x50, 0x9f, 0xe5,
- 0x03, 0x60, 0xd5, 0xe7, 0x0c, 0x60, 0x84, 0xe5, 0x10, 0x60, 0x94, 0xe5, 0x02, 0x00, 0x16, 0xe3,
- 0xfc, 0xff, 0xff, 0x0a, 0x01, 0x30, 0x83, 0xe2, 0x00, 0x00, 0x53, 0xe1, 0xf7, 0xff, 0xff, 0x3a,
- 0xff, 0x30, 0xa0, 0xe3, 0x0c, 0x30, 0x84, 0xe5, 0x08, 0x00, 0x94, 0xe5, 0x10, 0x00, 0x94, 0xe5,
- 0x01, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x00, 0x94, 0xe5, 0x00, 0x00, 0xa0, 0xe3,
- 0x00, 0x00, 0x52, 0xe3, 0x0b, 0x00, 0x00, 0x9a, 0x10, 0x50, 0x94, 0xe5, 0x02, 0x00, 0x15, 0xe3,
- 0xfc, 0xff, 0xff, 0x0a, 0x0c, 0x30, 0x84, 0xe5, 0x10, 0x50, 0x94, 0xe5, 0x01, 0x00, 0x15, 0xe3,
- 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x50, 0x94, 0xe5, 0x01, 0x50, 0xc1, 0xe4, 0x01, 0x00, 0x80, 0xe2,
- 0x02, 0x00, 0x50, 0xe1, 0xf3, 0xff, 0xff, 0x3a, 0xc8, 0x00, 0xa0, 0xe3, 0x98, 0xff, 0xff, 0xeb,
- 0x70, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1, 0x01, 0x0c, 0x00, 0x02, 0x01, 0x02, 0x00, 0x02,
- 0x00, 0x01, 0x00, 0x02
-};
-
-struct atmel_private {
- void *card; /* Bus dependent structure varies for PCcard */
- int (*present_callback)(void *); /* And callback which uses it */
- char firmware_id[32];
- AtmelFWType firmware_type;
- u8 *firmware;
- int firmware_length;
- struct timer_list management_timer;
- struct net_device *dev;
- struct device *sys_dev;
- struct iw_statistics wstats;
- spinlock_t irqlock, timerlock; /* spinlocks */
- enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
- enum {
- CARD_TYPE_PARALLEL_FLASH,
- CARD_TYPE_SPI_FLASH,
- CARD_TYPE_EEPROM
- } card_type;
- int do_rx_crc; /* If we need to CRC incoming packets */
- int probe_crc; /* set if we don't yet know */
- int crc_ok_cnt, crc_ko_cnt; /* counters for probing */
- u16 rx_desc_head;
- u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous;
- u16 tx_free_mem, tx_buff_head, tx_buff_tail;
-
- u16 frag_seq, frag_len, frag_no;
- u8 frag_source[6];
-
- u8 wep_is_on, default_key, exclude_unencrypted, encryption_level;
- u8 group_cipher_suite, pairwise_cipher_suite;
- u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
- int wep_key_len[MAX_ENCRYPTION_KEYS];
- int use_wpa, radio_on_broken; /* firmware dependent stuff. */
-
- u16 host_info_base;
- struct host_info_struct {
- /* NB this is matched to the hardware, don't change. */
- u8 volatile int_status;
- u8 volatile int_mask;
- u8 volatile lockout_host;
- u8 volatile lockout_mac;
-
- u16 tx_buff_pos;
- u16 tx_buff_size;
- u16 tx_desc_pos;
- u16 tx_desc_count;
-
- u16 rx_buff_pos;
- u16 rx_buff_size;
- u16 rx_desc_pos;
- u16 rx_desc_count;
-
- u16 build_version;
- u16 command_pos;
-
- u16 major_version;
- u16 minor_version;
-
- u16 func_ctrl;
- u16 mac_status;
- u16 generic_IRQ_type;
- u8 reserved[2];
- } host_info;
-
- enum {
- STATION_STATE_SCANNING,
- STATION_STATE_JOINNING,
- STATION_STATE_AUTHENTICATING,
- STATION_STATE_ASSOCIATING,
- STATION_STATE_READY,
- STATION_STATE_REASSOCIATING,
- STATION_STATE_DOWN,
- STATION_STATE_MGMT_ERROR
- } station_state;
-
- int operating_mode, power_mode;
- unsigned long last_qual;
- int beacons_this_sec;
- int channel;
- int reg_domain, config_reg_domain;
- int tx_rate;
- int auto_tx_rate;
- int rts_threshold;
- int frag_threshold;
- int long_retry, short_retry;
- int preamble;
- int default_beacon_period, beacon_period, listen_interval;
- int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum;
- int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt;
- enum {
- SITE_SURVEY_IDLE,
- SITE_SURVEY_IN_PROGRESS,
- SITE_SURVEY_COMPLETED
- } site_survey_state;
- unsigned long last_survey;
-
- int station_was_associated, station_is_associated;
- int fast_scan;
-
- struct bss_info {
- int channel;
- int SSIDsize;
- int RSSI;
- int UsingWEP;
- int preamble;
- int beacon_period;
- int BSStype;
- u8 BSSID[6];
- u8 SSID[MAX_SSID_LENGTH];
- } BSSinfo[MAX_BSS_ENTRIES];
- int BSS_list_entries, current_BSS;
- int connect_to_any_BSS;
- int SSID_size, new_SSID_size;
- u8 CurrentBSSID[6], BSSID[6];
- u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH];
- u64 last_beacon_timestamp;
- u8 rx_buf[MAX_WIRELESS_BODY];
-};
-
-static u8 atmel_basic_rates[4] = {0x82, 0x84, 0x0b, 0x16};
-
-static const struct {
- int reg_domain;
- int min, max;
- char *name;
-} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" },
- { REG_DOMAIN_DOC, 1, 11, "Canada" },
- { REG_DOMAIN_ETSI, 1, 13, "Europe" },
- { REG_DOMAIN_SPAIN, 10, 11, "Spain" },
- { REG_DOMAIN_FRANCE, 10, 13, "France" },
- { REG_DOMAIN_MKK, 14, 14, "MKK" },
- { REG_DOMAIN_MKK1, 1, 14, "MKK1" },
- { REG_DOMAIN_ISRAEL, 3, 9, "Israel"} };
-
-static void build_wpa_mib(struct atmel_private *priv);
-static void atmel_copy_to_card(struct net_device *dev, u16 dest,
- const unsigned char *src, u16 len);
-static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
- u16 src, u16 len);
-static void atmel_set_gcr(struct net_device *dev, u16 mask);
-static void atmel_clear_gcr(struct net_device *dev, u16 mask);
-static int atmel_lock_mac(struct atmel_private *priv);
-static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
-static void atmel_command_irq(struct atmel_private *priv);
-static int atmel_validate_channel(struct atmel_private *priv, int channel);
-static void atmel_management_frame(struct atmel_private *priv,
- struct ieee80211_hdr *header,
- u16 frame_len, u8 rssi);
-static void atmel_management_timer(struct timer_list *t);
-static void atmel_send_command(struct atmel_private *priv, int command,
- void *cmd, int cmd_size);
-static int atmel_send_command_wait(struct atmel_private *priv, int command,
- void *cmd, int cmd_size);
-static void atmel_transmit_management_frame(struct atmel_private *priv,
- struct ieee80211_hdr *header,
- u8 *body, int body_len);
-
-static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
-static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index,
- u8 data);
-static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
- u16 data);
-static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
- const u8 *data, int data_len);
-static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
- u8 *data, int data_len);
-static void atmel_scan(struct atmel_private *priv, int specific_ssid);
-static void atmel_join_bss(struct atmel_private *priv, int bss_index);
-static void atmel_smooth_qual(struct atmel_private *priv);
-static void atmel_writeAR(struct net_device *dev, u16 data);
-static int probe_atmel_card(struct net_device *dev);
-static int reset_atmel_card(struct net_device *dev);
-static void atmel_enter_state(struct atmel_private *priv, int new_state);
-int atmel_open (struct net_device *dev);
-
-static inline u16 atmel_hi(struct atmel_private *priv, u16 offset)
-{
- return priv->host_info_base + offset;
-}
-
-static inline u16 atmel_co(struct atmel_private *priv, u16 offset)
-{
- return priv->host_info.command_pos + offset;
-}
-
-static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc)
-{
- return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset;
-}
-
-static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc)
-{
- return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset;
-}
-
-static inline u8 atmel_read8(struct net_device *dev, u16 offset)
-{
- return inb(dev->base_addr + offset);
-}
-
-static inline void atmel_write8(struct net_device *dev, u16 offset, u8 data)
-{
- outb(data, dev->base_addr + offset);
-}
-
-static inline u16 atmel_read16(struct net_device *dev, u16 offset)
-{
- return inw(dev->base_addr + offset);
-}
-
-static inline void atmel_write16(struct net_device *dev, u16 offset, u16 data)
-{
- outw(data, dev->base_addr + offset);
-}
-
-static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos)
-{
- atmel_writeAR(priv->dev, pos);
- return atmel_read8(priv->dev, DR);
-}
-
-static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data)
-{
- atmel_writeAR(priv->dev, pos);
- atmel_write8(priv->dev, DR, data);
-}
-
-static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos)
-{
- atmel_writeAR(priv->dev, pos);
- return atmel_read16(priv->dev, DR);
-}
-
-static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data)
-{
- atmel_writeAR(priv->dev, pos);
- atmel_write16(priv->dev, DR, data);
-}
-
-static const struct iw_handler_def atmel_handler_def;
-
-static void tx_done_irq(struct atmel_private *priv)
-{
- int i;
-
- for (i = 0;
- atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE &&
- i < priv->host_info.tx_desc_count;
- i++) {
- u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head));
- u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head));
- u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head));
-
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head), 0);
-
- priv->tx_free_mem += msdu_size;
- priv->tx_desc_free++;
-
- if (priv->tx_buff_head + msdu_size > (priv->host_info.tx_buff_pos + priv->host_info.tx_buff_size))
- priv->tx_buff_head = 0;
- else
- priv->tx_buff_head += msdu_size;
-
- if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1))
- priv->tx_desc_head++ ;
- else
- priv->tx_desc_head = 0;
-
- if (type == TX_PACKET_TYPE_DATA) {
- if (status == TX_STATUS_SUCCESS)
- priv->dev->stats.tx_packets++;
- else
- priv->dev->stats.tx_errors++;
- netif_wake_queue(priv->dev);
- }
- }
-}
-
-static u16 find_tx_buff(struct atmel_private *priv, u16 len)
-{
- u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail;
-
- if (priv->tx_desc_free == 3 || priv->tx_free_mem < len)
- return 0;
-
- if (bottom_free >= len)
- return priv->host_info.tx_buff_pos + priv->tx_buff_tail;
-
- if (priv->tx_free_mem - bottom_free >= len) {
- priv->tx_buff_tail = 0;
- return priv->host_info.tx_buff_pos;
- }
-
- return 0;
-}
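
find_tx_buff above carves transmit frames out of a circular byte buffer: it first tries the contiguous space between the tail and the end of the buffer, and wraps to the start only when the remaining free bytes make that safe. A self-contained sketch of that wrap-or-fail allocation, with illustrative names and without the descriptor accounting the driver also checks:

/* Illustrative circular-buffer allocator in the style of find_tx_buff. */
struct txbuf {
	unsigned int size;	/* total buffer size */
	unsigned int tail;	/* next free byte */
	unsigned int free;	/* bytes not currently in flight */
};

/* Returns the offset at which @len bytes fit contiguously, or -1.
 * Wrapping discards the slack at the end of the buffer, which is
 * why @free alone is not a sufficient test. */
static int txbuf_alloc(struct txbuf *b, unsigned int len)
{
	unsigned int bottom_free = b->size - b->tail;

	if (b->free < len)
		return -1;
	if (bottom_free >= len)
		return b->tail;
	if (b->free - bottom_free >= len) {
		b->tail = 0;	/* wrap: skip the slack at the end */
		return 0;
	}
	return -1;
}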
-
-static void tx_update_descriptor(struct atmel_private *priv, int is_bcast,
- u16 len, u16 buff, u8 type)
-{
- atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff);
- atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len);
- if (!priv->use_wpa)
- atmel_wmem16(priv, atmel_tx(priv, TX_DESC_HOST_LENGTH_OFFSET, priv->tx_desc_tail), len);
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_tail), type);
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_RATE_OFFSET, priv->tx_desc_tail), priv->tx_rate);
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_RETRY_OFFSET, priv->tx_desc_tail), 0);
- if (priv->use_wpa) {
- int cipher_type, cipher_length;
- if (is_bcast) {
- cipher_type = priv->group_cipher_suite;
- if (cipher_type == CIPHER_SUITE_WEP_64 ||
- cipher_type == CIPHER_SUITE_WEP_128)
- cipher_length = 8;
- else if (cipher_type == CIPHER_SUITE_TKIP)
- cipher_length = 12;
- else if (priv->pairwise_cipher_suite == CIPHER_SUITE_WEP_64 ||
- priv->pairwise_cipher_suite == CIPHER_SUITE_WEP_128) {
- cipher_type = priv->pairwise_cipher_suite;
- cipher_length = 8;
- } else {
- cipher_type = CIPHER_SUITE_NONE;
- cipher_length = 0;
- }
- } else {
- cipher_type = priv->pairwise_cipher_suite;
- if (cipher_type == CIPHER_SUITE_WEP_64 ||
- cipher_type == CIPHER_SUITE_WEP_128)
- cipher_length = 8;
- else if (cipher_type == CIPHER_SUITE_TKIP)
- cipher_length = 12;
- else if (priv->group_cipher_suite == CIPHER_SUITE_WEP_64 ||
- priv->group_cipher_suite == CIPHER_SUITE_WEP_128) {
- cipher_type = priv->group_cipher_suite;
- cipher_length = 8;
- } else {
- cipher_type = CIPHER_SUITE_NONE;
- cipher_length = 0;
- }
- }
-
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail),
- cipher_type);
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail),
- cipher_length);
- }
- atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_tail), 0x80000000L);
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_tail), TX_FIRM_OWN);
- if (priv->tx_desc_previous != priv->tx_desc_tail)
- atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0);
- priv->tx_desc_previous = priv->tx_desc_tail;
- if (priv->tx_desc_tail < (priv->host_info.tx_desc_count - 1))
- priv->tx_desc_tail++;
- else
- priv->tx_desc_tail = 0;
- priv->tx_desc_free--;
- priv->tx_free_mem -= len;
-}
-
-static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct atmel_private *priv = netdev_priv(dev);
- struct ieee80211_hdr header;
- unsigned long flags;
- u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
-
- if (priv->card && priv->present_callback &&
- !(*priv->present_callback)(priv->card)) {
- dev->stats.tx_errors++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- if (priv->station_state != STATION_STATE_READY) {
- dev->stats.tx_errors++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* first ensure the timer func cannot run */
- spin_lock_bh(&priv->timerlock);
- /* then stop the hardware ISR */
- spin_lock_irqsave(&priv->irqlock, flags);
- /* nb doing the above in the opposite order will deadlock */
-
- /* The Wireless Header is 30 bytes. In the Ethernet packet we "cut" the
- first 12 bytes (containing DA/SA) and put them in the appropriate
- fields of the Wireless Header. Thus the packet length becomes the
- initial length + 18 (+30 - 12) */
-
- if (!(buff = find_tx_buff(priv, len + 18))) {
- dev->stats.tx_dropped++;
- spin_unlock_irqrestore(&priv->irqlock, flags);
- spin_unlock_bh(&priv->timerlock);
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
-
- frame_ctl = IEEE80211_FTYPE_DATA;
- header.duration_id = 0;
- header.seq_ctrl = 0;
- if (priv->wep_is_on)
- frame_ctl |= IEEE80211_FCTL_PROTECTED;
- if (priv->operating_mode == IW_MODE_ADHOC) {
- skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
- memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
- memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
- } else {
- frame_ctl |= IEEE80211_FCTL_TODS;
- memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
- memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
- skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
- }
-
- if (priv->use_wpa)
- memcpy(&header.addr4, rfc1042_header, ETH_ALEN);
-
- header.frame_control = cpu_to_le16(frame_ctl);
- /* Copy the wireless header into the card */
- atmel_copy_to_card(dev, buff, (unsigned char *)&header, DATA_FRAME_WS_HEADER_SIZE);
- /* Copy the packet sans its 802.3 header addresses which have been replaced */
- atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12);
- priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE;
-
- /* low bit of first byte of destination tells us if broadcast */
- tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
- dev->stats.tx_bytes += len;
-
- spin_unlock_irqrestore(&priv->irqlock, flags);
- spin_unlock_bh(&priv->timerlock);
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-static void atmel_transmit_management_frame(struct atmel_private *priv,
- struct ieee80211_hdr *header,
- u8 *body, int body_len)
-{
- u16 buff;
- int len = MGMT_FRAME_BODY_OFFSET + body_len;
-
- if (!(buff = find_tx_buff(priv, len)))
- return;
-
- atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET);
- atmel_copy_to_card(priv->dev, buff + MGMT_FRAME_BODY_OFFSET, body, body_len);
- priv->tx_buff_tail += len;
- tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
-}
-
-static void fast_rx_path(struct atmel_private *priv,
- struct ieee80211_hdr *header,
- u16 msdu_size, u16 rx_packet_loc, u32 crc)
-{
- /* fast path: unfragmented packet copy directly into skbuf */
- u8 mac4[6];
- struct sk_buff *skb;
- unsigned char *skbp;
-
- /* get the final mac-4 header field; this tells us the encapsulation */
- atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6);
- msdu_size -= 6;
-
- if (priv->do_rx_crc) {
- crc = crc32_le(crc, mac4, 6);
- msdu_size -= 4;
- }
-
- if (!(skb = dev_alloc_skb(msdu_size + 14))) {
- priv->dev->stats.rx_dropped++;
- return;
- }
-
- skb_reserve(skb, 2);
- skbp = skb_put(skb, msdu_size + 12);
- atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size);
-
- if (priv->do_rx_crc) {
- u32 netcrc;
- crc = crc32_le(crc, skbp + 12, msdu_size);
- atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + 30 + msdu_size, 4);
- if ((crc ^ 0xffffffff) != netcrc) {
- priv->dev->stats.rx_crc_errors++;
- dev_kfree_skb(skb);
- return;
- }
- }
-
- memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
- if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
- else
- memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
-
- skb->protocol = eth_type_trans(skb, priv->dev);
- skb->ip_summed = CHECKSUM_NONE;
- netif_rx(skb);
- priv->dev->stats.rx_bytes += 12 + msdu_size;
- priv->dev->stats.rx_packets++;
-}
-
-/* Test to see if the packet in card memory at packet_loc has a valid CRC
- It doesn't matter that this is slow: it is only used to proble the first few
- packets. */
-static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
-{
- int i = msdu_size - 4;
- u32 netcrc, crc = 0xffffffff;
-
- if (msdu_size < 4)
- return 0;
-
- atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4);
-
- atmel_writeAR(priv->dev, packet_loc);
- while (i--) {
- u8 octet = atmel_read8(priv->dev, DR);
- crc = crc32_le(crc, &octet, 1);
- }
-
- return (crc ^ 0xffffffff) == netcrc;
-}
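
The (crc ^ 0xffffffff) != netcrc comparison above is the standard IEEE 802 FCS convention: the CRC-32 is seeded with all ones and the final remainder is complemented before transmission, which is exactly what crc32_le() plus the closing XOR computes. A small kernel-style illustration of the same idiom; like the driver above, it compares the trailing FCS without byte-swapping, so it assumes a little-endian CPU:

#include <linux/crc32.h>
#include <linux/string.h>
#include <linux/types.h>

/* Returns true if the last four octets of @frame hold a valid FCS
 * over the preceding @len - 4 octets. */
static bool frame_fcs_ok(const u8 *frame, unsigned int len)
{
	u32 crc, fcs;

	if (len < 4)
		return false;

	crc = crc32_le(0xffffffff, frame, len - 4);
	memcpy(&fcs, frame + len - 4, sizeof(fcs));	/* unswapped load, as above */
	return (crc ^ 0xffffffff) == fcs;
}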
-
-static void frag_rx_path(struct atmel_private *priv,
- struct ieee80211_hdr *header,
- u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
- u8 frag_no, int more_frags)
-{
- u8 mac4[ETH_ALEN];
- u8 source[ETH_ALEN];
- struct sk_buff *skb;
-
- if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- memcpy(source, header->addr3, ETH_ALEN);
- else
- memcpy(source, header->addr2, ETH_ALEN);
-
- rx_packet_loc += 24; /* skip header */
-
- if (priv->do_rx_crc)
- msdu_size -= 4;
-
- if (frag_no == 0) { /* first fragment */
- atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
- msdu_size -= ETH_ALEN;
- rx_packet_loc += ETH_ALEN;
-
- if (priv->do_rx_crc)
- crc = crc32_le(crc, mac4, 6);
-
- priv->frag_seq = seq_no;
- priv->frag_no = 1;
- priv->frag_len = msdu_size;
- memcpy(priv->frag_source, source, ETH_ALEN);
- memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
- memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
-
- atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
-
- if (priv->do_rx_crc) {
- u32 netcrc;
- crc = crc32_le(crc, &priv->rx_buf[12], msdu_size);
- atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
- if ((crc ^ 0xffffffff) != netcrc) {
- priv->dev->stats.rx_crc_errors++;
- eth_broadcast_addr(priv->frag_source);
- }
- }
-
- } else if (priv->frag_no == frag_no &&
- priv->frag_seq == seq_no &&
- memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
-
- atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
- rx_packet_loc, msdu_size);
- if (priv->do_rx_crc) {
- u32 netcrc;
- crc = crc32_le(crc,
- &priv->rx_buf[12 + priv->frag_len],
- msdu_size);
- atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
- if ((crc ^ 0xffffffff) != netcrc) {
- priv->dev->stats.rx_crc_errors++;
- eth_broadcast_addr(priv->frag_source);
- more_frags = 1; /* don't send broken assembly */
- }
- }
-
- priv->frag_len += msdu_size;
- priv->frag_no++;
-
- if (!more_frags) { /* last one */
- eth_broadcast_addr(priv->frag_source);
- if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
- priv->dev->stats.rx_dropped++;
- } else {
- skb_reserve(skb, 2);
- skb_put_data(skb, priv->rx_buf,
- priv->frag_len + 12);
- skb->protocol = eth_type_trans(skb, priv->dev);
- skb->ip_summed = CHECKSUM_NONE;
- netif_rx(skb);
- priv->dev->stats.rx_bytes += priv->frag_len + 12;
- priv->dev->stats.rx_packets++;
- }
- }
- } else
- priv->wstats.discard.fragment++;
-}
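
frag_rx_path above implements single-flow 802.11 fragment reassembly: the driver tracks one in-progress MSDU, keyed by source address and sequence number, and accepts only the next expected fragment number; anything else bumps the discard counter. A compact sketch of just that acceptance test, with illustrative state:

#include <stdbool.h>
#include <string.h>

struct reasm {
	unsigned short seq;	/* sequence number being reassembled */
	unsigned char frag_no;	/* next fragment number expected */
	unsigned char src[6];	/* transmitter of the fragments */
};

/* Accept a fragment only if it starts or continues the one reassembly
 * in flight; the caller appends the payload when this returns true. */
static bool frag_accept(struct reasm *r, unsigned short seq,
			unsigned char frag_no, const unsigned char *src)
{
	if (frag_no == 0) {		/* first fragment starts a new MSDU */
		r->seq = seq;
		r->frag_no = 1;
		memcpy(r->src, src, 6);
		return true;
	}
	if (r->frag_no == frag_no && r->seq == seq &&
	    memcmp(r->src, src, 6) == 0) {
		r->frag_no++;		/* in order: extend the buffer */
		return true;
	}
	return false;			/* out of order: count and drop */
}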
-
-static void rx_done_irq(struct atmel_private *priv)
-{
- int i;
- struct ieee80211_hdr header;
-
- for (i = 0;
- atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
- i < priv->host_info.rx_desc_count;
- i++) {
-
- u16 msdu_size, rx_packet_loc, frame_ctl, seq_control;
- u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head));
- u32 crc = 0xffffffff;
-
- if (status != RX_STATUS_SUCCESS) {
- if (status == 0xc1) /* determined by experiment */
- priv->wstats.discard.nwid++;
- else
- priv->dev->stats.rx_errors++;
- goto next;
- }
-
- msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head));
- rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
-
- if (msdu_size < 30) {
- priv->dev->stats.rx_errors++;
- goto next;
- }
-
- /* Get header as far as end of seq_ctrl */
- atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24);
- frame_ctl = le16_to_cpu(header.frame_control);
- seq_control = le16_to_cpu(header.seq_ctrl);
-
- /* probe for CRC use here if needed; once five packets have
- arrived with the same CRC status, we assume we know what's
- happening and stop probing */
- if (priv->probe_crc) {
- if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) {
- priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
- } else {
- priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24);
- }
- if (priv->do_rx_crc) {
- if (priv->crc_ok_cnt++ > 5)
- priv->probe_crc = 0;
- } else {
- if (priv->crc_ko_cnt++ > 5)
- priv->probe_crc = 0;
- }
- }
-
- /* don't CRC header when WEP in use */
- if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) {
- crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
- }
- msdu_size -= 24; /* header */
-
- if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
- int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
- u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
- u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
-
- if (!more_fragments && packet_fragment_no == 0) {
- fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
- } else {
- frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc,
- packet_sequence_no, packet_fragment_no, more_fragments);
- }
- }
-
- if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
- /* copy rest of packet into buffer */
- atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
-
- /* we use the same buffer for frag reassembly and control packets */
- eth_broadcast_addr(priv->frag_source);
-
- if (priv->do_rx_crc) {
-				/* the last 4 octets are the CRC */
- msdu_size -= 4;
- crc = crc32_le(crc, (unsigned char *)&priv->rx_buf, msdu_size);
- if ((crc ^ 0xffffffff) != (*((u32 *)&priv->rx_buf[msdu_size]))) {
- priv->dev->stats.rx_crc_errors++;
- goto next;
- }
- }
-
- atmel_management_frame(priv, &header, msdu_size,
- atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head)));
- }
-
-next:
- /* release descriptor */
- atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED);
-
- if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1))
- priv->rx_desc_head++;
- else
- priv->rx_desc_head = 0;
- }
-}
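-
-/* A minimal stand-alone sketch (not from the original driver, names are
-   illustrative) of the ring-walk pattern rx_done_irq() uses above: test a
-   per-descriptor "valid" flag, consume and release, then advance the head
-   with wrap-around, bounded by the descriptor count so a misbehaving card
-   cannot wedge the CPU: */
-#if 0
-struct rx_ring_sketch {
-	unsigned int head;	/* next descriptor to inspect */
-	unsigned int count;	/* number of descriptors in the ring */
-};
-
-static void rx_ring_poll(struct rx_ring_sketch *r,
-			 int (*desc_valid)(unsigned int),
-			 void (*desc_consume)(unsigned int))
-{
-	unsigned int i;
-
-	for (i = 0; desc_valid(r->head) && i < r->count; i++) {
-		desc_consume(r->head);			/* handle + release */
-		r->head = (r->head + 1) % r->count;	/* wrap around */
-	}
-}
-#endif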
-
-static irqreturn_t service_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct atmel_private *priv = netdev_priv(dev);
- u8 isr;
- int i = -1;
- static const u8 irq_order[] = {
- ISR_OUT_OF_RANGE,
- ISR_RxCOMPLETE,
- ISR_TxCOMPLETE,
- ISR_RxFRAMELOST,
- ISR_FATAL_ERROR,
- ISR_COMMAND_COMPLETE,
- ISR_IBSS_MERGE,
- ISR_GENERIC_IRQ
- };
-
- if (priv->card && priv->present_callback &&
- !(*priv->present_callback)(priv->card))
- return IRQ_HANDLED;
-
-	/* In this state upper-level code assumes it can mess with
-	   the card unhampered by interrupts which may change register state.
-	   Note that even though the card shouldn't generate interrupts,
-	   the interrupt line may be shared. This allows card setup
-	   to go on without disabling interrupts for a long time. */
- if (priv->station_state == STATION_STATE_DOWN)
- return IRQ_NONE;
-
- atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */
-
- while (1) {
- if (!atmel_lock_mac(priv)) {
- /* failed to contact card */
- printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
- return IRQ_HANDLED;
- }
-
- isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
- atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
-
- if (!isr) {
- atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */
- return i == -1 ? IRQ_NONE : IRQ_HANDLED;
- }
-
- atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
-
- for (i = 0; i < ARRAY_SIZE(irq_order); i++)
- if (isr & irq_order[i])
- break;
-
- if (!atmel_lock_mac(priv)) {
- /* failed to contact card */
- printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
- return IRQ_HANDLED;
- }
-
- isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
- isr ^= irq_order[i];
- atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr);
- atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
-
- switch (irq_order[i]) {
-
- case ISR_OUT_OF_RANGE:
- if (priv->operating_mode == IW_MODE_INFRA &&
- priv->station_state == STATION_STATE_READY) {
- priv->station_is_associated = 0;
- atmel_scan(priv, 1);
- }
- break;
-
- case ISR_RxFRAMELOST:
- priv->wstats.discard.misc++;
- fallthrough;
- case ISR_RxCOMPLETE:
- rx_done_irq(priv);
- break;
-
- case ISR_TxCOMPLETE:
- tx_done_irq(priv);
- break;
-
- case ISR_FATAL_ERROR:
- printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name);
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- break;
-
- case ISR_COMMAND_COMPLETE:
- atmel_command_irq(priv);
- break;
-
- case ISR_IBSS_MERGE:
- atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
- priv->CurrentBSSID, 6);
- /* The WPA stuff cares about the current AP address */
- if (priv->use_wpa)
- build_wpa_mib(priv);
- break;
- case ISR_GENERIC_IRQ:
- printk(KERN_INFO "%s: Generic_irq received.\n", dev->name);
- break;
- }
- }
-}
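-
-/* A hypothetical sketch (not in the original source) of the selection step
-   the ISR loop above performs: service exactly one event per iteration,
-   chosen by the fixed priority order in irq_order[], then XOR its bit out
-   of the status byte as the acknowledgement: */
-#if 0
-static u8 pick_highest_priority(u8 isr, const u8 *order, int n)
-{
-	int i;
-
-	for (i = 0; i < n; i++)
-		if (isr & order[i])
-			return order[i];	/* the single event to service */
-	return 0;				/* nothing pending */
-}
-/* e.g. with isr = ISR_TxCOMPLETE | ISR_RxCOMPLETE this yields
-   ISR_RxCOMPLETE, since RX completion outranks TX completion. */
-#endif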
-
-static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
-{
- struct atmel_private *priv = netdev_priv(dev);
-
- /* update the link quality here in case we are seeing no beacons
- at all to drive the process */
- atmel_smooth_qual(priv);
-
- priv->wstats.status = priv->station_state;
-
- if (priv->operating_mode == IW_MODE_INFRA) {
- if (priv->station_state != STATION_STATE_READY) {
- priv->wstats.qual.qual = 0;
- priv->wstats.qual.level = 0;
- priv->wstats.qual.updated = (IW_QUAL_QUAL_INVALID
- | IW_QUAL_LEVEL_INVALID);
- }
- priv->wstats.qual.noise = 0;
- priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
- } else {
-		/* Quality levels cannot be determined in ad-hoc mode,
-		   because we can 'hear' more than one remote station. */
- priv->wstats.qual.qual = 0;
- priv->wstats.qual.level = 0;
- priv->wstats.qual.noise = 0;
- priv->wstats.qual.updated = IW_QUAL_QUAL_INVALID
- | IW_QUAL_LEVEL_INVALID
- | IW_QUAL_NOISE_INVALID;
- priv->wstats.miss.beacon = 0;
- }
-
- return &priv->wstats;
-}
-
-static int atmel_set_mac_address(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- eth_hw_addr_set(dev, addr->sa_data);
- return atmel_open(dev);
-}
-
-EXPORT_SYMBOL(atmel_open);
-
-int atmel_open(struct net_device *dev)
-{
- struct atmel_private *priv = netdev_priv(dev);
- int i, channel, err;
-
- /* any scheduled timer is no longer needed and might screw things up.. */
- del_timer_sync(&priv->management_timer);
-
- /* Interrupts will not touch the card once in this state... */
- priv->station_state = STATION_STATE_DOWN;
-
- if (priv->new_SSID_size) {
- memcpy(priv->SSID, priv->new_SSID, priv->new_SSID_size);
- priv->SSID_size = priv->new_SSID_size;
- priv->new_SSID_size = 0;
- }
- priv->BSS_list_entries = 0;
-
- priv->AuthenticationRequestRetryCnt = 0;
- priv->AssociationRequestRetryCnt = 0;
- priv->ReAssociationRequestRetryCnt = 0;
- priv->CurrentAuthentTransactionSeqNum = 0x0001;
- priv->ExpectedAuthentTransactionSeqNum = 0x0002;
-
- priv->site_survey_state = SITE_SURVEY_IDLE;
- priv->station_is_associated = 0;
-
- err = reset_atmel_card(dev);
- if (err)
- return err;
-
- if (priv->config_reg_domain) {
- priv->reg_domain = priv->config_reg_domain;
- atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain);
- } else {
- priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS);
- for (i = 0; i < ARRAY_SIZE(channel_table); i++)
- if (priv->reg_domain == channel_table[i].reg_domain)
- break;
- if (i == ARRAY_SIZE(channel_table)) {
- priv->reg_domain = REG_DOMAIN_MKK1;
- printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
- }
- }
-
- if ((channel = atmel_validate_channel(priv, priv->channel)))
- priv->channel = channel;
-
- /* this moves station_state on.... */
- atmel_scan(priv, 1);
-
- atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */
- return 0;
-}
-
-static int atmel_close(struct net_device *dev)
-{
- struct atmel_private *priv = netdev_priv(dev);
-
- /* Send event to userspace that we are disassociating */
- if (priv->station_state == STATION_STATE_READY) {
- union iwreq_data wrqu;
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
- }
-
- atmel_enter_state(priv, STATION_STATE_DOWN);
-
- if (priv->bus_type == BUS_TYPE_PCCARD)
- atmel_write16(dev, GCR, 0x0060);
- atmel_write16(dev, GCR, 0x0040);
- return 0;
-}
-
-static int atmel_validate_channel(struct atmel_private *priv, int channel)
-{
-	/* check that the channel is OK; if so, return zero,
-	   else return a suitable default channel */
- int i;
-
- for (i = 0; i < ARRAY_SIZE(channel_table); i++)
- if (priv->reg_domain == channel_table[i].reg_domain) {
- if (channel >= channel_table[i].min &&
- channel <= channel_table[i].max)
- return 0;
- else
- return channel_table[i].min;
- }
- return 0;
-}
-
-#ifdef CONFIG_PROC_FS
-static int atmel_proc_show(struct seq_file *m, void *v)
-{
- struct atmel_private *priv = m->private;
- int i;
- char *s, *r, *c;
-
- seq_printf(m, "Driver version:\t\t%d.%d\n", DRIVER_MAJOR, DRIVER_MINOR);
-
- if (priv->station_state != STATION_STATE_DOWN) {
- seq_printf(m,
- "Firmware version:\t%d.%d build %d\n"
- "Firmware location:\t",
- priv->host_info.major_version,
- priv->host_info.minor_version,
- priv->host_info.build_version);
-
- if (priv->card_type != CARD_TYPE_EEPROM)
- seq_puts(m, "on card\n");
- else if (priv->firmware)
- seq_printf(m, "%s loaded by host\n", priv->firmware_id);
- else
- seq_printf(m, "%s loaded by hotplug\n", priv->firmware_id);
-
- switch (priv->card_type) {
- case CARD_TYPE_PARALLEL_FLASH:
- c = "Parallel flash";
- break;
- case CARD_TYPE_SPI_FLASH:
-			c = "SPI flash";
- break;
- case CARD_TYPE_EEPROM:
- c = "EEPROM";
- break;
- default:
- c = "<unknown>";
- }
-
- r = "<unknown>";
- for (i = 0; i < ARRAY_SIZE(channel_table); i++)
- if (priv->reg_domain == channel_table[i].reg_domain)
- r = channel_table[i].name;
-
- seq_printf(m, "MAC memory type:\t%s\n", c);
- seq_printf(m, "Regulatory domain:\t%s\n", r);
- seq_printf(m, "Host CRC checking:\t%s\n",
- priv->do_rx_crc ? "On" : "Off");
- seq_printf(m, "WPA-capable firmware:\t%s\n",
- priv->use_wpa ? "Yes" : "No");
- }
-
- switch (priv->station_state) {
- case STATION_STATE_SCANNING:
- s = "Scanning";
- break;
- case STATION_STATE_JOINNING:
- s = "Joining";
- break;
- case STATION_STATE_AUTHENTICATING:
- s = "Authenticating";
- break;
- case STATION_STATE_ASSOCIATING:
- s = "Associating";
- break;
- case STATION_STATE_READY:
- s = "Ready";
- break;
- case STATION_STATE_REASSOCIATING:
- s = "Reassociating";
- break;
- case STATION_STATE_MGMT_ERROR:
- s = "Management error";
- break;
- case STATION_STATE_DOWN:
- s = "Down";
- break;
- default:
- s = "<unknown>";
- }
-
- seq_printf(m, "Current state:\t\t%s\n", s);
- return 0;
-}
-#endif
-
-static const struct net_device_ops atmel_netdev_ops = {
- .ndo_open = atmel_open,
- .ndo_stop = atmel_close,
- .ndo_set_mac_address = atmel_set_mac_address,
- .ndo_start_xmit = start_tx,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
- const AtmelFWType fw_type,
- struct device *sys_dev,
- int (*card_present)(void *), void *card)
-{
- struct net_device *dev;
- struct atmel_private *priv;
- int rc;
-
- /* Create the network device object. */
- dev = alloc_etherdev(sizeof(*priv));
- if (!dev)
- return NULL;
-
- if (dev_alloc_name(dev, dev->name) < 0) {
- printk(KERN_ERR "atmel: Couldn't get name!\n");
- goto err_out_free;
- }
-
- priv = netdev_priv(dev);
- priv->dev = dev;
- priv->sys_dev = sys_dev;
- priv->present_callback = card_present;
- priv->card = card;
- priv->firmware = NULL;
- priv->firmware_type = fw_type;
- if (firmware) /* module parameter */
- strscpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
- priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
- priv->station_state = STATION_STATE_DOWN;
- priv->do_rx_crc = 0;
-	/* For PCMCIA cards, some chips need CRC, some don't,
-	   so we have to probe. */
- if (priv->bus_type == BUS_TYPE_PCCARD) {
- priv->probe_crc = 1;
- priv->crc_ok_cnt = priv->crc_ko_cnt = 0;
- } else
- priv->probe_crc = 0;
- priv->last_qual = jiffies;
- priv->last_beacon_timestamp = 0;
- memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
- eth_zero_addr(priv->BSSID);
- priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
- priv->station_was_associated = 0;
-
- priv->last_survey = jiffies;
- priv->preamble = LONG_PREAMBLE;
- priv->operating_mode = IW_MODE_INFRA;
- priv->connect_to_any_BSS = 0;
- priv->config_reg_domain = 0;
- priv->reg_domain = 0;
- priv->tx_rate = 3;
- priv->auto_tx_rate = 1;
- priv->channel = 4;
- priv->power_mode = 0;
- priv->SSID[0] = '\0';
- priv->SSID_size = 0;
- priv->new_SSID_size = 0;
- priv->frag_threshold = 2346;
- priv->rts_threshold = 2347;
- priv->short_retry = 7;
- priv->long_retry = 4;
-
- priv->wep_is_on = 0;
- priv->default_key = 0;
- priv->encryption_level = 0;
- priv->exclude_unencrypted = 0;
- priv->group_cipher_suite = priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
- priv->use_wpa = 0;
- memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
- memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
-
- priv->default_beacon_period = priv->beacon_period = 100;
- priv->listen_interval = 1;
-
- timer_setup(&priv->management_timer, atmel_management_timer, 0);
- spin_lock_init(&priv->irqlock);
- spin_lock_init(&priv->timerlock);
-
- dev->netdev_ops = &atmel_netdev_ops;
- dev->wireless_handlers = &atmel_handler_def;
- dev->irq = irq;
- dev->base_addr = port;
-
- /* MTU range: 68 - 2312 */
- dev->min_mtu = 68;
- dev->max_mtu = MAX_WIRELESS_BODY - ETH_FCS_LEN;
-
- SET_NETDEV_DEV(dev, sys_dev);
-
- if ((rc = request_irq(dev->irq, service_interrupt, IRQF_SHARED, dev->name, dev))) {
- printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc);
- goto err_out_free;
- }
-
- if (!request_region(dev->base_addr, 32,
- priv->bus_type == BUS_TYPE_PCCARD ? "atmel_cs" : "atmel_pci")) {
- goto err_out_irq;
- }
-
- if (register_netdev(dev))
- goto err_out_res;
-
- if (!probe_atmel_card(dev)) {
- unregister_netdev(dev);
- goto err_out_res;
- }
-
- netif_carrier_off(dev);
-
- if (!proc_create_single_data("driver/atmel", 0, NULL, atmel_proc_show,
- priv))
- printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
-
- printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n",
- dev->name, DRIVER_MAJOR, DRIVER_MINOR, dev->dev_addr);
-
- return dev;
-
-err_out_res:
- release_region(dev->base_addr, 32);
-err_out_irq:
- free_irq(dev->irq, dev);
-err_out_free:
- free_netdev(dev);
- return NULL;
-}
-
-EXPORT_SYMBOL(init_atmel_card);
-
-void stop_atmel_card(struct net_device *dev)
-{
- struct atmel_private *priv = netdev_priv(dev);
-
- /* put a brick on it... */
- if (priv->bus_type == BUS_TYPE_PCCARD)
- atmel_write16(dev, GCR, 0x0060);
- atmel_write16(dev, GCR, 0x0040);
-
- del_timer_sync(&priv->management_timer);
- unregister_netdev(dev);
- remove_proc_entry("driver/atmel", NULL);
- free_irq(dev->irq, dev);
- kfree(priv->firmware);
- release_region(dev->base_addr, 32);
- free_netdev(dev);
-}
-
-EXPORT_SYMBOL(stop_atmel_card);
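-
-/* A hypothetical attach/detach caller (not part of the driver) showing how
-   the bus glue is expected to use the two exported entry points; the irq,
-   port and firmware type below are made-up example values: */
-#if 0
-static struct net_device *example_attach(struct device *sys_dev)
-{
-	/* irq/port would come from PCMCIA or PCI resource discovery;
-	   passing a NULL card_present callback selects BUS_TYPE_PCI */
-	return init_atmel_card(11, 0x340, ATMEL_FW_TYPE_502, sys_dev,
-			       NULL, NULL);
-}
-
-static void example_detach(struct net_device *dev)
-{
-	stop_atmel_card(dev);	/* unregisters netdev, frees irq/region */
-}
-#endif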
-
-static int atmel_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->essid;
- struct atmel_private *priv = netdev_priv(dev);
-
- /* Check if we asked for `any' */
- if (dwrq->flags == 0) {
- priv->connect_to_any_BSS = 1;
- } else {
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- priv->connect_to_any_BSS = 0;
-
- /* Check the size of the string */
- if (dwrq->length > MAX_SSID_LENGTH)
- return -E2BIG;
- if (index != 0)
- return -EINVAL;
-
- memcpy(priv->new_SSID, extra, dwrq->length);
- priv->new_SSID_size = dwrq->length;
- }
-
- return -EINPROGRESS;
-}
-
-static int atmel_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->essid;
- struct atmel_private *priv = netdev_priv(dev);
-
- /* Get the current SSID */
- if (priv->new_SSID_size != 0) {
- memcpy(extra, priv->new_SSID, priv->new_SSID_size);
- dwrq->length = priv->new_SSID_size;
- } else {
- memcpy(extra, priv->SSID, priv->SSID_size);
- dwrq->length = priv->SSID_size;
- }
-
- dwrq->flags = !priv->connect_to_any_BSS; /* active */
-
- return 0;
-}
-
-static int atmel_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct sockaddr *awrq = &wrqu->ap_addr;
- struct atmel_private *priv = netdev_priv(dev);
- memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
- awrq->sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-static int atmel_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->encoding;
- struct atmel_private *priv = netdev_priv(dev);
-
-	/* Basic checking: do we have a key to set?
-	 * Note: with the new API, it's impossible to get a NULL pointer.
-	 * Therefore, we need to check for a key size == 0 instead.
-	 * New versions of iwconfig properly set the IW_ENCODE_NOKEY flag
-	 * when no key is present (only changing flags), but older versions
-	 * don't do it. - Jean II */
- if (dwrq->length > 0) {
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- int current_index = priv->default_key;
- /* Check the size of the key */
- if (dwrq->length > 13) {
- return -EINVAL;
- }
- /* Check the index (none -> use current) */
- if (index < 0 || index >= 4)
- index = current_index;
- else
- priv->default_key = index;
- /* Set the length */
- if (dwrq->length > 5)
- priv->wep_key_len[index] = 13;
- else
- if (dwrq->length > 0)
- priv->wep_key_len[index] = 5;
- else
- /* Disable the key */
- priv->wep_key_len[index] = 0;
- /* Check if the key is not marked as invalid */
- if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
- /* Cleanup */
- memset(priv->wep_keys[index], 0, 13);
- /* Copy the key in the driver */
- memcpy(priv->wep_keys[index], extra, dwrq->length);
- }
- /* WE specify that if a valid key is set, encryption
- * should be enabled (user may turn it off later)
- * This is also how "iwconfig ethX key on" works */
- if (index == current_index &&
- priv->wep_key_len[index] > 0) {
- priv->wep_is_on = 1;
- priv->exclude_unencrypted = 1;
- if (priv->wep_key_len[index] > 5) {
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
- priv->encryption_level = 2;
- } else {
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
- priv->encryption_level = 1;
- }
- }
- } else {
- /* Do we want to just set the transmit key index ? */
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- if (index >= 0 && index < 4) {
- priv->default_key = index;
- } else
- /* Don't complain if only change the mode */
- if (!(dwrq->flags & IW_ENCODE_MODE))
- return -EINVAL;
- }
- /* Read the flags */
- if (dwrq->flags & IW_ENCODE_DISABLED) {
- priv->wep_is_on = 0;
- priv->encryption_level = 0;
- priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
- } else {
- priv->wep_is_on = 1;
- if (priv->wep_key_len[priv->default_key] > 5) {
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
- priv->encryption_level = 2;
- } else {
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
- priv->encryption_level = 1;
- }
- }
- if (dwrq->flags & IW_ENCODE_RESTRICTED)
- priv->exclude_unencrypted = 1;
- if (dwrq->flags & IW_ENCODE_OPEN)
- priv->exclude_unencrypted = 0;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int atmel_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->encoding;
- struct atmel_private *priv = netdev_priv(dev);
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- if (!priv->wep_is_on)
- dwrq->flags = IW_ENCODE_DISABLED;
- else {
- if (priv->exclude_unencrypted)
- dwrq->flags = IW_ENCODE_RESTRICTED;
- else
- dwrq->flags = IW_ENCODE_OPEN;
- }
- /* Which key do we want ? -1 -> tx index */
- if (index < 0 || index >= 4)
- index = priv->default_key;
- dwrq->flags |= index + 1;
- /* Copy the key to the user buffer */
- dwrq->length = priv->wep_key_len[index];
- if (dwrq->length > 16) {
- dwrq->length = 0;
- } else {
- memset(extra, 0, 16);
- memcpy(extra, priv->wep_keys[index], dwrq->length);
- }
-
- return 0;
-}
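-
-/* The encode handlers above all repeat the same WEP sizing rule: a key
-   longer than 5 octets is stored as a 13-octet WEP-128 key, any other
-   non-empty key as a 5-octet WEP-64 key. As a hypothetical helper (not in
-   the original driver), for a non-empty key: */
-#if 0
-static void wep_classify(struct atmel_private *priv, int idx, int key_len)
-{
-	if (key_len > 5) {
-		priv->wep_key_len[idx] = 13;
-		priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
-		priv->encryption_level = 2;
-	} else {
-		priv->wep_key_len[idx] = 5;
-		priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
-		priv->encryption_level = 1;
-	}
-}
-#endif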
-
-static int atmel_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct atmel_private *priv = netdev_priv(dev);
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, key_len, alg = ext->alg, set_key = 1;
-
- /* Determine and validate the key index */
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > 4)
- return -EINVAL;
- idx--;
- } else
- idx = priv->default_key;
-
- if (encoding->flags & IW_ENCODE_DISABLED)
- alg = IW_ENCODE_ALG_NONE;
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- priv->default_key = idx;
- set_key = ext->key_len > 0 ? 1 : 0;
- }
-
- if (set_key) {
- /* Set the requested key first */
- switch (alg) {
- case IW_ENCODE_ALG_NONE:
- priv->wep_is_on = 0;
- priv->encryption_level = 0;
- priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
- break;
- case IW_ENCODE_ALG_WEP:
- if (ext->key_len > 5) {
- priv->wep_key_len[idx] = 13;
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
- priv->encryption_level = 2;
- } else if (ext->key_len > 0) {
- priv->wep_key_len[idx] = 5;
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
- priv->encryption_level = 1;
- } else {
- return -EINVAL;
- }
- priv->wep_is_on = 1;
- memset(priv->wep_keys[idx], 0, 13);
-			key_len = min((int)ext->key_len, priv->wep_key_len[idx]);
- memcpy(priv->wep_keys[idx], ext->key, key_len);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return -EINPROGRESS;
-}
-
-static int atmel_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct atmel_private *priv = netdev_priv(dev);
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, max_key_len;
-
- max_key_len = encoding->length - sizeof(*ext);
- if (max_key_len < 0)
- return -EINVAL;
-
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (idx < 1 || idx > 4)
- return -EINVAL;
- idx--;
- } else
- idx = priv->default_key;
-
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- if (!priv->wep_is_on) {
- ext->alg = IW_ENCODE_ALG_NONE;
- ext->key_len = 0;
- encoding->flags |= IW_ENCODE_DISABLED;
- } else {
- if (priv->encryption_level > 0)
- ext->alg = IW_ENCODE_ALG_WEP;
- else
- return -EINVAL;
-
- ext->key_len = priv->wep_key_len[idx];
- memcpy(ext->key, priv->wep_keys[idx], ext->key_len);
- encoding->flags |= IW_ENCODE_ENABLED;
- }
-
- return 0;
-}
-
-static int atmel_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct atmel_private *priv = netdev_priv(dev);
- struct iw_param *param = &wrqu->param;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- case IW_AUTH_PRIVACY_INVOKED:
- /*
- * atmel does not use these parameters
- */
- break;
-
- case IW_AUTH_DROP_UNENCRYPTED:
- priv->exclude_unencrypted = param->value ? 1 : 0;
- break;
-
- case IW_AUTH_80211_AUTH_ALG: {
- if (param->value & IW_AUTH_ALG_SHARED_KEY) {
- priv->exclude_unencrypted = 1;
- } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
- priv->exclude_unencrypted = 0;
- } else
- return -EINVAL;
- break;
- }
-
- case IW_AUTH_WPA_ENABLED:
- /* Silently accept disable of WPA */
- if (param->value > 0)
- return -EOPNOTSUPP;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return -EINPROGRESS;
-}
-
-static int atmel_get_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct atmel_private *priv = netdev_priv(dev);
- struct iw_param *param = &wrqu->param;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_DROP_UNENCRYPTED:
- param->value = priv->exclude_unencrypted;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- if (priv->exclude_unencrypted == 1)
- param->value = IW_AUTH_ALG_SHARED_KEY;
- else
- param->value = IW_AUTH_ALG_OPEN_SYSTEM;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- param->value = 0;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-
-static int atmel_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- strcpy(wrqu->name, "IEEE 802.11-DS");
- return 0;
-}
-
-static int atmel_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->bitrate;
- struct atmel_private *priv = netdev_priv(dev);
-
- if (vwrq->fixed == 0) {
- priv->tx_rate = 3;
- priv->auto_tx_rate = 1;
- } else {
- priv->auto_tx_rate = 0;
-
- /* Which type of value ? */
- if ((vwrq->value < 4) && (vwrq->value >= 0)) {
- /* Setting by rate index */
- priv->tx_rate = vwrq->value;
- } else {
- /* Setting by frequency value */
- switch (vwrq->value) {
- case 1000000:
- priv->tx_rate = 0;
- break;
- case 2000000:
- priv->tx_rate = 1;
- break;
- case 5500000:
- priv->tx_rate = 2;
- break;
- case 11000000:
- priv->tx_rate = 3;
- break;
- default:
- return -EINVAL;
- }
- }
- }
-
- return -EINPROGRESS;
-}
-
-static int atmel_set_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- __u32 *uwrq = &wrqu->mode;
- struct atmel_private *priv = netdev_priv(dev);
-
- if (*uwrq != IW_MODE_ADHOC && *uwrq != IW_MODE_INFRA)
- return -EINVAL;
-
- priv->operating_mode = *uwrq;
- return -EINPROGRESS;
-}
-
-static int atmel_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- __u32 *uwrq = &wrqu->mode;
- struct atmel_private *priv = netdev_priv(dev);
-
- *uwrq = priv->operating_mode;
- return 0;
-}
-
-static int atmel_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->bitrate;
- struct atmel_private *priv = netdev_priv(dev);
-
- if (priv->auto_tx_rate) {
- vwrq->fixed = 0;
- vwrq->value = 11000000;
- } else {
- vwrq->fixed = 1;
- switch (priv->tx_rate) {
- case 0:
- vwrq->value = 1000000;
- break;
- case 1:
- vwrq->value = 2000000;
- break;
- case 2:
- vwrq->value = 5500000;
- break;
- case 3:
- vwrq->value = 11000000;
- break;
- }
- }
- return 0;
-}
-
-static int atmel_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->power;
- struct atmel_private *priv = netdev_priv(dev);
- priv->power_mode = vwrq->disabled ? 0 : 1;
- return -EINPROGRESS;
-}
-
-static int atmel_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->power;
- struct atmel_private *priv = netdev_priv(dev);
- vwrq->disabled = priv->power_mode ? 0 : 1;
- vwrq->flags = IW_POWER_ON;
- return 0;
-}
-
-static int atmel_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->retry;
- struct atmel_private *priv = netdev_priv(dev);
-
- if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
- if (vwrq->flags & IW_RETRY_LONG)
- priv->long_retry = vwrq->value;
- else if (vwrq->flags & IW_RETRY_SHORT)
- priv->short_retry = vwrq->value;
- else {
- /* No modifier : set both */
- priv->long_retry = vwrq->value;
- priv->short_retry = vwrq->value;
- }
- return -EINPROGRESS;
- }
-
- return -EINVAL;
-}
-
-static int atmel_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->retry;
- struct atmel_private *priv = netdev_priv(dev);
-
- vwrq->disabled = 0; /* Can't be disabled */
-
-	/* Note: by default, display the short retry number */
- if (vwrq->flags & IW_RETRY_LONG) {
- vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- vwrq->value = priv->long_retry;
- } else {
- vwrq->flags = IW_RETRY_LIMIT;
- vwrq->value = priv->short_retry;
- if (priv->long_retry != priv->short_retry)
- vwrq->flags |= IW_RETRY_SHORT;
- }
-
- return 0;
-}
-
-static int atmel_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->rts;
- struct atmel_private *priv = netdev_priv(dev);
- int rthr = vwrq->value;
-
- if (vwrq->disabled)
- rthr = 2347;
- if ((rthr < 0) || (rthr > 2347)) {
- return -EINVAL;
- }
- priv->rts_threshold = rthr;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int atmel_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->rts;
- struct atmel_private *priv = netdev_priv(dev);
-
- vwrq->value = priv->rts_threshold;
- vwrq->disabled = (vwrq->value >= 2347);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-static int atmel_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->frag;
- struct atmel_private *priv = netdev_priv(dev);
- int fthr = vwrq->value;
-
- if (vwrq->disabled)
- fthr = 2346;
- if ((fthr < 256) || (fthr > 2346)) {
- return -EINVAL;
- }
- fthr &= ~0x1; /* Get an even value - is it really needed ??? */
- priv->frag_threshold = fthr;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int atmel_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->frag;
- struct atmel_private *priv = netdev_priv(dev);
-
- vwrq->value = priv->frag_threshold;
- vwrq->disabled = (vwrq->value >= 2346);
- vwrq->fixed = 1;
-
- return 0;
-}
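-
-/* The RTS and fragmentation handlers above rely on the usual wireless
-   extensions sentinel convention: setting the threshold past the largest
-   802.11 MSDU disables the mechanism, i.e. 2347 means "never send
-   RTS/CTS" and 2346 means "never fragment". As hypothetical macros: */
-#if 0
-#define EX_RTS_DISABLED(thr)	((thr) >= 2347)
-#define EX_FRAG_DISABLED(thr)	((thr) >= 2346)
-#endif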
-
-static int atmel_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_freq *fwrq = &wrqu->freq;
- struct atmel_private *priv = netdev_priv(dev);
- int rc = -EINPROGRESS; /* Call commit handler */
-
- /* If setting by frequency, convert to a channel */
- if (fwrq->e == 1) {
- int f = fwrq->m / 100000;
-
- /* Hack to fall through... */
- fwrq->e = 0;
- fwrq->m = ieee80211_frequency_to_channel(f);
- }
- /* Setting by channel number */
- if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
- rc = -EOPNOTSUPP;
- else {
- int channel = fwrq->m;
- if (atmel_validate_channel(priv, channel) == 0) {
- priv->channel = channel;
- } else {
- rc = -EINVAL;
- }
- }
- return rc;
-}
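-
-/* A worked example of the iw_freq decoding above: a request for 2.412 GHz
-   arrives as m = 241200000, e = 1 (value = m * 10^e). m / 100000 recovers
-   2412 MHz, and ieee80211_frequency_to_channel(2412) yields channel 1;
-   2484 MHz likewise yields channel 14. */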
-
-static int atmel_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_freq *fwrq = &wrqu->freq;
- struct atmel_private *priv = netdev_priv(dev);
-
- fwrq->m = priv->channel;
- fwrq->e = 0;
- return 0;
-}
-
-static int atmel_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *dwrq,
- char *extra)
-{
- struct atmel_private *priv = netdev_priv(dev);
- unsigned long flags;
-
-	/* Note: you may have realised that, as this is a SET operation,
-	 * it is privileged and therefore a normal user can't
-	 * perform scanning.
-	 * This is not an error: while the device performs scanning,
-	 * traffic doesn't flow, so it would be a perfect DoS...
-	 * Jean II */
-
- if (priv->station_state == STATION_STATE_DOWN)
- return -EAGAIN;
-
- /* Timeout old surveys. */
- if (time_after(jiffies, priv->last_survey + 20 * HZ))
- priv->site_survey_state = SITE_SURVEY_IDLE;
- priv->last_survey = jiffies;
-
- /* Initiate a scan command */
- if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS)
- return -EBUSY;
-
- del_timer_sync(&priv->management_timer);
- spin_lock_irqsave(&priv->irqlock, flags);
-
- priv->site_survey_state = SITE_SURVEY_IN_PROGRESS;
- priv->fast_scan = 0;
- atmel_scan(priv, 0);
- spin_unlock_irqrestore(&priv->irqlock, flags);
-
- return 0;
-}
-
-static int atmel_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct atmel_private *priv = netdev_priv(dev);
- int i;
- char *current_ev = extra;
- struct iw_event iwe;
-
- if (priv->site_survey_state != SITE_SURVEY_COMPLETED)
- return -EAGAIN;
-
- for (i = 0; i < priv->BSS_list_entries; i++) {
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_ADDR_LEN);
-
- iwe.u.data.length = priv->BSSinfo[i].SSIDsize;
- if (iwe.u.data.length > 32)
- iwe.u.data.length = 32;
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, priv->BSSinfo[i].SSID);
-
- iwe.cmd = SIOCGIWMODE;
- iwe.u.mode = priv->BSSinfo[i].BSStype;
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_UINT_LEN);
-
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = priv->BSSinfo[i].channel;
- iwe.u.freq.e = 0;
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_FREQ_LEN);
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.level = priv->BSSinfo[i].RSSI;
- iwe.u.qual.qual = iwe.u.qual.level;
- /* iwe.u.qual.noise = SOMETHING */
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_QUAL_LEN);
-
-
- iwe.cmd = SIOCGIWENCODE;
- if (priv->BSSinfo[i].UsingWEP)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, NULL);
- }
-
- /* Length of data */
- dwrq->length = (current_ev - extra);
- dwrq->flags = 0;
-
- return 0;
-}
-
-static int atmel_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct atmel_private *priv = netdev_priv(dev);
- struct iw_range *range = (struct iw_range *) extra;
- int k, i, j;
-
- dwrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(struct iw_range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 0;
- for (j = 0; j < ARRAY_SIZE(channel_table); j++)
- if (priv->reg_domain == channel_table[j].reg_domain) {
- range->num_channels = channel_table[j].max - channel_table[j].min + 1;
- break;
- }
- if (range->num_channels != 0) {
- for (k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) {
- range->freq[k].i = i; /* List index */
-
- /* Values in MHz -> * 10^5 * 10 */
- range->freq[k].m = 100000 *
- ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
- range->freq[k++].e = 1;
- }
- range->num_frequency = k;
- }
-
- range->max_qual.qual = 100;
- range->max_qual.level = 100;
- range->max_qual.noise = 0;
- range->max_qual.updated = IW_QUAL_NOISE_INVALID;
-
- range->avg_qual.qual = 50;
- range->avg_qual.level = 50;
- range->avg_qual.noise = 0;
- range->avg_qual.updated = IW_QUAL_NOISE_INVALID;
-
- range->sensitivity = 0;
-
- range->bitrate[0] = 1000000;
- range->bitrate[1] = 2000000;
- range->bitrate[2] = 5500000;
- range->bitrate[3] = 11000000;
- range->num_bitrates = 4;
-
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
-
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
- range->num_encoding_sizes = 2;
- range->max_encoding_tokens = 4;
-
- range->pmp_flags = IW_POWER_ON;
- range->pmt_flags = IW_POWER_ON;
- range->pm_capa = 0;
-
- range->we_version_source = WIRELESS_EXT;
- range->we_version_compiled = WIRELESS_EXT;
- range->retry_capa = IW_RETRY_LIMIT ;
- range->retry_flags = IW_RETRY_LIMIT;
- range->r_time_flags = 0;
- range->min_retry = 1;
- range->max_retry = 65535;
-
- return 0;
-}
-
-static int atmel_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct sockaddr *awrq = &wrqu->ap_addr;
- struct atmel_private *priv = netdev_priv(dev);
- int i;
- static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- static const u8 off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- unsigned long flags;
-
- if (awrq->sa_family != ARPHRD_ETHER)
- return -EINVAL;
-
- if (!memcmp(any, awrq->sa_data, 6) ||
- !memcmp(off, awrq->sa_data, 6)) {
- del_timer_sync(&priv->management_timer);
- spin_lock_irqsave(&priv->irqlock, flags);
- atmel_scan(priv, 1);
- spin_unlock_irqrestore(&priv->irqlock, flags);
- return 0;
- }
-
- for (i = 0; i < priv->BSS_list_entries; i++) {
- if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) {
- if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) {
- return -EINVAL;
- } else if (priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) {
- return -EINVAL;
- } else {
- del_timer_sync(&priv->management_timer);
- spin_lock_irqsave(&priv->irqlock, flags);
- atmel_join_bss(priv, i);
- spin_unlock_irqrestore(&priv->irqlock, flags);
- return 0;
- }
- }
- }
-
- return -EINVAL;
-}
-
-static int atmel_config_commit(struct net_device *dev,
- struct iw_request_info *info, /* NULL */
- union iwreq_data *zwrq, /* NULL */
- char *extra) /* NULL */
-{
- return atmel_open(dev);
-}
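-
-/* Many handlers in this file return -EINPROGRESS (aliased as EIWCOMMIT in
-   <net/iw_handler.h>): the wireless extensions core takes that as
-   "parameters cached, please commit" and invokes this SIOCSIWCOMMIT
-   handler, which simply re-runs atmel_open() to push the whole
-   configuration to the card. */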
-
-static const iw_handler atmel_handler[] =
-{
- IW_HANDLER(SIOCSIWCOMMIT, atmel_config_commit),
- IW_HANDLER(SIOCGIWNAME, atmel_get_name),
- IW_HANDLER(SIOCSIWFREQ, atmel_set_freq),
- IW_HANDLER(SIOCGIWFREQ, atmel_get_freq),
- IW_HANDLER(SIOCSIWMODE, atmel_set_mode),
- IW_HANDLER(SIOCGIWMODE, atmel_get_mode),
- IW_HANDLER(SIOCGIWRANGE, atmel_get_range),
- IW_HANDLER(SIOCSIWAP, atmel_set_wap),
- IW_HANDLER(SIOCGIWAP, atmel_get_wap),
- IW_HANDLER(SIOCSIWSCAN, atmel_set_scan),
- IW_HANDLER(SIOCGIWSCAN, atmel_get_scan),
- IW_HANDLER(SIOCSIWESSID, atmel_set_essid),
- IW_HANDLER(SIOCGIWESSID, atmel_get_essid),
- IW_HANDLER(SIOCSIWRATE, atmel_set_rate),
- IW_HANDLER(SIOCGIWRATE, atmel_get_rate),
- IW_HANDLER(SIOCSIWRTS, atmel_set_rts),
- IW_HANDLER(SIOCGIWRTS, atmel_get_rts),
- IW_HANDLER(SIOCSIWFRAG, atmel_set_frag),
- IW_HANDLER(SIOCGIWFRAG, atmel_get_frag),
- IW_HANDLER(SIOCSIWRETRY, atmel_set_retry),
- IW_HANDLER(SIOCGIWRETRY, atmel_get_retry),
- IW_HANDLER(SIOCSIWENCODE, atmel_set_encode),
- IW_HANDLER(SIOCGIWENCODE, atmel_get_encode),
- IW_HANDLER(SIOCSIWPOWER, atmel_set_power),
- IW_HANDLER(SIOCGIWPOWER, atmel_get_power),
- IW_HANDLER(SIOCSIWAUTH, atmel_set_auth),
- IW_HANDLER(SIOCGIWAUTH, atmel_get_auth),
- IW_HANDLER(SIOCSIWENCODEEXT, atmel_set_encodeext),
- IW_HANDLER(SIOCGIWENCODEEXT, atmel_get_encodeext),
-};
-
-static const iw_handler atmel_private_handler[] =
-{
- NULL, /* SIOCIWFIRSTPRIV */
-};
-
-struct atmel_priv_ioctl {
- char id[32];
- unsigned char __user *data;
- unsigned short len;
-};
-
-#define ATMELFWL SIOCIWFIRSTPRIV
-#define ATMELIDIFC ATMELFWL + 1
-#define ATMELRD ATMELFWL + 2
-#define ATMELMAGIC 0x51807
-#define REGDOMAINSZ 20
-
-static const struct iw_priv_args atmel_private_args[] = {
- {
- .cmd = ATMELFWL,
- .set_args = IW_PRIV_TYPE_BYTE
- | IW_PRIV_SIZE_FIXED
- | sizeof(struct atmel_priv_ioctl),
- .get_args = IW_PRIV_TYPE_NONE,
- .name = "atmelfwl"
- }, {
- .cmd = ATMELIDIFC,
- .set_args = IW_PRIV_TYPE_NONE,
- .get_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- .name = "atmelidifc"
- }, {
- .cmd = ATMELRD,
- .set_args = IW_PRIV_TYPE_CHAR | REGDOMAINSZ,
- .get_args = IW_PRIV_TYPE_NONE,
- .name = "regdomain"
- },
-};
-
-static const struct iw_handler_def atmel_handler_def = {
- .num_standard = ARRAY_SIZE(atmel_handler),
- .num_private = ARRAY_SIZE(atmel_private_handler),
- .num_private_args = ARRAY_SIZE(atmel_private_args),
- .standard = atmel_handler,
- .private = atmel_private_handler,
- .private_args = (struct iw_priv_args *) atmel_private_args,
- .get_wireless_stats = atmel_get_wireless_stats
-};
-
-struct auth_body {
- __le16 alg;
- __le16 trans_seq;
- __le16 status;
- u8 el_id;
- u8 chall_text_len;
- u8 chall_text[253];
-};
-
-static void atmel_enter_state(struct atmel_private *priv, int new_state)
-{
- int old_state = priv->station_state;
-
- if (new_state == old_state)
- return;
-
- priv->station_state = new_state;
-
- if (new_state == STATION_STATE_READY) {
- netif_start_queue(priv->dev);
- netif_carrier_on(priv->dev);
- }
-
- if (old_state == STATION_STATE_READY) {
- netif_carrier_off(priv->dev);
- if (netif_running(priv->dev))
- netif_stop_queue(priv->dev);
- priv->last_beacon_timestamp = 0;
- }
-}
-
-static void atmel_scan(struct atmel_private *priv, int specific_ssid)
-{
- struct {
- u8 BSSID[ETH_ALEN];
- u8 SSID[MAX_SSID_LENGTH];
- u8 scan_type;
- u8 channel;
- __le16 BSS_type;
- __le16 min_channel_time;
- __le16 max_channel_time;
- u8 options;
- u8 SSID_size;
- } cmd;
-
- eth_broadcast_addr(cmd.BSSID);
-
- if (priv->fast_scan) {
- cmd.SSID_size = priv->SSID_size;
- memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- cmd.min_channel_time = cpu_to_le16(10);
- cmd.max_channel_time = cpu_to_le16(50);
- } else {
- priv->BSS_list_entries = 0;
- cmd.SSID_size = 0;
- cmd.min_channel_time = cpu_to_le16(10);
- cmd.max_channel_time = cpu_to_le16(120);
- }
-
- cmd.options = 0;
-
- if (!specific_ssid)
- cmd.options |= SCAN_OPTIONS_SITE_SURVEY;
-
- cmd.channel = (priv->channel & 0x7f);
- cmd.scan_type = SCAN_TYPE_ACTIVE;
- cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ?
- BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE);
-
- atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd));
-
- /* This must come after all hardware access to avoid being messed up
- by stuff happening in interrupt context after we leave STATE_DOWN */
- atmel_enter_state(priv, STATION_STATE_SCANNING);
-}
-
-static void join(struct atmel_private *priv, int type)
-{
- struct {
- u8 BSSID[6];
- u8 SSID[MAX_SSID_LENGTH];
- u8 BSS_type; /* this is a short in a scan command - weird */
- u8 channel;
- __le16 timeout;
- u8 SSID_size;
- u8 reserved;
- } cmd;
-
- cmd.SSID_size = priv->SSID_size;
- memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
- cmd.channel = (priv->channel & 0x7f);
- cmd.BSS_type = type;
- cmd.timeout = cpu_to_le16(2000);
-
- atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd));
-}
-
-static void start(struct atmel_private *priv, int type)
-{
- struct {
- u8 BSSID[6];
- u8 SSID[MAX_SSID_LENGTH];
- u8 BSS_type;
- u8 channel;
- u8 SSID_size;
- u8 reserved[3];
- } cmd;
-
- cmd.SSID_size = priv->SSID_size;
- memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
- memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
- cmd.BSS_type = type;
- cmd.channel = (priv->channel & 0x7f);
-
- atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd));
-}
-
-static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
- u8 channel)
-{
- int rejoin = 0;
- int new = capability & WLAN_CAPABILITY_SHORT_PREAMBLE ?
- SHORT_PREAMBLE : LONG_PREAMBLE;
-
- if (priv->preamble != new) {
- priv->preamble = new;
- rejoin = 1;
- atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new);
- }
-
- if (priv->channel != channel) {
- priv->channel = channel;
- rejoin = 1;
- atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel);
- }
-
- if (rejoin) {
- priv->station_is_associated = 0;
- atmel_enter_state(priv, STATION_STATE_JOINNING);
-
- if (priv->operating_mode == IW_MODE_INFRA)
- join(priv, BSS_TYPE_INFRASTRUCTURE);
- else
- join(priv, BSS_TYPE_AD_HOC);
- }
-}
-
-static void send_authentication_request(struct atmel_private *priv, u16 system,
- u8 *challenge, int challenge_len)
-{
- struct ieee80211_hdr header;
- struct auth_body auth;
-
- header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
- header.duration_id = cpu_to_le16(0x8000);
- header.seq_ctrl = 0;
- memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
- memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
- memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
-
- if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
- /* no WEP for authentication frames with TrSeqNo 1 */
- header.frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-
- auth.alg = cpu_to_le16(system);
-
- auth.status = 0;
- auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum);
- priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1;
- priv->CurrentAuthentTransactionSeqNum += 2;
-
- if (challenge_len != 0) {
- auth.el_id = 16; /* challenge_text */
- auth.chall_text_len = challenge_len;
- memcpy(auth.chall_text, challenge, challenge_len);
- atmel_transmit_management_frame(priv, &header, (u8 *)&auth, 8 + challenge_len);
- } else {
- atmel_transmit_management_frame(priv, &header, (u8 *)&auth, 6);
- }
-}
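-
-/* Worked sizes for the auth_body transmitted above: alg(2) + trans_seq(2)
-   + status(2) = 6 octets for a plain authentication frame; a shared-key
-   challenge adds el_id(1) + chall_text_len(1) + the challenge text,
-   hence 8 + challenge_len. */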
-
-static void send_association_request(struct atmel_private *priv, int is_reassoc)
-{
- u8 *ssid_el_p;
- int bodysize;
- struct ieee80211_hdr header;
- struct ass_req_format {
- __le16 capability;
- __le16 listen_interval;
- u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
- u8 ssid_el_id;
- u8 ssid_len;
- u8 ssid[MAX_SSID_LENGTH];
- u8 sup_rates_el_id;
- u8 sup_rates_len;
- u8 rates[4];
- } body;
-
- header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
- header.duration_id = cpu_to_le16(0x8000);
- header.seq_ctrl = 0;
-
- memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
- memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
- memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
-
- body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
- if (priv->wep_is_on)
- body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
- if (priv->preamble == SHORT_PREAMBLE)
- body.capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
-
- body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
-
- /* current AP address - only in reassoc frame */
- if (is_reassoc) {
- memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
- ssid_el_p = &body.ssid_el_id;
- bodysize = 18 + priv->SSID_size;
- } else {
- ssid_el_p = &body.ap[0];
- bodysize = 12 + priv->SSID_size;
- }
-
- ssid_el_p[0] = WLAN_EID_SSID;
- ssid_el_p[1] = priv->SSID_size;
- memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
- ssid_el_p[2 + priv->SSID_size] = WLAN_EID_SUPP_RATES;
- ssid_el_p[3 + priv->SSID_size] = 4; /* len of supported rates */
- memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
-
- atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
-}
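-
-/* The pointer arithmetic above works because the 6-octet 'ap' field is
-   present only in a reassociation request; for a plain association the
-   SSID element starts at body.ap instead. The fixed octets are
-   capability(2) + listen_interval(2) + SSID header(2) + rates header(2) +
-   rates(4) = 12, giving 12 + SSID_size, or 18 + SSID_size once the
-   current-AP address is included. */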
-
-static int is_frame_from_current_bss(struct atmel_private *priv,
- struct ieee80211_hdr *header)
-{
- if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
- return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
- else
- return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
-}
-
-static int retrieve_bss(struct atmel_private *priv)
-{
- int i;
- int max_rssi = -128;
- int max_index = -1;
-
- if (priv->BSS_list_entries == 0)
- return -1;
-
- if (priv->connect_to_any_BSS) {
-		/* Select the BSS with the highest RSSI that is of the same
-		   type and the same WEP mode and is not marked as 'bad'
-		   (i.e. we had previously failed to connect to it with the
-		   settings that we currently use) */
-		priv->current_BSS = 0;
-		for (i = 0; i < priv->BSS_list_entries; i++) {
-			if (priv->operating_mode == priv->BSSinfo[i].BSStype &&
-			    ((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) ||
-			     (priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) &&
-			    !(priv->BSSinfo[i].channel & 0x80) &&
-			    priv->BSSinfo[i].RSSI > max_rssi) {
-				max_rssi = priv->BSSinfo[i].RSSI;
-				priv->current_BSS = max_index = i;
- }
- }
- return max_index;
- }
-
- for (i = 0; i < priv->BSS_list_entries; i++) {
- if (priv->SSID_size == priv->BSSinfo[i].SSIDsize &&
- memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 &&
- priv->operating_mode == priv->BSSinfo[i].BSStype &&
- atmel_validate_channel(priv, priv->BSSinfo[i].channel) == 0) {
- if (priv->BSSinfo[i].RSSI >= max_rssi) {
- max_rssi = priv->BSSinfo[i].RSSI;
- max_index = i;
- }
- }
- }
- return max_index;
-}
-
-static void store_bss_info(struct atmel_private *priv,
- struct ieee80211_hdr *header, u16 capability,
- u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len,
- u8 *ssid, int is_beacon)
-{
- u8 *bss = capability & WLAN_CAPABILITY_ESS ? header->addr2 : header->addr3;
- int i, index;
-
- for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
- if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
- index = i;
-
-	/* If we process a probe and an entry from this BSS exists,
-	   we will update the BSS entry with the info from this BSS.
-	   If we process a beacon, we will only update the RSSI */
-
- if (index == -1) {
- if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
- return;
- index = priv->BSS_list_entries++;
- memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
- priv->BSSinfo[index].RSSI = rssi;
- } else {
- if (rssi > priv->BSSinfo[index].RSSI)
- priv->BSSinfo[index].RSSI = rssi;
- if (is_beacon)
- return;
- }
-
- priv->BSSinfo[index].channel = channel;
- priv->BSSinfo[index].beacon_period = beacon_period;
- priv->BSSinfo[index].UsingWEP = capability & WLAN_CAPABILITY_PRIVACY;
- memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len);
- priv->BSSinfo[index].SSIDsize = ssid_len;
-
- if (capability & WLAN_CAPABILITY_IBSS)
- priv->BSSinfo[index].BSStype = IW_MODE_ADHOC;
- else if (capability & WLAN_CAPABILITY_ESS)
- priv->BSSinfo[index].BSStype = IW_MODE_INFRA;
-
- priv->BSSinfo[index].preamble = capability & WLAN_CAPABILITY_SHORT_PREAMBLE ?
- SHORT_PREAMBLE : LONG_PREAMBLE;
-}
-
-static void authenticate(struct atmel_private *priv, u16 frame_len)
-{
- struct auth_body *auth = (struct auth_body *)priv->rx_buf;
- u16 status = le16_to_cpu(auth->status);
- u16 trans_seq_no = le16_to_cpu(auth->trans_seq);
- u16 system = le16_to_cpu(auth->alg);
-
- if (status == WLAN_STATUS_SUCCESS && !priv->wep_is_on) {
- /* no WEP */
- if (priv->station_was_associated) {
- atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
- send_association_request(priv, 1);
- return;
- } else {
- atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
- send_association_request(priv, 0);
- return;
- }
- }
-
- if (status == WLAN_STATUS_SUCCESS && priv->wep_is_on) {
- int should_associate = 0;
- /* WEP */
- if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
- return;
-
- if (system == WLAN_AUTH_OPEN) {
- if (trans_seq_no == 0x0002) {
- should_associate = 1;
- }
- } else if (system == WLAN_AUTH_SHARED_KEY) {
- if (trans_seq_no == 0x0002 &&
- auth->el_id == WLAN_EID_CHALLENGE) {
- send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
- return;
- } else if (trans_seq_no == 0x0004) {
- should_associate = 1;
- }
- }
-
- if (should_associate) {
- if (priv->station_was_associated) {
- atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
- send_association_request(priv, 1);
- return;
- } else {
- atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
- send_association_request(priv, 0);
- return;
- }
- }
- }
-
- if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
-		/* Flip back and forth between WEP auth modes until the
-		 * maximum number of authentication retries has been exceeded.
-		 */
- if (system == WLAN_AUTH_OPEN) {
- priv->CurrentAuthentTransactionSeqNum = 0x001;
- priv->exclude_unencrypted = 1;
- send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
- return;
- } else if (system == WLAN_AUTH_SHARED_KEY
- && priv->wep_is_on) {
- priv->CurrentAuthentTransactionSeqNum = 0x001;
- priv->exclude_unencrypted = 0;
- send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0);
- return;
- } else if (priv->connect_to_any_BSS) {
- int bss_index;
-
- priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
-
- if ((bss_index = retrieve_bss(priv)) != -1) {
- atmel_join_bss(priv, bss_index);
- return;
- }
- }
- }
-
- priv->AuthenticationRequestRetryCnt = 0;
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
-}
-
-static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
-{
- struct ass_resp_format {
- __le16 capability;
- __le16 status;
- __le16 ass_id;
- u8 el_id;
- u8 length;
- u8 rates[4];
- } *ass_resp = (struct ass_resp_format *)priv->rx_buf;
-
- u16 status = le16_to_cpu(ass_resp->status);
- u16 ass_id = le16_to_cpu(ass_resp->ass_id);
- u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
-
- union iwreq_data wrqu;
-
- if (frame_len < 8 + rates_len)
- return;
-
- if (status == WLAN_STATUS_SUCCESS) {
- if (subtype == IEEE80211_STYPE_ASSOC_RESP)
- priv->AssociationRequestRetryCnt = 0;
- else
- priv->ReAssociationRequestRetryCnt = 0;
-
- atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff);
- atmel_set_mib(priv, Phy_Mib_Type,
- PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len);
- if (priv->power_mode == 0) {
- priv->listen_interval = 1;
- atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
- atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
- } else {
- priv->listen_interval = 2;
- atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_PS_MODE_POS, PS_MODE);
- atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2);
- }
-
- priv->station_is_associated = 1;
- priv->station_was_associated = 1;
- atmel_enter_state(priv, STATION_STATE_READY);
-
- /* Send association event to userspace */
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- memcpy(wrqu.ap_addr.sa_data, priv->CurrentBSSID, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
-
- return;
- }
-
- if (subtype == IEEE80211_STYPE_ASSOC_RESP &&
- status != WLAN_STATUS_ASSOC_DENIED_RATES &&
- status != WLAN_STATUS_CAPS_UNSUPPORTED &&
- priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
- mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- priv->AssociationRequestRetryCnt++;
- send_association_request(priv, 0);
- return;
- }
-
- if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
- status != WLAN_STATUS_ASSOC_DENIED_RATES &&
- status != WLAN_STATUS_CAPS_UNSUPPORTED &&
- priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
- mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- priv->ReAssociationRequestRetryCnt++;
- send_association_request(priv, 1);
- return;
- }
-
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
-
- if (priv->connect_to_any_BSS) {
- int bss_index;
- priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
-
- if ((bss_index = retrieve_bss(priv)) != -1)
- atmel_join_bss(priv, bss_index);
- }
-}
-
-static void atmel_join_bss(struct atmel_private *priv, int bss_index)
-{
- struct bss_info *bss = &priv->BSSinfo[bss_index];
-
- memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
- memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
-
- /* The WPA stuff cares about the current AP address */
- if (priv->use_wpa)
- build_wpa_mib(priv);
-
- /* When switching to AdHoc turn OFF Power Save if needed */
-
- if (bss->BSStype == IW_MODE_ADHOC &&
- priv->operating_mode != IW_MODE_ADHOC &&
- priv->power_mode) {
- priv->power_mode = 0;
- priv->listen_interval = 1;
- atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
- atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
- MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
- }
-
- priv->operating_mode = bss->BSStype;
- priv->channel = bss->channel & 0x7f;
- priv->beacon_period = bss->beacon_period;
-
- if (priv->preamble != bss->preamble) {
- priv->preamble = bss->preamble;
- atmel_set_mib8(priv, Local_Mib_Type,
- LOCAL_MIB_PREAMBLE_TYPE, bss->preamble);
- }
-
- if (!priv->wep_is_on && bss->UsingWEP) {
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
- return;
- }
-
- if (priv->wep_is_on && !bss->UsingWEP) {
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
- return;
- }
-
- atmel_enter_state(priv, STATION_STATE_JOINNING);
-
- if (priv->operating_mode == IW_MODE_INFRA)
- join(priv, BSS_TYPE_INFRASTRUCTURE);
- else
- join(priv, BSS_TYPE_AD_HOC);
-}
-
-static void restart_search(struct atmel_private *priv)
-{
- int bss_index;
-
- if (!priv->connect_to_any_BSS) {
- atmel_scan(priv, 1);
- } else {
- priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
-
- if ((bss_index = retrieve_bss(priv)) != -1)
- atmel_join_bss(priv, bss_index);
- else
- atmel_scan(priv, 0);
- }
-}
-
-static void smooth_rssi(struct atmel_private *priv, u8 rssi)
-{
- u8 old = priv->wstats.qual.level;
- u8 max_rssi = 42; /* 502-rmfd-revd max by experiment, default for now */
-
- switch (priv->firmware_type) {
- case ATMEL_FW_TYPE_502E:
- max_rssi = 63; /* 502-rmfd-reve max by experiment */
- break;
- default:
- break;
- }
-
- rssi = rssi * 100 / max_rssi;
- if ((rssi + old) % 2)
- priv->wstats.qual.level = (rssi + old) / 2 + 1;
- else
- priv->wstats.qual.level = (rssi + old) / 2;
- priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
- priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID;
-}
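-
-/* Worked example for the smoothing above: with the default max_rssi of 42,
-   a raw reading of 21 scales to 21 * 100 / 42 = 50%. The new level is the
-   average of old and new, rounded up when the sum is odd: old = 49,
-   new = 50 -> 99 / 2 + 1 = 50. */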
-
-static void atmel_smooth_qual(struct atmel_private *priv)
-{
- unsigned long time_diff = (jiffies - priv->last_qual) / HZ;
- while (time_diff--) {
- priv->last_qual += HZ;
- priv->wstats.qual.qual = priv->wstats.qual.qual / 2;
- priv->wstats.qual.qual +=
- priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000;
- priv->beacons_this_sec = 0;
- }
- priv->wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
- priv->wstats.qual.updated &= ~IW_QUAL_QUAL_INVALID;
-}
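-
-/* Worked example: each elapsed second the quality halves, then gains
-   beacons_this_sec * beacon_period * (level + 100) / 4000. With
-   beacon_period = 100 TU (~10 beacons per second), all beacons seen and
-   level = 0, the boost is 10 * 100 * 100 / 4000 = 25, so a steady link
-   converges to qual = qual/2 + 25 = 50. */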
-
-/* deals with incoming management frames. */
-static void atmel_management_frame(struct atmel_private *priv,
- struct ieee80211_hdr *header,
- u16 frame_len, u8 rssi)
-{
- u16 subtype;
-
- subtype = le16_to_cpu(header->frame_control) & IEEE80211_FCTL_STYPE;
- switch (subtype) {
- case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_PROBE_RESP:
-
- /* beacon frame has multiple variable-length fields -
- never let an engineer loose with a data structure design. */
- {
- struct beacon_format {
- __le64 timestamp;
- __le16 interval;
- __le16 capability;
- u8 ssid_el_id;
- u8 ssid_length;
- /* ssid here */
- u8 rates_el_id;
- u8 rates_length;
- /* rates here */
- u8 ds_el_id;
- u8 ds_length;
- /* ds here */
- } *beacon = (struct beacon_format *)priv->rx_buf;
-
- u8 channel, rates_length, ssid_length;
- u64 timestamp = le64_to_cpu(beacon->timestamp);
- u16 beacon_interval = le16_to_cpu(beacon->interval);
- u16 capability = le16_to_cpu(beacon->capability);
- u8 *beaconp = priv->rx_buf;
- ssid_length = beacon->ssid_length;
- /* this blows chunks. */
- if (frame_len < 14 || frame_len < ssid_length + 15)
- return;
- rates_length = beaconp[beacon->ssid_length + 15];
- if (frame_len < ssid_length + rates_length + 18)
- return;
- if (ssid_length > MAX_SSID_LENGTH)
- return;
- channel = beaconp[ssid_length + rates_length + 18];
-
- if (priv->station_state == STATION_STATE_READY) {
- smooth_rssi(priv, rssi);
- if (is_frame_from_current_bss(priv, header)) {
- priv->beacons_this_sec++;
- atmel_smooth_qual(priv);
- if (priv->last_beacon_timestamp) {
-					/* Note: truncate this to 32 bits - the kernel can't do 64-bit division on every arch */
- u32 beacon_delay = timestamp - priv->last_beacon_timestamp;
- int beacons = beacon_delay / (beacon_interval * 1000);
- if (beacons > 1)
- priv->wstats.miss.beacon += beacons - 1;
- }
- priv->last_beacon_timestamp = timestamp;
- handle_beacon_probe(priv, capability, channel);
- }
- }
-
- if (priv->station_state == STATION_STATE_SCANNING)
- store_bss_info(priv, header, capability,
- beacon_interval, channel, rssi,
- ssid_length,
- &beacon->rates_el_id,
- subtype == IEEE80211_STYPE_BEACON);
- }
- break;
-
- case IEEE80211_STYPE_AUTH:
-
- if (priv->station_state == STATION_STATE_AUTHENTICATING)
- authenticate(priv, frame_len);
-
- break;
-
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
-
- if (priv->station_state == STATION_STATE_ASSOCIATING ||
- priv->station_state == STATION_STATE_REASSOCIATING)
- associate(priv, frame_len, subtype);
-
- break;
-
- case IEEE80211_STYPE_DISASSOC:
- if (priv->station_is_associated &&
- priv->operating_mode == IW_MODE_INFRA &&
- is_frame_from_current_bss(priv, header)) {
- priv->station_was_associated = 0;
- priv->station_is_associated = 0;
-
- atmel_enter_state(priv, STATION_STATE_JOINNING);
- join(priv, BSS_TYPE_INFRASTRUCTURE);
- }
-
- break;
-
- case IEEE80211_STYPE_DEAUTH:
- if (priv->operating_mode == IW_MODE_INFRA &&
- is_frame_from_current_bss(priv, header)) {
- priv->station_was_associated = 0;
-
- atmel_enter_state(priv, STATION_STATE_JOINNING);
- join(priv, BSS_TYPE_INFRASTRUCTURE);
- }
-
- break;
- }
-}
-
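The beacon parsing above computes byte offsets by hand because the SSID, rates and DS fields are variable-length type/length/value information elements. A generic, bounds-checked element walker is the usual alternative; this is a plain-C sketch of that pattern, not anything the driver provides:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical TLV walker for 802.11 information elements:
 * each element is one id byte, one length byte, then payload. */
struct ie {
	uint8_t id;
	uint8_t len;
	const uint8_t *data;
};

/* Returns 1 and fills *out with the next element, 0 when exhausted
 * or when the buffer would be overrun (truncated frame). */
static int ie_next(const uint8_t **p, size_t *remaining, struct ie *out)
{
	if (*remaining < 2)
		return 0;
	out->id = (*p)[0];
	out->len = (*p)[1];
	if (*remaining < (size_t)out->len + 2)
		return 0;	/* element claims more bytes than exist */
	out->data = *p + 2;
	*p += out->len + 2;
	*remaining -= out->len + 2;
	return 1;
}

Walking elements this way makes each bounds check local to one element instead of accumulating hand-computed offsets like ssid_length + rates_length + 18.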
-/* run when timer expires */
-static void atmel_management_timer(struct timer_list *t)
-{
- struct atmel_private *priv = from_timer(priv, t, management_timer);
- unsigned long flags;
-
- /* Check if the card has been yanked. */
- if (priv->card && priv->present_callback &&
- !(*priv->present_callback)(priv->card))
- return;
-
- spin_lock_irqsave(&priv->irqlock, flags);
-
- switch (priv->station_state) {
-
- case STATION_STATE_AUTHENTICATING:
- if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
- priv->AuthenticationRequestRetryCnt = 0;
- restart_search(priv);
- } else {
- int auth = WLAN_AUTH_OPEN;
- priv->AuthenticationRequestRetryCnt++;
- priv->CurrentAuthentTransactionSeqNum = 0x0001;
- mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- if (priv->wep_is_on && priv->exclude_unencrypted)
- auth = WLAN_AUTH_SHARED_KEY;
- send_authentication_request(priv, auth, NULL, 0);
- }
- break;
-
- case STATION_STATE_ASSOCIATING:
- if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
- priv->AssociationRequestRetryCnt = 0;
- restart_search(priv);
- } else {
- priv->AssociationRequestRetryCnt++;
- mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- send_association_request(priv, 0);
- }
- break;
-
- case STATION_STATE_REASSOCIATING:
- if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
- atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
- priv->station_is_associated = 0;
- priv->ReAssociationRequestRetryCnt = 0;
- restart_search(priv);
- } else {
- priv->ReAssociationRequestRetryCnt++;
- mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- send_association_request(priv, 1);
- }
- break;
-
- default:
- break;
- }
-
- spin_unlock_irqrestore(&priv->irqlock, flags);
-}
-
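All three retry arms in the timer share one shape: while under the cap, bump the counter, re-arm the timer and resend; at the cap, reset the counter, enter the error state and restart the search. A hypothetical distillation of that pattern:

/* Retry-or-fail skeleton modelled on the timer arms above; the
 * resend/fail callbacks are stand-ins for the mod_timer + send
 * calls and the MGMT_ERROR + restart_search sequence. */
static int retry_or_fail(int *cnt, int max,
			 void (*resend)(void), void (*fail)(void))
{
	if (*cnt >= max) {
		*cnt = 0;
		fail();		/* enter error state, restart search */
		return 0;
	}
	(*cnt)++;
	resend();		/* re-arm timer and resend the request */
	return 1;
}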
-static void atmel_command_irq(struct atmel_private *priv)
-{
- u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
- u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET));
- int fast_scan;
- union iwreq_data wrqu;
-
- if (status == CMD_STATUS_IDLE ||
- status == CMD_STATUS_IN_PROGRESS)
- return;
-
- switch (command) {
- case CMD_Start:
- if (status == CMD_STATUS_COMPLETE) {
- priv->station_was_associated = priv->station_is_associated;
- atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
- (u8 *)priv->CurrentBSSID, 6);
- atmel_enter_state(priv, STATION_STATE_READY);
- }
- break;
-
- case CMD_Scan:
- fast_scan = priv->fast_scan;
- priv->fast_scan = 0;
-
- if (status != CMD_STATUS_COMPLETE) {
- atmel_scan(priv, 1);
- } else {
- int bss_index = retrieve_bss(priv);
- int notify_scan_complete = 1;
- if (bss_index != -1) {
- atmel_join_bss(priv, bss_index);
- } else if (priv->operating_mode == IW_MODE_ADHOC &&
- priv->SSID_size != 0) {
- start(priv, BSS_TYPE_AD_HOC);
- } else {
- priv->fast_scan = !fast_scan;
- atmel_scan(priv, 1);
- notify_scan_complete = 0;
- }
- priv->site_survey_state = SITE_SURVEY_COMPLETED;
- if (notify_scan_complete) {
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
- }
- }
- break;
-
- case CMD_SiteSurvey:
- priv->fast_scan = 0;
-
- if (status != CMD_STATUS_COMPLETE)
- return;
-
- priv->site_survey_state = SITE_SURVEY_COMPLETED;
- if (priv->station_is_associated) {
- atmel_enter_state(priv, STATION_STATE_READY);
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
- } else {
- atmel_scan(priv, 1);
- }
- break;
-
- case CMD_Join:
- if (status == CMD_STATUS_COMPLETE) {
- if (priv->operating_mode == IW_MODE_ADHOC) {
- priv->station_was_associated = priv->station_is_associated;
- atmel_enter_state(priv, STATION_STATE_READY);
- } else {
- int auth = WLAN_AUTH_OPEN;
- priv->AuthenticationRequestRetryCnt = 0;
- atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
-
- mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- priv->CurrentAuthentTransactionSeqNum = 0x0001;
- if (priv->wep_is_on && priv->exclude_unencrypted)
- auth = WLAN_AUTH_SHARED_KEY;
- send_authentication_request(priv, auth, NULL, 0);
- }
- return;
- }
-
- atmel_scan(priv, 1);
- }
-}
-
-static int atmel_wakeup_firmware(struct atmel_private *priv)
-{
- struct host_info_struct *iface = &priv->host_info;
- u16 mr1, mr3;
- int i;
-
- if (priv->card_type == CARD_TYPE_SPI_FLASH)
- atmel_set_gcr(priv->dev, GCR_REMAP);
-
- /* wake up on-board processor */
- atmel_clear_gcr(priv->dev, 0x0040);
- atmel_write16(priv->dev, BSR, BSS_SRAM);
-
- if (priv->card_type == CARD_TYPE_SPI_FLASH)
- mdelay(100);
-
- /* and wait for it */
- for (i = LOOP_RETRY_LIMIT; i; i--) {
- mr1 = atmel_read16(priv->dev, MR1);
- mr3 = atmel_read16(priv->dev, MR3);
-
- if (mr3 & MAC_BOOT_COMPLETE)
- break;
- if (mr1 & MAC_BOOT_COMPLETE &&
- priv->bus_type == BUS_TYPE_PCCARD)
- break;
- }
-
- if (i == 0) {
- printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
- return -EIO;
- }
-
- if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
- printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
- return -ENODEV;
- }
-
-	/* now signal init complete through the FunCtrl field of the IFACE,
-	   then poll MR1/MR3 to detect completion of MAC initialization and
-	   check the completion status; the caller sets the interrupt mask,
-	   enables interrupts and calls the Tx and Rx initialization
-	   functions */
-
- atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE);
-
- for (i = LOOP_RETRY_LIMIT; i; i--) {
- mr1 = atmel_read16(priv->dev, MR1);
- mr3 = atmel_read16(priv->dev, MR3);
-
- if (mr3 & MAC_INIT_COMPLETE)
- break;
- if (mr1 & MAC_INIT_COMPLETE &&
- priv->bus_type == BUS_TYPE_PCCARD)
- break;
- }
-
- if (i == 0) {
- printk(KERN_ALERT "%s: MAC failed to initialise.\n",
- priv->dev->name);
- return -EIO;
- }
-
-	/* Check for MAC_INIT_OK only on the register where MAC_INIT_COMPLETE was seen */
- if ((mr3 & MAC_INIT_COMPLETE) &&
- !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
- printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name);
- return -EIO;
- }
- if ((mr1 & MAC_INIT_COMPLETE) &&
- !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) {
- printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name);
- return -EIO;
- }
-
- atmel_copy_to_host(priv->dev, (unsigned char *)iface,
- priv->host_info_base, sizeof(*iface));
-
- iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos);
- iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size);
- iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos);
- iface->tx_desc_count = le16_to_cpu(iface->tx_desc_count);
- iface->rx_buff_pos = le16_to_cpu(iface->rx_buff_pos);
- iface->rx_buff_size = le16_to_cpu(iface->rx_buff_size);
- iface->rx_desc_pos = le16_to_cpu(iface->rx_desc_pos);
- iface->rx_desc_count = le16_to_cpu(iface->rx_desc_count);
- iface->build_version = le16_to_cpu(iface->build_version);
- iface->command_pos = le16_to_cpu(iface->command_pos);
- iface->major_version = le16_to_cpu(iface->major_version);
- iface->minor_version = le16_to_cpu(iface->minor_version);
- iface->func_ctrl = le16_to_cpu(iface->func_ctrl);
- iface->mac_status = le16_to_cpu(iface->mac_status);
-
- return 0;
-}
-
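Boot and init completion above both follow the same handshake: write a doorbell, then spin reading a mailbox register until a completion flag appears or a retry budget runs out. Distilled into a hypothetical helper (the predicate is a stand-in for the MR1/MR3 reads and flag tests):

#include <errno.h>
#include <stdbool.h>

/* Bounded busy-wait modelled on the MR1/MR3 loops: returns 0 once
 * the predicate holds, -ETIMEDOUT after 'limit' polls.  The driver
 * inlines this pattern twice with LOOP_RETRY_LIMIT as the budget. */
static int poll_limited(bool (*done)(void *ctx), void *ctx, int limit)
{
	while (limit--) {
		if (done(ctx))
			return 0;
	}
	return -ETIMEDOUT;
}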
-/* determine type of memory and MAC address */
-static int probe_atmel_card(struct net_device *dev)
-{
- int rc = 0;
- struct atmel_private *priv = netdev_priv(dev);
- u8 addr[ETH_ALEN] = {};
-
- /* reset pccard */
- if (priv->bus_type == BUS_TYPE_PCCARD)
- atmel_write16(dev, GCR, 0x0060);
-
- atmel_write16(dev, GCR, 0x0040);
- msleep(500);
-
- if (atmel_read16(dev, MR2) == 0) {
-		/* No stored firmware, so load a small stub which just
-		   tells us the MAC address */
- int i;
- priv->card_type = CARD_TYPE_EEPROM;
- atmel_write16(dev, BSR, BSS_IRAM);
- atmel_copy_to_card(dev, 0, mac_reader, sizeof(mac_reader));
- atmel_set_gcr(dev, GCR_REMAP);
- atmel_clear_gcr(priv->dev, 0x0040);
- atmel_write16(dev, BSR, BSS_SRAM);
- for (i = LOOP_RETRY_LIMIT; i; i--)
- if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE)
- break;
- if (i == 0) {
- printk(KERN_ALERT "%s: MAC failed to boot MAC address reader.\n", dev->name);
- } else {
-
- atmel_copy_to_host(dev, addr, atmel_read16(dev, MR2), 6);
- eth_hw_addr_set(dev, addr);
- /* got address, now squash it again until the network
- interface is opened */
- if (priv->bus_type == BUS_TYPE_PCCARD)
- atmel_write16(dev, GCR, 0x0060);
- atmel_write16(dev, GCR, 0x0040);
- rc = 1;
- }
- } else if (atmel_read16(dev, MR4) == 0) {
-		/* The MAC address is easy in this case. */
- priv->card_type = CARD_TYPE_PARALLEL_FLASH;
- atmel_write16(dev, BSR, 1);
- atmel_copy_to_host(dev, addr, 0xc000, 6);
- eth_hw_addr_set(dev, addr);
- atmel_write16(dev, BSR, 0x200);
- rc = 1;
- } else {
-		/* Standard firmware in flash, boot it up and ask
-		   for the MAC address */
- priv->card_type = CARD_TYPE_SPI_FLASH;
- if (atmel_wakeup_firmware(priv) == 0) {
- atmel_get_mib(priv, Mac_Address_Mib_Type, 0, addr, 6);
- eth_hw_addr_set(dev, addr);
-
- /* got address, now squash it again until the network
- interface is opened */
- if (priv->bus_type == BUS_TYPE_PCCARD)
- atmel_write16(dev, GCR, 0x0060);
- atmel_write16(dev, GCR, 0x0040);
- rc = 1;
- }
- }
-
- if (rc) {
- if (dev->dev_addr[0] == 0xFF) {
- static const u8 default_mac[] = {
- 0x00, 0x04, 0x25, 0x00, 0x00, 0x00
- };
- printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
- eth_hw_addr_set(dev, default_mac);
- }
- }
-
- return rc;
-}
-
-/* Move the encryption information onto the MIB structure.
- This routine is for the pre-WPA firmware: later firmware has
- a different format MIB and a different routine. */
-static void build_wep_mib(struct atmel_private *priv)
-{
- struct { /* NB this is matched to the hardware, don't change. */
- u8 wep_is_on;
- u8 default_key; /* 0..3 */
- u8 reserved;
- u8 exclude_unencrypted;
-
- u32 WEPICV_error_count;
- u32 WEP_excluded_count;
-
- u8 wep_keys[MAX_ENCRYPTION_KEYS][13];
- u8 encryption_level; /* 0, 1, 2 */
- u8 reserved2[3];
- } mib;
- int i;
-
- mib.wep_is_on = priv->wep_is_on;
- if (priv->wep_is_on) {
- if (priv->wep_key_len[priv->default_key] > 5)
- mib.encryption_level = 2;
- else
- mib.encryption_level = 1;
- } else {
- mib.encryption_level = 0;
- }
-
- mib.default_key = priv->default_key;
- mib.exclude_unencrypted = priv->exclude_unencrypted;
-
- for (i = 0; i < MAX_ENCRYPTION_KEYS; i++)
- memcpy(mib.wep_keys[i], priv->wep_keys[i], 13);
-
- atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
-}
-
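Because this layout is copied verbatim to the card, correctness depends on the compiler inserting no padding. That happens to hold here (the u32 counters land on a 4-byte boundary), but a compile-time size check makes the assumption explicit. A hypothetical standalone version, with MAX_ENCRYPTION_KEYS taken to be 4 as in this driver:

#include <stdint.h>
#include <assert.h>

/* Mirror of the hardware-facing WEP MIB layout above; the firmware
 * expects exact offsets, so any padding would corrupt the image
 * written by atmel_set_mib(). */
struct wep_mib {
	uint8_t  wep_is_on;
	uint8_t  default_key;
	uint8_t  reserved;
	uint8_t  exclude_unencrypted;
	uint32_t wep_icv_error_count;
	uint32_t wep_excluded_count;
	uint8_t  wep_keys[4][13];
	uint8_t  encryption_level;
	uint8_t  reserved2[3];
};

/* 4 flag bytes + 8 counter bytes + 52 key bytes + 4 trailing bytes */
static_assert(sizeof(struct wep_mib) == 68,
	      "unexpected padding in hardware-facing struct");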
-static void build_wpa_mib(struct atmel_private *priv)
-{
- /* This is for the later (WPA enabled) firmware. */
-
- struct { /* NB this is matched to the hardware, don't change. */
- u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
- u8 receiver_address[ETH_ALEN];
- u8 wep_is_on;
- u8 default_key; /* 0..3 */
- u8 group_key;
- u8 exclude_unencrypted;
- u8 encryption_type;
- u8 reserved;
-
- u32 WEPICV_error_count;
- u32 WEP_excluded_count;
-
- u8 key_RSC[4][8];
- } mib;
-
- int i;
-
- mib.wep_is_on = priv->wep_is_on;
- mib.exclude_unencrypted = priv->exclude_unencrypted;
- memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
-
- /* zero all the keys before adding in valid ones. */
- memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
-
- if (priv->wep_is_on) {
-		/* There's a comment in the Atmel code to the effect that this
-		   is only valid when still using WEP; it may need to be set to
-		   something else to use WPA */
- memset(mib.key_RSC, 0, sizeof(mib.key_RSC));
-
- mib.default_key = mib.group_key = 255;
- for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) {
- if (priv->wep_key_len[i] > 0) {
- memcpy(mib.cipher_default_key_value[i], priv->wep_keys[i], MAX_ENCRYPTION_KEY_SIZE);
- if (i == priv->default_key) {
- mib.default_key = i;
- mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7;
- mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite;
- } else {
- mib.group_key = i;
- priv->group_cipher_suite = priv->pairwise_cipher_suite;
- mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1;
- mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite;
- }
- }
- }
- if (mib.default_key == 255)
- mib.default_key = mib.group_key != 255 ? mib.group_key : 0;
- if (mib.group_key == 255)
- mib.group_key = mib.default_key;
-
- }
-
- atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
-}
-
-static int reset_atmel_card(struct net_device *dev)
-{
- /* do everything necessary to wake up the hardware, including
- waiting for the lightning strike and throwing the knife switch....
-
- set all the Mib values which matter in the card to match
- their settings in the atmel_private structure. Some of these
- can be altered on the fly, but many (WEP, infrastructure or ad-hoc)
- can only be changed by tearing down the world and coming back through
- here.
-
- This routine is also responsible for initialising some
- hardware-specific fields in the atmel_private structure,
- including a copy of the firmware's hostinfo structure
-   which is the route into the rest of the firmware data structures. */
-
- struct atmel_private *priv = netdev_priv(dev);
- u8 configuration;
- int old_state = priv->station_state;
- int err = 0;
-
-	/* data to add to the firmware names, in priority order;
-	   this implements firmware versioning */
-
- static char *firmware_modifier[] = {
- "-wpa",
- "",
- NULL
- };
-
- /* reset pccard */
- if (priv->bus_type == BUS_TYPE_PCCARD)
- atmel_write16(priv->dev, GCR, 0x0060);
-
-	/* stop card, disable interrupts */
- atmel_write16(priv->dev, GCR, 0x0040);
-
- if (priv->card_type == CARD_TYPE_EEPROM) {
- /* copy in firmware if needed */
- const struct firmware *fw_entry = NULL;
- const unsigned char *fw;
- int len = priv->firmware_length;
- if (!(fw = priv->firmware)) {
- if (priv->firmware_type == ATMEL_FW_TYPE_NONE) {
- if (strlen(priv->firmware_id) == 0) {
- printk(KERN_INFO
- "%s: card type is unknown: assuming at76c502 firmware is OK.\n",
- dev->name);
- printk(KERN_INFO
- "%s: if not, use the firmware= module parameter.\n",
- dev->name);
- strcpy(priv->firmware_id, "atmel_at76c502.bin");
- }
- err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev);
- if (err != 0) {
- printk(KERN_ALERT
- "%s: firmware %s is missing, cannot continue.\n",
- dev->name, priv->firmware_id);
- return err;
- }
- } else {
- int fw_index = 0;
- int success = 0;
-
- /* get firmware filename entry based on firmware type ID */
- while (fw_table[fw_index].fw_type != priv->firmware_type
- && fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE)
- fw_index++;
-
- /* construct the actual firmware file name */
- if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) {
- int i;
- for (i = 0; firmware_modifier[i]; i++) {
- snprintf(priv->firmware_id, 32, "%s%s.%s", fw_table[fw_index].fw_file,
- firmware_modifier[i], fw_table[fw_index].fw_file_ext);
- priv->firmware_id[31] = '\0';
- if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) == 0) {
- success = 1;
- break;
- }
- }
- }
- if (!success) {
- printk(KERN_ALERT
- "%s: firmware %s is missing, cannot start.\n",
- dev->name, priv->firmware_id);
- priv->firmware_id[0] = '\0';
- return -ENOENT;
- }
- }
-
- fw = fw_entry->data;
- len = fw_entry->size;
- }
-
- if (len <= 0x6000) {
- atmel_write16(priv->dev, BSR, BSS_IRAM);
- atmel_copy_to_card(priv->dev, 0, fw, len);
- atmel_set_gcr(priv->dev, GCR_REMAP);
- } else {
- /* Remap */
- atmel_set_gcr(priv->dev, GCR_REMAP);
- atmel_write16(priv->dev, BSR, BSS_IRAM);
- atmel_copy_to_card(priv->dev, 0, fw, 0x6000);
- atmel_write16(priv->dev, BSR, 0x2ff);
- atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000);
- }
-
- release_firmware(fw_entry);
- }
-
- err = atmel_wakeup_firmware(priv);
- if (err != 0)
- return err;
-
-	/* Check the version and set the correct flag for the WPA stuff;
-	   old and new firmware are incompatible.
-	   The pre-WPA 3com firmware reports major version 5; the WPA
-	   3com firmware is major version 4 and doesn't need the
-	   3com broken-ness filter. */
- priv->use_wpa = (priv->host_info.major_version == 4);
- priv->radio_on_broken = (priv->host_info.major_version == 5);
-
- /* unmask all irq sources */
- atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff);
-
-	/* init Tx system and enable Tx */
- atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0);
- atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L);
- atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0);
- atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0);
-
- priv->tx_desc_free = priv->host_info.tx_desc_count;
- priv->tx_desc_head = 0;
- priv->tx_desc_tail = 0;
- priv->tx_desc_previous = 0;
- priv->tx_free_mem = priv->host_info.tx_buff_size;
- priv->tx_buff_head = 0;
- priv->tx_buff_tail = 0;
-
- configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
- atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
- configuration | FUNC_CTRL_TxENABLE);
-
- /* init Rx system and enable */
- priv->rx_desc_head = 0;
-
- configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
- atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
- configuration | FUNC_CTRL_RxENABLE);
-
- if (!priv->radio_on_broken) {
- if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
- CMD_STATUS_REJECTED_RADIO_OFF) {
- printk(KERN_INFO "%s: cannot turn the radio on.\n",
- dev->name);
- return -EIO;
- }
- }
-
- /* set up enough MIB values to run. */
- atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate);
- atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF);
- atmel_set_mib16(priv, Mac_Mib_Type, MAC_MIB_RTS_THRESHOLD_POS, priv->rts_threshold);
- atmel_set_mib16(priv, Mac_Mib_Type, MAC_MIB_FRAG_THRESHOLD_POS, priv->frag_threshold);
- atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry);
- atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry);
- atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble);
- atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS,
- priv->dev->dev_addr, 6);
- atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
- atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
- atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_BEACON_PER_POS, priv->default_beacon_period);
- atmel_set_mib(priv, Phy_Mib_Type, PHY_MIB_RATE_SET_POS, atmel_basic_rates, 4);
- atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_PRIVACY_POS, priv->wep_is_on);
- if (priv->use_wpa)
- build_wpa_mib(priv);
- else
- build_wep_mib(priv);
-
- if (old_state == STATION_STATE_READY) {
- union iwreq_data wrqu;
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
- }
-
- return 0;
-}
-
-static void atmel_send_command(struct atmel_private *priv, int command,
- void *cmd, int cmd_size)
-{
- if (cmd)
- atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET),
- cmd, cmd_size);
-
- atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command);
- atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0);
-}
-
-static int atmel_send_command_wait(struct atmel_private *priv, int command,
- void *cmd, int cmd_size)
-{
- int i, status;
-
- atmel_send_command(priv, command, cmd, cmd_size);
-
- for (i = 5000; i; i--) {
- status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
- if (status != CMD_STATUS_IDLE &&
- status != CMD_STATUS_IN_PROGRESS)
- break;
- udelay(20);
- }
-
- if (i == 0) {
- printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name);
- status = CMD_STATUS_HOST_ERROR;
- } else {
- if (command != CMD_EnableRadio)
- status = CMD_STATUS_COMPLETE;
- }
-
- return status;
-}
-
-static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index)
-{
- struct get_set_mib m;
- m.type = type;
- m.size = 1;
- m.index = index;
-
- atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
- return atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE));
-}
-
-static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 data)
-{
- struct get_set_mib m;
- m.type = type;
- m.size = 1;
- m.index = index;
- m.data[0] = data;
-
- atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
-}
-
-static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
- u16 data)
-{
- struct get_set_mib m;
- m.type = type;
- m.size = 2;
- m.index = index;
- m.data[0] = data;
- m.data[1] = data >> 8;
-
- atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2);
-}
-
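atmel_set_mib16 above hand-serializes the value low byte first because the card's memory is little-endian regardless of the host. The same idea as a tiny host-independent helper (hypothetical, not a driver function):

#include <stdint.h>

/* Store a 16-bit value in the card's little-endian byte order,
 * independent of host endianness. */
static void put_le16(uint8_t *dst, uint16_t v)
{
	dst[0] = (uint8_t)(v & 0xff);	/* low byte first */
	dst[1] = (uint8_t)(v >> 8);	/* then high byte */
}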
-static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
- const u8 *data, int data_len)
-{
- struct get_set_mib m;
- m.type = type;
- m.size = data_len;
- m.index = index;
-
- if (data_len > MIB_MAX_DATA_BYTES)
- printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
-
- memcpy(m.data, data, data_len);
- atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
-}
-
-static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
- u8 *data, int data_len)
-{
- struct get_set_mib m;
- m.type = type;
- m.size = data_len;
- m.index = index;
-
- if (data_len > MIB_MAX_DATA_BYTES)
- printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
-
- atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
- atmel_copy_to_host(priv->dev, data,
- atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE), data_len);
-}
-
-static void atmel_writeAR(struct net_device *dev, u16 data)
-{
- int i;
- outw(data, dev->base_addr + AR);
- /* Address register appears to need some convincing..... */
- for (i = 0; data != inw(dev->base_addr + AR) && i < 10; i++)
- outw(data, dev->base_addr + AR);
-}
-
-static void atmel_copy_to_card(struct net_device *dev, u16 dest,
- const unsigned char *src, u16 len)
-{
- int i;
- atmel_writeAR(dev, dest);
- if (dest % 2) {
- atmel_write8(dev, DR, *src);
- src++; len--;
- }
- for (i = len; i > 1 ; i -= 2) {
- u8 lb = *src++;
- u8 hb = *src++;
- atmel_write16(dev, DR, lb | (hb << 8));
- }
- if (i)
- atmel_write8(dev, DR, *src);
-}
-
-static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
- u16 src, u16 len)
-{
- int i;
- atmel_writeAR(dev, src);
- if (src % 2) {
- *dest = atmel_read8(dev, DR);
- dest++; len--;
- }
- for (i = len; i > 1 ; i -= 2) {
- u16 hw = atmel_read16(dev, DR);
- *dest++ = hw;
- *dest++ = hw >> 8;
- }
- if (i)
- *dest = atmel_read8(dev, DR);
-}
-
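Both copy routines above drive a 16-bit data port behind an auto-incrementing address register, so an odd starting address costs one byte-wide access and an odd remaining length leaves a trailing byte. The same structure as a standalone model, with hypothetical emit8/emit16 stand-ins for the DR port writes (the extra length check on the odd-address branch is a guard added here, not present in the original):

#include <stdint.h>
#include <stddef.h>

/* Model of the 16-bit windowed copy: align, move words, mop up. */
static void copy16_window(uint16_t dest, const uint8_t *src, uint16_t len,
			  void (*emit8)(uint8_t),
			  void (*emit16)(uint16_t))
{
	size_t i;

	if (dest % 2 && len) {		/* align to a 16-bit boundary */
		emit8(*src++);
		len--;
	}
	for (i = len; i > 1; i -= 2) {	/* bulk, low byte first */
		uint8_t lb = *src++;
		uint8_t hb = *src++;
		emit16(lb | (uint16_t)(hb << 8));
	}
	if (i)				/* odd trailing byte */
		emit8(*src);
}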
-static void atmel_set_gcr(struct net_device *dev, u16 mask)
-{
- outw(inw(dev->base_addr + GCR) | mask, dev->base_addr + GCR);
-}
-
-static void atmel_clear_gcr(struct net_device *dev, u16 mask)
-{
- outw(inw(dev->base_addr + GCR) & ~mask, dev->base_addr + GCR);
-}
-
-static int atmel_lock_mac(struct atmel_private *priv)
-{
- int i, j = 20;
- retry:
- for (i = 5000; i; i--) {
- if (!atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET)))
- break;
- udelay(20);
- }
-
- if (!i)
- return 0; /* timed out */
-
- atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 1);
- if (atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET))) {
- atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
- if (!j--)
- return 0; /* timed out */
- goto retry;
- }
-
- return 1;
-}
-
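atmel_lock_mac implements a Dekker-style two-flag handshake over shared card memory: wait for the MAC's lockout byte to clear, raise the host's own byte, then re-check and back off if both sides raised at once. The core of that protocol reduced to a hypothetical sketch (the driver additionally wraps both steps in bounded polling and retry loops, as above):

/* Two-flag mutual exclusion: read_peer/write_mine are stand-ins
 * for the IFACE_LOCKOUT_HOST/IFACE_LOCKOUT_MAC byte accesses. */
static int try_lock(int (*read_peer)(void), void (*write_mine)(int))
{
	if (read_peer())
		return 0;	/* peer already holds the lock */
	write_mine(1);
	if (read_peer()) {	/* collision: both raised at once */
		write_mine(0);
		return 0;
	}
	return 1;		/* lock acquired */
}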
-static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
-{
- atmel_writeAR(priv->dev, pos);
- atmel_write16(priv->dev, DR, data); /* card is little-endian */
- atmel_write16(priv->dev, DR, data >> 16);
-}
-
-/***************************************************************************/
-/* There follows the source form of the MAC address reading firmware */
-/***************************************************************************/
-#if 0
-
-/* Copyright 2003 Matthew T. Russotto */
-/* But derived from the Atmel 76C502 firmware written by Atmel and */
-/* included in "atmel wireless lan drivers" package */
-/*
- This file is part of net.russotto.AtmelMACFW, hereto referred to
- as AtmelMACFW
-
- AtmelMACFW is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License version 2
- as published by the Free Software Foundation.
-
- AtmelMACFW is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with AtmelMACFW; if not, see <http://www.gnu.org/licenses/>.
-
-****************************************************************************/
-/* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E */
-/* It will probably work on the 76C504 and 76C502 RFMD_3COM */
-/* It only works on SPI EEPROM versions of the card. */
-
-/* This firmware initializes the SPI controller and clock, reads the MAC */
-/* address from the EEPROM into SRAM, and puts the SRAM offset of the MAC */
-/* address in MR2, and sets MR3 to 0x10 to indicate it is done */
-/* It also puts a complete copy of the EEPROM in SRAM with the offset in */
-/* MR4, for investigational purposes (maybe we can determine chip type */
-/* from that?) */
-
- .org 0
- .set MRBASE, 0x8000000
- .set CPSR_INITIAL, 0xD3 /* IRQ/FIQ disabled, ARM mode, Supervisor state */
- .set CPSR_USER, 0xD1 /* IRQ/FIQ disabled, ARM mode, USER state */
- .set SRAM_BASE, 0x02000000
- .set SP_BASE, 0x0F300000
- .set UNK_BASE, 0x0F000000 /* Some internal device, but which one? */
- .set SPI_CGEN_BASE, 0x0E000000 /* Some internal device, but which one? */
- .set UNK3_BASE, 0x02014000 /* Some internal device, but which one? */
- .set STACK_BASE, 0x5600
- .set SP_SR, 0x10
- .set SP_TDRE, 2 /* status register bit -- TDR empty */
- .set SP_RDRF, 1 /* status register bit -- RDR full */
- .set SP_SWRST, 0x80
- .set SP_SPIEN, 0x1
- .set SP_CR, 0 /* control register */
- .set SP_MR, 4 /* mode register */
- .set SP_RDR, 0x08 /* Read Data Register */
- .set SP_TDR, 0x0C /* Transmit Data Register */
- .set SP_CSR0, 0x30 /* chip select registers */
- .set SP_CSR1, 0x34
- .set SP_CSR2, 0x38
- .set SP_CSR3, 0x3C
- .set NVRAM_CMD_RDSR, 5 /* read status register */
- .set NVRAM_CMD_READ, 3 /* read data */
- .set NVRAM_SR_RDY, 1 /* RDY bit. This bit is inverted */
- .set SPI_8CLOCKS, 0xFF /* Writing this to the TDR doesn't do anything to the
- serial output, since SO is normally high. But it
- does cause 8 clock cycles and thus 8 bits to be
- clocked in to the chip. See Atmel's SPI
- controller (e.g. AT91M55800) timing and 4K
- SPI EEPROM manuals */
-
- .set NVRAM_SCRATCH, 0x02000100 /* arbitrary area for scratchpad memory */
- .set NVRAM_IMAGE, 0x02000200
- .set NVRAM_LENGTH, 0x0200
- .set MAC_ADDRESS_MIB, SRAM_BASE
- .set MAC_ADDRESS_LENGTH, 6
- .set MAC_BOOT_FLAG, 0x10
- .set MR1, 0
- .set MR2, 4
- .set MR3, 8
- .set MR4, 0xC
-RESET_VECTOR:
- b RESET_HANDLER
-UNDEF_VECTOR:
- b HALT1
-SWI_VECTOR:
- b HALT1
-IABORT_VECTOR:
- b HALT1
-DABORT_VECTOR:
-RESERVED_VECTOR:
- b HALT1
-IRQ_VECTOR:
- b HALT1
-FIQ_VECTOR:
- b HALT1
-HALT1: b HALT1
-RESET_HANDLER:
- mov r0, #CPSR_INITIAL
- msr CPSR_c, r0 /* This is probably unnecessary */
-
-/* I'm guessing this is initializing clock generator electronics for SPI */
- ldr r0, =SPI_CGEN_BASE
- mov r1, #0
- mov r1, r1, lsl #3
- orr r1, r1, #0
- str r1, [r0]
- ldr r1, [r0, #28]
- bic r1, r1, #16
- str r1, [r0, #28]
- mov r1, #1
- str r1, [r0, #8]
-
- ldr r0, =MRBASE
- mov r1, #0
- strh r1, [r0, #MR1]
- strh r1, [r0, #MR2]
- strh r1, [r0, #MR3]
- strh r1, [r0, #MR4]
-
- mov sp, #STACK_BASE
- bl SP_INIT
- mov r0, #10
- bl DELAY9
- bl GET_MAC_ADDR
- bl GET_WHOLE_NVRAM
- ldr r0, =MRBASE
- ldr r1, =MAC_ADDRESS_MIB
- strh r1, [r0, #MR2]
- ldr r1, =NVRAM_IMAGE
- strh r1, [r0, #MR4]
- mov r1, #MAC_BOOT_FLAG
- strh r1, [r0, #MR3]
-HALT2: b HALT2
-.func Get_Whole_NVRAM, GET_WHOLE_NVRAM
-GET_WHOLE_NVRAM:
- stmdb sp!, {lr}
- mov r2, #0 /* 0th bytes of NVRAM */
- mov r3, #NVRAM_LENGTH
- mov r1, #0 /* not used in routine */
- ldr r0, =NVRAM_IMAGE
- bl NVRAM_XFER
- ldmia sp!, {lr}
- bx lr
-.endfunc
-
-.func Get_MAC_Addr, GET_MAC_ADDR
-GET_MAC_ADDR:
- stmdb sp!, {lr}
- mov r2, #0x120 /* address of MAC Address within NVRAM */
- mov r3, #MAC_ADDRESS_LENGTH
- mov r1, #0 /* not used in routine */
- ldr r0, =MAC_ADDRESS_MIB
- bl NVRAM_XFER
- ldmia sp!, {lr}
- bx lr
-.endfunc
-.ltorg
-.func Delay9, DELAY9
-DELAY9:
- adds r0, r0, r0, LSL #3 /* r0 = r0 * 9 */
-DELAYLOOP:
- beq DELAY9_done
- subs r0, r0, #1
- b DELAYLOOP
-DELAY9_done:
- bx lr
-.endfunc
-
-.func SP_Init, SP_INIT
-SP_INIT:
- mov r1, #SP_SWRST
- ldr r0, =SP_BASE
- str r1, [r0, #SP_CR] /* reset the SPI */
- mov r1, #0
- str r1, [r0, #SP_CR] /* release SPI from reset state */
- mov r1, #SP_SPIEN
- str r1, [r0, #SP_MR] /* set the SPI to MASTER mode*/
- str r1, [r0, #SP_CR] /* enable the SPI */
-
-/* My guess would be this turns on the SPI clock */
- ldr r3, =SPI_CGEN_BASE
- ldr r1, [r3, #28]
- orr r1, r1, #0x2000
- str r1, [r3, #28]
-
- ldr r1, =0x2000c01
- str r1, [r0, #SP_CSR0]
- ldr r1, =0x2000201
- str r1, [r0, #SP_CSR1]
- str r1, [r0, #SP_CSR2]
- str r1, [r0, #SP_CSR3]
- ldr r1, [r0, #SP_SR]
- ldr r0, [r0, #SP_RDR]
- bx lr
-.endfunc
-.func NVRAM_Init, NVRAM_INIT
-NVRAM_INIT:
- ldr r1, =SP_BASE
- ldr r0, [r1, #SP_RDR]
- mov r0, #NVRAM_CMD_RDSR
- str r0, [r1, #SP_TDR]
-SP_loop1:
- ldr r0, [r1, #SP_SR]
- tst r0, #SP_TDRE
- beq SP_loop1
-
- mov r0, #SPI_8CLOCKS
- str r0, [r1, #SP_TDR]
-SP_loop2:
- ldr r0, [r1, #SP_SR]
- tst r0, #SP_TDRE
- beq SP_loop2
-
- ldr r0, [r1, #SP_RDR]
-SP_loop3:
- ldr r0, [r1, #SP_SR]
- tst r0, #SP_RDRF
- beq SP_loop3
-
- ldr r0, [r1, #SP_RDR]
- and r0, r0, #255
- bx lr
-.endfunc
-
-.func NVRAM_Xfer, NVRAM_XFER
- /* r0 = dest address */
- /* r1 = not used */
- /* r2 = src address within NVRAM */
- /* r3 = length */
-NVRAM_XFER:
- stmdb sp!, {r4, r5, lr}
- mov r5, r0 /* save r0 (dest address) */
- mov r4, r3 /* save r3 (length) */
- mov r0, r2, LSR #5 /* SPI memories put A8 in the command field */
- and r0, r0, #8
- add r0, r0, #NVRAM_CMD_READ
- ldr r1, =NVRAM_SCRATCH
- strb r0, [r1, #0] /* save command in NVRAM_SCRATCH[0] */
- strb r2, [r1, #1] /* save low byte of source address in NVRAM_SCRATCH[1] */
-_local1:
- bl NVRAM_INIT
- tst r0, #NVRAM_SR_RDY
- bne _local1
- mov r0, #20
- bl DELAY9
- mov r2, r4 /* length */
- mov r1, r5 /* dest address */
- mov r0, #2 /* bytes to transfer in command */
- bl NVRAM_XFER2
- ldmia sp!, {r4, r5, lr}
- bx lr
-.endfunc
-
-.func NVRAM_Xfer2, NVRAM_XFER2
-NVRAM_XFER2:
- stmdb sp!, {r4, r5, r6, lr}
- ldr r4, =SP_BASE
- mov r3, #0
- cmp r0, #0
- bls _local2
- ldr r5, =NVRAM_SCRATCH
-_local4:
- ldrb r6, [r5, r3]
- str r6, [r4, #SP_TDR]
-_local3:
- ldr r6, [r4, #SP_SR]
- tst r6, #SP_TDRE
- beq _local3
- add r3, r3, #1
- cmp r3, r0 /* r0 is # of bytes to send out (command+addr) */
- blo _local4
-_local2:
- mov r3, #SPI_8CLOCKS
- str r3, [r4, #SP_TDR]
- ldr r0, [r4, #SP_RDR]
-_local5:
- ldr r0, [r4, #SP_SR]
- tst r0, #SP_RDRF
- beq _local5
- ldr r0, [r4, #SP_RDR] /* what's this byte? It's the byte read while writing the TDR -- nonsense, because the NVRAM doesn't read and write at the same time */
- mov r0, #0
- cmp r2, #0 /* r2 is # of bytes to copy in */
- bls _local6
-_local7:
- ldr r5, [r4, #SP_SR]
- tst r5, #SP_TDRE
- beq _local7
- str r3, [r4, #SP_TDR] /* r3 has SPI_8CLOCKS */
-_local8:
- ldr r5, [r4, #SP_SR]
- tst r5, #SP_RDRF
- beq _local8
- ldr r5, [r4, #SP_RDR] /* but didn't we read this byte above? */
- strb r5, [r1], #1 /* postindexed */
- add r0, r0, #1
- cmp r0, r2
- blo _local7 /* since we don't send another address, the NVRAM must be capable of sequential reads */
-_local6:
- mov r0, #200
- bl DELAY9
- ldmia sp!, {r4, r5, r6, lr}
- bx lr
-#endif
diff --git a/drivers/net/wireless/atmel/atmel.h b/drivers/net/wireless/atmel/atmel.h
deleted file mode 100644
index d2aa52cf6..000000000
--- a/drivers/net/wireless/atmel/atmel.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*** -*- linux-c -*- **********************************************************
-
- Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
-
- Copyright 2005 Dan Williams and Red Hat, Inc.
-
-
-******************************************************************************/
-
-#ifndef _ATMEL_H
-#define _ATMEL_H
-
-typedef enum {
- ATMEL_FW_TYPE_NONE = 0,
- ATMEL_FW_TYPE_502,
- ATMEL_FW_TYPE_502D,
- ATMEL_FW_TYPE_502E,
- ATMEL_FW_TYPE_502_3COM,
- ATMEL_FW_TYPE_504,
- ATMEL_FW_TYPE_504_2958,
- ATMEL_FW_TYPE_504A_2958,
- ATMEL_FW_TYPE_506
-} AtmelFWType;
-
-struct net_device *init_atmel_card(unsigned short, unsigned long, const AtmelFWType, struct device *,
- int (*present_func)(void *), void * );
-void stop_atmel_card( struct net_device *);
-int atmel_open( struct net_device * );
-
-#endif
diff --git a/drivers/net/wireless/atmel/atmel_cs.c b/drivers/net/wireless/atmel/atmel_cs.c
deleted file mode 100644
index 58bba9875..000000000
--- a/drivers/net/wireless/atmel/atmel_cs.c
+++ /dev/null
@@ -1,292 +0,0 @@
-/*** -*- linux-c -*- **********************************************************
-
- Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
-
- Copyright 2000-2001 ATMEL Corporation.
- Copyright 2003 Simon Kelley.
-
- This code was developed from version 2.1.1 of the Atmel drivers,
- released by Atmel corp. under the GPL in December 2002. It also
- includes code from the Linux aironet drivers (C) Benjamin Reed,
- and the Linux PCMCIA package, (C) David Hinds.
-
- For all queries about this code, please contact the current author,
- Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This software is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with Atmel wireless lan drivers; if not, see
- <http://www.gnu.org/licenses/>.
-
-******************************************************************************/
-
-#ifdef __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/ciscode.h>
-
-#include <asm/io.h>
-#include <linux/wireless.h>
-
-#include "atmel.h"
-
-
-/*====================================================================*/
-
-MODULE_AUTHOR("Simon Kelley");
-MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
-MODULE_LICENSE("GPL");
-
-/*====================================================================*/
-
-static int atmel_config(struct pcmcia_device *link);
-static void atmel_release(struct pcmcia_device *link);
-
-static void atmel_detach(struct pcmcia_device *p_dev);
-
-struct local_info {
- struct net_device *eth_dev;
-};
-
-static int atmel_probe(struct pcmcia_device *p_dev)
-{
- struct local_info *local;
- int ret;
-
- dev_dbg(&p_dev->dev, "atmel_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(*local), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- p_dev->priv = local;
-
- ret = atmel_config(p_dev);
- if (ret)
- goto err_free_priv;
-
- return 0;
-
-err_free_priv:
- kfree(p_dev->priv);
- return ret;
-}
-
-static void atmel_detach(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "atmel_detach\n");
-
- atmel_release(link);
-
- kfree(link->priv);
-}
-
-/* Call-back function to interrogate PCMCIA-specific information
-   about whether the card is still present */
-static int card_present(void *arg)
-{
- struct pcmcia_device *link = (struct pcmcia_device *)arg;
-
- if (pcmcia_dev_present(link))
- return 1;
-
- return 0;
-}
-
-static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-static int atmel_config(struct pcmcia_device *link)
-{
- int ret;
- const struct pcmcia_device_id *did;
-
- did = dev_get_drvdata(&link->dev);
-
- dev_dbg(&link->dev, "atmel_config\n");
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
- CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
-
- if (pcmcia_loop_config(link, atmel_config_check, NULL))
- goto failed;
-
- if (!link->irq) {
- dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
- goto failed;
- }
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- ((struct local_info *)link->priv)->eth_dev =
- init_atmel_card(link->irq,
- link->resource[0]->start,
- did ? did->driver_info : ATMEL_FW_TYPE_NONE,
- &link->dev,
- card_present,
- link);
- if (!((struct local_info *)link->priv)->eth_dev)
- goto failed;
-
-
- return 0;
-
- failed:
- atmel_release(link);
- return -ENODEV;
-}
-
-static void atmel_release(struct pcmcia_device *link)
-{
- struct net_device *dev = ((struct local_info *)link->priv)->eth_dev;
-
- dev_dbg(&link->dev, "atmel_release\n");
-
- if (dev)
- stop_atmel_card(dev);
- ((struct local_info *)link->priv)->eth_dev = NULL;
-
- pcmcia_disable_device(link);
-}
-
-static int atmel_suspend(struct pcmcia_device *link)
-{
- struct local_info *local = link->priv;
-
- netif_device_detach(local->eth_dev);
-
- return 0;
-}
-
-static int atmel_resume(struct pcmcia_device *link)
-{
- struct local_info *local = link->priv;
-
- atmel_open(local->eth_dev);
- netif_device_attach(local->eth_dev);
-
- return 0;
-}
-
-/*====================================================================*/
-/* We use the driver_info field to store the correct firmware type for a card. */
-
-#define PCMCIA_DEVICE_MANF_CARD_INFO(manf, card, info) { \
- .match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \
- PCMCIA_DEV_ID_MATCH_CARD_ID, \
- .manf_id = (manf), \
- .card_id = (card), \
- .driver_info = (kernel_ulong_t)(info), }
-
-#define PCMCIA_DEVICE_PROD_ID12_INFO(v1, v2, vh1, vh2, info) { \
- .match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID1| \
- PCMCIA_DEV_ID_MATCH_PROD_ID2, \
- .prod_id = { (v1), (v2), NULL, NULL }, \
- .prod_id_hash = { (vh1), (vh2), 0, 0 }, \
- .driver_info = (kernel_ulong_t)(info), }
-
-static const struct pcmcia_device_id atmel_ids[] = {
- PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM),
- PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM),
- PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E),
- PCMCIA_DEVICE_MANF_CARD_INFO(0xd601, 0x0007, ATMEL_FW_TYPE_502),
- PCMCIA_DEVICE_PROD_ID12_INFO("11WAVE", "11WP611AL-E", 0x9eb2da1f, 0xc9a0d3f9, ATMEL_FW_TYPE_502E),
- PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR", 0xabda4164, 0x41b37e1f, ATMEL_FW_TYPE_502),
- PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_D", 0xabda4164, 0x3675d704, ATMEL_FW_TYPE_502D),
- PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_E", 0xabda4164, 0x4172e792, ATMEL_FW_TYPE_502E),
- PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504_R", 0xabda4164, 0x917f3d72, ATMEL_FW_TYPE_504_2958),
- PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504", 0xabda4164, 0x5040670a, ATMEL_FW_TYPE_504),
- PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504A", 0xabda4164, 0xe15ed87f, ATMEL_FW_TYPE_504A_2958),
- PCMCIA_DEVICE_PROD_ID12_INFO("BT", "Voyager 1020 Laptop Adapter", 0xae49b86a, 0x1e957cd5, ATMEL_FW_TYPE_502),
- PCMCIA_DEVICE_PROD_ID12_INFO("CNet", "CNWLC 11Mbps Wireless PC Card V-5", 0xbc477dde, 0x502fae6b, ATMEL_FW_TYPE_502E),
- PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN PC Card", 0x5b878724, 0x122f1df6, ATMEL_FW_TYPE_502),
- PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN Card S", 0x5b878724, 0x5fba533a, ATMEL_FW_TYPE_504_2958),
- PCMCIA_DEVICE_PROD_ID12_INFO("OEM", "11Mbps Wireless LAN PC Card V-3", 0xfea54c90, 0x1c5b0f68, ATMEL_FW_TYPE_502),
- PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W", 0xc4f8b18b, 0x30f38774, ATMEL_FW_TYPE_502D),
- PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W-V2", 0xc4f8b18b, 0x172d1377, ATMEL_FW_TYPE_502),
- PCMCIA_DEVICE_PROD_ID12_INFO("Wireless", "PC_CARD", 0xa407ecdd, 0x119f6314, ATMEL_FW_TYPE_502D),
- PCMCIA_DEVICE_PROD_ID12_INFO("WLAN", "802.11b PC CARD", 0x575c516c, 0xb1f6dbc4, ATMEL_FW_TYPE_502D),
- PCMCIA_DEVICE_PROD_ID12_INFO("LG", "LW2100N", 0xb474d43a, 0x6b1fec94, ATMEL_FW_TYPE_502E),
- PCMCIA_DEVICE_NULL
-};
-
-MODULE_DEVICE_TABLE(pcmcia, atmel_ids);
-
-static struct pcmcia_driver atmel_driver = {
- .owner = THIS_MODULE,
- .name = "atmel_cs",
- .probe = atmel_probe,
- .remove = atmel_detach,
- .id_table = atmel_ids,
- .suspend = atmel_suspend,
- .resume = atmel_resume,
-};
-module_pcmcia_driver(atmel_driver);
-
-/*
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- In addition:
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-*/
diff --git a/drivers/net/wireless/atmel/atmel_pci.c b/drivers/net/wireless/atmel/atmel_pci.c
deleted file mode 100644
index f428dc79d..000000000
--- a/drivers/net/wireless/atmel/atmel_pci.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*** -*- linux-c -*- **********************************************************
-
- Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
-
- Copyright 2004 Simon Kelley.
-
-
-******************************************************************************/
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include "atmel.h"
-
-MODULE_AUTHOR("Simon Kelley");
-MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
-MODULE_LICENSE("GPL");
-
-static const struct pci_device_id card_ids[] = {
- { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, card_ids);
-
-static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *);
-static void atmel_pci_remove(struct pci_dev *);
-
-static struct pci_driver atmel_driver = {
- .name = "atmel",
- .id_table = card_ids,
- .probe = atmel_pci_probe,
- .remove = atmel_pci_remove,
-};
-
-
-static int atmel_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pent)
-{
- struct net_device *dev;
-
- if (pci_enable_device(pdev))
- return -ENODEV;
-
- pci_set_master(pdev);
-
- dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
- ATMEL_FW_TYPE_506,
- &pdev->dev, NULL, NULL);
- if (!dev) {
- pci_disable_device(pdev);
- return -ENODEV;
- }
-
- pci_set_drvdata(pdev, dev);
- return 0;
-}
-
-static void atmel_pci_remove(struct pci_dev *pdev)
-{
- stop_atmel_card(pci_get_drvdata(pdev));
-}
-
-module_pci_driver(atmel_driver);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
index d55f3271d..4f0c1e1a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 0ff15d408..625b7cb37 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -867,7 +867,7 @@ struct wireless_dev *brcmf_apsta_add_vif(struct wiphy *wiphy, const char *name,
goto fail;
}
- strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ strscpy(ifp->ndev->name, name, sizeof(ifp->ndev->name));
err = brcmf_net_attach(ifp, true);
if (err) {
bphy_err(drvr, "Registering netdevice failed\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index a194b0e68..b6d458e02 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -578,18 +578,16 @@ static int __init brcmf_common_pd_probe(struct platform_device *pdev)
return 0;
}
-static int brcmf_common_pd_remove(struct platform_device *pdev)
+static void brcmf_common_pd_remove(struct platform_device *pdev)
{
brcmf_dbg(INFO, "Enter\n");
if (brcmfmac_pdata->power_off)
brcmfmac_pdata->power_off();
-
- return 0;
}
static struct platform_driver brcmf_pd = {
- .remove = brcmf_common_pd_remove,
+ .remove_new = brcmf_common_pd_remove,
.driver = {
.name = BRCMFMAC_PDATA_NAME,
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
index f82fbbe3e..90d06cda0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 86ff17493..c3a602197 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -83,6 +83,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index d4492d02e..6e0c90f47 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2334,7 +2334,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
goto fail;
}
- strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1);
+ strscpy(ifp->ndev->name, name, sizeof(ifp->ndev->name));
ifp->ndev->name_assign_type = name_assign_type;
err = brcmf_net_attach(ifp, true);
if (err) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 80220685f..d7fb88bb6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2707,7 +2707,6 @@ MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
static struct pci_driver brcmf_pciedrvr = {
- .node = {},
.name = KBUILD_MODNAME,
.id_table = brcmf_pcie_devid_table,
.probe = brcmf_pcie_probe,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2178675ae..0ccf73531 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1581,7 +1581,7 @@ static int brcmf_usb_reset_device(struct device *dev, void *notused)
void brcmf_usb_exit(void)
{
- struct device_driver *drv = &brcmf_usbdrvr.drvwrap.driver;
+ struct device_driver *drv = &brcmf_usbdrvr.driver;
int ret;
brcmf_dbg(USB, "Enter\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
index 02918d434..b66135e3c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index 5a6d9c865..f6962e558 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -341,7 +341,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
/* store the country code for passing up as a regulatory hint */
wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len);
if (brcms_c_country_valid(ccode))
- strncpy(wlc->pub->srom_ccode, ccode, ccode_len);
+ memcpy(wlc->pub->srom_ccode, ccode, ccode_len);
/*
* If no custom world domain is found in the SROM, use the
@@ -354,10 +354,10 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
}
/* save default country for exiting 11d regulatory mode */
- strncpy(wlc->country_default, ccode, ccode_len);
+ memcpy(wlc->country_default, ccode, ccode_len);
/* initialize autocountry_default to driver default */
- strncpy(wlc->autocountry_default, ccode, ccode_len);
+ memcpy(wlc->autocountry_default, ccode, ccode_len);
brcms_c_set_country(wlc_cm, wlc_cm->world_regd);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
index b7df576bb..3d5c1ef8f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
@@ -584,8 +584,7 @@ struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */
- strncpy(di->name, name, MAXNAMEL);
- di->name[MAXNAMEL - 1] = '\0';
+ strscpy(di->name, name, sizeof(di->name));
di->dmadev = core->dma_dev;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index b3663c5ef..34460b581 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -5551,8 +5551,8 @@ int brcms_c_module_register(struct brcms_pub *pub,
/* find an empty entry and just add, no duplication check! */
for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (wlc->modulecb[i].name[0] == '\0') {
- strncpy(wlc->modulecb[i].name, name,
- sizeof(wlc->modulecb[i].name) - 1);
+ strscpy(wlc->modulecb[i].name, name,
+ sizeof(wlc->modulecb[i].name));
wlc->modulecb[i].hdl = hdl;
wlc->modulecb[i].down_fn = d_fn;
return 0;
diff --git a/drivers/net/wireless/cisco/Kconfig b/drivers/net/wireless/cisco/Kconfig
deleted file mode 100644
index b40ee25ac..000000000
--- a/drivers/net/wireless/cisco/Kconfig
+++ /dev/null
@@ -1,59 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config WLAN_VENDOR_CISCO
- bool "Cisco devices"
- default y
- help
- If you have a wireless card belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all the
- questions about these cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-if WLAN_VENDOR_CISCO
-
-config AIRO
- tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on CFG80211 && (PCI || BROKEN)
- select WIRELESS_EXT
- select CRYPTO
- select CRYPTO_SKCIPHER
- select WEXT_SPY
- select WEXT_PRIV
- help
- This is the standard Linux driver to support Cisco/Aironet ISA and
- PCI 802.11 wireless cards.
- It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
-	  - with or without encryption) as well as cards from before the Cisco
-	  acquisition (Aironet 4500, Aironet 4800, Aironet 4800B).
-
-	  This driver supports both the standard Linux Wireless Extensions
- and Cisco proprietary API, so both the Linux Wireless Tools and the
- Cisco Linux utilities can be used to configure the card.
-
- The driver can be compiled as a module and will be named "airo".
-
-config AIRO_CS
- tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
- depends on CFG80211 && PCMCIA
- select WIRELESS_EXT
- select WEXT_SPY
- select WEXT_PRIV
- select CRYPTO
- select CRYPTO_AES
- select CRYPTO_CTR
- help
- This is the standard Linux driver to support Cisco/Aironet PCMCIA
- 802.11 wireless cards. This driver is the same as the Aironet
-	  driver part of the Linux PCMCIA package.
- It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
-	  - with or without encryption) as well as cards from before the Cisco
- acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also
-	  supports Cisco OEM cards such as the DELL TrueMobile 4800 and Xircom
- 802.11b cards.
-
-	  This driver supports both the standard Linux Wireless Extensions
- and Cisco proprietary API, so both the Linux Wireless Tools and the
- Cisco Linux utilities can be used to configure the card.
-
-endif # WLAN_VENDOR_CISCO
diff --git a/drivers/net/wireless/cisco/Makefile b/drivers/net/wireless/cisco/Makefile
deleted file mode 100644
index 506a19ce3..000000000
--- a/drivers/net/wireless/cisco/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AIRO) += airo.o
-obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
deleted file mode 100644
index dbd13f7aa..000000000
--- a/drivers/net/wireless/cisco/airo.c
+++ /dev/null
@@ -1,8288 +0,0 @@
-/*======================================================================
-
- Aironet driver for 4500 and 4800 series cards
-
- This code is released under both the GPL version 2 and BSD licenses.
- Either license may be used. The respective licenses are found at
- the end of this file.
-
- This code was developed by Benjamin Reed <breed@users.sourceforge.net>
- including portions of which come from the Aironet PC4500
- Developer's Reference Manual and used with permission. Copyright
- (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
- code in the Developer's manual was granted for this driver by
- Aironet. Major code contributions were received from Javier Achirica
- <achirica@users.sourceforge.net> and Jean Tourrilhes <jt@hpl.hp.com>.
- Code was also integrated from the Cisco Aironet driver for Linux.
- Support for MPI350 cards was added by Fabrice Bellet
- <fabrice@bellet.info>.
-
-======================================================================*/
-
-#include <linux/err.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/bitops.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <linux/io.h>
-#include <asm/unaligned.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/uaccess.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-
-#include <crypto/aes.h>
-#include <crypto/skcipher.h>
-
-#include <net/cfg80211.h>
-#include <net/iw_handler.h>
-
-#include "airo.h"
-
-#define DRV_NAME "airo"
-
-#ifdef CONFIG_PCI
-static const struct pci_device_id card_ids[] = {
- { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
- { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x0340, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x0350, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x5000, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0xa504, PCI_ANY_ID, PCI_ANY_ID, },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, card_ids);
-
-static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *);
-static void airo_pci_remove(struct pci_dev *);
-static int __maybe_unused airo_pci_suspend(struct device *dev);
-static int __maybe_unused airo_pci_resume(struct device *dev);
-
-static SIMPLE_DEV_PM_OPS(airo_pci_pm_ops,
- airo_pci_suspend,
- airo_pci_resume);
-
-static struct pci_driver airo_driver = {
- .name = DRV_NAME,
- .id_table = card_ids,
- .probe = airo_pci_probe,
- .remove = airo_pci_remove,
- .driver.pm = &airo_pci_pm_ops,
-};
-#endif /* CONFIG_PCI */
-
-/* Include Wireless Extension definition and check version - Jean II */
-#include <linux/wireless.h>
-#define WIRELESS_SPY /* enable iwspy support */
-
-#define CISCO_EXT /* enable Cisco extensions */
-#ifdef CISCO_EXT
-#include <linux/delay.h>
-#endif
-
-/* Hack to do some power saving */
-#define POWER_ON_DOWN
-
-/* As you can see this list is HUGE!
-   I really don't know what a lot of these counts are about, but they
-   are all here for completeness. If the IGNLABEL macro is put in
-   front of the label, that statistic will not be included in the list
-   of statistics in the /proc filesystem */
-
-#define IGNLABEL(comment) NULL
-static const char *statsLabels[] = {
- "RxOverrun",
- IGNLABEL("RxPlcpCrcErr"),
- IGNLABEL("RxPlcpFormatErr"),
- IGNLABEL("RxPlcpLengthErr"),
- "RxMacCrcErr",
- "RxMacCrcOk",
- "RxWepErr",
- "RxWepOk",
- "RetryLong",
- "RetryShort",
- "MaxRetries",
- "NoAck",
- "NoCts",
- "RxAck",
- "RxCts",
- "TxAck",
- "TxRts",
- "TxCts",
- "TxMc",
- "TxBc",
- "TxUcFrags",
- "TxUcPackets",
- "TxBeacon",
- "RxBeacon",
- "TxSinColl",
- "TxMulColl",
- "DefersNo",
- "DefersProt",
- "DefersEngy",
- "DupFram",
- "RxFragDisc",
- "TxAged",
- "RxAged",
- "LostSync-MaxRetry",
- "LostSync-MissedBeacons",
- "LostSync-ArlExceeded",
- "LostSync-Deauth",
- "LostSync-Disassoced",
- "LostSync-TsfTiming",
- "HostTxMc",
- "HostTxBc",
- "HostTxUc",
- "HostTxFail",
- "HostRxMc",
- "HostRxBc",
- "HostRxUc",
- "HostRxDiscard",
- IGNLABEL("HmacTxMc"),
- IGNLABEL("HmacTxBc"),
- IGNLABEL("HmacTxUc"),
- IGNLABEL("HmacTxFail"),
- IGNLABEL("HmacRxMc"),
- IGNLABEL("HmacRxBc"),
- IGNLABEL("HmacRxUc"),
- IGNLABEL("HmacRxDiscard"),
- IGNLABEL("HmacRxAccepted"),
- "SsidMismatch",
- "ApMismatch",
- "RatesMismatch",
- "AuthReject",
- "AuthTimeout",
- "AssocReject",
- "AssocTimeout",
- IGNLABEL("ReasonOutsideTable"),
- IGNLABEL("ReasonStatus1"),
- IGNLABEL("ReasonStatus2"),
- IGNLABEL("ReasonStatus3"),
- IGNLABEL("ReasonStatus4"),
- IGNLABEL("ReasonStatus5"),
- IGNLABEL("ReasonStatus6"),
- IGNLABEL("ReasonStatus7"),
- IGNLABEL("ReasonStatus8"),
- IGNLABEL("ReasonStatus9"),
- IGNLABEL("ReasonStatus10"),
- IGNLABEL("ReasonStatus11"),
- IGNLABEL("ReasonStatus12"),
- IGNLABEL("ReasonStatus13"),
- IGNLABEL("ReasonStatus14"),
- IGNLABEL("ReasonStatus15"),
- IGNLABEL("ReasonStatus16"),
- IGNLABEL("ReasonStatus17"),
- IGNLABEL("ReasonStatus18"),
- IGNLABEL("ReasonStatus19"),
- "RxMan",
- "TxMan",
- "RxRefresh",
- "TxRefresh",
- "RxPoll",
- "TxPoll",
- "HostRetries",
- "LostSync-HostReq",
- "HostTxBytes",
- "HostRxBytes",
- "ElapsedUsec",
- "ElapsedSec",
- "LostSyncBetterAP",
- "PrivacyMismatch",
- "Jammed",
- "DiscRxNotWepped",
- "PhyEleMismatch",
- (char*)-1 };
-#ifndef RUN_AT
-#define RUN_AT(x) (jiffies+(x))
-#endif
-
-
-/* These variables are for insmod, since it seems that the rates
- can only be set in setup_card. Rates should be a comma separated
- (no spaces) list of rates (up to 8). */
-
-static int rates[8];
-static char *ssids[3];
-
-static int io[4];
-static int irq[4];
-
-static
-int maxencrypt /* = 0 */; /* The highest rate that the card can encrypt at.
- 0 means no limit. For old cards this was 4 */
-
-static int auto_wep /* = 0 */; /* If set, it tries to figure out the wep mode */
-static int aux_bap /* = 0 */; /* Checks to see if the aux ports are needed to read
- the bap, needed on some older cards and buses. */
-static int adhoc;
-
-static int probe = 1;
-
-static kuid_t proc_kuid;
-static int proc_uid /* = 0 */;
-
-static kgid_t proc_kgid;
-static int proc_gid /* = 0 */;
-
-static int airo_perm = 0555;
-
-static int proc_perm = 0644;
-
-MODULE_AUTHOR("Benjamin Reed");
-MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. "
- "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs.");
-MODULE_LICENSE("Dual BSD/GPL");
-module_param_hw_array(io, int, ioport, NULL, 0);
-module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_array(rates, int, NULL, 0);
-module_param_array(ssids, charp, NULL, 0);
-module_param(auto_wep, int, 0);
-MODULE_PARM_DESC(auto_wep,
- "If non-zero, the driver will keep looping through the authentication options until an association is made. "
-		 "The value of auto_wep is the number of WEP keys to check. "
- "A value of 2 will try using the key at index 0 and index 1.");
-module_param(aux_bap, int, 0);
-MODULE_PARM_DESC(aux_bap,
- "If non-zero, the driver will switch into a mode that seems to work better for older cards with some older buses. "
- "Before switching it checks that the switch is needed.");
-module_param(maxencrypt, int, 0);
-MODULE_PARM_DESC(maxencrypt,
- "The maximum speed that the card can do encryption. "
- "Units are in 512kbs. "
- "Zero (default) means there is no limit. "
- "Older cards used to be limited to 2mbs (4).");
-module_param(adhoc, int, 0);
-MODULE_PARM_DESC(adhoc, "If non-zero, the card will start in adhoc mode.");
-module_param(probe, int, 0);
-MODULE_PARM_DESC(probe, "If zero, the driver won't start the card.");
-
-module_param(proc_uid, int, 0);
-MODULE_PARM_DESC(proc_uid, "The uid that the /proc files will belong to.");
-module_param(proc_gid, int, 0);
-MODULE_PARM_DESC(proc_gid, "The gid that the /proc files will belong to.");
-module_param(airo_perm, int, 0);
-MODULE_PARM_DESC(airo_perm, "The permission bits of /proc/[driver/]aironet.");
-module_param(proc_perm, int, 0);
-MODULE_PARM_DESC(proc_perm, "The permission bits of the files in /proc");
-
-/* This is a kind of sloppy hack to get this information to OUT4500 and
- IN4500. I would be extremely interested in the situation where this
- doesn't work though!!! */
-static int do8bitIO /* = 0 */;
-
-/* Return codes */
-#define SUCCESS 0
-#define ERROR -1
-#define NO_PACKET -2
-
-/* Commands */
-#define NOP2 0x0000
-#define MAC_ENABLE 0x0001
-#define MAC_DISABLE 0x0002
-#define CMD_LOSE_SYNC 0x0003 /* Not sure what this does... */
-#define CMD_SOFTRESET 0x0004
-#define HOSTSLEEP 0x0005
-#define CMD_MAGIC_PKT 0x0006
-#define CMD_SETWAKEMASK 0x0007
-#define CMD_READCFG 0x0008
-#define CMD_SETMODE 0x0009
-#define CMD_ALLOCATETX 0x000a
-#define CMD_TRANSMIT 0x000b
-#define CMD_DEALLOCATETX 0x000c
-#define NOP 0x0010
-#define CMD_WORKAROUND 0x0011
-#define CMD_ALLOCATEAUX 0x0020
-#define CMD_ACCESS 0x0021
-#define CMD_PCIBAP 0x0022
-#define CMD_PCIAUX 0x0023
-#define CMD_ALLOCBUF 0x0028
-#define CMD_GETTLV 0x0029
-#define CMD_PUTTLV 0x002a
-#define CMD_DELTLV 0x002b
-#define CMD_FINDNEXTTLV 0x002c
-#define CMD_PSPNODES 0x0030
-#define CMD_SETCW 0x0031
-#define CMD_SETPCF 0x0032
-#define CMD_SETPHYREG 0x003e
-#define CMD_TXTEST 0x003f
-#define MAC_ENABLETX 0x0101
-#define CMD_LISTBSS 0x0103
-#define CMD_SAVECFG 0x0108
-#define CMD_ENABLEAUX 0x0111
-#define CMD_WRITERID 0x0121
-#define CMD_USEPSPNODES 0x0130
-#define MAC_ENABLERX 0x0201
-
-/* Command errors */
-#define ERROR_QUALIF 0x00
-#define ERROR_ILLCMD 0x01
-#define ERROR_ILLFMT 0x02
-#define ERROR_INVFID 0x03
-#define ERROR_INVRID 0x04
-#define ERROR_LARGE 0x05
-#define ERROR_NDISABL 0x06
-#define ERROR_ALLOCBSY 0x07
-#define ERROR_NORD 0x0B
-#define ERROR_NOWR 0x0C
-#define ERROR_INVFIDTX 0x0D
-#define ERROR_TESTACT 0x0E
-#define ERROR_TAGNFND 0x12
-#define ERROR_DECODE 0x20
-#define ERROR_DESCUNAV 0x21
-#define ERROR_BADLEN 0x22
-#define ERROR_MODE 0x80
-#define ERROR_HOP 0x81
-#define ERROR_BINTER 0x82
-#define ERROR_RXMODE 0x83
-#define ERROR_MACADDR 0x84
-#define ERROR_RATES 0x85
-#define ERROR_ORDER 0x86
-#define ERROR_SCAN 0x87
-#define ERROR_AUTH 0x88
-#define ERROR_PSMODE 0x89
-#define ERROR_RTYPE 0x8A
-#define ERROR_DIVER 0x8B
-#define ERROR_SSID 0x8C
-#define ERROR_APLIST 0x8D
-#define ERROR_AUTOWAKE 0x8E
-#define ERROR_LEAP 0x8F
-
-/* Registers */
-#define COMMAND 0x00
-#define PARAM0 0x02
-#define PARAM1 0x04
-#define PARAM2 0x06
-#define STATUS 0x08
-#define RESP0 0x0a
-#define RESP1 0x0c
-#define RESP2 0x0e
-#define LINKSTAT 0x10
-#define SELECT0 0x18
-#define OFFSET0 0x1c
-#define RXFID 0x20
-#define TXALLOCFID 0x22
-#define TXCOMPLFID 0x24
-#define DATA0 0x36
-#define EVSTAT 0x30
-#define EVINTEN 0x32
-#define EVACK 0x34
-#define SWS0 0x28
-#define SWS1 0x2a
-#define SWS2 0x2c
-#define SWS3 0x2e
-#define AUXPAGE 0x3A
-#define AUXOFF 0x3C
-#define AUXDATA 0x3E
-
-#define FID_TX 1
-#define FID_RX 2
-/* Offset into aux memory for descriptors */
-#define AUX_OFFSET 0x800
-/* Size of allocated packets */
-#define PKTSIZE 1840
-#define RIDSIZE 2048
-/* Size of the transmit queue */
-#define MAXTXQ 64
-
-/* BAP selectors */
-#define BAP0 0 /* Used for receiving packets */
-#define BAP1 2 /* Used for xmiting packets and working with RIDS */
-
-/* Flags */
-#define COMMAND_BUSY 0x8000
-
-#define BAP_BUSY 0x8000
-#define BAP_ERR 0x4000
-#define BAP_DONE 0x2000
-
-#define PROMISC 0xffff
-#define NOPROMISC 0x0000
-
-#define EV_CMD 0x10
-#define EV_CLEARCOMMANDBUSY 0x4000
-#define EV_RX 0x01
-#define EV_TX 0x02
-#define EV_TXEXC 0x04
-#define EV_ALLOC 0x08
-#define EV_LINK 0x80
-#define EV_AWAKE 0x100
-#define EV_TXCPY 0x400
-#define EV_UNKNOWN 0x800
-#define EV_MIC 0x1000 /* Message Integrity Check Interrupt */
-#define EV_AWAKEN 0x2000
-#define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC)
-
-#ifdef CHECK_UNKNOWN_INTS
-#define IGNORE_INTS (EV_CMD | EV_UNKNOWN)
-#else
-#define IGNORE_INTS (~STATUS_INTS)
-#endif
-
-/* RID TYPES */
-#define RID_RW 0x20
-
-/* The RIDs */
-#define RID_CAPABILITIES 0xFF00
-#define RID_APINFO 0xFF01
-#define RID_RADIOINFO 0xFF02
-#define RID_UNKNOWN3 0xFF03
-#define RID_RSSI 0xFF04
-#define RID_CONFIG 0xFF10
-#define RID_SSID 0xFF11
-#define RID_APLIST 0xFF12
-#define RID_DRVNAME 0xFF13
-#define RID_ETHERENCAP 0xFF14
-#define RID_WEP_TEMP 0xFF15
-#define RID_WEP_PERM 0xFF16
-#define RID_MODULATION 0xFF17
-#define RID_OPTIONS 0xFF18
-#define RID_ACTUALCONFIG 0xFF20 /*readonly*/
-#define RID_FACTORYCONFIG 0xFF21
-#define RID_UNKNOWN22 0xFF22
-#define RID_LEAPUSERNAME 0xFF23
-#define RID_LEAPPASSWORD 0xFF24
-#define RID_STATUS 0xFF50
-#define RID_BEACON_HST 0xFF51
-#define RID_BUSY_HST 0xFF52
-#define RID_RETRIES_HST 0xFF53
-#define RID_UNKNOWN54 0xFF54
-#define RID_UNKNOWN55 0xFF55
-#define RID_UNKNOWN56 0xFF56
-#define RID_MIC 0xFF57
-#define RID_STATS16 0xFF60
-#define RID_STATS16DELTA 0xFF61
-#define RID_STATS16DELTACLEAR 0xFF62
-#define RID_STATS 0xFF68
-#define RID_STATSDELTA 0xFF69
-#define RID_STATSDELTACLEAR 0xFF6A
-#define RID_ECHOTEST_RID 0xFF70
-#define RID_ECHOTEST_RESULTS 0xFF71
-#define RID_BSSLISTFIRST 0xFF72
-#define RID_BSSLISTNEXT 0xFF73
-#define RID_WPA_BSSLISTFIRST 0xFF74
-#define RID_WPA_BSSLISTNEXT 0xFF75
-
-typedef struct {
- u16 cmd;
- u16 parm0;
- u16 parm1;
- u16 parm2;
-} Cmd;
-
-typedef struct {
- u16 status;
- u16 rsp0;
- u16 rsp1;
- u16 rsp2;
-} Resp;
-
-/*
- * RIDs and endianness: the RIDs will always be in CPU endian, since
- * that is what all the patches from the big-endian folks ended up doing,
- * so all RID access should use the read/writeXXXRid routines.
- */
-
-/* This structure came from an email sent to me by an engineer at
-   Aironet for inclusion into this driver */
-typedef struct WepKeyRid WepKeyRid;
-struct WepKeyRid {
- __le16 len;
- __le16 kindex;
- u8 mac[ETH_ALEN];
- __le16 klen;
- u8 key[16];
-} __packed;
-
-/* These structures are from the Aironet's PC4500 Developers Manual */
-typedef struct Ssid Ssid;
-struct Ssid {
- __le16 len;
- u8 ssid[32];
-} __packed;
-
-typedef struct SsidRid SsidRid;
-struct SsidRid {
- __le16 len;
- Ssid ssids[3];
-} __packed;
-
-typedef struct ModulationRid ModulationRid;
-struct ModulationRid {
- __le16 len;
- __le16 modulation;
-#define MOD_DEFAULT cpu_to_le16(0)
-#define MOD_CCK cpu_to_le16(1)
-#define MOD_MOK cpu_to_le16(2)
-} __packed;
-
-typedef struct ConfigRid ConfigRid;
-struct ConfigRid {
- __le16 len; /* sizeof(ConfigRid) */
- __le16 opmode; /* operating mode */
-#define MODE_STA_IBSS cpu_to_le16(0)
-#define MODE_STA_ESS cpu_to_le16(1)
-#define MODE_AP cpu_to_le16(2)
-#define MODE_AP_RPTR cpu_to_le16(3)
-#define MODE_CFG_MASK cpu_to_le16(0xff)
-#define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
-#define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
-#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
-#define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
-#define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
-#define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
-#define MODE_LEAF_NODE cpu_to_le16(1<<13) /* enable leaf node bridge */
-#define MODE_CF_POLLABLE cpu_to_le16(1<<14) /* enable CF pollable */
-#define MODE_MIC cpu_to_le16(1<<15) /* enable MIC */
- __le16 rmode; /* receive mode */
-#define RXMODE_BC_MC_ADDR cpu_to_le16(0)
-#define RXMODE_BC_ADDR cpu_to_le16(1) /* ignore multicasts */
-#define RXMODE_ADDR cpu_to_le16(2) /* ignore multicast and broadcast */
-#define RXMODE_RFMON cpu_to_le16(3) /* wireless monitor mode */
-#define RXMODE_RFMON_ANYBSS cpu_to_le16(4)
-#define RXMODE_LANMON cpu_to_le16(5) /* lan style monitor -- data packets only */
-#define RXMODE_MASK cpu_to_le16(255)
-#define RXMODE_DISABLE_802_3_HEADER cpu_to_le16(1<<8) /* disables 802.3 header on rx */
-#define RXMODE_FULL_MASK (RXMODE_MASK | RXMODE_DISABLE_802_3_HEADER)
-#define RXMODE_NORMALIZED_RSSI cpu_to_le16(1<<9) /* return normalized RSSI */
- __le16 fragThresh;
- __le16 rtsThres;
- u8 macAddr[ETH_ALEN];
- u8 rates[8];
- __le16 shortRetryLimit;
- __le16 longRetryLimit;
- __le16 txLifetime; /* in kusec */
- __le16 rxLifetime; /* in kusec */
- __le16 stationary;
- __le16 ordering;
- __le16 u16deviceType; /* for overriding device type */
- __le16 cfpRate;
- __le16 cfpDuration;
- __le16 _reserved1[3];
- /*---------- Scanning/Associating ----------*/
- __le16 scanMode;
-#define SCANMODE_ACTIVE cpu_to_le16(0)
-#define SCANMODE_PASSIVE cpu_to_le16(1)
-#define SCANMODE_AIROSCAN cpu_to_le16(2)
- __le16 probeDelay; /* in kusec */
- __le16 probeEnergyTimeout; /* in kusec */
- __le16 probeResponseTimeout;
- __le16 beaconListenTimeout;
- __le16 joinNetTimeout;
- __le16 authTimeout;
- __le16 authType;
-#define AUTH_OPEN cpu_to_le16(0x1)
-#define AUTH_ENCRYPT cpu_to_le16(0x101)
-#define AUTH_SHAREDKEY cpu_to_le16(0x102)
-#define AUTH_ALLOW_UNENCRYPTED cpu_to_le16(0x200)
- __le16 associationTimeout;
- __le16 specifiedApTimeout;
- __le16 offlineScanInterval;
- __le16 offlineScanDuration;
- __le16 linkLossDelay;
- __le16 maxBeaconLostTime;
- __le16 refreshInterval;
-#define DISABLE_REFRESH cpu_to_le16(0xFFFF)
- __le16 _reserved1a[1];
- /*---------- Power save operation ----------*/
- __le16 powerSaveMode;
-#define POWERSAVE_CAM cpu_to_le16(0)
-#define POWERSAVE_PSP cpu_to_le16(1)
-#define POWERSAVE_PSPCAM cpu_to_le16(2)
- __le16 sleepForDtims;
- __le16 listenInterval;
- __le16 fastListenInterval;
- __le16 listenDecay;
- __le16 fastListenDelay;
- __le16 _reserved2[2];
- /*---------- Ap/Ibss config items ----------*/
- __le16 beaconPeriod;
- __le16 atimDuration;
- __le16 hopPeriod;
- __le16 channelSet;
- __le16 channel;
- __le16 dtimPeriod;
- __le16 bridgeDistance;
- __le16 radioID;
- /*---------- Radio configuration ----------*/
- __le16 radioType;
-#define RADIOTYPE_DEFAULT cpu_to_le16(0)
-#define RADIOTYPE_802_11 cpu_to_le16(1)
-#define RADIOTYPE_LEGACY cpu_to_le16(2)
- u8 rxDiversity;
- u8 txDiversity;
- __le16 txPower;
-#define TXPOWER_DEFAULT 0
- __le16 rssiThreshold;
-#define RSSI_DEFAULT 0
- __le16 modulation;
-#define PREAMBLE_AUTO cpu_to_le16(0)
-#define PREAMBLE_LONG cpu_to_le16(1)
-#define PREAMBLE_SHORT cpu_to_le16(2)
- __le16 preamble;
- __le16 homeProduct;
- __le16 radioSpecific;
- /*---------- Aironet Extensions ----------*/
- u8 nodeName[16];
- __le16 arlThreshold;
- __le16 arlDecay;
- __le16 arlDelay;
- __le16 _reserved4[1];
- /*---------- Aironet Extensions ----------*/
- u8 magicAction;
-#define MAGIC_ACTION_STSCHG 1
-#define MAGIC_ACTION_RESUME 2
-#define MAGIC_IGNORE_MCAST (1<<8)
-#define MAGIC_IGNORE_BCAST (1<<9)
-#define MAGIC_SWITCH_TO_PSP (0<<10)
-#define MAGIC_STAY_IN_CAM (1<<10)
- u8 magicControl;
- __le16 autoWake;
-} __packed;
-
-typedef struct StatusRid StatusRid;
-struct StatusRid {
- __le16 len;
- u8 mac[ETH_ALEN];
- __le16 mode;
- __le16 errorCode;
- __le16 sigQuality;
- __le16 SSIDlen;
- char SSID[32];
- char apName[16];
- u8 bssid[4][ETH_ALEN];
- __le16 beaconPeriod;
- __le16 dimPeriod;
- __le16 atimDuration;
- __le16 hopPeriod;
- __le16 channelSet;
- __le16 channel;
- __le16 hopsToBackbone;
- __le16 apTotalLoad;
- __le16 generatedLoad;
- __le16 accumulatedArl;
- __le16 signalQuality;
- __le16 currentXmitRate;
- __le16 apDevExtensions;
- __le16 normalizedSignalStrength;
- __le16 shortPreamble;
- u8 apIP[4];
- u8 noisePercent; /* Noise percent in last second */
- u8 noisedBm; /* Noise dBm in last second */
- u8 noiseAvePercent; /* Noise percent in last minute */
- u8 noiseAvedBm; /* Noise dBm in last minute */
- u8 noiseMaxPercent; /* Highest noise percent in last minute */
- u8 noiseMaxdBm; /* Highest noise dbm in last minute */
- __le16 load;
- u8 carrier[4];
- __le16 assocStatus;
-#define STAT_NOPACKETS 0
-#define STAT_NOCARRIERSET 10
-#define STAT_GOTCARRIERSET 11
-#define STAT_WRONGSSID 20
-#define STAT_BADCHANNEL 25
-#define STAT_BADBITRATES 30
-#define STAT_BADPRIVACY 35
-#define STAT_APFOUND 40
-#define STAT_APREJECTED 50
-#define STAT_AUTHENTICATING 60
-#define STAT_DEAUTHENTICATED 61
-#define STAT_AUTHTIMEOUT 62
-#define STAT_ASSOCIATING 70
-#define STAT_DEASSOCIATED 71
-#define STAT_ASSOCTIMEOUT 72
-#define STAT_NOTAIROAP 73
-#define STAT_ASSOCIATED 80
-#define STAT_LEAPING 90
-#define STAT_LEAPFAILED 91
-#define STAT_LEAPTIMEDOUT 92
-#define STAT_LEAPCOMPLETE 93
-} __packed;
-
-typedef struct StatsRid StatsRid;
-struct StatsRid {
- __le16 len;
- __le16 spacer;
- __le32 vals[100];
-} __packed;
-
-typedef struct APListRid APListRid;
-struct APListRid {
- __le16 len;
- u8 ap[4][ETH_ALEN];
-} __packed;
-
-typedef struct CapabilityRid CapabilityRid;
-struct CapabilityRid {
- __le16 len;
- char oui[3];
- char zero;
- __le16 prodNum;
- char manName[32];
- char prodName[16];
- char prodVer[8];
- char factoryAddr[ETH_ALEN];
- char aironetAddr[ETH_ALEN];
- __le16 radioType;
- __le16 country;
- char callid[ETH_ALEN];
- char supportedRates[8];
- char rxDiversity;
- char txDiversity;
- __le16 txPowerLevels[8];
- __le16 hardVer;
- __le16 hardCap;
- __le16 tempRange;
- __le16 softVer;
- __le16 softSubVer;
- __le16 interfaceVer;
- __le16 softCap;
- __le16 bootBlockVer;
- __le16 requiredHard;
- __le16 extSoftCap;
-} __packed;
-
-/* Only present on firmware >= 5.30.17 */
-typedef struct BSSListRidExtra BSSListRidExtra;
-struct BSSListRidExtra {
- __le16 unknown[4];
- u8 fixed[12]; /* WLAN management frame */
- u8 iep[624];
-} __packed;
-
-typedef struct BSSListRid BSSListRid;
-struct BSSListRid {
- __le16 len;
- __le16 index; /* First is 0 and 0xffff means end of list */
-#define RADIO_FH 1 /* Frequency hopping radio type */
-#define RADIO_DS 2 /* Direct sequence radio type */
-#define RADIO_TMA 4 /* Proprietary radio used in old cards (2500) */
- __le16 radioType;
- u8 bssid[ETH_ALEN]; /* Mac address of the BSS */
- u8 zero;
- u8 ssidLen;
- u8 ssid[32];
- __le16 dBm;
-#define CAP_ESS cpu_to_le16(1<<0)
-#define CAP_IBSS cpu_to_le16(1<<1)
-#define CAP_PRIVACY cpu_to_le16(1<<4)
-#define CAP_SHORTHDR cpu_to_le16(1<<5)
- __le16 cap;
- __le16 beaconInterval;
- u8 rates[8]; /* Same as rates for config rid */
- struct { /* For frequency hopping only */
- __le16 dwell;
- u8 hopSet;
- u8 hopPattern;
- u8 hopIndex;
- u8 fill;
- } fh;
- __le16 dsChannel;
- __le16 atimWindow;
-
- /* Only present on firmware >= 5.30.17 */
- BSSListRidExtra extra;
-} __packed;
-
-typedef struct {
- BSSListRid bss;
- struct list_head list;
-} BSSListElement;
-
-typedef struct tdsRssiEntry tdsRssiEntry;
-struct tdsRssiEntry {
- u8 rssipct;
- u8 rssidBm;
-} __packed;
-
-typedef struct tdsRssiRid tdsRssiRid;
-struct tdsRssiRid {
- u16 len;
- tdsRssiEntry x[256];
-} __packed;
-
-typedef struct MICRid MICRid;
-struct MICRid {
- __le16 len;
- __le16 state;
- __le16 multicastValid;
- u8 multicast[16];
- __le16 unicastValid;
- u8 unicast[16];
-} __packed;
-
-typedef struct MICBuffer MICBuffer;
-struct MICBuffer {
- __be16 typelen;
-
- union {
- u8 snap[8];
- struct {
- u8 dsap;
- u8 ssap;
- u8 control;
- u8 orgcode[3];
- u8 fieldtype[2];
- } llc;
- } u;
- __be32 mic;
- __be32 seq;
-} __packed;
-
-typedef struct {
- u8 da[ETH_ALEN];
- u8 sa[ETH_ALEN];
-} etherHead;
-
-#define TXCTL_TXOK (1<<1) /* report if tx is ok */
-#define TXCTL_TXEX (1<<2) /* report if tx fails */
-#define TXCTL_802_3 (0<<3) /* 802.3 packet */
-#define TXCTL_802_11 (1<<3) /* 802.11 mac packet */
-#define TXCTL_ETHERNET (0<<4) /* payload has ethertype */
-#define TXCTL_LLC (1<<4) /* payload is llc */
-#define TXCTL_RELEASE (0<<5) /* release after completion */
-#define TXCTL_NORELEASE (1<<5) /* on completion returns to host */
-
-#define BUSY_FID 0x10000
-
-#ifdef CISCO_EXT
-#define AIROMAGIC 0xa55a
-/* Warning : SIOCDEVPRIVATE may disappear during 2.5.X - Jean II */
-#ifdef SIOCIWFIRSTPRIV
-#ifdef SIOCDEVPRIVATE
-#define AIROOLDIOCTL SIOCDEVPRIVATE
-#define AIROOLDIDIFC AIROOLDIOCTL + 1
-#endif /* SIOCDEVPRIVATE */
-#else /* SIOCIWFIRSTPRIV */
-#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
-#endif /* SIOCIWFIRSTPRIV */
-/* This may be wrong. When using the new SIOCIWFIRSTPRIV range, we probably
- * should use only "GET" ioctls (last bit set to 1). "SET" ioctls are root
- * only and don't return the modified struct ifreq to the application which
- * is usually a problem. - Jean II */
-#define AIROIOCTL SIOCIWFIRSTPRIV
-#define AIROIDIFC AIROIOCTL + 1
-
-/* Ioctl constants to be used in airo_ioctl.command */
-
-#define AIROGCAP 0 // Capability rid
-#define AIROGCFG 1 // USED A LOT
-#define AIROGSLIST 2 // System ID list
-#define AIROGVLIST 3 // List of specified AP's
-#define AIROGDRVNAM 4 // NOTUSED
-#define AIROGEHTENC 5 // NOTUSED
-#define AIROGWEPKTMP 6
-#define AIROGWEPKNV 7
-#define AIROGSTAT 8
-#define AIROGSTATSC32 9
-#define AIROGSTATSD32 10
-#define AIROGMICRID 11
-#define AIROGMICSTATS 12
-#define AIROGFLAGS 13
-#define AIROGID 14
-#define AIRORRID 15
-#define AIRORSWVERSION 17
-
-/* Leave gap of 40 commands after AIROGSTATSD32 for future */
-
-#define AIROPCAP AIROGSTATSD32 + 40
-#define AIROPVLIST AIROPCAP + 1
-#define AIROPSLIST AIROPVLIST + 1
-#define AIROPCFG AIROPSLIST + 1
-#define AIROPSIDS AIROPCFG + 1
-#define AIROPAPLIST AIROPSIDS + 1
-#define AIROPMACON AIROPAPLIST + 1 /* Enable mac */
-#define AIROPMACOFF AIROPMACON + 1 /* Disable mac */
-#define AIROPSTCLR AIROPMACOFF + 1
-#define AIROPWEPKEY AIROPSTCLR + 1
-#define AIROPWEPKEYNV AIROPWEPKEY + 1
-#define AIROPLEAPPWD AIROPWEPKEYNV + 1
-#define AIROPLEAPUSR AIROPLEAPPWD + 1
-
-/* Flash codes */
-
-#define AIROFLSHRST AIROPWEPKEYNV + 40
-#define AIROFLSHGCHR AIROFLSHRST + 1
-#define AIROFLSHSTFL AIROFLSHGCHR + 1
-#define AIROFLSHPCHR AIROFLSHSTFL + 1
-#define AIROFLPUTBUF AIROFLSHPCHR + 1
-#define AIRORESTART AIROFLPUTBUF + 1
-
-#define FLASHSIZE 32768
-#define AUXMEMSIZE (256 * 1024)
-
-typedef struct aironet_ioctl {
- unsigned short command; // What to do
- unsigned short len; // Len of data
- unsigned short ridnum; // rid number
- unsigned char __user *data; // d-data
-} aironet_ioctl;
-
-static const char swversion[] = "2.1";
-#endif /* CISCO_EXT */
-
-#define NUM_MODULES 2
-#define MIC_MSGLEN_MAX 2400
-#define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX
-#define AIRO_DEF_MTU 2312
-
-typedef struct {
- u32 size; // size
- u8 enabled; // MIC enabled or not
- u32 rxSuccess; // successful packets received
- u32 rxIncorrectMIC; // pkts dropped due to incorrect MIC comparison
- u32 rxNotMICed; // pkts dropped due to not being MIC'd
- u32 rxMICPlummed; // pkts dropped due to not having a MIC plummed
- u32 rxWrongSequence; // pkts dropped due to sequence number violation
- u32 reserve[32];
-} mic_statistics;
-
-typedef struct {
- __be32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2];
- u64 accum; // accumulated mic, reduced to u32 in final()
- int position; // current position (byte offset) in message
- union {
- u8 d8[4];
- __be32 d32;
- } part; // saves partial message word across update() calls
-} emmh32_context;
-
-typedef struct {
- emmh32_context seed; // Context - the seed
- u32 rx; // Received sequence number
- u32 tx; // Tx sequence number
- u32 window; // Start of window
- u8 valid; // Flag to say if context is valid or not
- u8 key[16];
-} miccntx;
-
-typedef struct {
- miccntx mCtx; // Multicast context
- miccntx uCtx; // Unicast context
-} mic_module;
-
-typedef struct {
- unsigned int rid: 16;
- unsigned int len: 15;
- unsigned int valid: 1;
- dma_addr_t host_addr;
-} Rid;
-
-typedef struct {
- unsigned int offset: 15;
- unsigned int eoc: 1;
- unsigned int len: 15;
- unsigned int valid: 1;
- dma_addr_t host_addr;
-} TxFid;
-
-struct rx_hdr {
- __le16 status, len;
- u8 rssi[2];
- u8 rate;
- u8 freq;
- __le16 tmp[4];
-} __packed;
-
-typedef struct {
- unsigned int ctl: 15;
- unsigned int rdy: 1;
- unsigned int len: 15;
- unsigned int valid: 1;
- dma_addr_t host_addr;
-} RxFid;
-
-/*
- * Host receive descriptor
- */
-typedef struct {
- unsigned char __iomem *card_ram_off; /* offset into card memory of the
- desc */
- RxFid rx_desc; /* card receive descriptor */
- char *virtual_host_addr; /* virtual address of host receive
- buffer */
- int pending;
-} HostRxDesc;
-
-/*
- * Host transmit descriptor
- */
-typedef struct {
- unsigned char __iomem *card_ram_off; /* offset into card memory of the
- desc */
- TxFid tx_desc; /* card transmit descriptor */
- char *virtual_host_addr; /* virtual address of host receive
- buffer */
- int pending;
-} HostTxDesc;
-
-/*
- * Host RID descriptor
- */
-typedef struct {
- unsigned char __iomem *card_ram_off; /* offset into card memory of the
- descriptor */
- Rid rid_desc; /* card RID descriptor */
- char *virtual_host_addr; /* virtual address of host receive
- buffer */
-} HostRidDesc;
-
-typedef struct {
- u16 sw0;
- u16 sw1;
- u16 status;
- u16 len;
-#define HOST_SET (1 << 0)
-#define HOST_INT_TX (1 << 1) /* Interrupt on successful TX */
-#define HOST_INT_TXERR (1 << 2) /* Interrupt on unsuccessful TX */
-#define HOST_LCC_PAYLOAD (1 << 4) /* LLC payload, 0 = Ethertype */
-#define HOST_DONT_RLSE (1 << 5) /* Don't release buffer when done */
-#define HOST_DONT_RETRY (1 << 6) /* Don't retry transmit */
-#define HOST_CLR_AID (1 << 7) /* clear AID failure */
-#define HOST_RTS (1 << 9) /* Force RTS use */
-#define HOST_SHORT (1 << 10) /* Do short preamble */
- u16 ctl;
- u16 aid;
- u16 retries;
- u16 fill;
-} TxCtlHdr;
-
-typedef struct {
- u16 ctl;
- u16 duration;
- char addr1[6];
- char addr2[6];
- char addr3[6];
- u16 seq;
- char addr4[6];
-} WifiHdr;
-
-
-typedef struct {
- TxCtlHdr ctlhdr;
- u16 fill1;
- u16 fill2;
- WifiHdr wifihdr;
- u16 gaplen;
- u16 status;
-} WifiCtlHdr;
-
-static WifiCtlHdr wifictlhdr8023 = {
- .ctlhdr = {
- .ctl = HOST_DONT_RLSE,
- }
-};
-
-// A few details needed for WEP (Wired Equivalent Privacy)
-#define MAX_KEY_SIZE 13 // 104-bit key (marketed as 128-bit WEP with the 24-bit IV)
-#define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP
-typedef struct wep_key_t {
- u16 len;
- u8 key[16]; /* 40-bit and 104-bit keys */
-} wep_key_t;
-
-/* List of Wireless Handlers (new API) */
-static const struct iw_handler_def airo_handler_def;
-
-static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
-
-struct airo_info;
-
-static int get_dec_u16(char *buffer, int *start, int limit);
-static void OUT4500(struct airo_info *, u16 reg, u16 value);
-static unsigned short IN4500(struct airo_info *, u16 reg);
-static u16 setup_card(struct airo_info*, struct net_device *dev, int lock);
-static int enable_MAC(struct airo_info *ai, int lock);
-static void disable_MAC(struct airo_info *ai, int lock);
-static void enable_interrupts(struct airo_info*);
-static void disable_interrupts(struct airo_info*);
-static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp,
- bool may_sleep);
-static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap);
-static int aux_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
- int whichbap);
-static int fast_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
- int whichbap);
-static int bap_write(struct airo_info*, const __le16 *pu16Src, int bytelen,
- int whichbap);
-static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
-static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock);
-static int PC4500_writerid(struct airo_info*, u16 rid, const void
- *pBuf, int len, int lock);
-static int do_writerid(struct airo_info*, u16 rid, const void *rid_data,
- int len, int dummy);
-static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
-static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket,
- bool may_sleep);
-static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket,
- bool may_sleep);
-
-static int mpi_send_packet(struct net_device *dev);
-static void mpi_unmap_card(struct pci_dev *pci);
-static void mpi_receive_802_3(struct airo_info *ai);
-static void mpi_receive_802_11(struct airo_info *ai);
-static int waitbusy(struct airo_info *ai);
-
-static irqreturn_t airo_interrupt(int irq, void* dev_id);
-static int airo_thread(void *data);
-static void timer_func(struct net_device *dev);
-static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *, int cmd);
-static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev);
-#ifdef CISCO_EXT
-static int readrids(struct net_device *dev, aironet_ioctl *comp);
-static int writerids(struct net_device *dev, aironet_ioctl *comp);
-static int flashcard(struct net_device *dev, aironet_ioctl *comp);
-#endif /* CISCO_EXT */
-static void micinit(struct airo_info *ai);
-static int micsetup(struct airo_info *ai);
-static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
-static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);
-
-static u8 airo_rssi_to_dbm(tdsRssiEntry *rssi_rid, u8 rssi);
-static u8 airo_dbm_to_pct(tdsRssiEntry *rssi_rid, u8 dbm);
-
-static void airo_networks_free(struct airo_info *ai);
-
-struct airo_info {
- struct net_device *dev;
- struct list_head dev_list;
-	/* Note, we can have MAX_FIDS outstanding. FIDs are 16 bits, so we
-	   use the high bit to mark whether it is in use. */
-#define MAX_FIDS 6
-#define MPI_MAX_FIDS 1
- u32 fids[MAX_FIDS];
- ConfigRid config;
- char keyindex; // Used with auto wep
- char defindex; // Used with auto wep
- struct proc_dir_entry *proc_entry;
- spinlock_t aux_lock;
-#define FLAG_RADIO_OFF 0 /* User disabling of MAC */
-#define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */
-#define FLAG_RADIO_MASK 0x03
-#define FLAG_ENABLED 2
-#define FLAG_ADHOC 3 /* Needed by MIC */
-#define FLAG_MIC_CAPABLE 4
-#define FLAG_UPDATE_MULTI 5
-#define FLAG_UPDATE_UNI 6
-#define FLAG_802_11 7
-#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
-#define FLAG_PENDING_XMIT 9
-#define FLAG_PENDING_XMIT11 10
-#define FLAG_MPI 11
-#define FLAG_REGISTERED 12
-#define FLAG_COMMIT 13
-#define FLAG_RESET 14
-#define FLAG_FLASHING 15
-#define FLAG_WPA_CAPABLE 16
- unsigned long flags;
-#define JOB_DIE 0
-#define JOB_XMIT 1
-#define JOB_XMIT11 2
-#define JOB_STATS 3
-#define JOB_PROMISC 4
-#define JOB_MIC 5
-#define JOB_EVENT 6
-#define JOB_AUTOWEP 7
-#define JOB_SCAN_RESULTS 9
- unsigned long jobs;
- int (*bap_read)(struct airo_info*, __le16 *pu16Dst, int bytelen,
- int whichbap);
- unsigned short *flash;
- tdsRssiEntry *rssi;
- struct task_struct *list_bss_task;
- struct task_struct *airo_thread_task;
- struct semaphore sem;
- wait_queue_head_t thr_wait;
- unsigned long expires;
- struct {
- struct sk_buff *skb;
- int fid;
- } xmit, xmit11;
- struct net_device *wifidev;
- struct iw_statistics wstats; // wireless stats
- unsigned long scan_timeout; /* Time scan should be read */
- struct iw_spy_data spy_data;
- struct iw_public_data wireless_data;
- /* MIC stuff */
- struct crypto_sync_skcipher *tfm;
- mic_module mod[2];
- mic_statistics micstats;
- HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
- HostTxDesc txfids[MPI_MAX_FIDS];
- HostRidDesc config_desc;
- unsigned long ridbus; // phys addr of config_desc
- struct sk_buff_head txq;// tx queue used by mpi350 code
- struct pci_dev *pci;
- unsigned char __iomem *pcimem;
- unsigned char __iomem *pciaux;
- unsigned char *shared;
- dma_addr_t shared_dma;
- pm_message_t power;
- SsidRid *SSID;
- APListRid APList;
-#define PCI_SHARED_LEN 2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
- char proc_name[IFNAMSIZ];
-
- int wep_capable;
- int max_wep_idx;
- int last_auth;
-
- /* WPA-related stuff */
- unsigned int bssListFirst;
- unsigned int bssListNext;
- unsigned int bssListRidLen;
-
- struct list_head network_list;
- struct list_head network_free_list;
- BSSListElement *networks;
-};
-
-static inline int bap_read(struct airo_info *ai, __le16 *pu16Dst, int bytelen,
- int whichbap)
-{
- return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
-}
-
-static int setup_proc_entry(struct net_device *dev,
- struct airo_info *apriv);
-static int takedown_proc_entry(struct net_device *dev,
- struct airo_info *apriv);
-
-static int cmdreset(struct airo_info *ai);
-static int setflashmode(struct airo_info *ai);
-static int flashgchar(struct airo_info *ai, int matchbyte, int dwelltime);
-static int flashputbuf(struct airo_info *ai);
-static int flashrestart(struct airo_info *ai, struct net_device *dev);
-
-#define airo_print(type, name, fmt, args...) \
- printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)
-
-#define airo_print_info(name, fmt, args...) \
- airo_print(KERN_INFO, name, fmt, ##args)
-
-#define airo_print_dbg(name, fmt, args...) \
- airo_print(KERN_DEBUG, name, fmt, ##args)
-
-#define airo_print_warn(name, fmt, args...) \
- airo_print(KERN_WARNING, name, fmt, ##args)
-
-#define airo_print_err(name, fmt, args...) \
- airo_print(KERN_ERR, name, fmt, ##args)
-
-#define AIRO_FLASH(dev) (((struct airo_info *)dev->ml_priv)->flash)
-
-/***********************************************************************
- * MIC ROUTINES *
- ***********************************************************************
- */
-
-static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq);
-static void MoveWindow(miccntx *context, u32 micSeq);
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
- struct crypto_sync_skcipher *tfm);
-static void emmh32_init(emmh32_context *context);
-static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
-static void emmh32_final(emmh32_context *context, u8 digest[4]);
-static int flashpchar(struct airo_info *ai, int byte, int dwelltime);
-
-static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
- struct crypto_sync_skcipher *tfm)
-{
- /* If the current MIC context is valid and its key is the same as
- * the MIC register, there's nothing to do.
- */
- if (cur->valid && (memcmp(cur->key, key, key_len) == 0))
- return;
-
- /* Age current mic Context */
- memcpy(old, cur, sizeof(*cur));
-
- /* Initialize new context */
- memcpy(cur->key, key, key_len);
- cur->window = 33; /* Window always points to the middle */
- cur->rx = 0; /* Rx Sequence numbers */
- cur->tx = 0; /* Tx sequence numbers */
- cur->valid = 1; /* Key is now valid */
-
- /* Give key to mic seed */
- emmh32_setseed(&cur->seed, key, key_len, tfm);
-}
-
-/* micinit - Initialize mic seed */
-
-static void micinit(struct airo_info *ai)
-{
- MICRid mic_rid;
-
- clear_bit(JOB_MIC, &ai->jobs);
- PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
- up(&ai->sem);
-
- ai->micstats.enabled = (le16_to_cpu(mic_rid.state) & 0x00FF) ? 1 : 0;
- if (!ai->micstats.enabled) {
- /* So next time we have a valid key and mic is enabled, we will
- * update the sequence number if the key is the same as before.
- */
- ai->mod[0].uCtx.valid = 0;
- ai->mod[0].mCtx.valid = 0;
- return;
- }
-
- if (mic_rid.multicastValid) {
- age_mic_context(&ai->mod[0].mCtx, &ai->mod[1].mCtx,
- mic_rid.multicast, sizeof(mic_rid.multicast),
- ai->tfm);
- }
-
- if (mic_rid.unicastValid) {
- age_mic_context(&ai->mod[0].uCtx, &ai->mod[1].uCtx,
- mic_rid.unicast, sizeof(mic_rid.unicast),
- ai->tfm);
- }
-}
-
-/* micsetup - Get ready for business */
-
-static int micsetup(struct airo_info *ai)
-{
- int i;
-
- if (ai->tfm == NULL)
- ai->tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
-
- if (IS_ERR(ai->tfm)) {
- airo_print_err(ai->dev->name, "failed to load transform for AES");
- ai->tfm = NULL;
- return ERROR;
- }
-
- for (i = 0; i < NUM_MODULES; i++) {
- memset(&ai->mod[i].mCtx, 0, sizeof(miccntx));
- memset(&ai->mod[i].uCtx, 0, sizeof(miccntx));
- }
- return SUCCESS;
-}
-
-static const u8 micsnap[] = {0xAA, 0xAA, 0x03, 0x00, 0x40, 0x96, 0x00, 0x02};
-
-/*===========================================================================
- * Description: Mic a packet
- *
- * Inputs: etherHead * pointer to an 802.3 frame
- *
- * Returns: BOOLEAN TRUE if successful, otherwise FALSE.
- * PacketTxLen will be updated with the MIC'd packet's size.
- *
- * Caveats: It is assumed that the frame buffer will already
- * be big enough to hold the largest MIC message possible.
- * (No memory allocation is done here).
- *
- * Author: sbraneky (10/15/01)
- * Merciless hacks by rwilcher (1/14/02)
- */
-
-static int encapsulate(struct airo_info *ai, etherHead *frame, MICBuffer *mic, int payLen)
-{
- miccntx *context;
-
- // Determine correct context
- // If not adhoc, always use unicast key
-
- if (test_bit(FLAG_ADHOC, &ai->flags) && (frame->da[0] & 0x1))
- context = &ai->mod[0].mCtx;
- else
- context = &ai->mod[0].uCtx;
-
- if (!context->valid)
- return ERROR;
-
- mic->typelen = htons(payLen + 16); //Length of Mic'd packet
-
- memcpy(&mic->u.snap, micsnap, sizeof(micsnap)); // Add Snap
-
- // Add Tx sequence
- mic->seq = htonl(context->tx);
- context->tx += 2;
-
- emmh32_init(&context->seed); // Mic the packet
- emmh32_update(&context->seed, frame->da, ETH_ALEN * 2); // DA, SA
- emmh32_update(&context->seed, (u8*)&mic->typelen, 10); // Type/Length and Snap
- emmh32_update(&context->seed, (u8*)&mic->seq, sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed, (u8*)(frame + 1), payLen); //payload
- emmh32_final(&context->seed, (u8*)&mic->mic);
-
- /* New Type/length ?????????? */
- mic->typelen = 0; //Let NIC know it could be an oversized packet
- return SUCCESS;
-}
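A note on the typelen arithmetic in encapsulate() above: the 16 added to payLen is exactly the per-frame MIC overhead inserted between the 802.3 header and the payload - an 8-byte SNAP header, a 4-byte MIC digest, and a 4-byte sequence number (the digest itself covers typelen, SNAP, SEQ, and the payload, but not the MIC field). A hedged sketch pinning down that layout; struct mic_hdr mirrors the driver's MICBuffer, and the assert is illustrative only.

#include <assert.h>
#include <stdint.h>

struct mic_hdr {
	uint16_t typelen;	/* htons(payLen + 16) while being MIC'd */
	uint8_t  snap[8];	/* micsnap: AA AA 03 00 40 96 00 02 */
	uint32_t mic;		/* EMMH32 digest */
	uint32_t seq;		/* tx sequence, advanced by 2 per frame */
} __attribute__((packed));

int main(void)
{
	/* everything after typelen is the 16 bytes the length field adds */
	assert(sizeof(struct mic_hdr) - sizeof(uint16_t) == 16);
	return 0;
}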
-
-typedef enum {
- NONE,
- NOMIC,
- NOMICPLUMMED,
- SEQUENCE,
- INCORRECTMIC,
-} mic_error;
-
-/*===========================================================================
- * Description: Decapsulates a MIC'd packet and returns the 802.3 packet
- * (removes the MIC stuff) if the packet is valid.
- *
- * Inputs: etherHead pointer to the 802.3 packet
- *
- * Returns: BOOLEAN - TRUE if packet should be dropped otherwise FALSE
- *
- * Author: sbraneky (10/15/01)
- * Merciless hacks by rwilcher (1/14/02)
- *---------------------------------------------------------------------------
- */
-
-static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 payLen)
-{
- int i;
- u32 micSEQ;
- miccntx *context;
- u8 digest[4];
- mic_error micError = NONE;
-
- // Check if the packet is a Mic'd packet
-
- if (!ai->micstats.enabled) {
- //No Mic set or Mic OFF but we received a MIC'd packet.
- if (memcmp ((u8*)eth + 14, micsnap, sizeof(micsnap)) == 0) {
- ai->micstats.rxMICPlummed++;
- return ERROR;
- }
- return SUCCESS;
- }
-
- if (ntohs(mic->typelen) == 0x888E)
- return SUCCESS;
-
- if (memcmp (mic->u.snap, micsnap, sizeof(micsnap)) != 0) {
- // Mic enabled but packet isn't Mic'd
- ai->micstats.rxMICPlummed++;
- return ERROR;
- }
-
- micSEQ = ntohl(mic->seq); //store SEQ as CPU order
-
-	//At this point we have a mic'd packet and mic is enabled
- //Now do the mic error checking.
-
- //Receive seq must be odd
- if ((micSEQ & 1) == 0) {
- ai->micstats.rxWrongSequence++;
- return ERROR;
- }
-
- for (i = 0; i < NUM_MODULES; i++) {
- int mcast = eth->da[0] & 1;
- //Determine proper context
- context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;
-
- //Make sure context is valid
- if (!context->valid) {
- if (i == 0)
- micError = NOMICPLUMMED;
- continue;
- }
- //DeMic it
-
- if (!mic->typelen)
- mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);
-
- emmh32_init(&context->seed);
- emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
- emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
- emmh32_update(&context->seed, (u8 *)&mic->seq, sizeof(mic->seq));
- emmh32_update(&context->seed, (u8 *)(eth + 1), payLen);
- //Calculate MIC
- emmh32_final(&context->seed, digest);
-
- if (memcmp(digest, &mic->mic, 4)) { //Make sure the mics match
- //Invalid Mic
- if (i == 0)
- micError = INCORRECTMIC;
- continue;
- }
-
- //Check Sequence number if mics pass
- if (RxSeqValid(ai, context, mcast, micSEQ) == SUCCESS) {
- ai->micstats.rxSuccess++;
- return SUCCESS;
- }
- if (i == 0)
- micError = SEQUENCE;
- }
-
- // Update statistics
- switch (micError) {
- case NOMICPLUMMED: ai->micstats.rxMICPlummed++; break;
- case SEQUENCE: ai->micstats.rxWrongSequence++; break;
- case INCORRECTMIC: ai->micstats.rxIncorrectMIC++; break;
- case NONE: break;
- case NOMIC: break;
- }
- return ERROR;
-}
-
-/*===========================================================================
- * Description: Checks the Rx Seq number to make sure it is valid
- * and hasn't already been received
- *
- * Inputs: miccntx - mic context to check seq against
- * micSeq - the Mic seq number
- *
- * Returns: TRUE if valid otherwise FALSE.
- *
- * Author: sbraneky (10/15/01)
- * Merciless hacks by rwilcher (1/14/02)
- *---------------------------------------------------------------------------
- */
-
-static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq)
-{
- u32 seq, index;
-
-	//Allow for the AP being rebooted - if it was, accept the next
-	//sequence number as the current one - the numbers might go backwards
-
- if (mcast) {
- if (test_bit(FLAG_UPDATE_MULTI, &ai->flags)) {
- clear_bit (FLAG_UPDATE_MULTI, &ai->flags);
- context->window = (micSeq > 33) ? micSeq : 33;
- context->rx = 0; // Reset rx
- }
- } else if (test_bit(FLAG_UPDATE_UNI, &ai->flags)) {
- clear_bit (FLAG_UPDATE_UNI, &ai->flags);
- context->window = (micSeq > 33) ? micSeq : 33; // Move window
- context->rx = 0; // Reset rx
- }
-
- //Make sequence number relative to START of window
- seq = micSeq - (context->window - 33);
-
- //Too old of a SEQ number to check.
- if ((s32)seq < 0)
- return ERROR;
-
- if (seq > 64) {
- //Window is infinite forward
- MoveWindow(context, micSeq);
- return SUCCESS;
- }
-
- // We are in the window. Now check the context rx bit to see if it was already sent
- seq >>= 1; //divide by 2 because we only have odd numbers
- index = 1 << seq; //Get an index number
-
- if (!(context->rx & index)) {
- //micSEQ falls inside the window.
-		//Add sequence number to the list of received numbers.
- context->rx |= index;
-
- MoveWindow(context, micSeq);
-
- return SUCCESS;
- }
- return ERROR;
-}
-
-static void MoveWindow(miccntx *context, u32 micSeq)
-{
- u32 shift;
-
- //Move window if seq greater than the middle of the window
- if (micSeq > context->window) {
- shift = (micSeq - context->window) >> 1;
-
- //Shift out old
- if (shift < 32)
- context->rx >>= shift;
- else
- context->rx = 0;
-
- context->window = micSeq; //Move window
- }
-}
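RxSeqValid() and MoveWindow() above together implement a 64-number anti-replay window over the odd MIC sequence numbers: context->window points 33 past the start of the window, and context->rx is a bitmap of the odd numbers already accepted, one bit per two sequence values. A self-contained sketch of the same scheme, assuming odd sequence numbers throughout; replay_win and seq_valid() are illustrative names, not the driver's.

#include <stdint.h>
#include <stdio.h>

struct replay_win {
	uint32_t window;	/* 33 past the window start */
	uint32_t rx;		/* bitmap of odd seqs already seen */
};

static int seq_valid(struct replay_win *w, uint32_t seq)
{
	uint32_t rel = seq - (w->window - 33);	/* offset from window start */

	if ((int32_t)rel < 0)
		return 0;				/* too old to check */
	if (rel < 64) {					/* inside the window */
		uint32_t bit = 1u << (rel >> 1);	/* odd seqs: 1 bit per 2 */

		if (w->rx & bit)
			return 0;			/* replayed */
		w->rx |= bit;
	}
	if (seq > w->window) {				/* slide window forward */
		uint32_t shift = (seq - w->window) >> 1;

		w->rx = (shift < 32) ? w->rx >> shift : 0;
		w->window = seq;
	}
	return 1;
}

int main(void)
{
	struct replay_win w = { .window = 33, .rx = 0 };

	/* accept seq 1, reject its replay, accept seq 3 */
	printf("%d %d %d\n", seq_valid(&w, 1), seq_valid(&w, 1), seq_valid(&w, 3));
	return 0;
}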
-
-/*==============================================*/
-/*========== EMMH ROUTINES ====================*/
-/*==============================================*/
-
-/* mic accumulate */
-#define MIC_ACCUM(val) \
- context->accum += (u64)(val) * be32_to_cpu(context->coeff[coeff_position++]);
-
-/* expand the key to fill the MMH coefficient array */
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
- struct crypto_sync_skcipher *tfm)
-{
-	/* take the keying material, expand if necessary, truncate at 16 bytes */
- /* run through AES counter mode to generate context->coeff[] */
-
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
- u8 iv[AES_BLOCK_SIZE] = {};
- int ret;
-
- crypto_sync_skcipher_setkey(tfm, pkey, 16);
-
- memset(context->coeff, 0, sizeof(context->coeff));
- sg_init_one(&sg, context->coeff, sizeof(context->coeff));
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, sizeof(context->coeff), iv);
-
- ret = crypto_skcipher_encrypt(req);
- WARN_ON_ONCE(ret);
-}
-
-/* prepare for calculation of a new mic */
-static void emmh32_init(emmh32_context *context)
-{
- /* prepare for new mic calculation */
- context->accum = 0;
- context->position = 0;
-}
-
-/* add some bytes to the mic calculation */
-static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
-{
- int coeff_position, byte_position;
-
- if (len == 0) return;
-
- coeff_position = context->position >> 2;
-
- /* deal with partial 32-bit word left over from last update */
- byte_position = context->position & 3;
- if (byte_position) {
- /* have a partial word in part to deal with */
- do {
- if (len == 0) return;
- context->part.d8[byte_position++] = *pOctets++;
- context->position++;
- len--;
- } while (byte_position < 4);
- MIC_ACCUM(ntohl(context->part.d32));
- }
-
- /* deal with full 32-bit words */
- while (len >= 4) {
- MIC_ACCUM(ntohl(*(__be32 *)pOctets));
- context->position += 4;
- pOctets += 4;
- len -= 4;
- }
-
- /* deal with partial 32-bit word that will be left over from this update */
- byte_position = 0;
- while (len > 0) {
- context->part.d8[byte_position++] = *pOctets++;
- context->position++;
- len--;
- }
-}
-
-/* mask used to zero empty bytes for final partial word */
-static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L };
-
-/* calculate the mic */
-static void emmh32_final(emmh32_context *context, u8 digest[4])
-{
- int coeff_position, byte_position;
- u32 val;
-
- u64 sum, utmp;
- s64 stmp;
-
- coeff_position = context->position >> 2;
-
- /* deal with partial 32-bit word left over from last update */
- byte_position = context->position & 3;
- if (byte_position) {
- /* have a partial word in part to deal with */
- val = ntohl(context->part.d32);
- MIC_ACCUM(val & mask32[byte_position]); /* zero empty bytes */
- }
-
- /* reduce the accumulated u64 to a 32-bit MIC */
- sum = context->accum;
- stmp = (sum & 0xffffffffLL) - ((sum >> 32) * 15);
- utmp = (stmp & 0xffffffffLL) - ((stmp >> 32) * 15);
- sum = utmp & 0xffffffffLL;
- if (utmp > 0x10000000fLL)
- sum -= 15;
-
- val = (u32)sum;
- digest[0] = (val>>24) & 0xFF;
- digest[1] = (val>>16) & 0xFF;
- digest[2] = (val>>8) & 0xFF;
- digest[3] = val & 0xFF;
-}
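The reduction at the end of emmh32_final() above folds the 64-bit accumulator modulo the MMH prime p = 2^32 + 15: since 2^32 = -15 (mod p), a value hi*2^32 + lo is congruent to lo - 15*hi, and two folds plus one conditional subtract (0x10000000f is p) bring the result down to 32 bits. A sketch that checks the fold against a direct remainder for a few representative values; fold_mod_p() is an illustrative name, and the arithmetic mirrors the driver's signed/unsigned mix.

#include <assert.h>
#include <stdint.h>

/* Same fold as emmh32_final(): reduce modulo p = 2^32 + 15
 * using 2^32 == -15 (mod p). */
static uint32_t fold_mod_p(uint64_t sum)
{
	int64_t  stmp = (sum  & 0xffffffffULL) - ((sum  >> 32) * 15);
	uint64_t utmp = (stmp & 0xffffffffULL) - ((stmp >> 32) * 15);

	sum = utmp & 0xffffffffULL;
	if (utmp > 0x10000000fULL)	/* still above p: subtract p */
		sum -= 15;
	return (uint32_t)sum;
}

int main(void)
{
	const uint64_t p = (1ULL << 32) + 15;
	uint64_t v;

	for (v = 0; v < 1000000; v += 97)	/* small values: identity */
		assert(fold_mod_p(v) == (uint32_t)(v % p));
	assert(fold_mod_p(p + 1) == 1);
	assert(fold_mod_p(0xdeadbeefcafebabeULL) ==
	       (uint32_t)(0xdeadbeefcafebabeULL % p));
	return 0;
}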
-
-static int readBSSListRid(struct airo_info *ai, int first,
- BSSListRid *list)
-{
- Cmd cmd;
- Resp rsp;
-
- if (first == 1) {
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_LISTBSS;
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
- ai->list_bss_task = current;
- issuecommand(ai, &cmd, &rsp, true);
- up(&ai->sem);
- /* Let the command take effect */
- schedule_timeout_uninterruptible(3 * HZ);
- ai->list_bss_task = NULL;
- }
- return PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
- list, ai->bssListRidLen, 1);
-}
-
-static int readWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int temp, int lock)
-{
- return PC4500_readrid(ai, temp ? RID_WEP_TEMP : RID_WEP_PERM,
- wkr, sizeof(*wkr), lock);
-}
-
-static int writeWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int perm, int lock)
-{
- int rc;
- rc = PC4500_writerid(ai, RID_WEP_TEMP, wkr, sizeof(*wkr), lock);
- if (rc!=SUCCESS)
- airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc);
- if (perm) {
- rc = PC4500_writerid(ai, RID_WEP_PERM, wkr, sizeof(*wkr), lock);
- if (rc!=SUCCESS)
- airo_print_err(ai->dev->name, "WEP_PERM set %x", rc);
- }
- return rc;
-}
-
-static int readSsidRid(struct airo_info*ai, SsidRid *ssidr)
-{
- return PC4500_readrid(ai, RID_SSID, ssidr, sizeof(*ssidr), 1);
-}
-
-static int writeSsidRid(struct airo_info*ai, SsidRid *pssidr, int lock)
-{
- return PC4500_writerid(ai, RID_SSID, pssidr, sizeof(*pssidr), lock);
-}
-
-static int readConfigRid(struct airo_info *ai, int lock)
-{
- int rc;
- ConfigRid cfg;
-
- if (ai->config.len)
- return SUCCESS;
-
- rc = PC4500_readrid(ai, RID_ACTUALCONFIG, &cfg, sizeof(cfg), lock);
- if (rc != SUCCESS)
- return rc;
-
- ai->config = cfg;
- return SUCCESS;
-}
-
-static inline void checkThrottle(struct airo_info *ai)
-{
- int i;
-/* Old hardware had a limit on encryption speed */
- if (ai->config.authType != AUTH_OPEN && maxencrypt) {
- for (i = 0; i<8; i++) {
- if (ai->config.rates[i] > maxencrypt) {
- ai->config.rates[i] = 0;
- }
- }
- }
-}
-
-static int writeConfigRid(struct airo_info *ai, int lock)
-{
- ConfigRid cfgr;
-
- if (!test_bit (FLAG_COMMIT, &ai->flags))
- return SUCCESS;
-
- clear_bit (FLAG_COMMIT, &ai->flags);
- clear_bit (FLAG_RESET, &ai->flags);
- checkThrottle(ai);
- cfgr = ai->config;
-
- if ((cfgr.opmode & MODE_CFG_MASK) == MODE_STA_IBSS)
- set_bit(FLAG_ADHOC, &ai->flags);
- else
- clear_bit(FLAG_ADHOC, &ai->flags);
-
- return PC4500_writerid(ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
-}
-
-static int readStatusRid(struct airo_info *ai, StatusRid *statr, int lock)
-{
- return PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr), lock);
-}
-
-static int writeAPListRid(struct airo_info *ai, APListRid *aplr, int lock)
-{
- return PC4500_writerid(ai, RID_APLIST, aplr, sizeof(*aplr), lock);
-}
-
-static int readCapabilityRid(struct airo_info *ai, CapabilityRid *capr, int lock)
-{
- return PC4500_readrid(ai, RID_CAPABILITIES, capr, sizeof(*capr), lock);
-}
-
-static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock)
-{
- return PC4500_readrid(ai, rid, sr, sizeof(*sr), lock);
-}
-
-static void try_auto_wep(struct airo_info *ai)
-{
- if (auto_wep && !test_bit(FLAG_RADIO_DOWN, &ai->flags)) {
- ai->expires = RUN_AT(3*HZ);
- wake_up_interruptible(&ai->thr_wait);
- }
-}
-
-static int airo_open(struct net_device *dev)
-{
- struct airo_info *ai = dev->ml_priv;
- int rc = 0;
-
- if (test_bit(FLAG_FLASHING, &ai->flags))
- return -EIO;
-
- /* Make sure the card is configured.
- * Wireless Extensions may postpone config changes until the card
-	 * is open (to pipeline changes and speed up card setup). If
- * those changes are not yet committed, do it now - Jean II */
- if (test_bit(FLAG_COMMIT, &ai->flags)) {
- disable_MAC(ai, 1);
- writeConfigRid(ai, 1);
- }
-
- if (ai->wifidev != dev) {
- clear_bit(JOB_DIE, &ai->jobs);
- ai->airo_thread_task = kthread_run(airo_thread, dev, "%s",
- dev->name);
- if (IS_ERR(ai->airo_thread_task))
- return (int)PTR_ERR(ai->airo_thread_task);
-
- rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED,
- dev->name, dev);
- if (rc) {
- airo_print_err(dev->name,
- "register interrupt %d failed, rc %d",
- dev->irq, rc);
- set_bit(JOB_DIE, &ai->jobs);
- kthread_stop(ai->airo_thread_task);
- return rc;
- }
-
- /* Power on the MAC controller (which may have been disabled) */
- clear_bit(FLAG_RADIO_DOWN, &ai->flags);
- enable_interrupts(ai);
-
- try_auto_wep(ai);
- }
- enable_MAC(ai, 1);
-
- netif_start_queue(dev);
- return 0;
-}
-
-static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- int npacks, pending;
- unsigned long flags;
- struct airo_info *ai = dev->ml_priv;
-
- if (!skb) {
- airo_print_err(dev->name, "%s: skb == NULL!",__func__);
- return NETDEV_TX_OK;
- }
- if (skb_padto(skb, ETH_ZLEN)) {
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
- npacks = skb_queue_len (&ai->txq);
-
- if (npacks >= MAXTXQ - 1) {
- netif_stop_queue (dev);
- if (npacks > MAXTXQ) {
- dev->stats.tx_fifo_errors++;
- return NETDEV_TX_BUSY;
- }
- skb_queue_tail (&ai->txq, skb);
- return NETDEV_TX_OK;
- }
-
- spin_lock_irqsave(&ai->aux_lock, flags);
- skb_queue_tail (&ai->txq, skb);
- pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock, flags);
- netif_wake_queue (dev);
-
- if (pending == 0) {
- set_bit(FLAG_PENDING_XMIT, &ai->flags);
- mpi_send_packet (dev);
- }
- return NETDEV_TX_OK;
-}
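-
-/*
- * Note on the MPI transmit path: mpi_start_xmit() above only queues the
- * skb.  The single host TX descriptor (txfids[0]) is fed by
- * mpi_send_packet(), called either directly when no transmit is pending
- * or from the TX-complete path in airo_handle_tx(), with
- * FLAG_PENDING_XMIT serializing the two.
- */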
-
-/*
- * @mpi_send_packet
- *
- * Attempt to transmit a packet. Can be called from interrupt
- * or transmit context. Returns the number of packets we tried to send.
- */
-
-static int mpi_send_packet (struct net_device *dev)
-{
- struct sk_buff *skb;
- unsigned char *buffer;
- s16 len;
- __le16 *payloadLen;
- struct airo_info *ai = dev->ml_priv;
- u8 *sendbuf;
-
- /* get a packet to send */
-
- if ((skb = skb_dequeue(&ai->txq)) == NULL) {
- airo_print_err(dev->name,
- "%s: Dequeue'd zero in send_packet()",
- __func__);
- return 0;
- }
-
- /* check min length */
- len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- buffer = skb->data;
-
- ai->txfids[0].tx_desc.offset = 0;
- ai->txfids[0].tx_desc.valid = 1;
- ai->txfids[0].tx_desc.eoc = 1;
- ai->txfids[0].tx_desc.len = len + sizeof(WifiHdr);
-
-/*
- * Magic: the card's firmware needs a length count (2 bytes) in the host
- * buffer right after TXFID_HDR. The TXFID_HDR contains the status short,
- * so payloadlen is immediately after it.
- * ------------------------------------------------
- * |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA|
- * ------------------------------------------------
- */
-
- memcpy(ai->txfids[0].virtual_host_addr,
- (char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
-
- payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
- sizeof(wifictlhdr8023));
- sendbuf = ai->txfids[0].virtual_host_addr +
- sizeof(wifictlhdr8023) + 2;
-
- /*
- * Firmware automatically puts 802 header on so
- * we don't need to account for it in the length
- */
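- /* (0x888E is the 802.1X/EAPOL ethertype; key handshake frames
- * skip the MIC) */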
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
- (ntohs(((__be16 *)buffer)[6]) != 0x888E)) {
- MICBuffer pMic;
-
- if (encapsulate(ai, (etherHead *)buffer, &pMic, len - sizeof(etherHead)) != SUCCESS)
- return ERROR;
-
- *payloadLen = cpu_to_le16(len-sizeof(etherHead)+sizeof(pMic));
- ai->txfids[0].tx_desc.len += sizeof(pMic);
- /* copy data into airo dma buffer */
- memcpy (sendbuf, buffer, sizeof(etherHead));
- buffer += sizeof(etherHead);
- sendbuf += sizeof(etherHead);
- memcpy (sendbuf, &pMic, sizeof(pMic));
- sendbuf += sizeof(pMic);
- memcpy (sendbuf, buffer, len - sizeof(etherHead));
- } else {
- *payloadLen = cpu_to_le16(len - sizeof(etherHead));
-
- netif_trans_update(dev);
-
- /* copy data into airo dma buffer */
- memcpy(sendbuf, buffer, len);
- }
-
- memcpy_toio(ai->txfids[0].card_ram_off,
- &ai->txfids[0].tx_desc, sizeof(TxFid));
-
- OUT4500(ai, EVACK, 8);
-
- dev_kfree_skb_any(skb);
- return 1;
-}
-
-static void get_tx_error(struct airo_info *ai, s32 fid)
-{
- __le16 status;
-
- if (fid < 0)
- status = ((WifiCtlHdr *)ai->txfids[0].virtual_host_addr)->ctlhdr.status;
- else {
- if (bap_setup(ai, ai->fids[fid] & 0xffff, 4, BAP0) != SUCCESS)
- return;
- bap_read(ai, &status, 2, BAP0);
- }
- if (le16_to_cpu(status) & 2) /* Too many retries */
- ai->dev->stats.tx_aborted_errors++;
- if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
- ai->dev->stats.tx_heartbeat_errors++;
- if (le16_to_cpu(status) & 8) /* Aid fail */
- { }
- if (le16_to_cpu(status) & 0x10) /* MAC disabled */
- ai->dev->stats.tx_carrier_errors++;
- if (le16_to_cpu(status) & 0x20) /* Association lost */
- { }
- /* We produce a TXDROP event only for retry or lifetime
- * exceeded, because those are the only statuses that really
- * mean that this particular node went away.
- * Other errors mean that *we* screwed up. - Jean II */
- if ((le16_to_cpu(status) & 2) ||
- (le16_to_cpu(status) & 4)) {
- union iwreq_data wrqu;
- char junk[0x18];
-
- /* Faster to skip over useless data than to do
- * another bap_setup(). We are at offset 0x6 and
- * need to go to 0x18 and read 6 bytes - Jean II */
- bap_read(ai, (__le16 *) junk, 0x18, BAP0);
-
- /* Copy 802.11 dest address.
- * We use the 802.11 header because the frame may
- * not be 802.3 or may be mangled...
- * In Ad-Hoc mode, it will be the node address.
- * In managed mode, it will most likely be the AP addr.
- * User space will figure out how to convert it to
- * whatever it needs (IP address or else).
- * - Jean II */
- memcpy(wrqu.addr.sa_data, junk + 0x12, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(ai->dev, IWEVTXDROP, &wrqu, NULL);
- }
-}
-
-static void airo_end_xmit(struct net_device *dev, bool may_sleep)
-{
- u16 status;
- int i;
- struct airo_info *priv = dev->ml_priv;
- struct sk_buff *skb = priv->xmit.skb;
- int fid = priv->xmit.fid;
- u32 *fids = priv->fids;
-
- clear_bit(JOB_XMIT, &priv->jobs);
- clear_bit(FLAG_PENDING_XMIT, &priv->flags);
- status = transmit_802_3_packet(priv, fids[fid], skb->data, may_sleep);
- up(&priv->sem);
-
- i = 0;
- if (status == SUCCESS) {
- netif_trans_update(dev);
- for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
- } else {
- priv->fids[fid] &= 0xffff;
- dev->stats.tx_window_errors++;
- }
- if (i < MAX_FIDS / 2)
- netif_wake_queue(dev);
- dev_kfree_skb(skb);
-}
-
-static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- s16 len;
- int i, j;
- struct airo_info *priv = dev->ml_priv;
- u32 *fids = priv->fids;
-
- if (skb == NULL) {
- airo_print_err(dev->name, "%s: skb == NULL!", __func__);
- return NETDEV_TX_OK;
- }
- if (skb_padto(skb, ETH_ZLEN)) {
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
-
- /* Find a vacant FID */
- for (i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++);
- for (j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++);
-
- if (j >= MAX_FIDS / 2) {
- netif_stop_queue(dev);
-
- if (i == MAX_FIDS / 2) {
- dev->stats.tx_fifo_errors++;
- return NETDEV_TX_BUSY;
- }
- }
- /* check min length */
- len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- /* Mark fid as used & save length for later */
- fids[i] |= (len << 16);
- priv->xmit.skb = skb;
- priv->xmit.fid = i;
- if (down_trylock(&priv->sem) != 0) {
- set_bit(FLAG_PENDING_XMIT, &priv->flags);
- netif_stop_queue(dev);
- set_bit(JOB_XMIT, &priv->jobs);
- wake_up_interruptible(&priv->thr_wait);
- } else
- airo_end_xmit(dev, false);
- return NETDEV_TX_OK;
-}
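-
-/*
- * FID bookkeeping: each priv->fids entry keeps the card's transmit FID
- * in the low 16 bits and, while a transmit is in flight, the packet
- * length in the high 16 bits.  A non-zero high half therefore marks the
- * FID as busy; the completion and error paths clear it with "& 0xffff".
- */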
-
-static void airo_end_xmit11(struct net_device *dev, bool may_sleep)
-{
- u16 status;
- int i;
- struct airo_info *priv = dev->ml_priv;
- struct sk_buff *skb = priv->xmit11.skb;
- int fid = priv->xmit11.fid;
- u32 *fids = priv->fids;
-
- clear_bit(JOB_XMIT11, &priv->jobs);
- clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
- status = transmit_802_11_packet(priv, fids[fid], skb->data, may_sleep);
- up(&priv->sem);
-
- i = MAX_FIDS / 2;
- if (status == SUCCESS) {
- netif_trans_update(dev);
- for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
- } else {
- priv->fids[fid] &= 0xffff;
- dev->stats.tx_window_errors++;
- }
- if (i < MAX_FIDS)
- netif_wake_queue(dev);
- dev_kfree_skb(skb);
-}
-
-static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
- struct net_device *dev)
-{
- s16 len;
- int i, j;
- struct airo_info *priv = dev->ml_priv;
- u32 *fids = priv->fids;
-
- if (test_bit(FLAG_MPI, &priv->flags)) {
- /* Not implemented yet for MPI350 */
- netif_stop_queue(dev);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb == NULL) {
- airo_print_err(dev->name, "%s: skb == NULL!", __func__);
- return NETDEV_TX_OK;
- }
- if (skb_padto(skb, ETH_ZLEN)) {
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
-
- /* Find a vacant FID */
- for (i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++);
- for (j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++);
-
- if (j >= MAX_FIDS) {
- netif_stop_queue(dev);
-
- if (i == MAX_FIDS) {
- dev->stats.tx_fifo_errors++;
- return NETDEV_TX_BUSY;
- }
- }
- /* check min length */
- len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- /* Mark fid as used & save length for later */
- fids[i] |= (len << 16);
- priv->xmit11.skb = skb;
- priv->xmit11.fid = i;
- if (down_trylock(&priv->sem) != 0) {
- set_bit(FLAG_PENDING_XMIT11, &priv->flags);
- netif_stop_queue(dev);
- set_bit(JOB_XMIT11, &priv->jobs);
- wake_up_interruptible(&priv->thr_wait);
- } else
- airo_end_xmit11(dev, false);
- return NETDEV_TX_OK;
-}
-
-static void airo_read_stats(struct net_device *dev)
-{
- struct airo_info *ai = dev->ml_priv;
- StatsRid stats_rid;
- __le32 *vals = stats_rid.vals;
-
- clear_bit(JOB_STATS, &ai->jobs);
- if (ai->power.event) {
- up(&ai->sem);
- return;
- }
- readStatsRid(ai, &stats_rid, RID_STATS, 0);
- up(&ai->sem);
-
- dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
- le32_to_cpu(vals[45]);
- dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
- le32_to_cpu(vals[41]);
- dev->stats.rx_bytes = le32_to_cpu(vals[92]);
- dev->stats.tx_bytes = le32_to_cpu(vals[91]);
- dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
- le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]);
- dev->stats.tx_errors = le32_to_cpu(vals[42]) +
- dev->stats.tx_fifo_errors;
- dev->stats.multicast = le32_to_cpu(vals[43]);
- dev->stats.collisions = le32_to_cpu(vals[89]);
-
- /* detailed rx_errors: */
- dev->stats.rx_length_errors = le32_to_cpu(vals[3]);
- dev->stats.rx_crc_errors = le32_to_cpu(vals[4]);
- dev->stats.rx_frame_errors = le32_to_cpu(vals[2]);
- dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
-}
-
-static struct net_device_stats *airo_get_stats(struct net_device *dev)
-{
- struct airo_info *local = dev->ml_priv;
-
- if (!test_bit(JOB_STATS, &local->jobs)) {
- set_bit(JOB_STATS, &local->jobs);
- wake_up_interruptible(&local->thr_wait);
- }
-
- return &dev->stats;
-}
-
-static void airo_set_promisc(struct airo_info *ai, bool may_sleep)
-{
- Cmd cmd;
- Resp rsp;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_SETMODE;
- clear_bit(JOB_PROMISC, &ai->jobs);
- cmd.parm0 = (ai->flags & IFF_PROMISC) ? PROMISC : NOPROMISC;
- issuecommand(ai, &cmd, &rsp, may_sleep);
- up(&ai->sem);
-}
-
-static void airo_set_multicast_list(struct net_device *dev)
-{
- struct airo_info *ai = dev->ml_priv;
-
- if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
- change_bit(FLAG_PROMISC, &ai->flags);
- if (down_trylock(&ai->sem) != 0) {
- set_bit(JOB_PROMISC, &ai->jobs);
- wake_up_interruptible(&ai->thr_wait);
- } else
- airo_set_promisc(ai, false);
- }
-
- if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
- /* Turn on multicast. (Should already be set up...) */
- }
-}
-
-static int airo_set_mac_address(struct net_device *dev, void *p)
-{
- struct airo_info *ai = dev->ml_priv;
- struct sockaddr *addr = p;
-
- readConfigRid(ai, 1);
- memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
- set_bit (FLAG_COMMIT, &ai->flags);
- disable_MAC(ai, 1);
- writeConfigRid (ai, 1);
- enable_MAC(ai, 1);
- dev_addr_set(ai->dev, addr->sa_data);
- if (ai->wifidev)
- dev_addr_set(ai->wifidev, addr->sa_data);
- return 0;
-}
-
-static LIST_HEAD(airo_devices);
-
-static void add_airo_dev(struct airo_info *ai)
-{
- /* Upper layers already keep track of PCI devices,
- * so we only need to remember our non-PCI cards. */
- if (!ai->pci)
- list_add_tail(&ai->dev_list, &airo_devices);
-}
-
-static void del_airo_dev(struct airo_info *ai)
-{
- if (!ai->pci)
- list_del(&ai->dev_list);
-}
-
-static int airo_close(struct net_device *dev)
-{
- struct airo_info *ai = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- if (ai->wifidev != dev) {
-#ifdef POWER_ON_DOWN
- /* Shut power to the card. The idea is that the user can save
- * power when he doesn't need the card with "ifconfig down".
- * That's the method that is most friendly towards the network
- * stack (i.e. the network stack won't try to broadcast
- * anything on the interface and routes are gone). Jean II */
- set_bit(FLAG_RADIO_DOWN, &ai->flags);
- disable_MAC(ai, 1);
-#endif
- disable_interrupts(ai);
-
- free_irq(dev->irq, dev);
-
- set_bit(JOB_DIE, &ai->jobs);
- kthread_stop(ai->airo_thread_task);
- }
- return 0;
-}
-
-void stop_airo_card(struct net_device *dev, int freeres)
-{
- struct airo_info *ai = dev->ml_priv;
-
- set_bit(FLAG_RADIO_DOWN, &ai->flags);
- disable_MAC(ai, 1);
- disable_interrupts(ai);
- takedown_proc_entry(dev, ai);
- if (test_bit(FLAG_REGISTERED, &ai->flags)) {
- unregister_netdev(dev);
- if (ai->wifidev) {
- unregister_netdev(ai->wifidev);
- free_netdev(ai->wifidev);
- ai->wifidev = NULL;
- }
- clear_bit(FLAG_REGISTERED, &ai->flags);
- }
- /*
- * Clean out tx queue
- */
- if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) {
- struct sk_buff *skb = NULL;
- for (;(skb = skb_dequeue(&ai->txq));)
- dev_kfree_skb(skb);
- }
-
- airo_networks_free (ai);
-
- kfree(ai->flash);
- kfree(ai->rssi);
- kfree(ai->SSID);
- if (freeres) {
- /* PCMCIA frees this stuff, so only for PCI and ISA */
- release_region(dev->base_addr, 64);
- if (test_bit(FLAG_MPI, &ai->flags)) {
- if (ai->pci)
- mpi_unmap_card(ai->pci);
- if (ai->pcimem)
- iounmap(ai->pcimem);
- if (ai->pciaux)
- iounmap(ai->pciaux);
- dma_free_coherent(&ai->pci->dev, PCI_SHARED_LEN,
- ai->shared, ai->shared_dma);
- }
- }
- crypto_free_sync_skcipher(ai->tfm);
- del_airo_dev(ai);
- free_netdev(dev);
-}
-
-EXPORT_SYMBOL(stop_airo_card);
-
-static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr)
-{
- memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
- return ETH_ALEN;
-}
-
-static void mpi_unmap_card(struct pci_dev *pci)
-{
- unsigned long mem_start = pci_resource_start(pci, 1);
- unsigned long mem_len = pci_resource_len(pci, 1);
- unsigned long aux_start = pci_resource_start(pci, 2);
- unsigned long aux_len = AUXMEMSIZE;
-
- release_mem_region(aux_start, aux_len);
- release_mem_region(mem_start, mem_len);
-}
-
-/*************************************************************
- * This routine assumes that descriptors have been set up.
- * Run at insmod time or after reset, when the descriptors
- * have been initialized. Returns 0 if all is well, non-zero
- * otherwise. Does not allocate memory, but sets up the card
- * using previously allocated descriptors.
- */
-static int mpi_init_descriptors (struct airo_info *ai)
-{
- Cmd cmd;
- Resp rsp;
- int i;
- int rc = SUCCESS;
-
- /* Alloc card RX descriptors */
- netif_stop_queue(ai->dev);
-
- memset(&rsp, 0, sizeof(rsp));
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.cmd = CMD_ALLOCATEAUX;
- cmd.parm0 = FID_RX;
- cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
- cmd.parm2 = MPI_MAX_FIDS;
- rc = issuecommand(ai, &cmd, &rsp, true);
- if (rc != SUCCESS) {
- airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
- return rc;
- }
-
- for (i = 0; i < MPI_MAX_FIDS; i++) {
- memcpy_toio(ai->rxfids[i].card_ram_off,
- &ai->rxfids[i].rx_desc, sizeof(RxFid));
- }
-
- /* Alloc card TX descriptors */
-
- memset(&rsp, 0, sizeof(rsp));
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.cmd = CMD_ALLOCATEAUX;
- cmd.parm0 = FID_TX;
- cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux);
- cmd.parm2 = MPI_MAX_FIDS;
-
- for (i = 0; i < MPI_MAX_FIDS; i++) {
- ai->txfids[i].tx_desc.valid = 1;
- memcpy_toio(ai->txfids[i].card_ram_off,
- &ai->txfids[i].tx_desc, sizeof(TxFid));
- }
- ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
-
- rc = issuecommand(ai, &cmd, &rsp, true);
- if (rc != SUCCESS) {
- airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
- return rc;
- }
-
- /* Alloc card Rid descriptor */
- memset(&rsp, 0, sizeof(rsp));
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.cmd = CMD_ALLOCATEAUX;
- cmd.parm0 = RID_RW;
- cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
- cmd.parm2 = 1; /* Magic number... */
- rc = issuecommand(ai, &cmd, &rsp, true);
- if (rc != SUCCESS) {
- airo_print_err(ai->dev->name, "Couldn't allocate RID");
- return rc;
- }
-
- memcpy_toio(ai->config_desc.card_ram_off,
- &ai->config_desc.rid_desc, sizeof(Rid));
-
- return rc;
-}
-
-/*
- * We are setting up three things here:
- * 1) Map AUX memory for descriptors: Rid, TxFid, or RxFid.
- * 2) Map PCI memory for issuing commands.
- * 3) Allocate memory (shared) to send and receive ethernet frames.
- */
-static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
-{
- unsigned long mem_start, mem_len, aux_start, aux_len;
- int rc = -1;
- int i;
- dma_addr_t busaddroff;
- unsigned char *vpackoff;
- unsigned char __iomem *pciaddroff;
-
- mem_start = pci_resource_start(pci, 1);
- mem_len = pci_resource_len(pci, 1);
- aux_start = pci_resource_start(pci, 2);
- aux_len = AUXMEMSIZE;
-
- if (!request_mem_region(mem_start, mem_len, DRV_NAME)) {
- airo_print_err("", "Couldn't get region %x[%x]",
- (int)mem_start, (int)mem_len);
- goto out;
- }
- if (!request_mem_region(aux_start, aux_len, DRV_NAME)) {
- airo_print_err("", "Couldn't get region %x[%x]",
- (int)aux_start, (int)aux_len);
- goto free_region1;
- }
-
- ai->pcimem = ioremap(mem_start, mem_len);
- if (!ai->pcimem) {
- airo_print_err("", "Couldn't map region %x[%x]",
- (int)mem_start, (int)mem_len);
- goto free_region2;
- }
- ai->pciaux = ioremap(aux_start, aux_len);
- if (!ai->pciaux) {
- airo_print_err("", "Couldn't map region %x[%x]",
- (int)aux_start, (int)aux_len);
- goto free_memmap;
- }
-
- /* Reserve PKTSIZE for each fid and 2K for the Rids */
- ai->shared = dma_alloc_coherent(&pci->dev, PCI_SHARED_LEN,
- &ai->shared_dma, GFP_KERNEL);
- if (!ai->shared) {
- airo_print_err("", "Couldn't alloc_coherent %d",
- PCI_SHARED_LEN);
- goto free_auxmap;
- }
-
- /*
- * Setup descriptor RX, TX, CONFIG
- */
- busaddroff = ai->shared_dma;
- pciaddroff = ai->pciaux + AUX_OFFSET;
- vpackoff = ai->shared;
-
- /* RX descriptor setup */
- for (i = 0; i < MPI_MAX_FIDS; i++) {
- ai->rxfids[i].pending = 0;
- ai->rxfids[i].card_ram_off = pciaddroff;
- ai->rxfids[i].virtual_host_addr = vpackoff;
- ai->rxfids[i].rx_desc.host_addr = busaddroff;
- ai->rxfids[i].rx_desc.valid = 1;
- ai->rxfids[i].rx_desc.len = PKTSIZE;
- ai->rxfids[i].rx_desc.rdy = 0;
-
- pciaddroff += sizeof(RxFid);
- busaddroff += PKTSIZE;
- vpackoff += PKTSIZE;
- }
-
- /* TX descriptor setup */
- for (i = 0; i < MPI_MAX_FIDS; i++) {
- ai->txfids[i].card_ram_off = pciaddroff;
- ai->txfids[i].virtual_host_addr = vpackoff;
- ai->txfids[i].tx_desc.valid = 1;
- ai->txfids[i].tx_desc.host_addr = busaddroff;
- memcpy(ai->txfids[i].virtual_host_addr,
- &wifictlhdr8023, sizeof(wifictlhdr8023));
-
- pciaddroff += sizeof(TxFid);
- busaddroff += PKTSIZE;
- vpackoff += PKTSIZE;
- }
- ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
-
- /* Rid descriptor setup */
- ai->config_desc.card_ram_off = pciaddroff;
- ai->config_desc.virtual_host_addr = vpackoff;
- ai->config_desc.rid_desc.host_addr = busaddroff;
- ai->ridbus = busaddroff;
- ai->config_desc.rid_desc.rid = 0;
- ai->config_desc.rid_desc.len = RIDSIZE;
- ai->config_desc.rid_desc.valid = 1;
- pciaddroff += sizeof(Rid);
- busaddroff += RIDSIZE;
- vpackoff += RIDSIZE;
-
- /* Tell card about descriptors */
- if (mpi_init_descriptors (ai) != SUCCESS)
- goto free_shared;
-
- return 0;
- free_shared:
- dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared,
- ai->shared_dma);
- free_auxmap:
- iounmap(ai->pciaux);
- free_memmap:
- iounmap(ai->pcimem);
- free_region2:
- release_mem_region(aux_start, aux_len);
- free_region1:
- release_mem_region(mem_start, mem_len);
- out:
- return rc;
-}
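-
-/*
- * Resulting shared-memory layout (one PKTSIZE slot per RX FID, one per
- * TX FID, then RIDSIZE bytes for RID transfers):
- *
- *   shared_dma: [RX bufs 0..MPI_MAX_FIDS-1][TX bufs][RID buffer]
- *
- * The matching descriptors live in card AUX memory starting at
- * AUX_OFFSET, in the same order.
- */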
-
-static const struct header_ops airo_header_ops = {
- .parse = wll_header_parse,
-};
-
-static const struct net_device_ops airo11_netdev_ops = {
- .ndo_open = airo_open,
- .ndo_stop = airo_close,
- .ndo_start_xmit = airo_start_xmit11,
- .ndo_get_stats = airo_get_stats,
- .ndo_set_mac_address = airo_set_mac_address,
- .ndo_siocdevprivate = airo_siocdevprivate,
-};
-
-static void wifi_setup(struct net_device *dev)
-{
- dev->netdev_ops = &airo11_netdev_ops;
- dev->header_ops = &airo_header_ops;
- dev->wireless_handlers = &airo_handler_def;
-
- dev->type = ARPHRD_IEEE80211;
- dev->hard_header_len = ETH_HLEN;
- dev->mtu = AIRO_DEF_MTU;
- dev->min_mtu = 68;
- dev->max_mtu = MIC_MSGLEN_MAX;
- dev->addr_len = ETH_ALEN;
- dev->tx_queue_len = 100;
-
- eth_broadcast_addr(dev->broadcast);
-
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
-}
-
-static struct net_device *init_wifidev(struct airo_info *ai,
- struct net_device *ethdev)
-{
- int err;
- struct net_device *dev = alloc_netdev(0, "wifi%d", NET_NAME_UNKNOWN,
- wifi_setup);
- if (!dev)
- return NULL;
- dev->ml_priv = ethdev->ml_priv;
- dev->irq = ethdev->irq;
- dev->base_addr = ethdev->base_addr;
- dev->wireless_data = ethdev->wireless_data;
- SET_NETDEV_DEV(dev, ethdev->dev.parent);
- eth_hw_addr_inherit(dev, ethdev);
- err = register_netdev(dev);
- if (err<0) {
- free_netdev(dev);
- return NULL;
- }
- return dev;
-}
-
-static int reset_card(struct net_device *dev, int lock)
-{
- struct airo_info *ai = dev->ml_priv;
-
- if (lock && down_interruptible(&ai->sem))
- return -1;
- waitbusy (ai);
- OUT4500(ai, COMMAND, CMD_SOFTRESET);
- msleep(200);
- waitbusy (ai);
- msleep(200);
- if (lock)
- up(&ai->sem);
- return 0;
-}
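-
-/* A soft reset is two waitbusy() rounds with 200 ms sleeps in between;
- * _init_airo_card() waits a further 400 ms after this before touching
- * the card. */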
-
-#define AIRO_MAX_NETWORK_COUNT 64
-static int airo_networks_allocate(struct airo_info *ai)
-{
- if (ai->networks)
- return 0;
-
- ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
- GFP_KERNEL);
- if (!ai->networks) {
- airo_print_warn("", "Out of memory allocating beacons");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void airo_networks_free(struct airo_info *ai)
-{
- kfree(ai->networks);
- ai->networks = NULL;
-}
-
-static void airo_networks_initialize(struct airo_info *ai)
-{
- int i;
-
- INIT_LIST_HEAD(&ai->network_free_list);
- INIT_LIST_HEAD(&ai->network_list);
- for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
- list_add_tail(&ai->networks[i].list,
- &ai->network_free_list);
-}
-
-static const struct net_device_ops airo_netdev_ops = {
- .ndo_open = airo_open,
- .ndo_stop = airo_close,
- .ndo_start_xmit = airo_start_xmit,
- .ndo_get_stats = airo_get_stats,
- .ndo_set_rx_mode = airo_set_multicast_list,
- .ndo_set_mac_address = airo_set_mac_address,
- .ndo_siocdevprivate = airo_siocdevprivate,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static const struct net_device_ops mpi_netdev_ops = {
- .ndo_open = airo_open,
- .ndo_stop = airo_close,
- .ndo_start_xmit = mpi_start_xmit,
- .ndo_get_stats = airo_get_stats,
- .ndo_set_rx_mode = airo_set_multicast_list,
- .ndo_set_mac_address = airo_set_mac_address,
- .ndo_siocdevprivate = airo_siocdevprivate,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static struct net_device *_init_airo_card(unsigned short irq, int port,
- int is_pcmcia, struct pci_dev *pci,
- struct device *dmdev)
-{
- struct net_device *dev;
- struct airo_info *ai;
- int i, rc;
- CapabilityRid cap_rid;
-
- /* Create the network device object. */
- dev = alloc_netdev(sizeof(*ai), "", NET_NAME_UNKNOWN, ether_setup);
- if (!dev) {
- airo_print_err("", "Couldn't alloc_etherdev");
- return NULL;
- }
-
- ai = dev->ml_priv = netdev_priv(dev);
- ai->wifidev = NULL;
- ai->flags = 1 << FLAG_RADIO_DOWN;
- ai->jobs = 0;
- ai->dev = dev;
- if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
- airo_print_dbg("", "Found an MPI350 card");
- set_bit(FLAG_MPI, &ai->flags);
- }
- spin_lock_init(&ai->aux_lock);
- sema_init(&ai->sem, 1);
- ai->config.len = 0;
- ai->pci = pci;
- init_waitqueue_head (&ai->thr_wait);
- ai->tfm = NULL;
- add_airo_dev(ai);
- ai->APList.len = cpu_to_le16(sizeof(struct APListRid));
-
- if (airo_networks_allocate (ai))
- goto err_out_free;
- airo_networks_initialize (ai);
-
- skb_queue_head_init (&ai->txq);
-
- /* The Airo-specific entries in the device structure. */
- if (test_bit(FLAG_MPI,&ai->flags))
- dev->netdev_ops = &mpi_netdev_ops;
- else
- dev->netdev_ops = &airo_netdev_ops;
- dev->wireless_handlers = &airo_handler_def;
- ai->wireless_data.spy_data = &ai->spy_data;
- dev->wireless_data = &ai->wireless_data;
- dev->irq = irq;
- dev->base_addr = port;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->max_mtu = MIC_MSGLEN_MAX;
-
- SET_NETDEV_DEV(dev, dmdev);
-
- reset_card (dev, 1);
- msleep(400);
-
- if (!is_pcmcia) {
- if (!request_region(dev->base_addr, 64, DRV_NAME)) {
- rc = -EBUSY;
- airo_print_err(dev->name, "Couldn't request region");
- goto err_out_nets;
- }
- }
-
- if (test_bit(FLAG_MPI,&ai->flags)) {
- if (mpi_map_card(ai, pci)) {
- airo_print_err("", "Could not map memory");
- goto err_out_res;
- }
- }
-
- if (probe) {
- if (setup_card(ai, dev, 1) != SUCCESS) {
- airo_print_err(dev->name, "MAC could not be enabled");
- rc = -EIO;
- goto err_out_map;
- }
- } else if (!test_bit(FLAG_MPI,&ai->flags)) {
- ai->bap_read = fast_bap_read;
- set_bit(FLAG_FLASHING, &ai->flags);
- }
-
- strcpy(dev->name, "eth%d");
- rc = register_netdev(dev);
- if (rc) {
- airo_print_err(dev->name, "Couldn't register_netdev");
- goto err_out_map;
- }
- ai->wifidev = init_wifidev(ai, dev);
- if (!ai->wifidev)
- goto err_out_reg;
-
- rc = readCapabilityRid(ai, &cap_rid, 1);
- if (rc != SUCCESS) {
- rc = -EIO;
- goto err_out_wifi;
- }
- /* WEP capability discovery */
- ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
- ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
-
- airo_print_info(dev->name, "Firmware version %x.%x.%02d",
- ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
- (le16_to_cpu(cap_rid.softVer) & 0xFF),
- le16_to_cpu(cap_rid.softSubVer));
-
- /* Test for WPA support */
- /* Only firmware versions 5.30.17 or better can do WPA */
- if (le16_to_cpu(cap_rid.softVer) > 0x530
- || (le16_to_cpu(cap_rid.softVer) == 0x530
- && le16_to_cpu(cap_rid.softSubVer) >= 17)) {
- airo_print_info(ai->dev->name, "WPA supported.");
-
- set_bit(FLAG_WPA_CAPABLE, &ai->flags);
- ai->bssListFirst = RID_WPA_BSSLISTFIRST;
- ai->bssListNext = RID_WPA_BSSLISTNEXT;
- ai->bssListRidLen = sizeof(BSSListRid);
- } else {
- airo_print_info(ai->dev->name, "WPA unsupported with firmware "
- "versions older than 5.30.17.");
-
- ai->bssListFirst = RID_BSSLISTFIRST;
- ai->bssListNext = RID_BSSLISTNEXT;
- ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
- }
-
- set_bit(FLAG_REGISTERED,&ai->flags);
- airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
-
- /* Allocate the transmit buffers */
- if (probe && !test_bit(FLAG_MPI,&ai->flags))
- for (i = 0; i < MAX_FIDS; i++)
- ai->fids[i] = transmit_allocate(ai, AIRO_DEF_MTU, i>=MAX_FIDS/2);
-
- if (setup_proc_entry(dev, dev->ml_priv) < 0)
- goto err_out_wifi;
-
- return dev;
-
-err_out_wifi:
- unregister_netdev(ai->wifidev);
- free_netdev(ai->wifidev);
-err_out_reg:
- unregister_netdev(dev);
-err_out_map:
- if (test_bit(FLAG_MPI,&ai->flags) && pci) {
- dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared,
- ai->shared_dma);
- iounmap(ai->pciaux);
- iounmap(ai->pcimem);
- mpi_unmap_card(ai->pci);
- }
-err_out_res:
- if (!is_pcmcia)
- release_region(dev->base_addr, 64);
-err_out_nets:
- airo_networks_free(ai);
-err_out_free:
- del_airo_dev(ai);
- free_netdev(dev);
- return NULL;
-}
-
-struct net_device *init_airo_card(unsigned short irq, int port, int is_pcmcia,
- struct device *dmdev)
-{
- return _init_airo_card (irq, port, is_pcmcia, NULL, dmdev);
-}
-
-EXPORT_SYMBOL(init_airo_card);
-
-static int waitbusy (struct airo_info *ai)
-{
- int delay = 0;
- while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
- udelay (10);
- if ((++delay % 20) == 0)
- OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
- }
- return delay < 10000;
-}
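-
-/* waitbusy() polls COMMAND_BUSY for up to ~100 ms, nudging the card
- * with EV_CLEARCOMMANDBUSY every 200 us in case the busy bit is stuck,
- * and returns non-zero on success. */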
-
-int reset_airo_card(struct net_device *dev)
-{
- int i;
- struct airo_info *ai = dev->ml_priv;
-
- if (reset_card (dev, 1))
- return -1;
-
- if (setup_card(ai, dev, 1) != SUCCESS) {
- airo_print_err(dev->name, "MAC could not be enabled");
- return -1;
- }
- airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
- /* Allocate the transmit buffers if needed */
- if (!test_bit(FLAG_MPI,&ai->flags))
- for (i = 0; i < MAX_FIDS; i++)
- ai->fids[i] = transmit_allocate (ai, AIRO_DEF_MTU, i>=MAX_FIDS/2);
-
- enable_interrupts(ai);
- netif_wake_queue(dev);
- return 0;
-}
-
-EXPORT_SYMBOL(reset_airo_card);
-
-static void airo_send_event(struct net_device *dev)
-{
- struct airo_info *ai = dev->ml_priv;
- union iwreq_data wrqu;
- StatusRid status_rid;
-
- clear_bit(JOB_EVENT, &ai->jobs);
- PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
- up(&ai->sem);
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- memcpy(wrqu.ap_addr.sa_data, status_rid.bssid[0], ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void airo_process_scan_results (struct airo_info *ai)
-{
- union iwreq_data wrqu;
- BSSListRid bss;
- int rc;
- BSSListElement * loop_net;
- BSSListElement * tmp_net;
-
- /* Blow away current list of scan results */
- list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
- list_move_tail (&loop_net->list, &ai->network_free_list);
- /* Don't blow away ->list, just BSS data */
- memset (loop_net, 0, sizeof (loop_net->bss));
- }
-
- /* Try to read the first entry of the scan result */
- rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
- if ((rc) || (bss.index == cpu_to_le16(0xffff))) {
- /* No scan results */
- goto out;
- }
-
- /* Read and parse all entries */
- tmp_net = NULL;
- while ((!rc) && (bss.index != cpu_to_le16(0xffff))) {
- /* Grab a network off the free list */
- if (!list_empty(&ai->network_free_list)) {
- tmp_net = list_entry(ai->network_free_list.next,
- BSSListElement, list);
- list_del(ai->network_free_list.next);
- }
-
- if (tmp_net != NULL) {
- memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
- list_add_tail(&tmp_net->list, &ai->network_list);
- tmp_net = NULL;
- }
-
- /* Read next entry */
- rc = PC4500_readrid(ai, ai->bssListNext,
- &bss, ai->bssListRidLen, 0);
- }
-
-out:
- /* write APList back (we cleared it in airo_set_scan) */
- disable_MAC(ai, 2);
- writeAPListRid(ai, &ai->APList, 0);
- enable_MAC(ai, 0);
-
- ai->scan_timeout = 0;
- clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
- up(&ai->sem);
-
- /* Send an empty event to user space.
- * We don't send the received data on
- * the event because it would require
- * us to do complex transcoding, and
- * we want to minimise the work done in
- * the irq handler. Use a request to
- * extract the data - Jean II */
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
-}
-
-static int airo_thread(void *data)
-{
- struct net_device *dev = data;
- struct airo_info *ai = dev->ml_priv;
- int locked;
-
- set_freezable();
- while (1) {
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- if (test_bit(JOB_DIE, &ai->jobs))
- break;
-
- if (ai->jobs) {
- locked = down_interruptible(&ai->sem);
- } else {
- wait_queue_entry_t wait;
-
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&ai->thr_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ai->jobs)
- break;
- if (ai->expires || ai->scan_timeout) {
- if (ai->scan_timeout &&
- time_after_eq(jiffies, ai->scan_timeout)) {
- set_bit(JOB_SCAN_RESULTS, &ai->jobs);
- break;
- } else if (ai->expires &&
- time_after_eq(jiffies, ai->expires)) {
- set_bit(JOB_AUTOWEP, &ai->jobs);
- break;
- }
- if (!kthread_should_stop() &&
- !freezing(current)) {
- unsigned long wake_at;
- if (!ai->expires || !ai->scan_timeout) {
- wake_at = max(ai->expires,
- ai->scan_timeout);
- } else {
- wake_at = min(ai->expires,
- ai->scan_timeout);
- }
- schedule_timeout(wake_at - jiffies);
- continue;
- }
- } else if (!kthread_should_stop() &&
- !freezing(current)) {
- schedule();
- continue;
- }
- break;
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&ai->thr_wait, &wait);
- locked = 1;
- }
-
- if (locked)
- continue;
-
- if (test_bit(JOB_DIE, &ai->jobs)) {
- up(&ai->sem);
- break;
- }
-
- if (ai->power.event || test_bit(FLAG_FLASHING, &ai->flags)) {
- up(&ai->sem);
- continue;
- }
-
- if (test_bit(JOB_XMIT, &ai->jobs))
- airo_end_xmit(dev, true);
- else if (test_bit(JOB_XMIT11, &ai->jobs))
- airo_end_xmit11(dev, true);
- else if (test_bit(JOB_STATS, &ai->jobs))
- airo_read_stats(dev);
- else if (test_bit(JOB_PROMISC, &ai->jobs))
- airo_set_promisc(ai, true);
- else if (test_bit(JOB_MIC, &ai->jobs))
- micinit(ai);
- else if (test_bit(JOB_EVENT, &ai->jobs))
- airo_send_event(dev);
- else if (test_bit(JOB_AUTOWEP, &ai->jobs))
- timer_func(dev);
- else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs))
- airo_process_scan_results(ai);
- else /* Shouldn't get here, but we make sure to unlock */
- up(&ai->sem);
- }
-
- return 0;
-}
-
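-/*
- * 802.11 MAC header size implied by the frame-control field: control
- * frames are 10 bytes (CTS/ACK, one address) or 16 bytes (two
- * addresses), data frames with both ToDS and FromDS set (WDS) carry a
- * fourth address for 30 bytes, and everything else uses the common
- * 24-byte header.
- */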
-static int header_len(__le16 ctl)
-{
- u16 fc = le16_to_cpu(ctl);
- switch (fc & 0xc) {
- case 4:
- if ((fc & 0xe0) == 0xc0)
- return 10; /* one-address control packet */
- return 16; /* two-address control packet */
- case 8:
- if ((fc & 0x300) == 0x300)
- return 30; /* WDS packet */
- }
- return 24;
-}
-
-static void airo_handle_cisco_mic(struct airo_info *ai)
-{
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags)) {
- set_bit(JOB_MIC, &ai->jobs);
- wake_up_interruptible(&ai->thr_wait);
- }
-}
-
-/* Airo Status codes */
-#define STAT_NOBEACON 0x8000 /* Loss of sync - missed beacons */
-#define STAT_MAXRETRIES 0x8001 /* Loss of sync - max retries */
-#define STAT_MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/
-#define STAT_FORCELOSS 0x8003 /* Loss of sync - host request */
-#define STAT_TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */
-#define STAT_DEAUTH 0x8100 /* low byte is 802.11 reason code */
-#define STAT_DISASSOC 0x8200 /* low byte is 802.11 reason code */
-#define STAT_ASSOC_FAIL 0x8400 /* low byte is 802.11 reason code */
-#define STAT_AUTH_FAIL 0x0300 /* low byte is 802.11 reason code */
-#define STAT_ASSOC 0x0400 /* Associated */
-#define STAT_REASSOC 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */
-
-static void airo_print_status(const char *devname, u16 status)
-{
- u8 reason = status & 0xFF;
-
- switch (status & 0xFF00) {
- case STAT_NOBEACON:
- switch (status) {
- case STAT_NOBEACON:
- airo_print_dbg(devname, "link lost (missed beacons)");
- break;
- case STAT_MAXRETRIES:
- case STAT_MAXARL:
- airo_print_dbg(devname, "link lost (max retries)");
- break;
- case STAT_FORCELOSS:
- airo_print_dbg(devname, "link lost (local choice)");
- break;
- case STAT_TSFSYNC:
- airo_print_dbg(devname, "link lost (TSF sync lost)");
- break;
- default:
- airo_print_dbg(devname, "unknown status %x\n", status);
- break;
- }
- break;
- case STAT_DEAUTH:
- airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
- break;
- case STAT_DISASSOC:
- airo_print_dbg(devname, "disassociated (reason: %d)", reason);
- break;
- case STAT_ASSOC_FAIL:
- airo_print_dbg(devname, "association failed (reason: %d)",
- reason);
- break;
- case STAT_AUTH_FAIL:
- airo_print_dbg(devname, "authentication failed (reason: %d)",
- reason);
- break;
- case STAT_ASSOC:
- case STAT_REASSOC:
- break;
- default:
- airo_print_dbg(devname, "unknown status %x\n", status);
- break;
- }
-}
-
-static void airo_handle_link(struct airo_info *ai)
-{
- union iwreq_data wrqu;
- int scan_forceloss = 0;
- u16 status;
-
- /* Get new status and acknowledge the link change */
- status = le16_to_cpu(IN4500(ai, LINKSTAT));
- OUT4500(ai, EVACK, EV_LINK);
-
- if ((status == STAT_FORCELOSS) && (ai->scan_timeout > 0))
- scan_forceloss = 1;
-
- airo_print_status(ai->dev->name, status);
-
- if ((status == STAT_ASSOC) || (status == STAT_REASSOC)) {
- if (auto_wep)
- ai->expires = 0;
- if (ai->list_bss_task)
- wake_up_process(ai->list_bss_task);
- set_bit(FLAG_UPDATE_UNI, &ai->flags);
- set_bit(FLAG_UPDATE_MULTI, &ai->flags);
-
- set_bit(JOB_EVENT, &ai->jobs);
- wake_up_interruptible(&ai->thr_wait);
-
- netif_carrier_on(ai->dev);
- } else if (!scan_forceloss) {
- if (auto_wep && !ai->expires) {
- ai->expires = RUN_AT(3*HZ);
- wake_up_interruptible(&ai->thr_wait);
- }
-
- /* Send event to user space */
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
- netif_carrier_off(ai->dev);
- } else {
- netif_carrier_off(ai->dev);
- }
-}
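-
-/* Association (STAT_ASSOC/STAT_REASSOC) turns the carrier on, wakes any
- * scan waiter and queues JOB_EVENT so user space gets a SIOCGIWAP
- * notification; any other status (except a forced loss triggered by our
- * own scan) reports a zeroed AP address and drops the carrier. */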
-
-static void airo_handle_rx(struct airo_info *ai)
-{
- struct sk_buff *skb = NULL;
- __le16 fc, v, *buffer, tmpbuf[4];
- u16 len, hdrlen = 0, gap, fid;
- struct rx_hdr hdr;
- int success = 0;
-
- if (test_bit(FLAG_MPI, &ai->flags)) {
- if (test_bit(FLAG_802_11, &ai->flags))
- mpi_receive_802_11(ai);
- else
- mpi_receive_802_3(ai);
- OUT4500(ai, EVACK, EV_RX);
- return;
- }
-
- fid = IN4500(ai, RXFID);
-
- /* Get the packet length */
- if (test_bit(FLAG_802_11, &ai->flags)) {
- bap_setup (ai, fid, 4, BAP0);
- bap_read (ai, (__le16*)&hdr, sizeof(hdr), BAP0);
- /* Bad CRC. Ignore packet */
- if (le16_to_cpu(hdr.status) & 2)
- hdr.len = 0;
- if (ai->wifidev == NULL)
- hdr.len = 0;
- } else {
- bap_setup(ai, fid, 0x36, BAP0);
- bap_read(ai, &hdr.len, 2, BAP0);
- }
- len = le16_to_cpu(hdr.len);
-
- if (len > AIRO_DEF_MTU) {
- airo_print_err(ai->dev->name, "Bad size %d", len);
- goto done;
- }
- if (len == 0)
- goto done;
-
- if (test_bit(FLAG_802_11, &ai->flags)) {
- bap_read(ai, &fc, sizeof (fc), BAP0);
- hdrlen = header_len(fc);
- } else
- hdrlen = ETH_ALEN * 2;
-
- skb = dev_alloc_skb(len + hdrlen + 2 + 2);
- if (!skb) {
- ai->dev->stats.rx_dropped++;
- goto done;
- }
-
- skb_reserve(skb, 2); /* This way the IP header is aligned */
- buffer = skb_put(skb, len + hdrlen);
- if (test_bit(FLAG_802_11, &ai->flags)) {
- buffer[0] = fc;
- bap_read(ai, buffer + 1, hdrlen - 2, BAP0);
- if (hdrlen == 24)
- bap_read(ai, tmpbuf, 6, BAP0);
-
- bap_read(ai, &v, sizeof(v), BAP0);
- gap = le16_to_cpu(v);
- if (gap) {
- if (gap <= 8) {
- bap_read(ai, tmpbuf, gap, BAP0);
- } else {
- airo_print_err(ai->dev->name, "gaplen too "
- "big. Problems will follow...");
- }
- }
- bap_read(ai, buffer + hdrlen/2, len, BAP0);
- } else {
- MICBuffer micbuf;
-
- bap_read(ai, buffer, ETH_ALEN * 2, BAP0);
- if (ai->micstats.enabled) {
- bap_read(ai, (__le16 *) &micbuf, sizeof (micbuf), BAP0);
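- /* A typelen above 1500 (0x05DC) is a plain EtherType, so no
- * MIC header follows: seek back to just after the addresses */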
- if (ntohs(micbuf.typelen) > 0x05DC)
- bap_setup(ai, fid, 0x44, BAP0);
- else {
- if (len <= sizeof (micbuf)) {
- dev_kfree_skb_irq(skb);
- goto done;
- }
-
- len -= sizeof(micbuf);
- skb_trim(skb, len + hdrlen);
- }
- }
-
- bap_read(ai, buffer + ETH_ALEN, len, BAP0);
- if (decapsulate(ai, &micbuf, (etherHead*) buffer, len))
- dev_kfree_skb_irq (skb);
- else
- success = 1;
- }
-
-#ifdef WIRELESS_SPY
- if (success && (ai->spy_data.spy_number > 0)) {
- char *sa;
- struct iw_quality wstats;
-
- /* Prepare spy data : addr + qual */
- if (!test_bit(FLAG_802_11, &ai->flags)) {
- sa = (char *) buffer + 6;
- bap_setup(ai, fid, 8, BAP0);
- bap_read(ai, (__le16 *) hdr.rssi, 2, BAP0);
- } else
- sa = (char *) buffer + 10;
- wstats.qual = hdr.rssi[0];
- if (ai->rssi)
- wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
- else
- wstats.level = (hdr.rssi[1] + 321) / 2;
- wstats.noise = ai->wstats.qual.noise;
- wstats.updated = IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_QUAL_UPDATED
- | IW_QUAL_DBM;
- /* Update spy records */
- wireless_spy_update(ai->dev, sa, &wstats);
- }
-#endif /* WIRELESS_SPY */
-
-done:
- OUT4500(ai, EVACK, EV_RX);
-
- if (success) {
- if (test_bit(FLAG_802_11, &ai->flags)) {
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->dev = ai->wifidev;
- skb->protocol = htons(ETH_P_802_2);
- } else
- skb->protocol = eth_type_trans(skb, ai->dev);
- skb->ip_summed = CHECKSUM_NONE;
-
- netif_rx(skb);
- }
-}
-
-static void airo_handle_tx(struct airo_info *ai, u16 status)
-{
- int i, index = -1;
- u16 fid;
-
- if (test_bit(FLAG_MPI, &ai->flags)) {
- unsigned long flags;
-
- if (status & EV_TXEXC)
- get_tx_error(ai, -1);
-
- spin_lock_irqsave(&ai->aux_lock, flags);
- if (!skb_queue_empty(&ai->txq)) {
- spin_unlock_irqrestore(&ai->aux_lock, flags);
- mpi_send_packet(ai->dev);
- } else {
- clear_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock, flags);
- netif_wake_queue(ai->dev);
- }
- OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
- return;
- }
-
- fid = IN4500(ai, TXCOMPLFID);
-
- for (i = 0; i < MAX_FIDS; i++) {
- if ((ai->fids[i] & 0xffff) == fid)
- index = i;
- }
-
- if (index != -1) {
- if (status & EV_TXEXC)
- get_tx_error(ai, index);
-
- OUT4500(ai, EVACK, status & (EV_TX | EV_TXEXC));
-
- /* Set up to be used again */
- ai->fids[index] &= 0xffff;
- if (index < MAX_FIDS / 2) {
- if (!test_bit(FLAG_PENDING_XMIT, &ai->flags))
- netif_wake_queue(ai->dev);
- } else {
- if (!test_bit(FLAG_PENDING_XMIT11, &ai->flags))
- netif_wake_queue(ai->wifidev);
- }
- } else {
- OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
- airo_print_err(ai->dev->name, "Unallocated FID was used to xmit");
- }
-}
-
-static irqreturn_t airo_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- u16 status, savedInterrupts = 0;
- struct airo_info *ai = dev->ml_priv;
- int handled = 0;
-
- if (!netif_device_present(dev))
- return IRQ_NONE;
-
- for (;;) {
- status = IN4500(ai, EVSTAT);
- if (!(status & STATUS_INTS) || (status == 0xffff))
- break;
-
- handled = 1;
-
- if (status & EV_AWAKE) {
- OUT4500(ai, EVACK, EV_AWAKE);
- OUT4500(ai, EVACK, EV_AWAKE);
- }
-
- if (!savedInterrupts) {
- savedInterrupts = IN4500(ai, EVINTEN);
- OUT4500(ai, EVINTEN, 0);
- }
-
- if (status & EV_MIC) {
- OUT4500(ai, EVACK, EV_MIC);
- airo_handle_cisco_mic(ai);
- }
-
- if (status & EV_LINK) {
- /* Link status changed */
- airo_handle_link(ai);
- }
-
- /* Check to see if there is something to receive */
- if (status & EV_RX)
- airo_handle_rx(ai);
-
- /* Check to see if a packet has been transmitted */
- if (status & (EV_TX | EV_TXCPY | EV_TXEXC))
- airo_handle_tx(ai, status);
-
- if (status & ~STATUS_INTS & ~IGNORE_INTS) {
- airo_print_warn(ai->dev->name, "Got weird status %x",
- status & ~STATUS_INTS & ~IGNORE_INTS);
- }
- }
-
- if (savedInterrupts)
- OUT4500(ai, EVINTEN, savedInterrupts);
-
- return IRQ_RETVAL(handled);
-}
-
-/*
- * Routines to talk to the card
- */
-
-/*
- * This was originally written for the 4500, hence the name.
- * NOTE: If used in 8-bit mode on an SMP machine, bad things will happen!
- * Why would someone do 8-bit IO on an SMP machine?!?
- */
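-/* (On MPI350 cards the same registers sit at twice the offset, hence
- * the "reg <<= 1" below; the register-level protocol is otherwise
- * identical.) */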
-static void OUT4500(struct airo_info *ai, u16 reg, u16 val)
-{
- if (test_bit(FLAG_MPI,&ai->flags))
- reg <<= 1;
- if (!do8bitIO)
- outw(val, ai->dev->base_addr + reg);
- else {
- outb(val & 0xff, ai->dev->base_addr + reg);
- outb(val >> 8, ai->dev->base_addr + reg + 1);
- }
-}
-
-static u16 IN4500(struct airo_info *ai, u16 reg)
-{
- unsigned short rc;
-
- if (test_bit(FLAG_MPI,&ai->flags))
- reg <<= 1;
- if (!do8bitIO)
- rc = inw(ai->dev->base_addr + reg);
- else {
- rc = inb(ai->dev->base_addr + reg);
- rc += ((int)inb(ai->dev->base_addr + reg + 1)) << 8;
- }
- return rc;
-}
-
-static int enable_MAC(struct airo_info *ai, int lock)
-{
- int rc;
- Cmd cmd;
- Resp rsp;
-
- /* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions
- * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down"
- * Note : we could try to use !netif_running(dev) in enable_MAC()
- * instead of this flag, but I don't trust it *within* the
- * open/close functions, and testing both flags together is
- * "cheaper" - Jean II */
- if (ai->flags & FLAG_RADIO_MASK) return SUCCESS;
-
- if (lock && down_interruptible(&ai->sem))
- return -ERESTARTSYS;
-
- if (!test_bit(FLAG_ENABLED, &ai->flags)) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = MAC_ENABLE;
- rc = issuecommand(ai, &cmd, &rsp, true);
- if (rc == SUCCESS)
- set_bit(FLAG_ENABLED, &ai->flags);
- } else
- rc = SUCCESS;
-
- if (lock)
- up(&ai->sem);
-
- if (rc)
- airo_print_err(ai->dev->name, "Cannot enable MAC");
- else if ((rsp.status & 0xFF00) != 0) {
- airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, "
- "rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2);
- rc = ERROR;
- }
- return rc;
-}
-
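-/*
- * The lock argument is a tri-state: 0 means the caller already holds
- * ai->sem, 1 means take and release it here, and 2 behaves like 0 but
- * additionally leaves the carrier state alone (see the scan-results
- * path).
- */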
-static void disable_MAC(struct airo_info *ai, int lock)
-{
- Cmd cmd;
- Resp rsp;
-
- if (lock == 1 && down_interruptible(&ai->sem))
- return;
-
- if (test_bit(FLAG_ENABLED, &ai->flags)) {
- if (lock != 2) /* lock == 2 means don't disable carrier */
- netif_carrier_off(ai->dev);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = MAC_DISABLE; // disable in case already enabled
- issuecommand(ai, &cmd, &rsp, true);
- clear_bit(FLAG_ENABLED, &ai->flags);
- }
- if (lock == 1)
- up(&ai->sem);
-}
-
-static void enable_interrupts(struct airo_info *ai)
-{
- /* Enable the interrupts */
- OUT4500(ai, EVINTEN, STATUS_INTS);
-}
-
-static void disable_interrupts(struct airo_info *ai)
-{
- OUT4500(ai, EVINTEN, 0);
-}
-
-static void mpi_receive_802_3(struct airo_info *ai)
-{
- RxFid rxd;
- int len = 0;
- struct sk_buff *skb;
- char *buffer;
- int off = 0;
- MICBuffer micbuf;
-
- memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
- /* Make sure we got something */
- if (rxd.rdy && rxd.valid == 0) {
- len = rxd.len + 12;
- if (len < 12 || len > 2048)
- goto badrx;
-
- skb = dev_alloc_skb(len);
- if (!skb) {
- ai->dev->stats.rx_dropped++;
- goto badrx;
- }
- buffer = skb_put(skb, len);
- memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
- if (ai->micstats.enabled) {
- memcpy(&micbuf,
- ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2,
- sizeof(micbuf));
- if (ntohs(micbuf.typelen) <= 0x05DC) {
- if (len <= sizeof(micbuf) + ETH_ALEN * 2)
- goto badmic;
-
- off = sizeof(micbuf);
- skb_trim (skb, len - off);
- }
- }
- memcpy(buffer + ETH_ALEN * 2,
- ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2 + off,
- len - ETH_ALEN * 2 - off);
- if (decapsulate (ai, &micbuf, (etherHead*)buffer, len - off - ETH_ALEN * 2)) {
-badmic:
- dev_kfree_skb_irq (skb);
- goto badrx;
- }
-#ifdef WIRELESS_SPY
- if (ai->spy_data.spy_number > 0) {
- char *sa;
- struct iw_quality wstats;
- /* Prepare spy data : addr + qual */
- sa = buffer + ETH_ALEN;
- wstats.qual = 0; /* XXX Where do I get that info from ??? */
- wstats.level = 0;
- wstats.updated = 0;
- /* Update spy records */
- wireless_spy_update(ai->dev, sa, &wstats);
- }
-#endif /* WIRELESS_SPY */
-
- skb->ip_summed = CHECKSUM_NONE;
- skb->protocol = eth_type_trans(skb, ai->dev);
- netif_rx(skb);
- }
-badrx:
- if (rxd.valid == 0) {
- rxd.valid = 1;
- rxd.rdy = 0;
- rxd.len = PKTSIZE;
- memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd));
- }
-}
-
-static void mpi_receive_802_11(struct airo_info *ai)
-{
- RxFid rxd;
- struct sk_buff *skb = NULL;
- u16 len, hdrlen = 0;
- __le16 fc;
- struct rx_hdr hdr;
- u16 gap;
- u16 *buffer;
- char *ptr = ai->rxfids[0].virtual_host_addr + 4;
-
- memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
- memcpy ((char *)&hdr, ptr, sizeof(hdr));
- ptr += sizeof(hdr);
- /* Bad CRC. Ignore packet */
- if (le16_to_cpu(hdr.status) & 2)
- hdr.len = 0;
- if (ai->wifidev == NULL)
- hdr.len = 0;
- len = le16_to_cpu(hdr.len);
- if (len > AIRO_DEF_MTU) {
- airo_print_err(ai->dev->name, "Bad size %d", len);
- goto badrx;
- }
- if (len == 0)
- goto badrx;
-
- fc = get_unaligned((__le16 *)ptr);
- hdrlen = header_len(fc);
-
- skb = dev_alloc_skb(len + hdrlen + 2);
- if (!skb) {
- ai->dev->stats.rx_dropped++;
- goto badrx;
- }
- buffer = skb_put(skb, len + hdrlen);
- memcpy ((char *)buffer, ptr, hdrlen);
- ptr += hdrlen;
- if (hdrlen == 24)
- ptr += 6;
- gap = get_unaligned_le16(ptr);
- ptr += sizeof(__le16);
- if (gap) {
- if (gap <= 8)
- ptr += gap;
- else
- airo_print_err(ai->dev->name,
- "gaplen too big. Problems will follow...");
- }
- memcpy ((char *)buffer + hdrlen, ptr, len);
- ptr += len;
-#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
- if (ai->spy_data.spy_number > 0) {
- char *sa;
- struct iw_quality wstats;
- /* Prepare spy data : addr + qual */
- sa = (char*)buffer + 10;
- wstats.qual = hdr.rssi[0];
- if (ai->rssi)
- wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
- else
- wstats.level = (hdr.rssi[1] + 321) / 2;
- wstats.noise = ai->wstats.qual.noise;
- wstats.updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- /* Update spy records */
- wireless_spy_update(ai->dev, sa, &wstats);
- }
-#endif /* IW_WIRELESS_SPY */
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->dev = ai->wifidev;
- skb->protocol = htons(ETH_P_802_2);
- skb->ip_summed = CHECKSUM_NONE;
- netif_rx(skb);
-
-badrx:
- if (rxd.valid == 0) {
- rxd.valid = 1;
- rxd.rdy = 0;
- rxd.len = PKTSIZE;
- memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd));
- }
-}
-
-static inline void set_auth_type(struct airo_info *local, int auth_type)
-{
- local->config.authType = auth_type;
- /* Cache the last auth type used (of AUTH_OPEN and AUTH_ENCRYPT).
- * Used by airo_set_auth()
- */
- if (auth_type == AUTH_OPEN || auth_type == AUTH_ENCRYPT)
- local->last_auth = auth_type;
-}
-
-static int noinline_for_stack airo_readconfig(struct airo_info *ai,
- struct net_device *dev, int lock)
-{
- int i, status;
- /* large on-stack variables, so don't inline this function;
- * maybe change to kmalloc
- */
- tdsRssiRid rssi_rid;
- CapabilityRid cap_rid;
-
- kfree(ai->SSID);
- ai->SSID = NULL;
- // general configuration (read/modify/write)
- status = readConfigRid(ai, lock);
- if (status != SUCCESS) return ERROR;
-
- status = readCapabilityRid(ai, &cap_rid, lock);
- if (status != SUCCESS) return ERROR;
-
- status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
- if (status == SUCCESS) {
- if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
- memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
- } else {
- kfree(ai->rssi);
- ai->rssi = NULL;
- if (cap_rid.softCap & cpu_to_le16(8))
- ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
- else
- airo_print_warn(ai->dev->name, "unknown received signal "
- "level scale");
- }
- ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
- set_auth_type(ai, AUTH_OPEN);
- ai->config.modulation = MOD_CCK;
-
- if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
- (cap_rid.extSoftCap & cpu_to_le16(1)) &&
- micsetup(ai) == SUCCESS) {
- ai->config.opmode |= MODE_MIC;
- set_bit(FLAG_MIC_CAPABLE, &ai->flags);
- }
-
- /* Save off the MAC */
- eth_hw_addr_set(dev, ai->config.macAddr);
-
- /* Check to see if there are any insmod configured
- rates to add */
- if (rates[0]) {
- memset(ai->config.rates, 0, sizeof(ai->config.rates));
- for (i = 0; i < 8 && rates[i]; i++) {
- ai->config.rates[i] = rates[i];
- }
- }
- set_bit (FLAG_COMMIT, &ai->flags);
-
- return SUCCESS;
-}
-
-
-static u16 setup_card(struct airo_info *ai, struct net_device *dev, int lock)
-{
- Cmd cmd;
- Resp rsp;
- int status;
- SsidRid mySsid;
- __le16 lastindex;
- WepKeyRid wkr;
- int rc;
-
- memset(&mySsid, 0, sizeof(mySsid));
- kfree (ai->flash);
- ai->flash = NULL;
-
- /* The NOP is the first step in getting the card going */
- cmd.cmd = NOP;
- cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
- if (lock && down_interruptible(&ai->sem))
- return ERROR;
- if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) {
- if (lock)
- up(&ai->sem);
- return ERROR;
- }
- disable_MAC(ai, 0);
-
- // Let's figure out if we need to use the AUX port
- if (!test_bit(FLAG_MPI,&ai->flags)) {
- cmd.cmd = CMD_ENABLEAUX;
- if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) {
- if (lock)
- up(&ai->sem);
- airo_print_err(ai->dev->name, "Error checking for AUX port");
- return ERROR;
- }
- if (!aux_bap || rsp.status & 0xff00) {
- ai->bap_read = fast_bap_read;
- airo_print_dbg(ai->dev->name, "Doing fast bap_reads");
- } else {
- ai->bap_read = aux_bap_read;
- airo_print_dbg(ai->dev->name, "Doing AUX bap_reads");
- }
- }
- if (lock)
- up(&ai->sem);
- if (ai->config.len == 0) {
- status = airo_readconfig(ai, dev, lock);
- if (status != SUCCESS)
- return ERROR;
- }
-
- /* Setup the SSIDs if present */
- if (ssids[0]) {
- int i;
- for (i = 0; i < 3 && ssids[i]; i++) {
- size_t len = strlen(ssids[i]);
- if (len > 32)
- len = 32;
- mySsid.ssids[i].len = cpu_to_le16(len);
- memcpy(mySsid.ssids[i].ssid, ssids[i], len);
- }
- mySsid.len = cpu_to_le16(sizeof(mySsid));
- }
-
- status = writeConfigRid(ai, lock);
- if (status != SUCCESS) return ERROR;
-
- /* Set up the SSID list */
- if (ssids[0]) {
- status = writeSsidRid(ai, &mySsid, lock);
- if (status != SUCCESS) return ERROR;
- }
-
- status = enable_MAC(ai, lock);
- if (status != SUCCESS)
- return ERROR;
-
- /* Grab the initial wep key, we gotta save it for auto_wep */
- rc = readWepKeyRid(ai, &wkr, 1, lock);
- if (rc == SUCCESS) do {
- lastindex = wkr.kindex;
- if (wkr.kindex == cpu_to_le16(0xffff)) {
- ai->defindex = wkr.mac[0];
- }
- rc = readWepKeyRid(ai, &wkr, 0, lock);
- } while (lastindex != wkr.kindex);
-
- try_auto_wep(ai);
-
- return SUCCESS;
-}
-
-static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp,
- bool may_sleep)
-{
- // I'm really paranoid about letting it run forever!
- int max_tries = 600000;
-
- if (IN4500(ai, EVSTAT) & EV_CMD)
- OUT4500(ai, EVACK, EV_CMD);
-
- OUT4500(ai, PARAM0, pCmd->parm0);
- OUT4500(ai, PARAM1, pCmd->parm1);
- OUT4500(ai, PARAM2, pCmd->parm2);
- OUT4500(ai, COMMAND, pCmd->cmd);
-
- while (max_tries-- && (IN4500(ai, EVSTAT) & EV_CMD) == 0) {
- if ((IN4500(ai, COMMAND)) == pCmd->cmd)
- // PC4500 didn't notice command, try again
- OUT4500(ai, COMMAND, pCmd->cmd);
- if (may_sleep && (max_tries & 255) == 0)
- cond_resched();
- }
-
- if (max_tries == -1) {
- airo_print_err(ai->dev->name,
- "Max tries exceeded when issuing command");
- if (IN4500(ai, COMMAND) & COMMAND_BUSY)
- OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
- return ERROR;
- }
-
- // command completed
- pRsp->status = IN4500(ai, STATUS);
- pRsp->rsp0 = IN4500(ai, RESP0);
- pRsp->rsp1 = IN4500(ai, RESP1);
- pRsp->rsp2 = IN4500(ai, RESP2);
- if ((pRsp->status & 0xff00) != 0 && pCmd->cmd != CMD_SOFTRESET)
- airo_print_err(ai->dev->name,
- "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x",
- pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1,
- pRsp->rsp2);
-
- // clear stuck command busy if necessary
- if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
- OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
- }
- // acknowledge processing the status/response
- OUT4500(ai, EVACK, EV_CMD);
-
- return SUCCESS;
-}
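-
-/*
- * The command handshake above: write PARAM0-2 and COMMAND, poll EVSTAT
- * for EV_CMD (re-issuing the command if the card seems to have missed
- * it), then read STATUS/RESP0-2 and acknowledge with EVACK.  A non-zero
- * high byte in the status word indicates a card-side error.
- */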
-
-/* Sets up the bap to start exchanging data. whichbap should
- * be one of the BAP0 or BAP1 defines. Locks should be held before
- * calling! */
-static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap)
-{
- int timeout = 50;
- int max_tries = 3;
-
- OUT4500(ai, SELECT0+whichbap, rid);
- OUT4500(ai, OFFSET0+whichbap, offset);
- while (1) {
- int status = IN4500(ai, OFFSET0+whichbap);
- if (status & BAP_BUSY) {
- /* This isn't really a timeout, but it's kinda
- close */
- if (timeout--) {
- continue;
- }
- } else if (status & BAP_ERR) {
- /* invalid rid or offset */
- airo_print_err(ai->dev->name, "BAP error %x %d",
- status, whichbap);
- return ERROR;
- } else if (status & BAP_DONE) { // success
- return SUCCESS;
- }
- if (!(max_tries--)) {
- airo_print_err(ai->dev->name,
- "BAP setup error too many retries\n");
- return ERROR;
- }
- // -- PC4500 missed it, try again
- OUT4500(ai, SELECT0+whichbap, rid);
- OUT4500(ai, OFFSET0+whichbap, offset);
- timeout = 50;
- }
-}
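-
-/*
- * The BAPs (buffer access paths) are the card's windowed data ports:
- * bap_setup() selects a RID or FID plus an offset, after which
- * bap_read()/bap_write() stream 16-bit words through the corresponding
- * DATA register.
- */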
-
-/* Should only be called by aux_bap_read. This aux function and the
- * following use concepts not documented in the developer's guide. I
- * got them from a patch given to me by Aironet. */
-static u16 aux_setup(struct airo_info *ai, u16 page,
- u16 offset, u16 *len)
-{
- u16 next;
-
- OUT4500(ai, AUXPAGE, page);
- OUT4500(ai, AUXOFF, 0);
- next = IN4500(ai, AUXDATA);
- *len = IN4500(ai, AUXDATA)&0xff;
- if (offset != 4) OUT4500(ai, AUXOFF, offset);
- return next;
-}
-
-/* requires call to bap_setup() first */
-static int aux_bap_read(struct airo_info *ai, __le16 *pu16Dst,
- int bytelen, int whichbap)
-{
- u16 len;
- u16 page;
- u16 offset;
- u16 next;
- int words;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&ai->aux_lock, flags);
- page = IN4500(ai, SWS0+whichbap);
- offset = IN4500(ai, SWS2+whichbap);
- next = aux_setup(ai, page, offset, &len);
- words = (bytelen+1)>>1;
-
- for (i = 0; i < words;) {
- int count;
- count = (len>>1) < (words-i) ? (len>>1) : (words-i);
- if (!do8bitIO)
- insw(ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i, count);
- else
- insb(ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i, count << 1);
- i += count;
- if (i<words) {
- next = aux_setup(ai, next, 4, &len);
- }
- }
- spin_unlock_irqrestore(&ai->aux_lock, flags);
- return SUCCESS;
-}
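-
-/* aux_bap_read() walks the card's chained buffer pages: aux_setup()
- * returns the next page number and the bytes available in the current
- * one, and the loop keeps re-mapping pages until the requested number
- * of words has been pulled from the DATA port. */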
-
-
-/* requires call to bap_setup() first */
-static int fast_bap_read(struct airo_info *ai, __le16 *pu16Dst,
- int bytelen, int whichbap)
-{
- bytelen = (bytelen + 1) & (~1); // round up to even value
- if (!do8bitIO)
- insw(ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1);
- else
- insb(ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen);
- return SUCCESS;
-}
-
-/* requires call to bap_setup() first */
-static int bap_write(struct airo_info *ai, const __le16 *pu16Src,
- int bytelen, int whichbap)
-{
- bytelen = (bytelen + 1) & (~1); // round up to even value
- if (!do8bitIO)
- outsw(ai->dev->base_addr+DATA0+whichbap,
- pu16Src, bytelen>>1);
- else
- outsb(ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen);
- return SUCCESS;
-}
-
-static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
-{
- Cmd cmd; /* for issuing commands */
- Resp rsp; /* response from commands */
- u16 status;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = accmd;
- cmd.parm0 = rid;
- status = issuecommand(ai, &cmd, &rsp, true);
- if (status != 0) return status;
- if ((rsp.status & 0x7F00) != 0) {
- return (accmd << 8) + (rsp.rsp0 & 0xFF);
- }
- return 0;
-}
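
PC4500_accessrid() serves both halves of a RID update: plain CMD_ACCESS pulls the RID into the card's buffer space, and OR-ing in 0x100 (as PC4500_writerid() does below) commits staged data back. A condensed sketch of the pairing (hypothetical helper, mirroring the non-MPI path of PC4500_writerid()):

static int example_commit_rid(struct airo_info *ai, u16 rid,
			      const __le16 *data, int len)
{
	if (PC4500_accessrid(ai, rid, CMD_ACCESS))	/* fetch the rid */
		return ERROR;
	if (bap_setup(ai, rid, 0, BAP1) != SUCCESS)
		return ERROR;
	bap_write(ai, data, len, BAP1);			/* stage new data */
	return PC4500_accessrid(ai, rid, 0x100 | CMD_ACCESS); /* commit */
}
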
-
-/* Note that we are using BAP1, which is also used by transmit, so
- * we must get a lock. */
-static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, int lock)
-{
- u16 status;
- int rc = SUCCESS;
-
- if (lock) {
- if (down_interruptible(&ai->sem))
- return ERROR;
- }
- if (test_bit(FLAG_MPI,&ai->flags)) {
- Cmd cmd;
- Resp rsp;
-
- memset(&cmd, 0, sizeof(cmd));
- memset(&rsp, 0, sizeof(rsp));
- ai->config_desc.rid_desc.valid = 1;
- ai->config_desc.rid_desc.len = RIDSIZE;
- ai->config_desc.rid_desc.rid = 0;
- ai->config_desc.rid_desc.host_addr = ai->ridbus;
-
- cmd.cmd = CMD_ACCESS;
- cmd.parm0 = rid;
-
- memcpy_toio(ai->config_desc.card_ram_off,
- &ai->config_desc.rid_desc, sizeof(Rid));
-
- rc = issuecommand(ai, &cmd, &rsp, true);
-
- if (rsp.status & 0x7f00)
- rc = rsp.rsp0;
- if (!rc)
- memcpy(pBuf, ai->config_desc.virtual_host_addr, len);
- goto done;
- } else {
- if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS))!=SUCCESS) {
- rc = status;
- goto done;
- }
- if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
- rc = ERROR;
- goto done;
- }
- // read the rid length field
- bap_read(ai, pBuf, 2, BAP1);
- // length for remaining part of rid
- len = min(len, (int)le16_to_cpu(*(__le16*)pBuf)) - 2;
-
- if (len <= 2) {
- airo_print_err(ai->dev->name,
- "Rid %x has a length of %d which is too short",
- (int)rid, (int)len);
- rc = ERROR;
- goto done;
- }
- // read remainder of the rid
- rc = bap_read(ai, ((__le16*)pBuf)+1, len, BAP1);
- }
-done:
- if (lock)
- up(&ai->sem);
- return rc;
-}
-
-/* Note that we are using BAP1, which is also used by transmit, so
- * make sure this isn't called when a transmit is happening */
-static int PC4500_writerid(struct airo_info *ai, u16 rid,
- const void *pBuf, int len, int lock)
-{
- u16 status;
- int rc = SUCCESS;
-
- *(__le16*)pBuf = cpu_to_le16((u16)len);
-
- if (lock) {
- if (down_interruptible(&ai->sem))
- return ERROR;
- }
- if (test_bit(FLAG_MPI,&ai->flags)) {
- Cmd cmd;
- Resp rsp;
-
- if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
- airo_print_err(ai->dev->name,
- "%s: MAC should be disabled (rid=%04x)",
- __func__, rid);
- memset(&cmd, 0, sizeof(cmd));
- memset(&rsp, 0, sizeof(rsp));
-
- ai->config_desc.rid_desc.valid = 1;
- ai->config_desc.rid_desc.len = *((u16 *)pBuf);
- ai->config_desc.rid_desc.rid = 0;
-
- cmd.cmd = CMD_WRITERID;
- cmd.parm0 = rid;
-
- memcpy_toio(ai->config_desc.card_ram_off,
- &ai->config_desc.rid_desc, sizeof(Rid));
-
- if (len < 4 || len > 2047) {
- airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
- rc = -1;
- } else {
- memcpy(ai->config_desc.virtual_host_addr,
- pBuf, len);
-
- rc = issuecommand(ai, &cmd, &rsp, true);
- if ((rc & 0xff00) != 0) {
- airo_print_err(ai->dev->name, "%s: Write rid Error %d",
- __func__, rc);
- airo_print_err(ai->dev->name, "%s: Cmd=%04x",
- __func__, cmd.cmd);
- }
-
- if ((rsp.status & 0x7f00))
- rc = rsp.rsp0;
- }
- } else {
- // --- first access so that we can write the rid data
- if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
- rc = status;
- goto done;
- }
- // --- now write the rid data
- if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
- rc = ERROR;
- goto done;
- }
- bap_write(ai, pBuf, len, BAP1);
- // ---now commit the rid data
- rc = PC4500_accessrid(ai, rid, 0x100|CMD_ACCESS);
- }
-done:
- if (lock)
- up(&ai->sem);
- return rc;
-}
-
-/* Allocates a FID to be used for transmitting packets. We only use
- one for now. */
-static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
-{
- unsigned int loop = 3000;
- Cmd cmd;
- Resp rsp;
- u16 txFid;
- __le16 txControl;
-
- cmd.cmd = CMD_ALLOCATETX;
- cmd.parm0 = lenPayload;
- if (down_interruptible(&ai->sem))
- return ERROR;
- if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) {
- txFid = ERROR;
- goto done;
- }
- if ((rsp.status & 0xFF00) != 0) {
- txFid = ERROR;
- goto done;
- }
- /* wait for the allocate event/indication
- * It makes me kind of nervous that this can just sit here and spin,
- * but in practice it only loops like four times. */
- while (((IN4500(ai, EVSTAT) & EV_ALLOC) == 0) && --loop);
- if (!loop) {
- txFid = ERROR;
- goto done;
- }
-
- // get the allocated fid and acknowledge
- txFid = IN4500(ai, TXALLOCFID);
- OUT4500(ai, EVACK, EV_ALLOC);
-
- /* The CARD is pretty cool since it converts the ethernet packet
- * into 802.11. Also note that we don't release the FID since we
- * will be using the same one over and over again. */
- /* We only have to setup the control once since we are not
- * releasing the fid. */
- if (raw)
- txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_11
- | TXCTL_ETHERNET | TXCTL_NORELEASE);
- else
- txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_3
- | TXCTL_ETHERNET | TXCTL_NORELEASE);
- if (bap_setup(ai, txFid, 0x0008, BAP1) != SUCCESS)
- txFid = ERROR;
- else
- bap_write(ai, &txControl, sizeof(txControl), BAP1);
-
-done:
- up(&ai->sem);
-
- return txFid;
-}
-
-/* In general BAP1 is dedicated to transmitting packets. However,
- since we need a BAP when accessing RIDs, we also use BAP1 for that.
- Make sure the BAP1 spinlock is held when this is called. */
-static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket,
- bool may_sleep)
-{
- __le16 payloadLen;
- Cmd cmd;
- Resp rsp;
- int miclen = 0;
- u16 txFid = len;
- MICBuffer pMic;
-
- len >>= 16;
-
- if (len <= ETH_ALEN * 2) {
- airo_print_warn(ai->dev->name, "Short packet %d", len);
- return ERROR;
- }
- len -= ETH_ALEN * 2;
-
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
- (ntohs(((__be16 *)pPacket)[6]) != 0x888E)) {
- if (encapsulate(ai, (etherHead *)pPacket,&pMic, len) != SUCCESS)
- return ERROR;
- miclen = sizeof(pMic);
- }
- // packet is destination[6], source[6], payload[len-12]
- // write the payload length and dst/src/payload
- if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR;
- /* The hardware addresses aren't counted as part of the payload, so
- * we have to subtract off the 12 bytes for the addresses */
- payloadLen = cpu_to_le16(len + miclen);
- bap_write(ai, &payloadLen, sizeof(payloadLen), BAP1);
- bap_write(ai, (__le16*)pPacket, sizeof(etherHead), BAP1);
- if (miclen)
- bap_write(ai, (__le16*)&pMic, miclen, BAP1);
- bap_write(ai, (__le16*)(pPacket + sizeof(etherHead)), len, BAP1);
- // issue the transmit command
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_TRANSMIT;
- cmd.parm0 = txFid;
- if (issuecommand(ai, &cmd, &rsp, may_sleep) != SUCCESS)
- return ERROR;
- if ((rsp.status & 0xFF00) != 0) return ERROR;
- return SUCCESS;
-}
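
Note the packed calling convention: the single len argument carries the FID in its low 16 bits and the byte count in the high 16 bits, which is why the function starts with txFid = len; len >>= 16. A hypothetical caller would pack it like this:

static int example_send(struct airo_info *ai, u16 fid, u16 pktlen, char *pkt)
{
	/* low 16 bits: FID from transmit_allocate(); high 16: length */
	return transmit_802_3_packet(ai, fid | (pktlen << 16), pkt, false);
}
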
-
-static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket,
- bool may_sleep)
-{
- __le16 fc, payloadLen;
- Cmd cmd;
- Resp rsp;
- int hdrlen;
- static u8 tail[(30-10) + 2 + 6] = {[30-10] = 6};
- /* padding of header to full size + le16 gaplen (6) + gaplen bytes */
- u16 txFid = len;
- len >>= 16;
-
- fc = *(__le16*)pPacket;
- hdrlen = header_len(fc);
-
- if (len < hdrlen) {
- airo_print_warn(ai->dev->name, "Short packet %d", len);
- return ERROR;
- }
-
- /* packet is 802.11 header + payload
- * write the payload length and dst/src/payload */
- if (bap_setup(ai, txFid, 6, BAP1) != SUCCESS) return ERROR;
- /* The 802.11 header isn't counted as part of the payload, so
- * we have to subtract off the header bytes */
- payloadLen = cpu_to_le16(len-hdrlen);
- bap_write(ai, &payloadLen, sizeof(payloadLen), BAP1);
- if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR;
- bap_write(ai, (__le16 *)pPacket, hdrlen, BAP1);
- bap_write(ai, (__le16 *)(tail + (hdrlen - 10)), 38 - hdrlen, BAP1);
-
- bap_write(ai, (__le16 *)(pPacket + hdrlen), len - hdrlen, BAP1);
- // issue the transmit command
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_TRANSMIT;
- cmd.parm0 = txFid;
- if (issuecommand(ai, &cmd, &rsp, may_sleep) != SUCCESS)
- return ERROR;
- if ((rsp.status & 0xFF00) != 0) return ERROR;
- return SUCCESS;
-}
-
-/*
- * These are the proc_fs routines. They are a bit messier than I would
- * like! Feel free to clean them up!
- */
-
-static ssize_t proc_read(struct file *file,
- char __user *buffer,
- size_t len,
- loff_t *offset);
-
-static ssize_t proc_write(struct file *file,
- const char __user *buffer,
- size_t len,
- loff_t *offset);
-static int proc_close(struct inode *inode, struct file *file);
-
-static int proc_stats_open(struct inode *inode, struct file *file);
-static int proc_statsdelta_open(struct inode *inode, struct file *file);
-static int proc_status_open(struct inode *inode, struct file *file);
-static int proc_SSID_open(struct inode *inode, struct file *file);
-static int proc_APList_open(struct inode *inode, struct file *file);
-static int proc_BSSList_open(struct inode *inode, struct file *file);
-static int proc_config_open(struct inode *inode, struct file *file);
-static int proc_wepkey_open(struct inode *inode, struct file *file);
-
-static const struct proc_ops proc_statsdelta_ops = {
- .proc_read = proc_read,
- .proc_open = proc_statsdelta_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_stats_ops = {
- .proc_read = proc_read,
- .proc_open = proc_stats_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_status_ops = {
- .proc_read = proc_read,
- .proc_open = proc_status_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_SSID_ops = {
- .proc_read = proc_read,
- .proc_write = proc_write,
- .proc_open = proc_SSID_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_BSSList_ops = {
- .proc_read = proc_read,
- .proc_write = proc_write,
- .proc_open = proc_BSSList_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_APList_ops = {
- .proc_read = proc_read,
- .proc_write = proc_write,
- .proc_open = proc_APList_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_config_ops = {
- .proc_read = proc_read,
- .proc_write = proc_write,
- .proc_open = proc_config_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static const struct proc_ops proc_wepkey_ops = {
- .proc_read = proc_read,
- .proc_write = proc_write,
- .proc_open = proc_wepkey_open,
- .proc_release = proc_close,
- .proc_lseek = default_llseek,
-};
-
-static struct proc_dir_entry *airo_entry;
-
-struct proc_data {
- int release_buffer;
- int readlen;
- char *rbuffer;
- int writelen;
- int maxwritelen;
- char *wbuffer;
- void (*on_close) (struct inode *, struct file *);
-};
-
-static int setup_proc_entry(struct net_device *dev,
- struct airo_info *apriv)
-{
- struct proc_dir_entry *entry;
-
- /* First setup the device directory */
- strcpy(apriv->proc_name, dev->name);
- apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
- airo_entry);
- if (!apriv->proc_entry)
- return -ENOMEM;
- proc_set_user(apriv->proc_entry, proc_kuid, proc_kgid);
-
- /* Setup the StatsDelta */
- entry = proc_create_data("StatsDelta", 0444 & proc_perm,
- apriv->proc_entry, &proc_statsdelta_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the Stats */
- entry = proc_create_data("Stats", 0444 & proc_perm,
- apriv->proc_entry, &proc_stats_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the Status */
- entry = proc_create_data("Status", 0444 & proc_perm,
- apriv->proc_entry, &proc_status_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the Config */
- entry = proc_create_data("Config", proc_perm,
- apriv->proc_entry, &proc_config_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the SSID */
- entry = proc_create_data("SSID", proc_perm,
- apriv->proc_entry, &proc_SSID_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the APList */
- entry = proc_create_data("APList", proc_perm,
- apriv->proc_entry, &proc_APList_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the BSSList */
- entry = proc_create_data("BSSList", proc_perm,
- apriv->proc_entry, &proc_BSSList_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
-
- /* Setup the WepKey */
- entry = proc_create_data("WepKey", proc_perm,
- apriv->proc_entry, &proc_wepkey_ops, dev);
- if (!entry)
- goto fail;
- proc_set_user(entry, proc_kuid, proc_kgid);
- return 0;
-
-fail:
- remove_proc_subtree(apriv->proc_name, airo_entry);
- return -ENOMEM;
-}
-
-static int takedown_proc_entry(struct net_device *dev,
- struct airo_info *apriv)
-{
- remove_proc_subtree(apriv->proc_name, airo_entry);
- return 0;
-}
-
-/*
- * What we want from the proc_fs is to be able to efficiently read
- * and write the configuration. To do this, we want to read the
- * configuration when the file is opened and write it when the file is
- * closed. So basically we allocate a read buffer at open and fill it
- * with data, and allocate a write buffer and parse it at close.
- */
-
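
In code, the shared open-side pattern looks roughly like this (a schematic sketch only; the real handlers below inline it with per-file buffer sizes):

static int example_proc_open(struct file *file, int rlen, int wlen,
			     void (*on_close)(struct inode *, struct file *))
{
	struct proc_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	data->rbuffer = kmalloc(rlen, GFP_KERNEL);	/* filled at open */
	if (!data->rbuffer) {
		kfree(data);
		return -ENOMEM;
	}
	if (wlen) {
		data->wbuffer = kzalloc(wlen, GFP_KERNEL); /* parsed at close */
		if (!data->wbuffer) {
			kfree(data->rbuffer);
			kfree(data);
			return -ENOMEM;
		}
		data->maxwritelen = wlen;
	}
	data->on_close = on_close;
	file->private_data = data;
	return 0;
}
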
-/*
- * The read routine is generic; it relies on the preallocated rbuffer
- * to supply the data.
- */
-static ssize_t proc_read(struct file *file,
- char __user *buffer,
- size_t len,
- loff_t *offset)
-{
- struct proc_data *priv = file->private_data;
-
- if (!priv->rbuffer)
- return -EINVAL;
-
- return simple_read_from_buffer(buffer, len, offset, priv->rbuffer,
- priv->readlen);
-}
-
-/*
- * The write routine is generic; it fills in the preallocated wbuffer
- * with the data.
- */
-static ssize_t proc_write(struct file *file,
- const char __user *buffer,
- size_t len,
- loff_t *offset)
-{
- ssize_t ret;
- struct proc_data *priv = file->private_data;
-
- if (!priv->wbuffer)
- return -EINVAL;
-
- ret = simple_write_to_buffer(priv->wbuffer, priv->maxwritelen, offset,
- buffer, len);
- if (ret > 0)
- priv->writelen = max_t(int, priv->writelen, *offset);
-
- return ret;
-}
-
-static int proc_status_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *apriv = dev->ml_priv;
- CapabilityRid cap_rid;
- StatusRid status_rid;
- u16 mode;
- int i;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = file->private_data;
- if ((data->rbuffer = kmalloc(2048, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
-
- readStatusRid(apriv, &status_rid, 1);
- readCapabilityRid(apriv, &cap_rid, 1);
-
- mode = le16_to_cpu(status_rid.mode);
-
- i = sprintf(data->rbuffer, "Status: %s%s%s%s%s%s%s%s%s\n",
- mode & 1 ? "CFG ": "",
- mode & 2 ? "ACT ": "",
- mode & 0x10 ? "SYN ": "",
- mode & 0x20 ? "LNK ": "",
- mode & 0x40 ? "LEAP ": "",
- mode & 0x80 ? "PRIV ": "",
- mode & 0x100 ? "KEY ": "",
- mode & 0x200 ? "WEP ": "",
- mode & 0x8000 ? "ERR ": "");
- sprintf(data->rbuffer+i, "Mode: %x\n"
- "Signal Strength: %d\n"
- "Signal Quality: %d\n"
- "SSID: %-.*s\n"
- "AP: %-.16s\n"
- "Freq: %d\n"
- "BitRate: %dmbs\n"
- "Driver Version: %s\n"
- "Device: %s\nManufacturer: %s\nFirmware Version: %s\n"
- "Radio type: %x\nCountry: %x\nHardware Version: %x\n"
- "Software Version: %x\nSoftware Subversion: %x\n"
- "Boot block version: %x\n",
- le16_to_cpu(status_rid.mode),
- le16_to_cpu(status_rid.normalizedSignalStrength),
- le16_to_cpu(status_rid.signalQuality),
- le16_to_cpu(status_rid.SSIDlen),
- status_rid.SSID,
- status_rid.apName,
- le16_to_cpu(status_rid.channel),
- le16_to_cpu(status_rid.currentXmitRate) / 2,
- version,
- cap_rid.prodName,
- cap_rid.manName,
- cap_rid.prodVer,
- le16_to_cpu(cap_rid.radioType),
- le16_to_cpu(cap_rid.country),
- le16_to_cpu(cap_rid.hardVer),
- le16_to_cpu(cap_rid.softVer),
- le16_to_cpu(cap_rid.softSubVer),
- le16_to_cpu(cap_rid.bootBlockVer));
- data->readlen = strlen(data->rbuffer);
- return 0;
-}
-
-static int proc_stats_rid_open(struct inode*, struct file*, u16);
-static int proc_statsdelta_open(struct inode *inode,
- struct file *file)
-{
- if (file->f_mode&FMODE_WRITE) {
- return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
- }
- return proc_stats_rid_open(inode, file, RID_STATSDELTA);
-}
-
-static int proc_stats_open(struct inode *inode, struct file *file)
-{
- return proc_stats_rid_open(inode, file, RID_STATS);
-}
-
-static int proc_stats_rid_open(struct inode *inode,
- struct file *file,
- u16 rid)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *apriv = dev->ml_priv;
- StatsRid stats;
- int i, j;
- __le32 *vals = stats.vals;
- int len;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = file->private_data;
- if ((data->rbuffer = kmalloc(4096, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
-
- readStatsRid(apriv, &stats, rid, 1);
- len = le16_to_cpu(stats.len);
-
- j = 0;
- for (i = 0; statsLabels[i]!=(char *)-1 && i*4<len; i++) {
- if (!statsLabels[i]) continue;
- if (j+strlen(statsLabels[i])+16>4096) {
- airo_print_warn(apriv->dev->name,
- "Potentially disastrous buffer overflow averted!");
- break;
- }
- j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i],
- le32_to_cpu(vals[i]));
- }
- if (i*4 >= len) {
- airo_print_warn(apriv->dev->name, "Got a short rid");
- }
- data->readlen = j;
- return 0;
-}
-
-static int get_dec_u16(char *buffer, int *start, int limit)
-{
- u16 value;
- int valid = 0;
- for (value = 0; *start < limit && buffer[*start] >= '0' &&
- buffer[*start] <= '9'; (*start)++) {
- valid = 1;
- value *= 10;
- value += buffer[*start] - '0';
- }
- if (!valid) return -1;
- return value;
-}
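
For instance, the Channel handler below pulls up to three digits off the line; a worked call (hypothetical helper and values):

static void example_parse_channel(void)
{
	char line[] = "11\n";
	int i = 0;
	int v = get_dec_u16(line, &i, 3); /* v == 11; i == 2, stopped at '\n' */

	(void)v;
}
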
-
-static int airo_config_commit(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra);
-
-static inline int sniffing_mode(struct airo_info *ai)
-{
- return (le16_to_cpu(ai->config.rmode) & le16_to_cpu(RXMODE_MASK)) >=
- le16_to_cpu(RXMODE_RFMON);
-}
-
-static void proc_config_on_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data = file->private_data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- char *line;
-
- if (!data->writelen) return;
-
- readConfigRid(ai, 1);
- set_bit (FLAG_COMMIT, &ai->flags);
-
- line = data->wbuffer;
- while (line[0]) {
-/*** Mode processing */
- if (!strncmp(line, "Mode: ", 6)) {
- line += 6;
- if (sniffing_mode(ai))
- set_bit (FLAG_RESET, &ai->flags);
- ai->config.rmode &= ~RXMODE_FULL_MASK;
- clear_bit (FLAG_802_11, &ai->flags);
- ai->config.opmode &= ~MODE_CFG_MASK;
- ai->config.scanMode = SCANMODE_ACTIVE;
- if (line[0] == 'a') {
- ai->config.opmode |= MODE_STA_IBSS;
- } else {
- ai->config.opmode |= MODE_STA_ESS;
- if (line[0] == 'r') {
- ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
- ai->config.scanMode = SCANMODE_PASSIVE;
- set_bit (FLAG_802_11, &ai->flags);
- } else if (line[0] == 'y') {
- ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
- ai->config.scanMode = SCANMODE_PASSIVE;
- set_bit (FLAG_802_11, &ai->flags);
- } else if (line[0] == 'l')
- ai->config.rmode |= RXMODE_LANMON;
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- }
-
-/*** Radio status */
- else if (!strncmp(line,"Radio: ", 7)) {
- line += 7;
- if (!strncmp(line,"off", 3)) {
- set_bit (FLAG_RADIO_OFF, &ai->flags);
- } else {
- clear_bit (FLAG_RADIO_OFF, &ai->flags);
- }
- }
-/*** NodeName processing */
- else if (!strncmp(line, "NodeName: ", 10)) {
- int j;
-
- line += 10;
- memset(ai->config.nodeName, 0, 16);
-/* Do the name, assume a space between the mode and node name */
- for (j = 0; j < 16 && line[j] != '\n'; j++) {
- ai->config.nodeName[j] = line[j];
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- }
-
-/*** PowerMode processing */
- else if (!strncmp(line, "PowerMode: ", 11)) {
- line += 11;
- if (!strncmp(line, "PSPCAM", 6)) {
- ai->config.powerSaveMode = POWERSAVE_PSPCAM;
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "PSP", 3)) {
- ai->config.powerSaveMode = POWERSAVE_PSP;
- set_bit (FLAG_COMMIT, &ai->flags);
- } else {
- ai->config.powerSaveMode = POWERSAVE_CAM;
- set_bit (FLAG_COMMIT, &ai->flags);
- }
- } else if (!strncmp(line, "DataRates: ", 11)) {
- int v, i = 0, k = 0; /* i is index into line,
- k is index to rates */
-
- line += 11;
- while ((v = get_dec_u16(line, &i, 3))!=-1) {
- ai->config.rates[k++] = (u8)v;
- line += i + 1;
- i = 0;
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "Channel: ", 9)) {
- int v, i = 0;
- line += 9;
- v = get_dec_u16(line, &i, i+3);
- if (v != -1) {
- ai->config.channelSet = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- }
- } else if (!strncmp(line, "XmitPower: ", 11)) {
- int v, i = 0;
- line += 11;
- v = get_dec_u16(line, &i, i+3);
- if (v != -1) {
- ai->config.txPower = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- }
- } else if (!strncmp(line, "WEP: ", 5)) {
- line += 5;
- switch(line[0]) {
- case 's':
- set_auth_type(ai, AUTH_SHAREDKEY);
- break;
- case 'e':
- set_auth_type(ai, AUTH_ENCRYPT);
- break;
- default:
- set_auth_type(ai, AUTH_OPEN);
- break;
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "LongRetryLimit: ", 16)) {
- int v, i = 0;
-
- line += 16;
- v = get_dec_u16(line, &i, 3);
- v = (v<0) ? 0 : ((v>255) ? 255 : v);
- ai->config.longRetryLimit = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "ShortRetryLimit: ", 17)) {
- int v, i = 0;
-
- line += 17;
- v = get_dec_u16(line, &i, 3);
- v = (v<0) ? 0 : ((v>255) ? 255 : v);
- ai->config.shortRetryLimit = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "RTSThreshold: ", 14)) {
- int v, i = 0;
-
- line += 14;
- v = get_dec_u16(line, &i, 4);
- v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
- ai->config.rtsThres = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "TXMSDULifetime: ", 16)) {
- int v, i = 0;
-
- line += 16;
- v = get_dec_u16(line, &i, 5);
- v = (v<0) ? 0 : v;
- ai->config.txLifetime = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "RXMSDULifetime: ", 16)) {
- int v, i = 0;
-
- line += 16;
- v = get_dec_u16(line, &i, 5);
- v = (v<0) ? 0 : v;
- ai->config.rxLifetime = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "TXDiversity: ", 13)) {
- ai->config.txDiversity =
- (line[13]=='l') ? 1 :
- ((line[13]=='r')? 2: 3);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "RXDiversity: ", 13)) {
- ai->config.rxDiversity =
- (line[13]=='l') ? 1 :
- ((line[13]=='r')? 2: 3);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "FragThreshold: ", 15)) {
- int v, i = 0;
-
- line += 15;
- v = get_dec_u16(line, &i, 4);
- v = (v<256) ? 256 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
- v = v & 0xfffe; /* Make sure it's even */
- ai->config.fragThresh = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "Modulation: ", 12)) {
- line += 12;
- switch(*line) {
- case 'd': ai->config.modulation = MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'c': ai->config.modulation = MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'm': ai->config.modulation = MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
- default: airo_print_warn(ai->dev->name, "Unknown modulation");
- }
- } else if (!strncmp(line, "Preamble: ", 10)) {
- line += 10;
- switch(*line) {
- case 'a': ai->config.preamble = PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'l': ai->config.preamble = PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 's': ai->config.preamble = PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
- default: airo_print_warn(ai->dev->name, "Unknown preamble");
- }
- } else {
- airo_print_warn(ai->dev->name, "Couldn't figure out %s", line);
- }
- while (line[0] && line[0] != '\n') line++;
- if (line[0]) line++;
- }
- airo_config_commit(dev, NULL, NULL, NULL);
-}
-
-static const char *get_rmode(__le16 mode)
-{
- switch(mode & RXMODE_MASK) {
- case RXMODE_RFMON: return "rfmon";
- case RXMODE_RFMON_ANYBSS: return "yna (any) bss rfmon";
- case RXMODE_LANMON: return "lanmon";
- }
- return "ESS";
-}
-
-static int proc_config_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- int i;
- __le16 mode;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = file->private_data;
- if ((data->rbuffer = kmalloc(2048, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- if ((data->wbuffer = kzalloc(2048, GFP_KERNEL)) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->maxwritelen = 2048;
- data->on_close = proc_config_on_close;
-
- readConfigRid(ai, 1);
-
- mode = ai->config.opmode & MODE_CFG_MASK;
- i = sprintf(data->rbuffer,
- "Mode: %s\n"
- "Radio: %s\n"
- "NodeName: %-16s\n"
- "PowerMode: %s\n"
- "DataRates: %d %d %d %d %d %d %d %d\n"
- "Channel: %d\n"
- "XmitPower: %d\n",
- mode == MODE_STA_IBSS ? "adhoc" :
- mode == MODE_STA_ESS ? get_rmode(ai->config.rmode):
- mode == MODE_AP ? "AP" :
- mode == MODE_AP_RPTR ? "AP RPTR" : "Error",
- test_bit(FLAG_RADIO_OFF, &ai->flags) ? "off" : "on",
- ai->config.nodeName,
- ai->config.powerSaveMode == POWERSAVE_CAM ? "CAM" :
- ai->config.powerSaveMode == POWERSAVE_PSP ? "PSP" :
- ai->config.powerSaveMode == POWERSAVE_PSPCAM ? "PSPCAM" :
- "Error",
- (int)ai->config.rates[0],
- (int)ai->config.rates[1],
- (int)ai->config.rates[2],
- (int)ai->config.rates[3],
- (int)ai->config.rates[4],
- (int)ai->config.rates[5],
- (int)ai->config.rates[6],
- (int)ai->config.rates[7],
- le16_to_cpu(ai->config.channelSet),
- le16_to_cpu(ai->config.txPower)
- );
- sprintf(data->rbuffer + i,
- "LongRetryLimit: %d\n"
- "ShortRetryLimit: %d\n"
- "RTSThreshold: %d\n"
- "TXMSDULifetime: %d\n"
- "RXMSDULifetime: %d\n"
- "TXDiversity: %s\n"
- "RXDiversity: %s\n"
- "FragThreshold: %d\n"
- "WEP: %s\n"
- "Modulation: %s\n"
- "Preamble: %s\n",
- le16_to_cpu(ai->config.longRetryLimit),
- le16_to_cpu(ai->config.shortRetryLimit),
- le16_to_cpu(ai->config.rtsThres),
- le16_to_cpu(ai->config.txLifetime),
- le16_to_cpu(ai->config.rxLifetime),
- ai->config.txDiversity == 1 ? "left" :
- ai->config.txDiversity == 2 ? "right" : "both",
- ai->config.rxDiversity == 1 ? "left" :
- ai->config.rxDiversity == 2 ? "right" : "both",
- le16_to_cpu(ai->config.fragThresh),
- ai->config.authType == AUTH_ENCRYPT ? "encrypt" :
- ai->config.authType == AUTH_SHAREDKEY ? "shared" : "open",
- ai->config.modulation == MOD_DEFAULT ? "default" :
- ai->config.modulation == MOD_CCK ? "cck" :
- ai->config.modulation == MOD_MOK ? "mok" : "error",
- ai->config.preamble == PREAMBLE_AUTO ? "auto" :
- ai->config.preamble == PREAMBLE_LONG ? "long" :
- ai->config.preamble == PREAMBLE_SHORT ? "short" : "error"
- );
- data->readlen = strlen(data->rbuffer);
- return 0;
-}
-
-static void proc_SSID_on_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data = file->private_data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- SsidRid SSID_rid;
- int i;
- char *p = data->wbuffer;
- char *end = p + data->writelen;
-
- if (!data->writelen)
- return;
-
- *end = '\n'; /* sentinel; we have space for it */
-
- memset(&SSID_rid, 0, sizeof(SSID_rid));
-
- for (i = 0; i < 3 && p < end; i++) {
- int j = 0;
- /* copy up to 32 characters from this line */
- while (*p != '\n' && j < 32)
- SSID_rid.ssids[i].ssid[j++] = *p++;
- if (j == 0)
- break;
- SSID_rid.ssids[i].len = cpu_to_le16(j);
- /* skip to the beginning of the next line */
- while (*p++ != '\n')
- ;
- }
- if (i)
- SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
- disable_MAC(ai, 1);
- writeSsidRid(ai, &SSID_rid, 1);
- enable_MAC(ai, 1);
-}
-
-static void proc_APList_on_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data = file->private_data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- APListRid *APList_rid = &ai->APList;
- int i;
-
- if (!data->writelen) return;
-
- memset(APList_rid, 0, sizeof(*APList_rid));
- APList_rid->len = cpu_to_le16(sizeof(*APList_rid));
-
- for (i = 0; i < 4 && data->writelen >= (i + 1) * 6 * 3; i++)
- mac_pton(data->wbuffer + i * 6 * 3, APList_rid->ap[i]);
-
- disable_MAC(ai, 1);
- writeAPListRid(ai, APList_rid, 1);
- enable_MAC(ai, 1);
-}
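
Each written line must be a colon-separated MAC address; 17 characters plus the newline give the 6 * 3 stride checked above, and up to four addresses are taken. A hypothetical userspace writer (the device name eth0 is an assumption):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/driver/aironet/eth0/APList", "w");

	if (!f)
		return 1;
	fputs("00:40:96:12:34:56\n", f);	/* up to 4 such lines */
	return fclose(f) ? 1 : 0;
}
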
-
-/* This function wraps PC4500_writerid with a MAC disable */
-static int do_writerid(struct airo_info *ai, u16 rid, const void *rid_data,
- int len, int dummy)
-{
- int rc;
-
- disable_MAC(ai, 1);
- rc = PC4500_writerid(ai, rid, rid_data, len, 1);
- enable_MAC(ai, 1);
- return rc;
-}
-
-/* Returns the WEP key at the specified index, or -1 if that key does
- * not exist. The buffer is assumed to be at least 16 bytes in length.
- */
-static int get_wep_key(struct airo_info *ai, u16 index, char *buf, u16 buflen)
-{
- WepKeyRid wkr;
- int rc;
- __le16 lastindex;
-
- rc = readWepKeyRid(ai, &wkr, 1, 1);
- if (rc != SUCCESS)
- return -1;
- do {
- lastindex = wkr.kindex;
- if (le16_to_cpu(wkr.kindex) == index) {
- int klen = min_t(int, buflen, le16_to_cpu(wkr.klen));
- memcpy(buf, wkr.key, klen);
- return klen;
- }
- rc = readWepKeyRid(ai, &wkr, 0, 1);
- if (rc != SUCCESS)
- return -1;
- } while (lastindex != wkr.kindex);
- return -1;
-}
-
-static int get_wep_tx_idx(struct airo_info *ai)
-{
- WepKeyRid wkr;
- int rc;
- __le16 lastindex;
-
- rc = readWepKeyRid(ai, &wkr, 1, 1);
- if (rc != SUCCESS)
- return -1;
- do {
- lastindex = wkr.kindex;
- if (wkr.kindex == cpu_to_le16(0xffff))
- return wkr.mac[0];
- rc = readWepKeyRid(ai, &wkr, 0, 1);
- if (rc != SUCCESS)
- return -1;
- } while (lastindex != wkr.kindex);
- return -1;
-}
-
-static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key,
- u16 keylen, int perm, int lock)
-{
- static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
- WepKeyRid wkr;
- int rc;
-
- if (WARN_ON(keylen == 0))
- return -1;
-
- memset(&wkr, 0, sizeof(wkr));
- wkr.len = cpu_to_le16(sizeof(wkr));
- wkr.kindex = cpu_to_le16(index);
- wkr.klen = cpu_to_le16(keylen);
- memcpy(wkr.key, key, keylen);
- memcpy(wkr.mac, macaddr, ETH_ALEN);
-
- if (perm) disable_MAC(ai, lock);
- rc = writeWepKeyRid(ai, &wkr, perm, lock);
- if (perm) enable_MAC(ai, lock);
- return rc;
-}
-
-static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
-{
- WepKeyRid wkr;
- int rc;
-
- memset(&wkr, 0, sizeof(wkr));
- wkr.len = cpu_to_le16(sizeof(wkr));
- wkr.kindex = cpu_to_le16(0xffff);
- wkr.mac[0] = (char)index;
-
- if (perm) {
- ai->defindex = (char)index;
- disable_MAC(ai, lock);
- }
-
- rc = writeWepKeyRid(ai, &wkr, perm, lock);
-
- if (perm)
- enable_MAC(ai, lock);
- return rc;
-}
-
-static void proc_wepkey_on_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- int i, rc;
- u8 key[16];
- u16 index = 0;
- int j = 0;
-
- memset(key, 0, sizeof(key));
-
- data = file->private_data;
- if (!data->writelen) return;
-
- if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
- (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
- index = data->wbuffer[0] - '0';
- if (data->wbuffer[1] == '\n') {
- rc = set_wep_tx_idx(ai, index, 1, 1);
- if (rc < 0) {
- airo_print_err(ai->dev->name, "failed to set "
- "WEP transmit index to %d: %d.",
- index, rc);
- }
- return;
- }
- j = 2;
- } else {
- airo_print_err(ai->dev->name, "WepKey passed invalid key index");
- return;
- }
-
- for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) {
- int val;
-
- if (i % 3 == 2)
- continue;
-
- val = hex_to_bin(data->wbuffer[i+j]);
- if (val < 0) {
- airo_print_err(ai->dev->name, "WebKey passed invalid key hex");
- return;
- }
- switch(i%3) {
- case 0:
- key[i/3] = (u8)val << 4;
- break;
- case 1:
- key[i/3] |= (u8)val;
- break;
- }
- }
-
- rc = set_wep_key(ai, index, key, i/3, 1, 1);
- if (rc < 0) {
- airo_print_err(ai->dev->name, "failed to set WEP key at index "
- "%d: %d.", index, rc);
- }
-}
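
The accepted format is thus the key index, one separator, then hex digit pairs each followed by a separator (every i % 3 == 2 position is skipped); a bare index selects the transmit key. Hypothetical userspace usage (path and device name are assumptions):

#include <stdio.h>

static int write_wepkey(const char *s)
{
	FILE *f = fopen("/proc/driver/aironet/eth0/WepKey", "w");

	if (!f)
		return -1;
	fputs(s, f);
	return fclose(f);	/* on_close parses the buffer here */
}

int main(void)
{
	write_wepkey("1 01 23 45 67 89\n");	/* 40-bit key at index 1 */
	write_wepkey("1\n");			/* bare index: set TX key */
	return 0;
}
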
-
-static int proc_wepkey_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- char *ptr;
- WepKeyRid wkr;
- __le16 lastindex;
- int j = 0;
- int rc;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- memset(&wkr, 0, sizeof(wkr));
- data = file->private_data;
- if ((data->rbuffer = kzalloc(180, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 80;
- if ((data->wbuffer = kzalloc(80, GFP_KERNEL)) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->on_close = proc_wepkey_on_close;
-
- ptr = data->rbuffer;
- strcpy(ptr, "No wep keys\n");
- rc = readWepKeyRid(ai, &wkr, 1, 1);
- if (rc == SUCCESS) do {
- lastindex = wkr.kindex;
- if (wkr.kindex == cpu_to_le16(0xffff)) {
- j += sprintf(ptr+j, "Tx key = %d\n",
- (int)wkr.mac[0]);
- } else {
- j += sprintf(ptr+j, "Key %d set with length = %d\n",
- le16_to_cpu(wkr.kindex),
- le16_to_cpu(wkr.klen));
- }
- readWepKeyRid(ai, &wkr, 0, 1);
- } while ((lastindex != wkr.kindex) && (j < 180-30));
-
- data->readlen = strlen(data->rbuffer);
- return 0;
-}
-
-static int proc_SSID_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- int i;
- char *ptr;
- SsidRid SSID_rid;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = file->private_data;
- if ((data->rbuffer = kmalloc(104, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 33*3;
- /* allocate maxwritelen + 1; we'll want a sentinel */
- if ((data->wbuffer = kzalloc(33*3 + 1, GFP_KERNEL)) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->on_close = proc_SSID_on_close;
-
- readSsidRid(ai, &SSID_rid);
- ptr = data->rbuffer;
- for (i = 0; i < 3; i++) {
- int j;
- size_t len = le16_to_cpu(SSID_rid.ssids[i].len);
- if (!len)
- break;
- if (len > 32)
- len = 32;
- for (j = 0; j < len && SSID_rid.ssids[i].ssid[j]; j++)
- *ptr++ = SSID_rid.ssids[i].ssid[j];
- *ptr++ = '\n';
- }
- *ptr = '\0';
- data->readlen = strlen(data->rbuffer);
- return 0;
-}
-
-static int proc_APList_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- int i;
- char *ptr;
- APListRid *APList_rid = &ai->APList;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = file->private_data;
- if ((data->rbuffer = kmalloc(104, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 4*6*3;
- if ((data->wbuffer = kzalloc(data->maxwritelen, GFP_KERNEL)) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->on_close = proc_APList_on_close;
-
- ptr = data->rbuffer;
- for (i = 0; i < 4; i++) {
-// We end when we find a zero MAC
- if (!*(int*)APList_rid->ap[i] &&
- !*(int*)&APList_rid->ap[i][2]) break;
- ptr += sprintf(ptr, "%pM\n", APList_rid->ap[i]);
- }
- if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
-
- *ptr = '\0';
- data->readlen = strlen(data->rbuffer);
- return 0;
-}
-
-static int proc_BSSList_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct net_device *dev = pde_data(inode);
- struct airo_info *ai = dev->ml_priv;
- char *ptr;
- BSSListRid BSSList_rid;
- int rc;
- /* If doLoseSync is not 1, we won't do a Lose Sync */
- int doLoseSync = -1;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = file->private_data;
- if ((data->rbuffer = kmalloc(1024, GFP_KERNEL)) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 0;
- data->wbuffer = NULL;
- data->on_close = NULL;
-
- if (file->f_mode & FMODE_WRITE) {
- if (!(file->f_mode & FMODE_READ)) {
- Cmd cmd;
- Resp rsp;
-
- if (ai->flags & FLAG_RADIO_MASK) {
- kfree(data->rbuffer);
- kfree(file->private_data);
- return -ENETDOWN;
- }
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_LISTBSS;
- if (down_interruptible(&ai->sem)) {
- kfree(data->rbuffer);
- kfree(file->private_data);
- return -ERESTARTSYS;
- }
- issuecommand(ai, &cmd, &rsp, true);
- up(&ai->sem);
- data->readlen = 0;
- return 0;
- }
- doLoseSync = 1;
- }
- ptr = data->rbuffer;
- /* There is a race condition here if there are concurrent opens.
- Since it is a rare condition, we'll just live with it; otherwise
- we would have to add a spin lock... */
- rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
- while (rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
- ptr += sprintf(ptr, "%pM %.*s rssi = %d",
- BSSList_rid.bssid,
- (int)BSSList_rid.ssidLen,
- BSSList_rid.ssid,
- le16_to_cpu(BSSList_rid.dBm));
- ptr += sprintf(ptr, " channel = %d %s %s %s %s\n",
- le16_to_cpu(BSSList_rid.dsChannel),
- BSSList_rid.cap & CAP_ESS ? "ESS" : "",
- BSSList_rid.cap & CAP_IBSS ? "adhoc" : "",
- BSSList_rid.cap & CAP_PRIVACY ? "wep" : "",
- BSSList_rid.cap & CAP_SHORTHDR ? "shorthdr" : "");
- rc = readBSSListRid(ai, 0, &BSSList_rid);
- }
- *ptr = '\0';
- data->readlen = strlen(data->rbuffer);
- return 0;
-}
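
A write-only open fires CMD_LISTBSS to start a scan; a subsequent read-open walks the resulting list. Hypothetical userspace usage (device name and the wait are assumptions):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/driver/aironet/eth0/BSSList", "w");

	if (f)
		fclose(f);	/* write-only open kicks off the scan */
	sleep(3);		/* give the card time to collect BSSs */
	f = fopen("/proc/driver/aironet/eth0/BSSList", "r");
	if (!f)
		return 1;
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	return fclose(f) ? 1 : 0;
}
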
-
-static int proc_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data = file->private_data;
-
- if (data->on_close != NULL)
- data->on_close(inode, file);
- kfree(data->rbuffer);
- kfree(data->wbuffer);
- kfree(data);
- return 0;
-}
-
-/* Since the card doesn't automatically switch to the right WEP mode,
- we will make it do so. If the card isn't associated, every few
- seconds we will switch WEP modes to see if that helps. If the card
- is associated we will check every minute to see if anything has
- changed. */
-static void timer_func(struct net_device *dev)
-{
- struct airo_info *apriv = dev->ml_priv;
-
-/* We don't have a link so try changing the authtype */
- readConfigRid(apriv, 0);
- disable_MAC(apriv, 0);
- switch(apriv->config.authType) {
- case AUTH_ENCRYPT:
-/* So drop to OPEN */
- apriv->config.authType = AUTH_OPEN;
- break;
- case AUTH_SHAREDKEY:
- if (apriv->keyindex < auto_wep) {
- set_wep_tx_idx(apriv, apriv->keyindex, 0, 0);
- apriv->config.authType = AUTH_SHAREDKEY;
- apriv->keyindex++;
- } else {
- /* Drop to ENCRYPT */
- apriv->keyindex = 0;
- set_wep_tx_idx(apriv, apriv->defindex, 0, 0);
- apriv->config.authType = AUTH_ENCRYPT;
- }
- break;
- default: /* We'll escalate to SHAREDKEY */
- apriv->config.authType = AUTH_SHAREDKEY;
- }
- set_bit (FLAG_COMMIT, &apriv->flags);
- writeConfigRid(apriv, 0);
- enable_MAC(apriv, 0);
- up(&apriv->sem);
-
-/* Schedule check to see if the change worked */
- clear_bit(JOB_AUTOWEP, &apriv->jobs);
- apriv->expires = RUN_AT(HZ*3);
-}
-
-#ifdef CONFIG_PCI
-static int airo_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pent)
-{
- struct net_device *dev;
-
- if (pci_enable_device(pdev))
- return -ENODEV;
- pci_set_master(pdev);
-
- if (pdev->device == 0x5000 || pdev->device == 0xa504)
- dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
- else
- dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
- if (!dev) {
- pci_disable_device(pdev);
- return -ENODEV;
- }
-
- pci_set_drvdata(pdev, dev);
- return 0;
-}
-
-static void airo_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- airo_print_info(dev->name, "Unregistering...");
- stop_airo_card(dev, 1);
- pci_disable_device(pdev);
-}
-
-static int __maybe_unused airo_pci_suspend(struct device *dev_d)
-{
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct airo_info *ai = dev->ml_priv;
- Cmd cmd;
- Resp rsp;
-
- if (!ai->SSID)
- ai->SSID = kmalloc(sizeof(SsidRid), GFP_KERNEL);
- if (!ai->SSID)
- return -ENOMEM;
- readSsidRid(ai, ai->SSID);
- memset(&cmd, 0, sizeof(cmd));
- /* the lock will be released at the end of the resume callback */
- if (down_interruptible(&ai->sem))
- return -EAGAIN;
- disable_MAC(ai, 0);
- netif_device_detach(dev);
- ai->power = PMSG_SUSPEND;
- cmd.cmd = HOSTSLEEP;
- issuecommand(ai, &cmd, &rsp, true);
-
- device_wakeup_enable(dev_d);
- return 0;
-}
-
-static int __maybe_unused airo_pci_resume(struct device *dev_d)
-{
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct airo_info *ai = dev->ml_priv;
- pci_power_t prev_state = to_pci_dev(dev_d)->current_state;
-
- device_wakeup_disable(dev_d);
-
- if (prev_state != PCI_D1) {
- reset_card(dev, 0);
- mpi_init_descriptors(ai);
- setup_card(ai, dev, 0);
- clear_bit(FLAG_RADIO_OFF, &ai->flags);
- clear_bit(FLAG_PENDING_XMIT, &ai->flags);
- } else {
- OUT4500(ai, EVACK, EV_AWAKEN);
- OUT4500(ai, EVACK, EV_AWAKEN);
- msleep(100);
- }
-
- set_bit(FLAG_COMMIT, &ai->flags);
- disable_MAC(ai, 0);
- msleep(200);
- if (ai->SSID) {
- writeSsidRid(ai, ai->SSID, 0);
- kfree(ai->SSID);
- ai->SSID = NULL;
- }
- writeAPListRid(ai, &ai->APList, 0);
- writeConfigRid(ai, 0);
- enable_MAC(ai, 0);
- ai->power = PMSG_ON;
- netif_device_attach(dev);
- netif_wake_queue(dev);
- enable_interrupts(ai);
- up(&ai->sem);
- return 0;
-}
-#endif
-
-static int __init airo_init_module(void)
-{
- int i;
-
- proc_kuid = make_kuid(&init_user_ns, proc_uid);
- proc_kgid = make_kgid(&init_user_ns, proc_gid);
- if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
- return -EINVAL;
-
- airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
-
- if (airo_entry)
- proc_set_user(airo_entry, proc_kuid, proc_kgid);
-
- for (i = 0; i < 4 && io[i] && irq[i]; i++) {
- airo_print_info("", "Trying to configure ISA adapter at irq=%d "
- "io = 0x%x", irq[i], io[i]);
- if (init_airo_card(irq[i], io[i], 0, NULL)) {
- /* do nothing */ ;
- }
- }
-
-#ifdef CONFIG_PCI
- airo_print_info("", "Probing for PCI adapters");
- i = pci_register_driver(&airo_driver);
- airo_print_info("", "Finished probing for PCI adapters");
-
- if (i) {
- remove_proc_entry("driver/aironet", NULL);
- return i;
- }
-#endif
-
- /* Always exit with success, as we are a library module
- * as well as a driver module
- */
- return 0;
-}
-
-static void __exit airo_cleanup_module(void)
-{
- struct airo_info *ai;
- while (!list_empty(&airo_devices)) {
- ai = list_entry(airo_devices.next, struct airo_info, dev_list);
- airo_print_info(ai->dev->name, "Unregistering...");
- stop_airo_card(ai->dev, 1);
- }
-#ifdef CONFIG_PCI
- pci_unregister_driver(&airo_driver);
-#endif
- remove_proc_entry("driver/aironet", NULL);
-}
-
-/*
- * Initial Wireless Extension code for Aironet driver by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
- * Conversion to new driver API by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
- * Javier also did a good amount of work here, adding some new extensions
- * and fixing my code. Let's just say that without him this code just
- * would not work at all... - Jean II
- */
-
-static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi)
-{
- if (!rssi_rid)
- return 0;
-
- return (0x100 - rssi_rid[rssi].rssidBm);
-}
-
-static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm)
-{
- int i;
-
- if (!rssi_rid)
- return 0;
-
- for (i = 0; i < 256; i++)
- if (rssi_rid[i].rssidBm == dbm)
- return rssi_rid[i].rssipct;
-
- return 0;
-}
-
-
-static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
-{
- int quality = 0;
- u16 sq;
-
- if ((status_rid->mode & cpu_to_le16(0x3f)) != cpu_to_le16(0x3f))
- return 0;
-
- if (!(cap_rid->hardCap & cpu_to_le16(8)))
- return 0;
-
- sq = le16_to_cpu(status_rid->signalQuality);
- if (memcmp(cap_rid->prodName, "350", 3))
- if (sq > 0x20)
- quality = 0;
- else
- quality = 0x20 - sq;
- else
- if (sq > 0xb0)
- quality = 0;
- else if (sq < 0x10)
- quality = 0xa0;
- else
- quality = 0xb0 - sq;
- return quality;
-}
-
-#define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0)
-#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50)
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get protocol name
- */
-static int airo_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *cwrq,
- char *extra)
-{
- strcpy(cwrq->name, "IEEE 802.11-DS");
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set frequency
- */
-static int airo_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_freq *fwrq = &wrqu->freq;
- struct airo_info *local = dev->ml_priv;
- int rc = -EINPROGRESS; /* Call commit handler */
-
- /* If setting by frequency, convert to a channel */
- if (fwrq->e == 1) {
- int f = fwrq->m / 100000;
-
- /* Hack to fall through... */
- fwrq->e = 0;
- fwrq->m = ieee80211_frequency_to_channel(f);
- }
- /* Setting by channel number */
- if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
- rc = -EOPNOTSUPP;
- else {
- int channel = fwrq->m;
- /* We should do a better check than that,
- * based on the card capability !!! */
- if ((channel < 1) || (channel > 14)) {
- airo_print_dbg(dev->name, "New channel value of %d is invalid!",
- fwrq->m);
- rc = -EINVAL;
- } else {
- readConfigRid(local, 1);
- /* Yes ! We can set it !!! */
- local->config.channelSet = cpu_to_le16(channel);
- set_bit (FLAG_COMMIT, &local->flags);
- }
- }
- return rc;
-}
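
Wireless Extensions encode a frequency as m * 10^e Hz, so with e == 1 the division by 100000 above yields MHz. A worked sketch (hypothetical helper and values):

static void example_freq_decode(void)
{
	struct iw_freq fwrq = { .m = 243700000, .e = 1 };	/* 2.437 GHz */
	int f = fwrq.m / 100000;				/* 2437 MHz */
	int chan = ieee80211_frequency_to_channel(f);		/* channel 6 */

	(void)chan;
}
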
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get frequency
- */
-static int airo_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_freq *fwrq = &wrqu->freq;
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
- int ch;
-
- readConfigRid(local, 1);
- if ((local->config.opmode & MODE_CFG_MASK) == MODE_STA_ESS)
- status_rid.channel = local->config.channelSet;
- else
- readStatusRid(local, &status_rid, 1);
-
- ch = le16_to_cpu(status_rid.channel);
- if ((ch > 0) && (ch < 15)) {
- fwrq->m = 100000 *
- ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
- fwrq->e = 1;
- } else {
- fwrq->m = ch;
- fwrq->e = 0;
- }
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set ESSID
- */
-static int airo_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->essid;
- struct airo_info *local = dev->ml_priv;
- SsidRid SSID_rid; /* SSIDs */
-
- /* Reload the list of current SSID */
- readSsidRid(local, &SSID_rid);
-
- /* Check if we asked for `any' */
- if (dwrq->flags == 0) {
- /* Just send an empty SSID list */
- memset(&SSID_rid, 0, sizeof(SSID_rid));
- } else {
- unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- /* Check the size of the string */
- if (dwrq->length > IW_ESSID_MAX_SIZE)
- return -E2BIG ;
-
- /* Check if index is valid */
- if (index >= ARRAY_SIZE(SSID_rid.ssids))
- return -EINVAL;
-
- /* Set the SSID */
- memset(SSID_rid.ssids[index].ssid, 0,
- sizeof(SSID_rid.ssids[index].ssid));
- memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length);
- SSID_rid.ssids[index].len = cpu_to_le16(dwrq->length);
- }
- SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
- /* Write it to the card */
- disable_MAC(local, 1);
- writeSsidRid(local, &SSID_rid, 1);
- enable_MAC(local, 1);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get ESSID
- */
-static int airo_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->essid;
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
-
- readStatusRid(local, &status_rid, 1);
-
- /* Note : if dwrq->flags != 0, we should
- * get the relevant SSID from the SSID list... */
-
- /* Get the current SSID */
- memcpy(extra, status_rid.SSID, le16_to_cpu(status_rid.SSIDlen));
- /* If none, we may want to get the one that was set */
-
- /* Push it out ! */
- dwrq->length = le16_to_cpu(status_rid.SSIDlen);
- dwrq->flags = 1; /* active */
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set AP address
- */
-static int airo_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct sockaddr *awrq = &wrqu->ap_addr;
- struct airo_info *local = dev->ml_priv;
- Cmd cmd;
- Resp rsp;
- APListRid *APList_rid = &local->APList;
-
- if (awrq->sa_family != ARPHRD_ETHER)
- return -EINVAL;
- else if (is_broadcast_ether_addr(awrq->sa_data) ||
- is_zero_ether_addr(awrq->sa_data)) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_LOSE_SYNC;
- if (down_interruptible(&local->sem))
- return -ERESTARTSYS;
- issuecommand(local, &cmd, &rsp, true);
- up(&local->sem);
- } else {
- memset(APList_rid, 0, sizeof(*APList_rid));
- APList_rid->len = cpu_to_le16(sizeof(*APList_rid));
- memcpy(APList_rid->ap[0], awrq->sa_data, ETH_ALEN);
- disable_MAC(local, 1);
- writeAPListRid(local, APList_rid, 1);
- enable_MAC(local, 1);
- }
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get AP address
- */
-static int airo_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct sockaddr *awrq = &wrqu->ap_addr;
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
-
- readStatusRid(local, &status_rid, 1);
-
- /* Tentative. This seems to work, wow, I'm lucky !!! */
- memcpy(awrq->sa_data, status_rid.bssid[0], ETH_ALEN);
- awrq->sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Nickname
- */
-static int airo_set_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct airo_info *local = dev->ml_priv;
-
- /* Check the size of the string */
- if (dwrq->length > 16) {
- return -E2BIG;
- }
- readConfigRid(local, 1);
- memset(local->config.nodeName, 0, sizeof(local->config.nodeName));
- memcpy(local->config.nodeName, extra, dwrq->length);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Nickname
- */
-static int airo_get_nick(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- strncpy(extra, local->config.nodeName, 16);
- extra[16] = '\0';
- dwrq->length = strlen(extra);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Bit-Rate
- */
-static int airo_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->bitrate;
- struct airo_info *local = dev->ml_priv;
- CapabilityRid cap_rid; /* Card capability info */
- u8 brate = 0;
- int i;
-
- /* First : get a valid bit rate value */
- readCapabilityRid(local, &cap_rid, 1);
-
- /* Which type of value ? */
- if ((vwrq->value < 8) && (vwrq->value >= 0)) {
- /* Setting by rate index */
- /* Find value in the magic rate table */
- brate = cap_rid.supportedRates[vwrq->value];
- } else {
- /* Setting by frequency value */
- u8 normvalue = (u8) (vwrq->value/500000);
-
- /* Check if rate is valid */
- for (i = 0 ; i < 8 ; i++) {
- if (normvalue == cap_rid.supportedRates[i]) {
- brate = normvalue;
- break;
- }
- }
- }
- /* -1 designates the max rate (mostly auto mode) */
- if (vwrq->value == -1) {
- /* Get the highest available rate */
- for (i = 0 ; i < 8 ; i++) {
- if (cap_rid.supportedRates[i] == 0)
- break;
- }
- if (i != 0)
- brate = cap_rid.supportedRates[i - 1];
- }
- /* Check that it is valid */
- if (brate == 0) {
- return -EINVAL;
- }
-
- readConfigRid(local, 1);
- /* Now, check if we want a fixed or auto value */
- if (vwrq->fixed == 0) {
- /* Fill all the rates up to this max rate */
- memset(local->config.rates, 0, 8);
- for (i = 0 ; i < 8 ; i++) {
- local->config.rates[i] = cap_rid.supportedRates[i];
- if (local->config.rates[i] == brate)
- break;
- }
- } else {
- /* Fixed mode */
- /* One rate, fixed */
- memset(local->config.rates, 0, 8);
- local->config.rates[0] = brate;
- }
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Bit-Rate
- */
-static int airo_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->bitrate;
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
- int ret;
-
- ret = readStatusRid(local, &status_rid, 1);
- if (ret)
- return -EBUSY;
-
- vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
- /* If more than one rate, set auto */
- readConfigRid(local, 1);
- vwrq->fixed = (local->config.rates[1] == 0);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set RTS threshold
- */
-static int airo_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->rts;
- struct airo_info *local = dev->ml_priv;
- int rthr = vwrq->value;
-
- if (vwrq->disabled)
- rthr = AIRO_DEF_MTU;
- if ((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
- return -EINVAL;
- }
- readConfigRid(local, 1);
- local->config.rtsThres = cpu_to_le16(rthr);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get RTS threshold
- */
-static int airo_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->rts;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.rtsThres);
- vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Fragmentation threshold
- */
-static int airo_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *vwrq = &wrqu->frag;
- struct airo_info *local = dev->ml_priv;
- int fthr = vwrq->value;
-
- if (vwrq->disabled)
- fthr = AIRO_DEF_MTU;
- if ((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
- return -EINVAL;
- }
- fthr &= ~0x1; /* Get an even value - is it really needed ??? */
- readConfigRid(local, 1);
- local->config.fragThresh = cpu_to_le16(fthr);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Fragmentation threshold
- */
-static int airo_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->frag;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.fragThresh);
- vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Mode of Operation
- */
-static int airo_set_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq,
- char *extra)
-{
- __u32 mode = uwrq->mode;
- struct airo_info *local = dev->ml_priv;
- int reset = 0;
-
- readConfigRid(local, 1);
- if (sniffing_mode(local))
- reset = 1;
-
- switch (mode) {
- case IW_MODE_ADHOC:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_STA_IBSS;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_INFRA:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_STA_ESS;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_MASTER:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_AP;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_REPEAT:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_AP_RPTR;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_MONITOR:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_STA_ESS;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
- local->config.scanMode = SCANMODE_PASSIVE;
- set_bit (FLAG_802_11, &local->flags);
- break;
- default:
- return -EINVAL;
- }
- if (reset)
- set_bit (FLAG_RESET, &local->flags);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Mode of Operation
- */
-static int airo_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- /* If not managed, assume it's ad-hoc */
- switch (local->config.opmode & MODE_CFG_MASK) {
- case MODE_STA_ESS:
- uwrq->mode = IW_MODE_INFRA;
- break;
- case MODE_AP:
- uwrq->mode = IW_MODE_MASTER;
- break;
- case MODE_AP_RPTR:
- uwrq->mode = IW_MODE_REPEAT;
- break;
- default:
- uwrq->mode = IW_MODE_ADHOC;
- }
-
- return 0;
-}
-
-static inline int valid_index(struct airo_info *ai, int index)
-{
- return (index >= 0) && (index <= ai->max_wep_idx);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Encryption Key
- */
-static int airo_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->encoding;
- struct airo_info *local = dev->ml_priv;
- int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1);
- __le16 currentAuthType = local->config.authType;
- int rc = 0;
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
-	/* Basic checking: do we have a key to set ?
-	 * Note : with the new API, it's impossible to get a NULL pointer.
-	 * Therefore, we need to check for a key size == 0 instead.
-	 * Newer versions of iwconfig properly set the IW_ENCODE_NOKEY flag
-	 * when no key is present (and only change flags), but older
-	 * versions don't. - Jean II */
- if (dwrq->length > 0) {
- wep_key_t key;
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- int current_index;
-
- /* Check the size of the key */
- if (dwrq->length > MAX_KEY_SIZE) {
- return -EINVAL;
- }
-
- current_index = get_wep_tx_idx(local);
- if (current_index < 0)
- current_index = 0;
-
- /* Check the index (none -> use current) */
- if (!valid_index(local, index))
- index = current_index;
-
- /* Set the length */
- if (dwrq->length > MIN_KEY_SIZE)
- key.len = MAX_KEY_SIZE;
- else
- key.len = MIN_KEY_SIZE;
- /* Check if the key is not marked as invalid */
- if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
- /* Cleanup */
- memset(key.key, 0, MAX_KEY_SIZE);
- /* Copy the key in the driver */
- memcpy(key.key, extra, dwrq->length);
- /* Send the key to the card */
- rc = set_wep_key(local, index, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set"
- " WEP key at index %d: %d.",
- index, rc);
- return rc;
- }
- }
- /* WE specify that if a valid key is set, encryption
- * should be enabled (user may turn it off later)
- * This is also how "iwconfig ethX key on" works */
- if ((index == current_index) && (key.len > 0) &&
- (local->config.authType == AUTH_OPEN))
- set_auth_type(local, AUTH_ENCRYPT);
- } else {
- /* Do we want to just set the transmit key index ? */
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- if (valid_index(local, index)) {
- rc = set_wep_tx_idx(local, index, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set"
- " WEP transmit index to %d: %d.",
- index, rc);
- return rc;
- }
- } else {
- /* Don't complain if only change the mode */
- if (!(dwrq->flags & IW_ENCODE_MODE))
- return -EINVAL;
- }
- }
- /* Read the flags */
- if (dwrq->flags & IW_ENCODE_DISABLED)
- set_auth_type(local, AUTH_OPEN); /* disable encryption */
- if (dwrq->flags & IW_ENCODE_RESTRICTED)
- set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
- if (dwrq->flags & IW_ENCODE_OPEN)
- set_auth_type(local, AUTH_ENCRYPT); /* Only Wep */
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
- return -EINPROGRESS; /* Call commit handler */
-}
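
/*
 * Aside: a minimal sketch of the key-length rounding airo_set_encode()
 * applies above, assuming the driver's usual WEP sizes of
 * MIN_KEY_SIZE = 5 (40-bit) and MAX_KEY_SIZE = 13 (104-bit).  Any user
 * key longer than 5 bytes is stored as a 13-byte key, everything else
 * as a 5-byte key; the helper name is illustrative only.
 */
static inline int wep_rounded_len(int user_len)
{
	return (user_len > 5) ? 13 : 5;	/* e.g. 10 -> 13, 5 -> 5, 1 -> 5 */
}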
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Encryption Key
- */
-static int airo_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->encoding;
- struct airo_info *local = dev->ml_priv;
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- int wep_key_len;
- u8 buf[16];
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- /* Check encryption mode */
- switch(local->config.authType) {
- case AUTH_ENCRYPT:
- dwrq->flags = IW_ENCODE_OPEN;
- break;
- case AUTH_SHAREDKEY:
- dwrq->flags = IW_ENCODE_RESTRICTED;
- break;
- default:
- case AUTH_OPEN:
- dwrq->flags = IW_ENCODE_DISABLED;
- break;
- }
- /* We can't return the key, so set the proper flag and return zero */
- dwrq->flags |= IW_ENCODE_NOKEY;
- memset(extra, 0, 16);
-
- /* Which key do we want ? -1 -> tx index */
- if (!valid_index(local, index)) {
- index = get_wep_tx_idx(local);
- if (index < 0)
- index = 0;
- }
- dwrq->flags |= index + 1;
-
- /* Copy the key to the user buffer */
- wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf));
- if (wep_key_len < 0) {
- dwrq->length = 0;
- } else {
- dwrq->length = wep_key_len;
- memcpy(extra, buf, dwrq->length);
- }
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set extended Encryption parameters
- */
-static int airo_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int perm = (encoding->flags & IW_ENCODE_TEMP ? 0 : 1);
- __le16 currentAuthType = local->config.authType;
- int idx, key_len, alg = ext->alg, set_key = 1, rc;
- wep_key_t key;
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- /* Determine and validate the key index */
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (!valid_index(local, idx - 1))
- return -EINVAL;
- idx--;
- } else {
- idx = get_wep_tx_idx(local);
- if (idx < 0)
- idx = 0;
- }
-
- if (encoding->flags & IW_ENCODE_DISABLED)
- alg = IW_ENCODE_ALG_NONE;
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- /* Only set transmit key index here, actual
- * key is set below if needed.
- */
- rc = set_wep_tx_idx(local, idx, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set "
- "WEP transmit index to %d: %d.",
- idx, rc);
- return rc;
- }
- set_key = ext->key_len > 0 ? 1 : 0;
- }
-
- if (set_key) {
- /* Set the requested key first */
- memset(key.key, 0, MAX_KEY_SIZE);
- switch (alg) {
- case IW_ENCODE_ALG_NONE:
- key.len = 0;
- break;
- case IW_ENCODE_ALG_WEP:
- if (ext->key_len > MIN_KEY_SIZE) {
- key.len = MAX_KEY_SIZE;
- } else if (ext->key_len > 0) {
- key.len = MIN_KEY_SIZE;
- } else {
- return -EINVAL;
- }
- key_len = min (ext->key_len, key.len);
- memcpy(key.key, ext->key, key_len);
- break;
- default:
- return -EINVAL;
- }
- if (key.len == 0) {
- rc = set_wep_tx_idx(local, idx, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name,
- "failed to set WEP transmit index to %d: %d.",
- idx, rc);
- return rc;
- }
- } else {
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name,
- "failed to set WEP key at index %d: %d.",
- idx, rc);
- return rc;
- }
- }
- }
-
- /* Read the flags */
- if (encoding->flags & IW_ENCODE_DISABLED)
- set_auth_type(local, AUTH_OPEN); /* disable encryption */
- if (encoding->flags & IW_ENCODE_RESTRICTED)
- set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
- if (encoding->flags & IW_ENCODE_OPEN)
- set_auth_type(local, AUTH_ENCRYPT);
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get extended Encryption parameters
- */
-static int airo_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, max_key_len, wep_key_len;
- u8 buf[16];
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- max_key_len = encoding->length - sizeof(*ext);
- if (max_key_len < 0)
- return -EINVAL;
-
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (!valid_index(local, idx - 1))
- return -EINVAL;
- idx--;
- } else {
- idx = get_wep_tx_idx(local);
- if (idx < 0)
- idx = 0;
- }
-
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- /* Check encryption mode */
- switch(local->config.authType) {
- case AUTH_ENCRYPT:
- encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED;
- break;
- case AUTH_SHAREDKEY:
- encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED;
- break;
- default:
- case AUTH_OPEN:
- encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED;
- break;
- }
- /* We can't return the key, so set the proper flag and return zero */
- encoding->flags |= IW_ENCODE_NOKEY;
- memset(extra, 0, 16);
-
- /* Copy the key to the user buffer */
- wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
- if (wep_key_len < 0) {
- ext->key_len = 0;
- } else {
- ext->key_len = wep_key_len;
- memcpy(extra, buf, ext->key_len);
- }
-
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set extended authentication parameters
- */
-static int airo_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_param *param = &wrqu->param;
- __le16 currentAuthType = local->config.authType;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- case IW_AUTH_PRIVACY_INVOKED:
- /*
- * airo does not use these parameters
- */
- break;
-
- case IW_AUTH_DROP_UNENCRYPTED:
- if (param->value) {
- /* Only change auth type if unencrypted */
- if (currentAuthType == AUTH_OPEN)
- set_auth_type(local, AUTH_ENCRYPT);
- } else {
- set_auth_type(local, AUTH_OPEN);
- }
-
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
- break;
-
- case IW_AUTH_80211_AUTH_ALG: {
- if (param->value & IW_AUTH_ALG_SHARED_KEY) {
- set_auth_type(local, AUTH_SHAREDKEY);
- } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
-			/* We don't know here if WEP open-system or
-			 * unencrypted mode was requested - so fall back
-			 * to whichever of those two modes was used last
-			 */
- set_auth_type(local, local->last_auth);
- } else
- return -EINVAL;
-
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
- break;
- }
-
- case IW_AUTH_WPA_ENABLED:
- /* Silently accept disable of WPA */
- if (param->value > 0)
- return -EOPNOTSUPP;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return -EINPROGRESS;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get extended authentication parameters
- */
-static int airo_get_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_param *param = &wrqu->param;
- __le16 currentAuthType = local->config.authType;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_DROP_UNENCRYPTED:
- switch (currentAuthType) {
- case AUTH_SHAREDKEY:
- case AUTH_ENCRYPT:
- param->value = 1;
- break;
- default:
- param->value = 0;
- break;
- }
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- switch (currentAuthType) {
- case AUTH_SHAREDKEY:
- param->value = IW_AUTH_ALG_SHARED_KEY;
- break;
- case AUTH_ENCRYPT:
- default:
- param->value = IW_AUTH_ALG_OPEN_SYSTEM;
- break;
- }
- break;
-
- case IW_AUTH_WPA_ENABLED:
- param->value = 0;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Tx-Power
- */
-static int airo_set_txpow(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->txpower;
- struct airo_info *local = dev->ml_priv;
- CapabilityRid cap_rid; /* Card capability info */
- int i;
- int rc = -EINVAL;
- __le16 v = cpu_to_le16(vwrq->value);
-
- readCapabilityRid(local, &cap_rid, 1);
-
- if (vwrq->disabled) {
- set_bit (FLAG_RADIO_OFF, &local->flags);
- set_bit (FLAG_COMMIT, &local->flags);
- return -EINPROGRESS; /* Call commit handler */
- }
- if (vwrq->flags != IW_TXPOW_MWATT) {
- return -EINVAL;
- }
- clear_bit (FLAG_RADIO_OFF, &local->flags);
- for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++)
- if (v == cap_rid.txPowerLevels[i]) {
- readConfigRid(local, 1);
- local->config.txPower = v;
- set_bit (FLAG_COMMIT, &local->flags);
- rc = -EINPROGRESS; /* Call commit handler */
- break;
- }
- return rc;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Tx-Power
- */
-static int airo_get_txpow(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->txpower;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.txPower);
- vwrq->fixed = 1; /* No power control */
- vwrq->disabled = test_bit(FLAG_RADIO_OFF, &local->flags);
- vwrq->flags = IW_TXPOW_MWATT;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Retry limits
- */
-static int airo_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->retry;
- struct airo_info *local = dev->ml_priv;
- int rc = -EINVAL;
-
- if (vwrq->disabled) {
- return -EINVAL;
- }
- readConfigRid(local, 1);
- if (vwrq->flags & IW_RETRY_LIMIT) {
- __le16 v = cpu_to_le16(vwrq->value);
- if (vwrq->flags & IW_RETRY_LONG)
- local->config.longRetryLimit = v;
- else if (vwrq->flags & IW_RETRY_SHORT)
- local->config.shortRetryLimit = v;
- else {
- /* No modifier : set both */
- local->config.longRetryLimit = v;
- local->config.shortRetryLimit = v;
- }
- set_bit (FLAG_COMMIT, &local->flags);
- rc = -EINPROGRESS; /* Call commit handler */
- }
- if (vwrq->flags & IW_RETRY_LIFETIME) {
- local->config.txLifetime = cpu_to_le16(vwrq->value / 1024);
- set_bit (FLAG_COMMIT, &local->flags);
- rc = -EINPROGRESS; /* Call commit handler */
- }
- return rc;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Retry limits
- */
-static int airo_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->retry;
- struct airo_info *local = dev->ml_priv;
-
- vwrq->disabled = 0; /* Can't be disabled */
-
- readConfigRid(local, 1);
- /* Note : by default, display the min retry number */
- if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- vwrq->flags = IW_RETRY_LIFETIME;
- vwrq->value = le16_to_cpu(local->config.txLifetime) * 1024;
- } else if ((vwrq->flags & IW_RETRY_LONG)) {
- vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- vwrq->value = le16_to_cpu(local->config.longRetryLimit);
- } else {
- vwrq->flags = IW_RETRY_LIMIT;
- vwrq->value = le16_to_cpu(local->config.shortRetryLimit);
- if (local->config.shortRetryLimit != local->config.longRetryLimit)
- vwrq->flags |= IW_RETRY_SHORT;
- }
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get range info
- */
-static int airo_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct airo_info *local = dev->ml_priv;
- struct iw_range *range = (struct iw_range *) extra;
- CapabilityRid cap_rid; /* Card capability info */
- int i;
- int k;
-
- readCapabilityRid(local, &cap_rid, 1);
-
- dwrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(*range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 14;
-	/* Should be based on cap_rid.country to report only
-	 * what the current card supports */
- k = 0;
- for (i = 0; i < 14; i++) {
- range->freq[k].i = i + 1; /* List index */
- range->freq[k].m = 100000 *
- ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
- range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
- }
- range->num_frequency = k;
-
- range->sensitivity = 65535;
-
- /* Hum... Should put the right values there */
- if (local->rssi)
- range->max_qual.qual = 100; /* % */
- else
- range->max_qual.qual = airo_get_max_quality(&cap_rid);
- range->max_qual.level = 0x100 - 120; /* -120 dBm */
- range->max_qual.noise = 0x100 - 120; /* -120 dBm */
-
-	/* Experimental measurements - boundary 11/5.5 Mb/s */
-	/* Note : with and without an RSSI map (local->rssi), the
-	 * results are somewhat different. - Jean II */
- if (local->rssi) {
- range->avg_qual.qual = 50; /* % */
- range->avg_qual.level = 0x100 - 70; /* -70 dBm */
- } else {
- range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
- range->avg_qual.level = 0x100 - 80; /* -80 dBm */
- }
- range->avg_qual.noise = 0x100 - 85; /* -85 dBm */
-
- for (i = 0 ; i < 8 ; i++) {
- range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
- if (range->bitrate[i] == 0)
- break;
- }
- range->num_bitrates = i;
-
-	/* Set an indication of the max TCP throughput
-	 * in bit/s that we can expect using this interface.
-	 * May be used for QoS stuff... Jean II */
- if (i > 2)
- range->throughput = 5000 * 1000;
- else
- range->throughput = 1500 * 1000;
-
- range->min_rts = 0;
- range->max_rts = AIRO_DEF_MTU;
- range->min_frag = 256;
- range->max_frag = AIRO_DEF_MTU;
-
- if (cap_rid.softCap & cpu_to_le16(2)) {
- // WEP: RC4 40 bits
- range->encoding_size[0] = 5;
- // RC4 ~128 bits
- if (cap_rid.softCap & cpu_to_le16(0x100)) {
- range->encoding_size[1] = 13;
- range->num_encoding_sizes = 2;
- } else
- range->num_encoding_sizes = 1;
- range->max_encoding_tokens =
- cap_rid.softCap & cpu_to_le16(0x80) ? 4 : 1;
- } else {
- range->num_encoding_sizes = 0;
- range->max_encoding_tokens = 0;
- }
- range->min_pmp = 0;
- range->max_pmp = 5000000; /* 5 secs */
- range->min_pmt = 0;
- range->max_pmt = 65535 * 1024; /* ??? */
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
-
- /* Transmit Power - values are in mW */
- for (i = 0 ; i < 8 ; i++) {
- range->txpower[i] = le16_to_cpu(cap_rid.txPowerLevels[i]);
- if (range->txpower[i] == 0)
- break;
- }
- range->num_txpower = i;
- range->txpower_capa = IW_TXPOW_MWATT;
- range->we_version_source = 19;
- range->we_version_compiled = WIRELESS_EXT;
- range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
- range->retry_flags = IW_RETRY_LIMIT;
- range->r_time_flags = IW_RETRY_LIFETIME;
- range->min_retry = 1;
- range->max_retry = 65535;
- range->min_r_time = 1024;
- range->max_r_time = 65535 * 1024;
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVTXDROP);
- return 0;
-}
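
/*
 * Aside: a worked example of the (m, e) frequency encoding filled in by
 * airo_get_range() above.  Wireless Extensions express a frequency as
 * m * 10^e Hz; for channel 6, ieee80211_channel_to_frequency() returns
 * 2437 (MHz), so m = 100000 * 2437 and e = 1.  The helper below is a
 * sketch, not driver code.
 */
static inline unsigned long long iw_freq_to_hz_example(void)
{
	unsigned long long m = 100000ULL * 2437;	/* channel 6 */

	return m * 10;	/* m * 10^e with e == 1: 2437000000 Hz = 2.437 GHz */
}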
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Power Management
- */
-static int airo_set_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *vwrq = &wrqu->power;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- if (vwrq->disabled) {
- if (sniffing_mode(local))
- return -EINVAL;
- local->config.powerSaveMode = POWERSAVE_CAM;
- local->config.rmode &= ~RXMODE_MASK;
- local->config.rmode |= RXMODE_BC_MC_ADDR;
- set_bit (FLAG_COMMIT, &local->flags);
- return -EINPROGRESS; /* Call commit handler */
- }
- if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- local->config.fastListenDelay = cpu_to_le16((vwrq->value + 500) / 1024);
- local->config.powerSaveMode = POWERSAVE_PSPCAM;
- set_bit (FLAG_COMMIT, &local->flags);
- } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
- local->config.fastListenInterval =
- local->config.listenInterval =
- cpu_to_le16((vwrq->value + 500) / 1024);
- local->config.powerSaveMode = POWERSAVE_PSPCAM;
- set_bit (FLAG_COMMIT, &local->flags);
- }
- switch (vwrq->flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- if (sniffing_mode(local))
- return -EINVAL;
- local->config.rmode &= ~RXMODE_MASK;
- local->config.rmode |= RXMODE_ADDR;
- set_bit (FLAG_COMMIT, &local->flags);
- break;
- case IW_POWER_ALL_R:
- if (sniffing_mode(local))
- return -EINVAL;
- local->config.rmode &= ~RXMODE_MASK;
- local->config.rmode |= RXMODE_BC_MC_ADDR;
- set_bit (FLAG_COMMIT, &local->flags);
- break;
- case IW_POWER_ON:
- /* This is broken, fixme ;-) */
- break;
- default:
- return -EINVAL;
- }
- // Note : we may want to factor local->need_commit here
- // Note2 : may also want to factor RXMODE_RFMON test
- return -EINPROGRESS; /* Call commit handler */
-}
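
/*
 * Aside: a small sketch of the unit conversion airo_set_power() uses
 * above.  User space passes microseconds while the card works in
 * 1024-us units, so the driver computes (value + 500) / 1024; the +500
 * gives rough rounding (exact rounding would add 512).  For example,
 * 100000 us -> (100000 + 500) / 1024 = 98 card units, which
 * airo_get_power() reads back as 98 * 1024 = 100352 us.
 */
static inline u16 us_to_card_units(u32 us)
{
	return (us + 500) / 1024;	/* e.g. 100000 -> 98 */
}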
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Power Management
- */
-static int airo_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->power;
- struct airo_info *local = dev->ml_priv;
- __le16 mode;
-
- readConfigRid(local, 1);
- mode = local->config.powerSaveMode;
- if ((vwrq->disabled = (mode == POWERSAVE_CAM)))
- return 0;
- if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- vwrq->value = le16_to_cpu(local->config.fastListenDelay) * 1024;
- vwrq->flags = IW_POWER_TIMEOUT;
- } else {
- vwrq->value = le16_to_cpu(local->config.fastListenInterval) * 1024;
- vwrq->flags = IW_POWER_PERIOD;
- }
- if ((local->config.rmode & RXMODE_MASK) == RXMODE_ADDR)
- vwrq->flags |= IW_POWER_UNICAST_R;
- else
- vwrq->flags |= IW_POWER_ALL_R;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Sensitivity
- */
-static int airo_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->sens;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- local->config.rssiThreshold =
- cpu_to_le16(vwrq->disabled ? RSSI_DEFAULT : vwrq->value);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Sensitivity
- */
-static int airo_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *vwrq = &wrqu->sens;
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.rssiThreshold);
- vwrq->disabled = (vwrq->value == 0);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get AP List
- * Note : this is deprecated in favor of IWSCAN
- */
-static int airo_get_aplist(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct airo_info *local = dev->ml_priv;
- struct sockaddr *address = (struct sockaddr *) extra;
- struct iw_quality *qual;
- BSSListRid BSSList;
- int i;
- int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
-
- qual = kmalloc_array(IW_MAX_AP, sizeof(*qual), GFP_KERNEL);
- if (!qual)
- return -ENOMEM;
-
- for (i = 0; i < IW_MAX_AP; i++) {
- u16 dBm;
- if (readBSSListRid(local, loseSync, &BSSList))
- break;
- loseSync = 0;
- memcpy(address[i].sa_data, BSSList.bssid, ETH_ALEN);
- address[i].sa_family = ARPHRD_ETHER;
- dBm = le16_to_cpu(BSSList.dBm);
- if (local->rssi) {
- qual[i].level = 0x100 - dBm;
- qual[i].qual = airo_dbm_to_pct(local->rssi, dBm);
- qual[i].updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- } else {
- qual[i].level = (dBm + 321) / 2;
- qual[i].qual = 0;
- qual[i].updated = IW_QUAL_QUAL_INVALID
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- }
- qual[i].noise = local->wstats.qual.noise;
- if (BSSList.index == cpu_to_le16(0xffff))
- break;
- }
- if (!i) {
- StatusRid status_rid; /* Card status info */
- readStatusRid(local, &status_rid, 1);
- for (i = 0;
- i < min(IW_MAX_AP, 4) &&
- (status_rid.bssid[i][0]
- & status_rid.bssid[i][1]
- & status_rid.bssid[i][2]
- & status_rid.bssid[i][3]
- & status_rid.bssid[i][4]
- & status_rid.bssid[i][5])!=0xff &&
- (status_rid.bssid[i][0]
- | status_rid.bssid[i][1]
- | status_rid.bssid[i][2]
- | status_rid.bssid[i][3]
- | status_rid.bssid[i][4]
- | status_rid.bssid[i][5]);
- i++) {
- memcpy(address[i].sa_data,
- status_rid.bssid[i], ETH_ALEN);
- address[i].sa_family = ARPHRD_ETHER;
- }
- } else {
- dwrq->flags = 1; /* Should be define'd */
- memcpy(extra + sizeof(struct sockaddr) * i, qual,
- sizeof(struct iw_quality) * i);
- }
- dwrq->length = i;
-
- kfree(qual);
- return 0;
-}
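
/*
 * Aside: a note on the 0x100 - dBm trick used for the quality levels
 * above.  The card reports signal strength as a positive magnitude
 * (70 means -70 dBm), while Wireless Extensions carry the level as a
 * signed 8-bit value, so storing 0x100 - 70 = 186 lets user space read
 * the byte back as (s8)186 = -70 dBm.  Sketch only; the helper name is
 * not from the driver.
 */
static inline u8 dbm_magnitude_to_iw_level(u16 dbm)
{
	return 0x100 - dbm;	/* e.g. 70 (-70 dBm) -> 186 -> (s8)-70 */
}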
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : Initiate Scan
- */
-static int airo_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct airo_info *ai = dev->ml_priv;
- Cmd cmd;
- Resp rsp;
- int wake = 0;
- APListRid APList_rid_empty;
-
-	/* Note : you may have realised that, as this is a SET operation,
-	 * this is privileged and therefore a normal user can't
-	 * perform scanning.
-	 * This is not an error: while the device performs scanning,
-	 * traffic doesn't flow, so unprivileged scanning would be a
-	 * perfect DoS... Jean II */
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
-
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
-
- /* If there's already a scan in progress, don't
- * trigger another one. */
- if (ai->scan_timeout > 0)
- goto out;
-
- /* Clear APList as it affects scan results */
- memset(&APList_rid_empty, 0, sizeof(APList_rid_empty));
- APList_rid_empty.len = cpu_to_le16(sizeof(APList_rid_empty));
- disable_MAC(ai, 2);
- writeAPListRid(ai, &APList_rid_empty, 0);
- enable_MAC(ai, 0);
-
- /* Initiate a scan command */
- ai->scan_timeout = RUN_AT(3*HZ);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = CMD_LISTBSS;
- issuecommand(ai, &cmd, &rsp, true);
- wake = 1;
-
-out:
- up(&ai->sem);
- if (wake)
- wake_up_interruptible(&ai->thr_wait);
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Translate scan data returned from the card to a card-independent
- * format that the Wireless Tools will understand - Jean II
- */
-static inline char *airo_translate_scan(struct net_device *dev,
- struct iw_request_info *info,
- char *current_ev,
- char *end_buf,
- BSSListRid *bss)
-{
- struct airo_info *ai = dev->ml_priv;
- struct iw_event iwe; /* Temporary buffer */
- __le16 capabilities;
- char * current_val; /* For rates */
- int i;
- char * buf;
- u16 dBm;
-
- /* First entry *MUST* be the AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_ADDR_LEN);
-
- /* Other entries will be displayed in the order we give them */
-
- /* Add the ESSID */
- iwe.u.data.length = bss->ssidLen;
- if (iwe.u.data.length > 32)
- iwe.u.data.length = 32;
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, bss->ssid);
-
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- capabilities = bss->cap;
- if (capabilities & (CAP_ESS | CAP_IBSS)) {
- if (capabilities & CAP_ESS)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_UINT_LEN);
- }
-
- /* Add frequency */
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
- iwe.u.freq.m = 100000 *
- ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
- iwe.u.freq.e = 1;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_FREQ_LEN);
-
- dBm = le16_to_cpu(bss->dBm);
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- if (ai->rssi) {
- iwe.u.qual.level = 0x100 - dBm;
- iwe.u.qual.qual = airo_dbm_to_pct(ai->rssi, dBm);
- iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- } else {
- iwe.u.qual.level = (dBm + 321) / 2;
- iwe.u.qual.qual = 0;
- iwe.u.qual.updated = IW_QUAL_QUAL_INVALID
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- }
- iwe.u.qual.noise = ai->wstats.qual.noise;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if (capabilities & CAP_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, bss->ssid);
-
-	/* Rate : stuffing multiple values into a single event requires a
-	 * bit more magic - Jean II */
- current_val = current_ev + iwe_stream_lcp_len(info);
-
- iwe.cmd = SIOCGIWRATE;
- /* Those two flags are ignored... */
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- /* Max 8 values */
- for (i = 0 ; i < 8 ; i++) {
- /* NULL terminated */
- if (bss->rates[i] == 0)
- break;
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000);
- /* Add new value to event */
- current_val = iwe_stream_add_value(info, current_ev,
- current_val, end_buf,
- &iwe, IW_EV_PARAM_LEN);
- }
- /* Check if we added any event */
- if ((current_val - current_ev) > iwe_stream_lcp_len(info))
- current_ev = current_val;
-
- /* Beacon interval */
- buf = kmalloc(30, GFP_KERNEL);
- if (buf) {
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "bcn_int=%d", bss->beaconInterval);
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, buf);
- kfree(buf);
- }
-
- /* Put WPA/RSN Information Elements into the event stream */
- if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
- unsigned int num_null_ies = 0;
- u16 length = sizeof (bss->extra.iep);
- u8 *ie = (void *)&bss->extra.iep;
-
- while ((length >= 2) && (num_null_ies < 2)) {
- if (2 + ie[1] > length) {
- /* Invalid element, don't continue parsing IE */
- break;
- }
-
- switch (ie[0]) {
- case WLAN_EID_SSID:
- /* Two zero-length SSID elements
- * mean we're done parsing elements */
- if (!ie[1])
- num_null_ies++;
- break;
-
- case WLAN_EID_VENDOR_SPECIFIC:
- if (ie[1] >= 4 &&
- ie[2] == 0x00 &&
- ie[3] == 0x50 &&
- ie[4] == 0xf2 &&
- ie[5] == 0x01) {
- iwe.cmd = IWEVGENIE;
- /* 64 is an arbitrary cut-off */
- iwe.u.data.length = min(ie[1] + 2,
- 64);
- current_ev = iwe_stream_add_point(
- info, current_ev,
- end_buf, &iwe, ie);
- }
- break;
-
- case WLAN_EID_RSN:
- iwe.cmd = IWEVGENIE;
- /* 64 is an arbitrary cut-off */
- iwe.u.data.length = min(ie[1] + 2, 64);
- current_ev = iwe_stream_add_point(
- info, current_ev, end_buf,
- &iwe, ie);
- break;
-
- default:
- break;
- }
-
- length -= 2 + ie[1];
- ie += 2 + ie[1];
- }
- }
- return current_ev;
-}
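
/*
 * Aside: a quick sketch of the 802.11 supported-rate byte decoded in
 * airo_translate_scan() above.  The low 7 bits give the rate in
 * 500 kb/s units and the top bit (0x80) marks a basic rate, so
 * 0x96 -> 0x16 = 22 -> 22 * 500000 = 11 Mb/s.
 */
static inline int rate_byte_to_bps(u8 rate)
{
	return (rate & 0x7f) * 500000;	/* e.g. 0x8b -> 5500000 (5.5 Mb/s) */
}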
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : Read Scan Results
- */
-static int airo_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct airo_info *ai = dev->ml_priv;
- BSSListElement *net;
- int err = 0;
- char *current_ev = extra;
-
- /* If a scan is in-progress, return -EAGAIN */
- if (ai->scan_timeout > 0)
- return -EAGAIN;
-
- if (down_interruptible(&ai->sem))
- return -EAGAIN;
-
- list_for_each_entry (net, &ai->network_list, list) {
- /* Translate to WE format this entry */
- current_ev = airo_translate_scan(dev, info, current_ev,
- extra + dwrq->length,
- &net->bss);
-
- /* Check if there is space for one more entry */
- if ((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
- /* Ask user space to try again with a bigger buffer */
- err = -E2BIG;
- goto out;
- }
- }
-
- /* Length of data */
- dwrq->length = (current_ev - extra);
- dwrq->flags = 0; /* todo */
-
-out:
- up(&ai->sem);
- return err;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Commit handler : called after a bunch of SET operations
- */
-static int airo_config_commit(struct net_device *dev,
- struct iw_request_info *info, /* NULL */
- union iwreq_data *wrqu, /* NULL */
- char *extra) /* NULL */
-{
- struct airo_info *local = dev->ml_priv;
-
- if (!test_bit (FLAG_COMMIT, &local->flags))
- return 0;
-
-	/* Some of the "SET" functions may have modified some of the
-	 * parameters. It's now time to commit them to the card */
- disable_MAC(local, 1);
- if (test_bit (FLAG_RESET, &local->flags)) {
- SsidRid SSID_rid;
-
- readSsidRid(local, &SSID_rid);
- if (test_bit(FLAG_MPI,&local->flags))
- setup_card(local, dev, 1);
- else
- reset_airo_card(dev);
- disable_MAC(local, 1);
- writeSsidRid(local, &SSID_rid, 1);
- writeAPListRid(local, &local->APList, 1);
- }
- if (down_interruptible(&local->sem))
- return -ERESTARTSYS;
- writeConfigRid(local, 0);
- enable_MAC(local, 0);
- if (test_bit (FLAG_RESET, &local->flags))
- airo_set_promisc(local, true);
- else
- up(&local->sem);
-
- return 0;
-}
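
/*
 * Aside: the commit pattern used throughout the handlers above.  A SET
 * handler stages changes in local->config, sets FLAG_COMMIT and returns
 * -EINPROGRESS, which makes the wireless core invoke the SIOCSIWCOMMIT
 * handler (airo_config_commit) to push everything to the card at once.
 * A hypothetical new handler would follow the same shape:
 */
static int airo_set_example(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);		/* refresh cached config */
	/* ... stage changes in local->config here ... */
	set_bit(FLAG_COMMIT, &local->flags);	/* mark config dirty */

	return -EINPROGRESS;			/* ask core to call commit */
}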
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const struct iw_priv_args airo_private_args[] = {
-/*{ cmd, set_args, get_args, name } */
- { AIROIOCTL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
- IW_PRIV_TYPE_BYTE | 2047, "airoioctl" },
- { AIROIDIFC, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "airoidifc" },
-};
-
-static const iw_handler airo_handler[] =
-{
- IW_HANDLER(SIOCSIWCOMMIT, airo_config_commit),
- IW_HANDLER(SIOCGIWNAME, airo_get_name),
- IW_HANDLER(SIOCSIWFREQ, airo_set_freq),
- IW_HANDLER(SIOCGIWFREQ, airo_get_freq),
- IW_HANDLER(SIOCSIWMODE, airo_set_mode),
- IW_HANDLER(SIOCGIWMODE, airo_get_mode),
- IW_HANDLER(SIOCSIWSENS, airo_set_sens),
- IW_HANDLER(SIOCGIWSENS, airo_get_sens),
- IW_HANDLER(SIOCGIWRANGE, airo_get_range),
- IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- IW_HANDLER(SIOCSIWAP, airo_set_wap),
- IW_HANDLER(SIOCGIWAP, airo_get_wap),
- IW_HANDLER(SIOCGIWAPLIST, airo_get_aplist),
- IW_HANDLER(SIOCSIWSCAN, airo_set_scan),
- IW_HANDLER(SIOCGIWSCAN, airo_get_scan),
- IW_HANDLER(SIOCSIWESSID, airo_set_essid),
- IW_HANDLER(SIOCGIWESSID, airo_get_essid),
- IW_HANDLER(SIOCSIWNICKN, airo_set_nick),
- IW_HANDLER(SIOCGIWNICKN, airo_get_nick),
- IW_HANDLER(SIOCSIWRATE, airo_set_rate),
- IW_HANDLER(SIOCGIWRATE, airo_get_rate),
- IW_HANDLER(SIOCSIWRTS, airo_set_rts),
- IW_HANDLER(SIOCGIWRTS, airo_get_rts),
- IW_HANDLER(SIOCSIWFRAG, airo_set_frag),
- IW_HANDLER(SIOCGIWFRAG, airo_get_frag),
- IW_HANDLER(SIOCSIWTXPOW, airo_set_txpow),
- IW_HANDLER(SIOCGIWTXPOW, airo_get_txpow),
- IW_HANDLER(SIOCSIWRETRY, airo_set_retry),
- IW_HANDLER(SIOCGIWRETRY, airo_get_retry),
- IW_HANDLER(SIOCSIWENCODE, airo_set_encode),
- IW_HANDLER(SIOCGIWENCODE, airo_get_encode),
- IW_HANDLER(SIOCSIWPOWER, airo_set_power),
- IW_HANDLER(SIOCGIWPOWER, airo_get_power),
- IW_HANDLER(SIOCSIWAUTH, airo_set_auth),
- IW_HANDLER(SIOCGIWAUTH, airo_get_auth),
- IW_HANDLER(SIOCSIWENCODEEXT, airo_set_encodeext),
- IW_HANDLER(SIOCGIWENCODEEXT, airo_get_encodeext),
-};
-
-/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
- * We want to force the use of the ioctl code, because those commands
- * won't work through the iw_handler code (they simultaneously read
- * and write data, and iw_handler can't do that).
- * Note that it's perfectly legal to read/write on a single ioctl command,
- * you just can't use iwpriv and need to go via the ioctl handler.
- * Jean II */
-static const iw_handler airo_private_handler[] =
-{
- NULL, /* SIOCIWFIRSTPRIV */
-};
-
-static const struct iw_handler_def airo_handler_def =
-{
- .num_standard = ARRAY_SIZE(airo_handler),
- .num_private = ARRAY_SIZE(airo_private_handler),
- .num_private_args = ARRAY_SIZE(airo_private_args),
- .standard = airo_handler,
- .private = airo_private_handler,
- .private_args = airo_private_args,
- .get_wireless_stats = airo_get_wireless_stats,
-};
-
-/*
- * This defines the configuration part of the Wireless Extensions
- * Note : irq and spinlock protection will occur in the subroutines
- *
- * TODO :
- * o Check input value more carefully and fill correct values in range
- * o Test and shakeout the bugs (if any)
- *
- * Jean II
- *
- * Javier Achirica did a great job of merging code from the unnamed CISCO
- * developer that added support for flashing the card.
- */
-static int airo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd)
-{
- int rc = 0;
- struct airo_info *ai = dev->ml_priv;
-
- if (ai->power.event)
- return 0;
-
- switch (cmd) {
-#ifdef CISCO_EXT
- case AIROIDIFC:
-#ifdef AIROOLDIDIFC
- case AIROOLDIDIFC:
-#endif
- {
- int val = AIROMAGIC;
- aironet_ioctl com;
- if (copy_from_user(&com, data, sizeof(com)))
- rc = -EFAULT;
- else if (copy_to_user(com.data, (char *)&val, sizeof(val)))
- rc = -EFAULT;
- }
- break;
-
- case AIROIOCTL:
-#ifdef AIROOLDIOCTL
- case AIROOLDIOCTL:
-#endif
- /* Get the command struct and hand it off for evaluation by
- * the proper subfunction
- */
- {
- aironet_ioctl com;
- if (copy_from_user(&com, data, sizeof(com))) {
- rc = -EFAULT;
- break;
- }
-
-			/* The command-code ranges checked below bracket
-			 * which operations are legal: reads, writes or
-			 * flash commands */
- if (com.command == AIRORSWVERSION) {
- if (copy_to_user(com.data, swversion, sizeof(swversion)))
- rc = -EFAULT;
- else
- rc = 0;
- }
- else if (com.command <= AIRORRID)
- rc = readrids(dev,&com);
- else if (com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2))
- rc = writerids(dev,&com);
- else if (com.command >= AIROFLSHRST && com.command <= AIRORESTART)
- rc = flashcard(dev,&com);
- else
- rc = -EINVAL; /* Bad command in ioctl */
- }
- break;
-#endif /* CISCO_EXT */
-
- // All other calls are currently unsupported
- default:
- rc = -EOPNOTSUPP;
- }
- return rc;
-}
-
-/*
- * Get the Wireless stats out of the driver
- * Note : irq and spinlock protection will occur in the subroutines
- *
- * TODO :
- * o Check if this works in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs)
- *
- * Jean
- */
-static void airo_read_wireless_stats(struct airo_info *local)
-{
- StatusRid status_rid;
- StatsRid stats_rid;
- CapabilityRid cap_rid;
- __le32 *vals = stats_rid.vals;
-
- /* Get stats out of the card */
- if (local->power.event)
- return;
-
- readCapabilityRid(local, &cap_rid, 0);
- readStatusRid(local, &status_rid, 0);
- readStatsRid(local, &stats_rid, RID_STATS, 0);
-
- /* The status */
- local->wstats.status = le16_to_cpu(status_rid.mode);
-
- /* Signal quality and co */
- if (local->rssi) {
- local->wstats.qual.level =
- airo_rssi_to_dbm(local->rssi,
- le16_to_cpu(status_rid.sigQuality));
- /* normalizedSignalStrength appears to be a percentage */
- local->wstats.qual.qual =
- le16_to_cpu(status_rid.normalizedSignalStrength);
- } else {
- local->wstats.qual.level =
- (le16_to_cpu(status_rid.normalizedSignalStrength) + 321) / 2;
- local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
- }
- if (le16_to_cpu(status_rid.len) >= 124) {
- local->wstats.qual.noise = 0x100 - status_rid.noisedBm;
- local->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- } else {
- local->wstats.qual.noise = 0;
- local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_DBM;
- }
-
- /* Packets discarded in the wireless adapter due to wireless
- * specific problems */
- local->wstats.discard.nwid = le32_to_cpu(vals[56]) +
- le32_to_cpu(vals[57]) +
- le32_to_cpu(vals[58]); /* SSID Mismatch */
- local->wstats.discard.code = le32_to_cpu(vals[6]);/* RxWepErr */
- local->wstats.discard.fragment = le32_to_cpu(vals[30]);
- local->wstats.discard.retries = le32_to_cpu(vals[10]);
- local->wstats.discard.misc = le32_to_cpu(vals[1]) +
- le32_to_cpu(vals[32]);
- local->wstats.miss.beacon = le32_to_cpu(vals[34]);
-}
-
-static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
-{
- struct airo_info *local = dev->ml_priv;
-
- if (!down_interruptible(&local->sem)) {
- airo_read_wireless_stats(local);
- up(&local->sem);
- }
- return &local->wstats;
-}
-
-#ifdef CISCO_EXT
-/*
- * This just translates from driver IOCTL codes to the command codes to
- * feed to the radio's host interface. Things can be added/deleted
- * as needed. This represents the READ side of control I/O to
- * the card
- */
-static int readrids(struct net_device *dev, aironet_ioctl *comp)
-{
- unsigned short ridcode;
- unsigned char *iobuf;
- int len;
- struct airo_info *ai = dev->ml_priv;
-
- if (test_bit(FLAG_FLASHING, &ai->flags))
- return -EIO;
-
- switch(comp->command)
- {
- case AIROGCAP: ridcode = RID_CAPABILITIES; break;
- case AIROGCFG: ridcode = RID_CONFIG;
- if (test_bit(FLAG_COMMIT, &ai->flags)) {
- disable_MAC (ai, 1);
- writeConfigRid (ai, 1);
- enable_MAC(ai, 1);
- }
- break;
- case AIROGSLIST: ridcode = RID_SSID; break;
- case AIROGVLIST: ridcode = RID_APLIST; break;
- case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
- case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
- case AIROGSTAT: ridcode = RID_STATUS; break;
- case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
- case AIROGSTATSC32: ridcode = RID_STATS; break;
- case AIROGMICSTATS:
- if (copy_to_user(comp->data, &ai->micstats,
- min((int)comp->len, (int)sizeof(ai->micstats))))
- return -EFAULT;
- return 0;
- case AIRORRID: ridcode = comp->ridnum; break;
- default:
- return -EINVAL;
- }
-
- if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- }
-
- if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- PC4500_readrid(ai, ridcode, iobuf, RIDSIZE, 1);
-	/* Get the count of bytes in the RID; the docs say the first 2
-	 * bytes hold it. Then return the data to the user.
-	 * 9/22/2000 Honor user-given length.
-	 */
- len = comp->len;
-
- if (copy_to_user(comp->data, iobuf, min(len, (int)RIDSIZE))) {
- kfree (iobuf);
- return -EFAULT;
- }
- kfree (iobuf);
- return 0;
-}
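
/*
 * Aside: a minimal sketch of the RID layout implied by the comment in
 * readrids() above - the first two (little-endian) bytes of a RID
 * buffer hold its length.  Assumption: the buffer holds at least two
 * bytes; the helper is illustrative, not driver code.
 */
static inline u16 rid_length(const void *iobuf)
{
	return le16_to_cpu(*(const __le16 *)iobuf);	/* bytes 0-1 = length */
}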
-
-/*
- * Danger, Will Robinson: write the RIDs here
- */
-
-static int writerids(struct net_device *dev, aironet_ioctl *comp)
-{
- struct airo_info *ai = dev->ml_priv;
- int ridcode;
- int enabled;
- int (*writer)(struct airo_info *, u16 rid, const void *, int, int);
- unsigned char *iobuf;
-
- /* Only super-user can write RIDs */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (test_bit(FLAG_FLASHING, &ai->flags))
- return -EIO;
-
- ridcode = 0;
- writer = do_writerid;
-
- switch(comp->command)
- {
- case AIROPSIDS: ridcode = RID_SSID; break;
- case AIROPCAP: ridcode = RID_CAPABILITIES; break;
- case AIROPAPLIST: ridcode = RID_APLIST; break;
- case AIROPCFG: ai->config.len = 0;
- clear_bit(FLAG_COMMIT, &ai->flags);
- ridcode = RID_CONFIG; break;
- case AIROPWEPKEYNV: ridcode = RID_WEP_PERM; break;
- case AIROPLEAPUSR: ridcode = RID_LEAPUSERNAME; break;
- case AIROPLEAPPWD: ridcode = RID_LEAPPASSWORD; break;
- case AIROPWEPKEY: ridcode = RID_WEP_TEMP; writer = PC4500_writerid;
- break;
- case AIROPLEAPUSR+1: ridcode = 0xFF2A; break;
- case AIROPLEAPUSR+2: ridcode = 0xFF2B; break;
-
-	/* This is not really a RID but a command given to the card;
-	 * the same goes for MAC off below.
-	 */
- case AIROPMACON:
- if (enable_MAC(ai, 1) != 0)
- return -EIO;
- return 0;
-
-	/*
-	 * Evidently this code in the airo driver does not get its own
-	 * symbol for disable_MAC; it's probably so short the compiler
-	 * does not generate one.
-	 */
- case AIROPMACOFF:
- disable_MAC(ai, 1);
- return 0;
-
-	/* This command merely clears the counters; it does not actually
-	 * store any data, only reads a RID. But as it changes the card's
-	 * state, I put it in the writerid routines.
-	 */
- case AIROPSTCLR:
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- PC4500_readrid(ai, RID_STATSDELTACLEAR, iobuf, RIDSIZE, 1);
-
- enabled = ai->micstats.enabled;
- memset(&ai->micstats, 0, sizeof(ai->micstats));
- ai->micstats.enabled = enabled;
-
- if (copy_to_user(comp->data, iobuf,
- min((int)comp->len, (int)RIDSIZE))) {
- kfree (iobuf);
- return -EFAULT;
- }
- kfree (iobuf);
- return 0;
-
- default:
- return -EOPNOTSUPP; /* Blarg! */
- }
- if (comp->len > RIDSIZE)
- return -EINVAL;
-
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- if (copy_from_user(iobuf, comp->data, comp->len)) {
- kfree (iobuf);
- return -EFAULT;
- }
-
- if (comp->command == AIROPCFG) {
- ConfigRid *cfg = (ConfigRid *)iobuf;
-
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags))
- cfg->opmode |= MODE_MIC;
-
- if ((cfg->opmode & MODE_CFG_MASK) == MODE_STA_IBSS)
- set_bit (FLAG_ADHOC, &ai->flags);
- else
- clear_bit (FLAG_ADHOC, &ai->flags);
- }
-
- if ((*writer)(ai, ridcode, iobuf, comp->len, 1)) {
- kfree (iobuf);
- return -EIO;
- }
- kfree (iobuf);
- return 0;
-}
-
-/*****************************************************************************
- * Ancillary flash / mod functions; much black magic lurks here             *
- *****************************************************************************
- */
-
-/*
- * Flash command switch table
- */
-
-static int flashcard(struct net_device *dev, aironet_ioctl *comp)
-{
- int z;
-
- /* Only super-user can modify flash */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- switch(comp->command)
- {
- case AIROFLSHRST:
- return cmdreset((struct airo_info *)dev->ml_priv);
-
- case AIROFLSHSTFL:
- if (!AIRO_FLASH(dev) &&
- (AIRO_FLASH(dev) = kmalloc(FLASHSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
- return setflashmode((struct airo_info *)dev->ml_priv);
-
- case AIROFLSHGCHR: /* Get char from aux */
- if (comp->len != sizeof(int))
- return -EINVAL;
- if (copy_from_user(&z, comp->data, comp->len))
- return -EFAULT;
- return flashgchar((struct airo_info *)dev->ml_priv, z, 8000);
-
- case AIROFLSHPCHR: /* Send char to card. */
- if (comp->len != sizeof(int))
- return -EINVAL;
- if (copy_from_user(&z, comp->data, comp->len))
- return -EFAULT;
- return flashpchar((struct airo_info *)dev->ml_priv, z, 8000);
-
- case AIROFLPUTBUF: /* Send 32k to card */
- if (!AIRO_FLASH(dev))
- return -ENOMEM;
- if (comp->len > FLASHSIZE)
- return -EINVAL;
- if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len))
- return -EFAULT;
-
- flashputbuf((struct airo_info *)dev->ml_priv);
- return 0;
-
- case AIRORESTART:
- if (flashrestart((struct airo_info *)dev->ml_priv, dev))
- return -EIO;
- return 0;
- }
- return -EINVAL;
-}
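
/*
 * Aside: the overall flashing sequence the ioctls above drive, as seen
 * from user space (command names from the switch in flashcard()):
 *
 *	AIROFLSHRST  - cmdreset():      disable MAC, soft-reset the card
 *	AIROFLSHSTFL - setflashmode():  enter flash mode
 *	AIROFLSHGCHR / AIROFLSHPCHR - flashgchar()/flashpchar() handshake
 *	AIROFLPUTBUF - flashputbuf():   push a 32k firmware chunk
 *	AIRORESTART  - flashrestart():  re-init and bring the card back
 */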
-
-#define FLASH_COMMAND 0x7e7e
-
-/*
- * STEP 1)
- * Disable MAC and do soft reset on
- * card.
- */
-
-static int cmdreset(struct airo_info *ai)
-{
- disable_MAC(ai, 1);
-
- if (!waitbusy (ai)) {
- airo_print_info(ai->dev->name, "Waitbusy hang before RESET");
- return -EBUSY;
- }
-
- OUT4500(ai, COMMAND, CMD_SOFTRESET);
-
- ssleep(1); /* WAS 600 12/7/00 */
-
- if (!waitbusy (ai)) {
- airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET");
- return -EBUSY;
- }
- return 0;
-}
-
-/* STEP 2)
- * Put the card in legendary flash
- * mode
- */
-
-static int setflashmode (struct airo_info *ai)
-{
- set_bit (FLAG_FLASHING, &ai->flags);
-
- OUT4500(ai, SWS0, FLASH_COMMAND);
- OUT4500(ai, SWS1, FLASH_COMMAND);
- if (probe) {
- OUT4500(ai, SWS0, FLASH_COMMAND);
- OUT4500(ai, COMMAND, 0x10);
- } else {
- OUT4500(ai, SWS2, FLASH_COMMAND);
- OUT4500(ai, SWS3, FLASH_COMMAND);
- OUT4500(ai, COMMAND, 0);
- }
- msleep(500); /* 500ms delay */
-
- if (!waitbusy(ai)) {
- clear_bit (FLAG_FLASHING, &ai->flags);
- airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode");
- return -EIO;
- }
- return 0;
-}
-
-/* Put a character to SWS0 and wait up to dwelltime
- * (in us, polled every 50 us) for it to echo back.
- */
-
-static int flashpchar(struct airo_info *ai, int byte, int dwelltime)
-{
- int echo;
- int waittime;
-
- byte |= 0x8000;
-
- if (dwelltime == 0)
- dwelltime = 200;
-
- waittime = dwelltime;
-
- /* Wait for busy bit d15 to go false indicating buffer empty */
- while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
- udelay (50);
- waittime -= 50;
- }
-
- /* timeout for busy clear wait */
- if (waittime <= 0) {
- airo_print_info(ai->dev->name, "flash putchar busywait timeout!");
- return -EBUSY;
- }
-
-	/* The port is clear; now write the byte and wait for it to echo back */
- do {
- OUT4500(ai, SWS0, byte);
- udelay(50);
- dwelltime -= 50;
- echo = IN4500(ai, SWS1);
- } while (dwelltime >= 0 && echo != byte);
-
- OUT4500(ai, SWS1, 0);
-
- return (echo == byte) ? 0 : -EIO;
-}
-
-/* STEP 3)
- * Get a character from the card matching matchbyte
- */
-static int flashgchar(struct airo_info *ai, int matchbyte, int dwelltime)
-{
- int rchar;
- unsigned char rbyte = 0;
-
- do {
- rchar = IN4500(ai, SWS1);
-
- if (dwelltime && !(0x8000 & rchar)) {
- dwelltime -= 10;
- mdelay(10);
- continue;
- }
- rbyte = 0xff & rchar;
-
- if ((rbyte == matchbyte) && (0x8000 & rchar)) {
- OUT4500(ai, SWS1, 0);
- return 0;
- }
- if (rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
- break;
- OUT4500(ai, SWS1, 0);
-
- } while (dwelltime > 0);
- return -EIO;
-}
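
/*
 * Aside: a rough trace of the flash handshake implemented by
 * flashpchar()/flashgchar() above, with illustrative register values.
 * The host writes a byte with bit 15 set into SWS0, the card echoes it
 * through SWS1 with bit 15 set, and the host acks by clearing SWS1:
 *
 *	host: OUT4500(ai, SWS0, 0x8041);     'A' | 0x8000
 *	card: IN4500(ai, SWS1) == 0x8041     echo, bit 15 = data valid
 *	host: OUT4500(ai, SWS1, 0);          ack, ready for next byte
 */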
-
-/*
- * Transfer 32k of firmware data from user buffer to our buffer and
- * send to the card
- */
-
-static int flashputbuf(struct airo_info *ai)
-{
- int nwords;
-
- /* Write stuff */
- if (test_bit(FLAG_MPI,&ai->flags))
- memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE);
- else {
- OUT4500(ai, AUXPAGE, 0x100);
- OUT4500(ai, AUXOFF, 0);
-
- for (nwords = 0; nwords != FLASHSIZE / 2; nwords++) {
- OUT4500(ai, AUXDATA, ai->flash[nwords] & 0xffff);
- }
- }
- OUT4500(ai, SWS0, 0x8000);
-
- return 0;
-}
-
-/*
- * Restart the card after flashing: clear the flashing flag,
- * re-init the descriptors on MPI cards and set the card up again.
- */
-static int flashrestart(struct airo_info *ai, struct net_device *dev)
-{
- int i, status;
-
- ssleep(1); /* Added 12/7/00 */
- clear_bit (FLAG_FLASHING, &ai->flags);
- if (test_bit(FLAG_MPI, &ai->flags)) {
- status = mpi_init_descriptors(ai);
- if (status != SUCCESS)
- return status;
- }
- status = setup_card(ai, dev, 1);
-
- if (!test_bit(FLAG_MPI,&ai->flags))
- for (i = 0; i < MAX_FIDS; i++) {
- ai->fids[i] = transmit_allocate
- (ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2);
- }
-
- ssleep(1); /* Added 12/7/00 */
- return status;
-}
-#endif /* CISCO_EXT */
-
-/*
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- In addition:
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-*/
-
-module_init(airo_init_module);
-module_exit(airo_cleanup_module);
diff --git a/drivers/net/wireless/cisco/airo.h b/drivers/net/wireless/cisco/airo.h
deleted file mode 100644
index 8a02977a2..000000000
--- a/drivers/net/wireless/cisco/airo.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AIRO_H_
-#define _AIRO_H_
-
-struct net_device *init_airo_card(unsigned short irq, int port, int is_pcmcia,
- struct device *dmdev);
-int reset_airo_card(struct net_device *dev);
-void stop_airo_card(struct net_device *dev, int freeres);
-
-#endif /* _AIRO_H_ */
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
deleted file mode 100644
index fcfe4c6d6..000000000
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/*======================================================================
-
- Aironet driver for 4500 and 4800 series cards
-
- This code is released under both the GPL version 2 and BSD licenses.
- Either license may be used. The respective licenses are found at
- the end of this file.
-
- This code was developed by Benjamin Reed <breed@users.sourceforge.net>
- including portions of which come from the Aironet PC4500
- Developer's Reference Manual and used with permission. Copyright
- (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
- code in the Developer's manual was granted for this driver by
- Aironet.
-
- In addition this module was derived from dummy_cs.
- The initial developer of dummy_cs is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
-======================================================================*/
-
-#ifdef __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/netdevice.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <linux/io.h>
-
-#include "airo.h"
-
-
-/*====================================================================*/
-
-MODULE_AUTHOR("Benjamin Reed");
-MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet "
- "cards. This is the module that links the PCMCIA card "
- "with the airo module.");
-MODULE_LICENSE("Dual BSD/GPL");
-
-/*====================================================================*/
-
-static int airo_config(struct pcmcia_device *link);
-static void airo_release(struct pcmcia_device *link);
-
-static void airo_detach(struct pcmcia_device *p_dev);
-
-struct local_info {
- struct net_device *eth_dev;
-};
-
-static int airo_probe(struct pcmcia_device *p_dev)
-{
- struct local_info *local;
-
- dev_dbg(&p_dev->dev, "airo_attach()\n");
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(*local), GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- p_dev->priv = local;
-
- return airo_config(p_dev);
-} /* airo_attach */
-
-static void airo_detach(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "airo_detach\n");
-
- airo_release(link);
-
- if (((struct local_info *)link->priv)->eth_dev) {
- stop_airo_card(((struct local_info *)link->priv)->eth_dev,
- 0);
- }
- ((struct local_info *)link->priv)->eth_dev = NULL;
-
- kfree(link->priv);
-} /* airo_detach */
-
-static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-
-static int airo_config(struct pcmcia_device *link)
-{
- int ret;
-
- dev_dbg(&link->dev, "airo_config\n");
-
- link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
- CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
-
- ret = pcmcia_loop_config(link, airo_cs_config_check, NULL);
- if (ret)
- goto failed;
-
- if (!link->irq)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
- ((struct local_info *)link->priv)->eth_dev =
- init_airo_card(link->irq,
- link->resource[0]->start, 1, &link->dev);
- if (!((struct local_info *)link->priv)->eth_dev)
- goto failed;
-
- return 0;
-
- failed:
- airo_release(link);
- return -ENODEV;
-} /* airo_config */
-
-static void airo_release(struct pcmcia_device *link)
-{
- dev_dbg(&link->dev, "airo_release\n");
- pcmcia_disable_device(link);
-}
-
-static int airo_suspend(struct pcmcia_device *link)
-{
- struct local_info *local = link->priv;
-
- netif_device_detach(local->eth_dev);
-
- return 0;
-}
-
-static int airo_resume(struct pcmcia_device *link)
-{
- struct local_info *local = link->priv;
-
- if (link->open) {
- reset_airo_card(local->eth_dev);
- netif_device_attach(local->eth_dev);
- }
-
- return 0;
-}
-
-static const struct pcmcia_device_id airo_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a),
- PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005),
- PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007),
- PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0007),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, airo_ids);
-
-static struct pcmcia_driver airo_driver = {
- .owner = THIS_MODULE,
- .name = "airo_cs",
- .probe = airo_probe,
- .remove = airo_detach,
- .id_table = airo_ids,
- .suspend = airo_suspend,
- .resume = airo_resume,
-};
-module_pcmcia_driver(airo_driver);
-
-/*
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- In addition:
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-*/
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 69276266c..70e420df1 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4231,8 +4231,6 @@ il4965_rx_handle(struct il_priv *il)
fill_rx = 1;
while (i != r) {
- int len;
-
rxb = rxq->queue[i];
/* If an RXB doesn't have an Rx queue slot associated with it,
@@ -4246,10 +4244,6 @@ il4965_rx_handle(struct il_priv *il)
PAGE_SIZE << il->hw_params.rx_page_order,
DMA_FROM_DEVICE);
pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
-
reclaim = il_need_reclaim(il, pkt);
/* Based on type of command response or notification,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 054fef680..17570d62c 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -541,6 +541,9 @@ il_leds_init(struct il_priv *il)
il->led.name =
kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+ if (!il->led.name)
+ return;
+
il->led.brightness_set = il_led_brightness_set;
il->led.blink_set = il_led_blink_set;
il->led.max_brightness = 1;
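
The hunk above makes il_leds_init() bail out when kasprintf() fails, instead of registering an LED whose name pointer is NULL. A minimal sketch of the same defensive pattern, assuming hypothetical names (example_dev, example_led_init):

#include <linux/slab.h>
#include <linux/string.h>

struct example_led { const char *name; };
struct example_dev { const char *id; struct example_led led; };

/* kasprintf() returns NULL on allocation failure, and the name is
 * dereferenced later during registration, so a failed allocation must
 * abort the (optional) LED setup early. */
static void example_led_init(struct example_dev *dev)
{
	dev->led.name = kasprintf(GFP_KERNEL, "%s-led", dev->id);
	if (!dev->led.name)
		return;	/* optional feature: skip it rather than crash */

	/* ... fill in the remaining fields and register the LED ... */
}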
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 134635c70..73cbb120a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -299,3 +299,9 @@ MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 82da957ad..1b6249561 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -179,3 +179,5 @@ MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 80eb9b499..156c42854 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -33,6 +33,10 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
+#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
+#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
+#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
+#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
@@ -48,6 +52,14 @@
IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -124,6 +136,9 @@ static const struct iwl_base_params iwl_sc_base_params = {
#define IWL_DEVICE_SC \
IWL_DEVICE_BZ_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
/*
@@ -149,10 +164,21 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
- .uhb_supported = true,
IWL_DEVICE_SC,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_SC_EHT,
+};
+
+const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
+
+const struct iwl_cfg iwl_cfg_sc2 = {
+ .fw_name_mac = "sc2",
+ IWL_DEVICE_SC,
+};
+
+const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
+
+const struct iwl_cfg iwl_cfg_sc2f = {
+ .fw_name_mac = "sc2f",
+ IWL_DEVICE_SC,
};
MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
@@ -162,3 +188,7 @@ MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 8248c3beb..b740c65a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -60,6 +60,12 @@ enum iwl_debug_cmds {
*/
FW_DUMP_COMPLETE_CMD = 0xB,
/**
+ * @FW_CLEAR_BUFFER:
+ * clears the firmware's internal buffer; no payload.
+ */
+ FW_CLEAR_BUFFER = 0xD,
+ /**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index dfe0bebab..7ec959244 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -269,6 +269,9 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+#define IWL_MCC_US 0x5553
+#define IWL_MCC_CANADA 0x4341
+
/**
* struct iwl_mcc_update_cmd - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 57ddd8aef..59f7a75c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -19,7 +19,6 @@
* @fwrt_ptr: pointer to the buffer coming from fwrt
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
- * @trans_len: length of the valid data in trans_ptr
* @fwrt_len: length of the valid data in fwrt_ptr
*/
struct iwl_fw_dump_ptrs {
@@ -880,10 +879,10 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
cpu_to_le32(fwrt->trans->hw_rev_step);
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->name,
- sizeof(dump_info->dev_human_readable) - 1);
- strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
- sizeof(dump_info->bus_human_readable) - 1);
+ strscpy_pad(dump_info->dev_human_readable, fwrt->trans->name,
+ sizeof(dump_info->dev_human_readable));
+ strscpy_pad(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable));
dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
dump_info->lmac_err_id[0] =
cpu_to_le32(fwrt->dump.lmac_err_id[0]);
@@ -3396,3 +3395,22 @@ void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt)
iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
IWL_EXPORT_SYMBOL(iwl_fw_disable_dbg_asserts);
+
+void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_dbg_params params = {0};
+
+ iwl_fw_dbg_stop_sync(fwrt);
+
+ if (fw_has_api(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR)) {
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, FW_CLEAR_BUFFER),
+ };
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ }
+
+ iwl_dbg_tlv_init_cfg(fwrt);
+ iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_clear_monitor_buf);
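
Two independent changes land in fw/dbg.c above. The strncpy() → strscpy_pad() conversion matters because strncpy() leaves the destination unterminated when the source fills the buffer, while strscpy_pad() always NUL-terminates, truncates if needed, and zero-fills the tail, so no stale bytes leak into the dump file. A minimal sketch:

#include <linux/string.h>

/* Copy a name into a fixed-size dump field: always terminated,
 * truncated if necessary, remainder zeroed. */
static void example_fill_field(char *dst, size_t dst_size, const char *src)
{
	strscpy_pad(dst, src, dst_size);
}

The new iwl_fw_dbg_clear_monitor_buf() follows a stop/clear/restart shape: recording is stopped synchronously, the firmware-internal buffer is cleared when the ucode advertises IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR, and recording is restarted with a freshly initialized configuration.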
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 66b233250..eb38c686b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -330,6 +330,7 @@ void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data);
void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt);
#define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \
IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 03f6e5201..bfc39bd5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -20,7 +20,7 @@ struct iwl_ucode_header {
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
+ u8 data[]; /* in same order as sizes */
} v1;
struct {
__le32 build; /* build number */
@@ -29,7 +29,7 @@ struct iwl_ucode_header {
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
+ u8 data[]; /* in same order as sizes */
} v2;
} u;
};
@@ -243,6 +243,10 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* version tables.
* @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
* SCAN_CONFIG_DB_CMD_API_S.
+ * @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: the firmware has offloaded the
+ * station disable-tx logic.
+ * @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug
+ * internal buffer
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -280,6 +284,9 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
+ /* API Set 2 */
+ IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66,
+ IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67,
NUM_IWL_UCODE_TLV_API
/*
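
The data[0] → data[] change in struct iwl_ucode_header converts GNU zero-length arrays into C99 flexible array members, which is the form the kernel's FORTIFY_SOURCE and UBSAN bounds checks understand. A sketch of how such a member is declared and sized, assuming a hypothetical struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_hdr {
	u32 len;
	u8 data[];	/* flexible array member: must come last */
};

/* struct_size() computes sizeof(*h) + payload_len with overflow
 * checking, so the allocation matches the declared bounds. */
static struct example_hdr *example_alloc(u32 payload_len)
{
	struct example_hdr *h = kzalloc(struct_size(h, data, payload_len),
					GFP_KERNEL);

	if (h)
		h->len = payload_len;
	return h;
}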
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 02ded2229..e99d69136 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -377,7 +377,6 @@ struct iwl_cfg {
u16 nvm_calib_ver;
u32 rx_with_siso_diversity:1,
tx_with_siso_diversity:1,
- bt_shared_single_ant:1,
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
high_temp:1,
@@ -419,6 +418,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_MAC_TYPE_SC 0x48
+#define IWL_CFG_MAC_TYPE_SC2 0x49
+#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -528,6 +529,8 @@ extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
extern const char iwl_sc_name[];
+extern const char iwl_sc2_name[];
+extern const char iwl_sc2f_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -633,6 +636,8 @@ extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
+extern const struct iwl_cfg iwl_cfg_sc2;
+extern const struct iwl_cfg iwl_cfg_sc2f;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index a4df67ff2..4511d7fb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -354,6 +354,8 @@ enum {
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
#define CSR_HW_RF_ID_TYPE_MS (0x00111000)
+#define CSR_HW_RF_ID_TYPE_FM (0x00112000)
+#define CSR_HW_RF_ID_TYPE_WP (0x00113000)
/* HW_RF CHIP STEP */
#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 55d2ed13a..3442dfab5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1280,7 +1280,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
return 0;
}
-static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index 06fb7d665..7ed6329fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -57,6 +57,7 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data,
bool sync);
+void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt);
static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 347fd95c4..2c280a2fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2019, 2023 Intel Corporation
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
@@ -36,20 +36,17 @@ TRACE_EVENT(iwlwifi_dev_tx_tb,
TRACE_EVENT(iwlwifi_dev_rx_data,
TP_PROTO(const struct device *dev,
- const struct iwl_trans *trans,
- void *rxbuf, size_t len),
- TP_ARGS(dev, trans, rxbuf, len),
+ void *rxbuf, size_t len, size_t start),
+ TP_ARGS(dev, rxbuf, len, start),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, data,
- len - iwl_rx_trace_len(trans, rxbuf, len, NULL))
+ __dynamic_array(u8, data, len - start)
),
TP_fast_assign(
- size_t offs = iwl_rx_trace_len(trans, rxbuf, len, NULL);
DEV_ASSIGN;
- if (offs < len)
+ if (start < len)
memcpy(__get_dynamic_array(data),
- ((u8 *)rxbuf) + offs, len - offs);
+ ((u8 *)rxbuf) + start, len - start);
),
TP_printk("[%s] RX frame data", __get_str(dev))
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 46ed723f1..e656bf6bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -4,7 +4,7 @@
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018, 2023 Intel Corporation
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
@@ -50,23 +50,20 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
- struct iwl_rx_packet *pkt, size_t len),
- TP_ARGS(dev, trans, pkt, len),
+ TP_PROTO(const struct device *dev,
+ struct iwl_rx_packet *pkt, size_t len, size_t trace_len,
+ size_t hdr_offset),
+ TP_ARGS(dev, pkt, len, trace_len, hdr_offset),
TP_STRUCT__entry(
DEV_ENTRY
__field(u16, cmd)
__field(u8, hdr_offset)
- __dynamic_array(u8, rxbuf,
- iwl_rx_trace_len(trans, pkt, len, NULL))
+ __dynamic_array(u8, rxbuf, trace_len)
),
TP_fast_assign(
- size_t hdr_offset = 0;
-
DEV_ASSIGN;
__entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
- memcpy(__get_dynamic_array(rxbuf), pkt,
- iwl_rx_trace_len(trans, pkt, len, &hdr_offset));
+ memcpy(__get_dynamic_array(rxbuf), pkt, trace_len);
__entry->hdr_offset = hdr_offset;
),
TP_printk("[%s] RX cmd %#.2x",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
index e46639b09..7e6862979 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2023 Intel Corporation
*****************************************************************************/
#include <linux/module.h>
@@ -20,4 +20,17 @@
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
-#endif
+#else
+#include "iwl-devtrace.h"
+#endif /* __CHECKER__ */
+
+void __trace_iwlwifi_dev_rx(struct iwl_trans *trans, void *pkt, size_t len)
+{
+ size_t hdr_offset = 0, trace_len;
+
+ trace_len = iwl_rx_trace_len(trans, pkt, len, &hdr_offset);
+ trace_iwlwifi_dev_rx(trans->dev, pkt, len, trace_len, hdr_offset);
+
+ if (trace_len < len)
+ trace_iwlwifi_dev_rx_data(trans->dev, pkt, len, trace_len);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index 01fb7b900..c3e09f4fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -7,12 +7,12 @@
*****************************************************************************/
#ifndef __IWLWIFI_DEVICE_TRACE
+#define __IWLWIFI_DEVICE_TRACE
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "iwl-trans.h"
-#if !defined(__IWLWIFI_DEVICE_TRACE)
static inline bool iwl_trace_data(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -70,9 +70,6 @@ static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
ieee80211_hdrlen(hdr->frame_control);
}
-#endif
-
-#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
#include <linux/device.h>
@@ -98,4 +95,20 @@ static inline void trace_ ## name(proto) {}
#include "iwl-devtrace-data.h"
#include "iwl-devtrace-iwlwifi.h"
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+DECLARE_TRACEPOINT(iwlwifi_dev_rx);
+DECLARE_TRACEPOINT(iwlwifi_dev_rx_data);
+#endif
+
+void __trace_iwlwifi_dev_rx(struct iwl_trans *trans, void *pkt, size_t len);
+
+static inline void maybe_trace_iwlwifi_dev_rx(struct iwl_trans *trans,
+ void *pkt, size_t len)
+{
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ if (tracepoint_enabled(iwlwifi_dev_rx) ||
+ tracepoint_enabled(iwlwifi_dev_rx_data))
+ __trace_iwlwifi_dev_rx(trans, pkt, len);
+#endif
+}
#endif /* __IWLWIFI_DEVICE_TRACE */
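
The reshuffling in iwl-devtrace.[ch] exists so the RX hot path no longer computes iwl_rx_trace_len() unconditionally: maybe_trace_iwlwifi_dev_rx() tests the tracepoints' static keys first and only then calls the out-of-line helper that prepares the trace arguments. tracepoint_enabled() compiles down to a static-branch test, effectively free while tracing is off. A sketch of the pattern with a hypothetical tracepoint:

#include <linux/tracepoint-defs.h>
#include <linux/types.h>

/* DECLARE_TRACEPOINT() exposes just the tracepoint's static key, without
 * pulling the full trace-event definitions into every includer. */
DECLARE_TRACEPOINT(example_rx);

void __trace_example_rx(void *pkt, size_t len);	/* out-of-line slow path */

static inline void maybe_trace_example_rx(void *pkt, size_t len)
{
	if (tracepoint_enabled(example_rx))
		__trace_example_rx(pkt, len);	/* lengths computed only here */
}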
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index d85cadd8e..501a8cc21 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1606,10 +1606,17 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
/* Set the GO concurrent flag only if NO_IR is set.
* Otherwise it is meaningless.
*/
- if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
- (flags & NL80211_RRF_NO_IR))
- flags |= NL80211_RRF_GO_CONCURRENT;
-
+ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT)) {
+ if (flags & NL80211_RRF_NO_IR)
+ flags |= NL80211_RRF_GO_CONCURRENT;
+ if (flags & NL80211_RRF_DFS) {
+ flags |= NL80211_RRF_DFS_CONCURRENT;
+ /* Our device doesn't set the active bit for DFS channels;
+ * however, once a channel is marked as DFS, no-IR is not needed.
+ */
+ flags &= ~NL80211_RRF_NO_IR;
+ }
+ }
/*
* reg_capa is per regulatory domain so apply it for every channel
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index af5f9b210..3dc618a7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -64,8 +64,6 @@ struct iwl_cfg;
* received on the RSS queue(s). The queue parameter indicates which of the
* RSS queues received this frame; it will always be non-zero.
* This method must not sleep.
- * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set
- * completes. Must be atomic.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
@@ -96,8 +94,6 @@ struct iwl_op_mode_ops {
struct iwl_rx_cmd_buffer *rxb);
void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
- void (*async_cb)(struct iwl_op_mode *op_mode,
- const struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -147,13 +143,6 @@ static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}
-static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode,
- const struct iwl_device_cmd *cmd)
-{
- if (op_mode->ops->async_cb)
- op_mode->ops->async_cb(op_mode, cmd);
-}
-
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
int queue)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 4bd759432..f95098c21 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -172,10 +172,6 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
return -EIO;
}
- if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) &&
- !(cmd->flags & CMD_ASYNC)))
- return -EINVAL;
-
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 05e72a212..5789a8735 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -110,8 +110,7 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
* the response. The caller needs to call iwl_free_resp when done.
* @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
- * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
- * called after this command completes. Valid only with CMD_ASYNC.
+ * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
* @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
* SUSPEND and RESUME commands. We are in D3 mode when we set
* trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
@@ -120,7 +119,7 @@ enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
- CMD_WANT_ASYNC_CALLBACK = BIT(3),
+ CMD_BLOCK_TXQS = BIT(3),
CMD_SEND_IN_D3 = BIT(4),
};
@@ -534,11 +533,6 @@ struct iwl_pnvm_image {
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
* @freeze_txq_timer: prevents the timer of the queue from firing until the
* queue is set to awake. Must be atomic.
- * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
- * that the transport needs to refcount the calls since this function
- * will be called several times with block = true, and then the queues
- * need to be unblocked only after the same number of calls with
- * block = false.
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
@@ -613,7 +607,6 @@ struct iwl_trans_ops {
int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
- void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -1323,7 +1316,7 @@ iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
{
if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return trans->ops->rxq_dma_data(trans, queue, data);
}
@@ -1345,7 +1338,7 @@ iwl_trans_txq_alloc(struct iwl_trans *trans,
might_sleep();
if (WARN_ON_ONCE(!trans->ops->txq_alloc))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
@@ -1407,23 +1400,11 @@ static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
trans->ops->freeze_txq_timer(trans, txqs, freeze);
}
-static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
- bool block)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- if (trans->ops->block_txq_ptrs)
- trans->ops->block_txq_ptrs(trans, block);
-}
-
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
u32 txqs)
{
if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* No need to wait if the firmware is not alive */
if (trans->state != IWL_TRANS_FW_ALIVE) {
@@ -1437,7 +1418,7 @@ static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
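
The recurring -ENOTSUPP → -EOPNOTSUPP substitutions in this series are about what userspace sees: ENOTSUPP (524) is kernel-internal and gets reported as "Unknown error 524", whereas EOPNOTSUPP is a real errno ("Operation not supported"). A trivial sketch of the convention:

#include <linux/errno.h>
#include <linux/types.h>

/* For anything that can propagate to userspace, return -EOPNOTSUPP;
 * -ENOTSUPP is not part of the uapi errno tables. */
static int example_check_op(bool supported)
{
	return supported ? 0 : -EOPNOTSUPP;
}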
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 5a5b1128e..9fe176169 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -116,11 +116,6 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
ret = BT_COEX_TX_DIS_LUT;
- if (mvm->cfg->bt_shared_single_ant) {
- rcu_read_unlock();
- return ret;
- }
-
phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
secondary_ch_phy_id =
@@ -383,13 +378,12 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
/*
* don't reduce the Tx power if one of these is true:
* we are in LOOSE
- * single share antenna product
* BT is inactive
* we are not associated
*/
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant || !vif->cfg.assoc ||
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF ||
+ !vif->cfg.assoc) {
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false);
/* FIXME: should this be per link? */
iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
@@ -570,7 +564,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Check if rssi is good enough for reduced Tx power, but not in loose
* scheme.
*/
- if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+ if (rssi_event == RSSI_EVENT_LOW ||
iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
ret = iwl_mvm_bt_coex_reduced_txp(mvm,
mvmvif->deflink.ap_sta_id,
@@ -639,10 +633,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
{
- /* there is no other antenna, shared antenna is always available */
- if (mvm->cfg->bt_shared_single_ant)
- return true;
-
if (ant & mvm->cfg->non_shared_ant)
return true;
@@ -652,10 +642,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
{
- /* there is no other antenna, shared antenna is always available */
- if (mvm->cfg->bt_shared_single_ant)
- return true;
-
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index d99a712e7..2a0836923 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1128,14 +1128,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- /*
- * This needs to be unlocked due to lock ordering
- * constraints. Since we're in the suspend path
- * that isn't really a problem though.
- */
- mutex_unlock(&mvm->mutex);
ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
- mutex_lock(&mvm->mutex);
if (ret)
return ret;
@@ -2508,7 +2501,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status_data *status)
{
int i;
- bool keep;
+ bool keep = false;
struct iwl_mvm_sta *mvm_ap_sta;
if (!status)
@@ -2536,18 +2529,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
mvm_ap_sta->tid_data[i].seq_number >> 4);
}
- /* now we have all the data we need, unlock to avoid mac80211 issues */
- mutex_unlock(&mvm->mutex);
-
iwl_mvm_report_wakeup_reasons(mvm, vif, status);
keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
-
- return keep;
-
out_unlock:
mutex_unlock(&mvm->mutex);
- return false;
+ return keep;
}
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
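
Both d3.c hunks above delete drop-and-reacquire dances on mvm->mutex; the wakeup-reason path now initializes keep to false and funnels every return through the locked exit label. A sketch of that single-exit shape, assuming hypothetical names:

#include <linux/mutex.h>

struct example {
	struct mutex lock;
	void *status;
	bool want_keep;
};

/* Default the result, take the lock once, and leave through a single
 * unlock label instead of releasing the mutex mid-function. */
static bool example_query(struct example *ex)
{
	bool keep = false;

	mutex_lock(&ex->lock);

	if (!ex->status)
		goto out_unlock;

	/* ... gather state that needs the lock held ... */
	keep = ex->want_keep;

out_unlock:
	mutex_unlock(&ex->lock);
	return keep;
}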
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 7737650e5..edc8204f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1521,7 +1521,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
/* supporting only MQ RX */
if (!mvm->trans->trans_cfg->mq_rx_supported)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
rxb._page = alloc_pages(GFP_ATOMIC, 0);
if (!rxb._page)
@@ -1714,6 +1714,20 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return count;
}
+static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&mvm->mutex);
+ iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
@@ -2166,6 +2180,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_clear, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
@@ -2372,6 +2387,7 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_clear, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 233ae8188..ae0eb585b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 10b9219b3..8f10590f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -39,7 +39,7 @@ static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -77,7 +77,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
}
fallthrough;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -291,7 +291,7 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
default:
IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n",
cmd_ver);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
}
return ret;
@@ -333,7 +333,7 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
if (cmd_ver < 3) {
IWL_ERR(mvm, "Adding PASN station not supported by FW\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if ((!hltk || !hltk_len) && (!tk || !tk_len)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 403bd17b8..125208466 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -27,9 +27,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define IWL_TAS_US_MCC 0x5553
-#define IWL_TAS_CANADA_MCC 0x4341
-
#define IWL_UATS_VLP_AP_SUPPORTED BIT(29)
#define IWL_UATS_AFC_AP_SUPPORTED BIT(30)
@@ -1234,10 +1231,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
dmi_get_system_info(DMI_SYS_VENDOR));
if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
&cmd.v4.block_list_size,
- IWL_TAS_US_MCC)) ||
+ IWL_MCC_US)) ||
(!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
&cmd.v4.block_list_size,
- IWL_TAS_CANADA_MCC))) {
+ IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 168e4a2ff..e321d41d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -152,6 +152,16 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
+ /* Some kind of regulatory mess means we currently need to disallow
+ * puncturing in the US and Canada. Do that here, at least until we
+ * figure out the new chanctx APIs for puncturing.
+ */
+ if (resp->mcc == cpu_to_le16(IWL_MCC_US) ||
+ resp->mcc == cpu_to_le16(IWL_MCC_CANADA))
+ ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING);
+ else
+ __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mvm->hw->flags);
+
iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));
out:
@@ -288,7 +298,7 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
/* This has been tested on those devices only */
if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!mvm->nvm_data)
return -EBUSY;
@@ -517,6 +527,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT);
+
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 6af606e5d..1628bf554 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -874,6 +874,9 @@ void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
cmd.sta_id = cpu_to_le32(mvmsta->deflink.sta_id);
cmd.disable = cpu_to_le32(disable);
+ if (WARN_ON(iwl_mvm_has_no_host_disable_tx(mvm)))
+ return;
+
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, STA_DISABLE_TX_CMD),
CMD_ASYNC, sizeof(cmd), &cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1e33de05d..fe0fa9ff5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -947,6 +947,7 @@ struct iwl_mvm {
/* the vif that requested the current scan */
struct iwl_mvm_vif *scan_vif;
+ u8 scan_link_id;
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -1513,6 +1514,12 @@ static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
}
+static inline bool iwl_mvm_has_no_host_disable_tx(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX);
+}
+
static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 1627b2f81..adbbe19ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1703,18 +1703,6 @@ void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
-static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
- const struct iwl_device_cmd *cmd)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
- /*
- * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
- * commands that need to block the Tx queues.
- */
- iwl_trans_block_txq_ptrs(mvm->trans, false);
-}
-
static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
{
return queue == mvm->aux_queue || queue == mvm->probe_queue ||
@@ -2024,7 +2012,6 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
- .async_cb = iwl_mvm_async_cb, \
.queue_full = iwl_mvm_stop_sw_queue, \
.queue_not_full = iwl_mvm_wake_sw_queue, \
.hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
index 2ecd32bed..045c862a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
@@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (ret)
return ERR_PTR(ret);
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
+ resp_size)) {
+ iwl_free_resp(&cmd);
return ERR_PTR(-EIO);
+ }
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
+ iwl_free_resp(&cmd);
+
if (!resp)
return ERR_PTR(-ENOMEM);
- iwl_free_resp(&cmd);
return resp;
}
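
The rfi.c reorder fixes two bugs at once: the response packet used to leak on the size-mismatch path, and it was previously freed only after the copy path had already returned. The rule the hunk enforces, sketched with hypothetical helpers:

#include <linux/err.h>
#include <linux/slab.h>

struct example_resp {
	size_t len;
	void *data;
};

static void example_free_resp(struct example_resp *r)
{
	kfree(r->data);
	r->data = NULL;
}

/* Free the response on *every* path, and only after the copy is taken. */
static void *example_dup_resp(struct example_resp *r, size_t expected)
{
	void *copy;

	if (r->len != expected) {
		example_free_resp(r);	/* this path used to leak */
		return ERR_PTR(-EIO);
	}

	copy = kmemdup(r->data, expected, GFP_KERNEL);
	example_free_resp(r);		/* safe: snapshot already taken */

	return copy ?: ERR_PTR(-ENOMEM);
}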
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 7bf2a5947..481dfbbe4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta)
+ struct ieee80211_sta *sta)
{
if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
kfree_skb(skb);
return;
}
- if (sta && sta->valid_links && link_sta) {
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
-
- rx_status->link_valid = 1;
- rx_status->link_id = link_sta->link_id;
- }
-
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
@@ -587,7 +579,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta, NULL /* FIXME */);
+ sta);
reorder_buf->num_stored--;
}
}
@@ -2214,6 +2206,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (IS_ERR(sta))
sta = NULL;
link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
+
+ if (sta && sta->valid_links && link_sta) {
+ rx_status->link_valid = 1;
+ rx_status->link_id = link_sta->link_id;
+ }
}
} else if (!is_multicast_ether_addr(hdr->addr2)) {
/*
@@ -2357,8 +2354,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
!(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
rx_status->flag |= RX_FLAG_AMSDU_MORE;
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
- link_sta);
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
}
out:
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 75c5c58e1..97d44354d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -101,6 +101,7 @@ struct iwl_mvm_scan_params {
bool scan_6ghz;
bool enable_6ghz_passive;
bool respect_p2p_go, respect_p2p_go_hb;
+ s8 tsf_report_link_id;
u8 bssid[ETH_ALEN] __aligned(2);
};
@@ -2342,20 +2343,15 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+ mvm->scan_link_id = 0;
+
if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
- struct iwl_mvm_vif_link_info *link_info;
- u8 link_id = 0;
+ struct iwl_mvm_vif_link_info *link_info =
+ scan_vif->link[params->tsf_report_link_id];
- /* Use one of the active link (if any). In the future it would
- * be possible that the link ID would be part of the scan
- * request coming from upper layers so we would need to use it.
- */
- if (vif->active_links)
- link_id = ffs(vif->active_links) - 1;
-
- link_info = scan_vif->link[link_id];
+ mvm->scan_link_id = params->tsf_report_link_id;
if (!WARN_ON(!link_info))
gp->scan_start_mac_or_link_id = link_info->fw_link_id;
}
@@ -2819,7 +2815,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
@@ -2977,6 +2974,14 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (req->duration)
params.iter_notif = true;
+ params.tsf_report_link_id = req->tsf_report_link_id;
+ if (params.tsf_report_link_id < 0) {
+ if (vif->active_links)
+ params.tsf_report_link_id = __ffs(vif->active_links);
+ else
+ params.tsf_report_link_id = 0;
+ }
+
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
@@ -3164,8 +3169,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
.aborted = aborted,
.scan_start_tsf = mvm->scan_start,
};
+ struct iwl_mvm_vif *scan_vif = mvm->scan_vif;
+ struct iwl_mvm_vif_link_info *link_info =
+ scan_vif->link[mvm->scan_link_id];
+
+ if (!WARN_ON(!link_info))
+ memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
- memcpy(info.tsf_bssid, mvm->scan_vif->deflink.bssid, ETH_ALEN);
ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_vif = NULL;
cancel_delayed_work(&mvm->scan_timeout_dwork);
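
Note the bit-scan change inside the scan.c hunks: the old code used ffs(), which returns a 1-based index (hence the "- 1"), while the new code uses __ffs(), which is 0-based but undefined for a zero input, so it stays guarded by the active_links test. A one-line sketch:

#include <linux/bitops.h>
#include <linux/types.h>

/* Lowest-numbered active link, falling back to link 0 when the bitmap
 * is empty (__ffs(0) is undefined). */
static u8 example_pick_link(unsigned long active_links)
{
	return active_links ? __ffs(active_links) : 0;
}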
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 990592514..c2e0cff74 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2578,7 +2578,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* In IBSS, ieee80211_check_queues() sets the cab_queue to be
@@ -3262,7 +3262,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* should be updated as well.
*/
if (buf_size < IWL_FRAME_LIMIT)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -4139,10 +4139,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
}
/* block the Tx queues until the FW updated the sleep Tx count */
- iwl_trans_block_txq_ptrs(mvm->trans, true);
-
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
- CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
+ CMD_ASYNC | CMD_BLOCK_TXQS,
iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
@@ -4180,7 +4178,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
int ret;
if (mvm->mld_api_is_used) {
- iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
return;
}
@@ -4197,7 +4196,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
if (mvm->mld_api_is_used) {
- iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
return;
}
@@ -4252,7 +4252,9 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
int i;
if (mvm->mld_api_is_used) {
- iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable);
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
+ disable);
return;
}
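
The sleep-Tx-count hunk replaces a manual pairing, iwl_trans_block_txq_ptrs(true) before the command with an unblock in the op mode's async callback, by the single CMD_BLOCK_TXQS flag, so the transport now owns the block/unblock lifetime. A caller-side sketch with hypothetical names:

#include <linux/bits.h>
#include <linux/types.h>

#define EX_CMD_ASYNC		BIT(0)
#define EX_CMD_BLOCK_TXQS	BIT(3)	/* transport blocks TXQs while the
					 * command is in flight */

struct example_trans;

/* Assumed helper standing in for the real command submission path. */
int ex_send_cmd(struct example_trans *trans, const void *cmd, size_t len,
		unsigned int flags);

static int example_send_sleep_count(struct example_trans *trans,
				    const void *cmd, size_t len)
{
	/* Before: block TXQ pointers here and unblock in an async
	 * callback; now the flag carries that responsibility. */
	return ex_send_cmd(trans, cmd, len,
			   EX_CMD_ASYNC | EX_CMD_BLOCK_TXQS);
}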
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index da00ef6e4..3d96a824d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -898,9 +898,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
unsigned int ver =
- iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(MAC_CONF_GROUP,
- SESSION_PROTECTION_CMD), 2);
+ iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ SESSION_PROTECTION_NOTIF, 2);
int id = le32_to_cpu(notif->mac_link_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 26a095360..ba3db601a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -502,12 +502,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -1115,15 +1119,24 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_sc_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sc2_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sc2f_name),
#endif /* CONFIG_IWLMVM */
};
/*
* Read rf id and cdb info from prph register and store it
*/
-static int get_crf_id(struct iwl_trans *iwl_trans)
+static void get_crf_id(struct iwl_trans *iwl_trans)
{
- int ret = 0;
u32 sd_reg_ver_addr;
u32 val = 0;
@@ -1150,8 +1163,6 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id,
iwl_trans->hw_wfpm_id);
-
- return ret;
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 07931c2db..9c2461ba1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1351,8 +1351,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (len < sizeof(*pkt) || offset > max_len)
break;
- trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
- trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
+ maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c9e5bda8f..a4a477233 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -290,6 +290,16 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS):
pos = scnprintf(buf, buflen, "MS");
break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
+ pos = scnprintf(buf, buflen, "FM");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
+ if (SILICON_Z_STEP ==
+ CSR_HW_RFID_STEP(trans->hw_rf_id))
+ pos = scnprintf(buf, buflen, "WHTC");
+ else
+ pos = scnprintf(buf, buflen, "WH");
+ break;
default:
return;
}
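
The WP case above is the only branch that inspects the silicon step before
choosing a label; every branch then feeds the same scnprintf()-based
accumulation, which is safe because scnprintf() returns the number of bytes
actually stored and never overruns the buffer. A minimal sketch of that
accumulation pattern (build_rf_label is a hypothetical helper, not driver
code):

#include <linux/kernel.h>

/* Append to a bounded buffer; scnprintf() caps each write at the
 * remaining space, so 'pos' can be advanced without overflow checks. */
static void build_rf_label(char *buf, size_t buflen, const char *rf, int step)
{
        int pos;

        pos = scnprintf(buf, buflen, "%s", rf);
        if (step >= 0)
                scnprintf(buf + pos, buflen - pos, " step %d", step);
}
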
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d10208075..63e13577a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2108,18 +2108,29 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
- struct pci_bus *bus = pdev->bus;
+ struct pci_bus *bus;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ /* in this case, something else already removed the device */
+ if (!bus)
+ goto out;
dev_err(&pdev->dev, "Device gone - attempting removal\n");
+
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
- pci_lock_rescan_remove();
- pci_dev_put(pdev);
+
pci_stop_and_remove_bus_device(pdev);
- if (removal->rescan && bus) {
+ pci_dev_put(pdev);
+
+ if (removal->rescan) {
if (bus->parent)
bus = bus->parent;
pci_rescan_bus(bus);
}
+
+out:
pci_unlock_rescan_remove();
kfree(removal);
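
The reordering above closes a race: pdev->bus may only be dereferenced with
the rescan/remove lock held, and the device reference must outlive
pci_stop_and_remove_bus_device(). A condensed sketch of the corrected
ordering (remove_dead_device is an illustrative wrapper, not the driver
function):

#include <linux/pci.h>

static void remove_dead_device(struct pci_dev *pdev, bool rescan)
{
        struct pci_bus *bus;

        pci_lock_rescan_remove();
        bus = pdev->bus;
        if (!bus)                       /* already removed by someone else */
                goto out;
        pci_stop_and_remove_bus_device(pdev);
        pci_dev_put(pdev);              /* drop the ref only after removal */
        if (rescan) {
                if (bus->parent)        /* rescan from the parent if any */
                        bus = bus->parent;
                pci_rescan_bus(bus);
        }
out:
        pci_unlock_rescan_remove();
}
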
@@ -2134,6 +2145,7 @@ void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
return;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
+ iwl_pcie_dump_csr(trans);
/*
* get a module reference to avoid doing this
@@ -2366,32 +2378,6 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
ofs, val);
}
-static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
-{
- int i;
-
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
-
- if (i == trans->txqs.cmd.q_id)
- continue;
-
- spin_lock_bh(&txq->lock);
-
- if (!block && !(WARN_ON_ONCE(!txq->block))) {
- txq->block--;
- if (!txq->block) {
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->write_ptr | (i << 8));
- }
- } else if (block) {
- txq->block++;
- }
-
- spin_unlock_bh(&txq->lock);
- }
-}
-
#define IWL_FLUSH_WAIT_MS 2000
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
@@ -3573,7 +3559,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
.freeze_txq_timer = iwl_trans_txq_freeze_timer,
- .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index c72a84d8b..aabbef114 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2020, 2023 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -42,6 +42,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd;
unsigned long flags;
+ if (WARN_ON(cmd->flags & CMD_BLOCK_TXQS))
+ return -EINVAL;
+
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 2f39b639c..6c2b37e56 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -873,6 +873,33 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
+{
+ int i;
+
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = trans->txqs.txq[i];
+
+ if (i == trans->txqs.cmd.q_id)
+ continue;
+
+ /* we skip the command queue (obviously) so it's OK to nest */
+ spin_lock_nested(&txq->lock, 1);
+
+ if (!block && !(WARN_ON_ONCE(!txq->block))) {
+ txq->block--;
+ if (!txq->block) {
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ txq->write_ptr | (i << 8));
+ }
+ } else if (block) {
+ txq->block++;
+ }
+
+ spin_unlock(&txq->lock);
+ }
+}
+
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data pointer
@@ -1137,6 +1164,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
+ if (cmd->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, true);
+
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
@@ -1202,8 +1232,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->source->_rx_page_order = trans_pcie->rx_page_order;
}
- if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
- iwl_op_mode_async_cb(trans->op_mode, cmd);
+ if (meta->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, false);
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
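
Taken together, the hunks above replace the old trans-ops hook with a
command flag: iwl_pcie_enqueue_hcmd() raises the per-queue block count when
CMD_BLOCK_TXQS is set, and iwl_pcie_hcmd_complete() drops it again, so the
data-queue write pointers stay frozen for exactly the lifetime of the
command. A minimal sketch of that refcount pattern (the example_* names are
illustrative, not the iwlwifi structures):

#include <linux/spinlock.h>
#include <linux/bug.h>

struct example_txq {
        spinlock_t lock;
        unsigned int block;     /* >0 means write pointer updates are held */
};

static void example_txq_push_wrptr(struct example_txq *q)
{
        /* the real driver writes HBUS_TARG_WRPTR here */
}

static void example_txq_block(struct example_txq *q, bool block)
{
        /* nested class 1: the caller already holds the command queue lock */
        spin_lock_nested(&q->lock, 1);
        if (block) {
                q->block++;
        } else if (!WARN_ON_ONCE(!q->block)) {
                q->block--;
                if (!q->block)
                        example_txq_push_wrptr(q);
        }
        spin_unlock(&q->lock);
}
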
diff --git a/drivers/net/wireless/intersil/Kconfig b/drivers/net/wireless/intersil/Kconfig
index bd6bf70ec..201b1534a 100644
--- a/drivers/net/wireless/intersil/Kconfig
+++ b/drivers/net/wireless/intersil/Kconfig
@@ -12,8 +12,6 @@ config WLAN_VENDOR_INTERSIL
if WLAN_VENDOR_INTERSIL
-source "drivers/net/wireless/intersil/hostap/Kconfig"
-source "drivers/net/wireless/intersil/orinoco/Kconfig"
source "drivers/net/wireless/intersil/p54/Kconfig"
endif # WLAN_VENDOR_INTERSIL
diff --git a/drivers/net/wireless/intersil/Makefile b/drivers/net/wireless/intersil/Makefile
index 65281d1b3..27e9b2869 100644
--- a/drivers/net/wireless/intersil/Makefile
+++ b/drivers/net/wireless/intersil/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_HOSTAP) += hostap/
-obj-$(CONFIG_HERMES) += orinoco/
obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/intersil/hostap/Kconfig b/drivers/net/wireless/intersil/hostap/Kconfig
deleted file mode 100644
index 2edff8efb..000000000
--- a/drivers/net/wireless/intersil/hostap/Kconfig
+++ /dev/null
@@ -1,95 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config HOSTAP
- tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
- select WIRELESS_EXT
- select WEXT_SPY
- select WEXT_PRIV
- select CRYPTO
- select CRYPTO_MICHAEL_MIC
- select CRC32
- select LIB80211
- select LIB80211_CRYPT_WEP
- select LIB80211_CRYPT_TKIP
- select LIB80211_CRYPT_CCMP
- help
- Shared driver code for IEEE 802.11b wireless cards based on
- Intersil Prism2/2.5/3 chipset. This driver supports so called
- Host AP mode that allows the card to act as an IEEE 802.11
- access point.
-
- See <http://hostap.epitest.fi/> for more information about the
- Host AP driver configuration and tools. This site includes
- information and tools (hostapd and wpa_supplicant) for WPA/WPA2
- support.
-
- This option includes the base Host AP driver code that is shared by
- different hardware models. You will also need to enable support for
- PLX/PCI/CS version of the driver to actually use the driver.
-
- The driver can be compiled as a module and it will be called
- hostap.
-
-config HOSTAP_FIRMWARE
- bool "Support downloading firmware images with Host AP driver"
- depends on HOSTAP
- help
- Configure Host AP driver to include support for firmware image
- download. This option by itself only enables downloading to the
- volatile memory, i.e. the card RAM. This option is required to
- support cards that don't have firmware in flash, such as D-Link
- DWL-520 rev E and D-Link DWL-650 rev P.
-
- Firmware image downloading needs a user space tool, prism2_srec.
- It is available from http://hostap.epitest.fi/.
-
-config HOSTAP_FIRMWARE_NVRAM
- bool "Support for non-volatile firmware download"
- depends on HOSTAP_FIRMWARE
- help
- Allow Host AP driver to write firmware images to the non-volatile
- card memory, i.e. flash memory that survives power cycling.
- Enable this option if you want to be able to change card firmware
- permanently.
-
- Firmware image downloading needs a user space tool, prism2_srec.
- It is available from http://hostap.epitest.fi/.
-
-config HOSTAP_PLX
- tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
- depends on PCI && HOSTAP && HAS_IOPORT
- help
- Host AP driver's version for Prism2/2.5/3 PC Cards in PLX9052 based
- PCI adaptors.
-
- "Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
- driver and its help text includes more information about the Host AP
- driver.
-
- The driver can be compiled as a module and will be named
- hostap_plx.
-
-config HOSTAP_PCI
- tristate "Host AP driver for Prism2.5 PCI adaptors"
- depends on PCI && HOSTAP
- help
- Host AP driver's version for Prism2.5 PCI adaptors.
-
- "Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
- driver and its help text includes more information about the Host AP
- driver.
-
- The driver can be compiled as a module and will be named
- hostap_pci.
-
-config HOSTAP_CS
- tristate "Host AP driver for Prism2/2.5/3 PC Cards"
- depends on PCMCIA && HOSTAP
- help
- Host AP driver's version for Prism2/2.5/3 PC Cards.
-
- "Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
- driver and its help text includes more information about the Host AP
- driver.
-
- The driver can be compiled as a module and will be named
- hostap_cs.
diff --git a/drivers/net/wireless/intersil/hostap/Makefile b/drivers/net/wireless/intersil/hostap/Makefile
deleted file mode 100644
index ae3bb73b2..000000000
--- a/drivers/net/wireless/intersil/hostap/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
- hostap_ioctl.o hostap_main.o hostap_proc.o
-obj-$(CONFIG_HOSTAP) += hostap.o
-
-obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
-obj-$(CONFIG_HOSTAP_PLX) += hostap_plx.o
-obj-$(CONFIG_HOSTAP_PCI) += hostap_pci.o
diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h
deleted file mode 100644
index 552ae33d7..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef HOSTAP_H
-#define HOSTAP_H
-
-#include <linux/ethtool.h>
-#include <linux/kernel.h>
-
-#include "hostap_wlan.h"
-#include "hostap_ap.h"
-
-static const long __maybe_unused freq_list[] = {
- 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-#define FREQ_COUNT ARRAY_SIZE(freq_list)
-
-/* hostap.c */
-
-extern struct proc_dir_entry *hostap_proc;
-
-u16 hostap_tx_callback_register(local_info_t *local,
- void (*func)(struct sk_buff *, int ok, void *),
- void *data);
-int hostap_tx_callback_unregister(local_info_t *local, u16 idx);
-int hostap_set_word(struct net_device *dev, int rid, u16 val);
-int hostap_set_string(struct net_device *dev, int rid, const char *val);
-u16 hostap_get_porttype(local_info_t *local);
-int hostap_set_encryption(local_info_t *local);
-int hostap_set_antsel(local_info_t *local);
-int hostap_set_roaming(local_info_t *local);
-int hostap_set_auth_algs(local_info_t *local);
-void hostap_dump_rx_header(const char *name,
- const struct hfa384x_rx_frame *rx);
-void hostap_dump_tx_header(const char *name,
- const struct hfa384x_tx_frame *tx);
-extern const struct header_ops hostap_80211_ops;
-int hostap_80211_get_hdrlen(__le16 fc);
-struct net_device_stats *hostap_get_stats(struct net_device *dev);
-void hostap_setup_dev(struct net_device *dev, local_info_t *local,
- int type);
-void hostap_set_multicast_list_queue(struct work_struct *work);
-int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
-int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
-void hostap_cleanup(local_info_t *local);
-void hostap_cleanup_handler(void *data);
-struct net_device * hostap_add_interface(struct local_info *local,
- int type, int rtnl_locked,
- const char *prefix, const char *name);
-void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
- int remove_from_list);
-int prism2_update_comms_qual(struct net_device *dev);
-int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
- u8 *body, size_t bodylen);
-int prism2_sta_deauth(local_info_t *local, u16 reason);
-int prism2_wds_add(local_info_t *local, u8 *remote_addr,
- int rtnl_locked);
-int prism2_wds_del(local_info_t *local, u8 *remote_addr,
- int rtnl_locked, int do_not_remove);
-
-
-/* hostap_ap.c */
-
-int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
-int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
-void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
-int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
-void ap_control_kickall(struct ap_data *ap);
-void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
- struct lib80211_crypt_data ***crypt);
-int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
- struct iw_quality qual[], int buf_size,
- int aplist);
-int prism2_ap_translate_scan(struct net_device *dev,
- struct iw_request_info *info, char *buffer);
-int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
-
-
-/* hostap_proc.c */
-
-void hostap_init_proc(local_info_t *local);
-void hostap_remove_proc(local_info_t *local);
-
-
-/* hostap_info.c */
-
-void hostap_info_init(local_info_t *local);
-void hostap_info_process(local_info_t *local, struct sk_buff *skb);
-
-
-/* hostap_ioctl.c */
-
-extern const struct iw_handler_def hostap_iw_handler_def;
-extern const struct ethtool_ops prism2_ethtool_ops;
-
-int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-
-#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211.h b/drivers/net/wireless/intersil/hostap/hostap_80211.h
deleted file mode 100644
index 1452cf6ec..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_80211.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef HOSTAP_80211_H
-#define HOSTAP_80211_H
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-struct hostap_ieee80211_mgmt {
- __le16 frame_control;
- __le16 duration;
- u8 da[6];
- u8 sa[6];
- u8 bssid[6];
- __le16 seq_ctrl;
- union {
- struct {
- __le16 auth_alg;
- __le16 auth_transaction;
- __le16 status_code;
- /* possibly followed by Challenge text */
- u8 variable[0];
- } __packed auth;
- struct {
- __le16 reason_code;
- } __packed deauth;
- struct {
- __le16 capab_info;
- __le16 listen_interval;
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed assoc_req;
- struct {
- __le16 capab_info;
- __le16 status_code;
- __le16 aid;
- /* followed by Supported rates */
- u8 variable[0];
- } __packed assoc_resp, reassoc_resp;
- struct {
- __le16 capab_info;
- __le16 listen_interval;
- u8 current_ap[6];
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed reassoc_req;
- struct {
- __le16 reason_code;
- } __packed disassoc;
- struct {
- } __packed probe_req;
- struct {
- u8 timestamp[8];
- __le16 beacon_int;
- __le16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
- } __packed beacon, probe_resp;
- } u;
-} __packed;
-
-
-#define IEEE80211_MGMT_HDR_LEN 24
-#define IEEE80211_DATA_HDR3_LEN 24
-#define IEEE80211_DATA_HDR4_LEN 30
-
-
-struct hostap_80211_rx_status {
- u32 mac_time;
- u8 signal;
- u8 noise;
- u16 rate; /* in 100 kbps */
-};
-
-/* prism2_rx_80211 'type' argument */
-enum {
- PRISM2_RX_MONITOR, PRISM2_RX_MGMT, PRISM2_RX_NON_ASSOC,
- PRISM2_RX_NULLFUNC_ACK
-};
-
-int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats, int type);
-void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats);
-void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats);
-
-void hostap_dump_tx_80211(const char *name, struct sk_buff *skb);
-netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-
-#endif /* HOSTAP_80211_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
deleted file mode 100644
index 61be822f9..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
+++ /dev/null
@@ -1,1116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/lib80211.h>
-#include <linux/if_arp.h>
-
-#include "hostap_80211.h"
-#include "hostap.h"
-#include "hostap_ap.h"
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct ieee80211_hdr *hdr;
- u16 fc;
-
- hdr = (struct ieee80211_hdr *) skb->data;
-
- printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
- "jiffies=%ld\n",
- name, rx_stats->signal, rx_stats->noise, rx_stats->rate,
- skb->len, jiffies);
-
- if (skb->len < 2)
- return;
-
- fc = le16_to_cpu(hdr->frame_control);
- printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s",
- fc, (fc & IEEE80211_FCTL_FTYPE) >> 2,
- (fc & IEEE80211_FCTL_STYPE) >> 4,
- fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
- fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
-
- if (skb->len < IEEE80211_DATA_HDR3_LEN) {
- printk("\n");
- return;
- }
-
- printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
- le16_to_cpu(hdr->seq_ctrl));
-
- printk(KERN_DEBUG " A1=%pM", hdr->addr1);
- printk(" A2=%pM", hdr->addr2);
- printk(" A3=%pM", hdr->addr3);
- if (skb->len >= 30)
- printk(" A4=%pM", hdr->addr4);
- printk("\n");
-}
-
-
-/* Send RX frame to netif with 802.11 (and possible prism) header.
- * Called from hardware or software IRQ context. */
-int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats, int type)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int hdrlen, phdrlen, head_need, tail_need;
- u16 fc;
- int prism_header, ret;
- struct ieee80211_hdr *fhdr;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (dev->type == ARPHRD_IEEE80211_PRISM) {
- if (local->monitor_type == PRISM2_MONITOR_PRISM) {
- prism_header = 1;
- phdrlen = sizeof(struct linux_wlan_ng_prism_hdr);
- } else { /* local->monitor_type == PRISM2_MONITOR_CAPHDR */
- prism_header = 2;
- phdrlen = sizeof(struct linux_wlan_ng_cap_hdr);
- }
- } else if (dev->type == ARPHRD_IEEE80211_RADIOTAP) {
- prism_header = 3;
- phdrlen = sizeof(struct hostap_radiotap_rx);
- } else {
- prism_header = 0;
- phdrlen = 0;
- }
-
- fhdr = (struct ieee80211_hdr *) skb->data;
- fc = le16_to_cpu(fhdr->frame_control);
-
- if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
- printk(KERN_DEBUG "%s: dropped management frame with header "
- "version %d\n", dev->name, fc & IEEE80211_FCTL_VERS);
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- hdrlen = hostap_80211_get_hdrlen(fhdr->frame_control);
-
- /* check if there is enough room for extra data; if not, expand skb
- * buffer to be large enough for the changes */
- head_need = phdrlen;
- tail_need = 0;
-#ifdef PRISM2_ADD_BOGUS_CRC
- tail_need += 4;
-#endif /* PRISM2_ADD_BOGUS_CRC */
-
- head_need -= skb_headroom(skb);
- tail_need -= skb_tailroom(skb);
-
- if (head_need > 0 || tail_need > 0) {
- if (pskb_expand_head(skb, head_need > 0 ? head_need : 0,
- tail_need > 0 ? tail_need : 0,
- GFP_ATOMIC)) {
- printk(KERN_DEBUG "%s: prism2_rx_80211 failed to "
- "reallocate skb buffer\n", dev->name);
- dev_kfree_skb_any(skb);
- return 0;
- }
- }
-
- /* We now have an skb with enough head and tail room, so just insert
- * the extra data */
-
-#ifdef PRISM2_ADD_BOGUS_CRC
- memset(skb_put(skb, 4), 0xff, 4); /* Prism2 strips CRC */
-#endif /* PRISM2_ADD_BOGUS_CRC */
-
- if (prism_header == 1) {
- struct linux_wlan_ng_prism_hdr *hdr;
- hdr = skb_push(skb, phdrlen);
- memset(hdr, 0, phdrlen);
- hdr->msgcode = LWNG_CAP_DID_BASE;
- hdr->msglen = sizeof(*hdr);
- memcpy(hdr->devname, dev->name, sizeof(hdr->devname));
-#define LWNG_SETVAL(f,i,s,l,d) \
-hdr->f.did = LWNG_CAP_DID_BASE | (i << 12); \
-hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
- LWNG_SETVAL(hosttime, 1, 0, 4, jiffies);
- LWNG_SETVAL(mactime, 2, 0, 4, rx_stats->mac_time);
- LWNG_SETVAL(channel, 3, 1 /* no value */, 4, 0);
- LWNG_SETVAL(rssi, 4, 1 /* no value */, 4, 0);
- LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0);
- LWNG_SETVAL(signal, 6, 0, 4, rx_stats->signal);
- LWNG_SETVAL(noise, 7, 0, 4, rx_stats->noise);
- LWNG_SETVAL(rate, 8, 0, 4, rx_stats->rate / 5);
- LWNG_SETVAL(istx, 9, 0, 4, 0);
- LWNG_SETVAL(frmlen, 10, 0, 4, skb->len - phdrlen);
-#undef LWNG_SETVAL
- } else if (prism_header == 2) {
- struct linux_wlan_ng_cap_hdr *hdr;
- hdr = skb_push(skb, phdrlen);
- memset(hdr, 0, phdrlen);
- hdr->version = htonl(LWNG_CAPHDR_VERSION);
- hdr->length = htonl(phdrlen);
- hdr->mactime = __cpu_to_be64(rx_stats->mac_time);
- hdr->hosttime = __cpu_to_be64(jiffies);
- hdr->phytype = htonl(4); /* dss_dot11_b */
- hdr->channel = htonl(local->channel);
- hdr->datarate = htonl(rx_stats->rate);
- hdr->antenna = htonl(0); /* unknown */
- hdr->priority = htonl(0); /* unknown */
- hdr->ssi_type = htonl(3); /* raw */
- hdr->ssi_signal = htonl(rx_stats->signal);
- hdr->ssi_noise = htonl(rx_stats->noise);
- hdr->preamble = htonl(0); /* unknown */
- hdr->encoding = htonl(1); /* cck */
- } else if (prism_header == 3) {
- struct hostap_radiotap_rx *hdr;
- hdr = skb_push(skb, phdrlen);
- memset(hdr, 0, phdrlen);
- hdr->hdr.it_len = cpu_to_le16(phdrlen);
- hdr->hdr.it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
- (1 << IEEE80211_RADIOTAP_CHANNEL) |
- (1 << IEEE80211_RADIOTAP_RATE) |
- (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
- (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE));
- hdr->tsft = cpu_to_le64(rx_stats->mac_time);
- hdr->chan_freq = cpu_to_le16(freq_list[local->channel - 1]);
- hdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_CCK |
- IEEE80211_CHAN_2GHZ);
- hdr->rate = rx_stats->rate / 5;
- hdr->dbm_antsignal = rx_stats->signal;
- hdr->dbm_antnoise = rx_stats->noise;
- }
-
- ret = skb->len - phdrlen;
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb_pull(skb, hdrlen);
- if (prism_header)
- skb_pull(skb, phdrlen);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = cpu_to_be16(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
-
- return ret;
-}
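
For monitor interfaces, the deleted prism2_rx_80211() above prepends one of
three pseudo-headers; the radiotap variant is the only one still in common
use. A minimal sketch of a radiotap header carrying just the rate field
(example_rtap is illustrative, reduced from the hostap_radiotap_rx layout):

#include <linux/string.h>
#include <net/ieee80211_radiotap.h>

struct example_rtap {
        struct ieee80211_radiotap_header hdr;
        u8 rate;                        /* in 500 kbps units */
} __packed;

static void fill_rtap(struct example_rtap *r, u16 rate_100kbps)
{
        memset(r, 0, sizeof(*r));
        r->hdr.it_len = cpu_to_le16(sizeof(*r));
        r->hdr.it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
        r->rate = rate_100kbps / 5;     /* hostap reports rate in 100 kbps */
}
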
-
-
-/* Called only as a tasklet (software IRQ) */
-static void monitor_rx(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- int len;
-
- len = prism2_rx_80211(dev, skb, rx_stats, PRISM2_RX_MONITOR);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static struct prism2_frag_entry *
-prism2_frag_cache_find(local_info_t *local, unsigned int seq,
- unsigned int frag, u8 *src, u8 *dst)
-{
- struct prism2_frag_entry *entry;
- int i;
-
- for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
- entry = &local->frag_cache[i];
- if (entry->skb != NULL &&
- time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
- printk(KERN_DEBUG "%s: expiring fragment cache entry "
- "seq=%u last_frag=%u\n",
- local->dev->name, entry->seq, entry->last_frag);
- dev_kfree_skb(entry->skb);
- entry->skb = NULL;
- }
-
- if (entry->skb != NULL && entry->seq == seq &&
- (entry->last_frag + 1 == frag || frag == -1) &&
- memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
- memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
- return entry;
- }
-
- return NULL;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static struct sk_buff *
-prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
-{
- struct sk_buff *skb = NULL;
- u16 sc;
- unsigned int frag, seq;
- struct prism2_frag_entry *entry;
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- frag = sc & IEEE80211_SCTL_FRAG;
- seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
-
- if (frag == 0) {
- /* Reserve enough space to fit maximum frame length */
- skb = dev_alloc_skb(local->dev->mtu +
- sizeof(struct ieee80211_hdr) +
- 8 /* LLC */ +
- 2 /* alignment */ +
- 8 /* WEP */ + ETH_ALEN /* WDS */);
- if (skb == NULL)
- return NULL;
-
- entry = &local->frag_cache[local->frag_next_idx];
- local->frag_next_idx++;
- if (local->frag_next_idx >= PRISM2_FRAG_CACHE_LEN)
- local->frag_next_idx = 0;
-
- if (entry->skb != NULL)
- dev_kfree_skb(entry->skb);
-
- entry->first_frag_time = jiffies;
- entry->seq = seq;
- entry->last_frag = frag;
- entry->skb = skb;
- memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
- memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
- } else {
- /* received a fragment of a frame for which the head fragment
- * should have already been received */
- entry = prism2_frag_cache_find(local, seq, frag, hdr->addr2,
- hdr->addr1);
- if (entry != NULL) {
- entry->last_frag = frag;
- skb = entry->skb;
- }
- }
-
- return skb;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static int prism2_frag_cache_invalidate(local_info_t *local,
- struct ieee80211_hdr *hdr)
-{
- u16 sc;
- unsigned int seq;
- struct prism2_frag_entry *entry;
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
-
- entry = prism2_frag_cache_find(local, seq, -1, hdr->addr2, hdr->addr1);
-
- if (entry == NULL) {
- printk(KERN_DEBUG "%s: could not invalidate fragment cache "
- "entry (seq=%u)\n",
- local->dev->name, seq);
- return -1;
- }
-
- entry->skb = NULL;
- return 0;
-}
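
Both cache helpers above key on the 802.11 sequence-control field, whose
layout drives the whole reassembly scheme: the low four bits carry the
fragment number and the upper twelve the sequence number. A one-function
sketch of that decoding (decode_seq_ctrl is illustrative):

#include <linux/ieee80211.h>

static void decode_seq_ctrl(u16 sc, unsigned int *seq, unsigned int *frag)
{
        *frag = sc & IEEE80211_SCTL_FRAG;       /* bits 0-3  */
        *seq = (sc & IEEE80211_SCTL_SEQ) >> 4;  /* bits 4-15 */
}
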
-
-
-static struct hostap_bss_info *__hostap_get_bss(local_info_t *local, u8 *bssid,
- u8 *ssid, size_t ssid_len)
-{
- struct list_head *ptr;
- struct hostap_bss_info *bss;
-
- list_for_each(ptr, &local->bss_list) {
- bss = list_entry(ptr, struct hostap_bss_info, list);
- if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0 &&
- (ssid == NULL ||
- (ssid_len == bss->ssid_len &&
- memcmp(ssid, bss->ssid, ssid_len) == 0))) {
- list_move(&bss->list, &local->bss_list);
- return bss;
- }
- }
-
- return NULL;
-}
-
-
-static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
- u8 *ssid, size_t ssid_len)
-{
- struct hostap_bss_info *bss;
-
- if (local->num_bss_info >= HOSTAP_MAX_BSS_COUNT) {
- bss = list_entry(local->bss_list.prev,
- struct hostap_bss_info, list);
- list_del(&bss->list);
- local->num_bss_info--;
- } else {
- bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
- if (bss == NULL)
- return NULL;
- }
-
- memset(bss, 0, sizeof(*bss));
- memcpy(bss->bssid, bssid, ETH_ALEN);
- memcpy(bss->ssid, ssid, ssid_len);
- bss->ssid_len = ssid_len;
- local->num_bss_info++;
- list_add(&bss->list, &local->bss_list);
- return bss;
-}
-
-
-static void __hostap_expire_bss(local_info_t *local)
-{
- struct hostap_bss_info *bss;
-
- while (local->num_bss_info > 0) {
- bss = list_entry(local->bss_list.prev,
- struct hostap_bss_info, list);
- if (!time_after(jiffies, bss->last_update + 60 * HZ))
- break;
-
- list_del(&bss->list);
- local->num_bss_info--;
- kfree(bss);
- }
-}
-
-
-/* Both IEEE 802.11 Beacon and Probe Response frames have similar structure, so
- * the same routine can be used to parse both of them. */
-static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
- int stype)
-{
- struct hostap_ieee80211_mgmt *mgmt;
- int left, chan = 0;
- u8 *pos;
- u8 *ssid = NULL, *wpa = NULL, *rsn = NULL;
- size_t ssid_len = 0, wpa_len = 0, rsn_len = 0;
- struct hostap_bss_info *bss;
-
- if (skb->len < IEEE80211_MGMT_HDR_LEN + sizeof(mgmt->u.beacon))
- return;
-
- mgmt = (struct hostap_ieee80211_mgmt *) skb->data;
- pos = mgmt->u.beacon.variable;
- left = skb->len - (pos - skb->data);
-
- while (left >= 2) {
- if (2 + pos[1] > left)
- return; /* parse failed */
- switch (*pos) {
- case WLAN_EID_SSID:
- ssid = pos + 2;
- ssid_len = pos[1];
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (pos[1] >= 4 &&
- pos[2] == 0x00 && pos[3] == 0x50 &&
- pos[4] == 0xf2 && pos[5] == 1) {
- wpa = pos;
- wpa_len = pos[1] + 2;
- }
- break;
- case WLAN_EID_RSN:
- rsn = pos;
- rsn_len = pos[1] + 2;
- break;
- case WLAN_EID_DS_PARAMS:
- if (pos[1] >= 1)
- chan = pos[2];
- break;
- }
- left -= 2 + pos[1];
- pos += 2 + pos[1];
- }
-
- if (wpa_len > MAX_WPA_IE_LEN)
- wpa_len = MAX_WPA_IE_LEN;
- if (rsn_len > MAX_WPA_IE_LEN)
- rsn_len = MAX_WPA_IE_LEN;
- if (ssid_len > sizeof(bss->ssid))
- ssid_len = sizeof(bss->ssid);
-
- spin_lock(&local->lock);
- bss = __hostap_get_bss(local, mgmt->bssid, ssid, ssid_len);
- if (bss == NULL)
- bss = __hostap_add_bss(local, mgmt->bssid, ssid, ssid_len);
- if (bss) {
- bss->last_update = jiffies;
- bss->count++;
- bss->capab_info = le16_to_cpu(mgmt->u.beacon.capab_info);
- if (wpa) {
- memcpy(bss->wpa_ie, wpa, wpa_len);
- bss->wpa_ie_len = wpa_len;
- } else
- bss->wpa_ie_len = 0;
- if (rsn) {
- memcpy(bss->rsn_ie, rsn, rsn_len);
- bss->rsn_ie_len = rsn_len;
- } else
- bss->rsn_ie_len = 0;
- bss->chan = chan;
- }
- __hostap_expire_bss(local);
- spin_unlock(&local->lock);
-}
-
-
-static int
-hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats, u16 type,
- u16 stype)
-{
- if (local->iw_mode == IW_MODE_MASTER)
- hostap_update_sta_ps(local, (struct ieee80211_hdr *) skb->data);
-
- if (local->hostapd && type == IEEE80211_FTYPE_MGMT) {
- if (stype == IEEE80211_STYPE_BEACON &&
- local->iw_mode == IW_MODE_MASTER) {
- struct sk_buff *skb2;
- /* Process beacon frames also in kernel driver to
- * update STA(AP) table statistics */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2)
- hostap_rx(skb2->dev, skb2, rx_stats);
- }
-
- /* send management frames to the user space daemon for
- * processing */
- local->apdevstats.rx_packets++;
- local->apdevstats.rx_bytes += skb->len;
- if (local->apdev == NULL)
- return -1;
- prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT);
- return 0;
- }
-
- if (local->iw_mode == IW_MODE_MASTER) {
- if (type != IEEE80211_FTYPE_MGMT &&
- type != IEEE80211_FTYPE_CTL) {
- printk(KERN_DEBUG "%s: unknown management frame "
- "(type=0x%02x, stype=0x%02x) dropped\n",
- skb->dev->name, type >> 2, stype >> 4);
- return -1;
- }
-
- hostap_rx(skb->dev, skb, rx_stats);
- return 0;
- } else if (type == IEEE80211_FTYPE_MGMT &&
- (stype == IEEE80211_STYPE_BEACON ||
- stype == IEEE80211_STYPE_PROBE_RESP)) {
- hostap_rx_sta_beacon(local, skb, stype);
- return -1;
- } else if (type == IEEE80211_FTYPE_MGMT &&
- (stype == IEEE80211_STYPE_ASSOC_RESP ||
- stype == IEEE80211_STYPE_REASSOC_RESP)) {
- /* Ignore (Re)AssocResp silently since these are not currently
- * needed but are still received when WPA/RSN mode is enabled.
- */
- return -1;
- } else {
- printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: dropped unhandled"
- " management frame in non-Host AP mode (type=%d:%d)\n",
- skb->dev->name, type >> 2, stype >> 4);
- return -1;
- }
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static struct net_device *prism2_rx_get_wds(local_info_t *local,
- u8 *addr)
-{
- struct hostap_interface *iface = NULL;
- struct list_head *ptr;
-
- read_lock_bh(&local->iface_lock);
- list_for_each(ptr, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type == HOSTAP_INTERFACE_WDS &&
- memcmp(iface->u.wds.remote_addr, addr, ETH_ALEN) == 0)
- break;
- iface = NULL;
- }
- read_unlock_bh(&local->iface_lock);
-
- return iface ? iface->dev : NULL;
-}
-
-
-static int
-hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc,
- struct net_device **wds)
-{
- /* FIX: is this really supposed to accept WDS frames only in Master
- * mode? What about Repeater or Managed with WDS frames? */
- if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) !=
- (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS) &&
- (local->iw_mode != IW_MODE_MASTER || !(fc & IEEE80211_FCTL_TODS)))
- return 0; /* not a WDS frame */
-
- /* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
- * or own non-standard frame with 4th address after payload */
- if (!ether_addr_equal(hdr->addr1, local->dev->dev_addr) &&
- (hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
- hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
- hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
- /* RA (or BSSID) is not ours - drop */
- PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with "
- "not own or broadcast %s=%pM\n",
- local->dev->name,
- fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID",
- hdr->addr1);
- return -1;
- }
-
- /* check if the frame came from a registered WDS connection */
- *wds = prism2_rx_get_wds(local, hdr->addr2);
- if (*wds == NULL && fc & IEEE80211_FCTL_FROMDS &&
- (local->iw_mode != IW_MODE_INFRA ||
- !(local->wds_type & HOSTAP_WDS_AP_CLIENT) ||
- memcmp(hdr->addr2, local->bssid, ETH_ALEN) != 0)) {
- /* require that WDS link has been registered with TA or the
- * frame is from current AP when using 'AP client mode' */
- PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame "
- "from unknown TA=%pM\n",
- local->dev->name, hdr->addr2);
- if (local->ap && local->ap->autom_ap_wds)
- hostap_wds_link_oper(local, hdr->addr2, WDS_ADD);
- return -1;
- }
-
- if (*wds && !(fc & IEEE80211_FCTL_FROMDS) && local->ap &&
- hostap_is_sta_assoc(local->ap, hdr->addr2)) {
- /* STA is actually associated with us even though it has a
- * registered WDS link. Assume it is in 'AP client' mode.
- * Since this is a 3-addr frame, assume it is not (bogus) WDS
- * frame and process it like any normal ToDS frame from
- * associated STA. */
- *wds = NULL;
- }
-
- return 0;
-}
-
-
-static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
-{
- struct net_device *dev = local->dev;
- u16 fc, ethertype;
- struct ieee80211_hdr *hdr;
- u8 *pos;
-
- if (skb->len < 24)
- return 0;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- fc = le16_to_cpu(hdr->frame_control);
-
- /* check that the frame is unicast frame to us */
- if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_TODS &&
- ether_addr_equal(hdr->addr1, dev->dev_addr) &&
- ether_addr_equal(hdr->addr3, dev->dev_addr)) {
- /* ToDS frame with own addr BSSID and DA */
- } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_FROMDS &&
- ether_addr_equal(hdr->addr1, dev->dev_addr)) {
- /* FromDS frame with own addr as DA */
- } else
- return 0;
-
- if (skb->len < 24 + 8)
- return 0;
-
- /* check for port access entity Ethernet type */
- pos = skb->data + 24;
- ethertype = (pos[6] << 8) | pos[7];
- if (ethertype == ETH_P_PAE)
- return 1;
-
- return 0;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static int
-hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
- struct lib80211_crypt_data *crypt)
-{
- struct ieee80211_hdr *hdr;
- int res, hdrlen;
-
- if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
- return 0;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);
-
- if (local->tkip_countermeasures &&
- strcmp(crypt->ops->name, "TKIP") == 0) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "received packet from %pM\n",
- local->dev->name, hdr->addr2);
- }
- return -1;
- }
-
- atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- printk(KERN_DEBUG "%s: decryption failed (SA=%pM) res=%d\n",
- local->dev->name, hdr->addr2, res);
- local->comm_tallies.rx_discards_wep_undecryptable++;
- return -1;
- }
-
- return res;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static int
-hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
- int keyidx, struct lib80211_crypt_data *crypt)
-{
- struct ieee80211_hdr *hdr;
- int res, hdrlen;
-
- if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
- return 0;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);
-
- atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
- " (SA=%pM keyidx=%d)\n",
- local->dev->name, hdr->addr2, keyidx);
- return -1;
- }
-
- return 0;
-}
-
-
-/* All received frames are sent to this function. @skb contains the frame in
- * IEEE 802.11 format, i.e., in the format it was sent over air.
- * This function is called only as a tasklet (software IRQ). */
-void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct ieee80211_hdr *hdr;
- size_t hdrlen;
- u16 fc, type, stype, sc;
- struct net_device *wds = NULL;
- unsigned int frag;
- u8 *payload;
- struct sk_buff *skb2 = NULL;
- u16 ethertype;
- int frame_authorized = 0;
- int from_assoc_ap = 0;
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
- struct lib80211_crypt_data *crypt = NULL;
- void *sta = NULL;
- int keyidx = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
- iface->stats.rx_packets++;
- iface->stats.rx_bytes += skb->len;
-
- /* dev is the master radio device; change this to be the default
- * virtual interface (this may be changed to WDS device below) */
- dev = local->ddev;
- iface = netdev_priv(dev);
-
- hdr = (struct ieee80211_hdr *) skb->data;
-
- if (skb->len < 10)
- goto rx_dropped;
-
- fc = le16_to_cpu(hdr->frame_control);
- type = fc & IEEE80211_FCTL_FTYPE;
- stype = fc & IEEE80211_FCTL_STYPE;
- sc = le16_to_cpu(hdr->seq_ctrl);
- frag = sc & IEEE80211_SCTL_FRAG;
- hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);
-
- /* Put this code here so that we avoid duplicating it in all
- * Rx paths. - Jean II */
-#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
- /* If spy monitoring on */
- if (iface->spy_data.spy_number > 0) {
- struct iw_quality wstats;
- wstats.level = rx_stats->signal;
- wstats.noise = rx_stats->noise;
- wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED
- | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM;
- /* Update spy records */
- wireless_spy_update(dev, hdr->addr2, &wstats);
- }
-#endif /* IW_WIRELESS_SPY */
- hostap_update_rx_stats(local->ap, hdr, rx_stats);
-
- if (local->iw_mode == IW_MODE_MONITOR) {
- monitor_rx(dev, skb, rx_stats);
- return;
- }
-
- if (local->host_decrypt) {
- int idx = 0;
- if (skb->len >= hdrlen + 3)
- idx = skb->data[hdrlen + 3] >> 6;
- crypt = local->crypt_info.crypt[idx];
- sta = NULL;
-
- /* Use station specific key to override default keys if the
- * receiver address is a unicast address ("individual RA"). If
- * bcrx_sta_key parameter is set, station specific key is used
- * even with broad/multicast targets (this is against IEEE
- * 802.11, but makes it easier to use different keys with
- * stations that do not support WEP key mapping). */
-
- if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
- (void) hostap_handle_sta_crypto(local, hdr, &crypt,
- &sta);
-
- /* allow NULL decrypt to indicate a station specific override
- * for default encryption */
- if (crypt && (crypt->ops == NULL ||
- crypt->ops->decrypt_mpdu == NULL))
- crypt = NULL;
-
- if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) {
-#if 0
- /* This seems to be triggered by some (multicast?)
- * frames from other than current BSS, so just drop the
- * frames silently instead of filling system log with
- * these reports. */
- printk(KERN_DEBUG "%s: WEP decryption failed (not set)"
- " (SA=%pM)\n",
- local->dev->name, hdr->addr2);
-#endif
- local->comm_tallies.rx_discards_wep_undecryptable++;
- goto rx_dropped;
- }
- }
-
- if (type != IEEE80211_FTYPE_DATA) {
- if (type == IEEE80211_FTYPE_MGMT &&
- stype == IEEE80211_STYPE_AUTH &&
- fc & IEEE80211_FCTL_PROTECTED && local->host_decrypt &&
- (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
- {
- printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
- "from %pM\n", dev->name, hdr->addr2);
- /* TODO: could inform hostapd about this so that it
- * could send auth failure report */
- goto rx_dropped;
- }
-
- if (hostap_rx_frame_mgmt(local, skb, rx_stats, type, stype))
- goto rx_dropped;
- else
- goto rx_exit;
- }
-
- /* Data frame - extract src/dst addresses */
- if (skb->len < IEEE80211_DATA_HDR3_LEN)
- goto rx_dropped;
-
- switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
- case IEEE80211_FCTL_FROMDS:
- memcpy(dst, hdr->addr1, ETH_ALEN);
- memcpy(src, hdr->addr3, ETH_ALEN);
- break;
- case IEEE80211_FCTL_TODS:
- memcpy(dst, hdr->addr3, ETH_ALEN);
- memcpy(src, hdr->addr2, ETH_ALEN);
- break;
- case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
- if (skb->len < IEEE80211_DATA_HDR4_LEN)
- goto rx_dropped;
- memcpy(dst, hdr->addr3, ETH_ALEN);
- memcpy(src, hdr->addr4, ETH_ALEN);
- break;
- default:
- memcpy(dst, hdr->addr1, ETH_ALEN);
- memcpy(src, hdr->addr2, ETH_ALEN);
- break;
- }
-
- if (hostap_rx_frame_wds(local, hdr, fc, &wds))
- goto rx_dropped;
- if (wds)
- skb->dev = dev = wds;
-
- if (local->iw_mode == IW_MODE_MASTER && !wds &&
- (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_FROMDS &&
- local->stadev &&
- memcmp(hdr->addr2, local->assoc_ap_addr, ETH_ALEN) == 0) {
- /* Frame from BSSID of the AP for which we are a client */
- skb->dev = dev = local->stadev;
- from_assoc_ap = 1;
- }
-
- if ((local->iw_mode == IW_MODE_MASTER ||
- local->iw_mode == IW_MODE_REPEAT) &&
- !from_assoc_ap) {
- switch (hostap_handle_sta_rx(local, dev, skb, rx_stats,
- wds != NULL)) {
- case AP_RX_CONTINUE_NOT_AUTHORIZED:
- frame_authorized = 0;
- break;
- case AP_RX_CONTINUE:
- frame_authorized = 1;
- break;
- case AP_RX_DROP:
- goto rx_dropped;
- case AP_RX_EXIT:
- goto rx_exit;
- }
- }
-
- /* Nullfunc frames may have PS-bit set, so they must be passed to
- * hostap_handle_sta_rx() before being dropped here. */
- if (stype != IEEE80211_STYPE_DATA &&
- stype != IEEE80211_STYPE_DATA_CFACK &&
- stype != IEEE80211_STYPE_DATA_CFPOLL &&
- stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
- if (stype != IEEE80211_STYPE_NULLFUNC)
- printk(KERN_DEBUG "%s: RX: dropped data frame "
- "with no data (type=0x%02x, subtype=0x%02x)\n",
- dev->name, type >> 2, stype >> 4);
- goto rx_dropped;
- }
-
- /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
-
- if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
- (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
- goto rx_dropped;
- hdr = (struct ieee80211_hdr *) skb->data;
-
- /* skb: hdr + (possibly fragmented) plaintext payload */
-
- if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
- (frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
- int flen;
- struct sk_buff *frag_skb =
- prism2_frag_cache_get(local, hdr);
- if (!frag_skb) {
- printk(KERN_DEBUG "%s: Rx cannot get skb from "
- "fragment cache (morefrag=%d seq=%u frag=%u)\n",
- dev->name, (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
- (sc & IEEE80211_SCTL_SEQ) >> 4, frag);
- goto rx_dropped;
- }
-
- flen = skb->len;
- if (frag != 0)
- flen -= hdrlen;
-
- if (frag_skb->tail + flen > frag_skb->end) {
- printk(KERN_WARNING "%s: host decrypted and "
- "reassembled frame did not fit skb\n",
- dev->name);
- prism2_frag_cache_invalidate(local, hdr);
- goto rx_dropped;
- }
-
- if (frag == 0) {
- /* copy first fragment (including full headers) into
- * beginning of the fragment cache skb */
- skb_copy_from_linear_data(skb, skb_put(frag_skb, flen),
- flen);
- } else {
- /* append frame payload to the end of the fragment
- * cache skb */
- skb_copy_from_linear_data_offset(skb, hdrlen,
- skb_put(frag_skb,
- flen), flen);
- }
- dev_kfree_skb(skb);
- skb = NULL;
-
- if (fc & IEEE80211_FCTL_MOREFRAGS) {
- /* more fragments expected - leave the skb in fragment
- * cache for now; it will be delivered to upper layers
- * after all fragments have been received */
- goto rx_exit;
- }
-
- /* this was the last fragment and the frame will be
- * delivered, so remove skb from fragment cache */
- skb = frag_skb;
- hdr = (struct ieee80211_hdr *) skb->data;
- prism2_frag_cache_invalidate(local, hdr);
- }
-
- /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
- * encrypted/authenticated */
-
- if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
- hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
- goto rx_dropped;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) {
- if (local->ieee_802_1x &&
- hostap_is_eapol_frame(local, skb)) {
- /* pass unencrypted EAPOL frames even if encryption is
- * configured */
- PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X - passing "
- "unencrypted EAPOL frame\n", local->dev->name);
- } else {
- printk(KERN_DEBUG "%s: encryption configured, but RX "
- "frame not encrypted (SA=%pM)\n",
- local->dev->name, hdr->addr2);
- goto rx_dropped;
- }
- }
-
- if (local->drop_unencrypted && !(fc & IEEE80211_FCTL_PROTECTED) &&
- !hostap_is_eapol_frame(local, skb)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: dropped unencrypted RX data "
- "frame from %pM (drop_unencrypted=1)\n",
- dev->name, hdr->addr2);
- }
- goto rx_dropped;
- }
-
- /* skb: hdr + (possible reassembled) full plaintext payload */
-
- payload = skb->data + hdrlen;
- ethertype = (payload[6] << 8) | payload[7];
-
- /* If IEEE 802.1X is used, check whether the port is authorized to send
- * the received frame. */
- if (local->ieee_802_1x && local->iw_mode == IW_MODE_MASTER) {
- if (ethertype == ETH_P_PAE) {
- PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X frame\n",
- dev->name);
- if (local->hostapd && local->apdev) {
- /* Send IEEE 802.1X frames to the user
- * space daemon for processing */
- prism2_rx_80211(local->apdev, skb, rx_stats,
- PRISM2_RX_MGMT);
- local->apdevstats.rx_packets++;
- local->apdevstats.rx_bytes += skb->len;
- goto rx_exit;
- }
- } else if (!frame_authorized) {
- printk(KERN_DEBUG "%s: dropped frame from "
- "unauthorized port (IEEE 802.1X): "
- "ethertype=0x%04x\n",
- dev->name, ethertype);
- goto rx_dropped;
- }
- }
-
- /* convert hdr + possible LLC headers into Ethernet header */
- if (skb->len - hdrlen >= 8 &&
- ((memcmp(payload, rfc1042_header, 6) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(payload, bridge_tunnel_header, 6) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
- skb_pull(skb, hdrlen + 6);
- memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
- } else {
- __be16 len;
- /* Leave Ethernet header part of hdr and full payload */
- skb_pull(skb, hdrlen);
- len = htons(skb->len);
- memcpy(skb_push(skb, 2), &len, 2);
- memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
- }
-
- if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
- IEEE80211_FCTL_TODS) &&
- skb->len >= ETH_HLEN + ETH_ALEN) {
- /* Non-standard frame: get addr4 from its bogus location after
- * the payload */
- skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN,
- skb->data + ETH_ALEN,
- ETH_ALEN);
- skb_trim(skb, skb->len - ETH_ALEN);
- }
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- if (local->iw_mode == IW_MODE_MASTER && !wds &&
- local->ap->bridge_packets) {
- if (dst[0] & 0x01) {
- /* copy multicast frame both to the higher layers and
- * to the wireless media */
- local->ap->bridged_multicast++;
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
- printk(KERN_DEBUG "%s: skb_clone failed for "
- "multicast frame\n", dev->name);
- } else if (hostap_is_sta_authorized(local->ap, dst)) {
- /* send frame directly to the associated STA using
- * wireless media and not passing to higher layers */
- local->ap->bridged_unicast++;
- skb2 = skb;
- skb = NULL;
- }
- }
-
- if (skb2 != NULL) {
- /* send to wireless media */
- skb2->dev = dev;
- skb2->protocol = cpu_to_be16(ETH_P_802_3);
- skb_reset_mac_header(skb2);
- skb_reset_network_header(skb2);
- /* skb2->network_header += ETH_HLEN; */
- dev_queue_xmit(skb2);
- }
-
- if (skb) {
- skb->protocol = eth_type_trans(skb, dev);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
- }
-
- rx_exit:
- if (sta)
- hostap_handle_sta_release(sta);
- return;
-
- rx_dropped:
- dev_kfree_skb(skb);
-
- dev->stats.rx_dropped++;
- goto rx_exit;
-}
-
-
-EXPORT_SYMBOL(hostap_80211_rx);
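
The Ethernet conversion near the end of the deleted hostap_80211_rx()
implements the classic IEEE 802.1H rule: strip an RFC 1042 SNAP header for
most EtherTypes, strip a bridge-tunnel header for AARP/IPX, and leave
frames whose type field is really an 802.3 length (below 0x600) untouched.
A compact sketch of that decision (should_strip_snap is illustrative):

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

static const u8 rfc1042_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const u8 bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

static bool should_strip_snap(const u8 *llc, u16 ethertype)
{
        if (!memcmp(llc, rfc1042_hdr, sizeof(rfc1042_hdr)))
                return ethertype != ETH_P_AARP && ethertype != ETH_P_IPX;
        return !memcmp(llc, bridge_tunnel_hdr, sizeof(bridge_tunnel_hdr));
}
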
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
deleted file mode 100644
index c47da0694..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/etherdevice.h>
-
-#include "hostap_80211.h"
-#include "hostap_common.h"
-#include "hostap_wlan.h"
-#include "hostap.h"
-#include "hostap_ap.h"
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
- u16 fc;
-
- hdr = (struct ieee80211_hdr *) skb->data;
-
- printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
- name, skb->len, jiffies);
-
- if (skb->len < 2)
- return;
-
- fc = le16_to_cpu(hdr->frame_control);
- printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s",
- fc, (fc & IEEE80211_FCTL_FTYPE) >> 2,
- (fc & IEEE80211_FCTL_STYPE) >> 4,
- fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
- fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
-
- if (skb->len < IEEE80211_DATA_HDR3_LEN) {
- printk("\n");
- return;
- }
-
- printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
- le16_to_cpu(hdr->seq_ctrl));
-
- printk(KERN_DEBUG " A1=%pM", hdr->addr1);
- printk(" A2=%pM", hdr->addr2);
- printk(" A3=%pM", hdr->addr3);
- if (skb->len >= 30)
- printk(" A4=%pM", hdr->addr4);
- printk("\n");
-}
-
-
-/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
- * Convert Ethernet header into a suitable IEEE 802.11 header depending on
- * device configuration. */
-netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int need_headroom, need_tailroom = 0;
- struct ieee80211_hdr hdr;
- u16 fc, ethertype = 0;
- enum {
- WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
- } use_wds = WDS_NO;
- u8 *encaps_data;
- int hdr_len, encaps_len, skip_header_bytes;
- int to_assoc_ap = 0;
- struct hostap_skb_tx_data *meta;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (skb->len < ETH_HLEN) {
- printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
- "(len=%d)\n", dev->name, skb->len);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- if (local->ddev != dev) {
- use_wds = (local->iw_mode == IW_MODE_MASTER &&
- !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
- WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
- if (dev == local->stadev) {
- to_assoc_ap = 1;
- use_wds = WDS_NO;
- } else if (dev == local->apdev) {
- printk(KERN_DEBUG "%s: prism2_tx: trying to use "
- "AP device with Ethernet net dev\n", dev->name);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- } else {
- if (local->iw_mode == IW_MODE_REPEAT) {
- printk(KERN_DEBUG "%s: prism2_tx: trying to use "
- "non-WDS link in Repeater mode\n", dev->name);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- } else if (local->iw_mode == IW_MODE_INFRA &&
- (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
- !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) {
- /* AP client mode: send frames with foreign src addr
- * using 4-addr WDS frames */
- use_wds = WDS_COMPLIANT_FRAME;
- }
- }
-
- /* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
- * ==>
- * Prism2 TX frame with 802.11 header:
- * txdesc (address order depending on used mode; includes dst_addr and
- * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
- * proto[2], payload {, possible addr4[6]} */
-
- ethertype = (skb->data[12] << 8) | skb->data[13];
-
- memset(&hdr, 0, sizeof(hdr));
-
- /* Length of data after IEEE 802.11 header */
- encaps_data = NULL;
- encaps_len = 0;
- skip_header_bytes = ETH_HLEN;
- if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
- encaps_data = bridge_tunnel_header;
- encaps_len = sizeof(bridge_tunnel_header);
- skip_header_bytes -= 2;
- } else if (ethertype >= 0x600) {
- encaps_data = rfc1042_header;
- encaps_len = sizeof(rfc1042_header);
- skip_header_bytes -= 2;
- }
-
- fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
- hdr_len = IEEE80211_DATA_HDR3_LEN;
-
- if (use_wds != WDS_NO) {
- /* Note! Prism2 station firmware has problems with sending real
- * 802.11 frames with four addresses; until these problems can
- * be fixed or worked around, 4-addr frames needed for WDS are
- * using incompatible format: FromDS flag is not set and the
- * fourth address is added after the frame payload; it is
- * assumed that the receiving station knows how to handle this
- * frame format */
-
- if (use_wds == WDS_COMPLIANT_FRAME) {
- fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
- /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
- * Addr4 = SA */
- skb_copy_from_linear_data_offset(skb, ETH_ALEN,
- &hdr.addr4, ETH_ALEN);
- hdr_len += ETH_ALEN;
- } else {
- /* bogus 4-addr format to workaround Prism2 station
- * f/w bug */
- fc |= IEEE80211_FCTL_TODS;
- /* From DS: Addr1 = DA (used as RA),
- * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
- */
-
- /* SA from skb->data + ETH_ALEN will be added after
- * frame payload; use hdr.addr4 as a temporary buffer
- */
- skb_copy_from_linear_data_offset(skb, ETH_ALEN,
- &hdr.addr4, ETH_ALEN);
- need_tailroom += ETH_ALEN;
- }
-
- /* send broadcast and multicast frames to broadcast RA, if
- * configured; otherwise, use unicast RA of the WDS link */
- if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
- is_multicast_ether_addr(skb->data))
- eth_broadcast_addr(hdr.addr1);
- else if (iface->type == HOSTAP_INTERFACE_WDS)
- memcpy(&hdr.addr1, iface->u.wds.remote_addr,
- ETH_ALEN);
- else
- memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
- memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
- skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
- } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
- fc |= IEEE80211_FCTL_FROMDS;
- /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
- skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
- memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
- skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
- ETH_ALEN);
- } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
- fc |= IEEE80211_FCTL_TODS;
- /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
- memcpy(&hdr.addr1, to_assoc_ap ?
- local->assoc_ap_addr : local->bssid, ETH_ALEN);
- skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
- ETH_ALEN);
- skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
- } else if (local->iw_mode == IW_MODE_ADHOC) {
- /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
- skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
- skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
- ETH_ALEN);
- memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
- }
-
- hdr.frame_control = cpu_to_le16(fc);
-
- skb_pull(skb, skip_header_bytes);
- need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
- if (skb_tailroom(skb) < need_tailroom) {
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (skb == NULL) {
- iface->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
- if (pskb_expand_head(skb, need_headroom, need_tailroom,
- GFP_ATOMIC)) {
- kfree_skb(skb);
- iface->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
- } else if (skb_headroom(skb) < need_headroom) {
- struct sk_buff *tmp = skb;
- skb = skb_realloc_headroom(skb, need_headroom);
- kfree_skb(tmp);
- if (skb == NULL) {
- iface->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
- } else {
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (skb == NULL) {
- iface->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
- }
-
- if (encaps_data)
- memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
- memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
- if (use_wds == WDS_OWN_FRAME) {
- skb_put_data(skb, &hdr.addr4, ETH_ALEN);
- }
-
- iface->stats.tx_packets++;
- iface->stats.tx_bytes += skb->len;
-
- skb_reset_mac_header(skb);
- meta = (struct hostap_skb_tx_data *) skb->cb;
- memset(meta, 0, sizeof(*meta));
- meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
- if (use_wds)
- meta->flags |= HOSTAP_TX_FLAGS_WDS;
- meta->ethertype = ethertype;
- meta->iface = iface;
-
- /* Send IEEE 802.11 encapsulated frame using the master radio device */
- skb->dev = local->dev;
- dev_queue_xmit(skb);
- return NETDEV_TX_OK;
-}
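The EtherType test above implements the usual selective-translation rule for 802.3-to-802.11 conversion: AppleTalk AARP (0x80f3) and IPX (0x8137) keep the bridge-tunnel OUI, every other EtherType (values >= 0x600) gets the plain RFC 1042 OUI, and the EtherType itself survives as the last two octets of the SNAP header, which is why only 12 of the 14 Ethernet header bytes are skipped. A minimal standalone sketch of that selection, with the well-known header constants written out:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint8_t rfc1042_header[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

/* Return the SNAP prefix to insert for a given EtherType, or NULL for
 * raw 802.3 length-field frames that need no encapsulation. */
static const uint8_t *snap_for_ethertype(uint16_t ethertype, size_t *len)
{
	*len = sizeof(rfc1042_header);
	if (ethertype == 0x80f3 /* AARP */ || ethertype == 0x8137 /* IPX */)
		return bridge_tunnel_header;
	if (ethertype >= 0x600)
		return rfc1042_header;
	*len = 0;
	return NULL;
}

int main(void)
{
	size_t len;
	const uint8_t *hdr = snap_for_ethertype(0x0800 /* IPv4 */, &len);

	printf("IPv4 -> %s (%zu bytes)\n",
	       hdr == rfc1042_header ? "RFC 1042" : "bridge-tunnel", len);
	return 0;
}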
-
-
-/* hard_start_xmit function for hostapd wlan#ap interfaces */
-netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hostap_skb_tx_data *meta;
- struct ieee80211_hdr *hdr;
- u16 fc;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (skb->len < 10) {
- printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb "
- "(len=%d)\n", dev->name, skb->len);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- iface->stats.tx_packets++;
- iface->stats.tx_bytes += skb->len;
-
- meta = (struct hostap_skb_tx_data *) skb->cb;
- memset(meta, 0, sizeof(*meta));
- meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
- meta->iface = iface;
-
- if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
- hdr = (struct ieee80211_hdr *) skb->data;
- fc = le16_to_cpu(hdr->frame_control);
- if (ieee80211_is_data(hdr->frame_control) &&
- (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DATA) {
- u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN +
- sizeof(rfc1042_header)];
- meta->ethertype = (pos[0] << 8) | pos[1];
- }
- }
-
- /* Send IEEE 802.11 encapsulated frame using the master radio device */
- skb->dev = local->dev;
- dev_queue_xmit(skb);
- return NETDEV_TX_OK;
-}
-
-
-/* Called only from software IRQ */
-static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
- struct lib80211_crypt_data *crypt)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct ieee80211_hdr *hdr;
- int prefix_len, postfix_len, hdr_len, res;
-
- iface = netdev_priv(skb->dev);
- local = iface->local;
-
- if (skb->len < IEEE80211_DATA_HDR3_LEN) {
- kfree_skb(skb);
- return NULL;
- }
-
- if (local->tkip_countermeasures &&
- strcmp(crypt->ops->name, "TKIP") == 0) {
- hdr = (struct ieee80211_hdr *) skb->data;
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
- "TX packet to %pM\n",
- local->dev->name, hdr->addr1);
- }
- kfree_skb(skb);
- return NULL;
- }
-
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (skb == NULL)
- return NULL;
-
- prefix_len = crypt->ops->extra_mpdu_prefix_len +
- crypt->ops->extra_msdu_prefix_len;
- postfix_len = crypt->ops->extra_mpdu_postfix_len +
- crypt->ops->extra_msdu_postfix_len;
- if ((skb_headroom(skb) < prefix_len ||
- skb_tailroom(skb) < postfix_len) &&
- pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
- kfree_skb(skb);
- return NULL;
- }
-
- hdr = (struct ieee80211_hdr *) skb->data;
- hdr_len = hostap_80211_get_hdrlen(hdr->frame_control);
-
- /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
- * call both MSDU and MPDU encryption functions from here. */
- atomic_inc(&crypt->refcnt);
- res = 0;
- if (crypt->ops->encrypt_msdu)
- res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
- if (res == 0 && crypt->ops->encrypt_mpdu)
- res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
- atomic_dec(&crypt->refcnt);
- if (res < 0) {
- kfree_skb(skb);
- return NULL;
- }
-
- return skb;
-}
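hostap_tx_encrypt sizes headroom and tailroom as the sum of MSDU- and MPDU-level expansion because host-based fragmentation is unsupported, so both layers are applied to a single buffer. A sketch of the arithmetic; the TKIP figures in the comments are quoted as an assumption (the authoritative values live in the lib80211 crypto modules):

/* Mirror of the prefix/postfix arithmetic above. */
struct crypt_sizes {
	int mpdu_prefix;	/* TKIP (assumed): 8, IV + extended IV */
	int msdu_prefix;	/* TKIP (assumed): 0 */
	int mpdu_postfix;	/* TKIP (assumed): 4, ICV */
	int msdu_postfix;	/* TKIP (assumed): 8, Michael MIC */
};

static void crypt_space(const struct crypt_sizes *s, int *prefix, int *postfix)
{
	*prefix = s->mpdu_prefix + s->msdu_prefix;
	*postfix = s->mpdu_postfix + s->msdu_postfix;
}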
-
-
-/* hard_start_xmit function for master radio interface wifi#.
- * AP processing (TX rate control, power save buffering, etc.).
- * Use hardware TX function to send the frame. */
-netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- netdev_tx_t ret = NETDEV_TX_BUSY;
- u16 fc;
- struct hostap_tx_data tx;
- ap_tx_ret tx_ret;
- struct hostap_skb_tx_data *meta;
- int no_encrypt = 0;
- struct ieee80211_hdr *hdr;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- tx.skb = skb;
- tx.sta_ptr = NULL;
-
- meta = (struct hostap_skb_tx_data *) skb->cb;
- if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
- printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
- "expected 0x%08x)\n",
- dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
- ret = NETDEV_TX_OK;
- iface->stats.tx_dropped++;
- goto fail;
- }
-
- if (local->host_encrypt) {
- /* Set crypt to default algorithm and key; will be replaced in
- * AP code if STA has own alg/key */
- tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx];
- tx.host_encrypt = 1;
- } else {
- tx.crypt = NULL;
- tx.host_encrypt = 0;
- }
-
- if (skb->len < 24) {
- printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
- "(len=%d)\n", dev->name, skb->len);
- ret = NETDEV_TX_OK;
- iface->stats.tx_dropped++;
- goto fail;
- }
-
- /* FIX (?):
-	 * The Wi-Fi 802.11b test plan suggests that the AP should ignore the
-	 * power save bit in authentication and (re)association frames and
-	 * assume that the STA remains awake for the response. */
- tx_ret = hostap_handle_sta_tx(local, &tx);
- skb = tx.skb;
- meta = (struct hostap_skb_tx_data *) skb->cb;
- hdr = (struct ieee80211_hdr *) skb->data;
- fc = le16_to_cpu(hdr->frame_control);
- switch (tx_ret) {
- case AP_TX_CONTINUE:
- break;
- case AP_TX_CONTINUE_NOT_AUTHORIZED:
- if (local->ieee_802_1x &&
- ieee80211_is_data(hdr->frame_control) &&
- meta->ethertype != ETH_P_PAE &&
- !(meta->flags & HOSTAP_TX_FLAGS_WDS)) {
- printk(KERN_DEBUG "%s: dropped frame to unauthorized "
- "port (IEEE 802.1X): ethertype=0x%04x\n",
- dev->name, meta->ethertype);
- hostap_dump_tx_80211(dev->name, skb);
-
- ret = NETDEV_TX_OK; /* drop packet */
- iface->stats.tx_dropped++;
- goto fail;
- }
- break;
- case AP_TX_DROP:
- ret = NETDEV_TX_OK; /* drop packet */
- iface->stats.tx_dropped++;
- goto fail;
- case AP_TX_RETRY:
- goto fail;
- case AP_TX_BUFFERED:
- /* do not free skb here, it will be freed when the
- * buffered frame is sent/timed out */
- ret = NETDEV_TX_OK;
- goto tx_exit;
- }
-
-	/* Request a TX callback if the protocol version in the 802.11 header
-	 * is 2; this version 2 is a special case used between hostapd and the
-	 * kernel driver */
- if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) &&
- local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) {
- meta->tx_cb_idx = local->ap->tx_callback_idx;
-
- /* remove special version from the frame header */
- fc &= ~IEEE80211_FCTL_VERS;
- hdr->frame_control = cpu_to_le16(fc);
- }
-
- if (!ieee80211_is_data(hdr->frame_control)) {
- no_encrypt = 1;
- tx.crypt = NULL;
- }
-
- if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt &&
- !(fc & IEEE80211_FCTL_PROTECTED)) {
- no_encrypt = 1;
- PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing "
- "unencrypted EAPOL frame\n", dev->name);
- tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */
- }
-
- if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
- tx.crypt = NULL;
- else if ((tx.crypt ||
- local->crypt_info.crypt[local->crypt_info.tx_keyidx]) &&
- !no_encrypt) {
- /* Add ISWEP flag both for firmware and host based encryption
- */
- fc |= IEEE80211_FCTL_PROTECTED;
- hdr->frame_control = cpu_to_le16(fc);
- } else if (local->drop_unencrypted &&
- ieee80211_is_data(hdr->frame_control) &&
- meta->ethertype != ETH_P_PAE) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: dropped unencrypted TX data "
- "frame (drop_unencrypted=1)\n", dev->name);
- }
- iface->stats.tx_dropped++;
- ret = NETDEV_TX_OK;
- goto fail;
- }
-
- if (tx.crypt) {
- skb = hostap_tx_encrypt(skb, tx.crypt);
- if (skb == NULL) {
- printk(KERN_DEBUG "%s: TX - encryption failed\n",
- dev->name);
- ret = NETDEV_TX_OK;
- goto fail;
- }
- meta = (struct hostap_skb_tx_data *) skb->cb;
- if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
- printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
- "expected 0x%08x) after hostap_tx_encrypt\n",
- dev->name, meta->magic,
- HOSTAP_SKB_TX_DATA_MAGIC);
- ret = NETDEV_TX_OK;
- iface->stats.tx_dropped++;
- goto fail;
- }
- }
-
- if (local->func->tx == NULL || local->func->tx(skb, dev)) {
- ret = NETDEV_TX_OK;
- iface->stats.tx_dropped++;
- } else {
- ret = NETDEV_TX_OK;
- iface->stats.tx_packets++;
- iface->stats.tx_bytes += skb->len;
- }
-
- fail:
- if (ret == NETDEV_TX_OK && skb)
- dev_kfree_skb(skb);
- tx_exit:
- if (tx.sta_ptr)
- hostap_handle_sta_release(tx.sta_ptr);
- return ret;
-}
-
-
-EXPORT_SYMBOL(hostap_master_start_xmit);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
deleted file mode 100644
index 9b546a71e..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ /dev/null
@@ -1,3277 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intersil Prism2 driver with Host AP (software access point) support
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <j@w1.fi>
- * Copyright (c) 2002-2005, Jouni Malinen <j@w1.fi>
- *
- * This file is to be included into hostap.c when S/W AP functionality is
- * compiled.
- *
- * AP: FIX:
- * - if unicast Class 2 (assoc,reassoc,disassoc) frame received from
- * unauthenticated STA, send deauth. frame (8802.11: 5.5)
- * - if unicast Class 3 (data with to/from DS,deauth,pspoll) frame received
- * from authenticated, but unassoc STA, send disassoc frame (8802.11: 5.5)
- * - if unicast Class 3 received from unauthenticated STA, send deauth. frame
- * (8802.11: 5.5)
- */
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/if_arp.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/moduleparam.h>
-#include <linux/etherdevice.h>
-
-#include "hostap_wlan.h"
-#include "hostap.h"
-#include "hostap_ap.h"
-
-static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
- DEF_INTS };
-module_param_array(other_ap_policy, int, NULL, 0444);
-MODULE_PARM_DESC(other_ap_policy, "Other AP beacon monitoring policy (0-3)");
-
-static int ap_max_inactivity[MAX_PARM_DEVICES] = { AP_MAX_INACTIVITY_SEC,
- DEF_INTS };
-module_param_array(ap_max_inactivity, int, NULL, 0444);
-MODULE_PARM_DESC(ap_max_inactivity, "AP timeout (in seconds) for station "
- "inactivity");
-
-static int ap_bridge_packets[MAX_PARM_DEVICES] = { 1, DEF_INTS };
-module_param_array(ap_bridge_packets, int, NULL, 0444);
-MODULE_PARM_DESC(ap_bridge_packets, "Bridge packets directly between "
- "stations");
-
-static int autom_ap_wds[MAX_PARM_DEVICES] = { 0, DEF_INTS };
-module_param_array(autom_ap_wds, int, NULL, 0444);
-MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
- "automatically");
-
-
-static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
-static void hostap_event_expired_sta(struct net_device *dev,
- struct sta_info *sta);
-static void handle_add_proc_queue(struct work_struct *work);
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(struct work_struct *work);
-static void prism2_send_mgmt(struct net_device *dev,
- u16 type_subtype, char *body,
- int body_len, u8 *addr, u16 tx_cb_idx);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
-static int ap_debug_proc_show(struct seq_file *m, void *v)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
-
- seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
- seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
- seq_printf(m, "max_inactivity=%u\n", ap->max_inactivity / HZ);
- seq_printf(m, "bridge_packets=%u\n", ap->bridge_packets);
- seq_printf(m, "nullfunc_ack=%u\n", ap->nullfunc_ack);
- seq_printf(m, "autom_ap_wds=%u\n", ap->autom_ap_wds);
- seq_printf(m, "auth_algs=%u\n", ap->local->auth_algs);
- seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
- return 0;
-}
-#endif
-
-static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
-{
- sta->hnext = ap->sta_hash[STA_HASH(sta->addr)];
- ap->sta_hash[STA_HASH(sta->addr)] = sta;
-}
-
-static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
-{
- struct sta_info *s;
-
- s = ap->sta_hash[STA_HASH(sta->addr)];
- if (s == NULL) return;
- if (ether_addr_equal(s->addr, sta->addr)) {
- ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
- return;
- }
-
- while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr))
- s = s->hnext;
- if (s->hnext != NULL)
- s->hnext = s->hnext->hnext;
- else
- printk("AP: could not remove STA %pM from hash table\n",
- sta->addr);
-}
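ap_sta_hash_add() and ap_sta_hash_del() maintain a singly linked, chained hash of stations; the STA_HASH() macro comes from hostap_ap.h and is assumed here to be simply the last octet of the MAC address. A self-contained model of the bucket handling, including the lookup that ap_get_sta() performs further down:

#include <string.h>

#define STA_HASH_SIZE 256
#define STA_HASH(addr) ((addr)[5])	/* assumed: last MAC octet */

struct sta {
	unsigned char addr[6];
	struct sta *hnext;		/* chain within one bucket */
};

static struct sta *sta_hash[STA_HASH_SIZE];

static void sta_hash_add(struct sta *s)
{
	s->hnext = sta_hash[STA_HASH(s->addr)];
	sta_hash[STA_HASH(s->addr)] = s;
}

static struct sta *sta_hash_get(const unsigned char *addr)
{
	struct sta *s = sta_hash[STA_HASH(addr)];

	while (s && memcmp(s->addr, addr, 6) != 0)
		s = s->hnext;
	return s;			/* NULL when absent */
}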
-
-static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
-{
- if (sta->ap && sta->local)
- hostap_event_expired_sta(sta->local->dev, sta);
-
- if (ap->proc != NULL) {
- char name[20];
- sprintf(name, "%pM", sta->addr);
- remove_proc_entry(name, ap->proc);
- }
-
- if (sta->crypt) {
- sta->crypt->ops->deinit(sta->crypt->priv);
- kfree(sta->crypt);
- sta->crypt = NULL;
- }
-
- skb_queue_purge(&sta->tx_buf);
-
- ap->num_sta--;
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (sta->aid > 0)
- ap->sta_aid[sta->aid - 1] = NULL;
-
- if (!sta->ap)
- kfree(sta->u.sta.challenge);
- timer_shutdown_sync(&sta->timer);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- kfree(sta);
-}
-
-
-static void hostap_set_tim(local_info_t *local, int aid, int set)
-{
- if (local->func->set_tim)
- local->func->set_tim(local->dev, aid, set);
-}
-
-
-static void hostap_event_new_sta(struct net_device *dev, struct sta_info *sta)
-{
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(dev, IWEVREGISTERED, &wrqu, NULL);
-}
-
-
-static void hostap_event_expired_sta(struct net_device *dev,
- struct sta_info *sta)
-{
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof(wrqu));
- memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(dev, IWEVEXPIRED, &wrqu, NULL);
-}
-
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-
-static void ap_handle_timer(struct timer_list *t)
-{
- struct sta_info *sta = from_timer(sta, t, timer);
- local_info_t *local;
- struct ap_data *ap;
- unsigned long next_time = 0;
- int was_assoc;
-
- if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) {
- PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n");
- return;
- }
-
- local = sta->local;
- ap = local->ap;
- was_assoc = sta->flags & WLAN_STA_ASSOC;
-
- if (atomic_read(&sta->users) != 0)
- next_time = jiffies + HZ;
- else if ((sta->flags & WLAN_STA_PERM) && !(sta->flags & WLAN_STA_AUTH))
- next_time = jiffies + ap->max_inactivity;
-
- if (time_before(jiffies, sta->last_rx + ap->max_inactivity)) {
- /* station activity detected; reset timeout state */
- sta->timeout_next = STA_NULLFUNC;
- next_time = sta->last_rx + ap->max_inactivity;
- } else if (sta->timeout_next == STA_DISASSOC &&
- !(sta->flags & WLAN_STA_PENDING_POLL)) {
- /* STA ACKed data nullfunc frame poll */
- sta->timeout_next = STA_NULLFUNC;
- next_time = jiffies + ap->max_inactivity;
- }
-
- if (next_time) {
- sta->timer.expires = next_time;
- add_timer(&sta->timer);
- return;
- }
-
- if (sta->ap)
- sta->timeout_next = STA_DEAUTH;
-
- if (sta->timeout_next == STA_DEAUTH && !(sta->flags & WLAN_STA_PERM)) {
- spin_lock(&ap->sta_table_lock);
- ap_sta_hash_del(ap, sta);
- list_del(&sta->list);
- spin_unlock(&ap->sta_table_lock);
- sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
- } else if (sta->timeout_next == STA_DISASSOC)
- sta->flags &= ~WLAN_STA_ASSOC;
-
- if (was_assoc && !(sta->flags & WLAN_STA_ASSOC) && !sta->ap)
- hostap_event_expired_sta(local->dev, sta);
-
- if (sta->timeout_next == STA_DEAUTH && sta->aid > 0 &&
- !skb_queue_empty(&sta->tx_buf)) {
- hostap_set_tim(local, sta->aid, 0);
- sta->flags &= ~WLAN_STA_TIM;
- }
-
- if (sta->ap) {
- if (ap->autom_ap_wds) {
- PDEBUG(DEBUG_AP, "%s: removing automatic WDS "
- "connection to AP %pM\n",
- local->dev->name, sta->addr);
- hostap_wds_link_oper(local, sta->addr, WDS_DEL);
- }
- } else if (sta->timeout_next == STA_NULLFUNC) {
- /* send data frame to poll STA and check whether this frame
- * is ACKed */
- /* FIX: IEEE80211_STYPE_NULLFUNC would be more appropriate, but
- * it is apparently not retried so TX Exc events are not
- * received for it */
- sta->flags |= WLAN_STA_PENDING_POLL;
- prism2_send_mgmt(local->dev, IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_DATA, NULL, 0,
- sta->addr, ap->tx_callback_poll);
- } else {
- int deauth = sta->timeout_next == STA_DEAUTH;
- __le16 resp;
- PDEBUG(DEBUG_AP, "%s: sending %s info to STA %pM"
- "(last=%lu, jiffies=%lu)\n",
- local->dev->name,
- deauth ? "deauthentication" : "disassociation",
- sta->addr, sta->last_rx, jiffies);
-
- resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID :
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
- prism2_send_mgmt(local->dev, IEEE80211_FTYPE_MGMT |
- (deauth ? IEEE80211_STYPE_DEAUTH :
- IEEE80211_STYPE_DISASSOC),
- (char *) &resp, 2, sta->addr, 0);
- }
-
- if (sta->timeout_next == STA_DEAUTH) {
- if (sta->flags & WLAN_STA_PERM) {
- PDEBUG(DEBUG_AP, "%s: STA %pM"
- " would have been removed, "
- "but it has 'perm' flag\n",
- local->dev->name, sta->addr);
- } else
- ap_free_sta(ap, sta);
- return;
- }
-
- if (sta->timeout_next == STA_NULLFUNC) {
- sta->timeout_next = STA_DISASSOC;
- sta->timer.expires = jiffies + AP_DISASSOC_DELAY;
- } else {
- sta->timeout_next = STA_DEAUTH;
- sta->timer.expires = jiffies + AP_DEAUTH_DELAY;
- }
-
- add_timer(&sta->timer);
-}
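The timer handler above escalates an idle station through three states: a data::nullfunc poll first (to learn whether the STA still ACKs anything), disassociation next, and deauthentication with removal last, with a grace period between steps. A compact model of that progression; the delay values are placeholders for the AP_DISASSOC_DELAY/AP_DEAUTH_DELAY constants defined in hostap_ap.h:

/* Standalone model of the inactivity escalation driven above. */
enum sta_timeout_state { STATE_NULLFUNC, STATE_DISASSOC, STATE_DEAUTH };

#define DISASSOC_DELAY_SEC 5	/* placeholder for AP_DISASSOC_DELAY */
#define DEAUTH_DELAY_SEC   5	/* placeholder for AP_DEAUTH_DELAY */

static enum sta_timeout_state escalate(enum sta_timeout_state cur,
				       unsigned int *delay_sec)
{
	if (cur == STATE_NULLFUNC) {	/* poll went unanswered */
		*delay_sec = DISASSOC_DELAY_SEC;
		return STATE_DISASSOC;
	}
	*delay_sec = DEAUTH_DELAY_SEC;	/* still silent: last step */
	return STATE_DEAUTH;
}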
-
-
-void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
- int resend)
-{
- u8 addr[ETH_ALEN];
- __le16 resp;
- int i;
-
- PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
- eth_broadcast_addr(addr);
-
- resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
-
-	/* deauth message sent; try to resend it a few times; the message is
-	 * broadcast, so it may be delayed until the next DTIM; there is not
-	 * much else we can do at this point since the driver is going to be
-	 * shut down */
- for (i = 0; i < 5; i++) {
- prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_DEAUTH,
- (char *) &resp, 2, addr, 0);
-
- if (!resend || ap->num_sta <= 0)
- return;
-
- mdelay(50);
- }
-}
-
-
-static int ap_control_proc_show(struct seq_file *m, void *v)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- char *policy_txt;
- struct mac_entry *entry;
-
- if (v == SEQ_START_TOKEN) {
- switch (ap->mac_restrictions.policy) {
- case MAC_POLICY_OPEN:
- policy_txt = "open";
- break;
- case MAC_POLICY_ALLOW:
- policy_txt = "allow";
- break;
- case MAC_POLICY_DENY:
- policy_txt = "deny";
- break;
- default:
- policy_txt = "unknown";
- break;
- }
- seq_printf(m, "MAC policy: %s\n", policy_txt);
- seq_printf(m, "MAC entries: %u\n", ap->mac_restrictions.entries);
- seq_puts(m, "MAC list:\n");
- return 0;
- }
-
- entry = v;
- seq_printf(m, "%pM\n", entry->addr);
- return 0;
-}
-
-static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- spin_lock_bh(&ap->mac_restrictions.lock);
- return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
-}
-
-static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
-}
-
-static void ap_control_proc_stop(struct seq_file *m, void *v)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- spin_unlock_bh(&ap->mac_restrictions.lock);
-}
-
-static const struct seq_operations ap_control_proc_seqops = {
- .start = ap_control_proc_start,
- .next = ap_control_proc_next,
- .stop = ap_control_proc_stop,
- .show = ap_control_proc_show,
-};
-
-int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
-{
- struct mac_entry *entry;
-
- entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL);
- if (entry == NULL)
- return -ENOMEM;
-
- memcpy(entry->addr, mac, ETH_ALEN);
-
- spin_lock_bh(&mac_restrictions->lock);
- list_add_tail(&entry->list, &mac_restrictions->mac_list);
- mac_restrictions->entries++;
- spin_unlock_bh(&mac_restrictions->lock);
-
- return 0;
-}
-
-
-int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
-{
- struct list_head *ptr;
- struct mac_entry *entry;
-
- spin_lock_bh(&mac_restrictions->lock);
- for (ptr = mac_restrictions->mac_list.next;
- ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
- entry = list_entry(ptr, struct mac_entry, list);
-
- if (ether_addr_equal(entry->addr, mac)) {
- list_del(ptr);
- kfree(entry);
- mac_restrictions->entries--;
- spin_unlock_bh(&mac_restrictions->lock);
- return 0;
- }
- }
- spin_unlock_bh(&mac_restrictions->lock);
- return -1;
-}
-
-
-static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
- u8 *mac)
-{
- struct mac_entry *entry;
- int found = 0;
-
- if (mac_restrictions->policy == MAC_POLICY_OPEN)
- return 0;
-
- spin_lock_bh(&mac_restrictions->lock);
- list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
- if (ether_addr_equal(entry->addr, mac)) {
- found = 1;
- break;
- }
- }
- spin_unlock_bh(&mac_restrictions->lock);
-
- if (mac_restrictions->policy == MAC_POLICY_ALLOW)
- return !found;
- else
- return found;
-}
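ap_control_mac_deny() reduces the three ACL policies to a single boolean: open never denies, allow denies any address missing from the list, and deny denies exactly the listed addresses. The same decision in isolation:

enum mac_policy { MAC_POLICY_OPEN, MAC_POLICY_ALLOW, MAC_POLICY_DENY };

/* Return nonzero when the MAC must be refused, given whether it is
 * present on the ACL list. */
static int mac_deny(enum mac_policy policy, int listed)
{
	if (policy == MAC_POLICY_OPEN)
		return 0;		/* no restrictions */
	if (policy == MAC_POLICY_ALLOW)
		return !listed;		/* allow list: refuse the unknown */
	return listed;			/* deny list: refuse the listed */
}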
-
-
-void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
-{
- struct list_head *ptr, *n;
- struct mac_entry *entry;
-
- if (mac_restrictions->entries == 0)
- return;
-
- spin_lock_bh(&mac_restrictions->lock);
- for (ptr = mac_restrictions->mac_list.next, n = ptr->next;
- ptr != &mac_restrictions->mac_list;
- ptr = n, n = ptr->next) {
- entry = list_entry(ptr, struct mac_entry, list);
- list_del(ptr);
- kfree(entry);
- }
- mac_restrictions->entries = 0;
- spin_unlock_bh(&mac_restrictions->lock);
-}
-
-
-int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
-{
- struct sta_info *sta;
- __le16 resp;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, mac);
- if (sta) {
- ap_sta_hash_del(ap, sta);
- list_del(&sta->list);
- }
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (!sta)
- return -EINVAL;
-
- resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
- prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH,
- (char *) &resp, 2, sta->addr, 0);
-
- if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
- hostap_event_expired_sta(dev, sta);
-
- ap_free_sta(ap, sta);
-
- return 0;
-}
-
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-void ap_control_kickall(struct ap_data *ap)
-{
- struct list_head *ptr, *n;
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list;
- ptr = n, n = ptr->next) {
- sta = list_entry(ptr, struct sta_info, list);
- ap_sta_hash_del(ap, sta);
- list_del(&sta->list);
- if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
- hostap_event_expired_sta(sta->local->dev, sta);
- ap_free_sta(ap, sta);
- }
- spin_unlock_bh(&ap->sta_table_lock);
-}
-
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-
-static int prism2_ap_proc_show(struct seq_file *m, void *v)
-{
- struct sta_info *sta = v;
- int i;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(m, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
- return 0;
- }
-
- if (!sta->ap)
- return 0;
-
- seq_printf(m, "%pM %d %d %d %d '",
- sta->addr,
- sta->u.ap.channel, sta->last_rx_signal,
- sta->last_rx_silence, sta->last_rx_rate);
-
- for (i = 0; i < sta->u.ap.ssid_len; i++) {
- if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127)
- seq_putc(m, sta->u.ap.ssid[i]);
- else
- seq_printf(m, "<%02x>", sta->u.ap.ssid[i]);
- }
-
- seq_putc(m, '\'');
- if (sta->capability & WLAN_CAPABILITY_ESS)
- seq_puts(m, " [ESS]");
- if (sta->capability & WLAN_CAPABILITY_IBSS)
- seq_puts(m, " [IBSS]");
- if (sta->capability & WLAN_CAPABILITY_PRIVACY)
- seq_puts(m, " [WEP]");
- seq_putc(m, '\n');
- return 0;
-}
-
-static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- spin_lock_bh(&ap->sta_table_lock);
- return seq_list_start_head(&ap->sta_list, *_pos);
-}
-
-static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- return seq_list_next(v, &ap->sta_list, _pos);
-}
-
-static void prism2_ap_proc_stop(struct seq_file *m, void *v)
-{
- struct ap_data *ap = pde_data(file_inode(m->file));
- spin_unlock_bh(&ap->sta_table_lock);
-}
-
-static const struct seq_operations prism2_ap_proc_seqops = {
- .start = prism2_ap_proc_start,
- .next = prism2_ap_proc_next,
- .stop = prism2_ap_proc_stop,
- .show = prism2_ap_proc_show,
-};
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver)
-{
- if (!ap)
- return;
-
- if (sta_fw_ver == PRISM2_FW_VER(0,8,0)) {
- PDEBUG(DEBUG_AP, "Using data::nullfunc ACK workaround - "
- "firmware upgrade recommended\n");
- ap->nullfunc_ack = 1;
- } else
- ap->nullfunc_ack = 0;
-
- if (sta_fw_ver == PRISM2_FW_VER(1,4,2)) {
- printk(KERN_WARNING "%s: Warning: secondary station firmware "
- "version 1.4.2 does not seem to work in Host AP mode\n",
- ap->local->dev->name);
- }
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
-{
- struct ap_data *ap = data;
- struct ieee80211_hdr *hdr;
-
- if (!ap->local->hostapd || !ap->local->apdev) {
- dev_kfree_skb(skb);
- return;
- }
-
-	/* Pass the TX callback frame to hostapd; use 802.11 header version 1
-	 * to indicate failure (no ACK) and 2 to indicate success (frame
-	 * ACKed) */
-
- hdr = (struct ieee80211_hdr *) skb->data;
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_VERS);
- hdr->frame_control |= cpu_to_le16(ok ? BIT(1) : BIT(0));
-
- skb->dev = ap->local->apdev;
- skb_pull(skb, hostap_80211_get_hdrlen(hdr->frame_control));
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = cpu_to_be16(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx(skb);
-}
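The callback above reports TX status to hostapd in-band by overloading the protocol version field of frame_control, which is otherwise always zero: version 1 means the frame was not ACKed, version 2 means it was. A sketch of how the receiving side could decode this (fc is assumed to have been converted to host byte order already):

#include <stdint.h>

#define FCTL_VERS 0x0003	/* mirrors IEEE80211_FCTL_VERS */

/* Returns 1 for ACKed, 0 for not ACKed, -1 if the frame does not carry
 * TX-status information. */
static int tx_status_from_fc(uint16_t fc)
{
	switch (fc & FCTL_VERS) {
	case 2:
		return 1;
	case 1:
		return 0;
	default:
		return -1;
	}
}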
-
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-/* Called only as a tasklet (software IRQ) */
-static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
-{
- struct ap_data *ap = data;
- struct net_device *dev = ap->local->dev;
- struct ieee80211_hdr *hdr;
- u16 auth_alg, auth_transaction, status;
- __le16 *pos;
- struct sta_info *sta = NULL;
- char *txt = NULL;
-
- if (ap->local->hostapd) {
- dev_kfree_skb(skb);
- return;
- }
-
- hdr = (struct ieee80211_hdr *) skb->data;
- if (!ieee80211_is_auth(hdr->frame_control) ||
- skb->len < IEEE80211_MGMT_HDR_LEN + 6) {
- printk(KERN_DEBUG "%s: hostap_ap_tx_cb_auth received invalid "
- "frame\n", dev->name);
- dev_kfree_skb(skb);
- return;
- }
-
- pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
- auth_alg = le16_to_cpu(*pos++);
- auth_transaction = le16_to_cpu(*pos++);
- status = le16_to_cpu(*pos++);
-
- if (!ok) {
- txt = "frame was not ACKed";
- goto done;
- }
-
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, hdr->addr1);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock(&ap->sta_table_lock);
-
- if (!sta) {
- txt = "STA not found";
- goto done;
- }
-
- if (status == WLAN_STATUS_SUCCESS &&
- ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 2) ||
- (auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 4))) {
- txt = "STA authenticated";
- sta->flags |= WLAN_STA_AUTH;
- sta->last_auth = jiffies;
- } else if (status != WLAN_STATUS_SUCCESS)
- txt = "authentication failed";
-
- done:
- if (sta)
- atomic_dec(&sta->users);
- if (txt) {
- PDEBUG(DEBUG_AP, "%s: %pM auth_cb - alg=%d "
- "trans#=%d status=%d - %s\n",
- dev->name, hdr->addr1,
- auth_alg, auth_transaction, status, txt);
- }
- dev_kfree_skb(skb);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
-{
- struct ap_data *ap = data;
- struct net_device *dev = ap->local->dev;
- struct ieee80211_hdr *hdr;
- u16 status;
- __le16 *pos;
- struct sta_info *sta = NULL;
- char *txt = NULL;
-
- if (ap->local->hostapd) {
- dev_kfree_skb(skb);
- return;
- }
-
- hdr = (struct ieee80211_hdr *) skb->data;
- if ((!ieee80211_is_assoc_resp(hdr->frame_control) &&
- !ieee80211_is_reassoc_resp(hdr->frame_control)) ||
- skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
- printk(KERN_DEBUG "%s: hostap_ap_tx_cb_assoc received invalid "
- "frame\n", dev->name);
- dev_kfree_skb(skb);
- return;
- }
-
- if (!ok) {
- txt = "frame was not ACKed";
- goto done;
- }
-
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, hdr->addr1);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock(&ap->sta_table_lock);
-
- if (!sta) {
- txt = "STA not found";
- goto done;
- }
-
- pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
- pos++;
- status = le16_to_cpu(*pos++);
- if (status == WLAN_STATUS_SUCCESS) {
- if (!(sta->flags & WLAN_STA_ASSOC))
- hostap_event_new_sta(dev, sta);
- txt = "STA associated";
- sta->flags |= WLAN_STA_ASSOC;
- sta->last_assoc = jiffies;
- } else
- txt = "association failed";
-
- done:
- if (sta)
- atomic_dec(&sta->users);
- if (txt) {
- PDEBUG(DEBUG_AP, "%s: %pM assoc_cb - %s\n",
- dev->name, hdr->addr1, txt);
- }
- dev_kfree_skb(skb);
-}
-
-/* Called only as a tasklet (software IRQ); TX callback for poll frames used
- * in verifying whether the STA is still present. */
-static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
-{
- struct ap_data *ap = data;
- struct ieee80211_hdr *hdr;
- struct sta_info *sta;
-
- if (skb->len < 24)
- goto fail;
- hdr = (struct ieee80211_hdr *) skb->data;
- if (ok) {
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, hdr->addr1);
- if (sta)
- sta->flags &= ~WLAN_STA_PENDING_POLL;
- spin_unlock(&ap->sta_table_lock);
- } else {
- PDEBUG(DEBUG_AP,
- "%s: STA %pM did not ACK activity poll frame\n",
- ap->local->dev->name, hdr->addr1);
- }
-
- fail:
- dev_kfree_skb(skb);
-}
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-void hostap_init_data(local_info_t *local)
-{
- struct ap_data *ap = local->ap;
-
- if (ap == NULL) {
- printk(KERN_WARNING "hostap_init_data: ap == NULL\n");
- return;
- }
- memset(ap, 0, sizeof(struct ap_data));
- ap->local = local;
-
- ap->ap_policy = GET_INT_PARM(other_ap_policy, local->card_idx);
- ap->bridge_packets = GET_INT_PARM(ap_bridge_packets, local->card_idx);
- ap->max_inactivity =
- GET_INT_PARM(ap_max_inactivity, local->card_idx) * HZ;
- ap->autom_ap_wds = GET_INT_PARM(autom_ap_wds, local->card_idx);
-
- spin_lock_init(&ap->sta_table_lock);
- INIT_LIST_HEAD(&ap->sta_list);
-
- /* Initialize task queue structure for AP management */
- INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
-
- ap->tx_callback_idx =
- hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
- if (ap->tx_callback_idx == 0)
- printk(KERN_WARNING "%s: failed to register TX callback for "
- "AP\n", local->dev->name);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
-
- ap->tx_callback_auth =
- hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
- ap->tx_callback_assoc =
- hostap_tx_callback_register(local, hostap_ap_tx_cb_assoc, ap);
- ap->tx_callback_poll =
- hostap_tx_callback_register(local, hostap_ap_tx_cb_poll, ap);
- if (ap->tx_callback_auth == 0 || ap->tx_callback_assoc == 0 ||
- ap->tx_callback_poll == 0)
- printk(KERN_WARNING "%s: failed to register TX callback for "
- "AP\n", local->dev->name);
-
- spin_lock_init(&ap->mac_restrictions.lock);
- INIT_LIST_HEAD(&ap->mac_restrictions.mac_list);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- ap->initialized = 1;
-}
-
-
-void hostap_init_ap_proc(local_info_t *local)
-{
- struct ap_data *ap = local->ap;
-
- ap->proc = local->proc;
- if (ap->proc == NULL)
- return;
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
- proc_create_single_data("ap_debug", 0, ap->proc, ap_debug_proc_show, ap);
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- proc_create_seq_data("ap_control", 0, ap->proc, &ap_control_proc_seqops,
- ap);
- proc_create_seq_data("ap", 0, ap->proc, &prism2_ap_proc_seqops, ap);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-}
-
-
-void hostap_free_data(struct ap_data *ap)
-{
- struct sta_info *n, *sta;
-
- if (ap == NULL || !ap->initialized) {
- printk(KERN_DEBUG "hostap_free_data: ap has not yet been "
- "initialized - skip resource freeing\n");
- return;
- }
-
- flush_work(&ap->add_sta_proc_queue);
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- flush_work(&ap->wds_oper_queue);
- if (ap->crypt)
- ap->crypt->deinit(ap->crypt_priv);
- ap->crypt = ap->crypt_priv = NULL;
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- list_for_each_entry_safe(sta, n, &ap->sta_list, list) {
- ap_sta_hash_del(ap, sta);
- list_del(&sta->list);
- if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
- hostap_event_expired_sta(sta->local->dev, sta);
- ap_free_sta(ap, sta);
- }
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
- if (ap->proc != NULL) {
- remove_proc_entry("ap_debug", ap->proc);
- }
-#endif /* PRISM2_NO_PROCFS_DEBUG */
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (ap->proc != NULL) {
- remove_proc_entry("ap", ap->proc);
- remove_proc_entry("ap_control", ap->proc);
- }
- ap_control_flush_macs(&ap->mac_restrictions);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- ap->initialized = 0;
-}
-
-
-/* caller should have mutex for AP STA list handling */
-static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
-{
- struct sta_info *s;
-
- s = ap->sta_hash[STA_HASH(sta)];
- while (s != NULL && !ether_addr_equal(s->addr, sta))
- s = s->hnext;
- return s;
-}
-
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-
-/* Called from timer handler and from scheduled AP queue handlers */
-static void prism2_send_mgmt(struct net_device *dev,
- u16 type_subtype, char *body,
- int body_len, u8 *addr, u16 tx_cb_idx)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct ieee80211_hdr *hdr;
- u16 fc;
- struct sk_buff *skb;
- struct hostap_skb_tx_data *meta;
- int hdrlen;
-
- iface = netdev_priv(dev);
- local = iface->local;
- dev = local->dev; /* always use master radio device */
- iface = netdev_priv(dev);
-
- if (!(dev->flags & IFF_UP)) {
- PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt - device is not UP - "
- "cannot send frame\n", dev->name);
- return;
- }
-
- skb = dev_alloc_skb(sizeof(*hdr) + body_len);
- if (skb == NULL) {
- PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt failed to allocate "
- "skb\n", dev->name);
- return;
- }
-
- fc = type_subtype;
- hdrlen = hostap_80211_get_hdrlen(cpu_to_le16(type_subtype));
- hdr = skb_put_zero(skb, hdrlen);
- if (body)
- skb_put_data(skb, body, body_len);
-
-	/* FIX: ctrl::ack sending used the special HFA384X_TX_CTRL_802_11
-	 * tx_control instead of local->tx_control */
-
-
- memcpy(hdr->addr1, addr, ETH_ALEN); /* DA / RA */
- if (ieee80211_is_data(hdr->frame_control)) {
- fc |= IEEE80211_FCTL_FROMDS;
- memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* BSSID */
- memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
- } else if (ieee80211_is_ctl(hdr->frame_control)) {
- /* control:ACK does not have addr2 or addr3 */
- eth_zero_addr(hdr->addr2);
- eth_zero_addr(hdr->addr3);
- } else {
- memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
- memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
- }
-
- hdr->frame_control = cpu_to_le16(fc);
-
- meta = (struct hostap_skb_tx_data *) skb->cb;
- memset(meta, 0, sizeof(*meta));
- meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
- meta->iface = iface;
- meta->tx_cb_idx = tx_cb_idx;
-
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- dev_queue_xmit(skb);
-}
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-#ifdef CONFIG_PROC_FS
-static int prism2_sta_proc_show(struct seq_file *m, void *v)
-{
- struct sta_info *sta = m->private;
- int i;
-
-	/* FIX: possible race condition: the STA data could have just expired,
-	 * but the proc entry was still here so the read could have started;
-	 * some locking should be done here */
-
- seq_printf(m,
- "%s=%pM\nusers=%d\naid=%d\n"
- "flags=0x%04x%s%s%s%s%s%s%s\n"
- "capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
- sta->ap ? "AP" : "STA",
- sta->addr, atomic_read(&sta->users), sta->aid,
- sta->flags,
- sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
- sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
- sta->flags & WLAN_STA_PS ? " PS" : "",
- sta->flags & WLAN_STA_TIM ? " TIM" : "",
- sta->flags & WLAN_STA_PERM ? " PERM" : "",
- sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "",
- sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "",
- sta->capability, sta->listen_interval);
-	/* supported_rates: 500 kbit/s units; msb (basic-rate flag) ignored */
- for (i = 0; i < sizeof(sta->supported_rates); i++)
- if (sta->supported_rates[i] != 0)
- seq_printf(m, "%d%sMbps ",
- (sta->supported_rates[i] & 0x7f) / 2,
- sta->supported_rates[i] & 1 ? ".5" : "");
- seq_printf(m,
- "\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n"
- "last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n"
- "tx_packets=%lu\n"
- "rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n"
- "last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n"
- "tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n"
- "tx[11M]=%d\n"
- "rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n",
- jiffies, sta->last_auth, sta->last_assoc, sta->last_rx,
- sta->last_tx,
- sta->rx_packets, sta->tx_packets, sta->rx_bytes,
- sta->tx_bytes, skb_queue_len(&sta->tx_buf),
- sta->last_rx_silence,
- sta->last_rx_signal, sta->last_rx_rate / 10,
- sta->last_rx_rate % 10 ? ".5" : "",
- sta->tx_rate, sta->tx_count[0], sta->tx_count[1],
- sta->tx_count[2], sta->tx_count[3], sta->rx_count[0],
- sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]);
- if (sta->crypt && sta->crypt->ops && sta->crypt->ops->print_stats)
- sta->crypt->ops->print_stats(m, sta->crypt->priv);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (sta->ap) {
- if (sta->u.ap.channel >= 0)
- seq_printf(m, "channel=%d\n", sta->u.ap.channel);
- seq_puts(m, "ssid=");
- for (i = 0; i < sta->u.ap.ssid_len; i++) {
- if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127)
- seq_putc(m, sta->u.ap.ssid[i]);
- else
- seq_printf(m, "<%02x>", sta->u.ap.ssid[i]);
- }
- seq_putc(m, '\n');
- }
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- return 0;
-}
-#endif
-
-static void handle_add_proc_queue(struct work_struct *work)
-{
- struct ap_data *ap = container_of(work, struct ap_data,
- add_sta_proc_queue);
- struct sta_info *sta;
- char name[20];
- struct add_sta_proc_data *entry, *prev;
-
- entry = ap->add_sta_proc_entries;
- ap->add_sta_proc_entries = NULL;
-
- while (entry) {
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, entry->addr);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (sta) {
- sprintf(name, "%pM", sta->addr);
- sta->proc = proc_create_single_data(
- name, 0, ap->proc,
- prism2_sta_proc_show, sta);
-
- atomic_dec(&sta->users);
- }
-
- prev = entry;
- entry = entry->next;
- kfree(prev);
- }
-}
-
-
-static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
-{
- struct sta_info *sta;
-
- sta = kzalloc(sizeof(struct sta_info), GFP_ATOMIC);
- if (sta == NULL) {
- PDEBUG(DEBUG_AP, "AP: kmalloc failed\n");
- return NULL;
- }
-
- /* initialize STA info data */
- sta->local = ap->local;
- skb_queue_head_init(&sta->tx_buf);
- memcpy(sta->addr, addr, ETH_ALEN);
-
- atomic_inc(&sta->users);
- spin_lock_bh(&ap->sta_table_lock);
- list_add(&sta->list, &ap->sta_list);
- ap->num_sta++;
- ap_sta_hash_add(ap, sta);
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (ap->proc) {
- struct add_sta_proc_data *entry;
-		/* schedule a non-interrupt context process to add a procfs
-		 * entry for the STA since the procfs code uses GFP_KERNEL */
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (entry) {
- memcpy(entry->addr, sta->addr, ETH_ALEN);
- entry->next = ap->add_sta_proc_entries;
- ap->add_sta_proc_entries = entry;
- schedule_work(&ap->add_sta_proc_queue);
- } else
- printk(KERN_DEBUG "Failed to add STA proc data\n");
- }
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- timer_setup(&sta->timer, ap_handle_timer, 0);
- sta->timer.expires = jiffies + ap->max_inactivity;
- if (!ap->local->hostapd)
- add_timer(&sta->timer);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- return sta;
-}
-
-
-static int ap_tx_rate_ok(int rateidx, struct sta_info *sta,
- local_info_t *local)
-{
- if (rateidx > sta->tx_max_rate ||
- !(sta->tx_supp_rates & (1 << rateidx)))
- return 0;
-
- if (local->tx_rate_control != 0 &&
- !(local->tx_rate_control & (1 << rateidx)))
- return 0;
-
- return 1;
-}
-
-
-static void prism2_check_tx_rates(struct sta_info *sta)
-{
- int i;
-
- sta->tx_supp_rates = 0;
- for (i = 0; i < sizeof(sta->supported_rates); i++) {
- if ((sta->supported_rates[i] & 0x7f) == 2)
- sta->tx_supp_rates |= WLAN_RATE_1M;
- if ((sta->supported_rates[i] & 0x7f) == 4)
- sta->tx_supp_rates |= WLAN_RATE_2M;
- if ((sta->supported_rates[i] & 0x7f) == 11)
- sta->tx_supp_rates |= WLAN_RATE_5M5;
- if ((sta->supported_rates[i] & 0x7f) == 22)
- sta->tx_supp_rates |= WLAN_RATE_11M;
- }
- sta->tx_max_rate = sta->tx_rate = sta->tx_rate_idx = 0;
- if (sta->tx_supp_rates & WLAN_RATE_1M) {
- sta->tx_max_rate = 0;
- if (ap_tx_rate_ok(0, sta, sta->local)) {
- sta->tx_rate = 10;
- sta->tx_rate_idx = 0;
- }
- }
- if (sta->tx_supp_rates & WLAN_RATE_2M) {
- sta->tx_max_rate = 1;
- if (ap_tx_rate_ok(1, sta, sta->local)) {
- sta->tx_rate = 20;
- sta->tx_rate_idx = 1;
- }
- }
- if (sta->tx_supp_rates & WLAN_RATE_5M5) {
- sta->tx_max_rate = 2;
- if (ap_tx_rate_ok(2, sta, sta->local)) {
- sta->tx_rate = 55;
- sta->tx_rate_idx = 2;
- }
- }
- if (sta->tx_supp_rates & WLAN_RATE_11M) {
- sta->tx_max_rate = 3;
- if (ap_tx_rate_ok(3, sta, sta->local)) {
- sta->tx_rate = 110;
- sta->tx_rate_idx = 3;
- }
- }
-}
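The magic numbers above come from the 802.11 supported-rates encoding: each octet counts in 500 kbit/s units, with the top bit marking a basic (mandatory) rate, so 2, 4, 11 and 22 stand for 1, 2, 5.5 and 11 Mbps. A standalone decoder for those octets, matching the "%d%sMbps" formatting used by the proc code earlier:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Decode one 802.11 supported-rates octet: 500 kbit/s units, the msb
 * flags a basic rate. */
static void print_rate(uint8_t r)
{
	unsigned int halves = r & 0x7f;

	printf("%u%s Mbps%s\n", halves / 2, (halves & 1) ? ".5" : "",
	       (r & 0x80) ? " (basic)" : "");
}

int main(void)
{
	const uint8_t rates[] = { 0x82, 0x84, 0x0b, 0x16 }; /* 1b, 2b, 5.5, 11 */

	for (size_t i = 0; i < sizeof(rates); i++)
		print_rate(rates[i]);
	return 0;
}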
-
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-
-static void ap_crypt_init(struct ap_data *ap)
-{
- ap->crypt = lib80211_get_crypto_ops("WEP");
-
- if (ap->crypt) {
- if (ap->crypt->init) {
- ap->crypt_priv = ap->crypt->init(0);
- if (ap->crypt_priv == NULL)
- ap->crypt = NULL;
- else {
- u8 key[WEP_KEY_LEN];
- get_random_bytes(key, WEP_KEY_LEN);
- ap->crypt->set_key(key, WEP_KEY_LEN, NULL,
- ap->crypt_priv);
- }
- }
- }
-
- if (ap->crypt == NULL) {
- printk(KERN_WARNING "AP could not initialize WEP: load module "
- "lib80211_crypt_wep.ko\n");
- }
-}
-
-
-/* Generate challenge data for shared key authentication. IEEE 802.11 specifies
- * that the WEP algorithm is used for generating the challenge. The challenge
- * should be unique, but otherwise there is no real need for randomness etc.
- * Initialize WEP with a pseudo-random key and then use an increasing IV to get
- * unique challenge streams.
- *
- * Called only as a scheduled task for pending AP frames.
- */
-static char * ap_auth_make_challenge(struct ap_data *ap)
-{
- char *tmpbuf;
- struct sk_buff *skb;
-
- if (ap->crypt == NULL) {
- ap_crypt_init(ap);
- if (ap->crypt == NULL)
- return NULL;
- }
-
- tmpbuf = kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC);
- if (tmpbuf == NULL) {
- PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n");
- return NULL;
- }
-
- skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
- ap->crypt->extra_mpdu_prefix_len +
- ap->crypt->extra_mpdu_postfix_len);
- if (skb == NULL) {
- kfree(tmpbuf);
- return NULL;
- }
-
- skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
- skb_put_zero(skb, WLAN_AUTH_CHALLENGE_LEN);
- if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
- dev_kfree_skb(skb);
- kfree(tmpbuf);
- return NULL;
- }
-
- skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len,
- tmpbuf, WLAN_AUTH_CHALLENGE_LEN);
- dev_kfree_skb(skb);
-
- return tmpbuf;
-}
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void handle_authen(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- size_t hdrlen;
- struct ap_data *ap = local->ap;
- char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
- int len, olen;
- u16 auth_alg, auth_transaction, status_code;
- __le16 *pos;
- u16 resp = WLAN_STATUS_SUCCESS;
- struct sta_info *sta = NULL;
- struct lib80211_crypt_data *crypt;
- char *txt = "";
-
- len = skb->len - IEEE80211_MGMT_HDR_LEN;
-
- hdrlen = hostap_80211_get_hdrlen(hdr->frame_control);
-
- if (len < 6) {
- PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload "
- "(len=%d) from %pM\n", dev->name, len, hdr->addr2);
- return;
- }
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&local->ap->sta_table_lock);
-
- if (sta && sta->crypt)
- crypt = sta->crypt;
- else {
- int idx = 0;
- if (skb->len >= hdrlen + 3)
- idx = skb->data[hdrlen + 3] >> 6;
- crypt = local->crypt_info.crypt[idx];
- }
-
- pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
- auth_alg = __le16_to_cpu(*pos);
- pos++;
- auth_transaction = __le16_to_cpu(*pos);
- pos++;
- status_code = __le16_to_cpu(*pos);
- pos++;
-
- if (ether_addr_equal(dev->dev_addr, hdr->addr2) ||
- ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
- txt = "authentication denied";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
-
- if (((local->auth_algs & PRISM2_AUTH_OPEN) &&
- auth_alg == WLAN_AUTH_OPEN) ||
- ((local->auth_algs & PRISM2_AUTH_SHARED_KEY) &&
- crypt && auth_alg == WLAN_AUTH_SHARED_KEY)) {
- } else {
- txt = "unsupported algorithm";
- resp = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
- goto fail;
- }
-
- if (len >= 8) {
- u8 *u = (u8 *) pos;
- if (*u == WLAN_EID_CHALLENGE) {
- if (*(u + 1) != WLAN_AUTH_CHALLENGE_LEN) {
- txt = "invalid challenge len";
- resp = WLAN_STATUS_CHALLENGE_FAIL;
- goto fail;
- }
- if (len - 8 < WLAN_AUTH_CHALLENGE_LEN) {
- txt = "challenge underflow";
- resp = WLAN_STATUS_CHALLENGE_FAIL;
- goto fail;
- }
- challenge = (char *) (u + 2);
- }
- }
-
- if (sta && sta->ap) {
- if (time_after(jiffies, sta->u.ap.last_beacon +
- (10 * sta->listen_interval * HZ) / 1024)) {
- PDEBUG(DEBUG_AP, "%s: no beacons received for a while,"
- " assuming AP %pM is now STA\n",
- dev->name, sta->addr);
- sta->ap = 0;
- sta->flags = 0;
- sta->u.sta.challenge = NULL;
- } else {
- txt = "AP trying to authenticate?";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
- }
-
- if ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) ||
- (auth_alg == WLAN_AUTH_SHARED_KEY &&
- (auth_transaction == 1 ||
- (auth_transaction == 3 && sta != NULL &&
- sta->u.sta.challenge != NULL)))) {
- } else {
- txt = "unknown authentication transaction number";
- resp = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
- goto fail;
- }
-
- if (sta == NULL) {
- txt = "new STA";
-
- if (local->ap->num_sta >= MAX_STA_COUNT) {
- /* FIX: might try to remove some old STAs first? */
- txt = "no more room for new STAs";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
-
- sta = ap_add_sta(local->ap, hdr->addr2);
- if (sta == NULL) {
- txt = "ap_add_sta failed";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
- }
-
- switch (auth_alg) {
- case WLAN_AUTH_OPEN:
- txt = "authOK";
-		/* The IEEE 802.11 standard is not completely clear about
-		 * whether a STA is considered authenticated after the
-		 * authentication OK frame has been sent or after it
-		 * has been ACKed. In order to reduce interoperability
-		 * issues, mark the STA authenticated before the ACK. */
- sta->flags |= WLAN_STA_AUTH;
- break;
-
- case WLAN_AUTH_SHARED_KEY:
- if (auth_transaction == 1) {
- if (sta->u.sta.challenge == NULL) {
- sta->u.sta.challenge =
- ap_auth_make_challenge(local->ap);
- if (sta->u.sta.challenge == NULL) {
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
- }
- } else {
- if (sta->u.sta.challenge == NULL ||
- challenge == NULL ||
- memcmp(sta->u.sta.challenge, challenge,
- WLAN_AUTH_CHALLENGE_LEN) != 0 ||
- !ieee80211_has_protected(hdr->frame_control)) {
- txt = "challenge response incorrect";
- resp = WLAN_STATUS_CHALLENGE_FAIL;
- goto fail;
- }
-
- txt = "challenge OK - authOK";
-			/* The IEEE 802.11 standard is not completely clear
-			 * about whether a STA is considered authenticated
-			 * after the authentication OK frame has been sent or
-			 * after it has been ACKed. In order to reduce
-			 * interoperability issues, mark the STA authenticated
-			 * before the ACK. */
- sta->flags |= WLAN_STA_AUTH;
- kfree(sta->u.sta.challenge);
- sta->u.sta.challenge = NULL;
- }
- break;
- }
-
- fail:
- pos = (__le16 *) body;
- *pos = cpu_to_le16(auth_alg);
- pos++;
- *pos = cpu_to_le16(auth_transaction + 1);
- pos++;
- *pos = cpu_to_le16(resp); /* status_code */
- pos++;
- olen = 6;
-
- if (resp == WLAN_STATUS_SUCCESS && sta != NULL &&
- sta->u.sta.challenge != NULL &&
- auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 1) {
- u8 *tmp = (u8 *) pos;
- *tmp++ = WLAN_EID_CHALLENGE;
- *tmp++ = WLAN_AUTH_CHALLENGE_LEN;
- pos++;
- memcpy(pos, sta->u.sta.challenge, WLAN_AUTH_CHALLENGE_LEN);
- olen += 2 + WLAN_AUTH_CHALLENGE_LEN;
- }
-
- prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH,
- body, olen, hdr->addr2, ap->tx_callback_auth);
-
- if (sta) {
- sta->last_rx = jiffies;
- atomic_dec(&sta->users);
- }
-
- if (resp) {
- PDEBUG(DEBUG_AP, "%s: %pM auth (alg=%d "
- "trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n",
- dev->name, hdr->addr2,
- auth_alg, auth_transaction, status_code, len,
- le16_to_cpu(hdr->frame_control), resp, txt);
- }
-}
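For reference, shared key authentication is a four-frame exchange: transaction 1 (request), 2 (challenge from the AP), 3 (WEP-encrypted challenge response) and 4 (result); open system uses only transactions 1 and 2. handle_authen() therefore accepts exactly the STA-originated transaction numbers and always replies with transaction + 1. The validity check, condensed:

/* Condensed form of the transaction-number check in handle_authen(). */
static int auth_transaction_valid(int shared_key, int transaction,
				  int challenge_outstanding)
{
	if (!shared_key)
		return transaction == 1;	/* open system */
	return transaction == 1 ||
	       (transaction == 3 && challenge_outstanding);
}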
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void handle_assoc(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats, int reassoc)
-{
- struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- char body[12], *p, *lpos;
- int len, left;
- __le16 *pos;
- u16 resp = WLAN_STATUS_SUCCESS;
- struct sta_info *sta = NULL;
- int send_deauth = 0;
- char __always_unused *txt = "";
- u8 prev_ap[ETH_ALEN];
-
- left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
-
- if (len < (reassoc ? 10 : 4)) {
- PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload "
- "(len=%d, reassoc=%d) from %pM\n",
- dev->name, len, reassoc, hdr->addr2);
- return;
- }
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta == NULL || (sta->flags & WLAN_STA_AUTH) == 0) {
- spin_unlock_bh(&local->ap->sta_table_lock);
- txt = "trying to associate before authentication";
- send_deauth = 1;
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- sta = NULL; /* do not decrement sta->users */
- goto fail;
- }
- atomic_inc(&sta->users);
- spin_unlock_bh(&local->ap->sta_table_lock);
-
- pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
- sta->capability = __le16_to_cpu(*pos);
- pos++; left -= 2;
- sta->listen_interval = __le16_to_cpu(*pos);
- pos++; left -= 2;
-
- if (reassoc) {
- memcpy(prev_ap, pos, ETH_ALEN);
- pos++; pos++; pos++; left -= 6;
- } else
- eth_zero_addr(prev_ap);
-
- if (left >= 2) {
- unsigned int ileft;
- unsigned char *u = (unsigned char *) pos;
-
- if (*u == WLAN_EID_SSID) {
- u++; left--;
- ileft = *u;
- u++; left--;
-
- if (ileft > left || ileft > MAX_SSID_LEN) {
- txt = "SSID overflow";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
-
- if (ileft != strlen(local->essid) ||
- memcmp(local->essid, u, ileft) != 0) {
- txt = "not our SSID";
- resp = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
- goto fail;
- }
-
- u += ileft;
- left -= ileft;
- }
-
- if (left >= 2 && *u == WLAN_EID_SUPP_RATES) {
- u++; left--;
- ileft = *u;
- u++; left--;
-
- if (ileft > left || ileft == 0 ||
- ileft > WLAN_SUPP_RATES_MAX) {
- txt = "SUPP_RATES len error";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
-
- memset(sta->supported_rates, 0,
- sizeof(sta->supported_rates));
- memcpy(sta->supported_rates, u, ileft);
- prism2_check_tx_rates(sta);
-
- u += ileft;
- left -= ileft;
- }
-
- if (left > 0) {
- PDEBUG(DEBUG_AP, "%s: assoc from %pM"
- " with extra data (%d bytes) [",
- dev->name, hdr->addr2, left);
- while (left > 0) {
- PDEBUG2(DEBUG_AP, "<%02x>", *u);
- u++; left--;
- }
- PDEBUG2(DEBUG_AP, "]\n");
- }
- } else {
- txt = "frame underflow";
- resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
- goto fail;
- }
-
- /* get a unique AID */
- if (sta->aid > 0)
- txt = "OK, old AID";
- else {
- spin_lock_bh(&local->ap->sta_table_lock);
- for (sta->aid = 1; sta->aid <= MAX_AID_TABLE_SIZE; sta->aid++)
- if (local->ap->sta_aid[sta->aid - 1] == NULL)
- break;
- if (sta->aid > MAX_AID_TABLE_SIZE) {
- sta->aid = 0;
- spin_unlock_bh(&local->ap->sta_table_lock);
- resp = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
- txt = "no room for more AIDs";
- } else {
- local->ap->sta_aid[sta->aid - 1] = sta;
- spin_unlock_bh(&local->ap->sta_table_lock);
- txt = "OK, new AID";
- }
- }
-
- fail:
- pos = (__le16 *) body;
-
- if (send_deauth) {
- *pos = cpu_to_le16(WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH);
- pos++;
- } else {
- /* FIX: CF-Pollable and CF-PollReq should be set to match the
- * values in beacons/probe responses */
- /* FIX: how about privacy and WEP? */
- /* capability */
- *pos = cpu_to_le16(WLAN_CAPABILITY_ESS);
- pos++;
-
- /* status_code */
- *pos = cpu_to_le16(resp);
- pos++;
-
- *pos = cpu_to_le16((sta && sta->aid > 0 ? sta->aid : 0) |
- BIT(14) | BIT(15)); /* AID */
- pos++;
-
- /* Supported rates (Information element) */
- p = (char *) pos;
- *p++ = WLAN_EID_SUPP_RATES;
- lpos = p;
- *p++ = 0; /* len */
- if (local->tx_rate_control & WLAN_RATE_1M) {
- *p++ = local->basic_rates & WLAN_RATE_1M ? 0x82 : 0x02;
- (*lpos)++;
- }
- if (local->tx_rate_control & WLAN_RATE_2M) {
- *p++ = local->basic_rates & WLAN_RATE_2M ? 0x84 : 0x04;
- (*lpos)++;
- }
- if (local->tx_rate_control & WLAN_RATE_5M5) {
- *p++ = local->basic_rates & WLAN_RATE_5M5 ?
- 0x8b : 0x0b;
- (*lpos)++;
- }
- if (local->tx_rate_control & WLAN_RATE_11M) {
- *p++ = local->basic_rates & WLAN_RATE_11M ?
- 0x96 : 0x16;
- (*lpos)++;
- }
- pos = (__le16 *) p;
- }
-
- prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
- (send_deauth ? IEEE80211_STYPE_DEAUTH :
- (reassoc ? IEEE80211_STYPE_REASSOC_RESP :
- IEEE80211_STYPE_ASSOC_RESP)),
- body, (u8 *) pos - (u8 *) body,
- hdr->addr2,
- send_deauth ? 0 : local->ap->tx_callback_assoc);
-
- if (sta) {
- if (resp == WLAN_STATUS_SUCCESS) {
- sta->last_rx = jiffies;
- /* STA will be marked associated from TX callback, if
- * AssocResp is ACKed */
- }
- atomic_dec(&sta->users);
- }
-
-#if 0
- PDEBUG(DEBUG_AP, "%s: %pM %sassoc (len=%d "
- "prev_ap=%pM) => %d(%d) (%s)\n",
- dev->name,
- hdr->addr2,
- reassoc ? "re" : "", len,
- prev_ap,
- resp, send_deauth, txt);
-#endif
-}
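The AID written into the association response above has BIT(14) and BIT(15) ORed in because the 802.11 AID field transmits its two most significant bits as ones, so AID 1 appears on the air as 0xc001. Spelled out:

#include <stdint.h>

/* 802.11 encodes the AID with its two most significant bits set. */
static uint16_t encode_aid(uint16_t aid)
{
	return (uint16_t)(aid | (1u << 14) | (1u << 15));	/* 1 -> 0xc001 */
}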
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void handle_deauth(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
- int len;
- u16 reason_code;
- __le16 *pos;
- struct sta_info *sta = NULL;
-
- len = skb->len - IEEE80211_MGMT_HDR_LEN;
-
- if (len < 2) {
- printk("handle_deauth - too short payload (len=%d)\n", len);
- return;
- }
-
- pos = (__le16 *) body;
- reason_code = le16_to_cpu(*pos);
-
- PDEBUG(DEBUG_AP, "%s: deauthentication: %pM len=%d, "
- "reason_code=%d\n", dev->name, hdr->addr2,
- len, reason_code);
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta != NULL) {
- if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
- hostap_event_expired_sta(local->dev, sta);
- sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
- }
- spin_unlock_bh(&local->ap->sta_table_lock);
- if (sta == NULL) {
- printk("%s: deauthentication from %pM, "
- "reason_code=%d, but STA not authenticated\n", dev->name,
- hdr->addr2, reason_code);
- }
-}
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct net_device *dev = local->dev;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
- int len;
- u16 reason_code;
- __le16 *pos;
- struct sta_info *sta = NULL;
-
- len = skb->len - IEEE80211_MGMT_HDR_LEN;
-
- if (len < 2) {
- printk("handle_disassoc - too short payload (len=%d)\n", len);
- return;
- }
-
- pos = (__le16 *) body;
- reason_code = le16_to_cpu(*pos);
-
- PDEBUG(DEBUG_AP, "%s: disassociation: %pM len=%d, "
- "reason_code=%d\n", dev->name, hdr->addr2,
- len, reason_code);
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta != NULL) {
- if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
- hostap_event_expired_sta(local->dev, sta);
- sta->flags &= ~WLAN_STA_ASSOC;
- }
- spin_unlock_bh(&local->ap->sta_table_lock);
- if (sta == NULL) {
- printk("%s: disassociation from %pM, "
- "reason_code=%d, but STA not authenticated\n",
- dev->name, hdr->addr2, reason_code);
- }
-}
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void ap_handle_data_nullfunc(local_info_t *local,
- struct ieee80211_hdr *hdr)
-{
- struct net_device *dev = local->dev;
-
- /* some STA f/w's seem to require a control::ACK frame for
- * data::nullfunc, but at least Prism2 station f/w version 0.8.0 does
- * not send one, so generate the control::ACK for the data::nullfunc
- * here */
-
- printk(KERN_DEBUG "Sending control::ACK for data::nullfunc\n");
- prism2_send_mgmt(dev, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK,
- NULL, 0, hdr->addr2, 0);
-}
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void ap_handle_dropped_data(local_info_t *local,
- struct ieee80211_hdr *hdr)
-{
- struct net_device *dev = local->dev;
- struct sta_info *sta;
- __le16 reason;
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&local->ap->sta_table_lock);
-
- if (sta != NULL && (sta->flags & WLAN_STA_ASSOC)) {
- PDEBUG(DEBUG_AP, "ap_handle_dropped_data: STA is now okay?\n");
- atomic_dec(&sta->users);
- return;
- }
-
- reason = cpu_to_le16(WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
- prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
- ((sta == NULL || !(sta->flags & WLAN_STA_ASSOC)) ?
- IEEE80211_STYPE_DEAUTH : IEEE80211_STYPE_DISASSOC),
- (char *) &reason, sizeof(reason), hdr->addr2, 0);
-
- if (sta)
- atomic_dec(&sta->users);
-}
-
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
- struct sk_buff *skb)
-{
- struct hostap_skb_tx_data *meta;
-
- if (!(sta->flags & WLAN_STA_PS)) {
- /* Station has moved to non-PS mode, so send all buffered
- * frames using normal device queue. */
- dev_queue_xmit(skb);
- return;
- }
-
- /* add a flag for hostap_handle_sta_tx() to know that this skb should
- * be passed through even though STA is using PS */
- meta = (struct hostap_skb_tx_data *) skb->cb;
- meta->flags |= HOSTAP_TX_FLAGS_BUFFERED_FRAME;
- if (!skb_queue_empty(&sta->tx_buf)) {
- /* indicate to STA that more frames follow */
- meta->flags |= HOSTAP_TX_FLAGS_ADD_MOREDATA;
- }
- dev_queue_xmit(skb);
-}
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void handle_pspoll(local_info_t *local,
- struct ieee80211_hdr *hdr,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct net_device *dev = local->dev;
- struct sta_info *sta;
- u16 aid;
- struct sk_buff *skb;
-
- PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
- hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control));
-
- if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
- PDEBUG(DEBUG_AP,
- "handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
- hdr->addr1);
- return;
- }
-
- aid = le16_to_cpu(hdr->duration_id);
- if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) {
- PDEBUG(DEBUG_PS, " PSPOLL and AID[15:14] not set\n");
- return;
- }
- aid &= ~(BIT(15) | BIT(14));
- if (aid == 0 || aid > MAX_AID_TABLE_SIZE) {
- PDEBUG(DEBUG_PS, " invalid aid=%d\n", aid);
- return;
- }
- PDEBUG(DEBUG_PS2, " aid=%d\n", aid);
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&local->ap->sta_table_lock);
-
- if (sta == NULL) {
- PDEBUG(DEBUG_PS, " STA not found\n");
- return;
- }
- if (sta->aid != aid) {
- PDEBUG(DEBUG_PS, " received aid=%i does not match with "
- "assoc.aid=%d\n", aid, sta->aid);
- return;
- }
-
- /* FIX: todo:
- * - add timeout for buffering (clear aid in TIM vector if buffer timed
- * out (expiry time must be longer than ListenInterval for
- * the corresponding STA; "8802-11: 11.2.1.9 AP aging function"))
- * - what to do, if buffered, pspolled, and sent frame is not ACKed by
- * sta; store buffer for later use and leave TIM aid bit set? use
- * TX event to check whether frame was ACKed?
- */
-
- while ((skb = skb_dequeue(&sta->tx_buf)) != NULL) {
- /* send buffered frame .. */
- PDEBUG(DEBUG_PS2, "Sending buffered frame to STA after PS POLL"
- " (buffer_count=%d)\n", skb_queue_len(&sta->tx_buf));
-
- pspoll_send_buffered(local, sta, skb);
-
- if (sta->flags & WLAN_STA_PS) {
- /* send only one buffered packet per PS Poll */
- /* FIX: should ignore further PS Polls until the
- * buffered packet that was just sent is acknowledged
- * (Tx or TxExc event) */
- break;
- }
- }
-
- if (skb_queue_empty(&sta->tx_buf)) {
- /* try to clear aid from TIM */
- if (!(sta->flags & WLAN_STA_TIM))
- PDEBUG(DEBUG_PS2, "Re-unsetting TIM for aid %d\n",
- aid);
- hostap_set_tim(local, aid, 0);
- sta->flags &= ~WLAN_STA_TIM;
- }
-
- atomic_dec(&sta->users);
-}
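handle_pspoll() above recovers the AID from the PS-Poll duration/ID field: bits 14 and 15 must both be set, and the remaining bits must fall in 1..MAX_AID_TABLE_SIZE. A standalone sketch of that validation (MAX_AID_TABLE_SIZE assumed to be the 128 defined in hostap_ap.h further below):

#include <stdint.h>
#include <stdio.h>

#define MAX_AID_TABLE_SIZE 128	/* assumption; matches hostap_ap.h */

/* Return the AID carried in a PS-Poll duration/ID field, or -1 if the
 * two flag bits are missing or the AID is out of range. */
static int pspoll_aid(uint16_t duration_id)
{
	if ((duration_id & 0xc000) != 0xc000)	/* BIT(15) | BIT(14) */
		return -1;
	duration_id &= ~0xc000;
	if (duration_id == 0 || duration_id > MAX_AID_TABLE_SIZE)
		return -1;
	return duration_id;
}

int main(void)
{
	printf("%d\n", pspoll_aid(0xc005));	/* 5 */
	printf("%d\n", pspoll_aid(0x0005));	/* -1: flag bits not set */
	return 0;
}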
-
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-
-static void handle_wds_oper_queue(struct work_struct *work)
-{
- struct ap_data *ap = container_of(work, struct ap_data,
- wds_oper_queue);
- local_info_t *local = ap->local;
- struct wds_oper_data *entry, *prev;
-
- spin_lock_bh(&local->lock);
- entry = local->ap->wds_oper_entries;
- local->ap->wds_oper_entries = NULL;
- spin_unlock_bh(&local->lock);
-
- while (entry) {
- PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection "
- "to AP %pM\n",
- local->dev->name,
- entry->type == WDS_ADD ? "adding" : "removing",
- entry->addr);
- if (entry->type == WDS_ADD)
- prism2_wds_add(local, entry->addr, 0);
- else if (entry->type == WDS_DEL)
- prism2_wds_del(local, entry->addr, 0, 1);
-
- prev = entry;
- entry = entry->next;
- kfree(prev);
- }
-}
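handle_wds_oper_queue() above uses a common deferred-work pattern: detach the entire pending list while holding the lock, then walk, handle, and free the entries with the lock released, so the potentially sleeping add/del operations never run under the spinlock. A standalone sketch of the pattern, with a pthread mutex standing in for the kernel spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct wds_entry {
	int id;			/* stand-in payload */
	struct wds_entry *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct wds_entry *pending;

static void queue_entry(int id)
{
	struct wds_entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->id = id;
	pthread_mutex_lock(&lock);
	e->next = pending;	/* push onto the shared list */
	pending = e;
	pthread_mutex_unlock(&lock);
}

static void drain_pending(void)
{
	struct wds_entry *entry, *prev;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&lock);
	entry = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	/* ...then process and free it with the lock released. */
	while (entry) {
		printf("handling entry %d\n", entry->id);
		prev = entry;
		entry = entry->next;
		free(prev);
	}
}

int main(void)
{
	queue_entry(1);
	queue_entry(2);
	drain_pending();
	return 0;
}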
-
-
-/* Called only as a scheduled task for pending AP frames. */
-static void handle_beacon(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
- int len, left;
- u16 beacon_int, capability;
- __le16 *pos;
- char *ssid = NULL;
- unsigned char *supp_rates = NULL;
- int ssid_len = 0, supp_rates_len = 0;
- struct sta_info *sta = NULL;
- int new_sta = 0, channel = -1;
-
- len = skb->len - IEEE80211_MGMT_HDR_LEN;
-
- if (len < 8 + 2 + 2) {
- printk(KERN_DEBUG "handle_beacon - too short payload "
- "(len=%d)\n", len);
- return;
- }
-
- pos = (__le16 *) body;
- left = len;
-
- /* Timestamp (8 octets) */
- pos += 4; left -= 8;
- /* Beacon interval (2 octets) */
- beacon_int = le16_to_cpu(*pos);
- pos++; left -= 2;
- /* Capability information (2 octets) */
- capability = le16_to_cpu(*pos);
- pos++; left -= 2;
-
- if (local->ap->ap_policy != AP_OTHER_AP_EVEN_IBSS &&
- capability & WLAN_CAPABILITY_IBSS)
- return;
-
- if (left >= 2) {
- unsigned int ileft;
- unsigned char *u = (unsigned char *) pos;
-
- if (*u == WLAN_EID_SSID) {
- u++; left--;
- ileft = *u;
- u++; left--;
-
- if (ileft > left || ileft > MAX_SSID_LEN) {
- PDEBUG(DEBUG_AP, "SSID: overflow\n");
- return;
- }
-
- if (local->ap->ap_policy == AP_OTHER_AP_SAME_SSID &&
- (ileft != strlen(local->essid) ||
- memcmp(local->essid, u, ileft) != 0)) {
- /* not our SSID */
- return;
- }
-
- ssid = u;
- ssid_len = ileft;
-
- u += ileft;
- left -= ileft;
- }
-
- if (*u == WLAN_EID_SUPP_RATES) {
- u++; left--;
- ileft = *u;
- u++; left--;
-
- if (ileft > left || ileft == 0 || ileft > 8) {
- PDEBUG(DEBUG_AP, " - SUPP_RATES len error\n");
- return;
- }
-
- supp_rates = u;
- supp_rates_len = ileft;
-
- u += ileft;
- left -= ileft;
- }
-
- if (*u == WLAN_EID_DS_PARAMS) {
- u++; left--;
- ileft = *u;
- u++; left--;
-
- if (ileft > left || ileft != 1) {
- PDEBUG(DEBUG_AP, " - DS_PARAMS len error\n");
- return;
- }
-
- channel = *u;
-
- u += ileft;
- left -= ileft;
- }
- }
-
- spin_lock_bh(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta != NULL)
- atomic_inc(&sta->users);
- spin_unlock_bh(&local->ap->sta_table_lock);
-
- if (sta == NULL) {
- /* add new AP */
- new_sta = 1;
- sta = ap_add_sta(local->ap, hdr->addr2);
- if (sta == NULL) {
- printk(KERN_INFO "prism2: kmalloc failed for AP "
- "data structure\n");
- return;
- }
- hostap_event_new_sta(local->dev, sta);
-
- /* mark the AP as authenticated and associated for pseudo ad-hoc
- * style communication */
- sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
-
- if (local->ap->autom_ap_wds) {
- hostap_wds_link_oper(local, sta->addr, WDS_ADD);
- }
- }
-
- sta->ap = 1;
- if (ssid) {
- sta->u.ap.ssid_len = ssid_len;
- memcpy(sta->u.ap.ssid, ssid, ssid_len);
- sta->u.ap.ssid[ssid_len] = '\0';
- } else {
- sta->u.ap.ssid_len = 0;
- sta->u.ap.ssid[0] = '\0';
- }
- sta->u.ap.channel = channel;
- sta->rx_packets++;
- sta->rx_bytes += len;
- sta->u.ap.last_beacon = sta->last_rx = jiffies;
- sta->capability = capability;
- sta->listen_interval = beacon_int;
-
- atomic_dec(&sta->users);
-
- if (new_sta) {
- memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
- memcpy(sta->supported_rates, supp_rates, supp_rates_len);
- prism2_check_tx_rates(sta);
- }
-}
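The beacon parser above walks IEEE 802.11 information elements, each an (ID, length, value) triple, checking the advertised length against the bytes remaining before touching the value. A standalone sketch of that walk with the same bounds check (the sample buffer is made up):

#include <stdint.h>
#include <stdio.h>

static void walk_ies(const uint8_t *buf, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		uint8_t id = buf[pos];
		uint8_t ie_len = buf[pos + 1];

		if (pos + 2 + ie_len > len) {
			printf("truncated IE %u\n", id);
			return;
		}
		printf("IE %u, %u bytes\n", id, ie_len);
		pos += 2 + ie_len;
	}
}

int main(void)
{
	/* SSID "ap" followed by a two-rate Supported Rates element */
	const uint8_t ies[] = { 0x00, 0x02, 'a', 'p', 0x01, 0x02, 0x82, 0x84 };

	walk_ies(ies, sizeof(ies));
	return 0;
}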
-
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-/* Called only as a tasklet. */
-static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- struct net_device *dev = local->dev;
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- u16 fc, type, stype;
- struct ieee80211_hdr *hdr;
-
- /* FIX: should give skb->len to handler functions and check that the
- * buffer is long enough */
- hdr = (struct ieee80211_hdr *) skb->data;
- fc = le16_to_cpu(hdr->frame_control);
- type = fc & IEEE80211_FCTL_FTYPE;
- stype = fc & IEEE80211_FCTL_STYPE;
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (!local->hostapd && type == IEEE80211_FTYPE_DATA) {
- PDEBUG(DEBUG_AP, "handle_ap_item - data frame\n");
-
- if (!(fc & IEEE80211_FCTL_TODS) ||
- (fc & IEEE80211_FCTL_FROMDS)) {
- if (stype == IEEE80211_STYPE_NULLFUNC) {
- /* no ToDS nullfunc seems to be used to check
- * AP association; so send reject message to
- * speed up re-association */
- ap_handle_dropped_data(local, hdr);
- goto done;
- }
- PDEBUG(DEBUG_AP, " not ToDS frame (fc=0x%04x)\n",
- fc);
- goto done;
- }
-
- if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
- PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
- " not own MAC\n", hdr->addr1);
- goto done;
- }
-
- if (local->ap->nullfunc_ack &&
- stype == IEEE80211_STYPE_NULLFUNC)
- ap_handle_data_nullfunc(local, hdr);
- else
- ap_handle_dropped_data(local, hdr);
- goto done;
- }
-
- if (type == IEEE80211_FTYPE_MGMT && stype == IEEE80211_STYPE_BEACON) {
- handle_beacon(local, skb, rx_stats);
- goto done;
- }
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- if (type == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL) {
- handle_pspoll(local, hdr, rx_stats);
- goto done;
- }
-
- if (local->hostapd) {
- PDEBUG(DEBUG_AP, "Unknown frame in AP queue: type=0x%02x "
- "subtype=0x%02x\n", type, stype);
- goto done;
- }
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (type != IEEE80211_FTYPE_MGMT) {
- PDEBUG(DEBUG_AP, "handle_ap_item - not a management frame?\n");
- goto done;
- }
-
- if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) {
- PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
- " not own MAC\n", hdr->addr1);
- goto done;
- }
-
- if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) {
- PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
- " not own MAC\n", hdr->addr3);
- goto done;
- }
-
- switch (stype) {
- case IEEE80211_STYPE_ASSOC_REQ:
- handle_assoc(local, skb, rx_stats, 0);
- break;
- case IEEE80211_STYPE_ASSOC_RESP:
- PDEBUG(DEBUG_AP, "==> ASSOC RESP (ignored)\n");
- break;
- case IEEE80211_STYPE_REASSOC_REQ:
- handle_assoc(local, skb, rx_stats, 1);
- break;
- case IEEE80211_STYPE_REASSOC_RESP:
- PDEBUG(DEBUG_AP, "==> REASSOC RESP (ignored)\n");
- break;
- case IEEE80211_STYPE_ATIM:
- PDEBUG(DEBUG_AP, "==> ATIM (ignored)\n");
- break;
- case IEEE80211_STYPE_DISASSOC:
- handle_disassoc(local, skb, rx_stats);
- break;
- case IEEE80211_STYPE_AUTH:
- handle_authen(local, skb, rx_stats);
- break;
- case IEEE80211_STYPE_DEAUTH:
- handle_deauth(local, skb, rx_stats);
- break;
- default:
- PDEBUG(DEBUG_AP, "Unknown mgmt frame subtype 0x%02x\n",
- stype >> 4);
- break;
- }
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- done:
- dev_kfree_skb(skb);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-void hostap_rx(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct ieee80211_hdr *hdr;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (skb->len < 16)
- goto drop;
-
- dev->stats.rx_packets++;
-
- hdr = (struct ieee80211_hdr *) skb->data;
-
- if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
- ieee80211_is_beacon(hdr->frame_control))
- goto drop;
-
- skb->protocol = cpu_to_be16(ETH_P_HOSTAP);
- handle_ap_item(local, skb, rx_stats);
- return;
-
- drop:
- dev_kfree_skb(skb);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
-{
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- struct hostap_80211_rx_status rx_stats;
-
- if (skb_queue_empty(&sta->tx_buf))
- return;
-
- skb = dev_alloc_skb(16);
- if (skb == NULL) {
- printk(KERN_DEBUG "%s: schedule_packet_send: skb alloc "
- "failed\n", local->dev->name);
- return;
- }
-
- hdr = skb_put(skb, 16);
-
- /* Generate a fake pspoll frame to start packet delivery */
- hdr->frame_control = cpu_to_le16(
- IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
- memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN);
- memcpy(hdr->addr2, sta->addr, ETH_ALEN);
- hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14));
-
- PDEBUG(DEBUG_PS2,
- "%s: Scheduling buffered packet delivery for STA %pM\n",
- local->dev->name, sta->addr);
-
- skb->dev = local->dev;
-
- memset(&rx_stats, 0, sizeof(rx_stats));
- hostap_rx(local->dev, skb, &rx_stats);
-}
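schedule_packet_send() fabricates a minimal 16-byte PS-Poll control frame: a little-endian frame control word (type control, subtype PS-Poll), the AID with bits 14 and 15 set in the duration/ID field, then the BSSID and the transmitter address. A standalone sketch of that on-wire layout (the addresses are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void build_pspoll(uint8_t frame[16], uint16_t aid,
			 const uint8_t bssid[6], const uint8_t ta[6])
{
	put_le16(frame, 0x00a4);	/* type=control, subtype=PS-Poll */
	put_le16(frame + 2, aid | 0xc000);	/* AID | BIT(14) | BIT(15) */
	memcpy(frame + 4, bssid, 6);
	memcpy(frame + 10, ta, 6);
}

int main(void)
{
	uint8_t f[16];
	const uint8_t bssid[6] = { 0x02, 0, 0, 0, 0, 1 };
	const uint8_t ta[6]    = { 0x02, 0, 0, 0, 0, 2 };

	build_pspoll(f, 5, bssid, ta);
	for (int i = 0; i < 16; i++)
		printf("%02x ", f[i]);
	printf("\n");
	return 0;
}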
-
-
-int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
- struct iw_quality qual[], int buf_size,
- int aplist)
-{
- struct ap_data *ap = local->ap;
- struct list_head *ptr;
- int count = 0;
-
- spin_lock_bh(&ap->sta_table_lock);
-
- for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
- ptr = ptr->next) {
- struct sta_info *sta = (struct sta_info *) ptr;
-
- if (aplist && !sta->ap)
- continue;
- addr[count].sa_family = ARPHRD_ETHER;
- memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
- if (sta->last_rx_silence == 0)
- qual[count].qual = sta->last_rx_signal < 27 ?
- 0 : (sta->last_rx_signal - 27) * 92 / 127;
- else
- qual[count].qual = sta->last_rx_signal -
- sta->last_rx_silence - 35;
- qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
- qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
- qual[count].updated = sta->last_rx_updated;
-
- sta->last_rx_updated = IW_QUAL_DBM;
-
- count++;
- if (count >= buf_size)
- break;
- }
- spin_unlock_bh(&ap->sta_table_lock);
-
- return count;
-}
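The quality figure computed above has two regimes: with no recorded silence level, the raw firmware signal (roughly 27..154) is scaled linearly onto 0..92; otherwise quality is the signal-over-silence margin minus a fixed offset. A standalone sketch of just that arithmetic (HFA384X_LEVEL_TO_dBm is defined outside this hunk and is not reproduced here):

#include <stdio.h>

static int sta_quality(int last_rx_signal, int last_rx_silence)
{
	if (last_rx_silence == 0)
		return last_rx_signal < 27 ? 0
					   : (last_rx_signal - 27) * 92 / 127;
	return last_rx_signal - last_rx_silence - 35;
}

int main(void)
{
	printf("%d\n", sta_quality(154, 0));	/* 92: best case */
	printf("%d\n", sta_quality(27, 0));	/* 0 */
	printf("%d\n", sta_quality(90, 40));	/* 15 */
	return 0;
}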
-
-
-/* Translate our list of Access Points & Stations to a card independent
- * format that the Wireless Tools will understand - Jean II */
-int prism2_ap_translate_scan(struct net_device *dev,
- struct iw_request_info *info, char *buffer)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct ap_data *ap;
- struct list_head *ptr;
- struct iw_event iwe;
- char *current_ev = buffer;
- char *end_buf = buffer + IW_SCAN_MAX_DATA;
-#if !defined(PRISM2_NO_KERNEL_IEEE80211_MGMT)
- char buf[64];
-#endif
-
- iface = netdev_priv(dev);
- local = iface->local;
- ap = local->ap;
-
- spin_lock_bh(&ap->sta_table_lock);
-
- for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
- ptr = ptr->next) {
- struct sta_info *sta = (struct sta_info *) ptr;
-
- /* First entry *MUST* be the AP MAC address */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, sta->addr, ETH_ALEN);
- iwe.len = IW_EV_ADDR_LEN;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_ADDR_LEN);
-
- /* Use the mode to indicate if it's a station or
- * an Access Point */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWMODE;
- if (sta->ap)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_INFRA;
- iwe.len = IW_EV_UINT_LEN;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_UINT_LEN);
-
- /* Some quality */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVQUAL;
- if (sta->last_rx_silence == 0)
- iwe.u.qual.qual = sta->last_rx_signal < 27 ?
- 0 : (sta->last_rx_signal - 27) * 92 / 127;
- else
- iwe.u.qual.qual = sta->last_rx_signal -
- sta->last_rx_silence - 35;
- iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
- iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
- iwe.u.qual.updated = sta->last_rx_updated;
- iwe.len = IW_EV_QUAL_LEN;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (sta->ap) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.length = sta->u.ap.ssid_len;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe,
- sta->u.ap.ssid);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWENCODE;
- if (sta->capability & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags =
- IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe,
- sta->u.ap.ssid);
-
- if (sta->u.ap.channel > 0 &&
- sta->u.ap.channel <= FREQ_COUNT) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = freq_list[sta->u.ap.channel - 1]
- * 100000;
- iwe.u.freq.e = 1;
- current_ev = iwe_stream_add_event(
- info, current_ev, end_buf, &iwe,
- IW_EV_FREQ_LEN);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "beacon_interval=%d",
- sta->listen_interval);
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe, buf);
- }
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
- sta->last_rx_updated = IW_QUAL_DBM;
-
- /* To be continued, we should make good use of IWEVCUSTOM */
- }
-
- spin_unlock_bh(&ap->sta_table_lock);
-
- return current_ev - buffer;
-}
-
-
-static int prism2_hostapd_add_sta(struct ap_data *ap,
- struct prism2_hostapd_param *param)
-{
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, param->sta_addr);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (sta == NULL) {
- sta = ap_add_sta(ap, param->sta_addr);
- if (sta == NULL)
- return -1;
- }
-
- if (!(sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
- hostap_event_new_sta(sta->local->dev, sta);
-
- sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC;
- sta->last_rx = jiffies;
- sta->aid = param->u.add_sta.aid;
- sta->capability = param->u.add_sta.capability;
- sta->tx_supp_rates = param->u.add_sta.tx_supp_rates;
- if (sta->tx_supp_rates & WLAN_RATE_1M)
- sta->supported_rates[0] = 2;
- if (sta->tx_supp_rates & WLAN_RATE_2M)
- sta->supported_rates[1] = 4;
- if (sta->tx_supp_rates & WLAN_RATE_5M5)
- sta->supported_rates[2] = 11;
- if (sta->tx_supp_rates & WLAN_RATE_11M)
- sta->supported_rates[3] = 22;
- prism2_check_tx_rates(sta);
- atomic_dec(&sta->users);
- return 0;
-}
-
-
-static int prism2_hostapd_remove_sta(struct ap_data *ap,
- struct prism2_hostapd_param *param)
-{
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, param->sta_addr);
- if (sta) {
- ap_sta_hash_del(ap, sta);
- list_del(&sta->list);
- }
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (!sta)
- return -ENOENT;
-
- if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
- hostap_event_expired_sta(sta->local->dev, sta);
- ap_free_sta(ap, sta);
-
- return 0;
-}
-
-
-static int prism2_hostapd_get_info_sta(struct ap_data *ap,
- struct prism2_hostapd_param *param)
-{
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, param->sta_addr);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (!sta)
- return -ENOENT;
-
- param->u.get_info_sta.inactive_sec = (jiffies - sta->last_rx) / HZ;
-
- atomic_dec(&sta->users);
-
- return 1;
-}
-
-
-static int prism2_hostapd_set_flags_sta(struct ap_data *ap,
- struct prism2_hostapd_param *param)
-{
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, param->sta_addr);
- if (sta) {
- sta->flags |= param->u.set_flags_sta.flags_or;
- sta->flags &= param->u.set_flags_sta.flags_and;
- }
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (!sta)
- return -ENOENT;
-
- return 0;
-}
-
-
-static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
- struct prism2_hostapd_param *param)
-{
- struct sta_info *sta;
- int rate;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, param->sta_addr);
- if (sta) {
- sta->rx_packets = sta->tx_packets = 0;
- sta->rx_bytes = sta->tx_bytes = 0;
- for (rate = 0; rate < WLAN_RATE_COUNT; rate++) {
- sta->tx_count[rate] = 0;
- sta->rx_count[rate] = 0;
- }
- }
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (!sta)
- return -ENOENT;
-
- return 0;
-}
-
-
-int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
-{
- switch (param->cmd) {
- case PRISM2_HOSTAPD_FLUSH:
- ap_control_kickall(ap);
- return 0;
- case PRISM2_HOSTAPD_ADD_STA:
- return prism2_hostapd_add_sta(ap, param);
- case PRISM2_HOSTAPD_REMOVE_STA:
- return prism2_hostapd_remove_sta(ap, param);
- case PRISM2_HOSTAPD_GET_INFO_STA:
- return prism2_hostapd_get_info_sta(ap, param);
- case PRISM2_HOSTAPD_SET_FLAGS_STA:
- return prism2_hostapd_set_flags_sta(ap, param);
- case PRISM2_HOSTAPD_STA_CLEAR_STATS:
- return prism2_hostapd_sta_clear_stats(ap, param);
- default:
- printk(KERN_WARNING "prism2_hostapd: unknown cmd=%d\n",
- param->cmd);
- return -EOPNOTSUPP;
- }
-}
-
-
-/* Update station info for host-based TX rate control and return current
- * TX rate */
-static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev)
-{
- int ret = sta->tx_rate;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- sta->tx_count[sta->tx_rate_idx]++;
- sta->tx_since_last_failure++;
- sta->tx_consecutive_exc = 0;
- if (sta->tx_since_last_failure >= WLAN_RATE_UPDATE_COUNT &&
- sta->tx_rate_idx < sta->tx_max_rate) {
- /* use next higher rate */
- int old_rate, new_rate;
- old_rate = new_rate = sta->tx_rate_idx;
- while (new_rate < sta->tx_max_rate) {
- new_rate++;
- if (ap_tx_rate_ok(new_rate, sta, local)) {
- sta->tx_rate_idx = new_rate;
- break;
- }
- }
- if (old_rate != sta->tx_rate_idx) {
- switch (sta->tx_rate_idx) {
- case 0: sta->tx_rate = 10; break;
- case 1: sta->tx_rate = 20; break;
- case 2: sta->tx_rate = 55; break;
- case 3: sta->tx_rate = 110; break;
- default: sta->tx_rate = 0; break;
- }
- PDEBUG(DEBUG_AP, "%s: STA %pM TX rate raised to %d\n",
- dev->name, sta->addr, sta->tx_rate);
- }
- sta->tx_since_last_failure = 0;
- }
-
- return ret;
-}
-
-
-/* Called only from software IRQ. Called for each TX frame prior to possible
- * encryption and transmission. */
-ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
-{
- struct sta_info *sta = NULL;
- struct sk_buff *skb = tx->skb;
- int set_tim, ret;
- struct ieee80211_hdr *hdr;
- struct hostap_skb_tx_data *meta;
-
- meta = (struct hostap_skb_tx_data *) skb->cb;
- ret = AP_TX_CONTINUE;
- if (local->ap == NULL || skb->len < 10 ||
- meta->iface->type == HOSTAP_INTERFACE_STA)
- goto out;
-
- hdr = (struct ieee80211_hdr *) skb->data;
-
- if (hdr->addr1[0] & 0x01) {
- /* broadcast/multicast frame - no AP related processing */
- if (local->ap->num_sta <= 0)
- ret = AP_TX_DROP;
- goto out;
- }
-
- /* unicast packet - check whether destination STA is associated */
- spin_lock(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr1);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock(&local->ap->sta_table_lock);
-
- if (local->iw_mode == IW_MODE_MASTER && sta == NULL &&
- !(meta->flags & HOSTAP_TX_FLAGS_WDS) &&
- meta->iface->type != HOSTAP_INTERFACE_MASTER &&
- meta->iface->type != HOSTAP_INTERFACE_AP) {
-#if 0
- /* This can happen, e.g., when wlan0 is added to a bridge and
- * bridging code does not know which port is the correct target
- * for a unicast frame. In this case, the packet is sent to all
- * ports of the bridge. Since this is a valid scenario, do not
- * print out any errors here. */
- if (net_ratelimit()) {
- printk(KERN_DEBUG "AP: drop packet to non-associated "
- "STA %pM\n", hdr->addr1);
- }
-#endif
- local->ap->tx_drop_nonassoc++;
- ret = AP_TX_DROP;
- goto out;
- }
-
- if (sta == NULL)
- goto out;
-
- if (!(sta->flags & WLAN_STA_AUTHORIZED))
- ret = AP_TX_CONTINUE_NOT_AUTHORIZED;
-
- /* Set tx_rate if using host-based TX rate control */
- if (!local->fw_tx_rate_control)
- local->ap->last_tx_rate = meta->rate =
- ap_update_sta_tx_rate(sta, local->dev);
-
- if (local->iw_mode != IW_MODE_MASTER)
- goto out;
-
- if (!(sta->flags & WLAN_STA_PS))
- goto out;
-
- if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) {
- /* indicate to STA that more frames follow */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- }
-
- if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) {
- /* packet was already buffered and now send due to
- * PS poll, so do not rebuffer it */
- goto out;
- }
-
- if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) {
- PDEBUG(DEBUG_PS, "%s: No more space in STA (%pM)'s"
- " PS mode buffer\n",
- local->dev->name, sta->addr);
- /* Make sure that TIM is set for the station (it might not be
- * after AP wlan hw reset). */
- /* FIX: should fix hw reset to restore bits based on STA
- * buffer state.. */
- hostap_set_tim(local, sta->aid, 1);
- sta->flags |= WLAN_STA_TIM;
- ret = AP_TX_DROP;
- goto out;
- }
-
- /* STA in PS mode, buffer frame for later delivery */
- set_tim = skb_queue_empty(&sta->tx_buf);
- skb_queue_tail(&sta->tx_buf, skb);
- /* FIX: could save RX time to skb and expire buffered frames after
- * some time if STA does not poll for them */
-
- if (set_tim) {
- if (sta->flags & WLAN_STA_TIM)
- PDEBUG(DEBUG_PS2, "Re-setting TIM for aid %d\n",
- sta->aid);
- hostap_set_tim(local, sta->aid, 1);
- sta->flags |= WLAN_STA_TIM;
- }
-
- ret = AP_TX_BUFFERED;
-
- out:
- if (sta != NULL) {
- if (ret == AP_TX_CONTINUE ||
- ret == AP_TX_CONTINUE_NOT_AUTHORIZED) {
- sta->tx_packets++;
- sta->tx_bytes += skb->len;
- sta->last_tx = jiffies;
- }
-
- if ((ret == AP_TX_CONTINUE ||
- ret == AP_TX_CONTINUE_NOT_AUTHORIZED) &&
- sta->crypt && tx->host_encrypt) {
- tx->crypt = sta->crypt;
- tx->sta_ptr = sta; /* hostap_handle_sta_release() will
- * be called to release sta info
- * later */
- } else
- atomic_dec(&sta->users);
- }
-
- return ret;
-}
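hostap_handle_sta_tx() above sets the station's TIM bit exactly when its power-save buffer goes from empty to non-empty, and the PS-Poll path clears it once the buffer drains. A standalone sketch of that bookkeeping with a plain bitmap and per-AID counters (sizes and names are illustrative; the driver itself keys this off skb queues and hostap_set_tim()):

#include <stdio.h>

#define MAX_AID 128

static unsigned char tim[MAX_AID / 8 + 1];	/* one bit per AID */
static int buffered[MAX_AID + 1];		/* frames queued per AID */

static void set_tim(int aid, int set)
{
	if (set)
		tim[aid / 8] |= 1 << (aid % 8);
	else
		tim[aid / 8] &= ~(1 << (aid % 8));
}

static void buffer_frame(int aid)
{
	if (buffered[aid]++ == 0)	/* queue went empty -> non-empty */
		set_tim(aid, 1);
}

static void deliver_frame(int aid)
{
	if (buffered[aid] > 0 && --buffered[aid] == 0)
		set_tim(aid, 0);	/* queue drained */
}

int main(void)
{
	buffer_frame(5);
	printf("TIM bit 5: %d\n", !!(tim[0] & (1 << 5)));	/* 1 */
	deliver_frame(5);
	printf("TIM bit 5: %d\n", !!(tim[0] & (1 << 5)));	/* 0 */
	return 0;
}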
-
-
-void hostap_handle_sta_release(void *ptr)
-{
- struct sta_info *sta = ptr;
- atomic_dec(&sta->users);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
-{
- struct sta_info *sta;
- struct ieee80211_hdr *hdr;
- struct hostap_skb_tx_data *meta;
-
- hdr = (struct ieee80211_hdr *) skb->data;
- meta = (struct hostap_skb_tx_data *) skb->cb;
-
- spin_lock(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr1);
- if (!sta) {
- spin_unlock(&local->ap->sta_table_lock);
- PDEBUG(DEBUG_AP, "%s: Could not find STA %pM"
- " for this TX error (@%lu)\n",
- local->dev->name, hdr->addr1, jiffies);
- return;
- }
-
- sta->tx_since_last_failure = 0;
- sta->tx_consecutive_exc++;
-
- if (sta->tx_consecutive_exc >= WLAN_RATE_DECREASE_THRESHOLD &&
- sta->tx_rate_idx > 0 && meta->rate <= sta->tx_rate) {
- /* use next lower rate */
- int old, rate;
- old = rate = sta->tx_rate_idx;
- while (rate > 0) {
- rate--;
- if (ap_tx_rate_ok(rate, sta, local)) {
- sta->tx_rate_idx = rate;
- break;
- }
- }
- if (old != sta->tx_rate_idx) {
- switch (sta->tx_rate_idx) {
- case 0: sta->tx_rate = 10; break;
- case 1: sta->tx_rate = 20; break;
- case 2: sta->tx_rate = 55; break;
- case 3: sta->tx_rate = 110; break;
- default: sta->tx_rate = 0; break;
- }
- PDEBUG(DEBUG_AP,
- "%s: STA %pM TX rate lowered to %d\n",
- local->dev->name, sta->addr, sta->tx_rate);
- }
- sta->tx_consecutive_exc = 0;
- }
- spin_unlock(&local->ap->sta_table_lock);
-}
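Taken together, ap_update_sta_tx_rate() and the exception handler above implement a simple streak-based rate control: WLAN_RATE_UPDATE_COUNT consecutive successes step the rate index up, WLAN_RATE_DECREASE_THRESHOLD consecutive excessive-retry failures step it down, and the index maps onto the 802.11b table in 0.1 Mb/s units. A simplified standalone sketch of that logic (it omits the per-STA ap_tx_rate_ok() mask the driver also applies):

#include <stdio.h>

#define RATE_UPDATE_COUNT	50
#define RATE_DECREASE_THRESHOLD	2

static const int rate_tbl[] = { 10, 20, 55, 110 };	/* 0.1 Mb/s units */

struct rate_state {
	int idx, max_idx;
	int ok_streak, fail_streak;
};

static void tx_result(struct rate_state *rs, int acked)
{
	if (acked) {
		rs->fail_streak = 0;
		if (++rs->ok_streak >= RATE_UPDATE_COUNT &&
		    rs->idx < rs->max_idx) {
			rs->idx++;		/* next higher rate */
			rs->ok_streak = 0;
		}
	} else {
		rs->ok_streak = 0;
		if (++rs->fail_streak >= RATE_DECREASE_THRESHOLD &&
		    rs->idx > 0) {
			rs->idx--;		/* next lower rate */
			rs->fail_streak = 0;
		}
	}
}

int main(void)
{
	struct rate_state rs = { .idx = 0, .max_idx = 3 };

	for (int i = 0; i < 50; i++)
		tx_result(&rs, 1);
	printf("after 50 ACKs: %d\n", rate_tbl[rs.idx]);	/* 20 */
	tx_result(&rs, 0);
	tx_result(&rs, 0);
	printf("after 2 failures: %d\n", rate_tbl[rs.idx]);	/* 10 */
	return 0;
}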
-
-
-static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
- int pwrmgt, int type, int stype)
-{
- if (pwrmgt && !(sta->flags & WLAN_STA_PS)) {
- sta->flags |= WLAN_STA_PS;
- PDEBUG(DEBUG_PS2, "STA %pM changed to use PS "
- "mode (type=0x%02X, stype=0x%02X)\n",
- sta->addr, type >> 2, stype >> 4);
- } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) {
- sta->flags &= ~WLAN_STA_PS;
- PDEBUG(DEBUG_PS2, "STA %pM changed to not use "
- "PS mode (type=0x%02X, stype=0x%02X)\n",
- sta->addr, type >> 2, stype >> 4);
- if (type != IEEE80211_FTYPE_CTL ||
- stype != IEEE80211_STYPE_PSPOLL)
- schedule_packet_send(local, sta);
- }
-}
-
-
-/* Called only as a tasklet (software IRQ). Called for each RX frame to update
- * STA power saving state. pwrmgt is a flag from the 802.11 frame_control field. */
-int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr)
-{
- struct sta_info *sta;
- u16 fc;
-
- spin_lock(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock(&local->ap->sta_table_lock);
-
- if (!sta)
- return -1;
-
- fc = le16_to_cpu(hdr->frame_control);
- hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
- fc & IEEE80211_FCTL_FTYPE,
- fc & IEEE80211_FCTL_STYPE);
-
- atomic_dec(&sta->users);
- return 0;
-}
-
-
-/* Called only as a tasklet (software IRQ). Called for each RX frame after
- * getting RX header and payload from hardware. */
-ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
- struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats,
- int wds)
-{
- int ret;
- struct sta_info *sta;
- u16 fc, type, stype;
- struct ieee80211_hdr *hdr;
-
- if (local->ap == NULL)
- return AP_RX_CONTINUE;
-
- hdr = (struct ieee80211_hdr *) skb->data;
-
- fc = le16_to_cpu(hdr->frame_control);
- type = fc & IEEE80211_FCTL_FTYPE;
- stype = fc & IEEE80211_FCTL_STYPE;
-
- spin_lock(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock(&local->ap->sta_table_lock);
-
- if (sta && !(sta->flags & WLAN_STA_AUTHORIZED))
- ret = AP_RX_CONTINUE_NOT_AUTHORIZED;
- else
- ret = AP_RX_CONTINUE;
-
-
- if (fc & IEEE80211_FCTL_TODS) {
- if (!wds && (sta == NULL || !(sta->flags & WLAN_STA_ASSOC))) {
- if (local->hostapd) {
- prism2_rx_80211(local->apdev, skb, rx_stats,
- PRISM2_RX_NON_ASSOC);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- } else {
- printk(KERN_DEBUG "%s: dropped received packet"
- " from non-associated STA %pM"
- " (type=0x%02x, subtype=0x%02x)\n",
- dev->name, hdr->addr2,
- type >> 2, stype >> 4);
- hostap_rx(dev, skb, rx_stats);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- }
- ret = AP_RX_EXIT;
- goto out;
- }
- } else if (fc & IEEE80211_FCTL_FROMDS) {
- if (!wds) {
- /* FromDS frame - not for us; probably
- * broadcast/multicast in another BSS - drop */
- if (ether_addr_equal(hdr->addr1, dev->dev_addr)) {
- printk(KERN_DEBUG "Odd.. FromDS packet "
- "received with own BSSID\n");
- hostap_dump_rx_80211(dev->name, skb, rx_stats);
- }
- ret = AP_RX_DROP;
- goto out;
- }
- } else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
- ether_addr_equal(hdr->addr1, dev->dev_addr)) {
-
- if (local->hostapd) {
- prism2_rx_80211(local->apdev, skb, rx_stats,
- PRISM2_RX_NON_ASSOC);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- } else {
- /* At least Lucent f/w seems to send data::nullfunc
- * frames with no ToDS flag when the current AP returns
- * after being unavailable for some time. Speed up
- * re-association by informing the station about it not
- * being associated. */
- printk(KERN_DEBUG "%s: rejected received nullfunc frame"
- " without ToDS from not associated STA %pM\n",
- dev->name, hdr->addr2);
- hostap_rx(dev, skb, rx_stats);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- }
- ret = AP_RX_EXIT;
- goto out;
- } else if (stype == IEEE80211_STYPE_NULLFUNC) {
- /* At least Lucent cards seem to send periodic nullfunc
- * frames with ToDS. Let these through to update SQ
- * stats and PS state. Nullfunc frames do not contain
- * any data and they will be dropped below. */
- } else {
- /* If BSSID (Addr3) is foreign, this frame is a normal
- * broadcast frame from an IBSS network. Drop it silently.
- * If BSSID is own, report the dropping of this frame. */
- if (ether_addr_equal(hdr->addr3, dev->dev_addr)) {
- printk(KERN_DEBUG "%s: dropped received packet from %pM"
- " with no ToDS flag "
- "(type=0x%02x, subtype=0x%02x)\n", dev->name,
- hdr->addr2, type >> 2, stype >> 4);
- hostap_dump_rx_80211(dev->name, skb, rx_stats);
- }
- ret = AP_RX_DROP;
- goto out;
- }
-
- if (sta) {
- hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
- type, stype);
-
- sta->rx_packets++;
- sta->rx_bytes += skb->len;
- sta->last_rx = jiffies;
- }
-
- if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC &&
- fc & IEEE80211_FCTL_TODS) {
- if (local->hostapd) {
- prism2_rx_80211(local->apdev, skb, rx_stats,
- PRISM2_RX_NULLFUNC_ACK);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- } else {
- /* some STA f/w's seem to require control::ACK frame
- * for data::nullfunc, but Prism2 f/w 0.8.0 (at least
- * from Compaq) does not send this.. Try to generate
- * ACK for these frames from the host driver to make
- * power saving work with, e.g., Lucent WaveLAN f/w */
- hostap_rx(dev, skb, rx_stats);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
- }
- ret = AP_RX_EXIT;
- goto out;
- }
-
- out:
- if (sta)
- atomic_dec(&sta->users);
-
- return ret;
-}
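The RX path above branches on the ToDS/FromDS pair in frame_control, which classifies the direction of every 802.11 data frame. A standalone sketch of that classification using the standard flag values:

#include <stdint.h>
#include <stdio.h>

#define FCTL_TODS	0x0100
#define FCTL_FROMDS	0x0200

static const char *ds_class(uint16_t fc)
{
	switch (fc & (FCTL_TODS | FCTL_FROMDS)) {
	case 0:			return "STA to STA (IBSS)";
	case FCTL_TODS:		return "STA to AP";
	case FCTL_FROMDS:	return "AP to STA";
	default:		return "WDS (AP to AP)";
	}
}

int main(void)
{
	printf("%s\n", ds_class(FCTL_TODS));			/* STA to AP */
	printf("%s\n", ds_class(FCTL_TODS | FCTL_FROMDS));	/* WDS */
	return 0;
}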
-
-
-/* Called only as a tasklet (software IRQ) */
-int hostap_handle_sta_crypto(local_info_t *local,
- struct ieee80211_hdr *hdr,
- struct lib80211_crypt_data **crypt,
- void **sta_ptr)
-{
- struct sta_info *sta;
-
- spin_lock(&local->ap->sta_table_lock);
- sta = ap_get_sta(local->ap, hdr->addr2);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock(&local->ap->sta_table_lock);
-
- if (!sta)
- return -1;
-
- if (sta->crypt) {
- *crypt = sta->crypt;
- *sta_ptr = sta;
- /* hostap_handle_sta_release() will be called to release STA
- * info */
- } else
- atomic_dec(&sta->users);
-
- return 0;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr)
-{
- struct sta_info *sta;
- int ret = 0;
-
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, sta_addr);
- if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap)
- ret = 1;
- spin_unlock(&ap->sta_table_lock);
-
- return ret;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr)
-{
- struct sta_info *sta;
- int ret = 0;
-
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, sta_addr);
- if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap &&
- ((sta->flags & WLAN_STA_AUTHORIZED) ||
- ap->local->ieee_802_1x == 0))
- ret = 1;
- spin_unlock(&ap->sta_table_lock);
-
- return ret;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
-{
- struct sta_info *sta;
- int ret = 1;
-
- if (!ap)
- return -1;
-
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, sta_addr);
- if (sta)
- ret = 0;
- spin_unlock(&ap->sta_table_lock);
-
- if (ret == 1) {
- sta = ap_add_sta(ap, sta_addr);
- if (!sta)
- return -1;
- sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
- sta->ap = 1;
- memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
- /* No way of knowing which rates are supported since we did not
- * get supported rates element from beacon/assoc req. Assume
- * that remote end supports all 802.11b rates. */
- sta->supported_rates[0] = 0x82;
- sta->supported_rates[1] = 0x84;
- sta->supported_rates[2] = 0x0b;
- sta->supported_rates[3] = 0x16;
- sta->tx_supp_rates = WLAN_RATE_1M | WLAN_RATE_2M |
- WLAN_RATE_5M5 | WLAN_RATE_11M;
- sta->tx_rate = 110;
- sta->tx_max_rate = sta->tx_rate_idx = 3;
- }
-
- return ret;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-int hostap_update_rx_stats(struct ap_data *ap,
- struct ieee80211_hdr *hdr,
- struct hostap_80211_rx_status *rx_stats)
-{
- struct sta_info *sta;
-
- if (!ap)
- return -1;
-
- spin_lock(&ap->sta_table_lock);
- sta = ap_get_sta(ap, hdr->addr2);
- if (sta) {
- sta->last_rx_silence = rx_stats->noise;
- sta->last_rx_signal = rx_stats->signal;
- sta->last_rx_rate = rx_stats->rate;
- sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- if (rx_stats->rate == 10)
- sta->rx_count[0]++;
- else if (rx_stats->rate == 20)
- sta->rx_count[1]++;
- else if (rx_stats->rate == 55)
- sta->rx_count[2]++;
- else if (rx_stats->rate == 110)
- sta->rx_count[3]++;
- }
- spin_unlock(&ap->sta_table_lock);
-
- return sta ? 0 : -1;
-}
-
-
-void hostap_update_rates(local_info_t *local)
-{
- struct sta_info *sta;
- struct ap_data *ap = local->ap;
-
- if (!ap)
- return;
-
- spin_lock_bh(&ap->sta_table_lock);
- list_for_each_entry(sta, &ap->sta_list, list) {
- prism2_check_tx_rates(sta);
- }
- spin_unlock_bh(&ap->sta_table_lock);
-}
-
-
-void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
- struct lib80211_crypt_data ***crypt)
-{
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- sta = ap_get_sta(ap, addr);
- if (sta)
- atomic_inc(&sta->users);
- spin_unlock_bh(&ap->sta_table_lock);
-
- if (!sta && permanent)
- sta = ap_add_sta(ap, addr);
-
- if (!sta)
- return NULL;
-
- if (permanent)
- sta->flags |= WLAN_STA_PERM;
-
- *crypt = &sta->crypt;
-
- return sta;
-}
-
-
-void hostap_add_wds_links(local_info_t *local)
-{
- struct ap_data *ap = local->ap;
- struct sta_info *sta;
-
- spin_lock_bh(&ap->sta_table_lock);
- list_for_each_entry(sta, &ap->sta_list, list) {
- if (sta->ap)
- hostap_wds_link_oper(local, sta->addr, WDS_ADD);
- }
- spin_unlock_bh(&ap->sta_table_lock);
-
- schedule_work(&local->ap->wds_oper_queue);
-}
-
-
-void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type)
-{
- struct wds_oper_data *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return;
- memcpy(entry->addr, addr, ETH_ALEN);
- entry->type = type;
- spin_lock_bh(&local->lock);
- entry->next = local->ap->wds_oper_entries;
- local->ap->wds_oper_entries = entry;
- spin_unlock_bh(&local->lock);
-
- schedule_work(&local->ap->wds_oper_queue);
-}
-
-
-EXPORT_SYMBOL(hostap_init_data);
-EXPORT_SYMBOL(hostap_init_ap_proc);
-EXPORT_SYMBOL(hostap_free_data);
-EXPORT_SYMBOL(hostap_check_sta_fw_version);
-EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.h b/drivers/net/wireless/intersil/hostap/hostap_ap.h
deleted file mode 100644
index b7ac9e2f1..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.h
+++ /dev/null
@@ -1,264 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef HOSTAP_AP_H
-#define HOSTAP_AP_H
-
-#include "hostap_80211.h"
-
-/* AP data structures for STAs */
-
-/* maximum number of frames to buffer per STA */
-#define STA_MAX_TX_BUFFER 32
-
-/* STA flags */
-#define WLAN_STA_AUTH BIT(0)
-#define WLAN_STA_ASSOC BIT(1)
-#define WLAN_STA_PS BIT(2)
-#define WLAN_STA_TIM BIT(3) /* TIM bit is on for PS stations */
-#define WLAN_STA_PERM BIT(4) /* permanent; do not remove entry on expiration */
-#define WLAN_STA_AUTHORIZED BIT(5) /* If 802.1X is used, this flag controls
- * whether the STA is authorized to send
- * and receive non-IEEE 802.1X frames
- */
-#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
-
-#define WLAN_RATE_1M BIT(0)
-#define WLAN_RATE_2M BIT(1)
-#define WLAN_RATE_5M5 BIT(2)
-#define WLAN_RATE_11M BIT(3)
-#define WLAN_RATE_COUNT 4
-
-/* Maximum size of Supported Rates info element. IEEE 802.11 has a limit of 8,
- * but some pre-standard IEEE 802.11g products use longer elements. */
-#define WLAN_SUPP_RATES_MAX 32
-
-/* Try to increase TX rate after # successfully sent consecutive packets */
-#define WLAN_RATE_UPDATE_COUNT 50
-
-/* Decrease TX rate after # consecutive dropped packets */
-#define WLAN_RATE_DECREASE_THRESHOLD 2
-
-struct sta_info {
- struct list_head list;
- struct sta_info *hnext; /* next entry in hash table list */
- atomic_t users; /* number of users (do not remove if > 0) */
- struct proc_dir_entry *proc;
-
- u8 addr[6];
- u16 aid; /* STA's unique AID (1 .. 2007) or 0 if not yet assigned */
- u32 flags;
- u16 capability;
- u16 listen_interval; /* or beacon_int for APs */
- u8 supported_rates[WLAN_SUPP_RATES_MAX];
-
- unsigned long last_auth;
- unsigned long last_assoc;
- unsigned long last_rx;
- unsigned long last_tx;
- unsigned long rx_packets, tx_packets;
- unsigned long rx_bytes, tx_bytes;
- struct sk_buff_head tx_buf;
- /* FIX: timeout buffers with an expiry time somehow derived from
- * listen_interval */
-
- s8 last_rx_silence; /* Noise in dBm */
- s8 last_rx_signal; /* Signal strength in dBm */
- u8 last_rx_rate; /* RX rate in 0.1 Mbps */
- u8 last_rx_updated; /* IWSPY's struct iw_quality::updated */
-
- u8 tx_supp_rates; /* bit field of supported TX rates */
- u8 tx_rate; /* current TX rate (in 0.1 Mbps) */
- u8 tx_rate_idx; /* current TX rate (WLAN_RATE_*) */
- u8 tx_max_rate; /* max TX rate (WLAN_RATE_*) */
- u32 tx_count[WLAN_RATE_COUNT]; /* number of frames sent (per rate) */
- u32 rx_count[WLAN_RATE_COUNT]; /* number of frames received (per rate)
- */
- u32 tx_since_last_failure;
- u32 tx_consecutive_exc;
-
- struct lib80211_crypt_data *crypt;
-
- int ap; /* whether this station is an AP */
-
- local_info_t *local;
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- union {
- struct {
- char *challenge; /* shared key authentication
- * challenge */
- } sta;
- struct {
- int ssid_len;
- unsigned char ssid[MAX_SSID_LEN + 1]; /* AP's ssid */
- int channel;
- unsigned long last_beacon; /* last RX beacon time */
- } ap;
- } u;
-
- struct timer_list timer;
- enum { STA_NULLFUNC = 0, STA_DISASSOC, STA_DEAUTH } timeout_next;
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-};
-
-
-#define MAX_STA_COUNT 1024
-
-/* Maximum number of AIDs to use for STAs; must be 2007 or lower
- * (8802.11 limitation) */
-#define MAX_AID_TABLE_SIZE 128
-
-#define STA_HASH_SIZE 256
-#define STA_HASH(sta) (sta[5])
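STA_HASH above indexes the 256-bucket table by the last octet of the MAC address, with collisions chained through sta_info::hnext. A standalone sketch of the resulting add/lookup, with the structure trimmed to the two relevant fields:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STA_HASH_SIZE 256
#define STA_HASH(addr) ((addr)[5])	/* last octet of the MAC address */

struct sta {
	uint8_t addr[6];
	struct sta *hnext;	/* next entry in this hash bucket */
};

static struct sta *sta_hash_tbl[STA_HASH_SIZE];

static void sta_hash_add(struct sta *s)
{
	s->hnext = sta_hash_tbl[STA_HASH(s->addr)];
	sta_hash_tbl[STA_HASH(s->addr)] = s;
}

static struct sta *sta_hash_get(const uint8_t addr[6])
{
	struct sta *s = sta_hash_tbl[STA_HASH(addr)];

	while (s && memcmp(s->addr, addr, 6) != 0)
		s = s->hnext;
	return s;
}

int main(void)
{
	struct sta a = { .addr = { 0x02, 0, 0, 0, 0, 0x07 } };

	sta_hash_add(&a);
	printf("%s\n", sta_hash_get(a.addr) ? "found" : "missing");
	return 0;
}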
-
-
-/* Default value for maximum station inactivity. After AP_MAX_INACTIVITY_SEC
- * has passed since the last received frame from the station, a nullfunc data
- * frame is sent to the station. If this frame is not acknowledged and no other
- * frames have been received, the station will be disassociated after
- * AP_DISASSOC_DELAY. Similarly, the station will be deauthenticated after
- * AP_DEAUTH_DELAY. AP_TIMEOUT_RESOLUTION is the resolution that is used with
- * max inactivity timer. */
-#define AP_MAX_INACTIVITY_SEC (5 * 60)
-#define AP_DISASSOC_DELAY (HZ)
-#define AP_DEAUTH_DELAY (HZ)
-
-/* ap_policy: whether to accept frames to/from other APs/IBSS */
-typedef enum {
- AP_OTHER_AP_SKIP_ALL = 0,
- AP_OTHER_AP_SAME_SSID = 1,
- AP_OTHER_AP_ALL = 2,
- AP_OTHER_AP_EVEN_IBSS = 3
-} ap_policy_enum;
-
-#define PRISM2_AUTH_OPEN BIT(0)
-#define PRISM2_AUTH_SHARED_KEY BIT(1)
-
-
-/* MAC address-based restrictions */
-struct mac_entry {
- struct list_head list;
- u8 addr[6];
-};
-
-struct mac_restrictions {
- enum { MAC_POLICY_OPEN = 0, MAC_POLICY_ALLOW, MAC_POLICY_DENY } policy;
- unsigned int entries;
- struct list_head mac_list;
- spinlock_t lock;
-};
-
-
-struct add_sta_proc_data {
- u8 addr[ETH_ALEN];
- struct add_sta_proc_data *next;
-};
-
-
-typedef enum { WDS_ADD, WDS_DEL } wds_oper_type;
-struct wds_oper_data {
- wds_oper_type type;
- u8 addr[ETH_ALEN];
- struct wds_oper_data *next;
-};
-
-
-struct ap_data {
- int initialized; /* whether ap_data has been initialized */
- local_info_t *local;
- int bridge_packets; /* send packet to associated STAs directly to the
- * wireless media instead of higher layers in the
- * kernel */
- unsigned int bridged_unicast; /* number of unicast frames bridged on
- * wireless media */
- unsigned int bridged_multicast; /* number of non-unicast frames
- * bridged on wireless media */
- unsigned int tx_drop_nonassoc; /* number of unicast TX packets dropped
- * because they were to an address that
- * was not associated */
- int nullfunc_ack; /* use workaround for nullfunc frame ACKs */
-
- spinlock_t sta_table_lock;
- int num_sta; /* number of entries in sta_list */
- struct list_head sta_list; /* STA info list head */
- struct sta_info *sta_hash[STA_HASH_SIZE];
-
- struct proc_dir_entry *proc;
-
- ap_policy_enum ap_policy;
- unsigned int max_inactivity;
- int autom_ap_wds;
-
- struct mac_restrictions mac_restrictions; /* MAC-based auth */
- int last_tx_rate;
-
- struct work_struct add_sta_proc_queue;
- struct add_sta_proc_data *add_sta_proc_entries;
-
- struct work_struct wds_oper_queue;
- struct wds_oper_data *wds_oper_entries;
-
- u16 tx_callback_idx;
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- /* pointers to STA info; based on allocated AID or NULL if AID free
- * AID is in the range 1-2007, so sta_aid[0] corresponds to AID 1
- * and so on
- */
- struct sta_info *sta_aid[MAX_AID_TABLE_SIZE];
-
- u16 tx_callback_auth, tx_callback_assoc, tx_callback_poll;
-
- /* WEP operations for generating challenges to be used with shared key
- * authentication */
- struct lib80211_crypto_ops *crypt;
- void *crypt_priv;
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-};
-
-
-void hostap_rx(struct net_device *dev, struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats);
-void hostap_init_data(local_info_t *local);
-void hostap_init_ap_proc(local_info_t *local);
-void hostap_free_data(struct ap_data *ap);
-void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver);
-
-typedef enum {
- AP_TX_CONTINUE, AP_TX_DROP, AP_TX_RETRY, AP_TX_BUFFERED,
- AP_TX_CONTINUE_NOT_AUTHORIZED
-} ap_tx_ret;
-struct hostap_tx_data {
- struct sk_buff *skb;
- int host_encrypt;
- struct lib80211_crypt_data *crypt;
- void *sta_ptr;
-};
-ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
-void hostap_handle_sta_release(void *ptr);
-void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
-int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr);
-typedef enum {
- AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED
-} ap_rx_ret;
-ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
- struct sk_buff *skb,
- struct hostap_80211_rx_status *rx_stats,
- int wds);
-int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr,
- struct lib80211_crypt_data **crypt,
- void **sta_ptr);
-int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
-int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
-int hostap_add_sta(struct ap_data *ap, u8 *sta_addr);
-int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr,
- struct hostap_80211_rx_status *rx_stats);
-void hostap_update_rates(local_info_t *local);
-void hostap_add_wds_links(local_info_t *local);
-void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type);
-
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
- int resend);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-#endif /* HOSTAP_AP_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_common.h b/drivers/net/wireless/intersil/hostap/hostap_common.h
deleted file mode 100644
index dd29a8e8d..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_common.h
+++ /dev/null
@@ -1,420 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef HOSTAP_COMMON_H
-#define HOSTAP_COMMON_H
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/* IEEE 802.11 defines */
-
-/* HFA384X Configuration RIDs */
-#define HFA384X_RID_CNFPORTTYPE 0xFC00
-#define HFA384X_RID_CNFOWNMACADDR 0xFC01
-#define HFA384X_RID_CNFDESIREDSSID 0xFC02
-#define HFA384X_RID_CNFOWNCHANNEL 0xFC03
-#define HFA384X_RID_CNFOWNSSID 0xFC04
-#define HFA384X_RID_CNFOWNATIMWINDOW 0xFC05
-#define HFA384X_RID_CNFSYSTEMSCALE 0xFC06
-#define HFA384X_RID_CNFMAXDATALEN 0xFC07
-#define HFA384X_RID_CNFWDSADDRESS 0xFC08
-#define HFA384X_RID_CNFPMENABLED 0xFC09
-#define HFA384X_RID_CNFPMEPS 0xFC0A
-#define HFA384X_RID_CNFMULTICASTRECEIVE 0xFC0B
-#define HFA384X_RID_CNFMAXSLEEPDURATION 0xFC0C
-#define HFA384X_RID_CNFPMHOLDOVERDURATION 0xFC0D
-#define HFA384X_RID_CNFOWNNAME 0xFC0E
-#define HFA384X_RID_CNFOWNDTIMPERIOD 0xFC10
-#define HFA384X_RID_CNFWDSADDRESS1 0xFC11 /* AP f/w only */
-#define HFA384X_RID_CNFWDSADDRESS2 0xFC12 /* AP f/w only */
-#define HFA384X_RID_CNFWDSADDRESS3 0xFC13 /* AP f/w only */
-#define HFA384X_RID_CNFWDSADDRESS4 0xFC14 /* AP f/w only */
-#define HFA384X_RID_CNFWDSADDRESS5 0xFC15 /* AP f/w only */
-#define HFA384X_RID_CNFWDSADDRESS6 0xFC16 /* AP f/w only */
-#define HFA384X_RID_CNFMULTICASTPMBUFFERING 0xFC17 /* AP f/w only */
-#define HFA384X_RID_UNKNOWN1 0xFC20
-#define HFA384X_RID_UNKNOWN2 0xFC21
-#define HFA384X_RID_CNFWEPDEFAULTKEYID 0xFC23
-#define HFA384X_RID_CNFDEFAULTKEY0 0xFC24
-#define HFA384X_RID_CNFDEFAULTKEY1 0xFC25
-#define HFA384X_RID_CNFDEFAULTKEY2 0xFC26
-#define HFA384X_RID_CNFDEFAULTKEY3 0xFC27
-#define HFA384X_RID_CNFWEPFLAGS 0xFC28
-#define HFA384X_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
-#define HFA384X_RID_CNFAUTHENTICATION 0xFC2A
-#define HFA384X_RID_CNFMAXASSOCSTA 0xFC2B /* AP f/w only */
-#define HFA384X_RID_CNFTXCONTROL 0xFC2C
-#define HFA384X_RID_CNFROAMINGMODE 0xFC2D
-#define HFA384X_RID_CNFHOSTAUTHENTICATION 0xFC2E /* AP f/w only */
-#define HFA384X_RID_CNFRCVCRCERROR 0xFC30
-#define HFA384X_RID_CNFMMLIFE 0xFC31
-#define HFA384X_RID_CNFALTRETRYCOUNT 0xFC32
-#define HFA384X_RID_CNFBEACONINT 0xFC33
-#define HFA384X_RID_CNFAPPCFINFO 0xFC34 /* AP f/w only */
-#define HFA384X_RID_CNFSTAPCFINFO 0xFC35
-#define HFA384X_RID_CNFPRIORITYQUSAGE 0xFC37
-#define HFA384X_RID_CNFTIMCTRL 0xFC40
-#define HFA384X_RID_UNKNOWN3 0xFC41 /* added in STA f/w 0.7.x */
-#define HFA384X_RID_CNFTHIRTY2TALLY 0xFC42 /* added in STA f/w 0.8.0 */
-#define HFA384X_RID_CNFENHSECURITY 0xFC43 /* AP f/w or STA f/w >= 1.6.3 */
-#define HFA384X_RID_CNFDBMADJUST 0xFC46 /* added in STA f/w 1.3.1 */
-#define HFA384X_RID_GENERICELEMENT 0xFC48 /* added in STA f/w 1.7.0;
- * write only */
-#define HFA384X_RID_PROPAGATIONDELAY 0xFC49 /* added in STA f/w 1.7.6 */
-#define HFA384X_RID_GROUPADDRESSES 0xFC80
-#define HFA384X_RID_CREATEIBSS 0xFC81
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD 0xFC82
-#define HFA384X_RID_RTSTHRESHOLD 0xFC83
-#define HFA384X_RID_TXRATECONTROL 0xFC84
-#define HFA384X_RID_PROMISCUOUSMODE 0xFC85
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD0 0xFC90 /* AP f/w only */
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD1 0xFC91 /* AP f/w only */
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD2 0xFC92 /* AP f/w only */
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD3 0xFC93 /* AP f/w only */
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD4 0xFC94 /* AP f/w only */
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD5 0xFC95 /* AP f/w only */
-#define HFA384X_RID_FRAGMENTATIONTHRESHOLD6 0xFC96 /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD0 0xFC97 /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD1 0xFC98 /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD2 0xFC99 /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD3 0xFC9A /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD4 0xFC9B /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD5 0xFC9C /* AP f/w only */
-#define HFA384X_RID_RTSTHRESHOLD6 0xFC9D /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL0 0xFC9E /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL1 0xFC9F /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL2 0xFCA0 /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL3 0xFCA1 /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL4 0xFCA2 /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL5 0xFCA3 /* AP f/w only */
-#define HFA384X_RID_TXRATECONTROL6 0xFCA4 /* AP f/w only */
-#define HFA384X_RID_CNFSHORTPREAMBLE 0xFCB0
-#define HFA384X_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
-#define HFA384X_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
-#define HFA384X_RID_CNFBASICRATES 0xFCB3
-#define HFA384X_RID_CNFSUPPORTEDRATES 0xFCB4
-#define HFA384X_RID_CNFFALLBACKCTRL 0xFCB5 /* added in STA f/w 1.3.1 */
-#define HFA384X_RID_WEPKEYDISABLE 0xFCB6 /* added in STA f/w 1.3.1 */
-#define HFA384X_RID_WEPKEYMAPINDEX 0xFCB7 /* ? */
-#define HFA384X_RID_BROADCASTKEYID 0xFCB8 /* ? */
-#define HFA384X_RID_ENTSECFLAGEYID 0xFCB9 /* ? */
-#define HFA384X_RID_CNFPASSIVESCANCTRL 0xFCBA /* added in STA f/w 1.5.0 */
-#define HFA384X_RID_SSNHANDLINGMODE 0xFCBB /* added in STA f/w 1.7.0 */
-#define HFA384X_RID_MDCCONTROL 0xFCBC /* added in STA f/w 1.7.0 */
-#define HFA384X_RID_MDCCOUNTRY 0xFCBD /* added in STA f/w 1.7.0 */
-#define HFA384X_RID_TXPOWERMAX 0xFCBE /* added in STA f/w 1.7.0 */
-#define HFA384X_RID_CNFLFOENABLED 0xFCBF /* added in STA f/w 1.6.3 */
-#define HFA384X_RID_CAPINFO 0xFCC0 /* added in STA f/w 1.7.0 */
-#define HFA384X_RID_LISTENINTERVAL 0xFCC1 /* added in STA f/w 1.7.0 */
-#define HFA384X_RID_SW_ANT_DIV 0xFCC2 /* added in STA f/w 1.7.0; Prism3 */
-#define HFA384X_RID_LED_CTRL 0xFCC4 /* added in STA f/w 1.7.6 */
-#define HFA384X_RID_HFODELAY 0xFCC5 /* added in STA f/w 1.7.6 */
-#define HFA384X_RID_DISALLOWEDBSSID 0xFCC6 /* added in STA f/w 1.8.0 */
-#define HFA384X_RID_TICKTIME 0xFCE0
-#define HFA384X_RID_SCANREQUEST 0xFCE1
-#define HFA384X_RID_JOINREQUEST 0xFCE2
-#define HFA384X_RID_AUTHENTICATESTATION 0xFCE3 /* AP f/w only */
-#define HFA384X_RID_CHANNELINFOREQUEST 0xFCE4 /* AP f/w only */
-#define HFA384X_RID_HOSTSCAN 0xFCE5 /* added in STA f/w 1.3.1 */
-
-/* HFA384X Information RIDs */
-#define HFA384X_RID_MAXLOADTIME 0xFD00
-#define HFA384X_RID_DOWNLOADBUFFER 0xFD01
-#define HFA384X_RID_PRIID 0xFD02
-#define HFA384X_RID_PRISUPRANGE 0xFD03
-#define HFA384X_RID_CFIACTRANGES 0xFD04
-#define HFA384X_RID_NICSERNUM 0xFD0A
-#define HFA384X_RID_NICID 0xFD0B
-#define HFA384X_RID_MFISUPRANGE 0xFD0C
-#define HFA384X_RID_CFISUPRANGE 0xFD0D
-#define HFA384X_RID_CHANNELLIST 0xFD10
-#define HFA384X_RID_REGULATORYDOMAINS 0xFD11
-#define HFA384X_RID_TEMPTYPE 0xFD12
-#define HFA384X_RID_CIS 0xFD13
-#define HFA384X_RID_STAID 0xFD20
-#define HFA384X_RID_STASUPRANGE 0xFD21
-#define HFA384X_RID_MFIACTRANGES 0xFD22
-#define HFA384X_RID_CFIACTRANGES2 0xFD23
-#define HFA384X_RID_PRODUCTNAME 0xFD24 /* added in STA f/w 1.3.1;
- * only Prism2.5(?) */
-#define HFA384X_RID_PORTSTATUS 0xFD40
-#define HFA384X_RID_CURRENTSSID 0xFD41
-#define HFA384X_RID_CURRENTBSSID 0xFD42
-#define HFA384X_RID_COMMSQUALITY 0xFD43
-#define HFA384X_RID_CURRENTTXRATE 0xFD44
-#define HFA384X_RID_CURRENTBEACONINTERVAL 0xFD45
-#define HFA384X_RID_CURRENTSCALETHRESHOLDS 0xFD46
-#define HFA384X_RID_PROTOCOLRSPTIME 0xFD47
-#define HFA384X_RID_SHORTRETRYLIMIT 0xFD48
-#define HFA384X_RID_LONGRETRYLIMIT 0xFD49
-#define HFA384X_RID_MAXTRANSMITLIFETIME 0xFD4A
-#define HFA384X_RID_MAXRECEIVELIFETIME 0xFD4B
-#define HFA384X_RID_CFPOLLABLE 0xFD4C
-#define HFA384X_RID_AUTHENTICATIONALGORITHMS 0xFD4D
-#define HFA384X_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
-#define HFA384X_RID_DBMCOMMSQUALITY 0xFD51 /* added in STA f/w 1.3.1 */
-#define HFA384X_RID_CURRENTTXRATE1 0xFD80 /* AP f/w only */
-#define HFA384X_RID_CURRENTTXRATE2 0xFD81 /* AP f/w only */
-#define HFA384X_RID_CURRENTTXRATE3 0xFD82 /* AP f/w only */
-#define HFA384X_RID_CURRENTTXRATE4 0xFD83 /* AP f/w only */
-#define HFA384X_RID_CURRENTTXRATE5 0xFD84 /* AP f/w only */
-#define HFA384X_RID_CURRENTTXRATE6 0xFD85 /* AP f/w only */
-#define HFA384X_RID_OWNMACADDR 0xFD86 /* AP f/w only */
-#define HFA384X_RID_SCANRESULTSTABLE 0xFD88 /* added in STA f/w 0.8.3 */
-#define HFA384X_RID_HOSTSCANRESULTS 0xFD89 /* added in STA f/w 1.3.1 */
-#define HFA384X_RID_AUTHENTICATIONUSED 0xFD8A /* added in STA f/w 1.3.4 */
-#define HFA384X_RID_CNFFAASWITCHCTRL 0xFD8B /* added in STA f/w 1.6.3 */
-#define HFA384X_RID_ASSOCIATIONFAILURE 0xFD8D /* added in STA f/w 1.8.0 */
-#define HFA384X_RID_PHYTYPE 0xFDC0
-#define HFA384X_RID_CURRENTCHANNEL 0xFDC1
-#define HFA384X_RID_CURRENTPOWERSTATE 0xFDC2
-#define HFA384X_RID_CCAMODE 0xFDC3
-#define HFA384X_RID_SUPPORTEDDATARATES 0xFDC6
-#define HFA384X_RID_LFO_VOLT_REG_TEST_RES 0xFDC7 /* added in STA f/w 1.7.1 */
-#define HFA384X_RID_BUILDSEQ 0xFFFE
-#define HFA384X_RID_FWID 0xFFFF
-
-
-struct hfa384x_comp_ident
-{
- __le16 id;
- __le16 variant;
- __le16 major;
- __le16 minor;
-} __packed;
-
-#define HFA384X_COMP_ID_PRI 0x15
-#define HFA384X_COMP_ID_STA 0x1f
-#define HFA384X_COMP_ID_FW_AP 0x14b
-
-struct hfa384x_sup_range
-{
- __le16 role;
- __le16 id;
- __le16 variant;
- __le16 bottom;
- __le16 top;
-} __packed;
-
-
-struct hfa384x_build_id
-{
- __le16 pri_seq;
- __le16 sec_seq;
-} __packed;
-
-/* FD01 - Download Buffer */
-struct hfa384x_rid_download_buffer
-{
- __le16 page;
- __le16 offset;
- __le16 length;
-} __packed;
-
-/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
-struct hfa384x_comms_quality {
- __le16 comm_qual; /* 0 .. 92 */
- __le16 signal_level; /* 27 .. 154 */
- __le16 noise_level; /* 27 .. 154 */
-} __packed;
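A note on units: comm_qual is a unitless link-quality figure, while the signal
and noise fields of the dBm-normalized variant (RID FD51) are offset-encoded.
A hypothetical helper, not part of the original driver, assuming the common
Prism2 convention of a +100 offset on dBm-normalized levels (firmware variants
may differ):

static inline int hfa384x_level_to_dbm(u16 level)
{
	return (int) level - 100;	/* e.g. a reported 35 -> -65 dBm */
}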
-
-
-/* netdevice private ioctls (used, e.g., with iwpriv from user space) */
-
-/* New wireless extensions API - SET/GET convention (even ioctl numbers are
- * root only)
- */
-#define PRISM2_IOCTL_PRISM2_PARAM (SIOCIWFIRSTPRIV + 0)
-#define PRISM2_IOCTL_GET_PRISM2_PARAM (SIOCIWFIRSTPRIV + 1)
-#define PRISM2_IOCTL_WRITEMIF (SIOCIWFIRSTPRIV + 2)
-#define PRISM2_IOCTL_READMIF (SIOCIWFIRSTPRIV + 3)
-#define PRISM2_IOCTL_MONITOR (SIOCIWFIRSTPRIV + 4)
-#define PRISM2_IOCTL_RESET (SIOCIWFIRSTPRIV + 6)
-#define PRISM2_IOCTL_INQUIRE (SIOCIWFIRSTPRIV + 8)
-#define PRISM2_IOCTL_WDS_ADD (SIOCIWFIRSTPRIV + 10)
-#define PRISM2_IOCTL_WDS_DEL (SIOCIWFIRSTPRIV + 12)
-#define PRISM2_IOCTL_SET_RID_WORD (SIOCIWFIRSTPRIV + 14)
-#define PRISM2_IOCTL_MACCMD (SIOCIWFIRSTPRIV + 16)
-#define PRISM2_IOCTL_ADDMAC (SIOCIWFIRSTPRIV + 18)
-#define PRISM2_IOCTL_DELMAC (SIOCIWFIRSTPRIV + 20)
-#define PRISM2_IOCTL_KICKMAC (SIOCIWFIRSTPRIV + 22)
-
-/* The following are not in the SIOCGIWPRIV list; permissions are checked in
- * the driver code.
- */
-#define PRISM2_IOCTL_DOWNLOAD (SIOCDEVPRIVATE + 13)
-#define PRISM2_IOCTL_HOSTAPD (SIOCDEVPRIVATE + 14)
-
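From user space these were typically driven through iwpriv, but they can also
be invoked directly. A hypothetical sketch (not from the original sources) of
setting a PRISM2_PARAM_* value, assuming the wireless extensions convention
that two fixed-size int arguments to a private ioctl are passed inline in the
iwreq_data union:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/wireless.h>

static int set_prism2_param(int sock, const char *ifname, int param, int value)
{
	struct iwreq iwr;
	int args[2] = { param, value };	/* sub-ioctl number, value */

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ - 1);
	memcpy(iwr.u.name, args, sizeof(args));	/* 8 bytes fit inline */

	/* PRISM2_IOCTL_PRISM2_PARAM */
	return ioctl(sock, SIOCIWFIRSTPRIV + 0, &iwr);
}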
-
-/* PRISM2_IOCTL_PRISM2_PARAM ioctl() subtypes: */
-enum {
- /* PRISM2_PARAM_PTYPE = 1, */ /* REMOVED 2003-10-22 */
- PRISM2_PARAM_TXRATECTRL = 2,
- PRISM2_PARAM_BEACON_INT = 3,
- PRISM2_PARAM_PSEUDO_IBSS = 4,
- PRISM2_PARAM_ALC = 5,
- /* PRISM2_PARAM_TXPOWER = 6, */ /* REMOVED 2003-10-22 */
- PRISM2_PARAM_DUMP = 7,
- PRISM2_PARAM_OTHER_AP_POLICY = 8,
- PRISM2_PARAM_AP_MAX_INACTIVITY = 9,
- PRISM2_PARAM_AP_BRIDGE_PACKETS = 10,
- PRISM2_PARAM_DTIM_PERIOD = 11,
- PRISM2_PARAM_AP_NULLFUNC_ACK = 12,
- PRISM2_PARAM_MAX_WDS = 13,
- PRISM2_PARAM_AP_AUTOM_AP_WDS = 14,
- PRISM2_PARAM_AP_AUTH_ALGS = 15,
- PRISM2_PARAM_MONITOR_ALLOW_FCSERR = 16,
- PRISM2_PARAM_HOST_ENCRYPT = 17,
- PRISM2_PARAM_HOST_DECRYPT = 18,
- /* PRISM2_PARAM_BUS_MASTER_THRESHOLD_RX = 19, REMOVED 2005-08-14 */
- /* PRISM2_PARAM_BUS_MASTER_THRESHOLD_TX = 20, REMOVED 2005-08-14 */
- PRISM2_PARAM_HOST_ROAMING = 21,
- PRISM2_PARAM_BCRX_STA_KEY = 22,
- PRISM2_PARAM_IEEE_802_1X = 23,
- PRISM2_PARAM_ANTSEL_TX = 24,
- PRISM2_PARAM_ANTSEL_RX = 25,
- PRISM2_PARAM_MONITOR_TYPE = 26,
- PRISM2_PARAM_WDS_TYPE = 27,
- PRISM2_PARAM_HOSTSCAN = 28,
- PRISM2_PARAM_AP_SCAN = 29,
- PRISM2_PARAM_ENH_SEC = 30,
- PRISM2_PARAM_IO_DEBUG = 31,
- PRISM2_PARAM_BASIC_RATES = 32,
- PRISM2_PARAM_OPER_RATES = 33,
- PRISM2_PARAM_HOSTAPD = 34,
- PRISM2_PARAM_HOSTAPD_STA = 35,
- PRISM2_PARAM_WPA = 36,
- PRISM2_PARAM_PRIVACY_INVOKED = 37,
- PRISM2_PARAM_TKIP_COUNTERMEASURES = 38,
- PRISM2_PARAM_DROP_UNENCRYPTED = 39,
- PRISM2_PARAM_SCAN_CHANNEL_MASK = 40,
-};
-
-enum { HOSTAP_ANTSEL_DO_NOT_TOUCH = 0, HOSTAP_ANTSEL_DIVERSITY = 1,
- HOSTAP_ANTSEL_LOW = 2, HOSTAP_ANTSEL_HIGH = 3 };
-
-
-/* PRISM2_IOCTL_MACCMD ioctl() subcommands: */
-enum { AP_MAC_CMD_POLICY_OPEN = 0, AP_MAC_CMD_POLICY_ALLOW = 1,
- AP_MAC_CMD_POLICY_DENY = 2, AP_MAC_CMD_FLUSH = 3,
- AP_MAC_CMD_KICKALL = 4 };
-
-
-/* PRISM2_IOCTL_DOWNLOAD ioctl() dl_cmd: */
-enum {
- PRISM2_DOWNLOAD_VOLATILE = 1 /* RAM */,
- /* Note! Old versions of prism2_srec have a fatal error in CRC-16
- * calculation, which will corrupt all non-volatile downloads.
- * PRISM2_DOWNLOAD_NON_VOLATILE used to be 2, but it is now 3 to
- * prevent use of old versions of prism2_srec for non-volatile
- * download. */
- PRISM2_DOWNLOAD_NON_VOLATILE = 3 /* FLASH */,
- PRISM2_DOWNLOAD_VOLATILE_GENESIS = 4 /* RAM in Genesis mode */,
-	/* Persistent versions of volatile download commands (keep firmware
-	 * data in memory and automatically re-download after hw_reset) */
- PRISM2_DOWNLOAD_VOLATILE_PERSISTENT = 5,
- PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT = 6,
-};
-
-struct prism2_download_param {
- u32 dl_cmd;
- u32 start_addr;
- u32 num_areas;
- struct prism2_download_area {
- u32 addr; /* wlan card address */
- u32 len;
- void __user *ptr; /* pointer to data in user space */
- } data[];
-};
-
-#define PRISM2_MAX_DOWNLOAD_AREA_LEN 131072
-#define PRISM2_MAX_DOWNLOAD_LEN 262144
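For illustration, a hypothetical sketch (not from the original sources) of
describing one firmware image area for a volatile (RAM) download; how the
filled structure is then handed to the PRISM2_IOCTL_DOWNLOAD request is left
out here:

static void fill_download_param(struct prism2_download_param *param,
				u32 start_addr, u32 area_addr,
				void __user *image, u32 image_len)
{
	param->dl_cmd = PRISM2_DOWNLOAD_VOLATILE;
	param->start_addr = start_addr;		/* execution restarts here */
	param->num_areas = 1;
	param->data[0].addr = area_addr;	/* wlan card address */
	param->data[0].len = image_len;		/* <= PRISM2_MAX_DOWNLOAD_AREA_LEN */
	param->data[0].ptr = image;
}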
-
-
-/* PRISM2_IOCTL_HOSTAPD ioctl() cmd: */
-enum {
- PRISM2_HOSTAPD_FLUSH = 1,
- PRISM2_HOSTAPD_ADD_STA = 2,
- PRISM2_HOSTAPD_REMOVE_STA = 3,
- PRISM2_HOSTAPD_GET_INFO_STA = 4,
- /* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
- PRISM2_SET_ENCRYPTION = 6,
- PRISM2_GET_ENCRYPTION = 7,
- PRISM2_HOSTAPD_SET_FLAGS_STA = 8,
- PRISM2_HOSTAPD_GET_RID = 9,
- PRISM2_HOSTAPD_SET_RID = 10,
- PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
- PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
- PRISM2_HOSTAPD_MLME = 13,
- PRISM2_HOSTAPD_SCAN_REQ = 14,
- PRISM2_HOSTAPD_STA_CLEAR_STATS = 15,
-};
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_RID_HDR_LEN \
-offsetof(struct prism2_hostapd_param, u.rid.data)
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
-offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum buffer length for algorithm names used in ioctl(); one byte is
- * reserved for nul termination.
- */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-
-struct prism2_hostapd_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u16 aid;
- u16 capability;
- u8 tx_supp_rates;
- } add_sta;
- struct {
- u32 inactive_sec;
- } get_info_sta;
- struct {
- u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
-			u8 key[];
- } crypt;
- struct {
- u32 flags_and;
- u32 flags_or;
- } set_flags_sta;
- struct {
- u16 rid;
- u16 len;
-			u8 data[];
- } rid;
- struct {
- u8 len;
-			u8 data[];
- } generic_elem;
- struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
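As an illustration of how the header-length macros above are meant to be used,
a hypothetical sketch (not from the original sources) that prepares a
PRISM2_HOSTAPD_GET_RID request; the caller is assumed to hand a buffer of
PRISM2_HOSTAPD_MAX_BUF_SIZE bytes to the PRISM2_IOCTL_HOSTAPD ioctl:

static void build_get_rid_req(struct prism2_hostapd_param *req, u16 rid)
{
	memset(req, 0, PRISM2_HOSTAPD_RID_HDR_LEN);
	req->cmd = PRISM2_HOSTAPD_GET_RID;
	req->u.rid.rid = rid;			/* e.g. HFA384X_RID_CURRENTSSID */
	req->u.rid.len = PRISM2_HOSTAPD_MAX_BUF_SIZE -
		PRISM2_HOSTAPD_RID_HDR_LEN;	/* room for returned data */
}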
-
-#define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT(0)
-#define HOSTAP_CRYPT_FLAG_PERMANENT BIT(1)
-
-#define HOSTAP_CRYPT_ERR_UNKNOWN_ALG 2
-#define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3
-#define HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED 4
-#define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5
-#define HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED 6
-#define HOSTAP_CRYPT_ERR_CARD_CONF_FAILED 7
-
-
-#endif /* HOSTAP_COMMON_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_config.h b/drivers/net/wireless/intersil/hostap/hostap_config.h
deleted file mode 100644
index 3ebd55847..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_config.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef HOSTAP_CONFIG_H
-#define HOSTAP_CONFIG_H
-
-/* In previous versions of the Host AP driver, support for the user space
- * version of IEEE 802.11 management (hostapd) was disabled in the default
- * configuration. From now on, support for hostapd is always included, and the
- * kernel driver's own IEEE 802.11 management can be disabled with a separate
- * define, PRISM2_NO_KERNEL_IEEE80211_MGMT. */
-/* #define PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-/* Maximum number of events handled per interrupt */
-#define PRISM2_MAX_INTERRUPT_EVENTS 20
-
-/* Include code for downloading firmware images into volatile RAM. */
-#define PRISM2_DOWNLOAD_SUPPORT
-
-/* Allow kernel configuration to enable download support. */
-#if !defined(PRISM2_DOWNLOAD_SUPPORT) && defined(CONFIG_HOSTAP_FIRMWARE)
-#define PRISM2_DOWNLOAD_SUPPORT
-#endif
-
-/* Allow kernel configuration to enable non-volatile download support. */
-#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
-#define PRISM2_NON_VOLATILE_DOWNLOAD
-#endif
-
-/* Save low-level I/O for debugging. This should not be enabled in normal use.
- */
-/* #define PRISM2_IO_DEBUG */
-
-/* The following defines can be used to remove unneeded parts of the driver,
- * e.g., to limit the size of the kernel module. Definitions can be added here
- * in hostap_config.h or passed to the make command with ccflags-y,
- * e.g.,
- * 'make pccard ccflags-y="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
- */
-
-/* Do not include debug messages into the driver */
-/* #define PRISM2_NO_DEBUG */
-
-/* Do not include /proc/net/prism2/wlan#/{registers,debug} */
-/* #define PRISM2_NO_PROCFS_DEBUG */
-
-/* Do not include station functionality (i.e., allow only Master (Host AP)
- * mode) */
-/* #define PRISM2_NO_STATION_MODES */
-
-#endif /* HOSTAP_CONFIG_H */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_cs.c b/drivers/net/wireless/intersil/hostap/hostap_cs.c
deleted file mode 100644
index ec7db2bad..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_cs.c
+++ /dev/null
@@ -1,710 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define PRISM2_PCCARD
-
-#include <linux/module.h>
-#include <linux/if.h>
-#include <linux/slab.h>
-#include <linux/wait.h>
-#include <linux/timer.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/workqueue.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <asm/io.h>
-
-#include "hostap_wlan.h"
-
-
-static char *dev_info = "hostap_cs";
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
- "cards (PC Card).");
-MODULE_LICENSE("GPL");
-
-
-static int ignore_cis_vcc;
-module_param(ignore_cis_vcc, int, 0444);
-MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
-
-
-/* struct local_info::hw_priv */
-struct hostap_cs_priv {
- struct pcmcia_device *link;
- int sandisk_connectplus;
-};
-
-
-#ifdef PRISM2_IO_DEBUG
-
-static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
- outb(v, dev->base_addr + a);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
- u8 v;
-
- iface = netdev_priv(dev);
- local = iface->local;
- spin_lock_irqsave(&local->lock, flags);
- v = inb(dev->base_addr + a);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
- spin_unlock_irqrestore(&local->lock, flags);
- return v;
-}
-
-static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
- outw(v, dev->base_addr + a);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
- u16 v;
-
- iface = netdev_priv(dev);
- local = iface->local;
- spin_lock_irqsave(&local->lock, flags);
- v = inw(dev->base_addr + a);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
- spin_unlock_irqrestore(&local->lock, flags);
- return v;
-}
-
-static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
- u8 *buf, int wc)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
- outsw(dev->base_addr + a, buf, wc);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline void hfa384x_insw_debug(struct net_device *dev, int a,
- u8 *buf, int wc)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
- insw(dev->base_addr + a, buf, wc);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
-#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
-#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
-#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
-#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
-#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))
-
-#else /* PRISM2_IO_DEBUG */
-
-#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
-#define HFA384X_INB(a) inb(dev->base_addr + (a))
-#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
-#define HFA384X_INW(a) inw(dev->base_addr + (a))
-#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
-#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)
-
-#endif /* PRISM2_IO_DEBUG */
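With PRISM2_IO_DEBUG disabled, the accessors compile down to plain port I/O
relative to dev->base_addr; for instance, reading the command register
(illustrative only, assuming a struct net_device *dev in scope):

	u16 cmd = HFA384X_INW(HFA384X_CMD_OFF);	/* inw(dev->base_addr + HFA384X_CMD_OFF) */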
-
-
-static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
- int len)
-{
- u16 d_off;
- u16 *pos;
-
- d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
- pos = (u16 *) buf;
-
- if (len / 2)
- HFA384X_INSW(d_off, buf, len / 2);
- pos += len / 2;
-
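-	/* an odd trailing byte is moved with a single 8-bit access */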
- if (len & 1)
- *((char *) pos) = HFA384X_INB(d_off);
-
- return 0;
-}
-
-
-static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
-{
- u16 d_off;
- u16 *pos;
-
- d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
- pos = (u16 *) buf;
-
- if (len / 2)
- HFA384X_OUTSW(d_off, buf, len / 2);
- pos += len / 2;
-
- if (len & 1)
- HFA384X_OUTB(*((char *) pos), d_off);
-
- return 0;
-}
-
-
-/* FIX: This might change at some point.. */
-#include "hostap_hw.c"
-
-
-
-static void prism2_detach(struct pcmcia_device *p_dev);
-static void prism2_release(u_long arg);
-static int prism2_config(struct pcmcia_device *link);
-
-
-static int prism2_pccard_card_present(local_info_t *local)
-{
- struct hostap_cs_priv *hw_priv = local->hw_priv;
-	if (hw_priv != NULL && hw_priv->link != NULL &&
-	    pcmcia_dev_present(hw_priv->link))
- return 1;
- return 0;
-}
-
-
-/*
- * SanDisk CompactFlash WLAN Flashcard - Product Manual v1.0
- * Document No. 20-10-00058, January 2004
- * http://www.sandisk.com/pdf/industrial/ProdManualCFWLANv1.0.pdf
- */
-#define SANDISK_WLAN_ACTIVATION_OFF 0x40
-#define SANDISK_HCR_OFF 0x42
-
-
-static void sandisk_set_iobase(local_info_t *local)
-{
- int res;
- struct hostap_cs_priv *hw_priv = local->hw_priv;
-
- res = pcmcia_write_config_byte(hw_priv->link, 0x10,
- hw_priv->link->resource[0]->start & 0x00ff);
- if (res != 0) {
- printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
- " res=%d\n", res);
- }
- udelay(10);
-
- res = pcmcia_write_config_byte(hw_priv->link, 0x12,
- (hw_priv->link->resource[0]->start >> 8) & 0x00ff);
- if (res != 0) {
- printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
- " res=%d\n", res);
- }
-}
-
-
-static void sandisk_write_hcr(local_info_t *local, int hcr)
-{
- struct net_device *dev = local->dev;
- int i;
-
- HFA384X_OUTB(0x80, SANDISK_WLAN_ACTIVATION_OFF);
- udelay(50);
- for (i = 0; i < 10; i++) {
- HFA384X_OUTB(hcr, SANDISK_HCR_OFF);
- }
- udelay(55);
- HFA384X_OUTB(0x45, SANDISK_WLAN_ACTIVATION_OFF);
-}
-
-
-static int sandisk_enable_wireless(struct net_device *dev)
-{
- int res, ret = 0;
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- struct hostap_cs_priv *hw_priv = local->hw_priv;
-
- if (resource_size(hw_priv->link->resource[0]) < 0x42) {
- /* Not enough ports to be SanDisk multi-function card */
- ret = -ENODEV;
- goto done;
- }
-
-	if (hw_priv->link->manf_id != 0xd601 ||
-	    hw_priv->link->card_id != 0x0101) {
- /* No SanDisk manfid found */
- ret = -ENODEV;
- goto done;
- }
-
- if (hw_priv->link->socket->functions < 2) {
- /* No multi-function links found */
- ret = -ENODEV;
- goto done;
- }
-
- printk(KERN_DEBUG "%s: Multi-function SanDisk ConnectPlus detected"
- " - using vendor-specific initialization\n", dev->name);
- hw_priv->sandisk_connectplus = 1;
-
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
- COR_SOFT_RESET);
- if (res != 0) {
- printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
- dev->name, res);
- goto done;
- }
- mdelay(5);
-
- /*
- * Do not enable interrupts here to avoid some bogus events. Interrupts
- * will be enabled during the first cor_sreset call.
- */
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
- (COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE |
- COR_FUNC_ENA));
- if (res != 0) {
- printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
- dev->name, res);
- goto done;
- }
- mdelay(5);
-
- sandisk_set_iobase(local);
-
- HFA384X_OUTB(0xc5, SANDISK_WLAN_ACTIVATION_OFF);
- udelay(10);
- HFA384X_OUTB(0x4b, SANDISK_WLAN_ACTIVATION_OFF);
- udelay(10);
-
-done:
- return ret;
-}
-
-
-static void prism2_pccard_cor_sreset(local_info_t *local)
-{
- int res;
- u8 val;
- struct hostap_cs_priv *hw_priv = local->hw_priv;
-
- if (!prism2_pccard_card_present(local))
- return;
-
- res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &val);
- if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
- res);
- return;
- }
- printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n",
- val);
-
- val |= COR_SOFT_RESET;
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
- if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
- res);
- return;
- }
-
- mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
-
- val &= ~COR_SOFT_RESET;
- if (hw_priv->sandisk_connectplus)
- val |= COR_IREQ_ENA;
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
- if (res != 0) {
- printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
- res);
- return;
- }
-
- mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
-
- if (hw_priv->sandisk_connectplus)
- sandisk_set_iobase(local);
-}
-
-
-static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
-{
- int res;
- u8 old_cor;
- struct hostap_cs_priv *hw_priv = local->hw_priv;
-
- if (!prism2_pccard_card_present(local))
- return;
-
- if (hw_priv->sandisk_connectplus) {
- sandisk_write_hcr(local, hcr);
- return;
- }
-
- res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
- if (res != 0) {
- printk(KERN_DEBUG "%s failed 1 (%d)\n", __func__, res);
- return;
- }
- printk(KERN_DEBUG "%s: original COR %02x\n", __func__, old_cor);
-
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
- old_cor | COR_SOFT_RESET);
- if (res != 0) {
- printk(KERN_DEBUG "%s failed 2 (%d)\n", __func__, res);
- return;
- }
-
- mdelay(10);
-
- /* Setup Genesis mode */
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
- if (res != 0) {
- printk(KERN_DEBUG "%s failed 3 (%d)\n", __func__, res);
- return;
- }
- mdelay(10);
-
- res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
- old_cor & ~COR_SOFT_RESET);
- if (res != 0) {
- printk(KERN_DEBUG "%s failed 4 (%d)\n", __func__, res);
- return;
- }
-
- mdelay(10);
-}
-
-
-static struct prism2_helper_functions prism2_pccard_funcs =
-{
- .card_present = prism2_pccard_card_present,
- .cor_sreset = prism2_pccard_cor_sreset,
- .genesis_reset = prism2_pccard_genesis_reset,
- .hw_type = HOSTAP_HW_PCCARD,
-};
-
-
-/* allocate local data and register with CardServices
- * initialize dev_link structure, but do not configure the card yet */
-static int hostap_cs_probe(struct pcmcia_device *p_dev)
-{
- int ret;
-
- PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info);
-
- ret = prism2_config(p_dev);
- if (ret) {
- PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n");
- }
-
- return ret;
-}
-
-
-static void prism2_detach(struct pcmcia_device *link)
-{
- PDEBUG(DEBUG_FLOW, "prism2_detach\n");
-
- prism2_release((u_long)link);
-
- /* release net devices */
- if (link->priv) {
- struct hostap_cs_priv *hw_priv;
- struct net_device *dev;
- struct hostap_interface *iface;
- dev = link->priv;
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
- prism2_free_local_data(dev);
- kfree(hw_priv);
- }
-}
-
-
-static int prism2_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-static int prism2_config(struct pcmcia_device *link)
-{
- struct net_device *dev;
- struct hostap_interface *iface;
- local_info_t *local;
- int ret;
- struct hostap_cs_priv *hw_priv;
- unsigned long flags;
-
- PDEBUG(DEBUG_FLOW, "prism2_config()\n");
-
- hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
- if (hw_priv == NULL) {
- ret = -ENOMEM;
- goto failed;
- }
-
- /* Look for an appropriate configuration table entry in the CIS */
- link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO |
- CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
- if (ignore_cis_vcc)
- link->config_flags &= ~CONF_AUTO_CHECK_VCC;
- ret = pcmcia_loop_config(link, prism2_config_check, NULL);
- if (ret) {
- if (!ignore_cis_vcc)
- printk(KERN_ERR "GetNextTuple(): No matching "
- "CIS configuration. Maybe you need the "
- "ignore_cis_vcc=1 parameter.\n");
- goto failed;
- }
-
- /* Need to allocate net_device before requesting IRQ handler */
- dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
- &link->dev);
- if (!dev) {
- ret = -ENOMEM;
- goto failed;
- }
- link->priv = dev;
-
- iface = netdev_priv(dev);
- local = iface->local;
- local->hw_priv = hw_priv;
- hw_priv->link = link;
-
- /*
-	 * We enable the IRQ here, but the IRQ handler will not proceed
-	 * until dev->base_addr is set below. This protects us from
-	 * receive interrupts while the driver is not fully initialized.
- */
- ret = pcmcia_request_irq(link, prism2_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- spin_lock_irqsave(&local->irq_init_lock, flags);
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- spin_unlock_irqrestore(&local->irq_init_lock, flags);
-
- local->shutdown = 0;
-
- sandisk_enable_wireless(dev);
-
- ret = prism2_hw_config(dev, 1);
- if (!ret)
- ret = hostap_hw_ready(dev);
-
- return ret;
-
- failed:
- kfree(hw_priv);
- prism2_release((u_long)link);
- return ret;
-}
-
-
-static void prism2_release(u_long arg)
-{
- struct pcmcia_device *link = (struct pcmcia_device *)arg;
-
- PDEBUG(DEBUG_FLOW, "prism2_release\n");
-
- if (link->priv) {
- struct net_device *dev = link->priv;
- struct hostap_interface *iface;
-
- iface = netdev_priv(dev);
- prism2_hw_shutdown(dev, 0);
- iface->local->shutdown = 1;
- }
-
- pcmcia_disable_device(link);
- PDEBUG(DEBUG_FLOW, "release - done\n");
-}
-
-static int hostap_cs_suspend(struct pcmcia_device *link)
-{
- struct net_device *dev = (struct net_device *) link->priv;
- int dev_open = 0;
- struct hostap_interface *iface = NULL;
-
- if (!dev)
- return -ENODEV;
-
- iface = netdev_priv(dev);
-
- PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
- if (iface && iface->local)
- dev_open = iface->local->num_dev_open > 0;
- if (dev_open) {
- netif_stop_queue(dev);
- netif_device_detach(dev);
- }
- prism2_suspend(dev);
-
- return 0;
-}
-
-static int hostap_cs_resume(struct pcmcia_device *link)
-{
- struct net_device *dev = (struct net_device *) link->priv;
- int dev_open = 0;
- struct hostap_interface *iface = NULL;
-
- if (!dev)
- return -ENODEV;
-
- iface = netdev_priv(dev);
-
- PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
-
- if (iface && iface->local)
- dev_open = iface->local->num_dev_open > 0;
-
- prism2_hw_shutdown(dev, 1);
- prism2_hw_config(dev, dev_open ? 0 : 1);
- if (dev_open) {
- netif_device_attach(dev);
- netif_start_queue(dev);
- }
-
- return 0;
-}
-
-static const struct pcmcia_device_id hostap_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
- PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
- PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
- PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x3301),
- PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b),
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612),
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613),
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x02d2, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
- PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
-/* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */
- PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
- PCMCIA_DEVICE_MANF_CARD(0x0126, 0x0002),
- PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0xd601, 0x0005, "ADLINK 345 CF",
- 0x2d858104),
- PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
- 0x74c5e40d),
- PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
- 0x4b801a17),
- PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.02",
- 0x4b74baa0),
- PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
- 0x7a954bd9, 0x74be00c6),
- PCMCIA_DEVICE_PROD_ID123(
- "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
- 0xe6ec52ce, 0x08649af2, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID123(
- "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
- 0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
- PCMCIA_DEVICE_PROD_ID123(
- "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
- 0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID123(
- "Instant Wireless ", " Network PC CARD", "Version 01.02",
- 0x11d901af, 0x6e9bd926, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID123(
- "SMC", "SMC2632W", "Version 01.02",
- 0xc4f8b18b, 0x474a1f2a, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G",
- 0x2decece3, 0x82067c18),
- PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card",
- 0x54f7c49c, 0x15a75e5b),
- PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE",
- 0x74c5e40d, 0xdb472a18),
- PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card",
- 0x0733cc81, 0x0c52f395),
- PCMCIA_DEVICE_PROD_ID12(
- "ZoomAir 11Mbps High", "Rate wireless Networking",
- 0x273fe3db, 0x32a1eaee),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card",
- 0xa37434e9, 0x9762e8f1),
- PCMCIA_DEVICE_PROD_ID123(
- "Pretec", "CompactWLAN Card 802.11b", "2.5",
- 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
- PCMCIA_DEVICE_PROD_ID123(
- "U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
- 0xc7b8df9d, 0x1700d087, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID123(
- "Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio",
- "Ver. 1.00",
- 0x5cd01705, 0x4271660f, 0x9d08ee12),
- PCMCIA_DEVICE_PROD_ID123(
- "Wireless LAN" , "11Mbps PC Card", "Version 01.02",
- 0x4b8870ff, 0x70e946d1, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
- PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
- PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
- PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
-
-
-static struct pcmcia_driver hostap_driver = {
- .name = "hostap_cs",
- .probe = hostap_cs_probe,
- .remove = prism2_detach,
- .owner = THIS_MODULE,
- .id_table = hostap_cs_ids,
- .suspend = hostap_cs_suspend,
- .resume = hostap_cs_resume,
-};
-module_pcmcia_driver(hostap_driver);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
deleted file mode 100644
index 5e5bada28..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ /dev/null
@@ -1,810 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-static int prism2_enable_aux_port(struct net_device *dev, int enable)
-{
- u16 val, reg;
- int i, tries;
- unsigned long flags;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->no_pri) {
- if (enable) {
- PDEBUG(DEBUG_EXTRA2, "%s: no PRI f/w - assuming Aux "
- "port is already enabled\n", dev->name);
- }
- return 0;
- }
-
- spin_lock_irqsave(&local->cmdlock, flags);
-
- /* wait until busy bit is clear */
- tries = HFA384X_CMD_BUSY_TIMEOUT;
- while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
- tries--;
- udelay(1);
- }
- if (tries == 0) {
- reg = HFA384X_INW(HFA384X_CMD_OFF);
- spin_unlock_irqrestore(&local->cmdlock, flags);
- printk("%s: prism2_enable_aux_port - timeout - reg=0x%04x\n",
- dev->name, reg);
- return -ETIMEDOUT;
- }
-
- val = HFA384X_INW(HFA384X_CONTROL_OFF);
-
- if (enable) {
- HFA384X_OUTW(HFA384X_AUX_MAGIC0, HFA384X_PARAM0_OFF);
- HFA384X_OUTW(HFA384X_AUX_MAGIC1, HFA384X_PARAM1_OFF);
- HFA384X_OUTW(HFA384X_AUX_MAGIC2, HFA384X_PARAM2_OFF);
-
- if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_DISABLED)
- printk("prism2_enable_aux_port: was not disabled!?\n");
- val &= ~HFA384X_AUX_PORT_MASK;
- val |= HFA384X_AUX_PORT_ENABLE;
- } else {
- HFA384X_OUTW(0, HFA384X_PARAM0_OFF);
- HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
- HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
-
- if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_ENABLED)
- printk("prism2_enable_aux_port: was not enabled!?\n");
- val &= ~HFA384X_AUX_PORT_MASK;
- val |= HFA384X_AUX_PORT_DISABLE;
- }
- HFA384X_OUTW(val, HFA384X_CONTROL_OFF);
-
- udelay(5);
-
- i = 10000;
- while (i > 0) {
- val = HFA384X_INW(HFA384X_CONTROL_OFF);
- val &= HFA384X_AUX_PORT_MASK;
-
- if ((enable && val == HFA384X_AUX_PORT_ENABLED) ||
- (!enable && val == HFA384X_AUX_PORT_DISABLED))
- break;
-
- udelay(10);
- i--;
- }
-
- spin_unlock_irqrestore(&local->cmdlock, flags);
-
- if (i == 0) {
- printk("prism2_enable_aux_port(%d) timed out\n",
- enable);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-
-static int hfa384x_from_aux(struct net_device *dev, unsigned int addr, int len,
- void *buf)
-{
- u16 page, offset;
- if (addr & 1 || len & 1)
- return -1;
-
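-	/* AUX memory is reached through a 128-byte page window: AUXPAGE
-	 * selects addr >> 7 and AUXOFFSET the byte offset within the page */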
- page = addr >> 7;
- offset = addr & 0x7f;
-
- HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
- HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
-
- udelay(5);
-
-#ifdef PRISM2_PCI
- {
- __le16 *pos = (__le16 *) buf;
- while (len > 0) {
- *pos++ = HFA384X_INW_DATA(HFA384X_AUXDATA_OFF);
- len -= 2;
- }
- }
-#else /* PRISM2_PCI */
- HFA384X_INSW(HFA384X_AUXDATA_OFF, buf, len / 2);
-#endif /* PRISM2_PCI */
-
- return 0;
-}
-
-
-static int hfa384x_to_aux(struct net_device *dev, unsigned int addr, int len,
- void *buf)
-{
- u16 page, offset;
- if (addr & 1 || len & 1)
- return -1;
-
- page = addr >> 7;
- offset = addr & 0x7f;
-
- HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
- HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
-
- udelay(5);
-
-#ifdef PRISM2_PCI
- {
- __le16 *pos = (__le16 *) buf;
- while (len > 0) {
- HFA384X_OUTW_DATA(*pos++, HFA384X_AUXDATA_OFF);
- len -= 2;
- }
- }
-#else /* PRISM2_PCI */
- HFA384X_OUTSW(HFA384X_AUXDATA_OFF, buf, len / 2);
-#endif /* PRISM2_PCI */
-
- return 0;
-}
-
-
-static int prism2_pda_ok(u8 *buf)
-{
- __le16 *pda = (__le16 *) buf;
- int pos;
- u16 len, pdr;
-
- if (buf[0] == 0xff && buf[1] == 0x00 && buf[2] == 0xff &&
- buf[3] == 0x00)
- return 0;
-
- pos = 0;
- while (pos + 1 < PRISM2_PDA_SIZE / 2) {
- len = le16_to_cpu(pda[pos]);
- pdr = le16_to_cpu(pda[pos + 1]);
- if (len == 0 || pos + len > PRISM2_PDA_SIZE / 2)
- return 0;
-
- if (pdr == 0x0000 && len == 2) {
- /* PDA end found */
- return 1;
- }
-
- pos += len + 1;
- }
-
- return 0;
-}
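The PDA is thus a sequence of little-endian 16-bit records, each a length word
(counting the words that follow), a PDR code and data, terminated by the
PDR 0x0000 end record with len == 2. Illustrative only, the smallest layout
that prism2_pda_ok() accepts:

static const __le16 minimal_pda[] = {
	cpu_to_le16(2),		/* end record: two words follow */
	cpu_to_le16(0x0000),	/* PDR 0x0000 marks the PDA end */
	cpu_to_le16(0),		/* data word counted by len */
};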
-
-
-#define prism2_download_aux_dump_npages 65536
-
-struct prism2_download_aux_dump {
- local_info_t *local;
- u16 page[0x80];
-};
-
-static int prism2_download_aux_dump_proc_show(struct seq_file *m, void *v)
-{
- struct prism2_download_aux_dump *ctx = m->private;
-
- hfa384x_from_aux(ctx->local->dev, (unsigned long)v - 1, 0x80, ctx->page);
- seq_write(m, ctx->page, 0x80);
- return 0;
-}
-
-static void *prism2_download_aux_dump_proc_start(struct seq_file *m, loff_t *_pos)
-{
- struct prism2_download_aux_dump *ctx = m->private;
- prism2_enable_aux_port(ctx->local->dev, 1);
- if (*_pos >= prism2_download_aux_dump_npages)
- return NULL;
- return (void *)((unsigned long)*_pos + 1);
-}
-
-static void *prism2_download_aux_dump_proc_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- ++*_pos;
- if (*_pos >= prism2_download_aux_dump_npages)
- return NULL;
- return (void *)((unsigned long)*_pos + 1);
-}
-
-static void prism2_download_aux_dump_proc_stop(struct seq_file *m, void *v)
-{
- struct prism2_download_aux_dump *ctx = m->private;
- prism2_enable_aux_port(ctx->local->dev, 0);
-}
-
-static const struct seq_operations prism2_download_aux_dump_proc_seqops = {
- .start = prism2_download_aux_dump_proc_start,
- .next = prism2_download_aux_dump_proc_next,
- .stop = prism2_download_aux_dump_proc_stop,
- .show = prism2_download_aux_dump_proc_show,
-};
-
-static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open_private(file, &prism2_download_aux_dump_proc_seqops,
- sizeof(struct prism2_download_aux_dump));
-	if (ret == 0) {
-		struct seq_file *m = file->private_data;
-		struct prism2_download_aux_dump *ctx = m->private;
-
-		ctx->local = pde_data(inode);
-	}
- return ret;
-}
-
-static const struct proc_ops prism2_download_aux_dump_proc_ops = {
- .proc_open = prism2_download_aux_dump_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = seq_release_private,
-};
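Registration of the corresponding proc entry lives elsewhere in the driver;
roughly (a sketch with an assumed entry name and parent directory, not the
exact original call site), it would be wired up as:

	proc_create_data("aux_dump", 0400, local->proc,
			 &prism2_download_aux_dump_proc_ops, local);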
-
-
-static u8 * prism2_read_pda(struct net_device *dev)
-{
- u8 *buf;
- int res, i, found = 0;
-#define NUM_PDA_ADDRS 4
- unsigned int pda_addr[NUM_PDA_ADDRS] = {
- 0x7f0000 /* others than HFA3841 */,
- 0x3f0000 /* HFA3841 */,
- 0x390000 /* apparently used in older cards */,
- 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */,
- };
-
- buf = kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL);
- if (buf == NULL)
- return NULL;
-
- /* Note: wlan card should be in initial state (just after init cmd)
- * and no other operations should be performed concurrently. */
-
- prism2_enable_aux_port(dev, 1);
-
- for (i = 0; i < NUM_PDA_ADDRS; i++) {
- PDEBUG(DEBUG_EXTRA2, "%s: trying to read PDA from 0x%08x",
- dev->name, pda_addr[i]);
- res = hfa384x_from_aux(dev, pda_addr[i], PRISM2_PDA_SIZE, buf);
-		if (res == 0 && prism2_pda_ok(buf)) {
- PDEBUG2(DEBUG_EXTRA2, ": OK\n");
- found = 1;
- break;
- } else {
- PDEBUG2(DEBUG_EXTRA2, ": failed\n");
- }
- }
-
- prism2_enable_aux_port(dev, 0);
-
- if (!found) {
- printk(KERN_DEBUG "%s: valid PDA not found\n", dev->name);
- kfree(buf);
- buf = NULL;
- }
-
- return buf;
-}
-
-
-static int prism2_download_volatile(local_info_t *local,
- struct prism2_download_data *param)
-{
- struct net_device *dev = local->dev;
- int ret = 0, i;
- u16 param0, param1;
-
- if (local->hw_downloading) {
- printk(KERN_WARNING "%s: Already downloading - aborting new "
- "request\n", dev->name);
- return -1;
- }
-
- local->hw_downloading = 1;
- if (local->pri_only) {
- hfa384x_disable_interrupts(dev);
- } else {
- prism2_hw_shutdown(dev, 0);
-
- if (prism2_hw_init(dev, 0)) {
- printk(KERN_WARNING "%s: Could not initialize card for"
- " download\n", dev->name);
- ret = -1;
- goto out;
- }
- }
-
- if (prism2_enable_aux_port(dev, 1)) {
- printk(KERN_WARNING "%s: Could not enable AUX port\n",
- dev->name);
- ret = -1;
- goto out;
- }
-
- param0 = param->start_addr & 0xffff;
- param1 = param->start_addr >> 16;
-
- HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
- HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
- if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
- (HFA384X_PROGMODE_ENABLE_VOLATILE << 8),
- param0)) {
- printk(KERN_WARNING "%s: Download command execution failed\n",
- dev->name);
- ret = -1;
- goto out;
- }
-
- for (i = 0; i < param->num_areas; i++) {
- PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
- dev->name, param->data[i].len, param->data[i].addr);
- if (hfa384x_to_aux(dev, param->data[i].addr,
- param->data[i].len, param->data[i].data)) {
- printk(KERN_WARNING "%s: RAM download at 0x%08x "
- "(len=%d) failed\n", dev->name,
- param->data[i].addr, param->data[i].len);
- ret = -1;
- goto out;
- }
- }
-
- HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
- HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
- if (hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
- (HFA384X_PROGMODE_DISABLE << 8), param0)) {
- printk(KERN_WARNING "%s: Download command execution failed\n",
- dev->name);
- ret = -1;
- goto out;
- }
- /* ProgMode disable causes the hardware to restart itself from the
-	 * given starting address. Give the hardware some time and ACK the
-	 * command event just in case the restart did not happen. */
- mdelay(5);
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
-
- if (prism2_enable_aux_port(dev, 0)) {
- printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
- dev->name);
- /* continue anyway.. restart should have taken care of this */
- }
-
- mdelay(5);
- local->hw_downloading = 0;
- if (prism2_hw_config(dev, 2)) {
- printk(KERN_WARNING "%s: Card configuration after RAM "
- "download failed\n", dev->name);
- ret = -1;
- goto out;
- }
-
- out:
- local->hw_downloading = 0;
- return ret;
-}
-
-
-static int prism2_enable_genesis(local_info_t *local, int hcr)
-{
- struct net_device *dev = local->dev;
- u8 initseq[4] = { 0x00, 0xe1, 0xa1, 0xff };
- u8 readbuf[4];
-
- printk(KERN_DEBUG "%s: test Genesis mode with HCR 0x%02x\n",
- dev->name, hcr);
- local->func->cor_sreset(local);
- hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
- local->func->genesis_reset(local, hcr);
-
- /* Readback test */
- hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
- hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
- hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
-
- if (memcmp(initseq, readbuf, sizeof(initseq)) == 0) {
- printk(KERN_DEBUG "Readback test succeeded, HCR 0x%02x\n",
- hcr);
- return 0;
- } else {
- printk(KERN_DEBUG "Readback test failed, HCR 0x%02x write %4ph read %4ph\n",
- hcr, initseq, readbuf);
- return 1;
- }
-}
-
-
-static int prism2_get_ram_size(local_info_t *local)
-{
- int ret;
-
- /* Try to enable genesis mode; 0x1F for x8 SRAM or 0x0F for x16 SRAM */
- if (prism2_enable_genesis(local, 0x1f) == 0)
- ret = 8;
- else if (prism2_enable_genesis(local, 0x0f) == 0)
- ret = 16;
- else
- ret = -1;
-
- /* Disable genesis mode */
- local->func->genesis_reset(local, ret == 16 ? 0x07 : 0x17);
-
- return ret;
-}
-
-
-static int prism2_download_genesis(local_info_t *local,
- struct prism2_download_data *param)
-{
- struct net_device *dev = local->dev;
- int ram16 = 0, i;
- int ret = 0;
-
- if (local->hw_downloading) {
- printk(KERN_WARNING "%s: Already downloading - aborting new "
- "request\n", dev->name);
- return -EBUSY;
- }
-
- if (!local->func->genesis_reset || !local->func->cor_sreset) {
- printk(KERN_INFO "%s: Genesis mode downloading not supported "
- "with this hwmodel\n", dev->name);
- return -EOPNOTSUPP;
- }
-
- local->hw_downloading = 1;
-
- if (prism2_enable_aux_port(dev, 1)) {
- printk(KERN_DEBUG "%s: failed to enable AUX port\n",
- dev->name);
- ret = -EIO;
- goto out;
- }
-
- if (local->sram_type == -1) {
- /* 0x1F for x8 SRAM or 0x0F for x16 SRAM */
- if (prism2_enable_genesis(local, 0x1f) == 0) {
- ram16 = 0;
- PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x8 "
- "SRAM\n", dev->name);
- } else if (prism2_enable_genesis(local, 0x0f) == 0) {
- ram16 = 1;
- PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x16 "
- "SRAM\n", dev->name);
- } else {
- printk(KERN_DEBUG "%s: Could not initiate genesis "
- "mode\n", dev->name);
- ret = -EIO;
- goto out;
- }
- } else {
- if (prism2_enable_genesis(local, local->sram_type == 8 ?
- 0x1f : 0x0f)) {
- printk(KERN_DEBUG "%s: Failed to set Genesis "
- "mode (sram_type=%d)\n", dev->name,
- local->sram_type);
- ret = -EIO;
- goto out;
- }
- ram16 = local->sram_type != 8;
- }
-
- for (i = 0; i < param->num_areas; i++) {
- PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
- dev->name, param->data[i].len, param->data[i].addr);
- if (hfa384x_to_aux(dev, param->data[i].addr,
- param->data[i].len, param->data[i].data)) {
- printk(KERN_WARNING "%s: RAM download at 0x%08x "
- "(len=%d) failed\n", dev->name,
- param->data[i].addr, param->data[i].len);
- ret = -EIO;
- goto out;
- }
- }
-
- PDEBUG(DEBUG_EXTRA2, "Disable genesis mode\n");
- local->func->genesis_reset(local, ram16 ? 0x07 : 0x17);
- if (prism2_enable_aux_port(dev, 0)) {
- printk(KERN_DEBUG "%s: Failed to disable AUX port\n",
- dev->name);
- }
-
- mdelay(5);
- local->hw_downloading = 0;
-
- PDEBUG(DEBUG_EXTRA2, "Trying to initialize card\n");
- /*
- * Make sure the INIT command does not generate a command completion
- * event by disabling interrupts.
- */
- hfa384x_disable_interrupts(dev);
- if (prism2_hw_init(dev, 1)) {
- printk(KERN_DEBUG "%s: Initialization after genesis mode "
- "download failed\n", dev->name);
- ret = -EIO;
- goto out;
- }
-
- PDEBUG(DEBUG_EXTRA2, "Card initialized - running PRI only\n");
- if (prism2_hw_init2(dev, 1)) {
- printk(KERN_DEBUG "%s: Initialization(2) after genesis mode "
- "download failed\n", dev->name);
- ret = -EIO;
- goto out;
- }
-
- out:
- local->hw_downloading = 0;
- return ret;
-}
-
-
-#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
-/* Note! The non-volatile download functionality has not yet been tested
- * thoroughly and it may corrupt the flash image and effectively kill the card
- * that is being updated. You have been warned. */
-
-static inline int prism2_download_block(struct net_device *dev,
- u32 addr, u8 *data,
- u32 bufaddr, int rest_len)
-{
- u16 param0, param1;
- int block_len;
-
- block_len = rest_len < 4096 ? rest_len : 4096;
-
- param0 = addr & 0xffff;
- param1 = addr >> 16;
-
- HFA384X_OUTW(block_len, HFA384X_PARAM2_OFF);
- HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
-
- if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
- (HFA384X_PROGMODE_ENABLE_NON_VOLATILE << 8),
- param0)) {
- printk(KERN_WARNING "%s: Flash download command execution "
- "failed\n", dev->name);
- return -1;
- }
-
- if (hfa384x_to_aux(dev, bufaddr, block_len, data)) {
- printk(KERN_WARNING "%s: flash download at 0x%08x "
- "(len=%d) failed\n", dev->name, addr, block_len);
- return -1;
- }
-
- HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
- HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
- if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
- (HFA384X_PROGMODE_PROGRAM_NON_VOLATILE << 8),
- 0)) {
- printk(KERN_WARNING "%s: Flash write command execution "
- "failed\n", dev->name);
- return -1;
- }
-
- return block_len;
-}
-
-
-static int prism2_download_nonvolatile(local_info_t *local,
- struct prism2_download_data *dl)
-{
- struct net_device *dev = local->dev;
- int ret = 0, i;
- struct {
- __le16 page;
- __le16 offset;
- __le16 len;
- } dlbuffer;
- u32 bufaddr;
-
- if (local->hw_downloading) {
- printk(KERN_WARNING "%s: Already downloading - aborting new "
- "request\n", dev->name);
- return -1;
- }
-
- ret = local->func->get_rid(dev, HFA384X_RID_DOWNLOADBUFFER,
- &dlbuffer, 6, 0);
-
- if (ret < 0) {
- printk(KERN_WARNING "%s: Could not read download buffer "
- "parameters\n", dev->name);
- goto out;
- }
-
- printk(KERN_DEBUG "Download buffer: %d bytes at 0x%04x:0x%04x\n",
- le16_to_cpu(dlbuffer.len),
- le16_to_cpu(dlbuffer.page),
- le16_to_cpu(dlbuffer.offset));
-
-	bufaddr = (le16_to_cpu(dlbuffer.page) << 7) +
-		le16_to_cpu(dlbuffer.offset);
-
- local->hw_downloading = 1;
-
- if (!local->pri_only) {
- prism2_hw_shutdown(dev, 0);
-
- if (prism2_hw_init(dev, 0)) {
- printk(KERN_WARNING "%s: Could not initialize card for"
- " download\n", dev->name);
- ret = -1;
- goto out;
- }
- }
-
- hfa384x_disable_interrupts(dev);
-
- if (prism2_enable_aux_port(dev, 1)) {
- printk(KERN_WARNING "%s: Could not enable AUX port\n",
- dev->name);
- ret = -1;
- goto out;
- }
-
- printk(KERN_DEBUG "%s: starting flash download\n", dev->name);
- for (i = 0; i < dl->num_areas; i++) {
- int rest_len = dl->data[i].len;
- int data_off = 0;
-
- while (rest_len > 0) {
- int block_len;
-
- block_len = prism2_download_block(
- dev, dl->data[i].addr + data_off,
- dl->data[i].data + data_off, bufaddr,
- rest_len);
-
- if (block_len < 0) {
- ret = -1;
- goto out;
- }
-
- rest_len -= block_len;
- data_off += block_len;
- }
- }
-
- HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
- HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
- if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
- (HFA384X_PROGMODE_DISABLE << 8), 0)) {
- printk(KERN_WARNING "%s: Download command execution failed\n",
- dev->name);
- ret = -1;
- goto out;
- }
-
- if (prism2_enable_aux_port(dev, 0)) {
- printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
- dev->name);
- /* continue anyway.. restart should have taken care of this */
- }
-
- mdelay(5);
-
- local->func->hw_reset(dev);
- local->hw_downloading = 0;
- if (prism2_hw_config(dev, 2)) {
- printk(KERN_WARNING "%s: Card configuration after flash "
- "download failed\n", dev->name);
- ret = -1;
- } else {
- printk(KERN_INFO "%s: Card initialized successfully after "
- "flash download\n", dev->name);
- }
-
- out:
- local->hw_downloading = 0;
- return ret;
-}
-#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
-
-
-static void prism2_download_free_data(struct prism2_download_data *dl)
-{
- int i;
-
- if (dl == NULL)
- return;
-
- for (i = 0; i < dl->num_areas; i++)
- kfree(dl->data[i].data);
- kfree(dl);
-}
-
-
-static int prism2_download(local_info_t *local,
- struct prism2_download_param *param)
-{
- int ret = 0;
- int i;
- u32 total_len = 0;
- struct prism2_download_data *dl = NULL;
-
- printk(KERN_DEBUG "prism2_download: dl_cmd=%d start_addr=0x%08x "
- "num_areas=%d\n",
- param->dl_cmd, param->start_addr, param->num_areas);
-
- if (param->num_areas > 100) {
- ret = -EINVAL;
- goto out;
- }
-
- dl = kzalloc(struct_size(dl, data, param->num_areas), GFP_KERNEL);
- if (dl == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- dl->dl_cmd = param->dl_cmd;
- dl->start_addr = param->start_addr;
- dl->num_areas = param->num_areas;
- for (i = 0; i < param->num_areas; i++) {
- PDEBUG(DEBUG_EXTRA2,
- " area %d: addr=0x%08x len=%d ptr=0x%p\n",
- i, param->data[i].addr, param->data[i].len,
- param->data[i].ptr);
-
- dl->data[i].addr = param->data[i].addr;
- dl->data[i].len = param->data[i].len;
-
- total_len += param->data[i].len;
- if (param->data[i].len > PRISM2_MAX_DOWNLOAD_AREA_LEN ||
- total_len > PRISM2_MAX_DOWNLOAD_LEN) {
- ret = -E2BIG;
- goto out;
- }
-
- dl->data[i].data = kmalloc(dl->data[i].len, GFP_KERNEL);
- if (dl->data[i].data == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(dl->data[i].data, param->data[i].ptr,
- param->data[i].len)) {
- ret = -EFAULT;
- goto out;
- }
- }
-
- switch (param->dl_cmd) {
- case PRISM2_DOWNLOAD_VOLATILE:
- case PRISM2_DOWNLOAD_VOLATILE_PERSISTENT:
- ret = prism2_download_volatile(local, dl);
- break;
- case PRISM2_DOWNLOAD_VOLATILE_GENESIS:
- case PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT:
- ret = prism2_download_genesis(local, dl);
- break;
- case PRISM2_DOWNLOAD_NON_VOLATILE:
-#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
- ret = prism2_download_nonvolatile(local, dl);
-#else /* PRISM2_NON_VOLATILE_DOWNLOAD */
- printk(KERN_INFO "%s: non-volatile downloading not enabled\n",
- local->dev->name);
- ret = -EOPNOTSUPP;
-#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
- break;
- default:
- printk(KERN_DEBUG "%s: unsupported download command %d\n",
- local->dev->name, param->dl_cmd);
- ret = -EINVAL;
- break;
- }
-
- out:
- if (ret == 0 && dl &&
- param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT) {
- prism2_download_free_data(local->dl_pri);
- local->dl_pri = dl;
- } else if (ret == 0 && dl &&
- param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_PERSISTENT) {
- prism2_download_free_data(local->dl_sec);
- local->dl_sec = dl;
- } else
- prism2_download_free_data(dl);
-
- return ret;
-}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
deleted file mode 100644
index b74f4cb5d..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ /dev/null
@@ -1,3387 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Host AP (software wireless LAN access point) driver for
- * Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <j@w1.fi>
- * Copyright (c) 2002-2005, Jouni Malinen <j@w1.fi>
- *
- * FIX:
- * - there is currently no way of associating TX packets with the correct WDS
- *   device when a TX Exc/OK event occurs, so all tx_packets and some
- *   tx_errors/tx_dropped are added to the main netdevice; the sw_support
- *   field in txdesc might be used to fix this (using the Alloc event to
- *   increment tx_packets would need some further info in the txfid table)
- *
- * Buffer Access Path (BAP) usage:
- * Prism2 cards have two separate BAPs for accessing the card memory. These
- * should allow concurrent access to two different frames and the driver
- * previously used BAP0 for sending data and BAP1 for receiving data.
- * However, there seem to be a number of issues with concurrent access and at
- * least one known hardware bug in using BAP0 and BAP1 concurrently with PCI
- * Prism2.5. Therefore, the driver now only uses BAP0 for moving data between
- * host and card memories. BAP0 accesses are protected with local->baplock
- * (spin_lock_bh) to prevent concurrent use.
- */
-
-
-
-#include <asm/delay.h>
-#include <linux/uaccess.h>
-
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/wait.h>
-#include <linux/sched/signal.h>
-#include <linux/rtnetlink.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <net/lib80211.h>
-#include <asm/irq.h>
-
-#include "hostap_80211.h"
-#include "hostap.h"
-#include "hostap_ap.h"
-
-
-/* #define final_version */
-
-static int mtu = 1500;
-module_param(mtu, int, 0444);
-MODULE_PARM_DESC(mtu, "Maximum transfer unit");
-
-static int channel[MAX_PARM_DEVICES] = { 3, DEF_INTS };
-module_param_array(channel, int, NULL, 0444);
-MODULE_PARM_DESC(channel, "Initial channel");
-
-static char essid[33] = "test";
-module_param_string(essid, essid, sizeof(essid), 0444);
-MODULE_PARM_DESC(essid, "Host AP's ESSID");
-
-static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_MASTER, DEF_INTS };
-module_param_array(iw_mode, int, NULL, 0444);
-MODULE_PARM_DESC(iw_mode, "Initial operation mode");
-
-static int beacon_int[MAX_PARM_DEVICES] = { 100, DEF_INTS };
-module_param_array(beacon_int, int, NULL, 0444);
-MODULE_PARM_DESC(beacon_int, "Beacon interval (1 = 1024 usec)");
-
-static int dtim_period[MAX_PARM_DEVICES] = { 1, DEF_INTS };
-module_param_array(dtim_period, int, NULL, 0444);
-MODULE_PARM_DESC(dtim_period, "DTIM period");
-
-static char dev_template[16] = "wlan%d";
-module_param_string(dev_template, dev_template, sizeof(dev_template), 0444);
-MODULE_PARM_DESC(dev_template, "Prefix for network device name (default: "
- "wlan%d)");
-
-#ifdef final_version
-#define EXTRA_EVENTS_WTERR 0
-#else
-/* check WTERR events (Wait Time-out) in development versions */
-#define EXTRA_EVENTS_WTERR HFA384X_EV_WTERR
-#endif
-
-/* Events that will be using BAP0 */
-#define HFA384X_BAP0_EVENTS \
- (HFA384X_EV_TXEXC | HFA384X_EV_RX | HFA384X_EV_INFO | HFA384X_EV_TX)
-
-/* event mask, i.e., events that will result in an interrupt */
-#define HFA384X_EVENT_MASK \
- (HFA384X_BAP0_EVENTS | HFA384X_EV_ALLOC | HFA384X_EV_INFDROP | \
- HFA384X_EV_CMD | HFA384X_EV_TICK | \
- EXTRA_EVENTS_WTERR)
-
-/* Default TX control flags: use 802.11 headers and request an interrupt for
- * failed transmits. Frames that request an ACK callback will add the _TX_OK
- * flag; the _ALT_RTRY flag may be used to select a different retry policy.
- */
-#define HFA384X_TX_CTRL_FLAGS \
- (HFA384X_TX_CTRL_802_11 | HFA384X_TX_CTRL_TX_EX)
-
-
-/* poll counts; each iteration is ca. 1 usec */
-#define HFA384X_CMD_BUSY_TIMEOUT 5000
-#define HFA384X_BAP_BUSY_TIMEOUT 50000
-
-/* poll counts; each iteration is ca. 10 usec */
-#define HFA384X_CMD_COMPL_TIMEOUT 20000
-#define HFA384X_DL_COMPL_TIMEOUT 1000000
-
-/* Wait times for initialization; yield to other processes to avoid busy
- * waiting for a long time. */
-#define HFA384X_INIT_TIMEOUT (HZ / 2) /* 500 ms */
-#define HFA384X_ALLOC_COMPL_TIMEOUT (HZ / 20) /* 50 ms */
-
-
-static void prism2_hw_reset(struct net_device *dev);
-static void prism2_check_sta_fw_version(local_info_t *local);
-
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-/* hostap_download.c */
-static const struct proc_ops prism2_download_aux_dump_proc_ops;
-static u8 * prism2_read_pda(struct net_device *dev);
-static int prism2_download(local_info_t *local,
- struct prism2_download_param *param);
-static void prism2_download_free_data(struct prism2_download_data *dl);
-static int prism2_download_volatile(local_info_t *local,
- struct prism2_download_data *param);
-static int prism2_download_genesis(local_info_t *local,
- struct prism2_download_data *param);
-static int prism2_get_ram_size(local_info_t *local);
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
-
-
-
-#ifndef final_version
-/* magic value written to SWSUPPORT0 reg. for detecting whether card is still
- * present */
-#define HFA384X_MAGIC 0x8A32
-#endif
-
-static void hfa384x_read_regs(struct net_device *dev,
- struct hfa384x_regs *regs)
-{
- regs->cmd = HFA384X_INW(HFA384X_CMD_OFF);
- regs->evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
- regs->offset0 = HFA384X_INW(HFA384X_OFFSET0_OFF);
- regs->offset1 = HFA384X_INW(HFA384X_OFFSET1_OFF);
- regs->swsupport0 = HFA384X_INW(HFA384X_SWSUPPORT0_OFF);
-}
-
-
-/**
- * __hostap_cmd_queue_free - Free Prism2 command queue entry (private)
- * @local: pointer to private Host AP driver data
- * @entry: Prism2 command queue entry to be freed
- * @del_req: request the entry to be removed
- *
- * Internal helper function for freeing Prism2 command queue entries.
- * Caller must have acquired local->cmdlock before calling this function.
- */
-static inline void __hostap_cmd_queue_free(local_info_t *local,
- struct hostap_cmd_queue *entry,
- int del_req)
-{
- if (del_req) {
- entry->del_req = 1;
- if (!list_empty(&entry->list)) {
- list_del_init(&entry->list);
- local->cmd_queue_len--;
- }
- }
-
- if (refcount_dec_and_test(&entry->usecnt) && entry->del_req)
- kfree(entry);
-}
-
-
-/**
- * hostap_cmd_queue_free - Free Prism2 command queue entry
- * @local: pointer to private Host AP driver data
- * @entry: Prism2 command queue entry to be freed
- * @del_req: request the entry to be removed
- *
- * Free a Prism2 command queue entry.
- */
-static inline void hostap_cmd_queue_free(local_info_t *local,
- struct hostap_cmd_queue *entry,
- int del_req)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&local->cmdlock, flags);
- __hostap_cmd_queue_free(local, entry, del_req);
- spin_unlock_irqrestore(&local->cmdlock, flags);
-}
-
-
-/**
- * prism2_clear_cmd_queue - Free all pending Prism2 command queue entries
- * @local: pointer to private Host AP driver data
- */
-static void prism2_clear_cmd_queue(local_info_t *local)
-{
- struct list_head *ptr, *n;
- unsigned long flags;
- struct hostap_cmd_queue *entry;
-
- spin_lock_irqsave(&local->cmdlock, flags);
- list_for_each_safe(ptr, n, &local->cmd_queue) {
- entry = list_entry(ptr, struct hostap_cmd_queue, list);
- refcount_inc(&entry->usecnt);
- printk(KERN_DEBUG "%s: removed pending cmd_queue entry "
- "(type=%d, cmd=0x%04x, param0=0x%04x)\n",
- local->dev->name, entry->type, entry->cmd,
- entry->param0);
- __hostap_cmd_queue_free(local, entry, 1);
- }
- if (local->cmd_queue_len) {
- /* This should not happen; print debug message and clear
- * queue length. */
- printk(KERN_DEBUG "%s: cmd_queue_len (%d) not zero after "
- "flush\n", local->dev->name, local->cmd_queue_len);
- local->cmd_queue_len = 0;
- }
- spin_unlock_irqrestore(&local->cmdlock, flags);
-}
-
-
-/**
- * hfa384x_cmd_issue - Issue a Prism2 command to the hardware
- * @dev: pointer to net_device
- * @entry: Prism2 command queue entry to be issued
- */
-static int hfa384x_cmd_issue(struct net_device *dev,
- struct hostap_cmd_queue *entry)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int tries;
- u16 reg;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->card_present && !local->func->card_present(local))
- return -ENODEV;
-
- if (entry->issued) {
- printk(KERN_DEBUG "%s: driver bug - re-issuing command @%p\n",
- dev->name, entry);
- }
-
- /* wait until busy bit is clear; this should always be clear since the
- * commands are serialized */
- tries = HFA384X_CMD_BUSY_TIMEOUT;
- while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
- tries--;
- udelay(1);
- }
-#ifndef final_version
- if (tries != HFA384X_CMD_BUSY_TIMEOUT) {
- prism2_io_debug_error(dev, 1);
- printk(KERN_DEBUG "%s: hfa384x_cmd_issue: cmd reg was busy "
- "for %d usec\n", dev->name,
- HFA384X_CMD_BUSY_TIMEOUT - tries);
- }
-#endif
- if (tries == 0) {
- reg = HFA384X_INW(HFA384X_CMD_OFF);
- prism2_io_debug_error(dev, 2);
- printk(KERN_DEBUG "%s: hfa384x_cmd_issue - timeout - "
- "reg=0x%04x\n", dev->name, reg);
- return -ETIMEDOUT;
- }
-
- /* write command */
- spin_lock_irqsave(&local->cmdlock, flags);
- HFA384X_OUTW(entry->param0, HFA384X_PARAM0_OFF);
- HFA384X_OUTW(entry->param1, HFA384X_PARAM1_OFF);
- HFA384X_OUTW(entry->cmd, HFA384X_CMD_OFF);
- entry->issued = 1;
- spin_unlock_irqrestore(&local->cmdlock, flags);
-
- return 0;
-}
-
-
-/**
- * hfa384x_cmd - Issue a Prism2 command and wait (sleep) for completion
- * @dev: pointer to net_device
- * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
- * @param0: value for Param0 register
- * @param1: value for Param1 register (pointer; %NULL if not used)
- * @resp0: pointer for Resp0 data or %NULL if Resp0 is not needed
- *
- * Issue given command (possibly after waiting in command queue) and sleep
- * until the command is completed (or timed out or interrupted). This can be
- * called only from user process context.
- */
-static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
- u16 *param1, u16 *resp0)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int err, res, issue, issued = 0;
- unsigned long flags;
- struct hostap_cmd_queue *entry;
- DECLARE_WAITQUEUE(wait, current);
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) {
- printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
- dev->name);
- return -1;
- }
-
- if (signal_pending(current))
- return -EINTR;
-
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (entry == NULL)
- return -ENOMEM;
-
- refcount_set(&entry->usecnt, 1);
- entry->type = CMD_SLEEP;
- entry->cmd = cmd;
- entry->param0 = param0;
- if (param1)
- entry->param1 = *param1;
- init_waitqueue_head(&entry->compl);
-
- /* prepare to wait for command completion event, but do not sleep yet
- */
- add_wait_queue(&entry->compl, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irqsave(&local->cmdlock, flags);
- issue = list_empty(&local->cmd_queue);
- if (issue)
- entry->issuing = 1;
- list_add_tail(&entry->list, &local->cmd_queue);
- local->cmd_queue_len++;
- spin_unlock_irqrestore(&local->cmdlock, flags);
-
- err = 0;
- if (!issue)
- goto wait_completion;
-
- if (signal_pending(current))
- err = -EINTR;
-
- if (!err) {
- if (hfa384x_cmd_issue(dev, entry))
- err = -ETIMEDOUT;
- else
- issued = 1;
- }
-
- wait_completion:
- if (!err && entry->type != CMD_COMPLETED) {
- /* sleep until command is completed or timed out */
- res = schedule_timeout(2 * HZ);
- } else
- res = -1;
-
- if (!err && signal_pending(current))
- err = -EINTR;
-
- if (err && issued) {
- /* the command was issued, so a CmdCompl event should occur
- * soon; however, there's a pending signal and
- * schedule_timeout() would be interrupted; wait a short period
- * of time to avoid removing entry from the list before
- * CmdCompl event */
- udelay(300);
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&entry->compl, &wait);
-
- /* If entry->list is still on the list, it must be removed
- * first; in that case prism2_cmd_ev() does not yet have a
- * local reference to it, and the data can be kfree()'d
- * here. If the command completion event is still generated,
- * it will be assigned to the next (possibly) pending command,
- * but the driver will reset the card anyway due to the timeout.
- *
- * If the entry is not on the list, prism2_cmd_ev() has a local
- * reference to it, but holds cmdlock as long as the data is
- * needed, so the data can be kfree()'d here. */
-
- /* FIX: if entry->list is still on the list, the command has not been
- * completed yet, so removing it here is somewhat wrong. This could
- * leave references to freed memory, and the next list_del() could
- * cause a NULL pointer dereference. It would probably be better to
- * leave the entry on the list and empty the list during hw reset. */
-
- spin_lock_irqsave(&local->cmdlock, flags);
- if (!list_empty(&entry->list)) {
- printk(KERN_DEBUG "%s: hfa384x_cmd: entry still in list? "
- "(entry=%p, type=%d, res=%d)\n", dev->name, entry,
- entry->type, res);
- list_del_init(&entry->list);
- local->cmd_queue_len--;
- }
- spin_unlock_irqrestore(&local->cmdlock, flags);
-
- if (err) {
- printk(KERN_DEBUG "%s: hfa384x_cmd: interrupted; err=%d\n",
- dev->name, err);
- res = err;
- goto done;
- }
-
- if (entry->type != CMD_COMPLETED) {
- u16 reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
- printk(KERN_DEBUG "%s: hfa384x_cmd: command was not "
- "completed (res=%d, entry=%p, type=%d, cmd=0x%04x, "
- "param0=0x%04x, EVSTAT=%04x INTEN=%04x)\n", dev->name,
- res, entry, entry->type, entry->cmd, entry->param0, reg,
- HFA384X_INW(HFA384X_INTEN_OFF));
- if (reg & HFA384X_EV_CMD) {
- /* Command completion event is pending, but the
- * interrupt was not delivered - probably an issue
- * with pcmcia-cs configuration. */
- printk(KERN_WARNING "%s: interrupt delivery does not "
- "seem to work\n", dev->name);
- }
- prism2_io_debug_error(dev, 3);
- res = -ETIMEDOUT;
- goto done;
- }
-
- if (resp0 != NULL)
- *resp0 = entry->resp0;
-#ifndef final_version
- if (entry->res) {
- printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x, "
- "resp0=0x%04x\n",
- dev->name, cmd, entry->res, entry->resp0);
- }
-#endif /* final_version */
-
- res = entry->res;
- done:
- hostap_cmd_queue_free(local, entry, 1);
- return res;
-}
-
-
-/**
- * hfa384x_cmd_callback - Issue a Prism2 command; callback when completed
- * @dev: pointer to net_device
- * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
- * @param0: value for Param0 register
- * @callback: command completion callback function (%NULL = no callback)
- * @context: context data to be given to the callback function
- *
- * Issue the given command (possibly after waiting in the command queue) and
- * use the callback function to indicate command completion. This can be
- * called both from user and interrupt context. The callback function will
- * be called in hardware IRQ context. It may be %NULL, in which case no
- * function is called when the command completes.
- */
-static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
- void (*callback)(struct net_device *dev,
- long context, u16 resp0,
- u16 status),
- long context)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int issue, ret;
- unsigned long flags;
- struct hostap_cmd_queue *entry;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN + 2) {
- printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
- dev->name);
- return -1;
- }
-
- entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
- if (entry == NULL)
- return -ENOMEM;
-
- refcount_set(&entry->usecnt, 1);
- entry->type = CMD_CALLBACK;
- entry->cmd = cmd;
- entry->param0 = param0;
- entry->callback = callback;
- entry->context = context;
-
- spin_lock_irqsave(&local->cmdlock, flags);
- issue = list_empty(&local->cmd_queue);
- if (issue)
- entry->issuing = 1;
- list_add_tail(&entry->list, &local->cmd_queue);
- local->cmd_queue_len++;
- spin_unlock_irqrestore(&local->cmdlock, flags);
-
- if (issue && hfa384x_cmd_issue(dev, entry))
- ret = -ETIMEDOUT;
- else
- ret = 0;
-
- hostap_cmd_queue_free(local, entry, ret);
-
- return ret;
-}
-
-
-/**
- * __hfa384x_cmd_no_wait - Issue a Prism2 command (private)
- * @dev: pointer to net_device
- * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
- * @param0: value for Param0 register
- * @io_debug_num: I/O debug error number
- *
- * Shared helper function for hfa384x_cmd_wait() and hfa384x_cmd_no_wait().
- */
-static int __hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd, u16 param0,
- int io_debug_num)
-{
- int tries;
- u16 reg;
-
- /* wait until busy bit is clear; this should always be clear since the
- * commands are serialized */
- tries = HFA384X_CMD_BUSY_TIMEOUT;
- while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
- tries--;
- udelay(1);
- }
- if (tries == 0) {
- reg = HFA384X_INW(HFA384X_CMD_OFF);
- prism2_io_debug_error(dev, io_debug_num);
- printk(KERN_DEBUG "%s: __hfa384x_cmd_no_wait(%d) - timeout - "
- "reg=0x%04x\n", dev->name, io_debug_num, reg);
- return -ETIMEDOUT;
- }
-
- /* write command */
- HFA384X_OUTW(param0, HFA384X_PARAM0_OFF);
- HFA384X_OUTW(cmd, HFA384X_CMD_OFF);
-
- return 0;
-}
-
-
-/**
- * hfa384x_cmd_wait - Issue a Prism2 command and busy wait for completion
- * @dev: pointer to net_device
- * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
- * @param0: value for Param0 register
- */
-static int hfa384x_cmd_wait(struct net_device *dev, u16 cmd, u16 param0)
-{
- int res, tries;
- u16 reg;
-
- res = __hfa384x_cmd_no_wait(dev, cmd, param0, 4);
- if (res)
- return res;
-
- /* wait for command completion */
- if ((cmd & HFA384X_CMDCODE_MASK) == HFA384X_CMDCODE_DOWNLOAD)
- tries = HFA384X_DL_COMPL_TIMEOUT;
- else
- tries = HFA384X_CMD_COMPL_TIMEOUT;
-
- while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
- tries > 0) {
- tries--;
- udelay(10);
- }
- if (tries == 0) {
- reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
- prism2_io_debug_error(dev, 5);
- printk(KERN_DEBUG "%s: hfa384x_cmd_wait - timeout2 - "
- "reg=0x%04x\n", dev->name, reg);
- return -ETIMEDOUT;
- }
-
- res = (HFA384X_INW(HFA384X_STATUS_OFF) &
- (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) |
- BIT(8))) >> 8;
-#ifndef final_version
- if (res) {
- printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x\n",
- dev->name, cmd, res);
- }
-#endif
-
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
-
- return res;
-}
-
-
-/**
- * hfa384x_cmd_no_wait - Issue a Prism2 command; do not wait for completion
- * @dev: pointer to net_device
- * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
- * @param0: value for Param0 register
- */
-static inline int hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd,
- u16 param0)
-{
- return __hfa384x_cmd_no_wait(dev, cmd, param0, 6);
-}
-
-
-/**
- * prism2_cmd_ev - Prism2 command completion event handler
- * @dev: pointer to net_device
- *
- * Interrupt handler for command completion events. Called by the main
- * interrupt handler in hardware IRQ context. Read Resp0 and status registers
- * from the hardware and ACK the event. Depending on the issued command type
- * either wake up the sleeping process that is waiting for command completion
- * or call the callback function. Issue the next command, if one is pending.
- */
-static void prism2_cmd_ev(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hostap_cmd_queue *entry = NULL;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock(&local->cmdlock);
- if (!list_empty(&local->cmd_queue)) {
- entry = list_entry(local->cmd_queue.next,
- struct hostap_cmd_queue, list);
- refcount_inc(&entry->usecnt);
- list_del_init(&entry->list);
- local->cmd_queue_len--;
-
- if (!entry->issued) {
- printk(KERN_DEBUG "%s: Command completion event, but "
- "cmd not issued\n", dev->name);
- __hostap_cmd_queue_free(local, entry, 1);
- entry = NULL;
- }
- }
- spin_unlock(&local->cmdlock);
-
- if (!entry) {
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
- printk(KERN_DEBUG "%s: Command completion event, but no "
- "pending commands\n", dev->name);
- return;
- }
-
- entry->resp0 = HFA384X_INW(HFA384X_RESP0_OFF);
- entry->res = (HFA384X_INW(HFA384X_STATUS_OFF) &
- (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) |
- BIT(9) | BIT(8))) >> 8;
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
-
- /* TODO: rest of the CmdEv handling could be moved to tasklet */
- if (entry->type == CMD_SLEEP) {
- entry->type = CMD_COMPLETED;
- wake_up_interruptible(&entry->compl);
- } else if (entry->type == CMD_CALLBACK) {
- if (entry->callback)
- entry->callback(dev, entry->context, entry->resp0,
- entry->res);
- } else {
- printk(KERN_DEBUG "%s: Invalid command completion type %d\n",
- dev->name, entry->type);
- }
- hostap_cmd_queue_free(local, entry, 1);
-
- /* issue next command, if pending */
- entry = NULL;
- spin_lock(&local->cmdlock);
- if (!list_empty(&local->cmd_queue)) {
- entry = list_entry(local->cmd_queue.next,
- struct hostap_cmd_queue, list);
- if (entry->issuing) {
- /* hfa384x_cmd() has already started issuing this
- * command, so do not start here */
- entry = NULL;
- }
- if (entry)
- refcount_inc(&entry->usecnt);
- }
- spin_unlock(&local->cmdlock);
-
- if (entry) {
- /* issue next command; if command issuing fails, remove the
- * entry from cmd_queue */
- int res = hfa384x_cmd_issue(dev, entry);
- spin_lock(&local->cmdlock);
- __hostap_cmd_queue_free(local, entry, res);
- spin_unlock(&local->cmdlock);
- }
-}
-
-
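-/**
- * hfa384x_wait_offset - Busy wait until a BAP offset register is ready
- * @dev: pointer to net_device
- * @o_off: offset register (HFA384X_OFFSET0_OFF or HFA384X_OFFSET1_OFF)
- *
- * Poll the offset register until its busy bit clears or
- * HFA384X_BAP_BUSY_TIMEOUT iterations elapse. Returns zero when the BAP is
- * ready and non-zero (the busy bit) on timeout.
- */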
-static int hfa384x_wait_offset(struct net_device *dev, u16 o_off)
-{
- int tries = HFA384X_BAP_BUSY_TIMEOUT;
- int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
-
- while (res && tries > 0) {
- tries--;
- udelay(1);
- res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
- }
- return res;
-}
-
-
-/* Offset must be even */
-static int hfa384x_setup_bap(struct net_device *dev, u16 bap, u16 id,
- int offset)
-{
- u16 o_off, s_off;
- int ret = 0;
-
- if (offset % 2 || bap > 1)
- return -EINVAL;
-
- if (bap == BAP1) {
- o_off = HFA384X_OFFSET1_OFF;
- s_off = HFA384X_SELECT1_OFF;
- } else {
- o_off = HFA384X_OFFSET0_OFF;
- s_off = HFA384X_SELECT0_OFF;
- }
-
- if (hfa384x_wait_offset(dev, o_off)) {
- prism2_io_debug_error(dev, 7);
- printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout before\n",
- dev->name);
- ret = -ETIMEDOUT;
- goto out;
- }
-
- HFA384X_OUTW(id, s_off);
- HFA384X_OUTW(offset, o_off);
-
- if (hfa384x_wait_offset(dev, o_off)) {
- prism2_io_debug_error(dev, 8);
- printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout after\n",
- dev->name);
- ret = -ETIMEDOUT;
- goto out;
- }
-#ifndef final_version
- if (HFA384X_INW(o_off) & HFA384X_OFFSET_ERR) {
- prism2_io_debug_error(dev, 9);
- printk(KERN_DEBUG "%s: hfa384x_setup_bap - offset error "
- "(%d,0x04%x,%d); reg=0x%04x\n",
- dev->name, bap, id, offset, HFA384X_INW(o_off));
- ret = -EINVAL;
- }
-#endif
-
- out:
- return ret;
-}
-
-
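-/**
- * hfa384x_get_rid - Read a configuration record (RID) from the card
- * @dev: pointer to net_device
- * @rid: RID number (HFA384X_RID_*)
- * @buf: buffer for the RID payload
- * @len: length of @buf in bytes
- * @exact_len: require the record length to match @len exactly
- *
- * Issue CMDCODE_ACCESS and copy the record payload from BAP0 into @buf.
- * Returns the length of the RID data reported by the card or a negative
- * error code.
- */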
-static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
- int exact_len)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int res, rlen = 0;
- struct hfa384x_rid_hdr rec;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->no_pri) {
- printk(KERN_DEBUG "%s: cannot get RID %04x (len=%d) - no PRI "
- "f/w\n", dev->name, rid, len);
- return -ENOTTY; /* Well.. not really correct, but return
- * something unique enough.. */
- }
-
- if ((local->func->card_present && !local->func->card_present(local)) ||
- local->hw_downloading)
- return -ENODEV;
-
- res = mutex_lock_interruptible(&local->rid_bap_mtx);
- if (res)
- return res;
-
- res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS, rid, NULL, NULL);
- if (res) {
- printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed "
- "(res=%d, rid=%04x, len=%d)\n",
- dev->name, res, rid, len);
- mutex_unlock(&local->rid_bap_mtx);
- return res;
- }
-
- spin_lock_bh(&local->baplock);
-
- res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (res)
- goto unlock;
-
- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
- if (res)
- goto unlock;
-
- if (le16_to_cpu(rec.len) == 0) {
- /* RID not available */
- res = -ENODATA;
- goto unlock;
- }
-
- rlen = (le16_to_cpu(rec.len) - 1) * 2;
- if (exact_len && rlen != len) {
- printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
- "rid=0x%04x, len=%d (expected %d)\n",
- dev->name, rid, rlen, len);
- res = -ENODATA;
- goto unlock;
- }
-
- res = hfa384x_from_bap(dev, BAP0, buf, len);
-
-unlock:
- spin_unlock_bh(&local->baplock);
- mutex_unlock(&local->rid_bap_mtx);
-
- if (res) {
- if (res != -ENODATA)
- printk(KERN_DEBUG "%s: hfa384x_get_rid (rid=%04x, "
- "len=%d) - failed - res=%d\n", dev->name, rid,
- len, res);
- if (res == -ETIMEDOUT)
- prism2_hw_reset(dev);
- return res;
- }
-
- return rlen;
-}
-
-
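-/**
- * hfa384x_set_rid - Write a configuration record (RID) to the card
- * @dev: pointer to net_device
- * @rid: RID number (HFA384X_RID_*)
- * @buf: RID payload to write
- * @len: length of @buf in bytes
- *
- * Write the record header and payload through BAP0 and commit the change
- * with CMDCODE_ACCESS_WRITE. Returns zero on success or a negative error
- * code.
- */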
-static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hfa384x_rid_hdr rec;
- int res;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->no_pri) {
- printk(KERN_DEBUG "%s: cannot set RID %04x (len=%d) - no PRI "
- "f/w\n", dev->name, rid, len);
- return -ENOTTY; /* Well.. not really correct, but return
- * something unique enough.. */
- }
-
- if ((local->func->card_present && !local->func->card_present(local)) ||
- local->hw_downloading)
- return -ENODEV;
-
- rec.rid = cpu_to_le16(rid);
- /* RID len in words and +1 for rec.rid */
- rec.len = cpu_to_le16(len / 2 + len % 2 + 1);
-
- res = mutex_lock_interruptible(&local->rid_bap_mtx);
- if (res)
- return res;
-
- spin_lock_bh(&local->baplock);
- res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (!res)
- res = hfa384x_to_bap(dev, BAP0, &rec, sizeof(rec));
- if (!res)
- res = hfa384x_to_bap(dev, BAP0, buf, len);
- spin_unlock_bh(&local->baplock);
-
- if (res) {
- printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - "
- "failed - res=%d\n", dev->name, rid, len, res);
- mutex_unlock(&local->rid_bap_mtx);
- return res;
- }
-
- res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL);
- mutex_unlock(&local->rid_bap_mtx);
-
- if (res) {
- printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE "
- "failed (res=%d, rid=%04x, len=%d)\n",
- dev->name, res, rid, len);
-
- if (res == -ETIMEDOUT)
- prism2_hw_reset(dev);
- }
-
- return res;
-}
-
-
-static void hfa384x_disable_interrupts(struct net_device *dev)
-{
- /* disable interrupts and clear event status */
- HFA384X_OUTW(0, HFA384X_INTEN_OFF);
- HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
-}
-
-
-static void hfa384x_enable_interrupts(struct net_device *dev)
-{
- /* ack pending events and enable interrupts from selected events */
- HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
- HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
-}
-
-
-static void hfa384x_events_no_bap0(struct net_device *dev)
-{
- HFA384X_OUTW(HFA384X_EVENT_MASK & ~HFA384X_BAP0_EVENTS,
- HFA384X_INTEN_OFF);
-}
-
-
-static void hfa384x_events_all(struct net_device *dev)
-{
- HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
-}
-
-
-static void hfa384x_events_only_cmd(struct net_device *dev)
-{
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_INTEN_OFF);
-}
-
-
-static u16 hfa384x_allocate_fid(struct net_device *dev, int len)
-{
- u16 fid;
- unsigned long delay;
-
- /* FIX: this could be replaced with hfa384x_cmd() if the Alloc event
- * below were handled like the CmdCompl event (sleep here, wake up from
- * the interrupt handler) */
- if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_ALLOC, len)) {
- printk(KERN_DEBUG "%s: cannot allocate fid, len=%d\n",
- dev->name, len);
- return 0xffff;
- }
-
- delay = jiffies + HFA384X_ALLOC_COMPL_TIMEOUT;
- while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC) &&
- time_before(jiffies, delay))
- yield();
- if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC)) {
- printk("%s: fid allocate, len=%d - timeout\n", dev->name, len);
- return 0xffff;
- }
-
- fid = HFA384X_INW(HFA384X_ALLOCFID_OFF);
- HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
-
- return fid;
-}
-
-
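-/**
- * prism2_reset_port - Reset MAC port 0 by disabling and re-enabling it
- * @dev: pointer to net_device
- *
- * Some station firmware versions lose configuration on port enable, so the
- * fragmentation threshold and antenna selection are restored afterwards.
- */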
-static int prism2_reset_port(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int res;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (!local->dev_enabled)
- return 0;
-
- res = hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0,
- NULL, NULL);
- if (res)
- printk(KERN_DEBUG "%s: reset port failed to disable port\n",
- dev->name);
- else {
- res = hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0,
- NULL, NULL);
- if (res)
- printk(KERN_DEBUG "%s: reset port failed to enable "
- "port\n", dev->name);
- }
-
- /* It looks like at least some STA firmware versions reset
- * fragmentation threshold back to 2346 after enable command. Restore
- * the configured value, if it differs from this default. */
- if (local->fragm_threshold != 2346 &&
- hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
- local->fragm_threshold)) {
- printk(KERN_DEBUG "%s: failed to restore fragmentation "
- "threshold (%d) after Port0 enable\n",
- dev->name, local->fragm_threshold);
- }
-
- /* Some firmwares lose antenna selection settings on reset */
- (void) hostap_set_antsel(local);
-
- return res;
-}
-
-
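-/**
- * prism2_get_version_info - Read and log a component identity RID
- * @dev: pointer to net_device
- * @rid: component identity RID (e.g., HFA384X_RID_NICID)
- * @txt: component name used in the log message
- *
- * Returns 0 on success or -1 if the RID could not be read.
- */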
-static int prism2_get_version_info(struct net_device *dev, u16 rid,
- const char *txt)
-{
- struct hfa384x_comp_ident comp;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->no_pri) {
- /* PRI f/w not yet available - cannot read RIDs */
- return -1;
- }
- if (hfa384x_get_rid(dev, rid, &comp, sizeof(comp), 1) < 0) {
- printk(KERN_DEBUG "Could not get RID for component %s\n", txt);
- return -1;
- }
-
- printk(KERN_INFO "%s: %s: id=0x%02x v%d.%d.%d\n", dev->name, txt,
- __le16_to_cpu(comp.id), __le16_to_cpu(comp.major),
- __le16_to_cpu(comp.minor), __le16_to_cpu(comp.variant));
- return 0;
-}
-
-
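-/**
- * prism2_setup_rids - Configure the card RIDs from the current settings
- * @dev: pointer to net_device
- *
- * Write port type, SSID, channel, beacon interval, TX rates, encryption,
- * and the various thresholds into the corresponding RIDs. Returns zero on
- * success or the first fatal error.
- */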
-static int prism2_setup_rids(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 tmp;
- int ret = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- hostap_set_word(dev, HFA384X_RID_TICKTIME, 2000);
-
- if (!local->fw_ap) {
- u16 tmp1 = hostap_get_porttype(local);
- ret = hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, tmp1);
- if (ret) {
- printk("%s: Port type setting to %d failed\n",
- dev->name, tmp1);
- goto fail;
- }
- }
-
- /* Setting SSID to empty string seems to kill the card in Host AP mode
- */
- if (local->iw_mode != IW_MODE_MASTER || local->essid[0] != '\0') {
- ret = hostap_set_string(dev, HFA384X_RID_CNFOWNSSID,
- local->essid);
- if (ret) {
- printk("%s: AP own SSID setting failed\n", dev->name);
- goto fail;
- }
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CNFMAXDATALEN,
- PRISM2_DATA_MAXLEN);
- if (ret) {
- printk("%s: MAC data length setting to %d failed\n",
- dev->name, PRISM2_DATA_MAXLEN);
- goto fail;
- }
-
- if (hfa384x_get_rid(dev, HFA384X_RID_CHANNELLIST, &tmp, 2, 1) < 0) {
- printk("%s: Channel list read failed\n", dev->name);
- ret = -EINVAL;
- goto fail;
- }
- local->channel_mask = le16_to_cpu(tmp);
-
- if (local->channel < 1 || local->channel > 14 ||
- !(local->channel_mask & (1 << (local->channel - 1)))) {
- printk(KERN_WARNING "%s: Channel setting out of range "
- "(%d)!\n", dev->name, local->channel);
- ret = -EBUSY;
- goto fail;
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel);
- if (ret) {
- printk("%s: Channel setting to %d failed\n",
- dev->name, local->channel);
- goto fail;
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CNFBEACONINT,
- local->beacon_int);
- if (ret) {
- printk("%s: Beacon interval setting to %d failed\n",
- dev->name, local->beacon_int);
- /* this may fail with Symbol/Lucent firmware */
- if (ret == -ETIMEDOUT)
- goto fail;
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD,
- local->dtim_period);
- if (ret) {
- printk("%s: DTIM period setting to %d failed\n",
- dev->name, local->dtim_period);
- /* this may fail with Symbol/Lucent firmware */
- if (ret == -ETIMEDOUT)
- goto fail;
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
- local->is_promisc);
- if (ret)
- printk(KERN_INFO "%s: Setting promiscuous mode (%d) failed\n",
- dev->name, local->is_promisc);
-
- if (!local->fw_ap) {
- ret = hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID,
- local->essid);
- if (ret) {
- printk("%s: Desired SSID setting failed\n", dev->name);
- goto fail;
- }
- }
-
- /* Setup TXRateControl, defaults to allow use of 1, 2, 5.5, and
- * 11 Mbps in automatic TX rate fallback and 1 and 2 Mbps as basic
- * rates */
- if (local->tx_rate_control == 0) {
- local->tx_rate_control =
- HFA384X_RATES_1MBPS |
- HFA384X_RATES_2MBPS |
- HFA384X_RATES_5MBPS |
- HFA384X_RATES_11MBPS;
- }
- if (local->basic_rates == 0)
- local->basic_rates = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS;
-
- if (!local->fw_ap) {
- ret = hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
- local->tx_rate_control);
- if (ret) {
- printk("%s: TXRateControl setting to %d failed\n",
- dev->name, local->tx_rate_control);
- goto fail;
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
- local->tx_rate_control);
- if (ret) {
- printk("%s: cnfSupportedRates setting to %d failed\n",
- dev->name, local->tx_rate_control);
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
- local->basic_rates);
- if (ret) {
- printk("%s: cnfBasicRates setting to %d failed\n",
- dev->name, local->basic_rates);
- }
-
- ret = hostap_set_word(dev, HFA384X_RID_CREATEIBSS, 1);
- if (ret) {
- printk("%s: Create IBSS setting to 1 failed\n",
- dev->name);
- }
- }
-
- if (local->name_set)
- (void) hostap_set_string(dev, HFA384X_RID_CNFOWNNAME,
- local->name);
-
- if (hostap_set_encryption(local)) {
- printk(KERN_INFO "%s: could not configure encryption\n",
- dev->name);
- }
-
- (void) hostap_set_antsel(local);
-
- if (hostap_set_roaming(local)) {
- printk(KERN_INFO "%s: could not set host roaming\n",
- dev->name);
- }
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,6,3) &&
- hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY, local->enh_sec))
- printk(KERN_INFO "%s: cnfEnhSecurity setting to 0x%x failed\n",
- dev->name, local->enh_sec);
-
- /* 32-bit tallies were added in STA f/w 0.8.0, but they were apparently
- * not working correctly (last seven counters report bogus values).
- * This has been fixed in 0.8.2, so enable 32-bit tallies only
- * beginning with that firmware version. Another bug fix for 32-bit
- * tallies in 1.4.0; should 16-bit tallies be used for some other
- * versions, too? */
- if (local->sta_fw_ver >= PRISM2_FW_VER(0,8,2)) {
- if (hostap_set_word(dev, HFA384X_RID_CNFTHIRTY2TALLY, 1)) {
- printk(KERN_INFO "%s: cnfThirty2Tally setting "
- "failed\n", dev->name);
- local->tallies32 = 0;
- } else
- local->tallies32 = 1;
- } else
- local->tallies32 = 0;
-
- hostap_set_auth_algs(local);
-
- if (hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
- local->fragm_threshold)) {
- printk(KERN_INFO "%s: setting FragmentationThreshold to %d "
- "failed\n", dev->name, local->fragm_threshold);
- }
-
- if (hostap_set_word(dev, HFA384X_RID_RTSTHRESHOLD,
- local->rts_threshold)) {
- printk(KERN_INFO "%s: setting RTSThreshold to %d failed\n",
- dev->name, local->rts_threshold);
- }
-
- if (local->manual_retry_count >= 0 &&
- hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
- local->manual_retry_count)) {
- printk(KERN_INFO "%s: setting cnfAltRetryCount to %d failed\n",
- dev->name, local->manual_retry_count);
- }
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1) &&
- hfa384x_get_rid(dev, HFA384X_RID_CNFDBMADJUST, &tmp, 2, 1) == 2) {
- local->rssi_to_dBm = le16_to_cpu(tmp);
- }
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->wpa &&
- hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1)) {
- printk(KERN_INFO "%s: setting ssnHandlingMode to 1 failed\n",
- dev->name);
- }
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->generic_elem &&
- hfa384x_set_rid(dev, HFA384X_RID_GENERICELEMENT,
- local->generic_elem, local->generic_elem_len)) {
- printk(KERN_INFO "%s: setting genericElement failed\n",
- dev->name);
- }
-
- fail:
- return ret;
-}
-
-
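-/**
- * prism2_hw_init - First phase of card initialization
- * @dev: pointer to net_device
- * @initial: whether this is the initial configuration (not used here)
- *
- * Issue the Initialize command and wait for its completion. Returns 0 on
- * success and 1 if initialization did not complete (e.g., no primary
- * firmware image in flash).
- */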
-static int prism2_hw_init(struct net_device *dev, int initial)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int ret, first = 1;
- unsigned long start, delay;
-
- PDEBUG(DEBUG_FLOW, "prism2_hw_init()\n");
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits);
-
- init:
- /* initialize HFA 384x */
- ret = hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_INIT, 0);
- if (ret) {
- printk(KERN_INFO "%s: first command failed - assuming card "
- "does not have primary firmware\n", dev_info);
- }
-
- if (first && (HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
- /* EvStat has Cmd bit set in some cases, so retry once if no
- * wait was needed */
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
- printk(KERN_DEBUG "%s: init command completed too quickly - "
- "retrying\n", dev->name);
- first = 0;
- goto init;
- }
-
- start = jiffies;
- delay = jiffies + HFA384X_INIT_TIMEOUT;
- while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
- time_before(jiffies, delay))
- yield();
- if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
- printk(KERN_DEBUG "%s: assuming no Primary image in "
- "flash - card initialization not completed\n",
- dev_info);
- local->no_pri = 1;
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- if (local->sram_type == -1)
- local->sram_type = prism2_get_ram_size(local);
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
- return 1;
- }
- local->no_pri = 0;
- printk(KERN_DEBUG "prism2_hw_init: initialized in %lu ms\n",
- (jiffies - start) * 1000 / HZ);
- HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
- return 0;
-}
-
-
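-/**
- * prism2_hw_init2 - Second phase of card initialization
- * @dev: pointer to net_device
- * @initial: whether this is the initial configuration of the device
- *
- * Verify register access, read component version information, allocate TX
- * FIDs, and set up the RIDs. On the initial call, also read the MAC address
- * and copy it to all Host AP interfaces. Returns 0 on success, 1 on failure.
- */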
-static int prism2_hw_init2(struct net_device *dev, int initial)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int i;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- kfree(local->pda);
- if (local->no_pri)
- local->pda = NULL;
- else
- local->pda = prism2_read_pda(dev);
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
- hfa384x_disable_interrupts(dev);
-
-#ifndef final_version
- HFA384X_OUTW(HFA384X_MAGIC, HFA384X_SWSUPPORT0_OFF);
- if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
- printk("SWSUPPORT0 write/read failed: %04X != %04X\n",
- HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC);
- goto failed;
- }
-#endif
-
- if (initial || local->pri_only) {
- hfa384x_events_only_cmd(dev);
- /* get card version information */
- if (prism2_get_version_info(dev, HFA384X_RID_NICID, "NIC") ||
- prism2_get_version_info(dev, HFA384X_RID_PRIID, "PRI")) {
- hfa384x_disable_interrupts(dev);
- goto failed;
- }
-
- if (prism2_get_version_info(dev, HFA384X_RID_STAID, "STA")) {
- printk(KERN_DEBUG "%s: Failed to read STA f/w version "
- "- only Primary f/w present\n", dev->name);
- local->pri_only = 1;
- return 0;
- }
- local->pri_only = 0;
- hfa384x_disable_interrupts(dev);
- }
-
- /* FIX: could convert allocate_fid to use sleeping CmdCompl wait and
- * enable interrupts before this. This would also require some sort of
- * sleeping AllocEv waiting */
-
- /* allocate TX FIDs */
- local->txfid_len = PRISM2_TXFID_LEN;
- for (i = 0; i < PRISM2_TXFID_COUNT; i++) {
- local->txfid[i] = hfa384x_allocate_fid(dev, local->txfid_len);
- if (local->txfid[i] == 0xffff && local->txfid_len > 1600) {
- local->txfid[i] = hfa384x_allocate_fid(dev, 1600);
- if (local->txfid[i] != 0xffff) {
- printk(KERN_DEBUG "%s: Using shorter TX FID "
- "(1600 bytes)\n", dev->name);
- local->txfid_len = 1600;
- }
- }
- if (local->txfid[i] == 0xffff)
- goto failed;
- local->intransmitfid[i] = PRISM2_TXFID_EMPTY;
- }
-
- hfa384x_events_only_cmd(dev);
-
- if (initial) {
- u8 addr[ETH_ALEN] = {};
- struct list_head *ptr;
-
- prism2_check_sta_fw_version(local);
-
- if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR,
- addr, ETH_ALEN, 1) < 0) {
- printk("%s: could not get own MAC address\n",
- dev->name);
- }
- eth_hw_addr_set(dev, addr);
- list_for_each(ptr, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- eth_hw_addr_inherit(iface->dev, dev);
- }
- } else if (local->fw_ap)
- prism2_check_sta_fw_version(local);
-
- prism2_setup_rids(dev);
-
- /* MAC is now configured, but port 0 is not yet enabled */
- return 0;
-
- failed:
- if (!local->no_pri)
- printk(KERN_WARNING "%s: Initialization failed\n", dev_info);
- return 1;
-}
-
-
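-/**
- * prism2_hw_enable - Enable MAC port 0 and interrupts
- * @dev: pointer to net_device
- * @initial: whether this is the initial configuration of the device
- *
- * Returns 0 on success, 1 on failure. If the hardware was reset during a
- * pending transmit, the stopped netif queue is woken up here.
- */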
-static int prism2_hw_enable(struct net_device *dev, int initial)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int was_resetting;
-
- iface = netdev_priv(dev);
- local = iface->local;
- was_resetting = local->hw_resetting;
-
- if (hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL)) {
- printk("%s: MAC port 0 enabling failed\n", dev->name);
- return 1;
- }
-
- local->hw_ready = 1;
- local->hw_reset_tries = 0;
- local->hw_resetting = 0;
- hfa384x_enable_interrupts(dev);
-
- /* at least D-Link DWL-650 seems to require additional port reset
- * before it starts acting as an AP, so reset port automatically
- * here just in case */
- if (initial && prism2_reset_port(dev)) {
- printk("%s: MAC port 0 resetting failed\n", dev->name);
- return 1;
- }
-
- if (was_resetting && netif_queue_stopped(dev)) {
- /* If hw_reset() was called during a pending transmit, the netif
- * queue was stopped. Wake it up now since the wlan card has
- * been reset. */
- netif_wake_queue(dev);
- }
-
- return 0;
-}
-
-
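-/**
- * prism2_hw_config - Initialize and (re)configure the hardware
- * @dev: pointer to net_device
- * @initial: configuration type (0 is used, e.g., after a hw reset)
- *
- * Run both initialization phases and enable the firmware when a secondary
- * image is loaded and the conditions checked below are met. Returns 0 on
- * success, 1 on failure.
- */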
-static int prism2_hw_config(struct net_device *dev, int initial)
-{
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->hw_downloading)
- return 1;
-
- if (prism2_hw_init(dev, initial)) {
- return local->no_pri ? 0 : 1;
- }
-
- if (prism2_hw_init2(dev, initial))
- return 1;
-
- /* Enable firmware if secondary image is loaded and at least one of the
- * netdevices is up. */
- if (!local->pri_only &&
- (initial == 0 || (initial == 2 && local->num_dev_open > 0))) {
- if (!local->dev_enabled)
- prism2_callback(local, PRISM2_CALLBACK_ENABLE);
- local->dev_enabled = 1;
- return prism2_hw_enable(dev, initial);
- }
-
- return 0;
-}
-
-
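-/**
- * prism2_hw_shutdown - Disable the hardware
- * @dev: pointer to net_device
- * @no_disable: HOSTAP_HW_* flags controlling how much is left enabled
- *
- * Disable interrupts and, unless HOSTAP_HW_NO_DISABLE is set, MAC port 0.
- * Pending command queue entries are cleared unless command completion
- * events are kept enabled with HOSTAP_HW_ENABLE_CMDCOMPL.
- */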
-static void prism2_hw_shutdown(struct net_device *dev, int no_disable)
-{
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Allow only command completion events during disable */
- hfa384x_events_only_cmd(dev);
-
- local->hw_ready = 0;
- if (local->dev_enabled)
- prism2_callback(local, PRISM2_CALLBACK_DISABLE);
- local->dev_enabled = 0;
-
- if (local->func->card_present && !local->func->card_present(local)) {
- printk(KERN_DEBUG "%s: card already removed or not configured "
- "during shutdown\n", dev->name);
- return;
- }
-
- if ((no_disable & HOSTAP_HW_NO_DISABLE) == 0 &&
- hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL))
- printk(KERN_WARNING "%s: Shutdown failed\n", dev_info);
-
- hfa384x_disable_interrupts(dev);
-
- if (no_disable & HOSTAP_HW_ENABLE_CMDCOMPL)
- hfa384x_events_only_cmd(dev);
- else
- prism2_clear_cmd_queue(local);
-}
-
-
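-/**
- * prism2_hw_reset - Reset and reconfigure the card after a fatal error
- * @dev: pointer to net_device
- *
- * Shut the hardware down, perform a COR sreset if the hardware model
- * supports it, and run the full hardware configuration again. Persistently
- * downloaded firmware images are re-downloaded afterwards.
- */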
-static void prism2_hw_reset(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
-
-#if 0
- static long last_reset = 0;
-
- /* do not reset card more than once per second to avoid ending up in a
- * busy loop resetting the card */
- if (time_before_eq(jiffies, last_reset + HZ))
- return;
- last_reset = jiffies;
-#endif
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->hw_downloading)
- return;
-
- if (local->hw_resetting) {
- printk(KERN_WARNING "%s: %s: already resetting card - "
- "ignoring reset request\n", dev_info, dev->name);
- return;
- }
-
- local->hw_reset_tries++;
- if (local->hw_reset_tries > 10) {
- printk(KERN_WARNING "%s: too many reset tries, skipping\n",
- dev->name);
- return;
- }
-
- printk(KERN_WARNING "%s: %s: resetting card\n", dev_info, dev->name);
- hfa384x_disable_interrupts(dev);
- local->hw_resetting = 1;
- if (local->func->cor_sreset) {
- /* The host system seems to hang in some cases with high traffic
- * load or shared interrupts during COR sreset. Disable shared
- * interrupts during reset to avoid these crashes. COR sreset
- * takes quite a long time, so it is unfortunate that this
- * seems to be needed. Anyway, I do not know of any better way
- * of avoiding the crash. */
- disable_irq(dev->irq);
- local->func->cor_sreset(local);
- enable_irq(dev->irq);
- }
- prism2_hw_shutdown(dev, 1);
- prism2_hw_config(dev, 0);
- local->hw_resetting = 0;
-
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- if (local->dl_pri) {
- printk(KERN_DEBUG "%s: persistent download of primary "
- "firmware\n", dev->name);
- if (prism2_download_genesis(local, local->dl_pri) < 0)
- printk(KERN_WARNING "%s: download (PRI) failed\n",
- dev->name);
- }
-
- if (local->dl_sec) {
- printk(KERN_DEBUG "%s: persistent download of secondary "
- "firmware\n", dev->name);
- if (prism2_download_volatile(local, local->dl_sec) < 0)
- printk(KERN_WARNING "%s: download (SEC) failed\n",
- dev->name);
- }
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
- /* TODO: restore beacon TIM bits for STAs that have buffered frames */
-}
-
-
-static void prism2_schedule_reset(local_info_t *local)
-{
- schedule_work(&local->reset_queue);
-}
-
-
-/* Called only as scheduled task after noticing card timeout in interrupt
- * context */
-static void handle_reset_queue(struct work_struct *work)
-{
- local_info_t *local = container_of(work, local_info_t, reset_queue);
-
- printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
- prism2_hw_reset(local->dev);
-
- if (netif_queue_stopped(local->dev)) {
- int i;
-
- for (i = 0; i < PRISM2_TXFID_COUNT; i++)
- if (local->intransmitfid[i] == PRISM2_TXFID_EMPTY) {
- PDEBUG(DEBUG_EXTRA, "prism2_tx_timeout: "
- "wake up queue\n");
- netif_wake_queue(local->dev);
- break;
- }
- }
-}
-
-
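-/**
- * prism2_get_txfid_idx - Reserve a free TX FID slot
- * @local: pointer to private Host AP driver data
- *
- * Returns the index of the reserved slot, or -1 (and increments the
- * tx_dropped counter) if all TX FIDs are in use.
- */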
-static int prism2_get_txfid_idx(local_info_t *local)
-{
- int idx, end;
- unsigned long flags;
-
- spin_lock_irqsave(&local->txfidlock, flags);
- end = idx = local->next_txfid;
- do {
- if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
- local->intransmitfid[idx] = PRISM2_TXFID_RESERVED;
- spin_unlock_irqrestore(&local->txfidlock, flags);
- return idx;
- }
- idx++;
- if (idx >= PRISM2_TXFID_COUNT)
- idx = 0;
- } while (idx != end);
- spin_unlock_irqrestore(&local->txfidlock, flags);
-
- PDEBUG(DEBUG_EXTRA2, "prism2_get_txfid_idx: no room in txfid buf: "
- "packet dropped\n");
- local->dev->stats.tx_dropped++;
-
- return -1;
-}
-
-
-/* Called only from hardware IRQ */
-static void prism2_transmit_cb(struct net_device *dev, long context,
- u16 resp0, u16 res)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int idx = (int) context;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (res) {
- printk(KERN_DEBUG "%s: prism2_transmit_cb - res=0x%02x\n",
- dev->name, res);
- return;
- }
-
- if (idx < 0 || idx >= PRISM2_TXFID_COUNT) {
- printk(KERN_DEBUG "%s: prism2_transmit_cb called with invalid "
- "idx=%d\n", dev->name, idx);
- return;
- }
-
- if (!test_and_clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
- printk(KERN_DEBUG "%s: driver bug: prism2_transmit_cb called "
- "with no pending transmit\n", dev->name);
- }
-
- if (netif_queue_stopped(dev)) {
- /* ready for next TX, so wake up queue that was stopped in
- * prism2_transmit() */
- netif_wake_queue(dev);
- }
-
- spin_lock(&local->txfidlock);
-
- /* With reclaim, Resp0 contains new txfid for transmit; the old txfid
- * will be automatically allocated for the next TX frame */
- local->intransmitfid[idx] = resp0;
-
- PDEBUG(DEBUG_FID, "%s: prism2_transmit_cb: txfid[%d]=0x%04x, "
- "resp0=0x%04x, transmit_txfid=0x%04x\n",
- dev->name, idx, local->txfid[idx],
- resp0, local->intransmitfid[local->next_txfid]);
-
- idx++;
- if (idx >= PRISM2_TXFID_COUNT)
- idx = 0;
- local->next_txfid = idx;
-
- /* check if all TX buffers are occupied */
- do {
- if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
- spin_unlock(&local->txfidlock);
- return;
- }
- idx++;
- if (idx >= PRISM2_TXFID_COUNT)
- idx = 0;
- } while (idx != local->next_txfid);
- spin_unlock(&local->txfidlock);
-
- /* no empty TX buffers, stop queue */
- netif_stop_queue(dev);
-}
-
-
-/* Called only from software IRQ if PCI bus master is not used (with bus master
- * this can be called both from software and hardware IRQ) */
-static int prism2_transmit(struct net_device *dev, int idx)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int res;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* The driver tries to stop the netif queue so that no more than one
- * frame transmission is in progress at a time; check that this is
- * really the case */
-
- if (test_and_set_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
- printk(KERN_DEBUG "%s: driver bug - prism2_transmit() called "
- "when previous TX was pending\n", dev->name);
- return -1;
- }
-
- /* stop the queue for the time that transmit is pending */
- netif_stop_queue(dev);
-
- /* transmit packet */
- res = hfa384x_cmd_callback(
- dev,
- HFA384X_CMDCODE_TRANSMIT | HFA384X_CMD_TX_RECLAIM,
- local->txfid[idx],
- prism2_transmit_cb, (long) idx);
-
- if (res) {
- printk(KERN_DEBUG "%s: prism2_transmit: CMDCODE_TRANSMIT "
- "failed (res=%d)\n", dev->name, res);
- dev->stats.tx_dropped++;
- netif_wake_queue(dev);
- return -1;
- }
- netif_trans_update(dev);
-
- /* Since we did not wait for command completion, the card continues
- * to process on the background and we will finish handling when
- * command completion event is handled (prism2_cmd_ev() function) */
-
- return 0;
-}
-
-
-/* Send IEEE 802.11 frame (convert the header into Prism2 TX descriptor and
- * send the payload with this descriptor) */
-/* Called only from software IRQ */
-static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hfa384x_tx_frame txdesc;
- struct hostap_skb_tx_data *meta;
- int hdr_len, data_len, idx, res, ret = -1;
- u16 tx_control;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- meta = (struct hostap_skb_tx_data *) skb->cb;
-
- prism2_callback(local, PRISM2_CALLBACK_TX_START);
-
- if ((local->func->card_present && !local->func->card_present(local)) ||
- !local->hw_ready || local->hw_downloading || local->pri_only) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: prism2_tx_80211: hw not ready -"
- " skipping\n", dev->name);
- }
- goto fail;
- }
-
- memset(&txdesc, 0, sizeof(txdesc));
-
- /* skb->data starts with txdesc->frame_control */
- hdr_len = sizeof(txdesc.header);
- BUILD_BUG_ON(hdr_len != 24);
- skb_copy_from_linear_data(skb, &txdesc.header, hdr_len);
- if (ieee80211_is_data(txdesc.frame_control) &&
- ieee80211_has_a4(txdesc.frame_control) &&
- skb->len >= 30) {
- /* Addr4 */
- skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4,
- ETH_ALEN);
- hdr_len += ETH_ALEN;
- }
-
- tx_control = local->tx_control;
- if (meta->tx_cb_idx) {
- tx_control |= HFA384X_TX_CTRL_TX_OK;
- txdesc.sw_support = cpu_to_le32(meta->tx_cb_idx);
- }
- txdesc.tx_control = cpu_to_le16(tx_control);
- txdesc.tx_rate = meta->rate;
-
- data_len = skb->len - hdr_len;
- txdesc.data_len = cpu_to_le16(data_len);
- txdesc.len = cpu_to_be16(data_len);
-
- idx = prism2_get_txfid_idx(local);
- if (idx < 0)
- goto fail;
-
- if (local->frame_dump & PRISM2_DUMP_TX_HDR)
- hostap_dump_tx_header(dev->name, &txdesc);
-
- spin_lock(&local->baplock);
- res = hfa384x_setup_bap(dev, BAP0, local->txfid[idx], 0);
-
- if (!res)
- res = hfa384x_to_bap(dev, BAP0, &txdesc, sizeof(txdesc));
- if (!res)
- res = hfa384x_to_bap(dev, BAP0, skb->data + hdr_len,
- skb->len - hdr_len);
- spin_unlock(&local->baplock);
-
- if (!res)
- res = prism2_transmit(dev, idx);
- if (res) {
- printk(KERN_DEBUG "%s: prism2_tx_80211 - to BAP0 failed\n",
- dev->name);
- local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
- schedule_work(&local->reset_queue);
- goto fail;
- }
-
- ret = 0;
-
-fail:
- prism2_callback(local, PRISM2_CALLBACK_TX_END);
- return ret;
-}
-
-
-/* Some SMP systems have reported a number of odd errors with hostap_pci: the
- * fid register has changed value between consecutive reads for an unknown
- * reason. This should really not happen, so more debugging is needed. This
- * test version is a bit slower, but it will detect most such register changes
- * and will try to get the correct fid eventually. */
-#define EXTRA_FID_READ_TESTS
-
-static u16 prism2_read_fid_reg(struct net_device *dev, u16 reg)
-{
-#ifdef EXTRA_FID_READ_TESTS
- u16 val, val2, val3;
- int i;
-
- for (i = 0; i < 10; i++) {
- val = HFA384X_INW(reg);
- val2 = HFA384X_INW(reg);
- val3 = HFA384X_INW(reg);
-
- if (val == val2 && val == val3)
- return val;
-
- printk(KERN_DEBUG "%s: detected fid change (try=%d, reg=%04x):"
- " %04x %04x %04x\n",
- dev->name, i, reg, val, val2, val3);
- if ((val == val2 || val == val3) && val != 0)
- return val;
- if (val2 == val3 && val2 != 0)
- return val2;
- }
- printk(KERN_WARNING "%s: Uhhuh.. could not read good fid from reg "
- "%04x (%04x %04x %04x)\n", dev->name, reg, val, val2, val3);
- return val;
-#else /* EXTRA_FID_READ_TESTS */
- return HFA384X_INW(reg);
-#endif /* EXTRA_FID_READ_TESTS */
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_rx(local_info_t *local)
-{
- struct net_device *dev = local->dev;
- int res, rx_pending = 0;
- u16 len, hdr_len, rxfid, status, macport;
- struct hfa384x_rx_frame rxdesc;
- struct sk_buff *skb = NULL;
-
- prism2_callback(local, PRISM2_CALLBACK_RX_START);
-
- rxfid = prism2_read_fid_reg(dev, HFA384X_RXFID_OFF);
-#ifndef final_version
- if (rxfid == 0) {
- rxfid = HFA384X_INW(HFA384X_RXFID_OFF);
- printk(KERN_DEBUG "prism2_rx: rxfid=0 (next 0x%04x)\n",
- rxfid);
- if (rxfid == 0) {
- schedule_work(&local->reset_queue);
- goto rx_dropped;
- }
- /* try to continue with the new rxfid value */
- }
-#endif
-
- spin_lock(&local->baplock);
- res = hfa384x_setup_bap(dev, BAP0, rxfid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &rxdesc, sizeof(rxdesc));
-
- if (res) {
- spin_unlock(&local->baplock);
- printk(KERN_DEBUG "%s: copy from BAP0 failed %d\n", dev->name,
- res);
- if (res == -ETIMEDOUT) {
- schedule_work(&local->reset_queue);
- }
- goto rx_dropped;
- }
-
- len = le16_to_cpu(rxdesc.data_len);
- hdr_len = sizeof(rxdesc);
- status = le16_to_cpu(rxdesc.status);
- macport = (status >> 8) & 0x07;
-
- /* Drop frames with a too large reported payload length. Monitor mode
- * sometimes seems to pass frames (e.g., ctrl::ack) with a signed,
- * negative length, so also allow values 65522 .. 65534 (-14 .. -2) for
- * macport 7 */
- if (len > PRISM2_DATA_MAXLEN + 8 /* WEP */) {
- if (macport == 7 && local->iw_mode == IW_MODE_MONITOR) {
- if (len >= (u16) -14) {
- hdr_len -= 65535 - len;
- hdr_len--;
- }
- len = 0;
- } else {
- spin_unlock(&local->baplock);
- printk(KERN_DEBUG "%s: Received frame with invalid "
- "length 0x%04x\n", dev->name, len);
- hostap_dump_rx_header(dev->name, &rxdesc);
- goto rx_dropped;
- }
- }
-
- skb = dev_alloc_skb(len + hdr_len);
- if (!skb) {
- spin_unlock(&local->baplock);
- printk(KERN_DEBUG "%s: RX failed to allocate skb\n",
- dev->name);
- goto rx_dropped;
- }
- skb->dev = dev;
- skb_put_data(skb, &rxdesc, hdr_len);
-
- if (len > 0)
- res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len);
- spin_unlock(&local->baplock);
- if (res) {
- printk(KERN_DEBUG "%s: RX failed to read "
- "frame data\n", dev->name);
- goto rx_dropped;
- }
-
- skb_queue_tail(&local->rx_list, skb);
- tasklet_schedule(&local->rx_tasklet);
-
- rx_exit:
- prism2_callback(local, PRISM2_CALLBACK_RX_END);
- if (!rx_pending) {
- HFA384X_OUTW(HFA384X_EV_RX, HFA384X_EVACK_OFF);
- }
-
- return;
-
- rx_dropped:
- dev->stats.rx_dropped++;
- if (skb)
- dev_kfree_skb(skb);
- goto rx_exit;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
-{
- struct hfa384x_rx_frame *rxdesc;
- struct net_device *dev = skb->dev;
- struct hostap_80211_rx_status stats;
- int hdrlen, rx_hdrlen;
-
- rx_hdrlen = sizeof(*rxdesc);
- if (skb->len < sizeof(*rxdesc)) {
- /* Allow monitor mode to receive shorter frames */
- if (local->iw_mode == IW_MODE_MONITOR &&
- skb->len >= sizeof(*rxdesc) - 30) {
- rx_hdrlen = skb->len;
- } else {
- dev_kfree_skb(skb);
- return;
- }
- }
-
- rxdesc = (struct hfa384x_rx_frame *) skb->data;
-
- if (local->frame_dump & PRISM2_DUMP_RX_HDR &&
- skb->len >= sizeof(*rxdesc))
- hostap_dump_rx_header(dev->name, rxdesc);
-
- if (le16_to_cpu(rxdesc->status) & HFA384X_RX_STATUS_FCSERR &&
- (!local->monitor_allow_fcserr ||
- local->iw_mode != IW_MODE_MONITOR))
- goto drop;
-
- if (skb->len > PRISM2_DATA_MAXLEN) {
- printk(KERN_DEBUG "%s: RX: len(%d) > MAX(%d)\n",
- dev->name, skb->len, PRISM2_DATA_MAXLEN);
- goto drop;
- }
-
- stats.mac_time = le32_to_cpu(rxdesc->time);
- stats.signal = rxdesc->signal - local->rssi_to_dBm;
- stats.noise = rxdesc->silence - local->rssi_to_dBm;
- stats.rate = rxdesc->rate;
-
- /* Convert Prism2 RX structure into IEEE 802.11 header */
- hdrlen = hostap_80211_get_hdrlen(rxdesc->frame_control);
- if (hdrlen > rx_hdrlen)
- hdrlen = rx_hdrlen;
-
- memmove(skb_pull(skb, rx_hdrlen - hdrlen),
- &rxdesc->frame_control, hdrlen);
-
- hostap_80211_rx(dev, skb, &stats);
- return;
-
- drop:
- dev_kfree_skb(skb);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_rx_tasklet(struct tasklet_struct *t)
-{
- local_info_t *local = from_tasklet(local, t, rx_tasklet);
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&local->rx_list)) != NULL)
- hostap_rx_skb(local, skb);
-}
-
-
-/* Called only from hardware IRQ */
-static void prism2_alloc_ev(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int idx;
- u16 fid;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- fid = prism2_read_fid_reg(dev, HFA384X_ALLOCFID_OFF);
-
- PDEBUG(DEBUG_FID, "FID: interrupt: ALLOC - fid=0x%04x\n", fid);
-
- spin_lock(&local->txfidlock);
- idx = local->next_alloc;
-
- do {
- if (local->txfid[idx] == fid) {
- PDEBUG(DEBUG_FID, "FID: found matching txfid[%d]\n",
- idx);
-
-#ifndef final_version
- if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY)
- printk("Already released txfid found at idx "
- "%d\n", idx);
- if (local->intransmitfid[idx] == PRISM2_TXFID_RESERVED)
- printk("Already reserved txfid found at idx "
- "%d\n", idx);
-#endif
- local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
- idx++;
- local->next_alloc = idx >= PRISM2_TXFID_COUNT ? 0 :
- idx;
-
- if (!test_bit(HOSTAP_BITS_TRANSMIT, &local->bits) &&
- netif_queue_stopped(dev))
- netif_wake_queue(dev);
-
- spin_unlock(&local->txfidlock);
- return;
- }
-
- idx++;
- if (idx >= PRISM2_TXFID_COUNT)
- idx = 0;
- } while (idx != local->next_alloc);
-
- printk(KERN_WARNING "%s: could not find matching txfid (0x%04x, new "
- "read 0x%04x) for alloc event\n", dev->name, fid,
- HFA384X_INW(HFA384X_ALLOCFID_OFF));
- printk(KERN_DEBUG "TXFIDs:");
- for (idx = 0; idx < PRISM2_TXFID_COUNT; idx++)
- printk(" %04x[%04x]", local->txfid[idx],
- local->intransmitfid[idx]);
- printk("\n");
- spin_unlock(&local->txfidlock);
-
- /* FIX: should probably schedule a reset; the reference to one txfid
- * was lost completely. Bad things will happen if we run out of txfids.
- * In practice, the netdev watchdog will notice the TX timeout and
- * trigger a card reset once all txfids have been leaked. */
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_tx_callback(local_info_t *local,
- struct hfa384x_tx_frame *txdesc, int ok,
- char *payload)
-{
- u16 sw_support, hdrlen, len;
- struct sk_buff *skb;
- struct hostap_tx_callback_info *cb;
-
- /* Make sure that frame was from us. */
- if (!ether_addr_equal(txdesc->addr2, local->dev->dev_addr)) {
- printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
- local->dev->name);
- return;
- }
-
- sw_support = le32_to_cpu(txdesc->sw_support);
-
- spin_lock(&local->lock);
- cb = local->tx_callback;
- while (cb != NULL && cb->idx != sw_support)
- cb = cb->next;
- spin_unlock(&local->lock);
-
- if (cb == NULL) {
- printk(KERN_DEBUG "%s: could not find TX callback (idx %d)\n",
- local->dev->name, sw_support);
- return;
- }
-
- hdrlen = hostap_80211_get_hdrlen(txdesc->frame_control);
- len = le16_to_cpu(txdesc->data_len);
- skb = dev_alloc_skb(hdrlen + len);
- if (skb == NULL) {
- printk(KERN_DEBUG "%s: hostap_tx_callback failed to allocate "
- "skb\n", local->dev->name);
- return;
- }
-
- skb_put_data(skb, (void *)&txdesc->frame_control, hdrlen);
- if (payload)
- skb_put_data(skb, payload, len);
-
- skb->dev = local->dev;
- skb_reset_mac_header(skb);
-
- cb->func(skb, ok, cb->data);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static int hostap_tx_compl_read(local_info_t *local, int error,
- struct hfa384x_tx_frame *txdesc,
- char **payload)
-{
- u16 fid, len;
- int res, ret = 0;
- struct net_device *dev = local->dev;
-
- fid = prism2_read_fid_reg(dev, HFA384X_TXCOMPLFID_OFF);
-
- PDEBUG(DEBUG_FID, "interrupt: TX (err=%d) - fid=0x%04x\n", fid, error);
-
- spin_lock(&local->baplock);
- res = hfa384x_setup_bap(dev, BAP0, fid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, txdesc, sizeof(*txdesc));
- if (res) {
- PDEBUG(DEBUG_EXTRA, "%s: TX (err=%d) - fid=0x%04x - could not "
- "read txdesc\n", dev->name, error, fid);
- if (res == -ETIMEDOUT) {
- schedule_work(&local->reset_queue);
- }
- ret = -1;
- goto fail;
- }
- if (txdesc->sw_support) {
- len = le16_to_cpu(txdesc->data_len);
- if (len < PRISM2_DATA_MAXLEN) {
- *payload = kmalloc(len, GFP_ATOMIC);
- if (*payload == NULL ||
- hfa384x_from_bap(dev, BAP0, *payload, len)) {
- PDEBUG(DEBUG_EXTRA, "%s: could not read TX "
- "frame payload\n", dev->name);
- kfree(*payload);
- *payload = NULL;
- ret = -1;
- goto fail;
- }
- }
- }
-
- fail:
- spin_unlock(&local->baplock);
-
- return ret;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_tx_ev(local_info_t *local)
-{
- struct net_device *dev = local->dev;
- char *payload = NULL;
- struct hfa384x_tx_frame txdesc;
-
- if (hostap_tx_compl_read(local, 0, &txdesc, &payload))
- goto fail;
-
- if (local->frame_dump & PRISM2_DUMP_TX_HDR) {
- PDEBUG(DEBUG_EXTRA, "%s: TX - status=0x%04x "
- "retry_count=%d tx_rate=%d seq_ctrl=%d "
- "duration_id=%d\n",
- dev->name, le16_to_cpu(txdesc.status),
- txdesc.retry_count, txdesc.tx_rate,
- le16_to_cpu(txdesc.seq_ctrl),
- le16_to_cpu(txdesc.duration_id));
- }
-
- if (txdesc.sw_support)
- hostap_tx_callback(local, &txdesc, 1, payload);
- kfree(payload);
-
- fail:
- HFA384X_OUTW(HFA384X_EV_TX, HFA384X_EVACK_OFF);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_sta_tx_exc_tasklet(struct tasklet_struct *t)
-{
- local_info_t *local = from_tasklet(local, t, sta_tx_exc_tasklet);
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
- struct hfa384x_tx_frame *txdesc =
- (struct hfa384x_tx_frame *) skb->data;
-
- if (skb->len >= sizeof(*txdesc)) {
- /* Convert Prism2 RX structure into IEEE 802.11 header
- */
- int hdrlen = hostap_80211_get_hdrlen(txdesc->frame_control);
- memmove(skb_pull(skb, sizeof(*txdesc) - hdrlen),
- &txdesc->frame_control, hdrlen);
-
- hostap_handle_sta_tx_exc(local, skb);
- }
- dev_kfree_skb(skb);
- }
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_txexc(local_info_t *local)
-{
- struct net_device *dev = local->dev;
- u16 status, fc;
- int show_dump, res;
- char *payload = NULL;
- struct hfa384x_tx_frame txdesc;
-
- show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR;
- dev->stats.tx_errors++;
-
- res = hostap_tx_compl_read(local, 1, &txdesc, &payload);
- HFA384X_OUTW(HFA384X_EV_TXEXC, HFA384X_EVACK_OFF);
- if (res)
- return;
-
- status = le16_to_cpu(txdesc.status);
-
- /* We produce a TXDROP event only for retry or lifetime
- * exceeded, because that is the only status that really means
- * that this particular node went away.
- * Other errors mean that *we* screwed up. - Jean II */
- if (status & (HFA384X_TX_STATUS_RETRYERR | HFA384X_TX_STATUS_AGEDERR))
- {
- union iwreq_data wrqu;
-
- /* Copy 802.11 dest address. */
- memcpy(wrqu.addr.sa_data, txdesc.addr1, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
- } else
- show_dump = 1;
-
- if (local->iw_mode == IW_MODE_MASTER ||
- local->iw_mode == IW_MODE_REPEAT ||
- local->wds_type & HOSTAP_WDS_AP_CLIENT) {
- struct sk_buff *skb;
- skb = dev_alloc_skb(sizeof(txdesc));
- if (skb) {
- skb_put_data(skb, &txdesc, sizeof(txdesc));
- skb_queue_tail(&local->sta_tx_exc_list, skb);
- tasklet_schedule(&local->sta_tx_exc_tasklet);
- }
- }
-
- if (txdesc.sw_support)
- hostap_tx_callback(local, &txdesc, 0, payload);
- kfree(payload);
-
- if (!show_dump)
- return;
-
- PDEBUG(DEBUG_EXTRA, "%s: TXEXC - status=0x%04x (%s%s%s%s)"
- " tx_control=%04x\n",
- dev->name, status,
- status & HFA384X_TX_STATUS_RETRYERR ? "[RetryErr]" : "",
- status & HFA384X_TX_STATUS_AGEDERR ? "[AgedErr]" : "",
- status & HFA384X_TX_STATUS_DISCON ? "[Discon]" : "",
- status & HFA384X_TX_STATUS_FORMERR ? "[FormErr]" : "",
- le16_to_cpu(txdesc.tx_control));
-
- fc = le16_to_cpu(txdesc.frame_control);
- PDEBUG(DEBUG_EXTRA, " retry_count=%d tx_rate=%d fc=0x%04x "
- "(%s%s%s::%d%s%s)\n",
- txdesc.retry_count, txdesc.tx_rate, fc,
- ieee80211_is_mgmt(txdesc.frame_control) ? "Mgmt" : "",
- ieee80211_is_ctl(txdesc.frame_control) ? "Ctrl" : "",
- ieee80211_is_data(txdesc.frame_control) ? "Data" : "",
- (fc & IEEE80211_FCTL_STYPE) >> 4,
- ieee80211_has_tods(txdesc.frame_control) ? " ToDS" : "",
- ieee80211_has_fromds(txdesc.frame_control) ? " FromDS" : "");
- PDEBUG(DEBUG_EXTRA, " A1=%pM A2=%pM A3=%pM A4=%pM\n",
- txdesc.addr1, txdesc.addr2,
- txdesc.addr3, txdesc.addr4);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_info_tasklet(struct tasklet_struct *t)
-{
- local_info_t *local = from_tasklet(local, t, info_tasklet);
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&local->info_list)) != NULL) {
- hostap_info_process(local, skb);
- dev_kfree_skb(skb);
- }
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_info(local_info_t *local)
-{
- struct net_device *dev = local->dev;
- u16 fid;
- int res, left;
- struct hfa384x_info_frame info;
- struct sk_buff *skb;
-
- fid = HFA384X_INW(HFA384X_INFOFID_OFF);
-
- spin_lock(&local->baplock);
- res = hfa384x_setup_bap(dev, BAP0, fid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &info, sizeof(info));
- if (res) {
- spin_unlock(&local->baplock);
- printk(KERN_DEBUG "Could not get info frame (fid=0x%04x)\n",
- fid);
- if (res == -ETIMEDOUT) {
- schedule_work(&local->reset_queue);
- }
- goto out;
- }
-
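-	/* info.len is a count of 16-bit words that includes the type word
-	 * itself, so the remaining payload is (len - 1) words, i.e.,
-	 * (len - 1) * 2 octets still to be read from the BAP. */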
- left = (le16_to_cpu(info.len) - 1) * 2;
-
- if (info.len & cpu_to_le16(0x8000) || info.len == 0 || left > 2060) {
-		/* the data register seems to give 0x8000 in some error cases
-		 * even though the busy bit is not set in the offset register;
-		 * in addition, length must be at least 1 due to the type
-		 * field */
- spin_unlock(&local->baplock);
- printk(KERN_DEBUG "%s: Received info frame with invalid "
- "length 0x%04x (type 0x%04x)\n", dev->name,
- le16_to_cpu(info.len), le16_to_cpu(info.type));
- goto out;
- }
-
- skb = dev_alloc_skb(sizeof(info) + left);
- if (skb == NULL) {
- spin_unlock(&local->baplock);
- printk(KERN_DEBUG "%s: Could not allocate skb for info "
- "frame\n", dev->name);
- goto out;
- }
-
- skb_put_data(skb, &info, sizeof(info));
- if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left))
- {
- spin_unlock(&local->baplock);
- printk(KERN_WARNING "%s: Info frame read failed (fid=0x%04x, "
- "len=0x%04x, type=0x%04x\n", dev->name, fid,
- le16_to_cpu(info.len), le16_to_cpu(info.type));
- dev_kfree_skb(skb);
- goto out;
- }
- spin_unlock(&local->baplock);
-
- skb_queue_tail(&local->info_list, skb);
- tasklet_schedule(&local->info_tasklet);
-
- out:
- HFA384X_OUTW(HFA384X_EV_INFO, HFA384X_EVACK_OFF);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void hostap_bap_tasklet(struct tasklet_struct *t)
-{
- local_info_t *local = from_tasklet(local, t, bap_tasklet);
- struct net_device *dev = local->dev;
- u16 ev;
- int frames = 30;
-
- if (local->func->card_present && !local->func->card_present(local))
- return;
-
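-	/* These bits are diagnostic only: prism2_ev_tick() prints local->bits
-	 * when the software tick watchdog suspects a stuck interrupt, which
-	 * indicates whether the hang happened in the event loop below or
-	 * while re-enabling BAP events at the end. */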
- set_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
-
- /* Process all pending BAP events without generating new interrupts
- * for them */
- while (frames-- > 0) {
- ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
- if (ev == 0xffff || !(ev & HFA384X_BAP0_EVENTS))
- break;
- if (ev & HFA384X_EV_RX)
- prism2_rx(local);
- if (ev & HFA384X_EV_INFO)
- prism2_info(local);
- if (ev & HFA384X_EV_TX)
- prism2_tx_ev(local);
- if (ev & HFA384X_EV_TXEXC)
- prism2_txexc(local);
- }
-
- set_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
- clear_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
-
- /* Enable interrupts for new BAP events */
- hfa384x_events_all(dev);
- clear_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
-}
-
-
-/* Called only from hardware IRQ */
-static void prism2_infdrop(struct net_device *dev)
-{
- static unsigned long last_inquire = 0;
-
- PDEBUG(DEBUG_EXTRA, "%s: INFDROP event\n", dev->name);
-
-	/* some firmware versions seem to get stuck with
-	 * full CommTallies in high traffic load cases; every
-	 * packet will then cause an INFDROP event and the
-	 * CommTallies info frame will not be sent automatically.
-	 * Try to get out of this state by inquiring CommTallies. */
- if (!last_inquire || time_after(jiffies, last_inquire + HZ)) {
- hfa384x_cmd_callback(dev, HFA384X_CMDCODE_INQUIRE,
- HFA384X_INFO_COMMTALLIES, NULL, 0);
- last_inquire = jiffies;
- }
-}
-
-
-/* Called only from hardware IRQ */
-static void prism2_ev_tick(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- u16 evstat, inten;
- static int prev_stuck = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (time_after(jiffies, local->last_tick_timer + 5 * HZ) &&
- local->last_tick_timer) {
- evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
- inten = HFA384X_INW(HFA384X_INTEN_OFF);
- if (!prev_stuck) {
- printk(KERN_INFO "%s: SW TICK stuck? "
- "bits=0x%lx EvStat=%04x IntEn=%04x\n",
- dev->name, local->bits, evstat, inten);
- }
- local->sw_tick_stuck++;
- if ((evstat & HFA384X_BAP0_EVENTS) &&
- (inten & HFA384X_BAP0_EVENTS)) {
- printk(KERN_INFO "%s: trying to recover from IRQ "
- "hang\n", dev->name);
- hfa384x_events_no_bap0(dev);
- }
- prev_stuck = 1;
- } else
- prev_stuck = 0;
-}
-
-
-/* Called only from hardware IRQ */
-static void prism2_check_magic(local_info_t *local)
-{
-	/* at least PCI Prism2.5 with bus mastering seems to sometimes
-	 * return 0x0000 in SWSUPPORT0 for an unknown reason, but re-reading
-	 * the register once or twice seems to get the correct value. PCI
-	 * cards cannot be removed during normal operation anyway, so there is
-	 * not really any need for this verification with them. */
-
-#ifndef PRISM2_PCI
-#ifndef final_version
- static unsigned long last_magic_err = 0;
- struct net_device *dev = local->dev;
-
- if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
- if (!local->hw_ready)
- return;
- HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
- if (time_after(jiffies, last_magic_err + 10 * HZ)) {
- printk("%s: Interrupt, but SWSUPPORT0 does not match: "
- "%04X != %04X - card removed?\n", dev->name,
- HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
- HFA384X_MAGIC);
- last_magic_err = jiffies;
- } else if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: interrupt - SWSUPPORT0=%04x "
- "MAGIC=%04x\n", dev->name,
- HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
- HFA384X_MAGIC);
- }
- if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != 0xffff)
- schedule_work(&local->reset_queue);
- return;
- }
-#endif /* final_version */
-#endif /* !PRISM2_PCI */
-}
-
-
-/* Called only from hardware IRQ */
-static irqreturn_t prism2_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct hostap_interface *iface;
- local_info_t *local;
- int events = 0;
- u16 ev;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Detect early interrupt before driver is fully configured */
- spin_lock(&local->irq_init_lock);
- if (!dev->base_addr) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
- dev->name);
- }
- spin_unlock(&local->irq_init_lock);
- return IRQ_HANDLED;
- }
- spin_unlock(&local->irq_init_lock);
-
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
-
- if (local->func->card_present && !local->func->card_present(local)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: Interrupt, but dev not OK\n",
- dev->name);
- }
- return IRQ_HANDLED;
- }
-
- prism2_check_magic(local);
-
- for (;;) {
- ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
- if (ev == 0xffff) {
- if (local->shutdown)
- return IRQ_HANDLED;
- HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
- printk(KERN_DEBUG "%s: prism2_interrupt: ev=0xffff\n",
- dev->name);
- return IRQ_HANDLED;
- }
-
- ev &= HFA384X_INW(HFA384X_INTEN_OFF);
- if (ev == 0)
- break;
-
- if (ev & HFA384X_EV_CMD) {
- prism2_cmd_ev(dev);
- }
-
- /* Above events are needed even before hw is ready, but other
- * events should be skipped during initialization. This may
- * change for AllocEv if allocate_fid is implemented without
- * busy waiting. */
- if (!local->hw_ready || local->hw_resetting ||
- !local->dev_enabled) {
- ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
- if (ev & HFA384X_EV_CMD)
- goto next_event;
- if ((ev & HFA384X_EVENT_MASK) == 0)
- return IRQ_HANDLED;
- if (local->dev_enabled && (ev & ~HFA384X_EV_TICK) &&
- net_ratelimit()) {
- printk(KERN_DEBUG "%s: prism2_interrupt: hw "
- "not ready; skipping events 0x%04x "
- "(IntEn=0x%04x)%s%s%s\n",
- dev->name, ev,
- HFA384X_INW(HFA384X_INTEN_OFF),
- !local->hw_ready ? " (!hw_ready)" : "",
- local->hw_resetting ?
- " (hw_resetting)" : "",
- !local->dev_enabled ?
- " (!dev_enabled)" : "");
- }
- HFA384X_OUTW(ev, HFA384X_EVACK_OFF);
- return IRQ_HANDLED;
- }
-
- if (ev & HFA384X_EV_TICK) {
- prism2_ev_tick(dev);
- HFA384X_OUTW(HFA384X_EV_TICK, HFA384X_EVACK_OFF);
- }
-
- if (ev & HFA384X_EV_ALLOC) {
- prism2_alloc_ev(dev);
- HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
- }
-
-		/* Reading data from the card is quite time-consuming, so do
-		 * it in tasklets. TX, TXEXC, RX, and INFO events will be
-		 * ACKed and unmasked after the needed data has been read
-		 * completely. */
- if (ev & HFA384X_BAP0_EVENTS) {
- hfa384x_events_no_bap0(dev);
- tasklet_schedule(&local->bap_tasklet);
- }
-
-#ifndef final_version
- if (ev & HFA384X_EV_WTERR) {
- PDEBUG(DEBUG_EXTRA, "%s: WTERR event\n", dev->name);
- HFA384X_OUTW(HFA384X_EV_WTERR, HFA384X_EVACK_OFF);
- }
-#endif /* final_version */
-
- if (ev & HFA384X_EV_INFDROP) {
- prism2_infdrop(dev);
- HFA384X_OUTW(HFA384X_EV_INFDROP, HFA384X_EVACK_OFF);
- }
-
- next_event:
- events++;
- if (events >= PRISM2_MAX_INTERRUPT_EVENTS) {
- PDEBUG(DEBUG_EXTRA, "prism2_interrupt: >%d events "
- "(EvStat=0x%04x)\n",
- PRISM2_MAX_INTERRUPT_EVENTS,
- HFA384X_INW(HFA384X_EVSTAT_OFF));
- break;
- }
- }
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 1);
- return IRQ_RETVAL(events);
-}
-
-
-static void prism2_check_sta_fw_version(local_info_t *local)
-{
- struct hfa384x_comp_ident comp;
- int id, variant, major, minor;
-
- if (hfa384x_get_rid(local->dev, HFA384X_RID_STAID,
- &comp, sizeof(comp), 1) < 0)
- return;
-
- local->fw_ap = 0;
- id = le16_to_cpu(comp.id);
- if (id != HFA384X_COMP_ID_STA) {
- if (id == HFA384X_COMP_ID_FW_AP)
- local->fw_ap = 1;
- return;
- }
-
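-	/* PRISM2_FW_VER() packs major/minor/variant into a single integer
-	 * whose ordering matches the version ordering, so the checks below
-	 * can use plain numeric comparisons. */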
- major = __le16_to_cpu(comp.major);
- minor = __le16_to_cpu(comp.minor);
- variant = __le16_to_cpu(comp.variant);
- local->sta_fw_ver = PRISM2_FW_VER(major, minor, variant);
-
- /* Station firmware versions before 1.4.x seem to have a bug in
- * firmware-based WEP encryption when using Host AP mode, so use
- * host_encrypt as a default for them. Firmware version 1.4.9 is the
- * first one that has been seen to produce correct encryption, but the
- * bug might be fixed before that (although, at least 1.4.2 is broken).
- */
- local->fw_encrypt_ok = local->sta_fw_ver >= PRISM2_FW_VER(1,4,9);
-
- if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
- !local->fw_encrypt_ok) {
- printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
- "a workaround for firmware bug in Host AP mode WEP\n",
- local->dev->name);
- local->host_encrypt = 1;
- }
-
- /* IEEE 802.11 standard compliant WDS frames (4 addresses) were broken
- * in station firmware versions before 1.5.x. With these versions, the
- * driver uses a workaround with bogus frame format (4th address after
- * the payload). This is not compatible with other AP devices. Since
- * the firmware bug is fixed in the latest station firmware versions,
- * automatically enable standard compliant mode for cards using station
- * firmware version 1.5.0 or newer. */
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,5,0))
- local->wds_type |= HOSTAP_WDS_STANDARD_FRAME;
- else {
- printk(KERN_DEBUG "%s: defaulting to bogus WDS frame as a "
- "workaround for firmware bug in Host AP mode WDS\n",
- local->dev->name);
- }
-
- hostap_check_sta_fw_version(local->ap, local->sta_fw_ver);
-}
-
-
-static void hostap_passive_scan(struct timer_list *t)
-{
- local_info_t *local = from_timer(local, t, passive_scan_timer);
- struct net_device *dev = local->dev;
- u16 chan;
-
- if (local->passive_scan_interval <= 0)
- return;
-
- if (local->passive_scan_state == PASSIVE_SCAN_LISTEN) {
- int max_tries = 16;
-
-		/* Even though the host system does not really know when the
-		 * WLAN MAC is sending frames, try to avoid changing channels
-		 * for passive scanning while a host-generated frame is being
-		 * transmitted */
- if (test_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
- printk(KERN_DEBUG "%s: passive scan detected pending "
- "TX - delaying\n", dev->name);
- local->passive_scan_timer.expires = jiffies + HZ / 10;
- add_timer(&local->passive_scan_timer);
- return;
- }
-
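-		/* Advance to the next allowed channel: bit (n - 1) of
-		 * channel_mask corresponds to channel n and the counter wraps
-		 * from 14 back to 1. */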
- do {
- local->passive_scan_channel++;
- if (local->passive_scan_channel > 14)
- local->passive_scan_channel = 1;
- max_tries--;
- } while (!(local->channel_mask &
- (1 << (local->passive_scan_channel - 1))) &&
- max_tries > 0);
-
- if (max_tries == 0) {
- printk(KERN_INFO "%s: no allowed passive scan channels"
- " found\n", dev->name);
- return;
- }
-
- printk(KERN_DEBUG "%s: passive scan channel %d\n",
- dev->name, local->passive_scan_channel);
- chan = local->passive_scan_channel;
- local->passive_scan_state = PASSIVE_SCAN_WAIT;
- local->passive_scan_timer.expires = jiffies + HZ / 10;
- } else {
- chan = local->channel;
- local->passive_scan_state = PASSIVE_SCAN_LISTEN;
- local->passive_scan_timer.expires = jiffies +
- local->passive_scan_interval * HZ;
- }
-
- if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST |
- (HFA384X_TEST_CHANGE_CHANNEL << 8),
- chan, NULL, 0))
- printk(KERN_ERR "%s: passive scan channel set %d "
- "failed\n", dev->name, chan);
-
- add_timer(&local->passive_scan_timer);
-}
-
-
-/* Called only as a scheduled task when communications quality values should
- * be updated. */
-static void handle_comms_qual_update(struct work_struct *work)
-{
- local_info_t *local =
- container_of(work, local_info_t, comms_qual_update);
- prism2_update_comms_qual(local->dev);
-}
-
-
-/* Software watchdog - called as a timer. Hardware interrupt (Tick event) is
- * used to monitor that local->last_tick_timer is being updated. If not, an
- * interrupt busy-loop is assumed and the driver tries to recover by masking
- * out some events. */
-static void hostap_tick_timer(struct timer_list *t)
-{
- static unsigned long last_inquire = 0;
- local_info_t *local = from_timer(local, t, tick_timer);
- local->last_tick_timer = jiffies;
-
- /* Inquire CommTallies every 10 seconds to keep the statistics updated
- * more often during low load and when using 32-bit tallies. */
- if ((!last_inquire || time_after(jiffies, last_inquire + 10 * HZ)) &&
- !local->hw_downloading && local->hw_ready &&
- !local->hw_resetting && local->dev_enabled) {
- hfa384x_cmd_callback(local->dev, HFA384X_CMDCODE_INQUIRE,
- HFA384X_INFO_COMMTALLIES, NULL, 0);
- last_inquire = jiffies;
- }
-
- if ((local->last_comms_qual_update == 0 ||
- time_after(jiffies, local->last_comms_qual_update + 10 * HZ)) &&
- (local->iw_mode == IW_MODE_INFRA ||
- local->iw_mode == IW_MODE_ADHOC)) {
- schedule_work(&local->comms_qual_update);
- }
-
- local->tick_timer.expires = jiffies + 2 * HZ;
- add_timer(&local->tick_timer);
-}
-
-
-#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
-static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
-{
- return HFA384X_INW(reg);
-}
-
-static int prism2_registers_proc_show(struct seq_file *m, void *v)
-{
- local_info_t *local = m->private;
-
-#define SHOW_REG(n) \
- seq_printf(m, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
-
- SHOW_REG(CMD);
- SHOW_REG(PARAM0);
- SHOW_REG(PARAM1);
- SHOW_REG(PARAM2);
- SHOW_REG(STATUS);
- SHOW_REG(RESP0);
- SHOW_REG(RESP1);
- SHOW_REG(RESP2);
- SHOW_REG(INFOFID);
- SHOW_REG(CONTROL);
- SHOW_REG(SELECT0);
- SHOW_REG(SELECT1);
- SHOW_REG(OFFSET0);
- SHOW_REG(OFFSET1);
- SHOW_REG(RXFID);
- SHOW_REG(ALLOCFID);
- SHOW_REG(TXCOMPLFID);
- SHOW_REG(SWSUPPORT0);
- SHOW_REG(SWSUPPORT1);
- SHOW_REG(SWSUPPORT2);
- SHOW_REG(EVSTAT);
- SHOW_REG(INTEN);
- SHOW_REG(EVACK);
- /* Do not read data registers, because they change the state of the
- * MAC (offset += 2) */
- /* SHOW_REG(DATA0); */
- /* SHOW_REG(DATA1); */
- SHOW_REG(AUXPAGE);
- SHOW_REG(AUXOFFSET);
- /* SHOW_REG(AUXDATA); */
-#ifdef PRISM2_PCI
- SHOW_REG(PCICOR);
- SHOW_REG(PCIHCR);
- SHOW_REG(PCI_M0_ADDRH);
- SHOW_REG(PCI_M0_ADDRL);
- SHOW_REG(PCI_M0_LEN);
- SHOW_REG(PCI_M0_CTL);
- SHOW_REG(PCI_STATUS);
- SHOW_REG(PCI_M1_ADDRH);
- SHOW_REG(PCI_M1_ADDRL);
- SHOW_REG(PCI_M1_LEN);
- SHOW_REG(PCI_M1_CTL);
-#endif /* PRISM2_PCI */
-
- return 0;
-}
-#endif
-
-struct set_tim_data {
- struct list_head list;
- int aid;
- int set;
-};
-
-static int prism2_set_tim(struct net_device *dev, int aid, int set)
-{
- struct list_head *ptr;
- struct set_tim_data *new_entry;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
- if (new_entry == NULL)
- return -ENOMEM;
-
- new_entry->aid = aid;
- new_entry->set = set;
-
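-	/* If an update for this AID is already queued, overwrite it in place
-	 * instead of adding a second entry for the same station. */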
- spin_lock_bh(&local->set_tim_lock);
- list_for_each(ptr, &local->set_tim_list) {
- struct set_tim_data *entry =
- list_entry(ptr, struct set_tim_data, list);
- if (entry->aid == aid) {
- PDEBUG(DEBUG_PS2, "%s: prism2_set_tim: aid=%d "
- "set=%d ==> %d\n",
- local->dev->name, aid, entry->set, set);
- entry->set = set;
- kfree(new_entry);
- new_entry = NULL;
- break;
- }
- }
- if (new_entry)
- list_add_tail(&new_entry->list, &local->set_tim_list);
- spin_unlock_bh(&local->set_tim_lock);
-
- schedule_work(&local->set_tim_queue);
-
- return 0;
-}
-
-
-static void handle_set_tim_queue(struct work_struct *work)
-{
- local_info_t *local = container_of(work, local_info_t, set_tim_queue);
- struct set_tim_data *entry;
- u16 val;
-
- for (;;) {
- entry = NULL;
- spin_lock_bh(&local->set_tim_lock);
- if (!list_empty(&local->set_tim_list)) {
- entry = list_entry(local->set_tim_list.next,
- struct set_tim_data, list);
- list_del(&entry->list);
- }
- spin_unlock_bh(&local->set_tim_lock);
- if (!entry)
- break;
-
- PDEBUG(DEBUG_PS2, "%s: handle_set_tim_queue: aid=%d set=%d\n",
- local->dev->name, entry->aid, entry->set);
-
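-		/* The TIM control RID takes the station AID in the low bits;
-		 * bit 15 selects whether the TIM entry is set (1) or
-		 * cleared (0). */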
- val = entry->aid;
- if (entry->set)
- val |= 0x8000;
- if (hostap_set_word(local->dev, HFA384X_RID_CNFTIMCTRL, val)) {
- printk(KERN_DEBUG "%s: set_tim failed (aid=%d "
- "set=%d)\n",
- local->dev->name, entry->aid, entry->set);
- }
-
- kfree(entry);
- }
-}
-
-
-static void prism2_clear_set_tim_queue(local_info_t *local)
-{
- struct list_head *ptr, *n;
-
- list_for_each_safe(ptr, n, &local->set_tim_list) {
- struct set_tim_data *entry;
- entry = list_entry(ptr, struct set_tim_data, list);
- list_del(&entry->list);
- kfree(entry);
- }
-}
-
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &hostap_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
-static struct net_device *
-prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
- struct device *sdev)
-{
- struct net_device *dev;
- struct hostap_interface *iface;
- struct local_info *local;
- int len, i, ret;
-
- if (funcs == NULL)
- return NULL;
-
- len = strlen(dev_template);
- if (len >= IFNAMSIZ || strstr(dev_template, "%d") == NULL) {
- printk(KERN_WARNING "hostap: Invalid dev_template='%s'\n",
- dev_template);
- return NULL;
- }
-
- len = sizeof(struct hostap_interface) +
- 3 + sizeof(struct local_info) +
- 3 + sizeof(struct ap_data);
-
- dev = alloc_etherdev(len);
- if (dev == NULL)
- return NULL;
-
- iface = netdev_priv(dev);
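-	/* local_info and ap_data are carved out of the same etherdev
-	 * allocation; (p + 3) & ~3 rounds each pointer up to the next 4-byte
-	 * boundary (e.g., 0x1005 -> 0x1008). */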
- local = (struct local_info *) ((((long) (iface + 1)) + 3) & ~3);
- local->ap = (struct ap_data *) ((((long) (local + 1)) + 3) & ~3);
- local->dev = iface->dev = dev;
- iface->local = local;
- iface->type = HOSTAP_INTERFACE_MASTER;
- INIT_LIST_HEAD(&local->hostap_interfaces);
-
- local->hw_module = THIS_MODULE;
-
-#ifdef PRISM2_IO_DEBUG
- local->io_debug_enabled = 1;
-#endif /* PRISM2_IO_DEBUG */
-
- local->func = funcs;
- local->func->cmd = hfa384x_cmd;
- local->func->read_regs = hfa384x_read_regs;
- local->func->get_rid = hfa384x_get_rid;
- local->func->set_rid = hfa384x_set_rid;
- local->func->hw_enable = prism2_hw_enable;
- local->func->hw_config = prism2_hw_config;
- local->func->hw_reset = prism2_hw_reset;
- local->func->hw_shutdown = prism2_hw_shutdown;
- local->func->reset_port = prism2_reset_port;
- local->func->schedule_reset = prism2_schedule_reset;
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- local->func->read_aux_proc_ops = &prism2_download_aux_dump_proc_ops;
- local->func->download = prism2_download;
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
- local->func->tx = prism2_tx_80211;
- local->func->set_tim = prism2_set_tim;
-	local->func->need_tx_headroom = 0; /* no need to add txdesc in
-					    * skb->data (FIX: maybe for DMA bus
-					    * mastering?) */
-
- local->mtu = mtu;
-
- rwlock_init(&local->iface_lock);
- spin_lock_init(&local->txfidlock);
- spin_lock_init(&local->cmdlock);
- spin_lock_init(&local->baplock);
- spin_lock_init(&local->lock);
- spin_lock_init(&local->irq_init_lock);
- mutex_init(&local->rid_bap_mtx);
-
- if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
- card_idx = 0;
- local->card_idx = card_idx;
-
- len = strlen(essid);
- memcpy(local->essid, essid,
- len > MAX_SSID_LEN ? MAX_SSID_LEN : len);
- local->essid[MAX_SSID_LEN] = '\0';
- i = GET_INT_PARM(iw_mode, card_idx);
- if ((i >= IW_MODE_ADHOC && i <= IW_MODE_REPEAT) ||
- i == IW_MODE_MONITOR) {
- local->iw_mode = i;
- } else {
- printk(KERN_WARNING "prism2: Unknown iw_mode %d; using "
- "IW_MODE_MASTER\n", i);
- local->iw_mode = IW_MODE_MASTER;
- }
- local->channel = GET_INT_PARM(channel, card_idx);
- local->beacon_int = GET_INT_PARM(beacon_int, card_idx);
- local->dtim_period = GET_INT_PARM(dtim_period, card_idx);
- local->wds_max_connections = 16;
- local->tx_control = HFA384X_TX_CTRL_FLAGS;
- local->manual_retry_count = -1;
- local->rts_threshold = 2347;
- local->fragm_threshold = 2346;
-	local->rssi_to_dBm = 100; /* default; to be overridden by
-				   * cnfDbmAdjust, if available */
- local->auth_algs = PRISM2_AUTH_OPEN | PRISM2_AUTH_SHARED_KEY;
- local->sram_type = -1;
- local->scan_channel_mask = 0xffff;
- local->monitor_type = PRISM2_MONITOR_RADIOTAP;
-
- /* Initialize task queue structures */
- INIT_WORK(&local->reset_queue, handle_reset_queue);
- INIT_WORK(&local->set_multicast_list_queue,
- hostap_set_multicast_list_queue);
-
- INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
- INIT_LIST_HEAD(&local->set_tim_list);
- spin_lock_init(&local->set_tim_lock);
-
- INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
-
- /* Initialize tasklets for handling hardware IRQ related operations
- * outside hw IRQ handler */
- tasklet_setup(&local->bap_tasklet, hostap_bap_tasklet);
- tasklet_setup(&local->info_tasklet, hostap_info_tasklet);
- hostap_info_init(local);
-
- tasklet_setup(&local->rx_tasklet, hostap_rx_tasklet);
- skb_queue_head_init(&local->rx_list);
-
- tasklet_setup(&local->sta_tx_exc_tasklet,
- hostap_sta_tx_exc_tasklet);
- skb_queue_head_init(&local->sta_tx_exc_list);
-
- INIT_LIST_HEAD(&local->cmd_queue);
- init_waitqueue_head(&local->hostscan_wq);
-
- lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
-
- timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
- timer_setup(&local->tick_timer, hostap_tick_timer, 0);
- local->tick_timer.expires = jiffies + 2 * HZ;
- add_timer(&local->tick_timer);
-
- INIT_LIST_HEAD(&local->bss_list);
-
- hostap_setup_dev(dev, local, HOSTAP_INTERFACE_MASTER);
-
- dev->type = ARPHRD_IEEE80211;
- dev->header_ops = &hostap_80211_ops;
-
- rtnl_lock();
- ret = dev_alloc_name(dev, "wifi%d");
- SET_NETDEV_DEV(dev, sdev);
- if (ret >= 0)
- ret = register_netdevice(dev);
-
- prism2_set_lockdep_class(dev);
- rtnl_unlock();
- if (ret < 0) {
- printk(KERN_WARNING "%s: register netdevice failed!\n",
- dev_info);
- goto fail;
- }
- printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
-
- hostap_init_data(local);
- return dev;
-
- fail:
- free_netdev(dev);
- return NULL;
-}
-
-
-static int hostap_hw_ready(struct net_device *dev)
-{
- struct hostap_interface *iface;
- struct local_info *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
- local->ddev = hostap_add_interface(local, HOSTAP_INTERFACE_MAIN, 0,
- "", dev_template);
-
- if (local->ddev) {
- if (local->iw_mode == IW_MODE_INFRA ||
- local->iw_mode == IW_MODE_ADHOC) {
- netif_carrier_off(local->dev);
- netif_carrier_off(local->ddev);
- }
- hostap_init_proc(local);
-#ifndef PRISM2_NO_PROCFS_DEBUG
- proc_create_single_data("registers", 0, local->proc,
- prism2_registers_proc_show, local);
-#endif /* PRISM2_NO_PROCFS_DEBUG */
- hostap_init_ap_proc(local);
- return 0;
- }
-
- return -1;
-}
-
-
-static void prism2_free_local_data(struct net_device *dev)
-{
- struct hostap_tx_callback_info *tx_cb, *tx_cb_prev;
- int i;
- struct hostap_interface *iface;
- struct local_info *local;
- struct list_head *ptr, *n;
-
- if (dev == NULL)
- return;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Unregister all netdevs before freeing local data. */
- list_for_each_safe(ptr, n, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type == HOSTAP_INTERFACE_MASTER) {
- /* special handling for this interface below */
- continue;
- }
- hostap_remove_interface(iface->dev, 0, 1);
- }
-
- unregister_netdev(local->dev);
-
- flush_work(&local->reset_queue);
- flush_work(&local->set_multicast_list_queue);
- flush_work(&local->set_tim_queue);
-#ifndef PRISM2_NO_STATION_MODES
- flush_work(&local->info_queue);
-#endif
- flush_work(&local->comms_qual_update);
-
- lib80211_crypt_info_free(&local->crypt_info);
-
- if (timer_pending(&local->passive_scan_timer))
- del_timer(&local->passive_scan_timer);
-
- if (timer_pending(&local->tick_timer))
- del_timer(&local->tick_timer);
-
- prism2_clear_cmd_queue(local);
-
- skb_queue_purge(&local->info_list);
- skb_queue_purge(&local->rx_list);
- skb_queue_purge(&local->sta_tx_exc_list);
-
- if (local->dev_enabled)
- prism2_callback(local, PRISM2_CALLBACK_DISABLE);
-
- if (local->ap != NULL)
- hostap_free_data(local->ap);
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
- if (local->proc != NULL)
- remove_proc_entry("registers", local->proc);
-#endif /* PRISM2_NO_PROCFS_DEBUG */
- hostap_remove_proc(local);
-
- tx_cb = local->tx_callback;
- while (tx_cb != NULL) {
- tx_cb_prev = tx_cb;
- tx_cb = tx_cb->next;
- kfree(tx_cb_prev);
- }
-
- hostap_set_hostapd(local, 0, 0);
- hostap_set_hostapd_sta(local, 0, 0);
-
- for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
- if (local->frag_cache[i].skb != NULL)
- dev_kfree_skb(local->frag_cache[i].skb);
- }
-
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- prism2_download_free_data(local->dl_pri);
- prism2_download_free_data(local->dl_sec);
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
- prism2_clear_set_tim_queue(local);
-
- list_for_each_safe(ptr, n, &local->bss_list) {
- struct hostap_bss_info *bss =
- list_entry(ptr, struct hostap_bss_info, list);
- kfree(bss);
- }
-
- kfree(local->pda);
- kfree(local->last_scan_results);
- kfree(local->generic_elem);
-
- free_netdev(local->dev);
-}
-
-
-#if defined(PRISM2_PCI) || defined(PRISM2_PCCARD)
-static void __maybe_unused prism2_suspend(struct net_device *dev)
-{
- struct hostap_interface *iface;
- struct local_info *local;
- union iwreq_data wrqu;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Send disconnect event, e.g., to trigger reassociation after resume
- * if wpa_supplicant is used. */
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
-
- /* Disable hardware and firmware */
- prism2_hw_shutdown(dev, 0);
-}
-#endif /* PRISM2_PCI || PRISM2_PCCARD */
-
-
-/* These might at some point be compiled separately and used as separate
- * kernel modules or linked into one */
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-#include "hostap_download.c"
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
-#ifdef PRISM2_CALLBACK
-/* An external hostap_callback.c file can be used to, e.g., blink an activity
- * LED. This can use platform specific code and must define the
- * prism2_callback() function (if PRISM2_CALLBACK is not defined, these
- * function calls are not used). */
-#include "hostap_callback.c"
-#endif /* PRISM2_CALLBACK */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_info.c b/drivers/net/wireless/intersil/hostap/hostap_info.c
deleted file mode 100644
index da8c30f10..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_info.c
+++ /dev/null
@@ -1,509 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Host AP driver Info Frame processing (part of hostap.o module) */
-
-#include <linux/if_arp.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/etherdevice.h>
-#include "hostap_wlan.h"
-#include "hostap.h"
-#include "hostap_ap.h"
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
- int left)
-{
- struct hfa384x_comm_tallies *tallies;
-
- if (left < sizeof(struct hfa384x_comm_tallies)) {
- printk(KERN_DEBUG "%s: too short (len=%d) commtallies "
- "info frame\n", local->dev->name, left);
- return;
- }
-
- tallies = (struct hfa384x_comm_tallies *) buf;
-#define ADD_COMM_TALLIES(name) \
-local->comm_tallies.name += le16_to_cpu(tallies->name)
- ADD_COMM_TALLIES(tx_unicast_frames);
- ADD_COMM_TALLIES(tx_multicast_frames);
- ADD_COMM_TALLIES(tx_fragments);
- ADD_COMM_TALLIES(tx_unicast_octets);
- ADD_COMM_TALLIES(tx_multicast_octets);
- ADD_COMM_TALLIES(tx_deferred_transmissions);
- ADD_COMM_TALLIES(tx_single_retry_frames);
- ADD_COMM_TALLIES(tx_multiple_retry_frames);
- ADD_COMM_TALLIES(tx_retry_limit_exceeded);
- ADD_COMM_TALLIES(tx_discards);
- ADD_COMM_TALLIES(rx_unicast_frames);
- ADD_COMM_TALLIES(rx_multicast_frames);
- ADD_COMM_TALLIES(rx_fragments);
- ADD_COMM_TALLIES(rx_unicast_octets);
- ADD_COMM_TALLIES(rx_multicast_octets);
- ADD_COMM_TALLIES(rx_fcs_errors);
- ADD_COMM_TALLIES(rx_discards_no_buffer);
- ADD_COMM_TALLIES(tx_discards_wrong_sa);
- ADD_COMM_TALLIES(rx_discards_wep_undecryptable);
- ADD_COMM_TALLIES(rx_message_in_msg_fragments);
- ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments);
-#undef ADD_COMM_TALLIES
-}
-
-
-		printk(KERN_WARNING "%s: Info frame read failed (fid=0x%04x, "
-		       "len=0x%04x, type=0x%04x)\n", dev->name, fid,
-		       le16_to_cpu(info.len), le16_to_cpu(info.type));
-{
- struct hfa384x_comm_tallies32 *tallies;
-
- if (left < sizeof(struct hfa384x_comm_tallies32)) {
- printk(KERN_DEBUG "%s: too short (len=%d) commtallies32 "
- "info frame\n", local->dev->name, left);
- return;
- }
-
- tallies = (struct hfa384x_comm_tallies32 *) buf;
-#define ADD_COMM_TALLIES(name) \
-local->comm_tallies.name += le32_to_cpu(tallies->name)
- ADD_COMM_TALLIES(tx_unicast_frames);
- ADD_COMM_TALLIES(tx_multicast_frames);
- ADD_COMM_TALLIES(tx_fragments);
- ADD_COMM_TALLIES(tx_unicast_octets);
- ADD_COMM_TALLIES(tx_multicast_octets);
- ADD_COMM_TALLIES(tx_deferred_transmissions);
- ADD_COMM_TALLIES(tx_single_retry_frames);
- ADD_COMM_TALLIES(tx_multiple_retry_frames);
- ADD_COMM_TALLIES(tx_retry_limit_exceeded);
- ADD_COMM_TALLIES(tx_discards);
- ADD_COMM_TALLIES(rx_unicast_frames);
- ADD_COMM_TALLIES(rx_multicast_frames);
- ADD_COMM_TALLIES(rx_fragments);
- ADD_COMM_TALLIES(rx_unicast_octets);
- ADD_COMM_TALLIES(rx_multicast_octets);
- ADD_COMM_TALLIES(rx_fcs_errors);
- ADD_COMM_TALLIES(rx_discards_no_buffer);
- ADD_COMM_TALLIES(tx_discards_wrong_sa);
- ADD_COMM_TALLIES(rx_discards_wep_undecryptable);
- ADD_COMM_TALLIES(rx_message_in_msg_fragments);
- ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments);
-#undef ADD_COMM_TALLIES
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_info_commtallies(local_info_t *local, unsigned char *buf,
- int left)
-{
- if (local->tallies32)
- prism2_info_commtallies32(local, buf, left);
- else
- prism2_info_commtallies16(local, buf, left);
-}
-
-
-#ifndef PRISM2_NO_STATION_MODES
-#ifndef PRISM2_NO_DEBUG
-static const char* hfa384x_linkstatus_str(u16 linkstatus)
-{
- switch (linkstatus) {
- case HFA384X_LINKSTATUS_CONNECTED:
- return "Connected";
- case HFA384X_LINKSTATUS_DISCONNECTED:
- return "Disconnected";
- case HFA384X_LINKSTATUS_AP_CHANGE:
- return "Access point change";
- case HFA384X_LINKSTATUS_AP_OUT_OF_RANGE:
- return "Access point out of range";
- case HFA384X_LINKSTATUS_AP_IN_RANGE:
- return "Access point in range";
- case HFA384X_LINKSTATUS_ASSOC_FAILED:
- return "Association failed";
- default:
- return "Unknown";
- }
-}
-#endif /* PRISM2_NO_DEBUG */
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_info_linkstatus(local_info_t *local, unsigned char *buf,
- int left)
-{
- u16 val;
- int non_sta_mode;
-
-	/* Allow new JoinRequests to occur since the LinkStatus for the
-	 * previous one has been received */
- local->last_join_time = 0;
-
- if (left != 2) {
- printk(KERN_DEBUG "%s: invalid linkstatus info frame "
- "length %d\n", local->dev->name, left);
- return;
- }
-
- non_sta_mode = local->iw_mode == IW_MODE_MASTER ||
- local->iw_mode == IW_MODE_REPEAT ||
- local->iw_mode == IW_MODE_MONITOR;
-
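-	/* The two-octet link status is little-endian on the wire; assemble it
-	 * byte by byte. */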
- val = buf[0] | (buf[1] << 8);
- if (!non_sta_mode || val != HFA384X_LINKSTATUS_DISCONNECTED) {
- PDEBUG(DEBUG_EXTRA, "%s: LinkStatus=%d (%s)\n",
- local->dev->name, val, hfa384x_linkstatus_str(val));
- }
-
- if (non_sta_mode) {
- netif_carrier_on(local->dev);
- netif_carrier_on(local->ddev);
- return;
- }
-
- /* Get current BSSID later in scheduled task */
- set_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info);
- local->prev_link_status = val;
- schedule_work(&local->info_queue);
-}
-
-
-static void prism2_host_roaming(local_info_t *local)
-{
- struct hfa384x_join_request req;
- struct net_device *dev = local->dev;
- struct hfa384x_hostscan_result *selected, *entry;
- int i;
- unsigned long flags;
-
- if (local->last_join_time &&
- time_before(jiffies, local->last_join_time + 10 * HZ)) {
- PDEBUG(DEBUG_EXTRA, "%s: last join request has not yet been "
- "completed - waiting for it before issuing new one\n",
- dev->name);
- return;
- }
-
-	/* ScanResults are sorted: first ESS results in decreasing signal
-	 * quality, then IBSS results in similar order.
- * Trivial roaming policy: just select the first entry.
- * This could probably be improved by adding hysteresis to limit
- * number of handoffs, etc.
- *
- * Could do periodic RID_SCANREQUEST or Inquire F101 to get new
- * ScanResults */
- spin_lock_irqsave(&local->lock, flags);
- if (local->last_scan_results == NULL ||
- local->last_scan_results_count == 0) {
- spin_unlock_irqrestore(&local->lock, flags);
- PDEBUG(DEBUG_EXTRA, "%s: no scan results for host roaming\n",
- dev->name);
- return;
- }
-
- selected = &local->last_scan_results[0];
-
- if (local->preferred_ap[0] || local->preferred_ap[1] ||
- local->preferred_ap[2] || local->preferred_ap[3] ||
- local->preferred_ap[4] || local->preferred_ap[5]) {
- /* Try to find preferred AP */
- PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID %pM\n",
- dev->name, local->preferred_ap);
- for (i = 0; i < local->last_scan_results_count; i++) {
- entry = &local->last_scan_results[i];
- if (memcmp(local->preferred_ap, entry->bssid, 6) == 0)
- {
- PDEBUG(DEBUG_EXTRA, "%s: using preferred AP "
- "selection\n", dev->name);
- selected = entry;
- break;
- }
- }
- }
-
- memcpy(req.bssid, selected->bssid, ETH_ALEN);
- req.channel = selected->chid;
- spin_unlock_irqrestore(&local->lock, flags);
-
- PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%pM"
- " channel=%d\n",
- dev->name, req.bssid, le16_to_cpu(req.channel));
- if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
- sizeof(req))) {
- printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name);
- }
- local->last_join_time = jiffies;
-}
-
-
-static void hostap_report_scan_complete(local_info_t *local)
-{
- union iwreq_data wrqu;
-
-	/* Inform user space about new scan results (just an empty event;
-	 * SIOCGIWSCAN can be used to fetch the data) */
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(local->dev, SIOCGIWSCAN, &wrqu, NULL);
-
- /* Allow SIOCGIWSCAN handling to occur since we have received
- * scanning result */
- local->scan_timestamp = 0;
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_info_scanresults(local_info_t *local, unsigned char *buf,
- int left)
-{
- u16 *pos;
- int new_count, i;
- unsigned long flags;
- struct hfa384x_scan_result *res;
- struct hfa384x_hostscan_result *results, *prev;
-
- if (left < 4) {
- printk(KERN_DEBUG "%s: invalid scanresult info frame "
- "length %d\n", local->dev->name, left);
- return;
- }
-
- pos = (u16 *) buf;
- pos++;
- pos++;
- left -= 4;
-
- new_count = left / sizeof(struct hfa384x_scan_result);
- results = kmalloc_array(new_count,
- sizeof(struct hfa384x_hostscan_result),
- GFP_ATOMIC);
- if (results == NULL)
- return;
-
- /* Convert to hostscan result format. */
- res = (struct hfa384x_scan_result *) pos;
- for (i = 0; i < new_count; i++) {
- memcpy(&results[i], &res[i],
- sizeof(struct hfa384x_scan_result));
- results[i].atim = 0;
- }
-
- spin_lock_irqsave(&local->lock, flags);
- local->last_scan_type = PRISM2_SCAN;
- prev = local->last_scan_results;
- local->last_scan_results = results;
- local->last_scan_results_count = new_count;
- spin_unlock_irqrestore(&local->lock, flags);
- kfree(prev);
-
- hostap_report_scan_complete(local);
-
- /* Perform rest of ScanResults handling later in scheduled task */
- set_bit(PRISM2_INFO_PENDING_SCANRESULTS, &local->pending_info);
- schedule_work(&local->info_queue);
-}
-
-
-/* Called only as a tasklet (software IRQ) */
-static void prism2_info_hostscanresults(local_info_t *local,
- unsigned char *buf, int left)
-{
- int i, result_size, copy_len, new_count;
- struct hfa384x_hostscan_result *results, *prev;
- unsigned long flags;
- __le16 *pos;
- u8 *ptr;
-
- wake_up_interruptible(&local->hostscan_wq);
-
- if (left < 4) {
- printk(KERN_DEBUG "%s: invalid hostscanresult info frame "
- "length %d\n", local->dev->name, left);
- return;
- }
-
- pos = (__le16 *) buf;
- copy_len = result_size = le16_to_cpu(*pos);
- if (result_size == 0) {
- printk(KERN_DEBUG "%s: invalid result_size (0) in "
- "hostscanresults\n", local->dev->name);
- return;
- }
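-	/* Firmware result entries may be larger than the driver structure:
-	 * copy at most sizeof(struct hfa384x_hostscan_result) per entry, but
-	 * step through the buffer in result_size increments. */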
- if (copy_len > sizeof(struct hfa384x_hostscan_result))
- copy_len = sizeof(struct hfa384x_hostscan_result);
-
- pos++;
- pos++;
- left -= 4;
- ptr = (u8 *) pos;
-
- new_count = left / result_size;
- results = kcalloc(new_count, sizeof(struct hfa384x_hostscan_result),
- GFP_ATOMIC);
- if (results == NULL)
- return;
-
- for (i = 0; i < new_count; i++) {
- memcpy(&results[i], ptr, copy_len);
- ptr += result_size;
- left -= result_size;
- }
-
- if (left) {
- printk(KERN_DEBUG "%s: short HostScan result entry (%d/%d)\n",
- local->dev->name, left, result_size);
- }
-
- spin_lock_irqsave(&local->lock, flags);
- local->last_scan_type = PRISM2_HOSTSCAN;
- prev = local->last_scan_results;
- local->last_scan_results = results;
- local->last_scan_results_count = new_count;
- spin_unlock_irqrestore(&local->lock, flags);
- kfree(prev);
-
- hostap_report_scan_complete(local);
-}
-#endif /* PRISM2_NO_STATION_MODES */
-
-
-/* Called only as a tasklet (software IRQ) */
-void hostap_info_process(local_info_t *local, struct sk_buff *skb)
-{
- struct hfa384x_info_frame *info;
- unsigned char *buf;
- int left;
-#ifndef PRISM2_NO_DEBUG
- int i;
-#endif /* PRISM2_NO_DEBUG */
-
- info = (struct hfa384x_info_frame *) skb->data;
- buf = skb->data + sizeof(*info);
- left = skb->len - sizeof(*info);
-
- switch (le16_to_cpu(info->type)) {
- case HFA384X_INFO_COMMTALLIES:
- prism2_info_commtallies(local, buf, left);
- break;
-
-#ifndef PRISM2_NO_STATION_MODES
- case HFA384X_INFO_LINKSTATUS:
- prism2_info_linkstatus(local, buf, left);
- break;
-
- case HFA384X_INFO_SCANRESULTS:
- prism2_info_scanresults(local, buf, left);
- break;
-
- case HFA384X_INFO_HOSTSCANRESULTS:
- prism2_info_hostscanresults(local, buf, left);
- break;
-#endif /* PRISM2_NO_STATION_MODES */
-
-#ifndef PRISM2_NO_DEBUG
- default:
- PDEBUG(DEBUG_EXTRA, "%s: INFO - len=%d type=0x%04x\n",
- local->dev->name, le16_to_cpu(info->len),
- le16_to_cpu(info->type));
- PDEBUG(DEBUG_EXTRA, "Unknown info frame:");
- for (i = 0; i < (left < 100 ? left : 100); i++)
- PDEBUG2(DEBUG_EXTRA, " %02x", buf[i]);
- PDEBUG2(DEBUG_EXTRA, "\n");
- break;
-#endif /* PRISM2_NO_DEBUG */
- }
-}
-
-
-#ifndef PRISM2_NO_STATION_MODES
-static void handle_info_queue_linkstatus(local_info_t *local)
-{
- int val = local->prev_link_status;
- int connected;
- union iwreq_data wrqu;
-
- connected =
- val == HFA384X_LINKSTATUS_CONNECTED ||
- val == HFA384X_LINKSTATUS_AP_CHANGE ||
- val == HFA384X_LINKSTATUS_AP_IN_RANGE;
-
- if (local->func->get_rid(local->dev, HFA384X_RID_CURRENTBSSID,
- local->bssid, ETH_ALEN, 1) < 0) {
- printk(KERN_DEBUG "%s: could not read CURRENTBSSID after "
- "LinkStatus event\n", local->dev->name);
- } else {
- PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=%pM\n",
- local->dev->name,
- (unsigned char *) local->bssid);
- if (local->wds_type & HOSTAP_WDS_AP_CLIENT)
- hostap_add_sta(local->ap, local->bssid);
- }
-
- /* Get BSSID if we have a valid AP address */
- if (connected) {
- netif_carrier_on(local->dev);
- netif_carrier_on(local->ddev);
- memcpy(wrqu.ap_addr.sa_data, local->bssid, ETH_ALEN);
- } else {
- netif_carrier_off(local->dev);
- netif_carrier_off(local->ddev);
- eth_zero_addr(wrqu.ap_addr.sa_data);
- }
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
- /*
- * Filter out sequential disconnect events in order not to cause a
- * flood of SIOCGIWAP events that have a race condition with EAPOL
- * frames and can confuse wpa_supplicant about the current association
- * status.
- */
- if (connected || local->prev_linkstatus_connected)
- wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
- local->prev_linkstatus_connected = connected;
-}
-
-
-static void handle_info_queue_scanresults(local_info_t *local)
-{
- if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA)
- prism2_host_roaming(local);
-
- if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
- !is_zero_ether_addr(local->preferred_ap)) {
-		 * Firmware seems to be getting into an odd state in
-		 * host_roaming mode 2 when hostscan is used without a join
-		 * command, so try
- * mode 2 when hostscan is used without join command, so try
- * to fix this by re-joining the current AP. This does not
- * actually trigger a new association if the current AP is
- * still in the scan results.
- */
- prism2_host_roaming(local);
- }
-}
-
-
-/* Called only as scheduled task after receiving info frames (used to avoid
- * pending too much time in HW IRQ handler). */
-static void handle_info_queue(struct work_struct *work)
-{
- local_info_t *local = container_of(work, local_info_t, info_queue);
-
- if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
- &local->pending_info))
- handle_info_queue_linkstatus(local);
-
- if (test_and_clear_bit(PRISM2_INFO_PENDING_SCANRESULTS,
- &local->pending_info))
- handle_info_queue_scanresults(local);
-}
-#endif /* PRISM2_NO_STATION_MODES */
-
-
-void hostap_info_init(local_info_t *local)
-{
- skb_queue_head_init(&local->info_list);
-#ifndef PRISM2_NO_STATION_MODES
- INIT_WORK(&local->info_queue, handle_info_queue);
-#endif /* PRISM2_NO_STATION_MODES */
-}
-
-
-EXPORT_SYMBOL(hostap_info_init);
-EXPORT_SYMBOL(hostap_info_process);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
deleted file mode 100644
index 26162f92e..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ /dev/null
@@ -1,3847 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
-
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched/signal.h>
-#include <linux/ethtool.h>
-#include <linux/if_arp.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <net/lib80211.h>
-
-#include "hostap_wlan.h"
-#include "hostap.h"
-#include "hostap_ap.h"
-
-static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct iw_statistics *wstats;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
-	/* Why are we doing that? - Jean II */
- if (iface->type != HOSTAP_INTERFACE_MAIN)
- return NULL;
-
- wstats = &local->wstats;
-
- wstats->status = 0;
- wstats->discard.code =
- local->comm_tallies.rx_discards_wep_undecryptable;
- wstats->discard.misc =
- local->comm_tallies.rx_fcs_errors +
- local->comm_tallies.rx_discards_no_buffer +
- local->comm_tallies.tx_discards_wrong_sa;
-
- wstats->discard.retries =
- local->comm_tallies.tx_retry_limit_exceeded;
- wstats->discard.fragment =
- local->comm_tallies.rx_message_in_bad_msg_fragments;
-
- if (local->iw_mode != IW_MODE_MASTER &&
- local->iw_mode != IW_MODE_REPEAT) {
-
- if (prism2_update_comms_qual(dev) == 0)
- wstats->qual.updated = IW_QUAL_ALL_UPDATED |
- IW_QUAL_DBM;
-
- wstats->qual.qual = local->comms_qual;
- wstats->qual.level = local->avg_signal;
- wstats->qual.noise = local->avg_noise;
- } else {
- wstats->qual.qual = 0;
- wstats->qual.level = 0;
- wstats->qual.noise = 0;
- wstats->qual.updated = IW_QUAL_ALL_INVALID;
- }
-
- return wstats;
-}
-
-
-static int prism2_get_datarates(struct net_device *dev, u8 *rates)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- u8 buf[12];
- int len;
- u16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- len = local->func->get_rid(dev, HFA384X_RID_SUPPORTEDDATARATES, buf,
- sizeof(buf), 0);
- if (len < 2)
- return 0;
-
- val = le16_to_cpu(*(__le16 *) buf); /* string length */
-
- if (len - 2 < val || val > 10)
- return 0;
-
- memcpy(rates, buf + 2, val);
- return val;
-}
-
-
-static int prism2_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u8 rates[10];
- int len, i, over2 = 0;
-
- len = prism2_get_datarates(dev, rates);
-
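-	/* Supported rates are reported in 500 kb/s units, so 0x0b is
-	 * 5.5 Mb/s and 0x16 is 11 Mb/s; seeing either one means the card is
-	 * 802.11b capable rather than plain 802.11-DS. */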
- for (i = 0; i < len; i++) {
- if (rates[i] == 0x0b || rates[i] == 0x16) {
- over2 = 1;
- break;
- }
- }
-
- strcpy(wrqu->name, over2 ? "IEEE 802.11b" : "IEEE 802.11-DS");
-
- return 0;
-}
-
-
-static int prism2_ioctl_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct hostap_interface *iface;
- local_info_t *local;
- int i;
- struct lib80211_crypt_data **crypt;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- i = erq->flags & IW_ENCODE_INDEX;
- if (i < 1 || i > 4)
- i = local->crypt_info.tx_keyidx;
- else
- i--;
- if (i < 0 || i >= WEP_KEYS)
- return -EINVAL;
-
- crypt = &local->crypt_info.crypt[i];
-
- if (erq->flags & IW_ENCODE_DISABLED) {
- if (*crypt)
- lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
- goto done;
- }
-
- if (*crypt != NULL && (*crypt)->ops != NULL &&
- strcmp((*crypt)->ops->name, "WEP") != 0) {
- /* changing to use WEP; deinit previously used algorithm */
- lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
- }
-
- if (*crypt == NULL) {
- struct lib80211_crypt_data *new_crypt;
-
- /* take WEP into use */
- new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
- GFP_KERNEL);
- if (new_crypt == NULL)
- return -ENOMEM;
- new_crypt->ops = lib80211_get_crypto_ops("WEP");
- if (!new_crypt->ops) {
- request_module("lib80211_crypt_wep");
- new_crypt->ops = lib80211_get_crypto_ops("WEP");
- }
- if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
- new_crypt->priv = new_crypt->ops->init(i);
- if (!new_crypt->ops || !new_crypt->priv) {
- kfree(new_crypt);
- new_crypt = NULL;
-
-			printk(KERN_WARNING "%s: could not initialize WEP: "
-			       "load module lib80211_crypt_wep\n",
-			       dev->name);
- return -EOPNOTSUPP;
- }
- *crypt = new_crypt;
- }
-
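-	/* Zero-pad short keys to the nearest standard WEP key size: 5 octets
-	 * (WEP-40) or 13 octets (WEP-104). */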
- if (erq->length > 0) {
- int len = erq->length <= 5 ? 5 : 13;
- int first = 1, j;
- if (len > erq->length)
- memset(keybuf + erq->length, 0, len - erq->length);
- (*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv);
- for (j = 0; j < WEP_KEYS; j++) {
- if (j != i && local->crypt_info.crypt[j]) {
- first = 0;
- break;
- }
- }
- if (first)
- local->crypt_info.tx_keyidx = i;
- } else {
- /* No key data - just set the default TX key index */
- local->crypt_info.tx_keyidx = i;
- }
-
- done:
- local->open_wep = erq->flags & IW_ENCODE_OPEN;
-
- if (hostap_set_encryption(local)) {
- printk(KERN_DEBUG "%s: set_encryption failed\n", dev->name);
- return -EINVAL;
- }
-
-	/* Do not reset port0 if the card is in Managed mode since resetting
-	 * would generate a new IEEE 802.11 authentication which may end up
-	 * looping with IEEE 802.1X. Prism2 documentation seems to require a
-	 * port reset after WEP configuration. However, keys are apparently
-	 * changed even without a reset, at least in Managed mode. */
- if (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(dev)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static int prism2_ioctl_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *key)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct hostap_interface *iface;
- local_info_t *local;
- int i, len;
- u16 val;
- struct lib80211_crypt_data *crypt;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- i = erq->flags & IW_ENCODE_INDEX;
- if (i < 1 || i > 4)
- i = local->crypt_info.tx_keyidx;
- else
- i--;
- if (i < 0 || i >= WEP_KEYS)
- return -EINVAL;
-
- crypt = local->crypt_info.crypt[i];
- erq->flags = i + 1;
-
- if (crypt == NULL || crypt->ops == NULL) {
- erq->length = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- return 0;
- }
-
- if (strcmp(crypt->ops->name, "WEP") != 0) {
- /* only WEP is supported with wireless extensions, so just
- * report that encryption is used */
- erq->length = 0;
- erq->flags |= IW_ENCODE_ENABLED;
- return 0;
- }
-
-	/* Reads from HFA384X_RID_CNFDEFAULTKEY* return bogus values, so show
-	 * the keys from the driver buffer */
- len = crypt->ops->get_key(key, WEP_KEY_LEN, NULL, crypt->priv);
- erq->length = (len >= 0 ? len : 0);
-
- if (local->func->get_rid(dev, HFA384X_RID_CNFWEPFLAGS, &val, 2, 1) < 0)
- {
- printk("CNFWEPFLAGS reading failed\n");
- return -EOPNOTSUPP;
- }
- le16_to_cpus(&val);
- if (val & HFA384X_WEPFLAGS_PRIVACYINVOKED)
- erq->flags |= IW_ENCODE_ENABLED;
- else
- erq->flags |= IW_ENCODE_DISABLED;
- if (val & HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED)
- erq->flags |= IW_ENCODE_RESTRICTED;
- else
- erq->flags |= IW_ENCODE_OPEN;
-
- return 0;
-}
-
-
-static int hostap_set_rate(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int ret, basic_rates;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- basic_rates = local->basic_rates & local->tx_rate_control;
- if (!basic_rates || basic_rates != local->basic_rates) {
- printk(KERN_INFO "%s: updating basic rate set automatically "
- "to match with the new supported rate set\n",
- dev->name);
- if (!basic_rates)
- basic_rates = local->tx_rate_control;
-
- local->basic_rates = basic_rates;
- if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
- basic_rates))
- printk(KERN_WARNING "%s: failed to set "
- "cnfBasicRates\n", dev->name);
- }
-
- ret = (hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
- local->tx_rate_control) ||
- hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
- local->tx_rate_control) ||
- local->func->reset_port(dev));
-
- if (ret) {
- printk(KERN_WARNING "%s: TXRateControl/cnfSupportedRates "
- "setting to 0x%x failed\n",
- dev->name, local->tx_rate_control);
- }
-
- /* Update TX rate configuration for all STAs based on new operational
- * rate set. */
- hostap_update_rates(local);
-
- return ret;
-}
-
-
-static int prism2_ioctl_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->bitrate;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
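-	/* A fixed request selects exactly one rate; otherwise enable every
-	 * rate up to and including the requested maximum. */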
- if (rrq->fixed) {
- switch (rrq->value) {
- case 11000000:
- local->tx_rate_control = HFA384X_RATES_11MBPS;
- break;
- case 5500000:
- local->tx_rate_control = HFA384X_RATES_5MBPS;
- break;
- case 2000000:
- local->tx_rate_control = HFA384X_RATES_2MBPS;
- break;
- case 1000000:
- local->tx_rate_control = HFA384X_RATES_1MBPS;
- break;
- default:
- local->tx_rate_control = HFA384X_RATES_1MBPS |
- HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
- HFA384X_RATES_11MBPS;
- break;
- }
- } else {
- switch (rrq->value) {
- case 11000000:
- local->tx_rate_control = HFA384X_RATES_1MBPS |
- HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
- HFA384X_RATES_11MBPS;
- break;
- case 5500000:
- local->tx_rate_control = HFA384X_RATES_1MBPS |
- HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS;
- break;
- case 2000000:
- local->tx_rate_control = HFA384X_RATES_1MBPS |
- HFA384X_RATES_2MBPS;
- break;
- case 1000000:
- local->tx_rate_control = HFA384X_RATES_1MBPS;
- break;
- default:
- local->tx_rate_control = HFA384X_RATES_1MBPS |
- HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
- HFA384X_RATES_11MBPS;
- break;
- }
- }
-
- return hostap_set_rate(dev);
-}
-
-
-static int prism2_ioctl_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->bitrate;
- u16 val;
- struct hostap_interface *iface;
- local_info_t *local;
- int ret = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->get_rid(dev, HFA384X_RID_TXRATECONTROL, &val, 2, 1) <
- 0)
- return -EINVAL;
-
- if ((val & 0x1) && (val > 1))
- rrq->fixed = 0;
- else
- rrq->fixed = 1;
-
- if (local->iw_mode == IW_MODE_MASTER && local->ap != NULL &&
- !local->fw_tx_rate_control) {
- /* HFA384X_RID_CURRENTTXRATE seems to always be 2 Mbps in
- * Host AP mode, so use the recorded TX rate of the last sent
- * frame */
- rrq->value = local->ap->last_tx_rate > 0 ?
- local->ap->last_tx_rate * 100000 : 11000000;
- return 0;
- }
-
- if (local->func->get_rid(dev, HFA384X_RID_CURRENTTXRATE, &val, 2, 1) <
- 0)
- return -EINVAL;
-
- switch (val) {
- case HFA384X_RATES_1MBPS:
- rrq->value = 1000000;
- break;
- case HFA384X_RATES_2MBPS:
- rrq->value = 2000000;
- break;
- case HFA384X_RATES_5MBPS:
- rrq->value = 5500000;
- break;
- case HFA384X_RATES_11MBPS:
- rrq->value = 11000000;
- break;
- default:
- /* should not happen */
- rrq->value = 11000000;
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
-static int prism2_ioctl_siwsens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *sens = &wrqu->sens;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Set the desired AP density */
- if (sens->value < 1 || sens->value > 3)
- return -EINVAL;
-
- if (hostap_set_word(dev, HFA384X_RID_CNFSYSTEMSCALE, sens->value) ||
- local->func->reset_port(dev))
- return -EINVAL;
-
- return 0;
-}
-
-static int prism2_ioctl_giwsens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *sens = &wrqu->sens;
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Get the current AP density */
- if (local->func->get_rid(dev, HFA384X_RID_CNFSYSTEMSCALE, &val, 2, 1) <
- 0)
- return -EINVAL;
-
- sens->value = le16_to_cpu(val);
- sens->fixed = 1;
-
- return 0;
-}
-
-
-/* Deprecated in new wireless extension API */
-static int prism2_ioctl_giwaplist(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
- struct sockaddr *addr;
- struct iw_quality *qual;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->iw_mode != IW_MODE_MASTER) {
- printk(KERN_DEBUG "SIOCGIWAPLIST is currently only supported "
- "in Host AP mode\n");
- data->length = 0;
- return -EOPNOTSUPP;
- }
-
- addr = kmalloc_array(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL);
- qual = kmalloc_array(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL);
- if (addr == NULL || qual == NULL) {
- kfree(addr);
- kfree(qual);
- data->length = 0;
- return -ENOMEM;
- }
-
- data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
-
- memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
- data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
- sizeof(struct iw_quality) * data->length);
-
- kfree(addr);
- kfree(qual);
- return 0;
-}
-
-
-static int prism2_ioctl_siwrts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rts = &wrqu->rts;
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (rts->disabled)
- val = cpu_to_le16(2347);
- else if (rts->value < 0 || rts->value > 2347)
- return -EINVAL;
- else
- val = cpu_to_le16(rts->value);
-
- if (local->func->set_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2) ||
- local->func->reset_port(dev))
- return -EINVAL;
-
- local->rts_threshold = rts->value;
-
- return 0;
-}
-
-static int prism2_ioctl_giwrts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rts = &wrqu->rts;
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->get_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2, 1) <
- 0)
- return -EINVAL;
-
- rts->value = le16_to_cpu(val);
- rts->disabled = (rts->value == 2347);
- rts->fixed = 1;
-
- return 0;
-}
-
-
-static int prism2_ioctl_siwfrag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rts = &wrqu->rts;
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (rts->disabled)
- val = cpu_to_le16(2346);
- else if (rts->value < 256 || rts->value > 2346)
- return -EINVAL;
- else
- val = cpu_to_le16(rts->value & ~0x1); /* even numbers only */
-
- local->fragm_threshold = rts->value & ~0x1;
- if (local->func->set_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, &val,
- 2)
- || local->func->reset_port(dev))
- return -EINVAL;
-
- return 0;
-}
-
-static int prism2_ioctl_giwfrag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rts = &wrqu->rts;
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->get_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
- &val, 2, 1) < 0)
- return -EINVAL;
-
- rts->value = le16_to_cpu(val);
- rts->disabled = (rts->value == 2346);
- rts->fixed = 1;
-
- return 0;
-}
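-
-/* A sketch of the threshold encodings enforced by the four handlers
- * above: the RTS threshold is 0..2347 with 2347 doubling as the
- * "disabled" value, and the fragmentation threshold is 256..2346,
- * even values only, with 2346 meaning "disabled". */
-static int rts_to_rid(int value, int disabled)
-{
-	if (disabled)
-		return 2347;
-	return (value < 0 || value > 2347) ? -1 : value;
-}
-
-static int frag_to_rid(int value, int disabled)
-{
-	if (disabled)
-		return 2346;
-	if (value < 256 || value > 2346)
-		return -1;		/* -EINVAL in the driver */
-	return value & ~0x1;		/* firmware takes even sizes only */
-}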
-
-
-#ifndef PRISM2_NO_STATION_MODES
-static int hostap_join_ap(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hfa384x_join_request req;
- unsigned long flags;
- int i;
- struct hfa384x_hostscan_result *entry;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- memcpy(req.bssid, local->preferred_ap, ETH_ALEN);
- req.channel = 0;
-
- spin_lock_irqsave(&local->lock, flags);
- for (i = 0; i < local->last_scan_results_count; i++) {
- if (!local->last_scan_results)
- break;
- entry = &local->last_scan_results[i];
- if (ether_addr_equal(local->preferred_ap, entry->bssid)) {
- req.channel = entry->chid;
- break;
- }
- }
- spin_unlock_irqrestore(&local->lock, flags);
-
- if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
- sizeof(req))) {
- printk(KERN_DEBUG "%s: JoinRequest %pM failed\n",
- dev->name, local->preferred_ap);
- return -1;
- }
-
- printk(KERN_DEBUG "%s: Trying to join BSSID %pM\n",
- dev->name, local->preferred_ap);
-
- return 0;
-}
-#endif /* PRISM2_NO_STATION_MODES */
-
-
-static int prism2_ioctl_siwap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct sockaddr *ap_addr = &wrqu->ap_addr;
-#ifdef PRISM2_NO_STATION_MODES
- return -EOPNOTSUPP;
-#else /* PRISM2_NO_STATION_MODES */
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- memcpy(local->preferred_ap, &ap_addr->sa_data, ETH_ALEN);
-
- if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA) {
- struct hfa384x_scan_request scan_req;
- memset(&scan_req, 0, sizeof(scan_req));
- scan_req.channel_list = cpu_to_le16(0x3fff);
- scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
- if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST,
- &scan_req, sizeof(scan_req))) {
- printk(KERN_DEBUG "%s: ScanResults request failed - "
- "preferred AP delayed to next unsolicited "
- "scan\n", dev->name);
- }
- } else if (local->host_roaming == 2 &&
- local->iw_mode == IW_MODE_INFRA) {
- if (hostap_join_ap(dev))
- return -EINVAL;
- } else {
- printk(KERN_DEBUG "%s: Preferred AP (SIOCSIWAP) is used only "
- "in Managed mode when host_roaming is enabled\n",
- dev->name);
- }
-
- return 0;
-#endif /* PRISM2_NO_STATION_MODES */
-}
-
-static int prism2_ioctl_giwap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct sockaddr *ap_addr = &wrqu->ap_addr;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- ap_addr->sa_family = ARPHRD_ETHER;
- switch (iface->type) {
- case HOSTAP_INTERFACE_AP:
- memcpy(&ap_addr->sa_data, dev->dev_addr, ETH_ALEN);
- break;
- case HOSTAP_INTERFACE_STA:
- memcpy(&ap_addr->sa_data, local->assoc_ap_addr, ETH_ALEN);
- break;
- case HOSTAP_INTERFACE_WDS:
- memcpy(&ap_addr->sa_data, iface->u.wds.remote_addr, ETH_ALEN);
- break;
- default:
- if (local->func->get_rid(dev, HFA384X_RID_CURRENTBSSID,
- &ap_addr->sa_data, ETH_ALEN, 1) < 0)
- return -EOPNOTSUPP;
-
- /* local->bssid is also updated in LinkStatus handler when in
- * station mode */
- memcpy(local->bssid, &ap_addr->sa_data, ETH_ALEN);
- break;
- }
-
- return 0;
-}
-
-
-static int prism2_ioctl_siwnickn(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *nickname)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- memset(local->name, 0, sizeof(local->name));
- memcpy(local->name, nickname, data->length);
- local->name_set = 1;
-
- if (hostap_set_string(dev, HFA384X_RID_CNFOWNNAME, local->name) ||
- local->func->reset_port(dev))
- return -EINVAL;
-
- return 0;
-}
-
-static int prism2_ioctl_giwnickn(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *nickname)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
- int len;
- char name[MAX_NAME_LEN + 3];
- u16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- len = local->func->get_rid(dev, HFA384X_RID_CNFOWNNAME,
- &name, MAX_NAME_LEN + 2, 0);
- val = le16_to_cpu(*(__le16 *) name);
- if (len > MAX_NAME_LEN + 2 || len < 0 || val > MAX_NAME_LEN)
- return -EOPNOTSUPP;
-
- name[val + 2] = '\0';
- data->length = val + 1;
- memcpy(nickname, name + 2, val + 1);
-
- return 0;
-}
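-
-/* A sketch of the RID string layout parsed above, inferred from the
- * code rather than from firmware documentation: a little-endian
- * 16-bit length word followed by the string bytes. */
-#include <stdint.h>
-#include <string.h>
-
-static size_t rid_string_decode(const uint8_t *rid, size_t rid_len,
-				char *out, size_t out_len)
-{
-	uint16_t len;
-
-	if (rid_len < 2)
-		return 0;
-	len = rid[0] | (rid[1] << 8);	/* le16_to_cpu() equivalent */
-	if (len > rid_len - 2 || (size_t) len + 1 > out_len)
-		return 0;
-	memcpy(out, rid + 2, len);
-	out[len] = '\0';
-	return len;
-}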
-
-
-static int prism2_ioctl_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_freq *freq = &wrqu->freq;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* freq => chan. */
- if (freq->e == 1 &&
- freq->m / 100000 >= freq_list[0] &&
- freq->m / 100000 <= freq_list[FREQ_COUNT - 1]) {
- int ch;
- int fr = freq->m / 100000;
- for (ch = 0; ch < FREQ_COUNT; ch++) {
- if (fr == freq_list[ch]) {
- freq->e = 0;
- freq->m = ch + 1;
- break;
- }
- }
- }
-
- if (freq->e != 0 || freq->m < 1 || freq->m > FREQ_COUNT ||
- !(local->channel_mask & (1 << (freq->m - 1))))
- return -EINVAL;
-
- local->channel = freq->m; /* channel is used in prism2_setup_rids() */
- if (hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel) ||
- local->func->reset_port(dev))
- return -EINVAL;
-
- return 0;
-}
-
-static int prism2_ioctl_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_freq *freq = &wrqu->freq;
- struct hostap_interface *iface;
- local_info_t *local;
- u16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->get_rid(dev, HFA384X_RID_CURRENTCHANNEL, &val, 2, 1) <
- 0)
- return -EINVAL;
-
- le16_to_cpus(&val);
- if (val < 1 || val > FREQ_COUNT)
- return -EINVAL;
-
- freq->m = freq_list[val - 1] * 100000;
- freq->e = 1;
-
- return 0;
-}
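-
-/* A sketch of the iw_freq encoding used by the two handlers above,
- * assuming freq_list holds the standard 14-channel 2.4 GHz table in
- * MHz: the driver stores m = MHz * 100000 with exponent e = 1, so the
- * frequency in Hz is m * 10. */
-static const int freq_list_2ghz[] = {
-	2412, 2417, 2422, 2427, 2432, 2437, 2442,
-	2447, 2452, 2457, 2462, 2467, 2472, 2484
-};
-
-static long long channel_to_hz(int chan)
-{
-	long long m;
-
-	if (chan < 1 || chan > 14)
-		return -1;
-	m = (long long) freq_list_2ghz[chan - 1] * 100000;	/* iw_freq.m */
-	return m * 10;						/* e == 1 */
-}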
-
-
-static void hostap_monitor_set_type(local_info_t *local)
-{
- struct net_device *dev = local->ddev;
-
- if (dev == NULL)
- return;
-
- if (local->monitor_type == PRISM2_MONITOR_PRISM ||
- local->monitor_type == PRISM2_MONITOR_CAPHDR) {
- dev->type = ARPHRD_IEEE80211_PRISM;
- } else if (local->monitor_type == PRISM2_MONITOR_RADIOTAP) {
- dev->type = ARPHRD_IEEE80211_RADIOTAP;
- } else {
- dev->type = ARPHRD_IEEE80211;
- }
-}
-
-
-static int prism2_ioctl_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *ssid)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (iface->type == HOSTAP_INTERFACE_WDS)
- return -EOPNOTSUPP;
-
- if (data->flags == 0)
- ssid[0] = '\0'; /* ANY */
-
- if (local->iw_mode == IW_MODE_MASTER && ssid[0] == '\0') {
- /* Setting SSID to empty string seems to kill the card in
- * Host AP mode */
- printk(KERN_DEBUG "%s: Host AP mode does not support "
- "'Any' essid\n", dev->name);
- return -EINVAL;
- }
-
- memcpy(local->essid, ssid, data->length);
- local->essid[data->length] = '\0';
-
- if ((!local->fw_ap &&
- hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID, local->essid))
- || hostap_set_string(dev, HFA384X_RID_CNFOWNSSID, local->essid) ||
- local->func->reset_port(dev))
- return -EINVAL;
-
- return 0;
-}
-
-static int prism2_ioctl_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *essid)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
- u16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (iface->type == HOSTAP_INTERFACE_WDS)
- return -EOPNOTSUPP;
-
- data->flags = 1; /* active */
- if (local->iw_mode == IW_MODE_MASTER) {
- data->length = strlen(local->essid);
- memcpy(essid, local->essid, IW_ESSID_MAX_SIZE);
- } else {
- int len;
- char ssid[MAX_SSID_LEN + 2];
- memset(ssid, 0, sizeof(ssid));
- len = local->func->get_rid(dev, HFA384X_RID_CURRENTSSID,
- &ssid, MAX_SSID_LEN + 2, 0);
- val = le16_to_cpu(*(__le16 *) ssid);
- if (len > MAX_SSID_LEN + 2 || len < 0 || val > MAX_SSID_LEN) {
- return -EOPNOTSUPP;
- }
- data->length = val;
- memcpy(essid, ssid + 2, IW_ESSID_MAX_SIZE);
- }
-
- return 0;
-}
-
-
-static int prism2_ioctl_giwrange(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
- struct iw_range *range = (struct iw_range *) extra;
- u8 rates[10];
- u16 val;
- int i, len, over2;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- data->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(struct iw_range));
-
- /* TODO: could fill num_txpower and txpower array with
- * something; however, there are 128 different values.. */
-
- range->txpower_capa = IW_TXPOW_DBM;
-
- if (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC)
- {
- range->min_pmp = 1 * 1024;
- range->max_pmp = 65535 * 1024;
- range->min_pmt = 1 * 1024;
- range->max_pmt = 1000 * 1024;
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT |
- IW_POWER_UNICAST_R | IW_POWER_ALL_R;
- }
-
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 18;
-
- range->retry_capa = IW_RETRY_LIMIT;
- range->retry_flags = IW_RETRY_LIMIT;
- range->min_retry = 0;
- range->max_retry = 255;
-
- range->num_channels = FREQ_COUNT;
-
- val = 0;
- for (i = 0; i < FREQ_COUNT; i++) {
- if (local->channel_mask & (1 << i)) {
- range->freq[val].i = i + 1;
- range->freq[val].m = freq_list[i] * 100000;
- range->freq[val].e = 1;
- val++;
- }
- if (val == IW_MAX_FREQUENCIES)
- break;
- }
- range->num_frequency = val;
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
-		range->max_qual.qual = 70; /* what is the correct max? It is
-					    * not documented exactly; at least
-					    * 69 has been observed. */
- range->max_qual.level = 0; /* dB */
- range->max_qual.noise = 0; /* dB */
-
- /* What would be suitable values for "average/typical" qual? */
- range->avg_qual.qual = 20;
- range->avg_qual.level = -60;
- range->avg_qual.noise = -95;
- } else {
- range->max_qual.qual = 92; /* 0 .. 92 */
- range->max_qual.level = 154; /* 27 .. 154 */
- range->max_qual.noise = 154; /* 27 .. 154 */
- }
- range->sensitivity = 3;
-
- range->max_encoding_tokens = WEP_KEYS;
- range->num_encoding_sizes = 2;
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
-
- over2 = 0;
- len = prism2_get_datarates(dev, rates);
- range->num_bitrates = 0;
- for (i = 0; i < len; i++) {
- if (range->num_bitrates < IW_MAX_BITRATES) {
- range->bitrate[range->num_bitrates] =
- rates[i] * 500000;
- range->num_bitrates++;
- }
- if (rates[i] == 0x0b || rates[i] == 0x16)
- over2 = 1;
- }
- /* estimated maximum TCP throughput values (bps) */
- range->throughput = over2 ? 5500000 : 1500000;
-
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVTXDROP) |
- IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
- IW_EVENT_CAPA_MASK(IWEVREGISTERED) |
- IW_EVENT_CAPA_MASK(IWEVEXPIRED));
-
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1))
- range->scan_capa = IW_SCAN_CAPA_ESSID;
-
- return 0;
-}
-
-
-static int hostap_monitor_mode_enable(local_info_t *local)
-{
- struct net_device *dev = local->dev;
-
- printk(KERN_DEBUG "Enabling monitor mode\n");
- hostap_monitor_set_type(local);
-
- if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
- HFA384X_PORTTYPE_PSEUDO_IBSS)) {
- printk(KERN_DEBUG "Port type setting for monitor mode "
- "failed\n");
- return -EOPNOTSUPP;
- }
-
-	/* Host decrypt is needed to get the IV and ICV fields;
-	 * however, monitor mode seems to remove the WEP flag from the
-	 * frame control field */
- if (hostap_set_word(dev, HFA384X_RID_CNFWEPFLAGS,
- HFA384X_WEPFLAGS_HOSTENCRYPT |
- HFA384X_WEPFLAGS_HOSTDECRYPT)) {
- printk(KERN_DEBUG "WEP flags setting failed\n");
- return -EOPNOTSUPP;
- }
-
- if (local->func->reset_port(dev) ||
- local->func->cmd(dev, HFA384X_CMDCODE_TEST |
- (HFA384X_TEST_MONITOR << 8),
- 0, NULL, NULL)) {
- printk(KERN_DEBUG "Setting monitor mode failed\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-
-static int hostap_monitor_mode_disable(local_info_t *local)
-{
- struct net_device *dev = local->ddev;
-
- if (dev == NULL)
- return -1;
-
- printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name);
- dev->type = ARPHRD_ETHER;
-
- if (local->func->cmd(dev, HFA384X_CMDCODE_TEST |
- (HFA384X_TEST_STOP << 8),
- 0, NULL, NULL))
- return -1;
- return hostap_set_encryption(local);
-}
-
-
-static int prism2_ioctl_siwmode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- __u32 *mode = &wrqu->mode;
- struct hostap_interface *iface;
- local_info_t *local;
- int double_reset = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (*mode != IW_MODE_ADHOC && *mode != IW_MODE_INFRA &&
- *mode != IW_MODE_MASTER && *mode != IW_MODE_REPEAT &&
- *mode != IW_MODE_MONITOR)
- return -EOPNOTSUPP;
-
-#ifdef PRISM2_NO_STATION_MODES
- if (*mode == IW_MODE_ADHOC || *mode == IW_MODE_INFRA)
- return -EOPNOTSUPP;
-#endif /* PRISM2_NO_STATION_MODES */
-
- if (*mode == local->iw_mode)
- return 0;
-
- if (*mode == IW_MODE_MASTER && local->essid[0] == '\0') {
- printk(KERN_WARNING "%s: empty SSID not allowed in Master "
- "mode\n", dev->name);
- return -EINVAL;
- }
-
- if (local->iw_mode == IW_MODE_MONITOR)
- hostap_monitor_mode_disable(local);
-
- if ((local->iw_mode == IW_MODE_ADHOC ||
- local->iw_mode == IW_MODE_MONITOR) && *mode == IW_MODE_MASTER) {
-		/* There seems to be a firmware bug in at least STA f/w
-		 * v1.5.6 that leaves beacon frames using the IBSS type when
-		 * moving from IBSS to Host AP mode. A double Port0 reset
-		 * seems to be enough to work around this. */
- double_reset = 1;
- }
-
- printk(KERN_DEBUG "prism2: %s: operating mode changed "
- "%d -> %d\n", dev->name, local->iw_mode, *mode);
- local->iw_mode = *mode;
-
- if (local->iw_mode == IW_MODE_MONITOR)
- hostap_monitor_mode_enable(local);
- else if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
- !local->fw_encrypt_ok) {
- printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
- "a workaround for firmware bug in Host AP mode WEP\n",
- dev->name);
- local->host_encrypt = 1;
- }
-
- if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
- hostap_get_porttype(local)))
- return -EOPNOTSUPP;
-
- if (local->func->reset_port(dev))
- return -EINVAL;
- if (double_reset && local->func->reset_port(dev))
- return -EINVAL;
-
- if (local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC)
- {
- /* netif_carrier is used only in client modes for now, so make
- * sure carrier is on when moving to non-client modes. */
- netif_carrier_on(local->dev);
- netif_carrier_on(local->ddev);
- }
- return 0;
-}
-
-
-static int prism2_ioctl_giwmode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- __u32 *mode = &wrqu->mode;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- switch (iface->type) {
- case HOSTAP_INTERFACE_STA:
- *mode = IW_MODE_INFRA;
- break;
- case HOSTAP_INTERFACE_WDS:
- *mode = IW_MODE_REPEAT;
- break;
- default:
- *mode = local->iw_mode;
- break;
- }
- return 0;
-}
-
-
-static int prism2_ioctl_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *wrq = &wrqu->power;
-#ifdef PRISM2_NO_STATION_MODES
- return -EOPNOTSUPP;
-#else /* PRISM2_NO_STATION_MODES */
- int ret = 0;
-
- if (wrq->disabled)
- return hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 0);
-
- switch (wrq->flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 0);
- if (ret)
- return ret;
- ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
- if (ret)
- return ret;
- break;
- case IW_POWER_ALL_R:
- ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 1);
- if (ret)
- return ret;
- ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
- if (ret)
- return ret;
- break;
- case IW_POWER_ON:
- break;
- default:
- return -EINVAL;
- }
-
- if (wrq->flags & IW_POWER_TIMEOUT) {
- ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
- if (ret)
- return ret;
- ret = hostap_set_word(dev, HFA384X_RID_CNFPMHOLDOVERDURATION,
- wrq->value / 1024);
- if (ret)
- return ret;
- }
- if (wrq->flags & IW_POWER_PERIOD) {
- ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
- if (ret)
- return ret;
- ret = hostap_set_word(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
- wrq->value / 1024);
- if (ret)
- return ret;
- }
-
- return ret;
-#endif /* PRISM2_NO_STATION_MODES */
-}
-
-
-static int prism2_ioctl_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->power;
-#ifdef PRISM2_NO_STATION_MODES
- return -EOPNOTSUPP;
-#else /* PRISM2_NO_STATION_MODES */
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 enable, mcast;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->get_rid(dev, HFA384X_RID_CNFPMENABLED, &enable, 2, 1)
- < 0)
- return -EINVAL;
-
- if (!le16_to_cpu(enable)) {
- rrq->disabled = 1;
- return 0;
- }
-
- rrq->disabled = 0;
-
- if ((rrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- __le16 timeout;
- if (local->func->get_rid(dev,
- HFA384X_RID_CNFPMHOLDOVERDURATION,
- &timeout, 2, 1) < 0)
- return -EINVAL;
-
- rrq->flags = IW_POWER_TIMEOUT;
- rrq->value = le16_to_cpu(timeout) * 1024;
- } else {
- __le16 period;
- if (local->func->get_rid(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
- &period, 2, 1) < 0)
- return -EINVAL;
-
- rrq->flags = IW_POWER_PERIOD;
- rrq->value = le16_to_cpu(period) * 1024;
- }
-
- if (local->func->get_rid(dev, HFA384X_RID_CNFMULTICASTRECEIVE, &mcast,
- 2, 1) < 0)
- return -EINVAL;
-
- if (le16_to_cpu(mcast))
- rrq->flags |= IW_POWER_ALL_R;
- else
- rrq->flags |= IW_POWER_UNICAST_R;
-
- return 0;
-#endif /* PRISM2_NO_STATION_MODES */
-}
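-
-/* A sketch of the unit handling in the two power handlers above:
- * wireless extensions express the PM period/timeout in microseconds,
- * while the firmware RIDs appear to use 1024-microsecond (Kus) units,
- * hence the divide on write and multiply on read. */
-static unsigned int pm_us_to_rid(unsigned int us)
-{
-	return us / 1024;	/* e.g. 102400 us -> RID value 100 */
-}
-
-static unsigned int pm_rid_to_us(unsigned int rid)
-{
-	return rid * 1024;
-}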
-
-
-static int prism2_ioctl_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->retry;
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (rrq->disabled)
- return -EINVAL;
-
- /* setting retry limits is not supported with the current station
- * firmware code; simulate this with alternative retry count for now */
- if (rrq->flags == IW_RETRY_LIMIT) {
- if (rrq->value < 0) {
- /* disable manual retry count setting and use firmware
- * defaults */
- local->manual_retry_count = -1;
- local->tx_control &= ~HFA384X_TX_CTRL_ALT_RTRY;
- } else {
- if (hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
- rrq->value)) {
- printk(KERN_DEBUG "%s: Alternate retry count "
- "setting to %d failed\n",
- dev->name, rrq->value);
- return -EOPNOTSUPP;
- }
-
- local->manual_retry_count = rrq->value;
- local->tx_control |= HFA384X_TX_CTRL_ALT_RTRY;
- }
- return 0;
- }
-
- return -EOPNOTSUPP;
-
-#if 0
-	/* What could be done if the firmware supported setting these RIDs:
-	 * write the requested limits directly instead of simulating them
-	 * with the alternate retry count. */
-
-	if (rrq->flags & IW_RETRY_LIMIT) {
-		if (rrq->flags & IW_RETRY_LONG)
-			hostap_set_word(dev, HFA384X_RID_LONGRETRYLIMIT,
-					rrq->value);
-		else if (rrq->flags & IW_RETRY_SHORT)
-			hostap_set_word(dev, HFA384X_RID_SHORTRETRYLIMIT,
-					rrq->value);
-		else {
-			hostap_set_word(dev, HFA384X_RID_LONGRETRYLIMIT,
-					rrq->value);
-			hostap_set_word(dev, HFA384X_RID_SHORTRETRYLIMIT,
-					rrq->value);
-		}
-	}
-
-	if (rrq->flags & IW_RETRY_LIFETIME)
-		hostap_set_word(dev, HFA384X_RID_MAXTRANSMITLIFETIME,
-				rrq->value / 1024);
-
-	return 0;
-#endif /* 0 */
-}
-
-static int prism2_ioctl_giwretry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->retry;
- struct hostap_interface *iface;
- local_info_t *local;
- __le16 shortretry, longretry, lifetime, altretry;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->get_rid(dev, HFA384X_RID_SHORTRETRYLIMIT, &shortretry,
- 2, 1) < 0 ||
- local->func->get_rid(dev, HFA384X_RID_LONGRETRYLIMIT, &longretry,
- 2, 1) < 0 ||
- local->func->get_rid(dev, HFA384X_RID_MAXTRANSMITLIFETIME,
- &lifetime, 2, 1) < 0)
- return -EINVAL;
-
- rrq->disabled = 0;
-
- if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- rrq->flags = IW_RETRY_LIFETIME;
- rrq->value = le16_to_cpu(lifetime) * 1024;
- } else {
- if (local->manual_retry_count >= 0) {
- rrq->flags = IW_RETRY_LIMIT;
- if (local->func->get_rid(dev,
- HFA384X_RID_CNFALTRETRYCOUNT,
- &altretry, 2, 1) >= 0)
- rrq->value = le16_to_cpu(altretry);
- else
- rrq->value = local->manual_retry_count;
- } else if ((rrq->flags & IW_RETRY_LONG)) {
- rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- rrq->value = le16_to_cpu(longretry);
- } else {
- rrq->flags = IW_RETRY_LIMIT;
- rrq->value = le16_to_cpu(shortretry);
- if (shortretry != longretry)
- rrq->flags |= IW_RETRY_SHORT;
- }
- }
- return 0;
-}
-
-
-/* Note! This TX power control is experimental and should not be used in
- * production. It just sets the raw power register and does not use any kind
- * of feedback from the measured TX power (CR58). It is commented out to make
- * sure that it is not used by accident. TX power configuration will be
- * enabled again once a proper feedback-based algorithm has been
- * implemented. */
-
-#ifdef RAW_TXPOWER_SETTING
-/* Map HFA386x's CR31 to and from dBm with some sort of ad hoc mapping..
- * This version assumes following mapping:
- * CR31 is 7-bit value with -64 to +63 range.
- * -64 is mapped into +20dBm and +63 into -43dBm.
- * This is certainly not an exact mapping for every card, but at least
- * increasing dBm value should correspond to increasing TX power.
- */
-
-static int prism2_txpower_hfa386x_to_dBm(u16 val)
-{
- signed char tmp;
-
- if (val > 255)
- val = 255;
-
- tmp = val;
- tmp >>= 2;
-
- return -12 - tmp;
-}
-
-static u16 prism2_txpower_dBm_to_hfa386x(int val)
-{
- signed char tmp;
-
- if (val > 20)
- return 128;
- else if (val < -43)
- return 127;
-
- tmp = val;
- tmp = -12 - tmp;
- tmp <<= 2;
-
- return (unsigned char) tmp;
-}
-#endif /* RAW_TXPOWER_SETTING */
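-
-/* A worked example of the mapping above, restated as standalone code
- * for a quick sanity check (not a calibration table): register value
- * 0x80 is -128 as a signed char and decodes to -12 - (-128 >> 2) =
- * +20 dBm, while 0x7c (+124) decodes to -12 - 31 = -43 dBm, matching
- * the endpoints described in the comment. An arithmetic right shift
- * is assumed, as in the driver itself. */
-#include <stdio.h>
-
-static int cr31_to_dbm(unsigned int val)
-{
-	signed char tmp = val > 255 ? 255 : val;
-
-	return -12 - (tmp >> 2);
-}
-
-int main(void)
-{
-	printf("0x80 -> %+d dBm\n", cr31_to_dbm(0x80));	/* +20 */
-	printf("0x7c -> %+d dBm\n", cr31_to_dbm(0x7c));	/* -43 */
-	return 0;
-}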
-
-
-static int prism2_ioctl_siwtxpow(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->txpower;
- struct hostap_interface *iface;
- local_info_t *local;
-#ifdef RAW_TXPOWER_SETTING
- char *tmp;
-#endif
- u16 val;
- int ret = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (rrq->disabled) {
- if (local->txpower_type != PRISM2_TXPOWER_OFF) {
- val = 0xff; /* use all standby and sleep modes */
- ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
- HFA386X_CR_A_D_TEST_MODES2,
- &val, NULL);
- printk(KERN_DEBUG "%s: Turning radio off: %s\n",
- dev->name, ret ? "failed" : "OK");
- local->txpower_type = PRISM2_TXPOWER_OFF;
- }
- return (ret ? -EOPNOTSUPP : 0);
- }
-
- if (local->txpower_type == PRISM2_TXPOWER_OFF) {
- val = 0; /* disable all standby and sleep modes */
- ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
- HFA386X_CR_A_D_TEST_MODES2, &val, NULL);
- printk(KERN_DEBUG "%s: Turning radio on: %s\n",
- dev->name, ret ? "failed" : "OK");
- local->txpower_type = PRISM2_TXPOWER_UNKNOWN;
- }
-
-#ifdef RAW_TXPOWER_SETTING
- if (!rrq->fixed && local->txpower_type != PRISM2_TXPOWER_AUTO) {
- printk(KERN_DEBUG "Setting ALC on\n");
- val = HFA384X_TEST_CFG_BIT_ALC;
- local->func->cmd(dev, HFA384X_CMDCODE_TEST |
- (HFA384X_TEST_CFG_BITS << 8), 1, &val, NULL);
- local->txpower_type = PRISM2_TXPOWER_AUTO;
- return 0;
- }
-
- if (local->txpower_type != PRISM2_TXPOWER_FIXED) {
- printk(KERN_DEBUG "Setting ALC off\n");
- val = HFA384X_TEST_CFG_BIT_ALC;
- local->func->cmd(dev, HFA384X_CMDCODE_TEST |
- (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
- local->txpower_type = PRISM2_TXPOWER_FIXED;
- }
-
- if (rrq->flags == IW_TXPOW_DBM)
- tmp = "dBm";
- else if (rrq->flags == IW_TXPOW_MWATT)
- tmp = "mW";
- else
- tmp = "UNKNOWN";
- printk(KERN_DEBUG "Setting TX power to %d %s\n", rrq->value, tmp);
-
- if (rrq->flags != IW_TXPOW_DBM) {
- printk("SIOCSIWTXPOW with mW is not supported; use dBm\n");
- return -EOPNOTSUPP;
- }
-
- local->txpower = rrq->value;
- val = prism2_txpower_dBm_to_hfa386x(local->txpower);
- if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
- HFA386X_CR_MANUAL_TX_POWER, &val, NULL))
- ret = -EOPNOTSUPP;
-#else /* RAW_TXPOWER_SETTING */
- if (rrq->fixed)
- ret = -EOPNOTSUPP;
-#endif /* RAW_TXPOWER_SETTING */
-
- return ret;
-}
-
-static int prism2_ioctl_giwtxpow(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
-#ifdef RAW_TXPOWER_SETTING
- struct iw_param *rrq = &wrqu->txpower;
- struct hostap_interface *iface;
- local_info_t *local;
- u16 resp0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- rrq->flags = IW_TXPOW_DBM;
- rrq->disabled = 0;
- rrq->fixed = 0;
-
- if (local->txpower_type == PRISM2_TXPOWER_AUTO) {
- if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF,
- HFA386X_CR_MANUAL_TX_POWER,
- NULL, &resp0) == 0) {
- rrq->value = prism2_txpower_hfa386x_to_dBm(resp0);
- } else {
- /* Could not get real txpower; guess 15 dBm */
- rrq->value = 15;
- }
- } else if (local->txpower_type == PRISM2_TXPOWER_OFF) {
- rrq->value = 0;
- rrq->disabled = 1;
- } else if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
- rrq->value = local->txpower;
- rrq->fixed = 1;
- } else {
- printk("SIOCGIWTXPOW - unknown txpower_type=%d\n",
- local->txpower_type);
- }
- return 0;
-#else /* RAW_TXPOWER_SETTING */
- return -EOPNOTSUPP;
-#endif /* RAW_TXPOWER_SETTING */
-}
-
-
-#ifndef PRISM2_NO_STATION_MODES
-
-/* A HostScan request works both with and without host_roaming mode and does
- * not break the current association. However, it requires a newer station
- * firmware version (>= 1.3.1) than the plain scan request does. */
-static int prism2_request_hostscan(struct net_device *dev,
- u8 *ssid, u8 ssid_len)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hfa384x_hostscan_request scan_req;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- memset(&scan_req, 0, sizeof(scan_req));
- scan_req.channel_list = cpu_to_le16(local->channel_mask &
- local->scan_channel_mask);
- scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
- if (ssid) {
- if (ssid_len > 32)
- return -EINVAL;
- scan_req.target_ssid_len = cpu_to_le16(ssid_len);
- memcpy(scan_req.target_ssid, ssid, ssid_len);
- }
-
- if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
- sizeof(scan_req))) {
- printk(KERN_DEBUG "%s: HOSTSCAN failed\n", dev->name);
- return -EINVAL;
- }
- return 0;
-}
-
-
-static int prism2_request_scan(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hfa384x_scan_request scan_req;
- int ret = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- memset(&scan_req, 0, sizeof(scan_req));
- scan_req.channel_list = cpu_to_le16(local->channel_mask &
- local->scan_channel_mask);
- scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
-
-	/* FIX:
-	 * It seems to be enough to set the roaming mode to host-based for a
-	 * short moment, submit the ScanRequest data, and then return the
-	 * mode to firmware-based.
-	 *
-	 * Master mode would need to drop to Managed mode for a short while
-	 * to make scanning work, or to sweep through the different channels
-	 * and use passive scanning based on beacons. */
-
- if (!local->host_roaming)
- hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
- HFA384X_ROAMING_HOST);
-
- if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST, &scan_req,
- sizeof(scan_req))) {
- printk(KERN_DEBUG "SCANREQUEST failed\n");
- ret = -EINVAL;
- }
-
- if (!local->host_roaming)
- hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
- HFA384X_ROAMING_FIRMWARE);
-
- return ret;
-}
-
-#else /* !PRISM2_NO_STATION_MODES */
-
-static inline int prism2_request_hostscan(struct net_device *dev,
- u8 *ssid, u8 ssid_len)
-{
- return -EOPNOTSUPP;
-}
-
-
-static inline int prism2_request_scan(struct net_device *dev)
-{
- return -EOPNOTSUPP;
-}
-
-#endif /* !PRISM2_NO_STATION_MODES */
-
-
-static int prism2_ioctl_siwscan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
- int ret;
- u8 *ssid = NULL, ssid_len = 0;
- struct iw_scan_req *req = (struct iw_scan_req *) extra;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (data->length < sizeof(struct iw_scan_req))
- req = NULL;
-
- if (local->iw_mode == IW_MODE_MASTER) {
- /* In master mode, we just return the results of our local
- * tables, so we don't need to start anything...
- * Jean II */
- data->length = 0;
- return 0;
- }
-
- if (!local->dev_enabled)
- return -ENETDOWN;
-
- if (req && data->flags & IW_SCAN_THIS_ESSID) {
- ssid = req->essid;
- ssid_len = req->essid_len;
-
- if (ssid_len &&
- ((local->iw_mode != IW_MODE_INFRA &&
- local->iw_mode != IW_MODE_ADHOC) ||
- (local->sta_fw_ver < PRISM2_FW_VER(1,3,1))))
- return -EOPNOTSUPP;
- }
-
- if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1))
- ret = prism2_request_hostscan(dev, ssid, ssid_len);
- else
- ret = prism2_request_scan(dev);
-
- if (ret == 0)
- local->scan_timestamp = jiffies;
-
- /* Could inquire F101, F103 or wait for SIOCGIWSCAN and read RID */
-
- return ret;
-}
-
-
-#ifndef PRISM2_NO_STATION_MODES
-static char * __prism2_translate_scan(local_info_t *local,
- struct iw_request_info *info,
- struct hfa384x_hostscan_result *scan,
- struct hostap_bss_info *bss,
- char *current_ev, char *end_buf)
-{
- int i, chan;
- struct iw_event iwe;
- char *current_val;
- u16 capabilities;
- u8 *pos;
- u8 *ssid, *bssid;
- size_t ssid_len;
- char *buf;
-
- if (bss) {
- ssid = bss->ssid;
- ssid_len = bss->ssid_len;
- bssid = bss->bssid;
- } else {
- ssid = scan->ssid;
- ssid_len = le16_to_cpu(scan->ssid_len);
- bssid = scan->bssid;
- }
- if (ssid_len > 32)
- ssid_len = 32;
-
- /* First entry *MUST* be the AP MAC address */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
- current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
- IW_EV_ADDR_LEN);
-
- /* Other entries will be displayed in the order we give them */
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.length = ssid_len;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, ssid);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWMODE;
- if (bss) {
- capabilities = bss->capab_info;
- } else {
- capabilities = le16_to_cpu(scan->capability);
- }
- if (capabilities & (WLAN_CAPABILITY_ESS |
- WLAN_CAPABILITY_IBSS)) {
- if (capabilities & WLAN_CAPABILITY_ESS)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_UINT_LEN);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWFREQ;
- if (scan) {
- chan = le16_to_cpu(scan->chid);
- } else if (bss) {
- chan = bss->chan;
- } else {
- chan = 0;
- }
-
- if (chan > 0) {
- iwe.u.freq.m = freq_list[chan - 1] * 100000;
- iwe.u.freq.e = 1;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_FREQ_LEN);
- }
-
- if (scan) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVQUAL;
- if (local->last_scan_type == PRISM2_HOSTSCAN) {
- iwe.u.qual.level = le16_to_cpu(scan->sl);
- iwe.u.qual.noise = le16_to_cpu(scan->anl);
- } else {
- iwe.u.qual.level =
- HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->sl));
- iwe.u.qual.noise =
- HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
- }
- iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_NOISE_UPDATED
- | IW_QUAL_QUAL_INVALID
- | IW_QUAL_DBM;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
- }
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWENCODE;
- if (capabilities & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, "");
-
- /* TODO: add SuppRates into BSS table */
- if (scan) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWRATE;
- current_val = current_ev + iwe_stream_lcp_len(info);
- pos = scan->sup_rates;
- for (i = 0; i < sizeof(scan->sup_rates); i++) {
- if (pos[i] == 0)
- break;
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((pos[i] & 0x7f) * 500000);
- current_val = iwe_stream_add_value(
- info, current_ev, current_val, end_buf, &iwe,
- IW_EV_PARAM_LEN);
- }
- /* Check if we added any event */
- if ((current_val - current_ev) > iwe_stream_lcp_len(info))
- current_ev = current_val;
- }
-
- /* TODO: add BeaconInt,resp_rate,atim into BSS table */
- buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC);
- if (buf && scan) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "bcn_int=%d", le16_to_cpu(scan->beacon_interval));
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, buf);
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "resp_rate=%d", le16_to_cpu(scan->rate));
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, buf);
-
- if (local->last_scan_type == PRISM2_HOSTSCAN &&
- (capabilities & WLAN_CAPABILITY_IBSS)) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "atim=%d", le16_to_cpu(scan->atim));
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev,
- end_buf, &iwe, buf);
- }
- }
- kfree(buf);
-
- if (bss && bss->wpa_ie_len > 0 && bss->wpa_ie_len <= MAX_WPA_IE_LEN) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = bss->wpa_ie_len;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, bss->wpa_ie);
- }
-
- if (bss && bss->rsn_ie_len > 0 && bss->rsn_ie_len <= MAX_WPA_IE_LEN) {
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = bss->rsn_ie_len;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, bss->rsn_ie);
- }
-
- return current_ev;
-}
-
-
-/* Translate scan data returned from the card to a card independent
- * format that the Wireless Tools will understand - Jean II */
-static inline int prism2_translate_scan(local_info_t *local,
- struct iw_request_info *info,
- char *buffer, int buflen)
-{
- struct hfa384x_hostscan_result *scan;
- int entry;
- char *current_ev = buffer;
- char *end_buf = buffer + buflen;
- struct list_head *ptr;
-
- spin_lock_bh(&local->lock);
-
- list_for_each(ptr, &local->bss_list) {
- struct hostap_bss_info *bss;
- bss = list_entry(ptr, struct hostap_bss_info, list);
- bss->included = 0;
- }
-
- for (entry = 0; entry < local->last_scan_results_count; entry++) {
- int found = 0;
- scan = &local->last_scan_results[entry];
-
- /* Report every SSID if the AP is using multiple SSIDs. If no
- * BSS record is found (e.g., when WPA mode is disabled),
- * report the AP once. */
- list_for_each(ptr, &local->bss_list) {
- struct hostap_bss_info *bss;
- bss = list_entry(ptr, struct hostap_bss_info, list);
- if (ether_addr_equal(bss->bssid, scan->bssid)) {
- bss->included = 1;
- current_ev = __prism2_translate_scan(
- local, info, scan, bss, current_ev,
- end_buf);
- found++;
- }
- }
- if (!found) {
- current_ev = __prism2_translate_scan(
- local, info, scan, NULL, current_ev, end_buf);
- }
- /* Check if there is space for one more entry */
- if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
- /* Ask user space to try again with a bigger buffer */
- spin_unlock_bh(&local->lock);
- return -E2BIG;
- }
- }
-
-	/* Prism2 firmware limits the number of BSSes in scan results (to 32
-	 * in at least some versions). Extend this limit by also reporting
-	 * entries from the local BSS list.
-	 */
- list_for_each(ptr, &local->bss_list) {
- struct hostap_bss_info *bss;
- bss = list_entry(ptr, struct hostap_bss_info, list);
- if (bss->included)
- continue;
- current_ev = __prism2_translate_scan(local, info, NULL, bss,
- current_ev, end_buf);
- /* Check if there is space for one more entry */
- if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
- /* Ask user space to try again with a bigger buffer */
- spin_unlock_bh(&local->lock);
- return -E2BIG;
- }
- }
-
- spin_unlock_bh(&local->lock);
-
- return current_ev - buffer;
-}
-#endif /* PRISM2_NO_STATION_MODES */
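-
-/* Illustrative userspace sketch, not part of the driver: walking the
- * event stream that prism2_translate_scan() above produces. Every
- * entry begins with a struct iw_event header carrying its total
- * length, and each BSS starts with a SIOCGIWAP event. */
-#include <stdio.h>
-#include <string.h>
-#include <linux/wireless.h>
-
-static void walk_scan_events(const char *buf, size_t len)
-{
-	size_t off = 0;
-
-	while (off + IW_EV_LCP_LEN <= len) {
-		struct iw_event ev;
-
-		memcpy(&ev, buf + off, IW_EV_LCP_LEN);
-		if (ev.len < IW_EV_LCP_LEN || off + ev.len > len)
-			break;			/* malformed stream */
-		if (ev.cmd == SIOCGIWAP)
-			printf("BSS entry at offset %zu\n", off);
-		off += ev.len;
-	}
-}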
-
-
-static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
-{
-#ifdef PRISM2_NO_STATION_MODES
- return -EOPNOTSUPP;
-#else /* PRISM2_NO_STATION_MODES */
- struct hostap_interface *iface;
- local_info_t *local;
- int res;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- /* Wait until the scan is finished. We can probably do better
- * than that - Jean II */
- if (local->scan_timestamp &&
- time_before(jiffies, local->scan_timestamp + 3 * HZ)) {
- /* Important note : we don't want to block the caller
- * until results are ready for various reasons.
- * First, managing wait queues is complex and racy
- * (there may be multiple simultaneous callers).
- * Second, we grab some rtnetlink lock before coming
- * here (in dev_ioctl()).
- * Third, the caller can wait on the Wireless Event
- * - Jean II */
- return -EAGAIN;
- }
- local->scan_timestamp = 0;
-
- res = prism2_translate_scan(local, info, extra, data->length);
-
- if (res >= 0) {
- data->length = res;
- return 0;
- } else {
- data->length = 0;
- return res;
- }
-#endif /* PRISM2_NO_STATION_MODES */
-}
-
-
-static int prism2_ioctl_giwscan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface;
- local_info_t *local;
- int res;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->iw_mode == IW_MODE_MASTER) {
- /* In MASTER mode, it doesn't make sense to go around
- * scanning the frequencies and make the stations we serve
-		 * wait when what the user is really interested in is the
- * list of stations and access points we are talking to.
- * So, just extract results from our cache...
- * Jean II */
-
- /* Translate to WE format */
- res = prism2_ap_translate_scan(dev, info, extra);
- if (res >= 0) {
- printk(KERN_DEBUG "Scan result translation succeeded "
- "(length=%d)\n", res);
- data->length = res;
- return 0;
- } else {
- printk(KERN_DEBUG
- "Scan result translation failed (res=%d)\n",
- res);
- data->length = 0;
- return res;
- }
- } else {
- /* Station mode */
- return prism2_ioctl_giwscan_sta(dev, info, data, extra);
- }
-}
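-
-/* Illustrative userspace sketch, not part of the driver: the
- * trigger-and-poll pattern the scan handlers above expect.
- * SIOCSIWSCAN starts the scan; SIOCGIWSCAN returns EAGAIN while
- * results are still pending and E2BIG when the buffer is too small
- * for the translated results. */
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <linux/wireless.h>
-
-static int run_scan(int fd, const char *ifname, char **buf, size_t *len)
-{
-	struct iwreq wrq;
-
-	memset(&wrq, 0, sizeof(wrq));
-	strncpy(wrq.ifr_name, ifname, IFNAMSIZ - 1);
-	if (ioctl(fd, SIOCSIWSCAN, &wrq) < 0)
-		return -1;
-
-	*len = 4096;
-	*buf = malloc(*len);
-	while (*buf && *len <= 65535) {	/* iw_point length is 16 bits */
-		wrq.u.data.pointer = *buf;
-		wrq.u.data.length = *len;
-		wrq.u.data.flags = 0;
-		if (ioctl(fd, SIOCGIWSCAN, &wrq) == 0) {
-			*len = wrq.u.data.length;
-			return 0;
-		}
-		if (errno == EAGAIN) {		/* scan still running */
-			sleep(1);
-			continue;
-		}
-		if (errno != E2BIG)
-			return -1;
-		*len *= 2;			/* grow buffer and retry */
-		*buf = realloc(*buf, *len);
-	}
-	return -1;
-}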
-
-
-static const struct iw_priv_args prism2_priv[] = {
- { PRISM2_IOCTL_MONITOR,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor" },
- { PRISM2_IOCTL_READMIF,
- IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "readmif" },
- { PRISM2_IOCTL_WRITEMIF,
- IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 2, 0, "writemif" },
- { PRISM2_IOCTL_RESET,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reset" },
- { PRISM2_IOCTL_INQUIRE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inquire" },
- { PRISM2_IOCTL_SET_RID_WORD,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_rid_word" },
- { PRISM2_IOCTL_MACCMD,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "maccmd" },
- { PRISM2_IOCTL_WDS_ADD,
- IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_add" },
- { PRISM2_IOCTL_WDS_DEL,
- IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_del" },
- { PRISM2_IOCTL_ADDMAC,
- IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "addmac" },
- { PRISM2_IOCTL_DELMAC,
- IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "delmac" },
- { PRISM2_IOCTL_KICKMAC,
- IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac" },
- /* --- raw access to sub-ioctls --- */
- { PRISM2_IOCTL_PRISM2_PARAM,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "prism2_param" },
- { PRISM2_IOCTL_GET_PRISM2_PARAM,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprism2_param" },
- /* --- sub-ioctls handlers --- */
- { PRISM2_IOCTL_PRISM2_PARAM,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" },
- { PRISM2_IOCTL_GET_PRISM2_PARAM,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" },
- /* --- sub-ioctls definitions --- */
- { PRISM2_PARAM_TXRATECTRL,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txratectrl" },
- { PRISM2_PARAM_TXRATECTRL,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettxratectrl" },
- { PRISM2_PARAM_BEACON_INT,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beacon_int" },
- { PRISM2_PARAM_BEACON_INT,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbeacon_int" },
-#ifndef PRISM2_NO_STATION_MODES
- { PRISM2_PARAM_PSEUDO_IBSS,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pseudo_ibss" },
- { PRISM2_PARAM_PSEUDO_IBSS,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpseudo_ibss" },
-#endif /* PRISM2_NO_STATION_MODES */
- { PRISM2_PARAM_ALC,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "alc" },
- { PRISM2_PARAM_ALC,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getalc" },
- { PRISM2_PARAM_DUMP,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dump" },
- { PRISM2_PARAM_DUMP,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdump" },
- { PRISM2_PARAM_OTHER_AP_POLICY,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "other_ap_policy" },
- { PRISM2_PARAM_OTHER_AP_POLICY,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getother_ap_pol" },
- { PRISM2_PARAM_AP_MAX_INACTIVITY,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_inactivity" },
- { PRISM2_PARAM_AP_MAX_INACTIVITY,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_inactivi" },
- { PRISM2_PARAM_AP_BRIDGE_PACKETS,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bridge_packets" },
- { PRISM2_PARAM_AP_BRIDGE_PACKETS,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbridge_packe" },
- { PRISM2_PARAM_DTIM_PERIOD,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" },
- { PRISM2_PARAM_DTIM_PERIOD,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdtim_period" },
- { PRISM2_PARAM_AP_NULLFUNC_ACK,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "nullfunc_ack" },
- { PRISM2_PARAM_AP_NULLFUNC_ACK,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getnullfunc_ack" },
- { PRISM2_PARAM_MAX_WDS,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_wds" },
- { PRISM2_PARAM_MAX_WDS,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_wds" },
- { PRISM2_PARAM_AP_AUTOM_AP_WDS,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autom_ap_wds" },
- { PRISM2_PARAM_AP_AUTOM_AP_WDS,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getautom_ap_wds" },
- { PRISM2_PARAM_AP_AUTH_ALGS,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_auth_algs" },
- { PRISM2_PARAM_AP_AUTH_ALGS,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_auth_algs" },
- { PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "allow_fcserr" },
- { PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getallow_fcserr" },
- { PRISM2_PARAM_HOST_ENCRYPT,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_encrypt" },
- { PRISM2_PARAM_HOST_ENCRYPT,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_encrypt" },
- { PRISM2_PARAM_HOST_DECRYPT,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_decrypt" },
- { PRISM2_PARAM_HOST_DECRYPT,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_decrypt" },
-#ifndef PRISM2_NO_STATION_MODES
- { PRISM2_PARAM_HOST_ROAMING,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_roaming" },
- { PRISM2_PARAM_HOST_ROAMING,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_roaming" },
-#endif /* PRISM2_NO_STATION_MODES */
- { PRISM2_PARAM_BCRX_STA_KEY,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcrx_sta_key" },
- { PRISM2_PARAM_BCRX_STA_KEY,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbcrx_sta_key" },
- { PRISM2_PARAM_IEEE_802_1X,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ieee_802_1x" },
- { PRISM2_PARAM_IEEE_802_1X,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getieee_802_1x" },
- { PRISM2_PARAM_ANTSEL_TX,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_tx" },
- { PRISM2_PARAM_ANTSEL_TX,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_tx" },
- { PRISM2_PARAM_ANTSEL_RX,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_rx" },
- { PRISM2_PARAM_ANTSEL_RX,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_rx" },
- { PRISM2_PARAM_MONITOR_TYPE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor_type" },
- { PRISM2_PARAM_MONITOR_TYPE,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmonitor_type" },
- { PRISM2_PARAM_WDS_TYPE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds_type" },
- { PRISM2_PARAM_WDS_TYPE,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwds_type" },
- { PRISM2_PARAM_HOSTSCAN,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostscan" },
- { PRISM2_PARAM_HOSTSCAN,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostscan" },
- { PRISM2_PARAM_AP_SCAN,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_scan" },
- { PRISM2_PARAM_AP_SCAN,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_scan" },
- { PRISM2_PARAM_ENH_SEC,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "enh_sec" },
- { PRISM2_PARAM_ENH_SEC,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getenh_sec" },
-#ifdef PRISM2_IO_DEBUG
- { PRISM2_PARAM_IO_DEBUG,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "io_debug" },
- { PRISM2_PARAM_IO_DEBUG,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getio_debug" },
-#endif /* PRISM2_IO_DEBUG */
- { PRISM2_PARAM_BASIC_RATES,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "basic_rates" },
- { PRISM2_PARAM_BASIC_RATES,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbasic_rates" },
- { PRISM2_PARAM_OPER_RATES,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "oper_rates" },
- { PRISM2_PARAM_OPER_RATES,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getoper_rates" },
- { PRISM2_PARAM_HOSTAPD,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd" },
- { PRISM2_PARAM_HOSTAPD,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd" },
- { PRISM2_PARAM_HOSTAPD_STA,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd_sta" },
- { PRISM2_PARAM_HOSTAPD_STA,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd_sta" },
- { PRISM2_PARAM_WPA,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" },
- { PRISM2_PARAM_WPA,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwpa" },
- { PRISM2_PARAM_PRIVACY_INVOKED,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy_invoked" },
- { PRISM2_PARAM_PRIVACY_INVOKED,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprivacy_invo" },
- { PRISM2_PARAM_TKIP_COUNTERMEASURES,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tkip_countermea" },
- { PRISM2_PARAM_TKIP_COUNTERMEASURES,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettkip_counter" },
- { PRISM2_PARAM_DROP_UNENCRYPTED,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "drop_unencrypte" },
- { PRISM2_PARAM_DROP_UNENCRYPTED,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdrop_unencry" },
- { PRISM2_PARAM_SCAN_CHANNEL_MASK,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_channels" },
- { PRISM2_PARAM_SCAN_CHANNEL_MASK,
- 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getscan_channel" },
-};
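-
-/* A sketch of the two-int argument layout behind the prism2_param
- * entries above: the sub-ioctl number followed by its value, which is
- * exactly how prism2_ioctl_priv_prism2_param() below unpacks "extra".
- * The example values are placeholders. */
-static void fill_prism2_param_args(int *extra, int param, int value)
-{
-	extra[0] = param;	/* e.g. PRISM2_PARAM_BEACON_INT */
-	extra[1] = value;	/* e.g. 100 (beacon interval in TU) */
-}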
-
-
-static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *uwrq, char *extra)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int *i = (int *) extra;
- int param = *i;
- int value = *(i + 1);
- int ret = 0;
- u16 val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- switch (param) {
- case PRISM2_PARAM_TXRATECTRL:
- local->fw_tx_rate_control = value;
- break;
-
- case PRISM2_PARAM_BEACON_INT:
- if (hostap_set_word(dev, HFA384X_RID_CNFBEACONINT, value) ||
- local->func->reset_port(dev))
- ret = -EINVAL;
- else
- local->beacon_int = value;
- break;
-
-#ifndef PRISM2_NO_STATION_MODES
- case PRISM2_PARAM_PSEUDO_IBSS:
- if (value == local->pseudo_adhoc)
- break;
-
- if (value != 0 && value != 1) {
- ret = -EINVAL;
- break;
- }
-
- printk(KERN_DEBUG "prism2: %s: pseudo IBSS change %d -> %d\n",
- dev->name, local->pseudo_adhoc, value);
- local->pseudo_adhoc = value;
- if (local->iw_mode != IW_MODE_ADHOC)
- break;
-
- if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
- hostap_get_porttype(local))) {
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (local->func->reset_port(dev))
- ret = -EINVAL;
- break;
-#endif /* PRISM2_NO_STATION_MODES */
-
- case PRISM2_PARAM_ALC:
- printk(KERN_DEBUG "%s: %s ALC\n", dev->name,
- value == 0 ? "Disabling" : "Enabling");
- val = HFA384X_TEST_CFG_BIT_ALC;
- local->func->cmd(dev, HFA384X_CMDCODE_TEST |
- (HFA384X_TEST_CFG_BITS << 8),
- value == 0 ? 0 : 1, &val, NULL);
- break;
-
- case PRISM2_PARAM_DUMP:
- local->frame_dump = value;
- break;
-
- case PRISM2_PARAM_OTHER_AP_POLICY:
- if (value < 0 || value > 3) {
- ret = -EINVAL;
- break;
- }
- if (local->ap != NULL)
- local->ap->ap_policy = value;
- break;
-
- case PRISM2_PARAM_AP_MAX_INACTIVITY:
- if (value < 0 || value > 7 * 24 * 60 * 60) {
- ret = -EINVAL;
- break;
- }
- if (local->ap != NULL)
- local->ap->max_inactivity = value * HZ;
- break;
-
- case PRISM2_PARAM_AP_BRIDGE_PACKETS:
- if (local->ap != NULL)
- local->ap->bridge_packets = value;
- break;
-
- case PRISM2_PARAM_DTIM_PERIOD:
- if (value < 0 || value > 65535) {
- ret = -EINVAL;
- break;
- }
- if (hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD, value)
- || local->func->reset_port(dev))
- ret = -EINVAL;
- else
- local->dtim_period = value;
- break;
-
- case PRISM2_PARAM_AP_NULLFUNC_ACK:
- if (local->ap != NULL)
- local->ap->nullfunc_ack = value;
- break;
-
- case PRISM2_PARAM_MAX_WDS:
- local->wds_max_connections = value;
- break;
-
- case PRISM2_PARAM_AP_AUTOM_AP_WDS:
- if (local->ap != NULL) {
- if (!local->ap->autom_ap_wds && value) {
- /* add WDS link to all APs in STA table */
- hostap_add_wds_links(local);
- }
- local->ap->autom_ap_wds = value;
- }
- break;
-
- case PRISM2_PARAM_AP_AUTH_ALGS:
- local->auth_algs = value;
- if (hostap_set_auth_algs(local))
- ret = -EINVAL;
- break;
-
- case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
- local->monitor_allow_fcserr = value;
- break;
-
- case PRISM2_PARAM_HOST_ENCRYPT:
- local->host_encrypt = value;
- if (hostap_set_encryption(local) ||
- local->func->reset_port(dev))
- ret = -EINVAL;
- break;
-
- case PRISM2_PARAM_HOST_DECRYPT:
- local->host_decrypt = value;
- if (hostap_set_encryption(local) ||
- local->func->reset_port(dev))
- ret = -EINVAL;
- break;
-
-#ifndef PRISM2_NO_STATION_MODES
- case PRISM2_PARAM_HOST_ROAMING:
- if (value < 0 || value > 2) {
- ret = -EINVAL;
- break;
- }
- local->host_roaming = value;
- if (hostap_set_roaming(local) || local->func->reset_port(dev))
- ret = -EINVAL;
- break;
-#endif /* PRISM2_NO_STATION_MODES */
-
- case PRISM2_PARAM_BCRX_STA_KEY:
- local->bcrx_sta_key = value;
- break;
-
- case PRISM2_PARAM_IEEE_802_1X:
- local->ieee_802_1x = value;
- break;
-
- case PRISM2_PARAM_ANTSEL_TX:
- if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
- ret = -EINVAL;
- break;
- }
- local->antsel_tx = value;
- hostap_set_antsel(local);
- break;
-
- case PRISM2_PARAM_ANTSEL_RX:
- if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
- ret = -EINVAL;
- break;
- }
- local->antsel_rx = value;
- hostap_set_antsel(local);
- break;
-
- case PRISM2_PARAM_MONITOR_TYPE:
- if (value != PRISM2_MONITOR_80211 &&
- value != PRISM2_MONITOR_CAPHDR &&
- value != PRISM2_MONITOR_PRISM &&
- value != PRISM2_MONITOR_RADIOTAP) {
- ret = -EINVAL;
- break;
- }
- local->monitor_type = value;
- if (local->iw_mode == IW_MODE_MONITOR)
- hostap_monitor_set_type(local);
- break;
-
- case PRISM2_PARAM_WDS_TYPE:
- local->wds_type = value;
- break;
-
- case PRISM2_PARAM_HOSTSCAN:
- {
- struct hfa384x_hostscan_request scan_req;
- u16 rate;
-
- memset(&scan_req, 0, sizeof(scan_req));
- scan_req.channel_list = cpu_to_le16(0x3fff);
- switch (value) {
- case 1: rate = HFA384X_RATES_1MBPS; break;
- case 2: rate = HFA384X_RATES_2MBPS; break;
- case 3: rate = HFA384X_RATES_5MBPS; break;
- case 4: rate = HFA384X_RATES_11MBPS; break;
- default: rate = HFA384X_RATES_1MBPS; break;
- }
- scan_req.txrate = cpu_to_le16(rate);
- /* leave SSID empty to accept all SSIDs */
-
- if (local->iw_mode == IW_MODE_MASTER) {
- if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
- HFA384X_PORTTYPE_BSS) ||
- local->func->reset_port(dev))
- printk(KERN_DEBUG "Leaving Host AP mode "
- "for HostScan failed\n");
- }
-
- if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
- sizeof(scan_req))) {
- printk(KERN_DEBUG "HOSTSCAN failed\n");
- ret = -EINVAL;
- }
- if (local->iw_mode == IW_MODE_MASTER) {
- wait_queue_entry_t __wait;
- init_waitqueue_entry(&__wait, current);
- add_wait_queue(&local->hostscan_wq, &__wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- if (signal_pending(current))
- ret = -EINTR;
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&local->hostscan_wq, &__wait);
-
- if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
- HFA384X_PORTTYPE_HOSTAP) ||
- local->func->reset_port(dev))
- printk(KERN_DEBUG "Returning to Host AP mode "
- "after HostScan failed\n");
- }
- break;
- }
-
- case PRISM2_PARAM_AP_SCAN:
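-		/* value is the passive scan interval in seconds; <= 0 disables
-		 * the timer */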
- local->passive_scan_interval = value;
- if (timer_pending(&local->passive_scan_timer))
- del_timer(&local->passive_scan_timer);
- if (value > 0 && value < INT_MAX / HZ) {
- local->passive_scan_timer.expires = jiffies +
- local->passive_scan_interval * HZ;
- add_timer(&local->passive_scan_timer);
- }
- break;
-
- case PRISM2_PARAM_ENH_SEC:
- if (value < 0 || value > 3) {
- ret = -EINVAL;
- break;
- }
- local->enh_sec = value;
- if (hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY,
- local->enh_sec) ||
- local->func->reset_port(dev)) {
- printk(KERN_INFO "%s: cnfEnhSecurity requires STA f/w "
- "1.6.3 or newer\n", dev->name);
- ret = -EOPNOTSUPP;
- }
- break;
-
-#ifdef PRISM2_IO_DEBUG
- case PRISM2_PARAM_IO_DEBUG:
- local->io_debug_enabled = value;
- break;
-#endif /* PRISM2_IO_DEBUG */
-
- case PRISM2_PARAM_BASIC_RATES:
- if ((value & local->tx_rate_control) != value || value == 0) {
- printk(KERN_INFO "%s: invalid basic rate set - basic "
- "rates must be in supported rate set\n",
- dev->name);
- ret = -EINVAL;
- break;
- }
- local->basic_rates = value;
- if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
- local->basic_rates) ||
- local->func->reset_port(dev))
- ret = -EINVAL;
- break;
-
- case PRISM2_PARAM_OPER_RATES:
- local->tx_rate_control = value;
- if (hostap_set_rate(dev))
- ret = -EINVAL;
- break;
-
- case PRISM2_PARAM_HOSTAPD:
- ret = hostap_set_hostapd(local, value, 1);
- break;
-
- case PRISM2_PARAM_HOSTAPD_STA:
- ret = hostap_set_hostapd_sta(local, value, 1);
- break;
-
- case PRISM2_PARAM_WPA:
- local->wpa = value;
- if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
- ret = -EOPNOTSUPP;
- else if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
- value ? 1 : 0))
- ret = -EINVAL;
- break;
-
- case PRISM2_PARAM_PRIVACY_INVOKED:
- local->privacy_invoked = value;
- if (hostap_set_encryption(local) ||
- local->func->reset_port(dev))
- ret = -EINVAL;
- break;
-
- case PRISM2_PARAM_TKIP_COUNTERMEASURES:
- local->tkip_countermeasures = value;
- break;
-
- case PRISM2_PARAM_DROP_UNENCRYPTED:
- local->drop_unencrypted = value;
- break;
-
- case PRISM2_PARAM_SCAN_CHANNEL_MASK:
- local->scan_channel_mask = value;
- break;
-
- default:
- printk(KERN_DEBUG "%s: prism2_param: unknown param %d\n",
- dev->name, param);
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-
-static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int *param = (int *) extra;
- int ret = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- switch (*param) {
- case PRISM2_PARAM_TXRATECTRL:
- *param = local->fw_tx_rate_control;
- break;
-
- case PRISM2_PARAM_BEACON_INT:
- *param = local->beacon_int;
- break;
-
- case PRISM2_PARAM_PSEUDO_IBSS:
- *param = local->pseudo_adhoc;
- break;
-
- case PRISM2_PARAM_ALC:
- ret = -EOPNOTSUPP; /* FIX */
- break;
-
- case PRISM2_PARAM_DUMP:
- *param = local->frame_dump;
- break;
-
- case PRISM2_PARAM_OTHER_AP_POLICY:
- if (local->ap != NULL)
- *param = local->ap->ap_policy;
- else
- ret = -EOPNOTSUPP;
- break;
-
- case PRISM2_PARAM_AP_MAX_INACTIVITY:
- if (local->ap != NULL)
- *param = local->ap->max_inactivity / HZ;
- else
- ret = -EOPNOTSUPP;
- break;
-
- case PRISM2_PARAM_AP_BRIDGE_PACKETS:
- if (local->ap != NULL)
- *param = local->ap->bridge_packets;
- else
- ret = -EOPNOTSUPP;
- break;
-
- case PRISM2_PARAM_DTIM_PERIOD:
- *param = local->dtim_period;
- break;
-
- case PRISM2_PARAM_AP_NULLFUNC_ACK:
- if (local->ap != NULL)
- *param = local->ap->nullfunc_ack;
- else
- ret = -EOPNOTSUPP;
- break;
-
- case PRISM2_PARAM_MAX_WDS:
- *param = local->wds_max_connections;
- break;
-
- case PRISM2_PARAM_AP_AUTOM_AP_WDS:
- if (local->ap != NULL)
- *param = local->ap->autom_ap_wds;
- else
- ret = -EOPNOTSUPP;
- break;
-
- case PRISM2_PARAM_AP_AUTH_ALGS:
- *param = local->auth_algs;
- break;
-
- case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
- *param = local->monitor_allow_fcserr;
- break;
-
- case PRISM2_PARAM_HOST_ENCRYPT:
- *param = local->host_encrypt;
- break;
-
- case PRISM2_PARAM_HOST_DECRYPT:
- *param = local->host_decrypt;
- break;
-
- case PRISM2_PARAM_HOST_ROAMING:
- *param = local->host_roaming;
- break;
-
- case PRISM2_PARAM_BCRX_STA_KEY:
- *param = local->bcrx_sta_key;
- break;
-
- case PRISM2_PARAM_IEEE_802_1X:
- *param = local->ieee_802_1x;
- break;
-
- case PRISM2_PARAM_ANTSEL_TX:
- *param = local->antsel_tx;
- break;
-
- case PRISM2_PARAM_ANTSEL_RX:
- *param = local->antsel_rx;
- break;
-
- case PRISM2_PARAM_MONITOR_TYPE:
- *param = local->monitor_type;
- break;
-
- case PRISM2_PARAM_WDS_TYPE:
- *param = local->wds_type;
- break;
-
- case PRISM2_PARAM_HOSTSCAN:
- ret = -EOPNOTSUPP;
- break;
-
- case PRISM2_PARAM_AP_SCAN:
- *param = local->passive_scan_interval;
- break;
-
- case PRISM2_PARAM_ENH_SEC:
- *param = local->enh_sec;
- break;
-
-#ifdef PRISM2_IO_DEBUG
- case PRISM2_PARAM_IO_DEBUG:
- *param = local->io_debug_enabled;
- break;
-#endif /* PRISM2_IO_DEBUG */
-
- case PRISM2_PARAM_BASIC_RATES:
- *param = local->basic_rates;
- break;
-
- case PRISM2_PARAM_OPER_RATES:
- *param = local->tx_rate_control;
- break;
-
- case PRISM2_PARAM_HOSTAPD:
- *param = local->hostapd;
- break;
-
- case PRISM2_PARAM_HOSTAPD_STA:
- *param = local->hostapd_sta;
- break;
-
- case PRISM2_PARAM_WPA:
- if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
- ret = -EOPNOTSUPP;
- *param = local->wpa;
- break;
-
- case PRISM2_PARAM_PRIVACY_INVOKED:
- *param = local->privacy_invoked;
- break;
-
- case PRISM2_PARAM_TKIP_COUNTERMEASURES:
- *param = local->tkip_countermeasures;
- break;
-
- case PRISM2_PARAM_DROP_UNENCRYPTED:
- *param = local->drop_unencrypted;
- break;
-
- case PRISM2_PARAM_SCAN_CHANNEL_MASK:
- *param = local->scan_channel_mask;
- break;
-
- default:
- printk(KERN_DEBUG "%s: get_prism2_param: unknown param %d\n",
- dev->name, *param);
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-
-static int prism2_ioctl_priv_readmif(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- u16 resp0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF, *extra, NULL,
- &resp0))
- return -EOPNOTSUPP;
- else
- *extra = resp0;
-
- return 0;
-}
-
-
-static int prism2_ioctl_priv_writemif(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- u16 cr, val;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- cr = *extra;
- val = *(extra + 1);
- if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, cr, &val, NULL))
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
-{
- struct prism2_download_param *param;
- int ret = 0;
-
- if (p->length < sizeof(struct prism2_download_param) ||
- p->length > 1024 || !p->pointer)
- return -EINVAL;
-
- param = memdup_user(p->pointer, p->length);
-	if (IS_ERR(param))
-		return PTR_ERR(param);
-
- if (p->length < sizeof(struct prism2_download_param) +
- param->num_areas * sizeof(struct prism2_download_area)) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = local->func->download(local, param);
-
- out:
- kfree(param);
- return ret;
-}
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
-
-static int prism2_set_genericelement(struct net_device *dev, u8 *elem,
- size_t len)
-{
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- u8 *buf;
-
- /*
-	 * Add a 16-bit length field at the beginning of the buffer because
-	 * the Prism2 RID includes it.
- */
- buf = kmalloc(len + 2, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- *((__le16 *) buf) = cpu_to_le16(len);
- memcpy(buf + 2, elem, len);
-
- kfree(local->generic_elem);
- local->generic_elem = buf;
- local->generic_elem_len = len + 2;
-
- return local->func->set_rid(local->dev, HFA384X_RID_GENERICELEMENT,
- buf, len + 2);
-}
-
-
-static int prism2_ioctl_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *data = &wrqu->param;
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
-
- switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- /*
- * Host AP driver does not use these parameters and allows
- * wpa_supplicant to control them internally.
- */
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- local->tkip_countermeasures = data->value;
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
- local->drop_unencrypted = data->value;
- break;
- case IW_AUTH_80211_AUTH_ALG:
- local->auth_algs = data->value;
- break;
- case IW_AUTH_WPA_ENABLED:
- if (data->value == 0) {
- local->wpa = 0;
- if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
- break;
- prism2_set_genericelement(dev, "", 0);
- local->host_roaming = 0;
- local->privacy_invoked = 0;
- if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
- 0) ||
- hostap_set_roaming(local) ||
- hostap_set_encryption(local) ||
- local->func->reset_port(dev))
- return -EINVAL;
- break;
- }
- if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
- return -EOPNOTSUPP;
- local->host_roaming = 2;
- local->privacy_invoked = 1;
- local->wpa = 1;
- if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1) ||
- hostap_set_roaming(local) ||
- hostap_set_encryption(local) ||
- local->func->reset_port(dev))
- return -EINVAL;
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- local->ieee_802_1x = data->value;
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- local->privacy_invoked = data->value;
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-
-static int prism2_ioctl_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *data = &wrqu->param;
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
-
- switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- /*
- * Host AP driver does not use these parameters and allows
- * wpa_supplicant to control them internally.
- */
- return -EOPNOTSUPP;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- data->value = local->tkip_countermeasures;
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
- data->value = local->drop_unencrypted;
- break;
- case IW_AUTH_80211_AUTH_ALG:
- data->value = local->auth_algs;
- break;
- case IW_AUTH_WPA_ENABLED:
- data->value = local->wpa;
- break;
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- data->value = local->ieee_802_1x;
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-
-static int prism2_ioctl_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
- int i, ret = 0;
- struct lib80211_crypto_ops *ops;
- struct lib80211_crypt_data **crypt;
- void *sta_ptr;
- u8 *addr;
- const char *alg, *module;
-
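-	/* wireless extensions key indexes are 1-based; 0 selects the current
-	 * TX key */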
- i = erq->flags & IW_ENCODE_INDEX;
- if (i > WEP_KEYS)
- return -EINVAL;
- if (i < 1 || i > WEP_KEYS)
- i = local->crypt_info.tx_keyidx;
- else
- i--;
- if (i < 0 || i >= WEP_KEYS)
- return -EINVAL;
-
- addr = ext->addr.sa_data;
- if (is_broadcast_ether_addr(addr)) {
- sta_ptr = NULL;
- crypt = &local->crypt_info.crypt[i];
- } else {
- if (i != 0)
- return -EINVAL;
- sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
- if (sta_ptr == NULL) {
- if (local->iw_mode == IW_MODE_INFRA) {
- /*
-				/*
-				 * TODO: add a STA entry for the current AP so
-				 * that a unicast key can be used. For now,
-				 * this is emulated by using default key idx 0.
- */
- i = 0;
- crypt = &local->crypt_info.crypt[i];
- } else
- return -EINVAL;
- }
- }
-
- if ((erq->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- if (*crypt)
- lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
- goto done;
- }
-
- switch (ext->alg) {
- case IW_ENCODE_ALG_WEP:
- alg = "WEP";
- module = "lib80211_crypt_wep";
- break;
- case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
- module = "lib80211_crypt_tkip";
- break;
- case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
- module = "lib80211_crypt_ccmp";
- break;
- default:
- printk(KERN_DEBUG "%s: unsupported algorithm %d\n",
- local->dev->name, ext->alg);
- ret = -EOPNOTSUPP;
- goto done;
- }
-
- ops = lib80211_get_crypto_ops(alg);
- if (ops == NULL) {
- request_module(module);
- ops = lib80211_get_crypto_ops(alg);
- }
- if (ops == NULL) {
- printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
- local->dev->name, alg);
- ret = -EOPNOTSUPP;
- goto done;
- }
-
- if (sta_ptr || ext->alg != IW_ENCODE_ALG_WEP) {
- /*
-		 * Per-station encryption and algorithms other than WEP
-		 * require host-based encryption, so force it on
-		 * automatically.
- */
- local->host_decrypt = local->host_encrypt = 1;
- }
-
- if (*crypt == NULL || (*crypt)->ops != ops) {
- struct lib80211_crypt_data *new_crypt;
-
- lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
-
- new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
- GFP_KERNEL);
- if (new_crypt == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
- new_crypt->priv = new_crypt->ops->init(i);
- if (new_crypt->priv == NULL) {
- kfree(new_crypt);
- ret = -EINVAL;
- goto done;
- }
-
- *crypt = new_crypt;
- }
-
- /*
- * TODO: if ext_flags does not have IW_ENCODE_EXT_RX_SEQ_VALID, the
- * existing seq# should not be changed.
-	 * TODO: if ext_flags has IW_ENCODE_EXT_TX_SEQ_VALID, the next TX seq#
-	 * should be changed to something other than zero.
- */
- if ((!(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) || ext->key_len > 0)
- && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
- (*crypt)->priv) < 0) {
- printk(KERN_DEBUG "%s: key setting failed\n",
- local->dev->name);
- ret = -EINVAL;
- goto done;
- }
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- if (!sta_ptr)
- local->crypt_info.tx_keyidx = i;
- }
-
-
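-	/* if no other key slot is in use, make this key the default TX key */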
- if (sta_ptr == NULL && ext->key_len > 0) {
- int first = 1, j;
- for (j = 0; j < WEP_KEYS; j++) {
- if (j != i && local->crypt_info.crypt[j]) {
- first = 0;
- break;
- }
- }
- if (first)
- local->crypt_info.tx_keyidx = i;
- }
-
- done:
- if (sta_ptr)
- hostap_handle_sta_release(sta_ptr);
-
- local->open_wep = erq->flags & IW_ENCODE_OPEN;
-
- /*
-	 * Do not reset port0 if the card is in Managed mode, since resetting
-	 * will generate a new IEEE 802.11 authentication which may end up
-	 * looping with IEEE 802.1X. Prism2 documentation seems to require a
-	 * port reset after WEP configuration; however, keys apparently take
-	 * effect without one, at least in Managed mode.
- */
- if (ret == 0 &&
- (hostap_set_encryption(local) ||
- (local->iw_mode != IW_MODE_INFRA &&
- local->func->reset_port(local->dev))))
- ret = -EINVAL;
-
- return ret;
-}
-
-
-static int prism2_ioctl_giwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- struct lib80211_crypt_data **crypt;
- void *sta_ptr;
- int max_key_len, i;
- struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
- u8 *addr;
-
- max_key_len = erq->length - sizeof(*ext);
- if (max_key_len < 0)
- return -EINVAL;
-
- i = erq->flags & IW_ENCODE_INDEX;
- if (i < 1 || i > WEP_KEYS)
- i = local->crypt_info.tx_keyidx;
- else
- i--;
-
- addr = ext->addr.sa_data;
- if (is_broadcast_ether_addr(addr)) {
- sta_ptr = NULL;
- crypt = &local->crypt_info.crypt[i];
- } else {
- i = 0;
- sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
- if (sta_ptr == NULL)
- return -EINVAL;
- }
- erq->flags = i + 1;
- memset(ext, 0, sizeof(*ext));
-
- if (*crypt == NULL || (*crypt)->ops == NULL) {
- ext->alg = IW_ENCODE_ALG_NONE;
- ext->key_len = 0;
- erq->flags |= IW_ENCODE_DISABLED;
- } else {
- if (strcmp((*crypt)->ops->name, "WEP") == 0)
- ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp((*crypt)->ops->name, "TKIP") == 0)
- ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp((*crypt)->ops->name, "CCMP") == 0)
- ext->alg = IW_ENCODE_ALG_CCMP;
- else
- return -EINVAL;
-
- if ((*crypt)->ops->get_key) {
- ext->key_len =
- (*crypt)->ops->get_key(ext->key,
- max_key_len,
- ext->tx_seq,
- (*crypt)->priv);
- if (ext->key_len &&
- (ext->alg == IW_ENCODE_ALG_TKIP ||
- ext->alg == IW_ENCODE_ALG_CCMP))
- ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
- }
- }
-
- if (sta_ptr)
- hostap_handle_sta_release(sta_ptr);
-
- return 0;
-}
-
-
-static int prism2_ioctl_set_encryption(local_info_t *local,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- int ret = 0;
- struct lib80211_crypto_ops *ops;
- struct lib80211_crypt_data **crypt;
- void *sta_ptr;
-
- param->u.crypt.err = 0;
- param->u.crypt.alg[HOSTAP_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
- if (param_len !=
- (int) ((char *) param->u.crypt.key - (char *) param) +
- param->u.crypt.key_len)
- return -EINVAL;
-
- if (is_broadcast_ether_addr(param->sta_addr)) {
- if (param->u.crypt.idx >= WEP_KEYS)
- return -EINVAL;
- sta_ptr = NULL;
- crypt = &local->crypt_info.crypt[param->u.crypt.idx];
- } else {
- if (param->u.crypt.idx)
- return -EINVAL;
- sta_ptr = ap_crypt_get_ptrs(
- local->ap, param->sta_addr,
- (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_PERMANENT),
- &crypt);
-
- if (sta_ptr == NULL) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
- return -EINVAL;
- }
- }
-
- if (strcmp(param->u.crypt.alg, "none") == 0) {
- if (crypt)
- lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
- goto done;
- }
-
- ops = lib80211_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
- request_module("lib80211_crypt_wep");
- ops = lib80211_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
- request_module("lib80211_crypt_tkip");
- ops = lib80211_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
- request_module("lib80211_crypt_ccmp");
- ops = lib80211_get_crypto_ops(param->u.crypt.alg);
- }
- if (ops == NULL) {
- printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
- local->dev->name, param->u.crypt.alg);
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ALG;
- ret = -EINVAL;
- goto done;
- }
-
-	/* per-station encryption and algorithms other than WEP require
-	 * host-based encryption, so force it on automatically */
- local->host_decrypt = local->host_encrypt = 1;
-
- if (*crypt == NULL || (*crypt)->ops != ops) {
- struct lib80211_crypt_data *new_crypt;
-
- lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
-
- new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
- GFP_KERNEL);
- if (new_crypt == NULL) {
- ret = -ENOMEM;
- goto done;
- }
- new_crypt->ops = ops;
- new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
- if (new_crypt->priv == NULL) {
- kfree(new_crypt);
- param->u.crypt.err =
- HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- *crypt = new_crypt;
- }
-
- if ((!(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) ||
- param->u.crypt.key_len > 0) && (*crypt)->ops->set_key &&
- (*crypt)->ops->set_key(param->u.crypt.key,
- param->u.crypt.key_len, param->u.crypt.seq,
- (*crypt)->priv) < 0) {
- printk(KERN_DEBUG "%s: key setting failed\n",
- local->dev->name);
- param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
- ret = -EINVAL;
- goto done;
- }
-
- if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
- if (!sta_ptr)
- local->crypt_info.tx_keyidx = param->u.crypt.idx;
- else if (param->u.crypt.idx) {
- printk(KERN_DEBUG "%s: TX key idx setting failed\n",
- local->dev->name);
- param->u.crypt.err =
- HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED;
- ret = -EINVAL;
- goto done;
- }
- }
-
- done:
- if (sta_ptr)
- hostap_handle_sta_release(sta_ptr);
-
-	/* Do not reset port0 if the card is in Managed mode, since resetting
-	 * will generate a new IEEE 802.11 authentication which may end up
-	 * looping with IEEE 802.1X. Prism2 documentation seems to require a
-	 * port reset after WEP configuration; however, keys apparently take
-	 * effect without one, at least in Managed mode. */
- if (ret == 0 &&
- (hostap_set_encryption(local) ||
- (local->iw_mode != IW_MODE_INFRA &&
- local->func->reset_port(local->dev)))) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_CARD_CONF_FAILED;
- return -EINVAL;
- }
-
- return ret;
-}
-
-
-static int prism2_ioctl_get_encryption(local_info_t *local,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- struct lib80211_crypt_data **crypt;
- void *sta_ptr;
- int max_key_len;
-
- param->u.crypt.err = 0;
-
- max_key_len = param_len -
- (int) ((char *) param->u.crypt.key - (char *) param);
- if (max_key_len < 0)
- return -EINVAL;
-
- if (is_broadcast_ether_addr(param->sta_addr)) {
- sta_ptr = NULL;
- if (param->u.crypt.idx >= WEP_KEYS)
- param->u.crypt.idx = local->crypt_info.tx_keyidx;
- crypt = &local->crypt_info.crypt[param->u.crypt.idx];
- } else {
- param->u.crypt.idx = 0;
- sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0,
- &crypt);
-
- if (sta_ptr == NULL) {
- param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
- return -EINVAL;
- }
- }
-
- if (*crypt == NULL || (*crypt)->ops == NULL) {
- memcpy(param->u.crypt.alg, "none", 5);
- param->u.crypt.key_len = 0;
- param->u.crypt.idx = 0xff;
- } else {
- strscpy(param->u.crypt.alg, (*crypt)->ops->name,
- HOSTAP_CRYPT_ALG_NAME_LEN);
- param->u.crypt.key_len = 0;
-
- memset(param->u.crypt.seq, 0, 8);
- if ((*crypt)->ops->get_key) {
- param->u.crypt.key_len =
- (*crypt)->ops->get_key(param->u.crypt.key,
- max_key_len,
- param->u.crypt.seq,
- (*crypt)->priv);
- }
- }
-
- if (sta_ptr)
- hostap_handle_sta_release(sta_ptr);
-
- return 0;
-}
-
-
-static int prism2_ioctl_get_rid(local_info_t *local,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- int max_len, res;
-
- max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
- if (max_len < 0)
- return -EINVAL;
-
- res = local->func->get_rid(local->dev, param->u.rid.rid,
- param->u.rid.data, param->u.rid.len, 0);
- if (res >= 0) {
- param->u.rid.len = res;
- return 0;
- }
-
- return res;
-}
-
-
-static int prism2_ioctl_set_rid(local_info_t *local,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- int max_len;
-
- max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
- if (max_len < 0 || max_len < param->u.rid.len)
- return -EINVAL;
-
- return local->func->set_rid(local->dev, param->u.rid.rid,
- param->u.rid.data, param->u.rid.len);
-}
-
-
-static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- printk(KERN_DEBUG "%ssta: associated as client with AP %pM\n",
- local->dev->name, param->sta_addr);
- memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN);
- return 0;
-}
-
-
-static int prism2_ioctl_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *data = &wrqu->data;
- return prism2_set_genericelement(dev, extra, data->length);
-}
-
-
-static int prism2_ioctl_giwgenie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *data = &wrqu->data;
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- int len = local->generic_elem_len - 2;
-
- if (len <= 0 || local->generic_elem == NULL) {
- data->length = 0;
- return 0;
- }
-
- if (data->length < len)
- return -E2BIG;
-
- data->length = len;
- memcpy(extra, local->generic_elem + 2, len);
-
- return 0;
-}
-
-
-static int prism2_ioctl_set_generic_element(local_info_t *local,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- int max_len, len;
-
- len = param->u.generic_elem.len;
- max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
- if (max_len < 0 || max_len < len)
- return -EINVAL;
-
- return prism2_set_genericelement(local->dev,
- param->u.generic_elem.data, len);
-}
-
-
-static int prism2_ioctl_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- struct iw_mlme *mlme = (struct iw_mlme *) extra;
- __le16 reason;
-
- reason = cpu_to_le16(mlme->reason_code);
-
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
- IEEE80211_STYPE_DEAUTH,
- (u8 *) &reason, 2);
- case IW_MLME_DISASSOC:
- return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
- IEEE80211_STYPE_DISASSOC,
- (u8 *) &reason, 2);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-
-static int prism2_ioctl_mlme(local_info_t *local,
- struct prism2_hostapd_param *param)
-{
- __le16 reason;
-
- reason = cpu_to_le16(param->u.mlme.reason_code);
- switch (param->u.mlme.cmd) {
- case MLME_STA_DEAUTH:
- return prism2_sta_send_mgmt(local, param->sta_addr,
- IEEE80211_STYPE_DEAUTH,
- (u8 *) &reason, 2);
- case MLME_STA_DISASSOC:
- return prism2_sta_send_mgmt(local, param->sta_addr,
- IEEE80211_STYPE_DISASSOC,
- (u8 *) &reason, 2);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-
-static int prism2_ioctl_scan_req(local_info_t *local,
- struct prism2_hostapd_param *param)
-{
-#ifndef PRISM2_NO_STATION_MODES
- if ((local->iw_mode != IW_MODE_INFRA &&
- local->iw_mode != IW_MODE_ADHOC) ||
- (local->sta_fw_ver < PRISM2_FW_VER(1,3,1)))
- return -EOPNOTSUPP;
-
- if (!local->dev_enabled)
- return -ENETDOWN;
-
- return prism2_request_hostscan(local->dev, param->u.scan_req.ssid,
- param->u.scan_req.ssid_len);
-#else /* PRISM2_NO_STATION_MODES */
- return -EOPNOTSUPP;
-#endif /* PRISM2_NO_STATION_MODES */
-}
-
-
-static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
-{
- struct prism2_hostapd_param *param;
- int ret = 0;
- int ap_ioctl = 0;
-
- if (p->length < sizeof(struct prism2_hostapd_param) ||
- p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = memdup_user(p->pointer, p->length);
-	if (IS_ERR(param))
-		return PTR_ERR(param);
-
- switch (param->cmd) {
- case PRISM2_SET_ENCRYPTION:
- ret = prism2_ioctl_set_encryption(local, param, p->length);
- break;
- case PRISM2_GET_ENCRYPTION:
- ret = prism2_ioctl_get_encryption(local, param, p->length);
- break;
- case PRISM2_HOSTAPD_GET_RID:
- ret = prism2_ioctl_get_rid(local, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_RID:
- ret = prism2_ioctl_set_rid(local, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR:
- ret = prism2_ioctl_set_assoc_ap_addr(local, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
- ret = prism2_ioctl_set_generic_element(local, param,
- p->length);
- break;
- case PRISM2_HOSTAPD_MLME:
- ret = prism2_ioctl_mlme(local, param);
- break;
- case PRISM2_HOSTAPD_SCAN_REQ:
- ret = prism2_ioctl_scan_req(local, param);
- break;
- default:
- ret = prism2_hostapd(local->ap, param);
- ap_ioctl = 1;
- break;
- }
-
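-	/* AP sub-ioctl handlers return 1 when the param buffer was updated
-	 * and needs to be copied back to user space */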
- if (ret == 1 || !ap_ioctl) {
- if (copy_to_user(p->pointer, param, p->length)) {
- ret = -EFAULT;
- goto out;
- } else if (ap_ioctl)
- ret = 0;
- }
-
- out:
- kfree(param);
- return ret;
-}
-
-
-static void prism2_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- strscpy(info->driver, "hostap", sizeof(info->driver));
- snprintf(info->fw_version, sizeof(info->fw_version),
- "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
- (local->sta_fw_ver >> 8) & 0xff,
- local->sta_fw_ver & 0xff);
-}
-
-const struct ethtool_ops prism2_ethtool_ops = {
- .get_drvinfo = prism2_get_drvinfo
-};
-
-
-/* Structures to export the Wireless Handlers */
-
-static const iw_handler prism2_handler[] =
-{
- IW_HANDLER(SIOCGIWNAME, prism2_get_name),
- IW_HANDLER(SIOCSIWFREQ, prism2_ioctl_siwfreq),
- IW_HANDLER(SIOCGIWFREQ, prism2_ioctl_giwfreq),
- IW_HANDLER(SIOCSIWMODE, prism2_ioctl_siwmode),
- IW_HANDLER(SIOCGIWMODE, prism2_ioctl_giwmode),
- IW_HANDLER(SIOCSIWSENS, prism2_ioctl_siwsens),
- IW_HANDLER(SIOCGIWSENS, prism2_ioctl_giwsens),
- IW_HANDLER(SIOCGIWRANGE, prism2_ioctl_giwrange),
- IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- IW_HANDLER(SIOCSIWAP, prism2_ioctl_siwap),
- IW_HANDLER(SIOCGIWAP, prism2_ioctl_giwap),
- IW_HANDLER(SIOCSIWMLME, prism2_ioctl_siwmlme),
- IW_HANDLER(SIOCGIWAPLIST, prism2_ioctl_giwaplist),
- IW_HANDLER(SIOCSIWSCAN, prism2_ioctl_siwscan),
- IW_HANDLER(SIOCGIWSCAN, prism2_ioctl_giwscan),
- IW_HANDLER(SIOCSIWESSID, prism2_ioctl_siwessid),
- IW_HANDLER(SIOCGIWESSID, prism2_ioctl_giwessid),
- IW_HANDLER(SIOCSIWNICKN, prism2_ioctl_siwnickn),
- IW_HANDLER(SIOCGIWNICKN, prism2_ioctl_giwnickn),
- IW_HANDLER(SIOCSIWRATE, prism2_ioctl_siwrate),
- IW_HANDLER(SIOCGIWRATE, prism2_ioctl_giwrate),
- IW_HANDLER(SIOCSIWRTS, prism2_ioctl_siwrts),
- IW_HANDLER(SIOCGIWRTS, prism2_ioctl_giwrts),
- IW_HANDLER(SIOCSIWFRAG, prism2_ioctl_siwfrag),
- IW_HANDLER(SIOCGIWFRAG, prism2_ioctl_giwfrag),
- IW_HANDLER(SIOCSIWTXPOW, prism2_ioctl_siwtxpow),
- IW_HANDLER(SIOCGIWTXPOW, prism2_ioctl_giwtxpow),
- IW_HANDLER(SIOCSIWRETRY, prism2_ioctl_siwretry),
- IW_HANDLER(SIOCGIWRETRY, prism2_ioctl_giwretry),
- IW_HANDLER(SIOCSIWENCODE, prism2_ioctl_siwencode),
- IW_HANDLER(SIOCGIWENCODE, prism2_ioctl_giwencode),
- IW_HANDLER(SIOCSIWPOWER, prism2_ioctl_siwpower),
- IW_HANDLER(SIOCGIWPOWER, prism2_ioctl_giwpower),
- IW_HANDLER(SIOCSIWGENIE, prism2_ioctl_siwgenie),
- IW_HANDLER(SIOCGIWGENIE, prism2_ioctl_giwgenie),
- IW_HANDLER(SIOCSIWAUTH, prism2_ioctl_siwauth),
- IW_HANDLER(SIOCGIWAUTH, prism2_ioctl_giwauth),
- IW_HANDLER(SIOCSIWENCODEEXT, prism2_ioctl_siwencodeext),
- IW_HANDLER(SIOCGIWENCODEEXT, prism2_ioctl_giwencodeext),
-};
-
-static const iw_handler prism2_private_handler[] =
-{ /* SIOCIWFIRSTPRIV + */
- prism2_ioctl_priv_prism2_param, /* 0 */
- prism2_ioctl_priv_get_prism2_param, /* 1 */
- prism2_ioctl_priv_writemif, /* 2 */
- prism2_ioctl_priv_readmif, /* 3 */
-};
-
-const struct iw_handler_def hostap_iw_handler_def =
-{
- .num_standard = ARRAY_SIZE(prism2_handler),
- .num_private = ARRAY_SIZE(prism2_private_handler),
- .num_private_args = ARRAY_SIZE(prism2_priv),
- .standard = prism2_handler,
- .private = prism2_private_handler,
- .private_args = (struct iw_priv_args *) prism2_priv,
- .get_wireless_stats = hostap_get_wireless_stats,
-};
-
-
-/* Private ioctls that are not used with iwpriv;
- * in SIOCDEVPRIVATE range */
-int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *)ifr;
- struct hostap_interface *iface;
- local_info_t *local;
- int ret = 0;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (in_compat_syscall()) /* not implemented yet */
- return -EOPNOTSUPP;
-
- switch (cmd) {
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- case PRISM2_IOCTL_DOWNLOAD:
-		if (!capable(CAP_NET_ADMIN))
-			ret = -EPERM;
-		else
-			ret = prism2_ioctl_priv_download(local, &wrq->u.data);
- break;
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
- case PRISM2_IOCTL_HOSTAPD:
-		if (!capable(CAP_NET_ADMIN))
-			ret = -EPERM;
-		else
-			ret = prism2_ioctl_priv_hostapd(local, &wrq->u.data);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
deleted file mode 100644
index bf86ac26c..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ /dev/null
@@ -1,1123 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Host AP (software wireless LAN access point) driver for
- * Intersil Prism2/2.5/3 - hostap.o module, common routines
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <j@w1.fi>
- * Copyright (c) 2002-2005, Jouni Malinen <j@w1.fi>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/if_arp.h>
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/workqueue.h>
-#include <linux/kmod.h>
-#include <linux/rtnetlink.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <net/net_namespace.h>
-#include <net/iw_handler.h>
-#include <net/lib80211.h>
-#include <linux/uaccess.h>
-
-#include "hostap_wlan.h"
-#include "hostap_80211.h"
-#include "hostap_ap.h"
-#include "hostap.h"
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Host AP common routines");
-MODULE_LICENSE("GPL");
-
-#define TX_TIMEOUT (2 * HZ)
-
-#define PRISM2_MAX_FRAME_SIZE 2304
-#define PRISM2_MIN_MTU 256
-/* FIX: */
-#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
-
-
-struct net_device * hostap_add_interface(struct local_info *local,
- int type, int rtnl_locked,
- const char *prefix,
- const char *name)
-{
- struct net_device *dev, *mdev;
- struct hostap_interface *iface;
- int ret;
-
- dev = alloc_etherdev(sizeof(struct hostap_interface));
- if (dev == NULL)
- return NULL;
-
- iface = netdev_priv(dev);
- iface->dev = dev;
- iface->local = local;
- iface->type = type;
- list_add(&iface->list, &local->hostap_interfaces);
-
- mdev = local->dev;
- eth_hw_addr_inherit(dev, mdev);
- dev->base_addr = mdev->base_addr;
- dev->irq = mdev->irq;
- dev->mem_start = mdev->mem_start;
- dev->mem_end = mdev->mem_end;
-
- hostap_setup_dev(dev, local, type);
- dev->needs_free_netdev = true;
-
- sprintf(dev->name, "%s%s", prefix, name);
- if (!rtnl_locked)
- rtnl_lock();
-
- SET_NETDEV_DEV(dev, mdev->dev.parent);
- ret = register_netdevice(dev);
-
- if (!rtnl_locked)
- rtnl_unlock();
-
- if (ret < 0) {
- printk(KERN_WARNING "%s: failed to add new netdevice!\n",
- dev->name);
- free_netdev(dev);
- return NULL;
- }
-
- printk(KERN_DEBUG "%s: registered netdevice %s\n",
- mdev->name, dev->name);
-
- return dev;
-}
-
-
-void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
- int remove_from_list)
-{
- struct hostap_interface *iface;
-
- if (!dev)
- return;
-
- iface = netdev_priv(dev);
-
-	if (remove_from_list)
-		list_del(&iface->list);
-
- if (dev == iface->local->ddev)
- iface->local->ddev = NULL;
- else if (dev == iface->local->apdev)
- iface->local->apdev = NULL;
- else if (dev == iface->local->stadev)
- iface->local->stadev = NULL;
-
- if (rtnl_locked)
- unregister_netdevice(dev);
- else
- unregister_netdev(dev);
-
- /* 'dev->needs_free_netdev = true' implies device data, including
- * private data, will be freed when the device is removed */
-}
-
-
-static inline int prism2_wds_special_addr(u8 *addr)
-{
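-	/* an all-zeros remote address marks an unused, pre-allocated WDS slot */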
- if (addr[0] || addr[1] || addr[2] || addr[3] || addr[4] || addr[5])
- return 0;
-
- return 1;
-}
-
-
-int prism2_wds_add(local_info_t *local, u8 *remote_addr,
- int rtnl_locked)
-{
- struct net_device *dev;
- struct list_head *ptr;
- struct hostap_interface *iface, *empty, *match;
-
- empty = match = NULL;
- read_lock_bh(&local->iface_lock);
- list_for_each(ptr, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type != HOSTAP_INTERFACE_WDS)
- continue;
-
- if (prism2_wds_special_addr(iface->u.wds.remote_addr))
- empty = iface;
- else if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
- match = iface;
- break;
- }
- }
- if (!match && empty && !prism2_wds_special_addr(remote_addr)) {
- /* take pre-allocated entry into use */
- memcpy(empty->u.wds.remote_addr, remote_addr, ETH_ALEN);
- read_unlock_bh(&local->iface_lock);
- printk(KERN_DEBUG "%s: using pre-allocated WDS netdevice %s\n",
- local->dev->name, empty->dev->name);
- return 0;
- }
- read_unlock_bh(&local->iface_lock);
-
- if (!prism2_wds_special_addr(remote_addr)) {
- if (match)
- return -EEXIST;
- hostap_add_sta(local->ap, remote_addr);
- }
-
- if (local->wds_connections >= local->wds_max_connections)
- return -ENOBUFS;
-
-	/* verify that there is room for the "wds#" suffix in the interface
-	 * name */
-	if (strlen(local->dev->name) >= IFNAMSIZ - 5) {
-		printk(KERN_DEBUG "base device name '%s' is too long\n",
-		       local->dev->name);
- return -EINVAL;
- }
-
- dev = hostap_add_interface(local, HOSTAP_INTERFACE_WDS, rtnl_locked,
- local->ddev->name, "wds%d");
- if (dev == NULL)
- return -ENOMEM;
-
- iface = netdev_priv(dev);
- memcpy(iface->u.wds.remote_addr, remote_addr, ETH_ALEN);
-
- local->wds_connections++;
-
- return 0;
-}
-
-
-int prism2_wds_del(local_info_t *local, u8 *remote_addr,
- int rtnl_locked, int do_not_remove)
-{
- unsigned long flags;
- struct list_head *ptr;
- struct hostap_interface *iface, *selected = NULL;
-
- write_lock_irqsave(&local->iface_lock, flags);
- list_for_each(ptr, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type != HOSTAP_INTERFACE_WDS)
- continue;
-
- if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) {
- selected = iface;
- break;
- }
- }
- if (selected && !do_not_remove)
- list_del(&selected->list);
- write_unlock_irqrestore(&local->iface_lock, flags);
-
- if (selected) {
- if (do_not_remove)
- eth_zero_addr(selected->u.wds.remote_addr);
- else {
- hostap_remove_interface(selected->dev, rtnl_locked, 0);
- local->wds_connections--;
- }
- }
-
- return selected ? 0 : -ENODEV;
-}
-
-
-u16 hostap_tx_callback_register(local_info_t *local,
- void (*func)(struct sk_buff *, int ok, void *),
- void *data)
-{
- unsigned long flags;
- struct hostap_tx_callback_info *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (entry == NULL)
- return 0;
-
- entry->func = func;
- entry->data = data;
-
- spin_lock_irqsave(&local->lock, flags);
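-	/* idx 0 is reserved for the error return, so numbering starts from 1 */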
- entry->idx = local->tx_callback ? local->tx_callback->idx + 1 : 1;
- entry->next = local->tx_callback;
- local->tx_callback = entry;
- spin_unlock_irqrestore(&local->lock, flags);
-
- return entry->idx;
-}
-
-
-int hostap_tx_callback_unregister(local_info_t *local, u16 idx)
-{
- unsigned long flags;
- struct hostap_tx_callback_info *cb, *prev = NULL;
-
- spin_lock_irqsave(&local->lock, flags);
- cb = local->tx_callback;
- while (cb != NULL && cb->idx != idx) {
- prev = cb;
- cb = cb->next;
- }
- if (cb) {
- if (prev == NULL)
- local->tx_callback = cb->next;
- else
- prev->next = cb->next;
- kfree(cb);
- }
- spin_unlock_irqrestore(&local->lock, flags);
-
- return cb ? 0 : -1;
-}
-
-
-/* val is in host byte order */
-int hostap_set_word(struct net_device *dev, int rid, u16 val)
-{
- struct hostap_interface *iface;
- __le16 tmp = cpu_to_le16(val);
- iface = netdev_priv(dev);
- return iface->local->func->set_rid(dev, rid, &tmp, 2);
-}
-
-
-int hostap_set_string(struct net_device *dev, int rid, const char *val)
-{
- struct hostap_interface *iface;
- char buf[MAX_SSID_LEN + 2];
- int len;
-
- iface = netdev_priv(dev);
- len = strlen(val);
- if (len > MAX_SSID_LEN)
- return -1;
- memset(buf, 0, sizeof(buf));
- buf[0] = len; /* little endian 16 bit word */
- memcpy(buf + 2, val, len);
-
- return iface->local->func->set_rid(dev, rid, &buf, MAX_SSID_LEN + 2);
-}
-
-
-u16 hostap_get_porttype(local_info_t *local)
-{
- if (local->iw_mode == IW_MODE_ADHOC && local->pseudo_adhoc)
- return HFA384X_PORTTYPE_PSEUDO_IBSS;
- if (local->iw_mode == IW_MODE_ADHOC)
- return HFA384X_PORTTYPE_IBSS;
- if (local->iw_mode == IW_MODE_INFRA)
- return HFA384X_PORTTYPE_BSS;
- if (local->iw_mode == IW_MODE_REPEAT)
- return HFA384X_PORTTYPE_WDS;
- if (local->iw_mode == IW_MODE_MONITOR)
- return HFA384X_PORTTYPE_PSEUDO_IBSS;
- return HFA384X_PORTTYPE_HOSTAP;
-}
-
-
-int hostap_set_encryption(local_info_t *local)
-{
- u16 val, old_val;
- int i, keylen, len, idx;
- char keybuf[WEP_KEY_LEN + 1];
- enum { NONE, WEP, OTHER } encrypt_type;
-
- idx = local->crypt_info.tx_keyidx;
- if (local->crypt_info.crypt[idx] == NULL ||
- local->crypt_info.crypt[idx]->ops == NULL)
- encrypt_type = NONE;
- else if (strcmp(local->crypt_info.crypt[idx]->ops->name, "WEP") == 0)
- encrypt_type = WEP;
- else
- encrypt_type = OTHER;
-
- if (local->func->get_rid(local->dev, HFA384X_RID_CNFWEPFLAGS, &val, 2,
- 1) < 0) {
- printk(KERN_DEBUG "Could not read current WEP flags.\n");
- goto fail;
- }
- le16_to_cpus(&val);
- old_val = val;
-
- if (encrypt_type != NONE || local->privacy_invoked)
- val |= HFA384X_WEPFLAGS_PRIVACYINVOKED;
- else
- val &= ~HFA384X_WEPFLAGS_PRIVACYINVOKED;
-
- if (local->open_wep || encrypt_type == NONE ||
- ((local->ieee_802_1x || local->wpa) && local->host_decrypt))
- val &= ~HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED;
- else
- val |= HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED;
-
- if ((encrypt_type != NONE || local->privacy_invoked) &&
- (encrypt_type == OTHER || local->host_encrypt))
- val |= HFA384X_WEPFLAGS_HOSTENCRYPT;
- else
- val &= ~HFA384X_WEPFLAGS_HOSTENCRYPT;
- if ((encrypt_type != NONE || local->privacy_invoked) &&
- (encrypt_type == OTHER || local->host_decrypt))
- val |= HFA384X_WEPFLAGS_HOSTDECRYPT;
- else
- val &= ~HFA384X_WEPFLAGS_HOSTDECRYPT;
-
- if (val != old_val &&
- hostap_set_word(local->dev, HFA384X_RID_CNFWEPFLAGS, val)) {
- printk(KERN_DEBUG "Could not write new WEP flags (0x%x)\n",
- val);
- goto fail;
- }
-
- if (encrypt_type != WEP)
- return 0;
-
- /* 104-bit support seems to require that all the keys are set to the
- * same keylen */
- keylen = 6; /* first 5 octets */
- len = local->crypt_info.crypt[idx]->ops->get_key(keybuf, sizeof(keybuf), NULL,
- local->crypt_info.crypt[idx]->priv);
- if (idx >= 0 && idx < WEP_KEYS && len > 5)
- keylen = WEP_KEY_LEN + 1; /* first 13 octets */
-
- for (i = 0; i < WEP_KEYS; i++) {
- memset(keybuf, 0, sizeof(keybuf));
- if (local->crypt_info.crypt[i]) {
- (void) local->crypt_info.crypt[i]->ops->get_key(
- keybuf, sizeof(keybuf),
- NULL, local->crypt_info.crypt[i]->priv);
- }
- if (local->func->set_rid(local->dev,
- HFA384X_RID_CNFDEFAULTKEY0 + i,
- keybuf, keylen)) {
- printk(KERN_DEBUG "Could not set key %d (len=%d)\n",
- i, keylen);
- goto fail;
- }
- }
- if (hostap_set_word(local->dev, HFA384X_RID_CNFWEPDEFAULTKEYID, idx)) {
- printk(KERN_DEBUG "Could not set default keyid %d\n", idx);
- goto fail;
- }
-
- return 0;
-
- fail:
- printk(KERN_DEBUG "%s: encryption setup failed\n", local->dev->name);
- return -1;
-}
-
-
-int hostap_set_antsel(local_info_t *local)
-{
- u16 val;
- int ret = 0;
-
- if (local->antsel_tx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
- local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
- HFA386X_CR_TX_CONFIGURE,
- NULL, &val) == 0) {
- val &= ~(BIT(2) | BIT(1));
- switch (local->antsel_tx) {
- case HOSTAP_ANTSEL_DIVERSITY:
- val |= BIT(1);
- break;
- case HOSTAP_ANTSEL_LOW:
- break;
- case HOSTAP_ANTSEL_HIGH:
- val |= BIT(2);
- break;
- }
-
- if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
- HFA386X_CR_TX_CONFIGURE, &val, NULL)) {
- printk(KERN_INFO "%s: setting TX AntSel failed\n",
- local->dev->name);
- ret = -1;
- }
- }
-
- if (local->antsel_rx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
- local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
- HFA386X_CR_RX_CONFIGURE,
- NULL, &val) == 0) {
- val &= ~(BIT(1) | BIT(0));
- switch (local->antsel_rx) {
- case HOSTAP_ANTSEL_DIVERSITY:
- break;
- case HOSTAP_ANTSEL_LOW:
- val |= BIT(0);
- break;
- case HOSTAP_ANTSEL_HIGH:
- val |= BIT(0) | BIT(1);
- break;
- }
-
- if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
- HFA386X_CR_RX_CONFIGURE, &val, NULL)) {
- printk(KERN_INFO "%s: setting RX AntSel failed\n",
- local->dev->name);
- ret = -1;
- }
- }
-
- return ret;
-}
-
-
-int hostap_set_roaming(local_info_t *local)
-{
- u16 val;
-
- switch (local->host_roaming) {
- case 1:
- val = HFA384X_ROAMING_HOST;
- break;
- case 2:
- val = HFA384X_ROAMING_DISABLED;
- break;
- case 0:
- default:
- val = HFA384X_ROAMING_FIRMWARE;
- break;
- }
-
- return hostap_set_word(local->dev, HFA384X_RID_CNFROAMINGMODE, val);
-}
-
-
-int hostap_set_auth_algs(local_info_t *local)
-{
- int val = local->auth_algs;
-	/* At least STA f/w v0.6.2 seems to have issues with cnfAuthentication
-	 * set to include both Open and Shared Key flags. It tries to use
-	 * Shared Key authentication in that case even if WEP keys are not
-	 * configured. STA f/w v0.7.6 is able to handle such a configuration,
-	 * but it is unknown at which version between 0.6.2 and 0.7.6 this
-	 * was fixed. */
- if (local->sta_fw_ver < PRISM2_FW_VER(0,7,0) &&
- val != PRISM2_AUTH_OPEN && val != PRISM2_AUTH_SHARED_KEY)
- val = PRISM2_AUTH_OPEN;
-
- if (hostap_set_word(local->dev, HFA384X_RID_CNFAUTHENTICATION, val)) {
- printk(KERN_INFO "%s: cnfAuthentication setting to 0x%x "
- "failed\n", local->dev->name, local->auth_algs);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx)
-{
- u16 status, fc;
-
- status = __le16_to_cpu(rx->status);
-
- printk(KERN_DEBUG "%s: RX status=0x%04x (port=%d, type=%d, "
- "fcserr=%d) silence=%d signal=%d rate=%d rxflow=%d; "
- "jiffies=%ld\n",
- name, status, (status >> 8) & 0x07, status >> 13, status & 1,
- rx->silence, rx->signal, rx->rate, rx->rxflow, jiffies);
-
- fc = __le16_to_cpu(rx->frame_control);
- printk(KERN_DEBUG " FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x "
- "data_len=%d%s%s\n",
- fc, (fc & IEEE80211_FCTL_FTYPE) >> 2,
- (fc & IEEE80211_FCTL_STYPE) >> 4,
- __le16_to_cpu(rx->duration_id), __le16_to_cpu(rx->seq_ctrl),
- __le16_to_cpu(rx->data_len),
- fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
- fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
-
- printk(KERN_DEBUG " A1=%pM A2=%pM A3=%pM A4=%pM\n",
- rx->addr1, rx->addr2, rx->addr3, rx->addr4);
-
- printk(KERN_DEBUG " dst=%pM src=%pM len=%d\n",
- rx->dst_addr, rx->src_addr,
- __be16_to_cpu(rx->len));
-}
-
-
-void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
-{
- u16 fc;
-
- printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d "
- "tx_control=0x%04x; jiffies=%ld\n",
- name, __le16_to_cpu(tx->status), tx->retry_count, tx->tx_rate,
- __le16_to_cpu(tx->tx_control), jiffies);
-
- fc = __le16_to_cpu(tx->frame_control);
- printk(KERN_DEBUG " FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x "
- "data_len=%d%s%s\n",
- fc, (fc & IEEE80211_FCTL_FTYPE) >> 2,
- (fc & IEEE80211_FCTL_STYPE) >> 4,
- __le16_to_cpu(tx->duration_id), __le16_to_cpu(tx->seq_ctrl),
- __le16_to_cpu(tx->data_len),
- fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
- fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
-
- printk(KERN_DEBUG " A1=%pM A2=%pM A3=%pM A4=%pM\n",
- tx->addr1, tx->addr2, tx->addr3, tx->addr4);
-
- printk(KERN_DEBUG " dst=%pM src=%pM len=%d\n",
- tx->dst_addr, tx->src_addr,
- __be16_to_cpu(tx->len));
-}
-
-
-static int hostap_80211_header_parse(const struct sk_buff *skb,
- unsigned char *haddr)
-{
- memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
- return ETH_ALEN;
-}
-
-
-int hostap_80211_get_hdrlen(__le16 fc)
-{
-	if (ieee80211_is_data(fc) && ieee80211_has_a4(fc))
- return 30; /* Addr4 */
- else if (ieee80211_is_cts(fc) || ieee80211_is_ack(fc))
- return 10;
- else if (ieee80211_is_ctl(fc))
- return 16;
-
- return 24;
-}
-
-
-static int prism2_close(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
-
- PDEBUG(DEBUG_FLOW, "%s: prism2_close\n", dev->name);
-
- iface = netdev_priv(dev);
- local = iface->local;
-
-	if (dev == local->ddev)
-		prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- if (!local->hostapd && dev == local->dev &&
- (!local->func->card_present || local->func->card_present(local)) &&
- local->hw_ready && local->ap && local->iw_mode == IW_MODE_MASTER)
- hostap_deauth_all_stas(dev, local->ap, 1);
-#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-	if (dev == local->dev)
-		local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL);
-
- if (netif_running(dev)) {
- netif_stop_queue(dev);
- netif_device_detach(dev);
- }
-
- cancel_work_sync(&local->reset_queue);
- cancel_work_sync(&local->set_multicast_list_queue);
- cancel_work_sync(&local->set_tim_queue);
-#ifndef PRISM2_NO_STATION_MODES
- cancel_work_sync(&local->info_queue);
-#endif
- cancel_work_sync(&local->comms_qual_update);
-
- module_put(local->hw_module);
-
- local->num_dev_open--;
-
- if (dev != local->dev && local->dev->flags & IFF_UP &&
- local->master_dev_auto_open && local->num_dev_open == 1) {
- /* Close master radio interface automatically if it was also
- * opened automatically and we are now closing the last
- * remaining non-master device. */
- dev_close(local->dev);
- }
-
- return 0;
-}
-
-
-static int prism2_open(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
-
- PDEBUG(DEBUG_FLOW, "%s: prism2_open\n", dev->name);
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->no_pri) {
- printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
- "f/w\n", dev->name);
- return -ENODEV;
- }
-
- if ((local->func->card_present && !local->func->card_present(local)) ||
- local->hw_downloading)
- return -ENODEV;
-
- if (!try_module_get(local->hw_module))
- return -ENODEV;
- local->num_dev_open++;
-
- if (!local->dev_enabled && local->func->hw_enable(dev, 1)) {
- printk(KERN_WARNING "%s: could not enable MAC port\n",
- dev->name);
- prism2_close(dev);
- return -ENODEV;
- }
- if (!local->dev_enabled)
- prism2_callback(local, PRISM2_CALLBACK_ENABLE);
- local->dev_enabled = 1;
-
- if (dev != local->dev && !(local->dev->flags & IFF_UP)) {
-		/* The master radio interface is needed for all operations, so
-		 * open it automatically when any virtual net_device is opened. */
- local->master_dev_auto_open = 1;
- dev_open(local->dev, NULL);
- }
-
- netif_device_attach(dev);
- netif_start_queue(dev);
-
- return 0;
-}
-
-
-static int prism2_set_mac_address(struct net_device *dev, void *p)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct list_head *ptr;
- struct sockaddr *addr = p;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- if (local->func->set_rid(dev, HFA384X_RID_CNFOWNMACADDR, addr->sa_data,
- ETH_ALEN) < 0 || local->func->reset_port(dev))
- return -EINVAL;
-
- read_lock_bh(&local->iface_lock);
- list_for_each(ptr, &local->hostap_interfaces) {
- iface = list_entry(ptr, struct hostap_interface, list);
- eth_hw_addr_set(iface->dev, addr->sa_data);
- }
- eth_hw_addr_set(local->dev, addr->sa_data);
- read_unlock_bh(&local->iface_lock);
-
- return 0;
-}
-
-
-/* TODO: to be further implemented as soon as Prism2 fully supports
- * GroupAddresses and correct documentation is available */
-void hostap_set_multicast_list_queue(struct work_struct *work)
-{
- local_info_t *local =
- container_of(work, local_info_t, set_multicast_list_queue);
- struct net_device *dev = local->dev;
-
- if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
- local->is_promisc)) {
- printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
- dev->name, local->is_promisc ? "en" : "dis");
- }
-}
-
-
-static void hostap_set_multicast_list(struct net_device *dev)
-{
-#if 0
-	/* FIX: promiscuous mode seems to be causing a lot of problems with
-	 * some station firmware versions (FCSErr frames, invalid MACPort,
-	 * corrupted incoming frames, etc.). This code is now commented out
-	 * while the problems are investigated. */
- struct hostap_interface *iface;
- local_info_t *local;
-
- iface = netdev_priv(dev);
- local = iface->local;
- if ((dev->flags & IFF_ALLMULTI) || (dev->flags & IFF_PROMISC)) {
- local->is_promisc = 1;
- } else {
- local->is_promisc = 0;
- }
-
- schedule_work(&local->set_multicast_list_queue);
-#endif
-}
-
-
-static void prism2_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- struct hfa384x_regs regs;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
-	printk(KERN_WARNING "%s: Tx timed out! Resetting card\n", dev->name);
- netif_stop_queue(local->dev);
-
- local->func->read_regs(dev, &regs);
- printk(KERN_DEBUG "%s: CMD=%04x EVSTAT=%04x "
- "OFFSET0=%04x OFFSET1=%04x SWSUPPORT0=%04x\n",
- dev->name, regs.cmd, regs.evstat, regs.offset0, regs.offset1,
- regs.swsupport0);
-
- local->func->schedule_reset(local);
-}
-
-const struct header_ops hostap_80211_ops = {
- .create = eth_header,
- .cache = eth_header_cache,
- .cache_update = eth_header_cache_update,
- .parse = hostap_80211_header_parse,
-};
-EXPORT_SYMBOL(hostap_80211_ops);
-
-
-static const struct net_device_ops hostap_netdev_ops = {
- .ndo_start_xmit = hostap_data_start_xmit,
-
- .ndo_open = prism2_open,
- .ndo_stop = prism2_close,
- .ndo_siocdevprivate = hostap_siocdevprivate,
- .ndo_set_mac_address = prism2_set_mac_address,
- .ndo_set_rx_mode = hostap_set_multicast_list,
- .ndo_tx_timeout = prism2_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static const struct net_device_ops hostap_mgmt_netdev_ops = {
- .ndo_start_xmit = hostap_mgmt_start_xmit,
-
- .ndo_open = prism2_open,
- .ndo_stop = prism2_close,
- .ndo_siocdevprivate = hostap_siocdevprivate,
- .ndo_set_mac_address = prism2_set_mac_address,
- .ndo_set_rx_mode = hostap_set_multicast_list,
- .ndo_tx_timeout = prism2_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static const struct net_device_ops hostap_master_ops = {
- .ndo_start_xmit = hostap_master_start_xmit,
-
- .ndo_open = prism2_open,
- .ndo_stop = prism2_close,
- .ndo_siocdevprivate = hostap_siocdevprivate,
- .ndo_set_mac_address = prism2_set_mac_address,
- .ndo_set_rx_mode = hostap_set_multicast_list,
- .ndo_tx_timeout = prism2_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-void hostap_setup_dev(struct net_device *dev, local_info_t *local,
- int type)
-{
- struct hostap_interface *iface;
-
- iface = netdev_priv(dev);
- ether_setup(dev);
- dev->min_mtu = PRISM2_MIN_MTU;
- dev->max_mtu = PRISM2_MAX_MTU;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
-
- /* kernel callbacks */
- if (iface) {
- /* Currently, we point to the proper spy_data only on
- * the main_dev. This could be fixed. Jean II */
- iface->wireless_data.spy_data = &iface->spy_data;
- dev->wireless_data = &iface->wireless_data;
- }
- dev->wireless_handlers = &hostap_iw_handler_def;
- dev->watchdog_timeo = TX_TIMEOUT;
-
-	switch (type) {
- case HOSTAP_INTERFACE_AP:
- dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */
- dev->netdev_ops = &hostap_mgmt_netdev_ops;
- dev->type = ARPHRD_IEEE80211;
- dev->header_ops = &hostap_80211_ops;
- break;
- case HOSTAP_INTERFACE_MASTER:
- dev->netdev_ops = &hostap_master_ops;
- break;
- default:
- dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */
- dev->netdev_ops = &hostap_netdev_ops;
- }
-
-	dev->mtu = local->mtu;
-
-	dev->ethtool_ops = &prism2_ethtool_ops;
-}
-
-static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
-{
- struct net_device *dev = local->dev;
-
- if (local->apdev)
- return -EEXIST;
-
- printk(KERN_DEBUG "%s: enabling hostapd mode\n", dev->name);
-
- local->apdev = hostap_add_interface(local, HOSTAP_INTERFACE_AP,
- rtnl_locked, local->ddev->name,
- "ap");
- if (local->apdev == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-
-static int hostap_disable_hostapd(local_info_t *local, int rtnl_locked)
-{
- struct net_device *dev = local->dev;
-
- printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name);
-
- hostap_remove_interface(local->apdev, rtnl_locked, 1);
- local->apdev = NULL;
-
- return 0;
-}
-
-
-static int hostap_enable_hostapd_sta(local_info_t *local, int rtnl_locked)
-{
- struct net_device *dev = local->dev;
-
- if (local->stadev)
- return -EEXIST;
-
- printk(KERN_DEBUG "%s: enabling hostapd STA mode\n", dev->name);
-
- local->stadev = hostap_add_interface(local, HOSTAP_INTERFACE_STA,
- rtnl_locked, local->ddev->name,
- "sta");
- if (local->stadev == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-
-static int hostap_disable_hostapd_sta(local_info_t *local, int rtnl_locked)
-{
- struct net_device *dev = local->dev;
-
- printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name);
-
- hostap_remove_interface(local->stadev, rtnl_locked, 1);
- local->stadev = NULL;
-
- return 0;
-}
-
-
-int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked)
-{
- int ret;
-
- if (val < 0 || val > 1)
- return -EINVAL;
-
- if (local->hostapd == val)
- return 0;
-
- if (val) {
- ret = hostap_enable_hostapd(local, rtnl_locked);
- if (ret == 0)
- local->hostapd = 1;
- } else {
- local->hostapd = 0;
- ret = hostap_disable_hostapd(local, rtnl_locked);
- if (ret != 0)
- local->hostapd = 1;
- }
-
- return ret;
-}
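The toggle above orders its bookkeeping asymmetrically: on enable the flag is set only after the interface has been created, while on disable the flag is cleared before teardown and restored if teardown fails, so the flag never claims a mode that was not reached. A minimal sketch of the same pattern under hypothetical names (struct state, feature_up() and feature_down() are placeholders, not driver symbols):

	struct state { int enabled; };

	static int feature_up(struct state *s);	/* hypothetical setup step */
	static int feature_down(struct state *s);	/* hypothetical teardown step */

	static int set_feature(struct state *s, int val)
	{
		int ret;

		if (s->enabled == val)
			return 0;

		if (val) {
			ret = feature_up(s);
			if (ret == 0)
				s->enabled = 1;		/* set only after success */
		} else {
			s->enabled = 0;			/* clear before teardown */
			ret = feature_down(s);
			if (ret != 0)
				s->enabled = 1;		/* roll back on failure */
		}
		return ret;
	}

hostap_set_hostapd_sta() below follows the identical shape for the STA interface.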
-
-
-int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked)
-{
- int ret;
-
- if (val < 0 || val > 1)
- return -EINVAL;
-
- if (local->hostapd_sta == val)
- return 0;
-
- if (val) {
- ret = hostap_enable_hostapd_sta(local, rtnl_locked);
- if (ret == 0)
- local->hostapd_sta = 1;
- } else {
- local->hostapd_sta = 0;
- ret = hostap_disable_hostapd_sta(local, rtnl_locked);
- if (ret != 0)
- local->hostapd_sta = 1;
- }
-
-
- return ret;
-}
-
-
-int prism2_update_comms_qual(struct net_device *dev)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- int ret = 0;
- struct hfa384x_comms_quality sq;
-
- iface = netdev_priv(dev);
- local = iface->local;
- if (!local->sta_fw_ver)
- ret = -1;
- else if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
- if (local->func->get_rid(local->dev,
- HFA384X_RID_DBMCOMMSQUALITY,
- &sq, sizeof(sq), 1) >= 0) {
- local->comms_qual = (s16) le16_to_cpu(sq.comm_qual);
- local->avg_signal = (s16) le16_to_cpu(sq.signal_level);
- local->avg_noise = (s16) le16_to_cpu(sq.noise_level);
- local->last_comms_qual_update = jiffies;
- } else
- ret = -1;
- } else {
- if (local->func->get_rid(local->dev, HFA384X_RID_COMMSQUALITY,
- &sq, sizeof(sq), 1) >= 0) {
- local->comms_qual = le16_to_cpu(sq.comm_qual);
- local->avg_signal = HFA384X_LEVEL_TO_dBm(
- le16_to_cpu(sq.signal_level));
- local->avg_noise = HFA384X_LEVEL_TO_dBm(
- le16_to_cpu(sq.noise_level));
- local->last_comms_qual_update = jiffies;
- } else
- ret = -1;
- }
-
- return ret;
-}
-
-
-int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
- u8 *body, size_t bodylen)
-{
- struct sk_buff *skb;
- struct hostap_ieee80211_mgmt *mgmt;
- struct hostap_skb_tx_data *meta;
- struct net_device *dev = local->dev;
-
- skb = dev_alloc_skb(IEEE80211_MGMT_HDR_LEN + bodylen);
- if (skb == NULL)
- return -ENOMEM;
-
- mgmt = skb_put_zero(skb, IEEE80211_MGMT_HDR_LEN);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
- memcpy(mgmt->da, dst, ETH_ALEN);
- memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, dst, ETH_ALEN);
- if (body)
- skb_put_data(skb, body, bodylen);
-
- meta = (struct hostap_skb_tx_data *) skb->cb;
- memset(meta, 0, sizeof(*meta));
- meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
- meta->iface = netdev_priv(dev);
-
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- dev_queue_xmit(skb);
-
- return 0;
-}
-
-
-int prism2_sta_deauth(local_info_t *local, u16 reason)
-{
- union iwreq_data wrqu;
- int ret;
- __le16 val = cpu_to_le16(reason);
-
- if (local->iw_mode != IW_MODE_INFRA ||
- is_zero_ether_addr(local->bssid) ||
- ether_addr_equal(local->bssid, "\x44\x44\x44\x44\x44\x44"))
- return 0;
-
- ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
- (u8 *) &val, 2);
- eth_zero_addr(wrqu.ap_addr.sa_data);
- wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
- return ret;
-}
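prism2_sta_deauth() above shows the calling convention for prism2_sta_send_mgmt(): the frame body is passed as raw bytes already in on-air (little-endian) order. A hedged sketch of the analogous disassociation frame (not part of the driver; IEEE80211_STYPE_DISASSOC comes from <linux/ieee80211.h>):

	/* Sketch only: a disassoc body built the same way prism2_sta_deauth()
	 * builds its deauth body. */
	static int prism2_sta_disassoc_sketch(local_info_t *local, u16 reason)
	{
		__le16 val = cpu_to_le16(reason);	/* 2-byte reason code */

		return prism2_sta_send_mgmt(local, local->bssid,
					    IEEE80211_STYPE_DISASSOC,
					    (u8 *) &val, sizeof(val));
	}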
-
-
-struct proc_dir_entry *hostap_proc;
-
-static int __init hostap_init(void)
-{
- if (init_net.proc_net != NULL) {
- hostap_proc = proc_mkdir("hostap", init_net.proc_net);
- if (!hostap_proc)
- printk(KERN_WARNING "Failed to mkdir "
- "/proc/net/hostap\n");
- } else
- hostap_proc = NULL;
-
- return 0;
-}
-
-
-static void __exit hostap_exit(void)
-{
- if (hostap_proc != NULL) {
- hostap_proc = NULL;
- remove_proc_entry("hostap", init_net.proc_net);
- }
-}
-
-
-EXPORT_SYMBOL(hostap_set_word);
-EXPORT_SYMBOL(hostap_set_string);
-EXPORT_SYMBOL(hostap_get_porttype);
-EXPORT_SYMBOL(hostap_set_encryption);
-EXPORT_SYMBOL(hostap_set_antsel);
-EXPORT_SYMBOL(hostap_set_roaming);
-EXPORT_SYMBOL(hostap_set_auth_algs);
-EXPORT_SYMBOL(hostap_dump_rx_header);
-EXPORT_SYMBOL(hostap_dump_tx_header);
-EXPORT_SYMBOL(hostap_80211_get_hdrlen);
-EXPORT_SYMBOL(hostap_setup_dev);
-EXPORT_SYMBOL(hostap_set_multicast_list_queue);
-EXPORT_SYMBOL(hostap_set_hostapd);
-EXPORT_SYMBOL(hostap_set_hostapd_sta);
-EXPORT_SYMBOL(hostap_add_interface);
-EXPORT_SYMBOL(hostap_remove_interface);
-EXPORT_SYMBOL(prism2_update_comms_qual);
-
-module_init(hostap_init);
-module_exit(hostap_exit);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_pci.c b/drivers/net/wireless/intersil/hostap/hostap_pci.c
deleted file mode 100644
index 52d77506e..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_pci.c
+++ /dev/null
@@ -1,445 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define PRISM2_PCI
-
-/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
- * driver patches from Reyk Floeter <reyk@vantronix.net> and
- * Andy Warner <andyw@pobox.com> */
-
-#include <linux/module.h>
-#include <linux/if.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-
-#include "hostap_wlan.h"
-
-
-static char *dev_info = "hostap_pci";
-
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
- "PCI cards.");
-MODULE_LICENSE("GPL");
-
-
-/* struct local_info::hw_priv */
-struct hostap_pci_priv {
- void __iomem *mem_start;
-};
-
-
-/* FIX: do we need mb/wmb/rmb with memory operations? */
-
-
-static const struct pci_device_id prism2_pci_id_table[] = {
- /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
- { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
- /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
- { 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
- /* Samsung MagicLAN SWL-2210P */
- { 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
- { 0 }
-};
-
-
-#ifdef PRISM2_IO_DEBUG
-
-static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
- hw_priv = local->hw_priv;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
- writeb(v, hw_priv->mem_start + a);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- local_info_t *local;
- unsigned long flags;
- u8 v;
-
- iface = netdev_priv(dev);
- local = iface->local;
- hw_priv = local->hw_priv;
-
- spin_lock_irqsave(&local->lock, flags);
- v = readb(hw_priv->mem_start + a);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
- spin_unlock_irqrestore(&local->lock, flags);
- return v;
-}
-
-static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
- hw_priv = local->hw_priv;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
- writew(v, hw_priv->mem_start + a);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- local_info_t *local;
- unsigned long flags;
- u16 v;
-
- iface = netdev_priv(dev);
- local = iface->local;
- hw_priv = local->hw_priv;
-
- spin_lock_irqsave(&local->lock, flags);
- v = readw(hw_priv->mem_start + a);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
- spin_unlock_irqrestore(&local->lock, flags);
- return v;
-}
-
-#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
-#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
-#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
-#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
-#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v)))
-#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a)))
-
-#else /* PRISM2_IO_DEBUG */
-
-static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
- writeb(v, hw_priv->mem_start + a);
-}
-
-static inline u8 hfa384x_inb(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
- return readb(hw_priv->mem_start + a);
-}
-
-static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
- writew(v, hw_priv->mem_start + a);
-}
-
-static inline u16 hfa384x_inw(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
- return readw(hw_priv->mem_start + a);
-}
-
-#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
-#define HFA384X_INB(a) hfa384x_inb(dev, (a))
-#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
-#define HFA384X_INW(a) hfa384x_inw(dev, (a))
-#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v)))
-#define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a)))
-
-#endif /* PRISM2_IO_DEBUG */
-
-
-static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
- int len)
-{
- u16 d_off;
- __le16 *pos;
-
- d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
- pos = (__le16 *) buf;
-
- for ( ; len > 1; len -= 2)
- *pos++ = HFA384X_INW_DATA(d_off);
-
- if (len & 1)
- *((char *) pos) = HFA384X_INB(d_off);
-
- return 0;
-}
-
-
-static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
-{
- u16 d_off;
- __le16 *pos;
-
- d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
- pos = (__le16 *) buf;
-
- for ( ; len > 1; len -= 2)
- HFA384X_OUTW_DATA(*pos++, d_off);
-
- if (len & 1)
- HFA384X_OUTB(*((char *) pos), d_off);
-
- return 0;
-}
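Both helpers above move data through the BAP 16 bits at a time and finish an odd-length buffer with one byte access: for len = 5 the loop condition len > 1 admits len = 5 and len = 3 (two word transfers, four bytes), exits at len = 1, and the final byte then goes through HFA384X_INB()/HFA384X_OUTB(). The PLX variants later in this diff make the same even/odd split, with insw()/outsw() doing the word part in a single call.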
-
-
-/* FIX: This might change at some point.. */
-#include "hostap_hw.c"
-
-static void prism2_pci_cor_sreset(local_info_t *local)
-{
- struct net_device *dev = local->dev;
- u16 reg;
-
- reg = HFA384X_INB(HFA384X_PCICOR_OFF);
- printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg);
-
- /* linux-wlan-ng uses extremely long hold and settle times for
- * COR sreset. A comment in the driver code mentions that the long
- * delays appear to be necessary. However, at least IBM 22P6901 seems
- * to work fine with shorter delays.
- *
- * Longer delays can be configured by uncommenting following line: */
-/* #define PRISM2_PCI_USE_LONG_DELAYS */
-
-#ifdef PRISM2_PCI_USE_LONG_DELAYS
- int i;
-
- HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
- mdelay(250);
-
- HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
- mdelay(500);
-
- /* Wait for f/w to complete initialization (CMD:BUSY == 0) */
- i = 2000000 / 10;
- while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
- udelay(10);
-
-#else /* PRISM2_PCI_USE_LONG_DELAYS */
-
- HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
- mdelay(2);
- HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
- mdelay(2);
-
-#endif /* PRISM2_PCI_USE_LONG_DELAYS */
-
- if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
- printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
- }
-}
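The long-delay branch above bounds its CMD:BUSY wait by iteration count and re-checks the busy bit afterwards. A generalized sketch of that bounded-poll idiom (CMD_OFF, BUSY_BIT, read_reg() and delay_us() are placeholders, not driver symbols):

	#define CMD_OFF		0x00	/* placeholder command register offset */
	#define BUSY_BIT	0x8000	/* placeholder busy flag, cf. HFA384X_CMD_BUSY */

	static unsigned int read_reg(int off);	/* placeholder register read */
	static void delay_us(unsigned int us);	/* placeholder delay */

	/* Poll until BUSY clears or the budget runs out; returns 0 on success. */
	static int wait_not_busy(unsigned int budget_us, unsigned int step_us)
	{
		unsigned int i = budget_us / step_us;

		while ((read_reg(CMD_OFF) & BUSY_BIT) && --i)
			delay_us(step_us);

		return (read_reg(CMD_OFF) & BUSY_BIT) ? -1 : 0;
	}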
-
-
-static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
-{
- struct net_device *dev = local->dev;
-
- HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
- mdelay(10);
- HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
- mdelay(10);
- HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
- mdelay(10);
-}
-
-
-static struct prism2_helper_functions prism2_pci_funcs =
-{
- .card_present = NULL,
- .cor_sreset = prism2_pci_cor_sreset,
- .genesis_reset = prism2_pci_genesis_reset,
- .hw_type = HOSTAP_HW_PCI,
-};
-
-
-static int prism2_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- unsigned long phymem;
- void __iomem *mem = NULL;
- local_info_t *local = NULL;
- struct net_device *dev = NULL;
- static int cards_found /* = 0 */;
- int irq_registered = 0;
- struct hostap_interface *iface;
- struct hostap_pci_priv *hw_priv;
-
- hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
- if (hw_priv == NULL)
- return -ENOMEM;
-
- if (pci_enable_device(pdev))
- goto err_out_free;
-
- phymem = pci_resource_start(pdev, 0);
-
- if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
- printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
- goto err_out_disable;
- }
-
- mem = pci_ioremap_bar(pdev, 0);
- if (mem == NULL) {
- printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ;
- goto fail;
- }
-
- dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
- &pdev->dev);
- if (dev == NULL)
- goto fail;
- iface = netdev_priv(dev);
- local = iface->local;
- local->hw_priv = hw_priv;
- cards_found++;
-
- dev->irq = pdev->irq;
- hw_priv->mem_start = mem;
- dev->base_addr = (unsigned long) mem;
-
- prism2_pci_cor_sreset(local);
-
- pci_set_drvdata(pdev, dev);
-
- if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
- dev)) {
- printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
- goto fail;
- } else
- irq_registered = 1;
-
- if (!local->pri_only && prism2_hw_config(dev, 1)) {
- printk(KERN_DEBUG "%s: hardware initialization failed\n",
- dev_info);
- goto fail;
- }
-
- printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
- "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
-
- return hostap_hw_ready(dev);
-
- fail:
- if (irq_registered && dev)
- free_irq(dev->irq, dev);
-
- if (mem)
- iounmap(mem);
-
- release_mem_region(phymem, pci_resource_len(pdev, 0));
-
- err_out_disable:
- pci_disable_device(pdev);
- prism2_free_local_data(dev);
-
- err_out_free:
- kfree(hw_priv);
-
- return -ENODEV;
-}
-
-
-static void prism2_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev;
- struct hostap_interface *iface;
- void __iomem *mem_start;
- struct hostap_pci_priv *hw_priv;
-
- dev = pci_get_drvdata(pdev);
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
-
- /* Reset the hardware, and ensure interrupts are disabled. */
- prism2_pci_cor_sreset(iface->local);
- hfa384x_disable_interrupts(dev);
-
- if (dev->irq)
- free_irq(dev->irq, dev);
-
- mem_start = hw_priv->mem_start;
- prism2_free_local_data(dev);
- kfree(hw_priv);
-
- iounmap(mem_start);
-
- release_mem_region(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- pci_disable_device(pdev);
-}
-
-static int __maybe_unused prism2_pci_suspend(struct device *dev_d)
-{
- struct net_device *dev = dev_get_drvdata(dev_d);
-
- if (netif_running(dev)) {
- netif_stop_queue(dev);
- netif_device_detach(dev);
- }
- prism2_suspend(dev);
-
- return 0;
-}
-
-static int __maybe_unused prism2_pci_resume(struct device *dev_d)
-{
- struct net_device *dev = dev_get_drvdata(dev_d);
-
- prism2_hw_config(dev, 0);
- if (netif_running(dev)) {
- netif_device_attach(dev);
- netif_start_queue(dev);
- }
-
- return 0;
-}
-
-MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
-
-static SIMPLE_DEV_PM_OPS(prism2_pci_pm_ops,
- prism2_pci_suspend,
- prism2_pci_resume);
-
-static struct pci_driver prism2_pci_driver = {
- .name = "hostap_pci",
- .id_table = prism2_pci_id_table,
- .probe = prism2_pci_probe,
- .remove = prism2_pci_remove,
- .driver.pm = &prism2_pci_pm_ops,
-};
-
-module_pci_driver(prism2_pci_driver);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_plx.c b/drivers/net/wireless/intersil/hostap/hostap_plx.c
deleted file mode 100644
index 58247290f..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_plx.c
+++ /dev/null
@@ -1,617 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define PRISM2_PLX
-
-/* Host AP driver's support for PC Cards on PCI adapters using PLX9052 is
- * based on:
- * - Host AP driver patch from james@madingley.org
- * - linux-wlan-ng driver, Copyright (C) AbsoluteValue Systems, Inc.
- */
-
-
-#include <linux/module.h>
-#include <linux/if.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-
-#include "hostap_wlan.h"
-
-
-static char *dev_info = "hostap_plx";
-
-
-MODULE_AUTHOR("Jouni Malinen");
-MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
- "cards (PLX).");
-MODULE_LICENSE("GPL");
-
-
-static int ignore_cis;
-module_param(ignore_cis, int, 0444);
-MODULE_PARM_DESC(ignore_cis, "Do not verify manfid information in CIS");
-
-
-/* struct local_info::hw_priv */
-struct hostap_plx_priv {
- void __iomem *attr_mem;
- unsigned int cor_offset;
-};
-
-
-#define PLX_MIN_ATTR_LEN 512 /* at least 2 x 256 is needed for CIS */
-#define COR_SRESET 0x80
-#define COR_LEVLREQ 0x40
-#define COR_ENABLE_FUNC 0x01
-/* PCI Configuration Registers */
-#define PLX_PCIIPR 0x3d /* PCI Interrupt Pin */
-/* Local Configuration Registers */
-#define PLX_INTCSR 0x4c /* Interrupt Control/Status Register */
-#define PLX_INTCSR_PCI_INTEN BIT(6) /* PCI Interrupt Enable */
-#define PLX_CNTRL 0x50
-#define PLX_CNTRL_SERIAL_EEPROM_PRESENT BIT(28)
-
-
-#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
-
-static const struct pci_device_id prism2_plx_id_table[] = {
- PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
- PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
- PLXDEV(0x126c, 0x8030, "Nortel emobility"),
- PLXDEV(0x1562, 0x0001, "Symbol LA-4123"),
- PLXDEV(0x1385, 0x4100, "Netgear MA301"),
- PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"),
- PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"),
- PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"),
- PLXDEV(0x16ab, 0x1100, "Global Sun Tech GL24110P"),
- PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"),
- PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"),
- PLXDEV(0x16ab, 0x1103, "Longshine 8031"),
- PLXDEV(0x16ec, 0x3685, "US Robotics USR2415"),
- PLXDEV(0xec80, 0xec00, "Belkin F5D6000"),
- { 0 }
-};
-
-
-/* Array of known Prism2/2.5 PC Card manufactured ids. If your card's manfid
- * is not listed here, you will need to add it here to get the driver
- * initialized. */
-static struct prism2_plx_manfid {
- u16 manfid1, manfid2;
-} prism2_plx_known_manfids[] = {
- { 0x000b, 0x7110 } /* D-Link DWL-650 Rev. P1 */,
- { 0x000b, 0x7300 } /* Philips 802.11b WLAN PCMCIA */,
- { 0x0101, 0x0777 } /* 3Com AirConnect PCI 777A */,
- { 0x0126, 0x8000 } /* Proxim RangeLAN */,
- { 0x0138, 0x0002 } /* Compaq WL100 */,
- { 0x0156, 0x0002 } /* Intersil Prism II Ref. Design (and others) */,
- { 0x026f, 0x030b } /* Buffalo WLI-CF-S11G */,
- { 0x0274, 0x1612 } /* Linksys WPC11 Ver 2.5 */,
- { 0x0274, 0x1613 } /* Linksys WPC11 Ver 3 */,
- { 0x028a, 0x0002 } /* D-Link DRC-650 */,
- { 0x0250, 0x0002 } /* Samsung SWL2000-N */,
- { 0xc250, 0x0002 } /* EMTAC A2424i */,
- { 0xd601, 0x0002 } /* Z-Com XI300 */,
- { 0xd601, 0x0005 } /* Zcomax XI-325H 200mW */,
- { 0, 0}
-};
-
-
-#ifdef PRISM2_IO_DEBUG
-
-static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
- outb(v, dev->base_addr + a);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
- u8 v;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock_irqsave(&local->lock, flags);
- v = inb(dev->base_addr + a);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
- spin_unlock_irqrestore(&local->lock, flags);
- return v;
-}
-
-static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
- outw(v, dev->base_addr + a);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
- u16 v;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock_irqsave(&local->lock, flags);
- v = inw(dev->base_addr + a);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
- spin_unlock_irqrestore(&local->lock, flags);
- return v;
-}
-
-static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
- u8 *buf, int wc)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
- outsw(dev->base_addr + a, buf, wc);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-static inline void hfa384x_insw_debug(struct net_device *dev, int a,
- u8 *buf, int wc)
-{
- struct hostap_interface *iface;
- local_info_t *local;
- unsigned long flags;
-
- iface = netdev_priv(dev);
- local = iface->local;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
- insw(dev->base_addr + a, buf, wc);
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
-#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
-#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
-#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
-#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
-#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))
-
-#else /* PRISM2_IO_DEBUG */
-
-#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
-#define HFA384X_INB(a) inb(dev->base_addr + (a))
-#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
-#define HFA384X_INW(a) inw(dev->base_addr + (a))
-#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
-#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)
-
-#endif /* PRISM2_IO_DEBUG */
-
-
-static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
- int len)
-{
- u16 d_off;
- u16 *pos;
-
- d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
- pos = (u16 *) buf;
-
- if (len / 2)
- HFA384X_INSW(d_off, buf, len / 2);
- pos += len / 2;
-
- if (len & 1)
- *((char *) pos) = HFA384X_INB(d_off);
-
- return 0;
-}
-
-
-static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
-{
- u16 d_off;
- u16 *pos;
-
- d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
- pos = (u16 *) buf;
-
- if (len / 2)
- HFA384X_OUTSW(d_off, buf, len / 2);
- pos += len / 2;
-
- if (len & 1)
- HFA384X_OUTB(*((char *) pos), d_off);
-
- return 0;
-}
-
-
-/* FIX: This might change at some point.. */
-#include "hostap_hw.c"
-
-
-static void prism2_plx_cor_sreset(local_info_t *local)
-{
- unsigned char corsave;
- struct hostap_plx_priv *hw_priv = local->hw_priv;
-
- printk(KERN_DEBUG "%s: Doing reset via direct COR access.\n",
- dev_info);
-
- /* Set sreset bit of COR and clear it after hold time */
-
- if (hw_priv->attr_mem == NULL) {
- /* TMD7160 - COR at card's first I/O addr */
- corsave = inb(hw_priv->cor_offset);
- outb(corsave | COR_SRESET, hw_priv->cor_offset);
- mdelay(2);
- outb(corsave & ~COR_SRESET, hw_priv->cor_offset);
- mdelay(2);
- } else {
- /* PLX9052 */
- corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset);
- writeb(corsave | COR_SRESET,
- hw_priv->attr_mem + hw_priv->cor_offset);
- mdelay(2);
- writeb(corsave & ~COR_SRESET,
- hw_priv->attr_mem + hw_priv->cor_offset);
- mdelay(2);
- }
-}
-
-
-static void prism2_plx_genesis_reset(local_info_t *local, int hcr)
-{
- unsigned char corsave;
- struct hostap_plx_priv *hw_priv = local->hw_priv;
-
- if (hw_priv->attr_mem == NULL) {
- /* TMD7160 - COR at card's first I/O addr */
- corsave = inb(hw_priv->cor_offset);
- outb(corsave | COR_SRESET, hw_priv->cor_offset);
- mdelay(10);
- outb(hcr, hw_priv->cor_offset + 2);
- mdelay(10);
- outb(corsave & ~COR_SRESET, hw_priv->cor_offset);
- mdelay(10);
- } else {
- /* PLX9052 */
- corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset);
- writeb(corsave | COR_SRESET,
- hw_priv->attr_mem + hw_priv->cor_offset);
- mdelay(10);
- writeb(hcr, hw_priv->attr_mem + hw_priv->cor_offset + 2);
- mdelay(10);
- writeb(corsave & ~COR_SRESET,
- hw_priv->attr_mem + hw_priv->cor_offset);
- mdelay(10);
- }
-}
-
-
-static struct prism2_helper_functions prism2_plx_funcs =
-{
- .card_present = NULL,
- .cor_sreset = prism2_plx_cor_sreset,
- .genesis_reset = prism2_plx_genesis_reset,
- .hw_type = HOSTAP_HW_PLX,
-};
-
-
-static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
- unsigned int *cor_offset,
- unsigned int *cor_index)
-{
-#define CISTPL_CONFIG 0x1A
-#define CISTPL_MANFID 0x20
-#define CISTPL_END 0xFF
-#define CIS_MAX_LEN 256
- u8 *cis;
- int i, pos;
- unsigned int rmsz, rasz, manfid1, manfid2;
- struct prism2_plx_manfid *manfid;
-
- cis = kmalloc(CIS_MAX_LEN, GFP_KERNEL);
- if (cis == NULL)
- return -ENOMEM;
-
- /* read CIS; it is in even offsets in the beginning of attr_mem */
- for (i = 0; i < CIS_MAX_LEN; i++)
- cis[i] = readb(attr_mem + 2 * i);
- printk(KERN_DEBUG "%s: CIS: %6ph ...\n", dev_info, cis);
-
- /* set reasonable defaults for Prism2 cards just in case CIS parsing
- * fails */
- *cor_offset = 0x3e0;
- *cor_index = 0x01;
- manfid1 = manfid2 = 0;
-
- pos = 0;
- while (pos < CIS_MAX_LEN - 1 && cis[pos] != CISTPL_END) {
- if (pos + 2 + cis[pos + 1] > CIS_MAX_LEN)
- goto cis_error;
-
- switch (cis[pos]) {
- case CISTPL_CONFIG:
- if (cis[pos + 1] < 2)
- goto cis_error;
- rmsz = (cis[pos + 2] & 0x3c) >> 2;
- rasz = cis[pos + 2] & 0x03;
- if (4 + rasz + rmsz > cis[pos + 1])
- goto cis_error;
- *cor_index = cis[pos + 3] & 0x3F;
- *cor_offset = 0;
- for (i = 0; i <= rasz; i++)
- *cor_offset += cis[pos + 4 + i] << (8 * i);
- printk(KERN_DEBUG "%s: cor_index=0x%x "
- "cor_offset=0x%x\n", dev_info,
- *cor_index, *cor_offset);
- if (*cor_offset > attr_len) {
- printk(KERN_ERR "%s: COR offset not within "
- "attr_mem\n", dev_info);
- kfree(cis);
- return -1;
- }
- break;
-
- case CISTPL_MANFID:
- if (cis[pos + 1] < 4)
- goto cis_error;
- manfid1 = cis[pos + 2] + (cis[pos + 3] << 8);
- manfid2 = cis[pos + 4] + (cis[pos + 5] << 8);
- printk(KERN_DEBUG "%s: manfid=0x%04x, 0x%04x\n",
- dev_info, manfid1, manfid2);
- break;
- }
-
- pos += cis[pos + 1] + 2;
- }
-
- if (pos >= CIS_MAX_LEN || cis[pos] != CISTPL_END)
- goto cis_error;
-
- for (manfid = prism2_plx_known_manfids; manfid->manfid1 != 0; manfid++)
- if (manfid1 == manfid->manfid1 && manfid2 == manfid->manfid2) {
- kfree(cis);
- return 0;
- }
-
- printk(KERN_INFO "%s: unknown manfid 0x%04x, 0x%04x - assuming this is"
- " not supported card\n", dev_info, manfid1, manfid2);
- goto fail;
-
- cis_error:
- printk(KERN_WARNING "%s: invalid CIS data\n", dev_info);
-
- fail:
- kfree(cis);
- if (ignore_cis) {
- printk(KERN_INFO "%s: ignore_cis parameter set - ignoring "
- "errors during CIS verification\n", dev_info);
- return 0;
- }
- return -1;
-}
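A worked pass through the CISTPL_CONFIG branch above, with hypothetical tuple bytes chosen to reproduce the defaults the function sets first: given 1A 05 01 01 E0 03, cis[pos+1] = 5 is the tuple length; cis[pos+2] = 0x01 gives rmsz = (0x01 & 0x3c) >> 2 = 0 and rasz = 0x01 & 0x03 = 1; the length check 4 + 1 + 0 <= 5 passes; cor_index = cis[pos+3] & 0x3F = 0x01; and the rasz + 1 = 2 address bytes assemble little-endian as cor_offset = 0xE0 + (0x03 << 8) = 0x3E0.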
-
-
-static int prism2_plx_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- unsigned int pccard_ioaddr, plx_ioaddr;
- unsigned long pccard_attr_mem;
- unsigned int pccard_attr_len;
- void __iomem *attr_mem = NULL;
- unsigned int cor_offset = 0, cor_index = 0;
- u32 reg;
- local_info_t *local = NULL;
- struct net_device *dev = NULL;
- struct hostap_interface *iface;
- static int cards_found /* = 0 */;
- int irq_registered = 0;
- int tmd7160;
- struct hostap_plx_priv *hw_priv;
-
- hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
- if (hw_priv == NULL)
- return -ENOMEM;
-
- if (pci_enable_device(pdev))
- goto err_out_free;
-
- /* National Datacomm NCP130 based on TMD7160, not PLX9052. */
- tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131);
-
- plx_ioaddr = pci_resource_start(pdev, 1);
- pccard_ioaddr = pci_resource_start(pdev, tmd7160 ? 2 : 3);
-
- if (tmd7160) {
- /* TMD7160 */
- attr_mem = NULL; /* no access to PC Card attribute memory */
-
- printk(KERN_INFO "TMD7160 PCI/PCMCIA adapter: io=0x%x, "
- "irq=%d, pccard_io=0x%x\n",
- plx_ioaddr, pdev->irq, pccard_ioaddr);
-
- cor_offset = plx_ioaddr;
- cor_index = 0x04;
-
- outb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, plx_ioaddr);
- mdelay(1);
- reg = inb(plx_ioaddr);
- if (reg != (cor_index | COR_LEVLREQ | COR_ENABLE_FUNC)) {
- printk(KERN_ERR "%s: Error setting COR (expected="
- "0x%02x, was=0x%02x)\n", dev_info,
- cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, reg);
- goto fail;
- }
- } else {
- /* PLX9052 */
- pccard_attr_mem = pci_resource_start(pdev, 2);
- pccard_attr_len = pci_resource_len(pdev, 2);
- if (pccard_attr_len < PLX_MIN_ATTR_LEN)
- goto fail;
-
-
- attr_mem = ioremap(pccard_attr_mem, pccard_attr_len);
- if (attr_mem == NULL) {
- printk(KERN_ERR "%s: cannot remap attr_mem\n",
- dev_info);
- goto fail;
- }
-
- printk(KERN_INFO "PLX9052 PCI/PCMCIA adapter: "
- "mem=0x%lx, plx_io=0x%x, irq=%d, pccard_io=0x%x\n",
- pccard_attr_mem, plx_ioaddr, pdev->irq, pccard_ioaddr);
-
- if (prism2_plx_check_cis(attr_mem, pccard_attr_len,
- &cor_offset, &cor_index)) {
- printk(KERN_INFO "Unknown PC Card CIS - not a "
- "Prism2/2.5 card?\n");
- goto fail;
- }
-
- printk(KERN_DEBUG "Prism2/2.5 PC Card detected in PLX9052 "
- "adapter\n");
-
- /* Write COR to enable PC Card */
- writeb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC,
- attr_mem + cor_offset);
-
- /* Enable PCI interrupts if they are not already enabled */
- reg = inl(plx_ioaddr + PLX_INTCSR);
- printk(KERN_DEBUG "PLX_INTCSR=0x%x\n", reg);
- if (!(reg & PLX_INTCSR_PCI_INTEN)) {
- outl(reg | PLX_INTCSR_PCI_INTEN,
- plx_ioaddr + PLX_INTCSR);
- if (!(inl(plx_ioaddr + PLX_INTCSR) &
- PLX_INTCSR_PCI_INTEN)) {
- printk(KERN_WARNING "%s: Could not enable "
- "Local Interrupts\n", dev_info);
- goto fail;
- }
- }
-
- reg = inl(plx_ioaddr + PLX_CNTRL);
- printk(KERN_DEBUG "PLX_CNTRL=0x%x (Serial EEPROM "
- "present=%d)\n",
- reg, (reg & PLX_CNTRL_SERIAL_EEPROM_PRESENT) != 0);
- /* should set PLX_PCIIPR to 0x01 (INTA#) if Serial EEPROM is
- * not present; but are there really such cards in use(?) */
- }
-
- dev = prism2_init_local_data(&prism2_plx_funcs, cards_found,
- &pdev->dev);
- if (dev == NULL)
- goto fail;
- iface = netdev_priv(dev);
- local = iface->local;
- local->hw_priv = hw_priv;
- cards_found++;
-
- dev->irq = pdev->irq;
- dev->base_addr = pccard_ioaddr;
- hw_priv->attr_mem = attr_mem;
- hw_priv->cor_offset = cor_offset;
-
- pci_set_drvdata(pdev, dev);
-
- if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name,
- dev)) {
- printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
- goto fail;
- } else
- irq_registered = 1;
-
- if (prism2_hw_config(dev, 1)) {
- printk(KERN_DEBUG "%s: hardware initialization failed\n",
- dev_info);
- goto fail;
- }
-
- return hostap_hw_ready(dev);
-
- fail:
- if (irq_registered && dev)
- free_irq(dev->irq, dev);
-
- if (attr_mem)
- iounmap(attr_mem);
-
- pci_disable_device(pdev);
- prism2_free_local_data(dev);
-
- err_out_free:
- kfree(hw_priv);
-
- return -ENODEV;
-}
-
-
-static void prism2_plx_remove(struct pci_dev *pdev)
-{
- struct net_device *dev;
- struct hostap_interface *iface;
- struct hostap_plx_priv *hw_priv;
-
- dev = pci_get_drvdata(pdev);
- iface = netdev_priv(dev);
- hw_priv = iface->local->hw_priv;
-
- /* Reset the hardware, and ensure interrupts are disabled. */
- prism2_plx_cor_sreset(iface->local);
- hfa384x_disable_interrupts(dev);
-
- if (hw_priv->attr_mem)
- iounmap(hw_priv->attr_mem);
- if (dev->irq)
- free_irq(dev->irq, dev);
-
- prism2_free_local_data(dev);
- kfree(hw_priv);
- pci_disable_device(pdev);
-}
-
-
-MODULE_DEVICE_TABLE(pci, prism2_plx_id_table);
-
-static struct pci_driver prism2_plx_driver = {
- .name = "hostap_plx",
- .id_table = prism2_plx_id_table,
- .probe = prism2_plx_probe,
- .remove = prism2_plx_remove,
-};
-
-module_pci_driver(prism2_plx_driver);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
deleted file mode 100644
index 61f687860..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ /dev/null
@@ -1,411 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* /proc routines for Host AP driver */
-
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/export.h>
-#include <net/lib80211.h>
-
-#include "hostap_wlan.h"
-#include "hostap.h"
-
-#define PROC_LIMIT (PAGE_SIZE - 80)
-
-#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
-static int prism2_debug_proc_show(struct seq_file *m, void *v)
-{
- local_info_t *local = m->private;
- int i;
-
- seq_printf(m, "next_txfid=%d next_alloc=%d\n",
- local->next_txfid, local->next_alloc);
- for (i = 0; i < PRISM2_TXFID_COUNT; i++)
- seq_printf(m, "FID: tx=%04X intransmit=%04X\n",
- local->txfid[i], local->intransmitfid[i]);
- seq_printf(m, "FW TX rate control: %d\n", local->fw_tx_rate_control);
- seq_printf(m, "beacon_int=%d\n", local->beacon_int);
- seq_printf(m, "dtim_period=%d\n", local->dtim_period);
- seq_printf(m, "wds_max_connections=%d\n", local->wds_max_connections);
- seq_printf(m, "dev_enabled=%d\n", local->dev_enabled);
- seq_printf(m, "sw_tick_stuck=%d\n", local->sw_tick_stuck);
- for (i = 0; i < WEP_KEYS; i++) {
- if (local->crypt_info.crypt[i] &&
- local->crypt_info.crypt[i]->ops) {
- seq_printf(m, "crypt[%d]=%s\n", i,
- local->crypt_info.crypt[i]->ops->name);
- }
- }
- seq_printf(m, "pri_only=%d\n", local->pri_only);
- seq_printf(m, "pci=%d\n", local->func->hw_type == HOSTAP_HW_PCI);
- seq_printf(m, "sram_type=%d\n", local->sram_type);
- seq_printf(m, "no_pri=%d\n", local->no_pri);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int prism2_stats_proc_show(struct seq_file *m, void *v)
-{
- local_info_t *local = m->private;
- struct comm_tallies_sums *sums = &local->comm_tallies;
-
- seq_printf(m, "TxUnicastFrames=%u\n", sums->tx_unicast_frames);
- seq_printf(m, "TxMulticastframes=%u\n", sums->tx_multicast_frames);
- seq_printf(m, "TxFragments=%u\n", sums->tx_fragments);
- seq_printf(m, "TxUnicastOctets=%u\n", sums->tx_unicast_octets);
- seq_printf(m, "TxMulticastOctets=%u\n", sums->tx_multicast_octets);
- seq_printf(m, "TxDeferredTransmissions=%u\n",
- sums->tx_deferred_transmissions);
- seq_printf(m, "TxSingleRetryFrames=%u\n", sums->tx_single_retry_frames);
- seq_printf(m, "TxMultipleRetryFrames=%u\n",
- sums->tx_multiple_retry_frames);
- seq_printf(m, "TxRetryLimitExceeded=%u\n",
- sums->tx_retry_limit_exceeded);
- seq_printf(m, "TxDiscards=%u\n", sums->tx_discards);
- seq_printf(m, "RxUnicastFrames=%u\n", sums->rx_unicast_frames);
- seq_printf(m, "RxMulticastFrames=%u\n", sums->rx_multicast_frames);
- seq_printf(m, "RxFragments=%u\n", sums->rx_fragments);
- seq_printf(m, "RxUnicastOctets=%u\n", sums->rx_unicast_octets);
- seq_printf(m, "RxMulticastOctets=%u\n", sums->rx_multicast_octets);
- seq_printf(m, "RxFCSErrors=%u\n", sums->rx_fcs_errors);
- seq_printf(m, "RxDiscardsNoBuffer=%u\n", sums->rx_discards_no_buffer);
- seq_printf(m, "TxDiscardsWrongSA=%u\n", sums->tx_discards_wrong_sa);
- seq_printf(m, "RxDiscardsWEPUndecryptable=%u\n",
- sums->rx_discards_wep_undecryptable);
- seq_printf(m, "RxMessageInMsgFragments=%u\n",
- sums->rx_message_in_msg_fragments);
- seq_printf(m, "RxMessageInBadMsgFragments=%u\n",
- sums->rx_message_in_bad_msg_fragments);
- /* FIX: this may grow too long for one page(?) */
-
- return 0;
-}
-#endif
-
-static int prism2_wds_proc_show(struct seq_file *m, void *v)
-{
- struct list_head *ptr = v;
- struct hostap_interface *iface;
-
- iface = list_entry(ptr, struct hostap_interface, list);
- if (iface->type == HOSTAP_INTERFACE_WDS)
- seq_printf(m, "%s\t%pM\n",
- iface->dev->name, iface->u.wds.remote_addr);
- return 0;
-}
-
-static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- read_lock_bh(&local->iface_lock);
- return seq_list_start(&local->hostap_interfaces, *_pos);
-}
-
-static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- return seq_list_next(v, &local->hostap_interfaces, _pos);
-}
-
-static void prism2_wds_proc_stop(struct seq_file *m, void *v)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- read_unlock_bh(&local->iface_lock);
-}
-
-static const struct seq_operations prism2_wds_proc_seqops = {
- .start = prism2_wds_proc_start,
- .next = prism2_wds_proc_next,
- .stop = prism2_wds_proc_stop,
- .show = prism2_wds_proc_show,
-};
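The wds file above is the standard seq_file iterator idiom: the lock is taken in .start, held across .next and .show, and released in .stop, so the list cannot change mid-walk. A self-contained sketch of the same idiom over a hypothetical spinlock-protected list (my_list, my_lock and the ex_* names are placeholders):

	#include <linux/list.h>
	#include <linux/seq_file.h>
	#include <linux/spinlock.h>

	static LIST_HEAD(my_list);		/* hypothetical shared list */
	static DEFINE_SPINLOCK(my_lock);	/* protects my_list */

	static void *ex_start(struct seq_file *m, loff_t *pos)
	{
		spin_lock_bh(&my_lock);		/* held for the whole walk */
		return seq_list_start(&my_list, *pos);
	}

	static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
	{
		return seq_list_next(v, &my_list, pos);
	}

	static void ex_stop(struct seq_file *m, void *v)
	{
		spin_unlock_bh(&my_lock);	/* matches ex_start() */
	}

	static int ex_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "entry\n");		/* one output line per node */
		return 0;
	}

	static const struct seq_operations ex_seqops = {
		.start	= ex_start,
		.next	= ex_next,
		.stop	= ex_stop,
		.show	= ex_show,
	};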
-
-static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- struct list_head *ptr = v;
- struct hostap_bss_info *bss;
-
- if (ptr == &local->bss_list) {
- seq_printf(m, "#BSSID\tlast_update\tcount\tcapab_info\tSSID(txt)\t"
- "SSID(hex)\tWPA IE\n");
- return 0;
- }
-
- bss = list_entry(ptr, struct hostap_bss_info, list);
- seq_printf(m, "%pM\t%lu\t%u\t0x%x\t",
- bss->bssid, bss->last_update,
- bss->count, bss->capab_info);
-
- seq_printf(m, "%*pE", (int)bss->ssid_len, bss->ssid);
-
- seq_putc(m, '\t');
- seq_printf(m, "%*phN", (int)bss->ssid_len, bss->ssid);
- seq_putc(m, '\t');
- seq_printf(m, "%*phN", (int)bss->wpa_ie_len, bss->wpa_ie);
- seq_putc(m, '\n');
- return 0;
-}
-
-static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
- __acquires(&local->lock)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- spin_lock_bh(&local->lock);
- return seq_list_start_head(&local->bss_list, *_pos);
-}
-
-static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- return seq_list_next(v, &local->bss_list, _pos);
-}
-
-static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
- __releases(&local->lock)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- spin_unlock_bh(&local->lock);
-}
-
-static const struct seq_operations prism2_bss_list_proc_seqops = {
- .start = prism2_bss_list_proc_start,
- .next = prism2_bss_list_proc_next,
- .stop = prism2_bss_list_proc_stop,
- .show = prism2_bss_list_proc_show,
-};
-
-#ifdef CONFIG_PROC_FS
-static int prism2_crypt_proc_show(struct seq_file *m, void *v)
-{
- local_info_t *local = m->private;
- int i;
-
- seq_printf(m, "tx_keyidx=%d\n", local->crypt_info.tx_keyidx);
- for (i = 0; i < WEP_KEYS; i++) {
- if (local->crypt_info.crypt[i] &&
- local->crypt_info.crypt[i]->ops &&
- local->crypt_info.crypt[i]->ops->print_stats) {
- local->crypt_info.crypt[i]->ops->print_stats(
- m, local->crypt_info.crypt[i]->priv);
- }
- }
- return 0;
-}
-#endif
-
-static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
- size_t count, loff_t *_pos)
-{
- local_info_t *local = pde_data(file_inode(file));
- size_t off;
-
- if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE)
- return 0;
-
- off = *_pos;
- if (count > PRISM2_PDA_SIZE - off)
- count = PRISM2_PDA_SIZE - off;
- if (copy_to_user(buf, local->pda + off, count) != 0)
- return -EFAULT;
- *_pos += count;
- return count;
-}
-
-static const struct proc_ops prism2_pda_proc_ops = {
- .proc_read = prism2_pda_proc_read,
- .proc_lseek = generic_file_llseek,
-};
-
-
-static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf,
- size_t bufsize, loff_t *_pos)
-{
- return 0;
-}
-
-static const struct proc_ops prism2_aux_dump_proc_ops = {
- .proc_read = prism2_aux_dump_proc_no_read,
- .proc_lseek = default_llseek,
-};
-
-
-#ifdef PRISM2_IO_DEBUG
-static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- local_info_t *local = (local_info_t *) data;
- int head = local->io_debug_head;
- int start_bytes, left, copy;
-
- if (off + count > PRISM2_IO_DEBUG_SIZE * 4) {
- *eof = 1;
- if (off >= PRISM2_IO_DEBUG_SIZE * 4)
- return 0;
- count = PRISM2_IO_DEBUG_SIZE * 4 - off;
- }
-
- start_bytes = (PRISM2_IO_DEBUG_SIZE - head) * 4;
- left = count;
-
- if (off < start_bytes) {
- copy = start_bytes - off;
- if (copy > count)
- copy = count;
- memcpy(page, ((u8 *) &local->io_debug[head]) + off, copy);
- left -= copy;
- if (left > 0)
- memcpy(&page[copy], local->io_debug, left);
- } else {
- memcpy(page, ((u8 *) local->io_debug) + (off - start_bytes),
- left);
- }
-
- *start = page;
-
- return count;
-}
-#endif /* PRISM2_IO_DEBUG */
-
-
-#ifndef PRISM2_NO_STATION_MODES
-static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- unsigned long entry;
- int i, len;
- struct hfa384x_hostscan_result *scanres;
- u8 *p;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(m,
- "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates SSID\n");
- return 0;
- }
-
- entry = (unsigned long)v - 2;
- scanres = &local->last_scan_results[entry];
-
- seq_printf(m, "%d %d %d %d 0x%02x %d %pM %d ",
- le16_to_cpu(scanres->chid),
- (s16) le16_to_cpu(scanres->anl),
- (s16) le16_to_cpu(scanres->sl),
- le16_to_cpu(scanres->beacon_interval),
- le16_to_cpu(scanres->capability),
- le16_to_cpu(scanres->rate),
- scanres->bssid,
- le16_to_cpu(scanres->atim));
-
- p = scanres->sup_rates;
- for (i = 0; i < sizeof(scanres->sup_rates); i++) {
- if (p[i] == 0)
- break;
- seq_printf(m, "<%02x>", p[i]);
- }
- seq_putc(m, ' ');
-
- p = scanres->ssid;
- len = le16_to_cpu(scanres->ssid_len);
- if (len > 32)
- len = 32;
- for (i = 0; i < len; i++) {
- unsigned char c = p[i];
- if (c >= 32 && c < 127)
- seq_putc(m, c);
- else
- seq_printf(m, "<%02x>", c);
- }
- seq_putc(m, '\n');
- return 0;
-}
-
-static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- spin_lock_bh(&local->lock);
-
- /* We have a header (pos 0) + N results to show (pos 1...N) */
- if (*_pos > local->last_scan_results_count)
- return NULL;
- return (void *)(unsigned long)(*_pos + 1); /* 0 would be EOF */
-}
-
-static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos)
-{
- local_info_t *local = pde_data(file_inode(m->file));
-
- ++*_pos;
- if (*_pos > local->last_scan_results_count)
- return NULL;
- return (void *)(unsigned long)(*_pos + 1); /* 0 would be EOF */
-}
-
-static void prism2_scan_results_proc_stop(struct seq_file *m, void *v)
-{
- local_info_t *local = pde_data(file_inode(m->file));
- spin_unlock_bh(&local->lock);
-}
-
-static const struct seq_operations prism2_scan_results_proc_seqops = {
- .start = prism2_scan_results_proc_start,
- .next = prism2_scan_results_proc_next,
- .stop = prism2_scan_results_proc_stop,
- .show = prism2_scan_results_proc_show,
-};
-#endif /* PRISM2_NO_STATION_MODES */
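The iterator above encodes its position as v = *_pos + 1 so that position 0 maps to SEQ_START_TOKEN ((void *)1) and prints the header row, while show() recovers the array index as entry = (unsigned long)v - 2: for example, *_pos = 1 yields v = 2 and entry = 0, the first cached scan result.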
-
-
-void hostap_init_proc(local_info_t *local)
-{
- local->proc = NULL;
-
- if (hostap_proc == NULL) {
- printk(KERN_WARNING "%s: hostap proc directory not created\n",
- local->dev->name);
- return;
- }
-
- local->proc = proc_mkdir(local->ddev->name, hostap_proc);
- if (local->proc == NULL) {
- printk(KERN_INFO "/proc/net/hostap/%s creation failed\n",
- local->ddev->name);
- return;
- }
-
-#ifndef PRISM2_NO_PROCFS_DEBUG
- proc_create_single_data("debug", 0, local->proc,
- prism2_debug_proc_show, local);
-#endif /* PRISM2_NO_PROCFS_DEBUG */
- proc_create_single_data("stats", 0, local->proc, prism2_stats_proc_show,
- local);
- proc_create_seq_data("wds", 0, local->proc,
- &prism2_wds_proc_seqops, local);
- proc_create_data("pda", 0, local->proc,
- &prism2_pda_proc_ops, local);
- proc_create_data("aux_dump", 0, local->proc,
- local->func->read_aux_proc_ops ?: &prism2_aux_dump_proc_ops,
- local);
- proc_create_seq_data("bss_list", 0, local->proc,
- &prism2_bss_list_proc_seqops, local);
- proc_create_single_data("crypt", 0, local->proc, prism2_crypt_proc_show,
- local);
-#ifdef PRISM2_IO_DEBUG
- proc_create_single_data("io_debug", 0, local->proc,
- prism2_debug_proc_show, local);
-#endif /* PRISM2_IO_DEBUG */
-#ifndef PRISM2_NO_STATION_MODES
- proc_create_seq_data("scan_results", 0, local->proc,
- &prism2_scan_results_proc_seqops, local);
-#endif /* PRISM2_NO_STATION_MODES */
-}
-
-
-void hostap_remove_proc(local_info_t *local)
-{
- proc_remove(local->proc);
-}
-
-
-EXPORT_SYMBOL(hostap_init_proc);
-EXPORT_SYMBOL(hostap_remove_proc);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
deleted file mode 100644
index f71c0545c..000000000
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ /dev/null
@@ -1,1051 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef HOSTAP_WLAN_H
-#define HOSTAP_WLAN_H
-
-#include <linux/interrupt.h>
-#include <linux/wireless.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mutex.h>
-#include <linux/refcount.h>
-#include <net/iw_handler.h>
-#include <net/ieee80211_radiotap.h>
-#include <net/lib80211.h>
-
-#include "hostap_config.h"
-#include "hostap_common.h"
-
-#define MAX_PARM_DEVICES 8
-#define PARM_MIN_MAX "1-" __MODULE_STRING(MAX_PARM_DEVICES)
-#define DEF_INTS -1, -1, -1, -1, -1, -1, -1
-#define GET_INT_PARM(var,idx) var[var[idx] < 0 ? 0 : idx]
-
-
-/* Specific skb->protocol value that indicates that the packet already contains
- * txdesc header.
- * FIX: This might need own value that would be allocated especially for Prism2
- * txdesc; ETH_P_CONTROL is commented as "Card specific control frames".
- * However, these skb's should have only minimal path in the kernel side since
- * prism2_send_mgmt() sends these with dev_queue_xmit() to prism2_tx(). */
-#define ETH_P_HOSTAP ETH_P_CONTROL
-
-/* ARPHRD_IEEE80211_PRISM uses a bloated version of Prism2 RX frame header
- * (from linux-wlan-ng) */
-struct linux_wlan_ng_val {
- u32 did;
- u16 status, len;
- u32 data;
-} __packed;
-
-struct linux_wlan_ng_prism_hdr {
- u32 msgcode, msglen;
- char devname[16];
- struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
- noise, rate, istx, frmlen;
-} __packed;
-
-struct linux_wlan_ng_cap_hdr {
- __be32 version;
- __be32 length;
- __be64 mactime;
- __be64 hosttime;
- __be32 phytype;
- __be32 channel;
- __be32 datarate;
- __be32 antenna;
- __be32 priority;
- __be32 ssi_type;
- __be32 ssi_signal;
- __be32 ssi_noise;
- __be32 preamble;
- __be32 encoding;
-} __packed;
-
-struct hostap_radiotap_rx {
- struct ieee80211_radiotap_header hdr;
- __le64 tsft;
- u8 rate;
- u8 padding;
- __le16 chan_freq;
- __le16 chan_flags;
- s8 dbm_antsignal;
- s8 dbm_antnoise;
-} __packed;
-
-#define LWNG_CAP_DID_BASE (4 | (1 << 6)) /* section 4, group 1 */
-#define LWNG_CAPHDR_VERSION 0x80211001
-
-struct hfa384x_rx_frame {
- /* HFA384X RX frame descriptor */
- __le16 status; /* HFA384X_RX_STATUS_ flags */
- __le32 time; /* timestamp, 1 microsecond resolution */
- u8 silence; /* 27 .. 154; seems to be 0 */
- u8 signal; /* 27 .. 154 */
- u8 rate; /* 10, 20, 55, or 110 */
- u8 rxflow;
- __le32 reserved;
-
- /* 802.11 */
- __le16 frame_control;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctrl;
- u8 addr4[ETH_ALEN];
- __le16 data_len;
-
- /* 802.3 */
- u8 dst_addr[ETH_ALEN];
- u8 src_addr[ETH_ALEN];
- __be16 len;
-
- /* followed by frame data; max 2304 bytes */
-} __packed;
-
-
-struct hfa384x_tx_frame {
- /* HFA384X TX frame descriptor */
- __le16 status; /* HFA384X_TX_STATUS_ flags */
- __le16 reserved1;
- __le16 reserved2;
- __le32 sw_support;
- u8 retry_count; /* not yet implemented */
- u8 tx_rate; /* Host AP only; 0 = firmware, or 10, 20, 55, 110 */
- __le16 tx_control; /* HFA384X_TX_CTRL_ flags */
-
- /* 802.11 */
- struct_group(header,
- __le16 frame_control; /* parts not used */
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN]; /* filled by firmware */
- u8 addr3[ETH_ALEN];
- __le16 seq_ctrl; /* filled by firmware */
- );
- u8 addr4[ETH_ALEN];
- __le16 data_len;
-
- /* 802.3 */
- u8 dst_addr[ETH_ALEN];
- u8 src_addr[ETH_ALEN];
- __be16 len;
-
- /* followed by frame data; max 2304 bytes */
-} __packed;
-
-
-struct hfa384x_rid_hdr
-{
- __le16 len;
- __le16 rid;
-} __packed;
-
-
-/* Macro for converting signal levels (range 27 .. 154) to wireless ext
- * dBm value with some accuracy */
-#define HFA384X_LEVEL_TO_dBm(v) (0x100 + (v) * 100 / 255 - 100)
-
-#define HFA384X_LEVEL_TO_dBm_sign(v) ((v) * 100 / 255 - 100)
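Worked check of the two conversion macros, using integer arithmetic: for a raw level v = 154, HFA384X_LEVEL_TO_dBm_sign(154) = 154 * 100 / 255 - 100 = 60 - 100 = -40 dBm, while HFA384X_LEVEL_TO_dBm(154) = 0x100 + 60 - 100 = 216, i.e. the same -40 dBm biased by 0x100 into the unsigned wireless-extensions encoding used by prism2_update_comms_qual() earlier in this diff.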
-
-struct hfa384x_scan_request {
- __le16 channel_list;
- __le16 txrate; /* HFA384X_RATES_* */
-} __packed;
-
-struct hfa384x_hostscan_request {
- __le16 channel_list;
- __le16 txrate;
- __le16 target_ssid_len;
- u8 target_ssid[32];
-} __packed;
-
-struct hfa384x_join_request {
- u8 bssid[ETH_ALEN];
- __le16 channel;
-} __packed;
-
-struct hfa384x_info_frame {
- __le16 len;
- __le16 type;
-} __packed;
-
-struct hfa384x_comm_tallies {
- __le16 tx_unicast_frames;
- __le16 tx_multicast_frames;
- __le16 tx_fragments;
- __le16 tx_unicast_octets;
- __le16 tx_multicast_octets;
- __le16 tx_deferred_transmissions;
- __le16 tx_single_retry_frames;
- __le16 tx_multiple_retry_frames;
- __le16 tx_retry_limit_exceeded;
- __le16 tx_discards;
- __le16 rx_unicast_frames;
- __le16 rx_multicast_frames;
- __le16 rx_fragments;
- __le16 rx_unicast_octets;
- __le16 rx_multicast_octets;
- __le16 rx_fcs_errors;
- __le16 rx_discards_no_buffer;
- __le16 tx_discards_wrong_sa;
- __le16 rx_discards_wep_undecryptable;
- __le16 rx_message_in_msg_fragments;
- __le16 rx_message_in_bad_msg_fragments;
-} __packed;
-
-struct hfa384x_comm_tallies32 {
- __le32 tx_unicast_frames;
- __le32 tx_multicast_frames;
- __le32 tx_fragments;
- __le32 tx_unicast_octets;
- __le32 tx_multicast_octets;
- __le32 tx_deferred_transmissions;
- __le32 tx_single_retry_frames;
- __le32 tx_multiple_retry_frames;
- __le32 tx_retry_limit_exceeded;
- __le32 tx_discards;
- __le32 rx_unicast_frames;
- __le32 rx_multicast_frames;
- __le32 rx_fragments;
- __le32 rx_unicast_octets;
- __le32 rx_multicast_octets;
- __le32 rx_fcs_errors;
- __le32 rx_discards_no_buffer;
- __le32 tx_discards_wrong_sa;
- __le32 rx_discards_wep_undecryptable;
- __le32 rx_message_in_msg_fragments;
- __le32 rx_message_in_bad_msg_fragments;
-} __packed;
-
-struct hfa384x_scan_result_hdr {
- __le16 reserved;
- __le16 scan_reason;
-#define HFA384X_SCAN_IN_PROGRESS 0 /* no results available yet */
-#define HFA384X_SCAN_HOST_INITIATED 1
-#define HFA384X_SCAN_FIRMWARE_INITIATED 2
-#define HFA384X_SCAN_INQUIRY_FROM_HOST 3
-} __packed;
-
-#define HFA384X_SCAN_MAX_RESULTS 32
-
-struct hfa384x_scan_result {
- __le16 chid;
- __le16 anl;
- __le16 sl;
- u8 bssid[ETH_ALEN];
- __le16 beacon_interval;
- __le16 capability;
- __le16 ssid_len;
- u8 ssid[32];
- u8 sup_rates[10];
- __le16 rate;
-} __packed;
-
-struct hfa384x_hostscan_result {
- __le16 chid;
- __le16 anl;
- __le16 sl;
- u8 bssid[ETH_ALEN];
- __le16 beacon_interval;
- __le16 capability;
- __le16 ssid_len;
- u8 ssid[32];
- u8 sup_rates[10];
- __le16 rate;
- __le16 atim;
-} __packed;
-
-struct comm_tallies_sums {
- unsigned int tx_unicast_frames;
- unsigned int tx_multicast_frames;
- unsigned int tx_fragments;
- unsigned int tx_unicast_octets;
- unsigned int tx_multicast_octets;
- unsigned int tx_deferred_transmissions;
- unsigned int tx_single_retry_frames;
- unsigned int tx_multiple_retry_frames;
- unsigned int tx_retry_limit_exceeded;
- unsigned int tx_discards;
- unsigned int rx_unicast_frames;
- unsigned int rx_multicast_frames;
- unsigned int rx_fragments;
- unsigned int rx_unicast_octets;
- unsigned int rx_multicast_octets;
- unsigned int rx_fcs_errors;
- unsigned int rx_discards_no_buffer;
- unsigned int tx_discards_wrong_sa;
- unsigned int rx_discards_wep_undecryptable;
- unsigned int rx_message_in_msg_fragments;
- unsigned int rx_message_in_bad_msg_fragments;
-};
-
-
-struct hfa384x_regs {
- u16 cmd;
- u16 evstat;
- u16 offset0;
- u16 offset1;
- u16 swsupport0;
-};
-
-
-#if defined(PRISM2_PCCARD) || defined(PRISM2_PLX)
-/* I/O ports for HFA384X Controller access */
-#define HFA384X_CMD_OFF 0x00
-#define HFA384X_PARAM0_OFF 0x02
-#define HFA384X_PARAM1_OFF 0x04
-#define HFA384X_PARAM2_OFF 0x06
-#define HFA384X_STATUS_OFF 0x08
-#define HFA384X_RESP0_OFF 0x0A
-#define HFA384X_RESP1_OFF 0x0C
-#define HFA384X_RESP2_OFF 0x0E
-#define HFA384X_INFOFID_OFF 0x10
-#define HFA384X_CONTROL_OFF 0x14
-#define HFA384X_SELECT0_OFF 0x18
-#define HFA384X_SELECT1_OFF 0x1A
-#define HFA384X_OFFSET0_OFF 0x1C
-#define HFA384X_OFFSET1_OFF 0x1E
-#define HFA384X_RXFID_OFF 0x20
-#define HFA384X_ALLOCFID_OFF 0x22
-#define HFA384X_TXCOMPLFID_OFF 0x24
-#define HFA384X_SWSUPPORT0_OFF 0x28
-#define HFA384X_SWSUPPORT1_OFF 0x2A
-#define HFA384X_SWSUPPORT2_OFF 0x2C
-#define HFA384X_EVSTAT_OFF 0x30
-#define HFA384X_INTEN_OFF 0x32
-#define HFA384X_EVACK_OFF 0x34
-#define HFA384X_DATA0_OFF 0x36
-#define HFA384X_DATA1_OFF 0x38
-#define HFA384X_AUXPAGE_OFF 0x3A
-#define HFA384X_AUXOFFSET_OFF 0x3C
-#define HFA384X_AUXDATA_OFF 0x3E
-#endif /* PRISM2_PCCARD || PRISM2_PLX */
-
-#ifdef PRISM2_PCI
-/* Memory addresses for ISL3874 controller access */
-#define HFA384X_CMD_OFF 0x00
-#define HFA384X_PARAM0_OFF 0x04
-#define HFA384X_PARAM1_OFF 0x08
-#define HFA384X_PARAM2_OFF 0x0C
-#define HFA384X_STATUS_OFF 0x10
-#define HFA384X_RESP0_OFF 0x14
-#define HFA384X_RESP1_OFF 0x18
-#define HFA384X_RESP2_OFF 0x1C
-#define HFA384X_INFOFID_OFF 0x20
-#define HFA384X_CONTROL_OFF 0x28
-#define HFA384X_SELECT0_OFF 0x30
-#define HFA384X_SELECT1_OFF 0x34
-#define HFA384X_OFFSET0_OFF 0x38
-#define HFA384X_OFFSET1_OFF 0x3C
-#define HFA384X_RXFID_OFF 0x40
-#define HFA384X_ALLOCFID_OFF 0x44
-#define HFA384X_TXCOMPLFID_OFF 0x48
-#define HFA384X_PCICOR_OFF 0x4C
-#define HFA384X_SWSUPPORT0_OFF 0x50
-#define HFA384X_SWSUPPORT1_OFF 0x54
-#define HFA384X_SWSUPPORT2_OFF 0x58
-#define HFA384X_PCIHCR_OFF 0x5C
-#define HFA384X_EVSTAT_OFF 0x60
-#define HFA384X_INTEN_OFF 0x64
-#define HFA384X_EVACK_OFF 0x68
-#define HFA384X_DATA0_OFF 0x6C
-#define HFA384X_DATA1_OFF 0x70
-#define HFA384X_AUXPAGE_OFF 0x74
-#define HFA384X_AUXOFFSET_OFF 0x78
-#define HFA384X_AUXDATA_OFF 0x7C
-#define HFA384X_PCI_M0_ADDRH_OFF 0x80
-#define HFA384X_PCI_M0_ADDRL_OFF 0x84
-#define HFA384X_PCI_M0_LEN_OFF 0x88
-#define HFA384X_PCI_M0_CTL_OFF 0x8C
-#define HFA384X_PCI_STATUS_OFF 0x98
-#define HFA384X_PCI_M1_ADDRH_OFF 0xA0
-#define HFA384X_PCI_M1_ADDRL_OFF 0xA4
-#define HFA384X_PCI_M1_LEN_OFF 0xA8
-#define HFA384X_PCI_M1_CTL_OFF 0xAC
-
-/* PCI bus master control bits (these are undocumented; based on guessing and
- * experimenting..) */
-#define HFA384X_PCI_CTL_FROM_BAP (BIT(5) | BIT(1) | BIT(0))
-#define HFA384X_PCI_CTL_TO_BAP (BIT(5) | BIT(0))
-
-#endif /* PRISM2_PCI */
-
-
-/* Command codes for CMD reg. */
-#define HFA384X_CMDCODE_INIT 0x00
-#define HFA384X_CMDCODE_ENABLE 0x01
-#define HFA384X_CMDCODE_DISABLE 0x02
-#define HFA384X_CMDCODE_ALLOC 0x0A
-#define HFA384X_CMDCODE_TRANSMIT 0x0B
-#define HFA384X_CMDCODE_INQUIRE 0x11
-#define HFA384X_CMDCODE_ACCESS 0x21
-#define HFA384X_CMDCODE_ACCESS_WRITE (0x21 | BIT(8))
-#define HFA384X_CMDCODE_DOWNLOAD 0x22
-#define HFA384X_CMDCODE_READMIF 0x30
-#define HFA384X_CMDCODE_WRITEMIF 0x31
-#define HFA384X_CMDCODE_TEST 0x38
-
-#define HFA384X_CMDCODE_MASK 0x3F
-
-/* Test mode operations */
-#define HFA384X_TEST_CHANGE_CHANNEL 0x08
-#define HFA384X_TEST_MONITOR 0x0B
-#define HFA384X_TEST_STOP 0x0F
-#define HFA384X_TEST_CFG_BITS 0x15
-#define HFA384X_TEST_CFG_BIT_ALC BIT(3)
-
-#define HFA384X_CMD_BUSY BIT(15)
-
-#define HFA384X_CMD_TX_RECLAIM BIT(8)
-
-#define HFA384X_OFFSET_ERR BIT(14)
-#define HFA384X_OFFSET_BUSY BIT(15)
-
-
-/* ProgMode for download command */
-#define HFA384X_PROGMODE_DISABLE 0
-#define HFA384X_PROGMODE_ENABLE_VOLATILE 1
-#define HFA384X_PROGMODE_ENABLE_NON_VOLATILE 2
-#define HFA384X_PROGMODE_PROGRAM_NON_VOLATILE 3
-
-#define HFA384X_AUX_MAGIC0 0xfe01
-#define HFA384X_AUX_MAGIC1 0xdc23
-#define HFA384X_AUX_MAGIC2 0xba45
-
-#define HFA384X_AUX_PORT_DISABLED 0
-#define HFA384X_AUX_PORT_DISABLE BIT(14)
-#define HFA384X_AUX_PORT_ENABLE BIT(15)
-#define HFA384X_AUX_PORT_ENABLED (BIT(14) | BIT(15))
-#define HFA384X_AUX_PORT_MASK (BIT(14) | BIT(15))
-
-#define PRISM2_PDA_SIZE 1024
-
-
-/* Events; EvStat, Interrupt mask (IntEn), and acknowledge bits (EvAck) */
-#define HFA384X_EV_TICK BIT(15)
-#define HFA384X_EV_WTERR BIT(14)
-#define HFA384X_EV_INFDROP BIT(13)
-#ifdef PRISM2_PCI
-#define HFA384X_EV_PCI_M1 BIT(9)
-#define HFA384X_EV_PCI_M0 BIT(8)
-#endif /* PRISM2_PCI */
-#define HFA384X_EV_INFO BIT(7)
-#define HFA384X_EV_DTIM BIT(5)
-#define HFA384X_EV_CMD BIT(4)
-#define HFA384X_EV_ALLOC BIT(3)
-#define HFA384X_EV_TXEXC BIT(2)
-#define HFA384X_EV_TX BIT(1)
-#define HFA384X_EV_RX BIT(0)
-
-
-/* HFA384X Information frames */
-#define HFA384X_INFO_HANDOVERADDR 0xF000 /* AP f/w ? */
-#define HFA384X_INFO_HANDOVERDEAUTHADDR 0xF001 /* AP f/w 1.3.7 */
-#define HFA384X_INFO_COMMTALLIES 0xF100
-#define HFA384X_INFO_SCANRESULTS 0xF101
-#define HFA384X_INFO_CHANNELINFORESULTS 0xF102 /* AP f/w only */
-#define HFA384X_INFO_HOSTSCANRESULTS 0xF103
-#define HFA384X_INFO_LINKSTATUS 0xF200
-#define HFA384X_INFO_ASSOCSTATUS 0xF201 /* ? */
-#define HFA384X_INFO_AUTHREQ 0xF202 /* ? */
-#define HFA384X_INFO_PSUSERCNT 0xF203 /* ? */
-#define HFA384X_INFO_KEYIDCHANGED 0xF204 /* ? */
-
-enum { HFA384X_LINKSTATUS_CONNECTED = 1,
- HFA384X_LINKSTATUS_DISCONNECTED = 2,
- HFA384X_LINKSTATUS_AP_CHANGE = 3,
- HFA384X_LINKSTATUS_AP_OUT_OF_RANGE = 4,
- HFA384X_LINKSTATUS_AP_IN_RANGE = 5,
- HFA384X_LINKSTATUS_ASSOC_FAILED = 6 };
-
-enum { HFA384X_PORTTYPE_BSS = 1, HFA384X_PORTTYPE_WDS = 2,
- HFA384X_PORTTYPE_PSEUDO_IBSS = 3, HFA384X_PORTTYPE_IBSS = 0,
- HFA384X_PORTTYPE_HOSTAP = 6 };
-
-#define HFA384X_RATES_1MBPS BIT(0)
-#define HFA384X_RATES_2MBPS BIT(1)
-#define HFA384X_RATES_5MBPS BIT(2)
-#define HFA384X_RATES_11MBPS BIT(3)
-
-#define HFA384X_ROAMING_FIRMWARE 1
-#define HFA384X_ROAMING_HOST 2
-#define HFA384X_ROAMING_DISABLED 3
-
-#define HFA384X_WEPFLAGS_PRIVACYINVOKED BIT(0)
-#define HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED BIT(1)
-#define HFA384X_WEPFLAGS_HOSTENCRYPT BIT(4)
-#define HFA384X_WEPFLAGS_HOSTDECRYPT BIT(7)
-
-#define HFA384X_RX_STATUS_MSGTYPE (BIT(15) | BIT(14) | BIT(13))
-#define HFA384X_RX_STATUS_PCF BIT(12)
-#define HFA384X_RX_STATUS_MACPORT (BIT(10) | BIT(9) | BIT(8))
-#define HFA384X_RX_STATUS_UNDECR BIT(1)
-#define HFA384X_RX_STATUS_FCSERR BIT(0)
-
-#define HFA384X_RX_STATUS_GET_MSGTYPE(s) \
-(((s) & HFA384X_RX_STATUS_MSGTYPE) >> 13)
-#define HFA384X_RX_STATUS_GET_MACPORT(s) \
-(((s) & HFA384X_RX_STATUS_MACPORT) >> 8)
-
-enum { HFA384X_RX_MSGTYPE_NORMAL = 0, HFA384X_RX_MSGTYPE_RFC1042 = 1,
- HFA384X_RX_MSGTYPE_BRIDGETUNNEL = 2, HFA384X_RX_MSGTYPE_MGMT = 4 };
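
A minimal usage sketch (not part of the original header): the two
extraction macros above are typically consumed like this when
classifying an RX descriptor status word; the helper name is
hypothetical.

static inline int hfa384x_rx_is_mgmt(u16 status)
{
        /* frames with FCS errors are flagged separately via
         * HFA384X_RX_STATUS_FCSERR and are usually dropped first */
        return HFA384X_RX_STATUS_GET_MSGTYPE(status) ==
               HFA384X_RX_MSGTYPE_MGMT;
}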
-
-
-#define HFA384X_TX_CTRL_ALT_RTRY BIT(5)
-#define HFA384X_TX_CTRL_802_11 BIT(3)
-#define HFA384X_TX_CTRL_802_3 0
-#define HFA384X_TX_CTRL_TX_EX BIT(2)
-#define HFA384X_TX_CTRL_TX_OK BIT(1)
-
-#define HFA384X_TX_STATUS_RETRYERR BIT(0)
-#define HFA384X_TX_STATUS_AGEDERR BIT(1)
-#define HFA384X_TX_STATUS_DISCON BIT(2)
-#define HFA384X_TX_STATUS_FORMERR BIT(3)
-
-/* HFA3861/3863 (BBP) Control Registers */
-#define HFA386X_CR_TX_CONFIGURE 0x12 /* CR9 */
-#define HFA386X_CR_RX_CONFIGURE 0x14 /* CR10 */
-#define HFA386X_CR_A_D_TEST_MODES2 0x1A /* CR13 */
-#define HFA386X_CR_MANUAL_TX_POWER 0x3E /* CR31 */
-#define HFA386X_CR_MEASURED_TX_POWER 0x74 /* CR58 */
-
-
-#ifdef __KERNEL__
-
-#define PRISM2_TXFID_COUNT 8
-#define PRISM2_DATA_MAXLEN 2304
-#define PRISM2_TXFID_LEN (PRISM2_DATA_MAXLEN + sizeof(struct hfa384x_tx_frame))
-#define PRISM2_TXFID_EMPTY 0xffff
-#define PRISM2_TXFID_RESERVED 0xfffe
-#define PRISM2_DUMMY_FID 0xffff
-#define MAX_SSID_LEN 32
-#define MAX_NAME_LEN 32 /* this is assumed to be equal to MAX_SSID_LEN */
-
-#define PRISM2_DUMP_RX_HDR BIT(0)
-#define PRISM2_DUMP_TX_HDR BIT(1)
-#define PRISM2_DUMP_TXEXC_HDR BIT(2)
-
-struct hostap_tx_callback_info {
- u16 idx;
- void (*func)(struct sk_buff *, int ok, void *);
- void *data;
- struct hostap_tx_callback_info *next;
-};
-
-
-/* IEEE 802.11 requires that STA supports concurrent reception of at least
- * three fragmented frames. This define can be increased to support more
- * concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
-#define PRISM2_FRAG_CACHE_LEN 4
-
-struct prism2_frag_entry {
- unsigned long first_frag_time;
- unsigned int seq;
- unsigned int last_frag;
- struct sk_buff *skb;
- u8 src_addr[ETH_ALEN];
- u8 dst_addr[ETH_ALEN];
-};
-
-
-struct hostap_cmd_queue {
- struct list_head list;
- wait_queue_head_t compl;
- volatile enum { CMD_SLEEP, CMD_CALLBACK, CMD_COMPLETED } type;
- void (*callback)(struct net_device *dev, long context, u16 resp0,
- u16 res);
- long context;
- u16 cmd, param0, param1;
- u16 resp0, res;
- volatile int issued, issuing;
-
- refcount_t usecnt;
- int del_req;
-};
-
-/* options for hw_shutdown */
-#define HOSTAP_HW_NO_DISABLE BIT(0)
-#define HOSTAP_HW_ENABLE_CMDCOMPL BIT(1)
-
-typedef struct local_info local_info_t;
-
-struct prism2_helper_functions {
- /* these functions are defined in hardware model specific files
- * (hostap_{cs,plx,pci}.c) */
- int (*card_present)(local_info_t *local);
- void (*cor_sreset)(local_info_t *local);
- void (*genesis_reset)(local_info_t *local, int hcr);
-
- /* the following functions are from hostap_hw.c, but they may have some
- * hardware model specific code */
-
- /* FIX: low-level commands like cmd might disappear at some point to
- * make it easier to change them if needed (e.g., cmd would be replaced
- * with write_mif/read_mif/testcmd/inquire); at least get_rid and
- * set_rid might move to hostap_{cs,plx,pci}.c */
- int (*cmd)(struct net_device *dev, u16 cmd, u16 param0, u16 *param1,
- u16 *resp0);
- void (*read_regs)(struct net_device *dev, struct hfa384x_regs *regs);
- int (*get_rid)(struct net_device *dev, u16 rid, void *buf, int len,
- int exact_len);
- int (*set_rid)(struct net_device *dev, u16 rid, void *buf, int len);
- int (*hw_enable)(struct net_device *dev, int initial);
- int (*hw_config)(struct net_device *dev, int initial);
- void (*hw_reset)(struct net_device *dev);
- void (*hw_shutdown)(struct net_device *dev, int no_disable);
- int (*reset_port)(struct net_device *dev);
- void (*schedule_reset)(local_info_t *local);
- int (*download)(local_info_t *local,
- struct prism2_download_param *param);
- int (*tx)(struct sk_buff *skb, struct net_device *dev);
- int (*set_tim)(struct net_device *dev, int aid, int set);
- const struct proc_ops *read_aux_proc_ops;
-
- int need_tx_headroom; /* number of bytes of headroom needed before
- * IEEE 802.11 header */
- enum { HOSTAP_HW_PCCARD, HOSTAP_HW_PLX, HOSTAP_HW_PCI } hw_type;
-};
-
-
-struct prism2_download_data {
- u32 dl_cmd;
- u32 start_addr;
- u32 num_areas;
- struct prism2_download_data_area {
- u32 addr; /* wlan card address */
- u32 len;
- u8 *data; /* allocated data */
- } data[] __counted_by(num_areas);
-};
-
-
-#define HOSTAP_MAX_BSS_COUNT 64
-#define MAX_WPA_IE_LEN 64
-
-struct hostap_bss_info {
- struct list_head list;
- unsigned long last_update;
- unsigned int count;
- u8 bssid[ETH_ALEN];
- u16 capab_info;
- u8 ssid[32];
- size_t ssid_len;
- u8 wpa_ie[MAX_WPA_IE_LEN];
- size_t wpa_ie_len;
- u8 rsn_ie[MAX_WPA_IE_LEN];
- size_t rsn_ie_len;
- int chan;
- int included;
-};
-
-
-/* Per radio private Host AP data - shared by all net device interfaces used
- * by each radio (wlan#, wlan#ap, wlan#sta, WDS).
- * ((struct hostap_interface *) netdev_priv(dev))->local points to this
- * structure. */
-struct local_info {
- struct module *hw_module;
- int card_idx;
- int dev_enabled;
- int master_dev_auto_open; /* was master device opened automatically */
- int num_dev_open; /* number of open devices */
- struct net_device *dev; /* master radio device */
- struct net_device *ddev; /* main data device */
- struct list_head hostap_interfaces; /* Host AP interface list (contains
- * struct hostap_interface entries)
- */
- rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
- * when removing entries from the list.
- * TX and RX paths can use read lock. */
- spinlock_t cmdlock, baplock, lock, irq_init_lock;
- struct mutex rid_bap_mtx;
- u16 infofid; /* MAC buffer id for info frame */
- /* txfid, intransmitfid, next_txfid, and next_alloc are protected by
- * txfidlock */
- spinlock_t txfidlock;
- int txfid_len; /* length of allocated TX buffers */
- u16 txfid[PRISM2_TXFID_COUNT]; /* buffer IDs for TX frames */
- /* buffer IDs for intransmit frames or PRISM2_TXFID_EMPTY if
- * corresponding txfid is free for next TX frame */
- u16 intransmitfid[PRISM2_TXFID_COUNT];
- int next_txfid; /* index to the next txfid to be checked for
- * availability */
- int next_alloc; /* index to the next intransmitfid to be checked for
- * allocation events */
-
- /* bitfield for atomic bitops */
-#define HOSTAP_BITS_TRANSMIT 0
-#define HOSTAP_BITS_BAP_TASKLET 1
-#define HOSTAP_BITS_BAP_TASKLET2 2
- unsigned long bits;
-
- struct ap_data *ap;
-
- char essid[MAX_SSID_LEN + 1];
- char name[MAX_NAME_LEN + 1];
- int name_set;
- u16 channel_mask; /* mask of allowed channels */
- u16 scan_channel_mask; /* mask of channels to be scanned */
- struct comm_tallies_sums comm_tallies;
- struct proc_dir_entry *proc;
- int iw_mode; /* operating mode (IW_MODE_*) */
- int pseudo_adhoc; /* 0: IW_MODE_ADHOC is real 802.11 compliant IBSS
- * 1: IW_MODE_ADHOC is "pseudo IBSS" */
- char bssid[ETH_ALEN];
- int channel;
- int beacon_int;
- int dtim_period;
- int mtu;
- int frame_dump; /* dump RX/TX frame headers, PRISM2_DUMP_ flags */
- int fw_tx_rate_control;
- u16 tx_rate_control;
- u16 basic_rates;
- int hw_resetting;
- int hw_ready;
- int hw_reset_tries; /* how many times reset has been tried */
- int hw_downloading;
- int shutdown;
- int pri_only;
- int no_pri; /* no PRI f/w present */
- int sram_type; /* 8 = x8 SRAM, 16 = x16 SRAM, -1 = unknown */
-
- enum {
- PRISM2_TXPOWER_AUTO = 0, PRISM2_TXPOWER_OFF,
- PRISM2_TXPOWER_FIXED, PRISM2_TXPOWER_UNKNOWN
- } txpower_type;
- int txpower; /* if txpower_type == PRISM2_TXPOWER_FIXED */
-
- /* command queue for hfa384x_cmd(); protected with cmdlock */
- struct list_head cmd_queue;
- /* max_len for cmd_queue; in addition, cmd_callback can use two
- * additional entries to prevent sleeping commands from stopping
- * transmits */
-#define HOSTAP_CMD_QUEUE_MAX_LEN 16
- int cmd_queue_len; /* number of entries in cmd_queue */
-
- /* if card timeout is detected in interrupt context, reset_queue is
- * used to schedule card resetting to be done in user context */
- struct work_struct reset_queue;
-
- /* For scheduling a change of the promiscuous mode RID */
- int is_promisc;
- struct work_struct set_multicast_list_queue;
-
- struct work_struct set_tim_queue;
- struct list_head set_tim_list;
- spinlock_t set_tim_lock;
-
- int wds_max_connections;
- int wds_connections;
-#define HOSTAP_WDS_BROADCAST_RA BIT(0)
-#define HOSTAP_WDS_AP_CLIENT BIT(1)
-#define HOSTAP_WDS_STANDARD_FRAME BIT(2)
- u32 wds_type;
- u16 tx_control; /* flags to be used in TX description */
- int manual_retry_count; /* -1 = use f/w default; otherwise retry count
- * to be used with all frames */
-
- struct iw_statistics wstats;
- unsigned long scan_timestamp; /* time when the scan was started */
- enum {
- PRISM2_MONITOR_80211 = 0, PRISM2_MONITOR_PRISM = 1,
- PRISM2_MONITOR_CAPHDR = 2, PRISM2_MONITOR_RADIOTAP = 3
- } monitor_type;
- int monitor_allow_fcserr;
-
- int hostapd; /* whether user space daemon, hostapd, is used for AP
- * management */
- int hostapd_sta; /* whether hostapd is used with an extra STA interface
- */
- struct net_device *apdev;
- struct net_device_stats apdevstats;
-
- char assoc_ap_addr[ETH_ALEN];
- struct net_device *stadev;
- struct net_device_stats stadevstats;
-
-#define WEP_KEYS 4
-#define WEP_KEY_LEN 13
- struct lib80211_crypt_info crypt_info;
-
- int open_wep; /* allow unencrypted frames */
- int host_encrypt;
- int host_decrypt;
- int privacy_invoked; /* force privacy invoked flag even if no keys are
- * configured */
- int fw_encrypt_ok; /* whether firmware-based WEP encrypt is working
- * in Host AP mode (STA f/w 1.4.9 or newer) */
- int bcrx_sta_key; /* use individual keys to override default keys even
- * with RX of broad/multicast frames */
-
- struct prism2_frag_entry frag_cache[PRISM2_FRAG_CACHE_LEN];
- unsigned int frag_next_idx;
-
- int ieee_802_1x; /* is IEEE 802.1X used */
-
- int antsel_tx, antsel_rx;
- int rts_threshold; /* dot11RTSThreshold */
- int fragm_threshold; /* dot11FragmentationThreshold */
- int auth_algs; /* PRISM2_AUTH_ flags */
-
- int enh_sec; /* cnfEnhSecurity options (broadcast SSID hide/ignore) */
- int tallies32; /* 32-bit tallies in use */
-
- struct prism2_helper_functions *func;
-
- u8 *pda;
- int fw_ap;
-#define PRISM2_FW_VER(major, minor, variant) \
-(((major) << 16) | ((minor) << 8) | (variant))
- u32 sta_fw_ver;
-
- /* Tasklets for handling hardware IRQ related operations outside hw IRQ
- * handler */
- struct tasklet_struct bap_tasklet;
-
- struct tasklet_struct info_tasklet;
- struct sk_buff_head info_list; /* info frames as skb's for
- * info_tasklet */
-
- struct hostap_tx_callback_info *tx_callback; /* registered TX callbacks
- */
-
- struct tasklet_struct rx_tasklet;
- struct sk_buff_head rx_list;
-
- struct tasklet_struct sta_tx_exc_tasklet;
- struct sk_buff_head sta_tx_exc_list;
-
- int host_roaming;
- unsigned long last_join_time; /* time of last JoinRequest */
- struct hfa384x_hostscan_result *last_scan_results;
- int last_scan_results_count;
- enum { PRISM2_SCAN, PRISM2_HOSTSCAN } last_scan_type;
- struct work_struct info_queue;
- unsigned long pending_info; /* bit field of pending info_queue items */
-#define PRISM2_INFO_PENDING_LINKSTATUS 0
-#define PRISM2_INFO_PENDING_SCANRESULTS 1
- int prev_link_status; /* previous received LinkStatus info */
- int prev_linkstatus_connected;
- u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */
-
-#ifdef PRISM2_CALLBACK
- void *callback_data; /* Can be used in callbacks; e.g., allocate
- * on enable event and free on disable event.
- * Host AP driver code does not touch this. */
-#endif /* PRISM2_CALLBACK */
-
- wait_queue_head_t hostscan_wq;
-
- /* Passive scan in Host AP mode */
- struct timer_list passive_scan_timer;
- int passive_scan_interval; /* in seconds, 0 = disabled */
- int passive_scan_channel;
- enum { PASSIVE_SCAN_WAIT, PASSIVE_SCAN_LISTEN } passive_scan_state;
-
- struct timer_list tick_timer;
- unsigned long last_tick_timer;
- unsigned int sw_tick_stuck;
-
- /* commsQuality / dBmCommsQuality data from periodic polling; only
- * valid for Managed and Ad-hoc modes */
- unsigned long last_comms_qual_update;
- int comms_qual; /* in some odd unit.. */
- int avg_signal; /* in dB (note: negative) */
- int avg_noise; /* in dB (note: negative) */
- struct work_struct comms_qual_update;
-
- /* RSSI to dBm adjustment (for RX descriptor fields) */
- int rssi_to_dBm; /* subtract from RSSI to get approximate dBm value */
-
- /* BSS list / protected by local->lock */
- struct list_head bss_list;
- int num_bss_info;
- int wpa; /* WPA support enabled */
- int tkip_countermeasures;
- int drop_unencrypted;
- /* Generic IEEE 802.11 info element to be added to
- * ProbeResp/Beacon/(Re)AssocReq */
- u8 *generic_elem;
- size_t generic_elem_len;
-
-#ifdef PRISM2_DOWNLOAD_SUPPORT
- /* Persistent volatile download data */
- struct prism2_download_data *dl_pri;
- struct prism2_download_data *dl_sec;
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
-
-#ifdef PRISM2_IO_DEBUG
-#define PRISM2_IO_DEBUG_SIZE 10000
- u32 io_debug[PRISM2_IO_DEBUG_SIZE];
- int io_debug_head;
- int io_debug_enabled;
-#endif /* PRISM2_IO_DEBUG */
-
- /* Pointer to hardware model specific (cs,pci,plx) private data. */
- void *hw_priv;
-};
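
The PRISM2_FW_VER macro defined inside the struct above packs
major.minor.variant into a single integer so that firmware releases
order correctly under plain integer comparison. A hedged sketch of the
kind of check this enables (the helper is hypothetical; the 1.4.9
threshold mirrors the fw_encrypt_ok comment in the struct):

static inline int prism2_sta_fw_at_least(struct local_info *local,
                                         u32 major, u32 minor, u32 variant)
{
        return local->sta_fw_ver >= PRISM2_FW_VER(major, minor, variant);
}

/* e.g. firmware-based WEP in Host AP mode needs
 *      prism2_sta_fw_at_least(local, 1, 4, 9) */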
-
-
-/* Per interface private Host AP data
- * Allocated for each net device that Host AP uses (wlan#, wlan#ap, wlan#sta,
- * WDS) and netdev_priv(dev) points to this structure. */
-struct hostap_interface {
- struct list_head list; /* list entry in Host AP interface list */
- struct net_device *dev; /* pointer to this device */
- struct local_info *local; /* pointer to shared private data */
- struct net_device_stats stats;
- struct iw_spy_data spy_data; /* iwspy support */
- struct iw_public_data wireless_data;
-
- enum {
- HOSTAP_INTERFACE_MASTER,
- HOSTAP_INTERFACE_MAIN,
- HOSTAP_INTERFACE_AP,
- HOSTAP_INTERFACE_STA,
- HOSTAP_INTERFACE_WDS,
- } type;
-
- union {
- struct hostap_interface_wds {
- u8 remote_addr[ETH_ALEN];
- } wds;
- } u;
-};
-
-
-#define HOSTAP_SKB_TX_DATA_MAGIC 0xf08a36a2
-
-/*
- * TX meta data - stored in skb->cb buffer, so this must not be increased over
- * the 48-byte limit.
- * THE PADDING THIS STARTS WITH IS A HORRIBLE HACK THAT SHOULD NOT LIVE
- * TO SEE THE DAY.
- */
-struct hostap_skb_tx_data {
- unsigned int __padding_for_default_qdiscs;
- u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */
- u8 rate; /* transmit rate */
-#define HOSTAP_TX_FLAGS_WDS BIT(0)
-#define HOSTAP_TX_FLAGS_BUFFERED_FRAME BIT(1)
-#define HOSTAP_TX_FLAGS_ADD_MOREDATA BIT(2)
- u8 flags; /* HOSTAP_TX_FLAGS_* */
- u16 tx_cb_idx;
- struct hostap_interface *iface;
- unsigned long jiffies; /* queueing timestamp */
- unsigned short ethertype;
-};
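
Since this structure is carried in skb->cb, a compile-time guard can
enforce the 48-byte limit mentioned above. A minimal sketch, assuming
the kernel's BUILD_BUG_ON() and sizeof_field() helpers; the accessor
name is hypothetical:

static inline struct hostap_skb_tx_data *hostap_skb_tx_meta(struct sk_buff *skb)
{
        /* refuses to compile if the metadata outgrows skb->cb */
        BUILD_BUG_ON(sizeof(struct hostap_skb_tx_data) >
                     sizeof_field(struct sk_buff, cb));
        return (struct hostap_skb_tx_data *) skb->cb;
}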
-
-
-#ifndef PRISM2_NO_DEBUG
-
-#define DEBUG_FID BIT(0)
-#define DEBUG_PS BIT(1)
-#define DEBUG_FLOW BIT(2)
-#define DEBUG_AP BIT(3)
-#define DEBUG_HW BIT(4)
-#define DEBUG_EXTRA BIT(5)
-#define DEBUG_EXTRA2 BIT(6)
-#define DEBUG_PS2 BIT(7)
-#define DEBUG_MASK (DEBUG_PS | DEBUG_AP | DEBUG_HW | DEBUG_EXTRA)
-#define PDEBUG(n, args...) \
-do { if ((n) & DEBUG_MASK) printk(KERN_DEBUG args); } while (0)
-#define PDEBUG2(n, args...) \
-do { if ((n) & DEBUG_MASK) printk(args); } while (0)
-
-#else /* PRISM2_NO_DEBUG */
-
-#define PDEBUG(n, args...)
-#define PDEBUG2(n, args...)
-
-#endif /* PRISM2_NO_DEBUG */
-
-enum { BAP0 = 0, BAP1 = 1 };
-
-#define PRISM2_IO_DEBUG_CMD_INB 0
-#define PRISM2_IO_DEBUG_CMD_INW 1
-#define PRISM2_IO_DEBUG_CMD_INSW 2
-#define PRISM2_IO_DEBUG_CMD_OUTB 3
-#define PRISM2_IO_DEBUG_CMD_OUTW 4
-#define PRISM2_IO_DEBUG_CMD_OUTSW 5
-#define PRISM2_IO_DEBUG_CMD_ERROR 6
-#define PRISM2_IO_DEBUG_CMD_INTERRUPT 7
-
-#ifdef PRISM2_IO_DEBUG
-
-#define PRISM2_IO_DEBUG_ENTRY(cmd, reg, value) \
-(((cmd) << 24) | ((reg) << 16) | value)
-
-static inline void prism2_io_debug_add(struct net_device *dev, int cmd,
- int reg, int value)
-{
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
-
- if (!local->io_debug_enabled)
- return;
-
- local->io_debug[local->io_debug_head] = jiffies & 0xffffffff;
- if (++local->io_debug_head >= PRISM2_IO_DEBUG_SIZE)
- local->io_debug_head = 0;
- local->io_debug[local->io_debug_head] =
- PRISM2_IO_DEBUG_ENTRY(cmd, reg, value);
- if (++local->io_debug_head >= PRISM2_IO_DEBUG_SIZE)
- local->io_debug_head = 0;
-}
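
Each logged event thus occupies two consecutive ring slots: a raw
jiffies timestamp followed by a word packed with PRISM2_IO_DEBUG_ENTRY.
A hedged decoding sketch for post-processing the ring (illustrative
only, not part of the driver):

static inline void prism2_io_debug_decode(u32 entry, int *cmd, int *reg,
                                          int *value)
{
        *cmd = entry >> 24;             /* PRISM2_IO_DEBUG_CMD_* code */
        *reg = (entry >> 16) & 0xff;    /* register offset */
        *value = entry & 0xffff;        /* 16-bit data word */
}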
-
-
-static inline void prism2_io_debug_error(struct net_device *dev, int err)
-{
- struct hostap_interface *iface = netdev_priv(dev);
- local_info_t *local = iface->local;
- unsigned long flags;
-
- if (!local->io_debug_enabled)
- return;
-
- spin_lock_irqsave(&local->lock, flags);
- prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_ERROR, 0, err);
- if (local->io_debug_enabled == 1) {
- local->io_debug_enabled = 0;
- printk(KERN_DEBUG "%s: I/O debug stopped\n", dev->name);
- }
- spin_unlock_irqrestore(&local->lock, flags);
-}
-
-#else /* PRISM2_IO_DEBUG */
-
-static inline void prism2_io_debug_add(struct net_device *dev, int cmd,
- int reg, int value)
-{
-}
-
-static inline void prism2_io_debug_error(struct net_device *dev, int err)
-{
-}
-
-#endif /* PRISM2_IO_DEBUG */
-
-
-#ifdef PRISM2_CALLBACK
-enum {
- /* Called when card is enabled */
- PRISM2_CALLBACK_ENABLE,
-
- /* Called when card is disabled */
- PRISM2_CALLBACK_DISABLE,
-
- /* Called when RX/TX starts/ends */
- PRISM2_CALLBACK_RX_START, PRISM2_CALLBACK_RX_END,
- PRISM2_CALLBACK_TX_START, PRISM2_CALLBACK_TX_END
-};
-void prism2_callback(local_info_t *local, int event);
-#else /* PRISM2_CALLBACK */
-#define prism2_callback(d, e) do { } while (0)
-#endif /* PRISM2_CALLBACK */
-
-#endif /* __KERNEL__ */
-
-#endif /* HOSTAP_WLAN_H */
diff --git a/drivers/net/wireless/intersil/orinoco/Kconfig b/drivers/net/wireless/intersil/orinoco/Kconfig
deleted file mode 100644
index f62730aa7..000000000
--- a/drivers/net/wireless/intersil/orinoco/Kconfig
+++ /dev/null
@@ -1,143 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config HERMES
- tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
- depends on (PPC_PMAC || PCI || PCMCIA)
- depends on CFG80211
- select CFG80211_WEXT_EXPORT
- select WIRELESS_EXT
- select WEXT_SPY
- select WEXT_PRIV
- select FW_LOADER
- select CRYPTO
- select CRYPTO_MICHAEL_MIC
- help
- A driver for 802.11b wireless cards based on the "Hermes" or
- Intersil HFA384x (Prism 2) MAC controller. This includes the vast
- majority of the PCMCIA 802.11b cards (which are nearly all rebadges)
- - except for the Cisco/Aironet cards. Cards supported include the
- Apple Airport (not a PCMCIA card), WavelanIEEE/Orinoco,
- Cabletron/EnteraSys Roamabout, ELSA AirLancer, MELCO Buffalo, Avaya,
- IBM High Rate Wireless, Farallon Skyline, Samsung MagicLAN, Netgear
- MA401, LinkSys WPC-11, D-Link DWL-650, 3Com AirConnect, Intel
- IPW2011, and Symbol Spectrum24 High Rate amongst others.
-
- This option includes the guts of the driver, but in order to
- actually use a card you will also need to enable support for PCMCIA
- Hermes cards, PLX9052 based PCI adaptors or the Apple Airport below.
-
- You will also very likely need the Wireless Tools in order to
- configure your card and to make /etc/pcmcia/wireless.opts work:
- <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
-
-config HERMES_PRISM
- bool "Support Prism 2/2.5 chipset"
- depends on HERMES
- help
- Say Y to enable support for Prism 2 and 2.5 chipsets. These
- chipsets are better handled by the hostap driver. This driver
- does not support WPA or firmware download for the Prism chipset.
-
- If you are not sure, say N.
-
-config HERMES_CACHE_FW_ON_INIT
- bool "Cache Hermes firmware on driver initialisation"
- depends on HERMES
- default y
- help
- Say Y to cache any firmware required by the Hermes drivers
- on startup. The firmware will remain cached until the
- driver is unloaded. The cache uses 64K of RAM.
-
- Otherwise load the firmware from userspace as required. In
- this case the driver should be unloaded and restarted
- whenever the firmware is changed.
-
- If you are not sure, say Y.
-
-config APPLE_AIRPORT
- tristate "Apple Airport support (built-in)"
- depends on PPC_PMAC && HERMES
- help
- Say Y here to support the Airport 802.11b wireless Ethernet hardware
- built into the Macintosh iBook and other recent PowerPC-based
- Macintosh machines. This is essentially a Lucent Orinoco card with
- a non-standard interface.
-
- This driver does not support the Airport Extreme (802.11b/g). Use
- the BCM43xx driver for Airport Extreme cards.
-
-config PLX_HERMES
- tristate "Hermes in PLX9052 based PCI adaptor support (Netgear MA301 etc.)"
- depends on PCI && HERMES
- help
- Enable support for PCMCIA cards supported by the "Hermes" (aka
- orinoco) driver when used in PLX9052 based PCI adaptors. These
- adaptors are not a full PCMCIA controller but act as a more limited
- PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
- 802.11b PCMCIA cards can be used in desktop machines. The Netgear
- MA301 is such an adaptor.
-
-config TMD_HERMES
- tristate "Hermes in TMD7160 based PCI adaptor support"
- depends on PCI && HERMES
- help
- Enable support for PCMCIA cards supported by the "Hermes" (aka
- orinoco) driver when used in TMD7160 based PCI adaptors. These
- adaptors are not a full PCMCIA controller but act as a more limited
- PCI <-> PCMCIA bridge. Several vendors sell such adaptors so that
- 802.11b PCMCIA cards can be used in desktop machines.
-
-config NORTEL_HERMES
- tristate "Nortel emobility PCI adaptor support"
- depends on PCI && HERMES
- help
- Enable support for PCMCIA cards supported by the "Hermes" (aka
- orinoco) driver when used in Nortel emobility PCI adaptors. These
- adaptors are not full PCMCIA controllers, but act as a more limited
- PCI <-> PCMCIA bridge.
-
-config PCI_HERMES
- tristate "Prism 2.5 PCI 802.11b adaptor support"
- depends on PCI && HERMES && HERMES_PRISM
- help
- Enable support for PCI and mini-PCI 802.11b wireless NICs based on
- the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
- PCMCIA cards bundled with PCI<->PCMCIA adaptors which are also
- common. Some of the built-in wireless adaptors in laptops are of
- this variety.
-
-config PCMCIA_HERMES
- tristate "Hermes PCMCIA card support"
- depends on PCMCIA && HERMES && HAS_IOPORT_MAP
- help
- A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
- as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
- EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and
- others). It should also be usable on various Prism II based cards
- such as the Linksys, D-Link and Farallon Skyline. It should also
- work on Symbol cards such as the 3Com AirConnect and Ericsson WLAN.
-
- You will very likely need the Wireless Tools in order to
- configure your card and to make /etc/pcmcia/wireless.opts work:
- <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
-
-config PCMCIA_SPECTRUM
- tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
- depends on PCMCIA && HERMES && HAS_IOPORT_MAP
- help
- This is a driver for 802.11b cards using RAM-loadable Symbol
- firmware, such as Symbol Wireless Networker LA4100, CompactFlash
- cards by Socket Communications and Intel PRO/Wireless 2011B.
-
- This driver requires firmware download on startup. Utilities
- for downloading Symbol firmware are available at
- <http://sourceforge.net/projects/orinoco/>
-
-config ORINOCO_USB
- tristate "Agere Orinoco USB support"
- depends on USB && HERMES
- select FW_LOADER
- help
- This driver is for USB versions of the Agere Orinoco card.
diff --git a/drivers/net/wireless/intersil/orinoco/Makefile b/drivers/net/wireless/intersil/orinoco/Makefile
deleted file mode 100644
index 0c29c56c8..000000000
--- a/drivers/net/wireless/intersil/orinoco/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the orinoco wireless device drivers.
-#
-orinoco-objs := main.o fw.o hw.o mic.o scan.o wext.o hermes_dld.o hermes.o cfg.o
-
-obj-$(CONFIG_HERMES) += orinoco.o
-obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
-obj-$(CONFIG_APPLE_AIRPORT) += airport.o
-obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
-obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
-obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
-obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
-obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
-obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
diff --git a/drivers/net/wireless/intersil/orinoco/airport.c b/drivers/net/wireless/intersil/orinoco/airport.c
deleted file mode 100644
index 45ac00fda..000000000
--- a/drivers/net/wireless/intersil/orinoco/airport.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/* airport.c
- *
- * A driver for "Hermes" chipset based Apple Airport wireless
- * card.
- *
- * Copyright notice & release notes in file main.c
- *
- * Note specific to airport stub:
- *
- * 0.05 : first version of the new split driver
- * 0.06 : fix possible hang on powerup, add sleep support
- */
-
-#define DRIVER_NAME "airport"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/mod_devicetable.h>
-#include <asm/pmac_feature.h>
-
-#include "orinoco.h"
-
-#define AIRPORT_IO_LEN (0x1000) /* one page */
-
-struct airport {
- struct macio_dev *mdev;
- void __iomem *vaddr;
- unsigned int irq;
- int irq_requested;
- int ndev_registered;
-};
-
-static int
-airport_suspend(struct macio_dev *mdev, pm_message_t state)
-{
- struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
- struct net_device *dev = priv->ndev;
- struct airport *card = priv->card;
- unsigned long flags;
- int err;
-
- printk(KERN_DEBUG "%s: Airport entering sleep mode\n", dev->name);
-
- err = orinoco_lock(priv, &flags);
- if (err) {
- printk(KERN_ERR "%s: hw_unavailable on PBOOK_SLEEP_NOW\n",
- dev->name);
- return 0;
- }
-
- orinoco_down(priv);
- orinoco_unlock(priv, &flags);
-
- disable_irq(card->irq);
- pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
- macio_get_of_node(mdev), 0, 0);
-
- return 0;
-}
-
-static int
-airport_resume(struct macio_dev *mdev)
-{
- struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
- struct net_device *dev = priv->ndev;
- struct airport *card = priv->card;
- unsigned long flags;
- int err;
-
- printk(KERN_DEBUG "%s: Airport waking up\n", dev->name);
-
- pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
- macio_get_of_node(mdev), 0, 1);
- msleep(200);
-
- enable_irq(card->irq);
-
- priv->hw.ops->lock_irqsave(&priv->lock, &flags);
- err = orinoco_up(priv);
- priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
-
- return err;
-}
-
-static int
-airport_detach(struct macio_dev *mdev)
-{
- struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
- struct airport *card = priv->card;
-
- if (card->ndev_registered)
- orinoco_if_del(priv);
- card->ndev_registered = 0;
-
- if (card->irq_requested)
- free_irq(card->irq, priv);
- card->irq_requested = 0;
-
- if (card->vaddr)
- iounmap(card->vaddr);
- card->vaddr = NULL;
-
- macio_release_resource(mdev, 0);
-
- pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
- macio_get_of_node(mdev), 0, 0);
- ssleep(1);
-
- macio_set_drvdata(mdev, NULL);
- free_orinocodev(priv);
-
- return 0;
-}
-
-static int airport_hard_reset(struct orinoco_private *priv)
-{
- /* It would be nice to power cycle the Airport for a real hard
- * reset, but for some reason although it appears to
- * re-initialize properly, it falls in a screaming heap
- * shortly afterwards. */
-#if 0
- struct airport *card = priv->card;
-
- /* Vitally important. If we don't do this it seems we get an
- * interrupt somewhere during the power cycle, since
- * hw_unavailable is already set it doesn't get ACKed, we get
- * into an interrupt loop and the PMU decides to turn us
- * off. */
- disable_irq(card->irq);
-
- pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
- macio_get_of_node(card->mdev), 0, 0);
- ssleep(1);
- pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
- macio_get_of_node(card->mdev), 0, 1);
- ssleep(1);
-
- enable_irq(card->irq);
- ssleep(1);
-#endif
-
- return 0;
-}
-
-static int
-airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
-{
- struct orinoco_private *priv;
- struct airport *card;
- unsigned long phys_addr;
- struct hermes *hw;
-
- if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
- printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n");
- return -ENODEV;
- }
-
- /* Allocate space for private device-specific data */
- priv = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
- airport_hard_reset, NULL);
- if (!priv) {
- printk(KERN_ERR PFX "Cannot allocate network device\n");
- return -ENODEV;
- }
- card = priv->card;
-
- hw = &priv->hw;
- card->mdev = mdev;
-
- if (macio_request_resource(mdev, 0, DRIVER_NAME)) {
- printk(KERN_ERR PFX "can't request IO resource !\n");
- free_orinocodev(priv);
- return -EBUSY;
- }
-
- macio_set_drvdata(mdev, priv);
-
- /* Setup interrupts & base address */
- card->irq = macio_irq(mdev, 0);
- phys_addr = macio_resource_start(mdev, 0); /* Physical address */
- printk(KERN_DEBUG PFX "Physical address %lx\n", phys_addr);
- card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN);
- if (!card->vaddr) {
- printk(KERN_ERR PFX "ioremap() failed\n");
- goto failed;
- }
-
- hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING);
-
- /* Power up card */
- pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE,
- macio_get_of_node(mdev), 0, 1);
- ssleep(1);
-
- /* Reset it before we get the interrupt */
- hw->ops->init(hw);
-
- if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
- printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
- goto failed;
- }
- card->irq_requested = 1;
-
- /* Initialise the main driver */
- if (orinoco_init(priv) != 0) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto failed;
- }
-
- /* Register an interface with the stack */
- if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto failed;
- }
- card->ndev_registered = 1;
- return 0;
- failed:
- airport_detach(mdev);
- return -ENODEV;
-} /* airport_attach */
-
-
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Benjamin Herrenschmidt <benh@kernel.crashing.org>)";
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
-MODULE_LICENSE("Dual MPL/GPL");
-
-static const struct of_device_id airport_match[] = {
- {
- .name = "radio",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, airport_match);
-
-static struct macio_driver airport_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- .of_match_table = airport_match,
- },
- .probe = airport_attach,
- .remove = airport_detach,
- .suspend = airport_suspend,
- .resume = airport_resume,
-};
-
-static int __init
-init_airport(void)
-{
- printk(KERN_DEBUG "%s\n", version);
-
- return macio_register_driver(&airport_driver);
-}
-
-static void __exit
-exit_airport(void)
-{
- macio_unregister_driver(&airport_driver);
-}
-
-module_init(init_airport);
-module_exit(exit_airport);
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c
deleted file mode 100644
index b2d5ec863..000000000
--- a/drivers/net/wireless/intersil/orinoco/cfg.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/* cfg80211 support
- *
- * See copyright notice in main.c
- */
-#include <linux/ieee80211.h>
-#include <net/cfg80211.h>
-#include "hw.h"
-#include "main.h"
-#include "orinoco.h"
-
-#include "cfg.h"
-
-/* Supported bitrates. Must agree with hw.c */
-static struct ieee80211_rate orinoco_rates[] = {
- { .bitrate = 10 },
- { .bitrate = 20 },
- { .bitrate = 55 },
- { .bitrate = 110 },
-};
-
-static const void * const orinoco_wiphy_privid = &orinoco_wiphy_privid;
-
-/* Called after orinoco_private is allocated. */
-void orinoco_wiphy_init(struct wiphy *wiphy)
-{
- struct orinoco_private *priv = wiphy_priv(wiphy);
-
- wiphy->privid = orinoco_wiphy_privid;
-
- set_wiphy_dev(wiphy, priv->dev);
-}
-
-/* Called after firmware is initialised */
-int orinoco_wiphy_register(struct wiphy *wiphy)
-{
- struct orinoco_private *priv = wiphy_priv(wiphy);
- int i, channels = 0;
-
- if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
- wiphy->max_scan_ssids = 1;
- else
- wiphy->max_scan_ssids = 0;
-
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
- /* TODO: should we set if we only have demo ad-hoc?
- * (priv->has_port3)
- */
- if (priv->has_ibss)
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
-
- if (!priv->broken_monitor || force_monitor)
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
-
- priv->band.bitrates = orinoco_rates;
- priv->band.n_bitrates = ARRAY_SIZE(orinoco_rates);
-
- /* Only support channels allowed by the card EEPROM */
- for (i = 0; i < NUM_CHANNELS; i++) {
- if (priv->channel_mask & (1 << i)) {
- priv->channels[i].center_freq =
- ieee80211_channel_to_frequency(i + 1,
- NL80211_BAND_2GHZ);
- channels++;
- }
- }
- priv->band.channels = priv->channels;
- priv->band.n_channels = channels;
-
- wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
- wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- i = 0;
- if (priv->has_wep) {
- priv->cipher_suites[i] = WLAN_CIPHER_SUITE_WEP40;
- i++;
-
- if (priv->has_big_wep) {
- priv->cipher_suites[i] = WLAN_CIPHER_SUITE_WEP104;
- i++;
- }
- }
- if (priv->has_wpa) {
- priv->cipher_suites[i] = WLAN_CIPHER_SUITE_TKIP;
- i++;
- }
- wiphy->cipher_suites = priv->cipher_suites;
- wiphy->n_cipher_suites = i;
-
- wiphy->rts_threshold = priv->rts_thresh;
- if (!priv->has_mwo)
- wiphy->frag_threshold = priv->frag_thresh + 1;
- wiphy->retry_short = priv->short_retry_limit;
- wiphy->retry_long = priv->long_retry_limit;
-
- return wiphy_register(wiphy);
-}
-
-static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev,
- enum nl80211_iftype type,
- struct vif_params *params)
-{
- struct orinoco_private *priv = wiphy_priv(wiphy);
- int err = 0;
- unsigned long lock;
-
- if (orinoco_lock(priv, &lock) != 0)
- return -EBUSY;
-
- switch (type) {
- case NL80211_IFTYPE_ADHOC:
- if (!priv->has_ibss && !priv->has_port3)
- err = -EINVAL;
- break;
-
- case NL80211_IFTYPE_STATION:
- break;
-
- case NL80211_IFTYPE_MONITOR:
- if (priv->broken_monitor && !force_monitor) {
- wiphy_warn(wiphy,
- "Monitor mode support is buggy in this firmware, not enabling\n");
- err = -EINVAL;
- }
- break;
-
- default:
- err = -EINVAL;
- }
-
- if (!err) {
- priv->iw_mode = type;
- set_port_type(priv);
- err = orinoco_commit(priv);
- }
-
- orinoco_unlock(priv, &lock);
-
- return err;
-}
-
-static int orinoco_scan(struct wiphy *wiphy,
- struct cfg80211_scan_request *request)
-{
- struct orinoco_private *priv = wiphy_priv(wiphy);
- int err;
-
- if (!request)
- return -EINVAL;
-
- if (priv->scan_request && priv->scan_request != request)
- return -EBUSY;
-
- priv->scan_request = request;
-
- err = orinoco_hw_trigger_scan(priv, request->ssids);
- /* On error we aren't processing the request */
- if (err)
- priv->scan_request = NULL;
-
- return err;
-}
-
-static int orinoco_set_monitor_channel(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef)
-{
- struct orinoco_private *priv = wiphy_priv(wiphy);
- int err = 0;
- unsigned long flags;
- int channel;
-
- if (!chandef->chan)
- return -EINVAL;
-
- if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
- return -EINVAL;
-
- if (chandef->chan->band != NL80211_BAND_2GHZ)
- return -EINVAL;
-
- channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
-
- if ((channel < 1) || (channel > NUM_CHANNELS) ||
- !(priv->channel_mask & (1 << (channel - 1))))
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- priv->channel = channel;
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
- /* Fast channel change - no commit if successful */
- struct hermes *hw = &priv->hw;
- err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
- HERMES_TEST_SET_CHANNEL,
- channel, NULL);
- }
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct orinoco_private *priv = wiphy_priv(wiphy);
- int frag_value = -1;
- int rts_value = -1;
- int err = 0;
-
- if (changed & WIPHY_PARAM_RETRY_SHORT) {
- /* Setting short retry not supported */
- err = -EINVAL;
- }
-
- if (changed & WIPHY_PARAM_RETRY_LONG) {
- /* Setting long retry not supported */
- err = -EINVAL;
- }
-
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
- /* Set fragmentation */
- if (priv->has_mwo) {
- if (wiphy->frag_threshold == -1)
- frag_value = 0;
- else {
- printk(KERN_WARNING "%s: Fixed fragmentation "
- "is not supported on this firmware. "
- "Using MWO robust instead.\n",
- priv->ndev->name);
- frag_value = 1;
- }
- } else {
- if (wiphy->frag_threshold == -1)
- frag_value = 2346;
- else if ((wiphy->frag_threshold < 257) ||
- (wiphy->frag_threshold > 2347))
- err = -EINVAL;
- else
- /* cfg80211 value is 257-2347 (odd only)
- * orinoco rid has range 256-2346 (even only) */
- frag_value = wiphy->frag_threshold & ~0x1;
- }
- }
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- /* Set RTS.
- *
- * Prism documentation suggests default of 2432,
- * and a range of 0-3000.
- *
- * Current implementation uses 2347 as the default and
- * the upper limit.
- */
-
- if (wiphy->rts_threshold == -1)
- rts_value = 2347;
- else if (wiphy->rts_threshold > 2347)
- err = -EINVAL;
- else
- rts_value = wiphy->rts_threshold;
- }
-
- if (!err) {
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (frag_value >= 0) {
- if (priv->has_mwo)
- priv->mwo_robust = frag_value;
- else
- priv->frag_thresh = frag_value;
- }
- if (rts_value >= 0)
- priv->rts_thresh = rts_value;
-
- err = orinoco_commit(priv);
-
- orinoco_unlock(priv, &flags);
- }
-
- return err;
-}
-
-const struct cfg80211_ops orinoco_cfg_ops = {
- .change_virtual_intf = orinoco_change_vif,
- .set_monitor_channel = orinoco_set_monitor_channel,
- .scan = orinoco_scan,
- .set_wiphy_params = orinoco_set_wiphy_params,
-};
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.h b/drivers/net/wireless/intersil/orinoco/cfg.h
deleted file mode 100644
index 3ddc96a06..000000000
--- a/drivers/net/wireless/intersil/orinoco/cfg.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* cfg80211 support.
- *
- * See copyright notice in main.c
- */
-#ifndef ORINOCO_CFG_H
-#define ORINOCO_CFG_H
-
-#include <net/cfg80211.h>
-
-extern const struct cfg80211_ops orinoco_cfg_ops;
-
-void orinoco_wiphy_init(struct wiphy *wiphy);
-int orinoco_wiphy_register(struct wiphy *wiphy);
-
-#endif /* ORINOCO_CFG_H */
diff --git a/drivers/net/wireless/intersil/orinoco/fw.c b/drivers/net/wireless/intersil/orinoco/fw.c
deleted file mode 100644
index 015af7828..000000000
--- a/drivers/net/wireless/intersil/orinoco/fw.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/* Firmware file reading and download helpers
- *
- * See copyright notice in main.c
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-#include <linux/device.h>
-#include <linux/module.h>
-
-#include "hermes.h"
-#include "hermes_dld.h"
-#include "orinoco.h"
-
-#include "fw.h"
-
-/* End markers (for Symbol firmware only) */
-#define TEXT_END 0x1A /* End of text header */
-
-struct fw_info {
- char *pri_fw;
- char *sta_fw;
- char *ap_fw;
- u32 pda_addr;
- u16 pda_size;
-};
-
-static const struct fw_info orinoco_fw[] = {
- { NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
- { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
- { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
-};
-MODULE_FIRMWARE("agere_sta_fw.bin");
-MODULE_FIRMWARE("agere_ap_fw.bin");
-MODULE_FIRMWARE("prism_sta_fw.bin");
-MODULE_FIRMWARE("prism_ap_fw.bin");
-MODULE_FIRMWARE("symbol_sp24t_prim_fw");
-MODULE_FIRMWARE("symbol_sp24t_sec_fw");
-
-/* Structure used to access fields in FW
- * Make sure LE decoding macros are used
- */
-struct orinoco_fw_header {
- char hdr_vers[6]; /* ASCII string for header version */
- __le16 headersize; /* Total length of header */
- __le32 entry_point; /* NIC entry point */
- __le32 blocks; /* Number of blocks to program */
- __le32 block_offset; /* Offset of block data from end of header */
- __le32 pdr_offset; /* Offset to PDR data from end of header */
- __le32 pri_offset; /* Offset to primary plug data */
- __le32 compat_offset; /* Offset to compatibility data*/
- char signature[]; /* FW signature length headersize-20 */
-} __packed;
-
-/* Check the range of various header entries. Return a pointer to a
- * description of the problem, or NULL if everything checks out. */
-static const char *validate_fw(const struct orinoco_fw_header *hdr, size_t len)
-{
- u16 hdrsize;
-
- if (len < sizeof(*hdr))
- return "image too small";
- if (memcmp(hdr->hdr_vers, "HFW", 3) != 0)
- return "format not recognised";
-
- hdrsize = le16_to_cpu(hdr->headersize);
- if (hdrsize > len)
- return "bad headersize";
- if ((hdrsize + le32_to_cpu(hdr->block_offset)) > len)
- return "bad block offset";
- if ((hdrsize + le32_to_cpu(hdr->pdr_offset)) > len)
- return "bad PDR offset";
- if ((hdrsize + le32_to_cpu(hdr->pri_offset)) > len)
- return "bad PRI offset";
- if ((hdrsize + le32_to_cpu(hdr->compat_offset)) > len)
- return "bad compat offset";
-
- /* TODO: consider adding a checksum or CRC to the firmware format */
- return NULL;
-}
-
-#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
-static inline const struct firmware *
-orinoco_cached_fw_get(struct orinoco_private *priv, bool primary)
-{
- if (primary)
- return priv->cached_pri_fw;
- else
- return priv->cached_fw;
-}
-#else
-#define orinoco_cached_fw_get(priv, primary) (NULL)
-#endif
-
-/* Download either STA or AP firmware into the card. */
-static int
-orinoco_dl_firmware(struct orinoco_private *priv,
- const struct fw_info *fw,
- int ap)
-{
- /* Plug Data Area (PDA) */
- __le16 *pda;
-
- struct hermes *hw = &priv->hw;
- const struct firmware *fw_entry;
- const struct orinoco_fw_header *hdr;
- const unsigned char *first_block;
- const void *end;
- const char *firmware;
- const char *fw_err;
- struct device *dev = priv->dev;
- int err = 0;
-
- pda = kzalloc(fw->pda_size, GFP_KERNEL);
- if (!pda)
- return -ENOMEM;
-
- if (ap)
- firmware = fw->ap_fw;
- else
- firmware = fw->sta_fw;
-
- dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
-
- /* Read current plug data */
- err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
- dev_dbg(dev, "Read PDA returned %d\n", err);
- if (err)
- goto free;
-
- if (!orinoco_cached_fw_get(priv, false)) {
- err = request_firmware(&fw_entry, firmware, priv->dev);
-
- if (err) {
- dev_err(dev, "Cannot find firmware %s\n", firmware);
- err = -ENOENT;
- goto free;
- }
- } else
- fw_entry = orinoco_cached_fw_get(priv, false);
-
- hdr = (const struct orinoco_fw_header *) fw_entry->data;
-
- fw_err = validate_fw(hdr, fw_entry->size);
- if (fw_err) {
- dev_warn(dev, "Invalid firmware image detected (%s). "
- "Aborting download\n", fw_err);
- err = -EINVAL;
- goto abort;
- }
-
- /* Enable aux port to allow programming */
- err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
- dev_dbg(dev, "Program init returned %d\n", err);
- if (err != 0)
- goto abort;
-
- /* Program data */
- first_block = (fw_entry->data +
- le16_to_cpu(hdr->headersize) +
- le32_to_cpu(hdr->block_offset));
- end = fw_entry->data + fw_entry->size;
-
- err = hermes_program(hw, first_block, end);
- dev_dbg(dev, "Program returned %d\n", err);
- if (err != 0)
- goto abort;
-
- /* Update production data */
- first_block = (fw_entry->data +
- le16_to_cpu(hdr->headersize) +
- le32_to_cpu(hdr->pdr_offset));
-
- err = hermes_apply_pda_with_defaults(hw, first_block, end, pda,
- &pda[fw->pda_size / sizeof(*pda)]);
- dev_dbg(dev, "Apply PDA returned %d\n", err);
- if (err)
- goto abort;
-
- /* Tell card we've finished */
- err = hw->ops->program_end(hw);
- dev_dbg(dev, "Program end returned %d\n", err);
- if (err != 0)
- goto abort;
-
- /* Check if we're running */
- dev_dbg(dev, "hermes_present returned %d\n", hermes_present(hw));
-
-abort:
- /* If we requested the firmware, release it. */
- if (!orinoco_cached_fw_get(priv, false))
- release_firmware(fw_entry);
-
-free:
- kfree(pda);
- return err;
-}
-
-/*
- * Process a firmware image - stop the card, load the firmware, reset
- * the card and make sure it responds. For the secondary firmware take
- * care of the PDA - read it and then write it on top of the firmware.
- */
-static int
-symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
- const unsigned char *image, const void *end,
- int secondary)
-{
- struct hermes *hw = &priv->hw;
- int ret = 0;
- const unsigned char *ptr;
- const unsigned char *first_block;
-
- /* Plug Data Area (PDA) */
- __le16 *pda = NULL;
-
- /* Binary block begins after the 0x1A marker */
- ptr = image;
- while (*ptr++ != TEXT_END);
- first_block = ptr;
-
- /* Read the PDA from EEPROM */
- if (secondary) {
- pda = kzalloc(fw->pda_size, GFP_KERNEL);
- if (!pda)
- return -ENOMEM;
-
- ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
- if (ret)
- goto free;
- }
-
- /* Stop the firmware, so that it can be safely rewritten */
- if (priv->stop_fw) {
- ret = priv->stop_fw(priv, 1);
- if (ret)
- goto free;
- }
-
- /* Program the adapter with new firmware */
- ret = hermes_program(hw, first_block, end);
- if (ret)
- goto free;
-
- /* Write the PDA to the adapter */
- if (secondary) {
- size_t len = hermes_blocks_length(first_block, end);
- ptr = first_block + len;
- ret = hermes_apply_pda(hw, ptr, end, pda,
- &pda[fw->pda_size / sizeof(*pda)]);
- kfree(pda);
- if (ret)
- return ret;
- }
-
- /* Run the firmware */
- if (priv->stop_fw) {
- ret = priv->stop_fw(priv, 0);
- if (ret)
- return ret;
- }
-
- /* Reset hermes chip and make sure it responds */
- ret = hw->ops->init(hw);
-
- /* hermes_reset() should return 0 with the secondary firmware */
- if (secondary && ret != 0)
- return -ENODEV;
-
- /* And this should work with any firmware */
- if (!hermes_present(hw))
- return -ENODEV;
-
- return 0;
-
-free:
- kfree(pda);
- return ret;
-}
-
-
-/*
- * Download the firmware into the card, this also does a PCMCIA soft
- * reset on the card, to make sure it's in a sane state.
- */
-static int
-symbol_dl_firmware(struct orinoco_private *priv,
- const struct fw_info *fw)
-{
- struct device *dev = priv->dev;
- int ret;
- const struct firmware *fw_entry;
-
- if (!orinoco_cached_fw_get(priv, true)) {
- if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) {
- dev_err(dev, "Cannot find firmware: %s\n", fw->pri_fw);
- return -ENOENT;
- }
- } else
- fw_entry = orinoco_cached_fw_get(priv, true);
-
- /* Load primary firmware */
- ret = symbol_dl_image(priv, fw, fw_entry->data,
- fw_entry->data + fw_entry->size, 0);
-
- if (!orinoco_cached_fw_get(priv, true))
- release_firmware(fw_entry);
- if (ret) {
- dev_err(dev, "Primary firmware download failed\n");
- return ret;
- }
-
- if (!orinoco_cached_fw_get(priv, false)) {
- if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) {
- dev_err(dev, "Cannot find firmware: %s\n", fw->sta_fw);
- return -ENOENT;
- }
- } else
- fw_entry = orinoco_cached_fw_get(priv, false);
-
- /* Load secondary firmware */
- ret = symbol_dl_image(priv, fw, fw_entry->data,
- fw_entry->data + fw_entry->size, 1);
- if (!orinoco_cached_fw_get(priv, false))
- release_firmware(fw_entry);
- if (ret)
- dev_err(dev, "Secondary firmware download failed\n");
-
- return ret;
-}
-
-int orinoco_download(struct orinoco_private *priv)
-{
- int err = 0;
- /* Reload firmware */
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- /* case FIRMWARE_TYPE_INTERSIL: */
- err = orinoco_dl_firmware(priv,
- &orinoco_fw[priv->firmware_type], 0);
- break;
-
- case FIRMWARE_TYPE_SYMBOL:
- err = symbol_dl_firmware(priv,
- &orinoco_fw[priv->firmware_type]);
- break;
- case FIRMWARE_TYPE_INTERSIL:
- break;
- }
- /* TODO: if we fail we probably need to reinitialise
- * the driver */
-
- return err;
-}
-
-#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
-void orinoco_cache_fw(struct orinoco_private *priv, int ap)
-{
- const struct firmware *fw_entry = NULL;
- const char *pri_fw;
- const char *fw;
-
- pri_fw = orinoco_fw[priv->firmware_type].pri_fw;
- if (ap)
- fw = orinoco_fw[priv->firmware_type].ap_fw;
- else
- fw = orinoco_fw[priv->firmware_type].sta_fw;
-
- if (pri_fw) {
- if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0)
- priv->cached_pri_fw = fw_entry;
- }
-
- if (fw) {
- if (request_firmware(&fw_entry, fw, priv->dev) == 0)
- priv->cached_fw = fw_entry;
- }
-}
-
-void orinoco_uncache_fw(struct orinoco_private *priv)
-{
- release_firmware(priv->cached_pri_fw);
- release_firmware(priv->cached_fw);
- priv->cached_pri_fw = NULL;
- priv->cached_fw = NULL;
-}
-#endif
diff --git a/drivers/net/wireless/intersil/orinoco/fw.h b/drivers/net/wireless/intersil/orinoco/fw.h
deleted file mode 100644
index aca63e3c4..000000000
--- a/drivers/net/wireless/intersil/orinoco/fw.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Firmware file reading and download helpers
- *
- * See copyright notice in main.c
- */
-#ifndef _ORINOCO_FW_H_
-#define _ORINOCO_FW_H_
-
-/* Forward declarations */
-struct orinoco_private;
-
-int orinoco_download(struct orinoco_private *priv);
-
-#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
-void orinoco_cache_fw(struct orinoco_private *priv, int ap);
-void orinoco_uncache_fw(struct orinoco_private *priv);
-#else
-#define orinoco_cache_fw(priv, ap) do { } while (0)
-#define orinoco_uncache_fw(priv) do { } while (0)
-#endif
-
-#endif /* _ORINOCO_FW_H_ */
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c
deleted file mode 100644
index 488828672..000000000
--- a/drivers/net/wireless/intersil/orinoco/hermes.c
+++ /dev/null
@@ -1,778 +0,0 @@
-/* hermes.c
- *
- * Driver core for the "Hermes" wireless MAC controller, as used in
- * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
- * work on the hfa3841 and hfa3842 MAC controller chips used in the
- * Prism II chipsets.
- *
- * This is not a complete driver, just low-level access routines for
- * the MAC controller itself.
- *
- * Based on the prism2 driver from Absolute Value Systems' linux-wlan
- * project, the Linux wvlan_cs driver, Lucent's HCF-Light
- * (wvlan_hcf.c) library, and the NetBSD wireless driver (in no
- * particular order).
- *
- * Copyright (C) 2000, David Gibson, Linuxcare Australia.
- * (C) Copyright David Gibson, IBM Corp. 2001-2003.
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- */
-
-#include <linux/net.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-
-#include "hermes.h"
-
-/* These are maximum timeouts. Most often, the card will react much faster */
-#define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */
-#define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */
-#define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */
-#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
-
-/*
- * AUX port access. To unlock the AUX port write the access keys to the
- * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
- * register. Then read it and make sure it's HERMES_AUX_ENABLED.
- */
-#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
-#define HERMES_AUX_DISABLE 0x4000 /* Disable auxiliary port access */
-#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
-#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
-
-#define HERMES_AUX_PW0 0xFE01
-#define HERMES_AUX_PW1 0xDC23
-#define HERMES_AUX_PW2 0xBA45
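
A hedged sketch of the unlock handshake described in the comment above,
written with the hermes_read_regn()/hermes_write_regn() helpers used
throughout this file (the function name is illustrative; a real caller
would typically retry the sequence a few times before giving up):

static int hermes_aux_open(struct hermes *hw)
{
        u16 reg;

        hermes_write_regn(hw, PARAM0, HERMES_AUX_PW0);
        hermes_write_regn(hw, PARAM1, HERMES_AUX_PW1);
        hermes_write_regn(hw, PARAM2, HERMES_AUX_PW2);
        hermes_write_regn(hw, CONTROL, HERMES_AUX_ENABLE);

        reg = hermes_read_regn(hw, CONTROL);
        return ((reg & HERMES_AUX_ENABLED) == HERMES_AUX_ENABLED) ?
                0 : -EBUSY;
}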
-
-/* HERMES_CMD_DOWNLD */
-#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
-#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
-
-/*
- * Debugging helpers
- */
-
-#define DMSG(stuff...) do { printk(KERN_DEBUG "hermes @ %p: ", hw->iobase); \
- printk(stuff); } while (0)
-
-#undef HERMES_DEBUG
-#ifdef HERMES_DEBUG
-
-#define DEBUG(lvl, stuff...) if ((lvl) <= HERMES_DEBUG) DMSG(stuff)
-
-#else /* ! HERMES_DEBUG */
-
-#define DEBUG(lvl, stuff...) do { } while (0)
-
-#endif /* ! HERMES_DEBUG */
-
-static const struct hermes_ops hermes_ops_local;
-
-/*
- * Internal functions
- */
-
-/* Issue a command to the chip. Waiting for it to complete is the caller's
- problem.
-
- Returns -EBUSY if the command register is busy, 0 on success.
-
- Callable from any context.
-*/
-static int hermes_issue_cmd(struct hermes *hw, u16 cmd, u16 param0,
- u16 param1, u16 param2)
-{
- int k = CMD_BUSY_TIMEOUT;
- u16 reg;
-
- /* First wait for the command register to unbusy */
- reg = hermes_read_regn(hw, CMD);
- while ((reg & HERMES_CMD_BUSY) && k) {
- k--;
- udelay(1);
- reg = hermes_read_regn(hw, CMD);
- }
- if (reg & HERMES_CMD_BUSY)
- return -EBUSY;
-
- hermes_write_regn(hw, PARAM2, param2);
- hermes_write_regn(hw, PARAM1, param1);
- hermes_write_regn(hw, PARAM0, param0);
- hermes_write_regn(hw, CMD, cmd);
-
- return 0;
-}
-
-/*
- * Function definitions
- */
-
-/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
-static int hermes_doicmd_wait(struct hermes *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp)
-{
- int err = 0;
- int k;
- u16 status, reg;
-
- err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2);
- if (err)
- return err;
-
- reg = hermes_read_regn(hw, EVSTAT);
- k = CMD_INIT_TIMEOUT;
- while ((!(reg & HERMES_EV_CMD)) && k) {
- k--;
- udelay(10);
- reg = hermes_read_regn(hw, EVSTAT);
- }
-
- hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
-
- if (!hermes_present(hw)) {
- DEBUG(0, "hermes @ %p: Card removed during reset.\n",
- hw->iobase);
- err = -ENODEV;
- goto out;
- }
-
- if (!(reg & HERMES_EV_CMD)) {
- printk(KERN_ERR "hermes @ %p: "
- "Timeout waiting for card to reset (reg=0x%04x)!\n",
- hw->iobase, reg);
- err = -ETIMEDOUT;
- goto out;
- }
-
- status = hermes_read_regn(hw, STATUS);
- if (resp) {
- resp->status = status;
- resp->resp0 = hermes_read_regn(hw, RESP0);
- resp->resp1 = hermes_read_regn(hw, RESP1);
- resp->resp2 = hermes_read_regn(hw, RESP2);
- }
-
- hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
-
- if (status & HERMES_STATUS_RESULT)
- err = -EIO;
-out:
- return err;
-}
-
-void hermes_struct_init(struct hermes *hw, void __iomem *address,
- int reg_spacing)
-{
- hw->iobase = address;
- hw->reg_spacing = reg_spacing;
- hw->inten = 0x0;
- hw->eeprom_pda = false;
- hw->ops = &hermes_ops_local;
-}
-EXPORT_SYMBOL(hermes_struct_init);
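
As a usage sketch (not code from this file), a bus front-end embeds a struct hermes, points it at its mapped registers with hermes_struct_init(), and then resets the firmware through the ops table. The probe helper and its iomem argument below are illustrative assumptions:

    /* Minimal bring-up sketch, assuming a memory-mapped card with
     * 16-bit register spacing; card_regs would come from the bus
     * probe code (e.g. an ioremap of the card's register window). */
    static int example_attach(struct hermes *hw, void __iomem *card_regs)
    {
            hermes_struct_init(hw, card_regs, HERMES_16BIT_REGSPACING);

            /* hw->ops now points at hermes_ops_local; reset the firmware */
            return hw->ops->init(hw);
    }
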
-
-static int hermes_init(struct hermes *hw)
-{
- u16 reg;
- int err = 0;
- int k;
-
- /* We don't want to be interrupted while resetting the chipset */
- hw->inten = 0x0;
- hermes_write_regn(hw, INTEN, 0);
- hermes_write_regn(hw, EVACK, 0xffff);
-
- /* Normally it's a "can't happen" for the command register to
- be busy when we go to issue a command, because we are
- serializing all commands. However, we want to have some
- chance of resetting the card even if it gets into a stupid
- state, so we actually wait here to see if the command register
- will unbusy itself. */
- k = CMD_BUSY_TIMEOUT;
- reg = hermes_read_regn(hw, CMD);
- while (k && (reg & HERMES_CMD_BUSY)) {
- if (reg == 0xffff) /* Special case - the card has probably been
- removed, so don't wait for the timeout */
- return -ENODEV;
-
- k--;
- udelay(1);
- reg = hermes_read_regn(hw, CMD);
- }
-
- /* No need to explicitly handle the timeout - if we've timed
- out, hermes_issue_cmd() will probably return -EBUSY below */
-
- /* According to the documentation, EVSTAT may contain
- obsolete event occurrence information. We have to acknowledge
- it by writing EVACK. */
- reg = hermes_read_regn(hw, EVSTAT);
- hermes_write_regn(hw, EVACK, reg);
-
- /* We don't use hermes_docmd_wait here, because the reset wipes
- the magic constant in SWSUPPORT0 away, and it gets confused */
- err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL);
-
- return err;
-}
-
-/* Issue a command to the chip, and (busy!) wait for it to
- * complete.
- *
- * Returns:
- * < 0 on internal error
- * 0 on success
- * > 0 on error returned by the firmware
- *
- * Callable from any context, but locking is your problem. */
-static int hermes_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp)
-{
- int err;
- int k;
- u16 reg;
- u16 status;
-
- err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
- if (err) {
- if (!hermes_present(hw)) {
- if (net_ratelimit())
- printk(KERN_WARNING "hermes @ %p: "
- "Card removed while issuing command "
- "0x%04x.\n", hw->iobase, cmd);
- err = -ENODEV;
- } else
- if (net_ratelimit())
- printk(KERN_ERR "hermes @ %p: "
- "Error %d issuing command 0x%04x.\n",
- hw->iobase, err, cmd);
- goto out;
- }
-
- reg = hermes_read_regn(hw, EVSTAT);
- k = CMD_COMPL_TIMEOUT;
- while ((!(reg & HERMES_EV_CMD)) && k) {
- k--;
- udelay(10);
- reg = hermes_read_regn(hw, EVSTAT);
- }
-
- if (!hermes_present(hw)) {
- printk(KERN_WARNING "hermes @ %p: Card removed "
- "while waiting for command 0x%04x completion.\n",
- hw->iobase, cmd);
- err = -ENODEV;
- goto out;
- }
-
- if (!(reg & HERMES_EV_CMD)) {
- printk(KERN_ERR "hermes @ %p: Timeout waiting for "
- "command 0x%04x completion.\n", hw->iobase, cmd);
- err = -ETIMEDOUT;
- goto out;
- }
-
- status = hermes_read_regn(hw, STATUS);
- if (resp) {
- resp->status = status;
- resp->resp0 = hermes_read_regn(hw, RESP0);
- resp->resp1 = hermes_read_regn(hw, RESP1);
- resp->resp2 = hermes_read_regn(hw, RESP2);
- }
-
- hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
-
- if (status & HERMES_STATUS_RESULT)
- err = -EIO;
-
- out:
- return err;
-}
-
-static int hermes_allocate(struct hermes *hw, u16 size, u16 *fid)
-{
- int err = 0;
- int k;
- u16 reg;
-
- if ((size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX))
- return -EINVAL;
-
- err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL);
- if (err)
- return err;
-
- reg = hermes_read_regn(hw, EVSTAT);
- k = ALLOC_COMPL_TIMEOUT;
- while ((!(reg & HERMES_EV_ALLOC)) && k) {
- k--;
- udelay(10);
- reg = hermes_read_regn(hw, EVSTAT);
- }
-
- if (!hermes_present(hw)) {
- printk(KERN_WARNING "hermes @ %p: "
- "Card removed waiting for frame allocation.\n",
- hw->iobase);
- return -ENODEV;
- }
-
- if (!(reg & HERMES_EV_ALLOC)) {
- printk(KERN_ERR "hermes @ %p: "
- "Timeout waiting for frame allocation\n",
- hw->iobase);
- return -ETIMEDOUT;
- }
-
- *fid = hermes_read_regn(hw, ALLOCFID);
- hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC);
-
- return 0;
-}
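
For context, a caller would typically reserve one transmit buffer up front and reuse the returned FID for every frame. A hedged sketch of that pattern; the frame budget is an illustrative assumption, not a value taken from this file:

    /* Sketch: allocate a TX buffer once at interface init. The size
     * (frame descriptor area plus a full 802.3 payload) is a plausible
     * choice that stays under HERMES_ALLOC_LEN_MAX. */
    static int example_alloc_txbuf(struct hermes *hw, u16 *txfid)
    {
            return hw->ops->allocate(hw, HERMES_802_3_OFFSET + 1500, txfid);
    }
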
-
-/* Set up a BAP to read a particular chunk of data from the card's
- * internal buffer.
- *
- * Returns:
- * < 0 on internal failure (errno)
- * 0 on success
- * > 0 on error from firmware
- *
- * Callable from any context */
-static int hermes_bap_seek(struct hermes *hw, int bap, u16 id, u16 offset)
-{
- int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
- int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
- int k;
- u16 reg;
-
- /* Paranoia.. */
- if ((offset > HERMES_BAP_OFFSET_MAX) || (offset % 2))
- return -EINVAL;
-
- k = HERMES_BAP_BUSY_TIMEOUT;
- reg = hermes_read_reg(hw, oreg);
- while ((reg & HERMES_OFFSET_BUSY) && k) {
- k--;
- udelay(1);
- reg = hermes_read_reg(hw, oreg);
- }
-
- if (reg & HERMES_OFFSET_BUSY)
- return -ETIMEDOUT;
-
- /* Now we actually set up the transfer */
- hermes_write_reg(hw, sreg, id);
- hermes_write_reg(hw, oreg, offset);
-
- /* Wait for the BAP to be ready */
- k = HERMES_BAP_BUSY_TIMEOUT;
- reg = hermes_read_reg(hw, oreg);
- while ((reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) {
- k--;
- udelay(1);
- reg = hermes_read_reg(hw, oreg);
- }
-
- if (reg != offset) {
- printk(KERN_ERR "hermes @ %p: BAP%d offset %s: "
- "reg=0x%x id=0x%x offset=0x%x\n", hw->iobase, bap,
- (reg & HERMES_OFFSET_BUSY) ? "timeout" : "error",
- reg, id, offset);
-
- if (reg & HERMES_OFFSET_BUSY)
- return -ETIMEDOUT;
-
- return -EIO; /* error or wrong offset */
- }
-
- return 0;
-}
-
-/* Read a block of data from the chip's buffer, via the
- * BAP. Synchronization/serialization is the caller's problem. len
- * must be even.
- *
- * Returns:
- * < 0 on internal failure (errno)
- * 0 on success
- * > 0 on error from firmware
- */
-static int hermes_bap_pread(struct hermes *hw, int bap, void *buf, int len,
- u16 id, u16 offset)
-{
- int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
- int err = 0;
-
- if ((len < 0) || (len % 2))
- return -EINVAL;
-
- err = hermes_bap_seek(hw, bap, id, offset);
- if (err)
- goto out;
-
- /* Actually do the transfer */
- hermes_read_words(hw, dreg, buf, len / 2);
-
- out:
- return err;
-}
-
-/* Write a block of data to the chip's buffer, via the
- * BAP. Synchronization/serialization is the caller's problem.
- *
- * Returns:
- * < 0 on internal failure (errno)
- * 0 on success
- * > 0 on error from firmware
- */
-static int hermes_bap_pwrite(struct hermes *hw, int bap, const void *buf,
- int len, u16 id, u16 offset)
-{
- int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
- int err = 0;
-
- if (len < 0)
- return -EINVAL;
-
- err = hermes_bap_seek(hw, bap, id, offset);
- if (err)
- goto out;
-
- /* Actually do the transfer */
- hermes_write_bytes(hw, dreg, buf, len);
-
- out:
- return err;
-}
-
-/* Read a Length-Type-Value record from the card.
- *
- * If length is NULL, we ignore the length read from the card, and
- * read the entire buffer regardless. This is useful because some of
- * the configuration records appear to have incorrect lengths in
- * practice.
- *
- * Callable from user or bh context. */
-static int hermes_read_ltv(struct hermes *hw, int bap, u16 rid,
- unsigned bufsize, u16 *length, void *buf)
-{
- int err = 0;
- int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
- u16 rlength, rtype;
- unsigned nwords;
-
- if (bufsize % 2)
- return -EINVAL;
-
- err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL);
- if (err)
- return err;
-
- err = hermes_bap_seek(hw, bap, rid, 0);
- if (err)
- return err;
-
- rlength = hermes_read_reg(hw, dreg);
-
- if (!rlength)
- return -ENODATA;
-
- rtype = hermes_read_reg(hw, dreg);
-
- if (length)
- *length = rlength;
-
- if (rtype != rid)
- printk(KERN_WARNING "hermes @ %p: %s(): "
- "rid (0x%04x) does not match type (0x%04x)\n",
- hw->iobase, __func__, rid, rtype);
- if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
- printk(KERN_WARNING "hermes @ %p: "
- "Truncating LTV record from %d to %d bytes. "
- "(rid=0x%04x, len=0x%04x)\n", hw->iobase,
- HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength);
-
- nwords = min((unsigned)rlength - 1, bufsize / 2);
- hermes_read_words(hw, dreg, buf, nwords);
-
- return 0;
-}
-
-static int hermes_write_ltv(struct hermes *hw, int bap, u16 rid,
- u16 length, const void *value)
-{
- int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
- int err = 0;
- unsigned count;
-
- if (length == 0)
- return -EINVAL;
-
- err = hermes_bap_seek(hw, bap, rid, 0);
- if (err)
- return err;
-
- hermes_write_reg(hw, dreg, length);
- hermes_write_reg(hw, dreg, rid);
-
- count = length - 1;
-
- hermes_write_bytes(hw, dreg, value, count << 1);
-
- err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
- rid, NULL);
-
- return err;
-}
-
-/*** Hermes AUX control ***/
-
-static inline void
-hermes_aux_setaddr(struct hermes *hw, u32 addr)
-{
- hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
- hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
-}
-
-static inline int
-hermes_aux_control(struct hermes *hw, int enabled)
-{
- int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
- int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
- int i;
-
- /* Already open? */
- if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
- return 0;
-
- hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
- hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
- hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
- hermes_write_reg(hw, HERMES_CONTROL, action);
-
- for (i = 0; i < 20; i++) {
- udelay(10);
- if (hermes_read_reg(hw, HERMES_CONTROL) ==
- desired_state)
- return 0;
- }
-
- return -EBUSY;
-}
-
-/*** Hermes programming ***/
-
-/* About to start programming data (Hermes I)
- * offset is the entry point
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about Intersil
- */
-static int hermesi_program_init(struct hermes *hw, u32 offset)
-{
- int err;
-
- /* Disable interrupts?*/
- /*hw->inten = 0x0;*/
- /*hermes_write_regn(hw, INTEN, 0);*/
- /*hermes_set_irqmask(hw, 0);*/
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Using init_cmd_wait rather than cmd_wait */
- err = hw->ops->init_cmd_wait(hw,
- 0x0100 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hw->ops->init_cmd_wait(hw,
- 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
- if (err)
- return err;
-
- err = hermes_aux_control(hw, 1);
- pr_debug("AUX enable returned %d\n", err);
-
- if (err)
- return err;
-
- pr_debug("Enabling volatile, EP 0x%08x\n", offset);
- err = hw->ops->init_cmd_wait(hw,
- HERMES_PROGRAM_ENABLE_VOLATILE,
- offset & 0xFFFFu,
- offset >> 16,
- 0,
- NULL);
- pr_debug("PROGRAM_ENABLE returned %d\n", err);
-
- return err;
-}
-
-/* Done programming data (Hermes I)
- *
- * Spectrum_cs' Symbol fw does not require this
- * wl_lkm Agere fw does
- * Don't know about Intersil
- */
-static int hermesi_program_end(struct hermes *hw)
-{
- struct hermes_response resp;
- int rc = 0;
- int err;
-
- rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
-
- pr_debug("PROGRAM_DISABLE returned %d, "
- "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
- rc, resp.resp0, resp.resp1, resp.resp2);
-
- if ((rc == 0) &&
- ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
- rc = -EIO;
-
- err = hermes_aux_control(hw, 0);
- pr_debug("AUX disable returned %d\n", err);
-
- /* Acknowledge any outstanding command */
- hermes_write_regn(hw, EVACK, 0xFFFF);
-
- /* Reinitialise, ignoring return */
- (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
- 0, 0, 0, NULL);
-
- return rc ? rc : err;
-}
-
-static int hermes_program_bytes(struct hermes *hw, const char *data,
- u32 addr, u32 len)
-{
- /* wl_lkm splits the programming into chunks of 2000 bytes.
- * This restriction appears to come from USB. The PCMCIA
- * adapters can program the whole lot in one go */
- hermes_aux_setaddr(hw, addr);
- hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
- return 0;
-}
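
The comment above notes that wl_lkm splits programming into 2000-byte chunks for USB's benefit. A minimal sketch of what such a chunked variant could look like, reusing the same helpers; the function itself is hypothetical and not part of this driver:

    /* Sketch: program a region in pieces no larger than 2000 bytes,
     * as a transfer-limited bus might require. */
    static int example_program_chunked(struct hermes *hw, const char *data,
                                       u32 addr, u32 len)
    {
            while (len) {
                    u32 chunk = min_t(u32, len, 2000);

                    hermes_aux_setaddr(hw, addr);
                    hermes_write_bytes(hw, HERMES_AUXDATA, data, chunk);
                    addr += chunk;
                    data += chunk;
                    len -= chunk;
            }
            return 0;
    }
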
-
-/* Read PDA from the adapter */
-static int hermes_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr,
- u16 pda_len)
-{
- int ret;
- u16 pda_size;
- u16 data_len = pda_len;
- __le16 *data = pda;
-
- if (hw->eeprom_pda) {
- /* PDA of spectrum symbol is in eeprom */
-
- /* Issue command to read EEPROM */
- ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
- if (ret)
- return ret;
- } else {
- /* wl_lkm does not include the PDA size in the PDA area.
- * We prepend the size and a CFG_PROD_DATA record ID to pda here,
- * so other routines don't have to be modified */
- pda[0] = cpu_to_le16(pda_len - 2);
- /* Includes CFG_PROD_DATA but not itself */
- pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
- data_len = pda_len - 4;
- data = pda + 2;
- }
-
- /* Open auxiliary port */
- ret = hermes_aux_control(hw, 1);
- pr_debug("AUX enable returned %d\n", ret);
- if (ret)
- return ret;
-
- /* Read PDA */
- hermes_aux_setaddr(hw, pda_addr);
- hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
-
- /* Close aux port */
- ret = hermes_aux_control(hw, 0);
- pr_debug("AUX disable returned %d\n", ret);
-
- /* Check PDA length */
- pda_size = le16_to_cpu(pda[0]);
- pr_debug("Actual PDA length %d, Max allowed %d\n",
- pda_size, pda_len);
- if (pda_size > pda_len)
- return -EINVAL;
-
- return 0;
-}
-
-static void hermes_lock_irqsave(spinlock_t *lock,
- unsigned long *flags) __acquires(lock)
-{
- spin_lock_irqsave(lock, *flags);
-}
-
-static void hermes_unlock_irqrestore(spinlock_t *lock,
- unsigned long *flags) __releases(lock)
-{
- spin_unlock_irqrestore(lock, *flags);
-}
-
-static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
-{
- spin_lock_irq(lock);
-}
-
-static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
-{
- spin_unlock_irq(lock);
-}
-
-/* Hermes operations for local buses */
-static const struct hermes_ops hermes_ops_local = {
- .init = hermes_init,
- .cmd_wait = hermes_docmd_wait,
- .init_cmd_wait = hermes_doicmd_wait,
- .allocate = hermes_allocate,
- .read_ltv = hermes_read_ltv,
- .read_ltv_pr = hermes_read_ltv,
- .write_ltv = hermes_write_ltv,
- .bap_pread = hermes_bap_pread,
- .bap_pwrite = hermes_bap_pwrite,
- .read_pda = hermes_read_pda,
- .program_init = hermesi_program_init,
- .program_end = hermesi_program_end,
- .program = hermes_program_bytes,
- .lock_irqsave = hermes_lock_irqsave,
- .unlock_irqrestore = hermes_unlock_irqrestore,
- .lock_irq = hermes_lock_irq,
- .unlock_irq = hermes_unlock_irq,
-};
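
Everything above is reached through hw->ops rather than by direct call, which is how bus variants (the USB flavour of this driver, for one) substitute their own implementations. A sketch of the calling pattern - this is essentially what the hermes_inquire() helper in hermes.h wraps:

    /* Sketch: fire an INQUIRE through whichever ops table is installed.
     * A positive return value is a firmware error code, per the
     * cmd_wait contract above. */
    static int example_inquire_tallies(struct hermes *hw)
    {
            struct hermes_response resp;

            return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE,
                                     HERMES_INQ_TALLIES, &resp);
    }
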
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.h b/drivers/net/wireless/intersil/orinoco/hermes.h
deleted file mode 100644
index 3dc561a5c..000000000
--- a/drivers/net/wireless/intersil/orinoco/hermes.h
+++ /dev/null
@@ -1,534 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* hermes.h
- *
- * Driver core for the "Hermes" wireless MAC controller, as used in
- * the Lucent Orinoco and Cabletron RoamAbout cards. It should also
- * work on the hfa3841 and hfa3842 MAC controller chips used in the
- * Prism I & II chipsets.
- *
- * This is not a complete driver, just low-level access routines for
- * the MAC controller itself.
- *
- * Based on the prism2 driver from Absolute Value Systems' linux-wlan
- * project, the Linux wvlan_cs driver, Lucent's HCF-Light
- * (wvlan_hcf.c) library, and the NetBSD wireless driver.
- *
- * Copyright (C) 2000, David Gibson, Linuxcare Australia.
- * (C) Copyright David Gibson, IBM Corp. 2001-2003.
- *
- * Portions taken from hfa384x.h.
- * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
- */
-
-#ifndef _HERMES_H
-#define _HERMES_H
-
-/* Notes on locking:
- *
- * As a module of low-level hardware access routines, there is no
- * locking. Users of this module should ensure that they serialize
- * access to the hermes structure, and to the hardware.
-*/
-
-#include <linux/if_ether.h>
-#include <linux/io.h>
-
-/*
- * Limits and constants
- */
-#define HERMES_ALLOC_LEN_MIN (4)
-#define HERMES_ALLOC_LEN_MAX (2400)
-#define HERMES_LTV_LEN_MAX (34)
-#define HERMES_BAP_DATALEN_MAX (4096)
-#define HERMES_BAP_OFFSET_MAX (4096)
-#define HERMES_PORTID_MAX (7)
-#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX + 1)
-#define HERMES_PDR_LEN_MAX (260) /* in bytes, from EK */
-#define HERMES_PDA_RECS_MAX (200) /* a guess */
-#define HERMES_PDA_LEN_MAX (1024) /* in bytes, from EK */
-#define HERMES_SCANRESULT_MAX (35)
-#define HERMES_CHINFORESULT_MAX (8)
-#define HERMES_MAX_MULTICAST (16)
-#define HERMES_MAGIC (0x7d1f)
-
-/*
- * Hermes register offsets
- */
-#define HERMES_CMD (0x00)
-#define HERMES_PARAM0 (0x02)
-#define HERMES_PARAM1 (0x04)
-#define HERMES_PARAM2 (0x06)
-#define HERMES_STATUS (0x08)
-#define HERMES_RESP0 (0x0A)
-#define HERMES_RESP1 (0x0C)
-#define HERMES_RESP2 (0x0E)
-#define HERMES_INFOFID (0x10)
-#define HERMES_RXFID (0x20)
-#define HERMES_ALLOCFID (0x22)
-#define HERMES_TXCOMPLFID (0x24)
-#define HERMES_SELECT0 (0x18)
-#define HERMES_OFFSET0 (0x1C)
-#define HERMES_DATA0 (0x36)
-#define HERMES_SELECT1 (0x1A)
-#define HERMES_OFFSET1 (0x1E)
-#define HERMES_DATA1 (0x38)
-#define HERMES_EVSTAT (0x30)
-#define HERMES_INTEN (0x32)
-#define HERMES_EVACK (0x34)
-#define HERMES_CONTROL (0x14)
-#define HERMES_SWSUPPORT0 (0x28)
-#define HERMES_SWSUPPORT1 (0x2A)
-#define HERMES_SWSUPPORT2 (0x2C)
-#define HERMES_AUXPAGE (0x3A)
-#define HERMES_AUXOFFSET (0x3C)
-#define HERMES_AUXDATA (0x3E)
-
-/*
- * CMD register bitmasks
- */
-#define HERMES_CMD_BUSY (0x8000)
-#define HERMES_CMD_AINFO (0x7f00)
-#define HERMES_CMD_MACPORT (0x0700)
-#define HERMES_CMD_RECL (0x0100)
-#define HERMES_CMD_WRITE (0x0100)
-#define HERMES_CMD_PROGMODE (0x0300)
-#define HERMES_CMD_CMDCODE (0x003f)
-
-/*
- * STATUS register bitmasks
- */
-#define HERMES_STATUS_RESULT (0x7f00)
-#define HERMES_STATUS_CMDCODE (0x003f)
-
-/*
- * OFFSET register bitmasks
- */
-#define HERMES_OFFSET_BUSY (0x8000)
-#define HERMES_OFFSET_ERR (0x4000)
-#define HERMES_OFFSET_DATAOFF (0x0ffe)
-
-/*
- * Event register bitmasks (INTEN, EVSTAT, EVACK)
- */
-#define HERMES_EV_TICK (0x8000)
-#define HERMES_EV_WTERR (0x4000)
-#define HERMES_EV_INFDROP (0x2000)
-#define HERMES_EV_INFO (0x0080)
-#define HERMES_EV_DTIM (0x0020)
-#define HERMES_EV_CMD (0x0010)
-#define HERMES_EV_ALLOC (0x0008)
-#define HERMES_EV_TXEXC (0x0004)
-#define HERMES_EV_TX (0x0002)
-#define HERMES_EV_RX (0x0001)
-
-/*
- * Command codes
- */
-/*--- Controller Commands ----------------------------*/
-#define HERMES_CMD_INIT (0x0000)
-#define HERMES_CMD_ENABLE (0x0001)
-#define HERMES_CMD_DISABLE (0x0002)
-#define HERMES_CMD_DIAG (0x0003)
-
-/*--- Buffer Mgmt Commands ---------------------------*/
-#define HERMES_CMD_ALLOC (0x000A)
-#define HERMES_CMD_TX (0x000B)
-
-/*--- Regulate Commands ------------------------------*/
-#define HERMES_CMD_NOTIFY (0x0010)
-#define HERMES_CMD_INQUIRE (0x0011)
-
-/*--- Configure Commands -----------------------------*/
-#define HERMES_CMD_ACCESS (0x0021)
-#define HERMES_CMD_DOWNLD (0x0022)
-
-/*--- Serial I/O Commands ----------------------------*/
-#define HERMES_CMD_READMIF (0x0030)
-#define HERMES_CMD_WRITEMIF (0x0031)
-
-/*--- Debugging Commands -----------------------------*/
-#define HERMES_CMD_TEST (0x0038)
-
-
-/* Test command arguments */
-#define HERMES_TEST_SET_CHANNEL 0x0800
-#define HERMES_TEST_MONITOR 0x0b00
-#define HERMES_TEST_STOP 0x0f00
-
-/* Authentication algorithms */
-#define HERMES_AUTH_OPEN 1
-#define HERMES_AUTH_SHARED_KEY 2
-
-/* WEP settings */
-#define HERMES_WEP_PRIVACY_INVOKED 0x0001
-#define HERMES_WEP_EXCL_UNENCRYPTED 0x0002
-#define HERMES_WEP_HOST_ENCRYPT 0x0010
-#define HERMES_WEP_HOST_DECRYPT 0x0080
-
-/* Symbol hostscan options */
-#define HERMES_HOSTSCAN_SYMBOL_5SEC 0x0001
-#define HERMES_HOSTSCAN_SYMBOL_ONCE 0x0002
-#define HERMES_HOSTSCAN_SYMBOL_PASSIVE 0x0040
-#define HERMES_HOSTSCAN_SYMBOL_BCAST 0x0080
-
-/*
- * Frame structures and constants
- */
-
-#define HERMES_DESCRIPTOR_OFFSET 0
-#define HERMES_802_11_OFFSET (14)
-#define HERMES_802_3_OFFSET (14 + 32)
-#define HERMES_802_2_OFFSET (14 + 32 + 14)
-#define HERMES_TXCNTL2_OFFSET (HERMES_802_3_OFFSET - 2)
-
-#define HERMES_RXSTAT_ERR (0x0003)
-#define HERMES_RXSTAT_BADCRC (0x0001)
-#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
-#define HERMES_RXSTAT_MIC (0x0010) /* Frame contains MIC */
-#define HERMES_RXSTAT_MACPORT (0x0700)
-#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */
-#define HERMES_RXSTAT_MIC_KEY_ID (0x1800) /* MIC key used */
-#define HERMES_RXSTAT_MSGTYPE (0xE000)
-#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */
-#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */
-#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
-
-/* Shift amount for key ID in RXSTAT and TXCTRL */
-#define HERMES_MIC_KEY_ID_SHIFT 11
-
-struct hermes_tx_descriptor {
- __le16 status;
- __le16 reserved1;
- __le16 reserved2;
- __le32 sw_support;
- u8 retry_count;
- u8 tx_rate;
- __le16 tx_control;
-} __packed;
-
-#define HERMES_TXSTAT_RETRYERR (0x0001)
-#define HERMES_TXSTAT_AGEDERR (0x0002)
-#define HERMES_TXSTAT_DISCON (0x0004)
-#define HERMES_TXSTAT_FORMERR (0x0008)
-
-#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */
-#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */
-#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */
-#define HERMES_TXCTRL_MIC (0x0010) /* 802.3 + TKIP */
-#define HERMES_TXCTRL_MIC_KEY_ID (0x1800) /* MIC Key ID mask */
-#define HERMES_TXCTRL_ALT_RTRY (0x0020)
-
-/* Inquiry constants and data types */
-
-#define HERMES_INQ_TALLIES (0xF100)
-#define HERMES_INQ_SCAN (0xF101)
-#define HERMES_INQ_CHANNELINFO (0xF102)
-#define HERMES_INQ_HOSTSCAN (0xF103)
-#define HERMES_INQ_HOSTSCAN_SYMBOL (0xF104)
-#define HERMES_INQ_LINKSTATUS (0xF200)
-#define HERMES_INQ_SEC_STAT_AGERE (0xF202)
-
-struct hermes_tallies_frame {
- __le16 TxUnicastFrames;
- __le16 TxMulticastFrames;
- __le16 TxFragments;
- __le16 TxUnicastOctets;
- __le16 TxMulticastOctets;
- __le16 TxDeferredTransmissions;
- __le16 TxSingleRetryFrames;
- __le16 TxMultipleRetryFrames;
- __le16 TxRetryLimitExceeded;
- __le16 TxDiscards;
- __le16 RxUnicastFrames;
- __le16 RxMulticastFrames;
- __le16 RxFragments;
- __le16 RxUnicastOctets;
- __le16 RxMulticastOctets;
- __le16 RxFCSErrors;
- __le16 RxDiscards_NoBuffer;
- __le16 TxDiscardsWrongSA;
- __le16 RxWEPUndecryptable;
- __le16 RxMsgInMsgFragments;
- __le16 RxMsgInBadMsgFragments;
- /* These last fields are probably not available in very old firmware */
- __le16 RxDiscards_WEPICVError;
- __le16 RxDiscards_WEPExcluded;
-} __packed;
-
-/* Grabbed from wlan-ng - Thanks Mark... - Jean II
- * This is the result of a scan inquiry command */
-/* Structure describing info about an Access Point */
-struct prism2_scan_apinfo {
- __le16 channel; /* Channel where the AP sits */
- __le16 noise; /* Noise level */
- __le16 level; /* Signal level */
- u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- __le16 beacon_interv; /* Beacon interval */
- __le16 capabilities; /* Capabilities */
- __le16 essid_len; /* ESSID length */
- u8 essid[32]; /* ESSID of the network */
- u8 rates[10]; /* Bit rate supported */
- __le16 proberesp_rate; /* Data rate of the response frame */
- __le16 atim; /* ATIM window time, Kus (hostscan only) */
-} __packed;
-
-/* Same stuff for the Lucent/Agere card.
- * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
-struct agere_scan_apinfo {
- __le16 channel; /* Channel where the AP sits */
- __le16 noise; /* Noise level */
- __le16 level; /* Signal level */
- u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- __le16 beacon_interv; /* Beacon interval */
- __le16 capabilities; /* Capabilities */
- /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
- __le16 essid_len; /* ESSID length */
- u8 essid[32]; /* ESSID of the network */
-} __packed;
-
-/* Moustafa: Scan structure for Symbol cards */
-struct symbol_scan_apinfo {
- u8 channel; /* Channel where the AP sits */
- u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */
- __le16 noise; /* Noise level */
- __le16 level; /* Signal level */
- u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
- __le16 beacon_interv; /* Beacon interval */
- __le16 capabilities; /* Capabilities */
- /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
- __le16 essid_len; /* ESSID length */
- u8 essid[32]; /* ESSID of the network */
- __le16 rates[5]; /* Bit rate supported */
- __le16 basic_rates; /* Basic rates bitmask */
- u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
- u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
-} __packed;
-
-union hermes_scan_info {
- struct agere_scan_apinfo a;
- struct prism2_scan_apinfo p;
- struct symbol_scan_apinfo s;
-};
-
-/* Extended scan struct for HERMES_INQ_CHANNELINFO.
- * wl_lkm calls this an ACS scan (Automatic Channel Select).
- * Keep out of union hermes_scan_info because it is much bigger than
- * the older scan structures. */
-struct agere_ext_scan_info {
- __le16 reserved0;
-
- u8 noise;
- u8 level;
- u8 rx_flow;
- u8 rate;
- __le16 reserved1[2];
-
- __le16 frame_control;
- __le16 dur_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 bssid[ETH_ALEN];
- __le16 sequence;
- u8 addr4[ETH_ALEN];
-
- __le16 data_length;
-
- /* Next 3 fields do not get filled in. */
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
- __le16 len_type;
-
- __le64 timestamp;
- __le16 beacon_interval;
- __le16 capabilities;
- u8 data[];
-} __packed;
-
-#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
-#define HERMES_LINKSTATUS_CONNECTED (0x0001)
-#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
-#define HERMES_LINKSTATUS_AP_CHANGE (0x0003)
-#define HERMES_LINKSTATUS_AP_OUT_OF_RANGE (0x0004)
-#define HERMES_LINKSTATUS_AP_IN_RANGE (0x0005)
-#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
-
-struct hermes_linkstatus {
- __le16 linkstatus; /* Link status */
-} __packed;
-
-struct hermes_response {
- u16 status, resp0, resp1, resp2;
-};
-
-/* "ID" structure - used for ESSID and station nickname */
-struct hermes_idstring {
- __le16 len;
- __le16 val[16];
-} __packed;
-
-struct hermes_multicast {
- u8 addr[HERMES_MAX_MULTICAST][ETH_ALEN];
-} __packed;
-
-/* Timeouts */
-#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
-
-struct hermes;
-
-/* Functions to access hardware */
-struct hermes_ops {
- int (*init)(struct hermes *hw);
- int (*cmd_wait)(struct hermes *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp);
- int (*init_cmd_wait)(struct hermes *hw, u16 cmd,
- u16 parm0, u16 parm1, u16 parm2,
- struct hermes_response *resp);
- int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
- int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
- u16 *length, void *buf);
- int (*read_ltv_pr)(struct hermes *hw, int bap, u16 rid,
- unsigned buflen, u16 *length, void *buf);
- int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
- u16 length, const void *value);
- int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
- u16 id, u16 offset);
- int (*bap_pwrite)(struct hermes *hw, int bap, const void *buf,
- int len, u16 id, u16 offset);
- int (*read_pda)(struct hermes *hw, __le16 *pda,
- u32 pda_addr, u16 pda_len);
- int (*program_init)(struct hermes *hw, u32 entry_point);
- int (*program_end)(struct hermes *hw);
- int (*program)(struct hermes *hw, const char *buf,
- u32 addr, u32 len);
- void (*lock_irqsave)(spinlock_t *lock, unsigned long *flags);
- void (*unlock_irqrestore)(spinlock_t *lock, unsigned long *flags);
- void (*lock_irq)(spinlock_t *lock);
- void (*unlock_irq)(spinlock_t *lock);
-};
-
-/* Basic control structure */
-struct hermes {
- void __iomem *iobase;
- int reg_spacing;
-#define HERMES_16BIT_REGSPACING 0
-#define HERMES_32BIT_REGSPACING 1
- u16 inten; /* Which interrupts should be enabled? */
- bool eeprom_pda;
- const struct hermes_ops *ops;
- void *priv;
-};
-
-/* Register access convenience macros */
-#define hermes_read_reg(hw, off) \
- (ioread16((hw)->iobase + ((off) << (hw)->reg_spacing)))
-#define hermes_write_reg(hw, off, val) \
- (iowrite16((val), (hw)->iobase + ((off) << (hw)->reg_spacing)))
-#define hermes_read_regn(hw, name) hermes_read_reg((hw), HERMES_##name)
-#define hermes_write_regn(hw, name, val) \
- hermes_write_reg((hw), HERMES_##name, (val))
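
The shift by reg_spacing is what lets one set of offsets serve both bus layouts. A worked example using HERMES_EVSTAT (0x30) from the table above:

    /*
     * hermes_read_regn(hw, EVSTAT) resolves to:
     *
     *   16-bit spacing: ioread16(iobase + (0x30 << 0)) = iobase + 0x30
     *   32-bit spacing: ioread16(iobase + (0x30 << 1)) = iobase + 0x60
     *
     * so callers never need to know how the bus spaces the registers.
     */
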
-
-/* Function prototypes */
-void hermes_struct_init(struct hermes *hw, void __iomem *address,
- int reg_spacing);
-
-/* Inline functions */
-
-static inline int hermes_present(struct hermes *hw)
-{
- return hermes_read_regn(hw, SWSUPPORT0) == HERMES_MAGIC;
-}
-
-static inline void hermes_set_irqmask(struct hermes *hw, u16 events)
-{
- hw->inten = events;
- hermes_write_regn(hw, INTEN, events);
-}
-
-static inline int hermes_enable_port(struct hermes *hw, int port)
-{
- return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
- 0, NULL);
-}
-
-static inline int hermes_disable_port(struct hermes *hw, int port)
-{
- return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
- 0, NULL);
-}
-
-/* Initiate an INQUIRE command (tallies or scan). The result will come as an
- * information frame in __orinoco_ev_info() */
-static inline int hermes_inquire(struct hermes *hw, u16 rid)
-{
- return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
-}
-
-#define HERMES_BYTES_TO_RECLEN(n) ((((n) + 1) / 2) + 1)
-#define HERMES_RECLEN_TO_BYTES(n) (((n) - 1) * 2)
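
A record length ("reclen") counts 16-bit words and covers the type word but not the length word itself, which is where the +1/-1 in these macros comes from. A worked example:

    /*
     * For a 6-byte value (e.g. a MAC address):
     *
     *   HERMES_BYTES_TO_RECLEN(6) = ((6 + 1) / 2) + 1 = 4
     *                               (3 data words + 1 type word)
     *   HERMES_RECLEN_TO_BYTES(4) = (4 - 1) * 2 = 6
     *
     * This matches hermes_write_ltv(), which writes the length word and
     * the RID word, then (length - 1) words of payload.
     */
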
-
-/* Note: hermes_read_words() takes its count in 16-bit words, while hermes_write_bytes() takes its count in bytes */
-static inline void hermes_read_words(struct hermes *hw, int off,
- void *buf, unsigned count)
-{
- off = off << hw->reg_spacing;
- ioread16_rep(hw->iobase + off, buf, count);
-}
-
-static inline void hermes_write_bytes(struct hermes *hw, int off,
- const char *buf, unsigned count)
-{
- off = off << hw->reg_spacing;
- iowrite16_rep(hw->iobase + off, buf, count >> 1);
- if (unlikely(count & 1))
- iowrite8(buf[count - 1], hw->iobase + off);
-}
-
-static inline void hermes_clear_words(struct hermes *hw, int off,
- unsigned count)
-{
- unsigned i;
-
- off = off << hw->reg_spacing;
-
- for (i = 0; i < count; i++)
- iowrite16(0, hw->iobase + off);
-}
-
-#define HERMES_READ_RECORD(hw, bap, rid, buf) \
- (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
-#define HERMES_READ_RECORD_PR(hw, bap, rid, buf) \
- (hw->ops->read_ltv_pr((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
-#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
- (hw->ops->write_ltv((hw), (bap), (rid), \
- HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
-
-static inline int hermes_read_wordrec(struct hermes *hw, int bap, u16 rid,
- u16 *word)
-{
- __le16 rec;
- int err;
-
- err = HERMES_READ_RECORD(hw, bap, rid, &rec);
- *word = le16_to_cpu(rec);
- return err;
-}
-
-static inline int hermes_read_wordrec_pr(struct hermes *hw, int bap, u16 rid,
- u16 *word)
-{
- __le16 rec;
- int err;
-
- err = HERMES_READ_RECORD_PR(hw, bap, rid, &rec);
- *word = le16_to_cpu(rec);
- return err;
-}
-
-static inline int hermes_write_wordrec(struct hermes *hw, int bap, u16 rid,
- u16 word)
-{
- __le16 rec = cpu_to_le16(word);
- return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
-}
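
As a usage sketch, single-value RIDs are normally accessed through these word wrappers. The BAP number below is an assumption (the orinoco core uses BAP 0 from process context); the RID constant comes from hermes_rid.h:

    /* Sketch: set the IBSS channel and read it back to verify. */
    static int example_set_channel(struct hermes *hw, u16 channel)
    {
            u16 readback;
            int err;

            err = hermes_write_wordrec(hw, 0 /* user BAP, assumed */,
                                       HERMES_RID_CNFOWNCHANNEL, channel);
            if (err)
                    return err;

            return hermes_read_wordrec(hw, 0 /* user BAP, assumed */,
                                       HERMES_RID_CNFOWNCHANNEL, &readback);
    }
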
-
-#endif /* _HERMES_H */
diff --git a/drivers/net/wireless/intersil/orinoco/hermes_dld.c b/drivers/net/wireless/intersil/orinoco/hermes_dld.c
deleted file mode 100644
index dbeadfcfe..000000000
--- a/drivers/net/wireless/intersil/orinoco/hermes_dld.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Hermes download helper.
- *
- * This helper:
- * - is capable of writing to the volatile area of the hermes device
- * - is currently not capable of writing to non-volatile areas
- * - provides helpers to identify and update plugin data
- * - is not capable of interpreting a fw image directly. That is up to
- * the main card driver.
- * - deals with Hermes I devices. It can probably be modified to deal
- * with Hermes II devices
- *
- * Copyright (C) 2007, David Kilroy
- *
- * Plug data code slightly modified from spectrum_cs driver
- * Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
- * Portions based on information in wl_lkm_718 Agere driver
- * COPYRIGHT (C) 2001-2004 by Agere Systems Inc. All Rights Reserved
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include "hermes.h"
-#include "hermes_dld.h"
-
-#define PFX "hermes_dld: "
-
-/* End markers used in dblocks */
-#define PDI_END 0x00000000 /* End of PDA */
-#define BLOCK_END 0xFFFFFFFF /* Last image block */
-#define TEXT_END 0x1A /* End of text header */
-
-/*
- * The following structures have little-endian fields, marked by the
- * __le types. Don't access them directly - use the inline accessor
- * functions defined below.
- */
-
-/*
- * The binary image to be downloaded consists of series of data blocks.
- * Each block has the following structure.
- */
-struct dblock {
- __le32 addr; /* adapter address where to write the block */
- __le16 len; /* length of the data only, in bytes */
- char data[]; /* data to be written */
-} __packed;
-
-/*
- * Plug Data References are located in the image after the last data
- * block. They refer to areas in the adapter memory where the plug data
- * items with matching ID should be written.
- */
-struct pdr {
- __le32 id; /* record ID */
- __le32 addr; /* adapter address where to write the data */
- __le32 len; /* expected length of the data, in bytes */
- char next[]; /* next PDR starts here */
-} __packed;
-
-/*
- * Plug Data Items are located in the EEPROM read from the adapter by
- * primary firmware. They refer to the device-specific data that should
- * be plugged into the secondary firmware.
- */
-struct pdi {
- __le16 len; /* length of ID and data, in words */
- __le16 id; /* record ID */
- char data[]; /* plug data */
-} __packed;
-
-/*** FW data block access functions ***/
-
-static inline u32
-dblock_addr(const struct dblock *blk)
-{
- return le32_to_cpu(blk->addr);
-}
-
-static inline u32
-dblock_len(const struct dblock *blk)
-{
- return le16_to_cpu(blk->len);
-}
-
-/*** PDR Access functions ***/
-
-static inline u32
-pdr_id(const struct pdr *pdr)
-{
- return le32_to_cpu(pdr->id);
-}
-
-static inline u32
-pdr_addr(const struct pdr *pdr)
-{
- return le32_to_cpu(pdr->addr);
-}
-
-static inline u32
-pdr_len(const struct pdr *pdr)
-{
- return le32_to_cpu(pdr->len);
-}
-
-/*** PDI Access functions ***/
-
-static inline u32
-pdi_id(const struct pdi *pdi)
-{
- return le16_to_cpu(pdi->id);
-}
-
-/* Return length of the data only, in bytes */
-static inline u32
-pdi_len(const struct pdi *pdi)
-{
- return 2 * (le16_to_cpu(pdi->len) - 1);
-}
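
So a PDI's len field counts 16-bit words spanning the record ID and the data, and pdi_len() strips the ID word before converting to bytes. A worked example:

    /*
     * A PDI with len = 4 (words):
     *
     *   pdi_len() = 2 * (4 - 1) = 6 bytes of plug data
     *
     * i.e. one word of record ID plus three words of payload - which is
     * also why stepping to the next PDI uses &pdi->data[pdi_len(pdi)]
     * below.
     */
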
-
-/*** Plug Data Functions ***/
-
-/*
- * Scan PDR for the record with the specified RECORD_ID.
- * If it's not found, return NULL.
- */
-static const struct pdr *
-hermes_find_pdr(const struct pdr *first_pdr, u32 record_id, const void *end)
-{
- const struct pdr *pdr = first_pdr;
-
- end -= sizeof(struct pdr);
-
- while (((void *) pdr <= end) &&
- (pdr_id(pdr) != PDI_END)) {
- /*
- * PDR area is currently not terminated by PDI_END.
- * It's followed by CRC records, which have the type
- * field where PDR has length. The type can be 0 or 1.
- */
- if (pdr_len(pdr) < 2)
- return NULL;
-
- /* If the record ID matches, we are done */
- if (pdr_id(pdr) == record_id)
- return pdr;
-
- pdr = (struct pdr *) pdr->next;
- }
- return NULL;
-}
-
-/* Scan production data items for a particular entry */
-static const struct pdi *
-hermes_find_pdi(const struct pdi *first_pdi, u32 record_id, const void *end)
-{
- const struct pdi *pdi = first_pdi;
-
- end -= sizeof(struct pdi);
-
- while (((void *) pdi <= end) &&
- (pdi_id(pdi) != PDI_END)) {
-
- /* If the record ID matches, we are done */
- if (pdi_id(pdi) == record_id)
- return pdi;
-
- pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
- }
- return NULL;
-}
-
-/* Process one Plug Data Item - find corresponding PDR and plug it */
-static int
-hermes_plug_pdi(struct hermes *hw, const struct pdr *first_pdr,
- const struct pdi *pdi, const void *pdr_end)
-{
- const struct pdr *pdr;
-
- /* Find the PDR corresponding to this PDI */
- pdr = hermes_find_pdr(first_pdr, pdi_id(pdi), pdr_end);
-
- /* No match found - safe to ignore */
- if (!pdr)
- return 0;
-
- /* Lengths of the data in PDI and PDR must match */
- if (pdi_len(pdi) != pdr_len(pdr))
- return -EINVAL;
-
- /* do the actual plugging */
- hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi));
-
- return 0;
-}
-
-/* Parse PDA and write the records into the adapter
- *
- * Attempts to write every record in the specified pda that also
- * has a valid production data record (PDR) in the firmware image.
- */
-int hermes_apply_pda(struct hermes *hw,
- const char *first_pdr,
- const void *pdr_end,
- const __le16 *pda,
- const void *pda_end)
-{
- int ret;
- const struct pdi *pdi;
- const struct pdr *pdr;
-
- pdr = (const struct pdr *) first_pdr;
- pda_end -= sizeof(struct pdi);
-
- /* Go through every PDI and plug them into the adapter */
- pdi = (const struct pdi *) (pda + 2);
- while (((void *) pdi <= pda_end) &&
- (pdi_id(pdi) != PDI_END)) {
- ret = hermes_plug_pdi(hw, pdr, pdi, pdr_end);
- if (ret)
- return ret;
-
- /* Increment to the next PDI */
- pdi = (const struct pdi *) &pdi->data[pdi_len(pdi)];
- }
- return 0;
-}
-
-/* Compute the total number of bytes in all blocks,
- * including the header data.
- */
-size_t
-hermes_blocks_length(const char *first_block, const void *end)
-{
- const struct dblock *blk = (const struct dblock *) first_block;
- int total_len = 0;
- int len;
-
- end -= sizeof(*blk);
-
- /* Skip all blocks to locate Plug Data References
- * (Spectrum CS) */
- while (((void *) blk <= end) &&
- (dblock_addr(blk) != BLOCK_END)) {
- len = dblock_len(blk);
- total_len += sizeof(*blk) + len;
- blk = (struct dblock *) &blk->data[len];
- }
-
- return total_len;
-}
-
-/*** Hermes programming ***/
-
-/* Program the data blocks */
-int hermes_program(struct hermes *hw, const char *first_block, const void *end)
-{
- const struct dblock *blk;
- u32 blkaddr;
- u32 blklen;
- int err = 0;
-
- blk = (const struct dblock *) first_block;
-
- if ((void *) blk > (end - sizeof(*blk)))
- return -EIO;
-
- blkaddr = dblock_addr(blk);
- blklen = dblock_len(blk);
-
- while ((blkaddr != BLOCK_END) &&
- (((void *) blk + blklen) <= end)) {
- pr_debug(PFX "Programming block of length %d "
- "to address 0x%08x\n", blklen, blkaddr);
-
- err = hw->ops->program(hw, blk->data, blkaddr, blklen);
- if (err)
- break;
-
- blk = (const struct dblock *) &blk->data[blklen];
-
- if ((void *) blk > (end - sizeof(*blk)))
- return -EIO;
-
- blkaddr = dblock_addr(blk);
- blklen = dblock_len(blk);
- }
- return err;
-}
-
-/*** Default plugging data for Hermes I ***/
-/* Values from wl_lkm_718/hcf/dhf.c */
-
-#define DEFINE_DEFAULT_PDR(pid, length, data) \
-static const struct { \
- __le16 len; \
- __le16 id; \
- u8 val[length]; \
-} __packed default_pdr_data_##pid = { \
- cpu_to_le16((sizeof(default_pdr_data_##pid)/ \
- sizeof(__le16)) - 1), \
- cpu_to_le16(pid), \
- data \
-}
-
-#define DEFAULT_PDR(pid) default_pdr_data_##pid
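
To make the macro concrete, here is roughly what the antenna-diversity invocation below, DEFINE_DEFAULT_PDR(0x0150, 2, "\x00\x3F"), expands to (written out by hand for illustration):

    /*
     * static const struct {
     *         __le16 len;
     *         __le16 id;
     *         u8 val[2];
     * } __packed default_pdr_data_0x0150 = {
     *         cpu_to_le16(2),      // (6 bytes / sizeof(__le16)) - 1
     *         cpu_to_le16(0x0150),
     *         "\x00\x3F"
     * };
     *
     * The resulting layout matches struct pdi, which is why
     * hermes_apply_pda_with_defaults() below can cast &DEFAULT_PDR(...)
     * to (struct pdi *).
     */
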
-
-/* HWIF Compatibility */
-DEFINE_DEFAULT_PDR(0x0005, 10, "\x00\x00\x06\x00\x01\x00\x01\x00\x01\x00");
-
-/* PPPPSign */
-DEFINE_DEFAULT_PDR(0x0108, 4, "\x00\x00\x00\x00");
-
-/* PPPPProf */
-DEFINE_DEFAULT_PDR(0x0109, 10, "\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00");
-
-/* Antenna diversity */
-DEFINE_DEFAULT_PDR(0x0150, 2, "\x00\x3F");
-
-/* Modem VCO band Set-up */
-DEFINE_DEFAULT_PDR(0x0160, 28,
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00");
-
-/* Modem Rx Gain Table Values */
-DEFINE_DEFAULT_PDR(0x0161, 256,
- "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
- "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
- "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
- "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
- "\x3F\x01\x3E\01\x3E\x01\x3D\x01"
- "\x3D\x01\x3C\01\x3C\x01\x3B\x01"
- "\x3B\x01\x3A\01\x3A\x01\x39\x01"
- "\x39\x01\x38\01\x38\x01\x37\x01"
- "\x37\x01\x36\01\x36\x01\x35\x01"
- "\x35\x01\x34\01\x34\x01\x33\x01"
- "\x33\x01\x32\x01\x32\x01\x31\x01"
- "\x31\x01\x30\x01\x30\x01\x7B\x01"
- "\x7B\x01\x7A\x01\x7A\x01\x79\x01"
- "\x79\x01\x78\x01\x78\x01\x77\x01"
- "\x77\x01\x76\x01\x76\x01\x75\x01"
- "\x75\x01\x74\x01\x74\x01\x73\x01"
- "\x73\x01\x72\x01\x72\x01\x71\x01"
- "\x71\x01\x70\x01\x70\x01\x68\x01"
- "\x68\x01\x67\x01\x67\x01\x66\x01"
- "\x66\x01\x65\x01\x65\x01\x57\x01"
- "\x57\x01\x56\x01\x56\x01\x55\x01"
- "\x55\x01\x54\x01\x54\x01\x53\x01"
- "\x53\x01\x52\x01\x52\x01\x51\x01"
- "\x51\x01\x50\x01\x50\x01\x48\x01"
- "\x48\x01\x47\x01\x47\x01\x46\x01"
- "\x46\x01\x45\x01\x45\x01\x44\x01"
- "\x44\x01\x43\x01\x43\x01\x42\x01"
- "\x42\x01\x41\x01\x41\x01\x40\x01"
- "\x40\x01\x40\x01\x40\x01\x40\x01"
- "\x40\x01\x40\x01\x40\x01\x40\x01"
- "\x40\x01\x40\x01\x40\x01\x40\x01"
- "\x40\x01\x40\x01\x40\x01\x40\x01");
-
-/* Write PDA according to certain rules.
- *
- * For every production data record, look for a previous setting in
- * the pda, and use that.
- *
- * For certain records, use defaults if they are not found in pda.
- */
-int hermes_apply_pda_with_defaults(struct hermes *hw,
- const char *first_pdr,
- const void *pdr_end,
- const __le16 *pda,
- const void *pda_end)
-{
- const struct pdr *pdr = (const struct pdr *) first_pdr;
- const struct pdi *first_pdi = (const struct pdi *) &pda[2];
- const struct pdi *pdi;
- const struct pdi *default_pdi = NULL;
- const struct pdi *outdoor_pdi;
- int record_id;
-
- pdr_end -= sizeof(struct pdr);
-
- while (((void *) pdr <= pdr_end) &&
- (pdr_id(pdr) != PDI_END)) {
- /*
- * For spectrum_cs firmwares,
- * PDR area is currently not terminated by PDI_END.
- * It's followed by CRC records, which have the type
- * field where PDR has length. The type can be 0 or 1.
- */
- if (pdr_len(pdr) < 2)
- break;
- record_id = pdr_id(pdr);
-
- pdi = hermes_find_pdi(first_pdi, record_id, pda_end);
- if (pdi)
- pr_debug(PFX "Found record 0x%04x at %p\n",
- record_id, pdi);
-
- switch (record_id) {
- case 0x110: /* Modem REFDAC values */
- case 0x120: /* Modem VGDAC values */
- outdoor_pdi = hermes_find_pdi(first_pdi, record_id + 1,
- pda_end);
- default_pdi = NULL;
- if (outdoor_pdi) {
- pdi = outdoor_pdi;
- pr_debug(PFX
- "Using outdoor record 0x%04x at %p\n",
- record_id + 1, pdi);
- }
- break;
- case 0x5: /* HWIF Compatibility */
- default_pdi = (struct pdi *) &DEFAULT_PDR(0x0005);
- break;
- case 0x108: /* PPPPSign */
- default_pdi = (struct pdi *) &DEFAULT_PDR(0x0108);
- break;
- case 0x109: /* PPPPProf */
- default_pdi = (struct pdi *) &DEFAULT_PDR(0x0109);
- break;
- case 0x150: /* Antenna diversity */
- default_pdi = (struct pdi *) &DEFAULT_PDR(0x0150);
- break;
- case 0x160: /* Modem VCO band Set-up */
- default_pdi = (struct pdi *) &DEFAULT_PDR(0x0160);
- break;
- case 0x161: /* Modem Rx Gain Table Values */
- default_pdi = (struct pdi *) &DEFAULT_PDR(0x0161);
- break;
- default:
- default_pdi = NULL;
- break;
- }
- if (!pdi && default_pdi) {
- /* Use default */
- pdi = default_pdi;
- pr_debug(PFX "Using default record 0x%04x at %p\n",
- record_id, pdi);
- }
-
- if (pdi) {
- /* Lengths of the data in PDI and PDR must match */
- if ((pdi_len(pdi) == pdr_len(pdr)) &&
- ((void *) pdi->data + pdi_len(pdi) < pda_end)) {
- /* do the actual plugging */
- hw->ops->program(hw, pdi->data, pdr_addr(pdr),
- pdi_len(pdi));
- }
- }
-
- pdr++;
- }
- return 0;
-}
diff --git a/drivers/net/wireless/intersil/orinoco/hermes_dld.h b/drivers/net/wireless/intersil/orinoco/hermes_dld.h
deleted file mode 100644
index b5377e232..000000000
--- a/drivers/net/wireless/intersil/orinoco/hermes_dld.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2007, David Kilroy
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- */
-#ifndef _HERMES_DLD_H
-#define _HERMES_DLD_H
-
-#include "hermes.h"
-
-int hermesi_program_init(struct hermes *hw, u32 offset);
-int hermesi_program_end(struct hermes *hw);
-int hermes_program(struct hermes *hw, const char *first_block, const void *end);
-
-int hermes_read_pda(struct hermes *hw,
- __le16 *pda,
- u32 pda_addr,
- u16 pda_len,
- int use_eeprom);
-int hermes_apply_pda(struct hermes *hw,
- const char *first_pdr,
- const void *pdr_end,
- const __le16 *pda,
- const void *pda_end);
-int hermes_apply_pda_with_defaults(struct hermes *hw,
- const char *first_pdr,
- const void *pdr_end,
- const __le16 *pda,
- const void *pda_end);
-
-size_t hermes_blocks_length(const char *first_block, const void *end);
-
-#endif /* _HERMES_DLD_H */
diff --git a/drivers/net/wireless/intersil/orinoco/hermes_rid.h b/drivers/net/wireless/intersil/orinoco/hermes_rid.h
deleted file mode 100644
index 42eb67dea..000000000
--- a/drivers/net/wireless/intersil/orinoco/hermes_rid.h
+++ /dev/null
@@ -1,165 +0,0 @@
-#ifndef _HERMES_RID_H
-#define _HERMES_RID_H
-
-/*
- * Configuration RIDs
- */
-#define HERMES_RID_CNFPORTTYPE 0xFC00
-#define HERMES_RID_CNFOWNMACADDR 0xFC01
-#define HERMES_RID_CNFDESIREDSSID 0xFC02
-#define HERMES_RID_CNFOWNCHANNEL 0xFC03
-#define HERMES_RID_CNFOWNSSID 0xFC04
-#define HERMES_RID_CNFOWNATIMWINDOW 0xFC05
-#define HERMES_RID_CNFSYSTEMSCALE 0xFC06
-#define HERMES_RID_CNFMAXDATALEN 0xFC07
-#define HERMES_RID_CNFWDSADDRESS 0xFC08
-#define HERMES_RID_CNFPMENABLED 0xFC09
-#define HERMES_RID_CNFPMEPS 0xFC0A
-#define HERMES_RID_CNFMULTICASTRECEIVE 0xFC0B
-#define HERMES_RID_CNFMAXSLEEPDURATION 0xFC0C
-#define HERMES_RID_CNFPMHOLDOVERDURATION 0xFC0D
-#define HERMES_RID_CNFOWNNAME 0xFC0E
-#define HERMES_RID_CNFOWNDTIMPERIOD 0xFC10
-#define HERMES_RID_CNFWDSADDRESS1 0xFC11
-#define HERMES_RID_CNFWDSADDRESS2 0xFC12
-#define HERMES_RID_CNFWDSADDRESS3 0xFC13
-#define HERMES_RID_CNFWDSADDRESS4 0xFC14
-#define HERMES_RID_CNFWDSADDRESS5 0xFC15
-#define HERMES_RID_CNFWDSADDRESS6 0xFC16
-#define HERMES_RID_CNFMULTICASTPMBUFFERING 0xFC17
-#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20
-#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21
-#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
-#define HERMES_RID_CNFDROPUNENCRYPTED 0xFC22
-#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23
-#define HERMES_RID_CNFDEFAULTKEY0 0xFC24
-#define HERMES_RID_CNFDEFAULTKEY1 0xFC25
-#define HERMES_RID_CNFMWOROBUST_AGERE 0xFC25
-#define HERMES_RID_CNFDEFAULTKEY2 0xFC26
-#define HERMES_RID_CNFDEFAULTKEY3 0xFC27
-#define HERMES_RID_CNFWEPFLAGS_INTERSIL 0xFC28
-#define HERMES_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
-#define HERMES_RID_CNFAUTHENTICATION 0xFC2A
-#define HERMES_RID_CNFMAXASSOCSTA 0xFC2B
-#define HERMES_RID_CNFKEYLENGTH_SYMBOL 0xFC2B
-#define HERMES_RID_CNFTXCONTROL 0xFC2C
-#define HERMES_RID_CNFROAMINGMODE 0xFC2D
-#define HERMES_RID_CNFHOSTAUTHENTICATION 0xFC2E
-#define HERMES_RID_CNFRCVCRCERROR 0xFC30
-#define HERMES_RID_CNFMMLIFE 0xFC31
-#define HERMES_RID_CNFALTRETRYCOUNT 0xFC32
-#define HERMES_RID_CNFBEACONINT 0xFC33
-#define HERMES_RID_CNFAPPCFINFO 0xFC34
-#define HERMES_RID_CNFSTAPCFINFO 0xFC35
-#define HERMES_RID_CNFPRIORITYQUSAGE 0xFC37
-#define HERMES_RID_CNFTIMCTRL 0xFC40
-#define HERMES_RID_CNFTHIRTY2TALLY 0xFC42
-#define HERMES_RID_CNFENHSECURITY 0xFC43
-#define HERMES_RID_CNFGROUPADDRESSES 0xFC80
-#define HERMES_RID_CNFCREATEIBSS 0xFC81
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD 0xFC82
-#define HERMES_RID_CNFRTSTHRESHOLD 0xFC83
-#define HERMES_RID_CNFTXRATECONTROL 0xFC84
-#define HERMES_RID_CNFPROMISCUOUSMODE 0xFC85
-#define HERMES_RID_CNFBASICRATES_SYMBOL 0xFC8A
-#define HERMES_RID_CNFPREAMBLE_SYMBOL 0xFC8C
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD0 0xFC90
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD1 0xFC91
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD2 0xFC92
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD3 0xFC93
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD4 0xFC94
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD5 0xFC95
-#define HERMES_RID_CNFFRAGMENTATIONTHRESHOLD6 0xFC96
-#define HERMES_RID_CNFRTSTHRESHOLD0 0xFC97
-#define HERMES_RID_CNFRTSTHRESHOLD1 0xFC98
-#define HERMES_RID_CNFRTSTHRESHOLD2 0xFC99
-#define HERMES_RID_CNFRTSTHRESHOLD3 0xFC9A
-#define HERMES_RID_CNFRTSTHRESHOLD4 0xFC9B
-#define HERMES_RID_CNFRTSTHRESHOLD5 0xFC9C
-#define HERMES_RID_CNFRTSTHRESHOLD6 0xFC9D
-#define HERMES_RID_CNFHOSTSCAN_SYMBOL 0xFCAB
-#define HERMES_RID_CNFSHORTPREAMBLE 0xFCB0
-#define HERMES_RID_CNFWEPKEYS_AGERE 0xFCB0
-#define HERMES_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
-#define HERMES_RID_CNFTXKEY_AGERE 0xFCB1
-#define HERMES_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
-#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2
-#define HERMES_RID_CNFBASICRATES 0xFCB3
-#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
-#define HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE 0xFCB4
-#define HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE 0xFCB5
-#define HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE 0xFCB6
-#define HERMES_RID_CNFADDMAPPEDTKIPKEY_AGERE 0xFCB7
-#define HERMES_RID_CNFREMMAPPEDTKIPKEY_AGERE 0xFCB8
-#define HERMES_RID_CNFSETWPACAPABILITIES_AGERE 0xFCB9
-#define HERMES_RID_CNFCACHEDPMKADDRESS 0xFCBA
-#define HERMES_RID_CNFREMOVEPMKADDRESS 0xFCBB
-#define HERMES_RID_CNFSCANCHANNELS2GHZ 0xFCC2
-#define HERMES_RID_CNFDISASSOCIATE 0xFCC8
-#define HERMES_RID_CNFTICKTIME 0xFCE0
-#define HERMES_RID_CNFSCANREQUEST 0xFCE1
-#define HERMES_RID_CNFJOINREQUEST 0xFCE2
-#define HERMES_RID_CNFAUTHENTICATESTATION 0xFCE3
-#define HERMES_RID_CNFCHANNELINFOREQUEST 0xFCE4
-#define HERMES_RID_CNFHOSTSCAN 0xFCE5
-
-/*
- * Information RIDs
- */
-#define HERMES_RID_MAXLOADTIME 0xFD00
-#define HERMES_RID_DOWNLOADBUFFER 0xFD01
-#define HERMES_RID_PRIID 0xFD02
-#define HERMES_RID_PRISUPRANGE 0xFD03
-#define HERMES_RID_CFIACTRANGES 0xFD04
-#define HERMES_RID_NICSERNUM 0xFD0A
-#define HERMES_RID_NICID 0xFD0B
-#define HERMES_RID_MFISUPRANGE 0xFD0C
-#define HERMES_RID_CFISUPRANGE 0xFD0D
-#define HERMES_RID_CHANNELLIST 0xFD10
-#define HERMES_RID_REGULATORYDOMAINS 0xFD11
-#define HERMES_RID_TEMPTYPE 0xFD12
-#define HERMES_RID_CIS 0xFD13
-#define HERMES_RID_STAID 0xFD20
-#define HERMES_RID_STASUPRANGE 0xFD21
-#define HERMES_RID_MFIACTRANGES 0xFD22
-#define HERMES_RID_CFIACTRANGES2 0xFD23
-#define HERMES_RID_SECONDARYVERSION_SYMBOL 0xFD24
-#define HERMES_RID_PORTSTATUS 0xFD40
-#define HERMES_RID_CURRENTSSID 0xFD41
-#define HERMES_RID_CURRENTBSSID 0xFD42
-#define HERMES_RID_COMMSQUALITY 0xFD43
-#define HERMES_RID_CURRENTTXRATE 0xFD44
-#define HERMES_RID_CURRENTBEACONINTERVAL 0xFD45
-#define HERMES_RID_CURRENTSCALETHRESHOLDS 0xFD46
-#define HERMES_RID_PROTOCOLRSPTIME 0xFD47
-#define HERMES_RID_SHORTRETRYLIMIT 0xFD48
-#define HERMES_RID_LONGRETRYLIMIT 0xFD49
-#define HERMES_RID_MAXTRANSMITLIFETIME 0xFD4A
-#define HERMES_RID_MAXRECEIVELIFETIME 0xFD4B
-#define HERMES_RID_CFPOLLABLE 0xFD4C
-#define HERMES_RID_AUTHENTICATIONALGORITHMS 0xFD4D
-#define HERMES_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
-#define HERMES_RID_DBMCOMMSQUALITY_INTERSIL 0xFD51
-#define HERMES_RID_CURRENTTXRATE1 0xFD80
-#define HERMES_RID_CURRENTTXRATE2 0xFD81
-#define HERMES_RID_CURRENTTXRATE3 0xFD82
-#define HERMES_RID_CURRENTTXRATE4 0xFD83
-#define HERMES_RID_CURRENTTXRATE5 0xFD84
-#define HERMES_RID_CURRENTTXRATE6 0xFD85
-#define HERMES_RID_OWNMACADDR 0xFD86
-#define HERMES_RID_SCANRESULTSTABLE 0xFD88
-#define HERMES_RID_CURRENT_COUNTRY_INFO 0xFD89
-#define HERMES_RID_CURRENT_WPA_IE 0xFD8A
-#define HERMES_RID_CURRENT_TKIP_IV 0xFD8B
-#define HERMES_RID_CURRENT_ASSOC_REQ_INFO 0xFD8C
-#define HERMES_RID_CURRENT_ASSOC_RESP_INFO 0xFD8D
-#define HERMES_RID_TXQUEUEEMPTY 0xFD91
-#define HERMES_RID_PHYTYPE 0xFDC0
-#define HERMES_RID_CURRENTCHANNEL 0xFDC1
-#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
-#define HERMES_RID_CCAMODE 0xFDC3
-#define HERMES_RID_SUPPORTEDDATARATES 0xFDC6
-#define HERMES_RID_BUILDSEQ 0xFFFE
-#define HERMES_RID_FWID 0xFFFF
-
-#endif
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
deleted file mode 100644
index 4fcca08e5..000000000
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ /dev/null
@@ -1,1362 +0,0 @@
-/* Encapsulate basic setting changes and retrieval on Hermes hardware
- *
- * See copyright notice in main.c
- */
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/if_arp.h>
-#include <linux/ieee80211.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-#include "hermes.h"
-#include "hermes_rid.h"
-#include "orinoco.h"
-
-#include "hw.h"
-
-#define SYMBOL_MAX_VER_LEN (14)
-
-/* Symbol firmware has a bug allocating buffers larger than this */
-#define TX_NICBUF_SIZE_BUG 1585
-
-/********************************************************************/
-/* Data tables */
-/********************************************************************/
-
-/* This table gives the actual meanings of the bitrate IDs returned
- * by the firmware. */
-static const struct {
- int bitrate; /* in 100s of kilobits */
- int automatic;
- u16 agere_txratectrl;
- u16 intersil_txratectrl;
-} bitrate_table[] = {
- {110, 1, 3, 15}, /* Entry 0 is the default */
- {10, 0, 1, 1},
- {10, 1, 1, 1},
- {20, 0, 2, 2},
- {20, 1, 6, 3},
- {55, 0, 4, 4},
- {55, 1, 7, 7},
- {110, 0, 5, 8},
-};
-#define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table)
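
A sketch of how the table is meant to be consumed: find the entry matching the requested rate and automatic flag, then feed the firmware whichever control word its family expects. The lookup helper is illustrative, not code from this file:

    /* Sketch: map a rate request to a table index. bitrate is in
     * 100s of kbit/s, so 55 means 5.5 Mbit/s, as in the table above. */
    static int example_bitrate_index(int bitrate, int automatic)
    {
            int i;

            for (i = 0; i < BITRATE_TABLE_SIZE; i++)
                    if (bitrate_table[i].bitrate == bitrate &&
                        bitrate_table[i].automatic == automatic)
                            return i;
            return -EINVAL;
    }
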
-
-/* Firmware version encoding */
-struct comp_id {
- u16 id, variant, major, minor;
-} __packed;
-
-static inline enum fwtype determine_firmware_type(struct comp_id *nic_id)
-{
- if (nic_id->id < 0x8000)
- return FIRMWARE_TYPE_AGERE;
- else if (nic_id->id == 0x8000 && nic_id->major == 0)
- return FIRMWARE_TYPE_SYMBOL;
- else
- return FIRMWARE_TYPE_INTERSIL;
-}
-
-/* Set priv->firmware_type and determine firmware properties.
- * This function can be called before we have registered with netdev,
- * so all errors go out with dev_* rather than printk.
- *
- * If non-NULL, fw_name receives a firmware description.
- * If non-NULL, hw_ver receives a hardware version.
- *
- * These are output via generic cfg80211 ethtool support.
- */
-int determine_fw_capabilities(struct orinoco_private *priv,
- char *fw_name, size_t fw_name_len,
- u32 *hw_ver)
-{
- struct device *dev = priv->dev;
- struct hermes *hw = &priv->hw;
- int err;
- struct comp_id nic_id, sta_id;
- unsigned int firmver;
- char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2)));
-
- /* Get the hardware version */
- err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
- if (err) {
- dev_err(dev, "Cannot read hardware identity: error %d\n",
- err);
- return err;
- }
-
- le16_to_cpus(&nic_id.id);
- le16_to_cpus(&nic_id.variant);
- le16_to_cpus(&nic_id.major);
- le16_to_cpus(&nic_id.minor);
- dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
- nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);
-
- if (hw_ver)
- *hw_ver = (((nic_id.id & 0xff) << 24) |
- ((nic_id.variant & 0xff) << 16) |
- ((nic_id.major & 0xff) << 8) |
- (nic_id.minor & 0xff));
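- /* Worked example (illustrative values): a NICID of
- * 001f:0001:0004:0002 would pack to hw_ver 0x1f010402 -
- * one byte per field, id in the top byte. */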
-
- priv->firmware_type = determine_firmware_type(&nic_id);
-
- /* Get the firmware version */
- err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
- if (err) {
- dev_err(dev, "Cannot read station identity: error %d\n",
- err);
- return err;
- }
-
- le16_to_cpus(&sta_id.id);
- le16_to_cpus(&sta_id.variant);
- le16_to_cpus(&sta_id.major);
- le16_to_cpus(&sta_id.minor);
- dev_info(dev, "Station identity %04x:%04x:%04x:%04x\n",
- sta_id.id, sta_id.variant, sta_id.major, sta_id.minor);
-
- switch (sta_id.id) {
- case 0x15:
- dev_err(dev, "Primary firmware is active\n");
- return -ENODEV;
- case 0x14b:
- dev_err(dev, "Tertiary firmware is active\n");
- return -ENODEV;
- case 0x1f: /* Intersil, Agere, Symbol Spectrum24 */
- case 0x21: /* Symbol Spectrum24 Trilogy */
- break;
- default:
- dev_notice(dev, "Unknown station ID, please report\n");
- break;
- }
-
- /* Default capabilities */
- priv->has_sensitivity = 1;
- priv->has_mwo = 0;
- priv->has_preamble = 0;
- priv->has_port3 = 1;
- priv->has_ibss = 1;
- priv->has_wep = 0;
- priv->has_big_wep = 0;
- priv->has_alt_txcntl = 0;
- priv->has_ext_scan = 0;
- priv->has_wpa = 0;
- priv->do_fw_download = 0;
-
- /* Determine capabilities from the firmware version */
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
- ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
- if (fw_name)
- snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
- sta_id.major, sta_id.minor);
-
- firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
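- /* Worked example (illustrative): Agere firmware 9.42 packs to
- * (9 << 16) | 42 = 0x9002a, exactly the has_wpa threshold
- * tested below. */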
-
- priv->has_ibss = (firmver >= 0x60006);
- priv->has_wep = (firmver >= 0x40020);
- priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
- Gold cards from the others? */
- priv->has_mwo = (firmver >= 0x60000);
- priv->has_pm = (firmver >= 0x40020); /* Doesn't work in 7.52? */
- priv->ibss_port = 1;
- priv->has_hostscan = (firmver >= 0x8000a);
- priv->do_fw_download = 1;
- priv->broken_monitor = (firmver >= 0x80000);
- priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
- priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
- priv->has_wpa = (firmver >= 0x9002a);
- /* Tested with Agere firmware :
- * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
- * Tested CableTron firmware : 4.32 => Anton */
- break;
- case FIRMWARE_TYPE_SYMBOL:
- /* Symbol, 3Com AirConnect, Intel, Ericsson WLAN */
- /* Intel MAC : 00:02:B3:* */
- /* 3Com MAC : 00:50:DA:* */
- memset(tmp, 0, sizeof(tmp));
- /* Get the Symbol firmware version */
- err = hw->ops->read_ltv_pr(hw, USER_BAP,
- HERMES_RID_SECONDARYVERSION_SYMBOL,
- SYMBOL_MAX_VER_LEN, NULL, &tmp);
- if (err) {
- dev_warn(dev, "Error %d reading Symbol firmware info. "
- "Wildly guessing capabilities...\n", err);
- firmver = 0;
- tmp[0] = '\0';
- } else {
- /* The firmware revision is a string; the format is
- * something like "V2.20-01".
- * Quick and dirty parsing... - Jean II
- */
- firmver = ((tmp[1] - '0') << 16)
- | ((tmp[3] - '0') << 12)
- | ((tmp[4] - '0') << 8)
- | ((tmp[6] - '0') << 4)
- | (tmp[7] - '0');
-
- tmp[SYMBOL_MAX_VER_LEN] = '\0';
- }
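- /* Worked example (illustrative): for "V2.20-01" the digits
- * parsed above are 2, 2, 0, 0 and 1, so firmver becomes
- * 0x22001 - one of the tested 3Com values noted below. */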
-
- if (fw_name)
- snprintf(fw_name, fw_name_len, "Symbol %s", tmp);
-
- priv->has_ibss = (firmver >= 0x20000);
- priv->has_wep = (firmver >= 0x15012);
- priv->has_big_wep = (firmver >= 0x20000);
- priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) ||
- (firmver >= 0x29000 && firmver < 0x30000) ||
- firmver >= 0x31000;
- priv->has_preamble = (firmver >= 0x20000);
- priv->ibss_port = 4;
-
- /* Symbol firmware is found on various cards, but
- * there has been no attempt to check firmware
- * download on non-spectrum_cs based cards.
- *
- * Given that the Agere firmware download works
- * differently, we should avoid doing a firmware
- * download with the Symbol algorithm on non-spectrum
- * cards.
- *
- * For now we can identify a spectrum_cs based card
- * because it has a firmware reset function.
- */
- priv->do_fw_download = (priv->stop_fw != NULL);
-
- priv->broken_disableport = (firmver == 0x25013) ||
- (firmver >= 0x30000 && firmver <= 0x31000);
- priv->has_hostscan = (firmver >= 0x31001) ||
- (firmver >= 0x29057 && firmver < 0x30000);
- /* Tested with Intel firmware : 0x20015 => Jean II */
- /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
- break;
- case FIRMWARE_TYPE_INTERSIL:
- /* D-Link, Linksys, Addtron, ZoomAir, and many others...
- * Samsung, Compaq 100/200 and Proxim are slightly
- * different and less well tested */
- /* D-Link MAC : 00:40:05:* */
- /* Addtron MAC : 00:90:D1:* */
- if (fw_name)
- snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
- sta_id.major, sta_id.minor, sta_id.variant);
-
- firmver = ((unsigned long)sta_id.major << 16) |
- ((unsigned long)sta_id.minor << 8) | sta_id.variant;
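- /* Worked example (illustrative): Intersil firmware 1.3.5
- * packs to (1 << 16) | (3 << 8) | 5 = 0x10305, which passes
- * the has_hostscan threshold (0x010301) below. */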
-
- priv->has_ibss = (firmver >= 0x000700); /* FIXME */
- priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
- priv->has_pm = (firmver >= 0x000700);
- priv->has_hostscan = (firmver >= 0x010301);
-
- if (firmver >= 0x000800)
- priv->ibss_port = 0;
- else {
- dev_notice(dev, "Intersil firmware earlier than v0.8.x"
- " - several features not supported\n");
- priv->ibss_port = 1;
- }
- break;
- }
- if (fw_name)
- dev_info(dev, "Firmware determined as %s\n", fw_name);
-
-#ifndef CONFIG_HERMES_PRISM
- if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
- dev_err(dev, "Support for Prism chipset is not enabled\n");
- return -ENODEV;
- }
-#endif
-
- return 0;
-}
-
-/* Read settings from EEPROM into our private structure.
- * MAC address gets dropped into the caller's buffer.
- * Can be called before netdev registration.
- */
-int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
-{
- struct device *dev = priv->dev;
- struct hermes_idstring nickbuf;
- struct hermes *hw = &priv->hw;
- int len;
- int err;
- u16 reclen;
-
- /* Get the MAC address */
- err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- ETH_ALEN, NULL, dev_addr);
- if (err) {
- dev_warn(dev, "Failed to read MAC address!\n");
- goto out;
- }
-
- dev_dbg(dev, "MAC address %pM\n", dev_addr);
-
- /* Get the station name */
- err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- sizeof(nickbuf), &reclen, &nickbuf);
- if (err) {
- dev_err(dev, "failed to read station name\n");
- goto out;
- }
- if (nickbuf.len)
- len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
- else
- len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
- memcpy(priv->nick, &nickbuf.val, len);
- priv->nick[len] = '\0';
-
- dev_dbg(dev, "Station name \"%s\"\n", priv->nick);
-
- /* Get allowed channels */
- err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CHANNELLIST,
- &priv->channel_mask);
- if (err) {
- dev_err(dev, "Failed to read channel list!\n");
- goto out;
- }
-
- /* Get initial AP density */
- err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
- &priv->ap_density);
- if (err || priv->ap_density < 1 || priv->ap_density > 3)
- priv->has_sensitivity = 0;
-
- /* Get initial RTS threshold */
- err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
- &priv->rts_thresh);
- if (err) {
- dev_err(dev, "Failed to read RTS threshold!\n");
- goto out;
- }
-
- /* Get initial fragmentation settings */
- if (priv->has_mwo)
- err = hermes_read_wordrec_pr(hw, USER_BAP,
- HERMES_RID_CNFMWOROBUST_AGERE,
- &priv->mwo_robust);
- else
- err = hermes_read_wordrec_pr(hw, USER_BAP,
- HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
- &priv->frag_thresh);
- if (err) {
- dev_err(dev, "Failed to read fragmentation settings!\n");
- goto out;
- }
-
- /* Power management setup */
- if (priv->has_pm) {
- priv->pm_on = 0;
- priv->pm_mcast = 1;
- err = hermes_read_wordrec_pr(hw, USER_BAP,
- HERMES_RID_CNFMAXSLEEPDURATION,
- &priv->pm_period);
- if (err) {
- dev_err(dev, "Failed to read power management "
- "period!\n");
- goto out;
- }
- err = hermes_read_wordrec_pr(hw, USER_BAP,
- HERMES_RID_CNFPMHOLDOVERDURATION,
- &priv->pm_timeout);
- if (err) {
- dev_err(dev, "Failed to read power management "
- "timeout!\n");
- goto out;
- }
- }
-
- /* Preamble setup */
- if (priv->has_preamble) {
- err = hermes_read_wordrec_pr(hw, USER_BAP,
- HERMES_RID_CNFPREAMBLE_SYMBOL,
- &priv->preamble);
- if (err) {
- dev_err(dev, "Failed to read preamble setup\n");
- goto out;
- }
- }
-
- /* Retry settings */
- err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
- &priv->short_retry_limit);
- if (err) {
- dev_err(dev, "Failed to read short retry limit\n");
- goto out;
- }
-
- err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
- &priv->long_retry_limit);
- if (err) {
- dev_err(dev, "Failed to read long retry limit\n");
- goto out;
- }
-
- err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
- &priv->retry_lifetime);
- if (err) {
- dev_err(dev, "Failed to read max retry lifetime\n");
- goto out;
- }
-
-out:
- return err;
-}
-
-/* Can be called before netdev registration */
-int orinoco_hw_allocate_fid(struct orinoco_private *priv)
-{
- struct device *dev = priv->dev;
- struct hermes *hw = &priv->hw;
- int err;
-
- err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
- if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
- /* Try workaround for old Symbol firmware bug */
- priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
- err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
-
- dev_warn(dev, "Firmware ALLOC bug detected "
- "(old Symbol firmware?). Work around %s\n",
- err ? "failed!" : "ok.");
- }
-
- return err;
-}
-
-int orinoco_get_bitratemode(int bitrate, int automatic)
-{
- int ratemode = -1;
- int i;
-
- if ((bitrate != 10) && (bitrate != 20) &&
- (bitrate != 55) && (bitrate != 110))
- return ratemode;
-
- for (i = 0; i < BITRATE_TABLE_SIZE; i++) {
- if ((bitrate_table[i].bitrate == bitrate) &&
- (bitrate_table[i].automatic == automatic)) {
- ratemode = i;
- break;
- }
- }
- return ratemode;
-}
-
-void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic)
-{
- BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE));
-
- *bitrate = bitrate_table[ratemode].bitrate * 100000;
- *automatic = bitrate_table[ratemode].automatic;
-}
-
-int orinoco_hw_program_rids(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct wireless_dev *wdev = netdev_priv(dev);
- struct hermes *hw = &priv->hw;
- int err;
- struct hermes_idstring idbuf;
-
- /* Set the MAC address */
- err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
- HERMES_BYTES_TO_RECLEN(ETH_ALEN),
- dev->dev_addr);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting MAC address\n",
- dev->name, err);
- return err;
- }
-
- /* Set up the link mode */
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE,
- priv->port_type);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting port type\n",
- dev->name, err);
- return err;
- }
- /* Set the channel/frequency */
- if (priv->channel != 0 && priv->iw_mode != NL80211_IFTYPE_STATION) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFOWNCHANNEL,
- priv->channel);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting channel %d\n",
- dev->name, err, priv->channel);
- return err;
- }
- }
-
- if (priv->has_ibss) {
- u16 createibss;
-
- if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) {
- printk(KERN_WARNING "%s: This firmware requires an "
- "ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
- /* With wvlan_cs, in this case, we would crash.
- * Hopefully, this driver will behave better...
- * Jean II */
- createibss = 0;
- } else {
- createibss = priv->createibss;
- }
-
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFCREATEIBSS,
- createibss);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n",
- dev->name, err);
- return err;
- }
- }
-
- /* Set the desired BSSID */
- err = __orinoco_hw_set_wap(priv);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting AP address\n",
- dev->name, err);
- return err;
- }
-
- /* Set the desired ESSID */
- idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
- memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
- /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
- err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
- HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2),
- &idbuf);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
- dev->name, err);
- return err;
- }
- err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
- HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2),
- &idbuf);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
- dev->name, err);
- return err;
- }
-
- /* Set the station name */
- idbuf.len = cpu_to_le16(strlen(priv->nick));
- memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
- err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- HERMES_BYTES_TO_RECLEN(strlen(priv->nick) + 2),
- &idbuf);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting nickname\n",
- dev->name, err);
- return err;
- }
-
- /* Set AP density */
- if (priv->has_sensitivity) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFSYSTEMSCALE,
- priv->ap_density);
- if (err) {
- printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
- "Disabling sensitivity control\n",
- dev->name, err);
-
- priv->has_sensitivity = 0;
- }
- }
-
- /* Set RTS threshold */
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
- priv->rts_thresh);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting RTS threshold\n",
- dev->name, err);
- return err;
- }
-
- /* Set fragmentation threshold or MWO robustness */
- if (priv->has_mwo)
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMWOROBUST_AGERE,
- priv->mwo_robust);
- else
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
- priv->frag_thresh);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting fragmentation\n",
- dev->name, err);
- return err;
- }
-
- /* Set bitrate */
- err = __orinoco_hw_set_bitrate(priv);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting bitrate\n",
- dev->name, err);
- return err;
- }
-
- /* Set power management */
- if (priv->has_pm) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPMENABLED,
- priv->pm_on);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting up PM\n",
- dev->name, err);
- return err;
- }
-
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMULTICASTRECEIVE,
- priv->pm_mcast);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting up PM\n",
- dev->name, err);
- return err;
- }
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMAXSLEEPDURATION,
- priv->pm_period);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting up PM\n",
- dev->name, err);
- return err;
- }
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPMHOLDOVERDURATION,
- priv->pm_timeout);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting up PM\n",
- dev->name, err);
- return err;
- }
- }
-
- /* Set preamble - only for Symbol so far... */
- if (priv->has_preamble) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPREAMBLE_SYMBOL,
- priv->preamble);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting preamble\n",
- dev->name, err);
- return err;
- }
- }
-
- /* Set up encryption */
- if (priv->has_wep || priv->has_wpa) {
- err = __orinoco_hw_setup_enc(priv);
- if (err) {
- printk(KERN_ERR "%s: Error %d activating encryption\n",
- dev->name, err);
- return err;
- }
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
- /* Enable monitor mode */
- dev->type = ARPHRD_IEEE80211;
- err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
- HERMES_TEST_MONITOR, 0, NULL);
- } else {
- /* Disable monitor mode */
- dev->type = ARPHRD_ETHER;
- err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
- HERMES_TEST_STOP, 0, NULL);
- }
- if (err)
- return err;
-
- /* Reset promiscuity / multicast*/
- priv->promiscuous = 0;
- priv->mc_count = 0;
-
- /* Record mode change */
- wdev->iftype = priv->iw_mode;
-
- return 0;
-}
-
-/* Get tsc from the firmware */
-int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
-{
- struct hermes *hw = &priv->hw;
- int err = 0;
- u8 tsc_arr[4][ORINOCO_SEQ_LEN];
-
- if ((key < 0) || (key >= 4))
- return -EINVAL;
-
- err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
- sizeof(tsc_arr), NULL, &tsc_arr);
- if (!err)
- memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
-
- return err;
-}
-
-int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- int ratemode = priv->bitratemode;
- int err = 0;
-
- if (ratemode >= BITRATE_TABLE_SIZE) {
- printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
- priv->ndev->name, ratemode);
- return -EINVAL;
- }
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFTXRATECONTROL,
- bitrate_table[ratemode].agere_txratectrl);
- break;
- case FIRMWARE_TYPE_INTERSIL:
- case FIRMWARE_TYPE_SYMBOL:
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFTXRATECONTROL,
- bitrate_table[ratemode].intersil_txratectrl);
- break;
- default:
- BUG();
- }
-
- return err;
-}
-
-int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
-{
- struct hermes *hw = &priv->hw;
- int i;
- int err = 0;
- u16 val;
-
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CURRENTTXRATE, &val);
- if (err)
- return err;
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
- /* Note : in Lucent firmware, the return value of
- * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
- * and therefore is totally different from the
- * encoding of HERMES_RID_CNFTXRATECONTROL.
- * Don't forget that 6Mb/s is really 5.5Mb/s */
- if (val == 6)
- *bitrate = 5500000;
- else
- *bitrate = val * 1000000;
- break;
- case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
- case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
- for (i = 0; i < BITRATE_TABLE_SIZE; i++)
- if (bitrate_table[i].intersil_txratectrl == val) {
- *bitrate = bitrate_table[i].bitrate * 100000;
- break;
- }
-
- if (i >= BITRATE_TABLE_SIZE) {
- printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
- priv->ndev->name, val);
- err = -EIO;
- }
-
- break;
- default:
- BUG();
- }
-
- return err;
-}
-
-/* Set fixed AP address */
-int __orinoco_hw_set_wap(struct orinoco_private *priv)
-{
- int roaming_flag;
- int err = 0;
- struct hermes *hw = &priv->hw;
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- /* not supported */
- break;
- case FIRMWARE_TYPE_INTERSIL:
- if (priv->bssid_fixed)
- roaming_flag = 2;
- else
- roaming_flag = 1;
-
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFROAMINGMODE,
- roaming_flag);
- break;
- case FIRMWARE_TYPE_SYMBOL:
- err = HERMES_WRITE_RECORD(hw, USER_BAP,
- HERMES_RID_CNFMANDATORYBSSID_SYMBOL,
- &priv->desired_bssid);
- break;
- }
- return err;
-}
-
-/* Change the WEP keys and/or the current keys. Can be called
- * either from __orinoco_hw_setup_enc() or directly from
- * orinoco_ioctl_setiwencode(). In the latter case the association
- * with the AP is not broken (if the firmware can handle it),
- * which is needed for 802.1x implementations. */
-int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- int err = 0;
- int i;
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- {
- struct orinoco_key keys[ORINOCO_MAX_KEYS];
-
- memset(&keys, 0, sizeof(keys));
- for (i = 0; i < ORINOCO_MAX_KEYS; i++) {
- int len = min(priv->keys[i].key_len,
- ORINOCO_MAX_KEY_SIZE);
- memcpy(&keys[i].data, priv->keys[i].key, len);
- if (len > SMALL_KEY_SIZE)
- keys[i].len = cpu_to_le16(LARGE_KEY_SIZE);
- else if (len > 0)
- keys[i].len = cpu_to_le16(SMALL_KEY_SIZE);
- else
- keys[i].len = cpu_to_le16(0);
- }
-
- err = HERMES_WRITE_RECORD(hw, USER_BAP,
- HERMES_RID_CNFWEPKEYS_AGERE,
- &keys);
- if (err)
- return err;
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFTXKEY_AGERE,
- priv->tx_key);
- if (err)
- return err;
- break;
- }
- case FIRMWARE_TYPE_INTERSIL:
- case FIRMWARE_TYPE_SYMBOL:
- {
- int keylen;
-
- /* Force uniform key length to work around
- * firmware bugs */
- keylen = priv->keys[priv->tx_key].key_len;
-
- if (keylen > LARGE_KEY_SIZE) {
- printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
- priv->ndev->name, priv->tx_key, keylen);
- return -E2BIG;
- } else if (keylen > SMALL_KEY_SIZE)
- keylen = LARGE_KEY_SIZE;
- else if (keylen > 0)
- keylen = SMALL_KEY_SIZE;
- else
- keylen = 0;
-
- /* Write all 4 keys */
- for (i = 0; i < ORINOCO_MAX_KEYS; i++) {
- u8 key[LARGE_KEY_SIZE] = { 0 };
-
- memcpy(key, priv->keys[i].key,
- priv->keys[i].key_len);
-
- err = hw->ops->write_ltv(hw, USER_BAP,
- HERMES_RID_CNFDEFAULTKEY0 + i,
- HERMES_BYTES_TO_RECLEN(keylen),
- key);
- if (err)
- return err;
- }
-
- /* Write the index of the key used in transmission */
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFWEPDEFAULTKEYID,
- priv->tx_key);
- if (err)
- return err;
- }
- break;
- }
-
- return 0;
-}
-
-int __orinoco_hw_setup_enc(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- int err = 0;
- int master_wep_flag;
- int auth_flag;
- int enc_flag;
-
- /* Setup WEP keys */
- if (priv->encode_alg == ORINOCO_ALG_WEP)
- __orinoco_hw_setup_wepkeys(priv);
-
- if (priv->wep_restrict)
- auth_flag = HERMES_AUTH_SHARED_KEY;
- else
- auth_flag = HERMES_AUTH_OPEN;
-
- if (priv->wpa_enabled)
- enc_flag = 2;
- else if (priv->encode_alg == ORINOCO_ALG_WEP)
- enc_flag = 1;
- else
- enc_flag = 0;
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
- if (priv->encode_alg == ORINOCO_ALG_WEP) {
- /* Enable the shared-key authentication. */
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFAUTHENTICATION_AGERE,
- auth_flag);
- if (err)
- return err;
- }
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFWEPENABLED_AGERE,
- enc_flag);
- if (err)
- return err;
-
- if (priv->has_wpa) {
- /* Set WPA key management */
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
- priv->key_mgmt);
- if (err)
- return err;
- }
-
- break;
-
- case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
- case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
- if (priv->encode_alg == ORINOCO_ALG_WEP) {
- if (priv->wep_restrict ||
- (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
- master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
- HERMES_WEP_EXCL_UNENCRYPTED;
- else
- master_wep_flag = HERMES_WEP_PRIVACY_INVOKED;
-
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFAUTHENTICATION,
- auth_flag);
- if (err)
- return err;
- } else
- master_wep_flag = 0;
-
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
- master_wep_flag |= HERMES_WEP_HOST_DECRYPT;
-
- /* Master WEP setting : on/off */
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFWEPFLAGS_INTERSIL,
- master_wep_flag);
- if (err)
- return err;
-
- break;
- }
-
- return 0;
-}
-
-/* key must be 32 bytes, including the tx and rx MIC keys.
- * rsc must be NULL or up to 8 bytes
- * tsc must be NULL or up to 8 bytes
- */
-int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, const u8 *key, size_t key_len,
- const u8 *rsc, size_t rsc_len,
- const u8 *tsc, size_t tsc_len)
-{
- struct {
- __le16 idx;
- u8 rsc[ORINOCO_SEQ_LEN];
- struct {
- u8 key[TKIP_KEYLEN];
- u8 tx_mic[MIC_KEYLEN];
- u8 rx_mic[MIC_KEYLEN];
- } tkip;
- u8 tsc[ORINOCO_SEQ_LEN];
- } __packed buf;
- struct hermes *hw = &priv->hw;
- int ret;
- int err;
- int k;
- u16 xmitting;
-
- key_idx &= 0x3;
-
- if (set_tx)
- key_idx |= 0x8000;
-
- buf.idx = cpu_to_le16(key_idx);
- if (key_len != sizeof(buf.tkip))
- return -EINVAL;
- memcpy(&buf.tkip, key, sizeof(buf.tkip));
-
- if (rsc_len > sizeof(buf.rsc))
- rsc_len = sizeof(buf.rsc);
-
- if (tsc_len > sizeof(buf.tsc))
- tsc_len = sizeof(buf.tsc);
-
- memset(buf.rsc, 0, sizeof(buf.rsc));
- memset(buf.tsc, 0, sizeof(buf.tsc));
-
- if (rsc != NULL)
- memcpy(buf.rsc, rsc, rsc_len);
-
- if (tsc != NULL)
- memcpy(buf.tsc, tsc, tsc_len);
- else
- buf.tsc[4] = 0x10;
-
- /* Wait up to 100ms for tx queue to empty */
- for (k = 100; k > 0; k--) {
- udelay(1000);
- ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
- &xmitting);
- if (ret || !xmitting)
- break;
- }
-
- if (k == 0)
- ret = -ETIMEDOUT;
-
- err = HERMES_WRITE_RECORD(hw, USER_BAP,
- HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
- &buf);
-
- return ret ? ret : err;
-}
-
-int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
-{
- struct hermes *hw = &priv->hw;
- int err;
-
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
- key_idx);
- if (err)
- printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
- priv->ndev->name, err, key_idx);
- return err;
-}
-
-int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
- struct net_device *dev,
- int mc_count, int promisc)
-{
- struct hermes *hw = &priv->hw;
- int err = 0;
-
- if (promisc != priv->promiscuous) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPROMISCUOUSMODE,
- promisc);
- if (err) {
- printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n",
- priv->ndev->name, err);
- } else
- priv->promiscuous = promisc;
- }
-
- /* If we're not in promiscuous mode, then we need to set the
- * group address if either we want to multicast, or if we were
- * multicasting and want to stop */
- if (!promisc && (mc_count || priv->mc_count)) {
- struct netdev_hw_addr *ha;
- struct hermes_multicast mclist;
- int i = 0;
-
- netdev_for_each_mc_addr(ha, dev) {
- if (i == mc_count)
- break;
- memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
- }
-
- err = hw->ops->write_ltv(hw, USER_BAP,
- HERMES_RID_CNFGROUPADDRESSES,
- HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
- &mclist);
- if (err)
- printk(KERN_ERR "%s: Error %d setting multicast list.\n",
- priv->ndev->name, err);
- else
- priv->mc_count = mc_count;
- }
- return err;
-}
-
-/* Return : < 0 -> error code ; >= 0 -> length */
-int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
- char buf[IW_ESSID_MAX_SIZE + 1])
-{
- struct hermes *hw = &priv->hw;
- int err = 0;
- struct hermes_idstring essidbuf;
- char *p = (char *)(&essidbuf.val);
- int len;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (strlen(priv->desired_essid) > 0) {
- /* We read the desired SSID from the hardware rather
- than from priv->desired_essid, just in case the
- firmware is allowed to change it on us. I'm not
- sure about this */
- /* My guess is that the OWNSSID should always be whatever
- * we set to the card, whereas CURRENT_SSID is the one that
- * may change... - Jean II */
- u16 rid;
-
- *active = 1;
-
- rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
- HERMES_RID_CNFDESIREDSSID;
-
- err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
- NULL, &essidbuf);
- if (err)
- goto fail_unlock;
- } else {
- *active = 0;
-
- err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
- sizeof(essidbuf), NULL, &essidbuf);
- if (err)
- goto fail_unlock;
- }
-
- len = le16_to_cpu(essidbuf.len);
- BUG_ON(len > IW_ESSID_MAX_SIZE);
-
- memset(buf, 0, IW_ESSID_MAX_SIZE);
- memcpy(buf, p, len);
- err = len;
-
- fail_unlock:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-int orinoco_hw_get_freq(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- int err = 0;
- u16 channel;
- int freq = 0;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL,
- &channel);
- if (err)
- goto out;
-
- /* Intersil firmware 1.3.5 returns 0 when the interface is down */
- if (channel == 0) {
- err = -EBUSY;
- goto out;
- }
-
- if ((channel < 1) || (channel > NUM_CHANNELS)) {
- printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
- priv->ndev->name, channel);
- err = -EBUSY;
- goto out;
-
- }
- freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
-
- out:
- orinoco_unlock(priv, &flags);
-
- if (err > 0)
- err = -EBUSY;
- return err ? err : freq;
-}
-
-int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
- int *numrates, s32 *rates, int max)
-{
- struct hermes *hw = &priv->hw;
- struct hermes_idstring list;
- unsigned char *p = (unsigned char *)&list.val;
- int err = 0;
- int num;
- int i;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
- sizeof(list), NULL, &list);
- orinoco_unlock(priv, &flags);
-
- if (err)
- return err;
-
- num = le16_to_cpu(list.len);
- *numrates = num;
- num = min(num, max);
-
- for (i = 0; i < num; i++)
- rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */
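- /* Worked example (illustrative): a rate byte of 0x96 masks
- * to 0x16 = 22 units of 500 kbit/s, i.e. 11000000 bps; the
- * top bit, which flags a basic rate in 802.11, is discarded. */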
-
- return 0;
-}
-
-int orinoco_hw_trigger_scan(struct orinoco_private *priv,
- const struct cfg80211_ssid *ssid)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- unsigned long flags;
- int err = 0;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- /* Scanning with port 0 disabled would fail */
- if (!netif_running(dev)) {
- err = -ENETDOWN;
- goto out;
- }
-
- /* In monitor mode, the scan results are always empty.
- * Probe responses are passed to the driver as received
- * frames and could be processed in software. */
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- if (priv->has_hostscan) {
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_SYMBOL:
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFHOSTSCAN_SYMBOL,
- HERMES_HOSTSCAN_SYMBOL_ONCE |
- HERMES_HOSTSCAN_SYMBOL_BCAST);
- break;
- case FIRMWARE_TYPE_INTERSIL: {
- __le16 req[3];
-
- req[0] = cpu_to_le16(0x3fff); /* All channels */
- req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */
- req[2] = 0; /* Any ESSID */
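- /* 0x3fff sets the low 14 bits, one per channel
- * (cf. NUM_CHANNELS == 14 in hw.h), i.e. channels 1-14. */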
- err = HERMES_WRITE_RECORD(hw, USER_BAP,
- HERMES_RID_CNFHOSTSCAN, &req);
- break;
- }
- case FIRMWARE_TYPE_AGERE:
- if (ssid->ssid_len > 0) {
- struct hermes_idstring idbuf;
- size_t len = ssid->ssid_len;
-
- idbuf.len = cpu_to_le16(len);
- memcpy(idbuf.val, ssid->ssid, len);
-
- err = hw->ops->write_ltv(hw, USER_BAP,
- HERMES_RID_CNFSCANSSID_AGERE,
- HERMES_BYTES_TO_RECLEN(len + 2),
- &idbuf);
- } else
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFSCANSSID_AGERE,
- 0); /* Any ESSID */
- if (err)
- break;
-
- if (priv->has_ext_scan) {
- err = hermes_write_wordrec(hw, USER_BAP,
- HERMES_RID_CNFSCANCHANNELS2GHZ,
- 0x7FFF);
- if (err)
- goto out;
-
- err = hermes_inquire(hw,
- HERMES_INQ_CHANNELINFO);
- } else
- err = hermes_inquire(hw, HERMES_INQ_SCAN);
-
- break;
- }
- } else
- err = hermes_inquire(hw, HERMES_INQ_SCAN);
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-/* Disassociate from node with BSSID addr */
-int orinoco_hw_disassociate(struct orinoco_private *priv,
- u8 *addr, u16 reason_code)
-{
- struct hermes *hw = &priv->hw;
- int err;
-
- struct {
- u8 addr[ETH_ALEN];
- __le16 reason_code;
- } __packed buf;
-
- /* Currently only supported by WPA enabled Agere fw */
- if (!priv->has_wpa)
- return -EOPNOTSUPP;
-
- memcpy(buf.addr, addr, ETH_ALEN);
- buf.reason_code = cpu_to_le16(reason_code);
- err = HERMES_WRITE_RECORD(hw, USER_BAP,
- HERMES_RID_CNFDISASSOCIATE,
- &buf);
- return err;
-}
-
-int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
- u8 *addr)
-{
- struct hermes *hw = &priv->hw;
- int err;
-
- err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, addr);
-
- return err;
-}
diff --git a/drivers/net/wireless/intersil/orinoco/hw.h b/drivers/net/wireless/intersil/orinoco/hw.h
deleted file mode 100644
index da5804dbd..000000000
--- a/drivers/net/wireless/intersil/orinoco/hw.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Encapsulate basic setting changes on Hermes hardware
- *
- * See copyright notice in main.c
- */
-#ifndef _ORINOCO_HW_H_
-#define _ORINOCO_HW_H_
-
-#include <linux/types.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-
-/* Hardware BAPs */
-#define USER_BAP 0
-#define IRQ_BAP 1
-
-/* WEP key sizes */
-#define SMALL_KEY_SIZE 5
-#define LARGE_KEY_SIZE 13
-
-/* Number of supported channels */
-#define NUM_CHANNELS 14
-
-/* Forward declarations */
-struct orinoco_private;
-
-int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
- size_t fw_name_len, u32 *hw_ver);
-int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr);
-int orinoco_hw_allocate_fid(struct orinoco_private *priv);
-int orinoco_get_bitratemode(int bitrate, int automatic);
-void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic);
-
-int orinoco_hw_program_rids(struct orinoco_private *priv);
-int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc);
-int __orinoco_hw_set_bitrate(struct orinoco_private *priv);
-int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate);
-int __orinoco_hw_set_wap(struct orinoco_private *priv);
-int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv);
-int __orinoco_hw_setup_enc(struct orinoco_private *priv);
-int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
- int set_tx, const u8 *key, size_t key_len,
- const u8 *rsc, size_t rsc_len,
- const u8 *tsc, size_t tsc_len);
-int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
-int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
- struct net_device *dev,
- int mc_count, int promisc);
-int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
- char buf[IW_ESSID_MAX_SIZE + 1]);
-int orinoco_hw_get_freq(struct orinoco_private *priv);
-int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
- int *numrates, s32 *rates, int max);
-int orinoco_hw_trigger_scan(struct orinoco_private *priv,
- const struct cfg80211_ssid *ssid);
-int orinoco_hw_disassociate(struct orinoco_private *priv,
- u8 *addr, u16 reason_code);
-int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
- u8 *addr);
-
-#endif /* _ORINOCO_HW_H_ */
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
deleted file mode 100644
index 7df88d20f..000000000
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ /dev/null
@@ -1,2414 +0,0 @@
-/* main.c - (formerly known as dldwd_cs.c, orinoco_cs.c and orinoco.c)
- *
- * A driver for Hermes or Prism 2 chipset based PCMCIA wireless
- * adaptors, with Lucent/Agere, Intersil or Symbol firmware.
- *
- * Current maintainers (as of 29 September 2003) are:
- * Pavel Roskin <proski AT gnu.org>
- * and David Gibson <hermes AT gibson.dropbear.id.au>
- *
- * (C) Copyright David Gibson, IBM Corporation 2001-2003.
- * Copyright (C) 2000 David Gibson, Linuxcare Australia.
- * With some help from :
- * Copyright (C) 2001 Jean Tourrilhes, HP Labs
- * Copyright (C) 2001 Benjamin Herrenschmidt
- *
- * Based on dummy_cs.c 1.27 2000/06/12 21:27:25
- *
- * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy
- * AT fasta.fh-dortmund.de>
- * http://www.stud.fh-dortmund.de/~andy/wvlan/
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * The initial developer of the original code is David A. Hinds
- * <dahinds AT users.sourceforge.net>. Portions created by David
- * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
- * Reserved.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL. */
-
-/*
- * TODO
- * o Handle de-encapsulation within network layer, provide 802.11
- * headers (patch from Thomas 'Dent' Mirlacher)
- * o Fix possible races in SPY handling.
- * o Disconnect wireless extensions from fundamental configuration.
- * o (maybe) Software WEP support (patch from Stano Meduna).
- * o (maybe) Use multiple Tx buffers - driver handling queue
- * rather than firmware.
- */
-
-/* Locking and synchronization:
- *
- * The basic principle is that everything is serialized through a
- * single spinlock, priv->lock. The lock is used in user, bh and irq
- * context, so when taken outside hardirq context it should always be
- * taken with interrupts disabled. The lock protects both the
- * hardware and the struct orinoco_private.
- *
- * Another flag, priv->hw_unavailable indicates that the hardware is
- * unavailable for an extended period of time (e.g. suspended, or in
- * the middle of a hard reset). This flag is protected by the
- * spinlock. All code which touches the hardware should check the
- * flag after taking the lock, and if it is set, give up on whatever
- * they are doing and drop the lock again. The orinoco_lock()
- * function handles this (it unlocks and returns -EBUSY if
- * hw_unavailable is non-zero).
- */
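-
-/* A minimal sketch of the locking pattern described above, as used
- * throughout this file (illustrative, not from the original source):
- *
- *     unsigned long flags;
- *
- *     if (orinoco_lock(priv, &flags) != 0)
- *             return -EBUSY;       (hw_unavailable was set)
- *     ... touch the hardware and struct orinoco_private ...
- *     orinoco_unlock(priv, &flags);
- */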
-
-#define DRIVER_NAME "orinoco"
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/suspend.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <net/iw_handler.h>
-#include <net/cfg80211.h>
-
-#include "hermes_rid.h"
-#include "hermes_dld.h"
-#include "hw.h"
-#include "scan.h"
-#include "mic.h"
-#include "fw.h"
-#include "wext.h"
-#include "cfg.h"
-#include "main.h"
-
-#include "orinoco.h"
-
-/********************************************************************/
-/* Module information */
-/********************************************************************/
-
-MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & "
- "David Gibson <hermes@gibson.dropbear.id.au>");
-MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based "
- "and similar wireless cards");
-MODULE_LICENSE("Dual MPL/GPL");
-
-/* Level of debugging. Used in the macros in orinoco.h */
-#ifdef ORINOCO_DEBUG
-int orinoco_debug = ORINOCO_DEBUG;
-EXPORT_SYMBOL(orinoco_debug);
-module_param(orinoco_debug, int, 0644);
-MODULE_PARM_DESC(orinoco_debug, "Debug level");
-#endif
-
-static bool suppress_linkstatus; /* = 0 */
-module_param(suppress_linkstatus, bool, 0644);
-MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
-
-static int ignore_disconnect; /* = 0 */
-module_param(ignore_disconnect, int, 0644);
-MODULE_PARM_DESC(ignore_disconnect,
- "Don't report lost link to the network layer");
-
-int force_monitor; /* = 0 */
-module_param(force_monitor, int, 0644);
-MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
-
-/********************************************************************/
-/* Internal constants */
-/********************************************************************/
-
-/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
-static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
-
-#define ORINOCO_MIN_MTU 256
-#define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD)
-
-#define MAX_IRQLOOPS_PER_IRQ 10
-#define MAX_IRQLOOPS_PER_JIFFY (20000 / HZ) /* Based on a guestimate of
- * how many events the
- * device could
- * legitimately generate */
-
-#define DUMMY_FID 0xFFFF
-
-/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
- HERMES_MAX_MULTICAST : 0)*/
-#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
-
-#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
- | HERMES_EV_TX | HERMES_EV_TXEXC \
- | HERMES_EV_WTERR | HERMES_EV_INFO \
- | HERMES_EV_INFDROP)
-
-/********************************************************************/
-/* Data types */
-/********************************************************************/
-
-/* Beginning of the Tx descriptor, used in TxExc handling */
-struct hermes_txexc_data {
- struct hermes_tx_descriptor desc;
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
-} __packed;
-
-/* Rx frame header except compatibility 802.3 header */
-struct hermes_rx_descriptor {
- /* Control */
- __le16 status;
- __le32 time;
- u8 silence;
- u8 signal;
- u8 rate;
- u8 rxflow;
- __le32 reserved;
-
- /* 802.11 header */
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 addr4[ETH_ALEN];
-
- /* Data length */
- __le16 data_len;
-} __packed;
-
-struct orinoco_rx_data {
- struct hermes_rx_descriptor *desc;
- struct sk_buff *skb;
- struct list_head list;
-};
-
-struct orinoco_scan_data {
- void *buf;
- size_t len;
- int type;
- struct list_head list;
-};
-
-/********************************************************************/
-/* Function prototypes */
-/********************************************************************/
-
-static int __orinoco_set_multicast_list(struct net_device *dev);
-static int __orinoco_up(struct orinoco_private *priv);
-static int __orinoco_down(struct orinoco_private *priv);
-static int __orinoco_commit(struct orinoco_private *priv);
-
-/********************************************************************/
-/* Internal helper functions */
-/********************************************************************/
-
-void set_port_type(struct orinoco_private *priv)
-{
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_STATION:
- priv->port_type = 1;
- priv->createibss = 0;
- break;
- case NL80211_IFTYPE_ADHOC:
- if (priv->prefer_port3) {
- priv->port_type = 3;
- priv->createibss = 0;
- } else {
- priv->port_type = priv->ibss_port;
- priv->createibss = 1;
- }
- break;
- case NL80211_IFTYPE_MONITOR:
- priv->port_type = 3;
- priv->createibss = 0;
- break;
- default:
- printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
- priv->ndev->name);
- }
-}
-
-/********************************************************************/
-/* Device methods */
-/********************************************************************/
-
-int orinoco_open(struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- unsigned long flags;
- int err;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = __orinoco_up(priv);
-
- if (!err)
- priv->open = 1;
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-EXPORT_SYMBOL(orinoco_open);
-
-int orinoco_stop(struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int err = 0;
-
- /* We mustn't use orinoco_lock() here, because we need to be
- able to close the interface even if hw_unavailable is set
- (e.g. as we're released after a PC Card removal) */
- orinoco_lock_irq(priv);
-
- priv->open = 0;
-
- err = __orinoco_down(priv);
-
- orinoco_unlock_irq(priv);
-
- return err;
-}
-EXPORT_SYMBOL(orinoco_stop);
-
-void orinoco_set_multicast_list(struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0) {
- printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
- "called when hw_unavailable\n", dev->name);
- return;
- }
-
- __orinoco_set_multicast_list(dev);
- orinoco_unlock(priv, &flags);
-}
-EXPORT_SYMBOL(orinoco_set_multicast_list);
-
-int orinoco_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- /* MTU + encapsulation + header length */
- if ((new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) >
- (priv->nicbuf_size - ETH_HLEN))
- return -EINVAL;
-
- dev->mtu = new_mtu;
-
- return 0;
-}
-EXPORT_SYMBOL(orinoco_change_mtu);
-
-/********************************************************************/
-/* Tx path */
-/********************************************************************/
-
-/* Add encapsulation and MIC to the existing SKB.
- * The main xmit routine will then send the whole lot to the card.
- * Need 8 bytes headroom
- * Need 8 bytes tailroom
- *
- *                          With encapsulated Ethernet II frame
- *                          --------
- *                          802.3 header (14 bytes)
- *                           dst[6]
- * --------                  src[6]
- * 802.3 header (14 bytes)   len[2]
- *  dst[6]                  802.2 header (8 bytes)
- *  src[6]                   encaps[6]
- *  len[2] <- leave alone -> len[2]
- * --------                 -------- <-- 0
- * Payload                  Payload
- * ...                      ...
- *
- * --------                 --------
- *                          MIC (8 bytes)
- *                          --------
- *
- * returns 0 on success, -ENOMEM on error.
- */
-int orinoco_process_xmit_skb(struct sk_buff *skb,
- struct net_device *dev,
- struct orinoco_private *priv,
- int *tx_control,
- u8 *mic_buf)
-{
- struct orinoco_tkip_key *key;
- struct ethhdr *eh;
- int do_mic;
-
- key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
-
- do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
- (key != NULL));
-
- if (do_mic)
- *tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
- HERMES_TXCTRL_MIC;
-
- eh = (struct ethhdr *)skb->data;
-
- /* Encapsulate Ethernet-II frames */
- if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
- struct header_struct {
- struct ethhdr eth; /* 802.3 header */
- u8 encap[6]; /* 802.2 header */
- } __packed hdr;
- int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
-
- if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
- if (net_ratelimit())
- printk(KERN_ERR
- "%s: Not enough headroom for 802.2 headers %d\n",
- dev->name, skb_headroom(skb));
- return -ENOMEM;
- }
-
- /* Fill in new header */
- memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
- hdr.eth.h_proto = htons(len);
- memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
-
- /* Make room for the new header, and copy it in */
- eh = skb_push(skb, ENCAPS_OVERHEAD);
- memcpy(eh, &hdr, sizeof(hdr));
- }
-
- /* Calculate Michael MIC */
- if (do_mic) {
- size_t len = skb->len - ETH_HLEN;
- u8 *mic = &mic_buf[0];
-
- /* Have to write to an even address, so copy the spare
- * byte across */
- if (skb->len % 2) {
- *mic = skb->data[skb->len - 1];
- mic++;
- }
-
- orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
- eh->h_dest, eh->h_source, 0 /* priority */,
- skb->data + ETH_HLEN,
- len, mic);
- }
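- /* Note (illustrative): when skb->len is odd, mic_buf[0] now holds
- * a copy of the frame's last data byte; orinoco_xmit() below backs
- * the MIC write offset up by one so that byte is rewritten together
- * with the MIC at an even address. */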
-
- return 0;
-}
-EXPORT_SYMBOL(orinoco_process_xmit_skb);
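-
-/* Worked example (illustrative): an IP frame has h_proto 0x0800, which
- * is greater than ETH_DATA_LEN (1500), so it gets encapsulated: with P
- * payload bytes the frame grows by ENCAPS_OVERHEAD (8) bytes and the
- * rewritten length field becomes P + 8, covering the payload plus the
- * 2-byte ethertype and the 6-byte SNAP header that now precede it. */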
-
-static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct hermes *hw = &priv->hw;
- int err = 0;
- u16 txfid = priv->txfid;
- int tx_control;
- unsigned long flags;
- u8 mic_buf[MICHAEL_MIC_LEN + 1];
-
- if (!netif_running(dev)) {
- printk(KERN_ERR "%s: Tx on stopped device!\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- if (netif_queue_stopped(dev)) {
- printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- if (orinoco_lock(priv, &flags) != 0) {
- printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- if (!netif_carrier_ok(dev) ||
- (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
- /* Oops, the firmware hasn't established a connection,
- silently drop the packet (this seems to be the
- safest approach). */
- goto drop;
- }
-
- /* Check packet length */
- if (skb->len < ETH_HLEN)
- goto drop;
-
- tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
-
- err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
- &mic_buf[0]);
- if (err)
- goto drop;
-
- if (priv->has_alt_txcntl) {
- /* WPA enabled firmwares have tx_cntl at the end of
- * the 802.11 header. So write zeroed descriptor and
- * 802.11 header at the same time
- */
- char desc[HERMES_802_3_OFFSET];
- __le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];
-
- memset(&desc, 0, sizeof(desc));
-
- *txcntl = cpu_to_le16(tx_control);
- err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
- if (err) {
- if (net_ratelimit())
- printk(KERN_ERR "%s: Error %d writing Tx "
- "descriptor to BAP\n", dev->name, err);
- goto busy;
- }
- } else {
- struct hermes_tx_descriptor desc;
-
- memset(&desc, 0, sizeof(desc));
-
- desc.tx_control = cpu_to_le16(tx_control);
- err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
- txfid, 0);
- if (err) {
- if (net_ratelimit())
- printk(KERN_ERR "%s: Error %d writing Tx "
- "descriptor to BAP\n", dev->name, err);
- goto busy;
- }
-
- /* Clear the 802.11 header and data length fields - some
- * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
- * if this isn't done. */
- hermes_clear_words(hw, HERMES_DATA0,
- HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
- }
-
- err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
- txfid, HERMES_802_3_OFFSET);
- if (err) {
- printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
- dev->name, err);
- goto busy;
- }
-
- if (tx_control & HERMES_TXCTRL_MIC) {
- size_t offset = HERMES_802_3_OFFSET + skb->len;
- size_t len = MICHAEL_MIC_LEN;
-
- if (offset % 2) {
- offset--;
- len++;
- }
- err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
- txfid, offset);
- if (err) {
- printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
- dev->name, err);
- goto busy;
- }
- }
-
- /* Finally, we actually initiate the send */
- netif_stop_queue(dev);
-
- err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
- txfid, NULL);
- if (err) {
- netif_start_queue(dev);
- if (net_ratelimit())
- printk(KERN_ERR "%s: Error %d transmitting packet\n",
- dev->name, err);
- goto busy;
- }
-
- stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
- goto ok;
-
- drop:
- stats->tx_errors++;
- stats->tx_dropped++;
-
- ok:
- orinoco_unlock(priv, &flags);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-
- busy:
- if (err == -EIO)
- schedule_work(&priv->reset_work);
- orinoco_unlock(priv, &flags);
- return NETDEV_TX_BUSY;
-}
-
-static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- u16 fid = hermes_read_regn(hw, ALLOCFID);
-
- if (fid != priv->txfid) {
- if (fid != DUMMY_FID)
- printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
- dev->name, fid);
- return;
- }
-
- hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
-}
-
-static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
-{
- dev->stats.tx_packets++;
-
- netif_wake_queue(dev);
-
- hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
-}
-
-static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
-{
- struct net_device_stats *stats = &dev->stats;
- u16 fid = hermes_read_regn(hw, TXCOMPLFID);
- u16 status;
- struct hermes_txexc_data hdr;
- int err = 0;
-
- if (fid == DUMMY_FID)
- return; /* Nothing's really happened */
-
- /* Read part of the frame header - we need status and addr1 */
- err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
- sizeof(struct hermes_txexc_data),
- fid, 0);
-
- hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
- stats->tx_errors++;
-
- if (err) {
- printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
- "(FID=%04X error %d)\n",
- dev->name, fid, err);
- return;
- }
-
- DEBUG(1, "%s: Tx error, err %d (FID=%04X)\n", dev->name,
- err, fid);
-
- /* We produce a TXDROP event only for retry or lifetime
- * exceeded, because those are the only statuses that really mean
- * that this particular node went away.
- * Other errors mean that *we* screwed up. - Jean II */
- status = le16_to_cpu(hdr.desc.status);
- if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
- union iwreq_data wrqu;
-
- /* Copy 802.11 dest address.
- * We use the 802.11 header because the frame may
- * not be 802.3 or may be mangled...
- * In Ad-Hoc mode, it will be the node address.
- * In managed mode, it will most likely be the AP address.
- * User space will figure out how to convert it to
- * whatever it needs (IP address or else).
- * - Jean II */
- memcpy(wrqu.addr.sa_data, hdr.addr1, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
- }
-
- netif_wake_queue(dev);
-}
-
-void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct hermes *hw = &priv->hw;
-
- printk(KERN_WARNING "%s: Tx timeout! "
- "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
- dev->name, hermes_read_regn(hw, ALLOCFID),
- hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));
-
- stats->tx_errors++;
-
- schedule_work(&priv->reset_work);
-}
-EXPORT_SYMBOL(orinoco_tx_timeout);
-
-/********************************************************************/
-/* Rx path (data frames) */
-/********************************************************************/
-
-/* Does the frame have a SNAP header indicating it should be
- * de-encapsulated to Ethernet-II? */
-static inline int is_ethersnap(void *_hdr)
-{
- u8 *hdr = _hdr;
-
- /* We de-encapsulate all packets which (a) have SNAP headers
- * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header)
- * and (b) have an OUI of 00:00:00 or 00:00:f8 in the SNAP
- * header - we need both because different APs appear to use
- * different OUIs for some reason */
- return (memcmp(hdr, &encaps_hdr, 5) == 0)
- && ((hdr[5] == 0x00) || (hdr[5] == 0xf8));
-}
-
-static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
- int level, int noise)
-{
- struct iw_quality wstats;
- wstats.level = level - 0x95;
- wstats.noise = noise - 0x95;
- wstats.qual = (level > noise) ? (level - noise) : 0;
- wstats.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- /* Update spy records */
- wireless_spy_update(dev, mac, &wstats);
-}
-
-static void orinoco_stat_gather(struct net_device *dev,
- struct sk_buff *skb,
- struct hermes_rx_descriptor *desc)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- /* Using spy support with lots of Rx packets, like in an
- * infrastructure (AP), will really slow down everything, because
- * the MAC address must be compared to each entry of the spy list.
- * If the user really asks for it (set some address in the
- * spy list), we do it, but he will pay the price.
- * Note that to get here, you need both WIRELESS_SPY
- * compiled in AND some addresses in the list !!!
- */
- /* Note : gcc will optimise the whole section away if
- * WIRELESS_SPY is not defined... - Jean II */
- if (SPY_NUMBER(priv)) {
- orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
- desc->signal, desc->silence);
- }
-}
-
-/*
- * orinoco_rx_monitor - handle received monitor frames.
- *
- * Arguments:
- * dev network device
- * rxfid received FID
- * desc rx descriptor of the frame
- *
- * Call context: interrupt
- */
-static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
- struct hermes_rx_descriptor *desc)
-{
- u32 hdrlen = 30; /* return full header by default */
- u32 datalen = 0;
- u16 fc;
- int err;
- int len;
- struct sk_buff *skb;
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct hermes *hw = &priv->hw;
-
- len = le16_to_cpu(desc->data_len);
-
- /* Determine the size of the header and the data */
- fc = le16_to_cpu(desc->frame_ctl);
- switch (fc & IEEE80211_FCTL_FTYPE) {
- case IEEE80211_FTYPE_DATA:
- if ((fc & IEEE80211_FCTL_TODS)
- && (fc & IEEE80211_FCTL_FROMDS))
- hdrlen = 30;
- else
- hdrlen = 24;
- datalen = len;
- break;
- case IEEE80211_FTYPE_MGMT:
- hdrlen = 24;
- datalen = len;
- break;
- case IEEE80211_FTYPE_CTL:
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PSPOLL:
- case IEEE80211_STYPE_RTS:
- case IEEE80211_STYPE_CFEND:
- case IEEE80211_STYPE_CFENDACK:
- hdrlen = 16;
- break;
- case IEEE80211_STYPE_CTS:
- case IEEE80211_STYPE_ACK:
- hdrlen = 10;
- break;
- }
- break;
- default:
- /* Unknown frame type */
- break;
- }
-
- /* sanity check the length */
- if (datalen > IEEE80211_MAX_DATA_LEN + 12) {
- printk(KERN_DEBUG "%s: oversized monitor frame, "
- "data length = %d\n", dev->name, datalen);
- stats->rx_length_errors++;
- goto update_stats;
- }
-
- skb = dev_alloc_skb(hdrlen + datalen);
- if (!skb) {
- printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n",
- dev->name);
- goto update_stats;
- }
-
- /* Copy the 802.11 header to the skb */
- skb_put_data(skb, &(desc->frame_ctl), hdrlen);
- skb_reset_mac_header(skb);
-
- /* If any, copy the data from the card to the skb */
- if (datalen > 0) {
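- /* The BAP has 16-bit granularity, so read an even byte count */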
- err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
- ALIGN(datalen, 2), rxfid,
- HERMES_802_2_OFFSET);
- if (err) {
- printk(KERN_ERR "%s: error %d reading monitor frame\n",
- dev->name, err);
- goto drop;
- }
- }
-
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_NONE;
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = cpu_to_be16(ETH_P_802_2);
-
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
-
- netif_rx(skb);
- return;
-
- drop:
- dev_kfree_skb_irq(skb);
- update_stats:
- stats->rx_errors++;
- stats->rx_dropped++;
-}
-
-void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct iw_statistics *wstats = &priv->wstats;
- struct sk_buff *skb = NULL;
- u16 rxfid, status;
- int length;
- struct hermes_rx_descriptor *desc;
- struct orinoco_rx_data *rx_data;
- int err;
-
- desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
- if (!desc)
- goto update_stats;
-
- rxfid = hermes_read_regn(hw, RXFID);
-
- err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
- rxfid, 0);
- if (err) {
- printk(KERN_ERR "%s: error %d reading Rx descriptor. "
- "Frame dropped.\n", dev->name, err);
- goto update_stats;
- }
-
- status = le16_to_cpu(desc->status);
-
- if (status & HERMES_RXSTAT_BADCRC) {
- DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
- dev->name);
- stats->rx_crc_errors++;
- goto update_stats;
- }
-
- /* Handle frames in monitor mode */
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
- orinoco_rx_monitor(dev, rxfid, desc);
- goto out;
- }
-
- if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
- DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
- dev->name);
- wstats->discard.code++;
- goto update_stats;
- }
-
- length = le16_to_cpu(desc->data_len);
-
- /* Sanity checks */
- if (length < 3) { /* Not enough for even an 802.2 LLC header */
- /* At least on Symbol firmware with PCF we get quite a
- lot of these legitimately - Poll frames with no
- data. */
- goto out;
- }
- if (length > IEEE80211_MAX_DATA_LEN) {
- printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
- dev->name, length);
- stats->rx_length_errors++;
- goto update_stats;
- }
-
- /* Payload size does not include Michael MIC. Increase payload
- * size to read it together with the data. */
- if (status & HERMES_RXSTAT_MIC)
- length += MICHAEL_MIC_LEN;
-
- /* We need space for the packet data itself, plus an Ethernet
- header, plus 2 bytes so we can align the IP header on a
- 32-bit boundary, plus 1 byte so we can read in odd-length
- packets from the card, which has an IO granularity of 16
- bits */
- skb = dev_alloc_skb(length + ETH_HLEN + 2 + 1);
- if (!skb) {
- printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
- dev->name);
- goto update_stats;
- }
-
- /* We'll prepend the header, so reserve space for it. The worst
- case is no decapsulation, when an 802.3 header is prepended and
- nothing is removed. 2 is for aligning the IP header. */
- skb_reserve(skb, ETH_HLEN + 2);
-
- err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
- ALIGN(length, 2), rxfid,
- HERMES_802_2_OFFSET);
- if (err) {
- printk(KERN_ERR "%s: error %d reading frame. "
- "Frame dropped.\n", dev->name, err);
- goto drop;
- }
-
- /* Add desc and skb to rx queue */
- rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
- if (!rx_data)
- goto drop;
-
- rx_data->desc = desc;
- rx_data->skb = skb;
- list_add_tail(&rx_data->list, &priv->rx_list);
- tasklet_schedule(&priv->rx_tasklet);
-
- return;
-
-drop:
- dev_kfree_skb_irq(skb);
-update_stats:
- stats->rx_errors++;
- stats->rx_dropped++;
-out:
- kfree(desc);
-}
-EXPORT_SYMBOL(__orinoco_ev_rx);
-
-static void orinoco_rx(struct net_device *dev,
- struct hermes_rx_descriptor *desc,
- struct sk_buff *skb)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- u16 status, fc;
- int length;
- struct ethhdr *hdr;
-
- status = le16_to_cpu(desc->status);
- length = le16_to_cpu(desc->data_len);
- fc = le16_to_cpu(desc->frame_ctl);
-
- /* Calculate and check MIC */
- if (status & HERMES_RXSTAT_MIC) {
- struct orinoco_tkip_key *key;
- int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
- HERMES_MIC_KEY_ID_SHIFT);
- u8 mic[MICHAEL_MIC_LEN];
- u8 *rxmic;
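- /* For FromDS frames the transmitter address is addr3,
- * otherwise addr2 (mirroring the h_source logic below) */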
- u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
- desc->addr3 : desc->addr2;
-
- /* Extract Michael MIC from payload */
- rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
-
- skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
- length -= MICHAEL_MIC_LEN;
-
- key = (struct orinoco_tkip_key *) priv->keys[key_id].key;
-
- if (!key) {
- printk(KERN_WARNING "%s: Received encrypted frame from "
- "%pM using key %i, but key is not installed\n",
- dev->name, src, key_id);
- goto drop;
- }
-
- orinoco_mic(priv->rx_tfm_mic, key->rx_mic, desc->addr1, src,
- 0, /* priority or QoS? */
- skb->data, skb->len, &mic[0]);
-
- if (memcmp(mic, rxmic,
- MICHAEL_MIC_LEN)) {
- union iwreq_data wrqu;
- struct iw_michaelmicfailure wxmic;
-
- printk(KERN_WARNING "%s: "
- "Invalid Michael MIC in data frame from %pM, "
- "using key %i\n",
- dev->name, src, key_id);
-
- /* TODO: update stats */
-
- /* Notify userspace */
- memset(&wxmic, 0, sizeof(wxmic));
- wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
- wxmic.flags |= (desc->addr1[0] & 1) ?
- IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
- wxmic.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);
-
- (void) orinoco_hw_get_tkip_iv(priv, key_id,
- &wxmic.tsc[0]);
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(wxmic);
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
- (char *) &wxmic);
-
- goto drop;
- }
- }
-
- /* Handle decapsulation
- * In most cases, the firmware tells us about SNAP frames.
- * For some reason, the SNAP frames sent by LinkSys APs
- * are not properly recognised by most firmwares.
- * So, check ourselves */
- if (length >= ENCAPS_OVERHEAD &&
- (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
- ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
- is_ethersnap(skb->data))) {
- /* These indicate a SNAP within 802.2 LLC within
- 802.11 frame which we'll need to de-encapsulate to
- the original EthernetII frame. */
- hdr = skb_push(skb, ETH_HLEN - ENCAPS_OVERHEAD);
- } else {
- /* 802.3 frame - prepend 802.3 header as is */
- hdr = skb_push(skb, ETH_HLEN);
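- /* 802.3 carries a length field here, not an EtherType */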
- hdr->h_proto = htons(length);
- }
- memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
- if (fc & IEEE80211_FCTL_FROMDS)
- memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
- else
- memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
-
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_NONE;
- if (fc & IEEE80211_FCTL_TODS)
- skb->pkt_type = PACKET_OTHERHOST;
-
- /* Process the wireless stats if needed */
- orinoco_stat_gather(dev, skb, desc);
-
- /* Pass the packet to the networking stack */
- netif_rx(skb);
- stats->rx_packets++;
- stats->rx_bytes += length;
-
- return;
-
- drop:
- dev_kfree_skb(skb);
- stats->rx_errors++;
- stats->rx_dropped++;
-}
-
-static void orinoco_rx_isr_tasklet(struct tasklet_struct *t)
-{
- struct orinoco_private *priv = from_tasklet(priv, t, rx_tasklet);
- struct net_device *dev = priv->ndev;
- struct orinoco_rx_data *rx_data, *temp;
- struct hermes_rx_descriptor *desc;
- struct sk_buff *skb;
- unsigned long flags;
-
- /* orinoco_rx requires the driver lock, and we also need to
- * protect priv->rx_list, so just hold the lock over the
- * lot.
- *
- * If orinoco_lock fails, we've unplugged the card. In this
- * case just abort. */
- if (orinoco_lock(priv, &flags) != 0)
- return;
-
- /* extract desc and skb from queue */
- list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
- desc = rx_data->desc;
- skb = rx_data->skb;
- list_del(&rx_data->list);
- kfree(rx_data);
-
- orinoco_rx(dev, desc, skb);
-
- kfree(desc);
- }
-
- orinoco_unlock(priv, &flags);
-}
-
-/********************************************************************/
-/* Rx path (info frames) */
-/********************************************************************/
-
-static void print_linkstatus(struct net_device *dev, u16 status)
-{
- char *s;
-
- if (suppress_linkstatus)
- return;
-
- switch (status) {
- case HERMES_LINKSTATUS_NOT_CONNECTED:
- s = "Not Connected";
- break;
- case HERMES_LINKSTATUS_CONNECTED:
- s = "Connected";
- break;
- case HERMES_LINKSTATUS_DISCONNECTED:
- s = "Disconnected";
- break;
- case HERMES_LINKSTATUS_AP_CHANGE:
- s = "AP Changed";
- break;
- case HERMES_LINKSTATUS_AP_OUT_OF_RANGE:
- s = "AP Out of Range";
- break;
- case HERMES_LINKSTATUS_AP_IN_RANGE:
- s = "AP In Range";
- break;
- case HERMES_LINKSTATUS_ASSOC_FAILED:
- s = "Association Failed";
- break;
- default:
- s = "UNKNOWN";
- }
-
- printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
- dev->name, s, status);
-}
-
-/* Search scan results for requested BSSID, join it if found */
-static void orinoco_join_ap(struct work_struct *work)
-{
- struct orinoco_private *priv =
- container_of(work, struct orinoco_private, join_work);
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- int err;
- unsigned long flags;
- struct join_req {
- u8 bssid[ETH_ALEN];
- __le16 channel;
- } __packed req;
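- /* Scan atoms are packed at a stride that stops just short
- * of the atim field */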
- const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
- struct prism2_scan_apinfo *atom = NULL;
- int offset = 4;
- int found = 0;
- u8 *buf;
- u16 len;
-
- /* Allocate buffer for scan results */
- buf = kmalloc(MAX_SCAN_LEN, GFP_KERNEL);
- if (!buf)
- return;
-
- if (orinoco_lock(priv, &flags) != 0)
- goto fail_lock;
-
- /* Sanity checks in case user changed something in the meantime */
- if (!priv->bssid_fixed)
- goto out;
-
- if (strlen(priv->desired_essid) == 0)
- goto out;
-
- /* Read scan results from the firmware */
- err = hw->ops->read_ltv(hw, USER_BAP,
- HERMES_RID_SCANRESULTSTABLE,
- MAX_SCAN_LEN, &len, buf);
- if (err) {
- printk(KERN_ERR "%s: Cannot read scan results\n",
- dev->name);
- goto out;
- }
-
- len = HERMES_RECLEN_TO_BYTES(len);
-
- /* Go through the scan results looking for the channel of the AP
- * we were requested to join */
- for (; offset + atom_len <= len; offset += atom_len) {
- atom = (struct prism2_scan_apinfo *) (buf + offset);
- if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- DEBUG(1, "%s: Requested AP not found in scan results\n",
- dev->name);
- goto out;
- }
-
- memcpy(req.bssid, priv->desired_bssid, ETH_ALEN);
- req.channel = atom->channel; /* both are little-endian */
- err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST,
- &req);
- if (err)
- printk(KERN_ERR "%s: Error issuing join request\n", dev->name);
-
- out:
- orinoco_unlock(priv, &flags);
-
- fail_lock:
- kfree(buf);
-}
-
-/* Send new BSSID to userspace */
-static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- union iwreq_data wrqu;
- int err;
-
- err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
- ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
- if (err != 0)
- return;
-
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- union iwreq_data wrqu;
- int err;
- u8 buf[88];
- u8 *ie;
-
- if (!priv->has_wpa)
- return;
-
- err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
- sizeof(buf), NULL, &buf);
- if (err != 0)
- return;
-
- ie = orinoco_get_wpa_ie(buf, sizeof(buf));
- if (ie) {
- int rem = sizeof(buf) - (ie - &buf[0]);
- wrqu.data.length = ie[1] + 2;
- if (wrqu.data.length > rem)
- wrqu.data.length = rem;
-
- if (wrqu.data.length)
- /* Send event to user space */
- wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
- }
-}
-
-static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- union iwreq_data wrqu;
- int err;
- u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */
- u8 *ie;
-
- if (!priv->has_wpa)
- return;
-
- err = hw->ops->read_ltv(hw, USER_BAP,
- HERMES_RID_CURRENT_ASSOC_RESP_INFO,
- sizeof(buf), NULL, &buf);
- if (err != 0)
- return;
-
- ie = orinoco_get_wpa_ie(buf, sizeof(buf));
- if (ie) {
- int rem = sizeof(buf) - (ie - &buf[0]);
- wrqu.data.length = ie[1] + 2;
- if (wrqu.data.length > rem)
- wrqu.data.length = rem;
-
- if (wrqu.data.length)
- /* Send event to user space */
- wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
- }
-}
-
-static void orinoco_send_wevents(struct work_struct *work)
-{
- struct orinoco_private *priv =
- container_of(work, struct orinoco_private, wevent_work);
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return;
-
- orinoco_send_assocreqie_wevent(priv);
- orinoco_send_assocrespie_wevent(priv);
- orinoco_send_bssid_wevent(priv);
-
- orinoco_unlock(priv, &flags);
-}
-
-static void qbuf_scan(struct orinoco_private *priv, void *buf,
- int len, int type)
-{
- struct orinoco_scan_data *sd;
- unsigned long flags;
-
- sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
- if (!sd)
- return;
-
- sd->buf = buf;
- sd->len = len;
- sd->type = type;
-
- spin_lock_irqsave(&priv->scan_lock, flags);
- list_add_tail(&sd->list, &priv->scan_list);
- spin_unlock_irqrestore(&priv->scan_lock, flags);
-
- schedule_work(&priv->process_scan);
-}
-
-static void qabort_scan(struct orinoco_private *priv)
-{
- struct orinoco_scan_data *sd;
- unsigned long flags;
-
- sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
- if (!sd)
- return;
-
- sd->buf = NULL;
- sd->len = -1; /* Abort */
-
- spin_lock_irqsave(&priv->scan_lock, flags);
- list_add_tail(&sd->list, &priv->scan_list);
- spin_unlock_irqrestore(&priv->scan_lock, flags);
-
- schedule_work(&priv->process_scan);
-}
-
-static void orinoco_process_scan_results(struct work_struct *work)
-{
- struct orinoco_private *priv =
- container_of(work, struct orinoco_private, process_scan);
- struct orinoco_scan_data *sd, *temp;
- unsigned long flags;
- void *buf;
- int len;
- int type;
-
- spin_lock_irqsave(&priv->scan_lock, flags);
- list_for_each_entry_safe(sd, temp, &priv->scan_list, list) {
-
- buf = sd->buf;
- len = sd->len;
- type = sd->type;
-
- list_del(&sd->list);
- spin_unlock_irqrestore(&priv->scan_lock, flags);
- kfree(sd);
-
- if (len > 0) {
- if (type == HERMES_INQ_CHANNELINFO)
- orinoco_add_extscan_result(priv, buf, len);
- else
- orinoco_add_hostscan_results(priv, buf, len);
-
- kfree(buf);
- } else {
- /* Either abort or complete the scan */
- orinoco_scan_done(priv, (len < 0));
- }
-
- spin_lock_irqsave(&priv->scan_lock, flags);
- }
- spin_unlock_irqrestore(&priv->scan_lock, flags);
-}
-
-void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- u16 infofid;
- struct {
- __le16 len;
- __le16 type;
- } __packed info;
- int len, type;
- int err;
-
- /* This is an answer to an INQUIRE command that we did earlier,
- * or an information "event" generated by the card.
- * The controller returns to us a pseudo frame containing
- * the information in question - Jean II */
- infofid = hermes_read_regn(hw, INFOFID);
-
- /* Read the info frame header - don't try too hard */
- err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
- infofid, 0);
- if (err) {
- printk(KERN_ERR "%s: error %d reading info frame. "
- "Frame dropped.\n", dev->name, err);
- return;
- }
-
- len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
- type = le16_to_cpu(info.type);
-
- switch (type) {
- case HERMES_INQ_TALLIES: {
- struct hermes_tallies_frame tallies;
- struct iw_statistics *wstats = &priv->wstats;
-
- if (len > sizeof(tallies)) {
- printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
- dev->name, len);
- len = sizeof(tallies);
- }
-
- err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
- infofid, sizeof(info));
- if (err)
- break;
-
- /* Increment our various counters */
- /* wstats->discard.nwid - no wrong BSSID stuff */
- wstats->discard.code +=
- le16_to_cpu(tallies.RxWEPUndecryptable);
- if (len == sizeof(tallies))
- wstats->discard.code +=
- le16_to_cpu(tallies.RxDiscards_WEPICVError) +
- le16_to_cpu(tallies.RxDiscards_WEPExcluded);
- wstats->discard.misc +=
- le16_to_cpu(tallies.TxDiscardsWrongSA);
- wstats->discard.fragment +=
- le16_to_cpu(tallies.RxMsgInBadMsgFragments);
- wstats->discard.retries +=
- le16_to_cpu(tallies.TxRetryLimitExceeded);
- /* wstats->miss.beacon - no match */
- }
- break;
- case HERMES_INQ_LINKSTATUS: {
- struct hermes_linkstatus linkstatus;
- u16 newstatus;
- int connected;
-
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
- break;
-
- if (len != sizeof(linkstatus)) {
- printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
- dev->name, len);
- break;
- }
-
- err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
- infofid, sizeof(info));
- if (err)
- break;
- newstatus = le16_to_cpu(linkstatus.linkstatus);
-
- /* Symbol firmware uses "out of range" to signal that
- * the hostscan frame can be requested. */
- if (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE &&
- priv->firmware_type == FIRMWARE_TYPE_SYMBOL &&
- priv->has_hostscan && priv->scan_request) {
- hermes_inquire(hw, HERMES_INQ_HOSTSCAN_SYMBOL);
- break;
- }
-
- connected = (newstatus == HERMES_LINKSTATUS_CONNECTED)
- || (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
- || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE);
-
- if (connected)
- netif_carrier_on(dev);
- else if (!ignore_disconnect)
- netif_carrier_off(dev);
-
- if (newstatus != priv->last_linkstatus) {
- priv->last_linkstatus = newstatus;
- print_linkstatus(dev, newstatus);
- /* The info frame contains only one word which is the
- * status (see hermes.h). The status is pretty boring
- * in itself, that's why we export the new BSSID...
- * Jean II */
- schedule_work(&priv->wevent_work);
- }
- }
- break;
- case HERMES_INQ_SCAN:
- if (!priv->scan_request && priv->bssid_fixed &&
- priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
- schedule_work(&priv->join_work);
- break;
- }
- fallthrough;
- case HERMES_INQ_HOSTSCAN:
- case HERMES_INQ_HOSTSCAN_SYMBOL: {
- /* Result of a scan. Contains information about
- * cells in the vicinity - Jean II */
- unsigned char *buf;
-
- /* Sanity check */
- if (len > 4096) {
- printk(KERN_WARNING "%s: Scan results too large (%d bytes)\n",
- dev->name, len);
- qabort_scan(priv);
- break;
- }
-
- /* Allocate buffer for results */
- buf = kmalloc(len, GFP_ATOMIC);
- if (buf == NULL) {
- /* No memory, so can't printk()... */
- qabort_scan(priv);
- break;
- }
-
- /* Read scan data */
- err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
- infofid, sizeof(info));
- if (err) {
- kfree(buf);
- qabort_scan(priv);
- break;
- }
-
-#ifdef ORINOCO_DEBUG
- {
- int i;
- printk(KERN_DEBUG "Scan result [%02X", buf[0]);
- for (i = 1; i < len; i++)
- printk(":%02X", buf[i]);
- printk("]\n");
- }
-#endif /* ORINOCO_DEBUG */
-
- qbuf_scan(priv, buf, len, type);
- }
- break;
- case HERMES_INQ_CHANNELINFO:
- {
- struct agere_ext_scan_info *bss;
-
- if (!priv->scan_request) {
- printk(KERN_DEBUG "%s: Got chaninfo without scan, "
- "len=%d\n", dev->name, len);
- break;
- }
-
- /* An empty result indicates that the scan is complete */
- if (len == 0) {
- qbuf_scan(priv, NULL, len, type);
- break;
- }
-
- /* Sanity check */
- else if (len < (offsetof(struct agere_ext_scan_info,
- data) + 2)) {
- /* Drop this result now so we don't have to
- * keep checking later */
- printk(KERN_WARNING
- "%s: Ext scan results too short (%d bytes)\n",
- dev->name, len);
- break;
- }
-
- bss = kmalloc(len, GFP_ATOMIC);
- if (bss == NULL)
- break;
-
- /* Read scan data */
- err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
- infofid, sizeof(info));
- if (err)
- kfree(bss);
- else
- qbuf_scan(priv, bss, len, type);
-
- break;
- }
- case HERMES_INQ_SEC_STAT_AGERE:
- /* Security status (Agere specific) */
- /* Ignore this frame for now */
- if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
- break;
- fallthrough;
- default:
- printk(KERN_DEBUG "%s: Unknown information frame received: "
- "type 0x%04x, length %d\n", dev->name, type, len);
- /* We don't actually do anything about it */
- break;
- }
-}
-EXPORT_SYMBOL(__orinoco_ev_info);
-
-static void __orinoco_ev_infdrop(struct net_device *dev, struct hermes *hw)
-{
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name);
-}
-
-/********************************************************************/
-/* Internal hardware control routines */
-/********************************************************************/
-
-static int __orinoco_up(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- int err;
-
- netif_carrier_off(dev); /* just to make sure */
-
- err = __orinoco_commit(priv);
- if (err) {
- printk(KERN_ERR "%s: Error %d configuring card\n",
- dev->name, err);
- return err;
- }
-
- /* Fire things up again */
- hermes_set_irqmask(hw, ORINOCO_INTEN);
- err = hermes_enable_port(hw, 0);
- if (err) {
- printk(KERN_ERR "%s: Error %d enabling MAC port\n",
- dev->name, err);
- return err;
- }
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int __orinoco_down(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- int err;
-
- netif_stop_queue(dev);
-
- if (!priv->hw_unavailable) {
- if (!priv->broken_disableport) {
- err = hermes_disable_port(hw, 0);
- if (err) {
- /* Some firmwares (e.g. Intersil 1.3.x) seem
- * to have problems disabling the port, oh
- * well, too bad. */
- printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
- dev->name, err);
- priv->broken_disableport = 1;
- }
- }
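- /* Mask all interrupts and ack anything still outstanding */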
- hermes_set_irqmask(hw, 0);
- hermes_write_regn(hw, EVACK, 0xffff);
- }
-
- orinoco_scan_done(priv, true);
-
- /* firmware will have to reassociate */
- netif_carrier_off(dev);
- priv->last_linkstatus = 0xffff;
-
- return 0;
-}
-
-static int orinoco_reinit_firmware(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- int err;
-
- err = hw->ops->init(hw);
- if (priv->do_fw_download && !err) {
- err = orinoco_download(priv);
- if (err)
- priv->do_fw_download = 0;
- }
- if (!err)
- err = orinoco_hw_allocate_fid(priv);
-
- return err;
-}
-
-static int
-__orinoco_set_multicast_list(struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int err = 0;
- int promisc, mc_count;
-
- /* The Hermes doesn't seem to have an allmulti mode, so we go
- * into promiscuous mode and let the upper levels deal. */
- if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
- promisc = 1;
- mc_count = 0;
- } else {
- promisc = 0;
- mc_count = netdev_mc_count(dev);
- }
-
- err = __orinoco_hw_set_multicast_list(priv, dev, mc_count, promisc);
-
- return err;
-}
-
-/* This must be called from user context, without locks held - use
- * schedule_work() */
-void orinoco_reset(struct work_struct *work)
-{
- struct orinoco_private *priv =
- container_of(work, struct orinoco_private, reset_work);
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- int err;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- /* When the hardware becomes available again, whatever
- * detects that is responsible for re-initializing
- * it. So no need for anything further */
- return;
-
- netif_stop_queue(dev);
-
- /* Shut off interrupts. Depending on what state the hardware
- * is in, this might not work, but we'll try anyway */
- hermes_set_irqmask(hw, 0);
- hermes_write_regn(hw, EVACK, 0xffff);
-
- priv->hw_unavailable++;
- priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
- netif_carrier_off(dev);
-
- orinoco_unlock(priv, &flags);
-
- /* Scanning support: Notify scan cancellation */
- orinoco_scan_done(priv, true);
-
- if (priv->hard_reset) {
- err = (*priv->hard_reset)(priv);
- if (err) {
- printk(KERN_ERR "%s: orinoco_reset: Error %d "
- "performing hard reset\n", dev->name, err);
- goto disable;
- }
- }
-
- err = orinoco_reinit_firmware(priv);
- if (err) {
- printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
- dev->name, err);
- goto disable;
- }
-
- /* This has to be called from user context */
- orinoco_lock_irq(priv);
-
- priv->hw_unavailable--;
-
- /* priv->open or priv->hw_unavailable might have changed while
- * we dropped the lock */
- if (priv->open && (!priv->hw_unavailable)) {
- err = __orinoco_up(priv);
- if (err) {
- printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
- dev->name, err);
- } else
- netif_trans_update(dev);
- }
-
- orinoco_unlock_irq(priv);
-
- return;
- disable:
- hermes_set_irqmask(hw, 0);
- netif_device_detach(dev);
- printk(KERN_ERR "%s: Device has been disabled!\n", dev->name);
-}
-
-static int __orinoco_commit(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- int err = 0;
-
- /* If we've called commit, we are reconfiguring or bringing the
- * interface up. Maintaining countermeasures across this would
- * be confusing, so note that we've disabled them. The port will
- * be enabled later in orinoco_commit or __orinoco_up. */
- priv->tkip_cm_active = 0;
-
- err = orinoco_hw_program_rids(priv);
-
- /* FIXME: what about netif_tx_lock */
- (void) __orinoco_set_multicast_list(dev);
-
- return err;
-}
-
-/* Ensures configuration changes are applied. May result in a reset.
- * The caller should hold priv->lock
- */
-int orinoco_commit(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- int err;
-
- if (priv->broken_disableport) {
- schedule_work(&priv->reset_work);
- return 0;
- }
-
- err = hermes_disable_port(hw, 0);
- if (err) {
- printk(KERN_WARNING "%s: Unable to disable port "
- "while reconfiguring card\n", dev->name);
- priv->broken_disableport = 1;
- goto out;
- }
-
- err = __orinoco_commit(priv);
- if (err) {
- printk(KERN_WARNING "%s: Unable to reconfigure card\n",
- dev->name);
- goto out;
- }
-
- err = hermes_enable_port(hw, 0);
- if (err) {
- printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
- dev->name);
- goto out;
- }
-
- out:
- if (err) {
- printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
- schedule_work(&priv->reset_work);
- err = 0;
- }
- return err;
-}
-
-/********************************************************************/
-/* Interrupt handler */
-/********************************************************************/
-
-static void __orinoco_ev_tick(struct net_device *dev, struct hermes *hw)
-{
- printk(KERN_DEBUG "%s: TICK\n", dev->name);
-}
-
-static void __orinoco_ev_wterr(struct net_device *dev, struct hermes *hw)
-{
- /* This seems to happen a fair bit under load, but ignoring it
- seems to work fine...*/
- printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
- dev->name);
-}
-
-irqreturn_t orinoco_interrupt(int irq, void *dev_id)
-{
- struct orinoco_private *priv = dev_id;
- struct net_device *dev = priv->ndev;
- struct hermes *hw = &priv->hw;
- int count = MAX_IRQLOOPS_PER_IRQ;
- u16 evstat, events;
- /* These are used to detect a runaway interrupt situation.
- *
- * If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
- * we panic and shut down the hardware
- */
- /* jiffies value the last time we were called */
- static unsigned long last_irq_jiffy; /* = 0 */
- static int loops_this_jiffy; /* = 0 */
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0) {
- /* If hw is unavailable - we don't know if the irq was
- * for us or not */
- return IRQ_HANDLED;
- }
-
- evstat = hermes_read_regn(hw, EVSTAT);
- events = evstat & hw->inten;
- if (!events) {
- orinoco_unlock(priv, &flags);
- return IRQ_NONE;
- }
-
- if (jiffies != last_irq_jiffy)
- loops_this_jiffy = 0;
- last_irq_jiffy = jiffies;
-
- while (events && count--) {
- if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
- printk(KERN_WARNING "%s: IRQ handler is looping too "
- "much! Resetting.\n", dev->name);
- /* Disable interrupts for now */
- hermes_set_irqmask(hw, 0);
- schedule_work(&priv->reset_work);
- break;
- }
-
- /* Check the card hasn't been removed */
- if (!hermes_present(hw)) {
- DEBUG(0, "orinoco_interrupt(): card removed\n");
- break;
- }
-
- if (events & HERMES_EV_TICK)
- __orinoco_ev_tick(dev, hw);
- if (events & HERMES_EV_WTERR)
- __orinoco_ev_wterr(dev, hw);
- if (events & HERMES_EV_INFDROP)
- __orinoco_ev_infdrop(dev, hw);
- if (events & HERMES_EV_INFO)
- __orinoco_ev_info(dev, hw);
- if (events & HERMES_EV_RX)
- __orinoco_ev_rx(dev, hw);
- if (events & HERMES_EV_TXEXC)
- __orinoco_ev_txexc(dev, hw);
- if (events & HERMES_EV_TX)
- __orinoco_ev_tx(dev, hw);
- if (events & HERMES_EV_ALLOC)
- __orinoco_ev_alloc(dev, hw);
-
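- /* Ack the events we just serviced, then look for new ones */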
- hermes_write_regn(hw, EVACK, evstat);
-
- evstat = hermes_read_regn(hw, EVSTAT);
- events = evstat & hw->inten;
- }
-
- orinoco_unlock(priv, &flags);
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(orinoco_interrupt);
-
-/********************************************************************/
-/* Power management */
-/********************************************************************/
-#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_HERMES_CACHE_FW_ON_INIT)
-static int orinoco_pm_notifier(struct notifier_block *notifier,
- unsigned long pm_event,
- void *unused)
-{
- struct orinoco_private *priv = container_of(notifier,
- struct orinoco_private,
- pm_notifier);
-
- /* All we need to do is cache the firmware before suspend, and
- * release it when we come out.
- *
- * Only need to do this if we're downloading firmware. */
- if (!priv->do_fw_download)
- return NOTIFY_DONE;
-
- switch (pm_event) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- orinoco_cache_fw(priv, 0);
- break;
-
- case PM_POST_RESTORE:
- /* Restore from hibernation failed. We need to clean
- * up in exactly the same way, so fall through. */
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- orinoco_uncache_fw(priv);
- break;
-
- case PM_RESTORE_PREPARE:
- default:
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static void orinoco_register_pm_notifier(struct orinoco_private *priv)
-{
- priv->pm_notifier.notifier_call = orinoco_pm_notifier;
- register_pm_notifier(&priv->pm_notifier);
-}
-
-static void orinoco_unregister_pm_notifier(struct orinoco_private *priv)
-{
- unregister_pm_notifier(&priv->pm_notifier);
-}
-#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
-#define orinoco_register_pm_notifier(priv) do { } while (0)
-#define orinoco_unregister_pm_notifier(priv) do { } while (0)
-#endif
-
-/********************************************************************/
-/* Initialization */
-/********************************************************************/
-
-int orinoco_init(struct orinoco_private *priv)
-{
- struct device *dev = priv->dev;
- struct wiphy *wiphy = priv_to_wiphy(priv);
- struct hermes *hw = &priv->hw;
- int err = 0;
-
- /* No need to lock, the hw_unavailable flag is already set in
- * alloc_orinocodev() */
- priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
-
- /* Initialize the firmware */
- err = hw->ops->init(hw);
- if (err != 0) {
- dev_err(dev, "Failed to initialize firmware (err = %d)\n",
- err);
- goto out;
- }
-
- err = determine_fw_capabilities(priv, wiphy->fw_version,
- sizeof(wiphy->fw_version),
- &wiphy->hw_version);
- if (err != 0) {
- dev_err(dev, "Incompatible firmware, aborting\n");
- goto out;
- }
-
- if (priv->do_fw_download) {
-#ifdef CONFIG_HERMES_CACHE_FW_ON_INIT
- orinoco_cache_fw(priv, 0);
-#endif
-
- err = orinoco_download(priv);
- if (err)
- priv->do_fw_download = 0;
-
- /* Check firmware version again */
- err = determine_fw_capabilities(priv, wiphy->fw_version,
- sizeof(wiphy->fw_version),
- &wiphy->hw_version);
- if (err != 0) {
- dev_err(dev, "Incompatible firmware, aborting\n");
- goto out;
- }
- }
-
- if (priv->has_port3)
- dev_info(dev, "Ad-hoc demo mode supported\n");
- if (priv->has_ibss)
- dev_info(dev, "IEEE standard IBSS ad-hoc mode supported\n");
- if (priv->has_wep)
- dev_info(dev, "WEP supported, %s-bit key\n",
- priv->has_big_wep ? "104" : "40");
- if (priv->has_wpa) {
- dev_info(dev, "WPA-PSK supported\n");
- if (orinoco_mic_init(priv)) {
- dev_err(dev, "Failed to setup MIC crypto algorithm. "
- "Disabling WPA support\n");
- priv->has_wpa = 0;
- }
- }
-
- err = orinoco_hw_read_card_settings(priv, wiphy->perm_addr);
- if (err)
- goto out;
-
- err = orinoco_hw_allocate_fid(priv);
- if (err) {
- dev_err(dev, "Failed to allocate NIC buffer!\n");
- goto out;
- }
-
- /* Set up the default configuration */
- priv->iw_mode = NL80211_IFTYPE_STATION;
- /* By default use IEEE/IBSS ad-hoc mode if we have it */
- priv->prefer_port3 = priv->has_port3 && (!priv->has_ibss);
- set_port_type(priv);
- priv->channel = 0; /* use firmware default */
-
- priv->promiscuous = 0;
- priv->encode_alg = ORINOCO_ALG_NONE;
- priv->tx_key = 0;
- priv->wpa_enabled = 0;
- priv->tkip_cm_active = 0;
- priv->key_mgmt = 0;
- priv->wpa_ie_len = 0;
- priv->wpa_ie = NULL;
-
- if (orinoco_wiphy_register(wiphy)) {
- err = -ENODEV;
- goto out;
- }
-
- /* Make the hardware available, as long as it hasn't been
- * removed elsewhere (e.g. by PCMCIA hot unplug) */
- orinoco_lock_irq(priv);
- priv->hw_unavailable--;
- orinoco_unlock_irq(priv);
-
- dev_dbg(dev, "Ready\n");
-
- out:
- return err;
-}
-EXPORT_SYMBOL(orinoco_init);
-
-static const struct net_device_ops orinoco_netdev_ops = {
- .ndo_open = orinoco_open,
- .ndo_stop = orinoco_stop,
- .ndo_start_xmit = orinoco_xmit,
- .ndo_set_rx_mode = orinoco_set_multicast_list,
- .ndo_change_mtu = orinoco_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = orinoco_tx_timeout,
-};
-
-/* Allocate private data.
- *
- * This driver has a number of structures associated with it
- * netdev - Net device structure for each network interface
- * wiphy - structure associated with wireless phy
- * wireless_dev (wdev) - structure for each wireless interface
- * hw - structure for hermes chip info
- * card - card specific structure for use by the card driver
- * (airport, orinoco_cs)
- * priv - orinoco private data
- * device - generic linux device structure
- *
- * +---------+ +---------+
- * | wiphy | | netdev |
- * | +-------+ | +-------+
- * | | priv | | | wdev |
- * | | +-----+ +-+-------+
- * | | | hw |
- * | +-+-----+
- * | | card |
- * +-+-------+
- *
- * priv has a link to netdev and device
- * wdev has a link to wiphy
- */
-struct orinoco_private
-*alloc_orinocodev(int sizeof_card,
- struct device *device,
- int (*hard_reset)(struct orinoco_private *),
- int (*stop_fw)(struct orinoco_private *, int))
-{
- struct orinoco_private *priv;
- struct wiphy *wiphy;
-
- /* allocate wiphy
- * NOTE: We only support a single virtual interface
- * but this may change when monitor mode is added
- */
- wiphy = wiphy_new(&orinoco_cfg_ops,
- sizeof(struct orinoco_private) + sizeof_card);
- if (!wiphy)
- return NULL;
-
- priv = wiphy_priv(wiphy);
- priv->dev = device;
-
- if (sizeof_card)
- priv->card = (void *)((unsigned long)priv
- + sizeof(struct orinoco_private));
- else
- priv->card = NULL;
-
- orinoco_wiphy_init(wiphy);
-
-#ifdef WIRELESS_SPY
- priv->wireless_data.spy_data = &priv->spy_data;
-#endif
-
- /* Set up default callbacks */
- priv->hard_reset = hard_reset;
- priv->stop_fw = stop_fw;
-
- spin_lock_init(&priv->lock);
- priv->open = 0;
- priv->hw_unavailable = 1; /* orinoco_init() must clear this
- * before anything else touches the
- * hardware */
- INIT_WORK(&priv->reset_work, orinoco_reset);
- INIT_WORK(&priv->join_work, orinoco_join_ap);
- INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
-
- INIT_LIST_HEAD(&priv->rx_list);
- tasklet_setup(&priv->rx_tasklet, orinoco_rx_isr_tasklet);
-
- spin_lock_init(&priv->scan_lock);
- INIT_LIST_HEAD(&priv->scan_list);
- INIT_WORK(&priv->process_scan, orinoco_process_scan_results);
-
- priv->last_linkstatus = 0xffff;
-
-#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
- priv->cached_pri_fw = NULL;
- priv->cached_fw = NULL;
-#endif
-
- /* Register PM notifiers */
- orinoco_register_pm_notifier(priv);
-
- return priv;
-}
-EXPORT_SYMBOL(alloc_orinocodev);
-
-/* We can only support a single interface. We provide a separate
- * function to set it up to distinguish between hardware
- * initialisation and interface setup.
- *
- * The base_addr and irq parameters are passed on to netdev for use
- * with SIOCGIFMAP.
- */
-int orinoco_if_add(struct orinoco_private *priv,
- unsigned long base_addr,
- unsigned int irq,
- const struct net_device_ops *ops)
-{
- struct wiphy *wiphy = priv_to_wiphy(priv);
- struct wireless_dev *wdev;
- struct net_device *dev;
- int ret;
-
- dev = alloc_etherdev(sizeof(struct wireless_dev));
-
- if (!dev)
- return -ENOMEM;
-
- /* Initialise wireless_dev */
- wdev = netdev_priv(dev);
- wdev->wiphy = wiphy;
- wdev->iftype = NL80211_IFTYPE_STATION;
-
- /* Setup / override net_device fields */
- dev->ieee80211_ptr = wdev;
- dev->watchdog_timeo = HZ; /* 1 second timeout */
- dev->wireless_handlers = &orinoco_handler_def;
-#ifdef WIRELESS_SPY
- dev->wireless_data = &priv->wireless_data;
-#endif
- /* Default to standard ops if not set */
- if (ops)
- dev->netdev_ops = ops;
- else
- dev->netdev_ops = &orinoco_netdev_ops;
-
- /* we use the default eth_mac_addr for setting the MAC addr */
-
- /* Reserve space in skb for the SNAP header */
- dev->needed_headroom = ENCAPS_OVERHEAD;
-
- netif_carrier_off(dev);
-
- eth_hw_addr_set(dev, wiphy->perm_addr);
-
- dev->base_addr = base_addr;
- dev->irq = irq;
-
- dev->min_mtu = ORINOCO_MIN_MTU;
- dev->max_mtu = ORINOCO_MAX_MTU;
-
- SET_NETDEV_DEV(dev, priv->dev);
- ret = register_netdev(dev);
- if (ret)
- goto fail;
-
- priv->ndev = dev;
-
- /* Report what we've done */
- dev_dbg(priv->dev, "Registered interface %s.\n", dev->name);
-
- return 0;
-
- fail:
- free_netdev(dev);
- return ret;
-}
-EXPORT_SYMBOL(orinoco_if_add);
-
-void orinoco_if_del(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
-
- unregister_netdev(dev);
- free_netdev(dev);
-}
-EXPORT_SYMBOL(orinoco_if_del);
-
-void free_orinocodev(struct orinoco_private *priv)
-{
- struct wiphy *wiphy = priv_to_wiphy(priv);
- struct orinoco_rx_data *rx_data, *temp;
- struct orinoco_scan_data *sd, *sdtemp;
-
- /* If the tasklet is scheduled when we call tasklet_kill it
- * will run one final time. However the tasklet will only
- * drain priv->rx_list if the hw is still available. */
- tasklet_kill(&priv->rx_tasklet);
-
- /* Explicitly drain priv->rx_list */
- list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
- list_del(&rx_data->list);
-
- dev_kfree_skb(rx_data->skb);
- kfree(rx_data->desc);
- kfree(rx_data);
- }
-
- cancel_work_sync(&priv->process_scan);
- /* Explicitly drain priv->scan_list */
- list_for_each_entry_safe(sd, sdtemp, &priv->scan_list, list) {
- list_del(&sd->list);
-
- if (sd->len > 0)
- kfree(sd->buf);
- kfree(sd);
- }
-
- orinoco_unregister_pm_notifier(priv);
- orinoco_uncache_fw(priv);
-
- priv->wpa_ie_len = 0;
- kfree(priv->wpa_ie);
- orinoco_mic_free(priv);
- wiphy_free(wiphy);
-}
-EXPORT_SYMBOL(free_orinocodev);
-
-int orinoco_up(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- unsigned long flags;
- int err;
-
- priv->hw.ops->lock_irqsave(&priv->lock, &flags);
-
- err = orinoco_reinit_firmware(priv);
- if (err) {
- printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
- dev->name, err);
- goto exit;
- }
-
- netif_device_attach(dev);
- priv->hw_unavailable--;
-
- if (priv->open && !priv->hw_unavailable) {
- err = __orinoco_up(priv);
- if (err)
- printk(KERN_ERR "%s: Error %d restarting card\n",
- dev->name, err);
- }
-
-exit:
- priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
-
- return 0;
-}
-EXPORT_SYMBOL(orinoco_up);
-
-void orinoco_down(struct orinoco_private *priv)
-{
- struct net_device *dev = priv->ndev;
- unsigned long flags;
- int err;
-
- priv->hw.ops->lock_irqsave(&priv->lock, &flags);
- err = __orinoco_down(priv);
- if (err)
- printk(KERN_WARNING "%s: Error %d downing interface\n",
- dev->name, err);
-
- netif_device_detach(dev);
- priv->hw_unavailable++;
- priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
-}
-EXPORT_SYMBOL(orinoco_down);
-
-/********************************************************************/
-/* Module initialization */
-/********************************************************************/
-
-/* Can't be declared "const" or the whole __initdata section will
- * become const */
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (David Gibson <hermes@gibson.dropbear.id.au>, "
- "Pavel Roskin <proski@gnu.org>, et al)";
-
-static int __init init_orinoco(void)
-{
- printk(KERN_DEBUG "%s\n", version);
- return 0;
-}
-
-static void __exit exit_orinoco(void)
-{
-}
-
-module_init(init_orinoco);
-module_exit(exit_orinoco);
diff --git a/drivers/net/wireless/intersil/orinoco/main.h b/drivers/net/wireless/intersil/orinoco/main.h
deleted file mode 100644
index 5a8fec261..000000000
--- a/drivers/net/wireless/intersil/orinoco/main.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Exports from main to helper modules
- *
- * See copyright notice in main.c
- */
-#ifndef _ORINOCO_MAIN_H_
-#define _ORINOCO_MAIN_H_
-
-#include <linux/ieee80211.h>
-#include "orinoco.h"
-
-/********************************************************************/
-/* Compile time configuration and compatibility stuff */
-/********************************************************************/
-
-/* We do it this way to avoid ifdefs in the actual code */
-#ifdef WIRELESS_SPY
-#define SPY_NUMBER(priv) (priv->spy_data.spy_number)
-#else
-#define SPY_NUMBER(priv) 0
-#endif /* WIRELESS_SPY */
-
-/********************************************************************/
-
-/* Export module parameter */
-extern int force_monitor;
-
-/* Forward declarations */
-struct net_device;
-struct work_struct;
-
-void set_port_type(struct orinoco_private *priv);
-int orinoco_commit(struct orinoco_private *priv);
-void orinoco_reset(struct work_struct *work);
-
-/* Information element helpers - find a home for these... */
-#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
-#define WPA_SELECTOR_LEN 4
-static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
-{
- u8 *p = data;
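- /* Walk the IE list; each element is id, length, payload */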
- while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
- if ((p[0] == WLAN_EID_VENDOR_SPECIFIC) &&
- (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
- return p;
- p += p[1] + 2;
- }
- return NULL;
-}
-
-#endif /* _ORINOCO_MAIN_H_ */
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
deleted file mode 100644
index a324bc4b7..000000000
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/* Orinoco MIC helpers
- *
- * See copyright notice in main.c
- */
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/if_ether.h>
-#include <linux/scatterlist.h>
-#include <crypto/hash.h>
-
-#include "orinoco.h"
-#include "mic.h"
-
-/********************************************************************/
-/* Michael MIC crypto setup */
-/********************************************************************/
-int orinoco_mic_init(struct orinoco_private *priv)
-{
- priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(priv->tx_tfm_mic)) {
- printk(KERN_DEBUG "%s: could not allocate "
- "crypto API michael_mic\n", __func__);
- priv->tx_tfm_mic = NULL;
- return -ENOMEM;
- }
-
- priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0);
- if (IS_ERR(priv->rx_tfm_mic)) {
- printk(KERN_DEBUG "%s: could not allocate "
- "crypto API michael_mic\n", __func__);
- priv->rx_tfm_mic = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void orinoco_mic_free(struct orinoco_private *priv)
-{
- if (priv->tx_tfm_mic)
- crypto_free_shash(priv->tx_tfm_mic);
- if (priv->rx_tfm_mic)
- crypto_free_shash(priv->rx_tfm_mic);
-}
-
-int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
- u8 *da, u8 *sa, u8 priority,
- u8 *data, size_t data_len, u8 *mic)
-{
- SHASH_DESC_ON_STACK(desc, tfm_michael);
- u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
- int err;
-
- if (tfm_michael == NULL) {
- printk(KERN_WARNING "%s: tfm_michael == NULL\n", __func__);
- return -1;
- }
-
- /* Copy header into buffer. We need the padding on the end zeroed */
- memcpy(&hdr[0], da, ETH_ALEN);
- memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
- hdr[ETH_ALEN * 2] = priority;
- hdr[ETH_ALEN * 2 + 1] = 0;
- hdr[ETH_ALEN * 2 + 2] = 0;
- hdr[ETH_ALEN * 2 + 3] = 0;
-
- desc->tfm = tfm_michael;
-
- err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
- if (err)
- return err;
-
- err = crypto_shash_init(desc);
- if (err)
- return err;
-
- err = crypto_shash_update(desc, hdr, sizeof(hdr));
- if (err)
- return err;
-
- err = crypto_shash_update(desc, data, data_len);
- if (err)
- return err;
-
- err = crypto_shash_final(desc, mic);
- shash_desc_zero(desc);
-
- return err;
-}
diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h
deleted file mode 100644
index e8724e889..000000000
--- a/drivers/net/wireless/intersil/orinoco/mic.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Orinoco MIC helpers
- *
- * See copyright notice in main.c
- */
-#ifndef _ORINOCO_MIC_H_
-#define _ORINOCO_MIC_H_
-
-#include <linux/types.h>
-#include <crypto/hash.h>
-
-#define MICHAEL_MIC_LEN 8
-
-/* Forward declarations */
-struct orinoco_private;
-struct crypto_shash;
-
-int orinoco_mic_init(struct orinoco_private *priv);
-void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
- u8 *da, u8 *sa, u8 priority,
- u8 *data, size_t data_len, u8 *mic);
-
-#endif /* _ORINOCO_MIC_H_ */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
deleted file mode 100644
index cdd026af1..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/* orinoco.h
- *
- * Common definitions to all pieces of the various orinoco
- * drivers
- */
-
-#ifndef _ORINOCO_H
-#define _ORINOCO_H
-
-#define DRIVER_VERSION "0.15"
-
-#include <linux/interrupt.h>
-#include <linux/suspend.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <net/cfg80211.h>
-
-#include "hermes.h"
-
-/* To enable debug messages */
-/*#define ORINOCO_DEBUG 3*/
-
-#define WIRELESS_SPY /* enable iwspy support */
-
-#define MAX_SCAN_LEN 4096
-
-#define ORINOCO_SEQ_LEN 8
-#define ORINOCO_MAX_KEY_SIZE 14
-#define ORINOCO_MAX_KEYS 4
-
-struct orinoco_key {
- __le16 len; /* always stored as little-endian */
- char data[ORINOCO_MAX_KEY_SIZE];
-} __packed;
-
-#define TKIP_KEYLEN 16
-#define MIC_KEYLEN 8
-
-struct orinoco_tkip_key {
- u8 tkip[TKIP_KEYLEN];
- u8 tx_mic[MIC_KEYLEN];
- u8 rx_mic[MIC_KEYLEN];
-};
-
-enum orinoco_alg {
- ORINOCO_ALG_NONE,
- ORINOCO_ALG_WEP,
- ORINOCO_ALG_TKIP
-};
-
-enum fwtype {
- FIRMWARE_TYPE_AGERE,
- FIRMWARE_TYPE_INTERSIL,
- FIRMWARE_TYPE_SYMBOL
-};
-
-struct firmware;
-
-struct orinoco_private {
- void *card; /* Pointer to card dependent structure */
- struct device *dev;
- int (*hard_reset)(struct orinoco_private *);
- int (*stop_fw)(struct orinoco_private *, int);
-
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[14];
- u32 cipher_suites[3];
-
- /* Synchronisation stuff */
- spinlock_t lock;
- int hw_unavailable;
- struct work_struct reset_work;
-
- /* Interrupt tasklets */
- struct tasklet_struct rx_tasklet;
- struct list_head rx_list;
-
- /* driver state */
- int open;
- u16 last_linkstatus;
- struct work_struct join_work;
- struct work_struct wevent_work;
-
- /* Net device stuff */
- struct net_device *ndev;
- struct iw_statistics wstats;
-
- /* Hardware control variables */
- struct hermes hw;
- u16 txfid;
-
- /* Capabilities of the hardware/firmware */
- enum fwtype firmware_type;
- int ibss_port;
- int nicbuf_size;
- u16 channel_mask;
-
- /* Boolean capabilities */
- unsigned int has_ibss:1;
- unsigned int has_port3:1;
- unsigned int has_wep:1;
- unsigned int has_big_wep:1;
- unsigned int has_mwo:1;
- unsigned int has_pm:1;
- unsigned int has_preamble:1;
- unsigned int has_sensitivity:1;
- unsigned int has_hostscan:1;
- unsigned int has_alt_txcntl:1;
- unsigned int has_ext_scan:1;
- unsigned int has_wpa:1;
- unsigned int do_fw_download:1;
- unsigned int broken_disableport:1;
- unsigned int broken_monitor:1;
- unsigned int prefer_port3:1;
-
- /* Configuration parameters */
- enum nl80211_iftype iw_mode;
- enum orinoco_alg encode_alg;
- u16 wep_restrict, tx_key;
- struct key_params keys[ORINOCO_MAX_KEYS];
-
- int bitratemode;
- char nick[IW_ESSID_MAX_SIZE + 1];
- char desired_essid[IW_ESSID_MAX_SIZE + 1];
- char desired_bssid[ETH_ALEN];
- int bssid_fixed;
- u16 frag_thresh, mwo_robust;
- u16 channel;
- u16 ap_density, rts_thresh;
- u16 pm_on, pm_mcast, pm_period, pm_timeout;
- u16 preamble;
- u16 short_retry_limit, long_retry_limit;
- u16 retry_lifetime;
-#ifdef WIRELESS_SPY
- struct iw_spy_data spy_data; /* iwspy support */
- struct iw_public_data wireless_data;
-#endif
-
- /* Configuration dependent variables */
- int port_type, createibss;
- int promiscuous, mc_count;
-
- /* Scanning support */
- struct cfg80211_scan_request *scan_request;
- struct work_struct process_scan;
- struct list_head scan_list;
- spinlock_t scan_lock; /* protects the scan list */
-
- /* WPA support */
- u8 *wpa_ie;
- int wpa_ie_len;
-
- struct crypto_shash *rx_tfm_mic;
- struct crypto_shash *tx_tfm_mic;
-
- unsigned int wpa_enabled:1;
- unsigned int tkip_cm_active:1;
- unsigned int key_mgmt:3;
-
-#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
- /* Cached in memory firmware to use during ->resume. */
- const struct firmware *cached_pri_fw;
- const struct firmware *cached_fw;
-#endif
-
- struct notifier_block pm_notifier;
-};
-
-#ifdef ORINOCO_DEBUG
-extern int orinoco_debug;
-#define DEBUG(n, args...) do { \
- if (orinoco_debug > (n)) \
- printk(KERN_DEBUG args); \
-} while (0)
-#else
-#define DEBUG(n, args...) do { } while (0)
-#endif /* ORINOCO_DEBUG */
-
-/********************************************************************/
-/* Exported prototypes */
-/********************************************************************/
-
-struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
- int (*hard_reset)(struct orinoco_private *),
- int (*stop_fw)(struct orinoco_private *, int));
-void free_orinocodev(struct orinoco_private *priv);
-int orinoco_init(struct orinoco_private *priv);
-int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
- unsigned int irq, const struct net_device_ops *ops);
-void orinoco_if_del(struct orinoco_private *priv);
-int orinoco_up(struct orinoco_private *priv);
-void orinoco_down(struct orinoco_private *priv);
-irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
-
-int orinoco_process_xmit_skb(struct sk_buff *skb,
- struct net_device *dev,
- struct orinoco_private *priv,
- int *tx_control,
- u8 *mic);
-
-/* Common ndo functions exported for reuse by orinoco_usb */
-int orinoco_open(struct net_device *dev);
-int orinoco_stop(struct net_device *dev);
-void orinoco_set_multicast_list(struct net_device *dev);
-int orinoco_change_mtu(struct net_device *dev, int new_mtu);
-void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue);
-
-/********************************************************************/
-/* Locking and synchronization functions */
-/********************************************************************/
-
-static inline int orinoco_lock(struct orinoco_private *priv,
- unsigned long *flags)
-{
- priv->hw.ops->lock_irqsave(&priv->lock, flags);
- if (priv->hw_unavailable) {
- DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
- priv->ndev);
- priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
- return -EBUSY;
- }
- return 0;
-}
-
-static inline void orinoco_unlock(struct orinoco_private *priv,
- unsigned long *flags)
-{
- priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
-}
-
-static inline void orinoco_lock_irq(struct orinoco_private *priv)
-{
- priv->hw.ops->lock_irq(&priv->lock);
-}
-
-static inline void orinoco_unlock_irq(struct orinoco_private *priv)
-{
- priv->hw.ops->unlock_irq(&priv->lock);
-}
-
-/*** Navigate from net_device to orinoco_private ***/
-static inline struct orinoco_private *ndev_priv(struct net_device *dev)
-{
- struct wireless_dev *wdev = netdev_priv(dev);
- return wdev_priv(wdev);
-}
-#endif /* _ORINOCO_H */
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c b/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
deleted file mode 100644
index 03bfd2482..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_cs.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/* orinoco_cs.c (formerly known as dldwd_cs.c)
- *
- * A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
- * as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
- * EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and others).
- * It should also be usable on various Prism II based cards such as the
- * Linksys, D-Link and Farallon Skyline. It should also work on Symbol
- * cards such as the 3Com AirConnect and Ericsson WLAN.
- *
- * Copyright notice & release notes in file main.c
- */
-
-#define DRIVER_NAME "orinoco_cs"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include "orinoco.h"
-
-/********************************************************************/
-/* Module stuff */
-/********************************************************************/
-
-MODULE_AUTHOR("David Gibson <hermes@gibson.dropbear.id.au>");
-MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco,"
- " Prism II based and similar wireless cards");
-MODULE_LICENSE("Dual MPL/GPL");
-
-/* Module parameters */
-
-/* Some D-Link cards have buggy CIS. They do work at 5V properly, but
- * don't have any CIS entry for it. This works around it... */
-static int ignore_cis_vcc; /* = 0 */
-module_param(ignore_cis_vcc, int, 0);
-MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket");
-
-/********************************************************************/
-/* Data structures */
-/********************************************************************/
-
-/* PCMCIA specific device information (goes in the card field of
- * struct orinoco_private */
-struct orinoco_pccard {
- struct pcmcia_device *p_dev;
-
- /* Used to handle hard reset */
- /* yuck, we need this hack to work around the insanity of the
- * PCMCIA layer */
- unsigned long hard_reset_in_progress;
-};
-
-
-/********************************************************************/
-/* Function prototypes */
-/********************************************************************/
-
-static int orinoco_cs_config(struct pcmcia_device *link);
-static void orinoco_cs_release(struct pcmcia_device *link);
-static void orinoco_cs_detach(struct pcmcia_device *p_dev);
-
-/********************************************************************/
-/* Device methods */
-/********************************************************************/
-
-static int
-orinoco_cs_hard_reset(struct orinoco_private *priv)
-{
- struct orinoco_pccard *card = priv->card;
- struct pcmcia_device *link = card->p_dev;
- int err;
-
- /* We need atomic ops here, because we're not holding the lock */
- set_bit(0, &card->hard_reset_in_progress);
-
- err = pcmcia_reset_card(link->socket);
- if (err)
- return err;
-
- msleep(100);
- clear_bit(0, &card->hard_reset_in_progress);
-
- return 0;
-}
-
-/********************************************************************/
-/* PCMCIA stuff */
-/********************************************************************/
-
-static int
-orinoco_cs_probe(struct pcmcia_device *link)
-{
- struct orinoco_private *priv;
- struct orinoco_pccard *card;
- int ret;
-
- priv = alloc_orinocodev(sizeof(*card), &link->dev,
- orinoco_cs_hard_reset, NULL);
- if (!priv)
- return -ENOMEM;
- card = priv->card;
-
- /* Link both structures together */
- card->p_dev = link;
- link->priv = priv;
-
- ret = orinoco_cs_config(link);
- if (ret)
- goto err_free_orinocodev;
-
- return 0;
-
-err_free_orinocodev:
- free_orinocodev(priv);
- return ret;
-}
-
-static void orinoco_cs_detach(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
-
- orinoco_if_del(priv);
-
- orinoco_cs_release(link);
-
- wiphy_unregister(priv_to_wiphy(priv));
- free_orinocodev(priv);
-} /* orinoco_cs_detach */
-
-static int orinoco_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-static int
-orinoco_cs_config(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- struct hermes *hw = &priv->hw;
- int ret;
- void __iomem *mem;
-
- link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC |
- CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
- if (ignore_cis_vcc)
- link->config_flags &= ~CONF_AUTO_CHECK_VCC;
- ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL);
- if (ret) {
- if (!ignore_cis_vcc)
- printk(KERN_ERR PFX "GetNextTuple(): No matching "
- "CIS configuration. Maybe you need the "
- "ignore_cis_vcc=1 parameter.\n");
- goto failed;
- }
-
- mem = ioport_map(link->resource[0]->start,
- resource_size(link->resource[0]));
- if (!mem)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
- hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
-
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- /* Initialise the main driver */
- if (orinoco_init(priv) != 0) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto failed;
- }
-
- /* Register an interface with the stack */
- if (orinoco_if_add(priv, link->resource[0]->start,
- link->irq, NULL) != 0) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto failed;
- }
-
- return 0;
-
- failed:
- orinoco_cs_release(link);
- return -ENODEV;
-} /* orinoco_cs_config */
-
-static void
-orinoco_cs_release(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- unsigned long flags;
-
- /* We're committed to taking the device away now, so mark the
- * hardware as unavailable */
- priv->hw.ops->lock_irqsave(&priv->lock, &flags);
- priv->hw_unavailable++;
- priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
-
- pcmcia_disable_device(link);
- if (priv->hw.iobase)
- ioport_unmap(priv->hw.iobase);
-} /* orinoco_cs_release */
-
-static int orinoco_cs_suspend(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- struct orinoco_pccard *card = priv->card;
-
- /* This is probably racy, but I can't think of
- a better way, short of rewriting the PCMCIA
- layer to not suck :-( */
- if (!test_bit(0, &card->hard_reset_in_progress))
- orinoco_down(priv);
-
- return 0;
-}
-
-static int orinoco_cs_resume(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- struct orinoco_pccard *card = priv->card;
- int err = 0;
-
- if (!test_bit(0, &card->hard_reset_in_progress))
- err = orinoco_up(priv);
-
- return err;
-}
-
-
-/********************************************************************/
-/* Module initialization */
-/********************************************************************/
-
-static const struct pcmcia_device_id orinoco_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
- PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
- PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
- PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
- PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
- PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
- PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
- PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
- PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
- PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
- PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
- PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
- PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
- PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
- PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
- PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
- PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
- PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
- PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
- PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
- PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
- PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
- PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
- PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
- PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
- PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
- PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */
-#ifdef CONFIG_HERMES_PRISM
- /* Only entries that certainly identify Prism chipset */
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
- PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
- PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
- PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
- PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
- PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
- PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
- PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
- PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
- PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
- PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
- PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
- PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
- PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
- PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
- PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
- PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
- PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
- PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
- PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
- PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
- PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
- PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
- PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
- PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
- PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
- PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
- PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
- PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
- PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
- PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
- PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
- PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
- PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
- PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
- PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
- PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
- PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
- PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
- PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
-
- /* This may be Agere or Intersil Firmware */
- PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
-#endif
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
-
-static struct pcmcia_driver orinoco_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .probe = orinoco_cs_probe,
- .remove = orinoco_cs_detach,
- .id_table = orinoco_cs_ids,
- .suspend = orinoco_cs_suspend,
- .resume = orinoco_cs_resume,
-};
-module_pcmcia_driver(orinoco_driver);
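
The suspend/resume handlers above skip orinoco_down()/orinoco_up() while a hard reset is in flight, because resetting the socket can make the PCMCIA core call straight back into those same handlers. The handshake is a single atomic bit. A minimal user-space sketch of the same pattern, with C11 atomics standing in for the kernel's set_bit()/clear_bit()/test_bit() (illustrative only, not the driver's code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool hard_reset_in_progress;

static void hard_reset(void)
{
	atomic_store(&hard_reset_in_progress, true);   /* set_bit(0, ...) */
	/* pcmcia_reset_card() plus the 100 ms settle delay go here */
	atomic_store(&hard_reset_in_progress, false);  /* clear_bit(0, ...) */
}

static void suspend(void)
{
	if (!atomic_load(&hard_reset_in_progress))     /* test_bit(0, ...) */
		puts("orinoco_down()");                /* the real suspend work */
}

int main(void) { hard_reset(); suspend(); return 0; }
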
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
deleted file mode 100644
index 18bd0d987..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/* orinoco_nortel.c
- *
- * Driver for Prism II devices which would usually be driven by orinoco_cs,
- * but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in
- * Nortel emobility, Symbol LA-4113 and Symbol LA-4123.
- *
- * Copyright (C) 2002 Tobias Hoffmann
- * (C) 2003 Christoph Jungegger <disdos@traum404.de>
- *
- * Some of this code is borrowed from orinoco_plx.c
- * Copyright (C) 2001 Daniel Barlow
- * Some of this code is borrowed from orinoco_pci.c
- * Copyright (C) 2001 Jean Tourrilhes
- * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
- * has been copied from it. linux-wlan-ng-0.1.10 is originally :
- * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- */
-
-#define DRIVER_NAME "orinoco_nortel"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <pcmcia/cisreg.h>
-
-#include "orinoco.h"
-#include "orinoco_pci.h"
-
-#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
-#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
-
-
-/*
- * Do a soft reset of the card using the Configuration Option Register.
- * We need this to get going...
- * This is the part of the code that is strongly inspired by wlan-ng.
- *
- * Note: Don't try to access HERMES_CMD during the reset phase.
- * It just won't work!
- */
-static int orinoco_nortel_cor_reset(struct orinoco_private *priv)
-{
- struct orinoco_pci_card *card = priv->card;
-
- /* Assert the reset until the card notices */
- iowrite16(8, card->bridge_io + 2);
- ioread16(card->attr_io + COR_OFFSET);
- iowrite16(0x80, card->attr_io + COR_OFFSET);
- mdelay(1);
-
- /* Give time for the card to recover from this hard effort */
- iowrite16(0, card->attr_io + COR_OFFSET);
- iowrite16(0, card->attr_io + COR_OFFSET);
- mdelay(1);
-
- /* Set COR as usual */
- iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
- iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
- mdelay(1);
-
- iowrite16(0x228, card->bridge_io + 2);
-
- return 0;
-}
-
-static int orinoco_nortel_hw_init(struct orinoco_pci_card *card)
-{
- int i;
- u32 reg;
-
- /* Setup bridge */
- if (ioread16(card->bridge_io) & 1) {
- printk(KERN_ERR PFX "brg1 answer1 wrong\n");
- return -EBUSY;
- }
- iowrite16(0x118, card->bridge_io + 2);
- iowrite16(0x108, card->bridge_io + 2);
- mdelay(30);
- iowrite16(0x8, card->bridge_io + 2);
- for (i = 0; i < 30; i++) {
- mdelay(30);
- if (ioread16(card->bridge_io) & 0x10)
- break;
- }
- if (i == 30) {
- printk(KERN_ERR PFX "brg1 timed out\n");
- return -EBUSY;
- }
- if (ioread16(card->attr_io + COR_OFFSET) & 1) {
- printk(KERN_ERR PFX "brg2 answer1 wrong\n");
- return -EBUSY;
- }
- if (ioread16(card->attr_io + COR_OFFSET + 2) & 1) {
- printk(KERN_ERR PFX "brg2 answer2 wrong\n");
- return -EBUSY;
- }
- if (ioread16(card->attr_io + COR_OFFSET + 4) & 1) {
- printk(KERN_ERR PFX "brg2 answer3 wrong\n");
- return -EBUSY;
- }
-
- /* Set the PCMCIA COR register */
- iowrite16(COR_VALUE, card->attr_io + COR_OFFSET);
- mdelay(1);
- reg = ioread16(card->attr_io + COR_OFFSET);
- if (reg != COR_VALUE) {
- printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n",
- reg);
- return -EBUSY;
- }
-
- /* Set LEDs */
- iowrite16(1, card->bridge_io + 10);
- return 0;
-}
-
-static int orinoco_nortel_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int err;
- struct orinoco_private *priv;
- struct orinoco_pci_card *card;
- void __iomem *hermes_io, *bridge_io, *attr_io;
-
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device\n");
- return err;
- }
-
- err = pci_request_regions(pdev, DRIVER_NAME);
- if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
- goto fail_resources;
- }
-
- bridge_io = pci_iomap(pdev, 0, 0);
- if (!bridge_io) {
- printk(KERN_ERR PFX "Cannot map bridge registers\n");
- err = -EIO;
- goto fail_map_bridge;
- }
-
- attr_io = pci_iomap(pdev, 1, 0);
- if (!attr_io) {
- printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
- err = -EIO;
- goto fail_map_attr;
- }
-
- hermes_io = pci_iomap(pdev, 2, 0);
- if (!hermes_io) {
- printk(KERN_ERR PFX "Cannot map chipset registers\n");
- err = -EIO;
- goto fail_map_hermes;
- }
-
- /* Allocate network device */
- priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
- orinoco_nortel_cor_reset, NULL);
- if (!priv) {
- printk(KERN_ERR PFX "Cannot allocate network device\n");
- err = -ENOMEM;
- goto fail_alloc;
- }
-
- card = priv->card;
- card->bridge_io = bridge_io;
- card->attr_io = attr_io;
-
- hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
-
- err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
- DRIVER_NAME, priv);
- if (err) {
- printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
- err = -EBUSY;
- goto fail_irq;
- }
-
- err = orinoco_nortel_hw_init(card);
- if (err) {
- printk(KERN_ERR PFX "Hardware initialization failed\n");
- goto fail;
- }
-
- err = orinoco_nortel_cor_reset(priv);
- if (err) {
- printk(KERN_ERR PFX "Initial reset failed\n");
- goto fail;
- }
-
- err = orinoco_init(priv);
- if (err) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto fail;
- }
-
- err = orinoco_if_add(priv, 0, 0, NULL);
- if (err) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail_wiphy;
- }
-
- pci_set_drvdata(pdev, priv);
-
- return 0;
-
- fail_wiphy:
- wiphy_unregister(priv_to_wiphy(priv));
- fail:
- free_irq(pdev->irq, priv);
-
- fail_irq:
- free_orinocodev(priv);
-
- fail_alloc:
- pci_iounmap(pdev, hermes_io);
-
- fail_map_hermes:
- pci_iounmap(pdev, attr_io);
-
- fail_map_attr:
- pci_iounmap(pdev, bridge_io);
-
- fail_map_bridge:
- pci_release_regions(pdev);
-
- fail_resources:
- pci_disable_device(pdev);
-
- return err;
-}
-
-static void orinoco_nortel_remove_one(struct pci_dev *pdev)
-{
- struct orinoco_private *priv = pci_get_drvdata(pdev);
- struct orinoco_pci_card *card = priv->card;
-
- /* Clear LEDs */
- iowrite16(0, card->bridge_io + 10);
-
- orinoco_if_del(priv);
- wiphy_unregister(priv_to_wiphy(priv));
- free_irq(pdev->irq, priv);
- free_orinocodev(priv);
- pci_iounmap(pdev, priv->hw.iobase);
- pci_iounmap(pdev, card->attr_io);
- pci_iounmap(pdev, card->bridge_io);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-static const struct pci_device_id orinoco_nortel_id_table[] = {
- /* Nortel emobility PCI */
- {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
- /* Symbol LA-4123 PCI */
- {0x1562, 0x0001, PCI_ANY_ID, PCI_ANY_ID,},
- {0,},
-};
-
-MODULE_DEVICE_TABLE(pci, orinoco_nortel_id_table);
-
-static struct pci_driver orinoco_nortel_driver = {
- .name = DRIVER_NAME,
- .id_table = orinoco_nortel_id_table,
- .probe = orinoco_nortel_init_one,
- .remove = orinoco_nortel_remove_one,
- .driver.pm = &orinoco_pci_pm_ops,
-};
-
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Tobias Hoffmann & Christoph Jungegger <disdos@traum404.de>)";
-MODULE_AUTHOR("Christoph Jungegger <disdos@traum404.de>");
-MODULE_DESCRIPTION("Driver for wireless LAN cards using the Nortel PCI bridge");
-MODULE_LICENSE("Dual MPL/GPL");
-
-static int __init orinoco_nortel_init(void)
-{
- printk(KERN_DEBUG "%s\n", version);
- return pci_register_driver(&orinoco_nortel_driver);
-}
-
-static void __exit orinoco_nortel_exit(void)
-{
- pci_unregister_driver(&orinoco_nortel_driver);
-}
-
-module_init(orinoco_nortel_init);
-module_exit(orinoco_nortel_exit);
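
orinoco_nortel_init_one() above follows the usual acquire-in-order, release-in-reverse goto ladder: each failure label undoes exactly what was acquired before the jump, and the success path returns before reaching any label. A reduced sketch of the shape, with malloc()/free() standing in for the PCI calls (hypothetical, for illustration):

#include <stdlib.h>

static int probe(void)
{
	void *regions = malloc(16);          /* pci_request_regions() */
	if (!regions)
		return -1;
	void *mapping = malloc(16);          /* pci_iomap() */
	if (!mapping)
		goto fail_regions;
	void *irq = malloc(16);              /* request_irq() */
	if (!irq)
		goto fail_mapping;
	return 0;      /* resources intentionally kept on success */

fail_mapping:
	free(mapping);
fail_regions:
	free(regions);
	return -1;
}

int main(void) { return probe() ? 1 : 0; }
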
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c
deleted file mode 100644
index 7e3a6dd60..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/* orinoco_pci.c
- *
- * Driver for Prism 2.5/3 devices that have a direct PCI interface
- * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge).
- * The card contains only one PCI region, which contains all the usual
- * hermes registers, as well as the COR register.
- *
- * Current maintainers are:
- * Pavel Roskin <proski AT gnu.org>
- * and David Gibson <hermes AT gibson.dropbear.id.au>
- *
- * Some of this code is borrowed from orinoco_plx.c
- * Copyright (C) 2001 Daniel Barlow <dan AT telent.net>
- * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing
- * has been copied from it. linux-wlan-ng-0.1.10 is originally :
- * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
- * This file originally written by:
- * Copyright (C) 2001 Jean Tourrilhes <jt AT hpl.hp.com>
- * And is now maintained by:
- * (C) Copyright David Gibson, IBM Corp. 2002-2003.
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- */
-
-#define DRIVER_NAME "orinoco_pci"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
-#include "orinoco.h"
-#include "orinoco_pci.h"
-
-/* Offset of the COR register of the PCI card */
-#define HERMES_PCI_COR (0x26)
-
-/* Bitmask to reset the card */
-#define HERMES_PCI_COR_MASK (0x0080)
-
-/* Magic timeouts for doing the reset.
- * Those times are straight from wlan-ng, and it is claimed that they
- * are necessary. Alan will kill me. Take your time and grab a coffee. */
-#define HERMES_PCI_COR_ONT (250) /* ms */
-#define HERMES_PCI_COR_OFFT (500) /* ms */
-#define HERMES_PCI_COR_BUSYT (500) /* ms */
-
-/*
- * Do a soft reset of the card using the Configuration Option Register.
- * We need this to get going...
- * This is the part of the code that is strongly inspired by wlan-ng.
- *
- * Note: This code runs with IRQs enabled, which means that many
- * interrupts will occur while we are here. This is why we use
- * jiffies to regulate time instead of a straight mdelay(). Usually
- * only around 245 iterations of the loop are needed for a 250 ms delay.
- *
- * Note also: Don't try to access HERMES_CMD during the reset phase.
- * It just won't work!
- */
-static int orinoco_pci_cor_reset(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- unsigned long timeout;
- u16 reg;
-
- /* Assert the reset until the card notices */
- hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK);
- mdelay(HERMES_PCI_COR_ONT);
-
- /* Give time for the card to recover from this hard effort */
- hermes_write_regn(hw, PCI_COR, 0x0000);
- mdelay(HERMES_PCI_COR_OFFT);
-
- /* The card is ready when it's no longer busy */
- timeout = jiffies + msecs_to_jiffies(HERMES_PCI_COR_BUSYT);
- reg = hermes_read_regn(hw, CMD);
- while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
- mdelay(1);
- reg = hermes_read_regn(hw, CMD);
- }
-
- /* Still busy? */
- if (reg & HERMES_CMD_BUSY) {
- printk(KERN_ERR PFX "Busy timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int orinoco_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int err;
- struct orinoco_private *priv;
- struct orinoco_pci_card *card;
- void __iomem *hermes_io;
-
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device\n");
- return err;
- }
-
- err = pci_request_regions(pdev, DRIVER_NAME);
- if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
- goto fail_resources;
- }
-
- hermes_io = pci_iomap(pdev, 0, 0);
- if (!hermes_io) {
- printk(KERN_ERR PFX "Cannot remap chipset registers\n");
- err = -EIO;
- goto fail_map_hermes;
- }
-
- /* Allocate network device */
- priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
- orinoco_pci_cor_reset, NULL);
- if (!priv) {
- printk(KERN_ERR PFX "Cannot allocate network device\n");
- err = -ENOMEM;
- goto fail_alloc;
- }
-
- card = priv->card;
-
- hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING);
-
- err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
- DRIVER_NAME, priv);
- if (err) {
- printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
- err = -EBUSY;
- goto fail_irq;
- }
-
- err = orinoco_pci_cor_reset(priv);
- if (err) {
- printk(KERN_ERR PFX "Initial reset failed\n");
- goto fail;
- }
-
- err = orinoco_init(priv);
- if (err) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto fail;
- }
-
- err = orinoco_if_add(priv, 0, 0, NULL);
- if (err) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail_wiphy;
- }
-
- pci_set_drvdata(pdev, priv);
-
- return 0;
-
- fail_wiphy:
- wiphy_unregister(priv_to_wiphy(priv));
- fail:
- free_irq(pdev->irq, priv);
-
- fail_irq:
- free_orinocodev(priv);
-
- fail_alloc:
- pci_iounmap(pdev, hermes_io);
-
- fail_map_hermes:
- pci_release_regions(pdev);
-
- fail_resources:
- pci_disable_device(pdev);
-
- return err;
-}
-
-static void orinoco_pci_remove_one(struct pci_dev *pdev)
-{
- struct orinoco_private *priv = pci_get_drvdata(pdev);
-
- orinoco_if_del(priv);
- wiphy_unregister(priv_to_wiphy(priv));
- free_irq(pdev->irq, priv);
- free_orinocodev(priv);
- pci_iounmap(pdev, priv->hw.iobase);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-static const struct pci_device_id orinoco_pci_id_table[] = {
- /* Intersil Prism 3 */
- {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
- /* Intersil Prism 2.5 */
- {0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID,},
- /* Samsung MagicLAN SWL-2210P */
- {0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID,},
- {0,},
-};
-
-MODULE_DEVICE_TABLE(pci, orinoco_pci_id_table);
-
-static struct pci_driver orinoco_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = orinoco_pci_id_table,
- .probe = orinoco_pci_init_one,
- .remove = orinoco_pci_remove_one,
- .driver.pm = &orinoco_pci_pm_ops,
-};
-
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Pavel Roskin <proski@gnu.org>,"
- " David Gibson <hermes@gibson.dropbear.id.au> &"
- " Jean Tourrilhes <jt@hpl.hp.com>)";
-MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> &"
- " David Gibson <hermes@gibson.dropbear.id.au>");
-MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface");
-MODULE_LICENSE("Dual MPL/GPL");
-
-static int __init orinoco_pci_init(void)
-{
- printk(KERN_DEBUG "%s\n", version);
- return pci_register_driver(&orinoco_pci_driver);
-}
-
-static void __exit orinoco_pci_exit(void)
-{
- pci_unregister_driver(&orinoco_pci_driver);
-}
-
-module_init(orinoco_pci_init);
-module_exit(orinoco_pci_exit);
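
The busy-wait in orinoco_pci_cor_reset() samples the CMD register, then re-reads it under a jiffies deadline so a wedged card cannot hang the probe forever. The same poll-with-deadline shape in portable C, with CLOCK_MONOTONIC standing in for jiffies (a sketch, not kernel code):

#define _POSIX_C_SOURCE 199309L   /* for clock_gettime() */
#include <stdbool.h>
#include <time.h>

static long elapsed_ms(const struct timespec *t0)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - t0->tv_sec) * 1000
	     + (now.tv_nsec - t0->tv_nsec) / 1000000;
}

/* Returns true once busy() clears, false on timeout (-ETIMEDOUT). */
static bool wait_not_busy(bool (*busy)(void), long timeout_ms)
{
	struct timespec t0;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	while (busy()) {
		if (elapsed_ms(&t0) > timeout_ms)
			return false;
	}
	return true;
}
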
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_pci.h b/drivers/net/wireless/intersil/orinoco/orinoco_pci.h
deleted file mode 100644
index d49d94086..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_pci.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* orinoco_pci.h
- *
- * Common code for all Orinoco drivers for PCI devices, including
- * both native PCI and PCMCIA-to-PCI bridges.
- *
- * Copyright (C) 2005, Pavel Roskin.
- * See main.c for license.
- */
-
-#ifndef _ORINOCO_PCI_H
-#define _ORINOCO_PCI_H
-
-#include <linux/netdevice.h>
-
-/* Driver specific data */
-struct orinoco_pci_card {
- void __iomem *bridge_io;
- void __iomem *attr_io;
-};
-
-static int __maybe_unused orinoco_pci_suspend(struct device *dev_d)
-{
- struct pci_dev *pdev = to_pci_dev(dev_d);
- struct orinoco_private *priv = pci_get_drvdata(pdev);
-
- orinoco_down(priv);
- free_irq(pdev->irq, priv);
-
- return 0;
-}
-
-static int __maybe_unused orinoco_pci_resume(struct device *dev_d)
-{
- struct pci_dev *pdev = to_pci_dev(dev_d);
- struct orinoco_private *priv = pci_get_drvdata(pdev);
- struct net_device *dev = priv->ndev;
- int err;
-
- err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
- dev->name, priv);
- if (err) {
- printk(KERN_ERR "%s: cannot re-allocate IRQ on resume\n",
- dev->name);
- return -EBUSY;
- }
-
- return orinoco_up(priv);
-}
-
-static SIMPLE_DEV_PM_OPS(orinoco_pci_pm_ops,
- orinoco_pci_suspend,
- orinoco_pci_resume);
-
-#endif /* _ORINOCO_PCI_H */
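
The header above is shared by the orinoco_nortel, orinoco_pci, orinoco_plx and orinoco_tmd drivers; the __maybe_unused markers keep the compiler quiet in configurations where the handlers end up unreferenced. For reference, the SIMPLE_DEV_PM_OPS() line has roughly the effect of this initializer (a sketch of the macro's expansion at the time, not a verbatim copy of pm.h):

static const struct dev_pm_ops orinoco_pci_pm_ops = {
	.suspend  = orinoco_pci_suspend,   /* system suspend/resume */
	.resume   = orinoco_pci_resume,
	.freeze   = orinoco_pci_suspend,   /* hibernation entry/exit */
	.thaw     = orinoco_pci_resume,
	.poweroff = orinoco_pci_suspend,
	.restore  = orinoco_pci_resume,
};
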
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c
deleted file mode 100644
index 73e6ae124..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/* orinoco_plx.c
- *
- * Driver for Prism II devices which would usually be driven by orinoco_cs,
- * but are connected to the PCI bus by a PLX9052.
- *
- * Current maintainers are:
- * Pavel Roskin <proski AT gnu.org>
- * and David Gibson <hermes AT gibson.dropbear.id.au>
- *
- * (C) Copyright David Gibson, IBM Corp. 2001-2003.
- * Copyright (C) 2001 Daniel Barlow
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- *
- * Here's the general details on how the PLX9052 adapter works:
- *
- * - Two PCI I/O address spaces, one 0x80 long which contains the
- * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA
- * slot I/O address space.
- *
- * - One PCI memory address space, mapped to the PCMCIA attribute space
- * (containing the CIS).
- *
- * Using the latter, you can read through the CIS data to make sure the
- * card is compatible with the driver. Keep in mind that the PCMCIA
- * spec stores the CIS in the lower 8 bits of each 16-bit word, so to
- * read the bytes of the CIS you read every other byte
- * (0,2,4,...). Once that test passes, you need to enable the I/O address
- * space on the PCMCIA card via the PCMCIA COR register. This is the
- * first byte following the CIS. In my case (which may not have any
- * relation to what's on the PRISM2 cards), COR was at offset 0x800
- * within the PCI memory space. Write 0x41 to the COR register to
- * enable I/O mode and to select level triggered interrupts. To
- * confirm you actually succeeded, read the COR register back and make
- * sure it actually got set to 0x41, in case you have an unexpected
- * card inserted.
- *
- * Following that, you can treat the second PCI I/O address space (the
- * one that's not 0x80 in length) as the PCMCIA I/O space.
- *
- * Note that in Eumitcom's source for their drivers, they register
- * the interrupt as edge triggered when registering it with the
- * Windows kernel. I don't recall how to register edge triggered on
- * Linux (if it can be done at all). But in some experimentation, I
- * don't see much operational difference between using either
- * interrupt mode. Don't mess with the interrupt mode in the COR
- * register though, as the PLX9052 wants level triggers with the way
- * the serial EEPROM configures it on the WL11000.
- *
- * There are some other little quirks related to timing that I bumped
- * into, but I don't recall them right now. Also, there are two variants of
- * the WL11000 I've seen, revision A1 and T2. These seem to differ
- * slightly in the timings configured in the wait-state generator in
- * the PLX9052. There have also been some comments from Eumitcom that
- * cards shouldn't be hot swapped, apparently due to risk of cooking
- * the PLX9052. I'm unsure why they believe this, as I can't see
- * anything in the design that would really cause a problem, except
- * for crashing drivers not written to expect it. And having developed
- * drivers for the WL11000, I'd say it's quite tricky to write code
- * that will successfully deal with a hot unplug. Very odd things
- * happen on the I/O side of things. But anyway, be warned. Despite
- * that, I've hot-swapped a number of times during debugging and
- * driver development for various reasons (stuck WAIT# line after the
- * radio card's firmware locks up).
- */
-
-#define DRIVER_NAME "orinoco_plx"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <pcmcia/cisreg.h>
-
-#include "orinoco.h"
-#include "orinoco_pci.h"
-
-#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
-#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
-#define COR_RESET (0x80) /* reset bit in the COR register */
-#define PLX_RESET_TIME (500) /* milliseconds */
-
-#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
-#define PLX_INTCSR_INTEN (1 << 6) /* Interrupt Enable bit */
-
-/*
- * Do a soft reset of the card using the Configuration Option Register
- */
-static int orinoco_plx_cor_reset(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- struct orinoco_pci_card *card = priv->card;
- unsigned long timeout;
- u16 reg;
-
- iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET);
- mdelay(1);
-
- iowrite8(COR_VALUE, card->attr_io + COR_OFFSET);
- mdelay(1);
-
- /* Just in case, wait more until the card is no longer busy */
- timeout = jiffies + msecs_to_jiffies(PLX_RESET_TIME);
- reg = hermes_read_regn(hw, CMD);
- while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
- mdelay(1);
- reg = hermes_read_regn(hw, CMD);
- }
-
- /* Still busy? */
- if (reg & HERMES_CMD_BUSY) {
- printk(KERN_ERR PFX "Busy timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int orinoco_plx_hw_init(struct orinoco_pci_card *card)
-{
- int i;
- u32 csr_reg;
- static const u8 cis_magic[] = {
- 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
- };
-
- printk(KERN_DEBUG PFX "CIS: ");
- for (i = 0; i < 16; i++)
- printk("%02X:", ioread8(card->attr_io + (i << 1)));
- printk("\n");
-
- /* Verify whether a supported PC card is present */
-	/* FIXME: we probably need to be smarter about this */
- for (i = 0; i < sizeof(cis_magic); i++) {
- if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) {
- printk(KERN_ERR PFX "The CIS value of Prism2 PC "
- "card is unexpected\n");
- return -ENODEV;
- }
- }
-
- /* bjoern: We need to tell the card to enable interrupts, in
- case the serial eprom didn't do this already. See the
- PLX9052 data book, p8-1 and 8-24 for reference. */
- csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
- if (!(csr_reg & PLX_INTCSR_INTEN)) {
- csr_reg |= PLX_INTCSR_INTEN;
- iowrite32(csr_reg, card->bridge_io + PLX_INTCSR);
- csr_reg = ioread32(card->bridge_io + PLX_INTCSR);
- if (!(csr_reg & PLX_INTCSR_INTEN)) {
- printk(KERN_ERR PFX "Cannot enable interrupts\n");
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int orinoco_plx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int err;
- struct orinoco_private *priv;
- struct orinoco_pci_card *card;
- void __iomem *hermes_io, *attr_io, *bridge_io;
-
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device\n");
- return err;
- }
-
- err = pci_request_regions(pdev, DRIVER_NAME);
- if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
- goto fail_resources;
- }
-
- bridge_io = pci_iomap(pdev, 1, 0);
- if (!bridge_io) {
- printk(KERN_ERR PFX "Cannot map bridge registers\n");
- err = -EIO;
- goto fail_map_bridge;
- }
-
- attr_io = pci_iomap(pdev, 2, 0);
- if (!attr_io) {
- printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n");
- err = -EIO;
- goto fail_map_attr;
- }
-
- hermes_io = pci_iomap(pdev, 3, 0);
- if (!hermes_io) {
- printk(KERN_ERR PFX "Cannot map chipset registers\n");
- err = -EIO;
- goto fail_map_hermes;
- }
-
- /* Allocate network device */
- priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
- orinoco_plx_cor_reset, NULL);
- if (!priv) {
- printk(KERN_ERR PFX "Cannot allocate network device\n");
- err = -ENOMEM;
- goto fail_alloc;
- }
-
- card = priv->card;
- card->bridge_io = bridge_io;
- card->attr_io = attr_io;
-
- hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
-
- err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
- DRIVER_NAME, priv);
- if (err) {
- printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
- err = -EBUSY;
- goto fail_irq;
- }
-
- err = orinoco_plx_hw_init(card);
- if (err) {
- printk(KERN_ERR PFX "Hardware initialization failed\n");
- goto fail;
- }
-
- err = orinoco_plx_cor_reset(priv);
- if (err) {
- printk(KERN_ERR PFX "Initial reset failed\n");
- goto fail;
- }
-
- err = orinoco_init(priv);
- if (err) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto fail;
- }
-
- err = orinoco_if_add(priv, 0, 0, NULL);
- if (err) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail_wiphy;
- }
-
- pci_set_drvdata(pdev, priv);
-
- return 0;
-
- fail_wiphy:
- wiphy_unregister(priv_to_wiphy(priv));
- fail:
- free_irq(pdev->irq, priv);
-
- fail_irq:
- free_orinocodev(priv);
-
- fail_alloc:
- pci_iounmap(pdev, hermes_io);
-
- fail_map_hermes:
- pci_iounmap(pdev, attr_io);
-
- fail_map_attr:
- pci_iounmap(pdev, bridge_io);
-
- fail_map_bridge:
- pci_release_regions(pdev);
-
- fail_resources:
- pci_disable_device(pdev);
-
- return err;
-}
-
-static void orinoco_plx_remove_one(struct pci_dev *pdev)
-{
- struct orinoco_private *priv = pci_get_drvdata(pdev);
- struct orinoco_pci_card *card = priv->card;
-
- orinoco_if_del(priv);
- wiphy_unregister(priv_to_wiphy(priv));
- free_irq(pdev->irq, priv);
- free_orinocodev(priv);
- pci_iounmap(pdev, priv->hw.iobase);
- pci_iounmap(pdev, card->attr_io);
- pci_iounmap(pdev, card->bridge_io);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-static const struct pci_device_id orinoco_plx_id_table[] = {
- {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
- {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
- {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
- {0x1638, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* SMC EZConnect SMC2602W,
- Eumitcom PCI WL11000,
- Addtron AWA-100 */
- {0x16ab, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* Global Sun Tech GL24110P */
- {0x16ab, 0x1101, PCI_ANY_ID, PCI_ANY_ID,}, /* Reported working, but unknown */
- {0x16ab, 0x1102, PCI_ANY_ID, PCI_ANY_ID,}, /* Linksys WDT11 */
- {0x16ec, 0x3685, PCI_ANY_ID, PCI_ANY_ID,}, /* USR 2415 */
- {0xec80, 0xec00, PCI_ANY_ID, PCI_ANY_ID,}, /* Belkin F5D6000 tested by
- Brendan W. McAdams <rit AT jacked-in.org> */
- {0x10b7, 0x7770, PCI_ANY_ID, PCI_ANY_ID,}, /* 3Com AirConnect PCI tested by
- Damien Persohn <damien AT persohn.net> */
- {0,},
-};
-
-MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table);
-
-static struct pci_driver orinoco_plx_driver = {
- .name = DRIVER_NAME,
- .id_table = orinoco_plx_id_table,
- .probe = orinoco_plx_init_one,
- .remove = orinoco_plx_remove_one,
- .driver.pm = &orinoco_pci_pm_ops,
-};
-
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Pavel Roskin <proski@gnu.org>,"
- " David Gibson <hermes@gibson.dropbear.id.au>,"
- " Daniel Barlow <dan@telent.net>)";
-MODULE_AUTHOR("Daniel Barlow <dan@telent.net>");
-MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge");
-MODULE_LICENSE("Dual MPL/GPL");
-
-static int __init orinoco_plx_init(void)
-{
- printk(KERN_DEBUG "%s\n", version);
- return pci_register_driver(&orinoco_plx_driver);
-}
-
-static void __exit orinoco_plx_exit(void)
-{
- pci_unregister_driver(&orinoco_plx_driver);
-}
-
-module_init(orinoco_plx_init);
-module_exit(orinoco_plx_exit);
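
The CIS check in orinoco_plx_hw_init() applies the convention spelled out in the header comment: attribute memory carries one meaningful byte per 16-bit word, so only even offsets are read, hence the (i << 1) stride. The comparison isolated over a plain buffer standing in for the mapped BAR (a self-contained sketch):

#include <stddef.h>
#include <stdint.h>

static int cis_matches(const volatile uint8_t *attr,
		       const uint8_t *magic, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (attr[i << 1] != magic[i])   /* even offsets only */
			return 0;
	return 1;
}

int main(void)
{
	static const uint8_t magic[] = {
		0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
	};
	uint8_t attr[16];
	size_t i;

	for (i = 0; i < sizeof(magic); i++) {
		attr[i * 2] = magic[i];         /* data bytes */
		attr[i * 2 + 1] = 0xff;         /* don't-care odd bytes */
	}
	return cis_matches(attr, magic, sizeof(magic)) ? 0 : 1;
}
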
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
deleted file mode 100644
index 939d5a1dc..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/* orinoco_tmd.c
- *
- * Driver for Prism II devices which would usually be driven by orinoco_cs,
- * but are connected to the PCI bus by a TMD7160.
- *
- * Copyright (C) 2003 Joerg Dorchain <joerg AT dorchain.net>
- * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- *
- * The actual driving is done by main.c; this is just resource
- * allocation stuff.
- *
- * This driver is modeled after the orinoco_plx driver. The main
- * difference is that the TMD chip has only IO port ranges and doesn't
- * provide access to the PCMCIA attribute space.
- *
- * Pheecom sells cards with the TMD chip as the "ASIC version".
- */
-
-#define DRIVER_NAME "orinoco_tmd"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <pcmcia/cisreg.h>
-
-#include "orinoco.h"
-#include "orinoco_pci.h"
-
-#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
-#define COR_RESET (0x80) /* reset bit in the COR register */
-#define TMD_RESET_TIME (500) /* milliseconds */
-
-/*
- * Do a soft reset of the card using the Configuration Option Register
- */
-static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
-{
- struct hermes *hw = &priv->hw;
- struct orinoco_pci_card *card = priv->card;
- unsigned long timeout;
- u16 reg;
-
- iowrite8(COR_VALUE | COR_RESET, card->bridge_io);
- mdelay(1);
-
- iowrite8(COR_VALUE, card->bridge_io);
- mdelay(1);
-
- /* Just in case, wait more until the card is no longer busy */
- timeout = jiffies + msecs_to_jiffies(TMD_RESET_TIME);
- reg = hermes_read_regn(hw, CMD);
- while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) {
- mdelay(1);
- reg = hermes_read_regn(hw, CMD);
- }
-
- /* Still busy? */
- if (reg & HERMES_CMD_BUSY) {
- printk(KERN_ERR PFX "Busy timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-
-static int orinoco_tmd_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int err;
- struct orinoco_private *priv;
- struct orinoco_pci_card *card;
- void __iomem *hermes_io, *bridge_io;
-
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device\n");
- return err;
- }
-
- err = pci_request_regions(pdev, DRIVER_NAME);
- if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources\n");
- goto fail_resources;
- }
-
- bridge_io = pci_iomap(pdev, 1, 0);
- if (!bridge_io) {
- printk(KERN_ERR PFX "Cannot map bridge registers\n");
- err = -EIO;
- goto fail_map_bridge;
- }
-
- hermes_io = pci_iomap(pdev, 2, 0);
- if (!hermes_io) {
- printk(KERN_ERR PFX "Cannot map chipset registers\n");
- err = -EIO;
- goto fail_map_hermes;
- }
-
- /* Allocate network device */
- priv = alloc_orinocodev(sizeof(*card), &pdev->dev,
- orinoco_tmd_cor_reset, NULL);
- if (!priv) {
- printk(KERN_ERR PFX "Cannot allocate network device\n");
- err = -ENOMEM;
- goto fail_alloc;
- }
-
- card = priv->card;
- card->bridge_io = bridge_io;
-
- hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
-
- err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
- DRIVER_NAME, priv);
- if (err) {
- printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq);
- err = -EBUSY;
- goto fail_irq;
- }
-
- err = orinoco_tmd_cor_reset(priv);
- if (err) {
- printk(KERN_ERR PFX "Initial reset failed\n");
- goto fail;
- }
-
- err = orinoco_init(priv);
- if (err) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto fail;
- }
-
- err = orinoco_if_add(priv, 0, 0, NULL);
- if (err) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto fail;
- }
-
- pci_set_drvdata(pdev, priv);
-
- return 0;
-
- fail:
- free_irq(pdev->irq, priv);
-
- fail_irq:
- free_orinocodev(priv);
-
- fail_alloc:
- pci_iounmap(pdev, hermes_io);
-
- fail_map_hermes:
- pci_iounmap(pdev, bridge_io);
-
- fail_map_bridge:
- pci_release_regions(pdev);
-
- fail_resources:
- pci_disable_device(pdev);
-
- return err;
-}
-
-static void orinoco_tmd_remove_one(struct pci_dev *pdev)
-{
- struct orinoco_private *priv = pci_get_drvdata(pdev);
- struct orinoco_pci_card *card = priv->card;
-
- orinoco_if_del(priv);
- free_irq(pdev->irq, priv);
- free_orinocodev(priv);
- pci_iounmap(pdev, priv->hw.iobase);
- pci_iounmap(pdev, card->bridge_io);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-static const struct pci_device_id orinoco_tmd_id_table[] = {
- {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
- {0,},
-};
-
-MODULE_DEVICE_TABLE(pci, orinoco_tmd_id_table);
-
-static struct pci_driver orinoco_tmd_driver = {
- .name = DRIVER_NAME,
- .id_table = orinoco_tmd_id_table,
- .probe = orinoco_tmd_init_one,
- .remove = orinoco_tmd_remove_one,
- .driver.pm = &orinoco_pci_pm_ops,
-};
-
-static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Joerg Dorchain <joerg@dorchain.net>)";
-MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
-MODULE_DESCRIPTION("Driver for wireless LAN cards using the TMD7160 PCI bridge");
-MODULE_LICENSE("Dual MPL/GPL");
-
-static int __init orinoco_tmd_init(void)
-{
- printk(KERN_DEBUG "%s\n", version);
- return pci_register_driver(&orinoco_tmd_driver);
-}
-
-static void __exit orinoco_tmd_exit(void)
-{
- pci_unregister_driver(&orinoco_tmd_driver);
-}
-
-module_init(orinoco_tmd_init);
-module_exit(orinoco_tmd_exit);
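
Each of the four PCI drivers above keeps an open-coded module init/exit pair only so it can print a version banner at load time; giving that up would let the tail of each file collapse to one line, mirroring module_pcmcia_driver() in orinoco_cs.c. A hypothetical simplification for this file:

/* Hypothetical: module_pci_driver() generates the init/exit pair,
 * at the cost of dropping the KERN_DEBUG version banner. */
module_pci_driver(orinoco_tmd_driver);
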
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
deleted file mode 100644
index 866e0230d..000000000
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ /dev/null
@@ -1,1787 +0,0 @@
-/*
- * USB Orinoco driver
- *
- * Copyright (c) 2003 Manuel Estrada Sainz
- *
- * The contents of this file are subject to the Mozilla Public License
- * Version 1.1 (the "License"); you may not use this file except in
- * compliance with the License. You may obtain a copy of the License
- * at http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
- * the License for the specific language governing rights and
- * limitations under the License.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the MPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the MPL or the GPL.
- *
- * Queueing code based on linux-wlan-ng 0.2.1-pre5
- *
- * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
- *
- * The license is the same as above.
- *
- * Initially based on USB Skeleton driver - 0.7
- *
- * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * NOTE: The original USB Skeleton driver is GPL, but all that code is
- * gone so MPL/GPL applies.
- */
-
-#define DRIVER_NAME "orinoco_usb"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/timer.h>
-
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/refcount.h>
-
-#include "mic.h"
-#include "orinoco.h"
-
-#ifndef URB_ASYNC_UNLINK
-#define URB_ASYNC_UNLINK 0
-#endif
-
-struct header_struct {
- /* 802.3 */
- u8 dest[ETH_ALEN];
- u8 src[ETH_ALEN];
- __be16 len;
- /* 802.2 */
- u8 dsap;
- u8 ssap;
- u8 ctrl;
- /* SNAP */
- u8 oui[3];
- __be16 ethertype;
-} __packed;
-
-struct ez_usb_fw {
- u16 size;
- const u8 *code;
-};
-
-static struct ez_usb_fw firmware = {
- .size = 0,
- .code = NULL,
-};
-
-/* Debugging macros */
-#undef err
-#define err(format, arg...) \
- do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
-
-MODULE_FIRMWARE("orinoco_ezusb_fw");
-
-/*
- * Under some conditions, the card gets stuck and stops paying attention
- * to the world (i.e. data communication stalls) until we do something to
- * it. Sending an INQ_TALLIES command seems to be enough and should be
- * harmless otherwise. This behaviour has been observed when using the
- * driver on a systemimager client during installation. In the past a
- * timer was used to send INQ_TALLIES commands when there was no other
- * activity, but it was troublesome and was removed.
- */
-
-#define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. */
-#define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */
-#define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */
-#define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */
-
-#define USB_MELCO_VENDOR_ID 0x0411
-#define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */
-#define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */
-#define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */
-
-#define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */
-#define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */
-
-#define USB_AVAYA8_VENDOR_ID 0x0D98
-#define USB_AVAYAE_VENDOR_ID 0x0D9E
-#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya USB Wireless Card */
-
-#define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */
-#define USB_AGERE_MODEL0801_ID 0x1000 /* USB Wireless Card Model 0801 */
-#define USB_AGERE_MODEL0802_ID 0x1001 /* USB Wireless Card Model 0802 */
-#define USB_AGERE_REBRANDED_ID 0x047A /* USB WLAN Card */
-
-#define USB_ELSA_VENDOR_ID 0x05CC
-#define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */
-
-#define USB_LEGEND_VENDOR_ID 0x0E7C
-#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet USB WLAN Card */
-
-#define USB_SAMSUNG_VENDOR_ID 0x04E8
-#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
-#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
-#define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */
-
-#define USB_IGATE_VENDOR_ID 0x0681
-#define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */
-
-#define USB_FUJITSU_VENDOR_ID 0x0BF8
-#define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */
-
-#define USB_2WIRE_VENDOR_ID 0x1630
-#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire USB Wireless adapter */
-
-
-#define EZUSB_REQUEST_FW_TRANS 0xA0
-#define EZUSB_REQUEST_TRIGGER 0xAA
-#define EZUSB_REQUEST_TRIG_AC 0xAC
-#define EZUSB_CPUCS_REG 0x7F92
-
-#define EZUSB_RID_TX 0x0700
-#define EZUSB_RID_RX 0x0701
-#define EZUSB_RID_INIT1 0x0702
-#define EZUSB_RID_ACK 0x0710
-#define EZUSB_RID_READ_PDA 0x0800
-#define EZUSB_RID_PROG_INIT 0x0852
-#define EZUSB_RID_PROG_SET_ADDR 0x0853
-#define EZUSB_RID_PROG_BYTES 0x0854
-#define EZUSB_RID_PROG_END 0x0855
-#define EZUSB_RID_DOCMD 0x0860
-
-/* Recognize info frames */
-#define EZUSB_IS_INFO(id)	(((id) >= 0xF000) && ((id) <= 0xF2FF))
-
-#define EZUSB_MAGIC 0x0210
-
-#define EZUSB_FRAME_DATA 1
-#define EZUSB_FRAME_CONTROL 2
-
-#define DEF_TIMEOUT (3 * HZ)
-
-#define BULK_BUF_SIZE 2048
-
-#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))
-
-#define FW_BUF_SIZE 64
-#define FW_VAR_OFFSET_PTR 0x359
-#define FW_VAR_VALUE 0
-#define FW_HOLE_START 0x100
-#define FW_HOLE_END 0x300
-
-struct ezusb_packet {
- __le16 magic; /* 0x0210 */
- u8 req_reply_count;
- u8 ans_reply_count;
- __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */
- __le16 size; /* transport size */
- __le16 crc; /* CRC up to here */
- __le16 hermes_len;
- __le16 hermes_rid;
- u8 data[];
-} __packed;
-
-/* Table of devices that work or may work with this driver */
-static const struct usb_device_id ezusb_table[] = {
- {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
- {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
- {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
- {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
- {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
- {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
- {USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
- {USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
- {USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
- {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
- {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
- {USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
- {USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
- {USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID,
- 0, 0)},
- {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
- {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
- {USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
- {USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
- {USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
- {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
- {} /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, ezusb_table);
-
-/* Structure to hold all of our device specific stuff */
-struct ezusb_priv {
- struct usb_device *udev;
- struct net_device *dev;
- struct mutex mtx;
- spinlock_t req_lock;
- struct list_head req_pending;
- struct list_head req_active;
- spinlock_t reply_count_lock;
- u16 hermes_reg_fake[0x40];
- u8 *bap_buf;
- struct urb *read_urb;
- int read_pipe;
- int write_pipe;
- u8 reply_count;
-};
-
-enum ezusb_state {
- EZUSB_CTX_START,
- EZUSB_CTX_QUEUED,
- EZUSB_CTX_REQ_SUBMITTED,
- EZUSB_CTX_REQ_COMPLETE,
- EZUSB_CTX_RESP_RECEIVED,
- EZUSB_CTX_REQ_TIMEOUT,
- EZUSB_CTX_REQ_FAILED,
- EZUSB_CTX_RESP_TIMEOUT,
- EZUSB_CTX_REQSUBMIT_FAIL,
- EZUSB_CTX_COMPLETE,
-};
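A quick map of how these states are used further down; the transitions are my reading of the callbacks and timer handler below, not documentation shipped with the driver.

/*
 * Normal request lifecycle:
 *   START ---------------> QUEUED          ezusb_req_enqueue_run()
 *   QUEUED --------------> REQ_SUBMITTED   ezusb_req_queue_run(), OUT urb sent
 *   REQ_SUBMITTED -------> REQ_COMPLETE    OUT callback; reply still pending
 *   REQ_COMPLETE --------> COMPLETE        IN callback matched the reply
 *
 * Out-of-order and failure paths:
 *   REQ_SUBMITTED -------> RESP_RECEIVED   IN reply arrived before OUT ack
 *   RESP_RECEIVED -------> COMPLETE        OUT ack arrives afterwards
 *   QUEUED --------------> REQSUBMIT_FAIL  usb_submit_urb() failed
 *   REQ_SUBMITTED -------> REQ_TIMEOUT     timer fired, OUT urb unlinked
 *   REQ_COMPLETE --------> RESP_TIMEOUT    timer fired waiting for the reply
 *   (cancelled OUT urb) -> REQ_FAILED      OUT callback with error status
 */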
-
-struct request_context {
- struct list_head list;
- refcount_t refcount;
- struct completion done; /* Signals that CTX is dead */
- int killed;
- struct urb *outurb; /* OUT for req pkt */
- struct ezusb_priv *upriv;
- struct ezusb_packet *buf;
- int buf_length;
- struct timer_list timer; /* Timeout handling */
- enum ezusb_state state; /* Current state */
- /* the RID that we will wait for */
- u16 out_rid;
- u16 in_rid;
-};
-
-
-/* Forward declarations */
-static void ezusb_ctx_complete(struct request_context *ctx);
-static void ezusb_req_queue_run(struct ezusb_priv *upriv);
-static void ezusb_bulk_in_callback(struct urb *urb);
-
-static inline u8 ezusb_reply_inc(u8 count)
-{
- if (count < 0x7F)
- return count + 1;
- else
- return 1;
-}
-
-static void ezusb_request_context_put(struct request_context *ctx)
-{
- if (!refcount_dec_and_test(&ctx->refcount))
- return;
-
- WARN_ON(!ctx->done.done);
- BUG_ON(ctx->outurb->status == -EINPROGRESS);
- BUG_ON(timer_pending(&ctx->timer));
- usb_free_urb(ctx->outurb);
- kfree(ctx->buf);
- kfree(ctx);
-}
-
-static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
- struct timer_list *timer,
- unsigned long expire)
-{
- if (!upriv->udev)
- return;
- mod_timer(timer, expire);
-}
-
-static void ezusb_request_timerfn(struct timer_list *t)
-{
- struct request_context *ctx = from_timer(ctx, t, timer);
-
- ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
- if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
- ctx->state = EZUSB_CTX_REQ_TIMEOUT;
- } else {
- ctx->state = EZUSB_CTX_RESP_TIMEOUT;
- dev_dbg(&ctx->outurb->dev->dev, "couldn't unlink\n");
- refcount_inc(&ctx->refcount);
- ctx->killed = 1;
- ezusb_ctx_complete(ctx);
- ezusb_request_context_put(ctx);
- }
-}
-
-static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
- u16 out_rid, u16 in_rid)
-{
- struct request_context *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
- if (!ctx)
- return NULL;
-
- ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
- if (!ctx->buf) {
- kfree(ctx);
- return NULL;
- }
- ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!ctx->outurb) {
- kfree(ctx->buf);
- kfree(ctx);
- return NULL;
- }
-
- ctx->upriv = upriv;
- ctx->state = EZUSB_CTX_START;
- ctx->out_rid = out_rid;
- ctx->in_rid = in_rid;
-
- refcount_set(&ctx->refcount, 1);
- init_completion(&ctx->done);
-
- timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
- return ctx;
-}
-
-static void ezusb_ctx_complete(struct request_context *ctx)
-{
- struct ezusb_priv *upriv = ctx->upriv;
- unsigned long flags;
-
- spin_lock_irqsave(&upriv->req_lock, flags);
-
- list_del_init(&ctx->list);
- if (upriv->udev) {
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- ezusb_req_queue_run(upriv);
- spin_lock_irqsave(&upriv->req_lock, flags);
- }
-
- switch (ctx->state) {
- case EZUSB_CTX_COMPLETE:
- case EZUSB_CTX_REQSUBMIT_FAIL:
- case EZUSB_CTX_REQ_FAILED:
- case EZUSB_CTX_REQ_TIMEOUT:
- case EZUSB_CTX_RESP_TIMEOUT:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
- struct net_device *dev = upriv->dev;
- struct net_device_stats *stats = &dev->stats;
-
- if (ctx->state != EZUSB_CTX_COMPLETE)
- stats->tx_errors++;
- else
- stats->tx_packets++;
-
- netif_wake_queue(dev);
- }
- complete_all(&ctx->done);
- ezusb_request_context_put(ctx);
- break;
-
- default:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- if (!upriv->udev) {
- /* This is normal, as all request contexts get flushed
- * when the device is disconnected */
- err("Called, CTX not terminating, but device gone");
- complete_all(&ctx->done);
- ezusb_request_context_put(ctx);
- break;
- }
-
- err("Called, CTX not in terminating state.");
- /* Things are really bad if this happens. Just leak
- * the CTX because it may still be linked to the
- * queue or the OUT urb may still be active.
- * Just leaking at least prevents an Oops or Panic.
- */
- break;
- }
-}
-
-/*
- * ezusb_req_queue_run:
- * Description:
- * Note: Only one active CTX at any one time, because there's no
- * other (reliable) way to match the response URB to the correct
- * CTX.
- */
-static void ezusb_req_queue_run(struct ezusb_priv *upriv)
-{
- unsigned long flags;
- struct request_context *ctx;
- int result;
-
- spin_lock_irqsave(&upriv->req_lock, flags);
-
- if (!list_empty(&upriv->req_active))
- goto unlock;
-
- if (list_empty(&upriv->req_pending))
- goto unlock;
-
- ctx = list_entry(upriv->req_pending.next, struct request_context, list);
-
- if (!ctx->upriv->udev)
- goto unlock;
-
- /* We need to split this off to avoid a race condition */
- list_move_tail(&ctx->list, &upriv->req_active);
-
- if (ctx->state == EZUSB_CTX_QUEUED) {
- refcount_inc(&ctx->refcount);
- result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
- if (result) {
- ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;
-
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- err("Fatal, failed to submit command urb."
- " error=%d\n", result);
-
- ezusb_ctx_complete(ctx);
- ezusb_request_context_put(ctx);
- goto done;
- }
-
- ctx->state = EZUSB_CTX_REQ_SUBMITTED;
- ezusb_mod_timer(ctx->upriv, &ctx->timer,
- jiffies + DEF_TIMEOUT);
- }
-
- unlock:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- done:
- return;
-}
-
-static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
- struct request_context *ctx)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&upriv->req_lock, flags);
-
- if (!ctx->upriv->udev) {
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- goto done;
- }
- refcount_inc(&ctx->refcount);
- list_add_tail(&ctx->list, &upriv->req_pending);
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- ctx->state = EZUSB_CTX_QUEUED;
- ezusb_req_queue_run(upriv);
-
- done:
- return;
-}
-
-static void ezusb_request_out_callback(struct urb *urb)
-{
- unsigned long flags;
- enum ezusb_state state;
- struct request_context *ctx = urb->context;
- struct ezusb_priv *upriv = ctx->upriv;
-
- spin_lock_irqsave(&upriv->req_lock, flags);
-
- del_timer(&ctx->timer);
-
- if (ctx->killed) {
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- pr_warn("interrupt called with dead ctx\n");
- goto out;
- }
-
- state = ctx->state;
-
- if (urb->status == 0) {
- switch (state) {
- case EZUSB_CTX_REQ_SUBMITTED:
- if (ctx->in_rid) {
- ctx->state = EZUSB_CTX_REQ_COMPLETE;
- /* reply URB still pending */
- ezusb_mod_timer(upriv, &ctx->timer,
- jiffies + DEF_TIMEOUT);
- spin_unlock_irqrestore(&upriv->req_lock,
- flags);
- break;
- }
- fallthrough;
- case EZUSB_CTX_RESP_RECEIVED:
- /* IN already received before this OUT-ACK */
- ctx->state = EZUSB_CTX_COMPLETE;
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- ezusb_ctx_complete(ctx);
- break;
-
- default:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- err("Unexpected state(0x%x, %d) in OUT URB",
- state, urb->status);
- break;
- }
- } else {
- /* If someone cancels the OUT URB then its status
- * should be either -ECONNRESET or -ENOENT.
- */
- switch (state) {
- case EZUSB_CTX_REQ_SUBMITTED:
- case EZUSB_CTX_RESP_RECEIVED:
- ctx->state = EZUSB_CTX_REQ_FAILED;
- fallthrough;
-
- case EZUSB_CTX_REQ_FAILED:
- case EZUSB_CTX_REQ_TIMEOUT:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- ezusb_ctx_complete(ctx);
- break;
-
- default:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- err("Unexpected state(0x%x, %d) in OUT URB",
- state, urb->status);
- break;
- }
- }
- out:
- ezusb_request_context_put(ctx);
-}
-
-static void ezusb_request_in_callback(struct ezusb_priv *upriv,
- struct urb *urb)
-{
- struct ezusb_packet *ans = urb->transfer_buffer;
- struct request_context *ctx = NULL;
- enum ezusb_state state;
- unsigned long flags;
-
- /* Find the CTX on the active queue that requested this URB */
- spin_lock_irqsave(&upriv->req_lock, flags);
- if (upriv->udev) {
- struct list_head *item;
-
- list_for_each(item, &upriv->req_active) {
- struct request_context *c;
- int reply_count;
-
- c = list_entry(item, struct request_context, list);
- reply_count =
- ezusb_reply_inc(c->buf->req_reply_count);
- if ((ans->ans_reply_count == reply_count)
- && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
- ctx = c;
- break;
- }
- netdev_dbg(upriv->dev, "Skipped (0x%x/0x%x) (%d/%d)\n",
- le16_to_cpu(ans->hermes_rid), c->in_rid,
- ans->ans_reply_count, reply_count);
- }
- }
-
- if (ctx == NULL) {
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- err("%s: got unexpected RID: 0x%04X", __func__,
- le16_to_cpu(ans->hermes_rid));
- ezusb_req_queue_run(upriv);
- return;
- }
-
- /* The data we want is in the in buffer, exchange */
- urb->transfer_buffer = ctx->buf;
- ctx->buf = (void *) ans;
- ctx->buf_length = urb->actual_length;
-
- state = ctx->state;
- switch (state) {
- case EZUSB_CTX_REQ_SUBMITTED:
- /* We have received our response URB before
- * our request has been acknowledged. Do NOT
- * destroy our CTX yet, because our OUT URB
- * is still alive ...
- */
- ctx->state = EZUSB_CTX_RESP_RECEIVED;
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- /* Let the machine continue running. */
- break;
-
- case EZUSB_CTX_REQ_COMPLETE:
- /* This is the usual path: our request
- * has already been acknowledged, and
- * we have now received the reply.
- */
- ctx->state = EZUSB_CTX_COMPLETE;
-
- /* Stop the IN-URB timer */
- del_timer(&ctx->timer);
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- /* Call the completion handler */
- ezusb_ctx_complete(ctx);
- break;
-
- default:
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- pr_warn("Matched IN URB, unexpected context state(0x%x)\n",
- state);
- /* Throw this CTX away and try submitting another */
- del_timer(&ctx->timer);
- ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
- usb_unlink_urb(ctx->outurb);
- ezusb_req_queue_run(upriv);
- break;
- } /* switch */
-}
-
-typedef void (*ezusb_ctx_wait)(struct ezusb_priv *, struct request_context *);
-
-static void ezusb_req_ctx_wait_compl(struct ezusb_priv *upriv,
- struct request_context *ctx)
-{
- switch (ctx->state) {
- case EZUSB_CTX_QUEUED:
- case EZUSB_CTX_REQ_SUBMITTED:
- case EZUSB_CTX_REQ_COMPLETE:
- case EZUSB_CTX_RESP_RECEIVED:
- wait_for_completion(&ctx->done);
- break;
- default:
- /* Done or failed - nothing to wait for */
- break;
- }
-}
-
-static void ezusb_req_ctx_wait_poll(struct ezusb_priv *upriv,
- struct request_context *ctx)
-{
- int msecs;
-
- switch (ctx->state) {
- case EZUSB_CTX_QUEUED:
- case EZUSB_CTX_REQ_SUBMITTED:
- case EZUSB_CTX_REQ_COMPLETE:
- case EZUSB_CTX_RESP_RECEIVED:
- /* If we get called from a timer or with our lock acquired, then
- * we can't wait for the completion and have to poll. This won't
- * happen if the USB controller completes the URB requests in
- * BH.
- */
- msecs = DEF_TIMEOUT * (1000 / HZ);
-
- while (!try_wait_for_completion(&ctx->done) && msecs--)
- udelay(1000);
- break;
- default:
- /* Done or failed - nothing to wait for */
- break;
- }
-}
-
-static void ezusb_req_ctx_wait_skip(struct ezusb_priv *upriv,
- struct request_context *ctx)
-{
- WARN(1, "Shouldn't be invoked for in_rid\n");
-}
-
-static inline u16 build_crc(struct ezusb_packet *data)
-{
- u16 crc = 0;
- u8 *bytes = (u8 *)data;
- int i;
-
- for (i = 0; i < 8; i++)
- crc = (crc << 1) + bytes[i];
-
- return crc;
-}
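Despite the name, build_crc() is a shift-and-add checksum over the 8 header bytes that precede the crc field, not a true CRC. A stand-alone sketch of the same arithmetic, usable for checking captured packets offline (the sample header bytes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as build_crc(): crc = (crc << 1) + next byte,
 * over bytes 0..7 of the packet header. */
static uint16_t ezusb_checksum(const uint8_t *hdr)
{
        uint16_t crc = 0;
        int i;

        for (i = 0; i < 8; i++)
                crc = (crc << 1) + hdr[i];
        return crc;
}

int main(void)
{
        /* magic 0x0210 little-endian, req/ans counters 1/0,
         * frame_type 2 (control), size 4. */
        const uint8_t hdr[8] = { 0x10, 0x02, 0x01, 0x00,
                                 0x02, 0x00, 0x04, 0x00 };

        printf("checksum = 0x%04x\n", ezusb_checksum(hdr)); /* 0x08b8 */
        return 0;
}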
-
-/*
- * ezusb_fill_req:
- *
- * if data == NULL and length > 0 the data is assumed to be already in
- * the target buffer and only the header is filled.
- *
- */
-static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
- const void *data, u16 frame_type, u8 reply_count)
-{
- int total_size = sizeof(*req) + length;
-
- BUG_ON(total_size > BULK_BUF_SIZE);
-
- req->magic = cpu_to_le16(EZUSB_MAGIC);
- req->req_reply_count = reply_count;
- req->ans_reply_count = 0;
- req->frame_type = cpu_to_le16(frame_type);
- req->size = cpu_to_le16(length + 4);
- req->crc = cpu_to_le16(build_crc(req));
- req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
- req->hermes_rid = cpu_to_le16(rid);
- if (data)
- memcpy(req->data, data, length);
- return total_size;
-}
-
-static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
-{
- int retval = 0;
- void *cur_buf = upriv->read_urb->transfer_buffer;
-
- if (upriv->read_urb->status == -EINPROGRESS) {
- netdev_dbg(upriv->dev, "urb busy, not resubmiting\n");
- retval = -EBUSY;
- goto exit;
- }
- usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
- cur_buf, BULK_BUF_SIZE,
- ezusb_bulk_in_callback, upriv);
- upriv->read_urb->transfer_flags = 0;
- retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
- if (retval)
- err("%s submit failed %d", __func__, retval);
-
- exit:
- return retval;
-}
-
-static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
-{
- int ret;
- u8 *res_val = NULL;
-
- if (!upriv->udev) {
- err("%s: !upriv->udev", __func__);
- return -EFAULT;
- }
-
- res_val = kmalloc(sizeof(*res_val), GFP_KERNEL);
-
- if (!res_val)
- return -ENOMEM;
-
- *res_val = reset; /* avoid argument promotion */
-
- ret = usb_control_msg(upriv->udev,
- usb_sndctrlpipe(upriv->udev, 0),
- EZUSB_REQUEST_FW_TRANS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_OUT, EZUSB_CPUCS_REG, 0, res_val,
- sizeof(*res_val), DEF_TIMEOUT);
-
- kfree(res_val);
-
- return ret;
-}
-
-static int ezusb_firmware_download(struct ezusb_priv *upriv,
- struct ez_usb_fw *fw)
-{
- u8 *fw_buffer;
- int retval, addr;
- int variant_offset;
-
- fw_buffer = kmalloc(FW_BUF_SIZE, GFP_KERNEL);
- if (!fw_buffer) {
- printk(KERN_ERR PFX "Out of memory for firmware buffer.\n");
- return -ENOMEM;
- }
- /*
- * This byte is 1 and should be replaced with 0. The offset is
- * 0x10AD in version 0.0.6. The byte in question should follow
- * the end of the code pointed to by the jump in the beginning
- * of the firmware. Also, it is read by code located at 0x358.
- */
- variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
- if (variant_offset >= fw->size) {
- printk(KERN_ERR PFX "Invalid firmware variant offset: "
- "0x%04x\n", variant_offset);
- retval = -EINVAL;
- goto fail;
- }
-
- retval = ezusb_8051_cpucs(upriv, 1);
- if (retval < 0)
- goto fail;
- for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
- /* 0x100-0x300 should be left alone, it contains card
- * specific data, like USB enumeration information */
- if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
- continue;
-
- memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
- if (variant_offset >= addr &&
- variant_offset < addr + FW_BUF_SIZE) {
- netdev_dbg(upriv->dev,
- "Patching card_variant byte at 0x%04X\n",
- variant_offset);
- fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
- }
- retval = usb_control_msg(upriv->udev,
- usb_sndctrlpipe(upriv->udev, 0),
- EZUSB_REQUEST_FW_TRANS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE
- | USB_DIR_OUT,
- addr, 0x0,
- fw_buffer, FW_BUF_SIZE,
- DEF_TIMEOUT);
-
- if (retval < 0)
- goto fail;
- }
- retval = ezusb_8051_cpucs(upriv, 0);
- if (retval < 0)
- goto fail;
-
- goto exit;
- fail:
- printk(KERN_ERR PFX "Firmware download failed, error %d\n",
- retval);
- exit:
- kfree(fw_buffer);
- return retval;
-}
-
-static int ezusb_access_ltv(struct ezusb_priv *upriv,
- struct request_context *ctx,
- u16 length, const void *data, u16 frame_type,
- void *ans_buff, unsigned ans_size, u16 *ans_length,
- ezusb_ctx_wait ezusb_ctx_wait_func)
-{
- int req_size;
- int retval = 0;
- enum ezusb_state state;
-
- if (!upriv->udev) {
- retval = -ENODEV;
- goto exit;
- }
-
- if (upriv->read_urb->status != -EINPROGRESS)
- err("%s: in urb not pending", __func__);
-
- /* protect upriv->reply_count, guarantee sequential numbers */
- spin_lock_bh(&upriv->reply_count_lock);
- req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
- frame_type, upriv->reply_count);
- usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
- ctx->buf, req_size,
- ezusb_request_out_callback, ctx);
-
- if (ctx->in_rid)
- upriv->reply_count = ezusb_reply_inc(upriv->reply_count);
-
- ezusb_req_enqueue_run(upriv, ctx);
-
- spin_unlock_bh(&upriv->reply_count_lock);
-
- if (ctx->in_rid)
- ezusb_ctx_wait_func(upriv, ctx);
-
- state = ctx->state;
- switch (state) {
- case EZUSB_CTX_COMPLETE:
- retval = ctx->outurb->status;
- break;
-
- case EZUSB_CTX_QUEUED:
- case EZUSB_CTX_REQ_SUBMITTED:
- if (!ctx->in_rid)
- break;
- fallthrough;
- default:
- err("%s: Unexpected context state %d", __func__,
- state);
- fallthrough;
- case EZUSB_CTX_REQ_TIMEOUT:
- case EZUSB_CTX_REQ_FAILED:
- case EZUSB_CTX_RESP_TIMEOUT:
- case EZUSB_CTX_REQSUBMIT_FAIL:
- printk(KERN_ERR PFX "Access failed, resetting (state %d,"
- " reply_count %d)\n", state, upriv->reply_count);
- upriv->reply_count = 0;
- if (state == EZUSB_CTX_REQ_TIMEOUT
- || state == EZUSB_CTX_RESP_TIMEOUT) {
- printk(KERN_ERR PFX "ctx timed out\n");
- retval = -ETIMEDOUT;
- } else {
- printk(KERN_ERR PFX "ctx failed\n");
- retval = -EFAULT;
- }
- goto exit;
- }
- if (ctx->in_rid) {
- struct ezusb_packet *ans = ctx->buf;
- unsigned exp_len;
-
- if (ans->hermes_len != 0)
- exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
- else
- exp_len = 14;
-
- if (exp_len != ctx->buf_length) {
- err("%s: length mismatch for RID 0x%04x: "
- "expected %d, got %d", __func__,
- ctx->in_rid, exp_len, ctx->buf_length);
- retval = -EIO;
- goto exit;
- }
-
- if (ans_buff)
- memcpy(ans_buff, ans->data, min(exp_len, ans_size));
- if (ans_length)
- *ans_length = le16_to_cpu(ans->hermes_len);
- }
- exit:
- ezusb_request_context_put(ctx);
- return retval;
-}
-
-static int __ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
- u16 length, const void *data,
- ezusb_ctx_wait ezusb_ctx_wait_func)
-{
- struct ezusb_priv *upriv = hw->priv;
- u16 frame_type;
- struct request_context *ctx;
-
- if (length == 0)
- return -EINVAL;
-
- length = HERMES_RECLEN_TO_BYTES(length);
-
- /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be
- * set to be empty, but the USB bridge doesn't like it */
- if (length == 0)
- return 0;
-
- ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK);
- if (!ctx)
- return -ENOMEM;
-
- if (rid == EZUSB_RID_TX)
- frame_type = EZUSB_FRAME_DATA;
- else
- frame_type = EZUSB_FRAME_CONTROL;
-
- return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
- NULL, 0, NULL, ezusb_ctx_wait_func);
-}
-
-static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
- u16 length, const void *data)
-{
- return __ezusb_write_ltv(hw, bap, rid, length, data,
- ezusb_req_ctx_wait_poll);
-}
-
-static int __ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
- unsigned bufsize, u16 *length, void *buf,
- ezusb_ctx_wait ezusb_ctx_wait_func)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct request_context *ctx;
-
- if (bufsize % 2)
- return -EINVAL;
-
- ctx = ezusb_alloc_ctx(upriv, rid, rid);
- if (!ctx)
- return -ENOMEM;
-
- return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
- buf, bufsize, length, ezusb_ctx_wait_func);
-}
-
-static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
- unsigned bufsize, u16 *length, void *buf)
-{
- return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf,
- ezusb_req_ctx_wait_poll);
-}
-
-static int ezusb_read_ltv_preempt(struct hermes *hw, int bap, u16 rid,
- unsigned bufsize, u16 *length, void *buf)
-{
- return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf,
- ezusb_req_ctx_wait_compl);
-}
-
-static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1,
- u16 parm2, struct hermes_response *resp)
-{
- WARN_ON_ONCE(1);
- return -EINVAL;
-}
-
-static int __ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp,
- ezusb_ctx_wait ezusb_ctx_wait_func)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct request_context *ctx;
-
- __le16 data[4] = {
- cpu_to_le16(cmd),
- cpu_to_le16(parm0),
- 0,
- 0,
- };
- netdev_dbg(upriv->dev, "0x%04X, parm0 0x%04X\n", cmd, parm0);
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
- if (!ctx)
- return -ENOMEM;
-
- return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
- EZUSB_FRAME_CONTROL, NULL, 0, NULL,
- ezusb_ctx_wait_func);
-}
-
-static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
- struct hermes_response *resp)
-{
- return __ezusb_docmd_wait(hw, cmd, parm0, resp, ezusb_req_ctx_wait_poll);
-}
-
-static int ezusb_bap_pread(struct hermes *hw, int bap,
- void *buf, int len, u16 id, u16 offset)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer;
- int actual_length = upriv->read_urb->actual_length;
-
- if (id == EZUSB_RID_RX) {
- if ((sizeof(*ans) + offset + len) > actual_length) {
- printk(KERN_ERR PFX "BAP read beyond buffer end "
- "in rx frame\n");
- return -EINVAL;
- }
- memcpy(buf, ans->data + offset, len);
- return 0;
- }
-
- if (EZUSB_IS_INFO(id)) {
- /* Include 4 bytes for length/type */
- if ((sizeof(*ans) + offset + len - 4) > actual_length) {
- printk(KERN_ERR PFX "BAP read beyond buffer end "
- "in info frame\n");
- return -EFAULT;
- }
- memcpy(buf, ans->data + offset - 4, len);
- } else {
- printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
- u32 pda_addr, u16 pda_len)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct request_context *ctx;
- __le16 data[] = {
- cpu_to_le16(pda_addr & 0xffff),
- cpu_to_le16(pda_len - 4)
- };
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA);
- if (!ctx)
- return -ENOMEM;
-
- /* wl_lkm does not include the PDA size in the PDA area.
- * We prepend it to pda here, so other routines
- * don't have to be modified */
- pda[0] = cpu_to_le16(pda_len - 2);
- /* Includes CFG_PROD_DATA but not itself */
- pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
-
- return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
- EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
- NULL, ezusb_req_ctx_wait_compl);
-}
-
-static int ezusb_program_init(struct hermes *hw, u32 entry_point)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct request_context *ctx;
- __le32 data = cpu_to_le32(entry_point);
-
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK);
- if (!ctx)
- return -ENOMEM;
-
- return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
- EZUSB_FRAME_CONTROL, NULL, 0, NULL,
- ezusb_req_ctx_wait_compl);
-}
-
-static int ezusb_program_end(struct hermes *hw)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct request_context *ctx;
-
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK);
- if (!ctx)
- return -ENOMEM;
-
- return ezusb_access_ltv(upriv, ctx, 0, NULL,
- EZUSB_FRAME_CONTROL, NULL, 0, NULL,
- ezusb_req_ctx_wait_compl);
-}
-
-static int ezusb_program_bytes(struct hermes *hw, const char *buf,
- u32 addr, u32 len)
-{
- struct ezusb_priv *upriv = hw->priv;
- struct request_context *ctx;
- __le32 data = cpu_to_le32(addr);
- int err;
-
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK);
- if (!ctx)
- return -ENOMEM;
-
- err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
- EZUSB_FRAME_CONTROL, NULL, 0, NULL,
- ezusb_req_ctx_wait_compl);
- if (err)
- return err;
-
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK);
- if (!ctx)
- return -ENOMEM;
-
- return ezusb_access_ltv(upriv, ctx, len, buf,
- EZUSB_FRAME_CONTROL, NULL, 0, NULL,
- ezusb_req_ctx_wait_compl);
-}
-
-static int ezusb_program(struct hermes *hw, const char *buf,
- u32 addr, u32 len)
-{
- u32 ch_addr;
- u32 ch_len;
- int err = 0;
-
- /* We can only send 2048 bytes out of the bulk xmit at a time,
- * so we have to split any programming into chunks of <2048
- * bytes. */
-
- ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE;
- ch_addr = addr;
-
- while (ch_addr < (addr + len)) {
- pr_debug("Programming subblock of length %d "
- "to address 0x%08x. Data @ %p\n",
- ch_len, ch_addr, &buf[ch_addr - addr]);
-
- err = ezusb_program_bytes(hw, &buf[ch_addr - addr],
- ch_addr, ch_len);
- if (err)
- break;
-
- ch_addr += ch_len;
- ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ?
- (addr + len - ch_addr) : MAX_DL_SIZE;
- }
-
- return err;
-}
-
-static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct ezusb_priv *upriv = priv->card;
- u8 mic[MICHAEL_MIC_LEN + 1];
- int err = 0;
- int tx_control;
- unsigned long flags;
- struct request_context *ctx;
- u8 *buf;
- int tx_size;
-
- if (!netif_running(dev)) {
- printk(KERN_ERR "%s: Tx on stopped device!\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- if (netif_queue_stopped(dev)) {
- printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- if (orinoco_lock(priv, &flags) != 0) {
- printk(KERN_ERR
- "%s: ezusb_xmit() called while hw_unavailable\n",
- dev->name);
- return NETDEV_TX_BUSY;
- }
-
- if (!netif_carrier_ok(dev) ||
- (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
- /* Oops, the firmware hasn't established a connection,
- silently drop the packet (this seems to be the
- safest approach). */
- goto drop;
- }
-
- /* Check packet length */
- if (skb->len < ETH_HLEN)
- goto drop;
-
- tx_control = 0;
-
- err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
- &mic[0]);
- if (err)
- goto drop;
-
- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
- if (!ctx)
- goto drop;
-
- memset(ctx->buf, 0, BULK_BUF_SIZE);
- buf = ctx->buf->data;
-
- {
- __le16 *tx_cntl = (__le16 *)buf;
- *tx_cntl = cpu_to_le16(tx_control);
- buf += sizeof(*tx_cntl);
- }
-
- memcpy(buf, skb->data, skb->len);
- buf += skb->len;
-
- if (tx_control & HERMES_TXCTRL_MIC) {
- u8 *m = mic;
- /* The MIC has been offset so it can be copied to an even
- * address. We're copying everything anyway, so we
- * don't need to copy that first byte. */
- if (skb->len % 2)
- m++;
- memcpy(buf, m, MICHAEL_MIC_LEN);
- buf += MICHAEL_MIC_LEN;
- }
-
- /* Finally, we actually initiate the send */
- netif_stop_queue(dev);
-
- /* The card may behave better if we send evenly sized usb transfers */
- tx_size = ALIGN(buf - ctx->buf->data, 2);
-
- err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
- EZUSB_FRAME_DATA, NULL, 0, NULL,
- ezusb_req_ctx_wait_skip);
-
- if (err) {
- netif_start_queue(dev);
- if (net_ratelimit())
- printk(KERN_ERR "%s: Error %d transmitting packet\n",
- dev->name, err);
- goto busy;
- }
-
- netif_trans_update(dev);
- stats->tx_bytes += skb->len;
- goto ok;
-
- drop:
- stats->tx_errors++;
- stats->tx_dropped++;
-
- ok:
- orinoco_unlock(priv, &flags);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-
- busy:
- orinoco_unlock(priv, &flags);
- return NETDEV_TX_BUSY;
-}
-
-static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid)
-{
- *fid = EZUSB_RID_TX;
- return 0;
-}
-
-
-static int ezusb_hard_reset(struct orinoco_private *priv)
-{
- struct ezusb_priv *upriv = priv->card;
- int retval = ezusb_8051_cpucs(upriv, 1);
-
- if (retval < 0) {
- err("Failed to reset");
- return retval;
- }
-
- retval = ezusb_8051_cpucs(upriv, 0);
- if (retval < 0) {
- err("Failed to unreset");
- return retval;
- }
-
- netdev_dbg(upriv->dev, "sending control message\n");
- retval = usb_control_msg(upriv->udev,
- usb_sndctrlpipe(upriv->udev, 0),
- EZUSB_REQUEST_TRIGGER,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_OUT, 0x0, 0x0, NULL, 0,
- DEF_TIMEOUT);
- if (retval < 0) {
- err("EZUSB_REQUEST_TRIGGER failed retval %d", retval);
- return retval;
- }
-#if 0
- dbg("Sending EZUSB_REQUEST_TRIG_AC");
- retval = usb_control_msg(upriv->udev,
- usb_sndctrlpipe(upriv->udev, 0),
- EZUSB_REQUEST_TRIG_AC,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE |
- USB_DIR_OUT, 0x00FA, 0x0, NULL, 0,
- DEF_TIMEOUT);
- if (retval < 0) {
- err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval);
- return retval;
- }
-#endif
-
- return 0;
-}
-
-
-static int ezusb_init(struct hermes *hw)
-{
- struct ezusb_priv *upriv = hw->priv;
- int retval;
-
- if (!upriv)
- return -EINVAL;
-
- upriv->reply_count = 0;
- /* Write the MAGIC number on the simulated registers to keep
- * orinoco.c happy */
- hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
- hermes_write_regn(hw, RXFID, EZUSB_RID_RX);
-
- usb_kill_urb(upriv->read_urb);
- ezusb_submit_in_urb(upriv);
-
- retval = __ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
- HERMES_BYTES_TO_RECLEN(2), "\x10\x00",
- ezusb_req_ctx_wait_compl);
- if (retval < 0) {
- printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
- return retval;
- }
-
- retval = __ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL,
- ezusb_req_ctx_wait_compl);
- if (retval < 0) {
- printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
- return retval;
- }
-
- return 0;
-}
-
-static void ezusb_bulk_in_callback(struct urb *urb)
-{
- struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context;
- struct ezusb_packet *ans = urb->transfer_buffer;
- u16 crc;
- u16 hermes_rid;
-
- if (upriv->udev == NULL)
- return;
-
- if (urb->status == -ETIMEDOUT) {
- /* When a device gets unplugged we get this every time
- * we resubmit, flooding the logs. Since we don't use
- * USB timeouts, it shouldn't happen any other time. */
- pr_warn("%s: urb timed out, not resubmitting\n", __func__);
- return;
- }
- if (urb->status == -ECONNABORTED) {
- pr_warn("%s: connection abort, resubmitting urb\n",
- __func__);
- goto resubmit;
- }
- if ((urb->status == -EILSEQ)
- || (urb->status == -ENOENT)
- || (urb->status == -ECONNRESET)) {
- netdev_dbg(upriv->dev, "status %d, not resubmiting\n",
- urb->status);
- return;
- }
- if (urb->status)
- netdev_dbg(upriv->dev, "status: %d length: %d\n",
- urb->status, urb->actual_length);
- if (urb->actual_length < sizeof(*ans)) {
- err("%s: short read, ignoring", __func__);
- goto resubmit;
- }
- crc = build_crc(ans);
- if (le16_to_cpu(ans->crc) != crc) {
- err("CRC error, ignoring packet");
- goto resubmit;
- }
-
- hermes_rid = le16_to_cpu(ans->hermes_rid);
- if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) {
- ezusb_request_in_callback(upriv, urb);
- } else if (upriv->dev) {
- struct net_device *dev = upriv->dev;
- struct orinoco_private *priv = ndev_priv(dev);
- struct hermes *hw = &priv->hw;
-
- if (hermes_rid == EZUSB_RID_RX) {
- __orinoco_ev_rx(dev, hw);
- } else {
- hermes_write_regn(hw, INFOFID,
- le16_to_cpu(ans->hermes_rid));
- __orinoco_ev_info(dev, hw);
- }
- }
-
- resubmit:
- if (upriv->udev)
- ezusb_submit_in_urb(upriv);
-}
-
-static inline void ezusb_delete(struct ezusb_priv *upriv)
-{
- struct list_head *item;
- struct list_head *tmp_item;
- unsigned long flags;
-
- BUG_ON(!upriv);
-
- mutex_lock(&upriv->mtx);
-
- upriv->udev = NULL; /* No timer will be rearmed from here */
-
- usb_kill_urb(upriv->read_urb);
-
- spin_lock_irqsave(&upriv->req_lock, flags);
- list_for_each_safe(item, tmp_item, &upriv->req_active) {
- struct request_context *ctx;
- int err;
-
- ctx = list_entry(item, struct request_context, list);
- refcount_inc(&ctx->refcount);
-
- ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
- err = usb_unlink_urb(ctx->outurb);
-
- spin_unlock_irqrestore(&upriv->req_lock, flags);
- if (err == -EINPROGRESS)
- wait_for_completion(&ctx->done);
-
- del_timer_sync(&ctx->timer);
- /* FIXME: there is a slight chance that the irq handler
- * is still running */
- if (!list_empty(&ctx->list))
- ezusb_ctx_complete(ctx);
-
- ezusb_request_context_put(ctx);
- spin_lock_irqsave(&upriv->req_lock, flags);
- }
- spin_unlock_irqrestore(&upriv->req_lock, flags);
-
- list_for_each_safe(item, tmp_item, &upriv->req_pending)
- ezusb_ctx_complete(list_entry(item,
- struct request_context, list));
-
- if (upriv->read_urb && upriv->read_urb->status == -EINPROGRESS)
- printk(KERN_ERR PFX "Some URB in progress\n");
-
- mutex_unlock(&upriv->mtx);
-
- if (upriv->read_urb) {
- kfree(upriv->read_urb->transfer_buffer);
- usb_free_urb(upriv->read_urb);
- }
- kfree(upriv->bap_buf);
- if (upriv->dev) {
- struct orinoco_private *priv = ndev_priv(upriv->dev);
- orinoco_if_del(priv);
- wiphy_unregister(priv_to_wiphy(upriv));
- free_orinocodev(priv);
- }
-}
-
-static void ezusb_lock_irqsave(spinlock_t *lock,
- unsigned long *flags) __acquires(lock)
-{
- spin_lock_bh(lock);
-}
-
-static void ezusb_unlock_irqrestore(spinlock_t *lock,
- unsigned long *flags) __releases(lock)
-{
- spin_unlock_bh(lock);
-}
-
-static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock)
-{
- spin_lock_bh(lock);
-}
-
-static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock)
-{
- spin_unlock_bh(lock);
-}
-
-static const struct hermes_ops ezusb_ops = {
- .init = ezusb_init,
- .cmd_wait = ezusb_docmd_wait,
- .init_cmd_wait = ezusb_doicmd_wait,
- .allocate = ezusb_allocate,
- .read_ltv = ezusb_read_ltv,
- .read_ltv_pr = ezusb_read_ltv_preempt,
- .write_ltv = ezusb_write_ltv,
- .bap_pread = ezusb_bap_pread,
- .read_pda = ezusb_read_pda,
- .program_init = ezusb_program_init,
- .program_end = ezusb_program_end,
- .program = ezusb_program,
- .lock_irqsave = ezusb_lock_irqsave,
- .unlock_irqrestore = ezusb_unlock_irqrestore,
- .lock_irq = ezusb_lock_irq,
- .unlock_irq = ezusb_unlock_irq,
-};
-
-static const struct net_device_ops ezusb_netdev_ops = {
- .ndo_open = orinoco_open,
- .ndo_stop = orinoco_stop,
- .ndo_start_xmit = ezusb_xmit,
- .ndo_set_rx_mode = orinoco_set_multicast_list,
- .ndo_change_mtu = orinoco_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = orinoco_tx_timeout,
-};
-
-static int ezusb_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct orinoco_private *priv;
- struct hermes *hw;
- struct ezusb_priv *upriv = NULL;
- struct usb_interface_descriptor *iface_desc;
- struct usb_endpoint_descriptor *ep;
- const struct firmware *fw_entry = NULL;
- int retval = 0;
- int i;
-
- priv = alloc_orinocodev(sizeof(*upriv), &udev->dev,
- ezusb_hard_reset, NULL);
- if (!priv) {
- err("Couldn't allocate orinocodev");
- retval = -ENOMEM;
- goto exit;
- }
-
- hw = &priv->hw;
-
- upriv = priv->card;
-
- mutex_init(&upriv->mtx);
- spin_lock_init(&upriv->reply_count_lock);
-
- spin_lock_init(&upriv->req_lock);
- INIT_LIST_HEAD(&upriv->req_pending);
- INIT_LIST_HEAD(&upriv->req_active);
-
- upriv->udev = udev;
-
- hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake;
- hw->reg_spacing = HERMES_16BIT_REGSPACING;
- hw->priv = upriv;
- hw->ops = &ezusb_ops;
-
- /* set up the endpoint information */
- /* check out the endpoints */
-
- iface_desc = &interface->cur_altsetting->desc;
- for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
- ep = &interface->cur_altsetting->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(ep)) {
- /* we found a bulk in endpoint */
- if (upriv->read_urb != NULL) {
- pr_warn("Found a second bulk in ep, ignored\n");
- continue;
- }
-
- upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!upriv->read_urb)
- goto error;
- if (le16_to_cpu(ep->wMaxPacketSize) != 64)
- pr_warn("bulk in: wMaxPacketSize!= 64\n");
- if (ep->bEndpointAddress != (2 | USB_DIR_IN))
- pr_warn("bulk in: bEndpointAddress: %d\n",
- ep->bEndpointAddress);
- upriv->read_pipe = usb_rcvbulkpipe(udev, ep->bEndpointAddress);
- upriv->read_urb->transfer_buffer =
- kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
- if (!upriv->read_urb->transfer_buffer) {
- err("Couldn't allocate IN buffer");
- goto error;
- }
- }
-
- if (usb_endpoint_is_bulk_out(ep)) {
- /* we found a bulk out endpoint */
- if (upriv->bap_buf != NULL) {
- pr_warn("Found a second bulk out ep, ignored\n");
- continue;
- }
-
- if (le16_to_cpu(ep->wMaxPacketSize) != 64)
- pr_warn("bulk out: wMaxPacketSize != 64\n");
- if (ep->bEndpointAddress != 2)
- pr_warn("bulk out: bEndpointAddress: %d\n",
- ep->bEndpointAddress);
- upriv->write_pipe = usb_sndbulkpipe(udev, ep->bEndpointAddress);
- upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
- if (!upriv->bap_buf) {
- err("Couldn't allocate bulk_out_buffer");
- goto error;
- }
- }
- }
- if (!upriv->bap_buf || !upriv->read_urb) {
- err("Didn't find the required bulk endpoints");
- goto error;
- }
-
- if (request_firmware(&fw_entry, "orinoco_ezusb_fw",
- &interface->dev) == 0) {
- firmware.size = fw_entry->size;
- firmware.code = fw_entry->data;
- }
- if (firmware.size && firmware.code) {
- if (ezusb_firmware_download(upriv, &firmware) < 0)
- goto error;
- } else {
- err("No firmware to download");
- goto error;
- }
-
- if (ezusb_hard_reset(priv) < 0) {
- err("Cannot reset the device");
- goto error;
- }
-
- /* If the firmware is already downloaded, orinoco.c will call
- * ezusb_init() again later; but if it is not there, that call
- * would make the kernel very unstable, so we try initializing
- * here and bail out on error */
- if (ezusb_init(hw) < 0) {
- err("Couldn't initialize the device");
- err("Firmware may not be downloaded or may be wrong.");
- goto error;
- }
-
- /* Initialise the main driver */
- if (orinoco_init(priv) != 0) {
- err("orinoco_init() failed\n");
- goto error;
- }
-
- if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
- upriv->dev = NULL;
- err("%s: orinoco_if_add() failed", __func__);
- wiphy_unregister(priv_to_wiphy(priv));
- goto error;
- }
- upriv->dev = priv->ndev;
-
- goto exit;
-
- error:
- ezusb_delete(upriv);
- if (!upriv->dev) {
- /* upriv->dev was 0, so ezusb_delete() didn't free it */
- free_orinocodev(priv);
- }
- upriv = NULL;
- retval = -EFAULT;
- exit:
- if (fw_entry) {
- firmware.code = NULL;
- firmware.size = 0;
- release_firmware(fw_entry);
- }
- usb_set_intfdata(interface, upriv);
- return retval;
-}
-
-
-static void ezusb_disconnect(struct usb_interface *intf)
-{
- struct ezusb_priv *upriv = usb_get_intfdata(intf);
- usb_set_intfdata(intf, NULL);
- ezusb_delete(upriv);
- printk(KERN_INFO PFX "Disconnected\n");
-}
-
-
-/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver orinoco_driver = {
- .name = DRIVER_NAME,
- .probe = ezusb_probe,
- .disconnect = ezusb_disconnect,
- .id_table = ezusb_table,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(orinoco_driver);
-
-MODULE_AUTHOR("Manuel Estrada Sainz");
-MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge");
-MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
deleted file mode 100644
index 6d1d08485..000000000
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/* Helpers for managing scan queues
- *
- * See copyright notice in main.c
- */
-
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ieee80211.h>
-#include <net/cfg80211.h>
-
-#include "hermes.h"
-#include "orinoco.h"
-#include "main.h"
-
-#include "scan.h"
-
-#define ZERO_DBM_OFFSET 0x95
-#define MAX_SIGNAL_LEVEL 0x8A
-#define MIN_SIGNAL_LEVEL 0x2F
-
-#define SIGNAL_TO_DBM(x) \
- (clamp_t(s32, (x), MIN_SIGNAL_LEVEL, MAX_SIGNAL_LEVEL) \
- - ZERO_DBM_OFFSET)
-#define SIGNAL_TO_MBM(x) (SIGNAL_TO_DBM(x) * 100)
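A worked example of this mapping, assuming the same constants: raw levels clamp to [0x2F, 0x8A] and shift down by 0x95, so the reported range is -102..-11 dBm (-10200..-1100 mBm). A minimal user-space sketch:

#include <stdio.h>

#define ZERO_DBM_OFFSET  0x95
#define MAX_SIGNAL_LEVEL 0x8A
#define MIN_SIGNAL_LEVEL 0x2F

/* Same mapping as SIGNAL_TO_DBM() above, without the kernel clamp_t(). */
static int signal_to_dbm(int raw)
{
        if (raw < MIN_SIGNAL_LEVEL)
                raw = MIN_SIGNAL_LEVEL;
        if (raw > MAX_SIGNAL_LEVEL)
                raw = MAX_SIGNAL_LEVEL;
        return raw - ZERO_DBM_OFFSET;
}

int main(void)
{
        printf("%d dBm\n", signal_to_dbm(0x8A));       /* -11, strongest */
        printf("%d dBm\n", signal_to_dbm(0x2F));       /* -102, weakest */
        printf("%d mBm\n", signal_to_dbm(0x60) * 100); /* -5300 */
        return 0;
}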
-
-static int symbol_build_supp_rates(u8 *buf, const __le16 *rates)
-{
- int i;
- u8 rate;
-
- buf[0] = WLAN_EID_SUPP_RATES;
- for (i = 0; i < 5; i++) {
- rate = le16_to_cpu(rates[i]);
- /* NULL terminated */
- if (rate == 0x0)
- break;
- buf[i + 2] = rate;
- }
- buf[1] = i;
-
- return i + 2;
-}
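As an example of the element this builds (a user-space sketch with host-endian rates for brevity; the driver reads __le16 values from the scan atom), rates of 1, 2, 5.5 and 11 Mb/s become the IE bytes 01 04 02 04 0b 16:

#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_SUPP_RATES 1

/* Same logic as symbol_build_supp_rates() above. */
static int build_supp_rates(uint8_t *buf, const uint16_t *rates)
{
        int i;

        buf[0] = WLAN_EID_SUPP_RATES;
        for (i = 0; i < 5; i++) {
                if (rates[i] == 0)        /* NULL terminated */
                        break;
                buf[i + 2] = (uint8_t)rates[i];
        }
        buf[1] = i;                       /* IE length */
        return i + 2;                     /* bytes written */
}

int main(void)
{
        /* 1, 2, 5.5 and 11 Mb/s in 500 kb/s units. */
        const uint16_t rates[5] = { 0x02, 0x04, 0x0b, 0x16, 0 };
        uint8_t ie[7];
        int k, n = build_supp_rates(ie, rates);

        /* Prints: 01 04 02 04 0b 16 */
        for (k = 0; k < n; k++)
                printf("%02x ", ie[k]);
        printf("\n");
        return 0;
}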
-
-static int prism_build_supp_rates(u8 *buf, const u8 *rates)
-{
- int i;
-
- buf[0] = WLAN_EID_SUPP_RATES;
- for (i = 0; i < 8; i++) {
- /* NULL terminated */
- if (rates[i] == 0x0)
- break;
- buf[i + 2] = rates[i];
- }
- buf[1] = i;
-
- /* We might still have another 2 rates, which need to go in
- * extended supported rates */
- if (i == 8 && rates[i] > 0) {
- buf[10] = WLAN_EID_EXT_SUPP_RATES;
- for (; i < 10; i++) {
- /* NULL terminated */
- if (rates[i] == 0x0)
- break;
- buf[i + 2] = rates[i];
- }
- buf[11] = i - 8;
- }
-
- return (i < 8) ? i + 2 : i + 4;
-}
-
-static void orinoco_add_hostscan_result(struct orinoco_private *priv,
- const union hermes_scan_info *bss)
-{
- struct wiphy *wiphy = priv_to_wiphy(priv);
- struct ieee80211_channel *channel;
- struct cfg80211_bss *cbss;
- u8 *ie;
- u8 ie_buf[46];
- u64 timestamp;
- s32 signal;
- u16 capability;
- u16 beacon_interval;
- int ie_len;
- int freq;
- int len;
-
- len = le16_to_cpu(bss->a.essid_len);
-
- /* Reconstruct SSID and bitrate IEs to pass up */
- ie_buf[0] = WLAN_EID_SSID;
- ie_buf[1] = len;
- memcpy(&ie_buf[2], bss->a.essid, len);
-
- ie = ie_buf + len + 2;
- ie_len = ie_buf[1] + 2;
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_SYMBOL:
- ie_len += symbol_build_supp_rates(ie, bss->s.rates);
- break;
-
- case FIRMWARE_TYPE_INTERSIL:
- ie_len += prism_build_supp_rates(ie, bss->p.rates);
- break;
-
- case FIRMWARE_TYPE_AGERE:
- default:
- break;
- }
-
- freq = ieee80211_channel_to_frequency(
- le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ);
- channel = ieee80211_get_channel(wiphy, freq);
- if (!channel) {
- printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
- bss->a.channel, freq);
- return; /* Then ignore it for now */
- }
- timestamp = 0;
- capability = le16_to_cpu(bss->a.capabilities);
- beacon_interval = le16_to_cpu(bss->a.beacon_interv);
- signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
-
- cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN,
- bss->a.bssid, timestamp, capability,
- beacon_interval, ie_buf, ie_len, signal,
- GFP_KERNEL);
- cfg80211_put_bss(wiphy, cbss);
-}
-
-void orinoco_add_extscan_result(struct orinoco_private *priv,
- struct agere_ext_scan_info *bss,
- size_t len)
-{
- struct wiphy *wiphy = priv_to_wiphy(priv);
- struct ieee80211_channel *channel;
- struct cfg80211_bss *cbss;
- const u8 *ie;
- u64 timestamp;
- s32 signal;
- u16 capability;
- u16 beacon_interval;
- size_t ie_len;
- int chan, freq;
-
- ie_len = len - sizeof(*bss);
- ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
- chan = ie ? ie[2] : 0;
- freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
- channel = ieee80211_get_channel(wiphy, freq);
-
- timestamp = le64_to_cpu(bss->timestamp);
- capability = le16_to_cpu(bss->capabilities);
- beacon_interval = le16_to_cpu(bss->beacon_interval);
- ie = bss->data;
- signal = SIGNAL_TO_MBM(bss->level);
-
- cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN,
- bss->bssid, timestamp, capability,
- beacon_interval, ie, ie_len, signal,
- GFP_KERNEL);
- cfg80211_put_bss(wiphy, cbss);
-}
-
-void orinoco_add_hostscan_results(struct orinoco_private *priv,
- unsigned char *buf,
- size_t len)
-{
- int offset; /* In the scan data */
- size_t atom_len;
- bool abort = false;
-
- switch (priv->firmware_type) {
- case FIRMWARE_TYPE_AGERE:
- atom_len = sizeof(struct agere_scan_apinfo);
- offset = 0;
- break;
-
- case FIRMWARE_TYPE_SYMBOL:
- /* Lack of documentation necessitates this hack.
- * Different firmwares have 68 or 76 byte long atoms.
- * We try modulo first. If the length divides by both,
- * we check what would be the channel in the second
- * frame for a 68-byte atom. 76-byte atoms have 0 there.
- * Valid channel cannot be 0. */
- if (len % 76)
- atom_len = 68;
- else if (len % 68)
- atom_len = 76;
- else if (len >= 1292 && buf[68] == 0)
- atom_len = 76;
- else
- atom_len = 68;
- offset = 0;
- break;
-
- case FIRMWARE_TYPE_INTERSIL:
- offset = 4;
- if (priv->has_hostscan) {
- atom_len = le16_to_cpup((__le16 *)buf);
- /* Sanity check for atom_len */
- if (atom_len < sizeof(struct prism2_scan_apinfo)) {
- printk(KERN_ERR "%s: Invalid atom_len in scan "
- "data: %zu\n", priv->ndev->name,
- atom_len);
- abort = true;
- goto scan_abort;
- }
- } else
- atom_len = offsetof(struct prism2_scan_apinfo, atim);
- break;
-
- default:
- abort = true;
- goto scan_abort;
- }
-
- /* Check that we got a whole number of atoms */
- if ((len - offset) % atom_len) {
- printk(KERN_ERR "%s: Unexpected scan data length %zu, "
- "atom_len %zu, offset %d\n", priv->ndev->name, len,
- atom_len, offset);
- abort = true;
- goto scan_abort;
- }
-
- /* Process the entries one by one */
- for (; offset + atom_len <= len; offset += atom_len) {
- union hermes_scan_info *atom;
-
- atom = (union hermes_scan_info *) (buf + offset);
-
- orinoco_add_hostscan_result(priv, atom);
- }
-
- scan_abort:
- if (priv->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = abort,
- };
-
- cfg80211_scan_done(priv->scan_request, &info);
- priv->scan_request = NULL;
- }
-}
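The Symbol atom-length guess above is compact but easy to misread. Here is the same decision tree as a stand-alone sketch; note that 1292 == lcm(68, 76), so the buf[68] peek only ever runs on buffers whose length divides by both atom sizes (my observation, not stated in the driver):

#include <stddef.h>

/* Mirror of the Symbol heuristic above: atoms are 68 or 76 bytes.
 * When the length divides by both, peek at what would be the channel
 * byte of a second 68-byte atom; 76-byte atoms carry 0 there, and 0
 * is never a valid channel. */
static size_t symbol_atom_len(const unsigned char *buf, size_t len)
{
        if (len % 76)
                return 68;
        if (len % 68)
                return 76;
        if (len >= 1292 && buf[68] == 0)
                return 76;
        return 68;
}

int main(void)
{
        unsigned char buf[1292] = { 0 };

        /* 68 * 19 == 76 * 17 == 1292: only here does the peek decide;
         * buf[68] == 0 here, so 76-byte atoms win. */
        return symbol_atom_len(buf, sizeof(buf)) == 76 ? 0 : 1;
}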
-
-void orinoco_scan_done(struct orinoco_private *priv, bool abort)
-{
- if (priv->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = abort,
- };
-
- cfg80211_scan_done(priv->scan_request, &info);
- priv->scan_request = NULL;
- }
-}
diff --git a/drivers/net/wireless/intersil/orinoco/scan.h b/drivers/net/wireless/intersil/orinoco/scan.h
deleted file mode 100644
index 27281fb0a..000000000
--- a/drivers/net/wireless/intersil/orinoco/scan.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Helpers for managing scan queues
- *
- * See copyright notice in main.c
- */
-#ifndef _ORINOCO_SCAN_H_
-#define _ORINOCO_SCAN_H_
-
-/* Forward declarations */
-struct orinoco_private;
-struct agere_ext_scan_info;
-
-/* Add scan results */
-void orinoco_add_extscan_result(struct orinoco_private *priv,
- struct agere_ext_scan_info *atom,
- size_t len);
-void orinoco_add_hostscan_results(struct orinoco_private *dev,
- unsigned char *buf,
- size_t len);
-void orinoco_scan_done(struct orinoco_private *priv, bool abort);
-
-#endif /* _ORINOCO_SCAN_H_ */
diff --git a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c b/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
deleted file mode 100644
index 841d623c6..000000000
--- a/drivers/net/wireless/intersil/orinoco/spectrum_cs.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Driver for 802.11b cards using RAM-loadable Symbol firmware, such as
- * Symbol Wireless Networker LA4137, CompactFlash cards by Socket
- * Communications and Intel PRO/Wireless 2011B.
- *
- * The driver implements Symbol firmware download. The rest is handled
- * in hermes.c and main.c.
- *
- * Utilities for downloading the Symbol firmware are available at
- * http://sourceforge.net/projects/orinoco/
- *
- * Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
- * Portions based on orinoco_cs.c:
- * Copyright (C) David Gibson, Linuxcare Australia
- * Portions based on Spectrum24tDnld.c from original spectrum24 driver:
- * Copyright (C) Symbol Technologies.
- *
- * See copyright notice in file main.c.
- */
-
-#define DRIVER_NAME "spectrum_cs"
-#define PFX DRIVER_NAME ": "
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include "orinoco.h"
-
-/********************************************************************/
-/* Module stuff */
-/********************************************************************/
-
-MODULE_AUTHOR("Pavel Roskin <proski@gnu.org>");
-MODULE_DESCRIPTION("Driver for Symbol Spectrum24 Trilogy cards with firmware downloader");
-MODULE_LICENSE("Dual MPL/GPL");
-
-/* Module parameters */
-
- /* Some D-Link cards have a buggy CIS. They do work at 5V properly,
- * but don't have any CIS entry for it. This works around it... */
-static int ignore_cis_vcc; /* = 0 */
-module_param(ignore_cis_vcc, int, 0);
-MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket");
-
-/********************************************************************/
-/* Data structures */
-/********************************************************************/
-
-/* PCMCIA specific device information (goes in the card field of
- * struct orinoco_private */
-struct orinoco_pccard {
- struct pcmcia_device *p_dev;
-};
-
-/********************************************************************/
-/* Function prototypes */
-/********************************************************************/
-
-static int spectrum_cs_config(struct pcmcia_device *link);
-static void spectrum_cs_release(struct pcmcia_device *link);
-
-/* Constants for the CISREG_CCSR register */
-#define HCR_RUN 0x07 /* run firmware after reset */
-#define HCR_IDLE 0x0E /* don't run firmware after reset */
-#define HCR_MEM16 0x10 /* memory width bit, should be preserved */
-
-
-/*
- * Reset the card using configuration registers COR and CCSR.
- * If IDLE is 1, stop the firmware, so that it can be safely rewritten.
- */
-static int
-spectrum_reset(struct pcmcia_device *link, int idle)
-{
- int ret;
- u8 save_cor;
- u8 ccsr;
-
- /* Doing this when the hardware is gone is a guaranteed crash */
- if (!pcmcia_dev_present(link))
- return -ENODEV;
-
- /* Save original COR value */
- ret = pcmcia_read_config_byte(link, CISREG_COR, &save_cor);
- if (ret)
- goto failed;
-
- /* Soft-Reset card */
- ret = pcmcia_write_config_byte(link, CISREG_COR,
- (save_cor | COR_SOFT_RESET));
- if (ret)
- goto failed;
- udelay(1000);
-
- /* Read CCSR */
- ret = pcmcia_read_config_byte(link, CISREG_CCSR, &ccsr);
- if (ret)
- goto failed;
-
- /*
- * Start or stop the firmware. Memory width bit should be
- * preserved from the value we've just read.
- */
- ccsr = (idle ? HCR_IDLE : HCR_RUN) | (ccsr & HCR_MEM16);
- ret = pcmcia_write_config_byte(link, CISREG_CCSR, ccsr);
- if (ret)
- goto failed;
- udelay(1000);
-
- /* Restore original COR configuration index */
- ret = pcmcia_write_config_byte(link, CISREG_COR,
- (save_cor & ~COR_SOFT_RESET));
- if (ret)
- goto failed;
- udelay(1000);
- return 0;
-
-failed:
- return -ENODEV;
-}
-
-/********************************************************************/
-/* Device methods */
-/********************************************************************/
-
-static int
-spectrum_cs_hard_reset(struct orinoco_private *priv)
-{
- struct orinoco_pccard *card = priv->card;
- struct pcmcia_device *link = card->p_dev;
-
- /* Soft reset using COR and HCR */
- spectrum_reset(link, 0);
-
- return 0;
-}
-
-static int
-spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle)
-{
- struct orinoco_pccard *card = priv->card;
- struct pcmcia_device *link = card->p_dev;
-
- return spectrum_reset(link, idle);
-}
-
-/********************************************************************/
-/* PCMCIA stuff */
-/********************************************************************/
-
-static int
-spectrum_cs_probe(struct pcmcia_device *link)
-{
- struct orinoco_private *priv;
- struct orinoco_pccard *card;
- int ret;
-
- priv = alloc_orinocodev(sizeof(*card), &link->dev,
- spectrum_cs_hard_reset,
- spectrum_cs_stop_firmware);
- if (!priv)
- return -ENOMEM;
- card = priv->card;
-
- /* Link both structures together */
- card->p_dev = link;
- link->priv = priv;
-
- ret = spectrum_cs_config(link);
- if (ret)
- goto err_free_orinocodev;
-
- return 0;
-
-err_free_orinocodev:
- free_orinocodev(priv);
- return ret;
-}
-
-static void spectrum_cs_detach(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
-
- orinoco_if_del(priv);
-
- spectrum_cs_release(link);
-
- free_orinocodev(priv);
-} /* spectrum_cs_detach */
-
-static int spectrum_cs_config_check(struct pcmcia_device *p_dev,
- void *priv_data)
-{
- if (p_dev->config_index == 0)
- return -EINVAL;
-
- return pcmcia_request_io(p_dev);
-}
-
-static int
-spectrum_cs_config(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- struct hermes *hw = &priv->hw;
- int ret;
- void __iomem *mem;
-
- link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC |
- CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
- if (ignore_cis_vcc)
- link->config_flags &= ~CONF_AUTO_CHECK_VCC;
- ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL);
- if (ret) {
- if (!ignore_cis_vcc)
- printk(KERN_ERR PFX "GetNextTuple(): No matching "
- "CIS configuration. Maybe you need the "
- "ignore_cis_vcc=1 parameter.\n");
- goto failed;
- }
-
- mem = ioport_map(link->resource[0]->start,
- resource_size(link->resource[0]));
- if (!mem)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
- hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
- hw->eeprom_pda = true;
-
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- /* Reset card */
- if (spectrum_cs_hard_reset(priv) != 0)
- goto failed;
-
- /* Initialise the main driver */
- if (orinoco_init(priv) != 0) {
- printk(KERN_ERR PFX "orinoco_init() failed\n");
- goto failed;
- }
-
- /* Register an interface with the stack */
- if (orinoco_if_add(priv, link->resource[0]->start,
- link->irq, NULL) != 0) {
- printk(KERN_ERR PFX "orinoco_if_add() failed\n");
- goto failed;
- }
-
- return 0;
-
- failed:
- spectrum_cs_release(link);
- return -ENODEV;
-} /* spectrum_cs_config */
-
-static void
-spectrum_cs_release(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- unsigned long flags;
-
- /* We're committed to taking the device away now, so mark the
- * hardware as unavailable */
- priv->hw.ops->lock_irqsave(&priv->lock, &flags);
- priv->hw_unavailable++;
- priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
-
- pcmcia_disable_device(link);
- if (priv->hw.iobase)
- ioport_unmap(priv->hw.iobase);
-} /* spectrum_cs_release */
-
-
-static int
-spectrum_cs_suspend(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
-
- /* Mark the device as stopped, to block IO until later */
- orinoco_down(priv);
-
- return 0;
-}
-
-static int
-spectrum_cs_resume(struct pcmcia_device *link)
-{
- struct orinoco_private *priv = link->priv;
- int err = orinoco_up(priv);
-
- return err;
-}
-
-
-/********************************************************************/
-/* Module initialization */
-/********************************************************************/
-
-static const struct pcmcia_device_id spectrum_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */
- PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
- PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids);
-
-static struct pcmcia_driver orinoco_driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .probe = spectrum_cs_probe,
- .remove = spectrum_cs_detach,
- .suspend = spectrum_cs_suspend,
- .resume = spectrum_cs_resume,
- .id_table = spectrum_cs_ids,
-};
-module_pcmcia_driver(orinoco_driver);
diff --git a/drivers/net/wireless/intersil/orinoco/wext.c b/drivers/net/wireless/intersil/orinoco/wext.c
deleted file mode 100644
index dea1ff044..000000000
--- a/drivers/net/wireless/intersil/orinoco/wext.c
+++ /dev/null
@@ -1,1428 +0,0 @@
-/* Wireless extensions support.
- *
- * See copyright notice in main.c
- */
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
-#include <net/iw_handler.h>
-#include <net/cfg80211.h>
-#include <net/cfg80211-wext.h>
-
-#include "hermes.h"
-#include "hermes_rid.h"
-#include "orinoco.h"
-
-#include "hw.h"
-#include "mic.h"
-#include "scan.h"
-#include "main.h"
-
-#include "wext.h"
-
-#define MAX_RID_LEN 1024
-
-/* Helper routine to record keys
- * It is called under orinoco_lock so it may not sleep */
-static int orinoco_set_key(struct orinoco_private *priv, int index,
- enum orinoco_alg alg, const u8 *key, int key_len,
- const u8 *seq, int seq_len)
-{
- kfree_sensitive(priv->keys[index].key);
- kfree_sensitive(priv->keys[index].seq);
-
- if (key_len) {
- priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
- if (!priv->keys[index].key)
- goto nomem;
- } else
- priv->keys[index].key = NULL;
-
- if (seq_len) {
- priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
- if (!priv->keys[index].seq)
- goto free_key;
- } else
- priv->keys[index].seq = NULL;
-
- priv->keys[index].key_len = key_len;
- priv->keys[index].seq_len = seq_len;
-
- if (key_len)
- memcpy((void *)priv->keys[index].key, key, key_len);
- if (seq_len)
- memcpy((void *)priv->keys[index].seq, seq, seq_len);
-
- switch (alg) {
- case ORINOCO_ALG_TKIP:
- priv->keys[index].cipher = WLAN_CIPHER_SUITE_TKIP;
- break;
-
- case ORINOCO_ALG_WEP:
- priv->keys[index].cipher = (key_len > SMALL_KEY_SIZE) ?
- WLAN_CIPHER_SUITE_WEP104 : WLAN_CIPHER_SUITE_WEP40;
- break;
-
- case ORINOCO_ALG_NONE:
- default:
- priv->keys[index].cipher = 0;
- break;
- }
-
- return 0;
-
-free_key:
- kfree(priv->keys[index].key);
- priv->keys[index].key = NULL;
-
-nomem:
- priv->keys[index].key_len = 0;
- priv->keys[index].seq_len = 0;
- priv->keys[index].cipher = 0;
-
- return -ENOMEM;
-}
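
The deleted orinoco_set_key() above follows the staged-allocation idiom: allocate the key buffer, then the sequence buffer, and unwind with gotos so a failure at the second step frees the first. A minimal user-space sketch of the same pattern, with all names hypothetical:

/* Sketch of the staged-allocation/goto-unwind pattern used by
 * orinoco_set_key(). User-space stand-in; names are hypothetical. */
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct demo_key {
	unsigned char *key, *seq;
	size_t key_len, seq_len;
};

static int demo_set_key(struct demo_key *k, const unsigned char *key,
			size_t key_len, const unsigned char *seq,
			size_t seq_len)
{
	/* Drop any previous material before installing the new key. */
	free(k->key);
	free(k->seq);
	k->key = k->seq = NULL;

	if (key_len) {
		k->key = calloc(1, key_len);
		if (!k->key)
			goto nomem;
		memcpy(k->key, key, key_len);
	}
	if (seq_len) {
		k->seq = calloc(1, seq_len);
		if (!k->seq)
			goto free_key;	/* unwind the first allocation */
		memcpy(k->seq, seq, seq_len);
	}
	k->key_len = key_len;
	k->seq_len = seq_len;
	return 0;

free_key:
	free(k->key);
	k->key = NULL;
nomem:
	k->key_len = k->seq_len = 0;
	return -ENOMEM;
}

int main(void)
{
	struct demo_key k = { 0 };
	unsigned char key[5] = "abcd";

	return demo_set_key(&k, key, sizeof(key), NULL, 0) ? 1 : 0;
}

The unwind order mirrors the original: the error labels run in reverse allocation order, so each failure path frees exactly what was allocated before it.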
-
-static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct hermes *hw = &priv->hw;
- struct iw_statistics *wstats = &priv->wstats;
- int err;
- unsigned long flags;
-
- if (!netif_device_present(dev)) {
- printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
- dev->name);
- return NULL; /* FIXME: Can we do better than this? */
- }
-
- /* If busy, return the old stats. Returning NULL may cause
- * the interface to disappear from /proc/net/wireless */
- if (orinoco_lock(priv, &flags) != 0)
- return wstats;
-
- /* We can't really wait for the tallies inquiry command to
- * complete, so we just use the previous results and trigger
- * a new tallies inquiry command for next time - Jean II */
- /* FIXME: Really we should wait for the inquiry to come back -
- * as it is the stats we give don't make a whole lot of sense.
- * Unfortunately, it's not clear how to do that within the
- * wireless extensions framework: I think we're in user
- * context, but a lock seems to be held by the time we get in
- * here so we're not safe to sleep here. */
- hermes_inquire(hw, HERMES_INQ_TALLIES);
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
- memset(&wstats->qual, 0, sizeof(wstats->qual));
- /* If a spy address is defined, we report stats of the
- * first spy address - Jean II */
- if (SPY_NUMBER(priv)) {
- wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
- wstats->qual.level = priv->spy_data.spy_stat[0].level;
- wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
- wstats->qual.updated =
- priv->spy_data.spy_stat[0].updated;
- }
- } else {
- struct {
- __le16 qual, signal, noise, unused;
- } __packed cq;
-
- err = HERMES_READ_RECORD(hw, USER_BAP,
- HERMES_RID_COMMSQUALITY, &cq);
-
- if (!err) {
- wstats->qual.qual = (int)le16_to_cpu(cq.qual);
- wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
- wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
- wstats->qual.updated =
- IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- }
- }
-
- orinoco_unlock(priv, &flags);
- return wstats;
-}
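
The (int)le16_to_cpu(cq.signal) - 0x95 conversion above maps the firmware's unsigned comms-quality readings onto dBm. A tiny sketch of that arithmetic, treating the 0x95 (149) offset as a firmware-specific constant taken from the code above:

/* Sketch: converting Hermes comms-quality readings to dBm the way
 * orinoco_get_wireless_stats() does (raw value minus 0x95 == 149). */
#include <stdio.h>

int main(void)
{
	unsigned int raw_signal = 0x40, raw_noise = 0x10; /* example readings */

	int level_dbm = (int)raw_signal - 0x95;	/* 64 - 149 = -85 dBm */
	int noise_dbm = (int)raw_noise - 0x95;	/* 16 - 149 = -133 dBm */

	printf("level %d dBm, noise %d dBm\n", level_dbm, noise_dbm);
	return 0;
}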
-
-/********************************************************************/
-/* Wireless extensions */
-/********************************************************************/
-
-static int orinoco_ioctl_setwap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct sockaddr *ap_addr = &wrqu->ap_addr;
- struct orinoco_private *priv = ndev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- /* Enable automatic roaming - no sanity checks are needed */
- if (is_zero_ether_addr(ap_addr->sa_data) ||
- is_broadcast_ether_addr(ap_addr->sa_data)) {
- priv->bssid_fixed = 0;
- eth_zero_addr(priv->desired_bssid);
-
- /* "off" means keep existing connection */
- if (ap_addr->sa_data[0] == 0) {
- __orinoco_hw_set_wap(priv);
- err = 0;
- }
- goto out;
- }
-
- if (priv->firmware_type == FIRMWARE_TYPE_AGERE) {
- printk(KERN_WARNING "%s: Lucent/Agere firmware doesn't "
- "support manual roaming\n",
- dev->name);
- err = -EOPNOTSUPP;
- goto out;
- }
-
- if (priv->iw_mode != NL80211_IFTYPE_STATION) {
- printk(KERN_WARNING "%s: Manual roaming supported only in "
- "managed mode\n", dev->name);
- err = -EOPNOTSUPP;
- goto out;
- }
-
- /* Intersil firmware hangs without Desired ESSID */
- if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL &&
- strlen(priv->desired_essid) == 0) {
- printk(KERN_WARNING "%s: Desired ESSID must be set for "
- "manual roaming\n", dev->name);
- err = -EOPNOTSUPP;
- goto out;
- }
-
- /* Finally, enable manual roaming */
- priv->bssid_fixed = 1;
- memcpy(priv->desired_bssid, &ap_addr->sa_data, ETH_ALEN);
-
- out:
- orinoco_unlock(priv, &flags);
- return err;
-}
-
-static int orinoco_ioctl_getwap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct sockaddr *ap_addr = &wrqu->ap_addr;
- struct orinoco_private *priv = ndev_priv(dev);
-
- int err = 0;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- ap_addr->sa_family = ARPHRD_ETHER;
- err = orinoco_hw_get_current_bssid(priv, ap_addr->sa_data);
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_setiwencode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *keybuf)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct orinoco_private *priv = ndev_priv(dev);
- int index = (erq->flags & IW_ENCODE_INDEX) - 1;
- int setindex = priv->tx_key;
- enum orinoco_alg encode_alg = priv->encode_alg;
- int restricted = priv->wep_restrict;
- int err = -EINPROGRESS; /* Call commit handler */
- unsigned long flags;
-
- if (!priv->has_wep)
- return -EOPNOTSUPP;
-
- if (erq->pointer) {
- /* We actually have a key to set - check its length */
- if (erq->length > LARGE_KEY_SIZE)
- return -E2BIG;
-
- if ((erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep)
- return -E2BIG;
- }
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- /* Clear any TKIP key we have */
- if ((priv->has_wpa) && (priv->encode_alg == ORINOCO_ALG_TKIP))
- (void) orinoco_clear_tkip_key(priv, setindex);
-
- if (erq->length > 0) {
- if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
- index = priv->tx_key;
-
- /* Switch on WEP if off */
- if (encode_alg != ORINOCO_ALG_WEP) {
- setindex = index;
- encode_alg = ORINOCO_ALG_WEP;
- }
- } else {
- /* Important note : if the user does "iwconfig eth0 enc off",
- * we will arrive here with an index of -1. This is valid
- * but needs to be taken care of... Jean II */
- if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
- if ((index != -1) || (erq->flags == 0)) {
- err = -EINVAL;
- goto out;
- }
- } else {
- /* Set the index : Check that the key is valid */
- if (priv->keys[index].key_len == 0) {
- err = -EINVAL;
- goto out;
- }
- setindex = index;
- }
- }
-
- if (erq->flags & IW_ENCODE_DISABLED)
- encode_alg = ORINOCO_ALG_NONE;
- if (erq->flags & IW_ENCODE_OPEN)
- restricted = 0;
- if (erq->flags & IW_ENCODE_RESTRICTED)
- restricted = 1;
-
- if (erq->pointer && erq->length > 0) {
- err = orinoco_set_key(priv, index, ORINOCO_ALG_WEP, keybuf,
- erq->length, NULL, 0);
- }
- priv->tx_key = setindex;
-
- /* Try fast key change if connected and only keys are changed */
- if ((priv->encode_alg == encode_alg) &&
- (priv->wep_restrict == restricted) &&
- netif_carrier_ok(dev)) {
- err = __orinoco_hw_setup_wepkeys(priv);
- /* No need to commit if successful */
- goto out;
- }
-
- priv->encode_alg = encode_alg;
- priv->wep_restrict = restricted;
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_getiwencode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *keybuf)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct orinoco_private *priv = ndev_priv(dev);
- int index = (erq->flags & IW_ENCODE_INDEX) - 1;
- unsigned long flags;
-
- if (!priv->has_wep)
- return -EOPNOTSUPP;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
- index = priv->tx_key;
-
- erq->flags = 0;
- if (!priv->encode_alg)
- erq->flags |= IW_ENCODE_DISABLED;
- erq->flags |= index + 1;
-
- if (priv->wep_restrict)
- erq->flags |= IW_ENCODE_RESTRICTED;
- else
- erq->flags |= IW_ENCODE_OPEN;
-
- erq->length = priv->keys[index].key_len;
-
- memcpy(keybuf, priv->keys[index].key, erq->length);
-
- orinoco_unlock(priv, &flags);
- return 0;
-}
-
-static int orinoco_ioctl_setessid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *essidbuf)
-{
- struct iw_point *erq = &wrqu->essid;
- struct orinoco_private *priv = ndev_priv(dev);
- unsigned long flags;
-
- /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
- * anyway... - Jean II */
-
- /* Hum... Should not use Wireless Extension constant (may change),
- * should use our own... - Jean II */
- if (erq->length > IW_ESSID_MAX_SIZE)
- return -E2BIG;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- /* NULL the string (for NULL termination & ESSID = ANY) - Jean II */
- memset(priv->desired_essid, 0, sizeof(priv->desired_essid));
-
- /* If not ANY, get the new ESSID */
- if (erq->flags)
- memcpy(priv->desired_essid, essidbuf, erq->length);
-
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_getessid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *essidbuf)
-{
- struct iw_point *erq = &wrqu->essid;
- struct orinoco_private *priv = ndev_priv(dev);
- int active;
- int err = 0;
- unsigned long flags;
-
- if (netif_running(dev)) {
- err = orinoco_hw_get_essid(priv, &active, essidbuf);
- if (err < 0)
- return err;
- erq->length = err;
- } else {
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
- memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE);
- erq->length = strlen(priv->desired_essid);
- orinoco_unlock(priv, &flags);
- }
-
- erq->flags = 1;
-
- return 0;
-}
-
-static int orinoco_ioctl_setfreq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_freq *frq = &wrqu->freq;
- struct orinoco_private *priv = ndev_priv(dev);
- int chan = -1;
- unsigned long flags;
- int err = -EINPROGRESS; /* Call commit handler */
-
- /* In infrastructure mode the AP sets the channel */
- if (priv->iw_mode == NL80211_IFTYPE_STATION)
- return -EBUSY;
-
- if ((frq->e == 0) && (frq->m <= 1000)) {
- /* Setting by channel number */
- chan = frq->m;
- } else {
- /* Setting by frequency */
- int denom = 1;
- int i;
-
- /* Calculate denominator to rescale to MHz */
- for (i = 0; i < (6 - frq->e); i++)
- denom *= 10;
-
- chan = ieee80211_frequency_to_channel(frq->m / denom);
- }
-
- if ((chan < 1) || (chan > NUM_CHANNELS) ||
- !(priv->channel_mask & (1 << (chan - 1))))
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- priv->channel = chan;
- if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
- /* Fast channel change - no commit if successful */
- struct hermes *hw = &priv->hw;
- err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
- HERMES_TEST_SET_CHANNEL,
- chan, NULL);
- }
- orinoco_unlock(priv, &flags);
-
- return err;
-}
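
SIOCSIWFREQ hands the driver a mantissa/exponent pair: with e == 0 and m <= 1000 the value is a channel number, otherwise it is a frequency in units of 10^e Hz that the loop above rescales to MHz before channel lookup. A standalone sketch of that decoding; the 2.4 GHz mapping below is the standard one and stands in for the kernel's ieee80211_frequency_to_channel():

/* Sketch of the SIOCSIWFREQ decoding above: an (m, e) pair is either a
 * channel number or a frequency in units of 10^e Hz. */
#include <stdio.h>

static int freq_mhz_to_channel(int mhz)
{
	if (mhz == 2484)
		return 14;
	if (mhz >= 2412 && mhz <= 2472 && (mhz - 2407) % 5 == 0)
		return (mhz - 2407) / 5;
	return -1;
}

static int decode_iw_freq(int m, int e)
{
	if (e == 0 && m <= 1000)
		return m;		/* already a channel number */

	int denom = 1;
	for (int i = 0; i < 6 - e; i++)	/* rescale 10^e Hz to MHz */
		denom *= 10;
	return freq_mhz_to_channel(m / denom);
}

int main(void)
{
	printf("%d\n", decode_iw_freq(6, 0));		/* channel 6 */
	printf("%d\n", decode_iw_freq(2412000, 3));	/* 2.412 GHz -> 1 */
	return 0;
}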
-
-static int orinoco_ioctl_getfreq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_freq *frq = &wrqu->freq;
- struct orinoco_private *priv = ndev_priv(dev);
- int tmp;
-
- /* Locking done in there */
- tmp = orinoco_hw_get_freq(priv);
- if (tmp < 0)
- return tmp;
-
- frq->m = tmp * 100000;
- frq->e = 1;
-
- return 0;
-}
-
-static int orinoco_ioctl_getsens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *srq = &wrqu->sens;
- struct orinoco_private *priv = ndev_priv(dev);
- struct hermes *hw = &priv->hw;
- u16 val;
- int err;
- unsigned long flags;
-
- if (!priv->has_sensitivity)
- return -EOPNOTSUPP;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFSYSTEMSCALE, &val);
- orinoco_unlock(priv, &flags);
-
- if (err)
- return err;
-
- srq->value = val;
- srq->fixed = 0; /* auto */
-
- return 0;
-}
-
-static int orinoco_ioctl_setsens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *srq = &wrqu->sens;
- struct orinoco_private *priv = ndev_priv(dev);
- int val = srq->value;
- unsigned long flags;
-
- if (!priv->has_sensitivity)
- return -EOPNOTSUPP;
-
- if ((val < 1) || (val > 3))
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
- priv->ap_density = val;
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_setrate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *rrq = &wrqu->bitrate;
- struct orinoco_private *priv = ndev_priv(dev);
- int ratemode;
- int bitrate; /* 100s of kilobits */
- unsigned long flags;
-
- /* As the user space doesn't know our highest rate, it uses -1
- * to ask us to set the highest rate. Test it using "iwconfig
- * ethX rate auto" - Jean II */
- if (rrq->value == -1)
- bitrate = 110;
- else {
- if (rrq->value % 100000)
- return -EINVAL;
- bitrate = rrq->value / 100000;
- }
-
- ratemode = orinoco_get_bitratemode(bitrate, !rrq->fixed);
-
- if (ratemode == -1)
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
- priv->bitratemode = ratemode;
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS;
-}
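
WEXT bitrates travel as bits per second; the driver keeps them in units of 100 kb/s, and -1 from user space means "highest rate" (110, i.e. 11 Mb/s, for this hardware). A small sketch of that encoding, assuming those conventions from the code above:

/* Sketch of the bitrate encoding used by orinoco_ioctl_setrate():
 * WEXT passes bits/s, the driver keeps units of 100 kb/s, and -1
 * means "highest rate". */
#include <stdio.h>

static int wext_rate_to_units(long bps)
{
	if (bps == -1)
		return 110;		/* auto: highest supported rate */
	if (bps % 100000)
		return -1;		/* not representable: -EINVAL above */
	return (int)(bps / 100000);
}

int main(void)
{
	printf("%d\n", wext_rate_to_units(11000000));	/* 110 */
	printf("%d\n", wext_rate_to_units(5500000));	/* 55  */
	printf("%d\n", wext_rate_to_units(-1));		/* 110 */
	return 0;
}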
-
-static int orinoco_ioctl_getrate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *rrq = &wrqu->bitrate;
- struct orinoco_private *priv = ndev_priv(dev);
- int err = 0;
- int bitrate, automatic;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- orinoco_get_ratemode_cfg(priv->bitratemode, &bitrate, &automatic);
-
- /* If the interface is running we try to find out more about
- the current mode */
- if (netif_running(dev)) {
- int act_bitrate;
- int lerr;
-
- /* Ignore errors if we can't get the actual bitrate */
- lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate);
- if (!lerr)
- bitrate = act_bitrate;
- }
-
- orinoco_unlock(priv, &flags);
-
- rrq->value = bitrate;
- rrq->fixed = !automatic;
- rrq->disabled = 0;
-
- return err;
-}
-
-static int orinoco_ioctl_setpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *prq = &wrqu->power;
- struct orinoco_private *priv = ndev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (prq->disabled) {
- priv->pm_on = 0;
- } else {
- switch (prq->flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- priv->pm_mcast = 0;
- priv->pm_on = 1;
- break;
- case IW_POWER_ALL_R:
- priv->pm_mcast = 1;
- priv->pm_on = 1;
- break;
- case IW_POWER_ON:
- /* No flags : but we may have a value - Jean II */
- break;
- default:
- err = -EINVAL;
- goto out;
- }
-
- if (prq->flags & IW_POWER_TIMEOUT) {
- priv->pm_on = 1;
- priv->pm_timeout = prq->value / 1000;
- }
- if (prq->flags & IW_POWER_PERIOD) {
- priv->pm_on = 1;
- priv->pm_period = prq->value / 1000;
- }
- /* It's valid to not have a value if we are just toggling
- * the flags... Jean II */
- if (!priv->pm_on) {
- err = -EINVAL;
- goto out;
- }
- }
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_getpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_param *prq = &wrqu->power;
- struct orinoco_private *priv = ndev_priv(dev);
- struct hermes *hw = &priv->hw;
- int err = 0;
- u16 enable, period, timeout, mcast;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPMENABLED, &enable);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMAXSLEEPDURATION, &period);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
- if (err)
- goto out;
-
- err = hermes_read_wordrec(hw, USER_BAP,
- HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
- if (err)
- goto out;
-
- prq->disabled = !enable;
- /* Note : by default, display the period */
- if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- prq->flags = IW_POWER_TIMEOUT;
- prq->value = timeout * 1000;
- } else {
- prq->flags = IW_POWER_PERIOD;
- prq->value = period * 1000;
- }
- if (mcast)
- prq->flags |= IW_POWER_ALL_R;
- else
- prq->flags |= IW_POWER_UNICAST_R;
-
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
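
The handler above reads four firmware words and folds them into one iw_param. A sketch of that folding; the *1000 scaling assumes the firmware words are in milliseconds and WEXT wants microseconds, and the flag values are illustrative stand-ins for the linux/wireless.h constants, not verified copies:

/* Sketch of the SIOCGIWPOWER folding above. Constants and the ms->us
 * scaling are assumptions; check linux/wireless.h and the Hermes docs. */
#include <stdio.h>

#define DEMO_POWER_PERIOD     0x1000	/* stand-in for IW_POWER_PERIOD */
#define DEMO_POWER_TIMEOUT    0x2000	/* stand-in for IW_POWER_TIMEOUT */
#define DEMO_POWER_UNICAST_R  0x0100	/* stand-in for IW_POWER_UNICAST_R */
#define DEMO_POWER_ALL_R      0x0300	/* stand-in for IW_POWER_ALL_R */

struct demo_power { int disabled, flags, value; };

static struct demo_power fold(int enable, int period_ms, int timeout_ms,
			      int mcast, int want_timeout)
{
	struct demo_power p = { .disabled = !enable };

	if (want_timeout) {
		p.flags = DEMO_POWER_TIMEOUT;
		p.value = timeout_ms * 1000;	/* ms -> us (assumed) */
	} else {
		p.flags = DEMO_POWER_PERIOD;
		p.value = period_ms * 1000;
	}
	p.flags |= mcast ? DEMO_POWER_ALL_R : DEMO_POWER_UNICAST_R;
	return p;
}

int main(void)
{
	struct demo_power p = fold(1, 100, 50, 1, 0);
	printf("disabled=%d flags=0x%x value=%dus\n",
	       p.disabled, p.flags, p.value);
	return 0;
}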
-
-static int orinoco_ioctl_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, alg = ext->alg, set_key = 1;
- unsigned long flags;
- int err = -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- /* Determine and validate the key index */
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if ((idx < 1) || (idx > 4))
- goto out;
- idx--;
- } else
- idx = priv->tx_key;
-
- if (encoding->flags & IW_ENCODE_DISABLED)
- alg = IW_ENCODE_ALG_NONE;
-
- if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
- /* Clear any TKIP TX key we had */
- (void) orinoco_clear_tkip_key(priv, priv->tx_key);
- }
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- priv->tx_key = idx;
- set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
- (ext->key_len > 0)) ? 1 : 0;
- }
-
- if (set_key) {
- /* Set the requested key first */
- switch (alg) {
- case IW_ENCODE_ALG_NONE:
- priv->encode_alg = ORINOCO_ALG_NONE;
- err = orinoco_set_key(priv, idx, ORINOCO_ALG_NONE,
- NULL, 0, NULL, 0);
- break;
-
- case IW_ENCODE_ALG_WEP:
- if (ext->key_len <= 0)
- goto out;
-
- priv->encode_alg = ORINOCO_ALG_WEP;
- err = orinoco_set_key(priv, idx, ORINOCO_ALG_WEP,
- ext->key, ext->key_len, NULL, 0);
- break;
-
- case IW_ENCODE_ALG_TKIP:
- {
- u8 *tkip_iv = NULL;
-
- if (!priv->has_wpa ||
- (ext->key_len > sizeof(struct orinoco_tkip_key)))
- goto out;
-
- priv->encode_alg = ORINOCO_ALG_TKIP;
-
- if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
- tkip_iv = &ext->rx_seq[0];
-
- err = orinoco_set_key(priv, idx, ORINOCO_ALG_TKIP,
- ext->key, ext->key_len, tkip_iv,
- ORINOCO_SEQ_LEN);
-
- err = __orinoco_hw_set_tkip_key(priv, idx,
- ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
- priv->keys[idx].key, priv->keys[idx].key_len,
- tkip_iv, ORINOCO_SEQ_LEN, NULL, 0);
- if (err)
- printk(KERN_ERR "%s: Error %d setting TKIP key\n",
- dev->name, err);
-
- goto out;
- }
- default:
- goto out;
- }
- }
- err = -EINPROGRESS;
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, max_key_len;
- unsigned long flags;
- int err;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = -EINVAL;
- max_key_len = encoding->length - sizeof(*ext);
- if (max_key_len < 0)
- goto out;
-
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if ((idx < 1) || (idx > 4))
- goto out;
- idx--;
- } else
- idx = priv->tx_key;
-
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- switch (priv->encode_alg) {
- case ORINOCO_ALG_NONE:
- ext->alg = IW_ENCODE_ALG_NONE;
- ext->key_len = 0;
- encoding->flags |= IW_ENCODE_DISABLED;
- break;
- case ORINOCO_ALG_WEP:
- ext->alg = IW_ENCODE_ALG_WEP;
- ext->key_len = min(priv->keys[idx].key_len, max_key_len);
- memcpy(ext->key, priv->keys[idx].key, ext->key_len);
- encoding->flags |= IW_ENCODE_ENABLED;
- break;
- case ORINOCO_ALG_TKIP:
- ext->alg = IW_ENCODE_ALG_TKIP;
- ext->key_len = min(priv->keys[idx].key_len, max_key_len);
- memcpy(ext->key, priv->keys[idx].key, ext->key_len);
- encoding->flags |= IW_ENCODE_ENABLED;
- break;
- }
-
- err = 0;
- out:
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct hermes *hw = &priv->hw;
- struct iw_param *param = &wrqu->param;
- unsigned long flags;
- int ret = -EINPROGRESS;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- case IW_AUTH_PRIVACY_INVOKED:
- case IW_AUTH_DROP_UNENCRYPTED:
- /*
- * orinoco does not use these parameters
- */
- break;
-
- case IW_AUTH_MFP:
- /* Management Frame Protection not supported.
- * Only fail if set to required.
- */
- if (param->value == IW_AUTH_MFP_REQUIRED)
- ret = -EINVAL;
- break;
-
- case IW_AUTH_KEY_MGMT:
- /* wl_lkm implies value 2 == PSK for Hermes I
- * which ties in with WEXT
- * no other hints tho :(
- */
- priv->key_mgmt = param->value;
- break;
-
- case IW_AUTH_TKIP_COUNTERMEASURES:
- /* When countermeasures are enabled, shut down the
- * card; when disabled, re-enable the card. This must
- * take effect immediately.
- *
- * TODO: Make sure that the EAPOL message is getting
- * out before card disabled
- */
- if (param->value) {
- priv->tkip_cm_active = 1;
- ret = hermes_disable_port(hw, 0);
- } else {
- priv->tkip_cm_active = 0;
- ret = hermes_enable_port(hw, 0);
- }
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- if (param->value & IW_AUTH_ALG_SHARED_KEY)
- priv->wep_restrict = 1;
- else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
- priv->wep_restrict = 0;
- else
- ret = -EINVAL;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- if (priv->has_wpa) {
- priv->wpa_enabled = param->value ? 1 : 0;
- } else {
- if (param->value)
- ret = -EOPNOTSUPP;
- /* else silently accept disable of WPA */
- priv->wpa_enabled = 0;
- }
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- orinoco_unlock(priv, &flags);
- return ret;
-}
-
-static int orinoco_ioctl_get_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct iw_param *param = &wrqu->param;
- unsigned long flags;
- int ret = 0;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_KEY_MGMT:
- param->value = priv->key_mgmt;
- break;
-
- case IW_AUTH_TKIP_COUNTERMEASURES:
- param->value = priv->tkip_cm_active;
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- if (priv->wep_restrict)
- param->value = IW_AUTH_ALG_SHARED_KEY;
- else
- param->value = IW_AUTH_ALG_OPEN_SYSTEM;
- break;
-
- case IW_AUTH_WPA_ENABLED:
- param->value = priv->wpa_enabled;
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- orinoco_unlock(priv, &flags);
- return ret;
-}
-
-static int orinoco_ioctl_set_genie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- u8 *buf;
- unsigned long flags;
-
- /* cut off at IEEE80211_MAX_DATA_LEN */
- if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
- (wrqu->data.length && (extra == NULL)))
- return -EINVAL;
-
- if (wrqu->data.length) {
- buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
- } else
- buf = NULL;
-
- if (orinoco_lock(priv, &flags) != 0) {
- kfree(buf);
- return -EBUSY;
- }
-
- kfree(priv->wpa_ie);
- priv->wpa_ie = buf;
- priv->wpa_ie_len = wrqu->data.length;
-
- if (priv->wpa_ie) {
- /* Looks like wl_lkm wants to check the auth alg, and
- * somehow pass it to the firmware.
- * Instead it just calls the key mgmt rid
- * - we do this in set auth.
- */
- }
-
- orinoco_unlock(priv, &flags);
- return 0;
-}
-
-static int orinoco_ioctl_get_genie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- unsigned long flags;
- int err = 0;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
- wrqu->data.length = 0;
- goto out;
- }
-
- if (wrqu->data.length < priv->wpa_ie_len) {
- err = -E2BIG;
- goto out;
- }
-
- wrqu->data.length = priv->wpa_ie_len;
- memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
-
-out:
- orinoco_unlock(priv, &flags);
- return err;
-}
-
-static int orinoco_ioctl_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
- unsigned long flags;
- int ret = 0;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- /* silently ignore */
- break;
-
- case IW_MLME_DISASSOC:
-
- ret = orinoco_hw_disassociate(priv, mlme->addr.sa_data,
- mlme->reason_code);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- orinoco_unlock(priv, &flags);
- return ret;
-}
-
-static int orinoco_ioctl_reset(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (info->cmd == (SIOCIWFIRSTPRIV + 0x1)) {
- printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
-
- /* Firmware reset */
- orinoco_reset(&priv->reset_work);
- } else {
- printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
-
- schedule_work(&priv->reset_work);
- }
-
- return 0;
-}
-
-static int orinoco_ioctl_setibssport(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int val = *((int *) extra);
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- priv->ibss_port = val;
-
- /* Actually update the mode we are using */
- set_port_type(priv);
-
- orinoco_unlock(priv, &flags);
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_getibssport(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int *val = (int *) extra;
-
- *val = priv->ibss_port;
- return 0;
-}
-
-static int orinoco_ioctl_setport3(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int val = *((int *) extra);
- int err = 0;
- unsigned long flags;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- switch (val) {
- case 0: /* Try to do IEEE ad-hoc mode */
- if (!priv->has_ibss) {
- err = -EINVAL;
- break;
- }
- priv->prefer_port3 = 0;
-
- break;
-
- case 1: /* Try to do Lucent proprietary ad-hoc mode */
- if (!priv->has_port3) {
- err = -EINVAL;
- break;
- }
- priv->prefer_port3 = 1;
- break;
-
- default:
- err = -EINVAL;
- }
-
- if (!err) {
- /* Actually update the mode we are using */
- set_port_type(priv);
- err = -EINPROGRESS;
- }
-
- orinoco_unlock(priv, &flags);
-
- return err;
-}
-
-static int orinoco_ioctl_getport3(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int *val = (int *) extra;
-
- *val = priv->prefer_port3;
- return 0;
-}
-
-static int orinoco_ioctl_setpreamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- unsigned long flags;
- int val;
-
- if (!priv->has_preamble)
- return -EOPNOTSUPP;
-
- /* 802.11b has recently defined some short preamble.
- * Basically, the PHY header has been reduced in size.
- * This increases performance, especially at high rates
- * (the preamble is transmitted at 1 Mb/s), but unfortunately
- * it causes compatibility troubles... - Jean II */
- val = *((int *) extra);
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- if (val)
- priv->preamble = 1;
- else
- priv->preamble = 0;
-
- orinoco_unlock(priv, &flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-static int orinoco_ioctl_getpreamble(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- int *val = (int *) extra;
-
- if (!priv->has_preamble)
- return -EOPNOTSUPP;
-
- *val = priv->preamble;
- return 0;
-}
-
-/* ioctl interface to hermes_read_ltv()
- * To use with iwpriv, pass the RID as the token argument, e.g.
- * iwpriv get_rid [0xfc00]
- * At least Wireless Tools 25 is required to use iwpriv.
- * For Wireless Tools 25 and 26 append "dummy" at the end. */
-static int orinoco_ioctl_getrid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct iw_point *data = &wrqu->data;
- struct orinoco_private *priv = ndev_priv(dev);
- struct hermes *hw = &priv->hw;
- int rid = data->flags;
- u16 length;
- int err;
- unsigned long flags;
-
- /* It's a "get" function, but we don't want users to access the
- * WEP key and other raw firmware data */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (rid < 0xfc00 || rid > 0xffff)
- return -EINVAL;
-
- if (orinoco_lock(priv, &flags) != 0)
- return -EBUSY;
-
- err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
- extra);
- if (err)
- goto out;
-
- data->length = min_t(u16, HERMES_RECLEN_TO_BYTES(length),
- MAX_RID_LEN);
-
- out:
- orinoco_unlock(priv, &flags);
- return err;
-}
-
-
-/* Commit handler, called after set operations */
-static int orinoco_ioctl_commit(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct orinoco_private *priv = ndev_priv(dev);
- unsigned long flags;
- int err = 0;
-
- if (!priv->open)
- return 0;
-
- if (orinoco_lock(priv, &flags) != 0)
- return err;
-
- err = orinoco_commit(priv);
-
- orinoco_unlock(priv, &flags);
- return err;
-}
-
-static const struct iw_priv_args orinoco_privtab[] = {
- { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
- { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
- { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- 0, "set_port3" },
- { SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- "get_port3" },
- { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- 0, "set_preamble" },
- { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- "get_preamble" },
- { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- 0, "set_ibssport" },
- { SIOCIWFIRSTPRIV + 0x7, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- "get_ibssport" },
- { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_BYTE | MAX_RID_LEN,
- "get_rid" },
-};
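
Each iwpriv descriptor above packs the argument type and count into one 16-bit field, with IW_PRIV_SIZE_FIXED marking an exact count rather than a maximum; that is why "one fixed int" appears as IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1. A sketch of that packing; the constant values mirror linux/wireless.h as I recall it, so verify against your headers:

/* Sketch of the iwpriv argument packing used in orinoco_privtab. */
#include <stdio.h>

#define DEMO_PRIV_TYPE_BYTE   0x1000	/* stand-ins for IW_PRIV_* */
#define DEMO_PRIV_TYPE_INT    0x4000
#define DEMO_PRIV_SIZE_FIXED  0x0800
#define DEMO_PRIV_SIZE_MASK   0x07FF

int main(void)
{
	unsigned short one_fixed_int =
		DEMO_PRIV_TYPE_INT | DEMO_PRIV_SIZE_FIXED | 1;

	printf("descriptor 0x%04x: type 0x%x, fixed=%d, count=%d\n",
	       one_fixed_int,
	       one_fixed_int & 0x7000,
	       !!(one_fixed_int & DEMO_PRIV_SIZE_FIXED),
	       one_fixed_int & DEMO_PRIV_SIZE_MASK);
	return 0;
}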
-
-
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const iw_handler orinoco_handler[] = {
- IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
- IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname),
- IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
- IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
- IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode),
- IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode),
- IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
- IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
- IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange),
- IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
- IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
- IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan),
- IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan),
- IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
- IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
- IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
- IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
- IW_HANDLER(SIOCSIWRTS, cfg80211_wext_siwrts),
- IW_HANDLER(SIOCGIWRTS, cfg80211_wext_giwrts),
- IW_HANDLER(SIOCSIWFRAG, cfg80211_wext_siwfrag),
- IW_HANDLER(SIOCGIWFRAG, cfg80211_wext_giwfrag),
- IW_HANDLER(SIOCGIWRETRY, cfg80211_wext_giwretry),
- IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
- IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
- IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
- IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
- IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
- IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
- IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
- IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
- IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
- IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
- IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
-};
-
-
-/*
- Added typecasting since we no longer use iwreq_data -- Moustafa
- */
-static const iw_handler orinoco_private_handler[] = {
- [0] = orinoco_ioctl_reset,
- [1] = orinoco_ioctl_reset,
- [2] = orinoco_ioctl_setport3,
- [3] = orinoco_ioctl_getport3,
- [4] = orinoco_ioctl_setpreamble,
- [5] = orinoco_ioctl_getpreamble,
- [6] = orinoco_ioctl_setibssport,
- [7] = orinoco_ioctl_getibssport,
- [9] = orinoco_ioctl_getrid,
-};
-
-const struct iw_handler_def orinoco_handler_def = {
- .num_standard = ARRAY_SIZE(orinoco_handler),
- .num_private = ARRAY_SIZE(orinoco_private_handler),
- .num_private_args = ARRAY_SIZE(orinoco_privtab),
- .standard = orinoco_handler,
- .private = orinoco_private_handler,
- .private_args = orinoco_privtab,
- .get_wireless_stats = orinoco_get_wireless_stats,
-};
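
The IW_HANDLER entries in the table above place each function at the array slot derived from its ioctl number, so dispatch is a constant-time index rather than a switch. A sketch of that indexing, assuming the usual cmd - SIOCIWFIRST scheme from net/iw_handler.h (base 0x8B00):

/* Sketch of IW_HANDLER-style indexing: handlers live at
 * cmd - SIOCIWFIRST, so lookup is one array access. Values follow
 * wireless.h as I understand it. */
#include <stdio.h>

#define DEMO_SIOCIWFIRST   0x8B00
#define DEMO_SIOCSIWCOMMIT 0x8B00
#define DEMO_SIOCSIWFREQ   0x8B04

typedef int (*demo_handler)(void);

static int demo_commit(void) { return 0; }
static int demo_setfreq(void) { return 1; }

#define DEMO_IW_HANDLER(id, fn) [(id) - DEMO_SIOCIWFIRST] = (fn)

static const demo_handler demo_table[] = {
	DEMO_IW_HANDLER(DEMO_SIOCSIWCOMMIT, demo_commit),
	DEMO_IW_HANDLER(DEMO_SIOCSIWFREQ, demo_setfreq),
};

static demo_handler demo_lookup(unsigned int cmd)
{
	unsigned int idx = cmd - DEMO_SIOCIWFIRST;

	if (idx >= sizeof(demo_table) / sizeof(demo_table[0]))
		return NULL;
	return demo_table[idx];	/* may be NULL for unimplemented slots */
}

int main(void)
{
	demo_handler h = demo_lookup(DEMO_SIOCSIWFREQ);
	printf("%d\n", h ? h() : -1);	/* 1 */
	return 0;
}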
diff --git a/drivers/net/wireless/intersil/orinoco/wext.h b/drivers/net/wireless/intersil/orinoco/wext.h
deleted file mode 100644
index 1479f4e26..000000000
--- a/drivers/net/wireless/intersil/orinoco/wext.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* Wireless extensions support.
- *
- * See copyright notice in main.c
- */
-#ifndef _ORINOCO_WEXT_H_
-#define _ORINOCO_WEXT_H_
-
-#include <net/iw_handler.h>
-
-/* Structure defining all our WEXT handlers */
-extern const struct iw_handler_def orinoco_handler_def;
-
-#endif /* _ORINOCO_WEXT_H_ */
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index b52cce381..c4fe70e05 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
"FW rev %s - Softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
- "%s - %x.%x", fw_version,
+ "%.19s - %x.%x", fw_version,
priv->fw_var >> 8, priv->fw_var & 0xff);
}
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ce0179b8a..0073b5e0f 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = {
module_spi_driver(p54spi_driver);
+MODULE_DESCRIPTION("Prism54 SPI wireless driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/net/wireless/legacy/Kconfig b/drivers/net/wireless/legacy/Kconfig
deleted file mode 100644
index 3a5275941..000000000
--- a/drivers/net/wireless/legacy/Kconfig
+++ /dev/null
@@ -1,55 +0,0 @@
-config PCMCIA_RAYCS
- tristate "Aviator/Raytheon 2.4GHz wireless support"
- depends on PCMCIA
- select WIRELESS_EXT
- select WEXT_SPY
- select WEXT_PRIV
- help
- Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
- (PC-card) wireless Ethernet networking card to your computer.
- Please read the file
- <file:Documentation/networking/device_drivers/wifi/ray_cs.rst> for
- details.
-
- To compile this driver as a module, choose M here: the module will be
- called ray_cs. If unsure, say N.
-
-config PCMCIA_WL3501
- tristate "Planet WL3501 PCMCIA cards"
- depends on CFG80211 && PCMCIA
- select WIRELESS_EXT
- select WEXT_SPY
- help
- A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
- It has basic support for Linux wireless extensions and initial
- micro support for ethtool.
-
-config USB_NET_RNDIS_WLAN
- tristate "Wireless RNDIS USB support"
- depends on USB
- depends on CFG80211
- select USB_NET_DRIVERS
- select USB_USBNET
- select USB_NET_CDCETHER
- select USB_NET_RNDIS_HOST
- help
- This is a driver for wireless RNDIS devices.
- These are USB based adapters found in devices such as:
-
- Buffalo WLI-U2-KG125S
- U.S. Robotics USR5421
- Belkin F5D7051
- Linksys WUSB54GSv2
- Linksys WUSB54GSC
- Asus WL169gE
- Eminent EM4045
- BT Voyager 1055
- Linksys WUSB54GSv1
- U.S. Robotics USR5420
- BUFFALO WLI-USB-G54
-
- All of these devices are based on the Broadcom 4320 chip, which is the
- only wireless RNDIS chip known to date.
-
- If you choose to build a module, it'll be called rndis_wlan.
-
diff --git a/drivers/net/wireless/legacy/Makefile b/drivers/net/wireless/legacy/Makefile
deleted file mode 100644
index 36878f080..000000000
--- a/drivers/net/wireless/legacy/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# 16-bit wireless PCMCIA client drivers
-obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
-obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
-
-obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
-
diff --git a/drivers/net/wireless/legacy/ray_cs.c b/drivers/net/wireless/legacy/ray_cs.c
deleted file mode 100644
index c95a79e01..000000000
--- a/drivers/net/wireless/legacy/ray_cs.c
+++ /dev/null
@@ -1,2824 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*=============================================================================
- *
- * A PCMCIA client driver for the Raylink wireless LAN card.
- * The starting point for this module was the skeleton.c in the
- * PCMCIA 2.9.12 package written by David Hinds, dahinds@users.sourceforge.net
- *
- * Copyright (c) 1998 Corey Thomas (corey@world.std.com)
- *
- * Changes:
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
- * - reorganize kmallocs in ray_attach, checking all for failure
- * and releasing the previous allocations if one fails
- *
- * Daniele Bellucci <bellucda@tiscali.it> - 07/10/2003
- * - Audit copy_to_user in ioctl(SIOCGIWESSID)
- *
-=============================================================================*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/ptrace.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/skbuff.h>
-#include <linux/ieee80211.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-
-#include <asm/io.h>
-#include <asm/byteorder.h>
-#include <linux/uaccess.h>
-
-/* Warning : this stuff will slow down the driver... */
-#define WIRELESS_SPY /* Enable spying addresses */
-/* Definitions we need for spy */
-typedef struct iw_statistics iw_stats;
-typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
-
-#include "rayctl.h"
-#include "ray_cs.h"
-
-
-/** Prototypes based on PCMCIA skeleton driver *******************************/
-static int ray_config(struct pcmcia_device *link);
-static void ray_release(struct pcmcia_device *link);
-static void ray_detach(struct pcmcia_device *p_dev);
-
-/***** Prototypes indicated by device structure ******************************/
-static int ray_dev_close(struct net_device *dev);
-static int ray_dev_config(struct net_device *dev, struct ifmap *map);
-static struct net_device_stats *ray_get_stats(struct net_device *dev);
-static int ray_dev_init(struct net_device *dev);
-
-static int ray_open(struct net_device *dev);
-static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void ray_update_multi_list(struct net_device *dev, int all);
-static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
- unsigned char *data, int len);
-static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx,
- UCHAR msg_type, unsigned char *data);
-static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len);
-static iw_stats *ray_get_wireless_stats(struct net_device *dev);
-static const struct iw_handler_def ray_handler_def;
-
-/***** Prototypes for raylink functions **************************************/
-static void authenticate(ray_dev_t *local);
-static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
-static void authenticate_timeout(struct timer_list *t);
-static int get_free_ccs(ray_dev_t *local);
-static int get_free_tx_ccs(ray_dev_t *local);
-static void init_startup_params(ray_dev_t *local);
-static int parse_addr(char *in_str, UCHAR *out);
-static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, UCHAR type);
-static int ray_init(struct net_device *dev);
-static int interrupt_ecf(ray_dev_t *local, int ccs);
-static void ray_reset(struct net_device *dev);
-static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len);
-static void verify_dl_startup(struct timer_list *t);
-
-/* Prototypes for interrupt time functions *********************************/
-static irqreturn_t ray_interrupt(int reg, void *dev_id);
-static void clear_interrupt(ray_dev_t *local);
-static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
- unsigned int pkt_addr, int rx_len);
-static int copy_from_rx_buff(ray_dev_t *local, UCHAR *dest, int pkt_addr, int len);
-static void ray_rx(struct net_device *dev, ray_dev_t *local, struct rcs __iomem *prcs);
-static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs);
-static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
- unsigned int pkt_addr, int rx_len);
-static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
- unsigned int pkt_addr, int rx_len);
-static void associate(ray_dev_t *local);
-
-/* Card command functions */
-static int dl_startup_params(struct net_device *dev);
-static void join_net(struct timer_list *t);
-static void start_net(struct timer_list *t);
-
-/*===========================================================================*/
-/* Parameters that can be set with 'insmod' */
-
-/* ADHOC=0, Infrastructure=1 */
-static int net_type = ADHOC;
-
-/* Hop dwell time in Kus (1024 us units defined by 802.11) */
-static int hop_dwell = 128;
-
-/* Beacon period in Kus */
-static int beacon_period = 256;
-
-/* power save mode (0 = off, 1 = save power) */
-static int psm;
-
-/* String for network's Extended Service Set ID. 32 Characters max */
-static char *essid;
-
-/* Default to encapsulation unless translation requested */
-static bool translate = true;
-
-static int country = USA;
-
-static int sniffer;
-
-static int bc;
-
-/* 48 bit physical card address if overriding card's real physical
- * address is required. Since IEEE 802.11 addresses are 48 bits
- * like ethernet, an int can't be used, so a string is used. To
- * allow use of addresses starting with a decimal digit, the first
- * character must be a letter and will be ignored. This letter is
- * followed by up to 12 hex digits which are the address. If less
- * than 12 digits are used, the address will be left filled with 0's.
- * Note that bit 0 of the first byte is the broadcast bit, and evil
- * things will happen if it is not 0 in a card address.
- */
-static char *phy_addr = NULL;
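
The comment above describes the phy_addr format parsed by ray_cs's parse_addr(): one ignored leading letter, then up to 12 hex digits, with short strings left-padded with zeros. A standalone sketch of such a parser; it is a hypothetical stand-in, not the driver's exact routine:

/* Sketch of a parse_addr()-style parser for the phy_addr format:
 * skip the leading letter, read up to 12 hex digits, right-align
 * them so short strings are left-padded with zeros. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int demo_parse_addr(const char *in, unsigned char out[6])
{
	char digits[13];
	size_t n = 0;

	memset(out, 0, 6);
	if (!in || !*in)
		return 0;		/* no override requested */

	for (in++; *in && n < 12; in++) {	/* first char is ignored */
		if (!isxdigit((unsigned char)*in))
			return 0;
		digits[n++] = *in;
	}

	/* Right-align: the last digit always lands in out[5]'s low nibble. */
	for (size_t i = 0; i < n; i++) {
		size_t nib = 12 - n + i;	/* nibble index, 0..11 */
		unsigned v = isdigit((unsigned char)digits[i]) ?
			(unsigned)(digits[i] - '0') :
			(unsigned)(tolower((unsigned char)digits[i]) - 'a' + 10);
		out[nib / 2] |= v << ((nib & 1) ? 0 : 4);
	}
	return 1;
}

int main(void)
{
	unsigned char a[6];

	demo_parse_addr("xa1b2", a);	/* -> 00:00:00:00:a1:b2 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       a[0], a[1], a[2], a[3], a[4], a[5]);
	return 0;
}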
-
-static unsigned int ray_mem_speed = 500;
-
-/* WARNING: THIS DRIVER IS NOT CAPABLE OF HANDLING MULTIPLE DEVICES! */
-static struct pcmcia_device *this_device = NULL;
-
-MODULE_AUTHOR("Corey Thomas <corey@world.std.com>");
-MODULE_DESCRIPTION("Raylink/WebGear wireless LAN driver");
-MODULE_LICENSE("GPL");
-
-module_param(net_type, int, 0);
-module_param(hop_dwell, int, 0);
-module_param(beacon_period, int, 0);
-module_param(psm, int, 0);
-module_param(essid, charp, 0);
-module_param(translate, bool, 0);
-module_param(country, int, 0);
-module_param(sniffer, int, 0);
-module_param(bc, int, 0);
-module_param(phy_addr, charp, 0);
-module_param(ray_mem_speed, int, 0);
-
-static const UCHAR b5_default_startup_parms[] = {
- 0, 0, /* Adhoc station */
- 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, /* Active scan, CA Mode */
- 0, 0, 0, 0, 0, 0, /* No default MAC addr */
- 0x7f, 0xff, /* Frag threshold */
- 0x00, 0x80, /* Hop time 128 Kus */
- 0x01, 0x00, /* Beacon period 256 Kus */
- 0x01, 0x07, 0xa3, /* DTIM, retries, ack timeout */
- 0x1d, 0x82, 0x4e, /* SIFS, DIFS, PIFS */
- 0x7f, 0xff, /* RTS threshold */
- 0x04, 0xe2, 0x38, 0xA4, /* scan_dwell, max_scan_dwell */
- 0x05, /* assoc resp timeout thresh */
- 0x08, 0x02, 0x08, /* adhoc, infra, super cycle max */
- 0, /* Promiscuous mode */
- 0x0c, 0x0bd, /* Unique word */
- 0x32, /* Slot time */
- 0xff, 0xff, /* roam-low snr, low snr count */
- 0x05, 0xff, /* Infra, adhoc missed bcn thresh */
- 0x01, 0x0b, 0x4f, /* USA, hop pattern, hop pat length */
-/* b4 - b5 differences start here */
- 0x00, 0x3f, /* CW max */
- 0x00, 0x0f, /* CW min */
- 0x04, 0x08, /* Noise gain, limit offset */
- 0x28, 0x28, /* det rssi, med busy offsets */
- 7, /* det sync thresh */
- 0, 2, 2, /* test mode, min, max */
- 0, /* allow broadcast SSID probe resp */
- 0, 0, /* privacy must start, can join */
- 2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */
-};
-
-static const UCHAR b4_default_startup_parms[] = {
- 0, 0, /* Adhoc station */
- 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, /* Active scan, CA Mode */
- 0, 0, 0, 0, 0, 0, /* No default MAC addr */
- 0x7f, 0xff, /* Frag threshold */
- 0x02, 0x00, /* Hop time */
- 0x00, 0x01, /* Beacon period */
- 0x01, 0x07, 0xa3, /* DTIM, retries, ack timeout */
- 0x1d, 0x82, 0xce, /* SIFS, DIFS, PIFS */
- 0x7f, 0xff, /* RTS threshold */
- 0xfb, 0x1e, 0xc7, 0x5c, /* scan_dwell, max_scan_dwell */
- 0x05, /* assoc resp timeout thresh */
- 0x04, 0x02, 0x4, /* adhoc, infra, super cycle max */
- 0, /* Promiscuous mode */
- 0x0c, 0x0bd, /* Unique word */
- 0x4e, /* Slot time (TBD seems wrong) */
- 0xff, 0xff, /* roam-low snr, low snr count */
- 0x05, 0xff, /* Infra, adhoc missed bcn thresh */
- 0x01, 0x0b, 0x4e, /* USA, hop pattern, hop pat length */
-/* b4 - b5 differences start here */
- 0x3f, 0x0f, /* CW max, min */
- 0x04, 0x08, /* Noise gain, limit offset */
- 0x28, 0x28, /* det rssi, med busy offsets */
- 7, /* det sync thresh */
- 0, 2, 2, /* test mode, min, max */
- 0, /* rx/tx delay */
- 0, 0, 0, 0, 0, 0, /* current BSS id */
- 0 /* hop set */
-};
-
-/*===========================================================================*/
-static const u8 eth2_llc[] = { 0xaa, 0xaa, 3, 0, 0, 0 };
-
-static const char hop_pattern_length[] = { 1,
- USA_HOP_MOD, EUROPE_HOP_MOD,
- JAPAN_HOP_MOD, KOREA_HOP_MOD,
- SPAIN_HOP_MOD, FRANCE_HOP_MOD,
- ISRAEL_HOP_MOD, AUSTRALIA_HOP_MOD,
- JAPAN_TEST_HOP_MOD
-};
-
-static const char rcsid[] =
- "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.std.com>";
-
-static const struct net_device_ops ray_netdev_ops = {
- .ndo_init = ray_dev_init,
- .ndo_open = ray_open,
- .ndo_stop = ray_dev_close,
- .ndo_start_xmit = ray_dev_start_xmit,
- .ndo_set_config = ray_dev_config,
- .ndo_get_stats = ray_get_stats,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int ray_probe(struct pcmcia_device *p_dev)
-{
- ray_dev_t *local;
- struct net_device *dev;
- int ret;
-
- dev_dbg(&p_dev->dev, "ray_attach()\n");
-
- /* Allocate space for private device-specific data */
- dev = alloc_etherdev(sizeof(ray_dev_t));
- if (!dev)
- return -ENOMEM;
-
- local = netdev_priv(dev);
- local->finder = p_dev;
-
- /* The io structure describes IO port mapping. None used here */
- p_dev->resource[0]->end = 0;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-
- /* General socket configuration */
- p_dev->config_flags |= CONF_ENABLE_IRQ;
- p_dev->config_index = 1;
-
- p_dev->priv = dev;
-
- local->finder = p_dev;
- local->card_status = CARD_INSERTED;
- local->authentication_state = UNAUTHENTICATED;
- local->num_multi = 0;
- dev_dbg(&p_dev->dev, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n",
- p_dev, dev, local, &ray_interrupt);
-
- /* Raylink entries in the device structure */
- dev->netdev_ops = &ray_netdev_ops;
- dev->wireless_handlers = &ray_handler_def;
-#ifdef WIRELESS_SPY
- local->wireless_data.spy_data = &local->spy_data;
- dev->wireless_data = &local->wireless_data;
-#endif /* WIRELESS_SPY */
-
-
- dev_dbg(&p_dev->dev, "ray_cs ray_attach calling ether_setup.\n");
- netif_stop_queue(dev);
-
- timer_setup(&local->timer, NULL, 0);
-
- this_device = p_dev;
- ret = ray_config(p_dev);
- if (ret)
- goto err_free_dev;
-
- return 0;
-
-err_free_dev:
- free_netdev(dev);
- return ret;
-}
-
-static void ray_detach(struct pcmcia_device *link)
-{
- struct net_device *dev;
-
- dev_dbg(&link->dev, "ray_detach\n");
-
- this_device = NULL;
- dev = link->priv;
-
- ray_release(link);
-
- if (link->priv) {
- unregister_netdev(dev);
- free_netdev(dev);
- }
- dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
-} /* ray_detach */
-
-#define MAX_TUPLE_SIZE 128
-static int ray_config(struct pcmcia_device *link)
-{
- int ret = 0;
- int i;
- struct net_device *dev = link->priv;
- ray_dev_t *local = netdev_priv(dev);
-
- dev_dbg(&link->dev, "ray_config\n");
-
- /* Determine card type and firmware version */
- printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
- link->prod_id[0] ? link->prod_id[0] : " ",
- link->prod_id[1] ? link->prod_id[1] : " ",
- link->prod_id[2] ? link->prod_id[2] : " ",
- link->prod_id[3] ? link->prod_id[3] : " ");
-
- /* Now allocate an interrupt line. Note that this does not
- actually assign a handler to the interrupt.
- */
- ret = pcmcia_request_irq(link, ray_interrupt);
- if (ret)
- goto failed;
- dev->irq = link->irq;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
-/*** Set up 32k window for shared memory (transmit and control) ************/
- link->resource[2]->flags |= WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
- link->resource[2]->start = 0;
- link->resource[2]->end = 0x8000;
- ret = pcmcia_request_window(link, link->resource[2], ray_mem_speed);
- if (ret)
- goto failed;
- ret = pcmcia_map_mem_page(link, link->resource[2], 0);
- if (ret)
- goto failed;
- local->sram = ioremap(link->resource[2]->start,
- resource_size(link->resource[2]));
- if (!local->sram)
- goto failed;
-
-/*** Set up 16k window for shared memory (receive buffer) ***************/
- link->resource[3]->flags |=
- WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
- link->resource[3]->start = 0;
- link->resource[3]->end = 0x4000;
- ret = pcmcia_request_window(link, link->resource[3], ray_mem_speed);
- if (ret)
- goto failed;
- ret = pcmcia_map_mem_page(link, link->resource[3], 0x8000);
- if (ret)
- goto failed;
- local->rmem = ioremap(link->resource[3]->start,
- resource_size(link->resource[3]));
- if (!local->rmem)
- goto failed;
-
-/*** Set up window for attribute memory ***********************************/
- link->resource[4]->flags |=
- WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE | WIN_USE_WAIT;
- link->resource[4]->start = 0;
- link->resource[4]->end = 0x1000;
- ret = pcmcia_request_window(link, link->resource[4], ray_mem_speed);
- if (ret)
- goto failed;
- ret = pcmcia_map_mem_page(link, link->resource[4], 0);
- if (ret)
- goto failed;
- local->amem = ioremap(link->resource[4]->start,
- resource_size(link->resource[4]));
- if (!local->amem)
- goto failed;
-
- dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
- dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
- dev_dbg(&link->dev, "ray_config amem=%p\n", local->amem);
- if (ray_init(dev) < 0) {
- ray_release(link);
- return -ENODEV;
- }
-
- SET_NETDEV_DEV(dev, &link->dev);
- i = register_netdev(dev);
- if (i != 0) {
- printk("ray_config register_netdev() failed\n");
- ray_release(link);
- return i;
- }
-
- printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
- dev->name, dev->irq, dev->dev_addr);
-
- return 0;
-
-failed:
- ray_release(link);
- return -ENODEV;
-} /* ray_config */
-
-static inline struct ccs __iomem *ccs_base(ray_dev_t *dev)
-{
- return dev->sram + CCS_BASE;
-}
-
-static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
-{
- /*
- * This looks nonsensical, since there is a separate
- * RCS_BASE. But the difference between a "struct rcs"
- * and a "struct ccs" ends up being in the _index_ off
- * the base, so the base pointer is the same for both
- * ccs/rcs.
- */
- return dev->sram + CCS_BASE;
-}
-
-/*===========================================================================*/
-static int ray_init(struct net_device *dev)
-{
- int i;
- struct ccs __iomem *pccs;
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- dev_dbg(&link->dev, "ray_init(0x%p)\n", dev);
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_init - device not present\n");
- return -1;
- }
-
- local->net_type = net_type;
- local->sta_type = TYPE_STA;
-
- /* Copy the startup results to local memory */
- memcpy_fromio(&local->startup_res, local->sram + ECF_TO_HOST_BASE,
- sizeof(struct startup_res_6));
-
- /* Check Power up test status and get mac address from card */
- if (local->startup_res.startup_word != 0x80) {
- printk(KERN_INFO "ray_init ERROR card status = %2x\n",
- local->startup_res.startup_word);
- local->card_status = CARD_INIT_ERROR;
- return -1;
- }
-
- local->fw_ver = local->startup_res.firmware_version[0];
- local->fw_bld = local->startup_res.firmware_version[1];
- local->fw_var = local->startup_res.firmware_version[2];
- dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
- local->fw_bld);
-
- local->tib_length = 0x20;
- if ((local->fw_ver == 5) && (local->fw_bld >= 30))
- local->tib_length = local->startup_res.tib_length;
- dev_dbg(&link->dev, "ray_init tib_length = 0x%02x\n", local->tib_length);
- /* Initialize CCS's to buffer free state */
- pccs = ccs_base(local);
- for (i = 0; i < NUMBER_OF_CCS; i++) {
- writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
- }
- init_startup_params(local);
-
- /* copy mac address to startup parameters */
- if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
- memcpy(&local->sparm.b4.a_mac_addr,
- &local->startup_res.station_addr, ADDRLEN);
- }
-
- clear_interrupt(local); /* Clear any interrupt from the card */
- local->card_status = CARD_AWAITING_PARAM;
- dev_dbg(&link->dev, "ray_init ending\n");
- return 0;
-} /* ray_init */
-
-/*===========================================================================*/
-/* Download startup parameters to the card and command it to read them */
-static int dl_startup_params(struct net_device *dev)
-{
- int ccsindex;
- ray_dev_t *local = netdev_priv(dev);
- struct ccs __iomem *pccs;
- struct pcmcia_device *link = local->finder;
-
- dev_dbg(&link->dev, "dl_startup_params entered\n");
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs dl_startup_params - device not present\n");
- return -1;
- }
-
- /* Copy parameters to host to ECF area */
- if (local->fw_ver == 0x55)
- memcpy_toio(local->sram + HOST_TO_ECF_BASE, &local->sparm.b4,
- sizeof(struct b4_startup_params));
- else
- memcpy_toio(local->sram + HOST_TO_ECF_BASE, &local->sparm.b5,
- sizeof(struct b5_startup_params));
-
- /* Fill in the CCS fields for the ECF */
- if ((ccsindex = get_free_ccs(local)) < 0)
- return -1;
- local->dl_param_ccs = ccsindex;
- pccs = ccs_base(local) + ccsindex;
- writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd);
- dev_dbg(&link->dev, "dl_startup_params start ccsindex = %d\n",
- local->dl_param_ccs);
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- printk(KERN_INFO "ray dl_startup_params failed - "
- "ECF not ready for intr\n");
- local->card_status = CARD_DL_PARAM_ERROR;
- writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
- return -2;
- }
- local->card_status = CARD_DL_PARAM;
- /* Start kernel timer to wait for dl startup to complete. */
- local->timer.expires = jiffies + HZ / 2;
- local->timer.function = verify_dl_startup;
- add_timer(&local->timer);
- dev_dbg(&link->dev,
- "ray_cs dl_startup_params started timer for verify_dl_startup\n");
- return 0;
-} /* dl_startup_params */
-
-/*===========================================================================*/
-static void init_startup_params(ray_dev_t *local)
-{
- int i;
-
- if (country > JAPAN_TEST)
- country = USA;
- else if (country < USA)
- country = USA;
- /* structure for hop time and beacon period is defined here using
- * the new 802.11 D6.1 format; card firmware still uses the old
- * format until version 6.
- *   Before                    After
- *   a_hop_time  ms byte       a_hop_time       ms byte
- *   a_hop_time  2nd byte      a_hop_time       ls byte
- *   a_hop_time  ls byte       a_beacon_period  ms byte
- *   a_beacon_period           a_beacon_period  ls byte
- *
- *   a_hop_time = uS           a_hop_time = KuS
- *   a_beacon_period = hops    a_beacon_period = KuS
- */
- /* 64 ms ~= 0x010000 uS */
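- /* Worked example (values chosen for illustration only): with
- * hop_dwell = 128 Kus and beacon_period = 256 Kus, the build 4
- * translation below gives i = 128 * 1024 = 0x020000, so
- * a_hop_time[] holds { 0x02, 0x00 } and
- * a_beacon_period[1] = 256 / 128 - 1 = 1.
- */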
- if (local->fw_ver == 0x55) {
- memcpy(&local->sparm.b4, b4_default_startup_parms,
- sizeof(struct b4_startup_params));
- /* Translate sane kus input values to old build 4/5 format */
- /* i = hop time in uS truncated to 3 bytes */
- i = (hop_dwell * 1024) & 0xffffff;
- local->sparm.b4.a_hop_time[0] = (i >> 16) & 0xff;
- local->sparm.b4.a_hop_time[1] = (i >> 8) & 0xff;
- local->sparm.b4.a_beacon_period[0] = 0;
- local->sparm.b4.a_beacon_period[1] =
- ((beacon_period / hop_dwell) - 1) & 0xff;
- local->sparm.b4.a_curr_country_code = country;
- local->sparm.b4.a_hop_pattern_length =
- hop_pattern_length[(int)country] - 1;
- if (bc) {
- local->sparm.b4.a_ack_timeout = 0x50;
- local->sparm.b4.a_sifs = 0x3f;
- }
- } else { /* Version 5 uses real kus values */
- memcpy((UCHAR *) &local->sparm.b5, b5_default_startup_parms,
- sizeof(struct b5_startup_params));
-
- local->sparm.b5.a_hop_time[0] = (hop_dwell >> 8) & 0xff;
- local->sparm.b5.a_hop_time[1] = hop_dwell & 0xff;
- local->sparm.b5.a_beacon_period[0] =
- (beacon_period >> 8) & 0xff;
- local->sparm.b5.a_beacon_period[1] = beacon_period & 0xff;
- if (psm)
- local->sparm.b5.a_power_mgt_state = 1;
- local->sparm.b5.a_curr_country_code = country;
- local->sparm.b5.a_hop_pattern_length =
- hop_pattern_length[(int)country];
- }
-
- local->sparm.b4.a_network_type = net_type & 0x01;
- local->sparm.b4.a_acting_as_ap_status = TYPE_STA;
-
- if (essid != NULL)
- strscpy(local->sparm.b4.a_current_ess_id, essid, ESSID_SIZE);
-} /* init_startup_params */
-
-/*===========================================================================*/
-static void verify_dl_startup(struct timer_list *t)
-{
- ray_dev_t *local = from_timer(local, t, timer);
- struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs;
- UCHAR status;
- struct pcmcia_device *link = local->finder;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs verify_dl_startup - device not present\n");
- return;
- }
-#if 0
- {
- int i;
- printk(KERN_DEBUG
- "verify_dl_startup parameters sent via ccs %d:\n",
- local->dl_param_ccs);
- for (i = 0; i < sizeof(struct b5_startup_params); i++) {
- printk(" %2x",
- (unsigned int)readb(local->sram +
- HOST_TO_ECF_BASE + i));
- }
- printk("\n");
- }
-#endif
-
- status = readb(&pccs->buffer_status);
- if (status != CCS_BUFFER_FREE) {
- printk(KERN_INFO
- "Download startup params failed. Status = %d\n",
- status);
- local->card_status = CARD_DL_PARAM_ERROR;
- return;
- }
- if (local->sparm.b4.a_network_type == ADHOC)
- start_net(&local->timer);
- else
- join_net(&local->timer);
-} /* end verify_dl_startup */
-
-/*===========================================================================*/
-/* Command card to start a network */
-static void start_net(struct timer_list *t)
-{
- ray_dev_t *local = from_timer(local, t, timer);
- struct ccs __iomem *pccs;
- int ccsindex;
- struct pcmcia_device *link = local->finder;
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs start_net - device not present\n");
- return;
- }
- /* Fill in the CCS fields for the ECF */
- if ((ccsindex = get_free_ccs(local)) < 0)
- return;
- pccs = ccs_base(local) + ccsindex;
- writeb(CCS_START_NETWORK, &pccs->cmd);
- writeb(0, &pccs->var.start_network.update_param);
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- dev_dbg(&link->dev, "ray start net failed - card not ready for intr\n");
- writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
- return;
- }
- local->card_status = CARD_DOING_ACQ;
-} /* end start_net */
-
-/*===========================================================================*/
-/* Command card to join a network */
-static void join_net(struct timer_list *t)
-{
- ray_dev_t *local = from_timer(local, t, timer);
-
- struct ccs __iomem *pccs;
- int ccsindex;
- struct pcmcia_device *link = local->finder;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs join_net - device not present\n");
- return;
- }
- /* Fill in the CCS fields for the ECF */
- if ((ccsindex = get_free_ccs(local)) < 0)
- return;
- pccs = ccs_base(local) + ccsindex;
- writeb(CCS_JOIN_NETWORK, &pccs->cmd);
- writeb(0, &pccs->var.join_network.update_param);
- writeb(0, &pccs->var.join_network.net_initiated);
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- dev_dbg(&link->dev, "ray join net failed - card not ready for intr\n");
- writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
- return;
- }
- local->card_status = CARD_DOING_ACQ;
-}
-
-
-static void ray_release(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
- ray_dev_t *local = netdev_priv(dev);
-
- dev_dbg(&link->dev, "ray_release\n");
-
- del_timer_sync(&local->timer);
-
- if (local->sram)
- iounmap(local->sram);
- if (local->rmem)
- iounmap(local->rmem);
- if (local->amem)
- iounmap(local->amem);
- pcmcia_disable_device(link);
-
- dev_dbg(&link->dev, "ray_release ending\n");
-}
-
-static int ray_suspend(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- if (link->open)
- netif_device_detach(dev);
-
- return 0;
-}
-
-static int ray_resume(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- if (link->open) {
- ray_reset(dev);
- netif_device_attach(dev);
- }
-
- return 0;
-}
-
-/*===========================================================================*/
-static int ray_dev_init(struct net_device *dev)
-{
-#ifdef RAY_IMMEDIATE_INIT
- int i;
-#endif /* RAY_IMMEDIATE_INIT */
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
-
- dev_dbg(&link->dev, "ray_dev_init(dev=%p)\n", dev);
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_dev_init - device not present\n");
- return -1;
- }
-#ifdef RAY_IMMEDIATE_INIT
- /* Download startup parameters */
- if ((i = dl_startup_params(dev)) < 0) {
- printk(KERN_INFO "ray_dev_init dl_startup_params failed - "
- "returns 0x%x\n", i);
- return -1;
- }
-#else /* RAY_IMMEDIATE_INIT */
- /* Postpone the card init so that we can still configure the card,
- * for example using the Wireless Extensions. The init will happen
- * in ray_open() - Jean II */
- dev_dbg(&link->dev,
- "ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
- local->card_status);
-#endif /* RAY_IMMEDIATE_INIT */
-
- /* copy mac and broadcast addresses to linux device */
- eth_hw_addr_set(dev, local->sparm.b4.a_mac_addr);
- eth_broadcast_addr(dev->broadcast);
-
- dev_dbg(&link->dev, "ray_dev_init ending\n");
- return 0;
-}
-
-/*===========================================================================*/
-static int ray_dev_config(struct net_device *dev, struct ifmap *map)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- /* Dummy routine to satisfy device structure */
- dev_dbg(&link->dev, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map);
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_dev_config - device not present\n");
- return -1;
- }
-
- return 0;
-}
-
-/*===========================================================================*/
-static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- short length = skb->len;
-
- if (!pcmcia_dev_present(link)) {
- dev_dbg(&link->dev, "ray_dev_start_xmit - device not present\n");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- dev_dbg(&link->dev, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);
- if (local->authentication_state == NEED_TO_AUTH) {
- dev_dbg(&link->dev, "ray_cs Sending authentication request.\n");
- if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) {
- local->authentication_state = AUTHENTICATED;
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
- }
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- switch (ray_hw_xmit(skb->data, length, dev, DATA_TYPE)) {
- case XMIT_NO_CCS:
- case XMIT_NEED_AUTH:
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- case XMIT_NO_INTR:
- case XMIT_MSG_BAD:
- case XMIT_OK:
- default:
- dev_kfree_skb(skb);
- }
-
- return NETDEV_TX_OK;
-} /* ray_dev_start_xmit */
-
-/*===========================================================================*/
-static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
- UCHAR msg_type)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct ccs __iomem *pccs;
- int ccsindex;
- int offset;
- struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */
- short int addr; /* Address of xmit buffer in card space */
-
- pr_debug("ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev);
- if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) {
- printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n",
- len);
- return XMIT_MSG_BAD;
- }
- switch (ccsindex = get_free_tx_ccs(local)) {
- case ECCSBUSY:
- pr_debug("ray_hw_xmit tx_ccs table busy\n");
- fallthrough;
- case ECCSFULL:
- pr_debug("ray_hw_xmit No free tx ccs\n");
- fallthrough;
- case ECARDGONE:
- netif_stop_queue(dev);
- return XMIT_NO_CCS;
- default:
- break;
- }
- addr = TX_BUF_BASE + (ccsindex << 11);
-
- if (msg_type == DATA_TYPE) {
- local->stats.tx_bytes += len;
- local->stats.tx_packets++;
- }
-
- ptx = local->sram + addr;
-
- ray_build_header(local, ptx, msg_type, data);
- if (translate) {
- offset = translate_frame(local, ptx, data, len);
- } else { /* Encapsulate frame */
- /* TBD TIB length will move address of ptx->var */
- memcpy_toio(&ptx->var, data, len);
- offset = 0;
- }
-
- /* fill in the CCS */
- pccs = ccs_base(local) + ccsindex;
- len += TX_HEADER_LENGTH + offset;
- writeb(CCS_TX_REQUEST, &pccs->cmd);
- writeb(addr >> 8, &pccs->var.tx_request.tx_data_ptr[0]);
- writeb(local->tib_length, &pccs->var.tx_request.tx_data_ptr[1]);
- writeb(len >> 8, &pccs->var.tx_request.tx_data_length[0]);
- writeb(len & 0xff, &pccs->var.tx_request.tx_data_length[1]);
-/* TBD still need psm_cam? */
- writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode);
- writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate);
- writeb(0, &pccs->var.tx_request.antenna);
- pr_debug("ray_hw_xmit default_tx_rate = 0x%x\n",
- local->net_default_tx_rate);
-
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- pr_debug("ray_hw_xmit failed - ECF not ready for intr\n");
-/* TBD very inefficient to copy packet to buffer, and then not
- send it, but the alternative is to queue the messages and that
- won't be done for a while. Maybe set tbusy until a CCS is free?
-*/
- writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
- return XMIT_NO_INTR;
- }
- return XMIT_OK;
-} /* end ray_hw_xmit */
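-
-/* Illustration of the transmit layout implied above (derived from the
- * code, not from a hardware manual): each tx CCS owns a 2 KiB window,
- * addr = TX_BUF_BASE + (ccsindex << 11), so ccsindex 3 maps to
- * TX_BUF_BASE + 0x1800. The 16-bit length is stored big-endian, one
- * byte per writeb(); the data pointer pairs the high byte of addr
- * with the TIB length.
- */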
-
-/*===========================================================================*/
-static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
- unsigned char *data, int len)
-{
- __be16 proto = ((struct ethhdr *)data)->h_proto;
- if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */
- pr_debug("ray_cs translate_frame DIX II\n");
- /* Copy LLC header to card buffer */
- memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
- memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc),
- (UCHAR *) &proto, 2);
- if (proto == htons(ETH_P_AARP) || proto == htons(ETH_P_IPX)) {
- /* This is the selective translation table, only 2 entries */
- writeb(0xf8,
- &((struct snaphdr_t __iomem *)ptx->var)->org[2]);
- }
- /* Copy body of ethernet packet without ethernet header */
- memcpy_toio((void __iomem *)&ptx->var +
- sizeof(struct snaphdr_t), data + ETH_HLEN,
- len - ETH_HLEN);
- return (int)sizeof(struct snaphdr_t) - ETH_HLEN;
- } else { /* already 802 type, and proto is length */
- pr_debug("ray_cs translate_frame 802\n");
- if (proto == htons(0xffff)) /* evil netware IPX 802.3 without LLC */
- pr_debug("ray_cs translate_frame evil IPX\n");
- memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
- return 0 - ETH_HLEN;
- }
- /* TBD do other frame types */
-} /* end translate_frame */
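-
-/* The value returned by translate_frame() is the net change in header
- * length, which ray_hw_xmit() folds into the CCS length field. As an
- * illustrative check (assuming the usual 8-byte SNAP header and
- * ETH_HLEN = 14): the DIX II path returns 8 - 14 = -6, while both 802
- * paths return -14 because the Ethernet header is dropped entirely.
- */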
-
-/*===========================================================================*/
-static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx,
- UCHAR msg_type, unsigned char *data)
-{
- writeb(PROTOCOL_VER | msg_type, &ptx->mac.frame_ctl_1);
-/*** IEEE 802.11 Address field assignments *************
- TODS FROMDS addr_1 addr_2 addr_3 addr_4
-Adhoc 0 0 dest src (terminal) BSSID N/A
-AP to Terminal 0 1 dest AP(BSSID) source N/A
-Terminal to AP 1 0 AP(BSSID) src (terminal) dest N/A
-AP to AP 1 1 dest AP src AP dest source
-*******************************************************/
- if (local->net_type == ADHOC) {
- writeb(0, &ptx->mac.frame_ctl_2);
- memcpy_toio(ptx->mac.addr_1, ((struct ethhdr *)data)->h_dest,
- ADDRLEN);
- memcpy_toio(ptx->mac.addr_2, ((struct ethhdr *)data)->h_source,
- ADDRLEN);
- memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN);
- } else { /* infrastructure */
-
- if (local->sparm.b4.a_acting_as_ap_status) {
- writeb(FC2_FROM_DS, &ptx->mac.frame_ctl_2);
- memcpy_toio(ptx->mac.addr_1,
- ((struct ethhdr *)data)->h_dest, ADDRLEN);
- memcpy_toio(ptx->mac.addr_2, local->bss_id, 6);
- memcpy_toio(ptx->mac.addr_3,
- ((struct ethhdr *)data)->h_source, ADDRLEN);
- } else { /* Terminal */
-
- writeb(FC2_TO_DS, &ptx->mac.frame_ctl_2);
- memcpy_toio(ptx->mac.addr_1, local->bss_id, ADDRLEN);
- memcpy_toio(ptx->mac.addr_2,
- ((struct ethhdr *)data)->h_source, ADDRLEN);
- memcpy_toio(ptx->mac.addr_3,
- ((struct ethhdr *)data)->h_dest, ADDRLEN);
- }
- }
-} /* end ray_build_header */
-
-/*====================================================================*/
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get protocol name
- */
-static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- strcpy(wrqu->name, "IEEE 802.11-FH");
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set frequency
- */
-static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
-
- /* Reject if card is already initialised */
- if (local->card_status != CARD_AWAITING_PARAM)
- return -EBUSY;
-
- /* Setting by channel number */
- if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
- err = -EOPNOTSUPP;
- else
- local->sparm.b5.a_hop_pattern = wrqu->freq.m;
-
- return err;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get frequency
- */
-static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- wrqu->freq.m = local->sparm.b5.a_hop_pattern;
- wrqu->freq.e = 0;
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set ESSID
- */
-static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- /* Reject if card is already initialised */
- if (local->card_status != CARD_AWAITING_PARAM)
- return -EBUSY;
-
- /* Check if we asked for `any' */
- if (wrqu->essid.flags == 0)
- /* Corey : can you do that ? */
- return -EOPNOTSUPP;
-
- /* Check the size of the string */
- if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
- return -E2BIG;
-
- /* Set the ESSID in the card */
- memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
- memcpy(local->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get ESSID
- */
-static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
- UCHAR tmp[IW_ESSID_MAX_SIZE + 1];
-
- /* Get the essid that was set */
- memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
- memcpy(tmp, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
- tmp[IW_ESSID_MAX_SIZE] = '\0';
-
- /* Push it out ! */
- wrqu->essid.length = strlen(tmp);
- wrqu->essid.flags = 1; /* active */
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get AP address
- */
-static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- memcpy(wrqu->ap_addr.sa_data, local->bss_id, ETH_ALEN);
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Bit-Rate
- */
-static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- /* Reject if card is already initialised */
- if (local->card_status != CARD_AWAITING_PARAM)
- return -EBUSY;
-
- /* Check if rate is in range */
- if ((wrqu->bitrate.value != 1000000) && (wrqu->bitrate.value != 2000000))
- return -EINVAL;
-
- /* Hack for 1.5 Mb/s instead of 2 Mb/s */
- if ((local->fw_ver == 0x55) && /* Please check */
- (wrqu->bitrate.value == 2000000))
- local->net_default_tx_rate = 3;
- else
- local->net_default_tx_rate = wrqu->bitrate.value / 500000;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Bit-Rate
- */
-static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- if (local->net_default_tx_rate == 3)
- wrqu->bitrate.value = 2000000; /* Hum... */
- else
- wrqu->bitrate.value = local->net_default_tx_rate * 500000;
- wrqu->bitrate.fixed = 0; /* We are in auto mode */
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set RTS threshold
- */
-static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
- int rthr = wrqu->rts.value;
-
- /* Reject if card is already initialised */
- if (local->card_status != CARD_AWAITING_PARAM)
- return -EBUSY;
-
- /* if(wrq->u.rts.fixed == 0) we should complain */
- if (wrqu->rts.disabled)
- rthr = 32767;
- else {
- if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
- return -EINVAL;
- }
- local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;
- local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;
-
- return -EINPROGRESS; /* Call commit handler */
-}
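-
-/* The two a_rts_threshold bytes hold the value big-endian; as a worked
- * example (not from the source), the 802.11 maximum of 2347 (0x092B)
- * is stored as { 0x09, 0x2B }, and "disabled" is encoded as 32767
- * (0x7FFF).
- */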
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get RTS threshold
- */
-static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- wrqu->rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
- + local->sparm.b5.a_rts_threshold[1];
- wrqu->rts.disabled = (wrqu->rts.value == 32767);
- wrqu->rts.fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Fragmentation threshold
- */
-static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
- int fthr = wrqu->frag.value;
-
- /* Reject if card is already initialised */
- if (local->card_status != CARD_AWAITING_PARAM)
- return -EBUSY;
-
- /* if(wrq->u.frag.fixed == 0) should complain */
- if (wrqu->frag.disabled)
- fthr = 32767;
- else {
- if ((fthr < 256) || (fthr > 2347)) /* To check out ! */
- return -EINVAL;
- }
- local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF;
- local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF;
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Fragmentation threshold
- */
-static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- wrqu->frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
- + local->sparm.b5.a_frag_threshold[1];
- wrqu->frag.disabled = (wrqu->frag.value == 32767);
- wrqu->frag.fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Mode of Operation
- */
-static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
- int err = -EINPROGRESS; /* Call commit handler */
- char card_mode = 1;
-
- /* Reject if card is already initialised */
- if (local->card_status != CARD_AWAITING_PARAM)
- return -EBUSY;
-
- switch (wrqu->mode) {
- case IW_MODE_ADHOC:
- card_mode = 0;
- fallthrough;
- case IW_MODE_INFRA:
- local->sparm.b5.a_network_type = card_mode;
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Mode of Operation
- */
-static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- ray_dev_t *local = netdev_priv(dev);
-
- if (local->sparm.b5.a_network_type)
- wrqu->mode = IW_MODE_INFRA;
- else
- wrqu->mode = IW_MODE_ADHOC;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get range info
- */
-static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
-
- memset(range, 0, sizeof(struct iw_range));
-
- /* Set the length (very important for backward compatibility) */
- wrqu->data.length = sizeof(struct iw_range);
-
- /* Set the Wireless Extension versions */
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 9;
-
- /* Set information in the range struct */
- range->throughput = 1.1 * 1000 * 1000; /* Put the right number here */
- range->num_channels = hop_pattern_length[(int)country];
- range->num_frequency = 0;
- range->max_qual.qual = 0;
- range->max_qual.level = 255; /* What's the correct value ? */
- range->max_qual.noise = 255; /* Idem */
- range->num_bitrates = 2;
- range->bitrate[0] = 1000000; /* 1 Mb/s */
- range->bitrate[1] = 2000000; /* 2 Mb/s */
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : set framing mode
- */
-static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- translate = !!*(extra); /* Set framing mode */
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get framing mode
- */
-static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- *(extra) = translate;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Private Handler : get country
- */
-static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- *(extra) = country;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Commit handler : called after a bunch of SET operations
- */
-static int ray_commit(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Stats handler : return Wireless Stats
- */
-static iw_stats *ray_get_wireless_stats(struct net_device *dev)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- struct status __iomem *p = local->sram + STATUS_BASE;
-
- local->wstats.status = local->card_status;
-#ifdef WIRELESS_SPY
- if ((local->spy_data.spy_number > 0)
- && (local->sparm.b5.a_network_type == 0)) {
- /* Get it from the first node in spy list */
- local->wstats.qual.qual = local->spy_data.spy_stat[0].qual;
- local->wstats.qual.level = local->spy_data.spy_stat[0].level;
- local->wstats.qual.noise = local->spy_data.spy_stat[0].noise;
- local->wstats.qual.updated =
- local->spy_data.spy_stat[0].updated;
- }
-#endif /* WIRELESS_SPY */
-
- if (pcmcia_dev_present(link)) {
- local->wstats.qual.noise = readb(&p->rxnoise);
- local->wstats.qual.updated |= 4;
- }
-
- return &local->wstats;
-} /* end ray_get_wireless_stats */
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const iw_handler ray_handler[] = {
- IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
- IW_HANDLER(SIOCGIWNAME, ray_get_name),
- IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
- IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
- IW_HANDLER(SIOCSIWMODE, ray_set_mode),
- IW_HANDLER(SIOCGIWMODE, ray_get_mode),
- IW_HANDLER(SIOCGIWRANGE, ray_get_range),
-#ifdef WIRELESS_SPY
- IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
-#endif /* WIRELESS_SPY */
- IW_HANDLER(SIOCGIWAP, ray_get_wap),
- IW_HANDLER(SIOCSIWESSID, ray_set_essid),
- IW_HANDLER(SIOCGIWESSID, ray_get_essid),
- IW_HANDLER(SIOCSIWRATE, ray_set_rate),
- IW_HANDLER(SIOCGIWRATE, ray_get_rate),
- IW_HANDLER(SIOCSIWRTS, ray_set_rts),
- IW_HANDLER(SIOCGIWRTS, ray_get_rts),
- IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
- IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
-};
-
-#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
-#define SIOCGIPFRAMING (SIOCIWFIRSTPRIV + 1) /* Get framing mode */
-#define SIOCGIPCOUNTRY (SIOCIWFIRSTPRIV + 3) /* Get country code */
-
-static const iw_handler ray_private_handler[] = {
- [0] = ray_set_framing,
- [1] = ray_get_framing,
- [3] = ray_get_country,
-};
-
-static const struct iw_priv_args ray_private_args[] = {
-/* cmd, set_args, get_args, name */
- {SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0,
- "set_framing"},
- {SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
- "get_framing"},
- {SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
- "get_country"},
-};
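-
-/* With the table above, the private ioctls are reachable through the
- * wireless tools; a hypothetical userspace session (interface name and
- * output format are assumptions, not taken from this driver) would be:
- *
- *   iwpriv eth0 set_framing 1    # switch to translation framing
- *   iwpriv eth0 get_framing
- *   iwpriv eth0 get_country
- */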
-
-static const struct iw_handler_def ray_handler_def = {
- .num_standard = ARRAY_SIZE(ray_handler),
- .num_private = ARRAY_SIZE(ray_private_handler),
- .num_private_args = ARRAY_SIZE(ray_private_args),
- .standard = ray_handler,
- .private = ray_private_handler,
- .private_args = ray_private_args,
- .get_wireless_stats = ray_get_wireless_stats,
-};
-
-/*===========================================================================*/
-static int ray_open(struct net_device *dev)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link;
- link = local->finder;
-
- dev_dbg(&link->dev, "ray_open('%s')\n", dev->name);
-
- if (link->open == 0)
- local->num_multi = 0;
- link->open++;
-
- /* If the card is not started, time to start it ! - Jean II */
- if (local->card_status == CARD_AWAITING_PARAM) {
- int i;
-
- dev_dbg(&link->dev, "ray_open: doing init now !\n");
-
- /* Download startup parameters */
- if ((i = dl_startup_params(dev)) < 0) {
- printk(KERN_INFO
- "ray_dev_init dl_startup_params failed - "
- "returns 0x%x\n", i);
- return -1;
- }
- }
-
- if (sniffer)
- netif_stop_queue(dev);
- else
- netif_start_queue(dev);
-
- dev_dbg(&link->dev, "ray_open ending\n");
- return 0;
-} /* end ray_open */
-
-/*===========================================================================*/
-static int ray_dev_close(struct net_device *dev)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link;
- link = local->finder;
-
- dev_dbg(&link->dev, "ray_dev_close('%s')\n", dev->name);
-
- link->open--;
- netif_stop_queue(dev);
-
- /* In here, we should stop the hardware (stop card from being active)
- * and set local->card_status to CARD_AWAITING_PARAM, so that while the
- * card is closed we can change its configuration.
- * Probably also need a COR reset to get sane state - Jean II */
-
- return 0;
-} /* end ray_dev_close */
-
-/*===========================================================================*/
-static void ray_reset(struct net_device *dev)
-{
- pr_debug("ray_reset entered\n");
-}
-
-/*===========================================================================*/
-/* Cause a firmware interrupt if it is ready for one */
-/* Return nonzero if not ready */
-static int interrupt_ecf(ray_dev_t *local, int ccs)
-{
- int i = 50;
- struct pcmcia_device *link = local->finder;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs interrupt_ecf - device not present\n");
- return -1;
- }
- dev_dbg(&link->dev, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs);
-
- while (i &&
- (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) &
- ECF_INTR_SET))
- i--;
- if (i == 0) {
- dev_dbg(&link->dev, "ray_cs interrupt_ecf card not ready for interrupt\n");
- return -1;
- }
- /* Fill the mailbox, then kick the card */
- writeb(ccs, local->sram + SCB_BASE);
- writeb(ECF_INTR_SET, local->amem + CIS_OFFSET + ECF_INTR_OFFSET);
- return 0;
-} /* interrupt_ecf */
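-
-/* The handshake above is a one-deep mailbox: the host may only write a
- * new CCS index into SCB_BASE while ECF_INTR_SET is clear, hence the
- * bounded poll (50 reads) before giving up and returning nonzero.
- */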
-
-/*===========================================================================*/
-/* Get next free transmit CCS */
-/* Return - index of current tx ccs */
-static int get_free_tx_ccs(ray_dev_t *local)
-{
- int i;
- struct ccs __iomem *pccs = ccs_base(local);
- struct pcmcia_device *link = local->finder;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs get_free_tx_ccs - device not present\n");
- return ECARDGONE;
- }
-
- if (test_and_set_bit(0, &local->tx_ccs_lock)) {
- dev_dbg(&link->dev, "ray_cs tx_ccs_lock busy\n");
- return ECCSBUSY;
- }
-
- for (i = 0; i < NUMBER_OF_TX_CCS; i++) {
- if (readb(&(pccs + i)->buffer_status) == CCS_BUFFER_FREE) {
- writeb(CCS_BUFFER_BUSY, &(pccs + i)->buffer_status);
- writeb(CCS_END_LIST, &(pccs + i)->link);
- local->tx_ccs_lock = 0;
- return i;
- }
- }
- local->tx_ccs_lock = 0;
- dev_dbg(&link->dev, "ray_cs ERROR no free tx CCS for raylink card\n");
- return ECCSFULL;
-} /* get_free_tx_ccs */
-
-/*===========================================================================*/
-/* Get next free CCS */
-/* Return - index of current ccs */
-static int get_free_ccs(ray_dev_t *local)
-{
- int i;
- struct ccs __iomem *pccs = ccs_base(local);
- struct pcmcia_device *link = local->finder;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs get_free_ccs - device not present\n");
- return ECARDGONE;
- }
- if (test_and_set_bit(0, &local->ccs_lock)) {
- dev_dbg(&link->dev, "ray_cs ccs_lock busy\n");
- return ECCSBUSY;
- }
-
- for (i = NUMBER_OF_TX_CCS; i < NUMBER_OF_CCS; i++) {
- if (readb(&(pccs + i)->buffer_status) == CCS_BUFFER_FREE) {
- writeb(CCS_BUFFER_BUSY, &(pccs + i)->buffer_status);
- writeb(CCS_END_LIST, &(pccs + i)->link);
- local->ccs_lock = 0;
- return i;
- }
- }
- local->ccs_lock = 0;
- dev_dbg(&link->dev, "ray_cs ERROR no free CCS for raylink card\n");
- return ECCSFULL;
-} /* get_free_ccs */
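-
-/* Allocation summary (restating the two functions above): slots
- * [0, NUMBER_OF_TX_CCS) are reserved for transmit, slots
- * [NUMBER_OF_TX_CCS, NUMBER_OF_CCS) for commands, and each pool is
- * guarded by its own single-bit test_and_set_bit() lock.
- */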
-
-/*===========================================================================*/
-static void authenticate_timeout(struct timer_list *t)
-{
- ray_dev_t *local = from_timer(local, t, timer);
- del_timer(&local->timer);
- printk(KERN_INFO "ray_cs Authentication with access point failed"
- " - timeout\n");
- join_net(&local->timer);
-}
-
-/*===========================================================================*/
-static int parse_addr(char *in_str, UCHAR *out)
-{
- int i, k;
- int len;
-
- if (in_str == NULL)
- return 0;
- len = strnlen(in_str, ADDRLEN * 2 + 1) - 1;
- if (len < 1)
- return 0;
- memset(out, 0, ADDRLEN);
-
- i = 5;
-
- while (len > 0) {
- if ((k = hex_to_bin(in_str[len--])) != -1)
- out[i] = k;
- else
- return 0;
-
- if (len == 0)
- break;
- if ((k = hex_to_bin(in_str[len--])) != -1)
- out[i] += k << 4;
- else
- return 0;
- if (!i--)
- break;
- }
- return 1;
-}
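-
-/* parse_addr() walks the string right to left, filling out[5] down to
- * out[0]; as a worked example (not from the source), "00a0cc3b7fec"
- * yields { 0x00, 0xa0, 0xcc, 0x3b, 0x7f, 0xec }, and a short string
- * such as "7fec" fills only the low-order bytes, leaving the rest
- * zero.
- */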
-
-/*===========================================================================*/
-static struct net_device_stats *ray_get_stats(struct net_device *dev)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- struct status __iomem *p = local->sram + STATUS_BASE;
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs net_device_stats - device not present\n");
- return &local->stats;
- }
- if (readb(&p->mrx_overflow_for_host)) {
- local->stats.rx_over_errors += swab16(readw(&p->mrx_overflow));
- writeb(0, &p->mrx_overflow);
- writeb(0, &p->mrx_overflow_for_host);
- }
- if (readb(&p->mrx_checksum_error_for_host)) {
- local->stats.rx_crc_errors +=
- swab16(readw(&p->mrx_checksum_error));
- writeb(0, &p->mrx_checksum_error);
- writeb(0, &p->mrx_checksum_error_for_host);
- }
- if (readb(&p->rx_hec_error_for_host)) {
- local->stats.rx_frame_errors += swab16(readw(&p->rx_hec_error));
- writeb(0, &p->rx_hec_error);
- writeb(0, &p->rx_hec_error_for_host);
- }
- return &local->stats;
-}
-
-/*===========================================================================*/
-static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
- int len)
-{
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- int ccsindex;
- int i;
- struct ccs __iomem *pccs;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_update_parm - device not present\n");
- return;
- }
-
- if ((ccsindex = get_free_ccs(local)) < 0) {
- dev_dbg(&link->dev, "ray_update_parm - No free ccs\n");
- return;
- }
- pccs = ccs_base(local) + ccsindex;
- writeb(CCS_UPDATE_PARAMS, &pccs->cmd);
- writeb(objid, &pccs->var.update_param.object_id);
- writeb(1, &pccs->var.update_param.number_objects);
- writeb(0, &pccs->var.update_param.failure_cause);
- for (i = 0; i < len; i++) {
- writeb(value[i], local->sram + HOST_TO_ECF_BASE);
- }
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
- writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
- }
-}
-
-/*===========================================================================*/
-static void ray_update_multi_list(struct net_device *dev, int all)
-{
- int ccsindex;
- struct ccs __iomem *pccs;
- ray_dev_t *local = netdev_priv(dev);
- struct pcmcia_device *link = local->finder;
- void __iomem *p = local->sram + HOST_TO_ECF_BASE;
-
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_update_multi_list - device not present\n");
- return;
- } else
- dev_dbg(&link->dev, "ray_update_multi_list(%p)\n", dev);
- if ((ccsindex = get_free_ccs(local)) < 0) {
- dev_dbg(&link->dev, "ray_update_multi - No free ccs\n");
- return;
- }
- pccs = ccs_base(local) + ccsindex;
- writeb(CCS_UPDATE_MULTICAST_LIST, &pccs->cmd);
-
- if (all) {
- writeb(0xff, &pccs->var);
- local->num_multi = 0xff;
- } else {
- struct netdev_hw_addr *ha;
- int i = 0;
-
- /* Copy the kernel's list of MC addresses to card */
- netdev_for_each_mc_addr(ha, dev) {
- memcpy_toio(p, ha->addr, ETH_ALEN);
- dev_dbg(&link->dev, "ray_update_multi add addr %pm\n",
- ha->addr);
- p += ETH_ALEN;
- i++;
- }
- if (i > 256 / ADDRLEN)
- i = 256 / ADDRLEN;
- writeb((UCHAR) i, &pccs->var);
- dev_dbg(&link->dev, "ray_cs update_multi %d addresses in list\n", i);
- /* Interrupt the firmware to process the command */
- local->num_multi = i;
- }
- if (interrupt_ecf(local, ccsindex)) {
- dev_dbg(&link->dev,
- "ray_cs update_multi failed - ECF not ready for intr\n");
- writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
- }
-} /* end ray_update_multi_list */
-
-/*===========================================================================*/
-static void set_multicast_list(struct net_device *dev)
-{
- ray_dev_t *local = netdev_priv(dev);
- UCHAR promisc;
-
- pr_debug("ray_cs set_multicast_list(%p)\n", dev);
-
- if (dev->flags & IFF_PROMISC) {
- if (local->sparm.b5.a_promiscuous_mode == 0) {
- pr_debug("ray_cs set_multicast_list promisc on\n");
- local->sparm.b5.a_promiscuous_mode = 1;
- promisc = 1;
- ray_update_parm(dev, OBJID_promiscuous_mode,
- &promisc, sizeof(promisc));
- }
- } else {
- if (local->sparm.b5.a_promiscuous_mode == 1) {
- pr_debug("ray_cs set_multicast_list promisc off\n");
- local->sparm.b5.a_promiscuous_mode = 0;
- promisc = 0;
- ray_update_parm(dev, OBJID_promiscuous_mode,
- &promisc, sizeof(promisc));
- }
- }
-
- if (dev->flags & IFF_ALLMULTI)
- ray_update_multi_list(dev, 1);
- else {
- if (local->num_multi != netdev_mc_count(dev))
- ray_update_multi_list(dev, 0);
- }
-} /* end set_multicast_list */
-
-/*=============================================================================
- * All routines below here are run at interrupt time.
-=============================================================================*/
-static irqreturn_t ray_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct pcmcia_device *link;
- ray_dev_t *local;
- struct ccs __iomem *pccs;
- struct rcs __iomem *prcs;
- UCHAR rcsindex;
- UCHAR tmp;
- UCHAR cmd;
- UCHAR status;
- UCHAR memtmp[ESSID_SIZE + 1];
-
- if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */
- return IRQ_NONE;
-
- pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
-
- local = netdev_priv(dev);
- link = local->finder;
- if (!pcmcia_dev_present(link)) {
- pr_debug(
- "ray_cs interrupt from device not present or suspended.\n");
- return IRQ_NONE;
- }
- rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index);
-
- if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
- dev_dbg(&link->dev, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex);
- clear_interrupt(local);
- return IRQ_HANDLED;
- }
- if (rcsindex < NUMBER_OF_CCS) { /* If it's a returned CCS */
- pccs = ccs_base(local) + rcsindex;
- cmd = readb(&pccs->cmd);
- status = readb(&pccs->buffer_status);
- switch (cmd) {
- case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */
- del_timer(&local->timer);
- if (status == CCS_COMMAND_COMPLETE) {
- dev_dbg(&link->dev,
- "ray_cs interrupt download_startup_parameters OK\n");
- } else {
- dev_dbg(&link->dev,
- "ray_cs interrupt download_startup_parameters fail\n");
- }
- break;
- case CCS_UPDATE_PARAMS:
- dev_dbg(&link->dev, "ray_cs interrupt update params done\n");
- if (status != CCS_COMMAND_COMPLETE) {
- tmp =
- readb(&pccs->var.update_param.
- failure_cause);
- dev_dbg(&link->dev,
- "ray_cs interrupt update params failed - reason %d\n",
- tmp);
- }
- break;
- case CCS_REPORT_PARAMS:
- dev_dbg(&link->dev, "ray_cs interrupt report params done\n");
- break;
- case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */
- dev_dbg(&link->dev,
- "ray_cs interrupt CCS Update Multicast List done\n");
- break;
- case CCS_UPDATE_POWER_SAVINGS_MODE:
- dev_dbg(&link->dev,
- "ray_cs interrupt update power save mode done\n");
- break;
- case CCS_START_NETWORK:
- case CCS_JOIN_NETWORK:
- memcpy(memtmp, local->sparm.b4.a_current_ess_id,
- ESSID_SIZE);
- memtmp[ESSID_SIZE] = '\0';
-
- if (status == CCS_COMMAND_COMPLETE) {
- if (readb
- (&pccs->var.start_network.net_initiated) ==
- 1) {
- dev_dbg(&link->dev,
- "ray_cs interrupt network \"%s\" started\n",
- memtmp);
- } else {
- dev_dbg(&link->dev,
- "ray_cs interrupt network \"%s\" joined\n",
- memtmp);
- }
- memcpy_fromio(&local->bss_id,
- pccs->var.start_network.bssid,
- ADDRLEN);
-
- if (local->fw_ver == 0x55)
- local->net_default_tx_rate = 3;
- else
- local->net_default_tx_rate =
- readb(&pccs->var.start_network.
- net_default_tx_rate);
- local->encryption =
- readb(&pccs->var.start_network.encryption);
- if (!sniffer && (local->net_type == INFRA)
- && !(local->sparm.b4.a_acting_as_ap_status)) {
- authenticate(local);
- }
- local->card_status = CARD_ACQ_COMPLETE;
- } else {
- local->card_status = CARD_ACQ_FAILED;
-
- del_timer(&local->timer);
- local->timer.expires = jiffies + HZ * 5;
- if (cmd == CCS_START_NETWORK) {
- dev_dbg(&link->dev,
- "ray_cs interrupt network \"%s\" start failed\n",
- memtmp);
- local->timer.function = start_net;
- } else {
- dev_dbg(&link->dev,
- "ray_cs interrupt network \"%s\" join failed\n",
- memtmp);
- local->timer.function = join_net;
- }
- add_timer(&local->timer);
- }
- break;
- case CCS_START_ASSOCIATION:
- if (status == CCS_COMMAND_COMPLETE) {
- local->card_status = CARD_ASSOC_COMPLETE;
- dev_dbg(&link->dev, "ray_cs association successful\n");
- } else {
- dev_dbg(&link->dev, "ray_cs association failed,\n");
- local->card_status = CARD_ASSOC_FAILED;
- join_net(&local->timer);
- }
- break;
- case CCS_TX_REQUEST:
- if (status == CCS_COMMAND_COMPLETE) {
- dev_dbg(&link->dev,
- "ray_cs interrupt tx request complete\n");
- } else {
- dev_dbg(&link->dev,
- "ray_cs interrupt tx request failed\n");
- }
- if (!sniffer)
- netif_start_queue(dev);
- netif_wake_queue(dev);
- break;
- case CCS_TEST_MEMORY:
- dev_dbg(&link->dev, "ray_cs interrupt mem test done\n");
- break;
- case CCS_SHUTDOWN:
- dev_dbg(&link->dev,
- "ray_cs interrupt Unexpected CCS returned - Shutdown\n");
- break;
- case CCS_DUMP_MEMORY:
- dev_dbg(&link->dev, "ray_cs interrupt dump memory done\n");
- break;
- case CCS_START_TIMER:
- dev_dbg(&link->dev,
- "ray_cs interrupt DING - raylink timer expired\n");
- break;
- default:
- dev_dbg(&link->dev,
- "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n",
- rcsindex, cmd);
- }
- writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
- } else { /* It's an RCS */
-
- prcs = rcs_base(local) + rcsindex;
-
- switch (readb(&prcs->interrupt_id)) {
- case PROCESS_RX_PACKET:
- ray_rx(dev, local, prcs);
- break;
- case REJOIN_NET_COMPLETE:
- dev_dbg(&link->dev, "ray_cs interrupt rejoin net complete\n");
- local->card_status = CARD_ACQ_COMPLETE;
- /* do we need to clear tx buffers CCS's? */
- if (local->sparm.b4.a_network_type == ADHOC) {
- if (!sniffer)
- netif_start_queue(dev);
- } else {
- memcpy_fromio(&local->bss_id,
- prcs->var.rejoin_net_complete.
- bssid, ADDRLEN);
- dev_dbg(&link->dev, "ray_cs new BSSID = %pm\n",
- local->bss_id);
- if (!sniffer)
- authenticate(local);
- }
- break;
- case ROAMING_INITIATED:
- dev_dbg(&link->dev, "ray_cs interrupt roaming initiated\n");
- netif_stop_queue(dev);
- local->card_status = CARD_DOING_ACQ;
- break;
- case JAPAN_CALL_SIGN_RXD:
- dev_dbg(&link->dev, "ray_cs interrupt japan call sign rx\n");
- break;
- default:
- dev_dbg(&link->dev,
- "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n",
- rcsindex,
- (unsigned int)readb(&prcs->interrupt_id));
- break;
- }
- writeb(CCS_BUFFER_FREE, &prcs->buffer_status);
- }
- clear_interrupt(local);
- return IRQ_HANDLED;
-} /* ray_interrupt */
-
-/*===========================================================================*/
-static void ray_rx(struct net_device *dev, ray_dev_t *local,
- struct rcs __iomem *prcs)
-{
- int rx_len;
- unsigned int pkt_addr;
- void __iomem *pmsg;
- pr_debug("ray_rx process rx packet\n");
-
- /* Calculate address of packet within Rx buffer */
- pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8)
- + readb(&prcs->var.rx_packet.rx_data_ptr[1])) & RX_BUFF_END;
- /* Length of first packet fragment */
- rx_len = (readb(&prcs->var.rx_packet.rx_data_length[0]) << 8)
- + readb(&prcs->var.rx_packet.rx_data_length[1]);
-
- local->last_rsl = readb(&prcs->var.rx_packet.rx_sig_lev);
- pmsg = local->rmem + pkt_addr;
- switch (readb(pmsg)) {
- case DATA_TYPE:
- pr_debug("ray_rx data type\n");
- rx_data(dev, prcs, pkt_addr, rx_len);
- break;
- case AUTHENTIC_TYPE:
- pr_debug("ray_rx authentic type\n");
- if (sniffer)
- rx_data(dev, prcs, pkt_addr, rx_len);
- else
- rx_authenticate(local, prcs, pkt_addr, rx_len);
- break;
- case DEAUTHENTIC_TYPE:
- pr_debug("ray_rx deauth type\n");
- if (sniffer)
- rx_data(dev, prcs, pkt_addr, rx_len);
- else
- rx_deauthenticate(local, prcs, pkt_addr, rx_len);
- break;
- case NULL_MSG_TYPE:
- pr_debug("ray_cs rx NULL msg\n");
- break;
- case BEACON_TYPE:
- pr_debug("ray_rx beacon type\n");
- if (sniffer)
- rx_data(dev, prcs, pkt_addr, rx_len);
-
- copy_from_rx_buff(local, (UCHAR *) &local->last_bcn, pkt_addr,
- rx_len < sizeof(struct beacon_rx) ?
- rx_len : sizeof(struct beacon_rx));
-
- local->beacon_rxed = 1;
- /* Get the statistics so the card counters never overflow */
- ray_get_stats(dev);
- break;
- default:
- pr_debug("ray_cs unknown pkt type %2x\n",
- (unsigned int)readb(pmsg));
- break;
- }
-
-} /* end ray_rx */
-
-/*===========================================================================*/
-static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
- unsigned int pkt_addr, int rx_len)
-{
- struct sk_buff *skb = NULL;
- struct rcs __iomem *prcslink = prcs;
- ray_dev_t *local = netdev_priv(dev);
- UCHAR *rx_ptr;
- int total_len;
- int tmp;
-#ifdef WIRELESS_SPY
- int siglev = local->last_rsl;
- u_char linksrcaddr[ETH_ALEN]; /* Other end of the wireless link */
-#endif
-
- if (!sniffer) {
- if (translate) {
-/* TBD length needs fixing for translated header */
- if (rx_len < (ETH_HLEN + RX_MAC_HEADER_LENGTH) ||
- rx_len >
- (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
- FCS_LEN)) {
- pr_debug(
- "ray_cs invalid packet length %d received\n",
- rx_len);
- return;
- }
- } else { /* encapsulated ethernet */
-
- if (rx_len < (ETH_HLEN + RX_MAC_HEADER_LENGTH) ||
- rx_len >
- (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
- FCS_LEN)) {
- pr_debug(
- "ray_cs invalid packet length %d received\n",
- rx_len);
- return;
- }
- }
- }
- pr_debug("ray_cs rx_data packet\n");
- /* If fragmented packet, verify sizes of fragments add up */
- if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
- pr_debug("ray_cs rx'ed fragment\n");
- tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8)
- + readb(&prcs->var.rx_packet.totalpacketlength[1]);
- total_len = tmp;
- prcslink = prcs;
- do {
- tmp -=
- (readb(&prcslink->var.rx_packet.rx_data_length[0])
- << 8)
- + readb(&prcslink->var.rx_packet.rx_data_length[1]);
- if (readb(&prcslink->var.rx_packet.next_frag_rcs_index)
- == 0xFF || tmp < 0)
- break;
- prcslink = rcs_base(local)
- + readb(&prcslink->link_field);
- } while (1);
-
- if (tmp < 0) {
- pr_debug(
- "ray_cs rx_data fragment lengths don't add up\n");
- local->stats.rx_dropped++;
- release_frag_chain(local, prcs);
- return;
- }
- } else { /* Single unfragmented packet */
- total_len = rx_len;
- }
-
- skb = dev_alloc_skb(total_len + 5);
- if (skb == NULL) {
- pr_debug("ray_cs rx_data could not allocate skb\n");
- local->stats.rx_dropped++;
- if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF)
- release_frag_chain(local, prcs);
- return;
- }
- skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */
-
- pr_debug("ray_cs rx_data total_len = %x, rx_len = %x\n", total_len,
- rx_len);
-
-/************************/
- /* Reserve enough room for the whole damn packet. */
- rx_ptr = skb_put(skb, total_len);
- /* Copy the whole packet to sk_buff */
- rx_ptr +=
- copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len);
- /* Get source address */
-#ifdef WIRELESS_SPY
- skb_copy_from_linear_data_offset(skb,
- offsetof(struct mac_header, addr_2),
- linksrcaddr, ETH_ALEN);
-#endif
- /* Now, deal with encapsulation/translation/sniffer */
- if (!sniffer) {
- if (!translate) {
- /* Encapsulated ethernet, so just lop off 802.11 MAC header */
-/* TBD reserve skb_reserve( skb, RX_MAC_HEADER_LENGTH); */
- skb_pull(skb, RX_MAC_HEADER_LENGTH);
- } else {
- /* Do translation */
- untranslate(local, skb, total_len);
- }
- } else { /* sniffer mode, so just pass whole packet */
- }
-
-/************************/
- /* Now pick up the rest of the fragments if any */
- tmp = 17;
- if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
- prcslink = prcs;
- pr_debug("ray_cs rx_data in fragment loop\n");
- do {
- prcslink = rcs_base(local)
- +
- readb(&prcslink->var.rx_packet.next_frag_rcs_index);
- rx_len =
- ((readb(&prcslink->var.rx_packet.rx_data_length[0])
- << 8)
- +
- readb(&prcslink->var.rx_packet.rx_data_length[1]))
- & RX_BUFF_END;
- pkt_addr =
- ((readb(&prcslink->var.rx_packet.rx_data_ptr[0]) <<
- 8)
- + readb(&prcslink->var.rx_packet.rx_data_ptr[1]))
- & RX_BUFF_END;
-
- rx_ptr +=
- copy_from_rx_buff(local, rx_ptr, pkt_addr, rx_len);
-
- } while (tmp-- &&
- readb(&prcslink->var.rx_packet.next_frag_rcs_index) !=
- 0xFF);
- release_frag_chain(local, prcs);
- }
-
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- local->stats.rx_packets++;
- local->stats.rx_bytes += total_len;
-
- /* Gather signal strength per address */
-#ifdef WIRELESS_SPY
- /* For the Access Point or the node having started the ad-hoc net
- * note : ad-hoc work only in some specific configurations, but we
- * kludge in ray_get_wireless_stats... */
- if (!memcmp(linksrcaddr, local->bss_id, ETH_ALEN)) {
- /* Update statistics */
- /*local->wstats.qual.qual = none ? */
- local->wstats.qual.level = siglev;
- /*local->wstats.qual.noise = none ? */
- local->wstats.qual.updated = 0x2;
- }
- /* Now, update the spy stuff */
- {
- struct iw_quality wstats;
- wstats.level = siglev;
- /* wstats.noise = none ? */
- /* wstats.qual = none ? */
- wstats.updated = 0x2;
- /* Update spy records */
- wireless_spy_update(dev, linksrcaddr, &wstats);
- }
-#endif /* WIRELESS_SPY */
-} /* end rx_data */
-
-/*===========================================================================*/
-static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
-{
- snaphdr_t *psnap = (snaphdr_t *) (skb->data + RX_MAC_HEADER_LENGTH);
- struct ieee80211_hdr *pmac = (struct ieee80211_hdr *)skb->data;
- __be16 type = *(__be16 *) psnap->ethertype;
- int delta;
- struct ethhdr *peth;
- UCHAR srcaddr[ADDRLEN];
- UCHAR destaddr[ADDRLEN];
- static const UCHAR org_bridge[3] = { 0, 0, 0xf8 };
- static const UCHAR org_1042[3] = { 0, 0, 0 };
-
- memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
- memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);
-
-#if 0
- {
- print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ",
- DUMP_PREFIX_NONE, 16, 1,
- skb->data, 64, true);
- printk(KERN_DEBUG
- "type = %04x, dsap/ssap/ctrl = %02x/%02x/%02x, org = %02x%02x%02x\n",
- ntohs(type), psnap->dsap, psnap->ssap, psnap->ctrl,
- psnap->org[0], psnap->org[1], psnap->org[2]);
- printk(KERN_DEBUG "untranslate skb->data = %p\n", skb->data);
- }
-#endif
-
- if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) {
- /* not a snap type so leave it alone */
- pr_debug("ray_cs untranslate NOT SNAP %02x %02x %02x\n",
- psnap->dsap, psnap->ssap, psnap->ctrl);
-
- delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
- peth = (struct ethhdr *)(skb->data + delta);
- peth->h_proto = htons(len - RX_MAC_HEADER_LENGTH);
- } else { /* Its a SNAP */
- if (memcmp(psnap->org, org_bridge, 3) == 0) {
- /* EtherII and nuke the LLC */
- pr_debug("ray_cs untranslate Bridge encap\n");
- delta = RX_MAC_HEADER_LENGTH
- + sizeof(struct snaphdr_t) - ETH_HLEN;
- peth = (struct ethhdr *)(skb->data + delta);
- peth->h_proto = type;
- } else if (memcmp(psnap->org, org_1042, 3) == 0) {
- switch (ntohs(type)) {
- case ETH_P_IPX:
- case ETH_P_AARP:
- pr_debug("ray_cs untranslate RFC IPX/AARP\n");
- delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
- peth = (struct ethhdr *)(skb->data + delta);
- peth->h_proto =
- htons(len - RX_MAC_HEADER_LENGTH);
- break;
- default:
- pr_debug("ray_cs untranslate RFC default\n");
- delta = RX_MAC_HEADER_LENGTH +
- sizeof(struct snaphdr_t) - ETH_HLEN;
- peth = (struct ethhdr *)(skb->data + delta);
- peth->h_proto = type;
- break;
- }
- } else {
- printk("ray_cs untranslate very confused by packet\n");
- delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
- peth = (struct ethhdr *)(skb->data + delta);
- peth->h_proto = type;
- }
- }
-/* TBD reserve skb_reserve(skb, delta); */
- skb_pull(skb, delta);
- pr_debug("untranslate after skb_pull(%d), skb->data = %p\n", delta,
- skb->data);
- memcpy(peth->h_dest, destaddr, ADDRLEN);
- memcpy(peth->h_source, srcaddr, ADDRLEN);
-#if 0
- {
- int i;
- printk(KERN_DEBUG "skb->data after untranslate:");
- for (i = 0; i < 64; i++)
- printk("%02x ", skb->data[i]);
- printk("\n");
- }
-#endif
-} /* end untranslate */
-
-/*===========================================================================*/
-/* Copy data from circular receive buffer to PC memory.
- * dest = destination address in PC memory
- * pkt_addr = source address in receive buffer
- * len = length of packet to copy
- */
-static int copy_from_rx_buff(ray_dev_t *local, UCHAR *dest, int pkt_addr,
- int length)
-{
- int wrap_bytes = (pkt_addr + length) - (RX_BUFF_END + 1);
- if (wrap_bytes <= 0) {
- memcpy_fromio(dest, local->rmem + pkt_addr, length);
- } else { /* Packet wrapped in circular buffer */
-
- memcpy_fromio(dest, local->rmem + pkt_addr,
- length - wrap_bytes);
- memcpy_fromio(dest + length - wrap_bytes, local->rmem,
- wrap_bytes);
- }
- return length;
-}
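-
-/* Wrap-around example (buffer size chosen purely for illustration): if
- * the receive ring were 0x4000 bytes (RX_BUFF_END = 0x3FFF), a packet
- * at pkt_addr = 0x3FF0 with length = 0x40 gives wrap_bytes = 0x30, so
- * the final 0x10 bytes of the ring are copied first, then 0x30 bytes
- * from offset 0.
- */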
-
-/*===========================================================================*/
-static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs)
-{
- struct rcs __iomem *prcslink = prcs;
- int tmp = 17;
- unsigned rcsindex = readb(&prcs->var.rx_packet.next_frag_rcs_index);
-
- while (tmp--) {
- writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
- if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
- pr_debug("ray_cs interrupt bad rcsindex = 0x%x\n",
- rcsindex);
- break;
- }
- prcslink = rcs_base(local) + rcsindex;
- rcsindex = readb(&prcslink->var.rx_packet.next_frag_rcs_index);
- }
- writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
-}
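-
-/* The loop bound of 17 above (also used in rx_data()) caps the walk so
- * a corrupted next_frag_rcs_index chain cannot loop forever; it is
- * presumably chosen to exceed the longest legal fragment chain.
- */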
-
-/*===========================================================================*/
-static void authenticate(ray_dev_t *local)
-{
- struct pcmcia_device *link = local->finder;
- dev_dbg(&link->dev, "ray_cs Starting authentication.\n");
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs authenticate - device not present\n");
- return;
- }
-
- del_timer(&local->timer);
- if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
- local->timer.function = join_net;
- } else {
- local->timer.function = authenticate_timeout;
- }
- local->timer.expires = jiffies + HZ * 2;
- add_timer(&local->timer);
- local->authentication_state = AWAITING_RESPONSE;
-} /* end authenticate */
-
-/*===========================================================================*/
-static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
- unsigned int pkt_addr, int rx_len)
-{
- UCHAR buff[256];
- struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
-
- del_timer(&local->timer);
-
- copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
- /* if we are trying to get authenticated */
- if (local->sparm.b4.a_network_type == ADHOC) {
- pr_debug("ray_cs rx_auth var= %6ph\n", msg->var);
- if (msg->var[2] == 1) {
- pr_debug("ray_cs Sending authentication response.\n");
- if (!build_auth_frame
- (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) {
- local->authentication_state = NEED_TO_AUTH;
- memcpy(local->auth_id, msg->mac.addr_2,
- ADDRLEN);
- }
- }
- } else { /* Infrastructure network */
-
- if (local->authentication_state == AWAITING_RESPONSE) {
- /* Verify authentication sequence #2 and success */
- if (msg->var[2] == 2) {
- if ((msg->var[3] | msg->var[4]) == 0) {
- pr_debug("Authentication successful\n");
- local->card_status = CARD_AUTH_COMPLETE;
- associate(local);
- local->authentication_state =
- AUTHENTICATED;
- } else {
- pr_debug("Authentication refused\n");
- local->card_status = CARD_AUTH_REFUSED;
- join_net(&local->timer);
- local->authentication_state =
- UNAUTHENTICATED;
- }
- }
- }
- }
-
-} /* end rx_authenticate */
-
-/*===========================================================================*/
-static void associate(ray_dev_t *local)
-{
- struct ccs __iomem *pccs;
- struct pcmcia_device *link = local->finder;
- struct net_device *dev = link->priv;
- int ccsindex;
- if (!(pcmcia_dev_present(link))) {
- dev_dbg(&link->dev, "ray_cs associate - device not present\n");
- return;
- }
- /* If no tx buffers available, return */
- if ((ccsindex = get_free_ccs(local)) < 0) {
-/* TBD should never be here but... what if we are? */
- dev_dbg(&link->dev, "ray_cs associate - No free ccs\n");
- return;
- }
- dev_dbg(&link->dev, "ray_cs Starting association with access point\n");
- pccs = ccs_base(local) + ccsindex;
- /* fill in the CCS */
- writeb(CCS_START_ASSOCIATION, &pccs->cmd);
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
- writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
-
- del_timer(&local->timer);
- local->timer.expires = jiffies + HZ * 2;
- local->timer.function = join_net;
- add_timer(&local->timer);
- local->card_status = CARD_ASSOC_FAILED;
- return;
- }
- if (!sniffer)
- netif_start_queue(dev);
-
-} /* end associate */
-
-/*===========================================================================*/
-static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
- unsigned int pkt_addr, int rx_len)
-{
-/* UCHAR buff[256];
- struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
-*/
- pr_debug("Deauthentication frame received\n");
- local->authentication_state = UNAUTHENTICATED;
- /* Need to reauthenticate or rejoin depending on reason code */
-/* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
- */
-}
-
-/*===========================================================================*/
-static void clear_interrupt(ray_dev_t *local)
-{
- writeb(0, local->amem + CIS_OFFSET + HCS_INTR_OFFSET);
-}
-
-/*===========================================================================*/
-#ifdef CONFIG_PROC_FS
-#define MAXDATA (PAGE_SIZE - 80)
-
-static const char *card_status[] = {
- "Card inserted - uninitialized", /* 0 */
- "Card not downloaded", /* 1 */
- "Waiting for download parameters", /* 2 */
- "Card doing acquisition", /* 3 */
- "Acquisition complete", /* 4 */
- "Authentication complete", /* 5 */
- "Association complete", /* 6 */
- "???", "???", "???", "???", /* 7 8 9 10 undefined */
- "Card init error", /* 11 */
- "Download parameters error", /* 12 */
- "???", /* 13 */
- "Acquisition failed", /* 14 */
- "Authentication refused", /* 15 */
- "Association failed" /* 16 */
-};
-
-static const char *nettype[] = { "Adhoc", "Infra " };
-static const char *framing[] = { "Encapsulation", "Translation" };
-
-/*===========================================================================*/
-static int ray_cs_proc_show(struct seq_file *m, void *v)
-{
-/* Print current values which are not available via other means
- * eg ifconfig
- */
- int i;
- struct pcmcia_device *link;
- struct net_device *dev;
- ray_dev_t *local;
- UCHAR *p;
- struct freq_hop_element *pfh;
- UCHAR c[33];
-
- link = this_device;
- if (!link)
- return 0;
- dev = link->priv;
- if (!dev)
- return 0;
- local = netdev_priv(dev);
- if (!local)
- return 0;
-
- seq_puts(m, "Raylink Wireless LAN driver status\n");
- seq_printf(m, "%s\n", rcsid);
- /* build 4 does not report version, and field is 0x55 after memtest */
- seq_puts(m, "Firmware version = ");
- if (local->fw_ver == 0x55)
- seq_puts(m, "4 - Use dump_cis for more details\n");
- else
- seq_printf(m, "%2d.%02d.%02d\n",
- local->fw_ver, local->fw_bld, local->fw_var);
-
- for (i = 0; i < 32; i++)
- c[i] = local->sparm.b5.a_current_ess_id[i];
- c[32] = 0;
- seq_printf(m, "%s network ESSID = \"%s\"\n",
- nettype[local->sparm.b5.a_network_type], c);
-
- p = local->bss_id;
- seq_printf(m, "BSSID = %pM\n", p);
-
- seq_printf(m, "Country code = %d\n",
- local->sparm.b5.a_curr_country_code);
-
- i = local->card_status;
- if (i < 0)
- i = 10;
- if (i > 16)
- i = 10;
- seq_printf(m, "Card status = %s\n", card_status[i]);
-
- seq_printf(m, "Framing mode = %s\n", framing[translate]);
-
- seq_printf(m, "Last pkt signal lvl = %d\n", local->last_rsl);
-
- if (local->beacon_rxed) {
- /* Pull some fields out of last beacon received */
- seq_printf(m, "Beacon Interval = %d Kus\n",
- local->last_bcn.beacon_intvl[0]
- + 256 * local->last_bcn.beacon_intvl[1]);
-
- p = local->last_bcn.elements;
- if (p[0] == C_ESSID_ELEMENT_ID)
- p += p[1] + 2;
- else {
- seq_printf(m,
- "Parse beacon failed at essid element id = %d\n",
- p[0]);
- return 0;
- }
-
- if (p[0] == C_SUPPORTED_RATES_ELEMENT_ID) {
- seq_puts(m, "Supported rate codes = ");
- for (i = 2; i < p[1] + 2; i++)
- seq_printf(m, "0x%02x ", p[i]);
- seq_putc(m, '\n');
- p += p[1] + 2;
- } else {
- seq_puts(m, "Parse beacon failed at rates element\n");
- return 0;
- }
-
- if (p[0] == C_FH_PARAM_SET_ELEMENT_ID) {
- pfh = (struct freq_hop_element *)p;
- seq_printf(m, "Hop dwell = %d Kus\n",
- pfh->dwell_time[0] +
- 256 * pfh->dwell_time[1]);
- seq_printf(m, "Hop set = %d\n",
- pfh->hop_set);
- seq_printf(m, "Hop pattern = %d\n",
- pfh->hop_pattern);
- seq_printf(m, "Hop index = %d\n",
- pfh->hop_index);
- p += p[1] + 2;
- } else {
- seq_puts(m,
- "Parse beacon failed at FH param element\n");
- return 0;
- }
- } else {
- seq_puts(m, "No beacons received\n");
- }
- return 0;
-}
-#endif
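
The beacon interval and hop dwell computations above assemble 16-bit
little-endian firmware fields by hand (b[0] + 256 * b[1]). The modern
equivalent, shown as an assumption about how one would write it today, is
get_unaligned_le16() from <asm/unaligned.h>:

/* Equivalent of the open-coded b[0] + 256 * b[1] assembly above. */
u16 intvl = get_unaligned_le16(local->last_bcn.beacon_intvl);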
-/*===========================================================================*/
-static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
-{
- int addr;
- struct ccs __iomem *pccs;
- struct tx_msg __iomem *ptx;
- int ccsindex;
-
- /* If no tx buffers available, return */
- if ((ccsindex = get_free_tx_ccs(local)) < 0) {
- pr_debug("ray_cs send authenticate - No free tx ccs\n");
- return -1;
- }
-
- pccs = ccs_base(local) + ccsindex;
-
- /* Address in card space */
- addr = TX_BUF_BASE + (ccsindex << 11);
- /* fill in the CCS */
- writeb(CCS_TX_REQUEST, &pccs->cmd);
- writeb(addr >> 8, pccs->var.tx_request.tx_data_ptr);
- writeb(0x20, pccs->var.tx_request.tx_data_ptr + 1);
- writeb(TX_AUTHENTICATE_LENGTH_MSB, pccs->var.tx_request.tx_data_length);
- writeb(TX_AUTHENTICATE_LENGTH_LSB,
- pccs->var.tx_request.tx_data_length + 1);
- writeb(0, &pccs->var.tx_request.pow_sav_mode);
-
- ptx = local->sram + addr;
- /* fill in the mac header */
- writeb(PROTOCOL_VER | AUTHENTIC_TYPE, &ptx->mac.frame_ctl_1);
- writeb(0, &ptx->mac.frame_ctl_2);
-
- memcpy_toio(ptx->mac.addr_1, dest, ADDRLEN);
- memcpy_toio(ptx->mac.addr_2, local->sparm.b4.a_mac_addr, ADDRLEN);
- memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN);
-
-	/* Fill in msg body with protocol 00 00, sequence 01 00, status 00 00 */
- memset_io(ptx->var, 0, 6);
- writeb(auth_type & 0xff, ptx->var + 2);
-
- /* Interrupt the firmware to process the command */
- if (interrupt_ecf(local, ccsindex)) {
- pr_debug(
- "ray_cs send authentication request failed - ECF not ready for intr\n");
- writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
- return -1;
- }
- return 0;
-} /* End build_auth_frame */
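
The card-space address computed above, TX_BUF_BASE + (ccsindex << 11),
gives each transmit CCS its own buffer on a 2 KiB stride; this matches
TX_BUF_SIZE being defined as (2048 - sizeof(struct tx_msg)) in rayctl.h,
so buffer ownership follows directly from the CCS index.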
-
-/*===========================================================================*/
-#ifdef CONFIG_PROC_FS
-static ssize_t ray_cs_essid_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *pos)
-{
- static char proc_essid[33];
- unsigned int len = count;
-
- if (len > 32)
- len = 32;
- memset(proc_essid, 0, 33);
- if (copy_from_user(proc_essid, buffer, len))
- return -EFAULT;
- essid = proc_essid;
- return count;
-}
-
-static const struct proc_ops ray_cs_essid_proc_ops = {
- .proc_write = ray_cs_essid_proc_write,
- .proc_lseek = noop_llseek,
-};
-
-static ssize_t int_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- static char proc_number[10];
- char *p;
- int nr, len;
-
- if (!count)
- return 0;
-
- if (count > 9)
- return -EINVAL;
- if (copy_from_user(proc_number, buffer, count))
- return -EFAULT;
- p = proc_number;
- nr = 0;
- len = count;
- do {
- unsigned int c = *p - '0';
- if (c > 9)
- return -EINVAL;
- nr = nr * 10 + c;
- p++;
- } while (--len);
- *(int *)pde_data(file_inode(file)) = nr;
- return count;
-}
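
The hand-rolled decimal loop above predates the kernel's kstrto* helpers.
A minimal sketch of the same write handler using kstrtoint_from_user(),
shown as an assumption about how it would be modernized rather than as
the driver's actual code:

static ssize_t int_proc_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *pos)
{
	int nr, ret;

	/* Copies from userspace and parses base-10 in one call. */
	ret = kstrtoint_from_user(buffer, count, 10, &nr);
	if (ret)
		return ret;
	*(int *)pde_data(file_inode(file)) = nr;
	return count;
}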
-
-static const struct proc_ops int_proc_ops = {
- .proc_write = int_proc_write,
- .proc_lseek = noop_llseek,
-};
-#endif
-
-static const struct pcmcia_device_id ray_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x01a6, 0x0000),
- PCMCIA_DEVICE_NULL,
-};
-
-MODULE_DEVICE_TABLE(pcmcia, ray_ids);
-
-static struct pcmcia_driver ray_driver = {
- .owner = THIS_MODULE,
- .name = "ray_cs",
- .probe = ray_probe,
- .remove = ray_detach,
- .id_table = ray_ids,
- .suspend = ray_suspend,
- .resume = ray_resume,
-};
-
-static int __init init_ray_cs(void)
-{
- int rc;
-
- pr_debug("%s\n", rcsid);
- rc = pcmcia_register_driver(&ray_driver);
- pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n",
- rc);
- if (rc)
- return rc;
-
-#ifdef CONFIG_PROC_FS
- proc_mkdir("driver/ray_cs", NULL);
-
- proc_create_single("driver/ray_cs/ray_cs", 0, NULL, ray_cs_proc_show);
- proc_create("driver/ray_cs/essid", 0200, NULL, &ray_cs_essid_proc_ops);
- proc_create_data("driver/ray_cs/net_type", 0200, NULL, &int_proc_ops,
- &net_type);
- proc_create_data("driver/ray_cs/translate", 0200, NULL, &int_proc_ops,
- &translate);
-#endif
- translate = !!translate;
- return 0;
-} /* init_ray_cs */
-
-/*===========================================================================*/
-
-static void __exit exit_ray_cs(void)
-{
- pr_debug("ray_cs: cleanup_module\n");
-
-#ifdef CONFIG_PROC_FS
- remove_proc_subtree("driver/ray_cs", NULL);
-#endif
-
- pcmcia_unregister_driver(&ray_driver);
-} /* exit_ray_cs */
-
-module_init(init_ray_cs);
-module_exit(exit_ray_cs);
-
-/*===========================================================================*/
diff --git a/drivers/net/wireless/legacy/ray_cs.h b/drivers/net/wireless/legacy/ray_cs.h
deleted file mode 100644
index 0609d8625..000000000
--- a/drivers/net/wireless/legacy/ray_cs.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Raytheon wireless LAN PCMCIA card driver for Linux
- A PCMCIA client driver for the Raylink wireless network card
- Written by Corey Thomas
-*/
-
-#ifndef _RAY_CS_H_
-#define _RAY_CS_H_
-
-struct beacon_rx {
- struct mac_header mac;
- UCHAR timestamp[8];
- UCHAR beacon_intvl[2];
- UCHAR capability[2];
- UCHAR elements[sizeof(struct essid_element)
- + sizeof(struct rates_element)
- + sizeof(struct freq_hop_element)
- + sizeof(struct japan_call_sign_element)
- + sizeof(struct tim_element)];
-};
-
-/* Return values for get_free{,_tx}_ccs */
-#define ECCSFULL (-1)
-#define ECCSBUSY (-2)
-#define ECARDGONE (-3)
-
-typedef struct ray_dev_t {
- int card_status;
- int authentication_state;
- void __iomem *sram; /* pointer to beginning of shared RAM */
- void __iomem *amem; /* pointer to attribute mem window */
- void __iomem *rmem; /* pointer to receive buffer window */
- struct pcmcia_device *finder; /* pointer back to struct pcmcia_device for card */
- struct timer_list timer;
- unsigned long tx_ccs_lock;
- unsigned long ccs_lock;
- int dl_param_ccs;
- union {
- struct b4_startup_params b4;
- struct b5_startup_params b5;
- } sparm;
- int timeout_flag;
- UCHAR supported_rates[8];
- UCHAR japan_call_sign[12];
- struct startup_res_6 startup_res;
- int num_multi;
- /* Network parameters from start/join */
- UCHAR bss_id[6];
- UCHAR auth_id[6];
- UCHAR net_default_tx_rate;
- UCHAR encryption;
- struct net_device_stats stats;
-
- UCHAR net_type;
- UCHAR sta_type;
- UCHAR fw_ver;
- UCHAR fw_bld;
- UCHAR fw_var;
- UCHAR ASIC_version;
- UCHAR assoc_id[2];
- UCHAR tib_length;
- UCHAR last_rsl;
- int beacon_rxed;
- struct beacon_rx last_bcn;
- iw_stats wstats; /* Wireless specific stats */
-#ifdef WIRELESS_SPY
- struct iw_spy_data spy_data;
- struct iw_public_data wireless_data;
-#endif /* WIRELESS_SPY */
-
-} ray_dev_t;
-/*****************************************************************************/
-
-#endif /* _RAY_CS_H_ */
diff --git a/drivers/net/wireless/legacy/rayctl.h b/drivers/net/wireless/legacy/rayctl.h
deleted file mode 100644
index 1f3bde8ac..000000000
--- a/drivers/net/wireless/legacy/rayctl.h
+++ /dev/null
@@ -1,734 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _RAYCTL_H_
-#define _RAYCTL_H_
-
-typedef unsigned char UCHAR;
-
-/****** IEEE 802.11 constants ************************************************/
-#define ADDRLEN 6
-/* Frame control 1 bit fields */
-#define PROTOCOL_VER 0x00
-#define DATA_TYPE 0x08
-#define ASSOC_REQ_TYPE 0x00
-#define ASSOC_RESP_TYPE 0x10
-#define REASSOC_REQ_TYPE 0x20
-#define REASSOC_RESP_TYPE 0x30
-#define NULL_MSG_TYPE 0x48
-#define BEACON_TYPE 0x80
-#define DISASSOC_TYPE 0xA0
-#define PSPOLL_TYPE 0xA4
-#define AUTHENTIC_TYPE 0xB0
-#define DEAUTHENTIC_TYPE 0xC0
-/* Frame control 2 bit fields */
-#define FC2_TO_DS 0x01
-#define FC2_FROM_DS 0x02
-#define FC2_MORE_FRAG 0x04
-#define FC2_RETRY 0x08
-#define FC2_PSM 0x10
-#define FC2_MORE_DATA 0x20
-#define FC2_WEP 0x40
-#define FC2_ORDER 0x80
-/*****************************************************************************/
-/* 802.11 element ID's and lengths */
-#define C_BP_CAPABILITY_ESS 0x01
-#define C_BP_CAPABILITY_IBSS 0x02
-#define C_BP_CAPABILITY_CF_POLLABLE 0x04
-#define C_BP_CAPABILITY_CF_POLL_REQUEST 0x08
-#define C_BP_CAPABILITY_PRIVACY 0x10
-
-#define C_ESSID_ELEMENT_ID 0
-#define C_ESSID_ELEMENT_MAX_LENGTH 32
-
-#define C_SUPPORTED_RATES_ELEMENT_ID 1
-#define C_SUPPORTED_RATES_ELEMENT_LENGTH 2
-
-#define C_FH_PARAM_SET_ELEMENT_ID 2
-#define C_FH_PARAM_SET_ELEMENT_LNGTH 5
-
-#define C_CF_PARAM_SET_ELEMENT_ID 4
-#define C_CF_PARAM_SET_ELEMENT_LNGTH 6
-
-#define C_TIM_ELEMENT_ID 5
-#define C_TIM_BITMAP_LENGTH 251
-#define C_TIM_BMCAST_BIT 0x01
-
-#define C_IBSS_ELEMENT_ID 6
-#define C_IBSS_ELEMENT_LENGTH 2
-
-#define C_JAPAN_CALL_SIGN_ELEMENT_ID 51
-#define C_JAPAN_CALL_SIGN_ELEMENT_LNGTH 12
-
-#define C_DISASSOC_REASON_CODE_LEN 2
-#define C_DISASSOC_REASON_CODE_DEFAULT 8
-
-#define C_CRC_LEN 4
-#define C_NUM_SUPPORTED_RATES 8
-/****** IEEE 802.11 mac header for type data packets *************************/
-struct mac_header {
- UCHAR frame_ctl_1;
- UCHAR frame_ctl_2;
- UCHAR duration_lsb;
- UCHAR duration_msb;
- UCHAR addr_1[ADDRLEN];
- UCHAR addr_2[ADDRLEN];
- UCHAR addr_3[ADDRLEN];
- UCHAR seq_frag_num[2];
-/* UCHAR addr_4[ADDRLEN]; */ /* only present for AP to AP (TO DS and FROM DS) */
-};
-/****** IEEE 802.11 frame element structures *********************************/
-struct essid_element
-{
- UCHAR id;
- UCHAR length;
- UCHAR text[C_ESSID_ELEMENT_MAX_LENGTH];
-};
-struct rates_element
-{
- UCHAR id;
- UCHAR length;
- UCHAR value[8];
-};
-struct freq_hop_element
-{
- UCHAR id;
- UCHAR length;
- UCHAR dwell_time[2];
- UCHAR hop_set;
- UCHAR hop_pattern;
- UCHAR hop_index;
-};
-struct tim_element
-{
- UCHAR id;
- UCHAR length;
- UCHAR dtim_count;
- UCHAR dtim_period;
- UCHAR bitmap_control;
- UCHAR tim[C_TIM_BITMAP_LENGTH];
-};
-struct ibss_element
-{
- UCHAR id;
- UCHAR length;
- UCHAR atim_window[2];
-};
-struct japan_call_sign_element
-{
- UCHAR id;
- UCHAR length;
- UCHAR call_sign[12];
-};
-/****** Beacon message structures ********************************************/
-/* .elements is a large lump of max size because elements are variable size */
-struct infra_beacon
-{
- UCHAR timestamp[8];
- UCHAR beacon_intvl[2];
- UCHAR capability[2];
- UCHAR elements[sizeof(struct essid_element)
- + sizeof(struct rates_element)
- + sizeof(struct freq_hop_element)
- + sizeof(struct japan_call_sign_element)
- + sizeof(struct tim_element)];
-};
-struct adhoc_beacon
-{
- UCHAR timestamp[8];
- UCHAR beacon_intvl[2];
- UCHAR capability[2];
- UCHAR elements[sizeof(struct essid_element)
- + sizeof(struct rates_element)
- + sizeof(struct freq_hop_element)
- + sizeof(struct japan_call_sign_element)
- + sizeof(struct ibss_element)];
-};
-/*****************************************************************************/
-/*****************************************************************************/
-/* #define C_MAC_HDR_2_WEP 0x40 */
-/* TX/RX CCS constants */
-#define TX_HEADER_LENGTH 0x1C
-#define RX_MAC_HEADER_LENGTH 0x18
-#define TX_AUTHENTICATE_LENGTH (TX_HEADER_LENGTH + 6)
-#define TX_AUTHENTICATE_LENGTH_MSB (TX_AUTHENTICATE_LENGTH >> 8)
-#define TX_AUTHENTICATE_LENGTH_LSB (TX_AUTHENTICATE_LENGTH & 0xff)
-#define TX_DEAUTHENTICATE_LENGTH (TX_HEADER_LENGTH + 2)
-#define TX_DEAUTHENTICATE_LENGTH_MSB (TX_DEAUTHENTICATE_LENGTH >> 8)
-#define TX_DEAUTHENTICATE_LENGTH_LSB (TX_DEAUTHENTICATE_LENGTH & 0xff)
-#define FCS_LEN 4
-
-#define ADHOC 0
-#define INFRA 1
-
-#define TYPE_STA 0
-#define TYPE_AP 1
-
-#define PASSIVE_SCAN 1
-#define ACTIVE_SCAN 1
-
-#define PSM_CAM 0
-
-/* Country codes */
-#define USA 1
-#define EUROPE 2
-#define JAPAN 3
-#define KOREA 4
-#define SPAIN 5
-#define FRANCE 6
-#define ISRAEL 7
-#define AUSTRALIA 8
-#define JAPAN_TEST 9
-
-/* Hop pattern lengths */
-#define USA_HOP_MOD 79
-#define EUROPE_HOP_MOD 79
-#define JAPAN_HOP_MOD 23
-#define KOREA_HOP_MOD 23
-#define SPAIN_HOP_MOD 27
-#define FRANCE_HOP_MOD 35
-#define ISRAEL_HOP_MOD 35
-#define AUSTRALIA_HOP_MOD 47
-#define JAPAN_TEST_HOP_MOD 23
-
-#define ESSID_SIZE 32
-/**********************************************************************/
-/* CIS Register Constants */
-#define CIS_OFFSET 0x0f00
-/* Configuration Option Register (0x0F00) */
-#define COR_OFFSET 0x00
-#define COR_SOFT_RESET 0x80
-#define COR_LEVEL_IRQ 0x40
-#define COR_CONFIG_NUM 0x01
-#define COR_DEFAULT (COR_LEVEL_IRQ | COR_CONFIG_NUM)
-
-/* Card Configuration and Status Register (0x0F01) */
-#define CCSR_OFFSET 0x01
-#define CCSR_HOST_INTR_PENDING 0x01
-#define CCSR_POWER_DOWN 0x04
-
-/* HCS Interrupt Register (0x0F05) */
-#define HCS_INTR_OFFSET 0x05
-/* #define HCS_INTR_OFFSET 0x0A */
-#define HCS_INTR_CLEAR 0x00
-
-/* ECF Interrupt Register (0x0F06) */
-#define ECF_INTR_OFFSET 0x06
-/* #define ECF_INTR_OFFSET 0x0C */
-#define ECF_INTR_SET 0x01
-
-/* Authorization Register 0 (0x0F08) */
-#define AUTH_0_ON 0x57
-
-/* Authorization Register 1 (0x0F09) */
-#define AUTH_1_ON 0x82
-
-/* Program Mode Register (0x0F0A) */
-#define PC2PM 0x02
-#define PC2CAL 0x10
-#define PC2MLSE 0x20
-
-/* PC Test Mode Register (0x0F0B) */
-#define PC_TEST_MODE 0x08
-
-/* Frequency Control Word (0x0F10) */
-/* Range 0x02 - 0xA6 */
-
-/* Test Mode Control 1-4 (0x0F14 - 0x0F17) */
-
-/**********************************************************************/
-
-/* Shared RAM Area */
-#define SCB_BASE 0x0000
-#define STATUS_BASE 0x0100
-#define HOST_TO_ECF_BASE 0x0200
-#define ECF_TO_HOST_BASE 0x0300
-#define CCS_BASE 0x0400
-#define RCS_BASE 0x0800
-#define INFRA_TIM_BASE 0x0C00
-#define SSID_LIST_BASE 0x0D00
-#define TX_BUF_BASE 0x1000
-#define RX_BUF_BASE 0x8000
-
-#define NUMBER_OF_CCS 64
-#define NUMBER_OF_RCS 64
-/*#define NUMBER_OF_TX_CCS 14 */
-#define NUMBER_OF_TX_CCS 14
-
-#define TX_BUF_SIZE (2048 - sizeof(struct tx_msg))
-#define RX_BUFF_END 0x3FFF
-/* Values for buffer_status */
-#define CCS_BUFFER_FREE 0
-#define CCS_BUFFER_BUSY 1
-#define CCS_COMMAND_COMPLETE 2
-#define CCS_COMMAND_FAILED 3
-
-/* Values for cmd */
-#define CCS_DOWNLOAD_STARTUP_PARAMS 1
-#define CCS_UPDATE_PARAMS 2
-#define CCS_REPORT_PARAMS 3
-#define CCS_UPDATE_MULTICAST_LIST 4
-#define CCS_UPDATE_POWER_SAVINGS_MODE 5
-#define CCS_START_NETWORK 6
-#define CCS_JOIN_NETWORK 7
-#define CCS_START_ASSOCIATION 8
-#define CCS_TX_REQUEST 9
-#define CCS_TEST_MEMORY 0xa
-#define CCS_SHUTDOWN 0xb
-#define CCS_DUMP_MEMORY 0xc
-#define CCS_START_TIMER 0xe
-#define CCS_LAST_CMD CCS_START_TIMER
-
-/* Values for link field */
-#define CCS_END_LIST 0xff
-
-/* values for buffer_status field */
-#define RCS_BUFFER_FREE 0
-#define RCS_BUFFER_BUSY 1
-#define RCS_COMPLETE 2
-#define RCS_FAILED 3
-#define RCS_BUFFER_RELEASE 0xFF
-
-/* values for interrupt_id field */
-#define PROCESS_RX_PACKET 0x80 /* */
-#define REJOIN_NET_COMPLETE 0x81 /* RCS ID: Rejoin Net Complete */
-#define ROAMING_INITIATED 0x82 /* RCS ID: Roaming Initiated */
-#define JAPAN_CALL_SIGN_RXD 0x83 /* RCS ID: New Japan Call Sign */
-
-/*****************************************************************************/
-/* Memory types for dump memory command */
-#define C_MEM_PROG 0
-#define C_MEM_XDATA 1
-#define C_MEM_SFR 2
-#define C_MEM_IDATA 3
-
-/*** Return values for hw_xmit **********/
-#define XMIT_OK (0)
-#define XMIT_MSG_BAD (-1)
-#define XMIT_NO_CCS (-2)
-#define XMIT_NO_INTR (-3)
-#define XMIT_NEED_AUTH (-4)
-
-/*** Values for card status */
-#define CARD_INSERTED (0)
-
-#define CARD_AWAITING_PARAM (1)
-#define CARD_INIT_ERROR (11)
-
-#define CARD_DL_PARAM (2)
-#define CARD_DL_PARAM_ERROR (12)
-
-#define CARD_DOING_ACQ (3)
-
-#define CARD_ACQ_COMPLETE (4)
-#define CARD_ACQ_FAILED (14)
-
-#define CARD_AUTH_COMPLETE (5)
-#define CARD_AUTH_REFUSED (15)
-
-#define CARD_ASSOC_COMPLETE (6)
-#define CARD_ASSOC_FAILED (16)
-
-/*** Values for authentication_state ***********************************/
-#define UNAUTHENTICATED (0)
-#define AWAITING_RESPONSE (1)
-#define AUTHENTICATED (2)
-#define NEED_TO_AUTH (3)
-
-/*** Values for authentication type ************************************/
-#define OPEN_AUTH_REQUEST (1)
-#define OPEN_AUTH_RESPONSE (2)
-#define BROADCAST_DEAUTH (0xc0)
-/*** Values for timer functions ****************************************/
-#define TODO_NOTHING (0)
-#define TODO_VERIFY_DL_START (-1)
-#define TODO_START_NET (-2)
-#define TODO_JOIN_NET (-3)
-#define TODO_AUTHENTICATE_TIMEOUT (-4)
-#define TODO_SEND_CCS (-5)
-/***********************************************************************/
-/* Parameter passing structure for update/report parameter CCS's */
-struct object_id {
- void *object_addr;
- unsigned char object_length;
-};
-
-#define OBJID_network_type 0
-#define OBJID_acting_as_ap_status 1
-#define OBJID_current_ess_id 2
-#define OBJID_scanning_mode 3
-#define OBJID_power_mgt_state 4
-#define OBJID_mac_address 5
-#define OBJID_frag_threshold 6
-#define OBJID_hop_time 7
-#define OBJID_beacon_period 8
-#define OBJID_dtim_period 9
-#define OBJID_retry_max 10
-#define OBJID_ack_timeout 11
-#define OBJID_sifs 12
-#define OBJID_difs 13
-#define OBJID_pifs 14
-#define OBJID_rts_threshold 15
-#define OBJID_scan_dwell_time 16
-#define OBJID_max_scan_dwell_time 17
-#define OBJID_assoc_resp_timeout 18
-#define OBJID_adhoc_scan_cycle_max 19
-#define OBJID_infra_scan_cycle_max 20
-#define OBJID_infra_super_cycle_max 21
-#define OBJID_promiscuous_mode 22
-#define OBJID_unique_word 23
-#define OBJID_slot_time 24
-#define OBJID_roaming_low_snr 25
-#define OBJID_low_snr_count_thresh 26
-#define OBJID_infra_missed_bcn 27
-#define OBJID_adhoc_missed_bcn 28
-#define OBJID_curr_country_code 29
-#define OBJID_hop_pattern 30
-#define OBJID_reserved 31
-#define OBJID_cw_max_msb 32
-#define OBJID_cw_min_msb 33
-#define OBJID_noise_filter_gain 34
-#define OBJID_noise_limit_offset 35
-#define OBJID_det_rssi_thresh_offset 36
-#define OBJID_med_busy_thresh_offset 37
-#define OBJID_det_sync_thresh 38
-#define OBJID_test_mode 39
-#define OBJID_test_min_chan_num 40
-#define OBJID_test_max_chan_num 41
-#define OBJID_allow_bcast_ID_prbrsp 42
-#define OBJID_privacy_must_start 43
-#define OBJID_privacy_can_join 44
-#define OBJID_basic_rate_set 45
-
-/**** Configuration/Status/Control Area ***************************/
-/* System Control Block (SCB) Area
- * Located at Shared RAM offset 0
- */
-struct scb {
- UCHAR ccs_index;
- UCHAR rcs_index;
-};
-
-/****** Status area at Shared RAM offset 0x0100 ******************************/
-struct status {
-	UCHAR mrx_overflow_for_host;		/* 0=ECF may write, 1=host may write */
-	UCHAR mrx_checksum_error_for_host;	/* 0=ECF may write, 1=host may write */
-	UCHAR rx_hec_error_for_host;		/* 0=ECF may write, 1=host may write */
- UCHAR reserved1;
- short mrx_overflow; /* ECF increments on rx overflow */
- short mrx_checksum_error; /* ECF increments on rx CRC error */
- short rx_hec_error; /* ECF incs on mac header CRC error */
- UCHAR rxnoise; /* Average RSL measurement */
-};
-
-/****** Host-to-ECF Data Area at Shared RAM offset 0x200 *********************/
-struct host_to_ecf_area {
-
-};
-
-/****** ECF-to-Host Data Area at Shared RAM offset 0x0300 ********************/
-struct startup_res_518 {
- UCHAR startup_word;
- UCHAR station_addr[ADDRLEN];
- UCHAR calc_prog_chksum;
- UCHAR calc_cis_chksum;
- UCHAR ecf_spare[7];
- UCHAR japan_call_sign[12];
-};
-
-struct startup_res_6 {
- UCHAR startup_word;
- UCHAR station_addr[ADDRLEN];
- UCHAR reserved;
- UCHAR supp_rates[8];
- UCHAR japan_call_sign[12];
- UCHAR calc_prog_chksum;
- UCHAR calc_cis_chksum;
- UCHAR firmware_version[3];
- UCHAR asic_version;
- UCHAR tib_length;
-};
-
-struct start_join_net_params {
- UCHAR net_type;
- UCHAR ssid[ESSID_SIZE];
- UCHAR reserved;
- UCHAR privacy_can_join;
-};
-
-/****** Command Control Structure area at Shared ram offset 0x0400 ***********/
-/* Structures for command specific parameters (ccs.var) */
-struct update_param_cmd {
- UCHAR object_id;
- UCHAR number_objects;
- UCHAR failure_cause;
-};
-struct report_param_cmd {
- UCHAR object_id;
- UCHAR number_objects;
- UCHAR failure_cause;
- UCHAR length;
-};
-struct start_network_cmd {
- UCHAR update_param;
- UCHAR bssid[ADDRLEN];
- UCHAR net_initiated;
- UCHAR net_default_tx_rate;
- UCHAR encryption;
-};
-struct join_network_cmd {
- UCHAR update_param;
- UCHAR bssid[ADDRLEN];
- UCHAR net_initiated;
- UCHAR net_default_tx_rate;
- UCHAR encryption;
-};
-struct tx_requested_cmd {
- UCHAR tx_data_ptr[2];
- UCHAR tx_data_length[2];
- UCHAR host_reserved[2];
- UCHAR reserved[3];
- UCHAR tx_rate;
- UCHAR pow_sav_mode;
- UCHAR retries;
- UCHAR antenna;
-};
-struct tx_requested_cmd_4 {
- UCHAR tx_data_ptr[2];
- UCHAR tx_data_length[2];
- UCHAR dest_addr[ADDRLEN];
- UCHAR pow_sav_mode;
- UCHAR retries;
- UCHAR station_id;
-};
-struct memory_dump_cmd {
- UCHAR memory_type;
- UCHAR memory_ptr[2];
- UCHAR length;
-};
-struct update_association_cmd {
- UCHAR status;
- UCHAR aid[2];
-};
-struct start_timer_cmd {
- UCHAR duration[2];
-};
-
-struct ccs {
- UCHAR buffer_status; /* 0 = buffer free, 1 = buffer busy */
- /* 2 = command complete, 3 = failed */
- UCHAR cmd; /* command to ECF */
- UCHAR link; /* link to next CCS, FF=end of list */
- /* command specific parameters */
- union {
- char reserved[13];
- struct update_param_cmd update_param;
- struct report_param_cmd report_param;
- UCHAR nummulticast;
- UCHAR mode;
- struct start_network_cmd start_network;
- struct join_network_cmd join_network;
- struct tx_requested_cmd tx_request;
- struct memory_dump_cmd memory_dump;
- struct update_association_cmd update_assoc;
- struct start_timer_cmd start_timer;
- } var;
-};
-
-/*****************************************************************************/
-/* Transmit buffer structures */
-struct tib_structure {
- UCHAR ccs_index;
- UCHAR psm;
- UCHAR pass_fail;
- UCHAR retry_count;
- UCHAR max_retries;
- UCHAR frags_remaining;
- UCHAR no_rb;
- UCHAR rts_reqd;
- UCHAR csma_tx_cntrl_2;
- UCHAR sifs_tx_cntrl_2;
- UCHAR tx_dma_addr_1[2];
- UCHAR tx_dma_addr_2[2];
- UCHAR var_dur_2mhz[2];
- UCHAR var_dur_1mhz[2];
- UCHAR max_dur_2mhz[2];
- UCHAR max_dur_1mhz[2];
- UCHAR hdr_len;
- UCHAR max_frag_len[2];
- UCHAR var_len[2];
- UCHAR phy_hdr_4;
- UCHAR mac_hdr_1;
- UCHAR mac_hdr_2;
- UCHAR sid[2];
-};
-
-struct phy_header {
- UCHAR sfd[2];
- UCHAR hdr_3;
- UCHAR hdr_4;
-};
-struct ray_rx_msg {
- struct mac_header mac;
- UCHAR var[];
-};
-
-struct tx_msg {
- struct tib_structure tib;
- struct phy_header phy;
- struct mac_header mac;
- UCHAR var[];
-};
-
-/****** ECF Receive Control Structure (RCS) Area at Shared RAM offset 0x0800 */
-/* Structures for command specific parameters (rcs.var) */
-struct rx_packet_cmd {
- UCHAR rx_data_ptr[2];
- UCHAR rx_data_length[2];
- UCHAR rx_sig_lev;
- UCHAR next_frag_rcs_index;
- UCHAR totalpacketlength[2];
-};
-struct rejoin_net_cmplt_cmd {
- UCHAR reserved;
- UCHAR bssid[ADDRLEN];
-};
-struct japan_call_sign_rxd {
- UCHAR rxd_call_sign[8];
- UCHAR reserved[5];
-};
-
-struct rcs {
- UCHAR buffer_status;
- UCHAR interrupt_id;
- UCHAR link_field;
- /* command specific parameters */
- union {
- UCHAR reserved[13];
- struct rx_packet_cmd rx_packet;
- struct rejoin_net_cmplt_cmd rejoin_net_complete;
- struct japan_call_sign_rxd japan_call_sign;
- } var;
-};
-
-/****** Startup parameter structures for both versions of firmware ***********/
-struct b4_startup_params {
- UCHAR a_network_type; /* C_ADHOC, C_INFRA */
- UCHAR a_acting_as_ap_status; /* C_TYPE_STA, C_TYPE_AP */
- UCHAR a_current_ess_id[ESSID_SIZE]; /* Null terminated unless 32 long */
- UCHAR a_scanning_mode; /* passive 0, active 1 */
- UCHAR a_power_mgt_state; /* CAM 0, */
- UCHAR a_mac_addr[ADDRLEN]; /* */
- UCHAR a_frag_threshold[2]; /* 512 */
- UCHAR a_hop_time[2]; /* 16k * 2**n, n=0-4 in Kus */
- UCHAR a_beacon_period[2]; /* n * a_hop_time in Kus */
- UCHAR a_dtim_period; /* in beacons */
- UCHAR a_retry_max; /* */
- UCHAR a_ack_timeout; /* */
- UCHAR a_sifs; /* */
- UCHAR a_difs; /* */
- UCHAR a_pifs; /* */
- UCHAR a_rts_threshold[2]; /* */
- UCHAR a_scan_dwell_time[2]; /* */
- UCHAR a_max_scan_dwell_time[2]; /* */
- UCHAR a_assoc_resp_timeout_thresh; /* */
- UCHAR a_adhoc_scan_cycle_max; /* */
- UCHAR a_infra_scan_cycle_max; /* */
- UCHAR a_infra_super_scan_cycle_max; /* */
- UCHAR a_promiscuous_mode; /* */
- UCHAR a_unique_word[2]; /* */
- UCHAR a_slot_time; /* */
- UCHAR a_roaming_low_snr_thresh; /* */
- UCHAR a_low_snr_count_thresh; /* */
- UCHAR a_infra_missed_bcn_thresh; /* */
- UCHAR a_adhoc_missed_bcn_thresh; /* */
- UCHAR a_curr_country_code; /* C_USA */
- UCHAR a_hop_pattern; /* */
- UCHAR a_hop_pattern_length; /* */
-/* b4 - b5 differences start here */
- UCHAR a_cw_max; /* */
- UCHAR a_cw_min; /* */
- UCHAR a_noise_filter_gain; /* */
- UCHAR a_noise_limit_offset; /* */
- UCHAR a_det_rssi_thresh_offset; /* */
- UCHAR a_med_busy_thresh_offset; /* */
- UCHAR a_det_sync_thresh; /* */
- UCHAR a_test_mode; /* */
- UCHAR a_test_min_chan_num; /* */
- UCHAR a_test_max_chan_num; /* */
- UCHAR a_rx_tx_delay; /* */
- UCHAR a_current_bss_id[ADDRLEN]; /* */
- UCHAR a_hop_set; /* */
-};
-struct b5_startup_params {
- UCHAR a_network_type; /* C_ADHOC, C_INFRA */
- UCHAR a_acting_as_ap_status; /* C_TYPE_STA, C_TYPE_AP */
- UCHAR a_current_ess_id[ESSID_SIZE]; /* Null terminated unless 32 long */
- UCHAR a_scanning_mode; /* passive 0, active 1 */
- UCHAR a_power_mgt_state; /* CAM 0, */
- UCHAR a_mac_addr[ADDRLEN]; /* */
- UCHAR a_frag_threshold[2]; /* 512 */
- UCHAR a_hop_time[2]; /* 16k * 2**n, n=0-4 in Kus */
- UCHAR a_beacon_period[2]; /* n * a_hop_time in Kus */
- UCHAR a_dtim_period; /* in beacons */
- UCHAR a_retry_max; /* 4 */
- UCHAR a_ack_timeout; /* */
- UCHAR a_sifs; /* */
- UCHAR a_difs; /* */
- UCHAR a_pifs; /* */
- UCHAR a_rts_threshold[2]; /* */
- UCHAR a_scan_dwell_time[2]; /* */
- UCHAR a_max_scan_dwell_time[2]; /* */
- UCHAR a_assoc_resp_timeout_thresh; /* */
- UCHAR a_adhoc_scan_cycle_max; /* */
- UCHAR a_infra_scan_cycle_max; /* */
- UCHAR a_infra_super_scan_cycle_max; /* */
- UCHAR a_promiscuous_mode; /* */
- UCHAR a_unique_word[2]; /* */
- UCHAR a_slot_time; /* */
- UCHAR a_roaming_low_snr_thresh; /* */
- UCHAR a_low_snr_count_thresh; /* */
- UCHAR a_infra_missed_bcn_thresh; /* */
- UCHAR a_adhoc_missed_bcn_thresh; /* */
- UCHAR a_curr_country_code; /* C_USA */
- UCHAR a_hop_pattern; /* */
- UCHAR a_hop_pattern_length; /* */
-/* b4 - b5 differences start here */
- UCHAR a_cw_max[2]; /* */
- UCHAR a_cw_min[2]; /* */
- UCHAR a_noise_filter_gain; /* */
- UCHAR a_noise_limit_offset; /* */
- UCHAR a_det_rssi_thresh_offset; /* */
- UCHAR a_med_busy_thresh_offset; /* */
- UCHAR a_det_sync_thresh; /* */
- UCHAR a_test_mode; /* */
- UCHAR a_test_min_chan_num; /* */
- UCHAR a_test_max_chan_num; /* */
- UCHAR a_allow_bcast_SSID_probe_rsp;
- UCHAR a_privacy_must_start;
- UCHAR a_privacy_can_join;
- UCHAR a_basic_rate_set[8];
-};
-
-/*****************************************************************************/
-#define RAY_IOCG_PARMS (SIOCDEVPRIVATE)
-#define RAY_IOCS_PARMS (SIOCDEVPRIVATE + 1)
-#define RAY_DO_CMD (SIOCDEVPRIVATE + 2)
-
-/****** ethernet <-> 802.11 translation **************************************/
-typedef struct snaphdr_t
-{
- UCHAR dsap;
- UCHAR ssap;
- UCHAR ctrl;
- UCHAR org[3];
- UCHAR ethertype[2];
-} snaphdr_t;
-
-#define BRIDGE_ENCAP 0xf80000
-#define RFC1042_ENCAP 0
-#define SNAP_ID 0x0003aaaa
-#define RAY_IPX_TYPE 0x8137
-#define APPLEARP_TYPE 0x80f3
-/*****************************************************************************/
-#endif /* _RAYCTL_H_ */
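
The snaphdr_t layout and the encapsulation constants above implement the
ethernet-to-802.11 translation header: SNAP_ID (0x0003aaaa) is exactly the
first four bytes dsap=0xAA, ssap=0xAA, ctrl=0x03, org[0]=0x00 read as a
little-endian 32-bit word. A sketch of filling a canonical RFC 1042 header
with a hypothetical helper, for illustration:

/* Hypothetical helper; not part of the original driver. */
static void fill_rfc1042_snap(snaphdr_t *snap, unsigned short ethertype)
{
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;
	snap->org[0] = 0;		/* RFC1042_ENCAP: OUI 00:00:00 */
	snap->org[1] = 0;
	snap->org[2] = 0;
	snap->ethertype[0] = ethertype >> 8;	/* network byte order */
	snap->ethertype[1] = ethertype & 0xff;
}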
diff --git a/drivers/net/wireless/legacy/rndis_wlan.c b/drivers/net/wireless/legacy/rndis_wlan.c
deleted file mode 100644
index e7fea7ded..000000000
--- a/drivers/net/wireless/legacy/rndis_wlan.c
+++ /dev/null
@@ -1,3760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for RNDIS based USB wireless devices.
- *
- * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net>
- * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi>
- *
- * Portions of this file are based on NDISwrapper project,
- * Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
- * http://ndiswrapper.sourceforge.net/
- */
-
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
-
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/workqueue.h>
-#include <linux/mutex.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/usb/cdc.h>
-#include <linux/ieee80211.h>
-#include <linux/if_arp.h>
-#include <linux/ctype.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <net/cfg80211.h>
-#include <linux/usb/usbnet.h>
-#include <linux/usb/rndis_host.h>
-
-
-/* NOTE: All these are settings for Broadcom chipset */
-static char modparam_country[4] = "EU";
-module_param_string(country, modparam_country, 4, 0444);
-MODULE_PARM_DESC(country, "Country code (ISO 3166-1 alpha-2), default: EU");
-
-static int modparam_frameburst = 1;
-module_param_named(frameburst, modparam_frameburst, int, 0444);
-MODULE_PARM_DESC(frameburst, "enable frame bursting (default: on)");
-
-static int modparam_afterburner = 0;
-module_param_named(afterburner, modparam_afterburner, int, 0444);
-MODULE_PARM_DESC(afterburner,
- "enable afterburner aka '125 High Speed Mode' (default: off)");
-
-static int modparam_power_save = 0;
-module_param_named(power_save, modparam_power_save, int, 0444);
-MODULE_PARM_DESC(power_save,
- "set power save mode: 0=off, 1=on, 2=fast (default: off)");
-
-static int modparam_power_output = 3;
-module_param_named(power_output, modparam_power_output, int, 0444);
-MODULE_PARM_DESC(power_output,
- "set power output: 0=25%, 1=50%, 2=75%, 3=100% (default: 100%)");
-
-static int modparam_roamtrigger = -70;
-module_param_named(roamtrigger, modparam_roamtrigger, int, 0444);
-MODULE_PARM_DESC(roamtrigger,
- "set roaming dBm trigger: -80=optimize for distance, "
- "-60=bandwidth (default: -70)");
-
-static int modparam_roamdelta = 1;
-module_param_named(roamdelta, modparam_roamdelta, int, 0444);
-MODULE_PARM_DESC(roamdelta,
- "set roaming tendency: 0=aggressive, 1=moderate, "
- "2=conservative (default: moderate)");
-
-static int modparam_workaround_interval;
-module_param_named(workaround_interval, modparam_workaround_interval,
- int, 0444);
-MODULE_PARM_DESC(workaround_interval,
- "set stall workaround interval in msecs (0=disabled) (default: 0)");
-
-/* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */
-#define WL_NOISE -96 /* typical noise level in dBm */
-#define WL_SIGMAX -32 /* typical maximum signal level in dBm */
-
-
-/* Assume that Broadcom 4320 (only chipset at time of writing known to be
- * based on wireless rndis) has default txpower of 13dBm.
- * This value is from Linksys WUSB54GSC User Guide, Appendix F: Specifications.
- * 100% : 20 mW ~ 13dBm
- * 75% : 15 mW ~ 12dBm
- * 50% : 10 mW ~ 10dBm
- * 25% : 5 mW ~ 7dBm
- */
-#define BCM4320_DEFAULT_TXPOWER_DBM_100 13
-#define BCM4320_DEFAULT_TXPOWER_DBM_75 12
-#define BCM4320_DEFAULT_TXPOWER_DBM_50 10
-#define BCM4320_DEFAULT_TXPOWER_DBM_25 7
-
-/* Known device types */
-#define RNDIS_UNKNOWN 0
-#define RNDIS_BCM4320A 1
-#define RNDIS_BCM4320B 2
-
-
-/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c,
- * slightly modified for datatype endianness, etc.
- */
-#define NDIS_802_11_LENGTH_SSID 32
-#define NDIS_802_11_LENGTH_RATES 8
-#define NDIS_802_11_LENGTH_RATES_EX 16
-
-enum ndis_80211_net_type {
- NDIS_80211_TYPE_FREQ_HOP,
- NDIS_80211_TYPE_DIRECT_SEQ,
- NDIS_80211_TYPE_OFDM_A,
- NDIS_80211_TYPE_OFDM_G
-};
-
-enum ndis_80211_net_infra {
- NDIS_80211_INFRA_ADHOC,
- NDIS_80211_INFRA_INFRA,
- NDIS_80211_INFRA_AUTO_UNKNOWN
-};
-
-enum ndis_80211_auth_mode {
- NDIS_80211_AUTH_OPEN,
- NDIS_80211_AUTH_SHARED,
- NDIS_80211_AUTH_AUTO_SWITCH,
- NDIS_80211_AUTH_WPA,
- NDIS_80211_AUTH_WPA_PSK,
- NDIS_80211_AUTH_WPA_NONE,
- NDIS_80211_AUTH_WPA2,
- NDIS_80211_AUTH_WPA2_PSK
-};
-
-enum ndis_80211_encr_status {
- NDIS_80211_ENCR_WEP_ENABLED,
- NDIS_80211_ENCR_DISABLED,
- NDIS_80211_ENCR_WEP_KEY_ABSENT,
- NDIS_80211_ENCR_NOT_SUPPORTED,
- NDIS_80211_ENCR_TKIP_ENABLED,
- NDIS_80211_ENCR_TKIP_KEY_ABSENT,
- NDIS_80211_ENCR_CCMP_ENABLED,
- NDIS_80211_ENCR_CCMP_KEY_ABSENT
-};
-
-enum ndis_80211_priv_filter {
- NDIS_80211_PRIV_ACCEPT_ALL,
- NDIS_80211_PRIV_8021X_WEP
-};
-
-enum ndis_80211_status_type {
- NDIS_80211_STATUSTYPE_AUTHENTICATION,
- NDIS_80211_STATUSTYPE_MEDIASTREAMMODE,
- NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST,
- NDIS_80211_STATUSTYPE_RADIOSTATE,
-};
-
-enum ndis_80211_media_stream_mode {
- NDIS_80211_MEDIA_STREAM_OFF,
- NDIS_80211_MEDIA_STREAM_ON
-};
-
-enum ndis_80211_radio_status {
- NDIS_80211_RADIO_STATUS_ON,
- NDIS_80211_RADIO_STATUS_HARDWARE_OFF,
- NDIS_80211_RADIO_STATUS_SOFTWARE_OFF,
-};
-
-enum ndis_80211_addkey_bits {
- NDIS_80211_ADDKEY_8021X_AUTH = cpu_to_le32(1 << 28),
- NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ = cpu_to_le32(1 << 29),
- NDIS_80211_ADDKEY_PAIRWISE_KEY = cpu_to_le32(1 << 30),
- NDIS_80211_ADDKEY_TRANSMIT_KEY = cpu_to_le32(1 << 31)
-};
-
-enum ndis_80211_addwep_bits {
- NDIS_80211_ADDWEP_PERCLIENT_KEY = cpu_to_le32(1 << 30),
- NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31)
-};
-
-enum ndis_80211_power_mode {
- NDIS_80211_POWER_MODE_CAM,
- NDIS_80211_POWER_MODE_MAX_PSP,
- NDIS_80211_POWER_MODE_FAST_PSP,
-};
-
-enum ndis_80211_pmkid_cand_list_flag_bits {
- NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
-};
-
-struct ndis_80211_auth_request {
- __le32 length;
- u8 bssid[ETH_ALEN];
- u8 padding[2];
- __le32 flags;
-} __packed;
-
-struct ndis_80211_pmkid_candidate {
- u8 bssid[ETH_ALEN];
- u8 padding[2];
- __le32 flags;
-} __packed;
-
-struct ndis_80211_pmkid_cand_list {
- __le32 version;
- __le32 num_candidates;
- struct ndis_80211_pmkid_candidate candidate_list[];
-} __packed;
-
-struct ndis_80211_status_indication {
- __le32 status_type;
- union {
- __le32 media_stream_mode;
- __le32 radio_status;
- DECLARE_FLEX_ARRAY(struct ndis_80211_auth_request, auth_request);
- struct ndis_80211_pmkid_cand_list cand_list;
- } u;
-} __packed;
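
DECLARE_FLEX_ARRAY() above is what allows the flexible-array member
struct ndis_80211_auth_request auth_request[] to sit inside a union: a
bare flexible array is not permitted there by the C standard, so the
kernel macro wraps it in an unnamed structure to keep the layout legal.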
-
-struct ndis_80211_ssid {
- __le32 length;
- u8 essid[NDIS_802_11_LENGTH_SSID];
-} __packed;
-
-struct ndis_80211_conf_freq_hop {
- __le32 length;
- __le32 hop_pattern;
- __le32 hop_set;
- __le32 dwell_time;
-} __packed;
-
-struct ndis_80211_conf {
- __le32 length;
- __le32 beacon_period;
- __le32 atim_window;
- __le32 ds_config;
- struct ndis_80211_conf_freq_hop fh_config;
-} __packed;
-
-struct ndis_80211_bssid_ex {
- __le32 length;
- u8 mac[ETH_ALEN];
- u8 padding[2];
- struct ndis_80211_ssid ssid;
- __le32 privacy;
- __le32 rssi;
- __le32 net_type;
- struct ndis_80211_conf config;
- __le32 net_infra;
- u8 rates[NDIS_802_11_LENGTH_RATES_EX];
- __le32 ie_length;
- u8 ies[];
-} __packed;
-
-struct ndis_80211_bssid_list_ex {
- __le32 num_items;
- u8 bssid_data[];
-} __packed;
-
-struct ndis_80211_fixed_ies {
- u8 timestamp[8];
- __le16 beacon_interval;
- __le16 capabilities;
-} __packed;
-
-struct ndis_80211_wep_key {
- __le32 size;
- __le32 index;
- __le32 length;
- u8 material[32];
-} __packed;
-
-struct ndis_80211_key {
- __le32 size;
- __le32 index;
- __le32 length;
- u8 bssid[ETH_ALEN];
- u8 padding[6];
- u8 rsc[8];
- u8 material[32];
-} __packed;
-
-struct ndis_80211_remove_key {
- __le32 size;
- __le32 index;
- u8 bssid[ETH_ALEN];
- u8 padding[2];
-} __packed;
-
-struct ndis_config_param {
- __le32 name_offs;
- __le32 name_length;
- __le32 type;
- __le32 value_offs;
- __le32 value_length;
-} __packed;
-
-struct ndis_80211_assoc_info {
- __le32 length;
- __le16 req_ies;
- struct req_ie {
- __le16 capa;
- __le16 listen_interval;
- u8 cur_ap_address[ETH_ALEN];
- } req_ie;
- __le32 req_ie_length;
- __le32 offset_req_ies;
- __le16 resp_ies;
- struct resp_ie {
- __le16 capa;
- __le16 status_code;
- __le16 assoc_id;
- } resp_ie;
- __le32 resp_ie_length;
- __le32 offset_resp_ies;
-} __packed;
-
-struct ndis_80211_capability {
- __le32 length;
- __le32 version;
- __le32 num_pmkids;
- __le32 num_auth_encr_pair;
-} __packed;
-
-struct ndis_80211_bssid_info {
- u8 bssid[ETH_ALEN];
- u8 pmkid[16];
-} __packed;
-
-struct ndis_80211_pmkid {
- __le32 length;
- __le32 bssid_info_count;
- struct ndis_80211_bssid_info bssid_info[];
-} __packed;
-
-/*
- * private data
- */
-#define CAP_MODE_80211A 1
-#define CAP_MODE_80211B 2
-#define CAP_MODE_80211G 4
-#define CAP_MODE_MASK 7
-
-#define WORK_LINK_UP 0
-#define WORK_LINK_DOWN 1
-#define WORK_SET_MULTICAST_LIST 2
-
-#define RNDIS_WLAN_ALG_NONE 0
-#define RNDIS_WLAN_ALG_WEP (1<<0)
-#define RNDIS_WLAN_ALG_TKIP (1<<1)
-#define RNDIS_WLAN_ALG_CCMP (1<<2)
-
-#define RNDIS_WLAN_NUM_KEYS 4
-#define RNDIS_WLAN_KEY_MGMT_NONE 0
-#define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0)
-#define RNDIS_WLAN_KEY_MGMT_PSK (1<<1)
-
-#define COMMAND_BUFFER_SIZE (CONTROL_BUFFER_SIZE + sizeof(struct rndis_set))
-
-static const struct ieee80211_channel rndis_channels[] = {
- { .center_freq = 2412 },
- { .center_freq = 2417 },
- { .center_freq = 2422 },
- { .center_freq = 2427 },
- { .center_freq = 2432 },
- { .center_freq = 2437 },
- { .center_freq = 2442 },
- { .center_freq = 2447 },
- { .center_freq = 2452 },
- { .center_freq = 2457 },
- { .center_freq = 2462 },
- { .center_freq = 2467 },
- { .center_freq = 2472 },
- { .center_freq = 2484 },
-};
-
-static const struct ieee80211_rate rndis_rates[] = {
- { .bitrate = 10 },
- { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
- { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
- { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
- { .bitrate = 60 },
- { .bitrate = 90 },
- { .bitrate = 120 },
- { .bitrate = 180 },
- { .bitrate = 240 },
- { .bitrate = 360 },
- { .bitrate = 480 },
- { .bitrate = 540 }
-};
-
-static const u32 rndis_cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
- WLAN_CIPHER_SUITE_TKIP,
- WLAN_CIPHER_SUITE_CCMP,
-};
-
-struct rndis_wlan_encr_key {
- int len;
- u32 cipher;
- u8 material[32];
- u8 bssid[ETH_ALEN];
- bool pairwise;
- bool tx_key;
-};
-
-/* RNDIS device private data */
-struct rndis_wlan_private {
- struct usbnet *usbdev;
-
- struct wireless_dev wdev;
-
- struct cfg80211_scan_request *scan_request;
-
- struct workqueue_struct *workqueue;
- struct delayed_work dev_poller_work;
- struct delayed_work scan_work;
- struct work_struct work;
- struct mutex command_lock;
- unsigned long work_pending;
- int last_qual;
- s32 cqm_rssi_thold;
- u32 cqm_rssi_hyst;
- int last_cqm_event_rssi;
-
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)];
- struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)];
- u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)];
-
- int device_type;
- int caps;
- int multicast_size;
-
- /* module parameters */
- char param_country[4];
- int param_frameburst;
- int param_afterburner;
- int param_power_save;
- int param_power_output;
- int param_roamtrigger;
- int param_roamdelta;
- u32 param_workaround_interval;
-
- /* hardware state */
- bool radio_on;
- int power_mode;
- int infra_mode;
- bool connected;
- u8 bssid[ETH_ALEN];
- u32 current_command_oid;
-
- /* encryption stuff */
- u8 encr_tx_key_index;
- struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
- int wpa_version;
-
- u8 command_buffer[COMMAND_BUFFER_SIZE];
-};
-
-/*
- * cfg80211 ops
- */
-static int rndis_change_virtual_intf(struct wiphy *wiphy,
- struct net_device *dev,
- enum nl80211_iftype type,
- struct vif_params *params);
-
-static int rndis_scan(struct wiphy *wiphy,
- struct cfg80211_scan_request *request);
-
-static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
-
-static int rndis_set_tx_power(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type,
- int mbm);
-static int rndis_get_tx_power(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- int *dbm);
-
-static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme);
-
-static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code);
-
-static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params);
-
-static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
-
-static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- int link_id, u8 key_index, bool pairwise,
- const u8 *mac_addr, struct key_params *params);
-
-static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- int link_id, u8 key_index, bool pairwise,
- const u8 *mac_addr);
-
-static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- int link_id, u8 key_index, bool unicast,
- bool multicast);
-
-static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac, struct station_info *sinfo);
-
-static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
- int idx, u8 *mac, struct station_info *sinfo);
-
-static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa);
-
-static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa);
-
-static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
-
-static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
- bool enabled, int timeout);
-
-static int rndis_set_cqm_rssi_config(struct wiphy *wiphy,
- struct net_device *dev,
- s32 rssi_thold, u32 rssi_hyst);
-
-static const struct cfg80211_ops rndis_config_ops = {
- .change_virtual_intf = rndis_change_virtual_intf,
- .scan = rndis_scan,
- .set_wiphy_params = rndis_set_wiphy_params,
- .set_tx_power = rndis_set_tx_power,
- .get_tx_power = rndis_get_tx_power,
- .connect = rndis_connect,
- .disconnect = rndis_disconnect,
- .join_ibss = rndis_join_ibss,
- .leave_ibss = rndis_leave_ibss,
- .add_key = rndis_add_key,
- .del_key = rndis_del_key,
- .set_default_key = rndis_set_default_key,
- .get_station = rndis_get_station,
- .dump_station = rndis_dump_station,
- .set_pmksa = rndis_set_pmksa,
- .del_pmksa = rndis_del_pmksa,
- .flush_pmksa = rndis_flush_pmksa,
- .set_power_mgmt = rndis_set_power_mgmt,
- .set_cqm_rssi_config = rndis_set_cqm_rssi_config,
-};
-
-static void *rndis_wiphy_privid = &rndis_wiphy_privid;
-
-
-static struct rndis_wlan_private *get_rndis_wlan_priv(struct usbnet *dev)
-{
- return (struct rndis_wlan_private *)dev->driver_priv;
-}
-
-static u32 get_bcm4320_power_dbm(struct rndis_wlan_private *priv)
-{
- switch (priv->param_power_output) {
- default:
- case 3:
- return BCM4320_DEFAULT_TXPOWER_DBM_100;
- case 2:
- return BCM4320_DEFAULT_TXPOWER_DBM_75;
- case 1:
- return BCM4320_DEFAULT_TXPOWER_DBM_50;
- case 0:
- return BCM4320_DEFAULT_TXPOWER_DBM_25;
- }
-}
-
-static bool is_wpa_key(struct rndis_wlan_private *priv, u8 idx)
-{
- int cipher = priv->encr_keys[idx].cipher;
-
- return (cipher == WLAN_CIPHER_SUITE_CCMP ||
- cipher == WLAN_CIPHER_SUITE_TKIP);
-}
-
-static int rndis_cipher_to_alg(u32 cipher)
-{
- switch (cipher) {
- default:
- return RNDIS_WLAN_ALG_NONE;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- return RNDIS_WLAN_ALG_WEP;
- case WLAN_CIPHER_SUITE_TKIP:
- return RNDIS_WLAN_ALG_TKIP;
- case WLAN_CIPHER_SUITE_CCMP:
- return RNDIS_WLAN_ALG_CCMP;
- }
-}
-
-static int rndis_akm_suite_to_key_mgmt(u32 akm_suite)
-{
- switch (akm_suite) {
- default:
- return RNDIS_WLAN_KEY_MGMT_NONE;
- case WLAN_AKM_SUITE_8021X:
- return RNDIS_WLAN_KEY_MGMT_802_1X;
- case WLAN_AKM_SUITE_PSK:
- return RNDIS_WLAN_KEY_MGMT_PSK;
- }
-}
-
-#ifdef DEBUG
-static const char *oid_to_string(u32 oid)
-{
- switch (oid) {
-#define OID_STR(oid) case oid: return(#oid)
- /* from rndis_host.h */
- OID_STR(RNDIS_OID_802_3_PERMANENT_ADDRESS);
- OID_STR(RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE);
- OID_STR(RNDIS_OID_GEN_CURRENT_PACKET_FILTER);
- OID_STR(RNDIS_OID_GEN_PHYSICAL_MEDIUM);
-
- /* from rndis_wlan.c */
- OID_STR(RNDIS_OID_GEN_LINK_SPEED);
- OID_STR(RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER);
-
- OID_STR(RNDIS_OID_GEN_XMIT_OK);
- OID_STR(RNDIS_OID_GEN_RCV_OK);
- OID_STR(RNDIS_OID_GEN_XMIT_ERROR);
- OID_STR(RNDIS_OID_GEN_RCV_ERROR);
- OID_STR(RNDIS_OID_GEN_RCV_NO_BUFFER);
-
- OID_STR(RNDIS_OID_802_3_CURRENT_ADDRESS);
- OID_STR(RNDIS_OID_802_3_MULTICAST_LIST);
- OID_STR(RNDIS_OID_802_3_MAXIMUM_LIST_SIZE);
-
- OID_STR(RNDIS_OID_802_11_BSSID);
- OID_STR(RNDIS_OID_802_11_SSID);
- OID_STR(RNDIS_OID_802_11_INFRASTRUCTURE_MODE);
- OID_STR(RNDIS_OID_802_11_ADD_WEP);
- OID_STR(RNDIS_OID_802_11_REMOVE_WEP);
- OID_STR(RNDIS_OID_802_11_DISASSOCIATE);
- OID_STR(RNDIS_OID_802_11_AUTHENTICATION_MODE);
- OID_STR(RNDIS_OID_802_11_PRIVACY_FILTER);
- OID_STR(RNDIS_OID_802_11_BSSID_LIST_SCAN);
- OID_STR(RNDIS_OID_802_11_ENCRYPTION_STATUS);
- OID_STR(RNDIS_OID_802_11_ADD_KEY);
- OID_STR(RNDIS_OID_802_11_REMOVE_KEY);
- OID_STR(RNDIS_OID_802_11_ASSOCIATION_INFORMATION);
- OID_STR(RNDIS_OID_802_11_CAPABILITY);
- OID_STR(RNDIS_OID_802_11_PMKID);
- OID_STR(RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED);
- OID_STR(RNDIS_OID_802_11_NETWORK_TYPE_IN_USE);
- OID_STR(RNDIS_OID_802_11_TX_POWER_LEVEL);
- OID_STR(RNDIS_OID_802_11_RSSI);
- OID_STR(RNDIS_OID_802_11_RSSI_TRIGGER);
- OID_STR(RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD);
- OID_STR(RNDIS_OID_802_11_RTS_THRESHOLD);
- OID_STR(RNDIS_OID_802_11_SUPPORTED_RATES);
- OID_STR(RNDIS_OID_802_11_CONFIGURATION);
- OID_STR(RNDIS_OID_802_11_POWER_MODE);
- OID_STR(RNDIS_OID_802_11_BSSID_LIST);
-#undef OID_STR
- }
-
- return "?";
-}
-#else
-static const char *oid_to_string(u32 oid)
-{
- return "?";
-}
-#endif
-
-/* translate error code */
-static int rndis_error_status(__le32 rndis_status)
-{
- int ret = -EINVAL;
- switch (le32_to_cpu(rndis_status)) {
- case RNDIS_STATUS_SUCCESS:
- ret = 0;
- break;
- case RNDIS_STATUS_FAILURE:
- case RNDIS_STATUS_INVALID_DATA:
- ret = -EINVAL;
- break;
- case RNDIS_STATUS_NOT_SUPPORTED:
- ret = -EOPNOTSUPP;
- break;
- case RNDIS_STATUS_ADAPTER_NOT_READY:
- case RNDIS_STATUS_ADAPTER_NOT_OPEN:
- ret = -EBUSY;
- break;
- }
- return ret;
-}
-
-static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
- union {
- void *buf;
- struct rndis_msg_hdr *header;
- struct rndis_query *get;
- struct rndis_query_c *get_c;
- } u;
- int ret;
- size_t buflen, resplen, respoffs, copylen;
-
- buflen = *len + sizeof(*u.get);
- if (buflen < CONTROL_BUFFER_SIZE)
- buflen = CONTROL_BUFFER_SIZE;
-
- if (buflen > COMMAND_BUFFER_SIZE) {
- u.buf = kmalloc(buflen, GFP_KERNEL);
- if (!u.buf)
- return -ENOMEM;
- } else {
- u.buf = priv->command_buffer;
- }
-
- mutex_lock(&priv->command_lock);
-
- memset(u.get, 0, sizeof *u.get);
- u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
- u.get->msg_len = cpu_to_le32(sizeof *u.get);
- u.get->oid = cpu_to_le32(oid);
-
- priv->current_command_oid = oid;
- ret = rndis_command(dev, u.header, buflen);
- priv->current_command_oid = 0;
- if (ret < 0)
- netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
- __func__, oid_to_string(oid), ret,
- le32_to_cpu(u.get_c->status));
-
- if (ret == 0) {
- resplen = le32_to_cpu(u.get_c->len);
- respoffs = le32_to_cpu(u.get_c->offset) + 8;
-
- if (respoffs > buflen) {
- /* Device returned data offset outside buffer, error. */
- netdev_dbg(dev->net,
- "%s(%s): received invalid data offset: %zu > %zu\n",
- __func__, oid_to_string(oid), respoffs, buflen);
-
- ret = -EINVAL;
- goto exit_unlock;
- }
-
- copylen = min(resplen, buflen - respoffs);
-
- if (copylen > *len)
- copylen = *len;
-
- memcpy(data, u.buf + respoffs, copylen);
-
- *len = resplen;
-
- ret = rndis_error_status(u.get_c->status);
- if (ret < 0)
- netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
- __func__, oid_to_string(oid),
- le32_to_cpu(u.get_c->status), ret);
- }
-
-exit_unlock:
- mutex_unlock(&priv->command_lock);
-
- if (u.buf != priv->command_buffer)
- kfree(u.buf);
- return ret;
-}
-
-static int rndis_set_oid(struct usbnet *dev, u32 oid, const void *data,
- int len)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
- union {
- void *buf;
- struct rndis_msg_hdr *header;
- struct rndis_set *set;
- struct rndis_set_c *set_c;
- } u;
- int ret, buflen;
-
- buflen = len + sizeof(*u.set);
- if (buflen < CONTROL_BUFFER_SIZE)
- buflen = CONTROL_BUFFER_SIZE;
-
- if (buflen > COMMAND_BUFFER_SIZE) {
- u.buf = kmalloc(buflen, GFP_KERNEL);
- if (!u.buf)
- return -ENOMEM;
- } else {
- u.buf = priv->command_buffer;
- }
-
- mutex_lock(&priv->command_lock);
-
- memset(u.set, 0, sizeof *u.set);
- u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
- u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len);
- u.set->oid = cpu_to_le32(oid);
- u.set->len = cpu_to_le32(len);
- u.set->offset = cpu_to_le32(sizeof(*u.set) - 8);
- u.set->handle = cpu_to_le32(0);
- memcpy(u.buf + sizeof(*u.set), data, len);
-
- priv->current_command_oid = oid;
- ret = rndis_command(dev, u.header, buflen);
- priv->current_command_oid = 0;
- if (ret < 0)
- netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
- __func__, oid_to_string(oid), ret,
- le32_to_cpu(u.set_c->status));
-
- if (ret == 0) {
- ret = rndis_error_status(u.set_c->status);
-
- if (ret < 0)
- netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
- __func__, oid_to_string(oid),
- le32_to_cpu(u.set_c->status), ret);
- }
-
- mutex_unlock(&priv->command_lock);
-
- if (u.buf != priv->command_buffer)
- kfree(u.buf);
- return ret;
-}
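
rndis_query_oid() and rndis_set_oid() share one buffer strategy: requests
that fit in COMMAND_BUFFER_SIZE reuse the preallocated
priv->command_buffer under command_lock, and only oversized requests pay
for a kmalloc()/kfree() round trip, which is why both functions compare
u.buf against priv->command_buffer before freeing.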
-
-static int rndis_reset(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct rndis_reset *reset;
- int ret;
-
- mutex_lock(&priv->command_lock);
-
- reset = (void *)priv->command_buffer;
- memset(reset, 0, sizeof(*reset));
- reset->msg_type = cpu_to_le32(RNDIS_MSG_RESET);
- reset->msg_len = cpu_to_le32(sizeof(*reset));
- priv->current_command_oid = 0;
- ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE);
-
- mutex_unlock(&priv->command_lock);
-
- if (ret < 0)
- return ret;
- return 0;
-}
-
-/*
- * Specs say that we can set config parameters only soon after device
- * initialization.
- * value_type: 0 = u32, 2 = unicode string
- */
-static int rndis_set_config_parameter(struct usbnet *dev, char *param,
- int value_type, void *value)
-{
- struct ndis_config_param *infobuf;
- int value_len, info_len, param_len, ret, i;
- __le16 *unibuf;
- __le32 *dst_value;
-
- if (value_type == 0)
- value_len = sizeof(__le32);
- else if (value_type == 2)
- value_len = strlen(value) * sizeof(__le16);
- else
- return -EINVAL;
-
- param_len = strlen(param) * sizeof(__le16);
- info_len = sizeof(*infobuf) + param_len + value_len;
-
-#ifdef DEBUG
- info_len += 12;
-#endif
- infobuf = kmalloc(info_len, GFP_KERNEL);
- if (!infobuf)
- return -ENOMEM;
-
-#ifdef DEBUG
- info_len -= 12;
- /* extra 12 bytes are for padding (debug output) */
- memset(infobuf, 0xCC, info_len + 12);
-#endif
-
- if (value_type == 2)
- netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n",
- param, (u8 *)value);
- else
- netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n",
- param, *(u32 *)value);
-
- infobuf->name_offs = cpu_to_le32(sizeof(*infobuf));
- infobuf->name_length = cpu_to_le32(param_len);
- infobuf->type = cpu_to_le32(value_type);
- infobuf->value_offs = cpu_to_le32(sizeof(*infobuf) + param_len);
- infobuf->value_length = cpu_to_le32(value_len);
-
- /* simple string to unicode string conversion */
- unibuf = (void *)infobuf + sizeof(*infobuf);
- for (i = 0; i < param_len / sizeof(__le16); i++)
- unibuf[i] = cpu_to_le16(param[i]);
-
- if (value_type == 2) {
- unibuf = (void *)infobuf + sizeof(*infobuf) + param_len;
- for (i = 0; i < value_len / sizeof(__le16); i++)
- unibuf[i] = cpu_to_le16(((u8 *)value)[i]);
- } else {
- dst_value = (void *)infobuf + sizeof(*infobuf) + param_len;
- *dst_value = cpu_to_le32(*(u32 *)value);
- }
-
-#ifdef DEBUG
- netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len);
- for (i = 0; i < info_len; i += 12) {
- u32 *tmp = (u32 *)((u8 *)infobuf + i);
- netdev_dbg(dev->net, "%08X:%08X:%08X\n",
- cpu_to_be32(tmp[0]),
- cpu_to_be32(tmp[1]),
- cpu_to_be32(tmp[2]));
- }
-#endif
-
- ret = rndis_set_oid(dev, RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER,
- infobuf, info_len);
- if (ret != 0)
- netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
- ret);
-
- kfree(infobuf);
- return ret;
-}
-
-static int rndis_set_config_parameter_str(struct usbnet *dev,
- char *param, char *value)
-{
- return rndis_set_config_parameter(dev, param, 2, value);
-}
-
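-/*
- * Usage sketch for the helper above (hypothetical caller; "Country" is
- * only an illustrative parameter name, not necessarily one the firmware
- * accepts):
- *
- *	ret = rndis_set_config_parameter_str(dev, "Country", "EU");
- *	if (ret < 0)
- *		netdev_warn(dev->net, "could not set Country\n");
- */
-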
-/*
- * data conversion functions
- */
-static int level_to_qual(int level)
-{
- int qual = 100 * (level - WL_NOISE) / (WL_SIGMAX - WL_NOISE);
- return qual >= 0 ? (qual <= 100 ? qual : 100) : 0;
-}
-
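-/*
- * Worked example for level_to_qual() (assuming the driver's usual
- * WL_NOISE = -96 dBm and WL_SIGMAX = -32 dBm bounds): a level of
- * -64 dBm gives 100 * (-64 - (-96)) / (-32 - (-96)) = 100 * 32 / 64
- * = 50, and the result is clamped to the 0..100 range.
- */
-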
-/*
- * common functions
- */
-static int set_infra_mode(struct usbnet *usbdev, int mode);
-static void restore_keys(struct usbnet *usbdev);
-static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
- bool *matched);
-
-static int rndis_start_bssid_list_scan(struct usbnet *usbdev)
-{
- __le32 tmp;
-
- /* Note: RNDIS_OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
- tmp = cpu_to_le32(1);
- return rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST_SCAN, &tmp,
- sizeof(tmp));
-}
-
-static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- int ret;
-
- ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_SSID,
- ssid, sizeof(*ssid));
- if (ret < 0) {
- netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
- return ret;
- }
- if (ret == 0) {
- priv->radio_on = true;
- netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
- }
-
- return ret;
-}
-
-static int set_bssid(struct usbnet *usbdev, const u8 *bssid)
-{
- int ret;
-
- ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID,
- bssid, ETH_ALEN);
- if (ret < 0) {
- netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
- bssid, ret);
- return ret;
- }
-
- return ret;
-}
-
-static int clear_bssid(struct usbnet *usbdev)
-{
- static const u8 broadcast_mac[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
- return set_bssid(usbdev, broadcast_mac);
-}
-
-static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
-{
- int ret, len;
-
- len = ETH_ALEN;
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID,
- bssid, &len);
-
- if (ret != 0)
- eth_zero_addr(bssid);
-
- return ret;
-}
-
-static int get_association_info(struct usbnet *usbdev,
- struct ndis_80211_assoc_info *info, int len)
-{
- return rndis_query_oid(usbdev,
- RNDIS_OID_802_11_ASSOCIATION_INFORMATION,
- info, &len);
-}
-
-static bool is_associated(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- u8 bssid[ETH_ALEN];
-
- if (!priv->radio_on)
- return false;
-
- return (get_bssid(usbdev, bssid) == 0 && !is_zero_ether_addr(bssid));
-}
-
-static int disassociate(struct usbnet *usbdev, bool reset_ssid)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_ssid ssid;
- int i, ret = 0;
-
- if (priv->radio_on) {
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_DISASSOCIATE,
- NULL, 0);
- if (ret == 0) {
- priv->radio_on = false;
- netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
- __func__);
-
- if (reset_ssid)
- msleep(100);
- }
- }
-
- /* disassociating causes the radio to be turned off; if reset_ssid
- * is given, set a random SSID to re-enable the radio */
- if (reset_ssid) {
- /* Set device to infrastructure mode so we don't get ad-hoc
- * 'media connect' indications with the random ssid.
- */
- set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
-
- ssid.length = cpu_to_le32(sizeof(ssid.essid));
- get_random_bytes(&ssid.essid[2], sizeof(ssid.essid)-2);
- ssid.essid[0] = 0x1;
- ssid.essid[1] = 0xff;
- for (i = 2; i < sizeof(ssid.essid); i++)
- ssid.essid[i] = 0x1 + (ssid.essid[i] * 0xfe / 0xff);
- ret = set_essid(usbdev, &ssid);
- }
- return ret;
-}
-
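-/*
- * The random-SSID byte mapping in disassociate() is worth spelling out:
- * for a random byte b in 0..255, 0x1 + (b * 0xfe / 0xff) lands in
- * 1..255, so the bogus SSID never contains a 0x00 byte (e.g. b = 0
- * maps to 0x01 and b = 255 maps to 0xff).
- */
-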
-static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
- enum nl80211_auth_type auth_type, int keymgmt)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- __le32 tmp;
- int auth_mode, ret;
-
- netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n",
- __func__, wpa_version, auth_type, keymgmt);
-
- if (wpa_version & NL80211_WPA_VERSION_2) {
- if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X)
- auth_mode = NDIS_80211_AUTH_WPA2;
- else
- auth_mode = NDIS_80211_AUTH_WPA2_PSK;
- } else if (wpa_version & NL80211_WPA_VERSION_1) {
- if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X)
- auth_mode = NDIS_80211_AUTH_WPA;
- else if (keymgmt & RNDIS_WLAN_KEY_MGMT_PSK)
- auth_mode = NDIS_80211_AUTH_WPA_PSK;
- else
- auth_mode = NDIS_80211_AUTH_WPA_NONE;
- } else if (auth_type == NL80211_AUTHTYPE_SHARED_KEY)
- auth_mode = NDIS_80211_AUTH_SHARED;
- else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
- auth_mode = NDIS_80211_AUTH_OPEN;
- else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC)
- auth_mode = NDIS_80211_AUTH_AUTO_SWITCH;
- else
- return -ENOTSUPP;
-
- tmp = cpu_to_le32(auth_mode);
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_AUTHENTICATION_MODE,
- &tmp, sizeof(tmp));
- if (ret != 0) {
- netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n",
- ret);
- return ret;
- }
-
- priv->wpa_version = wpa_version;
-
- return 0;
-}
-
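-/*
- * The mapping implemented in set_auth_mode(), in table form (WPA
- * version and key management are checked before the plain 802.11
- * auth algorithms):
- *
- *	WPA2 + 802.1X		-> NDIS_80211_AUTH_WPA2
- *	WPA2 otherwise		-> NDIS_80211_AUTH_WPA2_PSK
- *	WPA1 + 802.1X		-> NDIS_80211_AUTH_WPA
- *	WPA1 + PSK		-> NDIS_80211_AUTH_WPA_PSK
- *	WPA1 otherwise		-> NDIS_80211_AUTH_WPA_NONE
- *	shared key auth		-> NDIS_80211_AUTH_SHARED
- *	open system auth	-> NDIS_80211_AUTH_OPEN
- *	automatic auth		-> NDIS_80211_AUTH_AUTO_SWITCH
- *	anything else		-> -ENOTSUPP
- */
-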
-static int set_priv_filter(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- __le32 tmp;
-
- netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n",
- __func__, priv->wpa_version);
-
- if (priv->wpa_version & NL80211_WPA_VERSION_2 ||
- priv->wpa_version & NL80211_WPA_VERSION_1)
- tmp = cpu_to_le32(NDIS_80211_PRIV_8021X_WEP);
- else
- tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL);
-
- return rndis_set_oid(usbdev,
- RNDIS_OID_802_11_PRIVACY_FILTER, &tmp,
- sizeof(tmp));
-}
-
-static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
-{
- __le32 tmp;
- int encr_mode, ret;
-
- netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n",
- __func__, pairwise, groupwise);
-
- if (pairwise & RNDIS_WLAN_ALG_CCMP)
- encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
- else if (pairwise & RNDIS_WLAN_ALG_TKIP)
- encr_mode = NDIS_80211_ENCR_TKIP_ENABLED;
- else if (pairwise & RNDIS_WLAN_ALG_WEP)
- encr_mode = NDIS_80211_ENCR_WEP_ENABLED;
- else if (groupwise & RNDIS_WLAN_ALG_CCMP)
- encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
- else if (groupwise & RNDIS_WLAN_ALG_TKIP)
- encr_mode = NDIS_80211_ENCR_TKIP_ENABLED;
- else
- encr_mode = NDIS_80211_ENCR_DISABLED;
-
- tmp = cpu_to_le32(encr_mode);
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_ENCRYPTION_STATUS, &tmp,
- sizeof(tmp));
- if (ret != 0) {
- netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static int set_infra_mode(struct usbnet *usbdev, int mode)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- __le32 tmp;
- int ret;
-
- netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n",
- __func__, priv->infra_mode);
-
- tmp = cpu_to_le32(mode);
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_INFRASTRUCTURE_MODE,
- &tmp, sizeof(tmp));
- if (ret != 0) {
- netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n",
- ret);
- return ret;
- }
-
- /* NDIS drivers clear keys when the infrastructure mode is
- * changed, but Linux tools assume otherwise, so restore
- * the keys. */
- restore_keys(usbdev);
-
- priv->infra_mode = mode;
- return 0;
-}
-
-static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
-{
- __le32 tmp;
-
- netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
-
- if (rts_threshold == -1 || rts_threshold > 2347)
- rts_threshold = 2347;
-
- tmp = cpu_to_le32(rts_threshold);
- return rndis_set_oid(usbdev,
- RNDIS_OID_802_11_RTS_THRESHOLD,
- &tmp, sizeof(tmp));
-}
-
-static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
-{
- __le32 tmp;
-
- netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold);
-
- if (frag_threshold < 256 || frag_threshold > 2346)
- frag_threshold = 2346;
-
- tmp = cpu_to_le32(frag_threshold);
- return rndis_set_oid(usbdev,
- RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD,
- &tmp, sizeof(tmp));
-}
-
-static void set_default_iw_params(struct usbnet *usbdev)
-{
- set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
- set_auth_mode(usbdev, 0, NL80211_AUTHTYPE_OPEN_SYSTEM,
- RNDIS_WLAN_KEY_MGMT_NONE);
- set_priv_filter(usbdev);
- set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE);
-}
-
-static int deauthenticate(struct usbnet *usbdev)
-{
- int ret;
-
- ret = disassociate(usbdev, true);
- set_default_iw_params(usbdev);
- return ret;
-}
-
-static int set_channel(struct usbnet *usbdev, int channel)
-{
- struct ndis_80211_conf config;
- unsigned int dsconfig;
- int len, ret;
-
- netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel);
-
- /* this OID is valid only when not associated */
- if (is_associated(usbdev))
- return 0;
-
- dsconfig = 1000 *
- ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
-
- len = sizeof(config);
- ret = rndis_query_oid(usbdev,
- RNDIS_OID_802_11_CONFIGURATION,
- &config, &len);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
- __func__);
- return ret;
- }
-
- config.ds_config = cpu_to_le32(dsconfig);
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_CONFIGURATION,
- &config, sizeof(config));
-
- netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);
-
- return ret;
-}
-
-static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
- u32 *beacon_period)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ieee80211_channel *channel;
- struct ndis_80211_conf config;
- int len, ret;
-
- /* Get channel and beacon interval */
- len = sizeof(config);
- ret = rndis_query_oid(usbdev,
- RNDIS_OID_802_11_CONFIGURATION,
- &config, &len);
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_CONFIGURATION -> %d\n",
- __func__, ret);
- if (ret < 0)
- return NULL;
-
- channel = ieee80211_get_channel(priv->wdev.wiphy,
- KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
- if (!channel)
- return NULL;
-
- if (beacon_period)
- *beacon_period = le32_to_cpu(config.beacon_period);
- return channel;
-}
-
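-/*
- * ds_config carries the DS channel frequency in kHz, hence the
- * 1000 * ieee80211_channel_to_frequency() in set_channel() and the
- * KHZ_TO_MHZ() above: e.g. channel 6 <-> 2437 MHz <-> 2437000 kHz.
- */
-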
-/* index must be 0 - N, as per NDIS */
-static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
- u8 index)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_wep_key ndis_key;
- u32 cipher;
- int ret;
-
- netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n",
- __func__, index, key_len);
-
- if (index >= RNDIS_WLAN_NUM_KEYS)
- return -EINVAL;
-
- if (key_len == 5)
- cipher = WLAN_CIPHER_SUITE_WEP40;
- else if (key_len == 13)
- cipher = WLAN_CIPHER_SUITE_WEP104;
- else
- return -EINVAL;
-
- memset(&ndis_key, 0, sizeof(ndis_key));
-
- ndis_key.size = cpu_to_le32(sizeof(ndis_key));
- ndis_key.length = cpu_to_le32(key_len);
- ndis_key.index = cpu_to_le32(index);
- memcpy(&ndis_key.material, key, key_len);
-
- if (index == priv->encr_tx_key_index) {
- ndis_key.index |= NDIS_80211_ADDWEP_TRANSMIT_KEY;
- ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP,
- RNDIS_WLAN_ALG_NONE);
- if (ret)
- netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n",
- ret);
- }
-
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_ADD_WEP, &ndis_key,
- sizeof(ndis_key));
- if (ret != 0) {
- netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n",
- index + 1, ret);
- return ret;
- }
-
- priv->encr_keys[index].len = key_len;
- priv->encr_keys[index].cipher = cipher;
- memcpy(&priv->encr_keys[index].material, key, key_len);
- eth_broadcast_addr(priv->encr_keys[index].bssid);
-
- return 0;
-}
-
-static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
- u8 index, const u8 *addr, const u8 *rx_seq,
- int seq_len, u32 cipher, __le32 flags)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_key ndis_key;
- bool is_addr_ok;
- int ret;
-
- if (index >= RNDIS_WLAN_NUM_KEYS) {
- netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
- __func__, index);
- return -EINVAL;
- }
- if (key_len > sizeof(ndis_key.material) || key_len < 0) {
- netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n",
- __func__, key_len);
- return -EINVAL;
- }
- if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) {
- if (!rx_seq || seq_len <= 0) {
- netdev_dbg(usbdev->net, "%s(): recv seq flag without buffer\n",
- __func__);
- return -EINVAL;
- }
- if (rx_seq && seq_len > sizeof(ndis_key.rsc)) {
- netdev_dbg(usbdev->net, "%s(): too big recv seq buffer\n", __func__);
- return -EINVAL;
- }
- }
-
- is_addr_ok = addr && !is_zero_ether_addr(addr) &&
- !is_broadcast_ether_addr(addr);
- if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) {
- netdev_dbg(usbdev->net, "%s(): pairwise but bssid invalid (%pM)\n",
- __func__, addr);
- return -EINVAL;
- }
-
- netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n",
- __func__, index,
- !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
- !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
- !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
-
- memset(&ndis_key, 0, sizeof(ndis_key));
-
- ndis_key.size = cpu_to_le32(sizeof(ndis_key) -
- sizeof(ndis_key.material) + key_len);
- ndis_key.length = cpu_to_le32(key_len);
- ndis_key.index = cpu_to_le32(index) | flags;
-
- if (cipher == WLAN_CIPHER_SUITE_TKIP && key_len == 32) {
- /* wpa_supplicant gives us the Michael MIC RX/TX keys in a
- * different order than the NDIS spec, so swap the order here. */
- memcpy(ndis_key.material, key, 16);
- memcpy(ndis_key.material + 16, key + 24, 8);
- memcpy(ndis_key.material + 24, key + 16, 8);
- } else
- memcpy(ndis_key.material, key, key_len);
-
- if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)
- memcpy(ndis_key.rsc, rx_seq, seq_len);
-
- if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) {
- /* pairwise key */
- memcpy(ndis_key.bssid, addr, ETH_ALEN);
- } else {
- /* group key */
- if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- eth_broadcast_addr(ndis_key.bssid);
- else
- get_bssid(usbdev, ndis_key.bssid);
- }
-
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_ADD_KEY, &ndis_key,
- le32_to_cpu(ndis_key.size));
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_ADD_KEY -> %08X\n",
- __func__, ret);
- if (ret != 0)
- return ret;
-
- memset(&priv->encr_keys[index], 0, sizeof(priv->encr_keys[index]));
- priv->encr_keys[index].len = key_len;
- priv->encr_keys[index].cipher = cipher;
- memcpy(&priv->encr_keys[index].material, key, key_len);
- if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY)
- memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN);
- else
- eth_broadcast_addr(priv->encr_keys[index].bssid);
-
- if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
- priv->encr_tx_key_index = index;
-
- return 0;
-}
-
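-/*
- * Byte layout of the 32-byte TKIP key swap in add_wpa_key() (a sketch;
- * which half is the RX and which the TX Michael MIC key follows from
- * the wpa_supplicant vs. NDIS ordering noted in the code):
- *
- *	key:      TK[0..15] | MIC-A[16..23] | MIC-B[24..31]
- *	material: TK[0..15] | MIC-B[16..23] | MIC-A[24..31]
- */
-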
-static int restore_key(struct usbnet *usbdev, u8 key_idx)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct rndis_wlan_encr_key key;
-
- if (is_wpa_key(priv, key_idx))
- return 0;
-
- key = priv->encr_keys[key_idx];
-
- netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len);
-
- if (key.len == 0)
- return 0;
-
- return add_wep_key(usbdev, key.material, key.len, key_idx);
-}
-
-static void restore_keys(struct usbnet *usbdev)
-{
- int i;
-
- for (i = 0; i < 4; i++)
- restore_key(usbdev, i);
-}
-
-static void clear_key(struct rndis_wlan_private *priv, u8 idx)
-{
- memset(&priv->encr_keys[idx], 0, sizeof(priv->encr_keys[idx]));
-}
-
-/* remove_key is for both wep and wpa */
-static int remove_key(struct usbnet *usbdev, u8 index, const u8 *bssid)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_remove_key remove_key;
- __le32 keyindex;
- bool is_wpa;
- int ret;
-
- if (index >= RNDIS_WLAN_NUM_KEYS)
- return -ENOENT;
-
- if (priv->encr_keys[index].len == 0)
- return 0;
-
- is_wpa = is_wpa_key(priv, index);
-
- netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n",
- __func__, index, is_wpa ? "wpa" : "wep",
- priv->encr_keys[index].len);
-
- clear_key(priv, index);
-
- if (is_wpa) {
- remove_key.size = cpu_to_le32(sizeof(remove_key));
- remove_key.index = cpu_to_le32(index);
- if (bssid) {
- /* pairwise key */
- if (!is_broadcast_ether_addr(bssid))
- remove_key.index |=
- NDIS_80211_ADDKEY_PAIRWISE_KEY;
- memcpy(remove_key.bssid, bssid,
- sizeof(remove_key.bssid));
- } else
- memset(remove_key.bssid, 0xff,
- sizeof(remove_key.bssid));
-
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_REMOVE_KEY,
- &remove_key, sizeof(remove_key));
- if (ret != 0)
- return ret;
- } else {
- keyindex = cpu_to_le32(index);
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_11_REMOVE_WEP,
- &keyindex, sizeof(keyindex));
- if (ret != 0) {
- netdev_warn(usbdev->net,
- "removing encryption key %d failed (%08X)\n",
- index, ret);
- return ret;
- }
- }
-
- /* if it is the transmit key, disable encryption */
- if (index == priv->encr_tx_key_index)
- set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE);
-
- return 0;
-}
-
-static void set_multicast_list(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct netdev_hw_addr *ha;
- __le32 filter, basefilter;
- int ret;
- char *mc_addrs = NULL;
- int mc_count;
-
- basefilter = filter = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED |
- RNDIS_PACKET_TYPE_BROADCAST);
-
- if (usbdev->net->flags & IFF_PROMISC) {
- filter |= cpu_to_le32(RNDIS_PACKET_TYPE_PROMISCUOUS |
- RNDIS_PACKET_TYPE_ALL_LOCAL);
- } else if (usbdev->net->flags & IFF_ALLMULTI) {
- filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
- }
-
- if (filter != basefilter)
- goto set_filter;
-
- /*
- * The mc list must be accessed while holding the lock, so copy the
- * addresses to a local buffer first.
- */
- netif_addr_lock_bh(usbdev->net);
- mc_count = netdev_mc_count(usbdev->net);
- if (mc_count > priv->multicast_size) {
- filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
- } else if (mc_count) {
- int i = 0;
-
- mc_addrs = kmalloc_array(mc_count, ETH_ALEN, GFP_ATOMIC);
- if (!mc_addrs) {
- netif_addr_unlock_bh(usbdev->net);
- return;
- }
-
- netdev_for_each_mc_addr(ha, usbdev->net)
- memcpy(mc_addrs + i++ * ETH_ALEN,
- ha->addr, ETH_ALEN);
- }
- netif_addr_unlock_bh(usbdev->net);
-
- if (filter != basefilter)
- goto set_filter;
-
- if (mc_count) {
- ret = rndis_set_oid(usbdev,
- RNDIS_OID_802_3_MULTICAST_LIST,
- mc_addrs, mc_count * ETH_ALEN);
- kfree(mc_addrs);
- if (ret == 0)
- filter |= cpu_to_le32(RNDIS_PACKET_TYPE_MULTICAST);
- else
- filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
-
- netdev_dbg(usbdev->net, "RNDIS_OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
- mc_count, priv->multicast_size, ret);
- }
-
-set_filter:
- ret = rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
- sizeof(filter));
- if (ret < 0) {
- netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
- le32_to_cpu(filter));
- }
-
- netdev_dbg(usbdev->net, "RNDIS_OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
- le32_to_cpu(filter), ret);
-}
-
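-/*
- * The packet filter composed in set_multicast_list(), summarized:
- *
- *	base:			DIRECTED | BROADCAST
- *	IFF_PROMISC:		+ PROMISCUOUS | ALL_LOCAL
- *	IFF_ALLMULTI:		+ ALL_MULTICAST
- *	mc list fits:		+ MULTICAST (after uploading the list)
- *	list too big or
- *	upload failed:		+ ALL_MULTICAST
- */
-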
-#ifdef DEBUG
-static void debug_print_pmkids(struct usbnet *usbdev,
- struct ndis_80211_pmkid *pmkids,
- const char *func_str)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- int i, len, count, max_pmkids, entry_len;
-
- max_pmkids = priv->wdev.wiphy->max_num_pmkids;
- len = le32_to_cpu(pmkids->length);
- count = le32_to_cpu(pmkids->bssid_info_count);
-
- entry_len = (count > 0) ? (len - sizeof(*pmkids)) / count : -1;
-
- netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
- "%d)\n", func_str, count, len, entry_len);
-
- if (count > max_pmkids)
- count = max_pmkids;
-
- for (i = 0; i < count; i++) {
- u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;
-
- netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
- "pmkid: %08X:%08X:%08X:%08X\n",
- func_str, pmkids->bssid_info[i].bssid,
- cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
- cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
- }
-}
-#else
-static void debug_print_pmkids(struct usbnet *usbdev,
- struct ndis_80211_pmkid *pmkids,
- const char *func_str)
-{
- return;
-}
-#endif
-
-static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_pmkid *pmkids;
- int len, ret, max_pmkids;
-
- max_pmkids = priv->wdev.wiphy->max_num_pmkids;
- len = struct_size(pmkids, bssid_info, max_pmkids);
-
- pmkids = kzalloc(len, GFP_KERNEL);
- if (!pmkids)
- return ERR_PTR(-ENOMEM);
-
- pmkids->length = cpu_to_le32(len);
- pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
-
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_PMKID,
- pmkids, &len);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d)"
- " -> %d\n", __func__, len, max_pmkids, ret);
-
- kfree(pmkids);
- return ERR_PTR(ret);
- }
-
- if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
- pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
-
- debug_print_pmkids(usbdev, pmkids, __func__);
-
- return pmkids;
-}
-
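-/*
- * Sizing note for the buffer above: struct_size(pmkids, bssid_info, n)
- * evaluates to sizeof(*pmkids) + n * sizeof(pmkids->bssid_info[0])
- * (with overflow checking), so 'length' always covers the header plus
- * n trailing bssid_info entries.
- */
-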
-static int set_device_pmkids(struct usbnet *usbdev,
- struct ndis_80211_pmkid *pmkids)
-{
- int ret, len, num_pmkids;
-
- num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
- len = struct_size(pmkids, bssid_info, num_pmkids);
- pmkids->length = cpu_to_le32(len);
-
- debug_print_pmkids(usbdev, pmkids, __func__);
-
- ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID, pmkids,
- le32_to_cpu(pmkids->length));
- if (ret < 0) {
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d) -> %d"
- "\n", __func__, len, num_pmkids, ret);
- }
-
- kfree(pmkids);
- return ret;
-}
-
-static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
- struct ndis_80211_pmkid *pmkids,
- struct cfg80211_pmksa *pmksa,
- int max_pmkids)
-{
- int i, err;
- unsigned int count;
-
- count = le32_to_cpu(pmkids->bssid_info_count);
-
- if (count > max_pmkids)
- count = max_pmkids;
-
- for (i = 0; i < count; i++)
- if (ether_addr_equal(pmkids->bssid_info[i].bssid,
- pmksa->bssid))
- break;
-
- /* pmkid not found */
- if (i == count) {
- netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
- __func__, pmksa->bssid);
- err = -ENOENT;
- goto error;
- }
-
- for (; i + 1 < count; i++)
- pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
-
- count--;
- pmkids->length = cpu_to_le32(struct_size(pmkids, bssid_info, count));
- pmkids->bssid_info_count = cpu_to_le32(count);
-
- return pmkids;
-error:
- kfree(pmkids);
- return ERR_PTR(err);
-}
-
-static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
- struct ndis_80211_pmkid *pmkids,
- struct cfg80211_pmksa *pmksa,
- int max_pmkids)
-{
- struct ndis_80211_pmkid *new_pmkids;
- int i, err, newlen;
- unsigned int count;
-
- count = le32_to_cpu(pmkids->bssid_info_count);
-
- if (count > max_pmkids)
- count = max_pmkids;
-
- /* update with new pmkid */
- for (i = 0; i < count; i++) {
- if (!ether_addr_equal(pmkids->bssid_info[i].bssid,
- pmksa->bssid))
- continue;
-
- memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
- WLAN_PMKID_LEN);
-
- return pmkids;
- }
-
- /* out of space, return error */
- if (i == max_pmkids) {
- netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
- err = -ENOSPC;
- goto error;
- }
-
- /* add new pmkid */
- newlen = struct_size(pmkids, bssid_info, count + 1);
-
- new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
- if (!new_pmkids) {
- err = -ENOMEM;
- goto error;
- }
- pmkids = new_pmkids;
-
- pmkids->length = cpu_to_le32(newlen);
- pmkids->bssid_info_count = cpu_to_le32(count + 1);
-
- memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
- memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
-
- return pmkids;
-error:
- kfree(pmkids);
- return ERR_PTR(err);
-}
-
-/*
- * cfg80211 ops
- */
-static int rndis_change_virtual_intf(struct wiphy *wiphy,
- struct net_device *dev,
- enum nl80211_iftype type,
- struct vif_params *params)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- int mode;
-
- switch (type) {
- case NL80211_IFTYPE_ADHOC:
- mode = NDIS_80211_INFRA_ADHOC;
- break;
- case NL80211_IFTYPE_STATION:
- mode = NDIS_80211_INFRA_INFRA;
- break;
- default:
- return -EINVAL;
- }
-
- priv->wdev.iftype = type;
-
- return set_infra_mode(usbdev, mode);
-}
-
-static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- int err;
-
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
- err = set_frag_threshold(usbdev, wiphy->frag_threshold);
- if (err < 0)
- return err;
- }
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- err = set_rts_threshold(usbdev, wiphy->rts_threshold);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-static int rndis_set_tx_power(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type,
- int mbm)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- netdev_dbg(usbdev->net, "%s(): type:0x%x mbm:%i\n",
- __func__, type, mbm);
-
- if (mbm < 0 || (mbm % 100))
- return -ENOTSUPP;
-
- /* The device doesn't support changing tx power after initialization;
- * it can only turn the radio off and on. Support 'auto' mode and
- * setting the same dBm value that is currently in use.
- */
- if (type == NL80211_TX_POWER_AUTOMATIC ||
- MBM_TO_DBM(mbm) == get_bcm4320_power_dbm(priv)) {
- if (!priv->radio_on)
- disassociate(usbdev, true); /* turn on radio */
-
- return 0;
- }
-
- return -ENOTSUPP;
-}
-
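-/*
- * Unit note for the handler above: cfg80211 passes tx power in mBm
- * (1/100 dBm) and MBM_TO_DBM() divides by 100, hence the (mbm % 100)
- * check: e.g. mbm = 1200 maps to 12 dBm, while mbm = 1250 is rejected.
- */
-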
-static int rndis_get_tx_power(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- int *dbm)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- *dbm = get_bcm4320_power_dbm(priv);
-
- netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm);
-
- return 0;
-}
-
-#define SCAN_DELAY_JIFFIES (6 * HZ)
-static int rndis_scan(struct wiphy *wiphy,
- struct cfg80211_scan_request *request)
-{
- struct net_device *dev = request->wdev->netdev;
- struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- int ret;
- int delay = SCAN_DELAY_JIFFIES;
-
- netdev_dbg(usbdev->net, "cfg80211.scan\n");
-
- /* Get current bssid list from device before new scan, as new scan
- * clears internal bssid list.
- */
- rndis_check_bssid_list(usbdev, NULL, NULL);
-
- if (priv->scan_request && priv->scan_request != request)
- return -EBUSY;
-
- priv->scan_request = request;
-
- ret = rndis_start_bssid_list_scan(usbdev);
- if (ret == 0) {
- if (priv->device_type == RNDIS_BCM4320A)
- delay = HZ;
-
- /* Wait before retrieving scan results from device */
- queue_delayed_work(priv->workqueue, &priv->scan_work, delay);
- }
-
- return ret;
-}
-
-static bool rndis_bss_info_update(struct usbnet *usbdev,
- struct ndis_80211_bssid_ex *bssid)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ieee80211_channel *channel;
- struct cfg80211_bss *bss;
- s32 signal;
- u64 timestamp;
- u16 capability;
- u16 beacon_interval;
- struct ndis_80211_fixed_ies *fixed;
- int ie_len, bssid_len;
- u8 *ie;
-
- netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM], len: %d\n",
- bssid->ssid.essid, bssid->mac, le32_to_cpu(bssid->length));
-
- /* parse bssid structure */
- bssid_len = le32_to_cpu(bssid->length);
-
- if (bssid_len < sizeof(struct ndis_80211_bssid_ex) +
- sizeof(struct ndis_80211_fixed_ies))
- return false;
-
- fixed = (struct ndis_80211_fixed_ies *)bssid->ies;
-
- ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies));
- ie_len = min(bssid_len - (int)sizeof(*bssid),
- (int)le32_to_cpu(bssid->ie_length));
- ie_len -= sizeof(struct ndis_80211_fixed_ies);
- if (ie_len < 0)
- return false;
-
- /* extract data for cfg80211_inform_bss */
- channel = ieee80211_get_channel(priv->wdev.wiphy,
- KHZ_TO_MHZ(le32_to_cpu(bssid->config.ds_config)));
- if (!channel)
- return false;
-
- signal = level_to_qual(le32_to_cpu(bssid->rssi));
- timestamp = le64_to_cpu(*(__le64 *)fixed->timestamp);
- capability = le16_to_cpu(fixed->capabilities);
- beacon_interval = le16_to_cpu(fixed->beacon_interval);
-
- bss = cfg80211_inform_bss(priv->wdev.wiphy, channel,
- CFG80211_BSS_FTYPE_UNKNOWN, bssid->mac,
- timestamp, capability, beacon_interval,
- ie, ie_len, signal, GFP_KERNEL);
- cfg80211_put_bss(priv->wdev.wiphy, bss);
-
- return (bss != NULL);
-}
-
-static struct ndis_80211_bssid_ex *next_bssid_list_item(
- struct ndis_80211_bssid_ex *bssid,
- int *bssid_len, void *buf, int len)
-{
- void *buf_end, *bssid_end;
-
- buf_end = (char *)buf + len;
- bssid_end = (char *)bssid + *bssid_len;
-
- if ((int)(buf_end - bssid_end) < sizeof(bssid->length)) {
- *bssid_len = 0;
- return NULL;
- } else {
- bssid = (void *)((char *)bssid + *bssid_len);
- *bssid_len = le32_to_cpu(bssid->length);
- return bssid;
- }
-}
-
-static bool check_bssid_list_item(struct ndis_80211_bssid_ex *bssid,
- int bssid_len, void *buf, int len)
-{
- void *buf_end, *bssid_end;
-
- if (!bssid || bssid_len <= 0 || bssid_len > len)
- return false;
-
- buf_end = (char *)buf + len;
- bssid_end = (char *)bssid + bssid_len;
-
- return (int)(buf_end - bssid_end) >= 0 && (int)(bssid_end - buf) >= 0;
-}
-
-static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
- bool *matched)
-{
- void *buf = NULL;
- struct ndis_80211_bssid_list_ex *bssid_list;
- struct ndis_80211_bssid_ex *bssid;
- int ret = -EINVAL, len, count, bssid_len, real_count, new_len;
-
- netdev_dbg(usbdev->net, "%s()\n", __func__);
-
- len = CONTROL_BUFFER_SIZE;
-resize_buf:
- buf = kzalloc(len, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* The BSSID list might have grown since we last checked; keep
- * resizing the buffer until the list no longer outgrows it.
- */
- new_len = len;
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST,
- buf, &new_len);
- if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex))
- goto out;
-
- if (new_len > len) {
- len = new_len;
- kfree(buf);
- goto resize_buf;
- }
-
- len = new_len;
-
- bssid_list = buf;
- count = le32_to_cpu(bssid_list->num_items);
- real_count = 0;
- netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len);
-
- bssid_len = 0;
- bssid = next_bssid_list_item((void *)bssid_list->bssid_data,
- &bssid_len, buf, len);
-
- /* The device returns an incorrect 'num_items'. Work around this by
- * ignoring the received 'num_items' and walking through the full
- * bssid buffer instead.
- */
- while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
- if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
- matched) {
- if (ether_addr_equal(bssid->mac, match_bssid))
- *matched = true;
- }
-
- real_count++;
- bssid = next_bssid_list_item(bssid, &bssid_len, buf, len);
- }
-
- netdev_dbg(usbdev->net, "%s(): num_items from device: %d, really found:"
- " %d\n", __func__, count, real_count);
-
-out:
- kfree(buf);
- return ret;
-}
-
-static void rndis_get_scan_results(struct work_struct *work)
-{
- struct rndis_wlan_private *priv =
- container_of(work, struct rndis_wlan_private, scan_work.work);
- struct usbnet *usbdev = priv->usbdev;
- struct cfg80211_scan_info info = {};
- int ret;
-
- netdev_dbg(usbdev->net, "get_scan_results\n");
-
- if (!priv->scan_request)
- return;
-
- ret = rndis_check_bssid_list(usbdev, NULL, NULL);
-
- info.aborted = ret < 0;
- cfg80211_scan_done(priv->scan_request, &info);
-
- priv->scan_request = NULL;
-}
-
-static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- struct ieee80211_channel *channel = sme->channel;
- struct ndis_80211_ssid ssid;
- int pairwise = RNDIS_WLAN_ALG_NONE;
- int groupwise = RNDIS_WLAN_ALG_NONE;
- int keymgmt = RNDIS_WLAN_KEY_MGMT_NONE;
- int length, i, ret, chan = -1;
-
- if (channel)
- chan = ieee80211_frequency_to_channel(channel->center_freq);
-
- groupwise = rndis_cipher_to_alg(sme->crypto.cipher_group);
- for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++)
- pairwise |=
- rndis_cipher_to_alg(sme->crypto.ciphers_pairwise[i]);
-
- if (sme->crypto.n_ciphers_pairwise > 0 &&
- pairwise == RNDIS_WLAN_ALG_NONE) {
- netdev_err(usbdev->net, "Unsupported pairwise cipher\n");
- return -ENOTSUPP;
- }
-
- for (i = 0; i < sme->crypto.n_akm_suites; i++)
- keymgmt |=
- rndis_akm_suite_to_key_mgmt(sme->crypto.akm_suites[i]);
-
- if (sme->crypto.n_akm_suites > 0 &&
- keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) {
- netdev_err(usbdev->net, "Invalid keymgmt\n");
- return -ENOTSUPP;
- }
-
- netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n",
- sme->ssid, sme->bssid, chan,
- sme->privacy, sme->crypto.wpa_versions, sme->auth_type,
- groupwise, pairwise, keymgmt);
-
- if (is_associated(usbdev))
- disassociate(usbdev, false);
-
- ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
-
- ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type,
- keymgmt);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
-
- set_priv_filter(usbdev);
-
- ret = set_encr_mode(usbdev, pairwise, groupwise);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
-
- if (channel) {
- ret = set_channel(usbdev, chan);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
- }
-
- if (sme->key && ((groupwise | pairwise) & RNDIS_WLAN_ALG_WEP)) {
- priv->encr_tx_key_index = sme->key_idx;
- ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n",
- ret, sme->key_len, sme->key_idx);
- goto err_turn_radio_on;
- }
- }
-
- if (sme->bssid && !is_zero_ether_addr(sme->bssid) &&
- !is_broadcast_ether_addr(sme->bssid)) {
- ret = set_bssid(usbdev, sme->bssid);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
- } else
- clear_bssid(usbdev);
-
- length = sme->ssid_len;
- if (length > NDIS_802_11_LENGTH_SSID)
- length = NDIS_802_11_LENGTH_SSID;
-
- memset(&ssid, 0, sizeof(ssid));
- ssid.length = cpu_to_le32(length);
- memcpy(ssid.essid, sme->ssid, length);
-
- /* Pause and purge the rx queue, so we don't pass packets up before
- * the 'media connect' indication.
- */
- usbnet_pause_rx(usbdev);
- usbnet_purge_paused_rxq(usbdev);
-
- ret = set_essid(usbdev, &ssid);
- if (ret < 0)
- netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret);
- return ret;
-
-err_turn_radio_on:
- disassociate(usbdev, true);
-
- return ret;
-}
-
-static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
-
- priv->connected = false;
- eth_zero_addr(priv->bssid);
-
- return deauthenticate(usbdev);
-}
-
-static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- struct ieee80211_channel *channel = params->chandef.chan;
- struct ndis_80211_ssid ssid;
- enum nl80211_auth_type auth_type;
- int ret, alg, length, chan = -1;
-
- if (channel)
- chan = ieee80211_frequency_to_channel(channel->center_freq);
-
- /* TODO: How to handle ad-hoc encryption?
- * connect() has *key, join_ibss() doesn't. RNDIS requires the key to
- * be pre-shared for encryption (open/shared/wpa); is the key set
- * before join_ibss? Which auth_type should be used (it's not in
- * params)? What about WPA?
- */
- if (params->privacy) {
- auth_type = NL80211_AUTHTYPE_SHARED_KEY;
- alg = RNDIS_WLAN_ALG_WEP;
- } else {
- auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
- alg = RNDIS_WLAN_ALG_NONE;
- }
-
- netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n",
- params->ssid, params->bssid, chan, params->privacy);
-
- if (is_associated(usbdev))
- disassociate(usbdev, false);
-
- ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
-
- ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
-
- set_priv_filter(usbdev);
-
- ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
-
- if (channel) {
- ret = set_channel(usbdev, chan);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
- }
-
- if (params->bssid && !is_zero_ether_addr(params->bssid) &&
- !is_broadcast_ether_addr(params->bssid)) {
- ret = set_bssid(usbdev, params->bssid);
- if (ret < 0) {
- netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n",
- ret);
- goto err_turn_radio_on;
- }
- } else
- clear_bssid(usbdev);
-
- length = params->ssid_len;
- if (length > NDIS_802_11_LENGTH_SSID)
- length = NDIS_802_11_LENGTH_SSID;
-
- memset(&ssid, 0, sizeof(ssid));
- ssid.length = cpu_to_le32(length);
- memcpy(ssid.essid, params->ssid, length);
-
- /* Don't need to pause rx queue for ad-hoc. */
- usbnet_purge_paused_rxq(usbdev);
- usbnet_resume_rx(usbdev);
-
- ret = set_essid(usbdev, &ssid);
- if (ret < 0)
- netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n",
- ret);
- return ret;
-
-err_turn_radio_on:
- disassociate(usbdev, true);
-
- return ret;
-}
-
-static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
-
- priv->connected = false;
- eth_zero_addr(priv->bssid);
-
- return deauthenticate(usbdev);
-}
-
-static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- int link_id, u8 key_index, bool pairwise,
- const u8 *mac_addr, struct key_params *params)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- __le32 flags;
-
- netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n",
- __func__, key_index, mac_addr, params->cipher);
-
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- return add_wep_key(usbdev, params->key, params->key_len,
- key_index);
- case WLAN_CIPHER_SUITE_TKIP:
- case WLAN_CIPHER_SUITE_CCMP:
- flags = 0;
-
- if (params->seq && params->seq_len > 0)
- flags |= NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ;
- if (mac_addr)
- flags |= NDIS_80211_ADDKEY_PAIRWISE_KEY |
- NDIS_80211_ADDKEY_TRANSMIT_KEY;
-
- return add_wpa_key(usbdev, params->key, params->key_len,
- key_index, mac_addr, params->seq,
- params->seq_len, params->cipher, flags);
- default:
- netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n",
- __func__, params->cipher);
- return -ENOTSUPP;
- }
-}
-
-static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- int link_id, u8 key_index, bool pairwise,
- const u8 *mac_addr)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr);
-
- return remove_key(usbdev, key_index, mac_addr);
-}
-
-static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- int link_id, u8 key_index, bool unicast,
- bool multicast)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- struct rndis_wlan_encr_key key;
-
- netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
-
- if (key_index >= RNDIS_WLAN_NUM_KEYS)
- return -ENOENT;
-
- priv->encr_tx_key_index = key_index;
-
- if (is_wpa_key(priv, key_index))
- return 0;
-
- key = priv->encr_keys[key_index];
-
- return add_wep_key(usbdev, key.material, key.len, key_index);
-}
-
-static void rndis_fill_station_info(struct usbnet *usbdev,
- struct station_info *sinfo)
-{
- __le32 linkspeed, rssi;
- int ret, len;
-
- memset(sinfo, 0, sizeof(*sinfo));
-
- len = sizeof(linkspeed);
- ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
- if (ret == 0) {
- sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
- }
-
- len = sizeof(rssi);
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
- &rssi, &len);
- if (ret == 0) {
- sinfo->signal = level_to_qual(le32_to_cpu(rssi));
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
- }
-}
-
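-/*
- * Unit conversion in rndis_fill_station_info() (assuming the usual NDIS
- * convention that RNDIS_OID_GEN_LINK_SPEED reports in 100 bps units):
- * dividing by 1000 yields the 100 kbit/s units that txrate.legacy
- * expects, so a reported 540000 (54 Mbps) becomes legacy = 540.
- */
-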
-static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac, struct station_info *sinfo)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- if (!ether_addr_equal(priv->bssid, mac))
- return -ENOENT;
-
- rndis_fill_station_info(usbdev, sinfo);
-
- return 0;
-}
-
-static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
- int idx, u8 *mac, struct station_info *sinfo)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- if (idx != 0)
- return -ENOENT;
-
- memcpy(mac, priv->bssid, ETH_ALEN);
-
- rndis_fill_station_info(usbdev, sinfo);
-
- return 0;
-}
-
-static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- struct ndis_80211_pmkid *pmkids;
- u32 *tmp = (u32 *)pmksa->pmkid;
-
- netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
- pmksa->bssid,
- cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
- cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
-
- pmkids = get_device_pmkids(usbdev);
- if (IS_ERR(pmkids)) {
- /* couldn't read PMKID cache from device */
- return PTR_ERR(pmkids);
- }
-
- pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
- if (IS_ERR(pmkids)) {
- /* not found, list full, etc */
- return PTR_ERR(pmkids);
- }
-
- return set_device_pmkids(usbdev, pmkids);
-}
-
-static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- struct ndis_80211_pmkid *pmkids;
- u32 *tmp = (u32 *)pmksa->pmkid;
-
- netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
- pmksa->bssid,
- cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
- cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
-
- pmkids = get_device_pmkids(usbdev);
- if (IS_ERR(pmkids)) {
- /* Couldn't read PMKID cache from device */
- return PTR_ERR(pmkids);
- }
-
- pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
- if (IS_ERR(pmkids)) {
- /* not found, etc */
- return PTR_ERR(pmkids);
- }
-
- return set_device_pmkids(usbdev, pmkids);
-}
-
-static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- struct ndis_80211_pmkid pmkid;
-
- netdev_dbg(usbdev->net, "%s()\n", __func__);
-
- memset(&pmkid, 0, sizeof(pmkid));
-
- pmkid.length = cpu_to_le32(sizeof(pmkid));
- pmkid.bssid_info_count = cpu_to_le32(0);
-
- return rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID,
- &pmkid, sizeof(pmkid));
-}
-
-static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
- bool enabled, int timeout)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
- int power_mode;
- __le32 mode;
- int ret;
-
- if (priv->device_type != RNDIS_BCM4320B)
- return -ENOTSUPP;
-
- netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
- enabled ? "enabled" : "disabled",
- timeout);
-
- if (enabled)
- power_mode = NDIS_80211_POWER_MODE_FAST_PSP;
- else
- power_mode = NDIS_80211_POWER_MODE_CAM;
-
- if (power_mode == priv->power_mode)
- return 0;
-
- priv->power_mode = power_mode;
-
- mode = cpu_to_le32(power_mode);
- ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_POWER_MODE,
- &mode, sizeof(mode));
-
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_POWER_MODE -> %d\n",
- __func__, ret);
-
- return ret;
-}
-
-static int rndis_set_cqm_rssi_config(struct wiphy *wiphy,
- struct net_device *dev,
- s32 rssi_thold, u32 rssi_hyst)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
-
- priv->cqm_rssi_thold = rssi_thold;
- priv->cqm_rssi_hyst = rssi_hyst;
- priv->last_cqm_event_rssi = 0;
-
- return 0;
-}
-
-static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
- struct ndis_80211_assoc_info *info)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ieee80211_channel *channel;
- struct ndis_80211_ssid ssid;
- struct cfg80211_bss *bss;
- s32 signal;
- u64 timestamp;
- u16 capability;
- u32 beacon_period = 0;
- __le32 rssi;
- u8 ie_buf[34];
- int len, ret, ie_len;
-
- /* Get signal quality; in case of error, use rssi = 0 and ignore the error. */
- len = sizeof(rssi);
- rssi = 0;
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
- &rssi, &len);
- signal = level_to_qual(le32_to_cpu(rssi));
-
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_RSSI -> %d, "
- "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi),
- level_to_qual(le32_to_cpu(rssi)));
-
- /* Get AP capabilities */
- if (info) {
- capability = le16_to_cpu(info->resp_ie.capa);
- } else {
- /* Set at least the ESS/IBSS capability */
- capability = (priv->infra_mode == NDIS_80211_INFRA_INFRA) ?
- WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS;
- }
-
- /* Get channel and beacon interval */
- channel = get_current_channel(usbdev, &beacon_period);
- if (!channel) {
- netdev_warn(usbdev->net, "%s(): could not get channel.\n",
- __func__);
- return;
- }
-
- /* Get the SSID; in case of error, use a zero-length SSID and ignore the error. */
- len = sizeof(ssid);
- memset(&ssid, 0, sizeof(ssid));
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_SSID,
- &ssid, &len);
- netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_SSID -> %d, len: %d, ssid: "
- "'%.32s'\n", __func__, ret,
- le32_to_cpu(ssid.length), ssid.essid);
-
- if (le32_to_cpu(ssid.length) > 32)
- ssid.length = cpu_to_le32(32);
-
- ie_buf[0] = WLAN_EID_SSID;
- ie_buf[1] = le32_to_cpu(ssid.length);
- memcpy(&ie_buf[2], ssid.essid, le32_to_cpu(ssid.length));
-
- ie_len = le32_to_cpu(ssid.length) + 2;
-
- /* no tsf */
- timestamp = 0;
-
- netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, "
- "capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), "
- "signal:%d\n", __func__, (channel ? channel->center_freq : -1),
- bssid, (u32)timestamp, capability, beacon_period, ie_len,
- ssid.essid, signal);
-
- bss = cfg80211_inform_bss(priv->wdev.wiphy, channel,
- CFG80211_BSS_FTYPE_UNKNOWN, bssid,
- timestamp, capability, beacon_period,
- ie_buf, ie_len, signal, GFP_KERNEL);
- cfg80211_put_bss(priv->wdev.wiphy, bss);
-}
-
-/*
- * workers, indication handlers, device poller
- */
-static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct ndis_80211_assoc_info *info = NULL;
- u8 bssid[ETH_ALEN];
- unsigned int resp_ie_len, req_ie_len;
- unsigned int offset;
- u8 *req_ie, *resp_ie;
- int ret;
- bool roamed = false;
- bool match_bss;
-
- if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) {
- /* received a media connect indication while connected; the
- * device either reassociated with the same AP or roamed to a
- * new one. */
- roamed = true;
- }
-
- req_ie_len = 0;
- resp_ie_len = 0;
- req_ie = NULL;
- resp_ie = NULL;
-
- if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
- info = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
- if (!info) {
- /* No memory? Try to resume the work later. */
- set_bit(WORK_LINK_UP, &priv->work_pending);
- queue_work(priv->workqueue, &priv->work);
- return;
- }
-
- /* Get association info IEs from device. */
- ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
- if (!ret) {
- req_ie_len = le32_to_cpu(info->req_ie_length);
- if (req_ie_len > CONTROL_BUFFER_SIZE)
- req_ie_len = CONTROL_BUFFER_SIZE;
- if (req_ie_len != 0) {
- offset = le32_to_cpu(info->offset_req_ies);
-
- if (offset > CONTROL_BUFFER_SIZE)
- offset = CONTROL_BUFFER_SIZE;
-
- req_ie = (u8 *)info + offset;
-
- if (offset + req_ie_len > CONTROL_BUFFER_SIZE)
- req_ie_len =
- CONTROL_BUFFER_SIZE - offset;
- }
-
- resp_ie_len = le32_to_cpu(info->resp_ie_length);
- if (resp_ie_len > CONTROL_BUFFER_SIZE)
- resp_ie_len = CONTROL_BUFFER_SIZE;
- if (resp_ie_len != 0) {
- offset = le32_to_cpu(info->offset_resp_ies);
-
- if (offset > CONTROL_BUFFER_SIZE)
- offset = CONTROL_BUFFER_SIZE;
-
- resp_ie = (u8 *)info + offset;
-
- if (offset + resp_ie_len > CONTROL_BUFFER_SIZE)
- resp_ie_len =
- CONTROL_BUFFER_SIZE - offset;
- }
- } else {
- /* Since rndis_wlan_craft_connected_bss() might use info
- * later and expects it to contain valid data if non-NULL,
- * free info and set it to NULL here.
- */
- kfree(info);
- info = NULL;
- }
- } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
- return;
-
- ret = get_bssid(usbdev, bssid);
- if (ret < 0)
- memset(bssid, 0, sizeof(bssid));
-
- netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
- bssid, roamed ? " roamed" : "");
-
- /* Internal bss list in device should contain at least the currently
- * connected bss and we can get it to cfg80211 with
- * rndis_check_bssid_list().
- *
- * NDIS spec says: "If the device is associated, but the associated
- * BSSID is not in its BSSID scan list, then the driver must add an
- * entry for the BSSID at the end of the data that it returns in
- * response to query of RNDIS_OID_802_11_BSSID_LIST."
- *
- * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
- */
- match_bss = false;
- rndis_check_bssid_list(usbdev, bssid, &match_bss);
-
- if (!is_zero_ether_addr(bssid) && !match_bss) {
- /* Couldn't get bss from device, we need to manually craft bss
- * for cfg80211.
- */
- rndis_wlan_craft_connected_bss(usbdev, bssid, info);
- }
-
- if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
- if (!roamed) {
- cfg80211_connect_result(usbdev->net, bssid, req_ie,
- req_ie_len, resp_ie,
- resp_ie_len, 0, GFP_KERNEL);
- } else {
- struct cfg80211_roam_info roam_info = {
- .links[0].channel =
- get_current_channel(usbdev, NULL),
- .links[0].bssid = bssid,
- .req_ie = req_ie,
- .req_ie_len = req_ie_len,
- .resp_ie = resp_ie,
- .resp_ie_len = resp_ie_len,
- };
-
- cfg80211_roamed(usbdev->net, &roam_info, GFP_KERNEL);
- }
- } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- cfg80211_ibss_joined(usbdev->net, bssid,
- get_current_channel(usbdev, NULL),
- GFP_KERNEL);
-
- kfree(info);
-
- priv->connected = true;
- memcpy(priv->bssid, bssid, ETH_ALEN);
-
- usbnet_resume_rx(usbdev);
- netif_carrier_on(usbdev->net);
-}
-
-static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
-
- if (priv->connected) {
- priv->connected = false;
- eth_zero_addr(priv->bssid);
-
- deauthenticate(usbdev);
-
- cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL);
- }
-
- netif_carrier_off(usbdev->net);
-}
-
-static void rndis_wlan_worker(struct work_struct *work)
-{
- struct rndis_wlan_private *priv =
- container_of(work, struct rndis_wlan_private, work);
- struct usbnet *usbdev = priv->usbdev;
-
- if (test_and_clear_bit(WORK_LINK_UP, &priv->work_pending))
- rndis_wlan_do_link_up_work(usbdev);
-
- if (test_and_clear_bit(WORK_LINK_DOWN, &priv->work_pending))
- rndis_wlan_do_link_down_work(usbdev);
-
- if (test_and_clear_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
- set_multicast_list(usbdev);
-}
-
-static void rndis_wlan_set_multicast_list(struct net_device *dev)
-{
- struct usbnet *usbdev = netdev_priv(dev);
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
-
- if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
- return;
-
- set_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending);
- queue_work(priv->workqueue, &priv->work);
-}
-
-static void rndis_wlan_auth_indication(struct usbnet *usbdev,
- struct ndis_80211_status_indication *indication,
- int len)
-{
- u8 *buf;
- const char *type;
- int flags, buflen, key_id;
- bool pairwise_error, group_error;
- struct ndis_80211_auth_request *auth_req;
- enum nl80211_key_type key_type;
-
- /* must have at least one array entry */
- if (len < offsetof(struct ndis_80211_status_indication, u) +
- sizeof(struct ndis_80211_auth_request)) {
- netdev_info(usbdev->net, "authentication indication: too short message (%i)\n",
- len);
- return;
- }
-
- buf = (void *)&indication->u.auth_request[0];
- buflen = len - offsetof(struct ndis_80211_status_indication, u);
-
- while (buflen >= sizeof(*auth_req)) {
- auth_req = (void *)buf;
- if (buflen < le32_to_cpu(auth_req->length))
- return;
- type = "unknown";
- flags = le32_to_cpu(auth_req->flags);
- pairwise_error = false;
- group_error = false;
-
- if (flags & 0x1)
- type = "reauth request";
- if (flags & 0x2)
- type = "key update request";
- if (flags & 0x6) {
- pairwise_error = true;
- type = "pairwise_error";
- }
- if (flags & 0xe) {
- group_error = true;
- type = "group_error";
- }
-
- netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n",
- type, le32_to_cpu(auth_req->flags));
-
- if (pairwise_error) {
- key_type = NL80211_KEYTYPE_PAIRWISE;
- key_id = -1;
-
- cfg80211_michael_mic_failure(usbdev->net,
- auth_req->bssid,
- key_type, key_id, NULL,
- GFP_KERNEL);
- }
-
- if (group_error) {
- key_type = NL80211_KEYTYPE_GROUP;
- key_id = -1;
-
- cfg80211_michael_mic_failure(usbdev->net,
- auth_req->bssid,
- key_type, key_id, NULL,
- GFP_KERNEL);
- }
-
- buflen -= le32_to_cpu(auth_req->length);
- buf += le32_to_cpu(auth_req->length);
- }
-}
-
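-/*
- * Note on the flag tests above: they use bitwise AND against multi-bit
- * values, so a single flags word can match several tests; e.g.
- * flags == 0x06 matches the 0x2, 0x6 and 0xe tests in turn, setting
- * both pairwise_error and group_error, with the last assignment to
- * 'type' winning.
- */
-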
-static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
- struct ndis_80211_status_indication *indication,
- int len)
-{
- struct ndis_80211_pmkid_cand_list *cand_list;
- int list_len, expected_len, i;
-
- if (len < offsetof(struct ndis_80211_status_indication, u) +
- sizeof(struct ndis_80211_pmkid_cand_list)) {
- netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n",
- len);
- return;
- }
-
- list_len = le32_to_cpu(indication->u.cand_list.num_candidates) *
- sizeof(struct ndis_80211_pmkid_candidate);
- expected_len = sizeof(struct ndis_80211_pmkid_cand_list) + list_len +
- offsetof(struct ndis_80211_status_indication, u);
-
- if (len < expected_len) {
- netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n",
- len, expected_len);
- return;
- }
-
- cand_list = &indication->u.cand_list;
-
- netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n",
- le32_to_cpu(cand_list->version),
- le32_to_cpu(cand_list->num_candidates));
-
- if (le32_to_cpu(cand_list->version) != 1)
- return;
-
- for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
- struct ndis_80211_pmkid_candidate *cand =
- &cand_list->candidate_list[i];
- bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
-
- netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
- i, le32_to_cpu(cand->flags), preauth, cand->bssid);
-
- cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
- preauth, GFP_ATOMIC);
- }
-}
-
-static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
- struct rndis_indicate *msg, int buflen)
-{
- struct ndis_80211_status_indication *indication;
- unsigned int len, offset;
-
- offset = offsetof(struct rndis_indicate, status) +
- le32_to_cpu(msg->offset);
- len = le32_to_cpu(msg->length);
-
- if (len < 8) {
- netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n",
- len);
- return;
- }
-
- if (len > buflen || offset > buflen || offset + len > buflen) {
- netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
- offset + len, buflen);
- return;
- }
-
- indication = (void *)((u8 *)msg + offset);
-
- switch (le32_to_cpu(indication->status_type)) {
- case NDIS_80211_STATUSTYPE_RADIOSTATE:
- netdev_info(usbdev->net, "radio state indication: %i\n",
- le32_to_cpu(indication->u.radio_status));
- return;
-
- case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE:
- netdev_info(usbdev->net, "media stream mode indication: %i\n",
- le32_to_cpu(indication->u.media_stream_mode));
- return;
-
- case NDIS_80211_STATUSTYPE_AUTHENTICATION:
- rndis_wlan_auth_indication(usbdev, indication, len);
- return;
-
- case NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST:
- rndis_wlan_pmkid_cand_list_indication(usbdev, indication, len);
- return;
-
- default:
- netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n",
- le32_to_cpu(indication->status_type));
- }
-}
-
-static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- struct rndis_indicate *msg = ind;
-
- switch (le32_to_cpu(msg->status)) {
- case RNDIS_STATUS_MEDIA_CONNECT:
- if (priv->current_command_oid == RNDIS_OID_802_11_ADD_KEY) {
-			/* RNDIS_OID_802_11_ADD_KEY sometimes causes extra
-			 * "media connect" indications, which confuse the
-			 * driver and userspace into thinking that the device
-			 * is roaming/reassociating when it isn't.
-			 */
- netdev_dbg(usbdev->net, "ignored RNDIS_OID_802_11_ADD_KEY triggered 'media connect'\n");
- return;
- }
-
- usbnet_pause_rx(usbdev);
-
- netdev_info(usbdev->net, "media connect\n");
-
- /* queue work to avoid recursive calls into rndis_command */
- set_bit(WORK_LINK_UP, &priv->work_pending);
- queue_work(priv->workqueue, &priv->work);
- break;
-
- case RNDIS_STATUS_MEDIA_DISCONNECT:
- netdev_info(usbdev->net, "media disconnect\n");
-
- /* queue work to avoid recursive calls into rndis_command */
- set_bit(WORK_LINK_DOWN, &priv->work_pending);
- queue_work(priv->workqueue, &priv->work);
- break;
-
- case RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION:
- rndis_wlan_media_specific_indication(usbdev, msg, buflen);
- break;
-
- default:
- netdev_info(usbdev->net, "indication: 0x%08x\n",
- le32_to_cpu(msg->status));
- break;
- }
-}
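-
-/* Sketch of the consuming side of the work_pending bits queued above; the
- * real rndis_wlan_worker() lives earlier in this file, and the handler
- * bodies here are illustrative assumptions only.
- */
-static void example_link_worker(struct work_struct *work)
-{
-	struct rndis_wlan_private *priv =
-		container_of(work, struct rndis_wlan_private, work);
-
-	/* test_and_clear_bit() makes each queued event fire exactly once */
-	if (test_and_clear_bit(WORK_LINK_UP, &priv->work_pending))
-		netdev_dbg(priv->usbdev->net, "handle link up\n");
-	if (test_and_clear_bit(WORK_LINK_DOWN, &priv->work_pending))
-		netdev_dbg(priv->usbdev->net, "handle link down\n");
-}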
-
-static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
-{
- struct {
- __le32 num_items;
- __le32 items[8];
- } networks_supported;
- struct ndis_80211_capability caps;
- int len, retval, i, n;
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
-
- /* determine supported modes */
- len = sizeof(networks_supported);
- retval = rndis_query_oid(usbdev,
- RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED,
- &networks_supported, &len);
- if (!retval) {
- n = le32_to_cpu(networks_supported.num_items);
- if (n > 8)
- n = 8;
- for (i = 0; i < n; i++) {
- switch (le32_to_cpu(networks_supported.items[i])) {
- case NDIS_80211_TYPE_FREQ_HOP:
- case NDIS_80211_TYPE_DIRECT_SEQ:
- priv->caps |= CAP_MODE_80211B;
- break;
- case NDIS_80211_TYPE_OFDM_A:
- priv->caps |= CAP_MODE_80211A;
- break;
- case NDIS_80211_TYPE_OFDM_G:
- priv->caps |= CAP_MODE_80211G;
- break;
- }
- }
- }
-
- /* get device 802.11 capabilities, number of PMKIDs */
- len = sizeof(caps);
- retval = rndis_query_oid(usbdev,
- RNDIS_OID_802_11_CAPABILITY,
- &caps, &len);
- if (!retval) {
- netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
- "ver %d, pmkids %d, auth-encr-pairs %d\n",
- le32_to_cpu(caps.length),
- le32_to_cpu(caps.version),
- le32_to_cpu(caps.num_pmkids),
- le32_to_cpu(caps.num_auth_encr_pair));
- wiphy->max_num_pmkids = le32_to_cpu(caps.num_pmkids);
- } else
- wiphy->max_num_pmkids = 0;
-
- return retval;
-}
-
-static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- enum nl80211_cqm_rssi_threshold_event event;
- int thold, hyst, last_event;
-
- if (priv->cqm_rssi_thold >= 0 || rssi >= 0)
- return;
- if (priv->infra_mode != NDIS_80211_INFRA_INFRA)
- return;
-
- last_event = priv->last_cqm_event_rssi;
- thold = priv->cqm_rssi_thold;
- hyst = priv->cqm_rssi_hyst;
-
- if (rssi < thold && (last_event == 0 || rssi < last_event - hyst))
- event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
- else if (rssi > thold && (last_event == 0 || rssi > last_event + hyst))
- event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
- else
- return;
-
- priv->last_cqm_event_rssi = rssi;
- cfg80211_cqm_rssi_notify(usbdev->net, event, rssi, GFP_KERNEL);
-}
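-
-/* Minimal standalone sketch of the threshold + hysteresis rule above,
- * illustrative only: returns -1 for a LOW event, 1 for HIGH, 0 for none.
- */
-static int example_cqm_event(int rssi, int thold, int hyst, int last_event)
-{
-	/* e.g. thold -70, hyst 4, last -69: rssi -75 fires LOW,
-	 * while rssi -72 is inside the hysteresis window and fires nothing */
-	if (rssi < thold && (last_event == 0 || rssi < last_event - hyst))
-		return -1;
-	if (rssi > thold && (last_event == 0 || rssi > last_event + hyst))
-		return 1;
-	return 0;
-}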
-
-#define DEVICE_POLLER_JIFFIES (HZ)
-static void rndis_device_poller(struct work_struct *work)
-{
- struct rndis_wlan_private *priv =
- container_of(work, struct rndis_wlan_private,
- dev_poller_work.work);
- struct usbnet *usbdev = priv->usbdev;
- __le32 rssi, tmp;
- int len, ret, j;
- int update_jiffies = DEVICE_POLLER_JIFFIES;
- void *buf;
-
-	/* Only check/do the workaround when connected. Calling is_associated()
-	 * also polls the device with rndis_command() and catches media link
-	 * indications.
-	 */
- if (!is_associated(usbdev)) {
-		/* Work around bad scanning in BCM4320a devices by running an
-		 * active background scan when not associated.
-		 */
- if (priv->device_type == RNDIS_BCM4320A && priv->radio_on &&
- !priv->scan_request) {
- /* Get previous scan results */
- rndis_check_bssid_list(usbdev, NULL, NULL);
-
- /* Initiate new scan */
- rndis_start_bssid_list_scan(usbdev);
- }
-
- goto end;
- }
-
- len = sizeof(rssi);
- ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
- &rssi, &len);
- if (ret == 0) {
- priv->last_qual = level_to_qual(le32_to_cpu(rssi));
- rndis_do_cqm(usbdev, le32_to_cpu(rssi));
- }
-
- netdev_dbg(usbdev->net, "dev-poller: RNDIS_OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
- ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
-
-	/* Work around transfer stalls on poor quality links.
-	 * TODO: find the right way to fix these stalls (they do not happen
-	 * with the ndiswrapper/windows driver). */
- if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) {
-		/* Decrease the stats worker interval to catch stalls faster.
-		 * Faster than 400-500ms causes packet loss,
-		 * slower doesn't catch stalls fast enough.
-		 */
- j = msecs_to_jiffies(priv->param_workaround_interval);
- if (j > DEVICE_POLLER_JIFFIES)
- j = DEVICE_POLLER_JIFFIES;
- else if (j <= 0)
- j = 1;
- update_jiffies = j;
-
- /* Send scan OID. Use of both OIDs is required to get device
- * working.
- */
- tmp = cpu_to_le32(1);
- rndis_set_oid(usbdev,
- RNDIS_OID_802_11_BSSID_LIST_SCAN,
- &tmp, sizeof(tmp));
-
- len = CONTROL_BUFFER_SIZE;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- goto end;
-
- rndis_query_oid(usbdev,
- RNDIS_OID_802_11_BSSID_LIST,
- buf, &len);
- kfree(buf);
- }
-
-end:
- if (update_jiffies >= HZ)
- update_jiffies = round_jiffies_relative(update_jiffies);
- else {
- j = round_jiffies_relative(update_jiffies);
- if (abs(j - update_jiffies) <= 10)
- update_jiffies = j;
- }
-
- queue_delayed_work(priv->workqueue, &priv->dev_poller_work,
- update_jiffies);
-}
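-
-/* Illustrative note: round_jiffies_relative() aligns a relative timeout to
- * a whole-second boundary so periodic timers batch their wakeups.  The
- * tail of the poller above only accepts the rounded value for sub-second
- * intervals when it moves the deadline by at most 10 jiffies; a sketch:
- */
-static unsigned long example_poller_round(unsigned long j)
-{
-	unsigned long r = round_jiffies_relative(j);
-
-	if (j >= HZ)
-		return r;
-	return abs((long)(r - j)) <= 10 ? r : j;
-}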
-
-/*
- * driver/device initialization
- */
-static void rndis_copy_module_params(struct usbnet *usbdev, int device_type)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
-
- priv->device_type = device_type;
-
- priv->param_country[0] = modparam_country[0];
- priv->param_country[1] = modparam_country[1];
- priv->param_country[2] = 0;
- priv->param_frameburst = modparam_frameburst;
- priv->param_afterburner = modparam_afterburner;
- priv->param_power_save = modparam_power_save;
- priv->param_power_output = modparam_power_output;
- priv->param_roamtrigger = modparam_roamtrigger;
- priv->param_roamdelta = modparam_roamdelta;
-
- priv->param_country[0] = toupper(priv->param_country[0]);
- priv->param_country[1] = toupper(priv->param_country[1]);
-	/* the device doesn't support EU as a country code, use FI instead */
- if (!strcmp(priv->param_country, "EU"))
- strcpy(priv->param_country, "FI");
-
- if (priv->param_power_save < 0)
- priv->param_power_save = 0;
- else if (priv->param_power_save > 2)
- priv->param_power_save = 2;
-
- if (priv->param_power_output < 0)
- priv->param_power_output = 0;
- else if (priv->param_power_output > 3)
- priv->param_power_output = 3;
-
- if (priv->param_roamtrigger < -80)
- priv->param_roamtrigger = -80;
- else if (priv->param_roamtrigger > -60)
- priv->param_roamtrigger = -60;
-
- if (priv->param_roamdelta < 0)
- priv->param_roamdelta = 0;
- else if (priv->param_roamdelta > 2)
- priv->param_roamdelta = 2;
-
- if (modparam_workaround_interval < 0)
- priv->param_workaround_interval = 500;
- else
- priv->param_workaround_interval = modparam_workaround_interval;
-}
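-
-/* The open-coded range checks above could equally be written with
- * clamp_t() from <linux/minmax.h>; a sketch, not the driver's code:
- */
-static void example_clamp_params(struct rndis_wlan_private *priv)
-{
-	priv->param_power_save   = clamp_t(int, priv->param_power_save, 0, 2);
-	priv->param_power_output = clamp_t(int, priv->param_power_output, 0, 3);
-	priv->param_roamtrigger  = clamp_t(int, priv->param_roamtrigger, -80, -60);
-	priv->param_roamdelta    = clamp_t(int, priv->param_roamdelta, 0, 2);
-}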
-
-static int unknown_early_init(struct usbnet *usbdev)
-{
-	/* Copy module parameters for the unknown device type so that iwconfig
-	 * reports txpower and the workaround parameter is copied to the
-	 * private structure correctly.
-	 */
- rndis_copy_module_params(usbdev, RNDIS_UNKNOWN);
-
-	/* This is an unknown device, so do not try to set configuration
-	 * parameters.
-	 */
-
- return 0;
-}
-
-static int bcm4320a_early_init(struct usbnet *usbdev)
-{
-	/* Copy module parameters for bcm4320a so that iwconfig reports
-	 * txpower and the workaround parameter is copied to the private
-	 * structure correctly.
-	 */
- rndis_copy_module_params(usbdev, RNDIS_BCM4320A);
-
-	/* bcm4320a doesn't handle configuration parameters well. Trying to
-	 * set any of them gets you a partially zeroed MAC address and a
-	 * broken device.
-	 */
-
- return 0;
-}
-
-static int bcm4320b_early_init(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- char buf[8];
-
- rndis_copy_module_params(usbdev, RNDIS_BCM4320B);
-
-	/* Early initialization settings; setting these has no effect
-	 * if done after generic_rndis_bind().
-	 */
-
- rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
- rndis_set_config_parameter_str(usbdev, "FrameBursting",
- priv->param_frameburst ? "1" : "0");
- rndis_set_config_parameter_str(usbdev, "Afterburner",
- priv->param_afterburner ? "1" : "0");
- sprintf(buf, "%d", priv->param_power_save);
- rndis_set_config_parameter_str(usbdev, "PowerSaveMode", buf);
- sprintf(buf, "%d", priv->param_power_output);
- rndis_set_config_parameter_str(usbdev, "PwrOut", buf);
- sprintf(buf, "%d", priv->param_roamtrigger);
- rndis_set_config_parameter_str(usbdev, "RoamTrigger", buf);
- sprintf(buf, "%d", priv->param_roamdelta);
- rndis_set_config_parameter_str(usbdev, "RoamDelta", buf);
-
- return 0;
-}
-
-/* same as rndis_netdev_ops but with local multicast handler */
-static const struct net_device_ops rndis_wlan_netdev_ops = {
- .ndo_open = usbnet_open,
- .ndo_stop = usbnet_stop,
- .ndo_start_xmit = usbnet_start_xmit,
- .ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = dev_get_tstats64,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = rndis_wlan_set_multicast_list,
-};
-
-static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
-{
- struct wiphy *wiphy;
- struct rndis_wlan_private *priv;
- int retval, len;
- __le32 tmp;
-
- /* allocate wiphy and rndis private data
- * NOTE: We only support a single virtual interface, so wiphy
- * and wireless_dev are somewhat synonymous for this device.
- */
- wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private));
- if (!wiphy)
- return -ENOMEM;
-
- priv = wiphy_priv(wiphy);
- usbdev->net->ieee80211_ptr = &priv->wdev;
- priv->wdev.wiphy = wiphy;
- priv->wdev.iftype = NL80211_IFTYPE_STATION;
-
- /* These have to be initialized before calling generic_rndis_bind().
- * Otherwise we'll be in big trouble in rndis_wlan_early_init().
- */
- usbdev->driver_priv = priv;
- priv->usbdev = usbdev;
-
- mutex_init(&priv->command_lock);
-
- /* because rndis_command() sleeps we need to use workqueue */
- priv->workqueue = create_singlethread_workqueue("rndis_wlan");
- if (!priv->workqueue) {
- wiphy_free(wiphy);
- return -ENOMEM;
- }
- INIT_WORK(&priv->work, rndis_wlan_worker);
- INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
- INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
-
- /* try bind rndis_host */
- retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS);
- if (retval < 0)
- goto fail;
-
-	/* generic_rndis_bind() sets the packet filter to multicast_all +
-	 * promisc mode, which doesn't work well for our devices (the device
-	 * picks up the rssi of the closest station instead of the access
-	 * point's).
-	 *
-	 * rndis_host wants to avoid OIDs as much as possible,
-	 * so do the promisc/multicast handling in rndis_wlan.
-	 */
- usbdev->net->netdev_ops = &rndis_wlan_netdev_ops;
-
- tmp = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST);
- retval = rndis_set_oid(usbdev,
- RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
- &tmp, sizeof(tmp));
-
- len = sizeof(tmp);
- retval = rndis_query_oid(usbdev,
- RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
- &tmp, &len);
- priv->multicast_size = le32_to_cpu(tmp);
- if (retval < 0 || priv->multicast_size < 0)
- priv->multicast_size = 0;
- if (priv->multicast_size > 0)
- usbdev->net->flags |= IFF_MULTICAST;
- else
- usbdev->net->flags &= ~IFF_MULTICAST;
-
- /* fill-out wiphy structure and register w/ cfg80211 */
- memcpy(wiphy->perm_addr, usbdev->net->dev_addr, ETH_ALEN);
- wiphy->privid = rndis_wiphy_privid;
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
- | BIT(NL80211_IFTYPE_ADHOC);
- wiphy->max_scan_ssids = 1;
-
- /* TODO: fill-out band/encr information based on priv->caps */
- rndis_wlan_get_caps(usbdev, wiphy);
-
- memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
- memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
- priv->band.channels = priv->channels;
- priv->band.n_channels = ARRAY_SIZE(rndis_channels);
- priv->band.bitrates = priv->rates;
- priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
- wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
- wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
-
- memcpy(priv->cipher_suites, rndis_cipher_suites,
- sizeof(rndis_cipher_suites));
- wiphy->cipher_suites = priv->cipher_suites;
- wiphy->n_cipher_suites = ARRAY_SIZE(rndis_cipher_suites);
-
- set_wiphy_dev(wiphy, &usbdev->udev->dev);
-
- if (wiphy_register(wiphy)) {
- retval = -ENODEV;
- goto fail;
- }
-
- set_default_iw_params(usbdev);
-
- priv->power_mode = -1;
-
- /* set default rts/frag */
- rndis_set_wiphy_params(wiphy,
- WIPHY_PARAM_FRAG_THRESHOLD | WIPHY_PARAM_RTS_THRESHOLD);
-
- /* turn radio off on init */
- priv->radio_on = false;
- disassociate(usbdev, false);
- netif_carrier_off(usbdev->net);
-
- return 0;
-
-fail:
- cancel_delayed_work_sync(&priv->dev_poller_work);
- cancel_delayed_work_sync(&priv->scan_work);
- cancel_work_sync(&priv->work);
- destroy_workqueue(priv->workqueue);
-
- wiphy_free(wiphy);
- return retval;
-}
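-
-/* Distilled shape of the wiphy lifecycle used in rndis_wlan_bind() above
- * (sketch only, most setup elided): allocation and registration must be
- * mirrored by wiphy_unregister() + wiphy_free() on teardown.
- */
-static int example_register_wiphy(struct device *parent)
-{
-	struct wiphy *wiphy;
-	int err;
-
-	wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private));
-	if (!wiphy)
-		return -ENOMEM;
-
-	set_wiphy_dev(wiphy, parent);
-	err = wiphy_register(wiphy);
-	if (err)
-		wiphy_free(wiphy);	/* not yet registered: plain free */
-	return err;
-}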
-
-static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
-
- /* turn radio off */
- disassociate(usbdev, false);
-
- cancel_delayed_work_sync(&priv->dev_poller_work);
- cancel_delayed_work_sync(&priv->scan_work);
- cancel_work_sync(&priv->work);
- destroy_workqueue(priv->workqueue);
-
- rndis_unbind(usbdev, intf);
-
- wiphy_unregister(priv->wdev.wiphy);
- wiphy_free(priv->wdev.wiphy);
-}
-
-static int rndis_wlan_reset(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- int retval;
-
- netdev_dbg(usbdev->net, "%s()\n", __func__);
-
- retval = rndis_reset(usbdev);
- if (retval)
- netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval);
-
-	/* rndis_reset cleared the multicast list, so restore it here
-	 * (set_multicast_list() also turns on the current packet filter). */
- set_multicast_list(usbdev);
-
- queue_delayed_work(priv->workqueue, &priv->dev_poller_work,
- round_jiffies_relative(DEVICE_POLLER_JIFFIES));
-
- return deauthenticate(usbdev);
-}
-
-static int rndis_wlan_stop(struct usbnet *usbdev)
-{
- struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- int retval;
- __le32 filter;
-
- netdev_dbg(usbdev->net, "%s()\n", __func__);
-
- retval = disassociate(usbdev, false);
-
- priv->work_pending = 0;
- cancel_delayed_work_sync(&priv->dev_poller_work);
- cancel_delayed_work_sync(&priv->scan_work);
- cancel_work_sync(&priv->work);
- flush_workqueue(priv->workqueue);
-
- if (priv->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = true,
- };
-
- cfg80211_scan_done(priv->scan_request, &info);
- priv->scan_request = NULL;
- }
-
-	/* Set the current packet filter to zero to block receiving data
-	 * packets from the device. */
- filter = 0;
- rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
- sizeof(filter));
-
- return retval;
-}
-
-static const struct driver_info bcm4320b_info = {
- .description = "Wireless RNDIS device, BCM4320b based",
- .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT |
- FLAG_AVOID_UNLINK_URBS,
- .bind = rndis_wlan_bind,
- .unbind = rndis_wlan_unbind,
- .status = rndis_status,
- .rx_fixup = rndis_rx_fixup,
- .tx_fixup = rndis_tx_fixup,
- .reset = rndis_wlan_reset,
- .stop = rndis_wlan_stop,
- .early_init = bcm4320b_early_init,
- .indication = rndis_wlan_indication,
-};
-
-static const struct driver_info bcm4320a_info = {
- .description = "Wireless RNDIS device, BCM4320a based",
- .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT |
- FLAG_AVOID_UNLINK_URBS,
- .bind = rndis_wlan_bind,
- .unbind = rndis_wlan_unbind,
- .status = rndis_status,
- .rx_fixup = rndis_rx_fixup,
- .tx_fixup = rndis_tx_fixup,
- .reset = rndis_wlan_reset,
- .stop = rndis_wlan_stop,
- .early_init = bcm4320a_early_init,
- .indication = rndis_wlan_indication,
-};
-
-static const struct driver_info rndis_wlan_info = {
- .description = "Wireless RNDIS device",
- .flags = FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT |
- FLAG_AVOID_UNLINK_URBS,
- .bind = rndis_wlan_bind,
- .unbind = rndis_wlan_unbind,
- .status = rndis_status,
- .rx_fixup = rndis_rx_fixup,
- .tx_fixup = rndis_tx_fixup,
- .reset = rndis_wlan_reset,
- .stop = rndis_wlan_stop,
- .early_init = unknown_early_init,
- .indication = rndis_wlan_indication,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static const struct usb_device_id products [] = {
-#define RNDIS_MASTER_INTERFACE \
- .bInterfaceClass = USB_CLASS_COMM, \
- .bInterfaceSubClass = 2 /* ACM */, \
- .bInterfaceProtocol = 0x0ff
-
-/* The INF driver for these devices has DriverVer >= 4.xx.xx.xx and many
- * custom parameters available. Chipset marked as 'BCM4320SKFBG' in the
- * NDISwrapper wiki.
- */
-{
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x0411,
- .idProduct = 0x00bc, /* Buffalo WLI-U2-KG125S */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x0baf,
- .idProduct = 0x011b, /* U.S. Robotics USR5421 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x050d,
- .idProduct = 0x011b, /* Belkin F5D7051 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x1799, /* Belkin has two vendor ids */
- .idProduct = 0x011b, /* Belkin F5D7051 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x13b1,
- .idProduct = 0x0014, /* Linksys WUSB54GSv2 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x13b1,
- .idProduct = 0x0026, /* Linksys WUSB54GSC */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x0b05,
- .idProduct = 0x1717, /* Asus WL169gE */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x0a5c,
- .idProduct = 0xd11b, /* Eminent EM4045 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x1690,
- .idProduct = 0x0715, /* BT Voyager 1055 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320b_info,
-},
-/* These devices have DriverVer < 4.xx.xx.xx and do not have any custom
- * parameters available; the hardware probably contains an older firmware
- * version with no way of updating it. Chipset marked as 'BCM4320????' in the
- * NDISwrapper wiki.
- */
-{
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x13b1,
- .idProduct = 0x000e, /* Linksys WUSB54GSv1 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320a_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x0baf,
- .idProduct = 0x0111, /* U.S. Robotics USR5420 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320a_info,
-}, {
- .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
- | USB_DEVICE_ID_MATCH_DEVICE,
- .idVendor = 0x0411,
- .idProduct = 0x004b, /* BUFFALO WLI-USB-G54 */
- RNDIS_MASTER_INTERFACE,
- .driver_info = (unsigned long) &bcm4320a_info,
-},
-/* Generic Wireless RNDIS devices for which we don't have exact
- * idVendor/idProduct/chip information yet.
- */
-{
-	/* RNDIS is MSFT's unofficial variant of CDC ACM */
- USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
- .driver_info = (unsigned long) &rndis_wlan_info,
-}, {
- /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
- USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
- .driver_info = (unsigned long) &rndis_wlan_info,
-},
-	{ },	/* END */
-};
-MODULE_DEVICE_TABLE(usb, products);
-
-static struct usb_driver rndis_wlan_driver = {
- .name = "rndis_wlan",
- .id_table = products,
- .probe = usbnet_probe,
- .disconnect = usbnet_disconnect,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(rndis_wlan_driver);
-
-MODULE_AUTHOR("Bjorge Dijkstra");
-MODULE_AUTHOR("Jussi Kivilinna");
-MODULE_DESCRIPTION("Driver for RNDIS based USB Wireless adapters");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/wireless/legacy/wl3501.h b/drivers/net/wireless/legacy/wl3501.h
deleted file mode 100644
index 91f276dd2..000000000
--- a/drivers/net/wireless/legacy/wl3501.h
+++ /dev/null
@@ -1,615 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __WL3501_H__
-#define __WL3501_H__
-
-#include <linux/spinlock.h>
-#include <linux/ieee80211.h>
-
-/* define for WLA 2.0 */
-#define WL3501_BLKSZ 256
-/*
- * ID for input Signals of DRIVER block
- * bit[7-5] is block ID: 000
- * bit[4-0] is signal ID
- */
-enum wl3501_signals {
- WL3501_SIG_ALARM,
- WL3501_SIG_MD_CONFIRM,
- WL3501_SIG_MD_IND,
- WL3501_SIG_ASSOC_CONFIRM,
- WL3501_SIG_ASSOC_IND,
- WL3501_SIG_AUTH_CONFIRM,
- WL3501_SIG_AUTH_IND,
- WL3501_SIG_DEAUTH_CONFIRM,
- WL3501_SIG_DEAUTH_IND,
- WL3501_SIG_DISASSOC_CONFIRM,
- WL3501_SIG_DISASSOC_IND,
- WL3501_SIG_GET_CONFIRM,
- WL3501_SIG_JOIN_CONFIRM,
- WL3501_SIG_PWR_MGMT_CONFIRM,
- WL3501_SIG_REASSOC_CONFIRM,
- WL3501_SIG_REASSOC_IND,
- WL3501_SIG_SCAN_CONFIRM,
- WL3501_SIG_SET_CONFIRM,
- WL3501_SIG_START_CONFIRM,
- WL3501_SIG_RESYNC_CONFIRM,
- WL3501_SIG_SITE_CONFIRM,
- WL3501_SIG_SAVE_CONFIRM,
- WL3501_SIG_RFTEST_CONFIRM,
-/*
- * ID for input Signals of MLME block
- * bit[7-5] is block ID: 010
- * bit[4-0] is signal ID
- */
- WL3501_SIG_ASSOC_REQ = 0x20,
- WL3501_SIG_AUTH_REQ,
- WL3501_SIG_DEAUTH_REQ,
- WL3501_SIG_DISASSOC_REQ,
- WL3501_SIG_GET_REQ,
- WL3501_SIG_JOIN_REQ,
- WL3501_SIG_PWR_MGMT_REQ,
- WL3501_SIG_REASSOC_REQ,
- WL3501_SIG_SCAN_REQ,
- WL3501_SIG_SET_REQ,
- WL3501_SIG_START_REQ,
- WL3501_SIG_MD_REQ,
- WL3501_SIG_RESYNC_REQ,
- WL3501_SIG_SITE_REQ,
- WL3501_SIG_SAVE_REQ,
- WL3501_SIG_RF_TEST_REQ,
- WL3501_SIG_MM_CONFIRM = 0x60,
- WL3501_SIG_MM_IND,
-};
-
-enum wl3501_mib_attribs {
- WL3501_MIB_ATTR_STATION_ID,
- WL3501_MIB_ATTR_AUTH_ALGORITHMS,
- WL3501_MIB_ATTR_AUTH_TYPE,
- WL3501_MIB_ATTR_MEDIUM_OCCUPANCY_LIMIT,
- WL3501_MIB_ATTR_CF_POLLABLE,
- WL3501_MIB_ATTR_CFP_PERIOD,
- WL3501_MIB_ATTR_CFPMAX_DURATION,
- WL3501_MIB_ATTR_AUTH_RESP_TMOUT,
- WL3501_MIB_ATTR_RX_DTIMS,
- WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED,
- WL3501_MIB_ATTR_PRIV_INVOKED,
- WL3501_MIB_ATTR_WEP_DEFAULT_KEYS,
- WL3501_MIB_ATTR_WEP_DEFAULT_KEY_ID,
- WL3501_MIB_ATTR_WEP_KEY_MAPPINGS,
- WL3501_MIB_ATTR_WEP_KEY_MAPPINGS_LEN,
- WL3501_MIB_ATTR_EXCLUDE_UNENCRYPTED,
- WL3501_MIB_ATTR_WEP_ICV_ERROR_COUNT,
- WL3501_MIB_ATTR_WEP_UNDECRYPTABLE_COUNT,
- WL3501_MIB_ATTR_WEP_EXCLUDED_COUNT,
- WL3501_MIB_ATTR_MAC_ADDR,
- WL3501_MIB_ATTR_GROUP_ADDRS,
- WL3501_MIB_ATTR_RTS_THRESHOLD,
- WL3501_MIB_ATTR_SHORT_RETRY_LIMIT,
- WL3501_MIB_ATTR_LONG_RETRY_LIMIT,
- WL3501_MIB_ATTR_FRAG_THRESHOLD,
- WL3501_MIB_ATTR_MAX_TX_MSDU_LIFETIME,
- WL3501_MIB_ATTR_MAX_RX_LIFETIME,
- WL3501_MIB_ATTR_MANUFACTURER_ID,
- WL3501_MIB_ATTR_PRODUCT_ID,
- WL3501_MIB_ATTR_TX_FRAG_COUNT,
- WL3501_MIB_ATTR_MULTICAST_TX_FRAME_COUNT,
- WL3501_MIB_ATTR_FAILED_COUNT,
- WL3501_MIB_ATTR_RX_FRAG_COUNT,
- WL3501_MIB_ATTR_MULTICAST_RX_COUNT,
- WL3501_MIB_ATTR_FCS_ERROR_COUNT,
- WL3501_MIB_ATTR_RETRY_COUNT,
- WL3501_MIB_ATTR_MULTIPLE_RETRY_COUNT,
- WL3501_MIB_ATTR_RTS_SUCCESS_COUNT,
- WL3501_MIB_ATTR_RTS_FAILURE_COUNT,
- WL3501_MIB_ATTR_ACK_FAILURE_COUNT,
- WL3501_MIB_ATTR_FRAME_DUPLICATE_COUNT,
- WL3501_MIB_ATTR_PHY_TYPE,
- WL3501_MIB_ATTR_REG_DOMAINS_SUPPORT,
- WL3501_MIB_ATTR_CURRENT_REG_DOMAIN,
- WL3501_MIB_ATTR_SLOT_TIME,
- WL3501_MIB_ATTR_CCA_TIME,
- WL3501_MIB_ATTR_RX_TX_TURNAROUND_TIME,
- WL3501_MIB_ATTR_TX_PLCP_DELAY,
- WL3501_MIB_ATTR_RX_TX_SWITCH_TIME,
- WL3501_MIB_ATTR_TX_RAMP_ON_TIME,
- WL3501_MIB_ATTR_TX_RF_DELAY,
- WL3501_MIB_ATTR_SIFS_TIME,
- WL3501_MIB_ATTR_RX_RF_DELAY,
- WL3501_MIB_ATTR_RX_PLCP_DELAY,
- WL3501_MIB_ATTR_MAC_PROCESSING_DELAY,
- WL3501_MIB_ATTR_TX_RAMP_OFF_TIME,
- WL3501_MIB_ATTR_PREAMBLE_LEN,
- WL3501_MIB_ATTR_PLCP_HEADER_LEN,
- WL3501_MIB_ATTR_MPDU_DURATION_FACTOR,
- WL3501_MIB_ATTR_AIR_PROPAGATION_TIME,
- WL3501_MIB_ATTR_TEMP_TYPE,
- WL3501_MIB_ATTR_CW_MIN,
- WL3501_MIB_ATTR_CW_MAX,
- WL3501_MIB_ATTR_SUPPORT_DATA_RATES_TX,
- WL3501_MIB_ATTR_SUPPORT_DATA_RATES_RX,
- WL3501_MIB_ATTR_MPDU_MAX_LEN,
- WL3501_MIB_ATTR_SUPPORT_TX_ANTENNAS,
- WL3501_MIB_ATTR_CURRENT_TX_ANTENNA,
- WL3501_MIB_ATTR_SUPPORT_RX_ANTENNAS,
- WL3501_MIB_ATTR_DIVERSITY_SUPPORT,
- WL3501_MIB_ATTR_DIVERSITY_SELECTION_RS,
- WL3501_MIB_ATTR_NR_SUPPORTED_PWR_LEVELS,
- WL3501_MIB_ATTR_TX_PWR_LEVEL1,
- WL3501_MIB_ATTR_TX_PWR_LEVEL2,
- WL3501_MIB_ATTR_TX_PWR_LEVEL3,
- WL3501_MIB_ATTR_TX_PWR_LEVEL4,
- WL3501_MIB_ATTR_TX_PWR_LEVEL5,
- WL3501_MIB_ATTR_TX_PWR_LEVEL6,
- WL3501_MIB_ATTR_TX_PWR_LEVEL7,
- WL3501_MIB_ATTR_TX_PWR_LEVEL8,
- WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL,
- WL3501_MIB_ATTR_CURRENT_CHAN,
- WL3501_MIB_ATTR_CCA_MODE_SUPPORTED,
- WL3501_MIB_ATTR_CURRENT_CCA_MODE,
- WL3501_MIB_ATTR_ED_THRESHOLD,
- WL3501_MIB_ATTR_SINTHESIZER_LOCKED,
- WL3501_MIB_ATTR_CURRENT_PWR_STATE,
- WL3501_MIB_ATTR_DOZE_TURNON_TIME,
- WL3501_MIB_ATTR_RCR33,
- WL3501_MIB_ATTR_DEFAULT_CHAN,
- WL3501_MIB_ATTR_SSID,
- WL3501_MIB_ATTR_PWR_MGMT_ENABLE,
- WL3501_MIB_ATTR_NET_CAPABILITY,
- WL3501_MIB_ATTR_ROUTING,
-};
-
-enum wl3501_net_type {
- WL3501_NET_TYPE_INFRA,
- WL3501_NET_TYPE_ADHOC,
- WL3501_NET_TYPE_ANY_BSS,
-};
-
-enum wl3501_scan_type {
- WL3501_SCAN_TYPE_ACTIVE,
- WL3501_SCAN_TYPE_PASSIVE,
-};
-
-enum wl3501_tx_result {
- WL3501_TX_RESULT_SUCCESS,
- WL3501_TX_RESULT_NO_BSS,
- WL3501_TX_RESULT_RETRY_LIMIT,
-};
-
-enum wl3501_sys_type {
- WL3501_SYS_TYPE_OPEN,
- WL3501_SYS_TYPE_SHARE_KEY,
-};
-
-enum wl3501_status {
- WL3501_STATUS_SUCCESS,
- WL3501_STATUS_INVALID,
- WL3501_STATUS_TIMEOUT,
- WL3501_STATUS_REFUSED,
- WL3501_STATUS_MANY_REQ,
- WL3501_STATUS_ALREADY_BSS,
-};
-
-#define WL3501_MGMT_CAPABILITY_ESS 0x0001 /* see 802.11 p.58 */
-#define WL3501_MGMT_CAPABILITY_IBSS 0x0002 /* - " - */
-#define WL3501_MGMT_CAPABILITY_CF_POLLABLE 0x0004 /* - " - */
-#define WL3501_MGMT_CAPABILITY_CF_POLL_REQUEST 0x0008 /* - " - */
-#define WL3501_MGMT_CAPABILITY_PRIVACY 0x0010 /* - " - */
-
-#define IW_REG_DOMAIN_FCC 0x10 /* Channel 1 to 11 USA */
-#define IW_REG_DOMAIN_DOC 0x20 /* Channel 1 to 11 Canada */
-#define IW_REG_DOMAIN_ETSI 0x30 /* Channel 1 to 13 Europe */
-#define IW_REG_DOMAIN_SPAIN 0x31 /* Channel 10 to 11 Spain */
-#define IW_REG_DOMAIN_FRANCE 0x32 /* Channel 10 to 13 France */
-#define IW_REG_DOMAIN_MKK 0x40 /* Channel 14 Japan */
-#define IW_REG_DOMAIN_MKK1 0x41 /* Channel 1-14 Japan */
-#define IW_REG_DOMAIN_ISRAEL 0x50 /* Channel 3 - 9 Israel */
-
-#define IW_MGMT_RATE_LABEL_MANDATORY 128 /* MSB */
-
-enum iw_mgmt_rate_labels {
- IW_MGMT_RATE_LABEL_1MBIT = 2,
- IW_MGMT_RATE_LABEL_2MBIT = 4,
- IW_MGMT_RATE_LABEL_5_5MBIT = 11,
- IW_MGMT_RATE_LABEL_11MBIT = 22,
-};
-
-enum iw_mgmt_info_element_ids {
- IW_MGMT_INFO_ELEMENT_SSID, /* Service Set Identity */
- IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
- IW_MGMT_INFO_ELEMENT_FH_PARAMETER_SET,
- IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
- IW_MGMT_INFO_ELEMENT_CS_PARAMETER_SET,
- IW_MGMT_INFO_ELEMENT_CS_TIM, /* Traffic Information Map */
- IW_MGMT_INFO_ELEMENT_IBSS_PARAMETER_SET,
- /* 7-15: Reserved, unused */
- IW_MGMT_INFO_ELEMENT_CHALLENGE_TEXT = 16,
- /* 17-31 Reserved for challenge text extension */
- /* 32-255 Reserved, unused */
-};
-
-struct iw_mgmt_info_element {
- u8 id; /* one of enum iw_mgmt_info_element_ids,
- but sizeof(enum) > sizeof(u8) :-( */
- u8 len;
- u8 data[];
-} __packed;
-
-struct iw_mgmt_essid_pset {
- struct iw_mgmt_info_element el;
- u8 essid[IW_ESSID_MAX_SIZE];
-} __packed;
-
-/*
- * According to 802.11 Wireless Networks, the definitive guide - O'Reilly
- * Pg 75
- */
-#define IW_DATA_RATE_MAX_LABELS 8
-
-struct iw_mgmt_data_rset {
- struct iw_mgmt_info_element el;
- u8 data_rate_labels[IW_DATA_RATE_MAX_LABELS];
-} __packed;
-
-struct iw_mgmt_ds_pset {
- struct iw_mgmt_info_element el;
- u8 chan;
-} __packed;
-
-struct iw_mgmt_cf_pset {
- struct iw_mgmt_info_element el;
- u8 cfp_count;
- u8 cfp_period;
- u16 cfp_max_duration;
- u16 cfp_dur_remaining;
-} __packed;
-
-struct iw_mgmt_ibss_pset {
- struct iw_mgmt_info_element el;
- u16 atim_window;
-} __packed;
-
-struct wl3501_tx_hdr {
- u16 tx_cnt;
- u8 sync[16];
- u16 sfd;
- u8 signal;
- u8 service;
- u16 len;
- u16 crc16;
- u16 frame_ctrl;
- u16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctrl;
- u8 addr4[ETH_ALEN];
-};
-
-struct wl3501_rx_hdr {
- u16 rx_next_blk;
- u16 rc_next_frame_blk;
- u8 rx_blk_ctrl;
- u8 rx_next_frame;
- u8 rx_next_frame1;
- u8 rssi;
- char time[8];
- u8 signal;
- u8 service;
- u16 len;
- u16 crc16;
- u16 frame_ctrl;
- u16 duration;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq;
- u8 addr4[ETH_ALEN];
-};
-
-struct wl3501_start_req {
- u16 next_blk;
- u8 sig_id;
- u8 bss_type;
- u16 beacon_period;
- u16 dtim_period;
- u16 probe_delay;
- u16 cap_info;
- struct iw_mgmt_essid_pset ssid;
- struct iw_mgmt_data_rset bss_basic_rset;
- struct iw_mgmt_data_rset operational_rset;
- struct iw_mgmt_cf_pset cf_pset;
- struct iw_mgmt_ds_pset ds_pset;
- struct iw_mgmt_ibss_pset ibss_pset;
-};
-
-struct wl3501_assoc_req {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 timeout;
- u16 cap_info;
- u16 listen_interval;
- u8 mac_addr[ETH_ALEN];
-};
-
-struct wl3501_assoc_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 status;
-};
-
-struct wl3501_assoc_ind {
- u16 next_blk;
- u8 sig_id;
- u8 mac_addr[ETH_ALEN];
-};
-
-struct wl3501_auth_req {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 type;
- u16 timeout;
- u8 mac_addr[ETH_ALEN];
-};
-
-struct wl3501_auth_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 type;
- u16 status;
- u8 mac_addr[ETH_ALEN];
-};
-
-struct wl3501_get_req {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 mib_attrib;
-};
-
-struct wl3501_get_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 mib_status;
- u16 mib_attrib;
- u8 mib_value[100];
-};
-
-struct wl3501_req {
- u16 beacon_period;
- u16 dtim_period;
- u16 cap_info;
- u8 bss_type;
- u8 bssid[ETH_ALEN];
- struct iw_mgmt_essid_pset ssid;
- struct iw_mgmt_ds_pset ds_pset;
- struct iw_mgmt_cf_pset cf_pset;
- struct iw_mgmt_ibss_pset ibss_pset;
- struct iw_mgmt_data_rset bss_basic_rset;
-};
-
-struct wl3501_join_req {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- struct iw_mgmt_data_rset operational_rset;
- u16 reserved2;
- u16 timeout;
- u16 probe_delay;
- u8 timestamp[8];
- u8 local_time[8];
- struct wl3501_req req;
-};
-
-struct wl3501_join_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 status;
-};
-
-struct wl3501_pwr_mgmt_req {
- u16 next_blk;
- u8 sig_id;
- u8 pwr_save;
- u8 wake_up;
- u8 receive_dtims;
-};
-
-struct wl3501_pwr_mgmt_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 status;
-};
-
-struct wl3501_scan_req {
- u16 next_blk;
- u8 sig_id;
- u8 bss_type;
- u16 probe_delay;
- u16 min_chan_time;
- u16 max_chan_time;
- u8 chan_list[14];
- u8 bssid[ETH_ALEN];
- struct iw_mgmt_essid_pset ssid;
- enum wl3501_scan_type scan_type;
-};
-
-struct wl3501_scan_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 status;
- char timestamp[8];
- char localtime[8];
- struct wl3501_req req;
- u8 rssi;
-};
-
-struct wl3501_start_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 status;
-};
-
-struct wl3501_md_req {
- u16 next_blk;
- u8 sig_id;
- u8 routing;
- u16 data;
- u16 size;
- u8 pri;
- u8 service_class;
- struct {
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
- } addr;
-};
-
-struct wl3501_md_ind {
- u16 next_blk;
- u8 sig_id;
- u8 routing;
- u16 data;
- u16 size;
- u8 reception;
- u8 pri;
- u8 service_class;
- struct {
- u8 daddr[ETH_ALEN];
- u8 saddr[ETH_ALEN];
- } addr;
-};
-
-struct wl3501_md_confirm {
- u16 next_blk;
- u8 sig_id;
- u8 reserved;
- u16 data;
- u8 status;
- u8 pri;
- u8 service_class;
-};
-
-struct wl3501_resync_req {
- u16 next_blk;
- u8 sig_id;
-};
-
-/* Definitions for supporting clone adapters. */
-/* System Interface Registers (SIR space) */
-#define WL3501_NIC_GCR ((u8)0x00) /* SIR0 - General Conf Register */
-#define WL3501_NIC_BSS ((u8)0x01) /* SIR1 - Bank Switching Select Reg */
-#define WL3501_NIC_LMAL ((u8)0x02) /* SIR2 - Local Mem addr Reg [7:0] */
-#define WL3501_NIC_LMAH ((u8)0x03) /* SIR3 - Local Mem addr Reg [14:8] */
-#define WL3501_NIC_IODPA ((u8)0x04) /* SIR4 - I/O Data Port A */
-#define WL3501_NIC_IODPB ((u8)0x05) /* SIR5 - I/O Data Port B */
-#define WL3501_NIC_IODPC ((u8)0x06) /* SIR6 - I/O Data Port C */
-#define WL3501_NIC_IODPD ((u8)0x07) /* SIR7 - I/O Data Port D */
-
-/* Bits in GCR */
-#define WL3501_GCR_SWRESET ((u8)0x80)
-#define WL3501_GCR_CORESET ((u8)0x40)
-#define WL3501_GCR_DISPWDN ((u8)0x20)
-#define WL3501_GCR_ECWAIT ((u8)0x10)
-#define WL3501_GCR_ECINT ((u8)0x08)
-#define WL3501_GCR_INT2EC ((u8)0x04)
-#define WL3501_GCR_ENECINT ((u8)0x02)
-#define WL3501_GCR_DAM ((u8)0x01)
-
-/* Bits in BSS (Bank Switching Select Register) */
-#define WL3501_BSS_FPAGE0 ((u8)0x20) /* Flash memory page0 */
-#define WL3501_BSS_FPAGE1 ((u8)0x28)
-#define WL3501_BSS_FPAGE2 ((u8)0x30)
-#define WL3501_BSS_FPAGE3 ((u8)0x38)
-#define WL3501_BSS_SPAGE0 ((u8)0x00) /* SRAM page0 */
-#define WL3501_BSS_SPAGE1 ((u8)0x08)
-#define WL3501_BSS_SPAGE2 ((u8)0x10)
-#define WL3501_BSS_SPAGE3 ((u8)0x18)
-
-/* Define Driver Interface */
-/* Refer IEEE 802.11 */
-/* Tx packet header, include PLCP and MPDU */
-/* Tx PLCP Header */
-struct wl3501_80211_tx_plcp_hdr {
- u8 sync[16];
- u16 sfd;
- u8 signal;
- u8 service;
- u16 len;
- u16 crc16;
-} __packed;
-
-struct wl3501_80211_tx_hdr {
- struct wl3501_80211_tx_plcp_hdr pclp_hdr;
- struct ieee80211_hdr mac_hdr;
-} __packed __aligned(2);
-
-/*
- Reserve the beginning Tx space for descriptor use.
-
- TxBlockOffset --> *----*----*----*----* \
- (TxFreeDesc) | 0 | 1 | 2 | 3 | \
- | 4 | 5 | 6 | 7 | |
- | 8 | 9 | 10 | 11 | TX_DESC * 20
- | 12 | 13 | 14 | 15 | |
- | 16 | 17 | 18 | 19 | /
- TxBufferBegin --> *----*----*----*----* /
- (TxBufferHead) | |
- (TxBufferTail) | |
- | Send Buffer |
- | |
- | |
- *-------------------*
- TxBufferEnd -------------------------/
-
-*/
-
-struct wl3501_card {
- int base_addr;
- u8 mac_addr[ETH_ALEN];
- spinlock_t lock;
- wait_queue_head_t wait;
- struct wl3501_get_confirm sig_get_confirm;
- struct wl3501_pwr_mgmt_confirm sig_pwr_mgmt_confirm;
- u16 tx_buffer_size;
- u16 tx_buffer_head;
- u16 tx_buffer_tail;
- u16 tx_buffer_cnt;
- u16 esbq_req_start;
- u16 esbq_req_end;
- u16 esbq_req_head;
- u16 esbq_req_tail;
- u16 esbq_confirm_start;
- u16 esbq_confirm_end;
- u16 esbq_confirm;
- struct iw_mgmt_essid_pset essid;
- struct iw_mgmt_essid_pset keep_essid;
- u8 bssid[ETH_ALEN];
- int net_type;
- char nick[32];
- char card_name[32];
- char firmware_date[32];
- u8 chan;
- u8 cap_info;
- u16 start_seg;
- u16 bss_cnt;
- u16 join_sta_bss;
- u8 rssi;
- u8 adhoc_times;
- u8 reg_domain;
- u8 version[2];
- struct wl3501_scan_confirm bss_set[20];
-
- struct iw_statistics wstats;
- struct iw_spy_data spy_data;
- struct iw_public_data wireless_data;
- struct pcmcia_device *p_dev;
-};
-#endif
diff --git a/drivers/net/wireless/legacy/wl3501_cs.c b/drivers/net/wireless/legacy/wl3501_cs.c
deleted file mode 100644
index c45c4b7cb..000000000
--- a/drivers/net/wireless/legacy/wl3501_cs.c
+++ /dev/null
@@ -1,2036 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * WL3501 Wireless LAN PCMCIA Card Driver for Linux
- * Written originally for Linux 2.0.30 by Fox Chen, mhchen@golf.ccl.itri.org.tw
- * Ported to 2.2, 2.4 & 2.5 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- * Wireless extensions in 2.4 by Gustavo Niemeyer <niemeyer@conectiva.com>
- *
- * References used by Fox Chen while writing the original driver for 2.0.30:
- *
- * 1. WL24xx packet drivers (tooasm.asm)
- * 2. Access Point Firmware Interface Specification for IEEE 802.11 SUTRO
- * 3. IEEE 802.11
- * 4. Linux network driver (/usr/src/linux/drivers/net)
- * 5. ISA card driver - wl24.c
- * 6. Linux PCMCIA skeleton driver - skeleton.c
- * 7. Linux PCMCIA 3c589 network driver - 3c589_cs.c
- *
- * Tested with WL2400 firmware 1.2, Linux 2.0.30, and pcmcia-cs-2.9.12
- * 1. Performance: about 165 Kbytes/sec in TCP/IP with Ad-Hoc mode.
- * rsh 192.168.1.3 "dd if=/dev/zero bs=1k count=1000" > /dev/null
- * (Specification 2M bits/sec. is about 250 Kbytes/sec., but we must deduct
- * ETHER/IP/UDP/TCP header, and acknowledgement overhead)
- *
- * Tested with Planet AP in 2.4.17, 184 Kbytes/s in UDP in Infrastructure mode,
- * 173 Kbytes/s in TCP.
- *
- * Tested with Planet AP in 2.5.73-bk, 216 Kbytes/s in Infrastructure mode
- * with a SMP machine (dual pentium 100), using pktgen, 432 pps (pkt_size = 60)
- */
-
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/fcntl.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-
-#include <net/iw_handler.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#include "wl3501.h"
-
-#ifndef __i386__
-#define slow_down_io()
-#endif
-
-/* For rough constant delay */
-#define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); }
-
-#define wl3501_outb(a, b) { outb(a, b); slow_down_io(); }
-#define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); }
-#define wl3501_outsb(a, b, c) { outsb(a, b, c); slow_down_io(); }
-
-#define WL3501_RELEASE_TIMEOUT (25 * HZ)
-#define WL3501_MAX_ADHOC_TRIES 16
-
-#define WL3501_RESUME 0
-#define WL3501_SUSPEND 1
-
-static int wl3501_config(struct pcmcia_device *link);
-static void wl3501_release(struct pcmcia_device *link);
-
-static const struct {
- int reg_domain;
- int min, max, deflt;
-} iw_channel_table[] = {
- {
- .reg_domain = IW_REG_DOMAIN_FCC,
- .min = 1,
- .max = 11,
- .deflt = 1,
- },
- {
- .reg_domain = IW_REG_DOMAIN_DOC,
- .min = 1,
- .max = 11,
- .deflt = 1,
- },
- {
- .reg_domain = IW_REG_DOMAIN_ETSI,
- .min = 1,
- .max = 13,
- .deflt = 1,
- },
- {
- .reg_domain = IW_REG_DOMAIN_SPAIN,
- .min = 10,
- .max = 11,
- .deflt = 10,
- },
- {
- .reg_domain = IW_REG_DOMAIN_FRANCE,
- .min = 10,
- .max = 13,
- .deflt = 10,
- },
- {
- .reg_domain = IW_REG_DOMAIN_MKK,
- .min = 14,
- .max = 14,
- .deflt = 14,
- },
- {
- .reg_domain = IW_REG_DOMAIN_MKK1,
- .min = 1,
- .max = 14,
- .deflt = 1,
- },
- {
- .reg_domain = IW_REG_DOMAIN_ISRAEL,
- .min = 3,
- .max = 9,
- .deflt = 9,
- },
-};
-
-/**
- * iw_valid_channel - validate channel in regulatory domain
- * @reg_domain: regulatory domain
- * @channel: channel to validate
- *
- * Returns 0 if invalid in the specified regulatory domain, non-zero if valid.
- */
-static int iw_valid_channel(int reg_domain, int channel)
-{
- int i, rc = 0;
-
- for (i = 0; i < ARRAY_SIZE(iw_channel_table); i++)
- if (reg_domain == iw_channel_table[i].reg_domain) {
- rc = channel >= iw_channel_table[i].min &&
- channel <= iw_channel_table[i].max;
- break;
- }
- return rc;
-}
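-
-/* Usage sketch: channel 13 is valid under ETSI (1-13) but not FCC (1-11),
- * so iw_valid_channel(IW_REG_DOMAIN_ETSI, 13) is non-zero while
- * iw_valid_channel(IW_REG_DOMAIN_FCC, 13) returns 0.
- */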
-
-/**
- * iw_default_channel - get default channel for a regulatory domain
- * @reg_domain: regulatory domain
- *
- * Returns the default channel for a regulatory domain
- */
-static int iw_default_channel(int reg_domain)
-{
- int i, rc = 1;
-
- for (i = 0; i < ARRAY_SIZE(iw_channel_table); i++)
- if (reg_domain == iw_channel_table[i].reg_domain) {
- rc = iw_channel_table[i].deflt;
- break;
- }
- return rc;
-}
-
-static void iw_set_mgmt_info_element(enum iw_mgmt_info_element_ids id,
- struct iw_mgmt_info_element *el,
- void *value, int len)
-{
- el->id = id;
- el->len = len;
- memcpy(el->data, value, len);
-}
-
-static void iw_copy_mgmt_info_element(struct iw_mgmt_info_element *to,
- struct iw_mgmt_info_element *from)
-{
- iw_set_mgmt_info_element(from->id, to, from->data, from->len);
-}
-
-static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
-{
- wl3501_outb(page, this->base_addr + WL3501_NIC_BSS);
-}
-
-/*
- * Get Ethernet MAC address.
- *
- * WARNING: We switch to FPAGE0 and switch back again.
- * Make sure no other WL function is being called by the ISR meanwhile.
- */
-static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
-{
- int base_addr = this->base_addr;
-
- /* get MAC addr */
- wl3501_outb(WL3501_BSS_FPAGE3, base_addr + WL3501_NIC_BSS); /* BSS */
- wl3501_outb(0x00, base_addr + WL3501_NIC_LMAL); /* LMAL */
- wl3501_outb(0x40, base_addr + WL3501_NIC_LMAH); /* LMAH */
-
- /* wait for reading EEPROM */
- WL3501_NOPLOOP(100);
- this->mac_addr[0] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->mac_addr[1] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->mac_addr[2] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->mac_addr[3] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->mac_addr[4] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->mac_addr[5] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->reg_domain = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- wl3501_outb(WL3501_BSS_FPAGE0, base_addr + WL3501_NIC_BSS);
- wl3501_outb(0x04, base_addr + WL3501_NIC_LMAL);
- wl3501_outb(0x40, base_addr + WL3501_NIC_LMAH);
- WL3501_NOPLOOP(100);
- this->version[0] = inb(base_addr + WL3501_NIC_IODPA);
- WL3501_NOPLOOP(100);
- this->version[1] = inb(base_addr + WL3501_NIC_IODPA);
- /* switch to SRAM Page 0 (for safety) */
- wl3501_switch_page(this, WL3501_BSS_SPAGE0);
-
- /* The MAC addr should be 00:60:... */
- return this->mac_addr[0] == 0x00 && this->mac_addr[1] == 0x60;
-}
-
-/**
- * wl3501_set_to_wla - Move 'size' bytes from PC to card
- * @this: Card
- * @dest: Card addressing space
- * @src: PC addressing space
- * @size: Bytes to move
- *
- * Move 'size' bytes from PC to card. (Shouldn't be interrupted)
- */
-static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
- int size)
-{
- /* switch to SRAM Page 0 */
- wl3501_switch_page(this, (dest & 0x8000) ? WL3501_BSS_SPAGE1 :
- WL3501_BSS_SPAGE0);
- /* set LMAL and LMAH */
- wl3501_outb(dest & 0xff, this->base_addr + WL3501_NIC_LMAL);
- wl3501_outb(((dest >> 8) & 0x7f), this->base_addr + WL3501_NIC_LMAH);
-
- /* rep out to Port A */
- wl3501_outsb(this->base_addr + WL3501_NIC_IODPA, src, size);
-}
-
-/**
- * wl3501_get_from_wla - Move 'size' bytes from card to PC
- * @this: Card
- * @src: Card addressing space
- * @dest: PC addressing space
- * @size: Bytes to move
- *
- * Move 'size' bytes from card to PC. (Shouldn't be interrupted)
- */
-static void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
- int size)
-{
- /* switch to SRAM Page 0 */
- wl3501_switch_page(this, (src & 0x8000) ? WL3501_BSS_SPAGE1 :
- WL3501_BSS_SPAGE0);
- /* set LMAL and LMAH */
- wl3501_outb(src & 0xff, this->base_addr + WL3501_NIC_LMAL);
- wl3501_outb((src >> 8) & 0x7f, this->base_addr + WL3501_NIC_LMAH);
-
- /* rep get from Port A */
- insb(this->base_addr + WL3501_NIC_IODPA, dest, size);
-}
-
-/*
- * Get/Allocate a free Tx Data Buffer
- *
- * *--------------*-----------------*----------------------------------*
- * | PLCP | MAC Header | DST SRC Data ... |
- * | (24 bytes) | (30 bytes) | (6) (6) (Ethernet Raw Data) |
- * *--------------*-----------------*----------------------------------*
- * \ \- IEEE 802.11 -/ \-------------- len --------------/
- * \-struct wl3501_80211_tx_hdr--/ \-------- Ethernet Frame -------/
- *
- * Return = Position in Card
- */
-static u16 wl3501_get_tx_buffer(struct wl3501_card *this, u16 len)
-{
- u16 next, blk_cnt = 0, zero = 0;
- u16 full_len = sizeof(struct wl3501_80211_tx_hdr) + len;
- u16 ret = 0;
-
- if (full_len > this->tx_buffer_cnt * 254)
- goto out;
- ret = this->tx_buffer_head;
- while (full_len) {
- if (full_len < 254)
- full_len = 0;
- else
- full_len -= 254;
- wl3501_get_from_wla(this, this->tx_buffer_head, &next,
- sizeof(next));
- if (!full_len)
- wl3501_set_to_wla(this, this->tx_buffer_head, &zero,
- sizeof(zero));
- this->tx_buffer_head = next;
- blk_cnt++;
-		/* ran out of blocks before the whole frame fit */
- if (!next && full_len) {
- this->tx_buffer_head = ret;
- ret = 0;
- goto out;
- }
- }
- this->tx_buffer_cnt -= blk_cnt;
-out:
- return ret;
-}
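-
-/* Illustrative sketch: buffers are chained in 254-byte blocks, so a frame
- * needs ceil(full_len / 254) blocks; e.g. a 1514-byte Ethernet frame plus
- * the 54-byte wl3501_80211_tx_hdr (24-byte PLCP + 30-byte MAC header, per
- * the diagram above) is DIV_ROUND_UP(1568, 254) = 7 blocks.
- */
-static u16 example_blocks_needed(u16 payload_len)
-{
-	return DIV_ROUND_UP(sizeof(struct wl3501_80211_tx_hdr) + payload_len,
-			    254);
-}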
-
-/*
- * Free an allocated Tx Buffer. ptr must be correct position.
- */
-static void wl3501_free_tx_buffer(struct wl3501_card *this, u16 ptr)
-{
-	/* if the free list is empty, start it at ptr;
-	 * otherwise link ptr after the current tail */
- if (!this->tx_buffer_head)
- this->tx_buffer_head = ptr;
- else
- wl3501_set_to_wla(this, this->tx_buffer_tail,
- &ptr, sizeof(ptr));
- while (ptr) {
- u16 next;
-
- this->tx_buffer_cnt++;
- wl3501_get_from_wla(this, ptr, &next, sizeof(next));
- this->tx_buffer_tail = ptr;
- ptr = next;
- }
-}
-
-static int wl3501_esbq_req_test(struct wl3501_card *this)
-{
- u8 tmp = 0;
-
- wl3501_get_from_wla(this, this->esbq_req_head + 3, &tmp, sizeof(tmp));
- return tmp & 0x80;
-}
-
-static void wl3501_esbq_req(struct wl3501_card *this, u16 *ptr)
-{
- u16 tmp = 0;
-
- wl3501_set_to_wla(this, this->esbq_req_head, ptr, 2);
- wl3501_set_to_wla(this, this->esbq_req_head + 2, &tmp, sizeof(tmp));
- this->esbq_req_head += 4;
- if (this->esbq_req_head >= this->esbq_req_end)
- this->esbq_req_head = this->esbq_req_start;
-}
-
-static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size)
-{
- int rc = -EIO;
-
- if (wl3501_esbq_req_test(this)) {
- u16 ptr = wl3501_get_tx_buffer(this, sig_size);
- if (ptr) {
- wl3501_set_to_wla(this, ptr, sig, sig_size);
- wl3501_esbq_req(this, &ptr);
- rc = 0;
- }
- }
- return rc;
-}
-
-static int wl3501_request_mib(struct wl3501_card *this, u8 index, void *bf)
-{
- struct wl3501_get_req sig = {
- .sig_id = WL3501_SIG_GET_REQ,
- .mib_attrib = index,
- };
- unsigned long flags;
- int rc = -EIO;
-
- spin_lock_irqsave(&this->lock, flags);
- if (wl3501_esbq_req_test(this)) {
- u16 ptr = wl3501_get_tx_buffer(this, sizeof(sig));
- if (ptr) {
- wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
- wl3501_esbq_req(this, &ptr);
- this->sig_get_confirm.mib_status = 255;
- rc = 0;
- }
- }
- spin_unlock_irqrestore(&this->lock, flags);
-
- return rc;
-}
-
-static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
- void *bf, int size)
-{
- int rc;
-
- rc = wl3501_request_mib(this, index, bf);
- if (rc)
- return rc;
-
- rc = wait_event_interruptible(this->wait,
- this->sig_get_confirm.mib_status != 255);
- if (rc)
- return rc;
-
- memcpy(bf, this->sig_get_confirm.mib_value, size);
- return 0;
-}
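-
-/* Sketch of the confirm side that wl3501_get_mib_value() sleeps on,
- * assuming the interrupt path copies the confirm block from card memory
- * and wakes the queue (handler name illustrative only):
- */
-static void example_get_confirm(struct wl3501_card *this, u16 addr)
-{
-	wl3501_get_from_wla(this, addr, &this->sig_get_confirm,
-			    sizeof(this->sig_get_confirm));
-	wake_up(&this->wait);	/* mib_status != 255 releases the waiter */
-}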
-
-static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
-{
- struct wl3501_pwr_mgmt_req sig = {
- .sig_id = WL3501_SIG_PWR_MGMT_REQ,
- .pwr_save = suspend,
- .wake_up = !suspend,
- .receive_dtims = 10,
- };
- unsigned long flags;
- int rc = -EIO;
-
- spin_lock_irqsave(&this->lock, flags);
- if (wl3501_esbq_req_test(this)) {
- u16 ptr = wl3501_get_tx_buffer(this, sizeof(sig));
- if (ptr) {
- wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
- wl3501_esbq_req(this, &ptr);
- this->sig_pwr_mgmt_confirm.status = 255;
- spin_unlock_irqrestore(&this->lock, flags);
- rc = wait_event_interruptible(this->wait,
- this->sig_pwr_mgmt_confirm.status != 255);
- printk(KERN_INFO "%s: %s status=%d\n", __func__,
- suspend ? "suspend" : "resume",
- this->sig_pwr_mgmt_confirm.status);
- goto out;
- }
- }
- spin_unlock_irqrestore(&this->lock, flags);
-out:
- return rc;
-}
-
-/**
- * wl3501_send_pkt - Send a packet.
- * @this: Card
- * @data: Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr,
- * data[6] - data[11] is Src MAC Addr)
- * @len: Packet length
- * Ref: IEEE 802.11
- */
-static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
-{
- u16 bf, sig_bf, next, tmplen, pktlen;
- struct wl3501_md_req sig = {
- .sig_id = WL3501_SIG_MD_REQ,
- };
- size_t sig_addr_len = sizeof(sig.addr);
- u8 *pdata = (char *)data;
- int rc = -EIO;
-
- if (wl3501_esbq_req_test(this)) {
- sig_bf = wl3501_get_tx_buffer(this, sizeof(sig));
- rc = -ENOMEM;
- if (!sig_bf) /* No free buffer available */
- goto out;
- bf = wl3501_get_tx_buffer(this, len + 26 + 24);
- if (!bf) {
- /* No free buffer available */
- wl3501_free_tx_buffer(this, sig_bf);
- goto out;
- }
- rc = 0;
- memcpy(&sig.addr, pdata, sig_addr_len);
- pktlen = len - sig_addr_len;
- pdata += sig_addr_len;
- sig.data = bf;
- if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
- u8 addr4[ETH_ALEN] = {
- [0] = 0xAA, [1] = 0xAA, [2] = 0x03, [4] = 0x00,
- };
-
- wl3501_set_to_wla(this, bf + 2 +
- offsetof(struct wl3501_tx_hdr, addr4),
- addr4, sizeof(addr4));
- sig.size = pktlen + 24 + 4 + 6;
- if (pktlen > (254 - sizeof(struct wl3501_tx_hdr))) {
- tmplen = 254 - sizeof(struct wl3501_tx_hdr);
- pktlen -= tmplen;
- } else {
- tmplen = pktlen;
- pktlen = 0;
- }
- wl3501_set_to_wla(this,
- bf + 2 + sizeof(struct wl3501_tx_hdr),
- pdata, tmplen);
- pdata += tmplen;
- wl3501_get_from_wla(this, bf, &next, sizeof(next));
- bf = next;
- } else {
- sig.size = pktlen + 24 + 4 - 2;
- pdata += 2;
- pktlen -= 2;
- if (pktlen > (254 - sizeof(struct wl3501_tx_hdr) + 6)) {
- tmplen = 254 - sizeof(struct wl3501_tx_hdr) + 6;
- pktlen -= tmplen;
- } else {
- tmplen = pktlen;
- pktlen = 0;
- }
- wl3501_set_to_wla(this, bf + 2 +
- offsetof(struct wl3501_tx_hdr, addr4),
- pdata, tmplen);
- pdata += tmplen;
- wl3501_get_from_wla(this, bf, &next, sizeof(next));
- bf = next;
- }
- while (pktlen > 0) {
- if (pktlen > 254) {
- tmplen = 254;
- pktlen -= 254;
- } else {
- tmplen = pktlen;
- pktlen = 0;
- }
- wl3501_set_to_wla(this, bf + 2, pdata, tmplen);
- pdata += tmplen;
- wl3501_get_from_wla(this, bf, &next, sizeof(next));
- bf = next;
- }
- wl3501_set_to_wla(this, sig_bf, &sig, sizeof(sig));
- wl3501_esbq_req(this, &sig_bf);
- }
-out:
- return rc;
-}
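-
-/* Illustrative note on the (*pdata) * 256 + *(pdata + 1) > 1500 test in
- * wl3501_send_pkt() above: it reads the big-endian type/length field.
- * EtherType frames get an 802.2 LLC/SNAP header (AA AA 03 00 00 00)
- * spliced in via the addr4 slot, while 802.3 length frames (<= 1500) are
- * sent without it; a sketch of just the check:
- */
-static bool example_needs_snap(const u8 *type_field)
-{
-	/* > 1500 means EtherType (e.g. 0x0800 for IPv4), not a length */
-	return (type_field[0] << 8 | type_field[1]) > 1500;
-}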
-
-static int wl3501_mgmt_resync(struct wl3501_card *this)
-{
- struct wl3501_resync_req sig = {
- .sig_id = WL3501_SIG_RESYNC_REQ,
- };
-
- return wl3501_esbq_exec(this, &sig, sizeof(sig));
-}
-
-static inline int wl3501_fw_bss_type(struct wl3501_card *this)
-{
- return this->net_type == IW_MODE_INFRA ? WL3501_NET_TYPE_INFRA :
- WL3501_NET_TYPE_ADHOC;
-}
-
-static inline int wl3501_fw_cap_info(struct wl3501_card *this)
-{
- return this->net_type == IW_MODE_INFRA ? WL3501_MGMT_CAPABILITY_ESS :
- WL3501_MGMT_CAPABILITY_IBSS;
-}
-
-static int wl3501_mgmt_scan(struct wl3501_card *this, u16 chan_time)
-{
- struct wl3501_scan_req sig = {
- .sig_id = WL3501_SIG_SCAN_REQ,
- .scan_type = WL3501_SCAN_TYPE_ACTIVE,
- .probe_delay = 0x10,
- .min_chan_time = chan_time,
- .max_chan_time = chan_time,
- .bss_type = wl3501_fw_bss_type(this),
- };
-
- this->bss_cnt = this->join_sta_bss = 0;
- return wl3501_esbq_exec(this, &sig, sizeof(sig));
-}
-
-static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
-{
- struct wl3501_join_req sig = {
- .sig_id = WL3501_SIG_JOIN_REQ,
- .timeout = 10,
- .req.ds_pset = {
- .el = {
- .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
- .len = 1,
- },
- .chan = this->chan,
- },
- };
-
- memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
- return wl3501_esbq_exec(this, &sig, sizeof(sig));
-}
-
-static int wl3501_mgmt_start(struct wl3501_card *this)
-{
- struct wl3501_start_req sig = {
- .sig_id = WL3501_SIG_START_REQ,
- .beacon_period = 400,
- .dtim_period = 1,
- .ds_pset = {
- .el = {
- .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
- .len = 1,
- },
- .chan = this->chan,
- },
- .bss_basic_rset = {
- .el = {
- .id = IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
- .len = 2,
- },
- .data_rate_labels = {
- [0] = IW_MGMT_RATE_LABEL_MANDATORY |
- IW_MGMT_RATE_LABEL_1MBIT,
- [1] = IW_MGMT_RATE_LABEL_MANDATORY |
- IW_MGMT_RATE_LABEL_2MBIT,
- },
- },
- .operational_rset = {
- .el = {
- .id = IW_MGMT_INFO_ELEMENT_SUPPORTED_RATES,
- .len = 2,
- },
- .data_rate_labels = {
- [0] = IW_MGMT_RATE_LABEL_MANDATORY |
- IW_MGMT_RATE_LABEL_1MBIT,
- [1] = IW_MGMT_RATE_LABEL_MANDATORY |
- IW_MGMT_RATE_LABEL_2MBIT,
- },
- },
- .ibss_pset = {
- .el = {
- .id = IW_MGMT_INFO_ELEMENT_IBSS_PARAMETER_SET,
- .len = 2,
- },
- .atim_window = 10,
- },
- .bss_type = wl3501_fw_bss_type(this),
- .cap_info = wl3501_fw_cap_info(this),
- };
-
- iw_copy_mgmt_info_element(&sig.ssid.el, &this->essid.el);
- iw_copy_mgmt_info_element(&this->keep_essid.el, &this->essid.el);
- return wl3501_esbq_exec(this, &sig, sizeof(sig));
-}
-
-static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
-{
- u16 i = 0;
- int matchflag = 0;
- struct wl3501_scan_confirm sig;
-
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
- if (sig.status == WL3501_STATUS_SUCCESS) {
- pr_debug("success");
- if ((this->net_type == IW_MODE_INFRA &&
- (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
- (this->net_type == IW_MODE_ADHOC &&
- (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
- this->net_type == IW_MODE_AUTO) {
- if (!this->essid.el.len)
- matchflag = 1;
- else if (this->essid.el.len == 3 &&
- !memcmp(this->essid.essid, "ANY", 3))
- matchflag = 1;
- else if (this->essid.el.len != sig.req.ssid.el.len)
- matchflag = 0;
- else if (memcmp(this->essid.essid, sig.req.ssid.essid,
- this->essid.el.len))
- matchflag = 0;
- else
- matchflag = 1;
- if (matchflag) {
- for (i = 0; i < this->bss_cnt; i++) {
- if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
- sig.req.bssid)) {
- matchflag = 0;
- break;
- }
- }
- }
- if (matchflag && (i < 20)) {
- memcpy(&this->bss_set[i].req,
- &sig.req, sizeof(sig.req));
- this->bss_cnt++;
- this->rssi = sig.rssi;
- this->bss_set[i].rssi = sig.rssi;
- }
- }
- } else if (sig.status == WL3501_STATUS_TIMEOUT) {
- pr_debug("timeout");
- this->join_sta_bss = 0;
- for (i = this->join_sta_bss; i < this->bss_cnt; i++)
- if (!wl3501_mgmt_join(this, i))
- break;
- this->join_sta_bss = i;
- if (this->join_sta_bss == this->bss_cnt) {
- if (this->net_type == IW_MODE_INFRA)
- wl3501_mgmt_scan(this, 100);
- else {
- this->adhoc_times++;
- if (this->adhoc_times > WL3501_MAX_ADHOC_TRIES)
- wl3501_mgmt_start(this);
- else
- wl3501_mgmt_scan(this, 100);
- }
- }
- }
-}
-
-/**
- * wl3501_block_interrupt - Mask interrupt from SUTRO
- * @this: Card
- *
- * Mask interrupt from SUTRO. (i.e. SUTRO cannot interrupt the HOST)
- * Return: 1 if the interrupt was originally enabled
- */
-static int wl3501_block_interrupt(struct wl3501_card *this)
-{
- u8 old = inb(this->base_addr + WL3501_NIC_GCR);
- u8 new = old & (~(WL3501_GCR_ECINT | WL3501_GCR_INT2EC |
- WL3501_GCR_ENECINT));
-
- wl3501_outb(new, this->base_addr + WL3501_NIC_GCR);
- return old & WL3501_GCR_ENECINT;
-}
-
-/**
- * wl3501_unblock_interrupt - Enable interrupt from SUTRO
- * @this: Card
- *
- * Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST)
- * Return: 1 if the interrupt was originally enabled
- */
-static int wl3501_unblock_interrupt(struct wl3501_card *this)
-{
- u8 old = inb(this->base_addr + WL3501_NIC_GCR);
- u8 new = (old & ~(WL3501_GCR_ECINT | WL3501_GCR_INT2EC)) |
- WL3501_GCR_ENECINT;
-
- wl3501_outb(new, this->base_addr + WL3501_NIC_GCR);
- return old & WL3501_GCR_ENECINT;
-}
-
-/**
- * wl3501_receive - Receive data from the Receive Queue
- * @this: card
- * @bf: address of the host buffer
- * @size: size of the buffer
- */
-static u16 wl3501_receive(struct wl3501_card *this, u8 *bf, u16 size)
-{
- u16 next_addr, next_addr1;
- u8 *data = bf + 12;
-
- size -= 12;
- wl3501_get_from_wla(this, this->start_seg + 2,
- &next_addr, sizeof(next_addr));
- if (size > WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr)) {
- wl3501_get_from_wla(this,
- this->start_seg +
- sizeof(struct wl3501_rx_hdr), data,
- WL3501_BLKSZ -
- sizeof(struct wl3501_rx_hdr));
- size -= WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr);
- data += WL3501_BLKSZ - sizeof(struct wl3501_rx_hdr);
- } else {
- wl3501_get_from_wla(this,
- this->start_seg +
- sizeof(struct wl3501_rx_hdr),
- data, size);
- size = 0;
- }
- while (size > 0) {
- if (size > WL3501_BLKSZ - 5) {
- wl3501_get_from_wla(this, next_addr + 5, data,
- WL3501_BLKSZ - 5);
- size -= WL3501_BLKSZ - 5;
- data += WL3501_BLKSZ - 5;
- wl3501_get_from_wla(this, next_addr + 2, &next_addr1,
- sizeof(next_addr1));
- next_addr = next_addr1;
- } else {
- wl3501_get_from_wla(this, next_addr + 5, data, size);
- size = 0;
- }
- }
- return 0;
-}
-
-static void wl3501_esbq_req_free(struct wl3501_card *this)
-{
- u8 tmp;
- u16 addr;
-
- if (this->esbq_req_head == this->esbq_req_tail)
- goto out;
- wl3501_get_from_wla(this, this->esbq_req_tail + 3, &tmp, sizeof(tmp));
- if (!(tmp & 0x80))
- goto out;
- wl3501_get_from_wla(this, this->esbq_req_tail, &addr, sizeof(addr));
- wl3501_free_tx_buffer(this, addr);
- this->esbq_req_tail += 4;
- if (this->esbq_req_tail >= this->esbq_req_end)
- this->esbq_req_tail = this->esbq_req_start;
-out:
- return;
-}
-
-static int wl3501_esbq_confirm(struct wl3501_card *this)
-{
- u8 tmp;
-
- wl3501_get_from_wla(this, this->esbq_confirm + 3, &tmp, sizeof(tmp));
- return tmp & 0x80;
-}
-
-static void wl3501_online(struct net_device *dev)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- printk(KERN_INFO "%s: Wireless LAN online. BSSID: %pM\n",
- dev->name, this->bssid);
- netif_wake_queue(dev);
-}
-
-static void wl3501_esbq_confirm_done(struct wl3501_card *this)
-{
- u8 tmp = 0;
-
- wl3501_set_to_wla(this, this->esbq_confirm + 3, &tmp, sizeof(tmp));
- this->esbq_confirm += 4;
- if (this->esbq_confirm >= this->esbq_confirm_end)
- this->esbq_confirm = this->esbq_confirm_start;
-}
-
-static int wl3501_mgmt_auth(struct wl3501_card *this)
-{
- struct wl3501_auth_req sig = {
- .sig_id = WL3501_SIG_AUTH_REQ,
- .type = WL3501_SYS_TYPE_OPEN,
- .timeout = 1000,
- };
-
- pr_debug("entry");
- memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
- return wl3501_esbq_exec(this, &sig, sizeof(sig));
-}
-
-static int wl3501_mgmt_association(struct wl3501_card *this)
-{
- struct wl3501_assoc_req sig = {
- .sig_id = WL3501_SIG_ASSOC_REQ,
- .timeout = 1000,
- .listen_interval = 5,
- .cap_info = this->cap_info,
- };
-
- pr_debug("entry");
- memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
- return wl3501_esbq_exec(this, &sig, sizeof(sig));
-}
-
-static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
-{
- struct wl3501_card *this = netdev_priv(dev);
- struct wl3501_join_confirm sig;
-
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
- if (sig.status == WL3501_STATUS_SUCCESS) {
- if (this->net_type == IW_MODE_INFRA) {
- if (this->join_sta_bss < this->bss_cnt) {
- const int i = this->join_sta_bss;
- memcpy(this->bssid,
- this->bss_set[i].req.bssid, ETH_ALEN);
- this->chan = this->bss_set[i].req.ds_pset.chan;
- iw_copy_mgmt_info_element(&this->keep_essid.el,
- &this->bss_set[i].req.ssid.el);
- wl3501_mgmt_auth(this);
- }
- } else {
- const int i = this->join_sta_bss;
-
- memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
- this->chan = this->bss_set[i].req.ds_pset.chan;
- iw_copy_mgmt_info_element(&this->keep_essid.el,
- &this->bss_set[i].req.ssid.el);
- wl3501_online(dev);
- }
- } else {
- int i;
- this->join_sta_bss++;
- for (i = this->join_sta_bss; i < this->bss_cnt; i++)
- if (!wl3501_mgmt_join(this, i))
- break;
- this->join_sta_bss = i;
- if (this->join_sta_bss == this->bss_cnt) {
- if (this->net_type == IW_MODE_INFRA)
- wl3501_mgmt_scan(this, 100);
- else {
- this->adhoc_times++;
- if (this->adhoc_times > WL3501_MAX_ADHOC_TRIES)
- wl3501_mgmt_start(this);
- else
- wl3501_mgmt_scan(this, 100);
- }
- }
- }
-}
-
-static inline void wl3501_alarm_interrupt(struct net_device *dev,
- struct wl3501_card *this)
-{
- if (this->net_type == IW_MODE_INFRA) {
- printk(KERN_INFO "Wireless LAN offline\n");
- netif_stop_queue(dev);
- wl3501_mgmt_resync(this);
- }
-}
-
-static inline void wl3501_md_confirm_interrupt(struct net_device *dev,
- struct wl3501_card *this,
- u16 addr)
-{
- struct wl3501_md_confirm sig;
-
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
- wl3501_free_tx_buffer(this, sig.data);
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
-}
-
-static inline void wl3501_md_ind_interrupt(struct net_device *dev,
- struct wl3501_card *this, u16 addr)
-{
- struct wl3501_md_ind sig;
- struct sk_buff *skb;
- u8 rssi, addr4[ETH_ALEN];
- u16 pkt_len;
-
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
- this->start_seg = sig.data;
- wl3501_get_from_wla(this,
- sig.data + offsetof(struct wl3501_rx_hdr, rssi),
- &rssi, sizeof(rssi));
- this->rssi = rssi <= 63 ? (rssi * 100) / 64 : 255;
-
- wl3501_get_from_wla(this,
- sig.data +
- offsetof(struct wl3501_rx_hdr, addr4),
- &addr4, sizeof(addr4));
- if (!(addr4[0] == 0xAA && addr4[1] == 0xAA &&
- addr4[2] == 0x03 && addr4[4] == 0x00)) {
- printk(KERN_INFO "Unsupported packet type!\n");
- return;
- }
- pkt_len = sig.size + 12 - 24 - 4 - 6;
-
- skb = dev_alloc_skb(pkt_len + 5);
-
- if (!skb) {
- printk(KERN_WARNING "%s: Can't alloc a sk_buff of size %d.\n",
- dev->name, pkt_len);
- dev->stats.rx_dropped++;
- } else {
- skb->dev = dev;
- skb_reserve(skb, 2); /* IP headers on 16-byte boundaries */
- skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
- sizeof(sig.addr));
- wl3501_receive(this, skb->data, pkt_len);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- }
-}
-
-static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this,
- u16 addr, void *sig, int size)
-{
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &this->sig_get_confirm,
- sizeof(this->sig_get_confirm));
- wake_up(&this->wait);
-}
-
-static inline void wl3501_start_confirm_interrupt(struct net_device *dev,
- struct wl3501_card *this,
- u16 addr)
-{
- struct wl3501_start_confirm sig;
-
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
- if (sig.status == WL3501_STATUS_SUCCESS)
- netif_wake_queue(dev);
-}
-
-static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev,
- u16 addr)
-{
- struct wl3501_card *this = netdev_priv(dev);
- struct wl3501_assoc_confirm sig;
-
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
-
- if (sig.status == WL3501_STATUS_SUCCESS)
- wl3501_online(dev);
-}
-
-static inline void wl3501_auth_confirm_interrupt(struct wl3501_card *this,
- u16 addr)
-{
- struct wl3501_auth_confirm sig;
-
- pr_debug("entry");
- wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
-
- if (sig.status == WL3501_STATUS_SUCCESS)
- wl3501_mgmt_association(this);
- else
- wl3501_mgmt_resync(this);
-}
-
-static inline void wl3501_rx_interrupt(struct net_device *dev)
-{
- int morepkts;
- u16 addr;
- u8 sig_id;
- struct wl3501_card *this = netdev_priv(dev);
-
- pr_debug("entry");
-loop:
- morepkts = 0;
- if (!wl3501_esbq_confirm(this))
- goto free;
- wl3501_get_from_wla(this, this->esbq_confirm, &addr, sizeof(addr));
- wl3501_get_from_wla(this, addr + 2, &sig_id, sizeof(sig_id));
-
- switch (sig_id) {
- case WL3501_SIG_DEAUTH_IND:
- case WL3501_SIG_DISASSOC_IND:
- case WL3501_SIG_ALARM:
- wl3501_alarm_interrupt(dev, this);
- break;
- case WL3501_SIG_MD_CONFIRM:
- wl3501_md_confirm_interrupt(dev, this, addr);
- break;
- case WL3501_SIG_MD_IND:
- wl3501_md_ind_interrupt(dev, this, addr);
- break;
- case WL3501_SIG_GET_CONFIRM:
- wl3501_get_confirm_interrupt(this, addr,
- &this->sig_get_confirm,
- sizeof(this->sig_get_confirm));
- break;
- case WL3501_SIG_PWR_MGMT_CONFIRM:
- wl3501_get_confirm_interrupt(this, addr,
- &this->sig_pwr_mgmt_confirm,
- sizeof(this->sig_pwr_mgmt_confirm));
- break;
- case WL3501_SIG_START_CONFIRM:
- wl3501_start_confirm_interrupt(dev, this, addr);
- break;
- case WL3501_SIG_SCAN_CONFIRM:
- wl3501_mgmt_scan_confirm(this, addr);
- break;
- case WL3501_SIG_JOIN_CONFIRM:
- wl3501_mgmt_join_confirm(dev, addr);
- break;
- case WL3501_SIG_ASSOC_CONFIRM:
- wl3501_assoc_confirm_interrupt(dev, addr);
- break;
- case WL3501_SIG_AUTH_CONFIRM:
- wl3501_auth_confirm_interrupt(this, addr);
- break;
- case WL3501_SIG_RESYNC_CONFIRM:
- wl3501_mgmt_resync(this); /* FIXME: should be resync_confirm */
- break;
- }
- wl3501_esbq_confirm_done(this);
- morepkts = 1;
- /* free request if necessary */
-free:
- wl3501_esbq_req_free(this);
- if (morepkts)
- goto loop;
-}
-
-static inline void wl3501_ack_interrupt(struct wl3501_card *this)
-{
- wl3501_outb(WL3501_GCR_ECINT, this->base_addr + WL3501_NIC_GCR);
-}
-
-/**
- * wl3501_interrupt - Hardware interrupt from card.
- * @irq: Interrupt number
- * @dev_id: net_device
- *
- * We must acknowledge the interrupt as soon as possible, and block the
- * interrupt from the same card immediately to prevent re-entry.
- *
- * Before accessing the Control_Status_Block, we must lock SUTRO first.
- * On the other hand, to prevent SUTRO from malfunctioning, we must
- * unlock the SUTRO as soon as possible.
- */
-static irqreturn_t wl3501_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct wl3501_card *this;
-
- this = netdev_priv(dev);
- spin_lock(&this->lock);
- wl3501_ack_interrupt(this);
- wl3501_block_interrupt(this);
- wl3501_rx_interrupt(dev);
- wl3501_unblock_interrupt(this);
- spin_unlock(&this->lock);
-
- return IRQ_HANDLED;
-}
-
-static int wl3501_reset_board(struct wl3501_card *this)
-{
- u8 tmp = 0;
- int i, rc = 0;
-
- /* Coreset */
- wl3501_outb_p(WL3501_GCR_CORESET, this->base_addr + WL3501_NIC_GCR);
- wl3501_outb_p(0, this->base_addr + WL3501_NIC_GCR);
- wl3501_outb_p(WL3501_GCR_CORESET, this->base_addr + WL3501_NIC_GCR);
-
- /* Reset SRAM 0x480 to zero */
- wl3501_set_to_wla(this, 0x480, &tmp, sizeof(tmp));
-
- /* Start up */
- wl3501_outb_p(0, this->base_addr + WL3501_NIC_GCR);
-
- WL3501_NOPLOOP(1024 * 50);
-
- wl3501_unblock_interrupt(this); /* acme: was commented */
-
- /* Polling Self_Test_Status */
- for (i = 0; i < 10000; i++) {
- wl3501_get_from_wla(this, 0x480, &tmp, sizeof(tmp));
-
- if (tmp == 'W') {
- /* firmware completed all tests successfully */
- tmp = 'A';
- wl3501_set_to_wla(this, 0x480, &tmp, sizeof(tmp));
- goto out;
- }
- WL3501_NOPLOOP(10);
- }
- printk(KERN_WARNING "%s: failed to reset the board!\n", __func__);
- rc = -ENODEV;
-out:
- return rc;
-}
-
-static int wl3501_init_firmware(struct wl3501_card *this)
-{
- u16 ptr, next;
- int rc = wl3501_reset_board(this);
-
- if (rc)
- goto fail;
- this->card_name[0] = '\0';
- wl3501_get_from_wla(this, 0x1a00,
- this->card_name, sizeof(this->card_name));
- this->card_name[sizeof(this->card_name) - 1] = '\0';
- this->firmware_date[0] = '\0';
- wl3501_get_from_wla(this, 0x1a40,
- this->firmware_date, sizeof(this->firmware_date));
- this->firmware_date[sizeof(this->firmware_date) - 1] = '\0';
- /* Switch to SRAM Page 0 */
- wl3501_switch_page(this, WL3501_BSS_SPAGE0);
- /* Read parameter from card */
- wl3501_get_from_wla(this, 0x482, &this->esbq_req_start, 2);
- wl3501_get_from_wla(this, 0x486, &this->esbq_req_end, 2);
- wl3501_get_from_wla(this, 0x488, &this->esbq_confirm_start, 2);
- wl3501_get_from_wla(this, 0x48c, &this->esbq_confirm_end, 2);
- wl3501_get_from_wla(this, 0x48e, &this->tx_buffer_head, 2);
- wl3501_get_from_wla(this, 0x492, &this->tx_buffer_size, 2);
- this->esbq_req_tail = this->esbq_req_head = this->esbq_req_start;
- this->esbq_req_end += this->esbq_req_start;
- this->esbq_confirm = this->esbq_confirm_start;
- this->esbq_confirm_end += this->esbq_confirm_start;
- /* Initialize the Tx buffer free list */
- this->tx_buffer_cnt = 1;
- ptr = this->tx_buffer_head;
- next = ptr + WL3501_BLKSZ;
- while ((next - this->tx_buffer_head) < this->tx_buffer_size) {
- this->tx_buffer_cnt++;
- wl3501_set_to_wla(this, ptr, &next, sizeof(next));
- ptr = next;
- next = ptr + WL3501_BLKSZ;
- }
- rc = 0;
- next = 0;
- wl3501_set_to_wla(this, ptr, &next, sizeof(next));
- this->tx_buffer_tail = ptr;
-out:
- return rc;
-fail:
- printk(KERN_WARNING "%s: failed!\n", __func__);
- goto out;
-}
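-
-/*
- * Note (inferred from the loop above, not documented elsewhere): the Tx
- * buffer area becomes a singly linked free list kept inside card SRAM --
- * the first u16 of each WL3501_BLKSZ block holds the address of the next
- * block, and a zero link terminates the list at tx_buffer_tail.
- */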
-
-static int wl3501_close(struct net_device *dev)
-{
- struct wl3501_card *this = netdev_priv(dev);
- unsigned long flags;
- struct pcmcia_device *link;
- link = this->p_dev;
-
- spin_lock_irqsave(&this->lock, flags);
- link->open--;
-
- /* Stop wl3501_hard_start_xmit() from now on */
- netif_stop_queue(dev);
- wl3501_ack_interrupt(this);
-
- /* Mask interrupts from the SUTRO */
- wl3501_block_interrupt(this);
-
- printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
- spin_unlock_irqrestore(&this->lock, flags);
- return 0;
-}
-
-/**
- * wl3501_reset - Reset the SUTRO.
- * @dev: network device
- *
- * It is almost the same as wl3501_open(). In fact, we could just call
- * wl3501_close() and then wl3501_open() again, but I would rather not
- * free_irq() while the driver is running; that seems dangerous.
- */
-static int wl3501_reset(struct net_device *dev)
-{
- struct wl3501_card *this = netdev_priv(dev);
- int rc = -ENODEV;
- unsigned long flags;
-
- spin_lock_irqsave(&this->lock, flags);
- wl3501_block_interrupt(this);
-
- if (wl3501_init_firmware(this)) {
- printk(KERN_WARNING "%s: Can't initialize Firmware!\n",
- dev->name);
- /* Free IRQ, and mark IRQ as unused */
- free_irq(dev->irq, dev);
- goto out;
- }
-
- /*
- * Queue has to be started only when the Card is Started
- */
- netif_stop_queue(dev);
- this->adhoc_times = 0;
- wl3501_ack_interrupt(this);
- wl3501_unblock_interrupt(this);
- wl3501_mgmt_scan(this, 100);
- pr_debug("%s: device reset", dev->name);
- rc = 0;
-out:
- spin_unlock_irqrestore(&this->lock, flags);
- return rc;
-}
-
-static void wl3501_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct net_device_stats *stats = &dev->stats;
- int rc;
-
- stats->tx_errors++;
- rc = wl3501_reset(dev);
- if (rc)
- printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
- dev->name, rc);
- else {
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
- }
-}
-
-/*
- * Return : 0 - OK
- * 1 - Could not transmit (dev_queue_xmit will queue it
- * and try to send it later)
- */
-static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- int enabled, rc;
- struct wl3501_card *this = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&this->lock, flags);
- enabled = wl3501_block_interrupt(this);
- rc = wl3501_send_pkt(this, skb->data, skb->len);
- if (enabled)
- wl3501_unblock_interrupt(this);
- if (rc) {
- ++dev->stats.tx_dropped;
- netif_stop_queue(dev);
- } else {
- ++dev->stats.tx_packets;
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq(skb);
-
- if (this->tx_buffer_cnt < 2)
- netif_stop_queue(dev);
- }
- spin_unlock_irqrestore(&this->lock, flags);
- return NETDEV_TX_OK;
-}
-
-static int wl3501_open(struct net_device *dev)
-{
- int rc = -ENODEV;
- struct wl3501_card *this = netdev_priv(dev);
- unsigned long flags;
- struct pcmcia_device *link;
- link = this->p_dev;
-
- spin_lock_irqsave(&this->lock, flags);
- if (!pcmcia_dev_present(link))
- goto out;
- netif_device_attach(dev);
- link->open++;
-
- /* Initialize WL3501 firmware */
- pr_debug("%s: Initialize WL3501 firmware...", dev->name);
- if (wl3501_init_firmware(this))
- goto fail;
- /* Initialize device variables */
- this->adhoc_times = 0;
- /* Acknowledge Interrupt, for cleaning last state */
- wl3501_ack_interrupt(this);
-
- /* Enable interrupt from card after all */
- wl3501_unblock_interrupt(this);
- wl3501_mgmt_scan(this, 100);
- rc = 0;
- pr_debug("%s: WL3501 opened", dev->name);
- printk(KERN_INFO "%s: Card Name: %s\n"
- "%s: Firmware Date: %s\n",
- dev->name, this->card_name,
- dev->name, this->firmware_date);
-out:
- spin_unlock_irqrestore(&this->lock, flags);
- return rc;
-fail:
- printk(KERN_WARNING "%s: Can't initialize firmware!\n", dev->name);
- goto out;
-}
-
-static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
-{
- struct wl3501_card *this = netdev_priv(dev);
- struct iw_statistics *wstats = &this->wstats;
- u32 value; /* size checked: it is u32 */
-
- memset(wstats, 0, sizeof(*wstats));
- wstats->status = netif_running(dev);
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_ICV_ERROR_COUNT,
- &value, sizeof(value)))
- wstats->discard.code += value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_UNDECRYPTABLE_COUNT,
- &value, sizeof(value)))
- wstats->discard.code += value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_EXCLUDED_COUNT,
- &value, sizeof(value)))
- wstats->discard.code += value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_RETRY_COUNT,
- &value, sizeof(value)))
- wstats->discard.retries = value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_FAILED_COUNT,
- &value, sizeof(value)))
- wstats->discard.misc += value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_FAILURE_COUNT,
- &value, sizeof(value)))
- wstats->discard.misc += value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_ACK_FAILURE_COUNT,
- &value, sizeof(value)))
- wstats->discard.misc += value;
- if (!wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAME_DUPLICATE_COUNT,
- &value, sizeof(value)))
- wstats->discard.misc += value;
- return wstats;
-}
-
-/**
- * wl3501_detach - deletes a driver "instance"
- * @link: FILL_IN
- *
- * This deletes a driver "instance". The device is de-registered with Card
- * Services. If it has been released, all local data structures are freed.
- * Otherwise, the structures will be freed when the device is released.
- */
-static void wl3501_detach(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- /* If the device is currently configured and active, we won't actually
- * delete it yet. Instead, it is marked so that when the release()
- * function is called, that will trigger a proper detach(). */
-
- while (link->open > 0)
- wl3501_close(dev);
-
- netif_device_detach(dev);
- wl3501_release(link);
-
- unregister_netdev(dev);
- free_netdev(dev);
-}
-
-static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- strscpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
- return 0;
-}
-
-static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
- int channel = wrqu->freq.m;
- int rc = -EINVAL;
-
- if (iw_valid_channel(this->reg_domain, channel)) {
- this->chan = channel;
- rc = wl3501_reset(dev);
- }
- return rc;
-}
-
-static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- wrqu->freq.m = 100000 *
- ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ);
- wrqu->freq.e = 1;
- return 0;
-}
-
-static int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int rc = -EINVAL;
-
- if (wrqu->mode == IW_MODE_INFRA ||
- wrqu->mode == IW_MODE_ADHOC ||
- wrqu->mode == IW_MODE_AUTO) {
- struct wl3501_card *this = netdev_priv(dev);
-
- this->net_type = wrqu->mode;
- rc = wl3501_reset(dev);
- }
- return rc;
-}
-
-static int wl3501_get_mode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- wrqu->mode = this->net_type;
- return 0;
-}
-
-static int wl3501_get_sens(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- wrqu->sens.value = this->rssi;
- wrqu->sens.disabled = !wrqu->sens.value;
- wrqu->sens.fixed = 1;
- return 0;
-}
-
-static int wl3501_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
-
- /* Set the length (very important for backward compatibility) */
- wrqu->data.length = sizeof(*range);
-
- /* Set all the info we don't care or don't know about to zero */
- memset(range, 0, sizeof(*range));
-
- /* Set the Wireless Extension versions */
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 1;
- range->throughput = 2 * 1000 * 1000; /* ~2 Mb/s */
- /* FIXME: study the code to fill in more fields... */
- return 0;
-}
-
-static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
- int rc = -EINVAL;
-
- /* FIXME: we support other ARPHRDs...*/
- if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
- goto out;
- if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data)) {
- /* FIXME: rescan? */
- } else
- memcpy(this->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
- /* FIXME: rescan? deassoc & scan? */
- rc = 0;
-out:
- return rc;
-}
-
-static int wl3501_get_wap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(wrqu->ap_addr.sa_data, this->bssid, ETH_ALEN);
- return 0;
-}
-
-static int wl3501_set_scan(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- /*
- * FIXME: trigger scanning with a reset, yes, I'm lazy
- */
- return wl3501_reset(dev);
-}
-
-static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
- int i;
- char *current_ev = extra;
- struct iw_event iwe;
-
- for (i = 0; i < this->bss_cnt; ++i) {
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_ADDR_LEN);
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
- current_ev = iwe_stream_add_point(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe,
- this->bss_set[i].req.ssid.essid);
- iwe.cmd = SIOCGIWMODE;
- iwe.u.mode = this->bss_set[i].req.bss_type;
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_UINT_LEN);
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
- iwe.u.freq.e = 0;
- current_ev = iwe_stream_add_event(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, IW_EV_FREQ_LEN);
- iwe.cmd = SIOCGIWENCODE;
- if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &iwe, NULL);
- }
- /* Length of data */
- wrqu->data.length = (current_ev - extra);
- wrqu->data.flags = 0; /* FIXME: set properly these flags */
- return 0;
-}
-
-static int wl3501_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- if (wrqu->data.flags) {
- iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID,
- &this->essid.el,
- extra, wrqu->data.length);
- } else { /* We accept any ESSID */
- iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID,
- &this->essid.el, "ANY", 3);
- }
- return wl3501_reset(dev);
-}
-
-static int wl3501_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&this->lock, flags);
- wrqu->essid.flags = 1;
- wrqu->essid.length = this->essid.el.len;
- memcpy(extra, this->essid.essid, this->essid.el.len);
- spin_unlock_irqrestore(&this->lock, flags);
- return 0;
-}
-
-static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- if (wrqu->data.length > sizeof(this->nick))
- return -E2BIG;
- strscpy(this->nick, extra, wrqu->data.length);
- return 0;
-}
-
-static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct wl3501_card *this = netdev_priv(dev);
-
- strscpy(extra, this->nick, 32);
- wrqu->data.length = strlen(extra);
- return 0;
-}
-
-static int wl3501_get_rate(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- /*
- * FIXME: have to see where to get this info from; perhaps this card
- * works at 1 Mbit/s too... for now leave it at 2 Mbit/s, which is the
- * most common with the Planet Access Points. -acme
- */
- wrqu->bitrate.value = 2000000;
- wrqu->bitrate.fixed = 1;
- return 0;
-}
-
-static int wl3501_get_rts_threshold(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u16 threshold; /* size checked: it is u16 */
- struct wl3501_card *this = netdev_priv(dev);
- int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_THRESHOLD,
- &threshold, sizeof(threshold));
- if (!rc) {
- wrqu->rts.value = threshold;
- wrqu->rts.disabled = threshold >= 2347;
- wrqu->rts.fixed = 1;
- }
- return rc;
-}
-
-static int wl3501_get_frag_threshold(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u16 threshold; /* size checked: it is u16 */
- struct wl3501_card *this = netdev_priv(dev);
- int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAG_THRESHOLD,
- &threshold, sizeof(threshold));
- if (!rc) {
- wrqu->frag.value = threshold;
- wrqu->frag.disabled = threshold >= 2346;
- wrqu->frag.fixed = 1;
- }
- return rc;
-}
-
-static int wl3501_get_txpow(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u16 txpow;
- struct wl3501_card *this = netdev_priv(dev);
- int rc = wl3501_get_mib_value(this,
- WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL,
- &txpow, sizeof(txpow));
- if (!rc) {
- wrqu->txpower.value = txpow;
- wrqu->txpower.disabled = 0;
- /*
- * From the MIB values I think this can be configurable,
- * as it lists several tx power levels -acme
- */
- wrqu->txpower.fixed = 0;
- wrqu->txpower.flags = IW_TXPOW_MWATT;
- }
- return rc;
-}
-
-static int wl3501_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u8 retry; /* size checked: it is u8 */
- struct wl3501_card *this = netdev_priv(dev);
- int rc = wl3501_get_mib_value(this,
- WL3501_MIB_ATTR_LONG_RETRY_LIMIT,
- &retry, sizeof(retry));
- if (rc)
- goto out;
- if (wrqu->retry.flags & IW_RETRY_LONG) {
- wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- goto set_value;
- }
- rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_SHORT_RETRY_LIMIT,
- &retry, sizeof(retry));
- if (rc)
- goto out;
- wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
-set_value:
- wrqu->retry.value = retry;
- wrqu->retry.disabled = 0;
-out:
- return rc;
-}
-
-static int wl3501_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u8 implemented, restricted, keys[100], len_keys, tocopy;
- struct wl3501_card *this = netdev_priv(dev);
- int rc = wl3501_get_mib_value(this,
- WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED,
- &implemented, sizeof(implemented));
- if (rc)
- goto out;
- if (!implemented) {
- wrqu->encoding.flags = IW_ENCODE_DISABLED;
- goto out;
- }
- rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_EXCLUDE_UNENCRYPTED,
- &restricted, sizeof(restricted));
- if (rc)
- goto out;
- wrqu->encoding.flags = restricted ? IW_ENCODE_RESTRICTED :
- IW_ENCODE_OPEN;
- rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_KEY_MAPPINGS_LEN,
- &len_keys, sizeof(len_keys));
- if (rc)
- goto out;
- rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_WEP_KEY_MAPPINGS,
- keys, len_keys);
- if (rc)
- goto out;
- tocopy = min_t(u16, len_keys, wrqu->encoding.length);
- tocopy = min_t(u8, tocopy, 100);
- wrqu->encoding.length = tocopy;
- memcpy(extra, keys, tocopy);
-out:
- return rc;
-}
-
-static int wl3501_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u8 pwr_state;
- struct wl3501_card *this = netdev_priv(dev);
- int rc = wl3501_get_mib_value(this,
- WL3501_MIB_ATTR_CURRENT_PWR_STATE,
- &pwr_state, sizeof(pwr_state));
- if (rc)
- goto out;
- wrqu->power.disabled = !pwr_state;
- wrqu->power.flags = IW_POWER_ON;
-out:
- return rc;
-}
-
-static const iw_handler wl3501_handler[] = {
- IW_HANDLER(SIOCGIWNAME, wl3501_get_name),
- IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq),
- IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq),
- IW_HANDLER(SIOCSIWMODE, wl3501_set_mode),
- IW_HANDLER(SIOCGIWMODE, wl3501_get_mode),
- IW_HANDLER(SIOCGIWSENS, wl3501_get_sens),
- IW_HANDLER(SIOCGIWRANGE, wl3501_get_range),
- IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
- IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
- IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
- IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
- IW_HANDLER(SIOCSIWAP, wl3501_set_wap),
- IW_HANDLER(SIOCGIWAP, wl3501_get_wap),
- IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan),
- IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan),
- IW_HANDLER(SIOCSIWESSID, wl3501_set_essid),
- IW_HANDLER(SIOCGIWESSID, wl3501_get_essid),
- IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick),
- IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick),
- IW_HANDLER(SIOCGIWRATE, wl3501_get_rate),
- IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold),
- IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold),
- IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow),
- IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry),
- IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode),
- IW_HANDLER(SIOCGIWPOWER, wl3501_get_power),
-};
-
-static const struct iw_handler_def wl3501_handler_def = {
- .num_standard = ARRAY_SIZE(wl3501_handler),
- .standard = (iw_handler *)wl3501_handler,
- .get_wireless_stats = wl3501_get_wireless_stats,
-};
-
-static const struct net_device_ops wl3501_netdev_ops = {
- .ndo_open = wl3501_open,
- .ndo_stop = wl3501_close,
- .ndo_start_xmit = wl3501_hard_start_xmit,
- .ndo_tx_timeout = wl3501_tx_timeout,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int wl3501_probe(struct pcmcia_device *p_dev)
-{
- struct net_device *dev;
- struct wl3501_card *this;
- int ret;
-
- /* The io structure describes IO port mapping */
- p_dev->resource[0]->end = 16;
- p_dev->resource[0]->flags = IO_DATA_PATH_WIDTH_8;
-
- /* General socket configuration */
- p_dev->config_flags = CONF_ENABLE_IRQ;
- p_dev->config_index = 1;
-
- dev = alloc_etherdev(sizeof(struct wl3501_card));
- if (!dev)
- return -ENOMEM;
-
- dev->netdev_ops = &wl3501_netdev_ops;
- dev->watchdog_timeo = 5 * HZ;
-
- this = netdev_priv(dev);
- this->wireless_data.spy_data = &this->spy_data;
- this->p_dev = p_dev;
- dev->wireless_data = &this->wireless_data;
- dev->wireless_handlers = &wl3501_handler_def;
- netif_stop_queue(dev);
- p_dev->priv = dev;
-
- ret = wl3501_config(p_dev);
- if (ret)
- goto out_free_etherdev;
-
- return 0;
-
-out_free_etherdev:
- free_netdev(dev);
- return ret;
-}
-
-static int wl3501_config(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
- int i = 0, j, ret;
- struct wl3501_card *this;
-
- /* Try allocating IO ports. This tries a few fixed addresses. If you
- * want, you can also read the card's config table to pick addresses --
- * see the serial driver for an example. */
- link->io_lines = 5;
-
- for (j = 0x280; j < 0x400; j += 0x20) {
- /* Probe I/O base addresses 0x280-0x3e0, in steps of 0x20 */
- link->resource[0]->start = j;
- link->resource[1]->start = link->resource[0]->start + 0x10;
- i = pcmcia_request_io(link);
- if (i == 0)
- break;
- }
- if (i != 0)
- goto failed;
-
- /* Now allocate an interrupt line. Note that this does not actually
- * assign a handler to the interrupt. */
-
- ret = pcmcia_request_irq(link, wl3501_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- SET_NETDEV_DEV(dev, &link->dev);
- if (register_netdev(dev)) {
- printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
- goto failed;
- }
-
- this = netdev_priv(dev);
-
- this->base_addr = dev->base_addr;
-
- if (!wl3501_get_flash_mac_addr(this)) {
- printk(KERN_WARNING "%s: Can't read MAC addr in flash ROM?\n",
- dev->name);
- unregister_netdev(dev);
- goto failed;
- }
-
- eth_hw_addr_set(dev, this->mac_addr);
-
- /* print probe information */
- printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, "
- "MAC addr in flash ROM:%pM\n",
- dev->name, this->base_addr, (int)dev->irq,
- dev->dev_addr);
- /*
- * Initialize card parameters - added by jss
- */
- this->net_type = IW_MODE_INFRA;
- this->bss_cnt = 0;
- this->join_sta_bss = 0;
- this->adhoc_times = 0;
- iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, &this->essid.el,
- "ANY", 3);
- this->card_name[0] = '\0';
- this->firmware_date[0] = '\0';
- this->rssi = 255;
- this->chan = iw_default_channel(this->reg_domain);
- strscpy(this->nick, "Planet WL3501", sizeof(this->nick));
- spin_lock_init(&this->lock);
- init_waitqueue_head(&this->wait);
- netif_start_queue(dev);
- return 0;
-
-failed:
- wl3501_release(link);
- return -ENODEV;
-}
-
-static void wl3501_release(struct pcmcia_device *link)
-{
- pcmcia_disable_device(link);
-}
-
-static int wl3501_suspend(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- wl3501_pwr_mgmt(netdev_priv(dev), WL3501_SUSPEND);
- if (link->open)
- netif_device_detach(dev);
-
- return 0;
-}
-
-static int wl3501_resume(struct pcmcia_device *link)
-{
- struct net_device *dev = link->priv;
-
- wl3501_pwr_mgmt(netdev_priv(dev), WL3501_RESUME);
- if (link->open) {
- wl3501_reset(dev);
- netif_device_attach(dev);
- }
-
- return 0;
-}
-
-
-static const struct pcmcia_device_id wl3501_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0001),
- PCMCIA_DEVICE_NULL
-};
-MODULE_DEVICE_TABLE(pcmcia, wl3501_ids);
-
-static struct pcmcia_driver wl3501_driver = {
- .owner = THIS_MODULE,
- .name = "wl3501_cs",
- .probe = wl3501_probe,
- .remove = wl3501_detach,
- .id_table = wl3501_ids,
- .suspend = wl3501_suspend,
- .resume = wl3501_resume,
-};
-module_pcmcia_driver(wl3501_driver);
-
-MODULE_AUTHOR("Fox Chen <mhchen@golf.ccl.itri.org.tw>, "
- "Arnaldo Carvalho de Melo <acme@conectiva.com.br>,"
- "Gustavo Niemeyer <niemeyer@conectiva.com>");
-MODULE_DESCRIPTION("Planet wl3501 wireless driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig
index c7d02adb3..36b234bc5 100644
--- a/drivers/net/wireless/marvell/libertas/Kconfig
+++ b/drivers/net/wireless/marvell/libertas/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config LIBERTAS
tristate "Marvell 8xxx Libertas WLAN driver support"
+ depends on USB || MMC || SPI
depends on CFG80211
select LIB80211
select FW_LOADER
@@ -13,12 +14,6 @@ config LIBERTAS_USB
help
A driver for Marvell Libertas 8388 USB devices.
-config LIBERTAS_CS
- tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
- depends on LIBERTAS && PCMCIA && HAS_IOPORT_MAP
- help
- A driver for Marvell Libertas 8385 CompactFlash devices.
-
config LIBERTAS_SDIO
tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
depends on LIBERTAS && MMC
diff --git a/drivers/net/wireless/marvell/libertas/Makefile b/drivers/net/wireless/marvell/libertas/Makefile
index 41b9b440a..2ac04f4d6 100644
--- a/drivers/net/wireless/marvell/libertas/Makefile
+++ b/drivers/net/wireless/marvell/libertas/Makefile
@@ -17,6 +17,5 @@ libertas_spi-objs += if_spi.o
obj-$(CONFIG_LIBERTAS) += libertas.o
obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o
-obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o
obj-$(CONFIG_LIBERTAS_SDIO) += libertas_sdio.o
obj-$(CONFIG_LIBERTAS_SPI) += libertas_spi.o
diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c
deleted file mode 100644
index 4103f15bc..000000000
--- a/drivers/net/wireless/marvell/libertas/if_cs.c
+++ /dev/null
@@ -1,957 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for the Marvell 8385 based compact flash WLAN cards.
- *
- * (C) 2007 by Holger Schurig <hs4233@mail.mn-solutions.de>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/firmware.h>
-#include <linux/netdevice.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-#include <linux/io.h>
-
-#define DRV_NAME "libertas_cs"
-
-#include "decl.h"
-#include "defs.h"
-#include "dev.h"
-
-
-/********************************************************************/
-/* Module stuff */
-/********************************************************************/
-
-MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>");
-MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards");
-MODULE_LICENSE("GPL");
-
-
-
-/********************************************************************/
-/* Data structures */
-/********************************************************************/
-
-struct if_cs_card {
- struct pcmcia_device *p_dev;
- struct lbs_private *priv;
- void __iomem *iobase;
- bool align_regs;
- u32 model;
-};
-
-
-enum {
- MODEL_UNKNOWN = 0x00,
- MODEL_8305 = 0x01,
- MODEL_8381 = 0x02,
- MODEL_8385 = 0x03
-};
-
-static const struct lbs_fw_table fw_table[] = {
- { MODEL_8305, "libertas/cf8305.bin", NULL },
- { MODEL_8305, "libertas_cs_helper.fw", NULL },
- { MODEL_8381, "libertas/cf8381_helper.bin", "libertas/cf8381.bin" },
- { MODEL_8381, "libertas_cs_helper.fw", "libertas_cs.fw" },
- { MODEL_8385, "libertas/cf8385_helper.bin", "libertas/cf8385.bin" },
- { MODEL_8385, "libertas_cs_helper.fw", "libertas_cs.fw" },
- { 0, NULL, NULL }
-};
-MODULE_FIRMWARE("libertas/cf8305.bin");
-MODULE_FIRMWARE("libertas/cf8381_helper.bin");
-MODULE_FIRMWARE("libertas/cf8381.bin");
-MODULE_FIRMWARE("libertas/cf8385_helper.bin");
-MODULE_FIRMWARE("libertas/cf8385.bin");
-MODULE_FIRMWARE("libertas_cs_helper.fw");
-MODULE_FIRMWARE("libertas_cs.fw");
-
-
-/********************************************************************/
-/* Hardware access */
-/********************************************************************/
-
-/* This define enables wrapper functions which allow you
- to dump all register accesses. You normally won't need this,
- except for development */
-/* #define DEBUG_IO */
-
-#ifdef DEBUG_IO
-static int debug_output = 0;
-#else
-/* This way the compiler optimizes the printk's away */
-#define debug_output 0
-#endif
-
-static inline unsigned int if_cs_read8(struct if_cs_card *card, uint reg)
-{
- unsigned int val = ioread8(card->iobase + reg);
- if (debug_output)
- printk(KERN_INFO "inb %08x<%02x\n", reg, val);
- return val;
-}
-static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg)
-{
- unsigned int val = ioread16(card->iobase + reg);
- if (debug_output)
- printk(KERN_INFO "inw %08x<%04x\n", reg, val);
- return val;
-}
-static inline void if_cs_read16_rep(
- struct if_cs_card *card,
- uint reg,
- void *buf,
- unsigned long count)
-{
- if (debug_output)
- printk(KERN_INFO "insw %08x<(0x%lx words)\n",
- reg, count);
- ioread16_rep(card->iobase + reg, buf, count);
-}
-
-static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val)
-{
- if (debug_output)
- printk(KERN_INFO "outb %08x>%02x\n", reg, val);
- iowrite8(val, card->iobase + reg);
-}
-
-static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val)
-{
- if (debug_output)
- printk(KERN_INFO "outw %08x>%04x\n", reg, val);
- iowrite16(val, card->iobase + reg);
-}
-
-static inline void if_cs_write16_rep(
- struct if_cs_card *card,
- uint reg,
- const void *buf,
- unsigned long count)
-{
- if (debug_output)
- printk(KERN_INFO "outsw %08x>(0x%lx words)\n",
- reg, count);
- iowrite16_rep(card->iobase + reg, buf, count);
-}
-
-
-/*
- * I know that polling/delaying is frowned upon. However, this procedure
- * with polling is needed while downloading the firmware. At this stage,
- * the hardware unfortunately does not generate any interrupts.
- *
- * Fortunately, this function is never used once the firmware is in
- * the card. :-)
- *
- * As a reference, see the "Firmware Specification v5.1", page 18
- * and 19. I did not follow their suggested timing to the word,
- * but this works nice & fast anyway.
- */
-static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 reg)
-{
- int i;
-
- for (i = 0; i < 100000; i++) {
- u8 val = if_cs_read8(card, addr);
- if (val == reg)
- return 0;
- udelay(5);
- }
- return -ETIME;
-}
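-
-/*
- * Usage sketch (mirroring the callers below, shown for illustration):
- * wait until the firmware reports success in the scratch register, and
- * bail out if it never does:
- *
- *	if (if_cs_poll_while_fw_download(card, IF_CS_SCRATCH,
- *					 IF_CS_SCRATCH_HELPER_OK) < 0)
- *		return -ETIME;
- *
- * With 100000 iterations of udelay(5), the wait is bounded at roughly
- * half a second plus register-read overhead.
- */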
-
-
-
-/*
- * First the bitmasks for the host/card interrupt/status registers:
- */
-#define IF_CS_BIT_TX 0x0001
-#define IF_CS_BIT_RX 0x0002
-#define IF_CS_BIT_COMMAND 0x0004
-#define IF_CS_BIT_RESP 0x0008
-#define IF_CS_BIT_EVENT 0x0010
-#define IF_CS_BIT_MASK 0x001f
-
-
-
-/*
- * It's not really clear to me what the host status register is for. It
- * needs to be set almost in unison with "host int cause". The following
- * bits from above are used:
- *
- * IF_CS_BIT_TX driver downloaded a data packet
- * IF_CS_BIT_RX driver got a data packet
- * IF_CS_BIT_COMMAND driver downloaded a command
- * IF_CS_BIT_RESP not used (has some meaning with powerdown)
- * IF_CS_BIT_EVENT driver read a host event
- */
-#define IF_CS_HOST_STATUS 0x00000000
-
-/*
- * With the host int cause register the host (that is, Linux) can cause
- * an interrupt in the firmware, to tell it about these events:
- *
- * IF_CS_BIT_TX a data packet has been downloaded
- * IF_CS_BIT_RX a received data packet has been retrieved
- * IF_CS_BIT_COMMAND a firmware block or a command has been downloaded
- * IF_CS_BIT_RESP not used (has some meaning with powerdown)
- * IF_CS_BIT_EVENT a host event (link lost etc) has been retrieved
- */
-#define IF_CS_HOST_INT_CAUSE 0x00000002
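-
-/*
- * Condensed sketch of the "download over" handshake built on these two
- * registers (mirroring if_cs_send_cmd() below, shown for illustration):
- *
- *	if_cs_write16(card, IF_CS_CMD_LEN, nb);
- *	if_cs_write16_rep(card, IF_CS_CMD, buf, nb / 2);
- *	if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
- *	if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
- */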
-
-/*
- * The host int mask register is used to enable/disable interrupts. However,
- * I have the suspicion that disabled interrupts are lost.
- */
-#define IF_CS_HOST_INT_MASK 0x00000004
-
-/*
- * Used to send or receive data packets:
- */
-#define IF_CS_WRITE 0x00000016
-#define IF_CS_WRITE_LEN 0x00000014
-#define IF_CS_READ 0x00000010
-#define IF_CS_READ_LEN 0x00000024
-
-/*
- * Used to send commands (and to send firmware block) and to
- * receive command responses:
- */
-#define IF_CS_CMD 0x0000001A
-#define IF_CS_CMD_LEN 0x00000018
-#define IF_CS_RESP 0x00000012
-#define IF_CS_RESP_LEN 0x00000030
-
-/*
- * The card status register shows what the card/firmware actually
- * accepts:
- *
- * IF_CS_BIT_TX you may send a data packet
- * IF_CS_BIT_RX you may retrieve a data packet
- * IF_CS_BIT_COMMAND you may send a command
- * IF_CS_BIT_RESP you may retrieve a command response
- * IF_CS_BIT_EVENT the card has an event for us (link lost, SNR low, etc.)
- *
- * When reading this register several times, you will get back the same
- * results --- with one exception: the IF_CS_BIT_EVENT bit clears itself
- * automatically.
- *
- * Note that we don't rely on BIT_RX, BIT_RESP or BIT_EVENT, because
- * we handle these via the card int cause register.
- */
-#define IF_CS_CARD_STATUS 0x00000020
-#define IF_CS_CARD_STATUS_MASK 0x7f00
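-
-/*
- * Ready-poll sketch (mirroring if_cs_send_cmd() below): before pushing a
- * command, the host spins on the card status register until the firmware
- * advertises IF_CS_BIT_COMMAND, giving up after ~100 ms:
- *
- *	while (!(if_cs_read16(card, IF_CS_CARD_STATUS) & IF_CS_BIT_COMMAND))
- *		mdelay(1);
- */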
-
-/*
- * The card int cause register is used by the card/firmware to notify us
- * about the following events:
- *
- * IF_CS_BIT_TX a data packet has successfully been sent
- * IF_CS_BIT_RX a data packet has been received and can be retrieved
- * IF_CS_BIT_COMMAND not used
- * IF_CS_BIT_RESP the firmware has a command response for us
- * IF_CS_BIT_EVENT the card has an event for us (link lost, SNR low, etc.)
- */
-#define IF_CS_CARD_INT_CAUSE 0x00000022
-
-/*
- * This is used for handshaking with the card's bootloader/helper image
- * to synchronize downloading of firmware blocks.
- */
-#define IF_CS_SQ_READ_LOW 0x00000028
-#define IF_CS_SQ_HELPER_OK 0x10
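-
-/*
- * Block handshake sketch (as used by if_cs_prog_real() below): once the
- * helper answers with IF_CS_SQ_HELPER_OK, each read of IF_CS_SQ_READ_LOW
- * returns the size of the next firmware chunk to send; an odd value asks
- * the host to retry the previous chunk:
- *
- *	len = if_cs_read16(card, IF_CS_SQ_READ_LOW);
- *	if (len & 1)
- *		back up and resend the previous block;
- */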
-
-/*
- * The scratch register tells us ...
- *
- * IF_CS_SCRATCH_BOOT_OK the bootloader runs
- * IF_CS_SCRATCH_HELPER_OK the helper firmware already runs
- */
-#define IF_CS_SCRATCH 0x0000003F
-#define IF_CS_SCRATCH_BOOT_OK 0x00
-#define IF_CS_SCRATCH_HELPER_OK 0x5a
-
-/*
- * Used to detect ancient chips:
- */
-#define IF_CS_PRODUCT_ID 0x0000001C
-#define IF_CS_CF8385_B1_REV 0x12
-#define IF_CS_CF8381_B3_REV 0x04
-#define IF_CS_CF8305_B1_REV 0x03
-
-/*
- * Used to detect cards other than the CF8385, since their silicon
- * revisions don't match those of the CF8385; e.g. the CF8381 B3 works
- * with this driver.
- */
-#define CF8305_MANFID 0x02db
-#define CF8305_CARDID 0x8103
-#define CF8381_MANFID 0x02db
-#define CF8381_CARDID 0x6064
-#define CF8385_MANFID 0x02df
-#define CF8385_CARDID 0x8103
-
-/*
- * FIXME: just use the 'driver_info' field of 'struct pcmcia_device_id' when
- * that gets fixed. Currently there's no way to access it from the probe hook.
- */
-static inline u32 get_model(u16 manf_id, u16 card_id)
-{
- /* NOTE: keep in sync with if_cs_ids */
- if (manf_id == CF8305_MANFID && card_id == CF8305_CARDID)
- return MODEL_8305;
- else if (manf_id == CF8381_MANFID && card_id == CF8381_CARDID)
- return MODEL_8381;
- else if (manf_id == CF8385_MANFID && card_id == CF8385_CARDID)
- return MODEL_8385;
- return MODEL_UNKNOWN;
-}
-
-/********************************************************************/
-/* I/O and interrupt handling */
-/********************************************************************/
-
-static inline void if_cs_enable_ints(struct if_cs_card *card)
-{
- if_cs_write16(card, IF_CS_HOST_INT_MASK, 0);
-}
-
-static inline void if_cs_disable_ints(struct if_cs_card *card)
-{
- if_cs_write16(card, IF_CS_HOST_INT_MASK, IF_CS_BIT_MASK);
-}
-
-/*
- * Called from if_cs_host_to_card to send a command to the hardware
- */
-static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb)
-{
- struct if_cs_card *card = (struct if_cs_card *)priv->card;
- int ret = -1;
- int loops = 0;
-
- if_cs_disable_ints(card);
-
- /* Is hardware ready? */
- while (1) {
- u16 status = if_cs_read16(card, IF_CS_CARD_STATUS);
- if (status & IF_CS_BIT_COMMAND)
- break;
- if (++loops > 100) {
- netdev_err(priv->dev, "card not ready for commands\n");
- goto done;
- }
- mdelay(1);
- }
-
- if_cs_write16(card, IF_CS_CMD_LEN, nb);
-
- if_cs_write16_rep(card, IF_CS_CMD, buf, nb / 2);
- /* Are we supposed to transfer an odd number of bytes? */
- if (nb & 1)
- if_cs_write8(card, IF_CS_CMD, buf[nb-1]);
-
- /* "Assert the download over interrupt command in the Host
- * status register" */
- if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
-
- /* "Assert the download over interrupt command in the Card
- * interrupt case register" */
- if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
- ret = 0;
-
-done:
- if_cs_enable_ints(card);
- return ret;
-}
-
-/*
- * Called from if_cs_host_to_card to send data to the hardware
- */
-static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb)
-{
- struct if_cs_card *card = (struct if_cs_card *)priv->card;
- u16 status;
-
- if_cs_disable_ints(card);
-
- status = if_cs_read16(card, IF_CS_CARD_STATUS);
- BUG_ON((status & IF_CS_BIT_TX) == 0);
-
- if_cs_write16(card, IF_CS_WRITE_LEN, nb);
-
- /* write even number of bytes, then odd byte if necessary */
- if_cs_write16_rep(card, IF_CS_WRITE, buf, nb / 2);
- if (nb & 1)
- if_cs_write8(card, IF_CS_WRITE, buf[nb-1]);
-
- if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_TX);
- if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_TX);
- if_cs_enable_ints(card);
-}
-
-/*
- * Get the command result out of the card.
- */
-static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len)
-{
- unsigned long flags;
- int ret = -1;
- u16 status;
-
- /* is hardware ready? */
- status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
- if ((status & IF_CS_BIT_RESP) == 0) {
- netdev_err(priv->dev, "no cmd response in card\n");
- *len = 0;
- goto out;
- }
-
- *len = if_cs_read16(priv->card, IF_CS_RESP_LEN);
- if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) {
- netdev_err(priv->dev,
- "card cmd buffer has invalid # of bytes (%d)\n",
- *len);
- goto out;
- }
-
- /* read even number of bytes, then odd byte if necessary */
- if_cs_read16_rep(priv->card, IF_CS_RESP, data, *len/sizeof(u16));
- if (*len & 1)
- data[*len-1] = if_cs_read8(priv->card, IF_CS_RESP);
-
- /* This is a workaround for a firmware that reports too many
- * bytes */
- *len -= 8;
- ret = 0;
-
- /* Clear this flag again */
- spin_lock_irqsave(&priv->driver_lock, flags);
- priv->dnld_sent = DNLD_RES_RECEIVED;
- spin_unlock_irqrestore(&priv->driver_lock, flags);
-
-out:
- return ret;
-}
-
-static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
-{
- struct sk_buff *skb = NULL;
- u16 len;
- u8 *data;
-
- len = if_cs_read16(priv->card, IF_CS_READ_LEN);
- if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
- netdev_err(priv->dev,
- "card data buffer has invalid # of bytes (%d)\n",
- len);
- priv->dev->stats.rx_dropped++;
- goto dat_err;
- }
-
- skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2);
- if (!skb)
- goto out;
- skb_put(skb, len);
- skb_reserve(skb, 2);/* 16 byte align */
- data = skb->data;
-
- /* read even number of bytes, then odd byte if necessary */
- if_cs_read16_rep(priv->card, IF_CS_READ, data, len/sizeof(u16));
- if (len & 1)
- data[len-1] = if_cs_read8(priv->card, IF_CS_READ);
-
-dat_err:
- if_cs_write16(priv->card, IF_CS_HOST_STATUS, IF_CS_BIT_RX);
- if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_RX);
-
-out:
- return skb;
-}
-
-static irqreturn_t if_cs_interrupt(int irq, void *data)
-{
- struct if_cs_card *card = data;
- struct lbs_private *priv = card->priv;
- u16 cause;
-
- /* Ask card interrupt cause register if there is something for us */
- cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE);
- lbs_deb_cs("cause 0x%04x\n", cause);
-
- if (cause == 0) {
- /* Not for us */
- return IRQ_NONE;
- }
-
- if (cause == 0xffff) {
- /* Read in junk, the card has probably been removed */
- card->priv->surpriseremoved = 1;
- return IRQ_HANDLED;
- }
-
- if (cause & IF_CS_BIT_RX) {
- struct sk_buff *skb;
- lbs_deb_cs("rx packet\n");
- skb = if_cs_receive_data(priv);
- if (skb)
- lbs_process_rxed_packet(priv, skb);
- }
-
- if (cause & IF_CS_BIT_TX) {
- lbs_deb_cs("tx done\n");
- lbs_host_to_card_done(priv);
- }
-
- if (cause & IF_CS_BIT_RESP) {
- unsigned long flags;
- u8 i;
-
- lbs_deb_cs("cmd resp\n");
- spin_lock_irqsave(&priv->driver_lock, flags);
- i = (priv->resp_idx == 0) ? 1 : 0;
- spin_unlock_irqrestore(&priv->driver_lock, flags);
-
- BUG_ON(priv->resp_len[i]);
- if_cs_receive_cmdres(priv, priv->resp_buf[i],
- &priv->resp_len[i]);
-
- spin_lock_irqsave(&priv->driver_lock, flags);
- lbs_notify_command_response(priv, i);
- spin_unlock_irqrestore(&priv->driver_lock, flags);
- }
-
- if (cause & IF_CS_BIT_EVENT) {
- u16 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS);
- if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE,
- IF_CS_BIT_EVENT);
- lbs_queue_event(priv, (status & IF_CS_CARD_STATUS_MASK) >> 8);
- }
-
- /* Clear interrupt cause */
- if_cs_write16(card, IF_CS_CARD_INT_CAUSE, cause & IF_CS_BIT_MASK);
-
- return IRQ_HANDLED;
-}
-
-
-
-
-/********************************************************************/
-/* Firmware */
-/********************************************************************/
-
-/*
- * Tries to program the helper firmware.
- *
- * Return 0 on success
- */
-static int if_cs_prog_helper(struct if_cs_card *card, const struct firmware *fw)
-{
- int ret = 0;
- int sent = 0;
- u8 scratch;
-
- /*
- * This is the only place where an unaligned register access happens on
- * the CF8305 card, therefore for the sake of speed of the driver, we do
- * the alignment correction here.
- */
- if (card->align_regs)
- scratch = if_cs_read16(card, IF_CS_SCRATCH) >> 8;
- else
- scratch = if_cs_read8(card, IF_CS_SCRATCH);
-
- /* "If the value is 0x5a, the firmware is already
- * downloaded successfully"
- */
- if (scratch == IF_CS_SCRATCH_HELPER_OK)
- goto done;
-
- /* "If the value is != 00, it is invalid value of register */
- if (scratch != IF_CS_SCRATCH_BOOT_OK) {
- ret = -ENODEV;
- goto done;
- }
-
- lbs_deb_cs("helper size %td\n", fw->size);
-
- /* "Set the 5 bytes of the helper image to 0" */
- /* Not needed, this contains an ARM branch instruction */
-
- for (;;) {
- /* "the number of bytes to send is 256" */
- int count = 256;
- int remain = fw->size - sent;
-
- if (remain < count)
- count = remain;
-
- /*
- * "write the number of bytes to be sent to the I/O Command
- * write length register"
- */
- if_cs_write16(card, IF_CS_CMD_LEN, count);
-
- /* "write this to I/O Command port register as 16 bit writes */
- if (count)
- if_cs_write16_rep(card, IF_CS_CMD,
- &fw->data[sent],
- count >> 1);
-
- /*
- * "Assert the download over interrupt command in the Host
- * status register"
- */
- if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
-
- /*
- * "Assert the download over interrupt command in the Card
- * interrupt case register"
- */
- if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
-
- /*
- * "The host polls the Card Status register ... for 50 ms before
- * declaring a failure"
- */
- ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
- IF_CS_BIT_COMMAND);
- if (ret < 0) {
- pr_err("can't download helper at 0x%x, ret %d\n",
- sent, ret);
- goto done;
- }
-
- if (count == 0)
- break;
-
- sent += count;
- }
-
-done:
- return ret;
-}
-
-
-static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw)
-{
- int ret = 0;
- int retry = 0;
- int len = 0;
- int sent;
-
- lbs_deb_cs("fw size %td\n", fw->size);
-
- ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW,
- IF_CS_SQ_HELPER_OK);
- if (ret < 0) {
- pr_err("helper firmware doesn't answer\n");
- goto done;
- }
-
- for (sent = 0; sent < fw->size; sent += len) {
- len = if_cs_read16(card, IF_CS_SQ_READ_LOW);
- if (len & 1) {
- retry++;
- pr_info("odd, need to retry this firmware block\n");
- } else {
- retry = 0;
- }
-
- if (retry > 20) {
- pr_err("could not download firmware\n");
- ret = -ENODEV;
- goto done;
- }
- if (retry)
- sent -= len;
-
- if_cs_write16(card, IF_CS_CMD_LEN, len);
-
- if_cs_write16_rep(card, IF_CS_CMD,
- &fw->data[sent],
- (len+1) >> 1);
- if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND);
- if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND);
-
- ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS,
- IF_CS_BIT_COMMAND);
- if (ret < 0) {
- pr_err("can't download firmware at 0x%x\n", sent);
- goto done;
- }
- }
-
- ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a);
- if (ret < 0)
- pr_err("firmware download failed\n");
-
-done:
- return ret;
-}
-
-static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
- const struct firmware *helper,
- const struct firmware *mainfw)
-{
- struct if_cs_card *card = priv->card;
-
- if (ret) {
- pr_err("failed to find firmware (%d)\n", ret);
- return;
- }
-
- /* Load the firmware */
- ret = if_cs_prog_helper(card, helper);
- if (ret == 0 && (card->model != MODEL_8305))
- ret = if_cs_prog_real(card, mainfw);
- if (ret)
- return;
-
- /* Now actually get the IRQ */
- ret = request_irq(card->p_dev->irq, if_cs_interrupt,
- IRQF_SHARED, DRV_NAME, card);
- if (ret) {
- pr_err("error in request_irq\n");
- return;
- }
-
- /*
- * Clear any interrupt cause that happened while sending
- * firmware/initializing card
- */
- if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
- if_cs_enable_ints(card);
-
- /* And finally bring the card up */
- priv->fw_ready = 1;
- if (lbs_start_card(priv) != 0) {
- pr_err("could not activate card\n");
- free_irq(card->p_dev->irq, card);
- }
-}
-
-
-/********************************************************************/
-/* Callback functions for libertas.ko */
-/********************************************************************/
-
-/* Send commands or data packets to the card */
-static int if_cs_host_to_card(struct lbs_private *priv,
- u8 type,
- u8 *buf,
- u16 nb)
-{
- int ret = -1;
-
- switch (type) {
- case MVMS_DAT:
- priv->dnld_sent = DNLD_DATA_SENT;
- if_cs_send_data(priv, buf, nb);
- ret = 0;
- break;
- case MVMS_CMD:
- priv->dnld_sent = DNLD_CMD_SENT;
- ret = if_cs_send_cmd(priv, buf, nb);
- break;
- default:
- netdev_err(priv->dev, "%s: unsupported type %d\n",
- __func__, type);
- }
-
- return ret;
-}
-
-
-static void if_cs_release(struct pcmcia_device *p_dev)
-{
- struct if_cs_card *card = p_dev->priv;
-
- free_irq(p_dev->irq, card);
- pcmcia_disable_device(p_dev);
- if (card->iobase)
- ioport_unmap(card->iobase);
-}
-
-
-static int if_cs_ioprobe(struct pcmcia_device *p_dev, void *priv_data)
-{
- p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
- p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
-
- if (p_dev->resource[1]->end) {
- pr_err("wrong CIS (check number of IO windows)\n");
- return -ENODEV;
- }
-
- /* This reserves IO space but doesn't actually enable it */
- return pcmcia_request_io(p_dev);
-}
-
-static int if_cs_probe(struct pcmcia_device *p_dev)
-{
- int ret = -ENOMEM;
- unsigned int prod_id;
- struct lbs_private *priv;
- struct if_cs_card *card;
-
- card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL);
- if (!card)
- goto out;
-
- card->p_dev = p_dev;
- p_dev->priv = card;
-
- p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
-
- if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) {
- pr_err("error in pcmcia_loop_config\n");
- goto out1;
- }
-
- /*
- * Allocate an interrupt line. Note that this does not assign
- * a handler to the interrupt, unless the 'Handler' member of
- * the irq structure is initialized.
- */
- if (!p_dev->irq)
- goto out1;
-
- /* Initialize io access */
- card->iobase = ioport_map(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]));
- if (!card->iobase) {
- pr_err("error in ioport_map\n");
- ret = -EIO;
- goto out1;
- }
-
- ret = pcmcia_enable_device(p_dev);
- if (ret) {
- pr_err("error in pcmcia_enable_device\n");
- goto out2;
- }
-
- /* Finally, report what we've done */
- lbs_deb_cs("irq %d, io %pR", p_dev->irq, p_dev->resource[0]);
-
- /*
- * Most of the libertas cards can do unaligned register access, but some
- * weird ones cannot. That's especially true for the CF8305 card.
- */
- card->align_regs = false;
-
- card->model = get_model(p_dev->manf_id, p_dev->card_id);
- if (card->model == MODEL_UNKNOWN) {
- pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n",
- p_dev->manf_id, p_dev->card_id);
- ret = -ENODEV;
- goto out2;
- }
-
- /* Check if we have a current silicon */
- prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
- if (card->model == MODEL_8305) {
- card->align_regs = true;
- if (prod_id < IF_CS_CF8305_B1_REV) {
- pr_err("8305 rev B0 and older are not supported\n");
- ret = -ENODEV;
- goto out2;
- }
- }
-
- if ((card->model == MODEL_8381) && prod_id < IF_CS_CF8381_B3_REV) {
- pr_err("8381 rev B2 and older are not supported\n");
- ret = -ENODEV;
- goto out2;
- }
-
- if ((card->model == MODEL_8385) && prod_id < IF_CS_CF8385_B1_REV) {
- pr_err("8385 rev B0 and older are not supported\n");
- ret = -ENODEV;
- goto out2;
- }
-
- /* Make this card known to the libertas driver */
- priv = lbs_add_card(card, &p_dev->dev);
- if (IS_ERR(priv)) {
- ret = PTR_ERR(priv);
- goto out2;
- }
-
- /* Set up fields in lbs_private */
- card->priv = priv;
- priv->card = card;
- priv->hw_host_to_card = if_cs_host_to_card;
- priv->enter_deep_sleep = NULL;
- priv->exit_deep_sleep = NULL;
- priv->reset_deep_sleep_wakeup = NULL;
-
- /* Get firmware */
- ret = lbs_get_firmware_async(priv, &p_dev->dev, card->model, fw_table,
- if_cs_prog_firmware);
- if (ret) {
- pr_err("failed to find firmware (%d)\n", ret);
- goto out3;
- }
-
- goto out;
-
-out3:
- lbs_remove_card(priv);
-out2:
- ioport_unmap(card->iobase);
-out1:
- pcmcia_disable_device(p_dev);
-out:
- return ret;
-}
-
-
-static void if_cs_detach(struct pcmcia_device *p_dev)
-{
- struct if_cs_card *card = p_dev->priv;
-
- lbs_stop_card(card->priv);
- lbs_remove_card(card->priv);
- if_cs_disable_ints(card);
- if_cs_release(p_dev);
- kfree(card);
-}
-
-
-
-/********************************************************************/
-/* Module initialization */
-/********************************************************************/
-
-static const struct pcmcia_device_id if_cs_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID),
- PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID),
- PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID),
- /* NOTE: keep in sync with get_model() */
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, if_cs_ids);
-
-static struct pcmcia_driver lbs_driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .probe = if_cs_probe,
- .remove = if_cs_detach,
- .id_table = if_cs_ids,
-};
-module_pcmcia_driver(lbs_driver);
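
[Annotation] The removed if_cs.c download paths above both lean on one idiom: write a block, then poll a status register until the card acknowledges, retrying a bounded number of times. A minimal userspace sketch of that poll-with-budget shape; read_status() is a hypothetical stand-in for if_cs_read16() against real hardware, not anything from the driver:

#include <stdio.h>

/* Hypothetical register read; here it always reports the bit set. */
static unsigned int read_status(void *hw)
{
	(void)hw;
	return 0x0001;
}

/* Poll until all bits in `bits` are set, giving up after max_tries
 * iterations; the same shape as the removed
 * if_cs_poll_while_fw_download(), minus its real 50 ms delay budget. */
static int poll_status(void *hw, unsigned int bits, int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		if ((read_status(hw) & bits) == bits)
			return 0;
		/* a real driver would udelay()/usleep_range() here */
	}
	return -1;	/* timed out */
}

int main(void)
{
	printf("poll result: %d\n", poll_status(NULL, 0x0001, 100));
	return 0;
}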
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 3756aa247..9eff29a25 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1244,8 +1244,6 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
u8 *pbuf, u32 upld_len)
{
struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf;
- struct mwifiex_private *priv =
- mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
uint16_t result = le16_to_cpu(cmd->result);
uint16_t command = le16_to_cpu(cmd->command);
uint16_t seq_num = le16_to_cpu(cmd->seq_num);
@@ -1260,12 +1258,6 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
"cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
command, result, le16_to_cpu(cmd->size), seq_num);
- /* Get BSS number and corresponding priv */
- priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
- HostCmd_GET_BSS_TYPE(seq_num));
- if (!priv)
- priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
-
/* Update sequence number */
seq_num = HostCmd_GET_SEQ_NO(seq_num);
/* Clear RET_BIT from HostCmd */
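
[Annotation] The lookup dropped above keyed off bitfields packed into the 16-bit command sequence word. The 8/4/4 split below is an assumed layout for illustration only; the authoritative HostCmd_GET_* macros live in the mwifiex headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing, not the verified mwifiex layout. */
#define GET_SEQ_NO(x)	((x) & 0x00ffu)
#define GET_BSS_NO(x)	(((x) >> 8) & 0x0fu)
#define GET_BSS_TYPE(x)	(((x) >> 12) & 0x0fu)

int main(void)
{
	uint16_t seq = 0x2a05;	/* type 2, bss 10, seq 5 under this layout */

	printf("seq=%u bss=%u type=%u\n",
	       GET_SEQ_NO(seq), GET_BSS_NO(seq), GET_BSS_TYPE(seq));
	return 0;
}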
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index a6e254a11..9d98a1908 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -1427,8 +1427,8 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
/* Check if the requested SSID is already joined */
if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
- !mwifiex_ssid_cmp(&bss_desc->ssid,
- &priv->curr_bss_params.bss_descriptor.ssid) &&
+ cfg80211_ssid_eq(&bss_desc->ssid,
+ &priv->curr_bss_params.bss_descriptor.ssid) &&
(priv->curr_bss_params.bss_descriptor.bss_mode ==
NL80211_IFTYPE_ADHOC)) {
mwifiex_dbg(priv->adapter, INFO,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index d263eae60..318b42b18 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1152,7 +1152,6 @@ void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node);
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-s32 mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2);
int mwifiex_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 72904c275..a2ddac363 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -180,17 +180,6 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
}
/*
- * This function compares two SSIDs and checks if they match.
- */
-s32
-mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2)
-{
- if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len))
- return -1;
- return memcmp(ssid1->ssid, ssid2->ssid, ssid1->ssid_len);
-}
-
-/*
* This function checks if wapi is enabled in driver and scanned network is
* compatible with it.
*/
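
[Annotation] mwifiex_ssid_cmp() returned a memcmp()-style 0/-1, and the callers touched by this patch only ever tested it for equality, which is why cfg80211_ssid_eq() is a drop-in. A boolean restatement of the removed helper, compilable on its own:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ssid { unsigned char ssid[32]; unsigned char ssid_len; };

/* Same logic as the removed mwifiex_ssid_cmp(), expressed as the
 * "are these equal?" answer that cfg80211_ssid_eq() now provides. */
static bool ssid_eq(const struct ssid *a, const struct ssid *b)
{
	return a && b && a->ssid_len == b->ssid_len &&
	       memcmp(a->ssid, b->ssid, a->ssid_len) == 0;
}

int main(void)
{
	struct ssid x = { "guest", 5 }, y = { "guest", 5 };

	printf("%s\n", ssid_eq(&x, &y) ? "match" : "no match");
	return 0;
}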
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index a2ad2b53f..32a27fad7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -345,8 +345,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
/* Adhoc mode */
/* If the requested SSID matches current SSID, return */
if (bss_desc && bss_desc->ssid.ssid_len &&
- (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
- ssid, &bss_desc->ssid))) {
+ cfg80211_ssid_eq(&priv->curr_bss_params.bss_descriptor.ssid,
+ &bss_desc->ssid)) {
ret = 0;
goto done;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index d3ab9572e..515e6db41 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -687,7 +687,7 @@ static struct usb_driver mwifiex_usb_driver = {
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
.soft_unbind = 1,
- .drvwrap.driver = {
+ .driver = {
.coredump = mwifiex_usb_coredump,
},
};
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 68ad91520..00230f106 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -9,11 +9,11 @@
#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
-#define Q_READ(_dev, _q, _field) ({ \
+#define Q_READ(_q, _field) ({ \
u32 _offset = offsetof(struct mt76_queue_regs, _field); \
u32 _val; \
if ((_q)->flags & MT_QFLAG_WED) \
- _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
+ _val = mtk_wed_device_reg_read((_q)->wed, \
((_q)->wed_regs + \
_offset)); \
else \
@@ -21,10 +21,10 @@
_val; \
})
-#define Q_WRITE(_dev, _q, _field, _val) do { \
+#define Q_WRITE(_q, _field, _val) do { \
u32 _offset = offsetof(struct mt76_queue_regs, _field); \
if ((_q)->flags & MT_QFLAG_WED) \
- mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
+ mtk_wed_device_reg_write((_q)->wed, \
((_q)->wed_regs + _offset), \
_val); \
else \
@@ -33,8 +33,8 @@
#else
-#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
-#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)
+#define Q_READ(_q, _field) readl(&(_q)->regs->_field)
+#define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
#endif
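
[Annotation] Dropping the _dev argument works because each queue now carries its own mtk_wed_device pointer, so the macro can pick the access path from the queue alone. A userspace model of that per-queue dispatch; wed_dev, queue and q_read() are illustrative names, not mt76 API:

#include <stdint.h>
#include <stdio.h>

struct wed_dev { uint32_t regs[16]; };

struct queue {
	unsigned int flags;	/* QFLAG_WED selects the WED path */
	struct wed_dev *wed;	/* per-queue backend, like the new q->wed */
	const uint32_t *regs;	/* plain MMIO path otherwise */
};

#define QFLAG_WED 0x1u

/* Mirrors the reworked Q_READ(): the queue, not the device, carries
 * everything needed to pick the register access path. */
static uint32_t q_read(const struct queue *q, unsigned int offset)
{
	if (q->flags & QFLAG_WED)
		return q->wed->regs[offset];
	return q->regs[offset];
}

int main(void)
{
	uint32_t plain[16] = { [3] = 0xabcd };
	struct queue q = { .flags = 0, .wed = NULL, .regs = plain };

	printf("dma_idx = 0x%x\n", q_read(&q, 3));
	return 0;
}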
@@ -188,41 +188,67 @@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- Q_WRITE(dev, q, desc_base, q->desc_dma);
- Q_WRITE(dev, q, ring_size, q->ndesc);
- q->head = Q_READ(dev, q, dma_idx);
+ Q_WRITE(q, desc_base, q->desc_dma);
+ if (q->flags & MT_QFLAG_WED_RRO_EN)
+ Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
+ else
+ Q_WRITE(q, ring_size, q->ndesc);
+ q->head = Q_READ(q, dma_idx);
q->tail = q->head;
}
static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
- int i;
-
if (!q || !q->ndesc)
return;
- /* clear descriptors */
- for (i = 0; i < q->ndesc; i++)
- q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ if (!mt76_queue_is_wed_rro_ind(q)) {
+ int i;
+
+ /* clear descriptors */
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ }
- Q_WRITE(dev, q, cpu_idx, 0);
- Q_WRITE(dev, q, dma_idx, 0);
+ if (reset_idx) {
+ Q_WRITE(q, cpu_idx, 0);
+ Q_WRITE(q, dma_idx, 0);
+ }
mt76_dma_sync_idx(dev, q);
}
+static void
+mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ __mt76_dma_queue_reset(dev, q, true);
+}
+
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, void *data)
{
- struct mt76_desc *desc = &q->desc[q->head];
struct mt76_queue_entry *entry = &q->entry[q->head];
struct mt76_txwi_cache *txwi = NULL;
- u32 buf1 = 0, ctrl;
+ struct mt76_desc *desc;
int idx = q->head;
+ u32 buf1 = 0, ctrl;
int rx_token;
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_desc *rro_desc;
+
+ rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+ data = &rro_desc[q->head];
+ goto done;
+ }
+
+ desc = &q->desc[q->head];
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
+#endif
if (mt76_queue_is_wed_rx(q)) {
txwi = mt76_get_rxwi(dev);
@@ -244,6 +270,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
WRITE_ONCE(desc->info, 0);
+done:
entry->dma_addr[0] = buf->addr;
entry->dma_len[0] = buf->len;
entry->txwi = txwi;
@@ -288,11 +315,18 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
entry->dma_len[0] = buf[0].len;
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
+#endif
if (i < nbufs - 1) {
entry->dma_addr[1] = buf[1].addr;
entry->dma_len[1] = buf[1].len;
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
+ buf[1].addr >> 32);
+#endif
if (buf[1].skip_unmap)
entry->skip_buf1 = true;
}
@@ -343,7 +377,7 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
- Q_WRITE(dev, q, cpu_idx, q->head);
+ Q_WRITE(q, cpu_idx, q->head);
}
static void
@@ -359,7 +393,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
if (flush)
last = -1;
else
- last = Q_READ(dev, q, dma_idx);
+ last = Q_READ(q, dma_idx);
while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
@@ -371,7 +405,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
if (!flush && q->tail == last)
- last = Q_READ(dev, q, dma_idx);
+ last = Q_READ(q, dma_idx);
}
spin_unlock_bh(&q->cleanup_lock);
@@ -392,19 +426,26 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
- void *buf;
+ u32 ctrl, desc_info, buf1;
+ void *buf = e->buf;
+
+ if (mt76_queue_is_wed_rro_ind(q))
+ goto done;
+ ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
if (len) {
- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
}
+ desc_info = le32_to_cpu(desc->info);
if (info)
- *info = le32_to_cpu(desc->info);
+ *info = desc_info;
+
+ buf1 = le32_to_cpu(desc->buf1);
+ mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
if (mt76_queue_is_wed_rx(q)) {
- u32 buf1 = le32_to_cpu(desc->buf1);
u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
@@ -420,23 +461,16 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
t->ptr = NULL;
mt76_put_rxwi(dev, t);
-
- if (drop) {
- u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
-
- *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
- MT_DMA_CTL_DROP));
-
+ if (drop)
*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
- }
} else {
- buf = e->buf;
- e->buf = NULL;
dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
SKB_WITH_OVERHEAD(q->buf_size),
page_pool_get_dma_dir(q->page_pool));
}
+done:
+ e->buf = NULL;
return buf;
}
@@ -450,11 +484,16 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (flush)
- q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ if (mt76_queue_is_wed_rro_data(q))
return NULL;
+ if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (flush)
+ q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+ return NULL;
+ }
+
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
@@ -606,11 +645,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
+ struct mt76_queue_buf qbuf = {};
enum dma_data_direction dir;
- struct mt76_queue_buf qbuf;
dma_addr_t addr;
int offset;
- void *buf;
+ void *buf = NULL;
+
+ if (mt76_queue_is_wed_rro_ind(q))
+ goto done;
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!buf)
@@ -621,6 +663,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
qbuf.addr = addr + q->buf_offset;
+done:
qbuf.len = len - q->buf_offset;
qbuf.skip_unmap = false;
if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
@@ -630,7 +673,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
frames++;
}
- if (frames)
+ if (frames || mt76_queue_is_wed_rx(q))
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
@@ -641,15 +684,14 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- struct mtk_wed_device *wed = &dev->mmio.wed;
- int ret, type, ring;
- u8 flags;
+ int ret = 0, type, ring;
+ u16 flags;
if (!q || !q->ndesc)
return -EINVAL;
flags = q->flags;
- if (!mtk_wed_device_active(wed))
+ if (!q->wed || !mtk_wed_device_active(q->wed))
q->flags &= ~MT_QFLAG_WED;
if (!(q->flags & MT_QFLAG_WED))
@@ -660,29 +702,52 @@ int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
switch (type) {
case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
+ ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
+ reset);
if (!ret)
- q->wed_regs = wed->tx_ring[ring].reg_base;
+ q->wed_regs = q->wed->tx_ring[ring].reg_base;
break;
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
mt76_dma_queue_reset(dev, q);
mt76_dma_rx_fill(dev, q, false);
- q->flags = flags;
- ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
+ ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
if (!ret)
- q->wed_regs = wed->txfree_ring.reg_base;
+ q->wed_regs = q->wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
+ ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
+ reset);
if (!ret)
- q->wed_regs = wed->rx_ring[ring].reg_base;
+ q->wed_regs = q->wed->rx_ring[ring].reg_base;
+ break;
+ case MT76_WED_RRO_Q_DATA:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_MSDU_PG:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_IND:
+ q->flags &= ~MT_QFLAG_WED;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
default:
ret = -EINVAL;
+ break;
}
+ q->flags = flags;
return ret;
#else
@@ -706,11 +771,26 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
q->buf_size = bufsize;
q->hw_idx = idx;
- size = q->ndesc * sizeof(struct mt76_desc);
- q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
+ size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
+ : sizeof(struct mt76_desc);
+ q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
+ &q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_desc *rro_desc;
+ int i;
+
+ rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+ for (i = 0; i < q->ndesc; i++) {
+ struct mt76_wed_rro_ind *cmd;
+
+ cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+ cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
+ }
+ }
+
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
@@ -724,8 +804,13 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
- if (q->flags != MT_WED_Q_TXFREE)
- mt76_dma_queue_reset(dev, q);
+ if (mtk_wed_device_active(&dev->mmio.wed)) {
+ if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
+ mt76_queue_is_wed_tx_free(q))
+ return 0;
+ }
+
+ mt76_dma_queue_reset(dev, q);
return 0;
}
@@ -747,7 +832,8 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
if (!buf)
break;
- mt76_put_page_pool_buf(buf, false);
+ if (!mt76_queue_is_wed_rro(q))
+ mt76_put_page_pool_buf(buf, false);
} while (1);
spin_lock_bh(&q->lock);
@@ -763,22 +849,31 @@ static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
struct mt76_queue *q = &dev->q_rx[qid];
- int i;
if (!q->ndesc)
return;
- for (i = 0; i < q->ndesc; i++)
- q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ if (!mt76_queue_is_wed_rro_ind(q)) {
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+ }
mt76_dma_rx_cleanup(dev, q);
/* reset WED rx queues */
mt76_dma_wed_setup(dev, q, true);
- if (q->flags != MT_WED_Q_TXFREE) {
- mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q, false);
- }
+
+ if (mt76_queue_is_wed_tx_free(q))
+ return;
+
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ mt76_queue_is_wed_rro(q))
+ return;
+
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
}
static void
@@ -819,8 +914,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
bool more;
if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
- q->flags == MT_WED_Q_TXFREE) {
- dma_idx = Q_READ(dev, q, dma_idx);
+ mt76_queue_is_wed_tx_free(q)) {
+ dma_idx = Q_READ(q, dma_idx);
check_ddone = true;
}
@@ -830,7 +925,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (check_ddone) {
if (q->tail == dma_idx)
- dma_idx = Q_READ(dev, q, dma_idx);
+ dma_idx = Q_READ(q, dma_idx);
if (q->tail == dma_idx)
break;
@@ -959,6 +1054,20 @@ void mt76_dma_attach(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
+void mt76_dma_wed_reset(struct mt76_dev *dev)
+{
+ struct mt76_mmio *mmio = &dev->mmio;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
+ return;
+
+ complete(&mmio->wed_reset);
+
+ if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
+ dev_err(dev->dev, "wed reset complete timeout\n");
+}
+EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);
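
[Annotation] mt76_dma_wed_reset() is one half of a two-completion handshake: it kicks wed_reset, then blocks up to 3 * HZ for the wed_reset_complete ack that mt76_mmio_wed_reset_complete() fires from the other side. A compilable pthread model of the same rendezvous, a sketch that deliberately omits the timeout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Condvar-based stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

#define COMPLETION_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

static struct completion wed_reset = COMPLETION_INIT;
static struct completion wed_reset_complete = COMPLETION_INIT;

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static void *wed_side(void *arg)
{
	wait_for_completion(&wed_reset);	/* wait for the kick ... */
	puts("wed: resetting rings");
	complete(&wed_reset_complete);		/* ... then acknowledge */
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, wed_side, NULL);
	complete(&wed_reset);			/* driver side of the kick */
	wait_for_completion(&wed_reset_complete);
	pthread_join(t, NULL);
	puts("driver: reset handshake done");
	return 0;
}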
+
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
@@ -983,16 +1092,23 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
+ if (mtk_wed_device_active(&dev->mmio.wed) &&
+ mt76_queue_is_wed_rro(q))
+ continue;
+
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, q);
page_pool_destroy(q->page_pool);
}
- mt76_free_pending_txwi(dev);
- mt76_free_pending_rxwi(dev);
-
if (mtk_wed_device_active(&dev->mmio.wed))
mtk_wed_device_detach(&dev->mmio.wed);
+
+ if (mtk_wed_device_active(&dev->mmio.wed_hif2))
+ mtk_wed_device_detach(&dev->mmio.wed_hif2);
+
+ mt76_free_pending_txwi(dev);
+ mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1b090d78c..c479cc638 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -19,12 +19,23 @@
#define MT_DMA_CTL_TO_HOST_A BIT(12)
#define MT_DMA_CTL_DROP BIT(14)
#define MT_DMA_CTL_TOKEN GENMASK(31, 16)
+#define MT_DMA_CTL_SDP1_H GENMASK(19, 16)
+#define MT_DMA_CTL_SDP0_H GENMASK(3, 0)
#define MT_DMA_CTL_WO_DROP BIT(8)
#define MT_DMA_PPE_CPU_REASON GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY GENMASK(30, 16)
+#define MT_DMA_INFO_DMA_FRAG BIT(9)
#define MT_DMA_INFO_PPE_VLD BIT(31)
+#define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
+#define MT_DMA_CTL_VER_MASK BIT(7)
+
+#define MT_DMA_RRO_EN BIT(13)
+
+#define MT_DMA_WED_IND_CMD_CNT 8
+#define MT_DMA_WED_IND_REASON GENMASK(15, 12)
+
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
@@ -37,6 +48,11 @@ struct mt76_desc {
__le32 info;
} __packed __aligned(4);
+struct mt76_wed_rro_desc {
+ __le32 buf0;
+ __le32 buf1;
+} __packed __aligned(4);
+
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -54,9 +70,47 @@ enum mt76_mcu_evt_type {
EVT_EVENT_DFS_DETECT_RSP,
};
+enum mt76_dma_wed_ind_reason {
+ MT_DMA_WED_IND_REASON_NORMAL,
+ MT_DMA_WED_IND_REASON_REPEAT,
+ MT_DMA_WED_IND_REASON_OLDPKT,
+};
+
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
+void mt76_dma_wed_reset(struct mt76_dev *dev);
+
+static inline void
+mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ dev->queue_ops->reset_q(dev, q);
+ if (mtk_wed_device_active(&dev->mmio.wed))
+ mt76_dma_wed_setup(dev, q, true);
+}
+
+static inline void
+mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
+{
+ if (!drop)
+ return;
+
+ *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
+ if (!(ctrl & MT_DMA_CTL_VER_MASK))
+ return;
+
+ switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
+ case MT_DMA_WED_IND_REASON_REPEAT:
+ *drop = true;
+ break;
+ case MT_DMA_WED_IND_REASON_OLDPKT:
+ *drop = !(info & MT_DMA_INFO_DMA_FRAG);
+ break;
+ default:
+ *drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
+ break;
+ }
+}
#endif
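
[Annotation] The new mt76_dma_should_drop_buf() folds the legacy drop bits and the WED RRO indication reason into one decision. A standalone restatement of that control flow, with userspace stand-ins for the kernel's BIT()/GENMASK()/FIELD_GET():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) / ((m) & -(m)))

#define CTL_TO_HOST_A	BIT(12)
#define CTL_PN_CHK_FAIL	BIT(13)
#define CTL_DROP	BIT(14)
#define CTL_VER_MASK	BIT(7)
#define INFO_DMA_FRAG	BIT(9)
#define WED_IND_REASON	GENMASK(15, 12)

enum { REASON_NORMAL, REASON_REPEAT, REASON_OLDPKT };

/* Same decision order as mt76_dma_should_drop_buf(): the legacy bits
 * answer first; descriptors carrying the version bit defer to the WED
 * RRO indication reason instead. */
static bool should_drop(uint32_t ctrl, uint32_t buf1, uint32_t info)
{
	bool drop = ctrl & (CTL_TO_HOST_A | CTL_DROP);

	if (!(ctrl & CTL_VER_MASK))
		return drop;

	switch (FIELD_GET(WED_IND_REASON, buf1)) {
	case REASON_REPEAT:
		return true;
	case REASON_OLDPKT:
		return !(info & INFO_DMA_FRAG);
	default:
		return ctrl & CTL_PN_CHK_FAIL;
	}
}

int main(void)
{
	printf("%d\n", should_drop(CTL_VER_MASK, REASON_REPEAT << 12, 0));
	return 0;
}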
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 50820fe00..0bc66cc19 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -28,7 +28,7 @@ static int mt76_get_of_eeprom_data(struct mt76_dev *dev, void *eep, int len)
return 0;
}
-static int mt76_get_of_epprom_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len)
+int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len)
{
#ifdef CONFIG_MTD
struct device_node *np = dev->dev->of_node;
@@ -105,8 +105,10 @@ out_put_node:
return -ENOENT;
#endif
}
+EXPORT_SYMBOL_GPL(mt76_get_of_data_from_mtd);
-static int mt76_get_of_eeprom_from_nvmem(struct mt76_dev *dev, void *eep, int len)
+int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
+ const char *cell_name, int len)
{
struct device_node *np = dev->dev->of_node;
struct nvmem_cell *cell;
@@ -114,7 +116,7 @@ static int mt76_get_of_eeprom_from_nvmem(struct mt76_dev *dev, void *eep, int le
size_t retlen;
int ret = 0;
- cell = of_nvmem_cell_get(np, "eeprom");
+ cell = of_nvmem_cell_get(np, cell_name);
if (IS_ERR(cell))
return PTR_ERR(cell);
@@ -136,8 +138,9 @@ exit:
return ret;
}
+EXPORT_SYMBOL_GPL(mt76_get_of_data_from_nvmem);
-int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
+static int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int len)
{
struct device_node *np = dev->dev->of_node;
int ret;
@@ -149,13 +152,12 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
if (!ret)
return 0;
- ret = mt76_get_of_epprom_from_mtd(dev, eep, offset, len);
+ ret = mt76_get_of_data_from_mtd(dev, eep, 0, len);
if (!ret)
return 0;
- return mt76_get_of_eeprom_from_nvmem(dev, eep, len);
+ return mt76_get_of_data_from_nvmem(dev, eep, "eeprom", len);
}
-EXPORT_SYMBOL_GPL(mt76_get_of_eeprom);
void
mt76_eeprom_override(struct mt76_phy *phy)
@@ -379,7 +381,7 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
if (!np)
return target_power;
- txs_delta = mt76_get_txs_delta(np, hweight8(phy->antenna_mask));
+ txs_delta = mt76_get_txs_delta(np, hweight16(phy->chainmask));
val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val,
@@ -412,6 +414,6 @@ mt76_eeprom_init(struct mt76_dev *dev, int len)
if (!dev->eeprom.data)
return -ENOMEM;
- return !mt76_get_of_eeprom(dev, dev->eeprom.data, 0, len);
+ return !mt76_get_of_eeprom(dev, dev->eeprom.data, len);
}
EXPORT_SYMBOL_GPL(mt76_eeprom_init);
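
[Annotation] After the rename, mt76_get_of_eeprom() is a plain first-success-wins chain: the OF property, then the MTD partition, then the nvmem "eeprom" cell. The same control flow in miniature, with dummy sources standing in for the real back ends:

#include <stdio.h>
#include <string.h>

typedef int (*eep_source_fn)(void *dst, int len);

static int from_of_prop(void *dst, int len) { (void)dst; (void)len; return -1; }
static int from_mtd(void *dst, int len)     { (void)dst; (void)len; return -1; }

static int from_nvmem(void *dst, int len)
{
	memset(dst, 0xff, len);	/* pretend the nvmem cell had data */
	return 0;
}

/* Walk the sources in order; the first one that succeeds wins. */
static int load_eeprom(void *dst, int len)
{
	const eep_source_fn sources[] = { from_of_prop, from_mtd, from_nvmem };
	size_t i;

	for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++)
		if (sources[i](dst, len) == 0)
			return 0;
	return -1;	/* ENOENT-style: no source had data */
}

int main(void)
{
	unsigned char eep[16];

	printf("load_eeprom -> %d\n", load_eeprom(eep, sizeof(eep)));
	return 0;
}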
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 51a767121..8a3a90d1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -197,10 +197,33 @@ static int mt76_led_init(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_hw *hw = phy->hw;
+ struct device_node *np = dev->dev->of_node;
if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
return 0;
+ np = of_get_child_by_name(np, "led");
+ if (np) {
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ dev_info(dev->dev,
+ "led registration was explicitly disabled by dts\n");
+ return 0;
+ }
+
+ if (phy == &dev->phy) {
+ int led_pin;
+
+ if (!of_property_read_u32(np, "led-sources", &led_pin))
+ phy->leds.pin = led_pin;
+
+ phy->leds.al =
+ of_property_read_bool(np, "led-active-low");
+ }
+
+ of_node_put(np);
+ }
+
snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
wiphy_name(hw->wiphy));
@@ -211,20 +234,8 @@ static int mt76_led_init(struct mt76_phy *phy)
mt76_tpt_blink,
ARRAY_SIZE(mt76_tpt_blink));
- if (phy == &dev->phy) {
- struct device_node *np = dev->dev->of_node;
-
- np = of_get_child_by_name(np, "led");
- if (np) {
- int led_pin;
-
- if (!of_property_read_u32(np, "led-sources", &led_pin))
- phy->leds.pin = led_pin;
- phy->leds.al = of_property_read_bool(np,
- "led-active-low");
- of_node_put(np);
- }
- }
+ dev_info(dev->dev,
+ "registering led '%s'\n", phy->leds.name);
return led_classdev_register(dev->dev, &phy->leds.cdev);
}
@@ -1537,7 +1548,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
{
struct mt76_phy *phy = hw->priv;
- int n_chains = hweight8(phy->antenna_mask);
+ int n_chains = hweight16(phy->chainmask);
int delta = mt76_tx_power_nss_delta(n_chains);
*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
@@ -1725,7 +1736,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base, u32 flags)
+ int ring_base, void *wed, u32 flags)
{
struct mt76_queue *hwq;
int err;
@@ -1735,6 +1746,7 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
return ERR_PTR(-ENOMEM);
hwq->flags = flags;
+ hwq->wed = wed;
err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
if (err < 0)
@@ -1842,3 +1854,19 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mtk_wed_device *wed = &phy->dev->mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -EOPNOTSUPP;
+
+ return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
+}
+EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
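
[Annotation] The mt76_led_init() rework moves the DT parse ahead of registration, so a "led" node whose status disables it now bails out before the classdev or blink trigger is touched. A sketch of that gate; led_node is a made-up stand-in for the parsed OF properties:

#include <stdbool.h>
#include <stdio.h>

struct led_node { bool present; bool available; int pin; bool active_low; };

/* Parse-first ordering: a present-but-disabled node short-circuits
 * registration entirely, matching the new early return. */
static int led_init(const struct led_node *np)
{
	if (np->present && !np->available) {
		puts("led registration was explicitly disabled by dts");
		return 0;
	}
	if (np->present)
		printf("led pin %d, active-%s\n", np->pin,
		       np->active_low ? "low" : "high");
	puts("registering led classdev");
	return 0;
}

int main(void)
{
	struct led_node disabled = { .present = true, .available = false };

	return led_init(&disabled);
}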
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 86e3d2ac4..c3e0e23e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -4,6 +4,7 @@
*/
#include "mt76.h"
+#include "dma.h"
#include "trace.h"
static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
@@ -84,6 +85,113 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ int i;
+
+ for (i = 0; i < dev->rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(dev, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->ptr = NULL;
+
+ mt76_put_rxwi(dev, t);
+ }
+
+ mt76_free_pending_rxwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
+
+u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, len = SKB_WITH_OVERHEAD(q->buf_size);
+ struct mt76_txwi_cache *t = NULL;
+
+ for (i = 0; i < size; i++) {
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
+ int token;
+ void *buf;
+
+ t = mt76_get_rxwi(dev);
+ if (!t)
+ goto unmap;
+
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
+ goto unmap;
+
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(dev, buf, t, addr);
+ if (token < 0) {
+ mt76_put_page_pool_buf(buf, false);
+ goto unmap;
+ }
+
+ token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
+#endif
+ desc->token |= cpu_to_le32(token);
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ if (t)
+ mt76_put_rxwi(dev, t);
+ mt76_mmio_wed_release_rx_buf(wed);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
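
[Annotation] mt76_mmio_wed_init_rx_buf() populates the ring with the usual allocate-in-a-loop, unwind-on-failure shape: any mid-loop failure funnels through one label that releases everything acquired so far. The same pattern in plain C; this demo frees its buffers on success too, where the driver instead hands them to the WED block:

#include <stdio.h>
#include <stdlib.h>

static int fill_ring(int n)
{
	void **bufs = calloc(n, sizeof(*bufs));
	int ret = -1;
	int i;

	if (!bufs)
		return -1;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(64);
		if (!bufs[i])
			goto unwind;	/* mirrors the "goto unmap" path */
	}
	ret = 0;	/* all descriptors populated */

unwind:
	for (i = 0; i < n; i++)
		free(bufs[i]);	/* calloc zeroed, so free(NULL) is a no-op */
	free(bufs);
	return ret;
}

int main(void)
{
	printf("fill_ring -> %d\n", fill_ring(8));
	return 0;
}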
+
+int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->token_lock);
+
+ return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
+
+void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = dev->drv->token_size;
+ spin_unlock_bh(&dev->token_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
+
+void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
+#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index a17b2fbd6..b20c34d5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -29,15 +29,22 @@
#define MT76_TOKEN_FREE_THR 64
#define MT_QFLAG_WED_RING GENMASK(1, 0)
-#define MT_QFLAG_WED_TYPE GENMASK(3, 2)
-#define MT_QFLAG_WED BIT(4)
+#define MT_QFLAG_WED_TYPE GENMASK(4, 2)
+#define MT_QFLAG_WED BIT(5)
+#define MT_QFLAG_WED_RRO BIT(6)
+#define MT_QFLAG_WED_RRO_EN BIT(7)
#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
FIELD_PREP(MT_QFLAG_WED_RING, _n))
+#define __MT_WED_RRO_Q(_type, _n) (MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))
+
#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n) __MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0)
+#define MT_WED_RRO_Q_DATA(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
+#define MT_WED_RRO_Q_MSDU_PG(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
+#define MT_WED_RRO_Q_IND __MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
struct mt76_dev;
struct mt76_phy;
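
[Annotation] MT_QFLAG_WED_TYPE grows from two bits to three to fit the new RRO queue types, which is also why struct mt76_queue's flags field widens to u16 later in this patch. A compact model of how the __MT_WED_Q()/__MT_WED_RRO_Q() composition packs those fields; the macro stand-ins below are userspace approximations:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) * ((m) & -(m))) & (m))

#define QFLAG_WED_RING	GENMASK(1, 0)
#define QFLAG_WED_TYPE	GENMASK(4, 2)	/* widened from GENMASK(3, 2) */
#define QFLAG_WED	BIT(5)
#define QFLAG_WED_RRO	BIT(6)

enum { WED_Q_TX, WED_Q_TXFREE, WED_Q_RX, RRO_Q_DATA, RRO_Q_MSDU_PG, RRO_Q_IND };

/* __MT_WED_Q() in miniature: type + ring index + enable bit. */
static uint16_t wed_q(unsigned int type, unsigned int ring)
{
	return QFLAG_WED | FIELD_PREP(QFLAG_WED_TYPE, type) |
	       FIELD_PREP(QFLAG_WED_RING, ring);
}

/* __MT_WED_RRO_Q() adds the RRO marker bit on top. */
static uint16_t wed_rro_q(unsigned int type, unsigned int ring)
{
	return QFLAG_WED_RRO | wed_q(type, ring);
}

int main(void)
{
	printf("MT_WED_Q_RX(1)   ~ 0x%04x\n", wed_q(WED_Q_RX, 1));
	printf("MT_WED_RRO_Q_IND ~ 0x%04x\n", wed_rro_q(RRO_Q_IND, 0));
	return 0;
}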
@@ -59,6 +66,9 @@ enum mt76_wed_type {
MT76_WED_Q_TX,
MT76_WED_Q_TXFREE,
MT76_WED_Q_RX,
+ MT76_WED_RRO_Q_DATA,
+ MT76_WED_RRO_Q_MSDU_PG,
+ MT76_WED_RRO_Q_IND,
};
struct mt76_bus_ops {
@@ -107,6 +117,16 @@ enum mt76_rxq_id {
MT_RXQ_MAIN_WA,
MT_RXQ_BAND2,
MT_RXQ_BAND2_WA,
+ MT_RXQ_RRO_BAND0,
+ MT_RXQ_RRO_BAND1,
+ MT_RXQ_RRO_BAND2,
+ MT_RXQ_MSDU_PAGE_BAND0,
+ MT_RXQ_MSDU_PAGE_BAND1,
+ MT_RXQ_MSDU_PAGE_BAND2,
+ MT_RXQ_TXFREE_BAND0,
+ MT_RXQ_TXFREE_BAND1,
+ MT_RXQ_TXFREE_BAND2,
+ MT_RXQ_RRO_IND,
__MT_RXQ_MAX
};
@@ -163,7 +183,7 @@ struct mt76_queue_entry {
struct urb *urb;
int buf_sz;
};
- u32 dma_addr[2];
+ dma_addr_t dma_addr[2];
u16 dma_len[2];
u16 wcid;
bool skip_buf0:1;
@@ -184,6 +204,7 @@ struct mt76_queue {
spinlock_t lock;
spinlock_t cleanup_lock;
struct mt76_queue_entry *entry;
+ struct mt76_rro_desc *rro_desc;
struct mt76_desc *desc;
u16 first;
@@ -197,8 +218,9 @@ struct mt76_queue {
u8 buf_offset;
u8 hw_idx;
- u8 flags;
+ u16 flags;
+ struct mtk_wed_device *wed;
u32 wed_regs;
dma_addr_t desc_dma;
@@ -353,6 +375,17 @@ struct mt76_txq {
bool aggr;
};
+struct mt76_wed_rro_ind {
+ u32 se_id : 12;
+ u32 rsv : 4;
+ u32 start_sn : 12;
+ u32 ind_reason : 4;
+ u32 ind_cnt : 13;
+ u32 win_sz : 3;
+ u32 rsv2 : 13;
+ u32 magic_cnt : 3;
+};
+
struct mt76_txwi_cache {
struct list_head list;
dma_addr_t dma_addr;
@@ -371,6 +404,7 @@ struct mt76_rx_tid {
spinlock_t lock;
struct delayed_work reorder_work;
+ u16 id;
u16 head;
u16 size;
u16 nframes;
@@ -602,6 +636,7 @@ struct mt76_mmio {
u32 irqmask;
struct mtk_wed_device wed;
+ struct mtk_wed_device wed_hif2;
struct completion wed_reset;
struct completion wed_reset_complete;
};
@@ -1046,6 +1081,12 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+
static inline u16 mt76_chip(struct mt76_dev *dev)
{
return dev->rev >> 16;
@@ -1056,6 +1097,14 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
return dev->rev & 0xffff;
}
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
+void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
+int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
+void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
+void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
+#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
@@ -1101,19 +1150,22 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
-int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
+int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
+int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
+ const char *cell_name, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
- int ring_base, u32 flags);
+ int ring_base, void *wed, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
- int n_desc, int ring_base, u32 flags)
+ int n_desc, int ring_base, void *wed,
+ u32 flags)
{
struct mt76_queue *q;
- q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
+ q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
if (IS_ERR(q))
return PTR_ERR(q);
@@ -1127,7 +1179,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
{
struct mt76_queue *q;
- q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
+ q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);
@@ -1546,10 +1598,38 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
-static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
return (q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
+}
+
+static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
+{
+ return q->flags & MT_QFLAG_WED_RRO;
+}
+
+static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
+}
+
+static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+}
+
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+ if (!(q->flags & MT_QFLAG_WED))
+ return false;
+
+ return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
+ mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+
}
struct mt76_txwi_cache *
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 03ba11a61..7a2f5d385 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -173,13 +173,14 @@ int mt7603_dma_init(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
- MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT7603_TX_RING_SIZE, MT_TX_RING_BASE,
+ NULL, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
@@ -189,12 +190,12 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
- MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT_MCU_RING_SIZE, MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 89d738dee..e2146d30e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -728,6 +728,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_sar_specs = mt7603_set_sar_specs,
};
+MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static int __init mt7603_init(void)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index ba927033b..ec02148a7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -52,15 +52,12 @@ error:
return ret;
}
-static int
-mt76_wmac_remove(struct platform_device *pdev)
+static void mt76_wmac_remove(struct platform_device *pdev)
{
struct mt76_dev *mdev = platform_get_drvdata(pdev);
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mt7603_unregister_device(dev);
-
- return 0;
}
static const struct of_device_id of_wmac_match[] = {
@@ -74,7 +71,7 @@ MODULE_FIRMWARE(MT7628_FIRMWARE_E2);
struct platform_driver mt76_wmac_driver = {
.probe = mt76_wmac_probe,
- .remove = mt76_wmac_remove,
+ .remove_new = mt76_wmac_remove,
.driver = {
.name = "mt76_wmac",
.of_match_table = of_wmac_match,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 0ce01ccc5..e7135b2f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
MT7615_TX_MGMT_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
@@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
return mt7622_init_tx_queues_multi(dev);
ret = mt76_connac_init_tx_queues(&dev->mphy, 0, MT7615_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index dab16b5fc..0971c164b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = {
};
EXPORT_SYMBOL_GPL(mt7615_ops);
+MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 955974a82..ae34d019e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -453,7 +453,7 @@ mt7615_mcu_scan_event(struct mt7615_dev *dev, struct sk_buff *skb)
else
mphy = &dev->mt76.phy;
- phy = (struct mt7615_phy *)mphy->priv;
+ phy = mphy->priv;
spin_lock_bh(&dev->mt76.lock);
__skb_queue_tail(&phy->scan_event_list, skb);
@@ -481,7 +481,7 @@ mt7615_mcu_roc_event(struct mt7615_dev *dev, struct sk_buff *skb)
ieee80211_ready_on_channel(mphy->hw);
- phy = (struct mt7615_phy *)mphy->priv;
+ phy = mphy->priv;
phy->roc_grant = true;
wake_up(&phy->roc_wait);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index ac036a072..87a956ea3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -270,4 +270,5 @@ static void __exit mt7615_exit(void)
module_init(mt7615_init);
module_exit(mt7615_exit);
+MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 67cedd255..9692890ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
index f13d1b418..12e3e4a91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -45,13 +45,11 @@ static int mt7622_wmac_probe(struct platform_device *pdev)
return mt7615_mmio_probe(&pdev->dev, mem_base, irq, mt7615e_reg_map);
}
-static int mt7622_wmac_remove(struct platform_device *pdev)
+static void mt7622_wmac_remove(struct platform_device *pdev)
{
struct mt7615_dev *dev = platform_get_drvdata(pdev);
mt7615_unregister_device(dev);
-
- return 0;
}
static const struct of_device_id mt7622_wmac_of_match[] = {
@@ -65,7 +63,7 @@ struct platform_driver mt7622_wmac_driver = {
.of_match_table = mt7622_wmac_of_match,
},
.probe = mt7622_wmac_probe,
- .remove = mt7622_wmac_remove,
+ .remove_new = mt7622_wmac_remove,
};
MODULE_FIRMWARE(MT7622_FIRMWARE_N9);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 04963b9f7..df737e1ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 0052d103e..820b39590 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 1f29d8cd9..fdde3d70b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -222,6 +222,11 @@ static inline bool is_mt7996(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7990;
}
+static inline bool is_mt7992(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7992;
+}
+
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
@@ -391,7 +396,8 @@ mt76_connac_mutex_release(struct mt76_dev *dev, struct mt76_connac_pm *pm)
void mt76_connac_gen_ppe_thresh(u8 *he_ppet, int nss);
int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
- int ring_base, u32 flags);
+ int ring_base, void *wed, u32 flags);
+
void mt76_connac_write_hw_txp(struct mt76_dev *dev,
struct mt76_tx_info *tx_info,
void *txp_ptr, u32 id);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 2250252b2..83dcd964b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -239,11 +239,13 @@ enum tx_mgnt_type {
#define MT_TXD6_TX_SRC GENMASK(31, 30)
#define MT_TXD6_VTA BIT(28)
-#define MT_TXD6_BW GENMASK(25, 22)
+#define MT_TXD6_FIXED_BW BIT(25)
+#define MT_TXD6_BW GENMASK(24, 22)
#define MT_TXD6_TX_RATE GENMASK(21, 16)
#define MT_TXD6_TIMESTAMP_OFS_EN BIT(15)
#define MT_TXD6_TIMESTAMP_OFS_IDX GENMASK(14, 10)
#define MT_TXD6_MSDU_CNT GENMASK(9, 4)
+#define MT_TXD6_MSDU_CNT_V2 GENMASK(15, 10)
#define MT_TXD6_DIS_MAT BIT(3)
#define MT_TXD6_DAS BIT(2)
#define MT_TXD6_AMSDU_CAP BIT(1)
@@ -259,6 +261,9 @@ enum tx_mgnt_type {
#define MT_TXD9_WLAN_IDX GENMASK(23, 8)
+#define MT_TXP_BUF_LEN GENMASK(11, 0)
+#define MT_TXP_DMA_ADDR_H GENMASK(15, 12)
+
#define MT_TX_RATE_STBC BIT(14)
#define MT_TX_RATE_NSS GENMASK(13, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 93402d2c2..c7914643e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -256,11 +256,12 @@ void mt76_connac_txp_skb_unmap(struct mt76_dev *dev,
EXPORT_SYMBOL_GPL(mt76_connac_txp_skb_unmap);
int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
- int ring_base, u32 flags)
+ int ring_base, void *wed, u32 flags)
{
int i, err;
- err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base, flags);
+ err = mt76_init_tx_queue(phy, 0, idx, n_desc, ring_base,
+ wed, flags);
if (err < 0)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 633f56210..ae19174b4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -66,8 +66,9 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
- (is_mt7925(dev) && addr == 0x900000) ||
- (is_mt7996(dev) && addr == 0x900000))
+ (is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
+ (is_mt7996(dev) && addr == 0x900000) ||
+ (is_mt7992(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
@@ -3159,4 +3160,5 @@ exit:
EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index bf023f317..657a4d1f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -1035,7 +1035,9 @@ enum {
MCU_UNI_EVENT_RDD_REPORT = 0x11,
MCU_UNI_EVENT_ROC = 0x27,
MCU_UNI_EVENT_TX_DONE = 0x2d,
+ MCU_UNI_EVENT_THERMAL = 0x35,
MCU_UNI_EVENT_NIC_CAPAB = 0x43,
+ MCU_UNI_EVENT_WED_RRO = 0x57,
MCU_UNI_EVENT_PER_STA_INFO = 0x6d,
MCU_UNI_EVENT_ALL_STA_INFO = 0x6e,
};
@@ -1261,6 +1263,7 @@ enum {
MCU_UNI_CMD_CHANNEL_SWITCH = 0x34,
MCU_UNI_CMD_THERMAL = 0x35,
MCU_UNI_CMD_VOW = 0x37,
+ MCU_UNI_CMD_FIXED_RATE_TABLE = 0x40,
MCU_UNI_CMD_RRO = 0x57,
MCU_UNI_CMD_OFFCH_SCAN_CTRL = 0x58,
MCU_UNI_CMD_PER_STA_INFO = 0x6d,
@@ -1347,7 +1350,7 @@ enum {
};
enum UNI_ALL_STA_INFO_TAG {
- UNI_ALL_STA_TX_RATE,
+ UNI_ALL_STA_TXRX_RATE,
UNI_ALL_STA_TX_STAT,
UNI_ALL_STA_TXRX_ADM_STAT,
UNI_ALL_STA_TXRX_AIR_TIME,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index c3a392a1a..bcd24c907 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
return 0;
}
+MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 9277ff38b..293e66fa8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -302,6 +302,7 @@ static const struct pci_device_id mt76x0e_device_table[] = {
MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76x0e_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0422c3323..dd042949c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -336,6 +336,7 @@ err:
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver");
MODULE_LICENSE("GPL");
static struct usb_driver mt76x0_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 9b5e3fb7b..e5ad635d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -199,13 +199,14 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
MT76x02_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
}
ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
- MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
+ MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE,
+ NULL, 0);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 02da543df..b2cc44914 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 8a0e8124b..8020446be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -696,4 +696,5 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
+MODULE_DESCRIPTION("MediaTek MT76x02 helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 8c0185588..1fe5f5a02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index df85ebc6e..30959746e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
MODULE_FIRMWARE(MT7662_FIRMWARE);
MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76pci_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 55068f325..ca78e1425 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -147,4 +147,5 @@ static struct usb_driver mt76x2u_driver = {
module_usb_driver(mt76x2u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 59a44d79a..c91a1c540 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -9,18 +9,20 @@ static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
struct mt7915_dev *dev = phy->dev;
+ struct mtk_wed_device *wed = NULL;
- if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
if (is_mt798x(&dev->mt76))
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
else
ring_base = MT_WED_TX_RING_BASE;
idx -= MT_TXQ_ID(0);
+ wed = &dev->mt76.mmio.wed;
}
return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
- MT_WED_Q_TX(idx));
+ wed, MT_WED_Q_TX(idx));
}
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
@@ -492,7 +494,8 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA;
- dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+ mdev->q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+ mdev->q_rx[MT_RXQ_MCU_WA].wed = &mdev->mmio.wed;
} else {
wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
@@ -507,9 +510,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (!dev->phy.mt76->band_idx) {
if (mtk_wed_device_active(&mdev->mmio.wed) &&
mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
- dev->mt76.q_rx[MT_RXQ_MAIN].flags =
+ mdev->q_rx[MT_RXQ_MAIN].flags =
MT_WED_Q_RX(MT7915_RXQ_BAND0);
dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ mdev->q_rx[MT_RXQ_MAIN].wed = &mdev->mmio.wed;
}
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
@@ -528,6 +532,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (mtk_wed_device_active(&mdev->mmio.wed)) {
mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+ mdev->q_rx[MT_RXQ_MAIN_WA].wed = &mdev->mmio.wed;
if (is_mt7916(mdev)) {
wa_rx_base = MT_WED_RX_RING_BASE;
wa_rx_idx = MT7915_RXQ_MCU_WA;
@@ -544,9 +549,10 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (dev->dbdc_support || dev->phy.mt76->band_idx) {
if (mtk_wed_device_active(&mdev->mmio.wed) &&
mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
- dev->mt76.q_rx[MT_RXQ_BAND1].flags =
+ mdev->q_rx[MT_RXQ_BAND1].flags =
MT_WED_Q_RX(MT7915_RXQ_BAND1);
dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ mdev->q_rx[MT_RXQ_BAND1].wed = &mdev->mmio.wed;
}
/* rx data queue for band1 */
@@ -581,28 +587,6 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return 0;
}
-static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
-{
- struct mt76_dev *mdev = &dev->mt76;
-
- if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
- return;
-
- complete(&mdev->mmio.wed_reset);
-
- if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
- 3 * HZ))
- dev_err(dev->mt76.dev, "wed reset complete timeout\n");
-}
-
-static void
-mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
-{
- mt76_queue_reset(dev, q);
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- mt76_dma_wed_setup(&dev->mt76, q, true);
-}
-
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
@@ -630,20 +614,20 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
mtk_wed_device_dma_reset(wed);
mt7915_dma_disable(dev, force);
- mt7915_dma_wed_reset(dev);
+ mt76_dma_wed_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
+ mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
if (mphy_ext)
- mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
+ mt76_dma_reset_tx_queue(&dev->mt76, mphy_ext->q_tx[i]);
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
mt76_for_each_q_rx(&dev->mt76, i) {
- if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
+ if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
continue;
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
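The driver-local WED reset plumbing (mt7915_dma_wed_reset(), mt7915_dma_reset_tx_queue()) moves into the mt76 core, so all chip drivers now share one reset sequence. The rx-side loop is the subtle part: rings owned by the WED engine must not be reset by the host. An illustrative helper mirroring that loop, not the driver code itself:

static void demo_reset_rx_rings(struct mt7915_dev *dev)
{
	int i;

	mt76_for_each_q_rx(&dev->mt76, i) {
		/* the tx-free ring belongs to the WED engine; leave it */
		if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
			continue;
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}
}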
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 76be73084..3bb2643d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -11,6 +11,7 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
u32 offs;
+ int ret;
if (!dev->flash_mode)
return 0;
@@ -25,7 +26,11 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
- return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
+ ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, val);
+ if (!ret)
+ return ret;
+
+ return mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", val);
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
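Pre-calibration data is no longer fetched through the generic mt76_get_of_eeprom() path: the driver tries an MTD partition first and falls back to an NVMEM cell named "precal" for boards that store calibration elsewhere. The fallback chain as a generic helper (names from this hunk; zero means success):

static int demo_load_precal(struct mt76_dev *mdev, void *buf, int offs, int len)
{
	int ret;

	ret = mt76_get_of_data_from_mtd(mdev, buf, offs, len);
	if (!ret)
		return 0;	/* found in the MTD partition */

	/* otherwise look for an NVMEM cell called "precal" */
	return mt76_get_of_data_from_nvmem(mdev, buf, "precal", len);
}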
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 81478289f..cea2f6d90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -275,10 +275,11 @@ static void mt7915_led_set_brightness(struct led_classdev *led_cdev,
mt7915_led_set_config(led_cdev, 0xff, 0);
}
-void mt7915_init_txpower(struct mt7915_dev *dev,
- struct ieee80211_supported_band *sband)
+static void __mt7915_init_txpower(struct mt7915_phy *phy,
+ struct ieee80211_supported_band *sband)
{
- int i, n_chains = hweight8(dev->mphy.antenna_mask);
+ struct mt7915_dev *dev = phy->dev;
+ int i, n_chains = hweight16(phy->mt76->chainmask);
int nss_delta = mt76_tx_power_nss_delta(n_chains);
int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
@@ -296,7 +297,7 @@ void mt7915_init_txpower(struct mt7915_dev *dev,
}
target_power += pwr_delta;
- target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
+ target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
target_power += nss_delta;
@@ -307,6 +308,19 @@ void mt7915_init_txpower(struct mt7915_dev *dev,
}
}
+void mt7915_init_txpower(struct mt7915_phy *phy)
+{
+ if (!phy)
+ return;
+
+ if (phy->mt76->cap.has_2ghz)
+ __mt7915_init_txpower(phy, &phy->mt76->sband_2g.sband);
+ if (phy->mt76->cap.has_5ghz)
+ __mt7915_init_txpower(phy, &phy->mt76->sband_5g.sband);
+ if (phy->mt76->cap.has_6ghz)
+ __mt7915_init_txpower(phy, &phy->mt76->sband_6g.sband);
+}
+
static void
mt7915_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
@@ -322,9 +336,7 @@ mt7915_regd_notifier(struct wiphy *wiphy,
if (dev->mt76.region == NL80211_DFS_UNSET)
mt7915_mcu_rdd_background_enable(phy, NULL);
- mt7915_init_txpower(dev, &mphy->sband_2g.sband);
- mt7915_init_txpower(dev, &mphy->sband_5g.sband);
- mt7915_init_txpower(dev, &mphy->sband_6g.sband);
+ mt7915_init_txpower(phy);
mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7915_dfs_init_radar_detector(phy);
@@ -442,6 +454,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
mt7915_set_stream_he_caps(phy);
+ mt7915_init_txpower(phy);
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
@@ -703,9 +716,6 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_mcu_set_eeprom(dev);
mt7915_mac_init(dev);
- mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
- mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
- mt7915_init_txpower(dev, &dev->mphy.sband_6g.sband);
mt7915_txbf_init(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 2222fb9aa..b01edbed9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1247,7 +1247,7 @@ mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
void mt7915_update_channel(struct mt76_phy *mphy)
{
- struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
+ struct mt7915_phy *phy = mphy->priv;
struct mt76_channel_state *state = mphy->chan_state;
int nf;
@@ -1401,8 +1401,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
goto out;
mt7915_mac_init(dev);
- mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
- mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ mt7915_init_txpower(&dev->phy);
+ mt7915_init_txpower(phy2);
ret = mt7915_txbf_init(dev);
if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 9d747eb8a..df2d42797 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1654,20 +1654,6 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
return 0;
}
-
-static int
-mt7915_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data)
-{
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
- struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
-
- if (!mtk_wed_device_active(wed))
- return -EOPNOTSUPP;
-
- return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
-}
#endif
const struct ieee80211_ops mt7915_ops = {
@@ -1722,6 +1708,6 @@ const struct ieee80211_ops mt7915_ops = {
.set_radar_background = mt7915_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7915_net_fill_forward_path,
- .net_setup_tc = mt7915_net_setup_tc,
+ .net_setup_tc = mt76_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index b22f06d44..c67c4f6ca 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -269,7 +269,7 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
- phy = (struct mt7915_phy *)mphy->priv;
+ phy = mphy->priv;
phy->throttle_state = t->ctrl.duty.duty_cycle;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 1592b5d67..b41ac4aac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -519,7 +519,7 @@ static inline s8
mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
{
struct mt76_phy *mphy = phy->mt76;
- int n_chains = hweight8(mphy->antenna_mask);
+ int n_chains = hweight16(mphy->chainmask);
txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
txpower -= mt76_tx_power_nss_delta(n_chains);
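n_chains now counts TX chains (chainmask) rather than spatial streams (antenna_mask), and the popcount widens to 16 bits because chainmask is shifted by the per-band chainshift, so its set bits can sit above bit 7. A standalone illustration of why the width matters (mask value is an example, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int chainmask = 0xf00;	/* e.g. chains 8-11 on a later band */

	/* an 8-bit popcount only sees the low byte and undercounts */
	printf("hweight8:  %d\n", __builtin_popcount(chainmask & 0xff));   /* 0 */
	printf("hweight16: %d\n", __builtin_popcount(chainmask & 0xffff)); /* 4 */
	return 0;
}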
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 04a49ef67..dceb50598 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -490,6 +490,11 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return dev->reg.map[i].maps + ofs;
}
+ return 0;
+}
+
+static u32 __mt7915_reg_remap_addr(struct mt7915_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -514,133 +519,63 @@ void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7915_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7915_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
+ u32 addr = __mt7915_reg_addr(dev, offset), val;
- return dev->bus_ops->rr(mdev, addr);
-}
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
-static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
- dev->bus_ops->wr(mdev, addr, val);
+ return val;
}
-static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
- spin_lock_bh(&dev->mt76.token_lock);
- dev->mt76.token_size = wed->wlan.token_start;
- spin_unlock_bh(&dev->mt76.token_lock);
-
- return !wait_event_timeout(dev->mt76.tx_wait,
- !dev->mt76.wed_token_count, HZ);
-}
-
-static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
-
- spin_lock_bh(&dev->mt76.token_lock);
- dev->mt76.token_size = MT7915_TOKEN_SIZE;
- spin_unlock_bh(&dev->mt76.token_lock);
-}
-
-static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
-{
- struct mt7915_dev *dev;
- int i;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
- for (i = 0; i < dev->mt76.rx_token_size; i++) {
- struct mt76_txwi_cache *t;
-
- t = mt76_rx_token_release(&dev->mt76, i);
- if (!t || !t->ptr)
- continue;
-
- mt76_put_page_pool_buf(t->ptr, false);
- t->ptr = NULL;
-
- mt76_put_rxwi(&dev->mt76, t);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
}
- mt76_free_pending_rxwi(&dev->mt76);
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7915_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
-static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
- struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
- struct mt76_txwi_cache *t = NULL;
- struct mt7915_dev *dev;
- struct mt76_queue *q;
- int i, len;
-
- dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
- q = &dev->mt76.q_rx[MT_RXQ_MAIN];
- len = SKB_WITH_OVERHEAD(q->buf_size);
-
- for (i = 0; i < size; i++) {
- enum dma_data_direction dir;
- dma_addr_t addr;
- u32 offset;
- int token;
- void *buf;
-
- t = mt76_get_rxwi(&dev->mt76);
- if (!t)
- goto unmap;
-
- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
- if (!buf)
- goto unmap;
-
- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
-
- desc->buf0 = cpu_to_le32(addr);
- token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
- if (token < 0) {
- mt76_put_page_pool_buf(buf, false);
- goto unmap;
- }
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
- desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
- token));
- desc++;
- }
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
- return 0;
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7915_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
-unmap:
- if (t)
- mt76_put_rxwi(&dev->mt76, t);
- mt7915_mmio_wed_release_rx_buf(wed);
- return -ENOMEM;
+ return val;
}
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
struct mtk_wed_wo_rx_stats *stats)
{
@@ -694,13 +629,6 @@ out:
return ret;
}
-
-static void mt7915_mmio_wed_reset_complete(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- complete(&dev->mmio.wed_reset_complete);
-}
#endif
int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
@@ -778,13 +706,13 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
}
wed->wlan.init_buf = mt7915_wed_init_buf;
- wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable;
- wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
wed->wlan.reset = mt7915_mmio_wed_reset;
- wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
@@ -813,6 +741,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7915:
@@ -1064,4 +993,5 @@ static void __exit mt7915_exit(void)
module_init(mt7915_init);
module_exit(mt7915_exit);
+MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
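The register accessors are reorganized around two translation paths: __mt7915_reg_addr() is the fast path (a fixed table lookup, now returning 0 on a miss), and __mt7915_reg_remap_addr() is the slow path, which reprograms a shared remap window and therefore must be serialized by the new dev->reg_lock. Every accessor follows the same shape; a condensed sketch of the pattern (locking and helper names per this hunk):

static u32 demo_rr(struct mt76_dev *mdev, u32 offset)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	u32 addr = __mt7915_reg_addr(dev, offset), val;

	if (addr)	/* fast path: statically mapped register */
		return dev->bus_ops->rr(mdev, addr);

	spin_lock_bh(&dev->reg_lock);	/* slow path: shared remap window */
	val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
	spin_unlock_bh(&dev->reg_lock);

	return val;
}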
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index d317c523b..6e79bc65f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -287,6 +287,7 @@ struct mt7915_dev {
struct list_head sta_rc_list;
struct list_head twt_list;
+ spinlock_t reg_lock;
u32 hw_pattern;
@@ -425,8 +426,7 @@ void mt7915_dma_cleanup(struct mt7915_dev *dev);
int mt7915_dma_reset(struct mt7915_dev *dev, bool force);
int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset);
int mt7915_txbf_init(struct mt7915_dev *dev);
-void mt7915_init_txpower(struct mt7915_dev *dev,
- struct ieee80211_supported_band *sband);
+void mt7915_init_txpower(struct mt7915_phy *phy);
void mt7915_reset(struct mt7915_dev *dev);
int mt7915_run(struct ieee80211_hw *hw);
int mt7915_mcu_init(struct mt7915_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index 06e3d9db9..8b4809703 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -1282,13 +1282,11 @@ free_device:
return ret;
}
-static int mt798x_wmac_remove(struct platform_device *pdev)
+static void mt798x_wmac_remove(struct platform_device *pdev)
{
struct mt7915_dev *dev = platform_get_drvdata(pdev);
mt7915_unregister_device(dev);
-
- return 0;
}
static const struct of_device_id mt798x_wmac_of_match[] = {
@@ -1305,7 +1303,7 @@ struct platform_driver mt798x_wmac_driver = {
.of_match_table = mt798x_wmac_of_match,
},
.probe = mt798x_wmac_probe,
- .remove = mt798x_wmac_remove,
+ .remove_new = mt798x_wmac_remove,
};
MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
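mt798x_wmac_remove() is converted to the void-returning callback: the int returned by platform_driver::remove was only ever logged by the core, so drivers are migrating to .remove_new, which makes it explicit that removal cannot fail. A generic sketch of the conversion (names hypothetical):

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* tear down resources; no return value */
}

static struct platform_driver demo_driver = {
	.driver = { .name = "demo" },
	.probe = demo_probe,
	.remove_new = demo_remove,	/* was .remove, returning int */
};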
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0645417e0..0d5adc5dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -1418,5 +1418,6 @@ const struct ieee80211_ops mt7921_ops = {
};
EXPORT_SYMBOL_GPL(mt7921_ops);
+MODULE_DESCRIPTION("MediaTek MT7921 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 18056045d..8b4ce32a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -160,7 +160,7 @@ static void
mt7921_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv;
+ struct mt792x_phy *phy = mphy->priv;
spin_lock_bh(&dev->mt76.lock);
__skb_queue_tail(&phy->scan_event_list, skb);
@@ -1261,13 +1261,15 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
u8 alpha2[2];
u8 type[2];
u8 env_6g;
- u8 rsvd[63];
+ u8 mtcl_conf;
+ u8 rsvd[62];
} __packed req = {
.ver = 1,
.idx = idx,
.env = env_cap,
.env_6g = dev->phy.power_type,
.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
+ .mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
};
int ret, valid_cnt = 0;
u32 buf_len = 0;
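The CLC request gains its mtcl_conf byte by carving it out of the reserved tail (rsvd shrinks from 63 to 62 bytes), so the message keeps its wire length and firmware that ignores the new field still parses it. When editing packed MCU structures this way, a size assertion catches accidental layout drift; a sketch in plain C11 (the 64-byte figure is implied by this hunk, not a documented constant):

#include <assert.h>
#include <stdint.h>

struct clc_tail {
	uint8_t env_6g;
	uint8_t mtcl_conf;
	uint8_t rsvd[62];
} __attribute__((packed));

/* 1 + 1 + 62 == 64, same as the old 1 + 63 layout */
static_assert(sizeof(struct clc_tail) == 64, "CLC tail size changed");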
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 5c4cc370e..1cb211339 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -12,7 +12,8 @@
#define MT7921_TX_FWDL_RING_SIZE 128
#define MT7921_RX_RING_SIZE 1536
-#define MT7921_RX_MCU_RING_SIZE 512
+#define MT7921_RX_MCU_RING_SIZE 8
+#define MT7921_RX_MCU_WA_RING_SIZE 512
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 4d0491124..82cf3ce90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -171,7 +171,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev)
/* init tx queue */
ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
MT7921_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
@@ -200,7 +200,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev)
/* Change mcu queue after firmware download */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
MT7921_RXQ_MCU_WM,
- MT7921_RX_MCU_RING_SIZE,
+ MT7921_RX_MCU_WA_RING_SIZE,
MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
if (ret)
return ret;
@@ -545,4 +545,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
MODULE_FIRMWARE(MT7922_ROM_PATCH);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 7591e54d2..a9ce1e746 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -323,5 +323,6 @@ static struct sdio_driver mt7921s_driver = {
.drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
+MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index e5258c74f..8b7c03c47 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = {
};
module_usb_driver(mt7921u_driver);
+MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 6f0b18ae3..6179798a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -154,8 +154,7 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
static void
mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band,
- struct ieee80211_sband_iftype_data *data,
- enum nl80211_iftype iftype)
+ struct ieee80211_sband_iftype_data *data)
{
struct ieee80211_sta_eht_cap *eht_cap = &data->eht_cap;
struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem;
@@ -256,7 +255,7 @@ __mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy,
data[n].types_mask = BIT(i);
mt7925_init_he_caps(phy, band, &data[n], i);
- mt7925_init_eht_caps(phy, band, &data[n], i);
+ mt7925_init_eht_caps(phy, band, &data[n]);
n++;
}
@@ -1471,4 +1470,5 @@ const struct ieee80211_ops mt7925_ops = {
EXPORT_SYMBOL_GPL(mt7925_ops);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7925 core driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 8e7201247..e1dd89a7a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -345,7 +345,7 @@ static void
mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv;
+ struct mt792x_phy *phy = mphy->priv;
spin_lock_bh(&dev->mt76.lock);
__skb_queue_tail(&phy->scan_event_list, skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index bf02a1506..07b74d492 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -218,7 +218,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev)
/* init tx queue */
ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7925_TXQ_BAND0,
MT7925_TX_RING_SIZE,
- MT_TX_RING_BASE, 0);
+ MT_TX_RING_BASE, NULL, 0);
if (ret)
return ret;
@@ -586,4 +586,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM);
MODULE_FIRMWARE(MT7925_ROM_PATCH);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 9b885c5b3..1e0f094fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = {
module_usb_driver(mt7925u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 36fae736d..3c897b34a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -382,6 +382,7 @@ int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev);
int mt792x_init_acpi_sar(struct mt792x_dev *dev);
int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default);
u8 mt792x_acpi_get_flags(struct mt792x_phy *phy);
+u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
#else
static inline int mt792x_init_acpi_sar(struct mt792x_dev *dev)
{
@@ -398,6 +399,11 @@ static inline u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
{
return 0;
}
+
+static inline u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+{
+ return 0xf;
+}
#endif
#endif /* __MT7925_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index c4e3bfcc5..8fee3d481 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -350,3 +350,56 @@ u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
return flags;
}
EXPORT_SYMBOL_GPL(mt792x_acpi_get_flags);
+
+static u8
+mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
+{
+ u8 config = 0;
+
+ if (cl->cl6g[row] & BIT(column))
+ config |= (cl->mode_6g & 0x3) << 2;
+ if (cl->version > 1 && cl->cl5g9[row] & BIT(column))
+ config |= (cl->mode_5g9 & 0x3);
+
+ return config;
+}
+
+u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+{
+ static const char * const cc_list_all[] = {
+ "00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
+ "CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
+ "MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
+ "TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
+ };
+ static const char * const cc_list_eu[] = {
+ "AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
+ "FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
+ "LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
+ "PT", "RO", "MT", "SK", "SI", "ES", "CH",
+ };
+ struct mt792x_acpi_sar *sar = phy->acpisar;
+ struct mt792x_asar_cl *cl;
+ int col, row, i;
+
+ if (!sar)
+ return 0xf;
+
+ cl = sar->countrylist;
+ if (!cl)
+ return 0xc;
+
+ for (i = 0; i < ARRAY_SIZE(cc_list_all); i++) {
+ col = 7 - i % 8;
+ row = i / 8;
+ if (!memcmp(cc_list_all[i], alpha2, 2))
+ return mt792x_acpi_get_mtcl_map(row, col, cl);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cc_list_eu); i++)
+ if (!memcmp(cc_list_eu[i], alpha2, 2))
+ return mt792x_acpi_get_mtcl_map(0, 6, cl);
+
+ return mt792x_acpi_get_mtcl_map(0, 7, cl);
+}
+EXPORT_SYMBOL_GPL(mt792x_acpi_get_mtcl_conf);
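The MTCL lookup treats the ACPI country list as a bitmap packed eight countries per byte, most significant bit first: entry i of cc_list_all maps to bit (7 - i % 8) of byte i / 8. EU members not listed individually share one fixed slot (row 0, column 6), and everything else falls back to row 0, column 7. Note that the second "MT" in cc_list_eu shadows the earlier entry and looks like it may stand in for a different country code. A standalone check of the index math (the example country is entry 10, "CN", in the table above):

#include <stdio.h>

int main(void)
{
	int i = 10;		/* "CN" in cc_list_all */
	int row = i / 8;	/* byte 1 */
	int col = 7 - i % 8;	/* bit 5: MSB-first packing */

	printf("row %d, col %d\n", row, col);	/* -> row 1, col 5 */
	return 0;
}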
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
index d6d332e86..2298983b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
@@ -77,6 +77,8 @@ struct mt792x_asar_cl {
u8 version;
u8 mode_6g;
u8 cl6g[6];
+ u8 mode_5g9;
+ u8 cl5g9[6];
} __packed;
struct mt792x_asar_fg {
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index fb35eff6d..9ac5161df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -863,5 +863,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792x_load_firmware);
+MODULE_DESCRIPTION("MediaTek MT792x core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
index 5d1f8229f..eb29434ab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
@@ -223,7 +223,7 @@ static void
mt792x_phy_update_channel(struct mt76_phy *mphy, int idx)
{
struct mt792x_dev *dev = container_of(mphy->dev, struct mt792x_dev, mt76);
- struct mt792x_phy *phy = (struct mt792x_phy *)mphy->priv;
+ struct mt792x_phy *phy = mphy->priv;
struct mt76_channel_state *state;
u64 busy_time, tx_time, rx_time, obss_time;
int nf;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 2dd283cae..589a3efb9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -314,5 +314,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf)
}
EXPORT_SYMBOL_GPL(mt792xu_disconnect);
+MODULE_DESCRIPTION("MediaTek MT792x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 4d40ec7ff..9bd953586 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -476,7 +476,7 @@ mt7996_txbf_stat_read_phy(struct mt7996_phy *phy, struct seq_file *s)
{
struct mt76_mib_stats *mib = &phy->mib;
static const char * const bw[] = {
- "BW20", "BW40", "BW80", "BW160"
+ "BW20", "BW40", "BW80", "BW160", "BW320"
};
/* Tx Beamformer monitor */
@@ -489,8 +489,9 @@ mt7996_txbf_stat_read_phy(struct mt7996_phy *phy, struct seq_file *s)
/* Tx Beamformer Rx feedback monitor */
seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
- seq_printf(s, "All: %d, HE: %d, VHT: %d, HT: %d, ",
+ seq_printf(s, "All: %d, EHT: %d, HE: %d, VHT: %d, HT: %d, ",
mib->tx_bf_rx_fb_all_cnt,
+ mib->tx_bf_rx_fb_eht_cnt,
mib->tx_bf_rx_fb_he_cnt,
mib->tx_bf_rx_fb_vht_cnt,
mib->tx_bf_rx_fb_ht_cnt);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 586e247a1..fe37110e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -7,6 +7,26 @@
#include "../dma.h"
#include "mac.h"
+int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
+ int ring_base, struct mtk_wed_device *wed)
+{
+ struct mt7996_dev *dev = phy->dev;
+ u32 flags = 0;
+
+ if (mtk_wed_device_active(wed)) {
+ ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+ idx -= MT_TXQ_ID(0);
+
+ if (phy->mt76->band_idx == MT_BAND2)
+ flags = MT_WED_Q_TX(0);
+ else
+ flags = MT_WED_Q_TX(idx);
+ }
+
+ return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
+ ring_base, wed, flags);
+}
+
static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7996_dev *dev;
@@ -37,18 +57,51 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
- /* band0/band1 */
+ /* mt7996: band0 and band1, mt7992: band0 */
RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);
- /* band2 */
- RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
- RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
+ if (is_mt7996(&dev->mt76)) {
+ /* mt7996 band2 */
+ RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
+ RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
+ } else {
+ /* mt7992 band1 */
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
+ }
+
+ if (dev->has_rro) {
+ /* band0 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
+ MT7996_RXQ_RRO_BAND0);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
+ MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
+ MT7996_RXQ_TXFREE2);
+
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ }
/* data tx queue */
TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
- TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ if (is_mt7996(&dev->mt76)) {
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ } else {
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ }
/* mcu tx queue */
MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
@@ -56,22 +109,57 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}
+static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
+{
+ u32 ret = *base << 16 | depth;
+
+ *base = *base + (depth << 4);
+
+ return ret;
+}
+
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
-#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ u16 base = 0;
+ u8 queue;
+
+#define PREFETCH(_depth) (__mt7996_dma_prefetch_base(&base, (_depth)))
/* prefetch SRAM wrapping boundary for tx/rx ring. */
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x2));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x20, 0x2));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x40, 0x4));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x80, 0x4));
- mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0xc0, 0x2));
- mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0xe0, 0x4));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x120, 0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x140, 0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x160, 0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x180, 0x2));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
- mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));
+
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));
+
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
+
+ queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));
+
+ if (dev->has_rro) {
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
+ PREFETCH(0x10));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
+ PREFETCH(0x10));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
+ PREFETCH(0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
+ PREFETCH(0x4));
+ }
+#undef PREFETCH
mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}
@@ -128,8 +216,9 @@ static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
}
}
-void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
+void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
u32 hif1_ofs = 0;
u32 irq_mask;
@@ -138,37 +227,50 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
/* enable WFDMA Tx/Rx */
if (!reset) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN |
- MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_EXT_EN);
+ else
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_EXT_EN);
if (dev->hif2)
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_EXT_EN);
}
/* enable interrupts for TX/RX rings */
- irq_mask = MT_INT_MCU_CMD;
- if (reset)
- goto done;
-
- irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
+ irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;
- if (!dev->mphy.band_idx)
+ if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (dev->dbdc_support)
+ if (mt7996_band_valid(dev, MT_BAND1))
irq_mask |= MT_INT_BAND1_RX_DONE;
- if (dev->tbtc_support)
+ if (mt7996_band_valid(dev, MT_BAND2))
irq_mask |= MT_INT_BAND2_RX_DONE;
-done:
+ if (mtk_wed_device_active(wed) && wed_reset) {
+ u32 wed_irq_mask = irq_mask;
+
+ wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start(wed, wed_irq_mask);
+ }
+
+ irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
+
mt7996_irq_enable(dev, irq_mask);
mt7996_irq_disable(dev, 0);
}
@@ -223,6 +325,12 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
+ /* WFDMA rx threshold */
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);
+
if (dev->hif2) {
/* GLO_CFG_EXT0 */
mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
@@ -234,24 +342,108 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND |
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+
+ /* AXI read outstanding number */
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
+ MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);
+
+ /* WFDMA rx threshold */
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
+ mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
}
if (dev->hif2) {
/* fix hardware limitation, pcie1's rx ring3 is not available
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
- mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
- MT_WFDMA0_RX_INT_SEL_RING3);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ dev->has_rro)
+ mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
+ MT_WFDMA0_RX_INT_SEL_RING6);
+ else
+ mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
+ MT_WFDMA0_RX_INT_SEL_RING3);
+ }
- /* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
+ mt7996_dma_start(dev, reset, true);
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt7996_dma_rro_init(struct mt7996_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 irq_mask;
+ int ret;
+
+ /* ind cmd */
+ mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
+ MT_RXQ_ID(MT_RXQ_RRO_IND),
+ MT7996_RX_RING_SIZE,
+ 0, MT_RXQ_RRO_IND_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* rx msdu page queue for band0 */
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
+ MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_MSDU_PAGE_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
+ if (ret)
+ return ret;
+
+ if (mt7996_band_valid(dev, MT_BAND1)) {
+ /* rx msdu page queue for band1 */
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
+ MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_MSDU_PAGE_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
+ if (ret)
+ return ret;
}
- mt7996_dma_start(dev, reset);
+ if (mt7996_band_valid(dev, MT_BAND2)) {
+ /* rx msdu page queue for band2 */
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
+ MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
+ MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_MSDU_PAGE_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
+ if (ret)
+ return ret;
+ }
+
+ irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
+ MT_INT_TX_DONE_BAND2;
+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+ mt7996_irq_enable(dev, irq_mask);
+
+ return 0;
}
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
int mt7996_dma_init(struct mt7996_dev *dev)
{
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
+ u32 rx_base;
u32 hif1_ofs = 0;
int ret;
@@ -265,10 +457,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
mt7996_dma_disable(dev, true);
/* init tx queue */
- ret = mt76_connac_init_tx_queues(dev->phy.mt76,
- MT_TXQ_ID(dev->mphy.band_idx),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(0), 0);
+ ret = mt7996_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(dev->mphy.band_idx),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0),
+ wed);
if (ret)
return ret;
@@ -314,7 +507,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- /* rx data queue for band0 and band1 */
+ /* rx data queue for band0 and mt7996 band1 */
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
+ dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
MT_RXQ_ID(MT_RXQ_MAIN),
MT7996_RX_RING_SIZE,
@@ -324,6 +522,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for band0 */
+ if (mtk_wed_device_active(wed) && !dev->has_rro) {
+ dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
MT_RXQ_ID(MT_RXQ_MAIN_WA),
MT7996_RX_MCU_RING_SIZE,
@@ -332,19 +535,25 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
- /* rx data queue for band2 */
+ if (mt7996_band_valid(dev, MT_BAND2)) {
+ /* rx data queue for mt7996 band2 */
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
MT_RXQ_ID(MT_RXQ_BAND2),
MT7996_RX_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
+ rx_base);
if (ret)
return ret;
- /* tx free notify event from WA for band2
+ /* tx free notify event from WA for mt7996 band2
* use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
*/
+ if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+ dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
MT_RXQ_ID(MT_RXQ_BAND2_WA),
MT7996_RX_MCU_RING_SIZE,
@@ -352,6 +561,80 @@ int mt7996_dma_init(struct mt7996_dev *dev)
MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
if (ret)
return ret;
+ } else if (mt7996_band_valid(dev, MT_BAND1)) {
+ /* rx data queue for mt7992 band1 */
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
+ MT_RXQ_ID(MT_RXQ_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ rx_base);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for mt7992 band1 */
+ rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
+ MT7996_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ rx_base);
+ if (ret)
+ return ret;
+ }
+
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
+ dev->has_rro) {
+ /* rx rro data queue for band0 */
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
+ MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND0),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band0 */
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+
+ if (mt7996_band_valid(dev, MT_BAND2)) {
+ /* rx rro data queue for band2 */
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
+ MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND2),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from MAC for band2 */
+ if (mtk_wed_device_active(wed_hif2)) {
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
+ if (ret)
+ return ret;
+ }
}
ret = mt76_init_queues(dev, mt76_dma_rx_poll);
@@ -405,21 +688,33 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
if (force)
mt7996_wfsys_reset(dev);
+ if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
+ mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
+
mt7996_dma_disable(dev, force);
+ mt76_dma_wed_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
if (phy2)
- mt76_queue_reset(dev, phy2->q_tx[i]);
+ mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
if (phy3)
- mt76_queue_reset(dev, phy3->q_tx[i]);
+ mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
+ continue;
+
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
}
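__mt7996_dma_prefetch_base() replaces the hand-maintained offsets in the old PREFETCH(base, depth) table: each call packs the running base into the upper 16 bits next to the depth, then advances the base by depth * 16 SRAM units, so the layout stays consistent as rings are added or made conditional. A standalone rerun of the first three allocations (depth values from the PREFETCH() calls above):

#include <stdio.h>

static unsigned int prefetch(unsigned short *base, unsigned char depth)
{
	unsigned int ret = (unsigned int)*base << 16 | depth;

	*base += depth << 4;	/* next ring starts depth * 16 later */
	return ret;
}

int main(void)
{
	unsigned short base = 0;

	printf("0x%08x\n", prefetch(&base, 0x2));	/* 0x00000002 */
	printf("0x%08x\n", prefetch(&base, 0x2));	/* 0x00200002 */
	printf("0x%08x\n", prefetch(&base, 0x8));	/* 0x00400008 */
	return 0;
}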
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 544b6c6f1..4a8237118 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -14,7 +14,9 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
switch (val) {
case 0x7990:
- return 0;
+ return is_mt7996(&dev->mt76) ? 0 : -EINVAL;
+ case 0x7992:
+ return is_mt7992(&dev->mt76) ? 0 : -EINVAL;
default:
return -EINVAL;
}
@@ -22,8 +24,14 @@ static int mt7996_check_eeprom(struct mt7996_dev *dev)
static char *mt7996_eeprom_name(struct mt7996_dev *dev)
{
- /* reserve for future variants */
- return MT7996_EEPROM_DEFAULT;
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7990:
+ return MT7996_EEPROM_DEFAULT;
+ case 0x7992:
+ return MT7992_EEPROM_DEFAULT;
+ default:
+ return MT7996_EEPROM_DEFAULT;
+ }
}
static int
@@ -103,7 +111,8 @@ static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
dev->wtbl_size_group = u32_get_bits(cap, WTBL_SIZE_GROUP);
}
- if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4)
+ if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4 ||
+ is_mt7992(&dev->mt76))
dev->wtbl_size_group = 2; /* set default */
return 0;
@@ -148,36 +157,49 @@ static int mt7996_eeprom_parse_band_config(struct mt7996_phy *phy)
int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
{
- u8 path, nss, band_idx = phy->mt76->band_idx;
+ u8 path, rx_path, nss, band_idx = phy->mt76->band_idx;
u8 *eeprom = dev->mt76.eeprom.data;
struct mt76_phy *mphy = phy->mt76;
+ int max_path = 5, max_nss = 4;
int ret;
switch (band_idx) {
case MT_BAND1:
path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND1,
eeprom[MT_EE_WIFI_CONF + 2]);
+ rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND1,
eeprom[MT_EE_WIFI_CONF + 5]);
break;
case MT_BAND2:
path = FIELD_GET(MT_EE_WIFI_CONF2_TX_PATH_BAND2,
eeprom[MT_EE_WIFI_CONF + 2]);
+ rx_path = FIELD_GET(MT_EE_WIFI_CONF4_RX_PATH_BAND2,
+ eeprom[MT_EE_WIFI_CONF + 4]);
nss = FIELD_GET(MT_EE_WIFI_CONF5_STREAM_NUM_BAND2,
eeprom[MT_EE_WIFI_CONF + 5]);
break;
default:
path = FIELD_GET(MT_EE_WIFI_CONF1_TX_PATH_BAND0,
eeprom[MT_EE_WIFI_CONF + 1]);
+ rx_path = FIELD_GET(MT_EE_WIFI_CONF3_RX_PATH_BAND0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
nss = FIELD_GET(MT_EE_WIFI_CONF4_STREAM_NUM_BAND0,
eeprom[MT_EE_WIFI_CONF + 4]);
break;
}
- if (!path || path > 4)
- path = 4;
+ if (!path || path > max_path)
+ path = max_path;
+
+ if (!nss || nss > max_nss)
+ nss = max_nss;
+
+ nss = min_t(u8, nss, path);
- nss = min_t(u8, min_t(u8, 4, nss), path);
+ if (path != rx_path)
+ phy->has_aux_rx = true;
mphy->antenna_mask = BIT(nss) - 1;
mphy->chainmask = (BIT(path) - 1) << dev->chainshift[band_idx];
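EEPROM parsing now reads a per-band RX path count next to the TX path count and stream number: TX paths are clamped to at most 5, NSS to at most 4 and never above the path count, and a TX/RX mismatch marks the phy as having auxiliary RX chains. The clamping restated as a standalone check (field values are examples, not from the patch):

#include <stdio.h>

int main(void)
{
	int max_path = 5, max_nss = 4;
	int path = 5, rx_path = 4, nss = 6;	/* example EEPROM fields */

	if (!path || path > max_path)
		path = max_path;
	if (!nss || nss > max_nss)
		nss = max_nss;
	if (nss > path)
		nss = path;

	printf("path %d, nss %d, aux_rx %d\n",
	       path, nss, path != rx_path);	/* -> path 5, nss 4, aux_rx 1 */
	return 0;
}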
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
index 0c749774f..412d6e2f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
@@ -33,6 +33,9 @@ enum mt7996_eeprom_field {
#define MT_EE_WIFI_CONF1_TX_PATH_BAND0 GENMASK(5, 3)
#define MT_EE_WIFI_CONF2_TX_PATH_BAND1 GENMASK(2, 0)
#define MT_EE_WIFI_CONF2_TX_PATH_BAND2 GENMASK(5, 3)
+#define MT_EE_WIFI_CONF3_RX_PATH_BAND0 GENMASK(2, 0)
+#define MT_EE_WIFI_CONF3_RX_PATH_BAND1 GENMASK(5, 3)
+#define MT_EE_WIFI_CONF4_RX_PATH_BAND2 GENMASK(2, 0)
#define MT_EE_WIFI_CONF4_STREAM_NUM_BAND0 GENMASK(5, 3)
#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND1 GENMASK(2, 0)
#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND2 GENMASK(5, 3)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index f8b61df15..a929c657b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -5,6 +5,8 @@
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/thermal.h>
#include "mt7996.h"
#include "mac.h"
@@ -43,6 +45,183 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
+static ssize_t mt7996_thermal_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mt7996_phy *phy = dev_get_drvdata(dev);
+ int i = to_sensor_dev_attr(attr)->index;
+ int temperature;
+
+ switch (i) {
+ case 0:
+ temperature = mt7996_mcu_get_temperature(phy);
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ case 1:
+ case 2:
+ return sprintf(buf, "%u\n",
+ phy->throttle_temp[i - 1] * 1000);
+ case 3:
+ return sprintf(buf, "%hhu\n", phy->throttle_state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t mt7996_thermal_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mt7996_phy *phy = dev_get_drvdata(dev);
+ int ret, i = to_sensor_dev_attr(attr)->index;
+ long val;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&phy->dev->mt76.mutex);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 40, 130);
+
+ /* add a safety margin ~10 */
+ if ((i - 1 == MT7996_CRIT_TEMP_IDX &&
+ val > phy->throttle_temp[MT7996_MAX_TEMP_IDX] - 10) ||
+ (i - 1 == MT7996_MAX_TEMP_IDX &&
+ val - 10 < phy->throttle_temp[MT7996_CRIT_TEMP_IDX])) {
+ dev_err(phy->dev->mt76.dev,
+ "temp1_max shall be 10 degrees higher than temp1_crit.");
+ mutex_unlock(&phy->dev->mt76.mutex);
+ return -EINVAL;
+ }
+
+ phy->throttle_temp[i - 1] = val;
+ mutex_unlock(&phy->dev->mt76.mutex);
+
+ ret = mt7996_mcu_set_thermal_protect(phy, true);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7996_thermal_temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7996_thermal_temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7996_thermal_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(throttle1, mt7996_thermal_temp, 3);
+
+static struct attribute *mt7996_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_throttle1.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7996_hwmon);
+
+static int
+mt7996_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MT7996_CDEV_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+mt7996_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct mt7996_phy *phy = cdev->devdata;
+
+ *state = phy->cdev_state;
+
+ return 0;
+}
+
+static int
+mt7996_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct mt7996_phy *phy = cdev->devdata;
+ u8 throttling = MT7996_THERMAL_THROTTLE_MAX - state;
+ int ret;
+
+ if (state > MT7996_CDEV_THROTTLE_MAX) {
+ dev_err(phy->dev->mt76.dev,
+ "please specify a valid throttling state\n");
+ return -EINVAL;
+ }
+
+ if (state == phy->cdev_state)
+ return 0;
+
+ /* cooling_device convention: 0 = no cooling, more = more cooling
+ * mcu convention: 1 = max cooling, more = less cooling
+ */
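+ /* e.g. state 0 maps to the maximum duty cycle (no throttling) */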
+ ret = mt7996_mcu_set_thermal_throttling(phy, throttling);
+ if (ret)
+ return ret;
+
+ phy->cdev_state = state;
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mt7996_thermal_ops = {
+ .get_max_state = mt7996_thermal_get_max_throttle_state,
+ .get_cur_state = mt7996_thermal_get_cur_throttle_state,
+ .set_cur_state = mt7996_thermal_set_cur_throttle_state,
+};
+
+static void mt7996_unregister_thermal(struct mt7996_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+
+ if (!phy->cdev)
+ return;
+
+ sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(phy->cdev);
+}
+
+static int mt7996_thermal_init(struct mt7996_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon;
+ const char *name;
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7996_%s",
+ wiphy_name(wiphy));
+
+ cdev = thermal_cooling_device_register(name, phy, &mt7996_thermal_ops);
+ if (!IS_ERR(cdev)) {
+ if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device") < 0)
+ thermal_cooling_device_unregister(cdev);
+ else
+ phy->cdev = cdev;
+ }
+
+ /* initialize critical/maximum high temperature */
+ phy->throttle_temp[MT7996_CRIT_TEMP_IDX] = MT7996_CRIT_TEMP;
+ phy->throttle_temp[MT7996_MAX_TEMP_IDX] = MT7996_MAX_TEMP;
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7996_hwmon_groups);
+
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ return 0;
+}
+
static void mt7996_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
@@ -109,10 +288,11 @@ static void mt7996_led_set_brightness(struct led_classdev *led_cdev,
mt7996_led_set_config(led_cdev, 0xff, 0);
}
-void mt7996_init_txpower(struct mt7996_dev *dev,
- struct ieee80211_supported_band *sband)
+static void __mt7996_init_txpower(struct mt7996_phy *phy,
+ struct ieee80211_supported_band *sband)
{
- int i, nss = hweight8(dev->mphy.antenna_mask);
+ struct mt7996_dev *dev = phy->dev;
+ int i, nss = hweight16(phy->mt76->chainmask);
int nss_delta = mt76_tx_power_nss_delta(nss);
int pwr_delta = mt7996_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
@@ -122,7 +302,7 @@ void mt7996_init_txpower(struct mt7996_dev *dev,
int target_power = mt7996_eeprom_get_target_power(dev, chan);
target_power += pwr_delta;
- target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
+ target_power = mt76_get_rate_power_limits(phy->mt76, chan,
&limits,
target_power);
target_power += nss_delta;
@@ -133,6 +313,19 @@ void mt7996_init_txpower(struct mt7996_dev *dev,
}
}
+void mt7996_init_txpower(struct mt7996_phy *phy)
+{
+ if (!phy)
+ return;
+
+ if (phy->mt76->cap.has_2ghz)
+ __mt7996_init_txpower(phy, &phy->mt76->sband_2g.sband);
+ if (phy->mt76->cap.has_5ghz)
+ __mt7996_init_txpower(phy, &phy->mt76->sband_5g.sband);
+ if (phy->mt76->cap.has_6ghz)
+ __mt7996_init_txpower(phy, &phy->mt76->sband_6g.sband);
+}
+
static void
mt7996_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
@@ -147,16 +340,14 @@ mt7996_regd_notifier(struct wiphy *wiphy,
if (dev->mt76.region == NL80211_DFS_UNSET)
mt7996_mcu_rdd_background_enable(phy, NULL);
- mt7996_init_txpower(dev, &phy->mt76->sband_2g.sband);
- mt7996_init_txpower(dev, &phy->mt76->sband_5g.sband);
- mt7996_init_txpower(dev, &phy->mt76->sband_6g.sband);
+ mt7996_init_txpower(phy);
phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7996_dfs_init_radar_detector(phy);
}
static void
-mt7996_init_wiphy(struct ieee80211_hw *hw)
+mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
{
struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt76_dev *mdev = &phy->dev->mt76;
@@ -168,11 +359,14 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
hw->max_rx_aggregation_subframes = max_subframes;
hw->max_tx_aggregation_subframes = max_subframes;
hw->netdev_features = NETIF_F_RXCSUM;
+ if (mtk_wed_device_active(wed))
+ hw->netdev_features |= NETIF_F_HW_TC;
hw->radiotap_timestamp.units_pos =
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
phy->slottime = 9;
+ phy->beacon_rate = -1;
hw->sta_data_size = sizeof(struct mt7996_sta);
hw->vif_data_size = sizeof(struct mt7996_vif);
@@ -242,6 +436,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
mt7996_set_stream_he_eht_caps(phy);
+ mt7996_init_txpower(phy);
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
@@ -287,11 +482,12 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
for (i = 0; i < ARRAY_SIZE(mt76_rates); i++) {
u16 rate = mt76_rates[i].hw_value;
- u16 idx = MT7996_BASIC_RATES_TBL + i;
+ /* odd index for driver, even index for firmware */
+ u16 idx = MT7996_BASIC_RATES_TBL + 2 * i;
rate = FIELD_PREP(MT_TX_RATE_MODE, rate >> 8) |
FIELD_PREP(MT_TX_RATE_IDX, rate & GENMASK(7, 0));
- mt7996_mac_set_fixed_rate_table(dev, idx, rate);
+ mt7996_mcu_set_fixed_rate_table(&dev->phy, idx, rate, false);
}
}
@@ -317,9 +513,23 @@ void mt7996_mac_init(struct mt7996_dev *dev)
mt76_rmw_field(dev, MT_DMA_TCRF1(2), MT_DMA_TCRF1_QIDX, 0);
/* rro module init */
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+ if (is_mt7996(&dev->mt76))
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
+ else
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE,
+ dev->hif2 ? 7 : 0);
+
+ if (dev->has_rro) {
+ u16 timeout;
+
+ timeout = mt76_rr(dev, MT_HW_REV) == MT_HW_REV1 ? 512 : 128;
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_FLUSH_TIMEOUT, timeout);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+ } else {
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
+ }
mt7996_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
MCU_WA_PARAM_HW_PATH_HIF_VER,
@@ -335,7 +545,8 @@ int mt7996_txbf_init(struct mt7996_dev *dev)
{
int ret;
- if (dev->dbdc_support) {
+ if (mt7996_band_valid(dev, MT_BAND1) ||
+ mt7996_band_valid(dev, MT_BAND2)) {
ret = mt7996_mcu_set_txbf(dev, BF_MOD_EN_CTRL);
if (ret)
return ret;
@@ -356,19 +567,18 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
struct mt76_phy *mphy;
u32 mac_ofs, hif1_ofs = 0;
int ret;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
- if (band != MT_BAND1 && band != MT_BAND2)
- return 0;
-
- if ((band == MT_BAND1 && !dev->dbdc_support) ||
- (band == MT_BAND2 && !dev->tbtc_support))
+ if (!mt7996_band_valid(dev, band) || band == MT_BAND0)
return 0;
if (phy)
return 0;
- if (band == MT_BAND2 && dev->hif2)
+ if (is_mt7996(&dev->mt76) && band == MT_BAND2 && dev->hif2) {
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ wed = &dev->mt76.mmio.wed_hif2;
+ }
mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7996_ops, band);
if (!mphy)
@@ -401,11 +611,12 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
mt76_eeprom_override(mphy);
/* init wiphy according to mphy and phy */
- mt7996_init_wiphy(mphy->hw);
- ret = mt76_connac_init_tx_queues(phy->mt76,
- MT_TXQ_ID(band),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(band) + hif1_ofs, 0);
+ mt7996_init_wiphy(mphy->hw, wed);
+ ret = mt7996_init_tx_queues(mphy->priv,
+ MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs,
+ wed);
if (ret)
goto error;
@@ -414,10 +625,21 @@ static int mt7996_register_phy(struct mt7996_dev *dev, struct mt7996_phy *phy,
if (ret)
goto error;
+ ret = mt7996_thermal_init(phy);
+ if (ret)
+ goto error;
+
ret = mt7996_init_debugfs(phy);
if (ret)
goto error;
+ if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
+ u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, irq_mask);
+ }
+
return 0;
error:
@@ -434,6 +656,8 @@ mt7996_unregister_phy(struct mt7996_phy *phy, enum mt76_band_id band)
if (!phy)
return;
+ mt7996_unregister_thermal(phy);
+
mphy = phy->dev->mt76.phys[band];
mt76_unregister_phy(mphy);
ieee80211_free_hw(mphy->hw);
@@ -447,9 +671,6 @@ static void mt7996_init_work(struct work_struct *work)
mt7996_mcu_set_eeprom(dev);
mt7996_mac_init(dev);
- mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
- mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
- mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
mt7996_txbf_init(dev);
}
@@ -462,16 +683,225 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
msleep(20);
}
+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ struct mt7996_wed_rro_addr *addr;
+ void *ptr;
+ int i;
+
+ if (!dev->has_rro)
+ return 0;
+
+ if (!mtk_wed_device_active(wed))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_BA_BITMAP_CR_SIZE,
+ &dev->wed_rro.ba_bitmap[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+ int j;
+
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_WINDOW_MAX_SIZE * sizeof(*addr),
+ &dev->wed_rro.addr_elem[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.addr_elem[i].ptr = ptr;
+ memset(dev->wed_rro.addr_elem[i].ptr, 0,
+ MT7996_RRO_WINDOW_MAX_SIZE * sizeof(*addr));
+
+ addr = dev->wed_rro.addr_elem[i].ptr;
+ for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
+ addr->signature = 0xff;
+ addr++;
+ }
+
+ wed->wlan.ind_cmd.addr_elem_phys[i] =
+ dev->wed_rro.addr_elem[i].phy_addr;
+ }
+
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_WINDOW_MAX_LEN * sizeof(*addr),
+ &dev->wed_rro.session.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.session.ptr = ptr;
+ addr = dev->wed_rro.session.ptr;
+ for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
+ addr->signature = 0xff;
+ addr++;
+ }
+
+ /* rro hw init */
+ /* TODO: remove this line once the WM firmware sets it */
+ mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+ /* setup BA bitmap cache address */
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+ dev->wed_rro.ba_bitmap[0].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+ dev->wed_rro.ba_bitmap[1].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+ /* setup Address element address */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+ mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
+ reg += 4;
+ }
+
+ /* setup Address element address - separate address segment mode */
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+
+ wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+ wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
+ wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+
+ /* particular session configure */
+ /* use max session idx + 1 as particular session id */
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+ MT_RRO_PARTICULAR_CONFG_EN |
+ FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
+
+ /* interrupt enable */
+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+
+ /* rro ind cmd queue init */
+ return mt7996_dma_rro_init(dev);
+#else
+ return 0;
+#endif
+}
+
+static void mt7996_wed_rro_free(struct mt7996_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ int i;
+
+ if (!dev->has_rro)
+ return;
+
+ if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ if (!dev->wed_rro.ba_bitmap[i].ptr)
+ continue;
+
+ dmam_free_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_BA_BITMAP_CR_SIZE,
+ dev->wed_rro.ba_bitmap[i].ptr,
+ dev->wed_rro.ba_bitmap[i].phy_addr);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+ if (!dev->wed_rro.addr_elem[i].ptr)
+ continue;
+
+ dmam_free_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_WINDOW_MAX_SIZE *
+ sizeof(struct mt7996_wed_rro_addr),
+ dev->wed_rro.addr_elem[i].ptr,
+ dev->wed_rro.addr_elem[i].phy_addr);
+ }
+
+ if (!dev->wed_rro.session.ptr)
+ return;
+
+ dmam_free_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_WINDOW_MAX_LEN *
+ sizeof(struct mt7996_wed_rro_addr),
+ dev->wed_rro.session.ptr,
+ dev->wed_rro.session.phy_addr);
+#endif
+}
+
+static void mt7996_wed_rro_work(struct work_struct *work)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mt7996_dev *dev;
+ LIST_HEAD(list);
+
+ dev = (struct mt7996_dev *)container_of(work, struct mt7996_dev,
+ wed_rro.work);
+
+ spin_lock_bh(&dev->wed_rro.lock);
+ list_splice_init(&dev->wed_rro.poll_list, &list);
+ spin_unlock_bh(&dev->wed_rro.lock);
+
+ while (!list_empty(&list)) {
+ struct mt7996_wed_rro_session_id *e;
+ int i;
+
+ e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
+ list);
+ list_del_init(&e->list);
+
+ for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
+ void *ptr = dev->wed_rro.session.ptr;
+ struct mt7996_wed_rro_addr *elem;
+ u32 idx, elem_id = i;
+
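+ /* the particular session has a dedicated element table; all
+ * other sessions are grouped into addr_elem[] segments
+ */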
+ if (e->id == MT7996_RRO_MAX_SESSION)
+ goto reset;
+
+ idx = e->id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ if (idx >= ARRAY_SIZE(dev->wed_rro.addr_elem))
+ goto out;
+
+ ptr = dev->wed_rro.addr_elem[idx].ptr;
+ elem_id +=
+ (e->id % MT7996_RRO_BA_BITMAP_SESSION_SIZE) *
+ MT7996_RRO_WINDOW_MAX_LEN;
+reset:
+ elem = ptr + elem_id * sizeof(*elem);
+ elem->signature = 0xff;
+ }
+ mt7996_mcu_wed_rro_reset_sessions(dev, e->id);
+out:
+ kfree(e);
+ }
+#endif
+}
+
static int mt7996_init_hardware(struct mt7996_dev *dev)
{
int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+ if (is_mt7992(&dev->mt76)) {
+ mt76_rmw(dev, MT_AFE_CTL_BAND_PLL_03(MT_BAND0), MT_AFE_CTL_BAND_PLL_03_MSB_EN, 0);
+ mt76_rmw(dev, MT_AFE_CTL_BAND_PLL_03(MT_BAND1), MT_AFE_CTL_BAND_PLL_03_MSB_EN, 0);
+ }
INIT_WORK(&dev->init_work, mt7996_init_work);
-
- dev->dbdc_support = true;
- dev->tbtc_support = true;
+ INIT_WORK(&dev->wed_rro.work, mt7996_wed_rro_work);
+ INIT_LIST_HEAD(&dev->wed_rro.poll_list);
+ spin_lock_init(&dev->wed_rro.lock);
ret = mt7996_dma_init(dev);
if (ret)
@@ -483,6 +913,10 @@ static int mt7996_init_hardware(struct mt7996_dev *dev)
if (ret)
return ret;
+ ret = mt7996_wed_rro_init(dev);
+ if (ret)
+ return ret;
+
ret = mt7996_eeprom_init(dev);
if (ret < 0)
return ret;
@@ -890,14 +1324,16 @@ int mt7996_register_device(struct mt7996_dev *dev)
if (ret)
return ret;
- mt7996_init_wiphy(hw);
+ mt7996_init_wiphy(hw, &dev->mt76.mmio.wed);
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
return ret;
- ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+ ret = mt7996_thermal_init(&dev->phy);
+ if (ret)
+ return ret;
ret = mt7996_register_phy(dev, mt7996_phy2(dev), MT_BAND1);
if (ret)
@@ -907,21 +1343,35 @@ int mt7996_register_device(struct mt7996_dev *dev)
if (ret)
return ret;
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
dev->recovery.hw_init_done = true;
ret = mt7996_init_debugfs(&dev->phy);
if (ret)
- return ret;
+ goto error;
+
+ ret = mt7996_coredump_register(dev);
+ if (ret)
+ goto error;
- return mt7996_coredump_register(dev);
+ return 0;
+
+error:
+ cancel_work_sync(&dev->init_work);
+
+ return ret;
}
void mt7996_unregister_device(struct mt7996_dev *dev)
{
+ cancel_work_sync(&dev->wed_rro.work);
mt7996_unregister_phy(mt7996_phy3(dev), MT_BAND2);
mt7996_unregister_phy(mt7996_phy2(dev), MT_BAND1);
+ mt7996_unregister_thermal(&dev->phy);
mt7996_coredump_unregister(dev);
mt76_unregister_device(&dev->mt76);
+ mt7996_wed_rro_free(dev);
mt7996_mcu_exit(dev);
mt7996_tx_token_put(dev);
mt7996_dma_cleanup(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 7d33b0c89..0384fb059 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -102,7 +102,6 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
};
struct ieee80211_sta *sta;
struct mt7996_sta *msta;
- struct rate_info *rate;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
int i;
@@ -118,7 +117,6 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
u32 addr, val;
u16 idx;
s8 rssi[4];
- u8 bw;
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (list_empty(&sta_poll_list)) {
@@ -174,49 +172,6 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
}
- /* We don't support reading GI info from txs packets.
- * For accurate tx status reporting and AQL improvement,
- * we need to make sure that flags match so polling GI
- * from per-sta counters directly.
- */
- rate = &msta->wcid.rate;
-
- switch (rate->bw) {
- case RATE_INFO_BW_320:
- bw = IEEE80211_STA_RX_BW_320;
- break;
- case RATE_INFO_BW_160:
- bw = IEEE80211_STA_RX_BW_160;
- break;
- case RATE_INFO_BW_80:
- bw = IEEE80211_STA_RX_BW_80;
- break;
- case RATE_INFO_BW_40:
- bw = IEEE80211_STA_RX_BW_40;
- break;
- default:
- bw = IEEE80211_STA_RX_BW_20;
- break;
- }
-
- addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
- val = mt76_rr(dev, addr);
- if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
- addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
- val = mt76_rr(dev, addr);
- rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
- } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
- u8 offs = 24 + 2 * bw;
-
- rate->he_gi = (val & (0x3 << offs)) >> offs;
- } else if (rate->flags &
- (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
- if (val & BIT(12 + bw))
- rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
- else
- rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
- }
-
/* get signal strength of resp frames (CTS/BA/ACK) */
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
val = mt76_rr(dev, addr);
@@ -248,17 +203,6 @@ void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
mt76_clear(dev, addr, BIT(5));
}
-void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev,
- u8 tbl_idx, u16 rate_idx)
-{
- u32 ctrl = MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx;
-
- mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
- /* use wtbl spe idx */
- mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
- mt76_wr(dev, MT_WTBL_ITCR, ctrl);
-}
-
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
@@ -449,8 +393,36 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
return 0;
}
+static void
+mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
+ struct mt7996_sta *msta, struct sk_buff *skb,
+ u32 info)
+{
+ struct ieee80211_vif *vif;
+ struct wireless_dev *wdev;
+
+ if (!msta || !msta->vif)
+ return;
+
+ if (!mt76_queue_is_wed_rx(q))
+ return;
+
+ if (!(info & MT_DMA_INFO_PPE_VLD))
+ return;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ wdev = ieee80211_vif_to_wdev(vif);
+ skb->dev = wdev->netdev;
+
+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
static int
-mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
+mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -475,7 +447,10 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
u16 seq_ctrl = 0;
__le16 fc = 0;
int idx;
+ u8 hw_aggr = false;
+ struct mt7996_sta *msta = NULL;
+ hw_aggr = status->aggr;
memset(status, 0, sizeof(*status));
band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
@@ -502,8 +477,6 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- struct mt7996_sta *msta;
-
msta = container_of(status->wcid, struct mt7996_sta, wcid);
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (list_empty(&msta->wcid.poll_list))
@@ -708,12 +681,14 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, struct sk_buff *skb)
}
} else {
status->flag |= RX_FLAG_8023;
+ mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+ *info);
}
if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
- if (!status->wcid || !ieee80211_is_data_qos(fc))
+ if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
return 0;
status->aggr = unicast &&
@@ -757,6 +732,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
txwi[2] |= cpu_to_le32(val);
+
+ if (wcid->amsdu)
+ txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}
static void
@@ -887,8 +865,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD3_PROTECT_FRAME;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
val |= MT_TXD3_NO_ACK;
- if (wcid->amsdu)
- val |= MT_TXD3_HW_AMSDU;
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
@@ -898,8 +874,11 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
- FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
+ if (is_mt7996(&dev->mt76))
+ val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ else
+ val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
txwi[6] = cpu_to_le32(val);
txwi[7] = 0;
@@ -923,7 +902,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
idx = mvif->basic_rates_idx;
}
- txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
+ val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
+ txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
}
@@ -963,8 +943,16 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
+ u16 len;
+
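+ /* on 64-bit DMA configs the high address bits share the len field */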
+ len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
+ tx_info->buf[i + 1].addr >> 32);
+#endif
+
txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ txp->fw.len[i] = cpu_to_le16(len);
}
txp->fw.nbuf = nbuf;
@@ -996,6 +984,29 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
+ __le32 *txwi = ptr;
+ u32 val;
+
+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+ txwi[0] = cpu_to_le32(val);
+
+ val = BIT(31) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+ txwi[1] = cpu_to_le32(val);
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+ return MT_TXD_SIZE + sizeof(*txp);
+}
+
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
@@ -1257,6 +1268,8 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
goto out;
rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
@@ -1406,6 +1419,12 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
+ q == MT_RXQ_TXFREE_BAND2) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
mt7996_mac_tx_free(dev, skb->data, skb->len);
napi_consume_skb(skb, 1);
break;
@@ -1422,7 +1441,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
dev_kfree_skb(skb);
break;
case PKT_TYPE_NORMAL:
- if (!mt7996_mac_fill_rx(dev, skb)) {
+ if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
mt76_rx(&dev->mt76, q, skb);
return;
}
@@ -1528,7 +1547,7 @@ mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
void mt7996_update_channel(struct mt76_phy *mphy)
{
- struct mt7996_phy *phy = (struct mt7996_phy *)mphy->priv;
+ struct mt7996_phy *phy = mphy->priv;
struct mt76_channel_state *state = mphy->chan_state;
int nf;
@@ -1655,6 +1674,10 @@ mt7996_mac_restart(struct mt7996_dev *dev)
/* disable all tx/rx napi */
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(mdev, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&mdev->q_rx[i]))
+ continue;
+
if (mdev->q_rx[i].ndesc)
napi_disable(&dev->mt76.napi[i]);
}
@@ -1668,6 +1691,10 @@ mt7996_mac_restart(struct mt7996_dev *dev)
local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&mdev->q_rx[i]))
+ continue;
+
if (mdev->q_rx[i].ndesc) {
napi_enable(&dev->mt76.napi[i]);
napi_schedule(&dev->mt76.napi[i]);
@@ -1700,9 +1727,9 @@ mt7996_mac_restart(struct mt7996_dev *dev)
goto out;
mt7996_mac_init(dev);
- mt7996_init_txpower(dev, &dev->mphy.sband_2g.sband);
- mt7996_init_txpower(dev, &dev->mphy.sband_5g.sband);
- mt7996_init_txpower(dev, &dev->mphy.sband_6g.sband);
+ mt7996_init_txpower(&dev->phy);
+ mt7996_init_txpower(phy2);
+ mt7996_init_txpower(phy3);
ret = mt7996_txbf_init(dev);
if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
@@ -1757,6 +1784,7 @@ mt7996_mac_full_reset(struct mt7996_dev *dev)
if (phy3)
ieee80211_stop_queues(phy3->mt76->hw);
+ cancel_work_sync(&dev->wed_rro.work);
cancel_delayed_work_sync(&dev->mphy.mac_work);
if (phy2)
cancel_delayed_work_sync(&phy2->mt76->mac_work);
@@ -1839,6 +1867,13 @@ void mt7996_mac_reset_work(struct work_struct *work)
dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
wiphy_name(dev->mt76.hw->wiphy));
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+
ieee80211_stop_queues(mt76_hw(dev));
if (phy2)
ieee80211_stop_queues(phy2->mt76->hw);
@@ -1848,6 +1883,8 @@ void mt7996_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
+
+ cancel_work_sync(&dev->wed_rro.work);
cancel_delayed_work_sync(&dev->mphy.mac_work);
if (phy2) {
set_bit(MT76_RESET, &phy2->mt76->state);
@@ -1858,8 +1895,13 @@ void mt7996_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&phy3->mt76->mac_work);
}
mt76_worker_disable(&dev->mt76.tx_worker);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
+ continue;
+
napi_disable(&dev->mt76.napi[i]);
+ }
napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
@@ -1880,7 +1922,28 @@ void mt7996_mac_reset_work(struct work_struct *work)
mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
/* enable DMA Tx/Rx and interrupt */
- mt7996_dma_start(dev, false);
+ mt7996_dma_start(dev, false, false);
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
+ dev->mt76.mmio.irqmask;
+
+ if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
+ wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+ mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ true);
+ mt7996_irq_enable(dev, wed_irq_mask);
+ mt7996_irq_disable(dev, 0);
+ }
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
+ }
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_RESET, &dev->mphy.state);
@@ -1891,6 +1954,10 @@ void mt7996_mac_reset_work(struct work_struct *work)
local_bh_disable();
mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
+ continue;
+
napi_enable(&dev->mt76.napi[i]);
napi_schedule(&dev->mt76.napi[i]);
}
@@ -2190,7 +2257,9 @@ void mt7996_mac_sta_rc_work(struct work_struct *work)
IEEE80211_RC_BW_CHANGED))
mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
- /* TODO: smps change */
+ if (changed & IEEE80211_RC_SMPS_CHANGED)
+ mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
+ RATE_PARAM_MMPS_UPDATE);
spin_lock_bh(&dev->mt76.sta_poll_lock);
}
@@ -2215,6 +2284,7 @@ void mt7996_mac_work(struct work_struct *work)
mt7996_mac_update_stats(phy);
+ mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 482a8f7d7..51deea84b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -51,6 +51,14 @@ int mt7996_run(struct ieee80211_hw *hw)
if (ret)
goto out;
+ ret = mt7996_mcu_set_thermal_throttling(phy, MT7996_THERMAL_THROTTLE_MAX);
+ if (ret)
+ goto out;
+
+ ret = mt7996_mcu_set_thermal_protect(phy, true);
+ if (ret)
+ goto out;
+
set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
@@ -394,6 +402,13 @@ static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
ieee80211_wake_queues(hw);
}
+ if (changed & (IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ ret = mt7996_mcu_set_txpower_sku(phy);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
@@ -520,24 +535,25 @@ mt7996_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct mt76_phy *mphy = hw->priv;
u16 rate;
- u8 i, idx, ht;
+ u8 i, idx;
rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon, mcast);
- ht = FIELD_GET(MT_TX_RATE_MODE, rate) > MT_PHY_TYPE_OFDM;
- if (beacon && ht) {
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ if (beacon) {
+ struct mt7996_phy *phy = mphy->priv;
+
+ /* odd index for driver, even index for firmware */
+ idx = MT7996_BEACON_RATES_TBL + 2 * phy->mt76->band_idx;
+ if (phy->beacon_rate != rate)
+ mt7996_mcu_set_fixed_rate_table(phy, idx, rate, beacon);
- /* must odd index */
- idx = MT7996_BEACON_RATES_TBL + 2 * (mvif->idx % 20);
- mt7996_mac_set_fixed_rate_table(dev, idx, rate);
return idx;
}
idx = FIELD_GET(MT_TX_RATE_IDX, rate);
for (i = 0; i < ARRAY_SIZE(mt76_rates); i++)
if ((mt76_rates[i].hw_value & GENMASK(7, 0)) == idx)
- return MT7996_BASIC_RATES_TBL + i;
+ return MT7996_BASIC_RATES_TBL + 2 * i;
return mvif->basic_rates_idx;
}
@@ -962,8 +978,8 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
mt7996_set_stream_he_eht_caps(phy);
+ mt7996_mcu_set_txpower_sku(phy);
- /* TODO: update bmc_wtbl spe_idx when antenna changes */
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -988,6 +1004,7 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.he_gi = txrate->he_gi;
sinfo->txrate.he_dcm = txrate->he_dcm;
sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
+ sinfo->txrate.eht_gi = txrate->eht_gi;
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
@@ -1394,6 +1411,44 @@ out:
return ret;
}
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int
+mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy = mt7996_hw_phy(hw);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
+ wed = &dev->mt76.mmio.wed_hif2;
+
+ if (!mtk_wed_device_active(wed))
+ return -ENODEV;
+
+ if (msta->wcid.idx > MT7996_WTBL_STA)
+ return -EIO;
+
+ path->type = DEV_PATH_MTK_WDMA;
+ path->dev = ctx->dev;
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.queue = 0;
+ path->mtk_wdma.wcid = msta->wcid.idx;
+
+ path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
+ ctx->dev = NULL;
+
+ return 0;
+}
+
+#endif
+
const struct ieee80211_ops mt7996_ops = {
.tx = mt7996_tx,
.start = mt7996_start,
@@ -1438,4 +1493,8 @@ const struct ieee80211_ops mt7996_ops = {
.sta_add_debugfs = mt7996_sta_add_debugfs,
#endif
.set_radar_background = mt7996_set_radar_background,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ .net_fill_forward_path = mt7996_net_fill_forward_path,
+ .net_setup_tc = mt76_net_setup_tc,
+#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 0586f5bd4..9e70b9600 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -10,6 +10,20 @@
#include "mac.h"
#include "eeprom.h"
+#define fw_name(_dev, name, ...) ({ \
+ char *_fw; \
+ switch (mt76_chip(&(_dev)->mt76)) { \
+ case 0x7992: \
+ _fw = MT7992_##name; \
+ break; \
+ case 0x7990: \
+ default: \
+ _fw = MT7996_##name; \
+ break; \
+ } \
+ _fw; \
+})
+
struct mt7996_patch_hdr {
char build_date[16];
char platform[4];
@@ -449,6 +463,43 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb)
}
}
+static int
+mt7996_mcu_update_tx_gi(struct rate_info *rate, struct all_sta_trx_rate *mcu_rate)
+{
+ switch (mcu_rate->tx_mode) {
+ case MT_PHY_TYPE_CCK:
+ case MT_PHY_TYPE_OFDM:
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_VHT:
+ if (mcu_rate->tx_gi)
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ else
+ rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (mcu_rate->tx_gi > NL80211_RATE_INFO_HE_GI_3_2)
+ return -EINVAL;
+ rate->he_gi = mcu_rate->tx_gi;
+ break;
+ case MT_PHY_TYPE_EHT_SU:
+ case MT_PHY_TYPE_EHT_TRIG:
+ case MT_PHY_TYPE_EHT_MU:
+ if (mcu_rate->tx_gi > NL80211_RATE_INFO_EHT_GI_3_2)
+ return -EINVAL;
+ rate->eht_gi = mcu_rate->tx_gi;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void
mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb)
{
@@ -465,6 +516,16 @@ mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb)
struct mt76_wcid *wcid;
switch (le16_to_cpu(res->tag)) {
+ case UNI_ALL_STA_TXRX_RATE:
+ wlan_idx = le16_to_cpu(res->rate[i].wlan_idx);
+ wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
+
+ if (!wcid)
+ break;
+
+ if (mt7996_mcu_update_tx_gi(&wcid->rate, &res->rate[i]))
+ dev_err(dev->mt76.dev, "Failed to update TX GI\n");
+ break;
case UNI_ALL_STA_TXRX_ADM_STAT:
wlan_idx = le16_to_cpu(res->adm_stat[i].wlan_idx);
wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
@@ -498,6 +559,34 @@ mt7996_mcu_rx_all_sta_info_event(struct mt7996_dev *dev, struct sk_buff *skb)
}
static void
+mt7996_mcu_rx_thermal_notify(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+#define THERMAL_NOTIFY_TAG 0x4
+#define THERMAL_NOTIFY 0x2
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7996_mcu_thermal_notify *n;
+ struct mt7996_phy *phy;
+
+ n = (struct mt7996_mcu_thermal_notify *)skb->data;
+
+ if (le16_to_cpu(n->tag) != THERMAL_NOTIFY_TAG)
+ return;
+
+ if (n->event_id != THERMAL_NOTIFY)
+ return;
+
+ if (n->band_idx > MT_BAND2)
+ return;
+
+ mphy = dev->mt76.phys[n->band_idx];
+ if (!mphy)
+ return;
+
+ phy = (struct mt7996_phy *)mphy->priv;
+ phy->throttle_state = n->duty_percent;
+}
+
+static void
mt7996_mcu_rx_ext_event(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
@@ -520,6 +609,9 @@ mt7996_mcu_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb)
case MCU_EVENT_EXT:
mt7996_mcu_rx_ext_event(dev, skb);
break;
+ case MCU_UNI_EVENT_THERMAL:
+ mt7996_mcu_rx_thermal_notify(dev, skb);
+ break;
default:
break;
}
@@ -527,6 +619,73 @@ mt7996_mcu_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb)
}
static void
+mt7996_mcu_wed_rro_event(struct mt7996_dev *dev, struct sk_buff *skb)
+{
+ struct mt7996_mcu_wed_rro_event *event = (void *)skb->data;
+
+ if (!dev->has_rro)
+ return;
+
+ skb_pull(skb, sizeof(struct mt7996_mcu_rxd) + 4);
+
+ switch (le16_to_cpu(event->tag)) {
+ case UNI_WED_RRO_BA_SESSION_STATUS: {
+ struct mt7996_mcu_wed_rro_ba_event *e;
+
+ while (skb->len >= sizeof(*e)) {
+ struct mt76_rx_tid *tid;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ e = (void *)skb->data;
+ idx = le16_to_cpu(e->wlan_id);
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ break;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (!wcid || !wcid->sta)
+ break;
+
+ if (e->tid >= ARRAY_SIZE(wcid->aggr))
+ break;
+
+ tid = rcu_dereference(wcid->aggr[e->tid]);
+ if (!tid)
+ break;
+
+ tid->id = le16_to_cpu(e->id);
+ skb_pull(skb, sizeof(*e));
+ }
+ break;
+ }
+ case UNI_WED_RRO_BA_SESSION_DELETE: {
+ struct mt7996_mcu_wed_rro_ba_delete_event *e;
+
+ while (skb->len >= sizeof(*e)) {
+ struct mt7996_wed_rro_session_id *session;
+
+ e = (void *)skb->data;
+ session = kzalloc(sizeof(*session), GFP_ATOMIC);
+ if (!session)
+ break;
+
+ session->id = le16_to_cpu(e->session_id);
+
+ spin_lock_bh(&dev->wed_rro.lock);
+ list_add_tail(&session->list, &dev->wed_rro.poll_list);
+ spin_unlock_bh(&dev->wed_rro.lock);
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->wed_rro.work);
+ skb_pull(skb, sizeof(*e));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void
mt7996_mcu_uni_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct mt7996_mcu_rxd *rxd = (struct mt7996_mcu_rxd *)skb->data;
@@ -544,6 +703,9 @@ mt7996_mcu_uni_rx_unsolicited_event(struct mt7996_dev *dev, struct sk_buff *skb)
case MCU_UNI_EVENT_ALL_STA_INFO:
mt7996_mcu_rx_all_sta_info_event(dev, skb);
break;
+ case MCU_UNI_EVENT_WED_RRO:
+ mt7996_mcu_wed_rro_event(dev, skb);
+ break;
default:
break;
}
@@ -963,7 +1125,7 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif)
}
static int
-mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
+mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
@@ -972,7 +1134,7 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mvif, wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -986,8 +1148,9 @@ mt7996_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
ba->ba_en = enable << params->tid;
ba->amsdu = params->amsdu;
ba->tid = params->tid;
+ ba->ba_rdd_rro = !tx && enable && dev->has_rro;
- return mt76_mcu_skb_send_msg(dev, skb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
@@ -1002,8 +1165,7 @@ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- enable, true);
+ return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, true);
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
@@ -1013,8 +1175,7 @@ int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
struct mt7996_vif *mvif = msta->vif;
- return mt7996_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- enable, false);
+ return mt7996_mcu_sta_ba(dev, &mvif->mt76, params, enable, false);
}
static void
@@ -1120,7 +1281,7 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
static void
mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
- struct sta_rec_ht *ht;
+ struct sta_rec_ht_uni *ht;
struct tlv *tlv;
if (!sta->deflink.ht_cap.ht_supported)
@@ -1128,8 +1289,12 @@ mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
- ht = (struct sta_rec_ht *)tlv;
+ ht = (struct sta_rec_ht_uni *)tlv;
ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
+ ht->ampdu_param = u8_encode_bits(sta->deflink.ht_cap.ampdu_factor,
+ IEEE80211_HT_AMPDU_PARM_FACTOR) |
+ u8_encode_bits(sta->deflink.ht_cap.ampdu_density,
+ IEEE80211_HT_AMPDU_PARM_DENSITY);
}
static void
@@ -1587,44 +1752,6 @@ mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
}
static void
-mt7996_mcu_sta_phy_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
-{
- struct sta_rec_phy *phy;
- struct tlv *tlv;
- u8 af = 0, mm = 0;
-
- if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
- return;
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
-
- phy = (struct sta_rec_phy *)tlv;
- if (sta->deflink.ht_cap.ht_supported) {
- af = sta->deflink.ht_cap.ampdu_factor;
- mm = sta->deflink.ht_cap.ampdu_density;
- }
-
- if (sta->deflink.vht_cap.vht_supported) {
- u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->deflink.vht_cap.cap);
-
- af = max_t(u8, af, vht_af);
- }
-
- if (sta->deflink.he_6ghz_capa.capa) {
- af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
- mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
- }
-
- phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, af) |
- FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY, mm);
- phy->max_ampdu_len = af;
-}
-
-static void
mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct sta_rec_hdrt *hdrt;
@@ -1712,14 +1839,13 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
MCU_WM_UNI_CMD(RA), true);
}
-static int
-mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *data, u32 field)
+int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *data, u32 field)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct sta_phy *phy = data;
- struct sta_rec_ra_fixed *ra;
+ struct sta_phy_uni *phy = data;
+ struct sta_rec_ra_fixed_uni *ra;
struct sk_buff *skb;
struct tlv *tlv;
@@ -1730,7 +1856,7 @@ mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
- ra = (struct sta_rec_ra_fixed *)tlv;
+ ra = (struct sta_rec_ra_fixed_uni *)tlv;
switch (field) {
case RATE_PARAM_AUTO:
@@ -1742,6 +1868,9 @@ mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (phy)
ra->phy = *phy;
break;
+ case RATE_PARAM_MMPS_UPDATE:
+ ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
+ break;
default:
break;
}
@@ -1759,7 +1888,7 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
- struct sta_phy phy = {};
+ struct sta_phy_uni phy = {};
int ret, nrates = 0;
#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \
@@ -1847,13 +1976,13 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
struct cfg80211_chan_def *chandef = &mphy->chandef;
struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
- struct sta_rec_ra *ra;
+ struct sta_rec_ra_uni *ra;
struct tlv *tlv;
u32 supp_rate = sta->deflink.supp_rates[band];
u32 cap = sta->wme ? STA_CAP_WMM : 0;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
- ra = (struct sta_rec_ra *)tlv;
+ ra = (struct sta_rec_ra_uni *)tlv;
ra->valid = true;
ra->auto_rate = true;
@@ -2030,8 +2159,6 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
/* tag order is in accordance with firmware dependency. */
if (sta) {
- /* starec phy */
- mt7996_mcu_sta_phy_tlv(dev, skb, vif, sta);
/* starec hdrt mode */
mt7996_mcu_sta_hdrt_tlv(dev, skb);
/* starec bfer */
@@ -2538,7 +2665,7 @@ static int mt7996_load_patch(struct mt7996_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7996_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, fw_name(dev, ROM_PATCH), dev->mt76.dev);
if (ret)
goto out;
@@ -2701,17 +2828,17 @@ static int mt7996_load_ram(struct mt7996_dev *dev)
{
int ret;
- ret = __mt7996_load_ram(dev, "WM", MT7996_FIRMWARE_WM,
+ ret = __mt7996_load_ram(dev, "WM", fw_name(dev, FIRMWARE_WM),
MT7996_RAM_TYPE_WM);
if (ret)
return ret;
- ret = __mt7996_load_ram(dev, "DSP", MT7996_FIRMWARE_DSP,
+ ret = __mt7996_load_ram(dev, "DSP", fw_name(dev, FIRMWARE_DSP),
MT7996_RAM_TYPE_DSP);
if (ret)
return ret;
- return __mt7996_load_ram(dev, "WA", MT7996_FIRMWARE_WA,
+ return __mt7996_load_ram(dev, "WA", fw_name(dev, FIRMWARE_WA),
MT7996_RAM_TYPE_WA);
}
@@ -2863,9 +2990,10 @@ mt7996_mcu_init_rx_airtime(struct mt7996_dev *dev)
{
struct uni_header hdr = {};
struct sk_buff *skb;
- int len, num;
+ int len, num, i;
- num = 2 + 2 * (dev->dbdc_support + dev->tbtc_support);
+ num = 2 + 2 * (mt7996_band_valid(dev, MT_BAND1) +
+ mt7996_band_valid(dev, MT_BAND2));
len = sizeof(hdr) + num * sizeof(struct vow_rx_airtime);
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
if (!skb)
@@ -2873,13 +3001,10 @@ mt7996_mcu_init_rx_airtime(struct mt7996_dev *dev)
skb_put_data(skb, &hdr, sizeof(hdr));
- mt7996_add_rx_airtime_tlv(skb, dev->mt76.phy.band_idx);
-
- if (dev->dbdc_support)
- mt7996_add_rx_airtime_tlv(skb, MT_BAND1);
-
- if (dev->tbtc_support)
- mt7996_add_rx_airtime_tlv(skb, MT_BAND2);
+ for (i = 0; i < __MT_MAX_BAND; i++) {
+ if (mt7996_band_valid(dev, i))
+ mt7996_add_rx_airtime_tlv(skb, i);
+ }
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WM_UNI_CMD(VOW), true);
@@ -3305,7 +3430,7 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
.center_ch = ieee80211_frequency_to_channel(freq1),
.bw = mt76_connac_chan_bw(chandef),
.tx_path_num = hweight16(phy->mt76->chainmask),
- .rx_path = phy->mt76->chainmask >> dev->chainshift[band_idx],
+ .rx_path = mt7996_rx_chainmask(phy) >> dev->chainshift[band_idx],
.band_idx = band_idx,
.channel_band = ch_band[chandef->chan->band],
};
@@ -3577,6 +3702,121 @@ out:
return 0;
}
+int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
+{
+#define TEMPERATURE_QUERY 0
+#define GET_TEMPERATURE 0
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 rsv1;
+ u8 action;
+ u8 band_idx;
+ u8 rsv2;
+ } req = {
+ .tag = cpu_to_le16(TEMPERATURE_QUERY),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .action = GET_TEMPERATURE,
+ .band_idx = phy->mt76->band_idx,
+ };
+ struct mt7996_mcu_thermal {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 rsv;
+ __le32 temperature;
+ } __packed * res;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (void *)skb->data;
+
+ return le32_to_cpu(res->temperature);
+}
+
+int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ struct mt7996_mcu_thermal_ctrl ctrl;
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_CMD_THERMAL_PROTECT_DUTY_CONFIG),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .ctrl = {
+ .band_idx = phy->mt76->band_idx,
+ },
+ };
+ int level, ret;
+
+ /* set duty cycle and level */
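+ /* each successive level gets half the duty cycle of the previous one */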
+ for (level = 0; level < 4; level++) {
+ req.ctrl.duty.duty_level = level;
+ req.ctrl.duty.duty_cycle = state;
+ state /= 2;
+
+ ret = mt76_mcu_send_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
+ &req, sizeof(req), false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mt7996_mcu_set_thermal_protect(struct mt7996_phy *phy, bool enable)
+{
+#define SUSTAIN_PERIOD 10
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ struct mt7996_mcu_thermal_ctrl ctrl;
+ struct mt7996_mcu_thermal_enable enable;
+ } __packed req = {
+ .len = cpu_to_le16(sizeof(req) - 4 - sizeof(req.enable)),
+ .ctrl = {
+ .band_idx = phy->mt76->band_idx,
+ .type.protect_type = 1,
+ .type.trigger_type = 1,
+ },
+ };
+ int ret;
+
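+ /* clear any previous protect configuration before (re-)arming it */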
+ req.tag = cpu_to_le16(UNI_CMD_THERMAL_PROTECT_DISABLE);
+
+ ret = mt76_mcu_send_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
+ &req, sizeof(req) - sizeof(req.enable), false);
+ if (ret || !enable)
+ return ret;
+
+ /* set high-temperature trigger threshold */
+ req.tag = cpu_to_le16(UNI_CMD_THERMAL_PROTECT_ENABLE);
+ req.enable.restore_temp = cpu_to_le32(phy->throttle_temp[0]);
+ req.enable.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
+ req.enable.sustain_time = cpu_to_le16(SUSTAIN_PERIOD);
+
+ req.len = cpu_to_le16(sizeof(req) - 4);
+
+ return mt76_mcu_send_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
+ &req, sizeof(req), false);
+}
+
int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 val, u8 band)
{
struct {
@@ -4039,6 +4279,35 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
+int mt7996_mcu_set_fixed_rate_table(struct mt7996_phy *phy, u8 table_idx,
+ u16 rate_idx, bool beacon)
+{
+#define UNI_FIXED_RATE_TABLE_SET 0
+#define SPE_IXD_SELECT_TXD 0
+#define SPE_IXD_SELECT_BMC_WTBL 1
+ struct mt7996_dev *dev = phy->dev;
+ struct fixed_rate_table_ctrl req = {
+ .tag = cpu_to_le16(UNI_FIXED_RATE_TABLE_SET),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .table_idx = table_idx,
+ .rate_idx = cpu_to_le16(rate_idx),
+ .gi = 1,
+ .he_ltf = 1,
+ };
+ u8 band_idx = phy->mt76->band_idx;
+
+ if (beacon) {
+ req.spe_idx_sel = SPE_IXD_SELECT_TXD;
+ req.spe_idx = 24 + band_idx;
+ phy->beacon_rate = rate_idx;
+ } else {
+ req.spe_idx_sel = SPE_IXD_SELECT_BMC_WTBL;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(FIXED_RATE_TABLE),
+ &req, sizeof(req), false);
+}
+
int mt7996_mcu_rf_regval(struct mt7996_dev *dev, u32 regidx, u32 *val, bool set)
{
struct {
@@ -4094,14 +4363,12 @@ int mt7996_mcu_trigger_assert(struct mt7996_dev *dev)
&req, sizeof(req), false);
}
-int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val)
+int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u16 val)
{
struct {
u8 __rsv1[4];
-
__le16 tag;
__le16 len;
-
union {
struct {
u8 type;
@@ -4116,6 +4383,11 @@ int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val)
u8 path;
u8 __rsv2[3];
} __packed txfree_path;
+ struct {
+ __le16 flush_one;
+ __le16 flush_all;
+ u8 __rsv2[4];
+ } __packed timeout;
};
} __packed req = {
.tag = cpu_to_le16(tag),
@@ -4132,6 +4404,10 @@ int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val)
case UNI_RRO_SET_TXFREE_PATH:
req.txfree_path.path = val;
break;
+ case UNI_RRO_SET_FLUSH_TIMEOUT:
+ req.timeout.flush_one = cpu_to_le16(val);
+ req.timeout.flush_all = cpu_to_le16(2 * val);
+ break;
default:
return -EINVAL;
}
@@ -4156,3 +4432,81 @@ int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag)
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(ALL_STA_INFO),
&req, sizeof(req), false);
}
+
+int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id)
+{
+ struct {
+ u8 __rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ __le16 session_id;
+ u8 pad[4];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_RRO_DEL_BA_SESSION),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .session_id = cpu_to_le16(id),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(RRO), &req,
+ sizeof(req), true);
+}
+
+int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
+{
+#define TX_POWER_LIMIT_TABLE_RATE 0
+ struct mt7996_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct tx_power_limit_table_ctrl {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 power_ctrl_id;
+ u8 power_limit_type;
+ u8 band_idx;
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL),
+ .len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4),
+ .power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL,
+ .power_limit_type = TX_POWER_LIMIT_TABLE_RATE,
+ .band_idx = phy->mt76->band_idx,
+ };
+ struct mt76_power_limits la = {};
+ struct sk_buff *skb;
+ int i, tx_power;
+
+ tx_power = mt7996_get_power_bound(phy, hw->conf.power_level);
+ tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
+ &la, tx_power);
+ mphy->txpower_cur = tx_power;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(req) + MT7996_SKU_RATE_NUM);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, &req, sizeof(req));
+ /* cck and ofdm */
+ skb_put_data(skb, &la.cck, sizeof(la.cck));
+ skb_put_data(skb, &la.ofdm, sizeof(la.ofdm));
+ /* ht20 */
+ skb_put_data(skb, &la.mcs[0], 8);
+ /* ht40 */
+ skb_put_data(skb, &la.mcs[1], 9);
+
+ /* vht */
+ for (i = 0; i < 4; i++) {
+ skb_put_data(skb, &la.mcs[i], sizeof(la.mcs[i]));
+ skb_put_zero(skb, 2); /* padding */
+ }
+
+ /* he */
+ skb_put_data(skb, &la.ru[0], sizeof(la.ru));
+ /* eht */
+ skb_put_data(skb, &la.eht[0], sizeof(la.eht));
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD(TXPOWER), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 32ce57c8c..36cacc495 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -30,6 +30,28 @@ struct mt7996_mcu_uni_event {
__le32 status; /* 0: success, others: fail */
} __packed;
+struct mt7996_mcu_thermal_ctrl {
+ u8 ctrl_id;
+ u8 band_idx;
+ union {
+ struct {
+ u8 protect_type; /* 1: duty admit, 2: radio off */
+ u8 trigger_type; /* 0: low, 1: high */
+ } __packed type;
+ struct {
+ u8 duty_level; /* level 0~3 */
+ u8 duty_cycle;
+ } __packed duty;
+ };
+} __packed;
+
+struct mt7996_mcu_thermal_enable {
+ __le32 trigger_temp;
+ __le32 restore_temp;
+ __le16 sustain_time;
+ u8 rsv[2];
+} __packed;
+
struct mt7996_mcu_csa_notify {
struct mt7996_mcu_rxd rxd;
@@ -153,6 +175,27 @@ struct mt7996_mcu_mib {
__le64 data;
} __packed;
+struct all_sta_trx_rate {
+ __le16 wlan_idx;
+ u8 __rsv1[2];
+ u8 tx_mode;
+ u8 flags;
+ u8 tx_stbc;
+ u8 tx_gi;
+ u8 tx_bw;
+ u8 tx_ldpc;
+ u8 tx_mcs;
+ u8 tx_nss;
+ u8 rx_rate;
+ u8 rx_mode;
+ u8 rx_nsts;
+ u8 rx_gi;
+ u8 rx_coding;
+ u8 rx_stbc;
+ u8 rx_bw;
+ u8 __rsv2;
+} __packed;
+
struct mt7996_mcu_all_sta_info_event {
u8 rsv[4];
__le16 tag;
@@ -163,22 +206,74 @@ struct mt7996_mcu_all_sta_info_event {
u8 rsv3[4];
union {
- struct {
+ DECLARE_FLEX_ARRAY(struct all_sta_trx_rate, rate);
+ DECLARE_FLEX_ARRAY(struct {
__le16 wlan_idx;
u8 rsv[2];
__le32 tx_bytes[IEEE80211_NUM_ACS];
__le32 rx_bytes[IEEE80211_NUM_ACS];
- } adm_stat[0] __packed;
+ } __packed, adm_stat);
- struct {
+ DECLARE_FLEX_ARRAY(struct {
__le16 wlan_idx;
u8 rsv[2];
__le32 tx_msdu_cnt;
__le32 rx_msdu_cnt;
- } msdu_cnt[0] __packed;
+ } __packed, msdu_cnt);
} __packed;
} __packed;
+struct mt7996_mcu_wed_rro_event {
+ struct mt7996_mcu_rxd rxd;
+
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+} __packed;
+
+struct mt7996_mcu_wed_rro_ba_event {
+ __le16 tag;
+ __le16 len;
+
+ __le16 wlan_id;
+ u8 tid;
+ u8 __rsv1;
+ __le32 status;
+ __le16 id;
+ u8 __rsv2[2];
+} __packed;
+
+struct mt7996_mcu_wed_rro_ba_delete_event {
+ __le16 tag;
+ __le16 len;
+
+ __le16 session_id;
+ u8 __rsv2[2];
+} __packed;
+
+enum {
+ UNI_WED_RRO_BA_SESSION_STATUS,
+ UNI_WED_RRO_BA_SESSION_TBL,
+ UNI_WED_RRO_BA_SESSION_DELETE,
+};
+
+struct mt7996_mcu_thermal_notify {
+ struct mt7996_mcu_rxd rxd;
+
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 event_id;
+ u8 band_idx;
+ u8 level_idx;
+ u8 duty_percent;
+ __le32 restore_temp;
+ u8 __rsv2[4];
+} __packed;
+
enum mt7996_chan_mib_offs {
UNI_MIB_OBSS_AIRTIME = 26,
UNI_MIB_NON_WIFI_TIME = 27,
@@ -389,6 +484,15 @@ struct bss_mld_tlv {
u8 __rsv[3];
} __packed;
+struct sta_rec_ht_uni {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ __le16 ht_cap_ext;
+ u8 ampdu_param;
+ u8 _rsv[3];
+} __packed;
+
struct sta_rec_ba_uni {
__le16 tag;
__le16 len;
@@ -438,6 +542,73 @@ struct sta_rec_sec_uni {
struct sec_key_uni key[2];
} __packed;
+struct sta_phy_uni {
+ u8 type;
+ u8 flag;
+ u8 stbc;
+ u8 sgi;
+ u8 bw;
+ u8 ldpc;
+ u8 mcs;
+ u8 nss;
+ u8 he_ltf;
+ u8 rsv[3];
+};
+
+struct sta_rec_ra_uni {
+ __le16 tag;
+ __le16 len;
+
+ u8 valid;
+ u8 auto_rate;
+ u8 phy_mode;
+ u8 channel;
+ u8 bw;
+ u8 disable_cck;
+ u8 ht_mcs32;
+ u8 ht_gf;
+ u8 ht_mcs[4];
+ u8 mmps_mode;
+ u8 gband_256;
+ u8 af;
+ u8 auth_wapi_mode;
+ u8 rate_len;
+
+ u8 supp_mode;
+ u8 supp_cck_rate;
+ u8 supp_ofdm_rate;
+ __le32 supp_ht_mcs;
+ __le16 supp_vht_mcs[4];
+
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ __le32 sta_cap;
+
+ struct sta_phy_uni phy;
+ u8 rx_rcpi[4];
+} __packed;
+
+struct sta_rec_ra_fixed_uni {
+ __le16 tag;
+ __le16 len;
+
+ __le32 field;
+ u8 op_mode;
+ u8 op_vht_chan_width;
+ u8 op_vht_rx_nss;
+ u8 op_vht_rx_nss_type;
+
+ struct sta_phy_uni phy;
+
+ u8 spe_idx;
+ u8 short_preamble;
+ u8 is_5g;
+ u8 mmps_mode;
+} __packed;
+
struct sta_rec_hdrt {
__le16 tag;
__le16 len;
@@ -613,17 +784,16 @@ enum {
#define MT7996_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
- sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_ht_uni) + \
sizeof(struct sta_rec_he_v2) + \
sizeof(struct sta_rec_ba_uni) + \
sizeof(struct sta_rec_vht) + \
sizeof(struct sta_rec_uapsd) + \
sizeof(struct sta_rec_amsdu) + \
sizeof(struct sta_rec_bfee) + \
- sizeof(struct sta_rec_phy) + \
- sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_ra_uni) + \
sizeof(struct sta_rec_sec) + \
- sizeof(struct sta_rec_ra_fixed) + \
+ sizeof(struct sta_rec_ra_fixed_uni) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdrt) + \
@@ -639,6 +809,18 @@ enum {
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
MT7996_BEACON_UPDATE_SIZE)
+static inline s8
+mt7996_get_power_bound(struct mt7996_phy *phy, s8 txpower)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ int n_chains = hweight16(mphy->chainmask);
+
+ txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
+ txpower -= mt76_tx_power_nss_delta(n_chains);
+
+ return txpower;
+}
+
enum {
UNI_BAND_CONFIG_RADIO_ENABLE,
UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08,
@@ -686,6 +868,8 @@ enum {
UNI_RRO_GET_BA_SESSION_TABLE,
UNI_RRO_SET_BYPASS_MODE,
UNI_RRO_SET_TXFREE_PATH,
+ UNI_RRO_DEL_BA_SESSION,
+ UNI_RRO_SET_FLUSH_TIMEOUT
};
enum{
@@ -700,6 +884,16 @@ enum{
};
enum {
+ UNI_CMD_THERMAL_PROTECT_ENABLE = 0x6,
+ UNI_CMD_THERMAL_PROTECT_DISABLE,
+ UNI_CMD_THERMAL_PROTECT_DUTY_CONFIG,
+};
+
+enum {
+ UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL = 4,
+};
+
+enum {
UNI_CMD_ACCESS_REG_BASIC = 0x0,
UNI_CMD_ACCESS_RF_REG_BASIC,
};
@@ -737,4 +931,24 @@ enum {
#define MT7996_SEC_KEY_IDX GENMASK(2, 1)
#define MT7996_SEC_IV BIT(3)
+struct fixed_rate_table_ctrl {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ u8 table_idx;
+ u8 antenna_idx;
+ __le16 rate_idx;
+ u8 spe_idx_sel;
+ u8 spe_idx;
+ u8 gi;
+ u8 he_ltf;
+ bool ldpc;
+ bool txbf;
+ bool dynamic_bw;
+
+ u8 _rsv2;
+} __packed;
+
#endif
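
The union rework in mt7996_mcu_all_sta_info_event above swaps zero-length arrays for DECLARE_FLEX_ARRAY() because ISO C forbids a flexible array member as the only member of a struct or union; the kernel macro (from <linux/stddef.h>) wraps the array in an anonymous struct so the layout stays the same while the declaration becomes legal. A stand-alone illustration with a hypothetical event layout, not the driver's actual structures:

#include <linux/stddef.h>

struct demo_event {
	__le16 tag;
	__le16 len;
	union {
		/* a bare "__le32 counters[];" here would be rejected;
		 * DECLARE_FLEX_ARRAY() makes it a valid sole member
		 */
		DECLARE_FLEX_ARRAY(__le32, counters);
		DECLARE_FLEX_ARRAY(u8, raw);
	};
} __packed;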
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 3a591a7b4..efd4a767e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -6,10 +6,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/rtnetlink.h>
#include "mt7996.h"
#include "mac.h"
+#include "mcu.h"
#include "../trace.h"
+#include "../dma.h"
+
+static bool wed_enable;
+module_param(wed_enable, bool, 0644);
static const struct __base mt7996_reg_base[] = {
[WF_AGG_BASE] = { { 0x820e2000, 0x820f2000, 0x830e2000 } },
@@ -24,6 +30,58 @@ static const struct __base mt7996_reg_base[] = {
[WF_RATE_BASE] = { { 0x820ee000, 0x820fe000, 0x830ee000 } },
};
+static const u32 mt7996_offs[] = {
+ [MIB_RVSR0] = 0x720,
+ [MIB_RVSR1] = 0x724,
+ [MIB_BTSCR5] = 0x788,
+ [MIB_BTSCR6] = 0x798,
+ [MIB_RSCR1] = 0x7ac,
+ [MIB_RSCR27] = 0x954,
+ [MIB_RSCR28] = 0x958,
+ [MIB_RSCR29] = 0x95c,
+ [MIB_RSCR30] = 0x960,
+ [MIB_RSCR31] = 0x964,
+ [MIB_RSCR33] = 0x96c,
+ [MIB_RSCR35] = 0x974,
+ [MIB_RSCR36] = 0x978,
+ [MIB_BSCR0] = 0x9cc,
+ [MIB_BSCR1] = 0x9d0,
+ [MIB_BSCR2] = 0x9d4,
+ [MIB_BSCR3] = 0x9d8,
+ [MIB_BSCR4] = 0x9dc,
+ [MIB_BSCR5] = 0x9e0,
+ [MIB_BSCR6] = 0x9e4,
+ [MIB_BSCR7] = 0x9e8,
+ [MIB_BSCR17] = 0xa10,
+ [MIB_TRDR1] = 0xa28,
+};
+
+static const u32 mt7992_offs[] = {
+ [MIB_RVSR0] = 0x760,
+ [MIB_RVSR1] = 0x764,
+ [MIB_BTSCR5] = 0x7c8,
+ [MIB_BTSCR6] = 0x7d8,
+ [MIB_RSCR1] = 0x7f0,
+ [MIB_RSCR27] = 0x998,
+ [MIB_RSCR28] = 0x99c,
+ [MIB_RSCR29] = 0x9a0,
+ [MIB_RSCR30] = 0x9a4,
+ [MIB_RSCR31] = 0x9a8,
+ [MIB_RSCR33] = 0x9b0,
+ [MIB_RSCR35] = 0x9b8,
+ [MIB_RSCR36] = 0x9bc,
+ [MIB_BSCR0] = 0xac8,
+ [MIB_BSCR1] = 0xacc,
+ [MIB_BSCR2] = 0xad0,
+ [MIB_BSCR3] = 0xad4,
+ [MIB_BSCR4] = 0xad8,
+ [MIB_BSCR5] = 0xadc,
+ [MIB_BSCR6] = 0xae0,
+ [MIB_BSCR7] = 0xae4,
+ [MIB_BSCR17] = 0xb0c,
+ [MIB_TRDR1] = 0xb24,
+};
+
static const struct __map mt7996_reg_map[] = {
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
@@ -82,7 +140,6 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
- dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
MT_HIF_REMAP_L1_MASK,
FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
@@ -97,7 +154,6 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
- dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
MT_HIF_REMAP_L2_MASK,
FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
@@ -107,26 +163,10 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
return MT_HIF_REMAP_BASE_L2 + offset;
}
-static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
-{
- /* remap to ori status */
- if (unlikely(dev->reg_l1_backup)) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
- dev->reg_l1_backup = 0;
- }
-
- if (dev->reg_l2_backup) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
- dev->reg_l2_backup = 0;
- }
-}
-
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
- mt7996_reg_remap_restore(dev);
-
if (addr < 0x100000)
return addr;
@@ -143,6 +183,11 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
return dev->reg.map[i].mapped + ofs;
}
+ return 0;
+}
+
+static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -167,28 +212,223 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7996_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7996_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset), val;
- return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7996_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
+
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
- dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7996_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
+
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
- return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7996_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *mdev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt76_phy *mphy = &dev->mphy;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
+ return -EBUSY;
+
+ ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1,
+ mphy->band_idx);
+ if (ret)
+ goto out;
+
+ rtnl_unlock();
+ if (!wait_for_completion_timeout(&mdev->mmio.wed_reset, 20 * HZ)) {
+ dev_err(mdev->dev, "wed reset timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ rtnl_lock();
+out:
+ clear_bit(MT76_STATE_WED_RESET, &mphy->state);
+
+ return ret;
+}
+#endif
+
+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+ bool hif2, int *irq)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct pci_dev *pci_dev = pdev_ptr;
+ u32 hif1_ofs = 0;
+
+ if (!wed_enable)
+ return 0;
+
+ dev->has_rro = true;
+
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ if (hif2)
+ wed = &dev->mt76.mmio.wed_hif2;
+
+ wed->wlan.pci_dev = pci_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_PCIE;
+
+ wed->wlan.base = devm_ioremap(dev->mt76.dev,
+ pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+
+ if (hif2) {
+ wed->wlan.wpdma_int = wed->wlan.phy_base +
+ MT_INT_PCIE1_SOURCE_CSR_EXT;
+ wed->wlan.wpdma_mask = wed->wlan.phy_base +
+ MT_INT_PCIE1_MASK_CSR;
+ wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
+ MT_TXQ_RING_BASE(0) +
+ MT7996_TXQ_BAND2 * MT_RING_SIZE;
+ if (dev->has_rro) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ } else {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_TRI * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_TRI) - 1;
+ }
+
+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
+ wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
+
+ wed->wlan.id = 0x7991;
+ wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
+ } else {
+ wed->wlan.hw_rro = dev->has_rro; /* default on */
+ wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
+ wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
+ wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
+ MT7996_TXQ_BAND0 * MT_RING_SIZE;
+
+ wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
+
+ wed->wlan.wpdma_rx = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
+
+ wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
+ MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
+ MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
+
+ wed->wlan.rx_nbuf = 65536;
+ wed->wlan.rx_npkt = dev->hif2 ? 32768 : 24576;
+ wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
+
+ wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
+
+ wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+
+ wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
+ wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
+ wed->wlan.rx_pg_tbit[2] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND2) - 1;
+
+ wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
+ wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
+ if (dev->has_rro) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ } else {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
+ }
+ dev->mt76.rx_token_size = MT7996_TOKEN_SIZE + wed->wlan.rx_npkt;
+ }
+
+ wed->wlan.nbuf = MT7996_HW_TOKEN_SIZE;
+ wed->wlan.token_start = MT7996_TOKEN_SIZE - wed->wlan.nbuf;
+
+ wed->wlan.amsdu_max_subframes = 8;
+ wed->wlan.amsdu_max_len = 1536;
+
+ wed->wlan.init_buf = mt7996_wed_init_buf;
+ wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
+ if (!hif2) {
+ wed->wlan.reset = mt7996_mmio_wed_reset;
+ wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ }
+
+ if (mtk_wed_device_attach(wed))
+ return 0;
+
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
+
+ return 1;
+#else
+ return 0;
+#endif
}
static int mt7996_mmio_init(struct mt76_dev *mdev,
@@ -200,10 +440,18 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7996_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7990:
dev->reg.base = mt7996_reg_base;
+ dev->reg.offs_rev = mt7996_offs;
+ dev->reg.map = mt7996_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
+ break;
+ case 0x7992:
+ dev->reg.base = mt7996_reg_base;
+ dev->reg.offs_rev = mt7992_offs;
dev->reg.map = mt7996_reg_map;
dev->reg.map_size = ARRAY_SIZE(mt7996_reg_map);
break;
@@ -241,8 +489,17 @@ void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
mdev->mmio.irqmask |= set;
if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
+ mdev->mmio.irqmask);
+ if (mtk_wed_device_active(&mdev->mmio.wed_hif2)) {
+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed_hif2,
+ mdev->mmio.irqmask);
+ }
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
}
spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
@@ -260,22 +517,36 @@ static void mt7996_rx_poll_complete(struct mt76_dev *mdev,
static void mt7996_irq_tasklet(struct tasklet_struct *t)
{
struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
u32 i, intr, mask, intr1;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
- intr1 &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+ if (dev->hif2 && mtk_wed_device_active(wed_hif2)) {
+ mtk_wed_device_irq_set_mask(wed_hif2, 0);
+ intr1 = mtk_wed_device_irq_get(wed_hif2,
+ dev->mt76.mmio.irqmask);
+ if (intr1 & MT_INT_RX_TXFREE_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
+ }
- intr |= intr1;
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+ intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+ intr |= intr1;
+ }
}
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
@@ -308,9 +579,17 @@ irqreturn_t mt7996_irq_handler(int irq, void *dev_instance)
{
struct mt7996_dev *dev = dev_instance;
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed, 0);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ if (dev->hif2) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
+ mtk_wed_device_irq_set_mask(&dev->mt76.mmio.wed_hif2, 0);
+ else
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ }
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
return IRQ_NONE;
@@ -391,4 +670,5 @@ static void __exit mt7996_exit(void)
module_init(mt7996_init);
module_exit(mt7996_exit);
+MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
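
The register-access rework above drops the save/restore of the L1/L2 remap windows and splits lookups into a lock-free fast path for statically mapped ranges (__mt7996_reg_addr() now returns 0 when it has no mapping) and a slow path that programs the shared remap window under the new dev->reg_lock, so concurrent accessors cannot clobber each other's window. A reduced sketch of the split, assuming placeholder helpers demo_fixed_addr()/demo_remap_addr() provided elsewhere:

struct demo_dev {
	void __iomem *regs;
	spinlock_t reg_lock;	/* serializes the shared remap window */
};

u32 demo_fixed_addr(struct demo_dev *dev, u32 offset);	/* 0 if unmapped */
u32 demo_remap_addr(struct demo_dev *dev, u32 offset);

static u32 demo_rr(struct demo_dev *dev, u32 offset)
{
	u32 addr = demo_fixed_addr(dev, offset);
	u32 val;

	if (addr)			/* fast path: static window, no lock */
		return readl(dev->regs + addr);

	spin_lock_bh(&dev->reg_lock);	/* slow path: remap, then access */
	val = readl(dev->regs + demo_remap_addr(dev, offset));
	spin_unlock_bh(&dev->reg_lock);

	return val;
}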
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 6733ee974..36d1f247d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -13,6 +13,7 @@
#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
+#define MT7996_WTBL_BMC_SIZE (is_mt7992(&dev->mt76) ? 32 : 64)
#define MT7996_WTBL_RESERVED (mt7996_wtbl_size(dev) - 1)
#define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \
mt7996_max_interface_num(dev))
@@ -33,23 +34,55 @@
#define MT7996_FIRMWARE_DSP "mediatek/mt7996/mt7996_dsp.bin"
#define MT7996_ROM_PATCH "mediatek/mt7996/mt7996_rom_patch.bin"
+#define MT7992_FIRMWARE_WA "mediatek/mt7996/mt7992_wa.bin"
+#define MT7992_FIRMWARE_WM "mediatek/mt7996/mt7992_wm.bin"
+#define MT7992_FIRMWARE_DSP "mediatek/mt7996/mt7992_dsp.bin"
+#define MT7992_ROM_PATCH "mediatek/mt7996/mt7992_rom_patch.bin"
+
#define MT7996_EEPROM_DEFAULT "mediatek/mt7996/mt7996_eeprom.bin"
+#define MT7992_EEPROM_DEFAULT "mediatek/mt7996/mt7992_eeprom.bin"
#define MT7996_EEPROM_SIZE 7680
#define MT7996_EEPROM_BLOCK_SIZE 16
#define MT7996_TOKEN_SIZE 16384
+#define MT7996_HW_TOKEN_SIZE 8192
#define MT7996_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+#define MT7996_SKU_RATE_NUM 417
+
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
#define MT7996_MIN_TWT_DUR 64
#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
-#define MT7996_BASIC_RATES_TBL 11
+#define MT7996_BASIC_RATES_TBL 31
#define MT7996_BEACON_RATES_TBL 25
+#define MT7996_THERMAL_THROTTLE_MAX 100
+#define MT7996_CDEV_THROTTLE_MAX 99
+#define MT7996_CRIT_TEMP_IDX 0
+#define MT7996_MAX_TEMP_IDX 1
+#define MT7996_CRIT_TEMP 110
+#define MT7996_MAX_TEMP 120
+
+#define MT7996_RRO_MAX_SESSION 1024
+#define MT7996_RRO_WINDOW_MAX_LEN 1024
+#define MT7996_RRO_ADDR_ELEM_LEN 128
+#define MT7996_RRO_BA_BITMAP_LEN 2
+#define MT7996_RRO_BA_BITMAP_CR_SIZE ((MT7996_RRO_MAX_SESSION * 128) / \
+ MT7996_RRO_BA_BITMAP_LEN)
+#define MT7996_RRO_BA_BITMAP_SESSION_SIZE (MT7996_RRO_MAX_SESSION / \
+ MT7996_RRO_ADDR_ELEM_LEN)
+#define MT7996_RRO_WINDOW_MAX_SIZE (MT7996_RRO_WINDOW_MAX_LEN * \
+ MT7996_RRO_BA_BITMAP_SESSION_SIZE)
+
+#define MT7996_RX_BUF_SIZE (1800 + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MT7996_RX_MSDU_PAGE_SIZE (128 + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
struct mt7996_vif;
struct mt7996_sta;
struct mt7996_dfs_pulse;
@@ -74,11 +107,21 @@ enum mt7996_rxq_id {
MT7996_RXQ_MCU_WM = 0,
MT7996_RXQ_MCU_WA,
MT7996_RXQ_MCU_WA_MAIN = 2,
- MT7996_RXQ_MCU_WA_EXT = 2,/* unused */
+ MT7996_RXQ_MCU_WA_EXT = 3, /* for mt7992 */
MT7996_RXQ_MCU_WA_TRI = 3,
MT7996_RXQ_BAND0 = 4,
- MT7996_RXQ_BAND1 = 4,/* unused */
+ MT7996_RXQ_BAND1 = 5, /* for mt7992 */
MT7996_RXQ_BAND2 = 5,
+ MT7996_RXQ_RRO_BAND0 = 8,
+	MT7996_RXQ_RRO_BAND1 = 8, /* unused */
+ MT7996_RXQ_RRO_BAND2 = 6,
+ MT7996_RXQ_MSDU_PG_BAND0 = 10,
+ MT7996_RXQ_MSDU_PG_BAND1 = 11,
+ MT7996_RXQ_MSDU_PG_BAND2 = 12,
+ MT7996_RXQ_TXFREE0 = 9,
+ MT7996_RXQ_TXFREE1 = 9,
+ MT7996_RXQ_TXFREE2 = 7,
+ MT7996_RXQ_RRO_IND = 0,
};
struct mt7996_twt_flow {
@@ -147,6 +190,20 @@ struct mt7996_hif {
int irq;
};
+struct mt7996_wed_rro_addr {
+ u32 head_low;
+ u32 head_high : 4;
+	u32 count : 11;
+	u32 oor : 1;
+ u32 rsv : 8;
+ u32 signature : 8;
+};
+
+struct mt7996_wed_rro_session_id {
+ struct list_head list;
+ u16 id;
+};
+
struct mt7996_phy {
struct mt76_phy *mt76;
struct mt7996_dev *dev;
@@ -155,6 +212,11 @@ struct mt7996_phy {
struct ieee80211_vif *monitor_vif;
+ struct thermal_cooling_device *cdev;
+ u8 cdev_state;
+ u8 throttle_state;
+ u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
+
u32 rxfilter;
u64 omac_mask;
@@ -165,11 +227,15 @@ struct mt7996_phy {
u8 rdd_state;
+ u16 beacon_rate;
+
u32 rx_ampdu_ts;
u32 ampdu_ref;
struct mt76_mib_stats mib;
struct mt76_channel_state state_ts;
+
+ bool has_aux_rx;
};
struct mt7996_dev {
@@ -222,10 +288,28 @@ struct mt7996_dev {
u32 hw_pattern;
- bool dbdc_support:1;
- bool tbtc_support:1;
bool flash_mode:1;
bool has_eht:1;
+ bool has_rro:1;
+
+ struct {
+ struct {
+ void *ptr;
+ dma_addr_t phy_addr;
+ } ba_bitmap[MT7996_RRO_BA_BITMAP_LEN];
+ struct {
+ void *ptr;
+ dma_addr_t phy_addr;
+ } addr_elem[MT7996_RRO_ADDR_ELEM_LEN];
+ struct {
+ void *ptr;
+ dma_addr_t phy_addr;
+ } session;
+
+ struct work_struct work;
+ struct list_head poll_list;
+ spinlock_t lock;
+ } wed_rro;
bool ibf;
u8 fw_debug_wm;
@@ -241,8 +325,7 @@ struct mt7996_dev {
u8 n_agrt;
} twt;
- u32 reg_l1_backup;
- u32 reg_l2_backup;
+ spinlock_t reg_lock;
u8 wtbl_size_group;
};
@@ -315,6 +398,20 @@ mt7996_phy3(struct mt7996_dev *dev)
return __mt7996_phy(dev, MT_BAND2);
}
+static inline bool
+mt7996_band_valid(struct mt7996_dev *dev, u8 band)
+{
+ if (is_mt7992(&dev->mt76))
+ return band <= MT_BAND1;
+
+ /* tri-band support */
+ if (band <= MT_BAND2 &&
+ mt76_get_field(dev, MT_PAD_GPIO, MT_PAD_GPIO_ADIE_COMB) <= 1)
+ return true;
+
+ return band == MT_BAND0 || band == MT_BAND2;
+}
+
extern const struct ieee80211_ops mt7996_ops;
extern struct pci_driver mt7996_pci_driver;
extern struct pci_driver mt7996_hif_driver;
@@ -335,9 +432,10 @@ int mt7996_dma_init(struct mt7996_dev *dev);
void mt7996_dma_reset(struct mt7996_dev *dev, bool force);
void mt7996_dma_prefetch(struct mt7996_dev *dev);
void mt7996_dma_cleanup(struct mt7996_dev *dev);
-void mt7996_dma_start(struct mt7996_dev *dev, bool reset);
-void mt7996_init_txpower(struct mt7996_dev *dev,
- struct ieee80211_supported_band *sband);
+void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset);
+int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx,
+ int n_desc, int ring_base, struct mtk_wed_device *wed);
+void mt7996_init_txpower(struct mt7996_phy *phy);
int mt7996_txbf_init(struct mt7996_dev *dev);
void mt7996_reset(struct mt7996_dev *dev);
int mt7996_run(struct ieee80211_hw *hw);
@@ -374,6 +472,8 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
void *data, u16 version);
+int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *data, u32 field);
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
@@ -389,13 +489,19 @@ int mt7996_mcu_set_radio_en(struct mt7996_phy *phy, bool enable);
int mt7996_mcu_set_rts_thresh(struct mt7996_phy *phy, u32 val);
int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif);
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch);
+int mt7996_mcu_get_temperature(struct mt7996_phy *phy);
+int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state);
+int mt7996_mcu_set_thermal_protect(struct mt7996_phy *phy, bool enable);
+int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy);
int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
int mt7996_mcu_rdd_background_enable(struct mt7996_phy *phy,
struct cfg80211_chan_def *chandef);
+int mt7996_mcu_set_fixed_rate_table(struct mt7996_phy *phy, u8 table_idx,
+ u16 rate_idx, bool beacon);
int mt7996_mcu_rf_regval(struct mt7996_dev *dev, u32 regidx, u32 *val, bool set);
int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans);
-int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u8 val);
+int mt7996_mcu_set_rro(struct mt7996_dev *dev, u16 tag, u16 val);
int mt7996_mcu_wa_cmd(struct mt7996_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7996_mcu_fw_log_2_host(struct mt7996_dev *dev, u8 type, u8 ctrl);
int mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level);
@@ -403,15 +509,18 @@ int mt7996_mcu_trigger_assert(struct mt7996_dev *dev);
void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb);
void mt7996_mcu_exit(struct mt7996_dev *dev);
int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag);
+int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id);
static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
{
- return MT7996_MAX_INTERFACES * (1 + dev->dbdc_support + dev->tbtc_support);
+ return min(MT7996_MAX_INTERFACES * (1 + mt7996_band_valid(dev, MT_BAND1) +
+ mt7996_band_valid(dev, MT_BAND2)),
+ MT7996_WTBL_BMC_SIZE);
}
static inline u16 mt7996_wtbl_size(struct mt7996_dev *dev)
{
- return (dev->wtbl_size_group << 8) + 64;
+ return (dev->wtbl_size_group << 8) + MT7996_WTBL_BMC_SIZE;
}
void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
@@ -438,6 +547,18 @@ static inline void mt7996_irq_disable(struct mt7996_dev *dev, u32 mask)
void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
size_t len);
+static inline u16 mt7996_rx_chainmask(struct mt7996_phy *phy)
+{
+ int max_nss = hweight8(phy->mt76->hw->wiphy->available_antennas_tx);
+ int cur_nss = hweight8(phy->mt76->antenna_mask);
+ u16 tx_chainmask = phy->mt76->chainmask;
+
+ if (cur_nss != max_nss)
+ return tx_chainmask;
+
+ return tx_chainmask | (BIT(fls(tx_chainmask)) * phy->has_aux_rx);
+}
+
void mt7996_mac_init(struct mt7996_dev *dev);
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw);
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask);
@@ -446,8 +567,6 @@ void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy);
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band);
void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
struct ieee80211_vif *vif, bool enable);
-void mt7996_mac_set_fixed_rate_table(struct mt7996_dev *dev,
- u8 tbl_idx, u16 rate_idx);
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
@@ -497,5 +616,16 @@ int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
#endif
+int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
+ bool hif2, int *irq);
+u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+
+#ifdef CONFIG_MTK_DEBUG
+int mt7996_mtk_init_debugfs(struct mt7996_phy *phy, struct dentry *dir);
+#endif
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+int mt7996_dma_rro_init(struct mt7996_dev *dev);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
index 67c015896..040561813 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -96,10 +96,10 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct pci_dev *hif2_dev;
+ struct mt7996_hif *hif2;
struct mt7996_dev *dev;
+ int irq, hif2_irq, ret;
struct mt76_dev *mdev;
- struct mt7996_hif *hif2;
- int irq, ret;
ret = pcim_enable_device(pdev);
if (ret)
@@ -111,7 +111,11 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
+ if (ret)
+ return ret;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
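
The hunk above raises only the streaming DMA mask to 36 bits while pinning coherent allocations to 32 bits, the usual two-mask idiom when a device can scatter packet buffers above 4 GiB but its coherent descriptor memory must presumably stay below that boundary. A minimal sketch of the setup (generic names; the 36/32 split is taken from the hunk above):

#include <linux/dma-mapping.h>

static int demo_set_dma_masks(struct device *dev)
{
	int ret;

	/* streaming mappings (e.g. skb payloads) may use 36-bit addresses */
	ret = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (ret)
		return ret;

	/* coherent allocations (descriptor rings) stay below 4 GiB */
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}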
@@ -129,15 +133,22 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mt7996_wfsys_reset(dev);
hif2 = mt7996_pci_init_hif2(pdev);
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
- goto free_device;
+ goto free_wed_or_irq_vector;
+
+ if (!ret) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_device;
+
+ irq = pdev->irq;
+ }
- irq = pdev->irq;
ret = devm_request_irq(mdev->dev, irq, mt7996_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
- goto free_irq_vector;
+ goto free_wed_or_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
	/* master switch of PCIe interrupt enable */
@@ -147,16 +158,25 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
dev->hif2 = hif2;
- ret = pci_alloc_irq_vectors(hif2_dev, 1, 1, PCI_IRQ_ALL_TYPES);
+ ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq);
if (ret < 0)
- goto free_hif2;
+ goto free_hif2_wed_irq_vector;
+
+ if (!ret) {
+ ret = pci_alloc_irq_vectors(hif2_dev, 1, 1,
+ PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_hif2;
- dev->hif2->irq = hif2_dev->irq;
- ret = devm_request_irq(mdev->dev, dev->hif2->irq,
- mt7996_irq_handler, IRQF_SHARED,
- KBUILD_MODNAME "-hif", dev);
+ dev->hif2->irq = hif2_dev->irq;
+ hif2_irq = dev->hif2->irq;
+ }
+
+ ret = devm_request_irq(mdev->dev, hif2_irq, mt7996_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME "-hif",
+ dev);
if (ret)
- goto free_hif2_irq_vector;
+ goto free_hif2_wed_irq_vector;
mt76_wr(dev, MT_INT1_MASK_CSR, 0);
	/* master switch of PCIe interrupt enable */
@@ -171,16 +191,23 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
free_hif2_irq:
if (dev->hif2)
- devm_free_irq(mdev->dev, dev->hif2->irq, dev);
-free_hif2_irq_vector:
- if (dev->hif2)
- pci_free_irq_vectors(hif2_dev);
+ devm_free_irq(mdev->dev, hif2_irq, dev);
+free_hif2_wed_irq_vector:
+ if (dev->hif2) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
+ mtk_wed_device_detach(&dev->mt76.mmio.wed_hif2);
+ else
+ pci_free_irq_vectors(hif2_dev);
+ }
free_hif2:
if (dev->hif2)
put_device(dev->hif2->dev);
devm_free_irq(mdev->dev, irq, dev);
-free_irq_vector:
- pci_free_irq_vectors(pdev);
+free_wed_or_irq_vector:
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_detach(&dev->mt76.mmio.wed);
+ else
+ pci_free_irq_vectors(pdev);
free_device:
mt76_free_device(&dev->mt76);
@@ -225,3 +252,7 @@ MODULE_FIRMWARE(MT7996_FIRMWARE_WA);
MODULE_FIRMWARE(MT7996_FIRMWARE_WM);
MODULE_FIRMWARE(MT7996_FIRMWARE_DSP);
MODULE_FIRMWARE(MT7996_ROM_PATCH);
+MODULE_FIRMWARE(MT7992_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7992_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7992_FIRMWARE_DSP);
+MODULE_FIRMWARE(MT7992_ROM_PATCH);
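
The IRQ setup in mt7996_pci_probe() now keys off a tri-state return from mt7996_mmio_wed_init(): negative means a hard error, 0 means WED is disabled or unavailable (so the driver falls back to a plain PCI IRQ vector), and 1 means the WED device attached and supplied the IRQ. A condensed sketch of that caller-side fallback, with hypothetical demo_* names standing in for the driver specifics:

struct demo_dev;
static irqreturn_t demo_irq_handler(int irq, void *data);
static int demo_wed_init(struct demo_dev *dev, struct pci_dev *pdev, int *irq);

static int demo_request_irq(struct demo_dev *dev, struct pci_dev *pdev)
{
	int irq, ret;

	ret = demo_wed_init(dev, pdev, &irq); /* <0 err, 0 no WED, 1 WED irq */
	if (ret < 0)
		return ret;

	if (!ret) {	/* WED inactive: fall back to a PCI IRQ vector */
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
		if (ret < 0)
			return ret;
		irq = pdev->irq;
	}

	return devm_request_irq(&pdev->dev, irq, demo_irq_handler,
				IRQF_SHARED, "demo", dev);
}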
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 0086a7866..47b429d8b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -19,6 +19,7 @@ struct __base {
/* used to differentiate between generations */
struct mt7996_reg_desc {
const struct __base *base;
+ const u32 *offs_rev;
const struct __map *map;
u32 map_size;
};
@@ -39,6 +40,73 @@ enum base_rev {
#define __BASE(_id, _band) (dev->reg.base[(_id)].band_base[(_band)])
+enum offs_rev {
+ MIB_RVSR0,
+ MIB_RVSR1,
+ MIB_BTSCR5,
+ MIB_BTSCR6,
+ MIB_RSCR1,
+ MIB_RSCR27,
+ MIB_RSCR28,
+ MIB_RSCR29,
+ MIB_RSCR30,
+ MIB_RSCR31,
+ MIB_RSCR33,
+ MIB_RSCR35,
+ MIB_RSCR36,
+ MIB_BSCR0,
+ MIB_BSCR1,
+ MIB_BSCR2,
+ MIB_BSCR3,
+ MIB_BSCR4,
+ MIB_BSCR5,
+ MIB_BSCR6,
+ MIB_BSCR7,
+ MIB_BSCR17,
+ MIB_TRDR1,
+ __MT_OFFS_MAX,
+};
+
+#define __OFFS(id) (dev->reg.offs_rev[(id)])
+
+/* RRO TOP */
+#define MT_RRO_TOP_BASE 0xA000
+#define MT_RRO_TOP(ofs) (MT_RRO_TOP_BASE + (ofs))
+
+#define MT_RRO_BA_BITMAP_BASE0 MT_RRO_TOP(0x8)
+#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC)
+#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8)
+#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12)
+#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34)
+#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31)
+
+#define MT_RRO_IND_CMD_SIGNATURE_BASE0 MT_RRO_TOP(0x38)
+#define MT_RRO_IND_CMD_SIGNATURE_BASE1 MT_RRO_TOP(0x3C)
+#define MT_RRO_IND_CMD_0_CTRL0 MT_RRO_TOP(0x40)
+#define MT_RRO_IND_CMD_SIGNATURE_BASE1_EN BIT(31)
+
+#define MT_RRO_PARTICULAR_CFG0 MT_RRO_TOP(0x5C)
+#define MT_RRO_PARTICULAR_CFG1 MT_RRO_TOP(0x60)
+#define MT_RRO_PARTICULAR_CONFG_EN BIT(31)
+#define MT_RRO_PARTICULAR_SID GENMASK(30, 16)
+
+#define MT_RRO_BA_BITMAP_BASE_EXT0 MT_RRO_TOP(0x70)
+#define MT_RRO_BA_BITMAP_BASE_EXT1 MT_RRO_TOP(0x74)
+#define MT_RRO_HOST_INT_ENA MT_RRO_TOP(0x204)
+#define MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA BIT(0)
+
+#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400)
+
+#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50)
+#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16)
+#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0)
+
+#define MT_RRO_DBG_RD_CTRL MT_RRO_TOP(0xe0)
+#define MT_RRO_DBG_RD_ADDR GENMASK(15, 0)
+#define MT_RRO_DBG_RD_EXEC BIT(31)
+
+#define MT_RRO_DBG_RDAT_DW(_n) MT_RRO_TOP(0xf0 + (_n) * 0x4)
+
#define MT_MCU_INT_EVENT 0x2108
#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
@@ -140,32 +208,32 @@ enum base_rev {
#define MT_WF_MIB_BASE(_band) __BASE(WF_MIB_BASE, (_band))
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
-#define MT_MIB_BSCR0(_band) MT_WF_MIB(_band, 0x9cc)
-#define MT_MIB_BSCR1(_band) MT_WF_MIB(_band, 0x9d0)
-#define MT_MIB_BSCR2(_band) MT_WF_MIB(_band, 0x9d4)
-#define MT_MIB_BSCR3(_band) MT_WF_MIB(_band, 0x9d8)
-#define MT_MIB_BSCR4(_band) MT_WF_MIB(_band, 0x9dc)
-#define MT_MIB_BSCR5(_band) MT_WF_MIB(_band, 0x9e0)
-#define MT_MIB_BSCR6(_band) MT_WF_MIB(_band, 0x9e4)
-#define MT_MIB_BSCR7(_band) MT_WF_MIB(_band, 0x9e8)
-#define MT_MIB_BSCR17(_band) MT_WF_MIB(_band, 0xa10)
+#define MT_MIB_BSCR0(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR0))
+#define MT_MIB_BSCR1(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR1))
+#define MT_MIB_BSCR2(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR2))
+#define MT_MIB_BSCR3(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR3))
+#define MT_MIB_BSCR4(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR4))
+#define MT_MIB_BSCR5(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR5))
+#define MT_MIB_BSCR6(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR6))
+#define MT_MIB_BSCR7(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR7))
+#define MT_MIB_BSCR17(_band) MT_WF_MIB(_band, __OFFS(MIB_BSCR17))
#define MT_MIB_TSCR5(_band) MT_WF_MIB(_band, 0x6c4)
#define MT_MIB_TSCR6(_band) MT_WF_MIB(_band, 0x6c8)
#define MT_MIB_TSCR7(_band) MT_WF_MIB(_band, 0x6d0)
-#define MT_MIB_RSCR1(_band) MT_WF_MIB(_band, 0x7ac)
+#define MT_MIB_RSCR1(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR1))
/* rx mpdu counter, full 32 bits */
-#define MT_MIB_RSCR31(_band) MT_WF_MIB(_band, 0x964)
-#define MT_MIB_RSCR33(_band) MT_WF_MIB(_band, 0x96c)
+#define MT_MIB_RSCR31(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR31))
+#define MT_MIB_RSCR33(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR33))
#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_RVSR0(_band) MT_WF_MIB(_band, 0x720)
+#define MT_MIB_RVSR0(_band) MT_WF_MIB(_band, __OFFS(MIB_RVSR0))
-#define MT_MIB_RSCR35(_band) MT_WF_MIB(_band, 0x974)
-#define MT_MIB_RSCR36(_band) MT_WF_MIB(_band, 0x978)
+#define MT_MIB_RSCR35(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR35))
+#define MT_MIB_RSCR36(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR36))
/* tx ampdu cnt, full 32 bits */
#define MT_MIB_TSCR0(_band) MT_WF_MIB(_band, 0x6b0)
@@ -178,16 +246,16 @@ enum base_rev {
#define MT_MIB_TSCR4(_band) MT_WF_MIB(_band, 0x6c0)
/* rx ampdu count, 32-bit */
-#define MT_MIB_RSCR27(_band) MT_WF_MIB(_band, 0x954)
+#define MT_MIB_RSCR27(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR27))
/* rx ampdu bytes count, 32-bit */
-#define MT_MIB_RSCR28(_band) MT_WF_MIB(_band, 0x958)
+#define MT_MIB_RSCR28(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR28))
/* rx ampdu valid subframe count */
-#define MT_MIB_RSCR29(_band) MT_WF_MIB(_band, 0x95c)
+#define MT_MIB_RSCR29(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR29))
/* rx ampdu valid subframe bytes count, 32bits */
-#define MT_MIB_RSCR30(_band) MT_WF_MIB(_band, 0x960)
+#define MT_MIB_RSCR30(_band) MT_WF_MIB(_band, __OFFS(MIB_RSCR30))
/* remaining windows protected stats */
#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x080)
@@ -196,18 +264,18 @@ enum base_rev {
#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x084)
#define MT_MIB_SDR28_TX_RWP_NEED_CNT GENMASK(15, 0)
-#define MT_MIB_RVSR1(_band) MT_WF_MIB(_band, 0x724)
+#define MT_MIB_RVSR1(_band) MT_WF_MIB(_band, __OFFS(MIB_RVSR1))
/* rx blockack count, 32 bits */
#define MT_MIB_TSCR1(_band) MT_WF_MIB(_band, 0x6b4)
#define MT_MIB_BTSCR0(_band) MT_WF_MIB(_band, 0x5e0)
-#define MT_MIB_BTSCR5(_band) MT_WF_MIB(_band, 0x788)
-#define MT_MIB_BTSCR6(_band) MT_WF_MIB(_band, 0x798)
+#define MT_MIB_BTSCR5(_band) MT_WF_MIB(_band, __OFFS(MIB_BTSCR5))
+#define MT_MIB_BTSCR6(_band) MT_WF_MIB(_band, __OFFS(MIB_BTSCR6))
#define MT_MIB_BFTFCR(_band) MT_WF_MIB(_band, 0x5d0)
-#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0xa28 + ((n) << 2))
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(MIB_TRDR1) + ((n) << 2))
#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 4)) & GENMASK(9, 0))
@@ -330,15 +398,22 @@ enum base_rev {
#define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
#define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
+#define MT_WFDMA0_RX_INT_SEL_RING6 BIT(6)
#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
-#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
-#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
+#define MT_WFDMA0_GLO_CFG_EXT_EN BIT(26)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+
+#define MT_WFDMA0_PAUSE_RX_Q_45_TH MT_WFDMA0(0x268)
+#define MT_WFDMA0_PAUSE_RX_Q_67_TH MT_WFDMA0(0x26c)
+#define MT_WFDMA0_PAUSE_RX_Q_89_TH MT_WFDMA0(0x270)
+#define MT_WFDMA0_PAUSE_RX_Q_RRO_TH MT_WFDMA0(0x27c)
#define WF_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
#define WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD BIT(18)
@@ -362,10 +437,14 @@ enum base_rev {
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_BAND2_PCIE1 BIT(22)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+#define MT_WFDMA_AXI_R2A_CTRL MT_WFDMA_EXT_CSR(0x500)
+#define MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK GENMASK(4, 0)
+
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
@@ -374,6 +453,9 @@ enum base_rev {
#define MT_WFDMA0_PCIE1_BASE 0xd8000
#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_INT_PCIE1_SOURCE_CSR_EXT MT_WFDMA0_PCIE1(0x118)
+#define MT_INT_PCIE1_MASK_CSR MT_WFDMA0_PCIE1(0x11c)
+
#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
@@ -394,6 +476,7 @@ enum base_rev {
#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40)
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q) * 0x4)
@@ -409,17 +492,27 @@ enum base_rev {
#define MT_INT1_MASK_CSR MT_WFDMA0_PCIE1(0x204)
#define MT_INT_RX_DONE_BAND0 BIT(12)
-#define MT_INT_RX_DONE_BAND1 BIT(12)
+#define MT_INT_RX_DONE_BAND1 BIT(13) /* for mt7992 */
#define MT_INT_RX_DONE_BAND2 BIT(13)
#define MT_INT_RX_DONE_WM BIT(0)
#define MT_INT_RX_DONE_WA BIT(1)
#define MT_INT_RX_DONE_WA_MAIN BIT(2)
-#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_RX_DONE_WA_EXT BIT(3) /* for mt7992 */
#define MT_INT_RX_DONE_WA_TRI BIT(3)
#define MT_INT_RX_TXFREE_MAIN BIT(17)
#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
+#define MT_INT_RX_TXFREE_EXT BIT(26)
#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_DONE_RRO_BAND0 BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND1 BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND2 BIT(14)
+#define MT_INT_RX_DONE_RRO_IND BIT(11)
+#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18)
+#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19)
+#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23)
+
#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
@@ -427,20 +520,31 @@ enum base_rev {
MT_INT_RX(MT_RXQ_MCU_WA))
#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
- MT_INT_RX(MT_RXQ_MAIN_WA))
+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
MT_INT_RX(MT_RXQ_BAND1_WA) | \
- MT_INT_RX(MT_RXQ_MAIN_WA))
+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
#define MT_INT_BAND2_RX_DONE (MT_INT_RX(MT_RXQ_BAND2) | \
MT_INT_RX(MT_RXQ_BAND2_WA) | \
- MT_INT_RX(MT_RXQ_MAIN_WA))
+ MT_INT_RX(MT_RXQ_MAIN_WA) | \
+ MT_INT_RX(MT_RXQ_TXFREE_BAND0))
+
+#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
+ MT_INT_RX(MT_RXQ_RRO_BAND1) | \
+ MT_INT_RX(MT_RXQ_RRO_BAND2) | \
+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
+ MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
MT_INT_BAND0_RX_DONE | \
MT_INT_BAND1_RX_DONE | \
- MT_INT_BAND2_RX_DONE)
+ MT_INT_BAND2_RX_DONE | \
+ MT_INT_RRO_RX_DONE)
#define MT_INT_TX_DONE_FWDL BIT(26)
#define MT_INT_TX_DONE_MCU_WM BIT(27)
@@ -449,6 +553,10 @@ enum base_rev {
#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_TX_DONE_BAND2 BIT(15)
+#define MT_INT_TX_RX_DONE_EXT (MT_INT_TX_DONE_BAND2 | \
+ MT_INT_RX_DONE_BAND2_EXT | \
+ MT_INT_RX_TXFREE_EXT)
+
#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
MT_INT_TX_MCU(MT_MCUQ_WM) | \
MT_INT_TX_MCU(MT_MCUQ_FWDL))
@@ -552,7 +660,12 @@ enum base_rev {
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+#define MT_PAD_GPIO 0x700056f0
+#define MT_PAD_GPIO_ADIE_COMB GENMASK(16, 15)
+
#define MT_HW_REV 0x70010204
+#define MT_HW_REV1 0x8a00
+
#define MT_WF_SUBSYS_RST 0x70028600
/* PCIE MAC */
@@ -601,4 +714,11 @@ enum base_rev {
#define MT_MCU_WM_EXCP_LR_CTRL MT_MCU_WM_EXCP(0x200)
#define MT_MCU_WM_EXCP_LR_LOG MT_MCU_WM_EXCP(0x204)
+/* CONN AFE CTL CON */
+#define MT_AFE_CTL_BASE 0x18043000
+#define MT_AFE_CTL_BAND(_band, ofs) (MT_AFE_CTL_BASE + \
+ ((_band) * 0x1000) + (ofs))
+#define MT_AFE_CTL_BAND_PLL_03(_band) MT_AFE_CTL_BAND(_band, 0x2c)
+#define MT_AFE_CTL_BAND_PLL_03_MSB_EN BIT(1)
+
#endif
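
regs.h now resolves generation-specific MIB register offsets through a per-chip table instead of hard-coded constants: enum offs_rev names each register, mt7996_mmio_init() points dev->reg.offs_rev at either mt7996_offs[] or mt7992_offs[], and __OFFS(id) performs the run-time lookup. A reduced model of the same indirection (illustrative names; the two sample values are the MIB_RVSR0/MIB_RVSR1 offsets from the tables above):

enum demo_offs { DEMO_MIB_RVSR0, DEMO_MIB_RVSR1, __DEMO_OFFS_MAX };

static const u32 demo_chip_a_offs[__DEMO_OFFS_MAX] = {
	[DEMO_MIB_RVSR0] = 0x720,
	[DEMO_MIB_RVSR1] = 0x724,
};

static const u32 demo_chip_b_offs[__DEMO_OFFS_MAX] = {
	[DEMO_MIB_RVSR0] = 0x760,
	[DEMO_MIB_RVSR1] = 0x764,
};

struct demo_reg_desc {
	const u32 *offs_rev;	/* selected once at probe time */
};

/* __OFFS()-style accessor: one macro, per-generation tables */
#define DEMO_OFFS(desc, id)	((desc)->offs_rev[(id)])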
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index c52d550f0..3e88798df 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 1584665fe..5a0bcb507 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1128,4 +1128,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index fc76c66ff..d6c01a2dd 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr)
}
EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+MODULE_DESCRIPTION("MediaTek MT76x helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e9a047a8c..f03fd15c0 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -105,10 +105,9 @@ struct wilc_ch_list_elem {
} __packed;
static void cfg_scan_result(enum scan_event scan_event,
- struct wilc_rcvd_net_info *info, void *user_void)
+ struct wilc_rcvd_net_info *info,
+ struct wilc_priv *priv)
{
- struct wilc_priv *priv = user_void;
-
if (!priv->cfg_scanning)
return;
@@ -162,9 +161,8 @@ static void cfg_scan_result(enum scan_event scan_event,
}
static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
- void *priv_data)
+ struct wilc_priv *priv)
{
- struct wilc_priv *priv = priv_data;
struct net_device *dev = priv->dev;
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wl = vif->wilc;
@@ -286,9 +284,8 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
else
scan_type = WILC_FW_PASSIVE_SCAN;
- ret = wilc_scan(vif, WILC_FW_USER_SCAN, scan_type, scan_ch_list,
- request->n_channels, cfg_scan_result, (void *)priv,
- request);
+ ret = wilc_scan(vif, WILC_FW_USER_SCAN, scan_type,
+ scan_ch_list, cfg_scan_result, request);
if (ret) {
priv->scan_req = NULL;
@@ -412,9 +409,8 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
wfi_drv->conn_info.security = security;
wfi_drv->conn_info.auth_type = auth_type;
- wfi_drv->conn_info.ch = ch;
wfi_drv->conn_info.conn_result = cfg_connect_result;
- wfi_drv->conn_info.arg = priv;
+ wfi_drv->conn_info.priv = priv;
wfi_drv->conn_info.param = join_params;
if (sme->mfp == NL80211_MFP_OPTIONAL)
@@ -1094,9 +1090,8 @@ static void wilc_wfi_mgmt_tx_complete(void *priv, int status)
kfree(pv_data);
}
-static void wilc_wfi_remain_on_channel_expired(void *data, u64 cookie)
+static void wilc_wfi_remain_on_channel_expired(struct wilc_vif *vif, u64 cookie)
{
- struct wilc_vif *vif = data;
struct wilc_priv *priv = &vif->priv;
struct wilc_wfi_p2p_listen_params *params = &priv->remain_on_ch_params;
@@ -1128,9 +1123,8 @@ static int remain_on_channel(struct wiphy *wiphy,
if (id == 0)
id = ++priv->inc_roc_cookie;
- ret = wilc_remain_on_channel(vif, id, duration, chan->hw_value,
- wilc_wfi_remain_on_channel_expired,
- (void *)vif);
+ ret = wilc_remain_on_channel(vif, id, chan->hw_value,
+ wilc_wfi_remain_on_channel_expired);
if (ret)
return ret;
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index e202013e6..d2b8c2630 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -144,18 +144,19 @@ static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
scan_req = &hif_drv->usr_scan_req;
if (scan_req->scan_result) {
- scan_req->scan_result(evt, NULL, scan_req->arg);
+ scan_req->scan_result(evt, NULL, scan_req->priv);
scan_req->scan_result = NULL;
}
return result;
}
-int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
- u8 *ch_freq_list, u8 ch_list_len,
+int wilc_scan(struct wilc_vif *vif, u8 scan_source,
+ u8 scan_type, u8 *ch_freq_list,
void (*scan_result_fn)(enum scan_event,
- struct wilc_rcvd_net_info *, void *),
- void *user_arg, struct cfg80211_scan_request *request)
+ struct wilc_rcvd_net_info *,
+ struct wilc_priv *),
+ struct cfg80211_scan_request *request)
{
int result = 0;
struct wid wid_list[WILC_SCAN_WID_LIST_SIZE];
@@ -164,6 +165,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
u8 *buffer;
u8 valuesize = 0;
u8 *search_ssid_vals = NULL;
+ const u8 ch_list_len = request->n_channels;
struct host_if_drv *hif_drv = vif->hif_drv;
if (hif_drv->hif_state >= HOST_IF_SCANNING &&
@@ -249,7 +251,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
index++;
hif_drv->usr_scan_req.scan_result = scan_result_fn;
- hif_drv->usr_scan_req.arg = user_arg;
+ hif_drv->usr_scan_req.priv = &vif->priv;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, index);
if (result) {
@@ -348,7 +350,7 @@ static void handle_connect_timeout(struct work_struct *work)
if (hif_drv->conn_info.conn_result) {
hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_CONN_RESP,
WILC_MAC_STATUS_DISCONNECTED,
- hif_drv->conn_info.arg);
+ hif_drv->conn_info.priv);
} else {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
@@ -371,8 +373,9 @@ out:
kfree(msg);
}
-void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
- struct cfg80211_crypto_settings *crypto)
+struct wilc_join_bss_param *
+wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ struct cfg80211_crypto_settings *crypto)
{
const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
@@ -557,7 +560,7 @@ static void handle_rcvd_ntwrk_info(struct work_struct *work)
if (scan_req->scan_result)
scan_req->scan_result(SCAN_EVENT_NETWORK_FOUND, rcvd_info,
- scan_req->arg);
+ scan_req->priv);
done:
kfree(rcvd_info->mgmt);
@@ -639,7 +642,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
del_timer(&hif_drv->connect_timer);
conn_info->conn_result(CONN_DISCONN_EVENT_CONN_RESP, mac_status,
- hif_drv->conn_info.arg);
+ hif_drv->conn_info.priv);
if (mac_status == WILC_MAC_STATUS_CONNECTED &&
conn_info->status == WLAN_STATUS_SUCCESS) {
@@ -669,7 +672,7 @@ void wilc_handle_disconnect(struct wilc_vif *vif)
if (hif_drv->conn_info.conn_result)
hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
- 0, hif_drv->conn_info.arg);
+ 0, hif_drv->conn_info.priv);
eth_zero_addr(hif_drv->assoc_bssid);
@@ -741,7 +744,7 @@ int wilc_disconnect(struct wilc_vif *vif)
if (scan_req->scan_result) {
del_timer(&hif_drv->scan_timer);
- scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->arg);
+ scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->priv);
scan_req->scan_result = NULL;
}
@@ -751,7 +754,7 @@ int wilc_disconnect(struct wilc_vif *vif)
del_timer(&hif_drv->connect_timer);
conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0,
- conn_info->arg);
+ conn_info->priv);
} else {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
}
@@ -890,7 +893,7 @@ static int handle_remain_on_chan(struct wilc_vif *vif,
if (result)
return -EBUSY;
- hif_drv->remain_on_ch.arg = hif_remain_ch->arg;
+ hif_drv->remain_on_ch.vif = hif_remain_ch->vif;
hif_drv->remain_on_ch.expired = hif_remain_ch->expired;
hif_drv->remain_on_ch.ch = hif_remain_ch->ch;
hif_drv->remain_on_ch.cookie = hif_remain_ch->cookie;
@@ -927,7 +930,7 @@ static int wilc_handle_roc_expired(struct wilc_vif *vif, u64 cookie)
}
if (hif_drv->remain_on_ch.expired) {
- hif_drv->remain_on_ch.expired(hif_drv->remain_on_ch.arg,
+ hif_drv->remain_on_ch.expired(hif_drv->remain_on_ch.vif,
cookie);
}
} else {
@@ -1550,7 +1553,7 @@ int wilc_deinit(struct wilc_vif *vif)
if (hif_drv->usr_scan_req.scan_result) {
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
- hif_drv->usr_scan_req.arg);
+ hif_drv->usr_scan_req.priv);
hif_drv->usr_scan_req.scan_result = NULL;
}
@@ -1681,18 +1684,15 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
}
}
-int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie,
- u32 duration, u16 chan,
- void (*expired)(void *, u64),
- void *user_arg)
+int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
+ void (*expired)(struct wilc_vif *, u64))
{
struct wilc_remain_ch roc;
int result;
roc.ch = chan;
roc.expired = expired;
- roc.arg = user_arg;
- roc.duration = duration;
+ roc.vif = vif;
roc.cookie = cookie;
result = handle_remain_on_chan(vif, &roc);
if (result)
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 8e386db72..0d380586b 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -95,34 +95,37 @@ struct wilc_rcvd_net_info {
struct ieee80211_mgmt *mgmt;
};
+struct wilc_priv;
struct wilc_user_scan_req {
void (*scan_result)(enum scan_event evt,
- struct wilc_rcvd_net_info *info, void *priv);
- void *arg;
+ struct wilc_rcvd_net_info *info,
+ struct wilc_priv *priv);
+ struct wilc_priv *priv;
u32 ch_cnt;
};
+struct wilc_join_bss_param;
struct wilc_conn_info {
u8 bssid[ETH_ALEN];
u8 security;
enum authtype auth_type;
enum mfptype mfp_type;
- u8 ch;
u8 *req_ies;
size_t req_ies_len;
u8 *resp_ies;
u16 resp_ies_len;
u16 status;
- void (*conn_result)(enum conn_event evt, u8 status, void *priv_data);
- void *arg;
- void *param;
+ void (*conn_result)(enum conn_event evt, u8 status,
+ struct wilc_priv *priv);
+ struct wilc_priv *priv;
+ struct wilc_join_bss_param *param;
};
+struct wilc_vif;
struct wilc_remain_ch {
u16 ch;
- u32 duration;
- void (*expired)(void *priv, u64 cookie);
- void *arg;
+ void (*expired)(struct wilc_vif *vif, u64 cookie);
+ struct wilc_vif *vif;
u64 cookie;
};
@@ -150,7 +153,6 @@ struct host_if_drv {
u8 assoc_resp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
};
-struct wilc_vif;
int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
u8 mode, u8 cipher_mode, u8 index);
@@ -171,11 +173,12 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
int wilc_disconnect(struct wilc_vif *vif);
int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel);
int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level);
-int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
- u8 *ch_freq_list, u8 ch_list_len,
+int wilc_scan(struct wilc_vif *vif, u8 scan_source,
+ u8 scan_type, u8 *ch_freq_list,
void (*scan_result_fn)(enum scan_event,
- struct wilc_rcvd_net_info *, void *),
- void *user_arg, struct cfg80211_scan_request *request);
+ struct wilc_rcvd_net_info *,
+ struct wilc_priv *),
+ struct cfg80211_scan_request *request);
int wilc_hif_set_cfg(struct wilc_vif *vif,
struct cfg_param_attr *cfg_param);
int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler);
@@ -192,10 +195,8 @@ int wilc_edit_station(struct wilc_vif *vif, const u8 *mac,
int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout);
int wilc_setup_multicast_filter(struct wilc_vif *vif, u32 enabled, u32 count,
u8 *mc_list);
-int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie,
- u32 duration, u16 chan,
- void (*expired)(void *, u64),
- void *user_arg);
+int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
+ void (*expired)(struct wilc_vif *, u64));
int wilc_listen_state_expired(struct wilc_vif *vif, u64 cookie);
void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
int wilc_set_operation_mode(struct wilc_vif *vif, int index, u8 mode,
@@ -210,8 +211,9 @@ int wilc_set_external_auth_param(struct wilc_vif *vif,
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
-void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
- struct cfg80211_crypto_settings *crypto);
+struct wilc_join_bss_param *
+wilc_parse_join_bss_param(struct cfg80211_bss *bss,
+ struct cfg80211_crypto_settings *crypto);
int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index);
void wilc_handle_disconnect(struct wilc_vif *vif);
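Note on the hif.h changes above: they replace opaque void * callback contexts with concrete pointer types, so a caller passing the wrong object now fails at compile time instead of crashing at runtime. A minimal before/after sketch of the pattern (the struct names here are illustrative, not the driver's full definitions):

	/* Before: any pointer compiles as the callback context. */
	struct remain_ch_old {
		void (*expired)(void *priv, u64 cookie);
		void *arg;
	};

	/* After: the context is typed; a mismatched pointer is a
	 * compile-time error. */
	struct wilc_vif;
	struct remain_ch_new {
		void (*expired)(struct wilc_vif *vif, u64 cookie);
		struct wilc_vif *vif;
	};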
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 87fce5f41..6068699e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -996,5 +996,6 @@ error:
return ERR_PTR(ret);
}
+MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 87948ba69..d6d394693 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -106,9 +106,10 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
size = cmd->count;
if (cmd->use_global_buf) {
- if (size > sizeof(u32))
- return -EINVAL;
-
+ if (size > sizeof(u32)) {
+ ret = -EINVAL;
+ goto out;
+ }
buf = sdio_priv->cmd53_buf;
}
@@ -123,7 +124,7 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
if (cmd->use_global_buf)
memcpy(cmd->buffer, buf, size);
}
-
+out:
sdio_release_host(func);
if (ret)
@@ -983,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = {
module_driver(wilc_sdio_driver,
sdio_register_driver,
sdio_unregister_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
MODULE_LICENSE("GPL");
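The wilc_sdio_cmd53() fix above matters because the function claims the SDIO host before validating the transfer size; the old early return leaked the claim. A hedged sketch of the claim/validate/release shape (xfer_locked() and its parameters are placeholders, not driver code):

	#include <linux/mmc/sdio_func.h>

	static int xfer_locked(struct sdio_func *func, size_t size, size_t max)
	{
		int ret = 0;

		sdio_claim_host(func);		/* host lock taken here */

		if (size > max) {
			ret = -EINVAL;		/* do not return directly... */
			goto out;		/* ...or the host stays claimed */
		}

		/* ... perform the CMD53 transfer ... */
	out:
		sdio_release_host(func);	/* single unlock point */
		return ret;
	}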
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 4cf8586ed..6a82b6ca2 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -273,6 +273,7 @@ static struct spi_driver wilc_spi_driver = {
.remove = wilc_bus_remove,
};
module_spi_driver(wilc_spi_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver");
MODULE_LICENSE("GPL");
static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index 48521e455..8930589b4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -3194,4 +3194,8 @@ enum rt2800_eeprom_word {
*/
#define BCN_TBTT_OFFSET 64
+/* Watchdog type mask */
+#define RT2800_WATCHDOG_HANG BIT(0)
+#define RT2800_WATCHDOG_DMA_BUSY BIT(1)
+
#endif /* RT2800_H */
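Together with the rt2800lib.c changes below, these two bits turn the old boolean watchdog into a selectable mask: the DMA-busy detector must see more than 30 consecutive busy samples at the 100 ms watchdog interval (roughly 3 seconds) before it fires, and USB devices mask it out because INT_SOURCE_CSR is not usable there. A rough sketch of how the mask is consumed (check_dma_busy(), check_queues_hung() and restart_hw() stand in for rt2800_watchdog_dma_busy(), rt2800_watchdog_hung() and ieee80211_restart_hw()):

	/* watchdog=0 disables both, 1 = hang check,
	 * 2 = DMA check (default), 3 = both. */
	static void watchdog_tick(unsigned int watchdog)
	{
		bool reset = false;

		if (watchdog & RT2800_WATCHDOG_DMA_BUSY)	/* BIT(1) */
			reset = check_dma_busy();
		if (watchdog & RT2800_WATCHDOG_HANG)		/* BIT(0) */
			reset = check_queues_hung() || reset;

		if (reset)
			restart_hw();
	}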
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 1926ffdff..aaf31857a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -30,9 +30,10 @@
#include "rt2800lib.h"
#include "rt2800.h"
-static bool modparam_watchdog;
-module_param_named(watchdog, modparam_watchdog, bool, S_IRUGO);
-MODULE_PARM_DESC(watchdog, "Enable watchdog to detect tx/rx hangs and reset hardware if detected");
+static unsigned int modparam_watchdog = RT2800_WATCHDOG_DMA_BUSY;
+module_param_named(watchdog, modparam_watchdog, uint, 0444);
+MODULE_PARM_DESC(watchdog, "Enable watchdog to recover tx/rx hangs.\n"
+ "\t\t(0=disabled, 1=hang watchdog, 2=DMA watchdog(default), 3=both)");
/*
* Register access.
@@ -1261,15 +1262,12 @@ static void rt2800_update_survey(struct rt2x00_dev *rt2x00dev)
chan_survey->time_ext_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC);
}
-void rt2800_watchdog(struct rt2x00_dev *rt2x00dev)
+static bool rt2800_watchdog_hung(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
bool hung_tx = false;
bool hung_rx = false;
- if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
- return;
-
rt2800_update_survey(rt2x00dev);
queue_for_each(rt2x00dev, queue) {
@@ -1297,18 +1295,72 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev)
}
}
+ if (!hung_tx && !hung_rx)
+ return false;
+
if (hung_tx)
rt2x00_warn(rt2x00dev, "Watchdog TX hung detected\n");
if (hung_rx)
rt2x00_warn(rt2x00dev, "Watchdog RX hung detected\n");
- if (hung_tx || hung_rx) {
- queue_for_each(rt2x00dev, queue)
- queue->wd_count = 0;
+ queue_for_each(rt2x00dev, queue)
+ queue->wd_count = 0;
+
+ return true;
+}
+
+static bool rt2800_watchdog_dma_busy(struct rt2x00_dev *rt2x00dev)
+{
+ bool busy_rx, busy_tx;
+ u32 reg_cfg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG);
+ u32 reg_int = rt2800_register_read(rt2x00dev, INT_SOURCE_CSR);
+
+ if (rt2x00_get_field32(reg_cfg, WPDMA_GLO_CFG_RX_DMA_BUSY) &&
+ rt2x00_get_field32(reg_int, INT_SOURCE_CSR_RX_COHERENT))
+ rt2x00dev->rxdma_busy++;
+ else
+ rt2x00dev->rxdma_busy = 0;
+
+ if (rt2x00_get_field32(reg_cfg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ rt2x00_get_field32(reg_int, INT_SOURCE_CSR_TX_COHERENT))
+ rt2x00dev->txdma_busy++;
+ else
+ rt2x00dev->txdma_busy = 0;
+
+ busy_rx = rt2x00dev->rxdma_busy > 30;
+ busy_tx = rt2x00dev->txdma_busy > 30;
+
+ if (!busy_rx && !busy_tx)
+ return false;
+
+ if (busy_rx)
+ rt2x00_warn(rt2x00dev, "Watchdog RX DMA busy detected\n");
+
+ if (busy_tx)
+ rt2x00_warn(rt2x00dev, "Watchdog TX DMA busy detected\n");
+
+ rt2x00dev->rxdma_busy = 0;
+ rt2x00dev->txdma_busy = 0;
+
+ return true;
+}
+
+void rt2800_watchdog(struct rt2x00_dev *rt2x00dev)
+{
+ bool reset = false;
+
+ if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
+ return;
+
+ if (rt2x00dev->link.watchdog & RT2800_WATCHDOG_DMA_BUSY)
+ reset = rt2800_watchdog_dma_busy(rt2x00dev);
+ if (rt2x00dev->link.watchdog & RT2800_WATCHDOG_HANG)
+ reset = rt2800_watchdog_hung(rt2x00dev) || reset;
+
+ if (reset)
ieee80211_restart_hw(rt2x00dev->hw);
- }
}
EXPORT_SYMBOL_GPL(rt2800_watchdog);
@@ -6048,7 +6100,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG);
@@ -6061,7 +6113,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 0);
rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG);
@@ -12013,11 +12065,13 @@ int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags);
}
- if (modparam_watchdog) {
+ rt2x00dev->link.watchdog = modparam_watchdog;
+ /* USB NICs don't support DMA watchdog as INT_SOURCE_CSR is invalid */
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2x00dev->link.watchdog &= ~RT2800_WATCHDOG_DMA_BUSY;
+ if (rt2x00dev->link.watchdog) {
__set_bit(CAPABILITY_RESTART_HW, &rt2x00dev->cap_flags);
rt2x00dev->link.watchdog_interval = msecs_to_jiffies(100);
- } else {
- rt2x00dev->link.watchdog_disabled = true;
}
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index aaaf99331..82af01448 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -334,7 +334,7 @@ struct link {
*/
struct delayed_work watchdog_work;
unsigned int watchdog_interval;
- bool watchdog_disabled;
+ unsigned int watchdog;
/*
* Work structure for scheduling periodic AGC adjustments.
@@ -926,6 +926,9 @@ struct rt2x00_dev {
*/
u16 beacon_int;
+ /* Rx/Tx DMA busy watchdog counter */
+ u16 rxdma_busy, txdma_busy;
+
/**
* Timestamp of last received beacon
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
index 6cf7e7c99..fb23d409f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00link.c
@@ -384,7 +384,7 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
struct link *link = &rt2x00dev->link;
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- rt2x00dev->ops->lib->watchdog && !link->watchdog_disabled)
+ rt2x00dev->ops->lib->watchdog && link->watchdog)
ieee80211_queue_delayed_work(rt2x00dev->hw,
&link->watchdog_work,
link->watchdog_interval);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 98df0aef8..013003777 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -416,9 +416,6 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
else
__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
- if (tx_info->control.rts_cts_rate_idx >= 0)
- rate =
- ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
}
/*
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 7ce37fb4f..1a8d715b7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1402,10 +1402,6 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
sta_entry =
(struct rtl_sta_info *)sta->drv_priv;
- if (!sta_entry) {
- rcu_read_unlock();
- return true;
- }
capab =
le16_to_cpu(mgmt->u.action.u.addba_req.capab);
tid = (capab &
@@ -1760,8 +1756,6 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -EINVAL;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (!sta_entry)
- return -ENXIO;
tid_data = &sta_entry->tids[tid];
rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
@@ -1818,8 +1812,6 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
}
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if (!sta_entry)
- return -ENXIO;
tid_data = &sta_entry->tids[tid];
rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index b118df035..96ce05bcf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -64,13 +64,12 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
- u8 init_aspm;
+ u16 init_aspm;
ppsc->reg_rfps_level = 0;
ppsc->support_aspm = false;
/*Update PCI ASPM setting */
- ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
switch (rtlpci->const_pci_aspm) {
case 0:
/*No ASPM */
@@ -151,9 +150,10 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
/* toshiba aspm issue, toshiba will set aspm selfly
* so we should not set aspm in driver
*/
- pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
+ pcie_capability_read_word(rtlpci->pdev, PCI_EXP_LNKCTL, &init_aspm);
if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
- init_aspm == 0x43)
+ ((u8)init_aspm) == (PCI_EXP_LNKCTL_ASPM_L0S |
+ PCI_EXP_LNKCTL_ASPM_L1 | PCI_EXP_LNKCTL_CCC))
ppsc->support_aspm = false;
}
@@ -203,7 +203,7 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
/*Retrieve original configuration settings. */
u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
u16 aspmlevel = 0;
- u8 tmp_u1b = 0;
+ u16 tmp_u1b = 0;
if (!ppsc->support_aspm)
return;
@@ -221,10 +221,10 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
}
/*for promising device will in L0 state after an I/O. */
- pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
+ pcie_capability_read_word(rtlpci->pdev, PCI_EXP_LNKCTL, &tmp_u1b);
/*Set corresponding value. */
- aspmlevel |= BIT(0) | BIT(1);
+ aspmlevel |= PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1;
linkctrl_reg &= ~aspmlevel;
_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
@@ -351,9 +351,8 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
pcipriv->ndis_adapter.linkctrl_reg);
- pci_read_config_byte(pdev, 0x98, &tmp);
- tmp |= BIT(4);
- pci_write_config_byte(pdev, 0x98, tmp);
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
tmp = 0x17;
pci_write_config_byte(pdev, 0x70f, tmp);
@@ -1969,7 +1968,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
*/
if (bridge_pdev) {
/*find bridge info if available */
- pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
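The pci.c hunks above swap raw config-space pokes at device-specific offsets (0x80, 0x98) for the kernel's PCIe capability accessors, which locate the Link Control and Device Control 2 registers wherever the capability actually sits. The idiom, reduced to a sketch:

	#include <linux/pci.h>

	static void aspm_example(struct pci_dev *pdev)
	{
		u16 lnkctl;

		/* Link Control via the capability walker, not offset 0x80. */
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);

		if (lnkctl & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1))
			;	/* ASPM L0s and/or L1 currently enabled */

		/* Set Completion Timeout Disable in Device Control 2. */
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
	}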
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index d6307197d..e8fa022df 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -44,18 +44,6 @@
#define ATI_DEVICE_ID 0x7914
#define AMD_VENDOR_ID 0x1022
-#define PCI_MAX_BRIDGE_NUMBER 255
-#define PCI_MAX_DEVICES 32
-#define PCI_MAX_FUNCTION 8
-
-#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
-#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
-
-#define PCI_CLASS_BRIDGE_DEV 0x06
-#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
-#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
-#define PCI_CAP_ID_EXP 0x10
-
#define U1DONTCARE 0xFF
#define U2DONTCARE 0xFFFF
#define U4DONTCARE 0xFFFFFFFF
@@ -113,11 +101,6 @@ enum pci_bridge_vendor {
PCI_BRIDGE_VENDOR_MAX,
};
-struct rtl_pci_capabilities_header {
- u8 capability_id;
- u8 next;
-};
-
/* In new TRX flow, Buffer_desc is new concept
* But TX wifi info == TX descriptor in old flow
* RX wifi info == RX descriptor in old flow
@@ -195,7 +178,6 @@ struct rtl_pci {
u32 reg_bcn_ctrl_val;
/*ASPM*/ u8 const_pci_aspm;
- u8 const_amdpci_aspm;
u8 const_hwsw_rfoff_d3;
u8 const_support_pciaspm;
/*pci-e bridge */
@@ -233,8 +215,6 @@ struct mp_adapter {
u8 pcibridge_funcnum;
u8 pcibridge_vendor;
- u16 pcibridge_vendorid;
- u16 pcibridge_deviceid;
bool amd_l1_patch;
};
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index b77937fe2..37bb59fa8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -21,9 +21,6 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/* ASPM PS mode.
* 0 - Disable ASPM,
* 1 - Enable ASPM without Clock Req,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 144ee780e..3730613a3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -1479,12 +1479,8 @@ EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- if (rtlphy->apk_done)
- return;
if (IS_92C_SERIAL(rtlhal->version))
_rtl92c_phy_ap_calibrate(hw, delta, true);
else
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index e452275d8..e20f2bec4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -24,9 +24,6 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/*
* ASPM PS mode.
* 0 - Disable ASPM,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 11f319c97..afd685ed4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -21,9 +21,6 @@ static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/*
* ASPM PS mode.
* 0 - Disable ASPM,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 5a828a934..17486e3f3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -432,10 +432,8 @@ static void rtl92ee_dm_check_rssi_monitor(struct ieee80211_hw *hw)
static void rtl92ee_dm_init_primary_cca_check(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct dynamic_primary_cca *primarycca = &rtlpriv->primarycca;
- rtlhal->rts_en = 0;
primarycca->dup_rts_flag = 0;
primarycca->intf_flag = 0;
primarycca->intf_type = 0;
@@ -562,7 +560,7 @@ static void rtl92ee_dm_write_dynamic_cca(struct ieee80211_hw *hw,
primarycca->mf_state = cur_mf_state;
}
-static void rtl92ee_dm_dynamic_primary_cca_ckeck(struct ieee80211_hw *hw)
+static void rtl92ee_dm_dynamic_primary_cca_check(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt;
@@ -615,13 +613,11 @@ static void rtl92ee_dm_dynamic_primary_cca_ckeck(struct ieee80211_hw *hw)
rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state);
primarycca->pricca_flag = 1;
primarycca->dup_rts_flag = 1;
- rtlpriv->rtlhal.rts_en = 1;
} else {
primarycca->intf_type = 0;
primarycca->intf_flag = 0;
cur_mf_state = MF_USC_LSC;
rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state);
- rtlpriv->rtlhal.rts_en = 0;
primarycca->dup_rts_flag = 0;
}
} else if (sec_ch_offset == 1) {
@@ -642,13 +638,11 @@ static void rtl92ee_dm_dynamic_primary_cca_ckeck(struct ieee80211_hw *hw)
rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state);
primarycca->pricca_flag = 1;
primarycca->dup_rts_flag = 1;
- rtlpriv->rtlhal.rts_en = 1;
} else {
primarycca->intf_type = 0;
primarycca->intf_flag = 0;
cur_mf_state = MF_USC_LSC;
rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state);
- rtlpriv->rtlhal.rts_en = 0;
primarycca->dup_rts_flag = 0;
}
}
@@ -660,7 +654,6 @@ static void rtl92ee_dm_dynamic_primary_cca_ckeck(struct ieee80211_hw *hw)
cur_mf_state = MF_USC_LSC;
/* default */
rtl92ee_dm_write_dynamic_cca(hw, cur_mf_state);
- rtlpriv->rtlhal.rts_en = 0;
primarycca->dup_rts_flag = 0;
primarycca->intf_type = 0;
primarycca->intf_flag = 0;
@@ -1090,7 +1083,7 @@ void rtl92ee_dm_watchdog(struct ieee80211_hw *hw)
rtl92ee_dm_refresh_rate_adaptive_mask(hw);
rtl92ee_dm_check_edca_turbo(hw);
rtl92ee_dm_dynamic_atc_switch(hw);
- rtl92ee_dm_dynamic_primary_cca_ckeck(hw);
+ rtl92ee_dm_dynamic_primary_cca_check(hw);
}
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index ebb7abd0c..d4da5cdc8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1320,7 +1320,6 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
err = 1;
return err;
}
- rtlhal->rx_tag = 0;
rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, 0x8000);
err = rtl92ee_download_fw(hw, false);
if (err) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 011ce82ef..7bde20fdb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -24,9 +24,6 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/**
* ASPM PS mode.
* 0 - Disable ASPM,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 30bce381c..675bdd32f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -21,9 +21,6 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/* ASPM PS mode.
* 0 - Disable ASPM,
* 1 - Enable ASPM without Clock Req,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index c821436a1..dd7505e2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -26,9 +26,6 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/**
* ASPM PS mode.
* 0 - Disable ASPM,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 43b611d52..162c34f0e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -26,9 +26,6 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/* ASPM PS mode.
* 0 - Disable ASPM,
* 1 - Enable ASPM without Clock Req,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index 47b6c1aa3..d97c88ebc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -17,7 +17,7 @@ u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
"regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
@@ -39,7 +39,7 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) | (data << bitshift));
}
@@ -51,14 +51,6 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
}
EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
-u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i = ffs(bitmask);
-
- return i ? i - 1 : 32;
-}
-EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
-
u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset)
{
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
index edf1c52f0..af85c3287 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
@@ -27,7 +27,6 @@ u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask);
void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
u32 bitmask, u32 data);
-u32 rtl8723_phy_calculate_bit_shift(u32 bitmask);
u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset);
void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
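The per-chip rtl8723_phy_calculate_bit_shift() copy removed above (and the rtl8821ae one removed later in this patch) are replaced by a shared calculate_bit_shift() helper; as the deleted body shows, it just finds the position of the lowest set bit of a register mask so a field value can be aligned with it. A sketch of the semantics (calc_bit_shift() is an illustrative name):

	/* Lowest set bit position of @bitmask, e.g. 0x0000ff00 -> 8;
	 * returns 32 for an empty mask, matching the removed helper. */
	static u32 calc_bit_shift(u32 bitmask)
	{
		u32 i = ffs(bitmask);

		return i ? i - 1 : 32;
	}

	/* Typical masked read-modify-write of a register field:
	 * val = (old & ~mask) | (field << calc_bit_shift(mask)); */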
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 1633328bc..f4b232f03 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2270,67 +2270,27 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- u16 cap_hdr;
- u8 cap_pointer;
- u8 cap_id = 0xff;
- u8 pmcs_reg;
- u8 cnt = 0;
+ struct pci_dev *pdev = rtlpci->pdev;
+ u16 pmcs_reg;
+ u8 pm_cap;
- /* Get the Capability pointer first,
- * the Capability Pointer is located at
- * offset 0x34 from the Function Header */
-
- pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
-
- do {
- pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
- cap_id = cap_hdr & 0xFF;
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "in pci configuration, cap_pointer%x = %x\n",
- cap_pointer, cap_id);
-
- if (cap_id == 0x01) {
- break;
- } else {
- /* point to next Capability */
- cap_pointer = (cap_hdr >> 8) & 0xFF;
- /* 0: end of pci capability, 0xff: invalid value */
- if (cap_pointer == 0x00 || cap_pointer == 0xff) {
- cap_id = 0xff;
- break;
- }
- }
- } while (cnt++ < 200);
-
- if (cap_id == 0x01) {
- /* Get the PM CSR (Control/Status Register),
- * The PME_Status is located at PM Capatibility offset 5, bit 7
- */
- pci_read_config_byte(rtlpci->pdev, cap_pointer + 5, &pmcs_reg);
-
- if (pmcs_reg & BIT(7)) {
- /* PME event occured, clear the PM_Status by write 1 */
- pmcs_reg = pmcs_reg | BIT(7);
-
- pci_write_config_byte(rtlpci->pdev, cap_pointer + 5,
- pmcs_reg);
- /* Read it back to check */
- pci_read_config_byte(rtlpci->pdev, cap_pointer + 5,
- &pmcs_reg);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
- "Clear PME status 0x%2x to 0x%2x\n",
- cap_pointer + 5, pmcs_reg);
- } else {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
- "PME status(0x%2x) = 0x%2x\n",
- cap_pointer + 5, pmcs_reg);
- }
- } else {
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (!pm_cap) {
rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
"Cannot find PME Capability\n");
+ return;
+ }
+
+ pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcs_reg);
+ if (pmcs_reg & PCI_PM_CTRL_PME_STATUS) {
+ /* Clear PME_Status with write */
+ pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmcs_reg);
+ pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmcs_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Cleared PME status, PMCS reg = 0x%4x\n", pmcs_reg);
+ } else {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PMCS reg = 0x%4x\n", pmcs_reg);
}
}
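The rewritten _rtl8821ae_clear_pci_pme_status() leans on pci_find_capability() instead of walking the capability list by hand. PME_Status is a write-one-to-clear bit, so writing back the value just read clears it; a condensed sketch (error paths and debug prints trimmed):

	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
	u16 pmcs;

	if (pm) {
		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcs);
		if (pmcs & PCI_PM_CTRL_PME_STATUS)
			/* RW1C: writing the set bit back clears it. */
			pci_write_config_word(pdev, pm + PCI_PM_CTRL, pmcs);
	}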
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index fa1839d8e..1be51ea3f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -27,13 +27,6 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
-{
- if (WARN_ON_ONCE(!bitmask))
- return 0;
-
- return __ffs(bitmask);
-}
static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
/*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -106,7 +99,7 @@ u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
"regaddr(%#x), bitmask(%#x)\n",
regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
@@ -127,7 +120,7 @@ void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
- bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((originalvalue & (~bitmask)) |
((data << bitshift) & bitmask));
}
@@ -153,7 +146,7 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl8821ae_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock(&rtlpriv->locks.rf_lock);
@@ -181,7 +174,7 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
if (bitmask != RFREG_OFFSET_MASK) {
original_value =
_rtl8821ae_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
+ bitshift = calculate_bit_shift(bitmask);
data = ((original_value & (~bitmask)) | (data << bitshift));
}
@@ -2039,15 +2032,9 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
/*don't need the hw_body*/
if (!_rtl8821ae_check_condition(hw, v1)) {
i += 2; /* skip the pair of expression*/
- v1 = array[i];
v2 = array[i+1];
- v3 = array[i+2];
- while (v2 != 0xDEAD) {
+ while (v2 != 0xDEAD)
i += 3;
- v1 = array[i];
- v2 = array[i+1];
- v3 = array[i+2];
- }
}
}
}
@@ -3544,7 +3531,6 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 timeout = 1000, timecount = 0;
- u8 channel = rtlphy->current_channel;
if (rtlphy->sw_chnl_inprogress)
return 0;
@@ -3567,8 +3553,6 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G);
rtlphy->sw_chnl_inprogress = true;
- if (channel == 0)
- channel = 1;
rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
"switch to channel%d, band type is %d\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 0bca542e1..7b911695d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -23,9 +23,6 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*close ASPM for AMD defaultly */
- rtlpci->const_amdpci_aspm = 0;
-
/**
* ASPM PS mode.
* 0 - Disable ASPM,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 5d842cc39..d87cd2252 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1316,7 +1316,6 @@ struct rtl_phy {
u8 sw_chnl_stage;
u8 sw_chnl_step;
u8 current_channel;
- u8 h2c_box_num;
u8 set_io_inprogress;
u8 lck_inprogress;
@@ -1329,9 +1328,6 @@ struct rtl_phy {
s32 reg_ebc;
s32 reg_ec4;
s32 reg_ecc;
- u8 rfpienable;
- u8 reserve_0;
- u16 reserve_1;
u32 reg_c04, reg_c08, reg_874;
u32 adda_backup[16];
u32 iqk_mac_backup[IQK_MAC_REG_NUM];
@@ -1345,7 +1341,6 @@ struct rtl_phy {
struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
bool rfpi_enable;
- bool iqk_in_progress;
u8 pwrgroup_cnt;
u8 cck_high_power;
@@ -1383,7 +1378,6 @@ struct rtl_phy {
[MAX_RF_PATH_NUM];
u32 rfreg_chnlval[2];
- bool apk_done;
u32 reg_rf3c[2]; /* pathA / pathB */
u32 backup_rf_0x1a;/*92ee*/
@@ -1395,7 +1389,6 @@ struct rtl_phy {
struct phy_parameters hwparam_tables[MAX_TAB];
u16 rf_pathmap;
- u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/
enum rt_polarity_ctl polarity_ctl;
};
@@ -1610,7 +1603,6 @@ struct rtl_hal {
bool up_first_time;
bool first_init;
bool being_init_adapter;
- bool bbrf_ready;
bool mac_func_enable;
bool pre_edcca_enable;
struct bt_coexist_8723 hal_coex_8723;
@@ -1623,9 +1615,7 @@ struct rtl_hal {
u8 state; /*stop 0, start 1 */
u8 board_type;
u8 package_type;
- u8 external_pa;
- u8 pa_mode;
u8 pa_type_2g;
u8 pa_type_5g;
u8 lna_type_2g;
@@ -1691,14 +1681,9 @@ struct rtl_hal {
bool master_of_dmsp;
bool slave_of_dmsp;
- u16 rx_tag;/*for 92ee*/
- u8 rts_en;
-
/*for wowlan*/
- bool wow_enable;
bool enter_pnp_sleep;
bool wake_from_pnp_sleep;
- bool wow_enabled;
time64_t last_suspend_sec;
u32 wowlan_fwsize;
u8 *wowlan_firmware;
@@ -2023,8 +2008,6 @@ struct rtl_ps_ctl {
u32 cur_ps_level;
u32 reg_rfps_level;
- /*just for PCIE ASPM */
- u8 const_amdpci_aspm;
bool pwrdown_mode;
enum rf_pwrstate inactive_pwrstate;
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 35bc37a3c..1b2ad8183 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -1314,8 +1314,8 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
#ifdef CONFIG_RTW88_DEBUG
-void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
- const char *fmt, ...)
+void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -1330,6 +1330,6 @@ void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
va_end(args);
}
-EXPORT_SYMBOL(__rtw_dbg);
+EXPORT_SYMBOL(rtw_dbg);
#endif /* CONFIG_RTW88_DEBUG */
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index a03ced11b..f20c0471c 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -43,10 +43,8 @@ static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {}
#ifdef CONFIG_RTW88_DEBUG
__printf(3, 4)
-void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
- const char *fmt, ...);
-
-#define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a)
+void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...);
static inline bool rtw_dbg_is_enabled(struct rtw_dev *rtwdev,
enum rtw_debug_mask mask)
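Dropping the rtw_dbg() wrapper macro above is safe because printf-style argument checking comes from the __printf(3, 4) attribute on the function prototype, not from the macro. An illustrative sketch:

	/* __printf(m, n) expands to __attribute__((format(printf, m, n))):
	 * argument m is the format string, checking starts at argument n. */
	__printf(3, 4)
	void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
		     const char *fmt, ...);

	/* rtw_dbg(rtwdev, RTW_DBG_FW, "%s", 42);  -> -Wformat warning */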
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index acd78311c..3f037ddce 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -998,7 +998,7 @@ static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
if (rsvd_pkt->type != RSVD_PROBE_REQ)
continue;
if ((!ssid && !rsvd_pkt->ssid) ||
- rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ cfg80211_ssid_eq(rsvd_pkt->ssid, ssid))
location = rsvd_pkt->page;
}
@@ -1015,7 +1015,7 @@ static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
if (rsvd_pkt->type != RSVD_PROBE_REQ)
continue;
if ((!ssid && !rsvd_pkt->ssid) ||
- rtw_ssid_equal(rsvd_pkt->ssid, ssid))
+ cfg80211_ssid_eq(rsvd_pkt->ssid, ssid))
size = rsvd_pkt->probe_req_size;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 63673005c..ffba6b88f 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2008,6 +2008,11 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+ if (!is_valid_ether_addr(efuse->addr)) {
+ eth_random_addr(efuse->addr);
+ dev_warn(rtwdev->dev, "efuse MAC invalid, using random\n");
+ }
+
out_disable:
rtw_chip_efuse_disable(rtwdev);
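The main.c hunk above falls back to a random locally administered MAC when the efuse address is unprogrammed or corrupt. The pattern with the etherdevice helpers, as a minimal sketch (fixup_mac() is a placeholder name):

	#include <linux/etherdevice.h>

	static void fixup_mac(struct device *dev, u8 addr[ETH_ALEN])
	{
		/* Rejects all-zero, broadcast and multicast addresses. */
		if (!is_valid_ether_addr(addr)) {
			eth_random_addr(addr);	/* random, locally administered */
			dev_warn(dev, "invalid efuse MAC, using a random one\n");
		}
	}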
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index b6bfd4c02..e14d1da43 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -2090,18 +2090,6 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
-static inline bool rtw_ssid_equal(struct cfg80211_ssid *a,
- struct cfg80211_ssid *b)
-{
- if (!a || !b || a->ssid_len != b->ssid_len)
- return false;
-
- if (memcmp(a->ssid, b->ssid, a->ssid_len))
- return false;
-
- return true;
-}
-
static inline void rtw_chip_efuse_grant_on(struct rtw_dev *rtwdev)
{
if (rtwdev->chip->ops->efuse_grant)
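The rtw_ssid_equal() helper deleted above duplicated logic that cfg80211 now exports as cfg80211_ssid_eq(), which the fw.c hunks switch to. For reference, the equivalent comparison, mirroring the removed body:

	static inline bool ssid_eq(const struct cfg80211_ssid *a,
				   const struct cfg80211_ssid *b)
	{
		if (!a || !b || a->ssid_len != b->ssid_len)
			return false;

		return memcmp(a->ssid, b->ssid, a->ssid_len) == 0;
	}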
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index f63900b66..c02ac673b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -656,9 +656,8 @@ void __rtw_tx_work(struct rtw_dev *rtwdev)
list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
unsigned long frame_cnt;
- unsigned long byte_cnt;
- ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ ieee80211_txq_get_depth(txq, &frame_cnt, NULL);
rtw_txq_push(rtwdev, rtwtxq, frame_cnt);
list_del_init(&rtwtxq->list);
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 8aaf83a2a..2e7326a8e 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -12,27 +12,74 @@ static const guid_t rtw89_guid = GUID_INIT(0xD2A8C3E8, 0x4B69, 0x4F00,
0x82, 0xBD, 0xFE, 0x86,
0x07, 0x80, 0x3A, 0xA7);
-static int rtw89_acpi_dsm_get(struct rtw89_dev *rtwdev, union acpi_object *obj,
- u8 *value)
+static
+int rtw89_acpi_dsm_get_value(struct rtw89_dev *rtwdev, union acpi_object *obj,
+ u8 *value)
{
- switch (obj->type) {
- case ACPI_TYPE_INTEGER:
- *value = (u8)obj->integer.value;
- break;
- case ACPI_TYPE_BUFFER:
- *value = obj->buffer.pointer[0];
- break;
- default:
- rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
- "acpi dsm return unhandled type: %d\n", obj->type);
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect integer but type: %d\n", obj->type);
return -EINVAL;
}
+ *value = (u8)obj->integer.value;
+ return 0;
+}
+
+static bool chk_acpi_policy_6ghz_sig(const struct rtw89_acpi_policy_6ghz *p)
+{
+ return p->signature[0] == 0x00 &&
+ p->signature[1] == 0xE0 &&
+ p->signature[2] == 0x4C;
+}
+
+static
+int rtw89_acpi_dsm_get_policy_6ghz(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_6ghz **policy_6ghz)
+{
+ const struct rtw89_acpi_policy_6ghz *ptr;
+ u32 expect_len;
+ u32 len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ len = obj->buffer.length;
+ if (len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_6ghz_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ expect_len = struct_size(ptr, country_list, ptr->country_count);
+ if (len < expect_len) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: expect %u but length: %u\n",
+ __func__, expect_len, len);
+ return -EINVAL;
+ }
+
+ *policy_6ghz = kmemdup(ptr, expect_len, GFP_KERNEL);
+ if (!*policy_6ghz)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_6ghz: ", *policy_6ghz,
+ expect_len);
return 0;
}
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
- enum rtw89_acpi_dsm_func func, u8 *value)
+ enum rtw89_acpi_dsm_func func,
+ struct rtw89_acpi_dsm_result *res)
{
union acpi_object *obj;
int ret;
@@ -40,12 +87,16 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
obj = acpi_evaluate_dsm(ACPI_HANDLE(rtwdev->dev), &rtw89_guid,
0, func, NULL);
if (!obj) {
- rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
"acpi dsm fail to evaluate func: %d\n", func);
return -ENOENT;
}
- ret = rtw89_acpi_dsm_get(rtwdev, obj, value);
+ if (func == RTW89_ACPI_DSM_FUNC_6G_BP)
+ ret = rtw89_acpi_dsm_get_policy_6ghz(rtwdev, obj,
+ &res->u.policy_6ghz);
+ else
+ ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value);
ACPI_FREE(obj);
return ret;
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index ed74d8ceb..fe85b40cf 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -15,7 +15,37 @@ enum rtw89_acpi_dsm_func {
RTW89_ACPI_DSM_FUNC_59G_EN = 6,
};
+enum rtw89_acpi_policy_mode {
+ RTW89_ACPI_POLICY_BLOCK = 0,
+ RTW89_ACPI_POLICY_ALLOW = 1,
+};
+
+struct rtw89_acpi_country_code {
+ /* below are allowed:
+ * * ISO alpha2 country code
+ * * EU for countries in Europe
+ */
+ char alpha2[2];
+} __packed;
+
+struct rtw89_acpi_policy_6ghz {
+ u8 signature[3];
+ u8 rsvd;
+ u8 policy_mode;
+ u8 country_count;
+ struct rtw89_acpi_country_code country_list[] __counted_by(country_count);
+} __packed;
+
+struct rtw89_acpi_dsm_result {
+ union {
+ u8 value;
+ /* caller needs to free it after use */
+ struct rtw89_acpi_policy_6ghz *policy_6ghz;
+ } u;
+};
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
- enum rtw89_acpi_dsm_func func, u8 *value);
+ enum rtw89_acpi_dsm_func func,
+ struct rtw89_acpi_dsm_result *res);
#endif
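The buffer handling in rtw89_acpi_dsm_get_policy_6ghz() above is the standard flexible-array validation: confirm the fixed header fits, read the element count from it, then recompute the full size with struct_size() before duplicating. Reduced to a sketch (dup_policy() is an illustrative name):

	/* 'len' is the ACPI buffer length reported by firmware. */
	static struct rtw89_acpi_policy_6ghz *
	dup_policy(const void *buf, u32 len)
	{
		const struct rtw89_acpi_policy_6ghz *p = buf;
		u32 need;

		if (len < sizeof(*p))	/* fixed header must fit first */
			return NULL;

		/* sizeof(*p) + country_count elements, overflow-checked */
		need = struct_size(p, country_list, p->country_count);
		if (len < need)
			return NULL;

		/* caller owns the copy and must kfree() it */
		return kmemdup(p, need, GFP_KERNEL);
	}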
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index f5301c2bb..914c94988 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -488,6 +488,20 @@ static int rtw89_cam_get_avail_addr_cam(struct rtw89_dev *rtwdev,
return 0;
}
+static u8 rtw89_get_addr_cam_entry_size(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ switch (chip->chip_id) {
+ case RTL8852A:
+ case RTL8852B:
+ case RTL8851B:
+ return ADDR_CAM_ENT_SIZE;
+ default:
+ return ADDR_CAM_ENT_SHORT_SIZE;
+ }
+}
+
int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
struct rtw89_addr_cam_entry *addr_cam,
const struct rtw89_bssid_cam_entry *bssid_cam)
@@ -509,7 +523,7 @@ int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
}
addr_cam->addr_cam_idx = addr_cam_idx;
- addr_cam->len = ADDR_CAM_ENT_SIZE;
+ addr_cam->len = rtw89_get_addr_cam_entry_size(rtwdev);
addr_cam->offset = 0;
addr_cam->valid = true;
addr_cam->addr_mask = 0;
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index ace7bbf2c..f37afb4cb 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -6,6 +6,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
@@ -122,7 +123,8 @@ static const u32 cxtbl[] = {
0xea55556a, /* 21 */
0xaafafafa, /* 22 */
0xfafaaafa, /* 23 */
- 0xfafffaff /* 24 */
+ 0xfafffaff, /* 24 */
+ 0xea6a5a5a, /* 25 */
};
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
@@ -246,6 +248,11 @@ struct rtw89_btc_btf_set_mon_reg {
struct rtw89_btc_fbtc_mreg regs[] __counted_by(reg_num);
} __packed;
+struct _wl_rinfo_now {
+ u8 link_mode;
+ u32 dbcc_2g_phy: 2;
+};
+
enum btc_btf_set_cx_policy {
CXPOLICY_TDMA = 0x0,
CXPOLICY_SLOT = 0x1,
@@ -262,6 +269,8 @@ enum btc_b2w_scoreboard {
BTC_BSCB_RFK_RUN = BIT(5),
BTC_BSCB_RFK_REQ = BIT(6),
BTC_BSCB_LPS = BIT(7),
+ BTC_BSCB_BT_LNAB0 = BIT(8),
+ BTC_BSCB_BT_LNAB1 = BIT(10),
BTC_BSCB_WLRFK = BIT(11),
BTC_BSCB_BT_HILNA = BIT(13),
BTC_BSCB_BT_CONNECT = BIT(16),
@@ -405,11 +414,14 @@ enum btc_cx_poicy_type {
/* TDMA Fix slot-8: W1:B1 = user-define */
BTC_CXP_FIX_TDW1B1 = (BTC_CXP_FIX << 8) | 8,
- /* TDMA Fix slot-9: W1:B1 = 40:20 */
- BTC_CXP_FIX_TD4020 = (BTC_CXP_FIX << 8) | 9,
-
/* TDMA Fix slot-9: W1:B1 = 40:10 */
- BTC_CXP_FIX_TD4010ISO = (BTC_CXP_FIX << 8) | 10,
+ BTC_CXP_FIX_TD4010ISO = (BTC_CXP_FIX << 8) | 9,
+
+ /* TDMA Fix slot-10: W1:B1 = 40:10 */
+ BTC_CXP_FIX_TD4010ISO_DL = (BTC_CXP_FIX << 8) | 10,
+
+ /* TDMA Fix slot-11: W1:B1 = 40:10 */
+ BTC_CXP_FIX_TD4010ISO_UL = (BTC_CXP_FIX << 8) | 11,
/* PS-TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_PFIX_TD3030 = (BTC_CXP_PFIX << 8) | 0,
@@ -710,7 +722,8 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_CX)
memset(cx, 0, sizeof(*cx));
- else if (type & BTC_RESET_BTINFO) /* only for BT enable */
+
+ if (type & BTC_RESET_BTINFO) /* only for BT enable */
memset(bt, 0, sizeof(*bt));
if (type & BTC_RESET_CTRL) {
@@ -739,12 +752,115 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
btc->dm.coex_info_map = BTC_COEX_INFO_ALL;
btc->dm.wl_tx_limit.tx_time = BTC_MAX_TX_TIME_DEF;
btc->dm.wl_tx_limit.tx_retry = BTC_MAX_TX_RETRY_DEF;
+ btc->dm.wl_pre_agc_rb = BTC_PREAGC_NOTFOUND;
+ btc->dm.wl_btg_rx_rb = BTC_BTGCTRL_BB_GNT_NOTFOUND;
}
if (type & BTC_RESET_MDINFO)
memset(&btc->mdinfo, 0, sizeof(btc->mdinfo));
}
+static u8 _search_reg_index(struct rtw89_dev *rtwdev, u8 mreg_num, u16 reg_type, u32 target)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 i;
+
+ for (i = 0; i < mreg_num; i++)
+ if (le16_to_cpu(chip->mon_reg[i].type) == reg_type &&
+ le32_to_cpu(chip->mon_reg[i].offset) == target) {
+ return i;
+ }
+ return BTC_REG_NOTFOUND;
+}
+
+static void _get_reg_status(struct rtw89_dev *rtwdev, u8 type, u8 *val)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ struct rtw89_btc_module *md = &btc->mdinfo;
+ union rtw89_btc_fbtc_mreg_val *pmreg;
+ u32 pre_agc_addr = R_BTC_BB_PRE_AGC_S1;
+ u32 reg_val;
+ u8 idx;
+
+ if (md->ant.btg_pos == RF_PATH_A)
+ pre_agc_addr = R_BTC_BB_PRE_AGC_S0;
+
+ switch (type) {
+ case BTC_CSTATUS_TXDIV_POS:
+ if (md->switch_type == BTC_SWITCH_INTERNAL)
+ *val = BTC_ANT_DIV_MAIN;
+ break;
+ case BTC_CSTATUS_RXDIV_POS:
+ if (md->switch_type == BTC_SWITCH_INTERNAL)
+ *val = BTC_ANT_DIV_MAIN;
+ break;
+ case BTC_CSTATUS_BB_GNT_MUX:
+ reg_val = rtw89_phy_read32(rtwdev, R_BTC_BB_BTG_RX);
+ *val = !(reg_val & B_BTC_BB_GNT_MUX);
+ break;
+ case BTC_CSTATUS_BB_GNT_MUX_MON:
+ if (!btc->fwinfo.rpt_fbtc_mregval.cinfo.valid)
+ return;
+
+ pmreg = &btc->fwinfo.rpt_fbtc_mregval.finfo;
+ if (ver->fcxmreg == 1) {
+ idx = _search_reg_index(rtwdev, pmreg->v1.reg_num,
+ REG_BB, R_BTC_BB_BTG_RX);
+ if (idx == BTC_REG_NOTFOUND) {
+ *val = BTC_BTGCTRL_BB_GNT_NOTFOUND;
+ } else {
+ reg_val = le32_to_cpu(pmreg->v1.mreg_val[idx]);
+ *val = !(reg_val & B_BTC_BB_GNT_MUX);
+ }
+ } else if (ver->fcxmreg == 2) {
+ idx = _search_reg_index(rtwdev, pmreg->v2.reg_num,
+ REG_BB, R_BTC_BB_BTG_RX);
+ if (idx == BTC_REG_NOTFOUND) {
+ *val = BTC_BTGCTRL_BB_GNT_NOTFOUND;
+ } else {
+ reg_val = le32_to_cpu(pmreg->v2.mreg_val[idx]);
+ *val = !(reg_val & B_BTC_BB_GNT_MUX);
+ }
+ }
+ break;
+ case BTC_CSTATUS_BB_PRE_AGC:
+ reg_val = rtw89_phy_read32(rtwdev, pre_agc_addr);
+ reg_val &= B_BTC_BB_PRE_AGC_MASK;
+ *val = (reg_val == B_BTC_BB_PRE_AGC_VAL);
+ break;
+ case BTC_CSTATUS_BB_PRE_AGC_MON:
+ if (!btc->fwinfo.rpt_fbtc_mregval.cinfo.valid)
+ return;
+
+ pmreg = &btc->fwinfo.rpt_fbtc_mregval.finfo;
+ if (ver->fcxmreg == 1) {
+ idx = _search_reg_index(rtwdev, pmreg->v1.reg_num,
+ REG_BB, pre_agc_addr);
+ if (idx == BTC_REG_NOTFOUND) {
+ *val = BTC_PREAGC_NOTFOUND;
+ } else {
+ reg_val = le32_to_cpu(pmreg->v1.mreg_val[idx]) &
+ B_BTC_BB_PRE_AGC_MASK;
+ *val = (reg_val == B_BTC_BB_PRE_AGC_VAL);
+ }
+ } else if (ver->fcxmreg == 2) {
+ idx = _search_reg_index(rtwdev, pmreg->v2.reg_num,
+ REG_BB, pre_agc_addr);
+ if (idx == BTC_REG_NOTFOUND) {
+ *val = BTC_PREAGC_NOTFOUND;
+ } else {
+ reg_val = le32_to_cpu(pmreg->v2.mreg_val[idx]) &
+ B_BTC_BB_PRE_AGC_MASK;
+ *val = (reg_val == B_BTC_BB_PRE_AGC_VAL);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
#define BTC_RPT_HDR_SIZE 3
#define BTC_CHK_WLSLOT_DRIFT_MAX 15
#define BTC_CHK_BTSLOT_DRIFT_MAX 15
@@ -1003,7 +1119,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
u16 wl_slot_set = 0, wl_slot_real = 0;
u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t = 0;
u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr;
- u8 i;
+ u8 i, val = 0;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): index:%d\n",
@@ -1508,6 +1624,19 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
goto err;
}
break;
+ case BTC_RPT_TYPE_MREG:
+ _get_reg_status(rtwdev, BTC_CSTATUS_BB_GNT_MUX_MON, &val);
+ if (dm->wl_btg_rx == BTC_BTGCTRL_BB_GNT_FWCTRL)
+ dm->wl_btg_rx_rb = BTC_BTGCTRL_BB_GNT_FWCTRL;
+ else
+ dm->wl_btg_rx_rb = val;
+
+ _get_reg_status(rtwdev, BTC_CSTATUS_BB_PRE_AGC_MON, &val);
+ if (dm->wl_pre_agc == BTC_PREAGC_BB_FWCTRL)
+ dm->wl_pre_agc_rb = BTC_PREAGC_BB_FWCTRL;
+ else
+ dm->wl_pre_agc_rb = val;
+ break;
case BTC_RPT_TYPE_BT_VER:
case BTC_RPT_TYPE_BT_SCAN:
case BTC_RPT_TYPE_BT_AFH:
@@ -1576,13 +1705,13 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
if (ver->fcxtdma == 1) {
v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
tlv->len = sizeof(*v);
- memcpy(v, &dm->tdma, sizeof(*v));
+ *v = dm->tdma;
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
} else {
tlv->len = sizeof(*v3);
v3 = (struct rtw89_btc_fbtc_tdma_v3 *)&tlv->val[0];
v3->fver = ver->fcxtdma;
- memcpy(&v3->tdma, &dm->tdma, sizeof(v3->tdma));
+ v3->tdma = dm->tdma;
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v3);
}
@@ -2155,8 +2284,9 @@ static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- if (bt->rf_para.rx_gain_freerun == level ||
- level > BTC_BT_RX_NORMAL_LVL)
+ if ((bt->rf_para.rx_gain_freerun == level ||
+ level > BTC_BT_RX_NORMAL_LVL) &&
+ (!rtwdev->chip->scbd || bt->lna_constrain == level))
return;
bt->rf_para.rx_gain_freerun = level;
@@ -2171,32 +2301,59 @@ static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level)
else
_write_scbd(rtwdev, BTC_WSCB_RXGAIN, true);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_LNA_CONSTRAIN, &level, 1);
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_LNA_CONSTRAIN, &level, sizeof(level));
}
static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *b = &bt->link_info;
+ struct rtw89_btc_wl_smap *wl_smap = &wl->status.map;
struct rtw89_btc_rf_trx_para para;
u32 wl_stb_chg = 0;
- u8 level_id = 0;
+ u8 level_id = 0, link_mode = 0, i, dbcc_2g_phy = 0;
+
+ if (ver->fwlrole == 0) {
+ link_mode = wl->role_info.link_mode;
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ if (wl->dbcc_info.real_band[i] == RTW89_BAND_2G)
+ dbcc_2g_phy = i;
+ }
+ } else if (ver->fwlrole == 1) {
+ link_mode = wl->role_info_v1.link_mode;
+ dbcc_2g_phy = wl->role_info_v1.dbcc_2g_phy;
+ } else if (ver->fwlrole == 2) {
+ link_mode = wl->role_info_v2.link_mode;
+ dbcc_2g_phy = wl->role_info_v2.dbcc_2g_phy;
+ }
- if (!dm->freerun) {
- /* fix LNA2 = level-5 for BT ACI issue at BTG */
+ /* decide trx_para_level */
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ /* fix LNA2 + TIA gain not change by GNT_BT */
if ((btc->dm.wl_btg_rx && b->profile_cnt.now != 0) ||
dm->bt_only == 1)
- dm->trx_para_level = 1;
+ dm->trx_para_level = 1; /* for better BT ACI issue */
else
dm->trx_para_level = 0;
+ } else { /* non-shared antenna */
+ dm->trx_para_level = 5;
+ /* modify trx_para if WK 2.4G-STA-DL + bt link */
+ if (b->profile_cnt.now != 0 &&
+ link_mode == BTC_WLINK_2G_STA &&
+ wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) { /* uplink */
+ if (wl->rssi_level == 4 && bt->rssi_level > 2)
+ dm->trx_para_level = 6;
+ else if (wl->rssi_level == 3 && bt->rssi_level > 3)
+ dm->trx_para_level = 7;
+ }
}
- level_id = (u8)dm->trx_para_level;
-
+ level_id = dm->trx_para_level;
if (level_id >= chip->rf_para_dlink_num ||
level_id >= chip->rf_para_ulink_num) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -2210,25 +2367,26 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
else
para = chip->rf_para_dlink[level_id];
- if (para.wl_tx_power != RTW89_BTC_WL_DEF_TX_PWR)
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): wl_tx_power=%d\n",
- __func__, para.wl_tx_power);
- _set_wl_tx_power(rtwdev, para.wl_tx_power);
- _set_wl_rx_gain(rtwdev, para.wl_rx_gain);
- _set_bt_tx_power(rtwdev, para.bt_tx_power);
- _set_bt_rx_gain(rtwdev, para.bt_rx_gain);
-
- if (bt->enable.now == 0 || wl->status.map.rf_off == 1 ||
- wl->status.map.lps == BTC_LPS_RF_OFF)
+ if (dm->fddt_train) {
+ _set_wl_rx_gain(rtwdev, 1);
+ _write_scbd(rtwdev, BTC_WSCB_RXGAIN, true);
+ } else {
+ _set_wl_tx_power(rtwdev, para.wl_tx_power);
+ _set_wl_rx_gain(rtwdev, para.wl_rx_gain);
+ _set_bt_tx_power(rtwdev, para.bt_tx_power);
+ _set_bt_rx_gain(rtwdev, para.bt_rx_gain);
+ }
+
+ if (!bt->enable.now || dm->wl_only || wl_smap->rf_off ||
+ wl_smap->lps == BTC_LPS_RF_OFF ||
+ link_mode == BTC_WLINK_5G ||
+ link_mode == BTC_WLINK_NOLINK ||
+ (rtwdev->dbcc_en && dbcc_2g_phy != RTW89_PHY_1))
wl_stb_chg = 0;
else
wl_stb_chg = 1;
if (wl_stb_chg != dm->wl_stb_chg) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): wl_stb_chg=%d\n",
- __func__, wl_stb_chg);
dm->wl_stb_chg = wl_stb_chg;
chip->ops->btc_wl_s1_standby(rtwdev, dm->wl_stb_chg);
}
@@ -2661,9 +2819,17 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_W1, 40, tbl_w1, SLOT_ISO);
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_FIX_TD4020:
- _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_MIX);
- _slot_set(btc, CXST_B1, 20, tbl_b1, SLOT_MIX);
+ case BTC_CXP_FIX_TD4010ISO:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010ISO_DL:
+ _slot_set(btc, CXST_W1, 40, cxtbl[25], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, cxtbl[25], SLOT_ISO);
+ break;
+ case BTC_CXP_FIX_TD4010ISO_UL:
+ _slot_set(btc, CXST_W1, 40, cxtbl[20], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, cxtbl[25], SLOT_MIX);
break;
case BTC_CXP_FIX_TD7010:
_slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
@@ -3002,9 +3168,13 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_FIX_TD4020:
- _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_MIX);
- _slot_set(btc, CXST_B1, 20, tbl_b1, SLOT_MIX);
+ case BTC_CXP_FIX_TD4010ISO_DL:
+ _slot_set(btc, CXST_W1, 40, cxtbl[25], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, cxtbl[25], SLOT_ISO);
+ break;
+ case BTC_CXP_FIX_TD4010ISO_UL:
+ _slot_set(btc, CXST_W1, 40, cxtbl[20], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, cxtbl[25], SLOT_MIX);
break;
case BTC_CXP_FIX_TD7010:
_slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
@@ -3381,17 +3551,32 @@ static void _action_wl_init(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_INIT);
}
-static void _action_wl_off(struct rtw89_dev *rtwdev)
+static void _action_wl_off(struct rtw89_dev *rtwdev, u8 mode)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
- if (wl->status.map.rf_off || btc->dm.bt_only)
+ if (wl->status.map.rf_off || btc->dm.bt_only) {
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_WOFF);
+ } else if (wl->status.map.lps == BTC_LPS_RF_ON) {
+ if (wl->role_info.link_mode == BTC_WLINK_5G)
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W5G);
+ else
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ }
- _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_OFF);
+ if (mode == BTC_WLINK_5G) {
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_OFF);
+ } else if (wl->status.map.lps == BTC_LPS_RF_ON) {
+ if (btc->cx.bt.link_info.a2dp_desc.active)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_OFF);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_WL_OFF);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_OFF);
+ }
}
static void _action_freerun(struct rtw89_dev *rtwdev)
@@ -3426,31 +3611,25 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *b = &btc->cx.bt.link_info;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /*wl-busy + bt idle*/
- if (b->profile_cnt.now > 0)
- _set_policy(rtwdev, BTC_CXP_FIX_TD4010,
- BTC_ACT_BT_IDLE);
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */
+ if (b->status.map.connect)
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010, BTC_ACT_BT_IDLE);
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL))
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010ISO_DL, BTC_ACT_BT_IDLE);
else
- _set_policy(rtwdev, BTC_CXP_FIX_TD4020,
- BTC_ACT_BT_IDLE);
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010ISO_UL, BTC_ACT_BT_IDLE);
break;
case BTC_WBUSY_BSCAN: /*wl-busy + bt-inq */
_set_policy(rtwdev, BTC_CXP_PFIX_TD5050,
BTC_ACT_BT_IDLE);
break;
- case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */
- if (b->profile_cnt.now > 0)
- _set_policy(rtwdev, BTC_CXP_FIX_TD4010,
- BTC_ACT_BT_IDLE);
- else
- _set_policy(rtwdev, BTC_CXP_FIX_TD4020,
- BTC_ACT_BT_IDLE);
- break;
case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq */
_set_policy(rtwdev, BTC_CXP_FIX_TD5050,
BTC_ACT_BT_IDLE);
@@ -3617,7 +3796,7 @@ static void _action_bt_pan(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_FIX_TD3060, BTC_ACT_BT_PAN);
break;
case BTC_WLINKING: /* wl-connecting + bt-PAN */
- _set_policy(rtwdev, BTC_CXP_FIX_TD4020, BTC_ACT_BT_PAN);
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010ISO, BTC_ACT_BT_PAN);
break;
case BTC_WIDLE: /* wl-idle + bt-pan */
_set_policy(rtwdev, BTC_CXP_PFIX_TD2080, BTC_ACT_BT_PAN);
@@ -3798,46 +3977,134 @@ static void _action_wl_rfk(struct rtw89_dev *rtwdev)
static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- bool is_btg;
- u8 mode;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct _wl_rinfo_now wl_rinfo;
+ u32 run_reason = btc->dm.run_reason;
+ u32 is_btg;
+ u8 i, val;
if (btc->ctrl.manual)
return;
if (ver->fwlrole == 0)
- mode = wl_rinfo->link_mode;
+ wl_rinfo.link_mode = wl_rinfo_v0->link_mode;
else if (ver->fwlrole == 1)
- mode = wl_rinfo_v1->link_mode;
+ wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
- mode = wl_rinfo_v2->link_mode;
+ wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
else
return;
- /* notify halbb ignore GNT_BT or not for WL BB Rx-AGC control */
- if (mode == BTC_WLINK_5G) /* always 0 if 5G */
- is_btg = false;
- else if (mode == BTC_WLINK_25G_DBCC &&
- wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
- is_btg = false;
+ if (rtwdev->dbcc_en) {
+ if (ver->fwlrole == 0) {
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ if (wl_dinfo->real_band[i] == RTW89_BAND_2G)
+ wl_rinfo.dbcc_2g_phy = i;
+ }
+ } else if (ver->fwlrole == 1) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
+ } else if (ver->fwlrole == 2) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else {
+ return;
+ }
+ }
+
+ if (wl_rinfo.link_mode == BTC_WLINK_25G_MCC)
+ is_btg = BTC_BTGCTRL_BB_GNT_FWCTRL;
+ else if (!(bt->run_patch_code && bt->enable.now))
+ is_btg = BTC_BTGCTRL_DISABLE;
+ else if (wl_rinfo.link_mode == BTC_WLINK_5G)
+ is_btg = BTC_BTGCTRL_DISABLE;
+ else if (dm->freerun)
+ is_btg = BTC_BTGCTRL_DISABLE;
+ else if (rtwdev->dbcc_en && wl_rinfo.dbcc_2g_phy != RTW89_PHY_1)
+ is_btg = BTC_BTGCTRL_DISABLE;
else
- is_btg = true;
+ is_btg = BTC_BTGCTRL_ENABLE;
- if (btc->dm.run_reason != BTC_RSN_NTFY_INIT &&
- is_btg == btc->dm.wl_btg_rx)
- return;
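+	/* re-sync the cached GNT-mux setting from the BB register read-back
+	 * if the shadow copy (wl_btg_rx_rb) went stale
+	 */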
+ if (dm->wl_btg_rx_rb != dm->wl_btg_rx &&
+ dm->wl_btg_rx_rb != BTC_BTGCTRL_BB_GNT_NOTFOUND) {
+ _get_reg_status(rtwdev, BTC_CSTATUS_BB_GNT_MUX, &val);
+ dm->wl_btg_rx_rb = val;
+ }
+
+ if (run_reason == BTC_RSN_NTFY_INIT ||
+ run_reason == BTC_RSN_NTFY_SWBAND ||
+ dm->wl_btg_rx_rb != dm->wl_btg_rx ||
+ is_btg != dm->wl_btg_rx) {
+
+ dm->wl_btg_rx = is_btg;
+
+ if (is_btg > BTC_BTGCTRL_ENABLE)
+ return;
+
+ chip->ops->ctrl_btg_bt_rx(rtwdev, is_btg, RTW89_PHY_0);
+ }
+}
- btc->dm.wl_btg_rx = is_btg;
+static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 is_preagc, val;
- if (mode == BTC_WLINK_25G_MCC)
+ if (btc->ctrl.manual)
return;
- rtw89_ctrl_btg_bt_rx(rtwdev, is_btg, RTW89_PHY_0);
+ if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ is_preagc = BTC_PREAGC_BB_FWCTRL;
+ else if (!(bt->run_patch_code && bt->enable.now))
+ is_preagc = BTC_PREAGC_DISABLE;
+ else if (wl_rinfo->link_mode == BTC_WLINK_5G)
+ is_preagc = BTC_PREAGC_DISABLE;
+ else if (wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
+ btc->cx.bt.link_info.profile_cnt.now == 0)
+ is_preagc = BTC_PREAGC_DISABLE;
+ else if (dm->tdma_now.type != CXTDMA_OFF &&
+ !bt_linfo->hfp_desc.exist &&
+ !bt_linfo->hid_desc.exist &&
+ dm->fddt_train == BTC_FDDT_DISABLE)
+ is_preagc = BTC_PREAGC_DISABLE;
+ else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
+ wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
+ is_preagc = BTC_PREAGC_DISABLE;
+ else if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ is_preagc = BTC_PREAGC_DISABLE;
+ else
+ is_preagc = BTC_PREAGC_ENABLE;
+
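+	/* if the pre-AGC read-back shadow disagrees with what was last
+	 * programmed, refresh it from hardware before deciding on a rewrite
+	 */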
+ if (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
+ dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) {
+ _get_reg_status(rtwdev, BTC_CSTATUS_BB_PRE_AGC, &val);
+ dm->wl_pre_agc_rb = val;
+ }
+
+ if ((wl->coex_mode == BTC_MODE_NORMAL &&
+ (dm->run_reason == BTC_RSN_NTFY_INIT ||
+ dm->run_reason == BTC_RSN_NTFY_SWBAND ||
+ dm->wl_pre_agc_rb != dm->wl_pre_agc)) ||
+ is_preagc != dm->wl_pre_agc) {
+ dm->wl_pre_agc = is_preagc;
+
+ if (is_preagc > BTC_PREAGC_ENABLE)
+ return;
+ chip->ops->ctrl_nbtg_bt_tx(rtwdev, dm->wl_pre_agc, RTW89_PHY_0);
+ }
}
struct rtw89_txtime_data {
@@ -4024,6 +4291,7 @@ static void _action_common(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
_set_btg_ctrl(rtwdev);
+ _set_wl_preagc_ctrl(rtwdev);
_set_wl_tx_limit(rtwdev);
_set_bt_afh_info(rtwdev);
_set_bt_rx_agc(rtwdev);
@@ -5008,8 +5276,7 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
return;
}
- if (!(val & BTC_BSCB_ON) ||
- btc->dm.cnt_dm[BTC_DCNT_BTCNT_HANG] >= BTC_CHK_HANG_MAX)
+ if (!(val & BTC_BSCB_ON))
bt->enable.now = 0;
else
bt->enable.now = 1;
@@ -5035,6 +5302,9 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
bt->btg_type = val & BTC_BSCB_BT_S1 ? BTC_BT_BTG : BTC_BT_ALONE;
bt->link_info.a2dp_desc.exist = !!(val & BTC_BSCB_A2DP_ACT);
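+	/* scoreboard LNA bits encode an index: 4 + B0 + 2 * B1, i.e. 4..7 */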
+ bt->lna_constrain = !!(val & BTC_BSCB_BT_LNAB0) +
+ !!(val & BTC_BSCB_BT_LNAB1) * 2 + 4;
+
/* if rfk run 1->0 */
if (bt->rfk_info.map.run && !(val & BTC_BSCB_RFK_RUN))
status_change = true;
@@ -5128,17 +5398,28 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
}
if (wl->status.map.rf_off_pre == wl->status.map.rf_off &&
- wl->status.map.lps_pre == wl->status.map.lps &&
- (reason == BTC_RSN_NTFY_POWEROFF ||
- reason == BTC_RSN_NTFY_RADIO_STATE)) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): return for WL rf off state no change!!\n",
- __func__);
- return;
+ wl->status.map.lps_pre == wl->status.map.lps) {
+ if (reason == BTC_RSN_NTFY_POWEROFF ||
+ reason == BTC_RSN_NTFY_RADIO_STATE) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for WL rf off state no change!!\n",
+ __func__);
+ return;
+ }
+ if (wl->status.map.rf_off == 1 ||
+ wl->status.map.lps == BTC_LPS_RF_OFF) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for WL rf off state!!\n",
+ __func__);
+ return;
+ }
}
+ dm->freerun = false;
dm->cnt_dm[BTC_DCNT_RUN]++;
dm->fddt_train = BTC_FDDT_DISABLE;
+ btc->ctrl.igno_bt = false;
+ bt->scan_rx_low_pri = false;
if (btc->ctrl.always_freerun) {
_action_freerun(rtwdev);
@@ -5153,15 +5434,11 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
}
if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) {
- _action_wl_off(rtwdev);
+ _action_wl_off(rtwdev, mode);
btc->ctrl.igno_bt = true;
goto exit;
}
- btc->ctrl.igno_bt = false;
- dm->freerun = false;
- bt->scan_rx_low_pri = false;
-
if (reason == BTC_RSN_NTFY_INIT) {
_action_wl_init(rtwdev);
goto exit;
@@ -5186,12 +5463,14 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
if (mode == BTC_WLINK_NOLINK || mode == BTC_WLINK_2G_STA ||
mode == BTC_WLINK_5G) {
_action_wl_scan(rtwdev);
+ bt->scan_rx_low_pri = false;
goto exit;
}
}
if (wl->status.map.scan) {
_action_wl_scan(rtwdev);
+ bt->scan_rx_low_pri = false;
goto exit;
}
@@ -5308,6 +5587,7 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): mode=%d\n", __func__, mode);
+ wl->coex_mode = mode;
dm->cnt_notify[BTC_NCNT_INIT_COEX]++;
dm->wl_only = mode == BTC_MODE_WL ? 1 : 0;
dm->bt_only = mode == BTC_MODE_BT ? 1 : 0;
@@ -5352,6 +5632,10 @@ void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): phy_idx=%d, band=%d\n",
__func__, phy_idx, band);
+
+ if (phy_idx >= RTW89_PHY_MAX)
+ return;
+
btc->dm.cnt_notify[BTC_NCNT_SCAN_START]++;
wl->status.map.scan = true;
wl->scan_info.band[phy_idx] = band;
@@ -5396,6 +5680,10 @@ void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): phy_idx=%d, band=%d\n",
__func__, phy_idx, band);
+
+ if (phy_idx >= RTW89_PHY_MAX)
+ return;
+
btc->dm.cnt_notify[BTC_NCNT_SWITCH_BAND]++;
wl->scan_info.band[phy_idx] = band;
@@ -5517,6 +5805,37 @@ void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work)
mutex_unlock(&rtwdev->mutex);
}
+static u8 _update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ u8 *rssi_st, rssi_th, rssi_level = 0;
+ u8 i;
+
+	/* map rssi into one of the bands bounded by {40, 36, 31, 28} (%):
+	 * if rssi >= 40% (-60dBm) --> rssi_level = 4
+	 * if 36% <= rssi < 40%    --> rssi_level = 3
+	 * if 31% <= rssi < 36%    --> rssi_level = 2
+	 * if 28% <= rssi < 31%    --> rssi_level = 1
+	 * if rssi < 28%           --> rssi_level = 0
+	 */
+
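+	/* e.g. with the thresholds above, rssi = 37% first clears the 36%
+	 * boundary (i = 1), so rssi_level = BTC_BT_RSSI_THMAX - 1 = 3
+	 */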
+	/* check whether rssi has crossed a bt_rssi_thres boundary */
+ for (i = 0; i < BTC_BT_RSSI_THMAX; i++) {
+ rssi_th = chip->bt_rssi_thres[i];
+ rssi_st = &bt->link_info.rssi_state[i];
+
+ *rssi_st = _update_rssi_state(rtwdev, *rssi_st, rssi, rssi_th);
+
+ if (BTC_RSSI_HIGH(*rssi_st)) {
+ rssi_level = BTC_BT_RSSI_THMAX - i;
+ break;
+ }
+ }
+ return rssi_level;
+}
+
#define BT_PROFILE_PROTOCOL_MASK GENMASK(7, 4)
static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
@@ -5592,7 +5911,8 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
btinfo.val = bt->raw_info[BTC_BTINFO_H0];
 	/* raw value is in dBm; translate from -100..0 dBm to 0..100% */
b->rssi = chip->ops->btc_get_bt_rssi(rtwdev, btinfo.hb0.rssi);
- btc->dm.trx_info.bt_rssi = b->rssi;
+ bt->rssi_level = _update_bt_rssi_level(rtwdev, b->rssi);
+ btc->dm.trx_info.bt_rssi = bt->rssi_level;
/* parse raw info high-Byte1 */
btinfo.val = bt->raw_info[BTC_BTINFO_H1];
@@ -5796,22 +6116,22 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
chip->ops->btc_init_cfg(rtwdev);
} else {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, false);
- if (rf_state == BTC_RFCTRL_WL_OFF)
+ if (rf_state == BTC_RFCTRL_FW_CTRL)
+ _write_scbd(rtwdev, BTC_WSCB_ACTIVE, false);
+ else if (rf_state == BTC_RFCTRL_WL_OFF)
_write_scbd(rtwdev, BTC_WSCB_ALL, false);
- else if (rf_state == BTC_RFCTRL_LPS_WL_ON &&
- wl->status.map.lps_pre != BTC_LPS_OFF)
+ else
+ _write_scbd(rtwdev, BTC_WSCB_ACTIVE, false);
+
+ if (rf_state == BTC_RFCTRL_LPS_WL_ON &&
+ wl->status.map.lps_pre != BTC_LPS_OFF)
_update_bt_scbd(rtwdev, true);
}
btc->dm.cnt_dm[BTC_DCNT_BTCNT_HANG] = 0;
- if (wl->status.map.lps_pre == BTC_LPS_OFF &&
- wl->status.map.lps_pre != wl->status.map.lps)
- btc->dm.tdma_instant_excute = 1;
- else
- btc->dm.tdma_instant_excute = 0;
+ btc->dm.tdma_instant_excute = 1;
_run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
- btc->dm.tdma_instant_excute = 0;
wl->status.map.rf_off_pre = wl->status.map.rf_off;
wl->status.map.lps_pre = wl->status.map.lps;
}
@@ -6050,6 +6370,13 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
dm->trx_info.tx_tp = link_info_t->tx_throughput;
dm->trx_info.rx_tp = link_info_t->rx_throughput;
+	/* Trigger a coex run if the 0x10980 read-back differs from the coex setup */
+ if ((dm->wl_btg_rx_rb != dm->wl_btg_rx &&
+ dm->wl_btg_rx_rb != BTC_BTGCTRL_BB_GNT_NOTFOUND) ||
+ (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
+ dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND))
+ iter_data->is_sta_change = true;
+
if (is_sta_change)
iter_data->is_sta_change = true;
@@ -6435,8 +6762,9 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
bt_linfo->pan_desc.active ? "Y" : "N");
seq_printf(m,
- " %-15s : rssi:%ddBm, tx_rate:%dM, %s%s%s",
+ " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s",
"[link]", bt_linfo->rssi - 100,
+ bt->rssi_level,
bt_linfo->tx_3m ? 3 : 2,
bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
@@ -6545,6 +6873,8 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
#define CASE_BTC_SLOT_STR(e) case CXST_ ## e: return #e
#define CASE_BTC_EVT_STR(e) case CXEVNT_## e: return #e
+#define CASE_BTC_INIT(e) case BTC_MODE_## e: return #e
+#define CASE_BTC_ANTPATH_STR(e) case BTC_ANT_##e: return #e
static const char *steps_to_str(u16 step)
{
@@ -6625,8 +6955,9 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(FIX_TD3060);
CASE_BTC_POLICY_STR(FIX_TD2080);
CASE_BTC_POLICY_STR(FIX_TDW1B1);
- CASE_BTC_POLICY_STR(FIX_TD4020);
CASE_BTC_POLICY_STR(FIX_TD4010ISO);
+ CASE_BTC_POLICY_STR(FIX_TD4010ISO_DL);
+ CASE_BTC_POLICY_STR(FIX_TD4010ISO_UL);
CASE_BTC_POLICY_STR(PFIX_TD3030);
CASE_BTC_POLICY_STR(PFIX_TD5050);
CASE_BTC_POLICY_STR(PFIX_TD2030);
@@ -6719,6 +7050,37 @@ static const char *id_to_evt(u32 id)
}
}
+static const char *id_to_mode(u8 id)
+{
+ switch (id) {
+ CASE_BTC_INIT(NORMAL);
+ CASE_BTC_INIT(WL);
+ CASE_BTC_INIT(BT);
+ CASE_BTC_INIT(WLOFF);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *id_to_ant(u32 id)
+{
+ switch (id) {
+ CASE_BTC_ANTPATH_STR(WPOWERON);
+ CASE_BTC_ANTPATH_STR(WINIT);
+ CASE_BTC_ANTPATH_STR(WONLY);
+ CASE_BTC_ANTPATH_STR(WOFF);
+ CASE_BTC_ANTPATH_STR(W2G);
+ CASE_BTC_ANTPATH_STR(W5G);
+ CASE_BTC_ANTPATH_STR(W25G);
+ CASE_BTC_ANTPATH_STR(FREERUN);
+ CASE_BTC_ANTPATH_STR(WRFK);
+ CASE_BTC_ANTPATH_STR(BRFK);
+ CASE_BTC_ANTPATH_STR(MAX);
+ default:
+ return "unknown";
+ }
+}
+
static
void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
@@ -6773,12 +7135,13 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
(btc->ctrl.manual ? "(Manual)" : "(Auto)"));
seq_printf(m,
- " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%ld, run_cnt:%d\n",
+ " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
"[status]",
module->ant.type == BTC_ANT_SHARED ? "shared" : "dedicated",
steps_to_str(dm->run_reason),
steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
- FIELD_GET(GENMASK(7, 0), dm->set_ant_path),
+ id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
+ id_to_mode(wl->coex_mode),
dm->cnt_dm[BTC_DCNT_RUN]);
_show_dm_step(rtwdev, m);
@@ -7681,7 +8044,8 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
struct rtw89_mac_ax_gnt *gnt;
u32 val, status;
- if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) {
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
+ chip->chip_id == RTL8851B) {
rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
@@ -7743,27 +8107,25 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
- /* To avoid I/O if WL LPS or power-off */
- if (!wl->status.map.lps && !wl->status.map.rf_off) {
- btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ _get_gnt(rtwdev, &gnt_cfg);
+
+ gnt = gnt_cfg.band[0];
+ seq_printf(m,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ "[gnt_status]",
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+
+ gnt = gnt_cfg.band[1];
+ seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
- _get_gnt(rtwdev, &gnt_cfg);
- gnt = gnt_cfg.band[0];
- seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
- "[gnt_status]",
- chip->chip_id == RTL8852C ? "HW" :
- btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
-
- gnt = gnt_cfg.band[1];
- seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- gnt.gnt_wl_sw_en ? "SW" : "HW",
- gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW",
- gnt.gnt_bt);
- }
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -7847,27 +8209,25 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
- /* To avoid I/O if WL LPS or power-off */
- if (!wl->status.map.lps && !wl->status.map.rf_off) {
- btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ _get_gnt(rtwdev, &gnt_cfg);
+
+ gnt = gnt_cfg.band[0];
+ seq_printf(m,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ "[gnt_status]",
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+
+ gnt = gnt_cfg.band[1];
+ seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
- _get_gnt(rtwdev, &gnt_cfg);
- gnt = gnt_cfg.band[0];
- seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
- "[gnt_status]",
- chip->chip_id == RTL8852C ? "HW" :
- btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
-
- gnt = gnt_cfg.band[1];
- seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- gnt.gnt_wl_sw_en ? "SW" : "HW",
- gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW",
- gnt.gnt_bt);
- }
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index e76153709..46e25c6f8 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -142,6 +142,44 @@ enum btc_lps_state {
BTC_LPS_RF_ON = 2
};
+#define R_BTC_BB_BTG_RX 0x980
+#define R_BTC_BB_PRE_AGC_S1 0x476C
+#define R_BTC_BB_PRE_AGC_S0 0x4688
+
+#define B_BTC_BB_GNT_MUX GENMASK(20, 17)
+#define B_BTC_BB_PRE_AGC_MASK GENMASK(31, 24)
+#define B_BTC_BB_PRE_AGC_VAL BIT(31)
+
+#define BTC_REG_NOTFOUND 0xff
+
+enum btc_ant_div_pos {
+ BTC_ANT_DIV_MAIN = 0,
+ BTC_ANT_DIV_AUX = 1,
+};
+
+enum btc_get_reg_status {
+ BTC_CSTATUS_TXDIV_POS = 0,
+ BTC_CSTATUS_RXDIV_POS = 1,
+ BTC_CSTATUS_BB_GNT_MUX = 2,
+ BTC_CSTATUS_BB_GNT_MUX_MON = 3,
+ BTC_CSTATUS_BB_PRE_AGC = 4,
+ BTC_CSTATUS_BB_PRE_AGC_MON = 5,
+};
+
+enum btc_preagc_type {
+ BTC_PREAGC_DISABLE,
+ BTC_PREAGC_ENABLE,
+ BTC_PREAGC_BB_FWCTRL,
+ BTC_PREAGC_NOTFOUND,
+};
+
+enum btc_btgctrl_type {
+ BTC_BTGCTRL_DISABLE,
+ BTC_BTGCTRL_ENABLE,
+ BTC_BTGCTRL_BB_GNT_FWCTRL,
+ BTC_BTGCTRL_BB_GNT_NOTFOUND,
+};
+
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a3624ebf0..fd527a249 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -1407,29 +1407,65 @@ static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
struct sk_buff *skb,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_rxinfo *rxinfo = (const struct rtw89_rxinfo *)skb->data;
+ const struct rtw89_rxinfo_user *user;
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ int rx_cnt_size = RTW89_PPDU_MAC_RX_CNT_SIZE;
bool rx_cnt_valid = false;
+ bool invalid = false;
u8 plcp_size = 0;
- u8 usr_num = 0;
u8 *phy_sts;
+ u8 usr_num;
+ int i;
+
+ if (chip_gen == RTW89_CHIP_BE) {
+ invalid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_INVALID_V1);
+ rx_cnt_size = RTW89_PPDU_MAC_RX_CNT_SIZE_V1;
+ }
+
+ if (invalid)
+ return -EINVAL;
rx_cnt_valid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_RX_CNT_VLD);
- plcp_size = le32_get_bits(rxinfo->w1, RTW89_RXINFO_W1_PLCP_LEN) << 3;
- usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM);
- if (usr_num > RTW89_PPDU_MAX_USR) {
- rtw89_warn(rtwdev, "Invalid user number in mac info\n");
+ if (chip_gen == RTW89_CHIP_BE) {
+ plcp_size = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_PLCP_LEN_V1) << 3;
+ usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM_V1);
+ } else {
+ plcp_size = le32_get_bits(rxinfo->w1, RTW89_RXINFO_W1_PLCP_LEN) << 3;
+ usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM);
+ }
+ if (usr_num > chip->ppdu_max_usr) {
+ rtw89_warn(rtwdev, "Invalid user number (%d) in mac info\n",
+ usr_num);
return -EINVAL;
}
+	/* For WiFi 7 chips, hardware does not set RXWD.mac_id of the PPDU
+	 * status, so take mac_id from rxinfo_user[].mac_id instead.
+	 */
+ for (i = 0; i < usr_num && chip_gen == RTW89_CHIP_BE; i++) {
+ user = &rxinfo->user[i];
+ if (!le32_get_bits(user->w0, RTW89_RXINFO_USER_MAC_ID_VALID))
+ continue;
+
+ phy_ppdu->mac_id =
+ le32_get_bits(user->w0, RTW89_RXINFO_USER_MACID);
+ break;
+ }
+
phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE;
phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE;
/* 8-byte alignment */
if (usr_num & BIT(0))
phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE;
if (rx_cnt_valid)
- phy_sts += RTW89_PPDU_MAC_RX_CNT_SIZE;
+ phy_sts += rx_cnt_size;
phy_sts += plcp_size;
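+	/* reject a malformed PPDU status whose declared sizes run past the skb */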
+ if (phy_sts > skb->data + skb->len)
+ return -EINVAL;
+
phy_ppdu->buf = phy_sts;
phy_ppdu->len = skb->data + skb->len - phy_sts;
@@ -1477,14 +1513,24 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
const struct rtw89_phy_sts_iehdr *iehdr)
{
- static const u8 physts_ie_len_tab[32] = {
- 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
- VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
- VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
+ static const u8 physts_ie_len_tabs[RTW89_CHIP_GEN_NUM][32] = {
+ [RTW89_CHIP_AX] = {
+ 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
+ VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
+ VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
+ },
+ [RTW89_CHIP_BE] = {
+ 32, 40, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
+ VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
+ VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
+ },
};
+ const u8 *physts_ie_len_tab;
u16 ie_len;
u8 ie;
+ physts_ie_len_tab = physts_ie_len_tabs[rtwdev->chip->chip_gen];
+
ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
if (physts_ie_len_tab[ie] != VAR_LEN)
ie_len = physts_ie_len_tab[ie];
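
(Between generations only the two fixed-length entries at the head of the
table change: IE 0 grows from 16 to 32 bytes and IE 1 from 32 to 40; the
remaining entries, including the VAR_LEN slots, are identical.)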
@@ -1563,9 +1609,17 @@ static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
{
const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
u32 len_from_header;
+ bool physts_valid;
+
+ physts_valid = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_VALID);
+ if (!physts_valid)
+ return -EINVAL;
len_from_header = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_LEN) << 3;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ len_from_header += PHY_STS_HDR_LEN;
+
if (len_from_header != phy_ppdu->len) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n");
return -EINVAL;
@@ -2052,13 +2106,19 @@ static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
.mac_id = desc_info->mac_id};
int ret;
- if (desc_info->mac_info_valid)
- rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu);
+ if (desc_info->mac_info_valid) {
+ ret = rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu);
+ if (ret)
+ goto out;
+ }
+
ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu);
if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_TXRX, "process ppdu failed\n");
+ goto out;
rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu);
+
+out:
rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb);
dev_kfree_skb_any(skb);
}
@@ -2823,9 +2883,6 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
lockdep_assert_held(&rtwdev->mutex);
- ieee80211_queue_delayed_work(hw, &rtwvif->roc.roc_work,
- msecs_to_jiffies(rtwvif->roc.duration));
-
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC);
@@ -2847,6 +2904,9 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH);
ieee80211_ready_on_channel(hw);
+ cancel_delayed_work(&rtwvif->roc.roc_work);
+ ieee80211_queue_delayed_work(hw, &rtwvif->roc.roc_work,
+ msecs_to_jiffies(rtwvif->roc.duration));
}
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -3069,6 +3129,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_phy_tx_path_div_track(rtwdev);
rtw89_phy_antdiv_track(rtwdev);
rtw89_phy_ul_tb_ctrl_track(rtwdev);
+ rtw89_phy_edcca_track(rtwdev);
rtw89_tas_track(rtwdev);
rtw89_chanctx_track(rtwdev);
@@ -3895,10 +3956,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
/* efuse process */
/* pre-config BB/RF, BB reset/RFC reset */
- ret = rtw89_chip_disable_bb_rf(rtwdev);
- if (ret)
- return ret;
- ret = rtw89_chip_enable_bb_rf(rtwdev);
+ ret = rtw89_chip_reset_bb_rf(rtwdev);
if (ret)
return ret;
@@ -4156,17 +4214,18 @@ out:
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
ret = rtw89_mac_partial_init(rtwdev, false);
if (ret)
return ret;
- ret = rtw89_parse_efuse_map(rtwdev);
+ ret = mac->parse_efuse_map(rtwdev);
if (ret)
return ret;
- ret = rtw89_parse_phycap_map(rtwdev);
+ ret = mac->parse_phycap_map(rtwdev);
if (ret)
return ret;
@@ -4176,6 +4235,8 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
rtw89_core_setup_phycap(rtwdev);
+ rtw89_hci_mac_pre_deinit(rtwdev);
+
rtw89_mac_pwr_off(rtwdev);
return 0;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 8f59461e5..ea6df859b 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -16,6 +16,9 @@ struct rtw89_dev;
struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
+struct rtw89_efuse_block_cfg;
+struct rtw89_fw_txpwr_track_cfg;
+struct rtw89_phy_rfk_log_fmt;
extern const struct ieee80211_ops rtw89_ops;
@@ -37,6 +40,8 @@ extern const struct ieee80211_ops rtw89_ops;
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
#define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR)
+#define DELTA_SWINGIDX_SIZE 30
+
#define RTW89_RADIOTAP_ROOM_HE sizeof(struct ieee80211_radiotap_he)
#define RTW89_RADIOTAP_ROOM_EHT \
(sizeof(struct ieee80211_radiotap_tlv) + \
@@ -103,6 +108,14 @@ enum rtw89_gain_offset {
RTW89_GAIN_OFFSET_5G_LOW,
RTW89_GAIN_OFFSET_5G_MID,
RTW89_GAIN_OFFSET_5G_HIGH,
+ RTW89_GAIN_OFFSET_6G_L0,
+ RTW89_GAIN_OFFSET_6G_L1,
+ RTW89_GAIN_OFFSET_6G_M0,
+ RTW89_GAIN_OFFSET_6G_M1,
+ RTW89_GAIN_OFFSET_6G_H0,
+ RTW89_GAIN_OFFSET_6G_H1,
+ RTW89_GAIN_OFFSET_6G_UH0,
+ RTW89_GAIN_OFFSET_6G_UH1,
RTW89_GAIN_OFFSET_NR,
};
@@ -1693,6 +1706,7 @@ struct rtw89_btc_wl_info {
u8 port_id[RTW89_WIFI_ROLE_MLME_MAX];
u8 rssi_level;
u8 cn_report;
+ u8 coex_mode;
bool scbd_change;
u32 scbd;
@@ -1800,6 +1814,7 @@ struct rtw89_btc_bt_info {
union rtw89_btc_bt_rfk_info_map rfk_info;
u8 raw_info[BTC_BTINFO_MAX]; /* raw bt info from mailbox */
+ u8 rssi_level;
u32 scbd;
u32 feature;
@@ -1816,7 +1831,8 @@ struct rtw89_btc_bt_info {
u32 hi_lna_rx: 1;
u32 scan_rx_low_pri: 1;
u32 scan_info_update: 1;
- u32 rsvd: 20;
+ u32 lna_constrain: 3;
+ u32 rsvd: 17;
};
struct rtw89_btc_cx {
@@ -2492,18 +2508,22 @@ struct rtw89_btc_dm {
u32 noisy_level: 3;
u32 coex_info_map: 8;
u32 bt_only: 1;
- u32 wl_btg_rx: 1;
+ u32 wl_btg_rx: 2;
u32 trx_para_level: 8;
u32 wl_stb_chg: 1;
u32 pta_owner: 1;
+
u32 tdma_instant_excute: 1;
+ u32 wl_btg_rx_rb: 2;
u16 slot_dur[CXST_MAX];
u8 run_reason;
u8 run_action;
+ u8 wl_pre_agc: 2;
u8 wl_lna2: 1;
+ u8 wl_pre_agc_rb: 2;
};
struct rtw89_btc_ctrl {
@@ -2771,6 +2791,20 @@ enum rtw89_rx_frame_type {
RTW89_RX_TYPE_RSVD = 3,
};
+enum rtw89_efuse_block {
+ RTW89_EFUSE_BLOCK_SYS = 0,
+ RTW89_EFUSE_BLOCK_RF = 1,
+ RTW89_EFUSE_BLOCK_HCI_DIG_PCIE_SDIO = 2,
+ RTW89_EFUSE_BLOCK_HCI_DIG_USB = 3,
+ RTW89_EFUSE_BLOCK_HCI_PHY_PCIE = 4,
+ RTW89_EFUSE_BLOCK_HCI_PHY_USB3 = 5,
+ RTW89_EFUSE_BLOCK_HCI_PHY_USB2 = 6,
+ RTW89_EFUSE_BLOCK_ADIE = 7,
+
+ RTW89_EFUSE_BLOCK_NUM,
+ RTW89_EFUSE_BLOCK_IGNORE,
+};
+
struct rtw89_ra_info {
u8 is_dis_ra:1;
/* Bit0 : CCK
@@ -2809,10 +2843,10 @@ struct rtw89_ra_info {
u8 csi_bw:3;
};
-#define RTW89_PPDU_MAX_USR 4
#define RTW89_PPDU_MAC_INFO_USR_SIZE 4
#define RTW89_PPDU_MAC_INFO_SIZE 8
#define RTW89_PPDU_MAC_RX_CNT_SIZE 96
+#define RTW89_PPDU_MAC_RX_CNT_SIZE_V1 128
#define RTW89_MAX_RX_AGG_NUM 64
#define RTW89_MAX_TX_AGG_NUM 128
@@ -3058,6 +3092,7 @@ struct rtw89_hci_ops {
void (*write32)(struct rtw89_dev *rtwdev, u32 addr, u32 data);
int (*mac_pre_init)(struct rtw89_dev *rtwdev);
+ int (*mac_pre_deinit)(struct rtw89_dev *rtwdev);
int (*mac_post_init)(struct rtw89_dev *rtwdev);
int (*deinit)(struct rtw89_dev *rtwdev);
@@ -3112,7 +3147,8 @@ struct rtw89_chip_ops {
const struct rtw89_chan *chan,
enum rtw89_mac_idx mac_idx,
enum rtw89_phy_idx phy_idx);
- int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map);
+ int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfe_gpio)(struct rtw89_dev *rtwdev);
@@ -3261,6 +3297,8 @@ struct rtw89_dle_size {
u16 pge_size;
u16 lnk_pge_num;
u16 unlnk_pge_num;
+ /* for WiFi 7 chips below */
+ u32 srt_ofst;
};
struct rtw89_wde_quota {
@@ -3283,6 +3321,26 @@ struct rtw89_ple_quota {
u16 wd_rel;
u16 cpu_io;
u16 tx_rpt;
+ /* for WiFi 7 chips below */
+ u16 h2d;
+};
+
+struct rtw89_rsvd_quota {
+ u16 mpdu_info_tbl;
+ u16 b0_csi;
+ u16 b1_csi;
+ u16 b0_lmr;
+ u16 b1_lmr;
+ u16 b0_ftm;
+ u16 b1_ftm;
+ u16 b0_smr;
+ u16 b1_smr;
+ u16 others;
+};
+
+struct rtw89_dle_rsvd_size {
+ u32 srt_ofst;
+ u32 size;
};
struct rtw89_dle_mem {
@@ -3293,6 +3351,10 @@ struct rtw89_dle_mem {
const struct rtw89_wde_quota *wde_max_qt;
const struct rtw89_ple_quota *ple_min_qt;
const struct rtw89_ple_quota *ple_max_qt;
+ /* for WiFi 7 chips below */
+ const struct rtw89_rsvd_quota *rsvd_qt;
+ const struct rtw89_dle_rsvd_size *rsvd0_size;
+ const struct rtw89_dle_rsvd_size *rsvd1_size;
};
struct rtw89_reg_def {
@@ -3319,6 +3381,12 @@ struct rtw89_reg5_def {
u32 data;
};
+struct rtw89_reg_imr {
+ u32 addr;
+ u32 clr;
+ u32 set;
+};
+
struct rtw89_phy_table {
const struct rtw89_reg2_def *regs;
u32 n_regs;
@@ -3528,6 +3596,11 @@ struct rtw89_imr_info {
u32 tmac_imr_set;
};
+struct rtw89_imr_table {
+ const struct rtw89_reg_imr *regs;
+ u32 n_regs;
+};
+
struct rtw89_xtal_info {
u32 xcap_reg;
u32 sc_xo_mask;
@@ -3559,6 +3632,22 @@ struct rtw89_dig_regs {
struct rtw89_reg_def p1_s20_pagcugc_en;
};
+struct rtw89_edcca_regs {
+ u32 edcca_level;
+ u32 edcca_mask;
+ u32 edcca_p_mask;
+ u32 ppdu_level;
+ u32 ppdu_mask;
+ u32 rpt_a;
+ u32 rpt_b;
+ u32 rpt_sel;
+ u32 rpt_sel_mask;
+ u32 rpt_sel_be;
+ u32 rpt_sel_be_mask;
+ u32 tx_collision_t2r_st;
+ u32 tx_collision_t2r_st_mask;
+};
+
struct rtw89_phy_ul_tb_info {
bool dyn_tb_tri_en;
u8 def_if_bandedge;
@@ -3619,8 +3708,8 @@ struct rtw89_chip_info {
u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
- u8 wde_qempty_acq_num;
- u8 wde_qempty_mgq_sel;
+ u8 wde_qempty_acq_grpnum;
+ u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
u8 support_chanctx_num;
u8 support_bands;
@@ -3638,6 +3727,7 @@ struct rtw89_chip_info {
u8 bacam_num;
u8 bacam_dynamic_num;
enum rtw89_bacam_ver bacam_ver;
+ u8 ppdu_max_usr;
u8 sec_ctrl_efuse_size;
u32 physical_efuse_size;
@@ -3647,6 +3737,7 @@ struct rtw89_chip_info {
u32 dav_log_efuse_size;
u32 phycap_addr;
u32 phycap_size;
+ const struct rtw89_efuse_block_cfg *efuse_blocks;
const struct rtw89_pwr_cfg * const *pwr_on_seq;
const struct rtw89_pwr_cfg * const *pwr_off_seq;
@@ -3704,11 +3795,13 @@ struct rtw89_chip_info {
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
const struct rtw89_imr_info *imr_info;
+ const struct rtw89_imr_table *imr_dmac_table;
+ const struct rtw89_imr_table *imr_cmac_table;
const struct rtw89_rrsr_cfgs *rrsr_cfgs;
struct rtw89_reg_def bss_clr_vld;
u32 bss_clr_map_reg;
u32 dma_ch_mask;
- u32 edcca_lvl_reg;
+ const struct rtw89_edcca_regs *edcca_regs;
const struct wiphy_wowlan_support *wowlan_stub;
const struct rtw89_xtal_info *xtal_info;
};
@@ -3732,8 +3825,10 @@ enum rtw89_hcifc_mode {
};
struct rtw89_dle_info {
+ const struct rtw89_rsvd_quota *rsvd_qt;
enum rtw89_qta_mode qta_mode;
u16 ple_pg_size;
+ u16 ple_free_pg;
u16 c0_rx_qta;
u16 c1_rx_qta;
};
@@ -3858,6 +3953,8 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_table *bb_gain;
struct rtw89_phy_table *rf_radio[RF_PATH_MAX];
struct rtw89_phy_table *rf_nctl;
+ struct rtw89_fw_txpwr_track_cfg *txpwr_trk;
+ struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
};
struct rtw89_fw_info {
@@ -3977,6 +4074,17 @@ struct rtw89_sub_entity {
struct rtw89_chanctx_cfg *cfg;
};
+struct rtw89_edcca_bak {
+ u8 a;
+ u8 p;
+ u8 ppdu;
+ u8 th_old;
+};
+
+enum rtw89_dm_type {
+ RTW89_DM_DYNAMIC_EDCCA,
+};
+
struct rtw89_hal {
u32 rx_fltr;
u8 cv;
@@ -4001,7 +4109,8 @@ struct rtw89_hal {
bool entity_pause;
enum rtw89_entity_mode entity_mode;
- u32 edcca_bak;
+ struct rtw89_edcca_bak edcca_bak;
+ u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */
};
#define RTW89_MAX_MAC_ID_NUM 128
@@ -4009,6 +4118,9 @@ struct rtw89_hal {
enum rtw89_flags {
RTW89_FLAG_POWERON,
+ RTW89_FLAG_DMAC_FUNC,
+ RTW89_FLAG_CMAC0_FUNC,
+ RTW89_FLAG_CMAC1_FUNC,
RTW89_FLAG_FW_RDY,
RTW89_FLAG_RUNNING,
RTW89_FLAG_BFEE_MON,
@@ -4312,6 +4424,7 @@ struct rtw89_power_trim_info {
bool pg_pa_bias_trim;
u8 thermal_trim[RF_PATH_MAX];
u8 pa_bias_trim[RF_PATH_MAX];
+ u8 pad_bias_trim[RF_PATH_MAX];
};
struct rtw89_regd {
@@ -4319,9 +4432,12 @@ struct rtw89_regd {
u8 txpwr_regd[RTW89_BAND_NUM];
};
+#define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX
+
struct rtw89_regulatory_info {
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
+ DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
};
enum rtw89_ifs_clm_application {
@@ -4796,6 +4912,11 @@ static inline void rtw89_hci_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
return rtwdev->hci.ops->tx_kick_off(rtwdev, txch);
}
+static inline int rtw89_hci_mac_pre_deinit(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.ops->mac_pre_deinit(rtwdev);
+}
+
static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
bool drop)
{
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index a3f795d24..44829a148 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -3330,13 +3330,14 @@ out:
static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_cpuio_ctrl ctrl_para = {0};
u16 pkt_id;
int ret;
rtw89_leave_ps_mode(rtwdev);
- ret = rtw89_mac_dle_buf_req(rtwdev, 0x20, true, &pkt_id);
+ ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
if (ret)
return ret;
@@ -3348,7 +3349,7 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
- if (rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true))
+ if (mac->set_cpuio(rtwdev, &ctrl_para, true))
return -EFAULT;
return 0;
@@ -3770,6 +3771,58 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
return 0;
}
+#define DM_INFO(type) {RTW89_DM_ ## type, #type}
+
+static const struct rtw89_disabled_dm_info {
+ enum rtw89_dm_type type;
+ const char *name;
+} rtw89_disabled_dm_infos[] = {
+ DM_INFO(DYNAMIC_EDCCA),
+};
+
+static int
+rtw89_debug_priv_disable_dm_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ const struct rtw89_disabled_dm_info *info;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 disabled;
+ int i;
+
+ seq_printf(m, "Disabled DM: 0x%x\n", hal->disabled_dm_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_disabled_dm_infos); i++) {
+ info = &rtw89_disabled_dm_infos[i];
+ disabled = BIT(info->type) & hal->disabled_dm_bitmap;
+
+ seq_printf(m, "[%d] %s: %c\n", info->type, info->name,
+ disabled ? 'X' : 'O');
+ }
+
+ return 0;
+}
+
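+/* Illustrative usage, not part of the change: writing a bitmap of
+ * enum rtw89_dm_type bits disables the matching dynamic mechanisms, e.g.
+ *   echo 1 > /sys/kernel/debug/ieee80211/phy0/rtw89/disable_dm
+ * sets BIT(RTW89_DM_DYNAMIC_EDCCA) and freezes dynamic EDCCA updates.
+ */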
+static ssize_t
+rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 conf;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &conf);
+ if (ret)
+ return -EINVAL;
+
+ hal->disabled_dm_bitmap = conf;
+
+ return count;
+}
+
static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
.cb_read = rtw89_debug_priv_read_reg_get,
.cb_write = rtw89_debug_priv_read_reg_select,
@@ -3845,6 +3898,11 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
.cb_read = rtw89_debug_priv_stations_get,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
+ .cb_read = rtw89_debug_priv_disable_dm_get,
+ .cb_write = rtw89_debug_priv_disable_dm_set,
+};
+
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
@@ -3885,13 +3943,13 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_w(fw_log_manual);
rtw89_debugfs_add_r(phy_info);
rtw89_debugfs_add_r(stations);
+ rtw89_debugfs_add_rw(disable_dm);
}
#endif
#ifdef CONFIG_RTW89_DEBUGMSG
-void __rtw89_debug(struct rtw89_dev *rtwdev,
- enum rtw89_debug_mask mask,
- const char *fmt, ...)
+void rtw89_debug(struct rtw89_dev *rtwdev, enum rtw89_debug_mask mask,
+ const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -3907,5 +3965,5 @@ void __rtw89_debug(struct rtw89_dev *rtwdev,
va_end(args);
}
-EXPORT_SYMBOL(__rtw89_debug);
+EXPORT_SYMBOL(rtw89_debug);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 079269bb5..800ea5987 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -29,6 +29,8 @@ enum rtw89_debug_mask {
RTW89_DBG_WOW = BIT(18),
RTW89_DBG_UL_TB = BIT(19),
RTW89_DBG_CHAN = BIT(20),
+ RTW89_DBG_ACPI = BIT(21),
+ RTW89_DBG_EDCCA = BIT(22),
RTW89_DBG_UNEXP = BIT(31),
};
@@ -57,12 +59,10 @@ static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {}
#ifdef CONFIG_RTW89_DEBUGMSG
extern unsigned int rtw89_debug_mask;
-#define rtw89_debug(rtwdev, a...) __rtw89_debug(rtwdev, ##a)
__printf(3, 4)
-void __rtw89_debug(struct rtw89_dev *rtwdev,
- enum rtw89_debug_mask mask,
- const char *fmt, ...);
+void rtw89_debug(struct rtw89_dev *rtwdev, enum rtw89_debug_mask mask,
+ const char *fmt, ...);
static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev,
enum rtw89_debug_mask mask,
const char *prefix_str,
@@ -73,6 +73,12 @@ static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev,
print_hex_dump_bytes(prefix_str, DUMP_PREFIX_OFFSET, buf, len);
}
+
+static inline bool rtw89_debug_is_enabled(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask)
+{
+ return !!(rtw89_debug_mask & mask);
+}
#else
static inline void rtw89_debug(struct rtw89_dev *rtwdev,
enum rtw89_debug_mask mask,
@@ -81,6 +87,11 @@ static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev,
enum rtw89_debug_mask mask,
const char *prefix_str,
const void *buf, size_t len) {}
+static inline bool rtw89_debug_is_enabled(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask)
+{
+ return false;
+}
#endif
#endif
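
Usage note: rtw89_debug_is_enabled() gives hot paths a cheap gate for
expensive diagnostic work, and compiles to a constant false when
CONFIG_RTW89_DEBUGMSG is off. A minimal sketch (the report helper is
hypothetical):

	if (rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		edcca_collect_verbose_report(rtwdev); /* hypothetical helper */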
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c
index 2aaf4d013..e1236079a 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse.c
@@ -114,6 +114,11 @@ static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
return 0;
}
+int rtw89_cnv_efuse_state_ax(struct rtw89_dev *rtwdev, bool idle)
+{
+ return 0;
+}
+
static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
u32 dump_addr, u32 dump_size)
{
@@ -231,7 +236,7 @@ static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
return 0;
}
-int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
+int rtw89_parse_efuse_map_ax(struct rtw89_dev *rtwdev)
{
u32 phy_size = rtwdev->chip->physical_efuse_size;
u32 log_size = rtwdev->chip->logical_efuse_size;
@@ -286,7 +291,7 @@ int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, full_log_size);
- ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map);
+ ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map, RTW89_EFUSE_BLOCK_IGNORE);
if (ret) {
rtw89_warn(rtwdev, "failed to read efuse map\n");
goto out_free;
@@ -300,7 +305,7 @@ out_free:
return ret;
}
-int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev)
+int rtw89_parse_phycap_map_ax(struct rtw89_dev *rtwdev)
{
u32 phycap_addr = rtwdev->chip->phycap_addr;
u32 phycap_size = rtwdev->chip->phycap_size;
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
index 79071aff2..5c6787179 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.h
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -7,8 +7,21 @@
#include "core.h"
-int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev);
-int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev);
+#define RTW89_EFUSE_BLOCK_ID_MASK GENMASK(31, 16)
+#define RTW89_EFUSE_BLOCK_SIZE_MASK GENMASK(15, 0)
+#define RTW89_EFUSE_MAX_BLOCK_SIZE 0x10000
+
+struct rtw89_efuse_block_cfg {
+ u32 offset;
+ u32 size;
+};
+
+int rtw89_parse_efuse_map_ax(struct rtw89_dev *rtwdev);
+int rtw89_parse_phycap_map_ax(struct rtw89_dev *rtwdev);
+int rtw89_cnv_efuse_state_ax(struct rtw89_dev *rtwdev, bool idle);
+int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev);
+int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev);
+int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle);
int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv);
#endif
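
A note on the encoding used above: struct rtw89_efuse_block_cfg::offset is a
packed field rather than a plain byte offset. The block id sits in bits 31:16
and the start offset within that block in bits 15:0, so
RTW89_EFUSE_BLOCK_SIZE_MASK, despite its name, extracts an offset. A minimal
sketch of a table entry under that assumption (the values are hypothetical;
the real tables live in the chip-specific sources):

	static const struct rtw89_efuse_block_cfg rf_blk = {
		.offset = u32_encode_bits(RTW89_EFUSE_BLOCK_RF,
					  RTW89_EFUSE_BLOCK_ID_MASK) |
			  u32_encode_bits(0x50, RTW89_EFUSE_BLOCK_SIZE_MASK),
		.size = 0x60,
	};

rtw89_eeprom_parser_be() below recovers both halves with u32_get_bits().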
diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c
new file mode 100644
index 000000000..8e8b7cd31
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "efuse.h"
+#include "mac.h"
+#include "reg.h"
+
+static void rtw89_enable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool aphy_patch = true;
+
+ if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
+ aphy_patch = false;
+
+ rtw89_write8_set(rtwdev, R_BE_PMC_DBG_CTRL2, B_BE_SYSON_DIS_PMCR_BE_WRMSK);
+
+ if (aphy_patch) {
+ rtw89_write16_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_S);
+ mdelay(1);
+ rtw89_write16_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_B);
+ rtw89_write16_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_ISO_EB2CORE);
+ }
+
+ rtw89_write32_set(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_EF_BURST);
+}
+
+static void rtw89_disable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool aphy_patch = true;
+
+ if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
+ aphy_patch = false;
+
+ if (aphy_patch) {
+ rtw89_write16_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_ISO_EB2CORE);
+ rtw89_write16_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_B);
+ mdelay(1);
+ rtw89_write16_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_S);
+ }
+
+ rtw89_write8_clr(rtwdev, R_BE_PMC_DBG_CTRL2, B_BE_SYSON_DIS_PMCR_BE_WRMSK);
+ rtw89_write32_clr(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_EF_BURST);
+}
+
+static int rtw89_dump_physical_efuse_map_ddv_be(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
+{
+ u32 efuse_ctl;
+ u32 addr;
+ u32 data;
+ int ret;
+
+ if (!IS_ALIGNED(dump_addr, 4) || !IS_ALIGNED(dump_size, 4)) {
+ rtw89_err(rtwdev, "Efuse addr 0x%x or size 0x%x not aligned\n",
+ dump_addr, dump_size);
+ return -EINVAL;
+ }
+
+ rtw89_enable_efuse_pwr_cut_ddv_be(rtwdev);
+
+ for (addr = dump_addr; addr < dump_addr + dump_size; addr += 4, map += 4) {
+ efuse_ctl = u32_encode_bits(addr, B_BE_EF_ADDR_MASK);
+ rtw89_write32(rtwdev, R_BE_EFUSE_CTRL, efuse_ctl & ~B_BE_EF_RDY);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, efuse_ctl,
+ efuse_ctl & B_BE_EF_RDY, 1, 1000000,
+ true, rtwdev, R_BE_EFUSE_CTRL);
+ if (ret)
+ return -EBUSY;
+
+ data = rtw89_read32(rtwdev, R_BE_EFUSE_CTRL_1_V1);
+ *((__le32 *)map) = cpu_to_le32(data);
+ }
+
+ rtw89_disable_efuse_pwr_cut_ddv_be(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_dump_physical_efuse_map_dav_be(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
+{
+ u32 addr;
+ u8 val8;
+ int err;
+ int ret;
+
+ for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR, addr & 0xff,
+ XTAL_SI_LOW_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
+ XTAL_SI_HIGH_ADDR_MASK);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
+ XTAL_SI_MODE_SEL_MASK);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
+ !err && (val8 & XTAL_SI_RDY),
+ 1, 10000, false,
+ rtwdev, XTAL_SI_CTRL, &val8);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read dav efuse\n");
+ return ret;
+ }
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
+ if (ret)
+ return ret;
+ *map++ = val8;
+ }
+
+ return 0;
+}
+
+int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle)
+{
+ u32 val;
+ int ret = 0;
+
+ if (idle) {
+ rtw89_write32_set(rtwdev, R_BE_WL_BT_PWR_CTRL, B_BE_BT_DISN_EN);
+ } else {
+ rtw89_write32_clr(rtwdev, R_BE_WL_BT_PWR_CTRL, B_BE_BT_DISN_EN);
+
+ ret = read_poll_timeout(rtw89_read32_mask, val,
+ val == MAC_AX_SYS_ACT, 50, 5000,
+ false, rtwdev, R_BE_IC_PWR_STATE,
+ B_BE_WHOLE_SYS_PWR_STE_MASK);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to convert efuse state\n");
+ }
+
+ return ret;
+}
+
+static int rtw89_dump_physical_efuse_map_be(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size, bool dav)
+{
+ int ret;
+
+ if (!map || dump_size == 0)
+ return 0;
+
+ rtw89_cnv_efuse_state_be(rtwdev, false);
+
+ if (dav) {
+ ret = rtw89_dump_physical_efuse_map_dav_be(rtwdev, map,
+ dump_addr, dump_size);
+ if (ret)
+ return ret;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "phy_map dav: ", map, dump_size);
+ } else {
+ ret = rtw89_dump_physical_efuse_map_ddv_be(rtwdev, map,
+ dump_addr, dump_size);
+ if (ret)
+ return ret;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "phy_map ddv: ", map, dump_size);
+ }
+
+ rtw89_cnv_efuse_state_be(rtwdev, true);
+
+ return 0;
+}
+
+#define EFUSE_HDR_CONST_MASK GENMASK(23, 20)
+#define EFUSE_HDR_PAGE_MASK GENMASK(19, 17)
+#define EFUSE_HDR_OFFSET_MASK GENMASK(16, 4)
+#define EFUSE_HDR_OFFSET_DAV_MASK GENMASK(11, 4)
+#define EFUSE_HDR_WORD_EN_MASK GENMASK(3, 0)
+
+#define invalid_efuse_header_be(hdr1, hdr2, hdr3) \
+ ((hdr1) == 0xff || (hdr2) == 0xff || (hdr3) == 0xff)
+#define invalid_efuse_content_be(word_en, i) \
+ (((word_en) & BIT(i)) != 0x0)
+#define get_efuse_blk_idx_be(hdr1, hdr2, hdr3) \
+ (((hdr1) << 16) | ((hdr2) << 8) | (hdr3))
+#define block_idx_to_logical_idx_be(blk_idx, i) \
+ (((blk_idx) << 3) + ((i) << 1))
+
+#define invalid_efuse_header_dav_be(hdr1, hdr2) \
+ ((hdr1) == 0xff || (hdr2) == 0xff)
+#define get_efuse_blk_idx_dav_be(hdr1, hdr2) \
+ (((hdr1) << 8) | (hdr2))
+
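+/* Worked example (illustrative): a 3-byte DDV header 0x21 0x00 0x5e packs
+ * to hdr = 0x21005e, so blk_page = 0, blk_idx = 0x1005 and word_en = 0xe;
+ * a *cleared* word_en bit i marks word pair i as present, so only word
+ * pair 0 follows this header.
+ */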
+static int rtw89_eeprom_parser_be(struct rtw89_dev *rtwdev,
+ const u8 *phy_map, u32 phy_size, u8 *log_map,
+ const struct rtw89_efuse_block_cfg *efuse_block)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ enum rtw89_efuse_block blk_page, page;
+ u32 size = efuse_block->size;
+ u32 phy_idx, log_idx;
+ u32 hdr, page_offset;
+ u8 hdr1, hdr2, hdr3;
+ u8 i, val0, val1;
+ u32 min, max;
+ u16 blk_idx;
+ u8 word_en;
+
+ page = u32_get_bits(efuse_block->offset, RTW89_EFUSE_BLOCK_ID_MASK);
+ page_offset = u32_get_bits(efuse_block->offset, RTW89_EFUSE_BLOCK_SIZE_MASK);
+
+ min = ALIGN_DOWN(page_offset, 2);
+ max = ALIGN(page_offset + size, 2);
+
+ memset(log_map, 0xff, size);
+
+ phy_idx = chip->sec_ctrl_efuse_size;
+
+ do {
+ if (page == RTW89_EFUSE_BLOCK_ADIE) {
+ hdr1 = phy_map[phy_idx];
+ hdr2 = phy_map[phy_idx + 1];
+ if (invalid_efuse_header_dav_be(hdr1, hdr2))
+ break;
+
+ phy_idx += 2;
+
+ hdr = get_efuse_blk_idx_dav_be(hdr1, hdr2);
+
+ blk_page = RTW89_EFUSE_BLOCK_ADIE;
+ blk_idx = u32_get_bits(hdr, EFUSE_HDR_OFFSET_DAV_MASK);
+ word_en = u32_get_bits(hdr, EFUSE_HDR_WORD_EN_MASK);
+ } else {
+ hdr1 = phy_map[phy_idx];
+ hdr2 = phy_map[phy_idx + 1];
+ hdr3 = phy_map[phy_idx + 2];
+ if (invalid_efuse_header_be(hdr1, hdr2, hdr3))
+ break;
+
+ phy_idx += 3;
+
+ hdr = get_efuse_blk_idx_be(hdr1, hdr2, hdr3);
+
+ blk_page = u32_get_bits(hdr, EFUSE_HDR_PAGE_MASK);
+ blk_idx = u32_get_bits(hdr, EFUSE_HDR_OFFSET_MASK);
+ word_en = u32_get_bits(hdr, EFUSE_HDR_WORD_EN_MASK);
+ }
+
+ if (blk_idx >= RTW89_EFUSE_MAX_BLOCK_SIZE >> 3) {
+ rtw89_err(rtwdev, "[ERR]efuse idx:0x%X\n", phy_idx - 3);
+ rtw89_err(rtwdev, "[ERR]read hdr:0x%X\n", hdr);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (invalid_efuse_content_be(word_en, i))
+ continue;
+
+ if (phy_idx >= phy_size - 1)
+ return -EINVAL;
+
+ log_idx = block_idx_to_logical_idx_be(blk_idx, i);
+
+ if (blk_page == page && log_idx >= min && log_idx < max) {
+ val0 = phy_map[phy_idx];
+ val1 = phy_map[phy_idx + 1];
+
+ if (log_idx == min && page_offset > min) {
+ log_map[log_idx - page_offset + 1] = val1;
+ } else if (log_idx + 2 == max &&
+ page_offset + size < max) {
+ log_map[log_idx - page_offset] = val0;
+ } else {
+ log_map[log_idx - page_offset] = val0;
+ log_map[log_idx - page_offset + 1] = val1;
+ }
+ }
+ phy_idx += 2;
+ }
+ } while (phy_idx < phy_size);
+
+ return 0;
+}
+
+static int rtw89_parse_logical_efuse_block_be(struct rtw89_dev *rtwdev,
+ const u8 *phy_map, u32 phy_size,
+ enum rtw89_efuse_block block)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_efuse_block_cfg *efuse_block;
+ u8 *log_map;
+ int ret;
+
+ efuse_block = &chip->efuse_blocks[block];
+
+ log_map = kmalloc(efuse_block->size, GFP_KERNEL);
+ if (!log_map)
+ return -ENOMEM;
+
+ ret = rtw89_eeprom_parser_be(rtwdev, phy_map, phy_size, log_map, efuse_block);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse logical block %d\n", block);
+ goto out_free;
+ }
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, efuse_block->size);
+
+ ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map, block);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read efuse map\n");
+ goto out_free;
+ }
+
+out_free:
+ kfree(log_map);
+
+ return ret;
+}
+
+int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev)
+{
+ u32 phy_size = rtwdev->chip->physical_efuse_size;
+ u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
+ enum rtw89_efuse_block block;
+ u8 *phy_map = NULL;
+ u8 *dav_phy_map = NULL;
+ int ret;
+
+ if (rtw89_read16(rtwdev, R_BE_SYS_WL_EFUSE_CTRL) & B_BE_AUTOLOAD_SUS)
+ rtwdev->efuse.valid = true;
+ else
+ rtw89_warn(rtwdev, "failed to check efuse autoload\n");
+
+ phy_map = kmalloc(phy_size, GFP_KERNEL);
+ if (dav_phy_size)
+ dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);
+
+ if (!phy_map || (dav_phy_size && !dav_phy_map)) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, phy_map, 0, phy_size, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
+ goto out_free;
+ }
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, dav_phy_map, 0, dav_phy_size, true);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
+ goto out_free;
+ }
+
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
+ block = RTW89_EFUSE_BLOCK_HCI_DIG_USB;
+ else
+ block = RTW89_EFUSE_BLOCK_HCI_DIG_PCIE_SDIO;
+
+ ret = rtw89_parse_logical_efuse_block_be(rtwdev, phy_map, phy_size, block);
+ if (ret) {
+		rtw89_warn(rtwdev, "failed to parse efuse logic block %d\n",
+			   block);
+ goto out_free;
+ }
+
+ ret = rtw89_parse_logical_efuse_block_be(rtwdev, phy_map, phy_size,
+ RTW89_EFUSE_BLOCK_RF);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to parse efuse logic block %d\n",
+ RTW89_EFUSE_BLOCK_RF);
+ goto out_free;
+ }
+
+out_free:
+ kfree(dav_phy_map);
+ kfree(phy_map);
+
+ return ret;
+}
+
+int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev)
+{
+ u32 phycap_addr = rtwdev->chip->phycap_addr;
+ u32 phycap_size = rtwdev->chip->phycap_size;
+ u8 *phycap_map = NULL;
+ int ret = 0;
+
+ if (!phycap_size)
+ return 0;
+
+ phycap_map = kmalloc(phycap_size, GFP_KERNEL);
+ if (!phycap_map)
+ return -ENOMEM;
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, phycap_map,
+ phycap_addr, phycap_size, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump phycap map\n");
+ goto out_free;
+ }
+
+ ret = rtwdev->chip->ops->read_phycap(rtwdev, phycap_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read phycap map\n");
+ goto out_free;
+ }
+
+out_free:
+ kfree(phycap_map);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 313ed4c45..09684cea9 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -401,10 +401,14 @@ int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
const union rtw89_fw_element_arg arg)
{
enum rtw89_fw_type type = arg.fw_type;
+ struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_fw_suit *fw_suit;
+ if (hal->cv != elm->u.bbmcu.cv)
+ return 1; /* ignore this element */
+
fw_suit = rtw89_fw_suit_get(rtwdev, type);
- fw_suit->data = elm->u.common.contents;
+ fw_suit->data = elm->u.bbmcu.contents;
fw_suit->size = le32_to_cpu(elm->size);
return rtw89_fw_update_ver(rtwdev, type, fw_suit);
@@ -453,6 +457,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -658,6 +663,97 @@ setup:
return 0;
}
+static
+int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 needed_bitmap = 0;
+ u32 offset = 0;
+ int subband;
+ u32 bitmap;
+ int type;
+
+ if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
+ needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
+ if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
+ needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
+ if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
+ needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
+
+ bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
+
+ if ((bitmap & needed_bitmap) != needed_bitmap) {
+ rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n",
+ needed_bitmap, bitmap);
+ return -ENOENT;
+ }
+
+ elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
+ if (!elm_info->txpwr_trk)
+ return -ENOMEM;
+
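+ /* Walk the bitmap LSB-first; each set bit selects one delta table.
+  * The number of subband rows consumed depends on the band (6 GHz: 4,
+  * 5 GHz: 3, 2 GHz: 1), and 'offset' advances through the contents.
+  */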
+ for (type = 0; bitmap; type++, bitmap >>= 1) {
+ if (!(bitmap & BIT(0)))
+ continue;
+
+ if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
+ type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
+ subband = 4;
+ else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
+ type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
+ subband = 3;
+ else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
+ type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
+ subband = 1;
+ else
+ break;
+
+ elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
+
+ offset += subband;
+ if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
+ offset, le32_to_cpu(elm->size));
+ kfree(elm_info->txpwr_trk);
+ elm_info->txpwr_trk = NULL;
+
+ return -EFAULT;
+}
+
+static
+int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ u8 rfk_id;
+
+ if (elm_info->rfk_log_fmt)
+ goto allocated;
+
+ elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
+ if (!elm_info->rfk_log_fmt)
+ return 1; /* this is an optional element, so just ignore this */
+
+allocated:
+ rfk_id = elm->u.rfk_log_fmt.rfk_id;
+ if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
+ return 1;
+
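+ /* Keep a reference to the element; the format strings it carries
+  * are looked up later when parsing RFK log C2H events.
+  */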
+ elm_info->rfk_log_fmt->elm[rfk_id] = elm;
+
+ return 0;
+}
+
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@@ -710,6 +806,12 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
rtw89_fw_recognize_txpwr_from_elm,
{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
},
+ [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
+ rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
+ },
+ [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
+ rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
+ },
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@@ -750,6 +852,8 @@ int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
goto next;
ret = handler->fn(rtwdev, hdr, handler->arg);
+ if (ret == 1) /* ignore this element */
+ goto next;
if (ret)
return ret;
@@ -956,16 +1060,24 @@ static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ u32 addr = R_AX_DBG_PORT_SEL;
u32 val32;
u16 index;
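+ /* BE chips expose the WCPU program counter at a dedicated register;
+  * AX chips must first mux the debug port to the FW program counter.
+  */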
+ if (chip_gen == RTW89_CHIP_BE) {
+ addr = R_BE_WLCPU_PORT_PC;
+ goto dump;
+ }
+
rtw89_write32(rtwdev, R_AX_DBG_CTRL,
FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
+dump:
for (index = 0; index < 15; index++) {
- val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
+ val32 = rtw89_read32(rtwdev, addr);
rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
fsleep(10);
}
@@ -1135,6 +1247,9 @@ static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
+
+ kfree(elm_info->txpwr_trk);
+ kfree(elm_info->rfk_log_fmt);
}
void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
@@ -2215,6 +2330,41 @@ fail:
return ret;
}
+int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
+{
+ struct rtw89_h2c_notify_dbcc *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
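+ /* One-word H2C telling firmware whether DBCC is being enabled. */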
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
+
+ h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
+ H2C_FUNC_NOTIFY_DBCC, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause)
{
@@ -3451,6 +3601,8 @@ static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
return false;
case RTW89_C2H_CAT_MAC:
return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
+ case RTW89_C2H_CAT_OUTSRC:
+ return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
}
}
@@ -3867,6 +4019,8 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
if (info->channel_6ghz &&
ch_info->pri_ch != info->channel_6ghz)
continue;
+ else if (info->channel_6ghz && probe_count != 0)
+ ch_info->period += RTW89_CHANNEL_TIME_6G;
ch_info->pkt_id[probe_count++] = info->id;
if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
break;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index d4db9ab0b..01016588b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -1685,6 +1685,12 @@ static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30));
}
+struct rtw89_h2c_notify_dbcc {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_NOTIFY_DBCC_EN BIT(0)
+
static inline void SET_GENERAL_PKT_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
@@ -3426,6 +3432,8 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ = 15,
RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT = 16,
RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU = 17,
+ RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18,
+ RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -3446,6 +3454,7 @@ enum rtw89_fw_element_id {
BIT(RTW89_FW_ELEMENT_ID_RADIO_A) | \
BIT(RTW89_FW_ELEMENT_ID_RADIO_B) | \
BIT(RTW89_FW_ELEMENT_ID_RF_NCTL) | \
+ BIT(RTW89_FW_ELEMENT_ID_TXPWR_TRK) | \
BITS_OF_RTW89_TXPWR_FW_ELEMENTS)
struct __rtw89_fw_txpwr_element {
@@ -3457,6 +3466,59 @@ struct __rtw89_fw_txpwr_element {
u8 content[];
} __packed;
+enum rtw89_fw_txpwr_trk_type {
+ __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START = 0,
+ RTW89_FW_TXPWR_TRK_TYPE_6GB_N = 0,
+ RTW89_FW_TXPWR_TRK_TYPE_6GB_P = 1,
+ RTW89_FW_TXPWR_TRK_TYPE_6GA_N = 2,
+ RTW89_FW_TXPWR_TRK_TYPE_6GA_P = 3,
+ __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX = 3,
+
+ __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START = 4,
+ RTW89_FW_TXPWR_TRK_TYPE_5GB_N = 4,
+ RTW89_FW_TXPWR_TRK_TYPE_5GB_P = 5,
+ RTW89_FW_TXPWR_TRK_TYPE_5GA_N = 6,
+ RTW89_FW_TXPWR_TRK_TYPE_5GA_P = 7,
+ __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX = 7,
+
+ __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START = 8,
+ RTW89_FW_TXPWR_TRK_TYPE_2GB_N = 8,
+ RTW89_FW_TXPWR_TRK_TYPE_2GB_P = 9,
+ RTW89_FW_TXPWR_TRK_TYPE_2GA_N = 10,
+ RTW89_FW_TXPWR_TRK_TYPE_2GA_P = 11,
+ RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_B_N = 12,
+ RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_B_P = 13,
+ RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_N = 14,
+ RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_P = 15,
+ __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX = 15,
+
+ RTW89_FW_TXPWR_TRK_TYPE_NR,
+};
+
+struct rtw89_fw_txpwr_track_cfg {
+ const s8 (*delta[RTW89_FW_TXPWR_TRK_TYPE_NR])[DELTA_SWINGIDX_SIZE];
+};
+
+#define RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ \
+ (BIT(RTW89_FW_TXPWR_TRK_TYPE_6GB_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_6GB_P) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_6GA_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_6GA_P))
+#define RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ \
+ (BIT(RTW89_FW_TXPWR_TRK_TYPE_5GB_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_5GB_P) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_5GA_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_5GA_P))
+#define RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ \
+ (BIT(RTW89_FW_TXPWR_TRK_TYPE_2GB_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2GB_P) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2GA_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2GA_P) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_B_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_B_P) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_N) | \
+ BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_P))
+
struct rtw89_fw_element_hdr {
__le32 id; /* enum rtw89_fw_element_id */
__le32 size; /* exclude header size */
@@ -3477,6 +3539,23 @@ struct rtw89_fw_element_hdr {
__le32 data;
} __packed regs[];
} __packed reg2;
+ struct {
+ u8 cv;
+ u8 priv[7];
+ u8 contents[];
+ } __packed bbmcu;
+ struct {
+ __le32 bitmap; /* bitmap of enum rtw89_fw_txpwr_trk_type */
+ __le32 rsvd;
+ s8 contents[][DELTA_SWINGIDX_SIZE];
+ } __packed txpwr_trk;
+ struct {
+ u8 nr;
+ u8 rsvd[3];
+ u8 rfk_id; /* enum rtw89_phy_c2h_rfk_log_func */
+ u8 rsvd1[3];
+ __le16 offset[];
+ } __packed rfk_log_fmt;
struct __rtw89_fw_txpwr_element txpwr;
} __packed u;
} __packed;
@@ -3577,6 +3656,7 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_CL_MAC_MEDIA_RPT 0x8
#define H2C_FUNC_MAC_JOININFO 0x0
#define H2C_FUNC_MAC_FWROLE_MAINTAIN 0x4
+#define H2C_FUNC_NOTIFY_DBCC 0x5
/* CLASS 9 - FW offload */
#define H2C_CL_MAC_FW_OFLD 0x9
@@ -3649,9 +3729,78 @@ struct rtw89_fw_h2c_rf_get_mccch {
__le32 current_band_type;
} __packed;
-#define RTW89_FW_RSVD_PLE_SIZE 0x800
+enum rtw89_rf_log_type {
+ RTW89_RF_RUN_LOG = 0,
+ RTW89_RF_RPT_LOG = 1,
+};
-#define RTW89_WCPU_BASE_MASK GENMASK(27, 0)
+struct rtw89_c2h_rf_log_hdr {
+ u8 type; /* enum rtw89_rf_log_type */
+ __le16 len;
+ u8 content[];
+} __packed;
+
+struct rtw89_c2h_rf_run_log {
+ __le32 fmt_idx;
+ __le32 arg[4];
+} __packed;
+
+struct rtw89_c2h_rf_dpk_rpt_log {
+ u8 ver;
+ u8 idx[2];
+ u8 band[2];
+ u8 bw[2];
+ u8 ch[2];
+ u8 path_ok[2];
+ u8 txagc[2];
+ u8 ther[2];
+ u8 gs[2];
+ u8 dc_i[4];
+ u8 dc_q[4];
+ u8 corr_val[2];
+ u8 corr_idx[2];
+ u8 is_timeout[2];
+ u8 rxbb_ov[2];
+ u8 rsvd;
+} __packed;
+
+struct rtw89_c2h_rf_dack_rpt_log {
+ u8 fwdack_ver;
+ u8 fwdack_rpt_ver;
+ u8 msbk_d[2][2][16];
+ u8 dadck_d[2][2];
+ u8 cdack_d[2][2][2];
+ __le16 addck2_d[2][2][2];
+ u8 adgaink_d[2][2];
+ __le16 biask_d[2][2];
+ u8 addck_timeout;
+ u8 cdack_timeout;
+ u8 dadck_timeout;
+ u8 msbk_timeout;
+ u8 adgaink_timeout;
+ u8 dack_fail;
+} __packed;
+
+struct rtw89_c2h_rf_rxdck_rpt_log {
+ u8 ver;
+ u8 band[2];
+ u8 bw[2];
+ u8 ch[2];
+ u8 timeout[2];
+} __packed;
+
+struct rtw89_c2h_rf_txgapk_rpt_log {
+ __le32 r0x8010[2];
+ __le32 chk_cnt;
+ u8 track_d[2][17];
+ u8 power_d[2][17];
+ u8 is_txgapk_ok;
+ u8 chk_id;
+ u8 ver;
+ u8 rsv1;
+} __packed;
+
+#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
#define RTW89_VALID_FW_BACKTRACE_SIZE(_size) \
@@ -3704,6 +3853,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
enum rtw89_upd_mode upd_mode);
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, bool dis_conn);
+int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en);
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause);
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index d0c7de4e8..c485ef2cc 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -5,6 +5,7 @@
#include "cam.h"
#include "chan.h"
#include "debug.h"
+#include "efuse.h"
#include "fw.h"
#include "mac.h"
#include "pci.h"
@@ -56,8 +57,8 @@ static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset,
return rtw89_read32(rtwdev, mac->indir_access_addr);
}
-int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx,
- enum rtw89_mac_hwmod_sel sel)
+static int rtw89_mac_check_mac_en_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_mac_hwmod_sel sel)
{
u32 val, r_val;
@@ -112,8 +113,7 @@ int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val)
return ret;
}
-static
-int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl)
+int rtw89_mac_dle_dfi_cfg(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl)
{
u32 ctrl_reg, data_reg, ctrl_data;
u32 val;
@@ -153,8 +153,8 @@ int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl)
return 0;
}
-static int dle_dfi_quota(struct rtw89_dev *rtwdev,
- struct rtw89_mac_dle_dfi_quota *quota)
+int rtw89_mac_dle_dfi_quota_cfg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_dle_dfi_quota *quota)
{
struct rtw89_mac_dle_dfi_ctrl ctrl;
int ret;
@@ -162,9 +162,9 @@ static int dle_dfi_quota(struct rtw89_dev *rtwdev,
ctrl.type = quota->dle_type;
ctrl.target = DLE_DFI_TYPE_QUOTA;
ctrl.addr = quota->qtaid;
- ret = dle_dfi_ctrl(rtwdev, &ctrl);
+ ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
if (ret) {
- rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret);
+ rtw89_warn(rtwdev, "[ERR] dle dfi quota %d\n", ret);
return ret;
}
@@ -173,8 +173,8 @@ static int dle_dfi_quota(struct rtw89_dev *rtwdev,
return 0;
}
-static int dle_dfi_qempty(struct rtw89_dev *rtwdev,
- struct rtw89_mac_dle_dfi_qempty *qempty)
+int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_dle_dfi_qempty *qempty)
{
struct rtw89_mac_dle_dfi_ctrl ctrl;
u32 ret;
@@ -182,9 +182,9 @@ static int dle_dfi_qempty(struct rtw89_dev *rtwdev,
ctrl.type = qempty->dle_type;
ctrl.target = DLE_DFI_TYPE_QEMPTY;
ctrl.addr = qempty->grpsel;
- ret = dle_dfi_ctrl(rtwdev, &ctrl);
+ ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
if (ret) {
- rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret);
+ rtw89_warn(rtwdev, "[ERR] dle dfi qempty %d\n", ret);
return ret;
}
@@ -192,7 +192,7 @@ static int dle_dfi_qempty(struct rtw89_dev *rtwdev,
return 0;
}
-static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev)
+static void dump_err_status_dispatcher_ax(struct rtw89_dev *rtwdev)
{
rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ",
rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
@@ -208,7 +208,7 @@ static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev)
rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
}
-static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
+static void rtw89_mac_dump_qta_lost_ax(struct rtw89_dev *rtwdev)
{
struct rtw89_mac_dle_dfi_qempty qempty;
struct rtw89_mac_dle_dfi_quota quota;
@@ -219,7 +219,7 @@ static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
qempty.dle_type = DLE_CTRL_TYPE_PLE;
qempty.grpsel = 0;
qempty.qempty = ~(u32)0;
- ret = dle_dfi_qempty(rtwdev, &qempty);
+ ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
else
@@ -231,19 +231,19 @@ static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
ctrl.type = DLE_CTRL_TYPE_PLE;
ctrl.target = DLE_DFI_TYPE_QLNKTBL;
ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) |
- FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i);
- ret = dle_dfi_ctrl(rtwdev, &ctrl);
+ u32_encode_bits(i, QLNKTBL_ADDR_TBL_IDX_MASK);
+ ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
else
- rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i,
- FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK,
- ctrl.out_data));
+ rtw89_info(rtwdev, "qidx%d pktcnt = %d\n", i,
+ u32_get_bits(ctrl.out_data,
+ QLNKTBL_DATA_SEL1_PKT_CNT_MASK));
}
quota.dle_type = DLE_CTRL_TYPE_PLE;
quota.qtaid = 6;
- ret = dle_dfi_quota(rtwdev, &quota);
+ ret = rtw89_mac_dle_dfi_quota_cfg(rtwdev, &quota);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
else
@@ -251,33 +251,74 @@ static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
quota.rsv_pgnum, quota.use_pgnum);
val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG);
- rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n",
- FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val));
- rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n",
- FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val));
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%x\n",
+ u32_get_bits(val, B_AX_PLE_Q6_MIN_SIZE_MASK));
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%x\n",
+ u32_get_bits(val, B_AX_PLE_Q6_MAX_SIZE_MASK));
+ val = rtw89_read32(rtwdev, R_AX_RX_FLTR_OPT);
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]B_AX_RX_MPDU_MAX_LEN=0x%x\n",
+ u32_get_bits(val, B_AX_RX_MPDU_MAX_LEN_MASK));
+ rtw89_info(rtwdev, "R_AX_RSP_CHK_SIG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RSP_CHK_SIG));
+ rtw89_info(rtwdev, "R_AX_TRXPTCL_RESP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TRXPTCL_RESP_0));
+ rtw89_info(rtwdev, "R_AX_CCA_CONTROL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CCA_CONTROL));
+
+ if (!rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL)) {
+ quota.dle_type = DLE_CTRL_TYPE_PLE;
+ quota.qtaid = 7;
+ ret = rtw89_mac_dle_dfi_quota_cfg(rtwdev, &quota);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "quota7 rsv/use: 0x%x/0x%x\n",
+ quota.rsv_pgnum, quota.use_pgnum);
- dump_err_status_dispatcher(rtwdev);
+ val = rtw89_read32(rtwdev, R_AX_PLE_QTA7_CFG);
+ rtw89_info(rtwdev, "[PLE][CMAC1_RX]min_pgnum=0x%x\n",
+ u32_get_bits(val, B_AX_PLE_Q7_MIN_SIZE_MASK));
+ rtw89_info(rtwdev, "[PLE][CMAC1_RX]max_pgnum=0x%x\n",
+ u32_get_bits(val, B_AX_PLE_Q7_MAX_SIZE_MASK));
+ val = rtw89_read32(rtwdev, R_AX_RX_FLTR_OPT_C1);
+ rtw89_info(rtwdev, "[PLE][CMAC1_RX]B_AX_RX_MPDU_MAX_LEN=0x%x\n",
+ u32_get_bits(val, B_AX_RX_MPDU_MAX_LEN_MASK));
+ rtw89_info(rtwdev, "R_AX_RSP_CHK_SIG_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RSP_CHK_SIG_C1));
+ rtw89_info(rtwdev, "R_AX_TRXPTCL_RESP_0_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TRXPTCL_RESP_0_C1));
+ rtw89_info(rtwdev, "R_AX_CCA_CONTROL_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CCA_CONTROL_C1));
+ }
+
+ rtw89_info(rtwdev, "R_AX_DLE_EMPTY0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DLE_EMPTY0));
+ rtw89_info(rtwdev, "R_AX_DLE_EMPTY1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DLE_EMPTY1));
+
+ dump_err_status_dispatcher_ax(rtwdev);
}
-static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
- enum mac_ax_err_info err)
+void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 dbg, event;
dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO);
- event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg);
+ event = u32_get_bits(dbg, B_AX_L0_TO_L1_EVENT_MASK);
switch (event) {
case MAC_AX_L0_TO_L1_RX_QTA_LOST:
rtw89_info(rtwdev, "quota lost!\n");
- rtw89_mac_dump_qta_lost(rtwdev);
+ mac->dump_qta_lost(rtwdev);
break;
default:
break;
}
}
-static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
+void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 dmac_err;
@@ -357,6 +398,21 @@ static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
rtw89_info(rtwdev, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
}
+ } else if (chip->chip_id == RTL8922A) {
+ rtw89_info(rtwdev, "R_BE_SEC_ERROR_FLAG=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_ERROR_FLAG));
+ rtw89_info(rtwdev, "R_BE_SEC_ERROR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_ERROR_IMR));
+ rtw89_info(rtwdev, "R_BE_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_ENG_CTRL));
+ rtw89_info(rtwdev, "R_BE_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_MPDU_PROC));
+ rtw89_info(rtwdev, "R_BE_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_CAM_ACCESS));
+ rtw89_info(rtwdev, "R_BE_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_CAM_RDATA));
+ rtw89_info(rtwdev, "R_BE_SEC_DEBUG2=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SEC_DEBUG2));
} else {
rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
@@ -393,10 +449,17 @@ static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
}
if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ if (chip->chip_id == RTL8922A) {
+ rtw89_info(rtwdev, "R_BE_INTERRUPT_MASK_REG=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_INTERRUPT_MASK_REG));
+ rtw89_info(rtwdev, "R_BE_INTERRUPT_STS_REG=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_INTERRUPT_STS_REG));
+ } else {
+ rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ }
}
if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
@@ -411,7 +474,7 @@ static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
}
if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
- if (chip->chip_id == RTL8852C) {
+ if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) {
rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
@@ -443,30 +506,41 @@ static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
- rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
- rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
- if (chip->chip_id == RTL8852C) {
- rtw89_info(rtwdev, "R_AX_RX_CTRL0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL0));
- rtw89_info(rtwdev, "R_AX_RX_CTRL1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL1));
- rtw89_info(rtwdev, "R_AX_RX_CTRL2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL2));
+ if (chip->chip_id == RTL8922A) {
+ rtw89_info(rtwdev, "R_BE_WD_CPUQ_OP_3=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_WD_CPUQ_OP_3));
+ rtw89_info(rtwdev, "R_BE_WD_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_WD_CPUQ_OP_STATUS));
+ rtw89_info(rtwdev, "R_BE_PLE_CPUQ_OP_3=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_PL_CPUQ_OP_3));
+ rtw89_info(rtwdev, "R_BE_PL_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_PL_CPUQ_OP_STATUS));
} else {
- rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
- rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
- rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
+ rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
+ if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_RX_CTRL0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL0));
+ rtw89_info(rtwdev, "R_AX_RX_CTRL1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL1));
+ rtw89_info(rtwdev, "R_AX_RX_CTRL2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL2));
+ } else {
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ }
}
}
@@ -478,22 +552,37 @@ static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
}
if (dmac_err & B_AX_DISPATCH_ERR_FLAG) {
- rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
- rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
- rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
- rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ if (chip->chip_id == RTL8922A) {
+ rtw89_info(rtwdev, "R_BE_DISP_HOST_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_HOST_IMR));
+ rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR1));
+ rtw89_info(rtwdev, "R_BE_DISP_CPU_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_CPU_IMR));
+ rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR2=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR2));
+ rtw89_info(rtwdev, "R_BE_DISP_OTHER_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_OTHER_IMR));
+ rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR0=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR0));
+ } else {
+ rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ }
}
if (dmac_err & B_AX_BBRPT_ERR_FLAG) {
- if (chip->chip_id == RTL8852C) {
+ if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) {
rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
@@ -518,18 +607,54 @@ static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
}
+ if (chip->chip_id == RTL8922A) {
+ rtw89_info(rtwdev, "R_BE_LA_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_LA_ERRFLAG_IMR));
+ rtw89_info(rtwdev, "R_BE_LA_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_LA_ERRFLAG_ISR));
+ }
+ }
+
+ if (dmac_err & B_AX_HAXIDMA_ERR_FLAG) {
+ if (chip->chip_id == RTL8922A) {
+ rtw89_info(rtwdev, "R_BE_HAXI_IDCT_MSK=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_HAXI_IDCT_MSK));
+ rtw89_info(rtwdev, "R_BE_HAXI_IDCT=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_HAXI_IDCT));
+ } else if (chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
+ rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
+ }
+ }
+
+ if (dmac_err & B_BE_P_AXIDMA_ERR_INT) {
+ rtw89_info(rtwdev, "R_BE_PL_AXIDMA_IDCT_MSK=0x%08x\n",
+ rtw89_mac_mem_read(rtwdev, R_BE_PL_AXIDMA_IDCT_MSK,
+ RTW89_MAC_MEM_AXIDMA));
+ rtw89_info(rtwdev, "R_BE_PL_AXIDMA_IDCT=0x%08x\n",
+ rtw89_mac_mem_read(rtwdev, R_BE_PL_AXIDMA_IDCT,
+ RTW89_MAC_MEM_AXIDMA));
}
- if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) {
- rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
- rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
+ if (dmac_err & B_BE_MLO_ERR_INT) {
+ rtw89_info(rtwdev, "R_BE_MLO_ERR_IDCT_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_MLO_ERR_IDCT_IMR));
+ rtw89_info(rtwdev, "R_BE_PKTIN_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_MLO_ERR_IDCT_ISR));
+ }
+
+ if (dmac_err & B_BE_PLRLS_ERR_INT) {
+ rtw89_info(rtwdev, "R_BE_PLRLS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_PLRLS_ERR_IMR));
+ rtw89_info(rtwdev, "R_BE_PLRLS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_PLRLS_ERR_ISR));
}
}
-static void rtw89_mac_dump_cmac_err_status(struct rtw89_dev *rtwdev,
- u8 band)
+static void rtw89_mac_dump_cmac_err_status_ax(struct rtw89_dev *rtwdev,
+ u8 band)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 offset = 0;
@@ -619,8 +744,8 @@ static void rtw89_mac_dump_cmac_err_status(struct rtw89_dev *rtwdev,
rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
}
-static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
- enum mac_ax_err_info err)
+static void rtw89_mac_dump_err_status_ax(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err)
{
if (err != MAC_AX_ERR_L1_ERR_DMAC &&
err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
@@ -632,11 +757,16 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
+ rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
+ rtw89_info(rtwdev, "DBG Counter 1 (R_AX_DRV_FW_HSK_4)=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DRV_FW_HSK_4));
+ rtw89_info(rtwdev, "DBG Counter 2 (R_AX_DRV_FW_HSK_5)=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DRV_FW_HSK_5));
rtw89_mac_dump_dmac_err_status(rtwdev);
- rtw89_mac_dump_cmac_err_status(rtwdev, RTW89_MAC_0);
- if (rtwdev->dbcc_en)
- rtw89_mac_dump_cmac_err_status(rtwdev, RTW89_MAC_1);
+ rtw89_mac_dump_cmac_err_status_ax(rtwdev, RTW89_MAC_0);
+ rtw89_mac_dump_cmac_err_status_ax(rtwdev, RTW89_MAC_1);
rtwdev->hci.ops->dump_err_status(rtwdev);
@@ -681,6 +811,7 @@ static bool rtw89_mac_suppress_log(struct rtw89_dev *rtwdev, u32 err)
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 err, err_scnr;
int ret;
@@ -706,7 +837,7 @@ u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
return err;
rtw89_fw_st_dbg_dump(rtwdev);
- rtw89_mac_dump_err_status(rtwdev, err);
+ mac->dump_err_status(rtwdev, err);
return err;
}
@@ -900,7 +1031,7 @@ static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
return 0;
}
-static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
+static void hfc_get_mix_info_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_page_regs *regs = chip->page_regs;
@@ -909,11 +1040,6 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
struct rtw89_hfc_pub_info *info = &param->pub_info;
u32 val;
- int ret;
-
- ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
- if (ret)
- return ret;
val = rtw89_read32(rtwdev, regs->pub_page_info1);
info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
@@ -958,6 +1084,19 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
+}
+
+static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
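+ /* Quota/usage readback registers differ per MAC generation. */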
+ mac->hfc_get_mix_info(rtwdev);
ret = hfc_pub_info_chk(rtwdev);
if (param->en && ret)
@@ -966,7 +1105,7 @@ static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
return 0;
}
-static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
+static void hfc_h2c_cfg_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_page_regs *regs = chip->page_regs;
@@ -982,7 +1121,7 @@ static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
prec_cfg->h2c_full_cond);
}
-static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
+static void hfc_mix_cfg_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_page_regs *regs = chip->page_regs;
@@ -1017,7 +1156,7 @@ static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
-static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
+static void hfc_func_en_ax(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_page_regs *regs = chip->page_regs;
@@ -1033,8 +1172,9 @@ static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}
-static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
+int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 dma_ch_mask = chip->dma_ch_mask;
u8 ch;
@@ -1049,11 +1189,11 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
if (ret)
return ret;
- hfc_func_en(rtwdev, false, false);
+ mac->hfc_func_en(rtwdev, false, false);
if (!en && h2c_en) {
- hfc_h2c_cfg(rtwdev);
- hfc_func_en(rtwdev, en, h2c_en);
+ mac->hfc_h2c_cfg(rtwdev);
+ mac->hfc_func_en(rtwdev, en, h2c_en);
return ret;
}
@@ -1069,9 +1209,9 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
if (ret)
return ret;
- hfc_mix_cfg(rtwdev);
+ mac->hfc_mix_cfg(rtwdev);
if (en || h2c_en) {
- hfc_func_en(rtwdev, en, h2c_en);
+ mac->hfc_func_en(rtwdev, en, h2c_en);
udelay(10);
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
@@ -1333,9 +1473,14 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
if (on) {
set_bit(RTW89_FLAG_POWERON, rtwdev->flags);
+ set_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
+ set_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR);
} else {
clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
+ clear_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
+ clear_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
+ clear_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
rtw89_set_entity_state(rtwdev, false);
@@ -1350,7 +1495,7 @@ void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
rtw89_mac_power_switch(rtwdev, false);
}
-static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
+static int cmac_func_en_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
{
u32 func_en = 0;
u32 ck_en = 0;
@@ -1396,7 +1541,7 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
return 0;
}
-static int dmac_func_en(struct rtw89_dev *rtwdev)
+static int dmac_func_en_ax(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
@@ -1428,7 +1573,7 @@ static int dmac_func_en(struct rtw89_dev *rtwdev)
return 0;
}
-static int chip_func_en(struct rtw89_dev *rtwdev)
+static int chip_func_en_ax(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -1439,19 +1584,19 @@ static int chip_func_en(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
+static int sys_init_ax(struct rtw89_dev *rtwdev)
{
int ret;
- ret = dmac_func_en(rtwdev);
+ ret = dmac_func_en_ax(rtwdev);
if (ret)
return ret;
- ret = cmac_func_en(rtwdev, 0, true);
+ ret = cmac_func_en_ax(rtwdev, 0, true);
if (ret)
return ret;
- ret = chip_func_en(rtwdev);
+ ret = chip_func_en_ax(rtwdev);
if (ret)
return ret;
@@ -1460,10 +1605,14 @@ static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
const struct rtw89_mac_size_set rtw89_mac_size = {
.hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0},
+ .hfc_prec_cfg_c0 = {2, 32, 0, 0, 0, 0, 0, 0},
+ .hfc_prec_cfg_c2 = {0, 256, 0, 0, 0, 0, 0, 0},
/* PCIE 64 */
.wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
+ .wde_size0_v1 = {RTW89_WDE_PG_64, 3328, 0, 0,},
/* DLFW */
.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
+ .wde_size4_v1 = {RTW89_WDE_PG_64, 0, 3328, 0,},
/* PCIE 64 */
.wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
/* 8852B PCIE SCC */
@@ -1476,6 +1625,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
+ .ple_size0_v1 = {RTW89_PLE_PG_128, 2672, 256, 212992,},
+ .ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
/* PCIE 64 */
@@ -1488,6 +1639,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
/* PCIE 64 */
.wde_qt0 = {3792, 196, 0, 107,},
+ .wde_qt0_v1 = {3302, 6, 0, 20,},
/* DLFW */
.wde_qt4 = {0, 0, 0, 0,},
/* PCIE 64 */
@@ -1498,10 +1650,13 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
+ .ple_qt0 = {320, 0, 32, 16, 13, 13, 292, 0, 32, 18, 1, 4, 0,},
+ .ple_qt1 = {320, 0, 32, 16, 1944, 1944, 2223, 0, 1963, 1949, 1, 1935, 0,},
/* PCIE SCC */
.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
/* PCIE SCC */
.ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
+ .ple_qt9 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 1, 0, 0,},
/* DLFW */
.ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
/* PCIE 64 */
@@ -1522,6 +1677,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
+ .ple_rsvd_qt0 = {2, 112, 56, 6, 6, 6, 6, 0, 0, 62,},
+ .ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
+ .rsvd0_size0 = {212992, 0,},
+ .rsvd1_size0 = {587776, 2048,},
};
EXPORT_SYMBOL(rtw89_mac_size);
@@ -1540,7 +1699,9 @@ static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
return NULL;
}
+ mac->dle_info.rsvd_qt = cfg->rsvd_qt;
mac->dle_info.ple_pg_size = cfg->ple_size->pge_size;
+ mac->dle_info.ple_free_pg = cfg->ple_size->lnk_pge_num;
mac->dle_info.qta_mode = mode;
mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma;
mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma;
@@ -1548,33 +1709,86 @@ static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
return cfg;
}
-static bool mac_is_txq_empty(struct rtw89_dev *rtwdev)
+int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_dle_rsvd_qt_type type,
+ struct rtw89_mac_dle_rsvd_qt_cfg *cfg)
+{
+ struct rtw89_dle_info *dle_info = &rtwdev->mac.dle_info;
+ const struct rtw89_rsvd_quota *rsvd_qt = dle_info->rsvd_qt;
+
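+ /* Reserved quotas are laid out back-to-back immediately after the
+  * free PLE link pages, in the order: MPDU_INFO, B0/B1_CSI,
+  * B0/B1_LMR, B0/B1_FTM; pktid is the running page index.
+  */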
+ switch (type) {
+ case DLE_RSVD_QT_MPDU_INFO:
+ cfg->pktid = dle_info->ple_free_pg;
+ cfg->pg_num = rsvd_qt->mpdu_info_tbl;
+ break;
+ case DLE_RSVD_QT_B0_CSI:
+ cfg->pktid = dle_info->ple_free_pg + rsvd_qt->mpdu_info_tbl;
+ cfg->pg_num = rsvd_qt->b0_csi;
+ break;
+ case DLE_RSVD_QT_B1_CSI:
+ cfg->pktid = dle_info->ple_free_pg +
+ rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi;
+ cfg->pg_num = rsvd_qt->b1_csi;
+ break;
+ case DLE_RSVD_QT_B0_LMR:
+ cfg->pktid = dle_info->ple_free_pg +
+ rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi;
+ cfg->pg_num = rsvd_qt->b0_lmr;
+ break;
+ case DLE_RSVD_QT_B1_LMR:
+ cfg->pktid = dle_info->ple_free_pg +
+ rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi +
+ rsvd_qt->b0_lmr;
+ cfg->pg_num = rsvd_qt->b1_lmr;
+ break;
+ case DLE_RSVD_QT_B0_FTM:
+ cfg->pktid = dle_info->ple_free_pg +
+ rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi +
+ rsvd_qt->b0_lmr + rsvd_qt->b1_lmr;
+ cfg->pg_num = rsvd_qt->b0_ftm;
+ break;
+ case DLE_RSVD_QT_B1_FTM:
+ cfg->pktid = dle_info->ple_free_pg +
+ rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi +
+ rsvd_qt->b0_lmr + rsvd_qt->b1_lmr + rsvd_qt->b0_ftm;
+ cfg->pg_num = rsvd_qt->b1_ftm;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cfg->size = (u32)cfg->pg_num * dle_info->ple_pg_size;
+
+ return 0;
+}
+
+static bool mac_is_txq_empty_ax(struct rtw89_dev *rtwdev)
{
struct rtw89_mac_dle_dfi_qempty qempty;
- u32 qnum, qtmp, val32, msk32;
+ u32 grpnum, qtmp, val32, msk32;
int i, j, ret;
- qnum = rtwdev->chip->wde_qempty_acq_num;
+ grpnum = rtwdev->chip->wde_qempty_acq_grpnum;
qempty.dle_type = DLE_CTRL_TYPE_WDE;
- for (i = 0; i < qnum; i++) {
+ for (i = 0; i < grpnum; i++) {
qempty.grpsel = i;
- ret = dle_dfi_qempty(rtwdev, &qempty);
+ ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
if (ret) {
rtw89_warn(rtwdev, "dle dfi acq empty %d\n", ret);
return false;
}
qtmp = qempty.qempty;
for (j = 0 ; j < QEMP_ACQ_GRP_MACID_NUM; j++) {
- val32 = FIELD_GET(QEMP_ACQ_GRP_QSEL_MASK, qtmp);
+ val32 = u32_get_bits(qtmp, QEMP_ACQ_GRP_QSEL_MASK);
if (val32 != QEMP_ACQ_GRP_QSEL_MASK)
return false;
qtmp >>= QEMP_ACQ_GRP_QSEL_SH;
}
}
- qempty.grpsel = rtwdev->chip->wde_qempty_mgq_sel;
- ret = dle_dfi_qempty(rtwdev, &qempty);
+ qempty.grpsel = rtwdev->chip->wde_qempty_mgq_grpsel;
+ ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
if (ret) {
rtw89_warn(rtwdev, "dle dfi mgq empty %d\n", ret);
return false;
@@ -1602,11 +1816,21 @@ static bool mac_is_txq_empty(struct rtw89_dev *rtwdev)
return (val32 & msk32) == msk32;
}
-static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
- const struct rtw89_dle_size *ple)
+static inline u32 dle_used_size(const struct rtw89_dle_mem *cfg)
{
- return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) +
+ const struct rtw89_dle_size *wde = cfg->wde_size;
+ const struct rtw89_dle_size *ple = cfg->ple_size;
+ u32 used;
+
+ used = wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) +
ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
+
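+ /* Layouts with reserved regions (the *_v1 configs) count them too. */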
+ if (cfg->rsvd0_size && cfg->rsvd1_size) {
+ used += cfg->rsvd0_size->size;
+ used += cfg->rsvd1_size->size;
+ }
+
+ return used;
}
static u32 dle_expected_used_size(struct rtw89_dev *rtwdev,
@@ -1620,7 +1844,7 @@ static u32 dle_expected_used_size(struct rtw89_dev *rtwdev,
return size;
}
-static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
+static void dle_func_en_ax(struct rtw89_dev *rtwdev, bool enable)
{
if (enable)
rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
@@ -1630,7 +1854,7 @@ static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
}
-static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable)
+static void dle_clk_en_ax(struct rtw89_dev *rtwdev, bool enable)
{
u32 val = B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN;
@@ -1643,7 +1867,7 @@ static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable)
}
}
-static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
+static int dle_mix_cfg_ax(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
{
const struct rtw89_dle_size *size_cfg;
u32 val;
@@ -1700,6 +1924,23 @@ static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg
return 0;
}
+static int chk_dle_rdy_ax(struct rtw89_dev *rtwdev, bool wde_or_ple)
+{
+ u32 reg, mask;
+ u32 ini;
+
+ if (wde_or_ple) {
+ reg = R_AX_WDE_INI_STATUS;
+ mask = WDE_MGN_INI_RDY;
+ } else {
+ reg = R_AX_PLE_INI_STATUS;
+ mask = PLE_MGN_INI_RDY;
+ }
+
+ return read_poll_timeout(rtw89_read32, ini, (ini & mask) == mask, 1,
+ 2000, false, rtwdev, reg);
+}
+
#define INVALID_QT_WCPU U16_MAX
#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \
do { \
@@ -1712,10 +1953,10 @@ static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg
#define SET_QUOTA(_x, _module, _idx) \
SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx)
-static void wde_quota_cfg(struct rtw89_dev *rtwdev,
- const struct rtw89_wde_quota *min_cfg,
- const struct rtw89_wde_quota *max_cfg,
- u16 ext_wde_min_qt_wcpu)
+static void wde_quota_cfg_ax(struct rtw89_dev *rtwdev,
+ const struct rtw89_wde_quota *min_cfg,
+ const struct rtw89_wde_quota *max_cfg,
+ u16 ext_wde_min_qt_wcpu)
{
u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ?
ext_wde_min_qt_wcpu : min_cfg->wcpu;
@@ -1727,9 +1968,9 @@ static void wde_quota_cfg(struct rtw89_dev *rtwdev,
SET_QUOTA(cpu_io, WDE, 4);
}
-static void ple_quota_cfg(struct rtw89_dev *rtwdev,
- const struct rtw89_ple_quota *min_cfg,
- const struct rtw89_ple_quota *max_cfg)
+static void ple_quota_cfg_ax(struct rtw89_dev *rtwdev,
+ const struct rtw89_ple_quota *min_cfg,
+ const struct rtw89_ple_quota *max_cfg)
{
u32 val;
@@ -1794,17 +2035,19 @@ static void dle_quota_cfg(struct rtw89_dev *rtwdev,
const struct rtw89_dle_mem *cfg,
u16 ext_wde_min_qt_wcpu)
{
- wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu);
- ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt);
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ mac->wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu);
+ mac->ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt);
}
-static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
- enum rtw89_qta_mode ext_mode)
+int rtw89_mac_dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ enum rtw89_qta_mode ext_mode)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_dle_mem *cfg, *ext_cfg;
u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU;
- int ret = 0;
- u32 ini;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
if (ret)
@@ -1828,36 +2071,31 @@ static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
- dle_expected_used_size(rtwdev, mode)) {
+ if (dle_used_size(cfg) != dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
ret = -EINVAL;
goto error;
}
- dle_func_en(rtwdev, false);
- dle_clk_en(rtwdev, true);
+ mac->dle_func_en(rtwdev, false);
+ mac->dle_clk_en(rtwdev, true);
- ret = dle_mix_cfg(rtwdev, cfg);
+ ret = mac->dle_mix_cfg(rtwdev, cfg);
if (ret) {
rtw89_err(rtwdev, "[ERR] dle mix cfg\n");
goto error;
}
dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu);
- dle_func_en(rtwdev, true);
+ mac->dle_func_en(rtwdev, true);
- ret = read_poll_timeout(rtw89_read32, ini,
- (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
- 2000, false, rtwdev, R_AX_WDE_INI_STATUS);
+ ret = mac->chk_dle_rdy(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE cfg ready\n");
return ret;
}
- ret = read_poll_timeout(rtw89_read32, ini,
- (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
- 2000, false, rtwdev, R_AX_PLE_INI_STATUS);
+ ret = mac->chk_dle_rdy(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]PLE cfg ready\n");
return ret;
@@ -1865,7 +2103,7 @@ static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
return 0;
error:
- dle_func_en(rtwdev, false);
+ mac->dle_func_en(rtwdev, false);
rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n",
rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS));
rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n",
@@ -1900,8 +2138,8 @@ static bool is_qta_poh(struct rtw89_dev *rtwdev)
return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
}
-static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
- enum rtw89_qta_mode mode)
+int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -1950,7 +2188,7 @@ static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
SS2F_PATH_WLCPU);
}
-static int sta_sch_init(struct rtw89_dev *rtwdev)
+static int sta_sch_init_ax(struct rtw89_dev *rtwdev)
{
u32 p_val;
u8 val;
@@ -1979,7 +2217,7 @@ static int sta_sch_init(struct rtw89_dev *rtwdev)
return 0;
}
-static int mpdu_proc_init(struct rtw89_dev *rtwdev)
+static int mpdu_proc_init_ax(struct rtw89_dev *rtwdev)
{
int ret;
@@ -1996,7 +2234,7 @@ static int mpdu_proc_init(struct rtw89_dev *rtwdev)
return 0;
}
-static int sec_eng_init(struct rtw89_dev *rtwdev)
+static int sec_eng_init_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 val = 0;
@@ -2031,41 +2269,41 @@ static int sec_eng_init(struct rtw89_dev *rtwdev)
return 0;
}
-static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int dmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
int ret;
- ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID);
+ ret = rtw89_mac_dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret);
return ret;
}
- ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
if (ret) {
rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
return ret;
}
- ret = hfc_init(rtwdev, true, true, true);
+ ret = rtw89_mac_hfc_init(rtwdev, true, true, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
return ret;
}
- ret = sta_sch_init(rtwdev);
+ ret = sta_sch_init_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret);
return ret;
}
- ret = mpdu_proc_init(rtwdev);
+ ret = mpdu_proc_init_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret);
return ret;
}
- ret = sec_eng_init(rtwdev);
+ ret = sec_eng_init_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret);
return ret;
@@ -2074,7 +2312,7 @@ static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
-static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int addr_cam_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 val, reg;
u16 p_val;
@@ -2101,7 +2339,7 @@ static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 ret;
u32 reg;
@@ -2142,10 +2380,10 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
- enum rtw89_machdr_frame_type type,
- enum rtw89_mac_fwd_target fwd_target,
- u8 mac_idx)
+static int rtw89_mac_typ_fltr_opt_ax(struct rtw89_dev *rtwdev,
+ enum rtw89_machdr_frame_type type,
+ enum rtw89_mac_fwd_target fwd_target,
+ u8 mac_idx)
{
u32 reg;
u32 val;
@@ -2184,7 +2422,7 @@ int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
return 0;
}
-static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int rx_fltr_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
int ret, i;
u32 mac_ftlr, plcp_ftlr;
@@ -2194,8 +2432,8 @@ static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
for (i = RTW89_MGNT; i <= RTW89_DATA; i++) {
- ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST,
- mac_idx);
+ ret = rtw89_mac_typ_fltr_opt_ax(rtwdev, i, RTW89_FWD_TO_HOST,
+ mac_idx);
if (ret)
return ret;
}
@@ -2246,7 +2484,7 @@ static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx)
}
}
-static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int cca_ctrl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 val, reg;
int ret;
@@ -2278,7 +2516,7 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int nav_ctrl_init(struct rtw89_dev *rtwdev)
+static int nav_ctrl_init_ax(struct rtw89_dev *rtwdev)
{
rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN |
B_AX_WMAC_TF_UP_NAV_EN |
@@ -2288,7 +2526,7 @@ static int nav_ctrl_init(struct rtw89_dev *rtwdev)
return 0;
}
-static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int spatial_reuse_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
int ret;
@@ -2302,7 +2540,7 @@ static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int tmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
int ret;
@@ -2324,7 +2562,7 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int trxptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs;
@@ -2381,7 +2619,7 @@ static void rst_bacam(struct rtw89_dev *rtwdev)
rtw89_warn(rtwdev, "failed to reset BA CAM\n");
}
-static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
#define TRXCFG_RMAC_CCA_TO 32
#define TRXCFG_RMAC_DATA_TO 15
@@ -2439,7 +2677,7 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
-static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int cmac_com_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
@@ -2464,7 +2702,7 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
{
const struct rtw89_dle_mem *cfg;
@@ -2477,7 +2715,7 @@ static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma);
}
-static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 val, reg;
int ret;
@@ -2520,7 +2758,7 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int cmac_dma_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 reg;
@@ -2539,82 +2777,82 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+static int cmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
int ret;
- ret = scheduler_init(rtwdev, mac_idx);
+ ret = scheduler_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret);
return ret;
}
- ret = addr_cam_init(rtwdev, mac_idx);
+ ret = addr_cam_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx,
ret);
return ret;
}
- ret = rx_fltr_init(rtwdev, mac_idx);
+ ret = rx_fltr_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx,
ret);
return ret;
}
- ret = cca_ctrl_init(rtwdev, mac_idx);
+ ret = cca_ctrl_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx,
ret);
return ret;
}
- ret = nav_ctrl_init(rtwdev);
+ ret = nav_ctrl_init_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx,
ret);
return ret;
}
- ret = spatial_reuse_init(rtwdev, mac_idx);
+ ret = spatial_reuse_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n",
mac_idx, ret);
return ret;
}
- ret = tmac_init(rtwdev, mac_idx);
+ ret = tmac_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret);
return ret;
}
- ret = trxptcl_init(rtwdev, mac_idx);
+ ret = trxptcl_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret);
return ret;
}
- ret = rmac_init(rtwdev, mac_idx);
+ ret = rmac_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret);
return ret;
}
- ret = cmac_com_init(rtwdev, mac_idx);
+ ret = cmac_com_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret);
return ret;
}
- ret = ptcl_init(rtwdev, mac_idx);
+ ret = ptcl_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret);
return ret;
}
- ret = cmac_dma_init(rtwdev, mac_idx);
+ ret = cmac_dma_init_ax(rtwdev, mac_idx);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret);
return ret;
@@ -2626,20 +2864,26 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
struct rtw89_mac_c2h_info *c2h_info)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_mac_h2c_info h2c_info = {0};
u32 ret;
+ mac->cnv_efuse_state(rtwdev, false);
+
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE;
h2c_info.content_len = 0;
ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info);
if (ret)
- return ret;
+ goto out;
if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP)
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+out:
+ mac->cnv_efuse_state(rtwdev, true);
+
+ return ret;
}
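
The rework above funnels both the early error return and the ID-mismatch case through one out: label, so the cnv_efuse_state() release side always runs. A minimal standalone sketch of that acquire/goto-out/release shape, with hypothetical enter/leave helpers standing in for the driver's efuse-state callback:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's cnv_efuse_state(dev, false/true). */
static void power_state_enter(void) { puts("enter busy state"); }
static void power_state_leave(void) { puts("leave busy state"); }

static int do_query(int want_ok)
{
	return want_ok ? 0 : -1;	/* pretend firmware exchange */
}

static int query_with_bracketing(int want_ok)
{
	int ret;

	power_state_enter();

	ret = do_query(want_ok);
	if (ret)
		goto out;		/* error path still releases below */

	/* ... validate the reply; set ret on mismatch instead of returning */

out:
	power_state_leave();		/* runs on every exit path */
	return ret;
}

int main(void)
{
	return query_with_bracketing(1);
}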
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
@@ -2871,7 +3115,7 @@ int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
-int rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id)
+static int dle_buf_req_ax(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id)
{
u32 val, reg;
int ret;
@@ -2895,7 +3139,7 @@ int rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *p
return 0;
}
-int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+static int set_cpuio_ax(struct rtw89_dev *rtwdev,
struct rtw89_cpuio_ctrl *ctrl_para, bool wd)
{
u32 val, cmd_type, reg;
@@ -2948,8 +3192,9 @@ int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
return 0;
}
-static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_dle_mem *cfg;
struct rtw89_cpuio_ctrl ctrl_para = {0};
u16 pkt_id;
@@ -2961,15 +3206,14 @@ static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
return -EINVAL;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
- dle_expected_used_size(rtwdev, mode)) {
+ if (dle_used_size(cfg) != dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
return -EINVAL;
}
dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
- ret = rtw89_mac_dle_buf_req(rtwdev, 0x20, true, &pkt_id);
+ ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
return ret;
@@ -2981,13 +3225,13 @@ static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
ctrl_para.pkt_num = 0;
ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
- ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true);
+ ret = mac->set_cpuio(rtwdev, &ctrl_para, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n");
return -EFAULT;
}
- ret = rtw89_mac_dle_buf_req(rtwdev, 0x20, false, &pkt_id);
+ ret = mac->dle_buf_req(rtwdev, 0x20, false, &pkt_id);
if (ret) {
rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n");
return ret;
@@ -2999,7 +3243,7 @@ static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
ctrl_para.pkt_num = 0;
ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS;
ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT;
- ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false);
+ ret = mac->set_cpuio(rtwdev, &ctrl_para, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n");
return -EFAULT;
@@ -3031,7 +3275,7 @@ static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
-static int band1_enable(struct rtw89_dev *rtwdev)
+static int band1_enable_ax(struct rtw89_dev *rtwdev)
{
int ret, i;
u32 sleep_bak[4] = {0};
@@ -3057,7 +3301,7 @@ static int band1_enable(struct rtw89_dev *rtwdev)
return ret;
}
- ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -3074,13 +3318,13 @@ static int band1_enable(struct rtw89_dev *rtwdev)
return ret;
}
- ret = cmac_func_en(rtwdev, 1, true);
+ ret = cmac_func_en_ax(rtwdev, 1, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret);
return ret;
}
- ret = cmac_init(rtwdev, 1);
+ ret = cmac_init_ax(rtwdev, 1);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret);
return ret;
@@ -3289,8 +3533,8 @@ static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set);
}
-static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
- enum rtw89_mac_hwmod_sel sel)
+static int enable_imr_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_mac_hwmod_sel sel)
{
int ret;
@@ -3327,7 +3571,7 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
return 0;
}
-static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en)
+static void err_imr_ctrl_ax(struct rtw89_dev *rtwdev, bool en)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -3340,18 +3584,18 @@ static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en)
en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
}
-static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
+static int dbcc_enable_ax(struct rtw89_dev *rtwdev, bool enable)
{
int ret = 0;
if (enable) {
- ret = band1_enable(rtwdev);
+ ret = band1_enable_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret);
return ret;
}
- ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ ret = enable_imr_ax(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
if (ret) {
rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret);
return ret;
@@ -3364,7 +3608,7 @@ static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
return 0;
}
-static int set_host_rpr(struct rtw89_dev *rtwdev)
+static int set_host_rpr_ax(struct rtw89_dev *rtwdev)
{
if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
@@ -3384,46 +3628,46 @@ static int set_host_rpr(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
+static int trx_init_ax(struct rtw89_dev *rtwdev)
{
enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
int ret;
- ret = dmac_init(rtwdev, 0);
+ ret = dmac_init_ax(rtwdev, 0);
if (ret) {
rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret);
return ret;
}
- ret = cmac_init(rtwdev, 0);
+ ret = cmac_init_ax(rtwdev, 0);
if (ret) {
rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret);
return ret;
}
- if (is_qta_dbcc(rtwdev, qta_mode)) {
- ret = rtw89_mac_dbcc_enable(rtwdev, true);
+ if (rtw89_mac_is_qta_dbcc(rtwdev, qta_mode)) {
+ ret = dbcc_enable_ax(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret);
return ret;
}
}
- ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ ret = enable_imr_ax(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
if (ret) {
rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
return ret;
}
- ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ ret = enable_imr_ax(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
if (ret) {
rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret);
return ret;
}
- rtw89_mac_err_imr_ctrl(rtwdev, true);
+ err_imr_ctrl_ax(rtwdev, true);
- ret = set_host_rpr(rtwdev);
+ ret = set_host_rpr_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
return ret;
@@ -3514,11 +3758,10 @@ static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason,
return 0;
}
-static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
+static void rtw89_mac_hci_func_en_ax(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val;
- int ret;
if (chip_id == RTL8852C)
val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
@@ -3527,6 +3770,12 @@ static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
B_AX_PKT_BUF_EN;
rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);
+}
+
+static void rtw89_mac_dmac_func_pre_en_ax(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 val;
if (chip_id == RTL8851B)
val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN;
@@ -3535,7 +3784,7 @@ static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val);
if (chip_id != RTL8852C)
- goto dle;
+ return;
val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1);
val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST);
@@ -3550,15 +3799,23 @@ static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
B_AX_STOP_CH12 | B_AX_STOP_ACH2);
rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11);
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN);
+}
-dle:
- ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
+static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ int ret;
+
+ mac->hci_func_en(rtwdev);
+ mac->dmac_func_pre_en(rtwdev);
+
+ ret = rtw89_mac_dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret);
return ret;
}
- ret = hfc_init(rtwdev, true, false, true);
+ ret = rtw89_mac_hfc_init(rtwdev, true, false, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret);
return ret;
@@ -3632,6 +3889,7 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
int rtw89_mac_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
bool include_bb = !!chip->bbmcu_nr;
int ret;
@@ -3644,11 +3902,11 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
- ret = rtw89_mac_sys_init(rtwdev);
+ ret = mac->sys_init(rtwdev);
if (ret)
goto fail;
- ret = rtw89_mac_trx_init(rtwdev);
+ ret = mac->trx_init(rtwdev);
if (ret)
goto fail;
@@ -5249,7 +5507,8 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
if (chip->chip_id == RTL8852C)
return false;
- else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
+ chip->chip_id == RTL8851B)
val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3,
B_AX_LTE_MUX_CTRL_PATH >> 24);
@@ -5694,7 +5953,8 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+static
+int rtw89_mac_write_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
{
u32 val32;
int ret;
@@ -5716,9 +5976,9 @@ int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask
return 0;
}
-EXPORT_SYMBOL(rtw89_mac_write_xtal_si);
-int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+static
+int rtw89_mac_read_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
{
u32 val32;
int ret;
@@ -5741,7 +6001,6 @@ int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
return 0;
}
-EXPORT_SYMBOL(rtw89_mac_read_xtal_si);
static
void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
@@ -5791,6 +6050,7 @@ void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx band)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_pkt_drop_params params = {0};
bool empty;
int i, ret = 0, try_cnt = 3;
@@ -5799,7 +6059,7 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
params.sel = RTW89_PKT_DROP_SEL_BAND_ONCE;
for (i = 0; i < try_cnt; i++) {
- ret = read_poll_timeout(mac_is_txq_empty, empty, empty, 50,
+ ret = read_poll_timeout(mac->is_txq_empty, empty, empty, 50,
50000, false, rtwdev);
if (ret && !RTW89_CHK_FW_FEATURE(NO_PACKET_DROP, &rtwdev->fw))
rtw89_fw_h2c_pkt_drop(rtwdev, &params);
@@ -5847,13 +6107,44 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
B_AX_BFMEE_HE_NDPA_EN,
},
+ .check_mac_en = rtw89_mac_check_mac_en_ax,
+ .sys_init = sys_init_ax,
+ .trx_init = trx_init_ax,
+ .hci_func_en = rtw89_mac_hci_func_en_ax,
+ .dmac_func_pre_en = rtw89_mac_dmac_func_pre_en_ax,
+ .dle_func_en = dle_func_en_ax,
+ .dle_clk_en = dle_clk_en_ax,
.bf_assoc = rtw89_mac_bf_assoc_ax,
+ .typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
+
+ .dle_mix_cfg = dle_mix_cfg_ax,
+ .chk_dle_rdy = chk_dle_rdy_ax,
+ .dle_buf_req = dle_buf_req_ax,
+ .hfc_func_en = hfc_func_en_ax,
+ .hfc_h2c_cfg = hfc_h2c_cfg_ax,
+ .hfc_mix_cfg = hfc_mix_cfg_ax,
+ .hfc_get_mix_info = hfc_get_mix_info_ax,
+ .wde_quota_cfg = wde_quota_cfg_ax,
+ .ple_quota_cfg = ple_quota_cfg_ax,
+ .set_cpuio = set_cpuio_ax,
+
.disable_cpu = rtw89_mac_disable_cpu_ax,
.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
.fwdl_get_status = rtw89_fw_get_rdy_ax,
.fwdl_check_path_ready = rtw89_fwdl_check_path_ready_ax,
+ .parse_efuse_map = rtw89_parse_efuse_map_ax,
+ .parse_phycap_map = rtw89_parse_phycap_map_ax,
+ .cnv_efuse_state = rtw89_cnv_efuse_state_ax,
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax,
+
+ .write_xtal_si = rtw89_mac_write_xtal_si_ax,
+ .read_xtal_si = rtw89_mac_read_xtal_si_ax,
+
+ .dump_qta_lost = rtw89_mac_dump_qta_lost_ax,
+ .dump_err_status = rtw89_mac_dump_err_status_ax,
+
+ .is_txq_empty = mac_is_txq_empty_ax,
};
EXPORT_SYMBOL(rtw89_mac_gen_ax);
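
rtw89_mac_gen_ax fills a per-generation ops table, so common code dispatches through rtwdev->chip->mac_def instead of calling AX-specific helpers directly, and mac.h can drop exported symbols in favor of static inline wrappers. A compilable toy version of that pattern; the struct, field, and function names here are illustrative, not the driver's:

#include <stdio.h>

struct mac_ops {
	int (*trx_init)(void);
	int (*set_cpuio)(int cmd);
};

static int trx_init_ax(void) { puts("AX trx init"); return 0; }
static int set_cpuio_ax(int cmd) { printf("AX cpuio %d\n", cmd); return 0; }

/* One table per chip generation; a BE table would plug in _be helpers. */
static const struct mac_ops mac_gen_ax = {
	.trx_init  = trx_init_ax,
	.set_cpuio = set_cpuio_ax,
};

struct dev { const struct mac_ops *mac; };

/* Thin inline wrapper, mirroring the header's static inline dispatchers. */
static inline int dev_trx_init(struct dev *d)
{
	return d->mac->trx_init();
}

int main(void)
{
	struct dev d = { .mac = &mac_gen_ax };

	return dev_trx_init(&d) || d.mac->set_cpuio(0x20);
}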
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index f9fef678f..ed98b4980 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -10,6 +10,7 @@
#define MAC_MEM_DUMP_PAGE_SIZE 0x40000
#define ADDR_CAM_ENT_SIZE 0x40
+#define ADDR_CAM_ENT_SHORT_SIZE 0x20
#define BSSID_CAM_ENT_SIZE 0x08
#define HFC_PAGE_UNIT 64
#define RPWM_TRY_CNT 3
@@ -536,6 +537,9 @@ enum rtw89_mac_bf_rrsc_rate {
#define B_CMAC1_MGQ_NO_PWRSAV BIT(11)
#define B_CMAC1_CPUMGQ BIT(12)
+#define B_CMAC0_MGQ_NORMAL_BE BIT(2)
+#define B_CMAC1_MGQ_NORMAL_BE BIT(30)
+
#define QEMP_ACQ_GRP_MACID_NUM 8
#define QEMP_ACQ_GRP_QSEL_SH 4
#define QEMP_ACQ_GRP_QSEL_MASK 0xF
@@ -649,6 +653,22 @@ struct rtw89_mac_dle_dfi_qempty {
u32 qempty;
};
+enum rtw89_mac_dle_rsvd_qt_type {
+ DLE_RSVD_QT_MPDU_INFO,
+ DLE_RSVD_QT_B0_CSI,
+ DLE_RSVD_QT_B1_CSI,
+ DLE_RSVD_QT_B0_LMR,
+ DLE_RSVD_QT_B1_LMR,
+ DLE_RSVD_QT_B0_FTM,
+ DLE_RSVD_QT_B1_FTM,
+};
+
+struct rtw89_mac_dle_rsvd_qt_cfg {
+ u16 pktid;
+ u16 pg_num;
+ u32 size;
+};
+
enum rtw89_mac_error_scenario {
RTW89_RXI300_ERROR = 1,
RTW89_WCPU_CPU_EXCEPTION = 2,
@@ -817,27 +837,37 @@ enum mac_ax_err_info {
struct rtw89_mac_size_set {
const struct rtw89_hfc_prec_cfg hfc_preccfg_pcie;
+ const struct rtw89_hfc_prec_cfg hfc_prec_cfg_c0;
+ const struct rtw89_hfc_prec_cfg hfc_prec_cfg_c2;
const struct rtw89_dle_size wde_size0;
+ const struct rtw89_dle_size wde_size0_v1;
const struct rtw89_dle_size wde_size4;
+ const struct rtw89_dle_size wde_size4_v1;
const struct rtw89_dle_size wde_size6;
const struct rtw89_dle_size wde_size7;
const struct rtw89_dle_size wde_size9;
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
const struct rtw89_dle_size ple_size0;
+ const struct rtw89_dle_size ple_size0_v1;
+ const struct rtw89_dle_size ple_size3_v1;
const struct rtw89_dle_size ple_size4;
const struct rtw89_dle_size ple_size6;
const struct rtw89_dle_size ple_size8;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
const struct rtw89_wde_quota wde_qt0;
+ const struct rtw89_wde_quota wde_qt0_v1;
const struct rtw89_wde_quota wde_qt4;
const struct rtw89_wde_quota wde_qt6;
const struct rtw89_wde_quota wde_qt7;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
+ const struct rtw89_ple_quota ple_qt0;
+ const struct rtw89_ple_quota ple_qt1;
const struct rtw89_ple_quota ple_qt4;
const struct rtw89_ple_quota ple_qt5;
+ const struct rtw89_ple_quota ple_qt9;
const struct rtw89_ple_quota ple_qt13;
const struct rtw89_ple_quota ple_qt18;
const struct rtw89_ple_quota ple_qt44;
@@ -848,6 +878,10 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt_52a_wow;
const struct rtw89_ple_quota ple_qt_52b_wow;
const struct rtw89_ple_quota ple_qt_51b_wow;
+ const struct rtw89_rsvd_quota ple_rsvd_qt0;
+ const struct rtw89_rsvd_quota ple_rsvd_qt1;
+ const struct rtw89_dle_rsvd_size rsvd0_size0;
+ const struct rtw89_dle_rsvd_size rsvd1_size0;
};
extern const struct rtw89_mac_size_set rtw89_mac_size;
@@ -864,18 +898,60 @@ struct rtw89_mac_gen_def {
struct rtw89_reg_def muedca_ctrl;
struct rtw89_reg_def bfee_ctrl;
+ int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
+ enum rtw89_mac_hwmod_sel sel);
+ int (*sys_init)(struct rtw89_dev *rtwdev);
+ int (*trx_init)(struct rtw89_dev *rtwdev);
+ void (*hci_func_en)(struct rtw89_dev *rtwdev);
+ void (*dmac_func_pre_en)(struct rtw89_dev *rtwdev);
+ void (*dle_func_en)(struct rtw89_dev *rtwdev, bool enable);
+ void (*dle_clk_en)(struct rtw89_dev *rtwdev, bool enable);
void (*bf_assoc)(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+ int (*typ_fltr_opt)(struct rtw89_dev *rtwdev,
+ enum rtw89_machdr_frame_type type,
+ enum rtw89_mac_fwd_target fwd_target,
+ u8 mac_idx);
+
+ int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
+ int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
+ int (*dle_buf_req)(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id);
+ void (*hfc_func_en)(struct rtw89_dev *rtwdev, bool en, bool h2c_en);
+ void (*hfc_h2c_cfg)(struct rtw89_dev *rtwdev);
+ void (*hfc_mix_cfg)(struct rtw89_dev *rtwdev);
+ void (*hfc_get_mix_info)(struct rtw89_dev *rtwdev);
+ void (*wde_quota_cfg)(struct rtw89_dev *rtwdev,
+ const struct rtw89_wde_quota *min_cfg,
+ const struct rtw89_wde_quota *max_cfg,
+ u16 ext_wde_min_qt_wcpu);
+ void (*ple_quota_cfg)(struct rtw89_dev *rtwdev,
+ const struct rtw89_ple_quota *min_cfg,
+ const struct rtw89_ple_quota *max_cfg);
+ int (*set_cpuio)(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+
void (*disable_cpu)(struct rtw89_dev *rtwdev);
int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason,
bool dlfw, bool include_bb);
u8 (*fwdl_get_status)(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type);
int (*fwdl_check_path_ready)(struct rtw89_dev *rtwdev, bool h2c_or_fwdl);
+ int (*parse_efuse_map)(struct rtw89_dev *rtwdev);
+ int (*parse_phycap_map)(struct rtw89_dev *rtwdev);
+ int (*cnv_efuse_state)(struct rtw89_dev *rtwdev, bool idle);
bool (*get_txpwr_cr)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
+
+ int (*write_xtal_si)(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
+ int (*read_xtal_si)(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+
+ void (*dump_qta_lost)(struct rtw89_dev *rtwdev);
+ void (*dump_err_status)(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err);
+
+ bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
};
extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax;
@@ -977,10 +1053,31 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev);
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb);
int rtw89_mac_init(struct rtw89_dev *rtwdev);
+int rtw89_mac_dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ enum rtw89_qta_mode ext_mode);
+int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en);
+int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
+ enum rtw89_qta_mode mode);
+bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode);
+static inline
int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 band,
- enum rtw89_mac_hwmod_sel sel);
+ enum rtw89_mac_hwmod_sel sel)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->check_mac_en(rtwdev, band, sel);
+}
+
int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
+int rtw89_mac_dle_dfi_cfg(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl);
+int rtw89_mac_dle_dfi_quota_cfg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_dle_dfi_quota *quota);
+void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev);
+int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_dle_dfi_qempty *qempty);
+void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
@@ -1011,6 +1108,23 @@ static inline int rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
return chip->ops->disable_bb_rf(rtwdev);
}
+static inline int rtw89_chip_reset_bb_rf(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
+ return 0;
+
+ ret = rtw89_chip_disable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
+ ret = rtw89_chip_enable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
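
rtw89_chip_reset_bb_rf() composes a reset from the existing disable/enable callbacks and turns it into a no-op on non-AX generations. A compact sketch of that compose-and-gate shape; the generation names and helpers are illustrative:

enum chip_gen { GEN_AX, GEN_BE };

static int bb_rf_disable(void) { return 0; }
static int bb_rf_enable(void)  { return 0; }

static int bb_rf_reset(enum chip_gen gen)
{
	int ret;

	if (gen != GEN_AX)		/* newer gens handle reset elsewhere */
		return 0;

	ret = bb_rf_disable();
	if (ret)
		return ret;

	return bb_rf_enable();
}

int main(void)
{
	return bb_rf_reset(GEN_AX);
}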
+
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
@@ -1186,6 +1300,7 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SC_XI_MASK GENMASK(7, 0)
XTAL_SI_XTAL_SC_XO = 0x05,
#define XTAL_SC_XO_MASK GENMASK(7, 0)
+ XTAL_SI_XREF_MODE = 0x0B,
XTAL_SI_PWR_CUT = 0x10,
#define XTAL_SI_SMALL_PWR_CUT BIT(0)
#define XTAL_SI_BIG_PWR_CUT BIT(1)
@@ -1195,6 +1310,8 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_LDO_LPS GENMASK(6, 4)
XTAL_SI_XTAL_XMD_4 = 0x26,
#define XTAL_SI_LPS_CAP GENMASK(3, 0)
+ XTAL_SI_XREF_RF1 = 0x2D,
+ XTAL_SI_XREF_RF2 = 0x2E,
XTAL_SI_CV = 0x41,
#define XTAL_SI_ACV_MASK GENMASK(3, 0)
XTAL_SI_LOW_ADDR = 0x62,
@@ -1222,20 +1339,34 @@ enum rtw89_mac_xtal_si_offset {
XTAL_SI_SRAM_CTRL = 0xA1,
#define XTAL_SI_SRAM_DIS BIT(1)
#define FULL_BIT_MASK GENMASK(7, 0)
+ XTAL_SI_PLL = 0xE0,
+ XTAL_SI_PLL_1 = 0xE1,
};
-int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
-int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+static inline
+int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->write_xtal_si(rtwdev, offset, val, mask);
+}
+
+static inline
+int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->read_xtal_si(rtwdev, offset, val);
+}
+
void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
-int rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id);
-int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
- struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
-int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
- enum rtw89_machdr_frame_type type,
- enum rtw89_mac_fwd_target fwd_target, u8 mac_idx);
int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow);
int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx band);
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool wow);
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode);
+int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_dle_rsvd_qt_type type,
+ struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index b7ceaf559..956a06c8c 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -226,6 +226,7 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ u32 rx_fltr;
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
@@ -272,16 +273,29 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
}
}
+ rx_fltr = rtwdev->hal.rx_fltr;
+
+ /* mac80211 doesn't configure the RX filter during an HW scan, so the
+ * driver has to set it by itself. However, a P2P scan may have
+ * configured a filter that overwrites the one the HW scan needs, so
+ * check for an ongoing scan and append the related filter bits.
+ */
+ if (rtwdev->scanning) {
+ rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rx_fltr &= ~B_AX_A_BC;
+ rx_fltr &= ~B_AX_A_A1_MATCH;
+ }
+
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
B_AX_RX_FLTR_CFG_MASK,
- rtwdev->hal.rx_fltr);
+ rx_fltr);
if (!rtwdev->dbcc_en)
goto out;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1),
B_AX_RX_FLTR_CFG_MASK,
- rtwdev->hal.rx_fltr);
+ rx_fltr);
out:
mutex_unlock(&rtwdev->mutex);
@@ -427,7 +441,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
* when disconnected by peer
*/
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
}
}
@@ -976,7 +990,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
}
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
if (type == IEEE80211_ROC_TYPE_MGMT_TX)
roc->state = RTW89_ROC_MGMT;
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 3278f241d..be30c9346 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -3,6 +3,7 @@
*/
#include "debug.h"
+#include "efuse.h"
#include "fw.h"
#include "mac.h"
#include "reg.h"
@@ -56,6 +57,369 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
R_BE_PORT_HGQ_WINDOW_CFG + 3},
};
+static int rtw89_mac_check_mac_en_be(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_mac_hwmod_sel sel)
+{
+ if (sel == RTW89_DMAC_SEL &&
+ test_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags))
+ return 0;
+ if (sel == RTW89_CMAC_SEL && mac_idx == RTW89_MAC_0 &&
+ test_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags))
+ return 0;
+ if (sel == RTW89_CMAC_SEL && mac_idx == RTW89_MAC_1 &&
+ test_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags))
+ return 0;
+
+ return -EFAULT;
+}
+
+static bool is_qta_poh(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE;
+}
+
+static void hfc_get_mix_info_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+ struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+ struct rtw89_hfc_pub_info *info = &param->pub_info;
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_BE_PUB_PAGE_INFO1);
+ info->g0_used = u32_get_bits(val, B_BE_G0_USE_PG_MASK);
+ info->g1_used = u32_get_bits(val, B_BE_G1_USE_PG_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_PUB_PAGE_INFO3);
+ info->g0_aval = u32_get_bits(val, B_BE_G0_AVAL_PG_MASK);
+ info->g1_aval = u32_get_bits(val, B_BE_G1_AVAL_PG_MASK);
+ info->pub_aval = u32_get_bits(rtw89_read32(rtwdev, R_BE_PUB_PAGE_INFO2),
+ B_BE_PUB_AVAL_PG_MASK);
+ info->wp_aval = u32_get_bits(rtw89_read32(rtwdev, R_BE_WP_PAGE_INFO1),
+ B_BE_WP_AVAL_PG_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_HCI_FC_CTRL);
+ param->en = !!(val & B_BE_HCI_FC_EN);
+ param->h2c_en = !!(val & B_BE_HCI_FC_CH12_EN);
+ param->mode = u32_get_bits(val, B_BE_HCI_FC_MODE_MASK);
+ prec_cfg->ch011_full_cond = u32_get_bits(val, B_BE_HCI_FC_WD_FULL_COND_MASK);
+ prec_cfg->h2c_full_cond = u32_get_bits(val, B_BE_HCI_FC_CH12_FULL_COND_MASK);
+ prec_cfg->wp_ch07_full_cond =
+ u32_get_bits(val, B_BE_HCI_FC_WP_CH07_FULL_COND_MASK);
+ prec_cfg->wp_ch811_full_cond =
+ u32_get_bits(val, B_BE_HCI_FC_WP_CH811_FULL_COND_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_CH_PAGE_CTRL);
+ prec_cfg->ch011_prec = u32_get_bits(val, B_BE_PREC_PAGE_CH011_V1_MASK);
+ prec_cfg->h2c_prec = u32_get_bits(val, B_BE_PREC_PAGE_CH12_V1_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_PUB_PAGE_CTRL2);
+ pub_cfg->pub_max = u32_get_bits(val, B_BE_PUBPG_ALL_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_WP_PAGE_CTRL1);
+ prec_cfg->wp_ch07_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH07_MASK);
+ prec_cfg->wp_ch811_prec = u32_get_bits(val, B_BE_PREC_PAGE_WP_CH811_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_WP_PAGE_CTRL2);
+ pub_cfg->wp_thrd = u32_get_bits(val, B_BE_WP_THRD_MASK);
+
+ val = rtw89_read32(rtwdev, R_BE_PUB_PAGE_CTRL1);
+ pub_cfg->grp0 = u32_get_bits(val, B_BE_PUBPG_G0_MASK);
+ pub_cfg->grp1 = u32_get_bits(val, B_BE_PUBPG_G1_MASK);
+}
+
+static void hfc_h2c_cfg_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+ u32 val;
+
+ val = u32_encode_bits(prec_cfg->h2c_prec, B_BE_PREC_PAGE_CH12_V1_MASK);
+ rtw89_write32(rtwdev, R_BE_CH_PAGE_CTRL, val);
+}
+
+static void hfc_mix_cfg_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+ const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+ u32 val;
+
+ val = u32_encode_bits(prec_cfg->ch011_prec, B_BE_PREC_PAGE_CH011_V1_MASK) |
+ u32_encode_bits(prec_cfg->h2c_prec, B_BE_PREC_PAGE_CH12_V1_MASK);
+ rtw89_write32(rtwdev, R_BE_CH_PAGE_CTRL, val);
+
+ val = u32_encode_bits(pub_cfg->pub_max, B_BE_PUBPG_ALL_MASK);
+ rtw89_write32(rtwdev, R_BE_PUB_PAGE_CTRL2, val);
+
+ val = u32_encode_bits(prec_cfg->wp_ch07_prec, B_BE_PREC_PAGE_WP_CH07_MASK) |
+ u32_encode_bits(prec_cfg->wp_ch811_prec, B_BE_PREC_PAGE_WP_CH811_MASK);
+ rtw89_write32(rtwdev, R_BE_WP_PAGE_CTRL1, val);
+
+ val = u32_replace_bits(rtw89_read32(rtwdev, R_BE_HCI_FC_CTRL),
+ param->mode, B_BE_HCI_FC_MODE_MASK);
+ val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
+ B_BE_HCI_FC_WD_FULL_COND_MASK);
+ val = u32_replace_bits(val, prec_cfg->h2c_full_cond,
+ B_BE_HCI_FC_CH12_FULL_COND_MASK);
+ val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond,
+ B_BE_HCI_FC_WP_CH07_FULL_COND_MASK);
+ val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
+ B_BE_HCI_FC_WP_CH811_FULL_COND_MASK);
+ rtw89_write32(rtwdev, R_BE_HCI_FC_CTRL, val);
+}
+
+static void hfc_func_en_be(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_BE_HCI_FC_CTRL);
+ param->en = en;
+ param->h2c_en = h2c_en;
+ val = en ? (val | B_BE_HCI_FC_EN) : (val & ~B_BE_HCI_FC_EN);
+ val = h2c_en ? (val | B_BE_HCI_FC_CH12_EN) :
+ (val & ~B_BE_HCI_FC_CH12_EN);
+ rtw89_write32(rtwdev, R_BE_HCI_FC_CTRL, val);
+}
+
+static void dle_func_en_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_DMAC_FUNC_EN,
+ B_BE_DLE_WDE_EN | B_BE_DLE_PLE_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_DMAC_FUNC_EN,
+ B_BE_DLE_WDE_EN | B_BE_DLE_PLE_EN);
+}
+
+static void dle_clk_en_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_DMAC_CLK_EN,
+ B_BE_DLE_WDE_CLK_EN | B_BE_DLE_PLE_CLK_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_DMAC_CLK_EN,
+ B_BE_DLE_WDE_CLK_EN | B_BE_DLE_PLE_CLK_EN);
+}
+
+static int dle_mix_cfg_be(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
+{
+ const struct rtw89_dle_size *wde_size_cfg, *ple_size_cfg;
+ u32 bound;
+ u32 val;
+
+ wde_size_cfg = cfg->wde_size;
+ ple_size_cfg = cfg->ple_size;
+
+ val = rtw89_read32(rtwdev, R_BE_WDE_PKTBUF_CFG);
+
+ switch (wde_size_cfg->pge_size) {
+ default:
+ case RTW89_WDE_PG_64:
+ val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64,
+ B_BE_WDE_PAGE_SEL_MASK);
+ break;
+ case RTW89_WDE_PG_128:
+ val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128,
+ B_BE_WDE_PAGE_SEL_MASK);
+ break;
+ case RTW89_WDE_PG_256:
+ rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n");
+ return -EINVAL;
+ }
+
+ bound = wde_size_cfg->srt_ofst / DLE_BOUND_UNIT;
+ val = u32_replace_bits(val, bound, B_BE_WDE_START_BOUND_MASK);
+ val = u32_replace_bits(val, wde_size_cfg->lnk_pge_num,
+ B_BE_WDE_FREE_PAGE_NUM_MASK);
+ rtw89_write32(rtwdev, R_BE_WDE_PKTBUF_CFG, val);
+
+ val = rtw89_read32(rtwdev, R_BE_PLE_PKTBUF_CFG);
+
+ switch (ple_size_cfg->pge_size) {
+ default:
+ case RTW89_PLE_PG_64:
+ rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n");
+ return -EINVAL;
+ case RTW89_PLE_PG_128:
+ val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128,
+ B_BE_PLE_PAGE_SEL_MASK);
+ break;
+ case RTW89_PLE_PG_256:
+ val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256,
+ B_BE_PLE_PAGE_SEL_MASK);
+ break;
+ }
+
+ bound = ple_size_cfg->srt_ofst / DLE_BOUND_UNIT;
+ val = u32_replace_bits(val, bound, B_BE_PLE_START_BOUND_MASK);
+ val = u32_replace_bits(val, ple_size_cfg->lnk_pge_num,
+ B_BE_PLE_FREE_PAGE_NUM_MASK);
+ rtw89_write32(rtwdev, R_BE_PLE_PKTBUF_CFG, val);
+
+ return 0;
+}
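
dle_mix_cfg_be() configures the WDE/PLE buffers with read-modify-write sequences built from u32_replace_bits(), which updates one packed field and leaves the rest of the register untouched. A self-contained sketch of that helper and its use; the masks below are invented, and __builtin_ctz is assumed available (GCC/Clang):

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's GENMASK()/u32_replace_bits(). */
#define GENMASK32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define PAGE_SEL_MASK    GENMASK32(1, 0)
#define START_BOUND_MASK GENMASK32(14, 8)

static uint32_t u32_replace_bits(uint32_t old, uint32_t val, uint32_t mask)
{
	int shift = __builtin_ctz(mask);

	return (old & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffffu;	/* pretend register readback */

	reg = u32_replace_bits(reg, 1, PAGE_SEL_MASK);	     /* page size sel */
	reg = u32_replace_bits(reg, 0x20, START_BOUND_MASK); /* start bound */
	printf("0x%08x\n", reg);	/* other fields left untouched */
	return 0;
}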
+
+static int chk_dle_rdy_be(struct rtw89_dev *rtwdev, bool wde_or_ple)
+{
+ u32 reg, mask;
+ u32 ini;
+
+ if (wde_or_ple) {
+ reg = R_AX_WDE_INI_STATUS;
+ mask = WDE_MGN_INI_RDY;
+ } else {
+ reg = R_AX_PLE_INI_STATUS;
+ mask = PLE_MGN_INI_RDY;
+ }
+
+ return read_poll_timeout(rtw89_read32, ini, (ini & mask) == mask, 1,
+ 2000, false, rtwdev, reg);
+}
+
+#define INVALID_QT_WCPU U16_MAX
+#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \
+ do { \
+ val = u32_encode_bits(_min_x, B_BE_ ## _module ## _Q ## _idx ## _MIN_SIZE_MASK) | \
+ u32_encode_bits(_max_x, B_BE_ ## _module ## _Q ## _idx ## _MAX_SIZE_MASK); \
+ rtw89_write32(rtwdev, \
+ R_BE_ ## _module ## _QTA ## _idx ## _CFG, \
+ val); \
+ } while (0)
+#define SET_QUOTA(_x, _module, _idx) \
+ SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx)
+
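
SET_QUOTA_VAL() builds register and bitfield identifiers by token pasting, so one macro body covers every WDE/PLE quota index. A toy reduction of that ## trick; the register and shift names below are made up:

#include <stdio.h>

/* Toy registers the pasted names resolve to. */
static unsigned int R_DEMO_QTA0_CFG, R_DEMO_QTA1_CFG;

#define B_DEMO_Q0_MIN_SHIFT 0
#define B_DEMO_Q0_MAX_SHIFT 16
#define B_DEMO_Q1_MIN_SHIFT 0
#define B_DEMO_Q1_MAX_SHIFT 16

#define SET_QUOTA_VAL(_min, _max, _mod, _idx)				\
	do {								\
		R_ ## _mod ## _QTA ## _idx ## _CFG =			\
			((_min) << B_ ## _mod ## _Q ## _idx ## _MIN_SHIFT) | \
			((_max) << B_ ## _mod ## _Q ## _idx ## _MAX_SHIFT); \
	} while (0)

int main(void)
{
	SET_QUOTA_VAL(8, 64, DEMO, 0);	/* writes R_DEMO_QTA0_CFG */
	SET_QUOTA_VAL(4, 32, DEMO, 1);	/* writes R_DEMO_QTA1_CFG */
	printf("0x%08x 0x%08x\n", R_DEMO_QTA0_CFG, R_DEMO_QTA1_CFG);
	return 0;
}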
+static void wde_quota_cfg_be(struct rtw89_dev *rtwdev,
+ const struct rtw89_wde_quota *min_cfg,
+ const struct rtw89_wde_quota *max_cfg,
+ u16 ext_wde_min_qt_wcpu)
+{
+ u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ?
+ ext_wde_min_qt_wcpu : min_cfg->wcpu;
+ u16 max_qt_wcpu = max(max_cfg->wcpu, min_qt_wcpu);
+ u32 val;
+
+ SET_QUOTA(hif, WDE, 0);
+ SET_QUOTA_VAL(min_qt_wcpu, max_qt_wcpu, WDE, 1);
+ SET_QUOTA_VAL(0, 0, WDE, 2);
+ SET_QUOTA(pkt_in, WDE, 3);
+ SET_QUOTA(cpu_io, WDE, 4);
+}
+
+static void ple_quota_cfg_be(struct rtw89_dev *rtwdev,
+ const struct rtw89_ple_quota *min_cfg,
+ const struct rtw89_ple_quota *max_cfg)
+{
+ u32 val;
+
+ SET_QUOTA(cma0_tx, PLE, 0);
+ SET_QUOTA(cma1_tx, PLE, 1);
+ SET_QUOTA(c2h, PLE, 2);
+ SET_QUOTA(h2c, PLE, 3);
+ SET_QUOTA(wcpu, PLE, 4);
+ SET_QUOTA(mpdu_proc, PLE, 5);
+ SET_QUOTA(cma0_dma, PLE, 6);
+ SET_QUOTA(cma1_dma, PLE, 7);
+ SET_QUOTA(bb_rpt, PLE, 8);
+ SET_QUOTA(wd_rel, PLE, 9);
+ SET_QUOTA(cpu_io, PLE, 10);
+ SET_QUOTA(tx_rpt, PLE, 11);
+ SET_QUOTA(h2d, PLE, 12);
+}
+
+static void rtw89_mac_hci_func_en_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_BE_HCI_FUNC_EN, B_BE_HCI_TXDMA_EN |
+ B_BE_HCI_RXDMA_EN);
+}
+
+static void rtw89_mac_dmac_func_pre_en_be(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ val = u32_replace_bits(val, S_BE_DMA_MOD_PCIE_NO_DATA_CPU,
+ B_BE_DMA_MODE_MASK);
+ break;
+ case RTW89_HCI_TYPE_USB:
+ val = u32_replace_bits(val, S_BE_DMA_MOD_USB, B_BE_DMA_MODE_MASK);
+ val = (val & ~B_BE_STOP_AXI_MST) | B_BE_TXDMA_EN | B_BE_RXDMA_EN;
+ break;
+ case RTW89_HCI_TYPE_SDIO:
+ val = u32_replace_bits(val, S_BE_DMA_MOD_SDIO, B_BE_DMA_MODE_MASK);
+ val = (val & ~B_BE_STOP_AXI_MST) | B_BE_TXDMA_EN | B_BE_RXDMA_EN;
+ break;
+ default:
+ return;
+ }
+
+ rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
+
+ rtw89_write32_clr(rtwdev, R_BE_HAXI_DMA_STOP1,
+ B_BE_STOP_CH0 | B_BE_STOP_CH1 | B_BE_STOP_CH2 |
+ B_BE_STOP_CH3 | B_BE_STOP_CH4 | B_BE_STOP_CH5 |
+ B_BE_STOP_CH6 | B_BE_STOP_CH7 | B_BE_STOP_CH8 |
+ B_BE_STOP_CH9 | B_BE_STOP_CH10 | B_BE_STOP_CH11 |
+ B_BE_STOP_CH12 | B_BE_STOP_CH13 | B_BE_STOP_CH14);
+
+ rtw89_write32_set(rtwdev, R_BE_DMAC_TABLE_CTRL, B_BE_DMAC_ADDR_MODE);
+}
+
+static
+int rtw89_mac_write_xtal_si_be(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
+{
+ u32 val32;
+ int ret;
+
+ val32 = u32_encode_bits(offset, B_BE_WL_XTAL_SI_ADDR_MASK) |
+ u32_encode_bits(val, B_BE_WL_XTAL_SI_DATA_MASK) |
+ u32_encode_bits(mask, B_BE_WL_XTAL_SI_BITMASK_MASK) |
+ u32_encode_bits(XTAL_SI_NORMAL_WRITE, B_BE_WL_XTAL_SI_MODE_MASK) |
+ u32_encode_bits(0, B_BE_WL_XTAL_SI_CHIPID_MASK) |
+ B_BE_WL_XTAL_SI_CMD_POLL;
+ rtw89_write32(rtwdev, R_BE_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_BE_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
+ offset, val, mask);
+ return ret;
+ }
+
+ return 0;
+}
+
+static
+int rtw89_mac_read_xtal_si_be(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
+{
+ u32 val32;
+ int ret;
+
+ val32 = u32_encode_bits(offset, B_BE_WL_XTAL_SI_ADDR_MASK) |
+ u32_encode_bits(0x0, B_BE_WL_XTAL_SI_DATA_MASK) |
+ u32_encode_bits(0x0, B_BE_WL_XTAL_SI_BITMASK_MASK) |
+ u32_encode_bits(XTAL_SI_NORMAL_READ, B_BE_WL_XTAL_SI_MODE_MASK) |
+ u32_encode_bits(0, B_BE_WL_XTAL_SI_CHIPID_MASK) |
+ B_BE_WL_XTAL_SI_CMD_POLL;
+ rtw89_write32(rtwdev, R_BE_WLAN_XTAL_SI_CTRL, val32);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_WL_XTAL_SI_CMD_POLL),
+ 50, 50000, false, rtwdev, R_BE_WLAN_XTAL_SI_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
+ return ret;
+ }
+
+ *val = rtw89_read8(rtwdev, R_BE_WLAN_XTAL_SI_CTRL + 1);
+
+ return 0;
+}
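
Both xtal_si accessors above write a command word with B_BE_WL_XTAL_SI_CMD_POLL set and then use read_poll_timeout() to wait for the hardware to clear it. A minimal userspace model of that poll-until-clear loop against a fake register; the timing and self-clearing behavior are simulated:

#include <stdio.h>

#define CMD_POLL_BIT 0x80000000u

/* Fake device: the "command pending" bit self-clears after a few reads,
 * mimicking B_BE_WL_XTAL_SI_CMD_POLL. Everything here is illustrative. */
static unsigned int fake_reg = CMD_POLL_BIT | 0xab;

static unsigned int reg_read(void)
{
	static int countdown = 3;

	if (countdown && --countdown == 0)
		fake_reg &= ~CMD_POLL_BIT;	/* hardware done */
	return fake_reg;
}

/* Simplified read_poll_timeout(): poll until clear or attempts run out. */
static int poll_until_clear(unsigned int bit, int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if (!(reg_read() & bit))
			return 0;		/* command completed */
		/* real code sleeps between polls (50us in the driver) */
	}
	return -1;				/* -ETIMEDOUT equivalent */
}

int main(void)
{
	if (poll_until_clear(CMD_POLL_BIT, 10))
		return 1;
	printf("data byte: 0x%02x\n", reg_read() & 0xff);
	return 0;
}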
+
static void rtw89_mac_disable_cpu_be(struct rtw89_dev *rtwdev)
{
u32 val32;
@@ -92,8 +456,6 @@ static int wcpu_on(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw)
u32 val32;
int ret;
- rtw89_write32_set(rtwdev, R_BE_UDM0, B_BE_UDM0_DBG_MODE_CTRL);
-
val32 = rtw89_read32(rtwdev, R_BE_HALT_C2H);
if (val32) {
rtw89_warn(rtwdev, "[SER] AON L2 Debug register not empty before Boot.\n");
@@ -117,6 +479,10 @@ static int wcpu_on(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw)
rtw89_write32(rtwdev, R_BE_HALT_H2C_CTRL, 0);
rtw89_write32(rtwdev, R_BE_HALT_C2H_CTRL, 0);
+ val32 = rtw89_read32(rtwdev, R_BE_HISR0);
+ rtw89_write32(rtwdev, R_BE_HISR0, B_BE_HALT_C2H_INT);
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "HISR0=0x%x\n", val32);
+
rtw89_write32_set(rtwdev, R_BE_SYS_CLK_CTRL, B_BE_CPU_CLK_EN);
rtw89_write32_clr(rtwdev, R_BE_SYS_CFG5,
B_BE_WDT_WAKE_PCIE_EN | B_BE_WDT_WAKE_USB_EN);
@@ -205,6 +571,1153 @@ static int rtw89_fwdl_check_path_ready_be(struct rtw89_dev *rtwdev,
rtwdev, R_BE_WCPU_FW_CTRL);
}
+static int dmac_func_en_be(struct rtw89_dev *rtwdev)
+{
+ return 0;
+}
+
+static int cmac_func_en_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
+{
+ u32 reg;
+
+ if (mac_idx > RTW89_MAC_1)
+ return -EINVAL;
+
+ if (mac_idx == RTW89_MAC_0)
+ return 0;
+
+ if (en) {
+ rtw89_write32_set(rtwdev, R_BE_AFE_CTRL1, B_BE_AFE_CTRL1_SET);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_ISO_CTRL_EXTEND, B_BE_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_CMAC1_FEN);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CK_EN, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_CK_EN_SET);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CMAC_FUNC_EN, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_CMAC_FUNC_EN_SET);
+
+ set_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
+ } else {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CMAC_FUNC_EN, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_CMAC_FUNC_EN_SET);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CK_EN, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_CK_EN_SET);
+
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_CMAC1_FEN);
+ rtw89_write32_set(rtwdev, R_BE_SYS_ISO_CTRL_EXTEND, B_BE_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_clr(rtwdev, R_BE_AFE_CTRL1, B_BE_AFE_CTRL1_SET);
+
+ clear_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
+ }
+
+ return 0;
+}
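
cmac_func_en_be() tears CMAC1 down by undoing the bring-up writes in reverse order (function, clock, reset, isolation, AFE). A tiny sketch of that symmetric ordering; the step names are placeholders:

#include <stdio.h>

static void step(const char *what) { puts(what); }

static void block_power(int en)
{
	if (en) {
		step("afe on");
		step("release isolation");
		step("deassert reset");
		step("clock on");
		step("function on");
	} else {			/* exact reverse of the enable path */
		step("function off");
		step("clock off");
		step("assert reset");
		step("apply isolation");
		step("afe off");
	}
}

int main(void)
{
	block_power(1);
	block_power(0);
	return 0;
}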
+
+static int chip_func_en_be(struct rtw89_dev *rtwdev)
+{
+ return 0;
+}
+
+static int sys_init_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = dmac_func_en_be(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = cmac_func_en_be(rtwdev, RTW89_MAC_0, true);
+ if (ret)
+ return ret;
+
+ ret = chip_func_en_be(rtwdev);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int sta_sch_init_be(struct rtw89_dev *rtwdev)
+{
+ u32 p_val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ rtw89_write8_set(rtwdev, R_BE_SS_CTRL, B_BE_SS_EN);
+
+ ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_BE_SS_INIT_DONE,
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, R_BE_SS_CTRL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]STA scheduler init\n");
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_BE_SS_CTRL, B_BE_WARM_INIT);
+ rtw89_write32_clr(rtwdev, R_BE_SS_CTRL, B_BE_BAND_TRIG_EN | B_BE_BAND1_TRIG_EN);
+
+ return 0;
+}
+
+static int mpdu_proc_init_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_MPDU_PROC, B_BE_APPEND_FCS);
+ rtw89_write32(rtwdev, R_BE_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL);
+
+ val32 = rtw89_read32(rtwdev, R_BE_HDR_SHCUT_SETTING);
+ val32 |= (B_BE_TX_HW_SEQ_EN | B_BE_TX_HW_ACK_POLICY_EN | B_BE_TX_MAC_MPDU_PROC_EN);
+ val32 &= ~B_BE_TX_ADDR_MLD_TO_LIK;
+ rtw89_write32_set(rtwdev, R_BE_HDR_SHCUT_SETTING, val32);
+
+ rtw89_write32(rtwdev, R_BE_RX_HDRTRNS, TRXCFG_MPDU_PROC_RX_HDR_CONV);
+
+ val32 = rtw89_read32(rtwdev, R_BE_DISP_FWD_WLAN_0);
+ val32 = u32_replace_bits(val32, 1, B_BE_FWD_WLAN_CPU_TYPE_0_DATA_MASK);
+ val32 = u32_replace_bits(val32, 1, B_BE_FWD_WLAN_CPU_TYPE_0_MNG_MASK);
+ val32 = u32_replace_bits(val32, 1, B_BE_FWD_WLAN_CPU_TYPE_0_CTL_MASK);
+ val32 = u32_replace_bits(val32, 1, B_BE_FWD_WLAN_CPU_TYPE_1_MASK);
+ rtw89_write32(rtwdev, R_BE_DISP_FWD_WLAN_0, val32);
+
+ return 0;
+}
+
+static int sec_eng_init_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ val32 = rtw89_read32(rtwdev, R_BE_SEC_ENG_CTRL);
+ val32 |= B_BE_CLK_EN_CGCMP | B_BE_CLK_EN_WAPI | B_BE_CLK_EN_WEP_TKIP |
+ B_BE_SEC_TX_ENC | B_BE_SEC_RX_DEC |
+ B_BE_MC_DEC | B_BE_BC_DEC |
+ B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC;
+ val32 &= ~B_BE_SEC_PRE_ENQUE_TX;
+ rtw89_write32(rtwdev, R_BE_SEC_ENG_CTRL, val32);
+
+ rtw89_write32_set(rtwdev, R_BE_SEC_MPDU_PROC, B_BE_APPEND_ICV | B_BE_APPEND_MIC);
+
+ return 0;
+}
+
+static int txpktctrl_init_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mac_dle_rsvd_qt_cfg qt_cfg;
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_get_dle_rsvd_qt_cfg(rtwdev, DLE_RSVD_QT_MPDU_INFO, &qt_cfg);
+ if (ret) {
+ rtw89_err(rtwdev, "get dle rsvd qt %d cfg fail %d\n",
+ DLE_RSVD_QT_MPDU_INFO, ret);
+ return ret;
+ }
+
+ val32 = rtw89_read32(rtwdev, R_BE_TXPKTCTL_MPDUINFO_CFG);
+ val32 = u32_replace_bits(val32, qt_cfg.pktid, B_BE_MPDUINFO_PKTID_MASK);
+ val32 = u32_replace_bits(val32, MPDU_INFO_B1_OFST, B_BE_MPDUINFO_B1_BADDR_MASK);
+ val32 |= B_BE_MPDUINFO_FEN;
+ rtw89_write32(rtwdev, R_BE_TXPKTCTL_MPDUINFO_CFG, val32);
+
+ return 0;
+}
+
+static int mlo_init_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ val32 = rtw89_read32(rtwdev, R_BE_MLO_INIT_CTL);
+
+ val32 |= B_BE_MLO_TABLE_REINIT;
+ rtw89_write32(rtwdev, R_BE_MLO_INIT_CTL, val32);
+ val32 &= ~B_BE_MLO_TABLE_REINIT;
+ rtw89_write32(rtwdev, R_BE_MLO_INIT_CTL, val32);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, val32,
+ val32 & B_BE_MLO_TABLE_INIT_DONE,
+ 1, 1000, false, rtwdev, R_BE_MLO_INIT_CTL);
+ if (ret)
+ rtw89_err(rtwdev, "[MLO]%s: MLO init polling timeout\n", __func__);
+
+ rtw89_write32_set(rtwdev, R_BE_SS_CTRL, B_BE_MLO_HW_CHGLINK_EN);
+ rtw89_write32_set(rtwdev, R_BE_CMAC_SHARE_ACQCHK_CFG_0, B_BE_R_MACID_ACQ_CHK_EN);
+
+ return ret;
+}
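
mlo_init_be() pulses B_BE_MLO_TABLE_REINIT (set, write, clear, write) and then polls for the done flag. A tiny sketch of that pulse-then-poll handshake against a fake control register; the self-completing behavior is simulated:

#include <stdio.h>

#define REINIT_BIT 0x1u
#define DONE_BIT   0x2u

static unsigned int ctl_reg;

static void reg_write(unsigned int v)
{
	ctl_reg = v;
	if (!(v & REINIT_BIT))	/* falling edge: fake hardware finishes */
		ctl_reg |= DONE_BIT;
}

int main(void)
{
	unsigned int v = ctl_reg;

	reg_write(v | REINIT_BIT);	/* assert reinit */
	reg_write(v & ~REINIT_BIT);	/* deassert: hardware kicks off */

	for (int i = 0; i < 1000; i++)	/* poll for completion */
		if (ctl_reg & DONE_BIT)
			return puts("table reinit done"), 0;
	return 1;
}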
+
+static int dmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret);
+ return ret;
+ }
+
+ ret = rtw89_mac_preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]preload init %d\n", ret);
+ return ret;
+ }
+
+ ret = rtw89_mac_hfc_init(rtwdev, true, true, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
+ return ret;
+ }
+
+ ret = sta_sch_init_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret);
+ return ret;
+ }
+
+ ret = mpdu_proc_init_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret);
+ return ret;
+ }
+
+ ret = sec_eng_init_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret);
+ return ret;
+ }
+
+ ret = txpktctrl_init_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]TX pkt ctrl init %d\n", ret);
+ return ret;
+ }
+
+ ret = mlo_init_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]MLO init %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int scheduler_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val32;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_HE_CTN_CHK_CCA_NAV, mac_idx);
+ val32 = B_BE_HE_CTN_CHK_CCA_P20 | B_BE_HE_CTN_CHK_EDCCA_P20 |
+ B_BE_HE_CTN_CHK_CCA_BITMAP | B_BE_HE_CTN_CHK_EDCCA_BITMAP |
+ B_BE_HE_CTN_CHK_NO_GNT_WL | B_BE_HE_CTN_CHK_BASIC_NAV |
+ B_BE_HE_CTN_CHK_INTRA_NAV | B_BE_HE_CTN_CHK_TX_NAV;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_HE_SIFS_CHK_CCA_NAV, mac_idx);
+ val32 = B_BE_HE_SIFS_CHK_EDCCA_P20 | B_BE_HE_SIFS_CHK_EDCCA_BITMAP |
+ B_BE_HE_SIFS_CHK_NO_GNT_WL;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TB_CHK_CCA_NAV, mac_idx);
+ val32 = B_BE_TB_CHK_EDCCA_BITMAP | B_BE_TB_CHK_NO_GNT_WL | B_BE_TB_CHK_BASIC_NAV;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CCA_CFG_0, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_NO_GNT_WL_EN);
+
+ if (is_qta_poh(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_CFG_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_PREBKF_TIME_NONAC_MASK,
+ SCH_PREBKF_24US);
+ }
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_EDCA_BCNQ_PARAM, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_BCNQ_CW_MASK, 0x32);
+ rtw89_write32_mask(rtwdev, reg, B_BE_BCNQ_AIFS_MASK, BCN_IFS_25US);
+
+ return 0;
+}
+
+static int addr_cam_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val32;
+ u16 val16;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_ADDR_CAM_CTRL, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, ADDR_CAM_SERCH_RANGE, B_BE_ADDR_CAM_RANGE_MASK);
+ val32 |= B_BE_ADDR_CAM_EN;
+ if (mac_idx == RTW89_MAC_0)
+ val32 |= B_BE_ADDR_CAM_CLR;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_ADDR_CAM_CTRL, mac_idx);
+ ret = read_poll_timeout_atomic(rtw89_read16, val16, !(val16 & B_BE_ADDR_CAM_CLR),
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, reg);
+ if (ret)
+ rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n");
+
+ return ret;
+}
+
+static int rtw89_mac_typ_fltr_opt_be(struct rtw89_dev *rtwdev,
+ enum rtw89_machdr_frame_type type,
+ enum rtw89_mac_fwd_target fwd_target,
+ u8 mac_idx)
+{
+ u32 reg;
+ u32 val;
+
+ switch (fwd_target) {
+ case RTW89_FWD_DONT_CARE:
+ val = RX_FLTR_FRAME_DROP_BE;
+ break;
+ case RTW89_FWD_TO_HOST:
+ case RTW89_FWD_TO_WLAN_CPU:
+ val = RX_FLTR_FRAME_ACCEPT_BE;
+ break;
+ default:
+ rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case RTW89_MGNT:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MGNT_FLTR, mac_idx);
+ break;
+ case RTW89_CTRL:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CTRL_FLTR, mac_idx);
+ break;
+ case RTW89_DATA:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DATA_FLTR, mac_idx);
+ break;
+ default:
+ rtw89_err(rtwdev, "[ERR]set rx filter type err\n");
+ return -EINVAL;
+ }
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+static int rx_fltr_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+ u32 val;
+
+ rtw89_mac_typ_fltr_opt_be(rtwdev, RTW89_MGNT, RTW89_FWD_TO_HOST, mac_idx);
+ rtw89_mac_typ_fltr_opt_be(rtwdev, RTW89_CTRL, RTW89_FWD_TO_HOST, mac_idx);
+ rtw89_mac_typ_fltr_opt_be(rtwdev, RTW89_DATA, RTW89_FWD_TO_HOST, mac_idx);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_FLTR_OPT, mac_idx);
+ val = B_BE_A_BC_CAM_MATCH | B_BE_A_UC_CAM_MATCH | B_BE_A_MC |
+ B_BE_A_BC | B_BE_A_A1_MATCH | B_BE_SNIFFER_MODE |
+ u32_encode_bits(15, B_BE_UID_FILTER_MASK);
+ rtw89_write32(rtwdev, reg, val);
+ u32p_replace_bits(&rtwdev->hal.rx_fltr, 15, B_BE_UID_FILTER_MASK);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PLCP_HDR_FLTR, mac_idx);
+ val = B_BE_HE_SIGB_CRC_CHK | B_BE_VHT_MU_SIGB_CRC_CHK |
+ B_BE_VHT_SU_SIGB_CRC_CHK | B_BE_SIGA_CRC_CHK |
+ B_BE_LSIG_PARITY_CHK_EN | B_BE_CCK_SIG_CHK | B_BE_CCK_CRC_CHK;
+ rtw89_write16(rtwdev, reg, val);
+
+ return 0;
+}
+
+static int cca_ctrl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ return 0;
+}
+
+static int nav_ctrl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val32;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_NAV_CTL, mac_idx);
+
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 &= ~B_BE_WMAC_PLCP_UP_NAV_EN;
+ val32 |= B_BE_WMAC_TF_UP_NAV_EN | B_BE_WMAC_NAV_UPPER_EN;
+ val32 = u32_replace_bits(val32, NAV_25MS, B_BE_WMAC_NAV_UPPER_MASK);
+
+ rtw89_write32(rtwdev, reg, val32);
+
+ return 0;
+}
+
+static int spatial_reuse_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_SR_CTRL, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, B_BE_SR_EN | B_BE_SR_CTRL_PLCP_EN);
+
+ return 0;
+}
+
+static int tmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+
+ rtw89_write32_clr(rtwdev, R_BE_TB_PPDU_CTRL, B_BE_QOSNULL_UPD_MUEDCA_EN);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
+ rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_2XLTF_ZLD_USTIMER_MASK, 0xe);
+
+ return 0;
+}
+
+static int trxptcl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 val32;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MAC_LOOPBACK, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, S_BE_MACLBK_PLCP_DLY_DEF,
+ B_BE_MACLBK_PLCP_DLY_MASK);
+ val32 &= ~B_BE_MACLBK_EN;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_0, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, WMAC_SPEC_SIFS_CCK,
+ B_BE_WMAC_SPEC_SIFS_CCK_MASK);
+ val32 = u32_replace_bits(val32, WMAC_SPEC_SIFS_OFDM_1115E,
+ B_BE_WMAC_SPEC_SIFS_OFDM_MASK);
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_LEGACY, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_ACK_BA_RESP_LEGACY_CHK_EDCCA);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_HE, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_ACK_BA_RESP_HE_CHK_EDCCA);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_ACK_BA_RESP_EHT_LEG_PUNC, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_ACK_BA_EHT_LEG_PUNC_CHK_EDCCA);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RXTRIG_TEST_USER_2, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_BE_RXTRIG_FCSCHK_EN);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TRXPTCL_RESP_1, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 &= B_BE_FTM_RRSR_RATE_EN_MASK | B_BE_WMAC_RESP_DOPPLEB_BE_EN |
+ B_BE_WMAC_RESP_DCM_EN | B_BE_WMAC_RESP_REF_RATE_MASK;
+ rtw89_write32(rtwdev, reg, val32);
+ rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_RRSR1, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 &= B_BE_RRSR_RATE_EN_MASK | B_BE_RRSR_CCK_MASK | B_BE_RSC_MASK;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_RRSR0, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 &= B_BE_RRSR_OFDM_MASK | B_BE_RRSR_HT_MASK | B_BE_RRSR_VHT_MASK |
+ B_BE_RRSR_HE_MASK;
+ rtw89_write32(rtwdev, reg, val32);
+
+ if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_RRSR1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_RSC_MASK, 1);
+ }
+
+ return 0;
+}
+
+static int rst_bacam_be(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+ int ret;
+
+ rtw89_write32_mask(rtwdev, R_BE_RESPBA_CAM_CTRL, B_BE_BACAM_RST_MASK,
+ S_BE_BACAM_RST_ALL);
+
+ ret = read_poll_timeout_atomic(rtw89_read32_mask, val, val == S_BE_BACAM_RST_DONE,
+ 1, 1000, false,
+ rtwdev, R_BE_RESPBA_CAM_CTRL, B_BE_BACAM_RST_MASK);
+ if (ret)
+ rtw89_err(rtwdev, "[ERR]bacam rst timeout\n");
+
+ return ret;
+}
+
+#define PLD_RLS_MAX_PG 127
+#define RX_MAX_LEN_UNIT 512
+#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
+
+static int rmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 rx_min_qta, rx_max_len, rx_max_pg;
+ u16 val16;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (mac_idx == RTW89_MAC_0) {
+ ret = rst_bacam_be(rtwdev);
+ if (ret)
+ return ret;
+ }
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DLK_PROTECT_CTL, mac_idx);
+ val16 = rtw89_read16(rtwdev, reg);
+ val16 = u16_replace_bits(val16, TRXCFG_RMAC_DATA_TO, B_BE_RX_DLK_DATA_TIME_MASK);
+ val16 = u16_replace_bits(val16, TRXCFG_RMAC_CCA_TO, B_BE_RX_DLK_CCA_TIME_MASK);
+ val16 |= B_BE_RX_DLK_RST_EN;
+ rtw89_write16(rtwdev, reg, val16);
+
+ if (mac_idx == RTW89_MAC_0)
+ rx_min_qta = rtwdev->mac.dle_info.c0_rx_qta;
+ else
+ rx_min_qta = rtwdev->mac.dle_info.c1_rx_qta;
+ rx_max_pg = min_t(u32, rx_min_qta, PLD_RLS_MAX_PG);
+ rx_max_len = rx_max_pg * rtwdev->mac.dle_info.ple_pg_size;
+ rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN);
+ rx_max_len /= RX_MAX_LEN_UNIT;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_FLTR_OPT, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_RX_MPDU_MAX_LEN_MASK, rx_max_len);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PLCP_HDR_FLTR, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, B_BE_VHT_SU_SIGB_CRC_CHK);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RCR, mac_idx);
+ rtw89_write16_set(rtwdev, reg, B_BE_BUSY_CHKSN);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_PLCP_EXT_OPTION_1, mac_idx);
+ rtw89_write16_set(rtwdev, reg, B_BE_PLCP_SU_PSDU_LEN_SRC);
+
+ return 0;
+}
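
rmac_init_be() clamps the RX MPDU length in three steps: the page quota is capped at PLD_RLS_MAX_PG, converted to bytes via the PLE page size, capped again at RX_SPEC_MAX_LEN, and finally expressed in 512-byte units. A worked run of that arithmetic; the 64-page quota and 128-byte page size are example inputs, not chip values:

#include <stdio.h>

#define PLD_RLS_MAX_PG	127
#define RX_MAX_LEN_UNIT	512
#define RX_SPEC_MAX_LEN	(11454 + RX_MAX_LEN_UNIT)

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Example inputs only: quota pages and PLE page size vary by chip. */
	unsigned int rx_min_qta = 64;	/* pages granted to this band */
	unsigned int ple_pg_size = 128;	/* bytes per PLE page */

	unsigned int rx_max_pg = min_u32(rx_min_qta, PLD_RLS_MAX_PG);
	unsigned int rx_max_len = rx_max_pg * ple_pg_size;

	rx_max_len = min_u32(rx_max_len, RX_SPEC_MAX_LEN);
	rx_max_len /= RX_MAX_LEN_UNIT;	/* register takes 512-byte units */

	printf("max RX MPDU field = %u (%u bytes)\n",
	       rx_max_len, rx_max_len * RX_MAX_LEN_UNIT);
	return 0;
}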
+
+static int resp_pktctl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ struct rtw89_mac_dle_rsvd_qt_cfg qt_cfg;
+ enum rtw89_mac_dle_rsvd_qt_type type;
+ u32 reg;
+ int ret;
+
+ if (mac_idx == RTW89_MAC_1)
+ type = DLE_RSVD_QT_B1_CSI;
+ else
+ type = DLE_RSVD_QT_B0_CSI;
+
+ ret = rtw89_mac_get_dle_rsvd_qt_cfg(rtwdev, type, &qt_cfg);
+ if (ret) {
+ rtw89_err(rtwdev, "get dle rsvd qt %d cfg fail %d\n", type, ret);
+ return ret;
+ }
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RESP_CSI_RESERVED_PAGE, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_CSI_RESERVED_START_PAGE_MASK, qt_cfg.pktid);
+ rtw89_write32_mask(rtwdev, reg, B_BE_CSI_RESERVED_PAGE_NUM_MASK, qt_cfg.pg_num);
+
+ return 0;
+}
+
+static int cmac_com_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (mac_idx == RTW89_MAC_0) {
+ val32 = rtw89_read32(rtwdev, R_BE_TX_SUB_BAND_VALUE);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_20M_8, B_BE_TXSB_20M_MASK);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_40M_4, B_BE_TXSB_40M_MASK);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_80M_2, B_BE_TXSB_80M_MASK);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_160M_1, B_BE_TXSB_160M_MASK);
+ rtw89_write32(rtwdev, R_BE_TX_SUB_BAND_VALUE, val32);
+ } else {
+ val32 = rtw89_read32(rtwdev, R_BE_TX_SUB_BAND_VALUE_C1);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_20M_2, B_BE_TXSB_20M_MASK);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_40M_1, B_BE_TXSB_40M_MASK);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_80M_0, B_BE_TXSB_80M_MASK);
+ val32 = u32_replace_bits(val32, S_BE_TXSB_160M_0, B_BE_TXSB_160M_MASK);
+ rtw89_write32(rtwdev, R_BE_TX_SUB_BAND_VALUE_C1, val32);
+ }
+
+ return 0;
+}
+
+static int ptcl_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val32;
+ u8 val8;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (is_qta_poh(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_SIFS_SETTING, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, S_AX_CTS2S_TH_1K,
+ B_BE_HW_CTS2SELF_PKT_LEN_TH_MASK);
+ val32 = u32_replace_bits(val32, S_AX_CTS2S_TH_SEC_256B,
+ B_BE_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
+ val32 |= B_BE_HW_CTS2SELF_EN;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_FSM_MON, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, S_AX_PTCL_TO_2MS,
+ B_BE_PTCL_TX_ARB_TO_THR_MASK);
+ val32 &= ~B_BE_PTCL_TX_ARB_TO_MODE;
+ rtw89_write32(rtwdev, reg, val32);
+ }
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_COMMON_SETTING_0, mac_idx);
+ val8 = rtw89_read8(rtwdev, reg);
+ val8 |= B_BE_CMAC_TX_MODE_0 | B_BE_CMAC_TX_MODE_1;
+ val8 &= ~(B_BE_PTCL_TRIGGER_SS_EN_0 |
+ B_BE_PTCL_TRIGGER_SS_EN_1 |
+ B_BE_PTCL_TRIGGER_SS_EN_UL);
+ rtw89_write8(rtwdev, reg, val8);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_AMPDU_AGG_LIMIT, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_AMPDU_MAX_TIME_MASK, AMPDU_MAX_TIME);
+
+ return 0;
+}
+
+static int cmac_dma_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val32;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_CTRL_1, mac_idx);
+
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, WLCPU_RXCH2_QID,
+ B_BE_RXDMA_TXRPT_QUEUE_ID_SW_MASK);
+ val32 = u32_replace_bits(val32, WLCPU_RXCH2_QID,
+ B_BE_RXDMA_F2PCMDRPT_QUEUE_ID_SW_MASK);
+ rtw89_write32(rtwdev, reg, val32);
+
+ return 0;
+}
+
+static int cmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret;
+
+ ret = scheduler_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = addr_cam_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = rx_fltr_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = cca_ctrl_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = nav_ctrl_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = spatial_reuse_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n",
+ mac_idx, ret);
+ return ret;
+ }
+
+ ret = tmac_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = trxptcl_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = rmac_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = resp_pktctl_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d resp pktctl init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = cmac_com_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = ptcl_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = cmac_dma_init_be(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+	return 0;
+}
+
+static int tx_idle_poll_band_be(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+ u8 val8;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
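+	/* poll (30us step, 66ms budget) until the PTCL TX engine goes idle */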
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PTCL_TX_CTN_SEL, mac_idx);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, val8, !(val8 & B_BE_PTCL_BUSY),
+ 30, 66000, false, rtwdev, reg);
+
+ return ret;
+}
+
+static int dle_buf_req_be(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id)
+{
+ u32 val, reg;
+ int ret;
+
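+	/* request a buffer of buf_len from the WD or payload pool, poll the
+	 * status register for completion, then read back the allocated
+	 * packet ID; an invalid ID means no page was available
+	 */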
+ reg = wd ? R_BE_WD_BUF_REQ : R_BE_PL_BUF_REQ;
+ val = buf_len;
+ val |= B_BE_WD_BUF_REQ_EXEC;
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = wd ? R_BE_WD_BUF_STATUS : R_BE_PL_BUF_STATUS;
+
+ ret = read_poll_timeout(rtw89_read32, val, val & B_BE_WD_BUF_STAT_DONE,
+ 1, 2000, false, rtwdev, reg);
+ if (ret)
+ return ret;
+
+ *pkt_id = u32_get_bits(val, B_BE_WD_BUF_STAT_PKTID_MASK);
+ if (*pkt_id == S_WD_BUF_STAT_PKTID_INVALID)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int set_cpuio_be(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd)
+{
+ u32 val_op0, val_op1, val_op2, val_op3;
+ u32 val, cmd_type, reg;
+ int ret;
+
+ cmd_type = ctrl_para->cmd_type;
+
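+	/* program the operands first (packet ID range, source, destination);
+	 * OP_0 goes last since its EXEC bit kicks the command, which is then
+	 * polled for completion below
+	 */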
+ reg = wd ? R_BE_WD_CPUQ_OP_3 : R_BE_PL_CPUQ_OP_3;
+ val_op3 = u32_replace_bits(0, ctrl_para->start_pktid,
+ B_BE_WD_CPUQ_OP_STRT_PKTID_MASK);
+ val_op3 = u32_replace_bits(val_op3, ctrl_para->end_pktid,
+ B_BE_WD_CPUQ_OP_END_PKTID_MASK);
+ rtw89_write32(rtwdev, reg, val_op3);
+
+ reg = wd ? R_BE_WD_CPUQ_OP_1 : R_BE_PL_CPUQ_OP_1;
+ val_op1 = u32_replace_bits(0, ctrl_para->src_pid,
+ B_BE_WD_CPUQ_OP_SRC_PID_MASK);
+ val_op1 = u32_replace_bits(val_op1, ctrl_para->src_qid,
+ B_BE_WD_CPUQ_OP_SRC_QID_MASK);
+ val_op1 = u32_replace_bits(val_op1, ctrl_para->macid,
+ B_BE_WD_CPUQ_OP_SRC_MACID_MASK);
+ rtw89_write32(rtwdev, reg, val_op1);
+
+ reg = wd ? R_BE_WD_CPUQ_OP_2 : R_BE_PL_CPUQ_OP_2;
+ val_op2 = u32_replace_bits(0, ctrl_para->dst_pid,
+ B_BE_WD_CPUQ_OP_DST_PID_MASK);
+ val_op2 = u32_replace_bits(val_op2, ctrl_para->dst_qid,
+ B_BE_WD_CPUQ_OP_DST_QID_MASK);
+ val_op2 = u32_replace_bits(val_op2, ctrl_para->macid,
+ B_BE_WD_CPUQ_OP_DST_MACID_MASK);
+ rtw89_write32(rtwdev, reg, val_op2);
+
+ reg = wd ? R_BE_WD_CPUQ_OP_0 : R_BE_PL_CPUQ_OP_0;
+ val_op0 = u32_replace_bits(0, cmd_type,
+ B_BE_WD_CPUQ_OP_CMD_TYPE_MASK);
+ val_op0 = u32_replace_bits(val_op0, ctrl_para->pkt_num,
+ B_BE_WD_CPUQ_OP_PKTNUM_MASK);
+ val_op0 |= B_BE_WD_CPUQ_OP_EXEC;
+ rtw89_write32(rtwdev, reg, val_op0);
+
+ reg = wd ? R_BE_WD_CPUQ_OP_STATUS : R_BE_PL_CPUQ_OP_STATUS;
+
+ ret = read_poll_timeout(rtw89_read32, val, val & B_BE_WD_CPUQ_OP_STAT_DONE,
+ 1, 2000, false, rtwdev, reg);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]set cpuio wd timeout\n");
+ rtw89_err(rtwdev, "[ERR]op_0=0x%X, op_1=0x%X, op_2=0x%X\n",
+ val_op0, val_op1, val_op2);
+ return ret;
+ }
+
+ if (cmd_type == CPUIO_OP_CMD_GET_NEXT_PID ||
+ cmd_type == CPUIO_OP_CMD_GET_1ST_PID)
+ ctrl_para->pktid = u32_get_bits(val, B_BE_WD_CPUQ_OP_PKTID_MASK);
+
+ return 0;
+}
+
+static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_qta_mode mode)
+{
+ u32 max_preld_size, min_rsvd_size;
+ u32 val32;
+ u32 reg;
+
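+	/* preload budget is the per-band entry count times the A-MSDU size;
+	 * one A-MSDU size is kept as the minimum reserve
+	 */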
+ max_preld_size = mac_idx == RTW89_MAC_0 ?
+ PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM;
+ max_preld_size *= PRELD_AMSDU_SIZE;
+
+ reg = mac_idx == RTW89_MAC_0 ? R_BE_TXPKTCTL_B0_PRELD_CFG0 :
+ R_BE_TXPKTCTL_B1_PRELD_CFG0;
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, max_preld_size, B_BE_B0_PRELD_USEMAXSZ_MASK);
+ val32 |= B_BE_B0_PRELD_FEN;
+ rtw89_write32(rtwdev, reg, val32);
+
+ min_rsvd_size = PRELD_AMSDU_SIZE;
+ reg = mac_idx == RTW89_MAC_0 ? R_BE_TXPKTCTL_B0_PRELD_CFG1 :
+ R_BE_TXPKTCTL_B1_PRELD_CFG1;
+ val32 = rtw89_read32(rtwdev, reg);
+ val32 = u32_replace_bits(val32, PRELD_NEXT_WND, B_BE_B0_PRELD_NXT_TXENDWIN_MASK);
+ val32 = u32_replace_bits(val32, min_rsvd_size, B_BE_B0_PRELD_NXT_RSVMINSZ_MASK);
+ rtw89_write32(rtwdev, reg, val32);
+
+ return 0;
+}
+
+static int dbcc_bb_ctrl_be(struct rtw89_dev *rtwdev, bool bb1_en)
+{
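+	/* no BB-side switching is done here on BE chips; kept as a hook */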
+ return 0;
+}
+
+static int enable_imr_be(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_mac_hwmod_sel sel)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_imr_table *table;
+ const struct rtw89_reg_imr *reg;
+ u32 addr;
+ u32 val;
+ int i;
+
+ if (sel == RTW89_DMAC_SEL)
+ table = chip->imr_dmac_table;
+ else if (sel == RTW89_CMAC_SEL)
+ table = chip->imr_cmac_table;
+ else
+ return -EINVAL;
+
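+	/* walk the chip's IMR table: for each register, clear the bits
+	 * listed in ->clr and set the ones in ->set
+	 */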
+ for (i = 0; i < table->n_regs; i++) {
+ reg = &table->regs[i];
+ addr = rtw89_mac_reg_by_idx(rtwdev, reg->addr, mac_idx);
+
+ val = rtw89_read32(rtwdev, addr);
+ val &= ~reg->clr;
+ val |= reg->set;
+ rtw89_write32(rtwdev, addr, val);
+ }
+
+ return 0;
+}
+
+static void err_imr_ctrl_be(struct rtw89_dev *rtwdev, bool en)
+{
+ u32 v32_dmac = en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS;
+ u32 v32_cmac0 = en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS;
+ u32 v32_cmac1 = en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS;
+
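+	/* the DMAC "no TX" error interrupt stays masked whether enabling or
+	 * disabling
+	 */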
+ v32_dmac &= ~B_BE_DMAC_NOTX_ERR_INT_EN;
+
+ rtw89_write32(rtwdev, R_BE_DMAC_ERR_IMR, v32_dmac);
+ rtw89_write32(rtwdev, R_BE_CMAC_ERR_IMR, v32_cmac0);
+
+ if (rtwdev->dbcc_en)
+ rtw89_write32(rtwdev, R_BE_CMAC_ERR_IMR_C1, v32_cmac1);
+}
+
+static int band1_enable_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = tx_idle_poll_band_be(rtwdev, RTW89_MAC_0);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret);
+ return ret;
+ }
+
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
+ return ret;
+ }
+
+ ret = preload_init_be(rtwdev, RTW89_MAC_1, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]preload init B1 %d\n", ret);
+ return ret;
+ }
+
+ ret = cmac_func_en_be(rtwdev, RTW89_MAC_1, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d func en %d\n", RTW89_MAC_1, ret);
+ return ret;
+ }
+
+ ret = cmac_init_be(rtwdev, RTW89_MAC_1);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", RTW89_MAC_1, ret);
+ return ret;
+ }
+
+ ret = dbcc_bb_ctrl_be(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]enable bb 1 %d\n", ret);
+ return ret;
+ }
+
+ ret = enable_imr_be(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int band1_disable_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = dbcc_bb_ctrl_be(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]disable bb 1 %d\n", ret);
+ return ret;
+ }
+
+ ret = cmac_func_en_be(rtwdev, RTW89_MAC_1, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d func dis %d\n", RTW89_MAC_1, ret);
+ return ret;
+ }
+
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ ret = band1_enable_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret);
+ return ret;
+ }
+
+ if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
+ ret = rtw89_fw_h2c_notify_dbcc(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ } else {
+ if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
+ ret = rtw89_fw_h2c_notify_dbcc(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ ret = band1_disable_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] band1_disable %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int set_host_rpr_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u32 mode;
+ u32 fltr;
+ bool poh;
+
+ poh = is_qta_poh(rtwdev);
+
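+	/* choose the WD release report mode from the quota mode: POH filters
+	 * out TXOK/retry-limit/lifetime/MACID reports, STF leaves the filter
+	 * map clear
+	 */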
+ if (poh) {
+ mode = RTW89_RPR_MODE_POH;
+ fltr = S_BE_WDRLS_FLTR_TXOK | S_BE_WDRLS_FLTR_RTYLMT |
+ S_BE_WDRLS_FLTR_LIFTIM | S_BE_WDRLS_FLTR_MACID;
+ } else {
+ mode = RTW89_RPR_MODE_STF;
+ fltr = 0;
+ }
+
+ rtw89_write32_mask(rtwdev, R_BE_WDRLS_CFG, B_BE_WDRLS_MODE_MASK, mode);
+
+ val32 = rtw89_read32(rtwdev, R_BE_RLSRPT0_CFG1);
+ val32 = u32_replace_bits(val32, fltr, B_BE_RLSRPT0_FLTR_MAP_MASK);
+ val32 = u32_replace_bits(val32, 30, B_BE_RLSRPT0_AGGNUM_MASK);
+ val32 = u32_replace_bits(val32, 255, B_BE_RLSRPT0_TO_MASK);
+ rtw89_write32(rtwdev, R_BE_RLSRPT0_CFG1, val32);
+
+ return 0;
+}
+
+static int trx_init_be(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
+ int ret;
+
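+	/* bring-up order: DMAC, CMAC0, optional band 1 (DBCC), the DMAC and
+	 * CMAC0 IMRs, error IMRs, then the host report path
+	 */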
+ ret = dmac_init_be(rtwdev, 0);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret);
+ return ret;
+ }
+
+ ret = cmac_init_be(rtwdev, 0);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret);
+ return ret;
+ }
+
+ if (rtw89_mac_is_qta_dbcc(rtwdev, qta_mode)) {
+ ret = dbcc_enable_be(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = enable_imr_be(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
+ return ret;
+ }
+
+ ret = enable_imr_be(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret);
+ return ret;
+ }
+
+ err_imr_ctrl_be(rtwdev, true);
+
+ ret = set_host_rpr_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
@@ -404,6 +1917,299 @@ static void rtw89_mac_bf_assoc_be(struct rtw89_dev *rtwdev,
}
}
+static void dump_err_status_dispatcher_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_info(rtwdev, "R_BE_DISP_HOST_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_BE_DISP_HOST_IMR));
+ rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR1));
+ rtw89_info(rtwdev, "R_BE_DISP_CPU_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_BE_DISP_CPU_IMR));
+ rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR2=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR2));
+ rtw89_info(rtwdev, "R_BE_DISP_OTHER_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_BE_DISP_OTHER_IMR));
+ rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR0=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR0));
+}
+
+static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mac_dle_dfi_qempty qempty;
+ struct rtw89_mac_dle_dfi_quota quota;
+ struct rtw89_mac_dle_dfi_ctrl ctrl;
+ u32 val, not_empty, i;
+ int ret;
+
+ qempty.dle_type = DLE_CTRL_TYPE_PLE;
+ qempty.grpsel = 0;
+ qempty.qempty = ~(u32)0;
+ ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty);
+
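+	/* for every PLE queue left non-empty, dump its packet count from the
+	 * queue link table
+	 */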
+ for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) {
+ if (!(not_empty & BIT(0)))
+ continue;
+ ctrl.type = DLE_CTRL_TYPE_PLE;
+ ctrl.target = DLE_DFI_TYPE_QLNKTBL;
+ ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) |
+ u32_encode_bits(i, QLNKTBL_ADDR_TBL_IDX_MASK);
+ ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "qidx%d pktcnt = %d\n", i,
+ u32_get_bits(ctrl.out_data,
+ QLNKTBL_DATA_SEL1_PKT_CNT_MASK));
+ }
+
+ quota.dle_type = DLE_CTRL_TYPE_PLE;
+ quota.qtaid = 6;
+ ret = rtw89_mac_dle_dfi_quota_cfg(rtwdev, &quota);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n",
+ quota.rsv_pgnum, quota.use_pgnum);
+
+ val = rtw89_read32(rtwdev, R_BE_PLE_QTA6_CFG);
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%x\n",
+ u32_get_bits(val, B_BE_PLE_Q6_MIN_SIZE_MASK));
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%x\n",
+ u32_get_bits(val, B_BE_PLE_Q6_MAX_SIZE_MASK));
+ val = rtw89_read32(rtwdev, R_BE_RX_FLTR_OPT);
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]B_BE_RX_MPDU_MAX_LEN=0x%x\n",
+ u32_get_bits(val, B_BE_RX_MPDU_MAX_LEN_MASK));
+ rtw89_info(rtwdev, "R_BE_RSP_CHK_SIG=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_RSP_CHK_SIG));
+ rtw89_info(rtwdev, "R_BE_TRXPTCL_RESP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_TRXPTCL_RESP_0));
+
+ if (!rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL)) {
+ quota.dle_type = DLE_CTRL_TYPE_PLE;
+ quota.qtaid = 7;
+ ret = rtw89_mac_dle_dfi_quota_cfg(rtwdev, &quota);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "quota7 rsv/use: 0x%x/0x%x\n",
+ quota.rsv_pgnum, quota.use_pgnum);
+
+ val = rtw89_read32(rtwdev, R_BE_PLE_QTA7_CFG);
+ rtw89_info(rtwdev, "[PLE][CMAC1_RX]min_pgnum=0x%x\n",
+ u32_get_bits(val, B_BE_PLE_Q7_MIN_SIZE_MASK));
+ rtw89_info(rtwdev, "[PLE][CMAC1_RX]max_pgnum=0x%x\n",
+ u32_get_bits(val, B_BE_PLE_Q7_MAX_SIZE_MASK));
+ val = rtw89_read32(rtwdev, R_BE_RX_FLTR_OPT_C1);
+ rtw89_info(rtwdev, "[PLE][CMAC1_RX]B_BE_RX_MPDU_MAX_LEN=0x%x\n",
+ u32_get_bits(val, B_BE_RX_MPDU_MAX_LEN_MASK));
+ rtw89_info(rtwdev, "R_BE_RSP_CHK_SIG_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_RSP_CHK_SIG_C1));
+ rtw89_info(rtwdev, "R_BE_TRXPTCL_RESP_0_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_TRXPTCL_RESP_0_C1));
+ }
+
+ rtw89_info(rtwdev, "R_BE_DLE_EMPTY0=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DLE_EMPTY0));
+ rtw89_info(rtwdev, "R_BE_DLE_EMPTY1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_DLE_EMPTY1));
+
+ dump_err_status_dispatcher_be(rtwdev);
+}
+
+static void rtw89_mac_dump_cmac_err_status_be(struct rtw89_dev *rtwdev,
+ u8 band)
+{
+ u32 offset = 0;
+ u32 cmac_err;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_info(rtwdev, "[CMAC] : CMAC%d not enabled\n", band);
+ return;
+ }
+
+ if (band)
+ offset = RTW89_MAC_BE_BAND_REG_OFFSET;
+
+ cmac_err = rtw89_read32(rtwdev, R_BE_CMAC_ERR_ISR + offset);
+ rtw89_info(rtwdev, "R_BE_CMAC_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_CMAC_ERR_ISR + offset));
+ rtw89_info(rtwdev, "R_BE_CMAC_FUNC_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_CMAC_FUNC_EN + offset));
+ rtw89_info(rtwdev, "R_BE_CK_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_CK_EN + offset));
+
+ if (cmac_err & B_BE_SCHEDULE_TOP_ERR_IND) {
+ rtw89_info(rtwdev, "R_BE_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_SCHEDULE_ERR_IMR + offset));
+ rtw89_info(rtwdev, "R_BE_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_SCHEDULE_ERR_ISR + offset));
+ }
+
+ if (cmac_err & B_BE_PTCL_TOP_ERR_IND) {
+ rtw89_info(rtwdev, "R_BE_PTCL_IMR0 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_PTCL_IMR0 + offset));
+ rtw89_info(rtwdev, "R_BE_PTCL_ISR0 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_PTCL_ISR0 + offset));
+ rtw89_info(rtwdev, "R_BE_PTCL_IMR1 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_PTCL_IMR1 + offset));
+ rtw89_info(rtwdev, "R_BE_PTCL_ISR1 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_PTCL_ISR1 + offset));
+ }
+
+ if (cmac_err & B_BE_DMA_TOP_ERR_IND) {
+ rtw89_info(rtwdev, "R_BE_RX_ERROR_FLAG_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_RX_ERROR_FLAG_IMR + offset));
+ rtw89_info(rtwdev, "R_BE_RX_ERROR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_RX_ERROR_FLAG + offset));
+ rtw89_info(rtwdev, "R_BE_TX_ERROR_FLAG_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_TX_ERROR_FLAG_IMR + offset));
+ rtw89_info(rtwdev, "R_BE_TX_ERROR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_TX_ERROR_FLAG + offset));
+ rtw89_info(rtwdev, "R_BE_RX_ERROR_FLAG_IMR_1 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_RX_ERROR_FLAG_IMR_1 + offset));
+ rtw89_info(rtwdev, "R_BE_RX_ERROR_FLAG_1 [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_RX_ERROR_FLAG_1 + offset));
+ }
+
+ if (cmac_err & B_BE_PHYINTF_ERR_IND) {
+ rtw89_info(rtwdev, "R_BE_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_PHYINFO_ERR_IMR_V1 + offset));
+ rtw89_info(rtwdev, "R_BE_PHYINFO_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_PHYINFO_ERR_ISR + offset));
+ }
+
+ if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
+ rtw89_info(rtwdev, "R_BE_TXPWR_ERR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_TXPWR_ERR_FLAG + offset));
+ rtw89_info(rtwdev, "R_BE_TXPWR_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_TXPWR_ERR_IMR + offset));
+ }
+
+ if (cmac_err & (B_BE_WMAC_RX_ERR_IND | B_BE_WMAC_TX_ERR_IND |
+ B_BE_WMAC_RX_IDLETO_IDCT | B_BE_PTCL_TX_IDLETO_IDCT)) {
+ rtw89_info(rtwdev, "R_BE_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_DBGSEL_TRXPTCL + offset));
+ rtw89_info(rtwdev, "R_BE_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_TRXPTCL_ERROR_INDICA_MASK + offset));
+ rtw89_info(rtwdev, "R_BE_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_TRXPTCL_ERROR_INDICA + offset));
+ rtw89_info(rtwdev, "R_BE_RX_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_RX_ERR_IMR + offset));
+ rtw89_info(rtwdev, "R_BE_RX_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_RX_ERR_ISR + offset));
+ }
+
+ rtw89_info(rtwdev, "R_BE_CMAC_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_BE_CMAC_ERR_IMR + offset));
+}
+
+static void rtw89_mac_dump_err_status_be(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err)
+{
+ if (err != MAC_AX_ERR_L1_ERR_DMAC &&
+ err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC0 &&
+ err != MAC_AX_ERR_L0_ERR_CMAC1 &&
+ err != MAC_AX_ERR_RXI300)
+ return;
+
+ rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
+ rtw89_info(rtwdev, "R_BE_SER_DBG_INFO=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_DBG_INFO));
+ rtw89_info(rtwdev, "R_BE_SER_L0_DBG_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L0_DBG_CNT));
+ rtw89_info(rtwdev, "R_BE_SER_L0_DBG_CNT1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L0_DBG_CNT1));
+ rtw89_info(rtwdev, "R_BE_SER_L0_DBG_CNT2=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L0_DBG_CNT2));
+ rtw89_info(rtwdev, "R_BE_SER_L0_DBG_CNT3=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L0_DBG_CNT3));
+ if (!rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL)) {
+ rtw89_info(rtwdev, "R_BE_SER_L0_DBG_CNT_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L0_DBG_CNT_C1));
+ rtw89_info(rtwdev, "R_BE_SER_L0_DBG_CNT1_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L0_DBG_CNT1_C1));
+ }
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_0));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_1));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_2));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_3=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_3));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_4=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_4));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_5=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_5));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_6=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_6));
+ rtw89_info(rtwdev, "R_BE_SER_L1_DBG_CNT_7=0x%08x\n",
+ rtw89_read32(rtwdev, R_BE_SER_L1_DBG_CNT_7));
+
+ rtw89_mac_dump_dmac_err_status(rtwdev);
+ rtw89_mac_dump_cmac_err_status_be(rtwdev, RTW89_MAC_0);
+ rtw89_mac_dump_cmac_err_status_be(rtwdev, RTW89_MAC_1);
+
+ rtwdev->hci.ops->dump_err_status(rtwdev);
+
+ if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1)
+ rtw89_mac_dump_l0_to_l1(rtwdev, err);
+
+ rtw89_info(rtwdev, "<---\n");
+}
+
+static bool mac_is_txq_empty_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mac_dle_dfi_qempty qempty;
+ u32 val32, msk32;
+ u32 grpnum;
+ int ret;
+ int i;
+
+ grpnum = rtwdev->chip->wde_qempty_acq_grpnum;
+ qempty.dle_type = DLE_CTRL_TYPE_WDE;
+
+ for (i = 0; i < grpnum; i++) {
+ qempty.grpsel = i;
+ ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
+ if (ret) {
+ rtw89_warn(rtwdev,
+ "%s: failed to dle dfi acq empty: %d\n",
+ __func__, ret);
+ return false;
+ }
+
+		/* Each ACQ group covers 32 queues (8 MACIDs * 4 ACQs), so it
+		 * is enough to check that every bit in the group is set.
+		 */
+ if (qempty.qempty != MASKDWORD)
+ return false;
+ }
+
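+	/* ACQs are empty; also require both CMAC management queues and the
+	 * "others" WDE queues to be empty
+	 */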
+ qempty.grpsel = rtwdev->chip->wde_qempty_mgq_grpsel;
+ ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
+ if (ret) {
+ rtw89_warn(rtwdev, "%s: failed to dle dfi mgq empty: %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ msk32 = B_CMAC0_MGQ_NORMAL_BE | B_CMAC1_MGQ_NORMAL_BE;
+ if ((qempty.qempty & msk32) != msk32)
+ return false;
+
+ msk32 = B_BE_WDE_EMPTY_QUE_OTHERS;
+ val32 = rtw89_read32(rtwdev, R_BE_DLE_EMPTY0);
+ return (val32 & msk32) == msk32;
+}
+
const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.band1_offset = RTW89_MAC_BE_BAND_REG_OFFSET,
.filter_model_addr = R_BE_FILTER_MODEL_ADDR,
@@ -423,13 +2229,44 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN,
},
+ .check_mac_en = rtw89_mac_check_mac_en_be,
+ .sys_init = sys_init_be,
+ .trx_init = trx_init_be,
+ .hci_func_en = rtw89_mac_hci_func_en_be,
+ .dmac_func_pre_en = rtw89_mac_dmac_func_pre_en_be,
+ .dle_func_en = dle_func_en_be,
+ .dle_clk_en = dle_clk_en_be,
.bf_assoc = rtw89_mac_bf_assoc_be,
+ .typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
+
+ .dle_mix_cfg = dle_mix_cfg_be,
+ .chk_dle_rdy = chk_dle_rdy_be,
+ .dle_buf_req = dle_buf_req_be,
+ .hfc_func_en = hfc_func_en_be,
+ .hfc_h2c_cfg = hfc_h2c_cfg_be,
+ .hfc_mix_cfg = hfc_mix_cfg_be,
+ .hfc_get_mix_info = hfc_get_mix_info_be,
+ .wde_quota_cfg = wde_quota_cfg_be,
+ .ple_quota_cfg = ple_quota_cfg_be,
+ .set_cpuio = set_cpuio_be,
+
.disable_cpu = rtw89_mac_disable_cpu_be,
.fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be,
.fwdl_get_status = fwdl_get_status_be,
.fwdl_check_path_ready = rtw89_fwdl_check_path_ready_be,
+ .parse_efuse_map = rtw89_parse_efuse_map_be,
+ .parse_phycap_map = rtw89_parse_phycap_map_be,
+ .cnv_efuse_state = rtw89_cnv_efuse_state_be,
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_be,
+
+ .write_xtal_si = rtw89_mac_write_xtal_si_be,
+ .read_xtal_si = rtw89_mac_read_xtal_si_be,
+
+ .dump_qta_lost = rtw89_mac_dump_qta_lost_be,
+ .dump_err_status = rtw89_mac_dump_err_status_be,
+
+ .is_txq_empty = mac_is_txq_empty_be,
};
EXPORT_SYMBOL(rtw89_mac_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 14ddb0d39..cb03474f8 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -19,28 +19,25 @@ MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
-static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
u32 val;
int ret;
- rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
- rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);
ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
rtwdev, R_AX_PCIE_INIT_CFG1);
- if (ret)
- return -EBUSY;
-
- return 0;
+ return ret;
}
static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
struct rtw89_pci_dma_ring *bd_ring,
u32 cur_idx, bool tx)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 cnt, cur_rp, wp, rp, len;
rp = bd_ring->rp;
@@ -48,10 +45,14 @@ static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
len = bd_ring->len;
cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
- if (tx)
+ if (tx) {
cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
- else
+ } else {
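+		/* when rp == wp denotes a full ring, wp trails the next RX
+		 * slot by one; step past it before counting
+		 */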
+ if (info->rx_ring_eq_is_full)
+ wp += 1;
+
cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
+ }
bd_ring->rp = cur_rp;
@@ -154,8 +155,8 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
DMA_FROM_DEVICE);
}
-static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
- struct sk_buff *skb)
+static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
{
struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
@@ -165,10 +166,58 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+}
+
+static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 target_rx_tag;
+
+ if (!info->check_rx_tag)
+ return 0;
+
+	/* valid tag range is 1 ~ 0x1FFF; a stored 0 means expect the first tag */
+ if (rx_ring->target_rx_tag == 0)
+ target_rx_tag = 1;
+ else
+ target_rx_tag = rx_ring->target_rx_tag;
+
+ if (rx_info->tag != target_rx_tag) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
+ rx_info->tag, target_rx_tag);
+ return -EAGAIN;
+ }
return 0;
}
+static
+int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ int rx_tag_retry = 100;
+ int ret;
+
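+	/* the tag check guards against reading a buffer the DMA has not
+	 * finished writing: re-sync and re-read the RX BD info until the tag
+	 * matches, with a bounded number of retries
+	 */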
+ do {
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ rtw89_pci_rxbd_info_update(rtwdev, skb);
+
+ ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
+ if (ret != -EAGAIN)
+ break;
+ } while (rx_tag_retry--);
+
+ /* update target rx_tag for next RX */
+ rx_ring->target_rx_tag = rx_info->tag + 1;
+
+ return ret;
+}
+
static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
@@ -226,6 +275,21 @@ rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
return true;
}
+static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_dma_ring *bd_ring)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 wp = bd_ring->wp;
+
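+	/* under the one-slot-gap convention the next buffer to process sits
+	 * just past wp, with wraparound
+	 */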
+ if (!info->rx_ring_eq_is_full)
+ return wp;
+
+ if (++wp >= bd_ring->len)
+ wp = 0;
+
+ return wp;
+}
+
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring)
{
@@ -235,15 +299,16 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
struct sk_buff *new = rx_ring->diliver_skb;
struct sk_buff *skb;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
+ u32 skb_idx;
u32 offset;
u32 cnt = 1;
bool fs, ls;
int ret;
- skb = rx_ring->buf[bd_ring->wp];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
+ skb = rx_ring->buf[skb_idx];
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -525,13 +590,14 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
u32 cnt = 0;
u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
+ u32 skb_idx;
u32 offset;
int ret;
- skb = rx_ring->buf[bd_ring->wp];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
+ skb = rx_ring->buf[skb_idx];
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -676,11 +742,26 @@ void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
-static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
+void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
{
- /* write 1 clear */
- rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
+ isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
+ isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
+ rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
+ isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
+ rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR);
+
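+	/* ack what was latched; these status registers are write-1-clear */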
+ if (isrs->halt_c2h_isrs)
+ rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
+ if (isrs->isrs[0])
+ rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
+ if (isrs->isrs[1])
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
+ rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
@@ -713,6 +794,22 @@ void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpc
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
+void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
+ rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
+ rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
+}
+EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);
+
+void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
+}
+EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
+
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@@ -753,6 +850,8 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
struct rtw89_dev *rtwdev = dev;
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
struct rtw89_pci_isrs isrs;
unsigned long flags;
@@ -760,13 +859,13 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
- if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
+ if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
- if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
+ if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
- if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
+ if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
if (unlikely(rtwpci->under_recovery))
@@ -817,6 +916,15 @@ exit:
return irqret;
}
+#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
+ [RTW89_TXCH_##ch_idx] = { \
+ .num = R_##gen##_##txch##_TXBD_NUM ##v, \
+ .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
+ .bdram = 0, \
+ .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
+ .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
+ }
+
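+/* BE-generation DMA channels have no BDRAM config register, hence .bdram = 0 */
+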
#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
[RTW89_TXCH_##txch] = { \
.num = R_AX_##txch##_TXBD_NUM ##v, \
@@ -835,12 +943,12 @@ exit:
.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
}
-#define DEF_RXCHADDRS(info, rxch, v...) \
- [RTW89_RXCH_##rxch] = { \
- .num = R_AX_##rxch##_RXBD_NUM ##v, \
- .idx = R_AX_##rxch##_RXBD_IDX ##v, \
- .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
- .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
+#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
+ [RTW89_RXCH_##ch_idx] = { \
+ .num = R_##gen##_##rxch##_RXBD_NUM ##v, \
+ .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
+ .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
+ .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
}
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
@@ -860,8 +968,8 @@ const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
DEF_TXCHADDRS(info, CH12),
},
.rx = {
- DEF_RXCHADDRS(info, RXQ),
- DEF_RXCHADDRS(info, RPQ),
+ DEF_RXCHADDRS(AX, RXQ, RXQ),
+ DEF_RXCHADDRS(AX, RPQ, RPQ),
},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
@@ -883,12 +991,35 @@ const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
DEF_TXCHADDRS(info, CH12, _V1),
},
.rx = {
- DEF_RXCHADDRS(info, RXQ, _V1),
- DEF_RXCHADDRS(info, RPQ, _V1),
+ DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
+ DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
+ .tx = {
+ DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
+ DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
+ },
+ .rx = {
+ DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
+ DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
+
#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS
@@ -1422,6 +1553,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
struct rtw89_pci_dma_ring *bd_ring;
const struct rtw89_pci_bd_ram *bd_ram;
u32 addr_num;
+ u32 addr_idx;
u32 addr_bdram;
u32 addr_desa_l;
u32 val32;
@@ -1433,19 +1565,21 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
- bd_ram = &bd_ram_table[i];
+ bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
addr_num = bd_ring->addr.num;
addr_bdram = bd_ring->addr.bdram;
addr_desa_l = bd_ring->addr.desa_l;
bd_ring->wp = 0;
bd_ring->rp = 0;
- val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
- FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
- FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
-
rtw89_write16(rtwdev, addr_num, bd_ring->len);
- rtw89_write32(rtwdev, addr_bdram, val32);
+ if (addr_bdram && bd_ram) {
+ val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
+ FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
+ FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
+
+ rtw89_write32(rtwdev, addr_bdram, val32);
+ }
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
}
@@ -1453,14 +1587,22 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rx_ring = &rtwpci->rx_rings[i];
bd_ring = &rx_ring->bd_ring;
addr_num = bd_ring->addr.num;
+ addr_idx = bd_ring->addr.idx;
addr_desa_l = bd_ring->addr.desa_l;
- bd_ring->wp = 0;
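+		/* under the eq-is-full convention the driver keeps the write
+		 * index one slot behind, so start it at len - 1 instead of 0
+		 */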
+ if (info->rx_ring_eq_is_full)
+ bd_ring->wp = bd_ring->len - 1;
+ else
+ bd_ring->wp = 0;
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+
+ if (info->rx_ring_eq_is_full)
+ rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
}
}
@@ -1471,7 +1613,7 @@ static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}
-static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
+void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
@@ -1684,24 +1826,16 @@ static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- u32 reg, mask;
-
- if (chip_id == RTL8852C) {
- reg = R_AX_HAXI_INIT_CFG1;
- mask = B_AX_STOP_AXI_MST;
- } else {
- reg = R_AX_PCIE_DMA_STOP1;
- mask = B_AX_STOP_PCIEIO;
- }
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_reg_def *reg = &info->dma_io_stop;
if (enable)
- rtw89_write32_clr(rtwdev, reg, mask);
+ rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
else
- rtw89_write32_set(rtwdev, reg, mask);
+ rtw89_write32_set(rtwdev, reg->addr, reg->mask);
}
-static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
rtw89_pci_ctrl_dma_io(rtwdev, enable);
rtw89_pci_ctrl_dma_trx(rtwdev, enable);
@@ -2303,7 +2437,7 @@ static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
}
-static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
+static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -2491,7 +2625,7 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
+static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
@@ -2550,7 +2684,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
/* fill TRX BD indexes */
rtw89_pci_ops_reset(rtwdev);
- ret = rtw89_pci_rst_bdram_pcie(rtwdev);
+ ret = rtw89_pci_rst_bdram_ax(rtwdev);
if (ret) {
rtw89_warn(rtwdev, "reset bdram busy\n");
return ret;
@@ -2648,7 +2782,7 @@ int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
}
EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
-static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
+static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -3026,6 +3160,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 desc_size, u32 len, u32 rxch)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
u8 *head;
@@ -3052,11 +3187,15 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->bd_ring.len = len;
rx_ring->bd_ring.desc_size = desc_size;
rx_ring->bd_ring.addr = *rxch_addr;
- rx_ring->bd_ring.wp = 0;
+ if (info->rx_ring_eq_is_full)
+ rx_ring->bd_ring.wp = len - 1;
+ else
+ rx_ring->bd_ring.wp = 0;
rx_ring->bd_ring.rp = 0;
rx_ring->buf_sz = buf_sz;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
for (i = 0; i < len; i++) {
skb = dev_alloc_skb(buf_sz);
@@ -3289,6 +3428,55 @@ void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
+static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
+ B_BE_PCIE_RX_RPQ0_IMR0_V1;
+}
+
+static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
+ B_BE_HS0_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
+ B_BE_RDU_CH0_INT_IMR_V1;
+ rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
+ B_BE_PCIE_RX_RPQ0_IMR0_V1;
+}
+
+static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
+ B_BE_HS1_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
+ B_BE_PCIE_RX_RPQ0_IMR0_V1;
+}
+
+void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (rtwpci->under_recovery)
+ rtw89_pci_recovery_intr_mask_v2(rtwdev);
+ else if (rtwpci->low_power)
+ rtw89_pci_low_power_intr_mask_v2(rtwdev);
+ else
+ rtw89_pci_default_intr_mask_v2(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
+
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -3480,19 +3668,27 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_traffic_stats *stats = &rtwdev->stats;
enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
u32 val = 0;
- if (!rtwdev->scanning &&
- (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
+ if (rtwdev->scanning ||
+ (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
+ goto out;
+
+ if (chip_gen == RTW89_CHIP_BE)
+ val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
+ else
val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
- rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
+out:
+ rtw89_write32(rtwdev, info->mit_addr, val);
}
static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
@@ -3582,7 +3778,7 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
rtw89_pci_l1ss_set(rtwdev, true);
}
-static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
{
int ret = 0;
u32 sts;
@@ -3599,7 +3795,7 @@ static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
return ret;
}
-static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
+static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
{
u32 val;
int ret;
@@ -3608,7 +3804,7 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
return 0;
rtw89_pci_ctrl_dma_all(rtwdev, false);
- ret = rtw89_pci_poll_io_idle(rtwdev);
+ ret = rtw89_pci_poll_io_idle_ax(rtwdev);
if (ret) {
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
rtw89_debug(rtwdev, RTW89_DBG_HCI,
@@ -3619,7 +3815,7 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
if (val & B_AX_RX_STUCK)
rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
- ret = rtw89_pci_poll_io_idle(rtwdev);
+ ret = rtw89_pci_poll_io_idle_ax(rtwdev);
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
rtw89_debug(rtwdev, RTW89_DBG_HCI,
"[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
@@ -3629,23 +3825,7 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
return ret;
}
-
-
-static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
-{
- int ret = 0;
- u32 val32, sts;
-
- val32 = B_AX_RST_BDRAM;
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
-
- ret = read_poll_timeout_atomic(rtw89_read32, sts,
- (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
- true, rtwdev, R_AX_PCIE_INIT_CFG1);
- return ret;
-}
-
-static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
+static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
{
u32 ret;
@@ -3656,7 +3836,7 @@ static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
rtw89_pci_clr_idx_all(rtwdev);
- ret = rtw89_pci_rst_bdram(rtwdev);
+ ret = rtw89_pci_rst_bdram_ax(rtwdev);
if (ret)
return ret;
@@ -3667,18 +3847,20 @@ static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
enum rtw89_lv1_rcvy_step step)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
int ret;
switch (step) {
case RTW89_LV1_RCVY_STEP_1:
- ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
+ ret = gen_def->lv1rst_stop_dma(rtwdev);
if (ret)
rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
break;
case RTW89_LV1_RCVY_STEP_2:
- ret = rtw89_pci_lv1rst_start_dma(rtwdev);
+ ret = gen_def->lv1rst_start_dma(rtwdev);
if (ret)
rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
break;
@@ -3692,29 +3874,41 @@ static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
{
- rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
- rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
- rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ return;
+
+ if (rtwdev->chip->chip_id == RTL8852C) {
+ rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
+ rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
+ } else {
+ rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
+ rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
+ rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
+ }
}
static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
{
struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
unsigned long flags;
int work_done;
rtwdev->napi_budget_countdown = budget;
- rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
+ rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done == budget)
return budget;
- rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
+ rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&rtwpci->irq_lock, flags);
@@ -3791,6 +3985,26 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);
+const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+ .isr_rdu = B_AX_RDU_INT,
+ .isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
+ .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
+ .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
+ .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
+ B_AX_RDU_INT},
+
+ .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
+ .mac_pre_deinit = NULL,
+ .mac_post_init = rtw89_pci_ops_mac_post_init_ax,
+
+ .clr_idx_all = rtw89_pci_clr_idx_all_ax,
+ .rst_bdram = rtw89_pci_rst_bdram_ax,
+
+ .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
+ .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
+};
+EXPORT_SYMBOL(rtw89_pci_gen_ax);
+
static const struct rtw89_hci_ops rtw89_pci_ops = {
.tx_write = rtw89_pci_ops_tx_write,
.tx_kick_off = rtw89_pci_ops_tx_kick_off,
@@ -3810,6 +4024,7 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.write32 = rtw89_pci_ops_write32,
.mac_pre_init = rtw89_pci_ops_mac_pre_init,
+ .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit,
.mac_post_init = rtw89_pci_ops_mac_post_init,
.deinit = rtw89_pci_ops_deinit,
@@ -3829,7 +4044,7 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.clear = rtw89_pci_clear_resource,
.disable_intr = rtw89_pci_disable_intr_lock,
.enable_intr = rtw89_pci_enable_intr_lock,
- .rst_bdram = rtw89_pci_rst_bdram_pcie,
+ .rst_bdram = rtw89_pci_reset_bdram,
};
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 2f3d1ad3b..772a84bd8 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -245,6 +245,203 @@
#define B_AX_HS1ISR_IND_INT BIT(25)
#define B_AX_PCIE_DBG_STE_INT BIT(13)
+#define R_BE_PCIE_FRZ_CLK 0x3004
+#define B_BE_PCIE_FRZ_MAC_HW_RST BIT(31)
+#define B_BE_PCIE_FRZ_CFG_SPC_RST BIT(30)
+#define B_BE_PCIE_FRZ_ELBI_RST BIT(29)
+#define B_BE_PCIE_MAC_IS_ACTIVE BIT(28)
+#define B_BE_PCIE_FRZ_RTK_HW_RST BIT(27)
+#define B_BE_PCIE_FRZ_REG_RST BIT(26)
+#define B_BE_PCIE_FRZ_ANA_RST BIT(25)
+#define B_BE_PCIE_FRZ_WLAN_RST BIT(24)
+#define B_BE_PCIE_FRZ_FLR_RST BIT(23)
+#define B_BE_PCIE_FRZ_RET_NON_STKY_RST BIT(22)
+#define B_BE_PCIE_FRZ_RET_STKY_RST BIT(21)
+#define B_BE_PCIE_FRZ_NON_STKY_RST BIT(20)
+#define B_BE_PCIE_FRZ_STKY_RST BIT(19)
+#define B_BE_PCIE_FRZ_RET_CORE_RST BIT(18)
+#define B_BE_PCIE_FRZ_PWR_RST BIT(17)
+#define B_BE_PCIE_FRZ_PERST_RST BIT(16)
+#define B_BE_PCIE_FRZ_PHY_ALOAD BIT(15)
+#define B_BE_PCIE_FRZ_PHY_HW_RST BIT(14)
+#define B_BE_PCIE_DBG_CLK BIT(4)
+#define B_BE_PCIE_EN_CLK BIT(3)
+#define B_BE_PCIE_DBI_ACLK_ACT BIT(2)
+#define B_BE_PCIE_S1_ACLK_ACT BIT(1)
+#define B_BE_PCIE_EN_AUX_CLK BIT(0)
+
+#define R_BE_PCIE_PS_CTRL 0x3008
+#define B_BE_RSM_L0S_EN BIT(8)
+#define B_BE_CMAC_EXIT_L1_EN BIT(7)
+#define B_BE_DMAC0_EXIT_L1_EN BIT(6)
+#define B_BE_FORCE_L0 BIT(5)
+#define B_BE_DBI_RO_WR_DISABLE BIT(4)
+#define B_BE_SEL_XFER_PENDING BIT(3)
+#define B_BE_SEL_REQ_ENTR_L1 BIT(2)
+#define B_BE_PCIE_EN_SWENT_L23 BIT(1)
+#define B_BE_SEL_REQ_EXIT_L1 BIT(0)
+
+#define R_BE_PCIE_LAT_CTRL 0x3044
+#define B_BE_ELBI_PHY_REMAP_MASK GENMASK(29, 24)
+#define B_BE_SYS_SUS_L12_EN BIT(17)
+#define B_BE_MDIO_S_EN BIT(16)
+#define B_BE_SYM_AUX_CLK_SEL BIT(15)
+#define B_BE_RTK_LDO_POWER_LATENCY_MASK GENMASK(11, 10)
+#define B_BE_RTK_LDO_BIAS_LATENCY_MASK GENMASK(9, 8)
+#define B_BE_CLK_REQ_LAT_MASK GENMASK(7, 4)
+
+#define R_BE_PCIE_HIMR0 0x30B0
+#define B_BE_PCIE_HB1_IND_INTA_IMR BIT(31)
+#define B_BE_PCIE_HB0_IND_INTA_IMR BIT(30)
+#define B_BE_HCI_AXIDMA_INTA_IMR BIT(29)
+#define B_BE_HC0_IND_INTA_IMR BIT(28)
+#define B_BE_HD1_IND_INTA_IMR BIT(27)
+#define B_BE_HD0_IND_INTA_IMR BIT(26)
+#define B_BE_HS1_IND_INTA_IMR BIT(25)
+#define B_BE_HS0_IND_INTA_IMR BIT(24)
+#define B_BE_PCIE_HOTRST_INT_EN BIT(16)
+#define B_BE_PCIE_FLR_INT_EN BIT(15)
+#define B_BE_PCIE_PERST_INT_EN BIT(14)
+#define B_BE_PCIE_DBG_STE_INT_EN BIT(13)
+#define B_BE_HB1_IND_INT_EN0 BIT(9)
+#define B_BE_HB0_IND_INT_EN0 BIT(8)
+#define B_BE_HC1_IND_INT_EN0 BIT(7)
+#define B_BE_HCI_AXIDMA_INT_EN0 BIT(5)
+#define B_BE_HC0_IND_INT_EN0 BIT(4)
+#define B_BE_HD1_IND_INT_EN0 BIT(3)
+#define B_BE_HD0_IND_INT_EN0 BIT(2)
+#define B_BE_HS1_IND_INT_EN0 BIT(1)
+#define B_BE_HS0_IND_INT_EN0 BIT(0)
+
+#define R_BE_PCIE_HISR 0x30B4
+#define B_BE_PCIE_HOTRST_INT BIT(16)
+#define B_BE_PCIE_FLR_INT BIT(15)
+#define B_BE_PCIE_PERST_INT BIT(14)
+#define B_BE_PCIE_DBG_STE_INT BIT(13)
+#define B_BE_HB1IMR_IND BIT(9)
+#define B_BE_HB0IMR_IND BIT(8)
+#define B_BE_HC1ISR_IND_INT BIT(7)
+#define B_BE_HCI_AXIDMA_INT BIT(5)
+#define B_BE_HC0ISR_IND_INT BIT(4)
+#define B_BE_HD1ISR_IND_INT BIT(3)
+#define B_BE_HD0ISR_IND_INT BIT(2)
+#define B_BE_HS1ISR_IND_INT BIT(1)
+#define B_BE_HS0ISR_IND_INT BIT(0)
+
+#define R_BE_PCIE_DMA_IMR_0_V1 0x30B8
+#define B_BE_PCIE_RX_RX1P1_IMR0_V1 BIT(23)
+#define B_BE_PCIE_RX_RX0P1_IMR0_V1 BIT(22)
+#define B_BE_PCIE_RX_ROQ1_IMR0_V1 BIT(21)
+#define B_BE_PCIE_RX_RPQ1_IMR0_V1 BIT(20)
+#define B_BE_PCIE_RX_RX1P2_IMR0_V1 BIT(19)
+#define B_BE_PCIE_RX_ROQ0_IMR0_V1 BIT(18)
+#define B_BE_PCIE_RX_RPQ0_IMR0_V1 BIT(17)
+#define B_BE_PCIE_RX_RX0P2_IMR0_V1 BIT(16)
+#define B_BE_PCIE_TX_CH14_IMR0 BIT(14)
+#define B_BE_PCIE_TX_CH13_IMR0 BIT(13)
+#define B_BE_PCIE_TX_CH12_IMR0 BIT(12)
+#define B_BE_PCIE_TX_CH11_IMR0 BIT(11)
+#define B_BE_PCIE_TX_CH10_IMR0 BIT(10)
+#define B_BE_PCIE_TX_CH9_IMR0 BIT(9)
+#define B_BE_PCIE_TX_CH8_IMR0 BIT(8)
+#define B_BE_PCIE_TX_CH7_IMR0 BIT(7)
+#define B_BE_PCIE_TX_CH6_IMR0 BIT(6)
+#define B_BE_PCIE_TX_CH5_IMR0 BIT(5)
+#define B_BE_PCIE_TX_CH4_IMR0 BIT(4)
+#define B_BE_PCIE_TX_CH3_IMR0 BIT(3)
+#define B_BE_PCIE_TX_CH2_IMR0 BIT(2)
+#define B_BE_PCIE_TX_CH1_IMR0 BIT(1)
+#define B_BE_PCIE_TX_CH0_IMR0 BIT(0)
+
+#define R_BE_PCIE_DMA_ISR 0x30BC
+#define B_BE_PCIE_RX_RX1P1_ISR_V1 BIT(23)
+#define B_BE_PCIE_RX_RX0P1_ISR_V1 BIT(22)
+#define B_BE_PCIE_RX_ROQ1_ISR_V1 BIT(21)
+#define B_BE_PCIE_RX_RPQ1_ISR_V1 BIT(20)
+#define B_BE_PCIE_RX_RX1P2_ISR_V1 BIT(19)
+#define B_BE_PCIE_RX_ROQ0_ISR_V1 BIT(18)
+#define B_BE_PCIE_RX_RPQ0_ISR_V1 BIT(17)
+#define B_BE_PCIE_RX_RX0P2_ISR_V1 BIT(16)
+#define B_BE_PCIE_TX_CH14_ISR BIT(14)
+#define B_BE_PCIE_TX_CH13_ISR BIT(13)
+#define B_BE_PCIE_TX_CH12_ISR BIT(12)
+#define B_BE_PCIE_TX_CH11_ISR BIT(11)
+#define B_BE_PCIE_TX_CH10_ISR BIT(10)
+#define B_BE_PCIE_TX_CH9_ISR BIT(9)
+#define B_BE_PCIE_TX_CH8_ISR BIT(8)
+#define B_BE_PCIE_TX_CH7_ISR BIT(7)
+#define B_BE_PCIE_TX_CH6_ISR BIT(6)
+#define B_BE_PCIE_TX_CH5_ISR BIT(5)
+#define B_BE_PCIE_TX_CH4_ISR BIT(4)
+#define B_BE_PCIE_TX_CH3_ISR BIT(3)
+#define B_BE_PCIE_TX_CH2_ISR BIT(2)
+#define B_BE_PCIE_TX_CH1_ISR BIT(1)
+#define B_BE_PCIE_TX_CH0_ISR BIT(0)
+
+#define R_BE_HAXI_HIMR00 0xB0B0
+#define B_BE_RDU_CH5_INT_IMR_V1 BIT(30)
+#define B_BE_RDU_CH4_INT_IMR_V1 BIT(29)
+#define B_BE_RDU_CH3_INT_IMR_V1 BIT(28)
+#define B_BE_RDU_CH2_INT_IMR_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_IMR_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_IMR_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_EN_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_EN_V1 BIT(23)
+#define B_BE_TXDMA_CH14_INT_EN_V1 BIT(22)
+#define B_BE_TXDMA_CH13_INT_EN_V1 BIT(21)
+#define B_BE_TXDMA_CH12_INT_EN_V1 BIT(20)
+#define B_BE_TXDMA_CH11_INT_EN_V1 BIT(19)
+#define B_BE_TXDMA_CH10_INT_EN_V1 BIT(18)
+#define B_BE_TXDMA_CH9_INT_EN_V1 BIT(17)
+#define B_BE_TXDMA_CH8_INT_EN_V1 BIT(16)
+#define B_BE_TXDMA_CH7_INT_EN_V1 BIT(15)
+#define B_BE_TXDMA_CH6_INT_EN_V1 BIT(14)
+#define B_BE_TXDMA_CH5_INT_EN_V1 BIT(13)
+#define B_BE_TXDMA_CH4_INT_EN_V1 BIT(12)
+#define B_BE_TXDMA_CH3_INT_EN_V1 BIT(11)
+#define B_BE_TXDMA_CH2_INT_EN_V1 BIT(10)
+#define B_BE_TXDMA_CH1_INT_EN_V1 BIT(9)
+#define B_BE_TXDMA_CH0_INT_EN_V1 BIT(8)
+#define B_BE_RX1P1DMA_INT_EN_V1 BIT(7)
+#define B_BE_RX0P1DMA_INT_EN_V1 BIT(6)
+#define B_BE_RO1DMA_INT_EN BIT(5)
+#define B_BE_RP1DMA_INT_EN BIT(4)
+#define B_BE_RX1DMA_INT_EN BIT(3)
+#define B_BE_RO0DMA_INT_EN BIT(2)
+#define B_BE_RP0DMA_INT_EN BIT(1)
+#define B_BE_RX0DMA_INT_EN BIT(0)
+
+#define R_BE_HAXI_HISR00 0xB0B4
+#define B_BE_RDU_CH6_INT BIT(28)
+#define B_BE_RDU_CH5_INT BIT(27)
+#define B_BE_RDU_CH4_INT BIT(26)
+#define B_BE_RDU_CH2_INT BIT(25)
+#define B_BE_RDU_CH1_INT BIT(24)
+#define B_BE_RDU_CH0_INT BIT(23)
+#define B_BE_RXDMA_STUCK_INT BIT(22)
+#define B_BE_TXDMA_STUCK_INT BIT(21)
+#define B_BE_TXDMA_CH14_INT BIT(20)
+#define B_BE_TXDMA_CH13_INT BIT(19)
+#define B_BE_TXDMA_CH12_INT BIT(18)
+#define B_BE_TXDMA_CH11_INT BIT(17)
+#define B_BE_TXDMA_CH10_INT BIT(16)
+#define B_BE_TXDMA_CH9_INT BIT(15)
+#define B_BE_TXDMA_CH8_INT BIT(14)
+#define B_BE_TXDMA_CH7_INT BIT(13)
+#define B_BE_TXDMA_CH6_INT BIT(12)
+#define B_BE_TXDMA_CH5_INT BIT(11)
+#define B_BE_TXDMA_CH4_INT BIT(10)
+#define B_BE_TXDMA_CH3_INT BIT(9)
+#define B_BE_TXDMA_CH2_INT BIT(8)
+#define B_BE_TXDMA_CH1_INT BIT(7)
+#define B_BE_TXDMA_CH0_INT BIT(6)
+#define B_BE_RPQ1DMA_INT BIT(5)
+#define B_BE_RX1P1DMA_INT BIT(4)
+#define B_BE_RX1DMA_INT BIT(3)
+#define B_BE_RPQ0DMA_INT BIT(2)
+#define B_BE_RX0P1DMA_INT BIT(1)
+#define B_BE_RX0DMA_INT BIT(0)
+
/* TX/RX */
#define R_AX_DRV_FW_HSK_0 0x01B0
#define R_AX_DRV_FW_HSK_1 0x01B4
@@ -496,6 +693,105 @@
#define B_AX_CH11_BUSY BIT(1)
#define B_AX_CH10_BUSY BIT(0)
+#define R_BE_HAXI_DMA_STOP1 0xB010
+#define B_BE_STOP_WPDMA BIT(31)
+#define B_BE_STOP_CH14 BIT(14)
+#define B_BE_STOP_CH13 BIT(13)
+#define B_BE_STOP_CH12 BIT(12)
+#define B_BE_STOP_CH11 BIT(11)
+#define B_BE_STOP_CH10 BIT(10)
+#define B_BE_STOP_CH9 BIT(9)
+#define B_BE_STOP_CH8 BIT(8)
+#define B_BE_STOP_CH7 BIT(7)
+#define B_BE_STOP_CH6 BIT(6)
+#define B_BE_STOP_CH5 BIT(5)
+#define B_BE_STOP_CH4 BIT(4)
+#define B_BE_STOP_CH3 BIT(3)
+#define B_BE_STOP_CH2 BIT(2)
+#define B_BE_STOP_CH1 BIT(1)
+#define B_BE_STOP_CH0 BIT(0)
+#define B_BE_TX_STOP1_MASK (B_BE_STOP_CH0 | B_BE_STOP_CH1 | \
+ B_BE_STOP_CH2 | B_BE_STOP_CH3 | \
+ B_BE_STOP_CH4 | B_BE_STOP_CH5 | \
+ B_BE_STOP_CH6 | B_BE_STOP_CH7 | \
+ B_BE_STOP_CH8 | B_BE_STOP_CH9 | \
+ B_BE_STOP_CH10 | B_BE_STOP_CH11 | \
+ B_BE_STOP_CH12)
+
+#define R_BE_CH0_TXBD_NUM_V1 0xB030
+#define R_BE_CH1_TXBD_NUM_V1 0xB032
+#define R_BE_CH2_TXBD_NUM_V1 0xB034
+#define R_BE_CH3_TXBD_NUM_V1 0xB036
+#define R_BE_CH4_TXBD_NUM_V1 0xB038
+#define R_BE_CH5_TXBD_NUM_V1 0xB03A
+#define R_BE_CH6_TXBD_NUM_V1 0xB03C
+#define R_BE_CH7_TXBD_NUM_V1 0xB03E
+#define R_BE_CH8_TXBD_NUM_V1 0xB040
+#define R_BE_CH9_TXBD_NUM_V1 0xB042
+#define R_BE_CH10_TXBD_NUM_V1 0xB044
+#define R_BE_CH11_TXBD_NUM_V1 0xB046
+#define R_BE_CH12_TXBD_NUM_V1 0xB048
+#define R_BE_CH13_TXBD_NUM_V1 0xB04C
+#define R_BE_CH14_TXBD_NUM_V1 0xB04E
+
+#define R_BE_RXQ0_RXBD_NUM_V1 0xB050
+#define R_BE_RPQ0_RXBD_NUM_V1 0xB052
+
+#define R_BE_CH0_TXBD_IDX_V1 0xB100
+#define R_BE_CH1_TXBD_IDX_V1 0xB104
+#define R_BE_CH2_TXBD_IDX_V1 0xB108
+#define R_BE_CH3_TXBD_IDX_V1 0xB10C
+#define R_BE_CH4_TXBD_IDX_V1 0xB110
+#define R_BE_CH5_TXBD_IDX_V1 0xB114
+#define R_BE_CH6_TXBD_IDX_V1 0xB118
+#define R_BE_CH7_TXBD_IDX_V1 0xB11C
+#define R_BE_CH8_TXBD_IDX_V1 0xB120
+#define R_BE_CH9_TXBD_IDX_V1 0xB124
+#define R_BE_CH10_TXBD_IDX_V1 0xB128
+#define R_BE_CH11_TXBD_IDX_V1 0xB12C
+#define R_BE_CH12_TXBD_IDX_V1 0xB130
+#define R_BE_CH13_TXBD_IDX_V1 0xB134
+#define R_BE_CH14_TXBD_IDX_V1 0xB138
+
+#define R_BE_RXQ0_RXBD_IDX_V1 0xB160
+#define R_BE_RPQ0_RXBD_IDX_V1 0xB164
+
+#define R_BE_CH0_TXBD_DESA_L_V1 0xB200
+#define R_BE_CH0_TXBD_DESA_H_V1 0xB204
+#define R_BE_CH1_TXBD_DESA_L_V1 0xB208
+#define R_BE_CH1_TXBD_DESA_H_V1 0xB20C
+#define R_BE_CH2_TXBD_DESA_L_V1 0xB210
+#define R_BE_CH2_TXBD_DESA_H_V1 0xB214
+#define R_BE_CH3_TXBD_DESA_L_V1 0xB218
+#define R_BE_CH3_TXBD_DESA_H_V1 0xB21C
+#define R_BE_CH4_TXBD_DESA_L_V1 0xB220
+#define R_BE_CH4_TXBD_DESA_H_V1 0xB224
+#define R_BE_CH5_TXBD_DESA_L_V1 0xB228
+#define R_BE_CH5_TXBD_DESA_H_V1 0xB22C
+#define R_BE_CH6_TXBD_DESA_L_V1 0xB230
+#define R_BE_CH6_TXBD_DESA_H_V1 0xB234
+#define R_BE_CH7_TXBD_DESA_L_V1 0xB238
+#define R_BE_CH7_TXBD_DESA_H_V1 0xB23C
+#define R_BE_CH8_TXBD_DESA_L_V1 0xB240
+#define R_BE_CH8_TXBD_DESA_H_V1 0xB244
+#define R_BE_CH9_TXBD_DESA_L_V1 0xB248
+#define R_BE_CH9_TXBD_DESA_H_V1 0xB24C
+#define R_BE_CH10_TXBD_DESA_L_V1 0xB250
+#define R_BE_CH10_TXBD_DESA_H_V1 0xB254
+#define R_BE_CH11_TXBD_DESA_L_V1 0xB258
+#define R_BE_CH11_TXBD_DESA_H_V1 0xB25C
+#define R_BE_CH12_TXBD_DESA_L_V1 0xB260
+#define R_BE_CH12_TXBD_DESA_H_V1 0xB264
+#define R_BE_CH13_TXBD_DESA_L_V1 0xB268
+#define R_BE_CH13_TXBD_DESA_H_V1 0xB26C
+#define R_BE_CH14_TXBD_DESA_L_V1 0xB270
+#define R_BE_CH14_TXBD_DESA_H_V1 0xB274
+
+#define R_BE_RXQ0_RXBD_DESA_L_V1 0xB300
+#define R_BE_RXQ0_RXBD_DESA_H_V1 0xB304
+#define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308
+#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C
+
/* Configure */
#define R_AX_PCIE_INIT_CFG2 0x1004
#define B_AX_WD_ITVL_IDLE GENMASK(27, 24)
@@ -516,6 +812,15 @@
#define B_AX_RXCOUNTER_MATCH_MASK GENMASK(15, 8)
#define B_AX_RXTIMER_MATCH_MASK GENMASK(7, 0)
+#define R_AX_DBG_ERR_FLAG_V1 0x1104
+
+#define R_AX_INT_MIT_RX_V1 0x1184
+#define B_AX_RXMIT_RXP2_SEL_V1 BIT(19)
+#define B_AX_RXMIT_RXP1_SEL_V1 BIT(18)
+#define B_AX_MIT_RXTIMER_UNIT_MASK GENMASK(17, 16)
+#define B_AX_MIT_RXCOUNTER_MATCH_MASK GENMASK(15, 8)
+#define B_AX_MIT_RXTIMER_MATCH_MASK GENMASK(7, 0)
+
#define R_AX_DBG_ERR_FLAG 0x11C4
#define B_AX_PCIE_RPQ_FULL BIT(29)
#define B_AX_PCIE_RXQ_FULL BIT(28)
@@ -554,23 +859,158 @@
#define R_AX_PCIE_HRPWM_V1 0x30C0
#define R_AX_PCIE_CRPWM 0x30C4
+#define R_AX_LBC_WATCHDOG_V1 0x30D8
+
+#define R_BE_PCIE_HRPWM 0x30C0
+#define R_BE_PCIE_CRPWM 0x30C4
+
+#define R_BE_L1_2_CTRL_HCILDO 0x3110
+#define B_BE_PCIE_DIS_L1_2_CTRL_HCILDO BIT(0)
+
+#define R_BE_PL1_DBG_INFO 0x3120
+#define B_BE_END_PL1_CNT_MASK GENMASK(23, 16)
+#define B_BE_START_PL1_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_PCIE_MIT0_TMR 0x3330
+#define B_BE_PCIE_MIT0_RX_TMR_MASK GENMASK(5, 4)
+#define BE_MIT0_TMR_UNIT_1MS 0
+#define BE_MIT0_TMR_UNIT_2MS 1
+#define BE_MIT0_TMR_UNIT_4MS 2
+#define BE_MIT0_TMR_UNIT_8MS 3
+#define B_BE_PCIE_MIT0_TX_TMR_MASK GENMASK(1, 0)
+
+#define R_BE_PCIE_MIT0_CNT 0x3334
+#define B_BE_PCIE_RX_MIT0_CNT_MASK GENMASK(31, 24)
+#define B_BE_PCIE_TX_MIT0_CNT_MASK GENMASK(23, 16)
+#define B_BE_PCIE_RX_MIT0_TMR_CNT_MASK GENMASK(15, 8)
+#define B_BE_PCIE_TX_MIT0_TMR_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_PCIE_MIT_CH_EN 0x3338
+#define B_BE_PCIE_MIT_RX1P1_EN BIT(23)
+#define B_BE_PCIE_MIT_RX0P1_EN BIT(22)
+#define B_BE_PCIE_MIT_ROQ1_EN BIT(21)
+#define B_BE_PCIE_MIT_RPQ1_EN BIT(20)
+#define B_BE_PCIE_MIT_RX1P2_EN BIT(19)
+#define B_BE_PCIE_MIT_ROQ0_EN BIT(18)
+#define B_BE_PCIE_MIT_RPQ0_EN BIT(17)
+#define B_BE_PCIE_MIT_RX0P2_EN BIT(16)
+#define B_BE_PCIE_MIT_TXCH14_EN BIT(14)
+#define B_BE_PCIE_MIT_TXCH13_EN BIT(13)
+#define B_BE_PCIE_MIT_TXCH12_EN BIT(12)
+#define B_BE_PCIE_MIT_TXCH11_EN BIT(11)
+#define B_BE_PCIE_MIT_TXCH10_EN BIT(10)
+#define B_BE_PCIE_MIT_TXCH9_EN BIT(9)
+#define B_BE_PCIE_MIT_TXCH8_EN BIT(8)
+#define B_BE_PCIE_MIT_TXCH7_EN BIT(7)
+#define B_BE_PCIE_MIT_TXCH6_EN BIT(6)
+#define B_BE_PCIE_MIT_TXCH5_EN BIT(5)
+#define B_BE_PCIE_MIT_TXCH4_EN BIT(4)
+#define B_BE_PCIE_MIT_TXCH3_EN BIT(3)
+#define B_BE_PCIE_MIT_TXCH2_EN BIT(2)
+#define B_BE_PCIE_MIT_TXCH1_EN BIT(1)
+#define B_BE_PCIE_MIT_TXCH0_EN BIT(0)
+
+#define R_BE_SER_PL1_CTRL 0x34A8
+#define B_BE_PL1_SER_PL1_EN BIT(31)
+#define B_BE_PL1_IGNORE_HOT_RST BIT(30)
+#define B_BE_PL1_TIMER_UNIT_MASK GENMASK(19, 17)
+#define B_BE_PL1_TIMER_CLEAR BIT(0)
+
+#define R_BE_REG_PL1_MASK 0x34B0
+#define B_BE_SER_PCLKREQ_ACK_MASK BIT(5)
+#define B_BE_SER_PM_CLK_MASK BIT(4)
+#define B_BE_SER_LTSSM_IMR BIT(3)
+#define B_BE_SER_PM_MASTER_IMR BIT(2)
+#define B_BE_SER_L1SUB_IMR BIT(1)
+#define B_BE_SER_PMU_IMR BIT(0)
+
+#define R_BE_RX_APPEND_MODE 0x8920
+#define B_BE_APPEND_OFFSET_MASK GENMASK(23, 16)
+#define B_BE_APPEND_LEN_MASK GENMASK(15, 0)
+
+#define R_BE_TXBD_RWPTR_CLR1 0xB014
+#define B_BE_CLR_CH14_IDX BIT(14)
+#define B_BE_CLR_CH13_IDX BIT(13)
+#define B_BE_CLR_CH12_IDX BIT(12)
+#define B_BE_CLR_CH11_IDX BIT(11)
+#define B_BE_CLR_CH10_IDX BIT(10)
+#define B_BE_CLR_CH9_IDX BIT(9)
+#define B_BE_CLR_CH8_IDX BIT(8)
+#define B_BE_CLR_CH7_IDX BIT(7)
+#define B_BE_CLR_CH6_IDX BIT(6)
+#define B_BE_CLR_CH5_IDX BIT(5)
+#define B_BE_CLR_CH4_IDX BIT(4)
+#define B_BE_CLR_CH3_IDX BIT(3)
+#define B_BE_CLR_CH2_IDX BIT(2)
+#define B_BE_CLR_CH1_IDX BIT(1)
+#define B_BE_CLR_CH0_IDX BIT(0)
+
+#define R_BE_RXBD_RWPTR_CLR1_V1 0xB018
+#define B_BE_CLR_ROQ1_IDX_V1 BIT(5)
+#define B_BE_CLR_RPQ1_IDX_V1 BIT(4)
+#define B_BE_CLR_RXQ1_IDX_V1 BIT(3)
+#define B_BE_CLR_ROQ0_IDX BIT(2)
+#define B_BE_CLR_RPQ0_IDX BIT(1)
+#define B_BE_CLR_RXQ0_IDX BIT(0)
+
+#define R_BE_HAXI_DMA_BUSY1 0xB01C
+#define B_BE_HAXI_MST_BUSY BIT(31)
+#define B_BE_HAXI_RX_IDLE BIT(25)
+#define B_BE_HAXI_TX_IDLE BIT(24)
+#define B_BE_ROQ1_BUSY_V1 BIT(21)
+#define B_BE_RPQ1_BUSY_V1 BIT(20)
+#define B_BE_RXQ1_BUSY_V1 BIT(19)
+#define B_BE_ROQ0_BUSY_V1 BIT(18)
+#define B_BE_RPQ0_BUSY_V1 BIT(17)
+#define B_BE_RXQ0_BUSY_V1 BIT(16)
+#define B_BE_WPDMA_BUSY BIT(15)
+#define B_BE_CH14_BUSY BIT(14)
+#define B_BE_CH13_BUSY BIT(13)
+#define B_BE_CH12_BUSY BIT(12)
+#define B_BE_CH11_BUSY BIT(11)
+#define B_BE_CH10_BUSY BIT(10)
+#define B_BE_CH9_BUSY BIT(9)
+#define B_BE_CH8_BUSY BIT(8)
+#define B_BE_CH7_BUSY BIT(7)
+#define B_BE_CH6_BUSY BIT(6)
+#define B_BE_CH5_BUSY BIT(5)
+#define B_BE_CH4_BUSY BIT(4)
+#define B_BE_CH3_BUSY BIT(3)
+#define B_BE_CH2_BUSY BIT(2)
+#define B_BE_CH1_BUSY BIT(1)
+#define B_BE_CH0_BUSY BIT(0)
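+/* aggregate of the per-channel busy bits polled to confirm TX DMA has drained */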
+#define DMA_BUSY1_CHECK_BE (B_BE_CH0_BUSY | B_BE_CH1_BUSY | B_BE_CH2_BUSY | \
+ B_BE_CH3_BUSY | B_BE_CH4_BUSY | B_BE_CH5_BUSY | \
+ B_BE_CH6_BUSY | B_BE_CH7_BUSY | B_BE_CH8_BUSY | \
+ B_BE_CH9_BUSY | B_BE_CH10_BUSY | B_BE_CH11_BUSY | \
+ B_BE_CH12_BUSY | B_BE_CH13_BUSY | B_BE_CH14_BUSY)
+
+#define R_BE_HAXI_EXP_CTRL_V1 0xB020
+#define B_BE_R_NO_SEC_ACCESS BIT(31)
+#define B_BE_FORCE_EN_DMA_RX_GCLK BIT(5)
+#define B_BE_FORCE_EN_DMA_TX_GCLK BIT(4)
+#define B_BE_MAX_TAG_NUM_MASK GENMASK(3, 0)
+
#define RTW89_PCI_TXBD_NUM_MAX 256
#define RTW89_PCI_RXBD_NUM_MAX 256
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE 11460
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
/* PCIE CFG register */
+#define RTW89_PCIE_CAPABILITY_SPEED 0x7C
+#define RTW89_PCIE_SUPPORT_GEN_MASK GENMASK(3, 0)
#define RTW89_PCIE_L1_STS_V1 0x80
#define RTW89_BCFG_LINK_SPEED_MASK GENMASK(19, 16)
#define RTW89_PCIE_GEN1_SPEED 0x01
#define RTW89_PCIE_GEN2_SPEED 0x02
#define RTW89_PCIE_PHY_RATE 0x82
#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+#define RTW89_PCIE_LINK_CHANGE_SPEED 0xA0
#define RTW89_PCIE_L1SS_STS_V1 0x0168
#define RTW89_PCIE_BIT_ASPM_L11 BIT(3)
#define RTW89_PCIE_BIT_ASPM_L12 BIT(2)
@@ -585,6 +1025,8 @@
#define RTW89_PCIE_BIT_CLK BIT(4)
#define RTW89_PCIE_BIT_L1 BIT(3)
#define RTW89_PCIE_CLK_CTRL 0x0725
+#define RTW89_PCIE_FTS 0x080C
+#define RTW89_PCIE_POLLING_BIT BIT(17)
#define RTW89_PCIE_RST_MSTATE 0x0B48
#define RTW89_PCIE_BIT_CFG_RST_MSTATE BIT(0)
@@ -757,7 +1199,26 @@ struct rtw89_pci_bd_ram {
u8 min_num;
};
+struct rtw89_pci_gen_def {
+ u32 isr_rdu;
+ u32 isr_halt_c2h;
+ u32 isr_wdt_timeout;
+ struct rtw89_reg2_def isr_clear_rpq;
+ struct rtw89_reg2_def isr_clear_rxq;
+
+ int (*mac_pre_init)(struct rtw89_dev *rtwdev);
+ int (*mac_pre_deinit)(struct rtw89_dev *rtwdev);
+ int (*mac_post_init)(struct rtw89_dev *rtwdev);
+
+ void (*clr_idx_all)(struct rtw89_dev *rtwdev);
+ int (*rst_bdram)(struct rtw89_dev *rtwdev);
+
+ int (*lv1rst_stop_dma)(struct rtw89_dev *rtwdev);
+ int (*lv1rst_start_dma)(struct rtw89_dev *rtwdev);
+};
+
struct rtw89_pci_info {
+ const struct rtw89_pci_gen_def *gen_def;
enum mac_ax_bd_trunc_mode txbd_trunc_mode;
enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
enum mac_ax_rxbd_mode rxbd_mode;
@@ -772,6 +1233,8 @@ struct rtw89_pci_info {
enum mac_ax_pcie_func_ctrl autok_en;
enum mac_ax_pcie_func_ctrl io_rcy_en;
enum mac_ax_io_rcy_tmr io_rcy_tmr;
+ bool rx_ring_eq_is_full;
+ bool check_rx_tag;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -781,6 +1244,7 @@ struct rtw89_pci_info {
u32 max_tag_num_mask;
u32 rxbd_rwptr_clr_reg;
u32 txbd_rwptr_clr2_reg;
+ struct rtw89_reg_def dma_io_stop;
struct rtw89_reg_def dma_stop1;
struct rtw89_reg_def dma_stop2;
struct rtw89_reg_def dma_busy1;
@@ -789,6 +1253,7 @@ struct rtw89_pci_info {
u32 rpwm_addr;
u32 cpwm_addr;
+ u32 mit_addr;
u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
@@ -812,7 +1277,7 @@ struct rtw89_pci_tx_data {
struct rtw89_pci_rx_info {
dma_addr_t dma;
- u32 fs:1, ls:1, tag:11, len:14;
+ u32 fs:1, ls:1, tag:13, len:14;
};
#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
@@ -941,6 +1406,7 @@ struct rtw89_pci_rx_ring {
u32 buf_sz;
struct sk_buff *diliver_skb;
struct rtw89_rx_desc_info diliver_desc;
+ u32 target_rx_tag:13;
};
struct rtw89_pci_isrs {
@@ -1059,33 +1525,45 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
extern const struct dev_pm_ops rtw89_pm_ops;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
+extern const struct rtw89_pci_gen_def rtw89_pci_gen_ax;
+extern const struct rtw89_pci_gen_def rtw89_pci_gen_be;
struct pci_device_id;
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
void rtw89_pci_remove(struct pci_dev *pdev);
+void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev);
int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en);
int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en);
+int rtw89_pci_ltr_set_v2(struct rtw89_dev *rtwdev, bool en);
u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
+void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable);
void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev);
+void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
+void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
static inline
u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev,
@@ -1157,4 +1635,47 @@ void rtw89_chip_recognize_intrs(struct rtw89_dev *rtwdev,
info->recognize_intrs(rtwdev, rtwpci, isrs);
}
+static inline int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->mac_pre_init(rtwdev);
+}
+
+static inline int rtw89_pci_ops_mac_pre_deinit(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ if (!gen_def->mac_pre_deinit)
+ return 0;
+
+ return gen_def->mac_pre_deinit(rtwdev);
+}
+
+static inline int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->mac_post_init(rtwdev);
+}
+
+static inline void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ gen_def->clr_idx_all(rtwdev);
+}
+
+static inline int rtw89_pci_reset_bdram(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->rst_bdram(rtwdev);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
new file mode 100644
index 000000000..629ffa4be
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include <linux/pci.h>
+
+#include "mac.h"
+#include "pci.h"
+#include "reg.h"
+
+enum pcie_rxbd_mode {
+ PCIE_RXBD_NORM = 0,
+ PCIE_RXBD_SEP,
+ PCIE_RXBD_EXT,
+};
+
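+/* PL0 recovery watchdog periods: ~172 us for the analog timer, 1 ms for the MAC and AUX clock domains */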
+#define PL0_TMR_SCALE_ASIC 1
+#define PL0_TMR_ANA_172US 0x800
+#define PL0_TMR_MAC_1MS 0x27100
+#define PL0_TMR_AUX_1MS 0x1E848
+
+static void _patch_pcie_power_wake_be(struct rtw89_dev *rtwdev, bool power_up)
+{
+ if (power_up)
+ rtw89_write32_set(rtwdev, R_BE_HCI_OPT_CTRL, BIT_WAKE_CTRL_V1);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, BIT_WAKE_CTRL_V1);
+}
+
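+/* Arm the I/O recovery watchdogs (AON, MDIO, LA mode, AXI read/write paths) when io_rcy is enabled; otherwise disable them all. */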
+static void rtw89_pci_set_io_rcy_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 scale = PL0_TMR_SCALE_ASIC;
+ u32 val32;
+
+ if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
+ val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
+ PL0_TMR_ANA_172US : info->io_rcy_tmr;
+ val32 /= scale;
+
+ rtw89_write32(rtwdev, R_BE_AON_WDT_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_MDIO_WDT_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_LA_MODE_WDT_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_WDT_AR_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_WDT_AW_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_WDT_W_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_WDT_B_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_WDT_R_TMR, val32);
+
+ val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
+ PL0_TMR_MAC_1MS : info->io_rcy_tmr;
+ val32 /= scale;
+ rtw89_write32(rtwdev, R_BE_WLAN_WDT_TMR, val32);
+ rtw89_write32(rtwdev, R_BE_AXIDMA_WDT_TMR, val32);
+
+ val32 = info->io_rcy_tmr == MAC_AX_IO_RCY_ANA_TMR_DEF ?
+ PL0_TMR_AUX_1MS : info->io_rcy_tmr;
+ val32 /= scale;
+ rtw89_write32(rtwdev, R_BE_LOCAL_WDT_TMR, val32);
+ } else {
+ rtw89_write32_clr(rtwdev, R_BE_WLAN_WDT, B_BE_WLAN_WDT_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_AXIDMA_WDT, B_BE_AXIDMA_WDT_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_AON_WDT, B_BE_AON_WDT_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_LOCAL_WDT, B_BE_LOCAL_WDT_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_MDIO_WDT, B_BE_MDIO_WDT_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_LA_MODE_WDT, B_BE_LA_MODE_WDT_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_WDT_AR, B_BE_WDT_AR_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_WDT_AW, B_BE_WDT_AW_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_WDT_W, B_BE_WDT_W_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_WDT_B, B_BE_WDT_B_ENABLE);
+ rtw89_write32_clr(rtwdev, R_BE_WDT_R, B_BE_WDT_R_ENABLE);
+ }
+}
+
+static void rtw89_pci_ctrl_wpdma_pcie_be(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_write32_clr(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_STOP_WPDMA);
+ else
+ rtw89_write32_set(rtwdev, R_BE_HAXI_DMA_STOP1, B_BE_STOP_WPDMA);
+}
+
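+/* Independently gate TX DMA, RX DMA and the AXI master; MAC_AX_PCIE_IGNORE leaves the corresponding field untouched. */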
+static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
+ enum mac_ax_pcie_func_ctrl tx_en,
+ enum mac_ax_pcie_func_ctrl rx_en,
+ enum mac_ax_pcie_func_ctrl io_en)
+{
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
+
+ if (tx_en == MAC_AX_PCIE_ENABLE)
+ val |= B_BE_TXDMA_EN;
+ else if (tx_en == MAC_AX_PCIE_DISABLE)
+ val &= ~B_BE_TXDMA_EN;
+
+ if (rx_en == MAC_AX_PCIE_ENABLE)
+ val |= B_BE_RXDMA_EN;
+ else if (rx_en == MAC_AX_PCIE_DISABLE)
+ val &= ~B_BE_RXDMA_EN;
+
+ if (io_en == MAC_AX_PCIE_ENABLE)
+ val &= ~B_BE_STOP_AXI_MST;
+ else if (io_en == MAC_AX_PCIE_DISABLE)
+ val |= B_BE_STOP_AXI_MST;
+
+ rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
+}
+
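+/* Clear every TX/RX buffer-descriptor read/write pointer and park the host RX indexes at the end of each ring (len - 1). */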
+static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_rx_ring *rx_ring;
+ u32 val;
+
+ val = B_BE_CLR_CH0_IDX | B_BE_CLR_CH1_IDX | B_BE_CLR_CH2_IDX |
+ B_BE_CLR_CH3_IDX | B_BE_CLR_CH4_IDX | B_BE_CLR_CH5_IDX |
+ B_BE_CLR_CH6_IDX | B_BE_CLR_CH7_IDX | B_BE_CLR_CH8_IDX |
+ B_BE_CLR_CH9_IDX | B_BE_CLR_CH10_IDX | B_BE_CLR_CH11_IDX |
+ B_BE_CLR_CH12_IDX | B_BE_CLR_CH13_IDX | B_BE_CLR_CH14_IDX;
+ rtw89_write32(rtwdev, R_BE_TXBD_RWPTR_CLR1, val);
+
+ rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
+ B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX);
+
+ rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
+ rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
+
+ rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
+}
+
+static int rtw89_pci_poll_txdma_ch_idle_be(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+
+ return read_poll_timeout(rtw89_read32, val, (val & DMA_BUSY1_CHECK_BE) == 0,
+ 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
+}
+
+static int rtw89_pci_poll_rxdma_ch_idle_be(struct rtw89_dev *rtwdev)
+{
+ u32 check;
+ u32 val;
+
+ check = B_BE_RXQ0_BUSY_V1 | B_BE_RPQ0_BUSY_V1;
+
+ return read_poll_timeout(rtw89_read32, val, (val & check) == 0,
+ 10, 1000, false, rtwdev, R_BE_HAXI_DMA_BUSY1);
+}
+
+static int rtw89_pci_poll_dma_all_idle_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_pci_poll_txdma_ch_idle_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "txdma ch busy\n");
+ return ret;
+ }
+
+ ret = rtw89_pci_poll_rxdma_ch_idle_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "rxdma ch busy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
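+/* Apply RXBD mode, TX/RX burst sizes, multi-tag count and WD DMA polling intervals from the per-chip PCI info. */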
+static void rtw89_pci_mode_op_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 val32_init1, val32_rxapp, val32_exp;
+
+ val32_init1 = rtw89_read32(rtwdev, R_BE_HAXI_INIT_CFG1);
+ val32_rxapp = rtw89_read32(rtwdev, R_BE_RX_APPEND_MODE);
+ val32_exp = rtw89_read32(rtwdev, R_BE_HAXI_EXP_CTRL_V1);
+
+ if (info->rxbd_mode == MAC_AX_RXBD_PKT) {
+ val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_NORM,
+ B_BE_RXQ_RXBD_MODE_MASK);
+ } else if (info->rxbd_mode == MAC_AX_RXBD_SEP) {
+ val32_init1 = u32_replace_bits(val32_init1, PCIE_RXBD_SEP,
+ B_BE_RXQ_RXBD_MODE_MASK);
+ val32_rxapp = u32_replace_bits(val32_rxapp, 0,
+ B_BE_APPEND_LEN_MASK);
+ }
+
+ val32_init1 = u32_replace_bits(val32_init1, info->tx_burst,
+ B_BE_MAX_TXDMA_MASK);
+ val32_init1 = u32_replace_bits(val32_init1, info->rx_burst,
+ B_BE_MAX_RXDMA_MASK);
+ val32_exp = u32_replace_bits(val32_exp, info->multi_tag_num,
+ B_BE_MAX_TAG_NUM_MASK);
+ val32_init1 = u32_replace_bits(val32_init1, info->wd_dma_idle_intvl,
+ B_BE_CFG_WD_PERIOD_IDLE_MASK);
+ val32_init1 = u32_replace_bits(val32_init1, info->wd_dma_act_intvl,
+ B_BE_CFG_WD_PERIOD_ACTIVE_MASK);
+
+ rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val32_init1);
+ rtw89_write32(rtwdev, R_BE_RX_APPEND_MODE, val32_rxapp);
+ rtw89_write32(rtwdev, R_BE_HAXI_EXP_CTRL_V1, val32_exp);
+}
+
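+/* Trigger the BD RAM reset via the self-clearing bound bit and wait for hardware to clear it. */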
+static int rtw89_pci_rst_bdram_be(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+
+ rtw89_write32_set(rtwdev, R_BE_HAXI_INIT_CFG1, B_BE_SET_BDRAM_BOUND);
+
+ return read_poll_timeout(rtw89_read32, val, !(val & B_BE_SET_BDRAM_BOUND),
+ 50, 500000, false, rtwdev, R_BE_HAXI_INIT_CFG1);
+}
+
+static void rtw89_pci_debounce_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+
+ val32 = rtw89_read32(rtwdev, R_BE_SYS_PAGE_CLK_GATED);
+ val32 = u32_replace_bits(val32, 0, B_BE_PCIE_PRST_DEBUNC_PERIOD_MASK);
+ val32 |= B_BE_SYM_PRST_DEBUNC_SEL;
+ rtw89_write32(rtwdev, R_BE_SYS_PAGE_CLK_GATED, val32);
+}
+
+static void rtw89_pci_ldo_low_pwr_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_PSUS_OFF_CAPC_EN);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PAGE_CLK_GATED,
+ B_BE_SOP_OFFPOOBS_PC | B_BE_CPHY_AUXCLK_OP |
+ B_BE_CPHY_POWER_READY_CHK);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_SDIO_CTRL, B_BE_PCIE_FORCE_IBX_EN |
+ B_BE_PCIE_DIS_L2_RTK_PERST |
+ B_BE_PCIE_DIS_L2__CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_BE_L1_2_CTRL_HCILDO, B_BE_PCIE_DIS_L1_2_CTRL_HCILDO);
+}
+
+static void rtw89_pci_pcie_setting_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_EN_AUX_CLK);
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_PS_CTRL, B_BE_CMAC_EXIT_L1_EN);
+
+ if (chip->chip_id == RTL8922A && hal->cv == CHIP_CAV)
+ return;
+
+ rtw89_write32_set(rtwdev, R_BE_EFUSE_CTRL_2_V1, B_BE_R_SYM_AUTOLOAD_WITH_PMC_SEL);
+ rtw89_write32_set(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_SYM_AUX_CLK_SEL);
+}
+
+static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+
+ rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
+ rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
+ rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+
+ val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
+ val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
+ B_BE_SER_LTSSM_IMR | B_BE_SER_PM_CLK_MASK | B_BE_SER_PCLKREQ_ACK_MASK;
+ rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
+}
+
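+/* all_en gates TX DMA channels 0-11, h2c_en gates CH12 (the H2C firmware command channel); CH13/CH14 are kept stopped. */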
+static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
+ bool h2c_en)
+{
+ u32 mask_all;
+ u32 val;
+
+ mask_all = B_BE_STOP_CH0 | B_BE_STOP_CH1 | B_BE_STOP_CH2 |
+ B_BE_STOP_CH3 | B_BE_STOP_CH4 | B_BE_STOP_CH5 |
+ B_BE_STOP_CH6 | B_BE_STOP_CH7 | B_BE_STOP_CH8 |
+ B_BE_STOP_CH9 | B_BE_STOP_CH10 | B_BE_STOP_CH11;
+
+ val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
+ val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;
+
+ if (all_en)
+ val &= ~mask_all;
+ else
+ val |= mask_all;
+
+ if (h2c_en)
+ val &= ~B_BE_STOP_CH12;
+ else
+ val |= B_BE_STOP_CH12;
+
+ rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
+}
+
+static int rtw89_pci_ops_mac_pre_init_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_pci_set_io_rcy_be(rtwdev);
+ _patch_pcie_power_wake_be(rtwdev, true);
+ rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, false);
+ rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_DISABLE,
+ MAC_AX_PCIE_DISABLE, MAC_AX_PCIE_DISABLE);
+ rtw89_pci_clr_idx_all_be(rtwdev);
+
+ ret = rtw89_pci_poll_dma_all_idle_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
+ return ret;
+ }
+
+ rtw89_pci_mode_op_be(rtwdev);
+ rtw89_pci_ops_reset(rtwdev);
+
+ ret = rtw89_pci_rst_bdram_be(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]pcie rst bdram\n");
+ return ret;
+ }
+
+ rtw89_pci_debounce_be(rtwdev);
+ rtw89_pci_ldo_low_pwr_be(rtwdev);
+ rtw89_pci_pcie_setting_be(rtwdev);
+ rtw89_pci_ser_setting_be(rtwdev);
+
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, false, true);
+ rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_ENABLE,
+ MAC_AX_PCIE_ENABLE, MAC_AX_PCIE_ENABLE);
+
+ return 0;
+}
+
+static int rtw89_pci_ops_mac_pre_deinit_be(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+
+ _patch_pcie_power_wake_be(rtwdev, false);
+
+ val = rtw89_read32_mask(rtwdev, R_BE_IC_PWR_STATE, B_BE_WLMAC_PWR_STE_MASK);
+ if (val == 0)
+ return 0;
+
+ rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_DISABLE,
+ MAC_AX_PCIE_DISABLE, MAC_AX_PCIE_DISABLE);
+ rtw89_pci_clr_idx_all_be(rtwdev);
+
+ return 0;
+}
+
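+/* Configure PCIe LTR (Latency Tolerance Reporting): sanity-check the current register values, then program the decision spacing, idle timer and latency index tables. */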
+int rtw89_pci_ltr_set_v2(struct rtw89_dev *rtwdev, bool en)
+{
+ u32 ctrl0, cfg0, cfg1, dec_ctrl, idle_ltcy, act_ltcy, dis_ltcy;
+
+ ctrl0 = rtw89_read32(rtwdev, R_BE_LTR_CTRL_0);
+ if (rtw89_pci_ltr_is_err_reg_val(ctrl0))
+ return -EINVAL;
+ cfg0 = rtw89_read32(rtwdev, R_BE_LTR_CFG_0);
+ if (rtw89_pci_ltr_is_err_reg_val(cfg0))
+ return -EINVAL;
+ cfg1 = rtw89_read32(rtwdev, R_BE_LTR_CFG_1);
+ if (rtw89_pci_ltr_is_err_reg_val(cfg1))
+ return -EINVAL;
+ dec_ctrl = rtw89_read32(rtwdev, R_BE_LTR_DECISION_CTRL_V1);
+ if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
+ return -EINVAL;
+ idle_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1);
+ if (rtw89_pci_ltr_is_err_reg_val(idle_ltcy))
+ return -EINVAL;
+ act_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1);
+ if (rtw89_pci_ltr_is_err_reg_val(act_ltcy))
+ return -EINVAL;
+ dis_ltcy = rtw89_read32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1);
+ if (rtw89_pci_ltr_is_err_reg_val(dis_ltcy))
+ return -EINVAL;
+
+ if (en) {
+ dec_ctrl |= B_BE_ENABLE_LTR_CTL_DECISION | B_BE_LTR_HW_DEC_EN_V1;
+ ctrl0 |= B_BE_LTR_HW_EN;
+ } else {
+ dec_ctrl &= ~(B_BE_ENABLE_LTR_CTL_DECISION | B_BE_LTR_HW_DEC_EN_V1 |
+ B_BE_LTR_EN_PORT_V1_MASK);
+ ctrl0 &= ~B_BE_LTR_HW_EN;
+ }
+
+ dec_ctrl = u32_replace_bits(dec_ctrl, PCI_LTR_SPC_500US,
+ B_BE_LTR_SPACE_IDX_MASK);
+ cfg0 = u32_replace_bits(cfg0, PCI_LTR_IDLE_TIMER_3_2MS,
+ B_BE_LTR_IDLE_TIMER_IDX_MASK);
+ cfg1 = u32_replace_bits(cfg1, 0xC0, B_BE_LTR_CMAC0_RX_USE_PG_TH_MASK);
+ cfg1 = u32_replace_bits(cfg1, 0xC0, B_BE_LTR_CMAC1_RX_USE_PG_TH_MASK);
+ cfg0 = u32_replace_bits(cfg0, 1, B_BE_LTR_IDX_ACTIVE_MASK);
+ cfg0 = u32_replace_bits(cfg0, 3, B_BE_LTR_IDX_IDLE_MASK);
+ dec_ctrl = u32_replace_bits(dec_ctrl, 0, B_BE_LTR_IDX_DISABLE_V1_MASK);
+
+ rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX3_V1, 0x90039003);
+ rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX1_V1, 0x880b880b);
+ rtw89_write32(rtwdev, R_BE_LTR_LATENCY_IDX0_V1, 0);
+ rtw89_write32(rtwdev, R_BE_LTR_DECISION_CTRL_V1, dec_ctrl);
+ rtw89_write32(rtwdev, R_BE_LTR_CFG_0, cfg0);
+ rtw89_write32(rtwdev, R_BE_LTR_CFG_1, cfg1);
+ rtw89_write32(rtwdev, R_BE_LTR_CTRL_0, ctrl0);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_pci_ltr_set_v2);
+
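+/* RX interrupt mitigation: coalesce RX interrupts until half the RX BD ring is used or the 1 ms-unit timer count expires. */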
+static void rtw89_pci_configure_mit_be(struct rtw89_dev *rtwdev)
+{
+ u32 cnt;
+ u32 val;
+
+ rtw89_write32_mask(rtwdev, R_BE_PCIE_MIT0_TMR,
+ B_BE_PCIE_MIT0_RX_TMR_MASK, BE_MIT0_TMR_UNIT_1MS);
+
+ val = rtw89_read32(rtwdev, R_BE_PCIE_MIT0_CNT);
+ cnt = min_t(u32, U8_MAX, RTW89_PCI_RXBD_NUM_MAX / 2);
+ val = u32_replace_bits(val, cnt, B_BE_PCIE_RX_MIT0_CNT_MASK);
+ val = u32_replace_bits(val, 2, B_BE_PCIE_RX_MIT0_TMR_CNT_MASK);
+ rtw89_write32(rtwdev, R_BE_PCIE_MIT0_CNT, val);
+}
+
+static int rtw89_pci_ops_mac_post_init_be(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ int ret;
+
+ ret = info->ltr_set(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "pci ltr set fail\n");
+ return ret;
+ }
+
+ rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_IGNORE,
+ MAC_AX_PCIE_IGNORE, MAC_AX_PCIE_ENABLE);
+ rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, true, true);
+ rtw89_pci_configure_mit_be(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_pci_poll_io_idle_be(struct rtw89_dev *rtwdev)
+{
+ u32 sts;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_read32, sts,
+ !(sts & B_BE_HAXI_MST_BUSY),
+ 10, 1000, false, rtwdev,
+ R_BE_HAXI_DMA_BUSY1);
+ if (ret) {
+ rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", sts);
+ return ret;
+ }
+
+ return 0;
+}
+
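+/* Level-1 SER recovery: halt all DMA and poll for idle, toggling the HCI DMA TRX control once as a fallback before re-polling. */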
+static int rtw89_pci_lv1rst_stop_dma_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_pci_ctrl_dma_all(rtwdev, false);
+ ret = rtw89_pci_poll_io_idle_be(rtwdev);
+ if (!ret)
+ return 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "[PCIe] poll_io_idle fail; reset hci dma trx\n");
+
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
+
+ return rtw89_pci_poll_io_idle_be(rtwdev);
+}
+
+static int rtw89_pci_lv1rst_start_dma_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
+ rtw89_pci_clr_idx_all(rtwdev);
+
+ ret = rtw89_pci_rst_bdram_be(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw89_pci_ctrl_dma_all(rtwdev, true);
+ return 0;
+}
+
+const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
+ .isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
+ .isr_halt_c2h = B_BE_HALT_C2H_INT,
+ .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
+ .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
+ .isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
+
+ .mac_pre_init = rtw89_pci_ops_mac_pre_init_be,
+ .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be,
+ .mac_post_init = rtw89_pci_ops_mac_post_init_be,
+
+ .clr_idx_all = rtw89_pci_clr_idx_all_be,
+ .rst_bdram = rtw89_pci_rst_bdram_be,
+
+ .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_be,
+ .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_be,
+};
+EXPORT_SYMBOL(rtw89_pci_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 17ccc9efe..bafc7b1cc 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2445,6 +2445,298 @@ void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};
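+/* Dump fixed-layout RFK report logs (DPK/DACK/RX_DCK/TXGAPK); the payload length must match the per-function struct exactly. */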
+static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_c2h_rfk_log_func func,
+ void *content, u16 len)
+{
+ struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
+ struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
+ struct rtw89_c2h_rf_dack_rpt_log *dack;
+ struct rtw89_c2h_rf_dpk_rpt_log *dpk;
+
+ switch (func) {
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
+ if (len != sizeof(*dpk))
+ goto out;
+
+ dpk = content;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
+ dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
+ dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
+ dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
+ return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
+ if (len != sizeof(*dack))
+ goto out;
+
+ dack = content;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ver=0x%x 0x%x\n",
+ dack->fwdack_ver, dack->fwdack_rpt_ver);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n",
+ dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n",
+ dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK ic = [0x%x, 0x%x]\n",
+ dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n",
+ dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n",
+ dack->addck2_d[0][0][0], dack->addck2_d[0][0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n",
+ dack->addck2_d[0][1][0], dack->addck2_d[0][1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n",
+ dack->addck2_d[1][0][0], dack->addck2_d[1][0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n",
+ dack->addck2_d[1][1][0], dack->addck2_d[1][1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
+ dack->adgaink_d[0][0], dack->adgaink_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
+ dack->adgaink_d[1][0], dack->adgaink_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n",
+ dack->biask_d[0][0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n",
+ dack->biask_d[1][0]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic: %*ph\n",
+ (int)sizeof(dack->msbk_d[0][0]), dack->msbk_d[0][0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc: %*ph\n",
+ (int)sizeof(dack->msbk_d[0][1]), dack->msbk_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic: %*ph\n",
+ (int)sizeof(dack->msbk_d[1][0]), dack->msbk_d[1][0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc: %*ph\n",
+ (int)sizeof(dack->msbk_d[1][1]), dack->msbk_d[1][1]);
+ return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
+ if (len != sizeof(*rxdck))
+ goto out;
+
+ rxdck = content;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n",
+ rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch,
+ rxdck->timeout);
+ return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
+ if (len != sizeof(*txgapk))
+ goto out;
+
+ txgapk = content;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
+ le32_to_cpu(txgapk->r0x8010[0]),
+ le32_to_cpu(txgapk->r0x8010[1]));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
+ txgapk->chk_id);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
+ le32_to_cpu(txgapk->chk_cnt));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
+ txgapk->ver);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n",
+ txgapk->rsv1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
+ (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
+ (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
+ (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
+ (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
+ return;
+ default:
+ break;
+ }
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "unexpected RFK func %d report log with length %d\n", func, len);
+}
+
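+/* Render a firmware "run" log via the printf-style format table carried in the RFK log-format firmware element. */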
+static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_c2h_rfk_log_func func,
+ void *content, u16 len)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_c2h_rf_run_log *log = content;
+ const struct rtw89_fw_element_hdr *elm;
+ u32 fmt_idx;
+ u16 offset;
+
+ if (sizeof(*log) != len)
+ return false;
+
+ if (!elm_info->rfk_log_fmt)
+ return false;
+
+ elm = elm_info->rfk_log_fmt->elm[func];
+ fmt_idx = le32_to_cpu(log->fmt_idx);
+ if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
+ return false;
+
+ offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
+ if (offset == 0)
+ return false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
+ le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
+ le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));
+
+ return true;
+}
+
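+/* Walk the chain of variable-length log chunks inside one C2H packet, dispatching run logs and report logs in turn. */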
+static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len, enum rtw89_phy_c2h_rfk_log_func func,
+ const char *rfk_name)
+{
+ struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
+ struct rtw89_c2h_rf_log_hdr *log_hdr;
+ void *log_ptr = c2h_hdr;
+ u16 content_len;
+ u16 chunk_len;
+ bool handled;
+
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
+ return;
+
+ log_ptr += sizeof(*c2h_hdr);
+ len -= sizeof(*c2h_hdr);
+
+ while (len > sizeof(*log_hdr)) {
+ log_hdr = log_ptr;
+ content_len = le16_to_cpu(log_hdr->len);
+ chunk_len = content_len + sizeof(*log_hdr);
+
+ if (chunk_len > len)
+ break;
+
+ switch (log_hdr->type) {
+ case RTW89_RF_RUN_LOG:
+ handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
+ log_hdr->content, content_len);
+ if (handled)
+ break;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
+ rfk_name, content_len, log_hdr->content);
+ break;
+ case RTW89_RF_RPT_LOG:
+ rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
+ log_hdr->content, content_len);
+ break;
+ default:
+ return;
+ }
+
+ log_ptr += chunk_len;
+ len -= chunk_len;
+ }
+}
+
+static void
+rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
+}
+
+static void
+rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
+}
+
+static void
+rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
+}
+
+static void
+rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
+}
+
+static void
+rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
+}
+
+static void
+rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
+}
+
+static
+void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
+};
+
+static void
+rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static
+void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
+};
+
+bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+{
+ switch (class) {
+ case RTW89_PHY_C2H_RFK_LOG:
+ switch (func) {
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
+ return true;
+ default:
+ return false;
+ }
+ case RTW89_PHY_C2H_RFK_REPORT:
+ switch (func) {
+ case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func)
{
@@ -2456,6 +2748,14 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
handler = rtw89_phy_c2h_ra_handler[func];
break;
+ case RTW89_PHY_C2H_RFK_LOG:
+ if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
+ handler = rtw89_phy_c2h_rfk_log_handler[func];
+ break;
+ case RTW89_PHY_C2H_RFK_REPORT:
+ if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
+ handler = rtw89_phy_c2h_rfk_report_handler[func];
+ break;
case RTW89_PHY_C2H_CLASS_DM:
if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
return;
@@ -4620,6 +4920,29 @@ static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
rtw89_phy_ifs_clm_setting_init(rtwdev);
}
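+/* Reset the EDCCA backup state, apply RTL8922A CAV-specific workarounds, and program the TX collision T2R status field. */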
+static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
+ struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+
+ memset(edcca_bak, 0, sizeof(*edcca_bak));
+
+ if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
+ rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
+ rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
+ edcca_regs->tx_collision_t2r_st_mask, 0x29);
+}
+
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
rtw89_phy_stat_init(rtwdev);
@@ -4630,6 +4953,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
+ rtw89_phy_edcca_init(rtwdev);
rtw89_phy_ul_tb_info_init(rtwdev);
rtw89_phy_antdiv_init(rtwdev);
rtw89_chip_rfe_gpio(rtwdev);
@@ -4892,23 +5216,188 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);
-#define EDCCA_DEFAULT 249
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
- u32 reg = rtwdev->chip->edcca_lvl_reg;
- struct rtw89_hal *hal = &rtwdev->hal;
- u32 val;
+ const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
+ struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
if (scan) {
- hal->edcca_bak = rtw89_phy_read32(rtwdev, reg);
- val = hal->edcca_bak;
- u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_A_MSK);
- u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_EDCCA_LVL_P_MSK);
- u32p_replace_bits(&val, EDCCA_DEFAULT, B_SEG0R_PPDU_LVL_MSK);
- rtw89_phy_write32(rtwdev, reg, val);
+ edcca_bak->a =
+ rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask);
+ edcca_bak->p =
+ rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask);
+ edcca_bak->ppdu =
+ rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, EDCCA_MAX);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, EDCCA_MAX);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, EDCCA_MAX);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask,
+ edcca_bak->a);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask,
+ edcca_bak->p);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask,
+ edcca_bak->ppdu);
+ }
+}
+
+static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
+ bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
+ s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
+ u8 path, per20_bitmap;
+ u8 pwdb[8];
+ u32 tmp;
+
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
+ return;
+
+ if (rtwdev->chip->chip_id == RTL8922A)
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
+ edcca_regs->rpt_sel_be_mask, 0);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
+ edcca_regs->rpt_sel_mask, 0);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
+ flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
+ flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
+ flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
+ flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
+ flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
+ pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
+ pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
+ pwdb_fb = u32_get_bits(tmp, MASKBYTE3);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
+ edcca_regs->rpt_sel_mask, 4);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
+ pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);
+
+ per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
+ MASKBYTE0);
+
+ if (rtwdev->chip->chip_id == RTL8922A) {
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
+ edcca_regs->rpt_sel_be_mask, 4);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
+ pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
+ pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
+ pwdb[3] = u32_get_bits(tmp, MASKBYTE0);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
+ edcca_regs->rpt_sel_be_mask, 5);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
+ pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
+ pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
+ pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
} else {
- rtw89_phy_write32(rtwdev, reg, hal->edcca_bak);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
+ edcca_regs->rpt_sel_mask, 0);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
+ pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
+ edcca_regs->rpt_sel_mask, 1);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
+ pwdb[3] = u32_get_bits(tmp, MASKBYTE2);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
+ edcca_regs->rpt_sel_mask, 2);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
+ pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
+ edcca_regs->rpt_sel_mask, 3);
+ tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
+ pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
}
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
+ "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
+ "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
+ pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
+ pwdb[6], pwdb[7]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
+ "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
+ path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
+ "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
+ pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
+}
+
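+/* Threshold follows the weakest associated STA: th = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER - EDCCA_TH_REF, floored at EDCCA_TH_L2H_LB; use EDCCA_MAX when unassociated. */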
+static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
+ bool is_linked = rtwdev->total_sta_assoc > 0;
+ u8 rssi_min = ch_info->rssi_min >> 1;
+ u8 edcca_thre;
+
+ if (!is_linked) {
+ edcca_thre = EDCCA_MAX;
+ } else {
+ edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
+ EDCCA_TH_REF;
+ edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
+ }
+
+ return edcca_thre;
+}
+
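+/* Write a newly computed threshold to the EDCCA level registers only when it differs from the cached value. */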
+void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
+ struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ u8 th;
+
+ th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
+ if (th == edcca_bak->th_old)
+ return;
+
+ edcca_bak->th_old = th;
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
+ "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
+
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, th);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, th);
+ rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, th);
+}
+
+void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
+ return;
+
+ rtw89_phy_edcca_thre_calc(rtwdev);
+ rtw89_phy_edcca_log(rtwdev);
}
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 5c85122e7..3e379077c 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -122,6 +122,13 @@
#define PHYSTS_RSVD BIT(RTW89_RX_TYPE_RSVD)
#define PPDU_FILTER_BITMAP (PHYSTS_MGNT | PHYSTS_DATA)
+#define EDCCA_MAX 249
+#define EDCCA_TH_L2H_LB 66
+#define EDCCA_TH_REF 3
+#define EDCCA_HL_DIFF_NORMAL 8
+#define RSSI_UNIT_CONVER 110
+#define EDCCA_UNIT_CONVER 128
+
enum rtw89_phy_c2h_ra_func {
RTW89_PHY_C2H_FUNC_STS_RPT,
RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT,
@@ -129,6 +136,21 @@ enum rtw89_phy_c2h_ra_func {
RTW89_PHY_C2H_FUNC_RA_MAX,
};
+enum rtw89_phy_c2h_rfk_log_func {
+ RTW89_PHY_C2H_RFK_LOG_FUNC_IQK = 0,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_DPK = 1,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_DACK = 2,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK = 3,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI = 4,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK = 5,
+
+ RTW89_PHY_C2H_RFK_LOG_FUNC_NUM,
+};
+
+enum rtw89_phy_c2h_rfk_report_func {
+ RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
+};
+
enum rtw89_phy_c2h_dm_func {
RTW89_PHY_C2H_DM_FUNC_FW_TEST,
RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT,
@@ -142,6 +164,8 @@ enum rtw89_phy_c2h_class {
RTW89_PHY_C2H_CLASS_RUA,
RTW89_PHY_C2H_CLASS_RA,
RTW89_PHY_C2H_CLASS_DM,
+ RTW89_PHY_C2H_RFK_LOG = 0x8,
+ RTW89_PHY_C2H_RFK_REPORT = 0x9,
RTW89_PHY_C2H_CLASS_BTC_MIN = 0x10,
RTW89_PHY_C2H_CLASS_BTC_MAX = 0x17,
RTW89_PHY_C2H_CLASS_MAX,
@@ -284,8 +308,6 @@ struct rtw89_txpwr_byrate_cfg {
u32 data;
};
-#define DELTA_SWINGIDX_SIZE 30
-
struct rtw89_txpwr_track_cfg {
const s8 (*delta_swingidx_6gb_n)[DELTA_SWINGIDX_SIZE];
const s8 (*delta_swingidx_6gb_p)[DELTA_SWINGIDX_SIZE];
@@ -478,6 +500,10 @@ struct rtw89_txpwr_limit_ru_be {
s8 ru106_26[RTW89_RU_SEC_NUM_BE];
};
+struct rtw89_phy_rfk_log_fmt {
+ const struct rtw89_fw_element_hdr *elm[RTW89_PHY_C2H_RFK_LOG_FUNC_NUM];
+};
+
struct rtw89_phy_gen_def {
u32 cr_base;
const struct rtw89_ccx_regs *ccx;
@@ -591,6 +617,22 @@ enum rtw89_gain_offset rtw89_subband_to_gain_offset_band_of_ofdm(enum rtw89_subb
return RTW89_GAIN_OFFSET_5G_MID;
case RTW89_CH_5G_BAND_4:
return RTW89_GAIN_OFFSET_5G_HIGH;
+ case RTW89_CH_6G_BAND_IDX0:
+ return RTW89_GAIN_OFFSET_6G_L0;
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_GAIN_OFFSET_6G_L1;
+ case RTW89_CH_6G_BAND_IDX2:
+ return RTW89_GAIN_OFFSET_6G_M0;
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_GAIN_OFFSET_6G_M1;
+ case RTW89_CH_6G_BAND_IDX4:
+ return RTW89_GAIN_OFFSET_6G_H0;
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_GAIN_OFFSET_6G_H1;
+ case RTW89_CH_6G_BAND_IDX6:
+ return RTW89_GAIN_OFFSET_6G_UH0;
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_GAIN_OFFSET_6G_UH1;
}
}
@@ -764,6 +806,7 @@ void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
+bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
@@ -791,5 +834,7 @@ u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band);
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
u8 *ch, enum nl80211_band *band);
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
+void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
+void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index aff0fba71..54486e455 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -33,6 +33,10 @@ static inline void rtw89_enter_ips_by_hwflags(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
+ /* prevent entering IPS right after ROC while a scan is still in progress */
+ if (rtwdev->scanning)
+ return;
+
if (hw->conf.flags & IEEE80211_CONF_IDLE)
rtw89_enter_ips(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 672010b9e..8456e2b0c 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -116,6 +116,7 @@
#define B_AX_LTE_MUX_CTRL_PATH BIT(26)
#define R_AX_HCI_OPT_CTRL 0x0074
+#define BIT_WAKE_CTRL_V1 BIT(23)
#define BIT_WAKE_CTRL BIT(5)
#define R_AX_HCI_BG_CTRL 0x0078
@@ -1456,6 +1457,8 @@
#define B_AX_PLE_Q6_MAX_SIZE_MASK GENMASK(27, 16)
#define B_AX_PLE_Q6_MIN_SIZE_MASK GENMASK(11, 0)
#define R_AX_PLE_QTA7_CFG 0x905C
+#define B_AX_PLE_Q7_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_AX_PLE_Q7_MIN_SIZE_MASK GENMASK(11, 0)
#define R_AX_PLE_QTA8_CFG 0x9060
#define R_AX_PLE_QTA9_CFG 0x9064
#define R_AX_PLE_QTA10_CFG 0x9068
@@ -3646,6 +3649,50 @@
#define B_AX_GNT_BT_TX_SW_VAL BIT(1)
#define B_AX_GNT_BT_TX_SW_CTRL BIT(0)
+#define R_BE_SYS_ISO_CTRL 0x0000
+#define B_BE_PWC_EV2EF_B BIT(15)
+#define B_BE_PWC_EV2EF_S BIT(14)
+#define B_BE_PA33V_EN BIT(13)
+#define B_BE_PA12V_EN BIT(12)
+#define B_BE_PAOOBS33V_EN BIT(11)
+#define B_BE_PAOOBS12V_EN BIT(10)
+#define B_BE_ISO_RFDIO BIT(9)
+#define B_BE_ISO_EB2CORE BIT(8)
+#define B_BE_ISO_DIOE BIT(7)
+#define B_BE_ISO_WLPON2PP BIT(6)
+#define B_BE_ISO_IP2MAC_WA02PP BIT(5)
+#define B_BE_ISO_PD2CORE BIT(4)
+#define B_BE_ISO_PA2PCIE BIT(3)
+#define B_BE_ISO_PAOOBS2PCIE BIT(1)
+#define B_BE_ISO_WD2PP BIT(0)
+
+#define R_BE_SYS_PW_CTRL 0x0004
+#define B_BE_SOP_ASWRM BIT(31)
+#define B_BE_SOP_EASWR BIT(30)
+#define B_BE_SOP_PWMM_DSWR BIT(29)
+#define B_BE_SOP_EDSWR BIT(28)
+#define B_BE_SOP_ACKF BIT(27)
+#define B_BE_SOP_ERCK BIT(26)
+#define B_BE_SOP_ANA_CLK_DIVISION_2 BIT(25)
+#define B_BE_SOP_EXTL BIT(24)
+#define B_BE_SOP_OFF_CAPC_EN BIT(23)
+#define B_BE_XTAL_OFF_A_DIE BIT(22)
+#define B_BE_ROP_SWPR BIT(21)
+#define B_BE_DIS_HW_LPLDM BIT(20)
+#define B_BE_DIS_HW_LPURLDO BIT(19)
+#define B_BE_DIS_WLBT_PDNSUSEN_SOPC BIT(18)
+#define B_BE_RDY_SYSPWR BIT(17)
+#define B_BE_EN_WLON BIT(16)
+#define B_BE_APDM_HPDN BIT(15)
+#define B_BE_PSUS_OFF_CAPC_EN BIT(14)
+#define B_BE_AFSM_PCIE_SUS_EN BIT(12)
+#define B_BE_AFSM_WLSUS_EN BIT(11)
+#define B_BE_APFM_SWLPS BIT(10)
+#define B_BE_APFM_OFFMAC BIT(9)
+#define B_BE_APFN_ONMAC BIT(8)
+#define B_BE_CHIP_PDN_EN BIT(7)
+#define B_BE_RDY_MACDIS BIT(6)
+
#define R_BE_SYS_CLK_CTRL 0x0008
#define B_BE_CPU_CLK_EN BIT(14)
#define B_BE_SYMR_BE_CLK_EN BIT(13)
@@ -3656,6 +3703,249 @@
#define B_BE_ANA_CLK_DIVISION_2 BIT(1)
#define B_BE_CNTD16V_EN BIT(0)
+#define R_BE_SYS_WL_EFUSE_CTRL 0x000A
+#define B_BE_OTP_B_PWC_RPT BIT(15)
+#define B_BE_OTP_S_PWC_RPT BIT(14)
+#define B_BE_OTP_ISO_RPT BIT(13)
+#define B_BE_OTP_BURST_RPT BIT(12)
+#define B_BE_OTP_AUTOLOAD_RPT BIT(11)
+#define B_BE_AUTOLOAD_DIS_A_DIE BIT(6)
+#define B_BE_AUTOLOAD_SUS BIT(5)
+#define B_BE_AUTOLOAD_DIS BIT(4)
+
+#define R_BE_SYS_PAGE_CLK_GATED 0x000C
+#define B_BE_USB_APHY_PC_DLP_OP BIT(27)
+#define B_BE_PCIE_APHY_PC_DLP_OP BIT(26)
+#define B_BE_UPHY_POWER_READY_CHK BIT(25)
+#define B_BE_CPHY_POWER_READY_CHK BIT(24)
+#define B_BE_PCIE_PRST_DEBUNC_PERIOD_MASK GENMASK(23, 22)
+#define B_BE_SYM_PRST_DEBUNC_SEL BIT(21)
+#define B_BE_CPHY_AUXCLK_OP BIT(20)
+#define B_BE_SOP_OFFUA_PC BIT(19)
+#define B_BE_SOP_OFFPOOBS_PC BIT(18)
+#define B_BE_PCIE_LAN1_MASK BIT(17)
+#define B_BE_PCIE_LAN0_MASK BIT(16)
+#define B_BE_DIS_CLK_REGF_GATE BIT(15)
+#define B_BE_DIS_CLK_REGE_GATE BIT(14)
+#define B_BE_DIS_CLK_REGD_GATE BIT(13)
+#define B_BE_DIS_CLK_REGC_GATE BIT(12)
+#define B_BE_DIS_CLK_REGB_GATE BIT(11)
+#define B_BE_DIS_CLK_REGA_GATE BIT(10)
+#define B_BE_DIS_CLK_REG9_GATE BIT(9)
+#define B_BE_DIS_CLK_REG8_GATE BIT(8)
+#define B_BE_DIS_CLK_REG7_GATE BIT(7)
+#define B_BE_DIS_CLK_REG6_GATE BIT(6)
+#define B_BE_DIS_CLK_REG5_GATE BIT(5)
+#define B_BE_DIS_CLK_REG4_GATE BIT(4)
+#define B_BE_DIS_CLK_REG3_GATE BIT(3)
+#define B_BE_DIS_CLK_REG2_GATE BIT(2)
+#define B_BE_DIS_CLK_REG1_GATE BIT(1)
+#define B_BE_DIS_CLK_REG0_GATE BIT(0)
+
+#define R_BE_ANAPAR_POW_MAC 0x0016
+#define B_BE_POW_PC_LDO_PORT1 BIT(3)
+#define B_BE_POW_PC_LDO_PORT0 BIT(2)
+#define B_BE_POW_PLL_V1 BIT(1)
+#define B_BE_POW_POWER_CUT_POW_LDO BIT(0)
+
+#define R_BE_SYS_ADIE_PAD_PWR_CTRL 0x0018
+#define B_BE_SYM_PADPDN_WL_RFC1_1P3 BIT(6)
+#define B_BE_SYM_PADPDN_WL_RFC0_1P3 BIT(5)
+
+#define R_BE_AFE_LDO_CTRL 0x0020
+#define B_BE_FORCE_MACBBBT_PWR_ON BIT(31)
+#define B_BE_R_SYM_WLPOFF_P4_PC_EN BIT(28)
+#define B_BE_R_SYM_WLPOFF_P3_PC_EN BIT(27)
+#define B_BE_R_SYM_WLPOFF_P2_PC_EN BIT(26)
+#define B_BE_R_SYM_WLPOFF_P1_PC_EN BIT(25)
+#define B_BE_R_SYM_WLPOFF_PC_EN BIT(24)
+#define B_BE_AON_OFF_PC_EN BIT(23)
+#define B_BE_R_SYM_WLPON_P3_PC_EN BIT(21)
+#define B_BE_R_SYM_WLPON_P2_PC_EN BIT(20)
+#define B_BE_R_SYM_WLPON_P1_PC_EN BIT(19)
+#define B_BE_R_SYM_WLPON_PC_EN BIT(18)
+#define B_BE_R_SYM_WLBBPON1_P1_PC_EN BIT(15)
+#define B_BE_R_SYM_WLBBPON1_PC_EN BIT(14)
+#define B_BE_R_SYM_WLBBPON_P1_PC_EN BIT(13)
+#define B_BE_R_SYM_WLBBPON_PC_EN BIT(12)
+#define B_BE_R_SYM_DIS_WPHYBBOFF_PC BIT(10)
+#define B_BE_R_SYM_WLBBOFF1_P4_PC_EN BIT(9)
+#define B_BE_R_SYM_WLBBOFF1_P3_PC_EN BIT(8)
+#define B_BE_R_SYM_WLBBOFF1_P2_PC_EN BIT(7)
+#define B_BE_R_SYM_WLBBOFF1_P1_PC_EN BIT(6)
+#define B_BE_R_SYM_WLBBOFF1_PC_EN BIT(5)
+#define B_BE_R_SYM_WLBBOFF_P4_PC_EN BIT(4)
+#define B_BE_R_SYM_WLBBOFF_P3_PC_EN BIT(3)
+#define B_BE_R_SYM_WLBBOFF_P2_PC_EN BIT(2)
+#define B_BE_R_SYM_WLBBOFF_P1_PC_EN BIT(1)
+#define B_BE_R_SYM_WLBBOFF_PC_EN BIT(0)
+
+#define R_BE_AFE_CTRL1 0x0024
+#define B_BE_R_SYM_WLCMAC0_P4_PC_EN BIT(28)
+#define B_BE_R_SYM_WLCMAC0_P3_PC_EN BIT(27)
+#define B_BE_R_SYM_WLCMAC0_P2_PC_EN BIT(26)
+#define B_BE_R_SYM_WLCMAC0_P1_PC_EN BIT(25)
+#define B_BE_R_SYM_WLCMAC0_PC_EN BIT(24)
+#define B_BE_DATAMEM_PC3_EN BIT(23)
+#define B_BE_DATAMEM_PC2_EN BIT(22)
+#define B_BE_DATAMEM_PC1_EN BIT(21)
+#define B_BE_DATAMEM_PC_EN BIT(20)
+#define B_BE_DMEM7_PC_EN BIT(19)
+#define B_BE_DMEM6_PC_EN BIT(18)
+#define B_BE_DMEM5_PC_EN BIT(17)
+#define B_BE_DMEM4_PC_EN BIT(16)
+#define B_BE_DMEM3_PC_EN BIT(15)
+#define B_BE_DMEM2_PC_EN BIT(14)
+#define B_BE_DMEM1_PC_EN BIT(13)
+#define B_BE_IMEM4_PC_EN BIT(12)
+#define B_BE_IMEM3_PC_EN BIT(11)
+#define B_BE_IMEM2_PC_EN BIT(10)
+#define B_BE_IMEM1_PC_EN BIT(9)
+#define B_BE_IMEM0_PC_EN BIT(8)
+#define B_BE_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
+#define B_BE_R_SYM_WLCMAC1_P3_PC_EN BIT(3)
+#define B_BE_R_SYM_WLCMAC1_P2_PC_EN BIT(2)
+#define B_BE_R_SYM_WLCMAC1_P1_PC_EN BIT(1)
+#define B_BE_R_SYM_WLCMAC1_PC_EN BIT(0)
+#define B_BE_AFE_CTRL1_SET (B_BE_R_SYM_WLCMAC1_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P1_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P2_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P3_PC_EN | \
+ B_BE_R_SYM_WLCMAC1_P4_PC_EN)
+
+#define R_BE_EFUSE_CTRL 0x0030
+#define B_BE_EF_MODE_SEL_MASK GENMASK(31, 30)
+#define B_BE_EF_RDY BIT(29)
+#define B_BE_EF_COMP_RESULT BIT(28)
+#define B_BE_EF_ADDR_MASK GENMASK(15, 0)
+
+#define R_BE_EFUSE_CTRL_1_V1 0x0034
+#define B_BE_EF_DATA_MASK GENMASK(31, 0)
+
+#define R_BE_WL_BT_PWR_CTRL 0x0068
+#define B_BE_ISO_BD2PP BIT(31)
+#define B_BE_LDOV12B_EN BIT(30)
+#define B_BE_CKEN_BT BIT(29)
+#define B_BE_FEN_BT BIT(28)
+#define B_BE_BTCPU_BOOTSEL BIT(27)
+#define B_BE_SPI_SPEEDUP BIT(26)
+#define B_BE_BT_LDO_MODE BIT(25)
+#define B_BE_ISO_BTPON2PP BIT(22)
+#define B_BE_BT_FUNC_EN BIT(18)
+#define B_BE_BT_HWPDN_SL BIT(17)
+#define B_BE_BT_DISN_EN BIT(16)
+#define B_BE_SDM_SRC_SEL BIT(12)
+#define B_BE_ISO_BA2PP BIT(11)
+#define B_BE_BT_AFE_LDO_EN BIT(10)
+#define B_BE_BT_AFE_PLL_EN BIT(9)
+#define B_BE_WLAN_32K_SEL BIT(6)
+#define B_BE_WL_DRV_EXIST_IDX BIT(5)
+#define B_BE_DOP_EHPAD BIT(4)
+#define B_BE_WL_FUNC_EN BIT(2)
+#define B_BE_WL_HWPDN_SL BIT(1)
+#define B_BE_WL_HWPDN_EN BIT(0)
+
+#define R_BE_SYS_SDIO_CTRL 0x0070
+#define B_BE_MCM_FLASH_EN BIT(28)
+#define B_BE_PCIE_SEC_LOAD BIT(26)
+#define B_BE_PCIE_SER_RSTB BIT(25)
+#define B_BE_PCIE_SEC_LOAD_CLR BIT(24)
+#define B_BE_SDIO_CMD_SW_RST BIT(20)
+#define B_BE_SDIO_INT_POLARITY BIT(19)
+#define B_BE_SDIO_OFF_EN BIT(17)
+#define B_BE_SDIO_ON_EN BIT(16)
+#define B_BE_PCIE_DIS_L2__CTRL_LDO_HCI BIT(15)
+#define B_BE_PCIE_DIS_L2_RTK_PERST BIT(14)
+#define B_BE_PCIE_FORCE_PWR_NGAT BIT(13)
+#define B_BE_PCIE_FORCE_IBX_EN BIT(12)
+#define B_BE_PCIE_AUXCLK_GATE BIT(11)
+#define B_BE_PCIE_WAIT_TIMEOUT_EVENT BIT(10)
+#define B_BE_PCIE_WAIT_TIME BIT(9)
+#define B_BE_L1OFF_TO_L0_RESUME_EVT BIT(8)
+#define B_BE_USBA_FORCE_PWR_NGAT BIT(7)
+#define B_BE_USBD_FORCE_PWR_NGAT BIT(6)
+#define B_BE_BT_CTRL_USB_PWR BIT(5)
+#define B_BE_USB_D_STATE_HOLD BIT(4)
+#define B_BE_R_BE_FORCE_DP BIT(3)
+#define B_BE_R_BE_DP_MODE BIT(2)
+#define B_BE_RES_USB_MASS_STORAGE_DESC BIT(1)
+#define B_BE_USB_WAIT_TIME BIT(0)
+
+#define R_BE_HCI_OPT_CTRL 0x0074
+#define B_BE_HCI_WLAN_IO_ST BIT(31)
+#define B_BE_HCI_WLAN_IO_EN BIT(28)
+#define B_BE_HAXIDMA_IO_ST BIT(27)
+#define B_BE_HAXIDMA_BACKUP_RESTORE_ST BIT(26)
+#define B_BE_HAXIDMA_IO_EN BIT(24)
+#define B_BE_EN_PCIE_WAKE BIT(23)
+#define B_BE_SDIO_PAD_H3L1 BIT(22)
+#define B_BE_USBMAC_ANACLK_SW BIT(21)
+#define B_BE_PCIE_CPHY_CCK_XTAL_SEL BIT(20)
+#define B_BE_SDIO_DATA_PAD_SMT BIT(19)
+#define B_BE_SDIO_PAD_E5 BIT(18)
+#define B_BE_FORCE_PCIE_AUXCLK BIT(17)
+#define B_BE_HCI_LA_ADDR_MAP BIT(16)
+#define B_BE_HCI_LA_GLO_RST BIT(15)
+#define B_BE_USB3_SUS_DIS BIT(14)
+#define B_BE_NOPWR_CTRL_SEL BIT(13)
+#define B_BE_USB_HOST_PWR_OFF_EN BIT(12)
+#define B_BE_SYM_LPS_BLOCK_EN BIT(11)
+#define B_BE_USB_LPM_ACT_EN BIT(10)
+#define B_BE_USB_LPM_NY BIT(9)
+#define B_BE_USB2_SUS_DIS BIT(8)
+#define B_BE_SDIO_PAD_E_MASK GENMASK(7, 5)
+#define B_BE_USB_LPPLL_EN BIT(4)
+#define B_BE_USB1_1_USB2_0_DECISION BIT(3)
+#define B_BE_ROP_SW15 BIT(2)
+#define B_BE_PCI_CKRDY_OPT BIT(1)
+#define B_BE_PCI_VAUX_EN BIT(0)
+
+#define R_BE_SYS_ISO_CTRL_EXTEND 0x0080
+#define B_BE_R_SYM_ISO_DMEM62PP BIT(29)
+#define B_BE_R_SYM_ISO_DMEM52PP BIT(28)
+#define B_BE_R_SYM_ISO_DMEM42PP BIT(27)
+#define B_BE_R_SYM_ISO_DMEM32PP BIT(26)
+#define B_BE_R_SYM_ISO_DMEM22PP BIT(25)
+#define B_BE_R_SYM_ISO_DMEM12PP BIT(24)
+#define B_BE_R_SYM_ISO_IMEM42PP BIT(22)
+#define B_BE_R_SYM_ISO_IMEM32PP BIT(21)
+#define B_BE_R_SYM_ISO_IMEM22PP BIT(20)
+#define B_BE_R_SYM_ISO_IMEM12PP BIT(19)
+#define B_BE_R_SYM_ISO_IMEM02PP BIT(18)
+#define B_BE_R_SYM_ISO_AON_OFF2PP BIT(15)
+#define B_BE_R_SYM_PWC_HCILA BIT(13)
+#define B_BE_R_SYM_PWC_PD12V BIT(12)
+#define B_BE_R_SYM_PWC_UD12V BIT(11)
+#define B_BE_R_SYM_PWC_BTBRG BIT(10)
+#define B_BE_R_SYM_LDOBTSDIO_EN BIT(9)
+#define B_BE_R_SYM_LDOSPDIO_EN BIT(8)
+#define B_BE_R_SYM_ISO_HCILA BIT(4)
+#define B_BE_R_SYM_ISO_BTBRG2PP BIT(2)
+#define B_BE_R_SYM_ISO_BTSDIO2PP BIT(1)
+#define B_BE_R_SYM_ISO_SPDIO2PP BIT(0)
+
+#define R_BE_FEN_RST_ENABLE 0x0084
+#define B_BE_R_SYM_FEN_WLMACOFF BIT(31)
+#define B_BE_R_SYM_ISO_WA12PP BIT(28)
+#define B_BE_R_SYM_ISO_CMAC12PP BIT(25)
+#define B_BE_R_SYM_ISO_CMAC02PP BIT(24)
+#define B_BE_R_SYM_ISO_ADDA_P32PP BIT(23)
+#define B_BE_R_SYM_ISO_ADDA_P22PP BIT(22)
+#define B_BE_R_SYM_ISO_ADDA_P12PP BIT(21)
+#define B_BE_R_SYM_ISO_ADDA_P02PP BIT(20)
+#define B_BE_CMAC1_FEN BIT(17)
+#define B_BE_CMAC0_FEN BIT(16)
+#define B_BE_SYM_ISO_BBPON12PP BIT(13)
+#define B_BE_SYM_ISO_BB12PP BIT(12)
+#define B_BE_BOOT_RDY1 BIT(10)
+#define B_BE_FEN_BB1_IP_RSTN BIT(9)
+#define B_BE_FEN_BB1PLAT_RSTB BIT(8)
+#define B_BE_SYM_ISO_BBPON02PP BIT(5)
+#define B_BE_SYM_ISO_BB02PP BIT(4)
+#define B_BE_BOOT_RDY0 BIT(2)
+#define B_BE_FEN_BB_IP_RSTN BIT(1)
+#define B_BE_FEN_BBPLAT_RSTB BIT(0)
+
#define R_BE_PLATFORM_ENABLE 0x0088
#define B_BE_HOLD_AFTER_RESET BIT(11)
#define B_BE_SYM_WLPLT_MEM_MUX_EN BIT(10)
@@ -3669,6 +3959,92 @@
#define B_BE_WCPU_EN BIT(1)
#define B_BE_PLATFORM_EN BIT(0)
+#define R_BE_WLLPS_CTRL 0x0090
+#define B_BE_LPSOP_BBMEMDS BIT(30)
+#define B_BE_LPSOP_BBOFF BIT(29)
+#define B_BE_LPSOP_MACOFF BIT(28)
+#define B_BE_LPSOP_OFF_CAPC_EN BIT(27)
+#define B_BE_LPSOP_MEM_DS BIT(26)
+#define B_BE_LPSOP_XTALM_LPS BIT(23)
+#define B_BE_LPSOP_XTAL BIT(22)
+#define B_BE_LPSOP_ACLK_DIV_2 BIT(21)
+#define B_BE_LPSOP_ACLK_SEL BIT(20)
+#define B_BE_LPSOP_ASWRM BIT(17)
+#define B_BE_LPSOP_ASWR BIT(16)
+#define B_BE_LPSOP_DSWR_ADJ_MASK GENMASK(15, 12)
+#define B_BE_LPSOP_DSWRSD BIT(10)
+#define B_BE_LPSOP_DSWRM BIT(9)
+#define B_BE_LPSOP_DSWR BIT(8)
+#define B_BE_LPSOP_OLD_ADJ_MASK GENMASK(7, 4)
+#define B_BE_FORCE_LEAVE_LPS BIT(3)
+#define B_BE_LPSOP_OLDSD BIT(2)
+#define B_BE_DIS_WLBT_LPSEN_LOPC BIT(1)
+#define B_BE_WL_LPS_EN BIT(0)
+
+#define R_BE_WLRESUME_CTRL 0x0094
+#define B_BE_LPSROP_DMEM5_RSU_EN BIT(31)
+#define B_BE_LPSROP_DMEM4_RSU_EN BIT(30)
+#define B_BE_LPSROP_DMEM3_RSU_EN BIT(29)
+#define B_BE_LPSROP_DMEM2_RSU_EN BIT(28)
+#define B_BE_LPSROP_DMEM1_RSU_EN BIT(27)
+#define B_BE_LPSROP_DMEM0_RSU_EN BIT(26)
+#define B_BE_LPSROP_IMEM5_RSU_EN BIT(25)
+#define B_BE_LPSROP_IMEM4_RSU_EN BIT(24)
+#define B_BE_LPSROP_IMEM3_RSU_EN BIT(23)
+#define B_BE_LPSROP_IMEM2_RSU_EN BIT(22)
+#define B_BE_LPSROP_IMEM1_RSU_EN BIT(21)
+#define B_BE_LPSROP_IMEM0_RSU_EN BIT(20)
+#define B_BE_LPSROP_BB1_W_BB0 BIT(14)
+#define B_BE_LPSROP_CMAC1 BIT(13)
+#define B_BE_LPSROP_CMAC0 BIT(12)
+#define B_BE_LPSROP_XTALM BIT(11)
+#define B_BE_LPSROP_PLLM BIT(10)
+#define B_BE_LPSROP_HIOE BIT(9)
+#define B_BE_LPSROP_CPU BIT(8)
+#define B_BE_LPSROP_LOWPWRPLL BIT(7)
+#define B_BE_LPSROP_DSWRSD_SEL_MASK GENMASK(5, 4)
+
+#define R_BE_EFUSE_CTRL_2_V1 0x00A4
+#define B_BE_EF_ENT BIT(31)
+#define B_BE_EF_TCOLUMN_EN BIT(29)
+#define B_BE_BT_OTP_PWC_DIS BIT(28)
+#define B_BE_EF_RDT BIT(27)
+#define B_BE_R_SYM_AUTOLOAD_WITH_PMC_SEL BIT(24)
+#define B_BE_EF_PGTS_MASK GENMASK(23, 20)
+#define B_BE_EF_BURST BIT(19)
+#define B_BE_EF_TEST_SEL_MASK GENMASK(18, 16)
+#define B_BE_EF_TROW_EN BIT(15)
+#define B_BE_EF_ERR_FLAG BIT(14)
+#define B_BE_EF_FBURST_DIS BIT(13)
+#define B_BE_EF_HT_SEL BIT(12)
+#define B_BE_EF_DSB_EN BIT(11)
+#define B_BE_EF_DLY_SEL_MASK GENMASK(3, 0)
+
+#define R_BE_PMC_DBG_CTRL2 0x00CC
+#define B_BE_EFUSE_BURN_GNT_MASK GENMASK(31, 24)
+#define B_BE_DIS_IOWRAP_TIMEOUT BIT(16)
+#define B_BE_STOP_WL_PMC BIT(9)
+#define B_BE_STOP_SYM_PMC BIT(8)
+#define B_BE_SYM_REG_PCIE_WRMSK BIT(7)
+#define B_BE_BT_ACCESS_WL_PAGE0 BIT(6)
+#define B_BE_R_BE_RST_WLPMC BIT(5)
+#define B_BE_R_BE_RST_PD12N BIT(4)
+#define B_BE_SYSON_DIS_WLR_BE_WRMSK BIT(3)
+#define B_BE_SYSON_DIS_PMCR_BE_WRMSK BIT(2)
+#define B_BE_SYSON_R_BE_ARB_MASK GENMASK(1, 0)
+
+#define R_BE_PCIE_MIO_INTF 0x00E4
+#define B_BE_AON_MIO_EPHY_1K_SEL_MASK GENMASK(29, 24)
+#define B_BE_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
+#define B_BE_PCIE_MIO_ASIF BIT(15)
+#define B_BE_PCIE_MIO_BYIOREG BIT(13)
+#define B_BE_PCIE_MIO_RE BIT(12)
+#define B_BE_PCIE_MIO_WE_MASK GENMASK(11, 8)
+#define B_BE_PCIE_MIO_ADDR_MASK GENMASK(7, 0)
+
+#define R_BE_PCIE_MIO_INTD 0x00E8
+#define B_BE_PCIE_MIO_DATA_MASK GENMASK(31, 0)
+
#define R_BE_HALT_H2C_CTRL 0x0160
#define B_BE_HALT_H2C_TRIGGER BIT(0)
@@ -3693,6 +4069,82 @@
#define R_BE_SECURE_BOOT_MALLOC_INFO 0x0184
+#define R_BE_FWS1IMR 0x0198
+#define B_BE_FS_RPWM_INT_EN_V1 BIT(24)
+#define B_BE_PCIE_HOTRST_EN BIT(22)
+#define B_BE_PCIE_SER_TIMEOUT_INDIC_EN BIT(21)
+#define B_BE_PCIE_RXI300_SLVTOUT_INDIC_EN BIT(20)
+#define B_BE_AON_PCIE_FLR_INT_EN BIT(19)
+#define B_BE_PCIE_ERR_INDIC_INT_EN BIT(18)
+#define B_BE_SDIO_ERR_INDIC_INT_EN BIT(17)
+#define B_BE_USB_ERR_INDIC_INT_EN BIT(16)
+#define B_BE_FS_GPIO27_INT_EN BIT(11)
+#define B_BE_FS_GPIO26_INT_EN BIT(10)
+#define B_BE_FS_GPIO25_INT_EN BIT(9)
+#define B_BE_FS_GPIO24_INT_EN BIT(8)
+#define B_BE_FS_GPIO23_INT_EN BIT(7)
+#define B_BE_FS_GPIO22_INT_EN BIT(6)
+#define B_BE_FS_GPIO21_INT_EN BIT(5)
+#define B_BE_FS_GPIO20_INT_EN BIT(4)
+#define B_BE_FS_GPIO19_INT_EN BIT(3)
+#define B_BE_FS_GPIO18_INT_EN BIT(2)
+#define B_BE_FS_GPIO17_INT_EN BIT(1)
+#define B_BE_FS_GPIO16_INT_EN BIT(0)
+
+#define R_BE_HIMR0 0x01A0
+#define B_BE_WDT_DATACPU_TIMEOUT_INT_EN BIT(25)
+#define B_BE_HALT_D2H_INT_EN BIT(24)
+#define B_BE_WDT_TIMEOUT_INT_EN BIT(22)
+#define B_BE_HALT_C2H_INT_EN BIT(21)
+#define B_BE_RON_INT_EN BIT(20)
+#define B_BE_PDNINT_EN BIT(19)
+#define B_BE_SPSANA_OCP_INT_EN BIT(18)
+#define B_BE_SPS_OCP_INT_EN BIT(17)
+#define B_BE_BTON_STS_UPDATE_INT_EN BIT(16)
+#define B_BE_GPIOF_INT_EN BIT(15)
+#define B_BE_GPIOE_INT_EN BIT(14)
+#define B_BE_GPIOD_INT_EN BIT(13)
+#define B_BE_GPIOC_INT_EN BIT(12)
+#define B_BE_GPIOB_INT_EN BIT(11)
+#define B_BE_GPIOA_INT_EN BIT(10)
+#define B_BE_GPIO9_INT_EN BIT(9)
+#define B_BE_GPIO8_INT_EN BIT(8)
+#define B_BE_GPIO7_INT_EN BIT(7)
+#define B_BE_GPIO6_INT_EN BIT(6)
+#define B_BE_GPIO5_INT_EN BIT(5)
+#define B_BE_GPIO4_INT_EN BIT(4)
+#define B_BE_GPIO3_INT_EN BIT(3)
+#define B_BE_GPIO2_INT_EN BIT(2)
+#define B_BE_GPIO1_INT_EN BIT(1)
+#define B_BE_GPIO0_INT_EN BIT(0)
+
+#define R_BE_HISR0 0x01A4
+#define B_BE_WDT_DATACPU_TIMEOUT_INT BIT(25)
+#define B_BE_HALT_D2H_INT BIT(24)
+#define B_BE_WDT_TIMEOUT_INT BIT(22)
+#define B_BE_HALT_C2H_INT BIT(21)
+#define B_BE_RON_INT BIT(20)
+#define B_BE_PDNINT BIT(19)
+#define B_BE_SPSANA_OCP_INT BIT(18)
+#define B_BE_SPS_OCP_INT BIT(17)
+#define B_BE_BTON_STS_UPDATE_INT BIT(16)
+#define B_BE_GPIOF_INT BIT(15)
+#define B_BE_GPIOE_INT BIT(14)
+#define B_BE_GPIOD_INT BIT(13)
+#define B_BE_GPIOC_INT BIT(12)
+#define B_BE_GPIOB_INT BIT(11)
+#define B_BE_GPIOA_INT BIT(10)
+#define B_BE_GPIO9_INT BIT(9)
+#define B_BE_GPIO8_INT BIT(8)
+#define B_BE_GPIO7_INT BIT(7)
+#define B_BE_GPIO6_INT BIT(6)
+#define B_BE_GPIO5_INT BIT(5)
+#define B_BE_GPIO4_INT BIT(4)
+#define B_BE_GPIO3_INT BIT(3)
+#define B_BE_GPIO2_INT BIT(2)
+#define B_BE_GPIO1_INT BIT(1)
+#define B_BE_GPIO0_INT BIT(0)
+
#define R_BE_WCPU_FW_CTRL 0x01E0
#define B_BE_RUN_ENV_MASK GENMASK(31, 30)
#define B_BE_WCPU_FWDL_STATUS_MASK GENMASK(29, 26)
@@ -3738,6 +4190,78 @@
#define R_BE_UDM2 0x01F8
#define B_BE_UDM2_EPC_RA_MASK GENMASK(31, 0)
+#define R_BE_AFE_ON_CTRL0 0x0240
+#define B_BE_REG_LPF_R3_3_0_MASK GENMASK(31, 29)
+#define B_BE_REG_LPF_R2_MASK GENMASK(28, 24)
+#define B_BE_REG_LPF_C3_MASK GENMASK(23, 21)
+#define B_BE_REG_LPF_C2_MASK GENMASK(20, 18)
+#define B_BE_REG_LPF_C1_MASK GENMASK(17, 15)
+#define B_BE_REG_CP_ICPX2 BIT(14)
+#define B_BE_REG_CP_ICP_SEL_FAST_MASK GENMASK(13, 10)
+#define B_BE_REG_CP_ICP_SEL_MASK GENMASK(9, 6)
+#define B_BE_REG_IB_PI_MASK GENMASK(5, 4)
+#define B_BE_REG_CK_DEBUG_BT BIT(3)
+#define B_BE_EN_PC_LDO BIT(2)
+#define B_BE_LDO_VSEL_MASK GENMASK(1, 0)
+
+#define R_BE_AFE_ON_CTRL1 0x0244
+#define B_BE_REG_CK_MON_SEL_MASK GENMASK(31, 29)
+#define B_BE_REG_CK_MON_CK960M_EN BIT(28)
+#define B_BE_REG_XTAL_FREQ_SEL BIT(27)
+#define B_BE_REG_XTAL_EDGE_SEL BIT(26)
+#define B_BE_REG_VCO_KVCO BIT(25)
+#define B_BE_REG_SDM_EDGE_SEL BIT(24)
+#define B_BE_REG_SDM_CK_SEL BIT(23)
+#define B_BE_REG_SDM_CK_GATED BIT(22)
+#define B_BE_REG_PFD_RESET_GATED BIT(21)
+#define B_BE_REG_LPF_R3_FAST_MASK GENMASK(20, 16)
+#define B_BE_REG_LPF_R2_FAST_MASK GENMASK(15, 11)
+#define B_BE_REG_LPF_C3_FAST_MASK GENMASK(10, 8)
+#define B_BE_REG_LPF_C2_FAST_MASK GENMASK(7, 5)
+#define B_BE_REG_LPF_C1_FAST_MASK GENMASK(4, 2)
+#define B_BE_REG_LPF_R3_4_MASK GENMASK(1, 0)
+
+#define R_BE_AFE_ON_CTRL3 0x024C
+#define B_BE_LDO_VSEL_DA_1_MASK GENMASK(31, 30)
+#define B_BE_LDO_VSEL_DA_0_MASK GENMASK(29, 28)
+#define B_BE_LDO_VSEL_D2S_1_MASK GENMASK(27, 26)
+#define B_BE_LDO_VSEL_D2S_0_MASK GENMASK(25, 24)
+#define B_BE_LDO_VSEL_BUF_MASK GENMASK(23, 22)
+#define B_BE_REG_R2_L_MASK GENMASK(21, 19)
+#define B_BE_REG_R1_L_MASK GENMASK(18, 16)
+#define B_BE_REG_CK_DEBUG_BT_MON BIT(15)
+#define B_BE_REG_BT_CLK_BUF_POWER BIT(14)
+#define B_BE_REG_BG_OUT_BTADC_V1 BIT(13)
+#define B_BE_REG_SEL_V18 BIT(11)
+#define B_BE_REG_FRAC_EN BIT(10)
+#define B_BE_REG_CK1920M_EN BIT(9)
+#define B_BE_REG_CK1280M_EN BIT(8)
+#define B_BE_REG_12LDO_SEL_MASK GENMASK(7, 6)
+#define B_BE_REG_09LDO_SEL_MASK GENMASK(5, 4)
+#define B_BE_REG_VC_TH BIT(3)
+#define B_BE_REG_VC_TL BIT(2)
+#define B_BE_REG_CK40M_EN BIT(1)
+#define B_BE_REG_CK640M_EN BIT(0)
+
+#define R_BE_WLAN_XTAL_SI_CTRL 0x0270
+#define B_BE_WL_XTAL_SI_CMD_POLL BIT(31)
+#define B_BE_WL_XTAL_SI_CHIPID_MASK GENMASK(30, 28)
+#define B_BE_WL_XTAL_SI_MODE_MASK GENMASK(25, 24)
+#define B_BE_WL_XTAL_SI_BITMASK_MASK GENMASK(23, 16)
+#define B_BE_WL_XTAL_SI_DATA_MASK GENMASK(15, 8)
+#define B_BE_WL_XTAL_SI_ADDR_MASK GENMASK(7, 0)
+
+#define R_BE_IC_PWR_STATE 0x03F0
+#define B_BE_WHOLE_SYS_PWR_STE_MASK GENMASK(25, 16)
+#define MAC_AX_SYS_ACT 0x220
+#define B_BE_WLMAC_PWR_STE_MASK GENMASK(9, 8)
+#define B_BE_UART_HCISYS_PWR_STE_MASK GENMASK(7, 6)
+#define B_BE_SDIO_HCISYS_PWR_STE_MASK GENMASK(5, 4)
+#define B_BE_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2)
+#define B_BE_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0)
+
+#define R_BE_WLCPU_PORT_PC 0x03FC
+
#define R_BE_DCPU_PLATFORM_ENABLE 0x0888
#define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10)
#define B_BE_DCPU_WARM_EN BIT(9)
@@ -3747,8 +4271,947 @@
#define B_BE_DCPU_EN BIT(1)
#define B_BE_DCPU_PLATFORM_EN BIT(0)
+#define R_BE_PL_AXIDMA_IDCT_MSK 0x0910
+#define B_BE_PL_AXIDMA_RRESP_ERR_MASK BIT(6)
+#define B_BE_PL_AXIDMA_BRESP_ERR_MASK BIT(5)
+#define B_BE_PL_AXIDMA_FC_ERR_MASK BIT(4)
+#define B_BE_PL_AXIDMA_TXBD_LEN0_MASK BIT(3)
+#define B_BE_PL_AXIDMA_TXBD_4KBOUD_LENERR_MASK BIT(2)
+#define B_BE_PL_AXIDMA_TXBD_RX_STUCK_MASK BIT(1)
+#define B_BE_PL_AXIDMA_TXBD_TX_STUCK_MASK BIT(0)
+#define B_BE_PL_AXIDMA_IDCT_MSK_CLR (B_BE_PL_AXIDMA_TXBD_TX_STUCK_MASK | \
+ B_BE_PL_AXIDMA_TXBD_RX_STUCK_MASK | \
+ B_BE_PL_AXIDMA_TXBD_LEN0_MASK | \
+ B_BE_PL_AXIDMA_FC_ERR_MASK | \
+ B_BE_PL_AXIDMA_BRESP_ERR_MASK | \
+ B_BE_PL_AXIDMA_RRESP_ERR_MASK)
+#define B_BE_PL_AXIDMA_IDCT_MSK_SET (B_BE_PL_AXIDMA_TXBD_TX_STUCK_MASK | \
+ B_BE_PL_AXIDMA_TXBD_RX_STUCK_MASK | \
+ B_BE_PL_AXIDMA_TXBD_LEN0_MASK | \
+ B_BE_PL_AXIDMA_FC_ERR_MASK)
+
+#define R_BE_PL_AXIDMA_IDCT 0x0914
+#define B_BE_PL_AXIDMA_RRESP_ERR BIT(6)
+#define B_BE_PL_AXIDMA_BRESP_ERR BIT(5)
+#define B_BE_PL_AXIDMA_FC_ERR BIT(4)
+#define B_BE_PL_AXIDMA_TXBD_LEN0 BIT(3)
+#define B_BE_PL_AXIDMA_TXBD_4KBOUD_LENERR BIT(2)
+#define B_BE_PL_AXIDMA_TXBD_RX_STUCK BIT(1)
+#define B_BE_PL_AXIDMA_TXBD_TX_STUCK BIT(0)
+
#define R_BE_FILTER_MODEL_ADDR 0x0C04
+#define R_BE_WLAN_WDT 0x3050
+#define B_BE_WLAN_WDT_TIMEOUT BIT(31)
+#define B_BE_WLAN_WDT_TIMER_CLEAR BIT(4)
+#define B_BE_WLAN_WDT_BYPASS BIT(1)
+#define B_BE_WLAN_WDT_ENABLE BIT(0)
+
+#define R_BE_AXIDMA_WDT 0x305C
+#define B_BE_AXIDMA_WDT_TIMEOUT BIT(31)
+#define B_BE_AXIDMA_WDT_TIMER_CLEAR BIT(4)
+#define B_BE_AXIDMA_WDT_BYPASS BIT(1)
+#define B_BE_AXIDMA_WDT_ENABLE BIT(0)
+
+#define R_BE_AON_WDT 0x3068
+#define B_BE_AON_WDT_TIMEOUT BIT(31)
+#define B_BE_AON_WDT_TIMER_CLEAR BIT(4)
+#define B_BE_AON_WDT_BYPASS BIT(1)
+#define B_BE_AON_WDT_ENABLE BIT(0)
+
+#define R_BE_AON_WDT_TMR 0x306C
+#define R_BE_MDIO_WDT_TMR 0x3090
+#define R_BE_LA_MODE_WDT_TMR 0x309C
+#define R_BE_WDT_AR_TMR 0x3144
+#define R_BE_WDT_AW_TMR 0x3150
+#define R_BE_WLAN_WDT_TMR 0x3054
+#define R_BE_WDT_W_TMR 0x315C
+#define R_BE_AXIDMA_WDT_TMR 0x3060
+#define R_BE_WDT_B_TMR 0x3164
+#define R_BE_WDT_R_TMR 0x316C
+#define R_BE_LOCAL_WDT_TMR 0x3084
+
+#define R_BE_LOCAL_WDT 0x3080
+#define B_BE_LOCAL_WDT_TIMEOUT BIT(31)
+#define B_BE_LOCAL_WDT_TIMER_CLEAR BIT(4)
+#define B_BE_LOCAL_WDT_BYPASS BIT(1)
+#define B_BE_LOCAL_WDT_ENABLE BIT(0)
+
+#define R_BE_MDIO_WDT 0x308C
+#define B_BE_MDIO_WDT_TIMEOUT BIT(31)
+#define B_BE_MDIO_WDT_TIMER_CLEAR BIT(4)
+#define B_BE_MDIO_WDT_BYPASS BIT(1)
+#define B_BE_MDIO_WDT_ENABLE BIT(0)
+
+#define R_BE_LA_MODE_WDT 0x3098
+#define B_BE_LA_MODE_WDT_TIMEOUT BIT(31)
+#define B_BE_LA_MODE_WDT_TIMER_CLEAR BIT(4)
+#define B_BE_LA_MODE_WDT_BYPASS BIT(1)
+#define B_BE_LA_MODE_WDT_ENABLE BIT(0)
+
+#define R_BE_WDT_AR 0x3140
+#define B_BE_WDT_AR_TIMEOUT BIT(31)
+#define B_BE_WDT_AR_TIMER_CLEAR BIT(4)
+#define B_BE_WDT_AR_BYPASS BIT(1)
+#define B_BE_WDT_AR_ENABLE BIT(0)
+
+#define R_BE_WDT_AW 0x314C
+#define B_BE_WDT_AW_TIMEOUT BIT(31)
+#define B_BE_WDT_AW_TIMER_CLEAR BIT(4)
+#define B_BE_WDT_AW_BYPASS BIT(1)
+#define B_BE_WDT_AW_ENABLE BIT(0)
+
+#define R_BE_WDT_W 0x3158
+#define B_BE_WDT_W_TIMEOUT BIT(31)
+#define B_BE_WDT_W_TIMER_CLEAR BIT(4)
+#define B_BE_WDT_W_BYPASS BIT(1)
+#define B_BE_WDT_W_ENABLE BIT(0)
+
+#define R_BE_WDT_B 0x3160
+#define B_BE_WDT_B_TIMEOUT BIT(31)
+#define B_BE_WDT_B_TIMER_CLEAR BIT(4)
+#define B_BE_WDT_B_BYPASS BIT(1)
+#define B_BE_WDT_B_ENABLE BIT(0)
+
+#define R_BE_WDT_R 0x3168
+#define B_BE_WDT_R_TIMEOUT BIT(31)
+#define B_BE_WDT_R_TIMER_CLEAR BIT(4)
+#define B_BE_WDT_R_BYPASS BIT(1)
+#define B_BE_WDT_R_ENABLE BIT(0)
+
+#define R_BE_LTR_DECISION_CTRL_V1 0x3610
+#define B_BE_ENABLE_LTR_CTL_DECISION BIT(31)
+#define B_BE_LAT_LTR_IDX_DRV_VLD_V1 BIT(24)
+#define B_BE_LAT_LTR_IDX_DRV_V1_MASK GENMASK(23, 22)
+#define B_BE_LAT_LTR_IDX_FW_VLD_V1 BIT(21)
+#define B_BE_LAT_LTR_IDX_FW_V1_MASK GENMASK(20, 19)
+#define B_BE_LAT_LTR_IDX_HW_VLD_V1 BIT(18)
+#define B_BE_LAT_LTR_IDX_HW_V1_MASK GENMASK(17, 16)
+#define B_BE_LTR_IDX_DRV_V1_MASK GENMASK(15, 14)
+#define B_BE_LTR_REQ_DRV_V1 BIT(13)
+#define B_BE_LTR_IDX_DISABLE_V1_MASK GENMASK(9, 8)
+#define B_BE_LTR_EN_PORT_V1_MASK GENMASK(6, 4)
+#define B_BE_LTR_DRV_DEC_EN_V1 BIT(6)
+#define B_BE_LTR_FW_DEC_EN_V1 BIT(5)
+#define B_BE_LTR_HW_DEC_EN_V1 BIT(4)
+#define B_BE_LTR_SPACE_IDX_MASK GENMASK(1, 0)
+
+#define R_BE_LTR_LATENCY_IDX0_V1 0x3614
+#define R_BE_LTR_LATENCY_IDX1_V1 0x3618
+#define R_BE_LTR_LATENCY_IDX2_V1 0x361C
+#define R_BE_LTR_LATENCY_IDX3_V1 0x3620
+
+#define R_BE_HCI_FUNC_EN 0x7880
+#define B_BE_HCI_CR_PROTECT BIT(31)
+#define B_BE_HCI_TRXBUF_EN BIT(2)
+#define B_BE_HCI_RXDMA_EN BIT(1)
+#define B_BE_HCI_TXDMA_EN BIT(0)
+
+#define R_BE_DMAC_FUNC_EN 0x8400
+#define B_BE_DMAC_CRPRT BIT(31)
+#define B_BE_MAC_FUNC_EN BIT(30)
+#define B_BE_DMAC_FUNC_EN BIT(29)
+#define B_BE_MPDU_PROC_EN BIT(28)
+#define B_BE_WD_RLS_EN BIT(27)
+#define B_BE_DLE_WDE_EN BIT(26)
+#define B_BE_TXPKT_CTRL_EN BIT(25)
+#define B_BE_STA_SCH_EN BIT(24)
+#define B_BE_DLE_PLE_EN BIT(23)
+#define B_BE_PKT_BUF_EN BIT(22)
+#define B_BE_DMAC_TBL_EN BIT(21)
+#define B_BE_PKT_IN_EN BIT(20)
+#define B_BE_DLE_CPUIO_EN BIT(19)
+#define B_BE_DISPATCHER_EN BIT(18)
+#define B_BE_BBRPT_EN BIT(17)
+#define B_BE_MAC_SEC_EN BIT(16)
+#define B_BE_DMACREG_GCKEN BIT(15)
+#define B_BE_H_AXIDMA_EN BIT(14)
+#define B_BE_DMAC_MLO_EN BIT(11)
+#define B_BE_PLRLS_EN BIT(10)
+#define B_BE_P_AXIDMA_EN BIT(9)
+#define B_BE_DLE_DATACPUIO_EN BIT(8)
+#define B_BE_LTR_CTL_EN BIT(7)
+
+#define R_BE_DMAC_CLK_EN 0x8404
+#define B_BE_MAC_CKEN BIT(30)
+#define B_BE_DMAC_CKEN BIT(29)
+#define B_BE_MPDU_CKEN BIT(28)
+#define B_BE_WD_RLS_CLK_EN BIT(27)
+#define B_BE_DLE_WDE_CLK_EN BIT(26)
+#define B_BE_TXPKT_CTRL_CLK_EN BIT(25)
+#define B_BE_STA_SCH_CLK_EN BIT(24)
+#define B_BE_DLE_PLE_CLK_EN BIT(23)
+#define B_BE_PKTBUF_CKEN BIT(22)
+#define B_BE_DMAC_TABLE_CLK_EN BIT(21)
+#define B_BE_PKT_IN_CLK_EN BIT(20)
+#define B_BE_DLE_CPUIO_CLK_EN BIT(19)
+#define B_BE_DISPATCHER_CLK_EN BIT(18)
+#define B_BE_BBRPT_CLK_EN BIT(17)
+#define B_BE_MAC_SEC_CLK_EN BIT(16)
+#define B_BE_H_AXIDMA_CKEN BIT(14)
+#define B_BE_DMAC_MLO_CKEN BIT(11)
+#define B_BE_PLRLS_CKEN BIT(10)
+#define B_BE_P_AXIDMA_CKEN BIT(9)
+#define B_BE_DLE_DATACPUIO_CKEN BIT(8)
+
+#define R_BE_LTR_CTRL_0 0x8410
+#define B_BE_LTR_REQ_FW BIT(18)
+#define B_BE_LTR_IDX_FW_MASK GENMASK(17, 16)
+#define B_BE_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8)
+#define B_BE_LTR_WD_NOEMP_CHK BIT(1)
+#define B_BE_LTR_HW_EN BIT(0)
+
+#define R_BE_LTR_CFG_0 0x8414
+#define B_BE_LTR_IDX_DISABLE_MASK GENMASK(17, 16)
+#define B_BE_LTR_IDX_IDLE_MASK GENMASK(15, 14)
+#define B_BE_LTR_IDX_ACTIVE_MASK GENMASK(13, 12)
+#define B_BE_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8)
+#define B_BE_EN_LTR_CMAC_RX_USE_PG_CHK BIT(3)
+#define B_BE_EN_LTR_WD_NON_EMPTY_CHK BIT(2)
+#define B_BE_EN_LTR_HAXIDMA_TX_IDLE_CHK BIT(1)
+#define B_BE_EN_LTR_HAXIDMA_RX_IDLE_CHK BIT(0)
+
+#define R_BE_LTR_CFG_1 0x8418
+#define B_BE_LTR_CMAC1_RX_USE_PG_TH_MASK GENMASK(27, 16)
+#define B_BE_LTR_CMAC0_RX_USE_PG_TH_MASK GENMASK(11, 0)
+
+#define R_BE_DMAC_TABLE_CTRL 0x8420
+#define B_BE_HWAMSDU_PADDING_MODE BIT(31)
+#define B_BE_MACID_MPDU_PROCESSOR_OFFSET_MASK GENMASK(26, 16)
+#define B_BE_DMAC_ADDR_MODE BIT(12)
+#define B_BE_DMAC_CTRL_INFO_SER_IO BIT(11)
+#define B_BE_DMAC_CTRL_INFO_OFFSET_MASK GENMASK(10, 0)
+
+#define R_BE_SER_DBG_INFO 0x8424
+#define B_BE_SER_L0_PROMOTE_L1_EVENT_MASK GENMASK(31, 28)
+#define B_BE_SER_L1_COUNTER_MASK GENMASK(27, 24)
+#define B_BE_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
+#define B_BE_SER_L0_COUNTER_MASK GENMASK(8, 0)
+
+#define R_BE_DLE_EMPTY0 0x8430
+#define B_BE_PLE_EMPTY_QTA_DMAC_H2D BIT(27)
+#define B_BE_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
+#define B_BE_PLE_EMPTY_QTA_DMAC_MPDU_TX BIT(25)
+#define B_BE_PLE_EMPTY_QTA_DMAC_WLAN_CPU BIT(24)
+#define B_BE_PLE_EMPTY_QTA_DMAC_H2C BIT(23)
+#define B_BE_PLE_EMPTY_QTA_DMAC_B1_TXPL BIT(22)
+#define B_BE_PLE_EMPTY_QTA_DMAC_B0_TXPL BIT(21)
+#define B_BE_WDE_EMPTY_QTA_DMAC_CPUIO BIT(20)
+#define B_BE_WDE_EMPTY_QTA_DMAC_PKTIN BIT(19)
+#define B_BE_WDE_EMPTY_QTA_DMAC_DATA_CPU BIT(18)
+#define B_BE_WDE_EMPTY_QTA_DMAC_WLAN_CPU BIT(17)
+#define B_BE_WDE_EMPTY_QTA_DMAC_HIF BIT(16)
+#define B_BE_WDE_EMPTY_QUE_CMAC_B1_HIQ BIT(15)
+#define B_BE_WDE_EMPTY_QUE_CMAC_B1_MBH BIT(14)
+#define B_BE_WDE_EMPTY_QUE_CMAC_B0_OTHERS BIT(13)
+#define B_BE_WDE_EMPTY_QUE_DMAC_MLO_ACQ BIT(12)
+#define B_BE_WDE_EMPTY_QUE_DMAC_MLO_MISC BIT(11)
+#define B_BE_WDE_EMPTY_QUE_DMAC_PKTIN BIT(10)
+#define B_BE_PLE_EMPTY_QUE_DMAC_SEC_TX BIT(9)
+#define B_BE_PLE_EMPTY_QUE_DMAC_MPDU_TX BIT(8)
+#define B_BE_WDE_EMPTY_QUE_OTHERS BIT(7)
+#define B_BE_WDE_EMPTY_QUE_CMAC_WMM3 BIT(6)
+#define B_BE_WDE_EMPTY_QUE_CMAC_WMM2 BIT(5)
+#define B_BE_WDE_EMPTY_QUE_CMAC0_WMM1 BIT(4)
+#define B_BE_WDE_EMPTY_QUE_CMAC0_WMM0 BIT(3)
+#define B_BE_WDE_EMPTY_QUE_CMAC1_MBH BIT(2)
+#define B_BE_WDE_EMPTY_QUE_CMAC0_MBH BIT(1)
+#define B_BE_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0)
+
+#define R_BE_DLE_EMPTY1 0x8434
+#define B_BE_PLE_EMPTY_QTA_CMAC_DMA_TXRPT BIT(21)
+#define B_BE_PLE_EMPTY_QTA_DMAC_WDRLS BIT(20)
+#define B_BE_PLE_EMPTY_QTA_CMAC1_DMA_BBRPT BIT(19)
+#define B_BE_PLE_EMPTY_QTA_CMAC1_DMA_RX BIT(18)
+#define B_BE_PLE_EMPTY_QTA_CMAC0_DMA_RX BIT(17)
+#define B_BE_PLE_EMPTY_QTA_DMAC_C2H BIT(16)
+#define B_BE_PLE_EMPTY_QUE_DMAC_PLRLS BIT(5)
+#define B_BE_PLE_EMPTY_QUE_DMAC_CPUIO BIT(4)
+#define B_BE_PLE_EMPTY_QUE_DMAC_SEC_RX BIT(3)
+#define B_BE_PLE_EMPTY_QUE_DMAC_MPDU_RX BIT(2)
+#define B_BE_PLE_EMPTY_QUE_DMAC_HDP BIT(1)
+#define B_BE_WDE_EMPTY_QUE_DMAC_WDRLS BIT(0)
+
+#define R_BE_SER_L1_DBG_CNT_0 0x8440
+#define B_BE_SER_L1_WDRLS_CNT_MASK GENMASK(31, 24)
+#define B_BE_SER_L1_SEC_CNT_MASK GENMASK(23, 16)
+#define B_BE_SER_L1_MPDU_CNT_MASK GENMASK(15, 8)
+#define B_BE_SER_L1_STA_SCH_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_SER_L1_DBG_CNT_1 0x8444
+#define B_BE_SER_L1_WDE_CNT_MASK GENMASK(31, 24)
+#define B_BE_SER_L1_TXPKTCTRL_CNT_MASK GENMASK(23, 16)
+#define B_BE_SER_L1_PLE_CNT_MASK GENMASK(15, 8)
+#define B_BE_SER_L1_PKTIN_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_SER_L1_DBG_CNT_2 0x8448
+#define B_BE_SER_L1_DISP_CNT_MASK GENMASK(31, 24)
+#define B_BE_SER_L1_APB_BRIDGE_CNT_MASK GENMASK(23, 16)
+#define B_BE_SER_L1_DLE_W_CPUIO_CNT_MASK GENMASK(15, 8)
+#define B_BE_SER_L1_BBRPT_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_SER_L1_DBG_CNT_3 0x844C
+#define B_BE_SER_L1_HCI_BUF_CNT_MASK GENMASK(31, 24)
+#define B_BE_SER_L1_P_AXIDMA_CNT_MASK GENMASK(23, 16)
+#define B_BE_SER_L1_H_AXIDMA_CNT_MASK GENMASK(15, 8)
+#define B_BE_SER_L1_MLO_ERR_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_SER_L1_DBG_CNT_4 0x8450
+#define B_BE_SER_L1_PLDRLS_ERR_CNT_MASK GENMASK(31, 24)
+#define B_BE_SER_L1_DLE_D_CPUIO_CNT_MASK GENMASK(23, 16)
+
+#define R_BE_SER_L1_DBG_CNT_5 0x8454
+#define B_BE_SER_L1_DBG_0_MASK GENMASK(31, 0)
+
+#define R_BE_SER_L1_DBG_CNT_6 0x8458
+#define B_BE_SER_L1_DBG_1_MASK GENMASK(31, 0)
+
+#define R_BE_SER_L1_DBG_CNT_7 0x845C
+#define B_BE_SER_L1_DBG_2_MASK GENMASK(31, 0)
+
+#define R_BE_DMAC_ERR_IMR 0x8520
+#define B_BE_DMAC_NOTX_ERR_INT_EN BIT(21)
+#define B_BE_DMAC_NORX_ERR_INT_EN BIT(20)
+#define B_BE_DLE_DATACPUIO_ERR_INT_EN BIT(19)
+#define B_BE_PLRSL_ERR_INT_EN BIT(18)
+#define B_BE_MLO_ERR_INT_EN BIT(17)
+#define B_BE_DMAC_FW_ERR_INT_EN BIT(16)
+#define B_BE_H_AXIDMA_ERR_INT_EN BIT(14)
+#define B_BE_P_AXIDMA_ERR_INT_EN BIT(13)
+#define B_BE_HCI_BUF_ERR_INT_EN BIT(12)
+#define B_BE_BBRPT_ERR_INT_EN BIT(11)
+#define B_BE_DLE_CPUIO_ERR_INT_EN BIT(10)
+#define B_BE_APB_BRIDGE_ERR_INT_EN BIT(9)
+#define B_BE_DISPATCH_ERR_INT_EN BIT(8)
+#define B_BE_PKTIN_ERR_INT_EN BIT(7)
+#define B_BE_PLE_DLE_ERR_INT_EN BIT(6)
+#define B_BE_TXPKTCTRL_ERR_INT_EN BIT(5)
+#define B_BE_WDE_DLE_ERR_INT_EN BIT(4)
+#define B_BE_STA_SCHEDULER_ERR_INT_EN BIT(3)
+#define B_BE_MPDU_ERR_INT_EN BIT(2)
+#define B_BE_WSEC_ERR_INT_EN BIT(1)
+#define B_BE_WDRLS_ERR_INT_EN BIT(0)
+
+#define R_BE_DMAC_ERR_ISR 0x8524
+#define B_BE_DLE_DATACPUIO_ERR_INT BIT(19)
+#define B_BE_PLRLS_ERR_INT BIT(18)
+#define B_BE_MLO_ERR_INT BIT(17)
+#define B_BE_DMAC_FW_ERR_IDCT BIT(16)
+#define B_BE_H_AXIDMA_ERR_INT BIT(14)
+#define B_BE_P_AXIDMA_ERR_INT BIT(13)
+#define B_BE_HCI_BUF_ERR_FLAG BIT(12)
+#define B_BE_BBRPT_ERR_FLAG BIT(11)
+#define B_BE_DLE_CPUIO_ERR_FLAG BIT(10)
+#define B_BE_APB_BRIDGE_ERR_FLAG BIT(9)
+#define B_BE_DISPATCH_ERR_FLAG BIT(8)
+#define B_BE_PKTIN_ERR_FLAG BIT(7)
+#define B_BE_PLE_DLE_ERR_FLAG BIT(6)
+#define B_BE_TXPKTCTRL_ERR_FLAG BIT(5)
+#define B_BE_WDE_DLE_ERR_FLAG BIT(4)
+#define B_BE_STA_SCHEDULER_ERR_FLAG BIT(3)
+#define B_BE_MPDU_ERR_FLAG BIT(2)
+#define B_BE_WSEC_ERR_FLAG BIT(1)
+#define B_BE_WDRLS_ERR_FLAG BIT(0)
+
+#define R_BE_DISP_ERROR_ISR0 0x8804
+#define B_BE_REUSE_SIZE_ERR BIT(31)
+#define B_BE_REUSE_EN_ERR BIT(30)
+#define B_BE_STF_OQT_UNDERFLOW_ERR BIT(29)
+#define B_BE_STF_OQT_OVERFLOW_ERR BIT(28)
+#define B_BE_STF_WRFF_UNDERFLOW_ERR BIT(27)
+#define B_BE_STF_WRFF_OVERFLOW_ERR BIT(26)
+#define B_BE_STF_CMD_UNDERFLOW_ERR BIT(25)
+#define B_BE_STF_CMD_OVERFLOW_ERR BIT(24)
+#define B_BE_REUSE_SIZE_ZERO_ERR BIT(23)
+#define B_BE_REUSE_PKT_CNT_ERR BIT(22)
+#define B_BE_CDT_PTR_TIMEOUT_ERR BIT(21)
+#define B_BE_CDT_HCI_TIMEOUT_ERR BIT(20)
+#define B_BE_HDT_PTR_TIMEOUT_ERR BIT(19)
+#define B_BE_HDT_HCI_TIMEOUT_ERR BIT(18)
+#define B_BE_CDT_ADDR_INFO_LEN_ERR BIT(17)
+#define B_BE_HDT_ADDR_INFO_LEN_ERR BIT(16)
+#define B_BE_CDR_DMA_TIMEOUT_ERR BIT(15)
+#define B_BE_CDR_RX_TIMEOUT_ERR BIT(14)
+#define B_BE_PLE_OUTPUT_ERR BIT(12)
+#define B_BE_PLE_RESPOSE_ERR BIT(11)
+#define B_BE_PLE_BURST_NUM_ERR BIT(10)
+#define B_BE_PLE_NULL_PKT_ERR BIT(9)
+#define B_BE_PLE_FLOW_CTRL_ERR BIT(8)
+#define B_BE_HDR_DMA_TIMEOUT_ERR BIT(7)
+#define B_BE_HDR_RX_TIMEOUT_ERR BIT(6)
+#define B_BE_WDE_OUTPUT_ERR BIT(4)
+#define B_BE_WDE_RESPONSE_ERR BIT(3)
+#define B_BE_WDE_BURST_NUM_ERR BIT(2)
+#define B_BE_WDE_NULL_PKT_ERR BIT(1)
+#define B_BE_WDE_FLOW_CTRL_ERR BIT(0)
+
+#define R_BE_DISP_ERROR_ISR1 0x8808
+#define B_BE_HR_WRFF_UNDERFLOW_ERR BIT(31)
+#define B_BE_HR_WRFF_OVERFLOW_ERR BIT(30)
+#define B_BE_HR_CHKSUM_FSM_ERR BIT(29)
+#define B_BE_HR_SHIFT_DMA_CFG_ERR BIT(28)
+#define B_BE_HR_DMA_PROCESS_ERR BIT(27)
+#define B_BE_HR_TOTAL_LEN_UNDER_ERR BIT(26)
+#define B_BE_HR_SHIFT_EN_ERR BIT(25)
+#define B_BE_HR_AGG_CFG_ERR BIT(24)
+#define B_BE_HR_PLD_LEN_ZERO_ERR BIT(22)
+#define B_BE_HT_ILL_CH_ERR BIT(20)
+#define B_BE_HT_ADDR_INFO_LEN_ERR BIT(18)
+#define B_BE_HT_WD_LEN_OVER_ERR BIT(17)
+#define B_BE_HT_PLD_CMD_UNDERFLOW_ERR BIT(16)
+#define B_BE_HT_PLD_CMD_OVERFLOW_ERR BIT(15)
+#define B_BE_HT_WRFF_UNDERFLOW_ERR BIT(14)
+#define B_BE_HT_WRFF_OVERFLOW_ERR BIT(13)
+#define B_BE_HT_CHKSUM_FSM_ERR BIT(12)
+#define B_BE_HT_NON_IDLE_PKT_STR_ERR BIT(11)
+#define B_BE_HT_PRE_SUB_BE_ERR BIT(10)
+#define B_BE_HT_WD_CHKSUM_ERR BIT(9)
+#define B_BE_HT_CHANNEL_DMA_ERR BIT(8)
+#define B_BE_HT_OFFSET_UNMATCH_ERR BIT(7)
+#define B_BE_HT_PAYLOAD_UNDER_ERR BIT(6)
+#define B_BE_HT_PAYLOAD_OVER_ERR BIT(5)
+#define B_BE_HT_PERMU_FF_UNDERFLOW_ERR BIT(4)
+#define B_BE_HT_PERMU_FF_OVERFLOW_ERR BIT(3)
+#define B_BE_HT_PKT_FAIL_ERR BIT(2)
+#define B_BE_HT_CH_ID_ERR BIT(1)
+#define B_BE_HT_EP_CH_DIFF_ERR BIT(0)
+
+#define R_BE_DISP_ERROR_ISR2 0x880C
+#define B_BE_CR_PLD_LEN_ERR BIT(30)
+#define B_BE_CR_WRFF_UNDERFLOW_ERR BIT(29)
+#define B_BE_CR_WRFF_OVERFLOW_ERR BIT(28)
+#define B_BE_CR_SHIFT_DMA_CFG_ERR BIT(27)
+#define B_BE_CR_DMA_PROCESS_ERR BIT(26)
+#define B_BE_CR_SHIFT_EN_ERR BIT(24)
+#define B_BE_REUSE_FIFO_B_UNDER_ERR BIT(22)
+#define B_BE_REUSE_FIFO_B_OVER_ERR BIT(21)
+#define B_BE_REUSE_FIFO_A_UNDER_ERR BIT(20)
+#define B_BE_REUSE_FIFO_A_OVER_ERR BIT(19)
+#define B_BE_CT_ADDR_INFO_LEN_MISS_ERR BIT(17)
+#define B_BE_CT_WD_LEN_OVER_ERR BIT(16)
+#define B_BE_CT_F2P_SEQ_ERR BIT(15)
+#define B_BE_CT_F2P_QSEL_ERR BIT(14)
+#define B_BE_CT_PLD_CMD_UNDERFLOW_ERR BIT(13)
+#define B_BE_CT_PLD_CMD_OVERFLOW_ERR BIT(12)
+#define B_BE_CT_PRE_SUB_ERR BIT(11)
+#define B_BE_CT_WD_CHKSUM_ERR BIT(10)
+#define B_BE_CT_CHANNEL_DMA_ERR BIT(9)
+#define B_BE_CT_OFFSET_UNMATCH_ERR BIT(8)
+#define B_BE_F2P_TOTAL_NUM_ERR BIT(7)
+#define B_BE_CT_PAYLOAD_UNDER_ERR BIT(6)
+#define B_BE_CT_PAYLOAD_OVER_ERR BIT(5)
+#define B_BE_CT_PERMU_FF_UNDERFLOW_ERR BIT(4)
+#define B_BE_CT_PERMU_FF_OVERFLOW_ERR BIT(3)
+#define B_BE_CT_CH_ID_ERR BIT(2)
+#define B_BE_CT_EP_CH_DIFF_ERR BIT(0)
+
+#define R_BE_DISP_OTHER_IMR 0x8870
+#define B_BE_REUSE_SIZE_ERR_INT_EN BIT(31)
+#define B_BE_REUSE_EN_ERR_INT_EN BIT(30)
+#define B_BE_STF_OQT_UNDERFLOW_ERR_INT_EN BIT(29)
+#define B_BE_STF_OQT_OVERFLOW_ERR_INT_EN BIT(28)
+#define B_BE_STF_WRFF_UNDERFLOW_ERR_INT_EN BIT(27)
+#define B_BE_STF_WRFF_OVERFLOW_ERR_INT_EN BIT(26)
+#define B_BE_STF_CMD_UNDERFLOW_ERR_INT_EN BIT(25)
+#define B_BE_STF_CMD_OVERFLOW_ERR_INT_EN BIT(24)
+#define B_BE_REUSE_SIZE_ZERO_ERR_INT_EN BIT(23)
+#define B_BE_REUSE_PKT_CNT_ERR_INT_EN BIT(22)
+#define B_BE_CDT_PTR_TIMEOUT_ERR_INT_EN BIT(21)
+#define B_BE_CDT_HCI_TIMEOUT_ERR_INT_EN BIT(20)
+#define B_BE_HDT_PTR_TIMEOUT_ERR_INT_EN BIT(19)
+#define B_BE_HDT_HCI_TIMEOUT_ERR_INT_EN BIT(18)
+#define B_BE_CDT_ADDR_INFO_LEN_ERR_INT_EN BIT(17)
+#define B_BE_HDT_ADDR_INFO_LEN_ERR_INT_EN BIT(16)
+#define B_BE_CDR_DMA_TIMEOUT_ERR_INT_EN BIT(15)
+#define B_BE_CDR_RX_TIMEOUT_ERR_INT_EN BIT(14)
+#define B_BE_PLE_OUTPUT_ERR_INT_EN BIT(12)
+#define B_BE_PLE_RESPOSE_ERR_INT_EN BIT(11)
+#define B_BE_PLE_BURST_NUM_ERR_INT_EN BIT(10)
+#define B_BE_PLE_NULL_PKT_ERR_INT_EN BIT(9)
+#define B_BE_PLE_FLOW_CTRL_ERR_INT_EN BIT(8)
+#define B_BE_HDR_DMA_TIMEOUT_ERR_INT_EN BIT(7)
+#define B_BE_HDR_RX_TIMEOUT_ERR_INT_EN BIT(6)
+#define B_BE_WDE_OUTPUT_ERR_INT_EN BIT(4)
+#define B_BE_WDE_RESPONSE_ERR_INT_EN BIT(3)
+#define B_BE_WDE_BURST_NUM_ERR_INT_EN BIT(2)
+#define B_BE_WDE_NULL_PKT_ERR_INT_EN BIT(1)
+#define B_BE_WDE_FLOW_CTRL_ERR_INT_EN BIT(0)
+#define B_BE_DISP_OTHER_IMR_CLR (B_BE_WDE_FLOW_CTRL_ERR_INT_EN | \
+ B_BE_WDE_NULL_PKT_ERR_INT_EN | \
+ B_BE_WDE_BURST_NUM_ERR_INT_EN | \
+ B_BE_WDE_RESPONSE_ERR_INT_EN | \
+ B_BE_WDE_OUTPUT_ERR_INT_EN | \
+ B_BE_HDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_BE_HDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_BE_PLE_FLOW_CTRL_ERR_INT_EN | \
+ B_BE_PLE_NULL_PKT_ERR_INT_EN | \
+ B_BE_PLE_BURST_NUM_ERR_INT_EN | \
+ B_BE_PLE_RESPOSE_ERR_INT_EN | \
+ B_BE_PLE_OUTPUT_ERR_INT_EN | \
+ B_BE_CDR_RX_TIMEOUT_ERR_INT_EN | \
+ B_BE_CDR_DMA_TIMEOUT_ERR_INT_EN | \
+ B_BE_HDT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_BE_CDT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_BE_HDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_BE_HDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_BE_CDT_HCI_TIMEOUT_ERR_INT_EN | \
+ B_BE_CDT_PTR_TIMEOUT_ERR_INT_EN | \
+ B_BE_REUSE_PKT_CNT_ERR_INT_EN | \
+ B_BE_REUSE_SIZE_ZERO_ERR_INT_EN | \
+ B_BE_STF_CMD_OVERFLOW_ERR_INT_EN | \
+ B_BE_STF_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_BE_STF_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_STF_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_STF_OQT_OVERFLOW_ERR_INT_EN | \
+ B_BE_STF_OQT_UNDERFLOW_ERR_INT_EN | \
+ B_BE_REUSE_EN_ERR_INT_EN | \
+ B_BE_REUSE_SIZE_ERR_INT_EN)
+#define B_BE_DISP_OTHER_IMR_SET (B_BE_STF_CMD_OVERFLOW_ERR_INT_EN | \
+ B_BE_STF_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_BE_STF_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_STF_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_STF_OQT_OVERFLOW_ERR_INT_EN | \
+ B_BE_STF_OQT_UNDERFLOW_ERR_INT_EN)
+
+#define R_BE_DISP_HOST_IMR 0x8874
+#define B_BE_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31)
+#define B_BE_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30)
+#define B_BE_HR_CHKSUM_FSM_ERR_INT_EN BIT(29)
+#define B_BE_HR_SHIFT_DMA_CFG_ERR_INT_EN BIT(28)
+#define B_BE_HR_DMA_PROCESS_ERR_INT_EN BIT(27)
+#define B_BE_HR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(26)
+#define B_BE_HR_SHIFT_EN_ERR_INT_EN BIT(25)
+#define B_BE_HR_AGG_CFG_ERR_INT_EN BIT(24)
+#define B_BE_HR_PLD_LEN_ZERO_ERR_INT_EN BIT(22)
+#define B_BE_HT_ILL_CH_ERR_INT_EN BIT(20)
+#define B_BE_HT_ADDR_INFO_LEN_ERR_INT_EN BIT(18)
+#define B_BE_HT_WD_LEN_OVER_ERR_INT_EN BIT(17)
+#define B_BE_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(16)
+#define B_BE_HT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(15)
+#define B_BE_HT_WRFF_UNDERFLOW_ERR_INT_EN BIT(14)
+#define B_BE_HT_WRFF_OVERFLOW_ERR_INT_EN BIT(13)
+#define B_BE_HT_CHKSUM_FSM_ERR_INT_EN BIT(12)
+#define B_BE_HT_NON_IDLE_PKT_STR_ERR_EN BIT(11)
+#define B_BE_HT_PRE_SUB_ERR_INT_EN BIT(10)
+#define B_BE_HT_WD_CHKSUM_ERR_INT_EN BIT(9)
+#define B_BE_HT_CHANNEL_DMA_ERR_INT_EN BIT(8)
+#define B_BE_HT_OFFSET_UNMATCH_ERR_INT_EN BIT(7)
+#define B_BE_HT_PAYLOAD_UNDER_ERR_INT_EN BIT(6)
+#define B_BE_HT_PAYLOAD_OVER_ERR_INT_EN BIT(5)
+#define B_BE_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4)
+#define B_BE_HT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3)
+#define B_BE_HT_PKT_FAIL_ERR_INT_EN BIT(2)
+#define B_BE_HT_CH_ID_ERR_INT_EN BIT(1)
+#define B_BE_HT_EP_CH_DIFF_ERR_INT_EN BIT(0)
+#define B_BE_DISP_HOST_IMR_CLR (B_BE_HT_EP_CH_DIFF_ERR_INT_EN | \
+ B_BE_HT_CH_ID_ERR_INT_EN | \
+ B_BE_HT_PKT_FAIL_ERR_INT_EN | \
+ B_BE_HT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_BE_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_HT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_BE_HT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_BE_HT_OFFSET_UNMATCH_ERR_INT_EN | \
+ B_BE_HT_CHANNEL_DMA_ERR_INT_EN | \
+ B_BE_HT_WD_CHKSUM_ERR_INT_EN | \
+ B_BE_HT_PRE_SUB_ERR_INT_EN | \
+ B_BE_HT_NON_IDLE_PKT_STR_ERR_EN | \
+ B_BE_HT_CHKSUM_FSM_ERR_INT_EN | \
+ B_BE_HT_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_HT_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_HT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_BE_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_BE_HT_WD_LEN_OVER_ERR_INT_EN | \
+ B_BE_HT_ADDR_INFO_LEN_ERR_INT_EN | \
+ B_BE_HT_ILL_CH_ERR_INT_EN | \
+ B_BE_HR_PLD_LEN_ZERO_ERR_INT_EN | \
+ B_BE_HR_AGG_CFG_ERR_INT_EN | \
+ B_BE_HR_SHIFT_EN_ERR_INT_EN | \
+ B_BE_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_BE_HR_DMA_PROCESS_ERR_INT_EN | \
+ B_BE_HR_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_BE_HR_CHKSUM_FSM_ERR_INT_EN | \
+ B_BE_HR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_HR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define B_BE_DISP_HOST_IMR_SET (B_BE_HT_EP_CH_DIFF_ERR_INT_EN | \
+ B_BE_HT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_BE_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_HT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_BE_HT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_BE_HT_CHANNEL_DMA_ERR_INT_EN | \
+ B_BE_HT_PRE_SUB_ERR_INT_EN | \
+ B_BE_HT_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_HT_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_HT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_BE_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_BE_HT_WD_LEN_OVER_ERR_INT_EN | \
+ B_BE_HT_ILL_CH_ERR_INT_EN | \
+ B_BE_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \
+ B_BE_HR_DMA_PROCESS_ERR_INT_EN | \
+ B_BE_HR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_HR_WRFF_UNDERFLOW_ERR_INT_EN)
+
+#define R_BE_DISP_CPU_IMR 0x8878
+#define B_BE_CR_PLD_LEN_ERR_INT_EN BIT(30)
+#define B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN BIT(29)
+#define B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN BIT(28)
+#define B_BE_CR_SHIFT_DMA_CFG_ERR_INT_EN BIT(27)
+#define B_BE_CR_DMA_PROCESS_ERR_INT_EN BIT(26)
+#define B_BE_CR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(25)
+#define B_BE_CR_SHIFT_EN_ERR_INT_EN BIT(24)
+#define B_BE_REUSE_FIFO_B_UNDER_ERR_INT_EN BIT(22)
+#define B_BE_REUSE_FIFO_B_OVER_ERR_INT_EN BIT(21)
+#define B_BE_REUSE_FIFO_A_UNDER_ERR_INT_EN BIT(20)
+#define B_BE_REUSE_FIFO_A_OVER_ERR_INT_EN BIT(19)
+#define B_BE_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN BIT(17)
+#define B_BE_CT_WD_LEN_OVER_ERR_INT_EN BIT(16)
+#define B_BE_CT_F2P_SEQ_ERR_INT_EN BIT(15)
+#define B_BE_CT_F2P_QSEL_ERR_INT_EN BIT(14)
+#define B_BE_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(13)
+#define B_BE_CT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(12)
+#define B_BE_CT_PRE_SUB_ERR_INT_EN BIT(11)
+#define B_BE_CT_WD_CHKSUM_ERR_INT_EN BIT(10)
+#define B_BE_CT_CHANNEL_DMA_ERR_INT_EN BIT(9)
+#define B_BE_CT_OFFSET_UNMATCH_ERR_INT_EN BIT(8)
+#define B_BE_CT_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7)
+#define B_BE_CT_PAYLOAD_UNDER_ERR_INT_EN BIT(6)
+#define B_BE_CT_PAYLOAD_OVER_ERR_INT_EN BIT(5)
+#define B_BE_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4)
+#define B_BE_CT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3)
+#define B_BE_CT_CH_ID_ERR_INT_EN BIT(2)
+#define B_BE_CT_PKT_FAIL_ERR_INT_EN BIT(1)
+#define B_BE_CT_EP_CH_DIFF_ERR_INT_EN BIT(0)
+#define B_BE_DISP_CPU_IMR_CLR (B_BE_CT_EP_CH_DIFF_ERR_INT_EN | \
+ B_BE_CT_CH_ID_ERR_INT_EN | \
+ B_BE_CT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_BE_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_CT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_BE_CT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_BE_CT_OFFSET_UNMATCH_ERR_INT_EN | \
+ B_BE_CT_CHANNEL_DMA_ERR_INT_EN | \
+ B_BE_CT_WD_CHKSUM_ERR_INT_EN | \
+ B_BE_CT_PRE_SUB_ERR_INT_EN | \
+ B_BE_CT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_BE_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_BE_CT_F2P_QSEL_ERR_INT_EN | \
+ B_BE_CT_F2P_SEQ_ERR_INT_EN | \
+ B_BE_CT_WD_LEN_OVER_ERR_INT_EN | \
+ B_BE_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_A_OVER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_A_UNDER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_B_OVER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_B_UNDER_ERR_INT_EN | \
+ B_BE_CR_SHIFT_EN_ERR_INT_EN | \
+ B_BE_CR_DMA_PROCESS_ERR_INT_EN | \
+ B_BE_CR_SHIFT_DMA_CFG_ERR_INT_EN | \
+ B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_CR_PLD_LEN_ERR_INT_EN)
+#define B_BE_DISP_CPU_IMR_SET (B_BE_CT_EP_CH_DIFF_ERR_INT_EN | \
+ B_BE_CT_CH_ID_ERR_INT_EN | \
+ B_BE_CT_PERMU_FF_OVERFLOW_ERR_INT_EN | \
+ B_BE_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \
+ B_BE_CT_PAYLOAD_OVER_ERR_INT_EN | \
+ B_BE_CT_PAYLOAD_UNDER_ERR_INT_EN | \
+ B_BE_CT_PRE_SUB_ERR_INT_EN | \
+ B_BE_CT_PLD_CMD_OVERFLOW_ERR_INT_EN | \
+ B_BE_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \
+ B_BE_CT_WD_LEN_OVER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_A_OVER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_A_UNDER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_B_OVER_ERR_INT_EN | \
+ B_BE_REUSE_FIFO_B_UNDER_ERR_INT_EN | \
+ B_BE_CR_DMA_PROCESS_ERR_INT_EN | \
+ B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN | \
+ B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN)
+
+#define R_BE_DISP_FWD_WLAN_0 0x8938
+#define B_BE_FWD_WLAN_CPU_TYPE_13_MASK GENMASK(31, 30)
+#define B_BE_FWD_WLAN_CPU_TYPE_12_MASK GENMASK(29, 28)
+#define B_BE_FWD_WLAN_CPU_TYPE_11_MASK GENMASK(27, 26)
+#define B_BE_FWD_WLAN_CPU_TYPE_10_MASK GENMASK(25, 24)
+#define B_BE_FWD_WLAN_CPU_TYPE_9_MASK GENMASK(23, 22)
+#define B_BE_FWD_WLAN_CPU_TYPE_8_MASK GENMASK(21, 20)
+#define B_BE_FWD_WLAN_CPU_TYPE_7_MASK GENMASK(19, 18)
+#define B_BE_FWD_WLAN_CPU_TYPE_6_MASK GENMASK(17, 16)
+#define B_BE_FWD_WLAN_CPU_TYPE_5_MASK GENMASK(15, 14)
+#define B_BE_FWD_WLAN_CPU_TYPE_4_MASK GENMASK(13, 12)
+#define B_BE_FWD_WLAN_CPU_TYPE_3_MASK GENMASK(11, 10)
+#define B_BE_FWD_WLAN_CPU_TYPE_2_MASK GENMASK(9, 8)
+#define B_BE_FWD_WLAN_CPU_TYPE_1_MASK GENMASK(7, 6)
+#define B_BE_FWD_WLAN_CPU_TYPE_0_CTL_MASK GENMASK(5, 4)
+#define B_BE_FWD_WLAN_CPU_TYPE_0_MNG_MASK GENMASK(3, 2)
+#define B_BE_FWD_WLAN_CPU_TYPE_0_DATA_MASK GENMASK(1, 0)
+
+#define R_BE_WDE_PKTBUF_CFG 0x8C08
+#define B_BE_WDE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
+#define B_BE_WDE_START_BOUND_MASK GENMASK(14, 8)
+#define B_BE_WDE_PAGE_SEL_MASK GENMASK(1, 0)
+
+#define R_BE_WDE_ERR_IMR 0x8C38
+#define B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_BE_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_BE_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_BE_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_BE_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_BE_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(23)
+#define B_BE_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(22)
+#define B_BE_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(21)
+#define B_BE_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(20)
+#define B_BE_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(19)
+#define B_BE_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(18)
+#define B_BE_WDE_QUE_DSTQUEID_ERR_INT_EN BIT(17)
+#define B_BE_WDE_QUE_CMDTYPE_ERR_INT_EN BIT(16)
+#define B_BE_WDE_BUFMGN_MRG_SZLMT_ERR_INT_EN BIT(13)
+#define B_BE_WDE_BUFMGN_MRG_QTAID_ERR_INT_EN BIT(12)
+#define B_BE_WDE_BUFMGN_MRG_ENDPKTID_ERR_INT_EN BIT(11)
+#define B_BE_WDE_ERR_BUFMGN_MRG_STRPKTID_ERR_INT_EN BIT(10)
+#define B_BE_WDE_BUFMGN_FRZTO_ERR_INT_EN BIT(9)
+#define B_BE_WDE_GETNPG_PGOFST_ERR_INT_EN BIT(8)
+#define B_BE_WDE_GETNPG_STRPG_ERR_INT_EN BIT(7)
+#define B_BE_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(6)
+#define B_BE_WDE_BUFRTN_SIZE_ERR_INT_EN BIT(5)
+#define B_BE_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(4)
+#define B_BE_WDE_BUFREQ_UNAVAL_ERR_INT_EN BIT(3)
+#define B_BE_WDE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_BE_WDE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_BE_WDE_BUFREQ_QTAID_ERR_INT_EN BIT(0)
+#define B_BE_WDE_ERR_IMR_CLR (B_BE_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_BE_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_BE_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_BE_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_BE_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_BE_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_BE_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_BE_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_BE_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_BE_WDE_ERR_BUFMGN_MRG_STRPKTID_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_MRG_ENDPKTID_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_MRG_QTAID_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_MRG_SZLMT_ERR_INT_EN | \
+ B_BE_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_BE_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_BE_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_BE_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_BE_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_BE_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_BE_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_BE_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+#define B_BE_WDE_ERR_IMR_SET (B_BE_WDE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_BE_WDE_BUFREQ_SIZE0_INT_EN | \
+ B_BE_WDE_BUFREQ_SIZELMT_INT_EN | \
+ B_BE_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_BE_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_BE_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_BE_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_BE_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_BE_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_BE_WDE_ERR_BUFMGN_MRG_STRPKTID_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_MRG_ENDPKTID_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_MRG_QTAID_ERR_INT_EN | \
+ B_BE_WDE_BUFMGN_MRG_SZLMT_ERR_INT_EN | \
+ B_BE_WDE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_BE_WDE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_BE_WDE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_BE_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_BE_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_BE_WDE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_BE_WDE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_BE_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_ARBT_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_RRDY_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+
+#define R_BE_WDE_QTA0_CFG 0x8C40
+#define B_BE_WDE_Q0_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_WDE_Q0_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_WDE_QTA1_CFG 0x8C44
+#define B_BE_WDE_Q1_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_WDE_Q1_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_WDE_QTA2_CFG 0x8C48
+#define B_BE_WDE_Q2_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_WDE_Q2_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_WDE_QTA3_CFG 0x8C4C
+#define B_BE_WDE_Q3_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_WDE_Q3_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_WDE_QTA4_CFG 0x8C50
+#define B_BE_WDE_Q4_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_WDE_Q4_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_WDE_ERR1_IMR 0x8CC0
+#define B_BE_WDE_QUEMGN_CMACACQ_DEQNTFY_INT_EN BIT(8)
+#define B_BE_WDE_ERR1_IMR_CLR B_BE_WDE_QUEMGN_CMACACQ_DEQNTFY_INT_EN
+#define B_BE_WDE_ERR1_IMR_SET B_BE_WDE_QUEMGN_CMACACQ_DEQNTFY_INT_EN
+
+#define R_BE_PLE_PKTBUF_CFG 0x9008
+#define B_BE_PLE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
+#define B_BE_PLE_START_BOUND_MASK GENMASK(14, 8)
+#define B_BE_PLE_PAGE_SEL_MASK GENMASK(1, 0)
+
+#define R_BE_PLE_ERR_IMR 0x9038
+#define B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
+#define B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
+#define B_BE_PLE_DATCHN_RRDY_ERR_INT_EN BIT(27)
+#define B_BE_PLE_DATCHN_FRZTO_ERR_INT_EN BIT(26)
+#define B_BE_PLE_DATCHN_NULLPG_ERR_INT_EN BIT(25)
+#define B_BE_PLE_DATCHN_ARBT_ERR_INT_EN BIT(24)
+#define B_BE_PLE_QUEMGN_FRZTO_ERR_INT_EN BIT(23)
+#define B_BE_PLE_NXTPKTLL_AD_ERR_INT_EN BIT(22)
+#define B_BE_PLE_PREPKTLLT_AD_ERR_INT_EN BIT(21)
+#define B_BE_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(20)
+#define B_BE_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(19)
+#define B_BE_PLE_QUE_SRCQUEID_ERR_INT_EN BIT(18)
+#define B_BE_PLE_QUE_DSTQUEID_ERR_INT_EN BIT(17)
+#define B_BE_PLE_QUE_CMDTYPE_ERR_INT_EN BIT(16)
+#define B_BE_PLE_BUFMGN_MRG_SZLMT_ERR_INT_EN BIT(13)
+#define B_BE_PLE_BUFMGN_MRG_QTAID_ERR_INT_EN BIT(12)
+#define B_BE_PLE_BUFMGN_MRG_ENDPKTID_ERR_INT_EN BIT(11)
+#define B_BE_PLE_BUFMGN_MRG_STRPKTID_ERR_INT_EN BIT(10)
+#define B_BE_PLE_BUFMGN_FRZTO_ERR_INT_EN BIT(9)
+#define B_BE_PLE_GETNPG_PGOFST_ERR_INT_EN BIT(8)
+#define B_BE_PLE_GETNPG_STRPG_ERR_INT_EN BIT(7)
+#define B_BE_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(6)
+#define B_BE_PLE_BUFRTN_SIZE_ERR_INT_EN BIT(5)
+#define B_BE_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(4)
+#define B_BE_PLE_BUFREQ_UNAVAL_ERR_INT_EN BIT(3)
+#define B_BE_PLE_BUFREQ_SIZELMT_INT_EN BIT(2)
+#define B_BE_PLE_BUFREQ_SIZE0_INT_EN BIT(1)
+#define B_BE_PLE_BUFREQ_QTAID_ERR_INT_EN BIT(0)
+#define B_BE_PLE_ERR_IMR_CLR (B_BE_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_BE_PLE_BUFREQ_SIZE0_INT_EN | \
+ B_BE_PLE_BUFREQ_SIZELMT_INT_EN | \
+ B_BE_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_BE_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_BE_PLE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_BE_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_BE_PLE_GETNPG_STRPG_ERR_INT_EN | \
+ B_BE_PLE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_STRPKTID_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_ENDPKTID_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_QTAID_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_SZLMT_ERR_INT_EN | \
+ B_BE_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_BE_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_BE_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_BE_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_BE_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_BE_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_BE_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_BE_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_RRDY_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN)
+#define B_BE_PLE_ERR_IMR_SET (B_BE_PLE_BUFREQ_QTAID_ERR_INT_EN | \
+ B_BE_PLE_BUFREQ_SIZE0_INT_EN | \
+ B_BE_PLE_BUFREQ_SIZELMT_INT_EN | \
+ B_BE_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_BE_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_BE_PLE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_BE_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_BE_PLE_GETNPG_STRPG_ERR_INT_EN | \
+ B_BE_PLE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_FRZTO_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_STRPKTID_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_ENDPKTID_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_QTAID_ERR_INT_EN | \
+ B_BE_PLE_BUFMGN_MRG_SZLMT_ERR_INT_EN | \
+ B_BE_PLE_QUE_CMDTYPE_ERR_INT_EN | \
+ B_BE_PLE_QUE_DSTQUEID_ERR_INT_EN | \
+ B_BE_PLE_QUE_SRCQUEID_ERR_INT_EN | \
+ B_BE_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \
+ B_BE_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \
+ B_BE_PLE_PREPKTLLT_AD_ERR_INT_EN | \
+ B_BE_PLE_NXTPKTLL_AD_ERR_INT_EN | \
+ B_BE_PLE_QUEMGN_FRZTO_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_ARBT_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_NULLPG_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_FRZTO_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_RRDY_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN | \
+ B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN)
+
+#define R_BE_PLE_QTA0_CFG 0x9040
+#define B_BE_PLE_Q0_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q0_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA1_CFG 0x9044
+#define B_BE_PLE_Q1_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q1_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA2_CFG 0x9048
+#define B_BE_PLE_Q2_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q2_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA3_CFG 0x904C
+#define B_BE_PLE_Q3_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q3_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA4_CFG 0x9050
+#define B_BE_PLE_Q4_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q4_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA5_CFG 0x9054
+#define B_BE_PLE_Q5_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q5_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA6_CFG 0x9058
+#define B_BE_PLE_Q6_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q6_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA7_CFG 0x905C
+#define B_BE_PLE_Q7_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q7_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA8_CFG 0x9060
+#define B_BE_PLE_Q8_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q8_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA9_CFG 0x9064
+#define B_BE_PLE_Q9_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q9_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA10_CFG 0x9068
+#define B_BE_PLE_Q10_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q10_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA11_CFG 0x906C
+#define B_BE_PLE_Q11_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q11_MIN_SIZE_MASK GENMASK(11, 0)
+
+#define R_BE_PLE_QTA12_CFG 0x9070
+#define B_BE_PLE_Q12_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_BE_PLE_Q12_MIN_SIZE_MASK GENMASK(11, 0)
+
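+/*
+ * Illustrative only: each PLE quota register above packs the queue's
+ * maximum page count in bits 27:16 and its minimum in bits 11:0. A
+ * hypothetical configuration write, assuming FIELD_PREP() from
+ * <linux/bitfield.h> and the driver's rtw89_write32():
+ *
+ *	rtw89_write32(rtwdev, R_BE_PLE_QTA0_CFG,
+ *		      FIELD_PREP(B_BE_PLE_Q0_MAX_SIZE_MASK, max_pg) |
+ *		      FIELD_PREP(B_BE_PLE_Q0_MIN_SIZE_MASK, min_pg));
+ */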
+#define R_BE_PLE_ERRFLAG1_IMR 0x90C0
+#define B_BE_PLE_SRCHPG_PGOFST_IMR BIT(26)
+#define B_BE_PLE_SRCHPG_STRPG_IMR BIT(25)
+#define B_BE_PLE_SRCHPG_FRZTO_IMR BIT(24)
+#define B_BE_PLE_ERRFLAG1_IMR_CLR (B_BE_PLE_SRCHPG_FRZTO_IMR | \
+ B_BE_PLE_SRCHPG_STRPG_IMR | \
+ B_BE_PLE_SRCHPG_PGOFST_IMR)
+#define B_BE_PLE_ERRFLAG1_IMR_SET (B_BE_PLE_SRCHPG_FRZTO_IMR | \
+ B_BE_PLE_SRCHPG_STRPG_IMR | \
+ B_BE_PLE_SRCHPG_PGOFST_IMR)
+
#define R_BE_PLE_DBG_FUN_INTF_CTL 0x9110
#define B_BE_PLE_DFI_ACTIVE BIT(31)
#define B_BE_PLE_DFI_TRGSEL_MASK GENMASK(19, 16)
@@ -3757,6 +5220,608 @@
#define R_BE_PLE_DBG_FUN_INTF_DATA 0x9114
#define B_BE_PLE_DFI_DATA_MASK GENMASK(31, 0)
+#define R_BE_WDRLS_CFG 0x9408
+#define B_BE_WDRLS_DIS_AGAC BIT(31)
+#define B_BE_RLSRPT_BUFREQ_TO_MASK GENMASK(15, 8)
+#define B_BE_RLSRPT_BUFREQ_TO_SEL_MASK GENMASK(7, 6)
+#define B_BE_WDRLS_MODE_MASK GENMASK(1, 0)
+
+#define R_BE_WDRLS_ERR_IMR 0x9430
+#define B_BE_WDRLS_RPT3_FRZTO_ERR_INT_EN BIT(21)
+#define B_BE_WDRLS_RPT3_AGGNUM0_ERR_INT_EN BIT(20)
+#define B_BE_WDRLS_RPT2_FRZTO_ERR_INT_EN BIT(17)
+#define B_BE_WDRLS_RPT2_AGGNUM0_ERR_INT_EN BIT(16)
+#define B_BE_WDRLS_RPT1_FRZTO_ERR_INT_EN BIT(13)
+#define B_BE_WDRLS_RPT1_AGGNUM0_ERR_INT_EN BIT(12)
+#define B_BE_WDRLS_RPT0_FRZTO_ERR_INT_EN BIT(9)
+#define B_BE_WDRLS_RPT0_AGGNUM0_ERR_INT_EN BIT(8)
+#define B_BE_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN BIT(5)
+#define B_BE_WDRLS_PLEBREQ_TO_ERR_INT_EN BIT(4)
+#define B_BE_WDRLS_CTL_FRZTO_ERR_INT_EN BIT(2)
+#define B_BE_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN BIT(1)
+#define B_BE_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN BIT(0)
+#define B_BE_WDRLS_ERR_IMR_CLR (B_BE_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_BE_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_BE_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_BE_WDRLS_PLEBREQ_TO_ERR_INT_EN | \
+ B_BE_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_BE_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_BE_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_BE_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_BE_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+#define B_BE_WDRLS_ERR_IMR_SET (B_BE_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \
+ B_BE_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \
+ B_BE_WDRLS_CTL_FRZTO_ERR_INT_EN | \
+ B_BE_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \
+ B_BE_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \
+ B_BE_WDRLS_RPT0_FRZTO_ERR_INT_EN | \
+ B_BE_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \
+ B_BE_WDRLS_RPT1_FRZTO_ERR_INT_EN)
+
+#define R_BE_RLSRPT0_CFG1 0x9444
+#define B_BE_RLSRPT0_FLTR_MAP_MASK GENMASK(27, 24)
+#define S_BE_WDRLS_FLTR_TXOK 1
+#define S_BE_WDRLS_FLTR_RTYLMT 2
+#define S_BE_WDRLS_FLTR_LIFTIM 4
+#define S_BE_WDRLS_FLTR_MACID 8
+#define B_BE_RLSRPT0_TO_MASK GENMASK(23, 16)
+#define B_BE_RLSRPT0_AGGNUM_MASK GENMASK(7, 0)
+
+#define R_BE_BBRPT_COM_ERR_IMR 0x9608
+#define B_BE_BBRPT_COM_EVT01_ISR_EN BIT(1)
+#define B_BE_BBRPT_COM_NULL_PLPKTID_ISR_EN BIT(0)
+#define B_BE_BBRPT_COM_ERR_IMR_CLR (B_BE_BBRPT_COM_NULL_PLPKTID_ISR_EN | \
+ B_BE_BBRPT_COM_EVT01_ISR_EN)
+#define B_BE_BBRPT_COM_ERR_IMR_SET B_BE_BBRPT_COM_NULL_PLPKTID_ISR_EN
+
+#define R_BE_BBRPT_CHINFO_ERR_IMR 0x9628
+#define B_BE_ERR_BB_ONETEN_INT_EN BIT(1)
+#define B_BE_ERR_GEN_FRZTO_INT_EN BIT(0)
+#define B_BE_BBRPT_CHINFO_ERR_IMR_CLR (B_BE_ERR_GEN_FRZTO_INT_EN | \
+ B_BE_ERR_BB_ONETEN_INT_EN)
+#define B_BE_BBRPT_CHINFO_ERR_IMR_SET (B_BE_ERR_GEN_FRZTO_INT_EN | \
+ B_BE_ERR_BB_ONETEN_INT_EN)
+
+#define R_BE_BBRPT_DFS_ERR_IMR 0x9638
+#define B_BE_BBRPT_DFS_TO_ERR_INT_EN BIT(0)
+#define B_BE_BBRPT_DFS_ERR_IMR_CLR B_BE_BBRPT_DFS_TO_ERR_INT_EN
+#define B_BE_BBRPT_DFS_ERR_IMR_SET B_BE_BBRPT_DFS_TO_ERR_INT_EN
+
+#define R_BE_LA_ERRFLAG_IMR 0x9668
+#define B_BE_LA_IMR_DATA_LOSS BIT(0)
+#define B_BE_LA_ERRFLAG_IMR_CLR B_BE_LA_IMR_DATA_LOSS
+#define B_BE_LA_ERRFLAG_IMR_SET B_BE_LA_IMR_DATA_LOSS
+
+#define R_BE_LA_ERRFLAG_ISR 0x966C
+#define B_BE_LA_ISR_DATA_LOSS BIT(0)
+
+#define R_BE_CH_INFO_DBGFLAG_IMR 0x9688
+#define B_BE_BCHN_EVT01_ISR_EN BIT(29)
+#define B_BE_BCHN_REQTO_ISR_EN BIT(28)
+#define B_BE_CHIF_RXDATA_AFACT_ISR_EN BIT(11)
+#define B_BE_CHIF_RXDATA_BFACT_ISR_EN BIT(10)
+#define B_BE_CHIF_HDR_SEGLEN_ISR_EN BIT(9)
+#define B_BE_CHIF_HDR_INVLD_ISR_EN BIT(8)
+#define B_BE_CHIF_BBONL_BFACT_ISR_EN BIT(4)
+#define B_BE_CHIF_RPT_OVF_ISR_EN BIT(3)
+#define B_BE_DBG_CHIF_DATA_LOSS_ISR_EN BIT(2)
+#define B_BE_CHIF_DATA_WTOUT_ISR_EN BIT(1)
+#define B_BE_CHIF_RPT_WTOUT_ISR_EN BIT(0)
+#define B_BE_CH_INFO_DBGFLAG_IMR_CLR (B_BE_CHIF_RPT_WTOUT_ISR_EN | \
+ B_BE_CHIF_DATA_WTOUT_ISR_EN | \
+ B_BE_DBG_CHIF_DATA_LOSS_ISR_EN | \
+ B_BE_CHIF_RPT_OVF_ISR_EN | \
+ B_BE_CHIF_HDR_INVLD_ISR_EN | \
+ B_BE_CHIF_HDR_SEGLEN_ISR_EN | \
+ B_BE_CHIF_RXDATA_BFACT_ISR_EN | \
+ B_BE_CHIF_RXDATA_AFACT_ISR_EN)
+#define B_BE_CH_INFO_DBGFLAG_IMR_SET 0
+
+#define R_BE_WD_BUF_REQ 0x9800
+#define B_BE_WD_BUF_REQ_EXEC BIT(31)
+#define B_BE_WD_BUF_REQ_QUOTA_ID_MASK GENMASK(23, 16)
+#define B_BE_WD_BUF_REQ_LEN_MASK GENMASK(15, 0)
+
+#define R_BE_WD_BUF_STATUS 0x9804
+#define B_BE_WD_BUF_STAT_DONE BIT(31)
+#define B_BE_WD_BUF_STAT_PKTID_MASK GENMASK(11, 0)
+
+#define R_BE_WD_CPUQ_OP_0 0x9810
+#define B_BE_WD_CPUQ_OP_EXEC BIT(31)
+#define B_BE_WD_CPUQ_OP_CMD_TYPE_MASK GENMASK(27, 24)
+#define B_BE_WD_CPUQ_OP_PKTNUM_MASK GENMASK(7, 0)
+
+#define R_BE_WD_CPUQ_OP_1 0x9814
+#define B_BE_WD_CPUQ_OP_SRC_MACID_MASK GENMASK(19, 12)
+#define B_BE_WD_CPUQ_OP_SRC_QID_MASK GENMASK(9, 4)
+#define B_BE_WD_CPUQ_OP_SRC_PID_MASK GENMASK(2, 0)
+
+#define R_BE_WD_CPUQ_OP_2 0x9818
+#define B_BE_WD_CPUQ_OP_DST_MACID_MASK GENMASK(19, 12)
+#define B_BE_WD_CPUQ_OP_DST_QID_MASK GENMASK(9, 4)
+#define B_BE_WD_CPUQ_OP_DST_PID_MASK GENMASK(2, 0)
+
+#define R_BE_WD_CPUQ_OP_3 0x981C
+#define B_BE_WD_CPUQ_OP_STRT_PKTID_MASK GENMASK(27, 16)
+#define B_BE_WD_CPUQ_OP_END_PKTID_MASK GENMASK(11, 0)
+
+#define R_BE_WD_CPUQ_OP_STATUS 0x9820
+#define B_BE_WD_CPUQ_OP_STAT_DONE BIT(31)
+#define B_BE_WD_CPUQ_OP_PKTCNT_MASK GENMASK(27, 16)
+#define B_BE_WD_CPUQ_OP_PKTID_MASK GENMASK(11, 0)
+
+#define R_BE_PL_BUF_REQ 0x9840
+#define B_BE_PL_BUF_REQ_EXEC BIT(31)
+#define B_BE_PL_BUF_REQ_QUOTA_ID_MASK GENMASK(19, 16)
+#define B_BE_PL_BUF_REQ_LEN_MASK GENMASK(15, 0)
+
+#define R_BE_PL_BUF_STATUS 0x9844
+#define B_BE_PL_BUF_STAT_DONE BIT(31)
+#define B_BE_PL_BUF_STAT_PKTID_MASK GENMASK(11, 0)
+
+#define R_BE_PL_CPUQ_OP_0 0x9850
+#define B_BE_PL_CPUQ_OP_EXEC BIT(31)
+#define B_BE_PL_CPUQ_OP_CMD_TYPE_MASK GENMASK(27, 24)
+#define B_BE_PL_CPUQ_OP_PKTNUM_MASK GENMASK(7, 0)
+
+#define R_BE_PL_CPUQ_OP_1 0x9854
+#define B_BE_PL_CPUQ_OP_SRC_MACID_MASK GENMASK(19, 12)
+#define B_BE_PL_CPUQ_OP_SRC_QID_MASK GENMASK(9, 4)
+#define B_BE_PL_CPUQ_OP_SRC_PID_MASK GENMASK(2, 0)
+
+#define R_BE_PL_CPUQ_OP_2 0x9858
+#define B_BE_PL_CPUQ_OP_DST_MACID_MASK GENMASK(19, 12)
+#define B_BE_PL_CPUQ_OP_DST_QID_MASK GENMASK(9, 4)
+#define B_BE_PL_CPUQ_OP_DST_PID_MASK GENMASK(2, 0)
+
+#define R_BE_PL_CPUQ_OP_3 0x985C
+#define B_BE_PL_CPUQ_OP_STRT_PKTID_MASK GENMASK(27, 16)
+#define B_BE_PL_CPUQ_OP_END_PKTID_MASK GENMASK(11, 0)
+
+#define R_BE_PL_CPUQ_OP_STATUS 0x9860
+#define B_BE_PL_CPUQ_OP_STAT_DONE BIT(31)
+#define B_BE_PL_CPUQ_OP_PKTCNT_MASK GENMASK(27, 16)
+#define B_BE_PL_CPUQ_OP_PKTID_MASK GENMASK(11, 0)
+
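+/*
+ * Usage sketch, with assumed handshake semantics: the WD/PL buffer-request
+ * registers above look like a trigger-and-poll interface - write the EXEC
+ * bit with quota ID and length, poll the status register for DONE, then
+ * extract the granted packet ID. Assuming read_poll_timeout() from
+ * <linux/iopoll.h> and the driver's rtw89_read32()/rtw89_write32():
+ *
+ *	rtw89_write32(rtwdev, R_BE_WD_BUF_REQ,
+ *		      B_BE_WD_BUF_REQ_EXEC |
+ *		      FIELD_PREP(B_BE_WD_BUF_REQ_QUOTA_ID_MASK, qta) |
+ *		      FIELD_PREP(B_BE_WD_BUF_REQ_LEN_MASK, len));
+ *	ret = read_poll_timeout(rtw89_read32, sts,
+ *				sts & B_BE_WD_BUF_STAT_DONE,
+ *				1, 2000, false, rtwdev, R_BE_WD_BUF_STATUS);
+ *	if (!ret)
+ *		pktid = FIELD_GET(B_BE_WD_BUF_STAT_PKTID_MASK, sts);
+ */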
+#define R_BE_CPUIO_ERR_IMR 0x9888
+#define B_BE_PLEQUE_OP_ERR_INT_EN BIT(12)
+#define B_BE_PLEBUF_OP_ERR_INT_EN BIT(8)
+#define B_BE_WDEQUE_OP_ERR_INT_EN BIT(4)
+#define B_BE_WDEBUF_OP_ERR_INT_EN BIT(0)
+#define B_BE_CPUIO_ERR_IMR_CLR (B_BE_WDEBUF_OP_ERR_INT_EN | \
+ B_BE_WDEQUE_OP_ERR_INT_EN | \
+ B_BE_PLEBUF_OP_ERR_INT_EN | \
+ B_BE_PLEQUE_OP_ERR_INT_EN)
+#define B_BE_CPUIO_ERR_IMR_SET (B_BE_WDEBUF_OP_ERR_INT_EN | \
+ B_BE_WDEQUE_OP_ERR_INT_EN | \
+ B_BE_PLEBUF_OP_ERR_INT_EN | \
+ B_BE_PLEQUE_OP_ERR_INT_EN)
+
+#define R_BE_PKTIN_ERR_IMR 0x9A20
+#define B_BE_SW_MERGE_ERR_INT_EN BIT(1)
+#define B_BE_GET_NULL_PKTID_ERR_INT_EN BIT(0)
+#define B_BE_PKTIN_ERR_IMR_CLR (B_BE_SW_MERGE_ERR_INT_EN | \
+ B_BE_GET_NULL_PKTID_ERR_INT_EN)
+#define B_BE_PKTIN_ERR_IMR_SET (B_BE_SW_MERGE_ERR_INT_EN | \
+ B_BE_GET_NULL_PKTID_ERR_INT_EN)
+
+#define R_BE_HDR_SHCUT_SETTING 0x9B00
+#define B_BE_TX_ADDR_MLD_TO_LIK BIT(4)
+#define B_BE_TX_HW_SEC_HDR_EN BIT(3)
+#define B_BE_TX_MAC_MPDU_PROC_EN BIT(2)
+#define B_BE_TX_HW_ACK_POLICY_EN BIT(1)
+#define B_BE_TX_HW_SEQ_EN BIT(0)
+
+#define R_BE_MPDU_TX_ERR_IMR 0x9BF4
+#define B_BE_TX_TIMEOUT_ERR_EN BIT(0)
+#define B_BE_MPDU_TX_ERR_IMR_CLR B_BE_TX_TIMEOUT_ERR_EN
+#define B_BE_MPDU_TX_ERR_IMR_SET 0
+
+#define R_BE_MPDU_PROC 0x9C00
+#define B_BE_PORT_SEL BIT(29)
+#define B_BE_WPKT_WLANCPU_QSEL_MASK GENMASK(28, 27)
+#define B_BE_WPKT_DATACPU_QSEL_MASK GENMASK(26, 25)
+#define B_BE_WPKT_FW_RLS BIT(24)
+#define B_BE_FWD_RPKT_MASK GENMASK(23, 16)
+#define B_BE_FWD_WPKT_MASK GENMASK(15, 8)
+#define B_BE_RXFWD_PRIO_MASK GENMASK(5, 4)
+#define B_BE_RXFWD_EN BIT(3)
+#define B_BE_DROP_NONDMA_PPDU BIT(2)
+#define B_BE_APPEND_FCS BIT(0)
+
+#define R_BE_CUT_AMSDU_CTRL 0x9C94
+#define B_BE_EN_CUT_AMSDU BIT(31)
+#define B_BE_CUT_AMSDU_CHKLEN_EN BIT(30)
+#define B_BE_CA_CHK_ADDRCAM_EN BIT(29)
+#define B_BE_MPDU_CUT_CTRL_EN BIT(24)
+#define B_BE_CUT_AMSDU_CHKLEN_L_TH_MASK GENMASK(23, 16)
+#define B_BE_CUT_AMSDU_CHKLEN_H_TH_MASK GENMASK(15, 0)
+
+#define R_BE_RX_HDRTRNS 0x9CC0
+#define B_BE_RX_MGN_MLD_ADDR_EN BIT(6)
+#define B_BE_HDR_INFO_MASK GENMASK(5, 4)
+#define B_BE_HC_ADDR_HIT_EN BIT(3)
+#define B_BE_RX_ADDR_LINK_TO_MLO BIT(2)
+#define B_BE_HDR_CNV BIT(1)
+#define B_BE_RX_HDR_CNV_EN BIT(0)
+#define TRXCFG_MPDU_PROC_RX_HDR_CONV 0x00000000
+
+#define R_BE_MPDU_RX_ERR_IMR 0x9CF4
+#define B_BE_LEN_ERR_IMR BIT(3)
+#define B_BE_TIMEOUT_ERR_IMR BIT(1)
+#define B_BE_MPDU_RX_ERR_IMR_CLR B_BE_TIMEOUT_ERR_IMR
+#define B_BE_MPDU_RX_ERR_IMR_SET 0
+
+#define R_BE_SEC_ENG_CTRL 0x9D00
+#define B_BE_SEC_ENG_EN BIT(31)
+#define B_BE_CCMP_SPP_MIC BIT(30)
+#define B_BE_CCMP_SPP_CTR BIT(29)
+#define B_BE_SEC_CAM_ACC BIT(28)
+#define B_BE_WMAC_SEC_PN_SEL_MASK GENMASK(27, 26)
+#define B_BE_WMAC_SEC_MASKIV BIT(25)
+#define B_BE_WAPI_SPEC BIT(24)
+#define B_BE_REVERT_TA_RA_MLD_EN BIT(23)
+#define B_BE_SEC_DBG_SEL_MASK GENMASK(19, 16)
+#define B_BE_CAM_FORCE_CLK BIT(15)
+#define B_BE_SEC_FORCE_CLK BIT(14)
+#define B_BE_SEC_RX_SHORT_ADD_ICVERR BIT(13)
+#define B_BE_SRAM_IO_PROT BIT(12)
+#define B_BE_SEC_PRE_ENQUE_TX BIT(11)
+#define B_BE_CLK_EN_CGCMP BIT(10)
+#define B_BE_CLK_EN_WAPI BIT(9)
+#define B_BE_CLK_EN_WEP_TKIP BIT(8)
+#define B_BE_BMC_MGNT_DEC BIT(5)
+#define B_BE_UC_MGNT_DEC BIT(4)
+#define B_BE_MC_DEC BIT(3)
+#define B_BE_BC_DEC BIT(2)
+#define B_BE_SEC_RX_DEC BIT(1)
+#define B_BE_SEC_TX_ENC BIT(0)
+
+#define R_BE_SEC_MPDU_PROC 0x9D04
+#define B_BE_DBG_ENGINE_SEL BIT(8)
+#define B_BE_STOP_RX_PKT_HANDLE BIT(7)
+#define B_BE_STOP_TX_PKT_HANDLE BIT(6)
+#define B_BE_QUEUE_FOWARD_SEL BIT(5)
+#define B_BE_RESP1_PROTECT BIT(4)
+#define B_BE_RESP0_PROTECT BIT(3)
+#define B_BE_TX_ACTIVE_PROTECT BIT(2)
+#define B_BE_APPEND_ICV BIT(1)
+#define B_BE_APPEND_MIC BIT(0)
+
+#define R_BE_SEC_CAM_ACCESS 0x9D10
+#define B_BE_SEC_TIME_OUT_MASK GENMASK(31, 16)
+#define B_BE_SEC_CAM_POLL BIT(15)
+#define B_BE_SEC_CAM_RW BIT(14)
+#define B_BE_SEC_CAM_ACC_FAIL BIT(13)
+#define B_BE_SEC_CAM_OFFSET_MASK GENMASK(10, 0)
+
+#define R_BE_SEC_CAM_RDATA 0x9D14
+#define B_BE_SEC_CAM_RDATA_MASK GENMASK(31, 0)
+
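+/*
+ * Illustrative only, semantics assumed: a security-CAM word read through
+ * the access window above - program the offset together with the poll bit,
+ * wait for hardware to clear it, then fetch the word. Whether B_BE_SEC_CAM_RW
+ * must also be cleared for reads is not confirmed by this patch:
+ *
+ *	rtw89_write32(rtwdev, R_BE_SEC_CAM_ACCESS,
+ *		      B_BE_SEC_CAM_POLL |
+ *		      FIELD_PREP(B_BE_SEC_CAM_OFFSET_MASK, offset));
+ *	ret = read_poll_timeout(rtw89_read32, val,
+ *				!(val & B_BE_SEC_CAM_POLL),
+ *				1, 1000, false, rtwdev, R_BE_SEC_CAM_ACCESS);
+ *	if (!ret)
+ *		data = rtw89_read32(rtwdev, R_BE_SEC_CAM_RDATA);
+ */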
+#define R_BE_SEC_DEBUG2 0x9D28
+#define B_BE_DBG_READ_MASK GENMASK(31, 0)
+
+#define R_BE_SEC_ERROR_IMR 0x9D2C
+#define B_BE_QUEUE_OPERATION_HANG_IMR BIT(4)
+#define B_BE_SEC1_RX_HANG_IMR BIT(3)
+#define B_BE_SEC1_TX_HANG_IMR BIT(2)
+#define B_BE_RX_HANG_IMR BIT(1)
+#define B_BE_TX_HANG_IMR BIT(0)
+#define B_BE_SEC_ERROR_IMR_CLR (B_BE_TX_HANG_IMR | \
+ B_BE_RX_HANG_IMR | \
+ B_BE_SEC1_TX_HANG_IMR | \
+ B_BE_SEC1_RX_HANG_IMR | \
+ B_BE_QUEUE_OPERATION_HANG_IMR)
+#define B_BE_SEC_ERROR_IMR_SET (B_BE_TX_HANG_IMR | \
+ B_BE_RX_HANG_IMR | \
+ B_BE_SEC1_TX_HANG_IMR | \
+ B_BE_SEC1_RX_HANG_IMR | \
+ B_BE_QUEUE_OPERATION_HANG_IMR)
+
+#define R_BE_SEC_ERROR_FLAG 0x9D30
+#define B_BE_TXD_DIFF_KEYCAM_TYPE_ERROR BIT(5)
+#define B_BE_QUEUE_OPERATION_HANG_ERROR BIT(4)
+#define B_BE_SEC1_RX_HANG_ERROR BIT(3)
+#define B_BE_SEC1_TX_HANG_ERROR BIT(2)
+#define B_BE_RX_HANG_ERROR BIT(1)
+#define B_BE_TX_HANG_ERROR BIT(0)
+
+#define R_BE_TXPKTCTL_MPDUINFO_CFG 0x9F10
+#define B_BE_MPDUINFO_FEN BIT(31)
+#define B_BE_MPDUINFO_PKTID_MASK GENMASK(27, 16)
+#define B_BE_MPDUINFO_B1_BADDR_MASK GENMASK(5, 0)
+#define MPDU_INFO_B1_OFST 18
+
+#define R_BE_TXPKTCTL_B0_PRELD_CFG0 0x9F48
+#define B_BE_B0_PRELD_FEN BIT(31)
+#define B_BE_B0_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define B_BE_B0_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_BE_B0_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_BE_TXPKTCTL_B0_PRELD_CFG1 0x9F4C
+#define B_BE_B0_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define B_BE_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_BE_TXPKTCTL_B0_ERRFLAG_IMR 0x9F78
+#define B_BE_B0_IMR_DBG_USRCTL_RLSBMPLEN BIT(25)
+#define B_BE_B0_IMR_DBG_USRCTL_RDNRLSCMD BIT(24)
+#define B_BE_B0_IMR_ERR_PRELD_ENTNUMCFG BIT(17)
+#define B_BE_B0_IMR_ERR_PRELD_RLSPKTSZERR BIT(16)
+#define B_BE_B0_IMR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_BE_B0_IMR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_BE_B0_IMR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_BE_B0_IMR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_BE_B0_IMR_ERR_USRCTL_NOINIT BIT(1)
+#define B_BE_B0_IMR_ERR_USRCTL_REINIT BIT(0)
+#define B_BE_TXPKTCTL_B0_ERRFLAG_IMR_CLR (B_BE_B0_IMR_ERR_USRCTL_REINIT | \
+ B_BE_B0_IMR_ERR_USRCTL_NOINIT | \
+ B_BE_B0_IMR_DBG_USRCTL_RDNRLSCMD | \
+ B_BE_B0_IMR_DBG_USRCTL_RLSBMPLEN | \
+ B_BE_B0_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_BE_B0_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_BE_B0_IMR_ERR_CMDPSR_FRZTO | \
+ B_BE_B0_IMR_ERR_CMDPSR_TBLSZ | \
+ B_BE_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_BE_B0_IMR_ERR_PRELD_ENTNUMCFG)
+#define B_BE_TXPKTCTL_B0_ERRFLAG_IMR_SET (B_BE_B0_IMR_ERR_USRCTL_REINIT | \
+ B_BE_B0_IMR_ERR_USRCTL_NOINIT | \
+ B_BE_B0_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_BE_B0_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_BE_B0_IMR_ERR_CMDPSR_FRZTO | \
+ B_BE_B0_IMR_ERR_CMDPSR_TBLSZ | \
+ B_BE_B0_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_BE_B0_IMR_ERR_PRELD_ENTNUMCFG)
+
+#define R_BE_TXPKTCTL_B1_PRELD_CFG0 0x9F88
+#define B_BE_B1_PRELD_FEN BIT(31)
+#define B_BE_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16)
+#define B_BE_B1_PRELD_CAM_G1ENTNUM_MASK GENMASK(12, 8)
+#define B_BE_B1_PRELD_CAM_G0ENTNUM_MASK GENMASK(4, 0)
+
+#define R_BE_TXPKTCTL_B1_PRELD_CFG1 0x9F8C
+#define B_BE_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8)
+#define B_BE_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0)
+
+#define R_BE_TXPKTCTL_B1_ERRFLAG_IMR 0x9FB8
+#define B_BE_B1_IMR_DBG_USRCTL_RLSBMPLEN BIT(25)
+#define B_BE_B1_IMR_DBG_USRCTL_RDNRLSCMD BIT(24)
+#define B_BE_B1_IMR_ERR_PRELD_ENTNUMCFG BIT(17)
+#define B_BE_B1_IMR_ERR_PRELD_RLSPKTSZERR BIT(16)
+#define B_BE_B1_IMR_ERR_CMDPSR_TBLSZ BIT(11)
+#define B_BE_B1_IMR_ERR_CMDPSR_FRZTO BIT(10)
+#define B_BE_B1_IMR_ERR_CMDPSR_CMDTYPE BIT(9)
+#define B_BE_B1_IMR_ERR_CMDPSR_1STCMDERR BIT(8)
+#define B_BE_B1_IMR_ERR_USRCTL_NOINIT BIT(1)
+#define B_BE_B1_IMR_ERR_USRCTL_REINIT BIT(0)
+#define B_BE_TXPKTCTL_B1_ERRFLAG_IMR_CLR (B_BE_B1_IMR_ERR_USRCTL_REINIT | \
+ B_BE_B1_IMR_ERR_USRCTL_NOINIT | \
+ B_BE_B1_IMR_DBG_USRCTL_RDNRLSCMD | \
+ B_BE_B1_IMR_DBG_USRCTL_RLSBMPLEN | \
+ B_BE_B1_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_BE_B1_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_BE_B1_IMR_ERR_CMDPSR_FRZTO | \
+ B_BE_B1_IMR_ERR_CMDPSR_TBLSZ | \
+ B_BE_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_BE_B1_IMR_ERR_PRELD_ENTNUMCFG)
+#define B_BE_TXPKTCTL_B1_ERRFLAG_IMR_SET (B_BE_B1_IMR_ERR_USRCTL_REINIT | \
+ B_BE_B1_IMR_ERR_USRCTL_NOINIT | \
+ B_BE_B1_IMR_ERR_CMDPSR_1STCMDERR | \
+ B_BE_B1_IMR_ERR_CMDPSR_CMDTYPE | \
+ B_BE_B1_IMR_ERR_CMDPSR_FRZTO | \
+ B_BE_B1_IMR_ERR_CMDPSR_TBLSZ | \
+ B_BE_B1_IMR_ERR_PRELD_RLSPKTSZERR | \
+ B_BE_B1_IMR_ERR_PRELD_ENTNUMCFG)
+
+#define R_BE_MLO_INIT_CTL 0xA114
+#define B_BE_MLO_TABLE_INIT_DONE BIT(31)
+#define B_BE_MLO_TABLE_CLR_DONE BIT(30)
+#define B_BE_MLO_TABLE_REINIT BIT(23)
+#define B_BE_MLO_TABLE_HW_FLAG_CLR BIT(22)
+
+#define R_BE_MLO_ERR_IDCT_IMR 0xA128
+#define B_BE_MLO_ERR_IDCT_IMR_0 BIT(31)
+#define B_BE_MLO_ERR_IDCT_IMR_1 BIT(30)
+#define B_BE_MLO_ERR_IDCT_IMR_2 BIT(29)
+#define B_BE_MLO_ERR_IDCT_IMR_3 BIT(28)
+#define B_BE_MLO_ERR_IDCT_IMR_CLR (B_BE_MLO_ERR_IDCT_IMR_2 | \
+ B_BE_MLO_ERR_IDCT_IMR_1 | \
+ B_BE_MLO_ERR_IDCT_IMR_0)
+#define B_BE_MLO_ERR_IDCT_IMR_SET (B_BE_MLO_ERR_IDCT_IMR_2 | \
+ B_BE_MLO_ERR_IDCT_IMR_1 | \
+ B_BE_MLO_ERR_IDCT_IMR_0)
+
+#define R_BE_MLO_ERR_IDCT_ISR 0xA12C
+#define B_BE_MLO_ISR_IDCT_0 BIT(31)
+#define B_BE_MLO_ISR_IDCT_1 BIT(30)
+#define B_BE_MLO_ISR_IDCT_2 BIT(29)
+#define B_BE_MLO_ISR_IDCT_3 BIT(28)
+
+#define R_BE_PLRLS_ERR_IMR 0xA218
+#define B_BE_PLRLS_CTL_FRZTO_IMR BIT(0)
+#define B_BE_PLRLS_ERR_IMR_CLR B_BE_PLRLS_CTL_FRZTO_IMR
+#define B_BE_PLRLS_ERR_IMR_SET B_BE_PLRLS_CTL_FRZTO_IMR
+
+#define R_BE_PLRLS_ERR_ISR 0xA21C
+#define B_BE_PLRLS_CTL_EVT03_ISR BIT(3)
+#define B_BE_PLRLS_CTL_EVT02_ISR BIT(2)
+#define B_BE_PLRLS_CTL_EVT01_ISR BIT(1)
+#define B_BE_PLRLS_CTL_FRZTO_ISR BIT(0)
+
+#define R_BE_SS_CTRL 0xA310
+#define B_BE_SS_INIT_DONE BIT(31)
+#define B_BE_WDE_STA_DIS BIT(30)
+#define B_BE_WARM_INIT BIT(29)
+#define B_BE_BAND_TRIG_EN BIT(28)
+#define B_BE_RMAC_REQ_DIS BIT(27)
+#define B_BE_DLYTX_SEL_MASK GENMASK(25, 24)
+#define B_BE_WMM3_SWITCH_MASK GENMASK(23, 22)
+#define B_BE_WMM2_SWITCH_MASK GENMASK(21, 20)
+#define B_BE_WMM1_SWITCH_MASK GENMASK(19, 18)
+#define B_BE_WMM0_SWITCH_MASK GENMASK(17, 16)
+#define B_BE_STA_OPTION_CR BIT(15)
+#define B_BE_EMLSR_STA_EMPTY_EN BIT(11)
+#define B_BE_MLO_HW_CHGLINK_EN BIT(10)
+#define B_BE_BAND1_TRIG_EN BIT(9)
+#define B_BE_RMAC1_REQ_DIS BIT(8)
+#define B_BE_MRT_SRAM_EN BIT(7)
+#define B_BE_MRT_INIT_EN BIT(6)
+#define B_BE_AVG_LENG_EN BIT(5)
+#define B_BE_AVG_INIT_EN BIT(4)
+#define B_BE_LENG_INIT_EN BIT(2)
+#define B_BE_PMPA_INIT_EN BIT(1)
+#define B_BE_SS_EN BIT(0)
+
+#define R_BE_INTERRUPT_MASK_REG 0xA3F0
+#define B_BE_PLE_B_PKTID_ERR_IMR BIT(2)
+#define B_BE_RPT_TIMEOUT_IMR BIT(1)
+#define B_BE_SEARCH_TIMEOUT_IMR BIT(0)
+#define B_BE_INTERRUPT_MASK_REG_CLR (B_BE_SEARCH_TIMEOUT_IMR | \
+ B_BE_RPT_TIMEOUT_IMR | \
+ B_BE_PLE_B_PKTID_ERR_IMR)
+#define B_BE_INTERRUPT_MASK_REG_SET (B_BE_SEARCH_TIMEOUT_IMR | \
+ B_BE_RPT_TIMEOUT_IMR | \
+ B_BE_PLE_B_PKTID_ERR_IMR)
+
+#define R_BE_INTERRUPT_STS_REG 0xA3F4
+#define B_BE_PLE_B_PKTID_ERR_ISR BIT(2)
+#define B_BE_RPT_TIMEOUT_ISR BIT(1)
+#define B_BE_SEARCH_TIMEOUT_ISR BIT(0)
+
+#define R_BE_HAXI_INIT_CFG1 0xB000
+#define B_BE_CFG_WD_PERIOD_IDLE_MASK GENMASK(31, 28)
+#define B_BE_CFG_WD_PERIOD_ACTIVE_MASK GENMASK(27, 24)
+#define B_BE_EN_RO_IDX_UPD_BY_IO BIT(19)
+#define B_BE_RST_KEEP_REG BIT(18)
+#define B_BE_FLUSH_HAXI_MST BIT(17)
+#define B_BE_SET_BDRAM_BOUND BIT(16)
+#define B_BE_ADDRINFO_ALIGN4B_EN BIT(15)
+#define B_BE_RXBD_DONE_MODE_MASK GENMASK(14, 13)
+#define B_BE_RXQ_RXBD_MODE_MASK GENMASK(12, 11)
+#define B_BE_DMA_MODE_MASK GENMASK(10, 8)
+#define S_BE_DMA_MOD_PCIE_NO_DATA_CPU 0x0
+#define S_BE_DMA_MOD_PCIE_DATA_CPU 0x1
+#define S_BE_DMA_MOD_USB 0x4
+#define S_BE_DMA_MOD_SDIO 0x6
+#define B_BE_STOP_AXI_MST BIT(7)
+#define B_BE_RXDMA_ALIGN64B_EN BIT(6)
+#define B_BE_RXDMA_EN BIT(5)
+#define B_BE_TXDMA_EN BIT(4)
+#define B_BE_MAX_RXDMA_MASK GENMASK(3, 2)
+#define B_BE_MAX_TXDMA_MASK GENMASK(1, 0)
+
+#define R_BE_HAXI_DMA_STOP1 0xB010
+#define B_BE_STOP_WPDMA BIT(31)
+#define B_BE_STOP_CH14 BIT(14)
+#define B_BE_STOP_CH13 BIT(13)
+#define B_BE_STOP_CH12 BIT(12)
+#define B_BE_STOP_CH11 BIT(11)
+#define B_BE_STOP_CH10 BIT(10)
+#define B_BE_STOP_CH9 BIT(9)
+#define B_BE_STOP_CH8 BIT(8)
+#define B_BE_STOP_CH7 BIT(7)
+#define B_BE_STOP_CH6 BIT(6)
+#define B_BE_STOP_CH5 BIT(5)
+#define B_BE_STOP_CH4 BIT(4)
+#define B_BE_STOP_CH3 BIT(3)
+#define B_BE_STOP_CH2 BIT(2)
+#define B_BE_STOP_CH1 BIT(1)
+#define B_BE_STOP_CH0 BIT(0)
+
+#define R_BE_HAXI_IDCT_MSK 0xB0B8
+#define B_BE_HAXI_RRESP_ERR_IDCT_MSK BIT(7)
+#define B_BE_HAXI_BRESP_ERR_IDCT_MSK BIT(6)
+#define B_BE_RXDMA_ERR_FLAG_IDCT_MSK BIT(5)
+#define B_BE_SET_FC_ERROR_FLAG_IDCT_MSK BIT(4)
+#define B_BE_TXBD_LEN0_ERR_IDCT_MSK BIT(3)
+#define B_BE_TXBD_4KBOUND_ERR_IDCT_MSK BIT(2)
+#define B_BE_RXMDA_STUCK_IDCT_MSK BIT(1)
+#define B_BE_TXMDA_STUCK_IDCT_MSK BIT(0)
+#define B_BE_HAXI_IDCT_MSK_CLR (B_BE_TXMDA_STUCK_IDCT_MSK | \
+ B_BE_RXMDA_STUCK_IDCT_MSK | \
+ B_BE_TXBD_LEN0_ERR_IDCT_MSK | \
+ B_BE_SET_FC_ERROR_FLAG_IDCT_MSK | \
+ B_BE_RXDMA_ERR_FLAG_IDCT_MSK | \
+ B_BE_HAXI_BRESP_ERR_IDCT_MSK | \
+ B_BE_HAXI_RRESP_ERR_IDCT_MSK)
+#define B_BE_HAXI_IDCT_MSK_SET (B_BE_TXMDA_STUCK_IDCT_MSK | \
+ B_BE_RXMDA_STUCK_IDCT_MSK | \
+ B_BE_TXBD_LEN0_ERR_IDCT_MSK | \
+ B_BE_SET_FC_ERROR_FLAG_IDCT_MSK | \
+ B_BE_RXDMA_ERR_FLAG_IDCT_MSK | \
+ B_BE_HAXI_BRESP_ERR_IDCT_MSK | \
+ B_BE_HAXI_RRESP_ERR_IDCT_MSK)
+
+#define R_BE_HAXI_IDCT 0xB0BC
+#define B_BE_HAXI_RRESP_ERR_IDCT BIT(7)
+#define B_BE_HAXI_BRESP_ERR_IDCT BIT(6)
+#define B_BE_RXDMA_ERR_FLAG_IDCT BIT(5)
+#define B_BE_SET_FC_ERROR_FLAG_IDCT BIT(4)
+#define B_BE__TXBD_LEN0_ERR_IDCT BIT(3)
+#define B_BE__TXBD_4KBOUND_ERR_IDCT BIT(2)
+#define B_BE_RXMDA_STUCK_IDCT BIT(1)
+#define B_BE_TXMDA_STUCK_IDCT BIT(0)
+
+#define R_BE_HCI_FC_CTRL 0xB700
+#define B_BE_WD_PAGE_MODE_MASK GENMASK(17, 16)
+#define B_BE_HCI_FC_CH14_FULL_COND_MASK GENMASK(15, 14)
+#define B_BE_HCI_FC_TWD_FULL_COND_MASK GENMASK(13, 12)
+#define B_BE_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10)
+#define B_BE_HCI_FC_WP_CH811_FULL_COND_MASK GENMASK(9, 8)
+#define B_BE_HCI_FC_WP_CH07_FULL_COND_MASK GENMASK(7, 6)
+#define B_BE_HCI_FC_WD_FULL_COND_MASK GENMASK(5, 4)
+#define B_BE_HCI_FC_CH12_EN BIT(3)
+#define B_BE_HCI_FC_MODE_MASK GENMASK(2, 1)
+#define B_BE_HCI_FC_EN BIT(0)
+
+#define R_BE_CH_PAGE_CTRL 0xB704
+#define B_BE_PREC_PAGE_CH12_V1_MASK GENMASK(21, 16)
+#define B_BE_PREC_PAGE_CH011_V1_MASK GENMASK(5, 0)
+
+#define R_BE_PUB_PAGE_INFO3 0xB78C
+#define B_BE_G1_AVAL_PG_MASK GENMASK(28, 16)
+#define B_BE_G0_AVAL_PG_MASK GENMASK(12, 0)
+
+#define R_BE_PUB_PAGE_CTRL1 0xB790
+#define B_BE_PUBPG_G1_MASK GENMASK(28, 16)
+#define B_BE_PUBPG_G0_MASK GENMASK(12, 0)
+
+#define R_BE_PUB_PAGE_CTRL2 0xB794
+#define B_BE_PUBPG_ALL_MASK GENMASK(12, 0)
+
+#define R_BE_PUB_PAGE_INFO1 0xB79C
+#define B_BE_G1_USE_PG_MASK GENMASK(28, 16)
+#define B_BE_G0_USE_PG_MASK GENMASK(12, 0)
+
+#define R_BE_PUB_PAGE_INFO2 0xB7A0
+#define B_BE_PUB_AVAL_PG_MASK GENMASK(12, 0)
+
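+/*
+ * Illustrative only: the page-info registers above report two page groups
+ * packed into one word. A hypothetical probe of the available public
+ * pages, assuming FIELD_GET() from <linux/bitfield.h>:
+ *
+ *	u32 info = rtw89_read32(rtwdev, R_BE_PUB_PAGE_INFO3);
+ *	u16 g0_pg = FIELD_GET(B_BE_G0_AVAL_PG_MASK, info);
+ *	u16 g1_pg = FIELD_GET(B_BE_G1_AVAL_PG_MASK, info);
+ */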
+#define R_BE_WP_PAGE_CTRL1 0xB7A4
+#define B_BE_PREC_PAGE_WP_CH811_MASK GENMASK(24, 16)
+#define B_BE_PREC_PAGE_WP_CH07_MASK GENMASK(8, 0)
+
+#define R_BE_WP_PAGE_CTRL2 0xB7A8
+#define B_BE_WP_THRD_MASK GENMASK(12, 0)
+
+#define R_BE_WP_PAGE_INFO1 0xB7AC
+#define B_BE_WP_AVAL_PG_MASK GENMASK(28, 16)
+
+#define R_BE_CMAC_SHARE_FUNC_EN 0x0E000
+#define B_BE_CMAC_SHARE_CRPRT BIT(31)
+#define B_BE_CMAC_SHARE_EN BIT(30)
+#define B_BE_FORCE_BTCOEX_REG_GCKEN BIT(24)
+#define B_BE_FORCE_CMAC_SHARE_COMMON_REG_GCKEN BIT(16)
+#define B_BE_FORCE_CMAC_SHARE_REG_GCKEN BIT(15)
+#define B_BE_RESPBA_EN BIT(2)
+#define B_BE_ADDRSRCH_EN BIT(1)
+#define B_BE_BTCOEX_EN BIT(0)
+
+#define R_BE_CMAC_SHARE_ACQCHK_CFG_0 0x0E010
+#define B_BE_ACQCHK_ERR_FLAG_MASK GENMASK(31, 24)
+#define B_BE_R_ACQCHK_ENTRY_IDX_SEL_MASK GENMASK(7, 4)
+#define B_BE_MACID_ACQ_GRP1_CLR_P BIT(3)
+#define B_BE_MACID_ACQ_GRP0_CLR_P BIT(2)
+#define B_BE_R_MACID_ACQ_CHK_EN BIT(0)
+
#define R_BE_CMAC_FUNC_EN 0x10000
#define R_BE_CMAC_FUNC_EN_C1 0x14000
#define B_BE_CMAC_CRPRT BIT(31)
@@ -3789,6 +5854,138 @@
B_BE_CMAC_CRPRT | B_BE_TXTIME_EN | B_BE_RESP_PKTCTL_EN | \
B_BE_SIGB_EN)
+#define R_BE_CK_EN 0x10004
+#define R_BE_CK_EN_C1 0x14004
+#define B_BE_CMAC_CKEN BIT(30)
+#define B_BE_BCN_P1_P4_CKEN BIT(15)
+#define B_BE_BCN_P0MB1_15_CKEN BIT(14)
+#define B_BE_TXTIME_CKEN BIT(8)
+#define B_BE_RESP_PKTCTL_CKEN BIT(7)
+#define B_BE_SIGB_CKEN BIT(6)
+#define B_BE_PHYINTF_CKEN BIT(5)
+#define B_BE_CMAC_DMA_CKEN BIT(4)
+#define B_BE_PTCLTOP_CKEN BIT(3)
+#define B_BE_SCHEDULER_CKEN BIT(2)
+#define B_BE_TMAC_CKEN BIT(1)
+#define B_BE_RMAC_CKEN BIT(0)
+#define B_BE_CK_EN_SET (B_BE_CMAC_CKEN | B_BE_PHYINTF_CKEN | B_BE_CMAC_DMA_CKEN | \
+ B_BE_PTCLTOP_CKEN | B_BE_SCHEDULER_CKEN | B_BE_TMAC_CKEN | \
+ B_BE_RMAC_CKEN | B_BE_TXTIME_CKEN | B_BE_RESP_PKTCTL_CKEN | \
+ B_BE_SIGB_CKEN)
+
+#define R_BE_TX_SUB_BAND_VALUE 0x10088
+#define R_BE_TX_SUB_BAND_VALUE_C1 0x14088
+#define B_BE_PRI20_BITMAP_MASK GENMASK(31, 16)
+#define BE_PRI20_BITMAP_MAX 15
+#define B_BE_TXSB_160M_MASK GENMASK(15, 12)
+#define S_BE_TXSB_160M_0 0
+#define S_BE_TXSB_160M_1 1
+#define B_BE_TXSB_80M_MASK GENMASK(11, 8)
+#define S_BE_TXSB_80M_0 0
+#define S_BE_TXSB_80M_2 2
+#define S_BE_TXSB_80M_4 4
+#define B_BE_TXSB_40M_MASK GENMASK(7, 4)
+#define S_BE_TXSB_40M_0 0
+#define S_BE_TXSB_40M_1 1
+#define S_BE_TXSB_40M_4 4
+#define B_BE_TXSB_20M_MASK GENMASK(3, 0)
+#define S_BE_TXSB_20M_8 8
+#define S_BE_TXSB_20M_4 4
+#define S_BE_TXSB_20M_2 2
+
+#define R_BE_PTCL_RRSR0 0x1008C
+#define R_BE_PTCL_RRSR0_C1 0x1408C
+#define B_BE_RRSR_HE_MASK GENMASK(31, 24)
+#define B_BE_RRSR_VHT_MASK GENMASK(23, 16)
+#define B_BE_RRSR_HT_MASK GENMASK(15, 8)
+#define B_BE_RRSR_OFDM_MASK GENMASK(7, 0)
+
+#define R_BE_PTCL_RRSR1 0x10090
+#define R_BE_PTCL_RRSR1_C1 0x14090
+#define B_BE_RRSR_EHT_MASK GENMASK(23, 16)
+#define B_BE_RRSR_RATE_EN_MASK GENMASK(12, 8)
+#define B_BE_RSC_MASK GENMASK(7, 6)
+#define B_BE_RRSR_CCK_MASK GENMASK(3, 0)
+
+#define R_BE_CMAC_ERR_IMR 0x10160
+#define R_BE_CMAC_ERR_IMR_C1 0x14160
+#define B_BE_CMAC_FW_ERR_IDCT_EN BIT(16)
+#define B_BE_PTCL_TX_IDLETO_IDCT_EN BIT(9)
+#define B_BE_WMAC_RX_IDLETO_IDCT_EN BIT(8)
+#define B_BE_WMAC_TX_ERR_IND_EN BIT(7)
+#define B_BE_WMAC_RX_ERR_IND_EN BIT(6)
+#define B_BE_TXPWR_CTRL_ERR_IND_EN BIT(5)
+#define B_BE_PHYINTF_ERR_IND_EN BIT(4)
+#define B_BE_DMA_TOP_ERR_IND_EN BIT(3)
+#define B_BE_RESP_PKTCTL_ERR_IND_EN BIT(2)
+#define B_BE_PTCL_TOP_ERR_IND_EN BIT(1)
+#define B_BE_SCHEDULE_TOP_ERR_IND_EN BIT(0)
+
+#define R_BE_CMAC_ERR_ISR 0x10164
+#define R_BE_CMAC_ERR_ISR_C1 0x14164
+#define B_BE_CMAC_FW_ERR_IDCT BIT(16)
+#define B_BE_PTCL_TX_IDLETO_IDCT BIT(9)
+#define B_BE_WMAC_RX_IDLETO_IDCT BIT(8)
+#define B_BE_WMAC_TX_ERR_IND BIT(7)
+#define B_BE_WMAC_RX_ERR_IND BIT(6)
+#define B_BE_TXPWR_CTRL_ERR_IND BIT(5)
+#define B_BE_PHYINTF_ERR_IND BIT(4)
+#define B_BE_DMA_TOP_ERR_IND BIT(3)
+#define B_BE_RESP_PKTCTL_ERR_IDCT BIT(2)
+#define B_BE_PTCL_TOP_ERR_IND BIT(1)
+#define B_BE_SCHEDULE_TOP_ERR_IND BIT(0)
+
+#define R_BE_SER_L0_DBG_CNT 0x10170
+#define R_BE_SER_L0_DBG_CNT_C1 0x14170
+#define B_BE_SER_L0_PHYINTF_CNT_MASK GENMASK(31, 24)
+#define B_BE_SER_L0_DMA_CNT_MASK GENMASK(23, 16)
+#define B_BE_SER_L0_PTCL_CNT_MASK GENMASK(15, 8)
+#define B_BE_SER_L0_SCH_CNT_MASK GENMASK(7, 0)
+
+#define R_BE_SER_L0_DBG_CNT1 0x10174
+#define R_BE_SER_L0_DBG_CNT1_C1 0x14174
+#define B_BE_SER_L0_TMAC_COUNTER_MASK GENMASK(23, 16)
+#define B_BE_SER_L0_RMAC_COUNTER_MASK GENMASK(15, 8)
+#define B_BE_SER_L0_TXPWR_COUNTER_MASK GENMASK(7, 0)
+
+#define R_BE_SER_L0_DBG_CNT2 0x10178
+#define R_BE_SER_L0_DBG_CNT2_C1 0x14178
+
+#define R_BE_SER_L0_DBG_CNT3 0x1017C
+#define R_BE_SER_L0_DBG_CNT3_C1 0x1417C
+#define B_BE_SER_L0_SUBMODULE_BIT31_CNT BIT(31)
+#define B_BE_SER_L0_SUBMODULE_BIT30_CNT BIT(30)
+#define B_BE_SER_L0_SUBMODULE_BIT29_CNT BIT(29)
+#define B_BE_SER_L0_SUBMODULE_BIT28_CNT BIT(28)
+#define B_BE_SER_L0_SUBMODULE_BIT27_CNT BIT(27)
+#define B_BE_SER_L0_SUBMODULE_BIT26_CNT BIT(26)
+#define B_BE_SER_L0_SUBMODULE_BIT25_CNT BIT(25)
+#define B_BE_SER_L0_SUBMODULE_BIT24_CNT BIT(24)
+#define B_BE_SER_L0_SUBMODULE_BIT23_CNT BIT(23)
+#define B_BE_SER_L0_SUBMODULE_BIT22_CNT BIT(22)
+#define B_BE_SER_L0_SUBMODULE_BIT21_CNT BIT(21)
+#define B_BE_SER_L0_SUBMODULE_BIT20_CNT BIT(20)
+#define B_BE_SER_L0_SUBMODULE_BIT19_CNT BIT(19)
+#define B_BE_SER_L0_SUBMODULE_BIT18_CNT BIT(18)
+#define B_BE_SER_L0_SUBMODULE_BIT17_CNT BIT(17)
+#define B_BE_SER_L0_SUBMODULE_BIT16_CNT BIT(16)
+#define B_BE_SER_L0_SUBMODULE_BIT15_CNT BIT(15)
+#define B_BE_SER_L0_SUBMODULE_BIT14_CNT BIT(14)
+#define B_BE_SER_L0_SUBMODULE_BIT13_CNT BIT(13)
+#define B_BE_SER_L0_SUBMODULE_BIT12_CNT BIT(12)
+#define B_BE_SER_L0_SUBMODULE_BIT11_CNT BIT(11)
+#define B_BE_SER_L0_SUBMODULE_BIT10_CNT BIT(10)
+#define B_BE_SER_L0_SUBMODULE_BIT9_CNT BIT(9)
+#define B_BE_SER_L0_SUBMODULE_BIT8_CNT BIT(8)
+#define B_BE_SER_L0_SUBMODULE_BIT7_CNT BIT(7)
+#define B_BE_SER_L0_SUBMODULE_BIT6_CNT BIT(6)
+#define B_BE_SER_L0_SUBMODULE_BIT5_CNT BIT(5)
+#define B_BE_SER_L0_SUBMODULE_BIT4_CNT BIT(4)
+#define B_BE_SER_L0_SUBMODULE_BIT3_CNT BIT(3)
+#define B_BE_SER_L0_SUBMODULE_BIT2_CNT BIT(2)
+#define B_BE_SER_L0_SUBMODULE_BIT1_CNT BIT(1)
+#define B_BE_SER_L0_SUBMODULE_BIT0_CNT BIT(0)
+
#define R_BE_PORT_0_TSF_SYNC 0x102A0
#define R_BE_PORT_0_TSF_SYNC_C1 0x142A0
#define B_BE_P0_SYNC_NOW_P BIT(30)
@@ -3797,6 +5994,55 @@
#define B_BE_P0_SYNC_PORT_SRC_SEL_MASK GENMASK(26, 24)
#define B_BE_P0_TSFTR_SYNC_OFFSET_MASK GENMASK(18, 0)
+#define R_BE_EDCA_BCNQ_PARAM 0x10324
+#define R_BE_EDCA_BCNQ_PARAM_C1 0x14324
+#define B_BE_BCNQ_CW_MASK GENMASK(31, 24)
+#define B_BE_BCNQ_AIFS_MASK GENMASK(23, 16)
+#define BCN_IFS_25US 0x19
+#define B_BE_PIFS_MASK GENMASK(15, 8)
+#define B_BE_FORCE_BCN_IFS_MASK GENMASK(7, 0)
+
+#define R_BE_PREBKF_CFG_0 0x10338
+#define R_BE_PREBKF_CFG_0_C1 0x14338
+#define B_BE_100NS_TIME_MASK GENMASK(28, 24)
+#define B_BE_RX_AIR_END_TIME_MASK GENMASK(22, 16)
+#define B_BE_MACTX_LATENCY_MASK GENMASK(10, 8)
+#define B_BE_PREBKF_TIME_MASK GENMASK(4, 0)
+
+#define R_BE_CCA_CFG_0 0x10340
+#define R_BE_CCA_CFG_0_C1 0x14340
+#define B_BE_R_SIFS_AGGR_TIME_V1_MASK GENMASK(31, 24)
+#define B_BE_EDCCA_SEC160_EN BIT(23)
+#define B_BE_EDCCA_SEC80_EN BIT(22)
+#define B_BE_EDCCA_SEC40_EN BIT(21)
+#define B_BE_EDCCA_SEC20_EN BIT(20)
+#define B_BE_SEC160_EN BIT(19)
+#define B_BE_CCA_BITMAP_EN BIT(18)
+#define B_BE_TXPKTCTL_RST_EDCA_EN BIT(17)
+#define B_BE_WMAC_RST_EDCA_EN BIT(16)
+#define B_BE_TXFAIL_BRK_TXOP_EN BIT(11)
+#define B_BE_EDCCA_PER20_BITMAP_SIFS_EN BIT(10)
+#define B_BE_NO_GNT_WL_BRK_TXOP_EN BIT(9)
+#define B_BE_NAV_BRK_TXOP_EN BIT(8)
+#define B_BE_TX_NAV_EN BIT(7)
+#define B_BE_BCN_IGNORE_EDCCA BIT(6)
+#define B_BE_NO_GNT_WL_EN BIT(5)
+#define B_BE_EDCCA_EN BIT(4)
+#define B_BE_SEC80_EN BIT(3)
+#define B_BE_SEC40_EN BIT(2)
+#define B_BE_SEC20_EN BIT(1)
+#define B_BE_CCA_EN BIT(0)
+
+#define R_BE_CTN_CFG_0 0x1034C
+#define R_BE_CTN_CFG_0_C1 0x1434C
+#define B_BE_OTHER_LINK_BKF_BLK_TX_THD_MASK GENMASK(30, 24)
+#define B_BE_CCK_SIFS_COMP_MASK GENMASK(22, 16)
+#define B_BE_PIFS_TIMEUNIT_MASK GENMASK(15, 14)
+#define B_BE_PREBKF_TIME_NONAC_MASK GENMASK(12, 8)
+#define B_BE_SR_TX_EN BIT(2)
+#define B_BE_NAV_BLK_MGQ BIT(1)
+#define B_BE_NAV_BLK_HGQ BIT(0)
+
#define R_BE_MUEDCA_BE_PARAM_0 0x10350
#define R_BE_MUEDCA_BK_PARAM_0 0x10354
#define R_BE_MUEDCA_VI_PARAM_0 0x10358
@@ -3809,6 +6055,74 @@
#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4)
#define B_BE_MUEDCA_EN_0 BIT(0)
+#define R_BE_TB_CHK_CCA_NAV 0x103AC
+#define R_BE_TB_CHK_CCA_NAV_C1 0x143AC
+#define B_BE_TB_CHK_TX_NAV BIT(15)
+#define B_BE_TB_CHK_INTRA_NAV BIT(14)
+#define B_BE_TB_CHK_BASIC_NAV BIT(13)
+#define B_BE_TB_CHK_NO_GNT_WL BIT(12)
+#define B_BE_TB_CHK_EDCCA_S160 BIT(11)
+#define B_BE_TB_CHK_EDCCA_S80 BIT(10)
+#define B_BE_TB_CHK_EDCCA_S40 BIT(9)
+#define B_BE_TB_CHK_EDCCA_S20 BIT(8)
+#define B_BE_TB_CHK_CCA_S160 BIT(7)
+#define B_BE_TB_CHK_CCA_S80 BIT(6)
+#define B_BE_TB_CHK_CCA_S40 BIT(5)
+#define B_BE_TB_CHK_CCA_S20 BIT(4)
+#define B_BE_TB_CHK_EDCCA_BITMAP BIT(3)
+#define B_BE_TB_CHK_CCA_BITMAP BIT(2)
+#define B_BE_TB_CHK_EDCCA_P20 BIT(1)
+#define B_BE_TB_CHK_CCA_P20 BIT(0)
+
+#define R_BE_HE_SIFS_CHK_CCA_NAV 0x103B4
+#define R_BE_HE_SIFS_CHK_CCA_NAV_C1 0x143B4
+#define B_BE_HE_SIFS_CHK_TX_NAV BIT(15)
+#define B_BE_HE_SIFS_CHK_INTRA_NAV BIT(14)
+#define B_BE_HE_SIFS_CHK_BASIC_NAV BIT(13)
+#define B_BE_HE_SIFS_CHK_NO_GNT_WL BIT(12)
+#define B_BE_HE_SIFS_CHK_EDCCA_S160 BIT(11)
+#define B_BE_HE_SIFS_CHK_EDCCA_S80 BIT(10)
+#define B_BE_HE_SIFS_CHK_EDCCA_S40 BIT(9)
+#define B_BE_HE_SIFS_CHK_EDCCA_S20 BIT(8)
+#define B_BE_HE_SIFS_CHK_CCA_S160 BIT(7)
+#define B_BE_HE_SIFS_CHK_CCA_S80 BIT(6)
+#define B_BE_HE_SIFS_CHK_CCA_S40 BIT(5)
+#define B_BE_HE_SIFS_CHK_CCA_S20 BIT(4)
+#define B_BE_HE_SIFS_CHK_EDCCA_BITMAP BIT(3)
+#define B_BE_HE_SIFS_CHK_CCA_BITMAP BIT(2)
+#define B_BE_HE_SIFS_CHK_EDCCA_P20 BIT(1)
+#define B_BE_HE_SIFS_CHK_CCA_P20 BIT(0)
+
+#define R_BE_HE_CTN_CHK_CCA_NAV 0x103C4
+#define R_BE_HE_CTN_CHK_CCA_NAV_C1 0x143C4
+#define B_BE_HE_CTN_CHK_TX_NAV BIT(15)
+#define B_BE_HE_CTN_CHK_INTRA_NAV BIT(14)
+#define B_BE_HE_CTN_CHK_BASIC_NAV BIT(13)
+#define B_BE_HE_CTN_CHK_NO_GNT_WL BIT(12)
+#define B_BE_HE_CTN_CHK_EDCCA_S160 BIT(11)
+#define B_BE_HE_CTN_CHK_EDCCA_S80 BIT(10)
+#define B_BE_HE_CTN_CHK_EDCCA_S40 BIT(9)
+#define B_BE_HE_CTN_CHK_EDCCA_S20 BIT(8)
+#define B_BE_HE_CTN_CHK_CCA_S160 BIT(7)
+#define B_BE_HE_CTN_CHK_CCA_S80 BIT(6)
+#define B_BE_HE_CTN_CHK_CCA_S40 BIT(5)
+#define B_BE_HE_CTN_CHK_CCA_S20 BIT(4)
+#define B_BE_HE_CTN_CHK_EDCCA_BITMAP BIT(3)
+#define B_BE_HE_CTN_CHK_CCA_BITMAP BIT(2)
+#define B_BE_HE_CTN_CHK_EDCCA_P20 BIT(1)
+#define B_BE_HE_CTN_CHK_CCA_P20 BIT(0)
+
+#define R_BE_SCHEDULE_ERR_IMR 0x103E8
+#define R_BE_SCHEDULE_ERR_IMR_C1 0x143E8
+#define B_BE_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_BE_SCHEDULE_ERR_IMR_CLR B_BE_FSM_TIMEOUT_ERR_INT_EN
+#define B_BE_SCHEDULE_ERR_IMR_SET B_BE_FSM_TIMEOUT_ERR_INT_EN
+
+#define R_BE_SCHEDULE_ERR_ISR 0x103EC
+#define R_BE_SCHEDULE_ERR_ISR_C1 0x143EC
+#define B_BE_SORT_NON_IDLE_ERR_INT BIT(1)
+#define B_BE_FSM_TIMEOUT_ERR_INT BIT(0)
+
#define R_BE_PORT_CFG_P0 0x10400
#define R_BE_PORT_CFG_P0_C1 0x14400
#define B_BE_BCN_ERLY_SORT_EN_P0 BIT(18)
@@ -3923,12 +6237,51 @@
#define R_BE_PORT_HGQ_WINDOW_CFG 0x105A0
#define R_BE_PORT_HGQ_WINDOW_CFG_C1 0x145A0
+#define R_BE_PTCL_COMMON_SETTING_0 0x10800
+#define R_BE_PTCL_COMMON_SETTING_0_C1 0x14800
+#define B_BE_PCIE_MODE_MASK GENMASK(15, 14)
+#define B_BE_CPUMGQ_LIFETIME_EN BIT(8)
+#define B_BE_MGQ_LIFETIME_EN BIT(7)
+#define B_BE_LIFETIME_EN BIT(6)
+#define B_BE_DIS_PTCL_CLK_GATING BIT(5)
+#define B_BE_PTCL_TRIGGER_SS_EN_UL BIT(4)
+#define B_BE_PTCL_TRIGGER_SS_EN_1 BIT(3)
+#define B_BE_PTCL_TRIGGER_SS_EN_0 BIT(2)
+#define B_BE_CMAC_TX_MODE_1 BIT(1)
+#define B_BE_CMAC_TX_MODE_0 BIT(0)
+
+#define R_BE_TB_PPDU_CTRL 0x1080C
+#define R_BE_TB_PPDU_CTRL_C1 0x1480C
+#define B_BE_TB_PPDU_BK_DIS BIT(15)
+#define B_BE_TB_PPDU_BE_DIS BIT(14)
+#define B_BE_TB_PPDU_VI_DIS BIT(13)
+#define B_BE_TB_PPDU_VO_DIS BIT(12)
+#define B_BE_QOSNULL_UPD_MUEDCA_EN BIT(3)
+#define B_BE_TB_BYPASS_TXPWR BIT(2)
+#define B_BE_SW_PREFER_AC_MASK GENMASK(1, 0)
+
+#define R_BE_AMPDU_AGG_LIMIT 0x10810
+#define R_BE_AMPDU_AGG_LIMIT_C1 0x14810
+#define B_BE_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
+#define AMPDU_MAX_TIME 0x9E
+#define B_BE_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
+#define B_BE_RTS_MAX_AGG_NUM_MASK GENMASK(15, 8)
+#define B_BE_MAX_AGG_NUM_MASK GENMASK(7, 0)
+
#define R_BE_AGG_LEN_HT_0 0x10814
#define R_BE_AGG_LEN_HT_0_C1 0x14814
#define B_BE_AMPDU_MAX_LEN_HT_MASK GENMASK(31, 16)
#define B_BE_RTS_TXTIME_TH_MASK GENMASK(15, 8)
#define B_BE_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_BE_SIFS_SETTING 0x10824
+#define R_BE_SIFS_SETTING_C1 0x14824
+#define B_BE_HW_CTS2SELF_PKT_LEN_TH_MASK GENMASK(31, 24)
+#define B_BE_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK GENMASK(23, 18)
+#define B_BE_HW_CTS2SELF_EN BIT(16)
+#define B_BE_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8)
+#define B_BE_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -3947,6 +6300,375 @@
#define R_BE_PTCL_BSS_COLOR_1_C1 0x148A4
#define B_BE_BSS_COLOB_BE_PORT_4_MASK GENMASK(5, 0)
+#define R_BE_PTCL_IMR_2 0x108B8
+#define R_BE_PTCL_IMR_2_C1 0x148B8
+#define B_BE_NO_TRX_TIMEOUT_IMR BIT(1)
+#define B_BE_TX_IDLE_TIMEOUT_IMR BIT(0)
+#define B_BE_PTCL_IMR_2_CLR B_BE_TX_IDLE_TIMEOUT_IMR
+#define B_BE_PTCL_IMR_2_SET 0
+
+#define R_BE_PTCL_IMR0 0x108C0
+#define R_BE_PTCL_IMR0_C1 0x148C0
+#define B_BE_PTCL_ERROR_FLAG_IMR BIT(31)
+#define B_BE_FSM1_TIMEOUT_ERR_INT_EN BIT(1)
+#define B_BE_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_BE_PTCL_IMR0_CLR (B_BE_FSM_TIMEOUT_ERR_INT_EN | \
+ B_BE_FSM1_TIMEOUT_ERR_INT_EN | \
+ B_BE_PTCL_ERROR_FLAG_IMR)
+#define B_BE_PTCL_IMR0_SET (B_BE_FSM_TIMEOUT_ERR_INT_EN | \
+ B_BE_FSM1_TIMEOUT_ERR_INT_EN | \
+ B_BE_PTCL_ERROR_FLAG_IMR)
+
+#define R_BE_PTCL_ISR0 0x108C4
+#define R_BE_PTCL_ISR0_C1 0x148C4
+#define B_BE_PTCL_ERROR_FLAG_ISR BIT(31)
+#define B_BE_FSM1_TIMEOUT_ERR BIT(1)
+#define B_BE_FSM_TIMEOUT_ERR BIT(0)
+
+#define R_BE_PTCL_IMR1 0x108C8
+#define R_BE_PTCL_IMR1_C1 0x148C8
+#define B_BE_F2PCMD_PKTID_IMR BIT(30)
+#define B_BE_F2PCMD_RD_PKTID_IMR BIT(29)
+#define B_BE_F2PCMD_ASSIGN_PKTID_IMR BIT(28)
+#define B_BE_F2PCMD_USER_ALLC_IMR BIT(27)
+#define B_BE_RX_SPF_U0_PKTID_IMR BIT(26)
+#define B_BE_TX_SPF_U1_PKTID_IMR BIT(25)
+#define B_BE_TX_SPF_U2_PKTID_IMR BIT(24)
+#define B_BE_TX_SPF_U3_PKTID_IMR BIT(23)
+#define B_BE_TX_RECORD_PKTID_IMR BIT(22)
+#define B_BE_TWTSP_QSEL_IMR BIT(14)
+#define B_BE_F2P_RLS_CTN_SEL_IMR BIT(13)
+#define B_BE_BCNQ_ORDER_IMR BIT(12)
+#define B_BE_Q_PKTID_IMR BIT(11)
+#define B_BE_D_PKTID_IMR BIT(10)
+#define B_BE_TXPRT_FULL_DROP_IMR BIT(9)
+#define B_BE_F2PCMDRPT_FULL_DROP_IMR BIT(8)
+#define B_BE_PTCL_IMR1_CLR (B_BE_F2PCMDRPT_FULL_DROP_IMR | \
+ B_BE_TXPRT_FULL_DROP_IMR | \
+ B_BE_D_PKTID_IMR | \
+ B_BE_Q_PKTID_IMR | \
+ B_BE_BCNQ_ORDER_IMR | \
+ B_BE_F2P_RLS_CTN_SEL_IMR | \
+ B_BE_TWTSP_QSEL_IMR | \
+ B_BE_TX_RECORD_PKTID_IMR | \
+ B_BE_TX_SPF_U3_PKTID_IMR | \
+ B_BE_TX_SPF_U2_PKTID_IMR | \
+ B_BE_TX_SPF_U1_PKTID_IMR | \
+ B_BE_RX_SPF_U0_PKTID_IMR | \
+ B_BE_F2PCMD_USER_ALLC_IMR | \
+ B_BE_F2PCMD_ASSIGN_PKTID_IMR | \
+ B_BE_F2PCMD_RD_PKTID_IMR | \
+ B_BE_F2PCMD_PKTID_IMR)
+#define B_BE_PTCL_IMR1_SET B_BE_F2PCMD_USER_ALLC_IMR
+
+#define R_BE_PTCL_ISR1 0x108CC
+#define R_BE_PTCL_ISR1_C1 0x148CC
+#define B_BE_F2PCMD_PKTID_ERR BIT(30)
+#define B_BE_F2PCMD_RD_PKTID_ERR BIT(29)
+#define B_BE_F2PCMD_ASSIGN_PKTID_ERR BIT(28)
+#define B_BE_F2PCMD_USER_ALLC_ERR BIT(27)
+#define B_BE_RX_SPF_U0_PKTID_ERR BIT(26)
+#define B_BE_TX_SPF_U1_PKTID_ERR BIT(25)
+#define B_BE_TX_SPF_U2_PKTID_ERR BIT(24)
+#define B_BE_TX_SPF_U3_PKTID_ERR BIT(23)
+#define B_BE_TX_RECORD_PKTID_ERR BIT(22)
+#define B_BE_TWTSP_QSEL_ERR BIT(14)
+#define B_BE_F2P_RLS_CTN_SEL_ERR BIT(13)
+#define B_BE_BCNQ_ORDER_ERR BIT(12)
+#define B_BE_Q_PKTID_ERR BIT(11)
+#define B_BE_D_PKTID_ERR BIT(10)
+#define B_BE_TXPRT_FULL_DROP_ERR BIT(9)
+#define B_BE_F2PCMDRPT_FULL_DROP_ERR BIT(8)
+
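+/*
+ * Sketch only: one plausible service path for the PTCL IMR/ISR pair above,
+ * assuming the latched flags are write-1-to-clear (not confirmed by this
+ * patch) and the driver's rtw89_read32()/rtw89_write32()/rtw89_warn():
+ *
+ *	u32 isr = rtw89_read32(rtwdev, R_BE_PTCL_ISR1);
+ *
+ *	if (isr & B_BE_F2PCMD_USER_ALLC_ERR)
+ *		rtw89_warn(rtwdev, "PTCL F2P user alloc error\n");
+ *	rtw89_write32(rtwdev, R_BE_PTCL_ISR1, isr);
+ */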
+#define R_BE_PTCL_FSM_MON 0x108E8
+#define R_BE_PTCL_FSM_MON_C1 0x148E8
+#define B_BE_PTCL_FSM2_TO_MODE BIT(30)
+#define B_BE_PTCL_FSM2_TO_THR_MASK GENMASK(29, 24)
+#define B_BE_PTCL_FSM1_TO_MODE BIT(22)
+#define B_BE_PTCL_FSM1_TO_THR_MASK GENMASK(21, 16)
+#define B_BE_PTCL_FSM0_TO_MODE BIT(14)
+#define B_BE_PTCL_FSM0_TO_THR_MASK GENMASK(13, 8)
+#define B_BE_PTCL_TX_ARB_TO_MODE BIT(6)
+#define B_BE_PTCL_TX_ARB_TO_THR_MASK GENMASK(5, 0)
+
+#define R_BE_PTCL_TX_CTN_SEL 0x108EC
+#define R_BE_PTCL_TX_CTN_SEL_C1 0x148EC
+#define B_BE_PTCL_TXOP_STAT BIT(8)
+#define B_BE_PTCL_BUSY BIT(7)
+#define B_BE_PTCL_DROP BIT(5)
+#define B_BE_PTCL_TX_QUEUE_IDX_MASK GENMASK(4, 0)
+
+#define R_BE_RX_ERROR_FLAG 0x10C00
+#define R_BE_RX_ERROR_FLAG_C1 0x14C00
+#define B_BE_RX_CSI_NOT_RELEASE_ERROR BIT(31)
+#define B_BE_RX_GET_NULL_PKT_ERROR BIT(30)
+#define B_BE_RX_RU0_FSM_HANG_ERROR BIT(29)
+#define B_BE_RX_RU1_FSM_HANG_ERROR BIT(28)
+#define B_BE_RX_RU2_FSM_HANG_ERROR BIT(27)
+#define B_BE_RX_RU3_FSM_HANG_ERROR BIT(26)
+#define B_BE_RX_RU4_FSM_HANG_ERROR BIT(25)
+#define B_BE_RX_RU5_FSM_HANG_ERROR BIT(24)
+#define B_BE_RX_RU6_FSM_HANG_ERROR BIT(23)
+#define B_BE_RX_RU7_FSM_HANG_ERROR BIT(22)
+#define B_BE_RX_RXSTS_FSM_HANG_ERROR BIT(21)
+#define B_BE_RX_CSI_FSM_HANG_ERROR BIT(20)
+#define B_BE_RX_TXRPT_FSM_HANG_ERROR BIT(19)
+#define B_BE_RX_F2PCMD_FSM_HANG_ERROR BIT(18)
+#define B_BE_RX_RU0_ZERO_LENGTH_ERROR BIT(17)
+#define B_BE_RX_RU1_ZERO_LENGTH_ERROR BIT(16)
+#define B_BE_RX_RU2_ZERO_LENGTH_ERROR BIT(15)
+#define B_BE_RX_RU3_ZERO_LENGTH_ERROR BIT(14)
+#define B_BE_RX_RU4_ZERO_LENGTH_ERROR BIT(13)
+#define B_BE_RX_RU5_ZERO_LENGTH_ERROR BIT(12)
+#define B_BE_RX_RU6_ZERO_LENGTH_ERROR BIT(11)
+#define B_BE_RX_RU7_ZERO_LENGTH_ERROR BIT(10)
+#define B_BE_RX_RXSTS_ZERO_LENGTH_ERROR BIT(9)
+#define B_BE_RX_CSI_ZERO_LENGTH_ERROR BIT(8)
+#define B_BE_PLE_DATA_OPT_FSM_HANG BIT(7)
+#define B_BE_PLE_RXDATA_REQUEST_BUFFER_FSM_HANG BIT(6)
+#define B_BE_PLE_TXRPT_REQUEST_BUFFER_FSM_HANG BIT(5)
+#define B_BE_PLE_WD_OPT_FSM_HANG BIT(4)
+#define B_BE_PLE_ENQ_FSM_HANG BIT(3)
+#define B_BE_RXDATA_ENQUE_ORDER_ERROR BIT(2)
+#define B_BE_RXSTS_ENQUE_ORDER_ERROR BIT(1)
+#define B_BE_RX_CSI_PKT_NUM_ERROR BIT(0)
+
+#define R_BE_RX_ERROR_FLAG_IMR 0x10C04
+#define R_BE_RX_ERROR_FLAG_IMR_C1 0x14C04
+#define B_BE_RX_CSI_NOT_RELEASE_ERROR_IMR BIT(31)
+#define B_BE_RX_GET_NULL_PKT_ERROR_IMR BIT(30)
+#define B_BE_RX_RU0_FSM_HANG_ERROR_IMR BIT(29)
+#define B_BE_RX_RU1_FSM_HANG_ERROR_IMR BIT(28)
+#define B_BE_RX_RU2_FSM_HANG_ERROR_IMR BIT(27)
+#define B_BE_RX_RU3_FSM_HANG_ERROR_IMR BIT(26)
+#define B_BE_RX_RU4_FSM_HANG_ERROR_IMR BIT(25)
+#define B_BE_RX_RU5_FSM_HANG_ERROR_IMR BIT(24)
+#define B_BE_RX_RU6_FSM_HANG_ERROR_IMR BIT(23)
+#define B_BE_RX_RU7_FSM_HANG_ERROR_IMR BIT(22)
+#define B_BE_RX_RXSTS_FSM_HANG_ERROR_IMR BIT(21)
+#define B_BE_RX_CSI_FSM_HANG_ERROR_IMR BIT(20)
+#define B_BE_RX_TXRPT_FSM_HANG_ERROR_IMR BIT(19)
+#define B_BE_RX_F2PCMD_FSM_HANG_ERROR_IMR BIT(18)
+#define B_BE_RX_RU0_ZERO_LENGTH_ERROR_IMR BIT(17)
+#define B_BE_RX_RU1_ZERO_LENGTH_ERROR_IMR BIT(16)
+#define B_BE_RX_RU2_ZERO_LENGTH_ERROR_IMR BIT(15)
+#define B_BE_RX_RU3_ZERO_LENGTH_ERROR_IMR BIT(14)
+#define B_BE_RX_RU4_ZERO_LENGTH_ERROR_IMR BIT(13)
+#define B_BE_RX_RU5_ZERO_LENGTH_ERROR_IMR BIT(12)
+#define B_BE_RX_RU6_ZERO_LENGTH_ERROR_IMR BIT(11)
+#define B_BE_RX_RU7_ZERO_LENGTH_ERROR_IMR BIT(10)
+#define B_BE_RX_RXSTS_ZERO_LENGTH_ERROR_IMR BIT(9)
+#define B_BE_RX_CSI_ZERO_LENGTH_ERROR_IMR BIT(8)
+#define B_BE_PLE_DATA_OPT_FSM_HANG_IMR BIT(7)
+#define B_BE_PLE_RXDATA_REQUEST_BUFFER_FSM_HANG_IMR BIT(6)
+#define B_BE_PLE_TXRPT_REQUEST_BUFFER_FSM_HANG_IMR BIT(5)
+#define B_BE_PLE_WD_OPT_FSM_HANG_IMR BIT(4)
+#define B_BE_PLE_ENQ_FSM_HANG_IMR BIT(3)
+#define B_BE_RXDATA_ENQUE_ORDER_ERROR_IMR BIT(2)
+#define B_BE_RXSTS_ENQUE_ORDER_ERROR_IMR BIT(1)
+#define B_BE_RX_CSI_PKT_NUM_ERROR_IMR BIT(0)
+#define B_BE_RX_ERROR_FLAG_IMR_CLR (B_BE_RX_RXSTS_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU7_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU6_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU5_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU4_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU3_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU2_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU1_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU0_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_F2PCMD_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_TXRPT_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_CSI_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RXSTS_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU7_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU6_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU5_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU4_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU3_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU2_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU1_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU0_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_GET_NULL_PKT_ERROR_IMR)
+#define B_BE_RX_ERROR_FLAG_IMR_SET (B_BE_RX_RXSTS_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU7_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU6_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU5_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU4_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU3_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU2_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU1_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU0_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_F2PCMD_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_TXRPT_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_CSI_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RXSTS_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU7_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU6_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU5_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU4_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU3_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU2_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU1_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU0_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_GET_NULL_PKT_ERROR_IMR)
+
+#define R_BE_RX_CTRL_1 0x10C0C
+#define R_BE_RX_CTRL_1_C1 0x14C0C
+#define B_BE_RXDMA_TXRPT_QUEUE_ID_SW_MASK GENMASK(30, 25)
+#define B_BE_RXDMA_F2PCMDRPT_QUEUE_ID_SW_MASK GENMASK(23, 18)
+#define B_BE_RXDMA_TXRPT_PORT_ID_SW_MASK GENMASK(17, 14)
+#define B_BE_RXDMA_F2PCMDRPT_PORT_ID_SW_MASK GENMASK(13, 10)
+#define B_BE_DBG_SEL_MASK GENMASK(1, 0)
+#define WLCPU_RXCH2_QID 0xA
+
+#define R_BE_TX_ERROR_FLAG 0x10C6C
+#define R_BE_TX_ERROR_FLAG_C1 0x14C6C
+#define B_BE_TX_RU0_FSM_HANG_ERROR BIT(31)
+#define B_BE_TX_RU1_FSM_HANG_ERROR BIT(30)
+#define B_BE_TX_RU2_FSM_HANG_ERROR BIT(29)
+#define B_BE_TX_RU3_FSM_HANG_ERROR BIT(28)
+#define B_BE_TX_RU4_FSM_HANG_ERROR BIT(27)
+#define B_BE_TX_RU5_FSM_HANG_ERROR BIT(26)
+#define B_BE_TX_RU6_FSM_HANG_ERROR BIT(25)
+#define B_BE_TX_RU7_FSM_HANG_ERROR BIT(24)
+#define B_BE_TX_RU8_FSM_HANG_ERROR BIT(23)
+#define B_BE_TX_RU9_FSM_HANG_ERROR BIT(22)
+#define B_BE_TX_RU10_FSM_HANG_ERROR BIT(21)
+#define B_BE_TX_RU11_FSM_HANG_ERROR BIT(20)
+#define B_BE_TX_RU12_FSM_HANG_ERROR BIT(19)
+#define B_BE_TX_RU13_FSM_HANG_ERROR BIT(18)
+#define B_BE_TX_RU14_FSM_HANG_ERROR BIT(17)
+#define B_BE_TX_RU15_FSM_HANG_ERROR BIT(16)
+#define B_BE_TX_CSI_FSM_HANG_ERROR BIT(15)
+#define B_BE_TX_WD_PLD_ID_FSM_HANG_ERROR BIT(14)
+
+#define R_BE_TX_ERROR_FLAG_IMR 0x10C70
+#define R_BE_TX_ERROR_FLAG_IMR_C1 0x14C70
+#define B_BE_TX_RU0_FSM_HANG_ERROR_IMR BIT(31)
+#define B_BE_TX_RU1_FSM_HANG_ERROR_IMR BIT(30)
+#define B_BE_TX_RU2_FSM_HANG_ERROR_IMR BIT(29)
+#define B_BE_TX_RU3_FSM_HANG_ERROR_IMR BIT(28)
+#define B_BE_TX_RU4_FSM_HANG_ERROR_IMR BIT(27)
+#define B_BE_TX_RU5_FSM_HANG_ERROR_IMR BIT(26)
+#define B_BE_TX_RU6_FSM_HANG_ERROR_IMR BIT(25)
+#define B_BE_TX_RU7_FSM_HANG_ERROR_IMR BIT(24)
+#define B_BE_TX_RU8_FSM_HANG_ERROR_IMR BIT(23)
+#define B_BE_TX_RU9_FSM_HANG_ERROR_IMR BIT(22)
+#define B_BE_TX_RU10_FSM_HANG_ERROR_IMR BIT(21)
+#define B_BE_TX_RU11_FSM_HANG_ERROR_IMR BIT(20)
+#define B_BE_TX_RU12_FSM_HANG_ERROR_IMR BIT(19)
+#define B_BE_TX_RU13_FSM_HANG_ERROR_IMR BIT(18)
+#define B_BE_TX_RU14_FSM_HANG_ERROR_IMR BIT(17)
+#define B_BE_TX_RU15_FSM_HANG_ERROR_IMR BIT(16)
+#define B_BE_TX_CSI_FSM_HANG_ERROR_IMR BIT(15)
+#define B_BE_TX_WD_PLD_ID_FSM_HANG_ERROR_IMR BIT(14)
+#define B_BE_TX_ERROR_FLAG_IMR_CLR (B_BE_TX_WD_PLD_ID_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_CSI_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU15_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU14_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU13_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU12_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU11_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU10_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU9_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU8_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU7_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU6_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU5_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU4_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU3_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU2_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU1_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU0_FSM_HANG_ERROR_IMR)
+#define B_BE_TX_ERROR_FLAG_IMR_SET (B_BE_TX_WD_PLD_ID_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_CSI_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU15_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU14_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU13_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU12_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU11_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU10_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU9_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU8_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU7_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU6_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU5_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU4_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU3_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU2_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU1_FSM_HANG_ERROR_IMR | \
+ B_BE_TX_RU0_FSM_HANG_ERROR_IMR)
+
+#define R_BE_RX_ERROR_FLAG_1 0x10C84
+#define R_BE_RX_ERROR_FLAG_1_C1 0x14C84
+#define B_BE_RX_RU8_FSM_HANG_ERROR BIT(29)
+#define B_BE_RX_RU9_FSM_HANG_ERROR BIT(28)
+#define B_BE_RX_RU10_FSM_HANG_ERROR BIT(27)
+#define B_BE_RX_RU11_FSM_HANG_ERROR BIT(26)
+#define B_BE_RX_RU12_FSM_HANG_ERROR BIT(25)
+#define B_BE_RX_RU13_FSM_HANG_ERROR BIT(24)
+#define B_BE_RX_RU14_FSM_HANG_ERROR BIT(23)
+#define B_BE_RX_RU15_FSM_HANG_ERROR BIT(22)
+#define B_BE_RX_RU8_ZERO_LENGTH_ERROR BIT(17)
+#define B_BE_RX_RU9_ZERO_LENGTH_ERROR BIT(16)
+#define B_BE_RX_RU10_ZERO_LENGTH_ERROR BIT(15)
+#define B_BE_RX_RU11_ZERO_LENGTH_ERROR BIT(14)
+#define B_BE_RX_RU12_ZERO_LENGTH_ERROR BIT(13)
+#define B_BE_RX_RU13_ZERO_LENGTH_ERROR BIT(12)
+#define B_BE_RX_RU14_ZERO_LENGTH_ERROR BIT(11)
+#define B_BE_RX_RU15_ZERO_LENGTH_ERROR BIT(10)
+
+#define R_BE_RX_ERROR_FLAG_IMR_1 0x10C88
+#define R_BE_RX_ERROR_FLAG_IMR_1_C1 0x14C88
+#define B_BE_RX_RU8_FSM_HANG_ERROR_IMR BIT(29)
+#define B_BE_RX_RU9_FSM_HANG_ERROR_IMR BIT(28)
+#define B_BE_RX_RU10_FSM_HANG_ERROR_IMR BIT(27)
+#define B_BE_RX_RU11_FSM_HANG_ERROR_IMR BIT(26)
+#define B_BE_RX_RU12_FSM_HANG_ERROR_IMR BIT(25)
+#define B_BE_RX_RU13_FSM_HANG_ERROR_IMR BIT(24)
+#define B_BE_RX_RU14_FSM_HANG_ERROR_IMR BIT(23)
+#define B_BE_RX_RU15_FSM_HANG_ERROR_IMR BIT(22)
+#define B_BE_RX_RU8_ZERO_LENGTH_ERROR_IMR BIT(17)
+#define B_BE_RX_RU9_ZERO_LENGTH_ERROR_IMR BIT(16)
+#define B_BE_RX_RU10_ZERO_LENGTH_ERROR_IMR BIT(15)
+#define B_BE_RX_RU11_ZERO_LENGTH_ERROR_IMR BIT(14)
+#define B_BE_RX_RU12_ZERO_LENGTH_ERROR_IMR BIT(13)
+#define B_BE_RX_RU13_ZERO_LENGTH_ERROR_IMR BIT(12)
+#define B_BE_RX_RU14_ZERO_LENGTH_ERROR_IMR BIT(11)
+#define B_BE_RX_RU15_ZERO_LENGTH_ERROR_IMR BIT(10)
+#define B_BE_TX_ERROR_FLAG_IMR_1_CLR (B_BE_RX_RU8_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU9_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU10_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU11_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU12_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU13_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU14_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU15_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU8_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU9_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU10_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU11_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU12_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU13_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU14_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU15_ZERO_LENGTH_ERROR_IMR)
+#define B_BE_TX_ERROR_FLAG_IMR_1_SET (B_BE_RX_RU8_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU9_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU10_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU11_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU12_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU13_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU14_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU15_FSM_HANG_ERROR_IMR | \
+ B_BE_RX_RU8_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU9_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU10_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU11_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU12_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU13_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU14_ZERO_LENGTH_ERROR_IMR | \
+ B_BE_RX_RU15_ZERO_LENGTH_ERROR_IMR)
+
#define R_BE_WMTX_MOREDATA_TSFT_STMP_CTL 0x10E08
#define R_BE_WMTX_MOREDATA_TSFT_STMP_CTL_C1 0x14E08
#define B_BE_TSFT_OFS_MASK GENMASK(31, 16)
@@ -3954,6 +6676,173 @@
#define B_BE_UPD_HGQMD BIT(1)
#define B_BE_UPD_TIMIE BIT(0)
+#define R_BE_WMTX_TCR_BE_4 0x10E2C
+#define R_BE_WMTX_TCR_BE_4_C1 0x14E2C
+#define B_BE_UL_EHT_MUMIMO_LTF_MODE BIT(30)
+#define B_BE_UL_HE_MUMIMO_LTF_MODE BIT(29)
+#define B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK GENMASK(28, 24)
+#define B_BE_EHT_HE_PPDU_2XLTF_ZLD_USTIMER_MASK GENMASK(20, 16)
+#define B_BE_NON_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(12, 8)
+#define B_BE_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(4, 0)
+
+#define R_BE_RSP_CHK_SIG 0x11000
+#define R_BE_RSP_CHK_SIG_C1 0x15000
+#define B_BE_RSP_STATIC_RTS_CHK_SERV_BW_EN BIT(30)
+#define B_BE_RSP_TBPPDU_CHK_PWR BIT(29)
+#define B_BE_RESP_PAIR_MACID_LEN_EN BIT(25)
+#define B_BE_RESP_TX_ABORT_TEST_EN BIT(24)
+#define B_BE_RESP_ER_SU_RU106_EN BIT(23)
+#define B_BE_RESP_ER_SU_EN BIT(22)
+#define B_BE_TXDATA_END_PS_OPT BIT(18)
+#define B_BE_CHECK_SOUNDING_SEQ BIT(17)
+#define B_BE_RXBA_IGNOREA2 BIT(16)
+#define B_BE_ACKTO_CCK_MASK GENMASK(15, 8)
+#define B_BE_ACKTO_MASK GENMASK(7, 0)
+
+#define R_BE_TRXPTCL_RESP_0 0x11004
+#define R_BE_TRXPTCL_RESP_0_C1 0x15004
+#define B_BE_WMAC_RESP_STBC_EN BIT(31)
+#define B_BE_WMAC_RXFTM_TXACK_SB BIT(30)
+#define B_BE_WMAC_RXFTM_TXACKBWEQ BIT(29)
+#define B_BE_RESP_TB_CHK_TXTIME BIT(24)
+#define B_BE_RSP_CHK_CCA BIT(23)
+#define B_BE_WMAC_LDPC_EN BIT(22)
+#define B_BE_WMAC_SGIEN BIT(21)
+#define B_BE_WMAC_SPLCPEN BIT(20)
+#define B_BE_RESP_EHT_MCS15_REF BIT(19)
+#define B_BE_RESP_EHT_MCS14_REF BIT(18)
+#define B_BE_WMAC_BESP_EARLY_TXBA BIT(17)
+#define B_BE_WMAC_MBA_DUR_FORCE BIT(16)
+#define B_BE_WMAC_SPEC_SIFS_OFDM_MASK GENMASK(15, 8)
+#define WMAC_SPEC_SIFS_OFDM_1115E 0x11
+#define B_BE_WMAC_SPEC_SIFS_CCK_MASK GENMASK(7, 0)
+
+#define R_BE_TRXPTCL_RESP_1 0x11008
+#define R_BE_TRXPTCL_RESP_1_C1 0x15008
+#define B_BE_WMAC_RESP_SR_MODE_EN BIT(31)
+#define B_BE_FTM_RRSR_RATE_EN_MASK GENMASK(28, 24)
+#define B_BE_NESS_MASK GENMASK(23, 22)
+#define B_BE_WMAC_RESP_DOPPLEB_BE_EN BIT(21)
+#define B_BE_WMAC_RESP_DCM_EN BIT(20)
+#define B_BE_WMAC_CLR_ABORT_RESP_TX_CNT BIT(15)
+#define B_BE_WMAC_RESP_REF_RATE_SEL BIT(12)
+#define B_BE_WMAC_RESP_REF_RATE_MASK GENMASK(11, 0)
+
+#define R_BE_MAC_LOOPBACK 0x11020
+#define R_BE_MAC_LOOPBACK_C1 0x15020
+#define B_BE_MACLBK_DIS_GCLK BIT(30)
+#define B_BE_MACLBK_STS_EN BIT(29)
+#define B_BE_MACLBK_RDY_PERIOD_MASK GENMASK(28, 17)
+#define B_BE_MACLBK_PLCP_DLY_MASK GENMASK(16, 8)
+#define S_BE_MACLBK_PLCP_DLY_DEF 0x28
+#define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3)
+#define B_BE_MACLBK_EN BIT(0)
+
+#define R_BE_WMAC_NAV_CTL 0x11080
+#define R_BE_WMAC_NAV_CTL_C1 0x15080
+#define B_BE_WMAC_NAV_UPPER_EN BIT(26)
+#define B_BE_WMAC_0P125US_TIMER_MASK GENMASK(25, 18)
+#define B_BE_WMAC_PLCP_UP_NAV_EN BIT(17)
+#define B_BE_WMAC_TF_UP_NAV_EN BIT(16)
+#define B_BE_WMAC_NAV_UPPER_MASK GENMASK(15, 8)
+#define NAV_25MS 0xC4
+#define B_BE_WMAC_RTS_RST_DUR_MASK GENMASK(7, 0)
+
+#define R_BE_RXTRIG_TEST_USER_2 0x110B0
+#define R_BE_RXTRIG_TEST_USER_2_C1 0x150B0
+#define B_BE_RXTRIG_MACID_MASK GENMASK(31, 24)
+#define B_BE_RXTRIG_RU26_DIS BIT(21)
+#define B_BE_RXTRIG_FCSCHK_EN BIT(20)
+#define B_BE_RXTRIG_PORT_SEL_MASK GENMASK(19, 17)
+#define B_BE_RXTRIG_EN BIT(16)
+#define B_BE_RXTRIG_USERINFO_2_MASK GENMASK(15, 0)
+
+#define R_BE_TRXPTCL_ERROR_INDICA_MASK 0x110BC
+#define R_BE_TRXPTCL_ERROR_INDICA_MASK_C1 0x150BC
+#define B_BE_WMAC_FTM_TIMEOUT_MODE BIT(30)
+#define B_BE_WMAC_FTM_TIMEOUT_THR_MASK GENMASK(29, 24)
+#define B_BE_WMAC_MODE BIT(22)
+#define B_BE_WMAC_TIMETOUT_THR_MASK GENMASK(21, 16)
+#define B_BE_RMAC_BFMER BIT(9)
+#define B_BE_RMAC_FTM BIT(8)
+#define B_BE_RMAC_CSI BIT(7)
+#define B_BE_TMAC_MIMO_CTRL BIT(6)
+#define B_BE_TMAC_RXTB BIT(5)
+#define B_BE_TMAC_HWSIGB_GEN BIT(4)
+#define B_BE_TMAC_TXPLCP BIT(3)
+#define B_BE_TMAC_RESP BIT(2)
+#define B_BE_TMAC_TXCTL BIT(1)
+#define B_BE_TMAC_MACTX BIT(0)
+#define B_BE_TRXPTCL_ERROR_INDICA_MASK_CLR (B_BE_TMAC_MACTX | \
+ B_BE_TMAC_TXCTL | \
+ B_BE_TMAC_RESP | \
+ B_BE_TMAC_TXPLCP | \
+ B_BE_TMAC_HWSIGB_GEN | \
+ B_BE_TMAC_RXTB | \
+ B_BE_TMAC_MIMO_CTRL | \
+ B_BE_RMAC_CSI | \
+ B_BE_RMAC_FTM | \
+ B_BE_RMAC_BFMER)
+#define B_BE_TRXPTCL_ERROR_INDICA_MASK_SET (B_BE_TMAC_MACTX | \
+ B_BE_TMAC_TXCTL | \
+ B_BE_TMAC_RESP | \
+ B_BE_TMAC_TXPLCP | \
+ B_BE_TMAC_HWSIGB_GEN | \
+ B_BE_TMAC_RXTB | \
+ B_BE_TMAC_MIMO_CTRL | \
+ B_BE_RMAC_CSI | \
+ B_BE_RMAC_FTM | \
+ B_BE_RMAC_BFMER)
+
+#define R_BE_TRXPTCL_ERROR_INDICA 0x110C0
+#define R_BE_TRXPTCL_ERROR_INDICA_C1 0x150C0
+#define B_BE_BFMER_ERR_FLAG BIT(9)
+#define B_BE_FTM_ERROR_FLAG_CLR BIT(8)
+#define B_BE_CSI_ERROR_FLAG_CLR BIT(7)
+#define B_BE_MIMOCTRL_ERROR_FLAG_CLR BIT(6)
+#define B_BE_RXTB_ERROR_FLAG_CLR BIT(5)
+#define B_BE_HWSIGB_GEN_ERROR_FLAG_CLR BIT(4)
+#define B_BE_TXPLCP_ERROR_FLAG_CLR BIT(3)
+#define B_BE_RESP_ERROR_FLAG_CLR BIT(2)
+#define B_BE_TXCTL_ERROR_FLAG_CLR BIT(1)
+#define B_BE_MACTX_ERROR_FLAG_CLR BIT(0)
+
+#define R_BE_DBGSEL_TRXPTCL 0x110F4
+#define R_BE_DBGSEL_TRXPTCL_C1 0x150F4
+#define B_BE_WMAC_CHNSTS_STATE_MASK GENMASK(19, 16)
+#define B_BE_DBGSEL_TRIGCMD_SEL_MASK GENMASK(11, 8)
+#define B_BE_DBGSEL_TRXPTCL_MASK GENMASK(7, 0)
+
+#define R_BE_PHYINFO_ERR_IMR_V1 0x110F8
+#define R_BE_PHYINFO_ERR_IMR_V1_C1 0x150F8
+#define B_BE_PHYINTF_RXTB_WIDTH_MASK GENMASK(31, 30)
+#define B_BE_PHYINTF_RXTB_EN_PHASE_MASK GENMASK(29, 28)
+#define B_BE_PHYINTF_MIMO_WIDTH_MASK GENMASK(27, 26)
+#define B_BE_PHYINTF_MIMO_EN_PHASE_MASK GENMASK(25, 24)
+#define B_BE_PHYINTF_TIMEOUT_THR_V1_MASK GENMASK(21, 16)
+#define B_BE_CSI_ON_TIMEOUT_EN BIT(5)
+#define B_BE_STS_ON_TIMEOUT_EN BIT(4)
+#define B_BE_DATA_ON_TIMEOUT_EN BIT(3)
+#define B_BE_OFDM_CCA_TIMEOUT_EN BIT(2)
+#define B_BE_CCK_CCA_TIMEOUT_EN BIT(1)
+#define B_BE_PHY_TXON_TIMEOUT_EN BIT(0)
+#define B_BE_PHYINFO_ERR_IMR_V1_CLR (B_BE_PHY_TXON_TIMEOUT_EN | \
+ B_BE_CCK_CCA_TIMEOUT_EN | \
+ B_BE_OFDM_CCA_TIMEOUT_EN | \
+ B_BE_DATA_ON_TIMEOUT_EN | \
+ B_BE_STS_ON_TIMEOUT_EN | \
+ B_BE_CSI_ON_TIMEOUT_EN)
+#define B_BE_PHYINFO_ERR_IMR_V1_SET 0
+
+#define R_BE_PHYINFO_ERR_ISR 0x110FC
+#define R_BE_PHYINFO_ERR_ISR_C1 0x150FC
+#define B_BE_CSI_ON_TIMEOUT_ERR BIT(5)
+#define B_BE_STS_ON_TIMEOUT_ERR BIT(4)
+#define B_BE_DATA_ON_TIMEOUT_ERR BIT(3)
+#define B_BE_OFDM_CCA_TIMEOUT_ERR BIT(2)
+#define B_BE_CCK_CCA_TIMEOUT_ERR BIT(1)
+#define B_BE_PHY_TXON_TIMEOUT_ERR BIT(0)
+
#define R_BE_BFMEE_RESP_OPTION 0x11180
#define R_BE_BFMEE_RESP_OPTION_C1 0x15180
#define B_BE_BFMEE_CSI_SEC_TYPE_SH 20
@@ -4009,6 +6898,103 @@
#define B_BE_BFMEE_HT_CSI_RATE_MASK GENMASK(7, 0)
#define CSI_INIT_RATE_EHT 0x3
+#define R_BE_WMAC_ACK_BA_RESP_LEGACY 0x11200
+#define R_BE_WMAC_ACK_BA_RESP_LEGACY_C1 0x15200
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_NSTR BIT(16)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_TX_NAV BIT(15)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_INTRA_NAV BIT(14)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_BASIC_NAV BIT(13)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_BTCCA BIT(12)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_CCA160 BIT(5)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_CCA80 BIT(4)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_CCA40 BIT(3)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_SEC_CCA20 BIT(2)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_EDCCA BIT(1)
+#define B_BE_ACK_BA_RESP_LEGACY_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_ACK_BA_RESP_HE 0x11204
+#define R_BE_WMAC_ACK_BA_RESP_HE_C1 0x15204
+#define B_BE_ACK_BA_RESP_HE_CHK_NSTR BIT(16)
+#define B_BE_ACK_BA_RESP_HE_CHK_TX_NAV BIT(15)
+#define B_BE_ACK_BA_RESP_HE_CHK_INTRA_NAV BIT(14)
+#define B_BE_ACK_BA_RESP_HE_CHK_BASIC_NAV BIT(13)
+#define B_BE_ACK_BA_RESP_HE_CHK_BTCCA BIT(12)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_ACK_BA_RESP_HE_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_ACK_BA_RESP_HE_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_CCA160 BIT(5)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_CCA80 BIT(4)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_CCA40 BIT(3)
+#define B_BE_ACK_BA_RESP_HE_CHK_SEC_CCA20 BIT(2)
+#define B_BE_ACK_BA_RESP_HE_CHK_EDCCA BIT(1)
+#define B_BE_ACK_BA_RESP_HE_CHK_CCA BIT(0)
+
+#define R_BE_WMAC_ACK_BA_RESP_EHT_LEG_PUNC 0x11208
+#define R_BE_WMAC_ACK_BA_RESP_EHT_LEG_PUNC_C1 0x15208
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_NSTR BIT(16)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_TX_NAV BIT(15)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_INTRA_NAV BIT(14)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_BASIC_NAV BIT(13)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_BTCCA BIT(12)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_EDCCA160 BIT(11)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_EDCCA80 BIT(10)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_EDCCA40 BIT(9)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_EDCCA20 BIT(8)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_EDCCA_PER20_BMP BIT(7)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_CCA_PER20_BMP BIT(6)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_CCA160 BIT(5)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_CCA80 BIT(4)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_CCA40 BIT(3)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_SEC_CCA20 BIT(2)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_EDCCA BIT(1)
+#define B_BE_ACK_BA_EHT_LEG_PUNC_CHK_CCA BIT(0)
+
+#define R_BE_RCR 0x11400
+#define R_BE_RCR_C1 0x15400
+#define B_BE_BUSY_CHKSN BIT(15)
+#define B_BE_DYN_CHEN BIT(14)
+#define B_BE_AUTO_RST BIT(13)
+#define B_BE_TIMER_SEL BIT(12)
+#define B_BE_STOP_RX_IN BIT(11)
+#define B_BE_PSR_RDY_CHKDIS BIT(10)
+#define B_BE_DRV_INFO_SZ_MASK GENMASK(9, 8)
+#define B_BE_HDR_CNV_SZ_MASK GENMASK(7, 6)
+#define B_BE_PHY_RPT_SZ_MASK GENMASK(5, 4)
+#define B_BE_CH_EN BIT(0)
+
+#define R_BE_DLK_PROTECT_CTL 0x11402
+#define R_BE_DLK_PROTECT_CTL_C1 0x15402
+#define B_BE_RX_DLK_CCA_TIME_MASK GENMASK(15, 8)
+#define TRXCFG_RMAC_CCA_TO 32
+#define B_BE_RX_DLK_DATA_TIME_MASK GENMASK(7, 4)
+#define TRXCFG_RMAC_DATA_TO 15
+#define B_BE_RX_DLK_RST_FSM BIT(3)
+#define B_BE_RX_DLK_RST_SKIPDMA BIT(2)
+#define B_BE_RX_DLK_RST_EN BIT(1)
+#define B_BE_RX_DLK_INT_EN BIT(0)
+
+#define R_BE_PLCP_HDR_FLTR 0x11404
+#define R_BE_PLCP_HDR_FLTR_C1 0x15404
+#define B_BE_PLCP_RXFA_RESET_TYPE_MASK GENMASK(15, 12)
+#define B_BE_PLCP_RXFA_RESET_EN BIT(11)
+#define B_BE_DIS_CHK_MIN_LEN BIT(8)
+#define B_BE_HE_SIGB_CRC_CHK BIT(6)
+#define B_BE_VHT_MU_SIGB_CRC_CHK BIT(5)
+#define B_BE_VHT_SU_SIGB_CRC_CHK BIT(4)
+#define B_BE_SIGA_CRC_CHK BIT(3)
+#define B_BE_LSIG_PARITY_CHK_EN BIT(2)
+#define B_BE_CCK_SIG_CHK BIT(1)
+#define B_BE_CCK_CRC_CHK BIT(0)
+
#define R_BE_RX_FLTR_OPT 0x11420
#define R_BE_RX_FLTR_OPT_C1 0x15420
#define B_BE_UID_FILTER_MASK GENMASK(31, 24)
@@ -4028,12 +7014,168 @@
#define B_BE_A_A1_MATCH BIT(1)
#define B_BE_SNIFFER_MODE BIT(0)
+#define R_BE_CTRL_FLTR 0x11424
+#define R_BE_CTRL_FLTR_C1 0x15424
+#define B_BE_CTRL_STYPE_MASK GENMASK(15, 0)
+#define RX_FLTR_FRAME_DROP_BE 0x0000
+#define RX_FLTR_FRAME_ACCEPT_BE 0xFFFF
+
+#define R_BE_MGNT_FLTR 0x11428
+#define R_BE_MGNT_FLTR_C1 0x15428
+#define B_BE_MGNT_STYPE_MASK GENMASK(15, 0)
+
+#define R_BE_DATA_FLTR 0x1142C
+#define R_BE_DATA_FLTR_C1 0x1542C
+#define B_BE_DATA_STYPE_MASK GENMASK(15, 0)
+
+#define R_BE_ADDR_CAM_CTRL 0x11434
+#define R_BE_ADDR_CAM_CTRL_C1 0x15434
+#define B_BE_ADDR_CAM_RANGE_MASK GENMASK(23, 16)
+#define ADDR_CAM_SERCH_RANGE 0x7f
+#define B_BE_ADDR_CAM_CMPLIMT_MASK GENMASK(15, 12)
+#define B_BE_ADDR_CAM_IORST BIT(10)
+#define B_BE_DIS_ADDR_CLK_GATED BIT(9)
+#define B_BE_ADDR_CAM_CLR BIT(8)
+#define B_BE_ADDR_CAM_A2_B0_CHK BIT(2)
+#define B_BE_ADDR_CAM_SRCH_PERPKT BIT(1)
+#define B_BE_ADDR_CAM_EN BIT(0)
+
+#define R_BE_RESPBA_CAM_CTRL 0x1143C
+#define R_BE_RESPBA_CAM_CTRL_C1 0x1543C
+#define B_BE_BACAM_SKIP_ALL_QOSNULL BIT(24)
+#define B_BE_BACAM_STD_SSN_SEL BIT(20)
+#define B_BE_BACAM_TEMP_SZ_MASK GENMASK(17, 16)
+#define B_BE_BACAM_RST_IDX_MASK GENMASK(15, 8)
+#define B_BE_BACAM_SHIFT_POLL BIT(7)
+#define B_BE_BACAM_IORST BIT(6)
+#define B_BE_BACAM_GCK_DIS BIT(5)
+#define B_BE_COMPL_VAL BIT(3)
+#define B_BE_SSN_SEL BIT(2)
+#define B_BE_BACAM_RST_MASK GENMASK(1, 0)
+#define S_BE_BACAM_RST_DONE 0
+#define S_BE_BACAM_RST_ENT 1
+#define S_BE_BACAM_RST_ALL 2
+
+#define R_BE_RX_SR_CTRL 0x1144A
+#define R_BE_RX_SR_CTRL_C1 0x1544A
+#define B_BE_SR_OP_MODE_MASK GENMASK(5, 4)
+#define B_BE_SRG_CHK_EN BIT(2)
+#define B_BE_SR_CTRL_PLCP_EN BIT(1)
+#define B_BE_SR_EN BIT(0)
+
#define R_BE_CSIRPT_OPTION 0x11464
#define R_BE_CSIRPT_OPTION_C1 0x15464
#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26)
#define B_BE_CSIPRT_HESU_AID_EN BIT(25)
#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_BE_RX_ERR_ISR 0x114F4
+#define R_BE_RX_ERR_ISR_C1 0x154F4
+#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
+#define B_BE_RX_ERR_STS_ACT_TO BIT(8)
+#define B_BE_RX_ERR_CSI_ACT_TO BIT(7)
+#define B_BE_RX_ERR_ACT_TO BIT(6)
+#define B_BE_CSI_DATAON_ASSERT_TO BIT(5)
+#define B_BE_DATAON_ASSERT_TO BIT(4)
+#define B_BE_CCA_ASSERT_TO BIT(3)
+#define B_BE_RX_ERR_DMA_TO BIT(2)
+#define B_BE_RX_ERR_DATA_TO BIT(1)
+#define B_BE_RX_ERR_CCA_TO BIT(0)
+
+#define R_BE_RX_ERR_IMR 0x114F8
+#define R_BE_RX_ERR_IMR_C1 0x154F8
+#define B_BE_RX_ERR_TRIG_ACT_TO_MSK BIT(9)
+#define B_BE_RX_ERR_STS_ACT_TO_MSK BIT(8)
+#define B_BE_RX_ERR_CSI_ACT_TO_MSK BIT(7)
+#define B_BE_RX_ERR_ACT_TO_MSK BIT(6)
+#define B_BE_CSI_DATAON_ASSERT_TO_MSK BIT(5)
+#define B_BE_DATAON_ASSERT_TO_MSK BIT(4)
+#define B_BE_CCA_ASSERT_TO_MSK BIT(3)
+#define B_BE_RX_ERR_DMA_TO_MSK BIT(2)
+#define B_BE_RX_ERR_DATA_TO_MSK BIT(1)
+#define B_BE_RX_ERR_CCA_TO_MSK BIT(0)
+#define B_BE_RX_ERR_IMR_CLR (B_BE_RX_ERR_CCA_TO_MSK | \
+ B_BE_RX_ERR_DATA_TO_MSK | \
+ B_BE_RX_ERR_DMA_TO_MSK | \
+ B_BE_CCA_ASSERT_TO_MSK | \
+ B_BE_DATAON_ASSERT_TO_MSK | \
+ B_BE_CSI_DATAON_ASSERT_TO_MSK | \
+ B_BE_RX_ERR_ACT_TO_MSK | \
+ B_BE_RX_ERR_CSI_ACT_TO_MSK | \
+ B_BE_RX_ERR_STS_ACT_TO_MSK | \
+ B_BE_RX_ERR_TRIG_ACT_TO_MSK)
+#define B_BE_RX_ERR_IMR_SET (B_BE_RX_ERR_ACT_TO_MSK | \
+ B_BE_RX_ERR_STS_ACT_TO_MSK | \
+ B_BE_RX_ERR_TRIG_ACT_TO_MSK)
+
+#define R_BE_RX_PLCP_EXT_OPTION_1 0x11514
+#define R_BE_RX_PLCP_EXT_OPTION_1_C1 0x15514
+#define B_BE_PLCP_CLOSE_RX_UNSPUUORT BIT(19)
+#define B_BE_PLCP_CLOSE_RX_BB_BRK BIT(18)
+#define B_BE_PLCP_CLOSE_RX_PSDU_PRES BIT(17)
+#define B_BE_PLCP_CLOSE_RX_NDP BIT(16)
+#define B_BE_PLCP_NSS_SRC BIT(11)
+#define B_BE_PLCP_DOPPLEB_BE_SRC BIT(10)
+#define B_BE_PLCP_STBC_SRC BIT(9)
+#define B_BE_PLCP_SU_PSDU_LEN_SRC BIT(8)
+#define B_BE_PLCP_RXSB_SRC BIT(7)
+#define B_BE_PLCP_BW_SRC_MASK GENMASK(6, 5)
+#define B_BE_PLCP_GILTF_SRC BIT(4)
+#define B_BE_PLCP_NSTS_SRC BIT(3)
+#define B_BE_PLCP_MCS_SRC BIT(2)
+#define B_BE_PLCP_CH20_WIDATA_SRC BIT(1)
+#define B_BE_PLCP_PPDU_TYPE_SRC BIT(0)
+
+#define R_BE_RESP_CSI_RESERVED_PAGE 0x11810
+#define R_BE_RESP_CSI_RESERVED_PAGE_C1 0x15810
+#define B_BE_CSI_RESERVED_PAGE_NUM_MASK GENMASK(27, 16)
+#define B_BE_CSI_RESERVED_START_PAGE_MASK GENMASK(11, 0)
+
+#define R_BE_RESP_IMR 0x11884
+#define R_BE_RESP_IMR_C1 0x15884
+#define B_BE_RESP_TBL_FLAG_ERR_ISR_EN BIT(17)
+#define B_BE_RESP_SEC_DOUBLE_HIT_ERR_ISR_EN BIT(16)
+#define B_BE_RESP_WRPTR_CROSS_ERR_ISR_EN BIT(15)
+#define B_BE_RESP_TOO_MANY_PLD_ERR_ISR_EN BIT(14)
+#define B_BE_RESP_TXDMA_READ_DATA_ERR_ISR_EN BIT(13)
+#define B_BE_RESP_PLDID_RDY_ERR_ISR_EN BIT(12)
+#define B_BE_RESP_RX_OVERWRITE_ERR_ISR_EN BIT(11)
+#define B_BE_RESP_RXDMA_WRPTR_INVLD_ERR_ISR_EN BIT(10)
+#define B_BE_RESP_RXDMA_REQ_INVLD_ERR_ISR_EN BIT(9)
+#define B_BE_RESP_RXDMA_REQ_MACID_ERR_ISR_EN BIT(8)
+#define B_BE_RESP_TXCMD_TX_ST_ABORT_ERR_ISR_EN BIT(6)
+#define B_BE_RESP_TXCMD_DMAC_PROC_ERR_ISR_EN BIT(5)
+#define B_BE_RESP_TXCMD_TBL_ERR_ISR_EN BIT(4)
+#define B_BE_RESP_INITCMD_RX_ST_ABORT_ERR_ISR_EN BIT(3)
+#define B_BE_RESP_INITCMD_RESERVD_PAGE_ABORT_ERR_ISR_EN BIT(2)
+#define B_BE_RESP_INITCMD_TX_ST_ABORT_ERR_ISR_EN BIT(1)
+#define B_BE_RESP_DMAC_PROC_ERR_ISR_EN BIT(0)
+#define B_BE_RESP_IMR_CLR (B_BE_RESP_DMAC_PROC_ERR_ISR_EN | \
+ B_BE_RESP_INITCMD_TX_ST_ABORT_ERR_ISR_EN | \
+ B_BE_RESP_INITCMD_RX_ST_ABORT_ERR_ISR_EN | \
+ B_BE_RESP_TXCMD_TBL_ERR_ISR_EN | \
+ B_BE_RESP_TXCMD_DMAC_PROC_ERR_ISR_EN | \
+ B_BE_RESP_TXCMD_TX_ST_ABORT_ERR_ISR_EN | \
+ B_BE_RESP_RXDMA_REQ_MACID_ERR_ISR_EN | \
+ B_BE_RESP_RXDMA_REQ_INVLD_ERR_ISR_EN | \
+ B_BE_RESP_RXDMA_WRPTR_INVLD_ERR_ISR_EN | \
+ B_BE_RESP_RX_OVERWRITE_ERR_ISR_EN | \
+ B_BE_RESP_PLDID_RDY_ERR_ISR_EN | \
+ B_BE_RESP_TXDMA_READ_DATA_ERR_ISR_EN | \
+ B_BE_RESP_TOO_MANY_PLD_ERR_ISR_EN | \
+ B_BE_RESP_WRPTR_CROSS_ERR_ISR_EN | \
+ B_BE_RESP_SEC_DOUBLE_HIT_ERR_ISR_EN)
+#define B_BE_RESP_IMR_SET (B_BE_RESP_DMAC_PROC_ERR_ISR_EN | \
+ B_BE_RESP_INITCMD_TX_ST_ABORT_ERR_ISR_EN | \
+ B_BE_RESP_INITCMD_RX_ST_ABORT_ERR_ISR_EN | \
+ B_BE_RESP_TXCMD_TBL_ERR_ISR_EN | \
+ B_BE_RESP_TXCMD_DMAC_PROC_ERR_ISR_EN | \
+ B_BE_RESP_TXCMD_TX_ST_ABORT_ERR_ISR_EN | \
+ B_BE_RESP_RX_OVERWRITE_ERR_ISR_EN | \
+ B_BE_RESP_PLDID_RDY_ERR_ISR_EN | \
+ B_BE_RESP_WRPTR_CROSS_ERR_ISR_EN | \
+ B_BE_RESP_SEC_DOUBLE_HIT_ERR_ISR_EN)
+
#define R_BE_PWR_MODULE 0x11900
#define R_BE_PWR_MODULE_C1 0x15900
@@ -4045,6 +7187,17 @@
#define R_BE_PWR_RU_LMT 0x12048
#define R_BE_PWR_RU_LMT_MAX 0x120E4
+#define R_BE_C0_TXPWR_IMR 0x128E0
+#define R_BE_C0_TXPWR_IMR_C1 0x168E0
+#define B_BE_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_BE_C0_TXPWR_IMR_CLR B_BE_FSM_TIMEOUT_ERR_INT_EN
+#define B_BE_C0_TXPWR_IMR_SET B_BE_FSM_TIMEOUT_ERR_INT_EN
+
+#define R_BE_TXPWR_ERR_FLAG 0x128E4
+#define R_BE_TXPWR_ERR_IMR 0x128E0
+#define R_BE_TXPWR_ERR_FLAG_C1 0x158E4
+#define R_BE_TXPWR_ERR_IMR_C1 0x158E0
+
#define CMAC1_START_ADDR_BE 0x14000
#define CMAC1_END_ADDR_BE 0x17FFF
@@ -4310,6 +7463,11 @@
#define B_P0_RSTB_WATCH_DOG BIT(0)
#define B_P1_RSTB_WATCH_DOG BIT(1)
#define B_UPD_P0_EN BIT(31)
+#define R_SPOOF_CG 0x00B4
+#define B_SPOOF_CG_EN BIT(17)
+#define R_DFS_FFT_CG 0x00B8
+#define B_DFS_CG_EN BIT(1)
+#define B_DFS_FFT_EN BIT(0)
#define R_ANAPAR_PW15 0x030C
#define B_ANAPAR_PW15 GENMASK(31, 24)
#define B_ANAPAR_PW15_H GENMASK(27, 24)
@@ -4372,6 +7530,8 @@
#define R_PHY_STS_BITMAP_HT 0x076C
#define R_PHY_STS_BITMAP_VHT 0x0770
#define R_PHY_STS_BITMAP_HE 0x0774
+#define R_EDCCA_RPTREG_SEL_BE 0x078C
+#define B_EDCCA_RPTREG_SEL_BE_MSK GENMASK(22, 20)
#define R_PMAC_GNT 0x0980
#define B_PMAC_GNT_TXEN BIT(0)
#define B_PMAC_GNT_RXEN BIT(16)
@@ -4431,12 +7591,18 @@
#define B_IOQ_IQK_DPK_EN BIT(1)
#define R_GNT_BT_WGT_EN 0x0C6C
#define B_GNT_BT_WGT_EN BIT(21)
+#define R_TX_COLLISION_T2R_ST 0x0C70
+#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
+#define R_TXGATING 0x0C74
+#define B_TXGATING_EN BIT(4)
#define R_PD_ARBITER_OFF 0x0C80
#define B_PD_ARBITER_OFF BIT(31)
#define R_SNDCCA_A1 0x0C9C
#define B_SNDCCA_A1_EN GENMASK(19, 12)
#define R_SNDCCA_A2 0x0CA0
#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_TX_COLLISION_T2R_ST_BE 0x0CC8
+#define B_TX_COLLISION_T2R_ST_BE_M GENMASK(13, 8)
#define R_RXHT_MCS_LIMIT 0x0D18
#define B_RXHT_MCS_LIMIT GENMASK(9, 8)
#define R_RXVHT_MCS_LIMIT 0x0D18
@@ -4455,6 +7621,10 @@
#define R_BRK_ASYNC_RST_EN_1 0x0DC0
#define R_BRK_ASYNC_RST_EN_2 0x0DC4
#define R_BRK_ASYNC_RST_EN_3 0x0DC8
+#define R_CTLTOP 0x1008
+#define B_CTLTOP_ON BIT(23)
+#define B_CTLTOP_VAL GENMASK(15, 12)
+#define R_EDCCA_RPT_SEL_BE 0x10CC
#define R_S0_HW_SI_DIS 0x1200
#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
@@ -4486,6 +7656,14 @@
#define R_CFO_COMP_SEG0_H 0x1388
#define R_CFO_COMP_SEG0_CTRL 0x138C
#define R_DBG32_D 0x1730
+#define R_EDCCA_RPT_A 0x1738
+#define R_EDCCA_RPT_B 0x173c
+#define B_EDCCA_RPT_B_FB BIT(7)
+#define B_EDCCA_RPT_B_P20 BIT(6)
+#define B_EDCCA_RPT_B_S20 BIT(5)
+#define B_EDCCA_RPT_B_S40 BIT(4)
+#define B_EDCCA_RPT_B_S80 BIT(3)
+#define B_EDCCA_RPT_B_PATH_MASK GENMASK(2, 1)
#define R_SWSI_V1 0x174C
#define B_SWSI_W_BUSY_V1 BIT(24)
#define B_SWSI_R_BUSY_V1 BIT(25)
@@ -4547,6 +7725,8 @@
#define R_S0_ADDCK 0x1E00
#define B_S0_ADDCK_I GENMASK(9, 0)
#define B_S0_ADDCK_Q GENMASK(19, 10)
+#define R_EDCCA_RPT_SEL 0x20CC
+#define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0)
#define R_ADC_FIFO 0x20fc
#define B_ADC_FIFO_RST GENMASK(31, 24)
#define B_ADC_FIFO_RXK GENMASK(31, 16)
@@ -4593,6 +7773,8 @@
#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
+#define R_EDCCA_RPT_A_BE 0x2E38
+#define R_EDCCA_RPT_B_BE 0x2E3C
#define R_S1_HW_SI_DIS 0x3200
#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P1_RXCK 0x32A0
@@ -4636,6 +7818,7 @@
#define B_SEG0CSI_EN BIT(23)
#define R_BSS_CLR_MAP 0x43ac
#define R_BSS_CLR_MAP_V1 0x43B0
+#define R_BSS_CLR_MAP_V2 0x4EB0
#define B_BSS_CLR_MAP_VLD0 BIT(28)
#define B_BSS_CLR_MAP_TGT GENMASK(27, 22)
#define B_BSS_CLR_MAP_STAID GENMASK(21, 11)
@@ -4800,9 +7983,9 @@
#define R_SEG0R_PD_V2 0x6A74
#define R_SEG0R_EDCCA_LVL 0x4840
#define R_SEG0R_EDCCA_LVL_V1 0x4884
-#define B_SEG0R_PPDU_LVL_MSK GENMASK(31, 24)
-#define B_SEG0R_EDCCA_LVL_P_MSK GENMASK(15, 8)
-#define B_SEG0R_EDCCA_LVL_A_MSK GENMASK(7, 0)
+#define B_EDCCA_LVL_MSK3 GENMASK(31, 24)
+#define B_EDCCA_LVL_MSK1 GENMASK(15, 8)
+#define B_EDCCA_LVL_MSK0 GENMASK(7, 0)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
@@ -4929,6 +8112,8 @@
#define R_BMODE_PDTH_EN_V1 0x4B74
#define R_BMODE_PDTH_EN_V2 0x6718
#define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30)
+#define R_BSS_CLR_VLD_V2 0x4EBC
+#define B_BSS_CLR_VLD0_V2 BIT(2)
#define R_CFO_COMP_SEG1_L 0x5384
#define R_CFO_COMP_SEG1_H 0x5388
#define R_CFO_COMP_SEG1_CTRL 0x538C
@@ -5056,6 +8241,10 @@
#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28)
#define R_DCFO_OPT_V1 0x6260
#define B_DCFO_OPT_EN_V1 BIT(17)
+#define R_SEG0R_EDCCA_LVL_BE 0x69EC
+#define R_SEG0R_PPDU_LVL_BE 0x69F0
+#define R_SEGSND 0x6A14
+#define B_SEGSND_EN BIT(31)
#define R_RPL_BIAS_COMP1 0x6DF0
#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
#define R_P1_TSSI_ALIM1 0x7630
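
The register blocks above follow the kernel's usual convention: R_* macros are MMIO offsets (with _C1 variants at a +0x4000 stride for the second CMAC), B_*_MASK macros are multi-bit fields built with GENMASK(), and single-bit flags use BIT(). A minimal sketch of how such masks are packed and unpacked with the <linux/bitfield.h> helpers; it reuses two defines from above, while the plumbing around them is illustrative rather than the driver's actual I/O layer:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define B_BE_WMAC_NAV_UPPER_MASK GENMASK(15, 8)
#define NAV_25MS 0xC4	/* 0xC4 * 128 us is roughly 25 ms, matching the name */

/* Pack the NAV-upper duration into its field; other bits stay zero. */
static u32 nav_ctl_compose(void)
{
	return FIELD_PREP(B_BE_WMAC_NAV_UPPER_MASK, NAV_25MS);
}

/* Unpack the NAV-upper field from a raw register value. */
static u8 nav_ctl_upper(u32 val)
{
	return FIELD_GET(B_BE_WMAC_NAV_UPPER_MASK, val);
}
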
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index ca99422e6..d0857ef60 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -112,9 +112,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI),
COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND),
COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
@@ -179,7 +179,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -257,7 +257,42 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
};
-static const struct rtw89_regd *rtw89_regd_find_reg_by_name(char *alpha2)
+static const char rtw89_alpha2_list_eu[][3] = {
+ "AT",
+ "BE",
+ "CY",
+ "CZ",
+ "DK",
+ "EE",
+ "FI",
+ "FR",
+ "DE",
+ "GR",
+ "HU",
+ "IS",
+ "IE",
+ "IT",
+ "LV",
+ "LI",
+ "LT",
+ "LU",
+ "MT",
+ "MC",
+ "NL",
+ "NO",
+ "PL",
+ "PT",
+ "SK",
+ "SI",
+ "ES",
+ "SE",
+ "CH",
+ "BG",
+ "HR",
+ "RO",
+};
+
+static const struct rtw89_regd *rtw89_regd_find_reg_by_name(const char *alpha2)
{
u32 i;
@@ -274,6 +309,24 @@ static bool rtw89_regd_is_ww(const struct rtw89_regd *regd)
return regd == &rtw89_ww_regd;
}
+static u8 rtw89_regd_get_index(const struct rtw89_regd *regd)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(rtw89_regd_map) > RTW89_REGD_MAX_COUNTRY_NUM);
+
+ if (rtw89_regd_is_ww(regd))
+ return RTW89_REGD_MAX_COUNTRY_NUM;
+
+ return regd - rtw89_regd_map;
+}
+
+static u8 rtw89_regd_get_index_by_name(const char *alpha2)
+{
+ const struct rtw89_regd *regd;
+
+ regd = rtw89_regd_find_reg_by_name(alpha2);
+ return rtw89_regd_get_index(regd);
+}
+
#define rtw89_debug_regd(_dev, _regd, _desc, _argv...) \
do { \
typeof(_regd) __r = _regd; \
@@ -291,19 +344,22 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
bool regd_allow_unii_4 = chip->support_unii4;
struct ieee80211_supported_band *sband;
+ struct rtw89_acpi_dsm_result res = {};
int ret;
u8 val;
if (!chip->support_unii4)
goto bottom;
- ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_59G_EN, &val);
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_59G_EN, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: cannot eval unii 4: %d\n", ret);
goto bottom;
}
+ val = res.u.value;
+
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: eval if allow unii 4: %d\n", val);
@@ -332,25 +388,99 @@ bottom:
sband->n_channels -= 3;
}
+static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block,
+ const char *alpha2)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ u8 index;
+
+ index = rtw89_regd_get_index_by_name(alpha2);
+ if (index == RTW89_REGD_MAX_COUNTRY_NUM) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD, "%s: unknown alpha2 %c%c\n",
+ __func__, alpha2[0], alpha2[1]);
+ return;
+ }
+
+ if (block)
+ set_bit(index, regulatory->block_6ghz);
+ else
+ clear_bit(index, regulatory->block_6ghz);
+}
+
+static void rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_acpi_country_code *country;
+ const struct rtw89_acpi_policy_6ghz *ptr;
+ struct rtw89_acpi_dsm_result res = {};
+ bool to_block;
+ int i, j;
+ int ret;
+
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6G_BP, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "acpi: cannot eval policy 6ghz: %d\n", ret);
+ return;
+ }
+
+ ptr = res.u.policy_6ghz;
+
+ switch (ptr->policy_mode) {
+ case RTW89_ACPI_POLICY_BLOCK:
+ to_block = true;
+ break;
+ case RTW89_ACPI_POLICY_ALLOW:
+ to_block = false;
+ /* only below list is allowed; block all first */
+ bitmap_fill(regulatory->block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%s: unknown policy mode: %d\n", __func__,
+ ptr->policy_mode);
+ goto out;
+ }
+
+ for (i = 0; i < ptr->country_count; i++) {
+ country = &ptr->country_list[i];
+ if (memcmp("EU", country->alpha2, 2) != 0) {
+ __rtw89_regd_setup_policy_6ghz(rtwdev, to_block,
+ country->alpha2);
+ continue;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(rtw89_alpha2_list_eu); j++)
+ __rtw89_regd_setup_policy_6ghz(rtwdev, to_block,
+ rtw89_alpha2_list_eu[j]);
+ }
+
+out:
+ kfree(ptr);
+}
+
static void rtw89_regd_setup_6ghz(struct rtw89_dev *rtwdev, struct wiphy *wiphy)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
bool chip_support_6ghz = chip->support_bands & BIT(NL80211_BAND_6GHZ);
bool regd_allow_6ghz = chip_support_6ghz;
struct ieee80211_supported_band *sband;
+ struct rtw89_acpi_dsm_result res = {};
int ret;
u8 val;
if (!chip_support_6ghz)
goto bottom;
- ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6G_DIS, &val);
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6G_DIS, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: cannot eval 6ghz: %d\n", ret);
goto bottom;
}
+ val = res.u.value;
+
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: eval if disallow 6ghz: %d\n", val);
@@ -369,8 +499,10 @@ bottom:
rtw89_debug(rtwdev, RTW89_DBG_REGD, "regd: allow 6ghz: %d\n",
regd_allow_6ghz);
- if (regd_allow_6ghz)
+ if (regd_allow_6ghz) {
+ rtw89_regd_setup_policy_6ghz(rtwdev);
return;
+ }
sband = wiphy->bands[NL80211_BAND_6GHZ];
if (!sband)
@@ -430,6 +562,33 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
+ struct wiphy *wiphy)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ struct ieee80211_supported_band *sband;
+ u8 index;
+ int i;
+
+ index = rtw89_regd_get_index(regd);
+ if (index == RTW89_REGD_MAX_COUNTRY_NUM)
+ return;
+
+ if (!test_bit(index, regulatory->block_6ghz))
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c 6 GHz is blocked by policy\n",
+ regd->alpha2[0], regd->alpha2[1]);
+
+ sband = wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++)
+ sband->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+}
+
static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
struct wiphy *wiphy,
struct regulatory_request *request)
@@ -444,6 +603,8 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
else
wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+
+ rtw89_regd_apply_policy_6ghz(rtwdev, wiphy);
}
void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
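
The new policy code maps each rtw89_regd_map entry to one bit of a per-device bitmap: rtw89_regd_get_index() turns a regd pointer into its array index, with RTW89_REGD_MAX_COUNTRY_NUM doubling as the "unknown or worldwide" sentinel (a BUILD_BUG_ON guards that the map fits), the ACPI policy sets or clears bits in regulatory->block_6ghz, and the notifier path disables every 6 GHz channel when the active country's bit is set. A stripped-down sketch of the same index-plus-bitmap idiom, with simplified types and placeholder names around the real bitmap ops:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>

#define MAX_COUNTRY_NUM 256

struct regd_entry { char alpha2[3]; };

static const struct regd_entry regd_map[] = {
	{ "US" }, { "DE" }, { "TW" },	/* placeholder subset */
};

static DECLARE_BITMAP(block_6ghz, MAX_COUNTRY_NUM);

/* Array index of an alpha2 code; the bitmap size doubles as the
 * "unknown" sentinel, one past the last valid index.
 */
static unsigned int regd_index(const char *alpha2)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(regd_map); i++)
		if (!memcmp(regd_map[i].alpha2, alpha2, 2))
			return i;
	return MAX_COUNTRY_NUM;
}

static void policy_mark(const char *alpha2, bool block)
{
	unsigned int idx = regd_index(alpha2);

	if (idx == MAX_COUNTRY_NUM)
		return;	/* unknown alpha2 is skipped, as in the driver */
	if (block)
		set_bit(idx, block_6ghz);
	else
		clear_bit(idx, block_6ghz);
}

static bool policy_blocks(const char *alpha2)
{
	unsigned int idx = regd_index(alpha2);

	return idx < MAX_COUNTRY_NUM && test_bit(idx, block_6ghz);
}
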
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 50522ff85..5c167a927 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -205,6 +205,20 @@ static const struct rtw89_dig_regs rtw8851b_dig_regs = {
B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
};
+static const struct rtw89_edcca_regs rtw8851b_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_V1,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_EDCCA_LVL_V1,
+ .ppdu_mask = B_EDCCA_LVL_MSK3,
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
+};
+
static const struct rtw89_btc_rf_trx_para rtw89_btc_8851b_rf_ul[] = {
{255, 0, 0, 7}, /* 0 -> original */
{255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
@@ -500,7 +514,8 @@ static void rtw8851b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
gain->offset_valid = valid;
}
-static int rtw8851b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+static int rtw8851b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
{
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw8851b_efuse *map;
@@ -2359,8 +2374,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = rtw8851b_hfc_param_ini_pcie,
.dle_mem = rtw8851b_dle_mem_pcie,
- .wde_qempty_acq_num = 4,
- .wde_qempty_mgq_sel = 4,
+ .wde_qempty_acq_grpnum = 4,
+ .wde_qempty_mgq_grpsel = 4,
.rf_base_addr = {0xe000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
@@ -2393,12 +2408,14 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.bacam_num = 2,
.bacam_dynamic_num = 4,
.bacam_ver = RTW89_BACAM_V0,
+ .ppdu_max_usr = 4,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
.limit_efuse_size = 1280,
.dav_phy_efuse_size = 0,
.dav_log_efuse_size = 0,
+ .efuse_blocks = NULL,
.phycap_addr = 0x580,
.phycap_size = 128,
.para_ver = 0,
@@ -2437,13 +2454,15 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dcfo_comp = &rtw8851b_dcfo_comp,
.dcfo_comp_sft = 12,
.imr_info = &rtw8851b_imr_info,
+ .imr_dmac_table = NULL,
+ .imr_cmac_table = NULL,
.rrsr_cfgs = &rtw8851b_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
- .edcca_lvl_reg = R_SEG0R_EDCCA_LVL_V1,
+ .edcca_regs = &rtw8851b_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8851b,
#endif
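
This hunk replaces the single .edcca_lvl_reg address in rtw89_chip_info with a per-chip rtw89_edcca_regs table, so shared EDCCA code can reach the level, report, and TX-collision registers through one pointer instead of branching per chip; 8851B fills the table with the AX-era R_SEG0R_EDCCA_LVL_V1/R_EDCCA_RPT_* registers, while a BE chip can supply the *_BE variants defined earlier. A hedged sketch of the pattern, with a cut-down struct and a stub accessor standing in for the driver's real PHY I/O helpers:

#include <linux/types.h>

/* One register/mask pair per field; each chip provides its own
 * filled-in, const instance of this table.
 */
struct edcca_regs {
	u32 edcca_level;
	u32 edcca_mask;
	u32 ppdu_level;
	u32 ppdu_mask;
};

/* Stub standing in for a masked PHY register write. */
static void phy_write_mask(u32 addr, u32 mask, u32 val)
{
	(void)addr; (void)mask; (void)val;	/* MMIO elided */
}

/* Chip-agnostic: adding a new chip means adding a table, not
 * another if/else chain in every EDCCA routine.
 */
static void edcca_set_level(const struct edcca_regs *regs, u8 lvl)
{
	phy_write_mask(regs->edcca_level, regs->edcca_mask, lvl);
	phy_write_mask(regs->ppdu_level, regs->ppdu_mask, lvl);
}
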
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index 0f7711c50..ca1374a71 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -10,6 +10,7 @@
#include "rtw8851b.h"
static const struct rtw89_pci_info rtw8851b_pci_info = {
+ .gen_def = &rtw89_pci_gen_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -24,6 +25,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -33,6 +36,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
.txbd_rwptr_clr2_reg = 0,
+ .dma_io_stop = {R_AX_PCIE_DMA_STOP1, B_AX_STOP_PCIEIO},
.dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
.dma_stop2 = {0},
.dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
@@ -41,6 +45,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
+ .mit_addr = R_AX_INT_MIT_RX,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
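
Each PCI info struct in this series also gains .gen_def = &rtw89_pci_gen_ax, pointing the common PCI layer at a generation-specific ops table so the existing AX chips and the new BE generation can diverge without forking shared code. The members of the real rtw89_pci_gen_def are not visible in this diff; the sketch below shows only the dispatch shape, with invented hook names:

#include <linux/types.h>

struct dev_ctx;	/* opaque device context for the sketch */

/* Generation-specific hooks; one static const instance per
 * MAC generation (e.g. AX vs. BE).
 */
struct pci_gen_def {
	int (*mac_pre_init)(struct dev_ctx *ctx);
	int (*mac_post_init)(struct dev_ctx *ctx);
};

static int ax_pre_init(struct dev_ctx *ctx)  { (void)ctx; return 0; }
static int ax_post_init(struct dev_ctx *ctx) { (void)ctx; return 0; }

static const struct pci_gen_def pci_gen_ax = {
	.mac_pre_init  = ax_pre_init,
	.mac_post_init = ax_post_init,
};

/* Common code dispatches through the table instead of testing
 * the chip generation at every call site.
 */
static int pci_mac_init(struct dev_ctx *ctx, const struct pci_gen_def *gen)
{
	int ret = gen->mac_pre_init(ctx);

	return ret ? ret : gen->mac_post_init(ctx);
}
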
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 0c36e6180..0c76c52ce 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -498,6 +498,20 @@ static const struct rtw89_dig_regs rtw8852a_dig_regs = {
B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
};
+static const struct rtw89_edcca_regs rtw8852a_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_EDCCA_LVL,
+ .ppdu_mask = B_EDCCA_LVL_MSK3,
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
@@ -537,7 +551,8 @@ static void rtw8852a_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
}
}
-static int rtw8852a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+static int rtw8852a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
{
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw8852a_efuse *map;
@@ -2094,8 +2109,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852a_hfc_param_ini_pcie,
.dle_mem = rtw8852a_dle_mem_pcie,
- .wde_qempty_acq_num = 16,
- .wde_qempty_mgq_sel = 16,
+ .wde_qempty_acq_grpnum = 16,
+ .wde_qempty_mgq_grpsel = 16,
.rf_base_addr = {0xc000, 0xd000},
.pwr_on_seq = pwr_on_seq_8852a,
.pwr_off_seq = pwr_off_seq_8852a,
@@ -2129,12 +2144,14 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.bacam_num = 2,
.bacam_dynamic_num = 4,
.bacam_ver = RTW89_BACAM_V0,
+ .ppdu_max_usr = 4,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
.limit_efuse_size = 1152,
.dav_phy_efuse_size = 0,
.dav_log_efuse_size = 0,
+ .efuse_blocks = NULL,
.phycap_addr = 0x580,
.phycap_size = 128,
.para_ver = 0x0,
@@ -2174,11 +2191,13 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 10,
.imr_info = &rtw8852a_imr_info,
+ .imr_dmac_table = NULL,
+ .imr_cmac_table = NULL,
.rrsr_cfgs = &rtw8852a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP,
.dma_ch_mask = 0,
- .edcca_lvl_reg = R_SEG0R_EDCCA_LVL,
+ .edcca_regs = &rtw8852a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852a,
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index d835a44a1..7c6ffedb7 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -10,6 +10,7 @@
#include "rtw8852a.h"
static const struct rtw89_pci_info rtw8852a_pci_info = {
+ .gen_def = &rtw89_pci_gen_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -24,6 +25,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -33,6 +36,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2,
+ .dma_io_stop = {R_AX_PCIE_DMA_STOP1, B_AX_STOP_PCIEIO},
.dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK},
.dma_stop2 = {R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL},
.dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK},
@@ -41,6 +45,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
+ .mit_addr = R_AX_INT_MIT_RX,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 9d4e6f082..de887a35f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -330,6 +330,20 @@ static const struct rtw89_dig_regs rtw8852b_dig_regs = {
B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
};
+static const struct rtw89_edcca_regs rtw8852b_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_V1,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_EDCCA_LVL_V1,
+ .ppdu_mask = B_EDCCA_LVL_MSK3,
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
+};
+
static const struct rtw89_btc_rf_trx_para rtw89_btc_8852b_rf_ul[] = {
{255, 0, 0, 7}, /* 0 -> original */
{255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
@@ -638,7 +652,8 @@ static void rtw8852b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
gain->offset_valid = valid;
}
-static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
{
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw8852b_efuse *map;
@@ -2528,8 +2543,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
.dle_mem = rtw8852b_dle_mem_pcie,
- .wde_qempty_acq_num = 4,
- .wde_qempty_mgq_sel = 4,
+ .wde_qempty_acq_grpnum = 4,
+ .wde_qempty_mgq_grpsel = 4,
.rf_base_addr = {0xe000, 0xf000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
@@ -2563,12 +2578,14 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.bacam_num = 2,
.bacam_dynamic_num = 4,
.bacam_ver = RTW89_BACAM_V0,
+ .ppdu_max_usr = 4,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
.limit_efuse_size = 1280,
.dav_phy_efuse_size = 96,
.dav_log_efuse_size = 16,
+ .efuse_blocks = NULL,
.phycap_addr = 0x580,
.phycap_size = 128,
.para_ver = 0,
@@ -2608,13 +2625,15 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dcfo_comp = &rtw8852b_dcfo_comp,
.dcfo_comp_sft = 10,
.imr_info = &rtw8852b_imr_info,
+ .imr_dmac_table = NULL,
+ .imr_cmac_table = NULL,
.rrsr_cfgs = &rtw8852b_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP_V1, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
- .edcca_lvl_reg = R_SEG0R_EDCCA_LVL_V1,
+ .edcca_regs = &rtw8852b_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852b,
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index ecf39d2d9..ed71364e6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -10,6 +10,7 @@
#include "rtw8852b.h"
static const struct rtw89_pci_info rtw8852b_pci_info = {
+ .gen_def = &rtw89_pci_gen_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -24,6 +25,8 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -33,6 +36,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
.txbd_rwptr_clr2_reg = 0,
+ .dma_io_stop = {R_AX_PCIE_DMA_STOP1, B_AX_STOP_PCIEIO},
.dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
.dma_stop2 = {0},
.dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
@@ -41,6 +45,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
+ .mit_addr = R_AX_INT_MIT_RX,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 3b7d8ab39..8618d0204 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -167,6 +167,20 @@ static const struct rtw89_dig_regs rtw8852c_dig_regs = {
B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
};
+static const struct rtw89_edcca_regs rtw8852c_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_EDCCA_LVL,
+ .ppdu_mask = B_EDCCA_LVL_MSK3,
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
+};
+
static void rtw8852c_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
@@ -426,11 +440,36 @@ static void rtw8852c_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
valid |= _decode_efuse_gain(map->rx_gain_5g_high,
&gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
&gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_l0,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_L0],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_L0]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_l1,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_L1],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_L1]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_m0,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_M0],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_M0]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_m1,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_M1],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_M1]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_h0,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_H0],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_H0]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_h1,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_H1],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_H1]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_uh0,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_UH0],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_UH0]);
+ valid |= _decode_efuse_gain(map->rx_gain_6g_uh1,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_UH1],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_UH1]);
gain->offset_valid = valid;
}
-static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
{
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw8852c_efuse *map;
@@ -2840,8 +2879,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rsvd_ple_ofst = 0x6f800,
.hfc_param_ini = rtw8852c_hfc_param_ini_pcie,
.dle_mem = rtw8852c_dle_mem_pcie,
- .wde_qempty_acq_num = 16,
- .wde_qempty_mgq_sel = 16,
+ .wde_qempty_acq_grpnum = 16,
+ .wde_qempty_mgq_grpsel = 16,
.rf_base_addr = {0xe000, 0xf000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
@@ -2877,12 +2916,14 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.bacam_num = 8,
.bacam_dynamic_num = 8,
.bacam_ver = RTW89_BACAM_V0_EXT,
+ .ppdu_max_usr = 8,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
.limit_efuse_size = 1280,
.dav_phy_efuse_size = 96,
.dav_log_efuse_size = 16,
+ .efuse_blocks = NULL,
.phycap_addr = 0x590,
.phycap_size = 0x60,
.para_ver = 0x1,
@@ -2923,11 +2964,13 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 12,
.imr_info = &rtw8852c_imr_info,
+ .imr_dmac_table = NULL,
+ .imr_cmac_table = NULL,
.rrsr_cfgs = &rtw8852c_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0},
.bss_clr_map_reg = R_BSS_CLR_MAP,
.dma_ch_mask = 0,
- .edcca_lvl_reg = R_SEG0R_EDCCA_LVL,
+ .edcca_regs = &rtw8852c_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852c,
#endif
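
rtw8852c now also decodes eight 6 GHz gain-offset bands from the efuse, OR-ing every _decode_efuse_gain() result into `valid` so the table counts as programmed if at least one band carried a value. A small sketch of that accumulate-validity idiom; the 0x00/0xff "not programmed" convention is an assumption about _decode_efuse_gain(), whose body lies outside this hunk:

#include <linux/string.h>
#include <linux/types.h>

/* Assumed decode: 0x00 and 0xff are unprogrammed efuse bytes,
 * anything else is a signed offset. Returns true on a real value.
 */
static bool decode_gain(u8 raw, s8 *out)
{
	if (raw == 0x00 || raw == 0xff)
		return false;
	*out = (s8)raw;
	return true;
}

static void parse_gain_offsets(const u8 *map, s8 *ofs, size_t n)
{
	bool valid = false;
	size_t i;

	for (i = 0; i < n; i++)
		valid |= decode_gain(map[i], &ofs[i]);

	/* One programmed band is enough for the whole table to be
	 * treated as usable, mirroring gain->offset_valid above.
	 */
	if (!valid)
		memset(ofs, 0, n);
}
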
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
index ac642808a..77b05daed 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h
@@ -73,9 +73,25 @@ struct rtw8852c_efuse {
u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
u8 rsvd14[10];
u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
- u8 rsvd15[110];
+ u8 rsvd15[94];
+ u8 rx_gain_6g_l0;
+ u8 rsvd16;
+ u8 rx_gain_6g_l1;
+ u8 rsvd17;
+ u8 rx_gain_6g_m0;
+ u8 rsvd18;
+ u8 rx_gain_6g_m1;
+ u8 rsvd19;
+ u8 rx_gain_6g_h0;
+ u8 rsvd20;
+ u8 rx_gain_6g_h1;
+ u8 rsvd21;
+ u8 rx_gain_6g_uh0;
+ u8 rsvd22;
+ u8 rx_gain_6g_uh1;
+ u8 rsvd23;
u8 channel_plan_6g;
- u8 rsvd16[71];
+ u8 rsvd24[71];
union {
struct rtw8852c_u_efuse u;
struct rtw8852c_e_efuse e;
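
The struct edit above carves the new per-band rx-gain bytes out of the old rsvd15[110] pad: 94 reserved bytes plus 16 new ones add back up to 110, so channel_plan_6g and everything after it keep their original offsets, and the trailing pad is simply renumbered to rsvd24. Offsets in fixed efuse layouts like this can be pinned at compile time; a sketch with static_assert()/offsetof(), using a collapsed, illustrative subset of the fields rather than the full rtw8852c map:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct efuse_map_tail {
	u8 rsvd15[94];
	u8 rx_gain_6g_l0;
	u8 rsvd16;
	u8 rx_gain_6g_l1;
	u8 rsvd17[13];		/* remaining interleaved bytes, collapsed */
	u8 channel_plan_6g;
} __packed;

/* 94 + 1 + 1 + 1 + 13 == 110: splitting the pad must not move
 * channel_plan_6g, which used to follow a single rsvd15[110].
 */
static_assert(offsetof(struct efuse_map_tail, channel_plan_6g) == 110);
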
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 80490a543..583ea673a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -19,6 +19,7 @@ static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = {
};
static const struct rtw89_pci_info rtw8852c_pci_info = {
+ .gen_def = &rtw89_pci_gen_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -33,6 +34,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
@@ -42,6 +45,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM_V1_MASK,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR_V1,
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1,
+ .dma_io_stop = {R_AX_HAXI_INIT_CFG1, B_AX_STOP_AXI_MST},
.dma_stop1 = {R_AX_HAXI_DMA_STOP1, B_AX_TX_STOP1_MASK},
.dma_stop2 = {R_AX_HAXI_DMA_STOP2, B_AX_TX_STOP2_ALL},
.dma_busy1 = {R_AX_HAXI_DMA_BUSY1, DMA_BUSY1_CHECK},
@@ -50,6 +54,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM_V1,
.cpwm_addr = R_AX_PCIE_CRPWM,
+ .mit_addr = R_AX_INT_MIT_RX_V1,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
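
The new rtw8922a.c below implements the BE-generation power sequences as long runs of register writes punctuated by read_poll_timeout() waits on status bits. A compact sketch of that wait, matching the call shape in rtw8922a_pwr_on_func(); the 1 ms poll interval and 3 s budget come from the code below, while the register name and accessor are illustrative stand-ins:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define R_SYS_PW_CTRL	0x0090		/* illustrative offset */
#define B_RDY_SYSPWR	BIT(1)		/* illustrative ready bit */

/* Stub for an MMIO read such as rtw89_read32(); always "ready" here. */
static u32 mac_read32(void *priv, u32 addr)
{
	(void)priv; (void)addr;
	return B_RDY_SYSPWR;
}

/* Poll every 1000 us until the ready bit rises, giving up after
 * 3000000 us; returns 0 on success, -ETIMEDOUT on expiry.
 */
static int wait_syspwr_ready(void *priv)
{
	u32 val;

	return read_poll_timeout(mac_read32, val, val & B_RDY_SYSPWR,
				 1000, 3000000, false, priv, R_SYS_PW_CTRL);
}
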
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
new file mode 100644
index 000000000..0e7300cc6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "efuse.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8922a.h"
+
+#define RTW8922A_FW_FORMAT_MAX 0
+#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
+#define RTW8922A_MODULE_FIRMWARE \
+ RTW8922A_FW_BASENAME ".bin"
+
+static const struct rtw89_hfc_ch_cfg rtw8922a_hfc_chcfg_pcie[] = {
+ {2, 1641, grp_0}, /* ACH 0 */
+ {2, 1641, grp_0}, /* ACH 1 */
+ {2, 1641, grp_0}, /* ACH 2 */
+ {2, 1641, grp_0}, /* ACH 3 */
+ {2, 1641, grp_1}, /* ACH 4 */
+ {2, 1641, grp_1}, /* ACH 5 */
+ {2, 1641, grp_1}, /* ACH 6 */
+ {2, 1641, grp_1}, /* ACH 7 */
+ {2, 1641, grp_0}, /* B0MGQ */
+ {2, 1641, grp_0}, /* B0HIQ */
+ {2, 1641, grp_1}, /* B1MGQ */
+ {2, 1641, grp_1}, /* B1HIQ */
+ {0, 0, 0}, /* FWCMDQ */
+ {0, 0, 0}, /* BMC */
+ {0, 0, 0}, /* H2D */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8922a_hfc_pubcfg_pcie = {
+ 1651, /* Group 0 */
+ 1651, /* Group 1 */
+ 3302, /* Public Max */
+ 0, /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8922a_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_prec_cfg_c2,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
+static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size0_v1,
+ &rtw89_mac_size.ple_size0_v1, &rtw89_mac_size.wde_qt0_v1,
+ &rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
+ &rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
+ &rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4_v1,
+ &rtw89_mac_size.ple_size3_v1, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt9,
+ &rtw89_mac_size.ple_qt9, &rtw89_mac_size.ple_rsvd_qt1,
+ &rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const struct rtw89_reg_imr rtw8922a_imr_dmac_regs[] = {
+ {R_BE_DISP_HOST_IMR, B_BE_DISP_HOST_IMR_CLR, B_BE_DISP_HOST_IMR_SET},
+ {R_BE_DISP_CPU_IMR, B_BE_DISP_CPU_IMR_CLR, B_BE_DISP_CPU_IMR_SET},
+ {R_BE_DISP_OTHER_IMR, B_BE_DISP_OTHER_IMR_CLR, B_BE_DISP_OTHER_IMR_SET},
+ {R_BE_PKTIN_ERR_IMR, B_BE_PKTIN_ERR_IMR_CLR, B_BE_PKTIN_ERR_IMR_SET},
+ {R_BE_INTERRUPT_MASK_REG, B_BE_INTERRUPT_MASK_REG_CLR, B_BE_INTERRUPT_MASK_REG_SET},
+ {R_BE_MLO_ERR_IDCT_IMR, B_BE_MLO_ERR_IDCT_IMR_CLR, B_BE_MLO_ERR_IDCT_IMR_SET},
+ {R_BE_MPDU_TX_ERR_IMR, B_BE_MPDU_TX_ERR_IMR_CLR, B_BE_MPDU_TX_ERR_IMR_SET},
+ {R_BE_MPDU_RX_ERR_IMR, B_BE_MPDU_RX_ERR_IMR_CLR, B_BE_MPDU_RX_ERR_IMR_SET},
+ {R_BE_SEC_ERROR_IMR, B_BE_SEC_ERROR_IMR_CLR, B_BE_SEC_ERROR_IMR_SET},
+ {R_BE_CPUIO_ERR_IMR, B_BE_CPUIO_ERR_IMR_CLR, B_BE_CPUIO_ERR_IMR_SET},
+ {R_BE_WDE_ERR_IMR, B_BE_WDE_ERR_IMR_CLR, B_BE_WDE_ERR_IMR_SET},
+ {R_BE_WDE_ERR1_IMR, B_BE_WDE_ERR1_IMR_CLR, B_BE_WDE_ERR1_IMR_SET},
+ {R_BE_PLE_ERR_IMR, B_BE_PLE_ERR_IMR_CLR, B_BE_PLE_ERR_IMR_SET},
+ {R_BE_PLE_ERRFLAG1_IMR, B_BE_PLE_ERRFLAG1_IMR_CLR, B_BE_PLE_ERRFLAG1_IMR_SET},
+ {R_BE_WDRLS_ERR_IMR, B_BE_WDRLS_ERR_IMR_CLR, B_BE_WDRLS_ERR_IMR_SET},
+ {R_BE_TXPKTCTL_B0_ERRFLAG_IMR, B_BE_TXPKTCTL_B0_ERRFLAG_IMR_CLR,
+ B_BE_TXPKTCTL_B0_ERRFLAG_IMR_SET},
+ {R_BE_TXPKTCTL_B1_ERRFLAG_IMR, B_BE_TXPKTCTL_B1_ERRFLAG_IMR_CLR,
+ B_BE_TXPKTCTL_B1_ERRFLAG_IMR_SET},
+ {R_BE_BBRPT_COM_ERR_IMR, B_BE_BBRPT_COM_ERR_IMR_CLR, B_BE_BBRPT_COM_ERR_IMR_SET},
+ {R_BE_BBRPT_CHINFO_ERR_IMR, B_BE_BBRPT_CHINFO_ERR_IMR_CLR,
+ B_BE_BBRPT_CHINFO_ERR_IMR_SET},
+ {R_BE_BBRPT_DFS_ERR_IMR, B_BE_BBRPT_DFS_ERR_IMR_CLR, B_BE_BBRPT_DFS_ERR_IMR_SET},
+ {R_BE_LA_ERRFLAG_IMR, B_BE_LA_ERRFLAG_IMR_CLR, B_BE_LA_ERRFLAG_IMR_SET},
+ {R_BE_CH_INFO_DBGFLAG_IMR, B_BE_CH_INFO_DBGFLAG_IMR_CLR, B_BE_CH_INFO_DBGFLAG_IMR_SET},
+ {R_BE_PLRLS_ERR_IMR, B_BE_PLRLS_ERR_IMR_CLR, B_BE_PLRLS_ERR_IMR_SET},
+ {R_BE_HAXI_IDCT_MSK, B_BE_HAXI_IDCT_MSK_CLR, B_BE_HAXI_IDCT_MSK_SET},
+};
+
+static const struct rtw89_imr_table rtw8922a_imr_dmac_table = {
+ .regs = rtw8922a_imr_dmac_regs,
+ .n_regs = ARRAY_SIZE(rtw8922a_imr_dmac_regs),
+};
+
+static const struct rtw89_reg_imr rtw8922a_imr_cmac_regs[] = {
+ {R_BE_RESP_IMR, B_BE_RESP_IMR_CLR, B_BE_RESP_IMR_SET},
+ {R_BE_RX_ERROR_FLAG_IMR, B_BE_RX_ERROR_FLAG_IMR_CLR, B_BE_RX_ERROR_FLAG_IMR_SET},
+ {R_BE_TX_ERROR_FLAG_IMR, B_BE_TX_ERROR_FLAG_IMR_CLR, B_BE_TX_ERROR_FLAG_IMR_SET},
+ {R_BE_RX_ERROR_FLAG_IMR_1, B_BE_TX_ERROR_FLAG_IMR_1_CLR, B_BE_TX_ERROR_FLAG_IMR_1_SET},
+ {R_BE_PTCL_IMR1, B_BE_PTCL_IMR1_CLR, B_BE_PTCL_IMR1_SET},
+ {R_BE_PTCL_IMR0, B_BE_PTCL_IMR0_CLR, B_BE_PTCL_IMR0_SET},
+ {R_BE_PTCL_IMR_2, B_BE_PTCL_IMR_2_CLR, B_BE_PTCL_IMR_2_SET},
+ {R_BE_SCHEDULE_ERR_IMR, B_BE_SCHEDULE_ERR_IMR_CLR, B_BE_SCHEDULE_ERR_IMR_SET},
+ {R_BE_C0_TXPWR_IMR, B_BE_C0_TXPWR_IMR_CLR, B_BE_C0_TXPWR_IMR_SET},
+ {R_BE_TRXPTCL_ERROR_INDICA_MASK, B_BE_TRXPTCL_ERROR_INDICA_MASK_CLR,
+ B_BE_TRXPTCL_ERROR_INDICA_MASK_SET},
+ {R_BE_RX_ERR_IMR, B_BE_RX_ERR_IMR_CLR, B_BE_RX_ERR_IMR_SET},
+ {R_BE_PHYINFO_ERR_IMR_V1, B_BE_PHYINFO_ERR_IMR_V1_CLR, B_BE_PHYINFO_ERR_IMR_V1_SET},
+};
+
+static const struct rtw89_imr_table rtw8922a_imr_cmac_table = {
+ .regs = rtw8922a_imr_cmac_regs,
+ .n_regs = ARRAY_SIZE(rtw8922a_imr_cmac_regs),
+};
+
+static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
+ [RTW89_EFUSE_BLOCK_SYS] = {.offset = 0x00000, .size = 0x310},
+ [RTW89_EFUSE_BLOCK_RF] = {.offset = 0x10000, .size = 0x240},
+ [RTW89_EFUSE_BLOCK_HCI_DIG_PCIE_SDIO] = {.offset = 0x20000, .size = 0x4800},
+ [RTW89_EFUSE_BLOCK_HCI_DIG_USB] = {.offset = 0x30000, .size = 0x890},
+ [RTW89_EFUSE_BLOCK_HCI_PHY_PCIE] = {.offset = 0x40000, .size = 0x200},
+ [RTW89_EFUSE_BLOCK_HCI_PHY_USB3] = {.offset = 0x50000, .size = 0x80},
+ [RTW89_EFUSE_BLOCK_HCI_PHY_USB2] = {.offset = 0x60000, .size = 0x0},
+ [RTW89_EFUSE_BLOCK_ADIE] = {.offset = 0x70000, .size = 0x10},
+};
+
+static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 val32;
+ int ret;
+
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_AFSM_WLSUS_EN |
+ B_BE_AFSM_PCIE_SUS_EN);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_DIS_WLBT_PDNSUSEN_SOPC);
+ rtw89_write32_set(rtwdev, R_BE_WLLPS_CTRL, B_BE_DIS_WLBT_LPSEN_LOPC);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APDM_HPDN);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_SWLPS);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_BE_RDY_SYSPWR,
+ 1000, 3000000, false, rtwdev, R_BE_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write32_set(rtwdev, R_BE_WLRESUME_CTRL, B_BE_LPSROP_CMAC0 |
+ B_BE_LPSROP_CMAC1);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFN_ONMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_APFN_ONMAC),
+ 1000, 3000000, false, rtwdev, R_BE_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_BE_AFE_ON_CTRL1, B_BE_REG_CK_MON_CK960M_EN);
+ rtw89_write8_set(rtwdev, R_BE_ANAPAR_POW_MAC, B_BE_POW_PC_LDO_PORT0 |
+ B_BE_POW_PC_LDO_PORT1);
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_R_SYM_ISO_ADDA_P02PP |
+ B_BE_R_SYM_ISO_ADDA_P12PP);
+ rtw89_write8_set(rtwdev, R_BE_PLATFORM_ENABLE, B_BE_PLATFORM_EN);
+ rtw89_write32_set(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HAXIDMA_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_BE_HAXIDMA_IO_ST,
+ 1000, 3000000, false, rtwdev, R_BE_HCI_OPT_CTRL);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_HAXIDMA_BACKUP_RESTORE_ST),
+ 1000, 3000000, false, rtwdev, R_BE_HCI_OPT_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32, val32, val32 & B_BE_HCI_WLAN_IO_ST,
+ 1000, 3000000, false, rtwdev, R_BE_HCI_OPT_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_BE_SYS_SDIO_CTRL, B_BE_PCIE_FORCE_IBX_EN);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL, 0x02, 0x02);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL, 0x01, 0x01);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_SYS_ADIE_PAD_PWR_CTRL, B_BE_SYM_PADPDN_WL_RFC1_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x40, 0x40);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_SYS_ADIE_PAD_PWR_CTRL, B_BE_SYM_PADPDN_WL_RFC0_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x20, 0x20);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x04, 0x04);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x08, 0x08);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xEB, 0xFF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xEB, 0xFF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x01, 0x01);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x02, 0x02);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x80);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XREF_RF1, 0, 0x40);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XREF_RF2, 0, 0x40);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL_1, 0x40, 0x60);
+ if (ret)
+ return ret;
+
+ if (hal->cv != CHIP_CAV) {
+ rtw89_write32_set(rtwdev, R_BE_PMC_DBG_CTRL2, B_BE_SYSON_DIS_PMCR_BE_WRMSK);
+ rtw89_write32_set(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_ISO_EB2CORE);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_B);
+
+ mdelay(1);
+
+ rtw89_write32_clr(rtwdev, R_BE_SYS_ISO_CTRL, B_BE_PWC_EV2EF_S);
+ rtw89_write32_clr(rtwdev, R_BE_PMC_DBG_CTRL2, B_BE_SYSON_DIS_PMCR_BE_WRMSK);
+ }
+
+ rtw89_write32_set(rtwdev, R_BE_DMAC_FUNC_EN,
+ B_BE_MAC_FUNC_EN | B_BE_DMAC_FUNC_EN | B_BE_MPDU_PROC_EN |
+ B_BE_WD_RLS_EN | B_BE_DLE_WDE_EN | B_BE_TXPKT_CTRL_EN |
+ B_BE_STA_SCH_EN | B_BE_DLE_PLE_EN | B_BE_PKT_BUF_EN |
+ B_BE_DMAC_TBL_EN | B_BE_PKT_IN_EN | B_BE_DLE_CPUIO_EN |
+ B_BE_DISPATCHER_EN | B_BE_BBRPT_EN | B_BE_MAC_SEC_EN |
+ B_BE_H_AXIDMA_EN | B_BE_DMAC_MLO_EN | B_BE_PLRLS_EN |
+ B_BE_P_AXIDMA_EN | B_BE_DLE_DATACPUIO_EN | B_BE_LTR_CTL_EN);
+
+ set_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
+
+ rtw89_write32_set(rtwdev, R_BE_CMAC_SHARE_FUNC_EN,
+ B_BE_CMAC_SHARE_EN | B_BE_RESPBA_EN | B_BE_ADDRSRCH_EN |
+ B_BE_BTCOEX_EN);
+ rtw89_write32_set(rtwdev, R_BE_CMAC_FUNC_EN,
+ B_BE_CMAC_EN | B_BE_CMAC_TXEN | B_BE_CMAC_RXEN |
+ B_BE_SIGB_EN | B_BE_PHYINTF_EN | B_BE_CMAC_DMA_EN |
+ B_BE_PTCLTOP_EN | B_BE_SCHEDULER_EN | B_BE_TMAC_EN |
+ B_BE_RMAC_EN | B_BE_TXTIME_EN | B_BE_RESP_PKTCTL_EN);
+
+ set_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
+
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN |
+ B_BE_FEN_BBPLAT_RSTB);
+
+ return 0;
+}
+
+static int rtw8922a_pwr_off_func(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x10, 0x10);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x08);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x04);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC6, 0xFF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC6, 0xFF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x80, 0x80);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x02);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x01);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL, 0x02, 0xFF);
+ if (ret)
+ return ret;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL, 0x00, 0xFF);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_R_SYM_ISO_ADDA_P02PP |
+ B_BE_R_SYM_ISO_ADDA_P12PP);
+ rtw89_write8_clr(rtwdev, R_BE_ANAPAR_POW_MAC, B_BE_POW_PC_LDO_PORT0 |
+ B_BE_POW_PC_LDO_PORT1);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_EN_WLON);
+ rtw89_write8_clr(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN |
+ B_BE_FEN_BBPLAT_RSTB);
+ rtw89_write32_clr(rtwdev, R_BE_SYS_ADIE_PAD_PWR_CTRL, B_BE_SYM_PADPDN_WL_RFC0_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x20);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_BE_SYS_ADIE_PAD_PWR_CTRL, B_BE_SYM_PADPDN_WL_RFC1_1P3);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, 0x40);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HAXIDMA_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_HAXIDMA_IO_ST),
+ 1000, 3000000, false, rtwdev, R_BE_HCI_OPT_CTRL);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_HAXIDMA_BACKUP_RESTORE_ST),
+ 1000, 3000000, false, rtwdev, R_BE_HCI_OPT_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_clr(rtwdev, R_BE_HCI_OPT_CTRL, B_BE_HCI_WLAN_IO_EN);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_HCI_WLAN_IO_ST),
+ 1000, 3000000, false, rtwdev, R_BE_HCI_OPT_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_OFFMAC);
+
+ ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_BE_APFM_OFFMAC),
+ 1000, 3000000, false, rtwdev, R_BE_SYS_PW_CTRL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_BE_WLLPS_CTRL, 0x0000A1B2);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_XTAL_OFF_A_DIE);
+ rtw89_write32_set(rtwdev, R_BE_SYS_PW_CTRL, B_BE_APFM_SWLPS);
+ rtw89_write32(rtwdev, R_BE_UDM1, 0);
+
+ return 0;
+}
+
+static void rtw8922a_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8922a_efuse *map)
+{
+ struct rtw8922a_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 *bw40_1s_tssi_6g_ofst[] = {map->bw40_1s_tssi_6g_a, map->bw40_1s_tssi_6g_b};
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+ memcpy(tssi->tssi_6g_mcs[i], bw40_1s_tssi_6g_ofst[i],
+ sizeof(tssi->tssi_6g_mcs[i]));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
+static void rtw8922a_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
+ struct rtw8922a_efuse *map)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ bool all_0xff = true, all_0x00 = true;
+ int i, j;
+ u8 t;
+
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK] = map->rx_gain_a._2g_cck;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK] = map->rx_gain_b._2g_cck;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM] = map->rx_gain_a._2g_ofdm;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM] = map->rx_gain_b._2g_ofdm;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW] = map->rx_gain_a._5g_low;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW] = map->rx_gain_b._5g_low;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID] = map->rx_gain_a._5g_mid;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID] = map->rx_gain_b._5g_mid;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH] = map->rx_gain_a._5g_high;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH] = map->rx_gain_b._5g_high;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_L0] = map->rx_gain_6g_a._6g_l0;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_L0] = map->rx_gain_6g_b._6g_l0;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_L1] = map->rx_gain_6g_a._6g_l1;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_L1] = map->rx_gain_6g_b._6g_l1;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_M0] = map->rx_gain_6g_a._6g_m0;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_M0] = map->rx_gain_6g_b._6g_m0;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_M1] = map->rx_gain_6g_a._6g_m1;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_M1] = map->rx_gain_6g_b._6g_m1;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_H0] = map->rx_gain_6g_a._6g_h0;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_H0] = map->rx_gain_6g_b._6g_h0;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_H1] = map->rx_gain_6g_a._6g_h1;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_H1] = map->rx_gain_6g_b._6g_h1;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_UH0] = map->rx_gain_6g_a._6g_uh0;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_UH0] = map->rx_gain_6g_b._6g_uh0;
+ gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_6G_UH1] = map->rx_gain_6g_a._6g_uh1;
+ gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_6G_UH1] = map->rx_gain_6g_b._6g_uh1;
+
+ for (i = RF_PATH_A; i <= RF_PATH_B; i++)
+ for (j = 0; j < RTW89_GAIN_OFFSET_NR; j++) {
+ t = gain->offset[i][j];
+ if (t != 0xff)
+ all_0xff = false;
+ if (t != 0x0)
+ all_0x00 = false;
+
+ /* transform: sign-bit + U(7,2) to S(8,2) */
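+ /* e.g. (illustrative value) t = 0x84: sign bit set, magnitude 4,
+ * so (0x84 ^ 0x7f) + 1 = 0xfc = -4 in S(8,2), i.e. -1.0 dB.
+ */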
+ if (t & 0x80)
+ gain->offset[i][j] = (t ^ 0x7f) + 1;
+ }
+
+ gain->offset_valid = !all_0xff && !all_0x00;
+}
+
+static void rtw8922a_read_efuse_mac_addr(struct rtw89_dev *rtwdev, u32 addr)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ u16 val;
+ int i;
+
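+ /* The MAC address sits in the efuse as little-endian 16-bit words;
+ * split each word into two bytes.
+ */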
+ for (i = 0; i < ETH_ALEN; i += 2, addr += 2) {
+ val = rtw89_read16(rtwdev, addr);
+ efuse->addr[i] = val & 0xff;
+ efuse->addr[i + 1] = val >> 8;
+ }
+}
+
+static int rtw8922a_read_efuse_pci_sdio(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE)
+ rtw8922a_read_efuse_mac_addr(rtwdev, 0x3104);
+ else
+ ether_addr_copy(efuse->addr, log_map + 0x001A);
+
+ return 0;
+}
+
+static int rtw8922a_read_efuse_usb(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ rtw8922a_read_efuse_mac_addr(rtwdev, 0x4078);
+
+ return 0;
+}
+
+static int rtw8922a_read_efuse_rf(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw8922a_efuse *map = (struct rtw8922a_efuse *)log_map;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8922a_efuse_parsing_tssi(rtwdev, map);
+ rtw8922a_efuse_parsing_gain_offset(rtwdev, map);
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static int rtw8922a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
+{
+ switch (block) {
+ case RTW89_EFUSE_BLOCK_HCI_DIG_PCIE_SDIO:
+ return rtw8922a_read_efuse_pci_sdio(rtwdev, log_map);
+ case RTW89_EFUSE_BLOCK_HCI_DIG_USB:
+ return rtw8922a_read_efuse_usb(rtwdev, log_map);
+ case RTW89_EFUSE_BLOCK_RF:
+ return rtw8922a_read_efuse_rf(rtwdev, log_map);
+ default:
+ return 0;
+ }
+}
+
+#define THM_TRIM_POSITIVE_MASK BIT(6)
+#define THM_TRIM_MAGNITUDE_MASK GENMASK(5, 0)
+
+static void rtw8922a_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ static const u32 thm_trim_addr[RF_PATH_NUM_8922A] = {0x1706, 0x1733};
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = true;
+ u8 pg_th;
+ s8 val;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pg_th = phycap_map[thm_trim_addr[i] - addr];
+ if (pg_th == 0xff) {
+ info->thermal_trim[i] = 0;
+ pg = false;
+ break;
+ }
+
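+ /* Decode sign-magnitude: bit 6 is the sign, bits 5:0 the magnitude.
+ * e.g. (illustrative values) pg_th = 0x45 gives +5, 0x05 gives -5.
+ */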
+ val = u8_get_bits(pg_th, THM_TRIM_MAGNITUDE_MASK);
+
+ if (!(pg_th & THM_TRIM_POSITIVE_MASK))
+ val *= -1;
+
+ info->thermal_trim[i] = val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x (%d)\n",
+ i, pg_th, val);
+ }
+
+ info->pg_thermal_trim = pg;
+}
+
+static void rtw8922a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8922A] = {0x1707, 0x1734};
+ static const u32 check_pa_pad_trim_addr = 0x1700;
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 val;
+ u8 i;
+
+ val = phycap_map[check_pa_pad_trim_addr - addr];
+ if (val != 0xff)
+ info->pg_pa_bias_trim = true;
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+ }
+}
+
+static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ static const u32 pad_bias_trim_addr[RF_PATH_NUM_8922A] = {0x1708, 0x1735};
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ info->pad_bias_trim[i] = phycap_map[pad_bias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] path=%d pad_bias_trim=0x%x\n",
+ i, info->pad_bias_trim[i]);
+ }
+}
+
+static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8922a_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8922a_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+ rtw8922a_phycap_parsing_pad_bias_trim(rtwdev, phycap_map);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = RTW89_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW89_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+};
+#endif
+
+static const struct rtw89_chip_ops rtw8922a_chip_ops = {
+ .read_efuse = rtw8922a_read_efuse,
+ .read_phycap = rtw8922a_read_phycap,
+ .pwr_on_func = rtw8922a_pwr_on_func,
+ .pwr_off_func = rtw8922a_pwr_off_func,
+};
+
+const struct rtw89_chip_info rtw8922a_chip_info = {
+ .chip_id = RTL8922A,
+ .chip_gen = RTW89_CHIP_BE,
+ .ops = &rtw8922a_chip_ops,
+ .mac_def = &rtw89_mac_gen_be,
+ .phy_def = &rtw89_phy_gen_be,
+ .fw_basename = RTW8922A_FW_BASENAME,
+ .fw_format_max = RTW8922A_FW_FORMAT_MAX,
+ .try_ce_fw = false,
+ .bbmcu_nr = 1,
+ .needed_fw_elms = RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS,
+ .fifo_size = 589824,
+ .small_fifo_size = false,
+ .dle_scc_rsvd_size = 0,
+ .max_amsdu_limit = 8000,
+ .dis_2g_40m_ul_ofdma = false,
+ .rsvd_ple_ofst = 0x8f800,
+ .hfc_param_ini = rtw8922a_hfc_param_ini_pcie,
+ .dle_mem = rtw8922a_dle_mem_pcie,
+ .wde_qempty_acq_grpnum = 4,
+ .wde_qempty_mgq_grpsel = 4,
+ .rf_base_addr = {0xe000, 0xf000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .bb_table = NULL,
+ .bb_gain_table = NULL,
+ .rf_table = {},
+ .nctl_table = NULL,
+ .nctl_post_table = NULL,
+ .dflt_parms = NULL, /* load parm from fw */
+ .rfe_parms_conf = NULL, /* load parm from fw */
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .tssi_dbw_table = NULL,
+ .support_chanctx_num = 1,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ) |
+ BIT(NL80211_BAND_6GHZ),
+ .support_unii4 = true,
+ .ul_tb_waveform_ctrl = false,
+ .ul_tb_pwr_diff = false,
+ .hw_sec_hdr = true,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 20,
+ .scam_num = 32,
+ .bacam_num = 8,
+ .bacam_dynamic_num = 8,
+ .bacam_ver = RTW89_BACAM_V1,
+ .ppdu_max_usr = 16,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 0x1300,
+ .logical_efuse_size = 0x70000,
+ .limit_efuse_size = 0x40000,
+ .dav_phy_efuse_size = 0,
+ .dav_log_efuse_size = 0,
+ .efuse_blocks = rtw8922a_efuse_blocks,
+ .phycap_addr = 0x1700,
+ .phycap_size = 0x38,
+
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+ .low_power_hci_modes = 0,
+ .hci_func_en_addr = R_BE_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_rxdesc_short_v2),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body_v2),
+ .txwd_info_size = sizeof(struct rtw89_txwd_info_v2),
+ .cfo_src_fd = true,
+ .cfo_hw_comp = true,
+ .dcfo_comp = NULL,
+ .dcfo_comp_sft = 0,
+ .imr_info = NULL,
+ .imr_dmac_table = &rtw8922a_imr_dmac_table,
+ .imr_cmac_table = &rtw8922a_imr_cmac_table,
+ .bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
+ .bss_clr_map_reg = R_BSS_CLR_MAP_V2,
+ .dma_ch_mask = 0,
+#ifdef CONFIG_PM
+ .wowlan_stub = &rtw_wowlan_stub_8922a,
+#endif
+ .xtal_info = NULL,
+};
+EXPORT_SYMBOL(rtw8922a_chip_info);
+
+MODULE_FIRMWARE(RTW8922A_MODULE_FIRMWARE);
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11be wireless 8922A driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.h b/drivers/net/wireless/realtek/rtw89/rtw8922a.h
new file mode 100644
index 000000000..597317ab6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8922A_H__
+#define __RTW89_8922A_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8922A 2
+#define BB_PATH_NUM_8922A 2
+
+struct rtw8922a_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+ u8 bw_diff_5g[10];
+} __packed;
+
+struct rtw8922a_rx_gain {
+ u8 _2g_ofdm;
+ u8 _2g_cck;
+ u8 _5g_low;
+ u8 _5g_mid;
+ u8 _5g_high;
+} __packed;
+
+struct rtw8922a_rx_gain_6g {
+ u8 _6g_l0;
+ u8 _6g_l1;
+ u8 _6g_m0;
+ u8 _6g_m1;
+ u8 _6g_h0;
+ u8 _6g_h1;
+ u8 _6g_uh0;
+ u8 _6g_uh1;
+} __packed;
+
+struct rtw8922a_efuse {
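+ /* The rsvd arrays appear to be pure padding that keeps the following
+ * fields at their fixed offsets in the logical efuse map.
+ */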
+ u8 country_code[2];
+ u8 rsvd[0xe];
+ struct rtw8922a_tssi_offset path_a_tssi;
+ struct rtw8922a_tssi_offset path_b_tssi;
+ u8 rsvd1[0x54];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd2[0x7];
+ u8 board_info;
+ u8 rsvd3[0x8];
+ u8 rfe_type;
+ u8 rsvd4[0x5];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd5[0x2];
+ struct rtw8922a_rx_gain rx_gain_a;
+ struct rtw8922a_rx_gain rx_gain_b;
+ u8 rsvd6[0x22];
+ u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd7[0xa];
+ u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd8[0xa];
+ u8 bw40_1s_tssi_6g_c[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd9[0xa];
+ u8 bw40_1s_tssi_6g_d[TSSI_MCS_6G_CH_GROUP_NUM];
+ u8 rsvd10[0xa];
+ struct rtw8922a_rx_gain_6g rx_gain_6g_a;
+ struct rtw8922a_rx_gain_6g rx_gain_6g_b;
+} __packed;
+
+extern const struct rtw89_chip_info rtw8922a_chip_info;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
new file mode 100644
index 000000000..9f46fb166
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8922a.h"
+
+static const struct rtw89_pci_info rtw8922a_pci_info = {
+ .gen_def = &rtw89_pci_gen_be,
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_V1_256B,
+ .rx_burst = MAC_AX_RX_BURST_V1_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_ENABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
+ .rx_ring_eq_is_full = true,
+ .check_rx_tag = true,
+
+ .init_cfg_reg = R_BE_HAXI_INIT_CFG1,
+ .txhci_en_bit = B_BE_TXDMA_EN,
+ .rxhci_en_bit = B_BE_RXDMA_EN,
+ .rxbd_mode_bit = B_BE_RXQ_RXBD_MODE_MASK,
+ .exp_ctrl_reg = R_BE_HAXI_EXP_CTRL_V1,
+ .max_tag_num_mask = B_BE_MAX_TAG_NUM_MASK,
+ .rxbd_rwptr_clr_reg = R_BE_RXBD_RWPTR_CLR1_V1,
+ .txbd_rwptr_clr2_reg = R_BE_TXBD_RWPTR_CLR1,
+ .dma_io_stop = {R_BE_HAXI_INIT_CFG1, B_BE_STOP_AXI_MST},
+ .dma_stop1 = {R_BE_HAXI_DMA_STOP1, B_BE_TX_STOP1_MASK},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_BE_HAXI_DMA_BUSY1, DMA_BUSY1_CHECK_BE},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_BE_HAXI_DMA_BUSY1,
+
+ .rpwm_addr = R_BE_PCIE_HRPWM,
+ .cpwm_addr = R_BE_PCIE_CRPWM,
+ .mit_addr = R_BE_PCIE_MIT_CH_EN,
+ .tx_dma_ch_mask = 0,
+ .bd_idx_addr_low_power = NULL,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set_be,
+ .bd_ram_table = NULL,
+
+ .ltr_set = rtw89_pci_ltr_set_v2,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .config_intr_mask = rtw89_pci_config_intr_mask_v2,
+ .enable_intr = rtw89_pci_enable_intr_v2,
+ .disable_intr = rtw89_pci_disable_intr_v2,
+ .recognize_intrs = rtw89_pci_recognize_intrs_v2,
+};
+
+static const struct rtw89_driver_info rtw89_8922ae_info = {
+ .chip = &rtw8922a_chip_info,
+ .bus = {
+ .pci = &rtw8922a_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8922ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8922),
+ .driver_data = (kernel_ulong_t)&rtw89_8922ae_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8922ae_id_table);
+
+static struct pci_driver rtw89_8922ae_driver = {
+ .name = "rtw89_8922ae",
+ .id_table = rtw89_8922ae_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8922ae_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11be wireless 8922AE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index aed05b026..1b2a40040 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -404,16 +404,18 @@ out:
void rtw89_tas_init(struct rtw89_dev *rtwdev)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
+ struct rtw89_acpi_dsm_result res = {};
int ret;
u8 val;
- ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &val);
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_SAR,
"acpi: cannot get TAS: %d\n", ret);
return;
}
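+ /* The DSM helper now fills a result struct; for TAS enablement the
+ * payload is the plain integer carried in res.u.value.
+ */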
+ val = res.u.value;
switch (val) {
case 0:
tas->enable = false;
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index c16443530..99896d85d 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -361,6 +361,9 @@ static int hal_enable_dma(struct rtw89_ser *ser)
ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
if (!ret)
clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_SER,
+ "lv1 rcvy fail to start dma: %d\n", ret);
return ret;
}
@@ -376,6 +379,9 @@ static int hal_stop_dma(struct rtw89_ser *ser)
ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
if (!ret)
set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_SER,
+ "lv1 rcvy fail to stop dma: %d\n", ret);
return ret;
}
@@ -584,6 +590,14 @@ struct __fw_backtrace_info {
static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
sizeof(struct __fw_backtrace_info));
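+/* Backtrace addresses at/above 0x30000000 appear to be a WCPU-side alias
+ * of the same memory; keeping the low 29 bits (GENMASK(28, 0)) recovers
+ * an address the indirect-access dump below can use.
+ */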
+static u32 convert_addr_from_wcpu(u32 wcpu_addr)
+{
+ if (wcpu_addr < 0x30000000)
+ return wcpu_addr;
+
+ return wcpu_addr & GENMASK(28, 0);
+}
+
static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
const struct __fw_backtrace_entry *ent)
{
@@ -591,7 +605,7 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 filter_model_addr = mac->filter_model_addr;
u32 indir_access_addr = mac->indir_access_addr;
- u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
+ u32 fwbt_addr = convert_addr_from_wcpu(ent->wcpu_addr);
u32 fwbt_size = ent->size;
u32 fwbt_key = ent->key;
u32 i;
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 7142cce16..c467a80ff 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -416,8 +416,11 @@ struct rtw89_rxinfo {
} __packed;
#define RTW89_RXINFO_W0_USR_NUM GENMASK(3, 0)
+#define RTW89_RXINFO_W0_USR_NUM_V1 GENMASK(4, 0)
#define RTW89_RXINFO_W0_FW_DEFINE GENMASK(15, 8)
+#define RTW89_RXINFO_W0_PLCP_LEN_V1 GENMASK(23, 16)
#define RTW89_RXINFO_W0_LSIG_LEN GENMASK(27, 16)
+#define RTW89_RXINFO_W0_INVALID_V1 BIT(27)
#define RTW89_RXINFO_W0_IS_TO_SELF BIT(28)
#define RTW89_RXINFO_W0_RX_CNT_VLD BIT(29)
#define RTW89_RXINFO_W0_LONG_RXD GENMASK(31, 30)
@@ -430,6 +433,7 @@ struct rtw89_phy_sts_hdr {
} __packed;
#define RTW89_PHY_STS_HDR_W0_IE_MAP GENMASK(4, 0)
+#define RTW89_PHY_STS_HDR_W0_VALID BIT(7)
#define RTW89_PHY_STS_HDR_W0_LEN GENMASK(15, 8)
#define RTW89_PHY_STS_HDR_W0_RSSI_AVG GENMASK(31, 24)
#define RTW89_PHY_STS_HDR_W1_RSSI_A GENMASK(7, 0)
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 660bf2ece..5c7ca36c0 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -73,13 +73,14 @@ static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
enum rtw89_mac_fwd_target fwd_target = enable ?
RTW89_FWD_DONT_CARE :
RTW89_FWD_TO_HOST;
- rtw89_mac_typ_fltr_opt(rtwdev, RTW89_MGNT, fwd_target, RTW89_MAC_0);
- rtw89_mac_typ_fltr_opt(rtwdev, RTW89_CTRL, fwd_target, RTW89_MAC_0);
- rtw89_mac_typ_fltr_opt(rtwdev, RTW89_DATA, fwd_target, RTW89_MAC_0);
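+ /* Dispatch through mac_def so each MAC generation (AX or the new BE)
+ * supplies its own type-filter implementation.
+ */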
+ mac->typ_fltr_opt(rtwdev, RTW89_MGNT, fwd_target, RTW89_MAC_0);
+ mac->typ_fltr_opt(rtwdev, RTW89_CTRL, fwd_target, RTW89_MAC_0);
+ mac->typ_fltr_opt(rtwdev, RTW89_DATA, fwd_target, RTW89_MAC_0);
}
static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 301bd0043..4e5b351f8 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void)
module_init(wl1251_sdio_init);
module_exit(wl1251_sdio_exit);
+MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 29292f06b..1936bb3af 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = {
module_spi_driver(wl1251_spi_driver);
+MODULE_DESCRIPTION("TI WL1251 SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index de045fe4c..b26d42b4e 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0);
MODULE_PARM_DESC(tcxo,
"TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
+MODULE_DESCRIPTION("TI WL12xx wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 20d9181b3..2ccac1cde 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400);
MODULE_PARM_DESC(num_rx_desc_param,
"Number of Rx descriptors: u8 (default is 32)");
+MODULE_DESCRIPTION("TI WiLink 8 wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index fb9ed9777..5736acb4d 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
+MODULE_DESCRIPTION("TI WLAN core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index f0686635d..eb5482ed7 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -447,6 +447,7 @@ module_sdio_driver(wl1271_sdio_driver);
module_param(dump, bool, 0600);
MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
+MODULE_DESCRIPTION("TI WLAN SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 7d9a139db..0aa2b2f3c 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = {
};
module_spi_driver(wl1271_spi_driver);
+MODULE_DESCRIPTION("TI WLAN SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index c7b4414cc..a9c3fb2cc 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -190,10 +190,25 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = {
}
};
+static const struct ieee80211_regdomain hwsim_world_regdom_custom_04 = {
+ .n_reg_rules = 6,
+ .alpha2 = "99",
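+ /* REG_RULE(start_MHz, end_MHz, max_bw_MHz, max_ant_gain_dBi,
+ * max_eirp_dBm, flags); the 5260-5320 rule is the one exercising the
+ * new NL80211_RRF_DFS_CONCURRENT flag.
+ */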
+ .reg_rules = {
+ REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
+ REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, 0),
+ REG_RULE(5260 - 10, 5320 + 10, 80, 0, 30,
+ NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS),
+ REG_RULE(5745 - 10, 5825 + 10, 80, 0, 30, 0),
+ REG_RULE(5855 - 10, 5925 + 10, 80, 0, 33, 0),
+ }
+};
+
static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
&hwsim_world_regdom_custom_01,
&hwsim_world_regdom_custom_02,
&hwsim_world_regdom_custom_03,
+ &hwsim_world_regdom_custom_04,
};
struct hwsim_vif_priv {
@@ -3803,7 +3818,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
}
nla_for_each_nested(peer, peers, rem) {
- struct cfg80211_pmsr_result result;
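+ /* Zero-init so fields the parser below does not set stay at zero. */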
+ struct cfg80211_pmsr_result result = {};
err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
if (err)
@@ -4029,6 +4044,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@@ -4134,6 +4151,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@@ -4237,6 +4256,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
@@ -5288,6 +5309,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
schedule_timeout_interruptible(1);
}
+ /* TODO: Add param */
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT);
+
if (param->no_vif)
ieee80211_hw_set(hw, NO_AUTO_VIF);
diff --git a/drivers/net/wireless/zydas/Kconfig b/drivers/net/wireless/zydas/Kconfig
index 08574433d..839e1217e 100644
--- a/drivers/net/wireless/zydas/Kconfig
+++ b/drivers/net/wireless/zydas/Kconfig
@@ -12,25 +12,6 @@ config WLAN_VENDOR_ZYDAS
if WLAN_VENDOR_ZYDAS
-config USB_ZD1201
- tristate "USB ZD1201 based Wireless device support"
- depends on CFG80211 && USB
- select WIRELESS_EXT
- select WEXT_PRIV
- select FW_LOADER
- help
- Say Y if you want to use wireless LAN adapters based on the ZyDAS
- ZD1201 chip.
-
- This driver makes the adapter appear as a normal Ethernet interface,
- typically on wlan0.
-
- The zd1201 device requires external firmware to be loaded.
- This can be found at http://linux-lc100020.sourceforge.net/
-
- To compile this driver as a module, choose M here: the
- module will be called zd1201.
-
source "drivers/net/wireless/zydas/zd1211rw/Kconfig"
endif # WLAN_VENDOR_ZYDAS
diff --git a/drivers/net/wireless/zydas/Makefile b/drivers/net/wireless/zydas/Makefile
index c70003d30..3e0a51db9 100644
--- a/drivers/net/wireless/zydas/Makefile
+++ b/drivers/net/wireless/zydas/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ZD1211RW) += zd1211rw/
-
-obj-$(CONFIG_USB_ZD1201) += zd1201.o
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
deleted file mode 100644
index 2814df1ec..000000000
--- a/drivers/net/wireless/zydas/zd1201.c
+++ /dev/null
@@ -1,1909 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for ZyDAS zd1201 based USB wireless devices.
- *
- * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org)
- *
- * Parts of this driver have been derived from a wlan-ng version
- * modified by ZyDAS. They also made documentation available, thanks!
- * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
- */
-
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-#include <net/iw_handler.h>
-#include <linux/string.h>
-#include <linux/if_arp.h>
-#include <linux/firmware.h>
-#include "zd1201.h"
-
-static const struct usb_device_id zd1201_table[] = {
- {USB_DEVICE(0x0586, 0x3400)}, /* Peabird USB Wireless Adapter */
- {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 USB Wireless Adapter */
- {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */
- {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */
- {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */
- {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */
- {}
-};
-
-static int ap; /* Are we an AP or a normal station? */
-
-#define ZD1201_VERSION "0.15"
-
-MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>");
-MODULE_DESCRIPTION("Driver for ZyDAS ZD1201 based USB Wireless adapters");
-MODULE_VERSION(ZD1201_VERSION);
-MODULE_LICENSE("GPL");
-module_param(ap, int, 0);
-MODULE_PARM_DESC(ap, "If non-zero Access Point firmware will be loaded");
-MODULE_DEVICE_TABLE(usb, zd1201_table);
-
-
-static int zd1201_fw_upload(struct usb_device *dev, int apfw)
-{
- const struct firmware *fw_entry;
- const char *data;
- unsigned long len;
- int err;
- unsigned char ret;
- char *buf;
- char *fwfile;
-
- if (apfw)
- fwfile = "zd1201-ap.fw";
- else
- fwfile = "zd1201.fw";
-
- err = request_firmware(&fw_entry, fwfile, &dev->dev);
- if (err) {
- dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile);
- dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n");
- dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n");
- return err;
- }
-
- data = fw_entry->data;
- len = fw_entry->size;
-
- buf = kmalloc(1024, GFP_ATOMIC);
- if (!buf) {
- err = -ENOMEM;
- goto exit;
- }
-
- while (len > 0) {
- int translen = (len > 1024) ? 1024 : len;
- memcpy(buf, data, translen);
-
- err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0,
- USB_DIR_OUT | 0x40, 0, 0, buf, translen,
- ZD1201_FW_TIMEOUT);
- if (err < 0)
- goto exit;
-
- len -= translen;
- data += translen;
- }
-
- err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x2,
- USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT);
- if (err < 0)
- goto exit;
-
- err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
- if (err < 0)
- goto exit;
-
- memcpy(&ret, buf, sizeof(ret));
-
- if (ret & 0x80) {
- err = -EIO;
- goto exit;
- }
-
- err = 0;
-exit:
- kfree(buf);
- release_firmware(fw_entry);
- return err;
-}
-
-MODULE_FIRMWARE("zd1201-ap.fw");
-MODULE_FIRMWARE("zd1201.fw");
-
-static void zd1201_usbfree(struct urb *urb)
-{
- struct zd1201 *zd = urb->context;
-
- switch(urb->status) {
- case -EILSEQ:
- case -ENODEV:
- case -ETIME:
- case -ENOENT:
- case -EPIPE:
- case -EOVERFLOW:
- case -ESHUTDOWN:
- dev_warn(&zd->usb->dev, "%s: urb failed: %d\n",
- zd->dev->name, urb->status);
- }
-
- kfree(urb->transfer_buffer);
- usb_free_urb(urb);
-}
-
-/* cmdreq message:
- u32 type
- u16 cmd
- u16 parm0
- u16 parm1
- u16 parm2
- u8 pad[4]
-
- total: 4 + 2 + 2 + 2 + 2 + 4 = 16
-*/
-static int zd1201_docmd(struct zd1201 *zd, int cmd, int parm0,
- int parm1, int parm2)
-{
- unsigned char *command;
- int ret;
- struct urb *urb;
-
- command = kmalloc(16, GFP_ATOMIC);
- if (!command)
- return -ENOMEM;
-
- *((__le32*)command) = cpu_to_le32(ZD1201_USB_CMDREQ);
- *((__le16*)&command[4]) = cpu_to_le16(cmd);
- *((__le16*)&command[6]) = cpu_to_le16(parm0);
- *((__le16*)&command[8]) = cpu_to_le16(parm1);
- *((__le16*)&command[10])= cpu_to_le16(parm2);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- kfree(command);
- return -ENOMEM;
- }
- usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2),
- command, 16, zd1201_usbfree, zd);
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret) {
- kfree(command);
- usb_free_urb(urb);
- }
-
- return ret;
-}
-
-/* Callback after sending out a packet */
-static void zd1201_usbtx(struct urb *urb)
-{
- struct zd1201 *zd = urb->context;
- netif_wake_queue(zd->dev);
-}
-
-/* Incoming data */
-static void zd1201_usbrx(struct urb *urb)
-{
- struct zd1201 *zd = urb->context;
- int free = 0;
- unsigned char *data = urb->transfer_buffer;
- struct sk_buff *skb;
- unsigned char type;
-
- if (!zd)
- return;
-
- switch(urb->status) {
- case -EILSEQ:
- case -ENODEV:
- case -ETIME:
- case -ENOENT:
- case -EPIPE:
- case -EOVERFLOW:
- case -ESHUTDOWN:
- dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
- zd->dev->name, urb->status);
- free = 1;
- goto exit;
- }
-
- if (urb->status != 0 || urb->actual_length == 0)
- goto resubmit;
-
- type = data[0];
- if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
- memcpy(zd->rxdata, data, urb->actual_length);
- zd->rxlen = urb->actual_length;
- zd->rxdatas = 1;
- wake_up(&zd->rxdataq);
- }
- /* Info frame */
- if (type == ZD1201_PACKET_INQUIRE) {
- int i = 0;
- unsigned short infotype, copylen;
- infotype = le16_to_cpu(*(__le16*)&data[6]);
-
- if (infotype == ZD1201_INF_LINKSTATUS) {
- short linkstatus;
-
- linkstatus = le16_to_cpu(*(__le16*)&data[8]);
- switch(linkstatus) {
- case 1:
- netif_carrier_on(zd->dev);
- break;
- case 2:
- netif_carrier_off(zd->dev);
- break;
- case 3:
- netif_carrier_off(zd->dev);
- break;
- case 4:
- netif_carrier_on(zd->dev);
- break;
- default:
- netif_carrier_off(zd->dev);
- }
- goto resubmit;
- }
- if (infotype == ZD1201_INF_ASSOCSTATUS) {
- short status = le16_to_cpu(*(__le16*)(data+8));
- int event;
- union iwreq_data wrqu;
-
- switch (status) {
- case ZD1201_ASSOCSTATUS_STAASSOC:
- case ZD1201_ASSOCSTATUS_REASSOC:
- event = IWEVREGISTERED;
- break;
- case ZD1201_ASSOCSTATUS_DISASSOC:
- case ZD1201_ASSOCSTATUS_ASSOCFAIL:
- case ZD1201_ASSOCSTATUS_AUTHFAIL:
- default:
- event = IWEVEXPIRED;
- }
- memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(zd->dev, event, &wrqu, NULL);
-
- goto resubmit;
- }
- if (infotype == ZD1201_INF_AUTHREQ) {
- union iwreq_data wrqu;
-
- memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- /* There isn't a event that trully fits this request.
- We assume that userspace will be smart enough to
- see a new station being expired and sends back a
- authstation ioctl to authorize it. */
- wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
- goto resubmit;
- }
- /* Other infotypes are handled outside this handler */
- zd->rxlen = 0;
- while (i < urb->actual_length) {
- copylen = le16_to_cpu(*(__le16*)&data[i+2]);
- /* Sanity check, sometimes we get junk */
- if (copylen+zd->rxlen > sizeof(zd->rxdata))
- break;
- memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen);
- zd->rxlen += copylen;
- i += 64;
- }
- if (i >= urb->actual_length) {
- zd->rxdatas = 1;
- wake_up(&zd->rxdataq);
- }
- goto resubmit;
- }
- /* Actual data */
- if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
- int datalen = urb->actual_length-1;
- unsigned short len, fc, seq;
-
- len = ntohs(*(__be16 *)&data[datalen-2]);
- if (len>datalen)
- len=datalen;
- fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
- seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);
-
- if (zd->monitor) {
- if (datalen < 24)
- goto resubmit;
- if (!(skb = dev_alloc_skb(datalen+24)))
- goto resubmit;
-
- skb_put_data(skb, &data[datalen - 16], 2);
- skb_put_data(skb, &data[datalen - 2], 2);
- skb_put_data(skb, &data[datalen - 14], 6);
- skb_put_data(skb, &data[datalen - 22], 6);
- skb_put_data(skb, &data[datalen - 8], 6);
- skb_put_data(skb, &data[datalen - 24], 2);
- skb_put_data(skb, data, len);
- skb->protocol = eth_type_trans(skb, zd->dev);
- zd->dev->stats.rx_packets++;
- zd->dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- goto resubmit;
- }
-
- if ((seq & IEEE80211_SCTL_FRAG) ||
- (fc & IEEE80211_FCTL_MOREFRAGS)) {
- struct zd1201_frag *frag = NULL;
- char *ptr;
-
- if (datalen<14)
- goto resubmit;
- if ((seq & IEEE80211_SCTL_FRAG) == 0) {
- frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
- if (!frag)
- goto resubmit;
- skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2);
- if (!skb) {
- kfree(frag);
- goto resubmit;
- }
- frag->skb = skb;
- frag->seq = seq & IEEE80211_SCTL_SEQ;
- skb_reserve(skb, 2);
- skb_put_data(skb, &data[datalen - 14], 12);
- skb_put_data(skb, &data[6], 2);
- skb_put_data(skb, data + 8, len);
- hlist_add_head(&frag->fnode, &zd->fraglist);
- goto resubmit;
- }
- hlist_for_each_entry(frag, &zd->fraglist, fnode)
- if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
- break;
- if (!frag)
- goto resubmit;
- skb = frag->skb;
- ptr = skb_put(skb, len);
- if (ptr)
- memcpy(ptr, data+8, len);
- if (fc & IEEE80211_FCTL_MOREFRAGS)
- goto resubmit;
- hlist_del_init(&frag->fnode);
- kfree(frag);
- } else {
- if (datalen<14)
- goto resubmit;
- skb = dev_alloc_skb(len + 14 + 2);
- if (!skb)
- goto resubmit;
- skb_reserve(skb, 2);
- skb_put_data(skb, &data[datalen - 14], 12);
- skb_put_data(skb, &data[6], 2);
- skb_put_data(skb, data + 8, len);
- }
- skb->protocol = eth_type_trans(skb, zd->dev);
- zd->dev->stats.rx_packets++;
- zd->dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- }
-resubmit:
- memset(data, 0, ZD1201_RXSIZE);
-
- urb->status = 0;
- urb->dev = zd->usb;
- if(usb_submit_urb(urb, GFP_ATOMIC))
- free = 1;
-
-exit:
- if (free) {
- zd->rxlen = 0;
- zd->rxdatas = 1;
- wake_up(&zd->rxdataq);
- kfree(urb->transfer_buffer);
- }
-}
-
-static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
- unsigned int riddatalen)
-{
- int err;
- int i = 0;
- int code;
- int rid_fid;
- int length;
- unsigned char *pdata;
-
- zd->rxdatas = 0;
- err = zd1201_docmd(zd, ZD1201_CMDCODE_ACCESS, rid, 0, 0);
- if (err)
- return err;
-
- wait_event_interruptible(zd->rxdataq, zd->rxdatas);
- if (!zd->rxlen)
- return -EIO;
-
- code = le16_to_cpu(*(__le16*)(&zd->rxdata[4]));
- rid_fid = le16_to_cpu(*(__le16*)(&zd->rxdata[6]));
- length = le16_to_cpu(*(__le16*)(&zd->rxdata[8]));
- if (length > zd->rxlen)
- length = zd->rxlen-6;
-
- /* If access bit is not on, then error */
- if ((code & ZD1201_ACCESSBIT) != ZD1201_ACCESSBIT || rid_fid != rid )
- return -EINVAL;
-
- /* Not enough buffer for allocating data */
- if (riddatalen != (length - 4)) {
- dev_dbg(&zd->usb->dev, "riddatalen mismatches, expected=%u, (packet=%u) length=%u, rid=0x%04X, rid_fid=0x%04X\n",
- riddatalen, zd->rxlen, length, rid, rid_fid);
- return -ENODATA;
- }
-
- zd->rxdatas = 0;
- /* Issue SetRxRid commnd */
- err = zd1201_docmd(zd, ZD1201_CMDCODE_SETRXRID, rid, 0, length);
- if (err)
- return err;
-
- /* Receive RID record from resource packets */
- wait_event_interruptible(zd->rxdataq, zd->rxdatas);
- if (!zd->rxlen)
- return -EIO;
-
- if (zd->rxdata[zd->rxlen - 1] != ZD1201_PACKET_RESOURCE) {
- dev_dbg(&zd->usb->dev, "Packet type mismatch: 0x%x not 0x3\n",
- zd->rxdata[zd->rxlen-1]);
- return -EINVAL;
- }
-
- /* Set the data pointer and received data length */
- pdata = zd->rxdata;
- length = zd->rxlen;
-
- do {
- int actual_length;
-
- actual_length = (length > 64) ? 64 : length;
-
- if (pdata[0] != 0x3) {
- dev_dbg(&zd->usb->dev, "Rx Resource packet type error: %02X\n",
- pdata[0]);
- return -EINVAL;
- }
-
- if (actual_length != 64) {
- /* Trim the last packet type byte */
- actual_length--;
- }
-
- /* Skip the 4 bytes header (RID length and RID) */
- if (i == 0) {
- pdata += 8;
- actual_length -= 8;
- } else {
- pdata += 4;
- actual_length -= 4;
- }
-
- memcpy(riddata, pdata, actual_length);
- riddata += actual_length;
- pdata += actual_length;
- length -= 64;
- i++;
- } while (length > 0);
-
- return 0;
-}
-
-/*
- * resreq:
- * byte type
- * byte sequence
- * u16 reserved
- * byte data[12]
- * total: 16
- */
-static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len, int wait)
-{
- int err;
- unsigned char *request;
- int reqlen;
- char seq=0;
- struct urb *urb;
- gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
-
- len += 4; /* first 4 are for header */
-
- zd->rxdatas = 0;
- zd->rxlen = 0;
- for (seq=0; len > 0; seq++) {
- request = kzalloc(16, gfp_mask);
- if (!request)
- return -ENOMEM;
- urb = usb_alloc_urb(0, gfp_mask);
- if (!urb) {
- kfree(request);
- return -ENOMEM;
- }
- reqlen = len>12 ? 12 : len;
- request[0] = ZD1201_USB_RESREQ;
- request[1] = seq;
- request[2] = 0;
- request[3] = 0;
- if (request[1] == 0) {
- /* add header */
- *(__le16*)&request[4] = cpu_to_le16((len-2+1)/2);
- *(__le16*)&request[6] = cpu_to_le16(rid);
- memcpy(request+8, buf, reqlen-4);
- buf += reqlen-4;
- } else {
- memcpy(request+4, buf, reqlen);
- buf += reqlen;
- }
-
- len -= reqlen;
-
- usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb,
- zd->endp_out2), request, 16, zd1201_usbfree, zd);
- err = usb_submit_urb(urb, gfp_mask);
- if (err)
- goto err;
- }
-
- request = kmalloc(16, gfp_mask);
- if (!request)
- return -ENOMEM;
- urb = usb_alloc_urb(0, gfp_mask);
- if (!urb) {
- kfree(request);
- return -ENOMEM;
- }
- *((__le32*)request) = cpu_to_le32(ZD1201_USB_CMDREQ);
- *((__le16*)&request[4]) =
- cpu_to_le16(ZD1201_CMDCODE_ACCESS|ZD1201_ACCESSBIT);
- *((__le16*)&request[6]) = cpu_to_le16(rid);
- *((__le16*)&request[8]) = cpu_to_le16(0);
- *((__le16*)&request[10]) = cpu_to_le16(0);
- usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2),
- request, 16, zd1201_usbfree, zd);
- err = usb_submit_urb(urb, gfp_mask);
- if (err)
- goto err;
-
- if (wait) {
- wait_event_interruptible(zd->rxdataq, zd->rxdatas);
- if (!zd->rxlen || le16_to_cpu(*(__le16*)&zd->rxdata[6]) != rid) {
- dev_dbg(&zd->usb->dev, "wrong or no RID received\n");
- }
- }
-
- return 0;
-err:
- kfree(request);
- usb_free_urb(urb);
- return err;
-}
-
-static inline int zd1201_getconfig16(struct zd1201 *zd, int rid, short *val)
-{
- int err;
- __le16 zdval;
-
- err = zd1201_getconfig(zd, rid, &zdval, sizeof(__le16));
- if (err)
- return err;
- *val = le16_to_cpu(zdval);
- return 0;
-}
-
-static inline int zd1201_setconfig16(struct zd1201 *zd, int rid, short val)
-{
- __le16 zdval = cpu_to_le16(val);
- return (zd1201_setconfig(zd, rid, &zdval, sizeof(__le16), 1));
-}
-
-static int zd1201_drvr_start(struct zd1201 *zd)
-{
- int err, i;
- short max;
- __le16 zdmax;
- unsigned char *buffer;
-
- buffer = kzalloc(ZD1201_RXSIZE, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- usb_fill_bulk_urb(zd->rx_urb, zd->usb,
- usb_rcvbulkpipe(zd->usb, zd->endp_in), buffer, ZD1201_RXSIZE,
- zd1201_usbrx, zd);
-
- err = usb_submit_urb(zd->rx_urb, GFP_KERNEL);
- if (err)
- goto err_buffer;
-
- err = zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
- if (err)
- goto err_urb;
-
- err = zd1201_getconfig(zd, ZD1201_RID_CNFMAXTXBUFFERNUMBER, &zdmax,
- sizeof(__le16));
- if (err)
- goto err_urb;
-
- max = le16_to_cpu(zdmax);
- for (i=0; i<max; i++) {
- err = zd1201_docmd(zd, ZD1201_CMDCODE_ALLOC, 1514, 0, 0);
- if (err)
- goto err_urb;
- }
-
- return 0;
-
-err_urb:
- usb_kill_urb(zd->rx_urb);
- return err;
-err_buffer:
- kfree(buffer);
- return err;
-}
-
-/* Magic alert: The firmware doesn't seem to like the MAC state being
- * toggled in promisc (aka monitor) mode.
- * (It works a number of times, but will halt eventually)
- * So we turn it of before disabling and on after enabling if needed.
- */
-static int zd1201_enable(struct zd1201 *zd)
-{
- int err;
-
- if (zd->mac_enabled)
- return 0;
-
- err = zd1201_docmd(zd, ZD1201_CMDCODE_ENABLE, 0, 0, 0);
- if (!err)
- zd->mac_enabled = 1;
-
- if (zd->monitor)
- err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 1);
-
- return err;
-}
-
-static int zd1201_disable(struct zd1201 *zd)
-{
- int err;
-
- if (!zd->mac_enabled)
- return 0;
- if (zd->monitor) {
- err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0);
- if (err)
- return err;
- }
-
- err = zd1201_docmd(zd, ZD1201_CMDCODE_DISABLE, 0, 0, 0);
- if (!err)
- zd->mac_enabled = 0;
- return err;
-}
-
-static int zd1201_mac_reset(struct zd1201 *zd)
-{
- if (!zd->mac_enabled)
- return 0;
- zd1201_disable(zd);
- return zd1201_enable(zd);
-}
-
-static int zd1201_join(struct zd1201 *zd, char *essid, int essidlen)
-{
- int err, val;
- char buf[IW_ESSID_MAX_SIZE+2];
-
- err = zd1201_disable(zd);
- if (err)
- return err;
-
- val = ZD1201_CNFAUTHENTICATION_OPENSYSTEM;
- val |= ZD1201_CNFAUTHENTICATION_SHAREDKEY;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, val);
- if (err)
- return err;
-
- *(__le16 *)buf = cpu_to_le16(essidlen);
- memcpy(buf+2, essid, essidlen);
- if (!zd->ap) { /* Normal station */
- err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
- IW_ESSID_MAX_SIZE+2, 1);
- if (err)
- return err;
- } else { /* AP */
- err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNSSID, buf,
- IW_ESSID_MAX_SIZE+2, 1);
- if (err)
- return err;
- }
-
- err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR,
- zd->dev->dev_addr, zd->dev->addr_len, 1);
- if (err)
- return err;
-
- err = zd1201_enable(zd);
- if (err)
- return err;
-
- msleep(100);
- return 0;
-}
-
-static int zd1201_net_open(struct net_device *dev)
-{
- struct zd1201 *zd = netdev_priv(dev);
-
- /* Start MAC with wildcard if no essid set */
- if (!zd->mac_enabled)
- zd1201_join(zd, zd->essid, zd->essidlen);
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int zd1201_net_stop(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-/*
- RFC 1042 encapsulates Ethernet frames in 802.11 frames
- by prefixing them with 0xaa, 0xaa, 0x03) followed by a SNAP OID of 0
- (0x00, 0x00, 0x00). Zd requires an additional padding, copy
- of ethernet addresses, length of the standard RFC 1042 packet
- and a command byte (which is nul for tx).
-
- tx frame (from Wlan NG):
- RFC 1042:
- llc 0xAA 0xAA 0x03 (802.2 LLC)
- snap 0x00 0x00 0x00 (Ethernet encapsulated)
- type 2 bytes, Ethernet type field
- payload (minus eth header)
- Zydas specific:
- padding 1B if (skb->len+8+1)%64==0
- Eth MAC addr 12 bytes, Ethernet MAC addresses
- length 2 bytes, RFC 1042 packet length
- (llc+snap+type+payload)
- zd 1 null byte, zd1201 packet type
- */
-static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct zd1201 *zd = netdev_priv(dev);
- unsigned char *txbuf = zd->txdata;
- int txbuflen, pad = 0, err;
- struct urb *urb = zd->tx_urb;
-
- if (!zd->mac_enabled || zd->monitor) {
- dev->stats.tx_dropped++;
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- netif_stop_queue(dev);
-
- txbuflen = skb->len + 8 + 1;
- if (txbuflen%64 == 0) {
- pad = 1;
- txbuflen++;
- }
- txbuf[0] = 0xAA;
- txbuf[1] = 0xAA;
- txbuf[2] = 0x03;
- txbuf[3] = 0x00; /* rfc1042 */
- txbuf[4] = 0x00;
- txbuf[5] = 0x00;
-
- skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12);
- if (pad)
- txbuf[skb->len-12+6]=0;
- skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12);
- *(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6);
- txbuf[txbuflen-1] = 0;
-
- usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out),
- txbuf, txbuflen, zd1201_usbtx, zd);
-
- err = usb_submit_urb(zd->tx_urb, GFP_ATOMIC);
- if (err) {
- dev->stats.tx_errors++;
- netif_start_queue(dev);
- } else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- }
- kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
-static void zd1201_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct zd1201 *zd = netdev_priv(dev);
-
- if (!zd)
- return;
- dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n",
- dev->name);
- usb_unlink_urb(zd->tx_urb);
- dev->stats.tx_errors++;
- /* Restart the timeout to quiet the watchdog: */
- netif_trans_update(dev); /* prevent tx timeout */
-}
-
-static int zd1201_set_mac_address(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct zd1201 *zd = netdev_priv(dev);
- int err;
-
- if (!zd)
- return -ENODEV;
-
- err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR,
- addr->sa_data, dev->addr_len, 1);
- if (err)
- return err;
- eth_hw_addr_set(dev, addr->sa_data);
-
- return zd1201_mac_reset(zd);
-}
-
-static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
-{
- struct zd1201 *zd = netdev_priv(dev);
-
- return &zd->iwstats;
-}
-
-static void zd1201_set_multicast(struct net_device *dev)
-{
- struct zd1201 *zd = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
- int i;
-
- if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
- return;
-
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
- zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
- netdev_mc_count(dev) * ETH_ALEN, 0);
-}
-
-static int zd1201_config_commit(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
-{
- struct zd1201 *zd = netdev_priv(dev);
-
- return zd1201_mac_reset(zd);
-}
-
-static int zd1201_get_name(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- strcpy(wrqu->name, "IEEE 802.11b");
- return 0;
-}
-
-static int zd1201_set_freq(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_freq *freq = &wrqu->freq;
- struct zd1201 *zd = netdev_priv(dev);
- short channel = 0;
- int err;
-
- if (freq->e == 0)
- channel = freq->m;
- else
- channel = ieee80211_frequency_to_channel(freq->m);
-
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
- if (err)
- return err;
-
- zd1201_mac_reset(zd);
-
- return 0;
-}
-
-static int zd1201_get_freq(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_freq *freq = &wrqu->freq;
- struct zd1201 *zd = netdev_priv(dev);
- short channel;
- int err;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, &channel);
- if (err)
- return err;
- freq->e = 0;
- freq->m = channel;
-
- return 0;
-}
-
-static int zd1201_set_mode(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- __u32 *mode = &wrqu->mode;
- struct zd1201 *zd = netdev_priv(dev);
- short porttype, monitor = 0;
- unsigned char buffer[IW_ESSID_MAX_SIZE+2];
- int err;
-
- if (zd->ap) {
- if (*mode != IW_MODE_MASTER)
- return -EINVAL;
- return 0;
- }
-
- err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0);
- if (err)
- return err;
- zd->dev->type = ARPHRD_ETHER;
- switch(*mode) {
- case IW_MODE_MONITOR:
- monitor = 1;
- zd->dev->type = ARPHRD_IEEE80211;
- /* Make sure we are no longer associated with by
- setting an 'impossible' essid.
- (otherwise we mess up firmware)
- */
- zd1201_join(zd, "\0-*#\0", 5);
- /* Put port in pIBSS */
- fallthrough;
- case 8: /* No pseudo-IBSS in wireless extensions (yet) */
- porttype = ZD1201_PORTTYPE_PSEUDOIBSS;
- break;
- case IW_MODE_ADHOC:
- porttype = ZD1201_PORTTYPE_IBSS;
- break;
- case IW_MODE_INFRA:
- porttype = ZD1201_PORTTYPE_BSS;
- break;
- default:
- return -EINVAL;
- }
-
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype);
- if (err)
- return err;
- if (zd->monitor && !monitor) {
- zd1201_disable(zd);
- *(__le16 *)buffer = cpu_to_le16(zd->essidlen);
- memcpy(buffer+2, zd->essid, zd->essidlen);
- err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID,
- buffer, IW_ESSID_MAX_SIZE+2, 1);
- if (err)
- return err;
- }
- zd->monitor = monitor;
- /* If monitor mode is set we don't actually turn it on here since it
- * is done during mac reset anyway (see zd1201_mac_enable).
- */
- zd1201_mac_reset(zd);
-
- return 0;
-}
-
-static int zd1201_get_mode(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- __u32 *mode = &wrqu->mode;
- struct zd1201 *zd = netdev_priv(dev);
- short porttype;
- int err;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFPORTTYPE, &porttype);
- if (err)
- return err;
- switch(porttype) {
- case ZD1201_PORTTYPE_IBSS:
- *mode = IW_MODE_ADHOC;
- break;
- case ZD1201_PORTTYPE_BSS:
- *mode = IW_MODE_INFRA;
- break;
- case ZD1201_PORTTYPE_WDS:
- *mode = IW_MODE_REPEAT;
- break;
- case ZD1201_PORTTYPE_PSEUDOIBSS:
- *mode = 8;/* No Pseudo-IBSS... */
- break;
- case ZD1201_PORTTYPE_AP:
- *mode = IW_MODE_MASTER;
- break;
- default:
- dev_dbg(&zd->usb->dev, "Unknown porttype: %d\n",
- porttype);
- *mode = IW_MODE_AUTO;
- }
- if (zd->monitor)
- *mode = IW_MODE_MONITOR;
-
- return 0;
-}
-
-static int zd1201_get_range(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *wrq = &wrqu->data;
- struct iw_range *range = (struct iw_range *)extra;
-
- wrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(struct iw_range));
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = WIRELESS_EXT;
-
- range->max_qual.qual = 128;
- range->max_qual.level = 128;
- range->max_qual.noise = 128;
- range->max_qual.updated = 7;
-
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
- range->num_encoding_sizes = 2;
- range->max_encoding_tokens = ZD1201_NUMKEYS;
-
- range->num_bitrates = 4;
- range->bitrate[0] = 1000000;
- range->bitrate[1] = 2000000;
- range->bitrate[2] = 5500000;
- range->bitrate[3] = 11000000;
-
- range->min_rts = 0;
- range->min_frag = ZD1201_FRAGMIN;
- range->max_rts = ZD1201_RTSMAX;
- range->min_frag = ZD1201_FRAGMAX;
-
- return 0;
-}
-
-/* Little bit of magic here: we only get the quality if we poll
- * for it, and we never get an actual request to trigger such
- * a poll. Therefore we 'assume' that the user will soon ask for
- * the stats after asking the bssid.
- */
-static int zd1201_get_wap(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct sockaddr *ap_addr = &wrqu->ap_addr;
- struct zd1201 *zd = netdev_priv(dev);
- unsigned char buffer[6];
-
- if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) {
- /* Unfortunately the quality and noise reported is useless.
- they seem to be accumulators that increase until you
- read them, unless we poll on a fixed interval we can't
- use them
- */
- /*zd->iwstats.qual.qual = le16_to_cpu(((__le16 *)buffer)[0]);*/
- zd->iwstats.qual.level = le16_to_cpu(((__le16 *)buffer)[1]);
- /*zd->iwstats.qual.noise = le16_to_cpu(((__le16 *)buffer)[2]);*/
- zd->iwstats.qual.updated = 2;
- }
-
- return zd1201_getconfig(zd, ZD1201_RID_CURRENTBSSID, ap_addr->sa_data, 6);
-}
-
-static int zd1201_set_scan(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- /* We do everything in get_scan */
- return 0;
-}
-
-static int zd1201_get_scan(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *srq = &wrqu->data;
- struct zd1201 *zd = netdev_priv(dev);
- int err, i, j, enabled_save;
- struct iw_event iwe;
- char *cev = extra;
- char *end_buf = extra + IW_SCAN_MAX_DATA;
-
- /* No scanning in AP mode */
- if (zd->ap)
- return -EOPNOTSUPP;
-
- /* Scan doesn't seem to work if disabled */
- enabled_save = zd->mac_enabled;
- zd1201_enable(zd);
-
- zd->rxdatas = 0;
- err = zd1201_docmd(zd, ZD1201_CMDCODE_INQUIRE,
- ZD1201_INQ_SCANRESULTS, 0, 0);
- if (err)
- return err;
-
- wait_event_interruptible(zd->rxdataq, zd->rxdatas);
- if (!zd->rxlen)
- return -EIO;
-
- if (le16_to_cpu(*(__le16*)&zd->rxdata[2]) != ZD1201_INQ_SCANRESULTS)
- return -EIO;
-
- for(i=8; i<zd->rxlen; i+=62) {
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, zd->rxdata+i+6, 6);
- cev = iwe_stream_add_event(info, cev, end_buf,
- &iwe, IW_EV_ADDR_LEN);
-
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.length = zd->rxdata[i+16];
- iwe.u.data.flags = 1;
- cev = iwe_stream_add_point(info, cev, end_buf,
- &iwe, zd->rxdata+i+18);
-
- iwe.cmd = SIOCGIWMODE;
- if (zd->rxdata[i+14]&0x01)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- cev = iwe_stream_add_event(info, cev, end_buf,
- &iwe, IW_EV_UINT_LEN);
-
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = zd->rxdata[i+0];
- iwe.u.freq.e = 0;
- cev = iwe_stream_add_event(info, cev, end_buf,
- &iwe, IW_EV_FREQ_LEN);
-
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = 0;
- iwe.u.bitrate.disabled = 0;
- for (j=0; j<10; j++) if (zd->rxdata[i+50+j]) {
- iwe.u.bitrate.value = (zd->rxdata[i+50+j]&0x7f)*500000;
- cev = iwe_stream_add_event(info, cev, end_buf,
- &iwe, IW_EV_PARAM_LEN);
- }
-
- iwe.cmd = SIOCGIWENCODE;
- iwe.u.data.length = 0;
- if (zd->rxdata[i+14]&0x10)
- iwe.u.data.flags = IW_ENCODE_ENABLED;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- cev = iwe_stream_add_point(info, cev, end_buf, &iwe, NULL);
-
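-		/* Scale the raw noise and level readings into rough
-		   -100..0 dBm-style values. */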
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = zd->rxdata[i+4];
-		iwe.u.qual.noise = zd->rxdata[i+2]/10 - 100;
- iwe.u.qual.level = (256+zd->rxdata[i+4]*100)/255-100;
- iwe.u.qual.updated = 7;
- cev = iwe_stream_add_event(info, cev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
- }
-
- if (!enabled_save)
- zd1201_disable(zd);
-
- srq->length = cev - extra;
- srq->flags = 0;
-
- return 0;
-}
-
-static int zd1201_set_essid(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
-{
- struct iw_point *data = &wrqu->data;
- struct zd1201 *zd = netdev_priv(dev);
-
- if (data->length > IW_ESSID_MAX_SIZE)
- return -EINVAL;
- if (data->length < 1)
- data->length = 1;
- zd->essidlen = data->length;
- memset(zd->essid, 0, IW_ESSID_MAX_SIZE+1);
- memcpy(zd->essid, essid, data->length);
- return zd1201_join(zd, zd->essid, zd->essidlen);
-}
-
-static int zd1201_get_essid(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
-{
- struct iw_point *data = &wrqu->data;
- struct zd1201 *zd = netdev_priv(dev);
-
- memcpy(essid, zd->essid, zd->essidlen);
- data->flags = 1;
- data->length = zd->essidlen;
-
- return 0;
-}
-
-static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *nick)
-{
- struct iw_point *data = &wrqu->data;
- strcpy(nick, "zd1201");
- data->flags = 1;
- data->length = strlen(nick);
- return 0;
-}
-
-static int zd1201_set_rate(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->bitrate;
- struct zd1201 *zd = netdev_priv(dev);
- short rate;
- int err;
-
- switch (rrq->value) {
- case 1000000:
- rate = ZD1201_RATEB1;
- break;
- case 2000000:
- rate = ZD1201_RATEB2;
- break;
- case 5500000:
- rate = ZD1201_RATEB5;
- break;
- case 11000000:
- default:
- rate = ZD1201_RATEB11;
- break;
- }
- if (!rrq->fixed) { /* Also enable all lower bitrates */
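-		/* The ZD1201_RATEB* values are single-bit flags (1, 2,
-		   4, 8), so rate - 1 has every lower rate bit set. */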
-		rate |= rate - 1;
- }
-
- err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, rate);
- if (err)
- return err;
-
- return zd1201_mac_reset(zd);
-}
-
-static int zd1201_get_rate(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->bitrate;
- struct zd1201 *zd = netdev_priv(dev);
- short rate;
- int err;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CURRENTTXRATE, &rate);
- if (err)
- return err;
-
-	switch (rate) {
- case 1:
- rrq->value = 1000000;
- break;
- case 2:
- rrq->value = 2000000;
- break;
- case 5:
- rrq->value = 5500000;
- break;
- case 11:
- rrq->value = 11000000;
- break;
- default:
- rrq->value = 0;
- }
- rrq->fixed = 0;
- rrq->disabled = 0;
-
- return 0;
-}
-
-static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rts = &wrqu->rts;
- struct zd1201 *zd = netdev_priv(dev);
- int err;
- short val = rts->value;
-
- if (rts->disabled || !rts->fixed)
- val = ZD1201_RTSMAX;
- if (val > ZD1201_RTSMAX)
- return -EINVAL;
- if (val < 0)
- return -EINVAL;
-
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, val);
- if (err)
- return err;
- return zd1201_mac_reset(zd);
-}
-
-static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rts = &wrqu->rts;
- struct zd1201 *zd = netdev_priv(dev);
- short rtst;
- int err;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, &rtst);
- if (err)
- return err;
- rts->value = rtst;
- rts->disabled = (rts->value == ZD1201_RTSMAX);
- rts->fixed = 1;
-
- return 0;
-}
-
-static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *frag = &wrqu->frag;
- struct zd1201 *zd = netdev_priv(dev);
- int err;
- short val = frag->value;
-
- if (frag->disabled || !frag->fixed)
- val = ZD1201_FRAGMAX;
- if (val > ZD1201_FRAGMAX)
- return -EINVAL;
- if (val < ZD1201_FRAGMIN)
- return -EINVAL;
- if (val & 1)
- return -EINVAL;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, val);
- if (err)
- return err;
- return zd1201_mac_reset(zd);
-}
-
-static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *frag = &wrqu->frag;
- struct zd1201 *zd = netdev_priv(dev);
- short fragt;
- int err;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, &fragt);
- if (err)
- return err;
- frag->value = fragt;
- frag->disabled = (frag->value == ZD1201_FRAGMAX);
- frag->fixed = 1;
-
- return 0;
-}
-
-static int zd1201_set_retry(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-static int zd1201_get_retry(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-static int zd1201_set_encode(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *key)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct zd1201 *zd = netdev_priv(dev);
- short i;
- int err, rid;
-
- if (erq->length > ZD1201_MAXKEYLEN)
- return -EINVAL;
-
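-	/* An index of 0 means "use the current default key": read it
-	   back from the device. Any other index becomes the default. */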
-	i = (erq->flags & IW_ENCODE_INDEX) - 1;
- if (i == -1) {
-		err = zd1201_getconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, &i);
- if (err)
- return err;
- } else {
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, i);
- if (err)
- return err;
- }
-
- if (i < 0 || i >= ZD1201_NUMKEYS)
- return -EINVAL;
-
- rid = ZD1201_RID_CNFDEFAULTKEY0 + i;
- err = zd1201_setconfig(zd, rid, key, erq->length, 1);
- if (err)
- return err;
- zd->encode_keylen[i] = erq->length;
- memcpy(zd->encode_keys[i], key, erq->length);
-
-	i = 0;
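-	/* CNFWEBFLAGS: bit 0 enables WEP, bit 1 selects restricted
-	   (shared key) operation. */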
- if (!(erq->flags & IW_ENCODE_DISABLED & IW_ENCODE_MODE)) {
- i |= 0x01;
- zd->encode_enabled = 1;
- } else
- zd->encode_enabled = 0;
- if (erq->flags & IW_ENCODE_RESTRICTED & IW_ENCODE_MODE) {
- i |= 0x02;
- zd->encode_restricted = 1;
- } else
- zd->encode_restricted = 0;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFWEBFLAGS, i);
- if (err)
- return err;
-
- if (zd->encode_enabled)
- i = ZD1201_CNFAUTHENTICATION_SHAREDKEY;
- else
- i = ZD1201_CNFAUTHENTICATION_OPENSYSTEM;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, i);
- if (err)
- return err;
-
- return zd1201_mac_reset(zd);
-}
-
-static int zd1201_get_encode(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *key)
-{
- struct iw_point *erq = &wrqu->encoding;
- struct zd1201 *zd = netdev_priv(dev);
- short i;
- int err;
-
- if (zd->encode_enabled)
- erq->flags = IW_ENCODE_ENABLED;
- else
- erq->flags = IW_ENCODE_DISABLED;
- if (zd->encode_restricted)
- erq->flags |= IW_ENCODE_RESTRICTED;
- else
- erq->flags |= IW_ENCODE_OPEN;
-
-	i = (erq->flags & IW_ENCODE_INDEX) - 1;
- if (i == -1) {
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, &i);
- if (err)
- return err;
- }
-	if (i < 0 || i >= ZD1201_NUMKEYS)
- return -EINVAL;
-
- erq->flags |= i+1;
-
- erq->length = zd->encode_keylen[i];
- memcpy(key, zd->encode_keys[i], erq->length);
-
- return 0;
-}
-
-static int zd1201_set_power(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *vwrq = &wrqu->power;
- struct zd1201 *zd = netdev_priv(dev);
- short enabled, duration, level;
- int err;
-
- enabled = vwrq->disabled ? 0 : 1;
- if (enabled) {
- if (vwrq->flags & IW_POWER_PERIOD) {
- duration = vwrq->value;
- err = zd1201_setconfig16(zd,
- ZD1201_RID_CNFMAXSLEEPDURATION, duration);
- if (err)
- return err;
- goto out;
- }
- if (vwrq->flags & IW_POWER_TIMEOUT) {
- err = zd1201_getconfig16(zd,
- ZD1201_RID_CNFMAXSLEEPDURATION, &duration);
- if (err)
- return err;
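-			/* Derive a 0-4 power-save level from the requested
-			   timeout relative to the maximum sleep duration. */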
- level = vwrq->value * 4 / duration;
- if (level > 4)
- level = 4;
- if (level < 0)
- level = 0;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFPMEPS,
- level);
- if (err)
- return err;
- goto out;
- }
- return -EINVAL;
- }
-out:
- return zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled);
-}
-
-static int zd1201_get_power(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *vwrq = &wrqu->power;
- struct zd1201 *zd = netdev_priv(dev);
- short enabled, level, duration;
- int err;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMENABLED, &enabled);
- if (err)
- return err;
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMEPS, &level);
- if (err)
- return err;
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration);
- if (err)
- return err;
- vwrq->disabled = enabled ? 0 : 1;
- if (vwrq->flags & IW_POWER_TYPE) {
- if (vwrq->flags & IW_POWER_PERIOD) {
- vwrq->value = duration;
- vwrq->flags = IW_POWER_PERIOD;
- } else {
- vwrq->value = duration * level / 4;
- vwrq->flags = IW_POWER_TIMEOUT;
- }
- }
- if (vwrq->flags & IW_POWER_MODE) {
- if (enabled && level)
- vwrq->flags = IW_POWER_UNICAST_R;
- else
- vwrq->flags = IW_POWER_ALL_R;
- }
-
- return 0;
-}
-
-
-static const iw_handler zd1201_iw_handler[] =
-{
- IW_HANDLER(SIOCSIWCOMMIT, zd1201_config_commit),
- IW_HANDLER(SIOCGIWNAME, zd1201_get_name),
- IW_HANDLER(SIOCSIWFREQ, zd1201_set_freq),
- IW_HANDLER(SIOCGIWFREQ, zd1201_get_freq),
- IW_HANDLER(SIOCSIWMODE, zd1201_set_mode),
- IW_HANDLER(SIOCGIWMODE, zd1201_get_mode),
- IW_HANDLER(SIOCGIWRANGE, zd1201_get_range),
- IW_HANDLER(SIOCGIWAP, zd1201_get_wap),
- IW_HANDLER(SIOCSIWSCAN, zd1201_set_scan),
- IW_HANDLER(SIOCGIWSCAN, zd1201_get_scan),
- IW_HANDLER(SIOCSIWESSID, zd1201_set_essid),
- IW_HANDLER(SIOCGIWESSID, zd1201_get_essid),
- IW_HANDLER(SIOCGIWNICKN, zd1201_get_nick),
- IW_HANDLER(SIOCSIWRATE, zd1201_set_rate),
- IW_HANDLER(SIOCGIWRATE, zd1201_get_rate),
- IW_HANDLER(SIOCSIWRTS, zd1201_set_rts),
- IW_HANDLER(SIOCGIWRTS, zd1201_get_rts),
- IW_HANDLER(SIOCSIWFRAG, zd1201_set_frag),
- IW_HANDLER(SIOCGIWFRAG, zd1201_get_frag),
- IW_HANDLER(SIOCSIWRETRY, zd1201_set_retry),
- IW_HANDLER(SIOCGIWRETRY, zd1201_get_retry),
- IW_HANDLER(SIOCSIWENCODE, zd1201_set_encode),
- IW_HANDLER(SIOCGIWENCODE, zd1201_get_encode),
- IW_HANDLER(SIOCSIWPOWER, zd1201_set_power),
- IW_HANDLER(SIOCGIWPOWER, zd1201_get_power),
-};
-
-static int zd1201_set_hostauth(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->param;
- struct zd1201 *zd = netdev_priv(dev);
-
- if (!zd->ap)
- return -EOPNOTSUPP;
-
- return zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value);
-}
-
-static int zd1201_get_hostauth(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->param;
- struct zd1201 *zd = netdev_priv(dev);
- short hostauth;
- int err;
-
- if (!zd->ap)
- return -EOPNOTSUPP;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFHOSTAUTH, &hostauth);
- if (err)
- return err;
- rrq->value = hostauth;
- rrq->fixed = 1;
-
- return 0;
-}
-
-static int zd1201_auth_sta(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct sockaddr *sta = &wrqu->ap_addr;
- struct zd1201 *zd = netdev_priv(dev);
- unsigned char buffer[10];
-
- if (!zd->ap)
- return -EOPNOTSUPP;
-
- memcpy(buffer, sta->sa_data, ETH_ALEN);
- *(short*)(buffer+6) = 0; /* 0==success, 1==failure */
- *(short*)(buffer+8) = 0;
-
- return zd1201_setconfig(zd, ZD1201_RID_AUTHENTICATESTA, buffer, 10, 1);
-}
-
-static int zd1201_set_maxassoc(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->param;
- struct zd1201 *zd = netdev_priv(dev);
-
- if (!zd->ap)
- return -EOPNOTSUPP;
-
- return zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
-}
-
-static int zd1201_get_maxassoc(struct net_device *dev,
- struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *rrq = &wrqu->param;
- struct zd1201 *zd = netdev_priv(dev);
- short maxassoc;
- int err;
-
- if (!zd->ap)
- return -EOPNOTSUPP;
-
- err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, &maxassoc);
- if (err)
- return err;
- rrq->value = maxassoc;
- rrq->fixed = 1;
-
- return 0;
-}
-
-static const iw_handler zd1201_private_handler[] = {
- zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */
- zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */
- zd1201_auth_sta, /* ZD1201SIWAUTHSTA */
- NULL, /* nothing to get */
- zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */
- zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */
-};
-
-static const struct iw_priv_args zd1201_private_args[] = {
- { ZD1201SIWHOSTAUTH, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "sethostauth" },
- { ZD1201GIWHOSTAUTH, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostauth" },
- { ZD1201SIWAUTHSTA, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "authstation" },
- { ZD1201SIWMAXASSOC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "setmaxassoc" },
- { ZD1201GIWMAXASSOC, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmaxassoc" },
-};
-
-static const struct iw_handler_def zd1201_iw_handlers = {
- .num_standard = ARRAY_SIZE(zd1201_iw_handler),
- .num_private = ARRAY_SIZE(zd1201_private_handler),
- .num_private_args = ARRAY_SIZE(zd1201_private_args),
- .standard = zd1201_iw_handler,
- .private = zd1201_private_handler,
- .private_args = (struct iw_priv_args *) zd1201_private_args,
- .get_wireless_stats = zd1201_get_wireless_stats,
-};
-
-static const struct net_device_ops zd1201_netdev_ops = {
- .ndo_open = zd1201_net_open,
- .ndo_stop = zd1201_net_stop,
- .ndo_start_xmit = zd1201_hard_start_xmit,
- .ndo_tx_timeout = zd1201_tx_timeout,
- .ndo_set_rx_mode = zd1201_set_multicast,
- .ndo_set_mac_address = zd1201_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int zd1201_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct zd1201 *zd;
- struct net_device *dev;
- struct usb_device *usb;
- int err;
- short porttype;
- char buf[IW_ESSID_MAX_SIZE+2];
- u8 addr[ETH_ALEN];
-
- usb = interface_to_usbdev(interface);
-
- dev = alloc_etherdev(sizeof(*zd));
- if (!dev)
- return -ENOMEM;
- zd = netdev_priv(dev);
- zd->dev = dev;
-
- zd->ap = ap;
- zd->usb = usb;
- zd->removed = 0;
- init_waitqueue_head(&zd->rxdataq);
- INIT_HLIST_HEAD(&zd->fraglist);
-
- err = zd1201_fw_upload(usb, zd->ap);
- if (err) {
- dev_err(&usb->dev, "zd1201 firmware upload failed: %d\n", err);
- goto err_zd;
- }
-
- zd->endp_in = 1;
- zd->endp_out = 1;
- zd->endp_out2 = 2;
- zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!zd->rx_urb || !zd->tx_urb) {
- err = -ENOMEM;
- goto err_zd;
- }
-
- mdelay(100);
- err = zd1201_drvr_start(zd);
- if (err)
- goto err_zd;
-
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXDATALEN, 2312);
- if (err)
- goto err_start;
-
- err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL,
- ZD1201_RATEB1 | ZD1201_RATEB2 | ZD1201_RATEB5 | ZD1201_RATEB11);
- if (err)
- goto err_start;
-
- dev->netdev_ops = &zd1201_netdev_ops;
- dev->wireless_handlers = &zd1201_iw_handlers;
- dev->watchdog_timeo = ZD1201_TX_TIMEOUT;
- strcpy(dev->name, "wlan%d");
-
- err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, addr, ETH_ALEN);
- if (err)
- goto err_start;
- eth_hw_addr_set(dev, addr);
-
- /* Set wildcard essid to match zd->essid */
- *(__le16 *)buf = cpu_to_le16(0);
- err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
- IW_ESSID_MAX_SIZE+2, 1);
- if (err)
- goto err_start;
-
- if (zd->ap)
- porttype = ZD1201_PORTTYPE_AP;
- else
- porttype = ZD1201_PORTTYPE_BSS;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype);
- if (err)
- goto err_start;
-
- SET_NETDEV_DEV(dev, &usb->dev);
-
- err = register_netdev(dev);
- if (err)
- goto err_start;
- dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n",
- dev->name);
-
- usb_set_intfdata(interface, zd);
-	zd1201_enable(zd);	/* zd1201 likes to start up enabled, */
- zd1201_disable(zd); /* interfering with all the wifis in range */
- return 0;
-
-err_start:
- /* Leave the device in reset state */
- zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
-err_zd:
- usb_free_urb(zd->tx_urb);
- usb_free_urb(zd->rx_urb);
- free_netdev(dev);
- return err;
-}
-
-static void zd1201_disconnect(struct usb_interface *interface)
-{
- struct zd1201 *zd = usb_get_intfdata(interface);
- struct hlist_node *node2;
- struct zd1201_frag *frag;
-
- if (!zd)
- return;
- usb_set_intfdata(interface, NULL);
-
- hlist_for_each_entry_safe(frag, node2, &zd->fraglist, fnode) {
- hlist_del_init(&frag->fnode);
- kfree_skb(frag->skb);
- kfree(frag);
- }
-
- if (zd->tx_urb) {
- usb_kill_urb(zd->tx_urb);
- usb_free_urb(zd->tx_urb);
- }
- if (zd->rx_urb) {
- usb_kill_urb(zd->rx_urb);
- usb_free_urb(zd->rx_urb);
- }
-
- if (zd->dev) {
- unregister_netdev(zd->dev);
- free_netdev(zd->dev);
- }
-}
-
-#ifdef CONFIG_PM
-
-static int zd1201_suspend(struct usb_interface *interface,
- pm_message_t message)
-{
- struct zd1201 *zd = usb_get_intfdata(interface);
-
- netif_device_detach(zd->dev);
-
- zd->was_enabled = zd->mac_enabled;
-
- if (zd->was_enabled)
- return zd1201_disable(zd);
- else
- return 0;
-}
-
-static int zd1201_resume(struct usb_interface *interface)
-{
- struct zd1201 *zd = usb_get_intfdata(interface);
-
- if (!zd || !zd->dev)
- return -ENODEV;
-
- netif_device_attach(zd->dev);
-
- if (zd->was_enabled)
- return zd1201_enable(zd);
- else
- return 0;
-}
-
-#else
-
-#define zd1201_suspend NULL
-#define zd1201_resume NULL
-
-#endif
-
-static struct usb_driver zd1201_usb = {
- .name = "zd1201",
- .probe = zd1201_probe,
- .disconnect = zd1201_disconnect,
- .id_table = zd1201_table,
- .suspend = zd1201_suspend,
- .resume = zd1201_resume,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(zd1201_usb);
diff --git a/drivers/net/wireless/zydas/zd1201.h b/drivers/net/wireless/zydas/zd1201.h
deleted file mode 100644
index c46ac8755..000000000
--- a/drivers/net/wireless/zydas/zd1201.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org)
- *
- * Parts of this driver have been derived from a wlan-ng version
- * modified by ZyDAS.
- * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
- */
-
-#ifndef _INCLUDE_ZD1201_H_
-#define _INCLUDE_ZD1201_H_
-
-#define ZD1201_NUMKEYS 4
-#define ZD1201_MAXKEYLEN 13
-#define ZD1201_MAXMULTI 16
-#define ZD1201_FRAGMAX 2500
-#define ZD1201_FRAGMIN 256
-#define ZD1201_RTSMAX 2500
-
-#define ZD1201_RXSIZE 3000
-
-struct zd1201 {
- struct usb_device *usb;
- int removed;
- struct net_device *dev;
- struct iw_statistics iwstats;
-
- int endp_in;
- int endp_out;
- int endp_out2;
- struct urb *rx_urb;
- struct urb *tx_urb;
-
- unsigned char rxdata[ZD1201_RXSIZE];
- int rxlen;
- wait_queue_head_t rxdataq;
- int rxdatas;
- struct hlist_head fraglist;
- unsigned char txdata[ZD1201_RXSIZE];
-
- int ap;
- char essid[IW_ESSID_MAX_SIZE+1];
- int essidlen;
- int mac_enabled;
- int was_enabled;
- int monitor;
- int encode_enabled;
- int encode_restricted;
- unsigned char encode_keys[ZD1201_NUMKEYS][ZD1201_MAXKEYLEN];
- int encode_keylen[ZD1201_NUMKEYS];
-};
-
-struct zd1201_frag {
- struct hlist_node fnode;
- int seq;
- struct sk_buff *skb;
-};
-
-#define ZD1201SIWHOSTAUTH SIOCIWFIRSTPRIV
-#define ZD1201GIWHOSTAUTH ZD1201SIWHOSTAUTH+1
-#define ZD1201SIWAUTHSTA SIOCIWFIRSTPRIV+2
-#define ZD1201SIWMAXASSOC SIOCIWFIRSTPRIV+4
-#define ZD1201GIWMAXASSOC ZD1201SIWMAXASSOC+1
-
-#define ZD1201_FW_TIMEOUT (1000)
-
-#define ZD1201_TX_TIMEOUT (2000)
-
-#define ZD1201_USB_CMDREQ 0
-#define ZD1201_USB_RESREQ 1
-
-#define ZD1201_CMDCODE_INIT 0x00
-#define ZD1201_CMDCODE_ENABLE 0x01
-#define ZD1201_CMDCODE_DISABLE 0x02
-#define ZD1201_CMDCODE_ALLOC 0x0a
-#define ZD1201_CMDCODE_INQUIRE 0x11
-#define ZD1201_CMDCODE_SETRXRID 0x17
-#define ZD1201_CMDCODE_ACCESS 0x21
-
-#define ZD1201_PACKET_EVENTSTAT 0x0
-#define ZD1201_PACKET_RXDATA 0x1
-#define ZD1201_PACKET_INQUIRE 0x2
-#define ZD1201_PACKET_RESOURCE 0x3
-
-#define ZD1201_ACCESSBIT 0x0100
-
-#define ZD1201_RID_CNFPORTTYPE 0xfc00
-#define ZD1201_RID_CNFOWNMACADDR 0xfc01
-#define ZD1201_RID_CNFDESIREDSSID 0xfc02
-#define ZD1201_RID_CNFOWNCHANNEL 0xfc03
-#define ZD1201_RID_CNFOWNSSID 0xfc04
-#define ZD1201_RID_CNFMAXDATALEN 0xfc07
-#define ZD1201_RID_CNFPMENABLED 0xfc09
-#define ZD1201_RID_CNFPMEPS 0xfc0a
-#define ZD1201_RID_CNFMAXSLEEPDURATION 0xfc0c
-#define ZD1201_RID_CNFDEFAULTKEYID 0xfc23
-#define ZD1201_RID_CNFDEFAULTKEY0 0xfc24
-#define ZD1201_RID_CNFDEFAULTKEY1 0xfc25
-#define ZD1201_RID_CNFDEFAULTKEY2 0xfc26
-#define ZD1201_RID_CNFDEFAULTKEY3 0xfc27
-#define ZD1201_RID_CNFWEBFLAGS 0xfc28
-#define ZD1201_RID_CNFAUTHENTICATION 0xfc2a
-#define ZD1201_RID_CNFMAXASSOCSTATIONS 0xfc2b
-#define ZD1201_RID_CNFHOSTAUTH 0xfc2e
-#define ZD1201_RID_CNFGROUPADDRESS 0xfc80
-#define ZD1201_RID_CNFFRAGTHRESHOLD 0xfc82
-#define ZD1201_RID_CNFRTSTHRESHOLD 0xfc83
-#define ZD1201_RID_TXRATECNTL 0xfc84
-#define ZD1201_RID_PROMISCUOUSMODE 0xfc85
-#define ZD1201_RID_CNFBASICRATES 0xfcb3
-#define ZD1201_RID_AUTHENTICATESTA 0xfce3
-#define ZD1201_RID_CURRENTBSSID 0xfd42
-#define ZD1201_RID_COMMSQUALITY 0xfd43
-#define ZD1201_RID_CURRENTTXRATE 0xfd44
-#define ZD1201_RID_CNFMAXTXBUFFERNUMBER 0xfda0
-#define ZD1201_RID_CURRENTCHANNEL 0xfdc1
-
-#define ZD1201_INQ_SCANRESULTS 0xf101
-
-#define ZD1201_INF_LINKSTATUS 0xf200
-#define ZD1201_INF_ASSOCSTATUS 0xf201
-#define ZD1201_INF_AUTHREQ 0xf202
-
-#define ZD1201_ASSOCSTATUS_STAASSOC 0x1
-#define ZD1201_ASSOCSTATUS_REASSOC 0x2
-#define ZD1201_ASSOCSTATUS_DISASSOC 0x3
-#define ZD1201_ASSOCSTATUS_ASSOCFAIL 0x4
-#define ZD1201_ASSOCSTATUS_AUTHFAIL 0x5
-
-#define ZD1201_PORTTYPE_IBSS 0
-#define ZD1201_PORTTYPE_BSS 1
-#define ZD1201_PORTTYPE_WDS 2
-#define ZD1201_PORTTYPE_PSEUDOIBSS 3
-#define ZD1201_PORTTYPE_AP 6
-
-#define ZD1201_RATEB1 1
-#define ZD1201_RATEB2 2
-#define ZD1201_RATEB5 4 /* 5.5 really, but 5 is shorter :) */
-#define ZD1201_RATEB11 8
-
-#define ZD1201_CNFAUTHENTICATION_OPENSYSTEM 0x0001
-#define ZD1201_CNFAUTHENTICATION_SHAREDKEY 0x0002
-
-#endif /* _INCLUDE_ZD1201_H_ */
diff --git a/drivers/net/wwan/qcom_bam_dmux.c b/drivers/net/wwan/qcom_bam_dmux.c
index 17d46f4d2..26ca719fa 100644
--- a/drivers/net/wwan/qcom_bam_dmux.c
+++ b/drivers/net/wwan/qcom_bam_dmux.c
@@ -846,7 +846,7 @@ static int bam_dmux_probe(struct platform_device *pdev)
return 0;
}
-static int bam_dmux_remove(struct platform_device *pdev)
+static void bam_dmux_remove(struct platform_device *pdev)
{
struct bam_dmux *dmux = platform_get_drvdata(pdev);
struct device *dev = dmux->dev;
@@ -877,8 +877,6 @@ static int bam_dmux_remove(struct platform_device *pdev)
disable_irq(dmux->pc_irq);
bam_dmux_power_off(dmux);
bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
-
- return 0;
}
static const struct dev_pm_ops bam_dmux_pm_ops = {
@@ -893,7 +891,7 @@ MODULE_DEVICE_TABLE(of, bam_dmux_of_match);
static struct platform_driver bam_dmux_driver = {
.probe = bam_dmux_probe,
- .remove = bam_dmux_remove,
+ .remove_new = bam_dmux_remove,
.driver = {
.name = "bam-dmux",
.pm = &bam_dmux_pm_ops,
diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c
index 9f43f256d..f0a4783ba 100644
--- a/drivers/net/wwan/t7xx/t7xx_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_cldma.c
@@ -106,7 +106,7 @@ bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
- return ioread64(hw_info->ap_pdn_base + offset);
+ return ioread64_lo_hi(hw_info->ap_pdn_base + offset);
}
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
@@ -117,7 +117,7 @@ void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qn
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
- iowrite64(address, reg + offset);
+ iowrite64_lo_hi(address, reg + offset);
}
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index cc7036036..554ba4669 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -139,8 +139,9 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
return -ENODEV;
}
- gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
+ REG_CLDMA_DL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
return 0;
@@ -318,8 +319,8 @@ static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
/* Check current processing TGPD, 64-bit address is in a table by Q index */
- ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
- queue->index * sizeof(u64));
+ ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
+ queue->index * sizeof(u64));
if (req->gpd_addr != ul_curr_addr) {
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
index 76da4c15e..f071ec7ff 100644
--- a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
+++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c
@@ -75,7 +75,7 @@ static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_
for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
- iowrite64(0, reg);
+ iowrite64_lo_hi(0, reg);
}
}
@@ -112,17 +112,17 @@ static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
- iowrite64(value, reg);
+ iowrite64_lo_hi(value, reg);
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
iowrite32(cfg->trsl_id, reg);
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
- iowrite64(value, reg);
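+	/* The _lo_hi accessors split each 64-bit MMIO access into two
+	 * 32-bit accesses, low word first, for platforms without
+	 * native 64-bit I/O.
+	 */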
+ iowrite64_lo_hi(value, reg);
/* Ensure ATR is set */
- ioread64(reg);
+ ioread64_lo_hi(reg);
return 0;
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fab361a25..ef76850d9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1778,5 +1778,6 @@ static void __exit netback_fini(void)
}
module_exit(netback_fini);
+MODULE_DESCRIPTION("Xen backend network device module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ad29f3700..8d2aee885 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -285,6 +285,7 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
return NULL;
}
skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
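+	/* Mark the skb so page_pool-backed frag pages are recycled
+	 * back to the pool when the skb is freed.
+	 */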
+ skb_mark_for_recycle(skb);
/* Align ip header to a 16 bytes boundary */
skb_reserve(skb, NET_IP_ALIGN);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index a556acdb9..2eb5978bd 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -203,8 +203,8 @@ static int pn532_uart_rx_is_frame(struct sk_buff *skb)
return 0;
}
-static int pn532_receive_buf(struct serdev_device *serdev,
- const unsigned char *data, size_t count)
+static ssize_t pn532_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
size_t i;
diff --git a/drivers/nfc/s3fwrn5/uart.c b/drivers/nfc/s3fwrn5/uart.c
index 82ea35d74..456d39471 100644
--- a/drivers/nfc/s3fwrn5/uart.c
+++ b/drivers/nfc/s3fwrn5/uart.c
@@ -51,9 +51,8 @@ static const struct s3fwrn5_phy_ops uart_phy_ops = {
.write = s3fwrn82_uart_write,
};
-static int s3fwrn82_uart_read(struct serdev_device *serdev,
- const unsigned char *data,
- size_t count)
+static ssize_t s3fwrn82_uart_read(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
size_t i;
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 7eb17f46a..9e1a34e23 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
- struct regulator *regulator;
+ struct regulator *vin_regulator;
+ struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
- ret = regulator_disable(trf->regulator);
+ ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
- trf->regulator = devm_regulator_get(&spi->dev, "vin");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->vin_regulator)) {
+ ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
- uvolts = regulator_get_voltage(trf->regulator);
+ uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(trf->vddio_regulator)) {
+ ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- if (regulator_get_voltage(trf->regulator) == 1800000) {
+ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
- goto err_disable_regulator;
+ goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
- regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+ regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+ regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
- regulator_disable(trf->regulator);
+ regulator_disable(trf->vddio_regulator);
+ regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
index 72921e4f3..12df4d889 100644
--- a/drivers/nubus/bus.c
+++ b/drivers/nubus/bus.c
@@ -32,12 +32,11 @@ static void nubus_device_remove(struct device *dev)
ndrv->remove(to_nubus_board(dev));
}
-struct bus_type nubus_bus_type = {
+static const struct bus_type nubus_bus_type = {
.name = "nubus",
.probe = nubus_device_probe,
.remove = nubus_device_remove,
};
-EXPORT_SYMBOL(nubus_bus_type);
int nubus_driver_register(struct nubus_driver *ndrv)
{
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index d5593b0dc..bb3726b62 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
+#include <linux/cleanup.h>
#include "btt.h"
#include "nd.h"
@@ -847,23 +848,20 @@ static int discover_arenas(struct btt *btt)
{
int ret = 0;
struct arena_info *arena;
- struct btt_sb *super;
size_t remaining = btt->rawsize;
u64 cur_nlba = 0;
size_t cur_off = 0;
int num_arenas = 0;
- super = kzalloc(sizeof(*super), GFP_KERNEL);
+ struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
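+	/* __free(kfree) frees super automatically on scope exit, so
+	 * the error paths no longer need an explicit out_super label.
+	 */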
if (!super)
return -ENOMEM;
while (remaining) {
/* Alloc memory for arena */
arena = alloc_arena(btt, 0, 0, 0);
- if (!arena) {
- ret = -ENOMEM;
- goto out_super;
- }
+ if (!arena)
+ return -ENOMEM;
arena->infooff = cur_off;
ret = btt_info_read(arena, super);
@@ -919,14 +917,11 @@ static int discover_arenas(struct btt *btt)
btt->nlba = cur_nlba;
btt->init_state = INIT_READY;
- kfree(super);
return ret;
out:
kfree(arena);
free_arenas(btt);
- out_super:
- kfree(super);
return ret;
}
@@ -986,7 +981,7 @@ static int btt_arena_write_layout(struct arena_info *arena)
if (!super)
return -ENOMEM;
- strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
+ strscpy(super->signature, BTT_SIG, sizeof(super->signature));
export_uuid(super->uuid, nd_btt->uuid);
export_uuid(super->parent_uuid, parent_uuid);
super->flags = cpu_to_le32(arena->flags);
@@ -1550,7 +1545,7 @@ static void btt_blk_cleanup(struct btt *btt)
* @rawsize: raw size in bytes of the backing device
* @lbasize: lba size of the backing device
* @uuid: A uuid for the backing device - this is stored on media
- * @maxlane: maximum number of parallel requests the device can handle
+ * @nd_region: &struct nd_region for the REGION device
*
* Initialize a Block Translation Table on a backing device to provide
* single sector power fail atomicity.
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index fabbb31f2..497fd434a 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -19,7 +19,7 @@ static void nd_btt_release(struct device *dev)
dev_dbg(dev, "trace\n");
nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
- ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+ ida_free(&nd_region->btt_ida, nd_btt->id);
kfree(nd_btt->uuid);
kfree(nd_btt);
}
@@ -191,7 +191,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
if (!nd_btt)
return NULL;
- nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
+ nd_btt->id = ida_alloc(&nd_region->btt_ida, GFP_KERNEL);
if (nd_btt->id < 0)
goto out_nd_btt;
@@ -217,7 +217,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return dev;
out_put_id:
- ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+ ida_free(&nd_region->btt_ida, nd_btt->id);
out_nd_btt:
kfree(nd_btt);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5852fe290..ef3d0f833 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -285,7 +285,7 @@ static void nvdimm_bus_release(struct device *dev)
struct nvdimm_bus *nvdimm_bus;
nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
- ida_simple_remove(&nd_ida, nvdimm_bus->id);
+ ida_free(&nd_ida, nvdimm_bus->id);
kfree(nvdimm_bus);
}
@@ -342,7 +342,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
init_waitqueue_head(&nvdimm_bus->wait);
- nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
+ nvdimm_bus->id = ida_alloc(&nd_ida, GFP_KERNEL);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
return NULL;
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 3bd61f245..6b4922de3 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -18,7 +18,7 @@ static void nd_dax_release(struct device *dev)
dev_dbg(dev, "trace\n");
nd_detach_ndns(dev, &nd_pfn->ndns);
- ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
+ ida_free(&nd_region->dax_ida, nd_pfn->id);
kfree(nd_pfn->uuid);
kfree(nd_dax);
}
@@ -55,7 +55,7 @@ static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
return NULL;
nd_pfn = &nd_dax->nd_pfn;
- nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
+ nd_pfn->id = ida_alloc(&nd_region->dax_ida, GFP_KERNEL);
if (nd_pfn->id < 0) {
kfree(nd_dax);
return NULL;
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 127387358..21498d461 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -53,7 +53,9 @@ static int validate_dimm(struct nvdimm_drvdata *ndd)
/**
* nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
- * @nvdimm: dimm to initialize
+ * @ndd: dimm to initialize
+ *
+ * Returns: %0 if the area is already valid, -errno on error
*/
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
@@ -194,7 +196,7 @@ static void nvdimm_release(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- ida_simple_remove(&dimm_ida, nvdimm->id);
+ ida_free(&dimm_ida, nvdimm->id);
kfree(nvdimm);
}
@@ -592,7 +594,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
if (!nvdimm)
return NULL;
- nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
+ nvdimm->id = ida_alloc(&dimm_ida, GFP_KERNEL);
if (nvdimm->id < 0) {
kfree(nvdimm);
return NULL;
@@ -722,6 +724,9 @@ static unsigned long dpa_align(struct nd_region *nd_region)
* contiguous unallocated dpa range.
* @nd_region: constrain available space check to this reference region
* @nd_mapping: container of dpa-resource-root + labels
+ *
+ * Returns: %0 if there is an alignment error, otherwise the max
+ * unallocated dpa range
*/
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping)
@@ -767,6 +772,8 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
*
* Validate that a PMEM label, if present, aligns with the start of an
* interleave set.
+ *
+ * Returns: %0 if there is an alignment error, otherwise the unallocated dpa
*/
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping)
@@ -836,8 +843,10 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
/**
* nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
- * @nvdimm: container of dpa-resource-root + labels
+ * @ndd: container of dpa-resource-root + labels
* @label_id: dpa resource name of the form pmem-<human readable uuid>
+ *
+ * Returns: sum of the dpa allocated to the label_id
*/
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 07177eadc..d6d558f94 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -27,7 +27,7 @@ static void namespace_pmem_release(struct device *dev)
struct nd_region *nd_region = to_nd_region(dev->parent);
if (nspm->id >= 0)
- ida_simple_remove(&nd_region->ns_ida, nspm->id);
+ ida_free(&nd_region->ns_ida, nspm->id);
kfree(nspm->alt_name);
kfree(nspm->uuid);
kfree(nspm);
@@ -71,6 +71,8 @@ static int is_namespace_uuid_busy(struct device *dev, void *data)
* nd_is_uuid_unique - verify that no other namespace has @uuid
* @dev: any device on a nvdimm_bus
* @uuid: uuid to check
+ *
+ * Returns: %true if the uuid is unique, %false if not
*/
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
@@ -337,6 +339,8 @@ static int scan_free(struct nd_region *nd_region,
* adjust_resource() the allocation to @n, but if @n is larger than the
* allocation delete it and find the 'new' last allocation in the label
* set.
+ *
+ * Returns: %0 on success or -errno on error
*/
static int shrink_dpa_allocation(struct nd_region *nd_region,
struct nd_label_id *label_id, resource_size_t n)
@@ -662,6 +666,8 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
* allocations from the start of an interleave set and end at the first
* BLK allocation or the end of the interleave set, whichever comes
* first.
+ *
+ * Returns: %0 on success or -errno on error
*/
static int grow_dpa_allocation(struct nd_region *nd_region,
struct nd_label_id *label_id, resource_size_t n)
@@ -951,6 +957,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
* @dev: namespace type for generating label_id
* @new_uuid: incoming uuid
* @old_uuid: reference to the uuid storage location in the namespace object
+ *
+ * Returns: %0 on success or -errno on error
*/
static int namespace_update_uuid(struct nd_region *nd_region,
struct device *dev, uuid_t *new_uuid,
@@ -1656,8 +1664,10 @@ static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
/**
* create_namespace_pmem - validate interleave set labelling, retrieve label0
* @nd_region: region with mappings to validate
- * @nspm: target namespace to create
+ * @nd_mapping: container of dpa-resource-root + labels
* @nd_label: target pmem namespace label to evaluate
+ *
+ * Returns: the created &struct device on success or ERR_PTR(-errno) on error
*/
static struct device *create_namespace_pmem(struct nd_region *nd_region,
struct nd_mapping *nd_mapping,
@@ -1810,7 +1820,7 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
res->name = dev_name(&nd_region->dev);
res->flags = IORESOURCE_MEM;
- nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
+ nspm->id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL);
if (nspm->id < 0) {
kfree(nspm);
return NULL;
@@ -2188,8 +2198,7 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(dev);
- id = ida_simple_get(&nd_region->ns_ida, 0, 0,
- GFP_KERNEL);
+ id = ida_alloc(&nd_region->ns_ida, GFP_KERNEL);
nspm->id = id;
} else
id = i;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0d08e21a1..586348125 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -22,7 +22,7 @@ static void nd_pfn_release(struct device *dev)
dev_dbg(dev, "trace\n");
nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
- ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
+ ida_free(&nd_region->pfn_ida, nd_pfn->id);
kfree(nd_pfn->uuid);
kfree(nd_pfn);
}
@@ -326,7 +326,7 @@ static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
if (!nd_pfn)
return NULL;
- nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
+ nd_pfn->id = ida_alloc(&nd_region->pfn_ida, GFP_KERNEL);
if (nd_pfn->id < 0) {
kfree(nd_pfn);
return NULL;
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index a92eb172f..4ceced5ce 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -29,12 +29,27 @@ static int init_vq(struct virtio_pmem *vpmem)
return 0;
};
+static int virtio_pmem_validate(struct virtio_device *vdev)
+{
+ struct virtio_shm_region shm_reg;
+
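+	/*
+	 * If the shared memory region is advertised but cannot be
+	 * retrieved, clear the feature bit so probe() falls back to
+	 * the start/size fields in the config space.
+	 */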
+ if (virtio_has_feature(vdev, VIRTIO_PMEM_F_SHMEM_REGION) &&
+ !virtio_get_shm_region(vdev, &shm_reg, (u8)VIRTIO_PMEM_SHMEM_REGION_ID)
+ ) {
+ dev_notice(&vdev->dev, "failed to get shared memory region %d\n",
+ VIRTIO_PMEM_SHMEM_REGION_ID);
+ __virtio_clear_bit(vdev, VIRTIO_PMEM_F_SHMEM_REGION);
+ }
+ return 0;
+}
+
static int virtio_pmem_probe(struct virtio_device *vdev)
{
struct nd_region_desc ndr_desc = {};
struct nd_region *nd_region;
struct virtio_pmem *vpmem;
struct resource res;
+ struct virtio_shm_region shm_reg;
int err = 0;
if (!vdev->config->get) {
@@ -57,10 +72,16 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
goto out_err;
}
- virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
- start, &vpmem->start);
- virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
- size, &vpmem->size);
+ if (virtio_has_feature(vdev, VIRTIO_PMEM_F_SHMEM_REGION)) {
+ virtio_get_shm_region(vdev, &shm_reg, (u8)VIRTIO_PMEM_SHMEM_REGION_ID);
+ vpmem->start = shm_reg.addr;
+ vpmem->size = shm_reg.len;
+ } else {
+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
+ start, &vpmem->start);
+ virtio_cread_le(vpmem->vdev, struct virtio_pmem_config,
+ size, &vpmem->size);
+ }
res.start = vpmem->start;
res.end = vpmem->start + vpmem->size - 1;
@@ -122,10 +143,17 @@ static void virtio_pmem_remove(struct virtio_device *vdev)
virtio_reset_device(vdev);
}
+static unsigned int features[] = {
+ VIRTIO_PMEM_F_SHMEM_REGION,
+};
+
static struct virtio_driver virtio_pmem_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
+ .validate = virtio_pmem_validate,
.probe = virtio_pmem_probe,
.remove = virtio_pmem_remove,
};
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index a23ab5c96..a3455f1d6 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index ee341b83e..6f7e7a8fa 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -111,7 +111,7 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
* should be preferred to 'generated' PSKs,
* and SHA-384 should be preferred to SHA-256.
*/
-struct nvme_tls_psk_priority_list {
+static struct nvme_tls_psk_priority_list {
bool generated;
enum nvme_tcp_tls_cipher cipher;
} nvme_tls_psk_prio[] = {
@@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void)
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("NVMe Keyring implementation");
module_init(nvme_keyring_init);
module_exit(nvme_keyring_exit);
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 596bb11ee..c727cd1f2 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
bool dead = false, freeze = false;
unsigned long flags;
@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
if (csts & NVME_CSTS_CFS)
dead = true;
- if (anv->ctrl.state == NVME_CTRL_LIVE ||
- anv->ctrl.state == NVME_CTRL_RESETTING) {
+ if (state == NVME_CTRL_LIVE ||
+ state == NVME_CTRL_RESETTING) {
freeze = true;
nvme_start_freeze(&anv->ctrl);
}
@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
unsigned long flags;
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
- if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
/*
* From rdma.c:
* If we are resetting, connecting or deleting we should
@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
u32 boot_status, aqa;
struct apple_nvme *anv =
container_of(work, struct apple_nvme, ctrl.reset_work);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
- if (anv->ctrl.state != NVME_CTRL_RESETTING) {
- dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
- anv->ctrl.state);
+ if (state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 72c0525c7..a264b3ae0 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context {
static struct workqueue_struct *nvme_auth_wq;
-#define nvme_auth_flags_from_qid(qid) \
- (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
-#define nvme_auth_queue_from_qid(ctrl, qid) \
- (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
-
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
@@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
void *data, size_t data_len, bool auth_send)
{
struct nvme_command cmd = {};
- blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
- struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
+ struct request_queue *q = ctrl->fabrics_q;
int ret;
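+	/* qid 0 is the admin queue and uses the fabrics queue; I/O
+	 * queues submit with reserved, non-waiting tags on the
+	 * connect queue.
+	 */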
+ if (qid != 0) {
+ flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
+ q = ctrl->connect_q;
+ }
+
cmd.auth_common.opcode = nvme_fabrics_command;
cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
cmd.auth_common.spsp0 = 0x01;
@@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
}
ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
- qid == 0 ? NVME_QID_ANY : qid,
- 0, flags);
+ qid == 0 ? NVME_QID_ANY : qid, flags);
if (ret > 0)
dev_warn(ctrl->device,
"qid %d auth_send failed with status %d\n", qid, ret);
@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
	 * If the ctrl is not connected, bail as reconnect will handle
* authentication.
*/
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
/* Authenticate admin queue first */
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 20f46c230..6f2ebb5fc 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
-const unsigned char *nvme_get_error_status_str(u16 status)
+const char *nvme_get_error_status_str(u16 status)
{
status &= 0x7ff;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
- return nvme_statuses[status & 0x7ff];
+ return nvme_statuses[status];
return "Unknown";
}
-const unsigned char *nvme_get_opcode_str(u8 opcode)
+const char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+const char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+const char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
return nvme_fabrics_ops[opcode];
return "Unknown";
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 771eac141..fe3627c5b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -20,6 +20,7 @@
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "nvme.h"
@@ -312,12 +313,12 @@ static void nvme_log_error(struct request *req)
struct nvme_request *nr = nvme_req(req);
if (ns) {
- pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+ pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
ns->disk ? ns->disk->disk_name : "?",
nvme_get_opcode_str(nr->cmd->common.opcode),
nr->cmd->common.opcode,
- (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
- (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+ nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
+ blk_rq_bytes(req) >> ns->head->lba_shift,
nvme_get_error_status_str(nr->status),
nr->status >> 8 & 7, /* Status Code Type */
nr->status & 0xff, /* Status Code */
@@ -337,6 +338,30 @@ static void nvme_log_error(struct request *req)
nr->status & NVME_SC_DNR ? "DNR " : "");
}
+static void nvme_log_err_passthru(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+ "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+ ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+ ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "",
+ nr->cmd->common.cdw10,
+ nr->cmd->common.cdw11,
+ nr->cmd->common.cdw12,
+ nr->cmd->common.cdw13,
+ nr->cmd->common.cdw14,
+		nr->cmd->common.cdw15);
+}
+
enum nvme_disposition {
COMPLETE,
RETRY,
@@ -372,17 +397,24 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
static inline void nvme_end_req_zoned(struct request *req)
{
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
- req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = nvme_lba_to_sect(req->q->queuedata,
+ req_op(req) == REQ_OP_ZONE_APPEND) {
+ struct nvme_ns *ns = req->q->queuedata;
+
+ req->__sector = nvme_lba_to_sect(ns->head,
le64_to_cpu(nvme_req(req)->result.u64));
+ }
}
static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
- nvme_log_error(req);
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -675,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
- if (req->q->queuedata)
+ struct nvme_request *nr = nvme_req(req);
+ bool logging_enabled;
+
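+	/* Error logging for passthru commands is controlled
+	 * per-namespace for I/O queues and per-controller for the
+	 * admin queue.
+	 */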
+ if (req->q->queuedata) {
+ struct nvme_ns *ns = req->q->disk->private_data;
+
+ logging_enabled = ns->head->passthru_err_log_enabled;
req->timeout = NVME_IO_TIMEOUT;
- else /* no queuedata implies admin queue */
+ } else { /* no queuedata implies admin queue */
+ logging_enabled = nr->ctrl->passthru_err_log_enabled;
req->timeout = NVME_ADMIN_TIMEOUT;
+ }
+
+ if (!logging_enabled)
+ req->rq_flags |= RQF_QUIET;
/* passthru commands should let the driver set the SGL flags */
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -687,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->mq_hctx->type == HCTX_TYPE_POLL)
req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
- req->rq_flags |= RQF_QUIET;
- memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+ memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -717,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live)
+ bool queue_live, enum nvme_ctrl_state state)
{
struct nvme_request *req = nvme_req(rq);
@@ -738,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
	 * command, which is required to set the queue live in the
	 * appropriate states.
*/
- switch (nvme_ctrl_state(ctrl)) {
+ switch (state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -793,8 +835,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
}
if (queue_max_discard_segments(req->q) == 1) {
- u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
- u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+ u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
+ u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
range[0].cattr = cpu_to_le32(0);
range[0].nlb = cpu_to_le32(nlb);
@@ -802,8 +844,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
n = 1;
} else {
__rq_for_each_bio(bio, req) {
- u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
- u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+ u64 slba = nvme_sect_to_lba(ns->head,
+ bio->bi_iter.bi_sector);
+ u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
if (n < segments) {
range[n].cattr = cpu_to_le32(0);
@@ -841,7 +884,7 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
u64 ref48;
/* both rw and write zeroes share the same reftag format */
- switch (ns->guard_type) {
+ switch (ns->head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
break;
@@ -869,17 +912,18 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->write_zeroes.slba =
- cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
cmnd->write_zeroes.length =
- cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
- if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
+ if (!(req->cmd_flags & REQ_NOUNMAP) &&
+ (ns->head->features & NVME_NS_DEAC))
cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
- if (nvme_ns_has_pi(ns)) {
+ if (nvme_ns_has_pi(ns->head)) {
cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
- switch (ns->pi_type) {
+ switch (ns->head->pi_type) {
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
nvme_set_ref_tag(ns, cmnd, req);
@@ -911,13 +955,15 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
cmnd->rw.cdw2 = 0;
cmnd->rw.cdw3 = 0;
cmnd->rw.metadata = 0;
- cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
- cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cmnd->rw.slba =
+ cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
+ cmnd->rw.length =
+ cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
cmnd->rw.reftag = 0;
cmnd->rw.apptag = 0;
cmnd->rw.appmask = 0;
- if (ns->ms) {
+ if (ns->head->ms) {
/*
* If formatted with metadata, the block layer always provides a
* metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
@@ -925,12 +971,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
* namespace capacity to zero to prevent any I/O.
*/
if (!blk_integrity_rq(req)) {
- if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
}
- switch (ns->pi_type) {
+ switch (ns->head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
break;
@@ -1043,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head, blk_mq_req_flags_t flags)
+ int qid, nvme_submit_flags_t flags)
{
struct request *req;
int ret;
+ blk_mq_req_flags_t blk_flags = 0;
+ if (flags & NVME_SUBMIT_NOWAIT)
+ blk_flags |= BLK_MQ_REQ_NOWAIT;
+ if (flags & NVME_SUBMIT_RESERVED)
+ blk_flags |= BLK_MQ_REQ_RESERVED;
if (qid == NVME_QID_ANY)
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
else
- req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
+ if (flags & NVME_SUBMIT_RETRY)
+ req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1064,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- ret = nvme_execute_rq(req, at_head);
+ ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1077,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1100,6 +1153,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
} else {
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+
+ /* Ignore execution restrictions if any relaxation bits are set */
+ if (effects & NVME_CMD_EFFECTS_CSER_MASK)
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
}
return effects;
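A short illustration of the relaxation rule added above (mask names as
already used in this function; the starting value is an assumption):

	u32 effects = NVME_CMD_EFFECTS_CSE_MASK | NVME_CMD_EFFECTS_CSER_MASK;

	/* Any CSER bit set -> drop the CSE serialization requirement */
	if (effects & NVME_CMD_EFFECTS_CSER_MASK)
		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;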
@@ -1452,7 +1509,7 @@ free_data:
return status;
}
-static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_id_ns **id)
{
struct nvme_command c = { };
@@ -1552,7 +1609,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1671,14 +1728,14 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
- u32 max_integrity_segments)
+static void nvme_init_integrity(struct gendisk *disk,
+ struct nvme_ns_head *head, u32 max_integrity_segments)
{
struct blk_integrity integrity = { };
- switch (ns->pi_type) {
+ switch (head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
- switch (ns->guard_type) {
+ switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
integrity.profile = &t10_pi_type3_crc;
integrity.tag_size = sizeof(u16) + sizeof(u32);
@@ -1696,7 +1753,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
- switch (ns->guard_type) {
+ switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
integrity.profile = &t10_pi_type1_crc;
integrity.tag_size = sizeof(u16);
@@ -1717,27 +1774,28 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
break;
}
- integrity.tuple_size = ns->ms;
+ integrity.tuple_size = head->ms;
blk_integrity_register(disk, &integrity);
blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
-static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
- u32 max_integrity_segments)
+static void nvme_init_integrity(struct gendisk *disk,
+ struct nvme_ns_head *head, u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
+ struct nvme_ns_head *head)
{
- struct nvme_ctrl *ctrl = ns->ctrl;
struct request_queue *queue = disk->queue;
- u32 size = queue_logical_block_size(queue);
+ u32 max_discard_sectors;
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
- ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
- if (ctrl->max_discard_sectors == 0) {
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
+ max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
+ } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
+ max_discard_sectors = UINT_MAX;
+ } else {
blk_queue_max_discard_sectors(queue, 0);
return;
}
@@ -1745,14 +1803,22 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- queue->limits.discard_granularity = size;
-
- /* If discard is already enabled, don't reset queue limits */
+ /*
+ * If discard is already enabled, don't reset queue limits.
+ *
+ * This works around the fact that the block layer can't cope well with
+ * updating the hardware limits when overridden through sysfs. This is
+ * harmless because discard limits in NVMe are purely advisory.
+ */
if (queue->limits.max_discard_sectors)
return;
- blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
- blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+ blk_queue_max_discard_sectors(queue, max_discard_sectors);
+ if (ctrl->dmrl)
+ blk_queue_max_discard_segments(queue, ctrl->dmrl);
+ else
+ blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+ queue->limits.discard_granularity = queue_logical_block_size(queue);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
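A worked example of the dmrsl conversion above, assuming 4K logical
blocks (head->lba_shift == 12) and ctrl->dmrsl == 0x1000; neither value
comes from the patch:

	max_discard_sectors = nvme_lba_to_sect(head, 0x1000);
	/* == 0x1000 << (12 - 9) == 0x8000 sectors, i.e. 16 MiB per DSM command */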
@@ -1766,21 +1832,21 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
a->csi == b->csi;
}
-static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
+ struct nvme_id_ns *id)
{
bool first = id->dps & NVME_NS_DPS_PI_FIRST;
unsigned lbaf = nvme_lbaf_index(id->flbas);
- struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_command c = { };
struct nvme_id_ns_nvm *nvm;
int ret = 0;
u32 elbaf;
- ns->pi_size = 0;
- ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+ head->pi_size = 0;
+ head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
- ns->pi_size = sizeof(struct t10_pi_tuple);
- ns->guard_type = NVME_NVM_NS_16B_GUARD;
+ head->pi_size = sizeof(struct t10_pi_tuple);
+ head->guard_type = NVME_NVM_NS_16B_GUARD;
goto set_pi;
}
@@ -1789,11 +1855,11 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
return -ENOMEM;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(ns->head->ns_id);
+ c.identify.nsid = cpu_to_le32(head->ns_id);
c.identify.cns = NVME_ID_CNS_CS_NS;
c.identify.csi = NVME_CSI_NVM;
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
if (ret)
goto free_data;
@@ -1803,13 +1869,13 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_elbaf_sts(elbaf))
goto free_data;
- ns->guard_type = nvme_elbaf_guard_type(elbaf);
- switch (ns->guard_type) {
+ head->guard_type = nvme_elbaf_guard_type(elbaf);
+ switch (head->guard_type) {
case NVME_NVM_NS_64B_GUARD:
- ns->pi_size = sizeof(struct crc64_pi_tuple);
+ head->pi_size = sizeof(struct crc64_pi_tuple);
break;
case NVME_NVM_NS_16B_GUARD:
- ns->pi_size = sizeof(struct t10_pi_tuple);
+ head->pi_size = sizeof(struct t10_pi_tuple);
break;
default:
break;
@@ -1818,25 +1884,25 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
free_data:
kfree(nvm);
set_pi:
- if (ns->pi_size && (first || ns->ms == ns->pi_size))
- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ if (head->pi_size && (first || head->ms == head->pi_size))
+ head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
else
- ns->pi_type = 0;
+ head->pi_type = 0;
return ret;
}
-static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head, struct nvme_id_ns *id)
{
- struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
- ret = nvme_init_ms(ns, id);
+ ret = nvme_init_ms(ctrl, head, id);
if (ret)
return ret;
- ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
- if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
return 0;
if (ctrl->ops->flags & NVME_F_FABRICS) {
@@ -1848,7 +1914,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
return 0;
- ns->features |= NVME_NS_EXT_LBAS;
+ head->features |= NVME_NS_EXT_LBAS;
/*
* The current fabrics transport drivers support namespace
@@ -1859,8 +1925,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* Note, this check will need to be modified if any drivers
* gain the ability to use other metadata formats.
*/
- if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
- ns->features |= NVME_NS_METADATA_SUPPORTED;
+ if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
+ head->features |= NVME_NS_METADATA_SUPPORTED;
} else {
/*
* For PCIe controllers, we can't easily remap the separate
@@ -1869,9 +1935,9 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* We allow extended LBAs for the passthrough interface, though.
*/
if (id->flbas & NVME_NS_FLBAS_META_EXT)
- ns->features |= NVME_NS_EXT_LBAS;
+ head->features |= NVME_NS_EXT_LBAS;
else
- ns->features |= NVME_NS_METADATA_SUPPORTED;
+ head->features |= NVME_NS_METADATA_SUPPORTED;
}
return 0;
}
@@ -1894,11 +1960,11 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc);
}
-static void nvme_update_disk_info(struct gendisk *disk,
- struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
+ struct nvme_ns_head *head, struct nvme_id_ns *id)
{
- sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
- u32 bs = 1U << ns->lba_shift;
+ sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze));
+ u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
/*
@@ -1906,7 +1972,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
* or smaller than a sector size yet, so catch this early and don't
* allow block I/O.
*/
- if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
+ if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
capacity = 0;
bs = (1 << 9);
}
@@ -1923,7 +1989,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
- atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+ atomic_bs = (1 + ctrl->subsys->awupf) * bs;
}
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
@@ -1949,20 +2015,20 @@ static void nvme_update_disk_info(struct gendisk *disk,
* I/O to namespaces with metadata except when the namespace supports
* PI, as it can strip/insert in that case.
*/
- if (ns->ms) {
+ if (head->ms) {
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
- (ns->features & NVME_NS_METADATA_SUPPORTED))
- nvme_init_integrity(disk, ns,
- ns->ctrl->max_integrity_segments);
- else if (!nvme_ns_has_pi(ns))
+ (head->features & NVME_NS_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, head,
+ ctrl->max_integrity_segments);
+ else if (!nvme_ns_has_pi(head))
capacity = 0;
}
set_capacity_and_notify(disk, capacity);
- nvme_config_discard(disk, ns);
+ nvme_config_discard(ctrl, disk, head);
blk_queue_max_write_zeroes_sectors(disk->queue,
- ns->ctrl->max_zeroes_sectors);
+ ctrl->max_zeroes_sectors);
}
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
@@ -1985,7 +2051,7 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
is_power_of_2(ctrl->max_hw_sectors))
iob = ctrl->max_hw_sectors;
else
- iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+ iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
if (!iob)
return;
@@ -2052,16 +2118,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
- ns->lba_shift = id->lbaf[lbaf].ds;
+ ns->head->lba_shift = id->lbaf[lbaf].ds;
+ ns->head->nuse = le64_to_cpu(id->nuse);
nvme_set_queue_limits(ns->ctrl, ns->queue);
- ret = nvme_configure_metadata(ns, id);
+ ret = nvme_configure_metadata(ns->ctrl, ns->head, id);
if (ret < 0) {
blk_mq_unfreeze_queue(ns->disk->queue);
goto out;
}
nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->disk, ns, id);
+ nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id);
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
@@ -2078,7 +2145,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
* do not return zeroes.
*/
if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
- ns->features |= NVME_NS_DEAC;
+ ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
@@ -2091,7 +2158,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
- nvme_update_disk_info(ns->head->disk, ns, id);
+ nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id);
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
@@ -2154,7 +2221,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- NVME_QID_ANY, 1, 0);
+ NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
}
static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -2920,14 +2987,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl_nvm *id;
int ret;
- if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
- ctrl->max_discard_sectors = UINT_MAX;
- ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
- } else {
- ctrl->max_discard_sectors = 0;
- ctrl->max_discard_segments = 0;
- }
-
/*
* Even though NVMe spec explicitly states that MDTS is not applicable
* to the write-zeroes, we are cautious and limit the size to the
@@ -2957,8 +3016,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
if (ret)
goto free_data;
- if (id->dmrl)
- ctrl->max_discard_segments = id->dmrl;
+ ctrl->dmrl = id->dmrl;
ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
@@ -3026,6 +3084,42 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return 0;
}
+static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ /*
+ * In fabrics we need to verify the cntlid matches the
+ * admin connect
+ */
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+ dev_err(ctrl->device,
+ "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n",
+ ctrl->cntlid, le16_to_cpu(id->cntlid));
+ return -EINVAL;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
+ dev_err(ctrl->device,
+ "keep-alive support is mandatory for fabrics\n");
+ return -EINVAL;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
+ dev_err(ctrl->device,
+ "I/O queue command capsule supported size %d < 4\n",
+ ctrl->ioccsz);
+ return -EINVAL;
+ }
+
+ if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
+ dev_err(ctrl->device,
+ "I/O queue response capsule supported size %d < 1\n",
+ ctrl->iorcsz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
struct nvme_id_ctrl *id;
@@ -3138,25 +3232,9 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->iorcsz = le32_to_cpu(id->iorcsz);
ctrl->maxcmd = le16_to_cpu(id->maxcmd);
- /*
- * In fabrics we need to verify the cntlid matches the
- * admin connect
- */
- if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
- dev_err(ctrl->device,
- "Mismatching cntlid: Connect %u vs Identify "
- "%u, rejecting\n",
- ctrl->cntlid, le16_to_cpu(id->cntlid));
- ret = -EINVAL;
- goto out_free;
- }
-
- if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
- dev_err(ctrl->device,
- "keep-alive support is mandatory for fabrics\n");
- ret = -EINVAL;
+ ret = nvme_check_ctrl_fabric_info(ctrl, id);
+ if (ret)
goto out_free;
- }
} else {
ctrl->hmpre = le32_to_cpu(id->hmpre);
ctrl->hmmin = le32_to_cpu(id->hmmin);
@@ -3415,6 +3493,8 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
head->ns_id = info->nsid;
head->ids = info->ids;
head->shared = info->is_shared;
+ ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
+ ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3674,7 +3754,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
up_write(&ctrl->namespaces_rwsem);
nvme_get_ctrl(ctrl);
- if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
+ if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
goto out_cleanup_ns_from_list;
if (!nvme_ns_head_multipath(ns->head))
@@ -3683,6 +3763,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
+ /*
+ * Set ns->disk->device->driver_data to ns so we can access
+ * ns->head->passthru_err_log_enabled in
+ * nvme_io_passthru_err_log_enabled_[store | show]().
+ */
+ dev_set_drvdata(disk_to_dev(ns->disk), ns);
+
return;
out_cleanup_ns_from_list:
@@ -4488,6 +4575,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+ ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
@@ -4825,5 +4913,6 @@ static void __exit nvme_core_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index aa88606a4..495c171da 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -387,8 +387,8 @@ static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
uuid_copy(&data->hostid, &ctrl->opts->host->id);
data->cntlid = cpu_to_le16(cntlid);
- strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
- strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+ strscpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strscpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
return data;
}
@@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), NVME_QID_ANY,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_NOWAIT |
+ NVME_SUBMIT_RESERVED);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -525,11 +527,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), qid,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_RESERVED |
+ NVME_SUBMIT_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
+ goto out_free_data;
}
result = le32_to_cpu(res.u32);
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
@@ -1488,6 +1493,7 @@ static void __exit nvmf_exit(void)
}
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVMe host fabrics library");
module_init(nvmf_init);
module_exit(nvmf_exit);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e4acccc31..37c974c38 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -178,9 +178,11 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
- if (ctrl->state == NVME_CTRL_DELETING ||
- ctrl->state == NVME_CTRL_DELETING_NOIO ||
- ctrl->state == NVME_CTRL_DEAD ||
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state == NVME_CTRL_DELETING ||
+ state == NVME_CTRL_DELETING_NOIO ||
+ state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
!uuid_equal(&opts->host->id, &ctrl->opts->host->id))
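The nvme_ctrl_state() helper used above reads the state exactly once, so
all three comparisons see the same snapshot. A sketch of its likely
shape (the real definition lives in nvme.h, outside this hunk):

	static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
	{
		/* single READ_ONCE; avoids re-reading a field that can change */
		return READ_ONCE(ctrl->state);
	}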
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e0f4129c3..68a5d9716 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1211,10 +1211,10 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
/* Linux supports only Dynamic controllers */
assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
- strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
- min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
- strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
- min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+ strscpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+ sizeof(assoc_rqst->assoc_cmd.hostnqn));
+ strscpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+ sizeof(assoc_rqst->assoc_cmd.subnqn));
lsop->queue = queue;
lsreq->rqstaddr = assoc_rqst;
@@ -2567,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ u16 qnum = op->queue->qnum;
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
@@ -2575,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
* will detect the aborted io and will fail the connection.
*/
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
"x%08x/x%08x\n",
- ctrl->cnum, op->queue->qnum, sqe->common.opcode,
- sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+ nvme_fabrics_opcode_str(qnum, sqe),
+ sqe->common.cdw10, sqe->common.cdw11);
if (__nvme_fc_abort_op(ctrl, op))
nvme_fc_error_recovery(ctrl, "io timeout abort failed");
@@ -3491,10 +3493,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
- if (lport->dev)
- ctrl->ctrl.numa_node = dev_to_node(lport->dev);
- else
- ctrl->ctrl.numa_node = NUMA_NO_NODE;
INIT_LIST_HEAD(&ctrl->ctrl_list);
ctrl->lport = lport;
ctrl->rport = rport;
@@ -3539,6 +3537,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
goto out_free_queues;
+ if (lport->dev)
+ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
/* at this point, teardown path changes to ref counting on nvme ctrl */
@@ -3570,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+ "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
return &ctrl->ctrl;
@@ -3971,4 +3971,5 @@ static void __exit nvme_fc_exit_module(void)
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);
+MODULE_DESCRIPTION("NVMe host FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 4939ed356..3dfd5ae99 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -5,7 +5,7 @@
*/
#include <linux/ptrace.h> /* for force_successful_syscall_return */
#include <linux/nvme_ioctl.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include "nvme.h"
enum {
@@ -97,58 +97,6 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
- unsigned len, u32 seed)
-{
- struct bio_integrity_payload *bip;
- int ret = -ENOMEM;
- void *buf;
- struct bio *bio = req->bio;
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- goto out;
-
- if (req_op(req) == REQ_OP_DRV_OUT) {
- ret = -EFAULT;
- if (copy_from_user(buf, ubuf, len))
- goto out_free_meta;
- } else {
- memset(buf, 0, len);
- }
-
- bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
- if (IS_ERR(bip)) {
- ret = PTR_ERR(bip);
- goto out_free_meta;
- }
-
- bip->bip_iter.bi_sector = seed;
- ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
- offset_in_page(buf));
- if (ret != len) {
- ret = -ENOMEM;
- goto out_free_meta;
- }
-
- req->cmd_flags |= REQ_INTEGRITY;
- return buf;
-out_free_meta:
- kfree(buf);
-out:
- return ERR_PTR(ret);
-}
-
-static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
- void *meta, unsigned len, int ret)
-{
- if (!ret && req_op(req) == REQ_OP_DRV_IN &&
- copy_to_user(ubuf, meta, len))
- ret = -EFAULT;
- kfree(meta);
- return ret;
-}
-
static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, blk_opf_t rq_flags,
blk_mq_req_flags_t blk_flags)
@@ -165,14 +113,12 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
- unsigned int flags)
+ u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct bio *bio = NULL;
- void *meta = NULL;
int ret;
if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
@@ -194,18 +140,17 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (ret)
goto out;
+
bio = req->bio;
- if (bdev)
+ if (bdev) {
bio_set_dev(bio, bdev);
-
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
- meta_seed);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
+ if (meta_buffer && meta_len) {
+ ret = bio_integrity_map_user(bio, meta_buffer, meta_len,
+ meta_seed);
+ if (ret)
+ goto out_unmap;
+ req->cmd_flags |= REQ_INTEGRITY;
}
- *metap = meta;
}
return ret;
@@ -226,7 +171,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl;
struct request *req;
- void *meta = NULL;
struct bio *bio;
u32 effects;
int ret;
@@ -238,7 +182,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, NULL, flags);
+ meta_len, meta_seed, NULL, flags);
if (ret)
return ret;
}
@@ -250,9 +194,6 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ret = nvme_execute_rq(req, false);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
- if (meta)
- ret = nvme_finish_user_metadata(req, meta_buffer, meta,
- meta_len, ret);
if (bio)
blk_rq_unmap_user(bio);
blk_mq_free_request(req);
@@ -284,10 +225,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return -EINVAL;
}
- length = (io.nblocks + 1) << ns->lba_shift;
+ length = (io.nblocks + 1) << ns->head->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
- ns->ms == sizeof(struct t10_pi_tuple)) {
+ (ns->head->ms == ns->head->pi_size)) {
/*
* Protection information is stripped/inserted by the
* controller.
@@ -297,11 +238,11 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
meta_len = 0;
metadata = NULL;
} else {
- meta_len = (io.nblocks + 1) * ns->ms;
+ meta_len = (io.nblocks + 1) * ns->head->ms;
metadata = nvme_to_user_ptr(io.metadata);
}
- if (ns->features & NVME_NS_EXT_LBAS) {
+ if (ns->head->features & NVME_NS_EXT_LBAS) {
length += meta_len;
meta_len = 0;
} else if (meta_len) {
@@ -447,19 +388,10 @@ struct nvme_uring_data {
* Expect build errors if this grows larger than that.
*/
struct nvme_uring_cmd_pdu {
- union {
- struct bio *bio;
- struct request *req;
- };
- u32 meta_len;
- u32 nvme_status;
- union {
- struct {
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
- };
- u64 result;
- } u;
+ struct request *req;
+ struct bio *bio;
+ u64 result;
+ int status;
};
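The "expect build errors" note above relies on a compile-time size
check; a hedged sketch of such a guard (the driver's actual assertion
may be phrased differently):

	static_assert(sizeof(struct nvme_uring_cmd_pdu) <=
		      sizeof_field(struct io_uring_cmd, pdu),
		      "pdu must fit in io_uring_cmd's private area");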
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -468,31 +400,6 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
- unsigned issue_flags)
-{
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- struct request *req = pdu->req;
- int status;
- u64 result;
-
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- status = -EINTR;
- else
- status = nvme_req(req)->status;
-
- result = le64_to_cpu(nvme_req(req)->result.u64);
-
- if (pdu->meta_len)
- status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
- pdu->u.meta, pdu->meta_len, status);
- if (req->bio)
- blk_rq_unmap_user(req->bio);
- blk_mq_free_request(req);
-
- io_uring_cmd_done(ioucmd, status, result, issue_flags);
-}
-
static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
unsigned issue_flags)
{
@@ -500,8 +407,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
-
- io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+ io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
@@ -510,53 +416,24 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- req->bio = pdu->bio;
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
- pdu->nvme_status = -EINTR;
- } else {
- pdu->nvme_status = nvme_req(req)->status;
- if (!pdu->nvme_status)
- pdu->nvme_status = blk_status_to_errno(err);
- }
- pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ pdu->status = -EINTR;
+ else
+ pdu->status = nvme_req(req)->status;
+ pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
* For iopoll, complete it directly.
* Otherwise, move the completion to task work.
*/
- if (blk_rq_is_poll(req)) {
- WRITE_ONCE(ioucmd->cookie, NULL);
+ if (blk_rq_is_poll(req))
nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
- } else {
+ else
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
- }
return RQ_END_IO_FREE;
}
-static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
- blk_status_t err)
-{
- struct io_uring_cmd *ioucmd = req->end_io_data;
- struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-
- req->bio = pdu->bio;
- pdu->req = req;
-
- /*
- * For iopoll, complete it directly.
- * Otherwise, move the completion to task work.
- */
- if (blk_rq_is_poll(req)) {
- WRITE_ONCE(ioucmd->cookie, NULL);
- nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
- } else {
- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
- }
-
- return RQ_END_IO_NONE;
-}
-
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
{
@@ -568,7 +445,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct request *req;
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
blk_mq_req_flags_t blk_flags = 0;
- void *meta = NULL;
int ret;
c.common.opcode = READ_ONCE(cmd->opcode);
@@ -616,27 +492,16 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (d.addr && d.data_len) {
ret = nvme_map_user_request(req, d.addr,
d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, ioucmd, vec);
+ d.metadata_len, 0, ioucmd, vec);
if (ret)
return ret;
}
- if (blk_rq_is_poll(req)) {
- ioucmd->flags |= IORING_URING_CMD_POLLED;
- WRITE_ONCE(ioucmd->cookie, req);
- }
-
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta_len = d.metadata_len;
+ pdu->req = req;
req->end_io_data = ioucmd;
- if (pdu->meta_len) {
- pdu->u.meta = meta;
- pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
- req->end_io = nvme_uring_cmd_end_io_meta;
- } else {
- req->end_io = nvme_uring_cmd_end_io;
- }
+ req->end_io = nvme_uring_cmd_end_io;
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
@@ -787,16 +652,12 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
struct io_comp_batch *iob,
unsigned int poll_flags)
{
- struct request *req;
- int ret = 0;
-
- if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
- return 0;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ struct request *req = pdu->req;
- req = READ_ONCE(ioucmd->cookie);
if (req && blk_rq_is_poll(req))
- ret = blk_rq_poll(req, iob, poll_flags);
- return ret;
+ return blk_rq_poll(req, iob, poll_flags);
+ return 0;
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0a88d7bdc..74de1e64a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
@@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
+
/*
* We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
* still be able to complete assuming that the controller is connected.
* Otherwise it will fail immediately and return to the requeue list.
*/
- if (ns->ctrl->state != NVME_CTRL_LIVE &&
- ns->ctrl->state != NVME_CTRL_DELETING)
+ if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
!test_bit(NVME_NS_READY, &ns->flags))
@@ -331,7 +332,7 @@ out:
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
- return ns->ctrl->state == NVME_CTRL_LIVE &&
+ return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
ns->ana_state == NVME_ANA_OPTIMIZED;
}
@@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
- switch (ns->ctrl->state) {
+ switch (nvme_ctrl_state(ns->ctrl)) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
@@ -579,7 +580,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
*/
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
rc = device_add_disk(&head->subsys->dev, head->disk,
- nvme_ns_id_attr_groups);
+ nvme_ns_attr_groups);
if (rc) {
clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags);
return;
@@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
- ns->ctrl->state == NVME_CTRL_LIVE)
+ nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
nvme_read_ana_log(ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e7411dac0..7b87763e2 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -16,6 +16,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>
+#include <linux/ratelimit_types.h>
#include <trace/events/block.h>
@@ -262,6 +263,7 @@ enum nvme_ctrl_flags {
struct nvme_ctrl {
bool comp_seen;
bool identified;
+ bool passthru_err_log_enabled;
enum nvme_ctrl_state state;
spinlock_t lock;
struct mutex scan_lock;
@@ -302,14 +304,13 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 max_segments;
u32 max_integrity_segments;
- u32 max_discard_sectors;
- u32 max_discard_segments;
u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
u32 max_zone_append;
#endif
u16 crdt[3];
u16 oncs;
+ u8 dmrl;
u32 dmrsl;
u16 oacs;
u16 sqsize;
@@ -450,13 +451,28 @@ struct nvme_ns_head {
struct list_head list;
struct srcu_struct srcu;
struct nvme_subsystem *subsys;
- unsigned ns_id;
struct nvme_ns_ids ids;
struct list_head entry;
struct kref ref;
bool shared;
+ bool passthru_err_log_enabled;
int instance;
struct nvme_effects_log *effects;
+ u64 nuse;
+ unsigned ns_id;
+ int lba_shift;
+ u16 ms;
+ u16 pi_size;
+ u8 pi_type;
+ u8 guard_type;
+ u16 sgs;
+ u32 sws;
+#ifdef CONFIG_BLK_DEV_ZONED
+ u64 zsze;
+#endif
+ unsigned long features;
+
+ struct ratelimit_state rs_nuse;
struct cdev cdev;
struct device cdev_device;
@@ -498,17 +514,6 @@ struct nvme_ns {
struct kref kref;
struct nvme_ns_head *head;
- int lba_shift;
- u16 ms;
- u16 pi_size;
- u16 sgs;
- u32 sws;
- u8 pi_type;
- u8 guard_type;
-#ifdef CONFIG_BLK_DEV_ZONED
- u64 zsze;
-#endif
- unsigned long features;
unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_ANA_PENDING 2
@@ -519,13 +524,12 @@ struct nvme_ns {
struct device cdev_device;
struct nvme_fault_inject fault_inject;
-
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
-static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
{
- return ns->pi_type && ns->ms == ns->pi_size;
+ return head->pi_type && head->ms == head->pi_size;
}
struct nvme_ctrl_ops {
@@ -657,17 +661,17 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
/*
* Convert a 512B sector number to a device logical block number.
*/
-static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
+static inline u64 nvme_sect_to_lba(struct nvme_ns_head *head, sector_t sector)
{
- return sector >> (ns->lba_shift - SECTOR_SHIFT);
+ return sector >> (head->lba_shift - SECTOR_SHIFT);
}
/*
* Convert a device logical block number to a 512B sector number.
*/
-static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
+static inline sector_t nvme_lba_to_sect(struct nvme_ns_head *head, u64 lba)
{
- return lba << (ns->lba_shift - SECTOR_SHIFT);
+ return lba << (head->lba_shift - SECTOR_SHIFT);
}
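A quick numeric check of the two helpers, assuming 4096-byte logical
blocks so that head->lba_shift == 12 (the value is an assumption):

	nvme_sect_to_lba(head, 8); /* 8 >> (12 - 9) == 1: eight 512B sectors */
	nvme_lba_to_sect(head, 1); /* 1 << (12 - 9) == 8 */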
/*
@@ -802,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live);
+ bool queue_live, enum nvme_ctrl_state state);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (likely(state == NVME_CTRL_LIVE))
return true;
- if (ctrl->ops->flags & NVME_F_FABRICS &&
- ctrl->state == NVME_CTRL_DELETING)
+ if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
return queue_live;
- return __nvme_check_ready(ctrl, rq, queue_live);
+ return __nvme_check_ready(ctrl, rq, queue_live, state);
}
/*
@@ -833,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+
+enum {
+ /* Insert request at the head of the queue */
+ NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
+ /* Set BLK_MQ_REQ_NOWAIT when allocating request */
+ NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+ /* Set BLK_MQ_REQ_RESERVED when allocating request */
+ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+ /* Retry command when NVME_SC_DNR is not set in the result */
+ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
+
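With the bitwise type above, callers fold the old (at_head, blk_flags)
argument pair into a single mask; this mirrors the fabrics connect path
elsewhere in this patch:

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
				     data, sizeof(*data), NVME_QID_ANY,
				     NVME_SUBMIT_AT_HEAD | NVME_SUBMIT_NOWAIT |
				     NVME_SUBMIT_RESERVED);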
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head,
- blk_mq_req_flags_t flags);
+ int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -873,10 +893,12 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
+int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_id_ns **id);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
-extern const struct attribute_group *nvme_ns_id_attr_groups[];
+extern const struct attribute_group *nvme_ns_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
@@ -926,6 +948,10 @@ extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+ return disk->fops == &nvme_ns_head_ops;
+}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -1003,6 +1029,10 @@ static inline void nvme_mpath_start_request(struct request *rq)
static inline void nvme_mpath_end_request(struct request *rq)
{
}
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+ return false;
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
@@ -1031,7 +1061,10 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
- return dev_to_disk(dev)->private_data;
+ struct gendisk *disk = dev_to_disk(dev);
+
+ WARN_ON(nvme_disk_is_ns_head(disk));
+ return disk->private_data;
}
#ifdef CONFIG_NVME_HWMON
@@ -1108,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
}
#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
- if (opcode == nvme_fabrics_command)
- return nvme_get_fabrics_opcode_str(fctype);
return qid ? nvme_get_opcode_str(opcode) :
nvme_get_admin_opcode_str(opcode);
}
+
+static inline const char *nvme_fabrics_opcode_str(
+ int qid, const struct nvme_command *cmd)
+{
+ if (nvme_is_fabrics(cmd))
+ return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+ return nvme_opcode_str(qid, cmd->common.opcode);
+}
#endif /* _NVME_H */
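Intended call pattern for the new helper (this mirrors the updated fc.c
and rdma.c timeout messages in this patch):

	dev_warn(ctrl->device, "opcode %#x (%s) QID %d timeout\n",
		 cmd->common.opcode, nvme_fabrics_opcode_str(qid, cmd), qid);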
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 61af7ff1a..8e0bb9692 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1284,6 +1284,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct request *abort_req;
struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ u8 opcode;
/* If PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
@@ -1310,8 +1311,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
dev_warn(dev->ctrl.device,
- "I/O %d QID %d timeout, completion polled\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) QID %d timeout, completion polled\n",
+ req->tag, nvme_cid(req), nvmeq->qid);
return BLK_EH_DONE;
}
@@ -1327,8 +1328,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
- "I/O %d QID %d timeout, disable controller\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) QID %d timeout, disable controller\n",
+ req->tag, nvme_cid(req), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, true);
return BLK_EH_DONE;
@@ -1343,10 +1344,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* command was already aborted once before and still hasn't been
* returned to the driver, or if this is the admin queue.
*/
+ opcode = nvme_req(req)->cmd->common.opcode;
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
- "I/O %d QID %d timeout, reset controller\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
+ req->tag, nvme_cid(req), opcode,
+ nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
goto disable;
}
@@ -1362,10 +1365,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d (%s) QID %d timeout, aborting\n",
- req->tag,
- nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
- nvmeq->qid);
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
+ req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
+ nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
+ blk_rq_bytes(req));
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -2743,10 +2746,10 @@ static void nvme_reset_work(struct work_struct *work)
* controller around but remove all namespaces.
*/
if (dev->online_queues > 1) {
+ nvme_dbbuf_set(dev);
nvme_unquiesce_io_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
nvme_pci_update_nr_queues(dev);
- nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
} else {
dev_warn(dev->ctrl.device, "IO queues lost\n");
@@ -3360,6 +3363,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_BOGUS_NID, },
@@ -3408,6 +3414,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
@@ -3538,5 +3546,6 @@ static void __exit nvme_exit(void)
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index 391b1465e..fc3eed00f 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -98,7 +98,7 @@ static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
+ nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 81e262116..20fdd40b1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
struct nvme_ns *ns = rq->q->queuedata;
struct bio *bio = rq->bio;
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ u32 xfer_len;
int nr;
req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
@@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
- nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
- req->mr->sig_attrs, ns->pi_type);
+ nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_WRITE;
sg->addr = cpu_to_le64(req->mr->iova);
- put_unaligned_le24(req->mr->length, sg->length);
+ xfer_len = req->mr->length;
+ /* Check if PI is added by the HW */
+ if (!pi_count)
+ xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
+ put_unaligned_le24(xfer_len, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
@@ -1946,9 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct nvme_command *cmd = req->req.cmd;
+ int qid = nvme_rdma_queue_idx(queue);
- dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
- rq->tag, nvme_rdma_queue_idx(queue));
+ dev_warn(ctrl->ctrl.device,
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
+ rq->tag, nvme_cid(rq), cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
/*
@@ -2017,7 +2026,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
queue->pi_support &&
(c->common.opcode == nvme_cmd_write ||
c->common.opcode == nvme_cmd_read) &&
- nvme_ns_has_pi(ns))
+ nvme_ns_has_pi(ns->head))
req->use_sig_mr = true;
else
req->use_sig_mr = false;
@@ -2291,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2395,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void)
module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);
+MODULE_DESCRIPTION("NVMe host RDMA transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index c6b7fbd4d..09fcaa519 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -35,16 +35,71 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf,
+ ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+
+ ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
- if (disk->fops == &nvme_bdev_ops)
- return nvme_get_ns_from_dev(dev)->head;
- else
+ if (nvme_disk_is_ns_head(disk))
return disk->private_data;
+ return nvme_get_ns_from_dev(dev)->head;
}
+static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+
+ return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+ head->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+ nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
+
+static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+ nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
+
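
Both store handlers above follow the stock sysfs boolean contract. A minimal sketch of that contract (hypothetical attribute; kstrtobool() accepts "0"/"1", "y"/"n", "on"/"off" and returns 0 on success):

static ssize_t example_bool_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        bool val;

        if (kstrtobool(buf, &val))
                return -EINVAL;         /* unrecognized input */
        /* ... publish val to the driver state ... */
        return count;                   /* consume the whole write */
}
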
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -114,20 +169,103 @@ static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(nsid);
-static struct attribute *nvme_ns_id_attrs[] = {
+static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
+}
+static DEVICE_ATTR_RO(csi);
+
+static ssize_t metadata_bytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
+}
+static DEVICE_ATTR_RO(metadata_bytes);
+
+static int ns_head_update_nuse(struct nvme_ns_head *head)
+{
+ struct nvme_id_ns *id;
+ struct nvme_ns *ns;
+ int srcu_idx, ret = -EWOULDBLOCK;
+
+ /* Avoid issuing commands too often by rate limiting the update */
+ if (!__ratelimit(&head->rs_nuse))
+ return 0;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (!ns)
+ goto out_unlock;
+
+ ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
+ if (ret)
+ goto out_unlock;
+
+ head->nuse = le64_to_cpu(id->nuse);
+ kfree(id);
+
+out_unlock:
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static int ns_update_nuse(struct nvme_ns *ns)
+{
+ struct nvme_id_ns *id;
+ int ret;
+
+ /* Avoid issuing commands too often by rate limiting the update. */
+ if (!__ratelimit(&ns->head->rs_nuse))
+ return 0;
+
+ ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
+ if (ret)
+ return ret;
+
+ ns->head->nuse = le64_to_cpu(id->nuse);
+ kfree(id);
+ return 0;
+}
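
Both nuse helpers gate the Identify Namespace round-trip behind __ratelimit(), which returns nonzero while the budget still allows the event. The idiom, sketched with an illustrative 5-second/one-shot budget (names hypothetical):

static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 1);

static int example_refresh(void)
{
        if (!__ratelimit(&example_rs))
                return 0;       /* rate limited: serve the cached value */
        /* ... issue the admin command and refresh the cache ... */
        return 0;
}
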
+
+static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct block_device *bdev = disk->part0;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
+ bdev->bd_disk->fops == &nvme_ns_head_ops)
+ ret = ns_head_update_nuse(head);
+ else
+ ret = ns_update_nuse(bdev->bd_disk->private_data);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", head->nuse);
+}
+static DEVICE_ATTR_RO(nuse);
+
+static struct attribute *nvme_ns_attrs[] = {
&dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_nguid.attr,
&dev_attr_eui.attr,
+ &dev_attr_csi.attr,
&dev_attr_nsid.attr,
+ &dev_attr_metadata_bytes.attr,
+ &dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
#endif
+ &dev_attr_io_passthru_err_log_enabled.attr,
NULL,
};
-static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -148,7 +286,8 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
}
#ifdef CONFIG_NVME_MULTIPATH
if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
- if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
+ /* per-path attr */
+ if (nvme_disk_is_ns_head(dev_to_disk(dev)))
return 0;
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
@@ -157,13 +296,13 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-static const struct attribute_group nvme_ns_id_attr_group = {
- .attrs = nvme_ns_id_attrs,
- .is_visible = nvme_ns_id_attrs_are_visible,
+static const struct attribute_group nvme_ns_attr_group = {
+ .attrs = nvme_ns_attrs,
+ .is_visible = nvme_ns_attrs_are_visible,
};
-const struct attribute_group *nvme_ns_id_attr_groups[] = {
- &nvme_ns_id_attr_group,
+const struct attribute_group *nvme_ns_attr_groups[] = {
+ &nvme_ns_attr_group,
NULL,
};
@@ -226,6 +365,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned state = (unsigned)nvme_ctrl_state(ctrl);
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
@@ -236,9 +376,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_DEAD] = "dead",
};
- if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
- state_name[ctrl->state])
- return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+ if (state < ARRAY_SIZE(state_name) && state_name[state])
+ return sysfs_emit(buf, "%s\n", state_name[state]);
return sysfs_emit(buf, "unknown state\n");
}
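
Beyond the cosmetic cleanup, snapshotting nvme_ctrl_state() into a local closes a small race: the bounds check and the array read now see the same value even if the controller state changes concurrently. The lookup itself is the usual guarded sparse-array idiom, sketched standalone:

static const char *const example_names[] = {
        [0] = "new", [1] = "live", [2] = "dead",
};

static const char *example_state_name(unsigned int s)
{
        /* range-check first, then tolerate NULL holes in the table */
        if (s < sizeof(example_names) / sizeof(example_names[0]) &&
            example_names[s])
                return example_names[s];
        return "unknown state";
}
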
@@ -570,6 +709,7 @@ static struct attribute *nvme_dev_attrs[] = {
#ifdef CONFIG_NVME_TCP_TLS
&dev_attr_tls_key.attr,
#endif
+ &dev_attr_adm_passthru_err_log_enabled.attr,
NULL
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 08805f027..a6d596e05 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1922,14 +1922,13 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
ctrl->opts->subsysnqn);
if (!pskid) {
dev_err(ctrl->device, "no valid PSK found\n");
- ret = -ENOKEY;
- goto out_free_queue;
+ return -ENOKEY;
}
}
ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
if (ret)
- goto out_free_queue;
+ return ret;
ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
if (ret)
@@ -2429,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
- u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+ struct nvme_command *cmd = &pdu->cmd;
int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
- "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
- nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
- opc, nvme_opcode_str(qid, opc, fctype));
+ "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
+ rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
/*
@@ -2754,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2827,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void)
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);
+MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index ec8557810..499bbb0ee 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -11,7 +11,7 @@ int nvme_revalidate_zones(struct nvme_ns *ns)
{
struct request_queue *q = ns->queue;
- blk_queue_chunk_sectors(q, ns->zsze);
+ blk_queue_chunk_sectors(q, ns->head->zsze);
blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
return blk_revalidate_disk_zones(ns->disk, NULL);
@@ -99,16 +99,17 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
- if (!is_power_of_2(ns->zsze)) {
+ ns->head->zsze =
+ nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
+ if (!is_power_of_2(ns->head->zsze)) {
dev_warn(ns->ctrl->device,
"invalid zone size:%llu for namespace:%u\n",
- ns->zsze, ns->head->ns_id);
+ ns->head->zsze, ns->head->ns_id);
status = -ENODEV;
goto free_data;
}
- disk_set_zoned(ns->disk, BLK_ZONED_HM);
+ disk_set_zoned(ns->disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
@@ -128,7 +129,7 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
sizeof(struct nvme_zone_descriptor);
nr_zones = min_t(unsigned int, nr_zones,
- get_capacity(ns->disk) >> ilog2(ns->zsze));
+ get_capacity(ns->disk) >> ilog2(ns->head->zsze));
bufsize = sizeof(struct nvme_zone_report) +
nr_zones * sizeof(struct nvme_zone_descriptor);
@@ -147,7 +148,8 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
return NULL;
}
-static int nvme_zone_parse_entry(struct nvme_ns *ns,
+static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head,
struct nvme_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb,
void *data)
@@ -155,20 +157,20 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct blk_zone zone = { };
if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
- dev_err(ns->ctrl->device, "invalid zone type %#x\n",
+ dev_err(ctrl->device, "invalid zone type %#x\n",
entry->zt);
return -EINVAL;
}
zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone.cond = entry->zs >> 4;
- zone.len = ns->zsze;
- zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
- zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
+ zone.len = head->zsze;
+ zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
+ zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
if (zone.cond == BLK_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
else
- zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+ zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
return cb(&zone, idx, data);
}
@@ -196,11 +198,11 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
- sector &= ~(ns->zsze - 1);
+ sector &= ~(ns->head->zsze - 1);
while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
memset(report, 0, buflen);
- c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector));
ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
if (ret) {
if (ret > 0)
@@ -213,14 +215,15 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
- ret = nvme_zone_parse_entry(ns, &report->entries[i],
+ ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
+ &report->entries[i],
zone_idx, cb, data);
if (ret)
goto out_free;
zone_idx++;
}
- sector += ns->zsze * nz;
+ sector += ns->head->zsze * nz;
}
if (zone_idx > 0)
@@ -239,7 +242,7 @@ blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
c->zms.opcode = nvme_cmd_zone_mgmt_send;
c->zms.nsid = cpu_to_le32(ns->head->ns_id);
- c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
+ c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
c->zms.zsa = action;
if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d937fe051..2482a0db2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1276,7 +1276,7 @@ static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
return -EINVAL;
down_write(&nvmet_config_sem);
- if (cntlid_min >= to_subsys(item)->cntlid_max)
+ if (cntlid_min > to_subsys(item)->cntlid_max)
goto out_unlock;
to_subsys(item)->cntlid_min = cntlid_min;
up_write(&nvmet_config_sem);
@@ -1306,7 +1306,7 @@ static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
return -EINVAL;
down_write(&nvmet_config_sem);
- if (cntlid_max <= to_subsys(item)->cntlid_min)
+ if (cntlid_max < to_subsys(item)->cntlid_min)
goto out_unlock;
to_subsys(item)->cntlid_max = cntlid_max;
up_write(&nvmet_config_sem);
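
The relaxed comparisons (> and < in place of >= and <=) make cntlid_min == cntlid_max a legal configuration, pinning a subsystem to exactly one controller ID; the now-redundant min/max sanity check in nvmet_alloc_ctrl() is dropped below. This works because ida_alloc_range() treats both bounds as inclusive. Assuming the existing cntlid_ida from core.c:

/* min == max allocates exactly that ID; a second caller gets -ENOSPC */
int cntlid = ida_alloc_range(&cntlid_ida, 42, 42, GFP_KERNEL);
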
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 393516504..8658e9c08 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
NVME_LOG_CHANGED_NS);
}
@@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
continue;
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
}
mutex_unlock(&subsys->lock);
@@ -1425,9 +1425,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->sqs)
goto out_free_changed_ns_list;
- if (subsys->cntlid_min > subsys->cntlid_max)
- goto out_free_sqs;
-
ret = ida_alloc_range(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
@@ -1708,4 +1705,5 @@ static void __exit nvmet_exit(void)
module_init(nvmet_init);
module_exit(nvmet_exit);
+MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 668d257fa..68e82ccc0 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
return;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d8da840a1..9964ffe34 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -209,7 +209,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status = 0;
+ u16 status;
int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
@@ -290,7 +290,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
- u16 status = 0;
+ u16 status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 666130878..fd229f310 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -497,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
* message is normal. Otherwise, send unless the hostport has
* already been invalidated by the lldd.
*/
- if (!tgtport->ops->ls_req || !assoc->hostport ||
- assoc->hostport->invalid)
+ if (!tgtport->ops->ls_req || assoc->hostport->invalid)
return;
lsop = kzalloc((sizeof(*lsop) +
@@ -1030,7 +1029,7 @@ nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host))
- return (host);
+ return host;
}
}
@@ -1115,14 +1114,27 @@ nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
queue_work(nvmet_wq, &assoc->del_work);
}
+static bool
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *a;
+
+ list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
+ if (association_id == a->association_id)
+ return true;
+ }
+
+ return false;
+}
+
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
- struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ struct nvmet_fc_tgt_assoc *assoc;
unsigned long flags;
+ bool done;
u64 ran;
int idx;
- bool needrandom = true;
if (!tgtport->pe)
return NULL;
@@ -1135,12 +1147,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (idx < 0)
goto out_free_assoc;
- if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida;
-
assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
if (IS_ERR(assoc->hostport))
- goto out_put;
+ goto out_ida;
assoc->tgtport = tgtport;
assoc->a_id = idx;
@@ -1149,29 +1158,24 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
- while (needrandom) {
+ done = false;
+ do {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
- if (ran == tmpassoc->association_id) {
- needrandom = true;
- break;
- }
- }
- if (!needrandom) {
+ rcu_read_lock();
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
+ done = true;
}
+ rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
- }
+ } while (!done);
return assoc;
-out_put:
- nvmet_fc_tgtport_put(tgtport);
out_ida:
ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
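
The rewritten allocation is a generate-until-unique loop: the random candidate is produced outside the lock, while the nvmet_fc_assoc_exists() check and the list insertion happen in one critical section so two racing allocators cannot publish the same ID. Reduced to its shape (hypothetical helpers, pseudocode-level C):

unsigned long flags;
bool done = false;

do {
        u64 id = make_random_id();              /* cheap, lock-free */

        spin_lock_irqsave(&port->lock, flags);
        if (!id_in_use(port, id)) {             /* check ... */
                publish_id(port, id);           /* ... and insert atomically */
                done = true;
        }
        spin_unlock_irqrestore(&port->lock, flags);
} while (!done);
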
@@ -1208,7 +1212,6 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- nvmet_fc_tgtport_put(tgtport);
kfree(assoc);
}
@@ -1552,8 +1555,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
- if (!assoc->hostport ||
- assoc->hostport->hosthandle != hosthandle)
+ if (assoc->hostport->hosthandle != hosthandle)
continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
@@ -2962,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void)
module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);
+MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index e6d422682..1471af250 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -995,11 +995,6 @@ fcloop_nport_free(struct kref *ref)
{
struct fcloop_nport *nport =
container_of(ref, struct fcloop_nport, ref);
- unsigned long flags;
-
- spin_lock_irqsave(&fcloop_lock, flags);
- list_del(&nport->nport_list);
- spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(nport);
}
@@ -1357,6 +1352,8 @@ __unlink_remote_port(struct fcloop_nport *nport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
+ list_del(&nport->nport_list);
+
return rport;
}
@@ -1653,4 +1650,5 @@ static void __exit fcloop_exit(void)
module_init(fcloop_init);
module_exit(fcloop_exit);
+MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9cb434c58..e589915dd 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
}
nvme_quiesce_admin_queue(&ctrl->ctrl);
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
nvme_disable_ctrl(&ctrl->ctrl, true);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ if (state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO)
/* state change failure for non-deleted ctrl? */
WARN_ON_ONCE(1);
return;
@@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void)
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);
+MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 9fe07d7ef..f2d963e1f 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -602,7 +602,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
goto out_put_file;
}
- old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
+ old = xa_cmpxchg(&passthru_subsystems, ctrl->instance, NULL,
subsys, GFP_KERNEL);
if (xa_is_err(old)) {
ret = xa_err(old);
@@ -635,7 +635,7 @@ out_unlock:
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
if (subsys->passthru_ctrl) {
- xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ xa_erase(&passthru_subsystems, subsys->passthru_ctrl->instance);
module_put(subsys->passthru_ctrl->ops->module);
nvme_put_ctrl(subsys->passthru_ctrl);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 4597bca43..3a0f2c170 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -37,6 +37,8 @@
#define NVMET_RDMA_MAX_MDTS 8
#define NVMET_RDMA_MAX_METADATA_MDTS 5
+#define NVMET_RDMA_BACKLOG 128
+
struct nvmet_rdma_srq;
struct nvmet_rdma_cmd {
@@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
}
if (queue->host_qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_rdma_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
+ q->state == NVMET_RDMA_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+ if (pending > NVMET_RDMA_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
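
The control-flow change matters more than the counting: flush_workqueue(nvmet_wq) could stall the RDMA CM event handler behind unrelated teardown work. The target now answers with the retryable NVME_SC_CONNECT_CTRL_BUSY once too many queues of the same controller are still disconnecting, letting the host's connect retry absorb the back-pressure (the TCP transport receives the identical treatment further down). Shape of the admission check, with hypothetical types:

static bool example_ctrl_busy(struct example_port *port,
                              struct example_ctrl *ctrl, int backlog)
{
        struct example_queue *q;
        int pending = 0;

        mutex_lock(&port->queue_lock);
        list_for_each_entry(q, &port->queues, entry) {
                if (q->ctrl == ctrl && q->disconnecting)
                        pending++;
        }
        mutex_unlock(&port->queue_lock);

        return pending > backlog;       /* push back instead of blocking */
}
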
@@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- ret = rdma_listen(cm_id, 128);
+ ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
if (ret) {
pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
@@ -2091,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void)
module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);
+MODULE_DESCRIPTION("NVMe target RDMA transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index bb42ae42b..c8655fc5a 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -25,6 +25,7 @@
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
static int param_store_val(const char *str, int *val, int min, int max)
{
@@ -985,8 +986,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
if (unlikely(data->ttag >= queue->nr_cmds)) {
pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
queue->idx, data->ttag, queue->nr_cmds);
- nvmet_tcp_fatal_error(queue);
- return -EPROTO;
+ goto err_proto;
}
cmd = &queue->cmds[data->ttag];
} else {
@@ -997,9 +997,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
- /* FIXME: use path and transport errors */
- nvmet_tcp_fatal_error(queue);
- return -EPROTO;
+ goto err_proto;
}
exp_data_len = le32_to_cpu(data->hdr.plen) -
@@ -1012,9 +1010,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len == 0 ||
cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
- /* FIXME: use proper transport errors */
- nvmet_tcp_fatal_error(queue);
- return -EPROTO;
+ goto err_proto;
}
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
@@ -1022,6 +1018,11 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
queue->rcv_state = NVMET_TCP_RECV_DATA;
return 0;
+
+err_proto:
+ /* FIXME: use proper transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
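
The three protocol checks now funnel into a single err_proto label, the classic single-exit idiom that keeps the side effect (marking the queue fatally broken) and the return code from drifting apart as checks are added. Minimal standalone shape:

#include <errno.h>
#include <stdbool.h>

static void fatal_error(void)
{
        /* tear down the connection; placeholder for the sketch */
}

static int example_handle_pdu(bool bad_ttag, bool bad_offset, bool bad_len)
{
        if (bad_ttag || bad_offset || bad_len)
                goto err_proto;
        return 0;

err_proto:
        fatal_error();
        return -EPROTO;
}
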
@@ -2067,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}
- ret = kernel_listen(port->sock, 128);
+ ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
if (ret) {
pr_err("failed to listen %d on port sock\n", ret);
goto err_sock;
@@ -2133,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
container_of(sq, struct nvmet_tcp_queue, nvme_sq);
if (sq->qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_tcp_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == sq->ctrl &&
+ q->state == NVMET_TCP_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ if (pending > NVMET_TCP_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
queue->nr_cmds = sq->size * 2;
@@ -2210,5 +2222,6 @@ static void __exit nvmet_tcp_exit(void)
module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);
+MODULE_DESCRIPTION("NVMe target TCP transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index bff454d46..6ee1f3db8 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
return ret;
}
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
* If the extra data of the connect command becomes available at this
* stage, this print statement can be updated later.
*/
- if (ctrl)
- trace_seq_printf(p, "%d", ctrl->cntlid);
+ if (ctrl_id)
+ trace_seq_printf(p, "%d", ctrl_id);
else
trace_seq_printf(p, "_");
trace_seq_putc(p, 0);
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 974d99d47..7f7ebf955 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
-#define __print_ctrl_name(ctrl) \
- nvmet_trace_ctrl_name(p, ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
+#define __print_ctrl_id(ctrl_id) \
+ nvmet_trace_ctrl_id(p, ctrl_id)
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
#define __print_disk_name(name) \
nvmet_trace_disk_name(p, name)
#ifndef TRACE_HEADER_MULTI_READ
-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
{
- return req->sq->ctrl;
+ /*
+ * The queue and controller pointers are not valid until an association
+ * has been established.
+ */
+ if (!req->sq || !req->sq->ctrl)
+ return 0;
+ return req->sq->ctrl->cntlid;
}
static inline void __assign_req_name(char *name, struct nvmet_req *req)
@@ -62,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
TP_ARGS(req, cmd),
TP_STRUCT__entry(
__field(struct nvme_command *, cmd)
- __field(struct nvmet_ctrl *, ctrl)
+ __field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(u16, cid)
@@ -75,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
),
TP_fast_assign(
__entry->cmd = cmd;
- __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
@@ -89,7 +95,7 @@ TRACE_EVENT(nvmet_req_init,
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
"meta=%#llx, cmd=(%s, %s)",
- __print_ctrl_name(__entry->ctrl),
+ __print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
@@ -103,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
TP_PROTO(struct nvmet_req *req),
TP_ARGS(req),
TP_STRUCT__entry(
- __field(struct nvmet_ctrl *, ctrl)
+ __field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(int, cid)
@@ -111,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
__field(u16, status)
),
TP_fast_assign(
- __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__entry->qid = req->cq->qid;
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
@@ -119,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
__assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
- __print_ctrl_name(__entry->ctrl),
+ __print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result, __entry->status)
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 5bc9c4874..283134498 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig NVMEM
bool "NVMEM Support"
+ imply NVMEM_LAYOUTS
help
Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES...
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 423baf089..cdd01fbf1 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -5,6 +5,8 @@
obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
+obj-$(CONFIG_NVMEM_LAYOUTS) += nvmem_layouts.o
+nvmem_layouts-y := layouts.o
obj-y += layouts/
# Devices
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 608b352a7..eb357ac2e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -19,29 +19,7 @@
#include <linux/of.h>
#include <linux/slab.h>
-struct nvmem_device {
- struct module *owner;
- struct device dev;
- int stride;
- int word_size;
- int id;
- struct kref refcnt;
- size_t size;
- bool read_only;
- bool root_only;
- int flags;
- enum nvmem_type type;
- struct bin_attribute eeprom;
- struct device *base_dev;
- struct list_head cells;
- const struct nvmem_keepout *keepout;
- unsigned int nkeepout;
- nvmem_reg_read_t reg_read;
- nvmem_reg_write_t reg_write;
- struct gpio_desc *wp_gpio;
- struct nvmem_layout *layout;
- void *priv;
-};
+#include "internals.h"
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
@@ -77,9 +55,6 @@ static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
-static DEFINE_SPINLOCK(nvmem_layout_lock);
-static LIST_HEAD(nvmem_layouts);
-
static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
void *val, size_t bytes)
{
@@ -324,6 +299,43 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
return nvmem_bin_attr_get_umode(nvmem);
}
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
+ const char *id, int index);
+
+static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct nvmem_cell_entry *entry;
+ struct nvmem_cell *cell = NULL;
+ size_t cell_sz, read_len;
+ void *content;
+
+ entry = attr->private;
+ cell = nvmem_create_cell(entry, entry->name, 0);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ if (!cell)
+ return -EINVAL;
+
+ content = nvmem_cell_read(cell, &cell_sz);
+ if (IS_ERR(content)) {
+ read_len = PTR_ERR(content);
+ goto destroy_cell;
+ }
+
+ read_len = min_t(unsigned int, cell_sz - pos, count);
+ memcpy(buf, content + pos, read_len);
+ kfree(content);
+
+destroy_cell:
+ kfree_const(cell->id);
+ kfree(cell);
+
+ return read_len;
+}
+
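
The new handler follows the sysfs bin_attribute read contract: userspace may issue repeated reads with an advancing offset, and the callback returns how many bytes it copied, with 0 signalling EOF. A kernel-style sketch of that contract against a static backing buffer (hypothetical attribute, not the cell path above):

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr, char *buf,
                                loff_t pos, size_t count)
{
        size_t avail = attr->size;

        if (pos >= avail)
                return 0;                       /* EOF */
        count = min_t(size_t, count, avail - pos);
        memcpy(buf, (char *)attr->private + pos, count);
        return count;
}
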
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
@@ -345,11 +357,21 @@ static const struct attribute_group nvmem_bin_group = {
.is_bin_visible = nvmem_bin_attr_is_visible,
};
+/* Cell attributes will be dynamically allocated */
+static struct attribute_group nvmem_cells_group = {
+ .name = "cells",
+};
+
static const struct attribute_group *nvmem_dev_groups[] = {
&nvmem_bin_group,
NULL,
};
+static const struct attribute_group *nvmem_cells_groups[] = {
+ &nvmem_cells_group,
+ NULL,
+};
+
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
@@ -405,6 +427,69 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
+static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
+{
+ struct bin_attribute **cells_attrs, *attrs;
+ struct nvmem_cell_entry *entry;
+ unsigned int ncells = 0, i = 0;
+ int ret = 0;
+
+ mutex_lock(&nvmem_mutex);
+
+ if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
+ nvmem_cells_group.bin_attrs = NULL;
+ goto unlock_mutex;
+ }
+
+ /* Allocate an array of attributes with a sentinel */
+ ncells = list_count_nodes(&nvmem->cells);
+ cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+ sizeof(struct bin_attribute *), GFP_KERNEL);
+ if (!cells_attrs) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
+ if (!attrs) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ /* Initialize each attribute to take the name and size of the cell */
+ list_for_each_entry(entry, &nvmem->cells, node) {
+ sysfs_bin_attr_init(&attrs[i]);
+ attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
+ "%s@%x,%x", entry->name,
+ entry->offset,
+ entry->bit_offset);
+ attrs[i].attr.mode = 0444;
+ attrs[i].size = entry->bytes;
+ attrs[i].read = &nvmem_cell_attr_read;
+ attrs[i].private = entry;
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ cells_attrs[i] = &attrs[i];
+ i++;
+ }
+
+ nvmem_cells_group.bin_attrs = cells_attrs;
+
+ ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
+ if (ret)
+ goto unlock_mutex;
+
+ nvmem->sysfs_cells_populated = true;
+
+unlock_mutex:
+ mutex_unlock(&nvmem_mutex);
+
+ return ret;
+}
+
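
Two allocation details above follow sysfs conventions worth spelling out: attribute arrays handed to a group must be NULL-terminated (hence ncells + 1 pointer slots), and devm_* allocation ties the lifetime to the nvmem device so no separate teardown path is needed. The same wiring with hypothetical names:

static int example_fill_group(struct device *dev,
                              struct attribute_group *grp,
                              struct bin_attribute *attrs, unsigned int n)
{
        struct bin_attribute **ptrs;
        unsigned int i;

        /* n live entries plus the NULL sentinel sysfs expects */
        ptrs = devm_kcalloc(dev, n + 1, sizeof(*ptrs), GFP_KERNEL);
        if (!ptrs)
                return -ENOMEM;

        for (i = 0; i < n; i++)
                ptrs[i] = &attrs[i];
        grp->bin_attrs = ptrs;  /* ptrs[n] is already NULL from kcalloc */
        return 0;
}
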
#else /* CONFIG_NVMEM_SYSFS */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
@@ -697,7 +782,6 @@ static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
- struct nvmem_layout *layout = nvmem->layout;
struct device *dev = &nvmem->dev;
struct device_node *child;
const __be32 *addr;
@@ -727,8 +811,8 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
info.np = of_node_get(child);
- if (layout && layout->fixup_cell_info)
- layout->fixup_cell_info(nvmem, layout, &info);
+ if (nvmem->fixup_dt_cell_info)
+ nvmem->fixup_dt_cell_info(nvmem, &info);
ret = nvmem_add_one_cell(nvmem, &info);
kfree(info.name);
@@ -763,117 +847,35 @@ static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
return err;
}
-int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
-{
- layout->owner = owner;
-
- spin_lock(&nvmem_layout_lock);
- list_add(&layout->node, &nvmem_layouts);
- spin_unlock(&nvmem_layout_lock);
-
- blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__nvmem_layout_register);
-
-void nvmem_layout_unregister(struct nvmem_layout *layout)
-{
- blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
-
- spin_lock(&nvmem_layout_lock);
- list_del(&layout->node);
- spin_unlock(&nvmem_layout_lock);
-}
-EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
-
-static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
+int nvmem_layout_register(struct nvmem_layout *layout)
{
- struct device_node *layout_np;
- struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
-
- layout_np = of_nvmem_layout_get_container(nvmem);
- if (!layout_np)
- return NULL;
-
- /* Fixed layouts don't have a matching driver */
- if (of_device_is_compatible(layout_np, "fixed-layout")) {
- of_node_put(layout_np);
- return NULL;
- }
-
- /*
- * In case the nvmem device was built-in while the layout was built as a
- * module, we shall manually request the layout driver loading otherwise
- * we'll never have any match.
- */
- of_request_module(layout_np);
-
- spin_lock(&nvmem_layout_lock);
-
- list_for_each_entry(l, &nvmem_layouts, node) {
- if (of_match_node(l->of_match_table, layout_np)) {
- if (try_module_get(l->owner))
- layout = l;
-
- break;
- }
- }
-
- spin_unlock(&nvmem_layout_lock);
- of_node_put(layout_np);
-
- return layout;
-}
+ int ret;
-static void nvmem_layout_put(struct nvmem_layout *layout)
-{
- if (layout)
- module_put(layout->owner);
-}
+ if (!layout->add_cells)
+ return -EINVAL;
-static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
-{
- struct nvmem_layout *layout = nvmem->layout;
- int ret;
+ /* Populate the cells */
+ ret = layout->add_cells(layout);
+ if (ret)
+ return ret;
- if (layout && layout->add_cells) {
- ret = layout->add_cells(&nvmem->dev, nvmem, layout);
- if (ret)
- return ret;
+#ifdef CONFIG_NVMEM_SYSFS
+ ret = nvmem_populate_sysfs_cells(layout->nvmem);
+ if (ret) {
+ nvmem_device_remove_all_cells(layout->nvmem);
+ return ret;
}
+#endif
return 0;
}
+EXPORT_SYMBOL_GPL(nvmem_layout_register);
-#if IS_ENABLED(CONFIG_OF)
-/**
- * of_nvmem_layout_get_container() - Get OF node to layout container.
- *
- * @nvmem: nvmem device.
- *
- * Return: a node pointer with refcount incremented or NULL if no
- * container exists. Use of_node_put() on it when done.
- */
-struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
-{
- return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
-}
-EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
-#endif
-
-const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
- struct nvmem_layout *layout)
+void nvmem_layout_unregister(struct nvmem_layout *layout)
{
- struct device_node __maybe_unused *layout_np;
- const struct of_device_id *match;
-
- layout_np = of_nvmem_layout_get_container(nvmem);
- match = of_match_node(layout->of_match_table, layout_np);
-
- return match ? match->data : NULL;
+ /* Keep the API even with an empty stub in case we need it later */
}
-EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);
+EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
/**
* nvmem_register() - Register a nvmem device for given nvmem_config.
@@ -925,6 +927,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
+ nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
@@ -980,19 +983,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
goto err_put_device;
}
- /*
- * If the driver supplied a layout by config->layout, the module
- * pointer will be NULL and nvmem_layout_put() will be a noop.
- */
- nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
- if (IS_ERR(nvmem->layout)) {
- rval = PTR_ERR(nvmem->layout);
- nvmem->layout = NULL;
-
- if (rval == -EPROBE_DEFER)
- goto err_teardown_compat;
- }
-
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
@@ -1013,24 +1003,34 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
- rval = nvmem_add_cells_from_layout(nvmem);
- if (rval)
- goto err_remove_cells;
-
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
rval = device_add(&nvmem->dev);
if (rval)
goto err_remove_cells;
+ rval = nvmem_populate_layout(nvmem);
+ if (rval)
+ goto err_remove_dev;
+
+#ifdef CONFIG_NVMEM_SYSFS
+ rval = nvmem_populate_sysfs_cells(nvmem);
+ if (rval)
+ goto err_destroy_layout;
+#endif
+
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
+#ifdef CONFIG_NVMEM_SYSFS
+err_destroy_layout:
+ nvmem_destroy_layout(nvmem);
+#endif
+err_remove_dev:
+ device_del(&nvmem->dev);
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
- nvmem_layout_put(nvmem->layout);
-err_teardown_compat:
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
@@ -1052,7 +1052,7 @@ static void nvmem_device_release(struct kref *kref)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
nvmem_device_remove_all_cells(nvmem);
- nvmem_layout_put(nvmem->layout);
+ nvmem_destroy_layout(nvmem);
device_unregister(&nvmem->dev);
}
@@ -1354,6 +1354,12 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
return cell;
}
+static void nvmem_layout_module_put(struct nvmem_device *nvmem)
+{
+ if (nvmem->layout && nvmem->layout->dev.driver)
+ module_put(nvmem->layout->dev.driver->owner);
+}
+
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
@@ -1372,6 +1378,18 @@ nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np
return cell;
}
+static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
+{
+ if (!nvmem->layout)
+ return 0;
+
+ if (!nvmem->layout->dev.driver ||
+ !try_module_get(nvmem->layout->dev.driver->owner))
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
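
The pairing rule behind these helpers: a consumer holding a cell backed by a layout must pin the layout driver's module for the life of the reference, and an unbound driver is surfaced as -EPROBE_DEFER so the consumer retries after the layout binds. Skeleton (hypothetical owner pointer, balanced by module_put() in nvmem_cell_put()):

static int example_pin_layout(struct module *owner)
{
        /* no bound driver yet: ask the caller to retry later */
        if (!owner || !try_module_get(owner))
                return -EPROBE_DEFER;
        return 0;
}
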
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
@@ -1434,16 +1452,29 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
return ERR_CAST(nvmem);
}
+ ret = nvmem_layout_module_get_optional(nvmem);
+ if (ret) {
+ of_node_put(cell_np);
+ __nvmem_device_put(nvmem);
+ return ERR_PTR(ret);
+ }
+
cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
of_node_put(cell_np);
if (!cell_entry) {
__nvmem_device_put(nvmem);
- return ERR_PTR(-ENOENT);
+ nvmem_layout_module_put(nvmem);
+ if (nvmem->layout)
+ return ERR_PTR(-EPROBE_DEFER);
+ else
+ return ERR_PTR(-ENOENT);
}
cell = nvmem_create_cell(cell_entry, id, cell_index);
- if (IS_ERR(cell))
+ if (IS_ERR(cell)) {
__nvmem_device_put(nvmem);
+ nvmem_layout_module_put(nvmem);
+ }
return cell;
}
@@ -1557,6 +1588,7 @@ void nvmem_cell_put(struct nvmem_cell *cell)
kfree(cell);
__nvmem_device_put(nvmem);
+ nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
@@ -2132,13 +2164,37 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem)
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
+/**
+ * nvmem_dev_size() - Get the size of a given nvmem device.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: size of the nvmem device.
+ */
+size_t nvmem_dev_size(struct nvmem_device *nvmem)
+{
+ return nvmem->size;
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_size);
+
static int __init nvmem_init(void)
{
- return bus_register(&nvmem_bus_type);
+ int ret;
+
+ ret = bus_register(&nvmem_bus_type);
+ if (ret)
+ return ret;
+
+ ret = nvmem_layout_bus_register();
+ if (ret)
+ bus_unregister(&nvmem_bus_type);
+
+ return ret;
}
static void __exit nvmem_exit(void)
{
+ nvmem_layout_bus_unregister();
bus_unregister(&nvmem_bus_type);
}
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index f1e202efa..79dd4fda0 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -583,17 +583,12 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
-static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem,
- struct nvmem_layout *layout,
- struct nvmem_cell_info *cell)
+static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell)
{
cell->read_post_process = imx_ocotp_cell_pp;
}
-static struct nvmem_layout imx_ocotp_layout = {
- .fixup_cell_info = imx_ocotp_fixup_cell_info,
-};
-
static int imx_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -619,7 +614,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
- imx_ocotp_nvmem_config.layout = &imx_ocotp_layout;
+ imx_ocotp_nvmem_config.fixup_dt_cell_info = &imx_ocotp_fixup_dt_cell_info;
priv->config = &imx_ocotp_nvmem_config;
diff --git a/drivers/nvmem/internals.h b/drivers/nvmem/internals.h
new file mode 100644
index 000000000..18fed5727
--- /dev/null
+++ b/drivers/nvmem/internals.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_NVMEM_INTERNALS_H
+#define _LINUX_NVMEM_INTERNALS_H
+
+#include <linux/device.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+
+struct nvmem_device {
+ struct module *owner;
+ struct device dev;
+ struct list_head node;
+ int stride;
+ int word_size;
+ int id;
+ struct kref refcnt;
+ size_t size;
+ bool read_only;
+ bool root_only;
+ int flags;
+ enum nvmem_type type;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
+ struct list_head cells;
+ void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell);
+ const struct nvmem_keepout *keepout;
+ unsigned int nkeepout;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ struct gpio_desc *wp_gpio;
+ struct nvmem_layout *layout;
+ void *priv;
+ bool sysfs_cells_populated;
+};
+
+#if IS_ENABLED(CONFIG_OF)
+int nvmem_layout_bus_register(void);
+void nvmem_layout_bus_unregister(void);
+int nvmem_populate_layout(struct nvmem_device *nvmem);
+void nvmem_destroy_layout(struct nvmem_device *nvmem);
+#else /* CONFIG_OF */
+static inline int nvmem_layout_bus_register(void)
+{
+ return 0;
+}
+
+static inline void nvmem_layout_bus_unregister(void) {}
+
+static inline int nvmem_populate_layout(struct nvmem_device *nvmem)
+{
+ return 0;
+}
+
+static inline void nvmem_destroy_layout(struct nvmem_device *nvmem) { }
+#endif /* CONFIG_OF */
+
+#endif /* ifndef _LINUX_NVMEM_INTERNALS_H */
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
new file mode 100644
index 000000000..6a6aa5836
--- /dev/null
+++ b/drivers/nvmem/layouts.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMEM layout bus handling
+ *
+ * Copyright (C) 2023 Bootlin
+ * Author: Miquel Raynal <miquel.raynal@bootlin.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#include "internals.h"
+
+#define to_nvmem_layout_driver(drv) \
+ (container_of((drv), struct nvmem_layout_driver, driver))
+#define to_nvmem_layout_device(_dev) \
+ container_of((_dev), struct nvmem_layout, dev)
+
+static int nvmem_layout_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return of_driver_match_device(dev, drv);
+}
+
+static int nvmem_layout_bus_probe(struct device *dev)
+{
+ struct nvmem_layout_driver *drv = to_nvmem_layout_driver(dev->driver);
+ struct nvmem_layout *layout = to_nvmem_layout_device(dev);
+
+ if (!drv->probe || !drv->remove)
+ return -EINVAL;
+
+ return drv->probe(layout);
+}
+
+static void nvmem_layout_bus_remove(struct device *dev)
+{
+ struct nvmem_layout_driver *drv = to_nvmem_layout_driver(dev->driver);
+ struct nvmem_layout *layout = to_nvmem_layout_device(dev);
+
+ return drv->remove(layout);
+}
+
+static struct bus_type nvmem_layout_bus_type = {
+ .name = "nvmem-layout",
+ .match = nvmem_layout_bus_match,
+ .probe = nvmem_layout_bus_probe,
+ .remove = nvmem_layout_bus_remove,
+};
+
+int nvmem_layout_driver_register(struct nvmem_layout_driver *drv)
+{
+ drv->driver.bus = &nvmem_layout_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_driver_register);
+
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvmem_layout_driver_unregister);
+
+static void nvmem_layout_release_device(struct device *dev)
+{
+ struct nvmem_layout *layout = to_nvmem_layout_device(dev);
+
+ of_node_put(layout->dev.of_node);
+ kfree(layout);
+}
+
+static int nvmem_layout_create_device(struct nvmem_device *nvmem,
+ struct device_node *np)
+{
+ struct nvmem_layout *layout;
+ struct device *dev;
+ int ret;
+
+ layout = kzalloc(sizeof(*layout), GFP_KERNEL);
+ if (!layout)
+ return -ENOMEM;
+
+ /* Create a bidirectional link */
+ layout->nvmem = nvmem;
+ nvmem->layout = layout;
+
+ /* Device model registration */
+ dev = &layout->dev;
+ device_initialize(dev);
+ dev->parent = &nvmem->dev;
+ dev->bus = &nvmem_layout_bus_type;
+ dev->release = nvmem_layout_release_device;
+ dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ dev->dma_mask = &dev->coherent_dma_mask;
+ device_set_node(dev, of_fwnode_handle(of_node_get(np)));
+ of_device_make_bus_id(dev);
+ of_msi_configure(dev, dev->of_node);
+
+ ret = device_add(dev);
+ if (ret) {
+ put_device(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
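
The error path above illustrates a strict driver-model rule: once device_initialize() has run, the structure is refcounted, so failure must be unwound with put_device(), which ends up in the ->release callback (here nvmem_layout_release_device() doing the of_node_put() and kfree()), never with a bare kfree(). Schematic sketch:

static int example_register(struct device *dev)
{
        int ret;

        device_initialize(dev);         /* refcount is live from here on */
        ret = device_add(dev);
        if (ret) {
                /* NOT kfree(): ->release runs when the last ref drops */
                put_device(dev);
                return ret;
        }
        /* success: later teardown is device_unregister(dev) */
        return 0;
}
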
+static const struct of_device_id of_nvmem_layout_skip_table[] = {
+ { .compatible = "fixed-layout", },
+ {}
+};
+
+static int nvmem_layout_bus_populate(struct nvmem_device *nvmem,
+ struct device_node *layout_dn)
+{
+ int ret;
+
+ /* Make sure it has a compatible property */
+ if (!of_get_property(layout_dn, "compatible", NULL)) {
+ pr_debug("%s() - skipping %pOF, no compatible prop\n",
+ __func__, layout_dn);
+ return 0;
+ }
+
+ /* Fixed layouts are parsed manually somewhere else for now */
+ if (of_match_node(of_nvmem_layout_skip_table, layout_dn)) {
+ pr_debug("%s() - skipping %pOF node\n", __func__, layout_dn);
+ return 0;
+ }
+
+ if (of_node_check_flag(layout_dn, OF_POPULATED_BUS)) {
+ pr_debug("%s() - skipping %pOF, already populated\n",
+ __func__, layout_dn);
+
+ return 0;
+ }
+
+ /* NVMEM layout buses expect only a single device representing the layout */
+ ret = nvmem_layout_create_device(nvmem, layout_dn);
+ if (ret)
+ return ret;
+
+ of_node_set_flag(layout_dn, OF_POPULATED_BUS);
+
+ return 0;
+}
+
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+ return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
+}
+EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
+
+/*
+ * Returns the number of devices populated, 0 if the operation was not relevant
+ * for this nvmem device, an error code otherwise.
+ */
+int nvmem_populate_layout(struct nvmem_device *nvmem)
+{
+ struct device_node *layout_dn;
+ int ret;
+
+ layout_dn = of_nvmem_layout_get_container(nvmem);
+ if (!layout_dn)
+ return 0;
+
+ /* Populate the layout device */
+ device_links_supplier_sync_state_pause();
+ ret = nvmem_layout_bus_populate(nvmem, layout_dn);
+ device_links_supplier_sync_state_resume();
+
+ of_node_put(layout_dn);
+ return ret;
+}
+
+void nvmem_destroy_layout(struct nvmem_device *nvmem)
+{
+ struct device *dev;
+
+ if (!nvmem->layout)
+ return;
+
+ dev = &nvmem->layout->dev;
+ of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
+ device_unregister(dev);
+}
+
+int nvmem_layout_bus_register(void)
+{
+ return bus_register(&nvmem_layout_bus_type);
+}
+
+void nvmem_layout_bus_unregister(void)
+{
+ bus_unregister(&nvmem_layout_bus_type);
+}
diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig
index 7ff1ee1c1..9c6e672fc 100644
--- a/drivers/nvmem/layouts/Kconfig
+++ b/drivers/nvmem/layouts/Kconfig
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+config NVMEM_LAYOUTS
+ bool
+ depends on OF
+
+if NVMEM_LAYOUTS
+
menu "Layout Types"
config NVMEM_LAYOUT_SL28_VPD
@@ -21,3 +27,5 @@ config NVMEM_LAYOUT_ONIE_TLV
If unsure, say N.
endmenu
+
+endif
diff --git a/drivers/nvmem/layouts/onie-tlv.c b/drivers/nvmem/layouts/onie-tlv.c
index 59fc87ccf..9d2ad5f2d 100644
--- a/drivers/nvmem/layouts/onie-tlv.c
+++ b/drivers/nvmem/layouts/onie-tlv.c
@@ -182,9 +182,10 @@ static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *tabl
return true;
}
-static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem,
- struct nvmem_layout *layout)
+static int onie_tlv_parse_table(struct nvmem_layout *layout)
{
+ struct nvmem_device *nvmem = layout->nvmem;
+ struct device *dev = &layout->dev;
struct onie_tlv_hdr hdr;
size_t table_len, data_len, hdr_len;
u8 *table, *data;
@@ -226,16 +227,32 @@ static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem,
return 0;
}
+static int onie_tlv_probe(struct nvmem_layout *layout)
+{
+ layout->add_cells = onie_tlv_parse_table;
+
+ return nvmem_layout_register(layout);
+}
+
+static void onie_tlv_remove(struct nvmem_layout *layout)
+{
+ nvmem_layout_unregister(layout);
+}
+
static const struct of_device_id onie_tlv_of_match_table[] = {
{ .compatible = "onie,tlv-layout", },
{},
};
MODULE_DEVICE_TABLE(of, onie_tlv_of_match_table);
-static struct nvmem_layout onie_tlv_layout = {
- .name = "ONIE tlv layout",
- .of_match_table = onie_tlv_of_match_table,
- .add_cells = onie_tlv_parse_table,
+static struct nvmem_layout_driver onie_tlv_layout = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "onie-tlv-layout",
+ .of_match_table = onie_tlv_of_match_table,
+ },
+ .probe = onie_tlv_probe,
+ .remove = onie_tlv_remove,
};
module_nvmem_layout_driver(onie_tlv_layout);
diff --git a/drivers/nvmem/layouts/sl28vpd.c b/drivers/nvmem/layouts/sl28vpd.c
index 05671371f..53fa50f17 100644
--- a/drivers/nvmem/layouts/sl28vpd.c
+++ b/drivers/nvmem/layouts/sl28vpd.c
@@ -80,9 +80,10 @@ static int sl28vpd_v1_check_crc(struct device *dev, struct nvmem_device *nvmem)
return 0;
}
-static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem,
- struct nvmem_layout *layout)
+static int sl28vpd_add_cells(struct nvmem_layout *layout)
{
+ struct nvmem_device *nvmem = layout->nvmem;
+ struct device *dev = &layout->dev;
const struct nvmem_cell_info *pinfo;
struct nvmem_cell_info info = {0};
struct device_node *layout_np;
@@ -135,16 +136,32 @@ static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem,
return 0;
}
+static int sl28vpd_probe(struct nvmem_layout *layout)
+{
+ layout->add_cells = sl28vpd_add_cells;
+
+ return nvmem_layout_register(layout);
+}
+
+static void sl28vpd_remove(struct nvmem_layout *layout)
+{
+ nvmem_layout_unregister(layout);
+}
+
static const struct of_device_id sl28vpd_of_match_table[] = {
{ .compatible = "kontron,sl28-vpd" },
{},
};
MODULE_DEVICE_TABLE(of, sl28vpd_of_match_table);
-static struct nvmem_layout sl28vpd_layout = {
- .name = "sl28-vpd",
- .of_match_table = sl28vpd_of_match_table,
- .add_cells = sl28vpd_add_cells,
+static struct nvmem_layout_driver sl28vpd_layout = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "kontron-sl28vpd-layout",
+ .of_match_table = sl28vpd_of_match_table,
+ },
+ .probe = sl28vpd_probe,
+ .remove = sl28vpd_remove,
};
module_nvmem_layout_driver(sl28vpd_layout);
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 87c94686c..84f05b40a 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -45,9 +45,8 @@ static int mtk_efuse_gpu_speedbin_pp(void *context, const char *id, int index,
return 0;
}
-static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem,
- struct nvmem_layout *layout,
- struct nvmem_cell_info *cell)
+static void mtk_efuse_fixup_dt_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell)
{
size_t sz = strlen(cell->name);
@@ -61,10 +60,6 @@ static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem,
cell->read_post_process = mtk_efuse_gpu_speedbin_pp;
}
-static struct nvmem_layout mtk_efuse_layout = {
- .fixup_cell_info = mtk_efuse_fixup_cell_info,
-};
-
static int mtk_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -91,7 +86,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
econfig.priv = priv;
econfig.dev = dev;
if (pdata->uses_post_processing)
- econfig.layout = &mtk_efuse_layout;
+ econfig.fixup_dt_cell_info = &mtk_efuse_fixup_dt_cell_info;
nvmem = devm_nvmem_register(dev, &econfig);
return PTR_ERR_OR_ZERO(nvmem);
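[note] With the layout object gone, mtk-efuse now hangs its cell fixup directly off the nvmem_config via the new fixup_dt_cell_info hook introduced by this series. A short sketch of the resulting provider-side idiom; the foo_* names are hypothetical:

static void foo_fixup_dt_cell_info(struct nvmem_device *nvmem,
				   struct nvmem_cell_info *cell)
{
	/* e.g. attach a read_post_process hook to selected DT cells */
	if (!strcmp(cell->name, "calibration-data"))
		cell->read_post_process = foo_calibration_pp;
}

	/* in probe, instead of pointing econfig.layout at a static object: */
	econfig.fixup_dt_cell_info = &foo_fixup_dt_cell_info;
	nvmem = devm_nvmem_register(dev, &econfig);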
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
index 8a553b179..82879b1c9 100644
--- a/drivers/nvmem/stm32-romem.c
+++ b/drivers/nvmem/stm32-romem.c
@@ -269,6 +269,19 @@ static const struct stm32_romem_cfg stm32mp13_bsec_cfg = {
.ta = true,
};
+/*
+ * STM32MP25 BSEC OTP: 3 regions of 32-bit data words
+ * lower OTP (OTP0 to OTP127), bitwise (1-bit) programmable
+ * mid OTP (OTP128 to OTP255), bulk (32-bit) programmable
+ * upper OTP (OTP256 to OTP383), bulk (32-bit) programmable
+ * but no access to HWKEY and ECIES key: access is limited to OTP367
+ */
+static const struct stm32_romem_cfg stm32mp25_bsec_cfg = {
+ .size = 368 * 4,
+ .lower = 127,
+ .ta = true,
+};
+
static const struct of_device_id stm32_romem_of_match[] __maybe_unused = {
{ .compatible = "st,stm32f4-otp", }, {
.compatible = "st,stm32mp15-bsec",
@@ -276,6 +289,9 @@ static const struct of_device_id stm32_romem_of_match[] __maybe_unused = {
}, {
.compatible = "st,stm32mp13-bsec",
.data = (void *)&stm32mp13_bsec_cfg,
+ }, {
+ .compatible = "st,stm32mp25-bsec",
+ .data = (void *)&stm32mp25_bsec_cfg,
},
{ /* sentinel */ },
};
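[note] The stm32mp25 entry encodes the comment's numbers directly: OTP0..OTP367 are accessible, so .size = 368 * 4 = 1472 bytes, and .lower = 127 corresponds to the bitwise-programmable lower region described above (OTP0 to OTP127). A standalone arithmetic check, purely illustrative:

#include <assert.h>

int main(void)
{
	unsigned int accessible_words = 368;	/* OTP0..OTP367 */

	/* matches .size = 368 * 4 in stm32mp25_bsec_cfg */
	assert(accessible_words * 4 == 1472);

	/* OTP383 is the last word overall; OTP368..OTP383 stay inaccessible */
	assert(384 - accessible_words == 16);
	return 0;
}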
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index c4ae94af4..befbab156 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -23,13 +23,10 @@ enum u_boot_env_format {
struct u_boot_env {
struct device *dev;
+ struct nvmem_device *nvmem;
enum u_boot_env_format format;
struct mtd_info *mtd;
-
- /* Cells */
- struct nvmem_cell_info *cells;
- int ncells;
};
struct u_boot_env_image_single {
@@ -94,70 +91,71 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i
static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
size_t data_offset, size_t data_len)
{
+ struct nvmem_device *nvmem = priv->nvmem;
struct device *dev = priv->dev;
char *data = buf + data_offset;
char *var, *value, *eq;
- int idx;
-
- priv->ncells = 0;
- for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
- priv->ncells++;
-
- priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
- if (!priv->cells)
- return -ENOMEM;
- for (var = data, idx = 0;
+ for (var = data;
var < data + data_len && *var;
- var = value + strlen(value) + 1, idx++) {
+ var = value + strlen(value) + 1) {
+ struct nvmem_cell_info info = {};
+
eq = strchr(var, '=');
if (!eq)
break;
*eq = '\0';
value = eq + 1;
- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
- if (!priv->cells[idx].name)
+ info.name = devm_kstrdup(dev, var, GFP_KERNEL);
+ if (!info.name)
return -ENOMEM;
- priv->cells[idx].offset = data_offset + value - data;
- priv->cells[idx].bytes = strlen(value);
- priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+ info.offset = data_offset + value - data;
+ info.bytes = strlen(value);
+ info.np = of_get_child_by_name(dev->of_node, info.name);
if (!strcmp(var, "ethaddr")) {
- priv->cells[idx].raw_len = strlen(value);
- priv->cells[idx].bytes = ETH_ALEN;
- priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
+ info.raw_len = strlen(value);
+ info.bytes = ETH_ALEN;
+ info.read_post_process = u_boot_env_read_post_process_ethaddr;
}
- }
- if (WARN_ON(idx != priv->ncells))
- priv->ncells = idx;
+ nvmem_add_one_cell(nvmem, &info);
+ }
return 0;
}
static int u_boot_env_parse(struct u_boot_env *priv)
{
+ struct nvmem_device *nvmem = priv->nvmem;
struct device *dev = priv->dev;
size_t crc32_data_offset;
size_t crc32_data_len;
size_t crc32_offset;
+ __le32 *crc32_addr;
size_t data_offset;
size_t data_len;
+ size_t dev_size;
uint32_t crc32;
uint32_t calc;
- size_t bytes;
uint8_t *buf;
+ int bytes;
int err;
- buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
+ dev_size = nvmem_dev_size(nvmem);
+
+ buf = kzalloc(dev_size, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
- if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
- dev_err(dev, "Failed to read from mtd: %d\n", err);
+ bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
+ if (bytes < 0) {
+ err = bytes;
+ goto err_kfree;
+ } else if (bytes != dev_size) {
+ err = -EIO;
goto err_kfree;
}
@@ -178,9 +176,10 @@ static int u_boot_env_parse(struct u_boot_env *priv)
data_offset = offsetof(struct u_boot_env_image_broadcom, data);
break;
}
- crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
- crc32_data_len = priv->mtd->size - crc32_data_offset;
- data_len = priv->mtd->size - data_offset;
+ crc32_addr = (__le32 *)(buf + crc32_offset);
+ crc32 = le32_to_cpu(*crc32_addr);
+ crc32_data_len = dev_size - crc32_data_offset;
+ data_len = dev_size - data_offset;
calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
if (calc != crc32) {
@@ -189,10 +188,8 @@ static int u_boot_env_parse(struct u_boot_env *priv)
goto err_kfree;
}
- buf[priv->mtd->size - 1] = '\0';
+ buf[dev_size - 1] = '\0';
err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
- if (err)
- dev_err(dev, "Failed to add cells: %d\n", err);
err_kfree:
kfree(buf);
@@ -209,7 +206,6 @@ static int u_boot_env_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct u_boot_env *priv;
- int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -224,17 +220,15 @@ static int u_boot_env_probe(struct platform_device *pdev)
return PTR_ERR(priv->mtd);
}
- err = u_boot_env_parse(priv);
- if (err)
- return err;
-
config.dev = dev;
- config.cells = priv->cells;
- config.ncells = priv->ncells;
config.priv = priv;
config.size = priv->mtd->size;
- return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+ priv->nvmem = devm_nvmem_register(dev, &config);
+ if (IS_ERR(priv->nvmem))
+ return PTR_ERR(priv->nvmem);
+
+ return u_boot_env_parse(priv);
}
static const struct of_device_id u_boot_env_of_match_table[] = {
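[note] The u-boot-env rework registers the nvmem device first and then streams cells into it with nvmem_add_one_cell(), instead of pre-counting everything into a priv->cells array; reads likewise go through nvmem_device_read() rather than mtd_read(). The cell-discovery loop boils down to walking NUL-separated "name=value" records. A standalone user-space sketch of just that walk (illustrative only; the kernel adds data_offset to the computed offsets):

#include <stdio.h>
#include <string.h>

/* Walk a U-Boot-style environment blob: "a=1\0b=2\0\0". */
static void walk_env(char *data, size_t data_len)
{
	char *var, *value, *eq;

	for (var = data;
	     var < data + data_len && *var;
	     var = value + strlen(value) + 1) {
		eq = strchr(var, '=');
		if (!eq)
			break;
		*eq = '\0';
		value = eq + 1;
		printf("cell %s: offset %zu, %zu bytes\n",
		       var, (size_t)(value - data), strlen(value));
	}
}

int main(void)
{
	char blob[] = "ethaddr=00:11:22:33:44:55\0bootdelay=3\0";

	walk_env(blob, sizeof(blob));
	return 0;
}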
diff --git a/drivers/of/address.c b/drivers/of/address.c
index b59956310..ae46a3605 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -955,7 +955,6 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
r->cpu_start = range.cpu_addr;
r->dma_start = range.bus_addr;
r->size = range.size;
- r->offset = range.cpu_addr - range.bus_addr;
r++;
}
out:
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 1ca42ad9d..de89f9906 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -93,12 +93,12 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
int of_dma_configure_id(struct device *dev, struct device_node *np,
bool force_dma, const u32 *id)
{
- const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
struct device_node *bus_np;
u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
+ int iommu_ret;
int ret;
if (np == dev->of_node)
@@ -181,21 +181,29 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
- iommu = of_iommu_configure(dev, np, id);
- if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+ iommu_ret = of_iommu_configure(dev, np, id);
+ if (iommu_ret == -EPROBE_DEFER) {
/* Don't touch range map if it wasn't set from a valid dma-ranges */
if (!ret)
dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
- }
+ } else if (iommu_ret == -ENODEV) {
+ dev_dbg(dev, "device is not behind an iommu\n");
+ } else if (iommu_ret) {
+ dev_err(dev, "iommu configuration for device failed with %pe\n",
+ ERR_PTR(iommu_ret));
- dev_dbg(dev, "device is%sbehind an iommu\n",
- iommu ? " " : " not ");
+ /*
+ * Historically this routine doesn't fail driver probing
+ * due to errors in of_iommu_configure()
+ */
+ } else
+ dev_dbg(dev, "device is behind an iommu\n");
- arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
+ arch_setup_dma_ops(dev, dma_start, size, coherent);
- if (!iommu)
+ if (iommu_ret)
of_dma_set_restricted_buffer(dev, np);
return 0;
@@ -304,3 +312,44 @@ int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *
return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
+
+/**
+ * of_device_make_bus_id - Use the device node data to assign a unique name
+ * @dev: pointer to device structure that is linked to a device tree node
+ *
+ * This routine will first try using the translated bus address to
+ * derive a unique name. If it cannot, then it will prepend names from
+ * parent nodes until a unique name can be derived.
+ */
+void of_device_make_bus_id(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ const __be32 *reg;
+ u64 addr;
+ u32 mask;
+
+ /* Construct the name, using parent nodes if necessary to ensure uniqueness */
+ while (node->parent) {
+ /*
+ * If the address can be translated, then that is as much
+ * uniqueness as we need. Make it the first component and return
+ */
+ reg = of_get_property(node, "reg", NULL);
+ if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
+ if (!of_property_read_u32(node, "mask", &mask))
+ dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
+ addr, ffs(mask) - 1, node, dev_name(dev));
+
+ else
+ dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+ addr, node, dev_name(dev));
+ return;
+ }
+
+ /* format arguments only used if dev_name() resolves to NULL */
+ dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
+ kbasename(node->full_name), dev_name(dev));
+ node = node->parent;
+ }
+}
+EXPORT_SYMBOL_GPL(of_device_make_bus_id);
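[note] of_device_make_bus_id() moves here from drivers/of/platform.c (see the removal further below) and is exported so buses other than the platform bus can reuse the same DT-derived naming scheme: if the node's "reg" translates, the name becomes "<address>.<node-name>", otherwise parent names are prepended until the result is unique. A hypothetical caller on another bus, assuming a mybus_device wrapper type not found in this patch:

static void mybus_name_device(struct mybus_device *mdev,
			      struct device_node *np)
{
	mdev->dev.of_node = np;
	/* e.g. a UART node at 0x4a003000 becomes "4a003000.serial" */
	of_device_make_bus_id(&mdev->dev);
}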
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 3bf270528..4d57a4e34 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "OF: " fmt
+#include <linux/device.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -667,6 +668,17 @@ void of_changeset_destroy(struct of_changeset *ocs)
{
struct of_changeset_entry *ce, *cen;
+ /*
+ * When a device is deleted, the device links to/from it are also queued
+ * for deletion. Until these device links are freed, the devices
+ * themselves aren't freed. If the device being deleted is due to an
+ * overlay change, this device might be holding a reference to a device
+ * node that will be freed. So, wait until all already pending device
+ * links are deleted before freeing a device node. This ensures we don't
+ * free any device node that has a non-zero reference count.
+ */
+ device_link_wait_removal();
+
list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
__of_changeset_entry_destroy(ce);
}
diff --git a/drivers/of/module.c b/drivers/of/module.c
index 0e8aa974f..f58e62495 100644
--- a/drivers/of/module.c
+++ b/drivers/of/module.c
@@ -16,6 +16,14 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
ssize_t csize;
ssize_t tsize;
+ /*
+ * Prevent a kernel oops in vsnprintf() -- it only allows passing a
+ * NULL ptr when the length is also 0. Also filter out the negative
+ * lengths...
+ */
+ if ((len > 0 && !str) || len < 0)
+ return -EINVAL;
+
/* Name & Type */
/* %p eats all alphanum characters, so %c must be used here */
csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
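[note] The new guard in of_modalias() rejects (str == NULL with len > 0) and negative lengths before any snprintf() call, since vsnprintf() only tolerates a NULL buffer when the size is also 0. The same defensive shape in a standalone sketch:

#include <stdio.h>
#include <sys/types.h>

/* Guard an snprintf-style API the way of_modalias() now does. */
static ssize_t format_id(char *str, ssize_t len, int id)
{
	if ((len > 0 && !str) || len < 0)
		return -1;		/* -EINVAL in the kernel */

	/* NULL/0 is fine: snprintf just reports the needed size. */
	return snprintf(str, len, "id-%d", id);
}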
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index a9a292d6d..2ae7e9d24 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -964,7 +964,7 @@ out:
return ret;
}
-/*
+/**
* of_overlay_fdt_apply() - Create and apply an overlay changeset
* @overlay_fdt: pointer to overlay FDT
* @overlay_fdt_size: number of bytes in @overlay_fdt
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 126d265aa..b7708a06d 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -20,6 +20,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/sysfb.h>
#include "of_private.h"
@@ -98,46 +99,6 @@ static const struct of_device_id of_skipped_node_table[] = {
*/
/**
- * of_device_make_bus_id - Use the device node data to assign a unique name
- * @dev: pointer to device structure that is linked to a device tree node
- *
- * This routine will first try using the translated bus address to
- * derive a unique name. If it cannot, then it will prepend names from
- * parent nodes until a unique name can be derived.
- */
-static void of_device_make_bus_id(struct device *dev)
-{
- struct device_node *node = dev->of_node;
- const __be32 *reg;
- u64 addr;
- u32 mask;
-
- /* Construct the name, using parent nodes if necessary to ensure uniqueness */
- while (node->parent) {
- /*
- * If the address can be translated, then that is as much
- * uniqueness as we need. Make it the first component and return
- */
- reg = of_get_property(node, "reg", NULL);
- if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
- if (!of_property_read_u32(node, "mask", &mask))
- dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
- addr, ffs(mask) - 1, node, dev_name(dev));
-
- else
- dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
- addr, node, dev_name(dev));
- return;
- }
-
- /* format arguments only used if dev_name() resolves to NULL */
- dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
- kbasename(node->full_name), dev_name(dev));
- node = node->parent;
- }
-}
-
-/**
* of_device_alloc - Allocate and initialize an of_device
* @np: device node to assign to device
* @bus_id: Name to assign to the device. May be null to use default name.
@@ -621,8 +582,21 @@ static int __init of_platform_default_populate_init(void)
}
node = of_get_compatible_child(of_chosen, "simple-framebuffer");
- of_platform_device_create(node, NULL, NULL);
- of_node_put(node);
+ if (node) {
+ /*
+ * Since a "simple-framebuffer" device is already added
+ * here, disable the Generic System Framebuffers (sysfb)
+ * to prevent it from registering another device for the
+ * system framebuffer later (e.g: using the screen_info
+ * data that may had been filled as well).
+ *
+ * This can happen for example on DT systems that do EFI
+ * booting and may provide a GOP handle to the EFI stub.
+ */
+ sysfb_disable();
+ of_platform_device_create(node, NULL, NULL);
+ of_node_put(node);
+ }
/* Populate everything else. */
of_platform_default_populate(NULL, NULL, NULL);
@@ -668,7 +642,7 @@ EXPORT_SYMBOL_GPL(of_platform_device_destroy);
* @parent: device which children will be removed
*
* Complementary to of_platform_populate(), this function removes children
- * of the given device (and, recurrently, their children) that have been
+ * of the given device (and, recursively, their children) that have been
* created from their respective device tree nodes (and only those,
* leaving others - eg. manually created - unharmed).
*/
@@ -737,7 +711,7 @@ static int devm_of_platform_match(struct device *dev, void *res, void *data)
* @dev: device that requested to depopulate from device tree data
*
* Complementary to devm_of_platform_populate(), this function removes children
- * of the given device (and, recurrently, their children) that have been
+ * of the given device (and, recursively, their children) that have been
* created from their respective device tree nodes (and only those,
* leaving others - eg. manually created - unharmed).
*/
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 15d115684..fa8cd33be 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -441,6 +441,7 @@ int of_property_read_string(const struct device_node *np, const char *propname,
const char **out_string)
{
const struct property *prop = of_find_property(np, propname, NULL);
+
if (!prop)
return -EINVAL;
if (!prop->length)
@@ -1189,9 +1190,9 @@ static struct device_node *parse_##fname(struct device_node *np, \
*
* @parse_prop: function name
* parse_prop() finds the node corresponding to a supplier phandle
- * @parse_prop.np: Pointer to device node holding supplier phandle property
- * @parse_prop.prop_name: Name of property holding a phandle value
- * @parse_prop.index: For properties holding a list of phandles, this is the
+ * parse_prop.np: Pointer to device node holding supplier phandle property
+ * parse_prop.prop_name: Name of property holding a phandle value
+ * parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
* @get_con_dev: If the consumer node containing the property is never converted
* to a struct device, implement this ops so fw_devlink can use it
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index f1e54a3a1..c4e0432ae 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -201,7 +201,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);
* @opp: opp for which level value has to be returned for
*
* Return: level read from device tree corresponding to the opp, else
- * return 0.
+ * return U32_MAX.
*/
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
* @index: index of the required opp
*
* Return: performance state read from device tree corresponding to the
- * required opp, else return 0.
+ * required opp, else return U32_MAX.
*/
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
unsigned int index)
@@ -808,6 +808,16 @@ struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
struct dev_pm_opp *opp;
opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
+ if (IS_ERR(opp))
+ return opp;
+
+ /* False match */
+ if (temp == OPP_LEVEL_UNSET) {
+ dev_err(dev, "%s: OPP levels aren't available\n", __func__);
+ dev_pm_opp_put(opp);
+ return ERR_PTR(-ENODEV);
+ }
+
*level = temp;
return opp;
}
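[note] OPP levels now default to the OPP_LEVEL_UNSET sentinel, reported to callers as U32_MAX per the kernel-doc updates above, so dev_pm_opp_find_level_ceil() must reject the "false match" where an OPP exists but carries no level. Callers of dev_pm_opp_get_level() are expected to do the same; a caller-side fragment:

	unsigned int level = dev_pm_opp_get_level(opp);

	if (level == U32_MAX)	/* no "opp-level" in DT for this OPP */
		return -ENODEV;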
@@ -953,7 +963,7 @@ _opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
ret);
} else {
- opp_table->rate_clk_single = freq;
+ opp_table->current_rate_single_clk = freq;
}
return ret;
@@ -1051,40 +1061,23 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
-static int _set_performance_state(struct device *dev, struct device *pd_dev,
- struct dev_pm_opp *opp, int i)
+/* This is only called for PM domain for now */
+static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, bool up)
{
- unsigned int pstate = likely(opp) ? opp->required_opps[i]->level: 0;
- int ret;
+ struct device **devs = opp_table->required_devs;
+ struct dev_pm_opp *required_opp;
+ int index, target, delta, ret;
- if (!pd_dev)
+ if (!devs)
return 0;
- ret = dev_pm_domain_set_performance_state(pd_dev, pstate);
- if (ret) {
- dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
- dev_name(pd_dev), pstate, ret);
- }
-
- return ret;
-}
-
-static int _opp_set_required_opps_generic(struct device *dev,
- struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
-{
- dev_err(dev, "setting required-opps isn't supported for non-genpd devices\n");
- return -ENOENT;
-}
-
-static int _opp_set_required_opps_genpd(struct device *dev,
- struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down)
-{
- struct device **genpd_virt_devs =
- opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;
- int index, target, delta, ret;
+ /* required-opps not fully initialized yet */
+ if (lazy_linking_pending(opp_table))
+ return -EBUSY;
/* Scaling up? Set required OPPs in normal order, else reverse */
- if (!scaling_down) {
+ if (up) {
index = 0;
target = opp_table->required_opp_count;
delta = 1;
@@ -1095,9 +1088,13 @@ static int _opp_set_required_opps_genpd(struct device *dev,
}
while (index != target) {
- ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index);
- if (ret)
- return ret;
+ if (devs[index]) {
+ required_opp = opp ? opp->required_opps[index] : NULL;
+
+ ret = dev_pm_opp_set_opp(devs[index], required_opp);
+ if (ret)
+ return ret;
+ }
index += delta;
}
@@ -1105,34 +1102,6 @@ static int _opp_set_required_opps_genpd(struct device *dev,
return 0;
}
-/* This is only called for PM domain for now */
-static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
- struct dev_pm_opp *opp, bool up)
-{
- /* required-opps not fully initialized yet */
- if (lazy_linking_pending(opp_table))
- return -EBUSY;
-
- if (opp_table->set_required_opps)
- return opp_table->set_required_opps(dev, opp_table, opp, up);
-
- return 0;
-}
-
-/* Update set_required_opps handler */
-void _update_set_required_opps(struct opp_table *opp_table)
-{
- /* Already set */
- if (opp_table->set_required_opps)
- return;
-
- /* All required OPPs will belong to genpd or none */
- if (opp_table->required_opp_tables[0]->is_genpd)
- opp_table->set_required_opps = _opp_set_required_opps_genpd;
- else
- opp_table->set_required_opps = _opp_set_required_opps_generic;
-}
-
static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
struct dev_pm_opp *opp)
{
@@ -1140,7 +1109,7 @@ static int _set_opp_level(struct device *dev, struct opp_table *opp_table,
int ret = 0;
if (opp) {
- if (!opp->level)
+ if (opp->level == OPP_LEVEL_UNSET)
return 0;
level = opp->level;
@@ -1383,7 +1352,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
* value of the frequency. In such a case, do not abort but
* configure the hardware to the desired frequency forcefully.
*/
- forced = opp_table->rate_clk_single != freq;
+ forced = opp_table->current_rate_single_clk != freq;
}
ret = _set_opp(dev, opp_table, opp, &freq, forced);
@@ -1872,6 +1841,8 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
INIT_LIST_HEAD(&opp->node);
+ opp->level = OPP_LEVEL_UNSET;
+
return opp;
}
@@ -2393,19 +2364,13 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
{
int index;
- if (!opp_table->genpd_virt_devs)
- return;
-
for (index = 0; index < opp_table->required_opp_count; index++) {
- if (!opp_table->genpd_virt_devs[index])
+ if (!opp_table->required_devs[index])
continue;
- dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false);
- opp_table->genpd_virt_devs[index] = NULL;
+ dev_pm_domain_detach(opp_table->required_devs[index], false);
+ opp_table->required_devs[index] = NULL;
}
-
- kfree(opp_table->genpd_virt_devs);
- opp_table->genpd_virt_devs = NULL;
}
/*
@@ -2432,14 +2397,20 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
int index = 0, ret = -EINVAL;
const char * const *name = names;
- if (opp_table->genpd_virt_devs)
- return 0;
+ if (!opp_table->required_devs) {
+ dev_err(dev, "Required OPPs not available, can't attach genpd\n");
+ return -EINVAL;
+ }
- opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count,
- sizeof(*opp_table->genpd_virt_devs),
- GFP_KERNEL);
- if (!opp_table->genpd_virt_devs)
- return -ENOMEM;
+ /* Genpd core takes care of propagation to parent genpd */
+ if (opp_table->is_genpd) {
+ dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* Checking only the first one is enough? */
+ if (opp_table->required_devs[0])
+ return 0;
while (*name) {
if (index >= opp_table->required_opp_count) {
@@ -2455,13 +2426,25 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
goto err;
}
- opp_table->genpd_virt_devs[index] = virt_dev;
+ /*
+ * Add the virtual genpd device as a user of the OPP table, so
+ * we can call dev_pm_opp_set_opp() on it directly.
+ *
+ * This will be automatically removed when the OPP table is
+ * removed, don't need to handle that here.
+ */
+ if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ opp_table->required_devs[index] = virt_dev;
index++;
name++;
}
if (virt_devs)
- *virt_devs = opp_table->genpd_virt_devs;
+ *virt_devs = opp_table->required_devs;
return 0;
@@ -2471,10 +2454,50 @@ err:
}
+static int _opp_set_required_devs(struct opp_table *opp_table,
+ struct device *dev,
+ struct device **required_devs)
+{
+ int i;
+
+ if (!opp_table->required_devs) {
+ dev_err(dev, "Required OPPs not available, can't set required devs\n");
+ return -EINVAL;
+ }
+
+ /* Has another device that shares the OPP table set the required devs? */
+ if (opp_table->required_devs[0])
+ return 0;
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ /* Genpd core takes care of propagation to parent genpd */
+ if (required_devs[i] && opp_table->is_genpd &&
+ opp_table->required_opp_tables[i]->is_genpd) {
+ dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ opp_table->required_devs[i] = required_devs[i];
+ }
+
+ return 0;
+}
+
+static void _opp_put_required_devs(struct opp_table *opp_table)
+{
+ int i;
+
+ for (i = 0; i < opp_table->required_opp_count; i++)
+ opp_table->required_devs[i] = NULL;
+}
+
static void _opp_clear_config(struct opp_config_data *data)
{
- if (data->flags & OPP_CONFIG_GENPD)
+ if (data->flags & OPP_CONFIG_REQUIRED_DEVS)
+ _opp_put_required_devs(data->opp_table);
+ else if (data->flags & OPP_CONFIG_GENPD)
_opp_detach_genpd(data->opp_table);
+
if (data->flags & OPP_CONFIG_REGULATOR)
_opp_put_regulators(data->opp_table);
if (data->flags & OPP_CONFIG_SUPPORTED_HW)
@@ -2588,12 +2611,22 @@ int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
/* Attach genpds */
if (config->genpd_names) {
+ if (config->required_devs)
+ goto err;
+
ret = _opp_attach_genpd(opp_table, dev, config->genpd_names,
config->virt_devs);
if (ret)
goto err;
data->flags |= OPP_CONFIG_GENPD;
+ } else if (config->required_devs) {
+ ret = _opp_set_required_devs(opp_table, dev,
+ config->required_devs);
+ if (ret)
+ goto err;
+
+ data->flags |= OPP_CONFIG_REQUIRED_DEVS;
}
ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
@@ -2980,6 +3013,47 @@ put_table:
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
/**
+ * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
+ * @dev: device for which we do this operation
+ *
+ * Sync voltage state of the OPP table regulators.
+ *
+ * Return: 0 on success or a negative error value.
+ */
+int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int i, ret = 0;
+
+ /* Device may not have OPP table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return 0;
+
+ /* Regulator may not be required for the device */
+ if (unlikely(!opp_table->regulators))
+ goto put_table;
+
+ /* Nothing to sync if voltage wasn't changed */
+ if (!opp_table->enabled)
+ goto put_table;
+
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+ ret = regulator_sync_voltage(reg);
+ if (ret)
+ break;
+ }
+put_table:
+ /* Drop reference taken by _find_opp_table() */
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
@@ -3102,44 +3176,3 @@ void dev_pm_opp_remove_table(struct device *dev)
dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
-
-/**
- * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
- * @dev: device for which we do this operation
- *
- * Sync voltage state of the OPP table regulators.
- *
- * Return: 0 on success or a negative error value.
- */
-int dev_pm_opp_sync_regulators(struct device *dev)
-{
- struct opp_table *opp_table;
- struct regulator *reg;
- int i, ret = 0;
-
- /* Device may not have OPP table */
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return 0;
-
- /* Regulator may not be required for the device */
- if (unlikely(!opp_table->regulators))
- goto put_table;
-
- /* Nothing to sync if voltage wasn't changed */
- if (!opp_table->enabled)
- goto put_table;
-
- for (i = 0; i < opp_table->regulator_count; i++) {
- reg = opp_table->regulators[i];
- ret = regulator_sync_voltage(reg);
- if (ret)
- break;
- }
-put_table:
- /* Drop reference taken by _find_opp_table() */
- dev_pm_opp_put_opp_table(opp_table);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);
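[note] With the genpd-specific set_required_opps handler gone, required OPPs are now propagated by calling dev_pm_opp_set_opp() on the per-index required_devs, which consumers hand in through dev_pm_opp_set_config() (mutually exclusive with genpd_names, as the hunk above enforces). A sketch of the consumer side; the device pointers are hypothetical:

	/* rdevs[] must line up with the required-opps entries in DT. */
	struct device *rdevs[2] = { parent_genpd_dev, NULL };
	struct dev_pm_opp_config config = {
		.required_devs = rdevs,
	};
	int token;

	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;
	/* ... later, on teardown: dev_pm_opp_clear_config(token); */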
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 81fa27599..f9f0b22bc 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -165,7 +165,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
struct opp_table **required_opp_tables;
struct device_node *required_np, *np;
bool lazy = false;
- int count, i;
+ int count, i, size;
/* Traversing the first OPP node is all we need */
np = of_get_next_available_child(opp_np, NULL);
@@ -179,12 +179,13 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
if (count <= 0)
goto put_np;
- required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
- GFP_KERNEL);
+ size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs);
+ required_opp_tables = kcalloc(count, size, GFP_KERNEL);
if (!required_opp_tables)
goto put_np;
opp_table->required_opp_tables = required_opp_tables;
+ opp_table->required_devs = (void *)(required_opp_tables + count);
opp_table->required_opp_count = count;
for (i = 0; i < count; i++) {
@@ -208,8 +209,6 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
mutex_lock(&opp_table_lock);
list_add(&opp_table->lazy, &lazy_opp_tables);
mutex_unlock(&opp_table_lock);
- } else {
- _update_set_required_opps(opp_table);
}
goto put_np;
@@ -296,7 +295,7 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
of_node_put(opp->np);
}
-static int _link_required_opps(struct dev_pm_opp *opp,
+static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_table,
struct opp_table *required_table, int index)
{
struct device_node *np;
@@ -314,6 +313,39 @@ static int _link_required_opps(struct dev_pm_opp *opp,
return -ENODEV;
}
+ /*
+ * There are two genpd (as required-opp) cases that we need to handle,
+ * devices with a single genpd and ones with multiple genpds.
+ *
+ * The single genpd case requires special handling as we need to use the
+ * same `dev` structure (instead of a virtual one provided by genpd
+ * core) for setting the performance state.
+ *
+ * It doesn't make sense for a device's DT entry to have both
+ * "opp-level" and single "required-opps" entry pointing to a genpd's
+ * OPP, as that would make the OPP core call
+ * dev_pm_domain_set_performance_state() for two different values for
+ * the same device structure. Lets treat single genpd configuration as a
+ * case where the OPP's level is directly available without required-opp
+ * link in the DT.
+ *
+ * Just update the `level` with the right value, which
+ * dev_pm_opp_set_opp() will take care of in the normal path itself.
+ *
+ * There is another case though, where a genpd's OPP table has
+ * required-opps set to a parent genpd. The OPP core expects the user to
+ * set the respective required `struct device` pointer via
+ * dev_pm_opp_set_config().
+ */
+ if (required_table->is_genpd && opp_table->required_opp_count == 1 &&
+ !opp_table->required_devs[0]) {
+ /* Genpd core takes care of propagation to parent genpd */
+ if (!opp_table->is_genpd) {
+ if (!WARN_ON(opp->level != OPP_LEVEL_UNSET))
+ opp->level = opp->required_opps[0]->level;
+ }
+ }
+
return 0;
}
@@ -338,7 +370,7 @@ static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
if (IS_ERR_OR_NULL(required_table))
continue;
- ret = _link_required_opps(opp, required_table, i);
+ ret = _link_required_opps(opp, opp_table, required_table, i);
if (ret)
goto free_required_opps;
}
@@ -359,7 +391,7 @@ static int lazy_link_required_opps(struct opp_table *opp_table,
int ret;
list_for_each_entry(opp, &opp_table->opp_list, node) {
- ret = _link_required_opps(opp, new_table, index);
+ ret = _link_required_opps(opp, opp_table, new_table, index);
if (ret)
return ret;
}
@@ -422,7 +454,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
/* All required opp-tables found, remove from lazy list */
if (!lazy) {
- _update_set_required_opps(opp_table);
list_del_init(&opp_table->lazy);
list_for_each_entry(opp, &opp_table->opp_list, node)
@@ -1393,8 +1424,14 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
opp = _find_opp_of_np(opp_table, required_np);
if (opp) {
- pstate = opp->level;
+ if (opp->level == OPP_LEVEL_UNSET) {
+ pr_err("%s: OPP levels aren't available for %pOF\n",
+ __func__, np);
+ } else {
+ pstate = opp->level;
+ }
dev_pm_opp_put(opp);
+
}
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 08366f90f..cff1fabd1 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -35,6 +35,7 @@ extern struct list_head opp_tables;
#define OPP_CONFIG_PROP_NAME BIT(3)
#define OPP_CONFIG_SUPPORTED_HW BIT(4)
#define OPP_CONFIG_GENPD BIT(5)
+#define OPP_CONFIG_REQUIRED_DEVS BIT(6)
/**
* struct opp_config_data - data for set config operations
@@ -49,6 +50,18 @@ struct opp_config_data {
unsigned int flags;
};
+/**
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
+ *
+ * This structure stores the bandwidth values for a single interconnect path.
+ */
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
+};
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -157,12 +170,12 @@ enum opp_table_access {
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @parsed_static_opps: Count of devices for which OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
- * @rate_clk_single: Currently configured frequency for single clk.
+ * @current_rate_single_clk: Currently configured frequency for single clk.
* @current_opp: Currently configured OPP for the table.
* @suspend_opp: Pointer to OPP to be used during device suspend.
- * @genpd_virt_devs: List of virtual devices for multiple genpd support.
* @required_opp_tables: List of device OPP tables that are required by OPPs in
* this table.
+ * @required_devs: List of devices for required OPP tables.
* @required_opp_count: Number of required devices.
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
@@ -180,7 +193,6 @@ enum opp_table_access {
* @path_count: Number of interconnect paths
* @enabled: Set to true if the device's resources are enabled/configured.
* @is_genpd: Marks if the OPP table belongs to a genpd.
- * @set_required_opps: Helper responsible to set required OPPs.
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -207,12 +219,12 @@ struct opp_table {
unsigned int parsed_static_opps;
enum opp_table_access shared_opp;
- unsigned long rate_clk_single;
+ unsigned long current_rate_single_clk;
struct dev_pm_opp *current_opp;
struct dev_pm_opp *suspend_opp;
- struct device **genpd_virt_devs;
struct opp_table **required_opp_tables;
+ struct device **required_devs;
unsigned int required_opp_count;
unsigned int *supported_hw;
@@ -229,8 +241,6 @@ struct opp_table {
unsigned int path_count;
bool enabled;
bool is_genpd;
- int (*set_required_opps)(struct device *dev,
- struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down);
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 8f3f13fbb..e3b97cd1f 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -373,23 +374,15 @@ static int ti_opp_supply_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *cpu_dev = get_cpu_device(0);
- const struct of_device_id *match;
const struct ti_opp_supply_of_data *of_data;
int ret = 0;
- match = of_match_device(ti_opp_supply_of_match, dev);
- if (!match) {
- /* We do not expect this to happen */
- dev_err(dev, "%s: Unable to match device\n", __func__);
- return -ENODEV;
- }
- if (!match->data) {
+ of_data = device_get_match_data(dev);
+ if (!of_data) {
/* Again, unlikely... but mistakes do happen */
dev_err(dev, "%s: Bad data in match\n", __func__);
return -EINVAL;
}
- of_data = match->data;
-
dev_set_drvdata(dev, (void *)of_data);
/* If we need optimized voltage */
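[note] The probe path drops of_match_device() in favor of device_get_match_data(), which performs the match lookup and returns its .data pointer in one step (and also works for non-OF fwnodes). The resulting idiom, straight from the hunk above:

	const struct ti_opp_supply_of_data *of_data;

	of_data = device_get_match_data(dev);
	if (!of_data)
		return -EINVAL;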
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index e21831d93..49c74ded8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -611,7 +611,7 @@ static void free_pardevice(struct device *dev)
{
struct pardevice *par_dev = to_pardevice(dev);
- kfree(par_dev->name);
+ kfree_const(par_dev->name);
kfree(par_dev);
}
@@ -682,8 +682,8 @@ parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int id)
{
struct pardevice *par_dev;
+ const char *devname;
int ret;
- char *devname;
if (port->physport->flags & PARPORT_FLAG_EXCL) {
/* An exclusive device is registered. */
@@ -726,7 +726,7 @@ parport_register_dev_model(struct parport *port, const char *name,
if (!par_dev->state)
goto err_put_par_dev;
- devname = kstrdup(name, GFP_KERNEL);
+ devname = kstrdup_const(name, GFP_KERNEL);
if (!devname)
goto err_free_par_dev;
@@ -804,7 +804,7 @@ parport_register_dev_model(struct parport *port, const char *name,
return par_dev;
err_free_devname:
- kfree(devname);
+ kfree_const(devname);
err_free_par_dev:
kfree(par_dev->state);
err_put_par_dev:
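[note] parport switches the device name to the kstrdup_const()/kfree_const() pair: for a name that already lives in .rodata the "copy" is just the original pointer, and kfree_const() knows not to free it. The pairing rule, sketched:

	const char *devname;

	devname = kstrdup_const(name, GFP_KERNEL);	/* may alias 'name' */
	if (!devname)
		return -ENOMEM;
	/* ... */
	kfree_const(devname);	/* frees only if it was really duplicated */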
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6554a2e89..6449056b5 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
+
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(dev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(dev, pos, val);
+}
+EXPORT_SYMBOL(pci_clear_and_set_config_dword);
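[note] pci_clear_and_set_config_dword() centralizes the read-modify-write pattern on a config dword. A hedged caller example; the register and masks below follow the ASPM L1-substates usage elsewhere in the tree and are illustrative, not taken from this hunk:

	/* Replace the LTR L1.2 threshold field in one RMW step. */
	pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
				       PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				       PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
				       ctl1);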
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 291d12711..1d5a70c90 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -47,6 +47,7 @@ config PCI_J721E
config PCI_J721E_HOST
bool "TI J721E PCIe controller (host mode)"
+ depends on ARCH_K3 || COMPILE_TEST
depends on OF
select PCIE_CADENCE_HOST
select PCI_J721E
@@ -57,6 +58,7 @@ config PCI_J721E_HOST
config PCI_J721E_EP
bool "TI J721E PCIe controller (endpoint mode)"
+ depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE_EP
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 2c87e7728..857182460 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -42,18 +42,16 @@ enum link_status {
};
#define J721E_MODE_RC BIT(7)
-#define LANE_COUNT_MASK BIT(8)
#define LANE_COUNT(n) ((n) << 8)
#define GENERATION_SEL_MASK GENMASK(1, 0)
-#define MAX_LANES 2
-
struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
struct clk *refclk;
u32 mode;
u32 num_lanes;
+ u32 max_lanes;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -71,6 +69,7 @@ struct j721e_pcie_data {
unsigned int quirk_disable_flr:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
+ unsigned int max_lanes;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -206,11 +205,15 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
{
struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
+ u32 mask = BIT(8);
u32 val = 0;
int ret;
+ if (pcie->max_lanes == 4)
+ mask = GENMASK(9, 8);
+
val = LANE_COUNT(lanes - 1);
- ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret)
dev_err(dev, "failed to set link count\n");
@@ -290,11 +293,13 @@ static const struct j721e_pcie_data j721e_pcie_rc_data = {
.quirk_retrain_flag = true,
.byte_access_allowed = false,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_rc_data = {
@@ -302,23 +307,41 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
.quirk_detect_quiet_flag = true,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
.quirk_disable_flr = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
.mode = PCI_MODE_RC,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 1,
};
static const struct j721e_pcie_data am64_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 1,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 4,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -346,6 +369,14 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,am64-pcie-ep",
.data = &am64_pcie_ep_data,
},
+ {
+ .compatible = "ti,j784s4-pcie-host",
+ .data = &j784s4_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j784s4-pcie-ep",
+ .data = &j784s4_pcie_ep_data,
+ },
{},
};
@@ -432,9 +463,13 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->user_cfg_base = base;
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
- if (ret || num_lanes > MAX_LANES)
+ if (ret || num_lanes > data->max_lanes) {
+ dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
num_lanes = 1;
+ }
+
pcie->num_lanes = num_lanes;
+ pcie->max_lanes = data->max_lanes;
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
return -EINVAL;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 3142feb8a..2d0a8d78b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -360,8 +360,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
- u8 intx)
+static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -371,7 +371,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
- * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+ * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
@@ -532,25 +532,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
+ case PCI_IRQ_INTX:
if (vfn > 0) {
- dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ dev_err(dev, "Cannot raise INTX interrupts for VF\n");
return -EINVAL;
}
- return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
+ return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 373cb50fc..03b96798f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -347,16 +347,16 @@ struct cdns_pcie_epf {
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
* dedicated outbound regions is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
* @lock: spin lock to disable interrupts while modifying PCIe controller
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
@@ -374,7 +374,7 @@ struct cdns_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
- /* protect writing to PCI_STATUS while raising legacy interrupts */
+ /* protect writing to PCI_STATUS while raising INTX interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
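[note] The endpoint hunks above (and the DesignWare ones below) belong to a tree-wide rename of the enum pci_epc_irq_type values to the generic PCI_IRQ_{INTX,MSI,MSIX} constants, with raise_irq() taking a plain unsigned int type. The post-rename shape of an EPC raise_irq callback; the foo_* helpers are hypothetical:

static int foo_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
			    unsigned int type, u16 interrupt_num)
{
	switch (type) {
	case PCI_IRQ_INTX:
		return foo_ep_send_intx_irq(epc, fn, vfn);
	case PCI_IRQ_MSI:
		return foo_ep_send_msi_irq(epc, fn, vfn, interrupt_num);
	case PCI_IRQ_MSIX:
		return foo_ep_send_msix_irq(epc, fn, vfn, interrupt_num);
	default:
		return -EINVAL;
	}
}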
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 5ac021dbd..8afacc90c 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -336,7 +336,7 @@ config PCI_EXYNOS
config PCIE_FU740
bool "SiFive FU740 PCIe controller"
depends on PCI_MSI
- depends on SOC_SIFIVE || COMPILE_TEST
+ depends on ARCH_SIFIVE || COMPILE_TEST
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support for the SiFive
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index b445ffe95..0e4066770 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -371,7 +371,7 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
+ .init = dra7xx_pcie_host_init,
};
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -386,7 +386,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
-static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx)
{
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
mdelay(1);
@@ -404,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dra7xx_pcie_raise_legacy_irq(dra7xx);
+ case PCI_IRQ_INTX:
+ dra7xx_pcie_raise_intx_irq(dra7xx);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
break;
default:
@@ -436,7 +436,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dra7xx_pcie_ep_init,
+ .init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
.get_features = dra7xx_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index c6bede346..a33fa98a2 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -268,7 +268,7 @@ static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .host_init = exynos_pcie_host_init,
+ .init = exynos_pcie_host_init,
};
static int exynos_add_pcie_port(struct exynos_pcie *ep,
@@ -375,7 +375,7 @@ fail_probe:
return ret;
}
-static int exynos_pcie_remove(struct platform_device *pdev)
+static void exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
@@ -385,8 +385,6 @@ static int exynos_pcie_remove(struct platform_device *pdev)
phy_exit(ep->phy);
exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
-
- return 0;
}
static int exynos_pcie_suspend_noirq(struct device *dev)
@@ -431,7 +429,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
static struct platform_driver exynos_pcie_driver = {
.probe = exynos_pcie_probe,
- .remove = exynos_pcie_remove,
+ .remove_new = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
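[note] exynos (and keystone, further below) convert to the void-returning .remove_new platform callback; drivers whose removal cannot fail stop pretending to return a status. The conversion pattern, with hypothetical foo_* names:

static void foo_remove(struct platform_device *pdev)
{
	/* tear down resources; no return value to get wrong */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,	/* was: .remove returning int */
	.driver     = {
		.name = "foo",
	},
};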
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 74703362a..dc2c036ab 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1039,8 +1039,8 @@ static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .host_init = imx6_pcie_host_init,
- .host_deinit = imx6_pcie_host_exit,
+ .init = imx6_pcie_host_init,
+ .deinit = imx6_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -1058,17 +1058,16 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -1093,7 +1092,7 @@ imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = imx6_pcie_ep_init,
+ .init = imx6_pcie_ep_init,
.raise_irq = imx6_pcie_ep_raise_irq,
.get_features = imx6_pcie_ep_get_features,
};
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index cf3836561..c0c62533a 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -115,8 +115,7 @@ struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
+ int intx_host_irqs[PCI_NUM_INTX];
int msi_host_irq;
int num_lanes;
@@ -124,7 +123,7 @@ struct keystone_pcie {
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np;
/* Application register space */
@@ -252,8 +251,8 @@ static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
return dw_pcie_allocate_domains(pp);
}
-static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
- int offset)
+static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
+ int offset)
{
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
@@ -263,7 +262,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
if (BIT(0) & pending) {
dev_dbg(dev, ": irq: irq_offset %d", offset);
- generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
+ generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -307,38 +306,37 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
return IRQ_HANDLED;
}
-static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_ack_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_mask_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_unmask_intx_irq(struct irq_data *d)
{
}
-static struct irq_chip ks_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_pcie_ack_legacy_irq,
- .irq_mask = ks_pcie_mask_legacy_irq,
- .irq_unmask = ks_pcie_unmask_legacy_irq,
+static struct irq_chip ks_pcie_intx_irq_chip = {
+ .name = "Keystone-PCI-INTX-IRQ",
+ .irq_ack = ks_pcie_ack_intx_irq,
+ .irq_mask = ks_pcie_mask_intx_irq,
+ .irq_unmask = ks_pcie_unmask_intx_irq,
};
-static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq,
- irq_hw_number_t hw_irq)
+static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hw_irq)
{
- irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
return 0;
}
-static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
- .map = ks_pcie_init_legacy_irq_map,
+static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
+ .map = ks_pcie_init_intx_irq_map,
.xlate = irq_domain_xlate_onetwocell,
};
@@ -605,22 +603,22 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
}
/**
- * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * ks_pcie_intx_irq_handler() - Handle INTX interrupt
* @desc: Pointer to irq descriptor
*
- * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * Traverse through pending INTX interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
- u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+ dev_dbg(dev, ": Handling INTX irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@@ -628,7 +626,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
@@ -686,10 +684,10 @@ err:
return ret;
}
-static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
{
struct device *dev = ks_pcie->pci->dev;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np = ks_pcie->np;
struct device_node *intc_np;
int irq_count, irq, ret = 0, i;
@@ -697,7 +695,7 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!intc_np) {
/*
- * Since legacy interrupts are modeled as edge-interrupts in
+ * Since INTX interrupts are modeled as edge interrupts in
* AM6, keep them disabled for now.
*/
if (ks_pcie->is_am6)
@@ -719,22 +717,21 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_host_irqs[i] = irq;
+ ks_pcie->intx_host_irqs[i] = irq;
irq_set_chained_handler_and_data(irq,
- ks_pcie_legacy_irq_handler,
+ ks_pcie_intx_irq_handler,
ks_pcie);
}
- legacy_irq_domain =
- irq_domain_add_linear(intc_np, PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops, NULL);
- if (!legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ &ks_pcie_intx_irq_domain_ops, NULL);
+ if (!intx_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for INTX irqs\n");
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_irq_domain = legacy_irq_domain;
+ ks_pcie->intx_irq_domain = intx_irq_domain;
for (i = 0; i < PCI_NUM_INTX; i++)
ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
@@ -808,7 +805,7 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
if (!ks_pcie->is_am6)
pp->bridge->child_ops = &ks_child_pcie_ops;
- ret = ks_pcie_config_legacy_irq(ks_pcie);
+ ret = ks_pcie_config_intx_irq(ks_pcie);
if (ret)
return ret;
@@ -838,12 +835,12 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .host_init = ks_pcie_host_init,
- .msi_host_init = ks_pcie_msi_host_init,
+ .init = ks_pcie_host_init,
+ .msi_init = ks_pcie_msi_host_init,
};
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
- .host_init = ks_pcie_host_init,
+ .init = ks_pcie_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -881,7 +878,7 @@ static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}
-static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
u8 int_pin;
@@ -900,20 +897,19 @@ static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
}
static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ case PCI_IRQ_INTX:
+ ks_pcie_am654_raise_intx_irq(ks_pcie);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
break;
default:
@@ -944,7 +940,7 @@ ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
- .ep_init = ks_pcie_am654_ep_init,
+ .init = ks_pcie_am654_ep_init,
.raise_irq = ks_pcie_am654_raise_irq,
.get_features = &ks_pcie_am654_get_features,
};
@@ -1311,7 +1307,7 @@ err_link:
return ret;
}
-static int ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
struct device_link **link = ks_pcie->link;
@@ -1323,13 +1319,11 @@ static int ks_pcie_remove(struct platform_device *pdev)
ks_pcie_disable_phy(ks_pcie);
while (num_lanes--)
device_link_del(link[num_lanes]);
-
- return 0;
}
static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove = ks_pcie_remove,
+ .remove_new = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = ks_pcie_of_match,
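The keystone hunks above are a mechanical legacy→INTX rename, but they touch every piece of the chained-interrupt pattern that DWC hosts share: one parent IRQ demuxed through a linear domain of PCI_NUM_INTX entries. A minimal sketch of that pattern follows, with hypothetical demo_* names and a made-up status register offset; it is not the keystone code itself.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>

struct demo_pcie {
	struct irq_domain *intx_irq_domain;
	void __iomem *app_base;
};

static void demo_intx_irq_handler(struct irq_desc *desc)
{
	struct demo_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	int bit;

	/* Mask/ack at the parent controller around the demux. */
	chained_irq_enter(chip, desc);

	status = readl(pcie->app_base + 0x184);	/* hypothetical INTx status */
	for_each_set_bit(bit, &status, PCI_NUM_INTX)
		generic_handle_domain_irq(pcie->intx_irq_domain, bit);

	chained_irq_exit(chip, desc);
}

static int demo_pcie_config_intx(struct demo_pcie *pcie,
				 struct device_node *np, int parent_irq)
{
	/* A real driver supplies domain ops that install its own irq_chip,
	 * as keystone does; irq_domain_simple_ops keeps the sketch short. */
	pcie->intx_irq_domain = irq_domain_add_linear(np, PCI_NUM_INTX,
						      &irq_domain_simple_ops,
						      pcie);
	if (!pcie->intx_irq_domain)
		return -ENODEV;

	irq_set_chained_handler_and_data(parent_irq,
					 demo_intx_irq_handler, pcie);
	return 0;
}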
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 3d3c50ef4..2e398494e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -49,7 +49,7 @@ struct ls_pcie_ep {
bool big_endian;
};
-static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
{
struct dw_pcie *pci = pcie->pci;
@@ -59,7 +59,7 @@ static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
return ioread32(pci->dbi_base + offset);
}
-static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
{
struct dw_pcie *pci = pcie->pci;
@@ -76,8 +76,8 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
u32 val, cfg;
u8 offset;
- val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
- ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
if (!val)
return IRQ_NONE;
@@ -96,9 +96,9 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
dw_pcie_dbi_ro_wr_dis(pci);
- cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
+ cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG);
cfg |= PEX_PF0_CFG_READY;
- ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
dw_pcie_ep_linkup(&pci->ep);
dev_dbg(pci->dev, "Link up\n");
@@ -130,10 +130,10 @@ static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
}
/* Enable interrupts */
- val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER);
val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
PEX_PF0_PME_MES_IER_LUDIE;
- ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
return 0;
}
@@ -166,16 +166,16 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
interrupt_num);
default:
@@ -184,8 +184,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
-static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
- u8 func_no)
+static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
@@ -195,10 +194,10 @@ static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
}
static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
- .ep_init = ls_pcie_ep_init,
+ .init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
- .func_conf_select = ls_pcie_ep_func_conf_select,
+ .get_dbi_offset = ls_pcie_ep_get_dbi_offset,
};
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
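The func_conf_select → get_dbi_offset rename above is more than cosmetic: the callback's contract is now explicit. It returns the per-function DBI offset, and the DWC core adds that offset to every register access itself through the dw_pcie_ep_*_dbi() helpers introduced later in this series. A sketch of a typical implementation, assuming a hypothetical fixed per-function stride:

#include "pcie-designware.h"	/* DWC core header, for dw_pcie_ep_ops */

#define DEMO_EP_FUNC_DBI_OFFSET	0x1000	/* hypothetical per-function stride */

/* Offset of physical function @func_no within the DBI space; the core
 * adds this in dw_pcie_ep_{read,write}{b,w,l}_dbi(). */
static unsigned int demo_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no)
{
	return func_no * DEMO_EP_FUNC_DBI_OFFSET;
}

static const struct dw_pcie_ep_ops demo_ep_ops = {
	.get_dbi_offset = demo_ep_get_dbi_offset,
	/* .get_dbi2_offset only where the shadow (DBI2) window differs. */
};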
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 37956e09c..ee6f52568 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -35,21 +35,41 @@
#define PF_MCR_PTOMR BIT(0)
#define PF_MCR_EXL2S BIT(1)
+/* LS1021A PEXn PM Write Control Register */
+#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64)
+#define PMXMTTURNOFF BIT(31)
+#define SCFG_PEXSFTRSTCR 0x190
+#define PEXSR(idx) BIT(idx)
+
+/* LS1043A PEX PME control register */
+#define SCFG_PEXPMECR 0x144
+#define PEXPME(idx) BIT(31 - (idx) * 4)
+
+/* LS1043A PEX LUT debug register */
+#define LS_PCIE_LDBG 0x7fc
+#define LDBG_SR BIT(30)
+#define LDBG_WE BIT(31)
+
#define PCIE_IATU_NUM 6
struct ls_pcie_drvdata {
- const u32 pf_off;
+ const u32 pf_lut_off;
+ const struct dw_pcie_host_ops *ops;
+ int (*exit_from_l2)(struct dw_pcie_rp *pp);
+ bool scfg_support;
bool pm_support;
};
struct ls_pcie {
struct dw_pcie *pci;
const struct ls_pcie_drvdata *drvdata;
- void __iomem *pf_base;
+ void __iomem *pf_lut_base;
+ struct regmap *scfg;
+ int index;
bool big_endian;
};
-#define ls_pcie_pf_readl_addr(addr) ls_pcie_pf_readl(pcie, addr)
+#define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr)
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
@@ -90,20 +110,20 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
-static u32 ls_pcie_pf_readl(struct ls_pcie *pcie, u32 off)
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off)
{
if (pcie->big_endian)
- return ioread32be(pcie->pf_base + off);
+ return ioread32be(pcie->pf_lut_base + off);
- return ioread32(pcie->pf_base + off);
+ return ioread32(pcie->pf_lut_base + off);
}
-static void ls_pcie_pf_writel(struct ls_pcie *pcie, u32 off, u32 val)
+static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
{
if (pcie->big_endian)
- iowrite32be(val, pcie->pf_base + off);
+ iowrite32be(val, pcie->pf_lut_base + off);
else
- iowrite32(val, pcie->pf_base + off);
+ iowrite32(val, pcie->pf_lut_base + off);
}
static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
@@ -113,11 +133,11 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
u32 val;
int ret;
- val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
val |= PF_MCR_PTOMR;
- ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
- ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
val, !(val & PF_MCR_PTOMR),
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US);
@@ -125,7 +145,7 @@ static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
}
-static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
@@ -136,20 +156,22 @@ static void ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
* Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
* to exit L2 state.
*/
- val = ls_pcie_pf_readl(pcie, LS_PCIE_PF_MCR);
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
val |= PF_MCR_EXL2S;
- ls_pcie_pf_writel(pcie, LS_PCIE_PF_MCR, val);
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
/*
* L2 exit timeout of 10ms is not defined in the specifications,
* it was chosen based on empirical observations.
*/
- ret = readx_poll_timeout(ls_pcie_pf_readl_addr, LS_PCIE_PF_MCR,
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
val, !(val & PF_MCR_EXL2S),
1000,
10000);
if (ret)
dev_err(pcie->pci->dev, "L2 exit timeout\n");
+
+ return ret;
}
static int ls_pcie_host_init(struct dw_pcie_rp *pp)
@@ -168,25 +190,130 @@ static int ls_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
+static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Send PME_Turn_Off message */
+ regmap_write_bits(scfg, reg, mask, mask);
+
+ /*
+ * There is no specific register to check for PME_To_Ack from endpoint.
+ * So on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US.
+ */
+ mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000);
+
+ /*
+ * The Layerscape hardware reference manual recommends clearing the
+ * PMXMTTURNOFF bit to complete the PME_Turn_Off handshake.
+ */
+ regmap_write_bits(scfg, reg, mask, 0);
+}
+
+static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF);
+}
+
+static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Reset the PEX wrapper to bring the link out of L2 */
+ regmap_write_bits(scfg, reg, mask, mask);
+ regmap_write_bits(scfg, reg, mask, 0);
+
+ return 0;
+}
+
+static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index));
+}
+
+static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index));
+}
+
+static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+
+ /*
+ * Reset the PEX wrapper to bring the link out of L2.
+ * LDBG_WE: enables write access to PEXDBG[SR] so the soft reset on the
+ * PEX module can be both set and cleared.
+ * LDBG_SR: when set to 1, the PEX module enters soft reset.
+ */
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ return 0;
+}
+
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
- .host_init = ls_pcie_host_init,
+ .init = ls_pcie_host_init,
.pme_turn_off = ls_pcie_send_turnoff_msg,
};
+static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1021a_pcie_send_turnoff_msg,
+};
+
static const struct ls_pcie_drvdata ls1021a_drvdata = {
- .pm_support = false,
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1021a_pcie_host_ops,
+ .exit_from_l2 = ls1021a_pcie_exit_from_l2,
+};
+
+static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1043a_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1043a_drvdata = {
+ .pf_lut_off = 0x10000,
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1043a_pcie_host_ops,
+ .exit_from_l2 = ls1043a_pcie_exit_from_l2,
};
static const struct ls_pcie_drvdata layerscape_drvdata = {
- .pf_off = 0xc0000,
+ .pf_lut_off = 0xc0000,
.pm_support = true,
+ .ops = &ls_pcie_host_ops,
+ .exit_from_l2 = ls_pcie_exit_from_l2,
};
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
{ .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
- { .compatible = "fsl,ls1043a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
{ .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
@@ -201,6 +328,8 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
+ u32 index[2];
+ int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -213,9 +342,8 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->drvdata = of_device_get_match_data(dev);
pci->dev = dev;
- pci->pp.ops = &ls_pcie_host_ops;
-
pcie->pci = pci;
+ pci->pp.ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
@@ -224,7 +352,21 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
- pcie->pf_base = pci->dbi_base + pcie->drvdata->pf_off;
+ pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
+
+ if (pcie->drvdata->scfg_support) {
+ pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(dev, "No syscfg phandle specified\n");
+ return PTR_ERR(pcie->scfg);
+ }
+
+ ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
+ if (ret)
+ return ret;
+
+ pcie->index = index[1];
+ }
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
@@ -247,11 +389,14 @@ static int ls_pcie_suspend_noirq(struct device *dev)
static int ls_pcie_resume_noirq(struct device *dev)
{
struct ls_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
if (!pcie->drvdata->pm_support)
return 0;
- ls_pcie_exit_from_l2(&pcie->pci->pp);
+ ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp);
+ if (ret)
+ return ret;
return dw_pcie_resume_noirq(pcie->pci);
}
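Besides renaming the PF accessors to *_pf_lut_*, the layerscape rework moves PME_Turn_Off and L2-exit handling into per-SoC drvdata callbacks, because LS1021A and LS1043A route both through the SCFG syscon instead of the PF LUT. The probe path resolves that syscon from the two-cell fsl,pcie-scfg property (phandle plus PEX instance index). A minimal sketch of the lookup, assuming a DT entry like fsl,pcie-scfg = <&scfg 1>:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int demo_get_scfg(struct device *dev, struct regmap **scfg, int *index)
{
	u32 cells[2];
	int ret;

	/* Resolves the phandle in the first cell to a regmap. */
	*scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
	if (IS_ERR(*scfg))
		return PTR_ERR(*scfg);

	/* Re-read the raw cells; the second is the PEX instance index
	 * used to pick the per-controller bit in the shared registers. */
	ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg",
					 cells, 2);
	if (ret)
		return ret;

	*index = cells[1];
	return 0;
}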
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 407558f5d..6477c8326 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -389,7 +389,7 @@ static int meson_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .host_init = meson_pcie_host_init,
+ .init = meson_pcie_host_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index b8cb77c9c..6dfdda59f 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -311,7 +311,7 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops al_pcie_host_ops = {
- .host_init = al_pcie_host_init,
+ .init = al_pcie_host_init,
};
static int al_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 5c999e15c..b5c599cca 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -225,7 +225,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
}
static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
- .host_init = armada8k_pcie_host_init,
+ .init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9b572a2b2..9ed0a9ba7 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -333,7 +333,7 @@ static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
- .host_init = artpec6_pcie_host_init,
+ .init = artpec6_pcie_host_init,
};
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -352,15 +352,15 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ case PCI_IRQ_INTX:
+ dev_err(pci->dev, "EP cannot trigger INTx IRQs\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -370,7 +370,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = artpec6_pcie_ep_init,
+ .init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
};
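Like every endpoint driver in this series, artpec6 now takes the IRQ type as a plain unsigned int and switches on the generic PCI_IRQ_* constants from <linux/pci.h> rather than the removed enum pci_epc_irq_type. The dispatch shape common to these drivers, sketched with the constant mapping called out in comments:

#include <linux/errno.h>
#include <linux/pci.h>

#include "pcie-designware.h"

static int demo_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
			     unsigned int type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	switch (type) {
	case PCI_IRQ_INTX:	/* was PCI_EPC_IRQ_LEGACY */
		return dw_pcie_ep_raise_intx_irq(ep, func_no);
	case PCI_IRQ_MSI:	/* was PCI_EPC_IRQ_MSI */
		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
	case PCI_IRQ_MSIX:	/* was PCI_EPC_IRQ_MSIX */
		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
	default:
		dev_err(pci->dev, "Unknown IRQ type %d\n", type);
		return -EINVAL;
	}
}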
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
index 17e696797..76d0ddea8 100644
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -559,8 +559,8 @@ static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
- .host_init = bt1_pcie_host_init,
- .host_deinit = bt1_pcie_host_deinit,
+ .init = bt1_pcie_host_init,
+ .deinit = bt1_pcie_host_deinit,
};
static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 19b6708f6..746a11dcb 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -44,46 +44,19 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
return NULL;
}
-static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
-{
- unsigned int func_offset = 0;
-
- if (ep->ops->func_conf_select)
- func_offset = ep->ops->func_conf_select(ep, func_no);
-
- return func_offset;
-}
-
-static unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep, u8 func_no)
-{
- unsigned int dbi2_offset = 0;
-
- if (ep->ops->get_dbi2_offset)
- dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
- else if (ep->ops->func_conf_select) /* for backward compatibility */
- dbi2_offset = ep->ops->func_conf_select(ep, func_no);
-
- return dbi2_offset;
-}
-
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
enum pci_barno bar, int flags)
{
- unsigned int func_offset, dbi2_offset;
struct dw_pcie_ep *ep = &pci->ep;
- u32 reg, reg_dbi2;
-
- func_offset = dw_pcie_ep_func_select(ep, func_no);
- dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+ u32 reg;
- reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
- reg_dbi2 = dbi2_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg_dbi2, 0x0);
- dw_pcie_writel_dbi(pci, reg, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, 0x0);
- dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
}
dw_pcie_dbi_ro_wr_dis(pci);
}
@@ -100,19 +73,15 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
- u8 cap_ptr, u8 cap)
+ u8 cap_ptr, u8 cap)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
u8 cap_id, next_cap_ptr;
u16 reg;
if (!cap_ptr)
return 0;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+ reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
cap_id = (reg & 0x00ff);
if (cap_id > PCI_CAP_ID_MAX)
@@ -127,14 +96,10 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
u8 next_cap_ptr;
u16 reg;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+ reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
next_cap_ptr = (reg & 0x00ff);
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
@@ -145,24 +110,21 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
-
- func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
- hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
- hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
- hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
- hdr->interrupt_pin);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -244,18 +206,13 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset, dbi2_offset;
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- u32 reg, reg_dbi2;
int ret, type;
+ u32 reg;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
- dbi2_offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
-
- reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
- reg_dbi2 = PCI_BASE_ADDRESS_0 + (4 * bar) + dbi2_offset;
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
if (!(flags & PCI_BASE_ADDRESS_SPACE))
type = PCIE_ATU_TYPE_MEM;
@@ -271,12 +228,12 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg_dbi2, lower_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg, flags);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg_dbi2 + 4, upper_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg + 4, 0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
ep->epf_bar[bar] = epf_bar;
@@ -336,19 +293,15 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
@@ -362,22 +315,19 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -386,19 +336,15 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
@@ -412,9 +358,8 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
@@ -422,21 +367,19 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_dbi_ro_wr_en(pci);
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
dw_pcie_writew_dbi(pci, reg, val);
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
val = offset | bir;
- dw_pcie_writel_dbi(pci, reg, val);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
+ reg = ep_func->msix_cap + PCI_MSIX_PBA;
val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
- dw_pcie_writel_dbi(pci, reg, val);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -444,7 +387,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -497,56 +440,53 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
- dev_err(dev, "EP cannot trigger legacy IRQs\n");
+ dev_err(dev, "EP cannot raise INTX IRQs\n");
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 msg_addr_lower, msg_addr_upper, reg;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
unsigned int aligned_offset;
- unsigned int func_offset = 0;
u16 msg_ctrl, msg_data;
- u32 msg_addr_lower, msg_addr_upper, reg;
- u64 msg_addr;
bool has_upper;
+ u64 msg_addr;
int ret;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
- msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
if (has_upper) {
- reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
- msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
} else {
msg_addr_upper = 0;
- reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
- msg_addr = ((u64)msg_addr_upper) << 32 |
- (msg_addr_lower & ~aligned_offset);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
@@ -583,10 +523,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct dw_pcie_ep_func *ep_func;
struct pci_epf_msix_tbl *msix_tbl;
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int func_offset = 0;
u32 reg, msg_data, vec_ctrl;
unsigned int aligned_offset;
u32 tbl_offset;
@@ -598,10 +537,8 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
- tbl_offset = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
@@ -801,8 +738,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
list_add_tail(&ep_func->list, &ep->func_list);
}
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
+ if (ep->ops->init)
+ ep->ops->init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
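One subtlety in the dw_pcie_ep_raise_msi_irq() hunk above: the MSI doorbell is reached through an ATU window that must be aligned to the outbound window's page size, and the old code derived the aligned base with bit tricks on the low 32-bit word only. The new code computes both the page offset and the aligned base from the full 64-bit address with ALIGN_DOWN(), which is equivalent for page sizes below 4 GiB but reads correctly. The arithmetic as a runnable userspace sketch; the address and page size are made-up values, and ALIGN_DOWN is re-defined locally:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t page_size = 0x1000;		/* hypothetical window page size */
	uint64_t msg_addr = 0x1fee00040ULL;	/* hypothetical MSI doorbell */

	/* Offset of the doorbell within its page... */
	uint64_t aligned_offset = msg_addr & (page_size - 1);
	/* ...and the page-aligned base that the ATU window maps. */
	uint64_t base = ALIGN_DOWN(msg_addr, page_size);

	/* Prints: map 0x1fee00000, write at +0x40 */
	printf("map %#llx, write at +%#llx\n",
	       (unsigned long long)base, (unsigned long long)aligned_offset);
	return 0;
}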
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7991f0e17..d5fc31f83 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -441,14 +441,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
- if (pp->ops->host_init) {
- ret = pp->ops->host_init(pp);
+ if (pp->ops->init) {
+ ret = pp->ops->init(pp);
if (ret)
return ret;
}
if (pci_msi_enabled()) {
- pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
+ pp->has_msi_ctrl = !(pp->ops->msi_init ||
of_property_read_bool(np, "msi-parent") ||
of_property_read_bool(np, "msi-map"));
@@ -464,8 +464,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_deinit_host;
}
- if (pp->ops->msi_host_init) {
- ret = pp->ops->msi_host_init(pp);
+ if (pp->ops->msi_init) {
+ ret = pp->ops->msi_init(pp);
if (ret < 0)
goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
@@ -502,8 +502,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
goto err_stop_link;
- if (pp->ops->host_post_init)
- pp->ops->host_post_init(pp);
+ if (pp->ops->post_init)
+ pp->ops->post_init(pp);
return 0;
@@ -518,8 +518,8 @@ err_free_msi:
dw_pcie_free_msi(pp);
err_deinit_host:
- if (pp->ops->host_deinit)
- pp->ops->host_deinit(pp);
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
return ret;
}
@@ -539,8 +539,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
- if (pp->ops->host_deinit)
- pp->ops->host_deinit(pp);
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
@@ -842,8 +842,8 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
return ret;
}
- if (pci->pp.ops->host_deinit)
- pci->pp.ops->host_deinit(&pci->pp);
+ if (pci->pp.ops->deinit)
+ pci->pp.ops->deinit(&pci->pp);
pci->suspended = true;
@@ -860,8 +860,8 @@ int dw_pcie_resume_noirq(struct dw_pcie *pci)
pci->suspended = false;
- if (pci->pp.ops->host_init) {
- ret = pci->pp.ops->host_init(&pci->pp);
+ if (pci->pp.ops->init) {
+ ret = pci->pp.ops->init(&pci->pp);
if (ret) {
dev_err(pci->dev, "Host init failed: %d\n", ret);
return ret;
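With the host callbacks shortened to .init/.deinit/.post_init/.msi_init, the MSI ownership decision in dw_pcie_host_init() reads directly: the core instantiates its internal MSI controller only when the platform neither supplies an msi_init() hook nor delegates MSIs to an external controller through the devicetree. Condensed into a sketch (the demo_* name is hypothetical):

#include <linux/of.h>

#include "pcie-designware.h"

/* True when the DWC core itself must drive MSIs: no platform msi_init()
 * callback and no msi-parent/msi-map delegation in the devicetree. */
static bool demo_needs_internal_msi_ctrl(struct dw_pcie_rp *pp,
					 struct device_node *np)
{
	return !(pp->ops->msi_init ||
		 of_property_read_bool(np, "msi-parent") ||
		 of_property_read_bool(np, "msi-map"));
}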
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index b625841e9..778588b4b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -42,17 +42,16 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -74,7 +73,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dw_plat_pcie_ep_init,
+ .init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 55ff76e3d..26dae4837 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -300,10 +300,10 @@ enum dw_pcie_ltssm {
};
struct dw_pcie_host_ops {
- int (*host_init)(struct dw_pcie_rp *pp);
- void (*host_deinit)(struct dw_pcie_rp *pp);
- void (*host_post_init)(struct dw_pcie_rp *pp);
- int (*msi_host_init)(struct dw_pcie_rp *pp);
+ int (*init)(struct dw_pcie_rp *pp);
+ void (*deinit)(struct dw_pcie_rp *pp);
+ void (*post_init)(struct dw_pcie_rp *pp);
+ int (*msi_init)(struct dw_pcie_rp *pp);
void (*pme_turn_off)(struct dw_pcie_rp *pp);
};
@@ -332,10 +332,10 @@ struct dw_pcie_rp {
struct dw_pcie_ep_ops {
void (*pre_init)(struct dw_pcie_ep *ep);
- void (*ep_init)(struct dw_pcie_ep *ep);
+ void (*init)(struct dw_pcie_ep *ep);
void (*deinit)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
/*
* Provide a method to implement the different func config space
@@ -344,7 +344,7 @@ struct dw_pcie_ep_ops {
* return a 0, and implement code in callback function of platform
* driver.
*/
- unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+ unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no);
unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no);
};
@@ -486,6 +486,99 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
+static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi_offset = 0;
+
+ if (ep->ops->get_dbi_offset)
+ dbi_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi_offset;
+}
+
+static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ return dw_pcie_read_dbi(pci, offset + reg, size);
+}
+
+static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4);
+}
+
+static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u16 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2);
+}
+
+static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u8 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi2_offset = 0;
+
+ if (ep->ops->get_dbi2_offset)
+ dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
+ else if (ep->ops->get_dbi_offset) /* for backward compatibility */
+ dbi2_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi2_offset;
+}
+
+static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi2(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val);
+}
+
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
@@ -580,7 +673,7 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep);
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -613,7 +706,7 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
-static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
return 0;
}
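The inline dw_pcie_ep_*_dbi() helpers added above fold the get_dbi_offset() lookup into every access, which is what let pcie-designware-ep.c drop all of its func_offset bookkeeping. A before/after sketch of a caller, reading a function's vendor ID (the demo_* name is hypothetical):

#include <linux/pci_regs.h>

#include "pcie-designware.h"

static u16 demo_ep_read_vendor_id(struct dw_pcie_ep *ep, u8 func_no)
{
	/* Before this series, callers resolved the offset by hand:
	 *   func_offset = dw_pcie_ep_func_select(ep, func_no);
	 *   return dw_pcie_readw_dbi(pci, func_offset + PCI_VENDOR_ID);
	 * Now the helper applies ep->ops->get_dbi_offset() internally. */
	return dw_pcie_ep_readw_dbi(ep, func_no, PCI_VENDOR_ID);
}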
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 2fe42c700..d6842141d 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -72,7 +72,7 @@ static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
writel_relaxed(val, rockchip->apb_base + reg);
}
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -202,7 +202,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
if (ret < 0)
dev_err(dev, "failed to init irq domain\n");
- irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
/* LTSSM enable control mode */
@@ -215,7 +215,7 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
- .host_init = rockchip_pcie_host_init,
+ .init = rockchip_pcie_host_init,
};
static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 1e9b44b8b..663672520 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -279,7 +279,7 @@ static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
- .host_init = fu740_pcie_host_init,
+ .init = fu740_pcie_host_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index fd484cc7c..7a11c618b 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -198,7 +198,7 @@ static int histb_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .host_init = histb_pcie_host_init,
+ .init = histb_pcie_host_init,
};
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index c9c93524e..acbe4f6d3 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -391,7 +391,7 @@ static const struct dw_pcie_ops intel_pcie_ops = {
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
- .host_init = intel_pcie_rc_init,
+ .init = intel_pcie_rc_init,
};
static int intel_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 289bff99d..208d3b0ba 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -289,19 +289,18 @@ static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- /* Legacy interrupts are not supported in Keem Bay */
- dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ case PCI_IRQ_INTX:
+ /* INTx interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "INTx IRQ is not supported\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "Unknown IRQ type %d\n", type);
@@ -325,7 +324,7 @@ keembay_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
- .ep_init = keembay_pcie_ep_init,
+ .init = keembay_pcie_ep_init,
.raise_irq = keembay_pcie_ep_raise_irq,
.get_features = keembay_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 2ee146767..d5523f302 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -366,7 +366,6 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- char name[32];
int ret, i;
/* This is an optional property */
@@ -387,9 +386,8 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (pcie->gpio_id_clkreq[i] < 0)
return pcie->gpio_id_clkreq[i];
- sprintf(name, "pcie_clkreq_%d", i);
- pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
- GFP_KERNEL);
+ pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_clkreq_%d", i);
if (!pcie->clkreq_names[i])
return -ENOMEM;
}
@@ -404,7 +402,6 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
struct device *dev = &pdev->dev;
struct device_node *parent, *child;
int ret, slot, i;
- char name[32];
for_each_available_child_of_node(node, parent) {
for_each_available_child_of_node(parent, child) {
@@ -430,9 +427,9 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
slot = PCI_SLOT(ret);
- sprintf(name, "pcie_perst_%d", slot);
- pcie->reset_names[i] = devm_kstrdup_const(dev, name,
- GFP_KERNEL);
+ pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_perst_%d",
+ slot);
if (!pcie->reset_names[i]) {
ret = -ENOMEM;
goto put_node;
@@ -672,7 +669,7 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .host_init = kirin_pcie_host_init,
+ .init = kirin_pcie_host_init,
};
static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
@@ -741,15 +738,13 @@ err:
return ret;
}
-static int kirin_pcie_remove(struct platform_device *pdev)
+static void kirin_pcie_remove(struct platform_device *pdev)
{
struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
dw_pcie_host_deinit(&kirin_pcie->pci->pp);
kirin_pcie_power_off(kirin_pcie);
-
- return 0;
}
struct kirin_pcie_data {
@@ -818,7 +813,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
- .remove = kirin_pcie_remove,
+ .remove_new = kirin_pcie_remove,
.driver = {
.name = "kirin-pcie",
.of_match_table = kirin_pcie_match,
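The kirin change is more than style: devm_kstrdup_const() only avoids the copy when its argument lives in kernel rodata, so duplicating a stack-built sprintf() buffer through it always fell back to a plain copy while keeping a fixed 32-byte buffer in the frame. devm_kasprintf() formats and allocates in one step with no size guess. The pattern as a sketch:

#include <linux/device.h>
#include <linux/slab.h>

/* Builds a devm-managed per-index name with no fixed-size stack buffer;
 * returns NULL on allocation failure. */
static const char *demo_clkreq_name(struct device *dev, int i)
{
	return devm_kasprintf(dev, GFP_KERNEL, "pcie_clkreq_%d", i);
}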
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 9e58f0551..36e5e80cd 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -726,14 +726,14 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
}
static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "Unknown IRQ type\n");
@@ -796,7 +796,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pci_ep_ops = {
- .ep_init = qcom_pcie_ep_init,
+ .init = qcom_pcie_ep_init,
.raise_irq = qcom_pcie_ep_raise_irq,
.get_features = qcom_pcie_epc_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index e3dc27f2a..23bb0eee5 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1279,9 +1279,9 @@ static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
- .host_init = qcom_pcie_host_init,
- .host_deinit = qcom_pcie_host_deinit,
- .host_post_init = qcom_pcie_host_post_init,
+ .init = qcom_pcie_host_init,
+ .deinit = qcom_pcie_host_deinit,
+ .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 3bc45e513..e9166619b 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -307,8 +307,8 @@ static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = {
- .host_init = rcar_gen4_pcie_host_init,
- .host_deinit = rcar_gen4_pcie_host_deinit,
+ .init = rcar_gen4_pcie_host_init,
+ .deinit = rcar_gen4_pcie_host_deinit,
};
static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
@@ -362,15 +362,14 @@ static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep)
}
static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(dw->dev, "Unknown IRQ type\n");
@@ -394,7 +393,7 @@ rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep)
return &rcar_gen4_pcie_epc_features;
}
-static unsigned int rcar_gen4_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
u8 func_no)
{
return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET;
@@ -408,11 +407,11 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.pre_init = rcar_gen4_pcie_ep_pre_init,
- .ep_init = rcar_gen4_pcie_ep_init,
+ .init = rcar_gen4_pcie_ep_init,
.deinit = rcar_gen4_pcie_ep_deinit,
.raise_irq = rcar_gen4_pcie_ep_raise_irq,
.get_features = rcar_gen4_pcie_ep_get_features,
- .func_conf_select = rcar_gen4_pcie_ep_func_conf_select,
+ .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
.get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset,
};
@@ -436,7 +435,7 @@ static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
/* Common */
static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- rcar->mode = (enum dw_pcie_device_mode)of_device_get_match_data(&rcar->pdev->dev);
+ rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev);
switch (rcar->mode) {
case DW_PCIE_RC_TYPE:
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 99d47ae80..201dced20 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -148,7 +148,7 @@ static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
- .host_init = spear13xx_pcie_host_init,
+ .init = spear13xx_pcie_host_init,
};
static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 0fe113598..7afa9e9aa 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -773,13 +773,13 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
+static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
- /* Enable legacy interrupt generation */
+ /* Enable INTX interrupt generation */
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
val |= APPL_INTR_EN_L0_0_INT_INT_EN;
@@ -830,7 +830,7 @@ static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
tegra_pcie_enable_system_interrupts(pp);
- tegra_pcie_enable_legacy_interrupts(pp);
+ tegra_pcie_enable_intx_interrupts(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_enable_msi_interrupts(pp);
}
@@ -1060,7 +1060,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .host_init = tegra_pcie_dw_host_init,
+ .init = tegra_pcie_dw_host_init,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -1947,7 +1947,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1979,20 +1979,19 @@ static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
}
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+ case PCI_IRQ_INTX:
+ return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
default:
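The same conversion repeats in every endpoint driver below: the dedicated enum pci_epc_irq_type is dropped and the callback takes a plain unsigned int carrying the generic PCI_IRQ_* constants. Skeleton of the reworked callback, with body comments standing in for driver-specific code:

static int demo_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
                             unsigned int type, u16 interrupt_num)
{
        switch (type) {
        case PCI_IRQ_INTX:      /* was PCI_EPC_IRQ_LEGACY */
                return 0;       /* assert INTA..INTD here */
        case PCI_IRQ_MSI:       /* was PCI_EPC_IRQ_MSI */
                return 0;       /* send MSI #interrupt_num here */
        case PCI_IRQ_MSIX:      /* was PCI_EPC_IRQ_MSIX */
                return 0;       /* send MSI-X #interrupt_num here */
        default:
                return -EINVAL;
        }
}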
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index cba3c88fc..3fced0d3e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -212,7 +212,7 @@ static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
@@ -256,15 +256,14 @@ static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
}
static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return uniphier_pcie_ep_raise_legacy_irq(ep);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return uniphier_pcie_ep_raise_intx_irq(ep);
+ case PCI_IRQ_MSI:
return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
interrupt_num);
default:
@@ -284,7 +283,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
- .ep_init = uniphier_pcie_ep_init,
+ .init = uniphier_pcie_ep_init,
.raise_irq = uniphier_pcie_ep_raise_irq,
.get_features = uniphier_pcie_get_features,
};
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 48c3eba81..5757ca380 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -67,7 +67,7 @@ struct uniphier_pcie {
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
};
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
@@ -253,12 +253,12 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
for_each_set_bit(bit, &reg, PCI_NUM_INTX)
- generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
+static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -279,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
- if (!pcie->legacy_irq_domain) {
+ if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
ret = -ENODEV;
goto out_put_node;
@@ -301,7 +301,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
int ret;
- ret = uniphier_pcie_config_legacy_irq(pp);
+ ret = uniphier_pcie_config_intx_irq(pp);
if (ret)
return ret;
@@ -311,7 +311,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
- .host_init = uniphier_pcie_host_init,
+ .init = uniphier_pcie_host_init,
};
static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
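Behind the legacy-to-INTX renames sits one recurring pattern: a four-entry linear IRQ domain translated with pci_irqd_intx_xlate(). A self-contained sketch, substituting the kernel's dummy_irq_chip for the driver-specific chip; the demo_* names are hypothetical:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/pci.h>

static int demo_intx_map(struct irq_domain *domain, unsigned int irq,
                         irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        return 0;
}

static const struct irq_domain_ops demo_intx_domain_ops = {
        .map    = demo_intx_map,
        /* maps DT specifiers 1-4 (INTA-INTD) onto hwirqs 0-3 */
        .xlate  = pci_irqd_intx_xlate,
};

static struct irq_domain *demo_create_intx_domain(struct device_node *np)
{
        return irq_domain_add_linear(np, PCI_NUM_INTX,
                                     &demo_intx_domain_ops, NULL);
}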
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 71026fefa..318c278e6 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -236,7 +236,7 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
- .host_init = visconti_pcie_host_init,
+ .init = visconti_pcie_host_init,
};
static int visconti_get_resources(struct platform_device *pdev,
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 6be3266cd..45b718061 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -85,7 +85,7 @@ int pci_host_common_probe(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
-int pci_host_common_remove(struct platform_device *pdev)
+void pci_host_common_remove(struct platform_device *pdev)
{
struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
@@ -93,8 +93,6 @@ int pci_host_common_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
pci_unlock_rescan_remove();
-
- return 0;
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 63865aeb6..41cb6a057 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -82,7 +82,7 @@ static struct platform_driver gen_pci_driver = {
.of_match_table = gen_pci_of_match,
},
.probe = pci_host_common_probe,
- .remove = pci_host_common_remove,
+ .remove_new = pci_host_common_remove,
};
module_platform_driver(gen_pci_driver);
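.remove_new is the void-returning variant of the platform .remove callback: a non-zero return from the old .remove was only logged and the device was unbound regardless, so the int conveyed nothing. Minimal shape of a converted driver, with hypothetical demo names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        return 0;
}

static void demo_remove(struct platform_device *pdev)
{
        /* tear everything down; there is no error code to (mis)return */
}

static struct platform_driver demo_driver = {
        .driver         = { .name = "demo" },
        .probe          = demo_probe,
        .remove_new     = demo_remove,
};
module_platform_driver(demo_driver);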
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 11cc354a3..5992280e8 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -651,13 +651,6 @@ static void hv_arch_irq_unmask(struct irq_data *data)
PCI_FUNC(pdev->devfn);
params->int_target.vector = hv_msi_get_int_vector(data);
- /*
- * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index e47a77f94..c08683feb 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -48,6 +48,9 @@
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8
+
#define PCIE_RC_DL_MDIO_ADDR 0x1100
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
@@ -121,9 +124,12 @@
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
-
+#define PCIE_CLKREQ_MASK \
+ (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
+ PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
#define PCIE_INTR2_CPU_BASE 0x4300
#define PCIE_MSI_INTR2_BASE 0x4500
@@ -1028,13 +1034,89 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
return 0;
}
+/*
+ * This extends the timeout period for an access to an internal bus. This
+ * access timeout may occur during L1SS sleep periods, even without the
+ * presence of a PCIe access.
+ */
+static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
+{
+ /* TIMEOUT register is two registers before RGR1_SW_INIT_1 */
+ const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
+ u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+
+ /* Each unit in timeout register is 1/216,000,000 seconds */
+ writel(216 * timeout_us, pcie->base + REG_OFFSET);
+}
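With the values above, the write works out to 216 ticks/us * 4,000,000 us = 864,000,000 ticks of the 216 MHz reference, which still fits comfortably in a 32-bit register (maximum about 4.29e9).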
+
+static void brcm_config_clkreq(struct brcm_pcie *pcie)
+{
+ static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n";
+ const char *mode = "default";
+ u32 clkreq_cntl;
+ int ret, tmp;
+
+ ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode);
+ if (ret && ret != -EINVAL) {
+ dev_err(pcie->dev, err_msg);
+ mode = "safe";
+ }
+
+ /* Start out assuming safe mode (both mode bits cleared) */
+ clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ clkreq_cntl &= ~PCIE_CLKREQ_MASK;
+
+ if (strcmp(mode, "no-l1ss") == 0) {
+ /*
+ * "no-l1ss" -- Provides Clock Power Management, L0s, and
+ * L1, but cannot provide L1 substate (L1SS) power
+ * savings. If the downstream device connected to the RC is
+ * L1SS capable AND the OS enables L1SS, all PCIe traffic
+ * may abruptly halt, potentially hanging the system.
+ */
+ clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+ /*
+ * We want to un-advertise L1 substates because if the OS
+ * tries to configure the controller into using L1 substate
+ * power savings it may fail or hang when the RC HW is in
+ * "no-l1ss" mode.
+ */
+ tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+ u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
+ writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+
+ } else if (strcmp(mode, "default") == 0) {
+ /*
+ * "default" -- Provides L0s, L1, and L1SS, but not
+ * compliant to provide Clock Power Management;
+ * specifically, may not be able to meet the T_CLRon max
+ * timing of 400ns as specified in "Dynamic Clock Control",
+ * section 3.2.5.2.2 of the PCIe spec. This situation is
+ * atypical and should happen only with older devices.
+ */
+ clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
+ brcm_extend_rbus_timeout(pcie);
+
+ } else {
+ /*
+ * "safe" -- No power savings; refclk is driven by RC
+ * unconditionally.
+ */
+ if (strcmp(mode, "safe") != 0)
+ dev_err(pcie->dev, err_msg);
+ mode = "safe";
+ }
+ writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
+}
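The u32p_replace_bits() call in the "no-l1ss" branch is a one-field read-modify-write, equivalent to *to = (*to & ~MASK) | FIELD_PREP(MASK, val). Worked through with MASK = PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK (0xf8) and val = 2: a register value of 0xffffffff first has bits 7:3 cleared (0xffffff07), then the field is set to 2 << 3 = 0x10, giving 0xffffff17.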
+
static int brcm_pcie_start_link(struct brcm_pcie *pcie)
{
struct device *dev = pcie->dev;
void __iomem *base = pcie->base;
u16 nlw, cls, lnksta;
bool ssc_good = false;
- u32 tmp;
int ret, i;
/* Unassert the fundamental reset */
@@ -1059,6 +1141,8 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
return -ENODEV;
}
+ brcm_config_clkreq(pcie);
+
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
@@ -1077,14 +1161,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /*
- * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
- * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
- */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
- tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
-
return 0;
}
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index acdc583d2..4e6aa882a 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -52,7 +52,7 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
- pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
+ pcie->type = (uintptr_t)of_device_get_match_data(dev);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 7034c0ff2..e6909271d 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -402,16 +402,15 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
}
static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
+ case PCI_IRQ_INTX:
return rcar_pcie_ep_assert_intx(ep, fn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
default:
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index bf7cc0b6a..996077ab7 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -29,6 +29,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include "pcie-rcar.h"
@@ -953,14 +954,22 @@ static const struct of_device_id rcar_pcie_of_match[] = {
{},
};
+/* Design note 346 from Linear Technology says order is not important. */
+static const char * const rcar_pcie_supplies[] = {
+ "vpcie1v5",
+ "vpcie3v3",
+ "vpcie12v",
+};
+
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
struct rcar_pcie_host *host;
struct rcar_pcie *pcie;
+ unsigned int i;
u32 data;
int err;
- struct pci_host_bridge *bridge;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
if (!bridge)
@@ -971,6 +980,13 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, host);
+ for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
+ err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
+ rcar_pcie_supplies[i]);
+ }
+
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
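devm_regulator_get_enable_optional() returns -ENODEV when the supply is simply absent from the device tree, which is why only other errors abort the probe; the enable is device-managed, so no explicit disable path is needed. The loop above condenses to this reusable sketch, with hypothetical supply names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static const char * const demo_supplies[] = { "vdda", "vddio" };

static int demo_enable_supplies(struct device *dev)
{
        unsigned int i;
        int err;

        for (i = 0; i < ARRAY_SIZE(demo_supplies); i++) {
                /* -ENODEV just means this supply is not described in DT */
                err = devm_regulator_get_enable_optional(dev, demo_supplies[i]);
                if (err < 0 && err != -ENODEV)
                        return dev_err_probe(dev, err, "failed to enable %s\n",
                                             demo_supplies[i]);
        }

        return 0;
}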
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 0af0e965f..c9046e97a 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -26,16 +26,16 @@
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
* dedicated outbound regions is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -325,8 +325,8 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
}
}
-static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
- u8 intx)
+static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
{
u16 cmd;
@@ -407,15 +407,14 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
}
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
+ case PCI_IRQ_MSI:
return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
default:
return -EINVAL;
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index afbbdccd1..300b9dc85 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -505,7 +505,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -553,7 +553,7 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
return irq;
irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
+ rockchip_pcie_intx_handler,
rockchip);
irq = platform_get_irq_byname(pdev, "client");
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index 3d7f280ed..5be5dfd83 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -684,10 +684,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
int ret;
port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
- if (port->msi.irq_msi0 <= 0) {
- dev_err(dev, "Unable to find msi0 IRQ line\n");
+ if (port->msi.irq_msi0 <= 0)
return port->msi.irq_msi0;
- }
ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
@@ -698,10 +696,8 @@ static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
}
port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
- if (port->msi.irq_msi1 <= 0) {
- dev_err(dev, "Unable to find msi1 IRQ line\n");
+ if (port->msi.irq_msi1 <= 0)
return port->msi.irq_msi1;
- }
ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index e307aceba..0408f4d61 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -166,7 +166,7 @@ struct nwl_pcie {
int irq_intx;
int irq_misc;
struct nwl_msi msi;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -324,7 +324,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
for_each_set_bit(bit, &status, PCI_NUM_INTX)
- generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
}
chained_irq_exit(chip, desc);
@@ -364,7 +364,7 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void nwl_mask_leg_irq(struct irq_data *data)
+static void nwl_mask_intx_irq(struct irq_data *data)
{
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
@@ -378,7 +378,7 @@ static void nwl_mask_leg_irq(struct irq_data *data)
raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}
-static void nwl_unmask_leg_irq(struct irq_data *data)
+static void nwl_unmask_intx_irq(struct irq_data *data)
{
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
@@ -392,26 +392,26 @@ static void nwl_unmask_leg_irq(struct irq_data *data)
raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}
-static struct irq_chip nwl_leg_irq_chip = {
+static struct irq_chip nwl_intx_irq_chip = {
.name = "nwl_pcie:legacy",
- .irq_enable = nwl_unmask_leg_irq,
- .irq_disable = nwl_mask_leg_irq,
- .irq_mask = nwl_mask_leg_irq,
- .irq_unmask = nwl_unmask_leg_irq,
+ .irq_enable = nwl_unmask_intx_irq,
+ .irq_disable = nwl_mask_intx_irq,
+ .irq_mask = nwl_mask_intx_irq,
+ .irq_unmask = nwl_unmask_intx_irq,
};
-static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static int nwl_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
irq_set_status_flags(irq, IRQ_LEVEL);
return 0;
}
-static const struct irq_domain_ops legacy_domain_ops = {
- .map = nwl_legacy_map,
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = nwl_intx_map,
.xlate = pci_irqd_intx_xlate,
};
@@ -525,20 +525,20 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
- struct device_node *legacy_intc_node;
+ struct device_node *intc_node;
- legacy_intc_node = of_get_next_child(node, NULL);
- if (!legacy_intc_node) {
+ intc_node = of_get_next_child(node, NULL);
+ if (!intc_node) {
dev_err(dev, "No legacy intc node found\n");
return -EINVAL;
}
- pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
- PCI_NUM_INTX,
- &legacy_domain_ops,
- pcie);
- of_node_put(legacy_intc_node);
- if (!pcie->legacy_irq_domain) {
+ pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
+ PCI_NUM_INTX,
+ &intx_domain_ops,
+ pcie);
+ of_node_put(intc_node);
+ if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -710,14 +710,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
- /* Disable all legacy interrupts */
+ /* Disable all INTX interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
- /* Clear pending legacy interrupts */
+ /* Clear pending INTX interrupts */
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
- /* Enable all legacy interrupts */
+ /* Enable all INTX interrupts */
nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
/* Enable the bridge config interrupt */
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 0452cbc36..87b7856f3 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -984,7 +984,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
vmd->dev = dev;
- vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
+ vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL);
if (vmd->instance < 0)
return vmd->instance;
@@ -1026,7 +1026,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
out_release_instance:
- ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ ida_free(&vmd_instance_ida, vmd->instance);
return err;
}
@@ -1048,7 +1048,7 @@ static void vmd_remove(struct pci_dev *dev)
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
vmd_remove_irq_domain(vmd);
- ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ ida_free(&vmd_instance_ida, vmd->instance);
}
static void vmd_shutdown(struct pci_dev *dev)
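The conversion above is part of retiring the deprecated ida_simple_*() wrappers; ida_simple_get(&ida, 0, 0, gfp) and ida_alloc(&ida, gfp) are equivalent, both returning the lowest free ID starting at 0 with no upper bound. Sketch of the replacement pair, with hypothetical demo names:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_instance_ida);

static int demo_get_instance(void)
{
        /* returns the lowest free ID >= 0, or a negative errno */
        return ida_alloc(&demo_instance_ida, GFP_KERNEL);
}

static void demo_put_instance(int id)
{
        ida_free(&demo_instance_ida, id);
}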
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 6dc918a8a..1c3e4ea76 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -21,6 +21,15 @@
/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)
+struct pci_epf_mhi_dma_transfer {
+ struct pci_epf_mhi *epf_mhi;
+ struct mhi_ep_buf_info buf_info;
+ struct list_head node;
+ dma_addr_t paddr;
+ enum dma_data_direction dir;
+ size_t size;
+};
+
struct pci_epf_mhi_ep_info {
const struct mhi_ep_cntrl_config *config;
struct pci_epf_header *epf_header;
@@ -124,6 +133,10 @@ struct pci_epf_mhi {
resource_size_t mmio_phys;
struct dma_chan *dma_chan_tx;
struct dma_chan *dma_chan_rx;
+ struct workqueue_struct *dma_wq;
+ struct work_struct dma_work;
+ struct list_head dma_list;
+ spinlock_t list_lock;
u32 mmio_size;
int irq;
};
@@ -205,7 +218,7 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
* MHI supplies 0 based MSI vectors but the API expects the vector
* number to start from 1, so we need to increment the vector by 1.
*/
- pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI,
vector + 1);
}
@@ -234,6 +247,9 @@ static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
mutex_unlock(&epf_mhi->lock);
+ if (buf_info->cb)
+ buf_info->cb(buf_info);
+
return 0;
}
@@ -262,6 +278,9 @@ static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
mutex_unlock(&epf_mhi->lock);
+ if (buf_info->cb)
+ buf_info->cb(buf_info);
+
return 0;
}
@@ -412,6 +431,198 @@ err_unlock:
return ret;
}
+static void pci_epf_mhi_dma_worker(struct work_struct *work)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *itr, *tmp;
+ struct mhi_ep_buf_info *buf_info;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&epf_mhi->list_lock, flags);
+ list_splice_tail_init(&epf_mhi->dma_list, &head);
+ spin_unlock_irqrestore(&epf_mhi->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
+ buf_info = &itr->buf_info;
+ buf_info->cb(buf_info);
+ kfree(itr);
+ }
+}
+
+static void pci_epf_mhi_dma_async_callback(void *param)
+{
+ struct pci_epf_mhi_dma_transfer *transfer = param;
+ struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;
+
+ spin_lock(&epf_mhi->list_lock);
+ list_add_tail(&transfer->node, &epf_mhi->dma_list);
+ spin_unlock(&epf_mhi->list_lock);
+
+ queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
+}
+
+static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *transfer = NULL;
+ struct dma_chan *chan = epf_mhi->dma_chan_rx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t dst_addr;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = dst_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_FROM_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_free_transfer;
+ }
+
+ dma_async_issue_pending(chan);
+
+ goto err_unlock;
+
+err_free_transfer:
+ kfree(transfer);
+err_unmap:
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *transfer = NULL;
+ struct dma_chan *chan = epf_mhi->dma_chan_tx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t src_addr;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = src_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_TO_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_free_transfer;
+ }
+
+ dma_async_issue_pending(chan);
+
+ goto err_unlock;
+
+err_free_transfer:
+ kfree(transfer);
+err_unmap:
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
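Both async helpers above follow the canonical dmaengine submit sequence; stripped of the mapping and bookkeeping, it reduces to the skeleton below. The demo_ name is hypothetical, and chan, addr, len, cb and param are assumed to be prepared by the caller:

#include <linux/dmaengine.h>

static int demo_dma_submit_async(struct dma_chan *chan, dma_addr_t addr,
                                 size_t len, enum dma_transfer_direction dir,
                                 dma_async_tx_callback cb, void *param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_single(chan, addr, len, dir,
                                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc)
                return -EIO;

        desc->callback = cb;            /* runs in atomic context */
        desc->callback_param = param;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);  /* nothing moves until this */
        return 0;
}

Note the completion side: because the callback runs in atomic context, the driver only queues the finished transfer and defers dma_unmap_single() plus the client callback to pci_epf_mhi_dma_worker() above.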
+
struct epf_dma_filter {
struct device *dev;
u32 dma_mask;
@@ -435,6 +646,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
struct device *dev = &epf_mhi->epf->dev;
struct epf_dma_filter filter;
dma_cap_mask_t mask;
+ int ret;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -453,16 +665,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
&filter);
if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
dev_err(dev, "Failed to request rx channel\n");
- dma_release_channel(epf_mhi->dma_chan_tx);
- epf_mhi->dma_chan_tx = NULL;
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_release_tx;
+ }
+
+ epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
+ if (!epf_mhi->dma_wq) {
+ ret = -ENOMEM;
+ goto err_release_rx;
}
+ INIT_LIST_HEAD(&epf_mhi->dma_list);
+ INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
+ spin_lock_init(&epf_mhi->list_lock);
+
return 0;
+
+err_release_rx:
+ dma_release_channel(epf_mhi->dma_chan_rx);
+ epf_mhi->dma_chan_rx = NULL;
+err_release_tx:
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ epf_mhi->dma_chan_tx = NULL;
+
+ return ret;
}
static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
+ destroy_workqueue(epf_mhi->dma_wq);
dma_release_channel(epf_mhi->dma_chan_tx);
dma_release_channel(epf_mhi->dma_chan_rx);
epf_mhi->dma_chan_tx = NULL;
@@ -535,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
+ mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
+ mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
if (info->flags & MHI_EPF_USE_DMA) {
- mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
- mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
- } else {
- mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
- mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+ mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
+ mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
+ mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
+ mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
}
/* Register the MHI EP controller */
@@ -648,7 +880,7 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}
-static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
+static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
.core_init = pci_epf_mhi_core_init,
.link_up = pci_epf_mhi_link_up,
.link_down = pci_epf_mhi_link_down,
@@ -686,7 +918,7 @@ static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
{},
};
-static struct pci_epf_ops pci_epf_mhi_ops = {
+static const struct pci_epf_ops pci_epf_mhi_ops = {
.unbind = pci_epf_mhi_unbind,
.bind = pci_epf_mhi_bind,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 9aac2c6f3..055394600 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -140,9 +140,9 @@ static struct pci_epf_header epf_ntb_header = {
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
enum pci_epc_interface_type type;
- enum pci_epc_irq_type irq_type;
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
+ unsigned int irq_type;
struct pci_epc *epc;
u8 func_no, vfunc_no;
bool is_msix;
@@ -159,7 +159,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
ctrl->link_status |= LINK_STATUS_UP;
else
ctrl->link_status &= ~LINK_STATUS_UP;
- irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+ irq_type = is_msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
@@ -2099,7 +2099,7 @@ static int epf_ntb_probe(struct pci_epf *epf,
return 0;
}
-static struct pci_epf_ops epf_ntb_ops = {
+static const struct pci_epf_ops epf_ntb_ops = {
.bind = epf_ntb_bind,
.unbind = epf_ntb_unbind,
.add_cfs = epf_ntb_add_cfs,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 1f0d2b842..18c80002d 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -19,11 +19,11 @@
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>
-#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_INTX 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_INTX_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
@@ -600,9 +600,9 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
WRITE_ONCE(reg->status, status);
switch (reg->irq_type) {
- case IRQ_TYPE_LEGACY:
+ case IRQ_TYPE_INTX:
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_EPC_IRQ_LEGACY, 0);
+ PCI_IRQ_INTX, 0);
break;
case IRQ_TYPE_MSI:
count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
@@ -612,7 +612,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_EPC_IRQ_MSI, reg->irq_number);
+ PCI_IRQ_MSI, reg->irq_number);
break;
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
@@ -622,7 +622,7 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_EPC_IRQ_MSIX, reg->irq_number);
+ PCI_IRQ_MSIX, reg->irq_number);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -659,7 +659,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
}
switch (command) {
- case COMMAND_RAISE_LEGACY_IRQ:
+ case COMMAND_RAISE_INTX_IRQ:
case COMMAND_RAISE_MSI_IRQ:
case COMMAND_RAISE_MSIX_IRQ:
pci_epf_test_raise_irq(epf_test, reg);
@@ -973,7 +973,7 @@ static int pci_epf_test_probe(struct pci_epf *epf,
return 0;
}
-static struct pci_epf_ops ops = {
+static const struct pci_epf_ops ops = {
.unbind = pci_epf_test_unbind,
.bind = pci_epf_test_bind,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 2b7bc5a73..5b84821c0 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -1172,11 +1172,8 @@ static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
func_no = ntb->epf->func_no;
vfunc_no = ntb->epf->vfunc_no;
- ret = pci_epc_raise_irq(ntb->epf->epc,
- func_no,
- vfunc_no,
- PCI_EPC_IRQ_MSI,
- interrupt_num + 1);
+ ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
+ PCI_IRQ_MSI, interrupt_num + 1);
if (ret)
dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
@@ -1383,7 +1380,7 @@ static void epf_ntb_unbind(struct pci_epf *epf)
}
// EPF driver probe
-static struct pci_epf_ops epf_ntb_ops = {
+static const struct pci_epf_ops epf_ntb_ops = {
.bind = epf_ntb_bind,
.unbind = epf_ntb_unbind,
.add_cfs = epf_ntb_add_cfs,
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 56e1184bc..dcd4e6643 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -211,13 +211,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
* @epc: the EPC device which has to interrupt the host
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @type: specify the type of interrupt; legacy, MSI or MSI-X
+ * @type: specify the type of interrupt; INTX, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
*
- * Invoke to raise an legacy, MSI or MSI-X interrupt
+ * Invoke to raise an INTX, MSI or MSI-X interrupt
*/
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
int ret;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 25dbe85c4..aaa33e8dc 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -745,6 +745,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
+ const char *res_name;
struct pci_dev *pdev;
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
@@ -785,6 +786,8 @@ found:
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
+ res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES);
+
/*
* If it is already FIXED, don't change it, something
* (perhaps EA or header fixups) wants it this way.
@@ -802,8 +805,8 @@ found:
}
iov->barsz[i] = resource_size(res);
res->end = res->start + resource_size(res) * total - 1;
- pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
- i, res, i, total);
+ pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
+ res_name, res, i, total);
i += bar64;
nres++;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e62de9730..c3585229c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -851,6 +851,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
EXPORT_SYMBOL(pci_find_resource);
/**
+ * pci_resource_name - Return the name of the PCI resource
+ * @dev: PCI device to query
+ * @i: index of the resource
+ *
+ * Return the standard PCI resource (BAR) name according to their index.
+ * Return the standard PCI resource (BAR) name according to its index.
+ */
+const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
+{
+ static const char * const bar_name[] = {
+ "BAR 0",
+ "BAR 1",
+ "BAR 2",
+ "BAR 3",
+ "BAR 4",
+ "BAR 5",
+ "ROM",
+#ifdef CONFIG_PCI_IOV
+ "VF BAR 0",
+ "VF BAR 1",
+ "VF BAR 2",
+ "VF BAR 3",
+ "VF BAR 4",
+ "VF BAR 5",
+#endif
+ "bridge window", /* "io" included in %pR */
+ "bridge window", /* "mem" included in %pR */
+ "bridge window", /* "mem pref" included in %pR */
+ };
+ static const char * const cardbus_name[] = {
+ "BAR 1",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+#ifdef CONFIG_PCI_IOV
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+#endif
+ "CardBus bridge window 0", /* I/O */
+ "CardBus bridge window 1", /* I/O */
+ "CardBus bridge window 0", /* mem */
+ "CardBus bridge window 1", /* mem */
+ };
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
+ i < ARRAY_SIZE(cardbus_name))
+ return cardbus_name[i];
+
+ if (i < ARRAY_SIZE(bar_name))
+ return bar_name[i];
+
+ return "unknown";
+}
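pci_resource_name() is PCI-core internal (the declaration lands in drivers/pci/pci.h below); it exists so log lines can lead with a stable "BAR 0"/"ROM"/"VF BAR n" label instead of a raw register offset. Hypothetical call site:

static void demo_log_bar(struct pci_dev *dev, unsigned int i)
{
        const char *res_name = pci_resource_name(dev, i);

        /* prints e.g. "BAR 0 [mem 0xf0000000-0xf0ffffff 64bit]" */
        pci_info(dev, "%s %pR\n", res_name, &dev->resource[i]);
}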
+
+/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -1219,6 +1279,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
if (delay > PCI_RESET_WAIT)
pci_info(dev, "ready %dms after %s\n", delay - 1,
reset_type);
+ else
+ pci_dbg(dev, "ready %dms after %s\n", delay - 1,
+ reset_type);
return 0;
}
@@ -3329,6 +3392,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
static int pci_ea_read(struct pci_dev *dev, int offset)
{
struct resource *res;
+ const char *res_name;
int ent_size, ent_offset = offset;
resource_size_t start, end;
unsigned long flags;
@@ -3358,6 +3422,7 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
goto out;
res = pci_ea_get_resource(dev, bei, prop);
+ res_name = pci_resource_name(dev, bei);
if (!res) {
pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
goto out;
@@ -3431,16 +3496,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
res->flags = flags;
if (bei <= PCI_EA_BEI_BAR5)
- pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
- bei, res, prop);
+ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+ res_name, res, prop);
else if (bei == PCI_EA_BEI_ROM)
- pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
- res, prop);
+ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+ res_name, res, prop);
else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
- pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
- bei - PCI_EA_BEI_VF_BAR0, res, prop);
+ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+ res_name, res, prop);
else
- pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
bei, res, prop);
out:
@@ -6263,6 +6328,41 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
}
EXPORT_SYMBOL(pcie_set_mps);
+static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
+{
+ return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
+}
+
+int pcie_link_speed_mbps(struct pci_dev *pdev)
+{
+ u16 lnksta;
+ int err;
+
+ err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+ if (err)
+ return err;
+
+ switch (to_pcie_link_speed(lnksta)) {
+ case PCIE_SPEED_2_5GT:
+ return 2500;
+ case PCIE_SPEED_5_0GT:
+ return 5000;
+ case PCIE_SPEED_8_0GT:
+ return 8000;
+ case PCIE_SPEED_16_0GT:
+ return 16000;
+ case PCIE_SPEED_32_0GT:
+ return 32000;
+ case PCIE_SPEED_64_0GT:
+ return 64000;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(pcie_link_speed_mbps);
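pcie_link_speed_mbps() reports the raw per-lane signaling rate in Mb/s (8000 for an 8.0 GT/s link, for instance), not usable payload bandwidth, and a negative return is the errno from the config-space read. Hypothetical caller:

static void demo_report_link_speed(struct pci_dev *pdev)
{
        int mbps = pcie_link_speed_mbps(pdev);

        if (mbps < 0)
                pci_warn(pdev, "cannot read link status: %d\n", mbps);
        else
                pci_info(pdev, "link speed: %d Mb/s per lane\n", mbps);
}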
+
/**
* pcie_bandwidth_available - determine minimum link settings of a PCIe
* device and its bandwidth limitation
@@ -6296,8 +6396,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
while (dev) {
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- next_speed = pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS,
- lnksta)];
+ next_speed = to_pcie_link_speed(lnksta);
next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
@@ -6728,14 +6827,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
resource_size_t align, bool resize)
{
struct resource *r = &dev->resource[bar];
+ const char *r_name = pci_resource_name(dev, bar);
resource_size_t size;
if (!(r->flags & IORESOURCE_MEM))
return;
if (r->flags & IORESOURCE_PCI_FIXED) {
- pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
- bar, r, (unsigned long long)align);
+ pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
+ r_name, r, (unsigned long long)align);
return;
}
@@ -6771,8 +6871,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
* devices and we use the second.
*/
- pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
- bar, r, (unsigned long long)align);
+ pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
+ r_name, r, (unsigned long long)align);
if (resize) {
r->start = 0;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1697cb828..bfc56f7be 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -255,6 +255,8 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *fail_head);
bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
+const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
+
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 38e334677..05fc30bb5 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -41,8 +41,8 @@
#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/
struct aer_err_source {
- unsigned int status;
- unsigned int id;
+ u32 status; /* PCI_ERR_ROOT_STATUS */
+ u32 id; /* PCI_ERR_ROOT_ERR_SRC */
};
struct aer_rpc {
@@ -435,10 +435,10 @@ void pci_aer_exit(struct pci_dev *dev)
/*
* AER error strings
*/
-static const char *aer_error_severity_string[] = {
- "Uncorrected (Non-Fatal)",
- "Uncorrected (Fatal)",
- "Corrected"
+static const char * const aer_error_severity_string[] = {
+ "Uncorrectable (Non-Fatal)",
+ "Uncorrectable (Fatal)",
+ "Correctable"
};
static const char *aer_error_layer[] = {
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index eb2c77d52..bc0bd8669 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -426,17 +426,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
}
}
-static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
- u32 clear, u32 set)
-{
- u32 val;
-
- pci_read_config_dword(pdev, pos, &val);
- val &= ~clear;
- val |= set;
- pci_write_config_dword(pdev, pos, val);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -501,10 +490,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(child,
+ child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent,
+ parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
}
/* Program T_POWER_ON times in both ports */
@@ -512,22 +503,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
/* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
/* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ ctl1);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ ctl1);
if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
- pl1_2_enables);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
- cl1_2_enables);
+ pci_clear_and_set_config_dword(parent,
+ parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_config_dword(child,
+ child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
}
}
@@ -687,10 +682,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
*/
/* Disable all L1 substates */
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, 0);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
/*
* If needed, disable L1, and it gets enabled later
* in pcie_config_aspm_link().
@@ -713,10 +708,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, val);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, val);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, val);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
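The deleted local helper was promoted to the PCI core as pci_clear_and_set_config_dword(); the call-site changes above are otherwise mechanical. Its semantics match the removed code exactly, a config-space read-modify-write, sketched here with a hypothetical demo name:

static void demo_clear_and_set_dword(struct pci_dev *pdev, int pos,
                                     u32 clear, u32 set)
{
        u32 val;

        pci_read_config_dword(pdev, pos, &val);
        val &= ~clear;
        val |= set;
        pci_write_config_dword(pdev, pos, val);
}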
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ed6b7f487..b7335be56 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -180,6 +180,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u64 l64, sz64, mask64;
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
+ const char *res_name = pci_resource_name(dev, res - dev->resource);
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
@@ -254,8 +255,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
sz64 = pci_size(l64, sz64, mask64);
if (!sz64) {
- pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
- pos);
+ pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
goto fail;
}
@@ -265,8 +265,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
res->start = 0;
res->end = 0;
- pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
- pos, (unsigned long long)sz64);
+ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
+ res_name, (unsigned long long)sz64);
goto out;
}
@@ -275,8 +275,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = sz64 - 1;
- pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
- pos, (unsigned long long)l64);
+ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
+ res_name, (unsigned long long)l64);
goto out;
}
}
@@ -302,8 +302,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = region.end - region.start;
- pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
- pos, (unsigned long long)region.start);
+ pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
+ res_name, (unsigned long long)region.start);
}
goto out;
@@ -313,7 +313,7 @@ fail:
res->flags = 0;
out:
if (res->flags)
- pci_info(dev, "reg 0x%x: %pR\n", pos, res);
+ pci_info(dev, "%s %pR\n", res_name, res);
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
@@ -344,64 +344,12 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
}
}
-static void pci_read_bridge_windows(struct pci_dev *bridge)
+static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
+ bool log)
{
- u16 io;
- u32 pmem, tmp;
-
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- if (!io) {
- pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
- bridge->io_window = 1;
-
- /*
- * DECchip 21050 pass 2 errata: the bridge may miss an address
- * disconnect boundary by one PCI data phase. Workaround: do not
- * use prefetching on this device.
- */
- if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
- return;
-
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- if (!pmem) {
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
- 0xffe0fff0);
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
- }
- if (!pmem)
- return;
-
- bridge->pref_window = 1;
-
- if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
-
- /*
- * Bridge claims to have a 64-bit prefetchable memory
- * window; verify that the upper bits are actually
- * writable.
- */
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- 0xffffffff);
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
- if (tmp)
- bridge->pref_64_window = 1;
- }
-}
-
-static void pci_read_bridge_io(struct pci_bus *child)
-{
- struct pci_dev *dev = child->self;
u8 io_base_lo, io_limit_lo;
unsigned long io_mask, io_granularity, base, limit;
struct pci_bus_region region;
- struct resource *res;
io_mask = PCI_IO_RANGE_MASK;
io_granularity = 0x1000;
@@ -411,7 +359,6 @@ static void pci_read_bridge_io(struct pci_bus *child)
io_granularity = 0x400;
}
- res = child->resource[0];
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
base = (io_base_lo & io_mask) << 8;
@@ -431,19 +378,18 @@ static void pci_read_bridge_io(struct pci_bus *child)
region.start = base;
region.end = limit + io_granularity - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, " bridge window %pR\n", res);
+ if (log)
+ pci_info(dev, " bridge window %pR\n", res);
}
}
-static void pci_read_bridge_mmio(struct pci_bus *child)
+static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
+ bool log)
{
- struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
struct pci_bus_region region;
- struct resource *res;
- res = child->resource[1];
pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
@@ -453,20 +399,19 @@ static void pci_read_bridge_mmio(struct pci_bus *child)
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, " bridge window %pR\n", res);
+ if (log)
+ pci_info(dev, " bridge window %pR\n", res);
}
}
-static void pci_read_bridge_mmio_pref(struct pci_bus *child)
+static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
+ bool log)
{
- struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
u64 base64, limit64;
pci_bus_addr_t base, limit;
struct pci_bus_region region;
- struct resource *res;
- res = child->resource[2];
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
@@ -506,10 +451,77 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, " bridge window %pR\n", res);
+ if (log)
+ pci_info(dev, " bridge window %pR\n", res);
}
}
+static void pci_read_bridge_windows(struct pci_dev *bridge)
+{
+ u32 buses;
+ u16 io;
+ u32 pmem, tmp;
+ struct resource res;
+
+ pci_read_config_dword(bridge, PCI_PRIMARY_BUS, &buses);
+ res.flags = IORESOURCE_BUS;
+ res.start = (buses >> 8) & 0xff;
+ res.end = (buses >> 16) & 0xff;
+ pci_info(bridge, "PCI bridge to %pR%s\n", &res,
+ bridge->transparent ? " (subtractive decode)" : "");
+
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ if (!io) {
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io) {
+ bridge->io_window = 1;
+ pci_read_bridge_io(bridge, &res, true);
+ }
+
+ pci_read_bridge_mmio(bridge, &res, true);
+
+ /*
+ * DECchip 21050 pass 2 errata: the bridge may miss an address
+ * disconnect boundary by one PCI data phase. Workaround: do not
+ * use prefetching on this device.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
+ return;
+
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ if (!pmem) {
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
+ 0xffe0fff0);
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
+ }
+ if (!pmem)
+ return;
+
+ bridge->pref_window = 1;
+
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+
+ /*
+ * Bridge claims to have a 64-bit prefetchable memory
+ * window; verify that the upper bits are actually
+ * writable.
+ */
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
+ if (tmp)
+ bridge->pref_64_window = 1;
+ }
+
+ pci_read_bridge_mmio_pref(bridge, &res, true);
+}
+
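+/*
+ * A minimal sketch (hypothetical helper, not part of this driver) of the
+ * probe idiom used above: write all-ones to a config register, read it
+ * back, then restore the original value. A non-zero readback means some
+ * bits are writable, i.e. the register is implemented.
+ *
+ * static bool pci_reg_implemented(struct pci_dev *dev, int pos)
+ * {
+ * u32 orig, tmp;
+ *
+ * pci_read_config_dword(dev, pos, &orig);
+ * pci_write_config_dword(dev, pos, 0xffffffff);
+ * pci_read_config_dword(dev, pos, &tmp);
+ * pci_write_config_dword(dev, pos, orig);
+ * return tmp != 0;
+ * }
+ */
+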
void pci_read_bridge_bases(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
@@ -527,9 +539,9 @@ void pci_read_bridge_bases(struct pci_bus *child)
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
- pci_read_bridge_io(child);
- pci_read_bridge_mmio(child);
- pci_read_bridge_mmio_pref(child);
+ pci_read_bridge_io(child->self, child->resource[0], false);
+ pci_read_bridge_mmio(child->self, child->resource[1], false);
+ pci_read_bridge_mmio_pref(child->self, child->resource[2], false);
if (dev->transparent) {
pci_bus_for_each_resource(child->parent, res) {
@@ -1817,6 +1829,43 @@ static void early_dump_pci_device(struct pci_dev *pdev)
value, 256, false);
}
+static const char *pci_type_str(struct pci_dev *dev)
+{
+ static const char * const str[] = {
+ "PCIe Endpoint",
+ "PCIe Legacy Endpoint",
+ "PCIe unknown",
+ "PCIe unknown",
+ "PCIe Root Port",
+ "PCIe Switch Upstream Port",
+ "PCIe Switch Downstream Port",
+ "PCIe to PCI/PCI-X bridge",
+ "PCI/PCI-X to PCIe bridge",
+ "PCIe Root Complex Integrated Endpoint",
+ "PCIe Root Complex Event Collector",
+ };
+ int type;
+
+ if (pci_is_pcie(dev)) {
+ type = pci_pcie_type(dev);
+ if (type < ARRAY_SIZE(str))
+ return str[type];
+
+ return "PCIe unknown";
+ }
+
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ return "conventional PCI endpoint";
+ case PCI_HEADER_TYPE_BRIDGE:
+ return "conventional PCI bridge";
+ case PCI_HEADER_TYPE_CARDBUS:
+ return "CardBus bridge";
+ default:
+ return "conventional PCI";
+ }
+}
+
/**
* pci_setup_device - Fill in class and map information of a device
* @dev: the device structure to fill
@@ -1887,8 +1936,9 @@ int pci_setup_device(struct pci_dev *dev)
pci_set_removable(dev);
- pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
- dev->vendor, dev->device, dev->hdr_type, dev->class);
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class,
+ pci_type_str(dev));
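+ /*
+ * Illustrative only -- for a hypothetical Root Port this now reads e.g.
+ * "pci 0000:00:1c.0: [8086:a110] type 01 class 0x060400 PCIe Root Port".
+ */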
/* Device class may be changed after fixup */
class = dev->class >> 8;
@@ -1929,14 +1979,14 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[0];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
+ pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
res);
region.start = 0x3F6;
region.end = 0x3F6;
res = &dev->resource[1];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
+ pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
res);
}
if ((progif & 4) == 0) {
@@ -1945,14 +1995,14 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[2];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
+ pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
res);
region.start = 0x376;
region.end = 0x376;
res = &dev->resource[3];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+ pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
res);
}
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fa770601c..eff7f5df0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -570,13 +570,14 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
+ const char *r_name = pci_resource_name(dev, i);
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
r->end = PAGE_SIZE - 1;
r->start = 0;
r->flags |= IORESOURCE_UNSET;
- pci_info(dev, "expanded BAR %d to page size: %pR\n",
- i, r);
+ pci_info(dev, "%s %pR: expanded to page size\n",
+ r_name, r);
}
}
}
@@ -605,6 +606,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
u32 region;
struct pci_bus_region bus_region;
struct resource *res = dev->resource + pos;
+ const char *res_name = pci_resource_name(dev, pos);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
@@ -622,8 +624,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev->bus, res, &bus_region);
- pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
- name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+ pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name);
}
/*
@@ -670,6 +671,12 @@ static void quirk_io_region(struct pci_dev *dev, int port,
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev->bus, res, &bus_region);
+ /*
+ * "res" is typically a bridge window resource that's not being
+ * used for a bridge window, so it's just a place to stash this
+ * non-standard resource. Printing "nr" or pci_resource_name() of
+ * it doesn't really make sense.
+ */
if (!pci_claim_resource(dev, nr))
pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index fd74f1c99..909e6a7c3 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -213,6 +213,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
struct list_head *head)
{
struct resource *res;
+ const char *res_name;
struct pci_dev_resource *add_res, *tmp;
struct pci_dev_resource *dev_res;
resource_size_t add_size, align;
@@ -222,6 +223,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
+
/* Skip resource that has been reset */
if (!res->flags)
goto out;
@@ -237,6 +239,7 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
continue;
idx = res - &add_res->dev->resource[0];
+ res_name = pci_resource_name(add_res->dev, idx);
add_size = add_res->add_size;
align = add_res->min_align;
if (!resource_size(res)) {
@@ -249,9 +252,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
if (pci_reassign_resource(add_res->dev, idx,
add_size, align))
- pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
- (unsigned long long) add_size, idx,
- res);
+ pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
+ res_name, res,
+ (unsigned long long) add_size);
}
out:
list_del(&add_res->list);
@@ -571,6 +574,7 @@ EXPORT_SYMBOL(pci_setup_cardbus);
static void pci_setup_bridge_io(struct pci_dev *bridge)
{
struct resource *res;
+ const char *res_name;
struct pci_bus_region region;
unsigned long io_mask;
u8 io_base_lo, io_limit_lo;
@@ -583,6 +587,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
/* Set up the top and bottom of the PCI I/O segment for this bus */
res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -591,7 +596,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
l = ((u16) io_limit_lo << 8) | io_base_lo;
/* Set up upper 16 bits of I/O base/limit */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
- pci_info(bridge, " bridge window %pR\n", res);
+ pci_info(bridge, " %s %pR\n", res_name, res);
} else {
/* Clear upper 16 bits of I/O base/limit */
io_upper16 = 0;
@@ -608,16 +613,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
static void pci_setup_bridge_mmio(struct pci_dev *bridge)
{
struct resource *res;
+ const char *res_name;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus */
res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- pci_info(bridge, " bridge window %pR\n", res);
+ pci_info(bridge, " %s %pR\n", res_name, res);
} else {
l = 0x0000fff0;
}
@@ -627,6 +634,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
struct resource *res;
+ const char *res_name;
struct pci_bus_region region;
u32 l, bu, lu;
@@ -640,6 +648,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
/* Set up PREF base/limit */
bu = lu = 0;
res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -648,7 +657,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
bu = upper_32_bits(region.start);
lu = upper_32_bits(region.end);
}
- pci_info(bridge, " bridge window %pR\n", res);
+ pci_info(bridge, " %s %pR\n", res_name, res);
} else {
l = 0x0000fff0;
}
@@ -1013,6 +1022,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
int i;
pci_dev_for_each_resource(dev, r, i) {
+ const char *r_name = pci_resource_name(dev, i);
resource_size_t r_size;
if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
@@ -1043,8 +1053,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (order < 0)
order = 0;
if (order >= ARRAY_SIZE(aligns)) {
- pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
- i, r, (unsigned long long) align);
+ pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
+ r_name, r, (unsigned long long) align);
r->flags = 0;
continue;
}
@@ -2235,6 +2245,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
i++) {
struct resource *res = &bridge->resource[i];
+ const char *res_name = pci_resource_name(bridge, i);
if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
continue;
@@ -2247,8 +2258,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
if (ret)
goto cleanup;
- pci_info(bridge, "BAR %d: releasing %pR\n",
- i, res);
+ pci_info(bridge, "%s %pR: releasing\n", res_name, res);
if (res->parent)
release_resource(res);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index ceaa69491..c6d933ddf 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -30,6 +30,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
u32 new, check, mask;
int reg;
struct resource *res = dev->resource + resno;
+ const char *res_name = pci_resource_name(dev, resno);
/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
if (dev->is_virtfn)
@@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
pci_read_config_dword(dev, reg, &check);
if ((new ^ check) & mask) {
- pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n",
- resno, new, check);
+ pci_err(dev, "%s: error updating (%#010x != %#010x)\n",
+ res_name, new, check);
}
if (res->flags & IORESOURCE_MEM_64) {
@@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check);
if (check != new) {
- pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n",
- resno, new, check);
+ pci_err(dev, "%s: error updating (high %#010x != %#010x)\n",
+ res_name, new, check);
}
}
@@ -135,11 +136,12 @@ void pci_update_resource(struct pci_dev *dev, int resno)
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
+ const char *res_name = pci_resource_name(dev, resource);
struct resource *root, *conflict;
if (res->flags & IORESOURCE_UNSET) {
- pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
- resource, res);
+ pci_info(dev, "%s %pR: can't claim; no address assigned\n",
+ res_name, res);
return -EINVAL;
}
@@ -153,16 +155,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
root = pci_find_parent_resource(dev, res);
if (!root) {
- pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
- resource, res);
+ pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n",
+ res_name, res);
res->flags |= IORESOURCE_UNSET;
return -EINVAL;
}
conflict = request_resource_conflict(root, res);
if (conflict) {
- pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
- resource, res, conflict->name, conflict);
+ pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n",
+ res_name, res, conflict->name, conflict);
res->flags |= IORESOURCE_UNSET;
return -EBUSY;
}
@@ -201,6 +203,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
{
struct resource *root, *conflict;
resource_size_t fw_addr, start, end;
+ const char *res_name = pci_resource_name(dev, resno);
fw_addr = pcibios_retrieve_fw_addr(dev, resno);
if (!fw_addr)
@@ -231,12 +234,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
root = &iomem_resource;
}
- pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
- resno, res);
+ pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res);
conflict = request_resource_conflict(root, res);
if (conflict) {
- pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
- resno, res, conflict->name, conflict);
+ pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res,
+ conflict->name, conflict);
res->start = start;
res->end = end;
res->flags |= IORESOURCE_UNSET;
@@ -325,6 +327,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
int pci_assign_resource(struct pci_dev *dev, int resno)
{
struct resource *res = dev->resource + resno;
+ const char *res_name = pci_resource_name(dev, resno);
resource_size_t align, size;
int ret;
@@ -334,8 +337,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
res->flags |= IORESOURCE_UNSET;
align = pci_resource_alignment(dev, res);
if (!align) {
- pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
- resno, res);
+ pci_info(dev, "%s %pR: can't assign; bogus alignment\n",
+ res_name, res);
return -EINVAL;
}
@@ -348,18 +351,18 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
* working, which is better than just leaving it disabled.
*/
if (ret < 0) {
- pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
+ pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res);
ret = pci_revert_fw_address(res, dev, resno, size);
}
if (ret < 0) {
- pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
+ pci_info(dev, "%s %pR: failed to assign\n", res_name, res);
return ret;
}
res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
- pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
+ pci_info(dev, "%s %pR: assigned\n", res_name, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
EXPORT_SYMBOL(pci_assign_resource);
-int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
- resource_size_t min_align)
+int pci_reassign_resource(struct pci_dev *dev, int resno,
+ resource_size_t addsize, resource_size_t min_align)
{
struct resource *res = dev->resource + resno;
+ const char *res_name = pci_resource_name(dev, resno);
unsigned long flags;
resource_size_t new_size;
int ret;
@@ -381,8 +385,8 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
flags = res->flags;
res->flags |= IORESOURCE_UNSET;
if (!res->parent) {
- pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
- resno, res);
+ pci_info(dev, "%s %pR: can't reassign; unassigned resource\n",
+ res_name, res);
return -EINVAL;
}
@@ -391,15 +395,15 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (ret) {
res->flags = flags;
- pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
- resno, res, (unsigned long long) addsize);
+ pci_info(dev, "%s %pR: failed to expand by %#llx\n",
+ res_name, res, (unsigned long long) addsize);
return ret;
}
res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
- pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
- resno, res, (unsigned long long) addsize);
+ pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n",
+ res_name, res, (unsigned long long) addsize);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -409,8 +413,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
void pci_release_resource(struct pci_dev *dev, int resno)
{
struct resource *res = dev->resource + resno;
+ const char *res_name = pci_resource_name(dev, resno);
- pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
+ pci_info(dev, "%s %pR: releasing\n", res_name, res);
if (!res->parent)
return;
@@ -480,6 +485,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
u16 cmd, old_cmd;
int i;
struct resource *r;
+ const char *r_name;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
@@ -488,6 +494,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (!(mask & (1 << i)))
continue;
+ r_name = pci_resource_name(dev, i);
+
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
if ((i == PCI_ROM_RESOURCE) &&
@@ -495,14 +503,14 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
continue;
if (r->flags & IORESOURCE_UNSET) {
- pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
- i, r);
+ pci_err(dev, "%s %pR: not assigned; can't enable device\n",
+ r_name, r);
return -EINVAL;
}
if (!r->parent) {
- pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
- i, r);
+ pci_err(dev, "%s %pR: not claimed; can't enable device\n",
+ r_name, r);
return -EINVAL;
}
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index dd3c26099..a54144418 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -437,7 +437,7 @@ err:
return ret;
}
-static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
+static void bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
{
struct bcm63xx_pcmcia_socket *skt;
struct resource *res;
@@ -449,12 +449,11 @@ static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
res = skt->reg_res;
release_mem_region(res->start, resource_size(res));
kfree(skt);
- return 0;
}
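+/*
+ * .remove_new is the void-returning variant of the platform driver remove
+ * callback; the platform bus core cannot act on the int returned by the
+ * old .remove (a non-zero value only triggers a warning), so there is
+ * nothing useful for these socket drivers to return on removal.
+ */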
struct platform_driver bcm63xx_pcmcia_driver = {
.probe = bcm63xx_drv_pcmcia_probe,
- .remove = bcm63xx_drv_pcmcia_remove,
+ .remove_new = bcm63xx_drv_pcmcia_remove,
.driver = {
.name = "bcm63xx_pcmcia",
.owner = THIS_MODULE,
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 87a33ecc2..509713b9a 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -577,7 +577,7 @@ out0:
return ret;
}
-static int db1x_pcmcia_socket_remove(struct platform_device *pdev)
+static void db1x_pcmcia_socket_remove(struct platform_device *pdev)
{
struct db1x_pcmcia_sock *sock = platform_get_drvdata(pdev);
@@ -585,8 +585,6 @@ static int db1x_pcmcia_socket_remove(struct platform_device *pdev)
pcmcia_unregister_socket(&sock->socket);
iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
kfree(sock);
-
- return 0;
}
static struct platform_driver db1x_pcmcia_socket_driver = {
@@ -594,7 +592,7 @@ static struct platform_driver db1x_pcmcia_socket_driver = {
.name = "db1xxx_pcmcia",
},
.probe = db1x_pcmcia_socket_probe,
- .remove = db1x_pcmcia_socket_remove,
+ .remove_new = db1x_pcmcia_socket_remove,
};
module_platform_driver(db1x_pcmcia_socket_driver);
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index efc27bc15..5ae826e54 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -307,7 +307,7 @@ out_free_cf:
}
-static int electra_cf_remove(struct platform_device *ofdev)
+static void electra_cf_remove(struct platform_device *ofdev)
{
struct device *device = &ofdev->dev;
struct electra_cf_socket *cf;
@@ -326,8 +326,6 @@ static int electra_cf_remove(struct platform_device *ofdev)
release_region(cf->io_base, cf->io_size);
kfree(cf);
-
- return 0;
}
static const struct of_device_id electra_cf_match[] = {
@@ -344,7 +342,7 @@ static struct platform_driver electra_cf_driver = {
.of_match_table = electra_cf_match,
},
.probe = electra_cf_probe,
- .remove = electra_cf_remove,
+ .remove_new = electra_cf_remove,
};
module_platform_driver(electra_cf_driver);
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index e613818dc..80137c7af 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -290,7 +290,7 @@ fail0:
return status;
}
-static int __exit omap_cf_remove(struct platform_device *pdev)
+static void __exit omap_cf_remove(struct platform_device *pdev)
{
struct omap_cf_socket *cf = platform_get_drvdata(pdev);
@@ -300,14 +300,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
release_mem_region(cf->phys_cf, SZ_8K);
free_irq(cf->irq, cf);
kfree(cf);
- return 0;
}
static struct platform_driver omap_cf_driver = {
.driver = {
.name = driver_name,
},
- .remove = __exit_p(omap_cf_remove),
+ .remove_new = __exit_p(omap_cf_remove),
};
static int __init omap_cf_init(void)
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 525402835..457fb81b4 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -313,15 +313,13 @@ err0:
return ret;
}
-static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
+static void pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
{
struct skt_dev_info *sinfo = platform_get_drvdata(dev);
int i;
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
-
- return 0;
}
static int pxa2xx_drv_pcmcia_resume(struct device *dev)
@@ -338,7 +336,7 @@ static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
static struct platform_driver pxa2xx_pcmcia_driver = {
.probe = pxa2xx_drv_pcmcia_probe,
- .remove = pxa2xx_drv_pcmcia_remove,
+ .remove_new = pxa2xx_drv_pcmcia_remove,
.driver = {
.name = "pxa2xx-pcmcia",
.pm = &pxa2xx_drv_pcmcia_pm_ops,
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 89d4ba58c..ccb219c38 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -158,20 +158,18 @@ static int sa11x0_drv_pcmcia_probe(struct platform_device *pdev)
return sa11xx_drv_pcmcia_add_one(skt);
}
-static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
+static void sa11x0_drv_pcmcia_remove(struct platform_device *dev)
{
struct soc_pcmcia_socket *skt;
if (dev->id == -1) {
sa11x0_drv_pcmcia_legacy_remove(dev);
- return 0;
+ return;
}
skt = platform_get_drvdata(dev);
soc_pcmcia_remove_one(skt);
-
- return 0;
}
static struct platform_driver sa11x0_pcmcia_driver = {
@@ -179,7 +177,7 @@ static struct platform_driver sa11x0_pcmcia_driver = {
.name = "sa11x0-pcmcia",
},
.probe = sa11x0_drv_pcmcia_probe,
- .remove = sa11x0_drv_pcmcia_remove,
+ .remove_new = sa11x0_drv_pcmcia_remove,
};
/* sa11x0_pcmcia_init()
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index b11c7abb1..2a93fbbd1 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -301,7 +301,7 @@ out0:
return ret;
}
-static int xxs1500_pcmcia_remove(struct platform_device *pdev)
+static void xxs1500_pcmcia_remove(struct platform_device *pdev)
{
struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev);
@@ -309,8 +309,6 @@ static int xxs1500_pcmcia_remove(struct platform_device *pdev)
free_irq(gpio_to_irq(GPIO_CDA), sock);
iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
kfree(sock);
-
- return 0;
}
static struct platform_driver xxs1500_pcmcia_socket_driver = {
@@ -318,7 +316,7 @@ static struct platform_driver xxs1500_pcmcia_socket_driver = {
.name = "xxs1500_pcmcia",
},
.probe = xxs1500_pcmcia_probe,
- .remove = xxs1500_pcmcia_remove,
+ .remove_new = xxs1500_pcmcia_remove,
};
module_platform_driver(xxs1500_pcmcia_socket_driver);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 273d67ecf..ec6e0d919 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU
Enable perf support for Marvell DDR Performance monitoring
event on CN10K platform.
+config DWC_PCIE_PMU
+ tristate "Synopsys DesignWare PCIe PMU"
+ depends on PCI
+ help
+ Enable perf support for the Synopsys DesignWare PCIe PMU performance
+ monitoring events on platforms including the Alibaba Yitian 710.
+
source "drivers/perf/arm_cspmu/Kconfig"
source "drivers/perf/amlogic/Kconfig"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 16b3ec4db..a06338e34 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
+obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o
obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/
obj-$(CONFIG_MESON_DDR_PMU) += amlogic/
obj-$(CONFIG_CXL_PMU) += cxl_pmu.o
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index cd2de44b6..f322e5ca1 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -524,8 +524,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (!attr->exclude_guest)
- return -EINVAL;
+ if (!attr->exclude_guest) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
if (!attr->exclude_kernel)
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 2cc35dded..50b89b989 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1108,7 +1108,6 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
- u64 acpi_uid;
struct device *cpu_dev;
struct acpi_device *acpi_dev;
@@ -1118,8 +1117,7 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
acpi_dev = ACPI_COMPANION(cpu_dev);
while (acpi_dev) {
- if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, NULL) &&
- !acpi_dev_uid_to_integer(acpi_dev, &acpi_uid) && acpi_uid == container_uid)
+ if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, container_uid))
return 0;
acpi_dev = acpi_dev_parent(acpi_dev);
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 8223c49bd..7ec4498e3 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -371,7 +371,7 @@ static inline u32 dsu_pmu_get_reset_overflow(void)
return __dsu_pmu_get_reset_overflow();
}
-/**
+/*
* dsu_pmu_set_event_period: Set the period for the counter.
*
* All DSU PMU event counters, except the cycle counter are 32bit
@@ -602,7 +602,7 @@ static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
return dsu_pmu;
}
-/**
+/*
* dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster
* from device tree.
*/
@@ -632,7 +632,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
return 0;
}
-/**
+/*
* dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster
* from ACPI.
*/
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d712a19e4..8458fe2ce 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -445,7 +445,7 @@ __hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- int mapping;
+ int mapping, ret;
hwc->flags = 0;
mapping = armpmu->map_event(event);
@@ -470,11 +470,10 @@ __hw_perf_event_init(struct perf_event *event)
/*
* Check whether we need to exclude the counter from certain modes.
*/
- if (armpmu->set_event_filter &&
- armpmu->set_event_filter(hwc, &event->attr)) {
- pr_debug("ARM performance counters do not support "
- "mode exclusion\n");
- return -EOPNOTSUPP;
+ if (armpmu->set_event_filter) {
+ ret = armpmu->set_event_filter(hwc, &event->attr);
+ if (ret)
+ return ret;
}
/*
@@ -893,7 +892,6 @@ struct arm_pmu *armpmu_alloc(void)
struct pmu_hw_events *events;
events = per_cpu_ptr(pmu->hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = pmu;
}
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0e80fdc9f..23fa6c5da 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -15,6 +15,7 @@
#include <clocksource/arm_arch_timer.h>
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
@@ -298,26 +299,66 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = {
.is_visible = armv8pmu_event_attr_is_visible,
};
-PMU_FORMAT_ATTR(event, "config:0-15");
-PMU_FORMAT_ATTR(long, "config1:0");
-PMU_FORMAT_ATTR(rdpmc, "config1:1");
+/* User ABI */
+#define ATTR_CFG_FLD_event_CFG config
+#define ATTR_CFG_FLD_event_LO 0
+#define ATTR_CFG_FLD_event_HI 15
+#define ATTR_CFG_FLD_long_CFG config1
+#define ATTR_CFG_FLD_long_LO 0
+#define ATTR_CFG_FLD_long_HI 0
+#define ATTR_CFG_FLD_rdpmc_CFG config1
+#define ATTR_CFG_FLD_rdpmc_LO 1
+#define ATTR_CFG_FLD_rdpmc_HI 1
+#define ATTR_CFG_FLD_threshold_count_CFG config1 /* PMEVTYPER.TC[0] */
+#define ATTR_CFG_FLD_threshold_count_LO 2
+#define ATTR_CFG_FLD_threshold_count_HI 2
+#define ATTR_CFG_FLD_threshold_compare_CFG config1 /* PMEVTYPER.TC[2:1] */
+#define ATTR_CFG_FLD_threshold_compare_LO 3
+#define ATTR_CFG_FLD_threshold_compare_HI 4
+#define ATTR_CFG_FLD_threshold_CFG config1 /* PMEVTYPER.TH */
+#define ATTR_CFG_FLD_threshold_LO 5
+#define ATTR_CFG_FLD_threshold_HI 16
+
+GEN_PMU_FORMAT_ATTR(event);
+GEN_PMU_FORMAT_ATTR(long);
+GEN_PMU_FORMAT_ATTR(rdpmc);
+GEN_PMU_FORMAT_ATTR(threshold_count);
+GEN_PMU_FORMAT_ATTR(threshold_compare);
+GEN_PMU_FORMAT_ATTR(threshold);
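+
+/*
+ * GEN_PMU_FORMAT_ATTR() stringifies the *_CFG/*_LO/*_HI triplet, so
+ * GEN_PMU_FORMAT_ATTR(event) expands to the equivalent of
+ * PMU_FORMAT_ATTR(event, "config:0-15") and GEN_PMU_FORMAT_ATTR(threshold)
+ * to PMU_FORMAT_ATTR(threshold, "config1:5-16"), matching the open-coded
+ * attributes removed above.
+ */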
static int sysctl_perf_user_access __read_mostly;
-static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+static bool armv8pmu_event_is_64bit(struct perf_event *event)
{
- return event->attr.config1 & 0x1;
+ return ATTR_CFG_GET_FLD(&event->attr, long);
}
-static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
+static bool armv8pmu_event_want_user_access(struct perf_event *event)
{
- return event->attr.config1 & 0x2;
+ return ATTR_CFG_GET_FLD(&event->attr, rdpmc);
+}
+
+static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr)
+{
+ u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare);
+ u8 th_count = ATTR_CFG_GET_FLD(attr, threshold_count);
+
+ /*
+ * The count bit is always the bottom bit of the full control field, and
+ * the comparison is the upper two bits, but it's not explicitly
+ * labelled in the Arm ARM. For the Perf interface we split it into two
+ * fields, so reconstruct it here.
+ */
+ return (th_compare << 1) | th_count;
}
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
&format_attr_long.attr,
&format_attr_rdpmc.attr,
+ &format_attr_threshold.attr,
+ &format_attr_threshold_compare.attr,
+ &format_attr_threshold_count.attr,
NULL,
};
@@ -331,7 +372,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+ u32 slots = FIELD_GET(ARMV8_PMU_SLOTS, cpu_pmu->reg_pmmir);
return sysfs_emit(page, "0x%08x\n", slots);
}
@@ -343,8 +384,7 @@ static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
- & ARMV8_PMU_BUS_SLOTS_MASK;
+ u32 bus_slots = FIELD_GET(ARMV8_PMU_BUS_SLOTS, cpu_pmu->reg_pmmir);
return sysfs_emit(page, "0x%08x\n", bus_slots);
}
@@ -356,8 +396,7 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
- & ARMV8_PMU_BUS_WIDTH_MASK;
+ u32 bus_width = FIELD_GET(ARMV8_PMU_BUS_WIDTH, cpu_pmu->reg_pmmir);
u32 val = 0;
/* Encoded as Log2(number of bytes), plus one */
@@ -369,10 +408,38 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(bus_width);
+static u32 threshold_max(struct arm_pmu *cpu_pmu)
+{
+ /*
+ * PMMIR.THWIDTH is readable and non-zero on aarch32, but it would be
+ * impossible to write the threshold in the upper 32 bits of PMEVTYPER.
+ */
+ if (IS_ENABLED(CONFIG_ARM))
+ return 0;
+
+ /*
+ * The largest value that can be written to PMEVTYPER<n>_EL0.TH is
+ * (2 ^ PMMIR.THWIDTH) - 1.
+ */
+ return (1 << FIELD_GET(ARMV8_PMU_THWIDTH, cpu_pmu->reg_pmmir)) - 1;
+}
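+
+/*
+ * For example, PMMIR.THWIDTH = 8 gives threshold_max() = (1 << 8) - 1 =
+ * 255, the largest programmable PMEVTYPER<n>_EL0.TH value.
+ */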
+
+static ssize_t threshold_max_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+
+ return sysfs_emit(page, "0x%08x\n", threshold_max(cpu_pmu));
+}
+
+static DEVICE_ATTR_RO(threshold_max);
+
static struct attribute *armv8_pmuv3_caps_attrs[] = {
&dev_attr_slots.attr,
&dev_attr_bus_slots.attr,
&dev_attr_bus_width.attr,
+ &dev_attr_threshold_max.attr,
NULL,
};
@@ -401,7 +468,7 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}
-static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
+static bool armv8pmu_event_has_user_read(struct perf_event *event)
{
return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}
@@ -411,7 +478,7 @@ static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
* except when we have allocated the 64bit cycle counter (for CPU
* cycles event) or when user space counter access is enabled.
*/
-static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+static bool armv8pmu_event_is_chained(struct perf_event *event)
{
int idx = event->hw.idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
@@ -432,36 +499,36 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-static inline u64 armv8pmu_pmcr_read(void)
+static u64 armv8pmu_pmcr_read(void)
{
return read_pmcr();
}
-static inline void armv8pmu_pmcr_write(u64 val)
+static void armv8pmu_pmcr_write(u64 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
write_pmcr(val);
}
-static inline int armv8pmu_has_overflowed(u32 pmovsr)
+static int armv8pmu_has_overflowed(u32 pmovsr)
{
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
-static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+static int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline u64 armv8pmu_read_evcntr(int idx)
+static u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
return read_pmevcntrn(counter);
}
-static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+static u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
int idx = event->hw.idx;
u64 val = armv8pmu_read_evcntr(idx);
@@ -523,14 +590,14 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
return armv8pmu_unbias_long_counter(event, value);
}
-static inline void armv8pmu_write_evcntr(int idx, u64 value)
+static void armv8pmu_write_evcntr(int idx, u64 value)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
write_pmevcntrn(counter, value);
}
-static inline void armv8pmu_write_hw_counter(struct perf_event *event,
+static void armv8pmu_write_hw_counter(struct perf_event *event,
u64 value)
{
int idx = event->hw.idx;
@@ -556,15 +623,22 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
armv8pmu_write_hw_counter(event, value);
}
-static inline void armv8pmu_write_evtype(int idx, u32 val)
+static void armv8pmu_write_evtype(int idx, unsigned long val)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ unsigned long mask = ARMV8_PMU_EVTYPE_EVENT |
+ ARMV8_PMU_INCLUDE_EL2 |
+ ARMV8_PMU_EXCLUDE_EL0 |
+ ARMV8_PMU_EXCLUDE_EL1;
+
+ if (IS_ENABLED(CONFIG_ARM64))
+ mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH;
- val &= ARMV8_PMU_EVTYPE_MASK;
+ val &= mask;
write_pmevtypern(counter, val);
}
-static inline void armv8pmu_write_event_type(struct perf_event *event)
+static void armv8pmu_write_event_type(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -598,7 +672,7 @@ static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
return mask;
}
-static inline void armv8pmu_enable_counter(u32 mask)
+static void armv8pmu_enable_counter(u32 mask)
{
/*
* Make sure event configuration register writes are visible before we
@@ -608,7 +682,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
write_pmcntenset(mask);
}
-static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+static void armv8pmu_enable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u32 mask = armv8pmu_event_cnten_mask(event);
@@ -620,7 +694,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
armv8pmu_enable_counter(mask);
}
-static inline void armv8pmu_disable_counter(u32 mask)
+static void armv8pmu_disable_counter(u32 mask)
{
write_pmcntenclr(mask);
/*
@@ -630,7 +704,7 @@ static inline void armv8pmu_disable_counter(u32 mask)
isb();
}
-static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+static void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
u32 mask = armv8pmu_event_cnten_mask(event);
@@ -642,18 +716,18 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
armv8pmu_disable_counter(mask);
}
-static inline void armv8pmu_enable_intens(u32 mask)
+static void armv8pmu_enable_intens(u32 mask)
{
write_pmintenset(mask);
}
-static inline void armv8pmu_enable_event_irq(struct perf_event *event)
+static void armv8pmu_enable_event_irq(struct perf_event *event)
{
u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
armv8pmu_enable_intens(BIT(counter));
}
-static inline void armv8pmu_disable_intens(u32 mask)
+static void armv8pmu_disable_intens(u32 mask)
{
write_pmintenclr(mask);
isb();
@@ -662,13 +736,13 @@ static inline void armv8pmu_disable_intens(u32 mask)
isb();
}
-static inline void armv8pmu_disable_event_irq(struct perf_event *event)
+static void armv8pmu_disable_event_irq(struct perf_event *event)
{
u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
armv8pmu_disable_intens(BIT(counter));
}
-static inline u32 armv8pmu_getreset_flags(void)
+static u32 armv8pmu_getreset_flags(void)
{
u32 value;
@@ -676,7 +750,7 @@ static inline u32 armv8pmu_getreset_flags(void)
value = read_pmovsclr();
/* Write to clear flags */
- value &= ARMV8_PMU_OVSR_MASK;
+ value &= ARMV8_PMU_OVERFLOWED_MASK;
write_pmovsclr(value);
return value;
@@ -918,9 +992,15 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
-
- if (attr->exclude_idle)
- return -EPERM;
+ struct perf_event *perf_event = container_of(attr, struct perf_event,
+ attr);
+ struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
+ u32 th;
+
+ if (attr->exclude_idle) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
/*
* If we're running in hyp mode, then we *are* the hypervisor.
@@ -950,6 +1030,22 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
config_base |= ARMV8_PMU_EXCLUDE_EL0;
/*
+ * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will
+ * be 0 and will also trigger this check, preventing it from being used.
+ */
+ th = ATTR_CFG_GET_FLD(attr, threshold);
+ if (th > threshold_max(cpu_pmu)) {
+ pr_debug("PMU event threshold exceeds max value\n");
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64) && th) {
+ config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th);
+ config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC,
+ armv8pmu_event_threshold_control(attr));
+ }
+
+ /*
* Install the filter into config_base as this is used to
* construct the event type.
*/
@@ -1111,8 +1207,7 @@ static void __armv8pmu_probe_pmu(void *info)
probe->present = true;
/* Read the nb of CNTx counters supported from PMNC */
- cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
- & ARMV8_PMU_PMCR_N_MASK;
+ cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read());
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
@@ -1225,6 +1320,12 @@ static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
return armv8_pmu_init(cpu_pmu, #name, armv8_pmuv3_map_event); \
}
+#define PMUV3_INIT_MAP_EVENT(name, map_event) \
+static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
+{ \
+ return armv8_pmu_init(cpu_pmu, #name, map_event); \
+}
+
PMUV3_INIT_SIMPLE(armv8_pmuv3)
PMUV3_INIT_SIMPLE(armv8_cortex_a34)
@@ -1251,51 +1352,24 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
-static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a35", armv8_a53_map_event);
-}
-
-static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a53", armv8_a53_map_event);
-}
-
-static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a57", armv8_a57_map_event);
-}
-
-static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a72", armv8_a57_map_event);
-}
-
-static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cortex_a73", armv8_a73_map_event);
-}
-
-static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_cavium_thunder", armv8_thunder_map_event);
-}
-
-static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init(cpu_pmu, "armv8_brcm_vulcan", armv8_vulcan_map_event);
-}
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a35, armv8_a53_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a57, armv8_a57_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a72, armv8_a57_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a73, armv8_a73_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cavium_thunder, armv8_thunder_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_brcm_vulcan, armv8_vulcan_map_event)
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
{.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
- {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
- {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+ {.compatible = "arm,cortex-a35-pmu", .data = armv8_cortex_a35_pmu_init},
+ {.compatible = "arm,cortex-a53-pmu", .data = armv8_cortex_a53_pmu_init},
{.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
- {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+ {.compatible = "arm,cortex-a57-pmu", .data = armv8_cortex_a57_pmu_init},
{.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
- {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
- {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
+ {.compatible = "arm,cortex-a72-pmu", .data = armv8_cortex_a72_pmu_init},
+ {.compatible = "arm,cortex-a73-pmu", .data = armv8_cortex_a73_pmu_init},
{.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
{.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
{.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
@@ -1313,8 +1387,8 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
{.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
- {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
- {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
+ {.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
+ {.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
{.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
{},
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d2b0cbf0e..b622d75d8 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -206,28 +206,6 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_inv_event_filter_LO 0
#define ATTR_CFG_FLD_inv_event_filter_HI 63
-/* Why does everything I do descend into this? */
-#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
- (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
-
-#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
- __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
-
-#define GEN_PMU_FORMAT_ATTR(name) \
- PMU_FORMAT_ATTR(name, \
- _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
- ATTR_CFG_FLD_##name##_LO, \
- ATTR_CFG_FLD_##name##_HI))
-
-#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
- ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
-
-#define ATTR_CFG_GET_FLD(attr, name) \
- _ATTR_CFG_GET_FLD(attr, \
- ATTR_CFG_FLD_##name##_CFG, \
- ATTR_CFG_FLD_##name##_LO, \
- ATTR_CFG_FLD_##name##_HI)
-
GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
new file mode 100644
index 000000000..957058ad0
--- /dev/null
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe PMU driver
+ *
+ * Copyright (C) 2021-2023 Alibaba Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DWC_PCIE_VSEC_RAS_DES_ID 0x02
+#define DWC_PCIE_EVENT_CNT_CTL 0x8
+
+/*
+ * Event Counter Data Select includes two parts:
+ * - 27-24: Group number (4-bit: 0..0x7)
+ * - 23-16: Event number (8-bit: 0..0x13) within the Group
+ *
+ * Put them together as in the TRM.
+ */
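+/*
+ * For example, group 1, event 0x02 within that group encodes as
+ * FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, (1 << 8) | 0x02), i.e. 0x102 placed
+ * in bits [27:16].
+ */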
+#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16)
+#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8)
+#define DWC_PCIE_CNT_STATUS BIT(7)
+#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2)
+#define DWC_PCIE_PER_EVENT_OFF 0x1
+#define DWC_PCIE_PER_EVENT_ON 0x3
+#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0)
+#define DWC_PCIE_EVENT_PER_CLEAR 0x1
+
+#define DWC_PCIE_EVENT_CNT_DATA 0xC
+
+#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10
+#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24)
+#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8)
+#define DWC_PCIE_DURATION_MANUAL_CTL 0x0
+#define DWC_PCIE_DURATION_1MS 0x1
+#define DWC_PCIE_DURATION_10MS 0x2
+#define DWC_PCIE_DURATION_100MS 0x3
+#define DWC_PCIE_DURATION_1S 0x4
+#define DWC_PCIE_DURATION_2S 0x5
+#define DWC_PCIE_DURATION_4S 0x6
+#define DWC_PCIE_DURATION_4US 0xFF
+#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0)
+#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1
+
+#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14
+#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18
+
+/* Event attributes */
+#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0)
+#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16)
+#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20)
+
+#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
+#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
+#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
+
+enum dwc_pcie_event_type {
+ DWC_PCIE_TIME_BASE_EVENT,
+ DWC_PCIE_LANE_EVENT,
+ DWC_PCIE_EVENT_TYPE_MAX,
+};
+
+#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0)
+#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0)
+
+struct dwc_pcie_pmu {
+ struct pmu pmu;
+ struct pci_dev *pdev; /* Root Port device */
+ u16 ras_des_offset;
+ u32 nr_lanes;
+
+ struct list_head pmu_node;
+ struct hlist_node cpuhp_node;
+ struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX];
+ int on_cpu;
+};
+
+#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu))
+
+static int dwc_pcie_pmu_hp_state;
+static struct list_head dwc_pcie_dev_info_head =
+ LIST_HEAD_INIT(dwc_pcie_dev_info_head);
+static bool notify;
+
+struct dwc_pcie_dev_info {
+ struct platform_device *plat_dev;
+ struct pci_dev *pdev;
+ struct list_head dev_node;
+};
+
+struct dwc_pcie_vendor_id {
+ int vendor_id;
+};
+
+static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
+ {.vendor_id = PCI_VENDOR_ID_ALIBABA },
+ {} /* terminator */
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group dwc_pcie_cpumask_attr_group = {
+ .attrs = dwc_pcie_pmu_cpumask_attrs,
+};
+
+struct dwc_pcie_format_attr {
+ struct device_attribute attr;
+ u64 field;
+ int config;
+};
+
+PMU_FORMAT_ATTR(eventid, "config:0-15");
+PMU_FORMAT_ATTR(type, "config:16-19");
+PMU_FORMAT_ATTR(lane, "config:20-27");
+
+static struct attribute *dwc_pcie_format_attrs[] = {
+ &format_attr_type.attr,
+ &format_attr_eventid.attr,
+ &format_attr_lane.attr,
+ NULL,
+};
+
+static struct attribute_group dwc_pcie_format_attrs_group = {
+ .name = "format",
+ .attrs = dwc_pcie_format_attrs,
+};
+
+struct dwc_pcie_event_attr {
+ struct device_attribute attr;
+ enum dwc_pcie_event_type type;
+ u16 eventid;
+ u8 lane;
+};
+
+static ssize_t dwc_pcie_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc_pcie_event_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr);
+
+ if (eattr->type == DWC_PCIE_LANE_EVENT)
+ return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
+ eattr->eventid, eattr->type);
+ else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
+ return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
+ eattr->eventid, eattr->type);
+
+ return 0;
+}
+
+#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \
+ (&((struct dwc_pcie_event_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \
+ .type = _type, \
+ .eventid = _eventid, \
+ .lane = _lane, \
+ }})[0].attr.attr)
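+
+/*
+ * The compound literal above defines a one-element anonymous array of
+ * struct dwc_pcie_event_attr in place, giving each event static storage
+ * without a named variable; [0].attr.attr selects the embedded struct
+ * attribute, whose address the leading & takes.
+ */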
+
+#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \
+ DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0)
+#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \
+ DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0)
+
+static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
+ /* Group #0 */
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
+
+ /* Group #1 */
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23),
+
+ /*
+ * Leave it to the user to specify the lane ID to avoid generating
+ * a list of hundreds of events.
+ */
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717),
+ NULL
+};
+
+static const struct attribute_group dwc_pcie_event_attrs_group = {
+ .name = "events",
+ .attrs = dwc_pcie_pmu_time_event_attrs,
+};
+
+static const struct attribute_group *dwc_pcie_attr_groups[] = {
+ &dwc_pcie_event_attrs_group,
+ &dwc_pcie_format_attrs_group,
+ &dwc_pcie_cpumask_attr_group,
+ NULL
+};
+
+static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ bool enable)
+{
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+
+ if (enable)
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ else
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+}
+
+static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ bool enable)
+{
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL,
+ DWC_PCIE_TIME_BASED_TIMER_START, enable);
+}
+
+static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 val;
+
+ pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);
+
+ return val;
+}
+
+static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 lo, hi, ss;
+ u64 val;
+
+ /*
+ * The 64-bit value of the data counter is spread across two
+ * registers that are not synchronized. In order to read them
+ * atomically, ensure that the high 32 bits match before and after
+ * reading the low 32 bits.
+ */
+ pci_read_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi);
+ do {
+ /* snapshot the high 32 bits */
+ ss = hi;
+
+ pci_read_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW,
+ &lo);
+ pci_read_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH,
+ &hi);
+ } while (hi != ss);
+
+ val = ((u64)hi << 32) | lo;
+ /*
+ * The Group#1 event measures the amount of data processed in 16-byte
+ * units. Simplify the end-user interface by multiplying the counter
+ * at the point of read.
+ */
+ if (event_id >= 0x20 && event_id <= 0x23)
+ val *= 16;
+
+ return val;
+}
+
+static void dwc_pcie_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ u64 delta, prev, now = 0;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ now = dwc_pcie_pmu_read_lane_event_counter(event);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ now = dwc_pcie_pmu_read_time_based_counter(event);
+
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
+ /* 32-bit counter for Lane Event Counting */
+ if (type == DWC_PCIE_LANE_EVENT)
+ delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;
+
+ local64_add(delta, &event->count);
+}
+
+static int dwc_pcie_pmu_event_init(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ struct perf_event *sibling;
+ u32 lane;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
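+ /* Grouping is allowed only with software events or events on this PMU. */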
+ if (event->group_leader != event &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu && !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ lane = DWC_PCIE_EVENT_LANE(event);
+ if (lane < 0 || lane >= pcie_pmu->nr_lanes)
+ return -EINVAL;
+ }
+
+ event->cpu = pcie_pmu->on_cpu;
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ hwc->state = 0;
+ local64_set(&hwc->prev_count, 0);
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
+}
+
+static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);
+
+ dwc_pcie_pmu_event_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 ctrl;
+
+ /* Only one counter exists per event type; fail if it is already in use. */
+ if (pcie_pmu->event[type])
+ return -ENOSPC;
+
+ pcie_pmu->event[type] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ /* EVENT_COUNTER_DATA_REG needs to be cleared manually */
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+ } else if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ /*
+ * TIME_BASED_ANAL_DATA_REG is a 64-bit register, so it can safely
+ * be used with any manually controlled duration. It is cleared
+ * when the next measurement starts.
+ */
+ ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
+ DWC_PCIE_DURATION_MANUAL_CTL) |
+ DWC_PCIE_TIME_BASED_CNT_ENABLE;
+ pci_write_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
+ }
+
+ if (flags & PERF_EF_START)
+ dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+ perf_event_update_userpage(event);
+ pcie_pmu->event[type] = NULL;
+}
+
+static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
+}
+
+/*
+ * Find the bound RAS DES capability device info of a PCI device.
+ * @pdev: The PCI device.
+ */
+static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev)
+{
+ struct dwc_pcie_dev_info *dev_info;
+
+ list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node)
+ if (dev_info->pdev == pdev)
+ return dev_info;
+
+ return NULL;
+}
+
+static void dwc_pcie_unregister_pmu(void *data)
+{
+ struct dwc_pcie_pmu *pcie_pmu = data;
+
+ perf_pmu_unregister(&pcie_pmu->pmu);
+}
+
+static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
+{
+ const struct dwc_pcie_vendor_id *vid;
+ u16 vsec = 0;
+ u32 val;
+
+ if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
+ return false;
+
+ for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
+ vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
+ DWC_PCIE_VSEC_RAS_DES_ID);
+ if (vsec)
+ break;
+ }
+ if (!vsec)
+ return false;
+
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ if (PCI_VNDR_HEADER_REV(val) != 0x04)
+ return false;
+
+ pci_dbg(pdev,
+ "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+ return true;
+}
+
+static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
+{
+ platform_device_unregister(dev_info->plat_dev);
+ list_del(&dev_info->dev_node);
+ kfree(dev_info);
+}
+
+static int dwc_pcie_register_dev(struct pci_dev *pdev)
+{
+ struct platform_device *plat_dev;
+ struct dwc_pcie_dev_info *dev_info;
+ u32 bdf;
+
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf,
+ pdev, sizeof(*pdev));
+
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+ if (!dev_info)
+ return -ENOMEM;
+
+ /* Cache the platform device to handle PCI device hotplug */
+ dev_info->plat_dev = plat_dev;
+ dev_info->pdev = pdev;
+ list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dwc_pcie_dev_info *dev_info;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (!dwc_pcie_match_des_cap(pdev))
+ return NOTIFY_DONE;
+ if (dwc_pcie_register_dev(pdev))
+ return NOTIFY_BAD;
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ dev_info = dwc_pcie_find_dev_info(pdev);
+ if (!dev_info)
+ return NOTIFY_DONE;
+ dwc_pcie_unregister_dev(dev_info);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dwc_pcie_pmu_nb = {
+ .notifier_call = dwc_pcie_pmu_notifier,
+};
+
+static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
+{
+ struct pci_dev *pdev = plat_dev->dev.platform_data;
+ struct dwc_pcie_pmu *pcie_pmu;
+ char *name;
+ u32 bdf, val;
+ u16 vsec;
+ int ret;
+
+ vsec = pci_find_vsec_capability(pdev, pdev->vendor,
+ DWC_PCIE_VSEC_RAS_DES_ID);
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf);
+ if (!name)
+ return -ENOMEM;
+
+ pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
+ if (!pcie_pmu)
+ return -ENOMEM;
+
+ pcie_pmu->pdev = pdev;
+ pcie_pmu->ras_des_offset = vsec;
+ pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
+ pcie_pmu->on_cpu = -1;
+ pcie_pmu->pmu = (struct pmu){
+ .name = name,
+ .parent = &pdev->dev,
+ .module = THIS_MODULE,
+ .attr_groups = dwc_pcie_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = dwc_pcie_pmu_event_init,
+ .add = dwc_pcie_pmu_event_add,
+ .del = dwc_pcie_pmu_event_del,
+ .start = dwc_pcie_pmu_event_start,
+ .stop = dwc_pcie_pmu_event_stop,
+ .read = dwc_pcie_pmu_event_update,
+ };
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
+ &pcie_pmu->cpuhp_node);
+ if (ret) {
+ pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf);
+ return ret;
+ }
+
+ /* Unwind when the platform driver is removed */
+ ret = devm_add_action_or_reset(&plat_dev->dev,
+ dwc_pcie_pmu_remove_cpuhp_instance,
+ &pcie_pmu->cpuhp_node);
+ if (ret)
+ return ret;
+
+ ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
+ if (ret) {
+ pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
+ pcie_pmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct dwc_pcie_pmu *pcie_pmu;
+
+ pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
+ if (pcie_pmu->on_cpu == -1)
+ pcie_pmu->on_cpu = cpumask_local_spread(
+ 0, dev_to_node(&pcie_pmu->pdev->dev));
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct dwc_pcie_pmu *pcie_pmu;
+ struct pci_dev *pdev;
+ int node;
+ cpumask_t mask;
+ unsigned int target;
+
+ pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (cpu != pcie_pmu->on_cpu)
+ return 0;
+
+ pcie_pmu->on_cpu = -1;
+ pdev = pcie_pmu->pdev;
+ node = dev_to_node(&pdev->dev);
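+ /* Prefer another online CPU on the device's NUMA node, else any online CPU. */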
+ if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
+ cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
+ target = cpumask_any(&mask);
+ else
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids) {
+ pci_err(pdev, "There is no CPU to set\n");
+ return 0;
+ }
+
+ /* This PMU does not support interrupts; just migrate the perf context. */
+ perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
+ pcie_pmu->on_cpu = target;
+
+ return 0;
+}
+
+static struct platform_driver dwc_pcie_pmu_driver = {
+ .probe = dwc_pcie_pmu_probe,
+ .driver = {.name = "dwc_pcie_pmu",},
+};
+
+static int __init dwc_pcie_pmu_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ bool found = false;
+ int ret;
+
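+ /* Scan the root ports already present; the bus notifier handles later hotplug. */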
+ for_each_pci_dev(pdev) {
+ if (!dwc_pcie_match_des_cap(pdev))
+ continue;
+
+ ret = dwc_pcie_register_dev(pdev);
+ if (ret) {
+ pci_dev_put(pdev);
+ return ret;
+ }
+
+ found = true;
+ }
+ if (!found)
+ return -ENODEV;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/dwc_pcie_pmu:online",
+ dwc_pcie_pmu_online_cpu,
+ dwc_pcie_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ dwc_pcie_pmu_hp_state = ret;
+
+ ret = platform_driver_register(&dwc_pcie_pmu_driver);
+ if (ret)
+ goto platform_driver_register_err;
+
+ ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ if (ret)
+ goto platform_driver_register_err;
+ notify = true;
+
+ return 0;
+
+platform_driver_register_err:
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+
+ return ret;
+}
+
+static void __exit dwc_pcie_pmu_exit(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ if (notify)
+ bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
+ dwc_pcie_unregister_dev(dev_info);
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+}
+
+module_init(dwc_pcie_pmu_init);
+module_exit(dwc_pcie_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
+MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 92611c981..7dbfaee37 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -19,6 +19,8 @@
#define COUNTER_READ 0x20
#define COUNTER_DPCR1 0x30
+#define COUNTER_MUX_CNTL 0x50
+#define COUNTER_MASK_COMP 0x54
#define CNTL_OVER 0x1
#define CNTL_CLEAR 0x2
@@ -32,6 +34,13 @@
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
+#define READ_PORT_SHIFT 0
+#define READ_PORT_MASK (0x7 << READ_PORT_SHIFT)
+#define READ_CHANNEL_REVERT 0x00000008 /* bit 3 for read channel select */
+#define WRITE_PORT_SHIFT 8
+#define WRITE_PORT_MASK (0x7 << WRITE_PORT_SHIFT)
+#define WRITE_CHANNEL_REVERT 0x00000800 /* bit 11 for write channel select */
+
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
@@ -50,6 +59,7 @@ static DEFINE_IDA(ddr_ida);
/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
+#define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER 0x4 /* support AXI ID PORT CHANNEL filter */
struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */
@@ -82,6 +92,11 @@ static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
.identifier = "i.MX8MP",
};
+static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
+ .identifier = "i.MX8DXL",
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
@@ -89,6 +104,7 @@ static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
+ { .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -144,6 +160,7 @@ static const struct attribute_group ddr_perf_identifier_attr_group = {
enum ddr_perf_filter_capabilities {
PERF_CAP_AXI_ID_FILTER = 0,
PERF_CAP_AXI_ID_FILTER_ENHANCED,
+ PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER,
PERF_CAP_AXI_ID_FEAT_MAX,
};
@@ -157,6 +174,8 @@ static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
case PERF_CAP_AXI_ID_FILTER_ENHANCED:
quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER:
+ return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER);
default:
WARN(1, "unknown filter cap %d\n", cap);
}
@@ -187,6 +206,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev,
static struct attribute *ddr_perf_filter_cap_attr[] = {
PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
+ PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER),
NULL,
};
@@ -272,11 +292,15 @@ static const struct attribute_group ddr_perf_events_attr_group = {
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
+PMU_FORMAT_ATTR(axi_port, "config2:0-2");
+PMU_FORMAT_ATTR(axi_channel, "config2:3-3");
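+
+/*
+ * A filtered-count sketch using the new config2 fields (all values
+ * illustrative): count reads from AXI port 2 on the selected channel:
+ *
+ *   perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x0,axi_port=0x2,axi_channel=0x1/
+ */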
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
&format_attr_axi_id.attr,
&format_attr_axi_mask.attr,
+ &format_attr_axi_port.attr,
+ &format_attr_axi_channel.attr,
NULL,
};
@@ -530,6 +554,7 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
int counter;
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
+ int cfg2 = event->attr.config2;
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
int i;
@@ -553,6 +578,26 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
return -EOPNOTSUPP;
}
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) {
+ if (ddr_perf_is_filtered(event)) {
+ /* revert the AXI ID masking (axi_mask) value */
+ cfg1 ^= AXI_MASKING_REVERT;
+ writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4));
+
+ if (cfg == 0x41) {
+ /* revert the AXI read channel (axi_channel) value */
+ cfg2 ^= READ_CHANNEL_REVERT;
+ cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2);
+ } else {
+ /* revert the AXI write channel (axi_channel) value */
+ cfg2 ^= WRITE_CHANNEL_REVERT;
+ cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2);
+ }
+
+ writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4));
+ }
+ }
+
pmu->events[counter] = event;
hwc->idx = counter;
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 5cf770a1b..9685645bf 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -617,7 +617,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmu);
- pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id);
if (!name) {
ret = -ENOMEM;
@@ -674,7 +674,7 @@ cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
format_string_err:
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
@@ -688,7 +688,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 636fb7964..481dcc9e8 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -287,12 +287,52 @@ static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
-static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
+}
+
+static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
+static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ hisi_uc_pmu_start_counters(uc_pmu);
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ hisi_uc_pmu_stop_counters(uc_pmu);
+}
+
+static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
+ bool erratum = uc_pmu->identifier == HISI_PMU_V2;
+
+ /*
+ * HiSilicon UC PMU v2 suffers from erratum 162700402: the PMU
+ * counter cannot be written because its clock is gated in power
+ * saving mode, which leads to errors or inaccurate counts. The
+ * clock can be turned on through the PMU global enable control.
+ * Both the irq handler and pmu_start() call this function to set
+ * the period. In irq context the PMU is already enabled, so the
+ * counter is written directly. In all other cases the PMU is
+ * disabled, so enable it to turn on the counter clock, write the
+ * counter, and then restore the PMU enable state; the counter
+ * holds its value without a clock.
+ */
+ if (enable || !erratum)
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ else
+ hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
+}
+
static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index c78a6fd6c..b4efdddb2 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -313,6 +313,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
u64 event_config = 0;
uint64_t cmask;
+ /* driver does not support branch stack sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
hwc->flags = 0;
mapped_event = rvpmu->event_map(event, &event_config);
if (mapped_event < 0) {
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index b700f52b7..11fcb1867 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -110,8 +110,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 24c3371e2..27f221a0f 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -603,7 +603,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -611,11 +611,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 05eab9014..a4746f6cb 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -185,6 +185,10 @@
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8)
+#define U3P_U3_PHYD_TOP1 0x100
+#define P3D_RG_PHY_MODE GENMASK(2, 1)
+#define P3D_RG_FORCE_PHY_MODE BIT(0)
+
#define U3P_U3_PHYD_RXDET1 0x128
#define P3D_RG_RXDET_STB2_SET GENMASK(17, 9)
@@ -327,6 +331,7 @@ struct mtk_phy_instance {
int discth;
int pre_emphasis;
bool bc12_en;
+ bool type_force_mode;
};
struct mtk_tphy {
@@ -768,6 +773,23 @@ static void u3_phy_instance_init(struct mtk_tphy *tphy,
void __iomem *phya = u3_banks->phya;
void __iomem *phyd = u3_banks->phyd;
+ if (instance->type_force_mode) {
+ /* force phy as usb mode, default is pcie rc mode */
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_TOP1, P3D_RG_PHY_MODE, 1);
+ mtk_phy_set_bits(phyd + U3P_U3_PHYD_TOP1, P3D_RG_FORCE_PHY_MODE);
+ /* power down the PHY via IP and PIPE reset */
+ mtk_phy_set_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLD,
+ P3C_FORCE_IP_SW_RST | P3C_MCU_BUS_CK_GATE_EN);
+ mtk_phy_set_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLE,
+ P3C_RG_SWRST_U3_PHYD | P3C_RG_SWRST_U3_PHYD_FORCE_EN);
+ udelay(10);
+ /* power the PHY back on */
+ mtk_phy_clear_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLD,
+ P3C_FORCE_IP_SW_RST | P3C_MCU_BUS_CK_GATE_EN);
+ mtk_phy_clear_bits(u3_banks->chip + U3P_U3_CHIP_GPIO_CTLE,
+ P3C_RG_SWRST_U3_PHYD | P3C_RG_SWRST_U3_PHYD_FORCE_EN);
+ }
+
/* gating PCIe Analog XTAL clock */
mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
@@ -1120,6 +1142,9 @@ static void phy_parse_property(struct mtk_tphy *tphy,
{
struct device *dev = &instance->phy->dev;
+ if (instance->type == PHY_TYPE_USB3)
+ instance->type_force_mode = device_property_read_bool(dev, "mediatek,force-mode");
+
if (instance->type != PHY_TYPE_USB2)
return;
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index c1a41b6cd..b5ac2b799 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -96,6 +96,8 @@ static const struct serdes_mux lan966x_serdes_muxes[] = {
SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
+ SERDES_MUX_SGMII(SERDES6G(2), 4, 0, 0),
+
SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index 840b7f8a3..ee4ce4249 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -6,11 +6,11 @@
*
*/
#include <linux/of.h>
-#include<linux/phy/phy.h>
-#include<linux/platform_device.h>
-#include<linux/module.h>
-#include<linux/gpio.h>
-#include<linux/gpio/consumer.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/mux/consumer.h>
struct can_transceiver_data {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 96a0b1e11..d9be6a4d5 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -959,7 +959,7 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
if (!phy)
return ERR_PTR(-ENOMEM);
- id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&phy_ida, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "unable to get id\n");
ret = id;
@@ -1232,7 +1232,7 @@ static void phy_release(struct device *dev)
dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
debugfs_remove_recursive(phy->debugfs);
regulator_put(phy->pwr);
- ida_simple_remove(&phy_ida, phy->id);
+ ida_free(&phy_ida, phy->id);
kfree(phy);
}
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index d891058b7..846f8c995 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -63,7 +63,7 @@ config PHY_QCOM_QMP_COMBO
depends on DRM || DRM=n
select GENERIC_PHY
select MFD_SYSCON
- select DRM_PANEL_BRIDGE if DRM
+ select DRM_AUX_BRIDGE if DRM_BRIDGE
help
Enable this to support the QMP Combo PHY transceiver that is used
with USB3 and DisplayPort controllers on Qualcomm chips.
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index ffd609ac6..eb60e950a 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
-obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o
+obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o phy-qcom-qmp-usbc.o
obj-$(CONFIG_PHY_QCOM_QMP_PCIE) += phy-qcom-qmp-pcie.o
obj-$(CONFIG_PHY_QCOM_QMP_PCIE_8996) += phy-qcom-qmp-pcie-msm8996.o
obj-$(CONFIG_PHY_QCOM_QMP_UFS) += phy-qcom-qmp-ufs.o
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index a623f092b..a43e20abb 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -37,56 +37,28 @@
#define EUSB2_TUNE_EUSB_EQU 0x5A
#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
- { \
- .reg = r, \
- .val = v, \
- }
-
-enum reg_fields {
- F_TUNE_EUSB_HS_COMP_CUR,
- F_TUNE_EUSB_EQU,
- F_TUNE_EUSB_SLEW,
- F_TUNE_USB2_HS_COMP_CUR,
- F_TUNE_USB2_PREEM,
- F_TUNE_USB2_EQU,
- F_TUNE_USB2_SLEW,
- F_TUNE_SQUELCH_U,
- F_TUNE_HSDISC,
- F_TUNE_RES_FSDIF,
- F_TUNE_IUSB2,
- F_TUNE_USB2_CROSSOVER,
- F_NUM_TUNE_FIELDS,
-
- F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
- F_FORCE_EN_5,
-
- F_EN_CTL1,
-
- F_RPTR_STATUS,
- F_NUM_FIELDS,
-};
-
-static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
- [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
- [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
- [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
- [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
- [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
- [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
- [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
- [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
- [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
- [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
- [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
- [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-
- [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
- [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-
- [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-
- [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+enum eusb2_reg_layout {
+ TUNE_EUSB_HS_COMP_CUR,
+ TUNE_EUSB_EQU,
+ TUNE_EUSB_SLEW,
+ TUNE_USB2_HS_COMP_CUR,
+ TUNE_USB2_PREEM,
+ TUNE_USB2_EQU,
+ TUNE_USB2_SLEW,
+ TUNE_SQUELCH_U,
+ TUNE_HSDISC,
+ TUNE_RES_FSDIF,
+ TUNE_IUSB2,
+ TUNE_USB2_CROSSOVER,
+ NUM_TUNE_FIELDS,
+
+ FORCE_VAL_5 = NUM_TUNE_FIELDS,
+ FORCE_EN_5,
+
+ EN_CTL1,
+
+ RPTR_STATUS,
+ LAYOUT_SIZE,
};
struct eusb2_repeater_cfg {
@@ -98,10 +70,11 @@ struct eusb2_repeater_cfg {
struct eusb2_repeater {
struct device *dev;
- struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap *regmap;
struct phy *phy;
struct regulator_bulk_data *vregs;
const struct eusb2_repeater_cfg *cfg;
+ u32 base;
enum phy_mode mode;
};
@@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
- [F_TUNE_IUSB2] = 0x8,
- [F_TUNE_SQUELCH_U] = 0x3,
- [F_TUNE_USB2_PREEM] = 0x5,
+static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+ [TUNE_IUSB2] = 0x8,
+ [TUNE_SQUELCH_U] = 0x3,
+ [TUNE_USB2_PREEM] = 0x5,
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
static int eusb2_repeater_init(struct phy *phy)
{
- struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
struct device_node *np = rptr->dev->of_node;
- u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
- u8 override;
+ struct regmap *regmap = rptr->regmap;
+ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+ u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+ u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+ u32 base = rptr->base;
u32 val;
int ret;
- int i;
+
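+ /* Optional DT properties override the per-config tuning defaults. */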
+ of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+ of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+ of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
if (ret)
return ret;
- regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
- if (init_tbl[i]) {
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
- } else {
- /* Write 0 if there's no value set */
- u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-
- regmap_field_update_bits(rptr->regs[i], mask, 0);
- }
- }
- memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
- init_tbl[F_TUNE_IUSB2] = override;
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
- if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
- init_tbl[F_TUNE_HSDISC] = override;
-
- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
- init_tbl[F_TUNE_USB2_PREEM] = override;
-
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-
- ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
- val, val & RPTR_OK, 10, 5);
+ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
@@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ struct regmap *regmap = rptr->regmap;
+ u32 base = rptr->base;
switch (mode) {
case PHY_MODE_USB_HOST:
@@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* per the eUSB 1.2 Spec. Implement a software workaround below until
* the PHY and controller fix the observed behavior.
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
break;
case PHY_MODE_USB_DEVICE:
/*
@@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* repeater doesn't clear previous value due to shared
* regulators (say host <-> device mode switch).
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, 0);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
break;
default:
return -EINVAL;
@@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *np = dev->of_node;
- struct regmap *regmap;
- int i, ret;
u32 res;
+ int ret;
rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
if (!rptr)
@@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (!rptr->cfg)
return -EINVAL;
- regmap = dev_get_regmap(dev->parent, NULL);
- if (!regmap)
+ rptr->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!rptr->regmap)
return -ENODEV;
ret = of_property_read_u32(np, "reg", &res);
if (ret < 0)
return ret;
- for (i = 0; i < F_NUM_FIELDS; i++)
- eusb2_repeater_tune_reg_fields[i].reg += res;
-
- ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
- eusb2_repeater_tune_reg_fields,
- F_NUM_FIELDS);
- if (ret)
- return ret;
+ rptr->base = res;
ret = eusb2_repeater_init_vregs(rptr);
if (ret < 0) {
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index 03fb0d4b7..20d4c020a 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -297,7 +297,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(qphy->phy),
"failed to create phy\n");
- qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ qphy->vreg = devm_regulator_get(dev, "vdd");
if (IS_ERR(qphy->vreg))
return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 9c87845c7..e74e1bf9e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -21,7 +21,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/typec_mux.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
@@ -112,6 +112,7 @@ enum qphy_reg_layout {
QPHY_COM_BIAS_EN_CLKBUFLR_EN,
QPHY_DP_PHY_STATUS,
+ QPHY_DP_PHY_VCO_DIV,
QPHY_TX_TX_POL_INV,
QPHY_TX_TX_DRV_LVL,
@@ -137,6 +138,7 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V3_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
@@ -161,6 +163,7 @@ static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V4_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
@@ -185,6 +188,7 @@ static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V5_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
@@ -209,6 +213,7 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
[QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
[QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
[QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
@@ -1203,6 +1208,127 @@ static const struct qmp_phy_init_tbl sc8280xp_usb43dp_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0xba),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0xba),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x76),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE1, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAXVAL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_LANE_MODE_2, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_LANE_MODE_3, 0x50),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B0, 0xc3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B1, 0xc3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B2, 0xd8),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B3, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B4, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B5, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE_0_1_B6, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B0, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B1, 0xee),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B2, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B4, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B5, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_MODE_RATE2_B6, 0xe3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_IVCM_CAL_CODE_OVERRIDE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_IVCM_CAL_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_SUMMER_CAL_SPD_MODE, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_PI_CONTROLS, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_PI_CTRL1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_PI_CTRL2, 0x48),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_SB2_GAIN2_RATE2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_IVCM_POSTCAL_OFFSET, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_VGA_CAL_CNTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_VGA_CAL_MAN_VAL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_DFE_DAC_ENABLE1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_DFE_3, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_GM_CAL, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_RX_RX_BKUP_CTRL1, 0x14),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
/* list of regulators */
struct qmp_regulator_data {
const char *name;
@@ -1419,8 +1545,6 @@ struct qmp_combo {
struct clk_hw dp_link_hw;
struct clk_hw dp_pixel_hw;
- struct drm_bridge bridge;
-
struct typec_switch_dev *sw;
enum typec_orientation orientation;
};
@@ -1684,6 +1808,51 @@ static const struct qmp_phy_cfg sc8280xp_usb43dpphy_cfg = {
.regs = qmp_v5_5nm_usb3phy_regs_layout,
};
+static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v5,
+
+ .serdes_tbl = x1e80100_usb43dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_serdes_tbl),
+ .tx_tbl = x1e80100_usb43dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_tx_tbl),
+ .rx_tbl = x1e80100_usb43dp_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_rx_tbl),
+ .pcs_tbl = x1e80100_usb43dp_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_tbl),
+ .pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v45_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
.offsets = &qmp_combo_offsets_v3,
@@ -2047,9 +2216,9 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
writel(val, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
if (reverse)
- writel(0x4c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x4c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
else
- writel(0x5c, qmp->pcs + QSERDES_DP_PHY_MODE);
+ writel(0x5c, qmp->dp_dp_phy + QSERDES_DP_PHY_MODE);
return reverse;
}
@@ -2059,6 +2228,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div;
unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
switch (dp_opts->link_rate) {
case 1620:
@@ -2081,7 +2251,7 @@ static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_VCO_DIV]);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
@@ -3191,44 +3361,6 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
}
#endif
-#if IS_ENABLED(CONFIG_DRM)
-static int qmp_combo_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct qmp_combo *qmp = container_of(bridge, struct qmp_combo, bridge);
- struct drm_bridge *next_bridge;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
- return -EINVAL;
-
- next_bridge = devm_drm_of_get_bridge(qmp->dev, qmp->dev->of_node, 0, 0);
- if (IS_ERR(next_bridge)) {
- dev_err(qmp->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
- return PTR_ERR(next_bridge);
- }
-
- return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs qmp_combo_bridge_funcs = {
- .attach = qmp_combo_bridge_attach,
-};
-
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
- qmp->bridge.funcs = &qmp_combo_bridge_funcs;
- qmp->bridge.of_node = qmp->dev->of_node;
-
- return devm_drm_bridge_add(qmp->dev, &qmp->bridge);
-}
-#else
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
- return 0;
-}
-#endif
-
static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np)
{
struct device *dev = qmp->dev;
@@ -3436,14 +3568,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_combo_typec_switch_register(qmp);
- if (ret)
- return ret;
-
- ret = qmp_combo_dp_register_bridge(qmp);
- if (ret)
- return ret;
-
/* Check for legacy binding with child nodes. */
usb_np = of_get_child_by_name(dev->of_node, "usb3-phy");
if (usb_np) {
@@ -3463,6 +3587,14 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
+ ret = qmp_combo_typec_switch_register(qmp);
+ if (ret)
+ goto err_node_put;
+
+ ret = drm_aux_bridge_register(dev);
+ if (ret)
+ goto err_node_put;
+
pm_runtime_set_active(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
@@ -3558,6 +3690,14 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.compatible = "qcom,sm8550-qmp-usb3-dp-phy",
.data = &sm8550_usb3dpphy_cfg,
},
+ {
+ .compatible = "qcom,sm8650-qmp-usb3-dp-phy",
+ .data = &sm8550_usb3dpphy_cfg,
+ },
+ {
+ .compatible = "qcom,x1e80100-qmp-usb3-dp-phy",
+ .data = &x1e80100_usb3dpphy_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index b64598ac5..2af7115ef 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -1909,6 +1909,35 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
};
+static const struct qmp_phy_init_tbl sm8650_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0xd3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0xd3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B4, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B5, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B6, 0xee),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B0, 0x23),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B1, 0x9b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B2, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B3, 0xdf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
+};
+
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
@@ -3047,6 +3076,36 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.has_nocsr_reset = true,
};
+static const struct qmp_phy_cfg sm8650_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v6_20,
+
+ .tbls = {
+ .serdes = sm8550_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_serdes_tbl),
+ .tx = sm8550_qmp_gen4x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_tx_tbl),
+ .rx = sm8650_qmp_gen4x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8650_qmp_gen4x2_pcie_rx_tbl),
+ .pcs = sm8550_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc = sm8550_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .ln_shrd = sm8550_qmp_gen4x2_pcie_ln_shrd_tbl,
+ .ln_shrd_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_ln_shrd_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = sm8550_qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+ .has_nocsr_reset = true,
+};
+
static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
.lanes = 2,
.offsets = &qmp_pcie_offsets_v5_20,
@@ -3820,6 +3879,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
}, {
.compatible = "qcom,sm8550-qmp-gen4x2-pcie-phy",
.data = &sm8550_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8650-qmp-gen3x2-pcie-phy",
+ .data = &sm8550_qmp_gen3x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8650-qmp-gen4x2-pcie-phy",
+ .data = &sm8650_qmp_gen4x2_pciephy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
index c23d5e41e..fe6c450f6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
@@ -12,6 +12,7 @@
#define QPHY_V6_PCS_UFS_SW_RESET 0x008
#define QPHY_V6_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
#define QPHY_V6_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V6_PCS_UFS_PCS_CTRL1 0x020
#define QPHY_V6_PCS_UFS_PLL_CNTL 0x02c
#define QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
#define QPHY_V6_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h
new file mode 100644
index 000000000..24368d45a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v7.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V7_H_
+#define QCOM_PHY_QMP_PCS_USB_V7_H_
+
+#define QPHY_V7_PCS_USB3_POWER_STATE_CONFIG1 0x00
+#define QPHY_V7_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x08
+#define QPHY_V7_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x14
+#define QPHY_V7_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
+#define QPHY_V7_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
+#define QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
+#define QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x44
+
+#endif
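
These PCS_USB offsets are the v7 backing for the QPHY_PCS_AUTONOMOUS_MODE_CTRL and LFPS entries that qmp_v7_usb3phy_regs_layout adds further down, which keeps the suspend/wakeup path revision-agnostic. A hedged sketch of how such a layout slot is consumed, modeled on the driver's autonomous-mode helper (the function name is illustrative; ARCVR_DTCT_EN and ALFPS_DTCT_EN are the bits defined in the new usbc driver at the end of this patch):

#include <linux/io.h>

static void enable_autonomous_sketch(void __iomem *pcs_usb,
				     const unsigned int *regs)
{
	/* Arm receiver-detect and LFPS wakeup events before suspend. */
	u32 val = readl(pcs_usb + regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL]);

	val |= ARCVR_DTCT_EN | ALFPS_DTCT_EN;
	writel(val, pcs_usb + regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL]);
}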
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
new file mode 100644
index 000000000..c7759892e
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V7_H_
+#define QCOM_PHY_QMP_PCS_V7_H_
+
+/* Only for QMP V7 PHY - USB/PCIe PCS registers */
+#define QPHY_V7_PCS_SW_RESET 0x000
+#define QPHY_V7_PCS_PCS_STATUS1 0x014
+#define QPHY_V7_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V7_PCS_START_CONTROL 0x044
+#define QPHY_V7_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V7_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V7_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V7_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V7_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V7_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V7_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V7_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V7_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V7_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V7_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V7_PCS_EQ_CONFIG5 0x1ec
+
+#endif
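
Every offset in the two new PCS headers above follows the same contract as older revisions: a byte offset from the ioremapped base of its register block, written in bulk by the driver's table loop. A minimal sketch of that loop, assuming the qmp_phy_init_tbl layout reproduced in the new usbc.c later in this patch:

#include <linux/io.h>

/* Program one init table against a mapped block, in table order;
 * mirrors the shape of the driver's qmp_configure() helpers. */
static void qmp_configure_sketch(void __iomem *base,
				 const struct qmp_phy_init_tbl *tbl,
				 int num)
{
	int i;

	for (i = 0; i < num; i++, tbl++)
		writel(tbl->val, base + tbl->offset);
}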
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
index f420f8faf..ec7291424 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
@@ -22,6 +22,8 @@
#define QSERDES_V6_COM_DIV_FRAC_START2_MODE1 0x34
#define QSERDES_V6_COM_DIV_FRAC_START3_MODE1 0x38
#define QSERDES_V6_COM_HSCLK_SEL_1 0x3c
+#define QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE1 0x40
+#define QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE1 0x44
#define QSERDES_V6_COM_VCO_TUNE1_MODE1 0x48
#define QSERDES_V6_COM_VCO_TUNE2_MODE1 0x4c
#define QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x50
@@ -48,6 +50,7 @@
#define QSERDES_V6_COM_VCO_TUNE2_MODE0 0xac
#define QSERDES_V6_COM_BG_TIMER 0xbc
#define QSERDES_V6_COM_SSC_EN_CENTER 0xc0
+#define QSERDES_V6_COM_SSC_ADJ_PER1 0xc4
#define QSERDES_V6_COM_SSC_PER1 0xcc
#define QSERDES_V6_COM_SSC_PER2 0xd0
#define QSERDES_V6_COM_PLL_POST_DIV_MUX 0xd8
@@ -56,6 +59,7 @@
#define QSERDES_V6_COM_SYS_CLK_CTRL 0xe4
#define QSERDES_V6_COM_SYSCLK_BUF_ENABLE 0xe8
#define QSERDES_V6_COM_PLL_IVCO 0xf4
+#define QSERDES_V6_COM_PLL_IVCO_MODE1 0xf8
#define QSERDES_V6_COM_SYSCLK_EN_SEL 0x110
#define QSERDES_V6_COM_RESETSM_CNTRL 0x118
#define QSERDES_V6_COM_LOCK_CMP_EN 0x120
@@ -63,6 +67,7 @@
#define QSERDES_V6_COM_VCO_TUNE_CTRL 0x13c
#define QSERDES_V6_COM_VCO_TUNE_MAP 0x140
#define QSERDES_V6_COM_VCO_TUNE_INITVAL2 0x148
+#define QSERDES_V6_COM_VCO_TUNE_MAXVAL2 0x158
#define QSERDES_V6_COM_CLK_SELECT 0x164
#define QSERDES_V6_COM_CORE_CLK_EN 0x170
#define QSERDES_V6_COM_CMN_CONFIG_1 0x174
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h
new file mode 100644
index 000000000..7430f4921
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v7.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V7_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V7_H_
+
+/* Only for QMP V7 PHY - QSERDES COM registers */
+
+#define QSERDES_V7_COM_SSC_STEP_SIZE1_MODE1 0x00
+#define QSERDES_V7_COM_SSC_STEP_SIZE2_MODE1 0x04
+#define QSERDES_V7_COM_CP_CTRL_MODE1 0x10
+#define QSERDES_V7_COM_PLL_RCTRL_MODE1 0x14
+#define QSERDES_V7_COM_PLL_CCTRL_MODE1 0x18
+#define QSERDES_V7_COM_CORECLK_DIV_MODE1 0x1c
+#define QSERDES_V7_COM_LOCK_CMP1_MODE1 0x20
+#define QSERDES_V7_COM_LOCK_CMP2_MODE1 0x24
+#define QSERDES_V7_COM_DEC_START_MODE1 0x28
+#define QSERDES_V7_COM_DEC_START_MSB_MODE1 0x2c
+#define QSERDES_V7_COM_DIV_FRAC_START1_MODE1 0x30
+#define QSERDES_V7_COM_DIV_FRAC_START2_MODE1 0x34
+#define QSERDES_V7_COM_DIV_FRAC_START3_MODE1 0x38
+#define QSERDES_V7_COM_HSCLK_SEL_1 0x3c
+#define QSERDES_V7_COM_INTEGLOOP_GAIN0_MODE1 0x40
+#define QSERDES_V7_COM_INTEGLOOP_GAIN1_MODE1 0x44
+#define QSERDES_V7_COM_VCO_TUNE1_MODE1 0x48
+#define QSERDES_V7_COM_VCO_TUNE2_MODE1 0x4c
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x50
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x54
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x58
+#define QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x5c
+#define QSERDES_V7_COM_SSC_STEP_SIZE1_MODE0 0x60
+#define QSERDES_V7_COM_SSC_STEP_SIZE2_MODE0 0x64
+#define QSERDES_V7_COM_CP_CTRL_MODE0 0x70
+#define QSERDES_V7_COM_PLL_RCTRL_MODE0 0x74
+#define QSERDES_V7_COM_PLL_CCTRL_MODE0 0x78
+#define QSERDES_V7_COM_PLL_CORE_CLK_DIV_MODE0 0x7c
+#define QSERDES_V7_COM_LOCK_CMP1_MODE0 0x80
+#define QSERDES_V7_COM_LOCK_CMP2_MODE0 0x84
+#define QSERDES_V7_COM_DEC_START_MODE0 0x88
+#define QSERDES_V7_COM_DEC_START_MSB_MODE0 0x8c
+#define QSERDES_V7_COM_DIV_FRAC_START1_MODE0 0x90
+#define QSERDES_V7_COM_DIV_FRAC_START2_MODE0 0x94
+#define QSERDES_V7_COM_DIV_FRAC_START3_MODE0 0x98
+#define QSERDES_V7_COM_HSCLK_HS_SWITCH_SEL_1 0x9c
+#define QSERDES_V7_COM_INTEGLOOP_GAIN0_MODE0 0xa0
+#define QSERDES_V7_COM_INTEGLOOP_GAIN1_MODE0 0xa4
+#define QSERDES_V7_COM_VCO_TUNE1_MODE0 0xa8
+#define QSERDES_V7_COM_VCO_TUNE2_MODE0 0xac
+#define QSERDES_V7_COM_BG_TIMER 0xbc
+#define QSERDES_V7_COM_SSC_EN_CENTER 0xc0
+#define QSERDES_V7_COM_SSC_ADJ_PER1 0xc4
+#define QSERDES_V7_COM_SSC_PER1 0xcc
+#define QSERDES_V7_COM_SSC_PER2 0xd0
+#define QSERDES_V7_COM_PLL_POST_DIV_MUX 0xd8
+#define QSERDES_V7_COM_PLL_BIAS_EN_CLK_BUFLR_EN 0xdc
+#define QSERDES_V7_COM_CLK_ENABLE1 0xe0
+#define QSERDES_V7_COM_SYS_CLK_CTRL 0xe4
+#define QSERDES_V7_COM_SYSCLK_BUF_ENABLE 0xe8
+#define QSERDES_V7_COM_PLL_IVCO 0xf4
+#define QSERDES_V7_COM_PLL_IVCO_MODE1 0xf8
+#define QSERDES_V7_COM_SYSCLK_EN_SEL 0x110
+#define QSERDES_V7_COM_RESETSM_CNTRL 0x118
+#define QSERDES_V7_COM_LOCK_CMP_EN 0x120
+#define QSERDES_V7_COM_LOCK_CMP_CFG 0x124
+#define QSERDES_V7_COM_VCO_TUNE_CTRL 0x13c
+#define QSERDES_V7_COM_VCO_TUNE_MAP 0x140
+#define QSERDES_V7_COM_VCO_TUNE_INITVAL2 0x148
+#define QSERDES_V7_COM_VCO_TUNE_MAXVAL2 0x158
+#define QSERDES_V7_COM_CLK_SELECT 0x164
+#define QSERDES_V7_COM_CORE_CLK_EN 0x170
+#define QSERDES_V7_COM_CMN_CONFIG_1 0x174
+#define QSERDES_V7_COM_SVS_MODE_CLK_SEL 0x17c
+#define QSERDES_V7_COM_CMN_MISC_1 0x184
+#define QSERDES_V7_COM_CMN_MODE 0x188
+#define QSERDES_V7_COM_PLL_VCO_DC_LEVEL_CTRL 0x198
+#define QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_1 0x1a4
+#define QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_2 0x1a8
+#define QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_3 0x1ac
+#define QSERDES_V7_COM_ADDITIONAL_MISC 0x1b4
+#define QSERDES_V7_COM_ADDITIONAL_MISC_2 0x1b8
+#define QSERDES_V7_COM_ADDITIONAL_MISC_3 0x1bc
+#define QSERDES_V7_COM_CMN_STATUS 0x1d0
+#define QSERDES_V7_COM_C_READY_STATUS 0x1f8
+
+#endif
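
The V7 COM map is a near-verbatim copy of V6 plus this patch's V6 additions, which is why the x1e80100 serdes table later in the diff mirrors the sdx75 one register for register. Purely as an illustration (the driver never includes both headers this way), the parity can be pinned down at compile time:

#include <linux/build_bug.h>
#include "phy-qcom-qmp-qserdes-com-v6.h"
#include "phy-qcom-qmp-qserdes-com-v7.h"

/* Both revisions place PLL_IVCO at 0xf4; flag any future divergence. */
static_assert(QSERDES_V7_COM_PLL_IVCO == QSERDES_V6_COM_PLL_IVCO,
	      "V7 COM register map diverged from V6");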
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
index 15bcb4ba9..35d497fd9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
@@ -10,10 +10,18 @@
#define QSERDES_UFS_V6_TX_RES_CODE_LANE_RX 0x2c
#define QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX 0x30
#define QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_UFS_V6_TX_LANE_MODE_1 0x7c
+#define QSERDES_UFS_V6_TX_FR_DCC_CTRL 0x108
#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2 0x08
#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4 0x10
+#define QSERDES_UFS_V6_RX_UCDR_SO_SATURATION 0x28
+#define QSERDES_UFS_V6_RX_UCDR_PI_CTRL1 0x58
+#define QSERDES_UFS_V6_RX_RX_TERM_BW_CTRL0 0xc4
+#define QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2 0xd4
+#define QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4 0xdc
#define QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL 0x178
+#define QSERDES_UFS_V6_RX_INTERFACE_MODE 0x1e0
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0 0x208
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1 0x20c
#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3 0x214
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
index 8883e1de7..23ffcfae9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
@@ -23,6 +23,7 @@
#define QSERDES_V6_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
#define QSERDES_V6_TX_BIST_PATTERN7 0x7c
#define QSERDES_V6_TX_LANE_MODE_1 0x84
+#define QSERDES_V6_TX_LANE_MODE_2 0x88
#define QSERDES_V6_TX_LANE_MODE_3 0x8c
#define QSERDES_V6_TX_LANE_MODE_4 0x90
#define QSERDES_V6_TX_LANE_MODE_5 0x94
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
index 5385a8b60..6ed5339fd 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
@@ -15,10 +15,13 @@
#define QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2 0x08
#define QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3 0x0c
+#define QSERDES_V6_20_RX_UCDR_SO_GAIN_RATE_2 0x18
#define QSERDES_V6_20_RX_UCDR_PI_CONTROLS 0x20
#define QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3 0x34
#define QSERDES_V6_20_RX_IVCM_CAL_CTRL2 0x9c
#define QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET 0xa0
+#define QSERDES_V6_20_RX_DFE_1 0xac
+#define QSERDES_V6_20_RX_DFE_2 0xb0
#define QSERDES_V6_20_RX_DFE_3 0xb4
#define QSERDES_V6_20_RX_VGA_CAL_MAN_VAL 0xe8
#define QSERDES_V6_20_RX_GM_CAL 0x10c
@@ -41,5 +44,6 @@
#define QSERDES_V6_20_RX_MODE_RATE3_B4 0x220
#define QSERDES_V6_20_RX_MODE_RATE3_B5 0x224
#define QSERDES_V6_20_RX_MODE_RATE3_B6 0x228
+#define QSERDES_V6_20_RX_BKUP_CTRL1 0x22c
#endif
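
The RX offsets added here (SO_GAIN_RATE_2, DFE_1/DFE_2, BKUP_CTRL1) are the kind of registers the new sm8650 gen4x2 RX tuning table, whose tail opens this section, programs. For reference, one line of such a table, expanded through the QMP_PHY_INIT_CFG macro shown in the new usbc.c below (the standalone variable is illustrative):

#include "phy-qcom-qmp-qserdes-txrx-v6_20.h"

/* QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x43) expands to: */
static const struct qmp_phy_init_tbl example_entry = {
	.offset = QSERDES_V6_20_RX_MODE_RATE3_B4,	/* 0x220 */
	.val = 0x43,
	.lane_mask = 0xff,				/* all lanes */
};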
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
new file mode 100644
index 000000000..a814ad11a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+
+#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V6_N4_TX_LANE_MODE_1 0x78
+#define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c
+#define QSERDES_V6_N4_TX_LANE_MODE_3 0x80
+
+#define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8
+#define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18
+#define QSERDES_V6_N4_RX_UCDR_PI_CONTROLS 0x20
+#define QSERDES_V6_N4_RX_IVCM_CAL_CODE_OVERRIDE 0x94
+#define QSERDES_V6_N4_RX_RX_IVCM_CAL_CTRL2 0x9c
+#define QSERDES_V6_N4_RX_RX_IVCM_POSTCAL_OFFSET 0xa0
+#define QSERDES_V6_N4_RX_DFE_3 0xb4
+#define QSERDES_V6_N4_RX_VGA_CAL_CNTRL1 0xe0
+#define QSERDES_V6_N4_RX_VGA_CAL_MAN_VAL 0xe8
+#define QSERDES_V6_N4_RX_GM_CAL 0x10c
+#define QSERDES_V6_N4_RX_SIGDET_ENABLES 0x148
+#define QSERDES_V6_N4_RX_SIGDET_CNTRL 0x14c
+#define QSERDES_V6_N4_RX_SIGDET_DEGLITCH_CNTRL 0x154
+#define QSERDES_V6_N4_RX_DFE_CTLE_POST_CAL_OFFSET 0x194
+#define QSERDES_V6_N4_RX_Q_PI_INTRINSIC_BIAS_RATE32 0x1dc
+#define QSERDES_V6_N4_RX_UCDR_PI_CTRL1 0x23c
+#define QSERDES_V6_N4_RX_UCDR_PI_CTRL2 0x240
+#define QSERDES_V6_N4_RX_UCDR_SB2_GAIN2_RATE2 0x27c
+#define QSERDES_V6_N4_RX_DFE_DAC_ENABLE1 0x298
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B0 0x2b8
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B1 0x2bc
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B2 0x2c0
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B3 0x2c4
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B4 0x2c8
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B5 0x2cc
+#define QSERDES_V6_N4_RX_MODE_RATE_0_1_B6 0x2d0
+#define QSERDES_V6_N4_RX_MODE_RATE2_B0 0x2d4
+#define QSERDES_V6_N4_RX_MODE_RATE2_B1 0x2d8
+#define QSERDES_V6_N4_RX_MODE_RATE2_B2 0x2dc
+#define QSERDES_V6_N4_RX_MODE_RATE2_B3 0x2e0
+#define QSERDES_V6_N4_RX_MODE_RATE2_B4 0x2e4
+#define QSERDES_V6_N4_RX_MODE_RATE2_B5 0x2e8
+#define QSERDES_V6_N4_RX_MODE_RATE2_B6 0x2ec
+#define QSERDES_V6_N4_RX_RX_SUMMER_CAL_SPD_MODE 0x30c
+#define QSERDES_V6_N4_RX_RX_BKUP_CTRL1 0x310
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
new file mode 100644
index 000000000..91f865b11
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V7_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V7_H_
+
+#define QSERDES_V7_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V7_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V7_TX_PRE_STALL_LDO_BOOST_EN 0x20
+#define QSERDES_V7_TX_TX_BAND 0x24
+#define QSERDES_V7_TX_INTERFACE_SELECT 0x2c
+#define QSERDES_V7_TX_RES_CODE_LANE_TX 0x34
+#define QSERDES_V7_TX_RES_CODE_LANE_RX 0x38
+#define QSERDES_V7_TX_RES_CODE_LANE_OFFSET_TX 0x3c
+#define QSERDES_V7_TX_RES_CODE_LANE_OFFSET_RX 0x40
+#define QSERDES_V7_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
+#define QSERDES_V7_TX_BIST_PATTERN7 0x7c
+#define QSERDES_V7_TX_LANE_MODE_1 0x84
+#define QSERDES_V7_TX_LANE_MODE_2 0x88
+#define QSERDES_V7_TX_LANE_MODE_3 0x8c
+#define QSERDES_V7_TX_LANE_MODE_4 0x90
+#define QSERDES_V7_TX_LANE_MODE_5 0x94
+#define QSERDES_V7_TX_RCV_DETECT_LVL_2 0xa4
+#define QSERDES_V7_TX_TRAN_DRVR_EMP_EN 0xc0
+#define QSERDES_V7_TX_TX_INTERFACE_MODE 0xc4
+#define QSERDES_V7_TX_VMODE_CTRL1 0xc8
+#define QSERDES_V7_TX_PI_QEC_CTRL 0xe4
+
+#define QSERDES_V7_RX_UCDR_FO_GAIN 0x08
+#define QSERDES_V7_RX_UCDR_SO_GAIN 0x14
+#define QSERDES_V7_RX_UCDR_FASTLOCK_FO_GAIN 0x30
+#define QSERDES_V7_RX_UCDR_SO_SATURATION_AND_ENABLE 0x34
+#define QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_LOW 0x3c
+#define QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_HIGH 0x40
+#define QSERDES_V7_RX_UCDR_PI_CONTROLS 0x44
+#define QSERDES_V7_RX_UCDR_SB2_THRESH1 0x4c
+#define QSERDES_V7_RX_UCDR_SB2_THRESH2 0x50
+#define QSERDES_V7_RX_UCDR_SB2_GAIN1 0x54
+#define QSERDES_V7_RX_UCDR_SB2_GAIN2 0x58
+#define QSERDES_V7_RX_AUX_DATA_TCOARSE_TFINE 0x60
+#define QSERDES_V7_RX_TX_ADAPT_POST_THRESH 0xcc
+#define QSERDES_V7_RX_VGA_CAL_CNTRL1 0xd4
+#define QSERDES_V7_RX_VGA_CAL_CNTRL2 0xd8
+#define QSERDES_V7_RX_GM_CAL 0xdc
+#define QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL2 0xec
+#define QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL3 0xf0
+#define QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL4 0xf4
+#define QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW 0xf8
+#define QSERDES_V7_RX_RX_IDAC_TSETTLE_HIGH 0xfc
+#define QSERDES_V7_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V7_RX_SIDGET_ENABLES 0x118
+#define QSERDES_V7_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V7_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V7_RX_RX_MODE_00_LOW 0x15c
+#define QSERDES_V7_RX_RX_MODE_00_HIGH 0x160
+#define QSERDES_V7_RX_RX_MODE_00_HIGH2 0x164
+#define QSERDES_V7_RX_RX_MODE_00_HIGH3 0x168
+#define QSERDES_V7_RX_RX_MODE_00_HIGH4 0x16c
+#define QSERDES_V7_RX_RX_MODE_01_LOW 0x170
+#define QSERDES_V7_RX_RX_MODE_01_HIGH 0x174
+#define QSERDES_V7_RX_RX_MODE_01_HIGH2 0x178
+#define QSERDES_V7_RX_RX_MODE_01_HIGH3 0x17c
+#define QSERDES_V7_RX_RX_MODE_01_HIGH4 0x180
+#define QSERDES_V7_RX_RX_MODE_10_LOW 0x184
+#define QSERDES_V7_RX_RX_MODE_10_HIGH 0x188
+#define QSERDES_V7_RX_RX_MODE_10_HIGH2 0x18c
+#define QSERDES_V7_RX_RX_MODE_10_HIGH3 0x190
+#define QSERDES_V7_RX_RX_MODE_10_HIGH4 0x194
+#define QSERDES_V7_RX_DFE_EN_TIMER 0x1a0
+#define QSERDES_V7_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
+#define QSERDES_V7_RX_DCC_CTRL1 0x1a8
+#define QSERDES_V7_RX_VTH_CODE 0x1b0
+#define QSERDES_V7_RX_SIGDET_CAL_CTRL1 0x1e4
+#define QSERDES_V7_RX_SIGDET_CAL_TRIM 0x1f8
+
+#endif
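
The TX/RX blocks described by this header are instantiated once per lane; on a two-lane PHY the driver replays the same table against the second lane's tx2/rx2 region, and individual entries can be restricted to one lane via the lane_mask field set by QMP_PHY_INIT_CFG_LANE (both visible in the new usbc.c below). A sketch of the assumed lane filter, where lane is passed as a one-hot value (1 for the first lane, 2 for the second):

#include <linux/io.h>

static void configure_lane_sketch(void __iomem *base,
				  const struct qmp_phy_init_tbl *tbl,
				  int num, u8 lane)
{
	int i;

	for (i = 0; i < num; i++, tbl++) {
		if (!(tbl->lane_mask & lane))	/* entry not for this lane */
			continue;
		writel(tbl->val, base + tbl->offset);
	}
}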
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 514fa14df..3c2e6255e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -763,22 +763,26 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x4c),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_hs_b_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
};

static const struct qmp_phy_init_tbl sm8550_ufsphy_tx[] = {
- QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x05),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_FR_DCC_CTRL, 0x4c),
};

static const struct qmp_phy_init_tbl sm8550_ufsphy_rx[] = {
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2, 0x0c),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
@@ -801,6 +805,69 @@ static const struct qmp_phy_init_tbl sm8550_ufsphy_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x2b),
QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x44),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FO_GAIN_RATE4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B6, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B3, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B6, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B3, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B4, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B5, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B8, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B3, 0xb9),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE4_B6, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_SO_SATURATION, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_PI_CTRL1, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_RX_TERM_BW_CTRL0, 0xfa),
+};
+
+static const struct qmp_phy_init_tbl sm8650_ufsphy_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PCS_CTRL1, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x33),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x69),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
};

struct qmp_ufs_offsets {
@@ -1296,6 +1363,32 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.pcs = sm8550_ufsphy_pcs,
.pcs_num = ARRAY_SIZE(sm8550_ufsphy_pcs),
},
+ .tbls_hs_b = {
+ .serdes = sm8550_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8550_ufsphy_hs_b_serdes),
+ },
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = ufsphy_v6_regs_layout,
+};
+
+static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_ufs_offsets_v6,
+
+ .tbls = {
+ .serdes = sm8650_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8650_ufsphy_serdes),
+ .tx = sm8650_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8650_ufsphy_tx),
+ .rx = sm8650_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8650_ufsphy_rx),
+ .pcs = sm8650_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8650_ufsphy_pcs),
+ },
.clk_list = sdm845_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1826,6 +1919,9 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
}, {
.compatible = "qcom,sm8550-qmp-ufs-phy",
.data = &sm8550_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8650-qmp-ufs-phy",
+ .data = &sm8650_ufsphy_cfg,
},
{ },
};
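
One detail of the sm8550 cfg change above is easy to miss: .tbls_hs_b is an overlay, not a replacement. The base .tbls are always programmed, and the single-entry HS-B serdes table is applied on top only when the UFS link runs high-speed series B, retargeting VCO_TUNE_MAP. A sketch of the assumed control flow (qmp_ufs_init_sketch and the mode test paraphrase the driver's init path):

/* Base tables first, then the HS-B overlay when series B is in use. */
static void qmp_ufs_init_sketch(struct qmp_ufs *qmp,
				const struct qmp_phy_cfg *cfg)
{
	qmp_ufs_serdes_init(qmp, &cfg->tbls);
	if (qmp->mode == PHY_MODE_UFS_HS_B)
		qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_b);
}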
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 365f5d858..4c32304da 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -24,6 +24,8 @@
#include "phy-qcom-qmp-pcs-misc-v4.h"
#include "phy-qcom-qmp-pcs-usb-v4.h"
#include "phy-qcom-qmp-pcs-usb-v5.h"
+#include "phy-qcom-qmp-pcs-usb-v6.h"
+#include "phy-qcom-qmp-pcs-usb-v7.h"

/* QPHY_SW_RESET bit */
#define SW_RESET BIT(0)
@@ -119,15 +121,6 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_MISC_CLAMP_ENABLE] = QPHY_V3_PCS_MISC_CLAMP_ENABLE,
};

-static const unsigned int qmp_v3_usb3phy_regs_layout_qcm2290[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
- [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
- [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
- [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
-};
-
static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
[QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
@@ -151,6 +144,28 @@ static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
};

+static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V6_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
+static const unsigned int qmp_v7_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V7_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V7_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V7_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V7_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V7_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V7_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
static const struct qmp_phy_init_tbl ipq9574_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
@@ -490,115 +505,6 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
};

-static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
-};
-
-static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-};
-
static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
@@ -871,6 +777,134 @@ static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
};

+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x2e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_5, 0x5f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_GAIN2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_GM_CAL, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH3, 0xdf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH4, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH2, 0x9c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH3, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH4, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VTH_CODE, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_CTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sdx75_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+};
+
static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
@@ -937,99 +971,6 @@ static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
};

-static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
- QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
-};
-
-static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
-};
-
static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
@@ -1161,6 +1102,134 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
};

+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE1, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE1_MODE1, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE0, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE1_MODE0, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP_CFG, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_ADDITIONAL_MISC, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_1, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_5, 0x5f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_GAIN2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_GM_CAL, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH3, 0xdf),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH4, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH2, 0x9c),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH3, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH4, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_VTH_CODE, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CAL_CTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl x1e80100_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+};
+
struct qmp_usb_offsets {
u16 serdes;
u16 pcs;
@@ -1299,16 +1368,6 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
.rx = 0x400,
};

-static const struct qmp_usb_offsets qmp_usb_offsets_v3_qcm2290 = {
- .serdes = 0x0,
- .pcs = 0xc00,
- .pcs_misc = 0xa00,
- .tx = 0x200,
- .rx = 0x400,
- .tx2 = 0x600,
- .rx2 = 0x800,
-};
-
static const struct qmp_usb_offsets qmp_usb_offsets_v4 = {
.serdes = 0,
.pcs = 0x0800,
@@ -1325,6 +1384,22 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v5 = {
.rx = 0x1000,
};

+static const struct qmp_usb_offsets qmp_usb_offsets_v6 = {
+ .serdes = 0,
+ .pcs = 0x0200,
+ .pcs_usb = 0x1200,
+ .tx = 0x0e00,
+ .rx = 0x1000,
+};
+
+static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
+ .serdes = 0,
+ .pcs = 0x0200,
+ .pcs_usb = 0x1200,
+ .tx = 0x0e00,
+ .rx = 0x1000,
+};
+
static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
.lanes = 1,
@@ -1457,24 +1532,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};

-static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
- .lanes = 2,
-
- .offsets = &qmp_usb_offsets_v3_qcm2290,
-
- .serdes_tbl = msm8998_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
- .tx_tbl = msm8998_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
- .rx_tbl = msm8998_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
- .pcs_tbl = msm8998_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-};
-
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1567,6 +1624,28 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};

+static const struct qmp_phy_cfg sdx75_usb3_uniphy_cfg = {
+ .lanes = 1,
+ .offsets = &qmp_usb_offsets_v6,
+
+ .serdes_tbl = sdx75_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdx75_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sdx75_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdx75_usb3_uniphy_tx_tbl),
+ .rx_tbl = sdx75_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdx75_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sdx75_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdx75_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sdx75_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sdx75_usb3_uniphy_pcs_usb_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v6_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x1000,
+
+ .has_pwrdn_delay = true,
+};
+
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1590,22 +1669,24 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};

-static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
- .lanes = 2,
-
- .offsets = &qmp_usb_offsets_v3_qcm2290,
+static const struct qmp_phy_cfg x1e80100_usb3_uniphy_cfg = {
+ .lanes = 1,

- .serdes_tbl = qcm2290_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
- .tx_tbl = qcm2290_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
- .rx_tbl = qcm2290_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
- .pcs_tbl = qcm2290_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
+ .offsets = &qmp_usb_offsets_v7,
+
+ .serdes_tbl = x1e80100_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(x1e80100_usb3_uniphy_serdes_tbl),
+ .tx_tbl = x1e80100_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(x1e80100_usb3_uniphy_tx_tbl),
+ .rx_tbl = x1e80100_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(x1e80100_usb3_uniphy_rx_tbl),
+ .pcs_tbl = x1e80100_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(x1e80100_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = x1e80100_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb3_uniphy_pcs_usb_tbl),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout_qcm2290,
+ .regs = qmp_v7_usb3phy_regs_layout,
};

static void qmp_usb_configure_lane(void __iomem *base,
@@ -2262,12 +2343,6 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,msm8996-qmp-usb3-phy",
.data = &msm8996_usb3phy_cfg,
}, {
- .compatible = "qcom,msm8998-qmp-usb3-phy",
- .data = &msm8998_usb3phy_cfg,
- }, {
- .compatible = "qcom,qcm2290-qmp-usb3-phy",
- .data = &qcm2290_usb3phy_cfg,
- }, {
.compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
.data = &sa8775p_usb3_uniphy_cfg,
}, {
@@ -2283,8 +2358,8 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sdx65-qmp-usb3-uni-phy",
.data = &sdx65_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sm6115-qmp-usb3-phy",
- .data = &qcm2290_usb3phy_cfg,
+ .compatible = "qcom,sdx75-qmp-usb3-uni-phy",
+ .data = &sdx75_usb3_uniphy_cfg,
}, {
.compatible = "qcom,sm8150-qmp-usb3-uni-phy",
.data = &sm8150_usb3_uniphy_cfg,
@@ -2294,6 +2369,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
}, {
.compatible = "qcom,sm8350-qmp-usb3-uni-phy",
.data = &sm8350_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,x1e80100-qmp-usb3-uni-phy",
+ .data = &x1e80100_usb3_uniphy_cfg,
},
{ },
};
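
The msm8998 and qcm2290/sm6115 support deleted from this file is not dropped: the new phy-qcom-qmp-usbc.c below re-adds the same tables and layouts, splitting the Type-C-capable variants into their own driver (hence the typec_mux includes). Both drivers end PHY bring-up the same way, polling PHYSTATUS after starting the serdes and PCS; a hedged sketch using the PHYSTATUS and PHY_INIT_COMPLETE_TIMEOUT definitions visible in the new file (the wrapper function is illustrative):

#include <linux/iopoll.h>

static int qmp_wait_phy_ready(void __iomem *pcs, const unsigned int *regs)
{
	void __iomem *status = pcs + regs[QPHY_PCS_STATUS];
	u32 val;

	/* Poll every 200 us until PHYSTATUS clears, or time out. */
	return readl_poll_timeout(status, val, !(val & PHYSTATUS),
				  200, PHY_INIT_COMPLETE_TIMEOUT);
}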
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
new file mode 100644
index 000000000..3a4b4849d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
@@ -0,0 +1,1195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_mux.h>
+
+#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0: HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0: HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * mask of lanes for which this register is written,
+ * for cases when the second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
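+
+/*
+ * Illustrative use (hypothetical reg/val, not one of this driver's tables):
+ * QMP_PHY_INIT_CFG_LANE(reg, val, 2) programs the second lane only, while
+ * QMP_PHY_INIT_CFG(reg, val) targets all lanes via lane_mask = 0xff.
+ */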
+
+/* set of registers whose offsets differ per PHY */
+enum qphy_reg_layout {
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+};
+
+static const unsigned int qmp_v3_usb3phy_regs_layout_qcm2290[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
+};
+
+static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_INITVAL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0xc6),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x85),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
+};
+
+struct qmp_usbc_offsets {
+ u16 serdes;
+ u16 pcs;
+ u16 pcs_misc;
+ u16 tx;
+ u16 rx;
+ /* for PHYs with >= 2 lanes */
+ u16 tx2;
+ u16 rx2;
+};
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ const struct qmp_usbc_offsets *offsets;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ /* true if the PHY needs a delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+};
+
+struct qmp_usbc {
+ struct device *dev;
+
+ const struct qmp_phy_cfg *cfg;
+
+ void __iomem *serdes;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *tx2;
+ void __iomem *rx2;
+
+ struct regmap *tcsr_map;
+ u32 vls_clamp_reg;
+
+ struct clk *pipe_clk;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int num_resets;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ struct mutex phy_mutex;
+
+ enum phy_mode mode;
+ unsigned int usb_init_count;
+
+ struct phy *phy;
+
+ struct clk_fixed_rate pipe_clk_fixed;
+
+ struct typec_switch_dev *sw;
+ enum typec_orientation orientation;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write has gone through */
+ readl(base + offset);
+}
+
+/* list of clocks required by the PHY */
+static const char * const qmp_usbc_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+/* list of resets */
+static const char * const usb3phy_legacy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const usb3phy_reset_l[] = {
+ "phy_phy", "phy",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_usbc_offsets qmp_usbc_offsets_v3_qcm2290 = {
+ .serdes = 0x0,
+ .pcs = 0xc00,
+ .pcs_misc = 0xa00,
+ .tx = 0x200,
+ .rx = 0x400,
+ .tx2 = 0x600,
+ .rx2 = 0x800,
+};
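+
+/*
+ * Sketch of how this layout is consumed (see qmp_usbc_parse_dt()): a single
+ * MMIO region is mapped and each block is addressed at base + offset, e.g.
+ * qmp->tx = base + 0x200 and qmp->rx2 = base + 0x800 for this layout.
+ */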
+
+static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+ .offsets = &qmp_usbc_offsets_v3_qcm2290,
+
+ .serdes_tbl = msm8998_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
+ .tx_tbl = msm8998_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl),
+ .rx_tbl = msm8998_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl),
+ .pcs_tbl = msm8998_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
+static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
+ .offsets = &qmp_usbc_offsets_v3_qcm2290,
+
+ .serdes_tbl = qcm2290_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
+ .tx_tbl = qcm2290_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qcm2290_usb3_tx_tbl),
+ .rx_tbl = qcm2290_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qcm2290_usb3_rx_tbl),
+ .pcs_tbl = qcm2290_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qcm2290_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout_qcm2290,
+};
+
+static void qmp_usbc_configure_lane(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ writel(t->val, base + t->offset);
+ }
+}
+
+static void qmp_usbc_configure(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qmp_usbc_configure_lane(base, tbl, num, 0xff);
+}
+
+static int qmp_usbc_init(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ u32 val = 0;
+ int ret;
+
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_bulk_assert(qmp->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(qmp->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
+
+#define SW_PORTSELECT_VAL BIT(0)
+#define SW_PORTSELECT_MUX BIT(1)
+ /* Use software-based port select and switch based on Type-C orientation */
+ val = SW_PORTSELECT_MUX;
+ if (qmp->orientation == TYPEC_ORIENTATION_REVERSE)
+ val |= SW_PORTSELECT_VAL;
+ writel(val, qmp->pcs_misc);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(qmp->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
+static int qmp_usbc_exit(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ reset_control_bulk_assert(qmp->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qmp_usbc_power_on(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *status;
+ unsigned int val;
+ int ret;
+
+ qmp_usbc_configure(qmp->serdes, cfg->serdes_tbl, cfg->serdes_tbl_num);
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qmp_usbc_configure_lane(qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_usbc_configure_lane(qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ qmp_usbc_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_usbc_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+
+ qmp_usbc_configure(qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(10, 20);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START);
+
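+ /*
+ * PHYSTATUS deasserts once the PHY is ready; poll it every 200 us,
+ * for up to PHY_INIT_COMPLETE_TIMEOUT (10 ms).
+ */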
+ status = qmp->pcs + cfg->regs[QPHY_PCS_STATUS];
+ ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ return ret;
+}
+
+static int qmp_usbc_power_off(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL],
+ SERDES_START | PCS_START);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ return 0;
+}
+
+static int qmp_usbc_enable(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ int ret;
+
+ mutex_lock(&qmp->phy_mutex);
+
+ ret = qmp_usbc_init(phy);
+ if (ret)
+ goto out_unlock;
+
+ ret = qmp_usbc_power_on(phy);
+ if (ret) {
+ qmp_usbc_exit(phy);
+ goto out_unlock;
+ }
+
+ qmp->usb_init_count++;
+out_unlock:
+ mutex_unlock(&qmp->phy_mutex);
+
+ return ret;
+}
+
+static int qmp_usbc_disable(struct phy *phy)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+ int ret;
+
+ qmp->usb_init_count--;
+ ret = qmp_usbc_power_off(phy);
+ if (ret)
+ return ret;
+ return qmp_usbc_exit(phy);
+}
+
+static int qmp_usbc_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct qmp_usbc *qmp = phy_get_drvdata(phy);
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
+static const struct phy_ops qmp_usbc_phy_ops = {
+ .init = qmp_usbc_enable,
+ .exit = qmp_usbc_disable,
+ .set_mode = qmp_usbc_set_mode,
+ .owner = THIS_MODULE,
+};
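+
+/*
+ * Note: .init/.exit are backed by enable/disable so that the same full
+ * power-on sequence can be replayed from qmp_usbc_typec_switch_set() when
+ * the Type-C orientation changes while the PHY is in use.
+ */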
+
+static void qmp_usbc_enable_autonomous_mode(struct qmp_usbc *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupt status */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (qmp->tcsr_map && qmp->vls_clamp_reg)
+ regmap_write(qmp->tcsr_map, qmp->vls_clamp_reg, 1);
+}
+
+static void qmp_usbc_disable_autonomous_mode(struct qmp_usbc *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (qmp->tcsr_map && qmp->vls_clamp_reg)
+ regmap_write(qmp->tcsr_map, qmp->vls_clamp_reg, 0);
+
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qmp_usbc_runtime_suspend(struct device *dev)
+{
+ struct qmp_usbc *qmp = dev_get_drvdata(dev);
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qmp_usbc_enable_autonomous_mode(qmp);
+
+ clk_disable_unprepare(qmp->pipe_clk);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qmp_usbc_runtime_resume(struct device *dev)
+{
+ struct qmp_usbc *qmp = dev_get_drvdata(dev);
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qmp_usbc_disable_autonomous_mode(qmp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops qmp_usbc_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_usbc_runtime_suspend,
+ qmp_usbc_runtime_resume, NULL)
+};
+
+static int qmp_usbc_vreg_init(struct qmp_usbc *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qmp_usbc_reset_init(struct qmp_usbc *qmp,
+ const char *const *reset_list,
+ int num_resets)
+{
+ struct device *dev = qmp->dev;
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < num_resets; i++)
+ qmp->resets[i].id = reset_list[i];
+
+ qmp->num_resets = num_resets;
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qmp_usbc_clk_init(struct qmp_usbc *qmp)
+{
+ struct device *dev = qmp->dev;
+ int num = ARRAY_SIZE(qmp_usbc_phy_clk_l);
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = qmp_usbc_phy_clk_l[i];
+
+ qmp->num_clks = num;
+
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * and controls it. The <s>_pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care
+ * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qmp_usbc *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use a 125 MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
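+
+/*
+ * Illustrative (assumed) DT fragment consumed above:
+ *   clock-output-names = "usb3_phy_pipe_clk_src";
+ * GCC then parents its <s>_pipe_clk to this fixed 125 MHz clock.
+ */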
+
+#if IS_ENABLED(CONFIG_TYPEC)
+static int qmp_usbc_typec_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct qmp_usbc *qmp = typec_switch_get_drvdata(sw);
+
+ if (orientation == qmp->orientation || orientation == TYPEC_ORIENTATION_NONE)
+ return 0;
+
+ mutex_lock(&qmp->phy_mutex);
+ qmp->orientation = orientation;
+
+ if (qmp->usb_init_count) {
+ qmp_usbc_power_off(qmp->phy);
+ qmp_usbc_exit(qmp->phy);
+
+ qmp_usbc_init(qmp->phy);
+ qmp_usbc_power_on(qmp->phy);
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+static void qmp_usbc_typec_unregister(void *data)
+{
+ struct qmp_usbc *qmp = data;
+
+ typec_switch_unregister(qmp->sw);
+}
+
+static int qmp_usbc_typec_switch_register(struct qmp_usbc *qmp)
+{
+ struct typec_switch_desc sw_desc = {};
+ struct device *dev = qmp->dev;
+
+ sw_desc.drvdata = qmp;
+ sw_desc.fwnode = dev->fwnode;
+ sw_desc.set = qmp_usbc_typec_switch_set;
+ qmp->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(qmp->sw)) {
+ dev_err(dev, "Unable to register typec switch: %pe\n", qmp->sw);
+ return PTR_ERR(qmp->sw);
+ }
+
+ return devm_add_action_or_reset(dev, qmp_usbc_typec_unregister, qmp);
+}
+#else
+static int qmp_usbc_typec_switch_register(struct qmp_usbc *qmp)
+{
+ return 0;
+}
+#endif
+
+static int qmp_usbc_parse_dt_legacy(struct qmp_usbc *qmp, struct device_node *np)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ struct device *dev = qmp->dev;
+ int ret;
+
+ qmp->serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qmp->serdes))
+ return PTR_ERR(qmp->serdes);
+
+ /*
+ * Get memory resources for the PHY:
+ * Resources are indexed as: tx -> 0, rx -> 1, pcs -> 2.
+ * For dual-lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5.
+ * For single-lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qmp->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qmp->tx))
+ return PTR_ERR(qmp->tx);
+
+ qmp->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qmp->rx))
+ return PTR_ERR(qmp->rx);
+
+ qmp->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qmp->pcs))
+ return PTR_ERR(qmp->pcs);
+
+ qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qmp->tx2))
+ return PTR_ERR(qmp->tx2);
+
+ qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qmp->rx2))
+ return PTR_ERR(qmp->rx2);
+
+ qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
+ if (IS_ERR(qmp->pcs_misc)) {
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qmp->pcs_misc = NULL;
+ }
+
+ qmp->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ ret = devm_clk_bulk_get_all(qmp->dev, &qmp->clks);
+ if (ret < 0)
+ return ret;
+
+ qmp->num_clks = ret;
+
+ ret = qmp_usbc_reset_init(qmp, usb3phy_legacy_reset_l,
+ ARRAY_SIZE(usb3phy_legacy_reset_l));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int qmp_usbc_parse_dt(struct qmp_usbc *qmp)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_usbc_offsets *offs = cfg->offsets;
+ struct device *dev = qmp->dev;
+ void __iomem *base;
+ int ret;
+
+ if (!offs)
+ return -EINVAL;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->serdes = base + offs->serdes;
+ qmp->pcs = base + offs->pcs;
+ if (offs->pcs_misc)
+ qmp->pcs_misc = base + offs->pcs_misc;
+ qmp->tx = base + offs->tx;
+ qmp->rx = base + offs->rx;
+
+ qmp->tx2 = base + offs->tx2;
+ qmp->rx2 = base + offs->rx2;
+
+ ret = qmp_usbc_clk_init(qmp);
+ if (ret)
+ return ret;
+
+ qmp->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ ret = qmp_usbc_reset_init(qmp, usb3phy_reset_l,
+ ARRAY_SIZE(usb3phy_reset_l));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int qmp_usbc_parse_vls_clamp(struct qmp_usbc *qmp)
+{
+ struct of_phandle_args tcsr_args;
+ struct device *dev = qmp->dev;
+ int ret;
+
+ /* for backwards compatibility, ignore if the property is absent */
+ ret = of_parse_phandle_with_fixed_args(dev->of_node, "qcom,tcsr-reg", 1, 0,
+ &tcsr_args);
+ if (ret == -ENOENT)
+ return 0;
+ else if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to parse qcom,tcsr-reg\n");
+
+ qmp->tcsr_map = syscon_node_to_regmap(tcsr_args.np);
+ of_node_put(tcsr_args.np);
+ if (IS_ERR(qmp->tcsr_map))
+ return PTR_ERR(qmp->tcsr_map);
+
+ qmp->vls_clamp_reg = tcsr_args.args[0];
+
+ return 0;
+}
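+
+/*
+ * Illustrative (hypothetical phandle and offset) binding parsed above:
+ *   qcom,tcsr-reg = <&tcsr 0x15c>;
+ * where the single argument cell is the vls_clamp register offset inside
+ * the TCSR syscon.
+ */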
+
+static int qmp_usbc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np;
+ struct qmp_usbc *qmp;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+
+ qmp->orientation = TYPEC_ORIENTATION_NORMAL;
+
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ mutex_init(&qmp->phy_mutex);
+
+ ret = qmp_usbc_vreg_init(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usbc_typec_switch_register(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usbc_parse_vls_clamp(qmp);
+ if (ret)
+ return ret;
+
+ /* Check for legacy binding with child node. */
+ np = of_get_child_by_name(dev->of_node, "phy");
+ if (np) {
+ ret = qmp_usbc_parse_dt_legacy(qmp, np);
+ } else {
+ np = of_node_get(dev->of_node);
+ ret = qmp_usbc_parse_dt(qmp);
+ }
+ if (ret)
+ goto err_node_put;
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ goto err_node_put;
+ /*
+ * Prevent runtime PM from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ ret = phy_pipe_clk_register(qmp, np);
+ if (ret)
+ goto err_node_put;
+
+ qmp->phy = devm_phy_create(dev, np, &qmp_usbc_phy_ops);
+ if (IS_ERR(qmp->phy)) {
+ ret = PTR_ERR(qmp->phy);
+ dev_err(dev, "failed to create PHY: %d\n", ret);
+ goto err_node_put;
+ }
+
+ phy_set_drvdata(qmp->phy, qmp);
+
+ of_node_put(np);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static const struct of_device_id qmp_usbc_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8998-qmp-usb3-phy",
+ .data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,qcm2290-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm6115-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qmp_usbc_of_match_table);
+
+static struct platform_driver qmp_usbc_driver = {
+ .probe = qmp_usbc_probe,
+ .driver = {
+ .name = "qcom-qmp-usbc-phy",
+ .pm = &qmp_usbc_pm_ops,
+ .of_match_table = qmp_usbc_of_match_table,
+ },
+};
+
+module_platform_driver(qmp_usbc_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP USB-C PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 71f063f4a..49ceded9b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -24,8 +24,12 @@
#include "phy-qcom-qmp-qserdes-com-v6.h"
#include "phy-qcom-qmp-qserdes-txrx-v6.h"
#include "phy-qcom-qmp-qserdes-txrx-v6_20.h"
+#include "phy-qcom-qmp-qserdes-txrx-v6_n4.h"
#include "phy-qcom-qmp-qserdes-ln-shrd-v6.h"
+#include "phy-qcom-qmp-qserdes-com-v7.h"
+#include "phy-qcom-qmp-qserdes-txrx-v7.h"
+
#include "phy-qcom-qmp-qserdes-pll.h"
#include "phy-qcom-qmp-pcs-v2.h"
@@ -44,6 +48,8 @@
#include "phy-qcom-qmp-pcs-v6_20.h"
+#include "phy-qcom-qmp-pcs-v7.h"
+
/* Only for QMP V3 & V4 PHY - DP COM registers */
#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
#define QPHY_V3_DP_COM_SW_RESET 0x04
@@ -126,9 +132,11 @@
#define QSERDES_V4_DP_PHY_AUX_INTERRUPT_STATUS 0x0d8
#define QSERDES_V4_DP_PHY_STATUS 0x0dc
+#define QSERDES_V5_DP_PHY_VCO_DIV 0x070
#define QSERDES_V5_DP_PHY_STATUS 0x0dc
/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_VCO_DIV 0x070
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index 36505fc5f..e342eef06 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -13,7 +13,7 @@ config PHY_R8A779F0_ETHERNET_SERDES
config PHY_RCAR_GEN2
tristate "Renesas R-Car generation 2 USB PHY driver"
depends on ARCH_RENESAS
- depends on GENERIC_PHY
+ select GENERIC_PHY
help
Support for USB PHY found on Renesas R-Car generation 2 SoCs.
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index a24d2af15..4f71373ae 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -123,9 +123,12 @@ struct rockchip_chg_det_reg {
* @disrise_en: host disconnect rise edge detection enable.
* @disrise_st: host disconnect rise edge detection state.
* @disrise_clr: host disconnect rise edge detection clear.
- * @id_det_en: id detection enable register.
- * @id_det_st: id detection state register.
- * @id_det_clr: id detection clear register.
+ * @idfall_det_en: id detection enable register, falling edge
+ * @idfall_det_st: id detection state register, falling edge
+ * @idfall_det_clr: id detection clear register, falling edge
+ * @idrise_det_en: id detection enable register, rising edge
+ * @idrise_det_st: id detection state register, rising edge
+ * @idrise_det_clr: id detection clear register, rising edge
* @ls_det_en: linestate detection enable register.
* @ls_det_st: linestate detection state register.
* @ls_det_clr: linestate detection clear register.
@@ -146,9 +149,12 @@ struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg disrise_en;
struct usb2phy_reg disrise_st;
struct usb2phy_reg disrise_clr;
- struct usb2phy_reg id_det_en;
- struct usb2phy_reg id_det_st;
- struct usb2phy_reg id_det_clr;
+ struct usb2phy_reg idfall_det_en;
+ struct usb2phy_reg idfall_det_st;
+ struct usb2phy_reg idfall_det_clr;
+ struct usb2phy_reg idrise_det_en;
+ struct usb2phy_reg idrise_det_st;
+ struct usb2phy_reg idrise_det_clr;
struct usb2phy_reg ls_det_en;
struct usb2phy_reg ls_det_st;
struct usb2phy_reg ls_det_clr;
@@ -488,15 +494,27 @@ static int rockchip_usb2phy_init(struct phy *phy)
if (ret)
goto out;
- /* clear id status and enable id detect irq */
+ /* clear id status and enable id detect irqs */
ret = property_enable(rphy->grf,
- &rport->port_cfg->id_det_clr,
+ &rport->port_cfg->idfall_det_clr,
true);
if (ret)
goto out;
ret = property_enable(rphy->grf,
- &rport->port_cfg->id_det_en,
+ &rport->port_cfg->idrise_det_clr,
+ true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy->grf,
+ &rport->port_cfg->idfall_det_en,
+ true);
+ if (ret)
+ goto out;
+
+ ret = property_enable(rphy->grf,
+ &rport->port_cfg->idrise_det_en,
true);
if (ret)
goto out;
@@ -1030,11 +1048,16 @@ static irqreturn_t rockchip_usb2phy_id_irq(int irq, void *data)
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
bool id;
- if (!property_enabled(rphy->grf, &rport->port_cfg->id_det_st))
+ if (!property_enabled(rphy->grf, &rport->port_cfg->idfall_det_st) &&
+ !property_enabled(rphy->grf, &rport->port_cfg->idrise_det_st))
return IRQ_NONE;
/* clear id detect irq pending status */
- property_enable(rphy->grf, &rport->port_cfg->id_det_clr, true);
+ if (property_enabled(rphy->grf, &rport->port_cfg->idfall_det_st))
+ property_enable(rphy->grf, &rport->port_cfg->idfall_det_clr, true);
+
+ if (property_enabled(rphy->grf, &rport->port_cfg->idrise_det_st))
+ property_enable(rphy->grf, &rport->port_cfg->idrise_det_clr, true);
id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
@@ -1464,6 +1487,14 @@ put_child:
return ret;
}
+static int rk3128_usb2phy_tuning(struct rockchip_usb2phy *rphy)
+{
+ /* Turn off differential receiver in suspend mode */
+ return regmap_write_bits(rphy->grf, 0x298,
+ BIT(2) << BIT_WRITEABLE_SHIFT | BIT(2),
+ BIT(2) << BIT_WRITEABLE_SHIFT | 0);
+}
+
static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
{
int ret;
@@ -1513,6 +1544,54 @@ static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
return ret;
}
+static const struct rockchip_usb2phy_cfg rk3128_phy_cfgs[] = {
+ {
+ .reg = 0x17c,
+ .num_ports = 2,
+ .phy_tuning = rk3128_usb2phy_tuning,
+ .clkout_ctl = { 0x0190, 15, 15, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x017c, 8, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x017c, 14, 14, 0, 1 },
+ .bvalid_det_st = { 0x017c, 15, 15, 0, 1 },
+ .bvalid_det_clr = { 0x017c, 15, 15, 0, 1 },
+ .idfall_det_en = { 0x01a0, 2, 2, 0, 1 },
+ .idfall_det_st = { 0x01a0, 3, 3, 0, 1 },
+ .idfall_det_clr = { 0x01a0, 3, 3, 0, 1 },
+ .idrise_det_en = { 0x01a0, 0, 0, 0, 1 },
+ .idrise_det_st = { 0x01a0, 1, 1, 0, 1 },
+ .idrise_det_clr = { 0x01a0, 1, 1, 0, 1 },
+ .ls_det_en = { 0x017c, 12, 12, 0, 1 },
+ .ls_det_st = { 0x017c, 13, 13, 0, 1 },
+ .ls_det_clr = { 0x017c, 13, 13, 0, 1 },
+ .utmi_bvalid = { 0x014c, 5, 5, 0, 1 },
+ .utmi_id = { 0x014c, 8, 8, 0, 1 },
+ .utmi_ls = { 0x014c, 7, 6, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0194, 8, 0, 0, 0x1d1 },
+ .ls_det_en = { 0x0194, 14, 14, 0, 1 },
+ .ls_det_st = { 0x0194, 15, 15, 0, 1 },
+ .ls_det_clr = { 0x0194, 15, 15, 0, 1 }
+ }
+ },
+ .chg_det = {
+ .opmode = { 0x017c, 3, 0, 5, 1 },
+ .cp_det = { 0x02c0, 6, 6, 0, 1 },
+ .dcp_det = { 0x02c0, 5, 5, 0, 1 },
+ .dp_det = { 0x02c0, 7, 7, 0, 1 },
+ .idm_sink_en = { 0x0184, 8, 8, 0, 1 },
+ .idp_sink_en = { 0x0184, 7, 7, 0, 1 },
+ .idp_src_en = { 0x0184, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0x0184, 10, 10, 0, 1 },
+ .vdm_src_en = { 0x0184, 12, 12, 0, 1 },
+ .vdp_src_en = { 0x0184, 11, 11, 0, 1 },
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
{
.reg = 0x760,
@@ -1524,9 +1603,12 @@ static const struct rockchip_usb2phy_cfg rk3228_phy_cfgs[] = {
.bvalid_det_en = { 0x0680, 3, 3, 0, 1 },
.bvalid_det_st = { 0x0690, 3, 3, 0, 1 },
.bvalid_det_clr = { 0x06a0, 3, 3, 0, 1 },
- .id_det_en = { 0x0680, 6, 5, 0, 3 },
- .id_det_st = { 0x0690, 6, 5, 0, 3 },
- .id_det_clr = { 0x06a0, 6, 5, 0, 3 },
+ .idfall_det_en = { 0x0680, 6, 6, 0, 1 },
+ .idfall_det_st = { 0x0690, 6, 6, 0, 1 },
+ .idfall_det_clr = { 0x06a0, 6, 6, 0, 1 },
+ .idrise_det_en = { 0x0680, 5, 5, 0, 1 },
+ .idrise_det_st = { 0x0690, 5, 5, 0, 1 },
+ .idrise_det_clr = { 0x06a0, 5, 5, 0, 1 },
.ls_det_en = { 0x0680, 2, 2, 0, 1 },
.ls_det_st = { 0x0690, 2, 2, 0, 1 },
.ls_det_clr = { 0x06a0, 2, 2, 0, 1 },
@@ -1587,9 +1669,12 @@ static const struct rockchip_usb2phy_cfg rk3308_phy_cfgs[] = {
.bvalid_det_en = { 0x3020, 3, 2, 0, 3 },
.bvalid_det_st = { 0x3024, 3, 2, 0, 3 },
.bvalid_det_clr = { 0x3028, 3, 2, 0, 3 },
- .id_det_en = { 0x3020, 5, 4, 0, 3 },
- .id_det_st = { 0x3024, 5, 4, 0, 3 },
- .id_det_clr = { 0x3028, 5, 4, 0, 3 },
+ .idfall_det_en = { 0x3020, 5, 5, 0, 1 },
+ .idfall_det_st = { 0x3024, 5, 5, 0, 1 },
+ .idfall_det_clr = { 0x3028, 5, 5, 0, 1 },
+ .idrise_det_en = { 0x3020, 4, 4, 0, 1 },
+ .idrise_det_st = { 0x3024, 4, 4, 0, 1 },
+ .idrise_det_clr = { 0x3028, 4, 4, 0, 1 },
.ls_det_en = { 0x3020, 0, 0, 0, 1 },
.ls_det_st = { 0x3024, 0, 0, 0, 1 },
.ls_det_clr = { 0x3028, 0, 0, 0, 1 },
@@ -1634,9 +1719,12 @@ static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
.bvalid_det_en = { 0x0110, 3, 2, 0, 3 },
.bvalid_det_st = { 0x0114, 3, 2, 0, 3 },
.bvalid_det_clr = { 0x0118, 3, 2, 0, 3 },
- .id_det_en = { 0x0110, 5, 4, 0, 3 },
- .id_det_st = { 0x0114, 5, 4, 0, 3 },
- .id_det_clr = { 0x0118, 5, 4, 0, 3 },
+ .idfall_det_en = { 0x0110, 5, 5, 0, 1 },
+ .idfall_det_st = { 0x0114, 5, 5, 0, 1 },
+ .idfall_det_clr = { 0x0118, 5, 5, 0, 1 },
+ .idrise_det_en = { 0x0110, 4, 4, 0, 1 },
+ .idrise_det_st = { 0x0114, 4, 4, 0, 1 },
+ .idrise_det_clr = { 0x0118, 4, 4, 0, 1 },
.ls_det_en = { 0x0110, 0, 0, 0, 1 },
.ls_det_st = { 0x0114, 0, 0, 0, 1 },
.ls_det_clr = { 0x0118, 0, 0, 0, 1 },
@@ -1700,9 +1788,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.bvalid_det_en = { 0xe3c0, 3, 3, 0, 1 },
.bvalid_det_st = { 0xe3e0, 3, 3, 0, 1 },
.bvalid_det_clr = { 0xe3d0, 3, 3, 0, 1 },
- .id_det_en = { 0xe3c0, 5, 4, 0, 3 },
- .id_det_st = { 0xe3e0, 5, 4, 0, 3 },
- .id_det_clr = { 0xe3d0, 5, 4, 0, 3 },
+ .idfall_det_en = { 0xe3c0, 5, 5, 0, 1 },
+ .idfall_det_st = { 0xe3e0, 5, 5, 0, 1 },
+ .idfall_det_clr = { 0xe3d0, 5, 5, 0, 1 },
+ .idrise_det_en = { 0xe3c0, 4, 4, 0, 1 },
+ .idrise_det_st = { 0xe3e0, 4, 4, 0, 1 },
+ .idrise_det_clr = { 0xe3d0, 4, 4, 0, 1 },
.utmi_avalid = { 0xe2ac, 7, 7, 0, 1 },
.utmi_bvalid = { 0xe2ac, 12, 12, 0, 1 },
.utmi_id = { 0xe2ac, 8, 8, 0, 1 },
@@ -1739,9 +1830,12 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
.bvalid_det_en = { 0xe3c0, 8, 8, 0, 1 },
.bvalid_det_st = { 0xe3e0, 8, 8, 0, 1 },
.bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
- .id_det_en = { 0xe3c0, 10, 9, 0, 3 },
- .id_det_st = { 0xe3e0, 10, 9, 0, 3 },
- .id_det_clr = { 0xe3d0, 10, 9, 0, 3 },
+ .idfall_det_en = { 0xe3c0, 10, 10, 0, 1 },
+ .idfall_det_st = { 0xe3e0, 10, 10, 0, 1 },
+ .idfall_det_clr = { 0xe3d0, 10, 10, 0, 1 },
+ .idrise_det_en = { 0xe3c0, 9, 9, 0, 1 },
+ .idrise_det_st = { 0xe3e0, 9, 9, 0, 1 },
+ .idrise_det_clr = { 0xe3d0, 9, 9, 0, 1 },
.utmi_avalid = { 0xe2ac, 10, 10, 0, 1 },
.utmi_bvalid = { 0xe2ac, 16, 16, 0, 1 },
.utmi_id = { 0xe2ac, 11, 11, 0, 1 },
@@ -1770,9 +1864,12 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
.bvalid_det_en = { 0x0080, 3, 2, 0, 3 },
.bvalid_det_st = { 0x0084, 3, 2, 0, 3 },
.bvalid_det_clr = { 0x0088, 3, 2, 0, 3 },
- .id_det_en = { 0x0080, 5, 4, 0, 3 },
- .id_det_st = { 0x0084, 5, 4, 0, 3 },
- .id_det_clr = { 0x0088, 5, 4, 0, 3 },
+ .idfall_det_en = { 0x0080, 5, 5, 0, 1 },
+ .idfall_det_st = { 0x0084, 5, 5, 0, 1 },
+ .idfall_det_clr = { 0x0088, 5, 5, 0, 1 },
+ .idrise_det_en = { 0x0080, 4, 4, 0, 1 },
+ .idrise_det_st = { 0x0084, 4, 4, 0, 1 },
+ .idrise_det_clr = { 0x0088, 4, 4, 0, 1 },
.utmi_avalid = { 0x00c0, 10, 10, 0, 1 },
.utmi_bvalid = { 0x00c0, 9, 9, 0, 1 },
.utmi_id = { 0x00c0, 6, 6, 0, 1 },
@@ -1990,6 +2087,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
+ { .compatible = "rockchip,rk3128-usb2phy", .data = &rk3128_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
{ .compatible = "rockchip,rk3308-usb2phy", .data = &rk3308_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 5de5e2e97..26b157f53 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -125,12 +125,15 @@ struct rockchip_combphy_grfcfg {
};
struct rockchip_combphy_cfg {
+ unsigned int num_phys;
+ unsigned int phy_ids[3];
const struct rockchip_combphy_grfcfg *grfcfg;
int (*combphy_cfg)(struct rockchip_combphy_priv *priv);
};
struct rockchip_combphy_priv {
u8 type;
+ int id;
void __iomem *mmio;
int num_clks;
struct clk_bulk_data *clks;
@@ -320,7 +323,7 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
struct rockchip_combphy_priv *priv;
const struct rockchip_combphy_cfg *phy_cfg;
struct resource *res;
- int ret;
+ int ret, id;
phy_cfg = of_device_get_match_data(dev);
if (!phy_cfg) {
@@ -338,6 +341,15 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return ret;
}
+ /* find the phy-id from the io address */
+ priv->id = -ENODEV;
+ for (id = 0; id < phy_cfg->num_phys; id++) {
+ if (res->start == phy_cfg->phy_ids[id]) {
+ priv->id = id;
+ break;
+ }
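+ /*
+ * Example: on rk3588 a PHY at 0xfee10000 resolves to id 1, which
+ * later selects pipe_pcie1l0_sel in rk3588_combphy_cfg().
+ */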
+ }
+
priv->dev = dev;
priv->type = PHY_NONE;
priv->cfg = phy_cfg;
@@ -562,6 +574,12 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfe820000,
+ 0xfe830000,
+ 0xfe840000,
+ },
.grfcfg = &rk3568_combphy_grfcfgs,
.combphy_cfg = rk3568_combphy_cfg,
};
@@ -578,8 +596,14 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
- rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ switch (priv->id) {
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l0_sel, true);
+ break;
+ case 2:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->pipe_pcie1l1_sel, true);
+ break;
+ }
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
@@ -736,6 +760,12 @@ static const struct rockchip_combphy_grfcfg rk3588_combphy_grfcfgs = {
};
static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
+ .num_phys = 3,
+ .phy_ids = {
+ 0xfee00000,
+ 0xfee10000,
+ 0xfee20000,
+ },
.grfcfg = &rk3588_combphy_grfcfgs,
.combphy_cfg = rk3588_combphy_cfg,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 121e5961c..9857ee45b 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -40,6 +40,8 @@
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -132,7 +134,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
/* Deassert PCIe PMA output clamp mode */
@@ -140,31 +142,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index bc847d387..0f4818adb 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -248,7 +248,7 @@ static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j784s4 = {
.use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) |
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
BIT(PHY_INTERFACE_MODE_USXGMII),
.num_ports = 8,
.num_qsgmii_main_ports = 2,
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index fc3cd98c6..00d7e6a6d 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1240,6 +1240,7 @@ static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
case J721E_WIZ_10G:
case J7200_WIZ_10G:
case J721S2_WIZ_10G:
+ case J784S4_WIZ_10G:
if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII)
return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2);
break;
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index b4881cb34..c23eecc7d 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -65,7 +65,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -231,19 +230,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -467,9 +471,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1de4e1ede..8163a5983 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -366,6 +366,21 @@ config PINCTRL_PALMAS
open drain configuration for the Palmas series devices like
TPS65913, TPS80036 etc.
+config PINCTRL_PEF2256
+ tristate "Lantiq PEF2256 (FALC56) pin controller driver"
+ depends on OF && FRAMER_PEF2256
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ This option enables pin controller support for the Lantiq PEF2256
+ framer, also known as FALC56.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pinctrl-pef2256.
+
config PINCTRL_PIC32
bool "Microchip PIC32 pin controller driver"
depends on OF
@@ -469,6 +484,22 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
+config PINCTRL_TPS6594
+ tristate "Pinctrl and GPIO driver for TI TPS6594 PMIC"
+ depends on OF && MFD_TPS6594
+ default MFD_TPS6594
+ select PINMUX
+ select GPIOLIB
+ select REGMAP
+ select GPIO_REGMAP
+ select GENERIC_PINCONF
+ help
+ Say Y to select the pinmux and GPIO driver for the TPS6594
+ PMIC chip family.
+
+ This driver can also be built as a module
+ called tps6594-pinctrl.
+
config PINCTRL_ZYNQ
bool "Pinctrl driver for Xilinx Zynq"
depends on ARCH_ZYNQ
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 37575deb7..1071f301c 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
obj-$(CONFIG_PINCTRL_MLXBF3) += pinctrl-mlxbf3.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
+obj-$(CONFIG_PINCTRL_PEF2256) += pinctrl-pef2256.o
obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
@@ -48,6 +49,7 @@ obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
+obj-$(CONFIG_PINCTRL_TPS6594) += pinctrl-tps6594.o
obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
index 489ea1778..db2a7600a 100644
--- a/drivers/pinctrl/aspeed/Makefile
+++ b/drivers/pinctrl/aspeed/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Aspeed pinctrl support
-ccflags-y += $(call cc-option,-Woverride-init)
+ccflags-y += -Woverride-init
obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o pinmux-aspeed.o
obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index d099a7f25..6bb2b4619 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -171,8 +171,8 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
if (!group)
return -EINVAL;
- for (i = 0; i < group->num_pins; i++)
- unset |= BIT(group->pins[i]);
+ for (i = 0; i < group->grp.npins; i++)
+ unset |= BIT(group->grp.pins[i]);
tmp = readl(ns_pinctrl->base);
tmp &= ~unset;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index f2977eb65..bbcdece83 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "pinctrl core: " fmt
#include <linux/array_size.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -23,6 +24,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>
@@ -30,10 +32,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
-#ifdef CONFIG_GPIOLIB
-#include "../gpio/gpiolib.h"
-#endif
-
#include "core.h"
#include "devicetree.h"
#include "pinconf.h"
@@ -145,7 +143,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np)
*/
int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
{
- unsigned i, pin;
+ unsigned int i, pin;
/* The pin number can be retrieved from the pin controller descriptor */
for (i = 0; i < pctldev->desc->npins; i++) {
@@ -166,7 +164,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
* @pctldev: the pin control device to lookup the pin on
* @pin: pin number/id to look up
*/
-const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
+const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned int pin)
{
const struct pin_desc *desc;
@@ -184,7 +182,7 @@ EXPORT_SYMBOL_GPL(pin_get_name);
/* Deletes a range of pin descriptors */
static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
const struct pinctrl_pin_desc *pins,
- unsigned num_pins)
+ unsigned int num_pins)
{
int i;
@@ -252,9 +250,9 @@ failed:
static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
const struct pinctrl_pin_desc *pins,
- unsigned num_descs)
+ unsigned int num_descs)
{
- unsigned i;
+ unsigned int i;
int ret = 0;
for (i = 0; i < num_descs; i++) {
@@ -428,7 +426,7 @@ EXPORT_SYMBOL_GPL(pinctrl_add_gpio_range);
void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *ranges,
- unsigned nranges)
+ unsigned int nranges)
{
int i;
@@ -459,7 +457,7 @@ struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
EXPORT_SYMBOL_GPL(pinctrl_find_and_add_gpio_range);
int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group,
- const unsigned **pins, unsigned *num_pins)
+ const unsigned int **pins, unsigned int *num_pins)
{
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
int gs;
@@ -559,7 +557,7 @@ const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
if (!group)
return NULL;
- return group->name;
+ return group->grp.name;
}
EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_name);
@@ -585,8 +583,8 @@ int pinctrl_generic_get_group_pins(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- *pins = group->pins;
- *num_pins = group->num_pins;
+ *pins = group->grp.pins;
+ *num_pins = group->grp.npins;
return 0;
}
@@ -642,7 +640,7 @@ static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
* Note that the caller must take care of locking.
*/
int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
- int *pins, int num_pins, void *data)
+ const unsigned int *pins, int num_pins, void *data)
{
struct group_desc *group;
int selector, error;
@@ -660,10 +658,7 @@ int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
if (!group)
return -ENOMEM;
- group->name = name;
- group->pins = pins;
- group->num_pins = num_pins;
- group->data = data;
+ *group = PINCTRL_GROUP_DESC(name, pins, num_pins, data);
error = radix_tree_insert(&pctldev->pin_group_tree, selector, group);
if (error)
@@ -734,8 +729,8 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
const char *pin_group)
{
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- unsigned ngroups = pctlops->get_groups_count(pctldev);
- unsigned group_selector = 0;
+ unsigned int ngroups = pctlops->get_groups_count(pctldev);
+ unsigned int group_selector = 0;
while (group_selector < ngroups) {
const char *gname = pctlops->get_group_name(pctldev,
@@ -1432,7 +1427,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_put);
* @num_maps: the number of maps in the mapping table
*/
int pinctrl_register_mappings(const struct pinctrl_map *maps,
- unsigned num_maps)
+ unsigned int num_maps)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1647,10 +1642,10 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- unsigned i, pin;
+ unsigned int i, pin;
#ifdef CONFIG_GPIOLIB
+ struct gpio_device *gdev = NULL;
struct pinctrl_gpio_range *range;
- struct gpio_chip *chip;
int gpio_num;
#endif
@@ -1685,11 +1680,11 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
* we need to get rid of the range->base eventually and
* get the descriptor directly from the gpio_chip.
*/
- chip = gpiod_to_chip(gpio_to_desc(gpio_num));
- else
- chip = NULL;
- if (chip)
- seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
+ gdev = gpiod_to_gpio_device(gpio_to_desc(gpio_num));
+ if (gdev)
+ seq_printf(s, "%u:%s ",
+ gpio_num - gpio_device_get_base(gdev),
+ gpio_device_get_label(gdev));
else
seq_puts(s, "0:? ");
#endif
@@ -1711,7 +1706,7 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- unsigned ngroups, selector = 0;
+ unsigned int ngroups, selector = 0;
mutex_lock(&pctldev->mutex);
@@ -1719,8 +1714,8 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
seq_puts(s, "registered pin groups:\n");
while (selector < ngroups) {
- const unsigned *pins = NULL;
- unsigned num_pins = 0;
+ const unsigned int *pins = NULL;
+ unsigned int num_pins = 0;
const char *gname = ops->get_group_name(pctldev, selector);
const char *pname;
int ret = 0;
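The core.c changes above replace direct pokes at chip->gpiodev with public gpio_device accessors, which is why the private gpiolib header include could be dropped. A sketch of the accessor-based pattern the new debugfs code uses (the helper name is illustrative):

/* Sketch: print "<chip-relative offset>:<label>" for a global GPIO number
 * using only public gpio_device accessors. */
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/seq_file.h>

static void demo_show_gpio(struct seq_file *s, unsigned int gpio_num)
{
	struct gpio_device *gdev;

	gdev = gpiod_to_gpio_device(gpio_to_desc(gpio_num));
	if (gdev)
		seq_printf(s, "%u:%s ", gpio_num - gpio_device_get_base(gdev),
			   gpio_device_get_label(gdev));
	else
		seq_puts(s, "0:? ");
}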
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 530370443..837fd5bd9 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -111,8 +111,8 @@ struct pinctrl_state {
* @func: the function selector to program
*/
struct pinctrl_setting_mux {
- unsigned group;
- unsigned func;
+ unsigned int group;
+ unsigned int func;
};
/**
@@ -124,9 +124,9 @@ struct pinctrl_setting_mux {
* @num_configs: the number of entries in array @configs
*/
struct pinctrl_setting_configs {
- unsigned group_or_pin;
+ unsigned int group_or_pin;
unsigned long *configs;
- unsigned num_configs;
+ unsigned int num_configs;
};
/**
@@ -173,7 +173,7 @@ struct pin_desc {
void *drv_data;
/* These fields only added when supporting pinmux drivers */
#ifdef CONFIG_PINMUX
- unsigned mux_usecount;
+ unsigned int mux_usecount;
const char *mux_owner;
const struct pinctrl_setting_mux *mux_setting;
const char *gpio_owner;
@@ -189,25 +189,30 @@ struct pin_desc {
struct pinctrl_maps {
struct list_head node;
const struct pinctrl_map *maps;
- unsigned num_maps;
+ unsigned int num_maps;
};
#ifdef CONFIG_GENERIC_PINCTRL_GROUPS
+#include <linux/pinctrl/pinctrl.h>
+
/**
* struct group_desc - generic pin group descriptor
- * @name: name of the pin group
- * @pins: array of pins that belong to the group
- * @num_pins: number of pins in the group
+ * @grp: generic data of the pin group (name and pins)
* @data: pin controller driver specific data
*/
struct group_desc {
- const char *name;
- int *pins;
- int num_pins;
+ struct pingroup grp;
void *data;
};
+/* Convenience macro to define a generic pin group descriptor */
+#define PINCTRL_GROUP_DESC(_name, _pins, _num_pins, _data) \
+(struct group_desc) { \
+ .grp = PINCTRL_PINGROUP(_name, _pins, _num_pins), \
+ .data = _data, \
+}
+
int pinctrl_generic_get_group_count(struct pinctrl_dev *pctldev);
const char *pinctrl_generic_get_group_name(struct pinctrl_dev *pctldev,
@@ -222,7 +227,7 @@ struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev,
unsigned int group_selector);
int pinctrl_generic_add_group(struct pinctrl_dev *pctldev, const char *name,
- int *gpins, int ngpins, void *data);
+ const unsigned int *pins, int num_pins, void *data);
int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
unsigned int group_selector);
@@ -232,7 +237,7 @@ int pinctrl_generic_remove_group(struct pinctrl_dev *pctldev,
struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
struct pinctrl_dev *get_pinctrl_dev_from_of_node(struct device_node *np);
int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
-const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin);
+const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned int pin);
int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
const char *pin_group);
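With struct group_desc now embedding a struct pingroup, generic drivers describe groups through the shared type, and PINCTRL_GROUP_DESC() builds the descriptor as a compound literal over PINCTRL_PINGROUP(). A hypothetical caller of the updated pinctrl_generic_add_group() signature (assumes drivers/pinctrl/core.h; the group name and pin numbers are made up):

#include <linux/array_size.h>

static const unsigned int uart_pins[] = { 10, 11 };

static int demo_register_group(struct pinctrl_dev *pctldev, void *drv_data)
{
	/* Returns the group selector on success, negative errno on failure */
	return pinctrl_generic_add_group(pctldev, "uart_grp",
					 uart_pins, ARRAY_SIZE(uart_pins),
					 drv_data);
}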
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 6e0a40962..df1efc2e5 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -24,11 +24,11 @@ struct pinctrl_dt_map {
struct list_head node;
struct pinctrl_dev *pctldev;
struct pinctrl_map *map;
- unsigned num_maps;
+ unsigned int num_maps;
};
static void dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
+ struct pinctrl_map *map, unsigned int num_maps)
{
int i;
@@ -64,7 +64,7 @@ void pinctrl_dt_free_maps(struct pinctrl *p)
static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
+ struct pinctrl_map *map, unsigned int num_maps)
{
int i;
struct pinctrl_dt_map *dt_map;
@@ -116,7 +116,7 @@ static int dt_to_map_one_config(struct pinctrl *p,
const struct pinctrl_ops *ops;
int ret;
struct pinctrl_map *map;
- unsigned num_maps;
+ unsigned int num_maps;
bool allow_default = false;
/* Find the pin controller containing np_config */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 9bc169430..2d3d80921 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -42,7 +42,7 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
for (i = 0; i < pctldev->num_groups; i++) {
grp = pinctrl_generic_get_group(pctldev, i);
- if (grp && !strcmp(grp->name, name))
+ if (grp && !strcmp(grp->grp.name, name))
break;
}
@@ -79,9 +79,9 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (info->flags & IMX_USE_SCU) {
- map_num += grp->num_pins;
+ map_num += grp->grp.npins;
} else {
- for (i = 0; i < grp->num_pins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
pin = &((struct imx_pin *)(grp->data))[i];
if (!(pin->conf.mmio.config & IMX_NO_PAD_CTL))
map_num++;
@@ -109,7 +109,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create config map */
new_map++;
- for (i = j = 0; i < grp->num_pins; i++) {
+ for (i = j = 0; i < grp->grp.npins; i++) {
pin = &((struct imx_pin *)(grp->data))[i];
/*
@@ -263,10 +263,10 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
if (!func)
return -EINVAL;
- npins = grp->num_pins;
+ npins = grp->grp.npins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- func->name, grp->name);
+ func->name, grp->grp.name);
for (i = 0; i < npins; i++) {
/*
@@ -423,7 +423,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
if (!grp)
return;
- for (i = 0; i < grp->num_pins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
struct imx_pin *pin = &((struct imx_pin *)(grp->data))[i];
name = pin_get_name(pctldev, pin->pin);
@@ -511,6 +511,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
{
const struct imx_pinctrl_soc_info *info = ipctl->info;
struct imx_pin *pin;
+ unsigned int *pins;
int size, pin_size;
const __be32 *list;
int i;
@@ -525,7 +526,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin_size = FSL_PIN_SIZE;
/* Initialise group */
- grp->name = np->name;
+ grp->grp.name = np->name;
/*
* the binding format is fsl,pins = <PIN_FUNC_ID CONFIG ...>,
@@ -553,24 +554,22 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
return -EINVAL;
}
- grp->num_pins = size / pin_size;
- grp->data = devm_kcalloc(ipctl->dev,
- grp->num_pins, sizeof(struct imx_pin),
- GFP_KERNEL);
- grp->pins = devm_kcalloc(ipctl->dev,
- grp->num_pins, sizeof(unsigned int),
- GFP_KERNEL);
- if (!grp->pins || !grp->data)
+ grp->grp.npins = size / pin_size;
+ grp->data = devm_kcalloc(ipctl->dev, grp->grp.npins, sizeof(*pin), GFP_KERNEL);
+ if (!grp->data)
return -ENOMEM;
- for (i = 0; i < grp->num_pins; i++) {
+ pins = devm_kcalloc(ipctl->dev, grp->grp.npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+ grp->grp.pins = pins;
+
+ for (i = 0; i < grp->grp.npins; i++) {
pin = &((struct imx_pin *)(grp->data))[i];
if (info->flags & IMX_USE_SCU)
- info->imx_pinctrl_parse_pin(ipctl, &grp->pins[i],
- pin, &list);
+ info->imx_pinctrl_parse_pin(ipctl, &pins[i], pin, &list);
else
- imx_pinctrl_parse_pin_mmio(ipctl, &grp->pins[i],
- pin, &list, np);
+ imx_pinctrl_parse_pin_mmio(ipctl, &pins[i], pin, &list, np);
}
return 0;
@@ -612,8 +611,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
i = 0;
for_each_child_of_node(np, child) {
- grp = devm_kzalloc(ipctl->dev, sizeof(struct group_desc),
- GFP_KERNEL);
+ grp = devm_kzalloc(ipctl->dev, sizeof(*grp), GFP_KERNEL);
if (!grp) {
of_node_put(child);
return -ENOMEM;
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index d66f4f693..2101d30bd 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Intel pin control drivers
menu "Intel pinctrl drivers"
- depends on ACPI && (X86 || COMPILE_TEST)
+ depends on (ACPI && X86) || COMPILE_TEST
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
@@ -37,6 +37,16 @@ config PINCTRL_INTEL
select GPIOLIB
select GPIOLIB_IRQCHIP
+config PINCTRL_INTEL_PLATFORM
+ tristate "Intel pinctrl and GPIO platform driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ Intel PCH pins and using them as GPIOs. Currently the following
+ Intel SoCs / platforms require this to be functional:
+ - Lunar Lake
+
config PINCTRL_ALDERLAKE
tristate "Intel Alder Lake pinctrl and GPIO driver"
select PINCTRL_INTEL
@@ -128,6 +138,15 @@ config PINCTRL_METEORLAKE
This pinctrl driver provides an interface that allows configuring
Intel Meteor Lake pins and using them as GPIOs.
+config PINCTRL_METEORPOINT
+ tristate "Intel Meteor Point pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ Meteor Point is the PCH of Intel Meteor Lake. This pinctrl driver
+ provides an interface that allows configuring PCH pins and
+ using them as GPIOs.
+
config PINCTRL_SUNRISEPOINT
tristate "Intel Sunrisepoint pinctrl and GPIO driver"
select PINCTRL_INTEL
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index f6d30f2d9..d0d868c9a 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_PINCTRL_TANGIER) += pinctrl-tangier.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_MOOREFIELD) += pinctrl-moorefield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
+obj-$(CONFIG_PINCTRL_INTEL_PLATFORM) += pinctrl-intel-platform.o
obj-$(CONFIG_PINCTRL_ALDERLAKE) += pinctrl-alderlake.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
@@ -21,5 +22,6 @@ obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LAKEFIELD) += pinctrl-lakefield.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_METEORLAKE) += pinctrl-meteorlake.o
+obj-$(CONFIG_PINCTRL_METEORPOINT) += pinctrl-meteorpoint.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-alderlake.c b/drivers/pinctrl/intel/pinctrl-alderlake.c
index 4a37dc273..7d9948e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-alderlake.c
+++ b/drivers/pinctrl/intel/pinctrl-alderlake.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -733,14 +734,12 @@ static const struct acpi_device_id adl_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, adl_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(adl_pinctrl_pm_ops);
-
static struct platform_driver adl_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "alderlake-pinctrl",
.acpi_match_table = adl_pinctrl_acpi_match,
- .pm = &adl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(adl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index f1af21dbd..ac97724c5 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -587,10 +588,9 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
unsigned int func)
{
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
@@ -608,18 +608,15 @@ static void byt_set_group_simple_mux(struct intel_pinctrl *vg,
value |= func;
writel(value, padcfg0);
}
-
- raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
const struct intel_pingroup group,
const unsigned int *func)
{
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
for (i = 0; i < group.grp.npins; i++) {
void __iomem *padcfg0;
@@ -637,8 +634,6 @@ static void byt_set_group_mixed_mux(struct intel_pinctrl *vg,
value |= func[i];
writel(value, padcfg0);
}
-
- raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@@ -676,10 +671,10 @@ static u32 byt_get_gpio_mux(struct intel_pinctrl *vg, unsigned int offset)
static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int offset)
{
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
+
value = readl(reg);
/* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
@@ -687,7 +682,6 @@ static void byt_gpio_clear_triggering(struct intel_pinctrl *vg, unsigned int off
value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
writel(value, reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@@ -697,9 +691,8 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
u32 value, gpio_mux;
- unsigned long flags;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
/*
* In most cases, func pin mux 000 means GPIO function.
@@ -712,15 +705,14 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
*/
value = readl(reg) & BYT_PIN_MUX;
gpio_mux = byt_get_gpio_mux(vg, offset);
- if (gpio_mux != value) {
- value = readl(reg) & ~BYT_PIN_MUX;
- value |= gpio_mux;
- writel(value, reg);
+ if (gpio_mux == value)
+ return 0;
- dev_warn(vg->dev, FW_BUG "Pin %i: forcibly re-configured as GPIO\n", offset);
- }
+ value = readl(reg) & ~BYT_PIN_MUX;
+ value |= gpio_mux;
+ writel(value, reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
+ dev_warn(vg->dev, FW_BUG "Pin %i: forcibly re-configured as GPIO\n", offset);
return 0;
}
@@ -758,10 +750,9 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
value = readl(val_reg);
value &= ~BYT_DIR_MASK;
@@ -772,8 +763,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
writel(value, val_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
-
return 0;
}
@@ -810,6 +799,7 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
*reg &= ~BYT_PULL_STR_MASK;
switch (strength) {
+ case 1: /* Set default strength value in case none is given */
case 2000:
*reg |= BYT_PULL_STR_2K;
break;
@@ -829,6 +819,24 @@ static int byt_set_pull_strength(u32 *reg, u16 strength)
return 0;
}
+static void byt_gpio_force_input_mode(struct intel_pinctrl *vg, unsigned int offset)
+{
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ u32 value;
+
+ value = readl(reg);
+ if (!(value & BYT_INPUT_EN))
+ return;
+
+ /*
+ * Pull assignment is only applicable in input mode. If
+ * chip is not in input mode, set it and warn about it.
+ */
+ value &= ~BYT_INPUT_EN;
+ writel(value, reg);
+ dev_warn(vg->dev, "Pin %i: forcibly set to input mode\n", offset);
+}
+
static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
unsigned long *config)
{
@@ -837,15 +845,15 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
- unsigned long flags;
u32 conf, pull, val, debounce;
u16 arg = 0;
- raw_spin_lock_irqsave(&byt_lock, flags);
- conf = readl(conf_reg);
+ scoped_guard(raw_spinlock_irqsave, &byt_lock) {
+ conf = readl(conf_reg);
+ val = readl(val_reg);
+ }
+
pull = conf & BYT_PULL_ASSIGN_MASK;
- val = readl(val_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -872,9 +880,8 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
if (!(conf & BYT_DEBOUNCE_EN))
return -EINVAL;
- raw_spin_lock_irqsave(&byt_lock, flags);
- debounce = readl(db_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &byt_lock)
+ debounce = readl(db_reg);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
case BYT_DEBOUNCE_PULSE_375US:
@@ -919,18 +926,15 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
- void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
- u32 conf, val, db_pulse, debounce;
+ u32 conf, db_pulse, debounce;
enum pin_config_param param;
- unsigned long flags;
- int i, ret = 0;
+ int i, ret;
u32 arg;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
conf = readl(conf_reg);
- val = readl(val_reg);
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
@@ -941,59 +945,30 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
conf &= ~BYT_PULL_ASSIGN_MASK;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 2000;
-
- /*
- * Pull assignment is only applicable in input mode. If
- * chip is not in input mode, set it and warn about it.
- */
- if (val & BYT_INPUT_EN) {
- val &= ~BYT_INPUT_EN;
- writel(val, val_reg);
- dev_warn(vg->dev, "Pin %i: forcibly set to input mode\n", offset);
- }
+ byt_gpio_force_input_mode(vg, offset);
conf &= ~BYT_PULL_ASSIGN_MASK;
conf |= BYT_PULL_ASSIGN_DOWN;
ret = byt_set_pull_strength(&conf, arg);
+ if (ret)
+ return ret;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 2000;
-
- /*
- * Pull assignment is only applicable in input mode. If
- * chip is not in input mode, set it and warn about it.
- */
- if (val & BYT_INPUT_EN) {
- val &= ~BYT_INPUT_EN;
- writel(val, val_reg);
- dev_warn(vg->dev, "Pin %i: forcibly set to input mode\n", offset);
- }
+ byt_gpio_force_input_mode(vg, offset);
conf &= ~BYT_PULL_ASSIGN_MASK;
conf |= BYT_PULL_ASSIGN_UP;
ret = byt_set_pull_strength(&conf, arg);
+ if (ret)
+ return ret;
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
- if (arg) {
- conf |= BYT_DEBOUNCE_EN;
- } else {
- conf &= ~BYT_DEBOUNCE_EN;
-
- /*
- * No need to update the pulse value.
- * Debounce is going to be disabled.
- */
- break;
- }
-
switch (arg) {
+ case 0:
+ db_pulse = 0;
+ break;
case 375:
db_pulse = BYT_DEBOUNCE_PULSE_375US;
break;
@@ -1016,33 +991,28 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
db_pulse = BYT_DEBOUNCE_PULSE_24MS;
break;
default:
- if (arg)
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- if (ret)
- break;
+ if (db_pulse) {
+ debounce = readl(db_reg);
+ debounce = (debounce & ~BYT_DEBOUNCE_PULSE_MASK) | db_pulse;
+ writel(debounce, db_reg);
- debounce = readl(db_reg);
- debounce = (debounce & ~BYT_DEBOUNCE_PULSE_MASK) | db_pulse;
- writel(debounce, db_reg);
+ conf |= BYT_DEBOUNCE_EN;
+ } else {
+ conf &= ~BYT_DEBOUNCE_EN;
+ }
break;
default:
- ret = -ENOTSUPP;
+ return -ENOTSUPP;
}
-
- if (ret)
- break;
}
- if (!ret)
- writel(conf, conf_reg);
+ writel(conf, conf_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
-
- return ret;
+ return 0;
}
static const struct pinconf_ops byt_pinconf_ops = {
@@ -1062,12 +1032,10 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&byt_lock, flags);
- val = readl(reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &byt_lock)
+ val = readl(reg);
return !!(val & BYT_LEVEL);
}
@@ -1075,35 +1043,34 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void byt_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct intel_pinctrl *vg = gpiochip_get_data(chip);
- void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- unsigned long flags;
+ void __iomem *reg;
u32 old_val;
+ reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
if (!reg)
return;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
+
old_val = readl(reg);
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *vg = gpiochip_get_data(chip);
- void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- unsigned long flags;
+ void __iomem *reg;
u32 value;
+ reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
if (!reg)
return -EINVAL;
- raw_spin_lock_irqsave(&byt_lock, flags);
- value = readl(reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &byt_lock)
+ value = readl(reg);
if (!(value & BYT_OUTPUT_EN))
return GPIO_LINE_DIRECTION_OUT;
@@ -1117,17 +1084,15 @@ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- unsigned long flags;
u32 reg;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
reg = readl(val_reg);
reg &= ~BYT_DIR_MASK;
reg |= BYT_OUTPUT_EN;
writel(reg, val_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1142,10 +1107,9 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
{
struct intel_pinctrl *vg = gpiochip_get_data(chip);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- unsigned long flags;
u32 reg;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
byt_gpio_direct_irq_check(vg, offset);
@@ -1158,7 +1122,6 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
writel(reg, val_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1173,8 +1136,6 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
void __iomem *conf_reg, *val_reg;
const char *pull_str = NULL;
const char *pull = NULL;
- unsigned long flags;
- const char *label;
unsigned int pin;
pin = vg->soc->pins[i].number;
@@ -1191,19 +1152,20 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- raw_spin_lock_irqsave(&byt_lock, flags);
- conf0 = readl(conf_reg);
- val = readl(val_reg);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &byt_lock) {
+ conf0 = readl(conf_reg);
+ val = readl(val_reg);
+ }
comm = intel_get_community(vg, pin);
if (!comm) {
seq_printf(s, "Pin %i: can't retrieve community\n", pin);
continue;
}
- label = gpiochip_is_requested(chip, i);
- if (!label)
- label = "Unrequested";
+
+ char *label __free(kfree) = gpiochip_dup_line_label(chip, i);
+ if (IS_ERR(label))
+ continue;
switch (conf0 & BYT_PULL_ASSIGN_MASK) {
case BYT_PULL_ASSIGN_UP:
@@ -1232,7 +1194,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s,
" gpio-%-3d (%-20.20s) %s %s %s pad-%-3d offset:0x%03x mux:%d %s%s%s",
pin,
- label,
+ label ?: "Unrequested",
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
str_hi_lo(val & BYT_LEVEL),
@@ -1278,9 +1240,9 @@ static void byt_irq_ack(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock(&byt_lock);
+ guard(raw_spinlock)(&byt_lock);
+
writel(BIT(hwirq % 32), reg);
- raw_spin_unlock(&byt_lock);
}
static void byt_irq_mask(struct irq_data *d)
@@ -1298,7 +1260,6 @@ static void byt_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *vg = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
void __iomem *reg;
u32 value;
@@ -1308,7 +1269,8 @@ static void byt_irq_unmask(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
+
value = readl(reg);
switch (irqd_get_trigger_type(d)) {
@@ -1330,23 +1292,21 @@ static void byt_irq_unmask(struct irq_data *d)
}
writel(value, reg);
-
- raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_irq_type(struct irq_data *d, unsigned int type)
{
struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- u32 value;
- unsigned long flags;
void __iomem *reg;
+ u32 value;
reg = byt_gpio_reg(vg, hwirq, BYT_CONF0_REG);
if (!reg)
return -EINVAL;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
+
value = readl(reg);
WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1368,8 +1328,6 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&byt_lock, flags);
-
return 0;
}
@@ -1401,9 +1359,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}
- raw_spin_lock(&byt_lock);
- pending = readl(reg);
- raw_spin_unlock(&byt_lock);
+ scoped_guard(raw_spinlock, &byt_lock)
+ pending = readl(reg);
for_each_set_bit(pin, &pending, 32)
generic_handle_domain_irq(vg->chip.irq.domain, base + pin);
}
@@ -1666,10 +1623,9 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
static int byt_gpio_suspend(struct device *dev)
{
struct intel_pinctrl *vg = dev_get_drvdata(dev);
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
@@ -1693,17 +1649,15 @@ static int byt_gpio_suspend(struct device *dev)
vg->context.pads[i].val = value;
}
- raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
static int byt_gpio_resume(struct device *dev)
{
struct intel_pinctrl *vg = dev_get_drvdata(dev);
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&byt_lock, flags);
+ guard(raw_spinlock_irqsave)(&byt_lock);
for (i = 0; i < vg->soc->npins; i++) {
void __iomem *reg;
@@ -1743,7 +1697,6 @@ static int byt_gpio_resume(struct device *dev)
}
}
- raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
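Most of the Baytrail churn above is a mechanical conversion from raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs to the scope-based guards from <linux/cleanup.h>, which release the lock automatically at scope exit and therefore make early returns safe. A self-contained sketch of both forms (names are illustrative):

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static int demo_set_bit(void __iomem *reg, u32 mask)
{
	u32 value;

	guard(raw_spinlock_irqsave)(&demo_lock);	/* unlocked at any return */

	value = readl(reg);
	if (value & mask)
		return 0;	/* early return no longer leaks the lock */

	writel(value | mask, reg);
	return 0;
}

static u32 demo_read(void __iomem *reg)
{
	u32 value;

	/* scoped_guard() limits the critical section to one statement/block */
	scoped_guard(raw_spinlock_irqsave, &demo_lock)
		value = readl(reg);

	return value;
}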
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 3118c7c88..d99541676 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1000,14 +1001,12 @@ static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
};
MODULE_DEVICE_TABLE(platform, bxt_pinctrl_platform_ids);
-static INTEL_PINCTRL_PM_OPS(bxt_pinctrl_pm_ops);
-
static struct platform_driver bxt_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_uid,
.driver = {
.name = "broxton-pinctrl",
.acpi_match_table = bxt_pinctrl_acpi_match,
- .pm = &bxt_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
.id_table = bxt_pinctrl_platform_ids,
};
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index 95976abfb..1aa09f950 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -824,14 +825,12 @@ static const struct acpi_device_id cnl_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, cnl_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(cnl_pinctrl_pm_ops);
-
static struct platform_driver cnl_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "cannonlake-pinctrl",
.acpi_match_table = cnl_pinctrl_acpi_match,
- .pm = &cnl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(cnl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
index a20465eb2..48af8930d 100644
--- a/drivers/pinctrl/intel/pinctrl-cedarfork.c
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -319,8 +320,6 @@ static const struct intel_pinctrl_soc_data cdf_soc_data = {
.ncommunities = ARRAY_SIZE(cdf_communities),
};
-static INTEL_PINCTRL_PM_OPS(cdf_pinctrl_pm_ops);
-
static const struct acpi_device_id cdf_pinctrl_acpi_match[] = {
{ "INTC3001", (kernel_ulong_t)&cdf_soc_data },
{ }
@@ -332,7 +331,7 @@ static struct platform_driver cdf_pinctrl_driver = {
.driver = {
.name = "cedarfork-pinctrl",
.acpi_match_table = cdf_pinctrl_acpi_match,
- .pm = &cdf_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index 562a4f918..666507f54 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -249,8 +250,6 @@ static const struct intel_pinctrl_soc_data dnv_soc_data = {
.ncommunities = ARRAY_SIZE(dnv_communities),
};
-static INTEL_PINCTRL_PM_OPS(dnv_pinctrl_pm_ops);
-
static const struct acpi_device_id dnv_pinctrl_acpi_match[] = {
{ "INTC3000", (kernel_ulong_t)&dnv_soc_data },
{ }
@@ -268,7 +267,7 @@ static struct platform_driver dnv_pinctrl_driver = {
.driver = {
.name = "denverton-pinctrl",
.acpi_match_table = dnv_pinctrl_acpi_match,
- .pm = &dnv_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
.id_table = dnv_pinctrl_platform_ids,
};
diff --git a/drivers/pinctrl/intel/pinctrl-elkhartlake.c b/drivers/pinctrl/intel/pinctrl-elkhartlake.c
index 81581ab85..1678634eb 100644
--- a/drivers/pinctrl/intel/pinctrl-elkhartlake.c
+++ b/drivers/pinctrl/intel/pinctrl-elkhartlake.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -485,14 +486,12 @@ static const struct acpi_device_id ehl_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, ehl_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(ehl_pinctrl_pm_ops);
-
static struct platform_driver ehl_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_uid,
.driver = {
.name = "elkhartlake-pinctrl",
.acpi_match_table = ehl_pinctrl_acpi_match,
- .pm = &ehl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(ehl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-emmitsburg.c b/drivers/pinctrl/intel/pinctrl-emmitsburg.c
index 099ec8351..e4798d324 100644
--- a/drivers/pinctrl/intel/pinctrl-emmitsburg.c
+++ b/drivers/pinctrl/intel/pinctrl-emmitsburg.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -358,14 +359,12 @@ static const struct acpi_device_id ebg_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, ebg_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(ebg_pinctrl_pm_ops);
-
static struct platform_driver ebg_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "emmitsburg-pinctrl",
.acpi_match_table = ebg_pinctrl_acpi_match,
- .pm = &ebg_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(ebg_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-geminilake.c b/drivers/pinctrl/intel/pinctrl-geminilake.c
index 9effa06b6..6dcf0ac20 100644
--- a/drivers/pinctrl/intel/pinctrl-geminilake.c
+++ b/drivers/pinctrl/intel/pinctrl-geminilake.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -447,14 +448,12 @@ static const struct acpi_device_id glk_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, glk_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(glk_pinctrl_pm_ops);
-
static struct platform_driver glk_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_uid,
.driver = {
.name = "geminilake-pinctrl",
.acpi_match_table = glk_pinctrl_acpi_match,
- .pm = &glk_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
index 300e1538c..fe3042de8 100644
--- a/drivers/pinctrl/intel/pinctrl-icelake.c
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -668,8 +669,6 @@ static const struct intel_pinctrl_soc_data icln_soc_data = {
.ncommunities = ARRAY_SIZE(icln_communities),
};
-static INTEL_PINCTRL_PM_OPS(icl_pinctrl_pm_ops);
-
static const struct acpi_device_id icl_pinctrl_acpi_match[] = {
{ "INT3455", (kernel_ulong_t)&icllp_soc_data },
{ "INT34C3", (kernel_ulong_t)&icln_soc_data },
@@ -682,7 +681,7 @@ static struct platform_driver icl_pinctrl_driver = {
.driver = {
.name = "icelake-pinctrl",
.acpi_match_table = icl_pinctrl_acpi_match,
- .pm = &icl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(icl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-intel-platform.c b/drivers/pinctrl/intel/pinctrl-intel-platform.c
new file mode 100644
index 000000000..4a19ab3b4
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-intel-platform.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2021-2023, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/string_helpers.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+struct intel_platform_pins {
+ struct pinctrl_pin_desc *pins;
+ size_t npins;
+};
+
+static int intel_platform_pinctrl_prepare_pins(struct device *dev, size_t base,
+ const char *name, u32 size,
+ struct intel_platform_pins *pins)
+{
+ struct pinctrl_pin_desc *descs;
+ char **pin_names;
+ unsigned int i;
+
+ pin_names = devm_kasprintf_strarray(dev, name, size);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
+ descs = devm_krealloc_array(dev, pins->pins, base + size, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ unsigned int pin_number = base + i;
+ char *pin_name = pin_names[i];
+ struct pinctrl_pin_desc *desc;
+
+ /* Unify delimiter for pin name */
+ strreplace(pin_name, '-', '_');
+
+ desc = &descs[pin_number];
+ desc->number = pin_number;
+ desc->name = pin_name;
+ }
+
+ pins->pins = descs;
+ pins->npins = base + size;
+
+ return 0;
+}
+
+static int intel_platform_pinctrl_prepare_group(struct device *dev,
+ struct fwnode_handle *child,
+ struct intel_padgroup *gpp,
+ struct intel_platform_pins *pins)
+{
+ size_t base = pins->npins;
+ const char *name;
+ u32 size;
+ int ret;
+
+ ret = fwnode_property_read_string(child, "intc-gpio-group-name", &name);
+ if (ret)
+ return ret;
+
+ ret = fwnode_property_read_u32(child, "intc-gpio-pad-count", &size);
+ if (ret)
+ return ret;
+
+ ret = intel_platform_pinctrl_prepare_pins(dev, base, name, size, pins);
+ if (ret)
+ return ret;
+
+ gpp->base = base;
+ gpp->size = size;
+ gpp->gpio_base = INTEL_GPIO_BASE_MATCH;
+
+ return 0;
+}
+
+static int intel_platform_pinctrl_prepare_community(struct device *dev,
+ struct intel_community *community,
+ struct intel_platform_pins *pins)
+{
+ struct fwnode_handle *child;
+ struct intel_padgroup *gpps;
+ unsigned int group;
+ size_t ngpps;
+ u32 offset;
+ int ret;
+
+ ret = device_property_read_u32(dev, "intc-gpio-pad-ownership-offset", &offset);
+ if (ret)
+ return ret;
+ community->padown_offset = offset;
+
+ ret = device_property_read_u32(dev, "intc-gpio-pad-configuration-lock-offset", &offset);
+ if (ret)
+ return ret;
+ community->padcfglock_offset = offset;
+
+ ret = device_property_read_u32(dev, "intc-gpio-host-software-pad-ownership-offset", &offset);
+ if (ret)
+ return ret;
+ community->hostown_offset = offset;
+
+ ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-status-offset", &offset);
+ if (ret)
+ return ret;
+ community->is_offset = offset;
+
+ ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-enable-offset", &offset);
+ if (ret)
+ return ret;
+ community->ie_offset = offset;
+
+ ngpps = device_get_child_node_count(dev);
+ if (!ngpps)
+ return -ENODEV;
+
+ gpps = devm_kcalloc(dev, ngpps, sizeof(*gpps), GFP_KERNEL);
+ if (!gpps)
+ return -ENOMEM;
+
+ group = 0;
+ device_for_each_child_node(dev, child) {
+ struct intel_padgroup *gpp = &gpps[group];
+
+ gpp->reg_num = group;
+
+ ret = intel_platform_pinctrl_prepare_group(dev, child, gpp, pins);
+ if (ret) {
+ /* Drop the reference held by the child-node iterator */
+ fwnode_handle_put(child);
+ return ret;
+ }
+
+ group++;
+ }
+
+ community->ngpps = ngpps;
+ community->gpps = gpps;
+
+ return 0;
+}
+
+static int intel_platform_pinctrl_prepare_soc_data(struct device *dev,
+ struct intel_pinctrl_soc_data *data)
+{
+ struct intel_platform_pins pins = {};
+ struct intel_community *communities;
+ size_t ncommunities;
+ unsigned int i;
+ int ret;
+
+ /* Version 1.0 of the specification assumes only a single community per device node */
+ ncommunities = 1;
+ communities = devm_kcalloc(dev, ncommunities, sizeof(*communities), GFP_KERNEL);
+ if (!communities)
+ return -ENOMEM;
+
+ for (i = 0; i < ncommunities; i++) {
+ struct intel_community *community = &communities[i];
+
+ community->barno = i;
+ community->pin_base = pins.npins;
+
+ ret = intel_platform_pinctrl_prepare_community(dev, community, &pins);
+ if (ret)
+ return ret;
+
+ community->npins = pins.npins - community->pin_base;
+ }
+
+ data->ncommunities = ncommunities;
+ data->communities = communities;
+
+ data->npins = pins.npins;
+ data->pins = pins.pins;
+
+ return 0;
+}
+
+static int intel_platform_pinctrl_probe(struct platform_device *pdev)
+{
+ struct intel_pinctrl_soc_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = intel_platform_pinctrl_prepare_soc_data(dev, data);
+ if (ret)
+ return ret;
+
+ return intel_pinctrl_probe(pdev, data);
+}
+
+static const struct acpi_device_id intel_platform_pinctrl_acpi_match[] = {
+ { "INTC105F" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, intel_platform_pinctrl_acpi_match);
+
+static struct platform_driver intel_platform_pinctrl_driver = {
+ .probe = intel_platform_pinctrl_probe,
+ .driver = {
+ .name = "intel-pinctrl",
+ .acpi_match_table = intel_platform_pinctrl_acpi_match,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
+ },
+};
+module_platform_driver(intel_platform_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PINCTRL_INTEL);
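The pin-table preparation in the new driver grows one devm-managed array across groups: devm_kasprintf_strarray() yields names of the form "<prefix>-<N>", strreplace() unifies the delimiter to '_', and devm_krealloc_array() extends the descriptor table in place. A small sketch of the naming step, using a made-up group prefix:

/* Sketch: generate and normalize pin names for a hypothetical 3-pad group. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/string_helpers.h>

static int demo_name_pins(struct device *dev)
{
	char **names;
	unsigned int i;

	names = devm_kasprintf_strarray(dev, "HOSTSW", 3);	/* "HOSTSW-0".. */
	if (IS_ERR(names))
		return PTR_ERR(names);

	for (i = 0; i < 3; i++)
		strreplace(names[i], '-', '_');		/* -> "HOSTSW_0".. */

	return 0;
}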
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 652ba451f..d6f29e6fa 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -677,10 +677,6 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
u32 term = 0, up = 0, value;
void __iomem *padcfg1;
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 5000;
-
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
break;
@@ -690,6 +686,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case 20000:
term = PADCFG1_TERM_20K;
break;
+ case 1: /* Set default strength value in case none is given */
case 5000:
term = PADCFG1_TERM_5K;
break;
@@ -716,6 +713,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case 20000:
term = PADCFG1_TERM_20K;
break;
+ case 1: /* Set default strength value in case none is given */
case 5000:
term = PADCFG1_TERM_5K;
break;
@@ -899,7 +897,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
*
* Return: a GPIO offset, or negative error code if translation can't be done.
*/
-static __maybe_unused int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
+static int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
{
const struct intel_community *community;
const struct intel_padgroup *padgrp;
@@ -1506,8 +1504,8 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
return PTR_ERR_OR_ZERO(pwm);
}
-static int intel_pinctrl_probe(struct platform_device *pdev,
- const struct intel_pinctrl_soc_data *soc_data)
+int intel_pinctrl_probe(struct platform_device *pdev,
+ const struct intel_pinctrl_soc_data *soc_data)
{
struct device *dev = &pdev->dev;
struct intel_pinctrl *pctrl;
@@ -1625,6 +1623,7 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(intel_pinctrl_probe, PINCTRL_INTEL);
int intel_pinctrl_probe_by_hid(struct platform_device *pdev)
{
@@ -1653,7 +1652,7 @@ EXPORT_SYMBOL_NS_GPL(intel_pinctrl_probe_by_uid, PINCTRL_INTEL);
const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data * const *table;
- const struct intel_pinctrl_soc_data *data = NULL;
+ const struct intel_pinctrl_soc_data *data;
struct device *dev = &pdev->dev;
table = device_get_match_data(dev);
@@ -1662,11 +1661,10 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
unsigned int i;
for (i = 0; table[i]; i++) {
- if (acpi_dev_uid_match(adev, table[i]->uid)) {
- data = table[i];
+ if (acpi_dev_uid_match(adev, table[i]->uid))
break;
- }
}
+ data = table[i];
} else {
const struct platform_device_id *id;
@@ -1682,7 +1680,6 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
}
EXPORT_SYMBOL_NS_GPL(intel_pinctrl_get_soc_data, PINCTRL_INTEL);
-#ifdef CONFIG_PM_SLEEP
static bool __intel_gpio_is_direct_irq(u32 value)
{
return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
@@ -1728,7 +1725,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
return false;
}
-int intel_pinctrl_suspend_noirq(struct device *dev)
+static int intel_pinctrl_suspend_noirq(struct device *dev)
{
struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
struct intel_community_context *communities;
@@ -1771,7 +1768,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
@@ -1838,7 +1834,7 @@ static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
}
-int intel_pinctrl_resume_noirq(struct device *dev)
+static int intel_pinctrl_resume_noirq(struct device *dev)
{
struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
const struct intel_community_context *communities;
@@ -1882,8 +1878,10 @@ int intel_pinctrl_resume_noirq(struct device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_pinctrl_resume_noirq);
-#endif
+
+EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(intel_pinctrl_pm_ops, PINCTRL_INTEL) = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend_noirq, intel_pinctrl_resume_noirq)
+};
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 2bb553598..fde65e18c 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -252,19 +252,13 @@ struct intel_pinctrl {
int irq;
};
+int intel_pinctrl_probe(struct platform_device *pdev,
+ const struct intel_pinctrl_soc_data *soc_data);
+
int intel_pinctrl_probe_by_hid(struct platform_device *pdev);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev);
-#ifdef CONFIG_PM_SLEEP
-int intel_pinctrl_suspend_noirq(struct device *dev);
-int intel_pinctrl_resume_noirq(struct device *dev);
-#endif
-
-#define INTEL_PINCTRL_PM_OPS(_name) \
-const struct dev_pm_ops _name = { \
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend_noirq, \
- intel_pinctrl_resume_noirq) \
-}
+extern const struct dev_pm_ops intel_pinctrl_pm_ops;
struct intel_community *intel_get_community(struct intel_pinctrl *pctrl, unsigned int pin);
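With the per-driver INTEL_PINCTRL_PM_OPS() macro gone, every SoC driver points at the single exported intel_pinctrl_pm_ops and wraps it in pm_sleep_ptr() so the reference compiles away when CONFIG_PM_SLEEP is off. The adoption pattern, with an illustrative driver name and match table:

static struct platform_driver example_pinctrl_driver = {
	.probe = intel_pinctrl_probe_by_hid,
	.driver = {
		.name = "example-pinctrl",
		.acpi_match_table = example_pinctrl_acpi_match,	/* hypothetical */
		.pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
	},
};
module_platform_driver(example_pinctrl_driver);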
diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
index 50f137dee..352548042 100644
--- a/drivers/pinctrl/intel/pinctrl-jasperlake.c
+++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -326,14 +327,12 @@ static const struct acpi_device_id jsl_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, jsl_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(jsl_pinctrl_pm_ops);
-
static struct platform_driver jsl_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "jasperlake-pinctrl",
.acpi_match_table = jsl_pinctrl_acpi_match,
- .pm = &jsl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(jsl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-lakefield.c b/drivers/pinctrl/intel/pinctrl-lakefield.c
index 0b94e11b7..adef85db8 100644
--- a/drivers/pinctrl/intel/pinctrl-lakefield.c
+++ b/drivers/pinctrl/intel/pinctrl-lakefield.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -347,14 +348,12 @@ static const struct acpi_device_id lkf_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, lkf_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(lkf_pinctrl_pm_ops);
-
static struct platform_driver lkf_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "lakefield-pinctrl",
.acpi_match_table = lkf_pinctrl_acpi_match,
- .pm = &lkf_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(lkf_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index aa725a5d6..a304d30ea 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -301,8 +302,6 @@ static const struct intel_pinctrl_soc_data lbg_soc_data = {
.ncommunities = ARRAY_SIZE(lbg_communities),
};
-static INTEL_PINCTRL_PM_OPS(lbg_pinctrl_pm_ops);
-
static const struct acpi_device_id lbg_pinctrl_acpi_match[] = {
{ "INT3536", (kernel_ulong_t)&lbg_soc_data },
{ }
@@ -314,7 +313,7 @@ static struct platform_driver lbg_pinctrl_driver = {
.driver = {
.name = "lewisburg-pinctrl",
.acpi_match_table = lbg_pinctrl_acpi_match,
- .pm = &lbg_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(lbg_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index e6878e4cf..1fb0bba8b 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -291,10 +292,9 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
const struct intel_pingroup *grp = &lg->soc->groups[group];
- unsigned long flags;
int i;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
/* Now enable the mux setting for each pin in the group */
for (i = 0; i < grp->grp.npins; i++) {
@@ -312,8 +312,6 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev,
iowrite32(value, reg);
}
- raw_spin_unlock_irqrestore(&lg->lock, flags);
-
return 0;
}
@@ -334,10 +332,9 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
/*
* Reconfigure pin to GPIO mode if needed and issue a warning,
@@ -352,8 +349,6 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Enable input sensing */
lp_gpio_enable_input(conf2);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
-
return 0;
}
@@ -363,14 +358,11 @@ static void lp_gpio_disable_free(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
- unsigned long flags;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
/* Disable input sensing */
lp_gpio_disable_input(conf2);
-
- raw_spin_unlock_irqrestore(&lg->lock, flags);
}
static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -379,10 +371,9 @@ static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
void __iomem *reg = lp_gpio_reg(&lg->chip, pin, LP_CONFIG1);
- unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
value = ioread32(reg);
value &= ~DIR_BIT;
@@ -400,8 +391,6 @@ static int lp_gpio_set_direction(struct pinctrl_dev *pctldev,
}
iowrite32(value, reg);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
-
return 0;
}
@@ -421,13 +410,11 @@ static int lp_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
enum pin_config_param param = pinconf_to_config_param(*config);
- unsigned long flags;
u32 value, pull;
u16 arg;
- raw_spin_lock_irqsave(&lg->lock, flags);
- value = ioread32(conf2);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &lg->lock)
+ value = ioread32(conf2);
pull = value & GPIWP_MASK;
@@ -464,11 +451,10 @@ static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
void __iomem *conf2 = lp_gpio_reg(&lg->chip, pin, LP_CONFIG2);
enum pin_config_param param;
- unsigned long flags;
- int i, ret = 0;
+ unsigned int i;
u32 value;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
value = ioread32(conf2);
@@ -489,19 +475,13 @@ static int lp_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
value |= GPIWP_UP;
break;
default:
- ret = -ENOTSUPP;
+ return -ENOTSUPP;
}
-
- if (ret)
- break;
}
- if (!ret)
- iowrite32(value, conf2);
+ iowrite32(value, conf2);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
-
- return ret;
+ return 0;
}
static const struct pinconf_ops lptlp_pinconf_ops = {
@@ -527,16 +507,13 @@ static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct intel_pinctrl *lg = gpiochip_get_data(chip);
void __iomem *reg = lp_gpio_reg(chip, offset, LP_CONFIG1);
- unsigned long flags;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
if (value)
iowrite32(ioread32(reg) | OUT_LVL_BIT, reg);
else
iowrite32(ioread32(reg) & ~OUT_LVL_BIT, reg);
-
- raw_spin_unlock_irqrestore(&lg->lock, flags);
}
static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -592,11 +569,10 @@ static void lp_irq_ack(struct irq_data *d)
struct intel_pinctrl *lg = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT);
- unsigned long flags;
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
+
iowrite32(BIT(hwirq % 32), reg);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
}
static void lp_irq_unmask(struct irq_data *d)
@@ -613,13 +589,11 @@ static void lp_irq_enable(struct irq_data *d)
struct intel_pinctrl *lg = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
- raw_spin_lock_irqsave(&lg->lock, flags);
- iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &lg->lock)
+ iowrite32(ioread32(reg) | BIT(hwirq % 32), reg);
}
static void lp_irq_disable(struct irq_data *d)
@@ -628,11 +602,9 @@ static void lp_irq_disable(struct irq_data *d)
struct intel_pinctrl *lg = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
- unsigned long flags;
- raw_spin_lock_irqsave(&lg->lock, flags);
- iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &lg->lock)
+ iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg);
gpiochip_disable_irq(gc, hwirq);
}
@@ -642,7 +614,6 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *lg = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
void __iomem *reg;
u32 value;
@@ -656,7 +627,8 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
return -EBUSY;
}
- raw_spin_lock_irqsave(&lg->lock, flags);
+ guard(raw_spinlock_irqsave)(&lg->lock);
+
value = ioread32(reg);
/* set both TRIG_SEL and INV bits to 0 for rising edge */
@@ -682,8 +654,6 @@ static int lp_irq_set_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&lg->lock, flags);
-
return 0;
}
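The lynxpoint conversion above replaces open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the scope-based helpers from <linux/cleanup.h>: guard() holds the lock until the enclosing scope ends, scoped_guard() confines it to a single statement or block, and early returns (as in lp_pin_config_set()) no longer need unlock bookkeeping. A minimal sketch, with hypothetical example_* names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static int shared_counter;

/* guard(): the lock is dropped automatically on every return path. */
static int example_update(int delta)
{
	guard(raw_spinlock_irqsave)(&example_lock);

	if (delta < 0)
		return -EINVAL;	/* unlocks here, too */

	shared_counter += delta;
	return 0;
}

/* scoped_guard(): critical section limited to the braced statement. */
static int example_read(void)
{
	int v;

	scoped_guard(raw_spinlock_irqsave, &example_lock)
		v = shared_counter;

	return v;
}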
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
index 7ced2b402..cc44890c6 100644
--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -9,6 +9,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -589,14 +590,12 @@ static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, mtl_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(mtl_pinctrl_pm_ops);
-
static struct platform_driver mtl_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "meteorlake-pinctrl",
.acpi_match_table = mtl_pinctrl_acpi_match,
- .pm = &mtl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(mtl_pinctrl_driver);
diff --git a/drivers/pinctrl/intel/pinctrl-meteorpoint.c b/drivers/pinctrl/intel/pinctrl-meteorpoint.c
new file mode 100644
index 000000000..77e97775a
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-meteorpoint.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Meteor Point PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2022-2023, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define MTP_PAD_OWN 0x0b0
+#define MTP_PADCFGLOCK 0x110
+#define MTP_HOSTSW_OWN 0x150
+#define MTP_GPI_IS 0x200
+#define MTP_GPI_IE 0x220
+
+#define MTP_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define MTP_COMMUNITY(b, s, e, g) \
+ INTEL_COMMUNITY_GPPS(b, s, e, g, MTP)
+
+/* Meteor Point-S */
+static const struct pinctrl_pin_desc mtps_pins[] = {
+ /* GPP_D */
+ PINCTRL_PIN(0, "GPP_D_0"),
+ PINCTRL_PIN(1, "GPP_D_1"),
+ PINCTRL_PIN(2, "GPP_D_2"),
+ PINCTRL_PIN(3, "GPP_D_3"),
+ PINCTRL_PIN(4, "GPP_D_4"),
+ PINCTRL_PIN(5, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(6, "CRF_CLKREQ"),
+ PINCTRL_PIN(7, "GPP_D_7"),
+ PINCTRL_PIN(8, "GPP_D_8"),
+ PINCTRL_PIN(9, "SML0CLK"),
+ PINCTRL_PIN(10, "SML0DATA"),
+ PINCTRL_PIN(11, "GPP_D_11"),
+ PINCTRL_PIN(12, "GPP_D_12"),
+ PINCTRL_PIN(13, "GPP_D_13"),
+ PINCTRL_PIN(14, "GPP_D_14"),
+ PINCTRL_PIN(15, "GPP_D_15"),
+ PINCTRL_PIN(16, "GPP_D_16"),
+ PINCTRL_PIN(17, "GPP_D_17"),
+ PINCTRL_PIN(18, "GPP_D_18"),
+ PINCTRL_PIN(19, "GPP_D_19"),
+ PINCTRL_PIN(20, "GPP_D_20"),
+ PINCTRL_PIN(21, "GPP_D_21"),
+ PINCTRL_PIN(22, "GPP_D_22"),
+ PINCTRL_PIN(23, "GPP_D_23"),
+ PINCTRL_PIN(24, "GSPI3_CLK_LOOPBK"),
+ /* GPP_R */
+ PINCTRL_PIN(25, "HDA_BCLK"),
+ PINCTRL_PIN(26, "HDA_SYNC"),
+ PINCTRL_PIN(27, "HDA_SDO"),
+ PINCTRL_PIN(28, "HDA_SDI_0"),
+ PINCTRL_PIN(29, "HDA_RSTB"),
+ PINCTRL_PIN(30, "GPP_R_5"),
+ PINCTRL_PIN(31, "GPP_R_6"),
+ PINCTRL_PIN(32, "GPP_R_7"),
+ PINCTRL_PIN(33, "GPP_R_8"),
+ PINCTRL_PIN(34, "GPP_R_9"),
+ PINCTRL_PIN(35, "GPP_R_10"),
+ PINCTRL_PIN(36, "GPP_R_11"),
+ PINCTRL_PIN(37, "GPP_R_12"),
+ PINCTRL_PIN(38, "GSPI2_CLK_LOOPBK"),
+ /* GPP_J */
+ PINCTRL_PIN(39, "GPP_J_0"),
+ PINCTRL_PIN(40, "CNV_BRI_DT"),
+ PINCTRL_PIN(41, "CNV_BRI_RSP"),
+ PINCTRL_PIN(42, "CNV_RGI_DT"),
+ PINCTRL_PIN(43, "CNV_RGI_RSP"),
+ PINCTRL_PIN(44, "GPP_J_5"),
+ PINCTRL_PIN(45, "GPP_J_6"),
+ PINCTRL_PIN(46, "BOOTHALT_B"),
+ PINCTRL_PIN(47, "RTCCLKOUT"),
+ PINCTRL_PIN(48, "BPKI3C_SDA"),
+ PINCTRL_PIN(49, "BPKI3C_SCL"),
+ PINCTRL_PIN(50, "DAM"),
+ PINCTRL_PIN(51, "HDACPU_SDI"),
+ PINCTRL_PIN(52, "HDACPU_SDO"),
+ PINCTRL_PIN(53, "HDACPU_BCLK"),
+ PINCTRL_PIN(54, "AUX_PWRGD"),
+ PINCTRL_PIN(55, "GLB_RST_WARN_B"),
+ PINCTRL_PIN(56, "RESET_SYNCB"),
+ /* vGPIO */
+ PINCTRL_PIN(57, "CNV_BTEN"),
+ PINCTRL_PIN(58, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(59, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(60, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(61, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(62, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(63, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(64, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(65, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(66, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(67, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(68, "vUART0_TXD"),
+ PINCTRL_PIN(69, "vUART0_RXD"),
+ PINCTRL_PIN(70, "vUART0_CTS_B"),
+ PINCTRL_PIN(71, "vUART0_RTS_B"),
+ PINCTRL_PIN(72, "vISH_UART0_TXD"),
+ PINCTRL_PIN(73, "vISH_UART0_RXD"),
+ PINCTRL_PIN(74, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(75, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(76, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(77, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(78, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(79, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(80, "vI2S2_SCLK"),
+ PINCTRL_PIN(81, "vI2S2_SFRM"),
+ PINCTRL_PIN(82, "vI2S2_TXD"),
+ PINCTRL_PIN(83, "vI2S2_RXD"),
+ PINCTRL_PIN(84, "THC0_WOT_INT"),
+ PINCTRL_PIN(85, "THC1_WOT_INT"),
+ PINCTRL_PIN(86, "THC0_WHC_INT"),
+ PINCTRL_PIN(87, "THC1_WHC_INT"),
+ /* GPP_A */
+ PINCTRL_PIN(88, "ESPI_IO_0"),
+ PINCTRL_PIN(89, "ESPI_IO_1"),
+ PINCTRL_PIN(90, "ESPI_IO_2"),
+ PINCTRL_PIN(91, "ESPI_IO_3"),
+ PINCTRL_PIN(92, "ESPI_CS0B"),
+ PINCTRL_PIN(93, "ESPI_CLK"),
+ PINCTRL_PIN(94, "ESPI_RESETB"),
+ PINCTRL_PIN(95, "ESPI_CS1B"),
+ PINCTRL_PIN(96, "ESPI_CS2B"),
+ PINCTRL_PIN(97, "ESPI_CS3B"),
+ PINCTRL_PIN(98, "ESPI_ALERT0B"),
+ PINCTRL_PIN(99, "ESPI_ALERT1B"),
+ PINCTRL_PIN(100, "ESPI_ALERT2B"),
+ PINCTRL_PIN(101, "ESPI_ALERT3B"),
+ PINCTRL_PIN(102, "ESPI_CLK_LOOPBK"),
+ /* DIR_ESPI */
+ PINCTRL_PIN(103, "PWRBTNB_OUT"),
+ PINCTRL_PIN(104, "DMI_PERSTB"),
+ PINCTRL_PIN(105, "DMI_CLKREQB"),
+ PINCTRL_PIN(106, "DIR_ESPI_IO_0"),
+ PINCTRL_PIN(107, "DIR_ESPI_IO_1"),
+ PINCTRL_PIN(108, "DIR_ESPI_IO_2"),
+ PINCTRL_PIN(109, "DIR_ESPI_IO_3"),
+ PINCTRL_PIN(110, "DIR_ESPI_CSB"),
+ PINCTRL_PIN(111, "DIR_ESPI_RESETB"),
+ PINCTRL_PIN(112, "DIR_ESPI_CLK"),
+ PINCTRL_PIN(113, "DIR_ESPI_RCLK"),
+ PINCTRL_PIN(114, "DIR_ESPI_ALERTB"),
+ /* GPP_B */
+ PINCTRL_PIN(115, "GPP_B_0"),
+ PINCTRL_PIN(116, "GPP_B_1"),
+ PINCTRL_PIN(117, "GPP_B_2"),
+ PINCTRL_PIN(118, "GPP_B_3"),
+ PINCTRL_PIN(119, "GPP_B_4"),
+ PINCTRL_PIN(120, "GPP_B_5"),
+ PINCTRL_PIN(121, "CLKOUT_48"),
+ PINCTRL_PIN(122, "GPP_B_7"),
+ PINCTRL_PIN(123, "GPP_B_8"),
+ PINCTRL_PIN(124, "GPP_B_9"),
+ PINCTRL_PIN(125, "GPP_B_10"),
+ PINCTRL_PIN(126, "GPP_B_11"),
+ PINCTRL_PIN(127, "SLP_S0B"),
+ PINCTRL_PIN(128, "PLTRSTB"),
+ PINCTRL_PIN(129, "GPP_B_14"),
+ PINCTRL_PIN(130, "GPP_B_15"),
+ PINCTRL_PIN(131, "GPP_B_16"),
+ PINCTRL_PIN(132, "GPP_B_17"),
+ PINCTRL_PIN(133, "GPP_B_18"),
+ PINCTRL_PIN(134, "FUSA_DIAGTEST_EN"),
+ PINCTRL_PIN(135, "FUSA_DIAGTEST_MODE"),
+ PINCTRL_PIN(136, "GPP_B_21"),
+ /* SPI0 */
+ PINCTRL_PIN(137, "SPI0_IO_2"),
+ PINCTRL_PIN(138, "SPI0_IO_3"),
+ PINCTRL_PIN(139, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(140, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(141, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(142, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(143, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(144, "SPI0_CLK"),
+ PINCTRL_PIN(145, "SPI0_CLK_LOOPBK"),
+ /* GPP_C */
+ PINCTRL_PIN(146, "SMBCLK"),
+ PINCTRL_PIN(147, "SMBDATA"),
+ PINCTRL_PIN(148, "SMBALERTB"),
+ PINCTRL_PIN(149, "GPP_C_3"),
+ PINCTRL_PIN(150, "GPP_C_4"),
+ PINCTRL_PIN(151, "GPP_C_5"),
+ PINCTRL_PIN(152, "GPP_C_6"),
+ PINCTRL_PIN(153, "GPP_C_7"),
+ PINCTRL_PIN(154, "GPP_C_8"),
+ PINCTRL_PIN(155, "GPP_C_9"),
+ PINCTRL_PIN(156, "GPP_C_10"),
+ PINCTRL_PIN(157, "GPP_C_11"),
+ PINCTRL_PIN(158, "GPP_C_12"),
+ PINCTRL_PIN(159, "GPP_C_13"),
+ PINCTRL_PIN(160, "GPP_C_14"),
+ PINCTRL_PIN(161, "GPP_C_15"),
+ PINCTRL_PIN(162, "GPP_C_16"),
+ PINCTRL_PIN(163, "GPP_C_17"),
+ PINCTRL_PIN(164, "GPP_C_18"),
+ PINCTRL_PIN(165, "GPP_C_19"),
+ PINCTRL_PIN(166, "GPP_C_20"),
+ PINCTRL_PIN(167, "GPP_C_21"),
+ PINCTRL_PIN(168, "GPP_C_22"),
+ PINCTRL_PIN(169, "GPP_C_23"),
+ /* GPP_H */
+ PINCTRL_PIN(170, "GPP_H_0"),
+ PINCTRL_PIN(171, "GPP_H_1"),
+ PINCTRL_PIN(172, "GPP_H_2"),
+ PINCTRL_PIN(173, "GPP_H_3"),
+ PINCTRL_PIN(174, "GPP_H_4"),
+ PINCTRL_PIN(175, "GPP_H_5"),
+ PINCTRL_PIN(176, "GPP_H_6"),
+ PINCTRL_PIN(177, "GPP_H_7"),
+ PINCTRL_PIN(178, "GPP_H_8"),
+ PINCTRL_PIN(179, "GPP_H_9"),
+ PINCTRL_PIN(180, "GPP_H_10"),
+ PINCTRL_PIN(181, "GPP_H_11"),
+ PINCTRL_PIN(182, "GPP_H_12"),
+ PINCTRL_PIN(183, "GPP_H_13"),
+ PINCTRL_PIN(184, "GPP_H_14"),
+ PINCTRL_PIN(185, "GPP_H_15"),
+ PINCTRL_PIN(186, "GPP_H_16"),
+ PINCTRL_PIN(187, "GPP_H_17"),
+ PINCTRL_PIN(188, "GPP_H_18"),
+ PINCTRL_PIN(189, "GPP_H_19"),
+ /* vGPIO_3 */
+ PINCTRL_PIN(190, "CPU_PCIE_LNK_DN_0"),
+ PINCTRL_PIN(191, "CPU_PCIE_LNK_DN_1"),
+ PINCTRL_PIN(192, "CPU_PCIE_LNK_DN_2"),
+ PINCTRL_PIN(193, "CPU_PCIE_LNK_DN_3"),
+ /* vGPIO_0 */
+ PINCTRL_PIN(194, "ESPI_USB_OCB_0"),
+ PINCTRL_PIN(195, "ESPI_USB_OCB_1"),
+ PINCTRL_PIN(196, "ESPI_USB_OCB_2"),
+ PINCTRL_PIN(197, "ESPI_USB_OCB_3"),
+ PINCTRL_PIN(198, "USB_CPU_OCB_0"),
+ PINCTRL_PIN(199, "USB_CPU_OCB_1"),
+ PINCTRL_PIN(200, "USB_CPU_OCB_2"),
+ PINCTRL_PIN(201, "USB_CPU_OCB_3"),
+ /* vGPIO_4 */
+ PINCTRL_PIN(202, "ESPI_ISCLK_XTAL_CLKREQ"),
+ PINCTRL_PIN(203, "ISCLK_ESPI_XTAL_CLKACK"),
+ PINCTRL_PIN(204, "ME_SLPC_FTPM_ENABLE"),
+ PINCTRL_PIN(205, "GP_SLPC_DTFUS_CORE_SPITPM_DIS"),
+ PINCTRL_PIN(206, "GP_SLPC_SPI_STRAP_TOS"),
+ PINCTRL_PIN(207, "GP_SLPC_DTFUS_CORE_SPITPM_DIS_L01"),
+ PINCTRL_PIN(208, "GP_SLPC_SPI_STRAP_TOS_L01"),
+ PINCTRL_PIN(209, "LPC_PRR_TS_OVR"),
+ PINCTRL_PIN(210, "ITSS_KU1_SHTDWN"),
+ PINCTRL_PIN(211, "vGPIO_SPARE_0"),
+ PINCTRL_PIN(212, "vGPIO_SPARE_1"),
+ PINCTRL_PIN(213, "vGPIO_SPARE_2"),
+ PINCTRL_PIN(214, "vGPIO_SPARE_3"),
+ PINCTRL_PIN(215, "vGPIO_SPARE_4"),
+ PINCTRL_PIN(216, "vGPIO_SPARE_5"),
+ PINCTRL_PIN(217, "vGPIO_SPARE_6"),
+ PINCTRL_PIN(218, "vGPIO_SPARE_7"),
+ PINCTRL_PIN(219, "vGPIO_SPARE_8"),
+ PINCTRL_PIN(220, "vGPIO_SPARE_9"),
+ PINCTRL_PIN(221, "vGPIO_SPARE_10"),
+ PINCTRL_PIN(222, "vGPIO_SPARE_11"),
+ PINCTRL_PIN(223, "vGPIO_SPARE_12"),
+ PINCTRL_PIN(224, "vGPIO_SPARE_13"),
+ PINCTRL_PIN(225, "vGPIO_SPARE_14"),
+ PINCTRL_PIN(226, "vGPIO_SPARE_15"),
+ PINCTRL_PIN(227, "vGPIO_SPARE_16"),
+ PINCTRL_PIN(228, "vGPIO_SPARE_17"),
+ PINCTRL_PIN(229, "vGPIO_SPARE_18"),
+ PINCTRL_PIN(230, "vGPIO_SPARE_19"),
+ PINCTRL_PIN(231, "vGPIO_SPARE_20"),
+ PINCTRL_PIN(232, "vGPIO_SPARE_21"),
+ /* GPP_S */
+ PINCTRL_PIN(233, "GPP_S_0"),
+ PINCTRL_PIN(234, "GPP_S_1"),
+ PINCTRL_PIN(235, "GPP_S_2"),
+ PINCTRL_PIN(236, "GPP_S_3"),
+ PINCTRL_PIN(237, "GPP_S_4"),
+ PINCTRL_PIN(238, "GPP_S_5"),
+ PINCTRL_PIN(239, "GPP_S_6"),
+ PINCTRL_PIN(240, "GPP_S_7"),
+ /* GPP_E */
+ PINCTRL_PIN(241, "GPP_E_0"),
+ PINCTRL_PIN(242, "GPP_E_1"),
+ PINCTRL_PIN(243, "GPP_E_2"),
+ PINCTRL_PIN(244, "GPP_E_3"),
+ PINCTRL_PIN(245, "GPP_E_4"),
+ PINCTRL_PIN(246, "GPP_E_5"),
+ PINCTRL_PIN(247, "GPP_E_6"),
+ PINCTRL_PIN(248, "GPP_E_7"),
+ PINCTRL_PIN(249, "GPP_E_8"),
+ PINCTRL_PIN(250, "GPP_E_9"),
+ PINCTRL_PIN(251, "GPP_E_10"),
+ PINCTRL_PIN(252, "GPP_E_11"),
+ PINCTRL_PIN(253, "GPP_E_12"),
+ PINCTRL_PIN(254, "GPP_E_13"),
+ PINCTRL_PIN(255, "GPP_E_14"),
+ PINCTRL_PIN(256, "GPP_E_15"),
+ PINCTRL_PIN(257, "GPP_E_16"),
+ PINCTRL_PIN(258, "GPP_E_17"),
+ PINCTRL_PIN(259, "GPP_E_18"),
+ PINCTRL_PIN(260, "GPP_E_19"),
+ PINCTRL_PIN(261, "GPP_E_20"),
+ PINCTRL_PIN(262, "GPP_E_21"),
+ PINCTRL_PIN(263, "SPI1_CLK_LOOPBK"),
+ /* GPP_K */
+ PINCTRL_PIN(264, "GPP_K_0"),
+ PINCTRL_PIN(265, "GPP_K_1"),
+ PINCTRL_PIN(266, "GPP_K_2"),
+ PINCTRL_PIN(267, "GPP_K_3"),
+ PINCTRL_PIN(268, "GPP_K_4"),
+ PINCTRL_PIN(269, "GPP_K_5"),
+ PINCTRL_PIN(270, "FUSE_SORT_BUMP_0"),
+ PINCTRL_PIN(271, "FUSE_SORT_BUMP_1"),
+ PINCTRL_PIN(272, "CORE_VID_0"),
+ PINCTRL_PIN(273, "CORE_VID_1"),
+ PINCTRL_PIN(274, "FUSE_SORT_BUMP_2"),
+ PINCTRL_PIN(275, "MISC_SPARE"),
+ PINCTRL_PIN(276, "SYS_RESETB"),
+ PINCTRL_PIN(277, "MLK_RSTB"),
+ /* GPP_F */
+ PINCTRL_PIN(278, "SATAXPCIE_3"),
+ PINCTRL_PIN(279, "SATAXPCIE_4"),
+ PINCTRL_PIN(280, "SATAXPCIE_5"),
+ PINCTRL_PIN(281, "SATAXPCIE_6"),
+ PINCTRL_PIN(282, "SATAXPCIE_7"),
+ PINCTRL_PIN(283, "SATA_DEVSLP_3"),
+ PINCTRL_PIN(284, "SATA_DEVSLP_4"),
+ PINCTRL_PIN(285, "SATA_DEVSLP_5"),
+ PINCTRL_PIN(286, "SATA_DEVSLP_6"),
+ PINCTRL_PIN(287, "GPP_F_9"),
+ PINCTRL_PIN(288, "GPP_F_10"),
+ PINCTRL_PIN(289, "GPP_F_11"),
+ PINCTRL_PIN(290, "GPP_F_12"),
+ PINCTRL_PIN(291, "GPP_F_13"),
+ PINCTRL_PIN(292, "GPP_F_14"),
+ PINCTRL_PIN(293, "GPP_F_15"),
+ PINCTRL_PIN(294, "GPP_F_16"),
+ PINCTRL_PIN(295, "GPP_F_17"),
+ PINCTRL_PIN(296, "GPP_F_18"),
+ PINCTRL_PIN(297, "DNX_FORCE_RELOAD"),
+ PINCTRL_PIN(298, "GPP_F_20"),
+ PINCTRL_PIN(299, "GPP_F_21"),
+ PINCTRL_PIN(300, "GPP_F_22"),
+ PINCTRL_PIN(301, "GPP_F_23"),
+ /* GPP_I */
+ PINCTRL_PIN(302, "GPP_I_0"),
+ PINCTRL_PIN(303, "GPP_I_1"),
+ PINCTRL_PIN(304, "GPP_I_2"),
+ PINCTRL_PIN(305, "GPP_I_3"),
+ PINCTRL_PIN(306, "GPP_I_4"),
+ PINCTRL_PIN(307, "GPP_I_5"),
+ PINCTRL_PIN(308, "GPP_I_6"),
+ PINCTRL_PIN(309, "GPP_I_7"),
+ PINCTRL_PIN(310, "GPP_I_8"),
+ PINCTRL_PIN(311, "GPP_I_9"),
+ PINCTRL_PIN(312, "GPP_I_10"),
+ PINCTRL_PIN(313, "GPP_I_11"),
+ PINCTRL_PIN(314, "GPP_I_12"),
+ PINCTRL_PIN(315, "GPP_I_13"),
+ PINCTRL_PIN(316, "GPP_I_14"),
+ PINCTRL_PIN(317, "GPP_I_15"),
+ PINCTRL_PIN(318, "GPP_I_16"),
+ PINCTRL_PIN(319, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(320, "GSPI1_CLK_LOOPBK"),
+ PINCTRL_PIN(321, "ISH_I3C0_CLK_LOOPBK"),
+ PINCTRL_PIN(322, "I3C0_CLK_LOOPBK"),
+ /* JTAG_CPU */
+ PINCTRL_PIN(323, "JTAG_TDO"),
+ PINCTRL_PIN(324, "JTAGX"),
+ PINCTRL_PIN(325, "PRDYB"),
+ PINCTRL_PIN(326, "PREQB"),
+ PINCTRL_PIN(327, "JTAG_TDI"),
+ PINCTRL_PIN(328, "JTAG_TMS"),
+ PINCTRL_PIN(329, "JTAG_TCK"),
+ PINCTRL_PIN(330, "DBG_PMODE"),
+ PINCTRL_PIN(331, "CPU_TRSTB"),
+ PINCTRL_PIN(332, "CPUPWRGD"),
+ PINCTRL_PIN(333, "PM_SPARE0"),
+ PINCTRL_PIN(334, "PM_SPARE1"),
+ PINCTRL_PIN(335, "CRASHLOG_TRIG_N"),
+ PINCTRL_PIN(336, "TRIGGER_IN"),
+ PINCTRL_PIN(337, "TRIGGER_OUT"),
+ PINCTRL_PIN(338, "FBRK_OUT_N"),
+};
+
+static const struct intel_padgroup mtps_community0_gpps[] = {
+ MTP_GPP(0, 0, 24, 0), /* GPP_D */
+ MTP_GPP(1, 25, 38, 32), /* GPP_R */
+ MTP_GPP(2, 39, 56, 64), /* GPP_J */
+ MTP_GPP(3, 57, 87, 96), /* vGPIO */
+};
+
+static const struct intel_padgroup mtps_community1_gpps[] = {
+ MTP_GPP(0, 88, 102, 128), /* GPP_A */
+ MTP_GPP(1, 103, 114, 160), /* DIR_ESPI */
+ MTP_GPP(2, 115, 136, 192), /* GPP_B */
+};
+
+static const struct intel_padgroup mtps_community3_gpps[] = {
+ MTP_GPP(0, 137, 145, 224), /* SPI0 */
+ MTP_GPP(1, 146, 169, 256), /* GPP_C */
+ MTP_GPP(2, 170, 189, 288), /* GPP_H */
+ MTP_GPP(3, 190, 193, 320), /* vGPIO_3 */
+ MTP_GPP(4, 194, 201, 352), /* vGPIO_0 */
+ MTP_GPP(5, 202, 232, 384), /* vGPIO_4 */
+};
+
+static const struct intel_padgroup mtps_community4_gpps[] = {
+ MTP_GPP(0, 233, 240, 416), /* GPP_S */
+ MTP_GPP(1, 241, 263, 448), /* GPP_E */
+ MTP_GPP(2, 264, 277, 480), /* GPP_K */
+ MTP_GPP(3, 278, 301, 512), /* GPP_F */
+};
+
+static const struct intel_padgroup mtps_community5_gpps[] = {
+ MTP_GPP(0, 302, 322, 544), /* GPP_I */
+ MTP_GPP(1, 323, 338, 576), /* JTAG_CPU */
+};
+
+static const struct intel_community mtps_communities[] = {
+ MTP_COMMUNITY(0, 0, 87, mtps_community0_gpps),
+ MTP_COMMUNITY(1, 88, 136, mtps_community1_gpps),
+ MTP_COMMUNITY(2, 137, 232, mtps_community3_gpps),
+ MTP_COMMUNITY(3, 233, 301, mtps_community4_gpps),
+ MTP_COMMUNITY(4, 302, 338, mtps_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data mtps_soc_data = {
+ .pins = mtps_pins,
+ .npins = ARRAY_SIZE(mtps_pins),
+ .communities = mtps_communities,
+ .ncommunities = ARRAY_SIZE(mtps_communities),
+};
+
+static const struct acpi_device_id mtp_pinctrl_acpi_match[] = {
+ { "INTC1084", (kernel_ulong_t)&mtps_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, mtp_pinctrl_acpi_match);
+
+static struct platform_driver mtp_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "meteorpoint-pinctrl",
+ .acpi_match_table = mtp_pinctrl_acpi_match,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
+ },
+};
+module_platform_driver(mtp_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Meteor Point PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PINCTRL_INTEL);
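In the new Meteor Point tables, each MTP_GPP() entry advances the Linux GPIO base to the next multiple of 32, so every pad group starts on a fresh 32-bit register word even when the group holds fewer than 32 pads, while the community tables keep the pin numbering itself contiguous. A sketch (not driver code) of the sparse pin-to-GPIO mapping implied by mtps_community0_gpps[] above:

struct example_gpp { unsigned int base, size, gpio_base; };

static const struct example_gpp community0[] = {
	{  0, 25,  0 },	/* GPP_D: pins 0..24  -> GPIOs 0..24  */
	{ 25, 14, 32 },	/* GPP_R: pins 25..38 -> GPIOs 32..45 */
	{ 39, 18, 64 },	/* GPP_J: pins 39..56 -> GPIOs 64..81 */
	{ 57, 31, 96 },	/* vGPIO: pins 57..87 -> GPIOs 96..126 */
};

static int example_pin_to_gpio(unsigned int pin)
{
	unsigned int i;

	for (i = 0; i < 4; i++) {
		const struct example_gpp *g = &community0[i];

		if (pin >= g->base && pin < g->base + g->size)
			return g->gpio_base + (pin - g->base);
	}
	return -1;	/* pin not in this community */
}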
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index b7a40ab0b..55df9d2cf 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -579,14 +580,12 @@ static const struct acpi_device_id spt_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, spt_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(spt_pinctrl_pm_ops);
-
static struct platform_driver spt_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "sunrisepoint-pinctrl",
.acpi_match_table = spt_pinctrl_acpi_match,
- .pm = &spt_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
diff --git a/drivers/pinctrl/intel/pinctrl-tangier.c b/drivers/pinctrl/intel/pinctrl-tangier.c
index 40dd60c9e..2cb0b4758 100644
--- a/drivers/pinctrl/intel/pinctrl-tangier.c
+++ b/drivers/pinctrl/intel/pinctrl-tangier.c
@@ -9,6 +9,7 @@
*/
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -220,7 +221,6 @@ static int tng_pinmux_set_mux(struct pinctrl_dev *pctldev,
const struct intel_pingroup *grp = &tp->groups[group];
u32 bits = grp->mode << BUFCFG_PINMODE_SHIFT;
u32 mask = BUFCFG_PINMODE_MASK;
- unsigned long flags;
unsigned int i;
/*
@@ -232,11 +232,11 @@ static int tng_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EBUSY;
}
+ guard(raw_spinlock_irqsave)(&tp->lock);
+
/* Now enable the mux setting for each pin in the group */
- raw_spin_lock_irqsave(&tp->lock, flags);
for (i = 0; i < grp->grp.npins; i++)
tng_update_bufcfg(tp, grp->grp.pins[i], bits, mask);
- raw_spin_unlock_irqrestore(&tp->lock, flags);
return 0;
}
@@ -248,14 +248,13 @@ static int tng_gpio_request_enable(struct pinctrl_dev *pctldev,
struct tng_pinctrl *tp = pinctrl_dev_get_drvdata(pctldev);
u32 bits = BUFCFG_PINMODE_GPIO << BUFCFG_PINMODE_SHIFT;
u32 mask = BUFCFG_PINMODE_MASK;
- unsigned long flags;
if (!tng_buf_available(tp, pin))
return -EBUSY;
- raw_spin_lock_irqsave(&tp->lock, flags);
+ guard(raw_spinlock_irqsave)(&tp->lock);
+
tng_update_bufcfg(tp, pin, bits, mask);
- raw_spin_unlock_irqrestore(&tp->lock, flags);
return 0;
}
@@ -360,7 +359,6 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
unsigned int param = pinconf_to_config_param(config);
unsigned int arg = pinconf_to_config_argument(config);
u32 mask, term, value = 0;
- unsigned long flags;
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -368,20 +366,20 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
break;
case PIN_CONFIG_BIAS_PULL_UP:
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 20000;
-
switch (arg) {
case 50000:
term = BUFCFG_PUPD_VAL_50K;
break;
+ case 1: /* Set default strength value in case none is given */
case 20000:
term = BUFCFG_PUPD_VAL_20K;
break;
case 2000:
term = BUFCFG_PUPD_VAL_2K;
break;
+ case 910:
+ term = BUFCFG_PUPD_VAL_910;
+ break;
default:
return -EINVAL;
}
@@ -391,20 +389,20 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 20000;
-
switch (arg) {
case 50000:
term = BUFCFG_PUPD_VAL_50K;
break;
+ case 1: /* Set default strength value in case none is given */
case 20000:
term = BUFCFG_PUPD_VAL_20K;
break;
case 2000:
term = BUFCFG_PUPD_VAL_2K;
break;
+ case 910:
+ term = BUFCFG_PUPD_VAL_910;
+ break;
default:
return -EINVAL;
}
@@ -432,9 +430,9 @@ static int tng_config_set_pin(struct tng_pinctrl *tp, unsigned int pin,
return -EINVAL;
}
- raw_spin_lock_irqsave(&tp->lock, flags);
+ guard(raw_spinlock_irqsave)(&tp->lock);
+
tng_update_bufcfg(tp, pin, value, mask);
- raw_spin_unlock_irqrestore(&tp->lock, flags);
return 0;
}
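The tangier bias hunks fold the "argument 1 means default" convention into the strength switch itself: pinconf packs the bias strength into the config argument, a bare DT property like bias-pull-up arrives as argument 1 (now sharing the 20 kOhm case), and 910 Ohm termination is newly accepted. A sketch of the param/argument unpacking, using the generic pinconf helpers (example_* is illustrative):

#include <linux/pinctrl/pinconf-generic.h>

/* Sketch: decode one pull-up config word the way tng_config_set_pin() does. */
static int example_pull_up_ohms(unsigned long config)
{
	enum pin_config_param param = pinconf_to_config_param(config);
	u32 arg = pinconf_to_config_argument(config);

	if (param != PIN_CONFIG_BIAS_PULL_UP)
		return -EINVAL;

	switch (arg) {
	case 1:		/* "default" strength requested */
	case 20000:
		return 20000;
	case 50000:
	case 2000:
	case 910:
		return arg;
	default:
		return -EINVAL;
	}
}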
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 4768a69a9..80cd7a06f 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
@@ -743,14 +744,12 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
-static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
-
static struct platform_driver tgl_pinctrl_driver = {
.probe = intel_pinctrl_probe_by_hid,
.driver = {
.name = "tigerlake-pinctrl",
.acpi_match_table = tgl_pinctrl_acpi_match,
- .pm = &tgl_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops),
},
};
module_platform_driver(tgl_pinctrl_driver);
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index c3d59eddd..d972584c0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -56,12 +56,12 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->name, grp->name);
+ func->name, grp->grp.name);
- for (i = 0; i < grp->num_pins; i++) {
+ for (i = 0; i < grp->grp.npins; i++) {
const struct mtk_pin_desc *desc;
int *pin_modes = grp->data;
- int pin = grp->pins[i];
+ int pin = grp->grp.pins[i];
desc = (const struct mtk_pin_desc *)&hw->soc->pins[pin];
if (!desc->name)
@@ -602,13 +602,12 @@ static int mtk_build_groups(struct mtk_pinctrl *hw)
for (i = 0; i < hw->soc->ngrps; i++) {
const struct group_desc *group = hw->soc->grps + i;
+ const struct pingroup *grp = &group->grp;
- err = pinctrl_generic_add_group(hw->pctrl, group->name,
- group->pins, group->num_pins,
+ err = pinctrl_generic_add_group(hw->pctrl, grp->name, grp->pins, grp->npins,
group->data);
if (err < 0) {
- dev_err(hw->dev, "Failed to register group %s\n",
- group->name);
+ dev_err(hw->dev, "Failed to register group %s\n", grp->name);
return err;
}
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
index e1b4b82b9..e0313e7a1 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.h
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
@@ -37,12 +37,10 @@
.funcs = NULL, \
}
-#define PINCTRL_PIN_GROUP(name, id) \
- { \
- name, \
- id##_pins, \
- ARRAY_SIZE(id##_pins), \
- id##_funcs, \
+#define PINCTRL_PIN_GROUP(_name_, id) \
+ { \
+ .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)), \
+ .data = id##_funcs, \
}
int mtk_moore_pinctrl_probe(struct platform_device *pdev,
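Both MediaTek macro rewrites (moore.h here, paris.h below) converge on the generic PINCTRL_PINGROUP() initializer from <linux/pinctrl/pinctrl.h>: the group's name/pins/npins triplet lives in an embedded struct pingroup instead of three duplicated fields. A sketch of the consolidated layout, with illustrative example_* names:

#include <linux/pinctrl/pinctrl.h>

static const unsigned int example_i2c_pins[] = { 10, 11 };
static int example_i2c_funcs[] = { 1, 1 };

struct example_group_desc {
	struct pingroup grp;	/* generic name/pins/npins triplet */
	void *data;		/* driver-private mode data */
};

static const struct example_group_desc example_groups[] = {
	{
		.grp = PINCTRL_PINGROUP("i2c", example_i2c_pins,
					ARRAY_SIZE(example_i2c_pins)),
		.data = example_i2c_funcs,
	},
};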
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
index 5fb377c16..6b1c7122b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2701.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -533,7 +533,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt2701-pinctrl",
.of_match_table = mt2701_pctrl_match,
- .pm = &mtk_eint_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2712.c b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
index 8a6daa0db..bb7394ae2 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2712.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2712.c
@@ -581,7 +581,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt2712-pinctrl",
.of_match_table = mt2712_pctrl_match,
- .pm = &mtk_eint_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6795.c b/drivers/pinctrl/mediatek/pinctrl-mt6795.c
index 01e855ccd..ee3ae3d2f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6795.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6795.c
@@ -612,7 +612,7 @@ static struct platform_driver mt6795_pinctrl_driver = {
.driver = {
.name = "mt6795-pinctrl",
.of_match_table = mt6795_pctrl_match,
- .pm = &mtk_paris_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
},
.probe = mtk_paris_pinctrl_probe,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8167.c b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
index ba7f30c32..143c26622 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8167.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
@@ -334,7 +334,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt8167-pinctrl",
.of_match_table = mt8167_pctrl_match,
- .pm = &mtk_eint_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index 455eec018..b214deeaf 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -347,7 +347,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt8173-pinctrl",
.of_match_table = mt8173_pctrl_match,
- .pm = &mtk_eint_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
index ddc48b725..93e482c6b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8183.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c
@@ -576,7 +576,7 @@ static struct platform_driver mt8183_pinctrl_driver = {
.driver = {
.name = "mt8183-pinctrl",
.of_match_table = mt8183_pinctrl_of_match,
- .pm = &mtk_paris_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
},
.probe = mtk_paris_pinctrl_probe,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
index 09edcf47e..dd19e7485 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
@@ -1254,7 +1254,7 @@ static struct platform_driver mt8186_pinctrl_driver = {
.driver = {
.name = "mt8186-pinctrl",
.of_match_table = mt8186_pinctrl_of_match,
- .pm = &mtk_paris_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
},
.probe = mtk_paris_pinctrl_probe,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8188.c b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
index c067e043e..3975e99d9 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8188.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8188.c
@@ -1658,7 +1658,7 @@ static struct platform_driver mt8188_pinctrl_driver = {
.driver = {
.name = "mt8188-pinctrl",
.of_match_table = mt8188_pinctrl_of_match,
- .pm = &mtk_paris_pinctrl_pm_ops
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops)
},
.probe = mtk_paris_pinctrl_probe,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index bf5788d68..3f8a9dbcb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1419,7 +1419,7 @@ static struct platform_driver mt8192_pinctrl_driver = {
.driver = {
.name = "mt8192-pinctrl",
.of_match_table = mt8192_pinctrl_of_match,
- .pm = &mtk_paris_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
},
.probe = mtk_paris_pinctrl_probe,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
index 09c4dcef9..83345c52b 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -968,7 +968,7 @@ static struct platform_driver mt8195_pinctrl_driver = {
.driver = {
.name = "mt8195-pinctrl",
.of_match_table = mt8195_pinctrl_of_match,
- .pm = &mtk_paris_pinctrl_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops),
},
.probe = mtk_paris_pinctrl_probe,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
index 1db04bbdb..e3e0d66cf 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c
@@ -484,7 +484,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt8365-pinctrl",
.of_match_table = mt8365_pctrl_match,
- .pm = &mtk_eint_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8516.c b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
index 950275c47..abda75d43 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8516.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8516.c
@@ -334,7 +334,7 @@ static struct platform_driver mtk_pinctrl_driver = {
.driver = {
.name = "mediatek-mt8516-pinctrl",
.of_match_table = mt8516_pctrl_match,
- .pm = &mtk_eint_pm_ops,
+ .pm = pm_sleep_ptr(&mtk_eint_pm_ops),
},
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index e79d66a04..d39afc122 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -914,9 +914,8 @@ static int mtk_eint_resume(struct device *device)
return mtk_eint_do_resume(pctl->eint);
}
-const struct dev_pm_ops mtk_eint_pm_ops = {
- .suspend_noirq = mtk_eint_suspend,
- .resume_noirq = mtk_eint_resume,
+EXPORT_GPL_DEV_SLEEP_PM_OPS(mtk_eint_pm_ops) = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_eint_suspend, mtk_eint_resume)
};
static int mtk_pctrl_build_state(struct platform_device *pdev)
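On the provider side, the MediaTek cores switch to the modern PM macros: EXPORT_GPL_DEV_SLEEP_PM_OPS() both defines and GPL-exports the dev_pm_ops in a form the compiler can drop when sleep support is disabled, and NOIRQ_SYSTEM_SLEEP_PM_OPS() fills in the suspend_noirq/resume_noirq pair that was previously open-coded. A minimal sketch with hypothetical example_* callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev) { return 0; }
static int example_resume_noirq(struct device *dev) { return 0; }

/*
 * Defines and exports 'example_pm_ops'; consumers reference it through
 * pm_sleep_ptr(&example_pm_ops) so everything is discarded when
 * CONFIG_PM_SLEEP is off.
 */
EXPORT_GPL_DEV_SLEEP_PM_OPS(example_pm_ops) = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend_noirq, example_resume_noirq)
};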
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 6392f1e05..b6bc31abd 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -1131,9 +1131,8 @@ static int mtk_paris_pinctrl_resume(struct device *device)
return mtk_eint_do_resume(pctl->eint);
}
-const struct dev_pm_ops mtk_paris_pinctrl_pm_ops = {
- .suspend_noirq = mtk_paris_pinctrl_suspend,
- .resume_noirq = mtk_paris_pinctrl_resume,
+EXPORT_GPL_DEV_SLEEP_PM_OPS(mtk_paris_pinctrl_pm_ops) = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_paris_pinctrl_suspend, mtk_paris_pinctrl_resume)
};
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.h b/drivers/pinctrl/mediatek/pinctrl-paris.h
index 8762ac599..948ce126a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.h
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.h
@@ -49,12 +49,10 @@
__VA_ARGS__, { } }, \
}
-#define PINCTRL_PIN_GROUP(name, id) \
- { \
- name, \
- id##_pins, \
- ARRAY_SIZE(id##_pins), \
- id##_funcs, \
+#define PINCTRL_PIN_GROUP(_name_, id) \
+ { \
+ .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)), \
+ .data = id##_funcs, \
}
int mtk_paris_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index d3c32d809..80e3ac333 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -6,7 +6,9 @@
*
* Driver allows AxB5xx unused pins to be used as GPIOs
*/
+
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -453,12 +455,11 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
unsigned offset, unsigned gpio)
{
struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
- const char *label = gpiochip_is_requested(chip, offset - 1);
u8 gpio_offset = offset - 1;
int mode = -1;
bool is_out;
bool pd;
- int ret;
+ int ret = -ENOMEM;
const char *modes[] = {
[ABX500_DEFAULT] = "default",
@@ -474,6 +475,10 @@ static void abx500_gpio_dbg_show_one(struct seq_file *s,
[ABX500_GPIO_PULL_UP] = "pull up",
};
+ char *label __free(kfree) = gpiochip_dup_line_label(chip, offset - 1);
+ if (IS_ERR(label))
+ goto out;
+
ret = abx500_gpio_get_bit(chip, AB8500_GPIO_DIR1_REG,
gpio_offset, &is_out);
if (ret < 0)
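The abx500 and nomadik debugfs helpers move from gpiochip_is_requested(), which returned a borrowed pointer that could go stale under the caller, to gpiochip_dup_line_label(), which returns a kfree()-able copy (NULL if the line is unclaimed, ERR_PTR() on failure); tagging the variable __free(kfree) releases it automatically at scope exit. A sketch of the idiom (example_* is illustrative):

#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Sketch: safely reading a GPIO line label for debug output. */
static void example_show_label(struct gpio_chip *chip, unsigned int offset)
{
	char *label __free(kfree) = gpiochip_dup_line_label(chip, offset);

	if (IS_ERR(label))
		return;		/* lookup or allocation failed */

	pr_debug("line %u: %s\n", offset, label ?: "(unused)");
}	/* label is kfree()d here automatically; kfree(NULL) is a no-op */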
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 445c61a4a..4f7c4af4f 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -8,6 +8,7 @@
* Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -917,7 +918,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
struct pinctrl_dev *pctldev, struct gpio_chip *chip,
unsigned offset, unsigned gpio)
{
- const char *label = gpiochip_is_requested(chip, offset);
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
int mode;
bool is_out;
@@ -934,6 +934,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
[NMK_GPIO_ALT_C+4] = "altC4",
};
+ char *label = gpiochip_dup_line_label(chip, offset);
+ if (IS_ERR(label))
+ return;
+
clk_enable(nmk_chip->clk);
is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 1e658721a..62a46d824 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1790,8 +1790,8 @@ static int npcm7xx_config_set_one(struct npcm7xx_pinctrl *npcm,
bank->direction_input(&bank->gc, pin % bank->gc.ngpio);
break;
case PIN_CONFIG_OUTPUT:
- iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
bank->direction_output(&bank->gc, pin % bank->gc.ngpio, arg);
+ iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_OTYP, gpio);
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 0cff44b07..458990024 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -474,9 +474,8 @@ enum {
#undef WPCM450_GRP
};
-static struct group_desc wpcm450_groups[] = {
-#define WPCM450_GRP(x) { .name = #x, .pins = x ## _pins, \
- .num_pins = ARRAY_SIZE(x ## _pins) }
+static struct pingroup wpcm450_groups[] = {
+#define WPCM450_GRP(x) PINCTRL_PINGROUP(#x, x ## _pins, ARRAY_SIZE(x ## _pins))
WPCM450_GRPS
#undef WPCM450_GRP
};
@@ -852,7 +851,7 @@ static int wpcm450_get_group_pins(struct pinctrl_dev *pctldev,
const unsigned int **pins,
unsigned int *npins)
{
- *npins = wpcm450_groups[selector].num_pins;
+ *npins = wpcm450_groups[selector].npins;
*pins = wpcm450_groups[selector].pins;
return 0;
@@ -901,7 +900,7 @@ static int wpcm450_pinmux_set_mux(struct pinctrl_dev *pctldev,
struct wpcm450_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
wpcm450_setfunc(pctrl->gcr_regmap, wpcm450_groups[group].pins,
- wpcm450_groups[group].num_pins, function);
+ wpcm450_groups[group].npins, function);
return 0;
}
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 8313cb5f3..cada5d18f 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -57,7 +57,7 @@ static const struct pin_config_item conf_items[] = {
static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
struct seq_file *s, const char *gname,
- unsigned pin,
+ unsigned int pin,
const struct pin_config_item *items,
int nitems, int *print_sep)
{
@@ -110,7 +110,7 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
* to be specified the other can be NULL/0.
*/
void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev, struct seq_file *s,
- const char *gname, unsigned pin)
+ const char *gname, unsigned int pin)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
int print_sep = 0;
@@ -295,15 +295,15 @@ EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
+ unsigned int *reserved_maps, unsigned int *num_maps,
enum pinctrl_map_type type)
{
int ret;
const char *function;
struct device *dev = pctldev->dev;
unsigned long *configs = NULL;
- unsigned num_configs = 0;
- unsigned reserve, strings_count;
+ unsigned int num_configs = 0;
+ unsigned int reserve, strings_count;
struct property *prop;
const char *group;
const char *subnode_target_type = "pins";
@@ -379,9 +379,9 @@ EXPORT_SYMBOL_GPL(pinconf_generic_dt_subnode_to_map);
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps, enum pinctrl_map_type type)
+ unsigned int *num_maps, enum pinctrl_map_type type)
{
- unsigned reserved_maps;
+ unsigned int reserved_maps;
struct device_node *np;
int ret;
@@ -412,7 +412,7 @@ EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map,
- unsigned num_maps)
+ unsigned int num_maps)
{
pinctrl_utils_free_map(pctldev, map, num_maps);
}
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 96d853a8f..dca963633 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -55,7 +55,7 @@ int pinconf_validate_map(const struct pinctrl_map *map, int i)
return 0;
}
-int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
@@ -199,7 +199,7 @@ int pinconf_apply_setting(const struct pinctrl_setting *setting)
return 0;
}
-int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, size_t nconfigs)
{
const struct pinconf_ops *ops;
@@ -214,7 +214,7 @@ int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
#ifdef CONFIG_DEBUG_FS
static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
- unsigned long *configs, unsigned num_configs)
+ unsigned long *configs, unsigned int num_configs)
{
const struct pinconf_ops *confops;
int i;
@@ -304,7 +304,7 @@ static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
static int pinconf_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
- unsigned i, pin;
+ unsigned int i, pin;
seq_puts(s, "Pin config settings per pin\n");
seq_puts(s, "Format: pin (name): configs\n");
@@ -333,7 +333,7 @@ static int pinconf_pins_show(struct seq_file *s, void *what)
}
static void pinconf_dump_group(struct pinctrl_dev *pctldev,
- struct seq_file *s, unsigned selector,
+ struct seq_file *s, unsigned int selector,
const char *gname)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
@@ -348,8 +348,8 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- unsigned ngroups = pctlops->get_groups_count(pctldev);
- unsigned selector = 0;
+ unsigned int ngroups = pctlops->get_groups_count(pctldev);
+ unsigned int selector = 0;
seq_puts(s, "Pin config settings per pin group\n");
seq_puts(s, "Format: group (name): configs\n");
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index 694bfc996..a14c950bc 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -29,14 +29,14 @@ int pinconf_map_to_setting(const struct pinctrl_map *map,
void pinconf_free_setting(const struct pinctrl_setting *setting);
int pinconf_apply_setting(const struct pinctrl_setting *setting);
-int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, size_t nconfigs);
/*
* You will only be interested in these if you're using PINCONF
* so don't supply any stubs for these.
*/
-int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config);
int pin_config_group_get(const char *dev_name, const char *pin_group,
unsigned long *config);
@@ -68,7 +68,7 @@ static inline int pinconf_apply_setting(const struct pinctrl_setting *setting)
return 0;
}
-static inline int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned pin,
+static inline int pinconf_set_config(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, size_t nconfigs)
{
return -ENOTSUPP;
@@ -112,7 +112,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev,
struct seq_file *s, const char *gname,
- unsigned pin);
+ unsigned int pin);
void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned long config);
@@ -120,7 +120,7 @@ void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
static inline void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev,
struct seq_file *s,
- const char *gname, unsigned pin)
+ const char *gname, unsigned int pin)
{
return;
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 03ecb3d1a..7f66ec731 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_COND_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 6a5f23cf7..0d8c75ce2 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -542,7 +542,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
as_pci->dev = &pdev->dev;
as_pci->as3722 = dev_get_drvdata(pdev->dev.parent);
- platform_set_drvdata(pdev, as_pci);
as_pci->pins = as3722_pins_desc;
as_pci->num_pins = ARRAY_SIZE(as3722_pins_desc);
@@ -562,7 +561,7 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
as_pci->gpio_chip = as3722_gpio_chip;
as_pci->gpio_chip.parent = &pdev->dev;
- ret = gpiochip_add_data(&as_pci->gpio_chip, as_pci);
+ ret = devm_gpiochip_add_data(&pdev->dev, &as_pci->gpio_chip, as_pci);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
return ret;
@@ -572,21 +571,10 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
0, 0, AS3722_PIN_NUM);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't add pin range, %d\n", ret);
- goto fail_range_add;
+ return ret;
}
return 0;
-
-fail_range_add:
- gpiochip_remove(&as_pci->gpio_chip);
- return ret;
-}
-
-static void as3722_pinctrl_remove(struct platform_device *pdev)
-{
- struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
-
- gpiochip_remove(&as_pci->gpio_chip);
}
static const struct of_device_id as3722_pinctrl_of_match[] = {
@@ -601,7 +589,6 @@ static struct platform_driver as3722_pinctrl_driver = {
.of_match_table = as3722_pinctrl_of_match,
},
.probe = as3722_pinctrl_probe,
- .remove_new = as3722_pinctrl_remove,
};
module_platform_driver(as3722_pinctrl_driver);
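The as3722 change is the standard devm cleanup: devm_gpiochip_add_data() ties the gpiochip's lifetime to the device, so both the error-path gpiochip_remove() and the entire .remove_new callback disappear. A sketch of the resulting probe shape, under assumed example_* names (chip setup elided):

#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_chip *gc;
	int ret;

	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;
	/* ... fill in gc->label, gc->ngpio, ops, etc. ... */

	/* Unregistered automatically when the device is unbound. */
	ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
	if (ret)
		return ret;

	return 0;	/* no .remove_new needed for the gpiochip */
}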
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index fe9545c63..67b5d160c 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -58,6 +58,10 @@
#define CY8C95X0_PIN_TO_OFFSET(x) (((x) >= 20) ? ((x) + 4) : (x))
+#define CY8C95X0_MUX_REGMAP_TO_PORT(x) ((x) / MUXED_STRIDE)
+#define CY8C95X0_MUX_REGMAP_TO_REG(x) (((x) % MUXED_STRIDE) + CY8C95X0_INTMASK)
+#define CY8C95X0_MUX_REGMAP_TO_OFFSET(x, p) ((x) - CY8C95X0_INTMASK + (p) * MUXED_STRIDE)
+
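These three macros translate between the flat offset space of the synthetic "muxed" regmap and the (port, register) pair actually programmed through PORTSEL: each port occupies a MUXED_STRIDE-wide window, rebased against CY8C95X0_INTMASK. A worked example as a comment sketch, writing d for the register's distance from CY8C95X0_INTMASK (assumed to satisfy 0 <= d < MUXED_STRIDE for every muxed register):

/*
 * Worked example (sketch): any muxed register R on port p, with
 * d = R - CY8C95X0_INTMASK and MUXED_STRIDE = 16.
 *
 *   off = CY8C95X0_MUX_REGMAP_TO_OFFSET(R, p) = d + p * 16
 *
 *   CY8C95X0_MUX_REGMAP_TO_PORT(off) == p    (off / 16)
 *   CY8C95X0_MUX_REGMAP_TO_REG(off)  == R    (off % 16 + CY8C95X0_INTMASK)
 *
 * i.e. TO_PORT()/TO_REG() exactly invert TO_OFFSET() as long as all
 * muxed registers lie within one MUXED_STRIDE of CY8C95X0_INTMASK.
 */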
static const struct i2c_device_id cy8c95x0_id[] = {
{ "cy8c9520", 20, },
{ "cy8c9540", 40, },
@@ -119,12 +123,13 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
#define MAX_BANK 8
#define BANK_SZ 8
#define MAX_LINE (MAX_BANK * BANK_SZ)
-
+#define MUXED_STRIDE 16
#define CY8C95X0_GPIO_MASK GENMASK(7, 0)
/**
* struct cy8c95x0_pinctrl - driver data
- * @regmap: Device's regmap
+ * @regmap: Device's regmap. Only direct access registers.
+ * @muxed_regmap: Regmap for all muxed registers.
* @irq_lock: IRQ bus lock
* @i2c_lock: Mutex for the device internal mux register
* @irq_mask: I/O bits affected by interrupts
@@ -147,6 +152,7 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
*/
struct cy8c95x0_pinctrl {
struct regmap *regmap;
+ struct regmap *muxed_regmap;
struct mutex irq_lock;
struct mutex i2c_lock;
DECLARE_BITMAP(irq_mask, MAX_LINE);
@@ -379,6 +385,54 @@ static bool cy8c95x0_precious_register(struct device *dev, unsigned int reg)
}
}
+static bool cy8c95x0_muxed_register(unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INTMASK:
+ case CY8C95X0_PWMSEL:
+ case CY8C95X0_INVERT:
+ case CY8C95X0_DIRECTION:
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cy8c95x0_wc_register(unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_DRV_PU:
+ case CY8C95X0_DRV_PD:
+ case CY8C95X0_DRV_ODH:
+ case CY8C95X0_DRV_ODL:
+ case CY8C95X0_DRV_PP_FAST:
+ case CY8C95X0_DRV_PP_SLOW:
+ case CY8C95X0_DRV_HIZ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cy8c95x0_quick_path_register(unsigned int reg)
+{
+ switch (reg) {
+ case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
+ case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
+ case CY8C95X0_OUTPUT_(0) ... CY8C95X0_OUTPUT_(7):
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct reg_default cy8c95x0_reg_defaults[] = {
{ CY8C95X0_OUTPUT_(0), GENMASK(7, 0) },
{ CY8C95X0_OUTPUT_(1), GENMASK(7, 0) },
@@ -392,7 +446,89 @@ static const struct reg_default cy8c95x0_reg_defaults[] = {
{ CY8C95X0_PWMSEL, 0 },
};
+static int
+cy8c95x0_mux_reg_read(void *context, unsigned int off, unsigned int *val)
+{
+ struct cy8c95x0_pinctrl *chip = context;
+ u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
+ int ret, reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
+
+ mutex_lock(&chip->i2c_lock);
+ /* Select the correct bank */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Read the register through direct access regmap. The target range
+ * is marked volatile.
+ */
+ ret = regmap_read(chip->regmap, reg, val);
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static int
+cy8c95x0_mux_reg_write(void *context, unsigned int off, unsigned int val)
+{
+ struct cy8c95x0_pinctrl *chip = context;
+ u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
+ int ret, reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
+
+ mutex_lock(&chip->i2c_lock);
+ /* Select the correct bank */
+ ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Write the register through direct access regmap. The target range
+ * is marked volatile.
+ */
+ ret = regmap_write(chip->regmap, reg, val);
+out:
+ mutex_unlock(&chip->i2c_lock);
+
+ return ret;
+}
+
+static bool cy8c95x0_mux_accessible_register(struct device *dev, unsigned int off)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct cy8c95x0_pinctrl *chip = i2c_get_clientdata(i2c);
+ u8 port = CY8C95X0_MUX_REGMAP_TO_PORT(off);
+ u8 reg = CY8C95X0_MUX_REGMAP_TO_REG(off);
+
+ if (port >= chip->nport)
+ return false;
+
+ return cy8c95x0_muxed_register(reg);
+}
+
+static struct regmap_bus cy8c95x0_regmap_bus = {
+ .reg_read = cy8c95x0_mux_reg_read,
+ .reg_write = cy8c95x0_mux_reg_write,
+};
+
+/* Regmap for muxed registers CY8C95X0_INTMASK - CY8C95X0_DRV_HIZ */
+static const struct regmap_config cy8c95x0_muxed_regmap = {
+ .name = "muxed",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_FLAT,
+ .use_single_read = true,
+ .use_single_write = true,
+ .max_register = MUXED_STRIDE * BANK_SZ,
+ .num_reg_defaults_raw = MUXED_STRIDE * BANK_SZ,
+ .readable_reg = cy8c95x0_mux_accessible_register,
+ .writeable_reg = cy8c95x0_mux_accessible_register,
+};
+
+/* Direct access regmap */
static const struct regmap_config cy8c95x0_i2c_regmap = {
+ .name = "direct",
.reg_bits = 8,
.val_bits = 8,
@@ -408,6 +544,147 @@ static const struct regmap_config cy8c95x0_i2c_regmap = {
.max_register = CY8C95X0_COMMAND,
};
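The muxed window thus gets its own regmap built on a custom regmap_bus whose read/write callbacks select the bank via PORTSEL under i2c_lock, while direct access registers keep the plain I2C regmap. A sketch of how probe code would presumably wire the two maps together (the actual probe changes fall outside this excerpt, so the exact form is an assumption):

/* Sketch (assumed wiring): direct map over I2C, muxed map over the bus. */
static int example_init_regmaps(struct i2c_client *client,
				struct cy8c95x0_pinctrl *chip)
{
	chip->regmap = devm_regmap_init_i2c(client, &cy8c95x0_i2c_regmap);
	if (IS_ERR(chip->regmap))
		return PTR_ERR(chip->regmap);

	chip->muxed_regmap = devm_regmap_init(&client->dev, &cy8c95x0_regmap_bus,
					      chip, &cy8c95x0_muxed_regmap);
	return PTR_ERR_OR_ZERO(chip->muxed_regmap);
}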
+static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
+ unsigned int reg,
+ unsigned int port,
+ unsigned int mask,
+ unsigned int val,
+ bool *change, bool async,
+ bool force)
+{
+ struct regmap *regmap;
+ int ret, off, i, read_val;
+
+ /* Caller should never modify PORTSEL directly */
+ if (reg == CY8C95X0_PORTSEL)
+ return -EINVAL;
+
+ /* Registers behind the PORTSEL mux have their own regmap */
+ if (cy8c95x0_muxed_register(reg)) {
+ regmap = chip->muxed_regmap;
+ off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
+ } else {
+ regmap = chip->regmap;
+ /* Quick path direct access registers honor the port argument */
+ if (cy8c95x0_quick_path_register(reg))
+ off = reg + port;
+ else
+ off = reg;
+ }
+
+ ret = regmap_update_bits_base(regmap, off, mask, val, change, async, force);
+ if (ret < 0)
+ return ret;
+
+ /* Update the cache when a WC bit is written */
+ if (cy8c95x0_wc_register(reg) && (mask & val)) {
+ for (i = CY8C95X0_DRV_PU; i <= CY8C95X0_DRV_HIZ; i++) {
+ if (i == reg)
+ continue;
+ off = CY8C95X0_MUX_REGMAP_TO_OFFSET(i, port);
+
+ ret = regmap_read(regmap, off, &read_val);
+ if (ret < 0)
+ continue;
+
+ if (!(read_val & mask & val))
+ continue;
+
+ regcache_cache_only(regmap, true);
+ regmap_update_bits(regmap, off, mask & val, 0);
+ regcache_cache_only(regmap, false);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * cy8c95x0_regmap_write_bits() - writes a register using the regmap cache
+ * @chip: The pinctrl to work on
+ * @reg: The register to write to. Can be direct access or muxed register.
+ * MUST NOT be the PORTSEL register.
+ * @port: The port to be used for muxed registers or quick path direct access
+ * registers. Otherwise unused.
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * This function handles the register writes to the direct access registers and
+ * the muxed registers while caching all register accesses, internally handling
+ * the correct state of the PORTSEL register and protecting the access to muxed
+ * registers.
+ * The caller must only use this function to change registers behind the PORTSEL mux.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int cy8c95x0_regmap_write_bits(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+ unsigned int port, unsigned int mask, unsigned int val)
+{
+ return cy8c95x0_regmap_update_bits_base(chip, reg, port, mask, val, NULL, false, true);
+}
+
+/**
+ * cy8c95x0_regmap_update_bits() - updates a register using the regmap cache
+ * @chip: The pinctrl to work on
+ * @reg: The register to write to. Can be direct access or muxed register.
+ * MUST NOT be the PORTSEL register.
+ * @port: The port to be used for muxed registers or quick path direct access
+ * registers. Otherwise unused.
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * This function handles the register updates to the direct access registers and
+ * the muxed registers while caching all register accesses, internally handling
+ * the correct state of the PORTSEL register and protecting the access to muxed
+ * registers.
+ * The caller must only use this function to change registers behind the PORTSEL mux.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int cy8c95x0_regmap_update_bits(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+ unsigned int port, unsigned int mask, unsigned int val)
+{
+ return cy8c95x0_regmap_update_bits_base(chip, reg, port, mask, val, NULL, false, false);
+}
+
+/**
+ * cy8c95x0_regmap_read() - reads a register using the regmap cache
+ * @chip: The pinctrl to work on
+ * @reg: The register to read from. Can be direct access or muxed register.
+ * @port: The port to be used for muxed registers or quick path direct access
+ * registers. Otherwise unused.
+ * @read_val: Value read from hardware or cache
+ *
+ * This function handles the register reads from the direct access registers and
+ * the muxed registers while caching all register accesses, internally handling
+ * the correct state of the PORTSEL register and protecting the access to muxed
+ * registers.
+ * The caller must only use this function to read registers behind the PORTSEL mux.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int cy8c95x0_regmap_read(struct cy8c95x0_pinctrl *chip, unsigned int reg,
+ unsigned int port, unsigned int *read_val)
+{
+ struct regmap *regmap;
+ int off;
+
+ /* Registers behind the PORTSEL mux have their own regmap */
+ if (cy8c95x0_muxed_register(reg)) {
+ regmap = chip->muxed_regmap;
+ off = CY8C95X0_MUX_REGMAP_TO_OFFSET(reg, port);
+ } else {
+ regmap = chip->regmap;
+ /* Quick path direct access registers honor the port argument */
+ if (cy8c95x0_quick_path_register(reg))
+ off = reg + port;
+ else
+ off = reg;
+ }
+
+ return regmap_read(regmap, off, read_val);
+}
+
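A minimal call-site sketch for the helpers introduced above; the signatures are taken verbatim from this patch, while the register and port choices are illustrative only:

    unsigned int val;
    int ret;

    /* Muxed register: the helper resolves PORTSEL internally. */
    ret = cy8c95x0_regmap_read(chip, CY8C95X0_DRV_PU, 1, &val);
    if (ret < 0)
            return ret;

    /* Direct access register on the quick path: port selects the bank. */
    ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, 1, BIT(3), BIT(3));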
static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
unsigned long *val, unsigned long *mask)
{
@@ -415,7 +692,7 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
DECLARE_BITMAP(tval, MAX_LINE);
int write_val;
int ret = 0;
- int i, off = 0;
+ int i;
u8 bits;
/* Add the 4 bit gap of Gport2 */
@@ -427,53 +704,22 @@ static int cy8c95x0_write_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
bitmap_shift_left(tval, tval, 4, MAX_LINE);
bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
- mutex_lock(&chip->i2c_lock);
for (i = 0; i < chip->nport; i++) {
/* Skip over unused banks */
bits = bitmap_get_value8(tmask, i * BANK_SZ);
if (!bits)
continue;
- switch (reg) {
- /* Muxed registers */
- case CY8C95X0_INTMASK:
- case CY8C95X0_PWMSEL:
- case CY8C95X0_INVERT:
- case CY8C95X0_DIRECTION:
- case CY8C95X0_DRV_PU:
- case CY8C95X0_DRV_PD:
- case CY8C95X0_DRV_ODH:
- case CY8C95X0_DRV_ODL:
- case CY8C95X0_DRV_PP_FAST:
- case CY8C95X0_DRV_PP_SLOW:
- case CY8C95X0_DRV_HIZ:
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
- if (ret < 0)
- goto out;
- off = reg;
- break;
- /* Direct access registers */
- case CY8C95X0_INPUT:
- case CY8C95X0_OUTPUT:
- case CY8C95X0_INTSTATUS:
- off = reg + i;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
write_val = bitmap_get_value8(tval, i * BANK_SZ);
- ret = regmap_update_bits(chip->regmap, off, bits, write_val);
+ ret = cy8c95x0_regmap_update_bits(chip, reg, i, bits, write_val);
if (ret < 0)
goto out;
}
out:
- mutex_unlock(&chip->i2c_lock);
if (ret < 0)
- dev_err(chip->dev, "failed writing register %d: err %d\n", off, ret);
+ dev_err(chip->dev, "failed writing register %d, port %d: err %d\n", reg, i, ret);
return ret;
}
@@ -486,7 +732,7 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
DECLARE_BITMAP(tmp, MAX_LINE);
int read_val;
int ret = 0;
- int i, off = 0;
+ int i;
u8 bits;
/* Add the 4 bit gap of Gport2 */
@@ -498,43 +744,13 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
bitmap_shift_left(tval, tval, 4, MAX_LINE);
bitmap_replace(tval, tval, val, chip->shiftmask, BANK_SZ * 3);
- mutex_lock(&chip->i2c_lock);
for (i = 0; i < chip->nport; i++) {
/* Skip over unused banks */
bits = bitmap_get_value8(tmask, i * BANK_SZ);
if (!bits)
continue;
- switch (reg) {
- /* Muxed registers */
- case CY8C95X0_INTMASK:
- case CY8C95X0_PWMSEL:
- case CY8C95X0_INVERT:
- case CY8C95X0_DIRECTION:
- case CY8C95X0_DRV_PU:
- case CY8C95X0_DRV_PD:
- case CY8C95X0_DRV_ODH:
- case CY8C95X0_DRV_ODL:
- case CY8C95X0_DRV_PP_FAST:
- case CY8C95X0_DRV_PP_SLOW:
- case CY8C95X0_DRV_HIZ:
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, i);
- if (ret < 0)
- goto out;
- off = reg;
- break;
- /* Direct access registers */
- case CY8C95X0_INPUT:
- case CY8C95X0_OUTPUT:
- case CY8C95X0_INTSTATUS:
- off = reg + i;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
-
- ret = regmap_read(chip->regmap, off, &read_val);
+ ret = cy8c95x0_regmap_read(chip, reg, i, &read_val);
if (ret < 0)
goto out;
@@ -548,10 +764,8 @@ static int cy8c95x0_read_regs_mask(struct cy8c95x0_pinctrl *chip, int reg,
bitmap_replace(val, tmp, tval, chip->shiftmask, MAX_LINE);
out:
- mutex_unlock(&chip->i2c_lock);
-
if (ret < 0)
- dev_err(chip->dev, "failed reading register %d: err %d\n", off, ret);
+ dev_err(chip->dev, "failed reading register %d, port %d: err %d\n", reg, i, ret);
return ret;
}
@@ -566,12 +780,11 @@ static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
u8 port = cypress_get_port(chip, off);
- u8 outreg = CY8C95X0_OUTPUT_(port);
u8 bit = cypress_get_pin_mask(chip, off);
int ret;
/* Set output level */
- ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0);
if (ret)
return ret;
@@ -581,12 +794,12 @@ static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
- u8 inreg = CY8C95X0_INPUT_(cypress_get_port(chip, off));
+ u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
u32 reg_val;
int ret;
- ret = regmap_read(chip->regmap, inreg, &reg_val);
+ ret = cy8c95x0_regmap_read(chip, CY8C95X0_INPUT, port, &reg_val);
if (ret < 0) {
/*
* NOTE:
@@ -604,10 +817,10 @@ static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off,
int val)
{
struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc);
- u8 outreg = CY8C95X0_OUTPUT_(cypress_get_port(chip, off));
+ u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0);
}
static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
@@ -618,24 +831,15 @@ static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
u32 reg_val;
int ret;
- mutex_lock(&chip->i2c_lock);
-
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret < 0)
- goto out;
-
- ret = regmap_read(chip->regmap, CY8C95X0_DIRECTION, &reg_val);
+ ret = cy8c95x0_regmap_read(chip, CY8C95X0_DIRECTION, port, &reg_val);
if (ret < 0)
goto out;
- mutex_unlock(&chip->i2c_lock);
-
if (reg_val & bit)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
out:
- mutex_unlock(&chip->i2c_lock);
return ret;
}
@@ -651,13 +855,6 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
u16 arg = 0;
int ret;
- mutex_lock(&chip->i2c_lock);
-
- /* Select port */
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret < 0)
- goto out;
-
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
reg = CY8C95X0_DRV_PU;
@@ -684,7 +881,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_PWMSEL;
break;
case PIN_CONFIG_OUTPUT:
- reg = CY8C95X0_OUTPUT_(port);
+ reg = CY8C95X0_OUTPUT;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
reg = CY8C95X0_DIRECTION;
@@ -712,7 +909,10 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
* Writing 1 to one of the drive mode registers will automatically
* clear conflicting set bits in the other drive mode registers.
*/
- ret = regmap_read(chip->regmap, reg, &reg_val);
+ ret = cy8c95x0_regmap_read(chip, reg, port, &reg_val);
+ if (ret < 0)
+ goto out;
+
if (reg_val & bit)
arg = 1;
if (param == PIN_CONFIG_OUTPUT_ENABLE)
@@ -720,8 +920,6 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
*config = pinconf_to_config_packed(param, (u16)arg);
out:
- mutex_unlock(&chip->i2c_lock);
-
return ret;
}
@@ -736,13 +934,6 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
unsigned int reg;
int ret;
- mutex_lock(&chip->i2c_lock);
-
- /* Select port */
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret < 0)
- goto out;
-
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
__clear_bit(off, chip->push_pull);
@@ -785,10 +976,8 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
* Writing 1 to one of the drive mode registers will automatically
* clear conflicting set bits in the other drive mode registers.
*/
- ret = regmap_write_bits(chip->regmap, reg, bit, bit);
-
+ ret = cy8c95x0_regmap_write_bits(chip, reg, port, bit, bit);
out:
- mutex_unlock(&chip->i2c_lock);
return ret;
}
@@ -1105,14 +1294,8 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
{
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- int ret;
- /* Select port */
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret < 0)
- return ret;
-
- return regmap_write_bits(chip->regmap, CY8C95X0_PWMSEL, bit, mode ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
}
static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
@@ -1130,24 +1313,19 @@ static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
return 0;
/* Set direction to output & set output to 1 so that PWM can work */
- ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, bit);
+ ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DIRECTION, port, bit, bit);
if (ret < 0)
return ret;
- return regmap_write_bits(chip->regmap, CY8C95X0_OUTPUT_(port), bit, bit);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, bit);
}
static int cy8c95x0_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
unsigned int group)
{
struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
- int ret;
- mutex_lock(&chip->i2c_lock);
- ret = cy8c95x0_pinmux_mode(chip, selector, group);
- mutex_unlock(&chip->i2c_lock);
-
- return ret;
+ return cy8c95x0_pinmux_mode(chip, selector, group);
}
static int cy8c95x0_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -1155,13 +1333,8 @@ static int cy8c95x0_gpio_request_enable(struct pinctrl_dev *pctldev,
unsigned int pin)
{
struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
- int ret;
-
- mutex_lock(&chip->i2c_lock);
- ret = cy8c95x0_set_mode(chip, pin, false);
- mutex_unlock(&chip->i2c_lock);
- return ret;
+ return cy8c95x0_set_mode(chip, pin, false);
}
static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
@@ -1171,13 +1344,7 @@ static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
u8 bit = cypress_get_pin_mask(chip, pin);
int ret;
- /* Select port... */
- ret = regmap_write(chip->regmap, CY8C95X0_PORTSEL, port);
- if (ret)
- return ret;
-
- /* ...then direction */
- ret = regmap_write_bits(chip->regmap, CY8C95X0_DIRECTION, bit, input ? bit : 0);
+ ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DIRECTION, port, bit, input ? bit : 0);
if (ret)
return ret;
@@ -1186,7 +1353,7 @@ static int cy8c95x0_pinmux_direction(struct cy8c95x0_pinctrl *chip,
* the direction register isn't sufficient in Push-Pull mode.
*/
if (input && test_bit(pin, chip->push_pull)) {
- ret = regmap_write_bits(chip->regmap, CY8C95X0_DRV_HIZ, bit, bit);
+ ret = cy8c95x0_regmap_write_bits(chip, CY8C95X0_DRV_HIZ, port, bit, bit);
if (ret)
return ret;
@@ -1201,13 +1368,8 @@ static int cy8c95x0_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned int pin, bool input)
{
struct cy8c95x0_pinctrl *chip = pinctrl_dev_get_drvdata(pctldev);
- int ret;
- mutex_lock(&chip->i2c_lock);
- ret = cy8c95x0_pinmux_direction(chip, pin, input);
- mutex_unlock(&chip->i2c_lock);
-
- return ret;
+ return cy8c95x0_pinmux_direction(chip, pin, input);
}
static const struct pinmux_ops cy8c95x0_pmxops = {
@@ -1409,12 +1571,22 @@ static int cy8c95x0_probe(struct i2c_client *client)
gpiod_set_consumer_name(chip->gpio_reset, "CY8C95X0 RESET");
}
+ /* Generic regmap for direct access registers */
chip->regmap = devm_regmap_init_i2c(client, &cy8c95x0_i2c_regmap);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
goto err_exit;
}
+ /* Port specific regmap behind PORTSEL mux */
+ chip->muxed_regmap = devm_regmap_init(&client->dev, &cy8c95x0_regmap_bus,
+ chip, &cy8c95x0_muxed_regmap);
+ if (IS_ERR(chip->muxed_regmap)) {
+ ret = dev_err_probe(&client->dev, PTR_ERR(chip->muxed_regmap),
+ "Failed to register muxed regmap\n");
+ goto err_exit;
+ }
+
bitmap_zero(chip->push_pull, MAX_LINE);
bitmap_zero(chip->shiftmask, MAX_LINE);
bitmap_set(chip->shiftmask, 0, 20);
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index 5b5ddf7e5..6e1be3886 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -331,8 +331,8 @@ static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
pinmux = grp->data;
- for (i = 0; i < grp->num_pins; i++)
- eqbr_set_pin_mux(pctl, pinmux[i], grp->pins[i]);
+ for (i = 0; i < grp->grp.npins; i++)
+ eqbr_set_pin_mux(pctl, pinmux[i], grp->grp.pins[i]);
return 0;
}
@@ -704,8 +704,8 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
{
struct device *dev = drvdata->dev;
struct device_node *node = dev->of_node;
- unsigned int *pinmux, pin_id, pinmux_id;
- struct group_desc group;
+ unsigned int *pins, *pinmux, pin_id, pinmux_id;
+ struct pingroup group, *grp = &group;
struct device_node *np;
struct property *prop;
int j, err;
@@ -715,55 +715,55 @@ static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
if (!prop)
continue;
- group.num_pins = of_property_count_u32_elems(np, "pins");
- if (group.num_pins < 0) {
+ err = of_property_count_u32_elems(np, "pins");
+ if (err < 0) {
dev_err(dev, "No pins in the group: %s\n", prop->name);
of_node_put(np);
- return -EINVAL;
+ return err;
}
- group.name = prop->value;
- group.pins = devm_kcalloc(dev, group.num_pins,
- sizeof(*(group.pins)), GFP_KERNEL);
- if (!group.pins) {
+ grp->npins = err;
+ grp->name = prop->value;
+ pins = devm_kcalloc(dev, grp->npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
of_node_put(np);
return -ENOMEM;
}
+ grp->pins = pins;
- pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
- GFP_KERNEL);
+ pinmux = devm_kcalloc(dev, grp->npins, sizeof(*pinmux), GFP_KERNEL);
if (!pinmux) {
of_node_put(np);
return -ENOMEM;
}
- for (j = 0; j < group.num_pins; j++) {
+ for (j = 0; j < grp->npins; j++) {
if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
dev_err(dev, "Group %s: Read intel pins id failed\n",
- group.name);
+ grp->name);
of_node_put(np);
return -EINVAL;
}
if (pin_id >= drvdata->pctl_desc.npins) {
dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
- group.name, j, pin_id);
+ grp->name, j, pin_id);
of_node_put(np);
return -EINVAL;
}
- group.pins[j] = pin_id;
+ pins[j] = pin_id;
if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
dev_err(dev, "Group %s: Read intel pinmux id failed\n",
- group.name);
+ grp->name);
of_node_put(np);
return -EINVAL;
}
pinmux[j] = pinmux_id;
}
- err = pinctrl_generic_add_group(drvdata->pctl_dev, group.name,
- group.pins, group.num_pins,
+ err = pinctrl_generic_add_group(drvdata->pctl_dev,
+ grp->name, grp->pins, grp->npins,
pinmux);
if (err < 0) {
- dev_err(dev, "Failed to register group %s\n", group.name);
+ dev_err(dev, "Failed to register group %s\n", grp->name);
of_node_put(np);
return err;
}
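Condensed illustration of the reworked flow with hypothetical group data; the pinctrl_generic_add_group() signature matches the call above:

    static const unsigned int demo_pins[] = { 3, 4 };
    static unsigned int demo_mux[] = { 1, 1 };
    static const struct pingroup demo_grp = {
            .name  = "demo-grp",
            .pins  = demo_pins,
            .npins = ARRAY_SIZE(demo_pins),
    };

    /* ...then, inside eqbr_build_groups(): */
    err = pinctrl_generic_add_group(drvdata->pctl_dev, demo_grp.name,
                                    demo_grp.pins, demo_grp.npins, demo_mux);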
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index ee718f6e2..bc6358a68 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -82,16 +82,17 @@
#define PINS_PER_GPIO_CHIP 32
#define JZ4730_PINS_PER_PAIRED_REG 16
-#define INGENIC_PIN_GROUP_FUNCS(name, id, funcs) \
- { \
- name, \
- id##_pins, \
- ARRAY_SIZE(id##_pins), \
- funcs, \
+#define INGENIC_PIN_GROUP_FUNCS(_name_, id, funcs) \
+ { \
+ .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)), \
+ .data = funcs, \
}
-#define INGENIC_PIN_GROUP(name, id, func) \
- INGENIC_PIN_GROUP_FUNCS(name, id, (void *)(func))
+#define INGENIC_PIN_GROUP(_name_, id, func) \
+ { \
+ .grp = PINCTRL_PINGROUP(_name_, id##_pins, ARRAY_SIZE(id##_pins)), \
+ .data = (void *)func, \
+ }
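As an expansion check (the pin array and mode value are hypothetical), the reworked macro pastes the id into the pin-array symbol:

    static const unsigned int uart0_data_pins[] = { 0x9e, 0x9f };

    /* INGENIC_PIN_GROUP("uart0-data", uart0_data, 1) expands to: */
    {
            .grp = PINCTRL_PINGROUP("uart0-data", uart0_data_pins,
                                    ARRAY_SIZE(uart0_data_pins)),
            .data = (void *)1,
    }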
enum jz_version {
ID_JZ4730,
@@ -3761,17 +3762,17 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->name, grp->name);
+ func->name, grp->grp.name);
mode = (uintptr_t)grp->data;
if (mode <= 3) {
- for (i = 0; i < grp->num_pins; i++)
- ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], mode);
+ for (i = 0; i < grp->grp.npins; i++)
+ ingenic_pinmux_set_pin_fn(jzpc, grp->grp.pins[i], mode);
} else {
pin_modes = grp->data;
- for (i = 0; i < grp->num_pins; i++)
- ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], pin_modes[i]);
+ for (i = 0; i < grp->grp.npins; i++)
+ ingenic_pinmux_set_pin_fn(jzpc, grp->grp.pins[i], pin_modes[i]);
}
return 0;
@@ -4298,12 +4299,12 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < chip_info->num_groups; i++) {
const struct group_desc *group = &chip_info->groups[i];
+ const struct pingroup *grp = &group->grp;
- err = pinctrl_generic_add_group(jzpc->pctl, group->name,
- group->pins, group->num_pins, group->data);
+ err = pinctrl_generic_add_group(jzpc->pctl, grp->name, grp->pins, grp->npins,
+ group->data);
if (err < 0) {
- dev_err(dev, "Failed to register group %s\n",
- group->name);
+ dev_err(dev, "Failed to register group %s\n", grp->name);
return err;
}
}
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index 152c35bce..b1349ee22 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -945,7 +945,7 @@ static int keembay_set_mux(struct pinctrl_dev *pctldev, unsigned int fun_sel,
return -EINVAL;
/* Change modes for pins in the selected group */
- pin = *grp->pins;
+ pin = *grp->grp.pins;
pin_mode = *(u8 *)(func->data);
val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
@@ -1517,7 +1517,7 @@ static int keembay_gpiochip_probe(struct keembay_pinctrl *kpc,
static int keembay_build_groups(struct keembay_pinctrl *kpc)
{
- struct group_desc *grp;
+ struct pingroup *grp;
unsigned int i;
kpc->ngroups = kpc->npins;
@@ -1528,7 +1528,7 @@ static int keembay_build_groups(struct keembay_pinctrl *kpc)
/* Each pin is categorised as one group */
for (i = 0; i < kpc->ngroups; i++) {
const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
- struct group_desc *kmb_grp = grp + i;
+ struct pingroup *kmb_grp = grp + i;
kmb_grp->name = pdesc->name;
kmb_grp->pins = (int *)&pdesc->number;
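For reference, the generic struct pingroup this migration targets is deliberately small (paraphrased from the pinctrl core headers; treat as a sketch):

    struct pingroup {
            const char *name;
            const unsigned int *pins;
            size_t npins;
    };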
diff --git a/drivers/pinctrl/pinctrl-pef2256.c b/drivers/pinctrl/pinctrl-pef2256.c
new file mode 100644
index 000000000..868ea33be
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-pef2256.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PEF2256 (also known as FALC56) driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/framer/pef2256.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Port Configuration 1..4 */
+#define PEF2256_PC1 0x80
+#define PEF2256_PC2 0x81
+#define PEF2256_PC3 0x82
+#define PEF2256_PC4 0x83
+#define PEF2256_12_PC_RPC_MASK GENMASK(6, 4)
+#define PEF2256_12_PC_RPC_SYPR FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x0)
+#define PEF2256_12_PC_RPC_RFM FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x1)
+#define PEF2256_12_PC_RPC_RFMB FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x2)
+#define PEF2256_12_PC_RPC_RSIGM FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x3)
+#define PEF2256_12_PC_RPC_RSIG FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x4)
+#define PEF2256_12_PC_RPC_DLR FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x5)
+#define PEF2256_12_PC_RPC_FREEZE FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x6)
+#define PEF2256_12_PC_RPC_RFSP FIELD_PREP_CONST(PEF2256_12_PC_RPC_MASK, 0x7)
+#define PEF2256_12_PC_XPC_MASK GENMASK(4, 0)
+#define PEF2256_12_PC_XPC_SYPX FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x0)
+#define PEF2256_12_PC_XPC_XFMS FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x1)
+#define PEF2256_12_PC_XPC_XSIG FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x2)
+#define PEF2256_12_PC_XPC_TCLK FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x3)
+#define PEF2256_12_PC_XPC_XMFB FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x4)
+#define PEF2256_12_PC_XPC_XSIGM FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x5)
+#define PEF2256_12_PC_XPC_DLX FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x6)
+#define PEF2256_12_PC_XPC_XCLK FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x7)
+#define PEF2256_12_PC_XPC_XLT FIELD_PREP_CONST(PEF2256_12_PC_XPC_MASK, 0x8)
+#define PEF2256_2X_PC_RPC_MASK GENMASK(7, 4)
+#define PEF2256_2X_PC_RPC_SYPR FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x0)
+#define PEF2256_2X_PC_RPC_RFM FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x1)
+#define PEF2256_2X_PC_RPC_RFMB FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x2)
+#define PEF2256_2X_PC_RPC_RSIGM FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x3)
+#define PEF2256_2X_PC_RPC_RSIG FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x4)
+#define PEF2256_2X_PC_RPC_DLR FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x5)
+#define PEF2256_2X_PC_RPC_FREEZE FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x6)
+#define PEF2256_2X_PC_RPC_RFSP FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x7)
+#define PEF2256_2X_PC_RPC_GPI FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0x9)
+#define PEF2256_2X_PC_RPC_GPOH FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0xa)
+#define PEF2256_2X_PC_RPC_GPOL FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0xb)
+#define PEF2256_2X_PC_RPC_LOS FIELD_PREP_CONST(PEF2256_2X_PC_RPC_MASK, 0xc)
+#define PEF2256_2X_PC_XPC_MASK GENMASK(3, 0)
+#define PEF2256_2X_PC_XPC_SYPX FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x0)
+#define PEF2256_2X_PC_XPC_XFMS FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x1)
+#define PEF2256_2X_PC_XPC_XSIG FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x2)
+#define PEF2256_2X_PC_XPC_TCLK FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x3)
+#define PEF2256_2X_PC_XPC_XMFB FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x4)
+#define PEF2256_2X_PC_XPC_XSIGM FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x5)
+#define PEF2256_2X_PC_XPC_DLX FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x6)
+#define PEF2256_2X_PC_XPC_XCLK FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x7)
+#define PEF2256_2X_PC_XPC_XLT FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x8)
+#define PEF2256_2X_PC_XPC_GPI FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0x9)
+#define PEF2256_2X_PC_XPC_GPOH FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0xa)
+#define PEF2256_2X_PC_XPC_GPOL FIELD_PREP_CONST(PEF2256_2X_PC_XPC_MASK, 0xb)
+
+struct pef2256_pinreg_desc {
+ int offset;
+ u8 mask;
+};
+
+struct pef2256_function_desc {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+ u8 func_val;
+};
+
+struct pef2256_pinctrl {
+ struct device *dev;
+ struct regmap *regmap;
+ enum pef2256_version version;
+ struct pinctrl_desc pctrl_desc;
+ const struct pef2256_function_desc *functions;
+ unsigned int nfunctions;
+};
+
+static int pef2256_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+
+ /* We map 1 group <-> 1 pin */
+ return pef2256->pctrl_desc.npins;
+}
+
+static const char *pef2256_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+
+ /* We map 1 group <-> 1 pin */
+ return pef2256->pctrl_desc.pins[selector].name;
+}
+
+static int pef2256_get_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+
+ /* We map 1 group <-> 1 pin */
+ *pins = &pef2256->pctrl_desc.pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops pef2256_pctlops = {
+ .get_groups_count = pef2256_get_groups_count,
+ .get_group_name = pef2256_get_group_name,
+ .get_group_pins = pef2256_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int pef2256_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+
+ return pef2256->nfunctions;
+}
+
+static const char *pef2256_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+
+ return pef2256->functions[selector].name;
+}
+
+static int pef2256_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pef2256->functions[selector].groups;
+ *num_groups = pef2256->functions[selector].ngroups;
+ return 0;
+}
+
+static int pef2256_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct pef2256_pinctrl *pef2256 = pinctrl_dev_get_drvdata(pctldev);
+ const struct pef2256_pinreg_desc *pinreg_desc;
+ u8 func_val;
+
+ /* We map 1 group <-> 1 pin */
+ pinreg_desc = pef2256->pctrl_desc.pins[group_selector].drv_data;
+ func_val = pef2256->functions[func_selector].func_val;
+
+ return regmap_update_bits(pef2256->regmap, pinreg_desc->offset,
+ pinreg_desc->mask, func_val);
+}
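To make the drv_data indirection concrete, a worked example drawn from the tables below:

    /*
     * Worked example (v2.x variant): muxing group "RPA" to function "RSIG"
     * resolves to
     *   pinreg_desc = { .offset = PEF2256_PC1, .mask = PEF2256_2X_PC_RPC_MASK }
     *   func_val    = PEF2256_2X_PC_RPC_RSIG
     * so regmap_update_bits() rewrites only the RPC field of PC1 and leaves
     * the XPC field untouched.
     */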
+
+static const struct pinmux_ops pef2256_pmxops = {
+ .get_functions_count = pef2256_get_functions_count,
+ .get_function_name = pef2256_get_function_name,
+ .get_function_groups = pef2256_get_function_groups,
+ .set_mux = pef2256_set_mux,
+};
+
+#define PEF2256_PINCTRL_PIN(_number, _name, _offset, _mask) { \
+ .number = _number, \
+ .name = _name, \
+ .drv_data = &(struct pef2256_pinreg_desc) { \
+ .offset = _offset, \
+ .mask = _mask, \
+ }, \
+}
+
+static const struct pinctrl_pin_desc pef2256_v12_pins[] = {
+ PEF2256_PINCTRL_PIN(0, "RPA", PEF2256_PC1, PEF2256_12_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(1, "RPB", PEF2256_PC2, PEF2256_12_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(2, "RPC", PEF2256_PC3, PEF2256_12_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(3, "RPD", PEF2256_PC4, PEF2256_12_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(4, "XPA", PEF2256_PC1, PEF2256_12_PC_XPC_MASK),
+ PEF2256_PINCTRL_PIN(5, "XPB", PEF2256_PC2, PEF2256_12_PC_XPC_MASK),
+ PEF2256_PINCTRL_PIN(6, "XPC", PEF2256_PC3, PEF2256_12_PC_XPC_MASK),
+ PEF2256_PINCTRL_PIN(7, "XPD", PEF2256_PC4, PEF2256_12_PC_XPC_MASK),
+};
+
+static const struct pinctrl_pin_desc pef2256_v2x_pins[] = {
+ PEF2256_PINCTRL_PIN(0, "RPA", PEF2256_PC1, PEF2256_2X_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(1, "RPB", PEF2256_PC2, PEF2256_2X_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(2, "RPC", PEF2256_PC3, PEF2256_2X_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(3, "RPD", PEF2256_PC4, PEF2256_2X_PC_RPC_MASK),
+ PEF2256_PINCTRL_PIN(4, "XPA", PEF2256_PC1, PEF2256_2X_PC_XPC_MASK),
+ PEF2256_PINCTRL_PIN(5, "XPB", PEF2256_PC2, PEF2256_2X_PC_XPC_MASK),
+ PEF2256_PINCTRL_PIN(6, "XPC", PEF2256_PC3, PEF2256_2X_PC_XPC_MASK),
+ PEF2256_PINCTRL_PIN(7, "XPD", PEF2256_PC4, PEF2256_2X_PC_XPC_MASK),
+};
+
+static const char *const pef2256_rp_groups[] = { "RPA", "RPB", "RPC", "RPD" };
+static const char *const pef2256_xp_groups[] = { "XPA", "XPB", "XPC", "XPD" };
+static const char *const pef2256_all_groups[] = { "RPA", "RPB", "RPC", "RPD",
+ "XPA", "XPB", "XPC", "XPD" };
+
+#define PEF2256_FUNCTION(_name, _func_val, _groups) { \
+ .name = _name, \
+ .groups = _groups, \
+ .ngroups = ARRAY_SIZE(_groups), \
+ .func_val = _func_val, \
+}
+
+static const struct pef2256_function_desc pef2256_v2x_functions[] = {
+ PEF2256_FUNCTION("SYPR", PEF2256_2X_PC_RPC_SYPR, pef2256_rp_groups),
+ PEF2256_FUNCTION("RFM", PEF2256_2X_PC_RPC_RFM, pef2256_rp_groups),
+ PEF2256_FUNCTION("RFMB", PEF2256_2X_PC_RPC_RFMB, pef2256_rp_groups),
+ PEF2256_FUNCTION("RSIGM", PEF2256_2X_PC_RPC_RSIGM, pef2256_rp_groups),
+ PEF2256_FUNCTION("RSIG", PEF2256_2X_PC_RPC_RSIG, pef2256_rp_groups),
+ PEF2256_FUNCTION("DLR", PEF2256_2X_PC_RPC_DLR, pef2256_rp_groups),
+ PEF2256_FUNCTION("FREEZE", PEF2256_2X_PC_RPC_FREEZE, pef2256_rp_groups),
+ PEF2256_FUNCTION("RFSP", PEF2256_2X_PC_RPC_RFSP, pef2256_rp_groups),
+ PEF2256_FUNCTION("LOS", PEF2256_2X_PC_RPC_LOS, pef2256_rp_groups),
+
+ PEF2256_FUNCTION("SYPX", PEF2256_2X_PC_XPC_SYPX, pef2256_xp_groups),
+ PEF2256_FUNCTION("XFMS", PEF2256_2X_PC_XPC_XFMS, pef2256_xp_groups),
+ PEF2256_FUNCTION("XSIG", PEF2256_2X_PC_XPC_XSIG, pef2256_xp_groups),
+ PEF2256_FUNCTION("TCLK", PEF2256_2X_PC_XPC_TCLK, pef2256_xp_groups),
+ PEF2256_FUNCTION("XMFB", PEF2256_2X_PC_XPC_XMFB, pef2256_xp_groups),
+ PEF2256_FUNCTION("XSIGM", PEF2256_2X_PC_XPC_XSIGM, pef2256_xp_groups),
+ PEF2256_FUNCTION("DLX", PEF2256_2X_PC_XPC_DLX, pef2256_xp_groups),
+ PEF2256_FUNCTION("XCLK", PEF2256_2X_PC_XPC_XCLK, pef2256_xp_groups),
+ PEF2256_FUNCTION("XLT", PEF2256_2X_PC_XPC_XLT, pef2256_xp_groups),
+
+ PEF2256_FUNCTION("GPI", PEF2256_2X_PC_RPC_GPI | PEF2256_2X_PC_XPC_GPI,
+ pef2256_all_groups),
+ PEF2256_FUNCTION("GPOH", PEF2256_2X_PC_RPC_GPOH | PEF2256_2X_PC_XPC_GPOH,
+ pef2256_all_groups),
+ PEF2256_FUNCTION("GPOL", PEF2256_2X_PC_RPC_GPOL | PEF2256_2X_PC_XPC_GPOL,
+ pef2256_all_groups),
+};
+
+static const struct pef2256_function_desc pef2256_v12_functions[] = {
+ PEF2256_FUNCTION("SYPR", PEF2256_12_PC_RPC_SYPR, pef2256_rp_groups),
+ PEF2256_FUNCTION("RFM", PEF2256_12_PC_RPC_RFM, pef2256_rp_groups),
+ PEF2256_FUNCTION("RFMB", PEF2256_12_PC_RPC_RFMB, pef2256_rp_groups),
+ PEF2256_FUNCTION("RSIGM", PEF2256_12_PC_RPC_RSIGM, pef2256_rp_groups),
+ PEF2256_FUNCTION("RSIG", PEF2256_12_PC_RPC_RSIG, pef2256_rp_groups),
+ PEF2256_FUNCTION("DLR", PEF2256_12_PC_RPC_DLR, pef2256_rp_groups),
+ PEF2256_FUNCTION("FREEZE", PEF2256_12_PC_RPC_FREEZE, pef2256_rp_groups),
+ PEF2256_FUNCTION("RFSP", PEF2256_12_PC_RPC_RFSP, pef2256_rp_groups),
+
+ PEF2256_FUNCTION("SYPX", PEF2256_12_PC_XPC_SYPX, pef2256_xp_groups),
+ PEF2256_FUNCTION("XFMS", PEF2256_12_PC_XPC_XFMS, pef2256_xp_groups),
+ PEF2256_FUNCTION("XSIG", PEF2256_12_PC_XPC_XSIG, pef2256_xp_groups),
+ PEF2256_FUNCTION("TCLK", PEF2256_12_PC_XPC_TCLK, pef2256_xp_groups),
+ PEF2256_FUNCTION("XMFB", PEF2256_12_PC_XPC_XMFB, pef2256_xp_groups),
+ PEF2256_FUNCTION("XSIGM", PEF2256_12_PC_XPC_XSIGM, pef2256_xp_groups),
+ PEF2256_FUNCTION("DLX", PEF2256_12_PC_XPC_DLX, pef2256_xp_groups),
+ PEF2256_FUNCTION("XCLK", PEF2256_12_PC_XPC_XCLK, pef2256_xp_groups),
+ PEF2256_FUNCTION("XLT", PEF2256_12_PC_XPC_XLT, pef2256_xp_groups),
+};
+
+static int pef2256_register_pinctrl(struct pef2256_pinctrl *pef2256)
+{
+ struct pinctrl_dev *pctrl;
+
+ pef2256->pctrl_desc.name = dev_name(pef2256->dev);
+ pef2256->pctrl_desc.owner = THIS_MODULE;
+ pef2256->pctrl_desc.pctlops = &pef2256_pctlops;
+ pef2256->pctrl_desc.pmxops = &pef2256_pmxops;
+ if (pef2256->version == PEF2256_VERSION_1_2) {
+ pef2256->pctrl_desc.pins = pef2256_v12_pins;
+ pef2256->pctrl_desc.npins = ARRAY_SIZE(pef2256_v12_pins);
+ pef2256->functions = pef2256_v12_functions;
+ pef2256->nfunctions = ARRAY_SIZE(pef2256_v12_functions);
+ } else {
+ pef2256->pctrl_desc.pins = pef2256_v2x_pins;
+ pef2256->pctrl_desc.npins = ARRAY_SIZE(pef2256_v2x_pins);
+ pef2256->functions = pef2256_v2x_functions;
+ pef2256->nfunctions = ARRAY_SIZE(pef2256_v2x_functions);
+ }
+
+ pctrl = devm_pinctrl_register(pef2256->dev, &pef2256->pctrl_desc, pef2256);
+ if (IS_ERR(pctrl))
+ return dev_err_probe(pef2256->dev, PTR_ERR(pctrl),
+ "pinctrl driver registration failed\n");
+
+ return 0;
+}
+
+static void pef2256_reset_pinmux(struct pef2256_pinctrl *pef2256)
+{
+ u8 val;
+ /*
+ * The hardware reset values cannot be kept: they mux all the RPx and
+ * XPx pins to SYPR/SYPX, while only one pin may be muxed to SYPR and
+ * only one to SYPX. Choose another reset value here instead.
+ */
+ if (pef2256->version == PEF2256_VERSION_1_2)
+ val = PEF2256_12_PC_XPC_XCLK | PEF2256_12_PC_RPC_RFSP;
+ else
+ val = PEF2256_2X_PC_XPC_GPI | PEF2256_2X_PC_RPC_GPI;
+
+ regmap_write(pef2256->regmap, PEF2256_PC1, val);
+ regmap_write(pef2256->regmap, PEF2256_PC2, val);
+ regmap_write(pef2256->regmap, PEF2256_PC3, val);
+ regmap_write(pef2256->regmap, PEF2256_PC4, val);
+}
+
+static int pef2256_pinctrl_probe(struct platform_device *pdev)
+{
+ struct pef2256_pinctrl *pef2256_pinctrl;
+ struct pef2256 *pef2256;
+ int ret;
+
+ pef2256_pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pef2256_pinctrl), GFP_KERNEL);
+ if (!pef2256_pinctrl)
+ return -ENOMEM;
+
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
+ pef2256 = dev_get_drvdata(pdev->dev.parent);
+
+ pef2256_pinctrl->dev = &pdev->dev;
+ pef2256_pinctrl->regmap = pef2256_get_regmap(pef2256);
+ pef2256_pinctrl->version = pef2256_get_version(pef2256);
+
+ platform_set_drvdata(pdev, pef2256_pinctrl);
+
+ pef2256_reset_pinmux(pef2256_pinctrl);
+ ret = pef2256_register_pinctrl(pef2256_pinctrl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver pef2256_pinctrl_driver = {
+ .driver = {
+ .name = "lantiq-pef2256-pinctrl",
+ },
+ .probe = pef2256_pinctrl_probe,
+};
+module_platform_driver(pef2256_pinctrl_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("PEF2256 pin controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 8267be769..19cc0db77 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1955,6 +1955,10 @@ static const struct pcs_soc_data pinctrl_single_am654 = {
.irq_status_mask = (1 << 30), /* WKUP_EVT */
};
+static const struct pcs_soc_data pinctrl_single_j7200 = {
+ .flags = PCS_CONTEXT_LOSS_OFF,
+};
+
static const struct pcs_soc_data pinctrl_single = {
};
@@ -1969,6 +1973,7 @@ static const struct of_device_id pcs_of_match[] = {
{ .compatible = "ti,omap3-padconf", .data = &pinctrl_single_omap_wkup },
{ .compatible = "ti,omap4-padconf", .data = &pinctrl_single_omap_wkup },
{ .compatible = "ti,omap5-padconf", .data = &pinctrl_single_omap_wkup },
+ { .compatible = "ti,j7200-padconf", .data = &pinctrl_single_j7200 },
{ .compatible = "pinctrl-single", .data = &pinctrl_single },
{ .compatible = "pinconf-single", .data = &pinconf_single },
{ },
diff --git a/drivers/pinctrl/pinctrl-tps6594.c b/drivers/pinctrl/pinctrl-tps6594.c
new file mode 100644
index 000000000..66985e54b
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tps6594.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinmux and GPIO driver for TPS6594 PMIC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+
+#include <linux/mfd/tps6594.h>
+
+#define TPS6594_PINCTRL_PINS_NB 11
+
+#define TPS6594_PINCTRL_GPIO_FUNCTION 0
+#define TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION 1
+#define TPS6594_PINCTRL_TRIG_WDOG_FUNCTION 1
+#define TPS6594_PINCTRL_CLK32KOUT_FUNCTION 1
+#define TPS6594_PINCTRL_SCLK_SPMI_FUNCTION 1
+#define TPS6594_PINCTRL_SDATA_SPMI_FUNCTION 1
+#define TPS6594_PINCTRL_NERR_MCU_FUNCTION 1
+#define TPS6594_PINCTRL_PDOG_FUNCTION 1
+#define TPS6594_PINCTRL_SYNCCLKIN_FUNCTION 1
+#define TPS6594_PINCTRL_NRSTOUT_SOC_FUNCTION 2
+#define TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION 2
+#define TPS6594_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION 2
+#define TPS6594_PINCTRL_NERR_SOC_FUNCTION 2
+#define TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION 3
+#define TPS6594_PINCTRL_NSLEEP1_FUNCTION 4
+#define TPS6594_PINCTRL_NSLEEP2_FUNCTION 5
+#define TPS6594_PINCTRL_WKUP1_FUNCTION 6
+#define TPS6594_PINCTRL_WKUP2_FUNCTION 7
+
+/* Special muxval for recalcitrant pins */
+#define TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8 2
+#define TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8 3
+#define TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9 3
+
+#define TPS6594_OFFSET_GPIO_SEL 5
+
+#define FUNCTION(fname, v) \
+{ \
+ .pinfunction = PINCTRL_PINFUNCTION(#fname, \
+ tps6594_##fname##_func_group_names, \
+ ARRAY_SIZE(tps6594_##fname##_func_group_names)),\
+ .muxval = v, \
+}
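As a quick expansion check of the FUNCTION() macro, using values defined above:

    /*
     * FUNCTION(nsleep1, TPS6594_PINCTRL_NSLEEP1_FUNCTION) pastes the name
     * into tps6594_nsleep1_func_group_names[] and stores .muxval = 4
     * (TPS6594_PINCTRL_NSLEEP1_FUNCTION).
     */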
+
+static const struct pinctrl_pin_desc tps6594_pins[TPS6594_PINCTRL_PINS_NB] = {
+ PINCTRL_PIN(0, "GPIO0"), PINCTRL_PIN(1, "GPIO1"),
+ PINCTRL_PIN(2, "GPIO2"), PINCTRL_PIN(3, "GPIO3"),
+ PINCTRL_PIN(4, "GPIO4"), PINCTRL_PIN(5, "GPIO5"),
+ PINCTRL_PIN(6, "GPIO6"), PINCTRL_PIN(7, "GPIO7"),
+ PINCTRL_PIN(8, "GPIO8"), PINCTRL_PIN(9, "GPIO9"),
+ PINCTRL_PIN(10, "GPIO10"),
+};
+
+static const char *const tps6594_gpio_func_group_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+ "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_nsleep1_func_group_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+ "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_nsleep2_func_group_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+ "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_wkup1_func_group_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+ "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_wkup2_func_group_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
+ "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10",
+};
+
+static const char *const tps6594_scl_i2c2_cs_spi_func_group_names[] = {
+ "GPIO0",
+ "GPIO1",
+};
+
+static const char *const tps6594_nrstout_soc_func_group_names[] = {
+ "GPIO0",
+ "GPIO10",
+};
+
+static const char *const tps6594_trig_wdog_func_group_names[] = {
+ "GPIO1",
+ "GPIO10",
+};
+
+static const char *const tps6594_sda_i2c2_sdo_spi_func_group_names[] = {
+ "GPIO1",
+};
+
+static const char *const tps6594_clk32kout_func_group_names[] = {
+ "GPIO2",
+ "GPIO3",
+ "GPIO7",
+};
+
+static const char *const tps6594_nerr_soc_func_group_names[] = {
+ "GPIO2",
+};
+
+static const char *const tps6594_sclk_spmi_func_group_names[] = {
+ "GPIO4",
+};
+
+static const char *const tps6594_sdata_spmi_func_group_names[] = {
+ "GPIO5",
+};
+
+static const char *const tps6594_nerr_mcu_func_group_names[] = {
+ "GPIO6",
+};
+
+static const char *const tps6594_syncclkout_func_group_names[] = {
+ "GPIO7",
+ "GPIO9",
+};
+
+static const char *const tps6594_disable_wdog_func_group_names[] = {
+ "GPIO7",
+ "GPIO8",
+};
+
+static const char *const tps6594_pdog_func_group_names[] = {
+ "GPIO8",
+};
+
+static const char *const tps6594_syncclkin_func_group_names[] = {
+ "GPIO9",
+};
+
+struct tps6594_pinctrl_function {
+ struct pinfunction pinfunction;
+ u8 muxval;
+};
+
+static const struct tps6594_pinctrl_function pinctrl_functions[] = {
+ FUNCTION(gpio, TPS6594_PINCTRL_GPIO_FUNCTION),
+ FUNCTION(nsleep1, TPS6594_PINCTRL_NSLEEP1_FUNCTION),
+ FUNCTION(nsleep2, TPS6594_PINCTRL_NSLEEP2_FUNCTION),
+ FUNCTION(wkup1, TPS6594_PINCTRL_WKUP1_FUNCTION),
+ FUNCTION(wkup2, TPS6594_PINCTRL_WKUP2_FUNCTION),
+ FUNCTION(scl_i2c2_cs_spi, TPS6594_PINCTRL_SCL_I2C2_CS_SPI_FUNCTION),
+ FUNCTION(nrstout_soc, TPS6594_PINCTRL_NRSTOUT_SOC_FUNCTION),
+ FUNCTION(trig_wdog, TPS6594_PINCTRL_TRIG_WDOG_FUNCTION),
+ FUNCTION(sda_i2c2_sdo_spi, TPS6594_PINCTRL_SDA_I2C2_SDO_SPI_FUNCTION),
+ FUNCTION(clk32kout, TPS6594_PINCTRL_CLK32KOUT_FUNCTION),
+ FUNCTION(nerr_soc, TPS6594_PINCTRL_NERR_SOC_FUNCTION),
+ FUNCTION(sclk_spmi, TPS6594_PINCTRL_SCLK_SPMI_FUNCTION),
+ FUNCTION(sdata_spmi, TPS6594_PINCTRL_SDATA_SPMI_FUNCTION),
+ FUNCTION(nerr_mcu, TPS6594_PINCTRL_NERR_MCU_FUNCTION),
+ FUNCTION(syncclkout, TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION),
+ FUNCTION(disable_wdog, TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION),
+ FUNCTION(pdog, TPS6594_PINCTRL_PDOG_FUNCTION),
+ FUNCTION(syncclkin, TPS6594_PINCTRL_SYNCCLKIN_FUNCTION),
+};
+
+struct tps6594_pinctrl {
+ struct tps6594 *tps;
+ struct gpio_regmap *gpio_regmap;
+ struct pinctrl_dev *pctl_dev;
+ const struct tps6594_pinctrl_function *funcs;
+ const struct pinctrl_pin_desc *pins;
+};
+
+static int tps6594_gpio_regmap_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ unsigned int line = offset % 8;
+ unsigned int stride = offset / 8;
+
+ switch (base) {
+ case TPS6594_REG_GPIOX_CONF(0):
+ *reg = TPS6594_REG_GPIOX_CONF(offset);
+ *mask = TPS6594_BIT_GPIO_DIR;
+ return 0;
+ case TPS6594_REG_GPIO_IN_1:
+ case TPS6594_REG_GPIO_OUT_1:
+ *reg = base + stride;
+ *mask = BIT(line);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
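The xlate callback packs the eleven lines into byte-wide IN/OUT banks; a worked example of the arithmetic:

    /*
     * offset 9 (GPIO9): line = 9 % 8 = 1, stride = 9 / 8 = 1.
     * For base TPS6594_REG_GPIO_IN_1:     reg = TPS6594_REG_GPIO_IN_1 + 1,
     *                                     mask = BIT(1).
     * For base TPS6594_REG_GPIOX_CONF(0): reg = TPS6594_REG_GPIOX_CONF(9),
     *                                     mask = TPS6594_BIT_GPIO_DIR.
     */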
+
+static int tps6594_pmx_func_cnt(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(pinctrl_functions);
+}
+
+static const char *tps6594_pmx_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->funcs[selector].pinfunction.name;
+}
+
+static int tps6594_pmx_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char *const **groups,
+ unsigned int *num_groups)
+{
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pinctrl->funcs[selector].pinfunction.groups;
+ *num_groups = pinctrl->funcs[selector].pinfunction.ngroups;
+
+ return 0;
+}
+
+static int tps6594_pmx_set(struct tps6594_pinctrl *pinctrl, unsigned int pin,
+ u8 muxval)
+{
+ u8 mux_sel_val = muxval << TPS6594_OFFSET_GPIO_SEL;
+
+ return regmap_update_bits(pinctrl->tps->regmap,
+ TPS6594_REG_GPIOX_CONF(pin),
+ TPS6594_MASK_GPIO_SEL, mux_sel_val);
+}
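Field placement check for the shift above, using a muxval defined earlier:

    /*
     * NSLEEP2 has muxval 5, so mux_sel_val = 5 << TPS6594_OFFSET_GPIO_SEL
     * = 0xa0, which regmap_update_bits() places in the
     * TPS6594_MASK_GPIO_SEL field of TPS6594_REG_GPIOX_CONF(pin).
     */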
+
+static int tps6594_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+ u8 muxval = pinctrl->funcs[function].muxval;
+
+ /* Some pins don't have the same muxval for the same function... */
+ if (group == 8) {
+ if (muxval == TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION)
+ muxval = TPS6594_PINCTRL_DISABLE_WDOG_FUNCTION_GPIO8;
+ else if (muxval == TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION)
+ muxval = TPS6594_PINCTRL_SYNCCLKOUT_FUNCTION_GPIO8;
+ } else if (group == 9) {
+ if (muxval == TPS6594_PINCTRL_CLK32KOUT_FUNCTION)
+ muxval = TPS6594_PINCTRL_CLK32KOUT_FUNCTION_GPIO9;
+ }
+
+ return tps6594_pmx_set(pinctrl, group, muxval);
+}
+
+static int tps6594_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+ u8 muxval = pinctrl->funcs[TPS6594_PINCTRL_GPIO_FUNCTION].muxval;
+
+ return tps6594_pmx_set(pinctrl, offset, muxval);
+}
+
+static const struct pinmux_ops tps6594_pmx_ops = {
+ .get_functions_count = tps6594_pmx_func_cnt,
+ .get_function_name = tps6594_pmx_func_name,
+ .get_function_groups = tps6594_pmx_func_groups,
+ .set_mux = tps6594_pmx_set_mux,
+ .gpio_set_direction = tps6594_pmx_gpio_set_direction,
+ .strict = true,
+};
+
+static int tps6594_groups_cnt(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(tps6594_pins);
+}
+
+static int tps6594_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector, const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pinctrl->pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const char *tps6594_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct tps6594_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pinctrl->pins[selector].name;
+}
+
+static const struct pinctrl_ops tps6594_pctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = tps6594_groups_cnt,
+ .get_group_name = tps6594_group_name,
+ .get_group_pins = tps6594_group_pins,
+};
+
+static int tps6594_pinctrl_probe(struct platform_device *pdev)
+{
+ struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct tps6594_pinctrl *pinctrl;
+ struct pinctrl_desc *pctrl_desc;
+ struct gpio_regmap_config config = {};
+
+ pctrl_desc = devm_kzalloc(dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ if (!pctrl_desc)
+ return -ENOMEM;
+ pctrl_desc->name = dev_name(dev);
+ pctrl_desc->owner = THIS_MODULE;
+ pctrl_desc->pins = tps6594_pins;
+ pctrl_desc->npins = ARRAY_SIZE(tps6594_pins);
+ pctrl_desc->pctlops = &tps6594_pctrl_ops;
+ pctrl_desc->pmxops = &tps6594_pmx_ops;
+
+ pinctrl = devm_kzalloc(dev, sizeof(*pinctrl), GFP_KERNEL);
+ if (!pinctrl)
+ return -ENOMEM;
+ pinctrl->tps = dev_get_drvdata(dev->parent);
+ pinctrl->funcs = pinctrl_functions;
+ pinctrl->pins = tps6594_pins;
+ pinctrl->pctl_dev = devm_pinctrl_register(dev, pctrl_desc, pinctrl);
+ if (IS_ERR(pinctrl->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(pinctrl->pctl_dev),
+ "Couldn't register pinctrl driver\n");
+
+ config.parent = tps->dev;
+ config.regmap = tps->regmap;
+ config.ngpio = TPS6594_PINCTRL_PINS_NB;
+ config.ngpio_per_reg = 8;
+ config.reg_dat_base = TPS6594_REG_GPIO_IN_1;
+ config.reg_set_base = TPS6594_REG_GPIO_OUT_1;
+ config.reg_dir_out_base = TPS6594_REG_GPIOX_CONF(0);
+ config.reg_mask_xlate = tps6594_gpio_regmap_xlate;
+
+ pinctrl->gpio_regmap = devm_gpio_regmap_register(dev, &config);
+ if (IS_ERR(pinctrl->gpio_regmap))
+ return dev_err_probe(dev, PTR_ERR(pinctrl->gpio_regmap),
+ "Couldn't register gpio_regmap driver\n");
+
+ return 0;
+}
+
+static const struct platform_device_id tps6594_pinctrl_id_table[] = {
+ { "tps6594-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, tps6594_pinctrl_id_table);
+
+static struct platform_driver tps6594_pinctrl_driver = {
+ .probe = tps6594_pinctrl_probe,
+ .driver = {
+ .name = "tps6594-pinctrl",
+ },
+ .id_table = tps6594_pinctrl_id_table,
+};
+module_platform_driver(tps6594_pinctrl_driver);
+
+MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_DESCRIPTION("TPS6594 pinctrl and GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index 40862f7bd..d81d7b461 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -18,11 +18,11 @@
#include "pinctrl-utils.h"
int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, unsigned reserve)
+ struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, unsigned int reserve)
{
- unsigned old_num = *reserved_maps;
- unsigned new_num = *num_maps + reserve;
+ unsigned int old_num = *reserved_maps;
+ unsigned int new_num = *num_maps + reserve;
struct pinctrl_map *new_map;
if (old_num >= new_num)
@@ -43,8 +43,8 @@ int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
EXPORT_SYMBOL_GPL(pinctrl_utils_reserve_map);
int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
- struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
+ struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
const char *function)
{
if (WARN_ON(*num_maps == *reserved_maps))
@@ -60,9 +60,9 @@ int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
EXPORT_SYMBOL_GPL(pinctrl_utils_add_map_mux);
int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
- struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- unsigned long *configs, unsigned num_configs,
+ struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
+ unsigned long *configs, unsigned int num_configs,
enum pinctrl_map_type type)
{
unsigned long *dup_configs;
@@ -86,11 +86,11 @@ int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
EXPORT_SYMBOL_GPL(pinctrl_utils_add_map_configs);
int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
- unsigned long **configs, unsigned *num_configs,
+ unsigned long **configs, unsigned int *num_configs,
unsigned long config)
{
- unsigned old_num = *num_configs;
- unsigned new_num = old_num + 1;
+ unsigned int old_num = *num_configs;
+ unsigned int new_num = old_num + 1;
unsigned long *new_configs;
new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
@@ -110,7 +110,7 @@ int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
EXPORT_SYMBOL_GPL(pinctrl_utils_add_config);
void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
+ struct pinctrl_map *map, unsigned int num_maps)
{
int i;
diff --git a/drivers/pinctrl/pinctrl-utils.h b/drivers/pinctrl/pinctrl-utils.h
index 4108ee2dd..203fba257 100644
--- a/drivers/pinctrl/pinctrl-utils.h
+++ b/drivers/pinctrl/pinctrl-utils.h
@@ -15,21 +15,21 @@ struct pinctrl_dev;
struct pinctrl_map;
int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, unsigned reserve);
+ struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, unsigned int reserve);
int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
- struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
+ struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
const char *function);
int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
- struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- unsigned long *configs, unsigned num_configs,
+ struct pinctrl_map **map, unsigned int *reserved_maps,
+ unsigned int *num_maps, const char *group,
+ unsigned long *configs, unsigned int num_configs,
enum pinctrl_map_type type);
int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
- unsigned long **configs, unsigned *num_configs,
+ unsigned long **configs, unsigned int *num_configs,
unsigned long config);
void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
#endif /* __PINCTRL_UTILS_H__ */
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 23d2da0b9..abbb044d6 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -35,8 +35,8 @@
int pinmux_check_ops(struct pinctrl_dev *pctldev)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
- unsigned nfuncs;
- unsigned selector = 0;
+ unsigned int nfuncs;
+ unsigned int selector = 0;
/* Check that we implement required operations */
if (!ops ||
@@ -84,7 +84,7 @@ int pinmux_validate_map(const struct pinctrl_map *map, int i)
* Controllers not defined as strict will always return true,
* meaning that the gpio can be used.
*/
-bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin)
+bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct pin_desc *desc = pin_desc_get(pctldev, pin);
const struct pinmux_ops *ops = pctldev->desc->pmxops;
@@ -262,7 +262,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
*/
int pinmux_request_gpio(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, unsigned gpio)
+ unsigned int pin, unsigned int gpio)
{
const char *owner;
int ret;
@@ -285,7 +285,7 @@ int pinmux_request_gpio(struct pinctrl_dev *pctldev,
* @pin: the affected currently GPIO-muxed in pin
* @range: applicable GPIO range
*/
-void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin,
+void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned int pin,
struct pinctrl_gpio_range *range)
{
const char *owner;
@@ -303,7 +303,7 @@ void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin,
*/
int pinmux_gpio_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, bool input)
+ unsigned int pin, bool input)
{
const struct pinmux_ops *ops;
int ret;
@@ -322,8 +322,8 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
const char *function)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
- unsigned nfuncs = ops->get_functions_count(pctldev);
- unsigned selector = 0;
+ unsigned int nfuncs = ops->get_functions_count(pctldev);
+ unsigned int selector = 0;
/* See if this pctldev has this function */
while (selector < nfuncs) {
@@ -344,7 +344,7 @@ int pinmux_map_to_setting(const struct pinctrl_map *map,
struct pinctrl_dev *pctldev = setting->pctldev;
const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
char const * const *groups;
- unsigned num_groups;
+ unsigned int num_groups;
int ret;
const char *group;
@@ -409,8 +409,8 @@ int pinmux_enable_setting(const struct pinctrl_setting *setting)
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
int ret = 0;
- const unsigned *pins = NULL;
- unsigned num_pins = 0;
+ const unsigned int *pins = NULL;
+ unsigned int num_pins = 0;
int i;
struct pin_desc *desc;
@@ -489,8 +489,8 @@ void pinmux_disable_setting(const struct pinctrl_setting *setting)
struct pinctrl_dev *pctldev = setting->pctldev;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
int ret = 0;
- const unsigned *pins = NULL;
- unsigned num_pins = 0;
+ const unsigned int *pins = NULL;
+ unsigned int num_pins = 0;
int i;
struct pin_desc *desc;
@@ -541,8 +541,8 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
- unsigned nfuncs;
- unsigned func_selector = 0;
+ unsigned int nfuncs;
+ unsigned int func_selector = 0;
if (!pmxops)
return 0;
@@ -553,7 +553,7 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
const char *func = pmxops->get_function_name(pctldev,
func_selector);
const char * const *groups;
- unsigned num_groups;
+ unsigned int num_groups;
int ret;
int i;
@@ -584,7 +584,7 @@ static int pinmux_pins_show(struct seq_file *s, void *what)
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
- unsigned i, pin;
+ unsigned int i, pin;
if (!pmxops)
return 0;
@@ -818,7 +818,7 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
- unsigned * const num_groups)
+ unsigned int * const num_groups)
{
struct function_desc *function;
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index ea6f99c24..7c8aa25cc 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -26,16 +26,16 @@ int pinmux_check_ops(struct pinctrl_dev *pctldev);
int pinmux_validate_map(const struct pinctrl_map *map, int i);
-bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin);
+bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin);
int pinmux_request_gpio(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, unsigned gpio);
-void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned int pin, unsigned int gpio);
+void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned int pin,
struct pinctrl_gpio_range *range);
int pinmux_gpio_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, bool input);
+ unsigned int pin, bool input);
int pinmux_map_to_setting(const struct pinctrl_map *map,
struct pinctrl_setting *setting);
@@ -56,27 +56,27 @@ static inline int pinmux_validate_map(const struct pinctrl_map *map, int i)
}
static inline bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev,
- unsigned pin)
+ unsigned int pin)
{
return true;
}
static inline int pinmux_request_gpio(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, unsigned gpio)
+ unsigned int pin, unsigned int gpio)
{
return 0;
}
static inline void pinmux_free_gpio(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
struct pinctrl_gpio_range *range)
{
}
static inline int pinmux_gpio_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned pin, bool input)
+ unsigned int pin, bool input)
{
return 0;
}
@@ -154,7 +154,7 @@ pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
- unsigned * const num_groups);
+ unsigned int * const num_groups);
struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
unsigned int selector);
@@ -162,7 +162,7 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
const char * const *groups,
- unsigned const num_groups,
+ unsigned int const num_groups,
void *data);
int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index f84c0d3b7..24619e80b 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -124,4 +124,14 @@ config PINCTRL_SM8550_LPASS_LPI
(Low Power Island) found on the Qualcomm Technologies Inc SM8550
platform.
+config PINCTRL_SM8650_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SM8650 LPASS LPI pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SM8650
+ platform.
+
endif
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 01dd7b134..8fe459d08 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -286,6 +286,14 @@ config PINCTRL_SDX75
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SDX75 platform.
+config PINCTRL_SM4450
+ tristate "Qualcomm Technologies Inc SM4450 pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM4450 platform.
+
config PINCTRL_SM6115
tristate "Qualcomm Technologies Inc SM6115,SM4250 pin controller driver"
depends on ARM64 || COMPILE_TEST
@@ -366,4 +374,22 @@ config PINCTRL_SM8550
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8550 platform.
+config PINCTRL_SM8650
+ tristate "Qualcomm Technologies Inc SM8650 pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8650 platform.
+
+config PINCTRL_X1E80100
+ tristate "Qualcomm Technologies Inc X1E80100 pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc Top Level Mode Multiplexer (TLMM)
+	  block found on the Qualcomm Technologies Inc X1E80100 platform.
+ Say Y here to compile statically, or M here to compile it as a module.
+ If unsure, say N.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 5910e08c8..e2e76071d 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SDX75) += pinctrl-sdx75.o
+obj-$(CONFIG_PINCTRL_SM4450) += pinctrl-sm4450.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6115_LPASS_LPI) += pinctrl-sm6115-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
@@ -58,5 +59,8 @@ obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o
obj-$(CONFIG_PINCTRL_SM8550_LPASS_LPI) += pinctrl-sm8550-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_SM8650) += pinctrl-sm8650.o
+obj-$(CONFIG_PINCTRL_SM8650_LPASS_LPI) += pinctrl-sm8650-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_X1E80100) += pinctrl-x1e80100.o
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 9651aed04..0d98008e3 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -186,6 +186,41 @@ static int lpi_config_get(struct pinctrl_dev *pctldev,
return 0;
}
+static int lpi_config_set_slew_rate(struct lpi_pinctrl *pctrl,
+ const struct lpi_pingroup *g,
+ unsigned int group, unsigned int slew)
+{
+ unsigned long sval;
+ void __iomem *reg;
+ int slew_offset;
+
+ if (slew > LPI_SLEW_RATE_MAX) {
+ dev_err(pctrl->dev, "invalid slew rate %u for pin: %d\n",
+ slew, group);
+ return -EINVAL;
+ }
+
+ slew_offset = g->slew_offset;
+ if (slew_offset == LPI_NO_SLEW)
+ return 0;
+
+ if (pctrl->data->flags & LPI_FLAG_SLEW_RATE_SAME_REG)
+ reg = pctrl->tlmm_base + LPI_TLMM_REG_OFFSET * group + LPI_GPIO_CFG_REG;
+ else
+ reg = pctrl->slew_base + LPI_SLEW_RATE_CTL_REG;
+
+ mutex_lock(&pctrl->lock);
+
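+	/* Read-modify-write only this group's slew bits while holding the lock */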
+ sval = ioread32(reg);
+ sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
+ sval |= slew << slew_offset;
+ iowrite32(sval, reg);
+
+ mutex_unlock(&pctrl->lock);
+
+ return 0;
+}
+
static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
unsigned long *configs, unsigned int nconfs)
{
@@ -193,8 +228,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
bool value, output_enabled = false;
const struct lpi_pingroup *g;
- unsigned long sval;
- int i, slew_offset;
+ int i, ret;
u32 val;
g = &pctrl->data->groups[group];
@@ -226,24 +260,9 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
strength = arg;
break;
case PIN_CONFIG_SLEW_RATE:
- if (arg > LPI_SLEW_RATE_MAX) {
- dev_err(pctldev->dev, "invalid slew rate %u for pin: %d\n",
- arg, group);
- return -EINVAL;
- }
-
- slew_offset = g->slew_offset;
- if (slew_offset == LPI_NO_SLEW)
- break;
-
- mutex_lock(&pctrl->lock);
-
- sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
- sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
- sval |= arg << slew_offset;
- iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
-
- mutex_unlock(&pctrl->lock);
+ ret = lpi_config_set_slew_rate(pctrl, g, group, arg);
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
@@ -319,7 +338,6 @@ static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
}
#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
static unsigned int lpi_regval_to_drive(u32 val)
{
@@ -439,10 +457,12 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pctrl->tlmm_base),
"TLMM resource not provided\n");
- pctrl->slew_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(pctrl->slew_base))
- return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
- "Slew resource not provided\n");
+ if (!(data->flags & LPI_FLAG_SLEW_RATE_SAME_REG)) {
+ pctrl->slew_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pctrl->slew_base))
+ return dev_err_probe(dev, PTR_ERR(pctrl->slew_base),
+ "Slew resource not provided\n");
+ }
ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks);
if (ret)
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index 387d83ee9..a9b2f65c1 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -6,8 +6,8 @@
#ifndef __PINCTRL_LPASS_LPI_H__
#define __PINCTRL_LPASS_LPI_H__
+#include <linux/array_size.h>
#include <linux/bits.h>
-#include <linux/kernel.h>
#include "../core.h"
@@ -45,11 +45,8 @@ struct pinctrl_pin_desc;
#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \
{ \
- .group.name = "gpio" #id, \
- .group.pins = gpio##id##_pins, \
.pin = id, \
.slew_offset = soff, \
- .group.num_pins = ARRAY_SIZE(gpio##id##_pins), \
.funcs = (int[]){ \
LPI_MUX_gpio, \
LPI_MUX_##f1, \
@@ -60,8 +57,13 @@ struct pinctrl_pin_desc;
.nfuncs = 5, \
}
+/*
+ * Slew rate control is done in the same register as the rest of the
+ * pin configuration.
+ */
+#define LPI_FLAG_SLEW_RATE_SAME_REG BIT(0)
+
struct lpi_pingroup {
- struct group_desc group;
unsigned int pin;
/* Bit offset in slew register for SoundWire pins only */
int slew_offset;
@@ -82,6 +84,7 @@ struct lpi_pinctrl_variant_data {
int ngroups;
const struct lpi_function *functions;
int nfunctions;
+ unsigned int flags;
};
int lpi_pinctrl_probe(struct platform_device *pdev);
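For illustration, this is how a SoC variant would opt in to the new flag; a minimal sketch with hypothetical table names (only .flags is the point here):

	static const struct lpi_pinctrl_variant_data example_lpi_data = {
		/* pin, group and function tables elided; names are hypothetical */
		.groups = example_groups,
		.ngroups = ARRAY_SIZE(example_groups),
		.functions = example_functions,
		.nfunctions = ARRAY_SIZE(example_functions),
		.flags = LPI_FLAG_SLEW_RATE_SAME_REG,	/* slew bits share LPI_GPIO_CFG_REG */
	};

With the flag set, lpi_config_set_slew_rate() addresses tlmm_base + LPI_TLMM_REG_OFFSET * group + LPI_GPIO_CFG_REG instead of a dedicated slew region, and lpi_pinctrl_probe() skips mapping the second MEM resource entirely.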
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 395040346..aeaf0d195 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -358,6 +358,10 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
int ret;
u32 val;
+ /* Pin information can only be requested from valid pin groups */
+ if (!gpiochip_line_is_valid(&pctrl->chip, group))
+ return -EINVAL;
+
g = &pctrl->soc->groups[group];
ret = msm_config_reg(pctrl, g, param, &mask, &bit);
@@ -1196,6 +1200,8 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+ unsigned long flags;
int ret;
if (!try_module_get(gc->owner))
@@ -1221,6 +1227,28 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
*/
irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
+ /*
+ * If the wakeup_enable bit is present and marked as available for the
+ * requested GPIO, it should be enabled when the GPIO is marked as
+ * wake irq in order to allow the interrupt event to be transfered to
+ * the PDC HW.
+ * While the name implies only the wakeup event, it's also required for
+ * the interrupt event.
+ */
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs) && g->intr_wakeup_present_bit) {
+ u32 intr_cfg;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ intr_cfg = msm_readl_intr_cfg(pctrl, g);
+ if (intr_cfg & BIT(g->intr_wakeup_present_bit)) {
+ intr_cfg |= BIT(g->intr_wakeup_enable_bit);
+ msm_writel_intr_cfg(intr_cfg, pctrl, g);
+ }
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
+
return 0;
out:
module_put(gc->owner);
@@ -1230,6 +1258,24 @@ out:
static void msm_gpio_irq_relres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+ unsigned long flags;
+
+ /* Disable the wakeup_enable bit if it has been set in msm_gpio_irq_reqres() */
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs) && g->intr_wakeup_present_bit) {
+ u32 intr_cfg;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+ intr_cfg = msm_readl_intr_cfg(pctrl, g);
+ if (intr_cfg & BIT(g->intr_wakeup_present_bit)) {
+ intr_cfg &= ~BIT(g->intr_wakeup_enable_bit);
+ msm_writel_intr_cfg(intr_cfg, pctrl, g);
+ }
+
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
gpiochip_unlock_as_irq(gc, d->hwirq);
module_put(gc->owner);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 4968d08a3..63852ed70 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -58,6 +58,9 @@ struct pinctrl_pin_desc;
* @intr_enable_bit: Offset in @intr_cfg_reg for enabling the interrupt for this group.
* @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt
* status.
+ * @intr_wakeup_present_bit: Offset in @intr_target_reg specifying whether the GPIO can
+ * generate wakeup events.
+ * @intr_wakeup_enable_bit: Offset in @intr_target_reg to enable wakeup events for the GPIO.
* @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing.
* @intr_target_width: Number of bits used for specifying interrupt routing target.
* @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from
@@ -100,6 +103,8 @@ struct msm_pingroup {
unsigned intr_status_bit:5;
unsigned intr_ack_high:1;
+ unsigned intr_wakeup_present_bit:5;
+ unsigned intr_wakeup_enable_bit:5;
unsigned intr_target_bit:5;
unsigned intr_target_width:5;
unsigned intr_target_kpss_val:5;
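To make the two new bitfields concrete, here is a hedged sketch of a group entry for a SoC whose interrupt config register carries the wakeup routing; the offsets 7 and 8 are illustrative, not taken from any real TLMM:

	static const struct msm_pingroup example_group = {
		/* other fields elided */
		.intr_cfg_reg = 0x8,
		.intr_wakeup_present_bit = 7,	/* HW advertises wakeup capability here */
		.intr_wakeup_enable_bit = 8,	/* toggled by reqres/relres above */
	};

msm_gpio_irq_reqres() sets the enable bit only after reading the present bit back from hardware, and msm_gpio_irq_relres() clears it again, so the PDC routing is active exactly for the lifetime of the requested irq.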
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
index 99156217c..6bb39812e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c
@@ -36,22 +36,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-
static const struct pinctrl_pin_desc sc7280_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
index b33483056..c0369baf3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp-lpass-lpi.c
@@ -45,26 +45,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-
static const struct pinctrl_pin_desc sc8280xp_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
new file mode 100644
index 000000000..27317b86d
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define QUP_I3C(qup_mode, qup_offset) \
+ { \
+ .mode = qup_mode, \
+ .offset = qup_offset, \
+ }
+
+static const struct pinctrl_pin_desc sm4450_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "UFS_RESET"),
+ PINCTRL_PIN(137, "SDC1_RCLK"),
+ PINCTRL_PIN(138, "SDC1_CLK"),
+ PINCTRL_PIN(139, "SDC1_CMD"),
+ PINCTRL_PIN(140, "SDC1_DATA"),
+ PINCTRL_PIN(141, "SDC2_CLK"),
+ PINCTRL_PIN(142, "SDC2_CMD"),
+ PINCTRL_PIN(143, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+
+static const unsigned int ufs_reset_pins[] = { 136 };
+static const unsigned int sdc1_rclk_pins[] = { 137 };
+static const unsigned int sdc1_clk_pins[] = { 138 };
+static const unsigned int sdc1_cmd_pins[] = { 139 };
+static const unsigned int sdc1_data_pins[] = { 140 };
+static const unsigned int sdc2_clk_pins[] = { 141 };
+static const unsigned int sdc2_cmd_pins[] = { 142 };
+static const unsigned int sdc2_data_pins[] = { 143 };
+
+enum sm4450_functions {
+ msm_mux_gpio,
+ msm_mux_atest_char,
+ msm_mux_atest_usb0,
+ msm_mux_audio_ref_clk,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async_in0,
+ msm_mux_cci_i2c,
+ msm_mux_cci,
+ msm_mux_cmu_rng,
+ msm_mux_coex_uart1_rx,
+ msm_mux_coex_uart1_tx,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0_test,
+ msm_mux_ddr_pxi1_test,
+ msm_mux_gcc_gp1_clk,
+ msm_mux_gcc_gp2_clk,
+ msm_mux_gcc_gp3_clk,
+ msm_mux_host2wlan_sol,
+ msm_mux_ibi_i3c_qup0,
+ msm_mux_ibi_i3c_qup1,
+ msm_mux_jitter_bist_ref,
+ msm_mux_mdp_vsync0_out,
+ msm_mux_mdp_vsync1_out,
+ msm_mux_mdp_vsync2_out,
+ msm_mux_mdp_vsync3_out,
+ msm_mux_mdp_vsync,
+ msm_mux_nav,
+ msm_mux_pcie0_clk_req,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist_sync,
+ msm_mux_pll_clk_aux,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti_trig0,
+ msm_mux_qdss_cti_trig1,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss_reset,
+ msm_mux_qup0_se0,
+ msm_mux_qup0_se1,
+ msm_mux_qup0_se2,
+ msm_mux_qup0_se3,
+ msm_mux_qup0_se4,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_sd_write_protect,
+ msm_mux_tb_trig_sdc1,
+ msm_mux_tb_trig_sdc2,
+ msm_mux_tgu_ch0_trigout,
+ msm_mux_tgu_ch1_trigout,
+ msm_mux_tgu_ch2_trigout,
+ msm_mux_tgu_ch3_trigout,
+ msm_mux_tmess_prng,
+ msm_mux_tsense_pwm1_out,
+ msm_mux_tsense_pwm2_out,
+ msm_mux_uim0,
+ msm_mux_uim1,
+ msm_mux_usb0_hs_ac,
+ msm_mux_usb0_phy_ps,
+ msm_mux_vfr_0_mira,
+ msm_mux_vfr_0_mirb,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger_mirnat,
+ msm_mux_wlan1_adc_dtest0,
+ msm_mux_wlan1_adc_dtest1,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135",
+};
+static const char * const atest_char_groups[] = {
+ "gpio95", "gpio97", "gpio98", "gpio99", "gpio100",
+};
+static const char * const atest_usb0_groups[] = {
+ "gpio75", "gpio10", "gpio78", "gpio79", "gpio80",
+};
+static const char * const audio_ref_clk_groups[] = {
+ "gpio71",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const cci_async_in0_groups[] = {
+ "gpio40",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio45", "gpio47", "gpio49", "gpio44",
+ "gpio46", "gpio48",
+};
+static const char * const cci_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+static const char * const cmu_rng_groups[] = {
+ "gpio28", "gpio3", "gpio1", "gpio0",
+};
+static const char * const coex_uart1_rx_groups[] = {
+ "gpio54",
+};
+static const char * const coex_uart1_tx_groups[] = {
+ "gpio55",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio42", "gpio40", "gpio41",
+};
+static const char * const dbg_out_clk_groups[] = {
+ "gpio80",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio32", "gpio29", "gpio30", "gpio31",
+};
+static const char * const ddr_pxi0_test_groups[] = {
+ "gpio90", "gpio127",
+};
+static const char * const ddr_pxi1_test_groups[] = {
+ "gpio118", "gpio122",
+};
+static const char * const gcc_gp1_clk_groups[] = {
+ "gpio37", "gpio48",
+};
+static const char * const gcc_gp2_clk_groups[] = {
+ "gpio30", "gpio49",
+};
+static const char * const gcc_gp3_clk_groups[] = {
+ "gpio3", "gpio50",
+};
+static const char * const host2wlan_sol_groups[] = {
+ "gpio106",
+};
+static const char * const ibi_i3c_qup0_groups[] = {
+ "gpio4", "gpio5",
+};
+static const char * const ibi_i3c_qup1_groups[] = {
+ "gpio0", "gpio1",
+};
+static const char * const jitter_bist_ref_groups[] = {
+ "gpio90",
+};
+static const char * const mdp_vsync0_out_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync1_out_groups[] = {
+ "gpio93",
+};
+static const char * const mdp_vsync2_out_groups[] = {
+ "gpio22",
+};
+static const char * const mdp_vsync3_out_groups[] = {
+ "gpio22",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio26", "gpio22", "gpio30", "gpio34", "gpio93", "gpio97",
+};
+static const char * const nav_groups[] = {
+ "gpio81", "gpio83", "gpio84",
+};
+static const char * const pcie0_clk_req_groups[] = {
+ "gpio107",
+};
+static const char * const phase_flag_groups[] = {
+ "gpio7", "gpio8", "gpio9", "gpio11", "gpio13", "gpio14", "gpio15",
+ "gpio17", "gpio18", "gpio19", "gpio21", "gpio24", "gpio25", "gpio31",
+ "gpio32", "gpio33", "gpio35", "gpio61", "gpio72", "gpio82", "gpio91",
+ "gpio95", "gpio97", "gpio98", "gpio99", "gpio100", "gpio105", "gpio115",
+ "gpio116", "gpio117", "gpio133", "gpio135",
+};
+static const char * const pll_bist_sync_groups[] = {
+ "gpio73",
+};
+static const char * const pll_clk_aux_groups[] = {
+ "gpio108",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const qdss_cti_trig0_groups[] = {
+ "gpio26", "gpio60", "gpio113", "gpio114",
+};
+static const char * const qdss_cti_trig1_groups[] = {
+ "gpio6", "gpio27", "gpio57", "gpio58",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio0", "gpio1", "gpio3", "gpio4", "gpio5", "gpio7", "gpio8",
+ "gpio9", "gpio14", "gpio15", "gpio17", "gpio23", "gpio31", "gpio32",
+ "gpio33", "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio40",
+ "gpio41", "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio49", "gpio59", "gpio62", "gpio118", "gpio121", "gpio122", "gpio126",
+ "gpio127",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio88",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio87",
+};
+static const char * const qlink0_wmss_reset_groups[] = {
+ "gpio89",
+};
+static const char * const qup0_se0_groups[] = {
+ "gpio4", "gpio5", "gpio34", "gpio35",
+};
+static const char * const qup0_se1_groups[] = {
+ "gpio10", "gpio11", "gpio12", "gpio13",
+};
+static const char * const qup0_se2_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup0_se3_groups[] = {
+ "gpio18", "gpio19", "gpio20", "gpio21",
+};
+static const char * const qup0_se4_groups[] = {
+ "gpio6", "gpio7", "gpio8", "gpio9",
+ "gpio26", "gpio27", "gpio34",
+};
+static const char * const qup1_se0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup1_se1_groups[] = {
+ "gpio26", "gpio27", "gpio50", "gpio51",
+};
+static const char * const qup1_se2_groups[] = {
+ "gpio22", "gpio23", "gpio31", "gpio32",
+};
+static const char * const qup1_se3_groups[] = {
+ "gpio24", "gpio25", "gpio51", "gpio50",
+};
+static const char * const qup1_se4_groups[] = {
+ "gpio43", "gpio48", "gpio49", "gpio90",
+ "gpio91",
+};
+static const char * const sd_write_protect_groups[] = {
+ "gpio102",
+};
+static const char * const tb_trig_sdc1_groups[] = {
+ "gpio128",
+};
+static const char * const tb_trig_sdc2_groups[] = {
+ "gpio51",
+};
+static const char * const tgu_ch0_trigout_groups[] = {
+ "gpio20",
+};
+static const char * const tgu_ch1_trigout_groups[] = {
+ "gpio21",
+};
+static const char * const tgu_ch2_trigout_groups[] = {
+ "gpio22",
+};
+static const char * const tgu_ch3_trigout_groups[] = {
+ "gpio23",
+};
+static const char * const tmess_prng_groups[] = {
+ "gpio57", "gpio58", "gpio59", "gpio60",
+};
+static const char * const tsense_pwm1_out_groups[] = {
+ "gpio134",
+};
+static const char * const tsense_pwm2_out_groups[] = {
+ "gpio134",
+};
+static const char * const uim0_groups[] = {
+ "gpio64", "gpio63", "gpio66", "gpio65",
+};
+static const char * const uim1_groups[] = {
+ "gpio68", "gpio67", "gpio69", "gpio70",
+};
+static const char * const usb0_hs_ac_groups[] = {
+ "gpio99",
+};
+static const char * const usb0_phy_ps_groups[] = {
+ "gpio94",
+};
+static const char * const vfr_0_mira_groups[] = {
+ "gpio19",
+};
+static const char * const vfr_0_mirb_groups[] = {
+ "gpio100",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio84",
+};
+static const char * const vsense_trigger_mirnat_groups[] = {
+ "gpio75",
+};
+static const char * const wlan1_adc_dtest0_groups[] = {
+ "gpio79",
+};
+static const char * const wlan1_adc_dtest1_groups[] = {
+ "gpio80",
+};
+
+static const struct pinfunction sm4450_functions[] = {
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_usb0),
+ MSM_PIN_FUNCTION(audio_ref_clk),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async_in0),
+ MSM_PIN_FUNCTION(cci_i2c),
+ MSM_PIN_FUNCTION(cci),
+ MSM_PIN_FUNCTION(cmu_rng),
+ MSM_PIN_FUNCTION(coex_uart1_rx),
+ MSM_PIN_FUNCTION(coex_uart1_tx),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(dbg_out_clk),
+ MSM_PIN_FUNCTION(ddr_bist),
+ MSM_PIN_FUNCTION(ddr_pxi0_test),
+ MSM_PIN_FUNCTION(ddr_pxi1_test),
+ MSM_PIN_FUNCTION(gcc_gp1_clk),
+ MSM_PIN_FUNCTION(gcc_gp2_clk),
+ MSM_PIN_FUNCTION(gcc_gp3_clk),
+ MSM_PIN_FUNCTION(host2wlan_sol),
+ MSM_PIN_FUNCTION(ibi_i3c_qup0),
+ MSM_PIN_FUNCTION(ibi_i3c_qup1),
+ MSM_PIN_FUNCTION(jitter_bist_ref),
+ MSM_PIN_FUNCTION(mdp_vsync0_out),
+ MSM_PIN_FUNCTION(mdp_vsync1_out),
+ MSM_PIN_FUNCTION(mdp_vsync2_out),
+ MSM_PIN_FUNCTION(mdp_vsync3_out),
+ MSM_PIN_FUNCTION(mdp_vsync),
+ MSM_PIN_FUNCTION(nav),
+ MSM_PIN_FUNCTION(pcie0_clk_req),
+ MSM_PIN_FUNCTION(phase_flag),
+ MSM_PIN_FUNCTION(pll_bist_sync),
+ MSM_PIN_FUNCTION(pll_clk_aux),
+ MSM_PIN_FUNCTION(prng_rosc),
+ MSM_PIN_FUNCTION(qdss_cti_trig0),
+ MSM_PIN_FUNCTION(qdss_cti_trig1),
+ MSM_PIN_FUNCTION(qdss_gpio),
+ MSM_PIN_FUNCTION(qlink0_enable),
+ MSM_PIN_FUNCTION(qlink0_request),
+ MSM_PIN_FUNCTION(qlink0_wmss_reset),
+ MSM_PIN_FUNCTION(qup0_se0),
+ MSM_PIN_FUNCTION(qup0_se1),
+ MSM_PIN_FUNCTION(qup0_se2),
+ MSM_PIN_FUNCTION(qup0_se3),
+ MSM_PIN_FUNCTION(qup0_se4),
+ MSM_PIN_FUNCTION(qup1_se0),
+ MSM_PIN_FUNCTION(qup1_se1),
+ MSM_PIN_FUNCTION(qup1_se2),
+ MSM_PIN_FUNCTION(qup1_se3),
+ MSM_PIN_FUNCTION(qup1_se4),
+ MSM_PIN_FUNCTION(sd_write_protect),
+ MSM_PIN_FUNCTION(tb_trig_sdc1),
+ MSM_PIN_FUNCTION(tb_trig_sdc2),
+ MSM_PIN_FUNCTION(tgu_ch0_trigout),
+ MSM_PIN_FUNCTION(tgu_ch1_trigout),
+ MSM_PIN_FUNCTION(tgu_ch2_trigout),
+ MSM_PIN_FUNCTION(tgu_ch3_trigout),
+ MSM_PIN_FUNCTION(tmess_prng),
+ MSM_PIN_FUNCTION(tsense_pwm1_out),
+ MSM_PIN_FUNCTION(tsense_pwm2_out),
+ MSM_PIN_FUNCTION(uim0),
+ MSM_PIN_FUNCTION(uim1),
+ MSM_PIN_FUNCTION(usb0_hs_ac),
+ MSM_PIN_FUNCTION(usb0_phy_ps),
+ MSM_PIN_FUNCTION(vfr_0_mira),
+ MSM_PIN_FUNCTION(vfr_0_mirb),
+ MSM_PIN_FUNCTION(vfr_1),
+ MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+ MSM_PIN_FUNCTION(wlan1_adc_dtest0),
+ MSM_PIN_FUNCTION(wlan1_adc_dtest1),
+};
+
+/*
+ * Every pin is maintained as a single group; any missing or non-existent pin
+ * is kept as a dummy group to keep the pin group index in sync with the pin
+ * descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm4450_groups[] = {
+ [0] = PINGROUP(0, qup1_se0, ibi_i3c_qup1, cmu_rng, qdss_gpio, _, _, _, _, _),
+ [1] = PINGROUP(1, qup1_se0, ibi_i3c_qup1, cmu_rng, qdss_gpio, _, _, _, _, _),
+ [2] = PINGROUP(2, qup1_se0, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup1_se0, gcc_gp3_clk, cmu_rng, qdss_gpio, _, _, _, _, _),
+ [4] = PINGROUP(4, qup0_se0, ibi_i3c_qup0, qdss_gpio, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup0_se0, ibi_i3c_qup0, qdss_gpio, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup0_se4, qdss_cti_trig1, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup0_se4, _, phase_flag, qdss_gpio, _, _, _, _, _),
+ [8] = PINGROUP(8, qup0_se4, _, phase_flag, qdss_gpio, _, _, _, _, _),
+ [9] = PINGROUP(9, qup0_se4, _, phase_flag, qdss_gpio, _, _, _, _, _),
+ [10] = PINGROUP(10, qup0_se1, _, atest_usb0, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup0_se1, _, phase_flag, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup0_se1, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup0_se1, _, phase_flag, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup0_se2, _, phase_flag, _, qdss_gpio, _, _, _, _),
+ [15] = PINGROUP(15, qup0_se2, _, phase_flag, _, qdss_gpio, _, _, _, _),
+ [16] = PINGROUP(16, qup0_se2, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup0_se2, _, phase_flag, _, qdss_gpio, _, _, _, _),
+ [18] = PINGROUP(18, qup0_se3, _, phase_flag, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup0_se3, vfr_0_mira, _, phase_flag, _, _, _, _, _),
+ [20] = PINGROUP(20, qup0_se3, tgu_ch0_trigout, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup0_se3, _, phase_flag, tgu_ch1_trigout, _, _, _, _, _),
+ [22] = PINGROUP(22, qup1_se2, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, tgu_ch2_trigout, _, _, _, _),
+ [23] = PINGROUP(23, qup1_se2, tgu_ch3_trigout, qdss_gpio, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup1_se3, _, phase_flag, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup1_se3, _, phase_flag, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup1_se1, mdp_vsync, qup0_se4, qdss_cti_trig0, _, _, _, _, _),
+ [27] = PINGROUP(27, qup1_se1, qup0_se4, qdss_cti_trig1, _, _, _, _, _, _),
+ [28] = PINGROUP(28, cmu_rng, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, ddr_bist, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, mdp_vsync, gcc_gp2_clk, ddr_bist, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup1_se2, _, phase_flag, ddr_bist, qdss_gpio, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se2, _, phase_flag, ddr_bist, qdss_gpio, _, _, _, _),
+ [33] = PINGROUP(33, _, phase_flag, qdss_gpio, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup0_se0, qup0_se4, mdp_vsync, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup0_se0, _, phase_flag, qdss_gpio, _, _, _, _, _),
+ [36] = PINGROUP(36, cam_mclk, prng_rosc, qdss_gpio, _, _, _, _, _, _),
+ [37] = PINGROUP(37, cam_mclk, gcc_gp1_clk, prng_rosc, qdss_gpio, _, _, _, _, _),
+ [38] = PINGROUP(38, cam_mclk, prng_rosc, qdss_gpio, _, _, _, _, _, _),
+ [39] = PINGROUP(39, cam_mclk, prng_rosc, qdss_gpio, _, _, _, _, _, _),
+ [40] = PINGROUP(40, cci, cci_async_in0, cri_trng, qdss_gpio, _, _, _, _, _),
+ [41] = PINGROUP(41, cci, cri_trng, qdss_gpio, _, _, _, _, _, _),
+ [42] = PINGROUP(42, cci, cri_trng, qdss_gpio, _, _, _, _, _, _),
+ [43] = PINGROUP(43, cci, qup1_se4, qdss_gpio, _, _, _, _, _, _),
+ [44] = PINGROUP(44, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [48] = PINGROUP(48, cci_i2c, qup1_se4, gcc_gp1_clk, _, _, _, _, _, _),
+ [49] = PINGROUP(49, cci_i2c, qup1_se4, gcc_gp2_clk, qdss_gpio, _, _, _, _, _),
+ [50] = PINGROUP(50, qup1_se1, qup1_se3, _, gcc_gp3_clk, _, _, _, _, _),
+ [51] = PINGROUP(51, qup1_se1, qup1_se3, _, tb_trig_sdc2, _, _, _, _, _),
+ [52] = PINGROUP(52, _, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, _, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, coex_uart1_rx, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, coex_uart1_tx, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, _, _, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, tmess_prng, qdss_cti_trig1, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, tmess_prng, qdss_cti_trig1, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, tmess_prng, qdss_gpio, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, tmess_prng, qdss_cti_trig0, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, _, phase_flag, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, qdss_gpio, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, uim0, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, uim0, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, uim0, _, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, uim0, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, uim1, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, uim1, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, uim1, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, uim1, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, _, _, _, audio_ref_clk, _, _, _, _, _),
+ [72] = PINGROUP(72, _, _, _, phase_flag, _, _, _, _, _),
+ [73] = PINGROUP(73, _, _, _, pll_bist_sync, _, _, _, _, _),
+ [74] = PINGROUP(74, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, _, _, _, vsense_trigger_mirnat, atest_usb0, _, _, _, _),
+ [76] = PINGROUP(76, _, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, _, _, _, atest_usb0, _, _, _, _, _),
+ [79] = PINGROUP(79, _, _, _, wlan1_adc_dtest0, atest_usb0, _, _, _, _),
+ [80] = PINGROUP(80, _, _, dbg_out_clk, wlan1_adc_dtest1, atest_usb0, _, _, _, _),
+ [81] = PINGROUP(81, _, nav, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, _, _, phase_flag, _, _, _, _, _, _),
+ [83] = PINGROUP(83, nav, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, nav, vfr_1, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, _, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, qlink0_request, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, qlink0_enable, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, qlink0_wmss_reset, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, qup1_se4, jitter_bist_ref, ddr_pxi0_test, _, _, _, _, _, _),
+ [91] = PINGROUP(91, qup1_se4, _, phase_flag, _, _, _, _, _, _),
+ [92] = PINGROUP(92, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, _, _, _, _, _, _),
+ [94] = PINGROUP(94, usb0_phy_ps, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, _, phase_flag, atest_char, _, _, _, _, _, _),
+ [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, mdp_vsync, _, phase_flag, atest_char, _, _, _, _, _),
+ [98] = PINGROUP(98, _, phase_flag, atest_char, _, _, _, _, _, _),
+ [99] = PINGROUP(99, usb0_hs_ac, _, phase_flag, atest_char, _, _, _, _, _),
+ [100] = PINGROUP(100, vfr_0_mirb, _, phase_flag, atest_char, _, _, _, _, _),
+ [101] = PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, sd_write_protect, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, _, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, _, phase_flag, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, host2wlan_sol, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, pcie0_clk_req, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, pll_clk_aux, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, qdss_cti_trig0, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, qdss_cti_trig0, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, _, phase_flag, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, phase_flag, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, phase_flag, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, qdss_gpio, _, ddr_pxi1_test, _, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, qdss_gpio, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, qdss_gpio, _, ddr_pxi1_test, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, qdss_gpio, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, qdss_gpio, ddr_pxi0_test, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, tb_trig_sdc1, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, phase_flag, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, tsense_pwm1_out, tsense_pwm2_out, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, phase_flag, _, _, _, _, _, _, _),
+ [136] = UFS_RESET(ufs_reset, 0x97000),
+ [137] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x8c004, 0, 0),
+ [138] = SDC_QDSD_PINGROUP(sdc1_clk, 0x8c000, 13, 6),
+ [139] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x8c000, 11, 3),
+ [140] = SDC_QDSD_PINGROUP(sdc1_data, 0x8c000, 9, 0),
+ [141] = SDC_QDSD_PINGROUP(sdc2_clk, 0x8f000, 14, 6),
+ [142] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x8f000, 11, 3),
+ [143] = SDC_QDSD_PINGROUP(sdc2_data, 0x8f000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm4450_pdc_map[] = {
+ { 0, 67 }, { 3, 82 }, { 4, 69 }, { 5, 70 }, { 6, 44 }, { 7, 43 },
+ { 8, 71 }, { 9, 86 }, { 10, 48 }, { 11, 77 }, { 12, 90 },
+ { 13, 54 }, { 14, 91 }, { 17, 97 }, { 18, 102 }, { 21, 103 },
+ { 22, 104 }, { 23, 105 }, { 24, 53 }, { 25, 106 }, { 26, 65 },
+ { 27, 55 }, { 28, 89 }, { 30, 80 }, { 31, 109 }, { 33, 87 },
+ { 34, 81 }, { 35, 75 }, { 40, 88 }, { 41, 98 }, { 42, 110 },
+ { 43, 95 }, { 47, 118 }, { 50, 111 }, { 52, 52 }, { 53, 114 },
+ { 54, 115 }, { 55, 99 }, { 56, 45 }, { 57, 85 }, { 58, 56 },
+ { 59, 84 }, { 60, 83 }, { 61, 96 }, { 62, 93 }, { 66, 116 },
+ { 67, 113 }, { 70, 42 }, { 71, 122 }, { 73, 119 }, { 75, 121 },
+ { 77, 120 }, { 79, 123 }, { 81, 124 }, { 83, 64 }, { 84, 128 },
+ { 86, 129 }, { 87, 63 }, { 91, 92 }, { 92, 66 }, { 93, 125 },
+ { 94, 76 }, { 95, 62 }, { 96, 132 }, { 97, 135 }, { 98, 73 },
+ { 99, 133 }, { 101, 46 }, { 102, 134 }, { 103, 49 }, { 105, 58 },
+ { 107, 94 }, { 110, 59 }, { 113, 57 }, { 114, 60 }, { 118, 107 },
+ { 120, 61 }, { 121, 108 }, { 123, 68 }, { 125, 72 }, { 128, 112 },
+};
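Each entry in this map pairs a TLMM GPIO with its PDC wakeup interrupt, e.g. { 0, 67 } routes gpio0 to PDC pin 67; GPIOs listed here are handled through the skip_wake_irqs path in pinctrl-msm.c, so they can wake the system without the TLMM summary interrupt.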
+
+static const struct msm_pinctrl_soc_data sm4450_tlmm = {
+ .pins = sm4450_pins,
+ .npins = ARRAY_SIZE(sm4450_pins),
+ .functions = sm4450_functions,
+ .nfunctions = ARRAY_SIZE(sm4450_functions),
+ .groups = sm4450_groups,
+ .ngroups = ARRAY_SIZE(sm4450_groups),
+ .ngpios = 137,
+ .wakeirq_map = sm4450_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm4450_pdc_map),
+};
+
+static int sm4450_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm4450_tlmm);
+}
+
+static const struct of_device_id sm4450_tlmm_of_match[] = {
+ { .compatible = "qcom,sm4450-tlmm", },
+ { }
+};
+
+static struct platform_driver sm4450_tlmm_driver = {
+ .driver = {
+ .name = "sm4450-tlmm",
+ .of_match_table = sm4450_tlmm_of_match,
+ },
+ .probe = sm4450_tlmm_probe,
+ .remove_new = msm_pinctrl_remove,
+};
+MODULE_DEVICE_TABLE(of, sm4450_tlmm_of_match);
+
+static int __init sm4450_tlmm_init(void)
+{
+ return platform_driver_register(&sm4450_tlmm_driver);
+}
+arch_initcall(sm4450_tlmm_init);
+
+static void __exit sm4450_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm4450_tlmm_driver);
+}
+module_exit(sm4450_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM4450 TLMM driver");
+MODULE_LICENSE("GPL");
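The driver registers at arch_initcall() time rather than via module_init(), the usual choice for TLMM drivers so that the pin controller is available before consumer drivers probe; module_exit() keeps the module removable when built as =m.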
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
index e8a6f6f6a..316d6fc69 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115-lpass-lpi.c
@@ -36,26 +36,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-
static const struct pinctrl_pin_desc sm6115_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
index cb10ce8d5..9791d9ba5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c
@@ -36,21 +36,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-
static const struct pinctrl_pin_desc sm8250_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
index 297cc95ac..5b9a2cb21 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c
@@ -36,22 +36,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-
static const struct pinctrl_pin_desc sm8350_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
index 2e7896791..a028cbb49 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -52,30 +52,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-static int gpio19_pins[] = { 19 };
-static int gpio20_pins[] = { 20 };
-static int gpio21_pins[] = { 21 };
-static int gpio22_pins[] = { 22 };
-
static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
index 64458c3fb..852192b04 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
@@ -52,30 +52,6 @@ enum lpass_lpi_functions {
LPI_MUX__,
};
-static int gpio0_pins[] = { 0 };
-static int gpio1_pins[] = { 1 };
-static int gpio2_pins[] = { 2 };
-static int gpio3_pins[] = { 3 };
-static int gpio4_pins[] = { 4 };
-static int gpio5_pins[] = { 5 };
-static int gpio6_pins[] = { 6 };
-static int gpio7_pins[] = { 7 };
-static int gpio8_pins[] = { 8 };
-static int gpio9_pins[] = { 9 };
-static int gpio10_pins[] = { 10 };
-static int gpio11_pins[] = { 11 };
-static int gpio12_pins[] = { 12 };
-static int gpio13_pins[] = { 13 };
-static int gpio14_pins[] = { 14 };
-static int gpio15_pins[] = { 15 };
-static int gpio16_pins[] = { 16 };
-static int gpio17_pins[] = { 17 };
-static int gpio18_pins[] = { 18 };
-static int gpio19_pins[] = { 19 };
-static int gpio20_pins[] = { 20 };
-static int gpio21_pins[] = { 21 };
-static int gpio22_pins[] = { 22 };
-
static const struct pinctrl_pin_desc sm8550_lpi_pins[] = {
PINCTRL_PIN(0, "gpio0"),
PINCTRL_PIN(1, "gpio1"),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c
new file mode 100644
index 000000000..04400c832
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_i2s0_clk,
+ LPI_MUX_i2s0_data,
+ LPI_MUX_i2s0_ws,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_i2s4_clk,
+ LPI_MUX_i2s4_data,
+ LPI_MUX_i2s4_ws,
+ LPI_MUX_qca_swr_clk,
+ LPI_MUX_qca_swr_data,
+ LPI_MUX_slimbus_clk,
+ LPI_MUX_slimbus_data,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_wsa2_swr_clk,
+ LPI_MUX_wsa2_swr_data,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_ext_mclk1_d,
+ LPI_MUX_ext_mclk1_e,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static const struct pinctrl_pin_desc sm8650_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22",
+};
+
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s0_clk_groups[] = { "gpio0" };
+static const char * const i2s0_ws_groups[] = { "gpio1" };
+static const char * const i2s0_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s3_clk_groups[] = { "gpio12" };
+static const char * const i2s3_ws_groups[] = { "gpio13" };
+static const char * const i2s3_data_groups[] = { "gpio17", "gpio18" };
+static const char * const i2s4_clk_groups[] = { "gpio19" };
+static const char * const i2s4_ws_groups[] = { "gpio20" };
+static const char * const i2s4_data_groups[] = { "gpio21", "gpio22" };
+static const char * const qca_swr_clk_groups[] = { "gpio19" };
+static const char * const qca_swr_data_groups[] = { "gpio20" };
+static const char * const slimbus_clk_groups[] = { "gpio19" };
+static const char * const slimbus_data_groups[] = { "gpio20" };
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+static const char * const ext_mclk1_d_groups[] = { "gpio14" };
+static const char * const ext_mclk1_e_groups[] = { "gpio22" };
+
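+/*
+ * LPI_PINGROUP() (from pinctrl-lpass-lpi.h) takes the pin number, the
+ * slew-rate control bit offset (LPI_NO_SLEW when the pad has none) and up
+ * to four mux functions; plain GPIO is implicitly function select 0 for
+ * every pad. With LPI_FLAG_SLEW_RATE_SAME_REG set below, the slew bit
+ * lives in the pad's own control register.
+ */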
+static const struct lpi_pingroup sm8650_groups[] = {
+ LPI_PINGROUP(0, 11, swr_tx_clk, i2s0_clk, _, _),
+ LPI_PINGROUP(1, 11, swr_tx_data, i2s0_ws, _, _),
+ LPI_PINGROUP(2, 11, swr_tx_data, i2s0_data, _, _),
+ LPI_PINGROUP(3, 11, swr_rx_clk, i2s0_data, _, _),
+ LPI_PINGROUP(4, 11, swr_rx_data, i2s0_data, _, _),
+ LPI_PINGROUP(5, 11, swr_rx_data, ext_mclk1_c, i2s0_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 11, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 11, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s3_clk, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s3_ws, ext_mclk1_a, _),
+ LPI_PINGROUP(14, 11, swr_tx_data, ext_mclk1_d, _, _),
+ LPI_PINGROUP(15, 11, i2s2_data, wsa2_swr_clk, _, _),
+ LPI_PINGROUP(16, 11, i2s2_data, wsa2_swr_data, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s3_data, _, _),
+ LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s3_data, _, _),
+ LPI_PINGROUP(19, 11, i2s4_clk, slimbus_clk, qca_swr_clk, _),
+ LPI_PINGROUP(20, 11, i2s4_ws, slimbus_data, qca_swr_data, _),
+ LPI_PINGROUP(21, LPI_NO_SLEW, i2s4_data, _, _, _),
+ LPI_PINGROUP(22, LPI_NO_SLEW, i2s4_data, ext_mclk1_e, _, _),
+};
+
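+/*
+ * This table must stay in the same order as enum lpass_lpi_functions
+ * above: the shared LPI core resolves a requested function to its mux
+ * value by matching the function's index here against the LPI_MUX_* values.
+ */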
+static const struct lpi_function sm8650_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(i2s0_clk),
+ LPI_FUNCTION(i2s0_data),
+ LPI_FUNCTION(i2s0_ws),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(i2s4_clk),
+ LPI_FUNCTION(i2s4_data),
+ LPI_FUNCTION(i2s4_ws),
+ LPI_FUNCTION(qca_swr_clk),
+ LPI_FUNCTION(qca_swr_data),
+ LPI_FUNCTION(slimbus_clk),
+ LPI_FUNCTION(slimbus_data),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+ LPI_FUNCTION(wsa2_swr_clk),
+ LPI_FUNCTION(wsa2_swr_data),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+ LPI_FUNCTION(ext_mclk1_d),
+	LPI_FUNCTION(ext_mclk1_e),
+	LPI_FUNCTION(gpio),
+};
+
+static const struct lpi_pinctrl_variant_data sm8650_lpi_data = {
+ .pins = sm8650_lpi_pins,
+ .npins = ARRAY_SIZE(sm8650_lpi_pins),
+ .groups = sm8650_groups,
+ .ngroups = ARRAY_SIZE(sm8650_groups),
+ .functions = sm8650_functions,
+ .nfunctions = ARRAY_SIZE(sm8650_functions),
+ .flags = LPI_FLAG_SLEW_RATE_SAME_REG,
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8650-lpass-lpi-pinctrl",
+ .data = &sm8650_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm8650-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove_new = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("Qualcomm SM8650 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
new file mode 100644
index 000000000..adaddd728
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c
@@ -0,0 +1,1799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ msm_mux_##f10 \
+ }, \
+ .nfuncs = 11, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .i2c_pull_bit = 13, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_wakeup_present_bit = 6, \
+ .intr_wakeup_enable_bit = 7, \
+ .intr_target_bit = 8, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, ctl, io) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = io, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm8650_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "UFS_RESET"),
+ PINCTRL_PIN(211, "SDC2_CLK"),
+ PINCTRL_PIN(212, "SDC2_CMD"),
+ PINCTRL_PIN(213, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+
+static const unsigned int ufs_reset_pins[] = { 210 };
+static const unsigned int sdc2_clk_pins[] = { 211 };
+static const unsigned int sdc2_cmd_pins[] = { 212 };
+static const unsigned int sdc2_data_pins[] = { 213 };
+
+enum sm8650_functions {
+ msm_mux_gpio,
+ msm_mux_aoss_cti,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ext_mclk0,
+ msm_mux_audio_ext_mclk1,
+ msm_mux_audio_ref_clk,
+ msm_mux_cam_aon_mclk2,
+ msm_mux_cam_aon_mclk4,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async_in,
+ msm_mux_cci_i2c_scl,
+ msm_mux_cci_i2c_sda,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_coex_uart1_rx,
+ msm_mux_coex_uart1_tx,
+ msm_mux_coex_uart2_rx,
+ msm_mux_coex_uart2_tx,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist_complete,
+ msm_mux_ddr_bist_fail,
+ msm_mux_ddr_bist_start,
+ msm_mux_ddr_bist_stop,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_do_not,
+ msm_mux_dp_hot,
+ msm_mux_egpio,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gnss_adc0,
+ msm_mux_gnss_adc1,
+ msm_mux_i2chub0_se0,
+ msm_mux_i2chub0_se1,
+ msm_mux_i2chub0_se2,
+ msm_mux_i2chub0_se3,
+ msm_mux_i2chub0_se4,
+ msm_mux_i2chub0_se5,
+ msm_mux_i2chub0_se6,
+ msm_mux_i2chub0_se7,
+ msm_mux_i2chub0_se8,
+ msm_mux_i2chub0_se9,
+ msm_mux_i2s0_data0,
+ msm_mux_i2s0_data1,
+ msm_mux_i2s0_sck,
+ msm_mux_i2s0_ws,
+ msm_mux_i2s1_data0,
+ msm_mux_i2s1_data1,
+ msm_mux_i2s1_sck,
+ msm_mux_i2s1_ws,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0_out,
+ msm_mux_mdp_vsync1_out,
+ msm_mux_mdp_vsync2_out,
+ msm_mux_mdp_vsync3_out,
+ msm_mux_mdp_vsync_e,
+ msm_mux_nav_gpio0,
+ msm_mux_nav_gpio1,
+ msm_mux_nav_gpio2,
+ msm_mux_nav_gpio3,
+ msm_mux_pcie0_clk_req_n,
+ msm_mux_pcie1_clk_req_n,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist_sync,
+ msm_mux_pll_clk_aux,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink_big_enable,
+ msm_mux_qlink_big_request,
+ msm_mux_qlink_little_enable,
+ msm_mux_qlink_little_request,
+ msm_mux_qlink_wmss,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_qup1_se7,
+ msm_mux_qup2_se0,
+ msm_mux_qup2_se1,
+ msm_mux_qup2_se2,
+ msm_mux_qup2_se3,
+ msm_mux_qup2_se4,
+ msm_mux_qup2_se5,
+ msm_mux_qup2_se6,
+ msm_mux_qup2_se7,
+ msm_mux_sd_write_protect,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_tb_trig_sdc2,
+ msm_mux_tb_trig_sdc4,
+ msm_mux_tgu_ch0_trigout,
+ msm_mux_tgu_ch1_trigout,
+ msm_mux_tgu_ch2_trigout,
+ msm_mux_tgu_ch3_trigout,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsense_pwm3,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_usb1_hs,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger_mirnat,
+ msm_mux__,
+};
+
+static const char *const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24", "gpio25", "gpio26", "gpio27",
+ "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39",
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio48", "gpio49", "gpio50", "gpio51",
+ "gpio52", "gpio53", "gpio54", "gpio55",
+ "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67",
+ "gpio68", "gpio69", "gpio70", "gpio71",
+ "gpio72", "gpio73", "gpio74", "gpio75",
+ "gpio76", "gpio77", "gpio78", "gpio79",
+ "gpio80", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87",
+ "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95",
+ "gpio96", "gpio97", "gpio98", "gpio99",
+ "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115",
+ "gpio116", "gpio117", "gpio118", "gpio119",
+ "gpio120", "gpio121", "gpio122", "gpio123",
+ "gpio124", "gpio125", "gpio126", "gpio127",
+ "gpio128", "gpio129", "gpio130", "gpio131",
+ "gpio132", "gpio133", "gpio134", "gpio135",
+ "gpio136", "gpio137", "gpio138", "gpio139",
+ "gpio140", "gpio141", "gpio142", "gpio143",
+ "gpio144", "gpio145", "gpio146", "gpio147",
+ "gpio148", "gpio149", "gpio150", "gpio151",
+ "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159",
+ "gpio160", "gpio161", "gpio162", "gpio163",
+ "gpio164", "gpio165", "gpio166", "gpio167",
+ "gpio168", "gpio169", "gpio170", "gpio171",
+ "gpio172", "gpio173", "gpio174", "gpio175",
+ "gpio176", "gpio177", "gpio178", "gpio179",
+ "gpio180", "gpio181", "gpio182", "gpio183",
+ "gpio184", "gpio185", "gpio186", "gpio187",
+ "gpio188", "gpio189", "gpio190", "gpio191",
+ "gpio192", "gpio193", "gpio194", "gpio195",
+ "gpio196", "gpio197", "gpio198", "gpio199",
+ "gpio200", "gpio201", "gpio202", "gpio203",
+ "gpio204", "gpio205", "gpio206", "gpio207",
+ "gpio208", "gpio209",
+};
+
+static const char *const egpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
+ "gpio6", "gpio7", "gpio165", "gpio166", "gpio167", "gpio168",
+ "gpio169", "gpio170", "gpio171", "gpio172", "gpio173", "gpio174",
+ "gpio175", "gpio176", "gpio177", "gpio178", "gpio179", "gpio180",
+ "gpio181", "gpio182", "gpio183", "gpio184", "gpio185", "gpio186",
+ "gpio187", "gpio188", "gpio189", "gpio190", "gpio191", "gpio192",
+ "gpio193", "gpio194", "gpio195", "gpio196", "gpio197", "gpio198",
+ "gpio199", "gpio200", "gpio201", "gpio202", "gpio203", "gpio204",
+ "gpio205", "gpio206", "gpio207", "gpio208", "gpio209",
+};
+
+static const char *const aoss_cti_groups[] = {
+ "gpio50", "gpio51", "gpio60", "gpio61",
+};
+
+static const char *const atest_char_groups[] = {
+ "gpio130", "gpio131", "gpio132", "gpio133",
+ "gpio137",
+};
+
+static const char *const atest_usb_groups[] = {
+ "gpio71", "gpio72", "gpio74", "gpio130",
+ "gpio131",
+};
+
+static const char *const audio_ext_mclk0_groups[] = {
+ "gpio125",
+};
+
+static const char *const audio_ext_mclk1_groups[] = {
+ "gpio124",
+};
+
+static const char *const audio_ref_clk_groups[] = {
+ "gpio124",
+};
+
+static const char *const cam_aon_mclk2_groups[] = {
+ "gpio102",
+};
+
+static const char *const cam_aon_mclk4_groups[] = {
+ "gpio104",
+};
+
+static const char *const cam_mclk_groups[] = {
+ "gpio100", "gpio101", "gpio103", "gpio105",
+ "gpio106", "gpio108",
+};
+
+static const char *const cci_async_in_groups[] = {
+ "gpio15", "gpio163", "gpio164",
+};
+
+static const char *const cci_i2c_scl_groups[] = {
+ "gpio13", "gpio114", "gpio116", "gpio118",
+ "gpio120", "gpio153",
+};
+
+static const char *const cci_i2c_sda_groups[] = {
+ "gpio12", "gpio112", "gpio113", "gpio115",
+ "gpio117", "gpio119",
+};
+
+static const char *const cci_timer_groups[] = {
+ "gpio10", "gpio11", "gpio109", "gpio110",
+ "gpio111",
+};
+
+static const char *const cmu_rng_groups[] = {
+	"gpio94", "gpio95", "gpio96", "gpio112",
+	"gpio122", "gpio127", "gpio128", "gpio129",
+
+static const char *const coex_uart1_rx_groups[] = {
+ "gpio148",
+};
+
+static const char *const coex_uart1_tx_groups[] = {
+ "gpio149",
+};
+
+static const char *const coex_uart2_rx_groups[] = {
+ "gpio150",
+};
+
+static const char *const coex_uart2_tx_groups[] = {
+ "gpio151",
+};
+
+static const char *const cri_trng_groups[] = {
+ "gpio187",
+};
+
+static const char *const dbg_out_clk_groups[] = {
+ "gpio92",
+};
+
+static const char *const ddr_bist_complete_groups[] = {
+ "gpio44",
+};
+
+static const char *const ddr_bist_fail_groups[] = {
+ "gpio40",
+};
+
+static const char *const ddr_bist_start_groups[] = {
+ "gpio41",
+};
+
+static const char *const ddr_bist_stop_groups[] = {
+ "gpio45",
+};
+
+static const char *const ddr_pxi0_groups[] = {
+ "gpio75", "gpio76",
+};
+
+static const char *const ddr_pxi1_groups[] = {
+ "gpio44", "gpio45",
+};
+
+static const char *const ddr_pxi2_groups[] = {
+ "gpio51", "gpio62",
+};
+
+static const char *const ddr_pxi3_groups[] = {
+ "gpio46", "gpio47",
+};
+
+static const char *const do_not_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+ "gpio134", "gpio135", "gpio136",
+};
+
+static const char *const dp_hot_groups[] = {
+ "gpio47",
+};
+
+static const char *const gcc_gp1_groups[] = {
+ "gpio86", "gpio134",
+};
+
+static const char *const gcc_gp2_groups[] = {
+ "gpio87", "gpio135",
+};
+
+static const char *const gcc_gp3_groups[] = {
+ "gpio88", "gpio136",
+};
+
+static const char *const gnss_adc0_groups[] = {
+ "gpio89", "gpio91",
+};
+
+static const char *const gnss_adc1_groups[] = {
+ "gpio90", "gpio92",
+};
+
+static const char *const i2chub0_se0_groups[] = {
+ "gpio64", "gpio65",
+};
+
+static const char *const i2chub0_se1_groups[] = {
+ "gpio66", "gpio67",
+};
+
+static const char *const i2chub0_se2_groups[] = {
+ "gpio68", "gpio69",
+};
+
+static const char *const i2chub0_se3_groups[] = {
+ "gpio70", "gpio71",
+};
+
+static const char *const i2chub0_se4_groups[] = {
+ "gpio72", "gpio73",
+};
+
+static const char *const i2chub0_se5_groups[] = {
+ "gpio74", "gpio75",
+};
+
+static const char *const i2chub0_se6_groups[] = {
+ "gpio76", "gpio77",
+};
+
+static const char *const i2chub0_se7_groups[] = {
+ "gpio78", "gpio79",
+};
+
+static const char *const i2chub0_se8_groups[] = {
+ "gpio206", "gpio207",
+};
+
+static const char *const i2chub0_se9_groups[] = {
+ "gpio80", "gpio81",
+};
+
+static const char *const i2s0_data0_groups[] = {
+ "gpio127",
+};
+
+static const char *const i2s0_data1_groups[] = {
+ "gpio128",
+};
+
+static const char *const i2s0_sck_groups[] = {
+ "gpio126",
+};
+
+static const char *const i2s0_ws_groups[] = {
+ "gpio129",
+};
+
+static const char *const i2s1_data0_groups[] = {
+ "gpio122",
+};
+
+static const char *const i2s1_data1_groups[] = {
+ "gpio124",
+};
+
+static const char *const i2s1_sck_groups[] = {
+ "gpio121",
+};
+
+static const char *const i2s1_ws_groups[] = {
+ "gpio123",
+};
+
+static const char *const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio4", "gpio5",
+ "gpio8", "gpio9", "gpio12", "gpio13",
+ "gpio32", "gpio33", "gpio36", "gpio37",
+ "gpio48", "gpio49", "gpio56", "gpio57",
+};
+
+static const char *const jitter_bist_groups[] = {
+ "gpio73",
+};
+
+static const char *const mdp_vsync_groups[] = {
+ "gpio86", "gpio87", "gpio133", "gpio137",
+};
+
+static const char *const mdp_vsync0_out_groups[] = {
+ "gpio86",
+};
+
+static const char *const mdp_vsync1_out_groups[] = {
+ "gpio86",
+};
+
+static const char *const mdp_vsync2_out_groups[] = {
+ "gpio87",
+};
+
+static const char *const mdp_vsync3_out_groups[] = {
+ "gpio87",
+};
+
+static const char *const mdp_vsync_e_groups[] = {
+ "gpio88",
+};
+
+static const char *const nav_gpio0_groups[] = {
+ "gpio154",
+};
+
+static const char *const nav_gpio1_groups[] = {
+ "gpio155",
+};
+
+static const char *const nav_gpio2_groups[] = {
+ "gpio152",
+};
+
+static const char *const nav_gpio3_groups[] = {
+ "gpio154",
+};
+
+static const char *const pcie0_clk_req_n_groups[] = {
+ "gpio95",
+};
+
+static const char *const pcie1_clk_req_n_groups[] = {
+ "gpio98",
+};
+
+static const char *const phase_flag_groups[] = {
+ "gpio0", "gpio1", "gpio3", "gpio4",
+ "gpio5", "gpio7", "gpio8", "gpio9",
+ "gpio11", "gpio12", "gpio13", "gpio15",
+ "gpio16", "gpio17", "gpio19", "gpio94",
+ "gpio95", "gpio96", "gpio109", "gpio111",
+ "gpio112", "gpio113", "gpio114", "gpio115",
+ "gpio116", "gpio117", "gpio118", "gpio119",
+ "gpio120", "gpio153", "gpio163", "gpio164",
+};
+
+static const char *const pll_bist_sync_groups[] = {
+ "gpio68",
+};
+
+static const char *const pll_clk_aux_groups[] = {
+ "gpio106",
+};
+
+static const char *const prng_rosc0_groups[] = {
+ "gpio186",
+};
+
+static const char *const prng_rosc1_groups[] = {
+ "gpio183",
+};
+
+static const char *const prng_rosc2_groups[] = {
+ "gpio182",
+};
+
+static const char *const prng_rosc3_groups[] = {
+ "gpio181",
+};
+
+static const char *const qdss_cti_groups[] = {
+ "gpio27", "gpio31", "gpio78", "gpio79",
+ "gpio82", "gpio83", "gpio159", "gpio162",
+};
+
+static const char *const qdss_gpio_groups[] = {
+ "gpio3", "gpio7", "gpio8", "gpio13",
+ "gpio15", "gpio100", "gpio101", "gpio102",
+ "gpio103", "gpio104", "gpio105", "gpio113",
+ "gpio114", "gpio115", "gpio116", "gpio117",
+ "gpio118", "gpio140", "gpio141", "gpio142",
+ "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150",
+ "gpio151", "gpio152", "gpio153", "gpio154",
+ "gpio155", "gpio156", "gpio157", "gpio158",
+};
+
+static const char *const qlink_big_enable_groups[] = {
+ "gpio160",
+};
+
+static const char *const qlink_big_request_groups[] = {
+ "gpio159",
+};
+
+static const char *const qlink_little_enable_groups[] = {
+ "gpio157",
+};
+
+static const char *const qlink_little_request_groups[] = {
+ "gpio156",
+};
+
+static const char *const qlink_wmss_groups[] = {
+ "gpio158",
+};
+
+static const char *const qspi0_groups[] = {
+ "gpio134",
+};
+
+static const char *const qspi1_groups[] = {
+ "gpio136",
+};
+
+static const char *const qspi2_groups[] = {
+ "gpio56",
+};
+
+static const char *const qspi3_groups[] = {
+ "gpio57",
+};
+
+static const char *const qspi_clk_groups[] = {
+ "gpio135",
+};
+
+static const char *const qspi_cs_groups[] = {
+ "gpio58", "gpio59",
+};
+
+static const char *const qup1_se0_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se1_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char *const qup1_se2_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46",
+};
+
+static const char *const qup1_se3_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char *const qup1_se4_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se5_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char *const qup1_se6_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char *const qup1_se7_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char *const qup2_se0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char *const qup2_se1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char *const qup2_se2_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+	"gpio12", "gpio13", "gpio15",
+};
+
+static const char *const qup2_se3_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char *const qup2_se4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char *const qup2_se5_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio23",
+};
+
+static const char *const qup2_se6_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char *const qup2_se7_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+static const char *const sd_write_protect_groups[] = {
+ "gpio93",
+};
+
+static const char *const sdc40_groups[] = {
+ "gpio134",
+};
+
+static const char *const sdc41_groups[] = {
+ "gpio136",
+};
+
+static const char *const sdc42_groups[] = {
+ "gpio56",
+};
+
+static const char *const sdc43_groups[] = {
+ "gpio57",
+};
+
+static const char *const sdc4_clk_groups[] = {
+ "gpio135",
+};
+
+static const char *const sdc4_cmd_groups[] = {
+ "gpio59",
+};
+
+static const char *const tb_trig_sdc2_groups[] = {
+ "gpio8",
+};
+
+static const char *const tb_trig_sdc4_groups[] = {
+ "gpio58",
+};
+
+static const char *const tgu_ch0_trigout_groups[] = {
+ "gpio8",
+};
+
+static const char *const tgu_ch1_trigout_groups[] = {
+ "gpio9",
+};
+
+static const char *const tgu_ch2_trigout_groups[] = {
+ "gpio10",
+};
+
+static const char *const tgu_ch3_trigout_groups[] = {
+ "gpio11",
+};
+
+static const char *const tmess_prng0_groups[] = {
+ "gpio94",
+};
+
+static const char *const tmess_prng1_groups[] = {
+ "gpio95",
+};
+
+static const char *const tmess_prng2_groups[] = {
+ "gpio96",
+};
+
+static const char *const tmess_prng3_groups[] = {
+ "gpio109",
+};
+
+static const char *const tsense_pwm1_groups[] = {
+ "gpio58",
+};
+
+static const char *const tsense_pwm2_groups[] = {
+ "gpio58",
+};
+
+static const char *const tsense_pwm3_groups[] = {
+ "gpio58",
+};
+
+static const char *const uim0_clk_groups[] = {
+ "gpio131",
+};
+
+static const char *const uim0_data_groups[] = {
+ "gpio130",
+};
+
+static const char *const uim0_present_groups[] = {
+ "gpio47",
+};
+
+static const char *const uim0_reset_groups[] = {
+ "gpio132",
+};
+
+static const char *const uim1_clk_groups[] = {
+ "gpio135",
+};
+
+static const char *const uim1_data_groups[] = {
+ "gpio134",
+};
+
+static const char *const uim1_present_groups[] = {
+ "gpio76",
+};
+
+static const char *const uim1_reset_groups[] = {
+ "gpio136",
+};
+
+static const char *const usb1_hs_groups[] = {
+ "gpio89",
+};
+
+static const char *const usb_phy_groups[] = {
+ "gpio29", "gpio54",
+};
+
+static const char *const vfr_0_groups[] = {
+ "gpio150",
+};
+
+static const char *const vfr_1_groups[] = {
+ "gpio155",
+};
+
+static const char *const vsense_trigger_mirnat_groups[] = {
+ "gpio60",
+};
+
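+/*
+ * This table must stay in the same order as enum sm8650_functions above:
+ * the pinctrl-msm core resolves a requested function to its mux value by
+ * matching the function's index in this array against the msm_mux_* values
+ * stored in each pin group.
+ */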
+static const struct pinfunction sm8650_functions[] = {
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(aoss_cti),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_usb),
+ MSM_PIN_FUNCTION(audio_ext_mclk0),
+ MSM_PIN_FUNCTION(audio_ext_mclk1),
+ MSM_PIN_FUNCTION(audio_ref_clk),
+ MSM_PIN_FUNCTION(cam_aon_mclk2),
+ MSM_PIN_FUNCTION(cam_aon_mclk4),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async_in),
+ MSM_PIN_FUNCTION(cci_i2c_scl),
+ MSM_PIN_FUNCTION(cci_i2c_sda),
+ MSM_PIN_FUNCTION(cci_timer),
+ MSM_PIN_FUNCTION(cmu_rng),
+ MSM_PIN_FUNCTION(coex_uart1_rx),
+ MSM_PIN_FUNCTION(coex_uart1_tx),
+ MSM_PIN_FUNCTION(coex_uart2_rx),
+ MSM_PIN_FUNCTION(coex_uart2_tx),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(dbg_out_clk),
+ MSM_PIN_FUNCTION(ddr_bist_complete),
+ MSM_PIN_FUNCTION(ddr_bist_fail),
+ MSM_PIN_FUNCTION(ddr_bist_start),
+ MSM_PIN_FUNCTION(ddr_bist_stop),
+ MSM_PIN_FUNCTION(ddr_pxi0),
+ MSM_PIN_FUNCTION(ddr_pxi1),
+ MSM_PIN_FUNCTION(ddr_pxi2),
+ MSM_PIN_FUNCTION(ddr_pxi3),
+ MSM_PIN_FUNCTION(do_not),
+ MSM_PIN_FUNCTION(dp_hot),
+ MSM_PIN_FUNCTION(egpio),
+ MSM_PIN_FUNCTION(gcc_gp1),
+ MSM_PIN_FUNCTION(gcc_gp2),
+ MSM_PIN_FUNCTION(gcc_gp3),
+ MSM_PIN_FUNCTION(gnss_adc0),
+ MSM_PIN_FUNCTION(gnss_adc1),
+ MSM_PIN_FUNCTION(i2chub0_se0),
+ MSM_PIN_FUNCTION(i2chub0_se1),
+ MSM_PIN_FUNCTION(i2chub0_se2),
+ MSM_PIN_FUNCTION(i2chub0_se3),
+ MSM_PIN_FUNCTION(i2chub0_se4),
+ MSM_PIN_FUNCTION(i2chub0_se5),
+ MSM_PIN_FUNCTION(i2chub0_se6),
+ MSM_PIN_FUNCTION(i2chub0_se7),
+ MSM_PIN_FUNCTION(i2chub0_se8),
+ MSM_PIN_FUNCTION(i2chub0_se9),
+ MSM_PIN_FUNCTION(i2s0_data0),
+ MSM_PIN_FUNCTION(i2s0_data1),
+ MSM_PIN_FUNCTION(i2s0_sck),
+ MSM_PIN_FUNCTION(i2s0_ws),
+ MSM_PIN_FUNCTION(i2s1_data0),
+ MSM_PIN_FUNCTION(i2s1_data1),
+ MSM_PIN_FUNCTION(i2s1_sck),
+ MSM_PIN_FUNCTION(i2s1_ws),
+ MSM_PIN_FUNCTION(ibi_i3c),
+ MSM_PIN_FUNCTION(jitter_bist),
+ MSM_PIN_FUNCTION(mdp_vsync),
+ MSM_PIN_FUNCTION(mdp_vsync0_out),
+ MSM_PIN_FUNCTION(mdp_vsync1_out),
+ MSM_PIN_FUNCTION(mdp_vsync2_out),
+ MSM_PIN_FUNCTION(mdp_vsync3_out),
+ MSM_PIN_FUNCTION(mdp_vsync_e),
+ MSM_PIN_FUNCTION(nav_gpio0),
+ MSM_PIN_FUNCTION(nav_gpio1),
+ MSM_PIN_FUNCTION(nav_gpio2),
+ MSM_PIN_FUNCTION(nav_gpio3),
+ MSM_PIN_FUNCTION(pcie0_clk_req_n),
+ MSM_PIN_FUNCTION(pcie1_clk_req_n),
+ MSM_PIN_FUNCTION(phase_flag),
+ MSM_PIN_FUNCTION(pll_bist_sync),
+ MSM_PIN_FUNCTION(pll_clk_aux),
+ MSM_PIN_FUNCTION(prng_rosc0),
+ MSM_PIN_FUNCTION(prng_rosc1),
+ MSM_PIN_FUNCTION(prng_rosc2),
+ MSM_PIN_FUNCTION(prng_rosc3),
+ MSM_PIN_FUNCTION(qdss_cti),
+ MSM_PIN_FUNCTION(qdss_gpio),
+ MSM_PIN_FUNCTION(qlink_big_enable),
+ MSM_PIN_FUNCTION(qlink_big_request),
+ MSM_PIN_FUNCTION(qlink_little_enable),
+ MSM_PIN_FUNCTION(qlink_little_request),
+ MSM_PIN_FUNCTION(qlink_wmss),
+ MSM_PIN_FUNCTION(qspi0),
+ MSM_PIN_FUNCTION(qspi1),
+ MSM_PIN_FUNCTION(qspi2),
+ MSM_PIN_FUNCTION(qspi3),
+ MSM_PIN_FUNCTION(qspi_clk),
+ MSM_PIN_FUNCTION(qspi_cs),
+ MSM_PIN_FUNCTION(qup1_se0),
+ MSM_PIN_FUNCTION(qup1_se1),
+ MSM_PIN_FUNCTION(qup1_se2),
+ MSM_PIN_FUNCTION(qup1_se3),
+ MSM_PIN_FUNCTION(qup1_se4),
+ MSM_PIN_FUNCTION(qup1_se5),
+ MSM_PIN_FUNCTION(qup1_se6),
+ MSM_PIN_FUNCTION(qup1_se7),
+ MSM_PIN_FUNCTION(qup2_se0),
+ MSM_PIN_FUNCTION(qup2_se1),
+ MSM_PIN_FUNCTION(qup2_se2),
+ MSM_PIN_FUNCTION(qup2_se3),
+ MSM_PIN_FUNCTION(qup2_se4),
+ MSM_PIN_FUNCTION(qup2_se5),
+ MSM_PIN_FUNCTION(qup2_se6),
+ MSM_PIN_FUNCTION(qup2_se7),
+ MSM_PIN_FUNCTION(sd_write_protect),
+ MSM_PIN_FUNCTION(sdc40),
+ MSM_PIN_FUNCTION(sdc41),
+ MSM_PIN_FUNCTION(sdc42),
+ MSM_PIN_FUNCTION(sdc43),
+ MSM_PIN_FUNCTION(sdc4_clk),
+ MSM_PIN_FUNCTION(sdc4_cmd),
+ MSM_PIN_FUNCTION(tb_trig_sdc2),
+ MSM_PIN_FUNCTION(tb_trig_sdc4),
+ MSM_PIN_FUNCTION(tgu_ch0_trigout),
+ MSM_PIN_FUNCTION(tgu_ch1_trigout),
+ MSM_PIN_FUNCTION(tgu_ch2_trigout),
+ MSM_PIN_FUNCTION(tgu_ch3_trigout),
+ MSM_PIN_FUNCTION(tmess_prng0),
+ MSM_PIN_FUNCTION(tmess_prng1),
+ MSM_PIN_FUNCTION(tmess_prng2),
+ MSM_PIN_FUNCTION(tmess_prng3),
+ MSM_PIN_FUNCTION(tsense_pwm1),
+ MSM_PIN_FUNCTION(tsense_pwm2),
+ MSM_PIN_FUNCTION(tsense_pwm3),
+ MSM_PIN_FUNCTION(uim0_clk),
+ MSM_PIN_FUNCTION(uim0_data),
+ MSM_PIN_FUNCTION(uim0_present),
+ MSM_PIN_FUNCTION(uim0_reset),
+ MSM_PIN_FUNCTION(uim1_clk),
+ MSM_PIN_FUNCTION(uim1_data),
+ MSM_PIN_FUNCTION(uim1_present),
+ MSM_PIN_FUNCTION(uim1_reset),
+ MSM_PIN_FUNCTION(usb1_hs),
+ MSM_PIN_FUNCTION(usb_phy),
+ MSM_PIN_FUNCTION(vfr_0),
+ MSM_PIN_FUNCTION(vfr_1),
+ MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+};
+
+/*
+ * Every pin is maintained as a single group. A missing or non-existing pin
+ * would be maintained as a dummy group to keep the pin-group index in sync
+ * with the pin descriptors registered with the pinctrl core.
+ * Clients are not able to request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8650_groups[] = {
+ [0] = PINGROUP(0, qup2_se0, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+ [1] = PINGROUP(1, qup2_se0, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+ [2] = PINGROUP(2, qup2_se0, _, _, _, _, _, _, _, _, egpio),
+ [3] = PINGROUP(3, qup2_se0, phase_flag, _, qdss_gpio, _, _, _, _, _, egpio),
+ [4] = PINGROUP(4, qup2_se1, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+ [5] = PINGROUP(5, qup2_se1, ibi_i3c, phase_flag, _, _, _, _, _, _, egpio),
+ [6] = PINGROUP(6, qup2_se1, _, _, _, _, _, _, _, _, egpio),
+ [7] = PINGROUP(7, qup2_se1, phase_flag, _, qdss_gpio, _, _, _, _, _, egpio),
+ [8] = PINGROUP(8, qup2_se2, ibi_i3c, tb_trig_sdc2, phase_flag, tgu_ch0_trigout, _, qdss_gpio, _, _, _),
+ [9] = PINGROUP(9, qup2_se2, ibi_i3c, phase_flag, tgu_ch1_trigout, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup2_se2, cci_timer, tgu_ch2_trigout, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup2_se2, cci_timer, phase_flag, tgu_ch3_trigout, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup2_se3, cci_i2c_sda, ibi_i3c, qup2_se2, phase_flag, _, _, _, _, _),
+ [13] = PINGROUP(13, qup2_se3, cci_i2c_scl, ibi_i3c, qup2_se2, phase_flag, _, qdss_gpio, _, _, _),
+ [14] = PINGROUP(14, qup2_se3, _, _, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup2_se3, cci_async_in, qup2_se2, phase_flag, _, qdss_gpio, _, _, _, _),
+ [16] = PINGROUP(16, qup2_se4, phase_flag, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup2_se4, phase_flag, _, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup2_se4, _, _, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup2_se4, phase_flag, _, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup2_se5, _, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup2_se5, _, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup2_se5, _, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup2_se5, qup2_se5, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup2_se6, _, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup2_se6, _, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup2_se6, _, _, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup2_se6, qdss_cti, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup2_se7, _, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup2_se7, usb_phy, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup2_se7, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup2_se7, qdss_cti, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup1_se0, _, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup1_se0, _, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup1_se1, do_not, ibi_i3c, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup1_se1, do_not, ibi_i3c, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup1_se1, do_not, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup1_se1, do_not, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se2, ddr_bist_fail, _, _, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se2, ddr_bist_start, _, _, _, _, _, _, _, _),
+ [42] = PINGROUP(42, qup1_se2, _, _, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup1_se2, _, _, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup1_se3, qup1_se2, ddr_bist_complete, ddr_pxi1, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup1_se3, qup1_se2, ddr_bist_stop, ddr_pxi1, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup1_se3, qup1_se2, ddr_pxi3, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup1_se3, uim0_present, dp_hot, ddr_pxi3, _, _, _, _, _, _),
+ [48] = PINGROUP(48, qup1_se4, ibi_i3c, _, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, qup1_se4, ibi_i3c, _, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup1_se4, aoss_cti, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, qup1_se4, aoss_cti, ddr_pxi2, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, qup1_se5, _, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, qup1_se5, _, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup1_se5, usb_phy, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup1_se6, ibi_i3c, qspi2, sdc42, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup1_se6, ibi_i3c, qspi3, sdc43, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup1_se6, qspi_cs, tb_trig_sdc4, tsense_pwm1, tsense_pwm2, tsense_pwm3, _, _, _, _),
+ [59] = PINGROUP(59, qup1_se6, _, qspi_cs, sdc4_cmd, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qup1_se7, aoss_cti, vsense_trigger_mirnat, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, qup1_se7, aoss_cti, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, qup1_se7, ddr_pxi2, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, qup1_se7, _, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, i2chub0_se0, _, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, i2chub0_se0, _, _, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, i2chub0_se1, _, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, i2chub0_se1, _, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, i2chub0_se2, pll_bist_sync, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, i2chub0_se2, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, i2chub0_se3, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, i2chub0_se3, _, atest_usb, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, i2chub0_se4, _, atest_usb, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, i2chub0_se4, jitter_bist, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, i2chub0_se5, atest_usb, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, i2chub0_se5, ddr_pxi0, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, i2chub0_se6, ddr_pxi0, uim1_present, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, i2chub0_se6, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, i2chub0_se7, qdss_cti, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, i2chub0_se7, qdss_cti, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, i2chub0_se9, _, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, i2chub0_se9, _, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, _, _, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, _, _, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, gcc_gp1, _, _, _, _, _, _),
+ [87] = PINGROUP(87, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, gcc_gp2, _, _, _, _, _, _),
+ [88] = PINGROUP(88, mdp_vsync_e, gcc_gp3, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, usb1_hs, gnss_adc0, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, gnss_adc1, _, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, _, gnss_adc0, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, dbg_out_clk, gnss_adc1, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, sd_write_protect, _, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, cmu_rng, phase_flag, tmess_prng0, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, pcie0_clk_req_n, cmu_rng, phase_flag, tmess_prng1, _, _, _, _, _, _),
+ [96] = PINGROUP(96, cmu_rng, phase_flag, tmess_prng2, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, _, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, pcie1_clk_req_n, _, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cam_aon_mclk2, qdss_gpio, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cam_aon_mclk4, qdss_gpio, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cam_mclk, pll_clk_aux, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, cam_mclk, _, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_timer, phase_flag, tmess_prng3, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_timer, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_timer, phase_flag, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, cci_i2c_sda, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, cci_i2c_sda, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [114] = PINGROUP(114, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [115] = PINGROUP(115, cci_i2c_sda, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [116] = PINGROUP(116, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [117] = PINGROUP(117, cci_i2c_sda, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [118] = PINGROUP(118, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [119] = PINGROUP(119, cci_i2c_sda, phase_flag, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, cci_i2c_scl, phase_flag, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, i2s1_sck, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, i2s1_data0, cmu_rng, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, i2s1_ws, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, i2s1_data1, audio_ext_mclk1, audio_ref_clk, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, audio_ext_mclk0, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, i2s0_sck, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, i2s0_data0, cmu_rng, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, i2s0_data1, cmu_rng, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, i2s0_ws, cmu_rng, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, uim0_data, atest_usb, atest_char, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, uim0_clk, atest_usb, atest_char, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, uim0_reset, atest_char, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, mdp_vsync, atest_char, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, uim1_data, do_not, qspi0, sdc40, gcc_gp1, _, _, _, _, _),
+ [135] = PINGROUP(135, uim1_clk, do_not, qspi_clk, sdc4_clk, gcc_gp2, _, _, _, _, _),
+ [136] = PINGROUP(136, uim1_reset, do_not, qspi1, sdc41, gcc_gp3, _, _, _, _, _),
+ [137] = PINGROUP(137, mdp_vsync, atest_char, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, qdss_gpio, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, qdss_gpio, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, qdss_gpio, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, qdss_gpio, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, qdss_gpio, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, qdss_gpio, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, qdss_gpio, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, qdss_gpio, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, coex_uart1_rx, qdss_gpio, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, coex_uart1_tx, qdss_gpio, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, vfr_0, coex_uart2_rx, qdss_gpio, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, coex_uart2_tx, qdss_gpio, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, nav_gpio2, _, qdss_gpio, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, cci_i2c_scl, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [154] = PINGROUP(154, nav_gpio0, nav_gpio3, qdss_gpio, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, nav_gpio1, vfr_1, qdss_gpio, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, qlink_little_request, qdss_gpio, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, qlink_little_enable, qdss_gpio, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, qlink_wmss, qdss_gpio, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, qlink_big_request, qdss_cti, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, qlink_big_enable, _, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, _, _, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, cci_async_in, phase_flag, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, cci_async_in, phase_flag, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, _, _, _, _, _, _, _, _, _, egpio),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _, egpio),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _, egpio),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _, egpio),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _, egpio),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _, egpio),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _, egpio),
+ [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _, egpio),
+ [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _, egpio),
+ [174] = PINGROUP(174, _, _, _, _, _, _, _, _, _, egpio),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _, egpio),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _, egpio),
+ [177] = PINGROUP(177, _, _, _, _, _, _, _, _, _, egpio),
+ [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _, egpio),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _, egpio),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _, egpio),
+ [181] = PINGROUP(181, prng_rosc3, _, _, _, _, _, _, _, _, egpio),
+ [182] = PINGROUP(182, prng_rosc2, _, _, _, _, _, _, _, _, egpio),
+ [183] = PINGROUP(183, prng_rosc1, _, _, _, _, _, _, _, _, egpio),
+ [184] = PINGROUP(184, _, _, _, _, _, _, _, _, _, egpio),
+ [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _, egpio),
+ [186] = PINGROUP(186, prng_rosc0, _, _, _, _, _, _, _, _, egpio),
+ [187] = PINGROUP(187, cri_trng, _, _, _, _, _, _, _, _, egpio),
+ [188] = PINGROUP(188, _, _, _, _, _, _, _, _, _, egpio),
+ [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _, egpio),
+ [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _, egpio),
+ [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _, egpio),
+ [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _, egpio),
+ [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _, egpio),
+ [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _, egpio),
+ [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _, egpio),
+ [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _, egpio),
+ [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _, egpio),
+ [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _, egpio),
+ [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _, egpio),
+ [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _, egpio),
+ [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _, egpio),
+ [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _, egpio),
+ [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _, egpio),
+ [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _, egpio),
+ [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _, egpio),
+ [206] = PINGROUP(206, i2chub0_se8, _, _, _, _, _, _, _, _, egpio),
+ [207] = PINGROUP(207, i2chub0_se8, _, _, _, _, _, _, _, _, egpio),
+ [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _, egpio),
+ [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _, egpio),
+ [210] = UFS_RESET(ufs_reset, 0xde004, 0xdf000),
+ [211] = SDC_QDSD_PINGROUP(sdc2_clk, 0xd6000, 14, 6),
+ [212] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xd6000, 11, 3),
+ [213] = SDC_QDSD_PINGROUP(sdc2_data, 0xd6000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm8650_pdc_map[] = {
+ { 0, 94 }, { 3, 105 }, { 4, 78 }, { 7, 67 }, { 8, 64 },
+ { 11, 121 }, { 12, 71 }, { 15, 82 }, { 18, 75 }, { 19, 63 },
+ { 20, 114 }, { 23, 84 }, { 27, 61 }, { 29, 112 }, { 31, 113 },
+ { 32, 66 }, { 35, 52 }, { 36, 123 }, { 39, 56 }, { 43, 59 },
+ { 46, 79 }, { 47, 124 }, { 48, 125 }, { 51, 93 }, { 54, 60 },
+ { 55, 104 }, { 56, 72 }, { 57, 77 }, { 59, 51 }, { 63, 85 },
+ { 64, 107 }, { 65, 108 }, { 66, 109 }, { 67, 83 }, { 68, 110 },
+ { 69, 111 }, { 75, 96 }, { 76, 97 }, { 77, 98 }, { 80, 89 },
+ { 81, 90 }, { 84, 106 }, { 85, 100 }, { 86, 87 }, { 87, 88 },
+ { 88, 65 }, { 90, 92 }, { 92, 99 }, { 95, 118 }, { 96, 119 },
+ { 98, 101 }, { 99, 62 }, { 112, 120 }, { 133, 80 }, { 136, 69 },
+ { 137, 81 }, { 148, 57 }, { 150, 58 }, { 152, 127 }, { 153, 74 },
+ { 154, 126 }, { 155, 73 }, { 156, 128 }, { 159, 129 }, { 162, 86 },
+ { 163, 122 }, { 166, 139 }, { 169, 140 }, { 171, 141 }, { 172, 142 },
+ { 174, 102 }, { 176, 143 }, { 177, 55 }, { 181, 144 }, { 182, 145 },
+ { 185, 146 }, { 187, 95 }, { 188, 130 }, { 190, 131 }, { 191, 132 },
+ { 192, 133 }, { 193, 134 }, { 195, 68 }, { 196, 135 }, { 197, 136 },
+ { 198, 54 }, { 199, 103 }, { 200, 53 }, { 201, 137 }, { 202, 70 },
+ { 203, 138 }, { 204, 76 }, { 205, 91 },
+};
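+
+/*
+ * Note for readers (not part of the hardware tables): each pair above maps
+ * a TLMM GPIO to the PDC interrupt it wakes through, so { 0, 94 } routes
+ * gpio0 wake events to PDC line 94; GPIOs without an entry are not
+ * wake-capable via the PDC.
+ */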
+
+static const struct msm_pinctrl_soc_data sm8650_tlmm = {
+ .pins = sm8650_pins,
+ .npins = ARRAY_SIZE(sm8650_pins),
+ .functions = sm8650_functions,
+ .nfunctions = ARRAY_SIZE(sm8650_functions),
+ .groups = sm8650_groups,
+ .ngroups = ARRAY_SIZE(sm8650_groups),
+ .ngpios = 211,
+ .wakeirq_map = sm8650_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8650_pdc_map),
+ .egpio_func = 10,
+};
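+
+/*
+ * Reading the descriptor above: .egpio_func = 10 means that selecting
+ * function index 10 of a PINGROUP (the trailing "egpio" slot, where
+ * present) hands the pad over to the eGPIO domain instead of the regular
+ * TLMM functions.
+ */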
+
+static int sm8650_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8650_tlmm);
+}
+
+static const struct of_device_id sm8650_tlmm_of_match[] = {
+ { .compatible = "qcom,sm8650-tlmm", },
+ {},
+};
+
+static struct platform_driver sm8650_tlmm_driver = {
+ .driver = {
+ .name = "sm8650-tlmm",
+ .of_match_table = sm8650_tlmm_of_match,
+ },
+ .probe = sm8650_tlmm_probe,
+ .remove_new = msm_pinctrl_remove,
+};
+
+static int __init sm8650_tlmm_init(void)
+{
+ return platform_driver_register(&sm8650_tlmm_driver);
+}
+arch_initcall(sm8650_tlmm_init);
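+/*
+ * Note: registering at arch_initcall rather than the default module_init
+ * level brings the TLMM up early in boot, before consumers that need their
+ * pins muxed and configured start probing; this mirrors the other Qualcomm
+ * TLMM drivers.
+ */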
+
+static void __exit sm8650_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm8650_tlmm_driver);
+}
+module_exit(sm8650_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM8650 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm8650_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
new file mode 100644
index 000000000..e30e93840
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -0,0 +1,1876 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .i2c_pull_bit = 13, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
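+
+/*
+ * Worked example (derived purely from the arithmetic above): with
+ * REG_SIZE = 0x1000, gpio5 resolves to
+ *
+ *   ctl_reg         = 5 * 0x1000       = 0x5000
+ *   io_reg          = 5 * 0x1000 + 0x4 = 0x5004
+ *   intr_cfg_reg    = 5 * 0x1000 + 0x8 = 0x5008
+ *   intr_status_reg = 5 * 0x1000 + 0xc = 0x500c
+ *
+ * i.e. every GPIO owns a 4 KiB register tile indexed by its number.
+ */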
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
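+
+/*
+ * Note: unlike PINGROUP, the SDC/QDSD pads share one fixed control
+ * register per group; only the pull and drive-strength bit positions are
+ * parameterised, and every field set to -1 flags a capability (muxing,
+ * I/O, interrupts) the pad simply does not have.
+ */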
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
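+
+/*
+ * Note: the UFS reset pad is a bare output; its control register sits at
+ * the given offset and the output register in the following word, so the
+ * instantiation below, UFS_RESET(ufs_reset, 0x1f9000), yields ctl_reg =
+ * 0x1f9000 and io_reg = 0x1f9004.
+ */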
+
+static const struct pinctrl_pin_desc x1e80100_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "GPIO_210"),
+ PINCTRL_PIN(211, "GPIO_211"),
+ PINCTRL_PIN(212, "GPIO_212"),
+ PINCTRL_PIN(213, "GPIO_213"),
+ PINCTRL_PIN(214, "GPIO_214"),
+ PINCTRL_PIN(215, "GPIO_215"),
+ PINCTRL_PIN(216, "GPIO_216"),
+ PINCTRL_PIN(217, "GPIO_217"),
+ PINCTRL_PIN(218, "GPIO_218"),
+ PINCTRL_PIN(219, "GPIO_219"),
+ PINCTRL_PIN(220, "GPIO_220"),
+ PINCTRL_PIN(221, "GPIO_221"),
+ PINCTRL_PIN(222, "GPIO_222"),
+ PINCTRL_PIN(223, "GPIO_223"),
+ PINCTRL_PIN(224, "GPIO_224"),
+ PINCTRL_PIN(225, "GPIO_225"),
+ PINCTRL_PIN(226, "GPIO_226"),
+ PINCTRL_PIN(227, "GPIO_227"),
+ PINCTRL_PIN(228, "GPIO_228"),
+ PINCTRL_PIN(229, "GPIO_229"),
+ PINCTRL_PIN(230, "GPIO_230"),
+ PINCTRL_PIN(231, "GPIO_231"),
+ PINCTRL_PIN(232, "GPIO_232"),
+ PINCTRL_PIN(233, "GPIO_233"),
+ PINCTRL_PIN(234, "GPIO_234"),
+ PINCTRL_PIN(235, "GPIO_235"),
+ PINCTRL_PIN(236, "GPIO_236"),
+ PINCTRL_PIN(237, "GPIO_237"),
+ PINCTRL_PIN(238, "UFS_RESET"),
+ PINCTRL_PIN(239, "SDC2_CLK"),
+ PINCTRL_PIN(240, "SDC2_CMD"),
+ PINCTRL_PIN(241, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
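+/* e.g. DECLARE_MSM_GPIO_PINS(5) expands to:
+ *   static const unsigned int gpio5_pins[] = { 5 };
+ */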
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+DECLARE_MSM_GPIO_PINS(210);
+DECLARE_MSM_GPIO_PINS(211);
+DECLARE_MSM_GPIO_PINS(212);
+DECLARE_MSM_GPIO_PINS(213);
+DECLARE_MSM_GPIO_PINS(214);
+DECLARE_MSM_GPIO_PINS(215);
+DECLARE_MSM_GPIO_PINS(216);
+DECLARE_MSM_GPIO_PINS(217);
+DECLARE_MSM_GPIO_PINS(218);
+DECLARE_MSM_GPIO_PINS(219);
+DECLARE_MSM_GPIO_PINS(220);
+DECLARE_MSM_GPIO_PINS(221);
+DECLARE_MSM_GPIO_PINS(222);
+DECLARE_MSM_GPIO_PINS(223);
+DECLARE_MSM_GPIO_PINS(224);
+DECLARE_MSM_GPIO_PINS(225);
+DECLARE_MSM_GPIO_PINS(226);
+DECLARE_MSM_GPIO_PINS(227);
+DECLARE_MSM_GPIO_PINS(228);
+DECLARE_MSM_GPIO_PINS(229);
+DECLARE_MSM_GPIO_PINS(230);
+DECLARE_MSM_GPIO_PINS(231);
+DECLARE_MSM_GPIO_PINS(232);
+DECLARE_MSM_GPIO_PINS(233);
+DECLARE_MSM_GPIO_PINS(234);
+DECLARE_MSM_GPIO_PINS(235);
+DECLARE_MSM_GPIO_PINS(236);
+DECLARE_MSM_GPIO_PINS(237);
+
+static const unsigned int ufs_reset_pins[] = { 238 };
+static const unsigned int sdc2_clk_pins[] = { 239 };
+static const unsigned int sdc2_cmd_pins[] = { 240 };
+static const unsigned int sdc2_data_pins[] = { 241 };
+
+enum x1e80100_functions {
+ msm_mux_gpio,
+ msm_mux_RESOUT_GPIO,
+ msm_mux_aon_cci,
+ msm_mux_aoss_cti,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_usb,
+ msm_mux_audio_ext,
+ msm_mux_audio_ref,
+ msm_mux_cam_aon,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cmu_rng0,
+ msm_mux_cmu_rng1,
+ msm_mux_cmu_rng2,
+ msm_mux_cmu_rng3,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_ddr_pxi4,
+ msm_mux_ddr_pxi5,
+ msm_mux_ddr_pxi6,
+ msm_mux_ddr_pxi7,
+ msm_mux_edp0_hot,
+ msm_mux_edp0_lcd,
+ msm_mux_edp1_hot,
+ msm_mux_edp1_lcd,
+ msm_mux_eusb0_ac,
+ msm_mux_eusb1_ac,
+ msm_mux_eusb2_ac,
+ msm_mux_eusb3_ac,
+ msm_mux_eusb5_ac,
+ msm_mux_eusb6_ac,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_i2s0_data0,
+ msm_mux_i2s0_data1,
+ msm_mux_i2s0_sck,
+ msm_mux_i2s0_ws,
+ msm_mux_i2s1_data0,
+ msm_mux_i2s1_data1,
+ msm_mux_i2s1_sck,
+ msm_mux_i2s1_ws,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mdp_vsync4,
+ msm_mux_mdp_vsync5,
+ msm_mux_mdp_vsync6,
+ msm_mux_mdp_vsync7,
+ msm_mux_mdp_vsync8,
+ msm_mux_pcie3_clk,
+ msm_mux_pcie4_clk,
+ msm_mux_pcie5_clk,
+ msm_mux_pcie6a_clk,
+ msm_mux_pcie6b_clk,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_clk,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qspi00,
+ msm_mux_qspi01,
+ msm_mux_qspi02,
+ msm_mux_qspi03,
+ msm_mux_qspi0_clk,
+ msm_mux_qspi0_cs0,
+ msm_mux_qspi0_cs1,
+ msm_mux_qup0_se0,
+ msm_mux_qup0_se1,
+ msm_mux_qup0_se2,
+ msm_mux_qup0_se3,
+ msm_mux_qup0_se4,
+ msm_mux_qup0_se5,
+ msm_mux_qup0_se6,
+ msm_mux_qup0_se7,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_qup1_se7,
+ msm_mux_qup2_se0,
+ msm_mux_qup2_se1,
+ msm_mux_qup2_se2,
+ msm_mux_qup2_se3,
+ msm_mux_qup2_se4,
+ msm_mux_qup2_se5,
+ msm_mux_qup2_se6,
+ msm_mux_qup2_se7,
+ msm_mux_sd_write,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sdc4_data0,
+ msm_mux_sdc4_data1,
+ msm_mux_sdc4_data2,
+ msm_mux_sdc4_data3,
+ msm_mux_sys_throttle,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tgu_ch4,
+ msm_mux_tgu_ch5,
+ msm_mux_tgu_ch6,
+ msm_mux_tgu_ch7,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsense_pwm3,
+ msm_mux_tsense_pwm4,
+ msm_mux_usb0_dp,
+ msm_mux_usb0_phy,
+ msm_mux_usb0_sbrx,
+ msm_mux_usb0_sbtx,
+ msm_mux_usb1_dp,
+ msm_mux_usb1_phy,
+ msm_mux_usb1_sbrx,
+ msm_mux_usb1_sbtx,
+ msm_mux_usb2_dp,
+ msm_mux_usb2_phy,
+ msm_mux_usb2_sbrx,
+ msm_mux_usb2_sbtx,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
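+
+/*
+ * Note: msm_mux__ is the "unused slot" sentinel; the bare `_` arguments in
+ * the PINGROUP tables below token-paste into it, so empty function
+ * positions still carry a valid mux index.
+ */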
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+ "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194",
+ "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200",
+ "gpio201", "gpio202", "gpio203", "gpio204", "gpio205", "gpio206",
+ "gpio207", "gpio208", "gpio209", "gpio210", "gpio211", "gpio212",
+ "gpio213", "gpio214", "gpio215", "gpio216", "gpio217", "gpio218",
+ "gpio219", "gpio220", "gpio221", "gpio222", "gpio223", "gpio224",
+ "gpio225", "gpio226", "gpio227", "gpio228", "gpio229", "gpio230",
+ "gpio231", "gpio232", "gpio233", "gpio234", "gpio235", "gpio236",
+ "gpio237",
+};
+
+static const char * const RESOUT_GPIO_groups[] = {
+ "gpio160",
+};
+
+static const char * const aon_cci_groups[] = {
+ "gpio235", "gpio236",
+};
+
+static const char * const aoss_cti_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio181",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio185",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio184",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio188",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio182",
+};
+
+static const char * const atest_usb_groups[] = {
+ "gpio9", "gpio10", "gpio35", "gpio38", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48",
+ "gpio49", "gpio50", "gpio51", "gpio52", "gpio53", "gpio54",
+ "gpio58", "gpio59", "gpio65", "gpio66", "gpio67", "gpio72",
+ "gpio73", "gpio74", "gpio75", "gpio80", "gpio81", "gpio83",
+};
+
+static const char * const audio_ext_groups[] = {
+ "gpio134", "gpio142",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio142",
+};
+
+static const char * const cam_aon_groups[] = {
+ "gpio100",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio111", "gpio112", "gpio113",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio109",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio110",
+};
+
+static const char * const cci_timer2_groups[] = {
+ "gpio111",
+};
+
+static const char * const cci_timer3_groups[] = {
+ "gpio112",
+};
+
+static const char * const cci_timer4_groups[] = {
+ "gpio113",
+};
+
+static const char * const cmu_rng0_groups[] = {
+ "gpio48",
+};
+
+static const char * const cmu_rng1_groups[] = {
+ "gpio47",
+};
+
+static const char * const cmu_rng2_groups[] = {
+ "gpio46",
+};
+
+static const char * const cmu_rng3_groups[] = {
+ "gpio45",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio187",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio51",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio54", "gpio55", "gpio56", "gpio57",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+ "gpio9", "gpio38",
+};
+
+static const char * const ddr_pxi1_groups[] = {
+ "gpio10", "gpio41",
+};
+
+static const char * const ddr_pxi2_groups[] = {
+ "gpio42", "gpio43",
+};
+
+static const char * const ddr_pxi3_groups[] = {
+ "gpio44", "gpio45",
+};
+
+static const char * const ddr_pxi4_groups[] = {
+ "gpio46", "gpio47",
+};
+
+static const char * const ddr_pxi5_groups[] = {
+ "gpio48", "gpio49",
+};
+
+static const char * const ddr_pxi6_groups[] = {
+ "gpio50", "gpio51",
+};
+
+static const char * const ddr_pxi7_groups[] = {
+ "gpio52", "gpio53",
+};
+
+static const char * const edp0_hot_groups[] = {
+ "gpio119",
+};
+
+static const char * const edp0_lcd_groups[] = {
+ "gpio120",
+};
+
+static const char * const edp1_hot_groups[] = {
+ "gpio120",
+};
+
+static const char * const edp1_lcd_groups[] = {
+ "gpio115", "gpio119",
+};
+
+static const char * const eusb0_ac_groups[] = {
+ "gpio168",
+};
+
+static const char * const eusb1_ac_groups[] = {
+ "gpio177",
+};
+
+static const char * const eusb2_ac_groups[] = {
+ "gpio186",
+};
+
+static const char * const eusb3_ac_groups[] = {
+ "gpio169",
+};
+
+static const char * const eusb5_ac_groups[] = {
+ "gpio187",
+};
+
+static const char * const eusb6_ac_groups[] = {
+ "gpio178",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio71", "gpio72",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio64", "gpio73",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio74", "gpio82",
+};
+
+static const char * const i2s0_data0_groups[] = {
+ "gpio136",
+};
+
+static const char * const i2s0_data1_groups[] = {
+ "gpio137",
+};
+
+static const char * const i2s0_sck_groups[] = {
+ "gpio135",
+};
+
+static const char * const i2s0_ws_groups[] = {
+ "gpio138",
+};
+
+static const char * const i2s1_data0_groups[] = {
+ "gpio140",
+};
+
+static const char * const i2s1_data1_groups[] = {
+ "gpio142",
+};
+
+static const char * const i2s1_sck_groups[] = {
+ "gpio139",
+};
+
+static const char * const i2s1_ws_groups[] = {
+ "gpio141",
+};
+
+static const char * const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio32", "gpio33", "gpio36", "gpio37", "gpio68",
+ "gpio69",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio42",
+};
+
+static const char * const mdp_vsync0_groups[] = {
+ "gpio114",
+};
+
+static const char * const mdp_vsync1_groups[] = {
+ "gpio114",
+};
+
+static const char * const mdp_vsync2_groups[] = {
+ "gpio115",
+};
+
+static const char * const mdp_vsync3_groups[] = {
+ "gpio115",
+};
+
+static const char * const mdp_vsync4_groups[] = {
+ "gpio109",
+};
+
+static const char * const mdp_vsync5_groups[] = {
+ "gpio110",
+};
+
+static const char * const mdp_vsync6_groups[] = {
+ "gpio111",
+};
+
+static const char * const mdp_vsync7_groups[] = {
+ "gpio112",
+};
+
+static const char * const mdp_vsync8_groups[] = {
+ "gpio113",
+};
+
+static const char * const pcie3_clk_groups[] = {
+ "gpio144",
+};
+
+static const char * const pcie4_clk_groups[] = {
+ "gpio147",
+};
+
+static const char * const pcie5_clk_groups[] = {
+ "gpio150",
+};
+
+static const char * const pcie6a_clk_groups[] = {
+ "gpio153",
+};
+
+static const char * const pcie6b_clk_groups[] = {
+ "gpio156",
+};
+
+static const char * const phase_flag_groups[] = {
+ "gpio6", "gpio7", "gpio8", "gpio11", "gpio12", "gpio13",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23", "gpio24", "gpio25",
+ "gpio26", "gpio27", "gpio39", "gpio40", "gpio76", "gpio77",
+ "gpio78", "gpio181", "gpio182", "gpio184", "gpio185",
+ "gpio186", "gpio187", "gpio188",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio28",
+};
+
+static const char * const pll_clk_groups[] = {
+ "gpio35",
+};
+
+static const char * const prng_rosc0_groups[] = {
+ "gpio186",
+};
+
+static const char * const prng_rosc1_groups[] = {
+ "gpio188",
+};
+
+static const char * const prng_rosc2_groups[] = {
+ "gpio182",
+};
+
+static const char * const prng_rosc3_groups[] = {
+ "gpio181",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio18", "gpio19", "gpio23", "gpio27", "gpio161", "gpio162",
+ "gpio215", "gpio217",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101",
+ "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+ "gpio219", "gpio220", "gpio221", "gpio222", "gpio223", "gpio224",
+ "gpio225", "gpio226", "gpio227", "gpio228", "gpio229", "gpio230",
+ "gpio231", "gpio232", "gpio233", "gpio234", "gpio235", "gpio236",
+};
+
+static const char * const qspi00_groups[] = {
+ "gpio128",
+};
+
+static const char * const qspi01_groups[] = {
+ "gpio129",
+};
+
+static const char * const qspi02_groups[] = {
+ "gpio130",
+};
+
+static const char * const qspi03_groups[] = {
+ "gpio131",
+};
+
+static const char * const qspi0_clk_groups[] = {
+ "gpio127",
+};
+
+static const char * const qspi0_cs0_groups[] = {
+ "gpio132",
+};
+
+static const char * const qspi0_cs1_groups[] = {
+ "gpio133",
+};
+
+static const char * const qup0_se0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const qup0_se1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const qup0_se2_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup0_se3_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup0_se4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup0_se5_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup0_se6_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char * const qup0_se7_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const qup1_se0_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char * const qup1_se1_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup1_se2_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup1_se3_groups[] = {
+ "gpio33", "gpio34", "gpio35", "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup1_se4_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup1_se5_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup1_se6_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char * const qup1_se7_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup2_se0_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const qup2_se1_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char * const qup2_se2_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup2_se3_groups[] = {
+ "gpio65", "gpio66", "gpio67", "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const qup2_se4_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup2_se5_groups[] = {
+ "gpio84", "gpio85", "gpio86", "gpio87",
+};
+
+static const char * const qup2_se6_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+
+static const char * const qup2_se7_groups[] = {
+ "gpio84", "gpio85", "gpio86", "gpio87",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio162",
+};
+
+static const char * const sdc4_clk_groups[] = {
+ "gpio127",
+};
+
+static const char * const sdc4_cmd_groups[] = {
+ "gpio132",
+};
+
+static const char * const sdc4_data0_groups[] = {
+ "gpio128",
+};
+
+static const char * const sdc4_data1_groups[] = {
+ "gpio129",
+};
+
+static const char * const sdc4_data2_groups[] = {
+ "gpio130",
+};
+
+static const char * const sdc4_data3_groups[] = {
+ "gpio131",
+};
+
+static const char * const sys_throttle_groups[] = {
+ "gpio39", "gpio94",
+};
+
+static const char * const tb_trig_groups[] = {
+ "gpio133", "gpio137",
+};
+
+static const char * const tgu_ch0_groups[] = {
+ "gpio81",
+};
+
+static const char * const tgu_ch1_groups[] = {
+ "gpio65",
+};
+
+static const char * const tgu_ch2_groups[] = {
+ "gpio66",
+};
+
+static const char * const tgu_ch3_groups[] = {
+ "gpio67",
+};
+
+static const char * const tgu_ch4_groups[] = {
+ "gpio68",
+};
+
+static const char * const tgu_ch5_groups[] = {
+ "gpio69",
+};
+
+static const char * const tgu_ch6_groups[] = {
+ "gpio83",
+};
+
+static const char * const tgu_ch7_groups[] = {
+ "gpio80",
+};
+
+static const char * const tmess_prng0_groups[] = {
+ "gpio92",
+};
+
+static const char * const tmess_prng1_groups[] = {
+ "gpio93",
+};
+
+static const char * const tmess_prng2_groups[] = {
+ "gpio94",
+};
+
+static const char * const tmess_prng3_groups[] = {
+ "gpio95",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+ "gpio34",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+ "gpio34",
+};
+
+static const char * const tsense_pwm3_groups[] = {
+ "gpio34",
+};
+
+static const char * const tsense_pwm4_groups[] = {
+ "gpio34",
+};
+
+static const char * const usb0_dp_groups[] = {
+ "gpio122",
+};
+
+static const char * const usb0_phy_groups[] = {
+ "gpio121",
+};
+
+static const char * const usb0_sbrx_groups[] = {
+ "gpio163",
+};
+
+static const char * const usb0_sbtx_groups[] = {
+ "gpio164", "gpio165",
+};
+
+static const char * const usb1_dp_groups[] = {
+ "gpio124",
+};
+
+static const char * const usb1_phy_groups[] = {
+ "gpio123",
+};
+
+static const char * const usb1_sbrx_groups[] = {
+ "gpio172",
+};
+
+static const char * const usb1_sbtx_groups[] = {
+ "gpio173", "gpio174",
+};
+
+static const char * const usb2_dp_groups[] = {
+ "gpio126",
+};
+
+static const char * const usb2_phy_groups[] = {
+ "gpio125",
+};
+
+static const char * const usb2_sbrx_groups[] = {
+ "gpio181",
+};
+
+static const char * const usb2_sbtx_groups[] = {
+ "gpio182", "gpio183",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio38",
+};
+
+static const struct pinfunction x1e80100_functions[] = {
+ MSM_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(RESOUT_GPIO),
+ MSM_PIN_FUNCTION(aon_cci),
+ MSM_PIN_FUNCTION(aoss_cti),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_char0),
+ MSM_PIN_FUNCTION(atest_char1),
+ MSM_PIN_FUNCTION(atest_char2),
+ MSM_PIN_FUNCTION(atest_char3),
+ MSM_PIN_FUNCTION(atest_usb),
+ MSM_PIN_FUNCTION(audio_ext),
+ MSM_PIN_FUNCTION(audio_ref),
+ MSM_PIN_FUNCTION(cam_aon),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async),
+ MSM_PIN_FUNCTION(cci_i2c),
+ MSM_PIN_FUNCTION(cci_timer0),
+ MSM_PIN_FUNCTION(cci_timer1),
+ MSM_PIN_FUNCTION(cci_timer2),
+ MSM_PIN_FUNCTION(cci_timer3),
+ MSM_PIN_FUNCTION(cci_timer4),
+ MSM_PIN_FUNCTION(cmu_rng0),
+ MSM_PIN_FUNCTION(cmu_rng1),
+ MSM_PIN_FUNCTION(cmu_rng2),
+ MSM_PIN_FUNCTION(cmu_rng3),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(dbg_out),
+ MSM_PIN_FUNCTION(ddr_bist),
+ MSM_PIN_FUNCTION(ddr_pxi0),
+ MSM_PIN_FUNCTION(ddr_pxi1),
+ MSM_PIN_FUNCTION(ddr_pxi2),
+ MSM_PIN_FUNCTION(ddr_pxi3),
+ MSM_PIN_FUNCTION(ddr_pxi4),
+ MSM_PIN_FUNCTION(ddr_pxi5),
+ MSM_PIN_FUNCTION(ddr_pxi6),
+ MSM_PIN_FUNCTION(ddr_pxi7),
+ MSM_PIN_FUNCTION(edp0_hot),
+ MSM_PIN_FUNCTION(edp0_lcd),
+ MSM_PIN_FUNCTION(edp1_hot),
+ MSM_PIN_FUNCTION(edp1_lcd),
+ MSM_PIN_FUNCTION(eusb0_ac),
+ MSM_PIN_FUNCTION(eusb1_ac),
+ MSM_PIN_FUNCTION(eusb2_ac),
+ MSM_PIN_FUNCTION(eusb3_ac),
+ MSM_PIN_FUNCTION(eusb5_ac),
+ MSM_PIN_FUNCTION(eusb6_ac),
+ MSM_PIN_FUNCTION(gcc_gp1),
+ MSM_PIN_FUNCTION(gcc_gp2),
+ MSM_PIN_FUNCTION(gcc_gp3),
+ MSM_PIN_FUNCTION(i2s0_data0),
+ MSM_PIN_FUNCTION(i2s0_data1),
+ MSM_PIN_FUNCTION(i2s0_sck),
+ MSM_PIN_FUNCTION(i2s0_ws),
+ MSM_PIN_FUNCTION(i2s1_data0),
+ MSM_PIN_FUNCTION(i2s1_data1),
+ MSM_PIN_FUNCTION(i2s1_sck),
+ MSM_PIN_FUNCTION(i2s1_ws),
+ MSM_PIN_FUNCTION(ibi_i3c),
+ MSM_PIN_FUNCTION(jitter_bist),
+ MSM_PIN_FUNCTION(mdp_vsync0),
+ MSM_PIN_FUNCTION(mdp_vsync1),
+ MSM_PIN_FUNCTION(mdp_vsync2),
+ MSM_PIN_FUNCTION(mdp_vsync3),
+ MSM_PIN_FUNCTION(mdp_vsync4),
+ MSM_PIN_FUNCTION(mdp_vsync5),
+ MSM_PIN_FUNCTION(mdp_vsync6),
+ MSM_PIN_FUNCTION(mdp_vsync7),
+ MSM_PIN_FUNCTION(mdp_vsync8),
+ MSM_PIN_FUNCTION(pcie3_clk),
+ MSM_PIN_FUNCTION(pcie4_clk),
+ MSM_PIN_FUNCTION(pcie5_clk),
+ MSM_PIN_FUNCTION(pcie6a_clk),
+ MSM_PIN_FUNCTION(pcie6b_clk),
+ MSM_PIN_FUNCTION(phase_flag),
+ MSM_PIN_FUNCTION(pll_bist),
+ MSM_PIN_FUNCTION(pll_clk),
+ MSM_PIN_FUNCTION(prng_rosc0),
+ MSM_PIN_FUNCTION(prng_rosc1),
+ MSM_PIN_FUNCTION(prng_rosc2),
+ MSM_PIN_FUNCTION(prng_rosc3),
+ MSM_PIN_FUNCTION(qdss_cti),
+ MSM_PIN_FUNCTION(qdss_gpio),
+ MSM_PIN_FUNCTION(qspi00),
+ MSM_PIN_FUNCTION(qspi01),
+ MSM_PIN_FUNCTION(qspi02),
+ MSM_PIN_FUNCTION(qspi03),
+ MSM_PIN_FUNCTION(qspi0_clk),
+ MSM_PIN_FUNCTION(qspi0_cs0),
+ MSM_PIN_FUNCTION(qspi0_cs1),
+ MSM_PIN_FUNCTION(qup0_se0),
+ MSM_PIN_FUNCTION(qup0_se1),
+ MSM_PIN_FUNCTION(qup0_se2),
+ MSM_PIN_FUNCTION(qup0_se3),
+ MSM_PIN_FUNCTION(qup0_se4),
+ MSM_PIN_FUNCTION(qup0_se5),
+ MSM_PIN_FUNCTION(qup0_se6),
+ MSM_PIN_FUNCTION(qup0_se7),
+ MSM_PIN_FUNCTION(qup1_se0),
+ MSM_PIN_FUNCTION(qup1_se1),
+ MSM_PIN_FUNCTION(qup1_se2),
+ MSM_PIN_FUNCTION(qup1_se3),
+ MSM_PIN_FUNCTION(qup1_se4),
+ MSM_PIN_FUNCTION(qup1_se5),
+ MSM_PIN_FUNCTION(qup1_se6),
+ MSM_PIN_FUNCTION(qup1_se7),
+ MSM_PIN_FUNCTION(qup2_se0),
+ MSM_PIN_FUNCTION(qup2_se1),
+ MSM_PIN_FUNCTION(qup2_se2),
+ MSM_PIN_FUNCTION(qup2_se3),
+ MSM_PIN_FUNCTION(qup2_se4),
+ MSM_PIN_FUNCTION(qup2_se5),
+ MSM_PIN_FUNCTION(qup2_se6),
+ MSM_PIN_FUNCTION(qup2_se7),
+ MSM_PIN_FUNCTION(sd_write),
+ MSM_PIN_FUNCTION(sdc4_clk),
+ MSM_PIN_FUNCTION(sdc4_cmd),
+ MSM_PIN_FUNCTION(sdc4_data0),
+ MSM_PIN_FUNCTION(sdc4_data1),
+ MSM_PIN_FUNCTION(sdc4_data2),
+ MSM_PIN_FUNCTION(sdc4_data3),
+ MSM_PIN_FUNCTION(sys_throttle),
+ MSM_PIN_FUNCTION(tb_trig),
+ MSM_PIN_FUNCTION(tgu_ch0),
+ MSM_PIN_FUNCTION(tgu_ch1),
+ MSM_PIN_FUNCTION(tgu_ch2),
+ MSM_PIN_FUNCTION(tgu_ch3),
+ MSM_PIN_FUNCTION(tgu_ch4),
+ MSM_PIN_FUNCTION(tgu_ch5),
+ MSM_PIN_FUNCTION(tgu_ch6),
+ MSM_PIN_FUNCTION(tgu_ch7),
+ MSM_PIN_FUNCTION(tmess_prng0),
+ MSM_PIN_FUNCTION(tmess_prng1),
+ MSM_PIN_FUNCTION(tmess_prng2),
+ MSM_PIN_FUNCTION(tmess_prng3),
+ MSM_PIN_FUNCTION(tsense_pwm1),
+ MSM_PIN_FUNCTION(tsense_pwm2),
+ MSM_PIN_FUNCTION(tsense_pwm3),
+ MSM_PIN_FUNCTION(tsense_pwm4),
+ MSM_PIN_FUNCTION(usb0_dp),
+ MSM_PIN_FUNCTION(usb0_phy),
+ MSM_PIN_FUNCTION(usb0_sbrx),
+ MSM_PIN_FUNCTION(usb0_sbtx),
+ MSM_PIN_FUNCTION(usb1_dp),
+ MSM_PIN_FUNCTION(usb1_phy),
+ MSM_PIN_FUNCTION(usb1_sbrx),
+ MSM_PIN_FUNCTION(usb1_sbtx),
+ MSM_PIN_FUNCTION(usb2_dp),
+ MSM_PIN_FUNCTION(usb2_phy),
+ MSM_PIN_FUNCTION(usb2_sbrx),
+ MSM_PIN_FUNCTION(usb2_sbtx),
+ MSM_PIN_FUNCTION(vsense_trigger),
+};
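+
+/*
+ * Note: MSM_PIN_FUNCTION(name) token-pastes name##_groups, tying each
+ * entry here to the matching *_groups string array above; the order of
+ * this array must therefore match enum x1e80100_functions.
+ */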
+
+/*
+ * Every pin is maintained as a single group, and any missing or
+ * non-existing pin is maintained as a dummy group to keep the pin group
+ * index in sync with the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup x1e80100_groups[] = {
+ [0] = PINGROUP(0, qup0_se0, ibi_i3c, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup0_se0, ibi_i3c, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup0_se0, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup0_se0, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup0_se1, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup0_se1, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup0_se1, phase_flag, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup0_se1, phase_flag, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup0_se2, phase_flag, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup0_se2, _, atest_usb, ddr_pxi0, _, _, _, _, _),
+ [10] = PINGROUP(10, qup0_se2, _, atest_usb, ddr_pxi1, _, _, _, _, _),
+ [11] = PINGROUP(11, qup0_se2, phase_flag, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup0_se4, phase_flag, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup0_se4, qup0_se2, phase_flag, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _),
+ [19] = PINGROUP(19, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _),
+ [20] = PINGROUP(20, qup0_se5, _, phase_flag, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _),
+ [22] = PINGROUP(22, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _),
+ [23] = PINGROUP(23, qup0_se5, qup0_se3, phase_flag, _, qdss_cti, _, _, _, _),
+ [24] = PINGROUP(24, qup0_se6, phase_flag, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup0_se6, phase_flag, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup0_se6, phase_flag, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup0_se6, phase_flag, _, qdss_cti, _, _, _, _, _),
+ [28] = PINGROUP(28, pll_bist, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, _, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, _, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup1_se0, ibi_i3c, qup1_se3, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup1_se0, qup1_se3, tsense_pwm1, tsense_pwm2, tsense_pwm3, tsense_pwm4, _, _, _),
+ [35] = PINGROUP(35, qup1_se0, qup1_se3, pll_clk, atest_usb, _, _, _, _, _),
+ [36] = PINGROUP(36, qup1_se1, ibi_i3c, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup1_se1, ibi_i3c, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup1_se1, vsense_trigger, atest_usb, ddr_pxi0, _, _, _, _, _),
+ [39] = PINGROUP(39, qup1_se1, sys_throttle, phase_flag, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se2, phase_flag, _, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se2, atest_usb, ddr_pxi1, _, _, _, _, _, _),
+ [42] = PINGROUP(42, qup1_se2, jitter_bist, atest_usb, ddr_pxi2, _, _, _, _, _),
+ [43] = PINGROUP(43, qup1_se2, _, atest_usb, ddr_pxi2, _, _, _, _, _),
+ [44] = PINGROUP(44, qup1_se3, _, atest_usb, ddr_pxi3, _, _, _, _, _),
+ [45] = PINGROUP(45, qup1_se3, cmu_rng3, _, atest_usb, ddr_pxi3, _, _, _, _),
+ [46] = PINGROUP(46, qup1_se3, cmu_rng2, _, atest_usb, ddr_pxi4, _, _, _, _),
+ [47] = PINGROUP(47, qup1_se3, cmu_rng1, _, atest_usb, ddr_pxi4, _, _, _, _),
+ [48] = PINGROUP(48, qup1_se4, cmu_rng0, _, atest_usb, ddr_pxi5, _, _, _, _),
+ [49] = PINGROUP(49, qup1_se4, qup1_se2, _, atest_usb, ddr_pxi5, _, _, _, _),
+ [50] = PINGROUP(50, qup1_se4, qup1_se2, _, atest_usb, ddr_pxi6, _, _, _, _),
+ [51] = PINGROUP(51, qup1_se4, qup1_se2, dbg_out, atest_usb, ddr_pxi6, _, _, _, _),
+ [52] = PINGROUP(52, qup1_se5, qup1_se7, atest_usb, ddr_pxi7, _, _, _, _, _),
+ [53] = PINGROUP(53, qup1_se5, qup1_se7, _, atest_usb, ddr_pxi7, _, _, _, _),
+ [54] = PINGROUP(54, qup1_se5, qup1_se7, ddr_bist, atest_usb, _, _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, qup1_se7, ddr_bist, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup1_se6, ddr_bist, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup1_se6, ddr_bist, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup1_se6, atest_usb, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup1_se6, atest_usb, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, aoss_cti, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, aoss_cti, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, aoss_cti, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, aoss_cti, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup2_se0, gcc_gp2, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, qup2_se0, qup2_se3, tgu_ch1, atest_usb, _, _, _, _, _),
+ [66] = PINGROUP(66, qup2_se0, qup2_se3, tgu_ch2, atest_usb, _, _, _, _, _),
+ [67] = PINGROUP(67, qup2_se0, qup2_se3, tgu_ch3, atest_usb, _, _, _, _, _),
+ [68] = PINGROUP(68, qup2_se1, ibi_i3c, tgu_ch4, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup2_se1, ibi_i3c, tgu_ch5, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup2_se1, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qup2_se1, gcc_gp1, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qup2_se2, gcc_gp1, atest_usb, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qup2_se2, gcc_gp2, atest_usb, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qup2_se2, gcc_gp3, atest_usb, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qup2_se2, atest_usb, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qup2_se3, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup2_se4, tgu_ch7, atest_usb, _, _, _, _, _, _),
+ [81] = PINGROUP(81, qup2_se4, qup2_se2, tgu_ch0, atest_usb, _, _, _, _, _),
+ [82] = PINGROUP(82, qup2_se4, qup2_se2, gcc_gp3, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup2_se4, qup2_se2, tgu_ch6, atest_usb, _, _, _, _, _),
+ [84] = PINGROUP(84, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, qup2_se5, qup2_se7, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, qup2_se6, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, qup2_se6, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, qup2_se6, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, qup2_se6, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, tmess_prng0, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, tmess_prng1, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, sys_throttle, tmess_prng2, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, tmess_prng3, _, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_aon, qdss_gpio, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qdss_gpio, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qdss_gpio, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_timer0, mdp_vsync4, qdss_gpio, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_timer1, mdp_vsync5, qdss_gpio, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_timer2, cci_async, mdp_vsync6, qdss_gpio, _, _, _, _, _),
+ [112] = PINGROUP(112, cci_timer3, cci_async, mdp_vsync7, qdss_gpio, _, _, _, _, _),
+ [113] = PINGROUP(113, cci_timer4, cci_async, mdp_vsync8, qdss_gpio, _, _, _, _, _),
+ [114] = PINGROUP(114, mdp_vsync0, mdp_vsync1, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, mdp_vsync3, mdp_vsync2, edp1_lcd, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, edp0_hot, edp1_lcd, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, edp1_hot, edp0_lcd, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, usb0_phy, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, usb0_dp, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, usb1_phy, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb1_dp, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, usb2_phy, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, usb2_dp, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, qspi0_clk, sdc4_clk, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, qspi00, sdc4_data0, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, qspi01, sdc4_data1, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, qspi02, sdc4_data2, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, qspi03, sdc4_data3, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, qspi0_cs0, sdc4_cmd, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, qspi0_cs1, tb_trig, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, audio_ext, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, i2s0_sck, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, i2s0_data0, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, i2s0_data1, tb_trig, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, i2s0_ws, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, i2s1_sck, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, i2s1_data0, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, i2s1_ws, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, i2s1_data1, audio_ext, audio_ref, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, pcie3_clk, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, pcie4_clk, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, pcie5_clk, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, pcie6a_clk, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, pcie6b_clk, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, _, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, RESOUT_GPIO, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qdss_cti, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, sd_write, qdss_cti, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, usb0_sbrx, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, usb0_sbtx, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, usb0_sbtx, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, eusb0_ac, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, eusb3_ac, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, usb1_sbrx, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, usb1_sbtx, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, usb1_sbtx, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, eusb1_ac, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, eusb6_ac, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, usb2_sbrx, prng_rosc3, phase_flag, _, atest_char, _, _, _, _),
+ [182] = PINGROUP(182, usb2_sbtx, prng_rosc2, phase_flag, _, atest_char3, _, _, _, _),
+ [183] = PINGROUP(183, usb2_sbtx, _, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, phase_flag, _, atest_char1, _, _, _, _, _, _),
+ [185] = PINGROUP(185, phase_flag, _, atest_char0, _, _, _, _, _, _),
+ [186] = PINGROUP(186, eusb2_ac, prng_rosc0, phase_flag, _, _, _, _, _, _),
+ [187] = PINGROUP(187, eusb5_ac, cri_trng, phase_flag, _, _, _, _, _, _),
+ [188] = PINGROUP(188, prng_rosc1, phase_flag, _, atest_char2, _, _, _, _, _),
+ [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _),
+ [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _),
+ [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _),
+ [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _),
+ [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _),
+ [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _),
+ [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _),
+ [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _),
+ [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _),
+ [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
+ [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
+ [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _),
+ [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _),
+ [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _),
+ [206] = PINGROUP(206, _, _, _, _, _, _, _, _, _),
+ [207] = PINGROUP(207, _, _, _, _, _, _, _, _, _),
+ [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _),
+ [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _),
+ [210] = PINGROUP(210, _, _, _, _, _, _, _, _, _),
+ [211] = PINGROUP(211, _, _, _, _, _, _, _, _, _),
+ [212] = PINGROUP(212, _, _, _, _, _, _, _, _, _),
+ [213] = PINGROUP(213, _, _, _, _, _, _, _, _, _),
+ [214] = PINGROUP(214, _, _, _, _, _, _, _, _, _),
+ [215] = PINGROUP(215, _, qdss_cti, _, _, _, _, _, _, _),
+ [216] = PINGROUP(216, _, _, _, _, _, _, _, _, _),
+ [217] = PINGROUP(217, _, qdss_cti, _, _, _, _, _, _, _),
+ [218] = PINGROUP(218, _, _, _, _, _, _, _, _, _),
+ [219] = PINGROUP(219, _, qdss_gpio, _, _, _, _, _, _, _),
+ [220] = PINGROUP(220, _, qdss_gpio, _, _, _, _, _, _, _),
+ [221] = PINGROUP(221, _, qdss_gpio, _, _, _, _, _, _, _),
+ [222] = PINGROUP(222, _, qdss_gpio, _, _, _, _, _, _, _),
+ [223] = PINGROUP(223, _, qdss_gpio, _, _, _, _, _, _, _),
+ [224] = PINGROUP(224, _, qdss_gpio, _, _, _, _, _, _, _),
+ [225] = PINGROUP(225, _, qdss_gpio, _, _, _, _, _, _, _),
+ [226] = PINGROUP(226, _, qdss_gpio, _, _, _, _, _, _, _),
+ [227] = PINGROUP(227, _, qdss_gpio, _, _, _, _, _, _, _),
+ [228] = PINGROUP(228, _, qdss_gpio, _, _, _, _, _, _, _),
+ [229] = PINGROUP(229, qdss_gpio, _, _, _, _, _, _, _, _),
+ [230] = PINGROUP(230, qdss_gpio, _, _, _, _, _, _, _, _),
+ [231] = PINGROUP(231, qdss_gpio, _, _, _, _, _, _, _, _),
+ [232] = PINGROUP(232, qdss_gpio, _, _, _, _, _, _, _, _),
+ [233] = PINGROUP(233, qdss_gpio, _, _, _, _, _, _, _, _),
+ [234] = PINGROUP(234, qdss_gpio, _, _, _, _, _, _, _, _),
+ [235] = PINGROUP(235, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
+ [236] = PINGROUP(236, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
+ [237] = PINGROUP(237, _, _, _, _, _, _, _, _, _),
+ [238] = UFS_RESET(ufs_reset, 0x1f9000),
+ [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1f2000, 14, 6),
+ [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1f2000, 11, 3),
+ [241] = SDC_QDSD_PINGROUP(sdc2_data, 0x1f2000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map x1e80100_pdc_map[] = {
+ { 0, 72 }, { 2, 70 }, { 3, 71 }, { 6, 123 }, { 7, 67 }, { 11, 85 },
+ { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 }, { 26, 129 },
+ { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 }, { 32, 145 }, { 33, 115 },
+ { 34, 130 }, { 35, 146 }, { 36, 147 }, { 39, 80 }, { 43, 148 }, { 47, 149 },
+ { 51, 79 }, { 53, 89 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 },
+ { 67, 88 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 },
+ { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 }, { 94, 140 },
+ { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 }, { 129, 76 }, { 131, 82 },
+ { 134, 83 }, { 141, 93 }, { 144, 94 }, { 147, 96 }, { 148, 97 }, { 150, 102 },
+ { 151, 103 }, { 153, 104 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 },
+ { 172, 99 }, { 181, 101 }, { 184, 116 }, { 193, 40 }, { 193, 117 }, { 196, 108 },
+ { 203, 133 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 },
+ { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 },
+};
+
+static const struct msm_pinctrl_soc_data x1e80100_pinctrl = {
+ .pins = x1e80100_pins,
+ .npins = ARRAY_SIZE(x1e80100_pins),
+ .functions = x1e80100_functions,
+ .nfunctions = ARRAY_SIZE(x1e80100_functions),
+ .groups = x1e80100_groups,
+ .ngroups = ARRAY_SIZE(x1e80100_groups),
+ .ngpios = 239,
+ .wakeirq_map = x1e80100_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(x1e80100_pdc_map),
+ .egpio_func = 9,
+};
+
+static int x1e80100_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &x1e80100_pinctrl);
+}
+
+static const struct of_device_id x1e80100_pinctrl_of_match[] = {
+ { .compatible = "qcom,x1e80100-tlmm", },
+ { },
+};
+
+static struct platform_driver x1e80100_pinctrl_driver = {
+ .driver = {
+ .name = "x1e80100-tlmm",
+ .of_match_table = x1e80100_pinctrl_of_match,
+ },
+ .probe = x1e80100_pinctrl_probe,
+ .remove_new = msm_pinctrl_remove,
+};
+
+static int __init x1e80100_pinctrl_init(void)
+{
+ return platform_driver_register(&x1e80100_pinctrl_driver);
+}
+arch_initcall(x1e80100_pinctrl_init);
+
+static void __exit x1e80100_pinctrl_exit(void)
+{
+ platform_driver_unregister(&x1e80100_pinctrl_driver);
+}
+module_exit(x1e80100_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI X1E80100 TLMM pinctrl driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, x1e80100_pinctrl_of_match);
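The x1e80100_pdc_map table above pairs TLMM GPIO numbers with PDC wakeup lines so that wake-capable pins can be handed to the always-on interrupt parent; for example, GPIO 0 wakes the system via PDC line 72. A minimal lookup sketch (the helper below is hypothetical, shown only for illustration; the real routing is performed by the shared pinctrl-msm core):

/* Hedged sketch: resolve a TLMM GPIO to its PDC wake line.
 * struct msm_gpio_wakeirq_map carries .gpio and .wakeirq members.
 */
static int x1e80100_gpio_to_wakeirq(unsigned int gpio)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(x1e80100_pdc_map); i++)
                if (x1e80100_pdc_map[i].gpio == gpio)
                        return x1e80100_pdc_map[i].wakeirq;

        return -ENOENT; /* pin is not wake-capable */
}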
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index d1e92bbed..757bbc549 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -909,9 +909,11 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
cfg_reg->reg, rw, cfg_reg->reg_width);
- if (n != cfg_reg->nr_enum_ids)
+ if (n != cfg_reg->nr_enum_ids) {
sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+ n = cfg_reg->nr_enum_ids;
+ }
check_enum_ids:
sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index f43f1196f..edcbe7c9a 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -1131,7 +1131,7 @@ static int rza1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
return -EINVAL;
mux_confs = (struct rza1_mux_conf *)func->data;
- for (i = 0; i < grp->num_pins; ++i) {
+ for (i = 0; i < grp->grp.npins; ++i) {
int ret;
ret = rza1_pin_mux_single(rza1_pctl, &mux_confs[i]);
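The grp.npins conversion in this hunk, and the matching ones in the RZ/A2, RZ/G2L, RZ/V2M and StarFive hunks below, track the generic pinctrl group descriptor, which now embeds the generic struct pingroup instead of carrying its own pins/num_pins members. Roughly (a sketch of the layout these hunks assume; see include/linux/pinctrl/pinctrl.h and pinmux.h in this tree):

struct pingroup {
        const char *name;
        const unsigned int *pins;  /* hence int * -> const unsigned int * below */
        size_t npins;              /* was group_desc.num_pins */
};

struct group_desc {
        struct pingroup grp;       /* generic name/pins/npins */
        void *data;                /* driver-private data */
};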
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 990b96d45..af689d7c1 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -447,15 +447,15 @@ static int rza2_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
psel_val = func->data;
- for (i = 0; i < grp->num_pins; ++i) {
+ for (i = 0; i < grp->grp.npins; ++i) {
dev_dbg(priv->dev, "Setting P%c_%d to PSEL=%d\n",
- port_names[RZA2_PIN_ID_TO_PORT(grp->pins[i])],
- RZA2_PIN_ID_TO_PIN(grp->pins[i]),
+ port_names[RZA2_PIN_ID_TO_PORT(grp->grp.pins[i])],
+ RZA2_PIN_ID_TO_PIN(grp->grp.pins[i]),
psel_val[i]);
rza2_set_pin_function(
priv->base,
- RZA2_PIN_ID_TO_PORT(grp->pins[i]),
- RZA2_PIN_ID_TO_PIN(grp->pins[i]),
+ RZA2_PIN_ID_TO_PORT(grp->grp.pins[i]),
+ RZA2_PIN_ID_TO_PIN(grp->grp.pins[i]),
psel_val[i]);
}
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 9de350ad7..01ef6921c 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -57,6 +57,7 @@
#define PIN_CFG_FILCLKSEL BIT(12)
#define PIN_CFG_IOLH_C BIT(13)
#define PIN_CFG_SOFT_PS BIT(14)
+#define PIN_CFG_OEN BIT(15)
#define RZG2L_MPXED_COMMON_PIN_FUNCS(group) \
(PIN_CFG_IOLH_##group | \
@@ -107,8 +108,11 @@
#define IEN(off) (0x1800 + (off) * 8)
#define ISEL(off) (0x2C00 + (off) * 8)
#define SD_CH(off, ch) ((off) + (ch) * 4)
+#define ETH_POC(off, ch) ((off) + (ch) * 4)
#define QSPI (0x3008)
+#define ETH_MODE (0x3018)
+#define PVDD_2500 2 /* I/O domain voltage 2.5V */
#define PVDD_1800 1 /* I/O domain voltage <= 1.8V */
#define PVDD_3300 0 /* I/O domain voltage >= 3.3V */
@@ -116,7 +120,6 @@
#define PWPR_PFCWE BIT(6) /* PFC Register Write Enable */
#define PM_MASK 0x03
-#define PVDD_MASK 0x01
#define PFC_MASK 0x07
#define IEN_MASK 0x01
#define IOLH_MASK 0x03
@@ -135,10 +138,12 @@
* struct rzg2l_register_offsets - specific register offsets
* @pwpr: PWPR register offset
* @sd_ch: SD_CH register offset
+ * @eth_poc: ETH_POC register offset
*/
struct rzg2l_register_offsets {
u16 pwpr;
u16 sd_ch;
+ u16 eth_poc;
};
/**
@@ -167,6 +172,8 @@ enum rzg2l_iolh_index {
* @iolh_groupb_oi: IOLH group B output impedance specific values
* @drive_strength_ua: drive strength in uA is supported (otherwise mA is supported)
* @func_base: base number for port function (see register PFC)
+ * @oen_max_pin: the maximum pin number supporting output enable
+ * @oen_max_port: the maximum port number supporting output enable
*/
struct rzg2l_hwcfg {
const struct rzg2l_register_offsets regs;
@@ -176,6 +183,8 @@ struct rzg2l_hwcfg {
u16 iolh_groupb_oi[4];
bool drive_strength_ua;
u8 func_base;
+ u8 oen_max_pin;
+ u8 oen_max_port;
};
struct rzg2l_dedicated_configs {
@@ -273,7 +282,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
- int *pins;
+ const unsigned int *pins;
func = pinmux_generic_get_function(pctldev, func_selector);
if (!func)
@@ -283,9 +292,9 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
psel_val = func->data;
- pins = group->pins;
+ pins = group->grp.pins;
- for (i = 0; i < group->num_pins; i++) {
+ for (i = 0; i < group->grp.npins; i++) {
unsigned int *pin_data = pctrl->desc.pins[pins[i]].drv_data;
u32 off = RZG2L_PIN_CFG_TO_PORT_OFFSET(*pin_data);
u32 pin = RZG2L_PIN_ID_TO_PIN(pins[i]);
@@ -376,8 +385,11 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto done;
}
- if (num_pinmux)
+ if (num_pinmux) {
nmaps += 1;
+ if (num_configs)
+ nmaps += 1;
+ }
if (num_pins)
nmaps += num_pins;
@@ -435,6 +447,16 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
+ if (num_configs) {
+ ret = rzg2l_map_add_config(&maps[idx], name,
+ PIN_MAP_TYPE_CONFIGS_GROUP,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+
mutex_lock(&pctrl->mutex);
/* Register a single pin group listing all the pins we read from DT */
@@ -591,6 +613,10 @@ static int rzg2l_caps_to_pwr_reg(const struct rzg2l_register_offsets *regs, u32
return SD_CH(regs->sd_ch, 0);
if (caps & PIN_CFG_IO_VMC_SD1)
return SD_CH(regs->sd_ch, 1);
+ if (caps & PIN_CFG_IO_VMC_ETH0)
+ return ETH_POC(regs->eth_poc, 0);
+ if (caps & PIN_CFG_IO_VMC_ETH1)
+ return ETH_POC(regs->eth_poc, 1);
if (caps & PIN_CFG_IO_VMC_QSPI)
return QSPI;
@@ -602,6 +628,7 @@ static int rzg2l_get_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct rzg2l_register_offsets *regs = &hwcfg->regs;
int pwr_reg;
+ u8 val;
if (caps & PIN_CFG_SOFT_PS)
return pctrl->settings[pin].power_source;
@@ -610,7 +637,18 @@ static int rzg2l_get_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
if (pwr_reg < 0)
return pwr_reg;
- return (readl(pctrl->base + pwr_reg) & PVDD_MASK) ? 1800 : 3300;
+ val = readb(pctrl->base + pwr_reg);
+ switch (val) {
+ case PVDD_1800:
+ return 1800;
+ case PVDD_2500:
+ return 2500;
+ case PVDD_3300:
+ return 3300;
+ default:
+ /* Should not happen. */
+ return -EINVAL;
+ }
}
static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps, u32 ps)
@@ -618,17 +656,32 @@ static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct rzg2l_register_offsets *regs = &hwcfg->regs;
int pwr_reg;
+ u8 val;
if (caps & PIN_CFG_SOFT_PS) {
pctrl->settings[pin].power_source = ps;
return 0;
}
+ switch (ps) {
+ case 1800:
+ val = PVDD_1800;
+ break;
+ case 2500:
+ val = PVDD_2500;
+ break;
+ case 3300:
+ val = PVDD_3300;
+ break;
+ default:
+ return -EINVAL;
+ }
+
pwr_reg = rzg2l_caps_to_pwr_reg(regs, caps);
if (pwr_reg < 0)
return pwr_reg;
- writel((ps == 1800) ? PVDD_1800 : PVDD_3300, pctrl->base + pwr_reg);
+ writeb(val, pctrl->base + pwr_reg);
pctrl->settings[pin].power_source = ps;
return 0;
@@ -735,6 +788,66 @@ static bool rzg2l_ds_is_supported(struct rzg2l_pinctrl *pctrl, u32 caps,
return false;
}
+static bool rzg2l_oen_is_supported(u32 caps, u8 pin, u8 max_pin)
+{
+ if (!(caps & PIN_CFG_OEN))
+ return false;
+
+ if (pin > max_pin)
+ return false;
+
+ return true;
+}
+
+static u8 rzg2l_pin_to_oen_bit(u32 offset, u8 pin, u8 max_port)
+{
+ if (pin)
+ pin *= 2;
+
+ if (offset / RZG2L_PINS_PER_PORT == max_port)
+ pin += 1;
+
+ return pin;
+}
+
+static u32 rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8 pin)
+{
+ u8 max_port = pctrl->data->hwcfg->oen_max_port;
+ u8 max_pin = pctrl->data->hwcfg->oen_max_pin;
+ u8 bit;
+
+ if (!rzg2l_oen_is_supported(caps, pin, max_pin))
+ return 0;
+
+ bit = rzg2l_pin_to_oen_bit(offset, pin, max_port);
+
+ return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+}
+
+static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, u32 caps, u32 offset, u8 pin, u8 oen)
+{
+ u8 max_port = pctrl->data->hwcfg->oen_max_port;
+ u8 max_pin = pctrl->data->hwcfg->oen_max_pin;
+ unsigned long flags;
+ u8 val, bit;
+
+ if (!rzg2l_oen_is_supported(caps, pin, max_pin))
+ return -EINVAL;
+
+ bit = rzg2l_pin_to_oen_bit(offset, pin, max_port);
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = readb(pctrl->base + ETH_MODE);
+ if (oen)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ writeb(val, pctrl->base + ETH_MODE);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+
+ return 0;
+}
+
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int _pin,
unsigned long *config)
@@ -772,6 +885,12 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
return -EINVAL;
break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ arg = rzg2l_read_oen(pctrl, cfg, _pin, bit);
+ if (!arg)
+ return -EINVAL;
+ break;
+
case PIN_CONFIG_POWER_SOURCE:
ret = rzg2l_get_power_source(pctrl, _pin, cfg);
if (ret < 0)
@@ -842,7 +961,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
struct rzg2l_pinctrl_pin_settings settings = pctrl->settings[_pin];
unsigned int *pin_data = pin->drv_data;
enum pin_config_param param;
- unsigned int i;
+ unsigned int i, arg, index;
u32 cfg, off;
int ret;
u8 bit;
@@ -864,24 +983,28 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(_configs[i]);
switch (param) {
- case PIN_CONFIG_INPUT_ENABLE: {
- unsigned int arg =
- pinconf_to_config_argument(_configs[i]);
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = pinconf_to_config_argument(_configs[i]);
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
rzg2l_rmw_pin_config(pctrl, IEN(off), bit, IEN_MASK, !!arg);
break;
- }
+
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ arg = pinconf_to_config_argument(_configs[i]);
+ ret = rzg2l_write_oen(pctrl, cfg, _pin, bit, !!arg);
+ if (ret)
+ return ret;
+ break;
case PIN_CONFIG_POWER_SOURCE:
settings.power_source = pinconf_to_config_argument(_configs[i]);
break;
- case PIN_CONFIG_DRIVE_STRENGTH: {
- unsigned int arg = pinconf_to_config_argument(_configs[i]);
- unsigned int index;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = pinconf_to_config_argument(_configs[i]);
if (!(cfg & PIN_CFG_IOLH_A) || hwcfg->drive_strength_ua)
return -EINVAL;
@@ -896,7 +1019,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
break;
- }
case PIN_CONFIG_DRIVE_STRENGTH_UA:
if (!(cfg & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C)) ||
@@ -906,9 +1028,8 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
settings.drive_strength_ua = pinconf_to_config_argument(_configs[i]);
break;
- case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
- unsigned int arg = pinconf_to_config_argument(_configs[i]);
- unsigned int index;
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS:
+ arg = pinconf_to_config_argument(_configs[i]);
if (!(cfg & PIN_CFG_IOLH_B) || !hwcfg->iolh_groupb_oi[0])
return -EINVAL;
@@ -922,7 +1043,6 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
rzg2l_rmw_pin_config(pctrl, IOLH(off), bit, IOLH_MASK, index);
break;
- }
default:
return -EOPNOTSUPP;
@@ -1323,7 +1443,8 @@ static const u32 r9a07g043_gpio_configs[] = {
static const u32 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(4, 0x20, RZG3S_MPXED_PIN_FUNCS(A)), /* P0 */
RZG2L_GPIO_PORT_PACK(5, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
- PIN_CFG_IO_VMC_ETH0)), /* P1 */
+ PIN_CFG_IO_VMC_ETH0)) |
+ PIN_CFG_OEN | PIN_CFG_IEN, /* P1 */
RZG2L_GPIO_PORT_PACK(4, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
PIN_CFG_IO_VMC_ETH0)), /* P2 */
RZG2L_GPIO_PORT_PACK(4, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
@@ -1333,7 +1454,8 @@ static const u32 r9a08g045_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(5, 0x21, RZG3S_MPXED_PIN_FUNCS(A)), /* P5 */
RZG2L_GPIO_PORT_PACK(5, 0x22, RZG3S_MPXED_PIN_FUNCS(A)), /* P6 */
RZG2L_GPIO_PORT_PACK(5, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
- PIN_CFG_IO_VMC_ETH1)), /* P7 */
+ PIN_CFG_IO_VMC_ETH1)) |
+ PIN_CFG_OEN | PIN_CFG_IEN, /* P7 */
RZG2L_GPIO_PORT_PACK(5, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
PIN_CFG_IO_VMC_ETH1)), /* P8 */
RZG2L_GPIO_PORT_PACK(4, 0x36, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_C |
@@ -1576,6 +1698,7 @@ static const struct irq_chip rzg2l_gpio_irqchip = {
.irq_set_type = rzg2l_gpio_irq_set_type,
.irq_eoi = rzg2l_gpio_irqc_eoi,
.irq_print_chip = rzg2l_gpio_irq_print_chip,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
@@ -1877,6 +2000,7 @@ static const struct rzg2l_hwcfg rzg2l_hwcfg = {
.regs = {
.pwpr = 0x3014,
.sd_ch = 0x3000,
+ .eth_poc = 0x300c,
},
.iolh_groupa_ua = {
/* 3v3 power source */
@@ -1889,6 +2013,7 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
.regs = {
.pwpr = 0x3000,
.sd_ch = 0x3004,
+ .eth_poc = 0x3010,
},
.iolh_groupa_ua = {
/* 1v8 power source */
@@ -1912,6 +2037,8 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
},
.drive_strength_ua = true,
.func_base = 1,
+ .oen_max_pin = 1, /* Pin 1 of P1 and P7 is the maximum OEN pin. */
+ .oen_max_port = 7, /* P7_1 is the maximum OEN port. */
};
static struct rzg2l_pinctrl_data r9a07g043_data = {
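On RZ/G3S the two hwcfg fields above bound the ETH_MODE bit arithmetic in rzg2l_pin_to_oen_bit(). Worked out for the ports that gain PIN_CFG_OEN in this patch (P1 and P7), with oen_max_pin = 1 and oen_max_port = 7 as set above:

/* bit = pin ? pin * 2 : 0, plus 1 when the pin sits on port 7:
 *   P1_0 -> bit 0    P7_0 -> bit 1
 *   P1_1 -> bit 2    P7_1 -> bit 3
 * The bit is active-low: rzg2l_write_oen() clears it to enable the
 * output driver and sets it to disable it.
 */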
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 21d7d5ac8..0767a5ac2 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -165,7 +165,7 @@ static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
- int *pins;
+ const unsigned int *pins;
func = pinmux_generic_get_function(pctldev, func_selector);
if (!func)
@@ -175,9 +175,9 @@ static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
psel_val = func->data;
- pins = group->pins;
+ pins = group->grp.pins;
- for (i = 0; i < group->num_pins; i++) {
+ for (i = 0; i < group->grp.npins; i++) {
dev_dbg(pctrl->dev, "port:%u pin: %u PSEL:%u\n",
RZV2M_PIN_ID_TO_PORT(pins[i]), RZV2M_PIN_ID_TO_PIN(pins[i]),
psel_val[i]);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index cb965cf93..5480e0884 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -726,6 +726,146 @@ const struct samsung_pinctrl_of_match_data exynosautov9_of_data __initconst = {
.num_ctrl = ARRAY_SIZE(exynosautov9_pin_ctrl),
};
+/* pin banks of exynosautov920 pin-controller 0 (ALIVE) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks0[] = {
+ EXYNOSV920_PIN_BANK_EINTW(8, 0x0000, "gpa0", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTW(2, 0x1000, "gpa1", 0x18, 0x20, 0x24),
+ EXYNOS850_PIN_BANK_EINTN(2, 0x2000, "gpq0"),
+};
+
+/* pin banks of exynosautov920 pin-controller 1 (AUD) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks1[] = {
+ EXYNOSV920_PIN_BANK_EINTG(7, 0x0000, "gpb0", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(6, 0x1000, "gpb1", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x2000, "gpb2", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x3000, "gpb3", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x4000, "gpb4", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(5, 0x5000, "gpb5", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(5, 0x6000, "gpb6", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 2 (HSI0) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks2[] = {
+ EXYNOSV920_PIN_BANK_EINTG(6, 0x0000, "gph0", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(2, 0x1000, "gph1", 0x18, 0x20, 0x24),
+};
+
+/* pin banks of exynosautov920 pin-controller 3 (HSI1) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks3[] = {
+ EXYNOSV920_PIN_BANK_EINTG(7, 0x000, "gph8", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 4 (HSI2) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks4[] = {
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x0000, "gph3", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(7, 0x1000, "gph4", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x2000, "gph5", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(7, 0x3000, "gph6", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 5 (HSI2UFS) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks5[] = {
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x000, "gph2", 0x18, 0x20, 0x24),
+};
+
+/* pin banks of exynosautov920 pin-controller 6 (PERIC0) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks6[] = {
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x0000, "gpp0", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x1000, "gpp1", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x2000, "gpp2", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(5, 0x3000, "gpg0", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x4000, "gpp3", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x5000, "gpp4", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x6000, "gpg2", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x7000, "gpg5", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(3, 0x8000, "gpg3", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(5, 0x9000, "gpg4", 0x18, 0x24, 0x28),
+};
+
+/* pin banks of exynosautov920 pin-controller 7 (PERIC1) */
+static const struct samsung_pin_bank_data exynosautov920_pin_banks7[] = {
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x0000, "gpp5", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(5, 0x1000, "gpp6", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x2000, "gpp10", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x3000, "gpp7", 0x18, 0x24, 0x28),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x4000, "gpp8", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x5000, "gpp11", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x6000, "gpp9", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(4, 0x7000, "gpp12", 0x18, 0x20, 0x24),
+ EXYNOSV920_PIN_BANK_EINTG(8, 0x8000, "gpg1", 0x18, 0x24, 0x28),
+};
+
+static const struct samsung_retention_data exynosautov920_retention_data __initconst = {
+ .regs = NULL,
+ .nr_regs = 0,
+ .value = 0,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 ALIVE data */
+ .pin_banks = exynosautov920_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks0),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynosautov920_retention_data,
+ }, {
+ /* pin-controller instance 1 AUD data */
+ .pin_banks = exynosautov920_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks1),
+ }, {
+ /* pin-controller instance 2 HSI0 data */
+ .pin_banks = exynosautov920_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 HSI1 data */
+ .pin_banks = exynosautov920_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 4 HSI2 data */
+ .pin_banks = exynosautov920_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 5 HSI2UFS data */
+ .pin_banks = exynosautov920_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 6 PERIC0 data */
+ .pin_banks = exynosautov920_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 7 PERIC1 data */
+ .pin_banks = exynosautov920_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data exynosautov920_of_data __initconst = {
+ .ctrl = exynosautov920_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(exynosautov920_pin_ctrl),
+};
+
/*
* Pinctrl driver data for Tesla FSD SoC. FSD SoC includes three
* gpio/pin-mux/pinconfig controllers.
@@ -796,3 +936,143 @@ const struct samsung_pinctrl_of_match_data fsd_of_data __initconst = {
.ctrl = fsd_pin_ctrl,
.num_ctrl = ARRAY_SIZE(fsd_pin_ctrl),
};
+
+/* pin banks of gs101 pin-controller (ALIVE) */
+static const struct samsung_pin_bank_data gs101_pin_alive[] = {
+ EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10),
+ EXYNOS850_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14),
+ EXYNOS850_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18),
+ EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c),
+};
+
+/* pin banks of gs101 pin-controller (FAR_ALIVE) */
+static const struct samsung_pin_bank_data gs101_pin_far_alive[] = {
+ EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c),
+};
+
+/* pin banks of gs101 pin-controller (GSACORE) */
+static const struct samsung_pin_bank_data gs101_pin_gsacore[] = {
+ EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08),
+};
+
+/* pin banks of gs101 pin-controller (GSACTRL) */
+static const struct samsung_pin_bank_data gs101_pin_gsactrl[] = {
+ EXYNOS850_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00),
+};
+
+/* pin banks of gs101 pin-controller (PERIC0) */
+static const struct samsung_pin_bank_data gs101_pin_peric0[] = {
+ EXYNOS850_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c),
+};
+
+/* pin banks of gs101 pin-controller (PERIC1) */
+static const struct samsung_pin_bank_data gs101_pin_peric1[] = {
+ EXYNOS850_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c),
+};
+
+/* pin banks of gs101 pin-controller (HSI1) */
+static const struct samsung_pin_bank_data gs101_pin_hsi1[] = {
+ EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04),
+};
+
+/* pin banks of gs101 pin-controller (HSI2) */
+static const struct samsung_pin_bank_data gs101_pin_hsi2[] = {
+ EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08),
+};
+
+static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {
+ {
+ /* pin banks of gs101 pin-controller (ALIVE) */
+ .pin_banks = gs101_pin_alive,
+ .nr_banks = ARRAY_SIZE(gs101_pin_alive),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin banks of gs101 pin-controller (FAR_ALIVE) */
+ .pin_banks = gs101_pin_far_alive,
+ .nr_banks = ARRAY_SIZE(gs101_pin_far_alive),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin banks of gs101 pin-controller (GSACORE) */
+ .pin_banks = gs101_pin_gsacore,
+ .nr_banks = ARRAY_SIZE(gs101_pin_gsacore),
+ }, {
+ /* pin banks of gs101 pin-controller (GSACTRL) */
+ .pin_banks = gs101_pin_gsactrl,
+ .nr_banks = ARRAY_SIZE(gs101_pin_gsactrl),
+ }, {
+ /* pin banks of gs101 pin-controller (PERIC0) */
+ .pin_banks = gs101_pin_peric0,
+ .nr_banks = ARRAY_SIZE(gs101_pin_peric0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin banks of gs101 pin-controller (PERIC1) */
+ .pin_banks = gs101_pin_peric1,
+ .nr_banks = ARRAY_SIZE(gs101_pin_peric1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin banks of gs101 pin-controller (HSI1) */
+ .pin_banks = gs101_pin_hsi1,
+ .nr_banks = ARRAY_SIZE(gs101_pin_hsi1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin banks of gs101 pin-controller (HSI2) */
+ .pin_banks = gs101_pin_hsi2,
+ .nr_banks = ARRAY_SIZE(gs101_pin_hsi2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data gs101_of_data __initconst = {
+ .ctrl = gs101_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(gs101_pin_ctrl),
+};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 6b58ec84e..871c1eb46 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -52,10 +52,15 @@ static void exynos_irq_mask(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+ unsigned long reg_mask;
unsigned int mask;
unsigned long flags;
+ if (bank->eint_mask_offset)
+ reg_mask = bank->pctl_offset + bank->eint_mask_offset;
+ else
+ reg_mask = our_chip->eint_mask + bank->eint_offset;
+
raw_spin_lock_irqsave(&bank->slock, flags);
mask = readl(bank->eint_base + reg_mask);
@@ -70,7 +75,12 @@ static void exynos_irq_ack(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- unsigned long reg_pend = our_chip->eint_pend + bank->eint_offset;
+ unsigned long reg_pend;
+
+ if (bank->eint_pend_offset)
+ reg_pend = bank->pctl_offset + bank->eint_pend_offset;
+ else
+ reg_pend = our_chip->eint_pend + bank->eint_offset;
writel(1 << irqd->hwirq, bank->eint_base + reg_pend);
}
@@ -80,7 +90,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
+ unsigned long reg_mask;
unsigned int mask;
unsigned long flags;
@@ -95,6 +105,11 @@ static void exynos_irq_unmask(struct irq_data *irqd)
if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK)
exynos_irq_ack(irqd);
+ if (bank->eint_mask_offset)
+ reg_mask = bank->pctl_offset + bank->eint_mask_offset;
+ else
+ reg_mask = our_chip->eint_mask + bank->eint_offset;
+
raw_spin_lock_irqsave(&bank->slock, flags);
mask = readl(bank->eint_base + reg_mask);
@@ -111,7 +126,7 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
unsigned int shift = EXYNOS_EINT_CON_LEN * irqd->hwirq;
unsigned int con, trig_type;
- unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
+ unsigned long reg_con;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -139,6 +154,11 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
else
irq_set_handler_locked(irqd, handle_level_irq);
+ if (bank->eint_con_offset)
+ reg_con = bank->pctl_offset + bank->eint_con_offset;
+ else
+ reg_con = our_chip->eint_con + bank->eint_offset;
+
con = readl(bank->eint_base + reg_con);
con &= ~(EXYNOS_EINT_CON_MASK << shift);
con |= trig_type << shift;
@@ -147,6 +167,19 @@ static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
return 0;
}
+static int exynos_irq_set_affinity(struct irq_data *irqd,
+ const struct cpumask *dest, bool force)
+{
+ struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
+ struct samsung_pinctrl_drv_data *d = bank->drvdata;
+ struct irq_data *parent = irq_get_irq_data(d->irq);
+
+ if (parent)
+ return parent->chip->irq_set_affinity(parent, dest, force);
+
+ return -EINVAL;
+}
+
static int exynos_irq_request_resources(struct irq_data *irqd)
{
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
@@ -212,6 +245,7 @@ static const struct exynos_irq_chip exynos_gpio_irq_chip __initconst = {
.irq_mask = exynos_irq_mask,
.irq_ack = exynos_irq_ack,
.irq_set_type = exynos_irq_set_type,
+ .irq_set_affinity = exynos_irq_set_affinity,
.irq_request_resources = exynos_irq_request_resources,
.irq_release_resources = exynos_irq_release_resources,
},
@@ -247,7 +281,10 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
unsigned int svc, group, pin;
int ret;
- svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
+ if (bank->eint_con_offset)
+ svc = readl(bank->eint_base + EXYNOSAUTO_SVC_OFFSET);
+ else
+ svc = readl(bank->eint_base + EXYNOS_SVC_OFFSET);
group = EXYNOS_SVC_GROUP(svc);
pin = svc & EXYNOS_SVC_NUM_MASK;
@@ -456,6 +493,22 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
+static const struct exynos_irq_chip exynosautov920_wkup_irq_chip __initconst = {
+ .chip = {
+ .name = "exynosautov920_wkup_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ .irq_set_wake = exynos_wkup_irq_set_wake,
+ .irq_request_resources = exynos_irq_request_resources,
+ .irq_release_resources = exynos_irq_release_resources,
+ },
+ .eint_wake_mask_value = &eint_wake_mask_value,
+ .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
+};
+
/* list of external wakeup controllers supported */
static const struct of_device_id exynos_wkup_irq_ids[] = {
{ .compatible = "samsung,s5pv210-wakeup-eint",
@@ -468,6 +521,8 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
.data = &exynos7_wkup_irq_chip },
{ .compatible = "samsung,exynosautov9-wakeup-eint",
.data = &exynos7_wkup_irq_chip },
+ { .compatible = "samsung,exynosautov920-wakeup-eint",
+ .data = &exynosautov920_wkup_irq_chip },
{ }
};
@@ -638,7 +693,7 @@ static void exynos_pinctrl_suspend_bank(
struct samsung_pin_bank *bank)
{
struct exynos_eint_gpio_save *save = bank->soc_priv;
- void __iomem *regs = bank->eint_base;
+ const void __iomem *regs = bank->eint_base;
save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset);
@@ -655,6 +710,19 @@ static void exynos_pinctrl_suspend_bank(
pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
+static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata,
+ struct samsung_pin_bank *bank)
+{
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ const void __iomem *regs = bank->eint_base;
+
+ save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset);
+ save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset);
+
+ pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
+ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
+}
+
void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_bank *bank = drvdata->pin_banks;
@@ -662,8 +730,12 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
int i;
for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
- if (bank->eint_type == EINT_TYPE_GPIO)
- exynos_pinctrl_suspend_bank(drvdata, bank);
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ if (bank->eint_con_offset)
+ exynosauto_pinctrl_suspend_bank(drvdata, bank);
+ else
+ exynos_pinctrl_suspend_bank(drvdata, bank);
+ }
else if (bank->eint_type == EINT_TYPE_WKUP) {
if (!irq_chip) {
irq_chip = bank->irq_chip;
@@ -704,14 +776,33 @@ static void exynos_pinctrl_resume_bank(
+ bank->eint_offset);
}
+static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata,
+ struct samsung_pin_bank *bank)
+{
+ struct exynos_eint_gpio_save *save = bank->soc_priv;
+ void __iomem *regs = bank->eint_base;
+
+ pr_debug("%s: con %#010x => %#010x\n", bank->name,
+ readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask);
+
+ writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset);
+ writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset);
+}
+
void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_bank *bank = drvdata->pin_banks;
int i;
for (i = 0; i < drvdata->nr_banks; ++i, ++bank)
- if (bank->eint_type == EINT_TYPE_GPIO)
- exynos_pinctrl_resume_bank(drvdata, bank);
+ if (bank->eint_type == EINT_TYPE_GPIO) {
+ if (bank->eint_con_offset)
+ exynosauto_pinctrl_resume_bank(drvdata, bank);
+ else
+ exynos_pinctrl_resume_bank(drvdata, bank);
+ }
}
static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
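The pattern repeated through exynos_irq_mask/ack/unmask/set_type and the suspend/resume helpers above selects between the legacy layout (a per-chip register block indexed by the bank's eint_offset) and the per-bank offsets introduced for ExynosAuto v920. Taking the gpa1 bank declared earlier as EXYNOSV920_PIN_BANK_EINTW(2, 0x1000, "gpa1", 0x18, 0x20, 0x24), the addresses work out as:

/* v920 path: reg = bank->pctl_offset + bank->eint_*_offset
 *   con  = 0x1000 + 0x18 = 0x1018
 *   mask = 0x1000 + 0x20 = 0x1020
 *   pend = 0x1000 + 0x24 = 0x1024
 * legacy path: reg = our_chip->eint_* + bank->eint_offset
 */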
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 3ac52c2cf..305cb1d31 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -31,6 +31,7 @@
#define EXYNOS7_WKUP_EMASK_OFFSET 0x900
#define EXYNOS7_WKUP_EPEND_OFFSET 0xA00
#define EXYNOS_SVC_OFFSET 0xB08
+#define EXYNOSAUTO_SVC_OFFSET 0xF008
/* helpers to access interrupt service register */
#define EXYNOS_SVC_GROUP_SHIFT 3
@@ -140,6 +141,30 @@
.name = id \
}
+#define EXYNOSV920_PIN_BANK_EINTG(pins, reg, id, con_offs, mask_offs, pend_offs) \
+ { \
+ .type = &exynos850_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_con_offset = con_offs, \
+ .eint_mask_offset = mask_offs, \
+ .eint_pend_offset = pend_offs, \
+ .name = id \
+ }
+
+#define EXYNOSV920_PIN_BANK_EINTW(pins, reg, id, con_offs, mask_offs, pend_offs) \
+ { \
+ .type = &exynos850_bank_type_alive, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_WKUP, \
+ .eint_con_offset = con_offs, \
+ .eint_mask_offset = mask_offs, \
+ .eint_pend_offset = pend_offs, \
+ .name = id \
+ }
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 79babbb39..ed07e23e0 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -565,7 +565,7 @@ static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
/* gpiolib gpio_get callback function */
static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- void __iomem *reg;
+ const void __iomem *reg;
u32 data;
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
const struct samsung_pin_bank_type *type = bank->type;
@@ -1106,6 +1106,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
bank->eint_type = bdata->eint_type;
bank->eint_mask = bdata->eint_mask;
bank->eint_offset = bdata->eint_offset;
+ bank->eint_con_offset = bdata->eint_con_offset;
+ bank->eint_mask_offset = bdata->eint_mask_offset;
+ bank->eint_pend_offset = bdata->eint_pend_offset;
bank->name = bdata->name;
raw_spin_lock_init(&bank->slock);
@@ -1201,7 +1204,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev)
for (i = 0; i < drvdata->nr_banks; i++) {
struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
- void __iomem *reg = bank->pctl_base + bank->pctl_offset;
+ const void __iomem *reg = bank->pctl_base + bank->pctl_offset;
const u8 *offs = bank->type->reg_offset;
const u8 *widths = bank->type->fld_width;
enum pincfg_type type;
@@ -1309,6 +1312,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &s5pv210_of_data },
#endif
#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
+ { .compatible = "google,gs101-pinctrl",
+ .data = &gs101_of_data },
{ .compatible = "samsung,exynos5433-pinctrl",
.data = &exynos5433_of_data },
{ .compatible = "samsung,exynos7-pinctrl",
@@ -1319,6 +1324,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &exynos850_of_data },
{ .compatible = "samsung,exynosautov9-pinctrl",
.data = &exynosautov9_of_data },
+ { .compatible = "samsung,exynosautov920-pinctrl",
+ .data = &exynosautov920_of_data },
{ .compatible = "tesla,fsd-pinctrl",
.data = &fsd_of_data },
#endif
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 9b3db50ad..ab791afaa 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -122,6 +122,9 @@ struct samsung_pin_bank_type {
* @eint_type: type of the external interrupt supported by the bank.
* @eint_mask: bit mask of pins which support EINT function.
* @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
+ * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
+ * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
* @name: name to be prefixed for each pin in this pin bank.
*/
struct samsung_pin_bank_data {
@@ -133,6 +136,9 @@ struct samsung_pin_bank_data {
enum eint_type eint_type;
u32 eint_mask;
u32 eint_offset;
+ u32 eint_con_offset;
+ u32 eint_mask_offset;
+ u32 eint_pend_offset;
const char *name;
};
@@ -147,6 +153,9 @@ struct samsung_pin_bank_data {
* @eint_type: type of the external interrupt supported by the bank.
* @eint_mask: bit mask of pins which support EINT function.
* @eint_offset: SoC-specific EINT register or interrupt offset of bank.
+ * @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
+ * @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
+ * @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
* @name: name to be prefixed for each pin in this pin bank.
* @id: id of the bank, propagated to the pin range.
* @pin_base: starting pin number of the bank.
@@ -170,6 +179,9 @@ struct samsung_pin_bank {
enum eint_type eint_type;
u32 eint_mask;
u32 eint_offset;
+ u32 eint_con_offset;
+ u32 eint_mask_offset;
+ u32 eint_pend_offset;
const char *name;
u32 id;
@@ -350,7 +362,9 @@ extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
extern const struct samsung_pinctrl_of_match_data exynos7885_of_data;
extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
extern const struct samsung_pinctrl_of_match_data exynosautov9_of_data;
+extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data;
extern const struct samsung_pinctrl_of_match_data fsd_of_data;
+extern const struct samsung_pinctrl_of_match_data gs101_of_data;
extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index b29b0ab98..6df7a310c 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -654,7 +654,7 @@ static int starfive_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
pinmux = group->data;
- for (i = 0; i < group->num_pins; i++) {
+ for (i = 0; i < group->grp.npins; i++) {
u32 v = pinmux[i];
unsigned int gpio = starfive_pinmux_to_gpio(v);
u32 dout = starfive_pinmux_to_dout(v);
@@ -797,7 +797,7 @@ static int starfive_pinconf_group_get(struct pinctrl_dev *pctldev,
if (!group)
return -EINVAL;
- return starfive_pinconf_get(pctldev, group->pins[0], config);
+ return starfive_pinconf_get(pctldev, group->grp.pins[0], config);
}
static int starfive_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -876,8 +876,8 @@ static int starfive_pinconf_group_set(struct pinctrl_dev *pctldev,
}
}
- for (i = 0; i < group->num_pins; i++)
- starfive_padctl_rmw(sfp, group->pins[i], mask, value);
+ for (i = 0; i < group->grp.npins; i++)
+ starfive_padctl_rmw(sfp, group->grp.pins[i], mask, value);
return 0;
}
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 6de11a405..9609eb1ec 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -307,7 +307,7 @@ static int jh7110_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
pinmux = group->data;
- for (i = 0; i < group->num_pins; i++) {
+ for (i = 0; i < group->grp.npins; i++) {
u32 v = pinmux[i];
if (info->jh7110_set_one_pin_mux)
@@ -437,7 +437,7 @@ static int jh7110_pinconf_group_get(struct pinctrl_dev *pctldev,
if (!group)
return -EINVAL;
- return jh7110_pinconf_get(pctldev, group->pins[0], config);
+ return jh7110_pinconf_get(pctldev, group->grp.pins[0], config);
}
static int jh7110_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -508,8 +508,8 @@ static int jh7110_pinconf_group_set(struct pinctrl_dev *pctldev,
}
}
- for (i = 0; i < group->num_pins; i++)
- jh7110_padcfg_rmw(sfp, group->pins[i], mask, value);
+ for (i = 0; i < group->grp.npins; i++)
+ jh7110_padcfg_rmw(sfp, group->grp.pins[i], mask, value);
return 0;
}
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 603f900e8..978ccdbaf 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -240,9 +240,8 @@ static int stm32_gpio_direction_output(struct gpio_chip *chip,
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
__stm32_gpio_set(bank, offset, value);
- pinctrl_gpio_direction_output(chip, offset);
- return 0;
+ return pinctrl_gpio_direction_output(chip, offset);
}
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
index 73f091cd8..23aebd469 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
@@ -2562,7 +2562,7 @@ static const struct of_device_id stm32mp257_pctrl_match[] = {
};
static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
};
static struct platform_driver stm32mp257_pinctrl_driver = {
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index bb5ef391d..ae156f779 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -4,6 +4,7 @@
* Copyright (C) Sunplus Tech / Tibbo Tech.
*/
+#include <linux/cleanup.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -500,16 +501,15 @@ static int sppctl_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- const char *label;
int i;
for (i = 0; i < chip->ngpio; i++) {
- label = gpiochip_is_requested(chip, i);
- if (!label)
- label = "";
+ char *label __free(kfree) = gpiochip_dup_line_label(chip, i);
+ if (IS_ERR(label))
+ continue;
seq_printf(s, " gpio-%03d (%-16.16s | %-16.16s)", i + chip->base,
- chip->names[i], label);
+ chip->names[i], label ?: "");
seq_printf(s, " %c", sppctl_gpio_get_direction(chip, i) ? 'I' : 'O');
seq_printf(s, ":%d", sppctl_gpio_get(chip, i));
seq_printf(s, " %s", sppctl_first_get(chip, i) ? "gpi" : "mux");
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 6bf8db424..ccfa3870a 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -636,6 +636,14 @@ static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "\n\t%s=%u",
strip_prefix(cfg_params[i].property), val);
}
+
+ if (g->mux_reg >= 0) {
+ /* read pinmux function and dump to seq_file */
+ val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
+ val = g->funcs[(val >> g->mux_bit) & 0x3];
+
+ seq_printf(s, "\n\tfunction=%s", pmx->functions[val].name);
+ }
}
static void tegra_pinconf_config_dbg_show(struct pinctrl_dev *pctldev,
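The new dump decodes the two-bit mux field before printing the function name: the register value is shifted down by mux_bit, masked to 0x3, and used to index the group's four candidate functions. With hypothetical numbers:

/* mux_bit = 4, register value 0x30: (0x30 >> 4) & 0x3 = 3, so the
 * dump prints pmx->functions[g->funcs[3]].name. */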
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 091fdc154..6bf6f0e7b 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -454,7 +454,7 @@ static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
debug_info->panicinfo_blob.data = data;
debug_info->panicinfo_blob.size = ret;
- debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir,
+ debugfs_create_blob("panicinfo", 0444, debug_info->dir,
&debug_info->panicinfo_blob);
return 0;
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index cb2031cf7..5ac37bd02 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -367,55 +367,33 @@ static void ish_event_cb(struct ishtp_cl_device *cl_device)
/**
* cros_ish_init() - Init function for ISHTP client
* @cros_ish_cl: ISHTP client instance
+ * @reset: true if called from reset handler
*
 * This function completes the initialization of the client.
*
* Return: 0 for success, negative error code for failure.
*/
-static int cros_ish_init(struct ishtp_cl *cros_ish_cl)
+static int cros_ish_init(struct ishtp_cl *cros_ish_cl, bool reset)
{
int rv;
- struct ishtp_device *dev;
- struct ishtp_fw_client *fw_client;
struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
- rv = ishtp_cl_link(cros_ish_cl);
- if (rv) {
- dev_err(cl_data_to_dev(client_data),
- "ishtp_cl_link failed\n");
- return rv;
- }
-
- dev = ishtp_get_ishtp_device(cros_ish_cl);
-
- /* Connect to firmware client */
- ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE);
- ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE);
-
- fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid);
- if (!fw_client) {
- dev_err(cl_data_to_dev(client_data),
- "ish client uuid not found\n");
- rv = -ENOENT;
- goto err_cl_unlink;
- }
-
- ishtp_cl_set_fw_client_id(cros_ish_cl,
- ishtp_get_fw_client_id(fw_client));
- ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_CONNECTING);
-
- rv = ishtp_cl_connect(cros_ish_cl);
+ rv = ishtp_cl_establish_connection(cros_ish_cl,
+ &cros_ec_ishtp_id_table[0].guid,
+ CROS_ISH_CL_TX_RING_SIZE,
+ CROS_ISH_CL_RX_RING_SIZE,
+ reset);
if (rv) {
dev_err(cl_data_to_dev(client_data),
"client connect fail\n");
- goto err_cl_unlink;
+ goto err_cl_disconnect;
}
ishtp_register_event_cb(client_data->cl_device, ish_event_cb);
return 0;
-err_cl_unlink:
- ishtp_cl_unlink(cros_ish_cl);
+err_cl_disconnect:
+ ishtp_cl_destroy_connection(cros_ish_cl, reset);
return rv;
}
@@ -427,10 +405,7 @@ err_cl_unlink:
*/
static void cros_ish_deinit(struct ishtp_cl *cros_ish_cl)
{
- ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_DISCONNECTING);
- ishtp_cl_disconnect(cros_ish_cl);
- ishtp_cl_unlink(cros_ish_cl);
- ishtp_cl_flush_queues(cros_ish_cl);
+ ishtp_cl_destroy_connection(cros_ish_cl, false);
/* Disband and free all Tx and Rx client-level rings */
ishtp_cl_free(cros_ish_cl);
@@ -592,7 +567,6 @@ static void reset_handler(struct work_struct *work)
int rv;
struct device *dev;
struct ishtp_cl *cros_ish_cl;
- struct ishtp_cl_device *cl_device;
struct ishtp_cl_data *client_data =
container_of(work, struct ishtp_cl_data, work_ishtp_reset);
@@ -600,26 +574,11 @@ static void reset_handler(struct work_struct *work)
down_write(&init_lock);
cros_ish_cl = client_data->cros_ish_cl;
- cl_device = client_data->cl_device;
- /* Unlink, flush queues & start again */
- ishtp_cl_unlink(cros_ish_cl);
- ishtp_cl_flush_queues(cros_ish_cl);
- ishtp_cl_free(cros_ish_cl);
-
- cros_ish_cl = ishtp_cl_allocate(cl_device);
- if (!cros_ish_cl) {
- up_write(&init_lock);
- return;
- }
-
- ishtp_set_drvdata(cl_device, cros_ish_cl);
- ishtp_set_client_data(cros_ish_cl, client_data);
- client_data->cros_ish_cl = cros_ish_cl;
+ ishtp_cl_destroy_connection(cros_ish_cl, true);
- rv = cros_ish_init(cros_ish_cl);
+ rv = cros_ish_init(cros_ish_cl, true);
if (rv) {
- ishtp_cl_free(cros_ish_cl);
dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
up_write(&init_lock);
return;
@@ -672,7 +631,7 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
INIT_WORK(&client_data->work_ec_evt,
ish_evt_handler);
- rv = cros_ish_init(cros_ish_cl);
+ rv = cros_ish_init(cros_ish_cl, false);
if (rv)
goto end_ishtp_cl_init_error;
@@ -690,10 +649,7 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
return 0;
end_cros_ec_dev_init_error:
- ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_DISCONNECTING);
- ishtp_cl_disconnect(cros_ish_cl);
- ishtp_cl_unlink(cros_ish_cl);
- ishtp_cl_flush_queues(cros_ish_cl);
+ ishtp_cl_destroy_connection(cros_ish_cl, false);
ishtp_put_device(cl_device);
end_ishtp_cl_init_error:
ishtp_cl_free(cros_ish_cl);
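The net effect of this hunk is that the probe, reset and remove paths now share one connect/teardown pair instead of open-coding link/connect/unlink/flush. Schematically:

/* probe:  cros_ish_init(cl, false)           -- fresh connection
 * reset:  ishtp_cl_destroy_connection(cl, true);
 *         cros_ish_init(cl, true)            -- reconnect, keep client
 * remove: ishtp_cl_destroy_connection(cl, false);
 *         ishtp_cl_free(cl)
 */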
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
index 71948dade..120521951 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data);
* @sensorhub: Sensor Hub object
* @on: true when events are requested.
*
- * To be called before sleeping or when noone is listening.
+ * To be called before sleeping or when no one is listening.
* Return: 0 on success, or an error when we can not communicate with the EC.
*
*/
@@ -133,33 +133,61 @@ int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
return ret;
}
-static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2)
+static void cros_ec_sensor_ring_median_swap(s64 *a, s64 *b)
{
- s64 v1 = *(s64 *)pv1;
- s64 v2 = *(s64 *)pv2;
-
- if (v1 > v2)
- return 1;
- else if (v1 < v2)
- return -1;
- else
- return 0;
+ s64 tmp = *a;
+ *a = *b;
+ *b = tmp;
}
/*
* cros_ec_sensor_ring_median: Gets median of an array of numbers
*
- * For now it's implemented using an inefficient > O(n) sort then return
- * the middle element. A more optimal method would be something like
- * quickselect, but given that n = 64 we can probably live with it in the
- * name of clarity.
+ * It's implemented using the quickselect algorithm, which achieves an
+ * average time complexity of O(n) for finding the middle element. In the
+ * worst case, the runtime of quickselect could regress to O(n^2). To
+ * mitigate this, algorithms like median-of-medians exist, which can
+ * guarantee O(n) even in the worst case. However, these algorithms come
+ * with a higher overhead and are more complex to implement, making
+ * quickselect a pragmatic choice for our use case.
*
- * Warning: the input array gets modified (sorted)!
+ * Warning: the input array gets modified!
*/
static s64 cros_ec_sensor_ring_median(s64 *array, size_t length)
{
- sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL);
- return array[length / 2];
+ int lo = 0;
+ int hi = length - 1;
+
+ while (lo <= hi) {
+ int mid = lo + (hi - lo) / 2;
+ int pivot, i;
+
+ if (array[lo] > array[mid])
+ cros_ec_sensor_ring_median_swap(&array[lo], &array[mid]);
+ if (array[lo] > array[hi])
+ cros_ec_sensor_ring_median_swap(&array[lo], &array[hi]);
+ if (array[mid] < array[hi])
+ cros_ec_sensor_ring_median_swap(&array[mid], &array[hi]);
+
+ pivot = array[hi];
+ i = lo - 1;
+
+ for (int j = lo; j < hi; j++)
+ if (array[j] < pivot)
+ cros_ec_sensor_ring_median_swap(&array[++i], &array[j]);
+
+ /* The pivot's index corresponds to i+1. */
+ cros_ec_sensor_ring_median_swap(&array[i + 1], &array[hi]);
+ if (i + 1 == length / 2)
+ return array[i + 1];
+ if (i + 1 > length / 2)
+ hi = i;
+ else
+ lo = i + 2;
+ }
+
+ /* Should never reach here. */
+ return -1;
}
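+
+/*
+ * A minimal standalone sketch of the selection scheme used above
+ * (illustrative only, not driver code; names are hypothetical): Lomuto
+ * partition around the last element, then narrow to whichever side holds
+ * the target index k = length / 2. Assumes len >= 1.
+ */
+static void swap_s64_sketch(s64 *a, s64 *b)
+{
+	s64 t = *a;
+	*a = *b;
+	*b = t;
+}
+
+static s64 quickselect_sketch(s64 *v, size_t len, size_t k)
+{
+	size_t lo = 0, hi = len - 1;
+
+	while (lo < hi) {
+		s64 pivot = v[hi];
+		size_t i = lo;
+
+		for (size_t j = lo; j < hi; j++)
+			if (v[j] < pivot)
+				swap_s64_sketch(&v[i++], &v[j]);
+		swap_s64_sketch(&v[i], &v[hi]);	/* pivot is final at index i */
+
+		if (i == k)
+			return v[i];
+		if (i > k)
+			hi = i - 1;	/* i > k >= 0, so no underflow */
+		else
+			lo = i + 1;
+	}
+	return v[lo];
+}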
/*
@@ -175,8 +203,8 @@ static s64 cros_ec_sensor_ring_median(s64 *array, size_t length)
*
* While a and b are recorded at accurate times (due to the EC real time
* nature); c is pretty untrustworthy, even though it's recorded the
- * first thing in ec_irq_handler(). There is a very good change we'll get
- * added lantency due to:
+ * first thing in ec_irq_handler(). There is a very good chance we'll get
+ * added latency due to:
* other irqs
* ddrfreq
* cpuidle
@@ -511,7 +539,7 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
* ringbuffer.
*
* This is the new spreading code, assumes every sample's timestamp
- * preceeds the sample. Run if tight_timestamps == true.
+ * precedes the sample. Run if tight_timestamps == true.
*
* Sometimes the EC receives only one interrupt (hence timestamp) for
* a batch of samples. Only the first sample will have the correct
@@ -595,7 +623,7 @@ cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub,
} else {
/*
* Push first sample in the batch to the,
- * kifo, it's guaranteed to be correct, the
+ * kfifo, it's guaranteed to be correct, the
* rest will follow later on.
*/
sample_idx = 1;
@@ -701,7 +729,7 @@ done_with_this_batch:
* last_out -->
*
*
- * We spread time for the samples using perod p = (current - TS1)/4.
+ * We spread time for the samples using period p = (current - TS1)/4.
* between TS1 and TS2: [TS1+p/4, TS1+2p/4, TS1+3p/4, current_timestamp].
*
*/
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 788246559..eb5eddeb7 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -81,9 +81,8 @@ struct cros_ec_uart {
struct response_info response;
};
-static int cros_ec_uart_rx_bytes(struct serdev_device *serdev,
- const u8 *data,
- size_t count)
+static ssize_t cros_ec_uart_rx_bytes(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct ec_host_response *host_response;
struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
@@ -264,12 +263,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
if (!ec_dev)
return -ENOMEM;
- ret = devm_serdev_device_open(dev, serdev);
- if (ret) {
- dev_err(dev, "Unable to open UART device");
- return ret;
- }
-
serdev_device_set_drvdata(serdev, ec_dev);
init_waitqueue_head(&ec_uart->response.wait_queue);
@@ -281,14 +274,6 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
return ret;
}
- ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
- if (ret < 0) {
- dev_err(dev, "Failed to set up host baud rate (%d)", ret);
- return ret;
- }
-
- serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
-
/* Initialize ec_dev for cros_ec */
ec_dev->phys_name = dev_name(dev);
ec_dev->dev = dev;
@@ -302,6 +287,20 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret) {
+ dev_err(dev, "Unable to open UART device");
+ return ret;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+
return cros_ec_register(ec_dev);
}
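
The probe reordering above is the substantive fix in this file: devm_serdev_device_open() can start delivering RX bytes immediately, so the client ops (and every piece of state they touch) must be in place before the port is opened. A hedged sketch of the safe ordering; example_priv and example_client_ops are hypothetical names, not from this driver:

/* Safe serdev bring-up order: state and ops first, then open the port. */
static int example_serdev_probe(struct serdev_device *serdev)
{
	struct example_priv *priv;	/* hypothetical driver state */
	int ret;

	priv = devm_kzalloc(&serdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	serdev_device_set_drvdata(serdev, priv);
	serdev_device_set_client_ops(serdev, &example_client_ops);

	/* RX callbacks may fire as soon as the port is open. */
	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_flow_control(serdev, false);
	return 0;
}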
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 2e4af10c7..274ea0c64 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -20,10 +20,14 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct cros_ec_dev *ec = to_cros_ec_dev(dev);
struct cros_ec_device *ecdev = ec->ec_dev;
- struct ec_params_vbnvcontext *params;
struct cros_ec_command *msg;
+ /*
+ * This should be a pointer to the same type as op field in
+ * struct ec_params_vbnvcontext.
+ */
+ uint32_t *params_op;
int err;
- const size_t para_sz = sizeof(params->op);
+ const size_t para_sz = sizeof(*params_op);
const size_t resp_sz = sizeof(struct ec_response_vbnvcontext);
const size_t payload = max(para_sz, resp_sz);
@@ -32,8 +36,8 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
return -ENOMEM;
/* NB: we only kmalloc()ated enough space for the op field */
- params = (struct ec_params_vbnvcontext *)msg->data;
- params->op = EC_VBNV_CONTEXT_OP_READ;
+ params_op = (uint32_t *)msg->data;
+ *params_op = EC_VBNV_CONTEXT_OP_READ;
msg->version = EC_VER_VBNV_CONTEXT;
msg->command = EC_CMD_VBNV_CONTEXT;
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index f80a7c83c..13291fb42 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -495,7 +495,7 @@ static int event_device_add(struct acpi_device *adev)
free_dev_data:
hangup_device(dev_data);
free_minor:
- ida_simple_remove(&event_ida, minor);
+ ida_free(&event_ida, minor);
return error;
}
@@ -504,7 +504,7 @@ static void event_device_remove(struct acpi_device *adev)
struct event_device_data *dev_data = adev->driver_data;
cdev_device_del(&dev_data->cdev, &dev_data->dev);
- ida_simple_remove(&event_ida, MINOR(dev_data->dev.devt));
+ ida_free(&event_ida, MINOR(dev_data->dev.devt));
hangup_device(dev_data);
}
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index 253098bac..b7c616f3d 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -372,7 +372,7 @@ static int telem_device_probe(struct platform_device *pdev)
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data) {
- ida_simple_remove(&telem_ida, minor);
+ ida_free(&telem_ida, minor);
return -ENOMEM;
}
@@ -393,7 +393,7 @@ static int telem_device_probe(struct platform_device *pdev)
error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
if (error) {
put_device(&dev_data->dev);
- ida_simple_remove(&telem_ida, minor);
+ ida_free(&telem_ida, minor);
return error;
}
@@ -405,7 +405,7 @@ static void telem_device_remove(struct platform_device *pdev)
struct telem_device_data *dev_data = platform_get_drvdata(pdev);
cdev_device_del(&dev_data->cdev, &dev_data->dev);
- ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
+ ida_free(&telem_ida, MINOR(dev_data->dev.devt));
put_device(&dev_data->dev);
}
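
All three wilco_ec call sites above are mechanical conversions: ida_simple_remove() is the deprecated alias for ida_free(), whose allocation-side counterparts are ida_alloc() and ida_alloc_max(). A minimal sketch of the pairing (hypothetical names, not from this driver):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_minor(void)
{
	/* Allocate the smallest free id in [0, 255]; -ENOSPC when full. */
	return ida_alloc_max(&example_ida, 255, GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	ida_free(&example_ida, minor);
}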
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 7737d5619..061aa9647 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -915,12 +915,11 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
return goldfish_pipe_device_init(pdev, dev);
}
-static int goldfish_pipe_remove(struct platform_device *pdev)
+static void goldfish_pipe_remove(struct platform_device *pdev)
{
struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
goldfish_pipe_device_deinit(pdev, dev);
- return 0;
}
static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
@@ -937,7 +936,7 @@ MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
static struct platform_driver goldfish_pipe_driver = {
.probe = goldfish_pipe_probe,
- .remove = goldfish_pipe_remove,
+ .remove_new = goldfish_pipe_remove,
.driver = {
.name = "goldfish_pipe",
.of_match_table = goldfish_pipe_of_match,
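
The goldfish change follows the tree-wide platform-driver cleanup: remove callbacks cannot usefully report failure, so drivers move to the void-returning variant, named .remove_new during the transition. A sketch of the converted shape, with hypothetical names:

static void example_remove(struct platform_device *pdev)
{
	struct example_priv *priv = platform_get_drvdata(pdev);

	example_teardown(priv);	/* must not fail; log and continue if needed */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = { .name = "example" },
};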
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index f39b7b9d2..b8d1e32e9 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -98,7 +98,7 @@ struct mlxbf_tmfifo_vring {
/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
typeof(_r) (r) = (_r); \
- (r->desc_head == &r->drop_desc ? true : false); })
+ r->desc_head == &r->drop_desc; })
/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
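
The IS_VRING_DROP() cleanup drops a redundant ternary, since the comparison already yields a boolean. The surviving typeof temporary is the part that matters: it evaluates the macro argument exactly once even when callers pass expressions with side effects. The same pattern in isolation (hypothetical macro):

/* Evaluate the argument once, then test it -- same shape as IS_VRING_DROP(). */
#define EXAMPLE_IS_SENTINEL(_q, _sentinel) ({	\
	typeof(_q) (q) = (_q);			\
	(q) == (_sentinel); })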
diff --git a/drivers/platform/mips/rs780e-acpi.c b/drivers/platform/mips/rs780e-acpi.c
index bb0e8ae0e..5b8f9cc32 100644
--- a/drivers/platform/mips/rs780e-acpi.c
+++ b/drivers/platform/mips/rs780e-acpi.c
@@ -32,29 +32,25 @@ static u8 pmio_read_index(u16 index, u8 reg)
return inb(index + 1);
}
-void pm_iowrite(u8 reg, u8 value)
+static void pm_iowrite(u8 reg, u8 value)
{
pmio_write_index(PM_INDEX, reg, value);
}
-EXPORT_SYMBOL(pm_iowrite);
-u8 pm_ioread(u8 reg)
+static u8 pm_ioread(u8 reg)
{
return pmio_read_index(PM_INDEX, reg);
}
-EXPORT_SYMBOL(pm_ioread);
-void pm2_iowrite(u8 reg, u8 value)
+static void pm2_iowrite(u8 reg, u8 value)
{
pmio_write_index(PM2_INDEX, reg, value);
}
-EXPORT_SYMBOL(pm2_iowrite);
-u8 pm2_ioread(u8 reg)
+static u8 pm2_ioread(u8 reg)
{
return pmio_read_index(PM2_INDEX, reg);
}
-EXPORT_SYMBOL(pm2_ioread);
static void acpi_hw_clear_status(void)
{
diff --git a/drivers/platform/surface/aggregator/Kconfig b/drivers/platform/surface/aggregator/Kconfig
index 88afc38ff..957c216c1 100644
--- a/drivers/platform/surface/aggregator/Kconfig
+++ b/drivers/platform/surface/aggregator/Kconfig
@@ -5,7 +5,7 @@ menuconfig SURFACE_AGGREGATOR
tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
depends on SERIAL_DEV_BUS
depends on ACPI && !RISCV
- select CRC_CCITT
+ select CRC_ITU_T
help
The Surface System Aggregator Module (Surface SAM or SSAM) is an
embedded controller (EC) found on 5th- and later-generation Microsoft
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index 42ccd7f1c..118caa651 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -35,6 +35,8 @@ static struct attribute *ssam_device_attrs[] = {
};
ATTRIBUTE_GROUPS(ssam_device);
+static const struct bus_type ssam_bus_type;
+
static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct ssam_device *sdev = to_ssam_device(dev);
@@ -329,13 +331,12 @@ static void ssam_bus_remove(struct device *dev)
sdrv->remove(to_ssam_device(dev));
}
-struct bus_type ssam_bus_type = {
+static const struct bus_type ssam_bus_type = {
.name = "surface_aggregator",
.match = ssam_bus_match,
.probe = ssam_bus_probe,
.remove = ssam_bus_remove,
};
-EXPORT_SYMBOL_GPL(ssam_bus_type);
/**
* __ssam_device_driver_register() - Register a SSAM client device driver.
diff --git a/drivers/platform/surface/aggregator/controller.h b/drivers/platform/surface/aggregator/controller.h
index f0d987abc..f1638c208 100644
--- a/drivers/platform/surface/aggregator/controller.h
+++ b/drivers/platform/surface/aggregator/controller.h
@@ -238,8 +238,8 @@ struct ssam_controller {
* layer of the controller has been shut down, %-ESHUTDOWN.
*/
static inline
-int ssam_controller_receive_buf(struct ssam_controller *ctrl,
- const unsigned char *buf, size_t n)
+ssize_t ssam_controller_receive_buf(struct ssam_controller *ctrl, const u8 *buf,
+ size_t n)
{
return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
}
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index 6152be383..9591a28bc 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -227,8 +227,8 @@ EXPORT_SYMBOL_GPL(ssam_client_bind);
/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
-static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
- size_t n)
+static ssize_t ssam_receive_buf(struct serdev_device *dev, const u8 *buf,
+ size_t n)
{
struct ssam_controller *ctrl;
int ret;
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index def8d7ac5..d726b1a86 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -1887,9 +1887,9 @@ int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
* Return: Returns the number of bytes transferred (positive or zero) on
* success. Returns %-ESHUTDOWN if the packet layer has been shut down.
*/
-int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
+ssize_t ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
{
- int used;
+ size_t used;
if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
return -ESHUTDOWN;
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.h b/drivers/platform/surface/aggregator/ssh_packet_layer.h
index 64633522f..c80e82207 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.h
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.h
@@ -162,7 +162,7 @@ void ssh_ptl_shutdown(struct ssh_ptl *ptl);
int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
void ssh_ptl_cancel(struct ssh_packet *p);
-int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
+ssize_t ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
/**
* ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for
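
The int-to-ssize_t (and unsigned char-to-u8) signature changes in the aggregator mirror the serdev subsystem conversion in this release: receive callbacks report how many bytes they consumed, or a negative errno such as -ESHUTDOWN. A hedged sketch of the contract, with a hypothetical client:

struct example_priv {
	u8 rx_buf[256];
	size_t rx_len;
};

/* Contract: return bytes consumed (0..count) or a negative errno. */
static ssize_t example_receive_buf(struct serdev_device *serdev,
				   const u8 *data, size_t count)
{
	struct example_priv *priv = serdev_device_get_drvdata(serdev);
	size_t used = min(count, sizeof(priv->rx_buf) - priv->rx_len);

	memcpy(priv->rx_buf + priv->rx_len, data, used);
	priv->rx_len += used;
	return used;
}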
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index e4dee920d..20f387091 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -736,34 +736,6 @@ do { \
#define san_consumer_warn(dev, handle, fmt, ...) \
san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
-static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
-{
- struct acpi_handle_list dep_devices;
- acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
- acpi_status status;
- bool ret = false;
- int i;
-
- if (!acpi_has_method(handle, "_DEP"))
- return false;
-
- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
- if (ACPI_FAILURE(status)) {
- san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
- return false;
- }
-
- for (i = 0; i < dep_devices.count; i++) {
- if (dep_devices.handles[i] == supplier) {
- ret = true;
- break;
- }
- }
-
- acpi_handle_list_free(&dep_devices);
- return ret;
-}
-
static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
void *context, void **rv)
{
@@ -772,7 +744,7 @@ static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
struct acpi_device *adev;
struct device_link *link;
- if (!is_san_consumer(pdev, handle))
+ if (!acpi_device_dep(handle, ACPI_HANDLE(&pdev->dev)))
return AE_OK;
/* Ignore ACPI devices that are not present. */
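
is_san_consumer() could be deleted because acpi_device_dep(), new in this cycle, performs the same check: evaluate _DEP on a handle and report whether a given supplier appears in the returned list. A sketch of typical consumer-link usage under that predicate (flags chosen for illustration only):

/* Hypothetical use: link 'adev' to our platform device when _DEP names us. */
if (acpi_device_dep(handle, ACPI_HANDLE(&pdev->dev))) {
	struct device_link *link;

	link = device_link_add(&adev->dev, &pdev->dev,
			       DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return AE_ERROR;
}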
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c94f31a5c..bdd302274 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -177,10 +177,12 @@ config ACER_WMI
depends on INPUT
depends on RFKILL || RFKILL = n
depends on ACPI_WMI
- select ACPI_VIDEO
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on HWMON
select INPUT_SPARSEKMAP
select LEDS_CLASS
select NEW_LEDS
+ select ACPI_PLATFORM_PROFILE
help
This is a driver for newer Acer (and Wistron) laptops. It adds
wireless radio and bluetooth control, and on some laptops,
@@ -1087,6 +1089,21 @@ config INTEL_SCU_IPC_UTIL
source "drivers/platform/x86/siemens/Kconfig"
+config SILICOM_PLATFORM
+ tristate "Silicom Edge Networking device support"
+ depends on HWMON
+ depends on GPIOLIB
+ depends on LEDS_CLASS_MULTICOLOR
+ help
+ This option enables support for the LEDs/GPIO/etc downstream of the
+ embedded controller on Silicom "Cordoba" hardware and derivatives.
+
+ This platform driver provides support for various functions via
+ the Linux LED framework, GPIO framework, Hardware Monitoring (HWMON)
+ and device attributes.
+
+ If you have a Silicom network appliance, say Y or M here.
+
config WINMATE_FM07_KEYS
tristate "Winmate FM07/FM07P front-panel keys driver"
depends on INPUT
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c7a18e95a..1de432e88 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -136,6 +136,9 @@ obj-$(CONFIG_X86_INTEL_LPSS) += pmc_atom.o
# Siemens Simatic Industrial PCs
obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += siemens/
+# Silicom
+obj-$(CONFIG_SILICOM_PLATFORM) += silicom-platform.o
+
# Winmate
obj-$(CONFIG_WINMATE_FM07_KEYS) += winmate-fm07-keys.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 0e472aa9b..ee2e164f8 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -20,6 +20,7 @@
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
#include <linux/acpi.h>
#include <linux/i8042.h>
#include <linux/rfkill.h>
@@ -29,6 +30,8 @@
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
+#include <linux/hwmon.h>
+#include <linux/bitfield.h>
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
@@ -62,9 +65,14 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_SET_GAMING_LED_METHODID 2
#define ACER_WMID_GET_GAMING_LED_METHODID 4
+#define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5
#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
+#define ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET 0x54
+
+#define ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK GENMASK(20, 8)
+
/*
* Acer ACPI method GUIDs
*/
@@ -90,6 +98,12 @@ enum acer_wmi_event_ids {
WMID_GAMING_TURBO_KEY_EVENT = 0x7,
};
+enum acer_wmi_predator_v4_sys_info_command {
+ ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS = 0x02,
+ ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED = 0x0201,
+ ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED = 0x0601,
+};
+
static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
@@ -229,9 +243,11 @@ struct hotkey_function_type_aa {
#define ACER_CAP_THREEG BIT(4)
#define ACER_CAP_SET_FUNCTION_MODE BIT(5)
#define ACER_CAP_KBD_DOCK BIT(6)
-#define ACER_CAP_TURBO_OC BIT(7)
-#define ACER_CAP_TURBO_LED BIT(8)
-#define ACER_CAP_TURBO_FAN BIT(9)
+#define ACER_CAP_TURBO_OC BIT(7)
+#define ACER_CAP_TURBO_LED BIT(8)
+#define ACER_CAP_TURBO_FAN BIT(9)
+#define ACER_CAP_PLATFORM_PROFILE BIT(10)
+#define ACER_CAP_FAN_SPEED_READ BIT(11)
/*
* Interface type flags
@@ -259,6 +275,8 @@ static bool ec_raw_mode;
static bool has_type_aa;
static u16 commun_func_bitmap;
static u8 commun_fn_key_number;
+static bool cycle_gaming_thermal_profile = true;
+static bool predator_v4;
module_param(mailled, int, 0444);
module_param(brightness, int, 0444);
@@ -266,12 +284,18 @@ module_param(threeg, int, 0444);
module_param(force_series, int, 0444);
module_param(force_caps, int, 0444);
module_param(ec_raw_mode, bool, 0444);
+module_param(cycle_gaming_thermal_profile, bool, 0644);
+module_param(predator_v4, bool, 0444);
MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
MODULE_PARM_DESC(force_series, "Force a different laptop series");
MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value");
MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
+MODULE_PARM_DESC(cycle_gaming_thermal_profile,
+ "Set thermal mode key in cycle mode. Disabling it sets the mode key in turbo toggle mode");
+MODULE_PARM_DESC(predator_v4,
+ "Enable features for predator laptops that use predator sense v4");
struct acer_data {
int mailled;
@@ -321,6 +345,7 @@ struct quirk_entry {
u8 turbo;
u8 cpu_fans;
u8 gpu_fans;
+ u8 predator_v4;
};
static struct quirk_entry *quirks;
@@ -336,6 +361,10 @@ static void __init set_quirks(void)
if (quirks->turbo)
interface->capability |= ACER_CAP_TURBO_OC | ACER_CAP_TURBO_LED
| ACER_CAP_TURBO_FAN;
+
+ if (quirks->predator_v4)
+ interface->capability |= ACER_CAP_PLATFORM_PROFILE |
+ ACER_CAP_FAN_SPEED_READ;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -370,6 +399,10 @@ static struct quirk_entry quirk_acer_predator_ph315_53 = {
.gpu_fans = 1,
};
+static struct quirk_entry quirk_acer_predator_v4 = {
+ .predator_v4 = 1,
+};
+
/* This AMW0 laptop has no bluetooth */
static struct quirk_entry quirk_medion_md_98300 = {
.wireless = 1,
@@ -547,6 +580,24 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
.driver_data = &quirk_acer_predator_ph315_53,
},
{
+ .callback = dmi_matched,
+ .ident = "Acer Predator PHN16-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PHN16-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Predator PH16-71",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH16-71"),
+ },
+ .driver_data = &quirk_acer_predator_v4,
+ },
+ {
.callback = set_force_caps,
.ident = "Acer Aspire Switch 10E SW3-016",
.matches = {
@@ -659,10 +710,37 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = {
{}
};
+static struct platform_profile_handler platform_profile_handler;
+static bool platform_profile_support;
+
+/*
+ * The profile used before turbo mode. This variable is needed for
+ * returning from turbo mode when the mode key is in toggle mode.
+ */
+static int last_non_turbo_profile;
+
+enum acer_predator_v4_thermal_profile_ec {
+ ACER_PREDATOR_V4_THERMAL_PROFILE_ECO = 0x04,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO = 0x03,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE = 0x02,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x01,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x00,
+};
+
+enum acer_predator_v4_thermal_profile_wmi {
+ ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI = 0x060B,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI = 0x050B,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI = 0x040B,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI = 0x0B,
+ ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI = 0x010B,
+};
+
/* Find which quirks are needed for a particular vendor/ model pair */
static void __init find_quirks(void)
{
- if (!force_series) {
+ if (predator_v4) {
+ quirks = &quirk_acer_predator_v4;
+ } else if (!force_series) {
dmi_check_system(acer_quirks);
dmi_check_system(non_acer_quirks);
} else if (force_series == 2490) {
@@ -1339,7 +1417,7 @@ WMI_gaming_execute_u64(u32 method_id, u64 in, u64 *out)
struct acpi_buffer input = { (acpi_size) sizeof(u64), (void *)(&in) };
struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
- u32 tmp = 0;
+ u64 tmp = 0;
acpi_status status;
status = wmi_evaluate_method(WMID_GUID4, 0, method_id, &input, &result);
@@ -1663,6 +1741,26 @@ static int acer_gsensor_event(void)
return 0;
}
+static int acer_get_fan_speed(int fan)
+{
+ if (quirks->predator_v4) {
+ acpi_status status;
+ u64 fanspeed;
+
+ status = WMI_gaming_execute_u64(
+ ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
+ fan == 0 ? ACER_WMID_CMD_GET_PREDATOR_V4_CPU_FAN_SPEED :
+ ACER_WMID_CMD_GET_PREDATOR_V4_GPU_FAN_SPEED,
+ &fanspeed);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return FIELD_GET(ACER_PREDATOR_V4_FAN_SPEED_READ_BIT_MASK, fanspeed);
+ }
+ return -EOPNOTSUPP;
+}
+
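+/*
+ * Aside on the GENMASK/FIELD_GET pair above, as a self-contained sketch
+ * (mask name hypothetical): FIELD_GET() masks bits 20:8 out of the raw
+ * u64 and shifts them down to bit 0 in one step.
+ */
+#define EXAMPLE_SPEED_MASK GENMASK(20, 8)
+
+static inline u32 example_extract_rpm(u64 raw)
+{
+	/* e.g. raw = 0x12345 -> (0x12345 & 0x1fff00) >> 8 = 0x123 */
+	return FIELD_GET(EXAMPLE_SPEED_MASK, raw);
+}
+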
/*
* Predator series turbo button
*/
@@ -1698,6 +1796,199 @@ static int acer_toggle_turbo(void)
return turbo_led_state;
}
+static int
+acer_predator_v4_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ u8 tp;
+ int err;
+
+ err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET, &tp);
+
+ if (err < 0)
+ return err;
+
+ switch (tp) {
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_ECO:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+acer_predator_v4_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int tp;
+ acpi_status status;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ break;
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ break;
+ case PLATFORM_PROFILE_QUIET:
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ status = WMI_gaming_execute_u64(
+ ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
+ last_non_turbo_profile = tp;
+
+ return 0;
+}
+
+static int acer_platform_profile_setup(void)
+{
+ if (quirks->predator_v4) {
+ int err;
+
+ platform_profile_handler.profile_get =
+ acer_predator_v4_platform_profile_get;
+ platform_profile_handler.profile_set =
+ acer_predator_v4_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_PERFORMANCE,
+ platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED,
+ platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_QUIET,
+ platform_profile_handler.choices);
+ set_bit(PLATFORM_PROFILE_LOW_POWER,
+ platform_profile_handler.choices);
+
+ err = platform_profile_register(&platform_profile_handler);
+ if (err)
+ return err;
+
+ platform_profile_support = true;
+
+ /* Set default non-turbo profile */
+ last_non_turbo_profile =
+ ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ }
+ return 0;
+}
+
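+/*
+ * The registration above follows the standard platform_profile pattern;
+ * a stripped-down sketch with hypothetical callbacks (example_profile_get
+ * and example_profile_set are assumed to exist):
+ */
+static struct platform_profile_handler example_pprof;
+
+static int example_register_profiles(void)
+{
+	example_pprof.profile_get = example_profile_get;
+	example_pprof.profile_set = example_profile_set;
+
+	set_bit(PLATFORM_PROFILE_LOW_POWER, example_pprof.choices);
+	set_bit(PLATFORM_PROFILE_PERFORMANCE, example_pprof.choices);
+
+	/* Exposes /sys/firmware/acpi/platform_profile on success. */
+	return platform_profile_register(&example_pprof);
+}
+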
+static int acer_thermal_profile_change(void)
+{
+ /*
+ * This mode key can rotate each mode or toggle turbo mode.
+ * On battery, only ECO and BALANCED mode are available.
+ */
+ if (quirks->predator_v4) {
+ u8 current_tp;
+ int tp, err;
+ u64 on_AC;
+ acpi_status status;
+
+ err = ec_read(ACER_PREDATOR_V4_THERMAL_PROFILE_EC_OFFSET,
+ &current_tp);
+
+ if (err < 0)
+ return err;
+
+ /* Check power source */
+ status = WMI_gaming_execute_u64(
+ ACER_WMID_GET_GAMING_SYS_INFO_METHODID,
+ ACER_WMID_CMD_GET_PREDATOR_V4_BAT_STATUS, &on_AC);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ switch (current_tp) {
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO:
+ if (!on_AC)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ else if (cycle_gaming_thermal_profile)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
+ else
+ tp = last_non_turbo_profile;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE:
+ if (!on_AC)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ else
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED:
+ if (!on_AC)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO_WMI;
+ else if (cycle_gaming_thermal_profile)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE_WMI;
+ else
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET:
+ if (!on_AC)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ else if (cycle_gaming_thermal_profile)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ else
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ break;
+ case ACER_PREDATOR_V4_THERMAL_PROFILE_ECO:
+ if (!on_AC)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED_WMI;
+ else if (cycle_gaming_thermal_profile)
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET_WMI;
+ else
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ status = WMI_gaming_execute_u64(
+ ACER_WMID_SET_GAMING_MISC_SETTING_METHODID, tp, NULL);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* Store non-turbo profile for turbo mode toggle */
+ if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO_WMI)
+ last_non_turbo_profile = tp;
+
+ platform_profile_notify();
+ }
+
+ return 0;
+}
+
/*
* Switch series keyboard dock status
*/
@@ -1997,6 +2288,8 @@ static void acer_wmi_notify(u32 value, void *context)
case WMID_GAMING_TURBO_KEY_EVENT:
if (return_value.key_num == 0x4)
acer_toggle_turbo();
+ if (return_value.key_num == 0x5 && has_cap(ACER_CAP_PLATFORM_PROFILE))
+ acer_thermal_profile_change();
break;
default:
pr_warn("Unknown function number - %d - %d\n",
@@ -2222,6 +2515,8 @@ static u32 get_wmid_devices(void)
return devices;
}
+static int acer_wmi_hwmon_init(void);
+
/*
* Platform device
*/
@@ -2245,8 +2540,25 @@ static int acer_platform_probe(struct platform_device *device)
if (err)
goto error_rfkill;
- return err;
+ if (has_cap(ACER_CAP_PLATFORM_PROFILE)) {
+ err = acer_platform_profile_setup();
+ if (err)
+ goto error_platform_profile;
+ }
+ if (has_cap(ACER_CAP_FAN_SPEED_READ)) {
+ err = acer_wmi_hwmon_init();
+ if (err)
+ goto error_hwmon;
+ }
+
+ return 0;
+
+error_hwmon:
+ if (platform_profile_support)
+ platform_profile_remove();
+error_platform_profile:
+ acer_rfkill_exit();
error_rfkill:
if (has_cap(ACER_CAP_BRIGHTNESS))
acer_backlight_exit();
@@ -2265,6 +2577,9 @@ static void acer_platform_remove(struct platform_device *device)
acer_backlight_exit();
acer_rfkill_exit();
+
+ if (platform_profile_support)
+ platform_profile_remove();
}
#ifdef CONFIG_PM_SLEEP
@@ -2351,6 +2666,73 @@ static void __init create_debugfs(void)
&interface->debug.wmid_devices);
}
+static umode_t acer_wmi_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ if (acer_get_fan_speed(channel) >= 0)
+ return 0444;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ ret = acer_get_fan_speed(channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT), NULL
+};
+
+static const struct hwmon_ops acer_wmi_hwmon_ops = {
+ .read = acer_wmi_hwmon_read,
+ .is_visible = acer_wmi_hwmon_is_visible,
+};
+
+static const struct hwmon_chip_info acer_wmi_hwmon_chip_info = {
+ .ops = &acer_wmi_hwmon_ops,
+ .info = acer_wmi_hwmon_info,
+};
+
+static int acer_wmi_hwmon_init(void)
+{
+ struct device *dev = &acer_platform_device->dev;
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "acer",
+ &acer_platform_driver,
+ &acer_wmi_hwmon_chip_info,
+ NULL);
+
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "Could not register acer hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
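+/*
+ * Shape of the registration above in isolation (names hypothetical;
+ * example_hwmon_ops is assumed to provide .read and .is_visible). The
+ * devm_ variant ties the hwmon device lifetime to the platform device,
+ * so no explicit unregister is needed.
+ */
+static const struct hwmon_channel_info *const example_info[] = {
+	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+	NULL
+};
+
+static const struct hwmon_chip_info example_chip = {
+	.ops = &example_hwmon_ops,
+	.info = example_info,
+};
+
+static int example_hwmon_init(struct device *dev)
+{
+	struct device *hwmon;
+
+	hwmon = devm_hwmon_device_register_with_info(dev, "example", NULL,
+						     &example_chip, NULL);
+	return PTR_ERR_OR_ZERO(hwmon);
+}
+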
static int __init acer_wmi_init(void)
{
int err;
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index 55f3a2fc6..54753213c 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -18,3 +18,17 @@ config AMD_HSMP
If you choose to compile this driver as a module the module will be
called amd_hsmp.
+
+config AMD_WBRF
+ bool "AMD Wifi RF Band mitigations (WBRF)"
+ depends on ACPI
+ help
+ WBRF (Wifi Band RFI mitigation) mechanism allows Wifi drivers
+ to notify the frequencies they are using so that other hardware
+ can be reconfigured to avoid harmonic conflicts.
+
+ AMD provides an ACPI based mechanism to support WBRF on platform with
+ appropriate underlying support.
+
+ This mechanism will only be activated on platforms that advertise a
+ need for it.
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index f04932b7a..dcec0a46f 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_AMD_PMC) += pmc/
amd_hsmp-y := hsmp.o
obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
obj-$(CONFIG_AMD_PMF) += pmf/
+obj-$(CONFIG_AMD_WBRF) += wbrf.o
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b45637016..b4f49720c 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
}
},
+ {
+ .ident = "Framework Laptop 13 (Phoenix)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+ DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 864c8cc2f..108e12fd5 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -31,13 +31,13 @@
#include "pmc.h"
/* SMU communication registers */
-#define AMD_PMC_REGISTER_MESSAGE 0x538
#define AMD_PMC_REGISTER_RESPONSE 0x980
#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
/* PMC Scratch Registers */
#define AMD_PMC_SCRATCH_REG_CZN 0x94
#define AMD_PMC_SCRATCH_REG_YC 0xD14
+#define AMD_PMC_SCRATCH_REG_1AH 0xF14
/* STB Registers */
#define AMD_PMC_STB_PMI_0 0x03E30600
@@ -145,6 +145,7 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = {
{"JPEG", BIT(18)},
{"IPU", BIT(19)},
{"UMSCH", BIT(20)},
+ {"VPE", BIT(21)},
{}
};
@@ -350,10 +351,17 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
case AMD_CPU_ID_CB:
dev->num_ips = 12;
dev->s2d_msg_id = 0xBE;
+ dev->smu_msg = 0x538;
break;
case AMD_CPU_ID_PS:
dev->num_ips = 21;
dev->s2d_msg_id = 0x85;
+ dev->smu_msg = 0x538;
+ break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ dev->num_ips = 22;
+ dev->s2d_msg_id = 0xDE;
+ dev->smu_msg = 0x938;
break;
}
}
@@ -588,6 +596,9 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
case AMD_CPU_ID_PS:
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
break;
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_1AH);
+ break;
default:
return -EINVAL;
}
@@ -618,6 +629,7 @@ static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
case AMD_CPU_ID_PS:
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
return true;
default:
return false;
@@ -653,7 +665,7 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
argument = AMD_S2D_REGISTER_ARGUMENT;
response = AMD_S2D_REGISTER_RESPONSE;
} else {
- message = AMD_PMC_REGISTER_MESSAGE;
+ message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
response = AMD_PMC_REGISTER_RESPONSE;
}
@@ -680,7 +692,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg,
argument = AMD_S2D_REGISTER_ARGUMENT;
response = AMD_S2D_REGISTER_RESPONSE;
} else {
- message = AMD_PMC_REGISTER_MESSAGE;
+ message = dev->smu_msg;
argument = AMD_PMC_REGISTER_ARGUMENT;
response = AMD_PMC_REGISTER_RESPONSE;
}
@@ -751,6 +763,7 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
case AMD_CPU_ID_PS:
+ case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
return MSG_OS_HINT_RN;
}
return -EINVAL;
@@ -967,9 +980,6 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
/* Spill to DRAM feature uses separate SMU message port */
dev->msg_port = 1;
- /* Get num of IP blocks within the SoC */
- amd_pmc_get_ip_info(dev);
-
amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
if (size != S2D_TELEMETRY_BYTES_MAX)
return -EIO;
@@ -1077,6 +1087,9 @@ static int amd_pmc_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
+ /* Get num of IP blocks within the SoC */
+ amd_pmc_get_ip_info(dev);
+
if (enable_stb && amd_pmc_is_stb_supported(dev)) {
err = amd_pmc_s2d_init(dev);
if (err)
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index b4794f118..827eef65e 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -26,6 +26,7 @@ struct amd_pmc_dev {
u32 dram_size;
u32 num_ips;
u32 s2d_msg_id;
+ u32 smu_msg;
/* SMU version information */
u8 smu_program;
u8 major;
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index 3064bc8ea..f4fa8bd8b 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -9,6 +9,8 @@ config AMD_PMF
depends on POWER_SUPPLY
depends on AMD_NB
select ACPI_PLATFORM_PROFILE
+ depends on TEE && AMDTEE
+ depends on AMD_SFH_HID
help
This driver provides support for the AMD Platform Management Framework.
The goal is to enhance end user experience by making AMD PCs smarter,
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index fdededf54..6b26e48ce 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_AMD_PMF) += amd-pmf.o
amd-pmf-objs := core.o acpi.o sps.o \
- auto-mode.o cnqf.o
+ auto-mode.o cnqf.o \
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index 3fc5e4547..f2eb07ef8 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -111,7 +111,6 @@ int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
struct os_power_slider args;
struct acpi_buffer params;
union acpi_object *info;
- int err = 0;
args.size = sizeof(args);
args.slider_event = event;
@@ -121,10 +120,10 @@ int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, &params);
if (!info)
- err = -EIO;
+ return -EIO;
kfree(info);
- return err;
+ return 0;
}
static void apmf_sbios_heartbeat_notify(struct work_struct *work)
@@ -135,11 +134,9 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
if (!info)
- goto out;
+ return;
schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
-
-out:
kfree(info);
}
@@ -148,7 +145,6 @@ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
union acpi_object *info;
struct apmf_fan_idx args;
struct acpi_buffer params;
- int err = 0;
args.size = sizeof(args);
args.fan_ctl_mode = manual;
@@ -158,14 +154,11 @@ int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
params.pointer = (void *)&args;
info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
- if (!info) {
- err = -EIO;
- goto out;
- }
+ if (!info)
+ return -EIO;
-out:
kfree(info);
- return err;
+ return 0;
}
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
@@ -286,6 +279,43 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
return 0;
}
+static acpi_status apmf_walk_resources(struct acpi_resource *res, void *data)
+{
+ struct amd_pmf_dev *dev = data;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ dev->policy_addr = res->data.address64.address.minimum;
+ dev->policy_sz = res->data.address64.address.address_length;
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ dev->policy_addr = res->data.fixed_memory32.address;
+ dev->policy_sz = res->data.fixed_memory32.address_length;
+ break;
+ }
+
+ if (!dev->policy_addr || dev->policy_sz > POLICY_BUF_MAX_SZ || dev->policy_sz == 0) {
+ pr_err("Incorrect Policy params, possibly a SBIOS bug\n");
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+ acpi_status status;
+
+ status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 78ed3ee22..4f734e049 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -251,29 +251,37 @@ static const struct pci_device_id pmf_pci_ids[] = {
{ }
};
-static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
+int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
{
u64 phys_addr;
u32 hi, low;
+ /* Get Metrics Table Address */
+ if (alloc_buffer) {
+ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ if (!dev->buf)
+ return -ENOMEM;
+ }
+
phys_addr = virt_to_phys(dev->buf);
hi = phys_addr >> 32;
low = phys_addr & GENMASK(31, 0);
amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+
+ return 0;
}
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
- /* Get Metrics Table Address */
- dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
- if (!dev->buf)
- return -ENOMEM;
+ int ret;
INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
- amd_pmf_set_dram_addr(dev);
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ return ret;
/*
* Start collecting the metrics data after a small delay
@@ -284,17 +292,34 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
return 0;
}
+static int amd_pmf_suspend_handler(struct device *dev)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ if (pdev->smart_pc_enabled)
+ cancel_delayed_work_sync(&pdev->pb_work);
+
+ return 0;
+}
+
static int amd_pmf_resume_handler(struct device *dev)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (pdev->buf) {
+ ret = amd_pmf_set_dram_addr(pdev, false);
+ if (ret)
+ return ret;
+ }
- if (pdev->buf)
- amd_pmf_set_dram_addr(pdev);
+ if (pdev->smart_pc_enabled)
+ schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
return 0;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler);
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
@@ -309,13 +334,18 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
- /* Enable Auto Mode */
+ amd_pmf_init_smart_pc(dev);
+ if (dev->smart_pc_enabled) {
+ dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
+ /* If Smart PC is enabled, no need to check for other features */
+ return;
+ }
+
if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_init_auto_mode(dev);
dev_dbg(dev->dev, "Auto Mode Init done\n");
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
- /* Enable Cool n Quiet Framework (CnQF) */
ret = amd_pmf_init_cnqf(dev);
if (ret)
dev_warn(dev->dev, "CnQF Init failed\n");
@@ -330,7 +360,9 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
amd_pmf_deinit_sps(dev);
}
- if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ if (dev->smart_pc_enabled) {
+ amd_pmf_deinit_smart_pc(dev);
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
@@ -408,9 +440,9 @@ static int amd_pmf_probe(struct platform_device *pdev)
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
+ amd_pmf_dbgfs_register(dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
- amd_pmf_dbgfs_register(dev);
dev_info(dev->dev, "registered PMF device successfully\n");
@@ -448,3 +480,4 @@ module_platform_driver(amd_pmf_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
+MODULE_SOFTDEP("pre: amdtee");
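
The new suspend handler pairs with the resume-side rescheduling above: deferred policy work must be quiesced before s2idle and re-armed afterwards, alongside re-programming the SMU DRAM address that does not survive suspend. The generic shape of that pairing, as a sketch with hypothetical names (struct example_dev with a delayed_work member is assumed):

static int example_suspend(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	cancel_delayed_work_sync(&edev->work);	/* quiesce before s2idle */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	schedule_delayed_work(&edev->work, msecs_to_jiffies(2000));
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm, example_suspend, example_resume);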
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index deba88e6e..66cae1cca 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -14,6 +14,11 @@
#include <linux/acpi.h>
#include <linux/platform_profile.h>
+#define POLICY_BUF_MAX_SZ 0x4b000
+#define POLICY_SIGN_COOKIE 0x31535024
+#define POLICY_COOKIE_OFFSET 0x10
+#define POLICY_COOKIE_LEN 0x14
+
/* APMF Functions */
#define APMF_FUNC_VERIFY_INTERFACE 0
#define APMF_FUNC_GET_SYS_PARAMS 1
@@ -44,6 +49,7 @@
#define GET_STT_MIN_LIMIT 0x1F
#define GET_STT_LIMIT_APU 0x20
#define GET_STT_LIMIT_HS2 0x21
+#define SET_P3T 0x23 /* P3T: Peak Package Power Limit */
/* OS slider update notification */
#define DC_BEST_PERF 0
@@ -59,6 +65,24 @@
#define ARG_NONE 0
#define AVG_SAMPLE_SIZE 3
+/* Policy Actions */
+#define PMF_POLICY_SPL 2
+#define PMF_POLICY_SPPT 3
+#define PMF_POLICY_FPPT 4
+#define PMF_POLICY_SPPT_APU_ONLY 5
+#define PMF_POLICY_STT_MIN 6
+#define PMF_POLICY_STT_SKINTEMP_APU 7
+#define PMF_POLICY_STT_SKINTEMP_HS2 8
+#define PMF_POLICY_SYSTEM_STATE 9
+#define PMF_POLICY_P3T 38
+
+/* TA macros */
+#define PMF_TA_IF_VERSION_MAJOR 1
+#define TA_PMF_ACTION_MAX 32
+#define TA_PMF_UNDO_MAX 8
+#define TA_OUTPUT_RESERVED_MEM 906
+#define MAX_OPERATION_PARAMS 4
+
/* AMD PMF BIOS interfaces */
struct apmf_verify_interface {
u16 size;
@@ -129,6 +153,21 @@ struct smu_pmf_metrics {
u16 infra_gfx_maxfreq; /* in MHz */
u16 skin_temp; /* in centi-Celsius */
u16 device_state;
+ u16 curtemp; /* in centi-Celsius */
+ u16 filter_alpha_value;
+ u16 avg_gfx_clkfrequency;
+ u16 avg_fclk_frequency;
+ u16 avg_gfx_activity;
+ u16 avg_socclk_frequency;
+ u16 avg_vclk_frequency;
+ u16 avg_vcn_activity;
+ u16 avg_dram_reads;
+ u16 avg_dram_writes;
+ u16 avg_socket_power;
+ u16 avg_core_power[2];
+ u16 avg_core_c0residency[16];
+ u16 spare1;
+ u32 metrics_counter;
} __packed;
enum amd_stt_skin_temp {
@@ -179,6 +218,19 @@ struct amd_pmf_dev {
bool cnqf_enabled;
bool cnqf_supported;
struct notifier_block pwr_src_notifier;
+ /* Smart PC solution builder */
+ struct dentry *esbin;
+ unsigned char *policy_buf;
+ u32 policy_sz;
+ struct tee_context *tee_ctx;
+ struct tee_shm *fw_shm_pool;
+ u32 session_id;
+ void *shbuf;
+ struct delayed_work pb_work;
+ struct pmf_action_table *prev_data;
+ u64 policy_addr;
+ void *policy_base;
+ bool smart_pc_enabled;
};
struct apmf_sps_prop_granular {
@@ -389,6 +441,140 @@ struct apmf_dyn_slider_output {
struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
} __packed;
+/* Smart PC - TA internals */
+enum system_state {
+ SYSTEM_STATE_S0i3,
+ SYSTEM_STATE_S4,
+ SYSTEM_STATE_SCREEN_LOCK,
+ SYSTEM_STATE_MAX,
+};
+
+enum ta_slider {
+ TA_BEST_BATTERY,
+ TA_BETTER_BATTERY,
+ TA_BETTER_PERFORMANCE,
+ TA_BEST_PERFORMANCE,
+ TA_MAX,
+};
+
+/* Command ids for TA communication */
+enum ta_pmf_command {
+ TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE,
+ TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES,
+};
+
+enum ta_pmf_error_type {
+ TA_PMF_TYPE_SUCCESS,
+ TA_PMF_ERROR_TYPE_GENERIC,
+ TA_PMF_ERROR_TYPE_CRYPTO,
+ TA_PMF_ERROR_TYPE_CRYPTO_VALIDATE,
+ TA_PMF_ERROR_TYPE_CRYPTO_VERIFY_OEM,
+ TA_PMF_ERROR_TYPE_POLICY_BUILDER,
+ TA_PMF_ERROR_TYPE_PB_CONVERT,
+ TA_PMF_ERROR_TYPE_PB_SETUP,
+ TA_PMF_ERROR_TYPE_PB_ENACT,
+ TA_PMF_ERROR_TYPE_ASD_GET_DEVICE_INFO,
+ TA_PMF_ERROR_TYPE_ASD_GET_DEVICE_PCIE_INFO,
+ TA_PMF_ERROR_TYPE_SYS_DRV_FW_VALIDATION,
+ TA_PMF_ERROR_TYPE_MAX,
+};
+
+struct pmf_action_table {
+ enum system_state system_state;
+ u32 spl; /* in mW */
+ u32 sppt; /* in mW */
+ u32 sppt_apuonly; /* in mW */
+ u32 fppt; /* in mW */
+ u32 stt_minlimit; /* in mW */
+ u32 stt_skintemp_apu; /* in C */
+ u32 stt_skintemp_hs2; /* in C */
+ u32 p3t_limit; /* in mW */
+};
+
+/* Input conditions */
+struct ta_pmf_condition_info {
+ u32 power_source;
+ u32 bat_percentage;
+ u32 power_slider;
+ u32 lid_state;
+ bool user_present;
+ u32 rsvd1[2];
+ u32 monitor_count;
+ u32 rsvd2[2];
+ u32 bat_design;
+ u32 full_charge_capacity;
+ int drain_rate;
+ bool user_engaged;
+ u32 device_state;
+ u32 socket_power;
+ u32 skin_temperature;
+ u32 rsvd3[5];
+ u32 ambient_light;
+ u32 length;
+ u32 avg_c0residency;
+ u32 max_c0residency;
+ u32 s0i3_entry;
+ u32 gfx_busy;
+ u32 rsvd4[7];
+ bool camera_state;
+ u32 workload_type;
+ u32 display_type;
+ u32 display_state;
+ u32 rsvd5[150];
+};
+
+struct ta_pmf_load_policy_table {
+ u32 table_size;
+ u8 table[POLICY_BUF_MAX_SZ];
+};
+
+/* TA initialization params */
+struct ta_pmf_init_table {
+ u32 frequency; /* SMU sampling frequency */
+ bool validate;
+ bool sku_check;
+ bool metadata_macrocheck;
+ struct ta_pmf_load_policy_table policies_table;
+};
+
+/* Everything the TA needs to Enact Policies */
+struct ta_pmf_enact_table {
+ struct ta_pmf_condition_info ev_info;
+ u32 name;
+};
+
+struct ta_pmf_action {
+ u32 action_index;
+ u32 value;
+};
+
+/* Output actions from TA */
+struct ta_pmf_enact_result {
+ u32 actions_count;
+ struct ta_pmf_action actions_list[TA_PMF_ACTION_MAX];
+ u32 undo_count;
+ struct ta_pmf_action undo_list[TA_PMF_UNDO_MAX];
+};
+
+union ta_pmf_input {
+ struct ta_pmf_enact_table enact_table;
+ struct ta_pmf_init_table init_table;
+};
+
+union ta_pmf_output {
+ struct ta_pmf_enact_result policy_apply_table;
+ u32 rsvd[TA_OUTPUT_RESERVED_MEM];
+};
+
+struct ta_pmf_shared_memory {
+ int command_id;
+ int resp_id;
+ u32 pmf_result;
+ u32 if_version;
+ union ta_pmf_output pmf_output;
+ union ta_pmf_input pmf_input;
+};
+
/* Core Layer */
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
@@ -398,6 +584,7 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
int amd_pmf_get_power_source(void);
int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
+int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
@@ -409,7 +596,9 @@ int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
bool is_pprof_balanced(struct amd_pmf_dev *pmf);
int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev);
+const char *amd_pmf_source_as_str(unsigned int state);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
@@ -433,4 +622,13 @@ void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
extern const struct attribute_group cnqf_feature_attribute_group;
+/* Smart PC builder Layer */
+int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev);
+int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
+
+/* Smart PC - TA interfaces */
+void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+
#endif /* PMF_H */
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
new file mode 100644
index 000000000..a3dec14c3
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver - Smart PC Capabilities
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Patil Rajesh Reddy <Patil.Reddy@amd.com>
+ */
+
+#include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
+#include <linux/power_supply.h>
+#include <linux/units.h>
+#include "pmf.h"
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static const char *ta_slider_as_str(unsigned int state)
+{
+ switch (state) {
+ case TA_BEST_PERFORMANCE:
+ return "PERFORMANCE";
+ case TA_BETTER_PERFORMANCE:
+ return "BALANCED";
+ case TA_BEST_BATTERY:
+ return "POWER_SAVER";
+ default:
+ return "Unknown TA Slider State";
+ }
+}
+
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ dev_dbg(dev->dev, "==== TA inputs START ====\n");
+ dev_dbg(dev->dev, "Slider State: %s\n", ta_slider_as_str(in->ev_info.power_slider));
+ dev_dbg(dev->dev, "Power Source: %s\n", amd_pmf_source_as_str(in->ev_info.power_source));
+ dev_dbg(dev->dev, "Battery Percentage: %u\n", in->ev_info.bat_percentage);
+ dev_dbg(dev->dev, "Designed Battery Capacity: %u\n", in->ev_info.bat_design);
+ dev_dbg(dev->dev, "Fully Charged Capacity: %u\n", in->ev_info.full_charge_capacity);
+ dev_dbg(dev->dev, "Drain Rate: %d\n", in->ev_info.drain_rate);
+ dev_dbg(dev->dev, "Socket Power: %u\n", in->ev_info.socket_power);
+ dev_dbg(dev->dev, "Skin Temperature: %u\n", in->ev_info.skin_temperature);
+ dev_dbg(dev->dev, "Avg C0 Residency: %u\n", in->ev_info.avg_c0residency);
+ dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
+ dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
+ dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+ dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+ dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
+ dev_dbg(dev->dev, "==== TA inputs END ====\n");
+}
+#else
+void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
+#endif
+
+static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ u16 max, avg = 0;
+ int i;
+
+ memset(dev->buf, 0, sizeof(dev->m_table));
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
+
+ in->ev_info.socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+ in->ev_info.skin_temperature = dev->m_table.skin_temp;
+
+ /* Get the avg and max C0 residency of all the cores */
+ max = dev->m_table.avg_core_c0residency[0];
+ for (i = 0; i < ARRAY_SIZE(dev->m_table.avg_core_c0residency); i++) {
+ avg += dev->m_table.avg_core_c0residency[i];
+ if (dev->m_table.avg_core_c0residency[i] > max)
+ max = dev->m_table.avg_core_c0residency[i];
+ }
+
+ avg = DIV_ROUND_CLOSEST(avg, ARRAY_SIZE(dev->m_table.avg_core_c0residency));
+ in->ev_info.avg_c0residency = avg;
+ in->ev_info.max_c0residency = max;
+ in->ev_info.gfx_busy = dev->m_table.avg_gfx_activity;
+}
+
+static const char * const pmf_battery_supply_name[] = {
+ "BATT",
+ "BAT0",
+};
+
+static int amd_pmf_get_battery_prop(enum power_supply_property prop)
+{
+ union power_supply_propval value;
+ struct power_supply *psy;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pmf_battery_supply_name); i++) {
+ psy = power_supply_get_by_name(pmf_battery_supply_name[i]);
+ if (!psy)
+ continue;
+
+ ret = power_supply_get_property(psy, prop, &value);
+ if (ret) {
+ power_supply_put(psy);
+ return ret;
+ }
+ }
+
+ return value.intval;
+}
+
+static int amd_pmf_get_battery_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ int val;
+
+ val = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_PRESENT);
+ if (val < 0)
+ return val;
+ if (val != 1)
+ return -ENODEV;
+
+ in->ev_info.bat_percentage = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_CAPACITY);
+ /* all values are in mWh */
+ in->ev_info.bat_design = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN) /
+ MILLIWATT_PER_WATT;
+ in->ev_info.full_charge_capacity = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_ENERGY_FULL) /
+ MILLIWATT_PER_WATT;
+ in->ev_info.drain_rate = amd_pmf_get_battery_prop(POWER_SUPPLY_PROP_POWER_NOW) /
+ MILLIWATT_PER_WATT;
+
+ return 0;
+}
+
+static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ int val;
+
+ switch (dev->current_profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ val = TA_BEST_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ val = TA_BETTER_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ val = TA_BEST_BATTERY;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown Platform Profile.\n");
+ return -EOPNOTSUPP;
+ }
+ in->ev_info.power_slider = val;
+
+ return 0;
+}
+
+static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ struct amd_sfh_info sfh_info;
+ int ret;
+
+ /* Get ALS data */
+ ret = amd_get_sfh_info(&sfh_info, MT_ALS);
+ if (ret)
+ return ret;
+
+ in->ev_info.ambient_light = sfh_info.ambient_light;
+
+ /* Get HPD data */
+ ret = amd_get_sfh_info(&sfh_info, MT_HPD);
+ if (ret)
+ return ret;
+
+ switch (sfh_info.user_present) {
+ case SFH_NOT_DETECTED:
+ in->ev_info.user_present = 0xff; /* assume no sensors connected */
+ break;
+ case SFH_USER_PRESENT:
+ in->ev_info.user_present = 1;
+ break;
+ case SFH_USER_AWAY:
+ in->ev_info.user_present = 0;
+ break;
+ }
+
+ return 0;
+}
+
+void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ /* TA side lid open is 1 and close is 0, hence the ! here */
+ in->ev_info.lid_state = !acpi_lid_open();
+ in->ev_info.power_source = amd_pmf_get_power_source();
+ amd_pmf_get_smu_info(dev, in);
+ amd_pmf_get_battery_info(dev, in);
+ amd_pmf_get_slider_info(dev, in);
+ amd_pmf_get_sensor_info(dev, in);
+}
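
As a quick sanity check of the averaging done in amd_pmf_get_smu_info() above, here is a standalone userspace sketch of the same avg/max computation, with DIV_ROUND_CLOSEST re-declared locally and purely hypothetical residency samples:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
/* matches the kernel macro for unsigned operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        /* hypothetical per-core C0 residency samples */
        unsigned short c0[] = { 12, 40, 7, 93, 55, 21, 34, 60 };
        unsigned short max = c0[0], avg = 0;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(c0); i++) {
                avg += c0[i];
                if (c0[i] > max)
                        max = c0[i];
        }
        avg = DIV_ROUND_CLOSEST(avg, ARRAY_SIZE(c0));
        printf("avg=%u max=%u\n", avg, max); /* avg=40 max=93 */
        return 0;
}
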
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index a70e67749..33e23e25c 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -27,7 +27,7 @@ static const char *slider_as_str(unsigned int state)
}
}
-static const char *source_as_str(unsigned int state)
+const char *amd_pmf_source_as_str(unsigned int state)
{
switch (state) {
case POWER_SOURCE_AC:
@@ -47,7 +47,8 @@ static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *dat
for (i = 0; i < POWER_SOURCE_MAX; i++) {
for (j = 0; j < POWER_MODE_MAX; j++) {
- pr_debug("--- Source:%s Mode:%s ---\n", source_as_str(i), slider_as_str(j));
+ pr_debug("--- Source:%s Mode:%s ---\n", amd_pmf_source_as_str(i),
+ slider_as_str(j));
pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
new file mode 100644
index 000000000..4ebfe0f5a
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver - TEE Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "pmf.h"
+
+#define MAX_TEE_PARAM 4
+
+/* Policy binary actions sampling frequency (in ms) */
+static int pb_actions_ms = MSEC_PER_SEC;
+/* Sideload policy binaries to debug policy failures */
+static bool pb_side_load;
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+module_param(pb_actions_ms, int, 0644);
+MODULE_PARM_DESC(pb_actions_ms, "Policy binary actions sampling frequency (default = 1000ms)");
+module_param(pb_side_load, bool, 0444);
+MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries to debug policy failures");
+#endif
+
+static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d,
+ 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43);
+
+static const char *amd_pmf_uevent_as_str(unsigned int state)
+{
+ switch (state) {
+ case SYSTEM_STATE_S0i3:
+ return "S0i3";
+ case SYSTEM_STATE_S4:
+ return "S4";
+ case SYSTEM_STATE_SCREEN_LOCK:
+ return "SCREEN_LOCK";
+ default:
+ return "Unknown Smart PC event";
+ }
+}
+
+static void amd_pmf_prepare_args(struct amd_pmf_dev *dev, int cmd,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ memset(arg, 0, sizeof(*arg));
+ memset(param, 0, MAX_TEE_PARAM * sizeof(*param));
+
+ arg->func = cmd;
+ arg->session = dev->session_id;
+ arg->num_params = MAX_TEE_PARAM;
+
+ /* Fill invoke cmd params */
+ param[0].u.memref.size = sizeof(struct ta_pmf_shared_memory);
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = dev->fw_shm_pool;
+ param[0].u.memref.shm_offs = 0;
+}
+
+static int amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event)
+{
+ char *envp[2] = {};
+
+ envp[0] = kasprintf(GFP_KERNEL, "EVENT_ID=%d", event);
+ if (!envp[0])
+ return -ENOMEM;
+
+ kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(envp[0]);
+ return 0;
+}
+
+static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out)
+{
+ u32 val;
+ int idx;
+
+ for (idx = 0; idx < out->actions_count; idx++) {
+ val = out->actions_list[idx].value;
+ switch (out->actions_list[idx].action_index) {
+ case PMF_POLICY_SPL:
+ if (dev->prev_data->spl != val) {
+ amd_pmf_send_cmd(dev, SET_SPL, false, val, NULL);
+ dev_dbg(dev->dev, "update SPL: %u\n", val);
+ dev->prev_data->spl = val;
+ }
+ break;
+
+ case PMF_POLICY_SPPT:
+ if (dev->prev_data->sppt != val) {
+ amd_pmf_send_cmd(dev, SET_SPPT, false, val, NULL);
+ dev_dbg(dev->dev, "update SPPT: %u\n", val);
+ dev->prev_data->sppt = val;
+ }
+ break;
+
+ case PMF_POLICY_FPPT:
+ if (dev->prev_data->fppt != val) {
+ amd_pmf_send_cmd(dev, SET_FPPT, false, val, NULL);
+ dev_dbg(dev->dev, "update FPPT: %u\n", val);
+ dev->prev_data->fppt = val;
+ }
+ break;
+
+ case PMF_POLICY_SPPT_APU_ONLY:
+ if (dev->prev_data->sppt_apuonly != val) {
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, val, NULL);
+ dev_dbg(dev->dev, "update SPPT_APU_ONLY: %u\n", val);
+ dev->prev_data->sppt_apuonly = val;
+ }
+ break;
+
+ case PMF_POLICY_STT_MIN:
+ if (dev->prev_data->stt_minlimit != val) {
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, val, NULL);
+ dev_dbg(dev->dev, "update STT_MIN: %u\n", val);
+ dev->prev_data->stt_minlimit = val;
+ }
+ break;
+
+ case PMF_POLICY_STT_SKINTEMP_APU:
+ if (dev->prev_data->stt_skintemp_apu != val) {
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, val, NULL);
+ dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val);
+ dev->prev_data->stt_skintemp_apu = val;
+ }
+ break;
+
+ case PMF_POLICY_STT_SKINTEMP_HS2:
+ if (dev->prev_data->stt_skintemp_hs2 != val) {
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, val, NULL);
+ dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val);
+ dev->prev_data->stt_skintemp_hs2 = val;
+ }
+ break;
+
+ case PMF_POLICY_P3T:
+ if (dev->prev_data->p3t_limit != val) {
+ amd_pmf_send_cmd(dev, SET_P3T, false, val, NULL);
+ dev_dbg(dev->dev, "update P3T: %u\n", val);
+ dev->prev_data->p3t_limit = val;
+ }
+ break;
+
+ case PMF_POLICY_SYSTEM_STATE:
+ amd_pmf_update_uevents(dev, val);
+ dev_dbg(dev->dev, "update SYSTEM_STATE: %s\n",
+ amd_pmf_uevent_as_str(val));
+ break;
+ }
+ }
+}
+
+static int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev)
+{
+ struct ta_pmf_shared_memory *ta_sm = NULL;
+ struct ta_pmf_enact_result *out = NULL;
+ struct ta_pmf_enact_table *in = NULL;
+ struct tee_param param[MAX_TEE_PARAM];
+ struct tee_ioctl_invoke_arg arg;
+ int ret = 0;
+
+ if (!dev->tee_ctx)
+ return -ENODEV;
+
+ memset(dev->shbuf, 0, dev->policy_sz);
+ ta_sm = dev->shbuf;
+ out = &ta_sm->pmf_output.policy_apply_table;
+ in = &ta_sm->pmf_input.enact_table;
+
+ memset(ta_sm, 0, sizeof(*ta_sm));
+ ta_sm->command_id = TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES;
+ ta_sm->if_version = PMF_TA_IF_VERSION_MAJOR;
+
+ amd_pmf_populate_ta_inputs(dev, in);
+ amd_pmf_prepare_args(dev, TA_PMF_COMMAND_POLICY_BUILDER_ENACT_POLICIES, &arg, param);
+
+ ret = tee_client_invoke_func(dev->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(dev->dev, "TEE enact cmd failed. err: %x, ret:%d\n", arg.ret, ret);
+ /* don't report success when the TA flagged a failure in arg.ret */
+ return ret ?: -EINVAL;
+ }
+
+ if (ta_sm->pmf_result == TA_PMF_TYPE_SUCCESS && out->actions_count) {
+ amd_pmf_dump_ta_inputs(dev, in);
+ dev_dbg(dev->dev, "action count:%u result:%x\n", out->actions_count,
+ ta_sm->pmf_result);
+ amd_pmf_apply_policies(dev, out);
+ }
+
+ return 0;
+}
+
+static int amd_pmf_invoke_cmd_init(struct amd_pmf_dev *dev)
+{
+ struct ta_pmf_shared_memory *ta_sm = NULL;
+ struct tee_param param[MAX_TEE_PARAM];
+ struct ta_pmf_init_table *in = NULL;
+ struct tee_ioctl_invoke_arg arg;
+ int ret = 0;
+
+ if (!dev->tee_ctx) {
+ dev_err(dev->dev, "Failed to get TEE context\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(dev->dev, "Policy Binary size: %u bytes\n", dev->policy_sz);
+ memset(dev->shbuf, 0, dev->policy_sz);
+ ta_sm = dev->shbuf;
+ in = &ta_sm->pmf_input.init_table;
+
+ ta_sm->command_id = TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE;
+ ta_sm->if_version = PMF_TA_IF_VERSION_MAJOR;
+
+ in->metadata_macrocheck = false;
+ in->sku_check = false;
+ in->validate = true;
+ in->frequency = pb_actions_ms;
+ in->policies_table.table_size = dev->policy_sz;
+
+ memcpy(in->policies_table.table, dev->policy_buf, dev->policy_sz);
+ amd_pmf_prepare_args(dev, TA_PMF_COMMAND_POLICY_BUILDER_INITIALIZE, &arg, param);
+
+ ret = tee_client_invoke_func(dev->tee_ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ dev_err(dev->dev, "Failed to invoke TEE init cmd. err: %x, ret:%d\n", arg.ret, ret);
+ return ret ?: -EINVAL;
+ }
+
+ return ta_sm->pmf_result;
+}
+
+static void amd_pmf_invoke_cmd(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, pb_work.work);
+
+ amd_pmf_invoke_cmd_enact(dev);
+ schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms));
+}
+
+static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
+{
+ u32 cookie, length;
+ int res;
+
+ cookie = *(u32 *)(dev->policy_buf + POLICY_COOKIE_OFFSET);
+ length = *(u32 *)(dev->policy_buf + POLICY_COOKIE_LEN);
+
+ if (cookie != POLICY_SIGN_COOKIE || !length) {
+ dev_dbg(dev->dev, "cookie doesn't match\n");
+ return -EINVAL;
+ }
+
+ /* Update the actual length */
+ dev->policy_sz = length + 512;
+ res = amd_pmf_invoke_cmd_init(dev);
+ if (res == TA_PMF_TYPE_SUCCESS) {
+ /* Now it's safe to announce that Smart PC is enabled */
+ dev->smart_pc_enabled = true;
+ /*
+ * Start collecting the data from TA FW after a small delay
+ * or else, we might end up getting stale values.
+ */
+ schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
+ } else {
+ dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ dev->smart_pc_enabled = false;
+ return res;
+ }
+
+ return 0;
+}
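
amd_pmf_start_policy_engine() above reads the cookie and length with direct u32 casts into the policy buffer. A hedged standalone sketch of the same validation using memcpy(), which also avoids any unaligned-access concerns; the offsets and magic value here are illustrative placeholders, not the driver's real POLICY_* constants:

#include <stdint.h>
#include <string.h>

#define DEMO_COOKIE_OFFSET 0            /* placeholder for POLICY_COOKIE_OFFSET */
#define DEMO_LENGTH_OFFSET 4            /* placeholder for POLICY_COOKIE_LEN */
#define DEMO_SIGN_COOKIE 0x31535024u    /* made-up magic */

/* returns 0 when the buffer carries a plausible signed policy blob */
static int demo_validate_policy(const uint8_t *buf, size_t sz)
{
        uint32_t cookie, length;

        if (sz < DEMO_LENGTH_OFFSET + sizeof(length))
                return -1;

        memcpy(&cookie, buf + DEMO_COOKIE_OFFSET, sizeof(cookie));
        memcpy(&length, buf + DEMO_LENGTH_OFFSET, sizeof(length));

        return (cookie == DEMO_SIGN_COOKIE && length != 0) ? 0 : -1;
}
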
+
+#ifdef CONFIG_AMD_PMF_DEBUG
+static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev)
+{
+ print_hex_dump_debug("(pb): ", DUMP_PREFIX_OFFSET, 16, 1, dev->policy_buf,
+ dev->policy_sz, false);
+}
+
+static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
+ size_t length, loff_t *pos)
+{
+ struct amd_pmf_dev *dev = filp->private_data;
+ unsigned char *new_policy_buf;
+ int ret;
+
+ /* Policy binary size cannot exceed POLICY_BUF_MAX_SZ */
+ if (length > POLICY_BUF_MAX_SZ || length == 0)
+ return -EINVAL;
+
+ /* allocate a fresh buffer sized to the new policy binary */
+ new_policy_buf = kzalloc(length, GFP_KERNEL);
+ if (!new_policy_buf)
+ return -ENOMEM;
+
+ if (copy_from_user(new_policy_buf, buf, length)) {
+ kfree(new_policy_buf);
+ return -EFAULT;
+ }
+
+ kfree(dev->policy_buf);
+ dev->policy_buf = new_policy_buf;
+ dev->policy_sz = length;
+
+ amd_pmf_hex_dump_pb(dev);
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret)
+ return -EINVAL;
+
+ return length;
+}
+
+static const struct file_operations pb_fops = {
+ .write = amd_pmf_get_pb_data,
+ .open = simple_open,
+};
+
+static void amd_pmf_open_pb(struct amd_pmf_dev *dev, struct dentry *debugfs_root)
+{
+ dev->esbin = debugfs_create_dir("pb", debugfs_root);
+ debugfs_create_file("update_policy", 0644, dev->esbin, dev, &pb_fops);
+}
+
+static void amd_pmf_remove_pb(struct amd_pmf_dev *dev)
+{
+ debugfs_remove_recursive(dev->esbin);
+}
+#else
+static void amd_pmf_open_pb(struct amd_pmf_dev *dev, struct dentry *debugfs_root) {}
+static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
+static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
+#endif
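
The sideload path above follows the stock debugfs write-handler pattern: simple_open() stashes the device in filp->private_data and the write handler copies the whole blob into kernel memory. A minimal skeleton of that pattern for a hypothetical module (all names are placeholders):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *filp, const char __user *ubuf,
                          size_t len, loff_t *pos)
{
        char *kbuf;

        /* pull the entire user buffer into kernel memory in one shot */
        kbuf = memdup_user(ubuf, len);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);

        /*
         * filp->private_data (set by simple_open()) identifies the device;
         * hand kbuf over to it here, as the sideload path above does.
         */
        kfree(kbuf);
        return len;
}

static const struct file_operations demo_fops = {
        .open   = simple_open,
        .write  = demo_write,
};

/* at init: debugfs_create_file("demo_blob", 0200, parent_dir, dev, &demo_fops); */
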
+
+static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ return ver->impl_id == TEE_IMPL_ID_AMDTEE;
+}
+
+static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
+{
+ struct tee_ioctl_open_session_arg sess_arg = {};
+ int rc;
+
+ export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if (rc < 0 || sess_arg.ret != 0) {
+ pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc);
+ return rc ?: -EINVAL;
+ }
+
+ *id = sess_arg.session;
+
+ return rc;
+}
+
+static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
+{
+ u32 size;
+ int ret;
+
+ dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, NULL, NULL);
+ if (IS_ERR(dev->tee_ctx)) {
+ dev_err(dev->dev, "Failed to open TEE context\n");
+ return PTR_ERR(dev->tee_ctx);
+ }
+
+ ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id);
+ if (ret) {
+ dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
+ ret = -EINVAL;
+ goto out_ctx;
+ }
+
+ size = sizeof(struct ta_pmf_shared_memory) + dev->policy_sz;
+ dev->fw_shm_pool = tee_shm_alloc_kernel_buf(dev->tee_ctx, size);
+ if (IS_ERR(dev->fw_shm_pool)) {
+ dev_err(dev->dev, "Failed to alloc TEE shared memory\n");
+ ret = PTR_ERR(dev->fw_shm_pool);
+ goto out_sess;
+ }
+
+ dev->shbuf = tee_shm_get_va(dev->fw_shm_pool, 0);
+ if (IS_ERR(dev->shbuf)) {
+ dev_err(dev->dev, "Failed to get TEE virtual address\n");
+ ret = PTR_ERR(dev->shbuf);
+ goto out_shm;
+ }
+ dev_dbg(dev->dev, "TEE init done\n");
+
+ return 0;
+
+out_shm:
+ tee_shm_free(dev->fw_shm_pool);
+out_sess:
+ tee_client_close_session(dev->tee_ctx, dev->session_id);
+out_ctx:
+ tee_client_close_context(dev->tee_ctx);
+
+ return ret;
+}
+
+static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
+{
+ tee_shm_free(dev->fw_shm_pool);
+ tee_client_close_session(dev->tee_ctx, dev->session_id);
+ tee_client_close_context(dev->tee_ctx);
+}
+
+int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
+{
+ int ret;
+
+ ret = apmf_check_smart_pc(dev);
+ if (ret) {
+ /*
+ * Don't treat a missing Smart PC bit in the BIOS as a hard driver
+ * failure. Returning -ENODEV lets the caller fall back to the
+ * static slider (if enabled), so the user still gets some amount
+ * of power savings.
+ */
+ dev_info(dev->dev, "PMF Smart PC not advertised in BIOS: %d\n", ret);
+ return -ENODEV;
+ }
+
+ ret = amd_pmf_tee_init(dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ goto error;
+
+ dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+ if (!dev->policy_base) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+ amd_pmf_hex_dump_pb(dev);
+
+ dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
+ if (!dev->prev_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret)
+ goto error;
+
+ if (pb_side_load)
+ amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+ return 0;
+
+error:
+ amd_pmf_deinit_smart_pc(dev);
+
+ return ret;
+}
+
+void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
+{
+ if (pb_side_load && dev->esbin)
+ amd_pmf_remove_pb(dev);
+
+ cancel_delayed_work_sync(&dev->pb_work);
+ kfree(dev->prev_data);
+ dev->prev_data = NULL;
+ kfree(dev->policy_buf);
+ dev->policy_buf = NULL;
+ kfree(dev->buf);
+ dev->buf = NULL;
+ amd_pmf_tee_deinit(dev);
+}
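
The PMF_POLICY_SYSTEM_STATE action above reaches userspace as a KOBJ_CHANGE uevent carrying an EVENT_ID=<n> variable. A standalone sketch of a listener on the kernel uevent netlink multicast group; the EVENT_ID filter is illustrative and not specific to PMF devices:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
        struct sockaddr_nl sa = {
                .nl_family = AF_NETLINK,
                .nl_groups = 1, /* kernel uevent multicast group */
        };
        char buf[4096];
        ssize_t len;
        int fd;

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
        if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                return 1;

        while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
                char *p;

                buf[len] = '\0';
                /* the payload is a series of NUL-separated KEY=value strings */
                for (p = buf; p < buf + len; p += strlen(p) + 1)
                        if (!strncmp(p, "EVENT_ID=", 9))
                                printf("got %s\n", p);
        }
        close(fd);
        return 0;
}
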
diff --git a/drivers/platform/x86/amd/wbrf.c b/drivers/platform/x86/amd/wbrf.c
new file mode 100644
index 000000000..dd197b3ae
--- /dev/null
+++ b/drivers/platform/x86/amd/wbrf.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wifi Frequency Band Management Interface
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_amd_wbrf.h>
+
+/*
+ * Functions bit vector for WBRF method
+ *
+ * Bit 0: WBRF supported.
+ * Bit 1: Function 1 (Add / Remove frequency) is supported.
+ * Bit 2: Function 2 (Get frequency list) is supported.
+ */
+#define WBRF_ENABLED 0x0
+#define WBRF_RECORD 0x1
+#define WBRF_RETRIEVE 0x2
+
+#define WBRF_REVISION 0x1
+
+/*
+ * The data structure used for WBRF_RETRIEVE is not naturally aligned,
+ * and unfortunately the design has already been settled.
+ */
+struct amd_wbrf_ranges_out {
+ u32 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+} __packed;
+
+static const guid_t wifi_acpi_dsm_guid =
+ GUID_INIT(0x7b7656cf, 0xdc3d, 0x4c1c,
+ 0x83, 0xe9, 0x66, 0xe7, 0x21, 0xde, 0x30, 0x70);
+
+/*
+ * Used to notify consumers (currently the amdgpu driver) when the
+ * wifi frequency band in use changes.
+ */
+static BLOCKING_NOTIFIER_HEAD(wbrf_chain_head);
+
+static int wbrf_record(struct acpi_device *adev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ union acpi_object argv4;
+ union acpi_object *tmp;
+ union acpi_object *obj;
+ u32 num_of_ranges = 0;
+ u32 num_of_elements;
+ u32 arg_idx = 0;
+ int ret;
+ u32 i;
+
+ if (!in)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(in->band_list); i++) {
+ if (in->band_list[i].start && in->band_list[i].end)
+ num_of_ranges++;
+ }
+
+ /*
+ * The num_of_ranges value in the "in" object supplied by the
+ * caller must equal the number of populated entries in its
+ * band_list array.
+ */
+ if (num_of_ranges != in->num_of_ranges)
+ return -EINVAL;
+
+ /*
+ * Every input frequency band comes with two end points (start/end),
+ * each of which counts as one element. The range count and the
+ * action type count as one element each.
+ * So, the total element count = 2 * num_of_ranges + 1 + 1.
+ */
+ num_of_elements = 2 * num_of_ranges + 2;
+
+ tmp = kcalloc(num_of_elements, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ argv4.package.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = num_of_elements;
+ argv4.package.elements = tmp;
+
+ /* save the number of ranges */
+ tmp[0].integer.type = ACPI_TYPE_INTEGER;
+ tmp[0].integer.value = num_of_ranges;
+
+ /* save the action (WBRF_RECORD_ADD/REMOVE/RETRIEVE) */
+ tmp[1].integer.type = ACPI_TYPE_INTEGER;
+ tmp[1].integer.value = action;
+
+ arg_idx = 2;
+ for (i = 0; i < ARRAY_SIZE(in->band_list); i++) {
+ if (!in->band_list[i].start || !in->band_list[i].end)
+ continue;
+
+ tmp[arg_idx].integer.type = ACPI_TYPE_INTEGER;
+ tmp[arg_idx++].integer.value = in->band_list[i].start;
+ tmp[arg_idx].integer.type = ACPI_TYPE_INTEGER;
+ tmp[arg_idx++].integer.value = in->band_list[i].end;
+ }
+
+ obj = acpi_evaluate_dsm(adev->handle, &wifi_acpi_dsm_guid,
+ WBRF_REVISION, WBRF_RECORD, &argv4);
+
+ if (!obj) {
+ /* also free the package elements on this failure path */
+ kfree(tmp);
+ return -EINVAL;
+ }
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = obj->integer.value;
+ if (ret)
+ ret = -EINVAL;
+
+out:
+ ACPI_FREE(obj);
+ kfree(tmp);
+
+ return ret;
+}
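
The package that wbrf_record() hands to _DSM is effectively a flat integer array: [range count, action, start0, end0, start1, end1, ...]. A standalone sketch of the same flattening with two hypothetical bands:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

int main(void)
{
        /* hypothetical 2.4 GHz and 5 GHz bands, in Hz */
        struct range in[] = {
                { 2400000000ULL, 2500000000ULL },
                { 5150000000ULL, 5850000000ULL },
        };
        uint64_t elems[2 + 2 * 2];
        unsigned int i, idx = 2;

        elems[0] = 2;   /* number of ranges */
        elems[1] = 0;   /* action (e.g. "add"), illustrative */
        for (i = 0; i < 2; i++) {
                elems[idx++] = in[i].start;
                elems[idx++] = in[i].end;
        }

        for (i = 0; i < idx; i++)
                printf("elem[%u] = %llu\n", i, (unsigned long long)elems[i]);
        return 0;
}
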
+
+/**
+ * acpi_amd_wbrf_add_remove - add or remove the frequency band the device is using
+ *
+ * @dev: device pointer
+ * @action: remove or add the frequency band into bios
+ * @in: input structure containing the frequency band the device is using
+ *
+ * Broadcast to other consumers the frequency band the device starts
+ * or stops using. Under the hood the information is first cached in
+ * an internal buffer, and a notification is then sent to all
+ * registered consumers so they can query that buffer for the latest
+ * active frequency bands. Consumers that register later can retrieve
+ * the same information from the cache at registration time.
+ *
+ * Return:
+ * 0 on successfully adding or removing the wifi frequency band.
+ * Returns a negative error code on failure.
+ */
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ struct acpi_device *adev;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ ret = wbrf_record(adev, action, in);
+ if (ret)
+ return ret;
+
+ blocking_notifier_call_chain(&wbrf_chain_head, WBRF_CHANGED, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_amd_wbrf_add_remove);
+
+/**
+ * acpi_amd_wbrf_supported_producer - determine if the WBRF can be enabled
+ * for the device as a producer
+ *
+ * @dev: device pointer
+ *
+ * Check whether the platform is equipped with the necessary implementation
+ * to support WBRF for the device as a producer.
+ *
+ * Return:
+ * true if WBRF is supported, otherwise returns false
+ */
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return false;
+
+ return acpi_check_dsm(adev->handle, &wifi_acpi_dsm_guid,
+ WBRF_REVISION, BIT(WBRF_RECORD));
+}
+EXPORT_SYMBOL_GPL(acpi_amd_wbrf_supported_producer);
+
+/**
+ * acpi_amd_wbrf_supported_consumer - determine if the WBRF can be enabled
+ * for the device as a consumer
+ *
+ * @dev: device pointer
+ *
+ * Determine whether the platform is equipped with the necessary implementation
+ * to support WBRF for the device as a consumer.
+ *
+ * Return:
+ * true if WBRF is supported, otherwise returns false.
+ */
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return false;
+
+ return acpi_check_dsm(adev->handle, &wifi_acpi_dsm_guid,
+ WBRF_REVISION, BIT(WBRF_RETRIEVE));
+}
+EXPORT_SYMBOL_GPL(acpi_amd_wbrf_supported_consumer);
+
+/**
+ * amd_wbrf_retrieve_freq_band - retrieve current active frequency bands
+ *
+ * @dev: device pointer
+ * @out: output structure containing all the active frequency bands
+ *
+ * Retrieve the currently active frequency bands that were broadcast
+ * by other producers. The consumer calling this API should take
+ * appropriate action if any of those frequency bands may cause RFI
+ * with the frequency band it uses itself.
+ *
+ * Return:
+ * 0 on successfully retrieving the wifi frequency bands.
+ * Returns a negative error code on failure.
+ */
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ struct amd_wbrf_ranges_out acpi_out = {0};
+ struct acpi_device *adev;
+ union acpi_object *obj;
+ union acpi_object param;
+ int ret = 0;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ param.type = ACPI_TYPE_STRING;
+ param.string.length = 0;
+ param.string.pointer = NULL;
+
+ obj = acpi_evaluate_dsm(adev->handle, &wifi_acpi_dsm_guid,
+ WBRF_REVISION, WBRF_RETRIEVE, &param);
+ if (!obj)
+ return -EINVAL;
+
+ /*
+ * The return buffer is with variable length and the format below:
+ * number_of_entries(1 DWORD): Number of entries
+ * start_freq of 1st entry(1 QWORD): Start frequency of the 1st entry
+ * end_freq of 1st entry(1 QWORD): End frequency of the 1st entry
+ * ...
+ * ...
+ * start_freq of the last entry(1 QWORD)
+ * end_freq of the last entry(1 QWORD)
+ *
+ * Thus the buffer length is determined by the number of entries.
+ * - For zero entry scenario, the buffer length will be 4 bytes.
+ * - For one entry scenario, the buffer length will be 20 bytes.
+ */
+ if (obj->buffer.length > sizeof(acpi_out) || obj->buffer.length < 4) {
+ dev_err(dev, "Wrong sized WBRT information");
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(&acpi_out, obj->buffer.pointer, obj->buffer.length);
+
+ out->num_of_ranges = acpi_out.num_of_ranges;
+ memcpy(out->band_list, acpi_out.band_list, sizeof(acpi_out.band_list));
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(amd_wbrf_retrieve_freq_band);
+
+/**
+ * amd_wbrf_register_notifier - register for notifications of frequency
+ * band update
+ *
+ * @nb: driver notifier block
+ *
+ * The consumer should register itself via this API so that it can get
+ * notified on the frequency band updates from other producers.
+ *
+ * Return:
+ * 0 for registering a consumer driver successfully.
+ * Returns a negative error code for failure.
+ */
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&wbrf_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(amd_wbrf_register_notifier);
+
+/**
+ * amd_wbrf_unregister_notifier - unregister for notifications of
+ * frequency band update
+ *
+ * @nb: driver notifier block
+ *
+ * The consumer should call this API when it is no longer interested in
+ * the frequency band updates from other producers. Usually, this should
+ * be performed during driver cleanup.
+ *
+ * Return:
+ * 0 for unregistering a consumer driver.
+ * Returns a negative error code for failure.
+ */
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&wbrf_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(amd_wbrf_unregister_notifier);
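
Tying the consumer half together, a hedged sketch of how a driver could use the exports defined in this file; demo_dev is a hypothetical device with an ACPI companion, and only the APIs shown above are assumed:

#include <linux/acpi_amd_wbrf.h>
#include <linux/device.h>
#include <linux/notifier.h>

static struct device *demo_dev; /* hypothetical consumer device */

static int demo_wbrf_event(struct notifier_block *nb, unsigned long action,
                           void *data)
{
        struct wbrf_ranges_in_out out = {};

        if (action != WBRF_CHANGED)
                return NOTIFY_DONE;

        if (!amd_wbrf_retrieve_freq_band(demo_dev, &out))
                dev_info(demo_dev, "wbrf: %u active band(s)\n", out.num_of_ranges);

        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_wbrf_event,
};

/* probe:  if (acpi_amd_wbrf_supported_consumer(demo_dev)) */
/*                 amd_wbrf_register_notifier(&demo_nb);   */
/* remove: amd_wbrf_unregister_notifier(&demo_nb);         */
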
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 761029f39..bf03ea1b1 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1816,9 +1816,8 @@ static void asus_dmi_check(void)
return;
/* On L1400B WLED control the sound card, don't mess with it ... */
- if (strncmp(model, "L1400B", 6) == 0) {
+ if (strncmp(model, "L1400B", 6) == 0)
wlan_status = -1;
- }
}
static bool asus_device_present;
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 9f7e23c5c..18be35fdb 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -4615,7 +4615,7 @@ fail_platform:
return err;
}
-static int asus_wmi_remove(struct platform_device *device)
+static void asus_wmi_remove(struct platform_device *device)
{
struct asus_wmi *asus;
@@ -4638,7 +4638,6 @@ static int asus_wmi_remove(struct platform_device *device)
platform_profile_remove();
kfree(asus);
- return 0;
}
/* Platform driver - hibernate/resume callbacks *******************************/
@@ -4799,7 +4798,7 @@ int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
return -EBUSY;
platform_driver = &driver->platform_driver;
- platform_driver->remove = asus_wmi_remove;
+ platform_driver->remove_new = asus_wmi_remove;
platform_driver->driver.owner = driver->owner;
platform_driver->driver.name = driver->name;
platform_driver->driver.pm = &asus_pm_ops;
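
These hunks are part of the tree-wide platform-driver conversion: .remove_new() takes the same struct platform_device pointer but returns void, because the core has long ignored the old int return. A minimal sketch of the converted shape (all names hypothetical):

#include <linux/platform_device.h>

static void demo_remove(struct platform_device *pdev)
{
        /* release resources; errors cannot be propagated from remove */
}

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
        },
        .remove_new = demo_remove,
};
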
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index bdd78076b..e712df67f 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -37,7 +37,7 @@ config DCDBAS
Interrupts (SMIs) and Host Control Actions (system power cycle or
power off after OS shutdown) on certain Dell systems.
- See <file:Documentation/driver-api/dcdbas.rst> for more details on the driver
+ See <file:Documentation/userspace-api/dcdbas.rst> for more details on the driver
and the Dell systems on which Dell systems management software makes
use of this driver.
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index a9477e543..f5ee62ce1 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -429,7 +429,6 @@ static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
static int alienware_zone_init(struct platform_device *dev)
{
u8 zone;
- char buffer[10];
char *name;
if (interface == WMAX) {
@@ -466,8 +465,7 @@ static int alienware_zone_init(struct platform_device *dev)
return -ENOMEM;
for (zone = 0; zone < quirks->num_zones; zone++) {
- sprintf(buffer, "zone%02hhX", zone);
- name = kstrdup(buffer, GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "zone%02hhX", zone);
if (name == NULL)
return 1;
sysfs_attr_init(&zone_dev_attrs[zone].attr);
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 76787369d..a60e35056 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -7,7 +7,7 @@
* and Host Control Actions (power cycle or power off after OS shutdown) on
* Dell systems.
*
- * See Documentation/driver-api/dcdbas.rst for more information.
+ * See Documentation/userspace-api/dcdbas.rst for more information.
*
* Copyright (C) 1995-2006 Dell Inc.
*/
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index 931cc5013..ae9012549 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -6,12 +6,16 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/device.h>
#include <linux/dmi.h>
+#include <linux/fs.h>
#include <linux/list.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/wmi.h>
+#include <uapi/linux/wmi.h>
#include "dell-smbios.h"
#include "dell-wmi-descriptor.h"
@@ -32,7 +36,8 @@ struct wmi_smbios_priv {
struct list_head list;
struct wmi_device *wdev;
struct device *child;
- u32 req_buf_size;
+ u64 req_buf_size;
+ struct miscdevice char_dev;
};
static LIST_HEAD(wmi_list);
@@ -108,48 +113,115 @@ out_wmi_call:
return ret;
}
-static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
- struct wmi_ioctl_buffer *arg)
+static int dell_smbios_wmi_open(struct inode *inode, struct file *filp)
{
struct wmi_smbios_priv *priv;
- int ret = 0;
-
- switch (cmd) {
- case DELL_WMI_SMBIOS_CMD:
- mutex_lock(&call_mutex);
- priv = dev_get_drvdata(&wdev->dev);
- if (!priv) {
- ret = -ENODEV;
- goto fail_smbios_cmd;
- }
- memcpy(priv->buf, arg, priv->req_buf_size);
- if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) {
- dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n",
- priv->buf->std.cmd_class,
- priv->buf->std.cmd_select,
- priv->buf->std.input[0]);
- ret = -EFAULT;
- goto fail_smbios_cmd;
- }
- ret = run_smbios_call(priv->wdev);
- if (ret)
- goto fail_smbios_cmd;
- memcpy(arg, priv->buf, priv->req_buf_size);
-fail_smbios_cmd:
- mutex_unlock(&call_mutex);
- break;
- default:
- ret = -ENOIOCTLCMD;
+
+ priv = container_of(filp->private_data, struct wmi_smbios_priv, char_dev);
+ filp->private_data = priv;
+
+ return nonseekable_open(inode, filp);
+}
+
+static ssize_t dell_smbios_wmi_read(struct file *filp, char __user *buffer, size_t length,
+ loff_t *offset)
+{
+ struct wmi_smbios_priv *priv = filp->private_data;
+
+ return simple_read_from_buffer(buffer, length, offset, &priv->req_buf_size,
+ sizeof(priv->req_buf_size));
+}
+
+static long dell_smbios_wmi_do_ioctl(struct wmi_smbios_priv *priv,
+ struct dell_wmi_smbios_buffer __user *arg)
+{
+ long ret;
+
+ if (get_user(priv->buf->length, &arg->length))
+ return -EFAULT;
+
+ if (priv->buf->length < priv->req_buf_size)
+ return -EINVAL;
+
+ /* if it's too big, complain; the driver will only use what it needs */
+ if (priv->buf->length > priv->req_buf_size)
+ dev_err(&priv->wdev->dev, "Buffer %llu is bigger than required %llu\n",
+ priv->buf->length, priv->req_buf_size);
+
+ if (copy_from_user(priv->buf, arg, priv->req_buf_size))
+ return -EFAULT;
+
+ if (dell_smbios_call_filter(&priv->wdev->dev, &priv->buf->std)) {
+ dev_err(&priv->wdev->dev, "Invalid call %d/%d:%8x\n",
+ priv->buf->std.cmd_class,
+ priv->buf->std.cmd_select,
+ priv->buf->std.input[0]);
+
+ return -EINVAL;
}
+
+ ret = run_smbios_call(priv->wdev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(arg, priv->buf, priv->req_buf_size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long dell_smbios_wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct dell_wmi_smbios_buffer __user *input = (struct dell_wmi_smbios_buffer __user *)arg;
+ struct wmi_smbios_priv *priv = filp->private_data;
+ long ret;
+
+ if (cmd != DELL_WMI_SMBIOS_CMD)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&call_mutex);
+ ret = dell_smbios_wmi_do_ioctl(priv, input);
+ mutex_unlock(&call_mutex);
+
return ret;
}
+static const struct file_operations dell_smbios_wmi_fops = {
+ .owner = THIS_MODULE,
+ .open = dell_smbios_wmi_open,
+ .read = dell_smbios_wmi_read,
+ .unlocked_ioctl = dell_smbios_wmi_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static void dell_smbios_wmi_unregister_chardev(void *data)
+{
+ struct miscdevice *char_dev = data;
+
+ misc_deregister(char_dev);
+}
+
+static int dell_smbios_wmi_register_chardev(struct wmi_smbios_priv *priv)
+{
+ int ret;
+
+ priv->char_dev.minor = MISC_DYNAMIC_MINOR;
+ priv->char_dev.name = "wmi/dell-smbios";
+ priv->char_dev.fops = &dell_smbios_wmi_fops;
+ priv->char_dev.mode = 0444;
+
+ ret = misc_register(&priv->char_dev);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(&priv->wdev->dev, dell_smbios_wmi_unregister_chardev,
+ &priv->char_dev);
+}
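
From userspace, the new misc device behaves like the WMI character device it replaces: read() returns the required buffer size as a u64, and the same DELL_WMI_SMBIOS_CMD ioctl from <linux/wmi.h> runs a call. A hedged sketch; the cmd_class/cmd_select values are placeholders, not a documented token operation:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/wmi.h>

int main(void)
{
        struct dell_wmi_smbios_buffer *buf;
        uint64_t sz;
        int fd;

        fd = open("/dev/wmi/dell-smbios", O_RDONLY);
        if (fd < 0 || read(fd, &sz, sizeof(sz)) != sizeof(sz))
                return 1;

        buf = calloc(1, sz);
        if (!buf)
                return 1;

        buf->length = sz;
        buf->std.cmd_class = 17;        /* hypothetical class/select */
        buf->std.cmd_select = 3;

        if (ioctl(fd, DELL_WMI_SMBIOS_CMD, buf) == 0)
                printf("output[0]=%u\n", buf->std.output[0]);

        free(buf);
        close(fd);
        return 0;
}
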
+
static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context)
{
- struct wmi_driver *wdriver =
- container_of(wdev->dev.driver, struct wmi_driver, driver);
struct wmi_smbios_priv *priv;
- u32 hotfix;
+ u32 buffer_size, hotfix;
int count;
int ret;
@@ -162,62 +234,58 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context)
if (!priv)
return -ENOMEM;
+ priv->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, priv);
+
/* WMI buffer size will be either 4k or 32k depending on machine */
- if (!dell_wmi_get_size(&priv->req_buf_size))
+ if (!dell_wmi_get_size(&buffer_size))
return -EPROBE_DEFER;
+ priv->req_buf_size = buffer_size;
+
/* some SMBIOS calls fail unless BIOS contains hotfix */
if (!dell_wmi_get_hotfix(&hotfix))
return -EPROBE_DEFER;
- if (!hotfix) {
+
+ if (!hotfix)
dev_warn(&wdev->dev,
"WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
hotfix);
- wdriver->filter_callback = NULL;
- }
/* add in the length object we will use internally with ioctl */
priv->req_buf_size += sizeof(u64);
- ret = set_required_buffer_size(wdev, priv->req_buf_size);
- if (ret)
- return ret;
count = get_order(priv->req_buf_size);
- priv->buf = (void *)__get_free_pages(GFP_KERNEL, count);
+ priv->buf = (void *)devm_get_free_pages(&wdev->dev, GFP_KERNEL, count);
if (!priv->buf)
return -ENOMEM;
+ ret = dell_smbios_wmi_register_chardev(priv);
+ if (ret)
+ return ret;
+
/* ID is used by dell-smbios to set priority of drivers */
wdev->dev.id = 1;
ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
if (ret)
- goto fail_register;
+ return ret;
- priv->wdev = wdev;
- dev_set_drvdata(&wdev->dev, priv);
mutex_lock(&list_mutex);
list_add_tail(&priv->list, &wmi_list);
mutex_unlock(&list_mutex);
return 0;
-
-fail_register:
- free_pages((unsigned long)priv->buf, count);
- return ret;
}
static void dell_smbios_wmi_remove(struct wmi_device *wdev)
{
struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
- int count;
mutex_lock(&call_mutex);
mutex_lock(&list_mutex);
list_del(&priv->list);
mutex_unlock(&list_mutex);
dell_smbios_unregister_device(&wdev->dev);
- count = get_order(priv->req_buf_size);
- free_pages((unsigned long)priv->buf, count);
mutex_unlock(&call_mutex);
}
@@ -256,7 +324,6 @@ static struct wmi_driver dell_smbios_wmi_driver = {
.probe = dell_smbios_wmi_probe,
.remove = dell_smbios_wmi_remove,
.id_table = dell_smbios_wmi_id_table,
- .filter_callback = dell_smbios_wmi_filter,
};
int init_dell_smbios_wmi(void)
diff --git a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
index 03d018880..f7efe217a 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
@@ -7,7 +7,6 @@
*/
#include "bioscfg.h"
-#include <asm-generic/posix_types.h>
GET_INSTANCE_ID(password);
/*
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 8ebb7be52..e53660422 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -1478,7 +1478,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
return 0;
}
-static int __exit hp_wmi_bios_remove(struct platform_device *device)
+static void __exit hp_wmi_bios_remove(struct platform_device *device)
{
int i;
@@ -1502,8 +1502,6 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
if (platform_profile_support)
platform_profile_remove();
-
- return 0;
}
static int hp_wmi_resume_handler(struct device *device)
@@ -1560,7 +1558,7 @@ static struct platform_driver hp_wmi_driver __refdata = {
.pm = &hp_wmi_pm_ops,
.dev_groups = hp_wmi_groups,
},
- .remove = __exit_p(hp_wmi_bios_remove),
+ .remove_new = __exit_p(hp_wmi_bios_remove),
};
static umode_t hp_wmi_hwmon_is_visible(const void *data,
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index 848baecc1..93f75ba1d 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -136,7 +136,7 @@ static const struct software_node altmodes_node = {
};
static const struct property_entry dp_altmode_properties[] = {
- PROPERTY_ENTRY_U32("svid", 0xff01),
+ PROPERTY_ENTRY_U16("svid", 0xff01),
PROPERTY_ENTRY_U32("vdo", 0x0c0086),
{ }
};
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 7457ca2b2..9ffbdc988 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -504,6 +504,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
struct platform_device *device = context;
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
unsigned long long ev_index;
+ struct key_entry *ke;
int err;
/*
@@ -545,11 +546,15 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
if (event == 0xc0 || !priv->array)
return;
- if (!sparse_keymap_entry_from_scancode(priv->array, event)) {
+ ke = sparse_keymap_entry_from_scancode(priv->array, event);
+ if (!ke) {
dev_info(&device->dev, "unknown event 0x%x\n", event);
return;
}
+ if (ke->type == KE_IGNORE)
+ return;
+
wakeup:
pm_wakeup_hard_event(&device->dev);
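
With this change a sparse-keymap driver can mark scancodes that should be consumed silently with KE_IGNORE instead of logging them as unknown events. An illustrative table with made-up scancodes:

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry demo_keymap[] = {
        { KE_KEY,    0x1, { KEY_BRIGHTNESSUP } },
        { KE_IGNORE, 0x2 },     /* handled elsewhere; swallow quietly */
        { KE_END },
};
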
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index b6708bab7..527d8fbc7 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
* IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
- IRQF_SHARED, "INT0002", chip);
+ IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
return ret;
diff --git a/drivers/platform/x86/intel/pmc/Kconfig b/drivers/platform/x86/intel/pmc/Kconfig
index b526597e4..d2f651fbe 100644
--- a/drivers/platform/x86/intel/pmc/Kconfig
+++ b/drivers/platform/x86/intel/pmc/Kconfig
@@ -7,6 +7,7 @@ config INTEL_PMC_CORE
tristate "Intel PMC Core driver"
depends on PCI
depends on ACPI
+ depends on INTEL_PMT_TELEMETRY
help
The Intel Platform Controller Hub for Intel Core SoCs provides access
to Power Management Controller registers via various interfaces. This
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
index 3a4cf1cbc..389e5419d 100644
--- a/drivers/platform/x86/intel/pmc/Makefile
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -4,7 +4,7 @@
#
intel_pmc_core-y := core.o core_ssram.o spt.o cnp.o \
- icl.o tgl.o adl.o mtl.o
+ icl.o tgl.o adl.o mtl.o arl.o lnl.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
intel_pmc_core_pltdrv-y := pltdrv.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
index 606f7678b..e7878558f 100644
--- a/drivers/platform/x86/intel/pmc/adl.c
+++ b/drivers/platform/x86/intel/pmc/adl.c
@@ -307,6 +307,8 @@ const struct pmc_reg_map adl_reg_map = {
.lpm_sts = adl_lpm_maps,
.lpm_status_offset = ADL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = ADL_LPM_LIVE_STATUS_OFFSET,
+ .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET,
+ .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
};
int adl_core_init(struct pmc_dev *pmcdev)
@@ -322,5 +324,7 @@ int adl_core_init(struct pmc_dev *pmcdev)
if (ret)
return ret;
+ pmc_core_get_low_power_modes(pmcdev);
+
return 0;
}
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
new file mode 100644
index 000000000..34b4cd23b
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Arrow Lake PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include <linux/pci.h>
+#include "core.h"
+#include "../pmt/telemetry.h"
+
+/* PMC SSRAM PMT Telemetry GUID */
+#define IOEP_LPM_REQ_GUID 0x5077612
+#define SOCS_LPM_REQ_GUID 0x8478657
+#define PCHS_LPM_REQ_GUID 0x9684572
+
+static const u8 ARL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
+
+const struct pmc_bit_map arl_socs_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+ {"Reserved", ARL_SOCS_PMC_LTR_RESERVED},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+ {"DMI3", ARL_PMC_LTR_DMI3},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0)},
+ {"AON3_OFF_STS", BIT(1)},
+ {"AON4_OFF_STS", BIT(2)},
+ {"AON5_OFF_STS", BIT(3)},
+ {"AON1_OFF_STS", BIT(4)},
+ {"XTAL_LVM_OFF_STS", BIT(5)},
+ {"AON3_SPL_OFF_STS", BIT(9)},
+ {"DMI3FPW_0_PLL_OFF_STS", BIT(10)},
+ {"DMI3FPW_1_PLL_OFF_STS", BIT(11)},
+ {"G5X16FPW_0_PLL_OFF_STS", BIT(14)},
+ {"G5X16FPW_1_PLL_OFF_STS", BIT(15)},
+ {"G5X16FPW_2_PLL_OFF_STS", BIT(16)},
+ {"XTAL_AGGR_OFF_STS", BIT(17)},
+ {"USB2_PLL_OFF_STS", BIT(18)},
+ {"G5X16FPW_3_PLL_OFF_STS", BIT(19)},
+ {"BCLK_EXT_INJ_CLK_OFF_STS", BIT(20)},
+ {"PHY_OC_EXT_INJ_CLK_OFF_STS", BIT(21)},
+ {"FILTER_PLL_OFF_STS", BIT(22)},
+ {"FABRIC_PLL_OFF_STS", BIT(25)},
+ {"SOC_PLL_OFF_STS", BIT(26)},
+ {"PCIEFAB_PLL_OFF_STS", BIT(27)},
+ {"REF_PLL_OFF_STS", BIT(28)},
+ {"GENLOCK_FILTER_PLL_OFF_STS", BIT(30)},
+ {"RTC_PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SATA_PGD0_PG_STS", BIT(8)},
+ {"FIACPCB_P5x16_PGD0_PG_STS", BIT(9)},
+ {"G5x16FPW_PGD0_PG_STS", BIT(10)},
+ {"FIA_D_PGD0_PG_STS", BIT(11)},
+ {"MPFPW2_PGD0_PG_STS", BIT(12)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"LPC_PGD0_PG_STS", BIT(15)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"P2S_PGD0_PG_STS", BIT(18)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"DMI3FPW_PGD0_PG_STS", BIT(20)},
+ {"GBETSN1_PGD0_PG_STS", BIT(21)},
+ {"FUSE_PGD0_PG_STS", BIT(22)},
+ {"FIACPCB_D_PGD0_PG_STS", BIT(23)},
+ {"FUSEGPSB_PGD0_PG_STS", BIT(24)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SUSRAM_PGD0_PG_STS", BIT(1)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3)},
+ {"SMS2_PGD0_PG_STS", BIT(4)},
+ {"SMS1_PGD0_PG_STS", BIT(5)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"SBR0_PGD0_PG_STS", BIT(8)},
+ {"SBR1_PGD0_PG_STS", BIT(9)},
+ {"SBR2_PGD0_PG_STS", BIT(10)},
+ {"SBR3_PGD0_PG_STS", BIT(11)},
+ {"MPFPW1_PGD0_PG_STS", BIT(12)},
+ {"SBR5_PGD0_PG_STS", BIT(13)},
+ {"FIA_X_PGD0_PG_STS", BIT(14)},
+ {"FIACPCB_X_PGD0_PG_STS", BIT(15)},
+ {"SBRG_PGD0_PG_STS", BIT(16)},
+ {"SOC_D2D_PGD1_PG_STS", BIT(17)},
+ {"PSF4_PGD0_PG_STS", BIT(18)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"UFSX2_PGD0_PG_STS", BIT(20)},
+ {"ENDBG_PGD0_PG_STS", BIT(21)},
+ {"DBG_PSF_PGD0_PG_STS", BIT(22)},
+ {"SBR6_PGD0_PG_STS", BIT(23)},
+ {"SOC_D2D_PGD2_PG_STS", BIT(24)},
+ {"NPK_PGD1_PG_STS", BIT(25)},
+ {"DMI3_PGD0_PG_STS", BIT(26)},
+ {"DBG_SBR_PGD0_PG_STS", BIT(27)},
+ {"SOC_D2D_PGD0_PG_STS", BIT(28)},
+ {"PSF6_PGD0_PG_STS", BIT(29)},
+ {"PSF7_PGD0_PG_STS", BIT(30)},
+ {"MPFPW3_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
+ {"PSF8_PGD0_PG_STS", BIT(0)},
+ {"FIA_PGD0_PG_STS", BIT(1)},
+ {"SOC_D2D_PGD3_PG_STS", BIT(2)},
+ {"FIA_U_PGD0_PG_STS", BIT(3)},
+ {"TAM_PGD0_PG_STS", BIT(4)},
+ {"GBETSN_PGD0_PG_STS", BIT(5)},
+ {"TBTLSX_PGD0_PG_STS", BIT(6)},
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"PMC_PGD1_PG_STS", BIT(9)},
+ {"FIA_P5x16_PGD0_PG_STS", BIT(10)},
+ {"GNA_PGD0_PG_STS", BIT(11)},
+ {"ACE_PGD0_PG_STS", BIT(12)},
+ {"ACE_PGD1_PG_STS", BIT(13)},
+ {"ACE_PGD2_PG_STS", BIT(14)},
+ {"ACE_PGD3_PG_STS", BIT(15)},
+ {"ACE_PGD4_PG_STS", BIT(16)},
+ {"ACE_PGD5_PG_STS", BIT(17)},
+ {"ACE_PGD6_PG_STS", BIT(18)},
+ {"ACE_PGD7_PG_STS", BIT(19)},
+ {"ACE_PGD8_PG_STS", BIT(20)},
+ {"FIA_PGS_PGD0_PG_STS", BIT(21)},
+ {"FIACPCB_PGS_PGD0_PG_STS", BIT(22)},
+ {"FUSEPMSB_PGD0_PG_STS", BIT(23)},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"SUSRAM_D3_STS", BIT(2)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"ISH_D3_STS", BIT(7)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"SMT2_D3_STS", BIT(9)},
+ {"SMT3_D3_STS", BIT(10)},
+ {"GNA_D3_STS", BIT(12)},
+ {"CLINK_D3_STS", BIT(14)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {"SMS1_D3_STS", BIT(18)},
+ {"SMS2_D3_STS", BIT(19)},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
+ {"GBETSN_D3_STS", BIT(13)},
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {"ACE_D3_STS", BIT(23)},
+ {}
+};
+
+const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = {
+ {"DTS0_VNN_REQ_STS", BIT(7)},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {}
+};
+
+const struct pmc_bit_map *arl_socs_lpm_maps[] = {
+ arl_socs_clocksource_status_map,
+ arl_socs_power_gating_status_0_map,
+ arl_socs_power_gating_status_1_map,
+ arl_socs_power_gating_status_2_map,
+ mtl_socm_d3_status_0_map,
+ mtl_socm_d3_status_1_map,
+ arl_socs_d3_status_2_map,
+ arl_socs_d3_status_3_map,
+ mtl_socm_vnn_req_status_0_map,
+ mtl_socm_vnn_req_status_1_map,
+ mtl_socm_vnn_req_status_2_map,
+ arl_socs_vnn_req_status_3_map,
+ mtl_socm_vnn_misc_status_map,
+ mtl_socm_signal_status_map,
+ NULL
+};
+
+const struct pmc_bit_map arl_socs_pfear_map[] = {
+ {"RSVD64", BIT(0)},
+ {"RSVD65", BIT(1)},
+ {"RSVD66", BIT(2)},
+ {"RSVD67", BIT(3)},
+ {"RSVD68", BIT(4)},
+ {"GBETSN", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {}
+};
+
+const struct pmc_bit_map *ext_arl_socs_pfear_map[] = {
+ mtl_socm_pfear_map,
+ arl_socs_pfear_map,
+ NULL
+};
+
+const struct pmc_reg_map arl_socs_reg_map = {
+ .pfear_sts = ext_arl_socs_pfear_map,
+ .ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .lpm_sts = arl_socs_lpm_maps,
+ .ltr_ignore_max = ARL_SOCS_NUM_IP_IGN_ALLOWED,
+ .ltr_show_sts = arl_socs_ltr_show_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = MTL_SOC_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_reg_index = ARL_LPM_REG_INDEX,
+ .etr3_offset = ETR3_OFFSET,
+ .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET,
+ .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
+};
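
Each of these tables pairs a human-readable name with one status bit, and the core driver simply walks a table against a register value when producing debugfs output. A standalone sketch of that decode loop with a made-up status word:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct demo_bit_map {
        const char *name;
        uint32_t bit_mask;
};

static const struct demo_bit_map demo_map[] = {
        { "AON2_OFF_STS",       BIT(0) },
        { "XTAL_LVM_OFF_STS",   BIT(5) },
        { "RTC_PLL_OFF_STS",    BIT(31) },
        { }     /* NULL name terminates, as in pmc_bit_map */
};

int main(void)
{
        uint32_t status = BIT(0) | BIT(31);     /* hypothetical register value */
        const struct demo_bit_map *m;

        for (m = demo_map; m->name; m++)
                printf("%-20s : %s\n", m->name,
                       (status & m->bit_mask) ? "Yes" : "No");
        return 0;
}
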
+
+const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+ {"ESE", MTL_PMC_LTR_ESE},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+ {"DMI3", ARL_PMC_LTR_DMI3},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0)},
+ {"AON3_OFF_STS", BIT(1)},
+ {"AON4_OFF_STS", BIT(2)},
+ {"AON2_SPL_OFF_STS", BIT(3)},
+ {"AONL_OFF_STS", BIT(4)},
+ {"XTAL_LVM_OFF_STS", BIT(5)},
+ {"AON5_ACRO_OFF_STS", BIT(6)},
+ {"AON6_ACRO_OFF_STS", BIT(7)},
+ {"USB3_PLL_OFF_STS", BIT(8)},
+ {"ACRO_OFF_STS", BIT(9)},
+ {"AUDIO_PLL_OFF_STS", BIT(10)},
+ {"MAIN_CRO_OFF_STS", BIT(11)},
+ {"MAIN_DIVIDER_OFF_STS", BIT(12)},
+ {"REF_PLL_NON_OC_OFF_STS", BIT(13)},
+ {"DMI_PLL_OFF_STS", BIT(14)},
+ {"PHY_EXT_INJ_OFF_STS", BIT(15)},
+ {"AON6_MCRO_OFF_STS", BIT(16)},
+ {"XTAL_AGGR_OFF_STS", BIT(17)},
+ {"USB2_PLL_OFF_STS", BIT(18)},
+ {"TSN0_PLL_OFF_STS", BIT(19)},
+ {"TSN1_PLL_OFF_STS", BIT(20)},
+ {"GBE_PLL_OFF_STS", BIT(21)},
+ {"SATA_PLL_OFF_STS", BIT(22)},
+ {"PCIE0_PLL_OFF_STS", BIT(23)},
+ {"PCIE1_PLL_OFF_STS", BIT(24)},
+ {"PCIE2_PLL_OFF_STS", BIT(26)},
+ {"PCIE3_PLL_OFF_STS", BIT(27)},
+ {"REF_PLL_OFF_STS", BIT(28)},
+ {"PCIE4_PLL_OFF_STS", BIT(29)},
+ {"PCIE5_PLL_OFF_STS", BIT(30)},
+ {"REF38P4_PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"DMI_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPC_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SATA_PGD0_PG_STS", BIT(8)},
+ {"FIA_X_PGD0_PG_STS", BIT(9)},
+ {"MPFPW4_PGD0_PG_STS", BIT(10)},
+ {"EAH_PGD0_PG_STS", BIT(11)},
+ {"MPFPW1_PGD0_PG_STS", BIT(12)},
+ {"SPD_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"LPC_PGD0_PG_STS", BIT(15)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"P2S_PGD0_PG_STS", BIT(18)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"U3FPW1_PGD0_PG_STS", BIT(20)},
+ {"PECI_PGD0_PG_STS", BIT(21)},
+ {"FUSE_PGD0_PG_STS", BIT(22)},
+ {"SBR8_PGD0_PG_STS", BIT(23)},
+ {"EXE_PGD0_PG_STS", BIT(24)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SUSRAM_PGD0_PG_STS", BIT(1)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"SMT4_PGD0_PG_STS", BIT(3)},
+ {"SMS2_PGD0_PG_STS", BIT(4)},
+ {"SMS1_PGD0_PG_STS", BIT(5)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"SBR0_PGD0_PG_STS", BIT(8)},
+ {"SBR1_PGD0_PG_STS", BIT(9)},
+ {"SBR2_PGD0_PG_STS", BIT(10)},
+ {"SBR3_PGD0_PG_STS", BIT(11)},
+ {"SBR4_PGD0_PG_STS", BIT(12)},
+ {"SBR5_PGD0_PG_STS", BIT(13)},
+ {"MPFPW3_PGD0_PG_STS", BIT(14)},
+ {"PSF1_PGD0_PG_STS", BIT(15)},
+ {"PSF2_PGD0_PG_STS", BIT(16)},
+ {"PSF3_PGD0_PG_STS", BIT(17)},
+ {"PSF4_PGD0_PG_STS", BIT(18)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"DMI3_PGD0_PG_STS", BIT(20)},
+ {"ENDBG_PGD0_PG_STS", BIT(21)},
+ {"DBG_SBR_PGD0_PG_STS", BIT(22)},
+ {"SBR6_PGD0_PG_STS", BIT(23)},
+ {"SBR7_PGD0_PG_STS", BIT(24)},
+ {"NPK_PGD1_PG_STS", BIT(25)},
+ {"U3FPW3_PGD0_PG_STS", BIT(26)},
+ {"MPFPW2_PGD0_PG_STS", BIT(27)},
+ {"MPFPW7_PGD0_PG_STS", BIT(28)},
+ {"GBETSN1_PGD0_PG_STS", BIT(29)},
+ {"PSF7_PGD0_PG_STS", BIT(30)},
+ {"FIA2_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
+ {"U3FPW2_PGD0_PG_STS", BIT(0)},
+ {"FIA_PGD0_PG_STS", BIT(1)},
+ {"FIACPCB_X_PGD0_PG_STS", BIT(2)},
+ {"FIA1_PGD0_PG_STS", BIT(3)},
+ {"TAM_PGD0_PG_STS", BIT(4)},
+ {"GBETSN_PGD0_PG_STS", BIT(5)},
+ {"SBR9_PGD0_PG_STS", BIT(6)},
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"PMC_PGD1_PG_STS", BIT(9)},
+ {"DBC_PGD0_PG_STS", BIT(10)},
+ {"DBG_PSF_PGD0_PG_STS", BIT(11)},
+ {"SPF_PGD0_PG_STS", BIT(12)},
+ {"ACE_PGD0_PG_STS", BIT(13)},
+ {"ACE_PGD1_PG_STS", BIT(14)},
+ {"ACE_PGD2_PG_STS", BIT(15)},
+ {"ACE_PGD3_PG_STS", BIT(16)},
+ {"ACE_PGD4_PG_STS", BIT(17)},
+ {"ACE_PGD5_PG_STS", BIT(18)},
+ {"ACE_PGD6_PG_STS", BIT(19)},
+ {"ACE_PGD7_PG_STS", BIT(20)},
+ {"SPE_PGD0_PG_STS", BIT(21)},
+ {"MPFPW5_PG_STS", BIT(22)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
+ {"SPF_D3_STS", BIT(0)},
+ {"LPSS_D3_STS", BIT(3)},
+ {"XDCI_D3_STS", BIT(4)},
+ {"XHCI_D3_STS", BIT(5)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"SPC_D3_STS", BIT(14)},
+ {"SPD_D3_STS", BIT(15)},
+ {"SPE_D3_STS", BIT(16)},
+ {"ESPISPI_D3_STS", BIT(18)},
+ {"SATA_D3_STS", BIT(20)},
+ {"PSTH_D3_STS", BIT(21)},
+ {"DMI_D3_STS", BIT(22)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
+ {"GBETSN1_D3_STS", BIT(14)},
+ {"GBE_D3_STS", BIT(19)},
+ {"ITSS_D3_STS", BIT(23)},
+ {"P2S_D3_STS", BIT(24)},
+ {"CNVI_D3_STS", BIT(27)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"SUSRAM_D3_STS", BIT(2)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"ISH_D3_STS", BIT(7)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"SMT2_D3_STS", BIT(9)},
+ {"SMT3_D3_STS", BIT(10)},
+ {"SMT4_D3_STS", BIT(11)},
+ {"SMT5_D3_STS", BIT(12)},
+ {"SMT6_D3_STS", BIT(13)},
+ {"CLINK_D3_STS", BIT(14)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {"SMS1_D3_STS", BIT(18)},
+ {"SMS2_D3_STS", BIT(19)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
+ {"ESE_D3_STS", BIT(3)},
+ {"GBETSN_D3_STS", BIT(13)},
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {"ACE_D3_STS", BIT(23)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[] = {
+ {"FIA_VNN_REQ_STS", BIT(17)},
+ {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4)},
+ {"DFXAGG_VNN_REQ_STS", BIT(8)},
+ {"EXI_VNN_REQ_STS", BIT(9)},
+ {"GBE_VNN_REQ_STS", BIT(19)},
+ {"SMB_VNN_REQ_STS", BIT(25)},
+ {"LPC_VNN_REQ_STS", BIT(26)},
+ {"CNVI_VNN_REQ_STS", BIT(27)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
+ {"FIA2_VNN_REQ_STS", BIT(0)},
+ {"CSMERTC_VNN_REQ_STS", BIT(1)},
+ {"CSE_VNN_REQ_STS", BIT(4)},
+ {"ISH_VNN_REQ_STS", BIT(7)},
+ {"SMT1_VNN_REQ_STS", BIT(8)},
+ {"SMT4_VNN_REQ_STS", BIT(11)},
+ {"CLINK_VNN_REQ_STS", BIT(14)},
+ {"SMS1_VNN_REQ_STS", BIT(18)},
+ {"SMS2_VNN_REQ_STS", BIT(19)},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
+ {"ESE_VNN_REQ_STS", BIT(3)},
+ {"DTS0_VNN_REQ_STS", BIT(7)},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {"FIA1_VNN_REQ_STS", BIT(12)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"TS_OFF_REQ_STS", BIT(1)},
+ {"PNDE_MET_REQ_STS", BIT(2)},
+ {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
+ {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4)},
+ {"ISH_VNNAON_REQ_STS", BIT(7)},
+ {"IOE_COND_MET_S02I2_0_REQ_STS", BIT(8)},
+ {"IOE_COND_MET_S02I2_1_REQ_STS", BIT(9)},
+ {"IOE_COND_MET_S02I2_2_REQ_STS", BIT(10)},
+ {"PLT_GREATER_REQ_STS", BIT(11)},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14)},
+ {"EA_REQ_STS", BIT(15)},
+ {"DMI_CLKREQ_B_REQ_STS", BIT(16)},
+ {"BRK_EV_EN_REQ_STS", BIT(17)},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
+ {"ARC_IDLE_REQ_STS", BIT(21)},
+ {"DMI_IN_REQ_STS", BIT(22)},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23)},
+ {"XDCI_ATTACHED_REQ_STS", BIT(24)},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
+ {"PRE_WAKE0_REQ_STS", BIT(27)},
+ {"PRE_WAKE1_REQ_STS", BIT(28)},
+ {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
+ {"CNVI_V1P05_REQ_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map arl_pchs_signal_status_map[] = {
+ {"LSX_Wake0_STS", BIT(0)},
+ {"LSX_Wake1_STS", BIT(1)},
+ {"LSX_Wake2_STS", BIT(2)},
+ {"LSX_Wake3_STS", BIT(3)},
+ {"LSX_Wake4_STS", BIT(4)},
+ {"LSX_Wake5_STS", BIT(5)},
+ {"LSX_Wake6_STS", BIT(6)},
+ {"LSX_Wake7_STS", BIT(7)},
+ {"Int_Timer_SS_Wake0_STS", BIT(8)},
+ {"Int_Timer_SS_Wake1_STS", BIT(9)},
+ {"Int_Timer_SS_Wake0_STS", BIT(10)},
+ {"Int_Timer_SS_Wake1_STS", BIT(11)},
+ {"Int_Timer_SS_Wake2_STS", BIT(12)},
+ {"Int_Timer_SS_Wake3_STS", BIT(13)},
+ {"Int_Timer_SS_Wake4_STS", BIT(14)},
+ {"Int_Timer_SS_Wake5_STS", BIT(15)},
+ {}
+};
+
+const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
+ arl_pchs_clocksource_status_map,
+ arl_pchs_power_gating_status_0_map,
+ arl_pchs_power_gating_status_1_map,
+ arl_pchs_power_gating_status_2_map,
+ arl_pchs_d3_status_0_map,
+ arl_pchs_d3_status_1_map,
+ arl_pchs_d3_status_2_map,
+ arl_pchs_d3_status_3_map,
+ arl_pchs_vnn_req_status_0_map,
+ arl_pchs_vnn_req_status_1_map,
+ arl_pchs_vnn_req_status_2_map,
+ arl_pchs_vnn_req_status_3_map,
+ arl_pchs_vnn_misc_status_map,
+ arl_pchs_signal_status_map,
+ NULL
+};
+
+const struct pmc_reg_map arl_pchs_reg_map = {
+ .pfear_sts = ext_arl_socs_pfear_map,
+ .ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = ARL_SOCS_NUM_IP_IGN_ALLOWED,
+ .lpm_sts = arl_pchs_lpm_maps,
+ .ltr_show_sts = arl_pchs_ltr_show_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = ARL_PCH_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_reg_index = ARL_LPM_REG_INDEX,
+ .etr3_offset = ETR3_OFFSET,
+};
+
+#define PMC_DEVID_SOCS 0xae7f
+#define PMC_DEVID_IOEP 0x7ecf
+#define PMC_DEVID_PCHS 0x7f27
+static struct pmc_info arl_pmc_info_list[] = {
+ {
+ .guid = IOEP_LPM_REQ_GUID,
+ .devid = PMC_DEVID_IOEP,
+ .map = &mtl_ioep_reg_map,
+ },
+ {
+ .guid = SOCS_LPM_REQ_GUID,
+ .devid = PMC_DEVID_SOCS,
+ .map = &arl_socs_reg_map,
+ },
+ {
+ .guid = PCHS_LPM_REQ_GUID,
+ .devid = PMC_DEVID_PCHS,
+ .map = &arl_pchs_reg_map,
+ },
+ {}
+};
+
+#define ARL_NPU_PCI_DEV 0xad1d
+#define ARL_GNA_PCI_DEV 0xae4c
+/*
+ * Put select devices that have no driver into D3 so that they
+ * do not block Package C entry.
+ */
+static void arl_d3_fixup(void)
+{
+ pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_GNA_PCI_DEV);
+}
+
+static int arl_resume(struct pmc_dev *pmcdev)
+{
+ arl_d3_fixup();
+ pmc_core_send_ltr_ignore(pmcdev, 3, 0);
+
+ return pmc_core_resume_common(pmcdev);
+}
+
+int arl_core_init(struct pmc_dev *pmcdev)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
+ int ret;
+ int func = 0;
+ bool ssram_init = true;
+
+ arl_d3_fixup();
+ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = arl_resume;
+ pmcdev->regmap_list = arl_pmc_info_list;
+
+ /*
+ * If SSRAM init fails, use the legacy method to at least get
+ * the primary PMC.
+ */
+ ret = pmc_core_ssram_init(pmcdev, func);
+ if (ret) {
+ ssram_init = false;
+ pmc->map = &arl_socs_reg_map;
+
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+ }
+
+ pmc_core_get_low_power_modes(pmcdev);
+ pmc_core_punit_pmt_init(pmcdev, ARL_PMT_DMU_GUID);
+
+ if (ssram_init) {
+ ret = pmc_core_ssram_get_lpm_reqs(pmcdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
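
All of the tables above share one convention: each pmc_bit_map table is closed by an empty {} sentinel, and the array of tables (arl_pchs_lpm_maps) is closed by NULL, so consumers can walk both without explicit length fields. A minimal standalone sketch (ordinary userspace C with an illustrative three-entry map, not kernel code) of how one such table decodes a 32-bit status register:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

struct pmc_bit_map {
	const char *name;
	uint32_t bit_mask;
};

/* illustrative subset; real tables come from the platform files above */
static const struct pmc_bit_map example_d3_map[] = {
	{"SPF_D3_STS",  BIT(0)},
	{"LPSS_D3_STS", BIT(3)},
	{"XDCI_D3_STS", BIT(4)},
	{}	/* empty sentinel: name == NULL terminates the walk */
};

static void decode_status(const struct pmc_bit_map *map, uint32_t reg)
{
	for (; map->name; map++)
		printf("%-14s %s\n", map->name,
		       (reg & map->bit_mask) ? "Yes" : "No");
}

int main(void)
{
	decode_status(example_d3_map, BIT(0) | BIT(4));
	return 0;
}
```

The debugfs code in core.c iterates the same way, which is why any entry placed after the sentinel would simply never be reached.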
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index 98b366512..dd72974bf 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -234,5 +234,7 @@ int cnp_core_init(struct pmc_dev *pmcdev)
if (ret)
return ret;
+ pmc_core_get_low_power_modes(pmcdev);
+
return 0;
}
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 022afb97d..8f9c03680 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -20,6 +20,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/units.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -27,6 +28,7 @@
#include <asm/tsc.h>
#include "core.h"
+#include "../pmt/telemetry.h"
/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
@@ -206,6 +208,20 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
+static int pmc_core_pson_residency_get(void *data, u64 *val)
+{
+ struct pmc *pmc = data;
+ const struct pmc_reg_map *map = pmc->map;
+ u32 value;
+
+ value = pmc_core_reg_read(pmc, map->pson_residency_offset);
+ *val = (u64)value * map->pson_residency_counter_step;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n");
+
static int pmc_core_check_read_lock_bit(struct pmc *pmc)
{
u32 value;
@@ -731,7 +747,7 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
-static void pmc_core_substate_req_header_show(struct seq_file *s)
+static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
{
struct pmc_dev *pmcdev = s->private;
int i, mode;
@@ -746,71 +762,125 @@ static void pmc_core_substate_req_header_show(struct seq_file *s)
static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- const struct pmc_bit_map **maps = pmc->map->lpm_sts;
- const struct pmc_bit_map *map;
- const int num_maps = pmc->map->lpm_num_maps;
- u32 sts_offset = pmc->map->lpm_status_offset;
- u32 *lpm_req_regs = pmc->lpm_req_regs;
- int mp;
+ u32 sts_offset;
+ u32 *lpm_req_regs;
+ int num_maps, mp, pmc_index;
+
+ for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_index];
+ const struct pmc_bit_map **maps;
- /* Display the header */
- pmc_core_substate_req_header_show(s);
+ if (!pmc)
+ continue;
- /* Loop over maps */
- for (mp = 0; mp < num_maps; mp++) {
- u32 req_mask = 0;
- u32 lpm_status;
- int mode, idx, i, len = 32;
+ maps = pmc->map->lpm_sts;
+ num_maps = pmc->map->lpm_num_maps;
+ sts_offset = pmc->map->lpm_status_offset;
+ lpm_req_regs = pmc->lpm_req_regs;
/*
- * Capture the requirements and create a mask so that we only
- * show an element if it's required for at least one of the
- * enabled low power modes
+ * When there are multiple PMCs, the PMC may exist but its
+ * requirement register discovery may have failed, so check
+ * before accessing.
*/
- pmc_for_each_mode(idx, mode, pmcdev)
- req_mask |= lpm_req_regs[mp + (mode * num_maps)];
-
- /* Get the last latched status for this map */
- lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
-
- /* Loop over elements in this map */
- map = maps[mp];
- for (i = 0; map[i].name && i < len; i++) {
- u32 bit_mask = map[i].bit_mask;
-
- if (!(bit_mask & req_mask))
- /*
- * Not required for any enabled states
- * so don't display
- */
- continue;
-
- /* Display the element name in the first column */
- seq_printf(s, "%30s |", map[i].name);
-
- /* Loop over the enabled states and display if required */
- pmc_for_each_mode(idx, mode, pmcdev) {
- if (lpm_req_regs[mp + (mode * num_maps)] & bit_mask)
- seq_printf(s, " %9s |",
- "Required");
- else
- seq_printf(s, " %9s |", " ");
+ if (!lpm_req_regs)
+ continue;
+
+ /* Display the header */
+ pmc_core_substate_req_header_show(s, pmc_index);
+
+ /* Loop over maps */
+ for (mp = 0; mp < num_maps; mp++) {
+ u32 req_mask = 0;
+ u32 lpm_status;
+ const struct pmc_bit_map *map;
+ int mode, idx, i, len = 32;
+
+ /*
+ * Capture the requirements and create a mask so that we only
+ * show an element if it's required for at least one of the
+ * enabled low power modes
+ */
+ pmc_for_each_mode(idx, mode, pmcdev)
+ req_mask |= lpm_req_regs[mp + (mode * num_maps)];
+
+ /* Get the last latched status for this map */
+ lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));
+
+ /* Loop over elements in this map */
+ map = maps[mp];
+ for (i = 0; map[i].name && i < len; i++) {
+ u32 bit_mask = map[i].bit_mask;
+
+ if (!(bit_mask & req_mask)) {
+ /*
+ * Not required for any enabled states
+ * so don't display
+ */
+ continue;
+ }
+
+ /* Display the element name in the first column */
+ seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
+
+ /* Loop over the enabled states and display if required */
+ pmc_for_each_mode(idx, mode, pmcdev) {
+ bool required = lpm_req_regs[mp + (mode * num_maps)] &
+ bit_mask;
+ seq_printf(s, " %9s |", required ? "Required" : " ");
+ }
+
+ /* In Status column, show the last captured state of this agent */
+ seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");
+
+ seq_puts(s, "\n");
}
+ }
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
- /* In Status column, show the last captured state of this agent */
- if (lpm_status & bit_mask)
- seq_printf(s, " %9s |", "Yes");
- else
- seq_printf(s, " %9s |", " ");
+static unsigned int pmc_core_get_crystal_freq(void)
+{
+ unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
- seq_puts(s, "\n");
- }
+ if (boot_cpu_data.cpuid_level < 0x15)
+ return 0;
+
+ eax_denominator = ebx_numerator = ecx_hz = edx = 0;
+
+ /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
+ cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+
+ if (ebx_numerator == 0 || eax_denominator == 0)
+ return 0;
+
+ return ecx_hz;
+}
+
+static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ u64 die_c6_res, count;
+ int ret;
+
+ if (!pmcdev->crystal_freq) {
+ dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n");
+ return -ENXIO;
}
+ ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset,
+ &count, 1);
+ if (ret)
+ return ret;
+
+ die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq);
+ seq_printf(s, "%llu\n", die_c6_res);
+
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
+DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us);
static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
@@ -969,9 +1039,8 @@ static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
return true;
}
-static void pmc_core_get_low_power_modes(struct platform_device *pdev)
+void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
{
- struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
u8 mode_order[LPM_MAX_NUM_MODES];
@@ -1003,7 +1072,8 @@ static void pmc_core_get_low_power_modes(struct platform_device *pdev)
for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
pri_order[mode_order[mode]] = mode;
else
- dev_warn(&pdev->dev, "Assuming a default substate order for this platform\n");
+ dev_warn(&pmcdev->pdev->dev,
+ "Assuming a default substate order for this platform\n");
/*
* Loop through all modes from lowest to highest priority,
@@ -1039,6 +1109,69 @@ int get_primary_reg_base(struct pmc *pmc)
return 0;
}
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
+{
+ struct telem_endpoint *ep;
+ struct pci_dev *pcidev;
+
+ pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0));
+ if (!pcidev) {
+ dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found.");
+ return;
+ }
+
+ ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ pci_dev_put(pcidev);
+ if (IS_ERR(ep)) {
+ dev_err(&pmcdev->pdev->dev,
+ "pmc_core: couldn't get DMU telem endpoint %ld",
+ PTR_ERR(ep));
+ return;
+ }
+
+ pmcdev->punit_ep = ep;
+
+ pmcdev->has_die_c6 = true;
+ pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
+}
+
+void pmc_core_set_device_d3(unsigned int device)
+{
+ struct pci_dev *pcidev;
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ if (pcidev) {
+ if (!device_trylock(&pcidev->dev)) {
+ pci_dev_put(pcidev);
+ return;
+ }
+ if (!pcidev->dev.driver) {
+ dev_info(&pcidev->dev, "Setting to D3hot\n");
+ pci_set_power_state(pcidev, PCI_D3hot);
+ }
+ device_unlock(&pcidev->dev);
+ pci_dev_put(pcidev);
+ }
+}
+
+static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
+{
+ struct platform_device *pdev = pmcdev->pdev;
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ u8 val;
+
+ if (!adev)
+ return false;
+
+ if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
+ "intel-cec-pson-switching-enabled-in-s0",
+ &val))
+ return false;
+
+ return val == 1;
+}
+
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -1108,6 +1241,17 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
pmcdev->dbgfs_dir, pmcdev,
&pmc_core_substate_req_regs_fops);
}
+
+ if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) {
+ debugfs_create_file("pson_residency_usec", 0444,
+ pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency);
+ }
+
+ if (pmcdev->has_die_c6) {
+ debugfs_create_file("die_c6_us_show", 0444,
+ pmcdev->dbgfs_dir, pmcdev,
+ &pmc_core_die_c6_us_fops);
+ }
}
static const struct x86_cpu_id intel_pmc_core_ids[] = {
@@ -1120,18 +1264,20 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, icl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, cnp_core_init),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, cnp_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, tgl_l_core_init),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, tgl_l_core_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_l_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_l_core_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_l_core_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, adl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, mtl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, arl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, lnl_core_init),
{}
};
@@ -1202,6 +1348,10 @@ static void pmc_core_clean_structure(struct platform_device *pdev)
pci_dev_put(pmcdev->ssram_pcidev);
pci_disable_device(pmcdev->ssram_pcidev);
}
+
+ if (pmcdev->punit_ep)
+ pmt_telem_unregister_endpoint(pmcdev->punit_ep);
+
platform_set_drvdata(pdev, NULL);
mutex_destroy(&pmcdev->lock);
}
@@ -1222,6 +1372,8 @@ static int pmc_core_probe(struct platform_device *pdev)
if (!pmcdev)
return -ENOMEM;
+ pmcdev->crystal_freq = pmc_core_get_crystal_freq();
+
platform_set_drvdata(pdev, pmcdev);
pmcdev->pdev = pdev;
@@ -1253,7 +1405,6 @@ static int pmc_core_probe(struct platform_device *pdev)
}
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
- pmc_core_get_low_power_modes(pdev);
pmc_core_do_dmi_quirks(primary_pmc);
pmc_core_dbgfs_register(pmcdev);
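
pmc_core_die_c6_us_show() above converts a PUNIT telemetry count into microseconds: the counter ticks at the crystal frequency returned by CPUID leaf 0x15 (ECX, in Hz), so the result is count * HZ_PER_MHZ / crystal_freq. A standalone sketch of the same arithmetic; the 38.4 MHz crystal and the tick count are assumed example values, not read from hardware:

```c
#include <stdio.h>
#include <stdint.h>

#define HZ_PER_MHZ 1000000ULL	/* same value as in <linux/units.h> */

/* userspace stand-in for the div64_u64() based conversion above */
static uint64_t ticks_to_us(uint64_t ticks, uint64_t crystal_hz)
{
	return ticks * HZ_PER_MHZ / crystal_hz;
}

int main(void)
{
	/* 76,800,000 ticks at an assumed 38.4 MHz crystal -> 2,000,000 us */
	printf("%llu us\n",
	       (unsigned long long)ticks_to_us(76800000ULL, 38400000ULL));
	return 0;
}
```

This is also why pmc_core_probe() now caches pmc_core_get_crystal_freq(): without CPUID.15H data the debugfs read returns -ENXIO rather than guessing a frequency.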
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index b66dacbfb..54137faaa 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -16,6 +16,8 @@
#include <linux/bits.h>
#include <linux/platform_device.h>
+struct telem_endpoint;
+
#define SLP_S0_RES_COUNTER_MASK GENMASK(31, 0)
#define PMC_BASE_ADDR_DEFAULT 0xFE000000
@@ -221,6 +223,10 @@ enum ppfear_regs {
#define TGL_LPM_PRI_OFFSET 0x1C7C
#define TGL_LPM_NUM_MAPS 6
+/* Tigerlake PSON residency register */
+#define TGL_PSON_RESIDENCY_OFFSET 0x18f8
+#define TGL_PSON_RES_COUNTER_STEP 0x7A
+
/* Extended Test Mode Register 3 (CNL and later) */
#define ETR3_OFFSET 0x1048
#define ETR3_CF9GR BIT(20)
@@ -257,10 +263,25 @@ enum ppfear_regs {
#define MTL_SOCM_NUM_IP_IGN_ALLOWED 25
#define MTL_SOC_PMC_MMIO_REG_LEN 0x2708
#define MTL_PMC_LTR_SPG 0x1B74
+#define ARL_SOCS_PMC_LTR_RESERVED 0x1B88
+#define ARL_SOCS_NUM_IP_IGN_ALLOWED 26
+#define ARL_PMC_LTR_DMI3 0x1BE4
+#define ARL_PCH_PMC_MMIO_REG_LEN 0x2720
/* Meteor Lake PGD PFET Enable Ack Status */
#define MTL_SOCM_PPFEAR_NUM_ENTRIES 8
#define MTL_IOE_PPFEAR_NUM_ENTRIES 10
+#define ARL_SOCS_PPFEAR_NUM_ENTRIES 9
+
+/* Die C6 from PUNIT telemetry */
+#define MTL_PMT_DMU_DIE_C6_OFFSET 15
+#define MTL_PMT_DMU_GUID 0x1A067102
+#define ARL_PMT_DMU_GUID 0x1A06A000
+
+#define LNL_PMC_MMIO_REG_LEN 0x2708
+#define LNL_PMC_LTR_OSSE 0x1B88
+#define LNL_NUM_IP_IGN_ALLOWED 27
+#define LNL_PPFEAR_NUM_ENTRIES 12
extern const char *pmc_lpm_modes[];
@@ -320,6 +341,9 @@ struct pmc_reg_map {
const u32 lpm_status_offset;
const u32 lpm_live_status_offset;
const u32 etr3_offset;
+ const u8 *lpm_reg_index;
+ const u32 pson_residency_offset;
+ const u32 pson_residency_counter_step;
};
/**
@@ -329,6 +353,7 @@ struct pmc_reg_map {
* specific attributes
*/
struct pmc_info {
+ u32 guid;
u16 devid;
const struct pmc_reg_map *map;
};
@@ -355,6 +380,7 @@ struct pmc {
* @devs: pointer to an array of pmc pointers
* @pdev: pointer to platform_device struct
* @ssram_pcidev: pointer to pci device struct for the PMC SSRAM
+ * @crystal_freq: crystal frequency from cpuid
* @dbgfs_dir: path to debugfs interface
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
@@ -373,6 +399,7 @@ struct pmc_dev {
struct dentry *dbgfs_dir;
struct platform_device *pdev;
struct pci_dev *ssram_pcidev;
+ unsigned int crystal_freq;
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
@@ -425,6 +452,7 @@ extern const struct pmc_bit_map tgl_vnn_misc_status_map[];
extern const struct pmc_bit_map tgl_signal_status_map[];
extern const struct pmc_bit_map *tgl_lpm_maps[];
extern const struct pmc_reg_map tgl_reg_map;
+extern const struct pmc_reg_map tgl_h_reg_map;
extern const struct pmc_bit_map adl_pfear_map[];
extern const struct pmc_bit_map *ext_adl_pfear_map[];
extern const struct pmc_bit_map adl_ltr_show_map[];
@@ -486,21 +514,80 @@ extern const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[];
extern const struct pmc_bit_map mtl_ioem_vnn_req_status_1_map[];
extern const struct pmc_bit_map *mtl_ioem_lpm_maps[];
extern const struct pmc_reg_map mtl_ioem_reg_map;
+extern const struct pmc_reg_map lnl_socm_reg_map;
+
+/* LNL */
+extern const struct pmc_bit_map lnl_ltr_show_map[];
+extern const struct pmc_bit_map lnl_clocksource_status_map[];
+extern const struct pmc_bit_map lnl_power_gating_status_0_map[];
+extern const struct pmc_bit_map lnl_power_gating_status_1_map[];
+extern const struct pmc_bit_map lnl_power_gating_status_2_map[];
+extern const struct pmc_bit_map lnl_d3_status_0_map[];
+extern const struct pmc_bit_map lnl_d3_status_1_map[];
+extern const struct pmc_bit_map lnl_d3_status_2_map[];
+extern const struct pmc_bit_map lnl_d3_status_3_map[];
+extern const struct pmc_bit_map lnl_vnn_req_status_0_map[];
+extern const struct pmc_bit_map lnl_vnn_req_status_1_map[];
+extern const struct pmc_bit_map lnl_vnn_req_status_2_map[];
+extern const struct pmc_bit_map lnl_vnn_req_status_3_map[];
+extern const struct pmc_bit_map lnl_vnn_misc_status_map[];
+extern const struct pmc_bit_map *lnl_lpm_maps[];
+extern const struct pmc_bit_map lnl_pfear_map[];
+extern const struct pmc_bit_map *ext_lnl_pfear_map[];
+
+/* ARL */
+extern const struct pmc_bit_map arl_socs_ltr_show_map[];
+extern const struct pmc_bit_map arl_socs_clocksource_status_map[];
+extern const struct pmc_bit_map arl_socs_power_gating_status_0_map[];
+extern const struct pmc_bit_map arl_socs_power_gating_status_1_map[];
+extern const struct pmc_bit_map arl_socs_power_gating_status_2_map[];
+extern const struct pmc_bit_map arl_socs_d3_status_2_map[];
+extern const struct pmc_bit_map arl_socs_d3_status_3_map[];
+extern const struct pmc_bit_map arl_socs_vnn_req_status_3_map[];
+extern const struct pmc_bit_map *arl_socs_lpm_maps[];
+extern const struct pmc_bit_map arl_socs_pfear_map[];
+extern const struct pmc_bit_map *ext_arl_socs_pfear_map[];
+extern const struct pmc_reg_map arl_socs_reg_map;
+extern const struct pmc_bit_map arl_pchs_ltr_show_map[];
+extern const struct pmc_bit_map arl_pchs_clocksource_status_map[];
+extern const struct pmc_bit_map arl_pchs_power_gating_status_0_map[];
+extern const struct pmc_bit_map arl_pchs_power_gating_status_1_map[];
+extern const struct pmc_bit_map arl_pchs_power_gating_status_2_map[];
+extern const struct pmc_bit_map arl_pchs_d3_status_0_map[];
+extern const struct pmc_bit_map arl_pchs_d3_status_1_map[];
+extern const struct pmc_bit_map arl_pchs_d3_status_2_map[];
+extern const struct pmc_bit_map arl_pchs_d3_status_3_map[];
+extern const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[];
+extern const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[];
+extern const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[];
+extern const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[];
+extern const struct pmc_bit_map arl_pchs_vnn_misc_status_map[];
+extern const struct pmc_bit_map arl_pchs_signal_status_map[];
+extern const struct pmc_bit_map *arl_pchs_lpm_maps[];
+extern const struct pmc_reg_map arl_pchs_reg_map;
extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
+extern int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev);
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
int pmc_core_resume_common(struct pmc_dev *pmcdev);
int get_primary_reg_base(struct pmc *pmc);
+extern void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
+extern void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
+extern void pmc_core_set_device_d3(unsigned int device);
-extern void pmc_core_ssram_init(struct pmc_dev *pmcdev);
+extern int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func);
int spt_core_init(struct pmc_dev *pmcdev);
int cnp_core_init(struct pmc_dev *pmcdev);
int icl_core_init(struct pmc_dev *pmcdev);
int tgl_core_init(struct pmc_dev *pmcdev);
+int tgl_l_core_init(struct pmc_dev *pmcdev);
+int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp);
int adl_core_init(struct pmc_dev *pmcdev);
int mtl_core_init(struct pmc_dev *pmcdev);
+int arl_core_init(struct pmc_dev *pmcdev);
+int lnl_core_init(struct pmc_dev *pmcdev);
void cnl_suspend(struct pmc_dev *pmcdev);
int cnl_resume(struct pmc_dev *pmcdev);
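
Several of the declarations above (pmc_core_ssram_get_lpm_reqs(), the new lpm_reg_index field, LPM_MAX_NUM_MODES) feed one buffer: pmc->lpm_req_regs, a flat array of LPM_MAX_NUM_MODES * lpm_num_maps u32 words indexed map-first within each mode, exactly as pmc_core_substate_req_regs_show() reads it. A short sketch of that indexing, with assumed example dimensions:

```c
#include <stdio.h>
#include <stdint.h>

#define NUM_MODES 8	/* LPM_MAX_NUM_MODES */
#define NUM_MAPS  14	/* assumed example; the real count is map->lpm_num_maps */

/* mirrors lpm_req_regs[mp + (mode * num_maps)] in core.c */
static uint32_t req_word(const uint32_t *req, int mode, int map)
{
	return req[map + mode * NUM_MAPS];
}

int main(void)
{
	static uint32_t req[NUM_MODES * NUM_MAPS];

	/* pretend mode 2 requires bit 4 of the IPs tracked by map 3 */
	req[3 + 2 * NUM_MAPS] = 1u << 4;
	printf("mode 2, map 3: %#x\n", req_word(req, 2, 3));
	return 0;
}
```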
diff --git a/drivers/platform/x86/intel/pmc/core_ssram.c b/drivers/platform/x86/intel/pmc/core_ssram.c
index 13fa16f0d..1bde86c54 100644
--- a/drivers/platform/x86/intel/pmc/core_ssram.c
+++ b/drivers/platform/x86/intel/pmc/core_ssram.c
@@ -8,10 +8,13 @@
*
*/
+#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "core.h"
+#include "../vsec.h"
+#include "../pmt/telemetry.h"
#define SSRAM_HDR_SIZE 0x100
#define SSRAM_PWRM_OFFSET 0x14
@@ -21,6 +24,185 @@
#define SSRAM_IOE_OFFSET 0x68
#define SSRAM_DEVID_OFFSET 0x70
+/* PCH query */
+#define LPM_HEADER_OFFSET 1
+#define LPM_REG_COUNT 28
+#define LPM_MODE_OFFSET 1
+
+DEFINE_FREE(pmc_core_iounmap, void __iomem *, iounmap(_T));
+
+static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
+{
+ for (; list->map; ++list)
+ if (list->map == map)
+ return list->guid;
+
+ return 0;
+}
+
+static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc)
+{
+ struct telem_endpoint *ep;
+ const u8 *lpm_indices;
+ int num_maps, mode_offset = 0;
+ int ret, mode, i;
+ int lpm_size;
+ u32 guid;
+
+ lpm_indices = pmc->map->lpm_reg_index;
+ num_maps = pmc->map->lpm_num_maps;
+ lpm_size = LPM_MAX_NUM_MODES * num_maps;
+
+ guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
+ if (!guid)
+ return -ENXIO;
+
+ ep = pmt_telem_find_and_register_endpoint(pmcdev->ssram_pcidev, guid, 0);
+ if (IS_ERR(ep)) {
+ dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %ld\n",
+ PTR_ERR(ep));
+ return -EPROBE_DEFER;
+ }
+
+ pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
+ lpm_size * sizeof(u32),
+ GFP_KERNEL);
+ if (!pmc->lpm_req_regs) {
+ ret = -ENOMEM;
+ goto unregister_ep;
+ }
+
+ /*
+ * PMC Low Power Mode (LPM) table
+ *
+ * In telemetry space, the LPM table contains a 4 byte header followed
+ * by 8 consecutive mode blocks (one for each LPM mode). Each block
+ * has a 4 byte header followed by a set of registers that describe the
+ * IP state requirements for the given mode. The IP mapping is platform
+ * specific but the same for each block, making for easy analysis.
+ * Platforms only use a subset of the space to track the requirements
+ * for their IPs. Callers provide the requirement registers they use as
+ * a list of indices. Each requirement register is associated with an
+ * IP map that's maintained by the caller.
+ *
+ * Header
+ * +----+----------------------------+----------------------------+
+ * | 0 | REVISION | ENABLED MODES |
+ * +----+--------------+-------------+-------------+--------------+
+ *
+ * Low Power Mode 0 Block
+ * +----+--------------+-------------+-------------+--------------+
+ * | 1 | SUB ID | SIZE | MAJOR | MINOR |
+ * +----+--------------+-------------+-------------+--------------+
+ * | 2 | LPM0 Requirements 0 |
+ * +----+---------------------------------------------------------+
+ * | | ... |
+ * +----+---------------------------------------------------------+
+ * | 29 | LPM0 Requirements 27 |
+ * +----+---------------------------------------------------------+
+ *
+ * ...
+ *
+ * Low Power Mode 7 Block
+ * +----+--------------+-------------+-------------+--------------+
+ * | | SUB ID | SIZE | MAJOR | MINOR |
+ * +----+--------------+-------------+-------------+--------------+
+ * | 205 | LPM7 Requirements 0 |
+ * +----+---------------------------------------------------------+
+ * | | ... |
+ * +----+---------------------------------------------------------+
+ * | 232 | LPM7 Requirements 27 |
+ * +----+---------------------------------------------------------+
+ *
+ */
+ mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
+ pmc_for_each_mode(i, mode, pmcdev) {
+ u32 *req_offset = pmc->lpm_req_regs + (mode * num_maps);
+ int m;
+
+ for (m = 0; m < num_maps; m++) {
+ u8 sample_id = lpm_indices[m] + mode_offset;
+
+ ret = pmt_telem_read32(ep, sample_id, req_offset, 1);
+ if (ret) {
+ dev_err(&pmcdev->pdev->dev,
+ "couldn't read Low Power Mode requirements: %d\n", ret);
+ devm_kfree(&pmcdev->pdev->dev, pmc->lpm_req_regs);
+ goto unregister_ep;
+ }
+ ++req_offset;
+ }
+ mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
+ }
+
+unregister_ep:
+ pmt_telem_unregister_endpoint(ep);
+
+ return ret;
+}
+
+int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev)
+{
+ int ret, i;
+
+ if (!pmcdev->ssram_pcidev)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ if (!pmcdev->pmcs[i])
+ continue;
+
+ ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+pmc_add_pmt(struct pmc_dev *pmcdev, u64 ssram_base, void __iomem *ssram)
+{
+ struct pci_dev *pcidev = pmcdev->ssram_pcidev;
+ struct intel_vsec_platform_info info = {};
+ struct intel_vsec_header *headers[2] = {};
+ struct intel_vsec_header header;
+ void __iomem *dvsec;
+ u32 dvsec_offset;
+ u32 table, hdr;
+
+ ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (!ssram)
+ return;
+
+ dvsec_offset = readl(ssram + SSRAM_DVSEC_OFFSET);
+ iounmap(ssram);
+
+ dvsec = ioremap(ssram_base + dvsec_offset, SSRAM_DVSEC_SIZE);
+ if (!dvsec)
+ return;
+
+ hdr = readl(dvsec + PCI_DVSEC_HEADER1);
+ header.id = readw(dvsec + PCI_DVSEC_HEADER2);
+ header.rev = PCI_DVSEC_HEADER1_REV(hdr);
+ header.length = PCI_DVSEC_HEADER1_LEN(hdr);
+ header.num_entries = readb(dvsec + INTEL_DVSEC_ENTRIES);
+ header.entry_size = readb(dvsec + INTEL_DVSEC_SIZE);
+
+ table = readl(dvsec + INTEL_DVSEC_TABLE);
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+ iounmap(dvsec);
+
+ headers[0] = &header;
+ info.caps = VSEC_CAP_TELEMETRY;
+ info.headers = headers;
+ info.base_addr = ssram_base;
+ info.parent = &pmcdev->pdev->dev;
+
+ intel_vsec_register(pcidev, &info);
+}
+
static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 devid)
{
for (; list->map; ++list)
@@ -35,20 +217,20 @@ static inline u64 get_base(void __iomem *addr, u32 offset)
return lo_hi_readq(addr + offset) & GENMASK_ULL(63, 3);
}
-static void
+static int
pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base,
const struct pmc_reg_map *reg_map, int pmc_index)
{
struct pmc *pmc = pmcdev->pmcs[pmc_index];
if (!pwrm_base)
- return;
+ return -ENODEV;
/* Memory for primary PMC has been allocated in core.c */
if (!pmc) {
pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
if (!pmc)
- return;
+ return -ENOMEM;
}
pmc->map = reg_map;
@@ -57,77 +239,88 @@ pmc_core_pmc_add(struct pmc_dev *pmcdev, u64 pwrm_base,
if (!pmc->regbase) {
devm_kfree(&pmcdev->pdev->dev, pmc);
- return;
+ return -ENOMEM;
}
pmcdev->pmcs[pmc_index] = pmc;
+
+ return 0;
}
-static void
-pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, void __iomem *ssram, u32 offset,
- int pmc_idx)
+static int
+pmc_core_ssram_get_pmc(struct pmc_dev *pmcdev, int pmc_idx, u32 offset)
{
- u64 pwrm_base;
+ struct pci_dev *ssram_pcidev = pmcdev->ssram_pcidev;
+ void __iomem __free(pmc_core_iounmap) *tmp_ssram = NULL;
+ void __iomem __free(pmc_core_iounmap) *ssram = NULL;
+ const struct pmc_reg_map *map;
+ u64 ssram_base, pwrm_base;
u16 devid;
- if (pmc_idx != PMC_IDX_SOC) {
- u64 ssram_base = get_base(ssram, offset);
+ if (!pmcdev->regmap_list)
+ return -ENOENT;
- if (!ssram_base)
- return;
+ ssram_base = ssram_pcidev->resource[0].start;
+ tmp_ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
+ if (pmc_idx != PMC_IDX_MAIN) {
+ /*
+ * The secondary PMC BARS (which are behind hidden PCI devices)
+ * are read from fixed offsets in MMIO of the primary PMC BAR.
+ */
+ ssram_base = get_base(tmp_ssram, offset);
ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
if (!ssram)
- return;
+ return -ENOMEM;
+
+ } else {
+ ssram = no_free_ptr(tmp_ssram);
}
pwrm_base = get_base(ssram, SSRAM_PWRM_OFFSET);
devid = readw(ssram + SSRAM_DEVID_OFFSET);
- if (pmcdev->regmap_list) {
- const struct pmc_reg_map *map;
+ /* Find and register PMC telemetry entries */
+ pmc_add_pmt(pmcdev, ssram_base, ssram);
- map = pmc_core_find_regmap(pmcdev->regmap_list, devid);
- if (map)
- pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx);
- }
+ map = pmc_core_find_regmap(pmcdev->regmap_list, devid);
+ if (!map)
+ return -ENODEV;
- if (pmc_idx != PMC_IDX_SOC)
- iounmap(ssram);
+ return pmc_core_pmc_add(pmcdev, pwrm_base, map, pmc_idx);
}
-void pmc_core_ssram_init(struct pmc_dev *pmcdev)
+int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func)
{
- void __iomem *ssram;
struct pci_dev *pcidev;
- u64 ssram_base;
int ret;
- pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, 2));
+ pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func));
if (!pcidev)
- goto out;
+ return -ENODEV;
ret = pcim_enable_device(pcidev);
if (ret)
goto release_dev;
- ssram_base = pcidev->resource[0].start;
- ssram = ioremap(ssram_base, SSRAM_HDR_SIZE);
- if (!ssram)
- goto disable_dev;
-
pmcdev->ssram_pcidev = pcidev;
- pmc_core_ssram_get_pmc(pmcdev, ssram, 0, PMC_IDX_SOC);
- pmc_core_ssram_get_pmc(pmcdev, ssram, SSRAM_IOE_OFFSET, PMC_IDX_IOE);
- pmc_core_ssram_get_pmc(pmcdev, ssram, SSRAM_PCH_OFFSET, PMC_IDX_PCH);
+ ret = pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_MAIN, 0);
+ if (ret)
+ goto disable_dev;
- iounmap(ssram);
-out:
- return;
+ pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_IOE, SSRAM_IOE_OFFSET);
+ pmc_core_ssram_get_pmc(pmcdev, PMC_IDX_PCH, SSRAM_PCH_OFFSET);
+
+ return 0;
disable_dev:
+ pmcdev->ssram_pcidev = NULL;
pci_disable_device(pcidev);
release_dev:
pci_dev_put(pcidev);
+
+ return ret;
}
+MODULE_IMPORT_NS(INTEL_VSEC);
+MODULE_IMPORT_NS(INTEL_PMT_TELEMETRY);
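
The table layout documented in pmc_core_get_lpm_req() above reduces to a simple sample-ID formula: mode 0's first requirement word sits at LPM_HEADER_OFFSET + LPM_MODE_OFFSET = 2, and each subsequent mode block advances the base by LPM_REG_COUNT + LPM_MODE_OFFSET = 29 words. A standalone sketch of that computation; the three register indices stand in for a platform's lpm_reg_index list and are hypothetical:

```c
#include <stdio.h>

#define LPM_HEADER_OFFSET 1
#define LPM_MODE_OFFSET   1
#define LPM_REG_COUNT     28

int main(void)
{
	/* hypothetical per-map register indices, in the style of MTL_LPM_REG_INDEX */
	const unsigned char lpm_indices[] = { 0, 4, 5 };
	int mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
	int mode, m;

	for (mode = 0; mode < 2; mode++) {
		for (m = 0; m < 3; m++)
			printf("mode %d, map %d -> sample id %d\n",
			       mode, m, lpm_indices[m] + mode_offset);
		mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
	}
	return 0;
}
```

For mode 0 this prints sample IDs 2, 6 and 7; for mode 1 it prints 31, 35 and 36, matching the row numbers in the diagram above.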
diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c
index d08e31742..71b0fd6cb 100644
--- a/drivers/platform/x86/intel/pmc/icl.c
+++ b/drivers/platform/x86/intel/pmc/icl.c
@@ -53,7 +53,15 @@ const struct pmc_reg_map icl_reg_map = {
int icl_core_init(struct pmc_dev *pmcdev)
{
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
pmc->map = &icl_reg_map;
- return get_primary_reg_base(pmc);
+
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ pmc_core_get_low_power_modes(pmcdev);
+
+ return ret;
}
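
A side note on the core_ssram.c hunk above: it is where this series first leans on the scope-based cleanup helpers from <linux/cleanup.h>. DEFINE_FREE(pmc_core_iounmap, ...) plus __free() unmap a region automatically when the guarded pointer goes out of scope, and no_free_ptr() transfers ownership between guarded pointers (as pmc_core_ssram_get_pmc() does from tmp_ssram to ssram) so the region is unmapped exactly once. A rough userspace approximation of the idiom, using the GCC/Clang cleanup attribute purely for illustration:

```c
#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
	free(*p);	/* invoked automatically when the variable leaves scope */
}
#define __free_buf __attribute__((cleanup(free_buf)))

int main(void)
{
	__free_buf char *tmp = malloc(32);

	if (!tmp)
		return 1;
	snprintf(tmp, 32, "scoped");
	puts(tmp);
	return 0;	/* the compiler calls free_buf(&tmp) here */
}
```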
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
new file mode 100644
index 000000000..068d72504
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Lunar Lake PCH.
+ *
+ * Copyright (c) 2022, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/pci.h>
+
+#include "core.h"
+
+const struct pmc_bit_map lnl_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", CNP_PMC_LTR_RESERVED},
+
+ {"ESE", MTL_PMC_LTR_ESE},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+ {"DMI3", ARL_PMC_LTR_DMI3},
+ {"OSSE", LNL_PMC_LTR_OSSE},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0)},
+ {"FUSE_OSSE_PGD0_PG_STS", BIT(1)},
+ {"ESPISPI_PGD0_PG_STS", BIT(2)},
+ {"XHCI_PGD0_PG_STS", BIT(3)},
+ {"SPA_PGD0_PG_STS", BIT(4)},
+ {"SPB_PGD0_PG_STS", BIT(5)},
+ {"SPR16B0_PGD0_PG_STS", BIT(6)},
+ {"GBE_PGD0_PG_STS", BIT(7)},
+ {"SBR8B7_PGD0_PG_STS", BIT(8)},
+ {"SBR8B6_PGD0_PG_STS", BIT(9)},
+ {"SBR16B1_PGD0_PG_STS", BIT(10)},
+ {"SBR8B8_PGD0_PG_STS", BIT(11)},
+ {"ESE_PGD3_PG_STS", BIT(12)},
+ {"D2D_DISP_PGD0_PG_STS", BIT(13)},
+ {"LPSS_PGD0_PG_STS", BIT(14)},
+ {"LPC_PGD0_PG_STS", BIT(15)},
+ {"SMB_PGD0_PG_STS", BIT(16)},
+ {"ISH_PGD0_PG_STS", BIT(17)},
+ {"SBR8B2_PGD0_PG_STS", BIT(18)},
+ {"NPK_PGD0_PG_STS", BIT(19)},
+ {"D2D_NOC_PGD0_PG_STS", BIT(20)},
+ {"SAFSS_PGD0_PG_STS", BIT(21)},
+ {"FUSE_PGD0_PG_STS", BIT(22)},
+ {"D2D_DISP_PGD1_PG_STS", BIT(23)},
+ {"MPFPW1_PGD0_PG_STS", BIT(24)},
+ {"XDCI_PGD0_PG_STS", BIT(25)},
+ {"EXI_PGD0_PG_STS", BIT(26)},
+ {"CSE_PGD0_PG_STS", BIT(27)},
+ {"KVMCC_PGD0_PG_STS", BIT(28)},
+ {"PMT_PGD0_PG_STS", BIT(29)},
+ {"CLINK_PGD0_PG_STS", BIT(30)},
+ {"PTIO_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0)},
+ {"SUSRAM_PGD0_PG_STS", BIT(1)},
+ {"SMT1_PGD0_PG_STS", BIT(2)},
+ {"U3FPW1_PGD0_PG_STS", BIT(3)},
+ {"SMS2_PGD0_PG_STS", BIT(4)},
+ {"SMS1_PGD0_PG_STS", BIT(5)},
+ {"CSMERTC_PGD0_PG_STS", BIT(6)},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7)},
+ {"FIA_PG_PGD0_PG_STS", BIT(8)},
+ {"SBR16B4_PGD0_PG_STS", BIT(9)},
+ {"P2SB8B_PGD0_PG_STS", BIT(10)},
+ {"DBG_SBR_PGD0_PG_STS", BIT(11)},
+ {"SBR8B9_PGD0_PG_STS", BIT(12)},
+ {"OSSE_SMT1_PGD0_PG_STS", BIT(13)},
+ {"SBR8B10_PGD0_PG_STS", BIT(14)},
+ {"SBR16B3_PGD0_PG_STS", BIT(15)},
+ {"G5FPW1_PGD0_PG_STS", BIT(16)},
+ {"SBRG_PGD0_PG_STS", BIT(17)},
+ {"PSF4_PGD0_PG_STS", BIT(18)},
+ {"CNVI_PGD0_PG_STS", BIT(19)},
+ {"USFX2_PGD0_PG_STS", BIT(20)},
+ {"ENDBG_PGD0_PG_STS", BIT(21)},
+ {"FIACPCB_P5X4_PGD0_PG_STS", BIT(22)},
+ {"SBR8B3_PGD0_PG_STS", BIT(23)},
+ {"SBR8B0_PGD0_PG_STS", BIT(24)},
+ {"NPK_PGD1_PG_STS", BIT(25)},
+ {"OSSE_HOTHAM_PGD0_PG_STS", BIT(26)},
+ {"D2D_NOC_PGD2_PG_STS", BIT(27)},
+ {"SBR8B1_PGD0_PG_STS", BIT(28)},
+ {"PSF6_PGD0_PG_STS", BIT(29)},
+ {"PSF7_PGD0_PG_STS", BIT(30)},
+ {"FIA_U_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
+ {"PSF8_PGD0_PG_STS", BIT(0)},
+ {"SBR16B2_PGD0_PG_STS", BIT(1)},
+ {"D2D_IPU_PGD0_PG_STS", BIT(2)},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3)},
+ {"TAM_PGD0_PG_STS", BIT(4)},
+ {"D2D_NOC_PGD1_PG_STS", BIT(5)},
+ {"TBTLSX_PGD0_PG_STS", BIT(6)},
+ {"THC0_PGD0_PG_STS", BIT(7)},
+ {"THC1_PGD0_PG_STS", BIT(8)},
+ {"PMC_PGD0_PG_STS", BIT(9)},
+ {"SBR8B5_PGD0_PG_STS", BIT(10)},
+ {"UFSPW1_PGD0_PG_STS", BIT(11)},
+ {"DBC_PGD0_PG_STS", BIT(12)},
+ {"TCSS_PGD0_PG_STS", BIT(13)},
+ {"FIA_P5X4_PGD0_PG_STS", BIT(14)},
+ {"DISP_PGA_PGD0_PG_STS", BIT(15)},
+ {"DISP_PSF_PGD0_PG_STS", BIT(16)},
+ {"PSF0_PGD0_PG_STS", BIT(17)},
+ {"P2SB16B_PGD0_PG_STS", BIT(18)},
+ {"ACE_PGD0_PG_STS", BIT(19)},
+ {"ACE_PGD1_PG_STS", BIT(20)},
+ {"ACE_PGD2_PG_STS", BIT(21)},
+ {"ACE_PGD3_PG_STS", BIT(22)},
+ {"ACE_PGD4_PG_STS", BIT(23)},
+ {"ACE_PGD5_PG_STS", BIT(24)},
+ {"ACE_PGD6_PG_STS", BIT(25)},
+ {"ACE_PGD7_PG_STS", BIT(26)},
+ {"ACE_PGD8_PG_STS", BIT(27)},
+ {"ACE_PGD9_PG_STS", BIT(28)},
+ {"ACE_PGD10_PG_STS", BIT(29)},
+ {"FIACPCB_PG_PGD0_PG_STS", BIT(30)},
+ {"OSSE_PGD0_PG_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map lnl_d3_status_0_map[] = {
+ {"LPSS_D3_STS", BIT(3)},
+ {"XDCI_D3_STS", BIT(4)},
+ {"XHCI_D3_STS", BIT(5)},
+ {"SPA_D3_STS", BIT(12)},
+ {"SPB_D3_STS", BIT(13)},
+ {"OSSE_D3_STS", BIT(15)},
+ {"ESPISPI_D3_STS", BIT(18)},
+ {"PSTH_D3_STS", BIT(21)},
+ {}
+};
+
+const struct pmc_bit_map lnl_d3_status_1_map[] = {
+ {"OSSE_SMT1_D3_STS", BIT(7)},
+ {"GBE_D3_STS", BIT(19)},
+ {"ITSS_D3_STS", BIT(23)},
+ {"CNVI_D3_STS", BIT(27)},
+ {"UFSX2_D3_STS", BIT(28)},
+ {"OSSE_HOTHAM_D3_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map lnl_d3_status_2_map[] = {
+ {"ESE_D3_STS", BIT(0)},
+ {"CSMERTC_D3_STS", BIT(1)},
+ {"SUSRAM_D3_STS", BIT(2)},
+ {"CSE_D3_STS", BIT(4)},
+ {"KVMCC_D3_STS", BIT(5)},
+ {"USBR0_D3_STS", BIT(6)},
+ {"ISH_D3_STS", BIT(7)},
+ {"SMT1_D3_STS", BIT(8)},
+ {"SMT2_D3_STS", BIT(9)},
+ {"SMT3_D3_STS", BIT(10)},
+ {"OSSE_SMT2_D3_STS", BIT(13)},
+ {"CLINK_D3_STS", BIT(14)},
+ {"PTIO_D3_STS", BIT(16)},
+ {"PMT_D3_STS", BIT(17)},
+ {"SMS1_D3_STS", BIT(18)},
+ {"SMS2_D3_STS", BIT(19)},
+ {}
+};
+
+const struct pmc_bit_map lnl_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14)},
+ {"THC1_D3_STS", BIT(15)},
+ {"OSSE_SMT3_D3_STS", BIT(21)},
+ {"ACE_D3_STS", BIT(23)},
+ {}
+};
+
+const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
+ {"LPSS_VNN_REQ_STS", BIT(3)},
+ {"OSSE_VNN_REQ_STS", BIT(15)},
+ {"ESPISPI_VNN_REQ_STS", BIT(18)},
+ {}
+};
+
+const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4)},
+ {"OSSE_SMT1_VNN_REQ_STS", BIT(7)},
+ {"DFXAGG_VNN_REQ_STS", BIT(8)},
+ {"EXI_VNN_REQ_STS", BIT(9)},
+ {"P2D_VNN_REQ_STS", BIT(18)},
+ {"GBE_VNN_REQ_STS", BIT(19)},
+ {"SMB_VNN_REQ_STS", BIT(25)},
+ {"LPC_VNN_REQ_STS", BIT(26)},
+ {}
+};
+
+const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
+ {"eSE_VNN_REQ_STS", BIT(0)},
+ {"CSMERTC_VNN_REQ_STS", BIT(1)},
+ {"CSE_VNN_REQ_STS", BIT(4)},
+ {"ISH_VNN_REQ_STS", BIT(7)},
+ {"SMT1_VNN_REQ_STS", BIT(8)},
+ {"CLINK_VNN_REQ_STS", BIT(14)},
+ {"SMS1_VNN_REQ_STS", BIT(18)},
+ {"SMS2_VNN_REQ_STS", BIT(19)},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20)},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21)},
+ {"GPIOCOM2_VNN_REQ_STS", BIT(22)},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23)},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24)},
+ {}
+};
+
+const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
+ {"DISP_SHIM_VNN_REQ_STS", BIT(2)},
+ {"DTS0_VNN_REQ_STS", BIT(7)},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11)},
+ {}
+};
+
+const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0)},
+ {"TS_OFF_REQ_STS", BIT(1)},
+ {"PNDE_MET_REQ_STS", BIT(2)},
+ {"PCIE_DEEP_PM_REQ_STS", BIT(3)},
+ {"PMC_CLK_THROTTLE_EN_REQ_STS", BIT(4)},
+ {"NPK_VNNAON_REQ_STS", BIT(5)},
+ {"VNN_SOC_REQ_STS", BIT(6)},
+ {"ISH_VNNAON_REQ_STS", BIT(7)},
+ {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8)},
+ {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9)},
+ {"D2D_NOC_IPU_QACTIVE_REQ_STS", BIT(10)},
+ {"PLT_GREATER_REQ_STS", BIT(11)},
+ {"PCIE_CLKREQ_REQ_STS", BIT(12)},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13)},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14)},
+ {"EA_REQ_STS", BIT(15)},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16)},
+ {"BRK_EV_EN_REQ_STS", BIT(17)},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18)},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19)},
+ {"LPC_CLK_SRC_REQ_STS", BIT(20)},
+ {"ARC_IDLE_REQ_STS", BIT(21)},
+ {"MPHY_SUS_REQ_STS", BIT(22)},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23)},
+ {"UXD_CONNECTED_REQ_STS", BIT(24)},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25)},
+ {"D2D_NOC_DISP_DDI_QACTIVE_REQ_STS", BIT(26)},
+ {"PRE_WAKE0_REQ_STS", BIT(27)},
+ {"PRE_WAKE1_REQ_STS", BIT(28)},
+ {"PRE_WAKE2_EN_REQ_STS", BIT(29)},
+ {"WOV_REQ_STS", BIT(30)},
+ {"D2D_NOC_DISP_EDP_QACTIVE_REQ_STS_31", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map lnl_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0)},
+ {"AON3_OFF_STS", BIT(1)},
+ {"AON4_OFF_STS", BIT(2)},
+ {"AON5_OFF_STS", BIT(3)},
+ {"AON1_OFF_STS", BIT(4)},
+ {"MPFPW1_0_PLL_OFF_STS", BIT(6)},
+ {"USB3_PLL_OFF_STS", BIT(8)},
+ {"AON3_SPL_OFF_STS", BIT(9)},
+ {"G5FPW1_PLL_OFF_STS", BIT(15)},
+ {"XTAL_AGGR_OFF_STS", BIT(17)},
+ {"USB2_PLL_OFF_STS", BIT(18)},
+ {"SAF_PLL_OFF_STS", BIT(19)},
+ {"SE_TCSS_PLL_OFF_STS", BIT(20)},
+ {"DDI_PLL_OFF_STS", BIT(21)},
+ {"FILTER_PLL_OFF_STS", BIT(22)},
+ {"ACE_PLL_OFF_STS", BIT(24)},
+ {"FABRIC_PLL_OFF_STS", BIT(25)},
+ {"SOC_PLL_OFF_STS", BIT(26)},
+ {"REF_OFF_STS", BIT(28)},
+ {"IMG_OFF_STS", BIT(29)},
+ {"RTC_PLL_OFF_STS", BIT(31)},
+ {}
+};
+
+const struct pmc_bit_map *lnl_lpm_maps[] = {
+ lnl_clocksource_status_map,
+ lnl_power_gating_status_0_map,
+ lnl_power_gating_status_1_map,
+ lnl_power_gating_status_2_map,
+ lnl_d3_status_0_map,
+ lnl_d3_status_1_map,
+ lnl_d3_status_2_map,
+ lnl_d3_status_3_map,
+ lnl_vnn_req_status_0_map,
+ lnl_vnn_req_status_1_map,
+ lnl_vnn_req_status_2_map,
+ lnl_vnn_req_status_3_map,
+ lnl_vnn_misc_status_map,
+ mtl_socm_signal_status_map,
+ NULL
+};
+
+const struct pmc_bit_map lnl_pfear_map[] = {
+ {"PMC_0", BIT(0)},
+ {"FUSE_OSSE", BIT(1)},
+ {"ESPISPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"SBR16B0", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SBR8B7", BIT(0)},
+ {"SBR8B6", BIT(1)},
+ {"SBR16B1", BIT(1)},
+ {"SBR8B8", BIT(2)},
+ {"ESE", BIT(3)},
+ {"SBR8B10", BIT(4)},
+ {"D2D_DISP_0", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"SBR8B2", BIT(2)},
+ {"NPK_0", BIT(3)},
+ {"D2D_NOC_0", BIT(4)},
+ {"SAFSS", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"D2D_DISP_1", BIT(7)},
+
+ {"MPFPW1", BIT(0)},
+ {"XDCI", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"KVMCC", BIT(4)},
+ {"PMT", BIT(5)},
+ {"CLINK", BIT(6)},
+ {"PTIO", BIT(7)},
+
+ {"USBR", BIT(0)},
+ {"SUSRAM", BIT(1)},
+ {"SMT1", BIT(2)},
+ {"U3FPW1", BIT(3)},
+ {"SMS2", BIT(4)},
+ {"SMS1", BIT(5)},
+ {"CSMERTC", BIT(6)},
+ {"CSMEPSF", BIT(7)},
+
+ {"FIA_PG", BIT(0)},
+ {"SBR16B4", BIT(1)},
+ {"P2SB8B", BIT(2)},
+ {"DBG_SBR", BIT(3)},
+ {"SBR8B9", BIT(4)},
+ {"OSSE_SMT1", BIT(5)},
+ {"SBR8B10", BIT(6)},
+ {"SBR16B3", BIT(7)},
+
+ {"G5FPW1", BIT(0)},
+ {"SBRG", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFSX2", BIT(4)},
+ {"ENDBG", BIT(5)},
+ {"FIACPCB_P5X4", BIT(6)},
+ {"SBR8B3", BIT(7)},
+
+ {"SBR8B0", BIT(0)},
+ {"NPK_1", BIT(1)},
+ {"OSSE_HOTHAM", BIT(2)},
+ {"D2D_NOC_2", BIT(3)},
+ {"SBR8B1", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"PSF7", BIT(6)},
+ {"FIA_U", BIT(7)},
+
+ {"PSF8", BIT(0)},
+ {"SBR16B2", BIT(1)},
+ {"D2D_IPU", BIT(2)},
+ {"FIACPCB_U", BIT(3)},
+ {"TAM", BIT(4)},
+ {"D2D_NOC_1", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {"THC0", BIT(7)},
+
+ {"THC1", BIT(0)},
+ {"PMC_1", BIT(1)},
+ {"SBR8B5", BIT(2)},
+ {"UFSPW1", BIT(3)},
+ {"DBC", BIT(4)},
+ {"TCSS", BIT(5)},
+ {"FIA_P5X4", BIT(6)},
+ {"DISP_PGA", BIT(7)},
+
+ {"DBG_PSF", BIT(0)},
+ {"PSF0", BIT(1)},
+ {"P2SB16B", BIT(2)},
+ {"ACE0", BIT(3)},
+ {"ACE1", BIT(4)},
+ {"ACE2", BIT(5)},
+ {"ACE3", BIT(6)},
+ {"ACE4", BIT(7)},
+
+ {"ACE5", BIT(0)},
+ {"ACE6", BIT(1)},
+ {"ACE7", BIT(2)},
+ {"ACE8", BIT(3)},
+ {"ACE9", BIT(4)},
+ {"ACE10", BIT(5)},
+ {"FIACPCB", BIT(6)},
+ {"OSSE", BIT(7)},
+ {}
+};
+
+const struct pmc_bit_map *ext_lnl_pfear_map[] = {
+ lnl_pfear_map,
+ NULL
+};
+
+const struct pmc_reg_map lnl_socm_reg_map = {
+ .pfear_sts = ext_lnl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = lnl_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = LNL_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = lnl_lpm_maps,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+};
+
+#define LNL_NPU_PCI_DEV 0x643e
+#define LNL_IPU_PCI_DEV 0x645d
+
+/*
+ * Put select devices that have no driver into D3 so that they
+ * do not block Package C entry.
+ */
+static void lnl_d3_fixup(void)
+{
+ pmc_core_set_device_d3(LNL_IPU_PCI_DEV);
+ pmc_core_set_device_d3(LNL_NPU_PCI_DEV);
+}
+
+static int lnl_resume(struct pmc_dev *pmcdev)
+{
+ lnl_d3_fixup();
+ pmc_core_send_ltr_ignore(pmcdev, 3, 0);
+
+ return pmc_core_resume_common(pmcdev);
+}
+
+int lnl_core_init(struct pmc_dev *pmcdev)
+{
+ int ret;
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
+
+ lnl_d3_fixup();
+
+ pmcdev->suspend = cnl_suspend;
+ pmcdev->resume = lnl_resume;
+
+ pmc->map = &lnl_socm_reg_map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ pmc_core_get_low_power_modes(pmcdev);
+
+ return 0;
+}
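
A note on lnl_pfear_map above: the masks cycle through BIT(0)..BIT(7) repeatedly because PPFEAR state is read as LNL_PPFEAR_NUM_ENTRIES one-byte buckets, and each bucket covers the next eight map entries in order. A standalone sketch of that decode; the map subset and register bytes are illustrative, not real readings:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

struct pmc_bit_map { const char *name; uint32_t bit_mask; };

/* first nine entries of a pfear-style table; masks wrap at each bucket */
static const struct pmc_bit_map pfear_map[] = {
	{"PMC_0", BIT(0)}, {"FUSE_OSSE", BIT(1)}, {"ESPISPI", BIT(2)},
	{"XHCI", BIT(3)}, {"SPA", BIT(4)}, {"SPB", BIT(5)},
	{"SBR16B0", BIT(6)}, {"GBE", BIT(7)},	/* bucket 0 ends here */
	{"SBR8B7", BIT(0)},			/* bucket 1 begins */
	{}
};

int main(void)
{
	const uint8_t buckets[2] = { 0x81, 0x01 };	/* sample PPFEAR bytes */
	int i = 0;

	for (const struct pmc_bit_map *m = pfear_map; m->name; m++, i++)
		printf("%-10s %d\n", m->name,
		       !!(buckets[i / 8] & m->bit_mask));
	return 0;
}
```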
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 504e3e273..c7d15d864 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -10,6 +10,14 @@
#include <linux/pci.h>
#include "core.h"
+#include "../pmt/telemetry.h"
+
+/* PMC SSRAM PMT Telemetry GUIDS */
+#define SOCP_LPM_REQ_GUID 0x2625030
+#define IOEM_LPM_REQ_GUID 0x4357464
+#define IOEP_LPM_REQ_GUID 0x5077612
+
+static const u8 MTL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
/*
* Die Mapping to Product.
@@ -465,6 +473,7 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.lpm_sts = mtl_socm_lpm_maps,
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .lpm_reg_index = MTL_LPM_REG_INDEX,
};
const struct pmc_bit_map mtl_ioep_pfear_map[] = {
@@ -782,6 +791,13 @@ const struct pmc_reg_map mtl_ioep_reg_map = {
.ltr_show_sts = mtl_ioep_ltr_show_map,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_reg_index = MTL_LPM_REG_INDEX,
};
const struct pmc_bit_map mtl_ioem_pfear_map[] = {
@@ -922,6 +938,13 @@ const struct pmc_reg_map mtl_ioem_reg_map = {
.ltr_show_sts = mtl_ioep_ltr_show_map,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.ltr_ignore_max = ADL_NUM_IP_IGN_ALLOWED,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_num_maps = ADL_LPM_NUM_MAPS,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_reg_index = MTL_LPM_REG_INDEX,
};
#define PMC_DEVID_SOCM 0x7e7f
@@ -929,16 +952,19 @@ const struct pmc_reg_map mtl_ioem_reg_map = {
#define PMC_DEVID_IOEM 0x7ebf
static struct pmc_info mtl_pmc_info_list[] = {
{
- .devid = PMC_DEVID_SOCM,
- .map = &mtl_socm_reg_map,
+ .guid = SOCP_LPM_REQ_GUID,
+ .devid = PMC_DEVID_SOCM,
+ .map = &mtl_socm_reg_map,
},
{
- .devid = PMC_DEVID_IOEP,
- .map = &mtl_ioep_reg_map,
+ .guid = IOEP_LPM_REQ_GUID,
+ .devid = PMC_DEVID_IOEP,
+ .map = &mtl_ioep_reg_map,
},
{
- .devid = PMC_DEVID_IOEM,
- .map = &mtl_ioem_reg_map
+ .guid = IOEM_LPM_REQ_GUID,
+ .devid = PMC_DEVID_IOEM,
+ .map = &mtl_ioem_reg_map
},
{}
};
@@ -946,34 +972,15 @@ static struct pmc_info mtl_pmc_info_list[] = {
#define MTL_GNA_PCI_DEV 0x7e4c
#define MTL_IPU_PCI_DEV 0x7d19
#define MTL_VPU_PCI_DEV 0x7d1d
-static void mtl_set_device_d3(unsigned int device)
-{
- struct pci_dev *pcidev;
-
- pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
- if (pcidev) {
- if (!device_trylock(&pcidev->dev)) {
- pci_dev_put(pcidev);
- return;
- }
- if (!pcidev->dev.driver) {
- dev_info(&pcidev->dev, "Setting to D3hot\n");
- pci_set_power_state(pcidev, PCI_D3hot);
- }
- device_unlock(&pcidev->dev);
- pci_dev_put(pcidev);
- }
-}
-
/*
 * Put select devices that have no driver into D3 so that they
 * do not block Package C entry.
*/
static void mtl_d3_fixup(void)
{
- mtl_set_device_d3(MTL_GNA_PCI_DEV);
- mtl_set_device_d3(MTL_IPU_PCI_DEV);
- mtl_set_device_d3(MTL_VPU_PCI_DEV);
+ pmc_core_set_device_d3(MTL_GNA_PCI_DEV);
+ pmc_core_set_device_d3(MTL_IPU_PCI_DEV);
+ pmc_core_set_device_d3(MTL_VPU_PCI_DEV);
}
static int mtl_resume(struct pmc_dev *pmcdev)
@@ -987,23 +994,36 @@ static int mtl_resume(struct pmc_dev *pmcdev)
int mtl_core_init(struct pmc_dev *pmcdev)
{
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
- int ret = 0;
+ int ret;
+ int func = 2;
+ bool ssram_init = true;
mtl_d3_fixup();
pmcdev->suspend = cnl_suspend;
pmcdev->resume = mtl_resume;
-
pmcdev->regmap_list = mtl_pmc_info_list;
- pmc_core_ssram_init(pmcdev);
- /* If regbase not assigned, set map and discover using legacy method */
- if (!pmc->regbase) {
+ /*
+ * If SSRAM init fails, use the legacy method to at least get
+ * the primary PMC.
+ */
+ ret = pmc_core_ssram_init(pmcdev, func);
+ if (ret) {
+ ssram_init = false;
+ dev_warn(&pmcdev->pdev->dev,
+ "ssram init failed, %d, using legacy init\n", ret);
pmc->map = &mtl_socm_reg_map;
ret = get_primary_reg_base(pmc);
if (ret)
return ret;
}
+ pmc_core_get_low_power_modes(pmcdev);
+ pmc_core_punit_pmt_init(pmcdev, MTL_PMT_DMU_GUID);
+
+ if (ssram_init)
+ return pmc_core_ssram_get_lpm_reqs(pmcdev);
+
return 0;
}
diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c
index 4b6f5cbda..ab993a69e 100644
--- a/drivers/platform/x86/intel/pmc/spt.c
+++ b/drivers/platform/x86/intel/pmc/spt.c
@@ -137,7 +137,15 @@ const struct pmc_reg_map spt_reg_map = {
int spt_core_init(struct pmc_dev *pmcdev)
{
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ int ret;
pmc->map = &spt_reg_map;
- return get_primary_reg_base(pmc);
+
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+
+ pmc_core_get_low_power_modes(pmcdev);
+
+ return ret;
}
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index e88d3d00c..e0580de18 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -13,6 +13,11 @@
#define ACPI_S0IX_DSM_UUID "57a6512e-3979-4e9d-9708-ff13b2508972"
#define ACPI_GET_LOW_MODE_REGISTERS 1
+enum pch_type {
+ PCH_H,
+ PCH_LP
+};
+
const struct pmc_bit_map tgl_pfear_map[] = {
{"PSF9", BIT(0)},
{"RES_66", BIT(1)},
@@ -205,6 +210,33 @@ const struct pmc_reg_map tgl_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
+const struct pmc_reg_map tgl_h_reg_map = {
+ .pfear_sts = ext_tgl_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = cnp_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = CNP_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
+ .lpm_num_maps = TGL_LPM_NUM_MAPS,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .lpm_sts_latch_en_offset = TGL_LPM_STS_LATCH_EN_OFFSET,
+ .lpm_en_offset = TGL_LPM_EN_OFFSET,
+ .lpm_priority_offset = TGL_LPM_PRI_OFFSET,
+ .lpm_residency_offset = TGL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = tgl_lpm_maps,
+ .lpm_status_offset = TGL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = TGL_LPM_LIVE_STATUS_OFFSET,
+ .etr3_offset = ETR3_OFFSET,
+ .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET,
+ .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
+};
+
void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
@@ -253,12 +285,25 @@ free_acpi_obj:
ACPI_FREE(out_obj);
}
+int tgl_l_core_init(struct pmc_dev *pmcdev)
+{
+ return tgl_core_generic_init(pmcdev, PCH_LP);
+}
+
int tgl_core_init(struct pmc_dev *pmcdev)
{
+ return tgl_core_generic_init(pmcdev, PCH_H);
+}
+
+int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp)
+{
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
int ret;
- pmc->map = &tgl_reg_map;
+ if (pch_tp == PCH_H)
+ pmc->map = &tgl_h_reg_map;
+ else
+ pmc->map = &tgl_reg_map;
pmcdev->suspend = cnl_suspend;
pmcdev->resume = cnl_resume;
@@ -267,6 +312,7 @@ int tgl_core_init(struct pmc_dev *pmcdev)
if (ret)
return ret;
+ pmc_core_get_low_power_modes(pmcdev);
pmc_core_get_tgl_lpm_reqs(pmcdev->pdev);
return 0;
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index f32a23347..4b53940a6 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -17,7 +17,7 @@
#include "../vsec.h"
#include "class.h"
-#define PMT_XA_START 0
+#define PMT_XA_START 1
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
#define GUID_SPR_PUNIT 0x9956f43f
@@ -31,7 +31,7 @@ bool intel_pmt_is_early_client_hw(struct device *dev)
* differences from the server platforms (which use the Out Of Band
* Management Services Module OOBMSM).
*/
- return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
+ return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW);
}
EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT);
@@ -159,11 +159,12 @@ static struct class intel_pmt_class = {
};
static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
- struct intel_pmt_header *header,
- struct device *dev,
+ struct intel_vsec_device *ivdev,
struct resource *disc_res)
{
- struct pci_dev *pci_dev = to_pci_dev(dev->parent);
+ struct pci_dev *pci_dev = ivdev->pcidev;
+ struct device *dev = &ivdev->auxdev.dev;
+ struct intel_pmt_header *header = &entry->header;
u8 bir;
/*
@@ -215,6 +216,13 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
break;
case ACCESS_BARID:
+ /* Use the provided base address if it exists */
+ if (ivdev->base_addr) {
+ entry->base_addr = ivdev->base_addr +
+ GET_ADDRESS(header->base_offset);
+ break;
+ }
+
/*
* If another BAR was specified then the base offset
	 * represents the offset within that BAR. So retrieve the
@@ -239,6 +247,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
struct device *parent)
{
+ struct intel_vsec_device *ivdev = dev_to_ivdev(parent);
struct resource res = {0};
struct device *dev;
int ret;
@@ -262,7 +271,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
if (ns->attr_grp) {
ret = sysfs_create_group(entry->kobj, ns->attr_grp);
if (ret)
- goto fail_sysfs;
+ goto fail_sysfs_create_group;
}
/* if size is 0 assume no data buffer, so no file needed */
@@ -287,13 +296,23 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
entry->pmt_bin_attr.size = entry->size;
ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
- if (!ret)
- return 0;
+ if (ret)
+ goto fail_ioremap;
+ if (ns->pmt_add_endpoint) {
+ ret = ns->pmt_add_endpoint(entry, ivdev->pcidev);
+ if (ret)
+ goto fail_add_endpoint;
+ }
+
+ return 0;
+
+fail_add_endpoint:
+ sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);
fail_ioremap:
if (ns->attr_grp)
sysfs_remove_group(entry->kobj, ns->attr_grp);
-fail_sysfs:
+fail_sysfs_create_group:
device_unregister(dev);
fail_dev_create:
xa_erase(ns->xa, entry->devid);
@@ -305,7 +324,6 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa
struct intel_vsec_device *intel_vsec_dev, int idx)
{
struct device *dev = &intel_vsec_dev->auxdev.dev;
- struct intel_pmt_header header;
struct resource *disc_res;
int ret;
@@ -315,16 +333,15 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa
if (IS_ERR(entry->disc_table))
return PTR_ERR(entry->disc_table);
- ret = ns->pmt_header_decode(entry, &header, dev);
+ ret = ns->pmt_header_decode(entry, dev);
if (ret)
return ret;
- ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
+ ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res);
if (ret)
return ret;
return intel_pmt_dev_register(entry, ns, dev);
-
}
EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT);
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
index db11d5886..d23c63b73 100644
--- a/drivers/platform/x86/intel/pmt/class.h
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include "../vsec.h"
+#include "telemetry.h"
/* PMT access types */
#define ACCESS_BARID 2
@@ -18,7 +19,26 @@
#define GET_BIR(v) ((v) & GENMASK(2, 0))
#define GET_ADDRESS(v) ((v) & GENMASK(31, 3))
+struct pci_dev;
+
+struct telem_endpoint {
+ struct pci_dev *pcidev;
+ struct telem_header header;
+ void __iomem *base;
+ bool present;
+ struct kref kref;
+};
+
+struct intel_pmt_header {
+ u32 base_offset;
+ u32 size;
+ u32 guid;
+ u8 access_type;
+};
+
struct intel_pmt_entry {
+ struct telem_endpoint *ep;
+ struct intel_pmt_header header;
struct bin_attribute pmt_bin_attr;
struct kobject *kobj;
void __iomem *disc_table;
@@ -29,20 +49,14 @@ struct intel_pmt_entry {
int devid;
};
-struct intel_pmt_header {
- u32 base_offset;
- u32 size;
- u32 guid;
- u8 access_type;
-};
-
struct intel_pmt_namespace {
const char *name;
struct xarray *xa;
const struct attribute_group *attr_grp;
int (*pmt_header_decode)(struct intel_pmt_entry *entry,
- struct intel_pmt_header *header,
struct device *dev);
+ int (*pmt_add_endpoint)(struct intel_pmt_entry *entry,
+ struct pci_dev *pdev);
};
bool intel_pmt_is_early_client_hw(struct device *dev);
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index bbb3d61d0..4014c02ca 100644
--- a/drivers/platform/x86/intel/pmt/crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -223,10 +223,10 @@ static const struct attribute_group pmt_crashlog_group = {
};
static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry,
- struct intel_pmt_header *header,
struct device *dev)
{
void __iomem *disc_table = entry->disc_table;
+ struct intel_pmt_header *header = &entry->header;
struct crashlog_entry *crashlog;
if (!pmt_crashlog_supported(entry))
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 39cbc87cc..09258564d 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -30,6 +30,15 @@
/* Used by client hardware to identify a fixed telemetry entry */
#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+#define NUM_BYTES_QWORD(v) ((v) << 3)
+#define SAMPLE_ID_OFFSET(v) ((v) << 3)
+
+#define NUM_BYTES_DWORD(v) ((v) << 2)
+#define SAMPLE_ID_OFFSET32(v) ((v) << 2)
+
+/* Protects access to the xarray of telemetry endpoint handles */
+static DEFINE_MUTEX(ep_lock);
+
enum telem_type {
TELEM_TYPE_PUNIT = 0,
TELEM_TYPE_CRASHLOG,
@@ -58,10 +67,10 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
}
static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
- struct intel_pmt_header *header,
struct device *dev)
{
void __iomem *disc_table = entry->disc_table;
+ struct intel_pmt_header *header = &entry->header;
if (pmt_telem_region_overlaps(entry, dev))
return 1;
@@ -84,21 +93,195 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
return 0;
}
+static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry,
+ struct pci_dev *pdev)
+{
+ struct telem_endpoint *ep;
+
+ /* Endpoint lifetimes are managed by kref, not devres */
+ entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL);
+ if (!entry->ep)
+ return -ENOMEM;
+
+ ep = entry->ep;
+ ep->pcidev = pdev;
+ ep->header.access_type = entry->header.access_type;
+ ep->header.guid = entry->header.guid;
+ ep->header.base_offset = entry->header.base_offset;
+ ep->header.size = entry->header.size;
+ ep->base = entry->base;
+ ep->present = true;
+
+ kref_init(&ep->kref);
+
+ return 0;
+}
+
static DEFINE_XARRAY_ALLOC(telem_array);
static struct intel_pmt_namespace pmt_telem_ns = {
.name = "telem",
.xa = &telem_array,
.pmt_header_decode = pmt_telem_header_decode,
+ .pmt_add_endpoint = pmt_telem_add_endpoint,
};
+/* Called when all users unregister and the device is removed */
+static void pmt_telem_ep_release(struct kref *kref)
+{
+ struct telem_endpoint *ep;
+
+ ep = container_of(kref, struct telem_endpoint, kref);
+ kfree(ep);
+}
+
+unsigned long pmt_telem_get_next_endpoint(unsigned long start)
+{
+ struct intel_pmt_entry *entry;
+	unsigned long found_idx = start;
+
+ mutex_lock(&ep_lock);
+ xa_for_each_start(&telem_array, found_idx, entry, start) {
+ /*
+		 * Return the first index found after start.
+		 * 0 is not a valid id.
+ */
+ if (found_idx > start)
+ break;
+ }
+ mutex_unlock(&ep_lock);
+
+ return found_idx == start ? 0 : found_idx;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, INTEL_PMT_TELEMETRY);
+
+struct telem_endpoint *pmt_telem_register_endpoint(int devid)
+{
+ struct intel_pmt_entry *entry;
+ unsigned long index = devid;
+
+ mutex_lock(&ep_lock);
+ entry = xa_find(&telem_array, &index, index, XA_PRESENT);
+ if (!entry) {
+ mutex_unlock(&ep_lock);
+ return ERR_PTR(-ENXIO);
+ }
+
+ kref_get(&entry->ep->kref);
+ mutex_unlock(&ep_lock);
+
+ return entry->ep;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, INTEL_PMT_TELEMETRY);
+
+void pmt_telem_unregister_endpoint(struct telem_endpoint *ep)
+{
+ kref_put(&ep->kref, pmt_telem_ep_release);
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, INTEL_PMT_TELEMETRY);
+
+int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info)
+{
+ struct intel_pmt_entry *entry;
+ unsigned long index = devid;
+ int err = 0;
+
+ if (!info)
+ return -EINVAL;
+
+ mutex_lock(&ep_lock);
+ entry = xa_find(&telem_array, &index, index, XA_PRESENT);
+ if (!entry) {
+ err = -ENXIO;
+ goto unlock;
+ }
+
+ info->pdev = entry->ep->pcidev;
+ info->header = entry->ep->header;
+
+unlock:
+ mutex_unlock(&ep_lock);
+ return err;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, INTEL_PMT_TELEMETRY);
+
+int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
+{
+ u32 offset, size;
+
+ if (!ep->present)
+ return -ENODEV;
+
+ offset = SAMPLE_ID_OFFSET(id);
+ size = ep->header.size;
+
+ if (offset + NUM_BYTES_QWORD(count) > size)
+ return -EINVAL;
+
+ memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count));
+
+ return ep->present ? 0 : -EPIPE;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read, INTEL_PMT_TELEMETRY);
+
+int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count)
+{
+ u32 offset, size;
+
+ if (!ep->present)
+ return -ENODEV;
+
+ offset = SAMPLE_ID_OFFSET32(id);
+ size = ep->header.size;
+
+ if (offset + NUM_BYTES_DWORD(count) > size)
+ return -EINVAL;
+
+ memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count));
+
+ return ep->present ? 0 : -EPIPE;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, INTEL_PMT_TELEMETRY);
+
+struct telem_endpoint *
+pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
+{
+ int devid = 0;
+ int inst = 0;
+ int err = 0;
+
+ while ((devid = pmt_telem_get_next_endpoint(devid))) {
+ struct telem_endpoint_info ep_info;
+
+ err = pmt_telem_get_endpoint_info(devid, &ep_info);
+ if (err)
+ return ERR_PTR(err);
+
+ if (ep_info.header.guid == guid && ep_info.pdev == pcidev) {
+ if (inst == pos)
+ return pmt_telem_register_endpoint(devid);
+ ++inst;
+ }
+ }
+
+ return ERR_PTR(-ENXIO);
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, INTEL_PMT_TELEMETRY);
+
static void pmt_telem_remove(struct auxiliary_device *auxdev)
{
struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
int i;
- for (i = 0; i < priv->num_entries; i++)
- intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns);
-}
+ mutex_lock(&ep_lock);
+ for (i = 0; i < priv->num_entries; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[i];
+
+ kref_put(&entry->ep->kref, pmt_telem_ep_release);
+ intel_pmt_dev_destroy(entry, &pmt_telem_ns);
+ }
+ mutex_unlock(&ep_lock);
+}
static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
@@ -117,7 +300,9 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia
for (i = 0; i < intel_vsec_dev->num_resources; i++) {
struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
+ mutex_lock(&ep_lock);
ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+ mutex_unlock(&ep_lock);
if (ret < 0)
goto abort_probe;
if (ret)
diff --git a/drivers/platform/x86/intel/pmt/telemetry.h b/drivers/platform/x86/intel/pmt/telemetry.h
new file mode 100644
index 000000000..d45af5512
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/telemetry.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TELEMETRY_H
+#define _TELEMETRY_H
+
+/* Telemetry types */
+#define PMT_TELEM_TELEMETRY 0
+#define PMT_TELEM_CRASHLOG 1
+
+struct telem_endpoint;
+struct pci_dev;
+
+struct telem_header {
+ u8 access_type;
+ u16 size;
+ u32 guid;
+ u32 base_offset;
+};
+
+struct telem_endpoint_info {
+ struct pci_dev *pdev;
+ struct telem_header header;
+};
+
+/**
+ * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint
+ * @start: starting devid to look from
+ *
+ * This function can be used in a while loop predicate to retrieve the devid
+ * of all available telemetry endpoints. pmt_telem_get_endpoint_info()
+ * and pmt_telem_register_endpoint() can be used inside the loop to examine
+ * endpoint info and register to receive a pointer to the endpoint. The pointer
+ * is then usable in the telemetry read calls to access the telemetry data.
+ *
+ * Return:
+ * * devid - devid of the next present endpoint from start
+ * * 0 - when no more endpoints are present after start
+ */
+unsigned long pmt_telem_get_next_endpoint(unsigned long start);
+
+/**
+ * pmt_telem_register_endpoint() - Register a telemetry endpoint
+ * @devid: device id/handle of the telemetry endpoint
+ *
+ * Increments the kref usage counter for the endpoint.
+ *
+ * Return:
+ * * endpoint - On success returns pointer to the telemetry endpoint
+ * * -ENXIO - telemetry endpoint not found
+ */
+struct telem_endpoint *pmt_telem_register_endpoint(int devid);
+
+/**
+ * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint
+ * @ep: telemetry endpoint to unregister.
+ *
+ * Decrements the kref usage counter for the endpoint.
+ */
+void pmt_telem_unregister_endpoint(struct telem_endpoint *ep);
+
+/**
+ * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid
+ * @devid: device id/handle of the telemetry endpoint
+ * @info: Endpoint info structure to be populated
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENXIO - telemetry endpoint not found for the devid
+ * * -EINVAL - @info is NULL
+ */
+int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info);
+
+/**
+ * pmt_telem_find_and_register_endpoint() - Get a telemetry endpoint from
+ * pci_dev device, guid and pos
+ * @pcidev: PCI device inside the Intel vsec
+ * @guid: GUID of the telemetry space
+ * @pos: Instance of the guid
+ *
+ * Return:
+ * * endpoint - On success returns pointer to the telemetry endpoint
+ * * -ENXIO - telemetry endpoint not found
+ */
+struct telem_endpoint *pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev,
+ u32 guid, u16 pos);
+
+/**
+ * pmt_telem_read() - Read qwords from counter sram using sample id
+ * @ep: Telemetry endpoint to be read
+ * @id: The beginning sample id of the metric(s) to be read
+ * @data: Allocated qword buffer
+ * @count: Number of qwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - The device is not present.
+ * * -EINVAL - The offset is out of bounds
+ * * -EPIPE - The device was removed during the read. Data was written
+ *            but should be considered invalid.
+ */
+int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count);
+
+/**
+ * pmt_telem_read32() - Read dwords from counter sram using sample id
+ * @ep: Telemetry endpoint to be read
+ * @id: The beginning sample id of the metric(s) to be read
+ * @data: Allocated dword buffer
+ * @count: Number of dwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - The device is not present.
+ * * -EINVAL - The offset is out of bounds
+ * * -EPIPE - The device was removed during the read. Data was written
+ *            but should be considered invalid.
+ */
+int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count);
+
+#endif
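
Editorial note: the kernel-doc above describes a while-loop enumeration pattern; the following is a minimal consumer sketch of that pattern, not part of the patch. The GUID, buffer and function name are placeholders, and a real consumer must import the INTEL_PMT_TELEMETRY symbol namespace, matching the EXPORT_SYMBOL_NS_GPL() exports in telemetry.c.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
#include "telemetry.h"

MODULE_IMPORT_NS(INTEL_PMT_TELEMETRY);

/*
 * Read @count qwords, starting at sample id 0, from the first endpoint
 * whose discovery header matches @guid. Returns 0 or a negative errno.
 */
static int example_read_guid(u32 guid, u64 *buf, u32 count)
{
	unsigned long devid = 0;
	int err;

	/* 0 is not a valid devid, so it also terminates the loop */
	while ((devid = pmt_telem_get_next_endpoint(devid))) {
		struct telem_endpoint_info info;
		struct telem_endpoint *ep;

		err = pmt_telem_get_endpoint_info(devid, &info);
		if (err)
			return err;

		if (info.header.guid != guid)
			continue;

		ep = pmt_telem_register_endpoint(devid);	/* takes a kref */
		if (IS_ERR(ep))
			return PTR_ERR(ep);

		err = pmt_telem_read(ep, 0, buf, count);
		pmt_telem_unregister_endpoint(ep);		/* drops the kref */
		return err;
	}

	return -ENXIO;
}

pmt_telem_find_and_register_endpoint() above packages the same loop for callers that already know the owning pci_dev and GUID instance.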
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
index 0b6d2c864..2662fbbdd 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
@@ -234,6 +234,7 @@ struct perf_level {
* @saved_clos_configs: Save SST-CP CLOS configuration to store restore for suspend/resume
* @saved_clos_assocs: Save SST-CP CLOS association to store restore for suspend/resume
* @saved_pp_control: Save SST-PP control information to store restore for suspend/resume
+ * @write_blocked:	Write operations are blocked, so the SST state can't be changed
*
* This structure is used store complete SST information for a power_domain. This information
* is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple
@@ -259,6 +260,7 @@ struct tpmi_per_power_domain_info {
u64 saved_clos_configs[4];
u64 saved_clos_assocs[4];
u64 saved_pp_control;
+ bool write_blocked;
};
/**
@@ -515,6 +517,9 @@ static long isst_if_clos_param(void __user *argp)
return -EINVAL;
if (clos_param.get_set) {
+ if (power_domain_info->write_blocked)
+ return -EPERM;
+
_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
@@ -602,6 +607,9 @@ static long isst_if_clos_assoc(void __user *argp)
power_domain_info = &sst_inst->power_domain_info[punit_id];
+ if (assoc_cmds.get_set && power_domain_info->write_blocked)
+ return -EPERM;
+
offset = SST_CLOS_ASSOC_0_OFFSET +
(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
@@ -752,6 +760,9 @@ static int isst_if_set_perf_level(void __user *argp)
if (!power_domain_info)
return -EINVAL;
+ if (power_domain_info->write_blocked)
+ return -EPERM;
+
if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
return -EINVAL;
@@ -809,6 +820,9 @@ static int isst_if_set_perf_feature(void __user *argp)
if (!power_domain_info)
return -EINVAL;
+ if (power_domain_info->write_blocked)
+ return -EPERM;
+
_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
SST_MUL_FACTOR_NONE)
@@ -1257,11 +1271,21 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
{
+	bool read_blocked = false, write_blocked = false;
struct intel_tpmi_plat_info *plat_info;
struct tpmi_sst_struct *tpmi_sst;
int i, ret, pkg = 0, inst = 0;
int num_resources;
+ ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
+ if (ret)
+ dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n");
+
+ if (read_blocked) {
+ dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
+ return -ENODEV;
+ }
+
plat_info = tpmi_get_platform_data(auxdev);
if (!plat_info) {
dev_err(&auxdev->dev, "No platform info\n");
@@ -1306,6 +1330,7 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
tpmi_sst->power_domain_info[i].package_id = pkg;
tpmi_sst->power_domain_info[i].power_domain_id = i;
tpmi_sst->power_domain_info[i].auxdev = auxdev;
+ tpmi_sst->power_domain_info[i].write_blocked = write_blocked;
tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res);
if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base))
return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base);
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index c2f6e20b4..910df7c65 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -171,19 +171,6 @@ struct tpmi_feature_state {
} __packed;
/*
- * List of supported TMPI IDs.
- * Some TMPI IDs are not used by Linux, so the numbers are not consecutive.
- */
-enum intel_tpmi_id {
- TPMI_ID_RAPL = 0, /* Running Average Power Limit */
- TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
- TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
- TPMI_ID_SST = 5, /* Speed Select Technology */
- TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
- TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
-};
-
-/*
 * The size from hardware is in u32 units. The size comes from trusted hardware,
 * but it is better to verify it on pre-silicon platforms. Set size to 0 when invalid.
*/
@@ -345,8 +332,8 @@ err_unlock:
return ret;
}
-int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id,
- int *locked, int *disabled)
+int tpmi_get_feature_status(struct auxiliary_device *auxdev,
+ int feature_id, bool *read_blocked, bool *write_blocked)
{
struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
@@ -357,8 +344,8 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id,
if (ret)
return ret;
- *locked = feature_state.locked;
- *disabled = !feature_state.enabled;
+ *read_blocked = feature_state.read_blocked;
+ *write_blocked = feature_state.write_blocked;
return 0;
}
@@ -599,9 +586,21 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
char feature_id_name[TPMI_FEATURE_NAME_LEN];
struct intel_vsec_device *feature_vsec_dev;
+ struct tpmi_feature_state feature_state;
struct resource *res, *tmp;
const char *name;
- int i;
+ int i, ret;
+
+ ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
+ if (ret)
+ return ret;
+
+ /*
+	 * If the feature is not enabled, return -EOPNOTSUPP so the caller
+	 * continues to look at other features in the PFS. This does not
+	 * cause the driver to fail to load.
+ */
+ if (!feature_state.enabled)
+ return -EOPNOTSUPP;
name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
if (!name)
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index 4fb790552..bd75d61ff 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -66,6 +66,7 @@ struct tpmi_uncore_struct {
int min_ratio;
struct tpmi_uncore_power_domain_info *pd_info;
struct tpmi_uncore_cluster_info root_cluster;
+ bool write_blocked;
};
#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15)
@@ -157,6 +158,9 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu
cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
uncore_root = cluster_info->uncore_root;
+ if (uncore_root->write_blocked)
+ return -EPERM;
+
/* Update each cluster in a package */
if (cluster_info->root_domain) {
struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
@@ -233,11 +237,21 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
+	bool read_blocked = false, write_blocked = false;
struct intel_tpmi_plat_info *plat_info;
struct tpmi_uncore_struct *tpmi_uncore;
int ret, i, pkg = 0;
int num_resources;
+ ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked);
+ if (ret)
+ dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n");
+
+ if (read_blocked) {
+ dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
+ return -ENODEV;
+ }
+
/* Get number of power domains, which is equal to number of resources */
num_resources = tpmi_get_resource_count(auxdev);
if (!num_resources)
@@ -266,6 +280,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
}
tpmi_uncore->power_domain_count = num_resources;
+ tpmi_uncore->write_blocked = write_blocked;
/* Get the package ID from the TPMI core */
plat_info = tpmi_get_platform_data(auxdev);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index a3b25253b..a5e0f5c22 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -205,6 +205,16 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
@@ -212,6 +222,9 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 084c355c8..5d13452bb 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -316,6 +314,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 343ab6a82..778eb0aa3 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -15,6 +15,7 @@
#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/idr.h>
@@ -24,13 +25,6 @@
#include "vsec.h"
-/* Intel DVSEC offsets */
-#define INTEL_DVSEC_ENTRIES 0xA
-#define INTEL_DVSEC_SIZE 0xB
-#define INTEL_DVSEC_TABLE 0xC
-#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
-#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
-#define TABLE_OFFSET_SHIFT 3
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
@@ -39,34 +33,6 @@ static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);
static DEFINE_XARRAY_ALLOC(auxdev_array);
-/**
- * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
- * @rev: Revision ID of the VSEC/DVSEC register space
- * @length: Length of the VSEC/DVSEC register space
- * @id: ID of the feature
- * @num_entries: Number of instances of the feature
- * @entry_size: Size of the discovery table for each feature
- * @tbir: BAR containing the discovery tables
- * @offset: BAR offset of start of the first discovery table
- */
-struct intel_vsec_header {
- u8 rev;
- u16 length;
- u16 id;
- u8 num_entries;
- u8 entry_size;
- u8 tbir;
- u32 offset;
-};
-
-enum intel_vsec_id {
- VSEC_ID_TELEMETRY = 2,
- VSEC_ID_WATCHER = 3,
- VSEC_ID_CRASHLOG = 4,
- VSEC_ID_SDSI = 65,
- VSEC_ID_TPMI = 66,
-};
-
static const char *intel_vsec_name(enum intel_vsec_id id)
{
switch (id) {
@@ -137,6 +103,9 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
int ret, id;
+ if (!parent)
+ return -EINVAL;
+
ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
PMT_XA_LIMIT, GFP_KERNEL);
if (ret < 0) {
@@ -155,9 +124,6 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
return id;
}
- if (!parent)
- parent = &pdev->dev;
-
auxdev->id = id;
auxdev->name = name;
auxdev->dev.parent = parent;
@@ -175,23 +141,27 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
return ret;
}
- ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux,
+ return devm_add_action_or_reset(parent, intel_vsec_remove_aux,
auxdev);
- if (ret < 0)
- return ret;
-
- return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
struct intel_vsec_platform_info *info)
{
- struct intel_vsec_device *intel_vsec_dev;
- struct resource *res, *tmp;
+ struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL;
+ struct resource __free(kfree) *res = NULL;
+ struct resource *tmp;
+ struct device *parent;
unsigned long quirks = info->quirks;
+ u64 base_addr;
int i;
+ if (info->parent)
+ parent = info->parent;
+ else
+ parent = &pdev->dev;
+
if (!intel_vsec_supported(header->id, info->caps))
return -EINVAL;
@@ -210,37 +180,50 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
return -ENOMEM;
res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
- if (!res) {
- kfree(intel_vsec_dev);
+ if (!res)
return -ENOMEM;
- }
if (quirks & VSEC_QUIRK_TABLE_SHIFT)
header->offset >>= TABLE_OFFSET_SHIFT;
+ if (info->base_addr)
+ base_addr = info->base_addr;
+ else
+ base_addr = pdev->resource[header->tbir].start;
+
/*
* The DVSEC/VSEC contains the starting offset and count for a block of
* discovery tables. Create a resource array of these tables to the
* auxiliary device driver.
*/
for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
- tmp->start = pdev->resource[header->tbir].start +
- header->offset + i * (header->entry_size * sizeof(u32));
+ tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32));
tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
tmp->flags = IORESOURCE_MEM;
+
+ /* Check resource is not in use */
+ if (!request_mem_region(tmp->start, resource_size(tmp), ""))
+ return -EBUSY;
+
+ release_mem_region(tmp->start, resource_size(tmp));
}
intel_vsec_dev->pcidev = pdev;
- intel_vsec_dev->resource = res;
+ intel_vsec_dev->resource = no_free_ptr(res);
intel_vsec_dev->num_resources = header->num_entries;
- intel_vsec_dev->info = info;
+ intel_vsec_dev->quirks = info->quirks;
+ intel_vsec_dev->base_addr = info->base_addr;
if (header->id == VSEC_ID_SDSI)
intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
else
intel_vsec_dev->ida = &intel_vsec_ida;
- return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev,
+ /*
+	 * Pass ownership of intel_vsec_dev and the resource within it to
+ * intel_vsec_add_aux()
+ */
+ return intel_vsec_add_aux(pdev, parent, no_free_ptr(intel_vsec_dev),
intel_vsec_name(header->id));
}
@@ -358,6 +341,16 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
return have_devices;
}
+void intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ if (!pdev || !info)
+ return;
+
+ intel_vsec_walk_header(pdev, info);
+}
+EXPORT_SYMBOL_NS_GPL(intel_vsec_register, INTEL_VSEC);
+
static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct intel_vsec_platform_info *info;
@@ -426,6 +419,11 @@ static const struct intel_vsec_platform_info tgl_info = {
.quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};
+/* LNL info */
+static const struct intel_vsec_platform_info lnl_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_WATCHER,
+};
+
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
@@ -433,6 +431,7 @@ static const struct intel_vsec_platform_info tgl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
+#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
@@ -441,6 +440,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
index 0a6201b4a..e23e76129 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/drivers/platform/x86/intel/vsec.h
@@ -11,9 +11,45 @@
#define VSEC_CAP_SDSI BIT(3)
#define VSEC_CAP_TPMI BIT(4)
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
struct pci_dev;
struct resource;
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
+};
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
enum intel_vsec_quirks {
/* Watcher feature not supported */
VSEC_QUIRK_NO_WATCHER = BIT(0),
@@ -33,9 +69,11 @@ enum intel_vsec_quirks {
/* Platform specific data */
struct intel_vsec_platform_info {
+ struct device *parent;
struct intel_vsec_header **headers;
unsigned long caps;
unsigned long quirks;
+ u64 base_addr;
};
struct intel_vsec_device {
@@ -43,11 +81,12 @@ struct intel_vsec_device {
struct pci_dev *pcidev;
struct resource *resource;
struct ida *ida;
- struct intel_vsec_platform_info *info;
int num_resources;
int id; /* xa */
void *priv_data;
size_t priv_data_size;
+ unsigned long quirks;
+ u64 base_addr;
};
int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
@@ -63,4 +102,7 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device
{
return container_of(auxdev, struct intel_vsec_device, auxdev);
}
+
+void intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info);
#endif
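
Editorial note: a minimal sketch, not part of the patch, of how a parent PCI driver could use the new intel_vsec_register() entry point together with the header definitions relocated into this file. All field values are placeholders; real callers also need MODULE_IMPORT_NS(INTEL_VSEC).

#include <linux/module.h>
#include <linux/pci.h>
#include "vsec.h"

MODULE_IMPORT_NS(INTEL_VSEC);

/* One telemetry discovery table; id/offset/size values are placeholders */
static struct intel_vsec_header example_telem = {
	.id		= VSEC_ID_TELEMETRY,
	.rev		= 1,
	.num_entries	= 1,
	.entry_size	= 4,		/* in u32 units */
	.tbir		= 0,		/* discovery table lives in BAR 0 */
	.offset		= 0x10000,
};

static struct intel_vsec_header *example_headers[] = {
	&example_telem,
	NULL,
};

static struct intel_vsec_platform_info example_info = {
	.caps		= VSEC_CAP_TELEMETRY,
	.headers	= example_headers,
	/* .parent and .base_addr, when set, override the PCI defaults */
};

static void example_register(struct pci_dev *pdev)
{
	example_info.parent = &pdev->dev;
	intel_vsec_register(pdev, &example_info);
}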
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 3c86e0108..040153ad6 100644
--- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -25,19 +25,14 @@
static int get_fwu_request(struct device *dev, u32 *out)
{
- struct acpi_buffer result = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *obj;
- acpi_status status;
- status = wmi_query_block(INTEL_WMI_SBL_GUID, 0, &result);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "wmi_query_block failed\n");
+ obj = wmidev_block_query(to_wmi_device(dev), 0);
+ if (!obj)
return -ENODEV;
- }
- obj = (union acpi_object *)result.pointer;
- if (!obj || obj->type != ACPI_TYPE_INTEGER) {
- dev_warn(dev, "wmi_query_block returned invalid value\n");
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ dev_warn(dev, "wmidev_block_query returned invalid value\n");
kfree(obj);
return -EINVAL;
}
@@ -58,9 +53,9 @@ static int set_fwu_request(struct device *dev, u32 in)
input.length = sizeof(u32);
input.pointer = &value;
- status = wmi_set_block(INTEL_WMI_SBL_GUID, 0, &input);
+ status = wmidev_block_set(to_wmi_device(dev), 0, &input);
if (ACPI_FAILURE(status)) {
- dev_err(dev, "wmi_set_block failed\n");
+ dev_err(dev, "wmidev_block_set failed\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index fc333ff82..e2ad3f46f 100644
--- a/drivers/platform/x86/intel/wmi/thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -32,8 +32,7 @@ static ssize_t force_power_store(struct device *dev,
mode = hex_to_bin(buf[0]);
dev_dbg(dev, "force_power: storing %#x\n", mode);
if (mode == 0 || mode == 1) {
- status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
- &input, NULL);
+ status = wmidev_evaluate_method(to_wmi_device(dev), 0, 1, &input, NULL);
if (ACPI_FAILURE(status)) {
dev_dbg(dev, "force_power: failed to evaluate ACPI method\n");
return -ENODEV;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index c66808601..ba38649cc 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1114,39 +1114,6 @@ static int ips_monitor(void *data)
return 0;
}
-#if 0
-#define THM_DUMPW(reg) \
- { \
- u16 val = thm_readw(reg); \
- dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
- }
-#define THM_DUMPL(reg) \
- { \
- u32 val = thm_readl(reg); \
- dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
- }
-#define THM_DUMPQ(reg) \
- { \
- u64 val = thm_readq(reg); \
- dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \
- }
-
-static void dump_thermal_info(struct ips_driver *ips)
-{
- u16 ptl;
-
- ptl = thm_readw(THM_PTL);
- dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);
-
- THM_DUMPW(THM_CTA);
- THM_DUMPW(THM_TRC);
- THM_DUMPW(THM_CTV1);
- THM_DUMPL(THM_STS);
- THM_DUMPW(THM_PTV);
- THM_DUMPQ(THM_MGTV);
-}
-#endif
-
/**
* ips_irq_handler - handle temperature triggers and other IPS events
* @irq: irq number
diff --git a/drivers/platform/x86/lenovo-yogabook.c b/drivers/platform/x86/lenovo-yogabook.c
index b8d023919..fd62bf746 100644
--- a/drivers/platform/x86/lenovo-yogabook.c
+++ b/drivers/platform/x86/lenovo-yogabook.c
@@ -435,7 +435,7 @@ static int yogabook_pdev_set_kbd_backlight(struct yogabook_data *data, u8 level)
.enabled = level,
};
- pwm_apply_state(data->kbd_bl_pwm, &state);
+ pwm_apply_might_sleep(data->kbd_bl_pwm, &state);
gpiod_set_value(data->kbd_bl_led_enable, level ? 1 : 0);
return 0;
}
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index a64f56ddd..3d66e1d4e 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -67,7 +67,7 @@ static bool p2sb_valid_resource(struct resource *res)
/* Copy resource from the first BAR of the device in question */
static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- struct resource *bar0 = &pdev->resource[0];
+ struct resource *bar0 = pci_resource_n(pdev, 0);
/* Make sure we have no dangling pointers in the output */
memset(mem, 0, sizeof(*mem));
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
new file mode 100644
index 000000000..bcf3a0c35
--- /dev/null
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * In some cases UART attached devices which require an in kernel driver,
+ * e.g. UART attached Bluetooth HCIs are described in the ACPI tables
+ * by an ACPI device with a broken or missing UartSerialBusV2() resource.
+ *
+ * This causes the kernel to create a /dev/ttyS# char-device for the UART
+ * instead of creating an in kernel serdev-controller + serdev-device pair
+ * for the in kernel driver.
+ *
+ * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel
+ * create a serdev-controller device for these UARTs instead of a /dev/ttyS#.
+ *
+ * Instantiating the actual serdev-device to bind to is up to pdx86 code,
+ * this header provides a helper for getting the serdev-controller device.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct device *ctrl_dev, *child;
+ struct acpi_device *ctrl_adev;
+ char name[32];
+ int i;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ /* Walk host -> uart-ctrl -> port -> serdev-ctrl */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev));
+ break;
+ case 1:
+ snprintf(name, sizeof(name), "%s.%d",
+ dev_name(ctrl_dev), serial_ctrl_port);
+ break;
+ case 2:
+ strscpy(name, serdev_ctrl_name, sizeof(name));
+ break;
+ }
+
+ child = device_find_child_by_name(ctrl_dev, name);
+ put_device(ctrl_dev);
+ if (!child) {
+ pr_err("error could not find '%s' device\n", name);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ ctrl_dev = child;
+ }
+
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ctrl_dev;
+}
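
Editorial note: an illustrative caller sketch, not part of the patch. The HID/UID, port number and serdev-controller name are placeholders for whatever a given board's ACPI tables and UART driver actually use.

#include "serdev_helpers.h"

static int example_get_bt_serdev_ctrl(void)
{
	struct device *serdev_ctrl;

	/* "INT1234"/"1" name the UART's ACPI device, port 0 selects the
	 * first port, "serial0" names the serdev controller; all placeholders. */
	serdev_ctrl = get_serdev_controller("INT1234", "1", 0, "serial0");
	if (IS_ERR(serdev_ctrl))
		return PTR_ERR(serdev_ctrl);

	/* ... allocate and add a serdev device on the controller ... */

	/* get_serdev_controller() returned a device reference */
	put_device(serdev_ctrl);
	return 0;
}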
diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c
new file mode 100644
index 000000000..6ce43ccb3
--- /dev/null
+++ b/drivers/platform/x86/silicom-platform.c
@@ -0,0 +1,1004 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// silicom-platform.c - Silicom MEC170x platform driver
+//
+// Copyright (C) 2023 Henry Shi <henrys@silicom-usa.com>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/units.h>
+
+#include <linux/gpio/driver.h>
+
+#define MEC_POWER_CYCLE_ADDR 0x24
+#define MEC_EFUSE_LSB_ADDR 0x28
+#define MEC_GPIO_IN_POS 0x08
+#define MEC_IO_BASE 0x0800
+#define MEC_IO_LEN 0x8
+#define IO_REG_BANK 0x0
+#define DEFAULT_CHAN_LO 0
+#define DEFAULT_CHAN_HI 0
+#define DEFAULT_CHAN_LO_T 0xc
+#define MEC_ADDR (MEC_IO_BASE + 0x02)
+#define EC_ADDR_LSB MEC_ADDR
+#define SILICOM_MEC_MAGIC 0x5a
+
+#define MEC_PORT_CHANNEL_MASK GENMASK(2, 0)
+#define MEC_PORT_DWORD_OFFSET GENMASK(31, 3)
+#define MEC_DATA_OFFSET_MASK GENMASK(1, 0)
+#define MEC_PORT_OFFSET_MASK GENMASK(7, 2)
+
+#define MEC_TEMP_LOC GENMASK(31, 16)
+#define MEC_VERSION_LOC GENMASK(15, 8)
+#define MEC_VERSION_MAJOR GENMASK(15, 14)
+#define MEC_VERSION_MINOR GENMASK(13, 8)
+
+#define EC_ADDR_MSB (MEC_IO_BASE + 0x3)
+#define MEC_DATA_OFFSET(offset) (MEC_IO_BASE + 0x04 + (offset))
+
+#define OFFSET_BIT_TO_CHANNEL(off, bit) ((((off) + 0x014) << 3) | (bit))
+#define CHANNEL_TO_OFFSET(chan) (((chan) >> 3) - 0x14)
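+
+/*
+ * Worked example (editorial illustration, not from the original file):
+ * OFFSET_BIT_TO_CHANNEL(0x0c, 5) encodes to ((0x0c + 0x14) << 3) | 5 = 0x105,
+ * and CHANNEL_TO_OFFSET(0x105) recovers (0x105 >> 3) - 0x14 = 0x0c; the low
+ * three bits select the bit within the addressed register.
+ */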
+
+static DEFINE_MUTEX(mec_io_mutex);
+static unsigned int efuse_status;
+static unsigned int mec_uc_version;
+static unsigned int power_cycle;
+
+static const struct hwmon_channel_info *silicom_fan_control_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
+ NULL
+};
+
+struct silicom_platform_info {
+ int io_base;
+ int io_len;
+ struct led_classdev_mc *led_info;
+ struct gpio_chip *gpiochip;
+ u8 *gpio_channels;
+ u16 ngpio;
+};
+
+static const char * const plat_0222_gpio_names[] = {
+ "AUTOM0_SFP_TX_FAULT",
+ "SLOT2_LED_OUT",
+ "SIM_M2_SLOT2_B_DET",
+ "SIM_M2_SLOT2_A_DET",
+ "SLOT1_LED_OUT",
+ "SIM_M2_SLOT1_B_DET",
+ "SIM_M2_SLOT1_A_DET",
+ "SLOT0_LED_OUT",
+ "WAN_SFP0_RX_LOS",
+ "WAN_SFP0_PRSNT_N",
+ "WAN_SFP0_TX_FAULT",
+ "AUTOM1_SFP_RX_LOS",
+ "AUTOM1_SFP_PRSNT_N",
+ "AUTOM1_SFP_TX_FAULT",
+ "AUTOM0_SFP_RX_LOS",
+ "AUTOM0_SFP_PRSNT_N",
+ "WAN_SFP1_RX_LOS",
+ "WAN_SFP1_PRSNT_N",
+ "WAN_SFP1_TX_FAULT",
+ "SIM_M2_SLOT1_MUX_SEL",
+ "W_DISABLE_M2_SLOT1_N",
+ "W_DISABLE_MPCIE_SLOT0_N",
+ "W_DISABLE_M2_SLOT0_N",
+ "BT_COMMAND_MODE",
+ "WAN_SFP1_TX_DISABLE",
+ "WAN_SFP0_TX_DISABLE",
+ "AUTOM1_SFP_TX_DISABLE",
+ "AUTOM0_SFP_TX_DISABLE",
+ "SIM_M2_SLOT2_MUX_SEL",
+ "W_DISABLE_M2_SLOT2_N",
+ "RST_CTL_M2_SLOT_1_N",
+ "RST_CTL_M2_SLOT_2_N",
+ "PM_USB_PWR_EN_BOT",
+ "PM_USB_PWR_EN_TOP",
+};
+
+static u8 plat_0222_gpio_channels[] = {
+ OFFSET_BIT_TO_CHANNEL(0x00, 0),
+ OFFSET_BIT_TO_CHANNEL(0x00, 1),
+ OFFSET_BIT_TO_CHANNEL(0x00, 2),
+ OFFSET_BIT_TO_CHANNEL(0x00, 3),
+ OFFSET_BIT_TO_CHANNEL(0x00, 4),
+ OFFSET_BIT_TO_CHANNEL(0x00, 5),
+ OFFSET_BIT_TO_CHANNEL(0x00, 6),
+ OFFSET_BIT_TO_CHANNEL(0x00, 7),
+ OFFSET_BIT_TO_CHANNEL(0x01, 0),
+ OFFSET_BIT_TO_CHANNEL(0x01, 1),
+ OFFSET_BIT_TO_CHANNEL(0x01, 2),
+ OFFSET_BIT_TO_CHANNEL(0x01, 3),
+ OFFSET_BIT_TO_CHANNEL(0x01, 4),
+ OFFSET_BIT_TO_CHANNEL(0x01, 5),
+ OFFSET_BIT_TO_CHANNEL(0x01, 6),
+ OFFSET_BIT_TO_CHANNEL(0x01, 7),
+ OFFSET_BIT_TO_CHANNEL(0x02, 0),
+ OFFSET_BIT_TO_CHANNEL(0x02, 1),
+ OFFSET_BIT_TO_CHANNEL(0x02, 2),
+ OFFSET_BIT_TO_CHANNEL(0x09, 0),
+ OFFSET_BIT_TO_CHANNEL(0x09, 1),
+ OFFSET_BIT_TO_CHANNEL(0x09, 2),
+ OFFSET_BIT_TO_CHANNEL(0x09, 3),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 0),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 1),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 2),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 3),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 4),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 5),
+ OFFSET_BIT_TO_CHANNEL(0x0a, 6),
+ OFFSET_BIT_TO_CHANNEL(0x0b, 0),
+ OFFSET_BIT_TO_CHANNEL(0x0b, 1),
+ OFFSET_BIT_TO_CHANNEL(0x0b, 2),
+ OFFSET_BIT_TO_CHANNEL(0x0b, 3),
+};
+
+static struct platform_device *silicom_platform_dev;
+static struct led_classdev_mc *silicom_led_info __initdata;
+static struct gpio_chip *silicom_gpiochip __initdata;
+static u8 *silicom_gpio_channels __initdata;
+
+static int silicom_mec_port_get(unsigned int offset)
+{
+ unsigned short mec_data_addr;
+ unsigned short mec_port_addr;
+ u8 reg;
+
+ mec_data_addr = FIELD_GET(MEC_PORT_DWORD_OFFSET, offset) & MEC_DATA_OFFSET_MASK;
+ mec_port_addr = FIELD_GET(MEC_PORT_DWORD_OFFSET, offset) & MEC_PORT_OFFSET_MASK;
+
+ mutex_lock(&mec_io_mutex);
+ outb(mec_port_addr, MEC_ADDR);
+ reg = inb(MEC_DATA_OFFSET(mec_data_addr));
+ mutex_unlock(&mec_io_mutex);
+
+ return (reg >> (offset & MEC_PORT_CHANNEL_MASK)) & 0x01;
+}
+
+static enum led_brightness silicom_mec_led_get(int channel)
+{
+ /* Outputs are active low */
+ return silicom_mec_port_get(channel) ? LED_OFF : LED_ON;
+}
+
+static void silicom_mec_port_set(int channel, int on)
+{
+ unsigned short mec_data_addr;
+ unsigned short mec_port_addr;
+ u8 reg;
+
+ mec_data_addr = FIELD_GET(MEC_PORT_DWORD_OFFSET, channel) & MEC_DATA_OFFSET_MASK;
+ mec_port_addr = FIELD_GET(MEC_PORT_DWORD_OFFSET, channel) & MEC_PORT_OFFSET_MASK;
+
+ mutex_lock(&mec_io_mutex);
+ outb(mec_port_addr, MEC_ADDR);
+ reg = inb(MEC_DATA_OFFSET(mec_data_addr));
+ /* Outputs are active low, so clear the bit for on, or set it for off */
+ if (on)
+ reg &= ~(1 << (channel & MEC_PORT_CHANNEL_MASK));
+ else
+ reg |= 1 << (channel & MEC_PORT_CHANNEL_MASK);
+ outb(reg, MEC_DATA_OFFSET(mec_data_addr));
+ mutex_unlock(&mec_io_mutex);
+}
+
+static enum led_brightness silicom_mec_led_mc_brightness_get(struct led_classdev *led_cdev)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(led_cdev);
+ enum led_brightness brightness = LED_OFF;
+ int i;
+
+ for (i = 0; i < mc_cdev->num_colors; i++) {
+ mc_cdev->subled_info[i].brightness =
+ silicom_mec_led_get(mc_cdev->subled_info[i].channel);
+ /* Mark the overall brightness as LED_ON if any of the subleds are on */
+ if (mc_cdev->subled_info[i].brightness != LED_OFF)
+ brightness = LED_ON;
+ }
+
+ return brightness;
+}
+
+static void silicom_mec_led_mc_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(led_cdev);
+ int i;
+
+ led_mc_calc_color_components(mc_cdev, brightness);
+ for (i = 0; i < mc_cdev->num_colors; i++) {
+ silicom_mec_port_set(mc_cdev->subled_info[i].channel,
+ mc_cdev->subled_info[i].brightness);
+ }
+}
+
+static int silicom_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ u8 *channels = gpiochip_get_data(gc);
+
+	/* Input registers have offsets in the range [0x00, 0x07] */
+ if (CHANNEL_TO_OFFSET(channels[offset]) < MEC_GPIO_IN_POS)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int silicom_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int direction = silicom_gpio_get_direction(gc, offset);
+
+ return direction == GPIO_LINE_DIRECTION_IN ? 0 : -EINVAL;
+}
+
+static void silicom_gpio_set(struct gpio_chip *gc,
+ unsigned int offset,
+ int value)
+{
+ int direction = silicom_gpio_get_direction(gc, offset);
+ u8 *channels = gpiochip_get_data(gc);
+ int channel = channels[offset];
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ return;
+
+ if (value)
+ silicom_mec_port_set(channel, 0);
+ else if (value == 0)
+ silicom_mec_port_set(channel, 1);
+ else
+ pr_err("Wrong argument value: %d\n", value);
+}
+
+static int silicom_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset,
+ int value)
+{
+ int direction = silicom_gpio_get_direction(gc, offset);
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ return -EINVAL;
+
+ silicom_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int silicom_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ u8 *channels = gpiochip_get_data(gc);
+ int channel = channels[offset];
+
+ return silicom_mec_port_get(channel);
+}
+
+static struct mc_subled plat_0222_wan_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_WHITE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 7),
+ },
+ {
+ .color_index = LED_COLOR_ID_YELLOW,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 6),
+ },
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 5),
+ },
+};
+
+static struct mc_subled plat_0222_sys_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_WHITE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 4),
+ },
+ {
+ .color_index = LED_COLOR_ID_AMBER,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 3),
+ },
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 2),
+ },
+};
+
+static struct mc_subled plat_0222_stat1_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 1),
+ },
+ {
+ .color_index = LED_COLOR_ID_GREEN,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0c, 0),
+ },
+ {
+ .color_index = LED_COLOR_ID_BLUE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 7),
+ },
+ {
+ .color_index = LED_COLOR_ID_YELLOW,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 6),
+ },
+};
+
+static struct mc_subled plat_0222_stat2_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 5),
+ },
+ {
+ .color_index = LED_COLOR_ID_GREEN,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 4),
+ },
+ {
+ .color_index = LED_COLOR_ID_BLUE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 3),
+ },
+ {
+ .color_index = LED_COLOR_ID_YELLOW,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 2),
+ },
+};
+
+static struct mc_subled plat_0222_stat3_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 1),
+ },
+ {
+ .color_index = LED_COLOR_ID_GREEN,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0d, 0),
+ },
+ {
+ .color_index = LED_COLOR_ID_BLUE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0e, 1),
+ },
+ {
+ .color_index = LED_COLOR_ID_YELLOW,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x0e, 0),
+ },
+};
+
+static struct led_classdev_mc plat_0222_mc_led_info[] __initdata = {
+ {
+ .led_cdev = {
+ .name = "platled::wan",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(plat_0222_wan_mc_subled_info),
+ .subled_info = plat_0222_wan_mc_subled_info,
+ },
+ {
+ .led_cdev = {
+ .name = "platled::sys",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(plat_0222_sys_mc_subled_info),
+ .subled_info = plat_0222_sys_mc_subled_info,
+ },
+ {
+ .led_cdev = {
+ .name = "platled::stat1",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(plat_0222_stat1_mc_subled_info),
+ .subled_info = plat_0222_stat1_mc_subled_info,
+ },
+ {
+ .led_cdev = {
+ .name = "platled::stat2",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(plat_0222_stat2_mc_subled_info),
+ .subled_info = plat_0222_stat2_mc_subled_info,
+ },
+ {
+ .led_cdev = {
+ .name = "platled::stat3",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(plat_0222_stat3_mc_subled_info),
+ .subled_info = plat_0222_stat3_mc_subled_info,
+ },
+ { },
+};
+
+static struct gpio_chip silicom_gpio_chip = {
+ .label = "silicom-gpio",
+ .get_direction = silicom_gpio_get_direction,
+ .direction_input = silicom_gpio_direction_input,
+ .direction_output = silicom_gpio_direction_output,
+ .get = silicom_gpio_get,
+ .set = silicom_gpio_set,
+ .base = -1,
+ .ngpio = ARRAY_SIZE(plat_0222_gpio_channels),
+ .names = plat_0222_gpio_names,
+ /*
+ * We're using a mutex to protect the indirect access, so we can sleep
+ * if the lock blocks
+ */
+ .can_sleep = true,
+};
+
+static struct silicom_platform_info silicom_plat_0222_cordoba_info __initdata = {
+ .io_base = MEC_IO_BASE,
+ .io_len = MEC_IO_LEN,
+ .led_info = plat_0222_mc_led_info,
+ .gpiochip = &silicom_gpio_chip,
+ .gpio_channels = plat_0222_gpio_channels,
+ /*
+ * The original generic cordoba does not have the last 4 outputs of the
+	 * plat_0222 variant; the rest are the same, so reuse the longer list
+	 * but ignore the last entries here.
+ */
+ .ngpio = ARRAY_SIZE(plat_0222_gpio_channels),
+};
+
+static struct mc_subled cordoba_fp_left_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 6),
+ },
+ {
+ .color_index = LED_COLOR_ID_GREEN,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 5),
+ },
+ {
+ .color_index = LED_COLOR_ID_BLUE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x09, 7),
+ },
+ {
+ .color_index = LED_COLOR_ID_AMBER,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x09, 4),
+ },
+};
+
+static struct mc_subled cordoba_fp_center_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 7),
+ },
+ {
+ .color_index = LED_COLOR_ID_GREEN,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 4),
+ },
+ {
+ .color_index = LED_COLOR_ID_BLUE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 3),
+ },
+ {
+ .color_index = LED_COLOR_ID_AMBER,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x09, 6),
+ },
+};
+
+static struct mc_subled cordoba_fp_right_mc_subled_info[] __initdata = {
+ {
+ .color_index = LED_COLOR_ID_RED,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 2),
+ },
+ {
+ .color_index = LED_COLOR_ID_GREEN,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 1),
+ },
+ {
+ .color_index = LED_COLOR_ID_BLUE,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x08, 0),
+ },
+ {
+ .color_index = LED_COLOR_ID_AMBER,
+ .brightness = 1,
+ .intensity = 0,
+ .channel = OFFSET_BIT_TO_CHANNEL(0x09, 5),
+ },
+};
+
+static struct led_classdev_mc cordoba_mc_led_info[] __initdata = {
+ {
+ .led_cdev = {
+ .name = "platled::fp_left",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(cordoba_fp_left_mc_subled_info),
+ .subled_info = cordoba_fp_left_mc_subled_info,
+ },
+ {
+ .led_cdev = {
+ .name = "platled::fp_center",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(cordoba_fp_center_mc_subled_info),
+ .subled_info = cordoba_fp_center_mc_subled_info,
+ },
+ {
+ .led_cdev = {
+ .name = "platled::fp_right",
+ .brightness = 0,
+ .max_brightness = 1,
+ .brightness_set = silicom_mec_led_mc_brightness_set,
+ .brightness_get = silicom_mec_led_mc_brightness_get,
+ },
+ .num_colors = ARRAY_SIZE(cordoba_fp_right_mc_subled_info),
+ .subled_info = cordoba_fp_right_mc_subled_info,
+ },
+ { },
+};
+
+static struct silicom_platform_info silicom_generic_cordoba_info __initdata = {
+ .io_base = MEC_IO_BASE,
+ .io_len = MEC_IO_LEN,
+ .led_info = cordoba_mc_led_info,
+ .gpiochip = &silicom_gpio_chip,
+ .gpio_channels = plat_0222_gpio_channels,
+ .ngpio = ARRAY_SIZE(plat_0222_gpio_channels),
+};
+
+/*
+ * sysfs interface
+ */
+static ssize_t efuse_status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 reg;
+
+ mutex_lock(&mec_io_mutex);
+ /* Select memory region */
+ outb(IO_REG_BANK, EC_ADDR_MSB);
+ outb(MEC_EFUSE_LSB_ADDR, EC_ADDR_LSB);
+
+	/* Read the current data at the selected address */
+ reg = inl(MEC_DATA_OFFSET(DEFAULT_CHAN_LO));
+ mutex_unlock(&mec_io_mutex);
+
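+	/* Only bit 0 of the register carries the eFuse status */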
+ efuse_status = reg & 0x1;
+
+ return sysfs_emit(buf, "%u\n", efuse_status);
+}
+static DEVICE_ATTR_RO(efuse_status);
+
+static ssize_t uc_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int uc_version;
+ u32 reg;
+
+ mutex_lock(&mec_io_mutex);
+ outb(IO_REG_BANK, EC_ADDR_MSB);
+ outb(DEFAULT_CHAN_LO, EC_ADDR_LSB);
+
+ reg = inl(MEC_DATA_OFFSET(DEFAULT_CHAN_LO));
+ mutex_unlock(&mec_io_mutex);
+ uc_version = FIELD_GET(MEC_VERSION_LOC, reg);
+ if (uc_version >= 192)
+ return -EINVAL;
+
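+	/* The version is encoded as major * 100 + minor */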
+ uc_version = FIELD_GET(MEC_VERSION_MAJOR, reg) * 100 +
+ FIELD_GET(MEC_VERSION_MINOR, reg);
+
+ mec_uc_version = uc_version;
+
+ return sysfs_emit(buf, "%u\n", mec_uc_version);
+}
+static DEVICE_ATTR_RO(uc_version);
+
+static ssize_t power_cycle_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%u\n", power_cycle);
+}
+
+static void powercycle_uc(void)
+{
+ /* Select memory region */
+ outb(IO_REG_BANK, EC_ADDR_MSB);
+ outb(MEC_POWER_CYCLE_ADDR, EC_ADDR_LSB);
+
+	/* Writing 1 requests a power cycle from the EC */
+ outb(1, MEC_DATA_OFFSET(DEFAULT_CHAN_LO));
+}
+
+static ssize_t power_cycle_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ unsigned int power_cycle_cmd;
+
+ rc = kstrtou32(buf, 0, &power_cycle_cmd);
+ if (rc)
+ return -EINVAL;
+
+ if (power_cycle_cmd > 0) {
+ mutex_lock(&mec_io_mutex);
+ power_cycle = power_cycle_cmd;
+ powercycle_uc();
+ mutex_unlock(&mec_io_mutex);
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(power_cycle);
+
+static struct attribute *silicom_attrs[] = {
+ &dev_attr_efuse_status.attr,
+ &dev_attr_uc_version.attr,
+ &dev_attr_power_cycle.attr,
+ NULL,
+};
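+/* ATTRIBUTE_GROUPS() generates the silicom_groups array used below */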
+ATTRIBUTE_GROUPS(silicom);
+
+static struct platform_driver silicom_platform_driver = {
+ .driver = {
+ .name = "silicom-platform",
+ .dev_groups = silicom_groups,
+ },
+};
+
+static int __init silicom_mc_leds_register(struct device *dev,
+ const struct led_classdev_mc *mc_leds)
+{
+ int size = sizeof(struct mc_subled);
+ struct led_classdev_mc *led;
+ int i, err;
+
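+	/*
+	 * The LED tables are __initdata, so copy each entry and its subled
+	 * array into devm-allocated memory that outlives the init section.
+	 */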
+ for (i = 0; mc_leds[i].led_cdev.name; i++) {
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+ memcpy(led, &mc_leds[i], sizeof(*led));
+
+ led->subled_info = devm_kzalloc(dev, led->num_colors * size, GFP_KERNEL);
+ if (!led->subled_info)
+ return -ENOMEM;
+ memcpy(led->subled_info, mc_leds[i].subled_info, led->num_colors * size);
+
+ err = devm_led_classdev_multicolor_register(dev, led);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static u32 rpm_get(void)
+{
+ u32 reg;
+
+ mutex_lock(&mec_io_mutex);
+ /* Select memory region */
+ outb(IO_REG_BANK, EC_ADDR_MSB);
+ outb(DEFAULT_CHAN_LO_T, EC_ADDR_LSB);
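+	/* The fan tachometer reading is a 16-bit value */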
+ reg = inw(MEC_DATA_OFFSET(DEFAULT_CHAN_LO));
+ mutex_unlock(&mec_io_mutex);
+
+ return reg;
+}
+
+static u32 temp_get(void)
+{
+ u32 reg;
+
+ mutex_lock(&mec_io_mutex);
+ /* Select memory region */
+ outb(IO_REG_BANK, EC_ADDR_MSB);
+ outb(DEFAULT_CHAN_LO_T, EC_ADDR_LSB);
+ reg = inl(MEC_DATA_OFFSET(DEFAULT_CHAN_LO));
+ mutex_unlock(&mec_io_mutex);
+
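+	/* hwmon expects temperatures in millidegrees Celsius */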
+ return FIELD_GET(MEC_TEMP_LOC, reg) * 100;
+}
+
+static umode_t silicom_fan_control_fan_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static umode_t silicom_fan_control_temp_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int silicom_fan_control_read_fan(struct device *dev, u32 attr, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = rpm_get();
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int silicom_fan_control_read_temp(struct device *dev, u32 attr, long *val)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = temp_get();
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t silicom_fan_control_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return silicom_fan_control_fan_is_visible(attr);
+ case hwmon_temp:
+ return silicom_fan_control_temp_is_visible(attr);
+ default:
+ return 0;
+ }
+}
+
+static int silicom_fan_control_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel,
+ long *val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return silicom_fan_control_read_fan(dev, attr, val);
+ case hwmon_temp:
+ return silicom_fan_control_read_temp(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int silicom_fan_control_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel,
+ const char **str)
+{
+ switch (type) {
+ case hwmon_fan:
+ *str = "Silicom_platform: Fan Speed";
+ return 0;
+ case hwmon_temp:
+ *str = "Silicom_platform: Thermostat Sensor";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops silicom_fan_control_hwmon_ops = {
+ .is_visible = silicom_fan_control_is_visible,
+ .read = silicom_fan_control_read,
+ .read_string = silicom_fan_control_read_labels,
+};
+
+static const struct hwmon_chip_info silicom_chip_info = {
+ .ops = &silicom_fan_control_hwmon_ops,
+ .info = silicom_fan_control_info,
+};
+
+static int __init silicom_platform_probe(struct platform_device *device)
+{
+ struct device *hwmon_dev;
+ u8 magic, ver;
+ int err;
+
+ if (!devm_request_region(&device->dev, MEC_IO_BASE, MEC_IO_LEN, "mec")) {
+ dev_err(&device->dev, "couldn't reserve MEC io ports\n");
+ return -EBUSY;
+ }
+
+	/* Sanity-check the magic number reported by the EC */
+ outb(IO_REG_BANK, MEC_ADDR);
+ magic = inb(MEC_DATA_OFFSET(DEFAULT_CHAN_LO));
+ ver = inb(MEC_DATA_OFFSET(DEFAULT_CHAN_HI));
+ dev_dbg(&device->dev, "EC magic 0x%02x, version 0x%02x\n", magic, ver);
+
+ if (magic != SILICOM_MEC_MAGIC) {
+ dev_err(&device->dev, "Bad EC magic 0x%02x!\n", magic);
+ return -ENODEV;
+ }
+
+ err = silicom_mc_leds_register(&device->dev, silicom_led_info);
+ if (err) {
+ dev_err(&device->dev, "Failed to register LEDs\n");
+ return err;
+ }
+
+ err = devm_gpiochip_add_data(&device->dev, silicom_gpiochip,
+ silicom_gpio_channels);
+ if (err) {
+ dev_err(&device->dev, "Failed to register gpiochip: %d\n", err);
+ return err;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&device->dev, "silicom_fan", NULL,
+ &silicom_chip_info, NULL);
+ err = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (err) {
+ dev_err(&device->dev, "Failed to register hwmon_dev: %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static int __init silicom_platform_info_init(const struct dmi_system_id *id)
+{
+ struct silicom_platform_info *info = id->driver_data;
+
+ silicom_led_info = info->led_info;
+ silicom_gpio_channels = info->gpio_channels;
+ silicom_gpiochip = info->gpiochip;
+ silicom_gpiochip->ngpio = info->ngpio;
+
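+	/* Return non-zero so dmi_check_system() stops after this match */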
+ return 1;
+}
+
+static const struct dmi_system_id silicom_dmi_ids[] __initconst = {
+ {
+ .callback = silicom_platform_info_init,
+ .ident = "Silicom Cordoba (Generic)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Silicom"),
+ DMI_MATCH(DMI_BOARD_NAME, "80300-0214-G"),
+ },
+ .driver_data = &silicom_generic_cordoba_info,
+ },
+ {
+ .callback = silicom_platform_info_init,
+ .ident = "Silicom Cordoba (Generic)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Silicom"),
+ DMI_MATCH(DMI_BOARD_NAME, "80500-0214-G"),
+ },
+ .driver_data = &silicom_generic_cordoba_info,
+ },
+ {
+ .callback = silicom_platform_info_init,
+ .ident = "Silicom Cordoba (plat_0222)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Silicom"),
+ DMI_MATCH(DMI_BOARD_NAME, "80300-0222-G"),
+ },
+ .driver_data = &silicom_plat_0222_cordoba_info,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, silicom_dmi_ids);
+
+static int __init silicom_platform_init(void)
+{
+ if (!dmi_check_system(silicom_dmi_ids)) {
+ pr_err("No DMI match for this platform\n");
+ return -ENODEV;
+ }
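+	/*
+	 * platform_create_bundle() registers the driver and creates the device
+	 * in one step, which lets the probe routine remain in __init memory.
+	 */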
+ silicom_platform_dev = platform_create_bundle(&silicom_platform_driver,
+ silicom_platform_probe,
+ NULL, 0, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(silicom_platform_dev);
+}
+
+static void __exit silicom_platform_exit(void)
+{
+ platform_device_unregister(silicom_platform_dev);
+ platform_driver_unregister(&silicom_platform_driver);
+}
+
+module_init(silicom_platform_init);
+module_exit(silicom_platform_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Henry Shi <henrys@silicom-usa.com>");
+MODULE_DESCRIPTION("Platform driver for Silicom network appliances");
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 969477c83..c6a10ec2c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = {
{ }
};
-static const struct ts_dmi_data gdix1001_00_upside_down_data = {
- .acpi_name = "GDIX1001:00",
- .properties = gdix1001_upside_down_props,
-};
-
-static const struct ts_dmi_data gdix1001_01_upside_down_data = {
- .acpi_name = "GDIX1001:01",
+static const struct ts_dmi_data gdix1001_upside_down_data = {
+ .acpi_name = "GDIX1001",
.properties = gdix1001_upside_down_props,
};
-static const struct ts_dmi_data gdix1002_00_upside_down_data = {
- .acpi_name = "GDIX1002:00",
+static const struct ts_dmi_data gdix1002_upside_down_data = {
+ .acpi_name = "GDIX1002",
.properties = gdix1001_upside_down_props,
};
@@ -1223,6 +1218,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 dual-boot (CWI506) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
+ },
+ },
+ {
/* Chuwi Vi8 Plus (CWI519) */
.driver_data = (void *)&chuwi_vi8_plus_data,
.matches = {
@@ -1412,7 +1416,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Juno Tablet */
- .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .driver_data = (void *)&gdix1002_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
@@ -1658,7 +1662,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Android version / BIOS) */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
@@ -1666,7 +1670,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Windows version / BIOS) */
- .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1684,7 +1688,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X98 Pro */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/*
* Only match BIOS date, because the manufacturers
@@ -1788,7 +1792,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* "WinBook TW100" */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
@@ -1796,7 +1800,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* WinBook TW700 */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index bd017478e..3c288e8f4 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -23,17 +23,15 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/wmi.h>
#include <linux/fs.h>
-#include <uapi/linux/wmi.h>
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
@@ -59,19 +57,17 @@ static_assert(__alignof__(struct guid_block) == 1);
enum { /* wmi_block flags */
WMI_READ_TAKES_NO_ARGS,
- WMI_PROBED,
};
struct wmi_block {
struct wmi_device dev;
struct list_head list;
struct guid_block gblock;
- struct miscdevice char_dev;
- struct mutex char_mutex;
struct acpi_device *acpi_device;
+ struct rw_semaphore notify_lock; /* Protects notify callback add/remove */
wmi_notify_handler handler;
void *handler_data;
- u64 req_buf_size;
+ bool driver_ready;
unsigned long flags;
};
@@ -85,16 +81,6 @@ struct wmi_block {
#define ACPI_WMI_STRING BIT(2) /* GUID takes & returns a string */
#define ACPI_WMI_EVENT BIT(3) /* GUID is an event */
-static bool debug_event;
-module_param(debug_event, bool, 0444);
-MODULE_PARM_DESC(debug_event,
- "Log WMI Events [0/1]");
-
-static bool debug_dump_wdg;
-module_param(debug_dump_wdg, bool, 0444);
-MODULE_PARM_DESC(debug_dump_wdg,
- "Dump available WMI interfaces [0/1]");
-
static const struct acpi_device_id wmi_device_ids[] = {
{"PNP0C14", 0},
{"pnp0c14", 0},
@@ -106,6 +92,9 @@ MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
static const char * const allow_duplicates[] = {
"05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
"8A42EA14-4F2A-FD45-6422-0087F7A7E608", /* dell-wmi-ddv */
+ "44FADEB1-B204-40F2-8581-394BBDC1B651", /* intel-wmi-sbl-fw-update */
+ "86CCFD48-205E-4A77-9C48-2021CBEDE341", /* intel-wmi-thunderbolt */
+ "F1DDEE52-063C-4784-A11E-8A06684B9B01", /* dell-smm-hwmon */
NULL
};
@@ -146,23 +135,19 @@ static const void *find_guid_context(struct wmi_block *wblock,
static int get_subobj_info(acpi_handle handle, const char *pathname,
struct acpi_device_info **info)
{
- struct acpi_device_info *dummy_info, **info_ptr;
acpi_handle subobj_handle;
acpi_status status;
- status = acpi_get_handle(handle, (char *)pathname, &subobj_handle);
+ status = acpi_get_handle(handle, pathname, &subobj_handle);
if (status == AE_NOT_FOUND)
return -ENOENT;
- else if (ACPI_FAILURE(status))
- return -EIO;
- info_ptr = info ? info : &dummy_info;
- status = acpi_get_object_info(subobj_handle, info_ptr);
if (ACPI_FAILURE(status))
return -EIO;
- if (!info)
- kfree(dummy_info);
+ status = acpi_get_object_info(subobj_handle, info);
+ if (ACPI_FAILURE(status))
+ return -EIO;
return 0;
}
@@ -236,6 +221,17 @@ static int wmidev_match_guid(struct device *dev, const void *data)
return 0;
}
+static int wmidev_match_notify_id(struct device *dev, const void *data)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ const u32 *notify_id = data;
+
+ if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
+ return 1;
+
+ return 0;
+}
+
static struct bus_type wmi_bus_type;
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -255,6 +251,17 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
return dev_to_wdev(dev);
}
+static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wmi_device(dev);
+}
+
static void wmi_device_put(struct wmi_device *wdev)
{
put_device(&wdev->dev);
@@ -265,26 +272,6 @@ static void wmi_device_put(struct wmi_device *wdev)
*/
/**
- * set_required_buffer_size - Sets the buffer size needed for performing IOCTL
- * @wdev: A wmi bus device from a driver
- * @length: Required buffer size
- *
- * Allocates memory needed for buffer, stores the buffer size in that memory.
- *
- * Return: 0 on success or a negative error code for failure.
- */
-int set_required_buffer_size(struct wmi_device *wdev, u64 length)
-{
- struct wmi_block *wblock;
-
- wblock = container_of(wdev, struct wmi_block, dev);
- wblock->req_buf_size = length;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(set_required_buffer_size);
-
-/**
* wmi_instance_count - Get number of WMI object instances
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
@@ -536,41 +523,50 @@ EXPORT_SYMBOL_GPL(wmidev_block_query);
*
* Return: acpi_status signaling success or error.
*/
-acpi_status wmi_set_block(const char *guid_string, u8 instance,
- const struct acpi_buffer *in)
+acpi_status wmi_set_block(const char *guid_string, u8 instance, const struct acpi_buffer *in)
{
- struct wmi_block *wblock;
- struct guid_block *block;
struct wmi_device *wdev;
- acpi_handle handle;
- struct acpi_object_list input;
- union acpi_object params[2];
- char method[WMI_ACPI_METHOD_NAME_SIZE];
acpi_status status;
- if (!in)
- return AE_BAD_DATA;
-
wdev = wmi_find_device_by_guid(guid_string);
if (IS_ERR(wdev))
return AE_ERROR;
- wblock = container_of(wdev, struct wmi_block, dev);
- block = &wblock->gblock;
- handle = wblock->acpi_device->handle;
+ status = wmidev_block_set(wdev, instance, in);
+ wmi_device_put(wdev);
- if (block->instance_count <= instance) {
- status = AE_BAD_PARAMETER;
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_set_block);
- goto err_wdev_put;
- }
+/**
+ * wmidev_block_set - Write to a WMI block
+ * @wdev: A wmi bus device from a driver
+ * @instance: Instance index
+ * @in: Buffer containing new values for the data block
+ *
+ * Write contents of the input buffer to an ACPI-WMI data block.
+ *
+ * Return: acpi_status signaling success or error.
+ */
+acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in)
+{
+ struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
+ acpi_handle handle = wblock->acpi_device->handle;
+ struct guid_block *block = &wblock->gblock;
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+ struct acpi_object_list input;
+ union acpi_object params[2];
- /* Check GUID is a data block */
- if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD)) {
- status = AE_ERROR;
+ if (!in)
+ return AE_BAD_DATA;
- goto err_wdev_put;
- }
+ if (block->instance_count <= instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_ERROR;
input.count = 2;
input.pointer = params;
@@ -582,73 +578,9 @@ acpi_status wmi_set_block(const char *guid_string, u8 instance,
get_acpi_method_name(wblock, 'S', method);
- status = acpi_evaluate_object(handle, method, &input, NULL);
-
-err_wdev_put:
- wmi_device_put(wdev);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(wmi_set_block);
-
-static void wmi_dump_wdg(const struct guid_block *g)
-{
- pr_info("%pUL:\n", &g->guid);
- if (g->flags & ACPI_WMI_EVENT)
- pr_info("\tnotify_id: 0x%02X\n", g->notify_id);
- else
- pr_info("\tobject_id: %2pE\n", g->object_id);
- pr_info("\tinstance_count: %d\n", g->instance_count);
- pr_info("\tflags: %#x", g->flags);
- if (g->flags) {
- if (g->flags & ACPI_WMI_EXPENSIVE)
- pr_cont(" ACPI_WMI_EXPENSIVE");
- if (g->flags & ACPI_WMI_METHOD)
- pr_cont(" ACPI_WMI_METHOD");
- if (g->flags & ACPI_WMI_STRING)
- pr_cont(" ACPI_WMI_STRING");
- if (g->flags & ACPI_WMI_EVENT)
- pr_cont(" ACPI_WMI_EVENT");
- }
- pr_cont("\n");
-
-}
-
-static void wmi_notify_debug(u32 value, void *context)
-{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
-
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- pr_info("bad event status 0x%x\n", status);
- return;
- }
-
- obj = response.pointer;
- if (!obj)
- return;
-
- pr_info("DEBUG: event 0x%02X ", value);
- switch (obj->type) {
- case ACPI_TYPE_BUFFER:
- pr_cont("BUFFER_TYPE - length %u\n", obj->buffer.length);
- break;
- case ACPI_TYPE_STRING:
- pr_cont("STRING_TYPE - %s\n", obj->string.pointer);
- break;
- case ACPI_TYPE_INTEGER:
- pr_cont("INTEGER_TYPE - %llu\n", obj->integer.value);
- break;
- case ACPI_TYPE_PACKAGE:
- pr_cont("PACKAGE_TYPE - %u elements\n", obj->package.count);
- break;
- default:
- pr_cont("object type 0x%X\n", obj->type);
- }
- kfree(obj);
+ return acpi_evaluate_object(handle, method, &input, NULL);
}
+EXPORT_SYMBOL_GPL(wmidev_block_set);
/**
* wmi_install_notify_handler - Register handler for WMI events (deprecated)
@@ -664,34 +596,31 @@ acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler,
void *data)
{
- struct wmi_block *block;
- acpi_status status = AE_NOT_EXIST;
- guid_t guid_input;
-
- if (!guid || !handler)
- return AE_BAD_PARAMETER;
-
- if (guid_parse(guid, &guid_input))
- return AE_BAD_PARAMETER;
+ struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- list_for_each_entry(block, &wmi_block_list, list) {
- acpi_status wmi_status;
+ wdev = wmi_find_device_by_guid(guid);
+ if (IS_ERR(wdev))
+ return AE_ERROR;
- if (guid_equal(&block->gblock.guid, &guid_input)) {
- if (block->handler &&
- block->handler != wmi_notify_debug)
- return AE_ALREADY_ACQUIRED;
+ wblock = container_of(wdev, struct wmi_block, dev);
- block->handler = handler;
- block->handler_data = data;
+ down_write(&wblock->notify_lock);
+ if (wblock->handler) {
+ status = AE_ALREADY_ACQUIRED;
+ } else {
+ wblock->handler = handler;
+ wblock->handler_data = data;
- wmi_status = wmi_method_enable(block, true);
- if (ACPI_FAILURE(wmi_status))
- dev_warn(&block->dev.dev, "Failed to enable device\n");
+ if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ dev_warn(&wblock->dev.dev, "Failed to enable device\n");
- status = AE_OK;
- }
+ status = AE_OK;
}
+ up_write(&wblock->notify_lock);
+
+ wmi_device_put(wdev);
return status;
}
@@ -707,39 +636,31 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
*/
acpi_status wmi_remove_notify_handler(const char *guid)
{
- struct wmi_block *block;
- acpi_status status = AE_NOT_EXIST;
- guid_t guid_input;
-
- if (!guid)
- return AE_BAD_PARAMETER;
-
- if (guid_parse(guid, &guid_input))
- return AE_BAD_PARAMETER;
+ struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- list_for_each_entry(block, &wmi_block_list, list) {
- acpi_status wmi_status;
+ wdev = wmi_find_device_by_guid(guid);
+ if (IS_ERR(wdev))
+ return AE_ERROR;
- if (guid_equal(&block->gblock.guid, &guid_input)) {
- if (!block->handler ||
- block->handler == wmi_notify_debug)
- return AE_NULL_ENTRY;
+ wblock = container_of(wdev, struct wmi_block, dev);
- if (debug_event) {
- block->handler = wmi_notify_debug;
- status = AE_OK;
- } else {
- wmi_status = wmi_method_enable(block, false);
- if (ACPI_FAILURE(wmi_status))
- dev_warn(&block->dev.dev, "Failed to disable device\n");
+ down_write(&wblock->notify_lock);
+ if (!wblock->handler) {
+ status = AE_NULL_ENTRY;
+ } else {
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ dev_warn(&wblock->dev.dev, "Failed to disable device\n");
- block->handler = NULL;
- block->handler_data = NULL;
+ wblock->handler = NULL;
+ wblock->handler_data = NULL;
- status = AE_OK;
- }
- }
+ status = AE_OK;
}
+ up_write(&wblock->notify_lock);
+
+ wmi_device_put(wdev);
return status;
}
@@ -758,15 +679,19 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
{
struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- list_for_each_entry(wblock, &wmi_block_list, list) {
- struct guid_block *gblock = &wblock->gblock;
+ wdev = wmi_find_event_by_notify_id(event);
+ if (IS_ERR(wdev))
+ return AE_NOT_FOUND;
- if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
- return get_event_data(wblock, out);
- }
+ wblock = container_of(wdev, struct wmi_block, dev);
+ status = get_event_data(wblock, out);
+
+ wmi_device_put(wdev);
- return AE_NOT_FOUND;
+ return status;
}
EXPORT_SYMBOL_GPL(wmi_get_event_data);
@@ -958,111 +883,12 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
return 0;
}
-static int wmi_char_open(struct inode *inode, struct file *filp)
-{
- /*
- * The miscdevice already stores a pointer to itself
- * inside filp->private_data
- */
- struct wmi_block *wblock = container_of(filp->private_data, struct wmi_block, char_dev);
-
- filp->private_data = wblock;
-
- return nonseekable_open(inode, filp);
-}
-
-static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
- size_t length, loff_t *offset)
-{
- struct wmi_block *wblock = filp->private_data;
-
- return simple_read_from_buffer(buffer, length, offset,
- &wblock->req_buf_size,
- sizeof(wblock->req_buf_size));
-}
-
-static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct wmi_ioctl_buffer __user *input =
- (struct wmi_ioctl_buffer __user *) arg;
- struct wmi_block *wblock = filp->private_data;
- struct wmi_ioctl_buffer *buf;
- struct wmi_driver *wdriver;
- int ret;
-
- if (_IOC_TYPE(cmd) != WMI_IOC)
- return -ENOTTY;
-
- /* make sure we're not calling a higher instance than exists*/
- if (_IOC_NR(cmd) >= wblock->gblock.instance_count)
- return -EINVAL;
-
- mutex_lock(&wblock->char_mutex);
- buf = wblock->handler_data;
- if (get_user(buf->length, &input->length)) {
- dev_dbg(&wblock->dev.dev, "Read length from user failed\n");
- ret = -EFAULT;
- goto out_ioctl;
- }
- /* if it's too small, abort */
- if (buf->length < wblock->req_buf_size) {
- dev_err(&wblock->dev.dev,
- "Buffer %lld too small, need at least %lld\n",
- buf->length, wblock->req_buf_size);
- ret = -EINVAL;
- goto out_ioctl;
- }
- /* if it's too big, warn, driver will only use what is needed */
- if (buf->length > wblock->req_buf_size)
- dev_warn(&wblock->dev.dev,
- "Buffer %lld is bigger than required %lld\n",
- buf->length, wblock->req_buf_size);
-
- /* copy the structure from userspace */
- if (copy_from_user(buf, input, wblock->req_buf_size)) {
- dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n",
- wblock->req_buf_size);
- ret = -EFAULT;
- goto out_ioctl;
- }
-
- /* let the driver do any filtering and do the call */
- wdriver = drv_to_wdrv(wblock->dev.dev.driver);
- if (!try_module_get(wdriver->driver.owner)) {
- ret = -EBUSY;
- goto out_ioctl;
- }
- ret = wdriver->filter_callback(&wblock->dev, cmd, buf);
- module_put(wdriver->driver.owner);
- if (ret)
- goto out_ioctl;
-
- /* return the result (only up to our internal buffer size) */
- if (copy_to_user(input, buf, wblock->req_buf_size)) {
- dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n",
- wblock->req_buf_size);
- ret = -EFAULT;
- }
-
-out_ioctl:
- mutex_unlock(&wblock->char_mutex);
- return ret;
-}
-
-static const struct file_operations wmi_fops = {
- .owner = THIS_MODULE,
- .read = wmi_char_read,
- .open = wmi_char_open,
- .unlocked_ioctl = wmi_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
static int wmi_dev_probe(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
int ret = 0;
- char *buf;
if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
@@ -1070,55 +896,19 @@ static int wmi_dev_probe(struct device *dev)
if (wdriver->probe) {
ret = wdriver->probe(dev_to_wdev(dev),
find_guid_context(wblock, wdriver));
- if (ret != 0)
- goto probe_failure;
- }
-
- /* driver wants a character device made */
- if (wdriver->filter_callback) {
- /* check that required buffer size declared by driver or MOF */
- if (!wblock->req_buf_size) {
- dev_err(&wblock->dev.dev,
- "Required buffer size not set\n");
- ret = -EINVAL;
- goto probe_failure;
- }
-
- wblock->handler_data = kmalloc(wblock->req_buf_size,
- GFP_KERNEL);
- if (!wblock->handler_data) {
- ret = -ENOMEM;
- goto probe_failure;
- }
-
- buf = kasprintf(GFP_KERNEL, "wmi/%s", wdriver->driver.name);
- if (!buf) {
- ret = -ENOMEM;
- goto probe_string_failure;
- }
- wblock->char_dev.minor = MISC_DYNAMIC_MINOR;
- wblock->char_dev.name = buf;
- wblock->char_dev.fops = &wmi_fops;
- wblock->char_dev.mode = 0444;
- ret = misc_register(&wblock->char_dev);
if (ret) {
- dev_warn(dev, "failed to register char dev: %d\n", ret);
- ret = -ENOMEM;
- goto probe_misc_failure;
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ dev_warn(dev, "Failed to disable device\n");
+
+ return ret;
}
}
- set_bit(WMI_PROBED, &wblock->flags);
- return 0;
+ down_write(&wblock->notify_lock);
+ wblock->driver_ready = true;
+ up_write(&wblock->notify_lock);
-probe_misc_failure:
- kfree(buf);
-probe_string_failure:
- kfree(wblock->handler_data);
-probe_failure:
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
- dev_warn(dev, "failed to disable device\n");
- return ret;
+ return 0;
}
static void wmi_dev_remove(struct device *dev)
@@ -1126,13 +916,9 @@ static void wmi_dev_remove(struct device *dev)
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
- clear_bit(WMI_PROBED, &wblock->flags);
-
- if (wdriver->filter_callback) {
- misc_deregister(&wblock->char_dev);
- kfree(wblock->char_dev.name);
- kfree(wblock->handler_data);
- }
+ down_write(&wblock->notify_lock);
+ wblock->driver_ready = false;
+ up_write(&wblock->notify_lock);
if (wdriver->remove)
wdriver->remove(dev_to_wdev(dev));
@@ -1205,7 +991,6 @@ static int wmi_create_device(struct device *wmi_bus_dev,
if (wblock->gblock.flags & ACPI_WMI_METHOD) {
wblock->dev.dev.type = &wmi_type_method;
- mutex_init(&wblock->char_mutex);
goto out_init;
}
@@ -1242,12 +1027,12 @@ static int wmi_create_device(struct device *wmi_bus_dev,
kfree(info);
get_acpi_method_name(wblock, 'S', method);
- result = get_subobj_info(device->handle, method, NULL);
-
- if (result == 0)
+ if (acpi_has_method(device->handle, method))
wblock->dev.setable = true;
out_init:
+ init_rwsem(&wblock->notify_lock);
+ wblock->driver_ready = false;
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
@@ -1339,9 +1124,6 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
total = obj->buffer.length / sizeof(struct guid_block);
for (i = 0; i < total; i++) {
- if (debug_dump_wdg)
- wmi_dump_wdg(&gblock[i]);
-
if (!gblock[i].instance_count) {
dev_info(wmi_bus_dev, FW_INFO "%pUL has zero instances\n", &gblock[i].guid);
continue;
@@ -1367,17 +1149,10 @@ static int parse_wdg(struct device *wmi_bus_dev, struct platform_device *pdev)
list_add_tail(&wblock->list, &wmi_block_list);
- if (debug_event) {
- wblock->handler = wmi_notify_debug;
- wmi_method_enable(wblock, true);
- }
-
retval = wmi_add_device(pdev, &wblock->dev);
if (retval) {
dev_err(wmi_bus_dev, "failed to register %pUL\n",
&wblock->gblock.guid);
- if (debug_event)
- wmi_method_enable(wblock, false);
list_del(&wblock->list);
put_device(&wblock->dev.dev);
@@ -1398,7 +1173,7 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
u32 bits, u64 *value,
void *handler_context, void *region_context)
{
- int result = 0, i = 0;
+ int result = 0;
u8 temp = 0;
if ((address > 0xFF) || !value)
@@ -1412,9 +1187,9 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
if (function == ACPI_READ) {
result = ec_read(address, &temp);
- (*value) |= ((u64)temp) << i;
+ *value = temp;
} else {
- temp = 0xff & ((*value) >> i);
+ temp = 0xff & *value;
result = ec_write(address, temp);
}
@@ -1430,55 +1205,57 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
-static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
- void *context)
+static void wmi_notify_driver(struct wmi_block *wblock)
{
- struct wmi_block *wblock = NULL, *iter;
-
- list_for_each_entry(iter, &wmi_block_list, list) {
- struct guid_block *block = &iter->gblock;
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+ struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
- if (iter->acpi_device->handle == handle &&
- (block->flags & ACPI_WMI_EVENT) &&
- (block->notify_id == event)) {
- wblock = iter;
- break;
+ if (!driver->no_notify_data) {
+ status = get_event_data(wblock, &data);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+ return;
}
}
- if (!wblock)
- return;
-
- /* If a driver is bound, then notify the driver. */
- if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
- struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
- struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
-
- if (!driver->no_notify_data) {
- status = get_event_data(wblock, &evdata);
- if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev, "failed to get event data\n");
- return;
- }
- }
+ if (driver->notify)
+ driver->notify(&wblock->dev, data.pointer);
- if (driver->notify)
- driver->notify(&wblock->dev, evdata.pointer);
+ kfree(data.pointer);
+}
- kfree(evdata.pointer);
- } else if (wblock->handler) {
- /* Legacy handler */
- wblock->handler(event, wblock->handler_data);
+static int wmi_notify_device(struct device *dev, void *data)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ u32 *event = data;
+
+ if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
+ return 0;
+
+ down_read(&wblock->notify_lock);
+	/*
+	 * The WMI driver notify handler conflicts with the legacy WMI handler,
+	 * so the WMI driver notify handler takes precedence.
+	 */
+ if (wblock->dev.dev.driver && wblock->driver_ready) {
+ wmi_notify_driver(wblock);
+ } else {
+ if (wblock->handler)
+ wblock->handler(*event, wblock->handler_data);
}
+ up_read(&wblock->notify_lock);
+
+ acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
+ dev_name(&wblock->dev.dev), *event, 0);
+
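+	/* A non-zero return value stops the device_for_each_child() walk */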
+ return -EBUSY;
+}
- if (debug_event)
- pr_info("DEBUG: GUID %pUL event 0x%02X\n", &wblock->gblock.guid, event);
+static void acpi_wmi_notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct device *wmi_bus_dev = context;
- acpi_bus_generate_netlink_event(
- wblock->acpi_device->pnp.device_class,
- dev_name(&wblock->dev.dev),
- event, 0);
+ device_for_each_child(wmi_bus_dev, &event, wmi_notify_device);
}
static int wmi_remove_device(struct device *dev, void *data)
@@ -1493,16 +1270,31 @@ static int wmi_remove_device(struct device *dev, void *data)
static void acpi_wmi_remove(struct platform_device *device)
{
- struct acpi_device *acpi_device = ACPI_COMPANION(&device->dev);
struct device *wmi_bus_device = dev_get_drvdata(&device->dev);
- acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
- acpi_wmi_notify_handler);
- acpi_remove_address_space_handler(acpi_device->handle,
- ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
-
device_for_each_child_reverse(wmi_bus_device, NULL, wmi_remove_device);
- device_unregister(wmi_bus_device);
+}
+
+static void acpi_wmi_remove_notify_handler(void *data)
+{
+ struct acpi_device *acpi_device = data;
+
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY, acpi_wmi_notify_handler);
+}
+
+static void acpi_wmi_remove_address_space_handler(void *data)
+{
+ struct acpi_device *acpi_device = data;
+
+ acpi_remove_address_space_handler(acpi_device->handle, ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler);
+}
+
+static void acpi_wmi_remove_bus_device(void *data)
+{
+ struct device *wmi_bus_dev = data;
+
+ device_unregister(wmi_bus_dev);
}
static int acpi_wmi_probe(struct platform_device *device)
@@ -1518,6 +1310,17 @@ static int acpi_wmi_probe(struct platform_device *device)
return -ENODEV;
}
+ wmi_bus_dev = device_create(&wmi_bus_class, &device->dev, MKDEV(0, 0), NULL, "wmi_bus-%s",
+ dev_name(&device->dev));
+ if (IS_ERR(wmi_bus_dev))
+ return PTR_ERR(wmi_bus_dev);
+
+ error = devm_add_action_or_reset(&device->dev, acpi_wmi_remove_bus_device, wmi_bus_dev);
+ if (error < 0)
+ return error;
+
+ dev_set_drvdata(&device->dev, wmi_bus_dev);
+
status = acpi_install_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC,
&acpi_wmi_ec_space_handler,
@@ -1526,46 +1329,29 @@ static int acpi_wmi_probe(struct platform_device *device)
dev_err(&device->dev, "Error installing EC region handler\n");
return -ENODEV;
}
+ error = devm_add_action_or_reset(&device->dev, acpi_wmi_remove_address_space_handler,
+ acpi_device);
+ if (error < 0)
+ return error;
- status = acpi_install_notify_handler(acpi_device->handle,
- ACPI_ALL_NOTIFY,
- acpi_wmi_notify_handler,
- NULL);
+ status = acpi_install_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
+ acpi_wmi_notify_handler, wmi_bus_dev);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Error installing notify handler\n");
- error = -ENODEV;
- goto err_remove_ec_handler;
- }
-
- wmi_bus_dev = device_create(&wmi_bus_class, &device->dev, MKDEV(0, 0),
- NULL, "wmi_bus-%s", dev_name(&device->dev));
- if (IS_ERR(wmi_bus_dev)) {
- error = PTR_ERR(wmi_bus_dev);
- goto err_remove_notify_handler;
+ return -ENODEV;
}
- dev_set_drvdata(&device->dev, wmi_bus_dev);
+ error = devm_add_action_or_reset(&device->dev, acpi_wmi_remove_notify_handler,
+ acpi_device);
+ if (error < 0)
+ return error;
error = parse_wdg(wmi_bus_dev, device);
if (error) {
pr_err("Failed to parse WDG method\n");
- goto err_remove_busdev;
+ return error;
}
return 0;
-
-err_remove_busdev:
- device_unregister(wmi_bus_dev);
-
-err_remove_notify_handler:
- acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
- acpi_wmi_notify_handler);
-
-err_remove_ec_handler:
- acpi_remove_address_space_handler(acpi_device->handle,
- ACPI_ADR_SPACE_EC,
- &acpi_wmi_ec_space_handler);
-
- return error;
}
int __must_check __wmi_driver_register(struct wmi_driver *driver,
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index ac2e8caaa..a3415f1c0 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include "x86-android-tablets.h"
+#include "../serdev_helpers.h"
static struct platform_device *x86_android_tablet_device;
@@ -144,9 +145,11 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
}
static int i2c_client_count;
+static int spi_dev_count;
static int pdev_count;
static int serdev_count;
static struct i2c_client **i2c_clients;
+static struct spi_device **spi_devs;
static struct platform_device **pdevs;
static struct serdev_device **serdevs;
static struct gpio_keys_button *buttons;
@@ -188,40 +191,62 @@ static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info
return 0;
}
+static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, int idx)
+{
+ const struct x86_spi_dev_info *spi_dev_info = &dev_info->spi_dev_info[idx];
+ struct spi_board_info board_info = spi_dev_info->board_info;
+ struct spi_controller *controller;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ acpi_status status;
+
+ board_info.irq = x86_acpi_irq_helper_get(&spi_dev_info->irq_data);
+ if (board_info.irq < 0)
+ return board_info.irq;
+
+ status = acpi_get_handle(NULL, spi_dev_info->ctrl_path, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Error could not get %s handle\n", spi_dev_info->ctrl_path);
+ return -ENODEV;
+ }
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev) {
+ pr_err("Error could not get adev for %s\n", spi_dev_info->ctrl_path);
+ return -ENODEV;
+ }
+
+ controller = acpi_spi_find_controller_by_adev(adev);
+ if (!controller) {
+ pr_err("Error could not get SPI controller for %s\n", spi_dev_info->ctrl_path);
+ return -ENODEV;
+ }
+
+ spi_devs[idx] = spi_new_device(controller, &board_info);
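+	/* Drop the reference taken by acpi_spi_find_controller_by_adev() */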
+ put_device(&controller->dev);
+ if (!spi_devs[idx])
+ return dev_err_probe(&controller->dev, -ENOMEM,
+ "creating SPI-device %d\n", idx);
+
+ return 0;
+}
+
static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
{
- struct acpi_device *ctrl_adev, *serdev_adev;
+ struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s ctrl adev\n",
- info->ctrl_hid, info->ctrl_uid);
- return -ENODEV;
- }
+ ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
+ info->ctrl_devname);
+ if (IS_ERR(ctrl_dev))
+ return PTR_ERR(ctrl_dev);
serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
if (!serdev_adev) {
pr_err("error could not get %s serdev adev\n", info->serdev_hid);
- goto put_ctrl_adev;
- }
-
- /* get_first_physical_node() returns a weak ref, no need to put() it */
- ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s ctrl physical dev\n",
- info->ctrl_hid, info->ctrl_uid);
- goto put_serdev_adev;
- }
-
- /* ctrl_dev now points to the controller's parent, get the controller */
- ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s %s ctrl dev\n",
- info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
- goto put_serdev_adev;
+ goto put_ctrl_dev;
}
serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
@@ -244,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
put_serdev_adev:
acpi_dev_put(serdev_adev);
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
+put_ctrl_dev:
+ put_device(ctrl_dev);
return ret;
}
@@ -266,6 +291,11 @@ static void x86_android_tablet_remove(struct platform_device *pdev)
kfree(pdevs);
kfree(buttons);
+ for (i = 0; i < spi_dev_count; i++)
+ spi_unregister_device(spi_devs[i]);
+
+ kfree(spi_devs);
+
for (i = 0; i < i2c_client_count; i++)
i2c_unregister_device(i2c_clients[i]);
@@ -336,6 +366,21 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
}
}
+ spi_devs = kcalloc(dev_info->spi_dev_count, sizeof(*spi_devs), GFP_KERNEL);
+ if (!spi_devs) {
+ x86_android_tablet_remove(pdev);
+ return -ENOMEM;
+ }
+
+ spi_dev_count = dev_info->spi_dev_count;
+ for (i = 0; i < spi_dev_count; i++) {
+ ret = x86_instantiate_spi_dev(dev_info, i);
+ if (ret < 0) {
+ x86_android_tablet_remove(pdev);
+ return ret;
+ }
+ }
+
/* + 1 to make space for (optional) gpio_keys_button pdev */
pdevs = kcalloc(dev_info->pdev_count + 1, sizeof(*pdevs), GFP_KERNEL);
if (!pdevs) {
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index 50ac5afa0..c29739195 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -12,6 +12,8 @@
#include <linux/efi.h>
#include <linux/gpio/machine.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
@@ -32,12 +34,30 @@
*
* To avoid having to have a similar hack in the mainline kernel program the
* LP8557 to directly set the level and use the lp855x_bl driver for control.
+ *
+ * The LP8557 can be configured either to multiply its PWM input by the
+ * I2C register set level (requiring both to be at 100% for 100% output),
+ * or to take only the I2C register set level into account.
+ *
+ * Multiplying the two levels is useful because the backlight then turns
+ * off automatically when the panel goes off and stops its PWM output.
+ *
+ * But on some models the panel's PWM output defaults to a duty cycle of
+ * much less than 100%, severely limiting maximum brightness. In that case
+ * the LP8557 should be configured to take only the I2C register into
+ * account, and the i915 driver must turn off the panel and the backlight
+ * separately, e.g. using VBT MIPI sequences to turn off the backlight.
*/
-static struct lp855x_platform_data lenovo_lp8557_pdata = {
+static struct lp855x_platform_data lenovo_lp8557_pwm_and_reg_pdata = {
.device_control = 0x86,
.initial_brightness = 128,
};
+static struct lp855x_platform_data lenovo_lp8557_reg_only_pdata = {
+ .device_control = 0x85,
+ .initial_brightness = 128,
+};
+
/* Lenovo Yoga Book X90F / X90L's Android factory img has everything hardcoded */
static const struct property_entry lenovo_yb1_x90_wacom_props[] = {
@@ -121,7 +141,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
.type = "lp8557",
.addr = 0x2c,
.dev_name = "lp8557",
- .platform_data = &lenovo_lp8557_pdata,
+ .platform_data = &lenovo_lp8557_pwm_and_reg_pdata,
},
.adapter_path = "\\_SB_.PCI0.I2C4",
}, {
@@ -357,7 +377,7 @@ static struct x86_i2c_client_info lenovo_yoga_tab2_830_1050_i2c_clients[] __init
.type = "lp8557",
.addr = 0x2c,
.dev_name = "lp8557",
- .platform_data = &lenovo_lp8557_pdata,
+ .platform_data = &lenovo_lp8557_pwm_and_reg_pdata,
},
.adapter_path = "\\_SB_.I2C3",
},
@@ -654,12 +674,94 @@ static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = {
.type = "lp8557",
.addr = 0x2c,
.dev_name = "lp8557",
- .platform_data = &lenovo_lp8557_pdata,
+ .platform_data = &lenovo_lp8557_reg_only_pdata,
},
.adapter_path = "\\_SB_.PCI0.I2C1",
}
};
+/*
+ * The AOSP 3.5 mm Headset: Accessory Specification gives the following values:
+ *   Function A, Play/Pause:           0 ohm
+ *   Function D, Voice assistant:    135 ohm
+ *   Function B, Volume Up:          240 ohm
+ *   Function C, Volume Down:        470 ohm
+ *   Minimum Mic DC resistance:    1000 ohm
+ *   Minimum Ear speaker impedance:  16 ohm
+ * Note the first max value below must be less than the min. speaker impedance,
+ * to allow CTIA/OMTP detection to work. The other max values are the closest
+ * values from extcon-arizona.c:arizona_micd_levels halfway between 2 button
+ * resistances.
+ */
+static const struct arizona_micd_range arizona_micd_aosp_ranges[] = {
+ { .max = 11, .key = KEY_PLAYPAUSE },
+ { .max = 186, .key = KEY_VOICECOMMAND },
+ { .max = 348, .key = KEY_VOLUMEUP },
+ { .max = 752, .key = KEY_VOLUMEDOWN },
+};
+
+/* YT3 WM5102 arizona_micd_config comes from Android kernel sources */
+static struct arizona_micd_config lenovo_yt3_wm5102_micd_config[] = {
+ { 0, 1, 0 },
+ { ARIZONA_ACCDET_SRC, 2, 1 },
+};
+
+static struct arizona_pdata lenovo_yt3_wm5102_pdata = {
+ .irq_flags = IRQF_TRIGGER_LOW,
+ .micd_detect_debounce = 200,
+ .micd_ranges = arizona_micd_aosp_ranges,
+ .num_micd_ranges = ARRAY_SIZE(arizona_micd_aosp_ranges),
+ .hpdet_channel = ARIZONA_ACCDET_MODE_HPL,
+
+ /* Below settings come from Android kernel sources */
+ .micd_bias_start_time = 1,
+ .micd_rate = 6,
+ .micd_configs = lenovo_yt3_wm5102_micd_config,
+ .num_micd_configs = ARRAY_SIZE(lenovo_yt3_wm5102_micd_config),
+ .micbias = {
+ [0] = { /* MICBIAS1 */
+ .mV = 2800,
+ .ext_cap = 1,
+ .discharge = 1,
+ .soft_start = 0,
+ .bypass = 0,
+ },
+ [1] = { /* MICBIAS2 */
+ .mV = 2800,
+ .ext_cap = 1,
+ .discharge = 1,
+ .soft_start = 0,
+ .bypass = 0,
+ },
+		[2] = { /* MICBIAS3 */
+ .mV = 2800,
+ .ext_cap = 1,
+ .discharge = 1,
+ .soft_start = 0,
+ .bypass = 0,
+ },
+ },
+};
+
+static const struct x86_spi_dev_info lenovo_yt3_spi_devs[] __initconst = {
+ {
+ /* WM5102 codec */
+ .board_info = {
+ .modalias = "wm5102",
+ .platform_data = &lenovo_yt3_wm5102_pdata,
+ .max_speed_hz = 5000000,
+ },
+ .ctrl_path = "\\_SB_.PCI0.SPI1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FF:00",
+ .index = 91,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ .con_id = "wm5102_irq",
+ },
+ }
+};
+
static int __init lenovo_yt3_init(void)
{
int ret;
@@ -703,14 +805,28 @@ static struct gpiod_lookup_table lenovo_yt3_hideep_gpios = {
},
};
+static struct gpiod_lookup_table lenovo_yt3_wm5102_gpios = {
+ .dev_id = "spi1.0",
+ .table = {
+ GPIO_LOOKUP("INT33FF:00", 75, "wlf,spkvdd-ena", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FF:00", 81, "wlf,ldoena", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FF:00", 82, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("arizona", 2, "wlf,micd-pol", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
static struct gpiod_lookup_table * const lenovo_yt3_gpios[] = {
&lenovo_yt3_hideep_gpios,
+ &lenovo_yt3_wm5102_gpios,
NULL
};
const struct x86_dev_info lenovo_yt3_info __initconst = {
.i2c_client_info = lenovo_yt3_i2c_clients,
.i2c_client_count = ARRAY_SIZE(lenovo_yt3_i2c_clients),
+ .spi_dev_info = lenovo_yt3_spi_devs,
+ .spi_dev_count = ARRAY_SIZE(lenovo_yt3_spi_devs),
.gpiod_lookup_tables = lenovo_yt3_gpios,
.init = lenovo_yt3_init,
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 136a2b359..468993edf 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -14,6 +14,7 @@
#include <linux/gpio_keys.h>
#include <linux/i2c.h>
#include <linux/irqdomain_defs.h>
+#include <linux/spi/spi.h>
struct gpio_desc;
struct gpiod_lookup_table;
@@ -49,6 +50,12 @@ struct x86_i2c_client_info {
struct x86_acpi_irq_data irq_data;
};
+struct x86_spi_dev_info {
+ struct spi_board_info board_info;
+ char *ctrl_path;
+ struct x86_acpi_irq_data irq_data;
+};
+
struct x86_serdev_info {
const char *ctrl_hid;
const char *ctrl_uid;
@@ -73,10 +80,12 @@ struct x86_dev_info {
const struct software_node *bat_swnode;
struct gpiod_lookup_table * const *gpiod_lookup_tables;
const struct x86_i2c_client_info *i2c_client_info;
+ const struct x86_spi_dev_info *spi_dev_info;
const struct platform_device_info *pdev_info;
const struct x86_serdev_info *serdev_info;
const struct x86_gpio_button *gpio_button;
int i2c_client_count;
+ int spi_dev_count;
int pdev_count;
int serdev_count;
int gpio_button_count;
diff --git a/drivers/pmdomain/Kconfig b/drivers/pmdomain/Kconfig
index c98c5bf75..23c64851a 100644
--- a/drivers/pmdomain/Kconfig
+++ b/drivers/pmdomain/Kconfig
@@ -4,6 +4,7 @@ menu "PM Domains"
source "drivers/pmdomain/actions/Kconfig"
source "drivers/pmdomain/amlogic/Kconfig"
source "drivers/pmdomain/apple/Kconfig"
+source "drivers/pmdomain/arm/Kconfig"
source "drivers/pmdomain/bcm/Kconfig"
source "drivers/pmdomain/imx/Kconfig"
source "drivers/pmdomain/mediatek/Kconfig"
diff --git a/drivers/pmdomain/Makefile b/drivers/pmdomain/Makefile
index f0326b27b..a68ece2f4 100644
--- a/drivers/pmdomain/Makefile
+++ b/drivers/pmdomain/Makefile
@@ -16,3 +16,4 @@ obj-y += sunxi/
obj-y += tegra/
obj-y += ti/
obj-y += xilinx/
+obj-y += core.o governor.o
diff --git a/drivers/pmdomain/amlogic/meson-ee-pwrc.c b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
index 0dd71cd81..fcec6eb61 100644
--- a/drivers/pmdomain/amlogic/meson-ee-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-ee-pwrc.c
@@ -47,6 +47,8 @@
#define G12A_HHI_NANOQ_MEM_PD_REG0 (0x43 << 2)
#define G12A_HHI_NANOQ_MEM_PD_REG1 (0x44 << 2)
+#define G12A_HHI_ISP_MEM_PD_REG0 (0x45 << 2)
+#define G12A_HHI_ISP_MEM_PD_REG1 (0x46 << 2)
struct meson_ee_pwrc;
struct meson_ee_pwrc_domain;
@@ -115,6 +117,13 @@ static struct meson_ee_pwrc_top_domain g12a_pwrc_nna = {
.iso_mask = BIT(16) | BIT(17),
};
+static struct meson_ee_pwrc_top_domain g12a_pwrc_isp = {
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .sleep_mask = BIT(18) | BIT(19),
+ .iso_reg = GX_AO_RTI_GEN_PWR_ISO0,
+ .iso_mask = BIT(18) | BIT(19),
+};
+
/* Memory PD Domains */
#define VPU_MEMPD(__reg) \
@@ -231,6 +240,11 @@ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_nna[] = {
{ G12A_HHI_NANOQ_MEM_PD_REG1, GENMASK(31, 0) },
};
+static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_isp[] = {
+ { G12A_HHI_ISP_MEM_PD_REG0, GENMASK(31, 0) },
+ { G12A_HHI_ISP_MEM_PD_REG1, GENMASK(31, 0) },
+};
+
#define VPU_PD(__name, __top_pd, __mem, __is_pwr_off, __resets, __clks) \
{ \
.name = __name, \
@@ -269,6 +283,8 @@ static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
[PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
[PWRC_G12A_NNA_ID] = TOP_PD("NNA", &g12a_pwrc_nna, g12a_pwrc_mem_nna,
pwrc_ee_is_powered_off),
+ [PWRC_G12A_ISP_ID] = TOP_PD("ISP", &g12a_pwrc_isp, g12a_pwrc_mem_isp,
+ pwrc_ee_is_powered_off),
};
static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
diff --git a/drivers/pmdomain/arm/Kconfig b/drivers/pmdomain/arm/Kconfig
new file mode 100644
index 000000000..efa139c34
--- /dev/null
+++ b/drivers/pmdomain/arm/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARM_SCMI_PERF_DOMAIN
+ tristate "SCMI performance domain driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCMI performance domains which can be
+ enabled or disabled via the SCP firmware.
+
+ This driver can also be built as a module. If so, the module will be
+ called scmi_perf_domain.
+
+config ARM_SCMI_POWER_DOMAIN
+ tristate "SCMI power domain driver"
+ depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCMI power domains which can be
+	  enabled or disabled via the SCP firmware.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called scmi_pm_domain. Note this may be needed early in
+	  boot, before the rootfs is available.
+
+config ARM_SCPI_POWER_DOMAIN
+ tristate "SCPI power domain driver"
+ depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCPI power domains which can be
+	  enabled or disabled via the SCP firmware.
+
+ This driver can also be built as a module. If so, the module will be
+ called scpi_pm_domain.
diff --git a/drivers/pmdomain/arm/Makefile b/drivers/pmdomain/arm/Makefile
index cfcb1f6cd..502fe4d0a 100644
--- a/drivers/pmdomain/arm/Makefile
+++ b/drivers/pmdomain/arm/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_ARM_SCMI_PERF_DOMAIN) += scmi_perf_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
+obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/pmdomain/arm/scpi_pm_domain.c
index 2231e6dd2..2231e6dd2 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/pmdomain/arm/scpi_pm_domain.c
diff --git a/drivers/base/power/domain.c b/drivers/pmdomain/core.c
index 011948f02..18e232b5e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/pmdomain/core.c
@@ -23,8 +23,6 @@
#include <linux/cpu.h>
#include <linux/debugfs.h>
-#include "power.h"
-
#define GENPD_RETRY_MAX_MS 250 /* Approximate */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
@@ -2670,7 +2668,7 @@ static void genpd_release_dev(struct device *dev)
kfree(dev);
}
-static struct bus_type genpd_bus_type = {
+static const struct bus_type genpd_bus_type = {
.name = "genpd",
};
@@ -3042,38 +3040,6 @@ int of_genpd_parse_idle_states(struct device_node *dn,
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
-/**
- * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
- *
- * @genpd_dev: Genpd's device for which the performance-state needs to be found.
- * @opp: struct dev_pm_opp of the OPP for which we need to find performance
- * state.
- *
- * Returns performance state encoded in the OPP of the genpd. This calls
- * platform specific genpd->opp_to_performance_state() callback to translate
- * power domain OPP to performance state.
- *
- * Returns performance state on success and 0 on failure.
- */
-unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp)
-{
- struct generic_pm_domain *genpd = NULL;
- int state;
-
- genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
-
- if (unlikely(!genpd->opp_to_performance_state))
- return 0;
-
- genpd_lock(genpd);
- state = genpd->opp_to_performance_state(genpd, opp);
- genpd_unlock(genpd);
-
- return state;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
-
static int __init genpd_bus_init(void)
{
return bus_register(&genpd_bus_type);
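Besides dropping pm_genpd_opp_to_performance_state(), the core.c hunk above constifies genpd_bus_type; bus_register() accepts a const pointer on recent kernels, so a bus descriptor that never changes at runtime can live in rodata. A minimal sketch of the pattern (the example_* names are hypothetical):

#include <linux/device.h>
#include <linux/init.h>

static const struct bus_type example_bus_type = {
        .name = "example",
};

static int __init example_bus_init(void)
{
        /* bus_register() takes a const struct bus_type * here */
        return bus_register(&example_bus_type);
}
postcore_initcall(example_bus_init);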
diff --git a/drivers/base/power/domain_governor.c b/drivers/pmdomain/governor.c
index cc2c3a5a6..d1a10eeeb 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/pmdomain/governor.c
@@ -49,6 +49,8 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
/**
* default_suspend_ok - Default PM domain governor routine to suspend devices.
* @dev: Device to check.
+ *
+ * Returns: true if OK to suspend, false if not OK to suspend
*/
static bool default_suspend_ok(struct device *dev)
{
@@ -261,6 +263,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
* @now: current ktime.
*
* This routine must be executed under the PM domain's lock.
+ *
+ * Returns: true if OK to power down, false if not OK to power down
*/
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
@@ -406,8 +410,8 @@ struct dev_power_governor simple_qos_governor = {
.power_down_ok = default_power_down_ok,
};
-/**
- * pm_genpd_gov_always_on - A governor implementing an always-on policy
+/*
+ * pm_domain_always_on_gov - A governor implementing an always-on policy
*/
struct dev_power_governor pm_domain_always_on_gov = {
.suspend_ok = default_suspend_ok,
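The governor.c hunk documents the boolean contract of the two governor hooks. For reference, a trivial custom governor plugs the same two callbacks into struct dev_power_governor; this is a hedged sketch with invented example_* names, not code from this patch:

#include <linux/pm_domain.h>

static bool example_suspend_ok(struct device *dev)
{
        return true;            /* never veto suspending a device */
}

static bool example_power_down_ok(struct dev_pm_domain *pd)
{
        return false;           /* keep the whole domain powered */
}

static struct dev_power_governor example_gov = {
        .suspend_ok     = example_suspend_ok,
        .power_down_ok  = example_power_down_ok,
};

Such a governor would be passed as the second argument of pm_genpd_init().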
diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index 7d81e3171..9517cce93 100644
--- a/drivers/pmdomain/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
@@ -212,7 +212,7 @@ genpd_err:
return ret;
}
-static int imx_pgc_power_domain_remove(struct platform_device *pdev)
+static void imx_pgc_power_domain_remove(struct platform_device *pdev)
{
struct imx_pm_domain *domain = pdev->dev.platform_data;
@@ -221,8 +221,6 @@ static int imx_pgc_power_domain_remove(struct platform_device *pdev)
pm_genpd_remove(&domain->base);
imx_pgc_put_clocks(domain);
}
-
- return 0;
}
static const struct platform_device_id imx_pgc_power_domain_id[] = {
@@ -235,7 +233,7 @@ static struct platform_driver imx_pgc_power_domain_driver = {
.name = "imx-pgc-pd",
},
.probe = imx_pgc_power_domain_probe,
- .remove = imx_pgc_power_domain_remove,
+ .remove_new = imx_pgc_power_domain_remove,
.id_table = imx_pgc_power_domain_id,
};
builtin_platform_driver(imx_pgc_power_domain_driver)
@@ -511,7 +509,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
return 0;
}
-static int imx_gpc_remove(struct platform_device *pdev)
+static void imx_gpc_remove(struct platform_device *pdev)
{
struct device_node *pgc_node;
int ret;
@@ -521,7 +519,7 @@ static int imx_gpc_remove(struct platform_device *pdev)
/* bail out if DT too old and doesn't provide the necessary info */
if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
!pgc_node)
- return 0;
+ return;
/*
* If the old DT binding is used the toplevel driver needs to
@@ -531,16 +529,20 @@ static int imx_gpc_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to remove PU power domain (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to remove ARM power domain (%pe)\n",
+ ERR_PTR(ret));
+ return;
+ }
}
-
- return 0;
}
static struct platform_driver imx_gpc_driver = {
@@ -549,6 +551,6 @@ static struct platform_driver imx_gpc_driver = {
.of_match_table = imx_gpc_dt_ids,
},
.probe = imx_gpc_probe,
- .remove = imx_gpc_remove,
+ .remove_new = imx_gpc_remove,
};
builtin_platform_driver(imx_gpc_driver)
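This gpc.c change is the first of many conversions in this series from .remove (returning int) to .remove_new (returning void): the platform core ignores the return value anyway, so errors at remove time are now logged by the driver instead of being returned. The shape of the conversion, reduced to a hypothetical skeleton:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;
}

static void example_remove(struct platform_device *pdev)
{
        /*
         * Clean up unconditionally. There is nobody to report an error
         * to at this point, so use dev_err() instead of a return code.
         */
}

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example",
        },
        .probe          = example_probe,
        .remove_new     = example_remove,       /* void-returning variant */
};
module_platform_driver(example_driver);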
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index fbd3d92f8..4b828d74a 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1373,7 +1373,7 @@ out_domain_unmap:
return ret;
}
-static int imx_pgc_domain_remove(struct platform_device *pdev)
+static void imx_pgc_domain_remove(struct platform_device *pdev)
{
struct imx_pgc_domain *domain = pdev->dev.platform_data;
@@ -1385,8 +1385,6 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
domain->bits.map, 0);
pm_runtime_disable(domain->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1430,7 +1428,7 @@ static struct platform_driver imx_pgc_domain_driver = {
.pm = &imx_pgc_domain_pm_ops,
},
.probe = imx_pgc_domain_probe,
- .remove = imx_pgc_domain_remove,
+ .remove_new = imx_pgc_domain_remove,
.id_table = imx_pgc_domain_id,
};
builtin_platform_driver(imx_pgc_domain_driver)
diff --git a/drivers/pmdomain/imx/imx8m-blk-ctrl.c b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
index cc5ef6e2f..1341a707f 100644
--- a/drivers/pmdomain/imx/imx8m-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8m-blk-ctrl.c
@@ -330,7 +330,7 @@ cleanup_pds:
return ret;
}
-static int imx8m_blk_ctrl_remove(struct platform_device *pdev)
+static void imx8m_blk_ctrl_remove(struct platform_device *pdev)
{
struct imx8m_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
int i;
@@ -347,8 +347,6 @@ static int imx8m_blk_ctrl_remove(struct platform_device *pdev)
dev_pm_genpd_remove_notifier(bc->bus_power_dev);
dev_pm_domain_detach(bc->bus_power_dev, true);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -888,7 +886,7 @@ MODULE_DEVICE_TABLE(of, imx8m_blk_ctrl_of_match);
static struct platform_driver imx8m_blk_ctrl_driver = {
.probe = imx8m_blk_ctrl_probe,
- .remove = imx8m_blk_ctrl_remove,
+ .remove_new = imx8m_blk_ctrl_remove,
.driver = {
.name = "imx8m-blk-ctrl",
.pm = &imx8m_blk_ctrl_pm_ops,
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index c6ac32c1a..a56f7f92d 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -55,7 +55,7 @@ struct imx8mp_blk_ctrl_domain_data {
const char *gpc_name;
};
-#define DOMAIN_MAX_CLKS 2
+#define DOMAIN_MAX_CLKS 3
#define DOMAIN_MAX_PATHS 3
struct imx8mp_blk_ctrl_domain {
@@ -457,8 +457,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
},
[IMX8MP_HDMIBLK_PD_LCDIF] = {
.name = "hdmiblk-lcdif",
- .clk_names = (const char *[]){ "axi", "apb" },
- .num_clks = 2,
+ .clk_names = (const char *[]){ "axi", "apb", "fdcc" },
+ .num_clks = 3,
.gpc_name = "lcdif",
.path_names = (const char *[]){"lcdif-hdmi"},
.num_paths = 1,
@@ -483,8 +483,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
},
[IMX8MP_HDMIBLK_PD_HDMI_TX] = {
.name = "hdmiblk-hdmi-tx",
- .clk_names = (const char *[]){ "apb", "ref_266m" },
- .num_clks = 2,
+ .clk_names = (const char *[]){ "apb", "ref_266m", "fdcc" },
+ .num_clks = 3,
.gpc_name = "hdmi-tx",
},
[IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = {
@@ -760,7 +760,7 @@ cleanup_pds:
return ret;
}
-static int imx8mp_blk_ctrl_remove(struct platform_device *pdev)
+static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
{
struct imx8mp_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
int i;
@@ -777,8 +777,6 @@ static int imx8mp_blk_ctrl_remove(struct platform_device *pdev)
dev_pm_genpd_remove_notifier(bc->bus_power_dev);
dev_pm_domain_detach(bc->bus_power_dev, true);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -856,7 +854,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_blk_ctrl_of_match);
static struct platform_driver imx8mp_blk_ctrl_driver = {
.probe = imx8mp_blk_ctrl_probe,
- .remove = imx8mp_blk_ctrl_remove,
+ .remove_new = imx8mp_blk_ctrl_remove,
.driver = {
.name = "imx8mp-blk-ctrl",
.pm = &imx8mp_blk_ctrl_pm_ops,
diff --git a/drivers/pmdomain/imx/imx93-blk-ctrl.c b/drivers/pmdomain/imx/imx93-blk-ctrl.c
index 40bd90f8b..904ffa55b 100644
--- a/drivers/pmdomain/imx/imx93-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx93-blk-ctrl.c
@@ -306,7 +306,7 @@ cleanup_pds:
return ret;
}
-static int imx93_blk_ctrl_remove(struct platform_device *pdev)
+static void imx93_blk_ctrl_remove(struct platform_device *pdev)
{
struct imx93_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
int i;
@@ -318,8 +318,6 @@ static int imx93_blk_ctrl_remove(struct platform_device *pdev)
pm_genpd_remove(&domain->genpd);
}
-
- return 0;
}
static const struct imx93_blk_ctrl_domain_data imx93_media_blk_ctl_domain_data[] = {
@@ -438,7 +436,7 @@ MODULE_DEVICE_TABLE(of, imx93_blk_ctrl_of_match);
static struct platform_driver imx93_blk_ctrl_driver = {
.probe = imx93_blk_ctrl_probe,
- .remove = imx93_blk_ctrl_remove,
+ .remove_new = imx93_blk_ctrl_remove,
.driver = {
.name = "imx93-blk-ctrl",
.of_match_table = imx93_blk_ctrl_of_match,
diff --git a/drivers/pmdomain/imx/imx93-pd.c b/drivers/pmdomain/imx/imx93-pd.c
index b9e60d136..1e94b499c 100644
--- a/drivers/pmdomain/imx/imx93-pd.c
+++ b/drivers/pmdomain/imx/imx93-pd.c
@@ -83,7 +83,7 @@ static int imx93_pd_off(struct generic_pm_domain *genpd)
return 0;
};
-static int imx93_pd_remove(struct platform_device *pdev)
+static void imx93_pd_remove(struct platform_device *pdev)
{
struct imx93_power_domain *domain = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -94,8 +94,6 @@ static int imx93_pd_remove(struct platform_device *pdev)
of_genpd_del_provider(np);
pm_genpd_remove(&domain->genpd);
-
- return 0;
}
static int imx93_pd_probe(struct platform_device *pdev)
@@ -167,7 +165,7 @@ static struct platform_driver imx93_power_domain_driver = {
.of_match_table = imx93_pd_ids,
},
.probe = imx93_pd_probe,
- .remove = imx93_pd_remove,
+ .remove_new = imx93_pd_remove,
};
module_platform_driver(imx93_power_domain_driver);
diff --git a/drivers/pmdomain/qcom/cpr.c b/drivers/pmdomain/qcom/cpr.c
index e9dd42bde..c64e84a27 100644
--- a/drivers/pmdomain/qcom/cpr.c
+++ b/drivers/pmdomain/qcom/cpr.c
@@ -1712,7 +1712,7 @@ err_remove_genpd:
return ret;
}
-static int cpr_remove(struct platform_device *pdev)
+static void cpr_remove(struct platform_device *pdev)
{
struct cpr_drv *drv = platform_get_drvdata(pdev);
@@ -1725,8 +1725,6 @@ static int cpr_remove(struct platform_device *pdev)
pm_genpd_remove(&drv->pd);
debugfs_remove_recursive(drv->debugfs);
-
- return 0;
}
static const struct of_device_id cpr_match_table[] = {
@@ -1737,7 +1735,7 @@ MODULE_DEVICE_TABLE(of, cpr_match_table);
static struct platform_driver cpr_driver = {
.probe = cpr_probe,
- .remove = cpr_remove,
+ .remove_new = cpr_remove,
.driver = {
.name = "qcom-cpr",
.of_match_table = cpr_match_table,
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index e4d5afab7..de9121ef4 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -597,8 +597,8 @@ static const struct rpmhpd_desc sc8280xp_desc = {
.num_pds = ARRAY_SIZE(sc8280xp_rpmhpds),
};
-/* SC8380xp RPMH powerdomains */
-static struct rpmhpd *sc8380xp_rpmhpds[] = {
+/* X1E80100 RPMH powerdomains */
+static struct rpmhpd *x1e80100_rpmhpds[] = {
[RPMHPD_CX] = &cx,
[RPMHPD_CX_AO] = &cx_ao,
[RPMHPD_EBI] = &ebi,
@@ -614,9 +614,9 @@ static struct rpmhpd *sc8380xp_rpmhpds[] = {
[RPMHPD_GMXC] = &gmxc,
};
-static const struct rpmhpd_desc sc8380xp_desc = {
- .rpmhpds = sc8380xp_rpmhpds,
- .num_pds = ARRAY_SIZE(sc8380xp_rpmhpds),
+static const struct rpmhpd_desc x1e80100_desc = {
+ .rpmhpds = x1e80100_rpmhpds,
+ .num_pds = ARRAY_SIZE(x1e80100_rpmhpds),
};
static const struct of_device_id rpmhpd_match_table[] = {
@@ -628,7 +628,6 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7280-rpmhpd", .data = &sc7280_desc },
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
{ .compatible = "qcom,sc8280xp-rpmhpd", .data = &sc8280xp_desc },
- { .compatible = "qcom,sc8380xp-rpmhpd", .data = &sc8380xp_desc },
{ .compatible = "qcom,sdm670-rpmhpd", .data = &sdm670_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
@@ -642,6 +641,7 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc },
{ .compatible = "qcom,sm8550-rpmhpd", .data = &sm8550_desc },
{ .compatible = "qcom,sm8650-rpmhpd", .data = &sm8650_desc },
+ { .compatible = "qcom,x1e80100-rpmhpd", .data = &x1e80100_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
diff --git a/drivers/pmdomain/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index c2feae3a6..b8ceb3c2b 100644
--- a/drivers/pmdomain/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
@@ -695,6 +695,8 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
data = prm->data;
name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
data->name);
+ if (!name)
+ return -ENOMEM;
prmd->dev = dev;
prmd->prm = prm;
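The omap_prm hunk adds a missing allocation-failure check: devm_kasprintf() returns NULL on out-of-memory, and its result was previously used unchecked. The general pattern, as a small sketch (example_set_name() is a made-up helper):

#include <linux/device.h>
#include <linux/slab.h>

static int example_set_name(struct device *dev, const char *base)
{
        char *name;

        name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s", base);
        if (!name)
                return -ENOMEM; /* propagate, usually failing the probe */

        dev_info(dev, "using %s\n", name);
        return 0;
}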
diff --git a/drivers/pmdomain/xilinx/zynqmp-pm-domains.c b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
index 69d03ad4c..6fd514286 100644
--- a/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
+++ b/drivers/pmdomain/xilinx/zynqmp-pm-domains.c
@@ -293,11 +293,9 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
return 0;
}
-static int zynqmp_gpd_remove(struct platform_device *pdev)
+static void zynqmp_gpd_remove(struct platform_device *pdev)
{
of_genpd_del_provider(pdev->dev.parent->of_node);
-
- return 0;
}
static void zynqmp_gpd_sync_state(struct device *dev)
@@ -315,7 +313,7 @@ static struct platform_driver zynqmp_power_domain_driver = {
.sync_state = zynqmp_gpd_sync_state,
},
.probe = zynqmp_gpd_probe,
- .remove = zynqmp_gpd_remove,
+ .remove_new = zynqmp_gpd_remove,
};
module_platform_driver(zynqmp_power_domain_driver);
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 46c534f6b..0a5d0d8be 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -256,7 +256,7 @@ static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
.restore = pnp_bus_resume,
};
-struct bus_type pnp_bus_type = {
+const struct bus_type pnp_bus_type = {
.name = "pnp",
.match = pnp_bus_match,
.probe = pnp_device_probe,
diff --git a/drivers/pnp/isapnp/Kconfig b/drivers/pnp/isapnp/Kconfig
index d0479a563..8b5f2e461 100644
--- a/drivers/pnp/isapnp/Kconfig
+++ b/drivers/pnp/isapnp/Kconfig
@@ -7,6 +7,6 @@ config ISAPNP
depends on ISA || COMPILE_TEST
help
Say Y here if you would like support for ISA Plug and Play devices.
- Some information is in <file:Documentation/driver-api/isapnp.rst>.
+ Some information is in <file:Documentation/userspace-api/isapnp.rst>.
If unsure, say Y.
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index ddc6f2163..1f31dce58 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -60,7 +60,7 @@ do { \
set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
} while(0)
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
+static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(DESC_DATA32_BIOS,
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
/*
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index 829e0dba2..ab3350ce2 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -61,13 +61,11 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int as3722_poweroff_remove(struct platform_device *pdev)
+static void as3722_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == as3722_pm_power_off)
pm_power_off = NULL;
as3722_pm_poweroff = NULL;
-
- return 0;
}
static struct platform_driver as3722_poweroff_driver = {
@@ -75,7 +73,7 @@ static struct platform_driver as3722_poweroff_driver = {
.name = "as3722-power-off",
},
.probe = as3722_poweroff_probe,
- .remove = as3722_poweroff_remove,
+ .remove_new = as3722_poweroff_remove,
};
module_platform_driver(as3722_poweroff_driver);
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index dd5399785..93eece027 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -57,7 +57,7 @@ static struct shdwc {
void __iomem *mpddrc_base;
} at91_shdwc;
-static void __init at91_wakeup_status(struct platform_device *pdev)
+static void at91_wakeup_status(struct platform_device *pdev)
{
const char *reason;
u32 reg = readl(at91_shdwc.shdwc_base + AT91_SHDW_SR);
@@ -149,7 +149,7 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
writel(wakeup_mode | mode, at91_shdwc.shdwc_base + AT91_SHDW_MR);
}
-static int __init at91_poweroff_probe(struct platform_device *pdev)
+static int at91_poweroff_probe(struct platform_device *pdev)
{
struct device_node *np;
u32 ddr_type;
@@ -202,7 +202,7 @@ clk_disable:
return ret;
}
-static int __exit at91_poweroff_remove(struct platform_device *pdev)
+static void at91_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == at91_poweroff)
pm_power_off = NULL;
@@ -211,8 +211,6 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
iounmap(at91_shdwc.mpddrc_base);
clk_disable_unprepare(at91_shdwc.sclk);
-
- return 0;
}
static const struct of_device_id at91_poweroff_of_match[] = {
@@ -224,13 +222,14 @@ static const struct of_device_id at91_poweroff_of_match[] = {
MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
static struct platform_driver at91_poweroff_driver = {
- .remove = __exit_p(at91_poweroff_remove),
+ .probe = at91_poweroff_probe,
+ .remove_new = at91_poweroff_remove,
.driver = {
.name = "at91-poweroff",
.of_match_table = at91_poweroff_of_match,
},
};
-module_platform_driver_probe(at91_poweroff_driver, at91_poweroff_probe);
+module_platform_driver(at91_poweroff_driver);
MODULE_AUTHOR("Atmel Corporation");
MODULE_DESCRIPTION("Shutdown driver for Atmel SoCs");
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index aa9b012d3..165126542 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -337,7 +337,7 @@ static int at91_rcdev_init(struct at91_reset *reset,
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
-static int __init at91_reset_probe(struct platform_device *pdev)
+static int at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct at91_reset *reset;
@@ -417,24 +417,23 @@ disable_clk:
return ret;
}
-static int __exit at91_reset_remove(struct platform_device *pdev)
+static void at91_reset_remove(struct platform_device *pdev)
{
struct at91_reset *reset = platform_get_drvdata(pdev);
unregister_restart_handler(&reset->nb);
clk_disable_unprepare(reset->sclk);
-
- return 0;
}
static struct platform_driver at91_reset_driver = {
- .remove = __exit_p(at91_reset_remove),
+ .probe = at91_reset_probe,
+ .remove_new = at91_reset_remove,
.driver = {
.name = "at91-reset",
.of_match_table = at91_reset_of_match,
},
};
-module_platform_driver_probe(at91_reset_driver, at91_reset_probe);
+module_platform_driver(at91_reset_driver);
MODULE_AUTHOR("Atmel Corporation");
MODULE_DESCRIPTION("Reset driver for Atmel SoCs");
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e76b102b5..959ce0dbe 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -107,7 +107,7 @@ static const unsigned long long sdwc_dbc_period[] = {
0, 3, 32, 512, 4096, 32768,
};
-static void __init at91_wakeup_status(struct platform_device *pdev)
+static void at91_wakeup_status(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
const struct reg_config *rcfg = shdw->rcfg;
@@ -329,7 +329,7 @@ static const struct of_device_id at91_pmc_ids[] = {
{ /* Sentinel. */ }
};
-static int __init at91_shdwc_probe(struct platform_device *pdev)
+static int at91_shdwc_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device_node *np;
@@ -421,7 +421,7 @@ clk_disable:
return ret;
}
-static int __exit at91_shdwc_remove(struct platform_device *pdev)
+static void at91_shdwc_remove(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
@@ -437,18 +437,17 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev)
iounmap(shdw->pmc_base);
clk_disable_unprepare(shdw->sclk);
-
- return 0;
}
static struct platform_driver at91_shdwc_driver = {
- .remove = __exit_p(at91_shdwc_remove),
+ .probe = at91_shdwc_probe,
+ .remove_new = at91_shdwc_remove,
.driver = {
.name = "at91-shdwc",
.of_match_table = at91_shdwc_of_match,
},
};
-module_platform_driver_probe(at91_shdwc_driver, at91_shdwc_probe);
+module_platform_driver(at91_shdwc_driver);
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_DESCRIPTION("Atmel shutdown controller driver");
diff --git a/drivers/power/reset/atc260x-poweroff.c b/drivers/power/reset/atc260x-poweroff.c
index 98f20251a..b4aa50e96 100644
--- a/drivers/power/reset/atc260x-poweroff.c
+++ b/drivers/power/reset/atc260x-poweroff.c
@@ -233,7 +233,7 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
return ret;
}
-static int atc260x_pwrc_remove(struct platform_device *pdev)
+static void atc260x_pwrc_remove(struct platform_device *pdev)
{
struct atc260x_pwrc *priv = platform_get_drvdata(pdev);
@@ -243,13 +243,11 @@ static int atc260x_pwrc_remove(struct platform_device *pdev)
}
unregister_restart_handler(&priv->restart_nb);
-
- return 0;
}
static struct platform_driver atc260x_pwrc_driver = {
.probe = atc260x_pwrc_probe,
- .remove = atc260x_pwrc_remove,
+ .remove_new = atc260x_pwrc_remove,
.driver = {
.name = "atc260x-pwrc",
},
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 3aa197657..d1e177176 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -17,17 +17,14 @@
struct gpio_restart {
struct gpio_desc *reset_gpio;
- struct notifier_block restart_handler;
u32 active_delay_ms;
u32 inactive_delay_ms;
u32 wait_delay_ms;
};
-static int gpio_restart_notify(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int gpio_restart_notify(struct sys_off_data *data)
{
- struct gpio_restart *gpio_restart =
- container_of(this, struct gpio_restart, restart_handler);
+ struct gpio_restart *gpio_restart = data->cb_data;
/* drive it active, also inactive->active edge */
gpiod_direction_output(gpio_restart->reset_gpio, 1);
@@ -52,6 +49,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
{
struct gpio_restart *gpio_restart;
bool open_source = false;
+ int priority = 129;
u32 property;
int ret;
@@ -71,8 +69,6 @@ static int gpio_restart_probe(struct platform_device *pdev)
return ret;
}
- gpio_restart->restart_handler.notifier_call = gpio_restart_notify;
- gpio_restart->restart_handler.priority = 129;
gpio_restart->active_delay_ms = 100;
gpio_restart->inactive_delay_ms = 100;
gpio_restart->wait_delay_ms = 3000;
@@ -83,7 +79,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid priority property: %u\n",
property);
else
- gpio_restart->restart_handler.priority = property;
+ priority = property;
}
of_property_read_u32(pdev->dev.of_node, "active-delay",
@@ -93,9 +89,11 @@ static int gpio_restart_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "wait-delay",
&gpio_restart->wait_delay_ms);
- platform_set_drvdata(pdev, gpio_restart);
-
- ret = register_restart_handler(&gpio_restart->restart_handler);
+ ret = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ priority,
+ gpio_restart_notify,
+ gpio_restart);
if (ret) {
dev_err(&pdev->dev, "%s: cannot register restart handler, %d\n",
__func__, ret);
@@ -105,19 +103,6 @@ static int gpio_restart_probe(struct platform_device *pdev)
return 0;
}
-static void gpio_restart_remove(struct platform_device *pdev)
-{
- struct gpio_restart *gpio_restart = platform_get_drvdata(pdev);
- int ret;
-
- ret = unregister_restart_handler(&gpio_restart->restart_handler);
- if (ret) {
- dev_err(&pdev->dev,
- "%s: cannot unregister restart handler, %d\n",
- __func__, ret);
- }
-}
-
static const struct of_device_id of_gpio_restart_match[] = {
{ .compatible = "gpio-restart", },
{},
@@ -125,7 +110,6 @@ static const struct of_device_id of_gpio_restart_match[] = {
static struct platform_driver gpio_restart_driver = {
.probe = gpio_restart_probe,
- .remove_new = gpio_restart_remove,
.driver = {
.name = "restart-gpio",
.of_match_table = of_gpio_restart_match,
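gpio-restart above replaces its open-coded restart notifier_block with devm_register_sys_off_handler(): the priority becomes a local variable, the private data arrives via sys_off_data->cb_data, and the devres core unregisters the handler automatically, which is why the whole .remove_new callback could be deleted. A hedged sketch of the API (example_* names are invented):

#include <linux/device.h>
#include <linux/reboot.h>

static int example_restart(struct sys_off_data *data)
{
        void *priv = data->cb_data;     /* cb_data passed at registration */

        /* ... kick the hardware using priv ... */
        return NOTIFY_DONE;
}

static int example_register(struct device *dev, void *priv)
{
        /*
         * devm-managed: the handler goes away on unbind, so no explicit
         * unregister (and often no remove callback) is needed.
         */
        return devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
                                             SYS_OFF_PRIO_DEFAULT,
                                             example_restart, priv);
}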
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index eea05921a..fa25fbd53 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -286,7 +286,7 @@ static int ltc2952_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int ltc2952_poweroff_remove(struct platform_device *pdev)
+static void ltc2952_poweroff_remove(struct platform_device *pdev)
{
struct ltc2952_poweroff *data = platform_get_drvdata(pdev);
@@ -295,7 +295,6 @@ static int ltc2952_poweroff_remove(struct platform_device *pdev)
hrtimer_cancel(&data->timer_wde);
atomic_notifier_chain_unregister(&panic_notifier_list,
&data->panic_notifier);
- return 0;
}
static const struct of_device_id of_ltc2952_poweroff_match[] = {
@@ -306,7 +305,7 @@ MODULE_DEVICE_TABLE(of, of_ltc2952_poweroff_match);
static struct platform_driver ltc2952_poweroff_driver = {
.probe = ltc2952_poweroff_probe,
- .remove = ltc2952_poweroff_remove,
+ .remove_new = ltc2952_poweroff_remove,
.driver = {
.name = "ltc2952-poweroff",
.of_match_table = of_ltc2952_poweroff_match,
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 108167f77..57a63c0ab 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -70,12 +70,10 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
return 0;
}
-static int mt6323_pwrc_remove(struct platform_device *pdev)
+static void mt6323_pwrc_remove(struct platform_device *pdev)
{
if (pm_power_off == &mt6323_do_pwroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id mt6323_pwrc_dt_match[] = {
@@ -86,7 +84,7 @@ MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
static struct platform_driver mt6323_pwrc_driver = {
.probe = mt6323_pwrc_probe,
- .remove = mt6323_pwrc_remove,
+ .remove_new = mt6323_pwrc_remove,
.driver = {
.name = "mt6323-pwrc",
.of_match_table = mt6323_pwrc_dt_match,
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
index de35d24bb..1775b318d 100644
--- a/drivers/power/reset/pwr-mlxbf.c
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -17,11 +17,17 @@
#include <linux/types.h>
struct pwr_mlxbf {
- struct work_struct send_work;
+ struct work_struct reboot_work;
+ struct work_struct shutdown_work;
const char *hid;
};
-static void pwr_mlxbf_send_work(struct work_struct *work)
+static void pwr_mlxbf_reboot_work(struct work_struct *work)
+{
+ acpi_bus_generate_netlink_event("button/reboot.*", "Reboot Button", 0x80, 1);
+}
+
+static void pwr_mlxbf_shutdown_work(struct work_struct *work)
{
acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
}
@@ -33,10 +39,10 @@ static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
struct pwr_mlxbf *priv = ptr;
if (!strncmp(priv->hid, rst_pwr_hid, 8))
- emergency_restart();
+ schedule_work(&priv->reboot_work);
if (!strncmp(priv->hid, low_pwr_hid, 8))
- schedule_work(&priv->send_work);
+ schedule_work(&priv->shutdown_work);
return IRQ_HANDLED;
}
@@ -64,7 +70,11 @@ static int pwr_mlxbf_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
- err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+ err = devm_work_autocancel(dev, &priv->shutdown_work, pwr_mlxbf_shutdown_work);
+ if (err)
+ return err;
+
+ err = devm_work_autocancel(dev, &priv->reboot_work, pwr_mlxbf_reboot_work);
if (err)
return err;
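pwr-mlxbf now defers both button events to the workqueue: the reboot button generates an ACPI netlink event (like the power button) instead of forcing emergency_restart() straight from the hard IRQ handler, and the netlink path may sleep, so it cannot run in interrupt context. devm_work_autocancel() ties each work item's lifetime to the device. A reduced sketch with hypothetical example_* names:

#include <linux/devm-helpers.h>
#include <linux/interrupt.h>

struct example_priv {
        struct work_struct event_work;
};

static void example_event_work(struct work_struct *work)
{
        /* process context: may sleep, e.g. send a netlink event */
}

static irqreturn_t example_irq(int irq, void *ptr)
{
        struct example_priv *priv = ptr;

        schedule_work(&priv->event_work);       /* IRQ-safe and cheap */
        return IRQ_HANDLED;
}

static int example_init(struct device *dev, struct example_priv *priv)
{
        /* the work is cancelled automatically when the device unbinds */
        return devm_work_autocancel(dev, &priv->event_work,
                                    example_event_work);
}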
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
index 0ddf7f25f..e0f2ff6b1 100644
--- a/drivers/power/reset/qnap-poweroff.c
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -111,15 +111,14 @@ static int qnap_power_off_probe(struct platform_device *pdev)
return 0;
}
-static int qnap_power_off_remove(struct platform_device *pdev)
+static void qnap_power_off_remove(struct platform_device *pdev)
{
pm_power_off = NULL;
- return 0;
}
static struct platform_driver qnap_power_off_driver = {
.probe = qnap_power_off_probe,
- .remove = qnap_power_off_remove,
+ .remove_new = qnap_power_off_remove,
.driver = {
.name = "qnap_power_off",
.of_match_table = of_match_ptr(qnap_power_off_of_match_table),
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
index 7f87fbb8b..15160809c 100644
--- a/drivers/power/reset/regulator-poweroff.c
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -52,12 +52,10 @@ static int regulator_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int regulator_poweroff_remove(__maybe_unused struct platform_device *pdev)
+static void regulator_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == &regulator_poweroff_do_poweroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id of_regulator_poweroff_match[] = {
@@ -68,7 +66,7 @@ MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
static struct platform_driver regulator_poweroff_driver = {
.probe = regulator_poweroff_probe,
- .remove = regulator_poweroff_remove,
+ .remove_new = regulator_poweroff_remove,
.driver = {
.name = "poweroff-regulator",
.of_match_table = of_regulator_poweroff_match,
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index 28f1822db..f4d600479 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -33,12 +33,10 @@ static int restart_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int restart_poweroff_remove(struct platform_device *pdev)
+static void restart_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == &restart_poweroff_do_poweroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id of_restart_poweroff_match[] = {
@@ -49,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_restart_poweroff_match);
static struct platform_driver restart_poweroff_driver = {
.probe = restart_poweroff_probe,
- .remove = restart_poweroff_remove,
+ .remove_new = restart_poweroff_remove,
.driver = {
.name = "poweroff-restart",
.of_match_table = of_restart_poweroff_match,
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index bd3b39655..5df9b41c6 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -59,11 +59,10 @@ fail_unmap:
return error;
}
-static int rmobile_reset_remove(struct platform_device *pdev)
+static void rmobile_reset_remove(struct platform_device *pdev)
{
unregister_restart_handler(&rmobile_reset_nb);
iounmap(sysc_base2);
- return 0;
}
static const struct of_device_id rmobile_reset_of_match[] = {
@@ -74,7 +73,7 @@ MODULE_DEVICE_TABLE(of, rmobile_reset_of_match);
static struct platform_driver rmobile_reset_driver = {
.probe = rmobile_reset_probe,
- .remove = rmobile_reset_remove,
+ .remove_new = rmobile_reset_remove,
.driver = {
.name = "rmobile_reset",
.of_match_table = rmobile_reset_of_match,
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index c3aab7f59..1b2ce7734 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -76,12 +76,10 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int syscon_poweroff_remove(struct platform_device *pdev)
+static void syscon_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == syscon_poweroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id syscon_poweroff_of_match[] = {
@@ -91,7 +89,7 @@ static const struct of_device_id syscon_poweroff_of_match[] = {
static struct platform_driver syscon_poweroff_driver = {
.probe = syscon_poweroff_probe,
- .remove = syscon_poweroff_remove,
+ .remove_new = syscon_poweroff_remove,
.driver = {
.name = "syscon-poweroff",
.of_match_table = syscon_poweroff_of_match,
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
index 5ec819eac..ee8e9f4b8 100644
--- a/drivers/power/reset/tps65086-restart.c
+++ b/drivers/power/reset/tps65086-restart.c
@@ -62,19 +62,21 @@ static int tps65086_restart_probe(struct platform_device *pdev)
return 0;
}
-static int tps65086_restart_remove(struct platform_device *pdev)
+static void tps65086_restart_remove(struct platform_device *pdev)
{
struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
int ret;
ret = unregister_restart_handler(&tps65086_restart->handler);
if (ret) {
+ /*
+ * tps65086_restart_probe() registered the restart handler. So
+ * unregistering should work fine. Checking the error code
+ * shouldn't be needed, still doing it for completeness.
+ */
dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
__func__, ret);
- return -ENODEV;
}
-
- return 0;
}
static const struct platform_device_id tps65086_restart_id_table[] = {
@@ -88,7 +90,7 @@ static struct platform_driver tps65086_restart_driver = {
.name = "tps65086-restart",
},
.probe = tps65086_restart_probe,
- .remove = tps65086_restart_remove,
+ .remove_new = tps65086_restart_remove,
.id_table = tps65086_restart_id_table,
};
module_platform_driver(tps65086_restart_driver);
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 1db290ee2..2b393eb5c 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -36,10 +36,16 @@
#define BQ24190_REG_POC_WDT_RESET_SHIFT 6
#define BQ24190_REG_POC_CHG_CONFIG_MASK (BIT(5) | BIT(4))
#define BQ24190_REG_POC_CHG_CONFIG_SHIFT 4
-#define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
-#define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
-#define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
-#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
+#define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
+#define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
+#define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
+#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
+#define BQ24296_REG_POC_OTG_CONFIG_MASK BIT(5)
+#define BQ24296_REG_POC_OTG_CONFIG_SHIFT 5
+#define BQ24296_REG_POC_CHG_CONFIG_MASK BIT(4)
+#define BQ24296_REG_POC_CHG_CONFIG_SHIFT 4
+#define BQ24296_REG_POC_OTG_CONFIG_DISABLE 0x0
+#define BQ24296_REG_POC_OTG_CONFIG_OTG 0x1
#define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
#define BQ24190_REG_POC_SYS_MIN_MIN 3000
@@ -134,59 +140,24 @@
#define BQ24190_REG_F_BAT_FAULT_SHIFT 3
#define BQ24190_REG_F_NTC_FAULT_MASK (BIT(2) | BIT(1) | BIT(0))
#define BQ24190_REG_F_NTC_FAULT_SHIFT 0
+#define BQ24296_REG_F_NTC_FAULT_MASK (BIT(1) | BIT(0))
+#define BQ24296_REG_F_NTC_FAULT_SHIFT 0
#define BQ24190_REG_VPRS 0x0A /* Vendor/Part/Revision Status */
#define BQ24190_REG_VPRS_PN_MASK (BIT(5) | BIT(4) | BIT(3))
#define BQ24190_REG_VPRS_PN_SHIFT 3
-#define BQ24190_REG_VPRS_PN_24190 0x4
-#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193, 24196 */
-#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24190_REG_VPRS_PN_24190 0x4
+#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193, 24196 */
+#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24296_REG_VPRS_PN_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BQ24296_REG_VPRS_PN_SHIFT 5
+#define BQ24296_REG_VPRS_PN_24296 0x1
#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
#define BQ24190_REG_VPRS_DEV_REG_SHIFT 0
/*
- * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
- * so the first read after a fault returns the latched value and subsequent
- * reads return the current value. In order to return the fault status
- * to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine.
- */
-struct bq24190_dev_info {
- struct i2c_client *client;
- struct device *dev;
- struct extcon_dev *edev;
- struct power_supply *charger;
- struct power_supply *battery;
- struct delayed_work input_current_limit_work;
- char model_name[I2C_NAME_SIZE];
- bool initialized;
- bool irq_event;
- bool otg_vbus_enabled;
- int charge_type;
- u16 sys_min;
- u16 iprechg;
- u16 iterm;
- u32 ichg;
- u32 ichg_max;
- u32 vreg;
- u32 vreg_max;
- struct mutex f_reg_lock;
- u8 f_reg;
- u8 ss_reg;
- u8 watchdog;
-};
-
-static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
- const union power_supply_propval *val);
-
-static const unsigned int bq24190_usb_extcon_cable[] = {
- EXTCON_USB,
- EXTCON_NONE,
-};
-
-/*
* The tables below provide a 2-way mapping for the value that goes in
* the register field and the real-world value that it represents.
* The index of the array is the value that goes in the register; the
@@ -211,6 +182,9 @@ static const int bq24190_ccc_ichg_values[] = {
4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
};
+/* ICHG higher than 3008mA is not supported in BQ24296 */
+#define BQ24296_CCC_ICHG_VALUES_LEN 40
+
/* REG04[7:2] (VREG) in uV */
static const int bq24190_cvc_vreg_values[] = {
3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
@@ -228,6 +202,68 @@ static const int bq24190_ictrc_treg_values[] = {
600, 800, 1000, 1200
};
+enum bq24190_chip {
+ BQ24190,
+ BQ24192,
+ BQ24192i,
+ BQ24196,
+ BQ24296,
+};
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value. In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine.
+ */
+struct bq24190_dev_info {
+ struct i2c_client *client;
+ struct device *dev;
+ struct extcon_dev *edev;
+ struct power_supply *charger;
+ struct power_supply *battery;
+ struct delayed_work input_current_limit_work;
+ char model_name[I2C_NAME_SIZE];
+ bool initialized;
+ bool irq_event;
+ bool otg_vbus_enabled;
+ int charge_type;
+ u16 sys_min;
+ u16 iprechg;
+ u16 iterm;
+ u32 ichg;
+ u32 ichg_max;
+ u32 vreg;
+ u32 vreg_max;
+ struct mutex f_reg_lock;
+ u8 f_reg;
+ u8 ss_reg;
+ u8 watchdog;
+ const struct bq24190_chip_info *info;
+};
+
+struct bq24190_chip_info {
+ int ichg_array_size;
+#ifdef CONFIG_REGULATOR
+ const struct regulator_desc *vbus_desc;
+#endif
+ int (*check_chip)(struct bq24190_dev_info *bdi);
+ int (*set_chg_config)(struct bq24190_dev_info *bdi, const u8 chg_config);
+ int (*set_otg_vbus)(struct bq24190_dev_info *bdi, bool enable);
+ u8 ntc_fault_mask;
+ int (*get_ntc_status)(const u8 value);
+};
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val);
+
+static const unsigned int bq24190_usb_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_NONE,
+};
+
/*
* Return the index in 'tbl' of greatest value that is less than or equal to
* 'val'. The index range returned is 0 to 'tbl_size' - 1. Assumes that
@@ -529,6 +565,43 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
return ret;
}
+static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0) {
+ dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
+ return ret;
+ }
+
+ bdi->otg_vbus_enabled = enable;
+ if (enable) {
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_CHG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24190_REG_POC_CHG_CONFIG_DISABLE);
+
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_OTG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24296_REG_POC_OTG_CONFIG_OTG);
+ } else
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_OTG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24296_REG_POC_OTG_CONFIG_DISABLE);
+
+out:
+ pm_runtime_mark_last_busy(bdi->dev);
+ pm_runtime_put_autosuspend(bdi->dev);
+
+ return ret;
+}
+
#ifdef CONFIG_REGULATOR
static int bq24190_vbus_enable(struct regulator_dev *dev)
{
@@ -567,6 +640,43 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
return bdi->otg_vbus_enabled;
}
+static int bq24296_vbus_enable(struct regulator_dev *dev)
+{
+ return bq24296_set_otg_vbus(rdev_get_drvdata(dev), true);
+}
+
+static int bq24296_vbus_disable(struct regulator_dev *dev)
+{
+ return bq24296_set_otg_vbus(rdev_get_drvdata(dev), false);
+}
+
+static int bq24296_vbus_is_enabled(struct regulator_dev *dev)
+{
+ struct bq24190_dev_info *bdi = rdev_get_drvdata(dev);
+ int ret;
+ u8 val;
+
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0) {
+ dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
+ return ret;
+ }
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_OTG_CONFIG_MASK,
+ BQ24296_REG_POC_OTG_CONFIG_SHIFT, &val);
+
+ pm_runtime_mark_last_busy(bdi->dev);
+ pm_runtime_put_autosuspend(bdi->dev);
+
+ if (ret)
+ return ret;
+
+ bdi->otg_vbus_enabled = (val == BQ24296_REG_POC_OTG_CONFIG_OTG);
+
+ return bdi->otg_vbus_enabled;
+}
+
static const struct regulator_ops bq24190_vbus_ops = {
.enable = bq24190_vbus_enable,
.disable = bq24190_vbus_disable,
@@ -583,6 +693,22 @@ static const struct regulator_desc bq24190_vbus_desc = {
.n_voltages = 1,
};
+static const struct regulator_ops bq24296_vbus_ops = {
+ .enable = bq24296_vbus_enable,
+ .disable = bq24296_vbus_disable,
+ .is_enabled = bq24296_vbus_is_enabled,
+};
+
+static const struct regulator_desc bq24296_vbus_desc = {
+ .name = "usb_otg_vbus",
+ .of_match = "usb-otg-vbus",
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &bq24296_vbus_ops,
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+};
+
static const struct regulator_init_data bq24190_vbus_init_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -602,7 +728,7 @@ static int bq24190_register_vbus_regulator(struct bq24190_dev_info *bdi)
else
cfg.init_data = &bq24190_vbus_init_data;
cfg.driver_data = bdi;
- reg = devm_regulator_register(bdi->dev, &bq24190_vbus_desc, &cfg);
+ reg = devm_regulator_register(bdi->dev, bdi->info->vbus_desc, &cfg);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(bdi->dev, "Can't register regulator: %d\n", ret);
@@ -678,7 +804,7 @@ static int bq24190_set_config(struct bq24190_dev_info *bdi)
BQ24190_REG_CCC_ICHG_MASK,
BQ24190_REG_CCC_ICHG_SHIFT,
bq24190_ccc_ichg_values,
- ARRAY_SIZE(bq24190_ccc_ichg_values),
+ bdi->info->ichg_array_size,
bdi->ichg);
if (ret < 0)
return ret;
@@ -777,6 +903,24 @@ static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
return 0;
}
+static int bq24190_battery_set_chg_config(struct bq24190_dev_info *bdi,
+ const u8 chg_config)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+ chg_config);
+}
+
+static int bq24296_battery_set_chg_config(struct bq24190_dev_info *bdi,
+ const u8 chg_config)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_CHG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ chg_config);
+}
+
static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
const union power_supply_propval *val)
{
@@ -835,9 +979,50 @@ static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
return ret;
}
- return bq24190_write_mask(bdi, BQ24190_REG_POC,
- BQ24190_REG_POC_CHG_CONFIG_MASK,
- BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+ return bdi->info->set_chg_config(bdi, chg_config);
+}
+
+static int bq24190_charger_get_ntc_status(u8 value)
+{
+ int health;
+
+ switch (value >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) {
+ case 0x1: /* TS1 Cold */
+ case 0x3: /* TS2 Cold */
+ case 0x5: /* Both Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case 0x2: /* TS1 Hot */
+ case 0x4: /* TS2 Hot */
+ case 0x6: /* Both Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return health;
+}
+
+static int bq24296_charger_get_ntc_status(u8 value)
+{
+ int health;
+
+ switch (value >> BQ24296_REG_F_NTC_FAULT_SHIFT & 0x3) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case 0x2: /* Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return health;
}
static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
@@ -850,21 +1035,8 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
v = bdi->f_reg;
mutex_unlock(&bdi->f_reg_lock);
- if (v & BQ24190_REG_F_NTC_FAULT_MASK) {
- switch (v >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) {
- case 0x1: /* TS1 Cold */
- case 0x3: /* TS2 Cold */
- case 0x5: /* Both Cold */
- health = POWER_SUPPLY_HEALTH_COLD;
- break;
- case 0x2: /* TS1 Hot */
- case 0x4: /* TS2 Hot */
- case 0x6: /* Both Hot */
- health = POWER_SUPPLY_HEALTH_OVERHEAT;
- break;
- default:
- health = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
+ if (v & bdi->info->ntc_fault_mask) {
+ health = bdi->info->get_ntc_status(v);
} else if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
} else if (v & BQ24190_REG_F_CHRG_FAULT_MASK) {
@@ -1015,7 +1187,7 @@ static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
bq24190_ccc_ichg_values,
- ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+ bdi->info->ichg_array_size, &curr);
if (ret < 0)
return ret;
@@ -1055,7 +1227,7 @@ static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
ret = bq24190_set_field_val(bdi, BQ24190_REG_CCC,
BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
bq24190_ccc_ichg_values,
- ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+ bdi->info->ichg_array_size, curr);
if (ret < 0)
return ret;
@@ -1395,26 +1567,9 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
} else {
- v &= BQ24190_REG_F_NTC_FAULT_MASK;
- v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+ v &= bdi->info->ntc_fault_mask;
- switch (v) {
- case 0x0: /* Normal */
- health = POWER_SUPPLY_HEALTH_GOOD;
- break;
- case 0x1: /* TS1 Cold */
- case 0x3: /* TS2 Cold */
- case 0x5: /* Both Cold */
- health = POWER_SUPPLY_HEALTH_COLD;
- break;
- case 0x2: /* TS1 Hot */
- case 0x4: /* TS2 Hot */
- case 0x6: /* Both Hot */
- health = POWER_SUPPLY_HEALTH_OVERHEAT;
- break;
- default:
- health = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
+ health = v ? bdi->info->get_ntc_status(v) : POWER_SUPPLY_HEALTH_GOOD;
}
val->intval = health;
@@ -1601,12 +1756,13 @@ static int bq24190_configure_usb_otg(struct bq24190_dev_info *bdi, u8 ss_reg)
static void bq24190_check_status(struct bq24190_dev_info *bdi)
{
const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
- const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
- | BQ24190_REG_F_NTC_FAULT_MASK;
+ u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK;
bool alert_charger = false, alert_battery = false;
u8 ss_reg = 0, f_reg = 0;
int i, ret;
+ battery_mask_f |= bdi->info->ntc_fault_mask;
+
ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
if (ret < 0) {
dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
@@ -1633,7 +1789,7 @@ static void bq24190_check_status(struct bq24190_dev_info *bdi)
!!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
!!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
!!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
- !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+ !!(f_reg & bdi->info->ntc_fault_mask));
mutex_lock(&bdi->f_reg_lock);
if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
@@ -1696,12 +1852,11 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+static int bq24190_check_chip(struct bq24190_dev_info *bdi)
{
u8 v;
int ret;
- /* First check that the device really is what its supposed to be */
ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
BQ24190_REG_VPRS_PN_MASK,
BQ24190_REG_VPRS_PN_SHIFT,
@@ -1719,6 +1874,40 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
return -ENODEV;
}
+ return 0;
+}
+
+static int bq24296_check_chip(struct bq24190_dev_info *bdi)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+ BQ24296_REG_VPRS_PN_MASK,
+ BQ24296_REG_VPRS_PN_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ switch (v) {
+ case BQ24296_REG_VPRS_PN_24296:
+ break;
+ default:
+ dev_err(bdi->dev, "Error unknown model: 0x%02x\n", v);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+ int ret;
+
+ ret = bdi->info->check_chip(bdi);
+ if (ret < 0)
+ return ret;
+
ret = bq24190_register_reset(bdi);
if (ret < 0)
return ret;
@@ -1736,7 +1925,8 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
struct power_supply_battery_info *info;
int v, idx;
- idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+ idx = bdi->info->ichg_array_size - 1;
+
bdi->ichg_max = bq24190_ccc_ichg_values[idx];
idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
@@ -1781,6 +1971,64 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
return 0;
}
+static const struct bq24190_chip_info bq24190_chip_info_tbl[] = {
+ [BQ24190] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24192] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24192i] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24196] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24296] = {
+ .ichg_array_size = BQ24296_CCC_ICHG_VALUES_LEN,
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24296_vbus_desc,
+#endif
+ .check_chip = bq24296_check_chip,
+ .set_chg_config = bq24296_battery_set_chg_config,
+ .ntc_fault_mask = BQ24296_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24296_charger_get_ntc_status,
+ .set_otg_vbus = bq24296_set_otg_vbus,
+ },
+};
+
static int bq24190_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -1804,6 +2052,7 @@ static int bq24190_probe(struct i2c_client *client)
bdi->client = client;
bdi->dev = dev;
strscpy(bdi->model_name, id->name, sizeof(bdi->model_name));
+ bdi->info = i2c_get_match_data(client);
mutex_init(&bdi->f_reg_lock);
bdi->charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
bdi->f_reg = 0;
@@ -1940,7 +2189,7 @@ static void bq24190_shutdown(struct i2c_client *client)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
/* Turn off 5V boost regulator on shutdown */
- bq24190_set_otg_vbus(bdi, false);
+ bdi->info->set_otg_vbus(bdi, false);
}
static __maybe_unused int bq24190_runtime_suspend(struct device *dev)
@@ -2029,19 +2278,21 @@ static const struct dev_pm_ops bq24190_pm_ops = {
};
static const struct i2c_device_id bq24190_i2c_ids[] = {
- { "bq24190" },
- { "bq24192" },
- { "bq24192i" },
- { "bq24196" },
+ { "bq24190", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24190] },
+ { "bq24192", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192] },
+ { "bq24192i", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192i] },
+ { "bq24196", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24196] },
+ { "bq24296", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24296] },
{ },
};
MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
static const struct of_device_id bq24190_of_match[] = {
- { .compatible = "ti,bq24190", },
- { .compatible = "ti,bq24192", },
- { .compatible = "ti,bq24192i", },
- { .compatible = "ti,bq24196", },
+ { .compatible = "ti,bq24190", .data = &bq24190_chip_info_tbl[BQ24190] },
+ { .compatible = "ti,bq24192", .data = &bq24190_chip_info_tbl[BQ24192] },
+ { .compatible = "ti,bq24192i", .data = &bq24190_chip_info_tbl[BQ24192i] },
+ { .compatible = "ti,bq24196", .data = &bq24190_chip_info_tbl[BQ24196] },
+ { .compatible = "ti,bq24296", .data = &bq24190_chip_info_tbl[BQ24296] },
{ },
};
MODULE_DEVICE_TABLE(of, bq24190_of_match);
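The bq24190 rework above replaces scattered model checks with a per-chip bq24190_chip_info table referenced from both the I2C id table and the OF match table; i2c_get_match_data() then returns the right entry regardless of how the device matched. Reduced to a hypothetical two-chip skeleton (the 3008 mA limit mirrors the BQ24296 ICHG note above):

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

struct example_chip_info {
        int max_ichg_ma;
};

static const struct example_chip_info chip_a_info = { .max_ichg_ma = 4544 };
static const struct example_chip_info chip_b_info = { .max_ichg_ma = 3008 };

static int example_probe(struct i2c_client *client)
{
        const struct example_chip_info *info;

        info = i2c_get_match_data(client);      /* OF or I2C id match */
        if (!info)
                return -ENODEV;

        dev_info(&client->dev, "max charge current %d mA\n",
                 info->max_ichg_ma);
        return 0;
}

static const struct i2c_device_id example_i2c_ids[] = {
        { "chip-a", (kernel_ulong_t)&chip_a_info },
        { "chip-b", (kernel_ulong_t)&chip_b_info },
        { }
};
MODULE_DEVICE_TABLE(i2c, example_i2c_ids);

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,chip-a", .data = &chip_a_info },
        { .compatible = "vendor,chip-b", .data = &chip_b_info },
        { }
};
MODULE_DEVICE_TABLE(of, example_of_match);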
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 4296600e8..1c4a9d137 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -2162,6 +2162,28 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
+#ifdef CONFIG_PM_SLEEP
+static int bq27xxx_battery_suspend(struct device *dev)
+{
+ struct bq27xxx_device_info *di = dev_get_drvdata(dev);
+
+ cancel_delayed_work(&di->work);
+ return 0;
+}
+
+static int bq27xxx_battery_resume(struct device *dev)
+{
+ struct bq27xxx_device_info *di = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&di->work, 0);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+SIMPLE_DEV_PM_OPS(bq27xxx_battery_battery_pm_ops,
+ bq27xxx_battery_suspend, bq27xxx_battery_resume);
+EXPORT_SYMBOL_GPL(bq27xxx_battery_battery_pm_ops);
+
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("BQ27xxx battery monitor driver");
MODULE_LICENSE("GPL");
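The new bq27xxx suspend/resume pair simply pauses the polling work across system sleep and kicks an immediate refresh on resume. The same shape written with the modern DEFINE_SIMPLE_DEV_PM_OPS() helper, which avoids the CONFIG_PM_SLEEP ifdefs; example_* names are made up:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/workqueue.h>

struct example_chip {
        struct delayed_work poll_work;
};

static int example_suspend(struct device *dev)
{
        struct example_chip *chip = dev_get_drvdata(dev);

        cancel_delayed_work(&chip->poll_work);  /* no polling while asleep */
        return 0;
}

static int example_resume(struct device *dev)
{
        struct example_chip *chip = dev_get_drvdata(dev);

        schedule_delayed_work(&chip->poll_work, 0);     /* refresh now */
        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
                                example_suspend, example_resume);

The ops would then be hooked up via .driver.pm = pm_sleep_ptr(&example_pm_ops).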
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 886e0a8e2..9910c6007 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -297,6 +297,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = {
.driver = {
.name = "bq27xxx-battery",
.of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table),
+ .pm = &bq27xxx_battery_battery_pm_ops,
},
.probe = bq27xxx_battery_i2c_probe,
.remove = bq27xxx_battery_i2c_remove,
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 73265001d..ecef35ac3 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -861,44 +861,44 @@ const size_t power_supply_battery_info_properties_size = ARRAY_SIZE(power_supply
EXPORT_SYMBOL_GPL(power_supply_battery_info_properties_size);
bool power_supply_battery_info_has_prop(struct power_supply_battery_info *info,
- enum power_supply_property psp)
+ enum power_supply_property psp)
{
if (!info)
return false;
switch (psp) {
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- return info->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
- case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- return info->energy_full_design_uwh >= 0;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- return info->charge_full_design_uah >= 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- return info->voltage_min_design_uv >= 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- return info->voltage_max_design_uv >= 0;
- case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
- return info->precharge_current_ua >= 0;
- case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
- return info->charge_term_current_ua >= 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
- return info->constant_charge_current_max_ua >= 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
- return info->constant_charge_voltage_max_uv >= 0;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
- return info->temp_ambient_alert_min > INT_MIN;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
- return info->temp_ambient_alert_max < INT_MAX;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- return info->temp_alert_min > INT_MIN;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- return info->temp_alert_max < INT_MAX;
- case POWER_SUPPLY_PROP_TEMP_MIN:
- return info->temp_min > INT_MIN;
- case POWER_SUPPLY_PROP_TEMP_MAX:
- return info->temp_max < INT_MAX;
- default:
- return false;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ return info->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ return info->energy_full_design_uwh >= 0;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ return info->charge_full_design_uah >= 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ return info->voltage_min_design_uv >= 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ return info->voltage_max_design_uv >= 0;
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return info->precharge_current_ua >= 0;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return info->charge_term_current_ua >= 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return info->constant_charge_current_max_ua >= 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return info->constant_charge_voltage_max_uv >= 0;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+ return info->temp_ambient_alert_min > INT_MIN;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+ return info->temp_ambient_alert_max < INT_MAX;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ return info->temp_alert_min > INT_MIN;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ return info->temp_alert_max < INT_MAX;
+ case POWER_SUPPLY_PROP_TEMP_MIN:
+ return info->temp_min > INT_MIN;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ return info->temp_max < INT_MAX;
+ default:
+ return false;
}
}
EXPORT_SYMBOL_GPL(power_supply_battery_info_has_prop);
@@ -914,53 +914,53 @@ int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
return -EINVAL;
switch (psp) {
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = info->technology;
- return 0;
- case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- val->intval = info->energy_full_design_uwh;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = info->charge_full_design_uah;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = info->voltage_min_design_uv;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = info->voltage_max_design_uv;
- return 0;
- case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
- val->intval = info->precharge_current_ua;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
- val->intval = info->charge_term_current_ua;
- return 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
- val->intval = info->constant_charge_current_max_ua;
- return 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
- val->intval = info->constant_charge_voltage_max_uv;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
- val->intval = info->temp_ambient_alert_min;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
- val->intval = info->temp_ambient_alert_max;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- val->intval = info->temp_alert_min;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- val->intval = info->temp_alert_max;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_MIN:
- val->intval = info->temp_min;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_MAX:
- val->intval = info->temp_max;
- return 0;
- default:
- return -EINVAL;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = info->technology;
+ return 0;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = info->energy_full_design_uwh;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = info->charge_full_design_uah;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = info->voltage_min_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = info->voltage_max_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ val->intval = info->precharge_current_ua;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ val->intval = info->charge_term_current_ua;
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = info->constant_charge_current_max_ua;
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = info->constant_charge_voltage_max_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+ val->intval = info->temp_ambient_alert_min;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+ val->intval = info->temp_ambient_alert_max;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ val->intval = info->temp_alert_min;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ val->intval = info->temp_alert_max;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_MIN:
+ val->intval = info->temp_min;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ val->intval = info->temp_max;
+ return 0;
+ default:
+ return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(power_supply_battery_info_get_prop);
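These two helpers are the query side of the static battery-info tables: a charger driver first asks whether the firmware-provided info carries a property at all (per the sentinel checks above: negative for the _ua/_uv/_uah/_uwh fields, INT_MIN/INT_MAX for temperatures), then reads the value. A minimal consumer sketch, with illustrative locals:

    /* Sketch: typical consumer pattern (locals are illustrative). */
    union power_supply_propval val;
    int precharge_ua = -EINVAL;

    if (power_supply_battery_info_has_prop(info, POWER_SUPPLY_PROP_PRECHARGE_CURRENT) &&
        !power_supply_battery_info_get_prop(info, POWER_SUPPLY_PROP_PRECHARGE_CURRENT, &val))
            precharge_ua = val.intval;      /* microamps, per the _ua naming */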
@@ -1255,6 +1255,7 @@ EXPORT_SYMBOL_GPL(power_supply_powers);
static void power_supply_dev_release(struct device *dev)
{
struct power_supply *psy = to_power_supply(dev);
+
dev_dbg(dev, "%s\n", __func__);
kfree(psy);
}
@@ -1636,6 +1637,6 @@ subsys_initcall(power_supply_class_init);
module_exit(power_supply_class_exit);
MODULE_DESCRIPTION("Universal power supply monitor class");
-MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, "
- "Szabolcs Gyurko, "
- "Anton Vorontsov <cbou@mail.ru>");
+MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
+MODULE_AUTHOR("Szabolcs Gyurko");
+MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index ed215b458..1d2940a78 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -328,17 +328,15 @@ static u64 ines_find_txts(struct ines_port *port, struct sk_buff *skb)
return ns;
}
-static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+static int ines_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
struct ines_port *port = container_of(mii_ts, struct ines_port, mii_ts);
u32 cm_one_step = 0, port_conf, ts_stat_rx, ts_stat_tx;
- struct hwtstamp_config cfg;
unsigned long flags;
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- switch (cfg.tx_type) {
+ switch (cfg->tx_type) {
case HWTSTAMP_TX_OFF:
ts_stat_tx = 0;
break;
@@ -353,7 +351,7 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
return -ERANGE;
}
- switch (cfg.rx_filter) {
+ switch (cfg->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ts_stat_rx = 0;
break;
@@ -372,7 +370,7 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ts_stat_rx = TS_ENABLE;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
@@ -393,7 +391,7 @@ static int ines_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
spin_unlock_irqrestore(&port->lock, flags);
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+ return 0;
}
static void ines_link_state(struct mii_timestamper *mii_ts,
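The signature change tracks the mii_timestamper conversion to kernel_hwtstamp_config: the ioctl marshalling now lives in the networking core, so the driver receives a kernel-space struct and can update cfg->rx_filter in place. A simplified sketch of the core-side flow that replaces the driver's copy pair (an approximation with error paths trimmed, not code from this patch):

    /* Sketch: core-side marshalling that the driver no longer does itself. */
    static int core_hwtstamp_sketch(struct mii_timestamper *mii_ts,
                                    struct ifreq *ifr,
                                    struct netlink_ext_ack *extack)
    {
            struct kernel_hwtstamp_config kcfg;
            struct hwtstamp_config ucfg;
            int err;

            if (copy_from_user(&ucfg, ifr->ifr_data, sizeof(ucfg)))
                    return -EFAULT;

            hwtstamp_config_to_kernel(&kcfg, &ucfg);
            err = mii_ts->hwtstamp(mii_ts, &kcfg, extack);  /* new hook */
            if (err)
                    return err;

            hwtstamp_config_from_kernel(&ucfg, &kcfg);
            return copy_to_user(ifr->ifr_data, &ucfg, sizeof(ucfg)) ? -EFAULT : 0;
    }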
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index e7defce8c..5f858e426 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1716,20 +1716,6 @@ ptp_ocp_get_mem(struct ptp_ocp *bp, struct ocp_resource *r)
return __ptp_ocp_get_mem(bp, start, r->size);
}
-static void
-ptp_ocp_set_irq_resource(struct resource *res, int irq)
-{
- struct resource r = DEFINE_RES_IRQ(irq);
- *res = r;
-}
-
-static void
-ptp_ocp_set_mem_resource(struct resource *res, resource_size_t start, int size)
-{
- struct resource r = DEFINE_RES_MEM(start, size);
- *res = r;
-}
-
static int
ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
{
@@ -1741,15 +1727,15 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
int id;
start = pci_resource_start(pdev, 0) + r->offset;
- ptp_ocp_set_mem_resource(&res[0], start, r->size);
- ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
+ res[0] = DEFINE_RES_MEM(start, r->size);
+ res[1] = DEFINE_RES_IRQ(pci_irq_vector(pdev, r->irq_vec));
info = r->extra;
id = pci_dev_id(pdev) << 1;
id += info->pci_offset;
p = platform_device_register_resndata(&pdev->dev, info->name, id,
- res, 2, info->data,
+ res, ARRAY_SIZE(res), info->data,
info->data_size);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -1768,11 +1754,11 @@ ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id)
info = r->extra;
start = pci_resource_start(pdev, 0) + r->offset;
- ptp_ocp_set_mem_resource(&res[0], start, r->size);
- ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
+ res[0] = DEFINE_RES_MEM(start, r->size);
+ res[1] = DEFINE_RES_IRQ(pci_irq_vector(pdev, r->irq_vec));
return platform_device_register_resndata(&pdev->dev, info->name,
- id, res, 2,
+ id, res, ARRAY_SIZE(res),
info->data, info->data_size);
}
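The two deleted wrappers only existed because struct resource slots used to need a named temporary; as of this series the DEFINE_RES_NAMED() family expands to compound literals, so the macros can be assigned directly, and ARRAY_SIZE(res) replaces the hard-coded 2. A sketch with example values:

    /* Sketch: direct assignment now works (address, size and IRQ are examples). */
    struct resource res[2];

    res[0] = DEFINE_RES_MEM(0xfe000000, 0x1000);    /* example BAR window */
    res[1] = DEFINE_RES_IRQ(42);                    /* example Linux IRQ number */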
@@ -4260,13 +4246,6 @@ static int ptp_ocp_dpll_mode_get(const struct dpll_device *dpll, void *priv,
return 0;
}
-static bool ptp_ocp_dpll_mode_supported(const struct dpll_device *dpll,
- void *priv, const enum dpll_mode mode,
- struct netlink_ext_ack *extack)
-{
- return mode == DPLL_MODE_AUTOMATIC;
-}
-
static int ptp_ocp_dpll_direction_get(const struct dpll_pin *pin,
void *pin_priv,
const struct dpll_device *dpll,
@@ -4350,7 +4329,6 @@ static int ptp_ocp_dpll_frequency_get(const struct dpll_pin *pin,
static const struct dpll_device_ops dpll_ops = {
.lock_status_get = ptp_ocp_dpll_lock_status_get,
.mode_get = ptp_ocp_dpll_mode_get,
- .mode_supported = ptp_ocp_dpll_mode_supported,
};
static const struct dpll_pin_ops dpll_pins_ops = {
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 59c44a558..f2728ee78 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/idr.h>
#include <linux/of.h>
#include <linux/pwm.h>
#include <linux/list.h>
@@ -23,52 +24,25 @@
#define CREATE_TRACE_POINTS
#include <trace/events/pwm.h>
-#define MAX_PWMS 1024
-
static DEFINE_MUTEX(pwm_lookup_lock);
static LIST_HEAD(pwm_lookup_list);
-/* protects access to pwm_chips and allocated_pwms */
+/* protects access to pwm_chips */
static DEFINE_MUTEX(pwm_lock);
-static LIST_HEAD(pwm_chips);
-static DECLARE_BITMAP(allocated_pwms, MAX_PWMS);
-
-/* Called with pwm_lock held */
-static int alloc_pwms(unsigned int count)
-{
- unsigned int start;
-
- start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, 0,
- count, 0);
-
- if (start + count > MAX_PWMS)
- return -ENOSPC;
-
- bitmap_set(allocated_pwms, start, count);
-
- return start;
-}
-
-/* Called with pwm_lock held */
-static void free_pwms(struct pwm_chip *chip)
-{
- bitmap_clear(allocated_pwms, chip->base, chip->npwm);
-
- kfree(chip->pwms);
- chip->pwms = NULL;
-}
+static DEFINE_IDR(pwm_chips);
static struct pwm_chip *pwmchip_find_by_name(const char *name)
{
struct pwm_chip *chip;
+ unsigned long id, tmp;
if (!name)
return NULL;
mutex_lock(&pwm_lock);
- list_for_each_entry(chip, &pwm_chips, list) {
+ idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
const char *chip_name = dev_name(chip->dev);
if (chip_name && strcmp(chip_name, name) == 0) {
@@ -85,22 +59,24 @@ static struct pwm_chip *pwmchip_find_by_name(const char *name)
static int pwm_device_request(struct pwm_device *pwm, const char *label)
{
int err;
+ struct pwm_chip *chip = pwm->chip;
+ const struct pwm_ops *ops = chip->ops;
if (test_bit(PWMF_REQUESTED, &pwm->flags))
return -EBUSY;
- if (!try_module_get(pwm->chip->owner))
+ if (!try_module_get(chip->owner))
return -ENODEV;
- if (pwm->chip->ops->request) {
- err = pwm->chip->ops->request(pwm->chip, pwm);
+ if (ops->request) {
+ err = ops->request(chip, pwm);
if (err) {
- module_put(pwm->chip->owner);
+ module_put(chip->owner);
return err;
}
}
- if (pwm->chip->ops->get_state) {
+ if (ops->get_state) {
/*
* Zero-initialize state because most drivers are unaware of
* .usage_power. The other members of state are supposed to be
@@ -110,7 +86,7 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
*/
struct pwm_state state = { 0, };
- err = pwm->chip->ops->get_state(pwm->chip, pwm, &state);
+ err = ops->get_state(chip, pwm, &state);
trace_pwm_get(pwm, &state, err);
if (!err)
@@ -234,7 +210,6 @@ static bool pwm_ops_check(const struct pwm_chip *chip)
*/
int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
{
- struct pwm_device *pwm;
unsigned int i;
int ret;
@@ -246,31 +221,28 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
chip->owner = owner;
- chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
+ chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL);
if (!chip->pwms)
return -ENOMEM;
mutex_lock(&pwm_lock);
- ret = alloc_pwms(chip->npwm);
+ ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
if (ret < 0) {
mutex_unlock(&pwm_lock);
kfree(chip->pwms);
return ret;
}
- chip->base = ret;
+ chip->id = ret;
for (i = 0; i < chip->npwm; i++) {
- pwm = &chip->pwms[i];
+ struct pwm_device *pwm = &chip->pwms[i];
pwm->chip = chip;
- pwm->pwm = chip->base + i;
pwm->hwpwm = i;
}
- list_add(&chip->list, &pwm_chips);
-
mutex_unlock(&pwm_lock);
if (IS_ENABLED(CONFIG_OF))
@@ -297,11 +269,11 @@ void pwmchip_remove(struct pwm_chip *chip)
mutex_lock(&pwm_lock);
- list_del_init(&chip->list);
-
- free_pwms(chip);
+ idr_remove(&pwm_chips, chip->id);
mutex_unlock(&pwm_lock);
+
+ kfree(chip->pwms);
}
EXPORT_SYMBOL_GPL(pwmchip_remove);
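Switching from the fixed MAX_PWMS bitmap to an IDR removes the global 1024-PWM ceiling and the need for a contiguous chip->base range; each chip gets a small integer chip->id instead, and iteration uses idr_for_each_entry_ul() under the same pwm_lock. A standalone toy of the lifecycle, assuming the new chip->id field:

    /* Sketch: the IDR lifecycle replacing the bitmap (toy, no locking). */
    static DEFINE_IDR(toy_idr);

    static int toy_register(struct pwm_chip *chip)
    {
            int ret = idr_alloc(&toy_idr, chip, 0, 0, GFP_KERNEL); /* any id >= 0 */

            if (ret < 0)
                    return ret;
            chip->id = ret;
            return 0;
    }

    static void toy_unregister(struct pwm_chip *chip)
    {
            idr_remove(&toy_idr, chip->id);
    }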
@@ -356,8 +328,8 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
}
EXPORT_SYMBOL_GPL(pwm_request_from_chip);
-static void pwm_apply_state_debug(struct pwm_device *pwm,
- const struct pwm_state *state)
+static void pwm_apply_debug(struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct pwm_state *last = &pwm->last;
struct pwm_chip *chip = pwm->chip;
@@ -463,24 +435,15 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
}
/**
- * pwm_apply_state() - atomically apply a new state to a PWM device
+ * __pwm_apply() - atomically apply a new state to a PWM device
* @pwm: PWM device
* @state: new state to apply
*/
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
+static int __pwm_apply(struct pwm_device *pwm, const struct pwm_state *state)
{
struct pwm_chip *chip;
int err;
- /*
- * Some lowlevel driver's implementations of .apply() make use of
- * mutexes, also with some drivers only returning when the new
- * configuration is active calling pwm_apply_state() from atomic context
- * is a bad idea. So make it explicit that calling this function might
- * sleep.
- */
- might_sleep();
-
if (!pwm || !state || !state->period ||
state->duty_cycle > state->period)
return -EINVAL;
@@ -505,11 +468,60 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
* only do this after pwm->state was applied as some
* implementations of .get_state depend on this
*/
- pwm_apply_state_debug(pwm, state);
+ pwm_apply_debug(pwm, state);
return 0;
}
-EXPORT_SYMBOL_GPL(pwm_apply_state);
+
+/**
+ * pwm_apply_might_sleep() - atomically apply a new state to a PWM device
+ * @pwm: PWM device
+ * @state: new state to apply
+ *
+ * Cannot be used in atomic context.
+ */
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state)
+{
+ int err;
+
+ /*
+ * Some low-level drivers' implementations of .apply() make use of
+ * mutexes, and some drivers only return once the new configuration is
+ * active, so calling pwm_apply_might_sleep() from atomic context is a
+ * bad idea. Make it explicit that calling this function might sleep.
+ */
+ might_sleep();
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && pwm->chip->atomic) {
+ /*
+ * Catch any drivers that have been marked as atomic but
+ * that will sleep anyway.
+ */
+ non_block_start();
+ err = __pwm_apply(pwm, state);
+ non_block_end();
+ } else {
+ err = __pwm_apply(pwm, state);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pwm_apply_might_sleep);
+
+/**
+ * pwm_apply_atomic() - apply a new state to a PWM device from atomic context
+ * @pwm: PWM device
+ * @state: new state to apply
+ *
+ * Not all PWM devices support this function; check with pwm_might_sleep().
+ */
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state)
+{
+ WARN_ONCE(!pwm->chip->atomic,
+ "sleeping PWM driver used in atomic context\n");
+
+ return __pwm_apply(pwm, state);
+}
+EXPORT_SYMBOL_GPL(pwm_apply_atomic);
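The split gives callers an explicit contract: pwm_apply_might_sleep() asserts it may sleep (and, under CONFIG_PWM_DEBUG, enforces it via non_block_start()), while pwm_apply_atomic() warns if the underlying driver was not marked chip->atomic. A consumer-side sketch, assuming the pwm_might_sleep() accessor from this series; the state values are illustrative:

    /* Sketch: picking the apply flavour from a consumer. */
    struct pwm_state state;
    int err;

    pwm_init_state(pwm, &state);
    state.period = 1000000;         /* 1 ms, example */
    state.duty_cycle = 500000;      /* 50%, example */
    state.enabled = true;

    if (pwm_might_sleep(pwm))
            err = pwm_apply_might_sleep(pwm, &state); /* process context only */
    else
            err = pwm_apply_atomic(pwm, &state);      /* safe in atomic context */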
/**
* pwm_capture() - capture and report a PWM signal
@@ -567,7 +579,7 @@ int pwm_adjust_config(struct pwm_device *pwm)
state.period = pargs.period;
state.polarity = pargs.polarity;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/*
@@ -590,17 +602,18 @@ int pwm_adjust_config(struct pwm_device *pwm)
state.duty_cycle = state.period - state.duty_cycle;
}
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
EXPORT_SYMBOL_GPL(pwm_adjust_config);
static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
{
struct pwm_chip *chip;
+ unsigned long id, tmp;
mutex_lock(&pwm_lock);
- list_for_each_entry(chip, &pwm_chips, list)
+ idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
if (chip->dev && device_match_fwnode(chip->dev, fwnode)) {
mutex_unlock(&pwm_lock);
return chip;
@@ -1058,17 +1071,27 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
static void *pwm_seq_start(struct seq_file *s, loff_t *pos)
{
+ unsigned long id = *pos;
+ void *ret;
+
mutex_lock(&pwm_lock);
s->private = "";
- return seq_list_start(&pwm_chips, *pos);
+ ret = idr_get_next_ul(&pwm_chips, &id);
+ *pos = id;
+ return ret;
}
static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
+ unsigned long id = *pos + 1;
+ void *ret;
+
s->private = "\n";
- return seq_list_next(v, &pwm_chips, pos);
+ ret = idr_get_next_ul(&pwm_chips, &id);
+ *pos = id;
+ return ret;
}
static void pwm_seq_stop(struct seq_file *s, void *v)
@@ -1078,9 +1101,10 @@ static void pwm_seq_stop(struct seq_file *s, void *v)
static int pwm_seq_show(struct seq_file *s, void *v)
{
- struct pwm_chip *chip = list_entry(v, struct pwm_chip, list);
+ struct pwm_chip *chip = v;
- seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private,
+ seq_printf(s, "%s%d: %s/%s, %d PWM device%s\n",
+ (char *)s->private, chip->id,
chip->dev->bus ? chip->dev->bus->name : "no-bus",
dev_name(chip->dev), chip->npwm,
(chip->npwm != 1) ? "s" : "");
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 584288d8c..1f6fc9a9f 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -180,7 +180,6 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
.div1_clk_erratum = true,
};
-#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_pwm_suspend(struct device *dev)
{
struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
@@ -210,10 +209,9 @@ static int atmel_hlcdc_pwm_resume(struct device *dev)
return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
&state);
}
-#endif
-static SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
- atmel_hlcdc_pwm_suspend, atmel_hlcdc_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
+ atmel_hlcdc_pwm_suspend, atmel_hlcdc_pwm_resume);
static const struct of_device_id atmel_hlcdc_dt_ids[] = {
{
@@ -297,7 +295,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = {
.driver = {
.name = "atmel-hlcdc-pwm",
.of_match_table = atmel_hlcdc_pwm_dt_ids,
- .pm = &atmel_hlcdc_pwm_pm_ops,
+ .pm = pm_ptr(&atmel_hlcdc_pwm_pm_ops),
},
.probe = atmel_hlcdc_pwm_probe,
.remove_new = atmel_hlcdc_pwm_remove,
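The same three-step conversion recurs in atmel-tcb, berlin, brcmstb, dwc, samsung, imx-tpm, stm32, tiecap and tiehrpwm below: drop the #ifdef CONFIG_PM_SLEEP guards, switch to DEFINE_SIMPLE_DEV_PM_OPS() (whose definition keeps the callbacks referenced, so no __maybe_unused is needed), and wrap the registration in pm_ptr() or pm_sleep_ptr() so the ops and callbacks are compiled out when power management is disabled. A condensed sketch with placeholder names:

    /* Sketch of the recurring pattern (all names are placeholders). */
    static int foo_suspend(struct device *dev) { return 0; }
    static int foo_resume(struct device *dev)  { return 0; }

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = pm_ptr(&foo_pm_ops),  /* NULL when CONFIG_PM=n */
            },
    };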
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 98b33c016..d42c897cb 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -489,7 +489,6 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
-#ifdef CONFIG_PM_SLEEP
static int atmel_tcb_pwm_suspend(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
@@ -522,16 +521,15 @@ static int atmel_tcb_pwm_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
- atmel_tcb_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+ atmel_tcb_pwm_resume);
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
.of_match_table = atmel_tcb_pwm_dt_ids,
- .pm = &atmel_tcb_pwm_pm_ops,
+ .pm = pm_ptr(&atmel_tcb_pwm_pm_ops),
},
.probe = atmel_tcb_pwm_probe,
.remove_new = atmel_tcb_pwm_remove,
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 15d6ed03c..45046a5c2 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -260,7 +260,7 @@ static int kona_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return err;
}
- err = kona_pwmc_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = kona_pwmc_config(chip, pwm, state->duty_cycle, state->period);
if (err && !pwm->state.enabled)
clk_disable_unprepare(kp->clk);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index ab30667f4..283cf27f2 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -28,6 +28,7 @@ struct bcm2835_pwm {
struct device *dev;
void __iomem *base;
struct clk *clk;
+ unsigned long rate;
};
static inline struct bcm2835_pwm *to_bcm2835_pwm(struct pwm_chip *chip)
@@ -63,17 +64,11 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- unsigned long rate = clk_get_rate(pc->clk);
unsigned long long period_cycles;
u64 max_period;
u32 val;
- if (!rate) {
- dev_err(pc->dev, "failed to get clock rate\n");
- return -EINVAL;
- }
-
/*
* period_cycles must be a 32 bit value, so period * rate / NSEC_PER_SEC
* must be <= U32_MAX. As U32_MAX * NSEC_PER_SEC < U64_MAX the
@@ -88,13 +83,13 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* <=> period < (U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate
* <=> period <= ceil((U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC/2) / rate) - 1
*/
- max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, rate) - 1;
+ max_period = DIV_ROUND_UP_ULL((u64)U32_MAX * NSEC_PER_SEC + NSEC_PER_SEC / 2, pc->rate) - 1;
if (state->period > max_period)
return -EINVAL;
/* set period */
- period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * rate, NSEC_PER_SEC);
+ period_cycles = DIV_ROUND_CLOSEST_ULL(state->period * pc->rate, NSEC_PER_SEC);
/* don't accept a period that is too small */
if (period_cycles < PERIOD_MIN)
@@ -103,7 +98,7 @@ static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
writel(period_cycles, pc->base + PERIOD(pwm->hwpwm));
/* set duty cycle */
- val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * rate, NSEC_PER_SEC);
+ val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle * pc->rate, NSEC_PER_SEC);
writel(val, pc->base + DUTY(pwm->hwpwm));
/* set polarity */
@@ -131,6 +126,13 @@ static const struct pwm_ops bcm2835_pwm_ops = {
.apply = bcm2835_pwm_apply,
};
+static void devm_clk_rate_exclusive_put(void *data)
+{
+ struct clk *clk = data;
+
+ clk_rate_exclusive_put(clk);
+}
+
static int bcm2835_pwm_probe(struct platform_device *pdev)
{
struct bcm2835_pwm *pc;
@@ -151,8 +153,24 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
"clock not found\n");
+ ret = clk_rate_exclusive_get(pc->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get exclusive rate\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev, devm_clk_rate_exclusive_put,
+ pc->clk);
+ if (ret)
+ return ret;
+
+ pc->rate = clk_get_rate(pc->clk);
+ if (!pc->rate)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "failed to get clock rate\n");
+
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
+ pc->chip.atomic = true;
pc->chip.npwm = 2;
platform_set_drvdata(pdev, pc);
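Two things combine here: holding the rate exclusively lets the driver cache pc->rate once instead of calling clk_get_rate() (which may sleep) in .apply(), which is what makes chip.atomic = true legal; and devm_add_action_or_reset() guarantees the matching clk_rate_exclusive_put() runs on any later probe failure or on unbind — if registering the action itself fails, the callback is invoked immediately. A generic sketch of the idiom, with placeholder names:

    /* Sketch: tying a manual "put" to device teardown. */
    static void example_release(void *data)
    {
            clk_rate_exclusive_put(data);   /* undo the _get below */
    }

    /* inside probe: */
    ret = clk_rate_exclusive_get(clk);
    if (ret)
            return ret;
    ret = devm_add_action_or_reset(dev, example_release, clk);
    if (ret)
            return ret;     /* example_release has already run */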
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index ba2d79991..442913232 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -226,7 +226,6 @@ static int berlin_pwm_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int berlin_pwm_suspend(struct device *dev)
{
struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
@@ -267,17 +266,16 @@ static int berlin_pwm_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
- berlin_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
+ berlin_pwm_resume);
static struct platform_driver berlin_pwm_driver = {
.probe = berlin_pwm_probe,
.driver = {
.name = "berlin-pwm",
.of_match_table = berlin_pwm_match,
- .pm = &berlin_pwm_pm_ops,
+ .pm = pm_ptr(&berlin_pwm_pm_ops),
},
};
module_platform_driver(berlin_pwm_driver);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index b723c2d4f..0fdeb0b2d 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -259,7 +259,6 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int brcmstb_pwm_suspend(struct device *dev)
{
struct brcmstb_pwm *p = dev_get_drvdata(dev);
@@ -275,17 +274,16 @@ static int brcmstb_pwm_resume(struct device *dev)
return clk_prepare_enable(p->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend,
- brcmstb_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend,
+ brcmstb_pwm_resume);
static struct platform_driver brcmstb_pwm_driver = {
.probe = brcmstb_pwm_probe,
.driver = {
.name = "pwm-brcmstb",
.of_match_table = brcmstb_pwm_of_match,
- .pm = &brcmstb_pwm_pm_ops,
+ .pm = pm_ptr(&brcmstb_pwm_pm_ops),
},
};
module_platform_driver(brcmstb_pwm_driver);
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 2b0b659ee..e09358901 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -160,22 +160,22 @@ static const struct pwm_ops crc_pwm_ops = {
static int crystalcove_pwm_probe(struct platform_device *pdev)
{
- struct crystalcove_pwm *pwm;
+ struct crystalcove_pwm *crc_pwm;
struct device *dev = pdev->dev.parent;
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
- pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
- if (!pwm)
+ crc_pwm = devm_kzalloc(&pdev->dev, sizeof(*crc_pwm), GFP_KERNEL);
+ if (!crc_pwm)
return -ENOMEM;
- pwm->chip.dev = &pdev->dev;
- pwm->chip.ops = &crc_pwm_ops;
- pwm->chip.npwm = 1;
+ crc_pwm->chip.dev = &pdev->dev;
+ crc_pwm->chip.ops = &crc_pwm_ops;
+ crc_pwm->chip.npwm = 1;
/* get the PMIC regmap */
- pwm->regmap = pmic->regmap;
+ crc_pwm->regmap = pmic->regmap;
- return devm_pwmchip_add(&pdev->dev, &pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, &crc_pwm->chip);
}
static struct platform_driver crystalcove_pwm_driver = {
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 4fbd23e4e..5fe303b86 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -18,14 +18,12 @@
/**
* struct cros_ec_pwm_device - Driver data for EC PWM
*
- * @dev: Device node
* @ec: Pointer to EC device
* @chip: PWM controller chip
* @use_pwm_type: Use PWM types instead of generic channels
* @channel: array with per-channel data
*/
struct cros_ec_pwm_device {
- struct device *dev;
struct cros_ec_device *ec;
struct pwm_chip chip;
bool use_pwm_type;
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index bd9cadb49..a4a057ae0 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -71,7 +71,6 @@ static void dwc_pwm_remove(struct pci_dev *pci)
pm_runtime_get_noresume(&pci->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int dwc_pwm_suspend(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
@@ -106,9 +105,8 @@ static int dwc_pwm_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
static const struct pci_device_id dwc_pwm_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */
@@ -122,7 +120,7 @@ static struct pci_driver dwc_pwm_driver = {
.remove = dwc_pwm_remove,
.id_table = dwc_pwm_id_table,
.driver = {
- .pm = &dwc_pwm_pm_ops,
+ .pm = pm_sleep_ptr(&dwc_pwm_pm_ops),
},
};
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 29dcf38f3..54dbafc51 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -13,9 +13,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -196,7 +196,7 @@ static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = img_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = img_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
@@ -260,7 +260,6 @@ static int img_pwm_probe(struct platform_device *pdev)
u64 val;
unsigned long clk_rate;
struct img_pwm_chip *imgchip;
- const struct of_device_id *of_dev_id;
imgchip = devm_kzalloc(&pdev->dev, sizeof(*imgchip), GFP_KERNEL);
if (!imgchip)
@@ -272,10 +271,7 @@ static int img_pwm_probe(struct platform_device *pdev)
if (IS_ERR(imgchip->base))
return PTR_ERR(imgchip->base);
- of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
- if (!of_dev_id)
- return -ENODEV;
- imgchip->data = of_dev_id->data;
+ imgchip->data = device_get_match_data(&pdev->dev);
imgchip->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"img,cr-periph");
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index dc6aafeb9..9fc290e64 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -371,7 +371,7 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
+static int pwm_imx_tpm_suspend(struct device *dev)
{
struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
@@ -390,7 +390,7 @@ static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
+static int pwm_imx_tpm_resume(struct device *dev)
{
struct imx_tpm_pwm_chip *tpm = dev_get_drvdata(dev);
int ret = 0;
@@ -402,8 +402,8 @@ static int __maybe_unused pwm_imx_tpm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(imx_tpm_pwm_pm,
- pwm_imx_tpm_suspend, pwm_imx_tpm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(imx_tpm_pwm_pm,
+ pwm_imx_tpm_suspend, pwm_imx_tpm_resume);
static const struct of_device_id imx_tpm_pwm_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-pwm", },
@@ -415,7 +415,7 @@ static struct platform_driver imx_tpm_pwm_driver = {
.driver = {
.name = "imx7ulp-tpm-pwm",
.of_match_table = imx_tpm_pwm_dt_ids,
- .pm = &imx_tpm_pwm_pm,
+ .pm = pm_ptr(&imx_tpm_pwm_pm),
},
.probe = pwm_imx_tpm_probe,
};
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index d0a0406b2..3933418e5 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -124,7 +124,7 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct jz4740_pwm_chip *jz = to_jz4740(pwm->chip);
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
unsigned long long tmp = 0xffffull * NSEC_PER_SEC;
struct clk *clk = jz->clk[pwm->hwpwm];
unsigned long period, duty;
@@ -150,7 +150,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
rate = clk_round_rate(clk, tmp);
if (rate < 0) {
- dev_err(chip->dev, "Unable to round rate: %ld", rate);
+ dev_err(chip->dev, "Unable to round rate: %ld\n", rate);
return rate;
}
@@ -171,7 +171,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = clk_set_rate(clk, rate);
if (err) {
- dev_err(chip->dev, "Unable to set rate: %d", err);
+ dev_err(chip->dev, "Unable to set rate: %d\n", err);
return err;
}
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index ef7d0da13..fe891fa71 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -194,7 +194,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
- int requested_events, i;
+ int requested_events;
if (period_ns < lpc18xx_pwm->min_period_ns ||
period_ns > lpc18xx_pwm->max_period_ns) {
@@ -223,8 +223,6 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if ((requested_events <= 2 && lpc18xx_pwm->period_ns != period_ns) ||
!lpc18xx_pwm->period_ns) {
lpc18xx_pwm->period_ns = period_ns;
- for (i = 0; i < chip->npwm; i++)
- pwm_set_period(&chip->pwms[i], period_ns);
lpc18xx_pwm_config_period(chip, period_ns);
}
@@ -328,7 +326,7 @@ static int lpc18xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = lpc18xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = lpc18xx_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 78f664e41..1d9f3e7a2 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -103,7 +103,7 @@ static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = lpc32xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = lpc32xx_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 373abfd25..17d290f84 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -217,7 +217,7 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = pwm_mediatek_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = pwm_mediatek_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 5bea53243..2971bbf3b 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -468,10 +468,9 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
channel->mux.hw.init = &init;
err = devm_clk_hw_register(dev, &channel->mux.hw);
- if (err) {
- dev_err(dev, "failed to register %s: %d\n", name, err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to register %s\n", name);
snprintf(name, sizeof(name), "%s#div%u", dev_name(dev), i);
@@ -491,10 +490,9 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
channel->div.lock = &meson->lock;
err = devm_clk_hw_register(dev, &channel->div.hw);
- if (err) {
- dev_err(dev, "failed to register %s: %d\n", name, err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to register %s\n", name);
snprintf(name, sizeof(name), "%s#gate%u", dev_name(dev), i);
@@ -513,17 +511,13 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
channel->gate.lock = &meson->lock;
err = devm_clk_hw_register(dev, &channel->gate.hw);
- if (err) {
- dev_err(dev, "failed to register %s: %d\n", name, err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "failed to register %s\n", name);
channel->clk = devm_clk_hw_get_clk(dev, &channel->gate.hw, NULL);
- if (IS_ERR(channel->clk)) {
- err = PTR_ERR(channel->clk);
- dev_err(dev, "failed to register %s: %d\n", name, err);
- return err;
- }
+ if (IS_ERR(channel->clk))
+ return dev_err_probe(dev, PTR_ERR(channel->clk),
+ "failed to register %s\n", name);
}
return 0;
@@ -554,10 +548,9 @@ static int meson_pwm_probe(struct platform_device *pdev)
return err;
err = devm_pwmchip_add(&pdev->dev, &meson->chip);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to register PWM chip\n");
return 0;
}
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 13161e08d..496bd73d2 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -37,7 +37,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <clocksource/timer-ti-dm.h>
@@ -55,7 +54,6 @@
* struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
* corresponding to omap dmtimer.
* @chip: PWM chip structure representing PWM controller
- * @mutex: Mutex to protect pwm apply state
* @dm_timer: Pointer to omap dm timer.
* @pdata: Pointer to omap dm timer ops.
* @dm_timer_pdev: Pointer to omap dm timer platform device
@@ -63,7 +61,6 @@
struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
/* Mutex to protect pwm apply state */
- struct mutex mutex;
struct omap_dm_timer *dm_timer;
const struct omap_dm_timer_ops *pdata;
struct platform_device *dm_timer_pdev;
@@ -277,13 +274,11 @@ static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
const struct pwm_state *state)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
- int ret = 0;
-
- mutex_lock(&omap->mutex);
+ int ret;
if (pwm_omap_dmtimer_is_enabled(omap) && !state->enabled) {
omap->pdata->stop(omap->dm_timer);
- goto unlock_mutex;
+ return 0;
}
if (pwm_omap_dmtimer_polarity(omap) != state->polarity)
@@ -292,7 +287,7 @@ static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
ret = pwm_omap_dmtimer_config(chip, pwm, state->duty_cycle,
state->period);
if (ret)
- goto unlock_mutex;
+ return ret;
if (!pwm_omap_dmtimer_is_enabled(omap) && state->enabled) {
omap->pdata->set_pwm(omap->dm_timer,
@@ -303,10 +298,7 @@ static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
pwm_omap_dmtimer_start(omap);
}
-unlock_mutex:
- mutex_unlock(&omap->mutex);
-
- return ret;
+ return 0;
}
static const struct pwm_ops pwm_omap_dmtimer_ops = {
@@ -404,8 +396,6 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
omap->chip.ops = &pwm_omap_dmtimer_ops;
omap->chip.npwm = 1;
- mutex_init(&omap->mutex);
-
ret = pwmchip_add(&omap->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM\n");
@@ -452,8 +442,6 @@ static void pwm_omap_dmtimer_remove(struct platform_device *pdev)
omap->pdata->free(omap->dm_timer);
put_device(&omap->dm_timer_pdev->dev);
-
- mutex_destroy(&omap->mutex);
}
static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 4239f2c3e..28265fdfc 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -416,7 +415,7 @@ static int tpu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = tpu_pwm_config(pwm->chip, pwm,
+ err = tpu_pwm_config(chip, pwm,
state->duty_cycle, state->period, enabled);
if (err)
return err;
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index cce4381e1..a7c647e37 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -10,8 +10,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/time.h>
@@ -296,16 +296,11 @@ MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids);
static int rockchip_pwm_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
u32 enable_conf, ctrl;
bool enabled;
int ret, count;
- id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
- if (!id)
- return -EINVAL;
-
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
return -ENOMEM;
@@ -344,7 +339,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pc);
- pc->data = id->data;
+ pc->data = device_get_match_data(&pdev->dev);
pc->chip.dev = &pdev->dev;
pc->chip.ops = &rockchip_pwm_ops;
pc->chip.npwm = 1;
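As in pwm-img above, of_match_device() only handles devicetree, while device_get_match_data() goes through the fwnode layer, so the same probe works under DT and ACPI and the explicit table lookup plus NULL check disappears. A one-line sketch; the -ENODEV fallback is a common convention, not part of this patch:

    /* Sketch: fwnode-generic match-data lookup. */
    pc->data = device_get_match_data(&pdev->dev);
    if (!pc->data)
            return -ENODEV; /* only if some table entries lack .data */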
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 69d9f4577..6e77302f7 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -620,7 +620,6 @@ static void pwm_samsung_remove(struct platform_device *pdev)
clk_disable_unprepare(our_chip->base_clk);
}
-#ifdef CONFIG_PM_SLEEP
static int pwm_samsung_resume(struct device *dev)
{
struct samsung_pwm_chip *our_chip = dev_get_drvdata(dev);
@@ -653,14 +652,13 @@ static int pwm_samsung_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, NULL, pwm_samsung_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_samsung_pm_ops, NULL, pwm_samsung_resume);
static struct platform_driver pwm_samsung_driver = {
.driver = {
.name = "samsung-pwm",
- .pm = &pwm_samsung_pm_ops,
+ .pm = pm_ptr(&pwm_samsung_pm_ops),
.of_match_table = of_match_ptr(samsung_pwm_matches),
},
.probe = pwm_samsung_probe,
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index d885b30a1..69b1113c6 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -416,7 +416,7 @@ static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = sti_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = sti_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index b67974cc1..439068f3e 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -218,7 +218,7 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
+static int stm32_pwm_lp_suspend(struct device *dev)
{
struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
struct pwm_state state;
@@ -233,13 +233,13 @@ static int __maybe_unused stm32_pwm_lp_suspend(struct device *dev)
return pinctrl_pm_select_sleep_state(dev);
}
-static int __maybe_unused stm32_pwm_lp_resume(struct device *dev)
+static int stm32_pwm_lp_resume(struct device *dev)
{
return pinctrl_pm_select_default_state(dev);
}
-static SIMPLE_DEV_PM_OPS(stm32_pwm_lp_pm_ops, stm32_pwm_lp_suspend,
- stm32_pwm_lp_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_pwm_lp_pm_ops, stm32_pwm_lp_suspend,
+ stm32_pwm_lp_resume);
static const struct of_device_id stm32_pwm_lp_of_match[] = {
{ .compatible = "st,stm32-pwm-lp", },
@@ -252,7 +252,7 @@ static struct platform_driver stm32_pwm_lp_driver = {
.driver = {
.name = "stm32-pwm-lp",
.of_match_table = stm32_pwm_lp_of_match,
- .pm = &stm32_pwm_lp_pm_ops,
+ .pm = pm_ptr(&stm32_pwm_lp_pm_ops),
},
};
module_platform_driver(stm32_pwm_lp_driver);
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 58ece15ef..5f10cba49 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -52,21 +52,6 @@ static u32 active_channels(struct stm32_pwm *dev)
return ccer & TIM_CCER_CCXE;
}
-static int write_ccrx(struct stm32_pwm *dev, int ch, u32 value)
-{
- switch (ch) {
- case 0:
- return regmap_write(dev->regmap, TIM_CCR1, value);
- case 1:
- return regmap_write(dev->regmap, TIM_CCR2, value);
- case 2:
- return regmap_write(dev->regmap, TIM_CCR3, value);
- case 3:
- return regmap_write(dev->regmap, TIM_CCR4, value);
- }
- return -EINVAL;
-}
-
#define TIM_CCER_CC12P (TIM_CCER_CC1P | TIM_CCER_CC2P)
#define TIM_CCER_CC12E (TIM_CCER_CC1E | TIM_CCER_CC2E)
#define TIM_CCER_CC34P (TIM_CCER_CC3P | TIM_CCER_CC4P)
@@ -323,7 +308,7 @@ unlock:
return ret;
}
-static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
+static int stm32_pwm_config(struct stm32_pwm *priv, unsigned int ch,
int duty_ns, int period_ns)
{
unsigned long long prd, div, dty;
@@ -369,7 +354,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
dty = prd * duty_ns;
do_div(dty, period_ns);
- write_ccrx(priv, ch, dty);
+ regmap_write(priv->regmap, TIM_CCR1 + 4 * ch, dty);
/* Configure output mode */
shift = (ch & 0x1) * CCMR_CHANNEL_SHIFT;
@@ -386,7 +371,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch,
return 0;
}
-static int stm32_pwm_set_polarity(struct stm32_pwm *priv, int ch,
+static int stm32_pwm_set_polarity(struct stm32_pwm *priv, unsigned int ch,
enum pwm_polarity polarity)
{
u32 mask;
@@ -401,7 +386,7 @@ static int stm32_pwm_set_polarity(struct stm32_pwm *priv, int ch,
return 0;
}
-static int stm32_pwm_enable(struct stm32_pwm *priv, int ch)
+static int stm32_pwm_enable(struct stm32_pwm *priv, unsigned int ch)
{
u32 mask;
int ret;
@@ -426,7 +411,7 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, int ch)
return 0;
}
-static void stm32_pwm_disable(struct stm32_pwm *priv, int ch)
+static void stm32_pwm_disable(struct stm32_pwm *priv, unsigned int ch)
{
u32 mask;
@@ -486,8 +471,50 @@ static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
}
+static int stm32_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm, struct pwm_state *state)
+{
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+ int ch = pwm->hwpwm;
+ unsigned long rate;
+ u32 ccer, psc, arr, ccr;
+ u64 dty, prd;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_read(priv->regmap, TIM_CCER, &ccer);
+ if (ret)
+ goto out;
+
+ state->enabled = ccer & (TIM_CCER_CC1E << (ch * 4));
+ state->polarity = (ccer & (TIM_CCER_CC1P << (ch * 4))) ?
+ PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
+ ret = regmap_read(priv->regmap, TIM_PSC, &psc);
+ if (ret)
+ goto out;
+ ret = regmap_read(priv->regmap, TIM_ARR, &arr);
+ if (ret)
+ goto out;
+ ret = regmap_read(priv->regmap, TIM_CCR1 + 4 * ch, &ccr);
+ if (ret)
+ goto out;
+
+ rate = clk_get_rate(priv->clk);
+
+ prd = (u64)NSEC_PER_SEC * (psc + 1) * (arr + 1);
+ state->period = DIV_ROUND_UP_ULL(prd, rate);
+ dty = (u64)NSEC_PER_SEC * (psc + 1) * ccr;
+ state->duty_cycle = DIV_ROUND_UP_ULL(dty, rate);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
static const struct pwm_ops stm32pwm_ops = {
.apply = stm32_pwm_apply_locked,
+ .get_state = stm32_pwm_get_state,
.capture = IS_ENABLED(CONFIG_DMA_ENGINE) ? stm32_pwm_capture : NULL,
};
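For readers checking the new .get_state against the hardware: with prescaler PSC, auto-reload ARR and compare value CCRx clocked at rate, the reconstruction is the inverse of what stm32_pwm_config() programs. The worked form, as comments:

    /*
     * Sketch of the arithmetic in stm32_pwm_get_state() above, where
     * rate = clk_get_rate(priv->clk):
     *
     *   period_ns = (PSC + 1) * (ARR + 1) * NSEC_PER_SEC / rate
     *   duty_ns   = (PSC + 1) * CCRx      * NSEC_PER_SEC / rate
     *
     * Both divisions round up (DIV_ROUND_UP_ULL), so re-applying a
     * read-back state never undershoots what the hardware produces.
     */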
@@ -642,7 +669,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused stm32_pwm_suspend(struct device *dev)
+static int stm32_pwm_suspend(struct device *dev)
{
struct stm32_pwm *priv = dev_get_drvdata(dev);
unsigned int i;
@@ -663,7 +690,7 @@ static int __maybe_unused stm32_pwm_suspend(struct device *dev)
return pinctrl_pm_select_sleep_state(dev);
}
-static int __maybe_unused stm32_pwm_resume(struct device *dev)
+static int stm32_pwm_resume(struct device *dev)
{
struct stm32_pwm *priv = dev_get_drvdata(dev);
int ret;
@@ -676,7 +703,7 @@ static int __maybe_unused stm32_pwm_resume(struct device *dev)
return stm32_pwm_apply_breakinputs(priv);
}
-static SIMPLE_DEV_PM_OPS(stm32_pwm_pm_ops, stm32_pwm_suspend, stm32_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_pwm_pm_ops, stm32_pwm_suspend, stm32_pwm_resume);
static const struct of_device_id stm32_pwm_of_match[] = {
{ .compatible = "st,stm32-pwm", },
@@ -689,7 +716,7 @@ static struct platform_driver stm32_pwm_driver = {
.driver = {
.name = "stm32-pwm",
.of_match_table = stm32_pwm_of_match,
- .pm = &stm32_pwm_pm_ops,
+ .pm = pm_ptr(&stm32_pwm_pm_ops),
},
};
module_platform_driver(stm32_pwm_driver);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index a46f5b4dd..19c0c0f39 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -44,7 +44,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
if (ret < 0) {
- dev_err(chip->dev, "error reading PWM#%u control\n",
+ dev_dbg(chip->dev, "error reading PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -53,7 +53,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
if (ret) {
- dev_err(chip->dev, "error writing PWM#%u control\n",
+ dev_dbg(chip->dev, "error writing PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -70,7 +70,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
if (ret < 0) {
- dev_err(chip->dev, "error reading PWM#%u control\n",
+ dev_dbg(chip->dev, "error reading PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -79,7 +79,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
if (ret)
- dev_err(chip->dev, "error writing PWM#%u control\n",
+ dev_dbg(chip->dev, "error writing PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -233,7 +233,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
if (ret) {
- dev_err(chip->dev, "error writing register %02x: %d\n",
+ dev_dbg(chip->dev, "error writing register %02x: %d\n",
offset, ret);
return ret;
}
@@ -242,7 +242,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
if (ret) {
- dev_err(chip->dev, "error writing register %02x: %d\n",
+ dev_dbg(chip->dev, "error writing register %02x: %d\n",
offset, ret);
return ret;
}
@@ -275,7 +275,7 @@ static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = stmpe_24xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = stmpe_24xx_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 39ea51e08..82ee2f075 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -256,7 +256,7 @@ static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = tegra_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = tegra_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 11e3549cf..d974f4414 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -269,7 +269,6 @@ static void ecap_pwm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
{
pm_runtime_get_sync(pc->chip.dev);
@@ -312,15 +311,14 @@ static int ecap_pwm_resume(struct device *dev)
ecap_pwm_restore_context(pc);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ecap_pwm_pm_ops, ecap_pwm_suspend, ecap_pwm_resume);
static struct platform_driver ecap_pwm_driver = {
.driver = {
.name = "ecap",
.of_match_table = ecap_of_match,
- .pm = &ecap_pwm_pm_ops,
+ .pm = pm_ptr(&ecap_pwm_pm_ops),
},
.probe = ecap_pwm_probe,
.remove_new = ecap_pwm_remove,
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 66ac26558..af231fa74 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -521,7 +521,6 @@ static void ehrpwm_pwm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
{
pm_runtime_get_sync(pc->chip.dev);
@@ -589,16 +588,15 @@ static int ehrpwm_pwm_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
- ehrpwm_pwm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ehrpwm_pwm_pm_ops, ehrpwm_pwm_suspend,
+ ehrpwm_pwm_resume);
static struct platform_driver ehrpwm_pwm_driver = {
.driver = {
.name = "ehrpwm",
.of_match_table = ehrpwm_of_match,
- .pm = &ehrpwm_pwm_pm_ops,
+ .pm = pm_ptr(&ehrpwm_pwm_pm_ops),
},
.probe = ehrpwm_pwm_probe,
.remove_new = ehrpwm_pwm_remove,
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index 625233f47..c670ccb81 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -172,10 +172,10 @@ static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* We cannot skip calling ->config even if state->period ==
* pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
* because we might have exited early in the last call to
- * pwm_apply_state because of !state->enabled and so the two values in
+ * pwm_apply_might_sleep because of !state->enabled and so the two values in
* pwm->state might not be configured in hardware.
*/
- ret = twl4030_pwmled_config(pwm->chip, pwm,
+ ret = twl4030_pwmled_config(chip, pwm,
state->duty_cycle, state->period);
if (ret)
return ret;
@@ -275,7 +275,7 @@ static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = twl6030_pwmled_config(pwm->chip, pwm,
+ err = twl6030_pwmled_config(chip, pwm,
state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 603d31f27..68e02c9a6 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -294,7 +294,7 @@ static int twl4030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = twl_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
@@ -319,7 +319,7 @@ static int twl6030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = twl_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 5568d5312..7bfeacee0 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -206,10 +206,10 @@ static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* We cannot skip calling ->config even if state->period ==
* pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
* because we might have exited early in the last call to
- * pwm_apply_state because of !state->enabled and so the two values in
+ * pwm_apply_might_sleep because of !state->enabled and so the two values in
* pwm->state might not be configured in hardware.
*/
- err = vt8500_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ err = vt8500_pwm_config(chip, pwm, state->duty_cycle, state->period);
if (err)
return err;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 8d1254761..1698609d9 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -62,7 +62,7 @@ static ssize_t period_store(struct device *child,
mutex_lock(&export->lock);
pwm_get_state(pwm, &state);
state.period = val;
- ret = pwm_apply_state(pwm, &state);
+ ret = pwm_apply_might_sleep(pwm, &state);
mutex_unlock(&export->lock);
return ret ? : size;
@@ -97,7 +97,7 @@ static ssize_t duty_cycle_store(struct device *child,
mutex_lock(&export->lock);
pwm_get_state(pwm, &state);
state.duty_cycle = val;
- ret = pwm_apply_state(pwm, &state);
+ ret = pwm_apply_might_sleep(pwm, &state);
mutex_unlock(&export->lock);
return ret ? : size;
@@ -144,7 +144,7 @@ static ssize_t enable_store(struct device *child,
goto unlock;
}
- ret = pwm_apply_state(pwm, &state);
+ ret = pwm_apply_might_sleep(pwm, &state);
unlock:
mutex_unlock(&export->lock);
@@ -194,7 +194,7 @@ static ssize_t polarity_store(struct device *child,
mutex_lock(&export->lock);
pwm_get_state(pwm, &state);
state.polarity = polarity;
- ret = pwm_apply_state(pwm, &state);
+ ret = pwm_apply_might_sleep(pwm, &state);
mutex_unlock(&export->lock);
return ret ? : size;
@@ -401,7 +401,7 @@ static int pwm_class_apply_state(struct pwm_export *export,
struct pwm_device *pwm,
struct pwm_state *state)
{
- int ret = pwm_apply_state(pwm, state);
+ int ret = pwm_apply_might_sleep(pwm, state);
/* release lock taken in pwm_class_get_state */
mutex_unlock(&export->lock);
@@ -510,7 +510,7 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
* the kernel it's just not exported.
*/
parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
- "pwmchip%d", chip->base);
+ "pwmchip%d", chip->id);
if (IS_ERR(parent)) {
dev_warn(chip->dev,
"device_create failed for pwm_chip sysfs export\n");
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 83323c3d1..4b84270a8 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -51,8 +51,9 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
* @len: Length (in bytes) of the maintenance transaction
* @data: Value to be read into
*
- * Generates a local SREP space read. Returns %0 on
- * success or %-EINVAL on failure.
+ * Generates a local SREP space read.
+ *
+ * Returns: %0 on success or %-EINVAL on failure.
*/
static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
int len, u32 *data)
@@ -75,8 +76,9 @@ static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
* @len: Length (in bytes) of the maintenance transaction
* @data: Value to be written
*
- * Generates a local write into SREP configuration space. Returns %0 on
- * success or %-EINVAL on failure.
+ * Generates a local write into SREP configuration space.
+ *
+ * Returns: %0 on success or %-EINVAL on failure.
*/
static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
int len, u32 data)
@@ -104,7 +106,7 @@ static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
* @do_wr: Operation flag (1 == MAINT_WR)
*
* Generates a RapidIO maintenance transaction (Read or Write).
- * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ * Returns: %0 on success and %-EINVAL or %-EFAULT on failure.
*/
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
u16 destid, u8 hopcount, u32 offset, int len,
@@ -205,10 +207,10 @@ err_out:
* @hopcount: Number of hops to target device
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
- * @val: Location to be read into
+ * @data: Location to be read into
*
* Generates a RapidIO maintenance read transaction.
- * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ * Returns: %0 on success and %-EINVAL or %-EFAULT on failure.
*/
static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 *data)
@@ -228,10 +230,10 @@ static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
* @hopcount: Number of hops to target device
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
- * @val: Value to be written
+ * @data: Value to be written
*
* Generates a RapidIO maintenance write transaction.
- * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ * Returns: %0 on success and %-EINVAL or %-EFAULT on failure.
*/
static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 data)
@@ -250,6 +252,8 @@ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
* Handles inbound port-write interrupts. Copies PW message from an internal
* buffer into PW message FIFO and schedules deferred routine to process
* queued messages.
+ *
+ * Returns: %0
*/
static int
tsi721_pw_handler(struct tsi721_device *priv)
@@ -307,6 +311,8 @@ static void tsi721_pw_dpc(struct work_struct *work)
* tsi721_pw_enable - enable/disable port-write interface init
* @mport: Master port implementing the port write unit
* @enable: 1=enable; 0=disable port-write message handling
+ *
+ * Returns: %0
*/
static int tsi721_pw_enable(struct rio_mport *mport, int enable)
{
@@ -336,7 +342,9 @@ static int tsi721_pw_enable(struct rio_mport *mport, int enable)
* @destid: Destination ID of target device
* @data: 16-bit info field of RapidIO doorbell
*
- * Sends a RapidIO doorbell message. Always returns %0.
+ * Sends a RapidIO doorbell message.
+ *
+ * Returns: %0
*/
static int tsi721_dsend(struct rio_mport *mport, int index,
u16 destid, u16 data)
@@ -361,6 +369,8 @@ static int tsi721_dsend(struct rio_mport *mport, int index,
* Handles inbound doorbell interrupts. Copies doorbell entry from an internal
* buffer into DB message FIFO and schedules deferred routine to process
* queued DBs.
+ *
+ * Returns: %0
*/
static int
tsi721_dbell_handler(struct tsi721_device *priv)
@@ -453,6 +463,8 @@ static void tsi721_db_dpc(struct work_struct *work)
*
* Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
* interrupt events and calls an event-specific handler(s).
+ *
+ * Returns: %IRQ_HANDLED or %IRQ_NONE
*/
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
@@ -607,6 +619,8 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
* @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles outbound messaging interrupts signaled using MSI-X.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
@@ -624,6 +638,8 @@ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
* @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles inbound messaging interrupts signaled using MSI-X.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
@@ -641,6 +657,8 @@ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
* @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles Tsi721 interrupts from SRIO MAC.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
@@ -663,6 +681,8 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
* Handles Tsi721 interrupts from SR2PC Channel.
* NOTE: At this moment services only one SR2PC channel associated with inbound
* doorbells.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
@@ -689,6 +709,8 @@ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
* Registers MSI-X interrupt service routines for interrupts that are active
* immediately after mport initialization. Messaging interrupt service routines
* should be registered during corresponding open requests.
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_request_msix(struct tsi721_device *priv)
{
@@ -717,6 +739,8 @@ static int tsi721_request_msix(struct tsi721_device *priv)
*
* Configures MSI-X support for Tsi721. Supports only an exact number
* of requested vectors.
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_enable_msix(struct tsi721_device *priv)
{
@@ -1334,7 +1358,7 @@ static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
* @priv: pointer to tsi721 private data
*
* Initializes inbound port write handler.
- * Returns %0 on success or %-ENOMEM on failure.
+ * Returns: %0 on success or %-ENOMEM on failure.
*/
static int tsi721_port_write_init(struct tsi721_device *priv)
{
@@ -1412,7 +1436,8 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
*
* Initialize BDMA channel allocated for RapidIO maintenance read/write
* request generation
- * Returns %0 on success or %-ENOMEM on failure.
+ *
+ * Returns: %0 on success or %-ENOMEM on failure.
*/
static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
@@ -1662,6 +1687,8 @@ tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
* @mbox: Outbound mailbox
* @buffer: Message to add to outbound queue
* @len: Length of message
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int
tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
@@ -1869,6 +1896,8 @@ no_sts_update:
* @dev_id: Device specific pointer to pass on event
* @mbox: Mailbox to open
* @entries: Number of entries in the outbound mailbox ring
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
int mbox, int entries)
@@ -2156,6 +2185,8 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
* @dev_id: Device specific pointer to pass on event
* @mbox: Mailbox to open
* @entries: Number of entries in the inbound mailbox ring
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
int mbox, int entries)
@@ -2409,6 +2440,8 @@ static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
* @mport: Master port implementing the Inbound Messaging Engine
* @mbox: Inbound mailbox number
* @buf: Buffer to add to inbound queue
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
@@ -2439,7 +2472,7 @@ out:
* @mport: Master port implementing the Inbound Messaging Engine
* @mbox: Inbound mailbox number
*
- * Returns pointer to the message on success or NULL on failure.
+ * Returns: pointer to the message on success or %NULL on failure.
*/
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
@@ -2507,6 +2540,8 @@ out:
* @priv: pointer to tsi721 private data
*
* Configures Tsi721 messaging engine.
+ *
+ * Returns: %0
*/
static int tsi721_messages_init(struct tsi721_device *priv)
{
@@ -2539,9 +2574,9 @@ static int tsi721_messages_init(struct tsi721_device *priv)
/**
* tsi721_query_mport - Fetch inbound message from the Tsi721 MSG Queue
* @mport: Master port implementing the Inbound Messaging Engine
- * @mbox: Inbound mailbox number
+ * @attr: mport device attributes
*
- * Returns pointer to the message on success or NULL on failure.
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_query_mport(struct rio_mport *mport,
struct rio_mport_attr *attr)
@@ -2653,6 +2688,8 @@ static void tsi721_mport_release(struct device *dev)
* @priv: pointer to tsi721 private data
*
* Configures Tsi721 as RapidIO master port.
+ *
+ * Returns: %0 on success or -errno value on failure.
*/
static int tsi721_setup_mport(struct tsi721_device *priv)
{
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index d375c0205..f77f75172 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -283,11 +283,13 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
#ifdef CONFIG_PCI_MSI
/**
- * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
* @irq: Linux interrupt number
* @ptr: Pointer to interrupt-specific data (BDMA channel structure)
*
* Handles BDMA channel interrupts signaled using MSI-X.
+ *
+ * Returns: %IRQ_HANDLED
*/
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f3ec24691..550145f82 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -56,6 +56,16 @@ config REGULATOR_USERSPACE_CONSUMER
If unsure, say no.
+config REGULATOR_NETLINK_EVENTS
+ bool "Enable support for receiving regulator events via netlink"
+ depends on NET
+ help
+	  Enabling this option allows the kernel to broadcast regulator events using
+	  the netlink mechanism. User-space applications can subscribe to these
+	  events for real-time notification of regulator state changes.
+
+ If unsure, say no.
+
config REGULATOR_88PG86X
tristate "Marvell 88PG86X voltage regulators"
depends on I2C
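
As the help text says, user space subscribes over generic netlink. Below is a minimal listener sketch using libnl-genl-3; the family/group strings and the attribute number/layout mirror what the new uapi header is expected to define (REG_GENL_FAMILY_NAME, REG_GENL_MCAST_GROUP_NAME, REG_GENL_ATTR_EVENT) and are assumptions here, not verified ABI:

	#include <stdio.h>
	#include <stdint.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>

	struct reg_genl_event {			/* assumed uapi layout */
		char reg_name[32];
		uint64_t event;
	};

	static int on_event(struct nl_msg *msg, void *arg)
	{
		struct nlattr *attrs[2] = { NULL, NULL };

		/* attribute 1 is assumed to be REG_GENL_ATTR_EVENT */
		if (!genlmsg_parse(nlmsg_hdr(msg), 0, attrs, 1, NULL) && attrs[1]) {
			struct reg_genl_event *ev = nla_data(attrs[1]);

			printf("regulator %s: event 0x%llx\n", ev->reg_name,
			       (unsigned long long)ev->event);
		}
		return NL_OK;
	}

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		genl_connect(sk);
		/* "reg_event"/"reg_mc_group" are assumed family/group names */
		grp = genl_ctrl_resolve_grp(sk, "reg_event", "reg_mc_group");
		if (grp < 0)
			return 1;
		nl_socket_add_membership(sk, grp);
		nl_socket_disable_seq_check(sk);
		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);
		for (;;)
			nl_recvmsgs_default(sk);
	}
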
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b2b059b5e..46fb569e6 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o devres.o irq_helpers.o
+obj-$(CONFIG_REGULATOR_NETLINK_EVENTS) += event.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index b465c0010..4b54068d4 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -339,14 +339,12 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
return ret;
}
-static int arizona_ldo1_remove(struct platform_device *pdev)
+static void arizona_ldo1_remove(struct platform_device *pdev)
{
struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
if (ldo1->ena_gpiod)
gpiod_put(ldo1->ena_gpiod);
-
- return 0;
}
static int madera_ldo1_probe(struct platform_device *pdev)
@@ -377,7 +375,7 @@ static int madera_ldo1_probe(struct platform_device *pdev)
static struct platform_driver arizona_ldo1_driver = {
.probe = arizona_ldo1_probe,
- .remove = arizona_ldo1_remove,
+ .remove_new = arizona_ldo1_remove,
.driver = {
.name = "arizona-ldo1",
.probe_type = PROBE_FORCE_SYNCHRONOUS,
@@ -386,7 +384,7 @@ static struct platform_driver arizona_ldo1_driver = {
static struct platform_driver madera_ldo1_driver = {
.probe = madera_ldo1_probe,
- .remove = arizona_ldo1_remove,
+ .remove_new = arizona_ldo1_remove,
.driver = {
.name = "madera-ldo1",
.probe_type = PROBE_FORCE_SYNCHRONOUS,
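
This and the following conversions all follow the same mechanical pattern: the platform core ignored the int returned by .remove, so the .remove_new callback returns void and the trailing "return 0;" is dropped. Generic shape (the foo_* names are hypothetical):

	static void foo_remove(struct platform_device *pdev)
	{
		struct foo *priv = platform_get_drvdata(pdev);

		foo_teardown(priv);		/* no "return 0;" anymore */
	}

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.remove_new = foo_remove,	/* void-returning variant */
		.driver = {
			.name = "foo",
		},
	};
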
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index d469481d8..c7ceba56e 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -260,10 +260,9 @@ static const struct dev_pm_ops bd9571mwv_pm = {
SET_SYSTEM_SLEEP_PM_OPS(bd9571mwv_suspend, bd9571mwv_resume)
};
-static int bd9571mwv_regulator_remove(struct platform_device *pdev)
+static void bd9571mwv_regulator_remove(struct platform_device *pdev)
{
device_remove_file(&pdev->dev, &dev_attr_backup_mode);
- return 0;
}
#define DEV_PM_OPS &bd9571mwv_pm
#else
@@ -357,7 +356,7 @@ static struct platform_driver bd9571mwv_regulator_driver = {
.pm = DEV_PM_OPS,
},
.probe = bd9571mwv_regulator_probe,
- .remove = bd9571mwv_regulator_remove,
+ .remove_new = bd9571mwv_regulator_remove,
.id_table = bd9571mwv_regulator_id_table,
};
module_platform_driver(bd9571mwv_regulator_driver);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a7b3e548e..a968dabb4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/consumer.h>
@@ -32,6 +33,7 @@
#include "dummy.h"
#include "internal.h"
+#include "regnl.h"
static DEFINE_WW_CLASS(regulator_ww_class);
static DEFINE_MUTEX(regulator_nesting_mutex);
@@ -4853,7 +4855,23 @@ static int _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
/* call rdev chain first */
- return blocking_notifier_call_chain(&rdev->notifier, event, data);
+ int ret = blocking_notifier_call_chain(&rdev->notifier, event, data);
+
+ if (IS_REACHABLE(CONFIG_REGULATOR_NETLINK_EVENTS)) {
+ struct device *parent = rdev->dev.parent;
+ const char *rname = rdev_get_name(rdev);
+ char name[32];
+
+		/* Make the event name unique, matching the debugfs directory naming */
+ if (parent && rname == rdev->desc->name) {
+ snprintf(name, sizeof(name), "%s-%s", dev_name(parent),
+ rname);
+ rname = name;
+ }
+ reg_generate_netlink_event(rname, event);
+ }
+
+ return ret;
}
int _regulator_bulk_get(struct device *dev, int num_consumers,
@@ -5066,6 +5084,41 @@ void regulator_bulk_free(int num_consumers,
EXPORT_SYMBOL_GPL(regulator_bulk_free);
/**
+ * regulator_handle_critical - Handle events for system-critical regulators.
+ * @rdev: The regulator device.
+ * @event: The event being handled.
+ *
+ * This function handles critical events such as under-voltage, over-current,
+ * and unknown errors for regulators deemed system-critical. On detecting such
+ * events, it triggers a hardware protection shutdown with a defined timeout.
+ */
+static void regulator_handle_critical(struct regulator_dev *rdev,
+ unsigned long event)
+{
+ const char *reason = NULL;
+
+ if (!rdev->constraints->system_critical)
+ return;
+
+ switch (event) {
+ case REGULATOR_EVENT_UNDER_VOLTAGE:
+ reason = "System critical regulator: voltage drop detected";
+ break;
+ case REGULATOR_EVENT_OVER_CURRENT:
+ reason = "System critical regulator: over-current detected";
+ break;
+ case REGULATOR_EVENT_FAIL:
+ reason = "System critical regulator: unknown error";
+ }
+
+ if (!reason)
+ return;
+
+ hw_protection_shutdown(reason,
+ rdev->constraints->uv_less_critical_window_ms);
+}
+
+/**
* regulator_notifier_call_chain - call regulator event notifier
* @rdev: regulator source
* @event: notifier block
@@ -5077,6 +5130,8 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
+ regulator_handle_critical(rdev, event);
+
_notifier_call_chain(rdev, event, data);
return NOTIFY_DONE;
@@ -6238,6 +6293,14 @@ unlock:
return 0;
}
+static bool regulator_ignore_unused;
+static int __init regulator_ignore_unused_setup(char *__unused)
+{
+ regulator_ignore_unused = true;
+ return 1;
+}
+__setup("regulator_ignore_unused", regulator_ignore_unused_setup);
+
static void regulator_init_complete_work_function(struct work_struct *work)
{
/*
@@ -6250,6 +6313,15 @@ static void regulator_init_complete_work_function(struct work_struct *work)
class_for_each_device(&regulator_class, NULL, NULL,
regulator_register_resolve_supply);
+ /*
+ * For debugging purposes, it may be useful to prevent unused
+ * regulators from being disabled.
+ */
+ if (regulator_ignore_unused) {
+ pr_warn("regulator: Not disabling unused regulators\n");
+ return;
+ }
+
/* If we have a full configuration then disable any regulators
* we have permission to change the status for and which are
* not in use or always_on. This is effectively the default
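
With regulator_handle_critical() wired into regulator_notifier_call_chain(), any driver reporting an under-voltage, over-current or failure event on a regulator whose constraints carry system_critical now escalates to hw_protection_shutdown() within uv_less_critical_window_ms. A sketch of the producer side, assuming an interrupt that signals under-voltage:

	static irqreturn_t my_uv_irq(int irq, void *data)
	{
		struct regulator_dev *rdev = data;

		/*
		 * For a system-critical regulator this now triggers a
		 * hardware-protection shutdown in the core.
		 */
		regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE,
					      NULL);
		return IRQ_HANDLED;
	}
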
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 34c5e485d..1e2d54da1 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -469,11 +469,9 @@ static int db8500_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int db8500_regulator_remove(struct platform_device *pdev)
+static void db8500_regulator_remove(struct platform_device *pdev)
{
ux500_regulator_debug_exit();
-
- return 0;
}
static struct platform_driver db8500_regulator_driver = {
@@ -482,7 +480,7 @@ static struct platform_driver db8500_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = db8500_regulator_probe,
- .remove = db8500_regulator_remove,
+ .remove_new = db8500_regulator_remove,
};
static int __init db8500_regulator_init(void)
diff --git a/drivers/regulator/event.c b/drivers/regulator/event.c
new file mode 100644
index 000000000..ea3bd4954
--- /dev/null
+++ b/drivers/regulator/event.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Regulator event over netlink
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#include <regulator/regulator.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <linux/atomic.h>
+
+#include "regnl.h"
+
+static atomic_t reg_event_seqnum = ATOMIC_INIT(0);
+
+static const struct genl_multicast_group reg_event_mcgrps[] = {
+ { .name = REG_GENL_MCAST_GROUP_NAME, },
+};
+
+static struct genl_family reg_event_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
+ .name = REG_GENL_FAMILY_NAME,
+ .version = REG_GENL_VERSION,
+ .maxattr = REG_GENL_ATTR_MAX,
+ .mcgrps = reg_event_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(reg_event_mcgrps),
+};
+
+int reg_generate_netlink_event(const char *reg_name, u64 event)
+{
+ struct sk_buff *skb;
+ struct nlattr *attr;
+ struct reg_genl_event *edata;
+ void *msg_header;
+ int size;
+
+ /* allocate memory */
+ size = nla_total_size(sizeof(struct reg_genl_event)) +
+ nla_total_size(0);
+
+ skb = genlmsg_new(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ /* add the genetlink message header */
+ msg_header = genlmsg_put(skb, 0, atomic_inc_return(&reg_event_seqnum),
+ &reg_event_genl_family, 0, REG_GENL_CMD_EVENT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ return -ENOMEM;
+ }
+
+ /* fill the data */
+ attr = nla_reserve(skb, REG_GENL_ATTR_EVENT, sizeof(struct reg_genl_event));
+ if (!attr) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ edata = nla_data(attr);
+ memset(edata, 0, sizeof(struct reg_genl_event));
+
+ strscpy(edata->reg_name, reg_name, sizeof(edata->reg_name));
+ edata->event = event;
+
+ /* send multicast genetlink message */
+ genlmsg_end(skb, msg_header);
+ size = genlmsg_multicast(&reg_event_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+ return size;
+}
+
+static int __init reg_event_genetlink_init(void)
+{
+ return genl_register_family(&reg_event_genl_family);
+}
+
+static int __init reg_event_init(void)
+{
+ int error;
+
+	/* create genetlink family for regulator events */
+ error = reg_event_genetlink_init();
+ if (error)
+ pr_warn("Failed to create genetlink family for reg event\n");
+
+ return 0;
+}
+
+fs_initcall(reg_event_init);
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 1b65e5e4e..03afc160f 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -131,6 +131,8 @@ static int of_get_regulation_constraints(struct device *dev,
constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
constraints->pull_down = of_property_read_bool(np, "regulator-pull-down");
+ constraints->system_critical = of_property_read_bool(np,
+ "system-critical-regulator");
if (of_property_read_bool(np, "regulator-allow-bypass"))
constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;
@@ -173,6 +175,13 @@ static int of_get_regulation_constraints(struct device *dev,
if (!ret)
constraints->enable_time = pval;
+ ret = of_property_read_u32(np, "regulator-uv-survival-time-ms", &pval);
+ if (!ret)
+ constraints->uv_less_critical_window_ms = pval;
+ else
+ constraints->uv_less_critical_window_ms =
+ REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS;
+
constraints->soft_start = of_property_read_bool(np,
"regulator-soft-start");
ret = of_property_read_u32(np, "regulator-active-discharge", &pval);
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index e0dc033aa..60656a815 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1594,7 +1594,7 @@ static const struct of_device_id of_palmas_match_tbl[] = {
static int palmas_regulators_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
- struct palmas_pmic_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct palmas_pmic_platform_data *pdata;
struct device_node *node = pdev->dev.of_node;
struct palmas_pmic_driver_data *driver_data;
struct regulator_config config = { };
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index e33d10df7..60cfcd741 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -90,7 +90,7 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
pwm_set_relative_duty_cycle(&pstate,
drvdata->duty_cycle_table[selector].dutycycle, 100);
- ret = pwm_apply_state(drvdata->pwm, &pstate);
+ ret = pwm_apply_might_sleep(drvdata->pwm, &pstate);
if (ret) {
dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -157,6 +157,13 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
pwm_get_state(drvdata->pwm, &pstate);
+ if (!pstate.enabled) {
+ if (pstate.polarity == PWM_POLARITY_INVERSED)
+ pstate.duty_cycle = pstate.period;
+ else
+ pstate.duty_cycle = 0;
+ }
+
voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
if (voltage < min(max_uV_duty, min_uV_duty) ||
voltage > max(max_uV_duty, min_uV_duty))
@@ -219,7 +226,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
pwm_set_relative_duty_cycle(&pstate, dutycycle, duty_unit);
- ret = pwm_apply_state(drvdata->pwm, &pstate);
+ ret = pwm_apply_might_sleep(drvdata->pwm, &pstate);
if (ret) {
dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -316,6 +323,32 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
return 0;
}
+static int pwm_regulator_init_boot_on(struct platform_device *pdev,
+ struct pwm_regulator_data *drvdata,
+ const struct regulator_init_data *init_data)
+{
+ struct pwm_state pstate;
+
+ if (!init_data->constraints.boot_on || drvdata->enb_gpio)
+ return 0;
+
+ pwm_get_state(drvdata->pwm, &pstate);
+ if (pstate.enabled)
+ return 0;
+
+ /*
+ * Update the duty cycle so the output does not change
+ * when the regulator core enables the regulator (and
+ * thus the PWM channel).
+ */
+ if (pstate.polarity == PWM_POLARITY_INVERSED)
+ pstate.duty_cycle = pstate.period;
+ else
+ pstate.duty_cycle = 0;
+
+ return pwm_apply_might_sleep(drvdata->pwm, &pstate);
+}
+
static int pwm_regulator_probe(struct platform_device *pdev)
{
const struct regulator_init_data *init_data;
@@ -375,6 +408,13 @@ static int pwm_regulator_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
+ ret);
+ return ret;
+ }
+
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
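
Both new hunks in this file rest on the same identity: for an inversed-polarity PWM the inactive output level corresponds to duty_cycle == period, while for normal polarity it is duty_cycle == 0. A one-line helper makes the mapping explicit (illustrative sketch only):

	/* Duty cycle that keeps the output at its inactive level. */
	static u64 pwm_inactive_duty(const struct pwm_state *state)
	{
		return state->polarity == PWM_POLARITY_INVERSED ? state->period : 0;
	}
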
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index cf502eec0..80e304711 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
#define pr_fmt(fmt) "%s: " fmt, __func__
@@ -68,10 +69,11 @@ enum rpmh_regulator_type {
* @regulator_type: RPMh accelerator type used to manage this
* regulator
* @ops: Pointer to regulator ops callback structure
- * @voltage_range: The single range of voltages supported by this
- * PMIC regulator type
+ * @voltage_ranges: The possible ranges of voltages supported by this
+ * PMIC regulator type
+ * @n_linear_ranges: Number of entries in voltage_ranges
* @n_voltages: The number of unique voltage set points defined
- * by voltage_range
+ * by voltage_ranges
* @hpm_min_load_uA: Minimum load current in microamps that requires
* high power mode (HPM) operation. This is used
* for LDO hardware type regulators only.
@@ -85,7 +87,8 @@ enum rpmh_regulator_type {
struct rpmh_vreg_hw_data {
enum rpmh_regulator_type regulator_type;
const struct regulator_ops *ops;
- const struct linear_range voltage_range;
+ const struct linear_range *voltage_ranges;
+ int n_linear_ranges;
int n_voltages;
int hpm_min_load_uA;
const int *pmic_mode_map;
@@ -449,8 +452,8 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
vreg->mode = REGULATOR_MODE_INVALID;
if (rpmh_data->hw_data->n_voltages) {
- vreg->rdesc.linear_ranges = &rpmh_data->hw_data->voltage_range;
- vreg->rdesc.n_linear_ranges = 1;
+ vreg->rdesc.linear_ranges = rpmh_data->hw_data->voltage_ranges;
+ vreg->rdesc.n_linear_ranges = rpmh_data->hw_data->n_linear_ranges;
vreg->rdesc.n_voltages = rpmh_data->hw_data->n_voltages;
}
@@ -508,6 +511,14 @@ static const int pmic_mode_map_pmic5_ldo[REGULATOR_MODE_STANDBY + 1] = {
[REGULATOR_MODE_FAST] = -EINVAL,
};
+static const int pmic_mode_map_pmic5_ldo_hpm[REGULATOR_MODE_STANDBY + 1] = {
+ [REGULATOR_MODE_INVALID] = -EINVAL,
+ [REGULATOR_MODE_STANDBY] = -EINVAL,
+ [REGULATOR_MODE_IDLE] = -EINVAL,
+ [REGULATOR_MODE_NORMAL] = PMIC5_LDO_MODE_HPM,
+ [REGULATOR_MODE_FAST] = -EINVAL,
+};
+
static unsigned int rpmh_regulator_pmic4_ldo_of_map_mode(unsigned int rpmh_mode)
{
unsigned int mode;
@@ -613,7 +624,10 @@ static unsigned int rpmh_regulator_pmic4_bob_of_map_mode(unsigned int rpmh_mode)
static const struct rpmh_vreg_hw_data pmic4_pldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1664000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 256,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic4_ldo,
@@ -623,7 +637,10 @@ static const struct rpmh_vreg_hw_data pmic4_pldo = {
static const struct rpmh_vreg_hw_data pmic4_pldo_lv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1256000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 128,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic4_ldo,
@@ -633,7 +650,10 @@ static const struct rpmh_vreg_hw_data pmic4_pldo_lv = {
static const struct rpmh_vreg_hw_data pmic4_nldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(312000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 128,
.hpm_min_load_uA = 30000,
.pmic_mode_map = pmic_mode_map_pmic4_ldo,
@@ -643,7 +663,10 @@ static const struct rpmh_vreg_hw_data pmic4_nldo = {
static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 216,
.pmic_mode_map = pmic_mode_map_pmic4_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -652,7 +675,10 @@ static const struct rpmh_vreg_hw_data pmic4_hfsmps3 = {
static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 258, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 259,
.pmic_mode_map = pmic_mode_map_pmic4_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -661,7 +687,10 @@ static const struct rpmh_vreg_hw_data pmic4_ftsmps426 = {
static const struct rpmh_vreg_hw_data pmic4_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1824000, 0, 83, 32000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 84,
.pmic_mode_map = pmic_mode_map_pmic4_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
@@ -676,7 +705,10 @@ static const struct rpmh_vreg_hw_data pmic4_lvs = {
static const struct rpmh_vreg_hw_data pmic5_pldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 256,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
@@ -686,7 +718,10 @@ static const struct rpmh_vreg_hw_data pmic5_pldo = {
static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 63,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
@@ -696,17 +731,50 @@ static const struct rpmh_vreg_hw_data pmic5_pldo_lv = {
static const struct rpmh_vreg_hw_data pmic5_pldo515_mv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 187, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 188,
.hpm_min_load_uA = 10000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
.of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
};
+static const struct rpmh_vreg_hw_data pmic5_pldo502 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 256,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo_hpm,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
+static const struct rpmh_vreg_hw_data pmic5_pldo502ln = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_ops,
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 2, 200000),
+ REGULATOR_LINEAR_RANGE(2608000, 3, 28, 16000),
+ REGULATOR_LINEAR_RANGE(3104000, 29, 30, 96000),
+ REGULATOR_LINEAR_RANGE(3312000, 31, 31, 0),
+ },
+ .n_linear_ranges = 4,
+ .n_voltages = 32,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo_hpm,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
static const struct rpmh_vreg_hw_data pmic5_nldo = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 124,
.hpm_min_load_uA = 30000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
@@ -716,17 +784,36 @@ static const struct rpmh_vreg_hw_data pmic5_nldo = {
static const struct rpmh_vreg_hw_data pmic5_nldo515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_drms_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 210, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 211,
.hpm_min_load_uA = 30000,
.pmic_mode_map = pmic_mode_map_pmic5_ldo,
.of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
};
+static const struct rpmh_vreg_hw_data pmic5_nldo502 = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_drms_ops,
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(528000, 0, 127, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 128,
+ .hpm_min_load_uA = 30000,
+ .pmic_mode_map = pmic_mode_map_pmic5_ldo,
+ .of_map_mode = rpmh_regulator_pmic4_ldo_of_map_mode,
+};
+
static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 216,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -735,7 +822,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 264,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -744,7 +834,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 263, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 264,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -753,7 +846,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps520 = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 267, 4000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 268,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -762,7 +858,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_lv = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(600000, 0, 267, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 268,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -771,7 +870,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps525_mv = {
static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 215,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -780,7 +882,10 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps527 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 236,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -789,7 +894,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
@@ -798,7 +906,10 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
static const struct rpmh_vreg_hw_data pmic5_bob = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_bypass_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ .voltage_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+ },
+ .n_linear_ranges = 1,
.n_voltages = 32,
.pmic_mode_map = pmic_mode_map_pmic5_bob,
.of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
@@ -1147,6 +1258,16 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pm8010_vreg_data[] = {
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo502, "vdd-l1-l2"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo502, "vdd-l1-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_pldo502ln, "vdd-l3-l4"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_pldo502ln, "vdd-l3-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo502, "vdd-l5"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo502ln, "vdd-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo502, "vdd-l7"),
+};
+
static const struct rpmh_vreg_init_data pm6150_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
@@ -1463,6 +1584,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm8009_1_vreg_data,
},
{
+ .compatible = "qcom,pm8010-rpmh-regulators",
+ .data = pm8010_vreg_data,
+ },
+ {
.compatible = "qcom,pm8150-rpmh-regulators",
.data = pm8150_vreg_data,
},
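
Replacing the single voltage_range with a voltage_ranges array lets parts such as pmic5_pldo502ln describe a non-uniform voltage ladder. Selector-to-voltage lookup walks the ranges, in the spirit of the <linux/linear_range.h> helpers; a simplified sketch:

	/* Simplified selector -> microvolt lookup over multiple linear ranges. */
	static int ranges_sel_to_uV(const struct linear_range *r, int n,
				    unsigned int sel)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (sel >= r[i].min_sel && sel <= r[i].max_sel)
				return r[i].min + (sel - r[i].min_sel) * r[i].step;
		}
		return -EINVAL;
	}

For pmic5_pldo502ln, selector 3 lands in the second range and yields 2608000 uV; selector 31 hits the final zero-step entry and yields 3312000 uV.
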
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index f53ada076..d1be95680 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -796,6 +796,7 @@ static const struct rpm_regulator_data rpm_mp5496_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &mp5496_smps, "s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &mp5496_smps, "s2" },
{ "l2", QCOM_SMD_RPM_LDOA, 2, &mp5496_ldoa2, "l2" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &mp5496_ldoa2, "l5" },
{}
};
@@ -1012,6 +1013,39 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8937_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" },
+ /* S5 - S6 are managed by SPMI */
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8953_ult_nldo, "vdd_l1_l19" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8953_ult_nldo, "vdd_l2_l23" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8953_ult_nldo, "vdd_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l13_l14_l15_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8953_ult_nldo, "vdd_l1_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8953_lnldo, "vdd_l20_l21" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8953_lnldo, "vdd_l20_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_ult_pldo, "vdd_l8_l11_l12_l17_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_nldo, "vdd_l2_l23" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
@@ -1329,6 +1363,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
+ { .compatible = "qcom,rpm-pm8937-regulators", .data = &rpm_pm8937_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 94f9092b2..9a9fa20dc 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -2239,6 +2239,39 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8937_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l19", },
+ { "l2", 0x4100, "vdd_l2_l23", },
+ { "l3", 0x4200, "vdd_l3", },
+ { "l4", 0x4300, "vdd_l4_l5_l6_l7_l16", },
+ { "l5", 0x4400, "vdd_l4_l5_l6_l7_l16", },
+ { "l6", 0x4500, "vdd_l4_l5_l6_l7_l16", },
+ { "l7", 0x4600, "vdd_l4_l5_l6_l7_l16", },
+ { "l8", 0x4700, "vdd_l8_l11_l12_l17_l22", },
+ { "l9", 0x4800, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l10", 0x4900, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l11", 0x4a00, "vdd_l8_l11_l12_l17_l22", },
+ { "l12", 0x4b00, "vdd_l8_l11_l12_l17_l22", },
+ { "l13", 0x4c00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l14", 0x4d00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l15", 0x4e00, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l16", 0x4f00, "vdd_l4_l5_l6_l7_l16", },
+ { "l17", 0x5000, "vdd_l8_l11_l12_l17_l22", },
+ { "l18", 0x5100, "vdd_l9_l10_l13_l14_l15_l18", },
+ { "l19", 0x5200, "vdd_l1_l19", },
+ { "l20", 0x5300, "vdd_l20_l21", },
+ { "l21", 0x5400, "vdd_l21_l21", },
+ { "l22", 0x5500, "vdd_l8_l11_l12_l17_l22", },
+ { "l23", 0x5600, "vdd_l2_l23", },
+ { }
+};
+
static const struct spmi_regulator_data pm8941_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -2453,6 +2486,7 @@ static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8909-regulators", .data = &pm8909_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
+ { .compatible = "qcom,pm8937-regulators", .data = &pm8937_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
diff --git a/drivers/regulator/regnl.h b/drivers/regulator/regnl.h
new file mode 100644
index 000000000..bcba16cc0
--- /dev/null
+++ b/drivers/regulator/regnl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Regulator event over netlink
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef __REGULATOR_EVENT_H
+#define __REGULATOR_EVENT_H
+
+int reg_generate_netlink_event(const char *reg_name, u64 event);
+
+#endif
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e374fa6e5..d89ae7f16 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1017,14 +1017,14 @@ static const struct regulator_desc rk805_reg[] = {
};
static const struct linear_range rk806_buck_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0, 160, 6250), /* 500mV ~ 1500mV */
- REGULATOR_LINEAR_RANGE(1500000, 161, 237, 25000), /* 1500mV ~ 3400mV */
- REGULATOR_LINEAR_RANGE(3400000, 238, 255, 0),
+ REGULATOR_LINEAR_RANGE(500000, 0, 159, 6250), /* 500mV ~ 1500mV */
+ REGULATOR_LINEAR_RANGE(1500000, 160, 235, 25000), /* 1500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(3400000, 236, 255, 0),
};
static const struct linear_range rk806_ldo_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0, 232, 12500), /* 500mV ~ 3400mV */
- REGULATOR_LINEAR_RANGE(3400000, 233, 255, 0), /* 500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(500000, 0, 231, 12500), /* 500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(3400000, 232, 255, 0),
};
static const struct regulator_desc rk806_reg[] = {
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 717144cbe..40855105d 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -233,7 +233,7 @@ err_pm_stop:
return ret;
}
-static int stm32_vrefbuf_remove(struct platform_device *pdev)
+static void stm32_vrefbuf_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
@@ -244,8 +244,6 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
};
static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev)
@@ -282,7 +280,7 @@ MODULE_DEVICE_TABLE(of, stm32_vrefbuf_of_match);
static struct platform_driver stm32_vrefbuf_driver = {
.probe = stm32_vrefbuf_probe,
- .remove = stm32_vrefbuf_remove,
+ .remove_new = stm32_vrefbuf_remove,
.driver = {
.name = "stm32-vrefbuf",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index 79d1a3eb1..a498df7cb 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -15,7 +15,7 @@
#include <dt-bindings/mfd/st,stpmic1.h>
/**
- * struct stpmic1 regulator description: this structure is used as driver data
+ * struct stpmic1_regulator_cfg - this structure is used as driver data
* @desc: regulator framework description
* @mask_reset_reg: mask reset register address
* @mask_reset_mask: mask rank and mask reset register mask
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 1d8304b88..5f8680423 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -115,7 +115,7 @@ out_rst_assert:
return ret;
}
-static int uniphier_regulator_remove(struct platform_device *pdev)
+static void uniphier_regulator_remove(struct platform_device *pdev)
{
struct uniphier_regulator_priv *priv = platform_get_drvdata(pdev);
int i;
@@ -124,8 +124,6 @@ static int uniphier_regulator_remove(struct platform_device *pdev)
reset_control_assert(priv->rst[i]);
clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
-
- return 0;
}
/* USB3 controller data */
@@ -209,7 +207,7 @@ MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
static struct platform_driver uniphier_regulator_driver = {
.probe = uniphier_regulator_probe,
- .remove = uniphier_regulator_remove,
+ .remove_new = uniphier_regulator_remove,
.driver = {
.name = "uniphier-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index cb1de24b9..86a626a4f 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -194,7 +194,7 @@ err_enable:
return ret;
}
-static int regulator_userspace_consumer_remove(struct platform_device *pdev)
+static void regulator_userspace_consumer_remove(struct platform_device *pdev)
{
struct userspace_consumer_data *data = platform_get_drvdata(pdev);
@@ -202,8 +202,6 @@ static int regulator_userspace_consumer_remove(struct platform_device *pdev)
if (data->enabled && !data->no_autoswitch)
regulator_bulk_disable(data->num_supplies, data->supplies);
-
- return 0;
}
static const struct of_device_id regulator_userspace_consumer_of_match[] = {
@@ -214,7 +212,7 @@ MODULE_DEVICE_TABLE(of, regulator_userspace_consumer_of_match);
static struct platform_driver regulator_userspace_consumer_driver = {
.probe = regulator_userspace_consumer_probe,
- .remove = regulator_userspace_consumer_remove,
+ .remove_new = regulator_userspace_consumer_remove,
.driver = {
.name = "reg-userspace-consumer",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index d5a160efd..0a0ee186c 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -345,7 +345,7 @@ static int regulator_virtual_probe(struct platform_device *pdev)
return 0;
}
-static int regulator_virtual_remove(struct platform_device *pdev)
+static void regulator_virtual_remove(struct platform_device *pdev)
{
struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
@@ -353,13 +353,11 @@ static int regulator_virtual_remove(struct platform_device *pdev)
if (drvdata->enabled)
regulator_disable(drvdata->regulator);
-
- return 0;
}
static struct platform_driver regulator_virtual_consumer_driver = {
.probe = regulator_virtual_probe,
- .remove = regulator_virtual_remove,
+ .remove_new = regulator_virtual_remove,
.driver = {
.name = "reg-virt-consumer",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1445bafca..9939a5d2c 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1158,14 +1158,12 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
return 0;
}
-static int wm8350_regulator_remove(struct platform_device *pdev)
+static void wm8350_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev);
-
- return 0;
}
int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
@@ -1306,7 +1304,7 @@ EXPORT_SYMBOL_GPL(wm8350_register_led);
static struct platform_driver wm8350_regulator_driver = {
.probe = wm8350_regulator_probe,
- .remove = wm8350_regulator_remove,
+ .remove_new = wm8350_regulator_remove,
.driver = {
.name = "wm8350-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 8fcda9b74..a1c62d15f 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -940,6 +940,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = {
.kick = imx_dsp_rproc_kick,
.load = imx_dsp_rproc_elf_load_segments,
.parse_fw = imx_dsp_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 913a5d206..a9dd58608 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -1165,6 +1165,22 @@ static const struct adsp_data sm8550_mpss_resource = {
.region_assign_idx = 2,
};
+static const struct adsp_data sc7280_wpss_resource = {
+ .crash_reason_smem = 626,
+ .firmware_name = "wpss.mdt",
+ .pas_id = 6,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .load_state = "wpss",
+ .ssr_name = "wpss",
+ .sysmon_name = "wpss",
+ .ssctl_id = 0x19,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
@@ -1178,7 +1194,10 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
{ .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sc7280-cdsp-pas", .data = &sm6350_cdsp_resource},
{ .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc7280-wpss-pas", .data = &sc7280_wpss_resource},
{ .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index ef8415a7c..ab882e3b7 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -158,8 +158,8 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
- dev_err(dev, "failed to send mailbox message, status = %d\n",
- ret);
+ dev_err(dev, "failed to send mailbox message (%pe)\n",
+ ERR_PTR(ret));
}
/* Put the DSP processor into reset */
@@ -170,7 +170,7 @@ static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
ret = reset_control_assert(kproc->reset);
if (ret) {
- dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
return ret;
}
@@ -180,7 +180,7 @@ static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret) {
- dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
if (reset_control_deassert(kproc->reset))
dev_warn(dev, "local-reset deassert back failed\n");
}
@@ -200,14 +200,14 @@ static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret) {
- dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+ dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
return ret;
}
lreset:
ret = reset_control_deassert(kproc->reset);
if (ret) {
- dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id))
dev_warn(dev, "module-reset assert back failed\n");
@@ -246,7 +246,7 @@ static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
*/
ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
if (ret < 0) {
- dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
mbox_free_channel(kproc->mbox);
return ret;
}
@@ -272,8 +272,8 @@ static int k3_dsp_rproc_prepare(struct rproc *rproc)
ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret)
- dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
- ret);
+ dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
@@ -296,7 +296,7 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc)
ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
kproc->ti_sci_id);
if (ret)
- dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
return ret;
}
@@ -561,9 +561,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
num_rmems = of_property_count_elems_of_size(np, "memory-region",
sizeof(phandle));
- if (num_rmems <= 0) {
- dev_err(dev, "device does not reserved memory regions, ret = %d\n",
- num_rmems);
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%pe)\n",
+ ERR_PTR(num_rmems));
return -EINVAL;
}
if (num_rmems < 2) {
@@ -575,8 +575,8 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
/* use reserved memory region 0 for vring DMA allocations */
ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
if (ret) {
- dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
- ret);
+ dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
@@ -687,11 +687,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
return -ENODEV;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
- if (ret) {
- dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
sizeof(*kproc));
@@ -711,39 +708,35 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
if (IS_ERR(kproc->ti_sci)) {
- ret = PTR_ERR(kproc->ti_sci);
- if (ret != -EPROBE_DEFER) {
- dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
- ret);
- }
+ ret = dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
kproc->ti_sci = NULL;
goto free_rproc;
}
ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
if (ret) {
- dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
goto put_sci;
}
kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(kproc->reset)) {
- ret = PTR_ERR(kproc->reset);
- dev_err(dev, "failed to get reset, status = %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(kproc->reset),
+ "failed to get reset\n");
goto put_sci;
}
kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
if (IS_ERR(kproc->tsp)) {
- dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
- ret);
- ret = PTR_ERR(kproc->tsp);
+ ret = dev_err_probe(dev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
goto put_sci;
}
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0) {
- dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
goto free_tsp;
}
@@ -753,15 +746,14 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
ret = k3_dsp_reserved_mem_init(kproc);
if (ret) {
- dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+ dev_err_probe(dev, ret, "reserved memory init failed\n");
goto release_tsp;
}
ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
NULL, &p_state);
if (ret) {
- dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
- ret);
+ dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
goto release_mem;
}
@@ -787,8 +779,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (data->uses_lreset) {
ret = reset_control_status(kproc->reset);
if (ret < 0) {
- dev_err(dev, "failed to get reset status, status = %d\n",
- ret);
+ dev_err_probe(dev, ret, "failed to get reset status\n");
goto release_mem;
} else if (ret == 0) {
dev_warn(dev, "local reset is deasserted for device\n");
@@ -799,8 +790,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
ret = rproc_add(rproc);
if (ret) {
- dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
- ret);
+ dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
goto release_mem;
}
@@ -813,19 +803,19 @@ release_mem:
release_tsp:
ret1 = ti_sci_proc_release(kproc->tsp);
if (ret1)
- dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+ dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret1));
free_tsp:
kfree(kproc->tsp);
put_sci:
ret1 = ti_sci_put_handle(kproc->ti_sci);
if (ret1)
- dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+ dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret1));
free_rproc:
rproc_free(rproc);
return ret;
}
-static int k3_dsp_rproc_remove(struct platform_device *pdev)
+static void k3_dsp_rproc_remove(struct platform_device *pdev)
{
struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
struct rproc *rproc = kproc->rproc;
@@ -835,8 +825,9 @@ static int k3_dsp_rproc_remove(struct platform_device *pdev)
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
if (ret) {
- dev_err(dev, "failed to detach proc, ret = %d\n", ret);
- return ret;
+ /* Note this error path leaks resources */
+ dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
+ return;
}
}
@@ -844,18 +835,16 @@ static int k3_dsp_rproc_remove(struct platform_device *pdev)
ret = ti_sci_proc_release(kproc->tsp);
if (ret)
- dev_err(dev, "failed to release proc, ret = %d\n", ret);
+ dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret));
kfree(kproc->tsp);
ret = ti_sci_put_handle(kproc->ti_sci);
if (ret)
- dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+ dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret));
k3_dsp_reserved_mem_exit(kproc);
rproc_free(kproc->rproc);
-
- return 0;
}
static const struct k3_dsp_mem_data c66_mems[] = {
@@ -906,7 +895,7 @@ MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
static struct platform_driver k3_dsp_rproc_driver = {
.probe = k3_dsp_rproc_probe,
- .remove = k3_dsp_rproc_remove,
+ .remove_new = k3_dsp_rproc_remove,
.driver = {
.name = "k3-dsp-rproc",
.of_match_table = k3_dsp_of_match,
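
The k3-dsp hunks above apply two related idioms: runtime error prints switch from "ret = %d" to the %pe format specifier (fed an ERR_PTR() so the errno prints symbolically), and probe-path failures collapse into dev_err_probe(). A minimal sketch of both, assuming hypothetical example_* names (only the kernel helpers are real):

#include <linux/device.h>
#include <linux/err.h>

/* Runtime path: %pe + ERR_PTR() prints "-EINVAL" rather than "-22". */
static int example_runtime_step(struct device *dev, int ret)
{
	if (ret < 0)
		dev_err(dev, "example step failed (%pe)\n", ERR_PTR(ret));
	return ret;
}

/*
 * Probe path: dev_err_probe() logs the message, stays quiet for
 * -EPROBE_DEFER, records the deferral reason for debugfs, and returns
 * the error, so it replaces a multi-line if-block.
 */
static int example_probe_step(struct device *dev, int ret)
{
	if (ret < 0)
		return dev_err_probe(dev, ret, "example resource missing\n");
	return 0;
}
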
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
index 42c9d5241..810fe7645 100644
--- a/drivers/reset/reset-brcmstb.c
+++ b/drivers/reset/reset-brcmstb.c
@@ -90,8 +90,7 @@ static int brcmstb_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(kdev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
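
This and the following reset-driver hunks replace the two-call platform_get_resource() + devm_ioremap_resource() sequence with a single helper. A sketch under a hypothetical example driver:

#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * Looks up IORESOURCE_MEM index 0 and ioremaps it in one call;
	 * res may be NULL when the raw resource is not needed, or use
	 * devm_platform_ioremap_resource(pdev, 0) as in the later hunks.
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}
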
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 7e46dbc04..7891d52fa 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -139,7 +139,6 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct meson_audio_arb_match_data *data;
struct meson_audio_arb_data *arb;
- struct resource *res;
int ret;
data = of_device_get_match_data(dev);
@@ -155,8 +154,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
if (IS_ERR(arb->clk))
return dev_err_probe(dev, PTR_ERR(arb->clk), "failed to get clock\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- arb->regs = devm_ioremap_resource(dev, res);
+ arb->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(arb->regs))
return PTR_ERR(arb->regs);
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index a7af051b1..f78be9789 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -108,6 +108,7 @@ static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson-axg-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ .compatible = "amlogic,meson-s4-reset", .data = &meson_s4_param},
+ { .compatible = "amlogic,c3-reset", .data = &meson_s4_param},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index f6c4f854f..8935ef95a 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -6,8 +6,8 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reboot.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
@@ -351,8 +351,7 @@ static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
}
}
- rc->info = (const struct npcm_reset_info *)
- of_match_device(dev->driver->of_match_table, dev)->data;
+ rc->info = device_get_match_data(dev);
switch (rc->info->bmc_id) {
case BMC_NPCM7XX:
npcm_usb_reset_npcm7xx(rc);
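
device_get_match_data() resolves the matched entry's .data through the fwnode layer, so it works for OF and ACPI alike and removes the manual of_match_device() walk deleted above. A minimal sketch (example_* names are hypothetical):

#include <linux/platform_device.h>
#include <linux/property.h>

struct example_info {
	int variant;
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_info *info;

	/* Returns the .data of the matching of_device_id/ACPI entry. */
	info = device_get_match_data(&pdev->dev);
	if (!info)
		return -ENODEV;

	dev_info(&pdev->dev, "probing variant %d\n", info->variant);
	return 0;
}
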
diff --git a/drivers/reset/reset-qcom-aoss.c b/drivers/reset/reset-qcom-aoss.c
index f52e90e36..93c84d70e 100644
--- a/drivers/reset/reset-qcom-aoss.c
+++ b/drivers/reset/reset-qcom-aoss.c
@@ -90,7 +90,6 @@ static int qcom_aoss_reset_probe(struct platform_device *pdev)
struct qcom_aoss_reset_data *data;
struct device *dev = &pdev->dev;
const struct qcom_aoss_desc *desc;
- struct resource *res;
desc = of_device_get_match_data(dev);
if (!desc)
@@ -101,8 +100,7 @@ static int qcom_aoss_reset_probe(struct platform_device *pdev)
return -ENOMEM;
data->desc = desc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/reset/reset-qcom-pdc.c b/drivers/reset/reset-qcom-pdc.c
index a3aae3f90..dce1fc1a6 100644
--- a/drivers/reset/reset-qcom-pdc.c
+++ b/drivers/reset/reset-qcom-pdc.c
@@ -114,7 +114,6 @@ static int qcom_pdc_reset_probe(struct platform_device *pdev)
struct qcom_pdc_reset_data *data;
struct device *dev = &pdev->dev;
void __iomem *base;
- struct resource *res;
desc = device_get_match_data(&pdev->dev);
if (!desc)
@@ -125,8 +124,7 @@ static int qcom_pdc_reset_probe(struct platform_device *pdev)
return -ENOMEM;
data->desc = desc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 7ea5adbf2..818cabcc9 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -169,8 +169,7 @@ static int reset_simple_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- membase = devm_ioremap_resource(dev, res);
+ membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(membase))
return PTR_ERR(membase);
diff --git a/drivers/reset/reset-sunplus.c b/drivers/reset/reset-sunplus.c
index 2f23ecaa7..df58decab 100644
--- a/drivers/reset/reset-sunplus.c
+++ b/drivers/reset/reset-sunplus.c
@@ -176,8 +176,7 @@ static int sp_reset_probe(struct platform_device *pdev)
if (!reset)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reset->base = devm_ioremap_resource(dev, res);
+ reset->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(reset->base))
return PTR_ERR(reset->base);
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index 97b3ddcda..5f9f2f799 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -58,8 +58,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
priv->data->nrsts > MAX_RSTS))
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->rdata.membase = devm_ioremap_resource(dev, res);
+ priv->rdata.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->rdata.membase))
return PTR_ERR(priv->rdata.membase);
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
index c1ba04f6f..2324060b7 100644
--- a/drivers/reset/sti/reset-syscfg.c
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -7,10 +7,11 @@
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/types.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -183,14 +184,14 @@ static int syscfg_reset_controller_register(struct device *dev,
int syscfg_reset_probe(struct platform_device *pdev)
{
struct device *dev = pdev ? &pdev->dev : NULL;
- const struct of_device_id *match;
+ const void *data;
if (!dev || !dev->driver)
return -ENODEV;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data)
+ data = device_get_match_data(&pdev->dev);
+ if (!data)
return -EINVAL;
- return syscfg_reset_controller_register(dev, match->data);
+ return syscfg_reset_controller_register(dev, data);
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b1e1d277d..c63e32d01 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -373,6 +373,19 @@ config RTC_DRV_MAX8997
This driver can also be built as a module. If so, the module
will be called rtc-max8997.
+config RTC_DRV_MAX31335
+ tristate "Analog Devices MAX31335"
+ depends on I2C
+ depends on COMMON_CLK
+ depends on HWMON || HWMON=n
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Analog Devices
+ MAX31335.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max31335.
+
config RTC_DRV_MAX77686
tristate "Maxim MAX77686"
depends on MFD_MAX77686 || MFD_MAX77620 || MFD_MAX77714 || COMPILE_TEST
@@ -578,6 +591,18 @@ config RTC_DRV_TPS6586X
along with alarm. This driver supports the RTC driver for
the TPS6586X RTC module.
+config RTC_DRV_TPS6594
+ tristate "TI TPS6594 RTC driver"
+ depends on MFD_TPS6594
+ default MFD_TPS6594
+ help
+ TI Power Management IC TPS6594 supports RTC functionality
+ along with alarm. This driver supports the RTC driver for
+ the TPS6594 RTC module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-tps6594.
+
config RTC_DRV_TPS65910
tristate "TI TPS65910 RTC driver"
depends on MFD_TPS65910
@@ -1705,6 +1730,7 @@ config RTC_DRV_LPC24XX
tristate "NXP RTC for LPC178x/18xx/408x/43xx"
depends on ARCH_LPC18XX || COMPILE_TEST
depends on OF && HAS_IOMEM
+ depends on COMMON_CLK
help
This enables support for the NXP RTC which can be found on
NXP LPC178x/18xx/408x/43xx devices.
@@ -1931,6 +1957,17 @@ config RTC_DRV_TI_K3
This driver can also be built as a module, if so, the module
will be called "rtc-ti-k3".
+config RTC_DRV_MA35D1
+ tristate "Nuvoton MA35D1 RTC"
+ depends on ARCH_MA35 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ If you say yes here you get support for the Nuvoton MA35D1
+ On-Chip Real Time Clock.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-ma35d1".
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7b03c3abf..6efff381c 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -88,6 +88,8 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
+obj-$(CONFIG_RTC_DRV_MA35D1) += rtc-ma35d1.o
+obj-$(CONFIG_RTC_DRV_MAX31335) += rtc-max31335.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_MAX6916) += rtc-max6916.o
@@ -176,6 +178,7 @@ obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
+obj-$(CONFIG_RTC_DRV_TPS6594) += rtc-tps6594.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index edfd942f8..921ee1827 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -256,7 +256,7 @@ static int rtc_device_get_id(struct device *dev)
of_id = of_alias_get_id(dev->parent->of_node, "rtc");
if (of_id >= 0) {
- id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL);
+ id = ida_alloc_range(&rtc_ida, of_id, of_id, GFP_KERNEL);
if (id < 0)
dev_warn(dev, "/aliases ID %d not available\n", of_id);
}
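
Note the bound change in the hunk above: ida_simple_get() takes an exclusive end, while ida_alloc_range() takes an inclusive max, so reserving exactly of_id turns "of_id, of_id + 1" into "of_id, of_id". A standalone sketch of the replacement API (example_ida is illustrative):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Reserve exactly one fixed ID; -ENOSPC means it is already taken. */
static int example_claim_fixed_id(int wanted)
{
	return ida_alloc_range(&example_ida, wanted, wanted, GFP_KERNEL);
}

The ID is released again with ida_free(&example_ida, id).
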
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index eaf2c9ab9..fa642bba3 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -99,7 +99,7 @@ struct ac100_rtc_dev {
struct clk_hw_onecell_data *clk_data;
};
-/**
+/*
* Clock controls for 3 clock output pins
*/
@@ -378,7 +378,7 @@ static void ac100_rtc_unregister_clks(struct ac100_rtc_dev *chip)
clk_unregister_fixed_rate(chip->rtc_32k_clk->clk);
}
-/**
+/*
* RTC related bits
*/
static int ac100_rtc_get_time(struct device *dev, struct rtc_time *rtc_tm)
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 2f5d60622..859397541 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -377,7 +377,6 @@ static int da9063_rtc_probe(struct platform_device *pdev)
{
struct da9063_compatible_rtc *rtc;
const struct da9063_compatible_rtc_regmap *config;
- const struct of_device_id *match;
int irq_alarm;
u8 data[RTC_DATA_LEN];
int ret;
@@ -385,14 +384,11 @@ static int da9063_rtc_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENXIO;
- match = of_match_node(da9063_compatible_reg_id_table,
- pdev->dev.of_node);
-
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- rtc->config = match->data;
+ rtc->config = device_get_match_data(&pdev->dev);
if (of_device_is_compatible(pdev->dev.of_node, "dlg,da9063-rtc")) {
struct da9063 *chip = dev_get_drvdata(pdev->dev.parent);
@@ -411,57 +407,49 @@ static int da9063_rtc_probe(struct platform_device *pdev)
config->rtc_enable_reg,
config->rtc_enable_mask,
config->rtc_enable_mask);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to enable RTC\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable RTC\n");
ret = regmap_update_bits(rtc->regmap,
config->rtc_enable_32k_crystal_reg,
config->rtc_crystal_mask,
config->rtc_crystal_mask);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to run 32kHz oscillator\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to run 32kHz oscillator\n");
ret = regmap_update_bits(rtc->regmap,
config->rtc_alarm_secs_reg,
config->rtc_alarm_status_mask,
0);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to access RTC alarm register\n");
ret = regmap_update_bits(rtc->regmap,
config->rtc_alarm_secs_reg,
DA9063_ALARM_STATUS_ALARM,
DA9063_ALARM_STATUS_ALARM);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to access RTC alarm register\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to access RTC alarm register\n");
ret = regmap_update_bits(rtc->regmap,
config->rtc_alarm_year_reg,
config->rtc_tick_on_mask,
0);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to disable TICKs\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to disable TICKs\n");
data[RTC_SEC] = 0;
ret = regmap_bulk_read(rtc->regmap,
config->rtc_alarm_secs_reg,
&data[config->rtc_data_start],
config->rtc_alarm_len);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to read initial alarm data: %d\n",
- ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to read initial alarm data\n");
platform_set_drvdata(pdev, rtc);
@@ -485,25 +473,29 @@ static int da9063_rtc_probe(struct platform_device *pdev)
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc_dev->features);
}
- irq_alarm = platform_get_irq_byname(pdev, "ALARM");
- if (irq_alarm < 0)
+ irq_alarm = platform_get_irq_byname_optional(pdev, "ALARM");
+ if (irq_alarm >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
+ da9063_alarm_event,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "ALARM", rtc);
+ if (ret)
+ dev_err(&pdev->dev,
+ "Failed to request ALARM IRQ %d: %d\n",
+ irq_alarm, ret);
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq_alarm);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to set IRQ %d as a wake IRQ: %d\n",
+ irq_alarm, ret);
+
+ device_init_wakeup(&pdev->dev, true);
+ } else if (irq_alarm != -ENXIO) {
return irq_alarm;
-
- ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
- da9063_alarm_event,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "ALARM", rtc);
- if (ret)
- dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
- irq_alarm, ret);
-
- ret = dev_pm_set_wake_irq(&pdev->dev, irq_alarm);
- if (ret)
- dev_warn(&pdev->dev,
- "Failed to set IRQ %d as a wake IRQ: %d\n",
- irq_alarm, ret);
-
- device_init_wakeup(&pdev->dev, true);
+ } else {
+ clear_bit(RTC_FEATURE_ALARM, rtc->rtc_dev->features);
+ }
return devm_rtc_register_device(rtc->rtc_dev);
}
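
The da9063 rework above treats the ALARM interrupt as optional: platform_get_irq_byname_optional() returns -ENXIO without logging when the line is simply not described, so the driver can drop the alarm feature instead of failing probe, while still propagating real errors such as -EPROBE_DEFER. The shape of that pattern, reduced to a sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_alarm_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_optional_alarm(struct platform_device *pdev,
					  void *priv, bool *has_alarm)
{
	int irq = platform_get_irq_byname_optional(pdev, "ALARM");

	if (irq >= 0) {
		*has_alarm = true;
		return devm_request_threaded_irq(&pdev->dev, irq, NULL,
						 example_alarm_thread,
						 IRQF_ONESHOT, "ALARM", priv);
	}
	if (irq != -ENXIO)
		return irq;	/* a real failure, e.g. -EPROBE_DEFER */

	*has_alarm = false;	/* IRQ not wired: run without alarms */
	return 0;
}
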
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 89d7b085f..1485a6ae5 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -536,6 +536,8 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
return 0;
}
+#if IS_ENABLED(CONFIG_I2C)
+
#ifdef CONFIG_PM_SLEEP
static int ds3232_suspend(struct device *dev)
{
@@ -564,8 +566,6 @@ static const struct dev_pm_ops ds3232_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ds3232_suspend, ds3232_resume)
};
-#if IS_ENABLED(CONFIG_I2C)
-
static int ds3232_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
diff --git a/drivers/rtc/rtc-ma35d1.c b/drivers/rtc/rtc-ma35d1.c
new file mode 100644
index 000000000..cfcfc2806
--- /dev/null
+++ b/drivers/rtc/rtc-ma35d1.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for Nuvoton MA35D1
+ *
+ * Copyright (C) 2023 Nuvoton Technology Corp.
+ */
+
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* MA35D1 RTC Control Registers */
+#define MA35_REG_RTC_INIT 0x00
+#define MA35_REG_RTC_SINFASTS 0x04
+#define MA35_REG_RTC_FREQADJ 0x08
+#define MA35_REG_RTC_TIME 0x0c
+#define MA35_REG_RTC_CAL 0x10
+#define MA35_REG_RTC_CLKFMT 0x14
+#define MA35_REG_RTC_WEEKDAY 0x18
+#define MA35_REG_RTC_TALM 0x1c
+#define MA35_REG_RTC_CALM 0x20
+#define MA35_REG_RTC_LEAPYEAR 0x24
+#define MA35_REG_RTC_INTEN 0x28
+#define MA35_REG_RTC_INTSTS 0x2c
+
+/* register MA35_REG_RTC_INIT */
+#define RTC_INIT_ACTIVE BIT(0)
+#define RTC_INIT_MAGIC_CODE 0xa5eb1357
+
+/* register MA35_REG_RTC_CLKFMT */
+#define RTC_CLKFMT_24HEN BIT(0)
+#define RTC_CLKFMT_DCOMPEN BIT(16)
+
+/* register MA35_REG_RTC_INTEN */
+#define RTC_INTEN_ALMIEN BIT(0)
+#define RTC_INTEN_UIEN BIT(1)
+#define RTC_INTEN_CLKFIEN BIT(24)
+#define RTC_INTEN_CLKSTIEN BIT(25)
+
+/* register MA35_REG_RTC_INTSTS */
+#define RTC_INTSTS_ALMIF BIT(0)
+#define RTC_INTSTS_UIF BIT(1)
+#define RTC_INTSTS_CLKFIF BIT(24)
+#define RTC_INTSTS_CLKSTIF BIT(25)
+
+#define RTC_INIT_TIMEOUT 250
+
+struct ma35_rtc {
+ int irq_num;
+ void __iomem *rtc_reg;
+ struct rtc_device *rtcdev;
+};
+
+static u32 rtc_reg_read(struct ma35_rtc *p, u32 offset)
+{
+ return __raw_readl(p->rtc_reg + offset);
+}
+
+static inline void rtc_reg_write(struct ma35_rtc *p, u32 offset, u32 value)
+{
+ __raw_writel(value, p->rtc_reg + offset);
+}
+
+static irqreturn_t ma35d1_rtc_interrupt(int irq, void *data)
+{
+ struct ma35_rtc *rtc = (struct ma35_rtc *)data;
+ unsigned long events = 0, rtc_irq;
+
+ rtc_irq = rtc_reg_read(rtc, MA35_REG_RTC_INTSTS);
+
+ if (rtc_irq & RTC_INTSTS_ALMIF) {
+ rtc_reg_write(rtc, MA35_REG_RTC_INTSTS, RTC_INTSTS_ALMIF);
+ events |= RTC_AF | RTC_IRQF;
+ }
+
+ rtc_update_irq(rtc->rtcdev, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int ma35d1_rtc_init(struct ma35_rtc *rtc, u32 ms_timeout)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
+
+ do {
+ if (rtc_reg_read(rtc, MA35_REG_RTC_INIT) & RTC_INIT_ACTIVE)
+ return 0;
+
+ rtc_reg_write(rtc, MA35_REG_RTC_INIT, RTC_INIT_MAGIC_CODE);
+
+ mdelay(1);
+
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int ma35d1_alarm_irq_enable(struct device *dev, u32 enabled)
+{
+ struct ma35_rtc *rtc = dev_get_drvdata(dev);
+ u32 reg_ien;
+
+ reg_ien = rtc_reg_read(rtc, MA35_REG_RTC_INTEN);
+
+ if (enabled)
+ rtc_reg_write(rtc, MA35_REG_RTC_INTEN, reg_ien | RTC_INTEN_ALMIEN);
+ else
+ rtc_reg_write(rtc, MA35_REG_RTC_INTEN, reg_ien & ~RTC_INTEN_ALMIEN);
+
+ return 0;
+}
+
+static int ma35d1_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ma35_rtc *rtc = dev_get_drvdata(dev);
+ u32 time, cal, wday;
+
+ do {
+ time = rtc_reg_read(rtc, MA35_REG_RTC_TIME);
+ cal = rtc_reg_read(rtc, MA35_REG_RTC_CAL);
+ wday = rtc_reg_read(rtc, MA35_REG_RTC_WEEKDAY);
+ } while (time != rtc_reg_read(rtc, MA35_REG_RTC_TIME) ||
+ cal != rtc_reg_read(rtc, MA35_REG_RTC_CAL));
+
+ tm->tm_mday = bcd2bin(cal >> 0);
+ tm->tm_wday = wday;
+ tm->tm_mon = bcd2bin(cal >> 8);
+ tm->tm_mon = tm->tm_mon - 1;
+ tm->tm_year = bcd2bin(cal >> 16) + 100;
+
+ tm->tm_sec = bcd2bin(time >> 0);
+ tm->tm_min = bcd2bin(time >> 8);
+ tm->tm_hour = bcd2bin(time >> 16);
+
+ return rtc_valid_tm(tm);
+}
+
+static int ma35d1_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ma35_rtc *rtc = dev_get_drvdata(dev);
+ u32 val;
+
+ val = bin2bcd(tm->tm_mday) << 0 | bin2bcd(tm->tm_mon + 1) << 8 |
+ bin2bcd(tm->tm_year - 100) << 16;
+ rtc_reg_write(rtc, MA35_REG_RTC_CAL, val);
+
+ val = bin2bcd(tm->tm_sec) << 0 | bin2bcd(tm->tm_min) << 8 |
+ bin2bcd(tm->tm_hour) << 16;
+ rtc_reg_write(rtc, MA35_REG_RTC_TIME, val);
+
+ val = tm->tm_wday;
+ rtc_reg_write(rtc, MA35_REG_RTC_WEEKDAY, val);
+
+ return 0;
+}
+
+static int ma35d1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct ma35_rtc *rtc = dev_get_drvdata(dev);
+ u32 talm, calm;
+
+ talm = rtc_reg_read(rtc, MA35_REG_RTC_TALM);
+ calm = rtc_reg_read(rtc, MA35_REG_RTC_CALM);
+
+ alrm->time.tm_mday = bcd2bin(calm >> 0);
+ alrm->time.tm_mon = bcd2bin(calm >> 8);
+ alrm->time.tm_mon = alrm->time.tm_mon - 1;
+
+ alrm->time.tm_year = bcd2bin(calm >> 16) + 100;
+
+ alrm->time.tm_sec = bcd2bin(talm >> 0);
+ alrm->time.tm_min = bcd2bin(talm >> 8);
+ alrm->time.tm_hour = bcd2bin(talm >> 16);
+
+ return rtc_valid_tm(&alrm->time);
+}
+
+static int ma35d1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct ma35_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long val;
+
+ val = bin2bcd(alrm->time.tm_mday) << 0 | bin2bcd(alrm->time.tm_mon + 1) << 8 |
+ bin2bcd(alrm->time.tm_year - 100) << 16;
+ rtc_reg_write(rtc, MA35_REG_RTC_CALM, val);
+
+ val = bin2bcd(alrm->time.tm_sec) << 0 | bin2bcd(alrm->time.tm_min) << 8 |
+ bin2bcd(alrm->time.tm_hour) << 16;
+ rtc_reg_write(rtc, MA35_REG_RTC_TALM, val);
+
+ ma35d1_alarm_irq_enable(dev, alrm->enabled);
+
+ return 0;
+}
+
+static const struct rtc_class_ops ma35d1_rtc_ops = {
+ .read_time = ma35d1_rtc_read_time,
+ .set_time = ma35d1_rtc_set_time,
+ .read_alarm = ma35d1_rtc_read_alarm,
+ .set_alarm = ma35d1_rtc_set_alarm,
+ .alarm_irq_enable = ma35d1_alarm_irq_enable,
+};
+
+static int ma35d1_rtc_probe(struct platform_device *pdev)
+{
+ struct ma35_rtc *rtc;
+ struct clk *clk;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->rtc_reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->rtc_reg))
+ return PTR_ERR(rtc->rtc_reg);
+
+ clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk), "failed to find rtc clock\n");
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ if (!(rtc_reg_read(rtc, MA35_REG_RTC_INIT) & RTC_INIT_ACTIVE)) {
+ ret = ma35d1_rtc_init(rtc, RTC_INIT_TIMEOUT);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "rtc init failed\n");
+ }
+
+ rtc->irq_num = platform_get_irq(pdev, 0);
+
+ ret = devm_request_irq(&pdev->dev, rtc->irq_num, ma35d1_rtc_interrupt,
+ IRQF_NO_SUSPEND, "ma35d1rtc", rtc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request rtc irq\n");
+
+ platform_set_drvdata(pdev, rtc);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtcdev))
+ return PTR_ERR(rtc->rtcdev);
+
+ rtc->rtcdev->ops = &ma35d1_rtc_ops;
+ rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+
+ ret = devm_rtc_register_device(rtc->rtcdev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register rtc device\n");
+
+ return 0;
+}
+
+static int ma35d1_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ma35_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(rtc->irq_num);
+
+ return 0;
+}
+
+static int ma35d1_rtc_resume(struct platform_device *pdev)
+{
+ struct ma35_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(rtc->irq_num);
+
+ return 0;
+}
+
+static const struct of_device_id ma35d1_rtc_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-rtc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ma35d1_rtc_of_match);
+
+static struct platform_driver ma35d1_rtc_driver = {
+ .suspend = ma35d1_rtc_suspend,
+ .resume = ma35d1_rtc_resume,
+ .probe = ma35d1_rtc_probe,
+ .driver = {
+ .name = "rtc-ma35d1",
+ .of_match_table = ma35d1_rtc_of_match,
+ },
+};
+
+module_platform_driver(ma35d1_rtc_driver);
+
+MODULE_AUTHOR("Ming-Jen Chen <mjchen@nuvoton.com>");
+MODULE_DESCRIPTION("MA35D1 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
new file mode 100644
index 000000000..a2441e5c2
--- /dev/null
+++ b/drivers/rtc/rtc-max31335.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for the MAX31335
+ *
+ * Copyright (C) 2023 Analog Devices
+ *
+ * Antoniu Miclaus <antoniu.miclaus@analog.com>
+ *
+ */
+
+#include <asm-generic/unaligned.h>
+#include <linux/bcd.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/util_macros.h>
+
+/* MAX31335 Register Map */
+#define MAX31335_STATUS1 0x00
+#define MAX31335_INT_EN1 0x01
+#define MAX31335_STATUS2 0x02
+#define MAX31335_INT_EN2 0x03
+#define MAX31335_RTC_RESET 0x04
+#define MAX31335_RTC_CONFIG 0x05
+#define MAX31335_RTC_CONFIG2 0x06
+#define MAX31335_TIMESTAMP_CONFIG 0x07
+#define MAX31335_TIMER_CONFIG 0x08
+#define MAX31335_SECONDS_1_128 0x09
+#define MAX31335_SECONDS 0x0A
+#define MAX31335_MINUTES 0x0B
+#define MAX31335_HOURS 0x0C
+#define MAX31335_DAY 0x0D
+#define MAX31335_DATE 0x0E
+#define MAX31335_MONTH 0x0F
+#define MAX31335_YEAR 0x10
+#define MAX31335_ALM1_SEC 0x11
+#define MAX31335_ALM1_MIN 0x12
+#define MAX31335_ALM1_HRS 0x13
+#define MAX31335_ALM1_DAY_DATE 0x14
+#define MAX31335_ALM1_MON 0x15
+#define MAX31335_ALM1_YEAR 0x16
+#define MAX31335_ALM2_MIN 0x17
+#define MAX31335_ALM2_HRS 0x18
+#define MAX31335_ALM2_DAY_DATE 0x19
+#define MAX31335_TIMER_COUNT 0x1A
+#define MAX31335_TIMER_INIT 0x1B
+#define MAX31335_PWR_MGMT 0x1C
+#define MAX31335_TRICKLE_REG 0x1D
+#define MAX31335_AGING_OFFSET 0x1E
+#define MAX31335_TS_CONFIG 0x30
+#define MAX31335_TEMP_ALARM_HIGH_MSB 0x31
+#define MAX31335_TEMP_ALARM_HIGH_LSB 0x32
+#define MAX31335_TEMP_ALARM_LOW_MSB 0x33
+#define MAX31335_TEMP_ALARM_LOW_LSB 0x34
+#define MAX31335_TEMP_DATA_MSB 0x35
+#define MAX31335_TEMP_DATA_LSB 0x36
+#define MAX31335_TS0_SEC_1_128 0x40
+#define MAX31335_TS0_SEC 0x41
+#define MAX31335_TS0_MIN 0x42
+#define MAX31335_TS0_HOUR 0x43
+#define MAX31335_TS0_DATE 0x44
+#define MAX31335_TS0_MONTH 0x45
+#define MAX31335_TS0_YEAR 0x46
+#define MAX31335_TS0_FLAGS 0x47
+#define MAX31335_TS1_SEC_1_128 0x48
+#define MAX31335_TS1_SEC 0x49
+#define MAX31335_TS1_MIN 0x4A
+#define MAX31335_TS1_HOUR 0x4B
+#define MAX31335_TS1_DATE 0x4C
+#define MAX31335_TS1_MONTH 0x4D
+#define MAX31335_TS1_YEAR 0x4E
+#define MAX31335_TS1_FLAGS 0x4F
+#define MAX31335_TS2_SEC_1_128 0x50
+#define MAX31335_TS2_SEC 0x51
+#define MAX31335_TS2_MIN 0x52
+#define MAX31335_TS2_HOUR 0x53
+#define MAX31335_TS2_DATE 0x54
+#define MAX31335_TS2_MONTH 0x55
+#define MAX31335_TS2_YEAR 0x56
+#define MAX31335_TS2_FLAGS 0x57
+#define MAX31335_TS3_SEC_1_128 0x58
+#define MAX31335_TS3_SEC 0x59
+#define MAX31335_TS3_MIN 0x5A
+#define MAX31335_TS3_HOUR 0x5B
+#define MAX31335_TS3_DATE 0x5C
+#define MAX31335_TS3_MONTH 0x5D
+#define MAX31335_TS3_YEAR 0x5E
+#define MAX31335_TS3_FLAGS 0x5F
+
+/* MAX31335_STATUS1 Bit Definitions */
+#define MAX31335_STATUS1_PSDECT BIT(7)
+#define MAX31335_STATUS1_OSF BIT(6)
+#define MAX31335_STATUS1_PFAIL BIT(5)
+#define MAX31335_STATUS1_VBATLOW BIT(4)
+#define MAX31335_STATUS1_DIF BIT(3)
+#define MAX31335_STATUS1_TIF BIT(2)
+#define MAX31335_STATUS1_A2F BIT(1)
+#define MAX31335_STATUS1_A1F BIT(0)
+
+/* MAX31335_INT_EN1 Bit Definitions */
+#define MAX31335_INT_EN1_DOSF BIT(6)
+#define MAX31335_INT_EN1_PFAILE BIT(5)
+#define MAX31335_INT_EN1_VBATLOWE BIT(4)
+#define MAX31335_INT_EN1_DIE BIT(3)
+#define MAX31335_INT_EN1_TIE BIT(2)
+#define MAX31335_INT_EN1_A2IE BIT(1)
+#define MAX31335_INT_EN1_A1IE BIT(0)
+
+/* MAX31335_STATUS2 Bit Definitions */
+#define MAX31335_STATUS2_TEMP_RDY BIT(2)
+#define MAX31335_STATUS2_OTF BIT(1)
+#define MAX31335_STATUS2_UTF BIT(0)
+
+/* MAX31335_INT_EN2 Bit Definitions */
+#define MAX31335_INT_EN2_TEMP_RDY_EN BIT(2)
+#define MAX31335_INT_EN2_OTIE BIT(1)
+#define MAX31335_INT_EN2_UTIE BIT(0)
+
+/* MAX31335_RTC_RESET Bit Definitions */
+#define MAX31335_RTC_RESET_SWRST BIT(0)
+
+/* MAX31335_RTC_CONFIG1 Bit Definitions */
+#define MAX31335_RTC_CONFIG1_EN_IO BIT(6)
+#define MAX31335_RTC_CONFIG1_A1AC GENMASK(5, 4)
+#define MAX31335_RTC_CONFIG1_DIP BIT(3)
+#define MAX31335_RTC_CONFIG1_I2C_TIMEOUT BIT(1)
+#define MAX31335_RTC_CONFIG1_EN_OSC BIT(0)
+
+/* MAX31335_RTC_CONFIG2 Bit Definitions */
+#define MAX31335_RTC_CONFIG2_ENCLKO BIT(2)
+#define MAX31335_RTC_CONFIG2_CLKO_HZ GENMASK(1, 0)
+
+/* MAX31335_TIMESTAMP_CONFIG Bit Definitions */
+#define MAX31335_TIMESTAMP_CONFIG_TSVLOW BIT(5)
+#define MAX31335_TIMESTAMP_CONFIG_TSPWM BIT(4)
+#define MAX31335_TIMESTAMP_CONFIG_TSDIN BIT(3)
+#define MAX31335_TIMESTAMP_CONFIG_TSOW BIT(2)
+#define MAX31335_TIMESTAMP_CONFIG_TSR BIT(1)
+#define MAX31335_TIMESTAMP_CONFIG_TSE BIT(0)
+
+/* MAX31335_TIMER_CONFIG Bit Definitions */
+#define MAX31335_TIMER_CONFIG_TE BIT(4)
+#define MAX31335_TIMER_CONFIG_TPAUSE BIT(3)
+#define MAX31335_TIMER_CONFIG_TRPT BIT(2)
+#define MAX31335_TIMER_CONFIG_TFS GENMASK(1, 0)
+
+/* MAX31335_HOURS Bit Definitions */
+#define MAX31335_HOURS_F_24_12 BIT(6)
+#define MAX31335_HOURS_HR_20_AM_PM BIT(5)
+
+/* MAX31335_MONTH Bit Definitions */
+#define MAX31335_MONTH_CENTURY BIT(7)
+
+/* MAX31335_PWR_MGMT Bit Definitions */
+#define MAX31335_PWR_MGMT_PFVT BIT(0)
+
+/* MAX31335_TRICKLE_REG Bit Definitions */
+#define MAX31335_TRICKLE_REG_TRICKLE GENMASK(3, 1)
+#define MAX31335_TRICKLE_REG_EN_TRICKLE BIT(0)
+
+/* MAX31335_TS_CONFIG Bit Definitions */
+#define MAX31335_TS_CONFIG_AUTO BIT(4)
+#define MAX31335_TS_CONFIG_CONVERT_T BIT(3)
+#define MAX31335_TS_CONFIG_TSINT GENMASK(2, 0)
+
+/* MAX31335_TS_FLAGS Bit Definitions */
+#define MAX31335_TS_FLAGS_VLOWF BIT(3)
+#define MAX31335_TS_FLAGS_VBATF BIT(2)
+#define MAX31335_TS_FLAGS_VCCF BIT(1)
+#define MAX31335_TS_FLAGS_DINF BIT(0)
+
+/* MAX31335 Miscellaneous Definitions */
+#define MAX31335_TRICKLE_SCHOTTKY_DIODE 1
+#define MAX31335_TRICKLE_STANDARD_DIODE 4
+#define MAX31335_RAM_SIZE 32
+#define MAX31335_TIME_SIZE 0x07
+
+#define clk_hw_to_max31335(_hw) container_of(_hw, struct max31335_data, clkout)
+
+struct max31335_data {
+ struct regmap *regmap;
+ struct rtc_device *rtc;
+ struct clk_hw clkout;
+};
+
+static const int max31335_clkout_freq[] = { 1, 64, 1024, 32768 };
+
+static const u16 max31335_trickle_resistors[] = {3000, 6000, 11000};
+
+static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* time keeping registers */
+ if (reg >= MAX31335_SECONDS &&
+ reg < MAX31335_SECONDS + MAX31335_TIME_SIZE)
+ return true;
+
+ /* interrupt status register */
+ if (reg == MAX31335_STATUS1)
+ return true;
+
+ /* temperature registers */
+ if (reg == MAX31335_TEMP_DATA_MSB || reg == MAX31335_TEMP_DATA_LSB)
+ return true;
+
+ return false;
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x5F,
+ .volatile_reg = max31335_volatile_reg,
+};
+
+static int max31335_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ u8 date[7];
+ int ret;
+
+ ret = regmap_bulk_read(max31335->regmap, MAX31335_SECONDS, date,
+ sizeof(date));
+ if (ret)
+ return ret;
+
+ tm->tm_sec = bcd2bin(date[0] & 0x7f);
+ tm->tm_min = bcd2bin(date[1] & 0x7f);
+ tm->tm_hour = bcd2bin(date[2] & 0x3f);
+ tm->tm_wday = bcd2bin(date[3] & 0x7) - 1;
+ tm->tm_mday = bcd2bin(date[4] & 0x3f);
+ tm->tm_mon = bcd2bin(date[5] & 0x1f) - 1;
+ tm->tm_year = bcd2bin(date[6]) + 100;
+
+ if (FIELD_GET(MAX31335_MONTH_CENTURY, date[5]))
+ tm->tm_year += 100;
+
+ return 0;
+}
+
+static int max31335_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ u8 date[7];
+
+ date[0] = bin2bcd(tm->tm_sec);
+ date[1] = bin2bcd(tm->tm_min);
+ date[2] = bin2bcd(tm->tm_hour);
+ date[3] = bin2bcd(tm->tm_wday + 1);
+ date[4] = bin2bcd(tm->tm_mday);
+ date[5] = bin2bcd(tm->tm_mon + 1);
+ date[6] = bin2bcd(tm->tm_year % 100);
+
+ if (tm->tm_year >= 200)
+ date[5] |= FIELD_PREP(MAX31335_MONTH_CENTURY, 1);
+
+ return regmap_bulk_write(max31335->regmap, MAX31335_SECONDS, date,
+ sizeof(date));
+}
+
+static int max31335_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ int ret, ctrl, status;
+ struct rtc_time time;
+ u8 regs[6];
+
+ ret = regmap_bulk_read(max31335->regmap, MAX31335_ALM1_SEC, regs,
+ sizeof(regs));
+ if (ret)
+ return ret;
+
+ alrm->time.tm_sec = bcd2bin(regs[0] & 0x7f);
+ alrm->time.tm_min = bcd2bin(regs[1] & 0x7f);
+ alrm->time.tm_hour = bcd2bin(regs[2] & 0x3f);
+ alrm->time.tm_mday = bcd2bin(regs[3] & 0x3f);
+ alrm->time.tm_mon = bcd2bin(regs[4] & 0x1f) - 1;
+ alrm->time.tm_year = bcd2bin(regs[5]) + 100;
+
+ ret = max31335_read_time(dev, &time);
+ if (ret)
+ return ret;
+
+ if (time.tm_year >= 200)
+ alrm->time.tm_year += 100;
+
+ ret = regmap_read(max31335->regmap, MAX31335_INT_EN1, &ctrl);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(max31335->regmap, MAX31335_STATUS1, &status);
+ if (ret)
+ return ret;
+
+ alrm->enabled = FIELD_GET(MAX31335_INT_EN1_A1IE, ctrl);
+ alrm->pending = FIELD_GET(MAX31335_STATUS1_A1F, status);
+
+ return 0;
+}
+
+static int max31335_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ unsigned int reg;
+ u8 regs[6];
+ int ret;
+
+ regs[0] = bin2bcd(alrm->time.tm_sec);
+ regs[1] = bin2bcd(alrm->time.tm_min);
+ regs[2] = bin2bcd(alrm->time.tm_hour);
+ regs[3] = bin2bcd(alrm->time.tm_mday);
+ regs[4] = bin2bcd(alrm->time.tm_mon + 1);
+ regs[5] = bin2bcd(alrm->time.tm_year % 100);
+
+ ret = regmap_bulk_write(max31335->regmap, MAX31335_ALM1_SEC,
+ regs, sizeof(regs));
+ if (ret)
+ return ret;
+
+ reg = FIELD_PREP(MAX31335_INT_EN1_A1IE, alrm->enabled);
+ ret = regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+ MAX31335_INT_EN1_A1IE, reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(max31335->regmap, MAX31335_STATUS1,
+ MAX31335_STATUS1_A1F, 0);
+
+ return ret;
+}
+
+static int max31335_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+ MAX31335_INT_EN1_A1IE, enabled);
+}
+
+static irqreturn_t max31335_handle_irq(int irq, void *dev_id)
+{
+ struct max31335_data *max31335 = dev_id;
+ bool status;
+ int ret;
+
+ ret = regmap_update_bits_check(max31335->regmap, MAX31335_STATUS1,
+ MAX31335_STATUS1_A1F, 0, &status);
+ if (ret)
+ return IRQ_HANDLED;
+
+ if (status)
+ rtc_update_irq(max31335->rtc, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops max31335_rtc_ops = {
+ .read_time = max31335_read_time,
+ .set_time = max31335_set_time,
+ .read_alarm = max31335_read_alarm,
+ .set_alarm = max31335_set_alarm,
+ .alarm_irq_enable = max31335_alarm_irq_enable,
+};
+
+static int max31335_trickle_charger_setup(struct device *dev,
+ struct max31335_data *max31335)
+{
+ u32 ohms, chargeable;
+ int i, trickle_cfg;
+ const char *diode;
+
+ if (device_property_read_u32(dev, "aux-voltage-chargeable",
+ &chargeable))
+ return 0;
+
+ if (device_property_read_u32(dev, "trickle-resistor-ohms", &ohms))
+ return 0;
+
+ if (device_property_read_string(dev, "adi,tc-diode", &diode))
+ return 0;
+
+ if (!strcmp(diode, "schottky"))
+ trickle_cfg = MAX31335_TRICKLE_SCHOTTKY_DIODE;
+ else if (!strcmp(diode, "standard+schottky"))
+ trickle_cfg = MAX31335_TRICKLE_STANDARD_DIODE;
+ else
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid tc-diode value: %s\n", diode);
+
+ for (i = 0; i < ARRAY_SIZE(max31335_trickle_resistors); i++)
+ if (ohms == max31335_trickle_resistors[i])
+ break;
+
+ if (i >= ARRAY_SIZE(max31335_trickle_resistors))
+ return 0;
+
+ i = i + trickle_cfg;
+
+ return regmap_write(max31335->regmap, MAX31335_TRICKLE_REG,
+ FIELD_PREP(MAX31335_TRICKLE_REG_TRICKLE, i) |
+ FIELD_PREP(MAX31335_TRICKLE_REG_EN_TRICKLE,
+ chargeable));
+}
+
+static unsigned long max31335_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+ unsigned int freq_mask;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+ if (ret)
+ return 0;
+
+ freq_mask = __roundup_pow_of_two(ARRAY_SIZE(max31335_clkout_freq)) - 1;
+
+ return max31335_clkout_freq[reg & freq_mask];
+}
+
+static long max31335_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int index;
+
+ index = find_closest(rate, max31335_clkout_freq,
+ ARRAY_SIZE(max31335_clkout_freq));
+
+ return max31335_clkout_freq[index];
+}
+
+static int max31335_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+ unsigned int freq_mask;
+ int index;
+
+ index = find_closest(rate, max31335_clkout_freq,
+ ARRAY_SIZE(max31335_clkout_freq));
+ freq_mask = __roundup_pow_of_two(ARRAY_SIZE(max31335_clkout_freq)) - 1;
+
+ return regmap_update_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+ freq_mask, index);
+}
+
+static int max31335_clkout_enable(struct clk_hw *hw)
+{
+ struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+
+ return regmap_set_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+ MAX31335_RTC_CONFIG2_ENCLKO);
+}
+
+static void max31335_clkout_disable(struct clk_hw *hw)
+{
+ struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+
+ regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+ MAX31335_RTC_CONFIG2_ENCLKO);
+}
+
+static int max31335_clkout_is_enabled(struct clk_hw *hw)
+{
+ struct max31335_data *max31335 = clk_hw_to_max31335(hw);
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+ if (ret)
+ return ret;
+
+ return !!(reg & MAX31335_RTC_CONFIG2_ENCLKO);
+}
+
+static const struct clk_ops max31335_clkout_ops = {
+ .recalc_rate = max31335_clkout_recalc_rate,
+ .round_rate = max31335_clkout_round_rate,
+ .set_rate = max31335_clkout_set_rate,
+ .enable = max31335_clkout_enable,
+ .disable = max31335_clkout_disable,
+ .is_enabled = max31335_clkout_is_enabled,
+};
+
+static struct clk_init_data max31335_clk_init = {
+ .name = "max31335-clkout",
+ .ops = &max31335_clkout_ops,
+};
+
+static int max31335_nvmem_reg_read(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct max31335_data *max31335 = priv;
+ unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+
+ return regmap_bulk_read(max31335->regmap, reg, val, bytes);
+}
+
+static int max31335_nvmem_reg_write(void *priv, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct max31335_data *max31335 = priv;
+ unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+
+ return regmap_bulk_write(max31335->regmap, reg, val, bytes);
+}
+
+static struct nvmem_config max31335_nvmem_cfg = {
+ .reg_read = max31335_nvmem_reg_read,
+ .reg_write = max31335_nvmem_reg_write,
+ .word_size = 8,
+ .size = MAX31335_RAM_SIZE,
+};
+
+#if IS_REACHABLE(CONFIG_HWMON)
+static int max31335_read_temp(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ u8 reg[2];
+ s16 temp;
+ int ret;
+
+ if (type != hwmon_temp || attr != hwmon_temp_input)
+ return -EOPNOTSUPP;
+
+ ret = regmap_bulk_read(max31335->regmap, MAX31335_TEMP_DATA_MSB,
+ reg, 2);
+ if (ret)
+ return ret;
+
+ temp = get_unaligned_be16(reg);
+
+ *val = (temp / 64) * 250;
+
+ return 0;
+}
+
+static umode_t max31335_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *max31335_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops max31335_hwmon_ops = {
+ .is_visible = max31335_is_visible,
+ .read = max31335_read_temp,
+};
+
+static const struct hwmon_chip_info max31335_chip_info = {
+ .ops = &max31335_hwmon_ops,
+ .info = max31335_info,
+};
+#endif
+
+static int max31335_clkout_register(struct device *dev)
+{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ int ret;
+
+ if (!device_property_present(dev, "#clock-cells"))
+ return regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+ MAX31335_RTC_CONFIG2_ENCLKO);
+
+ max31335->clkout.init = &max31335_clk_init;
+
+ ret = devm_clk_hw_register(dev, &max31335->clkout);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot register clock\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &max31335->clkout);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot add hw provider\n");
+
+ max31335->clkout.clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(max31335->clkout.clk))
+ return dev_err_probe(dev, PTR_ERR(max31335->clkout.clk),
+ "cannot enable clkout\n");
+
+ return 0;
+}
+
+static int max31335_probe(struct i2c_client *client)
+{
+ struct max31335_data *max31335;
+#if IS_REACHABLE(CONFIG_HWMON)
+ struct device *hwmon;
+#endif
+ int ret;
+
+ max31335 = devm_kzalloc(&client->dev, sizeof(*max31335), GFP_KERNEL);
+ if (!max31335)
+ return -ENOMEM;
+
+ max31335->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(max31335->regmap))
+ return PTR_ERR(max31335->regmap);
+
+ i2c_set_clientdata(client, max31335);
+
+ max31335->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(max31335->rtc))
+ return PTR_ERR(max31335->rtc);
+
+ max31335->rtc->ops = &max31335_rtc_ops;
+ max31335->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ max31335->rtc->range_max = RTC_TIMESTAMP_END_2199;
+ max31335->rtc->alarm_offset_max = 24 * 60 * 60;
+
+ ret = max31335_clkout_register(&client->dev);
+ if (ret)
+ return ret;
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, max31335_handle_irq,
+ IRQF_ONESHOT,
+ "max31335", max31335);
+ if (ret) {
+ dev_warn(&client->dev,
+ "unable to request IRQ, alarm max31335 disabled\n");
+ client->irq = 0;
+ }
+ }
+
+ if (!client->irq)
+ clear_bit(RTC_FEATURE_ALARM, max31335->rtc->features);
+
+ max31335_nvmem_cfg.priv = max31335;
+ ret = devm_rtc_nvmem_register(max31335->rtc, &max31335_nvmem_cfg);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "cannot register rtc nvmem\n");
+
+#if IS_REACHABLE(CONFIG_HWMON)
+ hwmon = devm_hwmon_device_register_with_info(&client->dev, client->name,
+ max31335,
+ &max31335_chip_info,
+ NULL);
+ if (IS_ERR(hwmon))
+ return dev_err_probe(&client->dev, PTR_ERR(hwmon),
+ "cannot register hwmon device\n");
+#endif
+
+ ret = max31335_trickle_charger_setup(&client->dev, max31335);
+ if (ret)
+ return ret;
+
+ return devm_rtc_register_device(max31335->rtc);
+}
+
+static const struct i2c_device_id max31335_id[] = {
+ { "max31335", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max31335_id);
+
+static const struct of_device_id max31335_of_match[] = {
+ { .compatible = "adi,max31335" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, max31335_of_match);
+
+static struct i2c_driver max31335_driver = {
+ .driver = {
+ .name = "rtc-max31335",
+ .of_match_table = max31335_of_match,
+ },
+ .probe = max31335_probe,
+ .id_table = max31335_id,
+};
+module_i2c_driver(max31335_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("MAX31335 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index ed4e606be..f488a189a 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -23,6 +23,7 @@
#define NCT3018Y_REG_CTRL 0x0A /* timer control */
#define NCT3018Y_REG_ST 0x0B /* status */
#define NCT3018Y_REG_CLKO 0x0C /* clock out */
+#define NCT3018Y_REG_PART 0x21 /* part info */
#define NCT3018Y_BIT_AF BIT(7)
#define NCT3018Y_BIT_ST BIT(7)
@@ -37,10 +38,12 @@
#define NCT3018Y_REG_BAT_MASK 0x07
#define NCT3018Y_REG_CLKO_F_MASK 0x03 /* frequency mask */
#define NCT3018Y_REG_CLKO_CKE 0x80 /* clock out enabled */
+#define NCT3018Y_REG_PART_NCT3018Y 0x02
struct nct3018y {
struct rtc_device *rtc;
struct i2c_client *client;
+ int part_num;
#ifdef CONFIG_COMMON_CLK
struct clk_hw clkout_hw;
#endif
@@ -177,8 +180,27 @@ static int nct3018y_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct nct3018y *nct3018y = dev_get_drvdata(dev);
unsigned char buf[4] = {0};
- int err;
+ int err, flags;
+ int restore_flags = 0;
+
+ flags = i2c_smbus_read_byte_data(client, NCT3018Y_REG_CTRL);
+ if (flags < 0) {
+ dev_dbg(&client->dev, "Failed to read NCT3018Y_REG_CTRL.\n");
+ return flags;
+ }
+
+ /* Check and set TWO bit */
+ if (nct3018y->part_num == NCT3018Y_REG_PART_NCT3018Y && !(flags & NCT3018Y_BIT_TWO)) {
+ restore_flags = 1;
+ flags |= NCT3018Y_BIT_TWO;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL.\n");
+ return err;
+ }
+ }
buf[0] = bin2bcd(tm->tm_sec);
err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_SC, buf[0]);
@@ -212,6 +234,18 @@ static int nct3018y_rtc_set_time(struct device *dev, struct rtc_time *tm)
return -EIO;
}
+ /* Restore TWO bit */
+ if (restore_flags) {
+ if (nct3018y->part_num == NCT3018Y_REG_PART_NCT3018Y)
+ flags &= ~NCT3018Y_BIT_TWO;
+
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL.\n");
+ return err;
+ }
+ }
+
return err;
}
@@ -479,11 +513,17 @@ static int nct3018y_probe(struct i2c_client *client)
dev_dbg(&client->dev, "%s: NCT3018Y_BIT_TWO is set\n", __func__);
}
- flags = NCT3018Y_BIT_TWO;
- err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
- if (err < 0) {
- dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL\n");
- return err;
+ nct3018y->part_num = i2c_smbus_read_byte_data(client, NCT3018Y_REG_PART);
+ if (nct3018y->part_num < 0) {
+ dev_dbg(&client->dev, "Failed to read NCT3018Y_REG_PART.\n");
+ return nct3018y->part_num;
+ } else if (nct3018y->part_num == NCT3018Y_REG_PART_NCT3018Y) {
+ flags = NCT3018Y_BIT_HF;
+ err = i2c_smbus_write_byte_data(client, NCT3018Y_REG_CTRL, flags);
+ if (err < 0) {
+ dev_dbg(&client->dev, "Unable to write NCT3018Y_REG_CTRL.\n");
+ return err;
+ }
}
flags = 0;
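
The set_time rework above reads CTRL first, temporarily sets the TWO bit on NCT3018Y parts if it was clear, and restores the original value once the time registers are written. That save/modify/restore flow, reduced to a sketch over SMBus (names other than the i2c helpers are hypothetical):

#include <linux/i2c.h>

static int example_write_with_guard_bit(struct i2c_client *client, u8 ctrl_reg,
					u8 guard_bit,
					int (*write_time)(struct i2c_client *))
{
	int flags = i2c_smbus_read_byte_data(client, ctrl_reg);
	bool restore = false;
	int err;

	if (flags < 0)
		return flags;

	if (!(flags & guard_bit)) {
		/* Arm the guard bit only if it was not already set. */
		restore = true;
		err = i2c_smbus_write_byte_data(client, ctrl_reg,
						flags | guard_bit);
		if (err < 0)
			return err;
	}

	err = write_time(client);

	/* Put the original control value back, whether or not the write worked. */
	if (restore)
		i2c_smbus_write_byte_data(client, ctrl_reg, flags);

	return err;
}
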
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 1a3ec1bb5..1327251e5 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/rtc.h>
+#include <linux/pm_wakeirq.h>
#define RV8803_I2C_TRY_COUNT 4
@@ -607,6 +608,28 @@ static int rv8803_regs_configure(struct rv8803_data *rv8803)
return 0;
}
+static int rv8803_resume(struct device *dev)
+{
+ struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+
+ if (rv8803->client->irq > 0 && device_may_wakeup(dev))
+ disable_irq_wake(rv8803->client->irq);
+
+ return 0;
+}
+
+static int rv8803_suspend(struct device *dev)
+{
+ struct rv8803_data *rv8803 = dev_get_drvdata(dev);
+
+ if (rv8803->client->irq > 0 && device_may_wakeup(dev))
+ enable_irq_wake(rv8803->client->irq);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(rv8803_pm_ops, rv8803_suspend, rv8803_resume);
+
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
{ "rv8804", rx_8804 },
@@ -683,10 +706,18 @@ static int rv8803_probe(struct i2c_client *client)
if (err) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
+ } else {
+ device_init_wakeup(&client->dev, true);
+ err = dev_pm_set_wake_irq(&client->dev, client->irq);
+ if (err)
+ dev_err(&client->dev, "failed to set wake IRQ\n");
}
+ } else {
+ if (device_property_read_bool(&client->dev, "wakeup-source"))
+ device_init_wakeup(&client->dev, true);
+ else
+ clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
}
- if (!client->irq)
- clear_bit(RTC_FEATURE_ALARM, rv8803->rtc->features);
if (of_property_read_bool(client->dev.of_node, "epson,vdet-disable"))
rv8803->backup |= RX8900_FLAG_VDETOFF;
@@ -737,6 +768,7 @@ static struct i2c_driver rv8803_driver = {
.driver = {
.name = "rtc-rv8803",
.of_match_table = of_match_ptr(rv8803_of_match),
+ .pm = &rv8803_pm_ops,
},
.probe = rv8803_probe,
.id_table = rv8803_id,
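
The rv8803 hunks arm the alarm interrupt as a system wakeup source: probe marks the device wakeup-capable and attaches the wake IRQ, while the new PM ops toggle irq_wake only when userspace has left wakeup enabled. A compact sketch of the same idiom (example_* names are hypothetical):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

struct example_chip {
	int irq;
};

static int example_suspend(struct device *dev)
{
	struct example_chip *chip = dev_get_drvdata(dev);

	if (chip->irq > 0 && device_may_wakeup(dev))
		enable_irq_wake(chip->irq);	/* keep the IRQ armed in suspend */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_chip *chip = dev_get_drvdata(dev);

	if (chip->irq > 0 && device_may_wakeup(dev))
		disable_irq_wake(chip->irq);
	return 0;
}

/* The callbacks compile out when CONFIG_PM_SLEEP is disabled. */
static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
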
diff --git a/drivers/rtc/rtc-tps6594.c b/drivers/rtc/rtc-tps6594.c
new file mode 100644
index 000000000..838ae8562
--- /dev/null
+++ b/drivers/rtc/rtc-tps6594.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for tps6594 PMIC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#include <linux/bcd.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/mfd/tps6594.h>
+
+// Total number of RTC registers needed to set time
+#define NUM_TIME_REGS (TPS6594_REG_RTC_WEEKS - TPS6594_REG_RTC_SECONDS + 1)
+
+// Total number of RTC alarm registers
+#define NUM_TIME_ALARM_REGS (NUM_TIME_REGS - 1)
+
+/*
+ * Min and max values supported by 'offset' interface (swapped sign).
+ * After conversion, the values do not exceed the range [-32767, 33767]
+ * which COMP_REG must conform to.
+ */
+#define MIN_OFFSET (-277774)
+#define MAX_OFFSET (277774)
+
+// Number of ticks per hour
+#define TICKS_PER_HOUR (32768 * 3600)
+
+// Multiplier for ppb conversions
+#define PPB_MULT NANO
+
+static int tps6594_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ u8 val;
+
+ val = enabled ? TPS6594_BIT_IT_ALARM : 0;
+
+ return regmap_update_bits(tps->regmap, TPS6594_REG_RTC_INTERRUPTS,
+ TPS6594_BIT_IT_ALARM, val);
+}
+
+/* Pulse GET_TIME field of RTC_CTRL_1 to store a timestamp in shadow registers. */
+static int tps6594_rtc_shadow_timestamp(struct device *dev, struct tps6594 *tps)
+{
+ int ret;
+
+ /*
+ * Set GET_TIME to 0. Next time we set GET_TIME to 1 we will be sure to store
+ * an up-to-date timestamp.
+ */
+ ret = regmap_clear_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_GET_TIME);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Copy content of RTC registers to shadow registers or latches to read
+ * a coherent timestamp.
+ */
+ return regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_GET_TIME);
+}
+
+static int tps6594_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[NUM_TIME_REGS];
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ int ret;
+
+ // Check if RTC is running.
+ ret = regmap_test_bits(tps->regmap, TPS6594_REG_RTC_STATUS,
+ TPS6594_BIT_RUN);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+
+ ret = tps6594_rtc_shadow_timestamp(dev, tps);
+ if (ret < 0)
+ return ret;
+
+ // Read shadowed RTC registers.
+ ret = regmap_bulk_read(tps->regmap, TPS6594_REG_RTC_SECONDS, rtc_data,
+ NUM_TIME_REGS);
+ if (ret < 0)
+ return ret;
+
+ tm->tm_sec = bcd2bin(rtc_data[0]);
+ tm->tm_min = bcd2bin(rtc_data[1]);
+ tm->tm_hour = bcd2bin(rtc_data[2]);
+ tm->tm_mday = bcd2bin(rtc_data[3]);
+ tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
+ tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+ tm->tm_wday = bcd2bin(rtc_data[6]);
+
+ return 0;
+}
+
+static int tps6594_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char rtc_data[NUM_TIME_REGS];
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ int ret;
+
+ rtc_data[0] = bin2bcd(tm->tm_sec);
+ rtc_data[1] = bin2bcd(tm->tm_min);
+ rtc_data[2] = bin2bcd(tm->tm_hour);
+ rtc_data[3] = bin2bcd(tm->tm_mday);
+ rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+ rtc_data[5] = bin2bcd(tm->tm_year - 100);
+ rtc_data[6] = bin2bcd(tm->tm_wday);
+
+ // Stop RTC while updating the RTC time registers.
+ ret = regmap_clear_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_STOP_RTC);
+ if (ret < 0)
+ return ret;
+
+ // Update all the time registers in one shot.
+ ret = regmap_bulk_write(tps->regmap, TPS6594_REG_RTC_SECONDS, rtc_data,
+ NUM_TIME_REGS);
+ if (ret < 0)
+ return ret;
+
+	// Restart the RTC.
+ return regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_STOP_RTC);
+}
+
+static int tps6594_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[NUM_TIME_ALARM_REGS];
+ u32 int_val;
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ int ret;
+
+ ret = regmap_bulk_read(tps->regmap, TPS6594_REG_ALARM_SECONDS,
+ alarm_data, NUM_TIME_ALARM_REGS);
+ if (ret < 0)
+ return ret;
+
+ alm->time.tm_sec = bcd2bin(alarm_data[0]);
+ alm->time.tm_min = bcd2bin(alarm_data[1]);
+ alm->time.tm_hour = bcd2bin(alarm_data[2]);
+ alm->time.tm_mday = bcd2bin(alarm_data[3]);
+ alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
+ alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
+
+ ret = regmap_read(tps->regmap, TPS6594_REG_RTC_INTERRUPTS, &int_val);
+ if (ret < 0)
+ return ret;
+
+ alm->enabled = int_val & TPS6594_BIT_IT_ALARM;
+
+ return 0;
+}
+
+static int tps6594_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ unsigned char alarm_data[NUM_TIME_ALARM_REGS];
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ int ret;
+
+ // Disable alarm irq before changing the alarm timestamp.
+ ret = tps6594_rtc_alarm_irq_enable(dev, 0);
+ if (ret)
+ return ret;
+
+ alarm_data[0] = bin2bcd(alm->time.tm_sec);
+ alarm_data[1] = bin2bcd(alm->time.tm_min);
+ alarm_data[2] = bin2bcd(alm->time.tm_hour);
+ alarm_data[3] = bin2bcd(alm->time.tm_mday);
+ alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+ alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
+
+ // Update all the alarm registers in one shot.
+ ret = regmap_bulk_write(tps->regmap, TPS6594_REG_ALARM_SECONDS,
+ alarm_data, NUM_TIME_ALARM_REGS);
+ if (ret < 0)
+ return ret;
+
+ if (alm->enabled)
+ ret = tps6594_rtc_alarm_irq_enable(dev, 1);
+
+ return ret;
+}
+
+static int tps6594_rtc_set_calibration(struct device *dev, int calibration)
+{
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ __le16 value;
+ int ret;
+
+	/*
+	 * The TPS6594 uses a two's complement 16-bit value to compensate for
+	 * RTC crystal inaccuracies. Once every hour, when the seconds counter
+	 * increments from 0 to 1, the compensation value is added to the
+	 * internal RTC counter.
+	 *
+	 * Valid range for the compensation value: [-32767 .. 32767].
+	 */
+ if (calibration < S16_MIN + 1 || calibration > S16_MAX)
+ return -ERANGE;
+
+ value = cpu_to_le16(calibration);
+
+ // Update all the compensation registers in one shot.
+ ret = regmap_bulk_write(tps->regmap, TPS6594_REG_RTC_COMP_LSB, &value,
+ sizeof(value));
+ if (ret < 0)
+ return ret;
+
+ // Enable automatic compensation.
+ return regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_AUTO_COMP);
+}
+
+static int tps6594_rtc_get_calibration(struct device *dev, int *calibration)
+{
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ unsigned int ctrl;
+ __le16 value;
+ int ret;
+
+ ret = regmap_read(tps->regmap, TPS6594_REG_RTC_CTRL_1, &ctrl);
+ if (ret < 0)
+ return ret;
+
+	// If automatic compensation is not enabled, report back zero.
+ if (!(ctrl & TPS6594_BIT_AUTO_COMP)) {
+ *calibration = 0;
+ return 0;
+ }
+
+ ret = regmap_bulk_read(tps->regmap, TPS6594_REG_RTC_COMP_LSB, &value,
+ sizeof(value));
+ if (ret < 0)
+ return ret;
+
+ *calibration = le16_to_cpu(value);
+
+ return 0;
+}
+
+static int tps6594_rtc_read_offset(struct device *dev, long *offset)
+{
+ int calibration;
+ s64 tmp;
+ int ret;
+
+ ret = tps6594_rtc_get_calibration(dev, &calibration);
+ if (ret < 0)
+ return ret;
+
+ // Convert from RTC calibration register format to ppb format.
+ tmp = calibration * PPB_MULT;
+
+ if (tmp < 0)
+ tmp -= TICKS_PER_HOUR / 2LL;
+ else
+ tmp += TICKS_PER_HOUR / 2LL;
+ tmp = div_s64(tmp, TICKS_PER_HOUR);
+
+	/*
+	 * SAFETY:
+	 * The computation is the reverse of the one done in
+	 * `tps6594_rtc_set_offset`. The safety remarks there apply here too.
+	 */
+
+	/*
+	 * The offset value acts in the opposite direction, so swap the sign.
+	 * See 8.3.10.5, (32768 - COMP_REG).
+	 */
+ *offset = (long)-tmp;
+
+ return 0;
+}
+
+static int tps6594_rtc_set_offset(struct device *dev, long offset)
+{
+ int calibration;
+ s64 tmp;
+
+ // Make sure offset value is within supported range.
+ if (offset < MIN_OFFSET || offset > MAX_OFFSET)
+ return -ERANGE;
+
+ // Convert from ppb format to RTC calibration register format.
+
+ tmp = offset * TICKS_PER_HOUR;
+ if (tmp < 0)
+ tmp -= PPB_MULT / 2LL;
+ else
+ tmp += PPB_MULT / 2LL;
+ tmp = div_s64(tmp, PPB_MULT);
+
+	/*
+	 * SAFETY:
+	 * - tmp = offset * TICKS_PER_HOUR :
+	 *	`offset` can't exceed 277774, so `tmp` can't exceed
+	 *	277774 * 117964800 = 32767554355200, which is far below the
+	 *	maximum value of an `s64` (2^63 - 1). No overflow here.
+	 *
+	 * - tmp += PPB_MULT / 2LL :
+	 *	tmp then peaks at 32768054355200, still well below 2^63 - 1.
+	 */
+
+	// The offset value acts in the opposite direction, so swap the sign.
+ calibration = (int)-tmp;
+
+ return tps6594_rtc_set_calibration(dev, calibration);
+}
+
+static irqreturn_t tps6594_rtc_interrupt(int irq, void *rtc)
+{
+ struct device *dev = rtc;
+ struct tps6594 *tps = dev_get_drvdata(dev->parent);
+ struct rtc_device *rtc_dev = dev_get_drvdata(dev);
+ int ret;
+ u32 rtc_reg;
+
+ ret = regmap_read(tps->regmap, TPS6594_REG_RTC_STATUS, &rtc_reg);
+ if (ret)
+ return IRQ_NONE;
+
+ rtc_update_irq(rtc_dev, 1, RTC_IRQF | RTC_AF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops tps6594_rtc_ops = {
+ .read_time = tps6594_rtc_read_time,
+ .set_time = tps6594_rtc_set_time,
+ .read_alarm = tps6594_rtc_read_alarm,
+ .set_alarm = tps6594_rtc_set_alarm,
+ .alarm_irq_enable = tps6594_rtc_alarm_irq_enable,
+ .read_offset = tps6594_rtc_read_offset,
+ .set_offset = tps6594_rtc_set_offset,
+};
+
+static int tps6594_rtc_probe(struct platform_device *pdev)
+{
+ struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct rtc_device *rtc;
+ int irq;
+ int ret;
+
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ // Enable crystal oscillator.
+ ret = regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_2,
+ TPS6594_BIT_XTAL_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_test_bits(tps->regmap, TPS6594_REG_RTC_STATUS,
+ TPS6594_BIT_RUN);
+ if (ret < 0)
+ return ret;
+ // RTC not running.
+ if (ret == 0) {
+ ret = regmap_set_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_STOP_RTC);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * On some boards, a 40 ms delay is needed before BIT_RUN is set.
+ * 80 ms should provide sufficient margin.
+ */
+ mdelay(80);
+
+		/*
+		 * The RTC should be running now. Check that it is;
+		 * if not, the oscillator may be missing.
+		 */
+ ret = regmap_test_bits(tps->regmap, TPS6594_REG_RTC_STATUS,
+ TPS6594_BIT_RUN);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ENODEV;
+
+ // Stop RTC until first call to `tps6594_rtc_set_time`.
+ ret = regmap_clear_bits(tps->regmap, TPS6594_REG_RTC_CTRL_1,
+ TPS6594_BIT_STOP_RTC);
+ if (ret < 0)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ irq = platform_get_irq_byname(pdev, TPS6594_IRQ_NAME_ALARM);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get irq\n");
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, tps6594_rtc_interrupt,
+ IRQF_ONESHOT, TPS6594_IRQ_NAME_ALARM,
+ dev);
+ if (ret < 0)
+		return dev_err_probe(dev, ret,
+				     "Failed to request threaded IRQ\n");
+
+ ret = device_init_wakeup(dev, true);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to init rtc as wakeup source\n");
+
+ rtc->ops = &tps6594_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ return devm_rtc_register_device(rtc);
+}
+
+static const struct platform_device_id tps6594_rtc_id_table[] = {
+ { "tps6594-rtc", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, tps6594_rtc_id_table);
+
+static struct platform_driver tps6594_rtc_driver = {
+ .probe = tps6594_rtc_probe,
+ .driver = {
+ .name = "tps6594-rtc",
+ },
+ .id_table = tps6594_rtc_id_table,
+};
+
+module_platform_driver(tps6594_rtc_driver);
+MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_DESCRIPTION("TPS6594 RTC driver");
+MODULE_LICENSE("GPL");
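
The read_offset/set_offset pair in the new driver converts between the RTC core's parts-per-billion offset and the chip's per-hour tick compensation, with the sign swapped because the hardware applies (32768 - COMP_REG). The arithmetic, including the half-divisor rounding, can be checked with a small standalone program; the constants are copied from the driver and the sample values in the comments are worked out by hand, not taken from a datasheet:

#include <stdio.h>
#include <stdint.h>

#define TICKS_PER_HOUR	(32768LL * 3600)	/* 117964800 */
#define PPB_MULT	1000000000LL

/* Same rounding as the driver: add half the divisor before dividing
 * (div_s64 and C99 '/' both truncate toward zero). */
static int64_t ppb_to_cal(int64_t offset)
{
	int64_t tmp = offset * TICKS_PER_HOUR;

	tmp += (tmp < 0) ? -PPB_MULT / 2 : PPB_MULT / 2;
	return -(tmp / PPB_MULT);	/* sign swapped, see 8.3.10.5 */
}

static int64_t cal_to_ppb(int64_t cal)
{
	int64_t tmp = cal * PPB_MULT;

	tmp += (tmp < 0) ? -TICKS_PER_HOUR / 2 : TICKS_PER_HOUR / 2;
	return -(tmp / TICKS_PER_HOUR);
}

int main(void)
{
	/* One register step is 1e9 / 117964800 ~= 8.5 ppb, so the
	 * round trip lands within one step of the requested offset. */
	printf("%lld\n", (long long)ppb_to_cal(10000));	/* -1180 */
	printf("%lld\n", (long long)cal_to_ppb(-1180));	/* 10003 */
	return 0;
}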
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0eea5afe9..30851faad 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1103,12 +1103,6 @@ static void dasd_statistics_removeroot(void)
return;
}
-int dasd_stats_generic_show(struct seq_file *m, void *v)
-{
- seq_puts(m, "Statistics are not activated in this kernel\n");
- return 0;
-}
-
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 99361618c..0b0324fe4 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -79,8 +79,8 @@ struct raw3215_info {
struct ccw_device *cdev; /* device for tty driver */
spinlock_t *lock; /* pointer to irq lock */
int flags; /* state flags */
- char *buffer; /* pointer to output buffer */
- char *inbuf; /* pointer to input buffer */
+ u8 *buffer; /* pointer to output buffer */
+ u8 *inbuf; /* pointer to input buffer */
int head; /* first free byte in output buffer */
int count; /* number of bytes in output buffer */
int written; /* number of bytes in write requests */
@@ -89,7 +89,6 @@ struct raw3215_info {
wait_queue_head_t empty_wait; /* wait queue for flushing */
struct timer_list timer; /* timer for delayed output */
int line_pos; /* position on the line (for tabs) */
- char ubuffer[80]; /* copy_from_user buffer */
};
/* array of 3215 devices structures */
@@ -523,12 +522,14 @@ static unsigned int raw3215_make_room(struct raw3215_info *raw,
* string without blocking.
* Return value is the number of bytes copied.
*/
-static unsigned int raw3215_addtext(const char *str, unsigned int length,
+static unsigned int raw3215_addtext(const u8 *str, size_t length,
struct raw3215_info *raw, int opmode,
unsigned int todrop)
{
- unsigned int c, ch, i, blanks, expanded_size = 0;
+ unsigned int i, blanks, expanded_size = 0;
unsigned int column = raw->line_pos;
+ size_t c;
+ u8 ch;
if (opmode == RAW3215_COUNT)
todrop = 0;
@@ -559,7 +560,7 @@ static unsigned int raw3215_addtext(const char *str, unsigned int length,
if (todrop && expanded_size < todrop) /* Drop head data */
continue;
for (i = 0; i < blanks; i++) {
- raw->buffer[raw->head] = (char)_ascebc[(int)ch];
+ raw->buffer[raw->head] = _ascebc[ch];
raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);
raw->count++;
}
@@ -571,8 +572,8 @@ static unsigned int raw3215_addtext(const char *str, unsigned int length,
/*
* String write routine for 3215 devices
*/
-static void raw3215_write(struct raw3215_info *raw, const char *str,
- unsigned int length)
+static void raw3215_write(struct raw3215_info *raw, const u8 *str,
+ size_t length)
{
unsigned int count, avail;
unsigned long flags;
@@ -597,7 +598,7 @@ static void raw3215_write(struct raw3215_info *raw, const char *str,
/*
* Put character routine for 3215 devices
*/
-static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch)
+static void raw3215_putchar(struct raw3215_info *raw, u8 ch)
{
raw3215_write(raw, &ch, 1);
}
@@ -824,12 +825,10 @@ static struct ccw_driver raw3215_ccw_driver = {
.int_class = IRQIO_C15,
};
-static void handle_write(struct raw3215_info *raw, const char *str, int count)
+static void handle_write(struct raw3215_info *raw, const u8 *str, size_t count)
{
- int i;
-
while (count > 0) {
- i = min_t(int, count, RAW3215_BUFFER_SIZE - 1);
+ size_t i = min_t(size_t, count, RAW3215_BUFFER_SIZE - 1);
raw3215_write(raw, str, i);
count -= i;
str += i;
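
The char-to-u8 conversions in con3215 (and con3270 below) follow the tty layer's move to u8 for byte buffers: expressions like _ascebc[ch] index a translation table with the character value, and whether plain char is signed is ABI-specific (signed on x86, unsigned on s390), so spelling the type as u8 removes a portability hazard. A tiny userspace illustration:

#include <stdio.h>

int main(void)
{
	char sc = (char)0xAE;		/* any byte >= 0x80 */
	unsigned char uc = 0xAE;

	/*
	 * On an ABI with signed char, table[sc] would compute a
	 * negative index (-82 here) and read before the table;
	 * with u8/unsigned char the index is always 174.
	 */
	printf("plain char as index:    %d\n", (int)sc);
	printf("unsigned char as index: %d\n", (int)uc);
	return 0;
}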
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 363315fa1..251d2a1c3 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -54,7 +54,7 @@ struct tty3270_attribute {
};
struct tty3270_cell {
- unsigned char character;
+ u8 character;
struct tty3270_attribute attributes;
};
@@ -123,7 +123,7 @@ struct tty3270 {
/* Character array for put_char/flush_chars. */
unsigned int char_count;
- char char_buf[TTY3270_CHAR_BUF_SIZE];
+ u8 char_buf[TTY3270_CHAR_BUF_SIZE];
};
/* tty3270->update_flags. See tty3270_update for details. */
@@ -1255,7 +1255,7 @@ static unsigned int tty3270_write_room(struct tty_struct *tty)
* Insert character into the screen at the current position with the
* current color and highlight. This function does NOT do cursor movement.
*/
-static void tty3270_put_character(struct tty3270 *tp, char ch)
+static void tty3270_put_character(struct tty3270 *tp, u8 ch)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
@@ -1561,7 +1561,7 @@ static void tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
* Pn is a numeric parameter, a string of zero or more decimal digits.
* Ps is a selective parameter.
*/
-static void tty3270_escape_sequence(struct tty3270 *tp, char ch)
+static void tty3270_escape_sequence(struct tty3270 *tp, u8 ch)
{
enum { ES_NORMAL, ES_ESC, ES_SQUARE, ES_PAREN, ES_GETPARS };
@@ -1726,7 +1726,7 @@ static void tty3270_escape_sequence(struct tty3270 *tp, char ch)
* String write routine for 3270 ttys
*/
static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
- const unsigned char *buf, int count)
+ const u8 *buf, size_t count)
{
int i_msg, i;
@@ -2052,7 +2052,7 @@ con3270_write(struct console *co, const char *str, unsigned int count)
{
struct tty3270 *tp = co->data;
unsigned long flags;
- char c;
+ u8 c;
spin_lock_irqsave(&tp->view.lock, flags);
while (count--) {
diff --git a/drivers/s390/char/uvdevice.c b/drivers/s390/char/uvdevice.c
index 144cd2e03..42c9f77f8 100644
--- a/drivers/s390/char/uvdevice.c
+++ b/drivers/s390/char/uvdevice.c
@@ -109,6 +109,7 @@ static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest,
struct uvio_attest *uvio_attest)
{
struct uvio_attest __user *user_uvio_attest = (void __user *)uv_ioctl->argument_addr;
+ u32 __user *user_buf_add_len = (u32 __user *)&user_uvio_attest->add_data_len;
void __user *user_buf_add = (void __user *)uvio_attest->add_data_addr;
void __user *user_buf_meas = (void __user *)uvio_attest->meas_addr;
void __user *user_buf_uid = &user_uvio_attest->config_uid;
@@ -117,6 +118,8 @@ static int uvio_copy_attest_result_to_user(struct uv_cb_attest *uvcb_attest,
return -EFAULT;
if (add_data && copy_to_user(user_buf_add, add_data, uvio_attest->add_data_len))
return -EFAULT;
+ if (put_user(uvio_attest->add_data_len, user_buf_add_len))
+ return -EFAULT;
if (copy_to_user(user_buf_uid, uvcb_attest->config_uid, sizeof(uvcb_attest->config_uid)))
return -EFAULT;
return 0;
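
The uvdevice hunk writes the actual add-data length back into one member of a userspace struct with put_user(). In isolation the idiom looks like this — struct and function names are invented for illustration:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_reply {
	u64 data_addr;
	u32 data_len;	/* filled in by the kernel on return */
};

/* Hypothetical: report back only data_len of a user-supplied struct. */
static int example_report_len(struct example_reply __user *urep, u32 len)
{
	u32 __user *ulen = &urep->data_len;

	if (put_user(len, ulen))
		return -EFAULT;	/* user mapping gone or read-only */
	return 0;
}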
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f8b04ce61..64ed55c3a 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -219,16 +219,16 @@ EXPORT_SYMBOL_GPL(chsc_sadc);
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
if (sch->driver && sch->driver->chp_event)
if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
goto out_unreg;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
return 0;
out_unreg:
sch->lpm = 0;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
css_schedule_eval(sch->schid);
return 0;
}
@@ -258,10 +258,10 @@ void chsc_chp_offline(struct chp_id chpid)
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, data, CHP_ONLINE);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
return 0;
}
@@ -292,10 +292,10 @@ static void s390_process_res_acc(struct chp_link *link)
static int process_fces_event(struct subchannel *sch, void *data)
{
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, data, CHP_FCES_EVENT);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
return 0;
}
@@ -769,11 +769,11 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
if (sch->driver && sch->driver->chp_event)
sch->driver->chp_event(sch, &link,
on ? CHP_VARY_ON : CHP_VARY_OFF);
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
}
static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 180ab8992..902237d0b 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -211,10 +211,10 @@ static int chsc_async(struct chsc_async_area *chsc_area,
chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
- spin_lock(sch->lock);
+ spin_lock(&sch->lock);
private = dev_get_drvdata(&sch->dev);
if (private->request) {
- spin_unlock(sch->lock);
+ spin_unlock(&sch->lock);
ret = -EBUSY;
continue;
}
@@ -239,7 +239,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
default:
ret = -ENODEV;
}
- spin_unlock(sch->lock);
+ spin_unlock(&sch->lock);
CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
sch->schid.ssid, sch->schid.sch_no, cc);
if (ret == -EINPROGRESS)
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6127add74..a5736b735 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -546,7 +546,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
return IRQ_HANDLED;
}
sch = phys_to_virt(tpi_info->intparm);
- spin_lock(sch->lock);
+ spin_lock(&sch->lock);
/* Store interrupt response block to lowcore. */
if (tsch(tpi_info->schid, irb) == 0) {
/* Keep subchannel information word up to date. */
@@ -558,7 +558,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
inc_irq_stat(IRQIO_CIO);
} else
inc_irq_stat(IRQIO_CIO);
- spin_unlock(sch->lock);
+ spin_unlock(&sch->lock);
return IRQ_HANDLED;
}
@@ -663,7 +663,7 @@ struct subchannel *cio_probe_console(void)
if (IS_ERR(sch))
return sch;
- lockdep_set_class(sch->lock, &console_sch_key);
+ lockdep_set_class(&sch->lock, &console_sch_key);
isc_register(CONSOLE_ISC);
sch->config.isc = CONSOLE_ISC;
sch->config.intparm = (u32)virt_to_phys(sch);
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index fa8df50bb..a9057a5b6 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -83,7 +83,7 @@ enum sch_todo {
/* subchannel data structure used by I/O subroutines */
struct subchannel {
struct subchannel_id schid;
- spinlock_t *lock; /* subchannel lock */
+ spinlock_t lock; /* subchannel lock */
struct mutex reg_mutex;
enum {
SUBCHANNEL_TYPE_IO = 0,
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3ff46fc69..28a88ed2c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -148,16 +148,10 @@ out:
static void css_sch_todo(struct work_struct *work);
-static int css_sch_create_locks(struct subchannel *sch)
+static void css_sch_create_locks(struct subchannel *sch)
{
- sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
- if (!sch->lock)
- return -ENOMEM;
-
- spin_lock_init(sch->lock);
+ spin_lock_init(&sch->lock);
mutex_init(&sch->reg_mutex);
-
- return 0;
}
static void css_subchannel_release(struct device *dev)
@@ -167,7 +161,6 @@ static void css_subchannel_release(struct device *dev)
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->driver_override);
- kfree(sch->lock);
kfree(sch);
}
@@ -219,9 +212,7 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
sch->schib = *schib;
sch->st = schib->pmcw.st;
- ret = css_sch_create_locks(sch);
- if (ret)
- goto err;
+ css_sch_create_locks(sch);
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
@@ -233,19 +224,17 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
*/
ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
if (ret)
- goto err_lock;
+ goto err;
/*
* But we don't have such restrictions imposed on the stuff that
* is handled by the streaming API.
*/
ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
if (ret)
- goto err_lock;
+ goto err;
return sch;
-err_lock:
- kfree(sch->lock);
err:
kfree(sch);
return ERR_PTR(ret);
@@ -604,12 +593,12 @@ static void css_sch_todo(struct work_struct *work)
sch = container_of(work, struct subchannel, todo_work);
/* Find out todo. */
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
todo = sch->todo;
CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
sch->schid.sch_no, todo);
sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
/* Perform todo. */
switch (todo) {
case SCH_TODO_NOTHING:
@@ -617,9 +606,9 @@ static void css_sch_todo(struct work_struct *work)
case SCH_TODO_EVAL:
ret = css_evaluate_known_subchannel(sch, 1);
if (ret == -EAGAIN) {
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
css_sched_sch_todo(sch, todo);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
}
break;
case SCH_TODO_UNREG:
@@ -1028,12 +1017,7 @@ static int __init setup_css(int nr)
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
mutex_init(&css->pseudo_subchannel->reg_mutex);
- ret = css_sch_create_locks(css->pseudo_subchannel);
- if (ret) {
- kfree(css->pseudo_subchannel);
- device_unregister(&css->device);
- goto out_err;
- }
+ css_sch_create_locks(css->pseudo_subchannel);
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
ret = device_register(&css->pseudo_subchannel->dev);
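
The cio/css hunks embed the subchannel lock in struct subchannel instead of allocating it separately, which removes a fallible kmalloc(), the err_lock unwind path, and a kfree() on every release path. Reduced to the pattern, with a hypothetical type name:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_sch {
	spinlock_t lock;	/* embedded: lives and dies with the object */
};

static struct example_sch *example_alloc(void)
{
	struct example_sch *sch = kzalloc(sizeof(*sch), GFP_KERNEL);

	if (!sch)
		return NULL;
	spin_lock_init(&sch->lock);	/* cannot fail, unlike a kmalloc'ed lock */
	return sch;
}

static void example_release(struct example_sch *sch)
{
	kfree(sch);	/* no separate kfree() of the lock to forget */
}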
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 4ca5adce9..34b2567b8 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -363,10 +363,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ if (ret) {
+ spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@@ -375,7 +373,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state is reached */
+ while (!dev_fsm_final_state(cdev)) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(cdev->ccwlock);
+ }
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
@@ -748,7 +751,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
mutex_init(&cdev->reg_mutex);
atomic_set(&priv->onoff, 0);
- cdev->ccwlock = sch->lock;
+ cdev->ccwlock = &sch->lock;
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
cdev->dev.bus = &ccw_bus_type;
@@ -764,9 +767,9 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
goto out_put;
}
priv->flags.initialized = 1;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
sch_set_cdev(sch, cdev);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
return 0;
out_put:
@@ -851,9 +854,9 @@ static void io_subchannel_register(struct ccw_device *cdev)
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
sch_set_cdev(sch, NULL);
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
mutex_unlock(&cdev->reg_mutex);
/* Release initial device reference. */
put_device(&cdev->dev);
@@ -904,9 +907,9 @@ static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
ccw_device_recognition(cdev);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
}
static int ccw_device_move_to_sch(struct ccw_device *cdev,
@@ -921,12 +924,12 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
return -ENODEV;
if (!sch_is_pseudo_sch(old_sch)) {
- spin_lock_irq(old_sch->lock);
+ spin_lock_irq(&old_sch->lock);
old_enabled = old_sch->schib.pmcw.ena;
rc = 0;
if (old_enabled)
rc = cio_disable_subchannel(old_sch);
- spin_unlock_irq(old_sch->lock);
+ spin_unlock_irq(&old_sch->lock);
if (rc == -EBUSY) {
/* Release child reference for new parent. */
put_device(&sch->dev);
@@ -944,9 +947,9 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
sch->schib.pmcw.dev, rc);
if (old_enabled) {
/* Try to re-enable the old subchannel. */
- spin_lock_irq(old_sch->lock);
+ spin_lock_irq(&old_sch->lock);
cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
- spin_unlock_irq(old_sch->lock);
+ spin_unlock_irq(&old_sch->lock);
}
/* Release child reference for new parent. */
put_device(&sch->dev);
@@ -954,19 +957,19 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
}
/* Clean up old subchannel. */
if (!sch_is_pseudo_sch(old_sch)) {
- spin_lock_irq(old_sch->lock);
+ spin_lock_irq(&old_sch->lock);
sch_set_cdev(old_sch, NULL);
- spin_unlock_irq(old_sch->lock);
+ spin_unlock_irq(&old_sch->lock);
css_schedule_eval(old_sch->schid);
}
/* Release child reference for old parent. */
put_device(&old_sch->dev);
/* Initialize new subchannel. */
- spin_lock_irq(sch->lock);
- cdev->ccwlock = sch->lock;
+ spin_lock_irq(&sch->lock);
+ cdev->ccwlock = &sch->lock;
if (!sch_is_pseudo_sch(sch))
sch_set_cdev(sch, cdev);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
if (!sch_is_pseudo_sch(sch))
css_update_ssd_info(sch);
return 0;
@@ -1077,9 +1080,9 @@ static int io_subchannel_probe(struct subchannel *sch)
return 0;
out_schedule:
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
css_sched_sch_todo(sch, SCH_TODO_UNREG);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
return 0;
}
@@ -1093,10 +1096,10 @@ static void io_subchannel_remove(struct subchannel *sch)
goto out_free;
ccw_device_unregister(cdev);
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
sch_set_cdev(sch, NULL);
set_io_private(sch, NULL);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
out_free:
dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
io_priv->dma_area, io_priv->dma_area_dma);
@@ -1203,7 +1206,7 @@ static void io_subchannel_quiesce(struct subchannel *sch)
struct ccw_device *cdev;
int ret;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
cdev = sch_get_cdev(sch);
if (cio_is_console(sch->schid))
goto out_unlock;
@@ -1220,15 +1223,15 @@ static void io_subchannel_quiesce(struct subchannel *sch)
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, HZ/10);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
wait_event(cdev->private->wait_q,
cdev->private->state != DEV_STATE_QUIESCE);
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
}
ret = cio_disable_subchannel(sch);
}
out_unlock:
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
}
static void io_subchannel_shutdown(struct subchannel *sch)
@@ -1439,7 +1442,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
enum io_sch_action action;
int rc = -EAGAIN;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
if (work_pending(&sch->todo_work))
@@ -1492,7 +1495,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
default:
break;
}
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
/* All other actions require process context. */
if (!process)
goto out;
@@ -1507,9 +1510,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
break;
case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
sch_set_cdev(sch, NULL);
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
/* Unregister ccw device. */
ccw_device_unregister(cdev);
break;
@@ -1538,9 +1541,9 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
put_device(&cdev->dev);
goto out;
}
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
ccw_device_trigger_reprobe(cdev);
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
/* Release reference from get_ccwdev_by_dev_id() */
put_device(&cdev->dev);
break;
@@ -1550,7 +1553,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
return 0;
out_unlock:
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
out:
return rc;
}
@@ -1846,9 +1849,9 @@ static void ccw_device_todo(struct work_struct *work)
css_schedule_eval(sch->schid);
fallthrough;
case CDEV_TODO_UNREG:
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
sch_set_cdev(sch, NULL);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
ccw_device_unregister(cdev);
break;
default:
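
The ccw_device_set_online() hunk earlier in this file replaces a single wait with a drop-lock/wait/retake loop, so the final-state test is always re-evaluated while ccwlock is held and a transient wakeup cannot be mistaken for completion. The shape of that pattern as a standalone sketch (hypothetical types; the real code uses dev_fsm_final_state()):

#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_dev {
	spinlock_t lock;
	wait_queue_head_t wait_q;
	int state;
};

static bool example_final_state(struct example_dev *d)
{
	return d->state == 0;	/* stand-in for the FSM's final-state test */
}

static void example_wait_final(struct example_dev *d)
{
	spin_lock_irq(&d->lock);
	while (!example_final_state(d)) {
		spin_unlock_irq(&d->lock);	/* never sleep under a spinlock */
		wait_event(d->wait_q, example_final_state(d));
		spin_lock_irq(&d->lock);	/* recheck with the lock held */
	}
	/* here: the state is final *and* the lock is held */
	spin_unlock_irq(&d->lock);
}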
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 386296169..ad9004587 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -698,29 +698,29 @@ int ccw_device_stlck(struct ccw_device *cdev)
return -ENOMEM;
init_completion(&data.done);
data.rc = -EIO;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
rc = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (rc)
goto out_unlock;
/* Perform operation. */
cdev->private->state = DEV_STATE_STEAL_LOCK;
ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
/* Wait for operation to finish. */
if (wait_for_completion_interruptible(&data.done)) {
/* Got a signal. */
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
ccw_request_cancel(cdev);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
wait_for_completion(&data.done);
}
rc = data.rc;
/* Check results. */
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
cio_disable_subchannel(sch);
cdev->private->state = DEV_STATE_BOXED;
out_unlock:
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
kfree(buffer);
return rc;
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 826364d2f..1caedf931 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -101,12 +101,12 @@ static void eadm_subchannel_timeout(struct timer_list *t)
struct eadm_private *private = from_timer(private, t, timer);
struct subchannel *sch = private->sch;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
EADM_LOG(1, "timeout");
EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
if (eadm_subchannel_clear(sch))
EADM_LOG(0, "clear failed");
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
}
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
@@ -163,16 +163,16 @@ static struct subchannel *eadm_get_idle_sch(void)
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry(private, &eadm_list, head) {
sch = private->sch;
- spin_lock(sch->lock);
+ spin_lock(&sch->lock);
if (private->state == EADM_IDLE) {
private->state = EADM_BUSY;
list_move_tail(&private->head, &eadm_list);
- spin_unlock(sch->lock);
+ spin_unlock(&sch->lock);
spin_unlock_irqrestore(&list_lock, flags);
return sch;
}
- spin_unlock(sch->lock);
+ spin_unlock(&sch->lock);
}
spin_unlock_irqrestore(&list_lock, flags);
@@ -190,7 +190,7 @@ int eadm_start_aob(struct aob *aob)
if (!sch)
return -EBUSY;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
ret = eadm_subchannel_start(sch, aob);
if (!ret)
@@ -203,7 +203,7 @@ int eadm_start_aob(struct aob *aob)
css_sched_sch_todo(sch, SCH_TODO_EVAL);
out_unlock:
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
return ret;
}
@@ -221,7 +221,7 @@ static int eadm_subchannel_probe(struct subchannel *sch)
INIT_LIST_HEAD(&private->head);
timer_setup(&private->timer, eadm_subchannel_timeout, 0);
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
set_eadm_private(sch, private);
private->state = EADM_IDLE;
private->sch = sch;
@@ -229,11 +229,11 @@ static int eadm_subchannel_probe(struct subchannel *sch)
ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret) {
set_eadm_private(sch, NULL);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
kfree(private);
goto out;
}
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
spin_lock_irq(&list_lock);
list_add(&private->head, &eadm_list);
@@ -248,7 +248,7 @@ static void eadm_quiesce(struct subchannel *sch)
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
if (private->state != EADM_BUSY)
goto disable;
@@ -256,11 +256,11 @@ static void eadm_quiesce(struct subchannel *sch)
goto disable;
private->completion = &completion;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
wait_for_completion_io(&completion);
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
private->completion = NULL;
disable:
@@ -269,7 +269,7 @@ disable:
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
}
static void eadm_subchannel_remove(struct subchannel *sch)
@@ -282,9 +282,9 @@ static void eadm_subchannel_remove(struct subchannel *sch)
eadm_quiesce(sch);
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
set_eadm_private(sch, NULL);
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
kfree(private);
}
@@ -309,7 +309,7 @@ static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
struct eadm_private *private;
unsigned long flags;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
@@ -325,7 +325,7 @@ static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
private->state = EADM_IDLE;
out_unlock:
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
return 0;
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9cde55730..ebcb53580 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
- int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
+ if (dcc == 1)
+ return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
- return;
+ return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return -EIO;
}
/* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- int cstat, dstat;
+ int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+ rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
+ else if (dcc == 1)
+ rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
+
+ if (rc == -EAGAIN) {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+ if (!rc)
+ return;
+ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ }
+
wake_up(&cdev->private->wait_q);
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index bfb35cfce..8ad49030a 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -65,14 +65,14 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
* cancel/halt/clear completion.
*/
private->completion = &completion;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
if (ret == -EBUSY)
wait_for_completion_timeout(&completion, 3*HZ);
private->completion = NULL;
flush_workqueue(vfio_ccw_work_q);
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
@@ -249,7 +249,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
unsigned long flags;
int rc = -EAGAIN;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
@@ -264,7 +264,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
}
out_unlock:
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
return rc;
}
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 757b73141..09877b461 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -25,7 +25,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
unsigned long flags;
int ret;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
orb = cp_get_orb(&private->cp, sch);
if (!orb) {
@@ -72,7 +72,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
ret = ccode;
}
out:
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
return ret;
}
@@ -83,7 +83,7 @@ static int fsm_do_halt(struct vfio_ccw_private *private)
int ccode;
int ret;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
VFIO_CCW_TRACE_EVENT(2, "haltIO");
VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
@@ -111,7 +111,7 @@ static int fsm_do_halt(struct vfio_ccw_private *private)
default:
ret = ccode;
}
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
return ret;
}
@@ -122,7 +122,7 @@ static int fsm_do_clear(struct vfio_ccw_private *private)
int ccode;
int ret;
- spin_lock_irqsave(sch->lock, flags);
+ spin_lock_irqsave(&sch->lock, flags);
VFIO_CCW_TRACE_EVENT(2, "clearIO");
VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
@@ -147,7 +147,7 @@ static int fsm_do_clear(struct vfio_ccw_private *private)
default:
ret = ccode;
}
- spin_unlock_irqrestore(sch->lock, flags);
+ spin_unlock_irqrestore(&sch->lock, flags);
return ret;
}
@@ -376,18 +376,18 @@ static void fsm_open(struct vfio_ccw_private *private,
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
int ret;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
sch->isc = VFIO_CCW_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret)
goto err_unlock;
private->state = VFIO_CCW_STATE_IDLE;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
return;
err_unlock:
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
@@ -397,7 +397,7 @@ static void fsm_close(struct vfio_ccw_private *private,
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
int ret;
- spin_lock_irq(sch->lock);
+ spin_lock_irq(&sch->lock);
if (!sch->schib.pmcw.ena)
goto err_unlock;
@@ -409,12 +409,12 @@ static void fsm_close(struct vfio_ccw_private *private,
goto err_unlock;
private->state = VFIO_CCW_STATE_STANDBY;
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
cp_free(&private->cp);
return;
err_unlock:
- spin_unlock_irq(sch->lock);
+ spin_unlock_irq(&sch->lock);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5dd33155d..f46dd6aba 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -357,13 +357,12 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
* -1 invalid APQN, TAPQ error or AP queue status which
* indicates there is no APQN.
*/
-static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
- int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
+static int ap_queue_info(ap_qid_t qid, struct ap_tapq_hwinfo *hwinfo,
+ bool *decfg, bool *cstop)
{
struct ap_queue_status status;
- struct ap_tapq_gr2 tapq_info;
- tapq_info.value = 0;
+ hwinfo->value = 0;
/* make sure we don't run into a specifiation exception */
if (AP_QID_CARD(qid) > ap_max_adapter_id ||
@@ -371,7 +370,7 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
return -1;
/* call TAPQ on this APQN */
- status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
+ status = ap_test_queue(qid, ap_apft_available(), hwinfo);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -389,15 +388,11 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
}
/* There should be at least one of the mode bits set */
- if (WARN_ON_ONCE(!tapq_info.value))
+ if (WARN_ON_ONCE(!hwinfo->value))
return 0;
- *q_type = tapq_info.at;
- *q_fac = tapq_info.fac;
- *q_depth = tapq_info.qd;
- *q_ml = tapq_info.ml;
- *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
- *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
+ *decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+ *cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
return 1;
}
@@ -642,11 +637,11 @@ static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
return rc;
/* Add MODE=<accel|cca|ep11> */
- if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
+ if (ac->hwinfo.accel)
rc = add_uevent_var(env, "MODE=accel");
- else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ else if (ac->hwinfo.cca)
rc = add_uevent_var(env, "MODE=cca");
- else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ else if (ac->hwinfo.ep11)
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
@@ -654,11 +649,11 @@ static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
/* Add MODE=<accel|cca|ep11> */
- if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
+ if (aq->card->hwinfo.accel)
rc = add_uevent_var(env, "MODE=accel");
- else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ else if (aq->card->hwinfo.cca)
rc = add_uevent_var(env, "MODE=cca");
- else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ else if (aq->card->hwinfo.ep11)
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
@@ -1799,12 +1794,12 @@ static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
*/
static inline void ap_scan_domains(struct ap_card *ac)
{
- int rc, dom, depth, type, ml;
+ struct ap_tapq_hwinfo hwinfo;
bool decfg, chkstop;
struct ap_queue *aq;
struct device *dev;
- unsigned int func;
ap_qid_t qid;
+ int rc, dom;
/*
* Go through the configuration for the domains and compare them
@@ -1827,8 +1822,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
goto put_dev_and_continue;
}
/* domain is valid, get info from this APQN */
- rc = ap_queue_info(qid, &type, &func, &depth,
- &ml, &decfg, &chkstop);
+ rc = ap_queue_info(qid, &hwinfo, &decfg, &chkstop);
switch (rc) {
case -1:
if (dev) {
@@ -1853,6 +1847,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->card = ac;
aq->config = !decfg;
aq->chkstop = chkstop;
+ aq->se_bstate = hwinfo.bs;
dev = &aq->ap_dev.device;
dev->bus = &ap_bus_type;
dev->parent = &ac->ap_dev.device;
@@ -1882,6 +1877,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
}
/* handle state changes on already existing queue device */
spin_lock_bh(&aq->lock);
+ /* SE bind state */
+ aq->se_bstate = hwinfo.bs;
/* checkstop state */
if (chkstop && !aq->chkstop) {
/* checkstop on */
@@ -1955,11 +1952,11 @@ put_dev_and_continue:
*/
static inline void ap_scan_adapter(int ap)
{
- int rc, dom, depth, type, comp_type, ml;
+ struct ap_tapq_hwinfo hwinfo;
+ int rc, dom, comp_type;
bool decfg, chkstop;
struct ap_card *ac;
struct device *dev;
- unsigned int func;
ap_qid_t qid;
/* Is there currently a card device for this adapter ? */
@@ -1989,8 +1986,7 @@ static inline void ap_scan_adapter(int ap)
for (dom = 0; dom <= ap_max_domain_id; dom++)
if (ap_test_config_usage_domain(dom)) {
qid = AP_MKQID(ap, dom);
- if (ap_queue_info(qid, &type, &func, &depth,
- &ml, &decfg, &chkstop) > 0)
+ if (ap_queue_info(qid, &hwinfo, &decfg, &chkstop) > 0)
break;
}
if (dom > ap_max_domain_id) {
@@ -2006,7 +2002,7 @@ static inline void ap_scan_adapter(int ap)
}
return;
}
- if (!type) {
+ if (!hwinfo.at) {
		/* No adapter type info available, an unusable adapter */
if (ac) {
AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
@@ -2019,18 +2015,18 @@ static inline void ap_scan_adapter(int ap)
}
return;
}
+ hwinfo.value &= TAPQ_CARD_HWINFO_MASK; /* filter card specific hwinfo */
if (ac) {
/* Check APQN against existing card device for changes */
- if (ac->raw_hwtype != type) {
+ if (ac->hwinfo.at != hwinfo.at) {
AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
- __func__, ap, type);
+ __func__, ap, hwinfo.at);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
- } else if ((ac->functions & TAPQ_CARD_FUNC_CMP_MASK) !=
- (func & TAPQ_CARD_FUNC_CMP_MASK)) {
+ } else if (ac->hwinfo.fac != hwinfo.fac) {
AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
- __func__, ap, func);
+ __func__, ap, hwinfo.fac);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
@@ -2064,13 +2060,13 @@ static inline void ap_scan_adapter(int ap)
if (!ac) {
/* Build a new card device */
- comp_type = ap_get_compatible_type(qid, type, func);
+ comp_type = ap_get_compatible_type(qid, hwinfo.at, hwinfo.fac);
if (!comp_type) {
AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
- __func__, ap, type);
+ __func__, ap, hwinfo.at);
return;
}
- ac = ap_card_create(ap, depth, type, comp_type, func, ml);
+ ac = ap_card_create(ap, hwinfo, comp_type);
if (!ac) {
AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
__func__, ap);
@@ -2101,13 +2097,13 @@ static inline void ap_scan_adapter(int ap)
get_device(dev);
if (decfg)
AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
- __func__, ap, type, func);
+ __func__, ap, hwinfo.at, hwinfo.fac);
else if (chkstop)
AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
- __func__, ap, type, func);
+ __func__, ap, hwinfo.at, hwinfo.fac);
else
AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
- __func__, ap, type, func);
+ __func__, ap, hwinfo.at, hwinfo.fac);
}
/* Verify the domains and the queue devices for this card */
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index b0771ca08..98814839e 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -76,16 +76,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_DEVICE_TYPE_CEX8 14
/*
- * Known function facilities
- */
-#define AP_FUNC_MEX4K 1
-#define AP_FUNC_CRT4K 2
-#define AP_FUNC_COPRO 3
-#define AP_FUNC_ACCEL 4
-#define AP_FUNC_EP11 5
-#define AP_FUNC_APXA 6
-
-/*
* AP queue state machine states
*/
enum ap_sm_state {
@@ -182,9 +172,7 @@ struct ap_device {
struct ap_card {
struct ap_device ap_dev;
- int raw_hwtype; /* AP raw hardware type. */
- unsigned int functions; /* TAPQ GR2 upper 32 facility bits */
- int queue_depth; /* AP queue depth.*/
+ struct ap_tapq_hwinfo hwinfo; /* TAPQ GR2 content */
int id; /* AP card number. */
unsigned int maxmsgsize; /* AP msg limit for this card */
bool config; /* configured state */
@@ -192,7 +180,7 @@ struct ap_card {
atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
-#define TAPQ_CARD_FUNC_CMP_MASK 0xFFFF0000
+#define TAPQ_CARD_HWINFO_MASK 0xFEFF0000FFFF0F0FUL
#define ASSOC_IDX_INVALID 0x10000
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
@@ -206,7 +194,7 @@ struct ap_queue {
bool config; /* configured state */
bool chkstop; /* checkstop state */
ap_qid_t qid; /* AP queue id. */
- bool se_bound; /* SE bound state */
+ unsigned int se_bstate; /* SE bind state (BS) */
unsigned int assoc_idx; /* SE association index */
int queue_count; /* # messages currently on AP queue. */
int pendingq_count; /* # requests on pendingq list. */
@@ -290,8 +278,8 @@ void ap_queue_remove(struct ap_queue *aq);
void ap_queue_init_state(struct ap_queue *aq);
void _ap_queue_init_state(struct ap_queue *aq);
-struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
- int comp_type, unsigned int functions, int ml);
+struct ap_card *ap_card_create(int id, struct ap_tapq_hwinfo info,
+ int comp_type);
#define APMASKSIZE (BITS_TO_LONGS(AP_DEVICES) * sizeof(unsigned long))
#define AQMASKSIZE (BITS_TO_LONGS(AP_DOMAINS) * sizeof(unsigned long))
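
The AP bus rework above replaces five separately-passed TAPQ result fields (type, facilities, depth, ml, ...) with one struct ap_tapq_hwinfo carrying the raw GR2 value. The usual way to get both whole-value masking (as with TAPQ_CARD_HWINFO_MASK) and cheap named access (hwinfo.at, hwinfo.fac, hwinfo.bs) is a union of a 64-bit scalar and bitfields. The sketch below is illustrative only; the field positions are not the real GR2 layout, which is pinned in the arch headers:

#include <stdint.h>
#include <stdio.h>

union example_hwinfo {
	uint64_t value;			/* maskable as one scalar */
	struct {
		uint64_t fac : 32;	/* facility bits (invented position) */
		uint64_t at  : 8;	/* adapter type (invented position) */
		uint64_t bs  : 2;	/* SE bind state (invented position) */
		uint64_t     : 22;
	};
};

int main(void)
{
	union example_hwinfo hw = { .value = 0 };

	hw.at = 14;	/* e.g. AP_DEVICE_TYPE_CEX8 */
	printf("at=%u raw=0x%llx\n", (unsigned)hw.at,
	       (unsigned long long)hw.value);
	return 0;
}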
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index b2bd47765..ce953cbbd 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -34,7 +34,7 @@ static ssize_t raw_hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return sysfs_emit(buf, "%d\n", ac->raw_hwtype);
+ return sysfs_emit(buf, "%d\n", ac->hwinfo.at);
}
static DEVICE_ATTR_RO(raw_hwtype);
@@ -44,7 +44,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
{
struct ap_card *ac = to_ap_card(dev);
- return sysfs_emit(buf, "%d\n", ac->queue_depth);
+ return sysfs_emit(buf, "%d\n", ac->hwinfo.qd);
}
static DEVICE_ATTR_RO(depth);
@@ -54,7 +54,7 @@ static ssize_t ap_functions_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return sysfs_emit(buf, "0x%08X\n", ac->functions);
+ return sysfs_emit(buf, "0x%08X\n", ac->hwinfo.fac);
}
static DEVICE_ATTR_RO(ap_functions);
@@ -229,8 +229,8 @@ static void ap_card_device_release(struct device *dev)
kfree(ac);
}
-struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
- int comp_type, unsigned int functions, int ml)
+struct ap_card *ap_card_create(int id, struct ap_tapq_hwinfo hwinfo,
+ int comp_type)
{
struct ap_card *ac;
@@ -240,12 +240,10 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = comp_type;
- ac->raw_hwtype = raw_type;
- ac->queue_depth = queue_depth;
- ac->functions = functions;
+ ac->hwinfo = hwinfo;
ac->id = id;
- ac->maxmsgsize = ml > 0 ?
- ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE;
+ ac->maxmsgsize = hwinfo.ml > 0 ?
+ hwinfo.ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE;
return ac;
}
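
With the hwinfo snapshot stored whole, ap_card_create() computes only one derived field: the per-card message-size limit, which scales the TAPQ ml value by a fixed chunk size and falls back to a default when ml is zero. A worked check, where both constants are assumptions standing in for AP_TAPQ_ML_FIELD_CHUNK_SIZE and AP_DEFAULT_MAX_MSG_SIZE:

#include <stdio.h>

#define ML_CHUNK	4096		/* assumed chunk size */
#define DEFAULT_MAX	(12 * 1024)	/* assumed default limit */

static unsigned int max_msg_size(unsigned int ml)
{
	return ml > 0 ? ml * ML_CHUNK : DEFAULT_MAX;
}

int main(void)
{
	printf("ml=0 -> %u bytes\n", max_msg_size(0));	/* 12288 (default) */
	printf("ml=6 -> %u bytes\n", max_msg_size(6));	/* 24576 */
	return 0;
}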
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 3934a0cc1..682595443 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -24,13 +24,12 @@ static void __ap_flush_queue(struct ap_queue *aq);
static inline bool ap_q_supports_bind(struct ap_queue *aq)
{
- return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
- ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
+ return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
}
static inline bool ap_q_supports_assoc(struct ap_queue *aq)
{
- return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
+ return aq->card->hwinfo.ep11;
}
static inline bool ap_q_needs_bind(struct ap_queue *aq)
@@ -257,7 +256,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
list_move_tail(&ap_msg->list, &aq->pendingq);
aq->requestq_count--;
aq->pendingq_count++;
- if (aq->queue_count < aq->card->queue_depth) {
+ if (aq->queue_count < aq->card->hwinfo.qd) {
aq->sm_state = AP_SM_STATE_WORKING;
return AP_SM_WAIT_AGAIN;
}
@@ -318,7 +317,6 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
aq->rapq_fbit = 0;
- aq->se_bound = false;
return AP_SM_WAIT_LOW_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
@@ -339,17 +337,15 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
void *lsi_ptr;
- if (aq->queue_count > 0 && aq->reply)
- /* Try to read a completed message and get the status */
- status = ap_sm_recv(aq);
- else
- /* Get the status with TAPQ */
- status = ap_tapq(aq->qid, NULL);
+ /* Get the status with TAPQ */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ aq->se_bstate = hwinfo.bs;
lsi_ptr = ap_airq_ptr();
if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
@@ -421,9 +417,9 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
- struct ap_tapq_gr2 info;
+ struct ap_tapq_hwinfo hwinfo;
- status = ap_test_queue(aq->qid, 1, &info);
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
/* handle asynchronous error on this queue */
if (status.async && status.response_code) {
aq->dev_state = AP_DEV_STATE_ERROR;
@@ -442,8 +438,11 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
return AP_SM_WAIT_NONE;
}
+ /* update queue's SE bind state */
+ aq->se_bstate = hwinfo.bs;
+
/* check bs bits */
- switch (info.bs) {
+ switch (hwinfo.bs) {
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
@@ -460,7 +459,7 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
aq->dev_state = AP_DEV_STATE_ERROR;
aq->last_err_rc = status.response_code;
AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
- __func__, info.bs,
+ __func__, hwinfo.bs,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
@@ -687,9 +686,9 @@ static ssize_t ap_functions_show(struct device *dev,
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
- struct ap_tapq_gr2 info;
+ struct ap_tapq_hwinfo hwinfo;
- status = ap_test_queue(aq->qid, 1, &info);
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
@@ -697,7 +696,7 @@ static ssize_t ap_functions_show(struct device *dev,
return -EIO;
}
- return sysfs_emit(buf, "0x%08X\n", info.fac);
+ return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
}
static DEVICE_ATTR_RO(ap_functions);
@@ -840,19 +839,25 @@ static ssize_t se_bind_show(struct device *dev,
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
- struct ap_tapq_gr2 info;
+ struct ap_tapq_hwinfo hwinfo;
if (!ap_q_supports_bind(aq))
return sysfs_emit(buf, "-\n");
- status = ap_test_queue(aq->qid, 1, &info);
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
- switch (info.bs) {
+
+ /* update queue's SE bind state */
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ spin_unlock_bh(&aq->lock);
+
+ switch (hwinfo.bs) {
case AP_BS_Q_USABLE:
case AP_BS_Q_USABLE_NO_SECURE_KEY:
return sysfs_emit(buf, "bound\n");
@@ -867,6 +872,7 @@ static ssize_t se_bind_store(struct device *dev,
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
bool value;
int rc;
@@ -878,39 +884,80 @@ static ssize_t se_bind_store(struct device *dev,
if (rc)
return rc;
- if (value) {
- /* bind, do BAPQ */
- spin_lock_bh(&aq->lock);
- if (aq->sm_state < AP_SM_STATE_IDLE) {
- spin_unlock_bh(&aq->lock);
- return -EBUSY;
- }
- status = ap_bapq(aq->qid);
- spin_unlock_bh(&aq->lock);
- if (!status.response_code) {
- aq->se_bound = true;
- AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
- AP_QID_CARD(aq->qid),
- AP_QID_QUEUE(aq->qid));
- } else {
- AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid),
- AP_QID_QUEUE(aq->qid));
- return -EIO;
- }
- } else {
- /* unbind, set F bit arg and trigger RAPQ */
+ if (!value) {
+ /* Unbind. Set F bit arg and trigger RAPQ */
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
aq->rapq_fbit = 1;
- aq->assoc_idx = ASSOC_IDX_INVALID;
- aq->sm_state = AP_SM_STATE_RESET_START;
- ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
- spin_unlock_bh(&aq->lock);
+ _ap_queue_init_state(aq);
+ rc = count;
+ goto out;
}
- return count;
+ /* Bind. Check current SE bind state */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ /* Update BS state */
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
+ AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check SM state */
+ if (aq->sm_state < AP_SM_STATE_IDLE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* invoke BAPQ */
+ status = ap_bapq(aq->qid);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+
+ /* verify SE bind state */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+ aq->se_bstate = hwinfo.bs;
+ if (!(hwinfo.bs == AP_BS_Q_USABLE ||
+ hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
+ AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+
+ /* SE bind was successful */
+ AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = count;
+
+out:
+ spin_unlock_bh(&aq->lock);
+ return rc;
}
static DEVICE_ATTR_RW(se_bind);
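
In summary, the rewritten se_bind_store() above now performs a three-step handshake instead of a blind BAPQ (a restatement of the code above, not additional logic):

/*
 *   1. TAPQ  - hwinfo.bs must be AP_BS_Q_AVAIL_FOR_BINDING, else -EINVAL
 *   2. BAPQ  - issue the bind request (queue must be >= AP_SM_STATE_IDLE)
 *   3. TAPQ  - hwinfo.bs must now be AP_BS_Q_USABLE or
 *              AP_BS_Q_USABLE_NO_SECURE_KEY, else -EIO
 *
 * Each TAPQ result is cached in aq->se_bstate, which is what
 * ap_queue_usable() consults instead of the removed se_bound flag.
 */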
@@ -920,12 +967,12 @@ static ssize_t se_associate_show(struct device *dev,
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
- struct ap_tapq_gr2 info;
+ struct ap_tapq_hwinfo hwinfo;
if (!ap_q_supports_assoc(aq))
return sysfs_emit(buf, "-\n");
- status = ap_test_queue(aq->qid, 1, &info);
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
__func__, status.response_code,
@@ -933,7 +980,12 @@ static ssize_t se_associate_show(struct device *dev,
return -EIO;
}
- switch (info.bs) {
+ /* update queue's SE bind state */
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ spin_unlock_bh(&aq->lock);
+
+ switch (hwinfo.bs) {
case AP_BS_Q_USABLE:
if (aq->assoc_idx == ASSOC_IDX_INVALID) {
AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
@@ -955,6 +1007,7 @@ static ssize_t se_associate_store(struct device *dev,
{
struct ap_queue *aq = to_ap_queue(dev);
struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
unsigned int value;
int rc;
@@ -968,18 +1021,28 @@ static ssize_t se_associate_store(struct device *dev,
if (value >= ASSOC_IDX_INVALID)
return -EINVAL;
+ /* check current SE bind state */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
spin_lock_bh(&aq->lock);
-
- /* sm should be in idle state */
- if (aq->sm_state != AP_SM_STATE_IDLE) {
- spin_unlock_bh(&aq->lock);
- return -EBUSY;
+ aq->se_bstate = hwinfo.bs;
+ if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
+ AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EINVAL;
+ goto out;
}
- /* already associated or association pending ? */
- if (aq->assoc_idx != ASSOC_IDX_INVALID) {
- spin_unlock_bh(&aq->lock);
- return -EINVAL;
+ /* check SM state */
+ if (aq->sm_state != AP_SM_STATE_IDLE) {
+ rc = -EBUSY;
+ goto out;
}
/* trigger the asynchronous association request */
@@ -990,17 +1053,20 @@ static ssize_t se_associate_store(struct device *dev,
aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
aq->assoc_idx = value;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
- spin_unlock_bh(&aq->lock);
break;
default:
- spin_unlock_bh(&aq->lock);
AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
__func__, status.response_code,
AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
- return -EIO;
+ rc = -EIO;
+ goto out;
}
- return count;
+ rc = count;
+
+out:
+ spin_unlock_bh(&aq->lock);
+ return rc;
}
static DEVICE_ATTR_RW(se_associate);
@@ -1123,7 +1189,9 @@ bool ap_queue_usable(struct ap_queue *aq)
}
/* SE guest's queues additionally need to be bound */
- if (ap_q_needs_bind(aq) && !aq->se_bound)
+ if (ap_q_needs_bind(aq) &&
+ !(aq->se_bstate == AP_BS_Q_USABLE ||
+ aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
rc = false;
unlock_and_out:
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 76429585c..983b3b161 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -394,8 +394,8 @@ static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
* Register the guest ISC to GIB interface and retrieve the
* host ISC to issue the host side PQAP/AQIC
*
- * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case the
- * vfio_pin_pages failed.
+ * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the
+ * vfio_pin_pages or kvm_s390_gisc_register failed.
*
* Otherwise return the ap_queue_status returned by the ap_aqic(),
* all retry handling will be done by the guest.
@@ -459,7 +459,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
__func__, nisc, isc, q->apqn);
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
- status.response_code = AP_RESPONSE_INVALID_GISA;
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
}
@@ -477,8 +477,11 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
break;
case AP_RESPONSE_OTHERWISE_CHANGED:
/* We could not modify IRQ settings: clear new configuration */
+ ret = kvm_s390_gisc_unregister(kvm, isc);
+ if (ret)
+ VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
+ __func__, ret, isc, q->apqn);
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
- kvm_s390_gisc_unregister(kvm, isc);
break;
default:
pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
@@ -2407,7 +2410,7 @@ static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
bool apid_cleared;
struct ap_queue_status status;
unsigned long apid, apqi;
- struct ap_tapq_gr2 info;
+ struct ap_tapq_hwinfo info;
for_each_set_bit_inv(apid, apm, AP_DEVICES) {
apid_cleared = false;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 973aa9375..53ddae5ad 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -675,7 +675,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
for_each_zcrypt_card(zc) {
/* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
- !(zc->card->functions & 0x18000000))
+ !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
continue;
/* Check for size limits */
if (zc->min_mod_size > mex->inputdatalength ||
@@ -780,7 +780,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
for_each_zcrypt_card(zc) {
/* Check for usable accelerator or CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
- !(zc->card->functions & 0x18000000))
+ !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
continue;
/* Check for size limits */
if (zc->min_mod_size > crt->inputdatalength ||
@@ -895,7 +895,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
for_each_zcrypt_card(zc) {
/* Check for usable CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
- !(zc->card->functions & 0x10000000))
+ !zc->card->hwinfo.cca)
continue;
/* Check for user selected CCA card */
if (xcrb->user_defined != AUTOSELECT &&
@@ -1066,7 +1066,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
for_each_zcrypt_card(zc) {
/* Check for usable EP11 card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
- !(zc->card->functions & 0x04000000))
+ !zc->card->hwinfo.ep11)
continue;
/* Check for user selected EP11 card */
if (targets &&
@@ -1179,7 +1179,7 @@ static long zcrypt_rng(char *buffer)
for_each_zcrypt_card(zc) {
/* Check for usable CCA card */
if (!zc->online || !zc->card->config || zc->card->chkstop ||
- !(zc->card->functions & 0x10000000))
+ !zc->card->hwinfo.cca)
continue;
/* get weight index of the card device */
wgt = zc->speed_rating[func_code];
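
For reference, the magic masks being removed in these zcrypt hunks line up with the new hwinfo bit fields exactly as the replacements themselves establish:

/*
 *   functions & 0x10000000  ->  hwinfo.cca    (CCA coprocessor)
 *   functions & 0x08000000  ->  hwinfo.accel  (accelerator)
 *   functions & 0x04000000  ->  hwinfo.ep11   (EP11 coprocessor)
 *   functions & 0x18000000  ->  hwinfo.accel || hwinfo.cca
 *
 * The value exported to userspace is unchanged: hwinfo.fac >> 26
 * yields the same bits that zc->card->functions >> 26 did.
 */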
@@ -1240,7 +1240,7 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
queue = AP_QID_QUEUE(zq->queue->qid);
stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
- stat->functions = zc->card->functions >> 26;
+ stat->functions = zc->card->hwinfo.fac >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
@@ -1265,7 +1265,7 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
queue = AP_QID_QUEUE(zq->queue->qid);
stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
- stat->functions = zc->card->functions >> 26;
+ stat->functions = zc->card->hwinfo.fac >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
@@ -1288,7 +1288,7 @@ int zcrypt_device_status_ext(int card, int queue,
if (card == AP_QID_CARD(zq->queue->qid) &&
queue == AP_QID_QUEUE(zq->queue->qid)) {
devstat->hwtype = zc->card->ap_dev.device_type;
- devstat->functions = zc->card->functions >> 26;
+ devstat->functions = zc->card->hwinfo.fac >> 26;
devstat->qid = zq->queue->qid;
devstat->online = zq->online ? 0x01 : 0x00;
spin_unlock(&zcrypt_list_lock);
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 5c4532ab0..64df7d2f6 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -477,7 +477,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
return -ENOMEM;
zc->card = ac;
dev_set_drvdata(&ap_dev->device, zc);
- if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+ if (ac->hwinfo.accel) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4A";
zc->user_space_type = ZCRYPT_CEX4;
@@ -506,8 +506,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX6;
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
- if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ if (ac->hwinfo.mex4k && ac->hwinfo.crt4k) {
zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_4K;
@@ -516,7 +515,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_2K;
}
- } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ } else if (ac->hwinfo.cca) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4C";
zc->speed_rating = CEX4C_SPEED_IDX;
@@ -556,7 +555,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
- } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ } else if (ac->hwinfo.ep11) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4P";
zc->user_space_type = ZCRYPT_CEX4;
@@ -599,14 +598,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
return rc;
}
- if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ if (ac->hwinfo.cca) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
zcrypt_card_free(zc);
}
- } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ } else if (ac->hwinfo.ep11) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_card_attr_grp);
if (rc) {
@@ -627,9 +626,9 @@ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
struct ap_card *ac = to_ap_card(&ap_dev->device);
- if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ if (ac->hwinfo.cca)
sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
- else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ else if (ac->hwinfo.ep11)
sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
zcrypt_card_unregister(zc);
@@ -654,19 +653,19 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
struct zcrypt_queue *zq;
int rc;
- if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+ if (aq->card->hwinfo.accel) {
zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
- } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ } else if (aq->card->hwinfo.cca) {
zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
- } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ } else if (aq->card->hwinfo.ep11) {
zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
@@ -689,14 +688,14 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
return rc;
}
- if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+ if (aq->card->hwinfo.cca) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&cca_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
zcrypt_queue_free(zq);
}
- } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ } else if (aq->card->hwinfo.ep11) {
rc = sysfs_create_group(&ap_dev->device.kobj,
&ep11_queue_attr_grp);
if (rc) {
@@ -717,9 +716,9 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ if (aq->card->hwinfo.cca)
sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
- else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ else if (aq->card->hwinfo.ep11)
sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
zcrypt_queue_unregister(zq);
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 70c5bbda0..047fa6101 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -16,7 +16,6 @@
*/
#define ISM_DMB_WORD_OFFSET 1
#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
-#define ISM_IDENT_MASK 0x00FFFF
#define ISM_REG_SBA 0x1
#define ISM_REG_IEQ 0x2
@@ -192,12 +191,6 @@ struct ism_sba {
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
-struct ism_systemeid {
- u8 seid_string[24];
- u8 serial_number[4];
- u8 type[4];
-};
-
static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
unsigned long offset, unsigned long len)
{
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 81aabbfbb..43778b088 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -36,6 +36,7 @@ static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
/* a list for fast mapping */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
+static bool ism_v2_capable;
struct ism_dev_list {
struct list_head list;
struct mutex mutex; /* protects ism device list */
@@ -291,13 +292,16 @@ out:
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
- dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
- dmb->cpu_addr, dmb->dma_addr);
+ dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+ DMA_FROM_DEVICE);
+ folio_put(virt_to_folio(dmb->cpu_addr));
}
static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
+ struct folio *folio;
unsigned long bit;
+ int rc;
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
@@ -314,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
return -EINVAL;
- dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
- &dmb->dma_addr,
- GFP_KERNEL | __GFP_NOWARN |
- __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!dmb->cpu_addr)
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY, get_order(dmb->dmb_len));
- return dmb->cpu_addr ? 0 : -ENOMEM;
+ if (!folio) {
+ rc = -ENOMEM;
+ goto out_bit;
+ }
+
+ dmb->cpu_addr = folio_address(folio);
+ dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+ virt_to_page(dmb->cpu_addr), 0,
+ dmb->dmb_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ kfree(dmb->cpu_addr);
+out_bit:
+ clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ return rc;
}
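
The DMB thus moves from the coherent DMA API to a streaming mapping. Condensing the hunks above together with the matching ism_free_dmb() change:

/*
 *   alloc: folio_alloc() -> folio_address() -> dma_map_page(DMA_FROM_DEVICE)
 *   free:  dma_unmap_page(DMA_FROM_DEVICE) -> folio_put(virt_to_folio(cpu_addr))
 *
 * Unlike dma_alloc_coherent(), a streaming mapping leaves the
 * CPU/device ownership handoff to the usual dma_sync_*() rules.
 */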
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
@@ -443,32 +463,6 @@ int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
}
EXPORT_SYMBOL_GPL(ism_move);
-static struct ism_systemeid SYSTEM_EID = {
- .seid_string = "IBM-SYSZ-ISMSEID00000000",
- .serial_number = "0000",
- .type = "0000",
-};
-
-static void ism_create_system_eid(void)
-{
- struct cpuid id;
- u16 ident_tail;
- char tmp[5];
-
- get_cpu_id(&id);
- ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
- snprintf(tmp, 5, "%04X", ident_tail);
- memcpy(&SYSTEM_EID.serial_number, tmp, 4);
- snprintf(tmp, 5, "%04X", id.machine);
- memcpy(&SYSTEM_EID.type, tmp, 4);
-}
-
-u8 *ism_get_seid(void)
-{
- return SYSTEM_EID.seid_string;
-}
-EXPORT_SYMBOL_GPL(ism_get_seid);
-
static void ism_handle_event(struct ism_dev *ism)
{
struct ism_event *entry;
@@ -560,7 +554,9 @@ static int ism_dev_init(struct ism_dev *ism)
if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
/* hardware is V2 capable */
- ism_create_system_eid();
+ ism_v2_capable = true;
+ else
+ ism_v2_capable = false;
mutex_lock(&ism_dev_list.mutex);
mutex_lock(&clients_lock);
@@ -665,8 +661,7 @@ static void ism_dev_exit(struct ism_dev *ism)
}
mutex_unlock(&clients_lock);
- if (SYSTEM_EID.serial_number[0] != '0' ||
- SYSTEM_EID.type[0] != '0')
+ if (ism_v2_capable)
ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
unregister_ieq(ism);
unregister_sba(ism);
@@ -743,10 +738,10 @@ static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
return ism_cmd(ism, &cmd);
}
-static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
- u32 vid)
+static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
+ u32 vid_valid, u32 vid)
{
- return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
+ return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}
static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
@@ -797,10 +792,11 @@ static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
return ism_cmd(ism, &cmd);
}
-static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
+static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
+ u32 trigger_irq, u32 event_code, u64 info)
{
- return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
+ return ism_signal_ieq(smcd->priv, rgid->gid,
+ trigger_irq, event_code, info);
}
static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
@@ -812,8 +808,7 @@ static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
static int smcd_supports_v2(void)
{
- return SYSTEM_EID.serial_number[0] != '0' ||
- SYSTEM_EID.type[0] != '0';
+ return ism_v2_capable;
}
static u64 ism_get_local_gid(struct ism_dev *ism)
@@ -821,9 +816,11 @@ static u64 ism_get_local_gid(struct ism_dev *ism)
return ism->local_gid;
}
-static u64 smcd_get_local_gid(struct smcd_dev *smcd)
+static void smcd_get_local_gid(struct smcd_dev *smcd,
+ struct smcd_gid *smcd_gid)
{
- return ism_get_local_gid(smcd->priv);
+ smcd_gid->gid = ism_get_local_gid(smcd->priv);
+ smcd_gid->gid_ext = 0;
}
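
The smcd_ops callbacks now pass GIDs by structure instead of as a bare u64. Inferred from the accessors above (a sketch; the authoritative definition is in include/net/smc.h):

struct smcd_gid {
	u64	gid;
	u64	gid_ext;	/* set to 0 by this ISM driver */
};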
static u16 ism_get_chid(struct ism_dev *ism)
@@ -857,7 +854,6 @@ static const struct smcd_ops ism_ops = {
.signal_event = smcd_signal_ieq,
.move_data = smcd_move,
.supports_v2 = smcd_supports_v2,
- .get_system_eid = ism_get_seid,
.get_local_gid = smcd_get_local_gid,
.get_chid = smcd_get_chid,
.get_dev = smcd_get_dev,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cf8506d0f..601d00e09 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1179,6 +1179,20 @@ static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
}
}
+/**
+ * qeth_irq() - qeth interrupt handler
+ * @cdev: ccw device
+ * @intparm: expected to point to the iob
+ * @irb: Interruption Response Block
+ *
+ * On the good path, the corresponding qeth channel is locked with the
+ * last used iob as active_cmd. But this function is also called for
+ * error interrupts.
+ *
+ * Caller ensures that interrupts are disabled and the ccw device lock
+ * is held.
+ *
+ */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
@@ -1220,11 +1234,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
- qeth_unlock_channel(card, channel);
-
rc = qeth_check_irb_error(card, cdev, irb);
if (rc) {
/* IO was terminated, free its resources. */
+ qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
return;
@@ -1268,6 +1281,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
+ qeth_unlock_channel(card, channel);
if (iob)
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
@@ -1276,6 +1290,26 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
+ if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
+ /* The channel command hasn't started yet: retry it.
+ * active_cmd is still set to the last iob.
+ */
+ QETH_CARD_TEXT(card, 2, "irqcc1");
+ rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
+ (addr_t)iob, 0, 0, iob->timeout);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "ccw retry on %x failed, rc = %i\n",
+ CARD_DEVID(card), rc);
+ QETH_CARD_TEXT_(card, 2, " err%d", rc);
+ qeth_unlock_channel(card, channel);
+ qeth_cancel_cmd(iob, rc);
+ }
+ return;
+ }
+
+ qeth_unlock_channel(card, channel);
+
if (iob) {
/* sanity check: */
if (irb->scsw.cmd.count > iob->length) {
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 4f0d0e55f..d6516ab00 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -900,8 +900,19 @@ static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
FC_SYMBOLIC_NAME_SIZE);
hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
- len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
- FC_SYMBOLIC_NAME_SIZE);
+
+ BUILD_BUG_ON(sizeof(rspn_req->name) !=
+ sizeof(fc_host_symbolic_name(shost)));
+ BUILD_BUG_ON(sizeof(rspn_req->name) !=
+ type_max(typeof(rspn_req->rspn.fr_name_len)) + 1);
+ len = strscpy(rspn_req->name, fc_host_symbolic_name(shost),
+ sizeof(rspn_req->name));
+ /*
+ * It should be impossible for this to truncate (see BUILD_BUG_ON()
+ * above), but be robust anyway.
+ */
+ if (WARN_ON(len < 0))
+ len = sizeof(rspn_req->name) - 1;
rspn_req->rspn.fr_name_len = len;
sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
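
The WARN_ON() above leans on strscpy()'s return convention: the number of bytes copied on success, or -E2BIG if the source had to be truncated. A minimal illustration with a hypothetical buffer (not driver code):

char name[8];
ssize_t n;

n = strscpy(name, "short", sizeof(name));		/* n == 5, name = "short" */
n = strscpy(name, "far too long", sizeof(name));	/* n == -E2BIG, name = "far too" */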
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 55989eaa2..9bdb75dfd 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1326,7 +1326,8 @@ static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
}
/* Load rest of compatibility struct */
- strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
+ strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
+ sizeof(tw_dev->tw_compat_info.driver_version));
tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c3028726b..378906f77 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -282,7 +282,7 @@ fail_alloc:
return error;
}
-static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
+static void __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
struct a3000_hostdata *hdata = shost_priv(instance);
@@ -293,11 +293,10 @@ static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
free_irq(IRQ_AMIGA_PORTS, instance);
scsi_host_put(instance);
release_mem_region(res->start, resource_size(res));
- return 0;
}
static struct platform_driver amiga_a3000_scsi_driver = {
- .remove = __exit_p(amiga_a3000_scsi_remove),
+ .remove_new = __exit_p(amiga_a3000_scsi_remove),
.driver = {
.name = "amiga-a3000-scsi",
},
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 5e575afce..e435fc06a 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -95,7 +95,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
return -ENODEV;
}
-static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
+static void __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *host = platform_get_drvdata(pdev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
@@ -106,11 +106,10 @@ static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
kfree(hostdata);
free_irq(host->irq, host);
release_mem_region(res->start, resource_size(res));
- return 0;
}
static struct platform_driver amiga_a4000t_scsi_driver = {
- .remove = __exit_p(amiga_a4000t_scsi_remove),
+ .remove_new = __exit_p(amiga_a4000t_scsi_remove),
.driver = {
.name = "amiga-a4000t-scsi",
},
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index bdd177e3d..a19cdd87c 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -87,17 +87,17 @@ aic7770_probe(struct device *dev)
sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
name = kstrdup(buf, GFP_ATOMIC);
if (name == NULL)
- return (ENOMEM);
+ return -ENOMEM;
ahc = ahc_alloc(&aic7xxx_driver_template, name);
if (ahc == NULL)
- return (ENOMEM);
+ return -ENOMEM;
ahc->dev = dev;
error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data,
eisaBase);
if (error != 0) {
ahc->bsh.ioport = 0;
ahc_free(ahc);
- return (error);
+ return error < 0 ? error : -error;
}
dev_set_drvdata(dev, ahc);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 4ae0a1c4d..b0c4f2345 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1085,7 +1085,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
template->name = ahc->description;
host = scsi_host_alloc(template, sizeof(struct ahc_softc *));
if (host == NULL)
- return (ENOMEM);
+ return -ENOMEM;
*((struct ahc_softc **)host->hostdata) = ahc;
ahc->platform_data->host = host;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index a07e94fac..198440dc0 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -241,8 +241,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahc_linux_pci_inherit_flags(ahc);
pci_set_drvdata(pdev, ahc);
- ahc_linux_register_host(ahc, &aic7xxx_driver_template);
- return (0);
+ return ahc_linux_register_host(ahc, &aic7xxx_driver_template);
}
/******************************* PCI Routines *********************************/
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 3819d559e..12c4148fe 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -50,7 +50,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.50.00.13-20230206"
+#define ARCMSR_DRIVER_VERSION "v1.51.00.14-20230915"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -822,6 +822,23 @@ typedef struct deliver_completeQ {
uint16_t cmdLMID; // reserved (0)
uint16_t cmdFlag2; // reserved (0)
} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
+
+#define ARCMSR_XOR_SEG_SIZE (1024 * 1024)
+struct HostRamBuf {
+ uint32_t hrbSignature; // must be "HRBS"
+ uint32_t hrbSize; // total sg size, must be a multiple of 1 MB
+ uint32_t hrbRes[2]; // reserved, must be set to 0
+};
+struct Xor_sg {
+ dma_addr_t xorPhys;
+ uint64_t xorBufLen;
+};
+struct XorHandle {
+ dma_addr_t xorPhys;
+ uint64_t xorBufLen;
+ void *xorVirt;
+};
+
/*
*******************************************************************************
** Adapter Control Block
@@ -933,6 +950,7 @@ struct AdapterControlBlock
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
+ uint32_t firm_PicStatus;
struct work_struct arcmsr_do_message_isr_bh;
struct timer_list eternal_timer;
unsigned short fw_flag;
@@ -941,6 +959,7 @@ struct AdapterControlBlock
#define FW_DEADLOCK 0x0010
uint32_t maxOutstanding;
int vector_count;
+ int xor_mega;
uint32_t maxFreeCCB;
struct timer_list refresh_timer;
uint32_t doneq_index;
@@ -950,6 +969,10 @@ struct AdapterControlBlock
uint32_t completionQ_entry;
pCompletion_Q pCompletionQ;
uint32_t completeQ_size;
+ void *xorVirt;
+ dma_addr_t xorPhys;
+ unsigned int init2cfg_size;
+ unsigned int xorVirtOffset;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 01fb1396e..ad227e6cb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -751,6 +751,57 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
return rtn;
}
+static int arcmsr_alloc_xor_buffer(struct AdapterControlBlock *acb)
+{
+ int rc = 0;
+ struct pci_dev *pdev = acb->pdev;
+ void *dma_coherent;
+ dma_addr_t dma_coherent_handle;
+ int i, xor_ram;
+ struct Xor_sg *pXorPhys;
+ void **pXorVirt;
+ struct HostRamBuf *pRamBuf;
+
+ // allocate 1 MB * N physically contiguous memory for the XOR engine.
+ xor_ram = (acb->firm_PicStatus >> 24) & 0x0f;
+ acb->xor_mega = (xor_ram - 1) * 32 + 128 + 3;
+ acb->init2cfg_size = sizeof(struct HostRamBuf) +
+ (sizeof(struct XorHandle) * acb->xor_mega);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->init2cfg_size,
+ &dma_coherent_handle, GFP_KERNEL);
+ acb->xorVirt = dma_coherent;
+ acb->xorPhys = dma_coherent_handle;
+ pXorPhys = (struct Xor_sg *)((unsigned long)dma_coherent +
+ sizeof(struct HostRamBuf));
+ acb->xorVirtOffset = sizeof(struct HostRamBuf) +
+ (sizeof(struct Xor_sg) * acb->xor_mega);
+ pXorVirt = (void **)((unsigned long)dma_coherent +
+ (unsigned long)acb->xorVirtOffset);
+ for (i = 0; i < acb->xor_mega; i++) {
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ ARCMSR_XOR_SEG_SIZE,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (dma_coherent) {
+ pXorPhys->xorPhys = dma_coherent_handle;
+ pXorPhys->xorBufLen = ARCMSR_XOR_SEG_SIZE;
+ *pXorVirt = dma_coherent;
+ pXorPhys++;
+ pXorVirt++;
+ } else {
+ pr_info("arcmsr%d: alloc max XOR buffer = 0x%x MB\n",
+ acb->host->host_no, i);
+ rc = -ENOMEM;
+ break;
+ }
+ }
+ pRamBuf = (struct HostRamBuf *)acb->xorVirt;
+ pRamBuf->hrbSignature = 0x53425248; //HRBS
+ pRamBuf->hrbSize = i * ARCMSR_XOR_SEG_SIZE;
+ pRamBuf->hrbRes[0] = 0;
+ pRamBuf->hrbRes[1] = 0;
+ return rc;
+}
+
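
For orientation, this is the layout of the init2cfg buffer handed to the IOP, as implied by the offset arithmetic in arcmsr_alloc_xor_buffer() above (illustrative only):

/*
 *   +---------------------+  <- acb->xorVirt / acb->xorPhys
 *   | struct HostRamBuf   |  "HRBS" signature + total segment size
 *   +---------------------+
 *   | struct Xor_sg  [N]  |  per-segment {dma addr, length}, device-visible
 *   +---------------------+  <- acb->xorVirt + acb->xorVirtOffset
 *   | void *         [N]  |  per-segment kernel virtual addrs, host-only
 *   +---------------------+
 *
 * with N == acb->xor_mega and every segment ARCMSR_XOR_SEG_SIZE (1 MB);
 * note init2cfg_size is computed via sizeof(struct XorHandle), which
 * equals sizeof(struct Xor_sg) + sizeof(void *), covering both arrays.
 */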
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
struct pci_dev *pdev = acb->pdev;
@@ -840,7 +891,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ);
acb->doneq_index = 0;
break;
- }
+ }
+ if ((acb->firm_PicStatus >> 24) & 0x0f) {
+ if (arcmsr_alloc_xor_buffer(acb))
+ return -ENOMEM;
+ }
return 0;
}
@@ -2026,6 +2081,29 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
+ if (acb->xor_mega) {
+ struct Xor_sg *pXorPhys;
+ void **pXorVirt;
+ int i;
+
+ pXorPhys = (struct Xor_sg *)(acb->xorVirt +
+ sizeof(struct HostRamBuf));
+ pXorVirt = (void **)((unsigned long)acb->xorVirt +
+ (unsigned long)acb->xorVirtOffset);
+ for (i = 0; i < acb->xor_mega; i++) {
+ if (pXorPhys->xorPhys) {
+ dma_free_coherent(&acb->pdev->dev,
+ ARCMSR_XOR_SEG_SIZE,
+ *pXorVirt, pXorPhys->xorPhys);
+ pXorPhys->xorPhys = 0;
+ *pXorVirt = NULL;
+ }
+ pXorPhys++;
+ pXorVirt++;
+ }
+ dma_free_coherent(&acb->pdev->dev, acb->init2cfg_size,
+ acb->xorVirt, acb->xorPhys);
+ }
dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
@@ -3313,6 +3391,10 @@ static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t
pACB->firm_sdram_size = readl(&rwbuffer[3]);
pACB->firm_hd_channels = readl(&rwbuffer[4]);
pACB->firm_cfg_version = readl(&rwbuffer[25]);
+ if (pACB->adapter_type == ACB_ADAPTER_TYPE_F)
+ pACB->firm_PicStatus = readl(&rwbuffer[30]);
+ else
+ pACB->firm_PicStatus = 0;
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
pACB->host->host_no,
pACB->firm_model,
@@ -4100,6 +4182,12 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
acb->msgcode_rwbuffer[7] = acb->completeQ_size;
+ if (acb->xor_mega) {
+ acb->msgcode_rwbuffer[8] = 0x455AA; //Linux init 2
+ acb->msgcode_rwbuffer[9] = 0;
+ acb->msgcode_rwbuffer[10] = lower_32_bits(acb->xorPhys);
+ acb->msgcode_rwbuffer[11] = upper_32_bits(acb->xorPhys);
+ }
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
writel(acb->out_doorbell, &reg->iobound_doorbell);
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index d401cf271..d99e70914 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -865,7 +865,7 @@ fail_alloc:
return error;
}
-static int __exit atari_scsi_remove(struct platform_device *pdev)
+static void __exit atari_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
@@ -876,11 +876,10 @@ static int __exit atari_scsi_remove(struct platform_device *pdev)
scsi_host_put(instance);
if (atari_dma_buffer)
atari_stram_free(atari_dma_buffer);
- return 0;
}
static struct platform_driver atari_scsi_driver = {
- .remove = __exit_p(atari_scsi_remove),
+ .remove_new = __exit_p(atari_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
},
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 451a58e0f..1078c20c5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1734,32 +1734,32 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
switch (pdev->device) {
case PCI_DEVICE_ID_NX2_57710:
- strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+ strscpy(hba->chip_num, "BCM57710", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57711:
- strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+ strscpy(hba->chip_num, "BCM57711", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57712:
case PCI_DEVICE_ID_NX2_57712_MF:
case PCI_DEVICE_ID_NX2_57712_VF:
- strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+ strscpy(hba->chip_num, "BCM57712", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57800:
case PCI_DEVICE_ID_NX2_57800_MF:
case PCI_DEVICE_ID_NX2_57800_VF:
- strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+ strscpy(hba->chip_num, "BCM57800", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57810:
case PCI_DEVICE_ID_NX2_57810_MF:
case PCI_DEVICE_ID_NX2_57810_VF:
- strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+ strscpy(hba->chip_num, "BCM57810", sizeof(hba->chip_num));
break;
case PCI_DEVICE_ID_NX2_57840:
case PCI_DEVICE_ID_NX2_57840_MF:
case PCI_DEVICE_ID_NX2_57840_VF:
case PCI_DEVICE_ID_NX2_57840_2_20:
case PCI_DEVICE_ID_NX2_57840_4_10:
- strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+ strscpy(hba->chip_num, "BCM57840", sizeof(hba->chip_num));
break;
default:
pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
@@ -1797,7 +1797,7 @@ static int bnx2fc_ulp_get_stats(void *handle)
if (!stats_addr)
return -EINVAL;
- strncpy(stats_addr->version, BNX2FC_VERSION,
+ strscpy(stats_addr->version, BNX2FC_VERSION,
sizeof(stats_addr->version));
stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index 8d72b2553..f893e9779 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -89,7 +89,7 @@ bvme6000_probe(struct platform_device *dev)
return -ENODEV;
}
-static int
+static void
bvme6000_device_remove(struct platform_device *dev)
{
struct Scsi_Host *host = platform_get_drvdata(dev);
@@ -99,8 +99,6 @@ bvme6000_device_remove(struct platform_device *dev)
NCR_700_release(host);
kfree(hostdata);
free_irq(host->irq, host);
-
- return 0;
}
static struct platform_driver bvme6000_scsi_driver = {
@@ -108,7 +106,7 @@ static struct platform_driver bvme6000_scsi_driver = {
.name = "bvme6000-scsi",
},
.probe = bvme6000_probe,
- .remove = bvme6000_device_remove,
+ .remove_new = bvme6000_device_remove,
};
static int __init bvme6000_scsi_init(void)
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index cb0a399be..2b864061e 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -659,19 +659,23 @@ static long ch_ioctl(struct file *file,
memset(&vparams,0,sizeof(vparams));
if (ch->counts[CHET_V1]) {
vparams.cvp_n1 = ch->counts[CHET_V1];
- strncpy(vparams.cvp_label1,vendor_labels[0],16);
+ strscpy(vparams.cvp_label1, vendor_labels[0],
+ sizeof(vparams.cvp_label1));
}
if (ch->counts[CHET_V2]) {
vparams.cvp_n2 = ch->counts[CHET_V2];
- strncpy(vparams.cvp_label2,vendor_labels[1],16);
+ strscpy(vparams.cvp_label2, vendor_labels[1],
+ sizeof(vparams.cvp_label2));
}
if (ch->counts[CHET_V3]) {
vparams.cvp_n3 = ch->counts[CHET_V3];
- strncpy(vparams.cvp_label3,vendor_labels[2],16);
+ strscpy(vparams.cvp_label3, vendor_labels[2],
+ sizeof(vparams.cvp_label3));
}
if (ch->counts[CHET_V4]) {
vparams.cvp_n4 = ch->counts[CHET_V4];
- strncpy(vparams.cvp_label4,vendor_labels[3],16);
+ strscpy(vparams.cvp_label4, vendor_labels[3],
+ sizeof(vparams.cvp_label4));
}
if (copy_to_user(argp, &vparams, sizeof(vparams)))
return -EFAULT;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 0c32faefa..d649b7a2a 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -521,7 +521,8 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
goto err;
hw->pdev = pdev;
- strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
+ strscpy(hw->drv_version, CSIO_DRV_VERSION,
+ sizeof(hw->drv_version));
/* memory pool/DMA pool allocation */
if (csio_resource_alloc(hw))
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index c8e86f8a6..d108a86e1 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1366,7 +1366,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
"command while another command (0x%p) is active.",
srb->cmd,
acb->active_dcb->active_srb ?
- acb->active_dcb->active_srb->cmd : 0);
+ acb->active_dcb->active_srb->cmd : NULL);
return 1;
}
if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
diff --git a/drivers/scsi/elx/libefc/efc_node.h b/drivers/scsi/elx/libefc/efc_node.h
index e9c600ac4..e57579988 100644
--- a/drivers/scsi/elx/libefc/efc_node.h
+++ b/drivers/scsi/elx/libefc/efc_node.h
@@ -26,13 +26,13 @@ efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
struct efc_node *node = ctx->app;
if (evt == EFC_EVT_ENTER) {
- strncpy(node->current_state_name, handler,
- sizeof(node->current_state_name));
+ strscpy_pad(node->current_state_name, handler,
+ sizeof(node->current_state_name));
} else if (evt == EFC_EVT_EXIT) {
- strncpy(node->prev_state_name, node->current_state_name,
- sizeof(node->prev_state_name));
- strncpy(node->current_state_name, "invalid",
- sizeof(node->current_state_name));
+ memcpy(node->prev_state_name, node->current_state_name,
+ sizeof(node->prev_state_name));
+ strscpy_pad(node->current_state_name, "invalid",
+ sizeof(node->current_state_name));
}
node->prev_evt = node->current_evt;
node->current_evt = evt;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index e17957f80..c64a085a7 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>
@@ -214,25 +215,13 @@ static const char *get_fcoe_##title##_name(enum table_type table_key) \
return table[table_key]; \
}
-static char *fip_conn_type_names[] = {
+static const char * const fip_conn_type_names[] = {
[ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
[ FIP_CONN_TYPE_FABRIC ] = "Fabric",
[ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
};
fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-static enum fip_conn_type fcoe_parse_mode(const char *buf)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
- if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
- return i;
- }
-
- return FIP_CONN_TYPE_UNKNOWN;
-}
-
static char *fcf_state_names[] = {
[ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
[ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
@@ -274,17 +263,11 @@ static ssize_t store_ctlr_mode(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
- char mode[FCOE_MAX_MODENAME_LEN + 1];
+ int res;
if (count > FCOE_MAX_MODENAME_LEN)
return -EINVAL;
- strncpy(mode, buf, count);
-
- if (mode[count - 1] == '\n')
- mode[count - 1] = '\0';
- else
- mode[count] = '\0';
switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED:
@@ -297,12 +280,13 @@ static ssize_t store_ctlr_mode(struct device *dev,
return -ENOTSUPP;
}
- ctlr->mode = fcoe_parse_mode(mode);
- if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ res = sysfs_match_string(fip_conn_type_names, buf);
+ if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
buf);
return -EINVAL;
}
+ ctlr->mode = res;
ctlr->f->set_fcoe_ctlr_mode(ctlr);
LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 22cef283b..ce73f08ee 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.57"
+#define DRV_VERSION "1.7.0.0"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -36,7 +36,6 @@
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
-#define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
@@ -109,7 +108,7 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
-
+#define FNIC_PCI_OFFSET 2
/**
* state_flags to identify host state along with fnic's state
**/
@@ -144,31 +143,48 @@ do { \
} while (0); \
} while (0)
-#define FNIC_MAIN_DBG(kern_level, host, fmt, args...) \
+#define FNIC_MAIN_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_MAIN_LOGGING, \
- shost_printk(kern_level, host, fmt, ##args);)
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
-#define FNIC_FCS_DBG(kern_level, host, fmt, args...) \
+#define FNIC_FCS_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
- shost_printk(kern_level, host, fmt, ##args);)
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
-#define FNIC_SCSI_DBG(kern_level, host, fmt, args...) \
+#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
- shost_printk(kern_level, host, fmt, ##args);)
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
-#define FNIC_ISR_DBG(kern_level, host, fmt, args...) \
+#define FNIC_ISR_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \
- shost_printk(kern_level, host, fmt, ##args);)
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \
shost_printk(kern_level, host, fmt, ##args)
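
With the new prefix, a call such as the following (hypothetical call site) now logs along the lines of "fnic<0>: fnic_handle_link: 63: link up" instead of the bare message:

FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
	      "link %s\n", "up");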
+#define FNIC_WQ_COPY_MAX 64
+#define FNIC_WQ_MAX 1
+#define FNIC_RQ_MAX 1
+#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
+#define FNIC_DFLT_IO_COMPLETIONS 256
+
+#define FNIC_MQ_CQ_INDEX 2
+
extern const char *fnic_state_str[];
enum fnic_intx_intr_index {
FNIC_INTX_WQ_RQ_COPYWQ,
- FNIC_INTX_ERR,
+ FNIC_INTX_DUMMY,
FNIC_INTX_NOTIFY,
+ FNIC_INTX_ERR,
FNIC_INTX_INTR_MAX,
};
@@ -176,7 +192,7 @@ enum fnic_msix_intr_index {
FNIC_MSIX_RQ,
FNIC_MSIX_WQ,
FNIC_MSIX_WQ_COPY,
- FNIC_MSIX_ERR_NOTIFY,
+ FNIC_MSIX_ERR_NOTIFY = FNIC_MSIX_WQ_COPY + FNIC_WQ_COPY_MAX,
FNIC_MSIX_INTR_MAX,
};
@@ -185,6 +201,7 @@ struct fnic_msix_entry {
char devname[IFNAMSIZ + 11];
irqreturn_t (*isr)(int, void *);
void *devid;
+ int irq_num;
};
enum fnic_state {
@@ -194,12 +211,6 @@ enum fnic_state {
FNIC_IN_ETH_TRANS_FC_MODE,
};
-#define FNIC_WQ_COPY_MAX 1
-#define FNIC_WQ_MAX 1
-#define FNIC_RQ_MAX 1
-#define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX)
-#define FNIC_DFLT_IO_COMPLETIONS 256
-
struct mempool;
enum fnic_evt {
@@ -214,8 +225,16 @@ struct fnic_event {
enum fnic_evt event;
};
+struct fnic_cpy_wq {
+ unsigned long hw_lock_flags;
+ u16 active_ioreq_count;
+ u16 ioreq_table_size;
+ ____cacheline_aligned struct fnic_io_req **io_req_table;
+};
+
/* Per-instance private data structure */
struct fnic {
+ int fnic_num;
struct fc_lport *lport;
struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
struct vnic_dev_bar bar0;
@@ -282,10 +301,11 @@ struct fnic {
struct fnic_host_tag *tags;
mempool_t *io_req_pool;
mempool_t *io_sgl_pool[FNIC_SGL_NUM_CACHES];
- spinlock_t io_req_lock[FNIC_IO_LOCKS]; /* locks for scsi cmnds */
+ unsigned int copy_wq_base;
struct work_struct link_work;
struct work_struct frame_work;
+ struct work_struct flush_work;
struct sk_buff_head frame_queue;
struct sk_buff_head tx_queue;
@@ -302,7 +322,9 @@ struct fnic {
/*** FIP related data members -- end ***/
/* copy work queue cache line section */
- ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
+ ____cacheline_aligned struct vnic_wq_copy hw_copy_wq[FNIC_WQ_COPY_MAX];
+ ____cacheline_aligned struct fnic_cpy_wq sw_copy_wq[FNIC_WQ_COPY_MAX];
+
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
@@ -330,6 +352,7 @@ extern const struct attribute_group *fnic_host_groups[];
void fnic_clear_intr_mode(struct fnic *fnic);
int fnic_set_intr_mode(struct fnic *fnic);
+int fnic_set_intr_mode_msix(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);
@@ -341,7 +364,7 @@ void fnic_handle_event(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-void fnic_flush_tx(struct fnic *);
+void fnic_flush_tx(struct work_struct *work);
void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
void fnic_update_mac(struct fc_lport *, u8 *new);
@@ -356,7 +379,7 @@ void fnic_scsi_cleanup(struct fc_lport *);
void fnic_scsi_abort_io(struct fc_lport *);
void fnic_empty_scsi_cleanup(struct fc_lport *);
void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
-int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int);
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -364,7 +387,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
int fnic_fw_reset_handler(struct fnic *fnic);
void fnic_terminate_rport_io(struct fc_rport *);
const char *fnic_state_to_str(unsigned int state);
-
+void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 55632c67a..a08293b2a 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -63,8 +63,8 @@ void fnic_handle_link(struct work_struct *work)
atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
new_port_speed);
if (old_port_speed != new_port_speed)
- FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
- "Current vnic speed set to : %llu\n",
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "Current vnic speed set to: %llu\n",
new_port_speed);
switch (vnic_dev_port_speed(fnic->vdev)) {
@@ -102,6 +102,8 @@ void fnic_handle_link(struct work_struct *work)
fnic_fc_trace_set_data(fnic->lport->host->host_no,
FNIC_FC_LE, "Link Status: DOWN->DOWN",
strlen("Link Status: DOWN->DOWN"));
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "down->down\n");
} else {
if (old_link_down_cnt != fnic->link_down_cnt) {
/* UP -> DOWN -> UP */
@@ -113,7 +115,7 @@ void fnic_handle_link(struct work_struct *work)
"Link Status:UP_DOWN_UP",
strlen("Link_Status:UP_DOWN_UP")
);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
@@ -128,8 +130,8 @@ void fnic_handle_link(struct work_struct *work)
fnic_fcoe_send_vlan_req(fnic);
return;
}
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "link up\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "up->down->up: Link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> UP */
@@ -138,6 +140,8 @@ void fnic_handle_link(struct work_struct *work)
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_UP",
strlen("Link Status: UP_UP"));
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "up->up\n");
}
}
} else if (fnic->link_status) {
@@ -153,7 +157,8 @@ void fnic_handle_link(struct work_struct *work)
return;
}
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "down->up: Link up\n");
fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
fcoe_ctlr_link_up(&fnic->ctlr);
@@ -161,13 +166,14 @@ void fnic_handle_link(struct work_struct *work)
/* UP -> DOWN */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "up->down: Link down\n");
fnic_fc_trace_set_data(
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_DOWN",
strlen("Link Status: UP_DOWN"));
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"deleting fip-timer during link-down\n");
del_timer_sync(&fnic->fip_timer);
}
@@ -270,12 +276,12 @@ void fnic_handle_event(struct work_struct *work)
spin_lock_irqsave(&fnic->fnic_lock, flags);
break;
case FNIC_EVT_START_FCF_DISC:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Start FCF Discovery\n");
fnic_fcoe_start_fcf_disc(fnic);
break;
default:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Unknown event 0x%x\n", fevt->event);
break;
}
@@ -370,7 +376,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
fnic->set_vlan(fnic, 0);
if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Sending VLAN request...\n");
skb = dev_alloc_skb(sizeof(struct fip_vlan));
@@ -423,12 +429,12 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
u64 sol_time;
unsigned long flags;
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Received VLAN response...\n");
fiph = (struct fip_header *) skb->data;
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
ntohs(fiph->fip_op), fiph->fip_subcode);
@@ -463,7 +469,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
if (list_empty(&fnic->vlans)) {
/* retry from timer */
atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
goto out;
@@ -721,7 +727,8 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
new = ctl;
if (ether_addr_equal(data, new))
return;
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "update_mac %pM\n", new);
if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
memcpy(data, new, ETH_ALEN);
@@ -763,8 +770,9 @@ void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
u8 *mac;
int ret;
- FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
- port_id, fp);
+ FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num,
+ "set port_id 0x%x fp 0x%p\n",
+ port_id, fp);
/*
* If we're clearing the FC_ID, change to use the ctl_src_addr.
@@ -790,10 +798,9 @@ void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
else {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Unexpected fnic state %s while"
- " processing flogi resp\n",
- fnic_state_to_str(fnic->state));
+ FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "Unexpected fnic state: %s processing FLOGI response",
+ fnic_state_to_str(fnic->state));
spin_unlock_irq(&fnic->fnic_lock);
return;
}
@@ -870,7 +877,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
skb_trim(skb, bytes_written);
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fcs error. dropping packet.\n");
goto drop;
}
@@ -886,7 +893,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
" x%x\n",
@@ -967,7 +974,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
skb = dev_alloc_skb(len);
if (!skb) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Unable to allocate RQ sk_buff\n");
return -ENOMEM;
}
@@ -1175,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
/**
* fnic_flush_tx() - send queued frames.
- * @fnic: fnic device
+ * @work: pointer to work element
*
* Send frames that were waiting to go out in FC or Ethernet mode.
* Whenever changing modes we purge queued frames, so these frames should
@@ -1183,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
*
* Called without fnic_lock held.
*/
-void fnic_flush_tx(struct fnic *fnic)
+void fnic_flush_tx(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, flush_work);
struct sk_buff *skb;
struct fc_frame *fp;
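
The hunk above turns fnic_flush_tx() into a deferred work item: callers now queue fnic->flush_work (initialized with INIT_WORK later in this patch) instead of calling the function directly, and the fnic is recovered from the work pointer. A minimal sketch of the container_of pattern, using a stand-in struct rather than the driver's real layout:

#include <linux/workqueue.h>

struct example_dev {
        struct work_struct flush_work;
        /* ... rest of the device state ... */
};

static void example_flush_tx(struct work_struct *work)
{
        /* Recover the enclosing device from the embedded work item. */
        struct example_dev *dev =
                container_of(work, struct example_dev, flush_work);

        /* drain dev's transmit queue here */
}

/* setup:   INIT_WORK(&dev->flush_work, example_flush_tx); */
/* trigger: queue_work(system_wq, &dev->flush_work);       */
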
@@ -1341,12 +1349,12 @@ void fnic_handle_fip_timer(struct fnic *fnic)
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fip_timer: vlan %d state %d sol_count %d\n",
vlan->vid, vlan->state, vlan->sol_count);
switch (vlan->state) {
case FIP_VLAN_USED:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"FIP VLAN is selected for FC transaction\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
break;
@@ -1365,7 +1373,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
* no response on this vlan, remove from the list.
* Try the next vlan
*/
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"Dequeue this VLAN ID %d from list\n",
vlan->vid);
list_del(&vlan->list);
@@ -1375,7 +1383,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
/* we exhausted all vlans, restart vlan disc */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"fip_timer: vlan list empty, "
"trigger vlan disc\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
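
Throughout fnic_fcs.c this patch threads a new fnic->fnic_num argument (the IDA-allocated adapter index added in fnic_main.c below) into every FNIC_FCS_DBG() call so logs from multiple adapters can be told apart. The macro itself lives in fnic.h and is not part of this diff; a hypothetical sketch of the shape such a macro takes, assuming a module-level log-level mask:

/* Hypothetical only; the real definition in fnic.h may differ. */
#define EXAMPLE_FCS_DBG(kern_level, host, fnic_num, fmt, args...)      \
do {                                                                   \
        if (unlikely(example_log_level & EXAMPLE_FCS_LOGGING))         \
                shost_printk(kern_level, host, "fnic<%d>: " fmt,       \
                             fnic_num, ##args);                        \
} while (0)
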
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 8896758fe..ff85441c6 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -38,8 +38,13 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
fnic_log_q_error(fnic);
}
+ if (pba & (1 << FNIC_INTX_DUMMY)) {
+ atomic64_inc(&fnic->fnic_stats.misc_stats.intx_dummy);
+ vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_DUMMY]);
+ }
+
if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
- work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
+ work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -60,7 +65,7 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
- work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
+ work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions, FNIC_MQ_CQ_INDEX);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -109,12 +114,22 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0;
+ int i;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
- wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions);
- vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
+ i = irq - fnic->msix[0].irq_num;
+ if (i >= fnic->wq_copy_count + fnic->copy_wq_base ||
+ i < 0 || fnic->msix[i].irq_num != irq) {
+ for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base; i++) {
+ if (fnic->msix[i].irq_num == irq)
+ break;
+ }
+ }
+
+ wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions, i);
+ vnic_intr_return_credits(&fnic->intr[i],
wq_copy_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
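
fnic_isr_msix_wq_copy() now has to work out which copy WQ fired: the fast path assumes the vectors were handed out contiguously and computes the index as irq - msix[0].irq_num; if that index is out of range or does not match, it falls back to a linear scan of the copy-WQ vector range. The same logic, pulled out as a helper, a sketch that assumes the msix[]/irq_num and copy_wq_base fields shown above:

static int example_irq_to_index(struct fnic *fnic, int irq)
{
        int i = irq - fnic->msix[0].irq_num;    /* fast path: contiguous vectors */

        if (i < 0 || i >= fnic->copy_wq_base + fnic->wq_copy_count ||
            fnic->msix[i].irq_num != irq) {
                for (i = fnic->copy_wq_base;
                     i < fnic->copy_wq_base + fnic->wq_copy_count; i++)
                        if (fnic->msix[i].irq_num == irq)
                                break;
        }
        return i;
}
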
@@ -128,7 +143,7 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
- vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
+ vnic_intr_return_all_credits(&fnic->intr[fnic->err_intr_offset]);
fnic_log_q_error(fnic);
fnic_handle_link_event(fnic);
@@ -186,26 +201,30 @@ int fnic_request_intr(struct fnic *fnic)
fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
fnic->msix[FNIC_MSIX_WQ].devid = fnic;
- sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
- "%.11s-scsi-wq", fnic->name);
- fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
- fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
+ for (i = fnic->copy_wq_base; i < fnic->wq_copy_count + fnic->copy_wq_base; i++) {
+ sprintf(fnic->msix[i].devname,
+ "%.11s-scsi-wq-%d", fnic->name, i-FNIC_MSIX_WQ_COPY);
+ fnic->msix[i].isr = fnic_isr_msix_wq_copy;
+ fnic->msix[i].devid = fnic;
+ }
- sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
+ sprintf(fnic->msix[fnic->err_intr_offset].devname,
"%.11s-err-notify", fnic->name);
- fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
+ fnic->msix[fnic->err_intr_offset].isr =
fnic_isr_msix_err_notify;
- fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
+ fnic->msix[fnic->err_intr_offset].devid = fnic;
- for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
- err = request_irq(pci_irq_vector(fnic->pdev, i),
- fnic->msix[i].isr, 0,
- fnic->msix[i].devname,
- fnic->msix[i].devid);
+ for (i = 0; i < fnic->intr_count; i++) {
+ fnic->msix[i].irq_num = pci_irq_vector(fnic->pdev, i);
+
+ err = request_irq(fnic->msix[i].irq_num,
+ fnic->msix[i].isr, 0,
+ fnic->msix[i].devname,
+ fnic->msix[i].devid);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "MSIX: request_irq"
- " failed %d\n", err);
+ FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "request_irq failed with error: %d\n",
+ err);
fnic_free_intr(fnic);
break;
}
@@ -220,44 +239,99 @@ int fnic_request_intr(struct fnic *fnic)
return err;
}
-int fnic_set_intr_mode(struct fnic *fnic)
+int fnic_set_intr_mode_msix(struct fnic *fnic)
{
unsigned int n = ARRAY_SIZE(fnic->rq);
unsigned int m = ARRAY_SIZE(fnic->wq);
- unsigned int o = ARRAY_SIZE(fnic->wq_copy);
+ unsigned int o = ARRAY_SIZE(fnic->hw_copy_wq);
+ unsigned int min_irqs = n + m + 1 + 1; /* rq, raw wq, one copy wq, err */
/*
- * Set interrupt mode (INTx, MSI, MSI-X) depending
- * system capabilities.
- *
- * Try MSI-X first
- *
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
- if (fnic->rq_count >= n &&
- fnic->raw_wq_count >= m &&
- fnic->wq_copy_count >= o &&
- fnic->cq_count >= n + m + o) {
- int vecs = n + m + o + 1;
-
- if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
- PCI_IRQ_MSIX) == vecs) {
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
+ n, m, o);
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
+ fnic->rq_count, fnic->raw_wq_count,
+ fnic->wq_copy_count, fnic->cq_count);
+
+ if (fnic->rq_count <= n && fnic->raw_wq_count <= m &&
+ fnic->wq_copy_count <= o) {
+ int vec_count = 0;
+ int vecs = fnic->rq_count + fnic->raw_wq_count + fnic->wq_copy_count + 1;
+
+ vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "allocated %d MSI-X vectors\n",
+ vec_count);
+
+ if (vec_count > 0) {
+ if (vec_count < vecs) {
+ FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "interrupts number mismatch: vec_count: %d vecs: %d\n",
+ vec_count, vecs);
+ if (vec_count < min_irqs) {
+ FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "no interrupts for copy wq\n");
+ return 1;
+ }
+ }
+
fnic->rq_count = n;
fnic->raw_wq_count = m;
- fnic->wq_copy_count = o;
- fnic->wq_count = m + o;
- fnic->cq_count = n + m + o;
- fnic->intr_count = vecs;
- fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
-
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
- "Using MSI-X Interrupts\n");
- vnic_dev_set_intr_mode(fnic->vdev,
- VNIC_DEV_INTR_MODE_MSIX);
+ fnic->copy_wq_base = fnic->rq_count + fnic->raw_wq_count;
+ fnic->wq_copy_count = vec_count - n - m - 1;
+ fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
+ if (fnic->cq_count != vec_count - 1) {
+ FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "CQ count: %d does not match MSI-X vector count: %d\n",
+ fnic->cq_count, vec_count);
+ fnic->cq_count = vec_count - 1;
+ }
+ fnic->intr_count = vec_count;
+ fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
+
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
+ fnic->rq_count,
+ fnic->raw_wq_count, fnic->copy_wq_base);
+
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "wq_copy_count: %d wq_count: %d cq_count: %d\n",
+ fnic->wq_copy_count,
+ fnic->wq_count, fnic->cq_count);
+
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "intr_count: %d err_intr_offset: %u",
+ fnic->intr_count,
+ fnic->err_intr_offset);
+
+ vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+ FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "fnic using MSI-X\n");
return 0;
}
}
+ return 1;
+}
+
+int fnic_set_intr_mode(struct fnic *fnic)
+{
+ int ret_status = 0;
+
+ /*
+ * Set interrupt mode (INTx, MSI, MSI-X) depending
+ * system capabilities.
+ *
+ * Try MSI-X first
+ */
+ ret_status = fnic_set_intr_mode_msix(fnic);
+ if (ret_status == 0)
+ return ret_status;
/*
* Next try MSI
@@ -277,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
@@ -303,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->cq_count = 3;
fnic->intr_count = 3;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
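
The old code demanded exactly n + m + o + 1 MSI-X vectors and fell back to MSI otherwise; fnic_set_intr_mode_msix() instead negotiates a range, accepting anything from min_irqs (a single copy WQ) up to vecs, and shrinks wq_copy_count to vec_count - n - m - 1 on a partial grant. PCI_IRQ_AFFINITY additionally makes the core spread the vectors across CPUs, which the blk-mq queue mapping below relies on. A minimal sketch of the range negotiation, not the driver's code:

#include <linux/pci.h>

static int example_alloc_vectors(struct pci_dev *pdev,
                                 unsigned int min_vecs, unsigned int max_vecs)
{
        int got = pci_alloc_irq_vectors(pdev, min_vecs, max_vecs,
                                        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (got < 0)
                return got;     /* not even min_vecs could be allocated */

        /* got lands anywhere in [min_vecs, max_vecs]; size queues to fit.
         * e.g. n = 1 RQ, m = 1 raw WQ, got = 6 -> 6 - 1 - 1 - 1 = 3 copy
         * WQs, with the last vector reserved for error/notify. */
        return got;
}
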
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index f27f9319e..29eead383 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -12,9 +12,11 @@
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -39,6 +41,7 @@ static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
+static DEFINE_IDA(fnic_ida);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
@@ -113,6 +116,7 @@ static const struct scsi_host_template fnic_host_template = {
.shost_groups = fnic_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct fnic_cmd_priv),
+ .map_queues = fnic_mq_map_queues_cpus,
};
static void
@@ -209,7 +213,7 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic: Get vnic stats failed"
" 0x%x", ret);
return stats;
@@ -321,7 +325,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic: Reset vnic stats failed"
" 0x%x", ret);
return;
@@ -354,7 +358,7 @@ void fnic_log_q_error(struct fnic *fnic)
}
for (i = 0; i < fnic->wq_copy_count; i++) {
- error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
+ error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"CWQ[%d] error_status"
@@ -389,7 +393,7 @@ static int fnic_notify_set(struct fnic *fnic)
err = vnic_dev_notify_set(fnic->vdev, -1);
break;
case VNIC_DEV_INTR_MODE_MSIX:
- err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
+ err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base);
break;
default:
shost_printk(KERN_ERR, fnic->lport->host,
@@ -475,6 +479,7 @@ static int fnic_cleanup(struct fnic *fnic)
{
unsigned int i;
int err;
+ int raw_wq_rq_counts;
vnic_dev_disable(fnic->vdev);
for (i = 0; i < fnic->intr_count; i++)
@@ -491,13 +496,14 @@ static int fnic_cleanup(struct fnic *fnic)
return err;
}
for (i = 0; i < fnic->wq_copy_count; i++) {
- err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
+ err = vnic_wq_copy_disable(&fnic->hw_copy_wq[i]);
if (err)
return err;
+ raw_wq_rq_counts = fnic->raw_wq_count + fnic->rq_count;
+ fnic_wq_copy_cmpl_handler(fnic, -1, i + raw_wq_rq_counts);
}
/* Clean up completed IOs and FCS frames */
- fnic_wq_copy_cmpl_handler(fnic, io_completions);
fnic_wq_cmpl_handler(fnic, -1);
fnic_rq_cmpl_handler(fnic, -1);
@@ -507,7 +513,7 @@ static int fnic_cleanup(struct fnic *fnic)
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
for (i = 0; i < fnic->wq_copy_count; i++)
- vnic_wq_copy_clean(&fnic->wq_copy[i],
+ vnic_wq_copy_clean(&fnic->hw_copy_wq[i],
fnic_wq_copy_cleanup_handler);
for (i = 0; i < fnic->cq_count; i++)
@@ -560,11 +566,6 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
host->max_cmd_len = FCOE_MAX_CMD_LEN;
host->nr_hw_queues = fnic->wq_copy_count;
- if (host->nr_hw_queues > 1)
- shost_printk(KERN_ERR, host,
- "fnic: blk-mq is not supported");
-
- host->nr_hw_queues = fnic->wq_copy_count = 1;
shost_printk(KERN_INFO, host,
"fnic: can_queue: %d max_lun: %llu",
@@ -577,15 +578,43 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
return 0;
}
+void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
+{
+ struct fc_lport *lp = shost_priv(host);
+ struct fnic *fnic = lport_priv(lp);
+ struct pci_dev *l_pdev = fnic->pdev;
+ int intr_mode = fnic->config.intr_mode;
+ struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
+ FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "intr_mode is not msix\n");
+ return;
+ }
+
+ FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "qmap->nr_queues: %d\n", qmap->nr_queues);
+
+ if (l_pdev == NULL) {
+ FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "l_pdev is null\n");
+ return;
+ }
+
+ blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+}
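
fnic_mq_map_queues_cpus() is the new .map_queues hook wired into the SCSI host template above. blk_mq_pci_map_queues() copies the per-vector CPU affinity that PCI_IRQ_AFFINITY established into the block layer's hardware-context masks; the offset argument (FNIC_PCI_OFFSET, defined outside this diff) skips the leading vectors that do not serve I/O queues. A sketch of the same wiring, with example_shost_to_pdev() standing in for the driver-specific lookup and 2 as an assumed offset:

#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

static void example_map_queues(struct Scsi_Host *shost)
{
        struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
        struct pci_dev *pdev = example_shost_to_pdev(shost); /* driver-specific */

        /* assumed layout: vectors 0..1 serve RQ/raw WQ, I/O queues start
         * at vector 2 */
        blk_mq_pci_map_queues(qmap, pdev, 2);
}
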
+
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
struct fc_lport *lp;
struct fnic *fnic;
mempool_t *pool;
- int err;
+ int err = 0;
+ int fnic_id = 0;
int i;
unsigned long flags;
+ int hwq;
/*
* Allocate SCSI Host and set up association between host,
@@ -597,25 +626,28 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto err_out;
}
+
host = lp->host;
fnic = lport_priv(lp);
+
+ fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL);
+ if (fnic_id < 0) {
+ pr_err("Unable to alloc fnic ID\n");
+ err = fnic_id;
+ goto err_out_ida_alloc;
+ }
fnic->lport = lp;
fnic->ctlr.lp = lp;
-
fnic->link_events = 0;
+ fnic->pdev = pdev;
snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
host->host_no);
host->transportt = fnic_fc_transport;
-
+ fnic->fnic_num = fnic_id;
fnic_stats_debugfs_init(fnic);
- /* Setup PCI resources */
- pci_set_drvdata(pdev, fnic);
-
- fnic->pdev = pdev;
-
err = pci_enable_device(pdev);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
@@ -717,7 +749,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
- fnic_scsi_drv_init(fnic);
+ /* Setup PCI resources */
+ pci_set_drvdata(pdev, fnic);
fnic_get_res_counts(fnic);
@@ -737,6 +770,16 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_clear_intr;
}
+ fnic_scsi_drv_init(fnic);
+
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
+ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
+ fnic->sw_copy_wq[hwq].io_req_table =
+ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
+ sizeof(struct fnic_io_req *), GFP_KERNEL);
+ }
+ shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
/* initialize all fnic locks */
spin_lock_init(&fnic->fnic_lock);
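
Each copy WQ now carries a flat tag-indexed lookup table (sw_copy_wq[hwq].io_req_table) mapping a blk-mq tag to its fnic_io_req; the +1 in the allocation leaves room for the sentinel fnic_max_tag_id tag used by device-reset requests issued outside blk-mq. A defensive variant of the same allocation, a sketch only, adding the failure check and kcalloc() overflow protection (err_out_free_resources is the existing unwind label of this probe path):

for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
        fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
        fnic->sw_copy_wq[hwq].io_req_table =
                kcalloc(fnic->sw_copy_wq[hwq].ioreq_table_size + 1,
                        sizeof(struct fnic_io_req *), GFP_KERNEL);
        if (!fnic->sw_copy_wq[hwq].io_req_table)
                goto err_out_free_resources;
}
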
@@ -751,11 +794,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->fw_ack_index[i] = -1;
}
- for (i = 0; i < FNIC_IO_LOCKS; i++)
- spin_lock_init(&fnic->io_req_lock[i]);
-
- spin_lock_init(&fnic->sgreset_lock);
-
err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
@@ -792,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
skb_queue_head_init(&fnic->fip_frame_queue);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
@@ -823,16 +862,32 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocate RQ buffers and post them to RQ */
for (i = 0; i < fnic->rq_count; i++) {
- vnic_rq_enable(&fnic->rq[i]);
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_alloc_rq_frame can't alloc "
"frame\n");
- goto err_out_free_rq_buf;
+ goto err_out_rq_buf;
}
}
+ /* Enable all queues */
+ for (i = 0; i < fnic->raw_wq_count; i++)
+ vnic_wq_enable(&fnic->wq[i]);
+ for (i = 0; i < fnic->rq_count; i++) {
+ if (!ioread32(&fnic->rq[i].ctrl->enable))
+ vnic_rq_enable(&fnic->rq[i]);
+ }
+ for (i = 0; i < fnic->wq_copy_count; i++)
+ vnic_wq_copy_enable(&fnic->hw_copy_wq[i]);
+
+ err = fnic_request_intr(fnic);
+ if (err) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Unable to request irq.\n");
+ goto err_out_request_intr;
+ }
+
/*
* Initialization done with PCI system, hardware, firmware.
* Add host to SCSI
@@ -841,9 +896,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic: scsi_add_host failed...exiting\n");
- goto err_out_free_rq_buf;
+ goto err_out_scsi_add_host;
}
+
/* Start local port initialization */
lp->link_up = 0;
@@ -867,7 +923,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
FCPIO_HOST_EXCH_RANGE_END, NULL)) {
err = -ENOMEM;
- goto err_out_remove_scsi_host;
+ goto err_out_fc_exch_mgr_alloc;
}
fc_lport_init_stats(lp);
@@ -895,21 +951,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
skb_queue_head_init(&fnic->frame_queue);
skb_queue_head_init(&fnic->tx_queue);
- /* Enable all queues */
- for (i = 0; i < fnic->raw_wq_count; i++)
- vnic_wq_enable(&fnic->wq[i]);
- for (i = 0; i < fnic->wq_copy_count; i++)
- vnic_wq_copy_enable(&fnic->wq_copy[i]);
-
fc_fabric_login(lp);
- err = fnic_request_intr(fnic);
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to request irq.\n");
- goto err_out_free_exch_mgr;
- }
-
vnic_dev_enable(fnic->vdev);
for (i = 0; i < fnic->intr_count; i++)
@@ -921,12 +964,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_free_exch_mgr:
fc_exch_mgr_free(lp);
-err_out_remove_scsi_host:
+err_out_fc_exch_mgr_alloc:
fc_remove_host(lp->host);
scsi_remove_host(lp->host);
-err_out_free_rq_buf:
+err_out_scsi_add_host:
+ fnic_free_intr(fnic);
+err_out_request_intr:
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
+err_out_rq_buf:
vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
@@ -935,6 +981,8 @@ err_out_free_dflt_pool:
err_out_free_ioreq_pool:
mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
fnic_free_vnic_resources(fnic);
err_out_clear_intr:
fnic_clear_intr_mode(fnic);
@@ -951,6 +999,8 @@ err_out_disable_device:
pci_disable_device(pdev);
err_out_free_hba:
fnic_stats_debugfs_remove(fnic);
+ ida_free(&fnic_ida, fnic->fnic_num);
+err_out_ida_alloc:
scsi_host_put(lp->host);
err_out:
return err;
@@ -961,6 +1011,7 @@ static void fnic_remove(struct pci_dev *pdev)
struct fnic *fnic = pci_get_drvdata(pdev);
struct fc_lport *lp = fnic->lport;
unsigned long flags;
+ int hwq;
/*
* Mark state so that the workqueue thread stops forwarding
@@ -1021,6 +1072,8 @@ static void fnic_remove(struct pci_dev *pdev)
fc_remove_host(fnic->lport->host);
scsi_remove_host(fnic->lport->host);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
fc_exch_mgr_free(fnic->lport);
vnic_dev_notify_unset(fnic->vdev);
fnic_free_intr(fnic);
@@ -1031,6 +1084,7 @@ static void fnic_remove(struct pci_dev *pdev)
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ ida_free(&fnic_ida, fnic->fnic_num);
scsi_host_put(lp->host);
}
@@ -1168,6 +1222,7 @@ static void __exit fnic_cleanup_module(void)
fnic_trace_free();
fnic_fc_trace_free();
fnic_debugfs_terminate();
+ ida_destroy(&fnic_ida);
}
module_init(fnic_init_module);
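
fnic_main.c also introduces a module-wide IDA so each adapter gets a small stable index (fnic->fnic_num, the number printed by the reworked log macros): ida_alloc() in probe, ida_free() on remove and on the probe error path, ida_destroy() at module exit. The bare-bones lifecycle, as a sketch:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_probe_id(void)
{
        /* returns the smallest free id (>= 0), or -errno */
        return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_release_id(int id)
{
        /* remove path, and the probe error unwind */
        ida_free(&example_ida, id);
}

/* at module exit: ida_destroy(&example_ida); */
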
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index a1c9cfcac..33dd27f6f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -57,6 +57,8 @@ int fnic_get_vnic_config(struct fnic *fnic)
GET_CONFIG(port_down_timeout);
GET_CONFIG(port_down_io_retries);
GET_CONFIG(luns_per_tgt);
+ GET_CONFIG(intr_mode);
+ GET_CONFIG(wq_copy_count);
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
@@ -131,6 +133,12 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
c->intr_timer_type = c->intr_timer_type;
+ /* for older firmware, GET_CONFIG will not return anything */
+ if (c->wq_copy_count == 0)
+ c->wq_copy_count = 1;
+
+ c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
+
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC MAC addr %pM "
"wq/wq_copy/rq %d/%d/%d\n",
@@ -161,6 +169,10 @@ int fnic_get_vnic_config(struct fnic *fnic)
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC wq_copy_count: %d\n", c->wq_copy_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC intr mode: %d\n", c->intr_mode);
return 0;
}
@@ -187,12 +199,25 @@ int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
void fnic_get_res_counts(struct fnic *fnic)
{
fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
- fnic->raw_wq_count = fnic->wq_count - 1;
- fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
+ fnic->raw_wq_count = 1;
+ fnic->wq_copy_count = fnic->config.wq_copy_count;
fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
+
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC fw resources wq_count: %d\n", fnic->wq_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC fw resources rq_count: %d\n", fnic->rq_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC fw resources cq_count: %d\n", fnic->cq_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC fw resources intr_count: %d\n", fnic->intr_count);
}
void fnic_free_vnic_resources(struct fnic *fnic)
@@ -203,7 +228,7 @@ void fnic_free_vnic_resources(struct fnic *fnic)
vnic_wq_free(&fnic->wq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
- vnic_wq_copy_free(&fnic->wq_copy[i]);
+ vnic_wq_copy_free(&fnic->hw_copy_wq[i]);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_free(&fnic->rq[i]);
@@ -234,10 +259,15 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
- shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
- "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
- fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
- fnic->rq_count, fnic->cq_count, fnic->intr_count);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d",
+ fnic->wq_count, fnic->wq_copy_count,
+ fnic->raw_wq_count, fnic->rq_count);
+
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
+ fnic->cq_count, fnic->intr_count,
+ fnic->config.wq_copy_desc_count);
/* Allocate Raw WQ used for FCS frames */
for (i = 0; i < fnic->raw_wq_count; i++) {
@@ -250,7 +280,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
/* Allocate Copy WQs used for SCSI IOs */
for (i = 0; i < fnic->wq_copy_count; i++) {
- err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
+ err = vnic_wq_copy_alloc(fnic->vdev, &fnic->hw_copy_wq[i],
(fnic->raw_wq_count + i),
fnic->config.wq_copy_desc_count,
sizeof(struct fcpio_host_req));
@@ -357,7 +387,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
}
for (i = 0; i < fnic->wq_copy_count; i++) {
- vnic_wq_copy_init(&fnic->wq_copy[i],
+ vnic_wq_copy_init(&fnic->hw_copy_wq[i],
0 /* cq_index 0 - always */,
error_interrupt_enable,
error_interrupt_offset);
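
fnic_get_vnic_config() now reads wq_copy_count from firmware, with a fallback for older firmware that never fills the field in (GET_CONFIG leaves it 0), then clamps it to the driver's compile-time maximum. The same pattern restated with the reasoning as comments (FNIC_WQ_COPY_MAX is defined in fnic.h, outside this diff):

if (c->wq_copy_count == 0)      /* old firmware: field never written */
        c->wq_copy_count = 1;   /* behave like the single-queue driver */

/* never configure more copy WQs than the driver has arrays for */
c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
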
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 416d81954..fc4cee91b 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -92,20 +92,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status)
static void fnic_cleanup_io(struct fnic *fnic);
-static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
- struct scsi_cmnd *sc)
-{
- u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
-
- return &fnic->io_req_lock[hash];
-}
-
-static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
- int tag)
-{
- return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
-}
-
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
@@ -129,23 +115,23 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
-static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
+static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
{
/* if no Ack received from firmware, then nothing to clean */
- if (!fnic->fw_ack_recd[0])
+ if (!fnic->fw_ack_recd[hwq])
return 1;
/*
* Update desc_available count based on number of freed descriptors
* Account for wraparound
*/
- if (wq->to_clean_index <= fnic->fw_ack_index[0])
- wq->ring.desc_avail += (fnic->fw_ack_index[0]
+ if (wq->to_clean_index <= fnic->fw_ack_index[hwq])
+ wq->ring.desc_avail += (fnic->fw_ack_index[hwq]
- wq->to_clean_index + 1);
else
wq->ring.desc_avail += (wq->ring.desc_count
- wq->to_clean_index
- + fnic->fw_ack_index[0] + 1);
+ + fnic->fw_ack_index[hwq] + 1);
/*
* just bump clean index to ack_index+1 accounting for wraparound
@@ -153,10 +139,10 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
* to_clean_index and fw_ack_index, both inclusive
*/
wq->to_clean_index =
- (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
+ (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;
/* we have processed the acks received so far */
- fnic->fw_ack_recd[0] = 0;
+ fnic->fw_ack_recd[hwq] = 0;
return 0;
}
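
free_wq_copy_descs() now takes the hardware queue index so each copy WQ tracks its own firmware acks. A worked example of the two branches above, with assumed values:

/* Wraparound branch: desc_count = 64, to_clean_index = 60,
 * fw_ack_index[hwq] = 3:
 *   desc_avail += 64 - 60 + 3 + 1 = 8    (slots 60..63 and 0..3)
 *   to_clean_index = (3 + 1) % 64 = 4
 * Non-wrapped branch: to_clean_index = 2, fw_ack_index[hwq] = 5:
 *   desc_avail += 5 - 2 + 1 = 4          (slots 2..5)
 */
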
@@ -170,17 +156,14 @@ __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
unsigned long clearbits)
{
unsigned long flags = 0;
- unsigned long host_lock_flags = 0;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
if (clearbits)
fnic->state_flags &= ~st_flags;
else
fnic->state_flags |= st_flags;
- spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
@@ -193,7 +176,7 @@ __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
*/
int fnic_fw_reset_handler(struct fnic *fnic)
{
- struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
int ret = 0;
unsigned long flags;
@@ -210,7 +193,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
- free_wq_copy_descs(fnic, wq);
+ free_wq_copy_descs(fnic, wq, 0);
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
@@ -228,12 +211,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret) {
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Issued fw reset\n");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "Issued fw reset\n");
} else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Failed to issue fw reset\n");
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "Failed to issue fw reset\n");
}
return ret;
@@ -246,7 +229,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
*/
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
- struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
enum fcpio_flogi_reg_format_type format;
struct fc_lport *lp = fnic->lport;
u8 gw_mac[ETH_ALEN];
@@ -256,7 +239,7 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
- free_wq_copy_descs(fnic, wq);
+ free_wq_copy_descs(fnic, wq, 0);
if (!vnic_wq_copy_desc_avail(wq)) {
ret = -EAGAIN;
@@ -276,15 +259,15 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
fc_id, gw_mac,
fnic->data_src_addr,
lp->r_a_tov, lp->e_d_tov);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
fc_id, fnic->data_src_addr, gw_mac);
} else {
fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
format, fc_id, gw_mac);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "FLOGI reg issued fcid %x map %d dest %pM\n",
- fc_id, fnic->ctlr.map_dest, gw_mac);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n",
+ fc_id, fnic->ctlr.map_dest, gw_mac);
}
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
@@ -306,7 +289,9 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct vnic_wq_copy *wq,
struct fnic_io_req *io_req,
struct scsi_cmnd *sc,
- int sg_count)
+ int sg_count,
+ uint32_t mqtag,
+ uint16_t hwq)
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
@@ -314,7 +299,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct host_sg_desc *desc;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned int i;
- unsigned long intr_flags;
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
@@ -354,14 +338,11 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
int_to_scsilun(sc->device->lun, &fc_lun);
/* Enqueue the descriptor in the Copy WQ */
- spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
-
- if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
- free_wq_copy_descs(fnic, wq);
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
+ free_wq_copy_descs(fnic, wq, hwq);
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -378,7 +359,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
(rp->flags & FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
- fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
+ fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
0, exch_flags, io_req->sgl_cnt,
SCSI_SENSE_BUFFERSIZE,
io_req->sgl_list_pa,
@@ -399,43 +380,52 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
return 0;
}
-/*
- * fnic_queuecommand
- * Routine to send a scsi cdb
- * Called with host_lock held and interrupts disabled.
- */
-static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
+int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
+ uint32_t mqtag = 0;
void (*done)(struct scsi_cmnd *) = scsi_done;
- const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
struct fnic *fnic = lport_priv(lp);
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
- int ret;
+ int ret = 1;
u64 cmd_trace;
int sg_count = 0;
unsigned long flags = 0;
unsigned long ptr;
- spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
struct fc_rport_libfc_priv *rp;
+ uint16_t hwq = 0;
+
+ mqtag = blk_mq_unique_tag(rq);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->state_flags);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->state_flags);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"returning DID_NO_CONNECT for IO as rport is NULL\n");
sc->result = DID_NO_CONNECT << 16;
done(sc);
@@ -444,7 +434,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
ret = fc_remote_port_chkready(rport);
if (ret) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
@@ -454,7 +445,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
rp = rport->dd_data;
if (!rp || rp->rp_state == RPORT_ST_DELETE) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"rport 0x%x removed, returning DID_NO_CONNECT\n",
rport->port_id);
@@ -465,7 +457,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
}
if (rp->rp_state != RPORT_ST_READY) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
rport->port_id, rp->rp_state);
@@ -474,17 +467,17 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
return 0;
}
- if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "state not ready: %d/link not up: %d Returning HOST_BUSY\n",
+ lp->state, lp->link_up);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
atomic_inc(&fnic->in_flight);
- /*
- * Release host lock, use driver resource specific locks from here.
- * Don't re-enable interrupts in case they were disabled prior to the
- * caller disabling them.
- */
- spin_unlock(lp->host->host_lock);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
@@ -501,7 +494,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
+ mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -536,11 +529,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
}
/*
- * Will acquire lock defore setting to IO initialized.
+ * Will acquire lock before setting to IO initialized.
*/
-
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ hwq = blk_mq_unique_tag_to_hwq(mqtag);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
/* initialize rest of io_req */
io_lock_acquired = 1;
@@ -549,28 +541,40 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
fnic_priv(sc)->io_req = io_req;
fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
+ io_req->sc = sc;
+
+ if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
+ WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
+ fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req;
+ io_req->tag = mqtag;
/* create copy wq desc and enqueue it */
- wq = &fnic->wq_copy[0];
- ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
+ wq = &fnic->hw_copy_wq[hwq];
+ atomic64_inc(&fnic_stats->io_stats.ios[hwq]);
+ ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq);
if (ret) {
/*
* In case another thread cancelled the request,
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
+ mqtag, sc, 0, 0, 0, fnic_flags_and_state(sc));
io_req = fnic_priv(sc)->io_req;
fnic_priv(sc)->io_req = NULL;
+ if (io_req)
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if (io_req) {
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
atomic_dec(&fnic->in_flight);
- /* acquire host lock before returning to SCSI */
- spin_lock(lp->host->host_lock);
return ret;
} else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
@@ -590,20 +594,17 @@ out:
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, io_req, sg_count, cmd_trace,
+ mqtag, sc, io_req, sg_count, cmd_trace,
fnic_flags_and_state(sc));
/* Only if we issued the IO will we hold the io lock */
if (io_lock_acquired)
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
- /* acquire host lock before returning to SCSI */
- spin_lock(lp->host->host_lock);
return ret;
}
-DEF_SCSI_QCMD(fnic_queuecommand)
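
Dropping DEF_SCSI_QCMD() means fnic_queuecommand() is now entered without the SCSI host lock held; the per-hwq wq_copy_lock and fnic_lock seen above take over. The mqtag it derives is the block layer's unique tag, which packs the hardware queue and the per-queue tag into one u32. These are the real blk-mq helpers; the values are only an example:

#include <linux/blk-mq.h>

static void example_decode(struct request *rq)
{
        u32 mqtag = blk_mq_unique_tag(rq);              /* (hwq << 16) | tag */
        u16 hwq = blk_mq_unique_tag_to_hwq(mqtag);      /* mqtag >> 16       */
        u16 tag = blk_mq_unique_tag_to_tag(mqtag);      /* mqtag & 0xffff    */

        /* e.g. hwq 2, per-queue tag 0x001a -> mqtag 0x0002001a */
        pr_info("mqtag 0x%08x -> hwq %u tag 0x%04x\n", mqtag, hwq, tag);
}
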
/*
* fnic_fcpio_fw_reset_cmpl_handler
@@ -636,15 +637,14 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
/* Check status of reset completion */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "reset cmpl success\n");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "reset cmpl success\n");
/* Ready to send flogi out */
fnic->state = FNIC_IN_ETH_MODE;
} else {
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
- "fnic fw_reset : failed %s\n",
- fnic_fcpio_status_to_str(hdr_status));
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "reset failed with header status: %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
/*
* Unable to change to eth mode, cannot send out flogi
@@ -657,10 +657,9 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
- "Unexpected state %s while processing"
- " reset cmpl\n", fnic_state_to_str(fnic->state));
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "Unexpected state while processing reset completion: %s\n",
+ fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
@@ -681,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
reset_cmpl_handler_end:
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
@@ -711,19 +710,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
/* Check flogi registration completion status */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"flog reg succeeded\n");
fnic->state = FNIC_IN_FC_MODE;
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
+ fnic->lport->host, fnic->fnic_num,
"fnic flogi reg :failed %s\n",
fnic_fcpio_status_to_str(hdr_status));
fnic->state = FNIC_IN_ETH_MODE;
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Unexpected fnic state %s while"
" processing flogi reg completion\n",
fnic_state_to_str(fnic->state));
@@ -737,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
queue_work(fnic_event_queue, &fnic->frame_work);
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -780,20 +779,21 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
u64 *ox_id_tag = (u64 *)(void *)desc;
+ unsigned int wq_index = cq_index;
/* mark the ack state */
- wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
- spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ wq = &fnic->hw_copy_wq[cq_index];
+ spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags);
fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
if (is_ack_index_in_range(wq, request_out)) {
- fnic->fw_ack_index[0] = request_out;
- fnic->fw_ack_recd[0] = 1;
+ fnic->fw_ack_index[wq_index] = request_out;
+ fnic->fw_ack_recd[wq_index] = 1;
} else
atomic64_inc(
&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
ox_id_tag[4], ox_id_tag[5]);
@@ -803,12 +803,12 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
* fnic_fcpio_icmnd_cmpl_handler
* Routine to handle icmnd completions
*/
-static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
+static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
struct fcpio_fw_req *desc)
{
u8 type;
u8 hdr_status;
- struct fcpio_tag tag;
+ struct fcpio_tag ftag;
u32 id;
u64 xfer_len = 0;
struct fcpio_icmnd_cmpl *icmnd_cmpl;
@@ -816,27 +816,47 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct scsi_cmnd *sc;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
- spinlock_t *io_lock;
u64 cmd_trace;
unsigned long start_time;
unsigned long io_duration_time;
+ unsigned int hwq = 0;
+ unsigned int mqtag = 0;
+ unsigned int tag = 0;
/* Decode the cmpl description to get the io_req id */
- fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
- fcpio_tag_id_dec(&tag, &id);
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
+ fcpio_tag_id_dec(&ftag, &id);
icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= fnic->fnic_max_tag_id) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Tag out of range tag %x hdr status = %s\n",
- id, fnic_fcpio_status_to_str(hdr_status));
+ mqtag = id;
+ tag = blk_mq_unique_tag_to_tag(mqtag);
+ hwq = blk_mq_unique_tag_to_hwq(mqtag);
+
+ if (hwq != cq_index) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
+ hwq, mqtag, tag, cq_index);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hdr status: %s icmnd completion on the wrong queue\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
+
+ if (tag >= fnic->fnic_max_tag_id) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
+ hwq, mqtag, tag, cq_index);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hdr status: %s Out of range tag\n",
+ fnic_fcpio_status_to_str(hdr_status));
return;
}
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -852,14 +872,19 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
return;
}
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
+ if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) {
+ WARN(1, "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x io_req tag mismatch\n",
+ __func__, __LINE__, hwq, mqtag, tag);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ return;
+ }
+
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
@@ -883,11 +908,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
*/
fnic_priv(sc)->flags |= FNIC_IO_DONE;
fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if(FCPIO_ABORTED == hdr_status)
fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"icmnd_cmpl abts pending "
"hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
@@ -971,7 +996,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Break link with the SCSI command */
fnic_priv(sc)->io_req = NULL;
+ io_req->sc = NULL;
fnic_priv(sc)->flags |= FNIC_IO_DONE;
+ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
+
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
@@ -1005,7 +1034,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Call SCSI completion function to complete the IO */
scsi_done(sc);
- spin_unlock_irqrestore(io_lock, flags);
mempool_free(io_req, fnic->io_req_pool);
@@ -1042,7 +1070,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* fnic_fcpio_itmf_cmpl_handler
* Routine to handle itmf completions
*/
-static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
+static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
struct fcpio_fw_req *desc)
{
u8 type;
@@ -1056,51 +1084,73 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
- spinlock_t *io_lock;
unsigned long start_time;
+ unsigned int hwq = cq_index;
+ unsigned int mqtag;
unsigned int tag;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
fcpio_tag_id_dec(&ftag, &id);
- tag = id & FNIC_TAG_MASK;
- if (tag == fnic->fnic_max_tag_id) {
- if (!(id & FNIC_TAG_DEV_RST)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Tag out of range id 0x%x hdr status = %s\n",
- id, fnic_fcpio_status_to_str(hdr_status));
- return;
- }
- } else if (tag > fnic->fnic_max_tag_id) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Tag out of range tag 0x%x hdr status = %s\n",
- tag, fnic_fcpio_status_to_str(hdr_status));
+ mqtag = id & FNIC_TAG_MASK;
+ tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK);
+ hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);
+
+ if (hwq != cq_index) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
+ hwq, mqtag, tag, cq_index);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hdr status: %s ITMF completion on the wrong queue\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
+
+ if (tag > fnic->fnic_max_tag_id) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
+ hwq, mqtag, tag, cq_index);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hdr status: %s Tag out of range\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
+ hwq, mqtag, tag, cq_index);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hdr status: %s Tag out of range\n",
+ fnic_fcpio_status_to_str(hdr_status));
return;
}
- if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
- sc = fnic->sgreset_sc;
- io_lock = &fnic->sgreset_lock;
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
+
+ /* If it is sg3utils allocated SC then tag_id
+ * is max_tag_id and SC is retrieved from io_req
+ */
+ if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
+ io_req = fnic->sw_copy_wq[hwq].io_req_table[tag];
+ if (io_req)
+ sc = io_req->sc;
} else {
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
- io_lock = fnic_io_lock_hash(fnic, sc);
}
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), tag);
return;
}
- spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
@@ -1113,17 +1163,22 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
/* Abort and terminate completion of device reset req */
/* REVISIT : Add asserts about various flags */
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "dev reset abts cmpl recd. id %x status %s\n",
- id, fnic_fcpio_status_to_str(hdr_status));
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
+ hwq, mqtag, tag,
+ fnic_fcpio_status_to_str(hdr_status));
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
fnic_priv(sc)->abts_status = hdr_status;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req->abts_done)
complete(io_req->abts_done);
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
+ shost_printk(KERN_DEBUG, fnic->lport->host,
+ "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
+ hwq, mqtag, tag,
+ fnic_fcpio_status_to_str(hdr_status));
switch (hdr_status) {
case FCPIO_SUCCESS:
break;
@@ -1135,7 +1190,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
&term_stats->terminate_fw_timeouts);
break;
case FCPIO_ITMF_REJECTED:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
"abort reject recd. id %d\n",
(int)(id & FNIC_TAG_MASK));
break;
@@ -1156,7 +1211,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return;
}
@@ -1170,7 +1225,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
@@ -1182,14 +1237,19 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
*/
if (io_req->abts_done) {
complete(io_req->abts_done);
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
+ hwq, mqtag, tag);
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "abts cmpl, completing IO\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
+ hwq, mqtag,
+ tag, fnic_fcpio_status_to_str(hdr_status));
fnic_priv(sc)->io_req = NULL;
sc->result = (DID_ERROR << 16);
-
- spin_unlock_irqrestore(io_lock, flags);
+ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -1213,29 +1273,32 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
+ hwq, mqtag,
+ tag, fnic_fcpio_status_to_str(hdr_status));
fnic_priv(sc)->lr_status = hdr_status;
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Terminate pending "
- "dev reset cmpl recd. id %d status %s\n",
- (int)(id & FNIC_TAG_MASK),
- fnic_fcpio_status_to_str(hdr_status));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
+ hwq, mqtag,
+ tag, fnic_fcpio_status_to_str(hdr_status));
return;
}
if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
/* Need to wait for terminate completion */
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1244,19 +1307,19 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "dev reset cmpl recd. id %d status %s\n",
- (int)(id & FNIC_TAG_MASK),
- fnic_fcpio_status_to_str(hdr_status));
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
+ hwq, mqtag,
+ tag, fnic_fcpio_status_to_str(hdr_status));
if (io_req->dr_done)
complete(io_req->dr_done);
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else {
shost_printk(KERN_ERR, fnic->lport->host,
- "Unexpected itmf io state %s tag %x\n",
- fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
- spin_unlock_irqrestore(io_lock, flags);
+ "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
+ __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
}
}
@@ -1283,17 +1346,19 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
}
+ cq_index -= fnic->copy_wq_base;
+
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
fnic_fcpio_ack_handler(fnic, cq_index, desc);
break;
case FCPIO_ICMND_CMPL: /* fw completed a command */
- fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
+ fnic_fcpio_icmnd_cmpl_handler(fnic, cq_index, desc);
break;
case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
- fnic_fcpio_itmf_cmpl_handler(fnic, desc);
+ fnic_fcpio_itmf_cmpl_handler(fnic, cq_index, desc);
break;
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
@@ -1306,7 +1371,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
default:
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"firmware completion type %d\n",
desc->hdr.type);
break;
@@ -1319,10 +1384,8 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
* fnic_wq_copy_cmpl_handler
* Routine to process wq copy
*/
-int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
+int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index)
{
- unsigned int wq_work_done = 0;
- unsigned int i, cq_index;
unsigned int cur_work_done;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
u64 start_jiffies = 0;
@@ -1330,44 +1393,51 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
u64 delta_jiffies = 0;
u64 delta_ms = 0;
- for (i = 0; i < fnic->wq_copy_count; i++) {
- cq_index = i + fnic->raw_wq_count + fnic->rq_count;
-
- start_jiffies = jiffies;
- cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
- fnic_fcpio_cmpl_handler,
- copy_work_to_do);
- end_jiffies = jiffies;
-
- wq_work_done += cur_work_done;
- delta_jiffies = end_jiffies - start_jiffies;
- if (delta_jiffies >
- (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
- atomic64_set(&misc_stats->max_isr_jiffies,
- delta_jiffies);
- delta_ms = jiffies_to_msecs(delta_jiffies);
- atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
- atomic64_set(&misc_stats->corr_work_done,
- cur_work_done);
- }
+ start_jiffies = jiffies;
+ cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
+ fnic_fcpio_cmpl_handler,
+ copy_work_to_do);
+ end_jiffies = jiffies;
+ delta_jiffies = end_jiffies - start_jiffies;
+ if (delta_jiffies > (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
+ atomic64_set(&misc_stats->max_isr_jiffies, delta_jiffies);
+ delta_ms = jiffies_to_msecs(delta_jiffies);
+ atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
+ atomic64_set(&misc_stats->corr_work_done, cur_work_done);
}
- return wq_work_done;
+
+ return cur_work_done;
}
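
The multiqueue conversion above hinges on the block layer's unique-tag encoding: blk_mq_unique_tag() packs the hardware-queue index into the upper 16 bits and the per-queue tag into the lower 16 bits, and the paired helpers split them back out. A minimal standalone sketch of that decomposition (fnic_example_decode_tag is an illustrative name, not part of the driver; the helpers are the stock <linux/blk-mq.h> ones):

#include <linux/blk-mq.h>

/* Decompose a request's unique tag the way the fnic hunks do:
 * hwq selects the hardware copy WQ and its per-queue lock,
 * tag indexes the per-queue io_req_table.
 */
static void fnic_example_decode_tag(struct request *rq)
{
	u32 mqtag = blk_mq_unique_tag(rq);	/* (hwq << 16) | tag */
	u16 hwq = blk_mq_unique_tag_to_hwq(mqtag);
	u16 tag = blk_mq_unique_tag_to_tag(mqtag);

	pr_debug("hwq: %u tag: %u mqtag: 0x%x\n", hwq, tag, mqtag);
}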
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
- const int tag = scsi_cmd_to_rq(sc)->tag;
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic *fnic = data;
struct fnic_io_req *io_req;
unsigned long flags = 0;
- spinlock_t *io_lock;
unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ uint16_t hwq = 0;
+ int tag;
+ int mqtag;
- io_lock = fnic_io_lock_tag(fnic, tag);
- spin_lock_irqsave(io_lock, flags);
+ mqtag = blk_mq_unique_tag(rq);
+ hwq = blk_mq_unique_tag_to_hwq(mqtag);
+ tag = blk_mq_unique_tag_to_tag(mqtag);
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
+
+ fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
io_req = fnic_priv(sc)->io_req;
+ if (!io_req) {
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
+ hwq, mqtag, tag, fnic_priv(sc)->flags);
+ return true;
+ }
+
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
@@ -1379,20 +1449,16 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
- if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
- goto cleanup_scsi_cmd;
- }
fnic_priv(sc)->io_req = NULL;
-
- spin_unlock_irqrestore(io_lock, flags);
+ io_req->sc = NULL;
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/*
* If there is a scsi_cmnd associated with this io_req, then
@@ -1402,23 +1468,16 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
-cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
- tag, sc, jiffies - start_time);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ mqtag, tag, sc, (jiffies - start_time));
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
- /* Complete the command to SCSI */
- if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
- shost_printk(KERN_ERR, fnic->lport->host,
- "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
- tag, sc);
-
FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
@@ -1447,8 +1506,8 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct fnic_io_req *io_req;
struct scsi_cmnd *sc;
unsigned long flags;
- spinlock_t *io_lock;
unsigned long start_time = 0;
+ uint16_t hwq;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1461,8 +1520,8 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
if (!sc)
return;
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ hwq = blk_mq_unique_tag_to_hwq(id);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
/* Get the IO context which this desc refers to */
io_req = fnic_priv(sc)->io_req;
@@ -1470,13 +1529,15 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
/* fnic interrupts are turned off by now */
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto wq_copy_cleanup_scsi_cmd;
}
fnic_priv(sc)->io_req = NULL;
+ io_req->sc = NULL;
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
@@ -1484,7 +1545,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
wq_copy_cleanup_scsi_cmd:
sc->result = DID_NO_CONNECT << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
@@ -1500,31 +1561,31 @@ wq_copy_cleanup_scsi_cmd:
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
u32 task_req, u8 *fc_lun,
- struct fnic_io_req *io_req)
+ struct fnic_io_req *io_req,
+ unsigned int hwq)
{
- struct vnic_wq_copy *wq = &fnic->wq_copy[0];
- struct Scsi_Host *host = fnic->lport->host;
+ struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
- spin_lock_irqsave(host->host_lock, flags);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 1;
} else
atomic_inc(&fnic->in_flight);
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
- free_wq_copy_descs(fnic, wq);
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
+ free_wq_copy_descs(fnic, wq, hwq);
if (!vnic_wq_copy_desc_avail(wq)) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic_queue_abort_io_req: failure: no descriptors\n");
atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
@@ -1539,7 +1600,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
return 0;
@@ -1553,33 +1614,36 @@ struct fnic_rport_abort_io_iter_data {
static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
- int abt_tag = scsi_cmd_to_rq(sc)->tag;
+ int abt_tag = 0;
struct fnic_io_req *io_req;
- spinlock_t *io_lock;
unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
+ uint16_t hwq = 0;
- io_lock = fnic_io_lock_tag(fnic, abt_tag);
- spin_lock_irqsave(io_lock, flags);
+ abt_tag = blk_mq_unique_tag(rq);
+ hwq = blk_mq_unique_tag_to_hwq(abt_tag);
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req || io_req->port_id != iter_data->port_id) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
- sc);
- spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
+ hwq, abt_tag, fnic_priv(sc)->flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
@@ -1588,7 +1652,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
* belongs to rport that went away
*/
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
if (io_req->abts_done) {
@@ -1612,38 +1676,41 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
BUG_ON(io_req->abts_done);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic_rport_reset_exch: Issuing abts\n");
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req)) {
+ fc_lun.scsi_lun, io_req, hwq)) {
/*
* Revert the cmd state back to old state, if
* it hasn't changed in between. This cmd will get
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
+ hwq, abt_tag, fnic_priv(sc)->flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
else
fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
@@ -1660,7 +1727,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
};
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
+ fnic->lport->host, fnic->fnic_num,
"fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
@@ -1697,9 +1764,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
return;
}
fnic = lport_priv(lport);
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, "fnic_terminate_rport_io called"
- " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
rport->port_name, rport->node_name, rport,
rport->port_id);
@@ -1721,7 +1787,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
- spinlock_t *io_lock;
unsigned long flags;
unsigned long start_time = 0;
int ret = SUCCESS;
@@ -1731,8 +1796,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
- const int tag = rq->tag;
+ int mqtag;
unsigned long abt_issued_time;
+ uint16_t hwq = 0;
+
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
@@ -1742,23 +1809,25 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic_stats = &fnic->fnic_stats;
abts_stats = &fnic->fnic_stats.abts_stats;
term_stats = &fnic->fnic_stats.term_stats;
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
- rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);
+ mqtag = blk_mq_unique_tag(rq);
+ hwq = blk_mq_unique_tag_to_hwq(mqtag);
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_abort_cmd_end;
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* Avoid a race between SCSI issuing the abort and the device
* completing the command.
@@ -1771,18 +1840,17 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
*
* .io_req will not be cleared except while holding io_req_lock.
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto fnic_abort_cmd_end;
}
io_req->abts_done = &tm_done;
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto wait_pending;
}
@@ -1802,8 +1870,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
else
atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
+ sc->cmnd[0], abt_issued_time);
/*
* Command is still pending, need to abort it
* If the firmware completes the command after this point,
@@ -1814,7 +1883,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/*
* Check readiness of the remote port. If the path to remote
@@ -1831,15 +1900,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
- io_req)) {
- spin_lock_irqsave(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun,
+ io_req, hwq)) {
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1863,12 +1932,12 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic->config.ed_tov));
/* Check the abort status */
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
@@ -1877,7 +1946,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
@@ -1891,9 +1960,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* IO out of order */
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
- spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Issuing Host reset due to out of order IO\n");
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "Issuing host reset due to out of order IO\n");
ret = FAILED;
goto fnic_abort_cmd_end;
@@ -1907,15 +1976,18 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* free the io_req if successful. If abort fails,
* Device reset will clean the I/O.
*/
- if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
+ if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS ||
+ (fnic_priv(sc)->abts_status == FCPIO_ABORTED)) {
fnic_priv(sc)->io_req = NULL;
+ io_req->sc = NULL;
} else {
ret = FAILED;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto fnic_abort_cmd_end;
}
- spin_unlock_irqrestore(io_lock, flags);
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -1930,14 +2002,14 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
atomic64_inc(&fnic_stats->io_stats.io_completions);
fnic_abort_cmd_end:
- FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -1948,33 +2020,34 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct scsi_cmnd *sc,
struct fnic_io_req *io_req)
{
- struct vnic_wq_copy *wq = &fnic->wq_copy[0];
- struct Scsi_Host *host = fnic->lport->host;
+ struct vnic_wq_copy *wq;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun;
int ret = 0;
- unsigned long intr_flags;
- unsigned int tag = scsi_cmd_to_rq(sc)->tag;
+ unsigned long flags;
+ uint16_t hwq = 0;
+ uint32_t tag = 0;
- if (tag == SCSI_NO_TAG)
- tag = io_req->tag;
+ tag = io_req->tag;
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ wq = &fnic->hw_copy_wq[hwq];
- spin_lock_irqsave(host->host_lock, intr_flags);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(host->host_lock, intr_flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return FAILED;
} else
atomic_inc(&fnic->in_flight);
- spin_unlock_irqrestore(host->host_lock, intr_flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
- free_wq_copy_descs(fnic, wq);
+ if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
+ free_wq_copy_descs(fnic, wq, hwq);
if (!vnic_wq_copy_desc_avail(wq)) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"queue_dr_io_req failure - no descriptors\n");
atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
@@ -1997,7 +2070,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
lr_io_req_end:
- spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
return ret;
@@ -2012,12 +2085,13 @@ struct fnic_pending_aborts_iter_data {
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
struct scsi_device *lun_dev = iter_data->lun_dev;
- int abt_tag = scsi_cmd_to_rq(sc)->tag;
+ unsigned long abt_tag = 0;
+ uint16_t hwq = 0;
struct fnic_io_req *io_req;
- spinlock_t *io_lock;
unsigned long flags;
struct scsi_lun fc_lun;
DECLARE_COMPLETION_ONSTACK(tm_done);
@@ -2026,11 +2100,13 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
if (sc == iter_data->lr_sc || sc->device != lun_dev)
return true;
- io_lock = fnic_io_lock_tag(fnic, abt_tag);
- spin_lock_irqsave(io_lock, flags);
+ abt_tag = blk_mq_unique_tag(rq);
+ hwq = blk_mq_unique_tag_to_hwq(abt_tag);
+
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
@@ -2038,20 +2114,19 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
(!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "%s dev rst not pending sc 0x%p\n", __func__,
- sc);
- spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "dev rst not pending sc 0x%p\n", sc);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
@@ -2072,35 +2147,37 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
BUG_ON(io_req->abts_done);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
- abt_tag |= FNIC_TAG_DEV_RST;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "%s: dev rst sc 0x%p\n", __func__, sc);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "dev rst sc 0x%p\n", sc);
}
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req)) {
- spin_lock_irqsave(io_lock, flags);
+ fc_lun.scsi_lun, io_req, hwq)) {
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
iter_data->ret = FAILED;
+ FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
+ hwq, abt_tag);
return false;
} else {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
}
fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
@@ -2108,10 +2185,10 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
(fnic->config.ed_tov));
/* Recheck cmd state to check if it is now aborted */
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
return true;
}
@@ -2120,7 +2197,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
/* if abort is still pending with fw, fail */
if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
iter_data->ret = FAILED;
return false;
@@ -2128,9 +2205,11 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
/* original sc used for lr is handled by dev reset code */
- if (sc != iter_data->lr_sc)
+ if (sc != iter_data->lr_sc) {
fnic_priv(sc)->io_req = NULL;
- spin_unlock_irqrestore(io_lock, flags);
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL;
+ }
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc) {
@@ -2182,8 +2261,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
ret = 1;
clean_pending_aborts_end:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "%s: exit status: %d\n", __func__, ret);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "exit status: %d\n", ret);
return ret;
}
@@ -2201,15 +2280,15 @@ int fnic_device_reset(struct scsi_cmnd *sc)
struct fc_rport *rport;
int status;
int ret = FAILED;
- spinlock_t *io_lock;
unsigned long flags;
unsigned long start_time = 0;
struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct reset_stats *reset_stats;
- int tag = rq->tag;
+ int mqtag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
bool new_sc = 0;
+ uint16_t hwq = 0;
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
@@ -2224,9 +2303,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
- rport->port_id, sc->device->lun, sc);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
+ rport->port_id, sc->device->lun, hwq, mqtag,
+ fnic_priv(sc)->flags);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
@@ -2239,7 +2319,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
- if (unlikely(tag < 0)) {
+ if (unlikely(mqtag < 0)) {
/*
* For device reset issued through sg3utils, we let
* only one LUN_RESET to go through and use a special
@@ -2248,17 +2328,14 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* allocated by mid layer.
*/
mutex_lock(&fnic->sgreset_mutex);
- tag = fnic->fnic_max_tag_id;
+ mqtag = fnic->fnic_max_tag_id;
new_sc = 1;
- fnic->sgreset_sc = sc;
- io_lock = &fnic->sgreset_lock;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n",
- rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag);
- } else
- io_lock = fnic_io_lock_hash(fnic, sc);
+ } else {
+ mqtag = blk_mq_unique_tag(rq);
+ hwq = blk_mq_unique_tag_to_hwq(mqtag);
+ }
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
/*
@@ -2268,36 +2345,43 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (!io_req) {
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto fnic_device_reset_end;
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
- io_req->tag = tag;
- io_req->sc = sc;
+ io_req->tag = mqtag;
fnic_priv(sc)->io_req = io_req;
+ io_req->sc = sc;
+
+ if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
+ WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n",
+ fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag));
+
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] =
+ io_req;
}
io_req->dr_done = &tm_done;
fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
* and break assoc with scsi cmd
*/
if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
@@ -2306,12 +2390,12 @@ int fnic_device_reset(struct scsi_cmnd *sc)
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
@@ -2324,44 +2408,44 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (status == FCPIO_INVALID_CODE) {
atomic64_inc(&reset_stats->device_reset_timeouts);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Device reset timed out\n");
fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
* Issue abort and terminate on device reset request.
* If q'ing of terminate fails, retry it after a delay.
*/
while (1) {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
break;
}
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if (fnic_queue_abort_io_req(fnic,
- tag | FNIC_TAG_DEV_RST,
+ mqtag | FNIC_TAG_DEV_RST,
FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req)) {
+ fc_lun.scsi_lun, io_req, hwq)) {
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
} else {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
io_req->abts_done = &tm_done;
- spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Abort and terminate issued on Device reset "
- "tag 0x%x sc 0x%p\n", tag, sc);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n",
+ mqtag, sc);
break;
}
}
while (1) {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
break;
@@ -2372,14 +2456,14 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
}
} else {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
if (status != FCPIO_SUCCESS) {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host,
+ fnic->lport->host, fnic->fnic_num,
"Device reset completed - failed\n");
io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
@@ -2393,26 +2477,29 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* succeeds
*/
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Device reset failed"
" since could not abort all IOs\n");
goto fnic_device_reset_clean;
}
/* Clean lun reset command */
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (io_req)
/* Completed, and successful */
ret = SUCCESS;
fnic_device_reset_clean:
- if (io_req)
+ if (io_req) {
fnic_priv(sc)->io_req = NULL;
+ io_req->sc = NULL;
+ fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL;
+ }
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if (io_req) {
start_time = io_req->start_time;
@@ -2433,7 +2520,7 @@ fnic_device_reset_end:
mutex_unlock(&fnic->sgreset_mutex);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2456,8 +2543,8 @@ int fnic_reset(struct Scsi_Host *shost)
fnic = lport_priv(lp);
reset_stats = &fnic->fnic_stats.reset_stats;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "fnic_reset called\n");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "Issuing fnic reset\n");
atomic64_inc(&reset_stats->fnic_resets);
@@ -2467,10 +2554,9 @@ int fnic_reset(struct Scsi_Host *shost)
*/
ret = fc_lport_reset(lp);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Returning from fnic reset %s\n",
- (ret == 0) ?
- "SUCCESS" : "FAILED");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "Returning from fnic reset with: %s\n",
+ (ret == 0) ? "SUCCESS" : "FAILED");
if (ret == 0)
atomic64_inc(&reset_stats->fnic_reset_completions);
@@ -2503,7 +2589,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
fnic->internal_reset_inprogress = true;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"host reset in progress skipping another host reset\n");
return SUCCESS;
}
@@ -2578,7 +2664,7 @@ retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->remove_wait = NULL;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
"fnic_scsi_abort_io %s\n",
(fnic->state == FNIC_IN_ETH_MODE) ?
"SUCCESS" : "FAILED");
@@ -2652,12 +2738,17 @@ call_fc_exch_mgr_reset:
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
+ struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
int cmd_state;
struct fnic_io_req *io_req;
- spinlock_t *io_lock;
unsigned long flags;
+ uint16_t hwq = 0;
+ int tag;
+
+ tag = blk_mq_unique_tag(rq);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
/*
* ignore this lun reset cmd or cmds that do not belong to
@@ -2668,12 +2759,11 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
return true;
- io_lock = fnic_io_lock_hash(fnic, sc);
- spin_lock_irqsave(io_lock, flags);
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
@@ -2681,11 +2771,12 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ "hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
+ hwq, tag,
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
cmd_state = fnic_priv(sc)->state;
- spin_unlock_irqrestore(io_lock, flags);
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
iter_data->ret = 1;
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index bdf639eef..9d7f98c45 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -2,6 +2,7 @@
/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
+#define FNIC_MQ_MAX_QUEUES 64
struct stats_timestamps {
struct timespec64 last_reset_time;
@@ -26,6 +27,7 @@ struct io_path_stats {
atomic64_t io_btw_10000_to_30000_msec;
atomic64_t io_greater_than_30000_msec;
atomic64_t current_max_io_time;
+ atomic64_t ios[FNIC_MQ_MAX_QUEUES];
};
struct abort_stats {
@@ -103,6 +105,7 @@ struct misc_stats {
atomic64_t rport_not_ready;
atomic64_t frame_errors;
atomic64_t current_port_speed;
+ atomic64_t intx_dummy;
};
struct fnic_stats {
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index be0d7c57b..aaa4ea02f 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -204,6 +204,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
int len = 0;
int buf_size = debug->buf_size;
struct timespec64 val1, val2;
+ int i = 0;
ktime_get_real_ts64(&val1);
len = scnprintf(debug->debug_buffer + len, buf_size - len,
@@ -267,6 +268,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\tIO Queues and cumulative IOs\n"
+ "------------------------------------------\n");
+
+ for (i = 0; i < FNIC_MQ_MAX_QUEUES; i++) {
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
+ "Q:%d -> %lld\n", i, (u64)atomic64_read(&stats->io_stats.ios[i]));
+ }
+
+ len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\nCurrent Max IO time : %lld\n",
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
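
The per-queue loop added above relies on scnprintf()'s contract: it returns the number of characters actually written (never more than size - 1), so the running len can never push the next write past the end of the buffer. A hedged sketch of the accumulation pattern (fill_report and the queue count are illustrative, not driver code):

#include <linux/kernel.h>

/* Accumulate formatted lines into a fixed buffer without overrun:
 * scnprintf() never writes more than (size - 1) characters plus NUL.
 */
static int fill_report(char *buf, int buf_size)
{
	int len = 0;
	int i;

	for (i = 0; i < 4; i++)
		len += scnprintf(buf + len, buf_size - len,
				 "Q:%d -> %d\n", i, i * 10);

	return len;
}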
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 3e5b437c0..e0b173cc9 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -143,6 +143,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
}
+ pr_info("res_type_wq: %d res_type_rq: %d res_type_cq: %d res_type_intr_ctrl: %d\n",
+ vdev->res[RES_TYPE_WQ].count, vdev->res[RES_TYPE_RQ].count,
+ vdev->res[RES_TYPE_CQ].count, vdev->res[RES_TYPE_INTR_CTRL].count);
+
return 0;
}
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index 4e12f7b32..f715f7942 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -26,7 +26,7 @@
#define VNIC_FNIC_RATOV_MAX 255000
#define VNIC_FNIC_MAXDATAFIELDSIZE_MIN 256
-#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2112
+#define VNIC_FNIC_MAXDATAFIELDSIZE_MAX 2048
#define VNIC_FNIC_FLOGI_RETRIES_MIN 0
#define VNIC_FNIC_FLOGI_RETRIES_MAX 0xffffffff
@@ -55,7 +55,7 @@
#define VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX 255
#define VNIC_FNIC_LUNS_PER_TARGET_MIN 1
-#define VNIC_FNIC_LUNS_PER_TARGET_MAX 1024
+#define VNIC_FNIC_LUNS_PER_TARGET_MAX 4096
/* Device-specific region: scsi configuration */
struct vnic_fc_config {
@@ -79,10 +79,19 @@ struct vnic_fc_config {
u16 ra_tov;
u16 intr_timer;
u8 intr_timer_type;
+ u8 intr_mode;
+ u8 lun_queue_depth;
+ u8 io_timeout_retry;
+ u16 wq_copy_count;
};
#define VFCF_FCP_SEQ_LVL_ERR 0x1 /* Enable FCP-2 Error Recovery */
#define VFCF_PERBI 0x2 /* persistent binding info available */
#define VFCF_FIP_CAPABLE 0x4 /* firmware can handle FIP */
+#define VFCF_FC_INITIATOR 0x20 /* FC Initiator Mode */
+#define VFCF_FC_TARGET 0x40 /* FC Target Mode */
+#define VFCF_FC_NVME_INITIATOR 0x80 /* FC-NVMe Initiator Mode */
+#define VFCF_FC_NVME_TARGET 0x100 /* FC-NVMe Target Mode */
+
#endif /* _VNIC_SCSI_H_ */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 1abc62b07..05c38e43f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1792,7 +1792,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
if (dev_is_sata(device)) {
struct ata_link *link = &device->sata_dev.ap->link;
- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
msleep(2000);
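
The one-line hisi_sas fix converts a relative timeout into the absolute jiffies deadline that ata_wait_after_reset() expects. A small sketch of that deadline convention (the constant and helper names are illustrative):

#include <linux/jiffies.h>

/* Deadlines are absolute: compute once from "now", then compare with
 * time_after(), which handles jiffies wraparound correctly.
 */
#define EXAMPLE_WAIT_MS		(20 * 1000)

static unsigned long example_deadline(void)
{
	return jiffies + msecs_to_jiffies(EXAMPLE_WAIT_MS);
}

static bool example_expired(unsigned long deadline)
{
	return time_after(jiffies, deadline);
}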
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 92c440f2e..46d0b3a0e 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1464,7 +1464,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (name)
- strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
+ strscpy(vhost->partition_name, name, sizeof(vhost->partition_name));
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
vhost->partition_number = *num;
@@ -1513,13 +1513,15 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
login_info->async.len = cpu_to_be32(async_crq->size *
sizeof(*async_crq->msgs.async));
- strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
- strncpy(login_info->device_name,
- dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
+ strscpy(login_info->partition_name, vhost->partition_name,
+ sizeof(login_info->partition_name));
+
+ strscpy(login_info->device_name,
+ dev_name(&vhost->host->shost_gendev), sizeof(login_info->device_name));
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(vhost->dev);
- strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
+ strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
}
/**
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 595992996..71f3e9563 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -266,7 +266,7 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
- strncpy(hostdata->madapter_info.partition_name, partition_name,
+ strscpy(hostdata->madapter_info.partition_name, partition_name,
sizeof(hostdata->madapter_info.partition_name));
hostdata->madapter_info.partition_number =
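
The ibmvfc and ibmvscsi hunks above swap strncpy() for strscpy(), which always NUL-terminates within the destination size and reports truncation instead of silently leaving an unterminated string. A hedged sketch of the pattern (copy_name and its arguments are placeholders, not the driver structures):

#include <linux/string.h>
#include <linux/printk.h>

/* strscpy() returns the copied length, or -E2BIG when src had to be
 * truncated; the destination is NUL-terminated either way.
 */
static void copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_size);

	if (ret == -E2BIG)
		pr_warn("name truncated to \"%s\"\n", dst);
}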
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 2a50fda3a..625fd547e 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr)
*/
static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
- u8 rb;
u8 instr;
int i;
@@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
- if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+ if (inb(base + TUL_NVRAM) & SE2DI)
break; /* write complete */
}
outb(0, base + TUL_NVRAM); /* -CS */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 81e3d464d..3819f7c42 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -77,7 +77,6 @@
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
-static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
@@ -193,8 +192,6 @@ module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
-module_param_named(testmode, ipr_testmode, int, 0);
-MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
@@ -6416,45 +6413,6 @@ static const struct scsi_host_template driver_template = {
.proc_name = IPR_NAME,
};
-#ifdef CONFIG_PPC_PSERIES
-static const u16 ipr_blocked_processors[] = {
- PVR_NORTHSTAR,
- PVR_PULSAR,
- PVR_POWER4,
- PVR_ICESTAR,
- PVR_SSTAR,
- PVR_POWER4p,
- PVR_630,
- PVR_630p
-};
-
-/**
- * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
- * @ioa_cfg: ioa cfg struct
- *
- * Adapters that use Gemstone revision < 3.1 do not work reliably on
- * certain pSeries hardware. This function determines if the given
- * adapter is in one of these confgurations or not.
- *
- * Return value:
- * 1 if adapter is not supported / 0 if adapter is supported
- **/
-static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
-{
- int i;
-
- if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
- for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
- if (pvr_version_is(ipr_blocked_processors[i]))
- return 1;
- }
- }
- return 0;
-}
-#else
-#define ipr_invalid_adapter(ioa_cfg) 0
-#endif
-
/**
* ipr_ioa_bringdown_done - IOA bring down completion.
* @ipr_cmd: ipr command struct
@@ -7385,19 +7343,6 @@ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
type[4] = '\0';
ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
- if (ipr_invalid_adapter(ioa_cfg)) {
- dev_err(&ioa_cfg->pdev->dev,
- "Adapter not supported in this hardware configuration.\n");
-
- if (!ipr_testmode) {
- ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- list_add_tail(&ipr_cmd->queue,
- &ioa_cfg->hrrq->hrrq_free_q);
- return IPR_RC_JOB_RETURN;
- }
- }
-
ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
ipr_ioafp_inquiry(ipr_cmd, 1, 0,
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 7162a5029..355a0bc08 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -738,8 +738,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i
return SCI_SUCCESS;
}
-enum sci_status sci_task_request_construct_ssp(
- struct isci_request *ireq)
+void sci_task_request_construct_ssp(struct isci_request *ireq)
{
/* Construct the SSP Task SCU Task Context */
scu_ssp_task_request_construct_task_context(ireq);
@@ -748,8 +747,6 @@ enum sci_status sci_task_request_construct_ssp(
sci_task_request_build_ssp_task_iu(ireq);
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
-
- return SCI_SUCCESS;
}
static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 20b141739..79ddfffbf 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -300,7 +300,7 @@ sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag,
struct isci_request *ireq);
-enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
+void sci_task_request_construct_ssp(struct isci_request *ireq);
void sci_smp_request_copy_response(struct isci_request *ireq);
static inline int isci_task_is_ncq_recovery(struct sas_task *task)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index c514b2029..3a25b1a2c 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -243,9 +243,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
/* XXX convert to get this from task->tproto like other drivers */
if (dev->dev_type == SAS_END_DEVICE) {
isci_tmf->proto = SAS_PROTOCOL_SSP;
- status = sci_task_request_construct_ssp(ireq);
- if (status != SCI_SUCCESS)
- return NULL;
+ sci_task_request_construct_ssp(ireq);
}
return ireq;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 0c842fb29..494a671fb 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -176,7 +176,7 @@ fail:
return err;
}
-static int esp_jazz_remove(struct platform_device *dev)
+static void esp_jazz_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
@@ -189,8 +189,6 @@ static int esp_jazz_remove(struct platform_device *dev)
esp->command_block_dma);
scsi_host_put(esp->host);
-
- return 0;
}
/* work with hotplug and coldplug */
@@ -198,7 +196,7 @@ MODULE_ALIAS("platform:jazz_esp");
static struct platform_driver esp_jazz_driver = {
.probe = esp_jazz_probe,
- .remove = esp_jazz_remove,
+ .remove_new = esp_jazz_remove,
.driver = {
.name = "jazz_esp",
},
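
The jazz_esp hunk moves to the .remove_new callback, the void-returning variant of platform-driver remove: the platform core ignores any error code from remove, so the int return was dropped. A skeleton of the resulting shape (the example_* names are illustrative):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* claim the device */
}

/* Nothing useful can be returned from remove; tear down and return void. */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};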
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 05be0810b..80be3a936 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2062,9 +2062,9 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd->result = (DID_PARITY << 16);
break;
case FC_TIMED_OUT:
- FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
+ FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
"due to FC_TIMED_OUT\n");
- sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+ sc_cmd->result = (DID_TIME_OUT << 16);
break;
default:
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b1c9107d3..d3a5d6ecd 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5905,11 +5905,11 @@ LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
/*
* lpfc_ras_fwlog_func: Firmware logging enabled on function number
* Default function which has RAS support : 0
- * Value Range is [0..7].
+ * Value Range is [0..3].
* FW logging is a global action and enablement is via a specific
* port.
*/
-LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 3, "Firmware Logging Enabled on Function");
/*
* lpfc_enable_bbcr: Enable BB Credit Recovery
@@ -5954,7 +5954,7 @@ LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
* 4 - 255 = vmid support enabled for 4-255 VMs
* Value range is [4,255].
*/
-LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
+LPFC_ATTR_R(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
"Maximum number of VMs supported");
/*
@@ -5962,7 +5962,7 @@ LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
* 0 = Timeout is disabled
* Value range is [0,24].
*/
-LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
+LPFC_ATTR_R(vmid_inactivity_timeout, 4, 0, 24,
"Inactivity timeout in hours");
/*
@@ -5971,7 +5971,7 @@ LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
* 1 = Support is enabled
* Value range is [0,1].
*/
-LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
+LPFC_ATTR_R(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
"Enable App Header VMID support");
@@ -5982,7 +5982,7 @@ LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
* 2 = Allow all targets
* Value range is [0,2].
*/
-LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
+LPFC_ATTR_R(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
LPFC_VMID_PRIO_TAG_DISABLE,
LPFC_VMID_PRIO_TAG_ALL_TARGETS,
"Enable Priority Tagging VMID support");
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 385e1636f..4d7232006 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2062,8 +2062,9 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0102 PLOGI completes to NPort x%06x "
- "Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, ndlp->nlp_fc4_type,
+ "IoTag x%x Data: x%x x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, iotag,
+ ndlp->nlp_fc4_type,
ulp_status, ulp_word4,
disc, vport->num_disc_nodes);
@@ -2362,9 +2363,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, ulp_status, ulp_word4,
- vport->num_disc_nodes, ndlp->fc4_prli_sent);
+ vport->num_disc_nodes, ndlp->fc4_prli_sent,
+ ndlp->fc4_xpt_flags);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
@@ -2805,7 +2807,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
- u32 ulp_status, ulp_word4, tmo;
+ u32 ulp_status, ulp_word4, tmo, iotag;
bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2818,9 +2820,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (phba->sli_rev == LPFC_SLI_REV4) {
tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
} else {
irsp = &rspiocb->iocb;
tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2838,9 +2842,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
- "Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, ulp_status, ulp_word4,
+ "IoTag x%x Data: x%x x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, iotag,
+ ulp_status, ulp_word4,
tmo, disc, vport->num_disc_nodes);
+
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
spin_lock_irq(&ndlp->lock);
@@ -3001,7 +3007,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int wake_up_waiter = 0;
u32 ulp_status;
u32 ulp_word4;
- u32 tmo;
+ u32 tmo, iotag;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->rsp_iocb = rspiocb;
@@ -3011,9 +3017,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (phba->sli_rev == LPFC_SLI_REV4) {
tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
} else {
irsp = &rspiocb->iocb;
tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
}
spin_lock_irq(&ndlp->lock);
@@ -3032,9 +3040,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0105 LOGO completes to NPort x%x "
- "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
- ulp_status, ulp_word4,
+ "IoTag x%x refcnt %d nflags x%x xflags x%x "
+ "Data: x%x x%x x%x x%x\n",
+ ndlp->nlp_DID, iotag,
+ kref_read(&ndlp->kref), ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags, ulp_status, ulp_word4,
tmo, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
@@ -5075,16 +5085,19 @@ out_retry:
if (logerr) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0137 No retry ELS command x%x to remote "
- "NPORT x%x: Out of Resources: Error:x%x/%x\n",
- cmd, did, ulp_status,
- ulp_word4);
+ "NPORT x%x: Out of Resources: Error:x%x/%x "
+ "IoTag x%x\n",
+ cmd, did, ulp_status, ulp_word4,
+ cmdiocb->iotag);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0108 No retry ELS command x%x to remote "
- "NPORT x%x Retried:%d Error:x%x/%x\n",
- cmd, did, cmdiocb->retry, ulp_status,
- ulp_word4);
+ "0108 No retry ELS command x%x to remote "
+ "NPORT x%x Retried:%d Error:x%x/%x "
+ "IoTag x%x nflags x%x\n",
+ cmd, did, cmdiocb->retry, ulp_status,
+ ulp_word4, cmdiocb->iotag,
+ (ndlp ? ndlp->nlp_flag : 0));
}
return 0;
}
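Each ELS completion hunk above repeats the same retrieval for the IO tag now included in the log messages: SLI-4 reads it from the work queue entry, SLI-3 from the response IOCB. As a hypothetical refactor (not part of this patch), the pattern could be captured once:

	/* sketch only; lpfc keeps this open-coded in each completion handler */
	static u32 lpfc_cmpl_iotag(struct lpfc_hba *phba,
				   struct lpfc_iocbq *cmdiocb,
				   struct lpfc_iocbq *rspiocb)
	{
		if (phba->sli_rev == LPFC_SLI_REV4)
			return get_wqe_reqtag(cmdiocb);
		return rspiocb->iocb.ulpIoTag;
	}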
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7ef9841f0..f80bbc315 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -411,7 +411,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
"port_state = x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
ndlp->nlp_flag, vport->port_state);
- spin_lock_irqsave(&ndlp->lock, iflags);
+ return;
}
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
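The lpfc_check_nlp_post_devloss() hunk turns a pointless re-lock into an early return: the branch has already dropped ndlp->lock before the logging call, so re-acquiring the lock only to fall through to the shared unlock did no useful work. Simplified control flow after the fix (assuming the branch unlocks before logging, as the surrounding context suggests):

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (recovered) {
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		/* ... logging/refcount work that runs unlocked ... */
		return;		/* was: spin_lock_irqsave() + fall through */
	}
	spin_unlock_irqrestore(&ndlp->lock, iflags);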
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 0dfdc0c4c..cadcd1649 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1814,7 +1814,9 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
mbox->sge_array->addr[sgentry], phyaddr);
}
- /* Free the sge address array memory */
+ /* Reinitialize the context pointers to avoid stale usage. */
+ mbox->ctx_buf = NULL;
+ mbox->context3 = NULL;
kfree(mbox->sge_array);
/* Finally, free the mailbox command itself */
mempool_free(mbox, phba->mbox_mem_pool);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 89cbeba06..2697da324 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -48,6 +48,29 @@
#define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
#define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
+/* lpfc_mem_free_sli_mbox
+ *
+ * @phba: HBA to free memory for
+ * @mbox: mailbox command to free
+ *
+ * This routine detects the mbox type and calls the correct
+ * free routine to fully release all associated memory.
+ */
+static void
+lpfc_mem_free_sli_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ /* Detect if the caller's mbox is an SLI4_CONFIG type. If so, this
+ * mailbox type requires a different cleanup routine. Otherwise, the
+ * mailbox is just an mbuf and mem_pool release.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ bf_get(lpfc_mqe_command, &mbox->u.mqe) == MBX_SLI4_CONFIG) {
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ } else {
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ }
+}
+
int
lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
size_t bytes;
@@ -288,27 +311,16 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *mbox, *next_mbox;
- struct lpfc_dmabuf *mp;
/* Free memory used in mailbox queue back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
- mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
list_del(&mbox->list);
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_mem_free_sli_mbox(phba, mbox);
}
/* Free memory used in mailbox cmpl list back to mailbox memory pool */
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
- mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
list_del(&mbox->list);
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_mem_free_sli_mbox(phba, mbox);
}
/* Free the active mailbox command back to the mailbox memory pool */
spin_lock_irq(&phba->hbalock);
@@ -316,12 +328,7 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
if (psli->mbox_active) {
mbox = psli->mbox_active;
- mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_mem_free_sli_mbox(phba, mbox);
psli->mbox_active = NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d9074929f..b147304b0 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -748,8 +748,10 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Save the ELS cmd */
elsiocb->drvrTimeout = cmd;
- lpfc_sli4_resume_rpi(ndlp,
- lpfc_mbx_cmpl_resume_rpi, elsiocb);
+ if (lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi,
+ elsiocb))
+ kfree(elsiocb);
goto out;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9386e7b44..706985358 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2995,8 +2995,9 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
lpfc_printf_vlog(
- vport, KERN_INFO, LOG_MBOX | LOG_SLI,
- "0010 UNREG_LOGIN vpi:%x "
+ vport, KERN_INFO,
+ LOG_MBOX | LOG_SLI | LOG_NODE,
+ "0010 UNREG_LOGIN vpi:x%x "
"rpi:%x DID:%x defer x%x flg x%x "
"x%px\n",
vport->vpi, ndlp->nlp_rpi,
@@ -3012,7 +3013,8 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
(ndlp->nlp_defer_did !=
NLP_EVT_NOTHING_PENDING)) {
lpfc_printf_vlog(
- vport, KERN_INFO, LOG_DISCOVERY,
+ vport, KERN_INFO,
+ LOG_MBOX | LOG_SLI | LOG_NODE,
"4111 UNREG cmpl deferred "
"clr x%x on "
"NPort x%x Data: x%x x%px\n",
@@ -3938,6 +3940,9 @@ void lpfc_poll_eratt(struct timer_list *t)
if (!(phba->hba_flag & HBA_SETUP))
return;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -4875,7 +4880,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
lockdep_assert_held(&phba->hbalock);
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
- if (hdrtype != 0x80 ||
+ if (hdrtype != PCI_HEADER_TYPE_MFD ||
(FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
return;
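The hdrtype check above trades a magic 0x80 for a named constant without changing behavior; for reference, the definition is:

	/* from include/uapi/linux/pci_regs.h (comment paraphrased) */
	#define PCI_HEADER_TYPE_MFD	0x80	/* multi-function device */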
@@ -10141,11 +10146,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0354 Mbox cmd issue - Enqueue Data: "
- "x%x (x%x/x%x) x%x x%x x%x\n",
+ "x%x (x%x/x%x) x%x x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0xffffff,
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
lpfc_sli_config_mbox_subsys_get(phba, mboxq),
lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ mboxq->u.mb.un.varUnregLogin.rpi,
phba->pport->port_state,
psli->sli_flag, MBX_NOWAIT);
/* Wake up worker thread to transport mailbox command from head */
@@ -22167,6 +22173,12 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
* The data will be truncated if datasz is not large enough.
* Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
* Returns the actual bytes read from the object.
+ *
+ * This routine is hard-coded to use a poll completion. Unlike other
+ * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
+ * cleaned up in lpfc_sli4_cmd_mbox_free. If this routine is modified
+ * to use interrupt-based completions, code is needed to fully clean
+ * up the memory.
*/
int
lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index cd33dfec7..c911a39cb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -182,9 +182,11 @@ typedef struct lpfcMboxq {
struct lpfc_mqe mqe;
} u;
struct lpfc_vport *vport; /* virtual port pointer */
- void *ctx_ndlp; /* caller ndlp information */
- void *ctx_buf; /* caller buffer information */
- void *context3;
+ void *ctx_ndlp; /* an lpfc_nodelist pointer */
+ void *ctx_buf; /* an lpfc_dmabuf pointer */
+ void *context3; /* a generic pointer. Code must
+ * accommodate the actual datatype.
+ */
void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
uint8_t mbox_flag;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 88068834c..aba1c1cee 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.15"
+#define LPFC_DRIVER_VERSION "14.2.0.17"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 3f0061b00..187ae0a65 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -407,7 +407,7 @@ fail:
return err;
}
-static int esp_mac_remove(struct platform_device *dev)
+static void esp_mac_remove(struct platform_device *dev)
{
struct mac_esp_priv *mep = platform_get_drvdata(dev);
struct esp *esp = mep->esp;
@@ -428,13 +428,11 @@ static int esp_mac_remove(struct platform_device *dev)
kfree(esp->command_block);
scsi_host_put(esp->host);
-
- return 0;
}
static struct platform_driver esp_mac_driver = {
.probe = esp_mac_probe,
- .remove = esp_mac_remove,
+ .remove_new = esp_mac_remove,
.driver = {
.name = DRV_MODULE_NAME,
},
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 2e511697f..181f16899 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -523,7 +523,7 @@ fail_init:
return error;
}
-static int __exit mac_scsi_remove(struct platform_device *pdev)
+static void __exit mac_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
@@ -532,11 +532,10 @@ static int __exit mac_scsi_remove(struct platform_device *pdev)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
scsi_host_put(instance);
- return 0;
}
static struct platform_driver mac_scsi_driver = {
- .remove = __exit_p(mac_scsi_remove),
+ .remove_new = __exit_p(mac_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
},
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 1e4a60fc6..0cb24fc03 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -28,6 +28,7 @@ struct mpi3_ioc_init_request {
__le64 driver_information_address;
};
+#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index ae98d15c3..3de1ee05c 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.5.0.0.0"
-#define MPI3MR_DRIVER_RELDATE "24-July-2023"
+#define MPI3MR_DRIVER_VERSION "8.5.1.0.0"
+#define MPI3MR_DRIVER_RELDATE "5-December-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -218,14 +218,16 @@ extern atomic64_t event_counter;
* @length: SGE length
* @rsvd: Reserved
* @rsvd1: Reserved
- * @sgl_type: sgl type
+ * @sub_type: sgl sub type
+ * @type: sgl type
*/
struct mpi3mr_nvme_pt_sge {
- u64 base_addr;
- u32 length;
+ __le64 base_addr;
+ __le32 length;
u16 rsvd;
u8 rsvd1;
- u8 sgl_type;
+ u8 sub_type:4;
+ u8 type:4;
};
/**
@@ -247,6 +249,8 @@ struct mpi3mr_buf_map {
u32 kern_buf_len;
dma_addr_t kern_buf_dma;
u8 data_dir;
+ u16 num_dma_desc;
+ struct dma_memory_desc *dma_desc;
};
/* IOC State definitions */
@@ -477,6 +481,10 @@ struct mpi3mr_throttle_group_info {
/* HBA port flags */
#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
+/* IOCTL data transfer sge*/
+#define MPI3MR_NUM_IOCTL_SGE 256
+#define MPI3MR_IOCTL_SGE_SIZE (8 * 1024)
+
/**
* struct mpi3mr_hba_port - HBA's port information
* @port_id: Port number
@@ -506,7 +514,7 @@ struct mpi3mr_sas_port {
u8 num_phys;
u8 marked_responding;
int lowest_phy;
- u32 phy_mask;
+ u64 phy_mask;
struct mpi3mr_hba_port *hba_port;
struct sas_identify remote_identify;
struct sas_rphy *rphy;
@@ -1042,6 +1050,11 @@ struct scmd_priv {
* @sas_node_lock: Lock to protect SAS node list
* @hba_port_table_list: List of HBA Ports
* @enclosure_list: List of Enclosure objects
+ * @ioctl_dma_pool: DMA pool for IOCTL data buffers
+ * @ioctl_sge: DMA buffer descriptors for IOCTL data
+ * @ioctl_chain_sge: DMA buffer descriptor for IOCTL chain
+ * @ioctl_resp_sge: DMA buffer descriptor for Mgmt cmd response
+ * @ioctl_sges_allocated: Flag for IOCTL SGEs allocated or not
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1227,6 +1240,12 @@ struct mpi3mr_ioc {
spinlock_t sas_node_lock;
struct list_head hba_port_table_list;
struct list_head enclosure_list;
+
+ struct dma_pool *ioctl_dma_pool;
+ struct dma_memory_desc ioctl_sge[MPI3MR_NUM_IOCTL_SGE];
+ struct dma_memory_desc ioctl_chain_sge;
+ struct dma_memory_desc ioctl_resp_sge;
+ bool ioctl_sges_allocated;
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 9dacbb857..0380996b5 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -564,7 +564,36 @@ static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
}
/**
+ * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function returns the total number of data SGEs required
+ * for the given list of data buffer descriptors, including
+ * zero-length SGEs and excluding the management request and
+ * response buffers.
+ *
+ * Return: Number of SGE elements needed
+ */
+static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
+ u8 bufcnt)
+{
+ u16 i, sge_count = 0;
+
+ for (i = 0; i < bufcnt; i++, drv_bufs++) {
+ if (drv_bufs->data_dir == DMA_NONE ||
+ drv_bufs->kern_buf)
+ continue;
+ sge_count += drv_bufs->num_dma_desc;
+ if (!drv_bufs->num_dma_desc)
+ sge_count++;
+ }
+ return sge_count;
+}
+
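A worked example of the counting rule above, with hypothetical buffer descriptors (the values are illustrative, not from this patch):

	/*
	 *   drv_bufs[0]: DMA_TO_DEVICE,   num_dma_desc = 4 -> +4
	 *   drv_bufs[1]: DMA_FROM_DEVICE, num_dma_desc = 0 -> +1 (zero-length SGE)
	 *   drv_bufs[2]: DMA_NONE                           -> skipped
	 *   drv_bufs[3]: kern_buf pre-assigned              -> skipped
	 *
	 * mpi3mr_total_num_ioctl_sges(drv_bufs, 4) == 5
	 */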
+/**
* mpi3mr_bsg_build_sgl - SGL construction for MPI commands
+ * @mrioc: Adapter instance reference
* @mpi_req: MPI request
* @sgl_offset: offset to start sgl in the MPI request
* @drv_bufs: DMA address of the buffers to be placed in sgl
@@ -576,27 +605,45 @@ static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
* This function places the DMA address of the given buffers in
* proper format as SGEs in the given MPI request.
*
- * Return: Nothing
+ * Return: 0 on success, -1 on failure
*/
-static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
- struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
- u8 is_rmr, u8 num_datasges)
+static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
+ u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
+ u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
{
+ struct mpi3_request_header *mpi_header =
+ (struct mpi3_request_header *)mpi_req;
u8 *sgl = (mpi_req + sgl_offset), count = 0;
struct mpi3_mgmt_passthrough_request *rmgmt_req =
(struct mpi3_mgmt_passthrough_request *)mpi_req;
struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
- u8 sgl_flags, sgl_flags_last;
+ u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
+ u16 available_sges, i, sges_needed;
+ u32 sge_element_size = sizeof(struct mpi3_sge_common);
+ bool chain_used = false;
sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
- MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
- sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+ sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
+ sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
+ last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+
+ sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);
if (is_rmc) {
mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
sgl_flags_last, drv_buf_iter->kern_buf_len,
drv_buf_iter->kern_buf_dma);
- sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
+ sgl = (u8 *)drv_buf_iter->kern_buf +
+ drv_buf_iter->bsg_buf_len;
+ available_sges = (drv_buf_iter->kern_buf_len -
+ drv_buf_iter->bsg_buf_len) / sge_element_size;
+
+ if (sges_needed > available_sges)
+ return -1;
+
+ chain_used = true;
drv_buf_iter++;
count++;
if (is_rmr) {
@@ -608,23 +655,95 @@ static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
} else
mpi3mr_build_zero_len_sge(
&rmgmt_req->response_sgl);
+ if (num_datasges) {
+ i = 0;
+ goto build_sges;
+ }
+ } else {
+ if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
+ return -1;
+ available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
+ sge_element_size;
+ if (!available_sges)
+ return -1;
}
if (!num_datasges) {
mpi3mr_build_zero_len_sge(sgl);
- return;
+ return 0;
}
+ if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
+ if ((sges_needed > 2) || (sges_needed > available_sges))
+ return -1;
+ for (; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE ||
+ !drv_buf_iter->num_dma_desc)
+ continue;
+ mpi3mr_add_sg_single(sgl, sgl_flags_last,
+ drv_buf_iter->dma_desc[0].size,
+ drv_buf_iter->dma_desc[0].dma_addr);
+ sgl += sge_element_size;
+ }
+ return 0;
+ }
+ i = 0;
+
+build_sges:
for (; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
- if (num_datasges == 1 || !is_rmc)
- mpi3mr_add_sg_single(sgl, sgl_flags_last,
- drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
- else
- mpi3mr_add_sg_single(sgl, sgl_flags,
- drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
- sgl += sizeof(struct mpi3_sge_common);
+ if (!drv_buf_iter->num_dma_desc) {
+ if (chain_used && !available_sges)
+ return -1;
+ if (!chain_used && (available_sges == 1) &&
+ (sges_needed > 1))
+ goto setup_chain;
+ flag = sgl_flag_eob;
+ if (num_datasges == 1)
+ flag = sgl_flags_last;
+ mpi3mr_add_sg_single(sgl, flag, 0, 0);
+ sgl += sge_element_size;
+ sges_needed--;
+ available_sges--;
+ num_datasges--;
+ continue;
+ }
+ for (; i < drv_buf_iter->num_dma_desc; i++) {
+ if (chain_used && !available_sges)
+ return -1;
+ if (!chain_used && (available_sges == 1) &&
+ (sges_needed > 1))
+ goto setup_chain;
+ flag = sgl_flags;
+ if (i == (drv_buf_iter->num_dma_desc - 1)) {
+ if (num_datasges == 1)
+ flag = sgl_flags_last;
+ else
+ flag = sgl_flag_eob;
+ }
+
+ mpi3mr_add_sg_single(sgl, flag,
+ drv_buf_iter->dma_desc[i].size,
+ drv_buf_iter->dma_desc[i].dma_addr);
+ sgl += sge_element_size;
+ available_sges--;
+ sges_needed--;
+ }
num_datasges--;
+ i = 0;
}
+ return 0;
+
+setup_chain:
+ available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
+ if (sges_needed > available_sges)
+ return -1;
+ mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
+ (sges_needed * sge_element_size),
+ mrioc->ioctl_chain_sge.dma_addr);
+ memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
+ sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
+ chain_used = true;
+ goto build_sges;
}
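When the inline frame runs out of room, the code above plants a LAST_CHAIN SGE in the final available slot and continues the list in the pre-allocated ioctl_chain_sge buffer. Resulting layout, with hypothetical counts:

	/*
	 *   admin request frame              ioctl_chain_sge buffer
	 *   +----------------------+         +------------------------+
	 *   | simple SGE #0        |         | simple SGE #2          |
	 *   | simple SGE #1 (EOB)  |         | ...                    |
	 *   | LAST_CHAIN SGE ------+-------->| simple SGE #N (EOL)    |
	 *   +----------------------+         +------------------------+
	 *
	 * the chain SGE's length is sges_needed * sizeof(struct
	 * mpi3_sge_common), and a second chain is never needed because the
	 * chain buffer is size-checked up front against sges_needed.
	 */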
/**
@@ -664,14 +783,20 @@ static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
struct mpi3mr_nvme_pt_sge *nvme_sgl;
- u64 sgl_ptr;
+ __le64 sgl_dma;
u8 count;
size_t length = 0;
+ u16 available_sges = 0, i;
+ u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
mrioc->facts.sge_mod_shift) << 32);
u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
mrioc->facts.sge_mod_shift) << 32;
+ u32 size;
+
+ nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
+ ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
/*
* Not all commands require a data transfer. If no data, just return
@@ -680,27 +805,59 @@ static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
- sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
length = drv_buf_iter->kern_buf_len;
break;
}
- if (!length)
+ if (!length || !drv_buf_iter->num_dma_desc)
return 0;
- if (sgl_ptr & sgemod_mask) {
+ if (drv_buf_iter->num_dma_desc == 1) {
+ available_sges = 1;
+ goto build_sges;
+ }
+
+ sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
+ if (sgl_dma & sgemod_mask) {
dprint_bsg_err(mrioc,
- "%s: SGL address collides with SGE modifier\n",
+ "%s: SGL chain address collides with SGE modifier\n",
__func__);
return -1;
}
- sgl_ptr &= ~sgemod_mask;
- sgl_ptr |= sgemod_val;
- nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
- ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
+ sgl_dma &= ~sgemod_mask;
+ sgl_dma |= sgemod_val;
+
+ memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
+ available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
+ if (available_sges < drv_buf_iter->num_dma_desc)
+ return -1;
memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
- nvme_sgl->base_addr = sgl_ptr;
- nvme_sgl->length = length;
+ nvme_sgl->base_addr = sgl_dma;
+ size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
+ nvme_sgl->length = cpu_to_le32(size);
+ nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
+ nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;
+
+build_sges:
+ for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
+ sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
+ if (sgl_dma & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: SGL address collides with SGE modifier\n",
+ __func__);
+ return -1;
+ }
+
+ sgl_dma &= ~sgemod_mask;
+ sgl_dma |= sgemod_val;
+
+ nvme_sgl->base_addr = sgl_dma;
+ nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
+ nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
+ nvme_sgl++;
+ available_sges--;
+ }
+
return 0;
}
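In the NVMe-encapsulated path above, a buffer with a single DMA descriptor still maps to one inline data SGE, but a scattered buffer now routes through a segment list in the chain buffer. Layout for a hypothetical two-descriptor buffer:

	/*
	 *   command SGL slot                    ioctl_chain_sge buffer
	 *   +---------------------------+       +--------------------------+
	 *   | LAST_SEGMENT              | ----> | DATA_SEGMENT (desc[0])   |
	 *   |  length = 2 * entry size  |       | DATA_SEGMENT (desc[1])   |
	 *   +---------------------------+       +--------------------------+
	 *
	 * every address is masked and OR-ed with the controller's SGE
	 * modifier bits (sgemod_mask/sgemod_val) before being written.
	 */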
@@ -728,7 +885,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
u32 offset, entry_len, dev_pgsz;
u32 page_mask_result, page_mask;
- size_t length = 0;
+ size_t length = 0, desc_len;
u8 count;
struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
@@ -737,6 +894,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
mrioc->facts.sge_mod_shift) << 32;
u16 dev_handle = nvme_encap_request->dev_handle;
struct mpi3mr_tgt_dev *tgtdev;
+ u16 desc_count = 0;
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
if (!tgtdev) {
@@ -755,6 +913,21 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
mpi3mr_tgtdev_put(tgtdev);
+ page_mask = dev_pgsz - 1;
+
+ if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
+ __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
+ return -1;
+ }
+
+ if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
+ dprint_bsg_err(mrioc,
+ "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
+ __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
+ return -1;
+ }
/*
* Not all commands require a data transfer. If no data, just return
@@ -763,14 +936,26 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
- dma_addr = drv_buf_iter->kern_buf_dma;
length = drv_buf_iter->kern_buf_len;
break;
}
- if (!length)
+ if (!length || !drv_buf_iter->num_dma_desc)
return 0;
+ for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
+ dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
+ if (dma_addr & page_mask) {
+ dprint_bsg_err(mrioc,
+ "%s:dma_addr %pad is not aligned with page size 0x%x\n",
+ __func__, &dma_addr, dev_pgsz);
+ return -1;
+ }
+ }
+
+ dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
+ desc_len = drv_buf_iter->dma_desc[0].size;
+
mrioc->prp_sz = 0;
mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
@@ -800,7 +985,6 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
* Check if we are within 1 entry of a page boundary we don't
* want our first entry to be a PRP List entry.
*/
- page_mask = dev_pgsz - 1;
page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
if (!page_mask_result) {
dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
@@ -914,18 +1098,31 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
prp_entry_dma += prp_size;
}
- /*
- * Bump the phys address of the command's data buffer by the
- * entry_len.
- */
- dma_addr += entry_len;
-
/* decrement length accounting for last partial page. */
- if (entry_len > length)
+ if (entry_len >= length) {
length = 0;
- else
+ } else {
+ if (entry_len <= desc_len) {
+ dma_addr += entry_len;
+ desc_len -= entry_len;
+ }
+ if (!desc_len) {
+ if ((++desc_count) >=
+ drv_buf_iter->num_dma_desc) {
+ dprint_bsg_err(mrioc,
+ "%s: Invalid len %zd while building PRP\n",
+ __func__, length);
+ goto err_out;
+ }
+ dma_addr =
+ drv_buf_iter->dma_desc[desc_count].dma_addr;
+ desc_len =
+ drv_buf_iter->dma_desc[desc_count].size;
+ }
length -= entry_len;
+ }
}
+
return 0;
err_out:
if (mrioc->prp_list_virt) {
@@ -935,10 +1132,66 @@ err_out:
}
return -1;
}
+
+/**
+ * mpi3mr_map_data_buffer_dma - build dma descriptors for data
+ * buffers
+ * @mrioc: Adapter instance reference
+ * @drv_buf: buffer map descriptor
+ * @desc_count: Number of already consumed dma descriptors
+ *
+ * This function computes how many pre-allocated DMA descriptors
+ * are required for the given data buffer and, if that many
+ * descriptors are free, sets up the mapping of the scattered
+ * DMA addresses to the given data buffer. If the data direction
+ * of the buffer is DMA_TO_DEVICE, the actual data is copied into
+ * the DMA buffers.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_buf_map *drv_buf,
+ u16 desc_count)
+{
+ u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
+ u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;
+
+ if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
+ needed_desc++;
+ if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
+ dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
+ __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
+ return -1;
+ }
+ drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
+ GFP_KERNEL);
+ if (!drv_buf->dma_desc)
+ return -1;
+ for (i = 0; i < needed_desc; i++, desc_count++) {
+ drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
+ drv_buf->dma_desc[i].dma_addr =
+ mrioc->ioctl_sge[desc_count].dma_addr;
+ if (buf_len < mrioc->ioctl_sge[desc_count].size)
+ drv_buf->dma_desc[i].size = buf_len;
+ else
+ drv_buf->dma_desc[i].size =
+ mrioc->ioctl_sge[desc_count].size;
+ buf_len -= drv_buf->dma_desc[i].size;
+ memset(drv_buf->dma_desc[i].addr, 0,
+ mrioc->ioctl_sge[desc_count].size);
+ if (drv_buf->data_dir == DMA_TO_DEVICE) {
+ memcpy(drv_buf->dma_desc[i].addr,
+ drv_buf->bsg_buf + copied_len,
+ drv_buf->dma_desc[i].size);
+ copied_len += drv_buf->dma_desc[i].size;
+ }
+ }
+ drv_buf->num_dma_desc = needed_desc;
+ return 0;
+}
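Descriptor accounting in the helper above, taking MPI3MR_IOCTL_SGE_SIZE of 8 KiB from mpi3mr.h and an illustrative 20 KiB buffer:

	/*
	 * needed_desc = 20 KiB / 8 KiB = 2, plus 1 for the 4 KiB remainder:
	 *   dma_desc[0].size = 8192
	 *   dma_desc[1].size = 8192
	 *   dma_desc[2].size = 4096   (buf_len < sge size on the last pass)
	 *
	 * the request is rejected up front if desc_count + 3 would exceed
	 * MPI3MR_NUM_IOCTL_SGE (256).
	 */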
/**
* mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
* @job: BSG job reference
- * @reply_payload_rcv_len: length of payload recvd
*
* This function is the top level handler for MPI Pass through
* command, this does basic validation of the input data buffers,
@@ -954,10 +1207,9 @@ err_out:
* Return: 0 on success and proper error codes on failure
*/
-static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len)
+static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
{
long rval = -EINVAL;
-
struct mpi3mr_ioc *mrioc = NULL;
u8 *mpi_req = NULL, *sense_buff_k = NULL;
u8 mpi_msg_size = 0;
@@ -965,9 +1217,10 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
struct mpi3mr_bsg_mptcmd *karg;
struct mpi3mr_buf_entry *buf_entries = NULL;
struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
- u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
- u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
- u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
+ u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
+ u8 din_cnt = 0, dout_cnt = 0;
+ u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
+ u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
struct mpi3_request_header *mpi_header = NULL;
struct mpi3_status_reply_descriptor *status_desc;
struct mpi3_scsi_task_mgmt_request *tm_req;
@@ -979,6 +1232,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
u32 din_size = 0, dout_size = 0;
u8 *din_buf = NULL, *dout_buf = NULL;
u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
+ u16 rmc_size = 0, desc_count = 0;
bsg_req = job->request;
karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
@@ -987,6 +1241,12 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
if (!mrioc)
return -ENODEV;
+ if (!mrioc->ioctl_sges_allocated) {
+ dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
+ __func__);
+ return -ENOMEM;
+ }
+
if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
@@ -1027,26 +1287,13 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
- if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
- dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
- __func__);
- rval = -EINVAL;
- goto out;
- }
- if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
- dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
- __func__);
- rval = -EINVAL;
- goto out;
- }
-
switch (buf_entries->buf_type) {
case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
sgl_iter = sgl_dout_iter;
sgl_dout_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_TO_DEVICE;
is_rmcb = 1;
- if (count != 0)
+ if ((count != 0) || !buf_entries->buf_len)
invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
@@ -1054,7 +1301,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_FROM_DEVICE;
is_rmrb = 1;
- if (count != 1 || !is_rmcb)
+ if (count != 1 || !is_rmcb || !buf_entries->buf_len)
invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_DATA_IN:
@@ -1062,7 +1309,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_FROM_DEVICE;
din_cnt++;
- din_size += drv_buf_iter->bsg_buf_len;
+ din_size += buf_entries->buf_len;
if ((din_cnt > 1) && !is_rmcb)
invalid_be = 1;
break;
@@ -1071,7 +1318,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_dout_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_TO_DEVICE;
dout_cnt++;
- dout_size += drv_buf_iter->bsg_buf_len;
+ dout_size += buf_entries->buf_len;
if ((dout_cnt > 1) && !is_rmcb)
invalid_be = 1;
break;
@@ -1080,12 +1327,16 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_NONE;
mpirep_offset = count;
+ if (!buf_entries->buf_len)
+ invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
sgl_iter = sgl_din_iter;
sgl_din_iter += buf_entries->buf_len;
drv_buf_iter->data_dir = DMA_NONE;
erb_offset = count;
+ if (!buf_entries->buf_len)
+ invalid_be = 1;
break;
case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
sgl_iter = sgl_dout_iter;
@@ -1112,21 +1363,31 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
goto out;
}
- drv_buf_iter->bsg_buf = sgl_iter;
- drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
-
- }
- if (!is_rmcb && (dout_cnt || din_cnt)) {
- sg_entries = dout_cnt + din_cnt;
- if (((mpi_msg_size) + (sg_entries *
- sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
- dprint_bsg_err(mrioc,
- "%s:%d: invalid message size passed\n",
- __func__, __LINE__);
+ if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
+ __func__);
rval = -EINVAL;
goto out;
}
+ if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter->bsg_buf = sgl_iter;
+ drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
+ }
+
+ if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
+ dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
+ __func__, __LINE__, mpi_header->function, din_size,
+ dout_size);
+ rval = -EINVAL;
+ goto out;
}
+
if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
dprint_bsg_err(mrioc,
"%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
@@ -1142,30 +1403,64 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
goto out;
}
+ if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
+ if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
+ dout_size > MPI3MR_IOCTL_SGE_SIZE) {
+ dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
+ __func__, __LINE__, din_cnt, dout_cnt, din_size,
+ dout_size);
+ rval = -EINVAL;
+ goto out;
+ }
+ }
+
drv_buf_iter = drv_bufs;
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
- if (is_rmcb && !count)
- drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
- sizeof(struct mpi3_sge_common));
-
- if (!drv_buf_iter->kern_buf_len)
- continue;
-
- drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
- drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
- GFP_KERNEL);
- if (!drv_buf_iter->kern_buf) {
- rval = -ENOMEM;
- goto out;
- }
- if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
+ if (is_rmcb && !count) {
+ drv_buf_iter->kern_buf_len =
+ mrioc->ioctl_chain_sge.size;
+ drv_buf_iter->kern_buf =
+ mrioc->ioctl_chain_sge.addr;
+ drv_buf_iter->kern_buf_dma =
+ mrioc->ioctl_chain_sge.dma_addr;
+ drv_buf_iter->dma_desc = NULL;
+ drv_buf_iter->num_dma_desc = 0;
+ memset(drv_buf_iter->kern_buf, 0,
+ drv_buf_iter->kern_buf_len);
tmplen = min(drv_buf_iter->kern_buf_len,
- drv_buf_iter->bsg_buf_len);
+ drv_buf_iter->bsg_buf_len);
+ rmc_size = tmplen;
memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
+ } else if (is_rmrb && (count == 1)) {
+ drv_buf_iter->kern_buf_len =
+ mrioc->ioctl_resp_sge.size;
+ drv_buf_iter->kern_buf =
+ mrioc->ioctl_resp_sge.addr;
+ drv_buf_iter->kern_buf_dma =
+ mrioc->ioctl_resp_sge.dma_addr;
+ drv_buf_iter->dma_desc = NULL;
+ drv_buf_iter->num_dma_desc = 0;
+ memset(drv_buf_iter->kern_buf, 0,
+ drv_buf_iter->kern_buf_len);
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ drv_buf_iter->kern_buf_len = tmplen;
+ memset(drv_buf_iter->bsg_buf, 0,
+ drv_buf_iter->bsg_buf_len);
+ } else {
+ if (!drv_buf_iter->kern_buf_len)
+ continue;
+ if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
+ rval = -ENOMEM;
+ dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
+ __func__, __LINE__);
+ goto out;
+ }
+ desc_count += drv_buf_iter->num_dma_desc;
}
}
@@ -1235,9 +1530,14 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
goto out;
}
} else {
- mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
- drv_bufs, bufcnt, is_rmcb, is_rmrb,
- (dout_cnt + din_cnt));
+ if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
+ drv_bufs, bufcnt, is_rmcb, is_rmrb,
+ (dout_cnt + din_cnt))) {
+ dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
}
if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
@@ -1273,7 +1573,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
drv_buf_iter = &drv_bufs[0];
dprint_dump(drv_buf_iter->kern_buf,
- drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ rmc_size, "mpi3_mgmt_req");
}
}
@@ -1308,10 +1608,9 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
drv_buf_iter = &drv_bufs[0];
dprint_dump(drv_buf_iter->kern_buf,
- drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ rmc_size, "mpi3_mgmt_req");
}
}
-
if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
(mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
mpi3mr_issue_tm(mrioc,
@@ -1382,17 +1681,27 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply
for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
if (drv_buf_iter->data_dir == DMA_NONE)
continue;
- if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
- tmplen = min(drv_buf_iter->kern_buf_len,
- drv_buf_iter->bsg_buf_len);
+ if ((count == 1) && is_rmrb) {
memcpy(drv_buf_iter->bsg_buf,
- drv_buf_iter->kern_buf, tmplen);
+ drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len);
+ } else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
+ tmplen = 0;
+ for (desc_count = 0;
+ desc_count < drv_buf_iter->num_dma_desc;
+ desc_count++) {
+ memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
+ drv_buf_iter->dma_desc[desc_count].addr,
+ drv_buf_iter->dma_desc[desc_count].size);
+ tmplen +=
+ drv_buf_iter->dma_desc[desc_count].size;
}
}
+ }
out_unlock:
if (din_buf) {
- *reply_payload_rcv_len =
+ job->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
din_buf, job->reply_payload.payload_len);
@@ -1408,13 +1717,8 @@ out:
kfree(mpi_req);
if (drv_bufs) {
drv_buf_iter = drv_bufs;
- for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
- if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
- dma_free_coherent(&mrioc->pdev->dev,
- drv_buf_iter->kern_buf_len,
- drv_buf_iter->kern_buf,
- drv_buf_iter->kern_buf_dma);
- }
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++)
+ kfree(drv_buf_iter->dma_desc);
kfree(drv_bufs);
}
kfree(bsg_reply_buf);
@@ -1473,7 +1777,7 @@ static int mpi3mr_bsg_request(struct bsg_job *job)
rval = mpi3mr_bsg_process_drv_cmds(job);
break;
case MPI3MR_MPT_CMD:
- rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
+ rval = mpi3mr_bsg_process_mpt_cmds(job);
break;
default:
pr_err("%s: unsupported BSG command(0x%08x)\n",
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0d148c39e..528f19f78 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -475,7 +475,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
* @op_reply_q: op_reply_qinfo object
* @reply_ci: operational reply descriptor's queue consumer index
*
- * Returns reply descriptor frame address
+ * Returns: reply descriptor frame address
*/
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
@@ -1059,6 +1059,112 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
+ * @mrioc: Adapter instance reference
+ *
+ * Free the DMA memory allocated for IOCTL handling purpose.
+ *
+ * Return: None
+ */
+static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
+{
+ struct dma_memory_desc *mem_desc;
+ u16 i;
+
+ if (!mrioc->ioctl_dma_pool)
+ return;
+
+ for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
+ mem_desc = &mrioc->ioctl_sge[i];
+ if (mem_desc->addr) {
+ dma_pool_free(mrioc->ioctl_dma_pool,
+ mem_desc->addr,
+ mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->ioctl_dma_pool);
+ mrioc->ioctl_dma_pool = NULL;
+ mem_desc = &mrioc->ioctl_chain_sge;
+
+ if (mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+ mem_desc = &mrioc->ioctl_resp_sge;
+ if (mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+
+ mrioc->ioctl_sges_allocated = false;
+}
+
+/**
+ * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
+ * @mrioc: Adapter instance reference
+ *
+ * This function allocates the DMA-able memory required to handle
+ * application-issued MPI3 IOCTL requests.
+ *
+ * Return: None
+ */
+static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
+{
+ struct dma_memory_desc *mem_desc;
+ u16 i;
+
+ mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
+ &mrioc->pdev->dev,
+ MPI3MR_IOCTL_SGE_SIZE,
+ MPI3MR_PAGE_SIZE_4K, 0);
+
+ if (!mrioc->ioctl_dma_pool) {
+ ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
+ mem_desc = &mrioc->ioctl_sge[i];
+ mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
+ mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
+ GFP_KERNEL,
+ &mem_desc->dma_addr);
+ if (!mem_desc->addr)
+ goto out_failed;
+ }
+
+ mem_desc = &mrioc->ioctl_chain_sge;
+ mem_desc->size = MPI3MR_PAGE_SIZE_4K;
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size,
+ &mem_desc->dma_addr,
+ GFP_KERNEL);
+ if (!mem_desc->addr)
+ goto out_failed;
+
+ mem_desc = &mrioc->ioctl_resp_sge;
+ mem_desc->size = MPI3MR_PAGE_SIZE_4K;
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size,
+ &mem_desc->dma_addr,
+ GFP_KERNEL);
+ if (!mem_desc->addr)
+ goto out_failed;
+
+ mrioc->ioctl_sges_allocated = true;
+
+ return;
+out_failed:
+ ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
+ "from the applications, application interface for MPT command is disabled\n");
+ mpi3mr_free_ioctl_dma_memory(mrioc);
+}
+
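Worst-case DMA footprint of the pre-allocation above, using the constants defined earlier (MPI3MR_NUM_IOCTL_SGE = 256, MPI3MR_IOCTL_SGE_SIZE = 8 KiB):

	/*
	 * pool:  256 SGEs * 8 KiB        = 2 MiB   (ioctl_dma_pool)
	 * chain: 1 * MPI3MR_PAGE_SIZE_4K = 4 KiB   (ioctl_chain_sge)
	 * resp:  1 * MPI3MR_PAGE_SIZE_4K = 4 KiB   (ioctl_resp_sge)
	 *
	 * failure is non-fatal: ioctl_sges_allocated stays false and
	 * mpi3mr_bsg_process_mpt_cmds() returns -ENOMEM to applications.
	 */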
+/**
* mpi3mr_clear_reset_history - clear reset history
* @mrioc: Adapter instance reference
*
@@ -1133,7 +1239,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
* during reset/resume
* @mrioc: Adapter instance reference
*
- * Return zero if the new IOCFacts parameters value is compatible with
+ * Return: zero if the new IOCFacts parameters value is compatible with
* older values else return -EPERM
*/
static int
@@ -3194,6 +3300,9 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
current_time = ktime_get_real();
iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
+ iocinit_req.msg_flags |=
+ MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
+
init_completion(&mrioc->init_cmds.done);
retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
sizeof(iocinit_req), 1);
@@ -3871,6 +3980,9 @@ retry_init:
}
}
+ dprint_init(mrioc, "allocating ioctl dma buffers\n");
+ mpi3mr_alloc_ioctl_dma_memory(mrioc);
+
if (!mrioc->init_cmds.reply) {
retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
if (retval) {
@@ -4290,6 +4402,7 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
struct mpi3mr_intr_info *intr_info;
mpi3mr_free_enclosure_list(mrioc);
+ mpi3mr_free_ioctl_dma_memory(mrioc);
if (mrioc->sense_buf_pool) {
if (mrioc->sense_buf)
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 872d4b809..1bffd629c 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3205,6 +3205,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
tg = stgt_priv_data->throttle_group;
throttle_enabled_dev =
stgt_priv_data->io_throttle_enabled;
+ dev_handle = stgt_priv_data->dev_handle;
}
}
if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 82b55e955..d32ad4631 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1587,7 +1587,7 @@ static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
*/
struct host_port {
u64 sas_address;
- u32 phy_mask;
+ u64 phy_mask;
u16 handle;
u8 iounit_port_id;
u8 used;
@@ -1611,7 +1611,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
struct mpi3mr_sas_port *mr_sas_port)
{
struct mpi3mr_sas_phy *mr_sas_phy;
- u32 phy_mask_xor;
+ u64 phy_mask_xor;
u64 phys_to_be_added, phys_to_be_removed;
int i;
@@ -1619,7 +1619,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
mr_sas_port->marked_responding = 1;
dev_info(&mr_sas_port->port->dev,
- "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n",
+ "sas_address(0x%016llx), old: port_id %d phy_mask 0x%llx, new: port_id %d phy_mask:0x%llx\n",
mr_sas_port->remote_identify.sas_address,
mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask,
h_port->iounit_port_id, h_port->phy_mask);
@@ -1637,7 +1637,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
* if these phys are previously registered with another port
* then delete these phys from that port first.
*/
- for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) {
+ for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u64)) {
mr_sas_phy = &mrioc->sas_hba.phy[i];
if (mr_sas_phy->phy_belongs_to_port)
mpi3mr_del_phy_from_an_existing_port(mrioc,
@@ -1649,7 +1649,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
}
/* Delete the phys which are not part of current mr_sas_port's port. */
- for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) {
+ for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u64)) {
mr_sas_phy = &mrioc->sas_hba.phy[i];
if (mr_sas_phy->phy_belongs_to_port)
mpi3mr_del_phy_from_an_existing_port(mrioc,
@@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
void
mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
{
- struct host_port h_port[32];
+ struct host_port *h_port = NULL;
int i, j, found, host_port_count = 0, port_idx;
u16 sz, attached_handle, ioc_status;
struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
@@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_io_unit_pg0)
return;
+ h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL);
+ if (!h_port)
+ goto out;
+
if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -1742,7 +1746,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
port_list) {
ioc_info(mrioc,
- "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%llx), lowest phy id:%d\n",
mr_sas_port->hba_port->port_id,
mr_sas_port->remote_identify.sas_address,
mr_sas_port->phy_mask, mr_sas_port->lowest_phy);
@@ -1751,7 +1755,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
ioc_info(mrioc, "Host port details after reset\n");
for (i = 0; i < host_port_count; i++) {
ioc_info(mrioc,
- "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%llx), lowest phy id:%d\n",
h_port[i].iounit_port_id, h_port[i].sas_address,
h_port[i].phy_mask, h_port[i].lowest_phy);
}
@@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
}
}
out:
+ kfree(h_port);
kfree(sas_io_unit_pg0);
}
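The u32 to u64 phy_mask widening above matters on controllers with more than 32 phys: a 32-bit walk silently ignored phys 32-63, which is also why the fixed host_port h_port[32] stack array becomes a kcalloc'd table of 64 entries. A minimal illustration (values hypothetical):

	u64 phy_mask = BIT_ULL(40) | BIT_ULL(3);	/* phys 3 and 40 present */
	int i;

	for_each_set_bit(i, (unsigned long *)&phy_mask, BITS_PER_TYPE(u64))
		pr_info("phy %d\n", i);	/* visits 3 and 40; a walk bounded by
					 * BITS_PER_TYPE(u32) never saw phy 40 */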
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 4d0be5ab9..587f7d248 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -643,18 +643,14 @@ typedef struct _MPI2_CHIP_REVISION_ID {
/*Manufacturing Page 2 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check Header.PageLength at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check Header.PageLength at
+ *runtime before using HwSettings[].
*/
-#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
-#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
- U32
- HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */
+ U32 HwSettings[]; /*0x08 */
} MPI2_CONFIG_PAGE_MAN_2,
*PTR_MPI2_CONFIG_PAGE_MAN_2,
Mpi2ManufacturingPage2_t,
@@ -666,18 +662,14 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
/*Manufacturing Page 3 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check Header.PageLength at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check Header.PageLength at
+ *runtime before using Info[].
*/
-#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
-#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_MAN_3 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
- U32
- Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */
+ U32 Info[]; /*0x08 */
} MPI2_CONFIG_PAGE_MAN_3,
*PTR_MPI2_CONFIG_PAGE_MAN_3,
Mpi2ManufacturingPage3_t,
@@ -765,12 +757,9 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_4 {
/*Manufacturing Page 5 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using Phy[].
*/
-#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
-#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
-#endif
typedef struct _MPI2_MANUFACTURING5_ENTRY {
U64 WWID; /*0x00 */
@@ -787,8 +776,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_5 {
U16 Reserved2; /*0x06 */
U32 Reserved3; /*0x08 */
U32 Reserved4; /*0x0C */
- MPI2_MANUFACTURING5_ENTRY
- Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x08 */
+ MPI2_MANUFACTURING5_ENTRY Phy[]; /*0x10 */
} MPI2_CONFIG_PAGE_MAN_5,
*PTR_MPI2_CONFIG_PAGE_MAN_5,
Mpi2ManufacturingPage5_t,
@@ -864,12 +852,9 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using ConnectorInfo[].
*/
-#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
-#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
@@ -880,8 +865,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
U8 NumPhys; /*0x20 */
U8 Reserved3; /*0x21 */
U16 Reserved4; /*0x22 */
- MPI2_MANPAGE7_CONNECTOR_INFO
- ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */
+ MPI2_MANPAGE7_CONNECTOR_INFO ConnectorInfo[]; /*0x24 */
} MPI2_CONFIG_PAGE_MAN_7,
*PTR_MPI2_CONFIG_PAGE_MAN_7,
Mpi2ManufacturingPage7_t,
@@ -990,7 +974,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
/*
*Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for GPIOCount at runtime.
+ *36 and check the value returned for GPIOCount at runtime.
*/
#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (36)
@@ -1019,12 +1003,9 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
/*IO Unit Page 5 */
/*
- *Upper layer code (drivers, utilities, etc.) should leave this define set to
- *one and check the value returned for NumDmaEngines at runtime.
+ *Upper layer code (drivers, utilities, etc.) should check the value returned
+ *for NumDmaEngines at runtime before using DmaEngineCapabilities[].
*/
-#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
-#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
@@ -1042,7 +1023,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
U32 Reserved2; /*0x24 */
U32 Reserved3; /*0x28 */
U32
- DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */
+ DmaEngineCapabilities[]; /*0x2C */
} MPI2_CONFIG_PAGE_IO_UNIT_5,
*PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t;
@@ -1219,12 +1200,9 @@ typedef struct _MPI2_IOUNIT8_SENSOR {
#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumSensors at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumSensors at runtime before using Sensor[].
*/
-#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
-#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
@@ -1233,8 +1211,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
U8 NumSensors; /*0x0C */
U8 PollingInterval; /*0x0D */
U16 Reserved3; /*0x0E */
- MPI2_IOUNIT8_SENSOR
- Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */
+ MPI2_IOUNIT8_SENSOR Sensor[]; /*0x10 */
} MPI2_CONFIG_PAGE_IO_UNIT_8,
*PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t;
@@ -1259,12 +1236,9 @@ typedef struct _MPI2_IOUNIT9_SENSOR {
#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumSensors at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumSensors at runtime before using Sensor[].
*/
-#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
-#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
@@ -1273,8 +1247,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
U8 NumSensors; /*0x0C */
U8 Reserved4; /*0x0D */
U16 Reserved3; /*0x0E */
- MPI2_IOUNIT9_SENSOR
- Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */
+ MPI2_IOUNIT9_SENSOR Sensor[]; /*0x10 */
} MPI2_CONFIG_PAGE_IO_UNIT_9,
*PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t;
@@ -1294,12 +1267,9 @@ typedef struct _MPI2_IOUNIT10_FUNCTION {
*pMpi2IOUnit10Function_t;
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumFunctions at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumFunctions at runtime before using Function[].
*/
-#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
-#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
@@ -1308,8 +1278,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
U16 Reserved2; /*0x06 */
U32 Reserved3; /*0x08 */
U32 Reserved4; /*0x0C */
- MPI2_IOUNIT10_FUNCTION
- Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */
+ MPI2_IOUNIT10_FUNCTION Function[]; /*0x10 */
} MPI2_CONFIG_PAGE_IO_UNIT_10,
*PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t;
@@ -1764,12 +1733,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
/*BIOS Page 4 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using Phy[].
*/
-#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
-#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
-#endif
typedef struct _MPI2_BIOS4_ENTRY {
U64 ReassignmentWWID; /*0x00 */
@@ -1782,8 +1748,7 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_4 {
U8 NumPhys; /*0x04 */
U8 Reserved1; /*0x05 */
U16 Reserved2; /*0x06 */
- MPI2_BIOS4_ENTRY
- Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */
+ MPI2_BIOS4_ENTRY Phy[]; /*0x08 */
} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4,
Mpi2BiosPage4_t, *pMpi2BiosPage4_t;
@@ -1836,12 +1801,9 @@ typedef struct _MPI2_RAIDVOL0_SETTINGS {
#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhysDisks at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhysDisks at runtime before using PhysDisk[].
*/
-#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
-#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
@@ -1861,8 +1823,7 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 {
U8 Reserved2; /*0x25 */
U8 Reserved3; /*0x26 */
U8 InactiveStatus; /*0x27 */
- MPI2_RAIDVOL0_PHYS_DISK
- PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */
+ MPI2_RAIDVOL0_PHYS_DISK PhysDisk[]; /*0x28 */
} MPI2_CONFIG_PAGE_RAID_VOL_0,
*PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t;
@@ -2045,12 +2006,9 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 {
/*RAID Physical Disk Page 1 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhysDiskPaths at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhysDiskPaths at runtime before using PhysicalDiskPath[].
*/
-#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
-#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
-#endif
typedef struct _MPI2_RAIDPHYSDISK1_PATH {
U16 DevHandle; /*0x00 */
@@ -2075,8 +2033,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
U8 PhysDiskNum; /*0x05 */
U16 Reserved1; /*0x06 */
U32 Reserved2; /*0x08 */
- MPI2_RAIDPHYSDISK1_PATH
- PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */
+ MPI2_RAIDPHYSDISK1_PATH PhysicalDiskPath[]; /*0x0C */
} MPI2_CONFIG_PAGE_RD_PDISK_1,
*PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
Mpi2RaidPhysDiskPage1_t,
@@ -2221,12 +2178,9 @@ typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA {
*pMpi2SasIOUnit0PhyData_t;
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using PhyData[].
*/
-#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
-#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -2234,8 +2188,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
U8 NumPhys; /*0x0C */
U8 Reserved2;/*0x0D */
U16 Reserved3;/*0x0E */
- MPI2_SAS_IO_UNIT0_PHY_DATA
- PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */
+ MPI2_SAS_IO_UNIT0_PHY_DATA PhyData[];/*0x10 */
} MPI2_CONFIG_PAGE_SASIOUNIT_0,
*PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t;
@@ -2296,12 +2249,9 @@ typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA {
*pMpi2SasIOUnit1PhyData_t;
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using PhyData[].
*/
-#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
-#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -2322,7 +2272,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
U8
IODeviceMissingDelay; /*0x13 */
MPI2_SAS_IO_UNIT1_PHY_DATA
- PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */
+ PhyData[]; /*0x14 */
} MPI2_CONFIG_PAGE_SASIOUNIT_1,
*PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t;
@@ -2502,12 +2452,9 @@ typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using SASPhyPowerManagementSettings[].
*/
-#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
-#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -2516,7 +2463,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
U16 Reserved2;/*0x0A */
U32 Reserved3;/*0x0C */
MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
- SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */
+ SASPhyPowerManagementSettings[]; /*0x10 */
} MPI2_CONFIG_PAGE_SASIOUNIT_5,
*PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t;
@@ -2554,12 +2501,9 @@ typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumGroups at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumGroups at runtime before using PortWidthModulationGroupStatus[].
*/
-#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
-#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -2569,7 +2513,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
U8 Reserved3; /*0x11 */
U16 Reserved4; /*0x12 */
MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
- PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */
+ PortWidthModulationGroupStatus[]; /*0x14 */
} MPI2_CONFIG_PAGE_SASIOUNIT_6,
*PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t;
@@ -2597,12 +2541,9 @@ typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumGroups at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumGroups at runtime before using PortWidthModulationGroupSettings[].
*/
-#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
-#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -2615,7 +2556,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
U8 Reserved4; /*0x15 */
U16 Reserved5; /*0x16 */
MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
- PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */
+ PortWidthModulationGroupSettings[]; /*0x18 */
} MPI2_CONFIG_PAGE_SASIOUNIT_7,
*PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t;
@@ -3086,12 +3027,9 @@ typedef struct _MPI2_SASPHY2_PHY_EVENT {
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhyEvents at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhyEvents at runtime before using PhyEvent[].
*/
-#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
-#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER
@@ -3105,7 +3043,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
U16
Reserved3; /*0x0E */
MPI2_SASPHY2_PHY_EVENT
- PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */
+ PhyEvent[]; /*0x10 */
} MPI2_CONFIG_PAGE_SAS_PHY_2,
*PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
Mpi2SasPhyPage2_t,
@@ -3200,12 +3138,9 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhyEvents at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhyEvents at runtime before using PhyEventConfig[].
*/
-#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
-#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
-#endif
typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER
@@ -3219,7 +3154,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
U16
Reserved3; /*0x0E */
MPI2_SASPHY3_PHY_EVENT_CONFIG
- PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */
+ PhyEventConfig[]; /*0x10 */
} MPI2_CONFIG_PAGE_SAS_PHY_3,
*PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t;
@@ -3358,12 +3293,9 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
/*Log Page 0 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumLogEntries at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumLogEntries at runtime before using LogEntry[].
*/
-#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
-#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
-#endif
#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
@@ -3393,8 +3325,7 @@ typedef struct _MPI2_CONFIG_PAGE_LOG_0 {
U32 Reserved2; /*0x0C */
U16 NumLogEntries;/*0x10 */
U16 Reserved3; /*0x12 */
- MPI2_LOG_0_ENTRY
- LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */
+ MPI2_LOG_0_ENTRY LogEntry[]; /*0x14 */
} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0,
Mpi2LogPage0_t, *pMpi2LogPage0_t;
@@ -3408,12 +3339,9 @@ typedef struct _MPI2_CONFIG_PAGE_LOG_0 {
/*RAID Page 0 */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumElements at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumElements at runtime before using ConfigElement[].
*/
-#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
-#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
-#endif
typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT {
U16 ElementFlags; /*0x00 */
@@ -3446,8 +3374,7 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 {
U8 NumElements; /*0x2C */
U8 Reserved2; /*0x2D */
U16 Reserved3; /*0x2E */
- MPI2_RAIDCONFIG0_CONFIG_ELEMENT
- ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[];/*0x30 */
} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
*PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
Mpi2RaidConfigurationPage0_t,
@@ -3687,12 +3614,9 @@ typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA {
Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t;
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using PhyData[].
*/
-#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX
-#define MPI26_PCIE_IOUNIT0_PHY_MAX (1)
-#endif
typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -3701,7 +3625,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 {
U8 InitStatus; /*0x0D */
U16 Reserved3; /*0x0E */
MPI26_PCIE_IO_UNIT0_PHY_DATA
- PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */
+ PhyData[]; /*0x10 */
} MPI26_CONFIG_PAGE_PIOUNIT_0,
*PTR_MPI26_CONFIG_PAGE_PIOUNIT_0,
Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t;
@@ -3744,12 +3668,9 @@ typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
#define MPI26_PCIEIOUNIT1_LINKFLAGS_SRNS_EN (0x02)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumPhys at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumPhys at runtime before using PhyData[].
*/
-#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX
-#define MPI26_PCIE_IOUNIT1_PHY_MAX (1)
-#endif
typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -3761,7 +3682,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
U8 DMDReportPCIe; /*0x11 */
U16 Reserved2; /*0x12 */
MPI26_PCIE_IO_UNIT1_PHY_DATA
- PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
+ PhyData[]; /*0x14 */
} MPI26_CONFIG_PAGE_PIOUNIT_1,
*PTR_MPI26_CONFIG_PAGE_PIOUNIT_1,
Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t;
@@ -3993,12 +3914,9 @@ typedef struct _MPI26_PCIELINK2_LINK_EVENT {
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumLinkEvents at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumLinkEvents at runtime before using LinkEvent[].
*/
-#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX
-#define MPI26_PCIELINK2_LINK_EVENT_MAX (1)
-#endif
typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -4009,7 +3927,7 @@ typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 {
U8 Reserved3; /*0x0D */
U16 Reserved4; /*0x0E */
MPI26_PCIELINK2_LINK_EVENT
- LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */
+ LinkEvent[]; /*0x10 */
} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2,
Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t;
@@ -4067,12 +3985,9 @@ typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG {
#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001)
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check the value returned for NumLinkEvents at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check the value returned
+ *for NumLinkEvents at runtime before using LinkEventConfig[].
*/
-#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX
-#define MPI26_PCIELINK3_LINK_EVENT_MAX (1)
-#endif
typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
@@ -4083,7 +3998,7 @@ typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
U8 Reserved3; /*0x0D */
U16 Reserved4; /*0x0E */
MPI26_PCIELINK3_LINK_EVENT_CONFIG
- LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */
+ LinkEventConfig[]; /*0x10 */
} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3,
Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t;
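
Every hunk in mpi2_cnfg.h follows the same shape: a trailing one-element array sized by an overridable define becomes a C99 flexible array member, and the comment now tells host code to size the page from the runtime count. A sketch of the allocation pattern callers use with such structs, built around a hypothetical page layout; struct_size() is the real kernel helper from <linux/overflow.h>:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_page {                   /* hypothetical config page */
            u16 length;
            u8  num_phys;
            u8  reserved;
            u32 phy_data[];                 /* runtime-sized trailing array */
    };

    static struct example_page *alloc_example_page(u8 num_phys)
    {
            struct example_page *pg;

            /*
             * struct_size() = sizeof(*pg) + num_phys * sizeof(pg->phy_data[0]),
             * with integer-overflow checking built in.
             */
            pg = kzalloc(struct_size(pg, phy_data, num_phys), GFP_KERNEL);
            if (pg)
                    pg->num_phys = num_phys;
            return pg;
    }
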
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
index 33b9c3a6f..798ab6e33 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -295,20 +295,9 @@ typedef struct _MPI2_EXT_IMAGE_HEADER {
/*FLASH Layout Extended Image Data */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check RegionsPerLayout at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check NumberOfLayouts and
+ *RegionsPerLayout at runtime before using Layout[] and Region[].
*/
-#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
-#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
-#endif
-
-/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfLayouts at runtime.
- */
-#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
-#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
-#endif
typedef struct _MPI2_FLASH_REGION {
U8 RegionType; /*0x00 */
@@ -325,7 +314,7 @@ typedef struct _MPI2_FLASH_LAYOUT {
U32 Reserved1; /*0x04 */
U32 Reserved2; /*0x08 */
U32 Reserved3; /*0x0C */
- MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
+ MPI2_FLASH_REGION Region[]; /*0x10 */
} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
@@ -339,7 +328,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
U16 MinimumSectorAlignment; /*0x08 */
U16 Reserved3; /*0x0A */
U32 Reserved4; /*0x0C */
- MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
+ MPI2_FLASH_LAYOUT Layout[]; /*0x10 */
} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
@@ -373,12 +362,9 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
/*Supported Devices Extended Image Data */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumberOfDevices at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check NumberOfDevices at
+ *runtime before using SupportedDevice[].
*/
-#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
-#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
-#endif
typedef struct _MPI2_SUPPORTED_DEVICE {
U16 DeviceID; /*0x00 */
@@ -399,7 +385,7 @@ typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
U8 Reserved2; /*0x03 */
U32 Reserved3; /*0x04 */
MPI2_SUPPORTED_DEVICE
- SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+ SupportedDevice[]; /*0x08 */
} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
@@ -464,7 +450,7 @@ typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
U8 EncryptionAlgorithm; /*0x02 */
U8 Reserved1; /*0x03 */
U32 Reserved2; /*0x04 */
- U32 EncryptedHash[1]; /*0x08 */ /* variable length */
+ U32 EncryptedHash[]; /*0x08 */
} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
@@ -508,7 +494,7 @@ typedef struct _MPI25_ENCRYPTED_HASH_DATA {
U8 NumHash; /*0x01 */
U16 Reserved1; /*0x02 */
U32 Reserved2; /*0x04 */
- MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /*0x08 */
+ MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[]; /*0x08 */
} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 2c5711517..d92852591 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -808,12 +808,9 @@ typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK {
/*Integrated RAID Configuration Change List Event data */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumElements at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check NumElements at
+ *runtime before using ConfigElement[].
*/
-#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
-#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
-#endif
typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT {
U16 ElementFlags; /*0x00 */
@@ -848,7 +845,7 @@ typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST {
U8 ConfigNum; /*0x03 */
U32 Flags; /*0x04 */
MPI2_EVENT_IR_CONFIG_ELEMENT
- ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */
+ ConfigElement[];/*0x08 */
} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
*PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
Mpi2EventDataIrConfigChangeList_t,
@@ -969,12 +966,9 @@ typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW {
/*SAS Topology Change List Event data */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumEntries at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check NumEntries at
+ *runtime before using PHY[].
*/
-#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
-#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
-#endif
typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY {
U16 AttachedDevHandle; /*0x00 */
@@ -994,7 +988,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
U8 ExpStatus; /*0x0A */
U8 PhysicalPort; /*0x0B */
MPI2_EVENT_SAS_TOPO_PHY_ENTRY
- PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */
+ PHY[]; /*0x0C */
} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
*PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
Mpi2EventDataSasTopologyChangeList_t,
@@ -1229,12 +1223,9 @@ typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION {
/*PCIe Topology Change List Event data (MPI v2.6 and later) */
/*
- *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
- *one and check NumEntries at runtime.
+ *Host code (drivers, BIOS, utilities, etc.) should check NumEntries at
+ *runtime before using PortEntry[].
*/
-#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT
-#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1)
-#endif
typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
U16 AttachedDevHandle; /*0x00 */
@@ -1286,7 +1277,7 @@ typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST {
U8 SwitchStatus; /*0x0A */
U8 PhysicalPort; /*0x0B */
MPI26_EVENT_PCIE_TOPO_PORT_ENTRY
- PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */
+ PortEntry[]; /*0x0C */
} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
*PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
Mpi26EventDataPCIeTopologyChangeList_t,
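
The event-data structures converted here are filled in by firmware, so the runtime count (NumElements, NumEntries, ...) is the only bound on the trailing array. A hedged sketch of consuming such an event, with hypothetical field names; the length check keeps a bogus count from walking past the buffer that was actually received:

    #include <linux/overflow.h>
    #include <linux/types.h>

    struct example_entry {
            u16 handle;
            u16 flags;
    };

    struct example_event {                  /* hypothetical event payload */
            u16 num_entries;
            u16 reserved;
            struct example_entry entry[];
    };

    static u32 sum_entry_flags(const struct example_event *ev, size_t buf_len)
    {
            u32 sum = 0;
            unsigned int i;

            /* trust num_entries only if it fits inside the received buffer */
            if (struct_size(ev, entry, ev->num_entries) > buf_len)
                    return 0;

            for (i = 0; i < ev->num_entries; i++)
                    sum += ev->entry[i].flags;
            return sum;
    }
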
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index aa29e250c..b8120ca93 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4893,8 +4893,7 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
if (!num_phys)
return;
- sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
- sizeof(Mpi2SasIOUnit1PhyData_t));
+ sz = struct_size(sas_iounit_pg1, PhyData, num_phys);
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -5044,7 +5043,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage2_t trigger_pg2;
struct SL_WH_EVENT_TRIGGER_T *event_tg;
- MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
+ MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY *mpi_event_tg;
Mpi2ConfigReply_t mpi_reply;
int r = 0, i = 0;
u16 count = 0;
@@ -5096,7 +5095,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage3_t trigger_pg3;
struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
- MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
+ MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY *mpi_scsi_tg;
Mpi2ConfigReply_t mpi_reply;
int r = 0, i = 0;
u16 count = 0;
@@ -5148,7 +5147,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage4_t trigger_pg4;
struct SL_WH_MPI_TRIGGER_T *status_tg;
- MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY *mpi_status_tg;
Mpi2ConfigReply_t mpi_reply;
int r = 0, i = 0;
u16 count = 0;
@@ -5379,10 +5378,9 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ConfigReply_t mpi_reply;
- Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage1_t sas_iounit_pg1;
Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
u16 depth;
- int sz;
int rc = 0;
ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
@@ -5392,28 +5390,21 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_gen35_ioc)
goto out;
/* sas iounit page 1 */
- sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
- sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
- if (!sas_iounit_pg1) {
- pr_err("%s: failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return rc;
- }
rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
- sas_iounit_pg1, sz);
+ &sas_iounit_pg1, sizeof(Mpi2SasIOUnitPage1_t));
if (rc) {
pr_err("%s: failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
goto out;
}
- depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+ depth = le16_to_cpu(sas_iounit_pg1.SASWideMaxQueueDepth);
ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
- depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+ depth = le16_to_cpu(sas_iounit_pg1.SASNarrowMaxQueueDepth);
ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
- depth = sas_iounit_pg1->SATAMaxQDepth;
+ depth = sas_iounit_pg1.SATAMaxQDepth;
ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
/* pcie iounit page 1 */
@@ -5432,7 +5423,6 @@ out:
"MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
ioc->max_wideport_qd, ioc->max_narrowport_qd,
ioc->max_sata_qd, ioc->max_nvme_qd));
- kfree(sas_iounit_pg1);
return rc;
}
@@ -5588,6 +5578,7 @@ out:
static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
+ Mpi2IOUnitPage8_t iounit_pg8;
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
int tg_flags = 0;
@@ -5684,7 +5675,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
if (rc)
return rc;
- rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8);
if (rc)
return rc;
_base_display_ioc_capabilities(ioc);
@@ -5706,8 +5697,8 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
- if (ioc->iounit_pg8.NumSensors)
- ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
+ if (iounit_pg8.NumSensors)
+ ioc->temp_sensors_count = iounit_pg8.NumSensors;
if (ioc->is_aero_ioc) {
rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
if (rc)
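
The _base_assign_fw_reported_qd() hunk above works because of the header conversion: with PhyData[] a flexible array member, sizeof(Mpi2SasIOUnitPage1_t) now covers only the fixed header, which is all this query needs, so the kzalloc/kfree pair collapses into a plain stack variable. The size difference in miniature, with hypothetical layouts:

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct page_old { u32 hdr; u32 data[1]; }; /* sizeof == 8: header + 1 entry */
    struct page_new { u32 hdr; u32 data[]; };  /* sizeof == 4: header only */

    static_assert(sizeof(struct page_new) == sizeof(u32));
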
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 1be0850ca..6d0bc8c66 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1237,7 +1237,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ioc_pg8: static ioc page 8
* @iounit_pg0: static iounit page 0
* @iounit_pg1: static iounit page 1
- * @iounit_pg8: static iounit page 8
* @sas_hba: sas host object
* @sas_expander_list: expander object list
* @enclosure_list: enclosure object list
@@ -1465,7 +1464,6 @@ struct MPT3SAS_ADAPTER {
Mpi2IOCPage8_t ioc_pg8;
Mpi2IOUnitPage0_t iounit_pg0;
Mpi2IOUnitPage1_t iounit_pg1;
- Mpi2IOUnitPage8_t iounit_pg8;
Mpi2IOCPage1_t ioc_pg1_copy;
struct _boot_device req_boot_device;
@@ -1983,6 +1981,7 @@ extern const struct attribute_group *mpt3sas_host_groups[];
extern const struct attribute_group *mpt3sas_dev_groups[];
void mpt3sas_ctl_init(ushort hbas_to_enumerate);
void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
+void mpt3sas_ctl_release(struct MPT3SAS_ADAPTER *ioc);
u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index d114ef381..2e88f456f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -2334,7 +2334,7 @@ mpt3sas_config_update_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc,
tg_pg2.NumMPIEventTrigger = 0;
memset(&tg_pg2.MPIEventTriggers[0], 0,
NUM_VALID_ENTRIES * sizeof(
- MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY));
+ MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY));
}
rc = _config_set_driver_trigger_pg2(ioc, &mpi_reply, &tg_pg2);
@@ -2493,7 +2493,7 @@ mpt3sas_config_update_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc,
tg_pg3.NumSCSISenseTrigger = 0;
memset(&tg_pg3.SCSISenseTriggers[0], 0,
NUM_VALID_ENTRIES * sizeof(
- MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY));
+ MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY));
}
rc = _config_set_driver_trigger_pg3(ioc, &mpi_reply, &tg_pg3);
@@ -2649,7 +2649,7 @@ mpt3sas_config_update_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc,
tg_pg4.NumIOCStatusLogInfoTrigger = 0;
memset(&tg_pg4.IOCStatusLoginfoTriggers[0], 0,
NUM_VALID_ENTRIES * sizeof(
- MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY));
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY));
}
rc = _config_set_driver_trigger_pg4(ioc, &mpi_reply, &tg_pg4);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index efdb8178d..147cb7088 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -4157,31 +4157,37 @@ mpt3sas_ctl_init(ushort hbas_to_enumerate)
}
/**
- * mpt3sas_ctl_exit - exit point for ctl
- * @hbas_to_enumerate: ?
+ * mpt3sas_ctl_release - release dma for ctl
+ * @ioc: per adapter object
*/
void
-mpt3sas_ctl_exit(ushort hbas_to_enumerate)
+mpt3sas_ctl_release(struct MPT3SAS_ADAPTER *ioc)
{
- struct MPT3SAS_ADAPTER *ioc;
int i;
- list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ /* free memory associated to diag buffers */
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!ioc->diag_buffer[i])
+ continue;
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i],
+ ioc->diag_buffer_dma[i]);
+ ioc->diag_buffer[i] = NULL;
+ ioc->diag_buffer_status[i] = 0;
+ }
- /* free memory associated to diag buffers */
- for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
- if (!ioc->diag_buffer[i])
- continue;
- dma_free_coherent(&ioc->pdev->dev,
- ioc->diag_buffer_sz[i],
- ioc->diag_buffer[i],
- ioc->diag_buffer_dma[i]);
- ioc->diag_buffer[i] = NULL;
- ioc->diag_buffer_status[i] = 0;
- }
+ kfree(ioc->event_log);
+}
+
+/**
+ * mpt3sas_ctl_exit - exit point for ctl
+ * @hbas_to_enumerate: ?
+ */
+void
+mpt3sas_ctl_exit(ushort hbas_to_enumerate)
+{
- kfree(ioc->event_log);
- }
if (hbas_to_enumerate != 1)
misc_deregister(&ctl_dev);
if (hbas_to_enumerate != 2)
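
The ctl rework above changes ownership rather than behaviour: diag-buffer DMA memory and the event log are now torn down per adapter by the new mpt3sas_ctl_release() (called from scsih_remove() further down), instead of by walking the global IOC list at module exit, when the devices may already be gone. A sketch of the pattern with hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>
    #include <linux/slab.h>

    #define EXAMPLE_BUF_COUNT 4

    struct example_adapter {                /* hypothetical per-device state */
            struct pci_dev *pdev;
            void *buf[EXAMPLE_BUF_COUNT];
            size_t buf_sz[EXAMPLE_BUF_COUNT];
            dma_addr_t buf_dma[EXAMPLE_BUF_COUNT];
            void *event_log;
    };

    static void example_release(struct example_adapter *ad)
    {
            int i;

            for (i = 0; i < EXAMPLE_BUF_COUNT; i++) {
                    if (!ad->buf[i])
                            continue;
                    dma_free_coherent(&ad->pdev->dev, ad->buf_sz[i],
                                      ad->buf[i], ad->buf_dma[i]);
                    ad->buf[i] = NULL;
            }
            kfree(ad->event_log);
    }

    static void example_pci_remove(struct pci_dev *pdev)
    {
            /* the struct device is still live here, so DMA frees are valid */
            example_release(pci_get_drvdata(pdev));
    }
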
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 605013d3e..51b5788da 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2431,8 +2431,7 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
}
raid_device->num_pds = num_pds;
- sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
- sizeof(Mpi2RaidVol0PhysDisk_t));
+ sz = struct_size(vol_pg0, PhysDisk, num_pds);
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
dfailprintk(ioc,
@@ -5966,8 +5965,7 @@ _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
/*
* Read SASIOUnitPage0 to get each HBA Phy's data.
*/
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
- (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6145,8 +6143,7 @@ _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
u64 attached_sas_addr;
u8 found = 0, port_count = 0, port_id;
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
- * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6579,8 +6576,7 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
(u64)ioc->sas_hba.sas_address));
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
- * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6731,8 +6727,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
ioc->sas_hba.num_phys = num_phys;
/* sas_iounit page 0 */
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6754,8 +6749,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
}
/* sas_iounit page 1 */
- sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit1PhyData_t));
+ sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -10376,8 +10370,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
Mpi2ExpanderPage0_t expander_pg0;
Mpi2SasDevicePage0_t sas_device_pg0;
Mpi26PCIeDevicePage0_t pcie_device_pg0;
- Mpi2RaidVolPage1_t *volume_pg1;
- Mpi2RaidVolPage0_t *volume_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
Mpi2RaidPhysDiskPage0_t pd_pg0;
Mpi2EventIrConfigElement_t element;
Mpi2ConfigReply_t mpi_reply;
@@ -10392,16 +10386,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u8 retry_count;
unsigned long flags;
- volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
- if (!volume_pg0)
- return;
-
- volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
- if (!volume_pg1) {
- kfree(volume_pg0);
- return;
- }
-
ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
@@ -10511,7 +10495,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
/* volumes */
handle = 0xFFFF;
while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
- volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
@@ -10519,15 +10503,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
- handle = le16_to_cpu(volume_pg1->DevHandle);
+ handle = le16_to_cpu(volume_pg1.DevHandle);
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_wwid(ioc,
- le64_to_cpu(volume_pg1->WWID));
+ le64_to_cpu(volume_pg1.WWID));
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (raid_device)
continue;
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
- volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t)))
continue;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@@ -10537,17 +10521,17 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
- if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
- volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
- volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
- element.VolDevHandle = volume_pg1->DevHandle;
+ element.VolDevHandle = volume_pg1.DevHandle;
ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
- volume_pg1->DevHandle);
+ volume_pg1.DevHandle);
_scsih_sas_volume_add(ioc, &element);
ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
- volume_pg1->DevHandle);
+ volume_pg1.DevHandle);
}
}
@@ -10636,9 +10620,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
- kfree(volume_pg0);
- kfree(volume_pg1);
-
ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
ioc_info(ioc, "scan devices: complete\n");
}
@@ -11350,6 +11331,7 @@ static void scsih_remove(struct pci_dev *pdev)
}
mpt3sas_base_detach(ioc);
+ mpt3sas_ctl_release(ioc);
spin_lock(&gioc_lock);
list_del(&ioc->list);
spin_unlock(&gioc_lock);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e8a4750f6..421ea511b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1792,8 +1792,7 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
/* handle hba phys */
/* read sas_iounit page 0 */
- sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit0PhyData_t));
+ sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -1833,8 +1832,7 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
}
/* read sas_iounit page 1 */
- sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit1PhyData_t));
+ sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -1944,8 +1942,7 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
/* handle hba phys */
/* sas_iounit page 1 */
- sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
- sizeof(Mpi2SasIOUnit1PhyData_t));
+ sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
index 5f3328f01..edb8fe709 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_pages.h
@@ -20,12 +20,12 @@
#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER (0xE0)
#define MPI26_DRIVER_TRIGGER_PAGE0_PAGEVERSION (0x01)
-typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 {
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TRIGGER_0 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
U16 TriggerFlags; /* 0x08 */
U16 Reserved0xA; /* 0x0A */
U32 Reserved0xC[61]; /* 0x0C */
-} _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0, Mpi26DriverTriggerPage0_t;
+} _MPI26_CONFIG_PAGE_DRIVER_TRIGGER_0, Mpi26DriverTriggerPage0_t;
/* Trigger Flags */
#define MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID (0x0001)
@@ -34,61 +34,61 @@ typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_0 {
#define MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID (0x0008)
#define MPI26_DRIVER_TRIGGER_PAGE1_PAGEVERSION (0x01)
-typedef struct _MPI26_DRIVER_MASTER_TIGGER_ENTRY {
+typedef struct _MPI26_DRIVER_MASTER_TRIGGER_ENTRY {
U32 MasterTriggerFlags;
-} MPI26_DRIVER_MASTER_TIGGER_ENTRY;
+} MPI26_DRIVER_MASTER_TRIGGER_ENTRY;
#define MPI26_MAX_MASTER_TRIGGERS (1)
-typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_1 {
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TRIGGER_1 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
U16 NumMasterTrigger; /* 0x08 */
U16 Reserved0xA; /* 0x0A */
- MPI26_DRIVER_MASTER_TIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */
-} MPI26_CONFIG_PAGE_DRIVER_TIGGER_1, Mpi26DriverTriggerPage1_t;
+ MPI26_DRIVER_MASTER_TRIGGER_ENTRY MasterTriggers[MPI26_MAX_MASTER_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TRIGGER_1, Mpi26DriverTriggerPage1_t;
#define MPI26_DRIVER_TRIGGER_PAGE2_PAGEVERSION (0x01)
-typedef struct _MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY {
+typedef struct _MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY {
U16 MPIEventCode; /* 0x00 */
U16 MPIEventCodeSpecific; /* 0x02 */
-} MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY;
+} MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY;
#define MPI26_MAX_MPI_EVENT_TRIGGERS (20)
-typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_2 {
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TRIGGER_2 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
U16 NumMPIEventTrigger; /* 0x08 */
U16 Reserved0xA; /* 0x0A */
- MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */
-} MPI26_CONFIG_PAGE_DRIVER_TIGGER_2, Mpi26DriverTriggerPage2_t;
+ MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY MPIEventTriggers[MPI26_MAX_MPI_EVENT_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TRIGGER_2, Mpi26DriverTriggerPage2_t;
#define MPI26_DRIVER_TRIGGER_PAGE3_PAGEVERSION (0x01)
-typedef struct _MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY {
+typedef struct _MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY {
U8 ASCQ; /* 0x00 */
U8 ASC; /* 0x01 */
U8 SenseKey; /* 0x02 */
U8 Reserved; /* 0x03 */
-} MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY;
+} MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY;
#define MPI26_MAX_SCSI_SENSE_TRIGGERS (20)
-typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_3 {
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TRIGGER_3 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
U16 NumSCSISenseTrigger; /* 0x08 */
U16 Reserved0xA; /* 0x0A */
- MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */
-} MPI26_CONFIG_PAGE_DRIVER_TIGGER_3, Mpi26DriverTriggerPage3_t;
+ MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY SCSISenseTriggers[MPI26_MAX_SCSI_SENSE_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TRIGGER_3, Mpi26DriverTriggerPage3_t;
#define MPI26_DRIVER_TRIGGER_PAGE4_PAGEVERSION (0x01)
-typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY {
+typedef struct _MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY {
U16 IOCStatus; /* 0x00 */
U16 Reserved; /* 0x02 */
U32 LogInfo; /* 0x04 */
-} MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY;
+} MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY;
#define MPI26_MAX_LOGINFO_TRIGGERS (20)
-typedef struct _MPI26_CONFIG_PAGE_DRIVER_TIGGER_4 {
+typedef struct _MPI26_CONFIG_PAGE_DRIVER_TRIGGER_4 {
MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
U16 NumIOCStatusLogInfoTrigger; /* 0x08 */
U16 Reserved0xA; /* 0x0A */
- MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */
-} MPI26_CONFIG_PAGE_DRIVER_TIGGER_4, Mpi26DriverTriggerPage4_t;
+ MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY IOCStatusLoginfoTriggers[MPI26_MAX_LOGINFO_TRIGGERS]; /* 0x0C */
+} MPI26_CONFIG_PAGE_DRIVER_TRIGGER_4, Mpi26DriverTriggerPage4_t;
#endif
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index cc07ba41f..1d64e5056 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -141,8 +141,7 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
return;
}
- sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
- sizeof(Mpi2RaidVol0PhysDisk_t));
+ sz = struct_size(vol_pg0, PhysDisk, num_pds);
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n");
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index 21d638299..e08a38e2a 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -103,7 +103,7 @@ static int mvme16x_probe(struct platform_device *dev)
return -ENODEV;
}
-static int mvme16x_device_remove(struct platform_device *dev)
+static void mvme16x_device_remove(struct platform_device *dev)
{
struct Scsi_Host *host = platform_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
@@ -120,8 +120,6 @@ static int mvme16x_device_remove(struct platform_device *dev)
NCR_700_release(host);
kfree(hostdata);
free_irq(host->irq, host);
-
- return 0;
}
static struct platform_driver mvme16x_scsi_driver = {
@@ -129,7 +127,7 @@ static struct platform_driver mvme16x_scsi_driver = {
.name = "mvme16x-scsi",
},
.probe = mvme16x_probe,
- .remove = mvme16x_device_remove,
+ .remove_new = mvme16x_device_remove,
};
static int __init mvme16x_scsi_init(void)
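
mvme16x (and qlogicpti below) are mechanical conversions to the platform core's void-returning remove callback: the old int return was ignored by the bus code anyway, so .remove_new makes the contract explicit. The shape of the conversion, sketched with a hypothetical driver:

    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct example_priv { int dummy; };     /* hypothetical driver state */

    static int example_probe(struct platform_device *pdev)
    {
            struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;
            platform_set_drvdata(pdev, priv);
            return 0;
    }

    static void example_remove(struct platform_device *pdev)
    {
            /* returns void: there is nothing useful a remove error could do */
            kfree(platform_get_drvdata(pdev));
    }

    static struct platform_driver example_driver = {
            .driver     = { .name = "example" },
            .probe      = example_probe,
            .remove_new = example_remove,
    };
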
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index ca2e932dd..f684eb5e0 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1775,9 +1775,9 @@ static ssize_t raid_state_show(struct device *dev,
name = myrb_devstate_name(ldev_info->state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->state);
} else {
struct myrb_pdev_state *pdev_info = sdev->hostdata;
@@ -1796,9 +1796,9 @@ static ssize_t raid_state_show(struct device *dev,
else
name = myrb_devstate_name(pdev_info->state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
pdev_info->state);
}
return ret;
@@ -1886,11 +1886,11 @@ static ssize_t raid_level_show(struct device *dev,
name = myrb_raidlevel_name(ldev_info->raid_level);
if (!name)
- return snprintf(buf, 32, "Invalid (%02X)\n",
+ return snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->state);
- return snprintf(buf, 32, "%s\n", name);
+ return snprintf(buf, 64, "%s\n", name);
}
- return snprintf(buf, 32, "Physical Drive\n");
+ return snprintf(buf, 64, "Physical Drive\n");
}
static DEVICE_ATTR_RO(raid_level);
@@ -1903,15 +1903,15 @@ static ssize_t rebuild_show(struct device *dev,
unsigned char status;
if (sdev->channel < myrb_logical_channel(sdev->host))
- return snprintf(buf, 32, "physical device - not rebuilding\n");
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
status = myrb_get_rbld_progress(cb, &rbld_buf);
if (rbld_buf.ldev_num != sdev->id ||
status != MYRB_STATUS_SUCCESS)
- return snprintf(buf, 32, "not rebuilding\n");
+ return snprintf(buf, 64, "not rebuilding\n");
- return snprintf(buf, 32, "rebuilding block %u of %u\n",
+ return snprintf(buf, 64, "rebuilding block %u of %u\n",
rbld_buf.ldev_size - rbld_buf.blocks_left,
rbld_buf.ldev_size);
}
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index a1eec65a9..e824be9d9 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -947,9 +947,9 @@ static ssize_t raid_state_show(struct device *dev,
name = myrs_devstate_name(ldev_info->dev_state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->dev_state);
} else {
struct myrs_pdev_info *pdev_info;
@@ -958,9 +958,9 @@ static ssize_t raid_state_show(struct device *dev,
pdev_info = sdev->hostdata;
name = myrs_devstate_name(pdev_info->dev_state);
if (name)
- ret = snprintf(buf, 32, "%s\n", name);
+ ret = snprintf(buf, 64, "%s\n", name);
else
- ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ret = snprintf(buf, 64, "Invalid (%02X)\n",
pdev_info->dev_state);
}
return ret;
@@ -1066,13 +1066,13 @@ static ssize_t raid_level_show(struct device *dev,
ldev_info = sdev->hostdata;
name = myrs_raid_level_name(ldev_info->raid_level);
if (!name)
- return snprintf(buf, 32, "Invalid (%02X)\n",
+ return snprintf(buf, 64, "Invalid (%02X)\n",
ldev_info->dev_state);
} else
name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
- return snprintf(buf, 32, "%s\n", name);
+ return snprintf(buf, 64, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);
@@ -1086,7 +1086,7 @@ static ssize_t rebuild_show(struct device *dev,
unsigned char status;
if (sdev->channel < cs->ctlr_info->physchan_present)
- return snprintf(buf, 32, "physical device - not rebuilding\n");
+ return snprintf(buf, 64, "physical device - not rebuilding\n");
ldev_info = sdev->hostdata;
ldev_num = ldev_info->ldev_num;
@@ -1098,11 +1098,11 @@ static ssize_t rebuild_show(struct device *dev,
return -EIO;
}
if (ldev_info->rbld_active) {
- return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+ return snprintf(buf, 64, "rebuilding block %zu of %zu\n",
(size_t)ldev_info->rbld_lba,
(size_t)ldev_info->cfg_devsize);
} else
- return snprintf(buf, 32, "not rebuilding\n");
+ return snprintf(buf, 64, "not rebuilding\n");
}
static ssize_t rebuild_store(struct device *dev,
@@ -1190,7 +1190,7 @@ static ssize_t consistency_check_show(struct device *dev,
unsigned short ldev_num;
if (sdev->channel < cs->ctlr_info->physchan_present)
- return snprintf(buf, 32, "physical device - not checking\n");
+ return snprintf(buf, 64, "physical device - not checking\n");
ldev_info = sdev->hostdata;
if (!ldev_info)
@@ -1198,11 +1198,11 @@ static ssize_t consistency_check_show(struct device *dev,
ldev_num = ldev_info->ldev_num;
myrs_get_ldev_info(cs, ldev_num, ldev_info);
if (ldev_info->cc_active)
- return snprintf(buf, 32, "checking block %zu of %zu\n",
+ return snprintf(buf, 64, "checking block %zu of %zu\n",
(size_t)ldev_info->cc_lba,
(size_t)ldev_info->cfg_devsize);
else
- return snprintf(buf, 32, "not checking\n");
+ return snprintf(buf, 64, "not checking\n");
}
static ssize_t consistency_check_store(struct device *dev,
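
The myrb/myrs changes are truncation fixes: strings such as "physical device - not rebuilding\n" overflow a 32-byte bound, so snprintf() silently cut them short. Since sysfs show() buffers are a full PAGE_SIZE, an equivalent idiom (not the one this patch chose) is sysfs_emit(), which carries that bound itself; a sketch with a placeholder state string:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t raid_state_show_sketch(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
    {
            const char *name = "Online";    /* placeholder for the state lookup */

            /* bounded by PAGE_SIZE internally; no hand-picked 32/64 limit */
            return sysfs_emit(buf, "%s\n", name);
    }
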
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 26e6b3e3a..dcde55c8e 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1100,7 +1100,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
- if (pcnt > app_req.num_ports)
+ if (pcnt >= app_req.num_ports)
break;
app_reply->elem[pcnt].rekey_count =
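
The qla_edif change is a one-byte bound fix: pcnt indexes app_reply->elem[], an array sized for app_req.num_ports entries, so the last valid index is num_ports - 1 and the loop must stop as soon as pcnt reaches num_ports. The same bound in isolation, as a hypothetical helper:

    #include <linux/types.h>

    /* copies at most num_ports entries; never touches dst[num_ports] */
    static u32 fill_ports_sketch(u32 *dst, u32 num_ports,
                                 const u32 *src, u32 src_cnt)
    {
            u32 pcnt;

            for (pcnt = 0; pcnt < src_cnt; pcnt++) {
                    if (pcnt >= num_ports)  /* '>' here would overrun by one */
                            break;
                    dst[pcnt] = src[pcnt];
            }
            return pcnt;
    }
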
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 931bdaeae..1e2f52210 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2889,7 +2889,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
static void
qla_trace_init(void)
{
- qla_trc_array = trace_array_get_by_name("qla2xxx");
+ qla_trc_array = trace_array_get_by_name("qla2xxx", NULL);
if (!qla_trc_array) {
ql_log(ql_log_fatal, NULL, 0x0001,
"Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 3b95f7a62..5d560d9b8 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1409,7 +1409,7 @@ fail_unlink:
return -ENODEV;
}
-static int qpti_sbus_remove(struct platform_device *op)
+static void qpti_sbus_remove(struct platform_device *op)
{
struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
@@ -1438,8 +1438,6 @@ static int qpti_sbus_remove(struct platform_device *op)
of_iounmap(&op->resource[0], qpti->sreg, sizeof(unsigned char));
scsi_host_put(qpti->qhost);
-
- return 0;
}
static const struct of_device_id qpti_match[] = {
@@ -1465,7 +1463,7 @@ static struct platform_driver qpti_sbus_driver = {
.of_match_table = qpti_match,
},
.probe = qpti_sbus_probe,
- .remove = qpti_sbus_remove,
+ .remove_new = qpti_sbus_remove,
};
module_platform_driver(qpti_sbus_driver);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6d8218a44..d03d66f11 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -339,7 +339,7 @@ struct sdebug_dev_info {
bool used;
/* For ZBC devices */
- enum blk_zoned_model zmodel;
+ bool zoned;
unsigned int zcap;
unsigned int zsize;
unsigned int zsize_shift;
@@ -844,8 +844,11 @@ static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
static bool sdebug_allow_restart;
-/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
-static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
+static enum {
+ BLK_ZONED_NONE = 0,
+ BLK_ZONED_HA = 1,
+ BLK_ZONED_HM = 2,
+} sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
@@ -1815,8 +1818,6 @@ static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
arr[1] = 1; /* non rotating medium (e.g. solid state) */
arr[2] = 0;
arr[3] = 5; /* less than 1.8" */
- if (devip->zmodel == BLK_ZONED_HA)
- arr[4] = 1 << 4; /* zoned field = 01b */
return 0x3c;
}
@@ -1883,7 +1884,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (! arr)
return DID_REQUEUE << 16;
is_disk = (sdebug_ptype == TYPE_DISK);
- is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_zbc = devip->zoned;
is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
@@ -2195,7 +2196,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
* Since the scsi_debug READ CAPACITY implementation always reports the
* total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
*/
- if (devip->zmodel == BLK_ZONED_HM)
+ if (devip->zoned)
arr[12] |= 1 << 4;
arr[15] = sdebug_lowest_aligned & 0xff;
@@ -2648,7 +2649,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
- is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_zbc = devip->zoned;
if ((is_disk || is_zbc) && !dbd)
bd_len = llbaa ? 16 : 8;
else
@@ -3194,8 +3195,6 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
if (!write) {
- if (devip->zmodel == BLK_ZONED_HA)
- return 0;
/* For host-managed, reads cannot cross zone types boundaries */
if (zsp->z_type != zsp_end->z_type) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,
@@ -5322,7 +5321,7 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
if (devip->zcap < devip->zsize)
devip->nr_zones += devip->nr_seq_zones;
- if (devip->zmodel == BLK_ZONED_HM) {
+ if (devip->zoned) {
/* zbc_max_open_zones can be 0, meaning "not reported" */
if (sdeb_zbc_max_open >= devip->nr_zones - 1)
devip->max_open = (devip->nr_zones - 1) / 2;
@@ -5347,7 +5346,7 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
zsp->z_size =
min_t(u64, devip->zsize, capacity - zstart);
} else if ((zstart & (devip->zsize - 1)) == 0) {
- if (devip->zmodel == BLK_ZONED_HM)
+ if (devip->zoned)
zsp->z_type = ZBC_ZTYPE_SWR;
else
zsp->z_type = ZBC_ZTYPE_SWP;
@@ -5390,13 +5389,13 @@ static struct sdebug_dev_info *sdebug_device_create(
}
devip->sdbg_host = sdbg_host;
if (sdeb_zbc_in_use) {
- devip->zmodel = sdeb_zbc_model;
+ devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
if (sdebug_device_create_zones(devip)) {
kfree(devip);
return NULL;
}
} else {
- devip->zmodel = BLK_ZONED_NONE;
+ devip->zoned = false;
}
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
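
The block layer no longer models host-aware zoned devices, so scsi_debug's per-device state collapses from enum blk_zoned_model to a single bool that is set only for the host-managed model; the private three-value enum above survives purely so the sdeb_zbc_model_s module option keeps parsing. A minimal sketch of the mapping, with a hypothetical helper name:

/* hypothetical helper mirroring how sdebug_device_create() above derives
 * the per-device flag from the module-wide model */
static bool sdeb_model_is_zoned(int model)
{
	switch (model) {
	case BLK_ZONED_HM:
		return true;		/* only host-managed creates zones */
	case BLK_ZONED_HA:		/* host-aware now acts as a plain disk */
	case BLK_ZONED_NONE:
	default:
		return false;
	}
}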
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 43eff1107..612489afe 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -302,6 +302,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
int ret;
WARN_ON_ONCE(!shost->ehandler);
+ WARN_ON_ONCE(!test_bit(SCMD_STATE_INFLIGHT, &scmd->state));
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RECOVERY)) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index df5ac03d5..189dfeb37 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -543,10 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_queue_add_random(q))
add_disk_randomness(req->q->disk);
- if (!blk_rq_is_passthrough(req)) {
- WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
- cmd->flags &= ~SCMD_INITIALIZED;
- }
+ WARN_ON_ONCE(!blk_rq_is_passthrough(req) &&
+ !(cmd->flags & SCMD_INITIALIZED));
+ cmd->flags = 0;
/*
* Calling rcu_barrier() is not necessary here because the
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f76dbeade..35200a7a7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3117,7 +3117,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
struct request_queue *q = sdkp->disk->queue;
struct scsi_vpd *vpd;
u16 rot;
- u8 zoned;
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb1);
@@ -3128,7 +3127,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}
rot = get_unaligned_be16(&vpd->data[4]);
- zoned = (vpd->data[8] >> 4) & 3;
+ sdkp->zoned = (vpd->data[8] >> 4) & 3;
rcu_read_unlock();
if (rot == 1) {
@@ -3136,39 +3135,37 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
}
+
+#ifdef CONFIG_BLK_DEV_ZONED /* sd_probe rejects ZBD devices early otherwise */
if (sdkp->device->type == TYPE_ZBC) {
/*
- * Host-managed: Per ZBC and ZAC specifications, writes in
- * sequential write required zones of host-managed devices must
- * be aligned to the device physical block size.
+ * Host-managed.
+ */
+ disk_set_zoned(sdkp->disk);
+
+ /*
+ * Per ZBC and ZAC specifications, writes in sequential write
+ * required zones of host-managed devices must be aligned to
+ * the device physical block size.
*/
- disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
} else {
- sdkp->zoned = zoned;
- if (sdkp->zoned == 1) {
- /* Host-aware */
- disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
- } else {
- /* Regular disk or drive managed disk */
- disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
- }
+ /*
+ * Host-aware devices are treated as conventional.
+ */
+ WARN_ON_ONCE(blk_queue_is_zoned(q));
}
+#endif /* CONFIG_BLK_DEV_ZONED */
if (!sdkp->first_scan)
return;
- if (blk_queue_is_zoned(q)) {
- sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
- q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
- } else {
- if (sdkp->zoned == 1)
- sd_printk(KERN_NOTICE, sdkp,
- "Host-aware SMR disk used as regular disk\n");
- else if (sdkp->zoned == 2)
- sd_printk(KERN_NOTICE, sdkp,
- "Drive-managed SMR disk\n");
- }
+ if (blk_queue_is_zoned(q))
+ sd_printk(KERN_NOTICE, sdkp, "Host-managed zoned block device\n");
+ else if (sdkp->zoned == 1)
+ sd_printk(KERN_NOTICE, sdkp, "Host-aware SMR disk used as regular disk\n");
+ else if (sdkp->zoned == 2)
+ sd_printk(KERN_NOTICE, sdkp, "Drive-managed SMR disk\n");
}
/**
@@ -3526,7 +3523,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
} else {
q->limits.io_opt = 0;
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
- (sector_t)BLK_DEF_MAX_SECTORS);
+ (sector_t)BLK_DEF_MAX_SECTORS_CAP);
}
/*
@@ -3760,7 +3757,7 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
- put_device(&sdkp->disk_dev);
+ device_unregister(&sdkp->disk_dev);
put_disk(gd);
goto out;
}
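
disk_set_zoned() has lost its model argument: it can only mark a disk host-managed, and sd calls it only under CONFIG_BLK_DEV_ZONED. A minimal sketch of the new driver-side idiom, assuming a gendisk that was just identified as host-managed (the helper name is hypothetical):

#include <linux/blkdev.h>

static void sketch_mark_host_managed(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	disk_set_zoned(disk);	/* host-managed is the only zoned model left */

	/* Per ZBC/ZAC, writes to sequential-write-required zones must be
	 * aligned to the physical block size. */
	blk_queue_zone_write_granularity(q, queue_physical_block_size(q));
}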
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index a25215507..26af5ab7d 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -836,10 +836,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
/*
* For all zoned disks, initialize zone append emulation data if not
- * already done. This is necessary also for host-aware disks used as
- * regular disks due to the presence of partitions as these partitions
- * may be deleted and the disk zoned model changed back from
- * BLK_ZONED_NONE to BLK_ZONED_HA.
+ * already done.
*/
if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
ret = sd_zbc_init_disk(sdkp);
@@ -932,17 +929,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
sdkp->device->use_10_for_rw = 0;
sdkp->device->use_16_for_sync = 1;
- if (!blk_queue_is_zoned(q)) {
- /*
- * This can happen for a host aware disk with partitions.
- * The block device zone model was already cleared by
- * disk_set_zoned(). Only free the scsi disk zone
- * information and exit early.
- */
- sd_zbc_free_zone_info(sdkp);
- return 0;
- }
-
/* Check zoned block device characteristics (unconstrained reads) */
ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 86210e4dd..b2d02daca 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -285,6 +285,7 @@ sg_open(struct inode *inode, struct file *filp)
int dev = iminor(inode);
int flags = filp->f_flags;
struct request_queue *q;
+ struct scsi_device *device;
Sg_device *sdp;
Sg_fd *sfp;
int retval;
@@ -301,11 +302,12 @@ sg_open(struct inode *inode, struct file *filp)
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
- retval = scsi_device_get(sdp->device);
+ device = sdp->device;
+ retval = scsi_device_get(device);
if (retval)
goto sg_put;
- retval = scsi_autopm_get_device(sdp->device);
+ retval = scsi_autopm_get_device(device);
if (retval)
goto sdp_put;
@@ -313,7 +315,7 @@ sg_open(struct inode *inode, struct file *filp)
* check if O_NONBLOCK. Permits SCSI commands to be issued
* during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
- scsi_block_when_processing_errors(sdp->device))) {
+ scsi_block_when_processing_errors(device))) {
retval = -ENXIO;
/* we are in error recovery for this device */
goto error_out;
@@ -344,7 +346,7 @@ sg_open(struct inode *inode, struct file *filp)
if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
- q = sdp->device->request_queue;
+ q = device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
sfp = sg_add_sfp(sdp);
@@ -370,10 +372,11 @@ out_undo:
error_mutex_locked:
mutex_unlock(&sdp->open_rel_lock);
error_out:
- scsi_autopm_put_device(sdp->device);
+ scsi_autopm_put_device(device);
sdp_put:
- scsi_device_put(sdp->device);
- goto sg_put;
+ kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
+ return retval;
}
/* Release resources associated with a successful sg_open()
@@ -2207,6 +2210,7 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ struct scsi_device *device = sdp->device;
Sg_request *srp;
unsigned long iflags;
@@ -2232,8 +2236,8 @@ sg_remove_sfp_usercontext(struct work_struct *work)
"sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
- scsi_device_put(sdp->device);
kref_put(&sdp->d_ref, sg_device_destroy);
+ scsi_device_put(device);
module_put(THIS_MODULE);
}
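
The sg changes above are a use-after-free fix, not a cleanup: sdp->device must be cached in a local pointer before the final kref_put(), because dropping the sg_device reference may free sdp and, with it, the only path back to the scsi_device. The ordering rule, condensed into a hypothetical helper:

static void sg_drop_refs_sketch(Sg_device *sdp)
{
	struct scsi_device *device = sdp->device;	/* cache first */

	kref_put(&sdp->d_ref, sg_device_destroy);	/* sdp may be freed here */
	scsi_device_put(device);	/* local copy keeps this safe */
}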
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 88e2b5eb9..b0bef83db 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -291,7 +291,7 @@ out:
return err;
}
-static int sgiwd93_remove(struct platform_device *pdev)
+static void sgiwd93_remove(struct platform_device *pdev)
{
struct Scsi_Host *host = platform_get_drvdata(pdev);
struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
@@ -302,12 +302,11 @@ static int sgiwd93_remove(struct platform_device *pdev)
dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
DMA_TO_DEVICE);
scsi_host_put(host);
- return 0;
}
static struct platform_driver sgiwd93_driver = {
.probe = sgiwd93_probe,
- .remove = sgiwd93_remove,
+ .remove_new = sgiwd93_remove,
.driver = {
.name = "sgiwd93",
}
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 868453b18..385180c98 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.24-046"
+#define DRIVER_VERSION "2.1.26-030"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 24
-#define DRIVER_REVISION 46
+#define DRIVER_RELEASE 26
+#define DRIVER_REVISION 30
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 678651b9b..9df1c90a2 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -104,7 +104,7 @@ static int snirm710_probe(struct platform_device *dev)
return -ENODEV;
}
-static int snirm710_driver_remove(struct platform_device *dev)
+static void snirm710_driver_remove(struct platform_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
struct NCR_700_Host_Parameters *hostdata =
@@ -115,13 +115,11 @@ static int snirm710_driver_remove(struct platform_device *dev)
free_irq(host->irq, host);
iounmap(hostdata->base);
kfree(hostdata);
-
- return 0;
}
static struct platform_driver snirm710_driver = {
.probe = snirm710_probe,
- .remove = snirm710_driver_remove,
+ .remove_new = snirm710_driver_remove,
.driver = {
.name = "snirm_53c710",
},
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index abf229b84..4a8cc2e82 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -641,7 +641,7 @@ fail_alloc:
return error;
}
-static int __exit sun3_scsi_remove(struct platform_device *pdev)
+static void __exit sun3_scsi_remove(struct platform_device *pdev)
{
struct Scsi_Host *instance = platform_get_drvdata(pdev);
struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -654,11 +654,10 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
if (udc_regs)
dvma_free(udc_regs);
iounmap(ioaddr);
- return 0;
}
static struct platform_driver sun3_scsi_driver = {
- .remove = __exit_p(sun3_scsi_remove),
+ .remove_new = __exit_p(sun3_scsi_remove),
.driver = {
.name = DRV_MODULE_NAME,
},
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 30f67cbf4..09219c362 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -243,7 +243,7 @@ fail:
return err;
}
-static int esp_sun3x_remove(struct platform_device *dev)
+static void esp_sun3x_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
@@ -261,13 +261,11 @@ static int esp_sun3x_remove(struct platform_device *dev)
esp->command_block_dma);
scsi_host_put(esp->host);
-
- return 0;
}
static struct platform_driver esp_sun3x_driver = {
.probe = esp_sun3x_probe,
- .remove = esp_sun3x_remove,
+ .remove_new = esp_sun3x_remove,
.driver = {
.name = "sun3x_esp",
},
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index afa9d02a3..64a7c2c6c 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -550,7 +550,7 @@ static int esp_sbus_probe(struct platform_device *op)
return ret;
}
-static int esp_sbus_remove(struct platform_device *op)
+static void esp_sbus_remove(struct platform_device *op)
{
struct esp *esp = dev_get_drvdata(&op->dev);
struct platform_device *dma_of = esp->dma;
@@ -581,8 +581,6 @@ static int esp_sbus_remove(struct platform_device *op)
dev_set_drvdata(&op->dev, NULL);
put_device(&dma_of->dev);
-
- return 0;
}
static const struct of_device_id esp_match[] = {
@@ -605,7 +603,7 @@ static struct platform_driver esp_sbus_driver = {
.of_match_table = esp_match,
},
.probe = esp_sbus_probe,
- .remove = esp_sbus_remove,
+ .remove_new = esp_sbus_remove,
};
module_platform_driver(esp_sbus_driver);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 9d1bdcdc1..617eb892f 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -37,6 +37,11 @@
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
+static unsigned int virtscsi_poll_queues;
+module_param(virtscsi_poll_queues, uint, 0644);
+MODULE_PARM_DESC(virtscsi_poll_queues,
+ "The number of dedicated virtqueues for polling I/O");
+
/* Command queue element */
struct virtio_scsi_cmd {
struct scsi_cmnd *sc;
@@ -76,6 +81,7 @@ struct virtio_scsi {
struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
u32 num_queues;
+ int io_queues[HCTX_MAX_TYPES];
struct hlist_node node;
@@ -182,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(vscsi, buf);
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
@@ -722,9 +726,49 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
static void virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ int i, qoff;
+
+ for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ map->nr_queues = vscsi->io_queues[i];
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+
+ if (map->nr_queues == 0)
+ continue;
+
+ /*
+ * Regular queues have interrupts and hence CPU affinity is
+ * defined by the core virtio code, but polling queues have
+ * no interrupts so we let the block layer assign CPU affinity.
+ */
+ if (i == HCTX_TYPE_POLL)
+ blk_mq_map_queues(map);
+ else
+ blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+ }
+}
+
+static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct virtio_scsi *vscsi = shost_priv(shost);
+ struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
+ unsigned long flags;
+ unsigned int len;
+ int found = 0;
+ void *buf;
+
+ spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
+
+ while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
+ virtscsi_complete_cmd(vscsi, buf);
+ found++;
+ }
+
+ spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
- blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+ return found;
}
static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
@@ -751,6 +795,7 @@ static const struct scsi_host_template virtscsi_host_template = {
.this_id = -1,
.cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand,
+ .mq_poll = virtscsi_mq_poll,
.commit_rqs = virtscsi_commit_rqs,
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
@@ -795,13 +840,14 @@ static int virtscsi_init(struct virtio_device *vdev,
{
int err;
u32 i;
- u32 num_vqs;
+ u32 num_vqs, num_poll_vqs, num_req_vqs;
vq_callback_t **callbacks;
const char **names;
struct virtqueue **vqs;
struct irq_affinity desc = { .pre_vectors = 2 };
- num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
+ num_req_vqs = vscsi->num_queues;
+ num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
GFP_KERNEL);
@@ -812,15 +858,31 @@ static int virtscsi_init(struct virtio_device *vdev,
goto out;
}
+ num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
+ num_req_vqs - 1);
+ vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
+ vscsi->io_queues[HCTX_TYPE_READ] = 0;
+ vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+ dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+ vscsi->io_queues[HCTX_TYPE_DEFAULT],
+ vscsi->io_queues[HCTX_TYPE_READ],
+ vscsi->io_queues[HCTX_TYPE_POLL]);
+
callbacks[0] = virtscsi_ctrl_done;
callbacks[1] = virtscsi_event_done;
names[0] = "control";
names[1] = "event";
- for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
+ for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
callbacks[i] = virtscsi_req_done;
names[i] = "request";
}
+ for (; i < num_vqs; i++) {
+ callbacks[i] = NULL;
+ names[i] = "request_poll";
+ }
+
/* Discover virtqueues and write information to configuration. */
err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
if (err)
@@ -874,6 +936,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
shost->sg_tablesize = sg_elems;
+ shost->nr_maps = 1;
vscsi = shost_priv(shost);
vscsi->vdev = vdev;
vscsi->num_queues = num_queues;
@@ -883,6 +946,9 @@ static int virtscsi_probe(struct virtio_device *vdev)
if (err)
goto virtscsi_init_failed;
+ if (vscsi->io_queues[HCTX_TYPE_POLL])
+ shost->nr_maps = HCTX_TYPE_POLL + 1;
+
shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);
cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
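
virtio-scsi now supports polled I/O: the last virtscsi_poll_queues request virtqueues are created with NULL callbacks (so they raise no interrupts), counted in io_queues[HCTX_TYPE_POLL], and completed from ->mq_poll. Loading the module with virtscsi_poll_queues=2 on a device exposing four request queues yields the "2/0/2 default/read/poll queues" split. A sketch of the arithmetic done in virtscsi_init(), wrapped in a hypothetical helper:

#include <linux/blk-mq.h>

static void sketch_queue_split(unsigned int num_queues,
			       int io_queues[HCTX_MAX_TYPES])
{
	unsigned int poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
				      num_queues - 1);	/* keep >= 1 irq queue */

	io_queues[HCTX_TYPE_DEFAULT] = num_queues - poll_vqs;	/* irq driven */
	io_queues[HCTX_TYPE_READ]    = 0;			/* map left empty */
	io_queues[HCTX_TYPE_POLL]    = poll_vqs;	/* completed via ->mq_poll */
}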
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index e05473c5c..16018009a 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -59,6 +59,7 @@ struct maple_device_specify {
static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];
+static const struct bus_type maple_bus_type;
/**
* maple_driver_register - register a maple driver
@@ -773,11 +774,10 @@ static struct maple_driver maple_unsupported_device = {
/*
* maple_bus_type - core maple bus structure
*/
-struct bus_type maple_bus_type = {
+static const struct bus_type maple_bus_type = {
.name = "maple",
.match = maple_match_bus_driver,
};
-EXPORT_SYMBOL_GPL(maple_bus_type);
static struct device maple_bus = {
.init_name = "maple",
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 10a9ff84f..5d924e946 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -22,7 +22,6 @@ source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
-source "drivers/soc/sifive/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 0706a27d1..ba8f5b546 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -28,7 +28,6 @@ obj-y += qcom/
obj-y += renesas/
obj-y += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
-obj-y += sifive/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index eff486a77..6388cbe1e 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -4,9 +4,22 @@ if ARCH_APPLE || COMPILE_TEST
menu "Apple SoC drivers"
+config APPLE_MAILBOX
+ tristate "Apple SoC mailboxes"
+ depends on PM
+ depends on ARCH_APPLE || (64BIT && COMPILE_TEST)
+ default ARCH_APPLE
+ help
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+ Say Y here if you have an Apple SoC.
+
config APPLE_RTKIT
tristate "Apple RTKit co-processor IPC protocol"
- depends on MAILBOX
+ depends on APPLE_MAILBOX
depends on ARCH_APPLE || COMPILE_TEST
default ARCH_APPLE
help
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
index b241e6a65..4d9ab8f30 100644
--- a/drivers/soc/apple/Makefile
+++ b/drivers/soc/apple/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
+apple-mailbox-y = mailbox.o
+
obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
apple-rtkit-y = rtkit.o rtkit-crashlog.o
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
new file mode 100644
index 000000000..49a0955e8
--- /dev/null
+++ b/drivers/soc/apple/mailbox.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mailbox.h"
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+#define APPLE_MBOX_TX_TIMEOUT 500
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+int apple_mbox_send(struct apple_mbox *mbox, const struct apple_mbox_msg msg,
+ bool atomic)
+{
+ unsigned long flags;
+ int ret;
+ u32 mbox_ctrl;
+ long t;
+
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control);
+
+ while (mbox_ctrl & mbox->hw->control_full) {
+ if (atomic) {
+ ret = readl_poll_timeout_atomic(
+ mbox->regs + mbox->hw->a2i_control, mbox_ctrl,
+ !(mbox_ctrl & mbox->hw->control_full), 100,
+ APPLE_MBOX_TX_TIMEOUT * 1000);
+
+ if (ret) {
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+ return ret;
+ }
+
+ break;
+ }
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been cleared again.
+ * If the FIFO is already empty before the ack it will fire again
+ * immediately after the ack.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_send_empty,
+ mbox->regs + mbox->hw->irq_ack);
+ }
+ enable_irq(mbox->irq_send_empty);
+ reinit_completion(&mbox->tx_empty);
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+
+ t = wait_for_completion_interruptible_timeout(
+ &mbox->tx_empty,
+ msecs_to_jiffies(APPLE_MBOX_TX_TIMEOUT));
+ if (t < 0)
+ return t;
+ else if (t == 0)
+ return -ETIMEDOUT;
+
+ spin_lock_irqsave(&mbox->tx_lock, flags);
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control);
+ }
+
+ writeq_relaxed(msg.msg0, mbox->regs + mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg.msg1),
+ mbox->regs + mbox->hw->a2i_send1);
+
+ spin_unlock_irqrestore(&mbox->tx_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(apple_mbox_send);
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_send will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ spin_lock(&mbox->tx_lock);
+ disable_irq_nosync(mbox->irq_send_empty);
+ complete(&mbox->tx_empty);
+ spin_unlock(&mbox->tx_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_poll_locked(struct apple_mbox *mbox)
+{
+ struct apple_mbox_msg msg;
+ int ret = 0;
+
+ u32 mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control);
+
+ while (!(mbox_ctrl & mbox->hw->control_empty)) {
+ msg.msg0 = readq_relaxed(mbox->regs + mbox->hw->i2a_recv0);
+ msg.msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(mbox->regs + mbox->hw->i2a_recv1));
+
+ mbox->rx(mbox, msg, mbox->cookie);
+ ret++;
+ mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control);
+ }
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+ * loop above and the ack below: if a new message arrives in between
+ * those two the interrupt will just fire again immediately after the
+ * ack since it's level triggered.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_recv_not_empty,
+ mbox->regs + mbox->hw->irq_ack);
+ }
+
+ return ret;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *mbox = data;
+
+ spin_lock(&mbox->rx_lock);
+ apple_mbox_poll_locked(mbox);
+ spin_unlock(&mbox->rx_lock);
+
+ return IRQ_HANDLED;
+}
+
+int apple_mbox_poll(struct apple_mbox *mbox)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mbox->rx_lock, flags);
+ ret = apple_mbox_poll_locked(mbox);
+ spin_unlock_irqrestore(&mbox->rx_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(apple_mbox_poll);
+
+int apple_mbox_start(struct apple_mbox *mbox)
+{
+ int ret;
+
+ if (mbox->active)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(mbox->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (mbox->hw->has_irq_controls) {
+ writel_relaxed(mbox->hw->irq_bit_recv_not_empty |
+ mbox->hw->irq_bit_send_empty,
+ mbox->regs + mbox->hw->irq_enable);
+ }
+
+ enable_irq(mbox->irq_recv_not_empty);
+ mbox->active = true;
+ return 0;
+}
+EXPORT_SYMBOL(apple_mbox_start);
+
+void apple_mbox_stop(struct apple_mbox *mbox)
+{
+ if (!mbox->active)
+ return;
+
+ mbox->active = false;
+ disable_irq(mbox->irq_recv_not_empty);
+ pm_runtime_mark_last_busy(mbox->dev);
+ pm_runtime_put_autosuspend(mbox->dev);
+}
+EXPORT_SYMBOL(apple_mbox_stop);
+
+struct apple_mbox *apple_mbox_get(struct device *dev, int index)
+{
+ struct of_phandle_args args;
+ struct platform_device *pdev;
+ struct apple_mbox *mbox;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &args);
+ if (ret || !args.np)
+ return ERR_PTR(ret);
+
+ pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ mbox = platform_get_drvdata(pdev);
+ if (!mbox)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
+ return ERR_PTR(-ENODEV);
+
+ return mbox;
+}
+EXPORT_SYMBOL(apple_mbox_get);
+
+struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name)
+{
+ int index;
+
+ index = of_property_match_string(dev->of_node, "mbox-names", name);
+ if (index < 0)
+ return ERR_PTR(index);
+
+ return apple_mbox_get(dev, index);
+}
+EXPORT_SYMBOL(apple_mbox_get_byname);
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = &pdev->dev;
+ mbox->hw = of_device_get_match_data(dev);
+ if (!mbox->hw)
+ return -EINVAL;
+
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ spin_lock_init(&mbox->rx_lock);
+ spin_lock_init(&mbox->tx_lock);
+ init_completion(&mbox->tx_empty);
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_recv_not_empty,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq,
+ IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mbox);
+ return 0;
+}
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
diff --git a/drivers/soc/apple/mailbox.h b/drivers/soc/apple/mailbox.h
new file mode 100644
index 000000000..f73a8913d
--- /dev/null
+++ b/drivers/soc/apple/mailbox.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple mailbox message format
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_MAILBOX_H_
+#define _APPLE_MAILBOX_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/* Encodes a single 96-bit message sent over the single channel */
+struct apple_mbox_msg {
+ u64 msg0;
+ u32 msg1;
+};
+
+struct apple_mbox {
+ struct device *dev;
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+ bool active;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ spinlock_t rx_lock;
+ spinlock_t tx_lock;
+
+ struct completion tx_empty;
+
+ /** Receive callback for incoming messages */
+ void (*rx)(struct apple_mbox *mbox, struct apple_mbox_msg msg, void *cookie);
+ void *cookie;
+};
+
+struct apple_mbox *apple_mbox_get(struct device *dev, int index);
+struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name);
+
+int apple_mbox_start(struct apple_mbox *mbox);
+void apple_mbox_stop(struct apple_mbox *mbox);
+int apple_mbox_poll(struct apple_mbox *mbox);
+int apple_mbox_send(struct apple_mbox *mbox, struct apple_mbox_msg msg,
+ bool atomic);
+
+#endif
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 24bd619ec..27c9fa745 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -7,18 +7,17 @@
#ifndef _APPLE_RTKIT_INTERAL_H
#define _APPLE_RTKIT_INTERAL_H
-#include <linux/apple-mailbox.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/workqueue.h>
+#include "mailbox.h"
#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
@@ -28,10 +27,7 @@ struct apple_rtkit {
const struct apple_rtkit_ops *ops;
struct device *dev;
- const char *mbox_name;
- int mbox_idx;
- struct mbox_client mbox_cl;
- struct mbox_chan *mbox_chan;
+ struct apple_mbox *mbox;
struct completion epmap_completion;
struct completion iop_pwr_ack_completion;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index d9f19dc99..e6d940292 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -72,11 +72,6 @@ enum {
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
-struct apple_rtkit_msg {
- struct completion *completion;
- struct apple_mbox_msg mbox_msg;
-};
-
struct apple_rtkit_rx_work {
struct apple_rtkit *rtk;
u8 ep;
@@ -550,12 +545,12 @@ static void apple_rtkit_rx_work(struct work_struct *work)
kfree(rtk_work);
}
-static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+static void apple_rtkit_rx(struct apple_mbox *mbox, struct apple_mbox_msg msg,
+ void *cookie)
{
- struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
- struct apple_mbox_msg *msg = mssg;
+ struct apple_rtkit *rtk = cookie;
struct apple_rtkit_rx_work *work;
- u8 ep = msg->msg1;
+ u8 ep = msg.msg1;
/*
* The message was read from a MMIO FIFO and we have to make
@@ -571,7 +566,7 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
rtk->ops->recv_message_early &&
- rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+ rtk->ops->recv_message_early(rtk->cookie, ep, msg.msg0))
return;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
@@ -580,30 +575,18 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
work->rtk = rtk;
work->ep = ep;
- work->msg = msg->msg0;
+ work->msg = msg.msg0;
INIT_WORK(&work->work, apple_rtkit_rx_work);
queue_work(rtk->wq, &work->work);
}
-static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
-{
- struct apple_rtkit_msg *msg =
- container_of(mssg, struct apple_rtkit_msg, mbox_msg);
-
- if (r == -ETIME)
- return;
-
- if (msg->completion)
- complete(msg->completion);
- kfree(msg);
-}
-
int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
struct completion *completion, bool atomic)
{
- struct apple_rtkit_msg *msg;
- int ret;
- gfp_t flags;
+ struct apple_mbox_msg msg = {
+ .msg0 = message,
+ .msg1 = ep,
+ };
if (rtk->crashed)
return -EINVAL;
@@ -611,19 +594,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
!apple_rtkit_is_running(rtk))
return -EINVAL;
- if (atomic)
- flags = GFP_ATOMIC;
- else
- flags = GFP_KERNEL;
-
- msg = kzalloc(sizeof(*msg), flags);
- if (!msg)
- return -ENOMEM;
-
- msg->mbox_msg.msg0 = message;
- msg->mbox_msg.msg1 = ep;
- msg->completion = completion;
-
/*
* The message will be sent with a MMIO write. We need the barrier
* here to ensure any previous writes to buffers are visible to the
@@ -631,51 +601,13 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
*/
dma_wmb();
- ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
- if (ret < 0) {
- kfree(msg);
- return ret;
- }
-
- return 0;
+ return apple_mbox_send(rtk->mbox, msg, atomic);
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
-int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
- unsigned long timeout, bool atomic)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
- int ret;
- long t;
-
- ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
- if (ret < 0)
- return ret;
-
- if (atomic) {
- ret = mbox_flush(rtk->mbox_chan, timeout);
- if (ret < 0)
- return ret;
-
- if (try_wait_for_completion(&completion))
- return 0;
-
- return -ETIME;
- } else {
- t = wait_for_completion_interruptible_timeout(
- &completion, msecs_to_jiffies(timeout));
- if (t < 0)
- return t;
- else if (t == 0)
- return -ETIME;
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
-
int apple_rtkit_poll(struct apple_rtkit *rtk)
{
- return mbox_client_peek_data(rtk->mbox_chan);
+ return apple_mbox_poll(rtk->mbox);
}
EXPORT_SYMBOL_GPL(apple_rtkit_poll);
@@ -697,20 +629,6 @@ int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
}
EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
-static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
-{
- if (rtk->mbox_name)
- rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
- rtk->mbox_name);
- else
- rtk->mbox_chan =
- mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
-
- if (IS_ERR(rtk->mbox_chan))
- return PTR_ERR(rtk->mbox_chan);
- return 0;
-}
-
struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
const char *mbox_name, int mbox_idx,
const struct apple_rtkit_ops *ops)
@@ -736,13 +654,18 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
- rtk->mbox_name = mbox_name;
- rtk->mbox_idx = mbox_idx;
- rtk->mbox_cl.dev = dev;
- rtk->mbox_cl.tx_block = false;
- rtk->mbox_cl.knows_txdone = false;
- rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
- rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+ if (mbox_name)
+ rtk->mbox = apple_mbox_get_byname(dev, mbox_name);
+ else
+ rtk->mbox = apple_mbox_get(dev, mbox_idx);
+
+ if (IS_ERR(rtk->mbox)) {
+ ret = PTR_ERR(rtk->mbox);
+ goto free_rtk;
+ }
+
+ rtk->mbox->rx = apple_rtkit_rx;
+ rtk->mbox->cookie = rtk;
rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
dev_name(rtk->dev));
@@ -751,7 +674,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
goto free_rtk;
}
- ret = apple_rtkit_request_mbox_chan(rtk);
+ ret = apple_mbox_start(rtk->mbox);
if (ret)
goto destroy_wq;
@@ -782,7 +705,7 @@ static int apple_rtkit_wait_for_completion(struct completion *c)
int apple_rtkit_reinit(struct apple_rtkit *rtk)
{
/* make sure we don't handle any messages while reinitializing */
- mbox_free_channel(rtk->mbox_chan);
+ apple_mbox_stop(rtk->mbox);
flush_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
@@ -806,7 +729,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
- return apple_rtkit_request_mbox_chan(rtk);
+ return apple_mbox_start(rtk->mbox);
}
EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
@@ -962,7 +885,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_wake);
void apple_rtkit_free(struct apple_rtkit *rtk)
{
- mbox_free_channel(rtk->mbox_chan);
+ apple_mbox_stop(rtk->mbox);
destroy_workqueue(rtk->wq);
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 2312152a4..f498db9ab 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -166,7 +166,7 @@
struct qmc_xfer_desc {
union {
void (*tx_complete)(void *context);
- void (*rx_complete)(void *context, size_t length);
+ void (*rx_complete)(void *context, size_t length, unsigned int flags);
};
void *context;
};
@@ -177,7 +177,10 @@ struct qmc_chan {
struct qmc *qmc;
void __iomem *s_param;
enum qmc_mode mode;
+ spinlock_t ts_lock; /* Protect timeslots */
+ u64 tx_ts_mask_avail;
u64 tx_ts_mask;
+ u64 rx_ts_mask_avail;
u64 rx_ts_mask;
bool is_reverse_data;
@@ -214,41 +217,47 @@ struct qmc {
u16 __iomem *int_curr;
dma_addr_t int_dma_addr;
size_t int_size;
+ bool is_tsa_64rxtx;
struct list_head chan_head;
struct qmc_chan *chans[64];
};
-static inline void qmc_write16(void __iomem *addr, u16 val)
+static void qmc_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
}
-static inline u16 qmc_read16(void __iomem *addr)
+static u16 qmc_read16(void __iomem *addr)
{
return ioread16be(addr);
}
-static inline void qmc_setbits16(void __iomem *addr, u16 set)
+static void qmc_setbits16(void __iomem *addr, u16 set)
{
qmc_write16(addr, qmc_read16(addr) | set);
}
-static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
qmc_write16(addr, qmc_read16(addr) & ~clr);
}
-static inline void qmc_write32(void __iomem *addr, u32 val)
+static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
+{
+ qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
+}
+
+static void qmc_write32(void __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
-static inline u32 qmc_read32(void __iomem *addr)
+static u32 qmc_read32(void __iomem *addr)
{
return ioread32be(addr);
}
-static inline void qmc_setbits32(void __iomem *addr, u32 set)
+static void qmc_setbits32(void __iomem *addr, u32 set)
{
qmc_write32(addr, qmc_read32(addr) | set);
}
@@ -257,6 +266,7 @@ static inline void qmc_setbits32(void __iomem *addr, u32 set)
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
struct tsa_serial_info tsa_info;
+ unsigned long flags;
int ret;
/* Retrieve info from the TSA related serial */
@@ -264,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
if (ret)
return ret;
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
info->mode = chan->mode;
info->rx_fs_rate = tsa_info.rx_fs_rate;
info->rx_bit_rate = tsa_info.rx_bit_rate;
@@ -272,10 +284,63 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
info->tx_bit_rate = tsa_info.tx_bit_rate;
info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
+ ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
+ ts_info->rx_ts_mask = chan->rx_ts_mask;
+ ts_info->tx_ts_mask = chan->tx_ts_mask;
+
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_get_ts_info);
+
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Only a subset of available timeslots is allowed */
+ if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
+ return -EINVAL;
+ if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
+ return -EINVAL;
+
+ /* In case of common rx/tx table, rx/tx masks must be identical */
+ if (chan->qmc->is_tsa_64rxtx) {
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
+ (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
+ dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
+ ret = -EBUSY;
+ } else {
+ chan->tx_ts_mask = ts_info->tx_ts_mask;
+ chan->rx_ts_mask = ts_info->rx_ts_mask;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_set_ts_info);
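+
A hedged consumer sketch of the new API (assuming struct qmc_chan_ts_info in the consumer-facing qmc header carries the four mask fields used above): qmc_chan_set_ts_info() refuses with -EBUSY while the corresponding direction is running, so the channel is stopped around the update.

#include <soc/fsl/qe/qmc.h>

/* hypothetical helper: shrink a channel to its first available Rx/Tx TS */
static int use_first_ts(struct qmc_chan *chan)
{
	struct qmc_chan_ts_info ts;
	int ret;

	ret = qmc_chan_get_ts_info(chan, &ts);
	if (ret)
		return ret;

	/* keep only the lowest set bit of each availability mask */
	ts.rx_ts_mask = ts.rx_ts_mask_avail & -ts.rx_ts_mask_avail;
	ts.tx_ts_mask = ts.tx_ts_mask_avail & -ts.tx_ts_mask_avail;

	/* both directions must be stopped before changing the masks */
	ret = qmc_chan_stop(chan, QMC_CHAN_READ | QMC_CHAN_WRITE);
	if (ret)
		return ret;

	ret = qmc_chan_set_ts_info(chan, &ts);
	if (ret)
		return ret;

	return qmc_chan_start(chan, QMC_CHAN_READ | QMC_CHAN_WRITE);
}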
+
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
if (param->mode != chan->mode)
@@ -421,7 +486,8 @@ end:
}
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
- void (*complete)(void *context, size_t length), void *context)
+ void (*complete)(void *context, size_t length, unsigned int flags),
+ void *context)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
@@ -454,6 +520,10 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
xfer_desc->rx_complete = complete;
xfer_desc->context = context;
+ /* Clear previous status flags */
+ ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
+ QMC_BD_RX_AB | QMC_BD_RX_CR);
+
/* Activate the descriptor */
ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
wmb(); /* Be sure to flush data before descriptor activation */
@@ -485,7 +555,7 @@ EXPORT_SYMBOL(qmc_chan_read_submit);
static void qmc_chan_read_done(struct qmc_chan *chan)
{
- void (*complete)(void *context, size_t size);
+ void (*complete)(void *context, size_t size, unsigned int flags);
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
cbd_t __iomem *bd;
@@ -527,7 +597,23 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
if (complete) {
spin_unlock_irqrestore(&chan->rx_lock, flags);
- complete(context, datalen);
+
+ /*
+ * Avoid converting between the internal hardware flags and the
+ * software API flags: the BUILD_BUG_ON() checks below guarantee
+ * that the two sets of values are identical.
+ */
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);
+
+ complete(context, datalen,
+ ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
+ QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
spin_lock_irqsave(&chan->rx_lock, flags);
}
@@ -539,6 +625,155 @@ end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
}
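
qmc_chan_read_submit() callbacks now receive the buffer-descriptor status bits, remapped one-to-one onto the QMC_RX_FLAG_HDLC_* values (the BUILD_BUG_ON() block below pins that equivalence). A hedged sketch of a consumer callback under the new signature, with my_* placeholders:

struct my_port {
	unsigned long rx_errors;	/* hypothetical consumer state */
};

void my_deliver_frame(struct my_port *port, size_t len);	/* hypothetical */

static void my_rx_complete(void *context, size_t length, unsigned int flags)
{
	struct my_port *port = context;

	if (flags & (QMC_RX_FLAG_HDLC_ABORT | QMC_RX_FLAG_HDLC_CRC)) {
		port->rx_errors++;	/* drop aborted or corrupt frames */
		return;
	}

	if (flags & QMC_RX_FLAG_HDLC_LAST)
		my_deliver_frame(port, length);	/* complete HDLC frame */
}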
+static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /*
+ * Use a common 64-entry Tx/Rx table.
+ * The Tx and Rx timeslot assignments must therefore be identical.
+ */
+ if (chan->tx_ts_mask != chan->rx_ts_mask) {
+ dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on the Rx timeslots */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on the Rx timeslots */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a 32-entry Rx table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on the Rx timeslots */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on the Rx timeslots */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a 32-entry Tx table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on the Tx timeslots */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on the Tx timeslots */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32tx(chan, &info, enable);
+}
+
+static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32rx(chan, &info, enable);
+}
+
static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
@@ -551,6 +786,12 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->is_rx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP RECEIVE command */
ret = qmc_chan_command(chan, 0x0);
if (ret) {
@@ -561,6 +802,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
chan->is_rx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
+ ret = qmc_chan_setup_tsa_rx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
return ret;
@@ -573,6 +823,12 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (chan->is_tx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP TRANSMIT command */
ret = qmc_chan_command(chan, 0x1);
if (ret) {
@@ -583,37 +839,114 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
chan->is_tx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
+ ret = qmc_chan_setup_tsa_tx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
return ret;
}
+static int qmc_chan_start_rx(struct qmc_chan *chan);
+
int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
- int ret;
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = !chan->is_rx_stopped;
ret = qmc_chan_stop_rx(chan);
if (ret)
- return ret;
+ goto end;
}
if (direction & QMC_CHAN_WRITE) {
ret = qmc_chan_stop_tx(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /* Restart rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_start_rx(chan);
+ goto end;
+ }
}
- return 0;
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);
-static void qmc_chan_start_rx(struct qmc_chan *chan)
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
+{
+ struct tsa_serial_info info;
+ u16 first_rx, last_tx;
+ u16 trnsync;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Find the first Rx TS allocated to the channel */
+ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
+
+ /* Find the last Tx TS allocated to the channel */
+ last_tx = fls64(chan->tx_ts_mask);
+
+ trnsync = 0;
+ if (info.nb_rx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
+ if (info.nb_tx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
+
+ qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+
+ dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
+ chan->id, trnsync,
+ first_rx, info.nb_rx_ts, chan->rx_ts_mask,
+ last_tx, info.nb_tx_ts, chan->tx_ts_mask);
+
+ return 0;
+}
+
+static int qmc_chan_start_rx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (!chan->is_rx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_rx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/* Restart the receiver */
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
@@ -624,15 +957,38 @@ static void qmc_chan_start_rx(struct qmc_chan *chan)
chan->is_rx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
}
-static void qmc_chan_start_tx(struct qmc_chan *chan)
+static int qmc_chan_start_tx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (!chan->is_tx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_tx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/*
* Enable channel transmitter as it could be disabled if
* qmc_chan_reset() was called.
@@ -644,18 +1000,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan)
chan->is_tx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
}
int qmc_chan_start(struct qmc_chan *chan, int direction)
{
- if (direction & QMC_CHAN_READ)
- qmc_chan_start_rx(chan);
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
- if (direction & QMC_CHAN_WRITE)
- qmc_chan_start_tx(chan);
+ spin_lock_irqsave(&chan->ts_lock, flags);
- return 0;
+ if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = chan->is_rx_stopped;
+ ret = qmc_chan_start_rx(chan);
+ if (ret)
+ goto end;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_start_tx(chan);
+ if (ret) {
+			/* Re-stop rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_stop_rx(chan);
+ goto end;
+ }
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_start);
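qmc_chan_start() and qmc_chan_stop() now share a small rollback idiom under ts_lock: record whether this call is the one changing the Rx state, and undo only that delta when the Tx step fails. A minimal generic sketch of the idiom, with struct chan and the enable_rx()/enable_tx()/disable_rx() helpers as hypothetical stand-ins:

static int start_both(struct chan *c)
{
	/* Remember the pre-call state so a failure on the Tx side only
	 * rolls back a transition that this call actually performed.
	 */
	bool rx_was_stopped = c->rx_stopped;
	int ret;

	ret = enable_rx(c);
	if (ret)
		return ret;

	ret = enable_tx(c);
	if (ret && rx_was_stopped)
		disable_rx(c);	/* undo our own change, nothing more */

	return ret;
}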
@@ -740,10 +1117,7 @@ EXPORT_SYMBOL(qmc_chan_reset);
static int qmc_check_chans(struct qmc *qmc)
{
struct tsa_serial_info info;
- bool is_one_table = false;
struct qmc_chan *chan;
- u64 tx_ts_mask = 0;
- u64 rx_ts_mask = 0;
u64 tx_ts_assigned_mask;
u64 rx_ts_assigned_mask;
int ret;
@@ -767,38 +1141,21 @@ static int qmc_check_chans(struct qmc *qmc)
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
return -EINVAL;
}
- is_one_table = true;
}
tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
list_for_each_entry(chan, &qmc->chan_head, list) {
- if (chan->tx_ts_mask > tx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
- return -EINVAL;
- }
- if (tx_ts_mask & chan->tx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
- return -EINVAL;
- }
-
- if (chan->rx_ts_mask > rx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
- return -EINVAL;
- }
- if (rx_ts_mask & chan->rx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
+ if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
return -EINVAL;
}
- if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
- dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
return -EINVAL;
}
-
- tx_ts_mask |= chan->tx_ts_mask;
- rx_ts_mask |= chan->rx_ts_mask;
}
return 0;
@@ -844,6 +1201,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
}
chan->id = chan_id;
+ spin_lock_init(&chan->ts_lock);
spin_lock_init(&chan->rx_lock);
spin_lock_init(&chan->tx_lock);
@@ -854,7 +1212,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->tx_ts_mask = ts_mask;
+ chan->tx_ts_mask_avail = ts_mask;
+ chan->tx_ts_mask = chan->tx_ts_mask_avail;
ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
if (ret) {
@@ -863,7 +1222,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->rx_ts_mask = ts_mask;
+ chan->rx_ts_mask_avail = ts_mask;
+ chan->rx_ts_mask = chan->rx_ts_mask_avail;
mode = "transparent";
ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
@@ -894,9 +1254,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
return qmc_check_chans(qmc);
}
-static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -905,23 +1264,12 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
	 * Everything was previously checked; Tx and Rx related settings are
	 * identical -> use the Rx settings to build the table
*/
+ qmc->is_tsa_64rxtx = true;
/* Invalidate all entries */
for (i = 0; i < 64; i++)
qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
- /* Set entries based on Rx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entry */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -936,9 +1284,8 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
return 0;
}
-static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -946,6 +1293,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
* Use a Tx 32 entries table and a Rx 32 entries table.
* Everything was previously checked.
*/
+ qmc->is_tsa_64rxtx = false;
/* Invalidate all entries */
for (i = 0; i < 32; i++) {
@@ -953,28 +1301,6 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
}
- /* Set entries based on Rx and Tx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- /* Rx part */
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- /* Tx part */
- for (i = 0; i < info->nb_tx_ts; i++) {
- if (!(chan->tx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entries */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -994,7 +1320,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
return 0;
}
-static int qmc_setup_tsa(struct qmc *qmc)
+static int qmc_init_tsa(struct qmc *qmc)
{
struct tsa_serial_info info;
int ret;
@@ -1005,46 +1331,12 @@ static int qmc_setup_tsa(struct qmc *qmc)
return ret;
/*
- * Setup one common 64 entries table or two 32 entries (one for Tx and
- * one for Tx) according to assigned TS numbers.
+	 * Initialize one common 64-entry table or two 32-entry tables (one
+	 * for Tx and one for Rx) according to the assigned TS numbers.
*/
return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
- qmc_setup_tsa_64rxtx(qmc, &info) :
- qmc_setup_tsa_32rx_32tx(qmc, &info);
-}
-
-static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
-{
- struct tsa_serial_info info;
- u16 first_rx, last_tx;
- u16 trnsync;
- int ret;
-
- /* Retrieve info from the TSA related serial */
- ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
- if (ret)
- return ret;
-
- /* Find the first Rx TS allocated to the channel */
- first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
-
- /* Find the last Tx TS allocated to the channel */
- last_tx = fls64(chan->tx_ts_mask);
-
- trnsync = 0;
- if (info.nb_rx_ts)
- trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
- if (info.nb_tx_ts)
- trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
-
- qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
-
- dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
- chan->id, trnsync,
- first_rx, info.nb_rx_ts, chan->rx_ts_mask,
- last_tx, info.nb_tx_ts, chan->tx_ts_mask);
-
- return 0;
+ qmc_init_tsa_64rxtx(qmc, &info) :
+ qmc_init_tsa_32rx_32tx(qmc, &info);
}
static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
@@ -1366,7 +1658,7 @@ static int qmc_probe(struct platform_device *pdev)
qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
- ret = qmc_setup_tsa(qmc);
+ ret = qmc_init_tsa(qmc);
if (ret)
goto err_tsa_serial_disconnect;
@@ -1404,8 +1696,16 @@ static int qmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qmc);
+ /* Populate channel related devices */
+ ret = devm_of_platform_populate(qmc->dev);
+ if (ret)
+ goto err_disable_txrx;
+
return 0;
+err_disable_txrx:
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
+
err_disable_intr:
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
@@ -1444,26 +1744,16 @@ static struct platform_driver qmc_driver = {
};
module_platform_driver(qmc_driver);
-struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
- struct of_phandle_args out_args;
struct platform_device *pdev;
struct qmc_chan *qmc_chan;
struct qmc *qmc;
- int ret;
- ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
- &out_args);
- if (ret < 0)
- return ERR_PTR(ret);
-
- if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
- of_node_put(out_args.np);
+ if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
return ERR_PTR(-EINVAL);
- }
- pdev = of_find_device_by_node(out_args.np);
- of_node_put(out_args.np);
+ pdev = of_find_device_by_node(qmc_np);
if (!pdev)
return ERR_PTR(-ENODEV);
@@ -1473,17 +1763,12 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return ERR_PTR(-EPROBE_DEFER);
}
- if (out_args.args_count != 1) {
- platform_device_put(pdev);
- return ERR_PTR(-EINVAL);
- }
-
- if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
+ if (chan_index >= ARRAY_SIZE(qmc->chans)) {
platform_device_put(pdev);
return ERR_PTR(-EINVAL);
}
- qmc_chan = qmc->chans[out_args.args[0]];
+ qmc_chan = qmc->chans[chan_index];
if (!qmc_chan) {
platform_device_put(pdev);
return ERR_PTR(-ENOENT);
@@ -1491,8 +1776,44 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return qmc_chan;
}
+
+struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct qmc_chan *qmc_chan;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
+ &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (out_args.args_count != 1) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
+ of_node_put(out_args.np);
+ return qmc_chan;
+}
EXPORT_SYMBOL(qmc_chan_get_byphandle);
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
+{
+ struct device_node *qmc_np;
+ u32 chan_index;
+ int ret;
+
+ qmc_np = np->parent;
+ ret = of_property_read_u32(np, "reg", &chan_index);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return qmc_chan_get_from_qmc(qmc_np, chan_index);
+}
+EXPORT_SYMBOL(qmc_chan_get_bychild);
+
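For reference, a hedged consumer-side sketch of the new child-node lookup; np and the surrounding probe context are hypothetical. The channel index comes from the child node's "reg" property, so no dedicated phandle property is needed:

	/* np is a channel child node of the QMC controller */
	struct qmc_chan *chan = qmc_chan_get_bychild(np);

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... use the channel ... */
	qmc_chan_put(chan);	/* drops the controller device reference */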
void qmc_chan_put(struct qmc_chan *chan)
{
put_device(chan->qmc->dev);
@@ -1529,6 +1850,28 @@ struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
+ struct device_node *np)
+{
+ struct qmc_chan *qmc_chan;
+ struct qmc_chan **dr;
+
+ dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ qmc_chan = qmc_chan_get_bychild(np);
+ if (!IS_ERR(qmc_chan)) {
+ *dr = qmc_chan;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
+
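devm_qmc_chan_get_bychild() follows the standard devres wrapper shape: allocate a release record, acquire the resource, and register the record only on success. The same skeleton reduced to its essentials for an arbitrary resource, with struct thing and get_thing()/put_thing() as hypothetical stand-ins:

static void devm_thing_release(struct device *dev, void *res)
{
	put_thing(*(struct thing **)res);
}

static struct thing *devm_thing_get(struct device *dev)
{
	struct thing **dr, *t;

	dr = devres_alloc(devm_thing_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	t = get_thing();
	if (IS_ERR(t)) {
		devres_free(dr);	/* nothing to release later */
		return t;
	}

	*dr = t;
	devres_add(dev, dr);	/* put_thing() runs at driver unbind */
	return t;
}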
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index e31791659..9ff70b38e 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -85,8 +85,10 @@ static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
struct hccs_register_ctx ctx = {0};
acpi_status status;
- if (!acpi_has_method(handle, METHOD_NAME__CRS))
+ if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
+ dev_err(hdev->dev, "No _CRS method.\n");
return -ENODEV;
+ }
ctx.dev = hdev->dev;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
@@ -108,6 +110,14 @@ static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
*(u8 *)msg, ret);
}
+static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
+{
+ struct hccs_mbox_client_info *cl_info =
+ container_of(cl, struct hccs_mbox_client_info, client);
+
+ complete(&cl_info->done);
+}
+
static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
@@ -129,6 +139,9 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
cl->tx_block = false;
cl->knows_txdone = true;
cl->tx_done = hccs_chan_tx_done;
+ cl->rx_callback = hdev->verspec_data->rx_callback;
+ init_completion(&cl_info->done);
+
pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
if (IS_ERR(pcc_chan)) {
		dev_err(dev, "PCC channel request failed.\n");
@@ -145,17 +158,23 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev)
*/
cl_info->deadline_us =
HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
- if (cl_info->mbox_chan->mbox->txdone_irq) {
+ if (!hdev->verspec_data->has_txdone_irq &&
+ cl_info->mbox_chan->mbox->txdone_irq) {
dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
rc = -EINVAL;
goto err_mbx_channel_free;
+ } else if (hdev->verspec_data->has_txdone_irq &&
+ !cl_info->mbox_chan->mbox->txdone_irq) {
+ dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
+ rc = -EINVAL;
+ goto err_mbx_channel_free;
}
if (pcc_chan->shmem_base_addr) {
cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
pcc_chan->shmem_size);
if (!cl_info->pcc_comm_addr) {
- dev_err(dev, "Failed to ioremap PCC communication region for channel-%d.\n",
+ dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
hdev->chan_id);
rc = -ENOMEM;
goto err_mbx_channel_free;
@@ -170,7 +189,7 @@ out:
return rc;
}
-static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
+static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
{
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
struct acpi_pcct_shared_memory __iomem *comm_base =
@@ -192,30 +211,74 @@ static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
return ret;
}
+static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
+{
+ struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
+
+ if (!wait_for_completion_timeout(&cl_info->done,
+ usecs_to_jiffies(cl_info->deadline_us))) {
+		dev_err(hdev->dev, "PCC command execution timed out!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
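Taken together with hccs_pcc_rx_callback() above, the IRQ-mode handshake is: arm the completion before ringing the doorbell, let the mailbox Rx callback complete it, and bound the wait by the PCCT-derived deadline. A condensed kernel-context sketch; struct demo_ctx and the demo_* names are hypothetical:

struct demo_ctx {
	struct mbox_client client;
	struct mbox_chan *chan;
	struct completion done;
};

static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	struct demo_ctx *ctx = container_of(cl, struct demo_ctx, client);

	complete(&ctx->done);	/* runs from mailbox IRQ context */
}

static int demo_send_and_wait(struct demo_ctx *ctx, void *msg, u64 deadline_us)
{
	reinit_completion(&ctx->done);	/* arm before ringing the doorbell */

	if (mbox_send_message(ctx->chan, msg) < 0)
		return -EIO;

	if (!wait_for_completion_timeout(&ctx->done,
					 usecs_to_jiffies(deadline_us)))
		return -ETIMEDOUT;

	return 0;
}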
+static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
+ u8 cmd,
+ struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size)
+{
+ struct acpi_pcct_shared_memory tmp = {
+ .signature = PCC_SIGNATURE | hdev->chan_id,
+ .command = cmd,
+ .status = 0,
+ };
+
+ memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ sizeof(struct acpi_pcct_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ memcpy_toio(comm_space, (void *)desc, space_size);
+}
+
+static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
+ u8 cmd,
+ struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size)
+{
+ struct acpi_pcct_ext_pcc_shared_memory tmp = {
+ .signature = PCC_SIGNATURE | hdev->chan_id,
+ .flags = PCC_CMD_COMPLETION_NOTIFY,
+ .length = HCCS_PCC_SHARE_MEM_BYTES,
+ .command = cmd,
+ };
+
+ memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
+ sizeof(struct acpi_pcct_ext_pcc_shared_memory));
+
+ /* Copy the message to the PCC comm space */
+ memcpy_toio(comm_space, (void *)desc, space_size);
+}
+
static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
struct hccs_desc *desc)
{
+ const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
- void __iomem *comm_space = cl_info->pcc_comm_addr +
- sizeof(struct acpi_pcct_shared_memory);
struct hccs_fw_inner_head *fw_inner_head;
- struct acpi_pcct_shared_memory tmp = {0};
- u16 comm_space_size;
+ void __iomem *comm_space;
+ u16 space_size;
int ret;
- /* Write signature for this subspace */
- tmp.signature = PCC_SIGNATURE | hdev->chan_id;
- /* Write to the shared command region */
- tmp.command = cmd;
- /* Clear cmd complete bit */
- tmp.status = 0;
- memcpy_toio(cl_info->pcc_comm_addr, (void *)&tmp,
- sizeof(struct acpi_pcct_shared_memory));
-
- /* Copy the message to the PCC comm space */
- comm_space_size = HCCS_PCC_SHARE_MEM_BYTES -
- sizeof(struct acpi_pcct_shared_memory);
- memcpy_toio(comm_space, (void *)desc, comm_space_size);
+ comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
+ space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
+ verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
+ comm_space, space_size);
+ if (verspec_data->has_txdone_irq)
+ reinit_completion(&cl_info->done);
/* Ring doorbell */
ret = mbox_send_message(cl_info->mbox_chan, &cmd);
@@ -225,13 +288,12 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
goto end;
}
- /* Wait for completion */
- ret = hccs_check_chan_cmd_complete(hdev);
+ ret = verspec_data->wait_cmd_complete(hdev);
if (ret)
goto end;
/* Copy response data */
- memcpy_fromio((void *)desc, comm_space, comm_space_size);
+ memcpy_fromio((void *)desc, comm_space, space_size);
fw_inner_head = &desc->rsp.fw_inner_head;
if (fw_inner_head->retStatus) {
dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
@@ -240,7 +302,10 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
}
end:
- mbox_client_txdone(cl_info->mbox_chan, ret);
+ if (verspec_data->has_txdone_irq)
+ mbox_chan_txdone(cl_info->mbox_chan, ret);
+ else
+ mbox_client_txdone(cl_info->mbox_chan, ret);
return ret;
}
@@ -527,7 +592,6 @@ out:
static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
{
-
struct device *dev = hdev->dev;
struct hccs_chip_info *chip;
struct hccs_die_info *die;
@@ -1097,7 +1161,7 @@ static int hccs_create_hccs_dir(struct hccs_dev *hdev,
int ret;
ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
- &die->kobj, "hccs%d", port->port_id);
+ &die->kobj, "hccs%u", port->port_id);
if (ret) {
kobject_put(&port->kobj);
return ret;
@@ -1115,7 +1179,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev,
u16 i;
ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
- &chip->kobj, "die%d", die->die_id);
+ &chip->kobj, "die%u", die->die_id);
if (ret) {
kobject_put(&die->kobj);
return ret;
@@ -1125,7 +1189,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev,
port = &die->ports[i];
ret = hccs_create_hccs_dir(hdev, die, port);
if (ret) {
- dev_err(hdev->dev, "create hccs%d dir failed.\n",
+ dev_err(hdev->dev, "create hccs%u dir failed.\n",
port->port_id);
goto err;
}
@@ -1147,7 +1211,7 @@ static int hccs_create_chip_dir(struct hccs_dev *hdev,
u16 id;
ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
- &hdev->dev->kobj, "chip%d", chip->chip_id);
+ &hdev->dev->kobj, "chip%u", chip->chip_id);
if (ret) {
kobject_put(&chip->kobj);
return ret;
@@ -1178,7 +1242,7 @@ static int hccs_create_topo_dirs(struct hccs_dev *hdev)
chip = &hdev->chips[id];
ret = hccs_create_chip_dir(hdev, chip);
if (ret) {
- dev_err(hdev->dev, "init chip%d dir failed!\n", id);
+ dev_err(hdev->dev, "init chip%u dir failed!\n", id);
goto err;
}
}
@@ -1212,6 +1276,11 @@ static int hccs_probe(struct platform_device *pdev)
hdev->dev = &pdev->dev;
platform_set_drvdata(pdev, hdev);
+ /*
+	 * This never fails since the driver and device have already been matched.
+ */
+ hdev->verspec_data = acpi_device_get_match_data(hdev->dev);
+
mutex_init(&hdev->lock);
rc = hccs_get_pcc_chan_id(hdev);
if (rc)
@@ -1248,9 +1317,26 @@ static void hccs_remove(struct platform_device *pdev)
hccs_unregister_pcc_channel(hdev);
}
+static const struct hccs_verspecific_data hisi04b1_verspec_data = {
+ .rx_callback = NULL,
+ .wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
+ .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
+ .shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
+ .has_txdone_irq = false,
+};
+
+static const struct hccs_verspecific_data hisi04b2_verspec_data = {
+ .rx_callback = hccs_pcc_rx_callback,
+ .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
+ .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
+ .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
+ .has_txdone_irq = true,
+};
+
static const struct acpi_device_id hccs_acpi_match[] = {
- { "HISI04B1"},
- { ""},
+ { "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
+ { "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
+ { }
};
MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h
index 6012d2776..c3adbc01b 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.h
+++ b/drivers/soc/hisilicon/kunpeng_hccs.h
@@ -51,11 +51,26 @@ struct hccs_mbox_client_info {
struct pcc_mbox_chan *pcc_chan;
u64 deadline_us;
void __iomem *pcc_comm_addr;
+ struct completion done;
+};
+
+struct hccs_desc;
+
+struct hccs_verspecific_data {
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ int (*wait_cmd_complete)(struct hccs_dev *hdev);
+ void (*fill_pcc_shared_mem)(struct hccs_dev *hdev,
+ u8 cmd, struct hccs_desc *desc,
+ void __iomem *comm_space,
+ u16 space_size);
+ u16 shared_mem_size;
+ bool has_txdone_irq;
};
struct hccs_dev {
struct device *dev;
struct acpi_device *acpi_dev;
+ const struct hccs_verspecific_data *verspec_data;
u64 caps;
u8 chip_num;
struct hccs_chip_info *chips;
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
index 448cc3761..6bebf1a69 100644
--- a/drivers/soc/mediatek/mt8188-mmsys.h
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -3,6 +3,10 @@
#ifndef __SOC_MEDIATEK_MT8188_MMSYS_H
#define __SOC_MEDIATEK_MT8188_MMSYS_H
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <dt-bindings/reset/mt8188-resets.h>
+
+#define MT8188_VDO0_SW0_RST_B 0x190
#define MT8188_VDO0_OVL_MOUT_EN 0xf14
#define MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0)
#define MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1)
@@ -67,6 +71,136 @@
#define MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE BIT(18)
#define MT8188_SOUT_DSC_WRAP0_OUT_TO_DISP_WDMA0 BIT(19)
+#define MT8188_VDO1_SW0_RST_B 0x1d0
+#define MT8188_VDO1_HDR_TOP_CFG 0xd00
+#define MT8188_VDO1_MIXER_IN1_ALPHA 0xd30
+#define MT8188_VDO1_MIXER_IN1_PAD 0xd40
+#define MT8188_VDO1_MIXER_VSYNC_LEN 0xd5c
+#define MT8188_VDO1_MERGE0_ASYNC_CFG_WD 0xe30
+#define MT8188_VDO1_HDRBE_ASYNC_CFG_WD 0xe70
+#define MT8188_VDO1_VPP_MERGE0_P0_SEL_IN 0xf04
+#define MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 1
+#define MT8188_VDO1_VPP_MERGE0_P1_SEL_IN 0xf08
+#define MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 1
+#define MT8188_VDO1_DISP_DPI1_SEL_IN 0xf10
+#define MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+#define MT8188_VDO1_DISP_DP_INTF0_SEL_IN 0xf14
+#define MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT 0
+#define MT8188_VDO1_MERGE4_SOUT_SEL 0xf18
+#define MT8188_MERGE4_SOUT_TO_DPI1_SEL BIT(2)
+#define MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL BIT(3)
+#define MT8188_VDO1_MIXER_IN1_SEL_IN 0xf24
+#define MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_IN2_SEL_IN 0xf28
+#define MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_IN3_SEL_IN 0xf2c
+#define MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_IN4_SEL_IN 0xf30
+#define MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT 1
+#define MT8188_VDO1_MIXER_OUT_SOUT_SEL 0xf34
+#define MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL 1
+#define MT8188_VDO1_VPP_MERGE1_P0_SEL_IN 0xf3c
+#define MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 1
+#define MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL 0xf40
+#define MT8188_SOUT_TO_MIXER_IN1_SEL 1
+#define MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL 0xf44
+#define MT8188_SOUT_TO_MIXER_IN2_SEL 1
+#define MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL 0xf48
+#define MT8188_SOUT_TO_MIXER_IN3_SEL 1
+#define MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL 0xf4c
+#define MT8188_SOUT_TO_MIXER_IN4_SEL 1
+#define MT8188_VDO1_MERGE4_ASYNC_SEL_IN 0xf50
+#define MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT 1
+#define MT8188_VDO1_MIXER_IN1_SOUT_SEL 0xf58
+#define MT8188_MIXER_IN1_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_IN2_SOUT_SEL 0xf5c
+#define MT8188_MIXER_IN2_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_IN3_SOUT_SEL 0xf60
+#define MT8188_MIXER_IN3_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_IN4_SOUT_SEL 0xf64
+#define MT8188_MIXER_IN4_SOUT_TO_DISP_MIXER 0
+#define MT8188_VDO1_MIXER_SOUT_SEL_IN 0xf68
+#define MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER 0
+
+static const u8 mmsys_mt8188_vdo0_rst_tb[] = {
+ [MT8188_VDO0_RST_DISP_OVL0] = MMSYS_RST_NR(0, 0),
+ [MT8188_VDO0_RST_FAKE_ENG0] = MMSYS_RST_NR(0, 2),
+ [MT8188_VDO0_RST_DISP_CCORR0] = MMSYS_RST_NR(0, 4),
+ [MT8188_VDO0_RST_DISP_MUTEX0] = MMSYS_RST_NR(0, 6),
+ [MT8188_VDO0_RST_DISP_GAMMA0] = MMSYS_RST_NR(0, 8),
+ [MT8188_VDO0_RST_DISP_DITHER0] = MMSYS_RST_NR(0, 10),
+ [MT8188_VDO0_RST_DISP_WDMA0] = MMSYS_RST_NR(0, 17),
+ [MT8188_VDO0_RST_DISP_RDMA0] = MMSYS_RST_NR(0, 19),
+ [MT8188_VDO0_RST_DSI0] = MMSYS_RST_NR(0, 21),
+ [MT8188_VDO0_RST_DSI1] = MMSYS_RST_NR(0, 22),
+ [MT8188_VDO0_RST_DSC_WRAP0] = MMSYS_RST_NR(0, 23),
+ [MT8188_VDO0_RST_VPP_MERGE0] = MMSYS_RST_NR(0, 24),
+ [MT8188_VDO0_RST_DP_INTF0] = MMSYS_RST_NR(0, 25),
+ [MT8188_VDO0_RST_DISP_AAL0] = MMSYS_RST_NR(0, 26),
+ [MT8188_VDO0_RST_INLINEROT0] = MMSYS_RST_NR(0, 27),
+ [MT8188_VDO0_RST_APB_BUS] = MMSYS_RST_NR(0, 28),
+ [MT8188_VDO0_RST_DISP_COLOR0] = MMSYS_RST_NR(0, 29),
+ [MT8188_VDO0_RST_MDP_WROT0] = MMSYS_RST_NR(0, 30),
+ [MT8188_VDO0_RST_DISP_RSZ0] = MMSYS_RST_NR(0, 31),
+};
+
+static const u8 mmsys_mt8188_vdo1_rst_tb[] = {
+ [MT8188_VDO1_RST_SMI_LARB2] = MMSYS_RST_NR(0, 0),
+ [MT8188_VDO1_RST_SMI_LARB3] = MMSYS_RST_NR(0, 1),
+ [MT8188_VDO1_RST_GALS] = MMSYS_RST_NR(0, 2),
+ [MT8188_VDO1_RST_FAKE_ENG0] = MMSYS_RST_NR(0, 3),
+ [MT8188_VDO1_RST_FAKE_ENG1] = MMSYS_RST_NR(0, 4),
+ [MT8188_VDO1_RST_MDP_RDMA0] = MMSYS_RST_NR(0, 5),
+ [MT8188_VDO1_RST_MDP_RDMA1] = MMSYS_RST_NR(0, 6),
+ [MT8188_VDO1_RST_MDP_RDMA2] = MMSYS_RST_NR(0, 7),
+ [MT8188_VDO1_RST_MDP_RDMA3] = MMSYS_RST_NR(0, 8),
+ [MT8188_VDO1_RST_VPP_MERGE0] = MMSYS_RST_NR(0, 9),
+ [MT8188_VDO1_RST_VPP_MERGE1] = MMSYS_RST_NR(0, 10),
+ [MT8188_VDO1_RST_VPP_MERGE2] = MMSYS_RST_NR(0, 11),
+ [MT8188_VDO1_RST_VPP_MERGE3] = MMSYS_RST_NR(1, 0),
+ [MT8188_VDO1_RST_VPP_MERGE4] = MMSYS_RST_NR(1, 1),
+ [MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 2),
+ [MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 3),
+ [MT8188_VDO1_RST_DISP_MUTEX] = MMSYS_RST_NR(1, 4),
+ [MT8188_VDO1_RST_MDP_RDMA4] = MMSYS_RST_NR(1, 5),
+ [MT8188_VDO1_RST_MDP_RDMA5] = MMSYS_RST_NR(1, 6),
+ [MT8188_VDO1_RST_MDP_RDMA6] = MMSYS_RST_NR(1, 7),
+ [MT8188_VDO1_RST_MDP_RDMA7] = MMSYS_RST_NR(1, 8),
+ [MT8188_VDO1_RST_DP_INTF1_MMCK] = MMSYS_RST_NR(1, 9),
+ [MT8188_VDO1_RST_DPI0_MM_CK] = MMSYS_RST_NR(1, 10),
+ [MT8188_VDO1_RST_DPI1_MM_CK] = MMSYS_RST_NR(1, 11),
+ [MT8188_VDO1_RST_MERGE0_DL_ASYNC] = MMSYS_RST_NR(1, 13),
+ [MT8188_VDO1_RST_MERGE1_DL_ASYNC] = MMSYS_RST_NR(1, 14),
+ [MT8188_VDO1_RST_MERGE2_DL_ASYNC] = MMSYS_RST_NR(1, 15),
+ [MT8188_VDO1_RST_MERGE3_DL_ASYNC] = MMSYS_RST_NR(1, 16),
+ [MT8188_VDO1_RST_MERGE4_DL_ASYNC] = MMSYS_RST_NR(1, 17),
+ [MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 18),
+ [MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 19),
+ [MT8188_VDO1_RST_PADDING0] = MMSYS_RST_NR(1, 20),
+ [MT8188_VDO1_RST_PADDING1] = MMSYS_RST_NR(1, 21),
+ [MT8188_VDO1_RST_PADDING2] = MMSYS_RST_NR(1, 22),
+ [MT8188_VDO1_RST_PADDING3] = MMSYS_RST_NR(1, 23),
+ [MT8188_VDO1_RST_PADDING4] = MMSYS_RST_NR(1, 24),
+ [MT8188_VDO1_RST_PADDING5] = MMSYS_RST_NR(1, 25),
+ [MT8188_VDO1_RST_PADDING6] = MMSYS_RST_NR(1, 26),
+ [MT8188_VDO1_RST_PADDING7] = MMSYS_RST_NR(1, 27),
+ [MT8188_VDO1_RST_DISP_RSZ0] = MMSYS_RST_NR(1, 28),
+ [MT8188_VDO1_RST_DISP_RSZ1] = MMSYS_RST_NR(1, 29),
+ [MT8188_VDO1_RST_DISP_RSZ2] = MMSYS_RST_NR(1, 30),
+ [MT8188_VDO1_RST_DISP_RSZ3] = MMSYS_RST_NR(1, 31),
+ [MT8188_VDO1_RST_HDR_VDO_FE0] = MMSYS_RST_NR(2, 0),
+ [MT8188_VDO1_RST_HDR_GFX_FE0] = MMSYS_RST_NR(2, 1),
+ [MT8188_VDO1_RST_HDR_VDO_BE] = MMSYS_RST_NR(2, 2),
+ [MT8188_VDO1_RST_HDR_VDO_FE1] = MMSYS_RST_NR(2, 16),
+ [MT8188_VDO1_RST_HDR_GFX_FE1] = MMSYS_RST_NR(2, 17),
+ [MT8188_VDO1_RST_DISP_MIXER] = MMSYS_RST_NR(2, 18),
+ [MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC] = MMSYS_RST_NR(2, 19),
+ [MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC] = MMSYS_RST_NR(2, 20),
+ [MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC] = MMSYS_RST_NR(2, 21),
+ [MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC] = MMSYS_RST_NR(2, 22),
+ [MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC] = MMSYS_RST_NR(2, 23),
+};
+
static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
{
DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
@@ -146,4 +280,80 @@ static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
},
};
+static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = {
+ {
+ DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
+ }, {
+ DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
+ }, {
+ DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
+ MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
+ }, {
+ DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN1_SEL
+ }, {
+ DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN2_SEL
+ }, {
+ DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN3_SEL
+ }, {
+ DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN4_SEL
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
+ }, {
+ DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
+ }, {
+ DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
+ MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
+ MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8188_MERGE4_SOUT_TO_DPI1_SEL
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
+ MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
+ }, {
+ DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL
+ }
+};
+
#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index 88209102f..f370f4ec4 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -87,6 +87,29 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = {
.clk_driver = "clk-mt8188-vdo0",
.routes = mmsys_mt8188_routing_table,
.num_routes = ARRAY_SIZE(mmsys_mt8188_routing_table),
+ .sw0_rst_offset = MT8188_VDO0_SW0_RST_B,
+ .rst_tb = mmsys_mt8188_vdo0_rst_tb,
+ .num_resets = ARRAY_SIZE(mmsys_mt8188_vdo0_rst_tb),
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vdosys1_driver_data = {
+ .clk_driver = "clk-mt8188-vdo1",
+ .routes = mmsys_mt8188_vdo1_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8188_vdo1_routing_table),
+ .sw0_rst_offset = MT8188_VDO1_SW0_RST_B,
+ .rst_tb = mmsys_mt8188_vdo1_rst_tb,
+ .num_resets = ARRAY_SIZE(mmsys_mt8188_vdo1_rst_tb),
+ .vsync_len = 1,
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vppsys0_driver_data = {
+ .clk_driver = "clk-mt8188-vpp0",
+ .is_vppsys = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8188_vppsys1_driver_data = {
+ .clk_driver = "clk-mt8188-vpp1",
+ .is_vppsys = true,
};
static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
@@ -169,6 +192,10 @@ void mtk_mmsys_ddp_connect(struct device *dev,
if (cur == routes[i].from_comp && next == routes[i].to_comp)
mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask,
routes[i].val, NULL);
+
+ if (mmsys->data->vsync_len)
+ mtk_mmsys_update_bits(mmsys, MT8188_VDO1_MIXER_VSYNC_LEN, GENMASK(31, 0),
+ mmsys->data->vsync_len, NULL);
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
@@ -302,6 +329,15 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l
u32 offset;
u32 reg;
+ if (mmsys->data->rst_tb) {
+ if (id >= mmsys->data->num_resets) {
+ dev_err(rcdev->dev, "Invalid reset ID: %lu (>=%u)\n",
+ id, mmsys->data->num_resets);
+ return -EINVAL;
+ }
+ id = mmsys->data->rst_tb[id];
+ }
+
offset = (id / MMSYS_SW_RESET_PER_REG) * sizeof(u32);
id = id % MMSYS_SW_RESET_PER_REG;
reg = mmsys->data->sw0_rst_offset + offset;
@@ -429,6 +465,9 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
{ .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data },
{ .compatible = "mediatek,mt8186-mmsys", .data = &mt8186_mmsys_driver_data },
{ .compatible = "mediatek,mt8188-vdosys0", .data = &mt8188_vdosys0_driver_data },
+ { .compatible = "mediatek,mt8188-vdosys1", .data = &mt8188_vdosys1_driver_data },
+ { .compatible = "mediatek,mt8188-vppsys0", .data = &mt8188_vppsys0_driver_data },
+ { .compatible = "mediatek,mt8188-vppsys1", .data = &mt8188_vppsys1_driver_data },
{ .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data },
/* "mediatek,mt8195-mmsys" compatible is deprecated */
{ .compatible = "mediatek,mt8195-mmsys", .data = &mt8195_vdosys0_driver_data },
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 6725403d2..d37019273 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -78,6 +78,8 @@
#define DSI_SEL_IN_RDMA 0x1
#define DSI_SEL_IN_MASK 0x1
+#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit))
+
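MMSYS_RST_NR() flattens a (bank, bit) pair into a single index that mtk_mmsys_reset_update() later splits back into a register offset and a bit position. A standalone check of the arithmetic; the example ID matches the mmsys_mt8188_vdo1_rst_tb entry earlier in this patch, and MMSYS_SW_RESET_PER_REG is 32 as in mtk-mmsys.c:

#include <assert.h>

#define MMSYS_RST_NR(bank, bit)	(((bank) * 32) + (bit))
#define MMSYS_SW_RESET_PER_REG	32

int main(void)
{
	/* MT8188_VDO1_RST_HDR_VDO_FE1 = MMSYS_RST_NR(2, 16) */
	unsigned int id = MMSYS_RST_NR(2, 16);			 /* 80 */
	unsigned int offset = (id / MMSYS_SW_RESET_PER_REG) * 4; /* 8  */
	unsigned int bit = id % MMSYS_SW_RESET_PER_REG;		 /* 16 */

	/* i.e. bit 16 of the third SW reset register, at sw0_rst_offset + 8 */
	assert(id == 80 && offset == 8 && bit == 16);
	return 0;
}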
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
@@ -86,13 +88,43 @@ struct mtk_mmsys_routes {
u32 val;
};
+/**
+ * struct mtk_mmsys_driver_data - Settings of the mmsys
+ * @clk_driver: Clock driver name that the mmsys is using
+ * (defined in drivers/clk/mediatek/clk-*.c).
+ * @routes: Routing table of the mmsys.
+ * It provides mux settings from one module to another.
+ * @num_routes: Array size of the routes.
+ * @sw0_rst_offset: Register offset for the reset control.
+ * @num_resets: Number of reset bits that are defined
+ * @is_vppsys: Whether the mmsys is VPPSYS (Video Processing Pipe)
+ *	       or VDOSYS (Video). Only VDOSYS needs to be added to the DRM driver.
+ * @vsync_len: VSYNC length of the MIXER.
+ * VSYNC is usually triggered by the connector, so its length is a
+ * fixed value when the frame rate is decided, but ETHDR and
+ * MIXER generate their own VSYNC due to hardware design, therefore
+ * MIXER has to sync with ETHDR by adjusting VSYNC length.
+ * On MT8195, there is no such setting so we use the gap between
+ * falling edge and rising edge of SOF (Start of Frame) signal to
+ * do the job, but since MT8188, VSYNC_LEN setting is introduced to
+ * solve the problem and is given 0x40 (ticks) as the default value.
+ * Please notice that this value has to be set to 1 (minimum) if
+ *	       ETHDR is bypassed, otherwise MIXER could wait too long and cause
+ * underflow.
+ *
+ * Each MMSYS (multi-media system) may have different settings: it may use
+ * different clock sources, mux settings, reset controls, etc., and these
+ * differences are all stored here.
+ */
struct mtk_mmsys_driver_data {
const char *clk_driver;
const struct mtk_mmsys_routes *routes;
const unsigned int num_routes;
const u16 sw0_rst_offset;
+ const u8 *rst_tb;
const u32 num_resets;
const bool is_vppsys;
+ const u8 vsync_len;
};
/*
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 9d9f5ae57..73c256d39 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -133,6 +133,30 @@
#define MT8188_MUTEX_MOD_DISP_POSTMASK0 24
#define MT8188_MUTEX_MOD2_DISP_PWM0 33
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA0 0
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA1 1
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA2 2
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA3 3
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA4 4
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA5 5
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA6 6
+#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA7 7
+#define MT8188_MUTEX_MOD_DISP1_PADDING0 8
+#define MT8188_MUTEX_MOD_DISP1_PADDING1 9
+#define MT8188_MUTEX_MOD_DISP1_PADDING2 10
+#define MT8188_MUTEX_MOD_DISP1_PADDING3 11
+#define MT8188_MUTEX_MOD_DISP1_PADDING4 12
+#define MT8188_MUTEX_MOD_DISP1_PADDING5 13
+#define MT8188_MUTEX_MOD_DISP1_PADDING6 14
+#define MT8188_MUTEX_MOD_DISP1_PADDING7 15
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE0 20
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE1 21
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE2 22
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
+#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
+#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
+#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
+
#define MT8195_MUTEX_MOD_DISP_OVL0 0
#define MT8195_MUTEX_MOD_DISP_WDMA0 1
#define MT8195_MUTEX_MOD_DISP_RDMA0 2
@@ -264,6 +288,7 @@
#define MT8183_MUTEX_SOF_DPI0 2
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
+#define MT8188_MUTEX_SOF_DP_INTF1 4
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -275,6 +300,7 @@
#define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6)
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
+#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -445,6 +471,29 @@ static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_DSI0] = MT8188_MUTEX_MOD_DISP_DSI0,
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
+ [DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
+ [DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
+ [DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
+ [DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
+ [DDP_COMPONENT_MDP_RDMA2] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA2,
+ [DDP_COMPONENT_MDP_RDMA3] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA3,
+ [DDP_COMPONENT_MDP_RDMA4] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA4,
+ [DDP_COMPONENT_MDP_RDMA5] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA5,
+ [DDP_COMPONENT_MDP_RDMA6] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA6,
+ [DDP_COMPONENT_MDP_RDMA7] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA7,
+ [DDP_COMPONENT_PADDING0] = MT8188_MUTEX_MOD_DISP1_PADDING0,
+ [DDP_COMPONENT_PADDING1] = MT8188_MUTEX_MOD_DISP1_PADDING1,
+ [DDP_COMPONENT_PADDING2] = MT8188_MUTEX_MOD_DISP1_PADDING2,
+ [DDP_COMPONENT_PADDING3] = MT8188_MUTEX_MOD_DISP1_PADDING3,
+ [DDP_COMPONENT_PADDING4] = MT8188_MUTEX_MOD_DISP1_PADDING4,
+ [DDP_COMPONENT_PADDING5] = MT8188_MUTEX_MOD_DISP1_PADDING5,
+ [DDP_COMPONENT_PADDING6] = MT8188_MUTEX_MOD_DISP1_PADDING6,
+ [DDP_COMPONENT_PADDING7] = MT8188_MUTEX_MOD_DISP1_PADDING7,
+ [DDP_COMPONENT_MERGE1] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE0,
+ [DDP_COMPONENT_MERGE2] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE1,
+ [DDP_COMPONENT_MERGE3] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE2,
+ [DDP_COMPONENT_MERGE4] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE3,
+ [DDP_COMPONENT_MERGE5] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE4,
};
static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -605,6 +654,8 @@ static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
+ [MUTEX_SOF_DP_INTF1] =
+ MT8188_MUTEX_SOF_DP_INTF1 | MT8188_MUTEX_EOF_DP_INTF1,
};
static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = {
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index f31e3bedf..9a91298c1 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 MediaTek Inc.
+ * Copyright (C) 2022 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
*/
#include <linux/bitfield.h>
@@ -32,16 +34,6 @@
#include <linux/spinlock.h>
#include <linux/thermal.h>
-/* svs bank 1-line software id */
-#define SVSB_CPU_LITTLE BIT(0)
-#define SVSB_CPU_BIG BIT(1)
-#define SVSB_CCI BIT(2)
-#define SVSB_GPU BIT(3)
-
-/* svs bank 2-line type */
-#define SVSB_LOW BIT(8)
-#define SVSB_HIGH BIT(9)
-
/* svs bank mode support */
#define SVSB_MODE_ALL_DISABLE 0
#define SVSB_MODE_INIT01 BIT(1)
@@ -128,6 +120,13 @@
#define SVSB_VOPS_FLD_VOP2_6 GENMASK(23, 16)
#define SVSB_VOPS_FLD_VOP3_7 GENMASK(31, 24)
+/* SVS Thermal Coefficients */
+#define SVSB_TS_COEFF_MT8195 250460
+#define SVSB_TS_COEFF_MT8186 204650
+
+/* Algo helpers */
+#define FUSE_DATA_NOT_VALID U32_MAX
+
/* svs bank related setting */
#define BITS8 8
#define MAX_OPP_ENTRIES 16
@@ -175,6 +174,36 @@ static DEFINE_SPINLOCK(svs_lock);
#endif
/**
+ * enum svsb_sw_id - SVS Bank Software ID
+ * @SVSB_SWID_CPU_LITTLE: CPU little cluster Bank
+ * @SVSB_SWID_CPU_BIG: CPU big cluster Bank
+ * @SVSB_SWID_CCI: Cache Coherent Interconnect Bank
+ * @SVSB_SWID_GPU: GPU Bank
+ * @SVSB_SWID_MAX: Total number of Banks
+ */
+enum svsb_sw_id {
+ SVSB_SWID_CPU_LITTLE,
+ SVSB_SWID_CPU_BIG,
+ SVSB_SWID_CCI,
+ SVSB_SWID_GPU,
+ SVSB_SWID_MAX
+};
+
+/**
+ * enum svsb_type - SVS Bank 2-line: Type and Role
+ * @SVSB_TYPE_NONE: One-line type Bank - Global role
+ * @SVSB_TYPE_LOW: Two-line type Bank - Low bank role
+ * @SVSB_TYPE_HIGH: Two-line type Bank - High bank role
+ * @SVSB_TYPE_MAX: Total number of bank types
+ */
+enum svsb_type {
+ SVSB_TYPE_NONE,
+ SVSB_TYPE_LOW,
+ SVSB_TYPE_HIGH,
+ SVSB_TYPE_MAX
+};
+
+/**
* enum svsb_phase - svs bank phase enumeration
* @SVSB_PHASE_ERROR: svs bank encounters unexpected condition
* @SVSB_PHASE_INIT01: svs bank basic init for data calibration
@@ -256,60 +285,88 @@ enum svs_reg_index {
};
static const u32 svs_regs_v2[] = {
- [DESCHAR] = 0xc00,
- [TEMPCHAR] = 0xc04,
- [DETCHAR] = 0xc08,
- [AGECHAR] = 0xc0c,
- [DCCONFIG] = 0xc10,
- [AGECONFIG] = 0xc14,
- [FREQPCT30] = 0xc18,
- [FREQPCT74] = 0xc1c,
- [LIMITVALS] = 0xc20,
- [VBOOT] = 0xc24,
- [DETWINDOW] = 0xc28,
- [CONFIG] = 0xc2c,
- [TSCALCS] = 0xc30,
- [RUNCONFIG] = 0xc34,
- [SVSEN] = 0xc38,
- [INIT2VALS] = 0xc3c,
- [DCVALUES] = 0xc40,
- [AGEVALUES] = 0xc44,
- [VOP30] = 0xc48,
- [VOP74] = 0xc4c,
- [TEMP] = 0xc50,
- [INTSTS] = 0xc54,
- [INTSTSRAW] = 0xc58,
- [INTEN] = 0xc5c,
- [CHKINT] = 0xc60,
- [CHKSHIFT] = 0xc64,
- [STATUS] = 0xc68,
- [VDESIGN30] = 0xc6c,
- [VDESIGN74] = 0xc70,
- [DVT30] = 0xc74,
- [DVT74] = 0xc78,
- [AGECOUNT] = 0xc7c,
- [SMSTATE0] = 0xc80,
- [SMSTATE1] = 0xc84,
- [CTL0] = 0xc88,
- [DESDETSEC] = 0xce0,
- [TEMPAGESEC] = 0xce4,
- [CTRLSPARE0] = 0xcf0,
- [CTRLSPARE1] = 0xcf4,
- [CTRLSPARE2] = 0xcf8,
- [CTRLSPARE3] = 0xcfc,
- [CORESEL] = 0xf00,
- [THERMINTST] = 0xf04,
- [INTST] = 0xf08,
- [THSTAGE0ST] = 0xf0c,
- [THSTAGE1ST] = 0xf10,
- [THSTAGE2ST] = 0xf14,
- [THAHBST0] = 0xf18,
- [THAHBST1] = 0xf1c,
- [SPARE0] = 0xf20,
- [SPARE1] = 0xf24,
- [SPARE2] = 0xf28,
- [SPARE3] = 0xf2c,
- [THSLPEVEB] = 0xf30,
+ [DESCHAR] = 0x00,
+ [TEMPCHAR] = 0x04,
+ [DETCHAR] = 0x08,
+ [AGECHAR] = 0x0c,
+ [DCCONFIG] = 0x10,
+ [AGECONFIG] = 0x14,
+ [FREQPCT30] = 0x18,
+ [FREQPCT74] = 0x1c,
+ [LIMITVALS] = 0x20,
+ [VBOOT] = 0x24,
+ [DETWINDOW] = 0x28,
+ [CONFIG] = 0x2c,
+ [TSCALCS] = 0x30,
+ [RUNCONFIG] = 0x34,
+ [SVSEN] = 0x38,
+ [INIT2VALS] = 0x3c,
+ [DCVALUES] = 0x40,
+ [AGEVALUES] = 0x44,
+ [VOP30] = 0x48,
+ [VOP74] = 0x4c,
+ [TEMP] = 0x50,
+ [INTSTS] = 0x54,
+ [INTSTSRAW] = 0x58,
+ [INTEN] = 0x5c,
+ [CHKINT] = 0x60,
+ [CHKSHIFT] = 0x64,
+ [STATUS] = 0x68,
+ [VDESIGN30] = 0x6c,
+ [VDESIGN74] = 0x70,
+ [DVT30] = 0x74,
+ [DVT74] = 0x78,
+ [AGECOUNT] = 0x7c,
+ [SMSTATE0] = 0x80,
+ [SMSTATE1] = 0x84,
+ [CTL0] = 0x88,
+ [DESDETSEC] = 0xe0,
+ [TEMPAGESEC] = 0xe4,
+ [CTRLSPARE0] = 0xf0,
+ [CTRLSPARE1] = 0xf4,
+ [CTRLSPARE2] = 0xf8,
+ [CTRLSPARE3] = 0xfc,
+ [CORESEL] = 0x300,
+ [THERMINTST] = 0x304,
+ [INTST] = 0x308,
+ [THSTAGE0ST] = 0x30c,
+ [THSTAGE1ST] = 0x310,
+ [THSTAGE2ST] = 0x314,
+ [THAHBST0] = 0x318,
+ [THAHBST1] = 0x31c,
+ [SPARE0] = 0x320,
+ [SPARE1] = 0x324,
+ [SPARE2] = 0x328,
+ [SPARE3] = 0x32c,
+ [THSLPEVEB] = 0x330,
+};
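Every entry in svs_regs_v2 drops by exactly 0xc00: the table moves from absolute offsets inside the parent block to offsets relative to the SVS unit itself, with the 0xc00 assumed to be folded into the mapped base elsewhere in this series. A one-line equivalence check with a hypothetical base address:

#include <assert.h>

int main(void)
{
	unsigned long base = 0x1100b000;		/* hypothetical MMIO base */

	unsigned long old_deschar = base + 0xc00;	/* old [DESCHAR] = 0xc00 */
	unsigned long new_base = base + 0xc00;		/* rebased mapping */
	unsigned long new_deschar = new_base + 0x00;	/* new [DESCHAR] = 0x00 */

	assert(old_deschar == new_deschar);	/* same register either way */
	return 0;
}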
+
+static const char * const svs_swid_names[SVSB_SWID_MAX] = {
+ "SVSB_CPU_LITTLE", "SVSB_CPU_BIG", "SVSB_CCI", "SVSB_GPU"
+};
+
+static const char * const svs_type_names[SVSB_TYPE_MAX] = {
+ "", "_LOW", "_HIGH"
+};
+
+enum svs_fusemap_dev {
+ BDEV_BDES,
+ BDEV_MDES,
+ BDEV_MTDES,
+ BDEV_DCBDET,
+ BDEV_DCMDET,
+ BDEV_MAX
+};
+
+enum svs_fusemap_glb {
+ GLB_FT_PGM,
+ GLB_VMIN,
+ GLB_MAX
+};
+
+struct svs_fusemap {
+ s8 index;
+ u8 ofst;
};
/**
@@ -317,88 +374,124 @@ static const u32 svs_regs_v2[] = {
* @base: svs platform register base
* @dev: svs platform device
* @main_clk: main clock for svs bank
- * @pbank: svs bank pointer needing to be protected by spin_lock section
* @banks: svs banks that svs platform supports
* @rst: svs platform reset control
* @efuse_max: total number of svs efuse
* @tefuse_max: total number of thermal efuse
* @regs: svs platform registers map
- * @bank_max: total number of svs banks
* @efuse: svs efuse data received from NVMEM framework
* @tefuse: thermal efuse data received from NVMEM framework
+ * @ts_coeff: thermal sensors coefficient
+ * @bank_max: total number of svs banks
*/
struct svs_platform {
void __iomem *base;
struct device *dev;
struct clk *main_clk;
- struct svs_bank *pbank;
struct svs_bank *banks;
struct reset_control *rst;
size_t efuse_max;
size_t tefuse_max;
const u32 *regs;
- u32 bank_max;
u32 *efuse;
u32 *tefuse;
+ u32 ts_coeff;
+ u16 bank_max;
};
struct svs_platform_data {
char *name;
struct svs_bank *banks;
- bool (*efuse_parsing)(struct svs_platform *svsp);
+ bool (*efuse_parsing)(struct svs_platform *svsp, const struct svs_platform_data *pdata);
int (*probe)(struct svs_platform *svsp);
+ const struct svs_fusemap *glb_fuse_map;
const u32 *regs;
- u32 bank_max;
+ u32 ts_coeff;
+ u16 bank_max;
+};
+
+/**
+ * struct svs_bank_pdata - SVS Bank immutable config parameters
+ * @dev_fuse_map: Bank fuse map data
+ * @buck_name: Regulator name
+ * @tzone_name: Thermal zone name
+ * @age_config: Bank age configuration
+ * @ctl0: TS-x selection
+ * @dc_config: Bank dc configuration
+ * @int_st: Bank interrupt identification
+ * @turn_freq_base: Reference frequency for 2-line turn point
+ * @tzone_htemp: Thermal zone high temperature threshold
+ * @tzone_ltemp: Thermal zone low temperature threshold
+ * @volt_step: Bank voltage step
+ * @volt_base: Bank voltage base
+ * @tzone_htemp_voffset: Thermal zone high temperature voltage offset
+ * @tzone_ltemp_voffset: Thermal zone low temperature voltage offset
+ * @chk_shift: Bank chicken shift
+ * @cpu_id: CPU core ID for SVS CPU bank use only
+ * @opp_count: Bank opp count
+ * @vboot: Voltage request for bank init01 only
+ * @vco: Bank VCO value
+ * @sw_id: Bank software identification
+ * @type: SVS Bank Type (1 or 2-line) and Role (high/low)
+ * @set_freq_pct: function pointer to set bank frequency percent table
+ * @get_volts: function pointer to get bank voltages
+ */
+struct svs_bank_pdata {
+ const struct svs_fusemap *dev_fuse_map;
+ char *buck_name;
+ char *tzone_name;
+ u32 age_config;
+ u32 ctl0;
+ u32 dc_config;
+ u32 int_st;
+ u32 turn_freq_base;
+ u32 tzone_htemp;
+ u32 tzone_ltemp;
+ u32 volt_step;
+ u32 volt_base;
+ u16 tzone_htemp_voffset;
+ u16 tzone_ltemp_voffset;
+ u8 chk_shift;
+ u8 cpu_id;
+ u8 opp_count;
+ u8 vboot;
+ u8 vco;
+ u8 sw_id;
+ u8 type;
+
+ /* Callbacks */
+ void (*set_freq_pct)(struct svs_platform *svsp, struct svs_bank *svsb);
+ void (*get_volts)(struct svs_platform *svsp, struct svs_bank *svsb);
};
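The new struct svs_bank_pdata gathers everything that is fixed per SoC into a const member of the bank, turning what used to be a convention into something the compiler enforces. A minimal illustration of the split, with purely illustrative fields; the voltage math matches the opp_volt equation documented further down:

struct demo_pdata {
	u32 volt_step;	/* fixed per SoC, never written at runtime */
	u32 volt_base;
};

struct demo_bank {
	const struct demo_pdata pdata;	/* set once, writes rejected */
	u32 volt;			/* runtime state, mutable */
};

static u32 demo_opp_volt(const struct demo_bank *b)
{
	/* opp_volt = (volt * volt_step) + volt_base */
	return b->volt * b->pdata.volt_step + b->pdata.volt_base;
}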
/**
* struct svs_bank - svs bank representation
+ * @pdata: SVS Bank immutable config parameters
* @dev: bank device
* @opp_dev: device for opp table/buck control
* @init_completion: the timeout completion for bank init
* @buck: regulator used by opp_dev
* @tzd: thermal zone device for getting temperature
* @lock: mutex lock to protect voltage update process
- * @set_freq_pct: function pointer to set bank frequency percent table
- * @get_volts: function pointer to get bank voltages
* @name: bank name
- * @buck_name: regulator name
- * @tzone_name: thermal zone name
* @phase: bank current phase
* @volt_od: bank voltage overdrive
* @reg_data: bank register data in different phase for debug purpose
* @pm_runtime_enabled_count: bank pm runtime enabled count
- * @mode_support: bank mode support.
+ * @mode_support: bank mode support
* @freq_base: reference frequency for bank init
- * @turn_freq_base: refenrece frequency for 2-line turn point
- * @vboot: voltage request for bank init01 only
* @opp_dfreq: default opp frequency table
* @opp_dvolt: default opp voltage table
* @freq_pct: frequency percent table for bank init
* @volt: bank voltage table
- * @volt_step: bank voltage step
- * @volt_base: bank voltage base
* @volt_flags: bank voltage flags
* @vmax: bank voltage maximum
* @vmin: bank voltage minimum
- * @age_config: bank age configuration
* @age_voffset_in: bank age voltage offset
- * @dc_config: bank dc configuration
* @dc_voffset_in: bank dc voltage offset
* @dvt_fixed: bank dvt fixed value
- * @vco: bank VCO value
- * @chk_shift: bank chicken shift
* @core_sel: bank selection
- * @opp_count: bank opp count
- * @int_st: bank interrupt identification
- * @sw_id: bank software identification
- * @cpu_id: cpu core id for SVS CPU bank use only
- * @ctl0: TS-x selection
* @temp: bank temperature
- * @tzone_htemp: thermal zone high temperature threshold
- * @tzone_htemp_voffset: thermal zone high temperature voltage offset
- * @tzone_ltemp: thermal zone low temperature threshold
- * @tzone_ltemp_voffset: thermal zone low temperature voltage offset
* @bts: svs efuse data
* @mts: svs efuse data
* @bdes: svs efuse data
@@ -408,70 +501,48 @@ struct svs_platform_data {
* @dcmdet: svs efuse data
* @turn_pt: 2-line turn point tells which opp_volt calculated by high/low bank
* @vbin_turn_pt: voltage bin turn point helps know which svsb_volt should be overridden
- * @type: bank type to represent it is 2-line (high/low) bank or 1-line bank
*
- * Svs bank will generate suitalbe voltages by below general math equation
+ * Svs bank will generate suitable voltages using the general math equation
 * below and provide these voltages to the opp voltage table.
*
* opp_volt[i] = (volt[i] * volt_step) + volt_base;
*/
struct svs_bank {
+ const struct svs_bank_pdata pdata;
struct device *dev;
struct device *opp_dev;
struct completion init_completion;
struct regulator *buck;
struct thermal_zone_device *tzd;
- struct mutex lock; /* lock to protect voltage update process */
- void (*set_freq_pct)(struct svs_platform *svsp);
- void (*get_volts)(struct svs_platform *svsp);
+ struct mutex lock;
+ int pm_runtime_enabled_count;
+ short int volt_od;
char *name;
- char *buck_name;
- char *tzone_name;
enum svsb_phase phase;
- s32 volt_od;
u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX];
- u32 pm_runtime_enabled_count;
- u32 mode_support;
- u32 freq_base;
- u32 turn_freq_base;
- u32 vboot;
+ u8 mode_support;
u32 opp_dfreq[MAX_OPP_ENTRIES];
u32 opp_dvolt[MAX_OPP_ENTRIES];
u32 freq_pct[MAX_OPP_ENTRIES];
u32 volt[MAX_OPP_ENTRIES];
- u32 volt_step;
- u32 volt_base;
u32 volt_flags;
- u32 vmax;
- u32 vmin;
- u32 age_config;
- u32 age_voffset_in;
- u32 dc_config;
- u32 dc_voffset_in;
- u32 dvt_fixed;
- u32 vco;
- u32 chk_shift;
- u32 core_sel;
- u32 opp_count;
- u32 int_st;
- u32 sw_id;
- u32 cpu_id;
- u32 ctl0;
- u32 temp;
- u32 tzone_htemp;
- u32 tzone_htemp_voffset;
- u32 tzone_ltemp;
- u32 tzone_ltemp_voffset;
- u32 bts;
- u32 mts;
- u32 bdes;
- u32 mdes;
- u32 mtdes;
- u32 dcbdet;
- u32 dcmdet;
+ u32 freq_base;
u32 turn_pt;
u32 vbin_turn_pt;
- u32 type;
+ u32 core_sel;
+ u32 temp;
+ u16 age_voffset_in;
+ u16 dc_voffset_in;
+ u8 dvt_fixed;
+ u8 vmax;
+ u8 vmin;
+ u16 bts;
+ u16 mts;
+ u16 bdes;
+ u16 mdes;
+ u8 mtdes;
+ u8 dcbdet;
+ u8 dcmdet;
};
static u32 percent(u32 numerator, u32 denominator)
@@ -494,10 +565,8 @@ static void svs_writel_relaxed(struct svs_platform *svsp, u32 val,
writel_relaxed(val, svsp->base + svsp->regs[rg_i]);
}
-static void svs_switch_bank(struct svs_platform *svsp)
+static void svs_switch_bank(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
-
svs_writel_relaxed(svsp, svsb->core_sel, CORESEL);
}
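Replacing svsp->pbank with an explicit svsb argument removes shared mutable state: callers previously had to publish the target bank through the platform struct under svs_lock before calling in, as the svs_bank_disable_and_restore_default_volts() hunk below shows. A before/after sketch, kept in comment form since both halves already exist as real code in this file:

/*
 * Before: the target bank travelled through shared state, so svs_lock
 * had to cover both the publish and every downstream reader:
 *
 *	spin_lock_irqsave(&svs_lock, flags);
 *	svsp->pbank = svsb;		// publish the target
 *	svs_switch_bank(svsp);		// reads svsp->pbank
 *
 * After: the bank is passed as a parameter, so the data flow is
 * explicit and no half-updated pointer can ever be observed:
 *
 *	spin_lock_irqsave(&svs_lock, flags);
 *	svs_switch_bank(svsp, svsb);
 */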
@@ -515,10 +584,11 @@ static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step,
static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
{
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
struct dev_pm_opp *opp;
u32 i, opp_u_volt;
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
svsb->opp_dfreq[i],
true);
@@ -530,8 +600,8 @@ static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
opp_u_volt = dev_pm_opp_get_voltage(opp);
svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt,
- svsb->volt_step,
- svsb->volt_base);
+ bdata->volt_step,
+ bdata->volt_base);
dev_pm_opp_put(opp);
}
@@ -541,6 +611,7 @@ static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb)
static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
{
int ret = -EPERM, tzone_temp = 0;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop;
mutex_lock(&svsb->lock);
@@ -549,15 +620,15 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
* 2-line bank updates its corresponding opp volts.
* 1-line bank updates all opp volts.
*/
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
opp_start = 0;
opp_stop = svsb->turn_pt;
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
opp_start = svsb->turn_pt;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
} else {
opp_start = 0;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
}
/* Get thermal effect */
@@ -566,20 +637,20 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND &&
svsb->temp < SVSB_TEMP_LOWER_BOUND)) {
dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n",
- svsb->tzone_name, ret, svsb->temp);
+ bdata->tzone_name, ret, svsb->temp);
svsb->phase = SVSB_PHASE_ERROR;
}
- if (tzone_temp >= svsb->tzone_htemp)
- temp_voffset += svsb->tzone_htemp_voffset;
- else if (tzone_temp <= svsb->tzone_ltemp)
- temp_voffset += svsb->tzone_ltemp_voffset;
+ if (tzone_temp >= bdata->tzone_htemp)
+ temp_voffset += bdata->tzone_htemp_voffset;
+ else if (tzone_temp <= bdata->tzone_ltemp)
+ temp_voffset += bdata->tzone_ltemp_voffset;
/* 2-line bank update all opp volts when running mon mode */
- if (svsb->phase == SVSB_PHASE_MON && (svsb->type == SVSB_HIGH ||
- svsb->type == SVSB_LOW)) {
+ if (svsb->phase == SVSB_PHASE_MON && (bdata->type == SVSB_TYPE_HIGH ||
+ bdata->type == SVSB_TYPE_LOW)) {
opp_start = 0;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
}
}
@@ -596,8 +667,8 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb)
case SVSB_PHASE_MON:
svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin);
opp_volt = svs_bank_volt_to_opp_volt(svsb_volt,
- svsb->volt_step,
- svsb->volt_base);
+ bdata->volt_step,
+ bdata->volt_base);
break;
default:
dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase);
@@ -632,8 +703,7 @@ static void svs_bank_disable_and_restore_default_volts(struct svs_platform *svsp
return;
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_switch_bank(svsp);
+ svs_switch_bank(svsp, svsb);
svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
spin_unlock_irqrestore(&svs_lock, flags);
@@ -760,7 +830,7 @@ static int svs_status_debug_show(struct seq_file *m, void *v)
svsb->name, tzone_temp, svsb->vbin_turn_pt,
svsb->turn_pt);
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < svsb->pdata.opp_count; i++) {
opp = dev_pm_opp_find_freq_exact(svsb->opp_dev,
svsb->opp_dfreq[i], true);
if (IS_ERR(opp)) {
@@ -865,12 +935,12 @@ static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx)
return DIV_ROUND_UP(vx, 100);
}
-static void svs_get_bank_volts_v3(struct svs_platform *svsp)
+static void svs_get_bank_volts_v3(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt;
u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0;
- u32 middle_index = (svsb->opp_count / 2);
+ u32 middle_index = (bdata->opp_count / 2);
if (svsb->phase == SVSB_PHASE_MON &&
svsb->volt_flags & SVSB_MON_VOLT_IGNORE)
@@ -881,7 +951,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
/* Target is to set svsb->volt[] by algorithm */
if (turn_pt < middle_index) {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/* volt[0] ~ volt[turn_pt - 1] */
for (i = 0; i < turn_pt; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
@@ -890,12 +960,12 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0);
shift_byte++;
}
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
- j = svsb->opp_count - 7;
+ j = bdata->opp_count - 7;
svsb->volt[turn_pt] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
shift_byte++;
- for (i = j; i < svsb->opp_count; i++) {
+ for (i = j; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
vop = (shift_byte < REG_BYTES) ? &vop30 :
&vop74;
@@ -912,7 +982,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
svsb->freq_pct[i]);
}
} else {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/* volt[0] + volt[j] ~ volt[turn_pt - 1] */
j = turn_pt - 7;
svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
@@ -932,9 +1002,9 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
svsb->volt[0],
svsb->volt[j],
svsb->freq_pct[i]);
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/* volt[turn_pt] ~ volt[opp_count - 1] */
- for (i = turn_pt; i < svsb->opp_count; i++) {
+ for (i = turn_pt; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
vop = (shift_byte < REG_BYTES) ? &vop30 :
&vop74;
@@ -944,12 +1014,12 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
}
}
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
opp_start = 0;
opp_stop = svsb->turn_pt;
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
opp_start = svsb->turn_pt;
- opp_stop = svsb->opp_count;
+ opp_stop = bdata->opp_count;
}
for (i = opp_start; i < opp_stop; i++)
@@ -959,11 +1029,11 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
/* For voltage bin support */
if (svsb->opp_dfreq[0] > svsb->freq_base) {
svsb->volt[0] = svs_opp_volt_to_bank_volt(svsb->opp_dvolt[0],
- svsb->volt_step,
- svsb->volt_base);
+ bdata->volt_step,
+ bdata->volt_base);
/* Find voltage bin turn point */
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
if (svsb->opp_dfreq[i] <= svsb->freq_base) {
svsb->vbin_turn_pt = i;
break;
@@ -980,15 +1050,15 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
}
}
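Layout recap for the v3 voltage readout, as implied by the code above (assuming BITS8 == 8 and REG_BYTES == 4, which the names suggest): VOP30 and VOP74 each pack four 8-bit voltage codes, so at most eight codes exist per bank; the 7-entry windows around turn_pt plus the interpolate() calls reconstruct the OPPs that do not fit. For example:

    /* shift_byte = 5 selects VOP74 (5 >= REG_BYTES) and byte lane 1 */
    b_sft = BITS8 * (5 % REG_BYTES);                /* = 8 -> bits 15:8 */
    svsb->volt[i] = (vop74 >> b_sft) & GENMASK(7, 0);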
-static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
+static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0;
u32 b_sft, shift_byte = 0, turn_pt;
- u32 middle_index = (svsb->opp_count / 2);
+ u32 middle_index = (bdata->opp_count / 2);
- for (i = 0; i < svsb->opp_count; i++) {
- if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) {
+ for (i = 0; i < bdata->opp_count; i++) {
+ if (svsb->opp_dfreq[i] <= bdata->turn_freq_base) {
svsb->turn_pt = i;
break;
}
@@ -998,11 +1068,11 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
/* Target is to fill out freq_pct74 / freq_pct30 by algorithm */
if (turn_pt < middle_index) {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/*
* If we don't handle this situation,
- * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0"
- * and this leads SVSB_LOW to work abnormally.
+ * SVSB_TYPE_HIGH's FREQPCT74 / FREQPCT30 would stay at "0",
+ * which leads SVSB_TYPE_LOW to work abnormally.
*/
if (turn_pt == 0)
freq_pct30 = svsb->freq_pct[0];
@@ -1015,15 +1085,15 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
*freq_pct |= (svsb->freq_pct[i] << b_sft);
shift_byte++;
}
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/*
* freq_pct[turn_pt] +
* freq_pct[opp_count - 7] ~ freq_pct[opp_count -1]
*/
freq_pct30 = svsb->freq_pct[turn_pt];
shift_byte++;
- j = svsb->opp_count - 7;
- for (i = j; i < svsb->opp_count; i++) {
+ j = bdata->opp_count - 7;
+ for (i = j; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
freq_pct = (shift_byte < REG_BYTES) ?
&freq_pct30 : &freq_pct74;
@@ -1032,7 +1102,7 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
}
}
} else {
- if (svsb->type == SVSB_HIGH) {
+ if (bdata->type == SVSB_TYPE_HIGH) {
/*
* freq_pct[0] +
* freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1]
@@ -1047,9 +1117,9 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
*freq_pct |= (svsb->freq_pct[i] << b_sft);
shift_byte++;
}
- } else if (svsb->type == SVSB_LOW) {
+ } else if (bdata->type == SVSB_TYPE_LOW) {
/* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */
- for (i = turn_pt; i < svsb->opp_count; i++) {
+ for (i = turn_pt; i < bdata->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
freq_pct = (shift_byte < REG_BYTES) ?
&freq_pct30 : &freq_pct74;
@@ -1063,9 +1133,9 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp)
svs_writel_relaxed(svsp, freq_pct30, FREQPCT30);
}
-static void svs_get_bank_volts_v2(struct svs_platform *svsp)
+static void svs_get_bank_volts_v2(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 temp, i;
temp = svs_readl_relaxed(svsp, VOP74);
@@ -1093,17 +1163,17 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
svsb->volt[14],
svsb->freq_pct[15]);
- for (i = 0; i < svsb->opp_count; i++)
+ for (i = 0; i < bdata->opp_count; i++)
svsb->volt[i] += svsb->volt_od;
/* For voltage bin support */
if (svsb->opp_dfreq[0] > svsb->freq_base) {
svsb->volt[0] = svs_opp_volt_to_bank_volt(svsb->opp_dvolt[0],
- svsb->volt_step,
- svsb->volt_base);
+ bdata->volt_step,
+ bdata->volt_base);
/* Find voltage bin turn point */
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
if (svsb->opp_dfreq[i] <= svsb->freq_base) {
svsb->vbin_turn_pt = i;
break;
@@ -1120,9 +1190,8 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
}
}
-static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
+static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp, struct svs_bank *svsb)
{
- struct svs_bank *svsb = svsp->pbank;
u32 freqpct74_val, freqpct30_val;
freqpct74_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[8]) |
@@ -1140,18 +1209,20 @@ static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
}
static void svs_set_bank_phase(struct svs_platform *svsp,
+ unsigned int bank_idx,
enum svsb_phase target_phase)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs;
- svs_switch_bank(svsp);
+ svs_switch_bank(svsp, svsb);
des_char = FIELD_PREP(SVSB_DESCHAR_FLD_BDES, svsb->bdes) |
FIELD_PREP(SVSB_DESCHAR_FLD_MDES, svsb->mdes);
svs_writel_relaxed(svsp, des_char, DESCHAR);
- temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, svsb->vco) |
+ temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, bdata->vco) |
FIELD_PREP(SVSB_TEMPCHAR_FLD_MTDES, svsb->mtdes) |
FIELD_PREP(SVSB_TEMPCHAR_FLD_DVT_FIXED, svsb->dvt_fixed);
svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
@@ -1160,11 +1231,11 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
FIELD_PREP(SVSB_DETCHAR_FLD_DCMDET, svsb->dcmdet);
svs_writel_relaxed(svsp, det_char, DETCHAR);
- svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
- svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG);
+ svs_writel_relaxed(svsp, bdata->dc_config, DCCONFIG);
+ svs_writel_relaxed(svsp, bdata->age_config, AGECONFIG);
svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG);
- svsb->set_freq_pct(svsp);
+ bdata->set_freq_pct(svsp, svsb);
limit_vals = FIELD_PREP(SVSB_LIMITVALS_FLD_DTLO, SVSB_VAL_DTLO) |
FIELD_PREP(SVSB_LIMITVALS_FLD_DTHI, SVSB_VAL_DTHI) |
@@ -1174,13 +1245,13 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
- svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
- svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
+ svs_writel_relaxed(svsp, bdata->chk_shift, CHKSHIFT);
+ svs_writel_relaxed(svsp, bdata->ctl0, CTL0);
svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
switch (target_phase) {
case SVSB_PHASE_INIT01:
- svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
+ svs_writel_relaxed(svsp, bdata->vboot, VBOOT);
svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
svs_writel_relaxed(svsp, SVSB_PTPEN_INIT01, SVSEN);
break;
@@ -1206,18 +1277,20 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
}
static inline void svs_save_bank_register_data(struct svs_platform *svsp,
+ unsigned short bank_idx,
enum svsb_phase phase)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
enum svs_reg_index rg_i;
for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++)
svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i);
}
-static inline void svs_error_isr_handler(struct svs_platform *svsp)
+static inline void svs_error_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n",
__func__, svs_readl_relaxed(svsp, CORESEL));
@@ -1229,27 +1302,29 @@ static inline void svs_error_isr_handler(struct svs_platform *svsp)
svs_readl_relaxed(svsp, SMSTATE1));
dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP));
- svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_ERROR);
svsb->phase = SVSB_PHASE_ERROR;
svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
}
-static inline void svs_init01_isr_handler(struct svs_platform *svsp)
+static inline void svs_init01_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ u32 val;
dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n",
__func__, svs_readl_relaxed(svsp, VDESIGN74),
svs_readl_relaxed(svsp, VDESIGN30),
svs_readl_relaxed(svsp, DCVALUES));
- svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_INIT01);
svsb->phase = SVSB_PHASE_INIT01;
- svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) &
- GENMASK(15, 0)) + 1;
+ val = ~(svs_readl_relaxed(svsp, DCVALUES) & GENMASK(15, 0)) + 1;
+ svsb->dc_voffset_in = val & GENMASK(15, 0);
if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE ||
(svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT &&
svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY))
@@ -1263,32 +1338,36 @@ static inline void svs_init01_isr_handler(struct svs_platform *svsp)
svsb->core_sel &= ~SVSB_DET_CLK_EN;
}
-static inline void svs_init02_isr_handler(struct svs_platform *svsp)
+static inline void svs_init02_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n",
__func__, svs_readl_relaxed(svsp, VOP74),
svs_readl_relaxed(svsp, VOP30),
svs_readl_relaxed(svsp, DCVALUES));
- svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_INIT02);
svsb->phase = SVSB_PHASE_INIT02;
- svsb->get_volts(svsp);
+ bdata->get_volts(svsp, svsb);
svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
}
-static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
+static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp,
+ unsigned short bank_idx)
{
- struct svs_bank *svsb = svsp->pbank;
+ struct svs_bank *svsb = &svsp->banks[bank_idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
- svs_save_bank_register_data(svsp, SVSB_PHASE_MON);
+ svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_MON);
svsb->phase = SVSB_PHASE_MON;
- svsb->get_volts(svsp);
+ bdata->get_volts(svsp, svsb);
svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
svs_writel_relaxed(svsp, SVSB_INTSTS_FLD_MONVOP, INTSTS);
@@ -1297,37 +1376,38 @@ static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
static irqreturn_t svs_isr(int irq, void *data)
{
struct svs_platform *svsp = data;
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb = NULL;
unsigned long flags;
u32 idx, int_sts, svs_en;
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name);
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
/* Find out which svs bank fires interrupt */
- if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) {
+ if (bdata->int_st & svs_readl_relaxed(svsp, INTST)) {
spin_unlock_irqrestore(&svs_lock, flags);
continue;
}
- svs_switch_bank(svsp);
+ svs_switch_bank(svsp, svsb);
int_sts = svs_readl_relaxed(svsp, INTSTS);
svs_en = svs_readl_relaxed(svsp, SVSEN);
if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
svs_en == SVSB_PTPEN_INIT01)
- svs_init01_isr_handler(svsp);
+ svs_init01_isr_handler(svsp, idx);
else if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
svs_en == SVSB_PTPEN_INIT02)
- svs_init02_isr_handler(svsp);
+ svs_init02_isr_handler(svsp, idx);
else if (int_sts & SVSB_INTSTS_FLD_MONVOP)
- svs_mon_mode_isr_handler(svsp);
+ svs_mon_mode_isr_handler(svsp, idx);
else
- svs_error_isr_handler(svsp);
+ svs_error_isr_handler(svsp, idx);
spin_unlock_irqrestore(&svs_lock, flags);
break;
@@ -1342,20 +1422,35 @@ static irqreturn_t svs_isr(int irq, void *data)
return IRQ_HANDLED;
}
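Dispatch summary for the reworked ISR — the decision logic is unchanged, but every handler now receives the bank index instead of reading svsp->pbank:

    /*
     * INTSTS == F0_COMPLETE && SVSEN == PTPEN_INIT01 -> svs_init01_isr_handler(svsp, idx)
     * INTSTS == F0_COMPLETE && SVSEN == PTPEN_INIT02 -> svs_init02_isr_handler(svsp, idx)
     * INTSTS &  SVSB_INTSTS_FLD_MONVOP               -> svs_mon_mode_isr_handler(svsp, idx)
     * anything else                                  -> svs_error_isr_handler(svsp, idx)
     */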
+static bool svs_mode_available(struct svs_platform *svsp, u8 mode)
+{
+ int i;
+
+ for (i = 0; i < svsp->bank_max; i++)
+ if (svsp->banks[i].mode_support & mode)
+ return true;
+ return false;
+}
+
static int svs_init01(struct svs_platform *svsp)
{
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
unsigned long flags, time_left;
bool search_done;
int ret = 0, r;
u32 opp_freq, opp_vboot, buck_volt, idx, i;
+ if (!svs_mode_available(svsp, SVSB_MODE_INIT01))
+ return 0;
+
/* Keep CPUs' core power on for svs_init01 initialization */
cpuidle_pause_and_lock();
/* Svs bank init01 preparation - power enable */
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
@@ -1363,7 +1458,7 @@ static int svs_init01(struct svs_platform *svsp)
ret = regulator_enable(svsb->buck);
if (ret) {
dev_err(svsb->dev, "%s enable fail: %d\n",
- svsb->buck_name, ret);
+ bdata->buck_name, ret);
goto svs_init01_resume_cpuidle;
}
@@ -1393,6 +1488,7 @@ static int svs_init01(struct svs_platform *svsp)
*/
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
@@ -1402,11 +1498,11 @@ static int svs_init01(struct svs_platform *svsp)
* fix to that freq until svs_init01 is done.
*/
search_done = false;
- opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
- svsb->volt_step,
- svsb->volt_base);
+ opp_vboot = svs_bank_volt_to_opp_volt(bdata->vboot,
+ bdata->volt_step,
+ bdata->volt_base);
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
opp_freq = svsb->opp_dfreq[i];
if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) {
ret = dev_pm_opp_adjust_voltage(svsb->opp_dev,
@@ -1438,13 +1534,14 @@ static int svs_init01(struct svs_platform *svsp)
/* Svs bank init01 begins */
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
- opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot,
- svsb->volt_step,
- svsb->volt_base);
+ opp_vboot = svs_bank_volt_to_opp_volt(bdata->vboot,
+ bdata->volt_step,
+ bdata->volt_base);
buck_volt = regulator_get_voltage(svsb->buck);
if (buck_volt != opp_vboot) {
@@ -1456,8 +1553,7 @@ static int svs_init01(struct svs_platform *svsp)
}
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_set_bank_phase(svsp, SVSB_PHASE_INIT01);
+ svs_set_bank_phase(svsp, idx, SVSB_PHASE_INIT01);
spin_unlock_irqrestore(&svs_lock, flags);
time_left = wait_for_completion_timeout(&svsb->init_completion,
@@ -1472,11 +1568,12 @@ static int svs_init01(struct svs_platform *svsp)
svs_init01_finish:
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT01))
continue;
- for (i = 0; i < svsb->opp_count; i++) {
+ for (i = 0; i < bdata->opp_count; i++) {
r = dev_pm_opp_enable(svsb->opp_dev,
svsb->opp_dfreq[i]);
if (r)
@@ -1502,7 +1599,7 @@ svs_init01_finish:
r = regulator_disable(svsb->buck);
if (r)
dev_err(svsb->dev, "%s disable fail: %d\n",
- svsb->buck_name, r);
+ bdata->buck_name, r);
}
svs_init01_resume_cpuidle:
@@ -1513,11 +1610,15 @@ svs_init01_resume_cpuidle:
static int svs_init02(struct svs_platform *svsp)
{
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
unsigned long flags, time_left;
int ret;
u32 idx;
+ if (!svs_mode_available(svsp, SVSB_MODE_INIT02))
+ return 0;
+
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
@@ -1526,8 +1627,7 @@ static int svs_init02(struct svs_platform *svsp)
reinit_completion(&svsb->init_completion);
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_set_bank_phase(svsp, SVSB_PHASE_INIT02);
+ svs_set_bank_phase(svsp, idx, SVSB_PHASE_INIT02);
spin_unlock_irqrestore(&svs_lock, flags);
time_left = wait_for_completion_timeout(&svsb->init_completion,
@@ -1546,11 +1646,12 @@ static int svs_init02(struct svs_platform *svsp)
*/
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
if (!(svsb->mode_support & SVSB_MODE_INIT02))
continue;
- if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ if (bdata->type == SVSB_TYPE_HIGH || bdata->type == SVSB_TYPE_LOW) {
if (svs_sync_bank_volts_from_opp(svsb)) {
dev_err(svsb->dev, "sync volt fail\n");
ret = -EPERM;
@@ -1583,8 +1684,7 @@ static void svs_mon_mode(struct svs_platform *svsp)
continue;
spin_lock_irqsave(&svs_lock, flags);
- svsp->pbank = svsb;
- svs_set_bank_phase(svsp, SVSB_PHASE_MON);
+ svs_set_bank_phase(svsp, idx, SVSB_PHASE_MON);
spin_unlock_irqrestore(&svs_lock, flags);
}
}
@@ -1609,12 +1709,12 @@ static int svs_start(struct svs_platform *svsp)
static int svs_suspend(struct device *dev)
{
struct svs_platform *svsp = dev_get_drvdata(dev);
- struct svs_bank *svsb;
int ret;
u32 idx;
for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+ struct svs_bank *svsb = &svsp->banks[idx];
+
svs_bank_disable_and_restore_default_volts(svsp, svsb);
}
@@ -1665,8 +1765,10 @@ out_of_resume:
static int svs_bank_resource_setup(struct svs_platform *svsp)
{
+ const struct svs_bank_pdata *bdata;
struct svs_bank *svsb;
struct dev_pm_opp *opp;
+ char tz_name_buf[20];
unsigned long freq;
int count, ret;
u32 idx, i;
@@ -1675,35 +1777,23 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
- svsb->name = "SVSB_CPU_LITTLE";
- break;
- case SVSB_CPU_BIG:
- svsb->name = "SVSB_CPU_BIG";
- break;
- case SVSB_CCI:
- svsb->name = "SVSB_CCI";
- break;
- case SVSB_GPU:
- if (svsb->type == SVSB_HIGH)
- svsb->name = "SVSB_GPU_HIGH";
- else if (svsb->type == SVSB_LOW)
- svsb->name = "SVSB_GPU_LOW";
- else
- svsb->name = "SVSB_GPU";
- break;
- default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ if (bdata->sw_id >= SVSB_SWID_MAX || bdata->type >= SVSB_TYPE_MAX) {
+ dev_err(svsb->dev, "unknown bank sw_id or type\n");
return -EINVAL;
}
- svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev),
- GFP_KERNEL);
+ svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), GFP_KERNEL);
if (!svsb->dev)
return -ENOMEM;
+ svsb->name = devm_kasprintf(svsp->dev, GFP_KERNEL, "%s%s",
+ svs_swid_names[bdata->sw_id],
+ svs_type_names[bdata->type]);
+ if (!svsb->name)
+ return -ENOMEM;
+
ret = dev_set_name(svsb->dev, "%s", svsb->name);
if (ret)
return ret;
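The bank name is now assembled from lookup tables instead of the removed switch. The tables themselves are outside this hunk, so their exact shape is an assumption, but to reproduce the old names ("SVSB_GPU" + "_HIGH" -> "SVSB_GPU_HIGH" and so on) they would look roughly like:

    /* hypothetical sketch; the real tables are defined elsewhere in the file */
    static const char * const svs_swid_names[SVSB_SWID_MAX] = {
        "SVSB_CPU_LITTLE", "SVSB_CPU_BIG", "SVSB_CCI", "SVSB_GPU",
    };
    static const char * const svs_type_names[SVSB_TYPE_MAX] = {
        "", "_LOW", "_HIGH",
    };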
@@ -1721,32 +1811,34 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
if (svsb->mode_support & SVSB_MODE_INIT01) {
svsb->buck = devm_regulator_get_optional(svsb->opp_dev,
- svsb->buck_name);
+ bdata->buck_name);
if (IS_ERR(svsb->buck)) {
dev_err(svsb->dev, "cannot get \"%s-supply\"\n",
- svsb->buck_name);
+ bdata->buck_name);
return PTR_ERR(svsb->buck);
}
}
- if (!IS_ERR_OR_NULL(svsb->tzone_name)) {
- svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name);
+ if (!IS_ERR_OR_NULL(bdata->tzone_name)) {
+ snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf),
+ "%s-thermal", bdata->tzone_name);
+ svsb->tzd = thermal_zone_get_zone_by_name(tz_name_buf);
if (IS_ERR(svsb->tzd)) {
dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n",
- svsb->tzone_name);
+ tz_name_buf);
return PTR_ERR(svsb->tzd);
}
}
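The zone lookup now appends "-thermal" before resolving, so the bank tables can store the plain sensor name. Illustrative expansion for the "gpu" entry used by several banks below:

    snprintf(tz_name_buf, ARRAY_SIZE(tz_name_buf), "%s-thermal", "gpu");
    /* tz_name_buf == "gpu-thermal", matching the <name>-thermal zone naming */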
count = dev_pm_opp_get_opp_count(svsb->opp_dev);
- if (svsb->opp_count != count) {
+ if (bdata->opp_count != count) {
dev_err(svsb->dev,
"opp_count not \"%u\" but get \"%d\"?\n",
- svsb->opp_count, count);
+ bdata->opp_count, count);
return count;
}
- for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) {
+ for (i = 0, freq = ULONG_MAX; i < bdata->opp_count; i++, freq--) {
opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq);
if (IS_ERR(opp)) {
dev_err(svsb->dev, "cannot find freq = %ld\n",
@@ -1780,8 +1872,6 @@ static int svs_get_efuse_data(struct svs_platform *svsp,
*svsp_efuse = nvmem_cell_read(cell, svsp_efuse_max);
if (IS_ERR(*svsp_efuse)) {
- dev_err(svsp->dev, "cannot read \"%s\" efuse: %ld\n",
- nvmem_cell_name, PTR_ERR(*svsp_efuse));
nvmem_cell_put(cell);
return PTR_ERR(*svsp_efuse);
}
@@ -1792,139 +1882,91 @@ static int svs_get_efuse_data(struct svs_platform *svsp,
return 0;
}
-static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
+static u32 svs_get_fuse_val(u32 *fuse_array, const struct svs_fusemap *fmap, u8 nbits)
{
- struct svs_bank *svsb;
- u32 idx, i, vmin, golden_temp;
- int ret;
-
- for (i = 0; i < svsp->efuse_max; i++)
- if (svsp->efuse[i])
- dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
- i, svsp->efuse[i]);
-
- if (!svsp->efuse[9]) {
- dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n");
- return false;
- }
-
- /* Svs efuse parsing */
- vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0);
-
- for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+ u32 val;
- if (vmin == 0x1)
- svsb->vmin = 0x1e;
-
- if (svsb->type == SVSB_LOW) {
- svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0);
- svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0);
- } else if (svsb->type == SVSB_HIGH) {
- svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0);
- svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0);
- }
+ if (fmap->index < 0)
+ return FUSE_DATA_NOT_VALID;
- svsb->vmax += svsb->dvt_fixed;
- }
+ val = fuse_array[fmap->index] >> fmap->ofst;
+ val &= GENMASK(nbits - 1, 0);
- ret = svs_get_efuse_data(svsp, "t-calibration-data",
- &svsp->tefuse, &svsp->tefuse_max);
- if (ret)
- return false;
+ return val;
+}
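svs_get_fuse_val() centralizes the (word index, bit offset, width) extraction that the removed per-SoC parsers open-coded, and an index of -1 short-circuits to FUSE_DATA_NOT_VALID so absent fuses stay detectable. Cross-check against one removed line: the MT8192 low-bank BDES entry later in this patch is { 10, 16 }, so with nbits = 8 the helper computes exactly the old expression:

    const struct svs_fusemap bdes = { 10, 16 };
    u32 v = svs_get_fuse_val(svsp->efuse, &bdes, 8);  /* (efuse[10] >> 16) & GENMASK(7, 0) */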
- for (i = 0; i < svsp->tefuse_max; i++)
- if (svsp->tefuse[i] != 0)
- break;
+static bool svs_is_available(struct svs_platform *svsp)
+{
+ int i, num_populated = 0;
- if (i == svsp->tefuse_max)
- golden_temp = 50; /* All thermal efuse data are 0 */
- else
- golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0);
+ /* If at least two fuse words are populated, SVS is calibrated */
+ for (i = 0; i < svsp->efuse_max; i++) {
+ if (svsp->efuse[i])
+ num_populated++;
- for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
- svsb->mts = 500;
- svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4;
+ if (num_populated > 1)
+ return true;
}
- return true;
+ return false;
}
-static bool svs_mt8188_efuse_parsing(struct svs_platform *svsp)
+static bool svs_common_parse_efuse(struct svs_platform *svsp,
+ const struct svs_platform_data *pdata)
{
- struct svs_bank *svsb;
- u32 idx, i, golden_temp;
- int ret;
-
- for (i = 0; i < svsp->efuse_max; i++)
- if (svsp->efuse[i])
- dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n",
- i, svsp->efuse[i]);
+ const struct svs_fusemap *gfmap = pdata->glb_fuse_map;
+ struct svs_fusemap tfm = { 0, 24 };
+ u32 golden_temp, val;
+ u8 ft_pgm, vmin;
+ int i;
- if (!svsp->efuse[5]) {
- dev_notice(svsp->dev, "svs_efuse[5] = 0x0?\n");
+ if (!svs_is_available(svsp))
return false;
- }
- /* Svs efuse parsing */
- for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+ /* Get golden temperature from SVS-Thermal calibration */
+ val = svs_get_fuse_val(svsp->tefuse, &tfm, 8);
- if (svsb->type == SVSB_LOW) {
- svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0);
- svsb->bdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[5] >> 24) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[15] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[15] >> 24) & GENMASK(7, 0);
- } else if (svsb->type == SVSB_HIGH) {
- svsb->mtdes = svsp->efuse[4] & GENMASK(7, 0);
- svsb->bdes = (svsp->efuse[4] >> 16) & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[4] >> 24) & GENMASK(7, 0);
- svsb->dcbdet = svsp->efuse[14] & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[14] >> 8) & GENMASK(7, 0);
- }
+ /* If golden temp is not programmed, use the default of 50 */
+ golden_temp = val ? val : 50;
- svsb->vmax += svsb->dvt_fixed;
- }
+ /* Parse fused SVS calibration */
+ ft_pgm = svs_get_fuse_val(svsp->efuse, &gfmap[GLB_FT_PGM], 8);
+ vmin = svs_get_fuse_val(svsp->efuse, &gfmap[GLB_VMIN], 2);
- ret = svs_get_efuse_data(svsp, "t-calibration-data",
- &svsp->tefuse, &svsp->tefuse_max);
- if (ret)
- return false;
+ for (i = 0; i < svsp->bank_max; i++) {
+ struct svs_bank *svsb = &svsp->banks[i];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
+ const struct svs_fusemap *dfmap = bdata->dev_fuse_map;
- for (i = 0; i < svsp->tefuse_max; i++)
- if (svsp->tefuse[i] != 0)
- break;
+ if (vmin == 1)
+ svsb->vmin = 0x1e;
- if (i == svsp->tefuse_max)
- golden_temp = 50; /* All thermal efuse data are 0 */
- else
- golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0);
+ if (ft_pgm == 0)
+ svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
- for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
- svsb->mts = 500;
- svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4;
+ svsb->mtdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MTDES], 8);
+ svsb->bdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_BDES], 8);
+ svsb->mdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MDES], 8);
+ svsb->dcbdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCBDET], 8);
+ svsb->dcmdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCMDET], 8);
+ svsb->vmax += svsb->dvt_fixed;
+
+ svsb->mts = (svsp->ts_coeff * 2) / 1000;
+ svsb->bts = (((500 * golden_temp + svsp->ts_coeff) / 1000) - 25) * 4;
}
return true;
}
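Quick arithmetic check against the removed parsers, assuming SVSB_TS_COEFF_MT8195 keeps the previously hardcoded 250460 (the constant is defined outside this hunk):

    mts = (250460 * 2) / 1000;                   /* = 500, the old fixed value */
    bts = ((500 * 50 + 250460) / 1000 - 25) * 4; /* = 1000 for golden_temp = 50, as before */

Both expressions reproduce the old results, so for these SoCs the common parser is a pure consolidation.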
-static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
+static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp,
+ const struct svs_platform_data *pdata)
{
struct svs_bank *svsb;
+ const struct svs_bank_pdata *bdata;
int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
int o_slope, o_slope_sign, ts_id;
u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
- int ret;
for (i = 0; i < svsp->efuse_max; i++)
if (svsp->efuse[i])
@@ -1937,74 +1979,48 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
}
/* Svs efuse parsing */
- ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0);
+ ft_pgm = svs_get_fuse_val(svsp->efuse, &pdata->glb_fuse_map[GLB_FT_PGM], 4);
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
+ const struct svs_fusemap *dfmap = bdata->dev_fuse_map;
if (ft_pgm <= 1)
svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
- svsb->bdes = svsp->efuse[16] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0);
- svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0);
+ svsb->mtdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MTDES], 8);
+ svsb->bdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_BDES], 8);
+ svsb->mdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MDES], 8);
+ svsb->dcbdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCBDET], 8);
+ svsb->dcmdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCMDET], 8);
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
+ case SVSB_SWID_CCI:
if (ft_pgm <= 3)
svsb->volt_od += 10;
else
svsb->volt_od += 2;
break;
- case SVSB_CPU_BIG:
- svsb->bdes = svsp->efuse[18] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0);
- svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0);
-
+ case SVSB_SWID_CPU_BIG:
if (ft_pgm <= 3)
svsb->volt_od += 15;
else
svsb->volt_od += 12;
break;
- case SVSB_CCI:
- svsb->bdes = svsp->efuse[4] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0);
- svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0);
-
- if (ft_pgm <= 3)
- svsb->volt_od += 10;
- else
- svsb->volt_od += 2;
- break;
- case SVSB_GPU:
- svsb->bdes = svsp->efuse[6] & GENMASK(7, 0);
- svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0);
- svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0);
- svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0);
- svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0);
-
- if (ft_pgm >= 2) {
+ case SVSB_SWID_GPU:
+ if (ft_pgm != FUSE_DATA_NOT_VALID && ft_pgm >= 2) {
svsb->freq_base = 800000000; /* 800MHz */
svsb->dvt_fixed = 2;
}
break;
default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
return false;
}
}
- ret = svs_get_efuse_data(svsp, "t-calibration-data",
- &svsp->tefuse, &svsp->tefuse_max);
- if (ret)
- return false;
-
/* Thermal efuse parsing */
adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0);
@@ -2064,23 +2080,24 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
for (idx = 0; idx < svsp->bank_max; idx++) {
svsb = &svsp->banks[idx];
+ bdata = &svsb->pdata;
svsb->mts = mts;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
tb_roomt = x_roomt[3];
break;
- case SVSB_CPU_BIG:
+ case SVSB_SWID_CPU_BIG:
tb_roomt = x_roomt[4];
break;
- case SVSB_CCI:
+ case SVSB_SWID_CCI:
tb_roomt = x_roomt[3];
break;
- case SVSB_GPU:
+ case SVSB_SWID_GPU:
tb_roomt = x_roomt[1];
break;
default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
goto remove_mt8183_svsb_mon_mode;
}
@@ -2153,7 +2170,6 @@ static struct device *svs_add_device_link(struct svs_platform *svsp,
static int svs_mt8192_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
- struct svs_bank *svsb;
u32 idx;
svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
@@ -2161,18 +2177,33 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst),
"cannot get svs reset control\n");
- dev = svs_add_device_link(svsp, "lvts");
+ dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get lvts device\n");
for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+ struct svs_bank *svsb = &svsp->banks[idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
- if (svsb->type == SVSB_HIGH)
- svsb->opp_dev = svs_add_device_link(svsp, "gpu");
- else if (svsb->type == SVSB_LOW)
- svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
+ case SVSB_SWID_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ break;
+ case SVSB_SWID_CCI:
+ svsb->opp_dev = svs_add_device_link(svsp, "cci");
+ break;
+ case SVSB_SWID_GPU:
+ if (bdata->type == SVSB_TYPE_LOW)
+ svsb->opp_dev = svs_get_subsys_device(svsp, "gpu");
+ else
+ svsb->opp_dev = svs_add_device_link(svsp, "gpu");
+ break;
+ default:
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
+ return -EINVAL;
+ }
if (IS_ERR(svsb->opp_dev))
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
@@ -2186,30 +2217,30 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
static int svs_mt8183_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
- struct svs_bank *svsb;
u32 idx;
- dev = svs_add_device_link(svsp, "thermal");
+ dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get thermal device\n");
for (idx = 0; idx < svsp->bank_max; idx++) {
- svsb = &svsp->banks[idx];
+ struct svs_bank *svsb = &svsp->banks[idx];
+ const struct svs_bank_pdata *bdata = &svsb->pdata;
- switch (svsb->sw_id) {
- case SVSB_CPU_LITTLE:
- case SVSB_CPU_BIG:
- svsb->opp_dev = get_cpu_device(svsb->cpu_id);
+ switch (bdata->sw_id) {
+ case SVSB_SWID_CPU_LITTLE:
+ case SVSB_SWID_CPU_BIG:
+ svsb->opp_dev = get_cpu_device(bdata->cpu_id);
break;
- case SVSB_CCI:
+ case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
break;
- case SVSB_GPU:
+ case SVSB_SWID_GPU:
svsb->opp_dev = svs_add_device_link(svsp, "gpu");
break;
default:
- dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id);
+ dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id);
return -EINVAL;
}
@@ -2222,241 +2253,544 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
return 0;
}
+static struct svs_bank svs_mt8195_banks[] = {
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 10, 16 }, { 10, 24 }, { 10, 0 }, { 8, 0 }, { 8, 8 }
+ }
+ },
+ .mode_support = SVSB_MODE_INIT02,
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .freq_base = 640000000,
+ .core_sel = 0x0fff0100,
+ .dvt_fixed = 0x1,
+ .vmax = 0x38,
+ .vmin = 0x14,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 9, 16 }, { 9, 24 }, { 9, 0 }, { 8, 0 }, { 8, 8 }
+ },
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 880000000,
+ .core_sel = 0x0fff0101,
+ .dvt_fixed = 0x6,
+ .vmax = 0x38,
+ .vmin = 0x14,
+ },
+};
+
static struct svs_bank svs_mt8192_banks[] = {
{
- .sw_id = SVSB_GPU,
- .type = SVSB_LOW,
- .set_freq_pct = svs_set_bank_freq_pct_v3,
- .get_volts = svs_get_bank_volts_v3,
- .tzone_name = "gpu1",
- .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
- .mode_support = SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 688000000,
- .turn_freq_base = 688000000,
- .volt_step = 6250,
- .volt_base = 400000,
- .vmax = 0x60,
- .vmin = 0x1a,
- .age_config = 0x555555,
- .dc_config = 0x1,
- .dvt_fixed = 0x1,
- .vco = 0x18,
- .chk_shift = 0x87,
- .core_sel = 0x0fff0100,
- .int_st = BIT(0),
- .ctl0 = 0x00540003,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 7,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 10, 16 }, { 10, 24 }, { 10, 0 }, { 17, 0 }, { 17, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .freq_base = 688000000,
+ .core_sel = 0x0fff0100,
+ .dvt_fixed = 0x1,
+ .vmax = 0x60,
+ .vmin = 0x1a,
},
{
- .sw_id = SVSB_GPU,
- .type = SVSB_HIGH,
- .set_freq_pct = svs_set_bank_freq_pct_v3,
- .get_volts = svs_get_bank_volts_v3,
- .tzone_name = "gpu1",
- .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT |
- SVSB_MON_VOLT_IGNORE,
- .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 902000000,
- .turn_freq_base = 688000000,
- .volt_step = 6250,
- .volt_base = 400000,
- .vmax = 0x60,
- .vmin = 0x1a,
- .age_config = 0x555555,
- .dc_config = 0x1,
- .dvt_fixed = 0x6,
- .vco = 0x18,
- .chk_shift = 0x87,
- .core_sel = 0x0fff0101,
- .int_st = BIT(1),
- .ctl0 = 0x00540003,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 7,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 688000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x18,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 9, 16 }, { 9, 24 }, { 17, 0 }, { 17, 16 }, { 17, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 902000000,
+ .core_sel = 0x0fff0101,
+ .dvt_fixed = 0x6,
+ .vmax = 0x60,
+ .vmin = 0x1a,
},
};
static struct svs_bank svs_mt8188_banks[] = {
{
- .sw_id = SVSB_GPU,
- .type = SVSB_LOW,
- .set_freq_pct = svs_set_bank_freq_pct_v3,
- .get_volts = svs_get_bank_volts_v3,
- .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
- .mode_support = SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 640000000,
- .turn_freq_base = 640000000,
- .volt_step = 6250,
- .volt_base = 400000,
- .vmax = 0x38,
- .vmin = 0x1c,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x1,
- .vco = 0x10,
- .chk_shift = 0x87,
- .core_sel = 0x0fff0000,
- .int_st = BIT(0),
- .ctl0 = 0x00100003,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00100003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 5, 16 }, { 5, 24 }, { 5, 0 }, { 15, 16 }, { 15, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .mode_support = SVSB_MODE_INIT02,
+ .freq_base = 640000000,
+ .core_sel = 0x0fff0000,
+ .dvt_fixed = 0x1,
+ .vmax = 0x38,
+ .vmin = 0x1c,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 640000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00100003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 4, 16 }, { 4, 24 }, { 4, 0 }, { 14, 0 }, { 14, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 880000000,
+ .core_sel = 0x0fff0001,
+ .dvt_fixed = 0x4,
+ .vmax = 0x38,
+ .vmin = 0x1c,
+ },
+};
+
+static struct svs_bank svs_mt8186_banks[] = {
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_BIG,
+ .type = SVSB_TYPE_LOW,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .cpu_id = 6,
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 1670000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(0),
+ .ctl0 = 0x00540003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 3, 16 }, { 3, 24 }, { 3, 0 }, { 14, 16 }, { 14, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT,
+ .volt_od = 4,
+ .mode_support = SVSB_MODE_INIT02,
+ .freq_base = 1670000000,
+ .core_sel = 0x0fff0100,
+ .dvt_fixed = 0x3,
+ .vmax = 0x59,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_BIG,
+ .type = SVSB_TYPE_HIGH,
+ .set_freq_pct = svs_set_bank_freq_pct_v3,
+ .get_volts = svs_get_bank_volts_v3,
+ .cpu_id = 6,
+ .tzone_name = "cpu-big",
+ .opp_count = MAX_OPP_ENTRIES,
+ .turn_freq_base = 1670000000,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(1),
+ .ctl0 = 0x00540003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 8,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 2, 16 }, { 2, 24 }, { 2, 0 }, { 13, 0 }, { 13, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .volt_od = 4,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 2050000000,
+ .core_sel = 0x0fff0101,
+ .dvt_fixed = 0x6,
+ .vmax = 0x73,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .tzone_name = "cpu-little",
+ .opp_count = MAX_OPP_ENTRIES,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(2),
+ .ctl0 = 0x3210000f,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 8,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 4, 16 }, { 4, 24 }, { 4, 0 }, { 14, 0 }, { 14, 8 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .volt_od = 3,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 2000000000,
+ .core_sel = 0x0fff0102,
+ .dvt_fixed = 0x6,
+ .vmax = 0x65,
+ .vmin = 0x20,
},
{
- .sw_id = SVSB_GPU,
- .type = SVSB_HIGH,
- .set_freq_pct = svs_set_bank_freq_pct_v3,
- .get_volts = svs_get_bank_volts_v3,
- .tzone_name = "gpu1",
- .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT |
- SVSB_MON_VOLT_IGNORE,
- .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 880000000,
- .turn_freq_base = 640000000,
- .volt_step = 6250,
- .volt_base = 400000,
- .vmax = 0x38,
- .vmin = 0x1c,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x4,
- .vco = 0x10,
- .chk_shift = 0x87,
- .core_sel = 0x0fff0001,
- .int_st = BIT(1),
- .ctl0 = 0x00100003,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 7,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .tzone_name = "cci",
+ .opp_count = MAX_OPP_ENTRIES,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x1,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(3),
+ .ctl0 = 0x3210000f,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 8,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 5, 16 }, { 5, 24 }, { 5, 0 }, { 15, 16 }, { 15, 24 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .volt_od = 3,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 1400000000,
+ .core_sel = 0x0fff0103,
+ .dvt_fixed = 0x6,
+ .vmax = 0x65,
+ .vmin = 0x20,
+ },
+ {
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .volt_step = 6250,
+ .volt_base = 400000,
+ .age_config = 0x555555,
+ .dc_config = 0x1,
+ .vco = 0x10,
+ .chk_shift = 0x87,
+ .int_st = BIT(4),
+ .ctl0 = 0x00100003,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 8,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 7,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 6, 16 }, { 6, 24 }, { 6, 0 }, { 15, 8 }, { 15, 0 }
+ }
+ },
+ .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE,
+ .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 850000000,
+ .core_sel = 0x0fff0104,
+ .dvt_fixed = 0x4,
+ .vmax = 0x58,
+ .vmin = 0x20,
},
};
static struct svs_bank svs_mt8183_banks[] = {
{
- .sw_id = SVSB_CPU_LITTLE,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .cpu_id = 0,
- .buck_name = "proc",
- .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 1989000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x64,
- .vmin = 0x18,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x7,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0000,
- .int_st = BIT(0),
- .ctl0 = 0x00010001,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_LITTLE,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 0,
+ .buck_name = "proc",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(0),
+ .ctl0 = 0x00010001,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 16, 0 }, { 16, 8 }, { 17, 16 }, { 16, 16 }, { 16, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .freq_base = 1989000000,
+ .core_sel = 0x8fff0000,
+ .dvt_fixed = 0x7,
+ .vmax = 0x64,
+ .vmin = 0x18,
},
{
- .sw_id = SVSB_CPU_BIG,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .cpu_id = 4,
- .buck_name = "proc",
- .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 1989000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x58,
- .vmin = 0x10,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x7,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0001,
- .int_st = BIT(1),
- .ctl0 = 0x00000001,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CPU_BIG,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .cpu_id = 4,
+ .buck_name = "proc",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(1),
+ .ctl0 = 0x00000001,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 18, 0 }, { 18, 8 }, { 17, 0 }, { 18, 16 }, { 18, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .freq_base = 1989000000,
+ .core_sel = 0x8fff0001,
+ .dvt_fixed = 0x7,
+ .vmax = 0x58,
+ .vmin = 0x10,
},
{
- .sw_id = SVSB_CCI,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .buck_name = "proc",
- .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 1196000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x64,
- .vmin = 0x18,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x7,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0002,
- .int_st = BIT(2),
- .ctl0 = 0x00100003,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_CCI,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "proc",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(2),
+ .ctl0 = 0x00100003,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 4, 0 }, { 4, 8 }, { 5, 16 }, { 4, 16 }, { 4, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02,
+ .freq_base = 1196000000,
+ .core_sel = 0x8fff0002,
+ .dvt_fixed = 0x7,
+ .vmax = 0x64,
+ .vmin = 0x18,
},
{
- .sw_id = SVSB_GPU,
- .set_freq_pct = svs_set_bank_freq_pct_v2,
- .get_volts = svs_get_bank_volts_v2,
- .buck_name = "mali",
- .tzone_name = "tzts2",
- .volt_flags = SVSB_INIT01_PD_REQ |
- SVSB_INIT01_VOLT_INC_ONLY,
- .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 |
- SVSB_MODE_MON,
- .opp_count = MAX_OPP_ENTRIES,
- .freq_base = 900000000,
- .vboot = 0x30,
- .volt_step = 6250,
- .volt_base = 500000,
- .vmax = 0x40,
- .vmin = 0x14,
- .age_config = 0x555555,
- .dc_config = 0x555555,
- .dvt_fixed = 0x3,
- .vco = 0x10,
- .chk_shift = 0x77,
- .core_sel = 0x8fff0003,
- .int_st = BIT(3),
- .ctl0 = 0x00050001,
- .tzone_htemp = 85000,
- .tzone_htemp_voffset = 0,
- .tzone_ltemp = 25000,
- .tzone_ltemp_voffset = 3,
+ .pdata = (const struct svs_bank_pdata) {
+ .sw_id = SVSB_SWID_GPU,
+ .set_freq_pct = svs_set_bank_freq_pct_v2,
+ .get_volts = svs_get_bank_volts_v2,
+ .buck_name = "mali",
+ .tzone_name = "gpu",
+ .opp_count = MAX_OPP_ENTRIES,
+ .vboot = 0x30,
+ .volt_step = 6250,
+ .volt_base = 500000,
+ .age_config = 0x555555,
+ .dc_config = 0x555555,
+ .vco = 0x10,
+ .chk_shift = 0x77,
+ .int_st = BIT(3),
+ .ctl0 = 0x00050001,
+ .tzone_htemp = 85000,
+ .tzone_htemp_voffset = 0,
+ .tzone_ltemp = 25000,
+ .tzone_ltemp_voffset = 3,
+ .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) {
+ { 6, 0 }, { 6, 8 }, { 5, 0 }, { 6, 16 }, { 6, 24 }
+ }
+ },
+ .volt_flags = SVSB_INIT01_PD_REQ | SVSB_INIT01_VOLT_INC_ONLY,
+ .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | SVSB_MODE_MON,
+ .freq_base = 900000000,
+ .core_sel = 0x8fff0003,
+ .dvt_fixed = 0x3,
+ .vmax = 0x40,
+ .vmin = 0x14,
},
};
+static const struct svs_platform_data svs_mt8195_platform_data = {
+ .name = "mt8195-svs",
+ .banks = svs_mt8195_banks,
+ .efuse_parsing = svs_common_parse_efuse,
+ .probe = svs_mt8192_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8195_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8195,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ { 0, 0 }, { 19, 4 }
+ }
+};
+
static const struct svs_platform_data svs_mt8192_platform_data = {
.name = "mt8192-svs",
.banks = svs_mt8192_banks,
- .efuse_parsing = svs_mt8192_efuse_parsing,
+ .efuse_parsing = svs_common_parse_efuse,
.probe = svs_mt8192_platform_probe,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8192_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8195,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* FT_PGM not present */
+ { -1, 0 }, { 19, 4 }
+ }
};
static const struct svs_platform_data svs_mt8188_platform_data = {
.name = "mt8188-svs",
.banks = svs_mt8188_banks,
- .efuse_parsing = svs_mt8188_efuse_parsing,
+ .efuse_parsing = svs_common_parse_efuse,
.probe = svs_mt8192_platform_probe,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8188_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8195,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* FT_PGM and VMIN not present */
+ { -1, 0 }, { -1, 0 }
+ }
+};
+
+static const struct svs_platform_data svs_mt8186_platform_data = {
+ .name = "mt8186-svs",
+ .banks = svs_mt8186_banks,
+ .efuse_parsing = svs_common_parse_efuse,
+ .probe = svs_mt8192_platform_probe,
+ .regs = svs_regs_v2,
+ .bank_max = ARRAY_SIZE(svs_mt8186_banks),
+ .ts_coeff = SVSB_TS_COEFF_MT8186,
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* FT_PGM and VMIN not present */
+ { -1, 0 }, { -1, 0 }
+ }
};
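A fuse map index of -1 is the "not fused on this SoC" sentinel: svs_get_fuse_val() returns FUSE_DATA_NOT_VALID for it, which is why the MT8183 GPU branch earlier in this patch guards the value before comparing:

    if (ft_pgm != FUSE_DATA_NOT_VALID && ft_pgm >= 2)
        /* only trust the fuse when it exists and is programmed */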
static const struct svs_platform_data svs_mt8183_platform_data = {
@@ -2466,21 +2800,19 @@ static const struct svs_platform_data svs_mt8183_platform_data = {
.probe = svs_mt8183_platform_probe,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8183_banks),
+ .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) {
+ /* VMIN not present */
+ { 0, 4 }, { -1, 0 }
+ }
};
static const struct of_device_id svs_of_match[] = {
- {
- .compatible = "mediatek,mt8192-svs",
- .data = &svs_mt8192_platform_data,
- }, {
- .compatible = "mediatek,mt8188-svs",
- .data = &svs_mt8188_platform_data,
- }, {
- .compatible = "mediatek,mt8183-svs",
- .data = &svs_mt8183_platform_data,
- }, {
- /* Sentinel */
- },
+ { .compatible = "mediatek,mt8195-svs", .data = &svs_mt8195_platform_data },
+ { .compatible = "mediatek,mt8192-svs", .data = &svs_mt8192_platform_data },
+ { .compatible = "mediatek,mt8188-svs", .data = &svs_mt8188_platform_data },
+ { .compatible = "mediatek,mt8186-svs", .data = &svs_mt8186_platform_data },
+ { .compatible = "mediatek,mt8183-svs", .data = &svs_mt8183_platform_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, svs_of_match);
@@ -2500,6 +2832,7 @@ static int svs_probe(struct platform_device *pdev)
svsp->banks = svsp_data->banks;
svsp->regs = svsp_data->regs;
svsp->bank_max = svsp_data->bank_max;
+ svsp->ts_coeff = svsp_data->ts_coeff;
ret = svsp_data->probe(svsp);
if (ret)
@@ -2507,20 +2840,24 @@ static int svs_probe(struct platform_device *pdev)
ret = svs_get_efuse_data(svsp, "svs-calibration-data",
&svsp->efuse, &svsp->efuse_max);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot read SVS calibration\n");
+
+ ret = svs_get_efuse_data(svsp, "t-calibration-data",
+ &svsp->tefuse, &svsp->tefuse_max);
if (ret) {
- ret = -EPERM;
+ dev_err_probe(&pdev->dev, ret, "Cannot read SVS-Thermal calibration\n");
goto svs_probe_free_efuse;
}
- if (!svsp_data->efuse_parsing(svsp)) {
- dev_err(svsp->dev, "efuse data parsing failed\n");
- ret = -EPERM;
+ if (!svsp_data->efuse_parsing(svsp, svsp_data)) {
+ ret = dev_err_probe(svsp->dev, -EINVAL, "efuse data parsing failed\n");
goto svs_probe_free_tefuse;
}
ret = svs_bank_resource_setup(svsp);
if (ret) {
- dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "svs bank resource setup fail\n");
goto svs_probe_free_tefuse;
}
@@ -2532,43 +2869,40 @@ static int svs_probe(struct platform_device *pdev)
svsp->main_clk = devm_clk_get(svsp->dev, "main");
if (IS_ERR(svsp->main_clk)) {
- dev_err(svsp->dev, "failed to get clock: %ld\n",
- PTR_ERR(svsp->main_clk));
- ret = PTR_ERR(svsp->main_clk);
+ ret = dev_err_probe(svsp->dev, PTR_ERR(svsp->main_clk),
+ "failed to get clock\n");
goto svs_probe_free_tefuse;
}
ret = clk_prepare_enable(svsp->main_clk);
if (ret) {
- dev_err(svsp->dev, "cannot enable main clk: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "cannot enable main clk\n");
goto svs_probe_free_tefuse;
}
svsp->base = of_iomap(svsp->dev->of_node, 0);
if (IS_ERR_OR_NULL(svsp->base)) {
- dev_err(svsp->dev, "cannot find svs register base\n");
- ret = -EINVAL;
+ ret = dev_err_probe(svsp->dev, -EINVAL, "cannot find svs register base\n");
goto svs_probe_clk_disable;
}
ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
IRQF_ONESHOT, svsp_data->name, svsp);
if (ret) {
- dev_err(svsp->dev, "register irq(%d) failed: %d\n",
- svsp_irq, ret);
+ dev_err_probe(svsp->dev, ret, "register irq(%d) failed\n", svsp_irq);
goto svs_probe_iounmap;
}
ret = svs_start(svsp);
if (ret) {
- dev_err(svsp->dev, "svs start fail: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "svs start fail\n");
goto svs_probe_iounmap;
}
#ifdef CONFIG_DEBUG_FS
ret = svs_create_debug_cmds(svsp);
if (ret) {
- dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret);
+ dev_err_probe(svsp->dev, ret, "svs create debug cmds fail\n");
goto svs_probe_iounmap;
}
#endif
@@ -2577,18 +2911,12 @@ static int svs_probe(struct platform_device *pdev)
svs_probe_iounmap:
iounmap(svsp->base);
-
svs_probe_clk_disable:
clk_disable_unprepare(svsp->main_clk);
-
svs_probe_free_tefuse:
- if (!IS_ERR_OR_NULL(svsp->tefuse))
- kfree(svsp->tefuse);
-
+ kfree(svsp->tefuse);
svs_probe_free_efuse:
- if (!IS_ERR_OR_NULL(svsp->efuse))
- kfree(svsp->efuse);
-
+ kfree(svsp->efuse);
return ret;
}
@@ -2606,5 +2934,6 @@ static struct platform_driver svs_driver = {
module_platform_driver(svs_driver);
MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
MODULE_DESCRIPTION("MediaTek SVS driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index f19e74d34..19f4b576f 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,6 +1,7 @@
config POLARFIRE_SOC_SYS_CTRL
tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
+ depends on MTD
help
This driver adds support for the PolarFire SoC (MPFS) system controller.
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
index 0935e9e94..7a4936019 100644
--- a/drivers/soc/microchip/mpfs-sys-controller.c
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -12,6 +12,8 @@
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/jiffies.h>
+#include <linux/mtd/mtd.h>
+#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/mailbox_client.h>
@@ -30,6 +32,7 @@ struct mpfs_sys_controller {
struct mbox_client client;
struct mbox_chan *chan;
struct completion c;
+ struct mtd_info *flash;
struct kref consumers;
};
@@ -63,7 +66,9 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *sys_controller, struct
*/
if (!wait_for_completion_timeout(&sys_controller->c, timeout)) {
ret = -EBADMSG;
- dev_warn(sys_controller->client.dev, "MPFS sys controller service failed\n");
+ dev_warn(sys_controller->client.dev,
+ "MPFS sys controller service failed with status: %d\n",
+ msg->response->resp_status);
} else {
ret = 0;
}
@@ -99,6 +104,12 @@ static void mpfs_sys_controller_put(void *data)
kref_put(&sys_controller->consumers, mpfs_sys_controller_delete);
}
+struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client)
+{
+ return mpfs_client->flash;
+}
+EXPORT_SYMBOL(mpfs_sys_controller_get_flash);
+
static struct platform_device subdevs[] = {
{
.name = "mpfs-rng",
@@ -107,19 +118,34 @@ static struct platform_device subdevs[] = {
{
.name = "mpfs-generic-service",
.id = -1,
- }
+ },
+ {
+ .name = "mpfs-auto-update",
+ .id = -1,
+ },
};
static int mpfs_sys_controller_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mpfs_sys_controller *sys_controller;
+ struct device_node *np;
int i, ret;
sys_controller = kzalloc(sizeof(*sys_controller), GFP_KERNEL);
if (!sys_controller)
return -ENOMEM;
+ np = of_parse_phandle(dev->of_node, "microchip,bitstream-flash", 0);
+ if (!np)
+ goto no_flash;
+
+ sys_controller->flash = of_get_mtd_device_by_node(np);
+ of_node_put(np);
+ if (IS_ERR(sys_controller->flash))
+ return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n");
+
+no_flash:
sys_controller->client.dev = dev;
sys_controller->client.rx_callback = mpfs_sys_controller_rx_callback;
sys_controller->client.tx_block = 1U;
@@ -138,7 +164,6 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sys_controller);
- dev_info(&pdev->dev, "Registered MPFS system controller\n");
for (i = 0; i < ARRAY_SIZE(subdevs); i++) {
subdevs[i].dev.parent = dev;
@@ -146,6 +171,8 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
dev_warn(dev, "Error registering sub device %s\n", subdevs[i].name);
}
+ dev_info(&pdev->dev, "Registered MPFS system controller\n");
+
return 0;
}
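With mpfs_sys_controller_get_flash() exported and an mpfs-auto-update subdevice now registered, a child driver can reach the bitstream flash through its parent's drvdata, set by platform_set_drvdata() in the probe above. A hedged sketch; everything except the exported getter is illustrative:

static int example_find_flash(struct platform_device *pdev)
{
	struct mpfs_sys_controller *sys_controller =
		dev_get_drvdata(pdev->dev.parent);	/* parent is the sys controller */
	struct mtd_info *flash;

	flash = mpfs_sys_controller_get_flash(sys_controller);
	if (!flash)
		return -ENODEV;	/* DT carried no microchip,bitstream-flash phandle */

	dev_info(&pdev->dev, "bitstream flash: %s\n", flash->name);
	return 0;
}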
diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c
index a1e8a07f7..7af04e8b8 100644
--- a/drivers/soc/pxa/ssp.c
+++ b/drivers/soc/pxa/ssp.c
@@ -152,11 +152,11 @@ static int pxa_ssp_probe(struct platform_device *pdev)
if (dev->of_node) {
const struct of_device_id *id =
of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
- ssp->type = (int) id->data;
+ ssp->type = (uintptr_t) id->data;
} else {
const struct platform_device_id *id =
platform_get_device_id(pdev);
- ssp->type = (int) id->driver_data;
+ ssp->type = id->driver_data;
	/* PXA2xx/3xx SSP ports start from 1 and the internal pdev->id
* starts from 0, do a translation here
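Both casts above fix pointer-width warnings rather than behaviour: id->data is a const void *, so casting it straight to int truncates on 64-bit builds, while id->driver_data is already an integer (kernel_ulong_t) and needs no cast at all. An illustrative reduction, not from the patch:

static enum pxa_ssp_type example_type_from_match(const struct of_device_id *id)
{
	/*
	 * (uintptr_t) makes the pointer-to-integer conversion explicit and
	 * lossless before the implicit narrowing to the enum, silencing
	 * -Wpointer-to-int-cast on 64-bit builds.
	 */
	return (uintptr_t)id->data;
}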
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b3634e10f..c6ca4de42 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -77,6 +77,18 @@ config QCOM_PDR_HELPERS
select QCOM_QMI_HELPERS
depends on NET
+config QCOM_PMIC_PDCHARGER_ULOG
+ tristate "Qualcomm PMIC PDCharger ULOG driver"
+ depends on RPMSG
+ depends on EVENT_TRACING
+ help
+ The Qualcomm PMIC PDCharger ULOG driver provides access to logs of
+ the ADSP firmware PDCharger module in charge of Battery and Power
+ Delivery on modern systems.
+
+ Say yes here to support PDCharger ULOG event tracing on modern
+ Qualcomm platforms.
+
config QCOM_PMIC_GLINK
tristate "Qualcomm PMIC GLINK driver"
depends on RPMSG
@@ -86,6 +98,7 @@ config QCOM_PMIC_GLINK
depends on OF
select AUXILIARY_BUS
select QCOM_PDR_HELPERS
+ select DRM_AUX_HPD_BRIDGE
help
The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
USB and battery firmware running on one of the coprocessors in
@@ -209,6 +222,7 @@ config QCOM_STATS
tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver"
depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST
depends on QCOM_SMEM
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
help
Qualcomm Technologies, Inc. (QTI) Sleep stats driver to read
the shared memory exported by the remote processor related to
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index bbca2e1e5..05b3d54e8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o
obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o
+obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o
+CFLAGS_pmic_pdcharger_ulog.o := -I$(src)
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 1b7e921d4..cbef0dea1 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -92,6 +92,19 @@
* @write_scid_en: Bit enables write cache support for a given scid.
* @write_scid_cacheable_en: Enables write cache cacheable support for a
* given scid (not supported on v2 or older hardware).
+ * @stale_en: Bit enables stale.
+ * @stale_cap_en: Bit enables stale only if current scid is over-cap.
+ * @mru_uncap_en: Roll-over on reserved cache ways if current scid is
+ * under-cap.
+ * @mru_rollover: Roll-over on reserved cache ways.
+ * @alloc_oneway_en: Always allocate one way on over-cap even if there are no
+ * same-scid lines for replacement.
+ * @ovcap_en: Once current scid is over-capacity, allocate other over-cap SCID.
+ * @ovcap_prio: Once current scid is over-capacity, allocate other low priority
+ * over-cap scid. Depends on corresponding bit being set in
+ * ovcap_en.
+ * @vict_prio: When current scid is under-capacity, allocate over other
+ * lower-than victim priority-line threshold scid.
*/
struct llcc_slice_config {
u32 usecase_id;
@@ -362,6 +375,33 @@ static const struct llcc_slice_config sm8550_data[] = {
{LLCC_VIDVSP, 28, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
};
+static const struct llcc_slice_config sm8650_data[] = {
+ {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_AUDIO, 6, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MDMHPGRW, 25, 1024, 3, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MDMHPFX, 24, 1024, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MDMPNG, 27, 1024, 0, 1, 0x000000, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CVP, 8, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MODPE, 29, 128, 1, 1, 0xF00000, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP0, 4, 256, 3, 1, 0xF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CMPTHCP, 17, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_LCPDARE, 30, 128, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
+ {LLCC_AENPU, 3, 3072, 1, 1, 0xFFFFFF, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_ISLAND1, 12, 5888, 7, 1, 0x0, 0x7FFFFF, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_DISP_WB, 23, 1024, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_VIDVSP, 28, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
static const struct llcc_slice_config qdu1000_data_2ch[] = {
{ LLCC_MDMHPGRW, 7, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
{ LLCC_MODHW, 9, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 },
@@ -392,6 +432,29 @@ static const struct llcc_slice_config qdu1000_data_8ch[] = {
{ LLCC_WRCACHE, 31, 512, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 1, 0, 0 },
};
+static const struct llcc_slice_config x1e80100_data[] = {
+ {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_AUDIO, 6, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CMPT, 10, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_GPUHTW, 11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_GPU, 9, 4096, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CVP, 8, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP1, 7, 3072, 2, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_AENPU, 3, 3072, 1, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_ISLAND1, 12, 512, 7, 1, 0x1, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_ISLAND2, 13, 512, 7, 1, 0x2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_ISLAND3, 14, 512, 7, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_ISLAND4, 15, 512, 7, 1, 0x4, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP3, 20, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {LLCC_CAMEXP4, 21, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
.trp_ecc_error_status0 = 0x20344,
.trp_ecc_error_status1 = 0x20348,
@@ -610,6 +673,26 @@ static const struct qcom_llcc_config sm8550_cfg[] = {
},
};
+static const struct qcom_llcc_config sm8650_cfg[] = {
+ {
+ .sct_data = sm8650_data,
+ .size = ARRAY_SIZE(sm8650_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
+static const struct qcom_llcc_config x1e80100_cfg[] = {
+ {
+ .sct_data = x1e80100_data,
+ .size = ARRAY_SIZE(x1e80100_data),
+ .need_llcc_cfg = true,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
+ },
+};
+
static const struct qcom_sct_config qdu1000_cfgs = {
.llcc_config = qdu1000_cfg,
.num_config = ARRAY_SIZE(qdu1000_cfg),
@@ -675,6 +758,16 @@ static const struct qcom_sct_config sm8550_cfgs = {
.num_config = ARRAY_SIZE(sm8550_cfg),
};
+static const struct qcom_sct_config sm8650_cfgs = {
+ .llcc_config = sm8650_cfg,
+ .num_config = ARRAY_SIZE(sm8650_cfg),
+};
+
+static const struct qcom_sct_config x1e80100_cfgs = {
+ .llcc_config = x1e80100_cfg,
+ .num_config = ARRAY_SIZE(x1e80100_cfg),
+};
+
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
/**
@@ -715,7 +808,7 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid)
EXPORT_SYMBOL_GPL(llcc_slice_getd);
/**
- * llcc_slice_putd - llcc slice descritpor
+ * llcc_slice_putd - llcc slice descriptor
* @desc: Pointer to llcc slice descriptor
*/
void llcc_slice_putd(struct llcc_slice_desc *desc)
@@ -1251,6 +1344,8 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfgs },
{ .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfgs },
{ .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfgs },
+ { .compatible = "qcom,sm8650-llcc", .data = &sm8650_cfgs },
+ { .compatible = "qcom,x1e80100-llcc", .data = &x1e80100_cfgs },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_llcc_of_match);
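For orientation, the sm8650/x1e80100 slice tables added above are consumed through the long-standing LLCC client API rather than anything new in this hunk. A minimal usage sketch, with error handling trimmed and the final hardware-programming step left as a comment since it is client-specific:

#include <linux/soc/qcom/llcc-qcom.h>

static int example_claim_gpu_slice(void)
{
	struct llcc_slice_desc *desc;
	int ret;

	desc = llcc_slice_getd(LLCC_GPU);	/* may be ERR_PTR(-EPROBE_DEFER) */
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ret = llcc_slice_activate(desc);
	if (ret) {
		llcc_slice_putd(desc);
		return ret;
	}

	/* ... program llcc_get_slice_id(desc) into the client hardware ... */
	return 0;
}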
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index d05e15f10..f913e9bd5 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -18,9 +18,6 @@ enum {
PMIC_GLINK_CLIENT_UCSI,
};
-#define PMIC_GLINK_CLIENT_DEFAULT (BIT(PMIC_GLINK_CLIENT_BATT) | \
- BIT(PMIC_GLINK_CLIENT_ALTMODE))
-
struct pmic_glink {
struct device *dev;
struct pdr_handle *pdr;
@@ -263,10 +260,10 @@ static int pmic_glink_probe(struct platform_device *pdev)
mutex_init(&pg->state_lock);
match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
- if (match_data)
- pg->client_mask = *match_data;
- else
- pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT;
+ if (!match_data)
+ return -EINVAL;
+
+ pg->client_mask = *match_data;
pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
if (IS_ERR(pg->pdr)) {
@@ -337,14 +334,17 @@ static void pmic_glink_remove(struct platform_device *pdev)
mutex_unlock(&__pmic_glink_lock);
}
+static const unsigned long pmic_glink_sc8180x_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
+ BIT(PMIC_GLINK_CLIENT_ALTMODE);
+
static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) |
BIT(PMIC_GLINK_CLIENT_ALTMODE) |
BIT(PMIC_GLINK_CLIENT_UCSI);
static const struct of_device_id pmic_glink_of_match[] = {
- { .compatible = "qcom,sm8450-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
- { .compatible = "qcom,sm8550-pmic-glink", .data = &pmic_glink_sm8450_client_mask },
- { .compatible = "qcom,pmic-glink" },
+ { .compatible = "qcom,sc8180x-pmic-glink", .data = &pmic_glink_sc8180x_client_mask },
+ { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8180x_client_mask },
+ { .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask },
{}
};
MODULE_DEVICE_TABLE(of, pmic_glink_of_match);
@@ -364,14 +364,14 @@ static int pmic_glink_init(void)
register_rpmsg_driver(&pmic_glink_rpmsg_driver);
return 0;
-};
+}
module_init(pmic_glink_init);
static void pmic_glink_exit(void)
{
unregister_rpmsg_driver(&pmic_glink_rpmsg_driver);
platform_driver_unregister(&pmic_glink_driver);
-};
+}
module_exit(pmic_glink_exit);
MODULE_DESCRIPTION("Qualcomm PMIC GLINK driver");
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index ca58bfa41..b3808fc24 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct drm_bridge bridge;
+ struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,13 +230,13 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- if (alt_port->hpd_state)
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
- else
- drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
+ alt_port->hpd_state ?
+ connector_status_connected :
+ connector_status_disconnected);
pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
-};
+}
static enum typec_orientation pmic_glink_altmode_orientation(unsigned int orientation)
{
@@ -365,16 +365,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv
}
}
-static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
- .attach = pmic_glink_altmode_attach,
-};
-
static void pmic_glink_altmode_put_retimer(void *data)
{
typec_retimer_put(data);
@@ -464,10 +454,11 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
- alt_port->bridge.of_node = to_of_node(fwnode);
- alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
- alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
+ if (IS_ERR(alt_port->bridge)) {
+ fwnode_handle_put(fwnode);
+ return PTR_ERR(alt_port->bridge);
+ }
alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
@@ -521,10 +512,10 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
alt_port = &altmode->ports[port];
- if (!alt_port->altmode)
+ if (!alt_port->bridge)
continue;
- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+ ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
if (ret)
return ret;
}
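The altmode conversion above is an instance of the new DRM aux HPD bridge helpers from <drm/bridge/aux-bridge.h>. Lifted out of the driver glue, the pattern reduces to the sketch below; all three calls appear verbatim in the hunk, while the wrapper function is invented:

static int example_register_hpd_bridge(struct device *dev,
				       struct device_node *np)
{
	struct auxiliary_device *adev;
	int ret;

	adev = devm_drm_dp_hpd_bridge_alloc(dev, np);	/* allocate the bridge device */
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	ret = devm_drm_dp_hpd_bridge_add(dev, adev);	/* register it with DRM */
	if (ret)
		return ret;

	/* later, from the hotplug worker: */
	drm_aux_hpd_bridge_notify(&adev->dev, connector_status_connected);
	return 0;
}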
diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.c b/drivers/soc/qcom/pmic_pdcharger_ulog.c
new file mode 100644
index 000000000..238cd3858
--- /dev/null
+++ b/drivers/soc/qcom/pmic_pdcharger_ulog.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd
+ */
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/pdr.h>
+#include <linux/debugfs.h>
+
+#define CREATE_TRACE_POINTS
+#include "pmic_pdcharger_ulog.h"
+
+#define MSG_OWNER_CHG_ULOG 32778
+#define MSG_TYPE_REQ_RESP 1
+
+#define GET_CHG_ULOG_REQ 0x18
+#define SET_CHG_ULOG_PROP_REQ 0x19
+
+#define LOG_DEFAULT_TIME_MS 1000
+
+#define MAX_ULOG_SIZE 8192
+
+struct pmic_pdcharger_ulog_hdr {
+ __le32 owner;
+ __le32 type;
+ __le32 opcode;
+};
+
+struct pmic_pdcharger_ulog {
+ struct rpmsg_device *rpdev;
+ struct delayed_work ulog_work;
+};
+
+struct get_ulog_req_msg {
+ struct pmic_pdcharger_ulog_hdr hdr;
+ u32 log_size;
+};
+
+struct get_ulog_resp_msg {
+ struct pmic_pdcharger_ulog_hdr hdr;
+ u8 buf[MAX_ULOG_SIZE];
+};
+
+static int pmic_pdcharger_ulog_write_async(struct pmic_pdcharger_ulog *pg, void *data, size_t len)
+{
+ return rpmsg_send(pg->rpdev->ept, data, len);
+}
+
+static int pmic_pdcharger_ulog_request(struct pmic_pdcharger_ulog *pg)
+{
+ struct get_ulog_req_msg req_msg = {
+ .hdr = {
+ .owner = cpu_to_le32(MSG_OWNER_CHG_ULOG),
+ .type = cpu_to_le32(MSG_TYPE_REQ_RESP),
+ .opcode = cpu_to_le32(GET_CHG_ULOG_REQ)
+ },
+ .log_size = MAX_ULOG_SIZE
+ };
+
+ return pmic_pdcharger_ulog_write_async(pg, &req_msg, sizeof(req_msg));
+}
+
+static void pmic_pdcharger_ulog_work(struct work_struct *work)
+{
+ struct pmic_pdcharger_ulog *pg = container_of(work, struct pmic_pdcharger_ulog,
+ ulog_work.work);
+ int rc;
+
+ rc = pmic_pdcharger_ulog_request(pg);
+ if (rc) {
+ dev_err(&pg->rpdev->dev, "Error requesting ulog, rc=%d\n", rc);
+ return;
+ }
+}
+
+static void pmic_pdcharger_ulog_handle_message(struct pmic_pdcharger_ulog *pg,
+ struct get_ulog_resp_msg *resp_msg,
+ size_t len)
+{
+ char *token, *buf = resp_msg->buf;
+
+ if (len != sizeof(*resp_msg)) {
+ dev_err(&pg->rpdev->dev, "Expected data length: %zu, received: %zu\n",
+ sizeof(*resp_msg), len);
+ return;
+ }
+
+ buf[MAX_ULOG_SIZE - 1] = '\0';
+
+ do {
+ token = strsep((char **)&buf, "\n");
+ if (token && strlen(token))
+ trace_pmic_pdcharger_ulog_msg(token);
+ } while (token);
+}
+
+static int pmic_pdcharger_ulog_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 addr)
+{
+ struct pmic_pdcharger_ulog *pg = dev_get_drvdata(&rpdev->dev);
+ struct pmic_pdcharger_ulog_hdr *hdr = data;
+ u32 opcode;
+
+ opcode = le32_to_cpu(hdr->opcode);
+
+ switch (opcode) {
+ case GET_CHG_ULOG_REQ:
+ schedule_delayed_work(&pg->ulog_work, msecs_to_jiffies(LOG_DEFAULT_TIME_MS));
+ pmic_pdcharger_ulog_handle_message(pg, data, len);
+ break;
+ default:
+ dev_err(&pg->rpdev->dev, "Unknown opcode %u\n", opcode);
+ break;
+ }
+
+ return 0;
+}
+
+static int pmic_pdcharger_ulog_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct pmic_pdcharger_ulog *pg;
+ struct device *dev = &rpdev->dev;
+
+ pg = devm_kzalloc(dev, sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ pg->rpdev = rpdev;
+ INIT_DELAYED_WORK(&pg->ulog_work, pmic_pdcharger_ulog_work);
+
+ dev_set_drvdata(dev, pg);
+
+ pmic_pdcharger_ulog_request(pg);
+
+ return 0;
+}
+
+static void pmic_pdcharger_ulog_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct pmic_pdcharger_ulog *pg = dev_get_drvdata(&rpdev->dev);
+
+ cancel_delayed_work_sync(&pg->ulog_work);
+}
+
+static const struct rpmsg_device_id pmic_pdcharger_ulog_rpmsg_id_match[] = {
+ { "PMIC_LOGS_ADSP_APPS" },
+ {}
+};
+
+static struct rpmsg_driver pmic_pdcharger_ulog_rpmsg_driver = {
+ .probe = pmic_pdcharger_ulog_rpmsg_probe,
+ .remove = pmic_pdcharger_ulog_rpmsg_remove,
+ .callback = pmic_pdcharger_ulog_rpmsg_callback,
+ .id_table = pmic_pdcharger_ulog_rpmsg_id_match,
+ .drv = {
+ .name = "qcom_pmic_pdcharger_ulog_rpmsg",
+ },
+};
+
+module_rpmsg_driver(pmic_pdcharger_ulog_rpmsg_driver);
+MODULE_DESCRIPTION("Qualcomm PMIC ChargerPD ULOG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.h b/drivers/soc/qcom/pmic_pdcharger_ulog.h
new file mode 100644
index 000000000..152e3a6b5
--- /dev/null
+++ b/drivers/soc/qcom/pmic_pdcharger_ulog.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Ltd
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pmic_pdcharger_ulog
+
+#if !defined(_TRACE_PMIC_PDCHARGER_ULOG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PMIC_PDCHARGER_ULOG_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(pmic_pdcharger_ulog_msg,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+#endif /* _TRACE_PMIC_PDCHARGER_ULOG_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE pmic_pdcharger_ulog
+
+#include <trace/define_trace.h>
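The header above is the standard self-including tracepoint boilerplate; how the pieces fit: exactly one compilation unit defines CREATE_TRACE_POINTS before the include (pmic_pdcharger_ulog.c does, above), which makes <trace/define_trace.h> emit the body of trace_pmic_pdcharger_ulog_msg(), and TRACE_INCLUDE_PATH set to "." is what the Makefile's CFLAGS_pmic_pdcharger_ulog.o := -I$(src) line serves. At runtime the event then appears under events/pmic_pdcharger_ulog/ in tracefs.

#define CREATE_TRACE_POINTS	/* in exactly one .c file, before the include */
#include "pmic_pdcharger_ulog.h"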
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 9865964cf..a980020ab 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -51,6 +51,11 @@
#define SMEM_IMAGE_TABLE_ADSP_INDEX 12
#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
#define SMEM_IMAGE_TABLE_VIDEO_INDEX 14
+#define SMEM_IMAGE_TABLE_DSPS_INDEX 15
+#define SMEM_IMAGE_TABLE_CDSP_INDEX 16
+#define SMEM_IMAGE_TABLE_CDSP1_INDEX 19
+#define SMEM_IMAGE_TABLE_GPDSP_INDEX 20
+#define SMEM_IMAGE_TABLE_GPDSP1_INDEX 21
#define SMEM_IMAGE_VERSION_TABLE 469
/*
@@ -65,6 +70,11 @@ static const char *const socinfo_image_names[] = {
[SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm",
[SMEM_IMAGE_TABLE_TZ_INDEX] = "tz",
[SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video",
+ [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps",
+ [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp",
+ [SMEM_IMAGE_TABLE_CDSP1_INDEX] = "cdsp1",
+ [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp",
+ [SMEM_IMAGE_TABLE_GPDSP1_INDEX] = "gpdsp1",
};
static const char *const pmic_models[] = {
@@ -93,7 +103,7 @@ static const char *const pmic_models[] = {
[22] = "PM8821",
[23] = "PM8038",
[24] = "PM8005/PM8922",
- [25] = "PM8917",
+ [25] = "PM8917/PM8937",
[26] = "PM660L",
[27] = "PM660",
[30] = "PM8150",
@@ -417,6 +427,7 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(SA8775P) },
{ qcom_board_id(QRU1000) },
{ qcom_board_id(QDU1000) },
+ { qcom_board_id(SM8650) },
{ qcom_board_id(SM4450) },
{ qcom_board_id(QDU1010) },
{ qcom_board_id(QRU1032) },
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 0071864c2..0986672f6 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -340,6 +340,7 @@ if RISCV
config ARCH_R9A07G043
bool "RISC-V Platform support for RZ/Five"
depends on NONPORTABLE
+ depends on !DMA_DIRECT_REMAP
depends on RISCV_ALTERNATIVE
depends on !RISCV_ISA_ZICBOM
depends on RISCV_SBI
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index c732d4a5b..27eae1a35 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -486,10 +486,6 @@ static int __init renesas_soc_init(void)
return -ENOMEM;
}
- np = of_find_node_by_path("/");
- of_property_read_string(np, "model", &soc_dev_attr->machine);
- of_node_put(np);
-
soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 3fd0f2b84..b1118d377 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -59,6 +59,7 @@ static const struct exynos_soc_id {
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOSAUTOV9", 0xAAA80000 },
+ { "EXYNOSAUTOV920", 0x0A920000 },
};
static const char *product_id_to_soc_id(unsigned int product_id)
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
deleted file mode 100644
index 139884add..000000000
--- a/drivers/soc/sifive/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-if ARCH_SIFIVE || ARCH_STARFIVE
-
-config SIFIVE_CCACHE
- bool "Sifive Composable Cache controller"
- help
- Support for the composable cache controller on SiFive platforms.
-
-endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
deleted file mode 100644
index 1f5dc339b..000000000
--- a/drivers/soc/sifive/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 7fc3548e0..59101bf7c 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -33,19 +33,35 @@
#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
+#define JTAG_ID_PARTNO_AM65X 0xBB5A
+#define JTAG_ID_PARTNO_J721E 0xBB64
+#define JTAG_ID_PARTNO_J7200 0xBB6D
+#define JTAG_ID_PARTNO_AM64X 0xBB38
+#define JTAG_ID_PARTNO_J721S2 0xBB75
+#define JTAG_ID_PARTNO_AM62X 0xBB7E
+#define JTAG_ID_PARTNO_J784S4 0xBB80
+#define JTAG_ID_PARTNO_AM62AX 0xBB8D
+#define JTAG_ID_PARTNO_AM62PX 0xBB9D
+#define JTAG_ID_PARTNO_J722S 0xBBA0
+
static const struct k3_soc_id {
unsigned int id;
const char *family_name;
} k3_soc_ids[] = {
- { 0xBB5A, "AM65X" },
- { 0xBB64, "J721E" },
- { 0xBB6D, "J7200" },
- { 0xBB38, "AM64X" },
- { 0xBB75, "J721S2"},
- { 0xBB7E, "AM62X" },
- { 0xBB80, "J784S4" },
- { 0xBB8D, "AM62AX" },
- { 0xBB9D, "AM62PX" },
+ { JTAG_ID_PARTNO_AM65X, "AM65X" },
+ { JTAG_ID_PARTNO_J721E, "J721E" },
+ { JTAG_ID_PARTNO_J7200, "J7200" },
+ { JTAG_ID_PARTNO_AM64X, "AM64X" },
+ { JTAG_ID_PARTNO_J721S2, "J721S2"},
+ { JTAG_ID_PARTNO_AM62X, "AM62X" },
+ { JTAG_ID_PARTNO_J784S4, "J784S4" },
+ { JTAG_ID_PARTNO_AM62AX, "AM62AX" },
+ { JTAG_ID_PARTNO_AM62PX, "AM62PX" },
+ { JTAG_ID_PARTNO_J722S, "J722S" },
+};
+
+static const char * const j721e_rev_string_map[] = {
+ "1.0", "1.1",
};
static int
@@ -63,6 +79,32 @@ k3_chipinfo_partno_to_names(unsigned int partno,
return -ENODEV;
}
+static int
+k3_chipinfo_variant_to_sr(unsigned int partno, unsigned int variant,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ switch (partno) {
+ case JTAG_ID_PARTNO_J721E:
+ if (variant >= ARRAY_SIZE(j721e_rev_string_map))
+ goto err_unknown_variant;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s",
+ j721e_rev_string_map[variant]);
+ break;
+ default:
+ variant++;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0",
+ variant);
+ }
+
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ return 0;
+
+err_unknown_variant:
+ return -ENODEV;
+}
+
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -94,7 +136,6 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
- variant++;
partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
@@ -103,16 +144,16 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
if (!soc_dev_attr)
return -ENOMEM;
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
- if (!soc_dev_attr->revision) {
- ret = -ENOMEM;
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]: %d\n", jtag_id, ret);
goto err;
}
- ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ ret = k3_chipinfo_variant_to_sr(partno_id, variant, soc_dev_attr);
if (ret) {
- dev_err(dev, "Unknown SoC JTAGID[0x%08X]: %d\n", jtag_id, ret);
- goto err_free_rev;
+ dev_err(dev, "Unknown SoC SR[0x%08X]: %d\n", jtag_id, ret);
+ goto err;
}
node = of_find_node_by_path("/");
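Worked example of the new revision scheme, using only values visible above: on J721E the JTAGID variant field indexes j721e_rev_string_map directly, so variant 0 reports "SR1.0" and variant 1 reports "SR1.1", while anything past the table is rejected as an unknown SR. Every other part keeps the legacy encoding of incrementing the field and printing it in hex, so variant 0 still yields "SR1.0" and variant 2 yields "SR3.0".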
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
index 042553abe..253299e42 100644
--- a/drivers/soc/xilinx/xlnx_event_manager.c
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -77,11 +77,26 @@ struct registered_event_data {
static bool xlnx_is_error_event(const u32 node_id)
{
- if (node_id == EVENT_ERROR_PMC_ERR1 ||
- node_id == EVENT_ERROR_PMC_ERR2 ||
- node_id == EVENT_ERROR_PSM_ERR1 ||
- node_id == EVENT_ERROR_PSM_ERR2)
- return true;
+ u32 pm_family_code, pm_sub_family_code;
+
+ zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+
+ if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) {
+ if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 ||
+ node_id == VERSAL_EVENT_ERROR_PMC_ERR2 ||
+ node_id == VERSAL_EVENT_ERROR_PSM_ERR1 ||
+ node_id == VERSAL_EVENT_ERROR_PSM_ERR2)
+ return true;
+ } else {
+ if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 ||
+ node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4)
+ return true;
+ }
return false;
}
@@ -483,7 +498,7 @@ static void xlnx_call_notify_cb_handler(const u32 *payload)
static void xlnx_get_event_callback_data(u32 *buf)
{
- zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}
static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
@@ -656,7 +671,11 @@ static int xlnx_event_manager_probe(struct platform_device *pdev)
ret = zynqmp_pm_register_sgi(sgi_num, 0);
if (ret) {
- dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
+ if (ret == -EOPNOTSUPP)
+ dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n");
+ else
+ dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret);
+
xlnx_event_cleanup_sgi(pdev);
return ret;
}
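Both GET_CALLBACK_DATA call sites in this series shrink from four mandatory zero arguments to (buf, 0), which matches a zynqmp_pm_invoke_fn() made variadic. The signature below is inferred from the call shape, not quoted from the patch:

int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);

/* old: zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); */
/* new: zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);          */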
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index c2c819701..965b11439 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -51,7 +51,7 @@ static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
static void zynqmp_pm_get_callback_data(u32 *buf)
{
- zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0);
}
static void suspend_event_callback(const u32 *payload, void *data)
@@ -83,9 +83,11 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data)
pm_suspend(PM_SUSPEND_MEM);
break;
default:
- pr_err("%s Unsupported InitSuspendCb reason "
- "code %d\n", __func__, payload[1]);
+ pr_err("%s Unsupported InitSuspendCb reason code %d\n",
+ __func__, payload[1]);
}
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, payload[0]);
}
return IRQ_HANDLED;
@@ -252,8 +254,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
dev_name(&pdev->dev),
&pdev->dev);
if (ret) {
- dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
- "failed with %d\n", irq, ret);
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed with %d\n",
+ irq, ret);
return ret;
}
} else {
@@ -275,7 +277,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
return 0;
}
-static int zynqmp_pm_remove(struct platform_device *pdev)
+static void zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (event_registered)
@@ -283,8 +285,6 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
if (!rx_chan)
mbox_free_channel(rx_chan);
-
- return 0;
}
static const struct of_device_id pm_of_match[] = {
@@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(of, pm_of_match);
static struct platform_driver zynqmp_pm_platform_driver = {
.probe = zynqmp_pm_probe,
- .remove = zynqmp_pm_remove,
+ .remove_new = zynqmp_pm_remove,
.driver = {
.name = "zynqmp_power",
.of_match_table = pm_of_match,
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index a3b1f4e6f..42028ace0 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -148,6 +148,19 @@ static void amd_sdw_set_frameshape(struct amd_sdw_manager *amd_manager)
writel(frame_size, amd_manager->mmio + ACP_SW_FRAMESIZE);
}
+static void amd_sdw_wake_enable(struct amd_sdw_manager *amd_manager, bool enable)
+{
+ u32 wake_ctrl;
+
+ wake_ctrl = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ if (enable)
+ wake_ctrl |= AMD_SDW_WAKE_INTR_MASK;
+ else
+ wake_ctrl &= ~AMD_SDW_WAKE_INTR_MASK;
+
+ writel(wake_ctrl, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+}
+
static void amd_sdw_ctl_word_prep(u32 *lower_word, u32 *upper_word, struct sdw_msg *msg,
int cmd_offset)
{
@@ -950,13 +963,13 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
amd_manager->reg_mask = &sdw_manager_reg_mask_array[amd_manager->instance];
params = &amd_manager->bus.params;
- params->max_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
- params->curr_dr_freq = AMD_SDW_DEFAULT_CLK_FREQ * 2;
+
params->col = AMD_SDW_DEFAULT_COLUMNS;
params->row = AMD_SDW_DEFAULT_ROWS;
prop = &amd_manager->bus.prop;
prop->clk_freq = &amd_sdw_freq_tbl[0];
prop->mclk_freq = AMD_SDW_BUS_BASE_FREQ;
+ prop->max_clk_freq = AMD_SDW_DEFAULT_CLK_FREQ;
ret = sdw_bus_master_add(&amd_manager->bus, dev, dev->fwnode);
if (ret) {
@@ -1122,6 +1135,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
/*
@@ -1148,6 +1162,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
return 0;
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
ret = amd_sdw_clock_stop(amd_manager);
diff --git a/drivers/soundwire/amd_manager.h b/drivers/soundwire/amd_manager.h
index 5f040151a..6dcc7a449 100644
--- a/drivers/soundwire/amd_manager.h
+++ b/drivers/soundwire/amd_manager.h
@@ -152,7 +152,7 @@
#define AMD_SDW0_EXT_INTR_MASK 0x200000
#define AMD_SDW1_EXT_INTR_MASK 4
#define AMD_SDW_IRQ_MASK_0TO7 0x77777777
-#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
+#define AMD_SDW_IRQ_MASK_8TO11 0x000c7777
#define AMD_SDW_IRQ_ERROR_MASK 0xff
#define AMD_SDW_MAX_FREQ_NUM 1
#define AMD_SDW0_MAX_TX_PORTS 3
@@ -190,6 +190,7 @@
#define AMD_SDW_CLK_RESUME_REQ 2
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
+#define AMD_SDW_WAKE_INTR_MASK BIT(16)
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 9ebdd0cd0..91ab97a45 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -131,6 +131,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
.driver_data = (void *)intel_rooks_county,
},
{
+ /* quirk used for NUC15 LAPRC710 skew */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
+ },
+ .driver_data = (void *)intel_rooks_county,
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
index 31162f2b5..c70a63d00 100644
--- a/drivers/soundwire/generic_bandwidth_allocation.c
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -333,7 +333,7 @@ static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
*/
static int sdw_compute_bus_params(struct sdw_bus *bus)
{
- unsigned int max_dr_freq, curr_dr_freq = 0;
+ unsigned int curr_dr_freq = 0;
struct sdw_master_prop *mstr_prop = &bus->prop;
int i, clk_values, ret;
bool is_gear = false;
@@ -351,14 +351,12 @@ static int sdw_compute_bus_params(struct sdw_bus *bus)
clk_buf = NULL;
}
- max_dr_freq = mstr_prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
-
for (i = 0; i < clk_values; i++) {
if (!clk_buf)
- curr_dr_freq = max_dr_freq;
+ curr_dr_freq = bus->params.max_dr_freq;
else
curr_dr_freq = (is_gear) ?
- (max_dr_freq >> clk_buf[i]) :
+ (bus->params.max_dr_freq >> clk_buf[i]) :
clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;
if (curr_dr_freq <= bus->params.bandwidth)
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 8e027eee8..3c4d6deba 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1157,9 +1157,20 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
struct sdw_port_runtime *p_rt;
struct sdw_slave *slave;
unsigned long *port_mask;
- int i, maxport, pn, nports = 0, ret = 0;
+ int maxport, pn, nports = 0, ret = 0;
unsigned int m_port;
+ if (direction == SNDRV_PCM_STREAM_CAPTURE)
+ sconfig.direction = SDW_DATA_DIR_TX;
+ else
+ sconfig.direction = SDW_DATA_DIR_RX;
+
+	/* hw parameters will be ignored as we only support PDM */
+ sconfig.ch_count = 1;
+ sconfig.frame_rate = params_rate(params);
+ sconfig.type = stream->type;
+ sconfig.bps = 1;
+
mutex_lock(&ctrl->port_lock);
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
if (m_rt->direction == SDW_DATA_DIR_RX) {
@@ -1183,7 +1194,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
if (pn > maxport) {
dev_err(ctrl->dev, "All ports busy\n");
ret = -EBUSY;
- goto err;
+ goto out;
}
set_bit(pn, port_mask);
pconfig[nports].num = pn;
@@ -1193,24 +1204,9 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl,
}
}
- if (direction == SNDRV_PCM_STREAM_CAPTURE)
- sconfig.direction = SDW_DATA_DIR_TX;
- else
- sconfig.direction = SDW_DATA_DIR_RX;
-
-	/* hw parameters will be ignored as we only support PDM */
- sconfig.ch_count = 1;
- sconfig.frame_rate = params_rate(params);
- sconfig.type = stream->type;
- sconfig.bps = 1;
sdw_stream_add_master(&ctrl->bus, &sconfig, pconfig,
nports, stream);
-err:
- if (ret) {
- for (i = 0; i < nports; i++)
- clear_bit(pconfig[i].num, port_mask);
- }
-
+out:
mutex_unlock(&ctrl->port_lock);
return ret;
@@ -1265,10 +1261,7 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct sdw_stream_runtime *sruntime;
- struct snd_soc_dai *codec_dai;
- int ret, i;
+ int ret;
ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
@@ -1279,33 +1272,7 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
return ret;
}
- sruntime = sdw_alloc_stream(dai->name);
- if (!sruntime) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
- ctrl->sruntime[dai->id] = sruntime;
-
- for_each_rtd_codec_dais(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_stream(codec_dai, sruntime,
- substream->stream);
- if (ret < 0 && ret != -ENOTSUPP) {
- dev_err(dai->dev, "Failed to set sdw stream on %s\n",
- codec_dai->name);
- goto err_set_stream;
- }
- }
-
return 0;
-
-err_set_stream:
- sdw_release_stream(sruntime);
-err_alloc:
- pm_runtime_mark_last_busy(ctrl->dev);
- pm_runtime_put_autosuspend(ctrl->dev);
-
- return ret;
}
static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
@@ -1314,8 +1281,6 @@ static void qcom_swrm_shutdown(struct snd_pcm_substream *substream,
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
swrm_wait_for_wr_fifo_done(ctrl);
- sdw_release_stream(ctrl->sruntime[dai->id]);
- ctrl->sruntime[dai->id] = NULL;
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
@@ -1624,9 +1589,13 @@ static int qcom_swrm_probe(struct platform_device *pdev)
}
}
- /* FIXME: is there a DT-defined value to use ? */
ctrl->bus.controller_id = -1;
+ if (ctrl->version > SWRM_VERSION_1_3_0) {
+ ctrl->reg_read(ctrl, SWRM_COMP_MASTER_ID, &val);
+ ctrl->bus.controller_id = val;
+ }
+
ret = sdw_bus_master_add(&ctrl->bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "Failed to register Soundwire controller (%d)\n",
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index f048b3d55..f9c0adc07 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -898,7 +898,7 @@ static struct sdw_port_runtime *sdw_port_alloc(struct list_head *port_list)
}
static int sdw_port_config(struct sdw_port_runtime *p_rt,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
int port_index)
{
p_rt->ch_mask = port_config[port_index].ch_mask;
@@ -971,7 +971,7 @@ static int sdw_slave_port_is_valid_range(struct device *dev, int num)
static int sdw_slave_port_config(struct sdw_slave *slave,
struct sdw_slave_runtime *s_rt,
- struct sdw_port_config *port_config)
+ const struct sdw_port_config *port_config)
{
struct sdw_port_runtime *p_rt;
int ret;
@@ -1027,7 +1027,7 @@ static int sdw_master_port_alloc(struct sdw_master_runtime *m_rt,
}
static int sdw_master_port_config(struct sdw_master_runtime *m_rt,
- struct sdw_port_config *port_config)
+ const struct sdw_port_config *port_config)
{
struct sdw_port_runtime *p_rt;
int ret;
@@ -1862,7 +1862,7 @@ EXPORT_SYMBOL(sdw_release_stream);
*/
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream)
{
@@ -1982,7 +1982,7 @@ EXPORT_SYMBOL(sdw_stream_remove_master);
*/
int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream)
{
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 3d1252566..370c4d157 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -272,7 +272,7 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
return i;
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static bool atmel_qspi_supports_op(struct spi_mem *mem,
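This hunk and the spi-ath79 one below make the same errno swap: ENOTSUPP (524) is a kernel-internal NFSv3 value that userspace renders as "Unknown error 524", while EOPNOTSUPP (95) is the POSIX-visible "Operation not supported". Illustration only, reusing the function from this hunk at a hypothetical call site:

	ret = atmel_qspi_find_mode(op);
	if (ret < 0)
		return ret;	/* now -EOPNOTSUPP, meaningful to userspace */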
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index c9f1d1e1d..b7ada9814 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -146,7 +146,7 @@ static int ath79_exec_mem_op(struct spi_mem *mem,
/* Only use for fast-read op. */
if (op->cmd.opcode != 0x0b || op->data.dir != SPI_MEM_DATA_IN ||
op->addr.nbytes != 3 || op->dummy.nbytes != 1)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* disable GPIO mode */
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index b96e55f59..9ace259d2 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -6,12 +6,14 @@
*/
#include <linux/clk.h>
+#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/timer.h>
#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
@@ -52,6 +54,7 @@
#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
+#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2
#define SPI_ENGINE_MISC_SYNC 0x0
#define SPI_ENGINE_MISC_SLEEP 0x1
@@ -78,29 +81,42 @@ struct spi_engine_program {
uint16_t instructions[];
};
-struct spi_engine {
- struct clk *clk;
- struct clk *ref_clk;
-
- spinlock_t lock;
-
- void __iomem *base;
-
- struct spi_message *msg;
+/**
+ * struct spi_engine_message_state - SPI engine per-message state
+ */
+struct spi_engine_message_state {
+ /** @p: Instructions for executing this message. */
struct spi_engine_program *p;
+ /** @cmd_length: Number of elements in cmd_buf array. */
unsigned cmd_length;
+ /** @cmd_buf: Array of commands not yet written to CMD FIFO. */
const uint16_t *cmd_buf;
-
+ /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
struct spi_transfer *tx_xfer;
+ /** @tx_length: Size of tx_buf in bytes. */
unsigned int tx_length;
+ /** @tx_buf: Bytes not yet written to TX FIFO. */
const uint8_t *tx_buf;
-
+ /** @rx_xfer: Next xfer with rx_buf not yet fully written to RX FIFO. */
struct spi_transfer *rx_xfer;
+ /** @rx_length: Size of tx_buf in bytes. */
unsigned int rx_length;
+ /** @rx_buf: Bytes not yet written to the RX FIFO. */
uint8_t *rx_buf;
+ /** @sync_id: ID to correlate SYNC interrupts with this message. */
+ u8 sync_id;
+};
+
+struct spi_engine {
+ struct clk *clk;
+ struct clk *ref_clk;
+
+ spinlock_t lock;
- unsigned int sync_id;
- unsigned int completed_id;
+ void __iomem *base;
+ struct ida sync_ida;
+ struct timer_list watchdog_timer;
+ struct spi_controller *controller;
unsigned int int_enable;
};
@@ -127,25 +143,17 @@ static unsigned int spi_engine_get_config(struct spi_device *spi)
return config;
}
-static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
- struct spi_device *spi, struct spi_transfer *xfer)
-{
- unsigned int clk_div;
-
- clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
- xfer->speed_hz * 2);
- if (clk_div > 255)
- clk_div = 255;
- else if (clk_div > 0)
- clk_div -= 1;
-
- return clk_div;
-}
-
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
struct spi_transfer *xfer)
{
- unsigned int len = xfer->len;
+ unsigned int len;
+
+ if (xfer->bits_per_word <= 8)
+ len = xfer->len;
+ else if (xfer->bits_per_word <= 16)
+ len = xfer->len / 2;
+ else
+ len = xfer->len / 4;
while (len) {
unsigned int n = min(len, 256U);
@@ -163,22 +171,16 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
}
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
- struct spi_engine *spi_engine, unsigned int clk_div,
- struct spi_transfer *xfer)
+ int delay_ns, u32 sclk_hz)
{
- unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
unsigned int t;
- int delay;
- delay = spi_delay_to_ns(&xfer->delay, xfer);
- if (delay < 0)
+ /* negative delay indicates error, e.g. from spi_delay_to_ns() */
+ if (delay_ns <= 0)
return;
- delay /= 1000;
- if (delay == 0)
- return;
-
- t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
+ /* rounding down since executing the instruction adds a couple of ticks delay */
+ t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC);
while (t) {
unsigned int n = min(t, 256U);
@@ -195,53 +197,105 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
if (assert)
mask ^= BIT(spi_get_chipselect(spi, 0));
- spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
+ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(0, mask));
+}
+
+/*
+ * Performs precompile steps on the message.
+ *
+ * The SPI core does most of the message/transfer validation and filling in
+ * fields for us via __spi_validate(). This fixes up anything remaining not
+ * done there.
+ *
+ * NB: This is separate from spi_engine_compile_message() because the latter
+ * is called twice and would otherwise result in double-evaluation.
+ */
+static void spi_engine_precompile_message(struct spi_message *msg)
+{
+ unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
+ xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
+ }
}
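To make the new split concrete (illustrative numbers, not from the patch): with max_speed_hz = 40 MHz and a transfer requesting 7 MHz, the precompile step computes clk_div = DIV_ROUND_UP(40000000, 7000000) = 6 and effective_speed_hz = 40000000 / 6 = 6666666 Hz; the compile step then writes clk_div - 1 = 5 to SPI_ENGINE_CMD_REG_CLK_DIV. A 10 us post-transfer delay at that effective SCLK becomes t = DIV_ROUND_DOWN_ULL(10000 * 6666666, NSEC_PER_SEC) = 66 sleep ticks in spi_engine_gen_sleep(), rounded down because executing the instruction itself costs a few ticks.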
-static int spi_engine_compile_message(struct spi_engine *spi_engine,
- struct spi_message *msg, bool dry, struct spi_engine_program *p)
+static void spi_engine_compile_message(struct spi_message *msg, bool dry,
+ struct spi_engine_program *p)
{
struct spi_device *spi = msg->spi;
+ struct spi_controller *host = spi->controller;
struct spi_transfer *xfer;
int clk_div, new_clk_div;
- bool cs_change = true;
+ bool keep_cs = false;
+ u8 bits_per_word = 0;
- clk_div = -1;
+ clk_div = 1;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
spi_engine_get_config(spi)));
+ xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
+ spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
+
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
+ new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
if (new_clk_div != clk_div) {
clk_div = new_clk_div;
+ /* actual divider used is register value + 1 */
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
- clk_div));
+ clk_div - 1));
}
- if (cs_change)
- spi_engine_gen_cs(p, dry, spi, true);
+ if (bits_per_word != xfer->bits_per_word) {
+ bits_per_word = xfer->bits_per_word;
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS,
+ bits_per_word));
+ }
spi_engine_gen_xfer(p, dry, xfer);
- spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);
-
- cs_change = xfer->cs_change;
- if (list_is_last(&xfer->transfer_list, &msg->transfers))
- cs_change = !cs_change;
-
- if (cs_change)
- spi_engine_gen_cs(p, dry, spi, false);
+ spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
+ xfer->effective_speed_hz);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!xfer->cs_off)
+ spi_engine_gen_cs(p, dry, spi, false);
+
+ spi_engine_gen_sleep(p, dry, spi_delay_to_ns(
+ &xfer->cs_change_delay, xfer),
+ xfer->effective_speed_hz);
+
+ if (!list_next_entry(xfer, transfer_list)->cs_off)
+ spi_engine_gen_cs(p, dry, spi, true);
+ }
+ } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
+ xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
+ spi_engine_gen_cs(p, dry, spi, xfer->cs_off);
+ }
}
- return 0;
+ if (!keep_cs)
+ spi_engine_gen_cs(p, dry, spi, false);
+
+ /*
+ * Restore clockdiv to default so that future gen_sleep commands don't
+ * have to be aware of the current register state.
+ */
+ if (clk_div != 1)
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
-static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+static void spi_engine_xfer_next(struct spi_message *msg,
struct spi_transfer **_xfer)
{
- struct spi_message *msg = spi_engine->msg;
struct spi_transfer *xfer = *_xfer;
if (!xfer) {
@@ -256,147 +310,192 @@ static void spi_engine_xfer_next(struct spi_engine *spi_engine,
*_xfer = xfer;
}
-static void spi_engine_tx_next(struct spi_engine *spi_engine)
+static void spi_engine_tx_next(struct spi_message *msg)
{
- struct spi_transfer *xfer = spi_engine->tx_xfer;
+ struct spi_engine_message_state *st = msg->state;
+ struct spi_transfer *xfer = st->tx_xfer;
do {
- spi_engine_xfer_next(spi_engine, &xfer);
+ spi_engine_xfer_next(msg, &xfer);
} while (xfer && !xfer->tx_buf);
- spi_engine->tx_xfer = xfer;
+ st->tx_xfer = xfer;
if (xfer) {
- spi_engine->tx_length = xfer->len;
- spi_engine->tx_buf = xfer->tx_buf;
+ st->tx_length = xfer->len;
+ st->tx_buf = xfer->tx_buf;
} else {
- spi_engine->tx_buf = NULL;
+ st->tx_buf = NULL;
}
}
-static void spi_engine_rx_next(struct spi_engine *spi_engine)
+static void spi_engine_rx_next(struct spi_message *msg)
{
- struct spi_transfer *xfer = spi_engine->rx_xfer;
+ struct spi_engine_message_state *st = msg->state;
+ struct spi_transfer *xfer = st->rx_xfer;
do {
- spi_engine_xfer_next(spi_engine, &xfer);
+ spi_engine_xfer_next(msg, &xfer);
} while (xfer && !xfer->rx_buf);
- spi_engine->rx_xfer = xfer;
+ st->rx_xfer = xfer;
if (xfer) {
- spi_engine->rx_length = xfer->len;
- spi_engine->rx_buf = xfer->rx_buf;
+ st->rx_length = xfer->len;
+ st->rx_buf = xfer->rx_buf;
} else {
- spi_engine->rx_buf = NULL;
+ st->rx_buf = NULL;
}
}
-static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine,
+ struct spi_message *msg)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
+ struct spi_engine_message_state *st = msg->state;
unsigned int n, m, i;
const uint16_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
- while (n && spi_engine->cmd_length) {
- m = min(n, spi_engine->cmd_length);
- buf = spi_engine->cmd_buf;
+ while (n && st->cmd_length) {
+ m = min(n, st->cmd_length);
+ buf = st->cmd_buf;
for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
- spi_engine->cmd_buf += m;
- spi_engine->cmd_length -= m;
+ st->cmd_buf += m;
+ st->cmd_length -= m;
n -= m;
}
- return spi_engine->cmd_length != 0;
+ return st->cmd_length != 0;
}
-static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine,
+ struct spi_message *msg)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
+ struct spi_engine_message_state *st = msg->state;
unsigned int n, m, i;
- const uint8_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
- while (n && spi_engine->tx_length) {
- m = min(n, spi_engine->tx_length);
- buf = spi_engine->tx_buf;
- for (i = 0; i < m; i++)
- writel_relaxed(buf[i], addr);
- spi_engine->tx_buf += m;
- spi_engine->tx_length -= m;
+ while (n && st->tx_length) {
+ if (st->tx_xfer->bits_per_word <= 8) {
+ const u8 *buf = st->tx_buf;
+
+ m = min(n, st->tx_length);
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ st->tx_buf += m;
+ st->tx_length -= m;
+ } else if (st->tx_xfer->bits_per_word <= 16) {
+ const u16 *buf = (const u16 *)st->tx_buf;
+
+ m = min(n, st->tx_length / 2);
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ st->tx_buf += m * 2;
+ st->tx_length -= m * 2;
+ } else {
+ const u32 *buf = (const u32 *)st->tx_buf;
+
+ m = min(n, st->tx_length / 4);
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ st->tx_buf += m * 4;
+ st->tx_length -= m * 4;
+ }
n -= m;
- if (spi_engine->tx_length == 0)
- spi_engine_tx_next(spi_engine);
+ if (st->tx_length == 0)
+ spi_engine_tx_next(msg);
}
- return spi_engine->tx_length != 0;
+ return st->tx_length != 0;
}
-static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine,
+ struct spi_message *msg)
{
void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
+ struct spi_engine_message_state *st = msg->state;
unsigned int n, m, i;
- uint8_t *buf;
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
- while (n && spi_engine->rx_length) {
- m = min(n, spi_engine->rx_length);
- buf = spi_engine->rx_buf;
- for (i = 0; i < m; i++)
- buf[i] = readl_relaxed(addr);
- spi_engine->rx_buf += m;
- spi_engine->rx_length -= m;
+ while (n && st->rx_length) {
+ if (st->rx_xfer->bits_per_word <= 8) {
+ u8 *buf = st->rx_buf;
+
+ m = min(n, st->rx_length);
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ st->rx_buf += m;
+ st->rx_length -= m;
+ } else if (st->rx_xfer->bits_per_word <= 16) {
+ u16 *buf = (u16 *)st->rx_buf;
+
+ m = min(n, st->rx_length / 2);
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ st->rx_buf += m * 2;
+ st->rx_length -= m * 2;
+ } else {
+ u32 *buf = (u32 *)st->rx_buf;
+
+ m = min(n, st->rx_length / 4);
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ st->rx_buf += m * 4;
+ st->rx_length -= m * 4;
+ }
n -= m;
- if (spi_engine->rx_length == 0)
- spi_engine_rx_next(spi_engine);
+ if (st->rx_length == 0)
+ spi_engine_rx_next(msg);
}
- return spi_engine->rx_length != 0;
+ return st->rx_length != 0;
}
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
struct spi_controller *host = devid;
+ struct spi_message *msg = host->cur_msg;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
unsigned int disable_int = 0;
unsigned int pending;
+ int completed_id = -1;
pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
if (pending & SPI_ENGINE_INT_SYNC) {
writel_relaxed(SPI_ENGINE_INT_SYNC,
spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
- spi_engine->completed_id = readl_relaxed(
+ completed_id = readl_relaxed(
spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
}
spin_lock(&spi_engine->lock);
if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
- if (!spi_engine_write_cmd_fifo(spi_engine))
+ if (!spi_engine_write_cmd_fifo(spi_engine, msg))
disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
}
if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
- if (!spi_engine_write_tx_fifo(spi_engine))
+ if (!spi_engine_write_tx_fifo(spi_engine, msg))
disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
}
if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
- if (!spi_engine_read_rx_fifo(spi_engine))
+ if (!spi_engine_read_rx_fifo(spi_engine, msg))
disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
}
- if (pending & SPI_ENGINE_INT_SYNC) {
- if (spi_engine->msg &&
- spi_engine->completed_id == spi_engine->sync_id) {
- struct spi_message *msg = spi_engine->msg;
-
- kfree(spi_engine->p);
- msg->status = 0;
- msg->actual_length = msg->frame_length;
- spi_engine->msg = NULL;
- spi_finalize_current_message(host);
+ if (pending & SPI_ENGINE_INT_SYNC && msg) {
+ struct spi_engine_message_state *st = msg->state;
+
+ if (completed_id == st->sync_id) {
+ if (timer_delete_sync(&spi_engine->watchdog_timer)) {
+ msg->status = 0;
+ msg->actual_length = msg->frame_length;
+ spi_finalize_current_message(host);
+ }
disable_int |= SPI_ENGINE_INT_SYNC;
}
}
@@ -412,43 +511,86 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static int spi_engine_transfer_one_message(struct spi_controller *host,
- struct spi_message *msg)
+static int spi_engine_prepare_message(struct spi_controller *host,
+ struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- unsigned int int_enable = 0;
- unsigned long flags;
+ struct spi_engine_message_state *st;
size_t size;
+ int ret;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ spi_engine_precompile_message(msg);
p_dry.length = 0;
- spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+ spi_engine_compile_message(msg, true, &p_dry);
size = sizeof(*p->instructions) * (p_dry.length + 1);
p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
- if (!p)
+ if (!p) {
+ kfree(st);
return -ENOMEM;
- spi_engine_compile_message(spi_engine, msg, false, p);
+ }
- spin_lock_irqsave(&spi_engine->lock, flags);
- spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
- spi_engine_program_add_cmd(p, false,
- SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
+ ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(p);
+ kfree(st);
+ return ret;
+ }
+
+ st->sync_id = ret;
+
+ spi_engine_compile_message(msg, false, p);
+
+ spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));
+
+ st->p = p;
+ st->cmd_buf = p->instructions;
+ st->cmd_length = p->length;
+ msg->state = st;
+
+ return 0;
+}
+
+static int spi_engine_unprepare_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_message_state *st = msg->state;
+
+ ida_free(&spi_engine->sync_ida, st->sync_id);
+ kfree(st->p);
+ kfree(st);
- spi_engine->msg = msg;
- spi_engine->p = p;
+ return 0;
+}
- spi_engine->cmd_buf = p->instructions;
- spi_engine->cmd_length = p->length;
- if (spi_engine_write_cmd_fifo(spi_engine))
+static int spi_engine_transfer_one_message(struct spi_controller *host,
+ struct spi_message *msg)
+{
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_message_state *st = msg->state;
+ unsigned int int_enable = 0;
+ unsigned long flags;
+
+ mod_timer(&spi_engine->watchdog_timer, jiffies + msecs_to_jiffies(5000));
+
+ spin_lock_irqsave(&spi_engine->lock, flags);
+
+ if (spi_engine_write_cmd_fifo(spi_engine, msg))
int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
- spi_engine_tx_next(spi_engine);
- if (spi_engine_write_tx_fifo(spi_engine))
+ spi_engine_tx_next(msg);
+ if (spi_engine_write_tx_fifo(spi_engine, msg))
int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
- spi_engine_rx_next(spi_engine);
- if (spi_engine->rx_length != 0)
+ spi_engine_rx_next(msg);
+ if (st->rx_length != 0)
int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
int_enable |= SPI_ENGINE_INT_SYNC;
@@ -461,6 +603,29 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
return 0;
}
+static void spi_engine_timeout(struct timer_list *timer)
+{
+ struct spi_engine *spi_engine = from_timer(spi_engine, timer, watchdog_timer);
+ struct spi_controller *host = spi_engine->controller;
+
+ if (WARN_ON(!host->cur_msg))
+ return;
+
+ dev_err(&host->dev,
+ "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
+ host->cur_msg->status = -ETIMEDOUT;
+ spi_finalize_current_message(host);
+}
+
+static void spi_engine_release_hw(void *p)
+{
+ struct spi_engine *spi_engine = p;
+
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+}
+
static int spi_engine_probe(struct platform_device *pdev)
{
struct spi_engine *spi_engine;
@@ -473,35 +638,28 @@ static int spi_engine_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
- if (!spi_engine)
- return -ENOMEM;
-
- host = spi_alloc_host(&pdev->dev, 0);
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
if (!host)
return -ENOMEM;
- spi_controller_set_devdata(host, spi_engine);
+ spi_engine = spi_controller_get_devdata(host);
spin_lock_init(&spi_engine->lock);
+ ida_init(&spi_engine->sync_ida);
+ timer_setup(&spi_engine->watchdog_timer, spi_engine_timeout, TIMER_IRQSAFE);
+ spi_engine->controller = host;
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(spi_engine->clk)) {
- ret = PTR_ERR(spi_engine->clk);
- goto err_put_host;
- }
+ if (IS_ERR(spi_engine->clk))
+ return PTR_ERR(spi_engine->clk);
spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
- if (IS_ERR(spi_engine->ref_clk)) {
- ret = PTR_ERR(spi_engine->ref_clk);
- goto err_put_host;
- }
+ if (IS_ERR(spi_engine->ref_clk))
+ return PTR_ERR(spi_engine->ref_clk);
spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(spi_engine->base)) {
- ret = PTR_ERR(spi_engine->base);
- goto err_put_host;
- }
+ if (IS_ERR(spi_engine->base))
+ return PTR_ERR(spi_engine->base);
version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
@@ -509,54 +667,42 @@ static int spi_engine_probe(struct platform_device *pdev)
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
- ret = -ENODEV;
- goto err_put_host;
+ return -ENODEV;
}
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
- ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
+ ret = devm_add_action_or_reset(&pdev->dev, spi_engine_release_hw,
+ spi_engine);
if (ret)
- goto err_put_host;
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_engine_irq, 0, pdev->name,
+ host);
+ if (ret)
+ return ret;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
- host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
host->transfer_one_message = spi_engine_transfer_one_message;
+ host->prepare_message = spi_engine_prepare_message;
+ host->unprepare_message = spi_engine_unprepare_message;
host->num_chipselect = 8;
- ret = spi_register_controller(host);
+ if (host->max_speed_hz == 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");
+
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
- goto err_free_irq;
+ return ret;
platform_set_drvdata(pdev, host);
return 0;
-err_free_irq:
- free_irq(irq, host);
-err_put_host:
- spi_controller_put(host);
- return ret;
-}
-
-static void spi_engine_remove(struct platform_device *pdev)
-{
- struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
- struct spi_engine *spi_engine = spi_controller_get_devdata(host);
- int irq = platform_get_irq(pdev, 0);
-
- spi_unregister_controller(host);
-
- free_irq(irq, host);
-
- spi_controller_put(host);
-
- writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
- writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
- writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}
static const struct of_device_id spi_engine_match_table[] = {
@@ -567,7 +713,6 @@ MODULE_DEVICE_TABLE(of, spi_engine_match_table);
static struct platform_driver spi_engine_driver = {
.probe = spi_engine_probe,
- .remove_new = spi_engine_remove,
.driver = {
.name = "spi-engine",
.of_match_table = spi_engine_match_table,
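The spi-axi-engine rework above moves all in-flight bookkeeping out of struct spi_engine: per-message state now lives in a kzalloc'd struct spi_engine_message_state hung off spi_message::state by the new prepare_message()/unprepare_message() callbacks, and a watchdog timer fails the message with -ETIMEDOUT if the SYNC interrupt never arrives. A minimal sketch of that callback pattern, with a simplified state struct (the example_* names are hypothetical, not the driver's):

	#include <linux/slab.h>
	#include <linux/spi/spi.h>

	struct example_msg_state {
		unsigned int sync_id;	/* stands in for the driver's fields */
	};

	static int example_prepare_message(struct spi_controller *host,
					   struct spi_message *msg)
	{
		struct example_msg_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

		if (!st)
			return -ENOMEM;

		/* The core hands msg back to transfer_one_message() and
		 * unprepare_message(), so the state travels with the message. */
		msg->state = st;
		return 0;
	}

	static int example_unprepare_message(struct spi_controller *host,
					     struct spi_message *msg)
	{
		kfree(msg->state);
		return 0;
	}

Because the state is per message rather than per controller, the IRQ handler reaches it through host->cur_msg instead of the controller devdata, as the spi_engine_irq() hunk above shows.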
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 0407b9118..cfdaa5eae 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1199,7 +1199,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
op->data.dir != SPI_MEM_DATA_IN)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
buf = op->data.buf.in;
addr = op->addr.val;
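This hunk, like the matching ones in spi-npcm-fiu and spi-mem below, converts -ENOTSUPP to -EOPNOTSUPP: ENOTSUPP (524) is kernel-internal and has no userspace errno definition, so it surfaces to applications as an unknown error, whereas EOPNOTSUPP (95) is a proper UAPI errno. A quick illustrative userspace check (assuming a typical glibc system):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		printf("%s\n", strerror(524));	/* "Unknown error 524" */
		printf("%s\n", strerror(95));	/* "Operation not supported" */
		return 0;
	}

During the transition, spi_mem_exec_op() (further down in this patch) treats both values as "not supported" before falling back to the generic transfer path.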
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index b7e04b03b..8648b8eb0 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -619,7 +619,6 @@ MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
static struct platform_driver cdns_xspi_platform_driver = {
.probe = cdns_xspi_probe,
- .remove = NULL,
.driver = {
.name = CDNS_XSPI_NAME,
.of_match_table = cdns_xspi_of_match,
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index c1556b652..adf19e8c4 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -148,8 +148,7 @@ static void cs42l43_set_cs(struct spi_device *spi, bool is_high)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
- if (spi_get_chipselect(spi, 0) == 0)
- regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
}
static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
@@ -213,7 +212,7 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv->ctlr));
+ priv->ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*priv->ctlr));
if (!priv->ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 46801189a..cc74cbe03 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -411,7 +411,6 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "renesas,rzn1-spi", .data = dw_spi_pssi_init},
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_hssi_init},
{ .compatible = "intel,keembay-ssi", .data = dw_spi_intel_init},
- { .compatible = "intel,thunderbay-ssi", .data = dw_spi_intel_init},
{
.compatible = "intel,mountevans-imc-ssi",
.data = dw_spi_mountevans_imc_init,
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index b956a32a4..15f84e68d 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -145,10 +145,10 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
-static void handle_se_timeout(struct spi_master *spi,
- struct spi_message *msg)
+static void handle_se_timeout(struct spi_controller *spi,
+ struct spi_message *msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
unsigned long time_left;
struct geni_se *se = &mas->se;
const struct spi_transfer *xfer;
@@ -160,9 +160,9 @@ static void handle_se_timeout(struct spi_master *spi,
xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
- if (spi->slave) {
+ if (spi->target) {
/*
- * skip CMD Cancel sequnece since spi slave
+ * skip CMD Cancel sequence since spi target
 * doesn't support CMD Cancel sequence
*/
spin_unlock_irq(&mas->lock);
@@ -225,17 +225,17 @@ reset_if_dma:
}
}
-static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
+static void handle_gpi_timeout(struct spi_controller *spi, struct spi_message *msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
dmaengine_terminate_sync(mas->tx);
dmaengine_terminate_sync(mas->rx);
}
-static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
+static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message *msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
@@ -286,8 +286,8 @@ static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
- struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
- struct spi_master *spi = dev_get_drvdata(mas->dev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
+ struct spi_controller *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned long time_left;
@@ -395,9 +395,9 @@ static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
}
static int setup_fifo_params(struct spi_device *spi_slv,
- struct spi_master *spi)
+ struct spi_controller *spi)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
u32 demux_sel;
@@ -434,7 +434,7 @@ static int setup_fifo_params(struct spi_device *spi_slv,
static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
- struct spi_master *spi = cb;
+ struct spi_controller *spi = cb;
spi->cur_msg->status = -EIO;
if (result->result != DMA_TRANS_NOERROR) {
@@ -454,7 +454,7 @@ spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
}
static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
- struct spi_device *spi_slv, struct spi_master *spi)
+ struct spi_device *spi_slv, struct spi_controller *spi)
{
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
struct dma_slave_config config = {};
@@ -560,14 +560,14 @@ static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
static bool geni_can_dma(struct spi_controller *ctlr,
struct spi_device *slv, struct spi_transfer *xfer)
{
- struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+ struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
u32 len, fifo_size;
if (mas->cur_xfer_mode == GENI_GPI_DMA)
return true;
- /* Set SE DMA mode for SPI slave. */
- if (ctlr->slave)
+ /* Set SE DMA mode for SPI target. */
+ if (ctlr->target)
return true;
len = get_xfer_len_in_words(xfer, mas);
@@ -579,10 +579,10 @@ static bool geni_can_dma(struct spi_controller *ctlr,
return false;
}
-static int spi_geni_prepare_message(struct spi_master *spi,
- struct spi_message *spi_msg)
+static int spi_geni_prepare_message(struct spi_controller *spi,
+ struct spi_message *spi_msg)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
switch (mas->cur_xfer_mode) {
@@ -657,7 +657,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
proto = geni_se_read_proto(se);
- if (spi->slave) {
+ if (spi->target) {
if (proto != GENI_SE_SPI_SLAVE) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
goto out_pm;
@@ -715,7 +715,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
}
/* We always control CS manually */
- if (!spi->slave) {
+ if (!spi->target) {
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
@@ -824,7 +824,7 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
static int setup_se_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
- u16 mode, struct spi_master *spi)
+ u16 mode, struct spi_controller *spi)
{
u32 m_cmd = 0;
u32 len;
@@ -913,11 +913,11 @@ static int setup_se_xfer(struct spi_transfer *xfer,
return ret;
}
-static int spi_geni_transfer_one(struct spi_master *spi,
- struct spi_device *slv,
- struct spi_transfer *xfer)
+static int spi_geni_transfer_one(struct spi_controller *spi,
+ struct spi_device *slv,
+ struct spi_transfer *xfer)
{
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
if (spi_geni_is_abort_still_pending(mas))
@@ -939,8 +939,8 @@ static int spi_geni_transfer_one(struct spi_master *spi,
static irqreturn_t geni_spi_isr(int irq, void *data)
{
- struct spi_master *spi = data;
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = data;
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 m_irq;
@@ -1042,7 +1042,7 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
static int spi_geni_probe(struct platform_device *pdev)
{
int ret, irq;
- struct spi_master *spi;
+ struct spi_controller *spi;
struct spi_geni_master *mas;
void __iomem *base;
struct clk *clk;
@@ -1064,12 +1064,12 @@ static int spi_geni_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- spi = devm_spi_alloc_master(dev, sizeof(*mas));
+ spi = devm_spi_alloc_host(dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
platform_set_drvdata(pdev, spi);
- mas = spi_master_get_devdata(spi);
+ mas = spi_controller_get_devdata(spi);
mas->irq = irq;
mas->dev = dev;
mas->se.dev = dev;
@@ -1113,7 +1113,7 @@ static int spi_geni_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
if (device_property_read_bool(&pdev->dev, "spi-slave"))
- spi->slave = true;
+ spi->target = true;
ret = geni_icc_get(&mas->se, NULL);
if (ret)
@@ -1135,7 +1135,7 @@ static int spi_geni_probe(struct platform_device *pdev)
* for dma (gsi) mode, the gsi will set cs based on params passed in
* TRE
*/
- if (!spi->slave && mas->cur_xfer_mode == GENI_SE_FIFO)
+ if (!spi->target && mas->cur_xfer_mode == GENI_SE_FIFO)
spi->set_cs = spi_geni_set_cs;
/*
@@ -1148,7 +1148,7 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
goto spi_geni_release_dma;
- ret = spi_register_master(spi);
+ ret = spi_register_controller(spi);
if (ret)
goto spi_geni_probe_free_irq;
@@ -1164,11 +1164,11 @@ spi_geni_probe_runtime_disable:
static void spi_geni_remove(struct platform_device *pdev)
{
- struct spi_master *spi = platform_get_drvdata(pdev);
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = platform_get_drvdata(pdev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
- spi_unregister_master(spi);
+ spi_unregister_controller(spi);
spi_geni_release_dma_chan(mas);
@@ -1178,8 +1178,8 @@ static void spi_geni_remove(struct platform_device *pdev)
static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
/* Drop the performance state vote */
@@ -1194,8 +1194,8 @@ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
- struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ struct spi_controller *spi = dev_get_drvdata(dev);
+ struct spi_geni_master *mas = spi_controller_get_devdata(spi);
int ret;
ret = geni_icc_enable(&mas->se);
@@ -1211,30 +1211,30 @@ static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
static int __maybe_unused spi_geni_suspend(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_controller *spi = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(spi);
+ ret = spi_controller_suspend(spi);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
- spi_master_resume(spi);
+ spi_controller_resume(spi);
return ret;
}
static int __maybe_unused spi_geni_resume(struct device *dev)
{
- struct spi_master *spi = dev_get_drvdata(dev);
+ struct spi_controller *spi = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
- ret = spi_master_resume(spi);
+ ret = spi_controller_resume(spi);
if (ret)
pm_runtime_force_suspend(dev);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 0e479c540..c3e5cee18 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1345,7 +1345,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
- dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
controller->dma_tx = NULL;
goto err;
}
@@ -1354,7 +1354,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
- dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
controller->dma_rx = NULL;
goto err;
}
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
index cc366936d..003a6d21c 100644
--- a/drivers/spi/spi-ingenic.c
+++ b/drivers/spi/spi-ingenic.c
@@ -346,14 +346,17 @@ static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
static int spi_ingenic_request_dma(struct spi_controller *ctlr,
struct device *dev)
{
- ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!ctlr->dma_tx)
- return -ENODEV;
+ struct dma_chan *chan;
- ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+ chan = dma_request_chan(dev, "tx");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ ctlr->dma_tx = chan;
- if (!ctlr->dma_rx)
- return -ENODEV;
+ chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ ctlr->dma_rx = chan;
ctlr->can_dma = spi_ingenic_can_dma;
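The spi-ingenic change replaces dma_request_slave_channel(), which collapses every failure to a NULL pointer, with dma_request_chan(), which returns an ERR_PTR and so lets -EPROBE_DEFER from a not-yet-ready DMA provider propagate to the driver core. The pattern, sketched in isolation (the "tx" name follows the driver's own binding):

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER, not just -ENODEV */
	ctlr->dma_tx = chan;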
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 98ec4dc22..3654ae35d 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -711,8 +711,7 @@ static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
{
if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
- iop->mem_op.cmd.dtr != op->cmd.dtr ||
- iop->mem_op.cmd.opcode != op->cmd.opcode)
+ iop->mem_op.cmd.dtr != op->cmd.dtr)
return false;
if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
@@ -737,11 +736,12 @@ intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
const struct intel_spi_mem_op *iop;
for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
- if (intel_spi_cmp_mem_op(iop, op))
- break;
+ if (iop->mem_op.cmd.opcode == op->cmd.opcode &&
+ intel_spi_cmp_mem_op(iop, op))
+ return iop;
}
- return iop->mem_op.cmd.opcode ? iop : NULL;
+ return NULL;
}
static bool intel_spi_supports_mem_op(struct spi_mem *mem,
diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c
index c5a066c73..1cc1422dd 100644
--- a/drivers/spi/spi-ljca.c
+++ b/drivers/spi/spi-ljca.c
@@ -223,7 +223,7 @@ static int ljca_spi_probe(struct auxiliary_device *auxdev,
struct ljca_spi_dev *ljca_spi;
int ret;
- controller = devm_spi_alloc_master(&auxdev->dev, sizeof(*ljca_spi));
+ controller = devm_spi_alloc_host(&auxdev->dev, sizeof(*ljca_spi));
if (!controller)
return -ENOMEM;
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index edd7430d4..17b8baf74 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -297,6 +297,49 @@ static void spi_mem_access_end(struct spi_mem *mem)
pm_runtime_put(ctlr->dev.parent);
}
+static void spi_mem_add_op_stats(struct spi_statistics __percpu *pcpu_stats,
+ const struct spi_mem_op *op, int exec_op_ret)
+{
+ struct spi_statistics *stats;
+ u64 len, l2len;
+
+ get_cpu();
+ stats = this_cpu_ptr(pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+
+ /*
+ * We do not have the concept of messages or transfers. Let's consider
+ * that one operation is equivalent to one message and one transfer.
+ */
+ u64_stats_inc(&stats->messages);
+ u64_stats_inc(&stats->transfers);
+
+ /* Use the sum of all lengths as bytes count and histogram value. */
+ len = op->cmd.nbytes + op->addr.nbytes;
+ len += op->dummy.nbytes + op->data.nbytes;
+ u64_stats_add(&stats->bytes, len);
+ l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1;
+ u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
+
+ /* Only account for data bytes as transferred bytes. */
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ u64_stats_add(&stats->bytes_tx, op->data.nbytes);
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ u64_stats_add(&stats->bytes_rx, op->data.nbytes);
+
+ /*
+ * A timeout is not an error, following the same behavior as
+ * spi_transfer_one_message().
+ */
+ if (exec_op_ret == -ETIMEDOUT)
+ u64_stats_inc(&stats->timedout);
+ else if (exec_op_ret)
+ u64_stats_inc(&stats->errors);
+
+ u64_stats_update_end(&stats->syncp);
+ put_cpu();
+}
+
/**
* spi_mem_exec_op() - Execute a memory operation
* @mem: the SPI memory
@@ -323,7 +366,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return ret;
if (!spi_mem_internal_supports_op(mem, op))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
ret = spi_mem_access_start(mem);
@@ -339,8 +382,12 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP)
+ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
+ spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
+ spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);
+
return ret;
+ }
}
tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
@@ -559,7 +606,7 @@ spi_mem_dirmap_create(struct spi_mem *mem,
if (ret) {
desc->nodirmap = true;
if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
else
ret = 0;
}
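The new spi_mem_add_op_stats() above feeds the SPI core's existing per-cpu struct spi_statistics, so each update has to pin the CPU and run inside the u64_stats seqcount so that 32-bit readers see consistent 64-bit counters. Stripped to its locking skeleton (a sketch, assuming pcpu_stats and len as in the function above):

	struct spi_statistics *stats;

	get_cpu();				/* this_cpu_ptr() must not migrate */
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);	/* readers retry during update */
	u64_stats_inc(&stats->messages);
	u64_stats_add(&stats->bytes, len);
	u64_stats_update_end(&stats->syncp);
	put_cpu();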
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 4a6c984b6..d5ac60c13 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 1bf080339..88cbe4f00 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>
+#include <linux/dma/mxs-dma.h>
#define DRIVER_NAME "mxs-spi"
@@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(ssp->dev,
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 03db9f016..f3bb8bbc1 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -556,7 +556,7 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
op->data.nbytes);
if (fiu->spix_mode || op->addr.nbytes > 4)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (fiu->clkrate != chip->clkrate) {
ret = clk_set_rate(fiu->clk, chip->clkrate);
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 5b2d3e4e2..5a4226682 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -275,6 +275,8 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
sizeof(struct pci1xxxx_spi_internal),
GFP_KERNEL);
+ if (!spi_bus->spi_int[iter])
+ return -ENOMEM;
spi_sub_ptr = spi_bus->spi_int[iter];
spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
if (!spi_sub_ptr->spi_host)
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index d1b6110b3..de63cf055 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -338,14 +338,8 @@ struct vendor_data {
* @clk: outgoing clock "SPICLK" for the SPI bus
* @host: SPI framework hookup
* @host_info: controller-specific data from machine setup
- * @pump_transfers: Tasklet used in Interrupt Transfer mode
- * @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
* @cur_chip: pointer to current client's chip (assigned from controller_state)
- * @next_msg_cs_active: the next message in the queue has been examined
- * and it was found that it uses the same chip select as the previous
- * message, so we left it active after the previous transfer, and it's
- * active already.
* @tx: current position in TX buffer to be read
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
@@ -362,7 +356,6 @@ struct vendor_data {
* @dummypage: a dummy page used for driving data on the bus with DMA
* @dma_running: indicates whether DMA is in operation
* @cur_cs: current chip select index
- * @cur_gpiod: current chip select GPIO descriptor
*/
struct pl022 {
struct amba_device *adev;
@@ -372,12 +365,8 @@ struct pl022 {
struct clk *clk;
struct spi_controller *host;
struct pl022_ssp_controller *host_info;
- /* Message per-transfer pump */
- struct tasklet_struct pump_transfers;
- struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
- bool next_msg_cs_active;
void *tx;
void *tx_end;
void *rx;
@@ -397,7 +386,6 @@ struct pl022 {
bool dma_running;
#endif
int cur_cs;
- struct gpio_desc *cur_gpiod;
};
/**
@@ -431,99 +419,29 @@ struct chip_data {
/**
* internal_cs_control - Control chip select signals via SSP_CSR.
* @pl022: SSP driver private data structure
- * @command: select/delect the chip
+ * @enable: select/deselect the chip
*
* Used on controller with internal chip select control via SSP_CSR register
* (vendor extension). Each of the 5 LSB in the register controls one chip
* select signal.
*/
-static void internal_cs_control(struct pl022 *pl022, u32 command)
+static void internal_cs_control(struct pl022 *pl022, bool enable)
{
u32 tmp;
tmp = readw(SSP_CSR(pl022->virtbase));
- if (command == SSP_CHIP_SELECT)
+ if (enable)
tmp &= ~BIT(pl022->cur_cs);
else
tmp |= BIT(pl022->cur_cs);
writew(tmp, SSP_CSR(pl022->virtbase));
}
-static void pl022_cs_control(struct pl022 *pl022, u32 command)
+static void pl022_cs_control(struct spi_device *spi, bool enable)
{
+ struct pl022 *pl022 = spi_controller_get_devdata(spi->controller);
if (pl022->vendor->internal_cs_ctrl)
- internal_cs_control(pl022, command);
- else if (pl022->cur_gpiod)
- /*
- * This needs to be inverted since with GPIOLIB in
- * control, the inversion will be handled by
- * GPIOLIB's active low handling. The "command"
- * passed into this function will be SSP_CHIP_SELECT
- * which is enum:ed to 0, so we need the inverse
- * (1) to activate chip select.
- */
- gpiod_set_value(pl022->cur_gpiod, !command);
-}
-
-/**
- * giveback - current spi_message is over, schedule next message and call
- * callback of this message. Assumes that caller already
- * set message->status; dma and pio irqs are blocked
- * @pl022: SSP driver private data structure
- */
-static void giveback(struct pl022 *pl022)
-{
- struct spi_transfer *last_transfer;
- pl022->next_msg_cs_active = false;
-
- last_transfer = list_last_entry(&pl022->cur_msg->transfers,
- struct spi_transfer, transfer_list);
-
- /* Delay if requested before any change in chip select */
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- spi_transfer_delay_exec(last_transfer);
-
- if (!last_transfer->cs_change) {
- struct spi_message *next_msg;
-
- /*
- * cs_change was not set. We can keep the chip select
- * enabled if there is message in the queue and it is
- * for the same spi device.
- *
- * We cannot postpone this until pump_messages, because
- * after calling msg->complete (below) the driver that
- * sent the current message could be unloaded, which
- * could invalidate the cs_control() callback...
- */
- /* get a pointer to the next message, if any */
- next_msg = spi_get_next_queued_message(pl022->host);
-
- /*
- * see if the next and current messages point
- * to the same spi device.
- */
- if (next_msg && next_msg->spi != pl022->cur_msg->spi)
- next_msg = NULL;
- if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
- else
- pl022->next_msg_cs_active = true;
-
- }
-
- pl022->cur_msg = NULL;
- pl022->cur_transfer = NULL;
- pl022->cur_chip = NULL;
-
- /* disable the SPI/SSP operation */
- writew((readw(SSP_CR1(pl022->virtbase)) &
- (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
-
- spi_finalize_current_message(pl022->host);
+ internal_cs_control(pl022, enable);
}
/**
@@ -757,30 +675,6 @@ static void readwriter(struct pl022 *pl022)
*/
}
-/**
- * next_transfer - Move to the Next transfer in the current spi message
- * @pl022: SSP driver private data structure
- *
- * This function moves though the linked list of spi transfers in the
- * current spi message and returns with the state of current spi
- * message i.e whether its last transfer is done(STATE_DONE) or
- * Next transfer is ready(STATE_RUNNING)
- */
-static void *next_transfer(struct pl022 *pl022)
-{
- struct spi_message *msg = pl022->cur_msg;
- struct spi_transfer *trans = pl022->cur_transfer;
-
- /* Move to next transfer */
- if (trans->transfer_list.next != &msg->transfers) {
- pl022->cur_transfer =
- list_entry(trans->transfer_list.next,
- struct spi_transfer, transfer_list);
- return STATE_RUNNING;
- }
- return STATE_DONE;
-}
-
/*
* This DMA functionality is only compiled in if we have
* access to the generic DMA devices/DMA engine.
@@ -800,7 +694,6 @@ static void unmap_free_dma_scatter(struct pl022 *pl022)
static void dma_callback(void *data)
{
struct pl022 *pl022 = data;
- struct spi_message *msg = pl022->cur_msg;
BUG_ON(!pl022->sgt_rx.sgl);
@@ -845,13 +738,7 @@ static void dma_callback(void *data)
unmap_free_dma_scatter(pl022);
- /* Update total bytes transferred */
- msg->actual_length += pl022->cur_transfer->len;
- /* Move to next transfer */
- msg->state = next_transfer(pl022);
- if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
- tasklet_schedule(&pl022->pump_transfers);
+ spi_finalize_current_transfer(pl022->host);
}
static void setup_dma_scatter(struct pl022 *pl022,
@@ -1189,6 +1076,9 @@ err_no_rxchan:
static void terminate_dma(struct pl022 *pl022)
{
+ if (!pl022->dma_running)
+ return;
+
struct dma_chan *rxchan = pl022->dma_rx_channel;
struct dma_chan *txchan = pl022->dma_tx_channel;
@@ -1200,8 +1090,7 @@ static void terminate_dma(struct pl022 *pl022)
static void pl022_dma_remove(struct pl022 *pl022)
{
- if (pl022->dma_running)
- terminate_dma(pl022);
+ terminate_dma(pl022);
if (pl022->dma_tx_channel)
dma_release_channel(pl022->dma_tx_channel);
if (pl022->dma_rx_channel)
@@ -1225,6 +1114,10 @@ static inline int pl022_dma_probe(struct pl022 *pl022)
return 0;
}
+static inline void terminate_dma(struct pl022 *pl022)
+{
+}
+
static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
@@ -1246,16 +1139,7 @@ static inline void pl022_dma_remove(struct pl022 *pl022)
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
struct pl022 *pl022 = dev_id;
- struct spi_message *msg = pl022->cur_msg;
u16 irq_status = 0;
-
- if (unlikely(!msg)) {
- dev_err(&pl022->adev->dev,
- "bad message state in interrupt handler");
- /* Never fail */
- return IRQ_HANDLED;
- }
-
/* Read the Interrupt Status Register */
irq_status = readw(SSP_MIS(pl022->virtbase));
@@ -1287,10 +1171,8 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
- msg->state = STATE_ERROR;
-
- /* Schedule message queue handler */
- tasklet_schedule(&pl022->pump_transfers);
+ pl022->cur_transfer->error |= SPI_TRANS_FAIL_IO;
+ spi_finalize_current_transfer(pl022->host);
return IRQ_HANDLED;
}
@@ -1318,13 +1200,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
"number of bytes on a 16bit bus?)\n",
(u32) (pl022->rx - pl022->rx_end));
}
- /* Update total bytes transferred */
- msg->actual_length += pl022->cur_transfer->len;
- /* Move to next transfer */
- msg->state = next_transfer(pl022);
- if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
- tasklet_schedule(&pl022->pump_transfers);
+ spi_finalize_current_transfer(pl022->host);
return IRQ_HANDLED;
}
@@ -1361,98 +1237,20 @@ static int set_up_next_transfer(struct pl022 *pl022,
return 0;
}
-/**
- * pump_transfers - Tasklet function which schedules next transfer
- * when running in interrupt or DMA transfer mode.
- * @data: SSP driver private data structure
- *
- */
-static void pump_transfers(unsigned long data)
+static int do_interrupt_dma_transfer(struct pl022 *pl022)
{
- struct pl022 *pl022 = (struct pl022 *) data;
- struct spi_message *message = NULL;
- struct spi_transfer *transfer = NULL;
- struct spi_transfer *previous = NULL;
-
- /* Get current state information */
- message = pl022->cur_msg;
- transfer = pl022->cur_transfer;
-
- /* Handle for abort */
- if (message->state == STATE_ERROR) {
- message->status = -EIO;
- giveback(pl022);
- return;
- }
-
- /* Handle end of message */
- if (message->state == STATE_DONE) {
- message->status = 0;
- giveback(pl022);
- return;
- }
-
- /* Delay if requested at end of transfer before CS change */
- if (message->state == STATE_RUNNING) {
- previous = list_entry(transfer->transfer_list.prev,
- struct spi_transfer,
- transfer_list);
- /*
- * FIXME: This runs in interrupt context.
- * Is this really smart?
- */
- spi_transfer_delay_exec(previous);
-
- /* Reselect chip select only if cs_change was requested */
- if (previous->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
- } else {
- /* STATE_START */
- message->state = STATE_RUNNING;
- }
-
- if (set_up_next_transfer(pl022, transfer)) {
- message->state = STATE_ERROR;
- message->status = -EIO;
- giveback(pl022);
- return;
- }
- /* Flush the FIFOs and let's go! */
- flush(pl022);
-
- if (pl022->cur_chip->enable_dma) {
- if (configure_dma(pl022)) {
- dev_dbg(&pl022->adev->dev,
- "configuration of DMA failed, fall back to interrupt mode\n");
- goto err_config_dma;
- }
- return;
- }
-
-err_config_dma:
- /* enable all interrupts except RX */
- writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
-}
+ int ret;
-static void do_interrupt_dma_transfer(struct pl022 *pl022)
-{
/*
* Default is to enable all interrupts except RX -
* this will be enabled once TX is complete
*/
u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
- /* Enable target chip, if not already active */
- if (!pl022->next_msg_cs_active)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ ret = set_up_next_transfer(pl022, pl022->cur_transfer);
+ if (ret)
+ return ret;
- if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
- /* Error path */
- pl022->cur_msg->state = STATE_ERROR;
- pl022->cur_msg->status = -EIO;
- giveback(pl022);
- return;
- }
/* If we're using DMA, set up DMA here */
if (pl022->cur_chip->enable_dma) {
/* Configure DMA transfer */
@@ -1469,6 +1267,7 @@ err_config_dma:
writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
SSP_CR1(pl022->virtbase));
writew(irqflags, SSP_IMSC(pl022->virtbase));
+ return 1;
}
static void print_current_status(struct pl022 *pl022)
@@ -1495,111 +1294,65 @@ static void print_current_status(struct pl022 *pl022)
}
-static void do_polling_transfer(struct pl022 *pl022)
+static int do_polling_transfer(struct pl022 *pl022)
{
- struct spi_message *message = NULL;
- struct spi_transfer *transfer = NULL;
- struct spi_transfer *previous = NULL;
+ int ret;
unsigned long time, timeout;
- message = pl022->cur_msg;
-
- while (message->state != STATE_DONE) {
- /* Handle for abort */
- if (message->state == STATE_ERROR)
- break;
- transfer = pl022->cur_transfer;
-
- /* Delay if requested at end of transfer */
- if (message->state == STATE_RUNNING) {
- previous =
- list_entry(transfer->transfer_list.prev,
- struct spi_transfer, transfer_list);
- spi_transfer_delay_exec(previous);
- if (previous->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
- } else {
- /* STATE_START */
- message->state = STATE_RUNNING;
- if (!pl022->next_msg_cs_active)
- pl022_cs_control(pl022, SSP_CHIP_SELECT);
- }
-
- /* Configuration Changing Per Transfer */
- if (set_up_next_transfer(pl022, transfer)) {
- /* Error path */
- message->state = STATE_ERROR;
- break;
- }
- /* Flush FIFOs and enable SSP */
- flush(pl022);
- writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
- SSP_CR1(pl022->virtbase));
-
- dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
-
- timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
- while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
- time = jiffies;
- readwriter(pl022);
- if (time_after(time, timeout)) {
- dev_warn(&pl022->adev->dev,
- "%s: timeout!\n", __func__);
- message->state = STATE_TIMEOUT;
- print_current_status(pl022);
- goto out;
- }
- cpu_relax();
+ /* Configuration Changing Per Transfer */
+ ret = set_up_next_transfer(pl022, pl022->cur_transfer);
+ if (ret)
+ return ret;
+ /* Flush FIFOs and enable SSP */
+ flush(pl022);
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+
+ dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
+
+ timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
+ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
+ time = jiffies;
+ readwriter(pl022);
+ if (time_after(time, timeout)) {
+ dev_warn(&pl022->adev->dev,
+ "%s: timeout!\n", __func__);
+ print_current_status(pl022);
+ return -ETIMEDOUT;
}
-
- /* Update total byte transferred */
- message->actual_length += pl022->cur_transfer->len;
- /* Move to next transfer */
- message->state = next_transfer(pl022);
- if (message->state != STATE_DONE
- && pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ cpu_relax();
}
-out:
- /* Handle end of message */
- if (message->state == STATE_DONE)
- message->status = 0;
- else if (message->state == STATE_TIMEOUT)
- message->status = -EAGAIN;
- else
- message->status = -EIO;
- giveback(pl022);
- return;
+ return 0;
}
-static int pl022_transfer_one_message(struct spi_controller *host,
- struct spi_message *msg)
+static int pl022_transfer_one(struct spi_controller *host, struct spi_device *spi,
+ struct spi_transfer *transfer)
{
struct pl022 *pl022 = spi_controller_get_devdata(host);
- /* Initial message state */
- pl022->cur_msg = msg;
- msg->state = STATE_START;
-
- pl022->cur_transfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
+ pl022->cur_transfer = transfer;
/* Setup the SPI using the per chip configuration */
- pl022->cur_chip = spi_get_ctldata(msg->spi);
- pl022->cur_cs = spi_get_chipselect(msg->spi, 0);
- /* This is always available but may be set to -ENOENT */
- pl022->cur_gpiod = spi_get_csgpiod(msg->spi, 0);
+ pl022->cur_chip = spi_get_ctldata(spi);
+ pl022->cur_cs = spi_get_chipselect(spi, 0);
restore_state(pl022);
flush(pl022);
if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
- do_polling_transfer(pl022);
+ return do_polling_transfer(pl022);
else
- do_interrupt_dma_transfer(pl022);
+ return do_interrupt_dma_transfer(pl022);
+}
- return 0;
+static void pl022_handle_err(struct spi_controller *ctlr, struct spi_message *message)
+{
+ struct pl022 *pl022 = spi_controller_get_devdata(ctlr);
+
+ terminate_dma(pl022);
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
static int pl022_unprepare_transfer_hardware(struct spi_controller *host)
@@ -2138,7 +1891,9 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
host->cleanup = pl022_cleanup;
host->setup = pl022_setup;
host->auto_runtime_pm = true;
- host->transfer_one_message = pl022_transfer_one_message;
+ host->transfer_one = pl022_transfer_one;
+ host->set_cs = pl022_cs_control;
+ host->handle_err = pl022_handle_err;
host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
host->rt = platform_info->rt;
host->dev.of_node = dev->of_node;
@@ -2175,10 +1930,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
- /* Initialize transfer pump */
- tasklet_init(&pl022->pump_transfers, pump_transfers,
- (unsigned long)pl022);
-
/* Disable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
@@ -2261,7 +2012,6 @@ pl022_remove(struct amba_device *adev)
pl022_dma_remove(pl022);
amba_release_regions(adev);
- tasklet_disable(&pl022->pump_transfers);
}
#ifdef CONFIG_PM_SLEEP
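Taken together, the pl022 hunks above retire the driver's private message pump: the tasklet, next_transfer(), giveback(), and the per-driver cur_msg/cs_gpiod tracking all disappear, and sequencing of transfers, delays, and chip select moves into the SPI core through the transfer_one(), set_cs(), and handle_err() hooks. A hedged sketch of the resulting contract (start_hw_transfer() is a hypothetical stand-in for the driver's FIFO/DMA setup):

	static int example_transfer_one(struct spi_controller *host,
					struct spi_device *spi,
					struct spi_transfer *xfer)
	{
		start_hw_transfer(xfer);	/* hypothetical helper */
		return 1;	/* >0: in flight, completed asynchronously */
	}

	static void example_complete(struct spi_controller *host)
	{
		/* as dma_callback() and pl022_interrupt_handler() now do */
		spi_finalize_current_transfer(host);
	}

Polling mode instead returns 0 from transfer_one() (see do_polling_transfer()), which tells the core the transfer already finished synchronously.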
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index e982d3189..82d626484 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -25,11 +25,12 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
+#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 0e48ffd49..652eadbef 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -3,19 +3,20 @@
// Copyright (c) 2009 Samsung Electronics Co., Ltd.
// Jaswinder Singh <jassi.brar@samsung.com>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/spi-s3c64xx.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
-#include <linux/of.h>
-
-#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS 12
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
@@ -76,6 +77,7 @@
#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+#define S3C64XX_SPI_ST_TX_FIFO_LVL_SHIFT 6
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
@@ -106,9 +108,11 @@
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
-#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
-#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
- FIFO_LVL_MASK(i))
+#define TX_FIFO_LVL(v, sdd) (((v) & (sdd)->tx_fifomask) >> \
+ __ffs((sdd)->tx_fifomask))
+#define RX_FIFO_LVL(v, sdd) (((v) & (sdd)->rx_fifomask) >> \
+ __ffs((sdd)->rx_fifomask))
+#define FIFO_DEPTH(i) ((FIFO_LVL_MASK(i) >> 1) + 1)
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
@@ -133,6 +137,10 @@ struct s3c64xx_spi_dma_data {
* struct s3c64xx_spi_port_config - SPI Controller hardware info
* @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
* @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
+ * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
+ * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
* @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
* @clk_div: Internal clock divider
* @quirks: Bitmask of known quirks
@@ -150,6 +158,8 @@ struct s3c64xx_spi_dma_data {
struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
+ u32 rx_fifomask;
+ u32 tx_fifomask;
int tx_st_done;
int quirks;
int clk_div;
@@ -179,6 +189,11 @@ struct s3c64xx_spi_port_config {
* @tx_dma: Local transmit DMA data (e.g. chan and direction)
* @port_conf: Local SPI port configuration data
* @port_id: Port identification number
+ * @fifo_depth: depth of the FIFO.
+ * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
+ * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
+ * length and position.
*/
struct s3c64xx_spi_driver_data {
void __iomem *regs;
@@ -198,6 +213,9 @@ struct s3c64xx_spi_driver_data {
struct s3c64xx_spi_dma_data tx_dma;
const struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
+ unsigned int fifo_depth;
+ u32 rx_fifomask;
+ u32 tx_fifomask;
};
static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
@@ -405,12 +423,10 @@ static bool s3c64xx_spi_can_dma(struct spi_controller *host,
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
- if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
- return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
- } else {
- return false;
- }
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch)
+ return xfer->len >= sdd->fifo_depth;
+ return false;
}
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
@@ -495,9 +511,7 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
void __iomem *regs = sdd->regs;
unsigned long val = 1;
u32 status;
-
- /* max fifo depth available */
- u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ u32 max_fifo = sdd->fifo_depth;
if (timeout_ms)
val = msecs_to_loops(timeout_ms);
@@ -604,7 +618,7 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
* For any size less than the fifo size the below code is
* executed at least once.
*/
- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ loops = xfer->len / sdd->fifo_depth;
buf = xfer->rx_buf;
do {
/* wait for data to be received in the fifo */
@@ -741,7 +755,7 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
struct spi_transfer *xfer)
{
struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
- const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+ const unsigned int fifo_len = sdd->fifo_depth;
const void *tx_buf = NULL;
void *rx_buf = NULL;
int target_len = 0, origin_len = 0;
@@ -769,10 +783,9 @@ static int s3c64xx_spi_transfer_one(struct spi_controller *host,
return status;
}
- if (!is_polling(sdd) && (xfer->len > fifo_len) &&
+ if (!is_polling(sdd) && xfer->len >= fifo_len &&
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
-
} else if (xfer->len >= fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
@@ -1146,6 +1159,23 @@ static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
}
+static void s3c64xx_spi_set_fifomask(struct s3c64xx_spi_driver_data *sdd)
+{
+ const struct s3c64xx_spi_port_config *port_conf = sdd->port_conf;
+
+ if (port_conf->rx_fifomask)
+ sdd->rx_fifomask = port_conf->rx_fifomask;
+ else
+ sdd->rx_fifomask = FIFO_LVL_MASK(sdd) <<
+ port_conf->rx_lvl_offset;
+
+ if (port_conf->tx_fifomask)
+ sdd->tx_fifomask = port_conf->tx_fifomask;
+ else
+ sdd->tx_fifomask = FIFO_LVL_MASK(sdd) <<
+ S3C64XX_SPI_ST_TX_FIFO_LVL_SHIFT;
+}
+
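/*
 * A minimal sketch (not part of this patch) of how such a shifted field
 * mask is consumed at run time: the mask is not a compile-time constant,
 * so FIELD_GET() cannot be used, and the field is extracted with __ffs()
 * from <linux/bitops.h>. The helper name rx_fifo_level() is hypothetical.
 */
static u32 rx_fifo_level(struct s3c64xx_spi_driver_data *sdd)
{
	u32 status = readl(sdd->regs + S3C64XX_SPI_STATUS);

	/* The shifted mask encodes both the width and position of the field. */
	return (status & sdd->rx_fifomask) >> __ffs(sdd->rx_fifomask);
}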
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
@@ -1191,6 +1221,10 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->port_id = pdev->id;
}
+ sdd->fifo_depth = FIFO_DEPTH(sdd);
+
+ s3c64xx_spi_set_fifomask(sdd);
+
sdd->cur_bpw = 8;
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
@@ -1280,7 +1314,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Targets attached\n",
sdd->port_id, host->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
- mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
+ mem_res, sdd->fifo_depth);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index bf01feedb..262c11d97 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -138,8 +138,7 @@ struct sprd_adi_data {
u32 slave_offset;
u32 slave_addr_size;
int (*read_check)(u32 val, u32 reg);
- int (*restart)(struct notifier_block *this,
- unsigned long mode, void *cmd);
+ int (*restart)(struct sys_off_data *data);
void (*wdg_rst)(void *p);
};
@@ -150,7 +149,6 @@ struct sprd_adi {
struct hwspinlock *hwlock;
unsigned long slave_vbase;
unsigned long slave_pbase;
- struct notifier_block restart_handler;
const struct sprd_adi_data *data;
};
@@ -370,11 +368,9 @@ static void sprd_adi_set_wdt_rst_mode(void *p)
#endif
}
-static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
- void *cmd, struct sprd_adi_wdg *wdg)
+static int sprd_adi_restart(struct sprd_adi *sadi, unsigned long mode,
+ const char *cmd, struct sprd_adi_wdg *wdg)
{
- struct sprd_adi *sadi = container_of(this, struct sprd_adi,
- restart_handler);
u32 val, reboot_mode = 0;
if (!cmd)
@@ -448,8 +444,7 @@ static int sprd_adi_restart(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
-static int sprd_adi_restart_sc9860(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int sprd_adi_restart_sc9860(struct sys_off_data *data)
{
struct sprd_adi_wdg wdg = {
.base = PMIC_WDG_BASE,
@@ -458,7 +453,7 @@ static int sprd_adi_restart_sc9860(struct notifier_block *this,
.wdg_clk = PMIC_CLK_EN,
};
- return sprd_adi_restart(this, mode, cmd, &wdg);
+ return sprd_adi_restart(data->cb_data, data->mode, data->cmd, &wdg);
}
static void sprd_adi_hw_init(struct sprd_adi *sadi)
@@ -533,7 +528,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
pdev->id = of_alias_get_id(np, "spi");
num_chipselect = of_get_child_count(np);
- ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(struct sprd_adi));
if (!ctlr)
return -ENOMEM;
@@ -590,9 +585,9 @@ static int sprd_adi_probe(struct platform_device *pdev)
}
if (sadi->data->restart) {
- sadi->restart_handler.notifier_call = sadi->data->restart;
- sadi->restart_handler.priority = 128;
- ret = register_restart_handler(&sadi->restart_handler);
+ ret = devm_register_restart_handler(&pdev->dev,
+ sadi->data->restart,
+ sadi);
if (ret) {
dev_err(&pdev->dev, "can not register restart handler\n");
goto put_ctlr;
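/*
 * Illustrative sketch of the sys_off flow this patch switches to (the
 * handler name is hypothetical): the callback receives the cb_data
 * pointer passed at registration plus the reboot mode and command, so no
 * container_of() bookkeeping or manual unregistration is needed.
 */
static int example_restart(struct sys_off_data *data)
{
	struct sprd_adi *sadi = data->cb_data;	/* passed at registration */

	/* ... arm the PMIC watchdog via sadi, honouring data->cmd ... */
	return NOTIFY_DONE;
}

/* In probe, the devm variant unregisters on driver unbind: */
/* devm_register_restart_handler(&pdev->dev, example_restart, sadi); */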
@@ -606,14 +601,6 @@ put_ctlr:
return ret;
}
-static void sprd_adi_remove(struct platform_device *pdev)
-{
- struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
- struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
-
- unregister_restart_handler(&sadi->restart_handler);
-}
-
static struct sprd_adi_data sc9860_data = {
.slave_offset = ADI_10BIT_SLAVE_OFFSET,
.slave_addr_size = ADI_10BIT_SLAVE_ADDR_SIZE,
@@ -657,7 +644,6 @@ static struct platform_driver sprd_adi_driver = {
.of_match_table = sprd_adi_of_match,
},
.probe = sprd_adi_probe,
- .remove_new = sprd_adi_remove,
};
module_platform_driver(sprd_adi_driver);
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 95377cf74..831ebae10 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -578,7 +578,7 @@ static void sprd_spi_dma_release(struct sprd_spi *ss)
static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
struct spi_transfer *t)
{
- struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
+ struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len;
int ret, write_size = 0;
@@ -923,7 +923,7 @@ static int sprd_spi_probe(struct platform_device *pdev)
int ret;
pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
- sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
+ sctlr = spi_alloc_host(&pdev->dev, sizeof(*ss));
if (!sctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index 7fcff9c53..e064025e2 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -6,7 +6,7 @@
* Patrice Chotard <patrice.chotard@st.com>
* Lee Jones <lee.jones@linaro.org>
*
- * SPI master mode controller driver, used in STMicroelectronics devices.
+ * SPI host mode controller driver, used in STMicroelectronics devices.
*/
#include <linux/clk.h>
@@ -115,10 +115,10 @@ static void ssc_read_rx_fifo(struct spi_st *spi_st)
spi_st->words_remaining -= count;
}
-static int spi_st_transfer_one(struct spi_master *master,
+static int spi_st_transfer_one(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
uint32_t ctl = 0;
/* Setup transfer */
@@ -165,7 +165,7 @@ static int spi_st_transfer_one(struct spi_master *master,
if (ctl)
writel_relaxed(ctl, spi_st->base + SSC_CTL);
- spi_finalize_current_transfer(spi->master);
+ spi_finalize_current_transfer(spi->controller);
return t->len;
}
@@ -174,7 +174,7 @@ static int spi_st_transfer_one(struct spi_master *master,
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
static int spi_st_setup(struct spi_device *spi)
{
- struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+ struct spi_st *spi_st = spi_controller_get_devdata(spi->controller);
u32 spi_st_clk, sscbrg, var;
u32 hz = spi->max_speed_hz;
@@ -274,35 +274,35 @@ static irqreturn_t spi_st_irq(int irq, void *dev_id)
static int spi_st_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct spi_st *spi_st;
int irq, ret = 0;
u32 var;
- master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*spi_st));
+ if (!host)
return -ENOMEM;
- master->dev.of_node = np;
- master->mode_bits = MODEBITS;
- master->setup = spi_st_setup;
- master->transfer_one = spi_st_transfer_one;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
- master->use_gpio_descriptors = true;
- spi_st = spi_master_get_devdata(master);
+ host->dev.of_node = np;
+ host->mode_bits = MODEBITS;
+ host->setup = spi_st_setup;
+ host->transfer_one = spi_st_transfer_one;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ host->auto_runtime_pm = true;
+ host->bus_num = pdev->id;
+ host->use_gpio_descriptors = true;
+ spi_st = spi_controller_get_devdata(host);
spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
if (IS_ERR(spi_st->clk)) {
dev_err(&pdev->dev, "Unable to request clock\n");
ret = PTR_ERR(spi_st->clk);
- goto put_master;
+ goto put_host;
}
ret = clk_prepare_enable(spi_st->clk);
if (ret)
- goto put_master;
+ goto put_host;
init_completion(&spi_st->done);
@@ -324,7 +324,7 @@ static int spi_st_probe(struct platform_device *pdev)
var &= ~SSC_CTL_SR;
writel_relaxed(var, spi_st->base + SSC_CTL);
- /* Set SSC into slave mode before reconfiguring PIO pins */
+ /* Set SSC into target mode before reconfiguring PIO pins */
var = readl_relaxed(spi_st->base + SSC_CTL);
var &= ~SSC_CTL_MS;
writel_relaxed(var, spi_st->base + SSC_CTL);
@@ -347,11 +347,11 @@ static int spi_st_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&pdev->dev, "Failed to register master\n");
+ dev_err(&pdev->dev, "Failed to register host\n");
goto rpm_disable;
}
@@ -361,15 +361,15 @@ rpm_disable:
pm_runtime_disable(&pdev->dev);
clk_disable:
clk_disable_unprepare(spi_st->clk);
-put_master:
- spi_master_put(master);
+put_host:
+ spi_controller_put(host);
return ret;
}
static void spi_st_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
pm_runtime_disable(&pdev->dev);
@@ -381,8 +381,8 @@ static void spi_st_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int spi_st_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
writel_relaxed(0, spi_st->base + SSC_IEN);
pinctrl_pm_select_sleep_state(dev);
@@ -394,8 +394,8 @@ static int spi_st_runtime_suspend(struct device *dev)
static int spi_st_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct spi_st *spi_st = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(spi_st->clk);
@@ -408,10 +408,10 @@ static int spi_st_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int spi_st_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -420,10 +420,10 @@ static int spi_st_suspend(struct device *dev)
static int spi_st_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret)
return ret;
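/*
 * Illustrative sketch of the controller-neutral API this series converts
 * drivers to (my_probe and my_priv are hypothetical): spi_alloc_host()
 * and spi_controller_get_devdata() replace the master-specific calls with
 * identical semantics.
 */
struct my_priv { void __iomem *regs; };

static int my_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct my_priv *priv;

	/* devm variant: freed automatically on probe failure or unbind */
	host = devm_spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!host)
		return -ENOMEM;

	priv = spi_controller_get_devdata(host);
	host->dev.of_node = pdev->dev.of_node;

	return devm_spi_register_controller(&pdev->dev, host);
}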
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index def74ae9b..385832030 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -357,7 +357,7 @@ static int stm32_qspi_get_mode(u8 buswidth)
static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(spi->controller);
struct stm32_qspi_flash *flash = &qspi->flash[spi_get_chipselect(spi, 0)];
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
@@ -448,7 +448,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
unsigned long polling_rate_us,
unsigned long timeout_ms)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
int ret;
if (!spi_mem_supports_op(mem, op))
@@ -476,7 +476,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
int ret;
ret = pm_runtime_resume_and_get(qspi->dev);
@@ -500,7 +500,7 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->controller);
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
return -EOPNOTSUPP;
@@ -518,7 +518,7 @@ static int stm32_qspi_dirmap_create(struct spi_mem_dirmap_desc *desc)
static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct stm32_qspi *qspi = spi_controller_get_devdata(desc->mem->spi->controller);
struct spi_mem_op op;
u32 addr_max;
int ret;
@@ -640,7 +640,7 @@ end_of_transfer:
static int stm32_qspi_setup(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
u32 presc, mode;
@@ -775,7 +775,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
+ ctrl = devm_spi_alloc_host(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
@@ -861,7 +861,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
- ret = spi_register_master(ctrl);
+ ret = spi_register_controller(ctrl);
if (ret)
goto err_pm_runtime_free;
@@ -892,7 +892,7 @@ static void stm32_qspi_remove(struct platform_device *pdev)
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(qspi->dev);
- spi_unregister_master(qspi->ctrl);
+ spi_unregister_controller(qspi->ctrl);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index e6e3e4ea2..e61302ef3 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -22,58 +22,65 @@
#define DRIVER_NAME "spi_stm32"
-/* STM32F4 SPI registers */
-#define STM32F4_SPI_CR1 0x00
-#define STM32F4_SPI_CR2 0x04
-#define STM32F4_SPI_SR 0x08
-#define STM32F4_SPI_DR 0x0C
-#define STM32F4_SPI_I2SCFGR 0x1C
-
-/* STM32F4_SPI_CR1 bit fields */
-#define STM32F4_SPI_CR1_CPHA BIT(0)
-#define STM32F4_SPI_CR1_CPOL BIT(1)
-#define STM32F4_SPI_CR1_MSTR BIT(2)
-#define STM32F4_SPI_CR1_BR_SHIFT 3
-#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
-#define STM32F4_SPI_CR1_SPE BIT(6)
-#define STM32F4_SPI_CR1_LSBFRST BIT(7)
-#define STM32F4_SPI_CR1_SSI BIT(8)
-#define STM32F4_SPI_CR1_SSM BIT(9)
-#define STM32F4_SPI_CR1_RXONLY BIT(10)
+/* STM32F4/7 SPI registers */
+#define STM32FX_SPI_CR1 0x00
+#define STM32FX_SPI_CR2 0x04
+#define STM32FX_SPI_SR 0x08
+#define STM32FX_SPI_DR 0x0C
+#define STM32FX_SPI_I2SCFGR 0x1C
+
+/* STM32FX_SPI_CR1 bit fields */
+#define STM32FX_SPI_CR1_CPHA BIT(0)
+#define STM32FX_SPI_CR1_CPOL BIT(1)
+#define STM32FX_SPI_CR1_MSTR BIT(2)
+#define STM32FX_SPI_CR1_BR_SHIFT 3
+#define STM32FX_SPI_CR1_BR GENMASK(5, 3)
+#define STM32FX_SPI_CR1_SPE BIT(6)
+#define STM32FX_SPI_CR1_LSBFRST BIT(7)
+#define STM32FX_SPI_CR1_SSI BIT(8)
+#define STM32FX_SPI_CR1_SSM BIT(9)
+#define STM32FX_SPI_CR1_RXONLY BIT(10)
#define STM32F4_SPI_CR1_DFF BIT(11)
-#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
-#define STM32F4_SPI_CR1_CRCEN BIT(13)
-#define STM32F4_SPI_CR1_BIDIOE BIT(14)
-#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
-#define STM32F4_SPI_CR1_BR_MIN 0
-#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
-
-/* STM32F4_SPI_CR2 bit fields */
-#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
-#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
-#define STM32F4_SPI_CR2_SSOE BIT(2)
-#define STM32F4_SPI_CR2_FRF BIT(4)
-#define STM32F4_SPI_CR2_ERRIE BIT(5)
-#define STM32F4_SPI_CR2_RXNEIE BIT(6)
-#define STM32F4_SPI_CR2_TXEIE BIT(7)
-
-/* STM32F4_SPI_SR bit fields */
-#define STM32F4_SPI_SR_RXNE BIT(0)
-#define STM32F4_SPI_SR_TXE BIT(1)
-#define STM32F4_SPI_SR_CHSIDE BIT(2)
-#define STM32F4_SPI_SR_UDR BIT(3)
-#define STM32F4_SPI_SR_CRCERR BIT(4)
-#define STM32F4_SPI_SR_MODF BIT(5)
-#define STM32F4_SPI_SR_OVR BIT(6)
-#define STM32F4_SPI_SR_BSY BIT(7)
-#define STM32F4_SPI_SR_FRE BIT(8)
-
-/* STM32F4_SPI_I2SCFGR bit fields */
-#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
+#define STM32F7_SPI_CR1_CRCL BIT(11)
+#define STM32FX_SPI_CR1_CRCNEXT BIT(12)
+#define STM32FX_SPI_CR1_CRCEN BIT(13)
+#define STM32FX_SPI_CR1_BIDIOE BIT(14)
+#define STM32FX_SPI_CR1_BIDIMODE BIT(15)
+#define STM32FX_SPI_CR1_BR_MIN 0
+#define STM32FX_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
+
+/* STM32FX_SPI_CR2 bit fields */
+#define STM32FX_SPI_CR2_RXDMAEN BIT(0)
+#define STM32FX_SPI_CR2_TXDMAEN BIT(1)
+#define STM32FX_SPI_CR2_SSOE BIT(2)
+#define STM32FX_SPI_CR2_FRF BIT(4)
+#define STM32FX_SPI_CR2_ERRIE BIT(5)
+#define STM32FX_SPI_CR2_RXNEIE BIT(6)
+#define STM32FX_SPI_CR2_TXEIE BIT(7)
+#define STM32F7_SPI_CR2_DS GENMASK(11, 8)
+#define STM32F7_SPI_CR2_FRXTH BIT(12)
+#define STM32F7_SPI_CR2_LDMA_RX BIT(13)
+#define STM32F7_SPI_CR2_LDMA_TX BIT(14)
+
+/* STM32FX_SPI_SR bit fields */
+#define STM32FX_SPI_SR_RXNE BIT(0)
+#define STM32FX_SPI_SR_TXE BIT(1)
+#define STM32FX_SPI_SR_CHSIDE BIT(2)
+#define STM32FX_SPI_SR_UDR BIT(3)
+#define STM32FX_SPI_SR_CRCERR BIT(4)
+#define STM32FX_SPI_SR_MODF BIT(5)
+#define STM32FX_SPI_SR_OVR BIT(6)
+#define STM32FX_SPI_SR_BSY BIT(7)
+#define STM32FX_SPI_SR_FRE BIT(8)
+#define STM32F7_SPI_SR_FRLVL GENMASK(10, 9)
+#define STM32F7_SPI_SR_FTLVL GENMASK(12, 11)
+
+/* STM32FX_SPI_I2SCFGR bit fields */
+#define STM32FX_SPI_I2SCFGR_I2SMOD BIT(11)
/* STM32F4 SPI Baud Rate min/max divisor */
-#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
-#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
+#define STM32FX_SPI_BR_DIV_MIN (2 << STM32FX_SPI_CR1_BR_MIN)
+#define STM32FX_SPI_BR_DIV_MAX (2 << STM32FX_SPI_CR1_BR_MAX)
/* STM32H7 SPI registers */
#define STM32H7_SPI_CR1 0x00
@@ -147,6 +154,20 @@
/* STM32H7_SPI_I2SCFGR bit fields */
#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
+/* STM32MP25 SPI registers bit fields */
+#define STM32MP25_SPI_HWCFGR1 0x3F0
+
+/* STM32MP25_SPI_CR2 bit fields */
+#define STM32MP25_SPI_TSIZE_MAX_LIMITED GENMASK(9, 0)
+
+/* STM32MP25_SPI_HWCFGR1 */
+#define STM32MP25_SPI_HWCFGR1_FULLCFG GENMASK(27, 24)
+#define STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED 0x0
+#define STM32MP25_SPI_HWCFGR1_FULLCFG_FULL 0x1
+#define STM32MP25_SPI_HWCFGR1_DSCFG GENMASK(19, 16)
+#define STM32MP25_SPI_HWCFGR1_DSCFG_16_B 0x0
+#define STM32MP25_SPI_HWCFGR1_DSCFG_32_B 0x1
+
/* STM32H7 SPI Master Baud Rate min/max divisor */
#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
@@ -173,7 +194,7 @@
#define SPI_DMA_MIN_BYTES 16
/* STM32 SPI driver helpers */
-#define STM32_SPI_MASTER_MODE(stm32_spi) (!(stm32_spi)->device_mode)
+#define STM32_SPI_HOST_MODE(stm32_spi) (!(stm32_spi)->device_mode)
#define STM32_SPI_DEVICE_MODE(stm32_spi) ((stm32_spi)->device_mode)
/**
@@ -200,6 +221,7 @@ struct stm32_spi_reg {
* @br: baud rate register and bitfields
* @rx: SPI RX data register
* @tx: SPI TX data register
+ * @fullcfg: SPI full or limited feature set register
*/
struct stm32_spi_regspec {
const struct stm32_spi_reg en;
@@ -212,6 +234,7 @@ struct stm32_spi_regspec {
const struct stm32_spi_reg br;
const struct stm32_spi_reg rx;
const struct stm32_spi_reg tx;
+ const struct stm32_spi_reg fullcfg;
};
struct stm32_spi;
@@ -222,13 +245,15 @@ struct stm32_spi;
* @get_fifo_size: routine to get fifo size
* @get_bpw_mask: routine to get bits per word mask
* @disable: routine to disable controller
- * @config: routine to configure controller as SPI Master
+ * @config: routine to configure controller as SPI Host
 * @set_bpw: routine to configure registers for bits per word
* @set_mode: routine to configure registers to desired mode
* @set_data_idleness: optional routine to configure registers to desired idle
* time between frames (if driver has this functionality)
* @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
+ * @write_tx: routine to write to transmit register/FIFO
+ * @read_rx: routine to read from receive register/FIFO
 * @transfer_one_dma_start: routine to start transferring a single spi_transfer
* using DMA
* @dma_rx_cb: routine to call after DMA RX channel operation is complete
@@ -241,6 +266,7 @@ struct stm32_spi;
* @has_fifo: boolean to know if fifo is used for driver
* @has_device_mode: is this compatible capable to switch on device mode
* @flags: compatible specific SPI controller flags used at registration time
+ * @prevent_dma_burst: boolean used to prevent DMA bursts
*/
struct stm32_spi_cfg {
const struct stm32_spi_regspec *regs;
@@ -252,6 +278,8 @@ struct stm32_spi_cfg {
int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
+ void (*write_tx)(struct stm32_spi *spi);
+ void (*read_rx)(struct stm32_spi *spi);
void (*transfer_one_dma_start)(struct stm32_spi *spi);
void (*dma_rx_cb)(void *data);
void (*dma_tx_cb)(void *data);
@@ -263,6 +291,7 @@ struct stm32_spi_cfg {
bool has_fifo;
bool has_device_mode;
u16 flags;
+ bool prevent_dma_burst;
};
/**
@@ -276,7 +305,9 @@ struct stm32_spi_cfg {
* @lock: prevent I/O concurrent access
* @irq: SPI controller interrupt line
* @fifo_size: size of the embedded fifo in bytes
- * @cur_midi: master inter-data idleness in ns
+ * @t_size_max: maximum number of data of one transfer
+ * @feature_set: SPI full or limited feature set
+ * @cur_midi: host inter-data idleness in ns
* @cur_speed: speed configured in Hz
* @cur_half_period: time of a half bit in us
* @cur_bpw: number of bits in a single SPI data frame
@@ -303,6 +334,10 @@ struct stm32_spi {
spinlock_t lock; /* prevent I/O concurrent access */
int irq;
unsigned int fifo_size;
+ unsigned int t_size_max;
+ unsigned int feature_set;
+#define STM32_SPI_FEATURE_LIMITED STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED /* 0x0 */
+#define STM32_SPI_FEATURE_FULL STM32MP25_SPI_HWCFGR1_FULLCFG_FULL /* 0x1 */
unsigned int cur_midi;
unsigned int cur_speed;
@@ -324,20 +359,20 @@ struct stm32_spi {
bool device_mode;
};
-static const struct stm32_spi_regspec stm32f4_spi_regspec = {
- .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
+static const struct stm32_spi_regspec stm32fx_spi_regspec = {
+ .en = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_SPE },
- .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
- .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
+ .dma_rx_en = { STM32FX_SPI_CR2, STM32FX_SPI_CR2_RXDMAEN },
+ .dma_tx_en = { STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXDMAEN },
- .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
- .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
- .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
+ .cpol = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_CPOL },
+ .cpha = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_CPHA },
+ .lsb_first = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_LSBFRST },
.cs_high = {},
- .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
+ .br = { STM32FX_SPI_CR1, STM32FX_SPI_CR1_BR, STM32FX_SPI_CR1_BR_SHIFT },
- .rx = { STM32F4_SPI_DR },
- .tx = { STM32F4_SPI_DR },
+ .rx = { STM32FX_SPI_DR },
+ .tx = { STM32FX_SPI_DR },
};
static const struct stm32_spi_regspec stm32h7_spi_regspec = {
@@ -360,6 +395,28 @@ static const struct stm32_spi_regspec stm32h7_spi_regspec = {
.tx = { STM32H7_SPI_TXDR },
};
+static const struct stm32_spi_regspec stm32mp25_spi_regspec = {
+ /* SPI data transfer is enabled but spi_ker_ck is idle.
+ * CFG1 and CFG2 registers are write protected when SPE is enabled.
+ */
+ .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
+
+ .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
+ .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
+
+ .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
+ .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
+ .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
+ .cs_high = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_SSIOP },
+ .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
+ STM32H7_SPI_CFG1_MBR_SHIFT },
+
+ .rx = { STM32H7_SPI_RXDR },
+ .tx = { STM32H7_SPI_TXDR },
+
+ .fullcfg = { STM32MP25_SPI_HWCFGR1, STM32MP25_SPI_HWCFGR1_FULLCFG },
+};
+
static inline void stm32_spi_set_bits(struct stm32_spi *spi,
u32 offset, u32 bits)
{
@@ -410,6 +467,16 @@ static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
}
/**
+ * stm32f7_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32f7_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ dev_dbg(spi->dev, "16-bit maximum data frame\n");
+ return SPI_BPW_RANGE_MASK(4, 16);
+}
+
+/**
* stm32h7_spi_get_bpw_mask - Return bits per word mask
* @spi: pointer to the spi controller data structure
*/
@@ -437,6 +504,28 @@ static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
}
/**
+ * stm32mp25_spi_get_bpw_mask - Return bits per word mask
+ * @spi: pointer to the spi controller data structure
+ */
+static int stm32mp25_spi_get_bpw_mask(struct stm32_spi *spi)
+{
+ u32 dscfg, max_bpw;
+
+ if (spi->feature_set == STM32_SPI_FEATURE_LIMITED) {
+ dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
+ return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ }
+
+ dscfg = FIELD_GET(STM32MP25_SPI_HWCFGR1_DSCFG,
+ readl_relaxed(spi->base + STM32MP25_SPI_HWCFGR1));
+ max_bpw = 16;
+ if (dscfg == STM32MP25_SPI_HWCFGR1_DSCFG_32_B)
+ max_bpw = 32;
+ dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
+ return SPI_BPW_RANGE_MASK(4, max_bpw);
+}
+
+/**
* stm32_spi_prepare_mbr - Determine baud rate divisor value
* @spi: pointer to the spi controller data structure
* @speed_hz: requested speed
@@ -502,19 +591,48 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
*/
static void stm32f4_spi_write_tx(struct stm32_spi *spi)
{
- if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
- STM32F4_SPI_SR_TXE)) {
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) &
+ STM32FX_SPI_SR_TXE)) {
u32 offs = spi->cur_xferlen - spi->tx_len;
if (spi->cur_bpw == 16) {
const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
- writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
+ writew_relaxed(*tx_buf16, spi->base + STM32FX_SPI_DR);
spi->tx_len -= sizeof(u16);
} else {
const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
- writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
+ writeb_relaxed(*tx_buf8, spi->base + STM32FX_SPI_DR);
+ spi->tx_len -= sizeof(u8);
+ }
+ }
+
+ dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
+}
+
+/**
+ * stm32f7_spi_write_tx - Write bytes to Transmit Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Reads from tx_buf depend on the remaining byte count, to avoid reading
+ * beyond the end of tx_buf.
+ */
+static void stm32f7_spi_write_tx(struct stm32_spi *spi)
+{
+ if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) &
+ STM32FX_SPI_SR_TXE)) {
+ u32 offs = spi->cur_xferlen - spi->tx_len;
+
+ if (spi->tx_len >= sizeof(u16)) {
+ const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
+
+ writew_relaxed(*tx_buf16, spi->base + STM32FX_SPI_DR);
+ spi->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
+
+ writeb_relaxed(*tx_buf8, spi->base + STM32FX_SPI_DR);
spi->tx_len -= sizeof(u8);
}
}
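/*
 * Worked example (illustrative): with tx_len == 5, successive TXE-paced
 * calls of the F7 variant issue two 16-bit writes (4 bytes) and then one
 * 8-bit write for the trailing byte, never reading past the end of tx_buf.
 */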
@@ -566,19 +684,19 @@ static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
*/
static void stm32f4_spi_read_rx(struct stm32_spi *spi)
{
- if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
- STM32F4_SPI_SR_RXNE)) {
+ if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32FX_SPI_SR) &
+ STM32FX_SPI_SR_RXNE)) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if (spi->cur_bpw == 16) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
- *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
+ *rx_buf16 = readw_relaxed(spi->base + STM32FX_SPI_DR);
spi->rx_len -= sizeof(u16);
} else {
u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
- *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
+ *rx_buf8 = readb_relaxed(spi->base + STM32FX_SPI_DR);
spi->rx_len -= sizeof(u8);
}
}
@@ -587,6 +705,46 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
}
/**
+ * stm32f7_spi_read_rx - Read bytes from Receive Data Register
+ * @spi: pointer to the spi controller data structure
+ *
+ * Writes to rx_buf depend on the remaining byte count, to avoid writing
+ * beyond the end of rx_buf.
+ */
+static void stm32f7_spi_read_rx(struct stm32_spi *spi)
+{
+ u32 sr = readl_relaxed(spi->base + STM32FX_SPI_SR);
+ u32 frlvl = FIELD_GET(STM32F7_SPI_SR_FRLVL, sr);
+
+ while ((spi->rx_len > 0) && (frlvl > 0)) {
+ u32 offs = spi->cur_xferlen - spi->rx_len;
+
+ if ((spi->rx_len >= sizeof(u16)) && (frlvl >= 2)) {
+ u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
+
+ *rx_buf16 = readw_relaxed(spi->base + STM32FX_SPI_DR);
+ spi->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
+
+ *rx_buf8 = readb_relaxed(spi->base + STM32FX_SPI_DR);
+ spi->rx_len -= sizeof(u8);
+ }
+
+ sr = readl_relaxed(spi->base + STM32FX_SPI_SR);
+ frlvl = FIELD_GET(STM32F7_SPI_SR_FRLVL, sr);
+ }
+
+ if (spi->rx_len >= sizeof(u16))
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+ else
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+
+ dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
+ __func__, spi->rx_len, sr);
+}
+
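/*
 * Sketch (the helper name is hypothetical): FRLVL is a constant two-bit
 * mask, so FIELD_GET() from <linux/bitfield.h> yields the RX FIFO
 * occupancy (0 = empty up to 3 = full) that drives the loop above.
 */
static u32 stm32f7_rx_frlvl(struct stm32_spi *spi)
{
	return FIELD_GET(STM32F7_SPI_SR_FRLVL,
			 readl_relaxed(spi->base + STM32FX_SPI_SR));
}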
+/**
* stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
*
@@ -645,10 +803,10 @@ static void stm32_spi_enable(struct stm32_spi *spi)
}
/**
- * stm32f4_spi_disable - Disable SPI controller
+ * stm32fx_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*/
-static void stm32f4_spi_disable(struct stm32_spi *spi)
+static void stm32fx_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 sr;
@@ -657,20 +815,20 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
- if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
- STM32F4_SPI_CR1_SPE)) {
+ if (!(readl_relaxed(spi->base + STM32FX_SPI_CR1) &
+ STM32FX_SPI_CR1_SPE)) {
spin_unlock_irqrestore(&spi->lock, flags);
return;
}
/* Disable interrupts */
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
- STM32F4_SPI_CR2_RXNEIE |
- STM32F4_SPI_CR2_ERRIE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXEIE |
+ STM32FX_SPI_CR2_RXNEIE |
+ STM32FX_SPI_CR2_ERRIE);
/* Wait until BSY = 0 */
- if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
- sr, !(sr & STM32F4_SPI_SR_BSY),
+ if (readl_relaxed_poll_timeout_atomic(spi->base + STM32FX_SPI_SR,
+ sr, !(sr & STM32FX_SPI_SR_BSY),
10, 100000) < 0) {
dev_warn(spi->dev, "disabling condition timeout\n");
}
@@ -680,14 +838,14 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
if (spi->cur_usedma && spi->dma_rx)
dmaengine_terminate_async(spi->dma_rx);
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, STM32FX_SPI_CR1_SPE);
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
- STM32F4_SPI_CR2_RXDMAEN);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_TXDMAEN |
+ STM32FX_SPI_CR2_RXDMAEN);
/* Sequence to clear OVR flag */
- readl_relaxed(spi->base + STM32F4_SPI_DR);
- readl_relaxed(spi->base + STM32F4_SPI_SR);
+ readl_relaxed(spi->base + STM32FX_SPI_DR);
+ readl_relaxed(spi->base + STM32FX_SPI_SR);
spin_unlock_irqrestore(&spi->lock, flags);
}
@@ -763,11 +921,11 @@ static bool stm32_spi_can_dma(struct spi_controller *ctrl,
}
/**
- * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
+ * stm32fx_spi_irq_event - Interrupt handler for SPI controller events
* @irq: interrupt line
* @dev_id: SPI controller ctrl interface
*/
-static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
+static irqreturn_t stm32fx_spi_irq_event(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
@@ -776,26 +934,26 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
spin_lock(&spi->lock);
- sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
+ sr = readl_relaxed(spi->base + STM32FX_SPI_SR);
/*
	 * The BSY flag is not handled in the interrupt, but it is normal
	 * behavior for this flag to be set.
*/
- sr &= ~STM32F4_SPI_SR_BSY;
+ sr &= ~STM32FX_SPI_SR_BSY;
if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
spi->cur_comm == SPI_3WIRE_TX)) {
/* OVR flag shouldn't be handled for TX only mode */
- sr &= ~(STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE);
- mask |= STM32F4_SPI_SR_TXE;
+ sr &= ~(STM32FX_SPI_SR_OVR | STM32FX_SPI_SR_RXNE);
+ mask |= STM32FX_SPI_SR_TXE;
}
if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX ||
spi->cur_comm == SPI_SIMPLEX_RX ||
spi->cur_comm == SPI_3WIRE_RX)) {
/* TXE flag is set and is handled when RXNE flag occurs */
- sr &= ~STM32F4_SPI_SR_TXE;
- mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
+ sr &= ~STM32FX_SPI_SR_TXE;
+ mask |= STM32FX_SPI_SR_RXNE | STM32FX_SPI_SR_OVR;
}
if (!(sr & mask)) {
@@ -804,12 +962,12 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
return IRQ_NONE;
}
- if (sr & STM32F4_SPI_SR_OVR) {
+ if (sr & STM32FX_SPI_SR_OVR) {
dev_warn(spi->dev, "Overrun: received value discarded\n");
/* Sequence to clear OVR flag */
- readl_relaxed(spi->base + STM32F4_SPI_DR);
- readl_relaxed(spi->base + STM32F4_SPI_SR);
+ readl_relaxed(spi->base + STM32FX_SPI_DR);
+ readl_relaxed(spi->base + STM32FX_SPI_SR);
/*
* If overrun is detected, it means that something went wrong,
@@ -820,28 +978,28 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
goto end_irq;
}
- if (sr & STM32F4_SPI_SR_TXE) {
+ if (sr & STM32FX_SPI_SR_TXE) {
if (spi->tx_buf)
- stm32f4_spi_write_tx(spi);
+ spi->cfg->write_tx(spi);
if (spi->tx_len == 0)
end = true;
}
- if (sr & STM32F4_SPI_SR_RXNE) {
- stm32f4_spi_read_rx(spi);
+ if (sr & STM32FX_SPI_SR_RXNE) {
+ spi->cfg->read_rx(spi);
if (spi->rx_len == 0)
end = true;
 else if (spi->tx_buf) /* Load data for discontinuous mode */
- stm32f4_spi_write_tx(spi);
+ spi->cfg->write_tx(spi);
}
end_irq:
if (end) {
 /* Immediately disable interrupts so as not to generate a new one */
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
- STM32F4_SPI_CR2_TXEIE |
- STM32F4_SPI_CR2_RXNEIE |
- STM32F4_SPI_CR2_ERRIE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2,
+ STM32FX_SPI_CR2_TXEIE |
+ STM32FX_SPI_CR2_RXNEIE |
+ STM32FX_SPI_CR2_ERRIE);
spin_unlock(&spi->lock);
return IRQ_WAKE_THREAD;
}
@@ -851,17 +1009,17 @@ end_irq:
}
/**
- * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
+ * stm32fx_spi_irq_thread - Thread of interrupt handler for SPI controller
* @irq: interrupt line
* @dev_id: SPI controller interface
*/
-static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
+static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
- stm32f4_spi_disable(spi);
+ stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -974,7 +1132,7 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
unsigned long flags;
u32 clrb = 0, setb = 0;
- /* SPI slave device may need time between data frames */
+ /* SPI target device may need time between data frames */
spi->cur_midi = 0;
if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
@@ -1013,7 +1171,7 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
int ret;
ret = spi_split_transfers_maxwords(ctrl, msg,
- STM32H7_SPI_TSIZE_MAX,
+ spi->t_size_max,
GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
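/*
 * Worked example (illustrative): a message longer than t_size_max words
 * is now split here; e.g. with a hypothetical t_size_max of 1000 words,
 * a 3000-word message becomes ceil(3000 / 1000) = 3 transfers.
 */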
@@ -1034,18 +1192,18 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
}
/**
- * stm32f4_spi_dma_tx_cb - dma callback
+ * stm32fx_spi_dma_tx_cb - dma callback
* @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA TX channel.
*/
-static void stm32f4_spi_dma_tx_cb(void *data)
+static void stm32fx_spi_dma_tx_cb(void *data)
{
struct stm32_spi *spi = data;
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
- stm32f4_spi_disable(spi);
+ stm32fx_spi_disable(spi);
}
}
@@ -1067,15 +1225,19 @@ static void stm32_spi_dma_rx_cb(void *data)
* stm32_spi_dma_config - configure dma slave channel depending on current
* transfer bits_per_word.
* @spi: pointer to the spi controller data structure
+ * @dma_chan: pointer to the DMA channel
* @dma_conf: pointer to the dma_slave_config structure
* @dir: direction of the dma transfer
*/
static void stm32_spi_dma_config(struct stm32_spi *spi,
+ struct dma_chan *dma_chan,
struct dma_slave_config *dma_conf,
enum dma_transfer_direction dir)
{
enum dma_slave_buswidth buswidth;
- u32 maxburst;
+ struct dma_slave_caps caps;
+ u32 maxburst = 1;
+ int ret;
if (spi->cur_bpw <= 8)
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -1084,15 +1246,14 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
else
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
- if (spi->cfg->has_fifo) {
- /* Valid for DMA Half or Full Fifo threshold */
- if (spi->cur_fthlv == 2)
- maxburst = 1;
- else
- maxburst = spi->cur_fthlv;
- } else {
- maxburst = 1;
- }
+ /* Valid for DMA Half or Full Fifo threshold */
+ if (!spi->cfg->prevent_dma_burst && spi->cfg->has_fifo && spi->cur_fthlv != 2)
+ maxburst = spi->cur_fthlv;
+
+ /* Get the DMA channel caps, and adjust maxburst if possible */
+ ret = dma_get_slave_caps(dma_chan, &caps);
+ if (!ret)
+ maxburst = min(maxburst, caps.max_burst);
memset(dma_conf, 0, sizeof(struct dma_slave_config));
dma_conf->direction = dir;
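/*
 * Worked example (illustrative): with a FIFO threshold of cur_fthlv = 8
 * but a DMA channel whose dma_get_slave_caps() reports max_burst = 4, the
 * min() above caps maxburst at 4; a controller with prevent_dma_burst set
 * never raises it above the initial value of 1.
 */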
@@ -1114,21 +1275,21 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
}
/**
- * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
+ * stm32fx_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
* @spi: pointer to the spi controller data structure
*
 * It must return 0 if the transfer is finished or 1 if it is still
* in progress.
*/
-static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
+static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr2 = 0;
/* Enable the interrupts relative to the current communication mode */
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
- cr2 |= STM32F4_SPI_CR2_TXEIE;
+ cr2 |= STM32FX_SPI_CR2_TXEIE;
} else if (spi->cur_comm == SPI_FULL_DUPLEX ||
spi->cur_comm == SPI_SIMPLEX_RX ||
spi->cur_comm == SPI_3WIRE_RX) {
@@ -1136,20 +1297,20 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
* since the received data are never read. Therefore set OVR
* interrupt only when rx buffer is available.
*/
- cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
+ cr2 |= STM32FX_SPI_CR2_RXNEIE | STM32FX_SPI_CR2_ERRIE;
} else {
return -EINVAL;
}
spin_lock_irqsave(&spi->lock, flags);
- stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
stm32_spi_enable(spi);
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
- stm32f4_spi_write_tx(spi);
+ spi->cfg->write_tx(spi);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -1189,7 +1350,7 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
- if (STM32_SPI_MASTER_MODE(spi))
+ if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
@@ -1200,11 +1361,11 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
}
/**
- * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * stm32fx_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
* @spi: pointer to the spi controller data structure
*/
-static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
+static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
/* In DMA mode end of transfer is handled by DMA TX or RX callback. */
if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
@@ -1214,13 +1375,29 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
* since the received data are never read. Therefore set OVR
* interrupt only when rx buffer is available.
*/
- stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
stm32_spi_enable(spi);
}
/**
+ * stm32f7_spi_transfer_one_dma_start - Set SPI driver registers to start
+ * transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f7_spi_transfer_one_dma_start(struct stm32_spi *spi)
+{
+ /* Configure DMA request trigger threshold according to DMA width */
+ if (spi->cur_bpw <= 8)
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+ else
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR2, STM32F7_SPI_CR2_FRXTH);
+
+ stm32fx_spi_transfer_one_dma_start(spi);
+}
+
+/**
* stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
* @spi: pointer to the spi controller data structure
@@ -1237,7 +1414,7 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_enable(spi);
- if (STM32_SPI_MASTER_MODE(spi))
+ if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
@@ -1260,7 +1437,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
rx_dma_desc = NULL;
if (spi->rx_buf && spi->dma_rx) {
- stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
+ stm32_spi_dma_config(spi, spi->dma_rx, &rx_dma_conf, DMA_DEV_TO_MEM);
dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
/* Enable Rx DMA request */
@@ -1276,7 +1453,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
tx_dma_desc = NULL;
if (spi->tx_buf && spi->dma_tx) {
- stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
+ stm32_spi_dma_config(spi, spi->dma_tx, &tx_dma_conf, DMA_MEM_TO_DEV);
dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
tx_dma_desc = dmaengine_prep_slave_sg(
@@ -1353,9 +1530,34 @@ dma_desc_error:
static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
{
if (spi->cur_bpw == 16)
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ else
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1, STM32F4_SPI_CR1_DFF);
+}
+
+/**
+ * stm32f7_spi_set_bpw - Configure bits per word
+ * @spi: pointer to the spi controller data structure
+ */
+static void stm32f7_spi_set_bpw(struct stm32_spi *spi)
+{
+ u32 bpw;
+ u32 cr2_clrb = 0, cr2_setb = 0;
+
+ bpw = spi->cur_bpw - 1;
+
+ cr2_clrb |= STM32F7_SPI_CR2_DS;
+ cr2_setb |= FIELD_PREP(STM32F7_SPI_CR2_DS, bpw);
+
+ if (spi->rx_len >= sizeof(u16))
+ cr2_clrb |= STM32F7_SPI_CR2_FRXTH;
else
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
+ cr2_setb |= STM32F7_SPI_CR2_FRXTH;
+
+ writel_relaxed(
+ (readl_relaxed(spi->base + STM32FX_SPI_CR2) &
+ ~cr2_clrb) | cr2_setb,
+ spi->base + STM32FX_SPI_CR2);
}
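/*
 * Worked example (illustrative): CR2.DS holds "frame size minus one", so
 * an 8-bit frame is encoded as FIELD_PREP(STM32F7_SPI_CR2_DS, 8 - 1),
 * i.e. 0x7 shifted into GENMASK(11, 8), giving 0x0700.
 */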
/**
@@ -1385,7 +1587,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
}
/**
- * stm32_spi_set_mbr - Configure baud rate divisor in master mode
+ * stm32_spi_set_mbr - Configure baud rate divisor in host mode
* @spi: pointer to the spi controller data structure
* @mbrdiv: baud rate divisor value
*/
@@ -1433,26 +1635,26 @@ static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
}
/**
- * stm32f4_spi_set_mode - configure communication mode
+ * stm32fx_spi_set_mode - configure communication mode
* @spi: pointer to the spi controller data structure
* @comm_type: type of communication to configure
*/
-static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
+static int stm32fx_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
{
if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIMODE |
- STM32F4_SPI_CR1_BIDIOE);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIMODE |
+ STM32FX_SPI_CR1_BIDIOE);
} else if (comm_type == SPI_FULL_DUPLEX ||
comm_type == SPI_SIMPLEX_RX) {
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIMODE |
- STM32F4_SPI_CR1_BIDIOE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIMODE |
+ STM32FX_SPI_CR1_BIDIOE);
} else if (comm_type == SPI_3WIRE_RX) {
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIMODE);
- stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
- STM32F4_SPI_CR1_BIDIOE);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIMODE);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_CR1,
+ STM32FX_SPI_CR1_BIDIOE);
} else {
return -EINVAL;
}
@@ -1497,7 +1699,7 @@ static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
/**
* stm32h7_spi_data_idleness - configure minimum time delay inserted between two
- * consecutive data frames in master mode
+ * consecutive data frames in host mode
* @spi: pointer to the spi controller data structure
* @len: transfer len
*/
@@ -1531,7 +1733,7 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
*/
static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
- if (nb_words <= STM32H7_SPI_TSIZE_MAX) {
+ if (nb_words <= spi->t_size_max) {
writel_relaxed(FIELD_PREP(STM32H7_SPI_CR2_TSIZE, nb_words),
spi->base + STM32H7_SPI_CR2);
} else {
@@ -1566,7 +1768,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
spi->cfg->set_bpw(spi);
/* Update spi->cur_speed with real clock speed */
- if (STM32_SPI_MASTER_MODE(spi)) {
+ if (STM32_SPI_HOST_MODE(spi)) {
mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
spi->cfg->baud_rate_div_min,
spi->cfg->baud_rate_div_max);
@@ -1586,7 +1788,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
spi->cur_comm = comm_type;
- if (STM32_SPI_MASTER_MODE(spi) && spi->cfg->set_data_idleness)
+ if (STM32_SPI_HOST_MODE(spi) && spi->cfg->set_data_idleness)
spi->cfg->set_data_idleness(spi, transfer->len);
if (spi->cur_bpw <= 8)
@@ -1607,7 +1809,7 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
dev_dbg(spi->dev,
"data frame of %d-bit, data packet of %d data frames\n",
spi->cur_bpw, spi->cur_fthlv);
- if (STM32_SPI_MASTER_MODE(spi))
+ if (STM32_SPI_HOST_MODE(spi))
dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
spi->cur_xferlen, nb_words);
@@ -1672,30 +1874,30 @@ static int stm32_spi_unprepare_msg(struct spi_controller *ctrl,
}
/**
- * stm32f4_spi_config - Configure SPI controller as SPI master
+ * stm32fx_spi_config - Configure SPI controller as SPI host
* @spi: pointer to the spi controller data structure
*/
-static int stm32f4_spi_config(struct stm32_spi *spi)
+static int stm32fx_spi_config(struct stm32_spi *spi)
{
unsigned long flags;
spin_lock_irqsave(&spi->lock, flags);
/* Ensure I2SMOD bit is kept cleared */
- stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
- STM32F4_SPI_I2SCFGR_I2SMOD);
+ stm32_spi_clr_bits(spi, STM32FX_SPI_I2SCFGR,
+ STM32FX_SPI_I2SCFGR_I2SMOD);
/*
* - SS input value high
* - transmitter half duplex direction
- * - Set the master mode (default Motorola mode)
- * - Consider 1 master/n slaves configuration and
+ * - Set the host mode (default Motorola mode)
+ * - Consider 1 host/n targets configuration and
* SS input value is determined by the SSI bit
*/
- stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
- STM32F4_SPI_CR1_BIDIOE |
- STM32F4_SPI_CR1_MSTR |
- STM32F4_SPI_CR1_SSM);
+ stm32_spi_set_bits(spi, STM32FX_SPI_CR1, STM32FX_SPI_CR1_SSI |
+ STM32FX_SPI_CR1_BIDIOE |
+ STM32FX_SPI_CR1_MSTR |
+ STM32FX_SPI_CR1_SSM);
spin_unlock_irqrestore(&spi->lock, flags);
@@ -1729,8 +1931,8 @@ static int stm32h7_spi_config(struct stm32_spi *spi)
cr1 |= STM32H7_SPI_CR1_HDDIR | STM32H7_SPI_CR1_MASRX | STM32H7_SPI_CR1_SSI;
/*
- * - Set the master mode (default Motorola mode)
- * - Consider 1 master/n devices configuration and
+ * - Set the host mode (default Motorola mode)
+ * - Consider 1 host/n devices configuration and
* SS input value is determined by the SSI bit
* - keep control of all associated GPIOs
*/
@@ -1746,25 +1948,48 @@ static int stm32h7_spi_config(struct stm32_spi *spi)
}
static const struct stm32_spi_cfg stm32f4_spi_cfg = {
- .regs = &stm32f4_spi_regspec,
+ .regs = &stm32fx_spi_regspec,
.get_bpw_mask = stm32f4_spi_get_bpw_mask,
- .disable = stm32f4_spi_disable,
- .config = stm32f4_spi_config,
+ .disable = stm32fx_spi_disable,
+ .config = stm32fx_spi_config,
.set_bpw = stm32f4_spi_set_bpw,
- .set_mode = stm32f4_spi_set_mode,
- .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
- .dma_tx_cb = stm32f4_spi_dma_tx_cb,
+ .set_mode = stm32fx_spi_set_mode,
+ .write_tx = stm32f4_spi_write_tx,
+ .read_rx = stm32f4_spi_read_rx,
+ .transfer_one_dma_start = stm32fx_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32fx_spi_dma_tx_cb,
.dma_rx_cb = stm32_spi_dma_rx_cb,
- .transfer_one_irq = stm32f4_spi_transfer_one_irq,
- .irq_handler_event = stm32f4_spi_irq_event,
- .irq_handler_thread = stm32f4_spi_irq_thread,
- .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
- .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
+ .transfer_one_irq = stm32fx_spi_transfer_one_irq,
+ .irq_handler_event = stm32fx_spi_irq_event,
+ .irq_handler_thread = stm32fx_spi_irq_thread,
+ .baud_rate_div_min = STM32FX_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32FX_SPI_BR_DIV_MAX,
.has_fifo = false,
.has_device_mode = false,
.flags = SPI_CONTROLLER_MUST_TX,
};
+static const struct stm32_spi_cfg stm32f7_spi_cfg = {
+ .regs = &stm32fx_spi_regspec,
+ .get_bpw_mask = stm32f7_spi_get_bpw_mask,
+ .disable = stm32fx_spi_disable,
+ .config = stm32fx_spi_config,
+ .set_bpw = stm32f7_spi_set_bpw,
+ .set_mode = stm32fx_spi_set_mode,
+ .write_tx = stm32f7_spi_write_tx,
+ .read_rx = stm32f7_spi_read_rx,
+ .transfer_one_dma_start = stm32f7_spi_transfer_one_dma_start,
+ .dma_tx_cb = stm32fx_spi_dma_tx_cb,
+ .dma_rx_cb = stm32_spi_dma_rx_cb,
+ .transfer_one_irq = stm32fx_spi_transfer_one_irq,
+ .irq_handler_event = stm32fx_spi_irq_event,
+ .irq_handler_thread = stm32fx_spi_irq_thread,
+ .baud_rate_div_min = STM32FX_SPI_BR_DIV_MIN,
+ .baud_rate_div_max = STM32FX_SPI_BR_DIV_MAX,
+ .has_fifo = false,
+ .flags = SPI_CONTROLLER_MUST_TX,
+};
+
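/*
 * Sketch of how a per-compatible cfg is dispatched (illustrative,
 * assuming of_device_get_match_data()): the matching stm32_spi_cfg is
 * bound once at probe and the new write_tx / read_rx hooks are then
 * called indirectly, e.g.:
 *
 *	spi->cfg = of_device_get_match_data(&pdev->dev);
 *	...
 *	spi->cfg->write_tx(spi);	// F4, F7 or H7 variant
 */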
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.regs = &stm32h7_spi_regspec,
.get_fifo_size = stm32h7_spi_get_fifo_size,
@@ -1775,6 +2000,8 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.set_mode = stm32h7_spi_set_mode,
.set_data_idleness = stm32h7_spi_data_idleness,
.set_number_of_data = stm32h7_spi_number_of_data,
+ .write_tx = stm32h7_spi_write_txfifo,
+ .read_rx = stm32h7_spi_read_rxfifo,
.transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
.dma_rx_cb = stm32_spi_dma_rx_cb,
/*
@@ -1789,9 +2016,40 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.has_device_mode = true,
};
+/*
+ * STM32MP2 is compatible with the STM32H7 except:
+ * - the DMA maxburst value is forced to 1
+ * - spi8 has a limited feature set (TSIZE_MAX = 1024, BPW of 8 or 16)
+ */
+static const struct stm32_spi_cfg stm32mp25_spi_cfg = {
+ .regs = &stm32mp25_spi_regspec,
+ .get_fifo_size = stm32h7_spi_get_fifo_size,
+ .get_bpw_mask = stm32mp25_spi_get_bpw_mask,
+ .disable = stm32h7_spi_disable,
+ .config = stm32h7_spi_config,
+ .set_bpw = stm32h7_spi_set_bpw,
+ .set_mode = stm32h7_spi_set_mode,
+ .set_data_idleness = stm32h7_spi_data_idleness,
+ .set_number_of_data = stm32h7_spi_number_of_data,
+ .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
+ .dma_rx_cb = stm32_spi_dma_rx_cb,
+ /*
+	 * dma_tx_cb is not necessary since, in the TX case, DMA is followed by
+	 * a SPI access, hence handling is performed within the SPI interrupt
+ */
+ .transfer_one_irq = stm32h7_spi_transfer_one_irq,
+ .irq_handler_thread = stm32h7_spi_irq_thread,
+ .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
+ .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
+ .has_fifo = true,
+ .prevent_dma_burst = true,
+};
+
static const struct of_device_id stm32_spi_of_match[] = {
+ { .compatible = "st,stm32mp25-spi", .data = (void *)&stm32mp25_spi_cfg },
{ .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
{ .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
+ { .compatible = "st,stm32f7-spi", .data = (void *)&stm32f7_spi_cfg },
{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
@@ -1820,9 +2078,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
}
if (device_mode)
- ctrl = devm_spi_alloc_slave(&pdev->dev, sizeof(struct stm32_spi));
+ ctrl = devm_spi_alloc_target(&pdev->dev, sizeof(struct stm32_spi));
else
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(struct stm32_spi));
if (!ctrl) {
dev_err(&pdev->dev, "spi controller allocation failed\n");
return -ENOMEM;
@@ -1892,6 +2150,22 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (spi->cfg->has_fifo)
spi->fifo_size = spi->cfg->get_fifo_size(spi);
+ spi->feature_set = STM32_SPI_FEATURE_FULL;
+ if (spi->cfg->regs->fullcfg.reg) {
+ spi->feature_set =
+ FIELD_GET(STM32MP25_SPI_HWCFGR1_FULLCFG,
+ readl_relaxed(spi->base + spi->cfg->regs->fullcfg.reg));
+
+ dev_dbg(spi->dev, "%s feature set\n",
+ spi->feature_set == STM32_SPI_FEATURE_FULL ? "full" : "limited");
+ }
+
+ /* Only for STM32H7 and after */
+ spi->t_size_max = spi->feature_set == STM32_SPI_FEATURE_FULL ?
+ STM32H7_SPI_TSIZE_MAX :
+ STM32MP25_SPI_TSIZE_MAX_LIMITED;
+ dev_dbg(spi->dev, "one message max size %d\n", spi->t_size_max);
+
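/*
 * Sketch (illustrative): on an MP25 instance, HWCFGR1.FULLCFG reads back
 * 0x0 for the limited spi8 instance and 0x1 for a full-featured one, e.g.:
 *
 *	u32 hwcfgr1 = readl_relaxed(spi->base + STM32MP25_SPI_HWCFGR1);
 *	bool limited = FIELD_GET(STM32MP25_SPI_HWCFGR1_FULLCFG, hwcfgr1) ==
 *		       STM32MP25_SPI_HWCFGR1_FULLCFG_LIMITED;
 */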
ret = spi->cfg->config(spi);
if (ret) {
dev_err(&pdev->dev, "controller configuration failed: %d\n",
@@ -1913,7 +2187,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
ctrl->unprepare_message = stm32_spi_unprepare_msg;
ctrl->flags = spi->cfg->flags;
if (STM32_SPI_DEVICE_MODE(spi))
- ctrl->slave_abort = stm32h7_spi_device_abort;
+ ctrl->target_abort = stm32h7_spi_device_abort;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
@@ -1960,7 +2234,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&pdev->dev, "driver initialized (%s mode)\n",
- STM32_SPI_MASTER_MODE(spi) ? "master" : "device");
+ STM32_SPI_HOST_MODE(spi) ? "host" : "device");
return 0;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index b8947265d..11d8bd27b 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -75,7 +75,7 @@
#define SUN4I_FIFO_STA_TF_CNT_BITS 16
struct sun4i_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base_addr;
struct clk *hclk;
struct clk *mclk;
@@ -161,7 +161,7 @@ static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
@@ -201,11 +201,11 @@ static size_t sun4i_spi_max_transfer_size(struct spi_device *spi)
return SUN4I_MAX_XFER_SIZE - 1;
}
-static int sun4i_spi_transfer_one(struct spi_master *master,
+static int sun4i_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(host);
unsigned int mclk_rate, div, timeout;
unsigned int start, end, tx_time;
unsigned int tx_len = 0;
@@ -331,7 +331,7 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
msecs_to_jiffies(tx_time));
end = jiffies;
if (!timeout) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
@@ -386,8 +386,8 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
static int sun4i_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(sspi->hclk);
@@ -415,8 +415,8 @@ out:
static int sun4i_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_controller_get_devdata(host);
clk_disable_unprepare(sspi->mclk);
clk_disable_unprepare(sspi->hclk);
@@ -426,62 +426,62 @@ static int sun4i_spi_runtime_suspend(struct device *dev)
static int sun4i_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct sun4i_spi *sspi;
int ret = 0, irq;
- master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
- if (!master) {
- dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!host) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Host\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
- sspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ sspi = spi_controller_get_devdata(host);
sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
- goto err_free_master;
+ goto err_free_host;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
- goto err_free_master;
+ goto err_free_host;
}
ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
0, "sun4i-spi", sspi);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_free_master;
+ goto err_free_host;
}
- sspi->master = master;
- master->max_speed_hz = 100 * 1000 * 1000;
- master->min_speed_hz = 3 * 1000;
- master->set_cs = sun4i_spi_set_cs;
- master->transfer_one = sun4i_spi_transfer_one;
- master->num_chipselect = 4;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->max_transfer_size = sun4i_spi_max_transfer_size;
+ sspi->host = host;
+ host->max_speed_hz = 100 * 1000 * 1000;
+ host->min_speed_hz = 3 * 1000;
+ host->set_cs = sun4i_spi_set_cs;
+ host->transfer_one = sun4i_spi_transfer_one;
+ host->num_chipselect = 4;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->dev.of_node = pdev->dev.of_node;
+ host->auto_runtime_pm = true;
+ host->max_transfer_size = sun4i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
ret = PTR_ERR(sspi->hclk);
- goto err_free_master;
+ goto err_free_host;
}
sspi->mclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(sspi->mclk)) {
dev_err(&pdev->dev, "Unable to acquire module clock\n");
ret = PTR_ERR(sspi->mclk);
- goto err_free_master;
+ goto err_free_host;
}
init_completion(&sspi->done);
@@ -493,16 +493,16 @@ static int sun4i_spi_probe(struct platform_device *pdev)
ret = sun4i_spi_runtime_resume(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Couldn't resume the device\n");
- goto err_free_master;
+ goto err_free_host;
}
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
+ dev_err(&pdev->dev, "cannot register SPI host\n");
goto err_pm_disable;
}
@@ -511,8 +511,8 @@ static int sun4i_spi_probe(struct platform_device *pdev)
err_pm_disable:
pm_runtime_disable(&pdev->dev);
sun4i_spi_runtime_suspend(&pdev->dev);
-err_free_master:
- spi_master_put(master);
+err_free_host:
+ spi_controller_put(host);
return ret;
}
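The sun4i conversion is purely mechanical: every spi_master_* call gets its spi_controller_* spelling and the error labels follow suit. A hedged skeleton of the alloc/devdata/register round trip those hunks implement (demo_* names are invented; the spi core calls are real):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct demo_spi {
	void __iomem *base;
};

static int demo_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct demo_spi *priv;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);
	/* Driver state lives in the tail allocation of the controller. */
	priv = spi_controller_get_devdata(host);

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		goto err_free_host;
	return 0;

err_free_host:
	/* spi_alloc_host() took a reference; drop it on failure. */
	spi_controller_put(host);
	return ret;
}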
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fddc63309..cd018ea1a 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -97,7 +97,7 @@ struct sun6i_spi_cfg {
};
struct sun6i_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base_addr;
dma_addr_t dma_addr_rx;
dma_addr_t dma_addr_tx;
@@ -181,7 +181,7 @@ static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi)
static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
@@ -212,7 +212,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct spi_transfer *tfr)
{
struct dma_async_tx_descriptor *rxdesc, *txdesc;
- struct spi_master *master = sspi->master;
+ struct spi_controller *host = sspi->host;
rxdesc = NULL;
if (tfr->rx_buf) {
@@ -223,9 +223,9 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
.src_maxburst = 8,
};
- dmaengine_slave_config(master->dma_rx, &rxconf);
+ dmaengine_slave_config(host->dma_rx, &rxconf);
- rxdesc = dmaengine_prep_slave_sg(master->dma_rx,
+ rxdesc = dmaengine_prep_slave_sg(host->dma_rx,
tfr->rx_sg.sgl,
tfr->rx_sg.nents,
DMA_DEV_TO_MEM,
@@ -245,38 +245,38 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
.dst_maxburst = 8,
};
- dmaengine_slave_config(master->dma_tx, &txconf);
+ dmaengine_slave_config(host->dma_tx, &txconf);
- txdesc = dmaengine_prep_slave_sg(master->dma_tx,
+ txdesc = dmaengine_prep_slave_sg(host->dma_tx,
tfr->tx_sg.sgl,
tfr->tx_sg.nents,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(host->dma_rx);
return -EINVAL;
}
}
if (tfr->rx_buf) {
dmaengine_submit(rxdesc);
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(host->dma_rx);
}
if (tfr->tx_buf) {
dmaengine_submit(txdesc);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(host->dma_tx);
}
return 0;
}
-static int sun6i_spi_transfer_one(struct spi_master *master,
+static int sun6i_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
unsigned int div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
@@ -293,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
sspi->tx_buf = tfr->tx_buf;
sspi->rx_buf = tfr->rx_buf;
sspi->len = tfr->len;
- use_dma = master->can_dma ? master->can_dma(master, spi, tfr) : false;
+ use_dma = host->can_dma ? host->can_dma(host, spi, tfr) : false;
/* Clear pending interrupts */
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
@@ -463,7 +463,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
} else {
ret = sun6i_spi_prepare_dma(sspi, tfr);
if (ret) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"%s: prepare DMA failed, ret=%d",
dev_name(&spi->dev), ret);
return ret;
@@ -486,7 +486,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
- tx_time = spi_controller_xfer_timeout(master, tfr);
+ tx_time = spi_controller_xfer_timeout(host, tfr);
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
@@ -502,13 +502,13 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
timeout);
if (!timeout)
- dev_warn(&master->dev, "RX DMA timeout\n");
+ dev_warn(&host->dev, "RX DMA timeout\n");
}
}
end = jiffies;
if (!timeout) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
dev_name(&spi->dev), tfr->len, tfr->speed_hz,
jiffies_to_msecs(end - start), tx_time);
@@ -518,8 +518,8 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
if (ret && use_dma) {
- dmaengine_terminate_sync(master->dma_rx);
- dmaengine_terminate_sync(master->dma_tx);
+ dmaengine_terminate_sync(host->dma_rx);
+ dmaengine_terminate_sync(host->dma_tx);
}
return ret;
@@ -564,8 +564,8 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
static int sun6i_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(sspi->hclk);
@@ -601,8 +601,8 @@ out:
static int sun6i_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
reset_control_assert(sspi->rstc);
clk_disable_unprepare(sspi->mclk);
@@ -611,11 +611,11 @@ static int sun6i_spi_runtime_suspend(struct device *dev)
return 0;
}
-static bool sun6i_spi_can_dma(struct spi_master *master,
+static bool sun6i_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ struct sun6i_spi *sspi = spi_controller_get_devdata(host);
/*
* If the number of spi words to transfer is less or equal than
@@ -627,67 +627,67 @@ static bool sun6i_spi_can_dma(struct spi_master *master,
static int sun6i_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct sun6i_spi *sspi;
struct resource *mem;
int ret = 0, irq;
- master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
- if (!master) {
- dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!host) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Host\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
- sspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ sspi = spi_controller_get_devdata(host);
sspi->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
- goto err_free_master;
+ goto err_free_host;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
- goto err_free_master;
+ goto err_free_host;
}
ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
0, "sun6i-spi", sspi);
if (ret) {
dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_free_master;
+ goto err_free_host;
}
- sspi->master = master;
+ sspi->host = host;
sspi->cfg = of_device_get_match_data(&pdev->dev);
- master->max_speed_hz = 100 * 1000 * 1000;
- master->min_speed_hz = 3 * 1000;
- master->use_gpio_descriptors = true;
- master->set_cs = sun6i_spi_set_cs;
- master->transfer_one = sun6i_spi_transfer_one;
- master->num_chipselect = 4;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
- sspi->cfg->mode_bits;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->max_transfer_size = sun6i_spi_max_transfer_size;
+ host->max_speed_hz = 100 * 1000 * 1000;
+ host->min_speed_hz = 3 * 1000;
+ host->use_gpio_descriptors = true;
+ host->set_cs = sun6i_spi_set_cs;
+ host->transfer_one = sun6i_spi_transfer_one;
+ host->num_chipselect = 4;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ sspi->cfg->mode_bits;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->dev.of_node = pdev->dev.of_node;
+ host->auto_runtime_pm = true;
+ host->max_transfer_size = sun6i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
ret = PTR_ERR(sspi->hclk);
- goto err_free_master;
+ goto err_free_host;
}
sspi->mclk = devm_clk_get(&pdev->dev, "mod");
if (IS_ERR(sspi->mclk)) {
dev_err(&pdev->dev, "Unable to acquire module clock\n");
ret = PTR_ERR(sspi->mclk);
- goto err_free_master;
+ goto err_free_host;
}
init_completion(&sspi->done);
@@ -697,34 +697,34 @@ static int sun6i_spi_probe(struct platform_device *pdev)
if (IS_ERR(sspi->rstc)) {
dev_err(&pdev->dev, "Couldn't get reset controller\n");
ret = PTR_ERR(sspi->rstc);
- goto err_free_master;
+ goto err_free_host;
}
- master->dma_tx = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR(master->dma_tx)) {
+ host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
/* Check tx to see if we need defer probing driver */
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_free_master;
+ goto err_free_host;
}
dev_warn(&pdev->dev, "Failed to request TX DMA channel\n");
- master->dma_tx = NULL;
+ host->dma_tx = NULL;
}
- master->dma_rx = dma_request_chan(&pdev->dev, "rx");
- if (IS_ERR(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(host->dma_rx)) {
+ if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_dma_tx;
}
dev_warn(&pdev->dev, "Failed to request RX DMA channel\n");
- master->dma_rx = NULL;
+ host->dma_rx = NULL;
}
- if (master->dma_tx && master->dma_rx) {
+ if (host->dma_tx && host->dma_rx) {
sspi->dma_addr_tx = mem->start + SUN6I_TXDATA_REG;
sspi->dma_addr_rx = mem->start + SUN6I_RXDATA_REG;
- master->can_dma = sun6i_spi_can_dma;
+ host->can_dma = sun6i_spi_can_dma;
}
/*
@@ -742,9 +742,9 @@ static int sun6i_spi_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&pdev->dev, "cannot register SPI master\n");
+ dev_err(&pdev->dev, "cannot register SPI host\n");
goto err_pm_disable;
}
@@ -754,26 +754,26 @@ err_pm_disable:
pm_runtime_disable(&pdev->dev);
sun6i_spi_runtime_suspend(&pdev->dev);
err_free_dma_rx:
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
err_free_dma_tx:
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
-err_free_master:
- spi_master_put(master);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+err_free_host:
+ spi_controller_put(host);
return ret;
}
static void sun6i_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_controller *host = platform_get_drvdata(pdev);
pm_runtime_force_suspend(&pdev->dev);
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
}
static const struct sun6i_spi_cfg sun6i_a31_spi_cfg = {
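sun6i additionally routes its DMA channels through the controller, so the dma_rx/dma_tx pointers simply move from master-> to host->. A trimmed sketch of the RX-descriptor half of the sun6i_spi_prepare_dma() pattern under the new naming (fifo_addr and the demo_* name are stand-ins; the dmaengine calls are real):

#include <linux/dmaengine.h>
#include <linux/spi/spi.h>

static int demo_prep_rx(struct spi_controller *host, struct spi_transfer *tfr,
			dma_addr_t fifo_addr)
{
	struct dma_slave_config rxconf = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,	/* RX FIFO register, device side */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = 8,
	};
	struct dma_async_tx_descriptor *rxdesc;

	dmaengine_slave_config(host->dma_rx, &rxconf);
	rxdesc = dmaengine_prep_slave_sg(host->dma_rx, tfr->rx_sg.sgl,
					 tfr->rx_sg.nents, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!rxdesc)
		return -EINVAL;

	/* Queue the descriptor and kick the channel. */
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(host->dma_rx);
	return 0;
}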
diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c
index eb8f835a4..4e481380c 100644
--- a/drivers/spi/spi-sunplus-sp7021.c
+++ b/drivers/spi/spi-sunplus-sp7021.c
@@ -70,8 +70,8 @@
#define SP7021_FIFO_DATA_LEN (16)
enum {
- SP7021_MASTER_MODE = 0,
- SP7021_SLAVE_MODE = 1,
+ SP7021_HOST_MODE = 0,
+ SP7021_TARGET_MODE = 1,
};
struct sp7021_spi_ctlr {
@@ -88,7 +88,7 @@ struct sp7021_spi_ctlr {
// data xfer lock
struct mutex buf_lock;
struct completion isr_done;
- struct completion slave_isr;
+ struct completion target_isr;
unsigned int rx_cur_len;
unsigned int tx_cur_len;
unsigned int data_unit;
@@ -96,7 +96,7 @@ struct sp7021_spi_ctlr {
u8 *rx_buf;
};
-static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
+static irqreturn_t sp7021_spi_target_irq(int irq, void *dev)
{
struct sp7021_spi_ctlr *pspim = dev;
unsigned int data_status;
@@ -104,25 +104,25 @@ static irqreturn_t sp7021_spi_slave_irq(int irq, void *dev)
data_status = readl(pspim->s_base + SP7021_DATA_RDY_REG);
data_status |= SP7021_SLAVE_CLR_INT;
writel(data_status , pspim->s_base + SP7021_DATA_RDY_REG);
- complete(&pspim->slave_isr);
+ complete(&pspim->target_isr);
return IRQ_HANDLED;
}
-static int sp7021_spi_slave_abort(struct spi_controller *ctlr)
+static int sp7021_spi_target_abort(struct spi_controller *ctlr)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
- complete(&pspim->slave_isr);
+ complete(&pspim->target_isr);
complete(&pspim->isr_done);
return 0;
}
-static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer)
+static int sp7021_spi_target_tx(struct spi_device *spi, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
u32 value;
- reinit_completion(&pspim->slave_isr);
+ reinit_completion(&pspim->target_isr);
value = SP7021_SLAVE_DMA_EN | SP7021_SLAVE_DMA_RW | FIELD_PREP(SP7021_SLAVE_DMA_CMD, 3);
writel(value, pspim->s_base + SP7021_SLAVE_DMA_CTRL_REG);
writel(xfer->len, pspim->s_base + SP7021_SLAVE_DMA_LENGTH_REG);
@@ -137,7 +137,7 @@ static int sp7021_spi_slave_tx(struct spi_device *spi, struct spi_transfer *xfer
return 0;
}
-static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer)
+static int sp7021_spi_target_rx(struct spi_device *spi, struct spi_transfer *xfer)
{
struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(spi->controller);
u32 value;
@@ -155,7 +155,7 @@ static int sp7021_spi_slave_rx(struct spi_device *spi, struct spi_transfer *xfer
return 0;
}
-static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+static void sp7021_spi_host_rb(struct sp7021_spi_ctlr *pspim, unsigned int len)
{
int i;
@@ -166,7 +166,7 @@ static void sp7021_spi_master_rb(struct sp7021_spi_ctlr *pspim, unsigned int len
}
}
-static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
+static void sp7021_spi_host_wb(struct sp7021_spi_ctlr *pspim, unsigned int len)
{
int i;
@@ -177,7 +177,7 @@ static void sp7021_spi_master_wb(struct sp7021_spi_ctlr *pspim, unsigned int len
}
}
-static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
+static irqreturn_t sp7021_spi_host_irq(int irq, void *dev)
{
struct sp7021_spi_ctlr *pspim = dev;
unsigned int tx_cnt, total_len;
@@ -206,9 +206,9 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
fd_status, rx_cnt, tx_cnt, tx_len);
if (rx_cnt > 0)
- sp7021_spi_master_rb(pspim, rx_cnt);
+ sp7021_spi_host_rb(pspim, rx_cnt);
if (tx_cnt > 0)
- sp7021_spi_master_wb(pspim, tx_cnt);
+ sp7021_spi_host_wb(pspim, tx_cnt);
fd_status = readl(pspim->m_base + SP7021_SPI_STATUS_REG);
tx_len = FIELD_GET(SP7021_TX_LEN_MASK, fd_status);
@@ -224,7 +224,7 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
rx_cnt = FIELD_GET(SP7021_RX_CNT_MASK, fd_status);
if (rx_cnt > 0)
- sp7021_spi_master_rb(pspim, rx_cnt);
+ sp7021_spi_host_rb(pspim, rx_cnt);
}
value = readl(pspim->m_base + SP7021_INT_BUSY_REG);
value |= SP7021_CLR_MASTER_INT;
@@ -240,7 +240,7 @@ static irqreturn_t sp7021_spi_master_irq(int irq, void *dev)
static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device *spi)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
pspim->tx_cur_len = 0;
pspim->rx_cur_len = 0;
@@ -251,7 +251,7 @@ static void sp7021_prep_transfer(struct spi_controller *ctlr, struct spi_device
static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
struct spi_device *s = msg->spi;
u32 valus, rs = 0;
@@ -283,7 +283,7 @@ static int sp7021_spi_controller_prepare_message(struct spi_controller *ctlr,
static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
u32 clk_rate, clk_sel, div;
clk_rate = clk_get_rate(pspim->spi_clk);
@@ -295,10 +295,10 @@ static void sp7021_spi_setup_clk(struct spi_controller *ctlr, struct spi_transfe
writel(pspim->xfer_conf, pspim->m_base + SP7021_SPI_CONFIG_REG);
}
-static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+static int sp7021_spi_host_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
unsigned long timeout = msecs_to_jiffies(1000);
unsigned int xfer_cnt, xfer_len, last_len;
unsigned int i, len_temp;
@@ -323,7 +323,7 @@ static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct sp
if (pspim->tx_cur_len < xfer_len) {
len_temp = min(pspim->data_unit, xfer_len);
- sp7021_spi_master_wb(pspim, len_temp);
+ sp7021_spi_host_wb(pspim, len_temp);
}
reg_temp = readl(pspim->m_base + SP7021_SPI_CONFIG_REG);
reg_temp &= ~SP7021_CLEAN_RW_BYTE;
@@ -359,10 +359,10 @@ static int sp7021_spi_master_transfer_one(struct spi_controller *ctlr, struct sp
return 0;
}
-static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+static int sp7021_spi_target_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
struct device *dev = pspim->dev;
int ret;
@@ -371,14 +371,14 @@ static int sp7021_spi_slave_transfer_one(struct spi_controller *ctlr, struct spi
xfer->len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma))
return -ENOMEM;
- ret = sp7021_spi_slave_tx(spi, xfer);
+ ret = sp7021_spi_target_tx(spi, xfer);
dma_unmap_single(dev, xfer->tx_dma, xfer->len, DMA_TO_DEVICE);
} else if (xfer->rx_buf && !xfer->tx_buf) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma))
return -ENOMEM;
- ret = sp7021_spi_slave_rx(spi, xfer);
+ ret = sp7021_spi_target_rx(spi, xfer);
dma_unmap_single(dev, xfer->rx_dma, xfer->len, DMA_FROM_DEVICE);
} else {
dev_dbg(&ctlr->dev, "%s() wrong command\n", __func__);
@@ -409,14 +409,14 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
pdev->id = of_alias_get_id(pdev->dev.of_node, "sp_spi");
if (device_property_read_bool(dev, "spi-slave"))
- mode = SP7021_SLAVE_MODE;
+ mode = SP7021_TARGET_MODE;
else
- mode = SP7021_MASTER_MODE;
+ mode = SP7021_HOST_MODE;
- if (mode == SP7021_SLAVE_MODE)
- ctlr = devm_spi_alloc_slave(dev, sizeof(*pspim));
+ if (mode == SP7021_TARGET_MODE)
+ ctlr = devm_spi_alloc_target(dev, sizeof(*pspim));
else
- ctlr = devm_spi_alloc_master(dev, sizeof(*pspim));
+ ctlr = devm_spi_alloc_host(dev, sizeof(*pspim));
if (!ctlr)
return -ENOMEM;
device_set_node(&ctlr->dev, dev_fwnode(dev));
@@ -424,9 +424,9 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
ctlr->auto_runtime_pm = true;
ctlr->prepare_message = sp7021_spi_controller_prepare_message;
- if (mode == SP7021_SLAVE_MODE) {
- ctlr->transfer_one = sp7021_spi_slave_transfer_one;
- ctlr->slave_abort = sp7021_spi_slave_abort;
+ if (mode == SP7021_TARGET_MODE) {
+ ctlr->transfer_one = sp7021_spi_target_transfer_one;
+ ctlr->target_abort = sp7021_spi_target_abort;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
} else {
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
@@ -434,7 +434,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
ctlr->max_speed_hz = 25000000;
ctlr->use_gpio_descriptors = true;
ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
- ctlr->transfer_one = sp7021_spi_master_transfer_one;
+ ctlr->transfer_one = sp7021_spi_host_transfer_one;
}
platform_set_drvdata(pdev, ctlr);
pspim = spi_controller_get_devdata(ctlr);
@@ -443,7 +443,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
pspim->dev = dev;
mutex_init(&pspim->buf_lock);
init_completion(&pspim->isr_done);
- init_completion(&pspim->slave_isr);
+ init_completion(&pspim->target_isr);
pspim->m_base = devm_platform_ioremap_resource_byname(pdev, "master");
if (IS_ERR(pspim->m_base))
@@ -485,12 +485,12 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_master_irq,
+ ret = devm_request_irq(dev, pspim->m_irq, sp7021_spi_host_irq,
IRQF_TRIGGER_RISING, pdev->name, pspim);
if (ret)
return ret;
- ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_slave_irq,
+ ret = devm_request_irq(dev, pspim->s_irq, sp7021_spi_target_irq,
IRQF_TRIGGER_RISING, pdev->name, pspim);
if (ret)
return ret;
@@ -499,7 +499,7 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev)
ret = spi_register_controller(ctlr);
if (ret) {
pm_runtime_disable(dev);
- return dev_err_probe(dev, ret, "spi_register_master fail\n");
+ return dev_err_probe(dev, ret, "spi_register_controller fail\n");
}
return 0;
}
@@ -516,7 +516,7 @@ static void sp7021_spi_controller_remove(struct platform_device *pdev)
static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
return reset_control_assert(pspim->rstc);
}
@@ -524,7 +524,7 @@ static int __maybe_unused sp7021_spi_controller_suspend(struct device *dev)
static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
reset_control_deassert(pspim->rstc);
return clk_prepare_enable(pspim->spi_clk);
@@ -534,7 +534,7 @@ static int __maybe_unused sp7021_spi_controller_resume(struct device *dev)
static int sp7021_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
return reset_control_assert(pspim->rstc);
}
@@ -542,7 +542,7 @@ static int sp7021_spi_runtime_suspend(struct device *dev)
static int sp7021_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct sp7021_spi_ctlr *pspim = spi_master_get_devdata(ctlr);
+ struct sp7021_spi_ctlr *pspim = spi_controller_get_devdata(ctlr);
return reset_control_deassert(pspim->rstc);
}
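For sp7021 the rename also covers the target-mode entry point: ->slave_abort becomes ->target_abort. The hook only has to wake anyone blocked on the transfer completions so a stuck target-mode transfer can be cancelled; roughly, assuming the two completions shown in the driver:

#include <linux/completion.h>
#include <linux/spi/spi.h>

struct demo_target {
	struct completion isr_done;
	struct completion target_isr;
};

static int demo_target_abort(struct spi_controller *ctlr)
{
	struct demo_target *priv = spi_controller_get_devdata(ctlr);

	/* Unblock both possible waiters; the transfer path sees a timeout. */
	complete(&priv->target_isr);
	complete(&priv->isr_done);
	return 0;
}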
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index aeaf7db02..7cb4301a6 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -225,11 +225,11 @@ static int write_fifo(struct synquacer_spi *sspi)
return 0;
}
-static int synquacer_spi_config(struct spi_master *master,
+static int synquacer_spi_config(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
u32 rate, val, div;
@@ -263,7 +263,7 @@ static int synquacer_spi_config(struct spi_master *master,
}
sspi->transfer_mode = transfer_mode;
- rate = master->max_speed_hz;
+ rate = host->max_speed_hz;
div = DIV_ROUND_UP(rate, speed);
if (div > 254) {
@@ -350,11 +350,11 @@ static int synquacer_spi_config(struct spi_master *master,
return 0;
}
-static int synquacer_spi_transfer_one(struct spi_master *master,
+static int synquacer_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
int ret;
int status = 0;
u32 words;
@@ -378,7 +378,7 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
xfer->bits_per_word = 32;
- ret = synquacer_spi_config(master, spi, xfer);
+ ret = synquacer_spi_config(host, spi, xfer);
/* restore */
xfer->bits_per_word = bpw;
@@ -482,7 +482,7 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(spi->controller);
u32 val;
val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
@@ -517,11 +517,11 @@ static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
return -EBUSY;
}
-static int synquacer_spi_enable(struct spi_master *master)
+static int synquacer_spi_enable(struct spi_controller *host)
{
u32 val;
int status;
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
/* Disable module */
writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
@@ -601,18 +601,18 @@ static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
static int synquacer_spi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct synquacer_spi *sspi;
int ret;
int rx_irq, tx_irq;
- master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*sspi));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- sspi = spi_master_get_devdata(master);
+ sspi = spi_controller_get_devdata(host);
sspi->dev = &pdev->dev;
init_completion(&sspi->transfer_done);
@@ -625,7 +625,7 @@ static int synquacer_spi_probe(struct platform_device *pdev)
sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
- &master->max_speed_hz); /* for ACPI */
+ &host->max_speed_hz); /* for ACPI */
if (dev_of_node(&pdev->dev)) {
if (device_property_match_string(&pdev->dev,
@@ -655,21 +655,21 @@ static int synquacer_spi_probe(struct platform_device *pdev)
goto put_spi;
}
- master->max_speed_hz = clk_get_rate(sspi->clk);
+ host->max_speed_hz = clk_get_rate(sspi->clk);
}
- if (!master->max_speed_hz) {
+ if (!host->max_speed_hz) {
dev_err(&pdev->dev, "missing clock source\n");
ret = -EINVAL;
goto disable_clk;
}
- master->min_speed_hz = master->max_speed_hz / 254;
+ host->min_speed_hz = host->max_speed_hz / 254;
sspi->aces = device_property_read_bool(&pdev->dev,
"socionext,set-aces");
sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");
- master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
+ host->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
rx_irq = platform_get_irq(pdev, 0);
if (rx_irq <= 0) {
@@ -699,27 +699,27 @@ static int synquacer_spi_probe(struct platform_device *pdev)
goto disable_clk;
}
- master->dev.of_node = np;
- master->dev.fwnode = pdev->dev.fwnode;
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
+ host->dev.of_node = np;
+ host->dev.fwnode = pdev->dev.fwnode;
+ host->auto_runtime_pm = true;
+ host->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
- SPI_TX_QUAD | SPI_RX_QUAD;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
- SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
- master->set_cs = synquacer_spi_set_cs;
- master->transfer_one = synquacer_spi_transfer_one;
+ host->set_cs = synquacer_spi_set_cs;
+ host->transfer_one = synquacer_spi_transfer_one;
- ret = synquacer_spi_enable(master);
+ ret = synquacer_spi_enable(host);
if (ret)
goto disable_clk;
pm_runtime_set_active(sspi->dev);
pm_runtime_enable(sspi->dev);
- ret = devm_spi_register_master(sspi->dev, master);
+ ret = devm_spi_register_controller(sspi->dev, host);
if (ret)
goto disable_pm;
@@ -730,15 +730,15 @@ disable_pm:
disable_clk:
clk_disable_unprepare(sspi->clk);
put_spi:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static void synquacer_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
pm_runtime_disable(sspi->dev);
@@ -747,11 +747,11 @@ static void synquacer_spi_remove(struct platform_device *pdev)
static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -763,8 +763,8 @@ static int __maybe_unused synquacer_spi_suspend(struct device *dev)
static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct synquacer_spi *sspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct synquacer_spi *sspi = spi_controller_get_devdata(host);
int ret;
if (!pm_runtime_suspended(dev)) {
@@ -778,7 +778,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev)
return ret;
}
- ret = synquacer_spi_enable(master);
+ ret = synquacer_spi_enable(host);
if (ret) {
clk_disable_unprepare(sspi->clk);
dev_err(dev, "failed to enable spi (%d)\n", ret);
@@ -786,7 +786,7 @@ static int __maybe_unused synquacer_spi_resume(struct device *dev)
}
}
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret < 0)
clk_disable_unprepare(sspi->clk);
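The synquacer system PM hooks show the renamed suspend/resume helpers; the ordering is what matters: quiesce the message queue before gating the clock, and the mirror image on resume. A minimal sketch with a hypothetical demo_ driver:

#include <linux/clk.h>
#include <linux/spi/spi.h>

struct demo_spi {
	struct clk *clk;
};

static int demo_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct demo_spi *priv = spi_controller_get_devdata(host);
	int ret;

	/* Stop queueing new messages before the clock goes away. */
	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	clk_disable_unprepare(priv->clk);
	return 0;
}

static int demo_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct demo_spi *priv = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Clock is back; restart the queue. */
	return spi_controller_resume(host);
}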
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 460f232da..bc7cc4088 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -164,7 +164,7 @@ struct tegra_spi_client_data {
struct tegra_spi_data {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
spinlock_t lock;
struct clk *clk;
@@ -718,7 +718,7 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
struct spi_delay *setup = &spi->cs_setup;
struct spi_delay *hold = &spi->cs_hold;
struct spi_delay *inactive = &spi->cs_inactive;
@@ -772,7 +772,7 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
bool is_first_of_msg,
bool is_single_xfer)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
struct tegra_spi_client_data *cdata = spi->controller_data;
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
@@ -865,7 +865,7 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
static int tegra_spi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, u32 command1)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
unsigned total_fifo_words;
int ret;
@@ -912,10 +912,10 @@ static struct tegra_spi_client_data
*tegra_spi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_spi_client_data *cdata;
- struct device_node *slave_np;
+ struct device_node *target_np;
- slave_np = spi->dev.of_node;
- if (!slave_np) {
+ target_np = spi->dev.of_node;
+ if (!target_np) {
dev_dbg(&spi->dev, "device node not found\n");
return NULL;
}
@@ -924,9 +924,9 @@ static struct tegra_spi_client_data
if (!cdata)
return NULL;
- of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
+ of_property_read_u32(target_np, "nvidia,tx-clk-tap-delay",
&cdata->tx_clk_tap_delay);
- of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
+ of_property_read_u32(target_np, "nvidia,rx-clk-tap-delay",
&cdata->rx_clk_tap_delay);
return cdata;
}
@@ -942,7 +942,7 @@ static void tegra_spi_cleanup(struct spi_device *spi)
static int tegra_spi_setup(struct spi_device *spi)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
struct tegra_spi_client_data *cdata = spi->controller_data;
u32 val;
unsigned long flags;
@@ -993,7 +993,7 @@ static int tegra_spi_setup(struct spi_device *spi)
static void tegra_spi_transfer_end(struct spi_device *spi)
{
- struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(spi->controller);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
/* GPIO based chip select control */
@@ -1025,11 +1025,11 @@ static void tegra_spi_dump_regs(struct tegra_spi_data *tspi)
tegra_spi_readl(tspi, SPI_FIFO_STATUS));
}
-static int tegra_spi_transfer_one_message(struct spi_master *master,
+static int tegra_spi_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
bool is_first_msg = true;
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
@@ -1078,7 +1078,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
- tspi->last_used_cs = master->num_chipselect + 1;
+ tspi->last_used_cs = host->num_chipselect + 1;
goto complete_xfer;
}
@@ -1112,7 +1112,7 @@ complete_xfer:
ret = 0;
exit:
msg->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -1293,40 +1293,40 @@ MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
static int tegra_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_spi_data *tspi;
struct resource *r;
int ret, spi_irq;
int bus_num;
- master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
- if (!master) {
- dev_err(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*tspi));
+ if (!host) {
+ dev_err(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
- platform_set_drvdata(pdev, master);
- tspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ tspi = spi_controller_get_devdata(host);
if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
- &master->max_speed_hz))
- master->max_speed_hz = 25000000; /* 25MHz */
+ &host->max_speed_hz))
+ host->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
- master->use_gpio_descriptors = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
- SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->setup = tegra_spi_setup;
- master->cleanup = tegra_spi_cleanup;
- master->transfer_one_message = tegra_spi_transfer_one_message;
- master->set_cs_timing = tegra_spi_set_hw_cs_timing;
- master->num_chipselect = MAX_CHIP_SELECT;
- master->auto_runtime_pm = true;
+ host->use_gpio_descriptors = true;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ host->setup = tegra_spi_setup;
+ host->cleanup = tegra_spi_cleanup;
+ host->transfer_one_message = tegra_spi_transfer_one_message;
+ host->set_cs_timing = tegra_spi_set_hw_cs_timing;
+ host->num_chipselect = MAX_CHIP_SELECT;
+ host->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
- master->bus_num = bus_num;
+ host->bus_num = bus_num;
- tspi->master = master;
+ tspi->host = host;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
@@ -1334,20 +1334,20 @@ static int tegra_spi_probe(struct platform_device *pdev)
if (!tspi->soc_data) {
dev_err(&pdev->dev, "unsupported tegra\n");
ret = -ENODEV;
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
if (spi_irq < 0) {
ret = spi_irq;
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->irq = spi_irq;
@@ -1355,14 +1355,14 @@ static int tegra_spi_probe(struct platform_device *pdev)
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
@@ -1370,7 +1370,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
ret = tegra_spi_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_master;
+ goto exit_free_host;
ret = tegra_spi_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1401,7 +1401,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
- tspi->last_used_cs = master->num_chipselect + 1;
+ tspi->last_used_cs = host->num_chipselect + 1;
pm_runtime_put(&pdev->dev);
ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
tegra_spi_isr_thread, IRQF_ONESHOT,
@@ -1412,10 +1412,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- master->dev.of_node = pdev->dev.of_node;
- ret = devm_spi_register_master(&pdev->dev, master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
- dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ dev_err(&pdev->dev, "can not register to host err %d\n", ret);
goto exit_free_irq;
}
return ret;
@@ -1429,15 +1429,15 @@ exit_pm_disable:
tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_spi_deinit_dma_param(tspi, true);
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void tegra_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
free_irq(tspi->irq, tspi);
@@ -1455,15 +1455,15 @@ static void tegra_spi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int tegra_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int tegra_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1473,17 +1473,17 @@ static int tegra_spi_resume(struct device *dev)
}
tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
- tspi->last_used_cs = master->num_chipselect + 1;
+ tspi->last_used_cs = host->num_chipselect + 1;
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
static int tegra_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
/* Flush all write which are in PPSB queue by reading back */
tegra_spi_readl(tspi, SPI_COMMAND1);
@@ -1494,8 +1494,8 @@ static int tegra_spi_runtime_suspend(struct device *dev)
static int tegra_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(tspi->clk);
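In the tegra114 driver most hunks are the per-device variant of the rename: hooks now reach the controller via spi->controller rather than the older spi->master alias, then fetch driver state with spi_controller_get_devdata(). A sketch of that access pattern (demo_* invented):

#include <linux/spi/spi.h>

struct demo_data {
	int last_cs;
};

static int demo_setup(struct spi_device *spi)
{
	/* spi->controller is the preferred spelling of the old spi->master. */
	struct demo_data *priv = spi_controller_get_devdata(spi->controller);

	priv->last_cs = spi_get_chipselect(spi, 0);
	return 0;
}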
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 0c5507473..9f6b9f89b 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -102,7 +102,7 @@
struct tegra_sflash_data {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
spinlock_t lock;
struct clk *clk;
@@ -251,7 +251,7 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, bool is_first_of_msg,
bool is_single_xfer)
{
- struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(spi->controller);
u32 speed;
u32 command;
@@ -303,12 +303,12 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
-static int tegra_sflash_transfer_one_message(struct spi_master *master,
+static int tegra_sflash_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
bool is_first_msg = true;
int single_xfer;
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
int ret;
@@ -351,7 +351,7 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
exit:
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
msg->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -416,7 +416,7 @@ MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
static int tegra_sflash_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_sflash_data *tsd;
int ret;
const struct of_device_id *match;
@@ -427,37 +427,37 @@ static int tegra_sflash_probe(struct platform_device *pdev)
return -ENODEV;
}
- master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
- if (!master) {
- dev_err(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*tsd));
+ if (!host) {
+ dev_err(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->transfer_one_message = tegra_sflash_transfer_one_message;
- master->auto_runtime_pm = true;
- master->num_chipselect = MAX_CHIP_SELECT;
-
- platform_set_drvdata(pdev, master);
- tsd = spi_master_get_devdata(master);
- tsd->master = master;
+ host->mode_bits = SPI_CPOL | SPI_CPHA;
+ host->transfer_one_message = tegra_sflash_transfer_one_message;
+ host->auto_runtime_pm = true;
+ host->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, host);
+ tsd = spi_controller_get_devdata(host);
+ tsd->host = host;
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
- &master->max_speed_hz))
- master->max_speed_hz = 25000000; /* 25MHz */
+ &host->max_speed_hz))
+ host->max_speed_hz = 25000000; /* 25MHz */
tsd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tsd->base)) {
ret = PTR_ERR(tsd->base);
- goto exit_free_master;
+ goto exit_free_host;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto exit_free_master;
+ goto exit_free_host;
tsd->irq = ret;
ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
@@ -465,7 +465,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tsd->irq);
- goto exit_free_master;
+ goto exit_free_host;
}
tsd->clk = devm_clk_get(&pdev->dev, NULL);
@@ -505,10 +505,10 @@ static int tegra_sflash_probe(struct platform_device *pdev)
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
pm_runtime_put(&pdev->dev);
- master->dev.of_node = pdev->dev.of_node;
- ret = devm_spi_register_master(&pdev->dev, master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
- dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ dev_err(&pdev->dev, "can not register to host err %d\n", ret);
goto exit_pm_disable;
}
return ret;
@@ -519,15 +519,15 @@ exit_pm_disable:
tegra_sflash_runtime_suspend(&pdev->dev);
exit_free_irq:
free_irq(tsd->irq, tsd);
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void tegra_sflash_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
free_irq(tsd->irq, tsd);
@@ -539,15 +539,15 @@ static void tegra_sflash_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int tegra_sflash_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int tegra_sflash_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -558,14 +558,14 @@ static int tegra_sflash_resume(struct device *dev)
tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
static int tegra_sflash_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
/* Flush all write which are in PPSB queue by reading back */
tegra_sflash_readl(tsd, SPI_COMMAND);
@@ -576,8 +576,8 @@ static int tegra_sflash_runtime_suspend(struct device *dev)
static int tegra_sflash_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(tsd->clk);
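tegra20-sflash uses the queued ->transfer_one_message() path, where the host argument replaces the old master one and the message is retired with spi_finalize_current_message(). A skeleton of that shape, with the hypothetical demo_do_xfer() standing in for the FIFO programming:

#include <linux/spi/spi.h>

static int demo_do_xfer(struct spi_controller *host, struct spi_device *spi,
			struct spi_transfer *xfer)
{
	return 0;	/* hardware programming elided in this sketch */
}

static int demo_transfer_one_message(struct spi_controller *host,
				     struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = demo_do_xfer(host, msg->spi, xfer);
		if (ret)
			break;
		msg->actual_length += xfer->len;
	}

	/* Always finalize, success or not, so the queue can advance. */
	msg->status = ret;
	spi_finalize_current_message(host);
	return ret;
}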
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index f5cd365c9..ed1393d15 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -152,7 +152,7 @@ struct tegra_slink_chip_data {
struct tegra_slink_data {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
const struct tegra_slink_chip_data *chip_data;
spinlock_t lock;
@@ -671,7 +671,7 @@ static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
static int tegra_slink_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(spi->controller);
u32 speed;
u8 bits_per_word;
unsigned total_fifo_words;
@@ -737,7 +737,7 @@ static int tegra_slink_setup(struct spi_device *spi)
SLINK_CS_POLARITY3,
};
- struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(spi->controller);
u32 val;
unsigned long flags;
int ret;
@@ -768,10 +768,10 @@ static int tegra_slink_setup(struct spi_device *spi)
return 0;
}
-static int tegra_slink_prepare_message(struct spi_master *master,
+static int tegra_slink_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
tegra_slink_clear_status(tspi);
@@ -794,11 +794,11 @@ static int tegra_slink_prepare_message(struct spi_master *master,
return 0;
}
-static int tegra_slink_transfer_one(struct spi_master *master,
+static int tegra_slink_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
int ret;
reinit_completion(&tspi->xfer_completion);
@@ -825,10 +825,10 @@ static int tegra_slink_transfer_one(struct spi_master *master,
return 0;
}
-static int tegra_slink_unprepare_message(struct spi_master *master,
+static int tegra_slink_unprepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
@@ -999,7 +999,7 @@ MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
static int tegra_slink_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_slink_data *tspi;
struct resource *r;
int ret, spi_irq;
@@ -1007,36 +1007,36 @@ static int tegra_slink_probe(struct platform_device *pdev)
cdata = of_device_get_match_data(&pdev->dev);
- master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
- if (!master) {
- dev_err(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*tspi));
+ if (!host) {
+ dev_err(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->setup = tegra_slink_setup;
- master->prepare_message = tegra_slink_prepare_message;
- master->transfer_one = tegra_slink_transfer_one;
- master->unprepare_message = tegra_slink_unprepare_message;
- master->auto_runtime_pm = true;
- master->num_chipselect = MAX_CHIP_SELECT;
-
- platform_set_drvdata(pdev, master);
- tspi = spi_master_get_devdata(master);
- tspi->master = master;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->setup = tegra_slink_setup;
+ host->prepare_message = tegra_slink_prepare_message;
+ host->transfer_one = tegra_slink_transfer_one;
+ host->unprepare_message = tegra_slink_unprepare_message;
+ host->auto_runtime_pm = true;
+ host->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, host);
+ tspi = spi_controller_get_devdata(host);
+ tspi->host = host;
tspi->dev = &pdev->dev;
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
- &master->max_speed_hz))
- master->max_speed_hz = 25000000; /* 25MHz */
+ &host->max_speed_hz))
+ host->max_speed_hz = 25000000; /* 25MHz */
tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->phys = r->start;
@@ -1045,26 +1045,26 @@ static int tegra_slink_probe(struct platform_device *pdev)
if (IS_ERR(tspi->clk)) {
ret = PTR_ERR(tspi->clk);
dev_err(&pdev->dev, "Can not get clock %d\n", ret);
- goto exit_free_master;
+ goto exit_free_host;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_master;
+ goto exit_free_host;
}
ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (ret)
- goto exit_free_master;
+ goto exit_free_host;
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
ret = tegra_slink_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_master;
+ goto exit_free_host;
ret = tegra_slink_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1103,10 +1103,10 @@ static int tegra_slink_probe(struct platform_device *pdev)
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
- master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(host);
if (ret < 0) {
- dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ dev_err(&pdev->dev, "can not register to host err %d\n", ret);
goto exit_free_irq;
}
@@ -1124,17 +1124,17 @@ exit_pm_disable:
tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void tegra_slink_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
free_irq(tspi->irq, tspi);
@@ -1146,21 +1146,21 @@ static void tegra_slink_remove(struct platform_device *pdev)
if (tspi->rx_dma_chan)
tegra_slink_deinit_dma_param(tspi, true);
- spi_master_put(master);
+ spi_controller_put(host);
}
#ifdef CONFIG_PM_SLEEP
static int tegra_slink_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int tegra_slink_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1172,14 +1172,14 @@ static int tegra_slink_resume(struct device *dev)
tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
/* Flush all writes which are in the PPSB queue by reading back */
tegra_slink_readl(tspi, SLINK_MAS_DATA);
@@ -1190,8 +1190,8 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(tspi->clk);
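The spi-tegra20-slink.c hunks above are part of the tree-wide move from the deprecated spi_master naming to controller/host terminology; each renamed call is a one-for-one alias with unchanged semantics. A minimal sketch of the renamed allocate/register/teardown idiom — foo_spi_data and foo_spi_probe are illustrative names, not from this patch:

	struct foo_spi_data {
		struct spi_controller *host;	/* was: struct spi_master *master */
	};

	static int foo_spi_probe(struct platform_device *pdev)
	{
		struct spi_controller *host;
		struct foo_spi_data *data;
		int ret;

		host = spi_alloc_host(&pdev->dev, sizeof(*data));	/* was spi_alloc_master() */
		if (!host)
			return -ENOMEM;

		data = spi_controller_get_devdata(host);	/* was spi_master_get_devdata() */
		data->host = host;
		platform_set_drvdata(pdev, host);

		host->dev.of_node = pdev->dev.of_node;
		ret = spi_register_controller(host);		/* was spi_register_master() */
		if (ret)
			goto exit_free_host;

		return 0;

	exit_free_host:
		spi_controller_put(host);			/* was spi_master_put() */
		return ret;
	}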
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index e9ad9b0b5..afbd64a21 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -175,7 +175,7 @@ struct tegra_qspi_client_data {
struct tegra_qspi {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
/* lock to protect data accessed by irq */
spinlock_t lock;
@@ -809,7 +809,7 @@ err_out:
static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
bool is_first_of_msg)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
struct tegra_qspi_client_data *cdata = spi->controller_data;
u32 command1, command2, speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
@@ -870,7 +870,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
static int tegra_qspi_start_transfer_one(struct spi_device *spi,
struct spi_transfer *t, u32 command1)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
unsigned int total_fifo_words;
u8 bus_width = 0;
int ret;
@@ -925,7 +925,7 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
@@ -941,7 +941,7 @@ static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_devic
static int tegra_qspi_setup(struct spi_device *spi)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
struct tegra_qspi_client_data *cdata = spi->controller_data;
unsigned long flags;
u32 val;
@@ -1005,7 +1005,7 @@ static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
static void tegra_qspi_transfer_end(struct spi_device *spi)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
if (cs_val)
@@ -1316,10 +1316,10 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
return true;
}
-static int tegra_qspi_transfer_one_message(struct spi_master *master,
+static int tegra_qspi_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
int ret;
if (tegra_qspi_validate_cmb_seq(tqspi, msg))
@@ -1327,7 +1327,7 @@ static int tegra_qspi_transfer_one_message(struct spi_master *master,
else
ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -1533,38 +1533,38 @@ MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
static int tegra_qspi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct tegra_qspi *tqspi;
struct resource *r;
int ret, qspi_irq;
int bus_num;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
- tqspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, host);
+ tqspi = spi_controller_get_devdata(host);
- master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
- SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
- master->flags = SPI_CONTROLLER_HALF_DUPLEX;
- master->setup = tegra_qspi_setup;
- master->transfer_one_message = tegra_qspi_transfer_one_message;
- master->num_chipselect = 1;
- master->auto_runtime_pm = true;
+ host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
+ SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->setup = tegra_qspi_setup;
+ host->transfer_one_message = tegra_qspi_transfer_one_message;
+ host->num_chipselect = 1;
+ host->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
if (bus_num >= 0)
- master->bus_num = bus_num;
+ host->bus_num = bus_num;
- tqspi->master = master;
+ tqspi->host = host;
tqspi->dev = &pdev->dev;
spin_lock_init(&tqspi->lock);
tqspi->soc_data = device_get_match_data(&pdev->dev);
- master->num_chipselect = tqspi->soc_data->cs_count;
+ host->num_chipselect = tqspi->soc_data->cs_count;
tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tqspi->base))
return PTR_ERR(tqspi->base);
@@ -1625,10 +1625,10 @@ static int tegra_qspi_probe(struct platform_device *pdev)
goto exit_pm_disable;
}
- master->dev.of_node = pdev->dev.of_node;
- ret = spi_register_master(master);
+ host->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_controller(host);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to register master: %d\n", ret);
+ dev_err(&pdev->dev, "failed to register host: %d\n", ret);
goto exit_free_irq;
}
@@ -1644,10 +1644,10 @@ exit_pm_disable:
static void tegra_qspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
free_irq(tqspi->irq, tqspi);
pm_runtime_force_suspend(&pdev->dev);
tegra_qspi_deinit_dma(tqspi);
@@ -1655,15 +1655,15 @@ static void tegra_qspi_remove(struct platform_device *pdev)
static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1676,13 +1676,13 @@ static int __maybe_unused tegra_qspi_resume(struct device *dev)
tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
/* Runtime pm disabled with ACPI */
if (has_acpi_companion(tqspi->dev))
@@ -1697,8 +1697,8 @@ static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct tegra_qspi *tqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
int ret;
/* Runtime pm disabled with ACPI */
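Unlike the slink driver, spi-tegra210-quad.c uses the device-managed allocator, which is why its error paths need no explicit put. A sketch of that variant under the new naming — bar_spi_data and bar_spi_probe are hypothetical:

	struct bar_spi_data {
		void __iomem *base;
	};

	static int bar_spi_probe(struct platform_device *pdev)
	{
		struct spi_controller *host;

		/* devm_spi_alloc_host() replaces devm_spi_alloc_master(); the
		 * reference is dropped automatically on probe failure or driver
		 * detach, so no spi_controller_put() is needed anywhere.
		 */
		host = devm_spi_alloc_host(&pdev->dev, sizeof(struct bar_spi_data));
		if (!host)
			return -ENOMEM;

		platform_set_drvdata(pdev, host);
		return devm_spi_register_controller(&pdev->dev, host);
	}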
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 4c81516b6..0fe6899e7 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -40,7 +40,7 @@ struct ti_qspi {
/* list synchronization */
struct mutex list_lock;
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base;
void __iomem *mmap_base;
size_t mmap_size;
@@ -137,20 +137,20 @@ static inline void ti_qspi_write(struct ti_qspi *qspi,
static int ti_qspi_setup(struct spi_device *spi)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
int ret;
- if (spi->master->busy) {
- dev_dbg(qspi->dev, "master busy doing other transfers\n");
+ if (spi->controller->busy) {
+ dev_dbg(qspi->dev, "host busy doing other transfers\n");
return -EBUSY;
}
- if (!qspi->master->max_speed_hz) {
+ if (!qspi->host->max_speed_hz) {
dev_err(qspi->dev, "spi max frequency not defined\n");
return -EINVAL;
}
- spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);
+ spi->max_speed_hz = min(spi->max_speed_hz, qspi->host->max_speed_hz);
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0) {
@@ -526,7 +526,7 @@ static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base) {
@@ -540,7 +540,7 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
if (qspi->ctrl_base)
@@ -554,7 +554,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
u8 data_nbits, u8 addr_width,
u8 dummy_bytes)
{
- struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
u32 memval = opcode;
switch (data_nbits) {
@@ -576,7 +576,7 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
- struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
size_t max_len;
if (op->data.dir == SPI_MEM_DATA_IN) {
@@ -606,19 +606,19 @@ static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
u32 from = 0;
int ret = 0;
/* Only optimize read path. */
if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
!op->addr.nbytes || op->addr.nbytes > 4)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* Address exceeds MMIO window size, fall back to regular mode. */
from = op->addr.val;
if (from + op->data.nbytes > qspi->mmap_size)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
mutex_lock(&qspi->list_lock);
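Returning -EOPNOTSUPP (rather than the non-UAPI -ENOTSUPP) from exec_op is what tells the spi-mem core to fall back to regular transfers instead of failing the operation outright. A condensed sketch of the guard logic in this hunk; foo_exec_mem_op and foo_qspi are illustrative names:

	static int foo_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
	{
		struct foo_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);

		/* Only reads through the MMIO window are accelerated; everything
		 * else is punted back with -EOPNOTSUPP, which spi-mem treats as
		 * "use the generic transfer path", not as a hard error.
		 */
		if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
		    !op->addr.nbytes || op->addr.nbytes > 4)
			return -EOPNOTSUPP;

		if (op->addr.val + op->data.nbytes > qspi->mmap_size)
			return -EOPNOTSUPP;

		/* ... perform the memory-mapped read ... */
		return 0;
	}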
@@ -633,10 +633,10 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem,
struct sg_table sgt;
if (virt_addr_valid(op->data.buf.in) &&
- !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
+ !spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
&sgt)) {
ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
- spi_controller_dma_unmap_mem_op_data(mem->spi->master,
+ spi_controller_dma_unmap_mem_op_data(mem->spi->controller,
op, &sgt);
} else {
ret = ti_qspi_dma_bounce_buffer(qspi, from,
@@ -658,10 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
.adjust_op_size = ti_qspi_adjust_op_size,
};
-static int ti_qspi_start_transfer_one(struct spi_master *master,
+static int ti_qspi_start_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
- struct ti_qspi *qspi = spi_master_get_devdata(master);
+ struct ti_qspi *qspi = spi_controller_get_devdata(host);
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0, ret;
@@ -720,7 +720,7 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
m->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return status;
}
@@ -756,33 +756,33 @@ MODULE_DEVICE_TABLE(of, ti_qspi_match);
static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *r, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
dma_cap_mask_t mask;
- master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*qspi));
+ if (!host)
return -ENOMEM;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
- master->flags = SPI_CONTROLLER_HALF_DUPLEX;
- master->setup = ti_qspi_setup;
- master->auto_runtime_pm = true;
- master->transfer_one_message = ti_qspi_start_transfer_one;
- master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
- SPI_BPW_MASK(8);
- master->mem_ops = &ti_qspi_mem_ops;
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->setup = ti_qspi_setup;
+ host->auto_runtime_pm = true;
+ host->transfer_one_message = ti_qspi_start_transfer_one;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ host->mem_ops = &ti_qspi_mem_ops;
if (!of_property_read_u32(np, "num-cs", &num_cs))
- master->num_chipselect = num_cs;
+ host->num_chipselect = num_cs;
- qspi = spi_master_get_devdata(master);
- qspi->master = master;
+ qspi = spi_controller_get_devdata(host);
+ qspi->host = host;
qspi->dev = &pdev->dev;
platform_set_drvdata(pdev, qspi);
@@ -792,7 +792,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (r == NULL) {
dev_err(&pdev->dev, "missing platform data\n");
ret = -ENODEV;
- goto free_master;
+ goto free_host;
}
}
@@ -812,7 +812,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto free_master;
+ goto free_host;
}
mutex_init(&qspi->list_lock);
@@ -820,7 +820,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
qspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(qspi->base)) {
ret = PTR_ERR(qspi->base);
- goto free_master;
+ goto free_host;
}
@@ -830,7 +830,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
"syscon-chipselects");
if (IS_ERR(qspi->ctrl_base)) {
ret = PTR_ERR(qspi->ctrl_base);
- goto free_master;
+ goto free_host;
}
ret = of_property_read_u32_index(np,
"syscon-chipselects",
@@ -838,7 +838,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"couldn't get ctrl_mod reg index\n");
- goto free_master;
+ goto free_host;
}
}
@@ -853,7 +853,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
- master->max_speed_hz = max_freq;
+ host->max_speed_hz = max_freq;
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -876,7 +876,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
dma_release_channel(qspi->rx_chan);
goto no_dma;
}
- master->dma_rx = qspi->rx_chan;
+ host->dma_rx = qspi->rx_chan;
init_completion(&qspi->transfer_complete);
if (res_mmap)
qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;
@@ -889,39 +889,40 @@ no_dma:
"mmap failed with error %ld using PIO mode\n",
PTR_ERR(qspi->mmap_base));
qspi->mmap_base = NULL;
- master->mem_ops = NULL;
+ host->mem_ops = NULL;
}
}
qspi->mmap_enabled = false;
qspi->current_cs = -1;
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (!ret)
return 0;
ti_qspi_dma_cleanup(qspi);
pm_runtime_disable(&pdev->dev);
-free_master:
- spi_master_put(master);
+free_host:
+ spi_controller_put(host);
return ret;
}
-static int ti_qspi_remove(struct platform_device *pdev)
+static void ti_qspi_remove(struct platform_device *pdev)
{
struct ti_qspi *qspi = platform_get_drvdata(pdev);
int rc;
- rc = spi_master_suspend(qspi->master);
- if (rc)
- return rc;
+ rc = spi_controller_suspend(qspi->host);
+ if (rc) {
+ dev_alert(&pdev->dev, "spi_controller_suspend() failed (%pe)\n",
+ ERR_PTR(rc));
+ return;
+ }
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
ti_qspi_dma_cleanup(qspi);
-
- return 0;
}
static const struct dev_pm_ops ti_qspi_pm_ops = {
@@ -930,7 +931,7 @@ static const struct dev_pm_ops ti_qspi_pm_ops = {
static struct platform_driver ti_qspi_driver = {
.probe = ti_qspi_probe,
- .remove = ti_qspi_remove,
+ .remove_new = ti_qspi_remove,
.driver = {
.name = "ti-qspi",
.pm = &ti_qspi_pm_ops,
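ti_qspi_remove() also converts to the void-returning .remove_new callback: the platform core has long ignored the old int return, so failures in the remove path must be logged instead of returned. A minimal sketch of the conversion pattern, with hypothetical foo_* names:

	static void foo_remove(struct platform_device *pdev)
	{
		struct foo_priv *priv = platform_get_drvdata(pdev);
		int ret;

		ret = spi_controller_suspend(priv->host);
		if (ret)
			/* no error return possible any more; report and carry on */
			dev_alert(&pdev->dev, "suspend failed (%pe)\n", ERR_PTR(ret));

		pm_runtime_disable(&pdev->dev);
	}

	static struct platform_driver foo_driver = {
		.probe = foo_probe,
		.remove_new = foo_remove,	/* void return, was .remove */
		.driver = { .name = "foo-spi" },
	};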
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index af5846cfe..271f3e7f8 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -124,7 +124,7 @@ struct pch_spi_dma_ctrl {
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
* @io_base_addr: Base address
- * @master: Pointer to the SPI master structure
+ * @host: Pointer to the SPI controller structure
* @work: Reference to work queue handler
* @wait: Wait queue for waking up upon receiving an
* interrupt.
@@ -161,7 +161,7 @@ struct pch_spi_dma_ctrl {
struct pch_spi_data {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
- struct spi_master *master;
+ struct spi_controller *host;
struct work_struct work;
wait_queue_head_t wait;
u8 transfer_complete;
@@ -216,48 +216,48 @@ static const struct pci_device_id pch_spi_pcidev_id[] = {
/**
* pch_spi_writereg() - Performs register writes
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @idx: Register offset.
* @val: Value to be written to register.
*/
-static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
+static inline void pch_spi_writereg(struct spi_controller *host, int idx, u32 val)
{
- struct pch_spi_data *data = spi_master_get_devdata(master);
+ struct pch_spi_data *data = spi_controller_get_devdata(host);
iowrite32(val, (data->io_remap_addr + idx));
}
/**
* pch_spi_readreg() - Performs register reads
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @idx: Register offset.
*/
-static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
+static inline u32 pch_spi_readreg(struct spi_controller *host, int idx)
{
- struct pch_spi_data *data = spi_master_get_devdata(master);
+ struct pch_spi_data *data = spi_controller_get_devdata(host);
return ioread32(data->io_remap_addr + idx);
}
-static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
+static inline void pch_spi_setclr_reg(struct spi_controller *host, int idx,
u32 set, u32 clr)
{
- u32 tmp = pch_spi_readreg(master, idx);
+ u32 tmp = pch_spi_readreg(host, idx);
tmp = (tmp & ~clr) | set;
- pch_spi_writereg(master, idx, tmp);
+ pch_spi_writereg(host, idx, tmp);
}
-static void pch_spi_set_master_mode(struct spi_master *master)
+static void pch_spi_set_host_mode(struct spi_controller *host)
{
- pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
+ pch_spi_setclr_reg(host, PCH_SPCR, SPCR_MSTR_BIT, 0);
}
/**
* pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
*/
-static void pch_spi_clear_fifo(struct spi_master *master)
+static void pch_spi_clear_fifo(struct spi_controller *host)
{
- pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
- pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
+ pch_spi_setclr_reg(host, PCH_SPCR, SPCR_FICLR_BIT, 0);
+ pch_spi_setclr_reg(host, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
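pch_spi_setclr_reg() is a read-modify-write helper: the clr mask is cleared first, then the set mask is ORed in, so a bit named in both arguments ends up set. Usage sketches with masks taken from this driver:

	/* enable the SPI block while dropping all interrupt-enable bits */
	pch_spi_setclr_reg(data->host, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);

	/* clear both FIFOs by pulsing FICLR: set it, then clear it again */
	pch_spi_setclr_reg(data->host, PCH_SPCR, SPCR_FICLR_BIT, 0);
	pch_spi_setclr_reg(data->host, PCH_SPCR, 0, SPCR_FICLR_BIT);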
@@ -312,7 +312,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
if (reg_spsr_val & SPSR_FI_BIT) {
if ((tx_index == bpw_len) && (rx_index == tx_index)) {
/* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0,
PCH_ALL);
/* transfer is completed;
@@ -321,7 +321,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
data->transfer_active = false;
wake_up(&data->wait);
} else {
- dev_vdbg(&data->master->dev,
+ dev_vdbg(&data->host->dev,
"%s : Transfer is not completed",
__func__);
}
@@ -383,10 +383,10 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
/**
* pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @speed_hz: Baud rate.
*/
-static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
+static void pch_spi_set_baud_rate(struct spi_controller *host, u32 speed_hz)
{
u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
@@ -394,21 +394,21 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
- pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
+ pch_spi_setclr_reg(host, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
/**
* pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
* @bits_per_word: Bits per word for SPI transfer.
*/
-static void pch_spi_set_bits_per_word(struct spi_master *master,
+static void pch_spi_set_bits_per_word(struct spi_controller *host,
u8 bits_per_word)
{
if (bits_per_word == 8)
- pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
+ pch_spi_setclr_reg(host, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
else
- pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
+ pch_spi_setclr_reg(host, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}
/**
@@ -420,12 +420,12 @@ static void pch_spi_setup_transfer(struct spi_device *spi)
u32 flags = 0;
dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
- __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
+ __func__, pch_spi_readreg(spi->controller, PCH_SPBRR),
spi->max_speed_hz);
- pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
+ pch_spi_set_baud_rate(spi->controller, spi->max_speed_hz);
/* set bits per word */
- pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
+ pch_spi_set_bits_per_word(spi->controller, spi->bits_per_word);
if (!(spi->mode & SPI_LSB_FIRST))
flags |= SPCR_LSBF_BIT;
@@ -433,29 +433,29 @@ static void pch_spi_setup_transfer(struct spi_device *spi)
flags |= SPCR_CPOL_BIT;
if (spi->mode & SPI_CPHA)
flags |= SPCR_CPHA_BIT;
- pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
+ pch_spi_setclr_reg(spi->controller, PCH_SPCR, flags,
(SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
- pch_spi_clear_fifo(spi->master);
+ pch_spi_clear_fifo(spi->controller);
}
/**
* pch_spi_reset() - Clears SPI registers
- * @master: Pointer to struct spi_master.
+ * @host: Pointer to struct spi_controller.
*/
-static void pch_spi_reset(struct spi_master *master)
+static void pch_spi_reset(struct spi_controller *host)
{
/* write 1 to reset SPI */
- pch_spi_writereg(master, PCH_SRST, 0x1);
+ pch_spi_writereg(host, PCH_SRST, 0x1);
/* clear reset */
- pch_spi_writereg(master, PCH_SRST, 0x0);
+ pch_spi_writereg(host, PCH_SRST, 0x0);
}
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
- struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
+ struct pch_spi_data *data = spi_controller_get_devdata(pspi->controller);
int retval;
unsigned long flags;
@@ -524,15 +524,15 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
- dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
- pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ dev_dbg(&data->host->dev, "%s:setting baud rate\n", __func__);
+ pch_spi_set_baud_rate(data->host, data->cur_trans->speed_hz);
}
/* set bits per word if needed */
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
- dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
- pch_spi_set_bits_per_word(data->master,
+ dev_dbg(&data->host->dev, "%s:set bits per word\n", __func__);
+ pch_spi_set_bits_per_word(data->host,
data->cur_trans->bits_per_word);
*bpw = data->cur_trans->bits_per_word;
} else {
@@ -590,13 +590,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
if (n_writes > PCH_MAX_FIFO_DEPTH)
n_writes = PCH_MAX_FIFO_DEPTH;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
__func__);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_LOW);
for (j = 0; j < n_writes; j++)
- pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
+ pch_spi_writereg(data->host, PCH_SPDWR, data->pkt_tx_buff[j]);
/* update tx_index */
data->tx_index = j;
@@ -609,13 +609,13 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
struct spi_message *pmsg, *tmp;
- dev_dbg(&data->master->dev, "%s called\n", __func__);
+ dev_dbg(&data->host->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
data->current_msg->status = 0;
if (data->current_msg->complete) {
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:Invoking callback of SPI core\n", __func__);
data->current_msg->complete(data->current_msg->context);
}
@@ -623,7 +623,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
/* update status in global variable */
data->bcurrent_msg_processing = false;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:data->bcurrent_msg_processing = false\n", __func__);
data->current_msg = NULL;
@@ -638,11 +638,11 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
* transfer requests in the current message or there are
* more messages)
*/
- dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
+ dev_dbg(&data->host->dev, "%s:Invoke queue_work\n", __func__);
schedule_work(&data->work);
} else if (data->board_dat->suspend_sts ||
data->status == STATUS_EXITING) {
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
@@ -662,14 +662,14 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
/* enable interrupts, set threshold, enable SPI */
if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
/* set receive threshold to PCH_RX_THOLD */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
+ pch_spi_setclr_reg(data->host, PCH_SPCR,
PCH_RX_THOLD << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_RFIE_BIT |
SPCR_ORIE_BIT | SPCR_SPE_BIT,
MASK_RFIC_SPCR_BITS | PCH_ALL);
else
/* set receive threshold to maximum */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
+ pch_spi_setclr_reg(data->host, PCH_SPCR,
PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
SPCR_FIE_BIT | SPCR_ORIE_BIT |
SPCR_SPE_BIT,
@@ -677,18 +677,18 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:waiting for transfer to get over\n", __func__);
wait_event_interruptible(data->wait, data->transfer_complete);
/* clear all interrupts */
- pch_spi_writereg(data->master, PCH_SPSR,
- pch_spi_readreg(data->master, PCH_SPSR));
+ pch_spi_writereg(data->host, PCH_SPSR,
+ pch_spi_readreg(data->host, PCH_SPSR));
/* Disable interrupts and SPI transfer */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
/* clear FIFO */
- pch_spi_clear_fifo(data->master);
+ pch_spi_clear_fifo(data->host);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
@@ -750,25 +750,25 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
spin_lock_irqsave(&data->lock, flags);
/* disable interrupts, SPI set enable */
- pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
spin_unlock_irqrestore(&data->lock, flags);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:waiting for transfer to get over\n", __func__);
rtn = wait_event_interruptible_timeout(data->wait,
data->transfer_complete,
msecs_to_jiffies(2 * HZ));
if (!rtn)
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"%s wait-event timeout\n", __func__);
- dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ dma_sync_sg_for_cpu(&data->host->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
- dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+ dma_sync_sg_for_cpu(&data->host->dev, dma->sg_tx_p, dma->nent,
DMA_FROM_DEVICE);
memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
@@ -780,14 +780,14 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
spin_lock_irqsave(&data->lock, flags);
/* clear fifo threshold, disable interrupts, disable SPI transfer */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0,
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
SPCR_SPE_BIT);
/* clear all interrupts */
- pch_spi_writereg(data->master, PCH_SPSR,
- pch_spi_readreg(data->master, PCH_SPSR));
+ pch_spi_writereg(data->host, PCH_SPSR,
+ pch_spi_readreg(data->host, PCH_SPSR));
/* clear FIFO */
- pch_spi_clear_fifo(data->master);
+ pch_spi_clear_fifo(data->host);
spin_unlock_irqrestore(&data->lock, flags);
@@ -846,7 +846,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"ERROR: dma_request_channel FAILS(Tx)\n");
goto out;
}
@@ -860,7 +860,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
if (!chan) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"ERROR: dma_request_channel FAILS(Rx)\n");
dma_release_channel(dma->chan_tx);
dma->chan_tx = NULL;
@@ -913,9 +913,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
- dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ dev_dbg(&data->host->dev, "%s:setting baud rate\n", __func__);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ pch_spi_set_baud_rate(data->host, data->cur_trans->speed_hz);
spin_unlock_irqrestore(&data->lock, flags);
}
@@ -923,9 +923,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
if (data->cur_trans->bits_per_word &&
(data->current_msg->spi->bits_per_word !=
data->cur_trans->bits_per_word)) {
- dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ dev_dbg(&data->host->dev, "%s:set bits per word\n", __func__);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_set_bits_per_word(data->master,
+ pch_spi_set_bits_per_word(data->host,
data->cur_trans->bits_per_word);
spin_unlock_irqrestore(&data->lock, flags);
*bpw = data->cur_trans->bits_per_word;
@@ -969,12 +969,12 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
size = data->bpw_len;
rem = data->bpw_len;
}
- dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+ dev_dbg(&data->host->dev, "%s num=%d size=%d rem=%d\n",
__func__, num, size, rem);
spin_lock_irqsave(&data->lock, flags);
/* set receive fifo threshold and transmit fifo threshold */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
+ pch_spi_setclr_reg(data->host, PCH_SPCR,
((size - 1) << SPCR_RFIC_FIELD) |
(PCH_TX_THOLD << SPCR_TFIC_FIELD),
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
@@ -1016,11 +1016,11 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"%s:dmaengine_prep_slave_sg Failed\n", __func__);
return;
}
- dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+ dma_sync_sg_for_device(&data->host->dev, sg, num, DMA_FROM_DEVICE);
desc_rx->callback = pch_dma_rx_complete;
desc_rx->callback_param = data;
dma->nent = num;
@@ -1078,20 +1078,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
- dev_err(&data->master->dev,
+ dev_err(&data->host->dev,
"%s:dmaengine_prep_slave_sg Failed\n", __func__);
return;
}
- dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(&data->host->dev, sg, num, DMA_TO_DEVICE);
desc_tx->callback = NULL;
desc_tx->callback_param = data;
dma->nent = num;
dma->desc_tx = desc_tx;
- dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
+ dev_dbg(&data->host->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_LOW);
desc_rx->tx_submit(desc_rx);
desc_tx->tx_submit(desc_tx);
spin_unlock_irqrestore(&data->lock, flags);
@@ -1107,12 +1107,12 @@ static void pch_spi_process_messages(struct work_struct *pwork)
int bpw;
data = container_of(pwork, struct pch_spi_data, work);
- dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
+ dev_dbg(&data->host->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s suspend/remove initiated, flushing queue\n", __func__);
list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
@@ -1132,7 +1132,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
}
data->bcurrent_msg_processing = true;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s Set data->bcurrent_msg_processing= true\n", __func__);
/* Get the message from the queue and delete it from there. */
@@ -1150,7 +1150,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->use_dma)
pch_spi_request_dma(data,
data->current_msg->spi->bits_per_word);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_NO_CONTROL);
do {
int cnt;
/* If we are already processing a message get the next
@@ -1161,14 +1161,14 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->cur_trans =
list_entry(data->current_msg->transfers.next,
struct spi_transfer, transfer_list);
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s :Getting 1st transfer message\n",
__func__);
} else {
data->cur_trans =
list_entry(data->cur_trans->transfer_list.next,
struct spi_transfer, transfer_list);
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s :Getting next transfer message\n",
__func__);
}
@@ -1210,7 +1210,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
data->cur_trans->len = data->save_total_len;
data->current_msg->actual_length += data->cur_trans->len;
- dev_dbg(&data->master->dev,
+ dev_dbg(&data->host->dev,
"%s:data->current_msg->actual_length=%d\n",
__func__, data->current_msg->actual_length);
@@ -1229,7 +1229,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
} while (data->cur_trans != NULL);
out:
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
+ pch_spi_writereg(data->host, PCH_SSNXCR, SSN_HIGH);
if (data->use_dma)
pch_spi_release_dma(data);
}
@@ -1248,7 +1248,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* reset PCH SPI h/w */
- pch_spi_reset(data->master);
+ pch_spi_reset(data->host);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
@@ -1297,22 +1297,22 @@ static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
int ret;
- struct spi_master *master;
+ struct spi_controller *host;
struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
struct pch_spi_data *data;
dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
- master = spi_alloc_master(&board_dat->pdev->dev,
+ host = spi_alloc_host(&board_dat->pdev->dev,
sizeof(struct pch_spi_data));
- if (!master) {
- dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+ if (!host) {
+ dev_err(&plat_dev->dev, "spi_alloc_host[%d] failed.\n",
plat_dev->id);
return -ENOMEM;
}
- data = spi_master_get_devdata(master);
- data->master = master;
+ data = spi_controller_get_devdata(host);
+ data->host = host;
platform_set_drvdata(plat_dev, data);
@@ -1330,13 +1330,13 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
plat_dev->id, data->io_remap_addr);
- /* initialize members of SPI master */
- master->num_chipselect = PCH_MAX_CS;
- master->transfer = pch_spi_transfer;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- master->max_speed_hz = PCH_MAX_BAUDRATE;
- master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ /* initialize members of SPI host */
+ host->num_chipselect = PCH_MAX_CS;
+ host->transfer = pch_spi_transfer;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ host->max_speed_hz = PCH_MAX_BAUDRATE;
+ host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
data->board_dat = board_dat;
data->plat_dev = plat_dev;
@@ -1365,25 +1365,25 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
}
data->irq_reg_sts = true;
- pch_spi_set_master_mode(master);
+ pch_spi_set_host_mode(host);
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
ret = pch_alloc_dma_buf(board_dat, data);
if (ret)
- goto err_spi_register_master;
+ goto err_spi_register_controller;
}
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret != 0) {
dev_err(&plat_dev->dev,
- "%s spi_register_master FAILED\n", __func__);
- goto err_spi_register_master;
+ "%s spi_register_controller FAILED\n", __func__);
+ goto err_spi_register_controller;
}
return 0;
-err_spi_register_master:
+err_spi_register_controller:
pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
@@ -1391,7 +1391,7 @@ err_request_irq:
err_spi_get_resources:
pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
@@ -1427,13 +1427,13 @@ static void pch_spi_pd_remove(struct platform_device *plat_dev)
/* disable interrupts & free IRQ */
if (data->irq_reg_sts) {
/* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL);
data->irq_reg_sts = false;
free_irq(board_dat->pdev->irq, data);
}
pci_iounmap(board_dat->pdev, data->io_remap_addr);
- spi_unregister_master(data->master);
+ spi_unregister_controller(data->host);
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
@@ -1463,8 +1463,8 @@ static int pch_spi_pd_suspend(struct platform_device *pd_dev,
/* Free IRQ */
if (data->irq_reg_sts) {
/* disable all interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
- pch_spi_reset(data->master);
+ pch_spi_setclr_reg(data->host, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_reset(data->host);
free_irq(board_dat->pdev->irq, data);
data->irq_reg_sts = false;
@@ -1498,8 +1498,8 @@ static int pch_spi_pd_resume(struct platform_device *pd_dev)
}
/* reset PCH SPI h/w */
- pch_spi_reset(data->master);
- pch_spi_set_master_mode(data->master);
+ pch_spi_reset(data->host);
+ pch_spi_set_host_mode(data->host);
data->irq_reg_sts = true;
}
return 0;
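Throughout spi-topcliff-pch.c the worker and the interrupt handler synchronize through a plain flag and a wait queue rather than a struct completion. A stripped-down sketch of that handshake, condensed from pch_spi_set_ir() and pch_spi_handler_sub() above:

	/* worker side: arm the flag, enable interrupts, then sleep */
	data->transfer_complete = false;
	/* ... program SPCR so the transfer starts ... */
	wait_event_interruptible(data->wait, data->transfer_complete);

	/* IRQ side: on the final-interrupt bit, mark done and wake the worker */
	data->transfer_complete = true;
	data->transfer_active = false;
	wake_up(&data->wait);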
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index f5344527a..4a18cf896 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -26,7 +26,7 @@ struct uniphier_spi_priv {
void __iomem *base;
dma_addr_t base_dma_addr;
struct clk *clk;
- struct spi_master *master;
+ struct spi_controller *host;
struct completion xfer_done;
int error;
@@ -127,7 +127,7 @@ static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
static void uniphier_spi_set_mode(struct spi_device *spi)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val1, val2;
/*
@@ -180,7 +180,7 @@ static void uniphier_spi_set_mode(struct spi_device *spi)
static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val;
val = readl(priv->base + SSI_TXWDS);
@@ -198,7 +198,7 @@ static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
static void uniphier_spi_set_baudrate(struct spi_device *spi,
unsigned int speed)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val, ckdiv;
/*
@@ -217,7 +217,7 @@ static void uniphier_spi_set_baudrate(struct spi_device *spi,
static void uniphier_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val;
priv->error = 0;
@@ -333,7 +333,7 @@ static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(spi->controller);
u32 val;
val = readl(priv->base + SSI_FPS);
@@ -346,16 +346,16 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}
-static bool uniphier_spi_can_dma(struct spi_master *master,
+static bool uniphier_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
unsigned int bpw = bytes_per_word(priv->bits_per_word);
- if ((!master->dma_tx && !master->dma_rx)
- || (!master->dma_tx && t->tx_buf)
- || (!master->dma_rx && t->rx_buf))
+ if ((!host->dma_tx && !host->dma_rx)
+ || (!host->dma_tx && t->tx_buf)
+ || (!host->dma_rx && t->rx_buf))
return false;
return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
@@ -363,33 +363,33 @@ static bool uniphier_spi_can_dma(struct spi_master *master,
static void uniphier_spi_dma_rxcb(void *data)
{
- struct spi_master *master = data;
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct spi_controller *host = data;
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
if (!(state & SSI_DMA_TX_BUSY))
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
static void uniphier_spi_dma_txcb(void *data)
{
- struct spi_master *master = data;
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct spi_controller *host = data;
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
if (!(state & SSI_DMA_RX_BUSY))
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
-static int uniphier_spi_transfer_one_dma(struct spi_master *master,
+static int uniphier_spi_transfer_one_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
int buswidth;
@@ -412,23 +412,23 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master,
.src_maxburst = SSI_FIFO_BURST_NUM,
};
- dmaengine_slave_config(master->dma_rx, &rxconf);
+ dmaengine_slave_config(host->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
- master->dma_rx,
+ host->dma_rx,
t->rx_sg.sgl, t->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto out_err_prep;
rxdesc->callback = uniphier_spi_dma_rxcb;
- rxdesc->callback_param = master;
+ rxdesc->callback_param = host;
uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);
dmaengine_submit(rxdesc);
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(host->dma_rx);
}
if (priv->tx_buf) {
@@ -439,23 +439,23 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master,
.dst_maxburst = SSI_FIFO_BURST_NUM,
};
- dmaengine_slave_config(master->dma_tx, &txconf);
+ dmaengine_slave_config(host->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(
- master->dma_tx,
+ host->dma_tx,
t->tx_sg.sgl, t->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto out_err_prep;
txdesc->callback = uniphier_spi_dma_txcb;
- txdesc->callback_param = master;
+ txdesc->callback_param = host;
uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);
dmaengine_submit(txdesc);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(host->dma_tx);
}
/* signal that we need to wait for completion */
@@ -463,17 +463,17 @@ static int uniphier_spi_transfer_one_dma(struct spi_master *master,
out_err_prep:
if (rxdesc)
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(host->dma_rx);
return -EINVAL;
}
-static int uniphier_spi_transfer_one_irq(struct spi_master *master,
+static int uniphier_spi_transfer_one_irq(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
- struct device *dev = master->dev.parent;
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
+ struct device *dev = host->dev.parent;
unsigned long time_left;
reinit_completion(&priv->xfer_done);
@@ -495,11 +495,11 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master,
return priv->error;
}
-static int uniphier_spi_transfer_one_poll(struct spi_master *master,
+static int uniphier_spi_transfer_one_poll(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
int loop = SSI_POLL_TIMEOUT_US * 10;
while (priv->tx_bytes) {
@@ -520,14 +520,14 @@ static int uniphier_spi_transfer_one_poll(struct spi_master *master,
return 0;
irq_transfer:
- return uniphier_spi_transfer_one_irq(master, spi, t);
+ return uniphier_spi_transfer_one_irq(host, spi, t);
}
-static int uniphier_spi_transfer_one(struct spi_master *master,
+static int uniphier_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
unsigned long threshold;
bool use_dma;
@@ -537,9 +537,9 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
uniphier_spi_setup_transfer(spi, t);
- use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
+ use_dma = host->can_dma ? host->can_dma(host, spi, t) : false;
if (use_dma)
- return uniphier_spi_transfer_one_dma(master, spi, t);
+ return uniphier_spi_transfer_one_dma(host, spi, t);
/*
* If the transfer operation will take longer than
@@ -548,33 +548,33 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
USEC_PER_SEC * BITS_PER_BYTE);
if (t->len > threshold)
- return uniphier_spi_transfer_one_irq(master, spi, t);
+ return uniphier_spi_transfer_one_irq(host, spi, t);
else
- return uniphier_spi_transfer_one_poll(master, spi, t);
+ return uniphier_spi_transfer_one_poll(host, spi, t);
}
-static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
+static int uniphier_spi_prepare_transfer_hardware(struct spi_controller *host)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
writel(SSI_CTL_EN, priv->base + SSI_CTL);
return 0;
}
-static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
+static int uniphier_spi_unprepare_transfer_hardware(struct spi_controller *host)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
writel(0, priv->base + SSI_CTL);
return 0;
}
-static void uniphier_spi_handle_err(struct spi_master *master,
+static void uniphier_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
u32 val;
/* stop running spi transfer */
@@ -587,12 +587,12 @@ static void uniphier_spi_handle_err(struct spi_master *master,
uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);
if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
- dmaengine_terminate_async(master->dma_tx);
+ dmaengine_terminate_async(host->dma_tx);
atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
}
if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
- dmaengine_terminate_async(master->dma_rx);
+ dmaengine_terminate_async(host->dma_rx);
atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
}
}
@@ -641,7 +641,7 @@ done:
static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *res;
struct dma_slave_caps caps;
u32 dma_tx_burst = 0, dma_rx_burst = 0;
@@ -649,20 +649,20 @@ static int uniphier_spi_probe(struct platform_device *pdev)
int irq;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*priv));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*priv));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- priv = spi_master_get_devdata(master);
- priv->master = master;
+ priv = spi_controller_get_devdata(host);
+ priv->host = host;
priv->is_save_param = false;
priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
- goto out_master_put;
+ goto out_host_put;
}
priv->base_dma_addr = res->start;
@@ -670,12 +670,12 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
ret = PTR_ERR(priv->clk);
- goto out_master_put;
+ goto out_host_put;
}
ret = clk_prepare_enable(priv->clk);
if (ret)
- goto out_master_put;
+ goto out_host_put;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -694,35 +694,35 @@ static int uniphier_spi_probe(struct platform_device *pdev)
clk_rate = clk_get_rate(priv->clk);
- master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
- master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ host->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
+ host->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->set_cs = uniphier_spi_set_cs;
- master->transfer_one = uniphier_spi_transfer_one;
- master->prepare_transfer_hardware
+ host->set_cs = uniphier_spi_set_cs;
+ host->transfer_one = uniphier_spi_transfer_one;
+ host->prepare_transfer_hardware
= uniphier_spi_prepare_transfer_hardware;
- master->unprepare_transfer_hardware
+ host->unprepare_transfer_hardware
= uniphier_spi_unprepare_transfer_hardware;
- master->handle_err = uniphier_spi_handle_err;
- master->can_dma = uniphier_spi_can_dma;
+ host->handle_err = uniphier_spi_handle_err;
+ host->can_dma = uniphier_spi_can_dma;
- master->num_chipselect = 1;
- master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ host->num_chipselect = 1;
+ host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
- master->dma_tx = dma_request_chan(&pdev->dev, "tx");
- if (IS_ERR_OR_NULL(master->dma_tx)) {
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
+ host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR_OR_NULL(host->dma_tx)) {
+ if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_disable_clk;
}
- master->dma_tx = NULL;
+ host->dma_tx = NULL;
dma_tx_burst = INT_MAX;
} else {
- ret = dma_get_slave_caps(master->dma_tx, &caps);
+ ret = dma_get_slave_caps(host->dma_tx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
ret);
@@ -731,16 +731,16 @@ static int uniphier_spi_probe(struct platform_device *pdev)
dma_tx_burst = caps.max_burst;
}
- master->dma_rx = dma_request_chan(&pdev->dev, "rx");
- if (IS_ERR_OR_NULL(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
+ host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR_OR_NULL(host->dma_rx)) {
+ if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_release_dma;
}
- master->dma_rx = NULL;
+ host->dma_rx = NULL;
dma_rx_burst = INT_MAX;
} else {
- ret = dma_get_slave_caps(master->dma_rx, &caps);
+ ret = dma_get_slave_caps(host->dma_rx, &caps);
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
ret);
@@ -749,41 +749,41 @@ static int uniphier_spi_probe(struct platform_device *pdev)
dma_rx_burst = caps.max_burst;
}
- master->max_dma_len = min(dma_tx_burst, dma_rx_burst);
+ host->max_dma_len = min(dma_tx_burst, dma_rx_burst);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
goto out_release_dma;
return 0;
out_release_dma:
- if (!IS_ERR_OR_NULL(master->dma_rx)) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (!IS_ERR_OR_NULL(host->dma_rx)) {
+ dma_release_channel(host->dma_rx);
+ host->dma_rx = NULL;
}
- if (!IS_ERR_OR_NULL(master->dma_tx)) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (!IS_ERR_OR_NULL(host->dma_tx)) {
+ dma_release_channel(host->dma_tx);
+ host->dma_tx = NULL;
}
out_disable_clk:
clk_disable_unprepare(priv->clk);
-out_master_put:
- spi_master_put(master);
+out_host_put:
+ spi_controller_put(host);
return ret;
}
static void uniphier_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct uniphier_spi_priv *priv = spi_controller_get_devdata(host);
- if (master->dma_tx)
- dma_release_channel(master->dma_tx);
- if (master->dma_rx)
- dma_release_channel(master->dma_rx);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
clk_disable_unprepare(priv->clk);
}
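The uniphier conversion above is the template for this whole series: every spi_master helper has a spi_controller twin, and the legacy names are thin wrappers around the same allocator, so the change is a pure rename with no behavioural difference. A simplified sketch of that relationship, condensed from include/linux/spi/spi.h (both wrappers route through __spi_alloc_controller()):

static inline struct spi_controller *spi_alloc_host(struct device *dev,
						    unsigned int size)
{
	/* false: allocate the controller in host (formerly "master") role */
	return __spi_alloc_controller(dev, size, false);
}

/* The deprecated name makes exactly the same call, which is why converting
 * a driver is a mechanical rename. */
static inline struct spi_controller *spi_alloc_master(struct device *dev,
						      unsigned int size)
{
	return __spi_alloc_controller(dev, size, false);
}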
diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c
index 852ffe013..6b16a22cc 100644
--- a/drivers/spi/spi-wpcm-fiu.c
+++ b/drivers/spi/spi-wpcm-fiu.c
@@ -361,7 +361,7 @@ static int wpcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
wpcm_fiu_stall_host(fiu, false);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
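The errno swap above follows the spi-mem convention: exec_op() returns -EOPNOTSUPP for operations the controller cannot perform, so the core can fall back to another path, while -ENOTSUPP is a kernel-internal value that should not reach user space. A minimal sketch of the pattern (example_op_supported() is a hypothetical helper, not part of this driver):

static int example_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (!example_op_supported(op))	/* hypothetical capability check */
		return -EOPNOTSUPP;	/* lets the spi-mem core fall back */

	/* ... issue the operation ... */
	return 0;
}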
static int wpcm_fiu_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
@@ -441,7 +441,7 @@ static int wpcm_fiu_probe(struct platform_device *pdev)
struct wpcm_fiu_spi *fiu;
struct resource *res;
- ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
+ ctrl = devm_spi_alloc_host(dev, sizeof(*fiu));
if (!ctrl)
return -ENOMEM;
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index a3d57554f..63354dd31 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -132,10 +132,10 @@ static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
return t->len;
}
-static int spi_xcomm_transfer_one(struct spi_master *master,
+static int spi_xcomm_transfer_one(struct spi_controller *host,
struct spi_message *msg)
{
- struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master);
+ struct spi_xcomm *spi_xcomm = spi_controller_get_devdata(host);
unsigned int settings = spi_xcomm->settings;
struct spi_device *spi = msg->spi;
unsigned cs_change = 0;
@@ -197,7 +197,7 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
spi_xcomm_chipselect(spi_xcomm, spi, false);
msg->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return status;
}
@@ -205,27 +205,27 @@ static int spi_xcomm_transfer_one(struct spi_master *master,
static int spi_xcomm_probe(struct i2c_client *i2c)
{
struct spi_xcomm *spi_xcomm;
- struct spi_master *master;
+ struct spi_controller *host;
int ret;
- master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm));
- if (!master)
+ host = spi_alloc_host(&i2c->dev, sizeof(*spi_xcomm));
+ if (!host)
return -ENOMEM;
- spi_xcomm = spi_master_get_devdata(master);
+ spi_xcomm = spi_controller_get_devdata(host);
spi_xcomm->i2c = i2c;
- master->num_chipselect = 16;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->flags = SPI_CONTROLLER_HALF_DUPLEX;
- master->transfer_one_message = spi_xcomm_transfer_one;
- master->dev.of_node = i2c->dev.of_node;
- i2c_set_clientdata(i2c, master);
+ host->num_chipselect = 16;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->transfer_one_message = spi_xcomm_transfer_one;
+ host->dev.of_node = i2c->dev.of_node;
+ i2c_set_clientdata(i2c, host);
- ret = devm_spi_register_master(&i2c->dev, master);
+ ret = devm_spi_register_controller(&i2c->dev, host);
if (ret < 0)
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 8e6e3876a..12355957b 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Xilinx SPI controller driver (master mode only)
+ * Xilinx SPI controller driver (host mode only)
*
* Author: MontaVista Software, Inc.
* source@mvista.com
@@ -83,7 +83,7 @@ struct xilinx_spi {
void __iomem *regs; /* virt. address of the control registers */
int irq;
- bool force_irq; /* force irq to setup master inhibit */
+ bool force_irq; /* force irq to setup host inhibit */
u8 *rx_ptr; /* pointer in the Rx buffer */
const u8 *tx_ptr; /* pointer in the Tx buffer */
u8 bytes_per_word;
@@ -174,10 +174,10 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- /* Deselect the slave on the SPI bus */
+ /* Deselect the target on the SPI bus */
xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
- /* Disable the transmitter, enable Manual Slave Select Assertion,
- * put SPI controller into master mode, and enable it */
+ /* Disable the transmitter, enable Manual Target Select Assertion,
+ * put SPI controller into host mode, and enable it */
xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
regs_base + XSPI_CR_OFFSET);
@@ -185,12 +185,12 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller);
u16 cr;
u32 cs;
if (is_on == BITBANG_CS_INACTIVE) {
- /* Deselect the slave on the SPI bus */
+ /* Deselect the target on the SPI bus */
xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
return;
}
@@ -225,7 +225,7 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller);
if (spi->mode & SPI_CS_HIGH)
xspi->cs_inactive &= ~BIT(spi_get_chipselect(spi, 0));
@@ -237,7 +237,7 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(spi->controller);
int remaining_words; /* the number of words left to transfer */
bool use_irq = false;
u16 cr = 0;
@@ -335,9 +335,9 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
}
-/* This driver supports single master mode only. Hence Tx FIFO Empty
+/* This driver supports single host mode only. Hence Tx FIFO Empty
* is the only interrupt we care about.
- * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Target Mode
* Fault are not expected to happen.
*/
static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
@@ -393,7 +393,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
struct xspi_platform_data *pdata;
struct resource *res;
int ret, num_cs = 0, bits_per_word;
- struct spi_master *master;
+ struct spi_controller *host;
bool force_irq = false;
u32 tmp;
u8 i;
@@ -415,26 +415,26 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (!num_cs) {
dev_err(&pdev->dev,
- "Missing slave select configuration data\n");
+ "Missing target select configuration data\n");
return -EINVAL;
}
if (num_cs > XILINX_SPI_MAX_CS) {
- dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ dev_err(&pdev->dev, "Invalid number of spi targets\n");
return -EINVAL;
}
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(struct xilinx_spi));
+ if (!host)
return -ENODEV;
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
- SPI_CS_HIGH;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+ SPI_CS_HIGH;
- xspi = spi_master_get_devdata(master);
+ xspi = spi_controller_get_devdata(host);
xspi->cs_inactive = 0xffffffff;
- xspi->bitbang.master = master;
+ xspi->bitbang.master = host;
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
@@ -444,9 +444,9 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (IS_ERR(xspi->regs))
return PTR_ERR(xspi->regs);
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
- master->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->num_chipselect = num_cs;
+ host->dev.of_node = pdev->dev.of_node;
/*
* Detect endianess on the IP via loop bit in CR. Detection
@@ -466,7 +466,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
- master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
+ host->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
xspi->bytes_per_word = bits_per_word / 8;
xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
@@ -496,17 +496,17 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
- spi_new_device(master, pdata->devices + i);
+ spi_new_device(host, pdata->devices + i);
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
return 0;
}
static void xilinx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct xilinx_spi *xspi = spi_controller_get_devdata(host);
void __iomem *regs_base = xspi->regs;
spi_bitbang_stop(&xspi->bitbang);
@@ -516,7 +516,7 @@ static void xilinx_spi_remove(struct platform_device *pdev)
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- spi_master_put(xspi->bitbang.master);
+ spi_controller_put(xspi->bitbang.master);
}
/* work with hotplug and coldplug */
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 3b91cdd5a..49302364b 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -95,7 +95,7 @@ struct xlp_spi_priv {
int rx_len; /* rx xfer length */
int txerrors; /* TXFIFO underflow count */
int rxerrors; /* RXFIFO overflow count */
- int cs; /* slave device chip select */
+ int cs; /* target device chip select */
u32 spi_clk; /* spi clock frequency */
bool cmd_cont; /* cs active */
struct completion done; /* completion notification */
@@ -138,7 +138,7 @@ static int xlp_spi_setup(struct spi_device *spi)
u32 fdiv, cfg;
int cs;
- xspi = spi_master_get_devdata(spi->master);
+ xspi = spi_controller_get_devdata(spi->controller);
cs = spi_get_chipselect(spi, 0);
/*
* The value of fdiv must be between 4 and 65535.
@@ -343,17 +343,17 @@ static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
return bytesleft;
}
-static int xlp_spi_transfer_one(struct spi_master *master,
+static int xlp_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
+ struct xlp_spi_priv *xspi = spi_controller_get_devdata(host);
int ret = 0;
xspi->cs = spi_get_chipselect(spi, 0);
xspi->dev = spi->dev;
- if (spi_transfer_is_last(master, t))
+ if (spi_transfer_is_last(host, t))
xspi->cmd_cont = 0;
else
xspi->cmd_cont = 1;
@@ -361,13 +361,13 @@ static int xlp_spi_transfer_one(struct spi_master *master,
if (xlp_spi_txrx_bufs(xspi, t))
ret = -EIO;
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return ret;
}
static int xlp_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct xlp_spi_priv *xspi;
struct clk *clk;
int irq, err;
@@ -398,28 +398,28 @@ static int xlp_spi_probe(struct platform_device *pdev)
xspi->spi_clk = clk_get_rate(clk);
- master = spi_alloc_master(&pdev->dev, 0);
- if (!master) {
- dev_err(&pdev->dev, "could not alloc master\n");
+ host = spi_alloc_host(&pdev->dev, 0);
+ if (!host) {
+ dev_err(&pdev->dev, "could not alloc host\n");
return -ENOMEM;
}
- master->bus_num = 0;
- master->num_chipselect = XLP_SPI_MAX_CS;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->setup = xlp_spi_setup;
- master->transfer_one = xlp_spi_transfer_one;
- master->dev.of_node = pdev->dev.of_node;
+ host->bus_num = 0;
+ host->num_chipselect = XLP_SPI_MAX_CS;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->setup = xlp_spi_setup;
+ host->transfer_one = xlp_spi_transfer_one;
+ host->dev.of_node = pdev->dev.of_node;
init_completion(&xspi->done);
- spi_master_set_devdata(master, xspi);
+ spi_controller_set_devdata(host, xspi);
xlp_spi_sysctl_setup(xspi);
/* register spi controller */
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
- dev_err(&pdev->dev, "spi register master failed!\n");
- spi_master_put(master);
+ dev_err(&pdev->dev, "spi register host failed!\n");
+ spi_controller_put(host);
return err;
}
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index dbd85d7a1..3c7721894 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -53,7 +53,7 @@ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
u32 v, u8 bits, unsigned flags)
{
- struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xtfpga_spi *xspi = spi_controller_get_devdata(spi->controller);
xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
xspi->data_sz += bits;
@@ -71,7 +71,7 @@ static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
{
- struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+ struct xtfpga_spi *xspi = spi_controller_get_devdata(spi->controller);
WARN_ON(xspi->data_sz != 0);
xspi->data_sz = 0;
@@ -81,19 +81,19 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
{
struct xtfpga_spi *xspi;
int ret;
- struct spi_master *master;
+ struct spi_controller *host;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!host)
return -ENOMEM;
- master->flags = SPI_CONTROLLER_NO_RX;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
- master->bus_num = pdev->dev.id;
- master->dev.of_node = pdev->dev.of_node;
+ host->flags = SPI_CONTROLLER_NO_RX;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ host->bus_num = pdev->dev.id;
+ host->dev.of_node = pdev->dev.of_node;
- xspi = spi_master_get_devdata(master);
- xspi->bitbang.master = master;
+ xspi = spi_controller_get_devdata(host);
+ xspi->bitbang.master = host;
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -113,17 +113,17 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
return 0;
}
static void xtfpga_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_controller_get_devdata(host);
spi_bitbang_stop(&xspi->bitbang);
- spi_master_put(master);
+ spi_controller_put(host);
}
MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 0db69a2a7..d6325c6be 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -54,10 +54,10 @@
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master Mode */
/*
- * QSPI Configuration Register - Baud rate and slave select
+ * QSPI Configuration Register - Baud rate and target select
*
* These are the values used in the calculation of baud rate divisor and
- * setting the slave select.
+ * setting the target select.
*/
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
@@ -164,14 +164,14 @@ static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
- * - Master mode
+ * - Host mode
* - Baud rate divisor is set to 2
* - Tx threshold set to 1, Rx threshold set to 32
* - Flash memory interface mode enabled
* - Size of the word to be transferred as 8 bit
* This function performs the following actions
* - Disable and clear all the interrupts
- * - Enable manual slave select
+ * - Enable manual target select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the size of the word to be transferred as 32 bit
@@ -289,7 +289,7 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
*/
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
- struct spi_controller *ctlr = spi->master;
+ struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
u32 config_reg;
@@ -377,7 +377,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
- struct spi_controller *ctlr = spi->master;
+ struct spi_controller *ctlr = spi->controller;
struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
if (ctlr->busy)
@@ -525,7 +525,7 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
+ struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->controller);
int err = 0, i;
u8 *tmpbuf;
@@ -637,7 +637,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
struct zynq_qspi *xqspi;
u32 num_cs;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
@@ -647,14 +647,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
- goto remove_master;
+ goto remove_ctlr;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
- goto remove_master;
+ goto remove_ctlr;
}
init_completion(&xqspi->data_completion);
@@ -663,13 +663,13 @@ static int zynq_qspi_probe(struct platform_device *pdev)
if (IS_ERR(xqspi->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable APB clock.\n");
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->refclk);
@@ -715,7 +715,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
- dev_err(&pdev->dev, "spi_register_master failed\n");
+ dev_err(&pdev->dev, "devm_spi_register_controller failed\n");
goto clk_dis_all;
}
@@ -725,7 +725,7 @@ clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
-remove_master:
+remove_ctlr:
spi_controller_put(ctlr);
return ret;
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 9a46b2478..99524a3c9 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver
- * (master mode only)
+ * (host mode only)
*
* Copyright (C) 2009 - 2015 Xilinx, Inc.
*/
@@ -235,21 +235,21 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
}
/**
- * zynqmp_gqspi_selectslave - For selection of slave device
+ * zynqmp_gqspi_selecttarget - For selection of target device
* @instanceptr: Pointer to the zynqmp_qspi structure
- * @slavecs: For chip select
- * @slavebus: To check which bus is selected- upper or lower
+ * @targetcs: For chip select
+ * @targetbus: To check which bus is selected- upper or lower
*/
-static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
- u8 slavecs, u8 slavebus)
+static void zynqmp_gqspi_selecttarget(struct zynqmp_qspi *instanceptr,
+ u8 targetcs, u8 targetbus)
{
/*
* Bus and CS lines selected here will be updated in the instance and
* used for subsequent GENFIFO entries during transfer.
*/
- /* Choose slave select line */
- switch (slavecs) {
+ /* Choose target select line */
+ switch (targetcs) {
case GQSPI_SELECT_FLASH_CS_BOTH:
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
GQSPI_GENFIFO_CS_UPPER;
@@ -261,11 +261,11 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER;
break;
default:
- dev_warn(instanceptr->dev, "Invalid slave select\n");
+ dev_warn(instanceptr->dev, "Invalid target select\n");
}
/* Choose the bus */
- switch (slavebus) {
+ switch (targetbus) {
case GQSPI_SELECT_FLASH_BUS_BOTH:
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER |
GQSPI_GENFIFO_BUS_UPPER;
@@ -277,7 +277,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
break;
default:
- dev_warn(instanceptr->dev, "Invalid slave bus\n");
+ dev_warn(instanceptr->dev, "Invalid target bus\n");
}
}
@@ -337,13 +337,13 @@ static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
*
* The default settings of the QSPI controller's configurable parameters on
* reset are
- * - Master mode
+ * - Host mode
* - TX threshold set to 1
* - RX threshold set to 1
* - Flash memory interface mode enabled
* This function performs the following actions
* - Disable and clear all the interrupts
- * - Enable manual slave select
+ * - Enable manual target select
* - Enable manual start
* - Deselect all the chip select lines
* - Set the little endian mode of TX FIFO
@@ -426,9 +426,9 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
GQSPI_RX_FIFO_THRESHOLD);
zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST,
GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL);
- zynqmp_gqspi_selectslave(xqspi,
- GQSPI_SELECT_FLASH_CS_LOWER,
- GQSPI_SELECT_FLASH_BUS_LOWER);
+ zynqmp_gqspi_selecttarget(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
/* Initialize DMA */
zynqmp_gqspi_write(xqspi,
GQSPI_QSPIDMA_DST_CTRL_OFST,
@@ -459,7 +459,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
*/
static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata(qspi->controller);
ulong timeout;
u32 genfifoentry = 0, statusreg;
@@ -594,7 +594,7 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
*/
static int zynqmp_qspi_setup_op(struct spi_device *qspi)
{
- struct spi_controller *ctlr = qspi->master;
+ struct spi_controller *ctlr = qspi->controller;
struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
if (ctlr->busy)
@@ -1048,7 +1048,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct zynqmp_qspi *xqspi = spi_controller_get_devdata
- (mem->spi->master);
+ (mem->spi->controller);
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
@@ -1224,7 +1224,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
u32 num_cs;
const struct qspi_platform_data *p_data;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*xqspi));
if (!ctlr)
return -ENOMEM;
@@ -1240,27 +1240,27 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
- goto remove_master;
+ goto remove_ctlr;
}
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(dev, "pclk clock not found.\n");
ret = PTR_ERR(xqspi->pclk);
- goto remove_master;
+ goto remove_ctlr;
}
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(dev, "ref_clk clock not found.\n");
ret = PTR_ERR(xqspi->refclk);
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->pclk);
if (ret) {
dev_err(dev, "Unable to enable APB clock.\n");
- goto remove_master;
+ goto remove_ctlr;
}
ret = clk_prepare_enable(xqspi->refclk);
@@ -1346,7 +1346,7 @@ clk_dis_all:
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
-remove_master:
+remove_ctlr:
spi_controller_put(ctlr);
return ret;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4adc56dab..46f153548 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -612,10 +612,21 @@ static int spi_dev_check(struct device *dev, void *data)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
-
- if (spi->controller == new_spi->controller &&
- spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
- return -EBUSY;
+ int idx, nw_idx;
+ u8 cs, cs_nw;
+
+ if (spi->controller == new_spi->controller) {
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
+ cs_nw = spi_get_chipselect(new_spi, nw_idx);
+ if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
+ dev_err(dev, "chipselect %d already in use\n", cs_nw);
+ return -EBUSY;
+ }
+ }
+ }
+ }
return 0;
}
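The reworked spi_dev_check() compares every logical CS of the device being added against every logical CS of each device already on the controller, treating 0xFF as an unused slot that never collides. A standalone sketch of that rule, assuming SPI_CS_CNT_MAX is 4 as in this release (cs_a and cs_b stand in for the two chip_select[] arrays):

#include <stdbool.h>
#include <stdint.h>

#define SPI_CS_CNT_MAX 4	/* assumption: mirrors the kernel's limit */

static bool cs_conflict(const uint8_t *cs_a, const uint8_t *cs_b)
{
	/* Any physical CS claimed by both devices is a conflict (-EBUSY). */
	for (int i = 0; i < SPI_CS_CNT_MAX; i++)
		for (int j = 0; j < SPI_CS_CNT_MAX; j++)
			if (cs_a[i] != 0xFF && cs_b[j] != 0xFF &&
			    cs_a[i] == cs_b[j])
				return true;
	return false;
}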
@@ -629,13 +640,32 @@ static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
- int status;
+ int status, idx, nw_idx;
+ u8 cs, nw_cs;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ /* Chipselects are numbered 0..max; validate. */
+ cs = spi_get_chipselect(spi, idx);
+ if (cs != 0xFF && cs >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+ }
- /* Chipselects are numbered 0..max; validate. */
- if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
- ctlr->num_chipselect);
- return -EINVAL;
+ /*
+ * Make sure that no two logical CS map to the same physical CS; for
+ * example, spi->chip_select[0] != spi->chip_select[1] and so on.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
+ nw_cs = spi_get_chipselect(spi, nw_idx);
+ if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
+ dev_err(dev, "chipselect %d already in use\n", nw_cs);
+ return -EBUSY;
+ }
+ }
}
/* Set the bus ID string */
@@ -647,11 +677,8 @@ static int __spi_add_device(struct spi_device *spi)
* its configuration.
*/
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
- if (status) {
- dev_err(dev, "chipselect %d already in use\n",
- spi_get_chipselect(spi, 0));
+ if (status)
return status;
- }
/* Controller may unregister concurrently */
if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
@@ -659,8 +686,15 @@ static int __spi_add_device(struct spi_device *spi)
return -ENODEV;
}
- if (ctlr->cs_gpiods)
- spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
+ if (ctlr->cs_gpiods) {
+ u8 cs;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ cs = spi_get_chipselect(spi, idx);
+ if (cs != 0xFF)
+ spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
+ }
+ }
/*
* Drivers may modify this initial i/o setup, but will
@@ -701,6 +735,9 @@ int spi_add_device(struct spi_device *spi)
struct spi_controller *ctlr = spi->controller;
int status;
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
mutex_lock(&ctlr->add_lock);
status = __spi_add_device(spi);
mutex_unlock(&ctlr->add_lock);
@@ -727,6 +764,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
{
struct spi_device *proxy;
int status;
+ u8 idx;
/*
* NOTE: caller did any chip->bus_num checks necessary.
@@ -742,6 +780,18 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
+ /*
+ * Zero (0) is a valid physical CS value and can sit in any logical CS
+ * slot of spi->chip_select[]. If every slot were initialized to 0, a
+ * valid physical CS 0 could not be told apart from an unused logical CS
+ * whose value merely happens to be 0. To avoid that ambiguity, initialize
+ * every slot to 0xFF; unused logical CS slots then carry 0xFF and can be
+ * ignored during physical CS validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(proxy, idx, 0xFF);
+
spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
@@ -750,6 +800,15 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A logical
+ * CS is selected by setting bit i in spi->cs_index_mask; for example,
+ * cs_index_mask = 0x01 selects logical CS 0, whose physical CS comes from
+ * spi->chip_select[0]. By default chip_select[0] holds the physical CS
+ * number, so set cs_index_mask to 0x01.
+ */
+ proxy->cs_index_mask = 0x01;
if (chip->swnode) {
status = device_add_software_node(&proxy->dev, chip->swnode);
@@ -942,32 +1001,55 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes
}
/*-------------------------------------------------------------------------*/
+static inline bool spi_is_last_cs(struct spi_device *spi)
+{
+ u8 idx;
+ bool last = false;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if ((spi->cs_index_mask >> idx) & 0x01) {
+ if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
+ last = true;
+ }
+ }
+ return last;
+}
+
static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
bool activate = enable;
+ u8 idx;
/*
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
- (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
+ if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
+ spi_is_last_cs(spi)) ||
+ (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
+ !spi_is_last_cs(spi))) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
- spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
+ spi->controller->last_cs_index_mask = spi->cs_index_mask;
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
- spi_delay_exec(&spi->cs_hold, NULL);
-
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi_get_csgpiod(spi, 0)) {
+ /*
+ * Handle chip select delays for GPIO based CS or controllers without
+ * programmable chip select timing.
+ */
+ if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+
+ if (spi_is_csgpiod(spi)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -979,11 +1061,23 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* ambiguity. That's why we use enable, that takes SPI_CS_HIGH
* into account.
*/
- if (has_acpi_companion(&spi->dev))
- gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
- else
- /* Polarity handled by GPIO library */
- gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (((spi->cs_index_mask >> idx) & 0x01) &&
+ spi_get_csgpiod(spi, idx)) {
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
+ !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
+ activate);
+
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
+ }
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
@@ -993,7 +1087,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->set_cs(spi, !enable);
}
- if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
+ if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->cs_setup, NULL);
else
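Both spi_is_last_cs() and the GPIO loop above key off cs_index_mask: bit i set means logical CS i participates in the transfer. A hedged sketch of the mask walk, using the same names as the hunk (illustrative only):

/* cs_index_mask = 0x1 (the default) touches only chip_select[0];
 * 0x3 asserts chip_select[0] and chip_select[1] together, the
 * parallel-memories case wired up later in of_spi_parse_dt(). */
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
	if (!((spi->cs_index_mask >> idx) & 0x01))
		continue;			/* logical CS idx unused */
	if (spi_get_csgpiod(spi, idx))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
}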
@@ -1361,6 +1455,9 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
"SPI transfer timed out\n");
return -ETIMEDOUT;
}
+
+ if (xfer->error & SPI_TRANS_FAIL_IO)
+ return -EIO;
}
return 0;
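The new SPI_TRANS_FAIL_IO check gives transfer_one() implementations a way to report a hard I/O failure on the transfer itself instead of letting the wait time out. A hedged sketch of the driver side (fifo_fault is a hypothetical error condition):

/* In a driver's IRQ handler or transfer path (illustrative only): */
if (fifo_fault) {
	xfer->error |= SPI_TRANS_FAIL_IO;	/* core turns this into -EIO */
	spi_finalize_current_transfer(ctlr);
}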
@@ -1654,13 +1751,37 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
trace_spi_message_start(msg);
- ret = spi_split_transfers_maxsize(ctlr, msg,
- spi_max_transfer_size(msg->spi),
- GFP_KERNEL | GFP_DMA);
- if (ret) {
- msg->status = ret;
- spi_finalize_current_message(ctlr);
- return ret;
+ /*
+ * If an SPI controller does not support toggling the CS line on each
+ * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+ * for the CS line, we can emulate the CS-per-word hardware function by
+ * splitting transfers into one-word transfers and ensuring that
+ * cs_change is set for each transfer.
+ */
+ if ((msg->spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
+ spi_is_csgpiod(msg->spi))) {
+ ret = spi_split_transfers_maxwords(ctlr, msg, 1, GFP_KERNEL);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* Don't change cs_change on the last entry in the list */
+ if (list_is_last(&xfer->transfer_list, &msg->transfers))
+ break;
+ xfer->cs_change = 1;
+ }
+ } else {
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi),
+ GFP_KERNEL | GFP_DMA);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
}
if (ctlr->prepare_message) {
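Hoisting the SPI_CS_WORD emulation into __spi_pump_transfer_message() means the split now runs once per pumped message rather than in __spi_validate(). Sketched effect of the one-word split on a message (illustrative):

/* Before: one transfer, CS held for the whole message.
 *   [ len = 4 words, cs_change = 0 ]
 * After spi_split_transfers_maxwords(ctlr, msg, 1, GFP_KERNEL):
 *   [ 1 word, cs_change = 1 ][ 1 word, cs_change = 1 ]
 *   [ 1 word, cs_change = 1 ][ 1 word, cs_change = 0 ]
 * so CS toggles between words in software whenever the controller (or a
 * GPIO chip select) cannot provide SPI_CS_WORD in hardware. */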
@@ -2226,8 +2347,8 @@ static void of_spi_parse_dt_cs_delay(struct device_node *nc,
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
- u32 value;
- int rc;
+ u32 value, cs[SPI_CS_CNT_MAX];
+ int rc, idx;
/* Mode (clock phase/polarity/etc.) */
if (of_property_read_bool(nc, "spi-cpha"))
@@ -2299,14 +2420,53 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
return 0;
}
+ if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
+ dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Zero (0) is a valid physical CS value and can sit in any logical CS
+ * slot of spi->chip_select[]. If every slot were initialized to 0, a
+ * valid physical CS 0 could not be told apart from an unused logical CS
+ * whose value merely happens to be 0. To avoid that ambiguity, initialize
+ * every slot to 0xFF; unused logical CS slots then carry 0xFF and can be
+ * ignored during physical CS validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, 0xFF);
+
/* Device address */
- rc = of_property_read_u32(nc, "reg", &value);
- if (rc) {
+ rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
+ SPI_CS_CNT_MAX);
+ if (rc < 0) {
dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
nc, rc);
return rc;
}
- spi_set_chipselect(spi, 0, value);
+ if (rc > ctlr->num_chipselect) {
+ dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
+ nc, rc);
+ return rc;
+ }
+ if ((of_property_read_bool(nc, "parallel-memories")) &&
+ (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
+ dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
+ return -EINVAL;
+ }
+ for (idx = 0; idx < rc; idx++)
+ spi_set_chipselect(spi, idx, cs[idx]);
+
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A logical
+ * CS is selected by setting bit i in spi->cs_index_mask; for example,
+ * cs_index_mask = 0x01 selects logical CS 0, whose physical CS comes from
+ * spi->chip_select[0]. By default chip_select[0] holds the physical CS
+ * number, so set cs_index_mask to 0x01.
+ */
+ spi->cs_index_mask = 0x01;
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
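With the variable-length read above, a devicetree node can now carry several physical chip selects in one reg property. A hedged sketch of the outcome for a hypothetical two-CS parallel flash whose node has reg = <0 1> (the controller must also advertise SPI_CONTROLLER_MULTI_CS):

u32 cs[SPI_CS_CNT_MAX];
int n = of_property_read_variable_u32_array(nc, "reg", cs, 1,
					    SPI_CS_CNT_MAX);

if (n == 2) {					/* two physical CS parsed */
	spi_set_chipselect(spi, 0, cs[0]);	/* chip_select[0] = 0 */
	spi_set_chipselect(spi, 1, cs[1]);	/* chip_select[1] = 1 */
	/* the remaining slots keep the 0xFF "unused" sentinel */
}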
@@ -2412,6 +2572,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
int rc = 0;
+ u8 idx;
/* Alloc an spi_device */
ancillary = spi_alloc_device(ctlr);
@@ -2422,12 +2583,33 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+ /*
+ * Zero (0) is a valid physical CS value and can sit in any logical CS
+ * slot of spi->chip_select[]. If every slot were initialized to 0, a
+ * valid physical CS 0 could not be told apart from an unused logical CS
+ * whose value merely happens to be 0. To avoid that ambiguity, initialize
+ * every slot to 0xFF; unused logical CS slots then carry 0xFF and can be
+ * ignored during physical CS validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(ancillary, idx, 0xFF);
+
/* Use provided chip-select for ancillary device */
spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A logical
+ * CS is selected by setting bit i in spi->cs_index_mask; for example,
+ * cs_index_mask = 0x01 selects logical CS 0, whose physical CS comes from
+ * spi->chip_select[0]. By default chip_select[0] holds the physical CS
+ * number, so set cs_index_mask to 0x01.
+ */
+ ancillary->cs_index_mask = 0x01;
WARN_ON(!mutex_is_locked(&ctlr->add_lock));
@@ -2630,6 +2812,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
+ u8 idx;
if (!ctlr && index == -1)
return ERR_PTR(-EINVAL);
@@ -2665,12 +2848,33 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
return ERR_PTR(-ENOMEM);
}
+ /*
+ * Zero (0) is a valid physical CS value and can sit in any logical CS
+ * slot of spi->chip_select[]. If every slot were initialized to 0, a
+ * valid physical CS 0 could not be told apart from an unused logical CS
+ * whose value merely happens to be 0. To avoid that ambiguity, initialize
+ * every slot to 0xFF; unused logical CS slots then carry 0xFF and can be
+ * ignored during physical CS validity checks.
+ */
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, 0xFF);
+
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
spi_set_chipselect(spi, 0, lookup.chip_select);
+ /*
+ * spi->chip_select[i] gives the physical CS for logical CS i. A logical
+ * CS is selected by setting bit i in spi->cs_index_mask; for example,
+ * cs_index_mask = 0x01 selects logical CS 0, whose physical CS comes from
+ * spi->chip_select[0]. By default chip_select[0] holds the physical CS
+ * number, so set cs_index_mask to 0x01.
+ */
+ spi->cs_index_mask = 0x01;
return spi;
}
@@ -3104,6 +3308,7 @@ int spi_register_controller(struct spi_controller *ctlr)
struct boardinfo *bi;
int first_dynamic;
int status;
+ int idx;
if (!dev)
return -ENODEV;
@@ -3168,7 +3373,8 @@ int spi_register_controller(struct spi_controller *ctlr)
}
/* Setting last_cs to -1 means no chip selected */
- ctlr->last_cs = -1;
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ ctlr->last_cs[idx] = -1;
status = device_add(&ctlr->dev);
if (status < 0)
@@ -3885,33 +4091,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (list_empty(&message->transfers))
return -EINVAL;
- /*
- * If an SPI controller does not support toggling the CS line on each
- * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
- * for the CS line, we can emulate the CS-per-word hardware function by
- * splitting transfers into one-word transfers and ensuring that
- * cs_change is set for each transfer.
- */
- if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi_get_csgpiod(spi, 0))) {
- size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
- int ret;
-
- /* spi_split_transfers_maxsize() requires message->spi */
- message->spi = spi;
-
- ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
- GFP_KERNEL);
- if (ret)
- return ret;
-
- list_for_each_entry(xfer, &message->transfers, transfer_list) {
- /* Don't change cs_change on the last entry in the list */
- if (list_is_last(&xfer->transfer_list, &message->transfers))
- break;
- xfer->cs_change = 1;
- }
- }
+ message->spi = spi;
/*
* Half-duplex links include original MicroWire, and ones with
@@ -4036,8 +4216,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
if (!ctlr->transfer)
return -ENOTSUPP;
- message->spi = spi;
-
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
@@ -4217,8 +4395,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
if (status != 0)
return status;
- message->spi = spi;
-
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 9d974424c..7f152167b 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for kernel SPMI framework.
#
-obj-$(CONFIG_SPMI) += spmi.o
+obj-$(CONFIG_SPMI) += spmi.o spmi-devres.o
obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
index 9cbd47348..674a350cc 100644
--- a/drivers/spmi/hisi-spmi-controller.c
+++ b/drivers/spmi/hisi-spmi-controller.c
@@ -267,10 +267,10 @@ static int spmi_controller_probe(struct platform_device *pdev)
struct resource *iores;
int ret;
- ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller));
- if (!ctrl) {
+ ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller));
+ if (IS_ERR(ctrl)) {
dev_err(&pdev->dev, "can not allocate spmi_controller data\n");
- return -ENOMEM;
+ return PTR_ERR(ctrl);
}
spmi_controller = spmi_controller_get_drvdata(ctrl);
spmi_controller->controller = ctrl;
@@ -278,24 +278,21 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- ret = -EINVAL;
- goto err_put_controller;
+ return -EINVAL;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- ret = -EADDRNOTAVAIL;
- goto err_put_controller;
+ return -EADDRNOTAVAIL;
}
ret = of_property_read_u32(pdev->dev.of_node, "hisilicon,spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- ret = -ENODEV;
- goto err_put_controller;
+ return -ENODEV;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -311,25 +308,13 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->read_cmd = spmi_read_cmd;
ctrl->write_cmd = spmi_write_cmd;
- ret = spmi_controller_add(ctrl);
+ ret = devm_spmi_controller_add(&pdev->dev, ctrl);
if (ret) {
dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
- goto err_put_controller;
+ return ret;
}
return 0;
-
-err_put_controller:
- spmi_controller_put(ctrl);
- return ret;
-}
-
-static void spmi_del_controller(struct platform_device *pdev)
-{
- struct spmi_controller *ctrl = platform_get_drvdata(pdev);
-
- spmi_controller_remove(ctrl);
- spmi_controller_put(ctrl);
}
static const struct of_device_id spmi_controller_match_table[] = {
@@ -342,7 +327,6 @@ MODULE_DEVICE_TABLE(of, spmi_controller_match_table);
static struct platform_driver spmi_controller_driver = {
.probe = spmi_controller_probe,
- .remove_new = spmi_del_controller,
.driver = {
.name = "hisi_spmi_controller",
.of_match_table = spmi_controller_match_table,
diff --git a/drivers/spmi/spmi-devres.c b/drivers/spmi/spmi-devres.c
new file mode 100644
index 000000000..62c4b3f24
--- /dev/null
+++ b/drivers/spmi/spmi-devres.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 Google LLC.
+ */
+
+#include <linux/device.h>
+#include <linux/spmi.h>
+
+static void devm_spmi_controller_release(struct device *parent, void *res)
+{
+ spmi_controller_put(*(struct spmi_controller **)res);
+}
+
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size)
+{
+ struct spmi_controller **ptr, *ctrl;
+
+ ptr = devres_alloc(devm_spmi_controller_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ ctrl = spmi_controller_alloc(parent, size);
+ if (IS_ERR(ctrl)) {
+ devres_free(ptr);
+ return ctrl;
+ }
+
+ *ptr = ctrl;
+ devres_add(parent, ptr);
+
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(devm_spmi_controller_alloc);
+
+static void devm_spmi_controller_remove(struct device *parent, void *res)
+{
+ spmi_controller_remove(*(struct spmi_controller **)res);
+}
+
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl)
+{
+ struct spmi_controller **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_spmi_controller_remove, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = spmi_controller_add(ctrl);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = ctrl;
+ devres_add(parent, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_spmi_controller_add);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SPMI devres helpers");
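The payoff of these two helpers shows up in the pmic-arb and mtk-pmif hunks below, where every error path drops its manual spmi_controller_put(). A hedged usage sketch (struct example_pmif is a hypothetical driver context):

static int example_spmi_probe(struct platform_device *pdev)
{
	struct spmi_controller *ctrl;

	ctrl = devm_spmi_controller_alloc(&pdev->dev,
					  sizeof(struct example_pmif));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);	/* no spmi_controller_put() needed */

	/* ... map registers, install ctrl->read_cmd / ctrl->write_cmd ... */

	return devm_spmi_controller_add(&pdev->dev, ctrl);	/* auto-removed */
}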
diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
index 1261f381c..5079442f8 100644
--- a/drivers/spmi/spmi-mtk-pmif.c
+++ b/drivers/spmi/spmi-mtk-pmif.c
@@ -384,6 +384,12 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u32 data, wdata, cmd;
unsigned long flags;
+ /* Validate the slave id argument. */
+ if (unlikely(sid & ~0xf)) {
+ dev_err(&ctrl->dev, "exceed the max slv id\n");
+ return -EINVAL;
+ }
+
if (len > 4) {
dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len);
@@ -447,29 +453,24 @@ static int mtk_spmi_probe(struct platform_device *pdev)
int err, i;
u32 chan_offset;
- ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb));
- if (!ctrl)
- return -ENOMEM;
+ ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*arb));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
arb = spmi_controller_get_drvdata(ctrl);
arb->data = device_get_match_data(&pdev->dev);
if (!arb->data) {
- err = -EINVAL;
dev_err(&pdev->dev, "Cannot get drv_data\n");
- goto err_put_ctrl;
+ return -EINVAL;
}
arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif");
- if (IS_ERR(arb->base)) {
- err = PTR_ERR(arb->base);
- goto err_put_ctrl;
- }
+ if (IS_ERR(arb->base))
+ return PTR_ERR(arb->base);
arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst");
- if (IS_ERR(arb->spmimst_base)) {
- err = PTR_ERR(arb->spmimst_base);
- goto err_put_ctrl;
- }
+ if (IS_ERR(arb->spmimst_base))
+ return PTR_ERR(arb->spmimst_base);
arb->nclks = ARRAY_SIZE(pmif_clock_names);
for (i = 0; i < arb->nclks; i++)
@@ -478,7 +479,7 @@ static int mtk_spmi_probe(struct platform_device *pdev)
err = clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
if (err) {
dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
- goto err_put_ctrl;
+ return err;
}
err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
@@ -512,8 +513,6 @@ err_domain_remove:
clk_bulk_disable_unprepare(arb->nclks, arb->clks);
err_put_clks:
clk_bulk_put(arb->nclks, arb->clks);
-err_put_ctrl:
- spmi_controller_put(ctrl);
return err;
}
@@ -522,10 +521,9 @@ static void mtk_spmi_remove(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ spmi_controller_remove(ctrl);
clk_bulk_disable_unprepare(arb->nclks, arb->clks);
clk_bulk_put(arb->nclks, arb->clks);
- spmi_controller_remove(ctrl);
- spmi_controller_put(ctrl);
}
static const struct of_device_id mtk_spmi_match_table[] = {
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index dcb675d98..9ed1180fe 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1443,9 +1443,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
u32 channel, ee, hw_ver;
int err;
- ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
- if (!ctrl)
- return -ENOMEM;
+ ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
pmic_arb = spmi_controller_get_drvdata(ctrl);
pmic_arb->spmic = ctrl;
@@ -1462,20 +1462,16 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
*/
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
core = devm_ioremap(&ctrl->dev, res->start, resource_size(res));
- if (IS_ERR(core)) {
- err = PTR_ERR(core);
- goto err_put_ctrl;
- }
+ if (IS_ERR(core))
+ return PTR_ERR(core);
pmic_arb->core_size = resource_size(res);
pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
sizeof(*pmic_arb->ppid_to_apid),
GFP_KERNEL);
- if (!pmic_arb->ppid_to_apid) {
- err = -ENOMEM;
- goto err_put_ctrl;
- }
+ if (!pmic_arb->ppid_to_apid)
+ return -ENOMEM;
hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
@@ -1499,19 +1495,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
"obsrvr");
pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start,
resource_size(res));
- if (IS_ERR(pmic_arb->rd_base)) {
- err = PTR_ERR(pmic_arb->rd_base);
- goto err_put_ctrl;
- }
+ if (IS_ERR(pmic_arb->rd_base))
+ return PTR_ERR(pmic_arb->rd_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"chnls");
pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start,
resource_size(res));
- if (IS_ERR(pmic_arb->wr_base)) {
- err = PTR_ERR(pmic_arb->wr_base);
- goto err_put_ctrl;
- }
+ if (IS_ERR(pmic_arb->wr_base))
+ return PTR_ERR(pmic_arb->wr_base);
}
pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS;
@@ -1522,10 +1514,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "qcom,bus-id",
&pmic_arb->bus_instance);
if (pmic_arb->bus_instance > 1) {
- err = -EINVAL;
dev_err(&pdev->dev, "invalid bus instance (%u) specified\n",
pmic_arb->bus_instance);
- goto err_put_ctrl;
+ return -EINVAL;
}
if (pmic_arb->bus_instance == 0) {
@@ -1543,10 +1534,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
}
if (pmic_arb->base_apid + pmic_arb->apid_count > pmic_arb->max_periphs) {
- err = -EINVAL;
dev_err(&pdev->dev, "Unsupported APID count %d detected\n",
pmic_arb->base_apid + pmic_arb->apid_count);
- goto err_put_ctrl;
+ return -EINVAL;
}
} else if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) {
pmic_arb->base_apid = 0;
@@ -1554,55 +1544,45 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
PMIC_ARB_FEATURES_PERIPH_MASK;
if (pmic_arb->apid_count > pmic_arb->max_periphs) {
- err = -EINVAL;
dev_err(&pdev->dev, "Unsupported APID count %d detected\n",
pmic_arb->apid_count);
- goto err_put_ctrl;
+ return -EINVAL;
}
}
pmic_arb->apid_data = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs,
sizeof(*pmic_arb->apid_data),
GFP_KERNEL);
- if (!pmic_arb->apid_data) {
- err = -ENOMEM;
- goto err_put_ctrl;
- }
+ if (!pmic_arb->apid_data)
+ return -ENOMEM;
dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
pmic_arb->ver_ops->ver_str, hw_ver);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pmic_arb->intr)) {
- err = PTR_ERR(pmic_arb->intr);
- goto err_put_ctrl;
- }
+ if (IS_ERR(pmic_arb->intr))
+ return PTR_ERR(pmic_arb->intr);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res);
- if (IS_ERR(pmic_arb->cnfg)) {
- err = PTR_ERR(pmic_arb->cnfg);
- goto err_put_ctrl;
- }
+ if (IS_ERR(pmic_arb->cnfg))
+ return PTR_ERR(pmic_arb->cnfg);
pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq");
- if (pmic_arb->irq < 0) {
- err = pmic_arb->irq;
- goto err_put_ctrl;
- }
+ if (pmic_arb->irq < 0)
+ return pmic_arb->irq;
err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
if (err) {
dev_err(&pdev->dev, "channel unspecified.\n");
- goto err_put_ctrl;
+ return err;
}
if (channel > 5) {
dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
channel);
- err = -EINVAL;
- goto err_put_ctrl;
+ return -EINVAL;
}
pmic_arb->channel = channel;
@@ -1610,22 +1590,19 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee);
if (err) {
dev_err(&pdev->dev, "EE unspecified.\n");
- goto err_put_ctrl;
+ return err;
}
if (ee > 5) {
dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee);
- err = -EINVAL;
- goto err_put_ctrl;
+ return -EINVAL;
}
pmic_arb->ee = ee;
mapping_table = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs,
sizeof(*mapping_table), GFP_KERNEL);
- if (!mapping_table) {
- err = -ENOMEM;
- goto err_put_ctrl;
- }
+ if (!mapping_table)
+ return -ENOMEM;
pmic_arb->mapping_table = mapping_table;
/* Initialize max_apid/min_apid to the opposite bounds, during
@@ -1645,7 +1622,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n",
err);
- goto err_put_ctrl;
+ return err;
}
}
@@ -1654,8 +1631,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
&pmic_arb_irq_domain_ops, pmic_arb);
if (!pmic_arb->domain) {
dev_err(&pdev->dev, "unable to create irq_domain\n");
- err = -ENOMEM;
- goto err_put_ctrl;
+ return -ENOMEM;
}
irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq,
@@ -1669,8 +1645,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err_domain_remove:
irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
irq_domain_remove(pmic_arb->domain);
-err_put_ctrl:
- spmi_controller_put(ctrl);
return err;
}
@@ -1681,7 +1655,6 @@ static void spmi_pmic_arb_remove(struct platform_device *pdev)
spmi_controller_remove(ctrl);
irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL);
irq_domain_remove(pmic_arb->domain);
- spmi_controller_put(ctrl);
}
static const struct of_device_id spmi_pmic_arb_match_table[] = {
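The hunks above can drop the err_put_ctrl unwind because the controller reference is now device-managed; once everything allocated in probe is devm-based, error paths simply return. A minimal sketch of that pattern, using hypothetical foo_* names rather than this driver's own:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	/* devm_ allocations are released automatically on probe failure */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* no goto: nothing to unwind by hand */

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}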
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 93cd4a34d..3a60fd2e0 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -448,11 +448,11 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent,
int id;
if (WARN_ON(!parent))
- return NULL;
+ return ERR_PTR(-EINVAL);
ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL);
if (!ctrl)
- return NULL;
+ return ERR_PTR(-ENOMEM);
device_initialize(&ctrl->dev);
ctrl->dev.type = &spmi_ctrl_type;
@@ -466,7 +466,7 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent,
dev_err(parent,
"unable to allocate SPMI controller identifier.\n");
spmi_controller_put(ctrl);
- return NULL;
+ return ERR_PTR(id);
}
ctrl->nr = id;
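With spmi_controller_alloc() now returning ERR_PTR() codes instead of NULL, callers switch from a NULL test to IS_ERR()/PTR_ERR(). Roughly, as a caller-side sketch rather than a hunk from this patch:

	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb));
	if (IS_ERR(ctrl))		/* was: if (!ctrl) return -ENOMEM; */
		return PTR_ERR(ctrl);	/* also propagates -EINVAL or an ida error */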
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
index de2f6516d..22325ab9d 100644
--- a/drivers/staging/greybus/i2c.c
+++ b/drivers/staging/greybus/i2c.c
@@ -264,7 +264,7 @@ static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
/* Looks good; up our i2c adapter */
adapter = &gb_i2c_dev->adapter;
adapter->owner = THIS_MODULE;
- adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adapter->class = I2C_CLASS_HWMON;
adapter->algo = &gb_i2c_algorithm;
adapter->dev.parent = &gbphy_dev->dev;
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index bc6c7b248..554c2e475 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -36,6 +36,8 @@ source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/rkvdec/Kconfig"
+source "drivers/staging/media/starfive/Kconfig"
+
source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-video/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 1a4c3a062..dcaeeca0e 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_VIDEO_MAX96712) += max96712/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
+obj-$(CONFIG_VIDEO_STARFIVE_CAMSS) += starfive/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 58cddf11c..5bcd634a2 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -361,7 +361,7 @@ gc0310_get_pad_format(struct gc0310_device *dev,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&dev->sd, state, pad);
+ return v4l2_subdev_state_get_format(state, pad);
return &dev->mode.fmt;
}
@@ -496,9 +496,17 @@ error_power_down:
return ret;
}
-static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int gc0310_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
interval->interval.numerator = 1;
interval->interval.denominator = GC0310_FPS;
@@ -545,7 +553,6 @@ static const struct v4l2_subdev_sensor_ops gc0310_sensor_ops = {
static const struct v4l2_subdev_video_ops gc0310_video_ops = {
.s_stream = gc0310_s_stream,
- .g_frame_interval = gc0310_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops gc0310_pad_ops = {
@@ -553,6 +560,7 @@ static const struct v4l2_subdev_pad_ops gc0310_pad_ops = {
.enum_frame_size = gc0310_enum_frame_size,
.get_fmt = gc0310_get_fmt,
.set_fmt = gc0310_set_fmt,
+ .get_frame_interval = gc0310_get_frame_interval,
};
static const struct v4l2_subdev_ops gc0310_ops = {
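The gc0310 conversion above is the template the following sensor hunks repeat: g_frame_interval moves from the video ops to a pad op that also receives the subdev state, and struct v4l2_subdev_frame_interval gains a which field that must be checked. A standalone sketch of the new shape (my_sensor names and the fixed 30 fps are illustrative):

#include <media/v4l2-subdev.h>

static int my_sensor_get_frame_interval(struct v4l2_subdev *sd,
					struct v4l2_subdev_state *sd_state,
					struct v4l2_subdev_frame_interval *fi)
{
	/* only ACTIVE is handled until TRY support is implemented */
	if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	fi->interval.numerator = 1;
	fi->interval.denominator = 30;	/* fixed 30 fps, purely illustrative */
	return 0;
}

static const struct v4l2_subdev_pad_ops my_sensor_pad_ops = {
	.get_frame_interval = my_sensor_get_frame_interval,
};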
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index 9fa390fbc..bec4c5615 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -561,7 +561,7 @@ static int gc2235_set_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
mutex_unlock(&dev->input_lock);
return 0;
}
@@ -698,11 +698,19 @@ fail_power_off:
return ret;
}
-static int gc2235_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int gc2235_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct gc2235_device *dev = to_gc2235_sensor(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
interval->interval.numerator = 1;
interval->interval.denominator = dev->res->fps;
@@ -754,7 +762,6 @@ static const struct v4l2_subdev_sensor_ops gc2235_sensor_ops = {
static const struct v4l2_subdev_video_ops gc2235_video_ops = {
.s_stream = gc2235_s_stream,
- .g_frame_interval = gc2235_g_frame_interval,
};
static const struct v4l2_subdev_core_ops gc2235_core_ops = {
@@ -767,6 +774,7 @@ static const struct v4l2_subdev_pad_ops gc2235_pad_ops = {
.enum_frame_size = gc2235_enum_frame_size,
.get_fmt = gc2235_get_fmt,
.set_fmt = gc2235_set_fmt,
+ .get_frame_interval = gc2235_get_frame_interval,
};
static const struct v4l2_subdev_ops gc2235_ops = {
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 1c6643c44..20f02d18a 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -666,7 +666,7 @@ static int mt9m114_set_fmt(struct v4l2_subdev *sd,
fmt->height = res->height;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
return 0;
}
@@ -1388,11 +1388,19 @@ static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value)
return !!err;
}
-static int mt9m114_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int mt9m114_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
interval->interval.numerator = 1;
interval->interval.denominator = mt9m114_res[dev->res].fps;
@@ -1479,7 +1487,6 @@ static int mt9m114_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
static const struct v4l2_subdev_video_ops mt9m114_video_ops = {
.s_stream = mt9m114_s_stream,
- .g_frame_interval = mt9m114_g_frame_interval,
};
static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = {
@@ -1498,6 +1505,7 @@ static const struct v4l2_subdev_pad_ops mt9m114_pad_ops = {
.get_fmt = mt9m114_get_fmt,
.set_fmt = mt9m114_set_fmt,
.set_selection = mt9m114_s_exposure_selection,
+ .get_frame_interval = mt9m114_get_frame_interval,
};
static const struct v4l2_subdev_ops mt9m114_ops = {
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 6a72691ed..133e346ae 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -671,7 +671,7 @@ static int ov2722_set_fmt(struct v4l2_subdev *sd,
fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
return 0;
}
@@ -845,11 +845,19 @@ fail_power_off:
return ret;
}
-static int ov2722_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
+static int ov2722_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *interval)
{
struct ov2722_device *dev = to_ov2722_sensor(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (interval->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
interval->interval.numerator = 1;
interval->interval.denominator = dev->res->fps;
@@ -901,7 +909,6 @@ static const struct v4l2_subdev_sensor_ops ov2722_sensor_ops = {
static const struct v4l2_subdev_video_ops ov2722_video_ops = {
.s_stream = ov2722_s_stream,
- .g_frame_interval = ov2722_g_frame_interval,
};
static const struct v4l2_subdev_core_ops ov2722_core_ops = {
@@ -914,6 +921,7 @@ static const struct v4l2_subdev_pad_ops ov2722_pad_ops = {
.enum_frame_size = ov2722_enum_frame_size,
.get_fmt = ov2722_get_fmt,
.set_fmt = ov2722_set_fmt,
+ .get_frame_interval = ov2722_get_frame_interval,
};
static const struct v4l2_subdev_ops ov2722_ops = {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 759233a7b..d0db2efe0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -105,8 +105,8 @@ static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
unsigned short fps = 0;
int ret;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- video, g_frame_interval, &fi);
+ ret = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].camera,
+ pad, get_frame_interval, &fi);
if (!ret && fi.interval.numerator)
fps = fi.interval.denominator / fi.interval.numerator;
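v4l2_subdev_call_state_active() wraps the pad op call in the subdev's active-state lock. Simplified, the macro behaves roughly like the following (a sketch; see include/media/v4l2-subdev.h for the real definition):

	struct v4l2_subdev_state *state;
	int ret;

	state = v4l2_subdev_get_unlocked_active_state(sd);
	if (state)
		v4l2_subdev_lock_state(state);
	ret = v4l2_subdev_call(sd, pad, get_frame_interval, state, &fi);
	if (state)
		v4l2_subdev_unlock_state(state);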
@@ -3723,12 +3723,10 @@ apply_min_padding:
static int atomisp_set_crop(struct atomisp_device *isp,
const struct v4l2_mbus_framefmt *format,
+ struct v4l2_subdev_state *sd_state,
int which)
{
struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = {
.which = which,
.target = V4L2_SEL_TGT_CROP,
@@ -3754,7 +3752,7 @@ static int atomisp_set_crop(struct atomisp_device *isp,
sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
- ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel);
+ ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel);
if (ret)
dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
@@ -3770,9 +3768,6 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
const struct atomisp_format_bridge *fmt, *snr_fmt;
struct atomisp_sub_device *asd = &isp->asd;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -3809,11 +3804,16 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
format.format.width, format.format.height);
- ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &format.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0)
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &format);
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
@@ -4238,9 +4238,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
struct atomisp_device *isp = asd->isp;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
+ struct v4l2_subdev_state *act_sd_state;
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -4268,12 +4266,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
/* Disable dvs if resolution can't be supported by sensor */
if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_TRY;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &vformat);
+ }
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_TRY;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
@@ -4291,12 +4295,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
}
}
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE);
- if (ret)
- return ret;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
+ ret = atomisp_set_crop(isp, &vformat.format, act_sd_state,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat);
+ }
+
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat);
if (ret)
return ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index abf55a86f..89118438a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -29,8 +29,7 @@ v4l2_mbus_framefmt *__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
unsigned int pad)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &csi2->formats[pad];
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index f7b4bee95..d5b077e60 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -132,8 +132,8 @@ struct atomisp_input_subdev {
/* Sensor rects for sensors which support crop */
struct v4l2_rect native_rect;
struct v4l2_rect active_rect;
- /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */
- struct v4l2_subdev_pad_config pad_cfg;
+ /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */
+ struct v4l2_subdev_state *try_sd_state;
struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index a8e4779d0..5b2d88c02 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -781,12 +781,20 @@ static int atomisp_enum_framesizes(struct file *file, void *priv,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = input->code,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
+ if (!input->camera)
+ return -EINVAL;
+
if (input->crop_support)
return atomisp_enum_framesizes_crop(isp, fsize);
- ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
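The guarded unlock here is deliberate: v4l2_subdev_lock_and_get_active_state() returns NULL for subdevs that have not been converted to the active-state API, and passing a NULL state to the pad op remains valid for them. The recurring pattern in this file, as a sketch:

	struct v4l2_subdev_state *state;
	int ret;

	state = v4l2_subdev_lock_and_get_active_state(sd); /* NULL if unconverted */
	ret = v4l2_subdev_call(sd, pad, enum_mbus_code, state, &code);
	if (state)
		v4l2_subdev_unlock_state(state);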
@@ -803,18 +811,25 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_frame_interval_enum fie = {
- .code = atomisp_in_fmt_conv[0].code,
+ .code = atomisp_in_fmt_conv[0].code,
.index = fival->index,
.width = fival->width,
.height = fival->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, enum_frame_interval, NULL,
- &fie);
+ if (!input->camera)
+ return -EINVAL;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval,
+ act_sd_state, &fie);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -830,30 +845,25 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
const struct atomisp_format_bridge *format;
- struct v4l2_subdev *camera;
+ struct v4l2_subdev_state *act_sd_state;
unsigned int i, fi = 0;
- int rval;
+ int ret;
- camera = isp->inputs[asd->input_curr].camera;
- if(!camera) {
- dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
- __func__, vdev->name);
+ if (!input->camera)
return -EINVAL;
- }
- rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
- if (rval == -ENOIOCTLCMD) {
- dev_warn(isp->dev,
- "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
- camera->name);
- }
-
- if (rval)
- return rval;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &code);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
format = &atomisp_output_fmts[i];
@@ -1028,7 +1038,7 @@ static int atomisp_qbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
- if (buf->index >= vdev->queue->num_buffers)
+ if (buf->index >= vb2_get_num_buffers(vdev->queue))
return -EINVAL;
if (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING) {
@@ -1059,7 +1069,7 @@ static int atomisp_dqbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer
if (ret)
return ret;
- vb = pipe->vb_queue.bufs[buf->index];
+ vb = vb2_get_buffer(&pipe->vb_queue, buf->index);
frame = vb_to_frame(vb);
buf->reserved = asd->frame_status[buf->index];
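These two hunks track the vb2 core making its buffer array private; drivers now go through accessors instead of touching q->bufs[] and q->num_buffers directly. A sketch of the lookup pattern (q and index are illustrative):

	struct vb2_buffer *vb;

	if (index >= vb2_get_num_buffers(q))	/* was: q->num_buffers */
		return -EINVAL;

	vb = vb2_get_buffer(q, index);		/* was: q->bufs[index] */
	if (!vb)	/* an in-range index may still be unallocated */
		return -EINVAL;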
@@ -1739,8 +1749,8 @@ static int atomisp_s_parm(struct file *file, void *fh,
fi.interval = parm->parm.capture.timeperframe;
- rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- video, s_frame_interval, &fi);
+ rval = v4l2_subdev_call_state_active(isp->inputs[asd->input_curr].camera,
+ pad, set_frame_interval, &fi);
if (!rval)
parm->parm.capture.timeperframe = fi.interval;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 471912dea..a87fc7415 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -240,9 +240,9 @@ struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
if (which == V4L2_SUBDEV_FORMAT_TRY) {
switch (target) {
case V4L2_SEL_TGT_CROP:
- return v4l2_subdev_get_try_crop(sd, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
case V4L2_SEL_TGT_COMPOSE:
- return v4l2_subdev_get_try_compose(sd, sd_state, pad);
+ return v4l2_subdev_state_get_compose(sd_state, pad);
}
}
@@ -264,7 +264,7 @@ struct v4l2_mbus_framefmt
struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &isp_sd->fmt[pad].fmt;
}
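This file and the ones below all make the same substitution: the v4l2_subdev_get_try_*() helpers give way to state accessors that drop the subdev argument and operate on whichever state is passed in. Side by side, as a sketch:

	/* pre-6.8 helpers, TRY state only, needed the subdev: */
	fmt  = v4l2_subdev_get_try_format(sd, sd_state, pad);
	crop = v4l2_subdev_get_try_crop(sd, sd_state, pad);

	/* 6.8 accessors, work on whatever state (TRY or ACTIVE) is passed: */
	fmt  = v4l2_subdev_state_get_format(sd_state, pad);
	crop = v4l2_subdev_state_get_crop(sd_state, pad);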
diff --git a/drivers/staging/media/atomisp/pci/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
index 074826a5b..92e61ee90 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_tpg.c
@@ -47,7 +47,7 @@ static int tpg_set_fmt(struct v4l2_subdev *sd,
/* only raw8 grbg is supported by TPG */
fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- sd_state->pads->try_fmt = *fmt;
+ *v4l2_subdev_state_get_format(sd_state, 0) = *fmt;
return 0;
}
return 0;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index c1c8501ec..547e1444a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -862,6 +862,9 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
v4l2_device_unregister(&isp->v4l2_dev);
media_device_unregister(&isp->media_dev);
media_device_cleanup(&isp->media_dev);
+
+ for (i = 0; i < isp->input_cnt; i++)
+ __v4l2_subdev_state_free(isp->inputs[i].try_sd_state);
}
static int atomisp_register_entities(struct atomisp_device *isp)
@@ -933,32 +936,49 @@ v4l2_device_failed:
static void atomisp_init_sensor(struct atomisp_input_subdev *input)
{
+ static struct lock_class_key try_sd_state_key;
struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
struct v4l2_subdev_frame_size_enum fse = { };
- struct v4l2_subdev_state sd_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = { };
+ struct v4l2_subdev_state *try_sd_state, *act_sd_state;
int i, err;
+ /*
+ * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc()
+ * but atomisp needs this for try_fmt on its /dev/video# node since
+ * it emulates a normal v4l2 device there, passing through try_fmt /
+ * set_fmt to the sensor.
+ */
+ try_sd_state = __v4l2_subdev_state_alloc(input->camera,
+ "atomisp:try_sd_state->lock", &try_sd_state_key);
+ if (IS_ERR(try_sd_state))
+ return;
+
+ input->try_sd_state = try_sd_state;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum);
+ err = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &mbus_code_enum);
if (!err)
input->code = mbus_code_enum.code;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->native_rect = sel.r;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->active_rect = sel.r;
@@ -973,7 +993,8 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
fse.code = input->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ err = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
if (err)
break;
@@ -989,22 +1010,26 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
* for padding, set the crop rect to cover the entire sensor instead
* of only the default active area.
*
- * Do this for both try and active formats since the try_crop rect in
- * pad_cfg may influence (clamp) future try_fmt calls with which == try.
+ * Do this for both try and active formats since the crop rect in
+ * try_sd_state may influence (clamp size) in calls with which == try.
*/
sel.which = V4L2_SUBDEV_FORMAT_TRY;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel);
+ v4l2_subdev_lock_state(input->try_sd_state);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ input->try_sd_state, &sel);
+ v4l2_subdev_unlock_state(input->try_sd_state);
if (err)
- return;
+ goto unlock_act_sd_state;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
input->native_rect.width, input->native_rect.height,
@@ -1012,6 +1037,10 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
input->binning_support);
input->crop_support = true;
+
+unlock_act_sd_state:
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
}
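As the FIXME notes, __v4l2_subdev_state_alloc() is normally core-internal, which is why it takes an explicit lockdep class key from the caller. A sketch of the alloc/free pairing used here (my_* names illustrative):

	static struct lock_class_key my_state_key;	/* one static key per call site */
	struct v4l2_subdev_state *state;

	state = __v4l2_subdev_state_alloc(sd, "my_drv:state->lock", &my_state_key);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* ... access only between v4l2_subdev_lock_state()/_unlock_state() ... */

	__v4l2_subdev_state_free(state);	/* mirrored in the unregister path above */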
int atomisp_register_device_nodes(struct atomisp_device *isp)
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
index 8e26663ce..305b10315 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
@@ -820,6 +820,8 @@ static int isc_try_configure_pipeline(struct isc_device *isc)
static void isc_try_fse(struct isc_device *isc,
struct v4l2_subdev_state *sd_state)
{
+ struct v4l2_rect *try_crop =
+ v4l2_subdev_state_get_crop(sd_state, 0);
struct v4l2_subdev_frame_size_enum fse = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -841,11 +843,11 @@ static void isc_try_fse(struct isc_device *isc,
* just use the maximum ISC can receive.
*/
if (ret) {
- sd_state->pads->try_crop.width = isc->max_width;
- sd_state->pads->try_crop.height = isc->max_height;
+ try_crop->width = isc->max_width;
+ try_crop->height = isc->max_height;
} else {
- sd_state->pads->try_crop.width = fse.max_width;
- sd_state->pads->try_crop.height = fse.max_height;
+ try_crop->width = fse.max_width;
+ try_crop->height = fse.max_height;
}
}
@@ -1869,7 +1871,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &isc->lock;
- q->min_buffers_needed = 1;
+ q->min_queued_buffers = 1;
q->dev = isc->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index ac5fb3320..2b80d5400 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -82,10 +82,8 @@ static struct v4l2_mbus_framefmt *
__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
- struct imx_ic_priv *ic_priv = priv->ic_priv;
-
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &priv->format_mbus;
}
@@ -395,11 +393,19 @@ out:
return ret;
}
-static int prp_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int prp_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct prp_priv *priv = sd_to_priv(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad >= PRP_NUM_PADS)
return -EINVAL;
@@ -410,11 +416,19 @@ static int prp_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int prp_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int prp_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct prp_priv *priv = sd_to_priv(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad >= PRP_NUM_PADS)
return -EINVAL;
@@ -450,16 +464,15 @@ static int prp_registered(struct v4l2_subdev *sd)
}
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
- .init_cfg = imx_media_init_cfg,
.enum_mbus_code = prp_enum_mbus_code,
.get_fmt = prp_get_fmt,
.set_fmt = prp_set_fmt,
+ .get_frame_interval = prp_get_frame_interval,
+ .set_frame_interval = prp_set_frame_interval,
.link_validate = prp_link_validate,
};
static const struct v4l2_subdev_video_ops prp_video_ops = {
- .g_frame_interval = prp_g_frame_interval,
- .s_frame_interval = prp_s_frame_interval,
.s_stream = prp_s_stream,
};
@@ -474,6 +487,7 @@ static const struct v4l2_subdev_ops prp_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .init_state = imx_media_init_state,
.registered = prp_registered,
};
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 9b81cfbcd..17fd980c9 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -790,10 +790,8 @@ static struct v4l2_mbus_framefmt *
__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
- struct imx_ic_priv *ic_priv = priv->ic_priv;
-
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &priv->format_mbus[pad];
}
@@ -1205,11 +1203,19 @@ out:
return ret;
}
-static int prp_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int prp_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct prp_priv *priv = sd_to_priv(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad >= PRPENCVF_NUM_PADS)
return -EINVAL;
@@ -1220,11 +1226,19 @@ static int prp_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int prp_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int prp_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct prp_priv *priv = sd_to_priv(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad >= PRPENCVF_NUM_PADS)
return -EINVAL;
@@ -1298,16 +1312,15 @@ static void prp_unregistered(struct v4l2_subdev *sd)
}
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
- .init_cfg = imx_media_init_cfg,
.enum_mbus_code = prp_enum_mbus_code,
.enum_frame_size = prp_enum_frame_size,
.get_fmt = prp_get_fmt,
.set_fmt = prp_set_fmt,
+ .get_frame_interval = prp_get_frame_interval,
+ .set_frame_interval = prp_set_frame_interval,
};
static const struct v4l2_subdev_video_ops prp_video_ops = {
- .g_frame_interval = prp_g_frame_interval,
- .s_frame_interval = prp_s_frame_interval,
.s_stream = prp_s_stream,
};
@@ -1322,6 +1335,7 @@ static const struct v4l2_subdev_ops prp_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .init_state = imx_media_init_state,
.registered = prp_registered,
.unregistered = prp_unregistered,
};
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 484607831..efa7623b5 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -511,7 +511,8 @@ static int capture_legacy_g_parm(struct file *file, void *fh,
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = v4l2_subdev_call(priv->src_sd, video, g_frame_interval, &fi);
+ ret = v4l2_subdev_call_state_active(priv->src_sd, pad,
+ get_frame_interval, &fi);
if (ret < 0)
return ret;
@@ -534,7 +535,8 @@ static int capture_legacy_s_parm(struct file *file, void *fh,
return -EINVAL;
fi.interval = a->parm.capture.timeperframe;
- ret = v4l2_subdev_call(priv->src_sd, video, s_frame_interval, &fi);
+ ret = v4l2_subdev_call_state_active(priv->src_sd, pad,
+ set_frame_interval, &fi);
if (ret < 0)
return ret;
@@ -605,6 +607,7 @@ static int capture_queue_setup(struct vb2_queue *vq,
{
struct capture_priv *priv = vb2_get_drv_priv(vq);
struct v4l2_pix_format *pix = &priv->vdev.fmt;
+ unsigned int q_num_bufs = vb2_get_num_buffers(vq);
unsigned int count = *nbuffers;
if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -613,14 +616,14 @@ static int capture_queue_setup(struct vb2_queue *vq,
if (*nplanes) {
if (*nplanes != 1 || sizes[0] < pix->sizeimage)
return -EINVAL;
- count += vq->num_buffers;
+ count += q_num_bufs;
}
count = min_t(__u32, VID_MEM_LIMIT / pix->sizeimage, count);
if (*nplanes)
- *nbuffers = (count < vq->num_buffers) ? 0 :
- count - vq->num_buffers;
+ *nbuffers = (count < q_num_bufs) ? 0 :
+ count - q_num_bufs;
else
*nbuffers = count;
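A worked example of the queue_setup arithmetic above, with illustrative numbers: 4 buffers already allocated, 8 more requested on a CREATE_BUFS call, and the memory limit capping the total at 10:

	unsigned int q_num_bufs = 4, requested = 8, cap = 10;
	unsigned int count, nbuffers;

	count = requested + q_num_bufs;			/* 12 */
	count = min_t(__u32, cap, count);		/* clamped to 10 */
	nbuffers = (count < q_num_bufs) ? 0 : count - q_num_bufs;	/* 6 new */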
@@ -1021,7 +1024,7 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
vq->mem_ops = &vb2_dma_contig_memops;
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vq->lock = &priv->mutex;
- vq->min_buffers_needed = 2;
+ vq->min_queued_buffers = 2;
vq->dev = priv->dev;
ret = vb2_queue_init(vq);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index dda1ebc34..785aac881 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -902,11 +902,19 @@ static const struct csi_skip_desc *csi_find_best_skip(struct v4l2_fract *in,
* V4L2 subdev operations.
*/
-static int csi_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int csi_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad >= CSI_NUM_PADS)
return -EINVAL;
@@ -919,13 +927,21 @@ static int csi_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int csi_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int csi_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
struct v4l2_fract *input_fi;
int ret = 0;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&priv->lock);
input_fi = &priv->frame_interval[CSI_SINK_PAD];
@@ -1148,7 +1164,7 @@ __csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &priv->format_mbus[pad];
}
@@ -1158,8 +1174,7 @@ __csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&priv->sd, sd_state,
- CSI_SINK_PAD);
+ return v4l2_subdev_state_get_crop(sd_state, CSI_SINK_PAD);
else
return &priv->crop;
}
@@ -1169,8 +1184,7 @@ __csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&priv->sd, sd_state,
- CSI_SINK_PAD);
+ return v4l2_subdev_state_get_compose(sd_state, CSI_SINK_PAD);
else
return &priv->compose;
}
@@ -1862,13 +1876,10 @@ static const struct v4l2_subdev_core_ops csi_core_ops = {
};
static const struct v4l2_subdev_video_ops csi_video_ops = {
- .g_frame_interval = csi_g_frame_interval,
- .s_frame_interval = csi_s_frame_interval,
.s_stream = csi_s_stream,
};
static const struct v4l2_subdev_pad_ops csi_pad_ops = {
- .init_cfg = imx_media_init_cfg,
.enum_mbus_code = csi_enum_mbus_code,
.enum_frame_size = csi_enum_frame_size,
.enum_frame_interval = csi_enum_frame_interval,
@@ -1876,6 +1887,8 @@ static const struct v4l2_subdev_pad_ops csi_pad_ops = {
.set_fmt = csi_set_fmt,
.get_selection = csi_get_selection,
.set_selection = csi_set_selection,
+ .get_frame_interval = csi_get_frame_interval,
+ .set_frame_interval = csi_set_frame_interval,
.link_validate = csi_link_validate,
};
@@ -1886,6 +1899,7 @@ static const struct v4l2_subdev_ops csi_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops csi_internal_ops = {
+ .init_state = imx_media_init_state,
.registered = csi_registered,
.unregistered = csi_unregistered,
};
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 064dc562b..1b5af8945 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -426,10 +426,10 @@ EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
/*
* Initializes the TRY format to the ACTIVE format on all pads
- * of a subdev. Can be used as the .init_cfg pad operation.
+ * of a subdev. Can be used as the .init_state internal operation.
*/
-int imx_media_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+int imx_media_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *mf_try;
unsigned int pad;
@@ -445,13 +445,13 @@ int imx_media_init_cfg(struct v4l2_subdev *sd,
if (ret)
continue;
- mf_try = v4l2_subdev_get_try_format(sd, sd_state, pad);
+ mf_try = v4l2_subdev_state_get_format(sd_state, pad);
*mf_try = format.format;
}
return 0;
}
-EXPORT_SYMBOL_GPL(imx_media_init_cfg);
+EXPORT_SYMBOL_GPL(imx_media_init_state);
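The init_cfg pad operation became the init_state internal operation in this cycle; the callback signature is unchanged, only the ops table it lives in moves. Wiring sketch:

	static const struct v4l2_subdev_internal_ops my_internal_ops = {
		.init_state = imx_media_init_state,	/* was pad_ops.init_cfg */
	};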
/*
* Default the colorspace in tryfmt to SRGB if set to an unsupported
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 3c2093c52..09da4103a 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -536,7 +536,7 @@ __vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &priv->format_mbus[pad];
}
@@ -780,11 +780,19 @@ static int vdic_link_validate(struct v4l2_subdev *sd,
return ret;
}
-static int vdic_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int vdic_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
if (fi->pad >= VDIC_NUM_PADS)
return -EINVAL;
@@ -797,13 +805,21 @@ static int vdic_g_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static int vdic_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *fi)
+static int vdic_set_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *fi)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
struct v4l2_fract *input_fi, *output_fi;
int ret = 0;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (fi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
mutex_lock(&priv->lock);
input_fi = &priv->frame_interval[priv->active_input_pad];
@@ -882,16 +898,15 @@ static void vdic_unregistered(struct v4l2_subdev *sd)
}
static const struct v4l2_subdev_pad_ops vdic_pad_ops = {
- .init_cfg = imx_media_init_cfg,
.enum_mbus_code = vdic_enum_mbus_code,
.get_fmt = vdic_get_fmt,
.set_fmt = vdic_set_fmt,
+ .get_frame_interval = vdic_get_frame_interval,
+ .set_frame_interval = vdic_set_frame_interval,
.link_validate = vdic_link_validate,
};
static const struct v4l2_subdev_video_ops vdic_video_ops = {
- .g_frame_interval = vdic_g_frame_interval,
- .s_frame_interval = vdic_s_frame_interval,
.s_stream = vdic_s_stream,
};
@@ -906,6 +921,7 @@ static const struct v4l2_subdev_ops vdic_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
+ .init_state = imx_media_init_state,
.registered = vdic_registered,
.unregistered = vdic_unregistered,
};
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index 2640cd34d..f095d9134 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -192,8 +192,8 @@ static inline int imx_media_enum_ipu_formats(u32 *code, u32 index,
int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
u32 width, u32 height, u32 code, u32 field,
const struct imx_media_pixfmt **cc);
-int imx_media_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state);
+int imx_media_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state);
void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
bool ic_route);
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index b2d8476d8..0d8b42061 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -501,7 +501,7 @@ __csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->sd, sd_state, pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
else
return &csi2->format_mbus;
}
@@ -619,7 +619,6 @@ static const struct v4l2_subdev_video_ops csi2_video_ops = {
};
static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
- .init_cfg = imx_media_init_cfg,
.get_fmt = csi2_get_fmt,
.set_fmt = csi2_set_fmt,
};
@@ -631,6 +630,7 @@ static const struct v4l2_subdev_ops csi2_subdev_ops = {
};
static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .init_state = imx_media_init_state,
.registered = csi2_registered,
};
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 55cc44a40..3df58eb3e 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -36,7 +36,7 @@ static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
/* Initialize try_fmt */
for (i = 0; i < IMGU_NODE_NUM; i++) {
struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, i);
+ v4l2_subdev_state_get_format(fh->state, i);
try_fmt->width = try_crop.width;
try_fmt->height = try_crop.height;
@@ -44,8 +44,8 @@ static int imgu_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
try_fmt->field = V4L2_FIELD_NONE;
}
- *v4l2_subdev_get_try_crop(sd, fh->state, IMGU_NODE_IN) = try_crop;
- *v4l2_subdev_get_try_compose(sd, fh->state, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_state_get_crop(fh->state, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_state_get_compose(fh->state, IMGU_NODE_IN) = try_crop;
return 0;
}
@@ -136,7 +136,7 @@ static int imgu_subdev_get_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
fmt->format = imgu_pipe->nodes[pad].pad_fmt;
} else {
- mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
+ mf = v4l2_subdev_state_get_format(sd_state, pad);
fmt->format = *mf;
}
@@ -161,7 +161,7 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd,
imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
- mf = v4l2_subdev_get_try_format(sd, sd_state, pad);
+ mf = v4l2_subdev_state_get_format(sd_state, pad);
else
mf = &imgu_pipe->nodes[pad].pad_fmt;
@@ -194,7 +194,7 @@ imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&sd->subdev, sd_state, pad);
+ return v4l2_subdev_state_get_crop(sd_state, pad);
else
return &sd->rect.eff;
}
@@ -205,7 +205,7 @@ imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_compose(&sd->subdev, sd_state, pad);
+ return v4l2_subdev_state_get_compose(sd_state, pad);
else
return &sd->rect.bds;
}
@@ -1198,7 +1198,7 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
vbq->buf_struct_size = imgu->buf_struct_size;
vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
/* can streamon w/o buffers */
- vbq->min_buffers_needed = 0;
+ vbq->min_queued_buffers = 0;
vbq->drv_priv = imgu;
vbq->lock = &node->lock;
r = vb2_queue_init(vbq);
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 219185aaa..de3e0345a 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -167,23 +167,24 @@ static void process_num_buffers(struct vb2_queue *q,
bool is_reqbufs)
{
const struct amvdec_format *fmt_out = sess->fmt_out;
- unsigned int buffers_total = q->num_buffers + *num_buffers;
+ unsigned int q_num_bufs = vb2_get_num_buffers(q);
+ unsigned int buffers_total = q_num_bufs + *num_buffers;
u32 min_buf_capture = v4l2_ctrl_g_ctrl(sess->ctrl_min_buf_capture);
- if (q->num_buffers + *num_buffers < min_buf_capture)
- *num_buffers = min_buf_capture - q->num_buffers;
+ if (q_num_bufs + *num_buffers < min_buf_capture)
+ *num_buffers = min_buf_capture - q_num_bufs;
if (is_reqbufs && buffers_total < fmt_out->min_buffers)
- *num_buffers = fmt_out->min_buffers - q->num_buffers;
+ *num_buffers = fmt_out->min_buffers - q_num_bufs;
if (buffers_total > fmt_out->max_buffers)
- *num_buffers = fmt_out->max_buffers - q->num_buffers;
+ *num_buffers = fmt_out->max_buffers - q_num_bufs;
/* We need to program the complete CAPTURE buffer list
* in registers during start_streaming, and the firmwares
* are free to choose any of them to write frames to. As such,
* we need all of them to be queued into the driver
*/
- sess->num_dst_bufs = q->num_buffers + *num_buffers;
- q->min_buffers_needed = max(fmt_out->min_buffers, sess->num_dst_bufs);
+ sess->num_dst_bufs = q_num_bufs + *num_buffers;
+ q->min_queued_buffers = max(fmt_out->min_buffers, sess->num_dst_bufs);
}
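A worked example of the adjustment above, with illustrative numbers, on a REQBUFS call:

	/*
	 * q_num_bufs = 2 queued, *num_buffers = 1 requested,
	 * min_buf_capture = 6, fmt_out->min_buffers = 2, max_buffers = 24:
	 *
	 *   2 + 1 < 6                        ->  *num_buffers = 6 - 2 = 4
	 *   buffers_total = 3, within [2,24] ->  no further adjustment
	 *   sess->num_dst_bufs = 2 + 4 = 6
	 *   q->min_queued_buffers = max(2, 6) = 6
	 */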
static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
@@ -824,7 +825,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->drv_priv = sess;
src_vq->buf_struct_size = sizeof(struct dummy_buf);
- src_vq->min_buffers_needed = 1;
+ src_vq->min_queued_buffers = 1;
src_vq->dev = sess->core->dev;
src_vq->lock = &sess->lock;
ret = vb2_queue_init(src_vq);
@@ -838,7 +839,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->drv_priv = sess;
dst_vq->buf_struct_size = sizeof(struct dummy_buf);
- dst_vq->min_buffers_needed = 1;
+ dst_vq->min_queued_buffers = 1;
dst_vq->dev = sess->core->dev;
dst_vq->lock = &sess->lock;
return vb2_queue_init(dst_vq);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index d2844414d..0e6c5bd81 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -830,8 +830,7 @@ __csi2_get_format(struct iss_csi2_device *csi2,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &csi2->formats[pad];
}
@@ -891,7 +890,7 @@ csi2_try_format(struct iss_csi2_device *csi2,
/*
* csi2_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -965,7 +964,7 @@ static int csi2_enum_frame_size(struct v4l2_subdev *sd,
/*
* csi2_get_format - Handle get format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
@@ -987,7 +986,7 @@ static int csi2_get_format(struct v4l2_subdev *sd,
/*
* csi2_set_format - Handle set format by pads subdev method
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index 23f707cb3..4a4eae290 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -180,8 +180,7 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipe->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &ipipe->formats[pad];
}
@@ -189,7 +188,7 @@ __ipipe_get_format(struct iss_ipipe_device *ipipe,
/*
* ipipe_try_format - Try video format on a pad
* @ipipe: ISS IPIPE device
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @pad: Pad number
* @fmt: Format
*/
@@ -240,7 +239,7 @@ ipipe_try_format(struct iss_ipipe_device *ipipe,
/*
* ipipe_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -304,7 +303,7 @@ static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
/*
* ipipe_get_format - Retrieve the video format on a pad
* @sd : ISP IPIPE V4L2 subdevice
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
@@ -328,7 +327,7 @@ static int ipipe_get_format(struct v4l2_subdev *sd,
/*
* ipipe_set_format - Set the video format on a pad
* @sd : ISP IPIPE V4L2 subdevice
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index 5e7f25cd5..8fa99532d 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -361,15 +361,14 @@ __ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&ipipeif->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &ipipeif->formats[pad];
}
/*
* ipipeif_try_format - Try video format on a pad
* @ipipeif: ISS IPIPEIF device
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @pad: Pad number
* @fmt: Format
*/
@@ -440,7 +439,7 @@ ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
/*
* ipipeif_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg : V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -512,7 +511,7 @@ static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
/*
* ipipeif_get_format - Retrieve the video format on a pad
* @sd : ISP IPIPEIF V4L2 subdevice
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
@@ -536,7 +535,7 @@ static int ipipeif_get_format(struct v4l2_subdev *sd,
/*
* ipipeif_set_format - Set the video format on a pad
* @sd : ISP IPIPEIF V4L2 subdevice
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index a5f8f9f1a..58e698ef9 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -420,15 +420,14 @@ __resizer_get_format(struct iss_resizer_device *resizer,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&resizer->subdev, sd_state,
- pad);
+ return v4l2_subdev_state_get_format(sd_state, pad);
return &resizer->formats[pad];
}
/*
* resizer_try_format - Try video format on a pad
* @resizer: ISS RESIZER device
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @pad: Pad number
* @fmt: Format
*/
@@ -489,7 +488,7 @@ resizer_try_format(struct iss_resizer_device *resizer,
/*
* resizer_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
@@ -572,7 +571,7 @@ static int resizer_enum_frame_size(struct v4l2_subdev *sd,
/*
* resizer_get_format - Retrieve the video format on a pad
* @sd : ISP RESIZER V4L2 subdevice
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
@@ -596,7 +595,7 @@ static int resizer_get_format(struct v4l2_subdev *sd,
/*
* resizer_set_format - Set the video format on a pad
* @sd : ISP RESIZER V4L2 subdevice
- * @cfg: V4L2 subdev pad config
+ * @sd_state: V4L2 subdev state
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
diff --git a/drivers/staging/media/rkvdec/Kconfig b/drivers/staging/media/rkvdec/Kconfig
index e963d60cc..5f3bdd848 100644
--- a/drivers/staging/media/rkvdec/Kconfig
+++ b/drivers/staging/media/rkvdec/Kconfig
@@ -4,7 +4,6 @@ config VIDEO_ROCKCHIP_VDEC
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on VIDEO_DEV
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/starfive/Kconfig b/drivers/staging/media/starfive/Kconfig
new file mode 100644
index 000000000..34727cf56
--- /dev/null
+++ b/drivers/staging/media/starfive/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "StarFive media platform drivers"
+
+source "drivers/staging/media/starfive/camss/Kconfig"
diff --git a/drivers/staging/media/starfive/Makefile b/drivers/staging/media/starfive/Makefile
new file mode 100644
index 000000000..4639fa1bc
--- /dev/null
+++ b/drivers/staging/media/starfive/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += camss/
diff --git a/drivers/staging/media/starfive/camss/Kconfig b/drivers/staging/media/starfive/camss/Kconfig
new file mode 100644
index 000000000..9ea5708fe
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STARFIVE_CAMSS
+	tristate "StarFive Camera Subsystem driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on HAS_DMA
+ depends on PM
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+	  Enable this option to add support for the StarFive Camera
+	  Subsystem found on the StarFive JH7110 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called starfive-camss.
diff --git a/drivers/staging/media/starfive/camss/Makefile b/drivers/staging/media/starfive/camss/Makefile
new file mode 100644
index 000000000..005790202
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for StarFive Camera Subsystem driver
+#
+
+starfive-camss-objs += \
+ stf-camss.o \
+ stf-capture.o \
+ stf-isp.o \
+ stf-isp-hw-ops.o \
+ stf-video.o
+
+obj-$(CONFIG_VIDEO_STARFIVE_CAMSS) += starfive-camss.o
diff --git a/drivers/staging/media/starfive/camss/TODO.txt b/drivers/staging/media/starfive/camss/TODO.txt
new file mode 100644
index 000000000..7d459f4f0
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/TODO.txt
@@ -0,0 +1,4 @@
+Unstaging requirements:
+- Add userspace support that demonstrates the ability to receive statistics and
+  adapt the hardware module configuration accordingly;
+- Add documentation describing the statistics data structures;
diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c
new file mode 100644
index 000000000..a587f8601
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-camss.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf-camss.c
+ *
+ * StarFive Camera Subsystem driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jack Zhu <jack.zhu@starfivetech.com>
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "stf-camss.h"
+
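+/*
+ * Clock and reset names consumed by the driver.  stfcamss_probe() feeds
+ * them to the bulk clk/reset APIs, so each entry must match a
+ * clock-names/reset-names string in the device tree node.
+ */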
+static const char * const stfcamss_clocks[] = {
+ "wrapper_clk_c",
+ "ispcore_2x",
+ "isp_axi",
+};
+
+static const char * const stfcamss_resets[] = {
+ "wrapper_p",
+ "wrapper_c",
+ "axiwr",
+ "isp_top_n",
+ "isp_top_axi",
+};
+
+static const struct stf_isr_data stf_isrs[] = {
+ {"wr_irq", stf_wr_irq_handler},
+ {"isp_irq", stf_isp_irq_handler},
+ {"line_irq", stf_line_irq_handler},
+};
+
+static int stfcamss_get_mem_res(struct stfcamss *stfcamss)
+{
+ struct platform_device *pdev = to_platform_device(stfcamss->dev);
+
+ stfcamss->syscon_base =
+ devm_platform_ioremap_resource_byname(pdev, "syscon");
+ if (IS_ERR(stfcamss->syscon_base))
+ return PTR_ERR(stfcamss->syscon_base);
+
+ stfcamss->isp_base = devm_platform_ioremap_resource_byname(pdev, "isp");
+ if (IS_ERR(stfcamss->isp_base))
+ return PTR_ERR(stfcamss->isp_base);
+
+ return 0;
+}
+
+/*
+ * stfcamss_of_parse_endpoint_node - Parse port endpoint node
+ * @stfcamss: STFCAMSS device
+ * @node: Device node to be parsed
+ * @csd: Parsed data from port endpoint node
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int stfcamss_of_parse_endpoint_node(struct stfcamss *stfcamss,
+ struct device_node *node,
+ struct stfcamss_async_subdev *csd)
+{
+ struct v4l2_fwnode_endpoint vep = { { 0 } };
+ int ret;
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
+ if (ret) {
+ dev_err(stfcamss->dev, "endpoint not defined at %pOF\n", node);
+ return ret;
+ }
+
+ csd->port = vep.base.port;
+
+ return 0;
+}
+
+/*
+ * stfcamss_of_parse_ports - Parse ports node
+ * @stfcamss: STFCAMSS device
+ *
+ * Return the number of endpoints found, or a negative error code on failure
+ */
+static int stfcamss_of_parse_ports(struct stfcamss *stfcamss)
+{
+ struct device_node *node = NULL;
+ int ret, num_subdevs = 0;
+
+ for_each_endpoint_of_node(stfcamss->dev->of_node, node) {
+ struct stfcamss_async_subdev *csd;
+
+ if (!of_device_is_available(node))
+ continue;
+
+ csd = v4l2_async_nf_add_fwnode_remote(&stfcamss->notifier,
+ of_fwnode_handle(node),
+ struct stfcamss_async_subdev);
+ if (IS_ERR(csd)) {
+ ret = PTR_ERR(csd);
+ dev_err(stfcamss->dev, "failed to add async notifier\n");
+ goto err_cleanup;
+ }
+
+ ret = stfcamss_of_parse_endpoint_node(stfcamss, node, csd);
+ if (ret)
+ goto err_cleanup;
+
+ num_subdevs++;
+ }
+
+ return num_subdevs;
+
+err_cleanup:
+ of_node_put(node);
+ return ret;
+}
+
+static int stfcamss_register_devs(struct stfcamss *stfcamss)
+{
+ struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+ int ret;
+
+ ret = stf_isp_register(isp_dev, &stfcamss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(stfcamss->dev,
+ "failed to register stf isp%d entity: %d\n", 0, ret);
+ return ret;
+ }
+
+ ret = stf_capture_register(stfcamss, &stfcamss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(stfcamss->dev,
+ "failed to register capture: %d\n", ret);
+ goto err_isp_unregister;
+ }
+
+ ret = media_create_pad_link(&isp_dev->subdev.entity, STF_ISP_PAD_SRC,
+ &cap_yuv->video.vdev.entity, 0, 0);
+ if (ret)
+ goto err_cap_unregister;
+
+ cap_yuv->video.source_subdev = &isp_dev->subdev;
+
+ return ret;
+
+err_cap_unregister:
+ stf_capture_unregister(stfcamss);
+err_isp_unregister:
+ stf_isp_unregister(&stfcamss->isp_dev);
+
+ return ret;
+}
+
+static void stfcamss_unregister_devs(struct stfcamss *stfcamss)
+{
+ stf_isp_unregister(&stfcamss->isp_dev);
+ stf_capture_unregister(stfcamss);
+}
+
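+/*
+ * Async notifier callbacks: ->bound runs once the remote CSI-2 receiver
+ * subdev appears and links it to the ISP sink pad and to the raw capture
+ * video node; ->complete then creates the subdev device nodes.
+ */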
+static int stfcamss_subdev_notifier_bound(struct v4l2_async_notifier *async,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct stfcamss *stfcamss =
+ container_of(async, struct stfcamss, notifier);
+ struct stfcamss_async_subdev *csd =
+ container_of(asc, struct stfcamss_async_subdev, asd);
+ enum stf_port_num port = csd->port;
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+ struct stf_capture *cap_raw = &stfcamss->captures[STF_CAPTURE_RAW];
+ struct media_pad *pad;
+ int ret;
+
+ if (port == STF_PORT_CSI2RX) {
+ pad = &isp_dev->pads[STF_ISP_PAD_SINK];
+ } else {
+ dev_err(stfcamss->dev, "not support port %d\n", port);
+ return -EPERM;
+ }
+
+ ret = v4l2_create_fwnode_links_to_pad(subdev, pad, 0);
+ if (ret)
+ return ret;
+
+ ret = media_create_pad_link(&subdev->entity, 1,
+ &cap_raw->video.vdev.entity, 0, 0);
+ if (ret)
+ return ret;
+
+ isp_dev->source_subdev = subdev;
+ cap_raw->video.source_subdev = subdev;
+
+ return 0;
+}
+
+static int stfcamss_subdev_notifier_complete(struct v4l2_async_notifier *ntf)
+{
+ struct stfcamss *stfcamss =
+ container_of(ntf, struct stfcamss, notifier);
+
+ return v4l2_device_register_subdev_nodes(&stfcamss->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations
+stfcamss_subdev_notifier_ops = {
+ .bound = stfcamss_subdev_notifier_bound,
+ .complete = stfcamss_subdev_notifier_complete,
+};
+
+static void stfcamss_mc_init(struct platform_device *pdev,
+ struct stfcamss *stfcamss)
+{
+ stfcamss->media_dev.dev = stfcamss->dev;
+ strscpy(stfcamss->media_dev.model, "Starfive Camera Subsystem",
+ sizeof(stfcamss->media_dev.model));
+ media_device_init(&stfcamss->media_dev);
+
+ stfcamss->v4l2_dev.mdev = &stfcamss->media_dev;
+}
+
+/*
+ * stfcamss_probe - Probe STFCAMSS platform device
+ * @pdev: Pointer to STFCAMSS platform device
+ *
+ * Return 0 on success or a negative error code on failure
+ */
+static int stfcamss_probe(struct platform_device *pdev)
+{
+ struct stfcamss *stfcamss;
+ struct device *dev = &pdev->dev;
+ int ret, num_subdevs;
+ unsigned int i;
+
+ stfcamss = devm_kzalloc(dev, sizeof(*stfcamss), GFP_KERNEL);
+ if (!stfcamss)
+ return -ENOMEM;
+
+ stfcamss->dev = dev;
+
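+	/*
+	 * IRQs are requested by index, so the interrupt order in the device
+	 * tree is assumed to follow stf_isrs[]: write-back, ISP, line.
+	 */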
+ for (i = 0; i < ARRAY_SIZE(stf_isrs); ++i) {
+ int irq;
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(stfcamss->dev, irq, stf_isrs[i].isr, 0,
+ stf_isrs[i].name, stfcamss);
+ if (ret) {
+ dev_err(dev, "request irq failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ stfcamss->nclks = ARRAY_SIZE(stfcamss->sys_clk);
+ for (i = 0; i < stfcamss->nclks; ++i)
+ stfcamss->sys_clk[i].id = stfcamss_clocks[i];
+ ret = devm_clk_bulk_get(dev, stfcamss->nclks, stfcamss->sys_clk);
+ if (ret) {
+ dev_err(dev, "Failed to get clk controls\n");
+ return ret;
+ }
+
+ stfcamss->nrsts = ARRAY_SIZE(stfcamss->sys_rst);
+ for (i = 0; i < stfcamss->nrsts; ++i)
+ stfcamss->sys_rst[i].id = stfcamss_resets[i];
+ ret = devm_reset_control_bulk_get_shared(dev, stfcamss->nrsts,
+ stfcamss->sys_rst);
+ if (ret) {
+ dev_err(dev, "Failed to get reset controls\n");
+ return ret;
+ }
+
+ ret = stfcamss_get_mem_res(stfcamss);
+ if (ret) {
+ dev_err(dev, "Could not map registers\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, stfcamss);
+
+ v4l2_async_nf_init(&stfcamss->notifier, &stfcamss->v4l2_dev);
+
+ num_subdevs = stfcamss_of_parse_ports(stfcamss);
+ if (num_subdevs < 0) {
+		ret = num_subdevs;
+		dev_err(dev, "Failed to get subdevices: %d\n", ret);
+ goto err_cleanup_notifier;
+ }
+
+ ret = stf_isp_init(stfcamss);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init isp: %d\n", ret);
+ goto err_cleanup_notifier;
+ }
+
+ stfcamss_mc_init(pdev, stfcamss);
+
+ ret = v4l2_device_register(stfcamss->dev, &stfcamss->v4l2_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
+ goto err_cleanup_media_device;
+ }
+
+ ret = media_device_register(&stfcamss->media_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register media device: %d\n", ret);
+ goto err_unregister_device;
+ }
+
+ ret = stfcamss_register_devs(stfcamss);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register subdevice: %d\n", ret);
+ goto err_unregister_media_dev;
+ }
+
+ pm_runtime_enable(dev);
+
+ stfcamss->notifier.ops = &stfcamss_subdev_notifier_ops;
+ ret = v4l2_async_nf_register(&stfcamss->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register async subdev nodes: %d\n",
+ ret);
+ pm_runtime_disable(dev);
+ goto err_unregister_subdevs;
+ }
+
+ return 0;
+
+err_unregister_subdevs:
+ stfcamss_unregister_devs(stfcamss);
+err_unregister_media_dev:
+ media_device_unregister(&stfcamss->media_dev);
+err_unregister_device:
+ v4l2_device_unregister(&stfcamss->v4l2_dev);
+err_cleanup_media_device:
+ media_device_cleanup(&stfcamss->media_dev);
+err_cleanup_notifier:
+ v4l2_async_nf_cleanup(&stfcamss->notifier);
+ return ret;
+}
+
+/*
+ * stfcamss_remove - Remove STFCAMSS platform device
+ * @pdev: Pointer to STFCAMSS platform device
+ *
+ * Always returns 0.
+ */
+static int stfcamss_remove(struct platform_device *pdev)
+{
+ struct stfcamss *stfcamss = platform_get_drvdata(pdev);
+
+ stfcamss_unregister_devs(stfcamss);
+ v4l2_device_unregister(&stfcamss->v4l2_dev);
+ media_device_cleanup(&stfcamss->media_dev);
+ v4l2_async_nf_cleanup(&stfcamss->notifier);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id stfcamss_of_match[] = {
+ { .compatible = "starfive,jh7110-camss" },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, stfcamss_of_match);
+
+static int __maybe_unused stfcamss_runtime_suspend(struct device *dev)
+{
+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_bulk_assert(stfcamss->nrsts, stfcamss->sys_rst);
+ if (ret) {
+ dev_err(dev, "reset assert failed\n");
+ return ret;
+ }
+
+ clk_bulk_disable_unprepare(stfcamss->nclks, stfcamss->sys_clk);
+
+ return 0;
+}
+
+static int __maybe_unused stfcamss_runtime_resume(struct device *dev)
+{
+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(stfcamss->nclks, stfcamss->sys_clk);
+ if (ret) {
+ dev_err(dev, "clock prepare enable failed\n");
+ return ret;
+ }
+
+ ret = reset_control_bulk_deassert(stfcamss->nrsts, stfcamss->sys_rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ clk_bulk_disable_unprepare(stfcamss->nclks, stfcamss->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops stfcamss_pm_ops = {
+ SET_RUNTIME_PM_OPS(stfcamss_runtime_suspend,
+ stfcamss_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver stfcamss_driver = {
+ .probe = stfcamss_probe,
+ .remove = stfcamss_remove,
+ .driver = {
+ .name = "starfive-camss",
+ .pm = &stfcamss_pm_ops,
+ .of_match_table = stfcamss_of_match,
+ },
+};
+
+module_platform_driver(stfcamss_driver);
+
+MODULE_AUTHOR("Jack Zhu <jack.zhu@starfivetech.com>");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
+MODULE_DESCRIPTION("StarFive Camera Subsystem driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/starfive/camss/stf-camss.h b/drivers/staging/media/starfive/camss/stf-camss.h
new file mode 100644
index 000000000..e2b0cfb43
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-camss.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf-camss.h
+ *
+ * StarFive Camera Subsystem driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_CAMSS_H
+#define STF_CAMSS_H
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+
+#include "stf-isp.h"
+#include "stf-capture.h"
+
+enum stf_port_num {
+ STF_PORT_DVP = 0,
+ STF_PORT_CSI2RX
+};
+
+enum stf_clk {
+ STF_CLK_WRAPPER_CLK_C = 0,
+ STF_CLK_ISPCORE_2X,
+ STF_CLK_ISP_AXI,
+ STF_CLK_NUM
+};
+
+enum stf_rst {
+ STF_RST_WRAPPER_P = 0,
+ STF_RST_WRAPPER_C,
+ STF_RST_AXIWR,
+ STF_RST_ISP_TOP_N,
+ STF_RST_ISP_TOP_AXI,
+ STF_RST_NUM
+};
+
+struct stf_isr_data {
+ const char *name;
+ irqreturn_t (*isr)(int irq, void *priv);
+};
+
+struct stfcamss {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct media_pipeline pipe;
+ struct device *dev;
+ struct stf_isp_dev isp_dev;
+ struct stf_capture captures[STF_CAPTURE_NUM];
+ struct v4l2_async_notifier notifier;
+ void __iomem *syscon_base;
+ void __iomem *isp_base;
+ struct clk_bulk_data sys_clk[STF_CLK_NUM];
+ int nclks;
+ struct reset_control_bulk_data sys_rst[STF_RST_NUM];
+ int nrsts;
+};
+
+struct stfcamss_async_subdev {
+ struct v4l2_async_connection asd; /* must be first */
+ enum stf_port_num port;
+};
+
+static inline u32 stf_isp_reg_read(struct stfcamss *stfcamss, u32 reg)
+{
+ return ioread32(stfcamss->isp_base + reg);
+}
+
+static inline void stf_isp_reg_write(struct stfcamss *stfcamss,
+ u32 reg, u32 val)
+{
+ iowrite32(val, stfcamss->isp_base + reg);
+}
+
+static inline void stf_isp_reg_write_delay(struct stfcamss *stfcamss,
+ u32 reg, u32 val, u32 delay)
+{
+ iowrite32(val, stfcamss->isp_base + reg);
+ usleep_range(1000 * delay, 1000 * delay + 100);
+}
+
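+/*
+ * Read-modify-write helper: the bits selected by @mask are replaced with
+ * the corresponding bits of @val and all other bits are preserved, e.g.
+ * from stf_isp_reset():
+ *
+ *   stf_isp_reg_set_bit(stfcamss, ISP_REG_ISP_CTRL_0, ISPC_RST_MASK, ISPC_RST);
+ */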
+static inline void stf_isp_reg_set_bit(struct stfcamss *stfcamss,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 value;
+
+ value = ioread32(stfcamss->isp_base + reg) & ~mask;
+ val &= mask;
+ val |= value;
+ iowrite32(val, stfcamss->isp_base + reg);
+}
+
+static inline void stf_isp_reg_set(struct stfcamss *stfcamss, u32 reg, u32 mask)
+{
+ iowrite32(ioread32(stfcamss->isp_base + reg) | mask,
+ stfcamss->isp_base + reg);
+}
+
+static inline u32 stf_syscon_reg_read(struct stfcamss *stfcamss, u32 reg)
+{
+ return ioread32(stfcamss->syscon_base + reg);
+}
+
+static inline void stf_syscon_reg_write(struct stfcamss *stfcamss,
+ u32 reg, u32 val)
+{
+ iowrite32(val, stfcamss->syscon_base + reg);
+}
+
+static inline void stf_syscon_reg_set_bit(struct stfcamss *stfcamss,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(stfcamss->syscon_base + reg);
+ iowrite32(value | bit_mask, stfcamss->syscon_base + reg);
+}
+
+static inline void stf_syscon_reg_clear_bit(struct stfcamss *stfcamss,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(stfcamss->syscon_base + reg);
+ iowrite32(value & ~bit_mask, stfcamss->syscon_base + reg);
+}
+#endif /* STF_CAMSS_H */
diff --git a/drivers/staging/media/starfive/camss/stf-capture.c b/drivers/staging/media/starfive/camss/stf-capture.c
new file mode 100644
index 000000000..ec5169e7b
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-capture.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf-capture.c
+ *
+ * StarFive Camera Subsystem - capture device
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#include "stf-camss.h"
+
+static const char * const stf_cap_names[] = {
+ "capture_raw",
+ "capture_yuv",
+};
+
+static const struct stfcamss_format_info stf_wr_fmts[] = {
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .planes = 1,
+ .vsub = { 1 },
+ .bpp = 16,
+ },
+};
+
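+/*
+ * The YUV capture node only exposes NV12: two planes, with the chroma
+ * plane vertically subsampled by two (vsub = { 1, 2 }).
+ */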
+static const struct stfcamss_format_info stf_isp_fmts[] = {
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_1_5X8,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .planes = 2,
+ .vsub = { 1, 2 },
+ .bpp = 8,
+ },
+};
+
+static inline struct stf_capture *to_stf_capture(struct stfcamss_video *video)
+{
+ return container_of(video, struct stf_capture, video);
+}
+
+static void stf_set_raw_addr(struct stfcamss *stfcamss, dma_addr_t addr)
+{
+ stf_syscon_reg_write(stfcamss, VIN_START_ADDR_O, (long)addr);
+ stf_syscon_reg_write(stfcamss, VIN_START_ADDR_N, (long)addr);
+}
+
+static void stf_set_yuv_addr(struct stfcamss *stfcamss,
+ dma_addr_t y_addr, dma_addr_t uv_addr)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_Y_PLANE_START_ADDR, y_addr);
+ stf_isp_reg_write(stfcamss, ISP_REG_UV_PLANE_START_ADDR, uv_addr);
+}
+
+static void stf_init_addrs(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+ dma_addr_t addr0, addr1;
+
+ output->active_buf = 0;
+
+ if (!output->buf[0])
+ return;
+
+ addr0 = output->buf[0]->addr[0];
+ addr1 = output->buf[0]->addr[1];
+
+ if (cap->type == STF_CAPTURE_RAW)
+ stf_set_raw_addr(video->stfcamss, addr0);
+ else if (cap->type == STF_CAPTURE_YUV)
+ stf_set_yuv_addr(video->stfcamss, addr0, addr1);
+}
+
+static struct stfcamss_buffer *stf_buf_get_pending(struct stf_v_buf *output)
+{
+ struct stfcamss_buffer *buffer = NULL;
+
+ if (!list_empty(&output->pending_bufs)) {
+ buffer = list_first_entry(&output->pending_bufs,
+ struct stfcamss_buffer,
+ queue);
+ list_del(&buffer->queue);
+ }
+
+ return buffer;
+}
+
+static void stf_cap_s_cfg(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ output->state = STF_OUTPUT_IDLE;
+ output->buf[0] = stf_buf_get_pending(output);
+
+ if (!output->buf[0] && output->buf[1]) {
+ output->buf[0] = output->buf[1];
+ output->buf[1] = NULL;
+ }
+
+ if (output->buf[0])
+ output->state = STF_OUTPUT_SINGLE;
+
+ output->sequence = 0;
+ stf_init_addrs(video);
+
+ spin_unlock_irqrestore(&output->lock, flags);
+}
+
+static int stf_cap_s_cleanup(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ output->state = STF_OUTPUT_OFF;
+
+ spin_unlock_irqrestore(&output->lock, flags);
+
+ return 0;
+}
+
+static void stf_wr_data_en(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+
+ stf_syscon_reg_set_bit(stfcamss, VIN_CHANNEL_SEL_EN, U0_VIN_AXIWR0_EN);
+}
+
+static void stf_wr_irq_enable(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+
+ stf_syscon_reg_clear_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_M);
+}
+
+static void stf_wr_irq_disable(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+
+ stf_syscon_reg_set_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+ stf_syscon_reg_clear_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+ stf_syscon_reg_set_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_M);
+}
+
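+/*
+ * Route the video input: the raw path selects channel 0 of the AXI write
+ * interface and its pixel packing, while the YUV path configures the MIPI
+ * byte/channel selection feeding ISP0.
+ */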
+static void stf_channel_set(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+ u32 val;
+
+ if (cap->type == STF_CAPTURE_RAW) {
+ val = stf_syscon_reg_read(stfcamss, VIN_CHANNEL_SEL_EN);
+ val &= ~U0_VIN_CHANNEL_SEL_MASK;
+ val |= CHANNEL(0);
+ stf_syscon_reg_write(stfcamss, VIN_CHANNEL_SEL_EN, val);
+
+ val = stf_syscon_reg_read(stfcamss, VIN_INRT_PIX_CFG);
+ val &= ~U0_VIN_PIX_CT_MASK;
+ val |= PIX_CT(1);
+
+ val &= ~U0_VIN_PIXEL_HEIGH_BIT_SEL_MAKS;
+ val |= PIXEL_HEIGH_BIT_SEL(0);
+
+ val &= ~U0_VIN_PIX_CNT_END_MASK;
+ val |= PIX_CNT_END(IMAGE_MAX_WIDTH / 4 - 1);
+
+ stf_syscon_reg_write(stfcamss, VIN_INRT_PIX_CFG, val);
+ } else if (cap->type == STF_CAPTURE_YUV) {
+ val = stf_syscon_reg_read(stfcamss, VIN_CFG_REG);
+ val &= ~U0_VIN_MIPI_BYTE_EN_ISP0_MASK;
+ val |= U0_VIN_MIPI_BYTE_EN_ISP0(0);
+
+ val &= ~U0_VIN_MIPI_CHANNEL_SEL0_MASK;
+ val |= U0_VIN_MIPI_CHANNEL_SEL0(0);
+
+ val &= ~U0_VIN_PIX_NUM_MASK;
+ val |= U0_VIN_PIX_NUM(0);
+
+ val &= ~U0_VIN_P_I_MIPI_HAEDER_EN0_MASK;
+ val |= U0_VIN_P_I_MIPI_HAEDER_EN0(1);
+
+ stf_syscon_reg_write(stfcamss, VIN_CFG_REG, val);
+ }
+}
+
+static void stf_capture_start(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+
+ stf_channel_set(video);
+ if (cap->type == STF_CAPTURE_RAW) {
+ stf_wr_irq_enable(video);
+ stf_wr_data_en(video);
+ }
+
+ stf_cap_s_cfg(video);
+}
+
+static void stf_capture_stop(struct stfcamss_video *video)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+
+ if (cap->type == STF_CAPTURE_RAW)
+ stf_wr_irq_disable(video);
+
+ stf_cap_s_cleanup(video);
+}
+
+static void stf_capture_init(struct stfcamss *stfcamss, struct stf_capture *cap)
+{
+ cap->buffers.state = STF_OUTPUT_OFF;
+ cap->buffers.buf[0] = NULL;
+ cap->buffers.buf[1] = NULL;
+ cap->buffers.active_buf = 0;
+ atomic_set(&cap->buffers.frame_skip, 4);
+ INIT_LIST_HEAD(&cap->buffers.pending_bufs);
+ INIT_LIST_HEAD(&cap->buffers.ready_bufs);
+ spin_lock_init(&cap->buffers.lock);
+
+ cap->video.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ cap->video.stfcamss = stfcamss;
+ cap->video.bpl_alignment = 16 * 8;
+
+ if (cap->type == STF_CAPTURE_RAW) {
+ cap->video.formats = stf_wr_fmts;
+ cap->video.nformats = ARRAY_SIZE(stf_wr_fmts);
+ cap->video.bpl_alignment = 8;
+ } else if (cap->type == STF_CAPTURE_YUV) {
+ cap->video.formats = stf_isp_fmts;
+ cap->video.nformats = ARRAY_SIZE(stf_isp_fmts);
+ cap->video.bpl_alignment = 1;
+ }
+}
+
+static void stf_buf_add_ready(struct stf_v_buf *output,
+ struct stfcamss_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->queue);
+ list_add_tail(&buffer->queue, &output->ready_bufs);
+}
+
+static struct stfcamss_buffer *stf_buf_get_ready(struct stf_v_buf *output)
+{
+ struct stfcamss_buffer *buffer = NULL;
+
+ if (!list_empty(&output->ready_bufs)) {
+ buffer = list_first_entry(&output->ready_bufs,
+ struct stfcamss_buffer,
+ queue);
+ list_del(&buffer->queue);
+ }
+
+ return buffer;
+}
+
+static void stf_buf_add_pending(struct stf_v_buf *output,
+ struct stfcamss_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->queue);
+ list_add_tail(&buffer->queue, &output->pending_bufs);
+}
+
+static void stf_buf_update_on_last(struct stf_v_buf *output)
+{
+ switch (output->state) {
+ case STF_OUTPUT_CONTINUOUS:
+ output->state = STF_OUTPUT_SINGLE;
+ output->active_buf = !output->active_buf;
+ break;
+ case STF_OUTPUT_SINGLE:
+ output->state = STF_OUTPUT_STOPPING;
+ break;
+ default:
+ break;
+ }
+}
+
+static void stf_buf_update_on_next(struct stf_v_buf *output)
+{
+ switch (output->state) {
+ case STF_OUTPUT_CONTINUOUS:
+ output->active_buf = !output->active_buf;
+ break;
+ case STF_OUTPUT_SINGLE:
+ default:
+ break;
+ }
+}
+
+static void stf_buf_update_on_new(struct stfcamss_video *video,
+ struct stfcamss_buffer *new_buf)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *output = &cap->buffers;
+
+ switch (output->state) {
+ case STF_OUTPUT_SINGLE:
+ stf_buf_add_pending(output, new_buf);
+ break;
+ case STF_OUTPUT_IDLE:
+ if (!output->buf[0]) {
+ output->buf[0] = new_buf;
+ stf_init_addrs(video);
+ output->state = STF_OUTPUT_SINGLE;
+ } else {
+ stf_buf_add_pending(output, new_buf);
+ }
+ break;
+ case STF_OUTPUT_STOPPING:
+ if (output->last_buffer) {
+ output->buf[output->active_buf] = output->last_buffer;
+ output->last_buffer = NULL;
+ }
+
+ output->state = STF_OUTPUT_SINGLE;
+ stf_buf_add_pending(output, new_buf);
+ break;
+ case STF_OUTPUT_CONTINUOUS:
+ default:
+ stf_buf_add_pending(output, new_buf);
+ break;
+ }
+}
+
+static void stf_buf_flush(struct stf_v_buf *output, enum vb2_buffer_state state)
+{
+ struct stfcamss_buffer *buf;
+ struct stfcamss_buffer *t;
+
+ list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ list_for_each_entry_safe(buf, t, &output->ready_bufs, queue) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+}
+
+static void stf_buf_done(struct stf_v_buf *output)
+{
+ struct stfcamss_buffer *ready_buf;
+ u64 ts = ktime_get_ns();
+ unsigned long flags;
+
+ if (output->state == STF_OUTPUT_OFF ||
+ output->state == STF_OUTPUT_RESERVED)
+ return;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ while ((ready_buf = stf_buf_get_ready(output))) {
+ ready_buf->vb.vb2_buf.timestamp = ts;
+ ready_buf->vb.sequence = output->sequence++;
+
+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ spin_unlock_irqrestore(&output->lock, flags);
+}
+
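+/*
+ * Frame-boundary buffer rotation: pick the next pending buffer and program
+ * its DMA address into the hardware (recycling the current address when the
+ * pending queue is empty), park the completed buffer as last_buffer when
+ * stopping, otherwise move it to the ready list for stf_buf_done().
+ */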
+static void stf_change_buffer(struct stf_v_buf *output)
+{
+ struct stf_capture *cap = container_of(output, struct stf_capture,
+ buffers);
+ struct stfcamss *stfcamss = cap->video.stfcamss;
+ struct stfcamss_buffer *ready_buf;
+ dma_addr_t *new_addr;
+ unsigned long flags;
+ u32 active_index;
+
+ if (output->state == STF_OUTPUT_OFF ||
+ output->state == STF_OUTPUT_STOPPING ||
+ output->state == STF_OUTPUT_RESERVED ||
+ output->state == STF_OUTPUT_IDLE)
+ return;
+
+ spin_lock_irqsave(&output->lock, flags);
+
+ active_index = output->active_buf;
+
+ ready_buf = output->buf[active_index];
+ if (!ready_buf) {
+ dev_dbg(stfcamss->dev, "missing ready buf %d %d.\n",
+ active_index, output->state);
+ active_index = !active_index;
+ ready_buf = output->buf[active_index];
+ if (!ready_buf) {
+ dev_dbg(stfcamss->dev,
+ "missing ready buf2 %d %d.\n",
+ active_index, output->state);
+ goto out_unlock;
+ }
+ }
+
+ /* Get next buffer */
+ output->buf[active_index] = stf_buf_get_pending(output);
+ if (!output->buf[active_index]) {
+ new_addr = ready_buf->addr;
+ stf_buf_update_on_last(output);
+ } else {
+ new_addr = output->buf[active_index]->addr;
+ stf_buf_update_on_next(output);
+ }
+
+ if (output->state == STF_OUTPUT_STOPPING) {
+ output->last_buffer = ready_buf;
+ } else {
+ if (cap->type == STF_CAPTURE_RAW)
+ stf_set_raw_addr(stfcamss, new_addr[0]);
+ else if (cap->type == STF_CAPTURE_YUV)
+ stf_set_yuv_addr(stfcamss, new_addr[0], new_addr[1]);
+
+ stf_buf_add_ready(output, ready_buf);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&output->lock, flags);
+}
+
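+/*
+ * Write-back IRQ: the first few frames are dropped while frame_skip
+ * (initialised to 4 in stf_capture_init()) counts down, presumably to let
+ * the pipeline settle; the interrupt is then acked by pulsing the CLEAN bit.
+ */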
+irqreturn_t stf_wr_irq_handler(int irq, void *priv)
+{
+ struct stfcamss *stfcamss = priv;
+ struct stf_capture *cap = &stfcamss->captures[STF_CAPTURE_RAW];
+
+ if (atomic_dec_if_positive(&cap->buffers.frame_skip) < 0) {
+ stf_change_buffer(&cap->buffers);
+ stf_buf_done(&cap->buffers);
+ }
+
+ stf_syscon_reg_set_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+ stf_syscon_reg_clear_bit(stfcamss, VIN_INRT_PIX_CFG, U0_VIN_INTR_CLEAN);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t stf_isp_irq_handler(int irq, void *priv)
+{
+ struct stfcamss *stfcamss = priv;
+ struct stf_capture *cap = &stfcamss->captures[STF_CAPTURE_YUV];
+ u32 status;
+
+ status = stf_isp_reg_read(stfcamss, ISP_REG_ISP_CTRL_0);
+ if (status & ISPC_ISP) {
+ if (status & ISPC_ENUO)
+ stf_buf_done(&cap->buffers);
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ISP_CTRL_0,
+ (status & ~ISPC_INT_ALL_MASK) |
+ ISPC_ISP | ISPC_CSI | ISPC_SC);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t stf_line_irq_handler(int irq, void *priv)
+{
+ struct stfcamss *stfcamss = priv;
+ struct stf_capture *cap = &stfcamss->captures[STF_CAPTURE_YUV];
+ u32 status;
+
+ status = stf_isp_reg_read(stfcamss, ISP_REG_ISP_CTRL_0);
+ if (status & ISPC_LINE) {
+ if (atomic_dec_if_positive(&cap->buffers.frame_skip) < 0) {
+			if (status & ISPC_ENUO)
+ stf_change_buffer(&cap->buffers);
+ }
+
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_CSIINTS,
+ CSI_INTS_MASK, CSI_INTS(0x3));
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_IESHD,
+ SHAD_UP_M | SHAD_UP_EN, 0x3);
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ISP_CTRL_0,
+ (status & ~ISPC_INT_ALL_MASK) | ISPC_LINE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int stf_queue_buffer(struct stfcamss_video *video,
+ struct stfcamss_buffer *buf)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *v_bufs = &cap->buffers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&v_bufs->lock, flags);
+ stf_buf_update_on_new(video, buf);
+ spin_unlock_irqrestore(&v_bufs->lock, flags);
+
+ return 0;
+}
+
+static int stf_flush_buffers(struct stfcamss_video *video,
+ enum vb2_buffer_state state)
+{
+ struct stf_capture *cap = to_stf_capture(video);
+ struct stf_v_buf *v_bufs = &cap->buffers;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&v_bufs->lock, flags);
+
+ stf_buf_flush(v_bufs, state);
+
+ for (i = 0; i < ARRAY_SIZE(v_bufs->buf); i++) {
+ if (v_bufs->buf[i])
+ vb2_buffer_done(&v_bufs->buf[i]->vb.vb2_buf, state);
+
+ v_bufs->buf[i] = NULL;
+ }
+
+ if (v_bufs->last_buffer) {
+ vb2_buffer_done(&v_bufs->last_buffer->vb.vb2_buf, state);
+ v_bufs->last_buffer = NULL;
+ }
+
+ spin_unlock_irqrestore(&v_bufs->lock, flags);
+ return 0;
+}
+
+static const struct stfcamss_video_ops stf_capture_ops = {
+ .queue_buffer = stf_queue_buffer,
+ .flush_buffers = stf_flush_buffers,
+ .start_streaming = stf_capture_start,
+ .stop_streaming = stf_capture_stop,
+};
+
+static void stf_capture_unregister_one(struct stf_capture *cap)
+{
+ if (!video_is_registered(&cap->video.vdev))
+ return;
+
+ media_entity_cleanup(&cap->video.vdev.entity);
+ vb2_video_unregister_device(&cap->video.vdev);
+}
+
+void stf_capture_unregister(struct stfcamss *stfcamss)
+{
+ struct stf_capture *cap_raw = &stfcamss->captures[STF_CAPTURE_RAW];
+ struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
+
+ stf_capture_unregister_one(cap_raw);
+ stf_capture_unregister_one(cap_yuv);
+}
+
+int stf_capture_register(struct stfcamss *stfcamss,
+ struct v4l2_device *v4l2_dev)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(stfcamss->captures); i++) {
+ struct stf_capture *capture = &stfcamss->captures[i];
+
+ capture->type = i;
+ capture->video.ops = &stf_capture_ops;
+ stf_capture_init(stfcamss, capture);
+
+ ret = stf_video_register(&capture->video, v4l2_dev,
+ stf_cap_names[i]);
+ if (ret < 0) {
+ dev_err(stfcamss->dev,
+ "Failed to register video node: %d\n", ret);
+ stf_capture_unregister(stfcamss);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/starfive/camss/stf-capture.h b/drivers/staging/media/starfive/camss/stf-capture.h
new file mode 100644
index 000000000..2f9740b7e
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-capture.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf-capture.h
+ *
+ * StarFive Camera Subsystem driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_CAPTURE_H
+#define STF_CAPTURE_H
+
+#include "stf-video.h"
+
+#define VIN_CHANNEL_SEL_EN 0x14
+#define VIN_START_ADDR_N 0x18
+#define VIN_INRT_PIX_CFG 0x1c
+#define VIN_START_ADDR_O 0x20
+#define VIN_CFG_REG 0x24
+
+#define U0_VIN_CNFG_AXI_DVP_EN BIT(2)
+
+#define U0_VIN_CHANNEL_SEL_MASK GENMASK(3, 0)
+#define U0_VIN_AXIWR0_EN BIT(4)
+#define CHANNEL(x) ((x) << 0)
+
+#define U0_VIN_INTR_CLEAN BIT(0)
+#define U0_VIN_INTR_M BIT(1)
+#define U0_VIN_PIX_CNT_END_MASK GENMASK(12, 2)
+#define U0_VIN_PIX_CT_MASK GENMASK(14, 13)
+#define U0_VIN_PIXEL_HEIGHT_BIT_SEL_MASK GENMASK(16, 15)
+
+#define PIX_CNT_END(x) ((x) << 2)
+#define PIX_CT(x) ((x) << 13)
+#define PIXEL_HEIGHT_BIT_SEL(x) ((x) << 15)
+
+#define U0_VIN_CNFG_DVP_HS_POS BIT(1)
+#define U0_VIN_CNFG_DVP_SWAP_EN BIT(2)
+#define U0_VIN_CNFG_DVP_VS_POS BIT(3)
+#define U0_VIN_CNFG_GEN_EN_AXIRD BIT(4)
+#define U0_VIN_CNFG_ISP_DVP_EN0 BIT(5)
+#define U0_VIN_MIPI_BYTE_EN_ISP0(n) ((n) << 6)
+#define U0_VIN_MIPI_CHANNEL_SEL0(n) ((n) << 8)
+#define U0_VIN_P_I_MIPI_HEADER_EN0(n) ((n) << 12)
+#define U0_VIN_PIX_NUM(n) ((n) << 13)
+#define U0_VIN_MIPI_BYTE_EN_ISP0_MASK GENMASK(7, 6)
+#define U0_VIN_MIPI_CHANNEL_SEL0_MASK GENMASK(11, 8)
+#define U0_VIN_P_I_MIPI_HEADER_EN0_MASK BIT(12)
+#define U0_VIN_PIX_NUM_MASK GENMASK(16, 13)
+
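+/*
+ * Output buffer states, driven by stf_buf_update_on_new()/_on_next()/
+ * _on_last() in stf-capture.c: IDLE becomes SINGLE once a buffer is
+ * queued, SINGLE drops to STOPPING when a frame completes with no pending
+ * replacement, and a newly queued buffer brings STOPPING back to SINGLE.
+ * CONTINUOUS models double buffering; nothing in this version appears to
+ * enter it, so that state is effectively reserved.
+ */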
+enum stf_v_state {
+ STF_OUTPUT_OFF,
+ STF_OUTPUT_RESERVED,
+ STF_OUTPUT_SINGLE,
+ STF_OUTPUT_CONTINUOUS,
+ STF_OUTPUT_IDLE,
+ STF_OUTPUT_STOPPING
+};
+
+struct stf_v_buf {
+ int active_buf;
+ struct stfcamss_buffer *buf[2];
+ struct stfcamss_buffer *last_buffer;
+ struct list_head pending_bufs;
+ struct list_head ready_bufs;
+ enum stf_v_state state;
+ unsigned int sequence;
+ /* protects the above member variables */
+ spinlock_t lock;
+ atomic_t frame_skip;
+};
+
+struct stf_capture {
+ struct stfcamss_video video;
+ struct stf_v_buf buffers;
+ enum stf_capture_type type;
+};
+
+irqreturn_t stf_wr_irq_handler(int irq, void *priv);
+irqreturn_t stf_isp_irq_handler(int irq, void *priv);
+irqreturn_t stf_line_irq_handler(int irq, void *priv);
+int stf_capture_register(struct stfcamss *stfcamss,
+ struct v4l2_device *v4l2_dev);
+void stf_capture_unregister(struct stfcamss *stfcamss);
+
+#endif
diff --git a/drivers/staging/media/starfive/camss/stf-isp-hw-ops.c b/drivers/staging/media/starfive/camss/stf-isp-hw-ops.c
new file mode 100644
index 000000000..c34631ff9
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-isp-hw-ops.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf-isp-hw-ops.c
+ *
+ * Register interface file for StarFive ISP driver
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ *
+ */
+
+#include "stf-camss.h"
+
+static void stf_isp_config_obc(struct stfcamss *stfcamss)
+{
+ u32 reg_val, reg_add;
+
+ stf_isp_reg_write(stfcamss, ISP_REG_OBC_CFG, OBC_W_H(11) | OBC_W_W(11));
+
+ reg_val = GAIN_D_POINT(0x40) | GAIN_C_POINT(0x40) |
+ GAIN_B_POINT(0x40) | GAIN_A_POINT(0x40);
+ for (reg_add = ISP_REG_OBCG_CFG_0; reg_add <= ISP_REG_OBCG_CFG_3;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+
+ reg_val = OFFSET_D_POINT(0) | OFFSET_C_POINT(0) |
+ OFFSET_B_POINT(0) | OFFSET_A_POINT(0);
+ for (reg_add = ISP_REG_OBCO_CFG_0; reg_add <= ISP_REG_OBCO_CFG_3;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+}
+
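+/*
+ * Program the OECF (opto-electronic conversion) tables.  Each loop writes
+ * one pair of curve parameters across the X0..Y3 register banks, which sit
+ * 0x20 bytes apart; the values describe a fixed piecewise-linear curve
+ * (an interpretation based on the register naming, not on documentation).
+ */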
+static void stf_isp_config_oecf(struct stfcamss *stfcamss)
+{
+ u32 reg_add, par_val;
+ u16 par_h, par_l;
+
+ par_h = 0x10; par_l = 0;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG0; reg_add <= ISP_REG_OECF_Y3_CFG0;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x40; par_l = 0x20;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG1; reg_add <= ISP_REG_OECF_Y3_CFG1;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x80; par_l = 0x60;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG2; reg_add <= ISP_REG_OECF_Y3_CFG2;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0xc0; par_l = 0xa0;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG3; reg_add <= ISP_REG_OECF_Y3_CFG3;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x100; par_l = 0xe0;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG4; reg_add <= ISP_REG_OECF_Y3_CFG4;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x200; par_l = 0x180;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG5; reg_add <= ISP_REG_OECF_Y3_CFG5;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x300; par_l = 0x280;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG6; reg_add <= ISP_REG_OECF_Y3_CFG6;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x3fe; par_l = 0x380;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_X0_CFG7; reg_add <= ISP_REG_OECF_Y3_CFG7;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 0x20;
+ }
+
+ par_h = 0x80; par_l = 0x80;
+ par_val = OCEF_PAR_H(par_h) | OCEF_PAR_L(par_l);
+ for (reg_add = ISP_REG_OECF_S0_CFG0; reg_add <= ISP_REG_OECF_S3_CFG7;) {
+ stf_isp_reg_write(stfcamss, reg_add, par_val);
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_lccf(struct stfcamss *stfcamss)
+{
+ u32 reg_add;
+
+ stf_isp_reg_write(stfcamss, ISP_REG_LCCF_CFG_0,
+ Y_DISTANCE(0x21C) | X_DISTANCE(0x3C0));
+ stf_isp_reg_write(stfcamss, ISP_REG_LCCF_CFG_1, LCCF_MAX_DIS(0xb));
+
+ for (reg_add = ISP_REG_LCCF_CFG_2; reg_add <= ISP_REG_LCCF_CFG_5;) {
+ stf_isp_reg_write(stfcamss, reg_add,
+ LCCF_F2_PAR(0x0) | LCCF_F1_PAR(0x0));
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_awb(struct stfcamss *stfcamss)
+{
+ u32 reg_val, reg_add;
+ u16 symbol_h, symbol_l;
+
+ symbol_h = 0x0; symbol_l = 0x0;
+ reg_val = AWB_X_SYMBOL_H(symbol_h) | AWB_X_SYMBOL_L(symbol_l);
+
+ for (reg_add = ISP_REG_AWB_X0_CFG_0; reg_add <= ISP_REG_AWB_X3_CFG_1;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+
+ symbol_h = 0x0, symbol_l = 0x0;
+ reg_val = AWB_Y_SYMBOL_H(symbol_h) | AWB_Y_SYMBOL_L(symbol_l);
+
+ for (reg_add = ISP_REG_AWB_Y0_CFG_0; reg_add <= ISP_REG_AWB_Y3_CFG_1;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+
+ symbol_h = 0x80, symbol_l = 0x80;
+ reg_val = AWB_S_SYMBOL_H(symbol_h) | AWB_S_SYMBOL_L(symbol_l);
+
+ for (reg_add = ISP_REG_AWB_S0_CFG_0; reg_add <= ISP_REG_AWB_S3_CFG_1;) {
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ }
+}
+
+static void stf_isp_config_grgb(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_ICTC,
+ GF_MODE(1) | MAXGT(0x140) | MINGT(0x40));
+ stf_isp_reg_write(stfcamss, ISP_REG_IDBC, BADGT(0x200) | BADXT(0x200));
+}
+
+static void stf_isp_config_cfa(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_RAW_FORMAT_CFG,
+ SMY13(0) | SMY12(1) | SMY11(0) | SMY10(1) | SMY3(2) |
+ SMY2(3) | SMY1(2) | SMY0(3));
+ stf_isp_reg_write(stfcamss, ISP_REG_ICFAM, CROSS_COV(3) | HV_W(2));
+}
+
+static void stf_isp_config_ccm(struct stfcamss *stfcamss)
+{
+ u32 reg_add;
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ICAMD_0, DNRM_F(6) | CCM_M_DAT(0));
+
+ for (reg_add = ISP_REG_ICAMD_12; reg_add <= ISP_REG_ICAMD_20;) {
+ stf_isp_reg_write(stfcamss, reg_add, CCM_M_DAT(0x80));
+ reg_add += 0x10;
+ }
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ICAMD_24, CCM_M_DAT(0x700));
+ stf_isp_reg_write(stfcamss, ISP_REG_ICAMD_25, CCM_M_DAT(0x200));
+}
+
+static void stf_isp_config_gamma(struct stfcamss *stfcamss)
+{
+ u32 reg_val, reg_add;
+ u16 gamma_slope_v, gamma_v;
+
+ gamma_slope_v = 0x2400; gamma_v = 0x0;
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, ISP_REG_GAMMA_VAL0, reg_val);
+
+ gamma_slope_v = 0x800; gamma_v = 0x20;
+ for (reg_add = ISP_REG_GAMMA_VAL1; reg_add <= ISP_REG_GAMMA_VAL7;) {
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ gamma_v += 0x20;
+ }
+
+ gamma_v = 0x100;
+ for (reg_add = ISP_REG_GAMMA_VAL8; reg_add <= ISP_REG_GAMMA_VAL13;) {
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, reg_add, reg_val);
+ reg_add += 4;
+ gamma_v += 0x80;
+ }
+
+ gamma_v = 0x3fe;
+ reg_val = GAMMA_S_VAL(gamma_slope_v) | GAMMA_VAL(gamma_v);
+ stf_isp_reg_write(stfcamss, ISP_REG_GAMMA_VAL14, reg_val);
+}
+
+static void stf_isp_config_r2y(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_0, 0x4C);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_1, 0x97);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_2, 0x1d);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_3, 0x1d5);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_4, 0x1ac);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_5, 0x80);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_6, 0x80);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_7, 0x194);
+ stf_isp_reg_write(stfcamss, ISP_REG_R2Y_8, 0x1ec);
+}
+
+static void stf_isp_config_y_curve(struct stfcamss *stfcamss)
+{
+ u32 reg_add;
+ u16 y_curve;
+
+ y_curve = 0x0;
+ for (reg_add = ISP_REG_YCURVE_0; reg_add <= ISP_REG_YCURVE_63;) {
+ stf_isp_reg_write(stfcamss, reg_add, y_curve);
+ reg_add += 4;
+ y_curve += 0x10;
+ }
+}
+
+static void stf_isp_config_sharpen(struct stfcamss *sc)
+{
+ u32 reg_add;
+
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN0, S_DELTA(0x7) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN1, S_DELTA(0x18) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN2, S_DELTA(0x80) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN3, S_DELTA(0x100) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN4, S_DELTA(0x10) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN5, S_DELTA(0x60) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN6, S_DELTA(0x100) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN7, S_DELTA(0x190) | S_WEIGHT(0xf));
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN8, S_DELTA(0x0) | S_WEIGHT(0xf));
+
+ for (reg_add = ISP_REG_SHARPEN9; reg_add <= ISP_REG_SHARPEN14;) {
+ stf_isp_reg_write(sc, reg_add, S_WEIGHT(0xf));
+ reg_add += 4;
+ }
+
+ for (reg_add = ISP_REG_SHARPEN_FS0; reg_add <= ISP_REG_SHARPEN_FS5;) {
+ stf_isp_reg_write(sc, reg_add, S_FACTOR(0x10) | S_SLOPE(0x0));
+ reg_add += 4;
+ }
+
+ stf_isp_reg_write(sc, ISP_REG_SHARPEN_WN,
+ PDIRF(0x8) | NDIRF(0x8) | WSUM(0xd7c));
+ stf_isp_reg_write(sc, ISP_REG_IUVS1, UVDIFF2(0xC0) | UVDIFF1(0x40));
+ stf_isp_reg_write(sc, ISP_REG_IUVS2, UVF(0xff) | UVSLOPE(0x0));
+ stf_isp_reg_write(sc, ISP_REG_IUVCKS1,
+ UVCKDIFF2(0xa0) | UVCKDIFF1(0x40));
+}
+
+static void stf_isp_config_dnyuv(struct stfcamss *stfcamss)
+{
+ u32 reg_val;
+
+ reg_val = YUVSW5(7) | YUVSW4(7) | YUVSW3(7) | YUVSW2(7) |
+ YUVSW1(7) | YUVSW0(7);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YSWR0, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CSWR0, reg_val);
+
+ reg_val = YUVSW3(7) | YUVSW2(7) | YUVSW1(7) | YUVSW0(7);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YSWR1, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CSWR1, reg_val);
+
+ reg_val = CURVE_D_H(0x60) | CURVE_D_L(0x40);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YDR0, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CDR0, reg_val);
+
+ reg_val = CURVE_D_H(0xd8) | CURVE_D_L(0x90);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YDR1, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CDR1, reg_val);
+
+ reg_val = CURVE_D_H(0x1e6) | CURVE_D_L(0x144);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_YDR2, reg_val);
+ stf_isp_reg_write(stfcamss, ISP_REG_DNYUV_CDR2, reg_val);
+}
+
+static void stf_isp_config_sat(struct stfcamss *stfcamss)
+{
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_GAIN, CMAD(0x0) | CMAB(0x100));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_THRESHOLD, CMD(0x1f) | CMB(0x1));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_OFFSET, VOFF(0x0) | UOFF(0x0));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_HUE_F, SIN(0x0) | COS(0x100));
+ stf_isp_reg_write(stfcamss, ISP_REG_CS_SCALE, 0x8);
+ stf_isp_reg_write(stfcamss, ISP_REG_YADJ0, YOIR(0x401) | YIMIN(0x1));
+ stf_isp_reg_write(stfcamss, ISP_REG_YADJ1, YOMAX(0x3ff) | YOMIN(0x1));
+}
+
+int stf_isp_reset(struct stf_isp_dev *isp_dev)
+{
+ stf_isp_reg_set_bit(isp_dev->stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_RST_MASK, ISPC_RST);
+ stf_isp_reg_set_bit(isp_dev->stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_RST_MASK, 0);
+
+ return 0;
+}
+
+void stf_isp_init_cfg(struct stf_isp_dev *isp_dev)
+{
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_DC_CFG_1, DC_AXI_ID(0x0));
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_DEC_CFG,
+ DEC_V_KEEP(0x0) |
+ DEC_V_PERIOD(0x0) |
+ DEC_H_KEEP(0x0) |
+ DEC_H_PERIOD(0x0));
+
+ stf_isp_config_obc(isp_dev->stfcamss);
+ stf_isp_config_oecf(isp_dev->stfcamss);
+ stf_isp_config_lccf(isp_dev->stfcamss);
+ stf_isp_config_awb(isp_dev->stfcamss);
+ stf_isp_config_grgb(isp_dev->stfcamss);
+ stf_isp_config_cfa(isp_dev->stfcamss);
+ stf_isp_config_ccm(isp_dev->stfcamss);
+ stf_isp_config_gamma(isp_dev->stfcamss);
+ stf_isp_config_r2y(isp_dev->stfcamss);
+ stf_isp_config_y_curve(isp_dev->stfcamss);
+ stf_isp_config_sharpen(isp_dev->stfcamss);
+ stf_isp_config_dnyuv(isp_dev->stfcamss);
+ stf_isp_config_sat(isp_dev->stfcamss);
+
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_CSI_MODULE_CFG,
+ CSI_DUMP_EN | CSI_SC_EN | CSI_AWB_EN |
+ CSI_LCCF_EN | CSI_OECF_EN | CSI_OBC_EN | CSI_DEC_EN);
+ stf_isp_reg_write(isp_dev->stfcamss, ISP_REG_ISP_CTRL_1,
+ CTRL_SAT(1) | CTRL_DBC | CTRL_CTC | CTRL_YHIST |
+ CTRL_YCURVE | CTRL_BIYUV | CTRL_SCE | CTRL_EE |
+ CTRL_CCE | CTRL_RGE | CTRL_CME | CTRL_AE | CTRL_CE);
+}
+
+static void stf_isp_config_crop(struct stfcamss *stfcamss,
+ struct v4l2_rect *crop)
+{
+ u32 bpp = stfcamss->isp_dev.current_fmt->bpp;
+ u32 val;
+
+ val = VSTART_CAP(crop->top) | HSTART_CAP(crop->left);
+ stf_isp_reg_write(stfcamss, ISP_REG_PIC_CAPTURE_START_CFG, val);
+
+ val = VEND_CAP(crop->height + crop->top - 1) |
+ HEND_CAP(crop->width + crop->left - 1);
+ stf_isp_reg_write(stfcamss, ISP_REG_PIC_CAPTURE_END_CFG, val);
+
+ val = H_ACT_CAP(crop->height) | W_ACT_CAP(crop->width);
+ stf_isp_reg_write(stfcamss, ISP_REG_PIPELINE_XY_SIZE, val);
+
+ val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
+ stf_isp_reg_write(stfcamss, ISP_REG_STRIDE, val);
+}
+
+static void stf_isp_config_raw_fmt(struct stfcamss *stfcamss, u32 mcode)
+{
+ u32 val, val1;
+
+ switch (mcode) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+		/* 3 2 3 2 1 0 1 0, B Gb B Gb Gr R Gr R */
+ val = SMY13(3) | SMY12(2) | SMY11(3) | SMY10(2) |
+ SMY3(1) | SMY2(0) | SMY1(1) | SMY0(0);
+ val1 = CTRL_SAT(0x0);
+ break;
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ /* 2 3 2 3 0 1 0 1, Gb B Gb B R Gr R Gr */
+ val = SMY13(2) | SMY12(3) | SMY11(2) | SMY10(3) |
+ SMY3(0) | SMY2(1) | SMY1(0) | SMY0(1);
+ val1 = CTRL_SAT(0x2);
+ break;
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ /* 1 0 1 0 3 2 3 2, Gr R Gr R B Gb B Gb */
+ val = SMY13(1) | SMY12(0) | SMY11(1) | SMY10(0) |
+ SMY3(3) | SMY2(2) | SMY1(3) | SMY0(2);
+ val1 = CTRL_SAT(0x3);
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ /* 0 1 0 1 2 3 2 3 R Gr R Gr Gb B Gb B */
+ val = SMY13(0) | SMY12(1) | SMY11(0) | SMY10(1) |
+ SMY3(2) | SMY2(3) | SMY1(2) | SMY0(3);
+ val1 = CTRL_SAT(0x1);
+ break;
+ default:
+ val = SMY13(0) | SMY12(1) | SMY11(0) | SMY10(1) |
+ SMY3(2) | SMY2(3) | SMY1(2) | SMY0(3);
+ val1 = CTRL_SAT(0x1);
+ break;
+ }
+ stf_isp_reg_write(stfcamss, ISP_REG_RAW_FORMAT_CFG, val);
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_ISP_CTRL_1, CTRL_SAT_MASK, val1);
+}
+
+void stf_isp_settings(struct stf_isp_dev *isp_dev,
+ struct v4l2_rect *crop, u32 mcode)
+{
+ struct stfcamss *stfcamss = isp_dev->stfcamss;
+
+ stf_isp_config_crop(stfcamss, crop);
+ stf_isp_config_raw_fmt(stfcamss, mcode);
+
+ stf_isp_reg_set_bit(stfcamss, ISP_REG_DUMP_CFG_1,
+ DUMP_BURST_LEN_MASK | DUMP_SD_MASK,
+ DUMP_BURST_LEN(3));
+
+ stf_isp_reg_write(stfcamss, ISP_REG_ITIIWSR,
+ ITI_HSIZE(IMAGE_MAX_HEIGH) |
+ ITI_WSIZE(IMAGE_MAX_WIDTH));
+ stf_isp_reg_write(stfcamss, ISP_REG_ITIDWLSR, 0x960);
+ stf_isp_reg_write(stfcamss, ISP_REG_ITIDRLSR, 0x960);
+ stf_isp_reg_write(stfcamss, ISP_REG_SENSOR, IMAGER_SEL(1));
+}
+
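+/*
+ * Bring the ISP out of reset and enable streaming.  The last argument of
+ * stf_isp_reg_write_delay() is in milliseconds, so each control write below
+ * is followed by a settling delay of roughly 10 ms.
+ */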
+void stf_isp_stream_set(struct stf_isp_dev *isp_dev)
+{
+ struct stfcamss *stfcamss = isp_dev->stfcamss;
+
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_ENUO | ISPC_ENLS | ISPC_RST, 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_ENUO | ISPC_ENLS, 10);
+ stf_isp_reg_write(stfcamss, ISP_REG_IESHD, SHAD_UP_M);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_ISP_CTRL_0,
+ ISPC_ENUO | ISPC_ENLS | ISPC_EN, 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_CSIINTS,
+ CSI_INTS(1) | CSI_SHA_M(4), 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_CSIINTS,
+ CSI_INTS(2) | CSI_SHA_M(4), 10);
+ stf_isp_reg_write_delay(stfcamss, ISP_REG_CSI_INPUT_EN_AND_STATUS,
+ CSI_EN_S, 10);
+}
diff --git a/drivers/staging/media/starfive/camss/stf-isp.c b/drivers/staging/media/starfive/camss/stf-isp.c
new file mode 100644
index 000000000..d50616ef3
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-isp.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf-isp.c
+ *
+ * StarFive Camera Subsystem - ISP Module
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+#include <media/v4l2-rect.h>
+
+#include "stf-camss.h"
+
+#define SINK_FORMATS_INDEX 0
+#define SOURCE_FORMATS_INDEX 1
+
+static int isp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel);
+
+static const struct stf_isp_format isp_formats_sink[] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
+};
+
+static const struct stf_isp_format isp_formats_source[] = {
+ { MEDIA_BUS_FMT_YUYV8_1_5X8, 8 },
+};
+
+static const struct stf_isp_format_table isp_formats_st7110[] = {
+ { isp_formats_sink, ARRAY_SIZE(isp_formats_sink) },
+ { isp_formats_source, ARRAY_SIZE(isp_formats_source) },
+};
+
+static const struct stf_isp_format *
+stf_g_fmt_by_mcode(const struct stf_isp_format_table *fmt_table, u32 mcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < fmt_table->nfmts; i++) {
+ if (fmt_table->fmts[i].code == mcode)
+ return &fmt_table->fmts[i];
+ }
+
+ return NULL;
+}
+
+int stf_isp_init(struct stfcamss *stfcamss)
+{
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+
+ isp_dev->stfcamss = stfcamss;
+ isp_dev->formats = isp_formats_st7110;
+ isp_dev->nformats = ARRAY_SIZE(isp_formats_st7110);
+ isp_dev->current_fmt = &isp_formats_source[0];
+
+ return 0;
+}
+
+static int isp_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev_state *sd_state;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+
+ sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+ fmt = v4l2_subdev_state_get_format(sd_state, STF_ISP_PAD_SINK);
+ crop = v4l2_subdev_state_get_crop(sd_state, STF_ISP_PAD_SRC);
+
+ if (enable) {
+ stf_isp_reset(isp_dev);
+ stf_isp_init_cfg(isp_dev);
+ stf_isp_settings(isp_dev, crop, fmt->code);
+ stf_isp_stream_set(isp_dev);
+ }
+
+ v4l2_subdev_call(isp_dev->source_subdev, video, s_stream, enable);
+
+ v4l2_subdev_unlock_state(sd_state);
+ return 0;
+}
+
+static void isp_try_format(struct stf_isp_dev *isp_dev,
+ struct v4l2_subdev_state *state,
+ unsigned int pad,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ const struct stf_isp_format_table *formats;
+
+ if (pad >= STF_ISP_PAD_MAX) {
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ return;
+ }
+
+ if (pad == STF_ISP_PAD_SINK)
+ formats = &isp_dev->formats[SINK_FORMATS_INDEX];
+	else
+ formats = &isp_dev->formats[SOURCE_FORMATS_INDEX];
+
+ fmt->width = clamp_t(u32, fmt->width, STFCAMSS_FRAME_MIN_WIDTH,
+ STFCAMSS_FRAME_MAX_WIDTH);
+ fmt->height = clamp_t(u32, fmt->height, STFCAMSS_FRAME_MIN_HEIGHT,
+ STFCAMSS_FRAME_MAX_HEIGHT);
+ fmt->height &= ~0x1;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->flags = 0;
+
+ if (!stf_g_fmt_by_mcode(formats, fmt->code))
+ fmt->code = formats->fmts[0].code;
+}
+
+static int isp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ const struct stf_isp_format_table *formats;
+
+ if (code->pad == STF_ISP_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(isp_formats_sink))
+ return -EINVAL;
+
+ formats = &isp_dev->formats[SINK_FORMATS_INDEX];
+ code->code = formats->fmts[code->index].code;
+ } else {
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (code->index >= ARRAY_SIZE(isp_formats_source))
+ return -EINVAL;
+
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ STF_ISP_PAD_SRC);
+
+ code->code = sink_fmt->code;
+ if (!code->code)
+ return -EINVAL;
+ }
+ code->flags = 0;
+
+ return 0;
+}
+
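+/*
+ * Setting the sink format cascades: isp_set_format() on the sink pad resets
+ * the sink crop via isp_set_selection(), which resets the source crop, which
+ * in turn resets the source pad format, keeping all four in sync.
+ */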
+static int isp_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_state_get_format(state, fmt->pad);
+ if (!format)
+ return -EINVAL;
+
+ isp_try_format(isp_dev, state, fmt->pad, &fmt->format);
+ *format = fmt->format;
+
+ isp_dev->current_fmt = stf_g_fmt_by_mcode(&isp_dev->formats[fmt->pad],
+ fmt->format.code);
+
+ /* Propagate to in crop */
+ if (fmt->pad == STF_ISP_PAD_SINK) {
+ struct v4l2_subdev_selection sel = { 0 };
+
+ /* Reset sink pad compose selection */
+ sel.which = fmt->which;
+ sel.pad = STF_ISP_PAD_SINK;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r.width = fmt->format.width;
+ sel.r.height = fmt->format.height;
+ isp_set_selection(sd, state, &sel);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_rect stf_frame_min_crop = {
+ .width = STFCAMSS_FRAME_MIN_WIDTH,
+ .height = STFCAMSS_FRAME_MIN_HEIGHT,
+ .top = 0,
+ .left = 0,
+};
+
+static void isp_try_crop(struct stf_isp_dev *isp_dev,
+ struct v4l2_subdev_state *state,
+ struct v4l2_rect *crop)
+{
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_state_get_format(state, STF_ISP_PAD_SINK);
+
+ const struct v4l2_rect bounds = {
+ .width = fmt->width,
+ .height = fmt->height,
+ .left = 0,
+ .top = 0,
+ };
+
+ v4l2_rect_set_min_size(crop, &stf_frame_min_crop);
+ v4l2_rect_map_inside(crop, &bounds);
+}
+
+static int isp_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+ struct v4l2_rect *rect;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (sel->pad == STF_ISP_PAD_SINK) {
+ fmt.format = *v4l2_subdev_state_get_format(state,
+ sel->pad);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = fmt.format.width;
+ sel->r.height = fmt.format.height;
+ } else if (sel->pad == STF_ISP_PAD_SRC) {
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ sel->r = *rect;
+ }
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ if (!rect)
+ return -EINVAL;
+
+ sel->r = *rect;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int isp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *rect;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (sel->target == V4L2_SEL_TGT_CROP &&
+ sel->pad == STF_ISP_PAD_SINK) {
+ struct v4l2_subdev_selection crop = { 0 };
+
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ if (!rect)
+ return -EINVAL;
+
+ isp_try_crop(isp_dev, state, &sel->r);
+ *rect = sel->r;
+
+ /* Reset source crop selection */
+ crop.which = sel->which;
+ crop.pad = STF_ISP_PAD_SRC;
+ crop.target = V4L2_SEL_TGT_CROP;
+ crop.r = *rect;
+ isp_set_selection(sd, state, &crop);
+ } else if (sel->target == V4L2_SEL_TGT_CROP &&
+ sel->pad == STF_ISP_PAD_SRC) {
+ struct v4l2_subdev_format fmt = { 0 };
+
+ rect = v4l2_subdev_state_get_crop(state, sel->pad);
+ if (!rect)
+ return -EINVAL;
+
+ isp_try_crop(isp_dev, state, &sel->r);
+ *rect = sel->r;
+
+ /* Reset source pad format width and height */
+ fmt.which = sel->which;
+ fmt.pad = STF_ISP_PAD_SRC;
+ fmt.format.width = rect->width;
+ fmt.format.height = rect->height;
+ isp_set_format(sd, state, &fmt);
+ }
+
+ dev_dbg(isp_dev->stfcamss->dev, "pad: %d sel(%d,%d)/%dx%d\n",
+ sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
+
+ return 0;
+}
+
+static int isp_init_formats(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format format = {
+ .pad = STF_ISP_PAD_SINK,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .format = {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .width = 1920,
+ .height = 1080
+ }
+ };
+
+ return isp_set_format(sd, sd_state, &format);
+}
+
+static const struct v4l2_subdev_video_ops isp_video_ops = {
+ .s_stream = isp_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops isp_pad_ops = {
+ .enum_mbus_code = isp_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = isp_set_format,
+ .get_selection = isp_get_selection,
+ .set_selection = isp_set_selection,
+};
+
+static const struct v4l2_subdev_ops isp_v4l2_ops = {
+ .video = &isp_video_ops,
+ .pad = &isp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops isp_internal_ops = {
+ .init_state = isp_init_formats,
+};
+
+static const struct media_entity_operations isp_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+int stf_isp_register(struct stf_isp_dev *isp_dev, struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd = &isp_dev->subdev;
+ struct media_pad *pads = isp_dev->pads;
+ int ret;
+
+ v4l2_subdev_init(sd, &isp_v4l2_ops);
+ sd->internal_ops = &isp_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, ARRAY_SIZE(sd->name), "stf_isp");
+ v4l2_set_subdevdata(sd, isp_dev);
+
+ pads[STF_ISP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[STF_ISP_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+ sd->entity.ops = &isp_media_ops;
+ ret = media_entity_pads_init(&sd->entity, STF_ISP_PAD_MAX, pads);
+ if (ret) {
+ dev_err(isp_dev->stfcamss->dev,
+ "Failed to init media entity: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret) {
+ dev_err(isp_dev->stfcamss->dev,
+ "Failed to register subdev: %d\n", ret);
+ goto err_subdev_cleanup;
+ }
+
+ return 0;
+
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+int stf_isp_unregister(struct stf_isp_dev *isp_dev)
+{
+ v4l2_device_unregister_subdev(&isp_dev->subdev);
+ v4l2_subdev_cleanup(&isp_dev->subdev);
+ media_entity_cleanup(&isp_dev->subdev.entity);
+
+ return 0;
+}
diff --git a/drivers/staging/media/starfive/camss/stf-isp.h b/drivers/staging/media/starfive/camss/stf-isp.h
new file mode 100644
index 000000000..955cbb048
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-isp.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf_isp.h
+ *
+ * StarFive Camera Subsystem - ISP Module
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_ISP_H
+#define STF_ISP_H
+
+#include <media/v4l2-subdev.h>
+
+#include "stf-video.h"
+
+#define ISP_RAW_DATA_BITS 12
+#define SCALER_RATIO_MAX 1
+#define STF_ISP_REG_OFFSET_MAX 0x0fff
+#define STF_ISP_REG_DELAY_MAX 100
+
+/* isp registers */
+#define ISP_REG_CSI_INPUT_EN_AND_STATUS 0x000
+#define CSI_SCD_ERR BIT(6)
+#define CSI_ITU656_ERR BIT(4)
+#define CSI_ITU656_F BIT(3)
+#define CSI_SCD_DONE BIT(2)
+#define CSI_BUSY_S BIT(1)
+#define CSI_EN_S BIT(0)
+
+#define ISP_REG_CSIINTS 0x008
+#define CSI_INTS(n) ((n) << 16)
+#define CSI_SHA_M(n) ((n) << 0)
+#define CSI_INTS_MASK GENMASK(17, 16)
+
+#define ISP_REG_CSI_MODULE_CFG 0x010
+#define CSI_DUMP_EN BIT(19)
+#define CSI_VS_EN BIT(18)
+#define CSI_SC_EN BIT(17)
+#define CSI_OBA_EN BIT(16)
+#define CSI_AWB_EN BIT(7)
+#define CSI_LCCF_EN BIT(6)
+#define CSI_OECFHM_EN BIT(5)
+#define CSI_OECF_EN BIT(4)
+#define CSI_LCBQ_EN BIT(3)
+#define CSI_OBC_EN BIT(2)
+#define CSI_DEC_EN BIT(1)
+#define CSI_DC_EN BIT(0)
+
+#define ISP_REG_SENSOR 0x014
+#define DVP_SYNC_POL(n) ((n) << 2)
+#define ITU656_EN(n) ((n) << 1)
+#define IMAGER_SEL(n) ((n) << 0)
+
+#define ISP_REG_RAW_FORMAT_CFG 0x018
+#define SMY13(n) ((n) << 14)
+#define SMY12(n) ((n) << 12)
+#define SMY11(n) ((n) << 10)
+#define SMY10(n) ((n) << 8)
+#define SMY3(n) ((n) << 6)
+#define SMY2(n) ((n) << 4)
+#define SMY1(n) ((n) << 2)
+#define SMY0(n) ((n) << 0)
+
+#define ISP_REG_PIC_CAPTURE_START_CFG 0x01c
+#define VSTART_CAP(n) ((n) << 16)
+#define HSTART_CAP(n) ((n) << 0)
+
+#define ISP_REG_PIC_CAPTURE_END_CFG 0x020
+#define VEND_CAP(n) ((n) << 16)
+#define HEND_CAP(n) ((n) << 0)
+
+#define ISP_REG_DUMP_CFG_0 0x024
+#define ISP_REG_DUMP_CFG_1 0x028
+#define DUMP_ID(n) ((n) << 24)
+#define DUMP_SHT(n) ((n) << 20)
+#define DUMP_BURST_LEN(n) ((n) << 16)
+#define DUMP_SD(n) ((n) << 0)
+#define DUMP_BURST_LEN_MASK GENMASK(17, 16)
+#define DUMP_SD_MASK GENMASK(15, 0)
+
+#define ISP_REG_DEC_CFG 0x030
+#define DEC_V_KEEP(n) ((n) << 24)
+#define DEC_V_PERIOD(n) ((n) << 16)
+#define DEC_H_KEEP(n) ((n) << 8)
+#define DEC_H_PERIOD(n) ((n) << 0)
+
+#define ISP_REG_OBC_CFG 0x034
+#define OBC_W_H(y) ((y) << 4)
+#define OBC_W_W(x) ((x) << 0)
+
+#define ISP_REG_DC_CFG_1 0x044
+#define DC_AXI_ID(n) ((n) << 0)
+
+#define ISP_REG_LCCF_CFG_0 0x050
+#define Y_DISTANCE(y) ((y) << 16)
+#define X_DISTANCE(x) ((x) << 0)
+
+#define ISP_REG_LCCF_CFG_1 0x058
+#define LCCF_MAX_DIS(n) ((n) << 0)
+
+#define ISP_REG_LCBQ_CFG_0 0x074
+#define H_LCBQ(y) ((y) << 12)
+#define W_LCBQ(x) ((x) << 8)
+
+#define ISP_REG_LCBQ_CFG_1 0x07c
+#define Y_COOR(y) ((y) << 16)
+#define X_COOR(x) ((x) << 0)
+
+#define ISP_REG_LCCF_CFG_2 0x0e0
+#define ISP_REG_LCCF_CFG_3 0x0e4
+#define ISP_REG_LCCF_CFG_4 0x0e8
+#define ISP_REG_LCCF_CFG_5 0x0ec
+#define LCCF_F2_PAR(n) ((n) << 16)
+#define LCCF_F1_PAR(n) ((n) << 0)
+
+#define ISP_REG_OECF_X0_CFG0 0x100
+#define ISP_REG_OECF_X0_CFG1 0x104
+#define ISP_REG_OECF_X0_CFG2 0x108
+#define ISP_REG_OECF_X0_CFG3 0x10c
+#define ISP_REG_OECF_X0_CFG4 0x110
+#define ISP_REG_OECF_X0_CFG5 0x114
+#define ISP_REG_OECF_X0_CFG6 0x118
+#define ISP_REG_OECF_X0_CFG7 0x11c
+
+#define ISP_REG_OECF_Y3_CFG0 0x1e0
+#define ISP_REG_OECF_Y3_CFG1 0x1e4
+#define ISP_REG_OECF_Y3_CFG2 0x1e8
+#define ISP_REG_OECF_Y3_CFG3 0x1ec
+#define ISP_REG_OECF_Y3_CFG4 0x1f0
+#define ISP_REG_OECF_Y3_CFG5 0x1f4
+#define ISP_REG_OECF_Y3_CFG6 0x1f8
+#define ISP_REG_OECF_Y3_CFG7 0x1fc
+
+#define ISP_REG_OECF_S0_CFG0 0x200
+#define ISP_REG_OECF_S3_CFG7 0x27c
+#define OCEF_PAR_H(n) ((n) << 16)
+#define OCEF_PAR_L(n) ((n) << 0)
+
+#define ISP_REG_AWB_X0_CFG_0 0x280
+#define ISP_REG_AWB_X0_CFG_1 0x284
+#define ISP_REG_AWB_X1_CFG_0 0x288
+#define ISP_REG_AWB_X1_CFG_1 0x28c
+#define ISP_REG_AWB_X2_CFG_0 0x290
+#define ISP_REG_AWB_X2_CFG_1 0x294
+#define ISP_REG_AWB_X3_CFG_0 0x298
+#define ISP_REG_AWB_X3_CFG_1 0x29c
+#define AWB_X_SYMBOL_H(n) ((n) << 16)
+#define AWB_X_SYMBOL_L(n) ((n) << 0)
+
+#define ISP_REG_AWB_Y0_CFG_0 0x2a0
+#define ISP_REG_AWB_Y0_CFG_1 0x2a4
+#define ISP_REG_AWB_Y1_CFG_0 0x2a8
+#define ISP_REG_AWB_Y1_CFG_1 0x2ac
+#define ISP_REG_AWB_Y2_CFG_0 0x2b0
+#define ISP_REG_AWB_Y2_CFG_1 0x2b4
+#define ISP_REG_AWB_Y3_CFG_0 0x2b8
+#define ISP_REG_AWB_Y3_CFG_1 0x2bc
+#define AWB_Y_SYMBOL_H(n) ((n) << 16)
+#define AWB_Y_SYMBOL_L(n) ((n) << 0)
+
+#define ISP_REG_AWB_S0_CFG_0 0x2c0
+#define ISP_REG_AWB_S0_CFG_1 0x2c4
+#define ISP_REG_AWB_S1_CFG_0 0x2c8
+#define ISP_REG_AWB_S1_CFG_1 0x2cc
+#define ISP_REG_AWB_S2_CFG_0 0x2d0
+#define ISP_REG_AWB_S2_CFG_1 0x2d4
+#define ISP_REG_AWB_S3_CFG_0 0x2d8
+#define ISP_REG_AWB_S3_CFG_1 0x2dc
+#define AWB_S_SYMBOL_H(n) ((n) << 16)
+#define AWB_S_SYMBOL_L(n) ((n) << 0)
+
+#define ISP_REG_OBCG_CFG_0 0x2e0
+#define ISP_REG_OBCG_CFG_1 0x2e4
+#define ISP_REG_OBCG_CFG_2 0x2e8
+#define ISP_REG_OBCG_CFG_3 0x2ec
+#define ISP_REG_OBCO_CFG_0 0x2f0
+#define ISP_REG_OBCO_CFG_1 0x2f4
+#define ISP_REG_OBCO_CFG_2 0x2f8
+#define ISP_REG_OBCO_CFG_3 0x2fc
+#define GAIN_D_POINT(x) ((x) << 24)
+#define GAIN_C_POINT(x) ((x) << 16)
+#define GAIN_B_POINT(x) ((x) << 8)
+#define GAIN_A_POINT(x) ((x) << 0)
+#define OFFSET_D_POINT(x) ((x) << 24)
+#define OFFSET_C_POINT(x) ((x) << 16)
+#define OFFSET_B_POINT(x) ((x) << 8)
+#define OFFSET_A_POINT(x) ((x) << 0)
+
+#define ISP_REG_ISP_CTRL_0 0xa00
+#define ISPC_LINE BIT(27)
+#define ISPC_SC BIT(26)
+#define ISPC_CSI BIT(25)
+#define ISPC_ISP BIT(24)
+#define ISPC_ENUO BIT(20)
+#define ISPC_ENLS BIT(17)
+#define ISPC_ENSS1 BIT(12)
+#define ISPC_ENSS0 BIT(11)
+#define ISPC_RST BIT(1)
+#define ISPC_EN BIT(0)
+#define ISPC_RST_MASK BIT(1)
+#define ISPC_INT_ALL_MASK GENMASK(27, 24)
+
+#define ISP_REG_ISP_CTRL_1 0xa08
+#define CTRL_SAT(n) ((n) << 28)
+#define CTRL_DBC BIT(22)
+#define CTRL_CTC BIT(21)
+#define CTRL_YHIST BIT(20)
+#define CTRL_YCURVE BIT(19)
+#define CTRL_CTM BIT(18)
+#define CTRL_BIYUV BIT(17)
+#define CTRL_SCE BIT(8)
+#define CTRL_EE BIT(7)
+#define CTRL_CCE BIT(5)
+#define CTRL_RGE BIT(4)
+#define CTRL_CME BIT(3)
+#define CTRL_AE BIT(2)
+#define CTRL_CE BIT(1)
+#define CTRL_SAT_MASK GENMASK(31, 28)
+
+#define ISP_REG_PIPELINE_XY_SIZE 0xa0c
+#define H_ACT_CAP(n) ((n) << 16)
+#define W_ACT_CAP(n) ((n) << 0)
+
+#define ISP_REG_ICTC 0xa10
+#define GF_MODE(n) ((n) << 30)
+#define MAXGT(n) ((n) << 16)
+#define MINGT(n) ((n) << 0)
+
+#define ISP_REG_IDBC 0xa14
+#define BADGT(n) ((n) << 16)
+#define BADXT(n) ((n) << 0)
+
+#define ISP_REG_ICFAM 0xa1c
+#define CROSS_COV(n) ((n) << 4)
+#define HV_W(n) ((n) << 0)
+
+#define ISP_REG_CS_GAIN 0xa30
+#define CMAD(n) ((n) << 16)
+#define CMAB(n) ((n) << 0)
+
+#define ISP_REG_CS_THRESHOLD 0xa34
+#define CMD(n) ((n) << 16)
+#define CMB(n) ((n) << 0)
+
+#define ISP_REG_CS_OFFSET 0xa38
+#define VOFF(n) ((n) << 16)
+#define UOFF(n) ((n) << 0)
+
+#define ISP_REG_CS_HUE_F 0xa3c
+#define SIN(n) ((n) << 16)
+#define COS(n) ((n) << 0)
+
+#define ISP_REG_CS_SCALE 0xa40
+
+#define ISP_REG_IESHD 0xa50
+#define SHAD_UP_M BIT(1)
+#define SHAD_UP_EN BIT(0)
+
+#define ISP_REG_YADJ0 0xa54
+#define YOIR(n) ((n) << 16)
+#define YIMIN(n) ((n) << 0)
+
+#define ISP_REG_YADJ1 0xa58
+#define YOMAX(n) ((n) << 16)
+#define YOMIN(n) ((n) << 0)
+
+#define ISP_REG_Y_PLANE_START_ADDR 0xa80
+#define ISP_REG_UV_PLANE_START_ADDR 0xa84
+#define ISP_REG_STRIDE 0xa88
+
+#define ISP_REG_ITIIWSR 0xb20
+#define ITI_HSIZE(n) ((n) << 16)
+#define ITI_WSIZE(n) ((n) << 0)
+
+#define ISP_REG_ITIDWLSR 0xb24
+#define ISP_REG_ITIPDFR 0xb38
+#define ISP_REG_ITIDRLSR 0xb3c
+
+#define ISP_REG_DNYUV_YSWR0 0xc00
+#define ISP_REG_DNYUV_YSWR1 0xc04
+#define ISP_REG_DNYUV_CSWR0 0xc08
+#define ISP_REG_DNYUV_CSWR1 0xc0c
+#define YUVSW5(n) ((n) << 20)
+#define YUVSW4(n) ((n) << 16)
+#define YUVSW3(n) ((n) << 12)
+#define YUVSW2(n) ((n) << 8)
+#define YUVSW1(n) ((n) << 4)
+#define YUVSW0(n) ((n) << 0)
+
+#define ISP_REG_DNYUV_YDR0 0xc10
+#define ISP_REG_DNYUV_YDR1 0xc14
+#define ISP_REG_DNYUV_YDR2 0xc18
+#define ISP_REG_DNYUV_CDR0 0xc1c
+#define ISP_REG_DNYUV_CDR1 0xc20
+#define ISP_REG_DNYUV_CDR2 0xc24
+#define CURVE_D_H(n) ((n) << 16)
+#define CURVE_D_L(n) ((n) << 0)
+
+#define ISP_REG_ICAMD_0 0xc40
+#define ISP_REG_ICAMD_12 0xc70
+#define ISP_REG_ICAMD_20 0xc90
+#define ISP_REG_ICAMD_24 0xca0
+#define ISP_REG_ICAMD_25 0xca4
+#define DNRM_F(n) ((n) << 16)
+#define CCM_M_DAT(n) ((n) << 0)
+
+#define ISP_REG_GAMMA_VAL0 0xe00
+#define ISP_REG_GAMMA_VAL1 0xe04
+#define ISP_REG_GAMMA_VAL2 0xe08
+#define ISP_REG_GAMMA_VAL3 0xe0c
+#define ISP_REG_GAMMA_VAL4 0xe10
+#define ISP_REG_GAMMA_VAL5 0xe14
+#define ISP_REG_GAMMA_VAL6 0xe18
+#define ISP_REG_GAMMA_VAL7 0xe1c
+#define ISP_REG_GAMMA_VAL8 0xe20
+#define ISP_REG_GAMMA_VAL9 0xe24
+#define ISP_REG_GAMMA_VAL10 0xe28
+#define ISP_REG_GAMMA_VAL11 0xe2c
+#define ISP_REG_GAMMA_VAL12 0xe30
+#define ISP_REG_GAMMA_VAL13 0xe34
+#define ISP_REG_GAMMA_VAL14 0xe38
+#define GAMMA_S_VAL(n) ((n) << 16)
+#define GAMMA_VAL(n) ((n) << 0)
+
+#define ISP_REG_R2Y_0 0xe40
+#define ISP_REG_R2Y_1 0xe44
+#define ISP_REG_R2Y_2 0xe48
+#define ISP_REG_R2Y_3 0xe4c
+#define ISP_REG_R2Y_4 0xe50
+#define ISP_REG_R2Y_5 0xe54
+#define ISP_REG_R2Y_6 0xe58
+#define ISP_REG_R2Y_7 0xe5c
+#define ISP_REG_R2Y_8 0xe60
+
+#define ISP_REG_SHARPEN0 0xe80
+#define ISP_REG_SHARPEN1 0xe84
+#define ISP_REG_SHARPEN2 0xe88
+#define ISP_REG_SHARPEN3 0xe8c
+#define ISP_REG_SHARPEN4 0xe90
+#define ISP_REG_SHARPEN5 0xe94
+#define ISP_REG_SHARPEN6 0xe98
+#define ISP_REG_SHARPEN7 0xe9c
+#define ISP_REG_SHARPEN8 0xea0
+#define ISP_REG_SHARPEN9 0xea4
+#define ISP_REG_SHARPEN10 0xea8
+#define ISP_REG_SHARPEN11 0xeac
+#define ISP_REG_SHARPEN12 0xeb0
+#define ISP_REG_SHARPEN13 0xeb4
+#define ISP_REG_SHARPEN14 0xeb8
+#define S_DELTA(n) ((n) << 16)
+#define S_WEIGHT(n) ((n) << 8)
+
+#define ISP_REG_SHARPEN_FS0 0xebc
+#define ISP_REG_SHARPEN_FS1 0xec0
+#define ISP_REG_SHARPEN_FS2 0xec4
+#define ISP_REG_SHARPEN_FS3 0xec8
+#define ISP_REG_SHARPEN_FS4 0xecc
+#define ISP_REG_SHARPEN_FS5 0xed0
+#define S_FACTOR(n) ((n) << 24)
+#define S_SLOPE(n) ((n) << 0)
+
+#define ISP_REG_SHARPEN_WN 0xed4
+#define PDIRF(n) ((n) << 28)
+#define NDIRF(n) ((n) << 24)
+#define WSUM(n) ((n) << 0)
+
+#define ISP_REG_IUVS1 0xed8
+#define UVDIFF2(n) ((n) << 16)
+#define UVDIFF1(n) ((n) << 0)
+
+#define ISP_REG_IUVS2 0xedc
+#define UVF(n) ((n) << 24)
+#define UVSLOPE(n) ((n) << 0)
+
+#define ISP_REG_IUVCKS1 0xee0
+#define UVCKDIFF2(n) ((n) << 16)
+#define UVCKDIFF1(n) ((n) << 0)
+
+#define ISP_REG_IUVCKS2 0xee4
+
+#define ISP_REG_ISHRPET 0xee8
+#define TH(n) ((n) << 8)
+#define EN(n) ((n) << 0)
+
+#define ISP_REG_YCURVE_0 0xf00
+#define ISP_REG_YCURVE_63 0xffc
+
+#define IMAGE_MAX_WIDTH 1920
+#define IMAGE_MAX_HEIGH 1080
+
+/* pad id for media framework */
+enum stf_isp_pad_id {
+ STF_ISP_PAD_SINK = 0,
+ STF_ISP_PAD_SRC,
+ STF_ISP_PAD_MAX
+};
+
+struct stf_isp_format {
+ u32 code;
+ u8 bpp;
+};
+
+struct stf_isp_format_table {
+ const struct stf_isp_format *fmts;
+ int nfmts;
+};
+
+struct stf_isp_dev {
+ struct stfcamss *stfcamss;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[STF_ISP_PAD_MAX];
+ const struct stf_isp_format_table *formats;
+ unsigned int nformats;
+ struct v4l2_subdev *source_subdev;
+ const struct stf_isp_format *current_fmt;
+};
+
+int stf_isp_reset(struct stf_isp_dev *isp_dev);
+void stf_isp_init_cfg(struct stf_isp_dev *isp_dev);
+void stf_isp_settings(struct stf_isp_dev *isp_dev,
+ struct v4l2_rect *crop, u32 mcode);
+void stf_isp_stream_set(struct stf_isp_dev *isp_dev);
+int stf_isp_init(struct stfcamss *stfcamss);
+int stf_isp_register(struct stf_isp_dev *isp_dev, struct v4l2_device *v4l2_dev);
+int stf_isp_unregister(struct stf_isp_dev *isp_dev);
+
+#endif /* STF_ISP_H */
diff --git a/drivers/staging/media/starfive/camss/stf-video.c b/drivers/staging/media/starfive/camss/stf-video.c
new file mode 100644
index 000000000..989b5e82b
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-video.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * stf_video.c
+ *
+ * StarFive Camera Subsystem - V4L2 device node
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "stf-camss.h"
+#include "stf-video.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static inline struct stfcamss_buffer *
+to_stfcamss_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct stfcamss_buffer, vb);
+}
+
+static const struct stfcamss_format_info *
+video_g_fi_by_index(struct stfcamss_video *video, int index)
+{
+ if (index >= video->nformats)
+ return NULL;
+
+ return &video->formats[index];
+}
+
+static const struct stfcamss_format_info *
+video_g_fi_by_mcode(struct stfcamss_video *video, u32 mcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].code == mcode)
+ return &video->formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct stfcamss_format_info *
+video_g_fi_by_pfmt(struct stfcamss_video *video, u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].pixelformat == pixelformat)
+ return &video->formats[i];
+ }
+
+ return NULL;
+}
+
+static int __video_try_fmt(struct stfcamss_video *video, struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ const struct stfcamss_format_info *fi;
+ u32 width, height;
+ u32 bpl;
+ unsigned int i;
+
+ fi = video_g_fi_by_pfmt(video, pix->pixelformat);
+ if (!fi)
+ fi = &video->formats[0]; /* default format */
+
+ width = pix->width;
+ height = pix->height;
+
+ memset(pix, 0, sizeof(*pix));
+
+ pix->pixelformat = fi->pixelformat;
+ pix->width = clamp_t(u32, width, STFCAMSS_FRAME_MIN_WIDTH,
+ STFCAMSS_FRAME_MAX_WIDTH);
+ pix->height = clamp_t(u32, height, STFCAMSS_FRAME_MIN_HEIGHT,
+ STFCAMSS_FRAME_MAX_HEIGHT);
+ bpl = pix->width * fi->bpp / 8;
+ bpl = ALIGN(bpl, video->bpl_alignment);
+ pix->bytesperline = bpl;
+
+ for (i = 0; i < fi->planes; ++i)
+ pix->sizeimage += bpl * pix->height / fi->vsub[i];
+
+ pix->field = V4L2_FIELD_NONE;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->flags = 0;
+ pix->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ pix->colorspace,
+ pix->ycbcr_enc);
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+
+ return 0;
+}
+
+static int stf_video_init_format(struct stfcamss_video *video)
+{
+ int ret;
+ struct v4l2_format format = {
+ .type = video->type,
+ .fmt.pix = {
+ .width = 1920,
+ .height = 1080,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ },
+ };
+
+ ret = __video_try_fmt(video, &format);
+ if (ret < 0)
+ return ret;
+
+ video->active_fmt = format;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video queue operations
+ */
+
+static int video_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct stfcamss_video *video = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format *format = &video->active_fmt.fmt.pix;
+
+ if (*num_planes) {
+ if (*num_planes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < format->sizeimage)
+ return -EINVAL;
+ } else {
+ *num_planes = 1;
+ sizes[0] = format->sizeimage;
+ }
+
+ if (!sizes[0]) {
+ dev_dbg(video->stfcamss->dev,
+ "%s: buffer size is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(video->stfcamss->dev, "planes = %d, size = %d\n",
+ *num_planes, sizes[0]);
+
+ return 0;
+}
+
+static int video_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct stfcamss_buffer *buffer = to_stfcamss_buffer(vbuf);
+ const struct v4l2_pix_format *fmt = &video->active_fmt.fmt.pix;
+ dma_addr_t *paddr;
+
+ paddr = vb2_plane_cookie(vb, 0);
+ buffer->addr[0] = *paddr;
+
+ if (fmt->pixelformat == V4L2_PIX_FMT_NV12)
+ buffer->addr[1] =
+ buffer->addr[0] + fmt->bytesperline * fmt->height;
+
+ return 0;
+}
+
+static int video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ const struct v4l2_pix_format *fmt = &video->active_fmt.fmt.pix;
+
+ if (fmt->sizeimage > vb2_plane_size(vb, 0)) {
+ dev_dbg(video->stfcamss->dev,
+ "sizeimage = %u, plane size = %u\n",
+ fmt->sizeimage, (unsigned int)vb2_plane_size(vb, 0));
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, fmt->sizeimage);
+
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static void video_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct stfcamss_buffer *buffer = to_stfcamss_buffer(vbuf);
+
+ video->ops->queue_buffer(video, buffer);
+}
+
+static int video_get_subdev_format(struct stfcamss_video *video,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ struct media_entity *entity;
+ int ret;
+
+ entity = &video->vdev.entity;
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_pad_remote_pad_first(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ fmt->pad = pad->index;
+
+ ret = v4l2_subdev_call_state_active(subdev, pad, get_fmt, fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ if (!ret)
+ break;
+ }
+
+ return 0;
+}
+
+static int stf_video_check_format(struct stfcamss_video *video)
+{
+ struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
+ const struct stfcamss_format_info *fi;
+ int ret;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ ret = video_get_subdev_format(video, &sd_fmt);
+ if (ret < 0)
+ return ret;
+
+ fi = video_g_fi_by_mcode(video, sd_fmt.format.code);
+ if (!fi)
+ return -EINVAL;
+
+ if (pix->pixelformat != fi->pixelformat ||
+ pix->height != sd_fmt.format.height ||
+ pix->width != sd_fmt.format.width ||
+ pix->field != sd_fmt.format.field)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int video_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct stfcamss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+ int ret;
+
+ ret = video_device_pipeline_start(vdev, &video->stfcamss->pipe);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to media_pipeline_start: %d\n", ret);
+ goto err_ret_buffers;
+ }
+
+ ret = pm_runtime_resume_and_get(video->stfcamss->dev);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev, "power up failed %d\n", ret);
+ goto err_pipeline_stop;
+ }
+
+ video->ops->start_streaming(video);
+
+ ret = v4l2_subdev_call(video->source_subdev, video, s_stream, true);
+ if (ret) {
+ dev_err(video->stfcamss->dev, "stream on failed\n");
+ goto err_pm_put;
+ }
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(video->stfcamss->dev);
+err_pipeline_stop:
+ video_device_pipeline_stop(vdev);
+err_ret_buffers:
+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void video_stop_streaming(struct vb2_queue *q)
+{
+ struct stfcamss_video *video = vb2_get_drv_priv(q);
+ struct video_device *vdev = &video->vdev;
+
+ video->ops->stop_streaming(video);
+
+ v4l2_subdev_call(video->source_subdev, video, s_stream, false);
+
+ pm_runtime_put(video->stfcamss->dev);
+
+ video_device_pipeline_stop(vdev);
+ video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops stf_video_vb2_q_ops = {
+ .queue_setup = video_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_init = video_buf_init,
+ .buf_prepare = video_buf_prepare,
+ .buf_queue = video_buf_queue,
+ .start_streaming = video_start_streaming,
+ .stop_streaming = video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, "starfive-camss", sizeof(cap->driver));
+ strscpy(cap->card, "StarFive Camera Subsystem", sizeof(cap->card));
+
+ return 0;
+}
+
+static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+ const struct stfcamss_format_info *fi;
+
+ if (f->index >= video->nformats)
+ return -EINVAL;
+
+ if (f->mbus_code) {
+ /* Each entry in the formats[] table has a unique mbus_code */
+ if (f->index > 0)
+ return -EINVAL;
+
+ fi = video_g_fi_by_mcode(video, f->mbus_code);
+ } else {
+ fi = video_g_fi_by_index(video, f->index);
+ }
+
+ if (!fi)
+ return -EINVAL;
+
+ f->pixelformat = fi->pixelformat;
+
+ return 0;
+}
+
+static int video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+ unsigned int i;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].pixelformat == fsize->pixel_format)
+ break;
+ }
+
+ if (i == video->nformats)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = STFCAMSS_FRAME_MIN_WIDTH;
+ fsize->stepwise.max_width = STFCAMSS_FRAME_MAX_WIDTH;
+ fsize->stepwise.min_height = STFCAMSS_FRAME_MIN_HEIGHT;
+ fsize->stepwise.max_height = STFCAMSS_FRAME_MAX_HEIGHT;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+
+ *f = video->active_fmt;
+
+ return 0;
+}
+
+static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+ int ret;
+
+ if (vb2_is_busy(&video->vb2_q))
+ return -EBUSY;
+
+ ret = __video_try_fmt(video, f);
+ if (ret < 0)
+ return ret;
+
+ video->active_fmt = *f;
+
+ return 0;
+}
+
+static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct stfcamss_video *video = video_drvdata(file);
+
+ return __video_try_fmt(video, f);
+}
+
+static const struct v4l2_ioctl_ops stf_vid_ioctl_ops = {
+ .vidioc_querycap = video_querycap,
+ .vidioc_enum_fmt_vid_cap = video_enum_fmt,
+ .vidioc_enum_framesizes = video_enum_framesizes,
+ .vidioc_g_fmt_vid_cap = video_g_fmt,
+ .vidioc_s_fmt_vid_cap = video_s_fmt,
+ .vidioc_try_fmt_vid_cap = video_try_fmt,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+
+static const struct v4l2_file_operations stf_vid_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+/* -----------------------------------------------------------------------------
+ * STFCAMSS video core
+ */
+
+static int stf_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct stfcamss_video *video = video_get_drvdata(vdev);
+
+ return stf_video_check_format(video);
+}
+
+static const struct media_entity_operations stf_media_ops = {
+ .link_validate = stf_link_validate,
+};
+
+static void stf_video_release(struct video_device *vdev)
+{
+ struct stfcamss_video *video = video_get_drvdata(vdev);
+
+ media_entity_cleanup(&vdev->entity);
+
+ mutex_destroy(&video->q_lock);
+ mutex_destroy(&video->lock);
+}
+
+int stf_video_register(struct stfcamss_video *video,
+ struct v4l2_device *v4l2_dev, const char *name)
+{
+ struct video_device *vdev = &video->vdev;
+ struct vb2_queue *q;
+ struct media_pad *pad = &video->pad;
+ int ret;
+
+ mutex_init(&video->q_lock);
+ mutex_init(&video->lock);
+
+ q = &video->vb2_q;
+ q->drv_priv = video;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->ops = &stf_video_vb2_q_ops;
+ q->type = video->type;
+ q->io_modes = VB2_DMABUF | VB2_MMAP;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->buf_struct_size = sizeof(struct stfcamss_buffer);
+ q->dev = video->stfcamss->dev;
+ q->lock = &video->q_lock;
+ q->min_queued_buffers = STFCAMSS_MIN_BUFFERS;
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to init vb2 queue: %d\n", ret);
+ goto err_mutex_destroy;
+ }
+
+ pad->flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, pad);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to init video entity: %d\n", ret);
+ goto err_mutex_destroy;
+ }
+
+ ret = stf_video_init_format(video);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to init format: %d\n", ret);
+ goto err_media_cleanup;
+ }
+
+ vdev->fops = &stf_vid_fops;
+ vdev->ioctl_ops = &stf_vid_ioctl_ops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->entity.ops = &stf_media_ops;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->release = stf_video_release;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->queue = &video->vb2_q;
+ vdev->lock = &video->lock;
+ strscpy(vdev->name, name, sizeof(vdev->name));
+
+ video_set_drvdata(vdev, video);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(video->stfcamss->dev,
+ "Failed to register video device: %d\n", ret);
+ goto err_media_cleanup;
+ }
+
+ return 0;
+
+err_media_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_mutex_destroy:
+ mutex_destroy(&video->lock);
+ mutex_destroy(&video->q_lock);
+ return ret;
+}
+
+void stf_video_unregister(struct stfcamss_video *video)
+{
+ vb2_video_unregister_device(&video->vdev);
+}
diff --git a/drivers/staging/media/starfive/camss/stf-video.h b/drivers/staging/media/starfive/camss/stf-video.h
new file mode 100644
index 000000000..8052b77e3
--- /dev/null
+++ b/drivers/staging/media/starfive/camss/stf-video.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * stf_video.h
+ *
+ * StarFive Camera Subsystem - V4L2 device node
+ *
+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
+ */
+
+#ifndef STF_VIDEO_H
+#define STF_VIDEO_H
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+
+#define STFCAMSS_FRAME_MIN_WIDTH 64
+#define STFCAMSS_FRAME_MAX_WIDTH 1920
+#define STFCAMSS_FRAME_MIN_HEIGHT 64
+#define STFCAMSS_FRAME_MAX_HEIGHT 1080
+#define STFCAMSS_FRAME_WIDTH_ALIGN_8 8
+#define STFCAMSS_FRAME_WIDTH_ALIGN_128 128
+#define STFCAMSS_MIN_BUFFERS 2
+
+#define STFCAMSS_MAX_ENTITY_NAME_LEN 27
+
+enum stf_v_line_id {
+ STF_V_LINE_WR = 0,
+ STF_V_LINE_ISP,
+ STF_V_LINE_MAX,
+};
+
+enum stf_capture_type {
+ STF_CAPTURE_RAW = 0,
+ STF_CAPTURE_YUV,
+ STF_CAPTURE_NUM,
+};
+
+struct stfcamss_buffer {
+ struct vb2_v4l2_buffer vb;
+ dma_addr_t addr[2];
+ struct list_head queue;
+};
+
+struct fract {
+ u8 numerator;
+ u8 denominator;
+};
+
+/*
+ * struct stfcamss_format_info - ISP media bus format information
+ * @code: V4L2 media bus format code
+ * @pixelformat: V4L2 pixel format FCC identifier
+ * @planes: Number of planes
+ * @vsub: Vertical subsampling (for each plane)
+ * @bpp: Bits per pixel when stored in memory
+ */
+struct stfcamss_format_info {
+ u32 code;
+ u32 pixelformat;
+ u8 planes;
+ u8 vsub[3];
+ u8 bpp;
+};
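+
+/*
+ * Illustrative example (not an actual entry from this driver): with the
+ * fields above, a two-plane NV12 format could be described as
+ *
+ *	static const struct stfcamss_format_info nv12_example = {
+ *		.pixelformat	= V4L2_PIX_FMT_NV12,
+ *		.planes		= 2,
+ *		.vsub		= { 1, 2 },
+ *		.bpp		= 8,
+ *	};
+ *
+ * This is what __video_try_fmt() in stf-video.c consumes: it computes
+ * bytesperline = ALIGN(width * bpp / 8, bpl_alignment), then sums
+ * bytesperline * height / vsub[i] over the planes to get sizeimage.
+ */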
+
+struct stfcamss_video {
+ struct stfcamss *stfcamss;
+ struct vb2_queue vb2_q;
+ struct video_device vdev;
+ struct media_pad pad;
+ struct v4l2_format active_fmt;
+ enum v4l2_buf_type type;
+ const struct stfcamss_video_ops *ops;
+ struct mutex lock; /* serialize device access */
+ struct mutex q_lock; /* protects the queue */
+ unsigned int bpl_alignment;
+ const struct stfcamss_format_info *formats;
+ unsigned int nformats;
+ struct v4l2_subdev *source_subdev;
+};
+
+struct stfcamss_video_ops {
+ int (*queue_buffer)(struct stfcamss_video *video,
+ struct stfcamss_buffer *buf);
+ int (*flush_buffers)(struct stfcamss_video *video,
+ enum vb2_buffer_state state);
+ void (*start_streaming)(struct stfcamss_video *video);
+ void (*stop_streaming)(struct stfcamss_video *video);
+};
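+
+/*
+ * These callbacks are driven by the vb2 queue operations in stf-video.c:
+ * buf_queue() hands each buffer to queue_buffer(), start_streaming() and
+ * stop_streaming() bracket the capture path, and flush_buffers() returns
+ * pending buffers (VB2_BUF_STATE_QUEUED when starting the stream fails,
+ * VB2_BUF_STATE_ERROR when the stream stops). A minimal backend sketch,
+ * with a hypothetical list and lock standing in for real capture state:
+ *
+ *	static int my_queue_buffer(struct stfcamss_video *video,
+ *				   struct stfcamss_buffer *buf)
+ *	{
+ *		spin_lock(&my_buf_lock);
+ *		list_add_tail(&buf->queue, &my_pending_bufs);
+ *		spin_unlock(&my_buf_lock);
+ *		return 0;
+ *	}
+ */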
+
+int stf_video_register(struct stfcamss_video *video,
+ struct v4l2_device *v4l2_dev, const char *name);
+
+void stf_video_unregister(struct stfcamss_video *video);
+
+#endif /* STF_VIDEO_H */
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index 621944f99..cb07a343c 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -6,7 +6,6 @@ config VIDEO_SUNXI_CEDRUS
depends on HAS_DMA
depends on OF
select MEDIA_CONTROLLER
- select MEDIA_CONTROLLER_REQUEST_API
select SUNXI_SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
index ec277ece4..00aa304a7 100644
--- a/drivers/staging/media/sunxi/cedrus/TODO
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -1,7 +1,16 @@
-Before this stateless decoder driver can leave the staging area:
-* The Request API needs to be stabilized;
-* The codec-specific controls need to be thoroughly reviewed to ensure they
- cover all intended uses cases;
-* Userspace support for the Request API needs to be reviewed;
-* Another stateless decoder driver should be submitted;
-* At least one stateless encoder driver should be submitted.
+This driver suffers from a bad initial design that leaves various aspects
+entangled with one another, making it difficult to scale to new codecs and to
+add encoding support in the future.
+
+Before leaving the staging area, it should be reworked to clearly distinguish
+between different aspects:
+- platform, with resource management, interrupt handler, watchdog,
+ v4l2 and m2m device registration;
+- proc, with video device registration and related operations;
+- context, with m2m context, queue and controls management;
+- engine, with each individual codec job execution and codec-specific
+ operation callbacks.
+
+This will make it possible to register two different procs (decoder and
+encoder) that share significant common infrastructure, including the common
+v4l2 and m2m devices, while exposing distinct video devices.
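+
+As an illustrative sketch of the engine aspect (nothing here is implemented
+and all names are placeholders), each codec would expose its job handling
+through a small set of operation callbacks:
+
+	struct cedrus_engine {
+		int (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run);
+		void (*trigger)(struct cedrus_ctx *ctx);
+		void (*stop)(struct cedrus_ctx *ctx);
+	};
+
+Registering a second proc (the encoder) would then only require providing
+new engines, leaving the platform and context code untouched.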
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index dfb401df1..3e2843ef6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -653,8 +653,13 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- for (i = 0; i < vq->num_buffers; i++) {
- buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i));
+ for (i = 0; i < vb2_get_num_buffers(vq); i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(vq, i);
+
+ if (!vb)
+ continue;
+
+ buf = vb2_to_cedrus_buffer(vb);
if (buf->codec.h264.mv_col_buf_size > 0) {
dma_free_attrs(dev->dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 16c822637..780da4a8b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -867,8 +867,13 @@ static void cedrus_h265_stop(struct cedrus_ctx *ctx)
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- for (i = 0; i < vq->num_buffers; i++) {
- buf = vb2_to_cedrus_buffer(vb2_get_buffer(vq, i));
+ for (i = 0; i < vb2_get_num_buffers(vq); i++) {
+ struct vb2_buffer *vb = vb2_get_buffer(vq, i);
+
+ if (!vb)
+ continue;
+
+ buf = vb2_to_cedrus_buffer(vb);
if (buf->codec.h265.mv_col_buf_size > 0) {
dma_free_attrs(dev->dev,
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
index 1595a9607..0eea4c2c3 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c
@@ -660,7 +660,7 @@ int sun6i_isp_capture_setup(struct sun6i_isp_device *isp_dev)
queue->buf_struct_size = sizeof(struct sun6i_isp_buffer);
queue->ops = &sun6i_isp_capture_queue_ops;
queue->mem_ops = &vb2_dma_contig_memops;
- queue->min_buffers_needed = 2;
+ queue->min_queued_buffers = 2;
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->lock = &capture->lock;
queue->dev = isp_dev->dev;
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
index e28be895b..53d05e8a3 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c
@@ -489,7 +489,7 @@ int sun6i_isp_params_setup(struct sun6i_isp_device *isp_dev)
queue->buf_struct_size = sizeof(struct sun6i_isp_buffer);
queue->ops = &sun6i_isp_params_queue_ops;
queue->mem_ops = &vb2_vmalloc_memops;
- queue->min_buffers_needed = 1;
+ queue->min_queued_buffers = 1;
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
queue->lock = &params->lock;
queue->dev = isp_dev->dev;
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
index ccbb530aa..46a334b60 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
@@ -256,13 +256,13 @@ sun6i_isp_proc_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun6i_isp_proc_init_cfg(struct v4l2_subdev *subdev,
- struct v4l2_subdev_state *state)
+static int sun6i_isp_proc_init_state(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *state)
{
struct sun6i_isp_device *isp_dev = v4l2_get_subdevdata(subdev);
unsigned int pad = SUN6I_ISP_PROC_PAD_SINK_CSI;
struct v4l2_mbus_framefmt *mbus_format =
- v4l2_subdev_get_try_format(subdev, state, pad);
+ v4l2_subdev_state_get_format(state, pad);
struct mutex *lock = &isp_dev->proc.lock;
mutex_lock(lock);
@@ -302,8 +302,8 @@ static int sun6i_isp_proc_get_fmt(struct v4l2_subdev *subdev,
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *mbus_format = *v4l2_subdev_get_try_format(subdev, state,
- format->pad);
+ *mbus_format = *v4l2_subdev_state_get_format(state,
+ format->pad);
else
*mbus_format = isp_dev->proc.mbus_format;
@@ -325,7 +325,7 @@ static int sun6i_isp_proc_set_fmt(struct v4l2_subdev *subdev,
sun6i_isp_proc_mbus_format_prepare(mbus_format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- *v4l2_subdev_get_try_format(subdev, state, format->pad) =
+ *v4l2_subdev_state_get_format(state, format->pad) =
*mbus_format;
else
isp_dev->proc.mbus_format = *mbus_format;
@@ -336,7 +336,6 @@ static int sun6i_isp_proc_set_fmt(struct v4l2_subdev *subdev,
}
static const struct v4l2_subdev_pad_ops sun6i_isp_proc_pad_ops = {
- .init_cfg = sun6i_isp_proc_init_cfg,
.enum_mbus_code = sun6i_isp_proc_enum_mbus_code,
.get_fmt = sun6i_isp_proc_get_fmt,
.set_fmt = sun6i_isp_proc_set_fmt,
@@ -347,6 +346,10 @@ static const struct v4l2_subdev_ops sun6i_isp_proc_subdev_ops = {
.pad = &sun6i_isp_proc_pad_ops,
};
+static const struct v4l2_subdev_internal_ops sun6i_isp_proc_internal_ops = {
+ .init_state = sun6i_isp_proc_init_state,
+};
+
/* Media Entity */
static const struct media_entity_operations sun6i_isp_proc_entity_ops = {
@@ -501,6 +504,7 @@ int sun6i_isp_proc_setup(struct sun6i_isp_device *isp_dev)
/* V4L2 Subdev */
v4l2_subdev_init(subdev, &sun6i_isp_proc_subdev_ops);
+ subdev->internal_ops = &sun6i_isp_proc_internal_ops;
strscpy(subdev->name, SUN6I_ISP_PROC_NAME, sizeof(subdev->name));
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
subdev->owner = THIS_MODULE;
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index 9aa72863c..255cccd0c 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -222,14 +222,22 @@ static int csi_set_format(struct v4l2_subdev *subdev,
/*
* V4L2 Subdevice Video Operations
*/
-static int tegra_csi_g_frame_interval(struct v4l2_subdev *subdev,
- struct v4l2_subdev_frame_interval *vfi)
+static int tegra_csi_get_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval *vfi)
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
return -ENOIOCTLCMD;
+ /*
+ * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
+ * subdev active state API.
+ */
+ if (vfi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
vfi->interval.numerator = 1;
vfi->interval.denominator = csi_chan->framerate;
@@ -430,8 +438,6 @@ static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
*/
static const struct v4l2_subdev_video_ops tegra_csi_video_ops = {
.s_stream = tegra_csi_s_stream,
- .g_frame_interval = tegra_csi_g_frame_interval,
- .s_frame_interval = tegra_csi_g_frame_interval,
};
static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
@@ -440,6 +446,8 @@ static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
.enum_frame_interval = csi_enum_frameintervals,
.get_fmt = csi_get_format,
.set_fmt = csi_set_format,
+ .get_frame_interval = tegra_csi_get_frame_interval,
+ .set_frame_interval = tegra_csi_get_frame_interval,
};
static const struct v4l2_subdev_ops tegra_csi_ops = {
@@ -818,15 +826,13 @@ rpm_disable:
return ret;
}
-static int tegra_csi_remove(struct platform_device *pdev)
+static void tegra_csi_remove(struct platform_device *pdev)
{
struct tegra_csi *csi = platform_get_drvdata(pdev);
host1x_client_unregister(&csi->client);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
@@ -852,5 +858,5 @@ struct platform_driver tegra_csi_driver = {
.pm = &tegra_csi_pm_ops,
},
.probe = tegra_csi_probe,
- .remove = tegra_csi_remove,
+ .remove_new = tegra_csi_remove,
};
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 94171e62d..af6e3a0d8 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -439,6 +439,7 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.target = V4L2_SEL_TGT_CROP_BOUNDS,
};
+ struct v4l2_rect *try_crop;
int ret;
subdev = tegra_channel_get_remote_source_subdev(chan);
@@ -473,24 +474,25 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
* Attempt to obtain the format size from subdev.
* If not available, try to get crop boundary from subdev.
*/
+ try_crop = v4l2_subdev_state_get_crop(sd_state, 0);
fse.code = fmtinfo->code;
ret = v4l2_subdev_call(subdev, pad, enum_frame_size, sd_state, &fse);
if (ret) {
if (!v4l2_subdev_has_op(subdev, pad, get_selection)) {
- sd_state->pads->try_crop.width = 0;
- sd_state->pads->try_crop.height = 0;
+ try_crop->width = 0;
+ try_crop->height = 0;
} else {
ret = v4l2_subdev_call(subdev, pad, get_selection,
NULL, &sdsel);
if (ret)
return -EINVAL;
- sd_state->pads->try_crop.width = sdsel.r.width;
- sd_state->pads->try_crop.height = sdsel.r.height;
+ try_crop->width = sdsel.r.width;
+ try_crop->height = sdsel.r.height;
}
} else {
- sd_state->pads->try_crop.width = fse.max_width;
- sd_state->pads->try_crop.height = fse.max_height;
+ try_crop->width = fse.max_width;
+ try_crop->height = fse.max_height;
}
ret = v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &fmt);
@@ -1172,7 +1174,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
chan->queue.ops = &tegra_channel_queue_qops;
chan->queue.mem_ops = &vb2_dma_contig_memops;
chan->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- chan->queue.min_buffers_needed = 2;
+ chan->queue.min_queued_buffers = 2;
chan->queue.dev = vi->dev;
ret = vb2_queue_init(&chan->queue);
if (ret < 0) {
@@ -1944,7 +1946,7 @@ rpm_disable:
return ret;
}
-static int tegra_vi_remove(struct platform_device *pdev)
+static void tegra_vi_remove(struct platform_device *pdev)
{
struct tegra_vi *vi = platform_get_drvdata(pdev);
@@ -1953,8 +1955,6 @@ static int tegra_vi_remove(struct platform_device *pdev)
if (vi->ops->vi_enable)
vi->ops->vi_enable(vi, false);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id tegra_vi_of_id_table[] = {
@@ -1979,5 +1979,5 @@ struct platform_driver tegra_vi_driver = {
.pm = &tegra_vi_pm_ops,
},
.probe = tegra_vi_probe,
- .remove = tegra_vi_remove,
+ .remove_new = tegra_vi_remove,
};
diff --git a/drivers/staging/media/tegra-video/vip.c b/drivers/staging/media/tegra-video/vip.c
index e95cc7bb1..8504b9ea9 100644
--- a/drivers/staging/media/tegra-video/vip.c
+++ b/drivers/staging/media/tegra-video/vip.c
@@ -254,15 +254,13 @@ static int tegra_vip_probe(struct platform_device *pdev)
return 0;
}
-static int tegra_vip_remove(struct platform_device *pdev)
+static void tegra_vip_remove(struct platform_device *pdev)
{
struct tegra_vip *vip = platform_get_drvdata(pdev);
host1x_client_unregister(&vip->client);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
@@ -283,5 +281,5 @@ struct platform_driver tegra_vip_driver = {
.of_match_table = tegra_vip_of_id_table,
},
.probe = tegra_vip_probe,
- .remove = tegra_vip_remove,
+ .remove_new = tegra_vip_remove,
};
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index 6af519938..a1492215d 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
rtllib-objs := \
- dot11d.o \
rtllib_module.o \
rtllib_rx.o \
rtllib_tx.o \
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
deleted file mode 100644
index d0b733264..000000000
--- a/drivers/staging/rtl8192e/dot11d.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
-#include "dot11d.h"
-
-struct channel_list {
- u8 channel[32];
- u8 len;
-};
-
-static struct channel_list channel_array[] = {
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64,
- 149, 153, 157, 161, 165}, 24},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56,
- 60, 64}, 21},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
- 56, 60, 64}, 22},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
- 56, 60, 64}, 22},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
- 56, 60, 64}, 22},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52,
- 56, 60, 64}, 22},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13},
- {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52,
- 56, 60, 64}, 21}
-};
-
-void dot11d_init(struct rtllib_device *ieee)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
-
- dot11d_info->enabled = false;
-
- dot11d_info->state = DOT11D_STATE_NONE;
- dot11d_info->country_len = 0;
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
- RESET_CIE_WATCHDOG(ieee);
-}
-EXPORT_SYMBOL(dot11d_init);
-
-void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee)
-{
- int i, max_chan = 14, min_chan = 1;
-
- ieee->global_domain = false;
-
- if (channel_array[channel_plan].len != 0) {
- memset(GET_DOT11D_INFO(ieee)->channel_map, 0,
- sizeof(GET_DOT11D_INFO(ieee)->channel_map));
- for (i = 0; i < channel_array[channel_plan].len; i++) {
- if (channel_array[channel_plan].channel[i] < min_chan ||
- channel_array[channel_plan].channel[i] > max_chan)
- break;
- GET_DOT11D_INFO(ieee)->channel_map[channel_array
- [channel_plan].channel[i]] = 1;
- }
- }
-
- switch (channel_plan) {
- case COUNTRY_CODE_GLOBAL_DOMAIN:
- ieee->global_domain = true;
- for (i = 12; i <= 14; i++)
- GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
- ieee->bss_start_channel = 10;
- break;
-
- case COUNTRY_CODE_WORLD_WIDE_13:
- for (i = 12; i <= 13; i++)
- GET_DOT11D_INFO(ieee)->channel_map[i] = 2;
- ieee->bss_start_channel = 10;
- break;
-
- default:
- ieee->bss_start_channel = 1;
- break;
- }
-}
-EXPORT_SYMBOL(dot11d_channel_map);
-
-void dot11d_reset(struct rtllib_device *ieee)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(ieee);
- u32 i;
-
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
- for (i = 1; i <= 11; i++)
- (dot11d_info->channel_map)[i] = 1;
- for (i = 12; i <= 14; i++)
- (dot11d_info->channel_map)[i] = 2;
- dot11d_info->state = DOT11D_STATE_NONE;
- dot11d_info->country_len = 0;
- RESET_CIE_WATCHDOG(ieee);
-}
-
-void dot11d_update_country(struct rtllib_device *dev, u8 *address,
- u16 country_len, u8 *country)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
- u8 i, j, number_of_triples, max_channel_number;
- struct chnl_txpow_triple *triple;
-
- memset(dot11d_info->channel_map, 0, MAX_CHANNEL_NUMBER + 1);
- memset(dot11d_info->max_tx_power_list, 0xFF, MAX_CHANNEL_NUMBER + 1);
- max_channel_number = 0;
- number_of_triples = (country_len - 3) / 3;
- triple = (struct chnl_txpow_triple *)(country + 3);
- for (i = 0; i < number_of_triples; i++) {
- if (max_channel_number >= triple->first_channel) {
- netdev_info(dev->dev,
- "%s: Invalid country IE, skip it......1\n",
- __func__);
- return;
- }
- if (MAX_CHANNEL_NUMBER < (triple->first_channel +
- triple->num_channels)) {
- netdev_info(dev->dev,
- "%s: Invalid country IE, skip it......2\n",
- __func__);
- return;
- }
-
- for (j = 0; j < triple->num_channels; j++) {
- dot11d_info->channel_map[triple->first_channel + j] = 1;
- dot11d_info->max_tx_power_list[triple->first_channel + j] =
- triple->max_tx_power;
- max_channel_number = triple->first_channel + j;
- }
-
- triple = (struct chnl_txpow_triple *)((u8 *)triple + 3);
- }
-
- UPDATE_CIE_SRC(dev, address);
-
- dot11d_info->country_len = country_len;
- memcpy(dot11d_info->country_buffer, country, country_len);
- dot11d_info->state = DOT11D_STATE_LEARNED;
-}
-
-void dot11d_scan_complete(struct rtllib_device *dev)
-{
- struct rt_dot11d_info *dot11d_info = GET_DOT11D_INFO(dev);
-
- switch (dot11d_info->state) {
- case DOT11D_STATE_LEARNED:
- dot11d_info->state = DOT11D_STATE_DONE;
- break;
- case DOT11D_STATE_DONE:
- dot11d_reset(dev);
- break;
- case DOT11D_STATE_NONE:
- break;
- }
-}
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
deleted file mode 100644
index 6d2b93acf..000000000
--- a/drivers/staging/rtl8192e/dot11d.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
- ******************************************************************************/
-#ifndef __INC_DOT11D_H
-#define __INC_DOT11D_H
-
-#include "rtllib.h"
-
-struct chnl_txpow_triple {
- u8 first_channel;
- u8 num_channels;
- u8 max_tx_power;
-};
-
-enum dot11d_state {
- DOT11D_STATE_NONE = 0,
- DOT11D_STATE_LEARNED,
- DOT11D_STATE_DONE,
-};
-
-/**
- * struct rt_dot11d_info * @country_len: value greater than 0 if
- * @country_buffer contains valid country information element.
- * @channel_map: holds channel values
- * 0 - invalid,
- * 1 - valid (active scan),
- * 2 - valid (passive scan)
- * @country_src_addr - Source AP of the country IE
- */
-
-struct rt_dot11d_info {
- bool enabled;
-
- u16 country_len;
- u8 country_buffer[MAX_IE_LEN];
- u8 country_src_addr[6];
- u8 country_watchdog;
-
- u8 channel_map[MAX_CHANNEL_NUMBER + 1];
- u8 max_tx_power_list[MAX_CHANNEL_NUMBER + 1];
-
- enum dot11d_state state;
-};
-
-static inline void copy_mac_addr(unsigned char *des, unsigned char *src)
-{
- memcpy(des, src, 6);
-}
-
-#define GET_DOT11D_INFO(__ieee_dev) \
- ((struct rt_dot11d_info *)((__ieee_dev)->dot11d_info))
-
-#define IS_DOT11D_ENABLE(__ieee_dev) \
- (GET_DOT11D_INFO(__ieee_dev)->enabled)
-#define IS_COUNTRY_IE_VALID(__ieee_dev) \
- (GET_DOT11D_INFO(__ieee_dev)->country_len > 0)
-
-#define IS_EQUAL_CIE_SRC(__ieee_dev, __address) \
- ether_addr_equal_unaligned( \
- GET_DOT11D_INFO(__ieee_dev)->country_src_addr, __address)
-#define UPDATE_CIE_SRC(__ieee_dev, __address) \
- copy_mac_addr(GET_DOT11D_INFO(__ieee_dev)->country_src_addr, __address)
-
-#define GET_CIE_WATCHDOG(__ieee_dev) \
- (GET_DOT11D_INFO(__ieee_dev)->country_watchdog)
-static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__ieee_dev)
-{
- GET_CIE_WATCHDOG(__ieee_dev) = 0;
-}
-
-#define UPDATE_CIE_WATCHDOG(__ieee_dev) (++GET_CIE_WATCHDOG(__ieee_dev))
-
-void dot11d_init(struct rtllib_device *dev);
-void dot11d_channel_map(u8 channel_plan, struct rtllib_device *ieee);
-void dot11d_reset(struct rtllib_device *dev);
-void dot11d_update_country(struct rtllib_device *dev, u8 *address,
- u16 country_len, u8 *country);
-void dot11d_scan_complete(struct rtllib_device *dev);
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index eba8364d0..7f0c160bc 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -63,9 +63,9 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
if (type == DESC_PACKET_TYPE_INIT &&
(!priv->rtllib->check_nic_enough_desc(dev, TXCMD_QUEUE) ||
- (!skb_queue_empty(&priv->rtllib->skb_waitQ[TXCMD_QUEUE])) ||
+ (!skb_queue_empty(&priv->rtllib->skb_waitq[TXCMD_QUEUE])) ||
(priv->rtllib->queue_stop))) {
- skb_queue_tail(&priv->rtllib->skb_waitQ[TXCMD_QUEUE],
+ skb_queue_tail(&priv->rtllib->skb_waitq[TXCMD_QUEUE],
skb);
} else {
priv->rtllib->softmac_hard_start_xmit(skb, dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index e93394c51..c7a2eae2f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -51,12 +51,12 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
case HW_VAR_MEDIA_STATUS:
{
- enum rt_op_mode OpMode = *((enum rt_op_mode *)(val));
+ enum rt_op_mode op_mode = *((enum rt_op_mode *)(val));
u8 btMsr = rtl92e_readb(dev, MSR);
btMsr &= 0xfc;
- switch (OpMode) {
+ switch (op_mode) {
case RT_OP_MODE_INFRASTRUCTURE:
btMsr |= MSR_INFRA;
break;
@@ -261,7 +261,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->eeprom_customer_id = usValue & 0xff;
usValue = rtl92e_eeprom_read(dev,
EEPROM_ICVersion_ChannelPlan >> 1);
- priv->eeprom_chnl_plan = usValue & 0xff;
IC_Version = (usValue & 0xff00) >> 8;
ICVer8192 = IC_Version & 0xf;
@@ -283,7 +282,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->eeprom_vid = 0;
priv->eeprom_did = 0;
priv->eeprom_customer_id = 0;
- priv->eeprom_chnl_plan = 0;
}
if (!priv->autoload_fail_flag) {
@@ -387,25 +385,15 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
rtl92e_init_adaptive_rate(dev);
- priv->chnl_plan = priv->eeprom_chnl_plan;
-
switch (priv->eeprom_customer_id) {
case EEPROM_CID_NetCore:
priv->customer_id = RT_CID_819X_NETCORE;
break;
case EEPROM_CID_TOSHIBA:
priv->customer_id = RT_CID_TOSHIBA;
- if (priv->eeprom_chnl_plan & 0x80)
- priv->chnl_plan = priv->eeprom_chnl_plan & 0x7f;
- else
- priv->chnl_plan = 0x0;
break;
}
- if (priv->chnl_plan > CHANNEL_PLAN_LEN - 1)
- priv->chnl_plan = 0;
- priv->chnl_plan = COUNTRY_CODE_WORLD_WIDE_13;
-
if (priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
priv->rtllib->bSupportRemoteWakeUp = true;
else
@@ -891,11 +879,11 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
pTxFwInfo->TxHT = (cb_desc->data_rate & 0x80) ? 1 : 0;
pTxFwInfo->TxRate = _rtl92e_rate_mgn_to_hw(cb_desc->data_rate);
- pTxFwInfo->EnableCPUDur = cb_desc->bTxEnableFwCalcDur;
+ pTxFwInfo->EnableCPUDur = cb_desc->tx_enable_fw_calc_dur;
pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
pTxFwInfo->TxRate, cb_desc);
- if (cb_desc->bAMPDUEnable) {
+ if (cb_desc->ampdu_enable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor;
pTxFwInfo->RxAMD = cb_desc->ampdu_density;
@@ -1685,8 +1673,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->TimeStampLow = pDrvInfo->TSFL;
stats->TimeStampHigh = rtl92e_readl(dev, TSFR + 4);
- rtl92e_update_rx_pkt_timestamp(dev, stats);
-
if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
stats->bShift = 1;
@@ -1707,12 +1693,12 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
{
struct r8192_priv *priv = rtllib_priv(dev);
int i;
- u8 OpMode;
+ u8 op_mode;
u8 u1bTmp;
u32 ulRegRead;
- OpMode = RT_OP_MODE_NO_LINK;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &OpMode);
+ op_mode = RT_OP_MODE_NO_LINK;
+ priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &op_mode);
if (!priv->rtllib->bSupportRemoteWakeUp) {
u1bTmp = 0x0;
@@ -1742,7 +1728,7 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
}
for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_purge(&priv->rtllib->skb_waitQ[i]);
+ skb_queue_purge(&priv->rtllib->skb_waitq[i]);
skb_queue_purge(&priv->skb_queue);
}
@@ -1767,20 +1753,17 @@ void rtl92e_update_ratr_table(struct net_device *dev)
ratr_value &= 0x00000FF7;
break;
case WIRELESS_MODE_N_24G:
- if (ieee->ht_info->peer_mimo_ps == 0)
- ratr_value &= 0x0007F007;
- else
- ratr_value &= 0x000FF007;
+ ratr_value &= 0x000FF007;
break;
default:
break;
}
ratr_value &= 0x0FFFFFFF;
if (ieee->ht_info->cur_tx_bw40mhz &&
- ieee->ht_info->bCurShortGI40MHz)
+ ieee->ht_info->cur_short_gi_40mhz)
ratr_value |= 0x80000000;
else if (!ieee->ht_info->cur_tx_bw40mhz &&
- ieee->ht_info->bCurShortGI20MHz)
+ ieee->ht_info->cur_short_gi_20mhz)
ratr_value |= 0x80000000;
rtl92e_writel(dev, RATR0 + rate_index * 4, ratr_value);
rtl92e_writeb(dev, UFWP, 1);
@@ -1814,7 +1797,7 @@ rtl92e_init_variables(struct net_device *dev)
IMR_MGNTDOK | IMR_COMDOK | IMR_HIGHDOK |
IMR_BDOK | IMR_RXCMDOK | IMR_TIMEOUT0 |
IMR_RDU | IMR_RXFOVW | IMR_TXFOVW |
- IMR_BcnInt | IMR_TBDOK | IMR_TBDER);
+ IMR_TBDOK | IMR_TBDER);
priv->bfirst_after_down = false;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 4d12d7385..e1bd4d67e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -671,16 +671,16 @@ static void _rtl92e_phy_switch_channel_work_item(struct net_device *dev)
_rtl92e_phy_switch_channel(dev, priv->chan);
}
-u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
+void rtl92e_set_channel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
if (!priv->up) {
netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
- return false;
+ return;
}
if (priv->sw_chnl_in_progress)
- return false;
+ return;
switch (priv->rtllib->mode) {
case WIRELESS_MODE_B:
@@ -688,7 +688,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
netdev_warn(dev,
"Channel %d not available in 802.11b.\n",
channel);
- return false;
+ return;
}
break;
case WIRELESS_MODE_G:
@@ -697,7 +697,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
netdev_warn(dev,
"Channel %d not available in 802.11g.\n",
channel);
- return false;
+ return;
}
break;
}
@@ -714,7 +714,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
if (priv->up)
_rtl92e_phy_switch_channel_work_item(dev);
priv->sw_chnl_in_progress = false;
- return true;
+ return;
}
static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
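
Dropping the u8 return from rtl92e_set_channel() is safe because every caller ignored it; it also lets rtl_core.c (below) assign the function directly to the rtllib set_chan hook instead of going through the _rtl92e_set_chan wrapper. A sketch of the prototype agreement this relies on — set_chan_fn is an illustrative typedef standing in for the rtllib slot, whose void-returning signature as of this series is an assumption here:

    #include <linux/netdevice.h>
    #include <linux/types.h>

    /* Illustrative callback type; rtllib declares its own set_chan slot. */
    typedef void (*set_chan_fn)(struct net_device *dev, u8 channel);

    void rtl92e_set_channel(struct net_device *dev, u8 channel);

    /* Compiles only because both sides now return void and take u8. */
    static set_chan_fn set_chan = rtl92e_set_channel;
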
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 6c4c33ded..ff4b4004b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -41,7 +41,7 @@ void rtl92e_get_tx_power(struct net_device *dev);
void rtl92e_set_tx_power(struct net_device *dev, u8 channel);
u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath);
-u8 rtl92e_set_channel(struct net_device *dev, u8 channel);
+void rtl92e_set_channel(struct net_device *dev, u8 channel);
void rtl92e_set_bw_mode(struct net_device *dev,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 995daab90..6815d18a7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -226,16 +226,6 @@ static void _rtl92e_tx_timeout(struct net_device *dev, unsigned int txqueue)
netdev_info(dev, "TXTIMEOUT");
}
-static void _rtl92e_set_chan(struct net_device *dev, short ch)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- priv->chan = ch;
-
- if (priv->rf_set_chan)
- priv->rf_set_chan(dev, priv->chan);
-}
-
static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -297,7 +287,6 @@ static void _rtl92e_update_beacon(void *data)
if (ieee->ht_info->current_ht_support)
HT_update_self_and_peer_setting(ieee, net);
ieee->ht_info->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time;
- ieee->ht_info->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode;
_rtl92e_update_cap(dev, net->capability);
}
@@ -426,38 +415,6 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev,
return 0;
}
-static void _rtl92e_prepare_beacon(struct tasklet_struct *t)
-{
- struct r8192_priv *priv = from_tasklet(priv, t,
- irq_prepare_beacon_tasklet);
- struct net_device *dev = priv->rtllib->dev;
- struct sk_buff *pskb = NULL, *pnewskb = NULL;
- struct cb_desc *tcb_desc = NULL;
- struct rtl8192_tx_ring *ring = NULL;
- struct tx_desc *pdesc = NULL;
-
- ring = &priv->tx_ring[BEACON_QUEUE];
- pskb = __skb_dequeue(&ring->queue);
- kfree_skb(pskb);
-
- pnewskb = rtllib_get_beacon(priv->rtllib);
- if (!pnewskb)
- return;
-
- tcb_desc = (struct cb_desc *)(pnewskb->cb + 8);
- tcb_desc->queue_index = BEACON_QUEUE;
- tcb_desc->data_rate = 2;
- tcb_desc->ratr_index = 7;
- tcb_desc->tx_dis_rate_fallback = 1;
- tcb_desc->tx_use_drv_assinged_rate = 1;
- skb_push(pnewskb, priv->rtllib->tx_headroom);
-
- pdesc = &ring->desc[0];
- rtl92e_fill_tx_desc(dev, pdesc, tcb_desc, pnewskb);
- __skb_queue_tail(&ring->queue, pnewskb);
- pdesc->OWN = 1;
-}
-
void rtl92e_config_rate(struct net_device *dev, u16 *rate_config)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -685,7 +642,7 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
priv->rtllib->softmac_hard_start_xmit = _rtl92e_hard_start_xmit;
- priv->rtllib->set_chan = _rtl92e_set_chan;
+ priv->rtllib->set_chan = rtl92e_set_channel;
priv->rtllib->link_change = rtl92e_link_change;
priv->rtllib->softmac_data_hard_start_xmit = _rtl92e_hard_data_xmit;
priv->rtllib->check_nic_enough_desc = _rtl92e_check_nic_enough_desc;
@@ -694,7 +651,6 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->set_wireless_mode = rtl92e_set_wireless_mode;
priv->rtllib->leisure_ps_leave = rtl92e_leisure_ps_leave;
priv->rtllib->set_bw_mode_handler = rtl92e_set_bw_mode;
- priv->rf_set_chan = rtl92e_set_channel;
priv->rtllib->sta_wake_up = rtl92e_hw_wakeup;
priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
@@ -767,7 +723,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
skb_queue_head_init(&priv->skb_queue);
for (i = 0; i < MAX_QUEUE_SIZE; i++)
- skb_queue_head_init(&priv->rtllib->skb_waitQ[i]);
+ skb_queue_head_init(&priv->rtllib->skb_waitq[i]);
}
static void _rtl92e_init_priv_lock(struct r8192_priv *priv)
@@ -796,8 +752,6 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
INIT_DELAYED_WORK(&priv->rtllib->hw_sleep_wq, (void *)rtl92e_hw_sleep_wq);
tasklet_setup(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet);
tasklet_setup(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet);
- tasklet_setup(&priv->irq_prepare_beacon_tasklet,
- _rtl92e_prepare_beacon);
}
static short _rtl92e_get_channel_map(struct net_device *dev)
@@ -806,13 +760,6 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->chnl_plan >= COUNTRY_CODE_MAX) {
- netdev_info(dev,
- "rtl819x_init:Error channel plan! Set to default.\n");
- priv->chnl_plan = COUNTRY_CODE_FCC;
- }
- dot11d_init(priv->rtllib);
- dot11d_channel_map(priv->chnl_plan, priv->rtllib);
for (i = 1; i <= 11; i++)
(priv->rtllib->active_channel_map)[i] = 1;
(priv->rtllib->active_channel_map)[12] = 2;
@@ -1024,21 +971,21 @@ static void _rtl92e_watchdog_wq_cb(void *data)
}
}
if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode == IW_MODE_INFRA)) {
- if (ieee->link_detect_info.NumRxOkInPeriod > 100 ||
- ieee->link_detect_info.NumTxOkInPeriod > 100)
+ if (ieee->link_detect_info.num_rx_ok_in_period > 100 ||
+ ieee->link_detect_info.num_tx_ok_in_period > 100)
bBusyTraffic = true;
- if (ieee->link_detect_info.NumRxOkInPeriod > 4000 ||
- ieee->link_detect_info.NumTxOkInPeriod > 4000) {
+ if (ieee->link_detect_info.num_rx_ok_in_period > 4000 ||
+ ieee->link_detect_info.num_tx_ok_in_period > 4000) {
bHigherBusyTraffic = true;
- if (ieee->link_detect_info.NumRxOkInPeriod > 5000)
+ if (ieee->link_detect_info.num_rx_ok_in_period > 5000)
bHigherBusyRxTraffic = true;
else
bHigherBusyRxTraffic = false;
}
if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
- ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
+ ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
(ieee->link_detect_info.NumRxUnicastOkInPeriod > 2))
bEnterPS = false;
else
@@ -1056,8 +1003,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
rtl92e_leisure_ps_leave(dev);
}
- ieee->link_detect_info.NumRxOkInPeriod = 0;
- ieee->link_detect_info.NumTxOkInPeriod = 0;
+ ieee->link_detect_info.num_rx_ok_in_period = 0;
+ ieee->link_detect_info.num_tx_ok_in_period = 0;
ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
ieee->link_detect_info.bBusyTraffic = bBusyTraffic;
@@ -1240,7 +1187,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
- tcb_desc->bTxEnableFwCalcDur = 1;
+ tcb_desc->tx_enable_fw_calc_dur = 1;
skb_push(skb, priv->rtllib->tx_headroom);
ret = _rtl92e_tx(dev, skb);
if (ret != 0)
@@ -1484,17 +1431,6 @@ void rtl92e_reset_desc_ring(struct net_device *dev)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
}
-void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
- struct rtllib_rx_stats *stats)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
-
- if (stats->bIsAMPDU && !stats->bFirstMPDU)
- stats->mac_time = priv->last_rx_desc_tsf;
- else
- priv->last_rx_desc_tsf = stats->mac_time;
-}
-
long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
{
long signal_power;
@@ -1638,9 +1574,9 @@ static void _rtl92e_tx_resume(struct net_device *dev)
for (queue_index = BK_QUEUE;
queue_index < MAX_QUEUE_SIZE; queue_index++) {
- while ((!skb_queue_empty(&ieee->skb_waitQ[queue_index])) &&
+ while ((!skb_queue_empty(&ieee->skb_waitq[queue_index])) &&
(priv->rtllib->check_nic_enough_desc(dev, queue_index) > 0)) {
- skb = skb_dequeue(&ieee->skb_waitQ[queue_index]);
+ skb = skb_dequeue(&ieee->skb_waitq[queue_index]);
ieee->softmac_data_hard_start_xmit(skb, dev, 0);
}
}
@@ -1827,9 +1763,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
if (inta & IMR_ROK)
tasklet_schedule(&priv->irq_rx_tasklet);
- if (inta & IMR_BcnInt)
- tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
-
if (inta & IMR_RDU) {
rtl92e_writel(dev, INTA_MASK,
rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
@@ -1840,22 +1773,22 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
tasklet_schedule(&priv->irq_rx_tasklet);
if (inta & IMR_BKDOK) {
- priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+ priv->rtllib->link_detect_info.num_tx_ok_in_period++;
_rtl92e_tx_isr(dev, BK_QUEUE);
}
if (inta & IMR_BEDOK) {
- priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+ priv->rtllib->link_detect_info.num_tx_ok_in_period++;
_rtl92e_tx_isr(dev, BE_QUEUE);
}
if (inta & IMR_VIDOK) {
- priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+ priv->rtllib->link_detect_info.num_tx_ok_in_period++;
_rtl92e_tx_isr(dev, VI_QUEUE);
}
if (inta & IMR_VODOK) {
- priv->rtllib->link_detect_info.NumTxOkInPeriod++;
+ priv->rtllib->link_detect_info.num_tx_ok_in_period++;
_rtl92e_tx_isr(dev, VO_QUEUE);
}
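
With the beacon-prepare tasklet removed (it was only ever scheduled from the now-masked IMR_BcnInt interrupt), the driver keeps just the RX and TX tasklets wired up through tasklet_setup(). A sketch of that pairing, assuming a private struct that embeds the tasklet; my_priv and the function names are illustrative:

    #include <linux/interrupt.h>

    struct my_priv {
            struct tasklet_struct rx_tasklet;
    };

    static void rx_tasklet_fn(struct tasklet_struct *t)
    {
            /* recover the enclosing private struct from the tasklet */
            struct my_priv *priv = from_tasklet(priv, t, rx_tasklet);

            (void)priv;     /* process received frames here */
    }

    static void init_tasklets(struct my_priv *priv)
    {
            tasklet_setup(&priv->rx_tasklet, rx_tasklet_fn);
            /* from the IRQ handler: tasklet_schedule(&priv->rx_tasklet); */
    }
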
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index a4afbf3e9..1d6d31292 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -34,8 +34,6 @@
#include "../rtllib.h"
-#include "../dot11d.h"
-
#include "r8192E_firmware.h"
#include "r8192E_hw.h"
@@ -219,7 +217,6 @@ struct r8192_priv {
struct tasklet_struct irq_rx_tasklet;
struct tasklet_struct irq_tx_tasklet;
- struct tasklet_struct irq_prepare_beacon_tasklet;
struct mutex wx_mutex;
struct mutex rf_mutex;
@@ -228,8 +225,6 @@ struct r8192_priv {
struct rt_stats stats;
struct iw_statistics wstats;
- u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
-
struct rx_desc *rx_ring;
struct sk_buff *rx_buf[MAX_RX_COUNT];
dma_addr_t rx_ring_dma;
@@ -237,8 +232,6 @@ struct r8192_priv {
int rxringcount;
u16 rxbuffersize;
- u64 last_rx_desc_tsf;
-
u32 receive_config;
u8 retry_data;
u8 retry_rts;
@@ -286,7 +279,6 @@ struct r8192_priv {
u16 eeprom_vid;
u16 eeprom_did;
u8 eeprom_customer_id;
- u16 eeprom_chnl_plan;
u8 eeprom_tx_pwr_level_cck[14];
u8 eeprom_tx_pwr_level_ofdm24g[14];
@@ -312,7 +304,6 @@ struct r8192_priv {
bool tx_pwr_data_read_from_eeprom;
- u16 chnl_plan;
u8 hw_rf_off_action;
bool rf_change_in_progress;
@@ -396,8 +387,6 @@ void rtl92e_irq_enable(struct net_device *dev);
void rtl92e_config_rate(struct net_device *dev, u16 *rate_config);
void rtl92e_irq_disable(struct net_device *dev);
-void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
- struct rtllib_rx_stats *stats);
long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index);
void rtl92e_update_rx_statistics(struct r8192_priv *priv,
struct rtllib_rx_stats *pprevious_stats);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 330dafd62..92143c50c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -287,9 +287,9 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (priv->rtllib->link_state == MAC80211_LINKED) {
bshort_gi_enabled = (ht_info->cur_tx_bw40mhz &&
- ht_info->bCurShortGI40MHz) ||
+ ht_info->cur_short_gi_40mhz) ||
(!ht_info->cur_tx_bw40mhz &&
- ht_info->bCurShortGI20MHz);
+ ht_info->cur_short_gi_20mhz);
pra->upper_rssi_threshold_ratr =
(pra->upper_rssi_threshold_ratr & (~BIT(31))) |
@@ -1142,7 +1142,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
rtl92e_writel(dev, EDCAPARA_BE,
- edca_setting_UL[ht_info->IOTPeer]);
+ edca_setting_UL[ht_info->iot_peer]);
priv->bis_cur_rdlstate = false;
}
} else {
@@ -1150,10 +1150,10 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
!priv->bcurrent_turbo_EDCA) {
if (priv->rtllib->mode == WIRELESS_MODE_G)
rtl92e_writel(dev, EDCAPARA_BE,
- edca_setting_DL_GMode[ht_info->IOTPeer]);
+ edca_setting_DL_GMode[ht_info->iot_peer]);
else
rtl92e_writel(dev, EDCAPARA_BE,
- edca_setting_DL[ht_info->IOTPeer]);
+ edca_setting_DL[ht_info->iot_peer]);
priv->bis_cur_rdlstate = true;
}
}
@@ -1164,17 +1164,17 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
!priv->bcurrent_turbo_EDCA) {
if (priv->rtllib->mode == WIRELESS_MODE_G)
rtl92e_writel(dev, EDCAPARA_BE,
- edca_setting_DL_GMode[ht_info->IOTPeer]);
+ edca_setting_DL_GMode[ht_info->iot_peer]);
else
rtl92e_writel(dev, EDCAPARA_BE,
- edca_setting_DL[ht_info->IOTPeer]);
+ edca_setting_DL[ht_info->iot_peer]);
priv->bis_cur_rdlstate = true;
}
} else {
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
rtl92e_writel(dev, EDCAPARA_BE,
- edca_setting_UL[ht_info->IOTPeer]);
+ edca_setting_UL[ht_info->iot_peer]);
priv->bis_cur_rdlstate = false;
}
}
@@ -1217,7 +1217,7 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev)
ht_info->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
return;
}
- if (ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
+ if (ht_info->iot_peer == HT_IOT_PEER_BROADCOM) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
if (curRxOkCnt > 4 * curTxOkCnt)
@@ -1713,7 +1713,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
static u8 reg_c38_State = RegC38_Default;
if (priv->rtllib->link_state == MAC80211_LINKED &&
- priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM) {
+ priv->rtllib->ht_info->iot_peer == HT_IOT_PEER_BROADCOM) {
if (priv->rtllib->bfsync_enable == 0) {
switch (priv->rtllib->fsync_state) {
case Default_Fsync:
@@ -1819,7 +1819,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
priv->dynamic_tx_low_pwr = false;
return;
}
- if ((priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) &&
+ if ((priv->rtllib->ht_info->iot_peer == HT_IOT_PEER_ATHEROS) &&
(priv->rtllib->mode == WIRELESS_MODE_G)) {
txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 4371ab123..4c884c527 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -158,28 +158,6 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
return ret;
}
-struct iw_range_with_scan_capa {
- /* Informative stuff (to choose between different interface) */
- __u32 throughput; /* To give an idea... */
- /* In theory this value should be the maximum benchmarked
- * TCP/IP throughput, because with most of these devices the
- * bit rate is meaningless (overhead an co) to estimate how
- * fast the connection will go and pick the fastest one.
- * I suggest people to play with Netperf or any benchmark...
- */
-
- /* NWID (or domain id) */
- __u32 min_nwid; /* Minimal NWID we are able to set */
- __u32 max_nwid; /* Maximal NWID we are able to set */
-
- /* Old Frequency (backward compat - moved lower ) */
- __u16 old_num_channels;
- __u8 old_num_frequency;
-
- /* Scan capabilities */
- __u8 scan_capa;
-};
-
static int _rtl92e_wx_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 4af8055d2..ee9ce3921 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -23,35 +23,35 @@ static void deactivate_ba_entry(struct rtllib_device *ieee, struct ba_record *ba
del_timer_sync(&ba->timer);
}
-static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
+static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *ts)
{
- struct ba_record *admitted_ba = &pTxTs->TxAdmittedBARecord;
- struct ba_record *pending_ba = &pTxTs->TxPendingBARecord;
- u8 bSendDELBA = false;
+ struct ba_record *admitted_ba = &ts->tx_admitted_ba_record;
+ struct ba_record *pending_ba = &ts->tx_pending_ba_record;
+ u8 send_del_ba = false;
if (pending_ba->b_valid) {
deactivate_ba_entry(ieee, pending_ba);
- bSendDELBA = true;
+ send_del_ba = true;
}
if (admitted_ba->b_valid) {
deactivate_ba_entry(ieee, admitted_ba);
- bSendDELBA = true;
+ send_del_ba = true;
}
- return bSendDELBA;
+ return send_del_ba;
}
static u8 rx_ts_delete_ba(struct rtllib_device *ieee, struct rx_ts_record *ts)
{
struct ba_record *ba = &ts->rx_admitted_ba_record;
- u8 bSendDELBA = false;
+ u8 send_del_ba = false;
if (ba->b_valid) {
deactivate_ba_entry(ieee, ba);
- bSendDELBA = true;
+ send_del_ba = true;
}
- return bSendDELBA;
+ return send_del_ba;
}
void rtllib_reset_ba_entry(struct ba_record *ba)
@@ -68,7 +68,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
u16 status_code, u8 type)
{
struct sk_buff *skb = NULL;
- struct ieee80211_hdr_3addr *BAReq = NULL;
+ struct ieee80211_hdr_3addr *ba_req = NULL;
u8 *tag = NULL;
u16 len = ieee->tx_headroom + 9;
@@ -87,13 +87,13 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
skb_reserve(skb, ieee->tx_headroom);
- BAReq = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
+ ba_req = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
- ether_addr_copy(BAReq->addr1, dst);
- ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);
+ ether_addr_copy(ba_req->addr1, dst);
+ ether_addr_copy(ba_req->addr2, ieee->dev->dev_addr);
- ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
- BAReq->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+ ether_addr_copy(ba_req->addr3, ieee->current_network.bssid);
+ ba_req->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
tag = skb_put(skb, 9);
*tag++ = ACT_CAT_BA;
@@ -127,9 +127,9 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
struct ba_record *ba,
enum tr_select TxRxSelect, u16 reason_code)
{
- union delba_param_set DelbaParamSet;
+ union delba_param_set del_ba_param_set;
struct sk_buff *skb = NULL;
- struct ieee80211_hdr_3addr *Delba = NULL;
+ struct ieee80211_hdr_3addr *del_ba = NULL;
u8 *tag = NULL;
u16 len = 6 + ieee->tx_headroom;
@@ -137,10 +137,10 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
netdev_dbg(ieee->dev, "%s(): reason_code(%d) sentd to: %pM\n",
__func__, reason_code, dst);
- memset(&DelbaParamSet, 0, 2);
+ memset(&del_ba_param_set, 0, 2);
- DelbaParamSet.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
- DelbaParamSet.field.tid = ba->ba_param_set.field.tid;
+ del_ba_param_set.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
+ del_ba_param_set.field.tid = ba->ba_param_set.field.tid;
skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr));
if (!skb)
@@ -148,19 +148,19 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
skb_reserve(skb, ieee->tx_headroom);
- Delba = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
+ del_ba = skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
- ether_addr_copy(Delba->addr1, dst);
- ether_addr_copy(Delba->addr2, ieee->dev->dev_addr);
- ether_addr_copy(Delba->addr3, ieee->current_network.bssid);
- Delba->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
+ ether_addr_copy(del_ba->addr1, dst);
+ ether_addr_copy(del_ba->addr2, ieee->dev->dev_addr);
+ ether_addr_copy(del_ba->addr3, ieee->current_network.bssid);
+ del_ba->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
tag = skb_put(skb, 6);
*tag++ = ACT_CAT_BA;
*tag++ = ACT_DELBA;
- put_unaligned_le16(DelbaParamSet.short_data, tag);
+ put_unaligned_le16(del_ba_param_set.short_data, tag);
tag += 2;
put_unaligned_le16(reason_code, tag);
@@ -215,11 +215,11 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *req = NULL;
u16 rc = 0;
- u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
+ u8 *dst = NULL, *dialog_token = NULL, *tag = NULL;
struct ba_record *ba = NULL;
- union ba_param_set *pBaParamSet = NULL;
- u16 *pBaTimeoutVal = NULL;
- union sequence_control *pBaStartSeqCtrl = NULL;
+ union ba_param_set *ba_param_set = NULL;
+ u16 *ba_timeout_value = NULL;
+ union sequence_control *ba_start_seq_ctrl = NULL;
struct rx_ts_record *ts = NULL;
if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
@@ -238,10 +238,10 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
tag = (u8 *)req;
dst = (u8 *)(&req->addr2[0]);
tag += sizeof(struct ieee80211_hdr_3addr);
- pDialogToken = tag + 2;
- pBaParamSet = (union ba_param_set *)(tag + 3);
- pBaTimeoutVal = (u16 *)(tag + 5);
- pBaStartSeqCtrl = (union sequence_control *)(req + 7);
+ dialog_token = tag + 2;
+ ba_param_set = (union ba_param_set *)(tag + 3);
+ ba_timeout_value = (u16 *)(tag + 5);
+ ba_start_seq_ctrl = (union sequence_control *)(req + 7);
if (!ieee->current_network.qos_data.active ||
!ieee->ht_info->current_ht_support ||
@@ -254,14 +254,14 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
goto OnADDBAReq_Fail;
}
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)(pBaParamSet->field.tid), RX_DIR, true)) {
+ (u8)(ba_param_set->field.tid), RX_DIR, true)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
goto OnADDBAReq_Fail;
}
ba = &ts->rx_admitted_ba_record;
- if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
+ if (ba_param_set->field.ba_policy == BA_POLICY_DELAYED) {
rc = ADDBA_STATUS_INVALID_PARAM;
netdev_warn(ieee->dev, "%s(): BA Policy is not correct\n",
__func__);
@@ -271,10 +271,10 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
rtllib_FlushRxTsPendingPkts(ieee, ts);
deactivate_ba_entry(ieee, ba);
- ba->dialog_token = *pDialogToken;
- ba->ba_param_set = *pBaParamSet;
- ba->ba_timeout_value = *pBaTimeoutVal;
- ba->ba_start_seq_ctrl = *pBaStartSeqCtrl;
+ ba->dialog_token = *dialog_token;
+ ba->ba_param_set = *ba_param_set;
+ ba->ba_timeout_value = *ba_timeout_value;
+ ba->ba_start_seq_ctrl = *ba_start_seq_ctrl;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
(ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
@@ -291,9 +291,9 @@ OnADDBAReq_Fail:
{
struct ba_record BA;
- BA.ba_param_set = *pBaParamSet;
- BA.ba_timeout_value = *pBaTimeoutVal;
- BA.dialog_token = *pDialogToken;
+ BA.ba_param_set = *ba_param_set;
+ BA.ba_timeout_value = *ba_timeout_value;
+ BA.dialog_token = *dialog_token;
BA.ba_param_set.field.ba_policy = BA_POLICY_IMMEDIATE;
rtllib_send_ADDBARsp(ieee, dst, &BA, rc);
return 0;
@@ -303,11 +303,11 @@ OnADDBAReq_Fail:
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *rsp = NULL;
- struct ba_record *pending_ba, *pAdmittedBA;
+ struct ba_record *pending_ba, *admitted_ba;
struct tx_ts_record *ts = NULL;
- u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
- u16 *status_code = NULL, *pBaTimeoutVal = NULL;
- union ba_param_set *pBaParamSet = NULL;
+ u8 *dst = NULL, *dialog_token = NULL, *tag = NULL;
+ u16 *status_code = NULL, *ba_timeout_value = NULL;
+ union ba_param_set *ba_param_set = NULL;
u16 reason_code;
if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 9) {
@@ -320,40 +320,40 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
tag = (u8 *)rsp;
dst = (u8 *)(&rsp->addr2[0]);
tag += sizeof(struct ieee80211_hdr_3addr);
- pDialogToken = tag + 2;
+ dialog_token = tag + 2;
status_code = (u16 *)(tag + 3);
- pBaParamSet = (union ba_param_set *)(tag + 5);
- pBaTimeoutVal = (u16 *)(tag + 7);
+ ba_param_set = (union ba_param_set *)(tag + 5);
+ ba_timeout_value = (u16 *)(tag + 7);
if (!ieee->current_network.qos_data.active ||
!ieee->ht_info->current_ht_support ||
- !ieee->ht_info->bCurrentAMPDUEnable) {
+ !ieee->ht_info->current_ampdu_enable) {
netdev_warn(ieee->dev,
"reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",
ieee->current_network.qos_data.active,
ieee->ht_info->current_ht_support,
- ieee->ht_info->bCurrentAMPDUEnable);
+ ieee->ht_info->current_ampdu_enable);
reason_code = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
}
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)(pBaParamSet->field.tid), TX_DIR, false)) {
+ (u8)(ba_param_set->field.tid), TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
reason_code = DELBA_REASON_UNKNOWN_BA;
goto OnADDBARsp_Reject;
}
- ts->bAddBaReqInProgress = false;
- pending_ba = &ts->TxPendingBARecord;
- pAdmittedBA = &ts->TxAdmittedBARecord;
+ ts->add_ba_req_in_progress = false;
+ pending_ba = &ts->tx_pending_ba_record;
+ admitted_ba = &ts->tx_admitted_ba_record;
- if (pAdmittedBA->b_valid) {
+ if (admitted_ba->b_valid) {
netdev_dbg(ieee->dev, "%s(): ADDBA response already admitted\n",
__func__);
return -1;
} else if (!pending_ba->b_valid ||
- (*pDialogToken != pending_ba->dialog_token)) {
+ (*dialog_token != pending_ba->dialog_token)) {
netdev_warn(ieee->dev,
"%s(): ADDBA Rsp. BA invalid, DELBA!\n",
__func__);
@@ -367,22 +367,22 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
}
if (*status_code == ADDBA_STATUS_SUCCESS) {
- if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
- ts->bAddBaReqDelayed = true;
- deactivate_ba_entry(ieee, pAdmittedBA);
+ if (ba_param_set->field.ba_policy == BA_POLICY_DELAYED) {
+ ts->add_ba_req_delayed = true;
+ deactivate_ba_entry(ieee, admitted_ba);
reason_code = DELBA_REASON_END_BA;
goto OnADDBARsp_Reject;
}
- pAdmittedBA->dialog_token = *pDialogToken;
- pAdmittedBA->ba_timeout_value = *pBaTimeoutVal;
- pAdmittedBA->ba_start_seq_ctrl = pending_ba->ba_start_seq_ctrl;
- pAdmittedBA->ba_param_set = *pBaParamSet;
- deactivate_ba_entry(ieee, pAdmittedBA);
- activate_ba_entry(pAdmittedBA, *pBaTimeoutVal);
+ admitted_ba->dialog_token = *dialog_token;
+ admitted_ba->ba_timeout_value = *ba_timeout_value;
+ admitted_ba->ba_start_seq_ctrl = pending_ba->ba_start_seq_ctrl;
+ admitted_ba->ba_param_set = *ba_param_set;
+ deactivate_ba_entry(ieee, admitted_ba);
+ activate_ba_entry(admitted_ba, *ba_timeout_value);
} else {
- ts->bAddBaReqDelayed = true;
- ts->bDisable_AddBa = true;
+ ts->add_ba_req_delayed = true;
+ ts->disable_add_ba = true;
reason_code = DELBA_REASON_END_BA;
goto OnADDBARsp_Reject;
}
@@ -393,7 +393,7 @@ OnADDBARsp_Reject:
{
struct ba_record BA;
- BA.ba_param_set = *pBaParamSet;
+ BA.ba_param_set = *ba_param_set;
rtllib_send_DELBA(ieee, dst, &BA, TX_DIR, reason_code);
return 0;
}
@@ -402,7 +402,7 @@ OnADDBARsp_Reject:
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *delba = NULL;
- union delba_param_set *pDelBaParamSet = NULL;
+ union delba_param_set *del_ba_param_set = NULL;
u8 *dst = NULL;
if (skb->len < sizeof(struct ieee80211_hdr_3addr) + 6) {
@@ -427,46 +427,46 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
#endif
delba = (struct ieee80211_hdr_3addr *)skb->data;
dst = (u8 *)(&delba->addr2[0]);
- pDelBaParamSet = (union delba_param_set *)&delba->seq_ctrl + 2;
+ del_ba_param_set = (union delba_param_set *)&delba->seq_ctrl + 2;
- if (pDelBaParamSet->field.initiator == 1) {
+ if (del_ba_param_set->field.initiator == 1) {
struct rx_ts_record *ts;
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)pDelBaParamSet->field.tid, RX_DIR, false)) {
+ (u8)del_ba_param_set->field.tid, RX_DIR, false)) {
netdev_warn(ieee->dev,
"%s(): can't get TS for RXTS. dst:%pM TID:%d\n",
__func__, dst,
- (u8)pDelBaParamSet->field.tid);
+ (u8)del_ba_param_set->field.tid);
return -1;
}
rx_ts_delete_ba(ieee, ts);
} else {
- struct tx_ts_record *pTxTs;
+ struct tx_ts_record *ts;
- if (!rtllib_get_ts(ieee, (struct ts_common_info **)&pTxTs, dst,
- (u8)pDelBaParamSet->field.tid, TX_DIR, false)) {
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
+ (u8)del_ba_param_set->field.tid, TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS for TXTS\n",
__func__);
return -1;
}
- pTxTs->bUsingBa = false;
- pTxTs->bAddBaReqInProgress = false;
- pTxTs->bAddBaReqDelayed = false;
- del_timer_sync(&pTxTs->TsAddBaTimer);
- tx_ts_delete_ba(ieee, pTxTs);
+ ts->using_ba = false;
+ ts->add_ba_req_in_progress = false;
+ ts->add_ba_req_delayed = false;
+ del_timer_sync(&ts->ts_add_ba_timer);
+ tx_ts_delete_ba(ieee, ts);
}
return 0;
}
void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
- u8 policy, u8 bOverwritePending)
+ u8 policy, u8 overwrite_pending)
{
- struct ba_record *ba = &ts->TxPendingBARecord;
+ struct ba_record *ba = &ts->tx_pending_ba_record;
- if (ba->b_valid && !bOverwritePending)
+ if (ba->b_valid && !overwrite_pending)
return;
deactivate_ba_entry(ieee, ba);
@@ -474,35 +474,35 @@ void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
ba->dialog_token++;
ba->ba_param_set.field.amsdu_support = 0;
ba->ba_param_set.field.ba_policy = policy;
- ba->ba_param_set.field.tid = ts->TsCommonInfo.TSpec.ucTSID;
+ ba->ba_param_set.field.tid = ts->ts_common_info.tspec.ts_id;
ba->ba_param_set.field.buffer_size = 32;
ba->ba_timeout_value = 0;
- ba->ba_start_seq_ctrl.field.seq_num = (ts->TxCurSeq + 3) % 4096;
+ ba->ba_start_seq_ctrl.field.seq_num = (ts->tx_cur_seq + 3) % 4096;
activate_ba_entry(ba, BA_SETUP_TIMEOUT);
- rtllib_send_ADDBAReq(ieee, ts->TsCommonInfo.addr, ba);
+ rtllib_send_ADDBAReq(ieee, ts->ts_common_info.addr, ba);
}
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
- struct ts_common_info *pTsCommonInfo,
+ struct ts_common_info *ts_common_info,
enum tr_select TxRxSelect)
{
if (TxRxSelect == TX_DIR) {
- struct tx_ts_record *pTxTs =
- (struct tx_ts_record *)pTsCommonInfo;
-
- if (tx_ts_delete_ba(ieee, pTxTs))
- rtllib_send_DELBA(ieee, pTsCommonInfo->addr,
- (pTxTs->TxAdmittedBARecord.b_valid) ?
- (&pTxTs->TxAdmittedBARecord) :
- (&pTxTs->TxPendingBARecord),
+ struct tx_ts_record *ts =
+ (struct tx_ts_record *)ts_common_info;
+
+ if (tx_ts_delete_ba(ieee, ts))
+ rtllib_send_DELBA(ieee, ts_common_info->addr,
+ (ts->tx_admitted_ba_record.b_valid) ?
+ (&ts->tx_admitted_ba_record) :
+ (&ts->tx_pending_ba_record),
TxRxSelect, DELBA_REASON_END_BA);
} else if (TxRxSelect == RX_DIR) {
struct rx_ts_record *ts =
- (struct rx_ts_record *)pTsCommonInfo;
+ (struct rx_ts_record *)ts_common_info;
if (rx_ts_delete_ba(ieee, ts))
- rtllib_send_DELBA(ieee, pTsCommonInfo->addr,
+ rtllib_send_DELBA(ieee, ts_common_info->addr,
&ts->rx_admitted_ba_record,
TxRxSelect, DELBA_REASON_END_BA);
}
@@ -510,23 +510,23 @@ void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
void rtllib_ba_setup_timeout(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
- TxPendingBARecord.timer);
+ struct tx_ts_record *ts = from_timer(ts, t,
+ tx_pending_ba_record.timer);
- pTxTs->bAddBaReqInProgress = false;
- pTxTs->bAddBaReqDelayed = true;
- pTxTs->TxPendingBARecord.b_valid = false;
+ ts->add_ba_req_in_progress = false;
+ ts->add_ba_req_delayed = true;
+ ts->tx_pending_ba_record.b_valid = false;
}
void rtllib_tx_ba_inact_timeout(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
- TxAdmittedBARecord.timer);
- struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
- TxTsRecord[pTxTs->num]);
- tx_ts_delete_ba(ieee, pTxTs);
- rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.addr,
- &pTxTs->TxAdmittedBARecord, TX_DIR,
+ struct tx_ts_record *ts = from_timer(ts, t,
+ tx_admitted_ba_record.timer);
+ struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
+ tx_ts_records[ts->num]);
+ tx_ts_delete_ba(ieee, ts);
+ rtllib_send_DELBA(ieee, ts->ts_common_info.addr,
+ &ts->tx_admitted_ba_record, TX_DIR,
DELBA_REASON_TIMEOUT);
}
@@ -535,7 +535,7 @@ void rtllib_rx_ba_inact_timeout(struct timer_list *t)
struct rx_ts_record *ts = from_timer(ts, t,
rx_admitted_ba_record.timer);
struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
- RxTsRecord[ts->num]);
+ rx_ts_records[ts->num]);
rx_ts_delete_ba(ieee, ts);
rtllib_send_DELBA(ieee, ts->ts_common_info.addr,
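
The BA timeout handlers above all follow the timer_setup()/from_timer() pattern: the timer_list is embedded in the record it guards, and the callback recovers that record via container_of(). A minimal sketch, with ba_record trimmed to the two fields the pattern needs and the 200 ms setup window chosen only for illustration:

    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/types.h>

    struct ba_record {
            struct timer_list timer;
            bool valid;
    };

    static void ba_setup_timeout(struct timer_list *t)
    {
            /* from_timer() is container_of() specialized for timers */
            struct ba_record *ba = from_timer(ba, t, timer);

            ba->valid = false;      /* setup window expired */
    }

    static void ba_arm(struct ba_record *ba)
    {
            timer_setup(&ba->timer, ba_setup_timeout, 0);
            mod_timer(&ba->timer, jiffies + msecs_to_jiffies(200));
    }
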
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index f8eb4d553..68577bffb 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -94,51 +94,32 @@ enum ht_aggre_mode {
struct rt_hi_throughput {
u8 enable_ht;
u8 current_ht_support;
- u8 bRegBW40MHz;
- u8 bCurBW40MHz;
- u8 bRegShortGI40MHz;
- u8 bCurShortGI40MHz;
- u8 bRegShortGI20MHz;
- u8 bCurShortGI20MHz;
- u8 bRegSuppCCK;
- u8 bCurSuppCCK;
- enum ht_spec_ver ePeerHTSpecVer;
+ u8 cur_bw_40mhz;
+ u8 cur_short_gi_40mhz;
+ u8 cur_short_gi_20mhz;
+ enum ht_spec_ver peer_ht_spec_ver;
struct ht_capab_ele SelfHTCap;
- struct ht_info_ele SelfHTInfo;
u8 PeerHTCapBuf[32];
u8 PeerHTInfoBuf[32];
- u8 bAMSDU_Support;
- u16 nAMSDU_MaxSize;
- u8 bCurrent_AMSDU_Support;
- u16 nCurrent_AMSDU_MaxSize;
- u8 bAMPDUEnable;
- u8 bCurrentAMPDUEnable;
- u8 AMPDU_Factor;
+ u8 ampdu_enable;
+ u8 current_ampdu_enable;
+ u8 ampdu_factor;
u8 CurrentAMPDUFactor;
- u8 MPDU_Density;
u8 current_mpdu_density;
- enum ht_aggre_mode ForcedAMPDUMode;
u8 forced_ampdu_factor;
u8 forced_mpdu_density;
- enum ht_aggre_mode ForcedAMSDUMode;
- u8 forced_short_gi;
u8 current_op_mode;
- u8 self_mimo_ps;
- u8 peer_mimo_ps;
enum ht_extchnl_offset CurSTAExtChnlOffset;
u8 cur_tx_bw40mhz;
u8 sw_bw_in_progress;
- u8 reg_rt2rt_aggregation;
- u8 RT2RT_HT_Mode;
u8 current_rt2rt_aggregation;
u8 current_rt2rt_long_slot_time;
u8 sz_rt2rt_agg_buf[10];
- u8 reg_rx_reorder_enable;
u8 cur_rx_reorder_enable;
u8 rx_reorder_win_size;
u8 rx_reorder_pending_time;
u16 rx_reorder_drop_counter;
- u8 IOTPeer;
+ u8 iot_peer;
u32 iot_action;
u8 iot_ra_func;
} __packed;
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index e607bccc0..6d0912f90 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -71,75 +71,55 @@ void ht_update_default_setting(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- ht_info->bRegShortGI20MHz = 1;
- ht_info->bRegShortGI40MHz = 1;
+ ht_info->ampdu_enable = 1;
+ ht_info->ampdu_factor = 2;
- ht_info->bRegBW40MHz = 1;
-
- if (ht_info->bRegBW40MHz)
- ht_info->bRegSuppCCK = 1;
- else
- ht_info->bRegSuppCCK = true;
-
- ht_info->nAMSDU_MaxSize = 7935UL;
- ht_info->bAMSDU_Support = 0;
-
- ht_info->bAMPDUEnable = 1;
- ht_info->AMPDU_Factor = 2;
- ht_info->MPDU_Density = 0;
-
- ht_info->self_mimo_ps = 3;
- if (ht_info->self_mimo_ps == 2)
- ht_info->self_mimo_ps = 3;
ieee->tx_dis_rate_fallback = 0;
ieee->tx_use_drv_assinged_rate = 0;
- ieee->bTxEnableFwCalcDur = 1;
-
- ht_info->reg_rt2rt_aggregation = 1;
+ ieee->tx_enable_fw_calc_dur = 1;
- ht_info->reg_rx_reorder_enable = 1;
ht_info->rx_reorder_win_size = 64;
ht_info->rx_reorder_pending_time = 30;
}
-static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
+static u16 ht_mcs_to_data_rate(struct rtllib_device *ieee, u8 mcs_rate)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- u8 is40MHz = (ht_info->bCurBW40MHz) ? 1 : 0;
- u8 isShortGI = (ht_info->bCurBW40MHz) ?
- ((ht_info->bCurShortGI40MHz) ? 1 : 0) :
- ((ht_info->bCurShortGI20MHz) ? 1 : 0);
- return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
+ u8 is40MHz = (ht_info->cur_bw_40mhz) ? 1 : 0;
+ u8 isShortGI = (ht_info->cur_bw_40mhz) ?
+ ((ht_info->cur_short_gi_40mhz) ? 1 : 0) :
+ ((ht_info->cur_short_gi_20mhz) ? 1 : 0);
+ return MCS_DATA_RATE[is40MHz][isShortGI][(mcs_rate & 0x7f)];
}
-u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate)
+u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 data_rate)
{
- u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18,
+ u16 cck_of_dm_rate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18,
0x24, 0x30, 0x48, 0x60, 0x6c};
u8 is40MHz = 0;
u8 isShortGI = 0;
- if (nDataRate < 12)
- return CCKOFDMRate[nDataRate];
- if (nDataRate >= 0x10 && nDataRate <= 0x1f) {
+ if (data_rate < 12)
+ return cck_of_dm_rate[data_rate];
+ if (data_rate >= 0x10 && data_rate <= 0x1f) {
is40MHz = 0;
isShortGI = 0;
- } else if (nDataRate >= 0x20 && nDataRate <= 0x2f) {
+ } else if (data_rate >= 0x20 && data_rate <= 0x2f) {
is40MHz = 1;
isShortGI = 0;
- } else if (nDataRate >= 0x30 && nDataRate <= 0x3f) {
+ } else if (data_rate >= 0x30 && data_rate <= 0x3f) {
is40MHz = 0;
isShortGI = 1;
- } else if (nDataRate >= 0x40 && nDataRate <= 0x4f) {
+ } else if (data_rate >= 0x40 && data_rate <= 0x4f) {
is40MHz = 1;
isShortGI = 1;
}
- return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate & 0xf];
+ return MCS_DATA_RATE[is40MHz][isShortGI][data_rate & 0xf];
}
-bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
+bool is_ht_half_nmode_aps(struct rtllib_device *ieee)
{
bool retValue = false;
struct rtllib_network *net = &ieee->current_network;
@@ -164,187 +144,171 @@ bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
return retValue;
}
-static void HTIOTPeerDetermine(struct rtllib_device *ieee)
+static void ht_iot_peer_determine(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
struct rtllib_network *net = &ieee->current_network;
if (net->bssht.bd_rt2rt_aggregation) {
- ht_info->IOTPeer = HT_IOT_PEER_REALTEK;
+ ht_info->iot_peer = HT_IOT_PEER_REALTEK;
if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_92SE)
- ht_info->IOTPeer = HT_IOT_PEER_REALTEK_92SE;
+ ht_info->iot_peer = HT_IOT_PEER_REALTEK_92SE;
if (net->bssht.rt2rt_ht_mode & RT_HT_CAP_USE_SOFTAP)
- ht_info->IOTPeer = HT_IOT_PEER_92U_SOFTAP;
+ ht_info->iot_peer = HT_IOT_PEER_92U_SOFTAP;
} else if (net->broadcom_cap_exist) {
- ht_info->IOTPeer = HT_IOT_PEER_BROADCOM;
+ ht_info->iot_peer = HT_IOT_PEER_BROADCOM;
} else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
!memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
!memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)) {
- ht_info->IOTPeer = HT_IOT_PEER_BROADCOM;
+ ht_info->iot_peer = HT_IOT_PEER_BROADCOM;
} else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
(memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
(memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
(memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
(memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
net->ralink_cap_exist) {
- ht_info->IOTPeer = HT_IOT_PEER_RALINK;
+ ht_info->iot_peer = HT_IOT_PEER_RALINK;
} else if ((net->atheros_cap_exist) ||
(memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) ||
(memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0)) {
- ht_info->IOTPeer = HT_IOT_PEER_ATHEROS;
+ ht_info->iot_peer = HT_IOT_PEER_ATHEROS;
} else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) ||
net->cisco_cap_exist) {
- ht_info->IOTPeer = HT_IOT_PEER_CISCO;
+ ht_info->iot_peer = HT_IOT_PEER_CISCO;
} else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
net->marvell_cap_exist) {
- ht_info->IOTPeer = HT_IOT_PEER_MARVELL;
+ ht_info->iot_peer = HT_IOT_PEER_MARVELL;
} else if (net->airgo_cap_exist) {
- ht_info->IOTPeer = HT_IOT_PEER_AIRGO;
+ ht_info->iot_peer = HT_IOT_PEER_AIRGO;
} else {
- ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN;
+ ht_info->iot_peer = HT_IOT_PEER_UNKNOWN;
}
- netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->IOTPeer);
+ netdev_dbg(ieee->dev, "IOTPEER: %x\n", ht_info->iot_peer);
}
-static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee,
+static u8 ht_iot_act_is_mgnt_use_cck_6m(struct rtllib_device *ieee,
struct rtllib_network *network)
{
u8 retValue = 0;
- if (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM)
+ if (ieee->ht_info->iot_peer == HT_IOT_PEER_BROADCOM)
retValue = 1;
return retValue;
}
-static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee)
+static u8 ht_iot_act_is_ccd_fsync(struct rtllib_device *ieee)
{
u8 retValue = 0;
- if (ieee->ht_info->IOTPeer == HT_IOT_PEER_BROADCOM)
+ if (ieee->ht_info->iot_peer == HT_IOT_PEER_BROADCOM)
retValue = 1;
return retValue;
}
-static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
+static void ht_iot_act_determine_ra_func(struct rtllib_device *ieee, bool bPeerRx2ss)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
ht_info->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL;
- if (ht_info->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
+ if (ht_info->iot_peer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
ht_info->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R;
if (ht_info->iot_action & HT_IOT_ACT_AMSDU_ENABLE)
ht_info->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU;
}
-void HTResetIOTSetting(struct rt_hi_throughput *ht_info)
+void ht_reset_iot_setting(struct rt_hi_throughput *ht_info)
{
ht_info->iot_action = 0;
- ht_info->IOTPeer = HT_IOT_PEER_UNKNOWN;
+ ht_info->iot_peer = HT_IOT_PEER_UNKNOWN;
ht_info->iot_ra_func = 0;
}
-void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
- u8 *len, u8 IsEncrypt, bool bAssoc)
+void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap,
+ u8 *len, u8 is_encrypt, bool assoc)
{
- struct rt_hi_throughput *pHT = ieee->ht_info;
- struct ht_capab_ele *pCapELE = NULL;
+ struct rt_hi_throughput *ht = ieee->ht_info;
+ struct ht_capab_ele *cap_ele = NULL;
- if (!posHTCap || !pHT) {
+ if (!pos_ht_cap || !ht) {
netdev_warn(ieee->dev,
"%s(): posHTCap and ht_info are null\n", __func__);
return;
}
- memset(posHTCap, 0, *len);
+ memset(pos_ht_cap, 0, *len);
- if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) {
+ if ((assoc) && (ht->peer_ht_spec_ver == HT_SPEC_VER_EWC)) {
static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };
- memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
- pCapELE = (struct ht_capab_ele *)&posHTCap[4];
+ memcpy(pos_ht_cap, EWC11NHTCap, sizeof(EWC11NHTCap));
+ cap_ele = (struct ht_capab_ele *)&pos_ht_cap[4];
*len = 30 + 2;
} else {
- pCapELE = (struct ht_capab_ele *)posHTCap;
+ cap_ele = (struct ht_capab_ele *)pos_ht_cap;
*len = 26 + 2;
}
- pCapELE->AdvCoding = 0;
+ cap_ele->AdvCoding = 0;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- pCapELE->ChlWidth = 0;
+ cap_ele->ChlWidth = 0;
else
- pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
+ cap_ele->ChlWidth = 1;
- pCapELE->MimoPwrSave = pHT->self_mimo_ps;
- pCapELE->GreenField = 0;
- pCapELE->ShortGI20Mhz = 1;
- pCapELE->ShortGI40Mhz = 1;
+ cap_ele->MimoPwrSave = 3;
+ cap_ele->GreenField = 0;
+ cap_ele->ShortGI20Mhz = 1;
+ cap_ele->ShortGI40Mhz = 1;
- pCapELE->TxSTBC = 1;
- pCapELE->RxSTBC = 0;
- pCapELE->DelayBA = 0;
- pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
- pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
- pCapELE->PSMP = 0;
- pCapELE->LSigTxopProtect = 0;
+ cap_ele->TxSTBC = 1;
+ cap_ele->RxSTBC = 0;
+ cap_ele->DelayBA = 0;
+ cap_ele->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
+ cap_ele->DssCCk = 1;
+ cap_ele->PSMP = 0;
+ cap_ele->LSigTxopProtect = 0;
netdev_dbg(ieee->dev,
"TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n",
- pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
+ cap_ele->ChlWidth, cap_ele->MaxAMSDUSize, cap_ele->DssCCk);
- if (IsEncrypt) {
- pCapELE->MPDUDensity = 7;
- pCapELE->MaxRxAMPDUFactor = 2;
+ if (is_encrypt) {
+ cap_ele->MPDUDensity = 7;
+ cap_ele->MaxRxAMPDUFactor = 2;
} else {
- pCapELE->MaxRxAMPDUFactor = 3;
- pCapELE->MPDUDensity = 0;
+ cap_ele->MaxRxAMPDUFactor = 3;
+ cap_ele->MPDUDensity = 0;
}
- memcpy(pCapELE->MCS, ieee->reg_dot11ht_oper_rate_set, 16);
- memset(&pCapELE->ExtHTCapInfo, 0, 2);
- memset(pCapELE->TxBFCap, 0, 4);
+ memcpy(cap_ele->MCS, ieee->reg_dot11ht_oper_rate_set, 16);
+ memset(&cap_ele->ExtHTCapInfo, 0, 2);
+ memset(cap_ele->TxBFCap, 0, 4);
- pCapELE->ASCap = 0;
+ cap_ele->ASCap = 0;
- if (bAssoc) {
- if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS15)
- pCapELE->MCS[1] &= 0x7f;
+ if (assoc) {
+ if (ht->iot_action & HT_IOT_ACT_DISABLE_MCS15)
+ cap_ele->MCS[1] &= 0x7f;
- if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS14)
- pCapELE->MCS[1] &= 0xbf;
+ if (ht->iot_action & HT_IOT_ACT_DISABLE_MCS14)
+ cap_ele->MCS[1] &= 0xbf;
- if (pHT->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
- pCapELE->MCS[1] &= 0x00;
+ if (ht->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
+ cap_ele->MCS[1] &= 0x00;
- if (pHT->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
- pCapELE->ShortGI40Mhz = 0;
+ if (ht->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
+ cap_ele->ShortGI40Mhz = 0;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
- pCapELE->ChlWidth = 0;
- pCapELE->MCS[1] = 0;
+ cap_ele->ChlWidth = 0;
+ cap_ele->MCS[1] = 0;
}
}
}
-void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
- u8 *len, u8 IsEncrypt)
-{
- struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;
-
- if (!posHTInfo || !pHTInfoEle) {
- netdev_warn(ieee->dev,
- "%s(): posHTInfo and pHTInfoEle are null\n",
- __func__);
- return;
- }
-
- memset(posHTInfo, 0, *len);
- *len = 0;
-}
-
-void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
+void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee, u8 *posRT2RTAgg,
u8 *len)
{
if (!posRT2RTAgg) {
@@ -366,7 +330,7 @@ void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
*len = 6 + 2;
}
-static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
+static u8 ht_pick_mcs_rate(struct rtllib_device *ieee, u8 *pOperateMCS)
{
u8 i;
@@ -393,7 +357,7 @@ static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
return true;
}
-u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter)
{
u8 i, j;
@@ -422,8 +386,8 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
bitMap = availableMcsRate[i];
for (j = 0; j < 8; j++) {
if ((bitMap % 2) != 0) {
- if (HTMcsToDataRate(ieee, (8 * i + j)) >
- HTMcsToDataRate(ieee, mcsRate))
+ if (ht_mcs_to_data_rate(ieee, (8 * i + j)) >
+ ht_mcs_to_data_rate(ieee, mcsRate))
mcsRate = 8 * i + j;
}
bitMap >>= 1;
@@ -433,7 +397,7 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
return mcsRate | 0x80;
}
-static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
+static u8 ht_filter_mcs_rate(struct rtllib_device *ieee, u8 *pSupportMCS,
u8 *pOperateMCS)
{
u8 i;
@@ -442,7 +406,7 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
pOperateMCS[i] = ieee->reg_dot11tx_ht_oper_rate_set[i] &
pSupportMCS[i];
- HT_PickMCSRate(ieee, pOperateMCS);
+ ht_pick_mcs_rate(ieee, pOperateMCS);
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
pOperateMCS[1] = 0;
@@ -453,16 +417,15 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
return true;
}
-void HTSetConnectBwMode(struct rtllib_device *ieee,
+void ht_set_connect_bw_mode(struct rtllib_device *ieee,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
-void HTOnAssocRsp(struct rtllib_device *ieee)
+void ht_on_assoc_rsp(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
struct ht_capab_ele *pPeerHTCap = NULL;
struct ht_info_ele *pPeerHTInfo = NULL;
- u16 nMaxAMSDUSize = 0;
u8 *pMcsFilter = NULL;
static const u8 EWC11NHTCap[] = { 0x00, 0x90, 0x4c, 0x33 };
@@ -489,79 +452,48 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE,
pPeerHTCap, sizeof(struct ht_capab_ele));
#endif
- HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
+ ht_set_connect_bw_mode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
(enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
ht_info->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
true : false);
- ht_info->bCurShortGI20MHz = ((ht_info->bRegShortGI20MHz) ?
- ((pPeerHTCap->ShortGI20Mhz == 1) ?
- true : false) : false);
- ht_info->bCurShortGI40MHz = ((ht_info->bRegShortGI40MHz) ?
- ((pPeerHTCap->ShortGI40Mhz == 1) ?
- true : false) : false);
+ ht_info->cur_short_gi_20mhz = ((pPeerHTCap->ShortGI20Mhz == 1) ? true : false);
+ ht_info->cur_short_gi_40mhz = ((pPeerHTCap->ShortGI40Mhz == 1) ? true : false);
- ht_info->bCurSuppCCK = ((ht_info->bRegSuppCCK) ?
- ((pPeerHTCap->DssCCk == 1) ? true :
- false) : false);
-
- ht_info->bCurrent_AMSDU_Support = ht_info->bAMSDU_Support;
-
- nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;
-
- if (ht_info->nAMSDU_MaxSize > nMaxAMSDUSize)
- ht_info->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
- else
- ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;
-
- ht_info->bCurrentAMPDUEnable = ht_info->bAMPDUEnable;
+ ht_info->current_ampdu_enable = ht_info->ampdu_enable;
if (ieee->rtllib_ap_sec_type &&
(ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_WEP | SEC_ALG_TKIP))) {
- if ((ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) ||
- (ht_info->IOTPeer == HT_IOT_PEER_UNKNOWN))
- ht_info->bCurrentAMPDUEnable = false;
+ if ((ht_info->iot_peer == HT_IOT_PEER_ATHEROS) ||
+ (ht_info->iot_peer == HT_IOT_PEER_UNKNOWN))
+ ht_info->current_ampdu_enable = false;
}
- if (!ht_info->reg_rt2rt_aggregation) {
- if (ht_info->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
+ if (ieee->current_network.bssht.bd_rt2rt_aggregation) {
+ if (ieee->pairwise_key_type != KEY_TYPE_NA)
ht_info->CurrentAMPDUFactor =
- pPeerHTCap->MaxRxAMPDUFactor;
+ pPeerHTCap->MaxRxAMPDUFactor;
else
- ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;
-
+ ht_info->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
} else {
- if (ieee->current_network.bssht.bd_rt2rt_aggregation) {
- if (ieee->pairwise_key_type != KEY_TYPE_NA)
- ht_info->CurrentAMPDUFactor =
- pPeerHTCap->MaxRxAMPDUFactor;
- else
- ht_info->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
- } else {
- ht_info->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
- HT_AGG_SIZE_32K);
- }
+ ht_info->CurrentAMPDUFactor = min_t(u32, pPeerHTCap->MaxRxAMPDUFactor,
+ HT_AGG_SIZE_32K);
}
- ht_info->current_mpdu_density = max_t(u8, ht_info->MPDU_Density,
- pPeerHTCap->MPDUDensity);
+
+ ht_info->current_mpdu_density = pPeerHTCap->MPDUDensity;
if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
- ht_info->bCurrentAMPDUEnable = false;
- ht_info->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
+ ht_info->current_ampdu_enable = false;
}
- ht_info->cur_rx_reorder_enable = ht_info->reg_rx_reorder_enable;
+ ht_info->cur_rx_reorder_enable = 1;
if (pPeerHTCap->MCS[0] == 0)
pPeerHTCap->MCS[0] = 0xff;
- HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0));
+ ht_iot_act_determine_ra_func(ieee, ((pPeerHTCap->MCS[1]) != 0));
- HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11ht_oper_rate_set);
+ ht_filter_mcs_rate(ieee, pPeerHTCap->MCS, ieee->dot11ht_oper_rate_set);
- ht_info->peer_mimo_ps = pPeerHTCap->MimoPwrSave;
- if (ht_info->peer_mimo_ps == MIMO_PS_STATIC)
- pMcsFilter = MCS_FILTER_1SS;
- else
- pMcsFilter = MCS_FILTER_ALL;
- ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
+ pMcsFilter = MCS_FILTER_ALL;
+ ieee->HTHighestOperaRate = ht_get_highest_mcs_rate(ieee,
ieee->dot11ht_oper_rate_set,
pMcsFilter);
ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
@@ -569,30 +501,23 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
ht_info->current_op_mode = pPeerHTInfo->OptMode;
}
-void HTInitializeHTInfo(struct rtllib_device *ieee)
+void ht_initialize_ht_info(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
ht_info->current_ht_support = false;
- ht_info->bCurBW40MHz = false;
+ ht_info->cur_bw_40mhz = false;
ht_info->cur_tx_bw40mhz = false;
- ht_info->bCurShortGI20MHz = false;
- ht_info->bCurShortGI40MHz = false;
- ht_info->forced_short_gi = false;
+ ht_info->cur_short_gi_20mhz = false;
+ ht_info->cur_short_gi_40mhz = false;
- ht_info->bCurSuppCCK = true;
-
- ht_info->bCurrent_AMSDU_Support = false;
- ht_info->nCurrent_AMSDU_MaxSize = ht_info->nAMSDU_MaxSize;
- ht_info->current_mpdu_density = ht_info->MPDU_Density;
- ht_info->CurrentAMPDUFactor = ht_info->AMPDU_Factor;
+ ht_info->current_mpdu_density = 0;
+ ht_info->CurrentAMPDUFactor = ht_info->ampdu_factor;
memset((void *)(&ht_info->SelfHTCap), 0,
sizeof(ht_info->SelfHTCap));
- memset((void *)(&ht_info->SelfHTInfo), 0,
- sizeof(ht_info->SelfHTInfo));
memset((void *)(&ht_info->PeerHTCapBuf), 0,
sizeof(ht_info->PeerHTCapBuf));
memset((void *)(&ht_info->PeerHTInfoBuf), 0,
@@ -600,13 +525,12 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
ht_info->sw_bw_in_progress = false;
- ht_info->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
+ ht_info->peer_ht_spec_ver = HT_SPEC_VER_IEEE;
ht_info->current_rt2rt_aggregation = false;
ht_info->current_rt2rt_long_slot_time = false;
- ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
- ht_info->IOTPeer = 0;
+ ht_info->iot_peer = 0;
ht_info->iot_action = 0;
ht_info->iot_ra_func = 0;
@@ -619,7 +543,7 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
}
}
-void HTInitializeBssDesc(struct bss_ht *pBssHT)
+void ht_initialize_bss_desc(struct bss_ht *pBssHT)
{
pBssHT->bd_support_ht = false;
memset(pBssHT->bd_ht_cap_buf, 0, sizeof(pBssHT->bd_ht_cap_buf));
@@ -634,7 +558,7 @@ void HTInitializeBssDesc(struct bss_ht *pBssHT)
pBssHT->rt2rt_ht_mode = (enum rt_ht_capability)0;
}
-void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
struct rtllib_network *pNetwork)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
@@ -645,7 +569,7 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
*/
if (pNetwork->bssht.bd_support_ht) {
ht_info->current_ht_support = true;
- ht_info->ePeerHTSpecVer = pNetwork->bssht.bd_ht_spec_ver;
+ ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver;
if (pNetwork->bssht.bd_ht_cap_len > 0 &&
pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->PeerHTCapBuf))
@@ -660,32 +584,24 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
pNetwork->bssht.bd_ht_info_buf,
pNetwork->bssht.bd_ht_info_len);
- if (ht_info->reg_rt2rt_aggregation) {
- ht_info->current_rt2rt_aggregation =
- pNetwork->bssht.bd_rt2rt_aggregation;
- ht_info->current_rt2rt_long_slot_time =
- pNetwork->bssht.bd_rt2rt_long_slot_time;
- ht_info->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode;
- } else {
- ht_info->current_rt2rt_aggregation = false;
- ht_info->current_rt2rt_long_slot_time = false;
- ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
- }
+ ht_info->current_rt2rt_aggregation =
+ pNetwork->bssht.bd_rt2rt_aggregation;
+ ht_info->current_rt2rt_long_slot_time =
+ pNetwork->bssht.bd_rt2rt_long_slot_time;
- HTIOTPeerDetermine(ieee);
+ ht_iot_peer_determine(ieee);
ht_info->iot_action = 0;
- bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
+ bIOTAction = ht_iot_act_is_mgnt_use_cck_6m(ieee, pNetwork);
if (bIOTAction)
ht_info->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
- bIOTAction = HTIOTActIsCCDFsync(ieee);
+ bIOTAction = ht_iot_act_is_ccd_fsync(ieee);
if (bIOTAction)
ht_info->iot_action |= HT_IOT_ACT_CDD_FSYNC;
} else {
ht_info->current_ht_support = false;
ht_info->current_rt2rt_aggregation = false;
ht_info->current_rt2rt_long_slot_time = false;
- ht_info->RT2RT_HT_Mode = (enum rt_ht_capability)0;
ht_info->iot_action = 0;
ht_info->iot_ra_func = 0;
@@ -706,7 +622,7 @@ void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
-u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
+u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame)
{
if (ieee->ht_info->current_ht_support) {
if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
@@ -717,11 +633,11 @@ u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
return false;
}
-static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
+static void ht_set_connect_bw_mode_callback(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- if (ht_info->bCurBW40MHz) {
+ if (ht_info->cur_bw_40mhz) {
if (ht_info->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
ieee->set_chan(ieee->dev,
ieee->current_network.channel + 2);
@@ -744,15 +660,12 @@ static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
ht_info->sw_bw_in_progress = false;
}
-void HTSetConnectBwMode(struct rtllib_device *ieee,
+void ht_set_connect_bw_mode(struct rtllib_device *ieee,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- if (!ht_info->bRegBW40MHz)
- return;
-
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
bandwidth = HT_CHANNEL_WIDTH_20;
@@ -766,21 +679,21 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
Offset = HT_EXTCHNL_OFFSET_NO_EXT;
if (Offset == HT_EXTCHNL_OFFSET_UPPER ||
Offset == HT_EXTCHNL_OFFSET_LOWER) {
- ht_info->bCurBW40MHz = true;
+ ht_info->cur_bw_40mhz = true;
ht_info->CurSTAExtChnlOffset = Offset;
} else {
- ht_info->bCurBW40MHz = false;
+ ht_info->cur_bw_40mhz = false;
ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
}
} else {
- ht_info->bCurBW40MHz = false;
+ ht_info->cur_bw_40mhz = false;
ht_info->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
}
netdev_dbg(ieee->dev, "%s():ht_info->bCurBW40MHz:%x\n", __func__,
- ht_info->bCurBW40MHz);
+ ht_info->cur_bw_40mhz);
ht_info->sw_bw_in_progress = true;
- HTSetConnectBwModeCallback(ieee);
+ ht_set_connect_bw_mode_callback(ieee);
}
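
The ht_set_connect_bw_mode_callback() change above keeps the logic that retunes the radio to channel + 2 or channel - 2 when a 40 MHz pair is active: 2.4 GHz channel numbers sit 5 MHz apart, so two channel numbers put the tuner on the centre of the bonded 40 MHz channel, 10 MHz from the 20 MHz control channel. A minimal userspace sketch of that arithmetic, with illustrative names only:

#include <stdio.h>

enum ext_offset { EXT_NONE, EXT_UPPER, EXT_LOWER };

/* Centre of the bonded channel sits two 2.4 GHz channel numbers
 * (10 MHz) above or below the control channel, matching the
 * "channel + 2" / "channel - 2" calls in the hunk above. */
static int centre_channel(int control, enum ext_offset off)
{
	switch (off) {
	case EXT_UPPER: return control + 2;	/* secondary above */
	case EXT_LOWER: return control - 2;	/* secondary below */
	default:	return control;		/* plain 20 MHz */
	}
}

int main(void)
{
	printf("ch 1, upper  -> tune %d\n", centre_channel(1, EXT_UPPER));
	printf("ch 11, lower -> tune %d\n", centre_channel(11, EXT_LOWER));
	return 0;
}
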
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 1c00092ea..50e01ca49 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -8,7 +8,7 @@
#define __INC_QOS_TYPE_H
struct qos_tsinfo {
- u8 ucTSID:4;
+ u8 ts_id:4;
u8 ucDirection:2;
};
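
The one-field rename above sits in a classic bit-field layout: a 4-bit TSID and a 2-bit direction share a single byte of the 802.11 TS Info field. A standalone sketch of that packing (names and values here are illustrative; bit order within the byte is compiler-defined, which the driver tolerates because fields are only ever accessed by name):

#include <stdint.h>
#include <stdio.h>

struct tsinfo_sketch {
	uint8_t ts_id:4;	/* traffic stream identifier, 0..15 */
	uint8_t direction:2;	/* 802.11e coding: 0 up, 1 down, 3 bidir */
};

int main(void)
{
	struct tsinfo_sketch ti = { .ts_id = 5, .direction = 3 };

	printf("sizeof = %zu, ts_id = %u, dir = %u\n", sizeof(ti),
	       (unsigned int)ti.ts_id, (unsigned int)ti.direction);
	return 0;
}
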
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index fff36315f..5b0e4cb57 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -17,21 +17,21 @@ enum tr_select {
};
struct ts_common_info {
- struct list_head List;
+ struct list_head list;
u8 addr[ETH_ALEN];
- struct qos_tsinfo TSpec;
+ struct qos_tsinfo tspec;
};
struct tx_ts_record {
- struct ts_common_info TsCommonInfo;
- u16 TxCurSeq;
- struct ba_record TxPendingBARecord;
- struct ba_record TxAdmittedBARecord;
- u8 bAddBaReqInProgress;
- u8 bAddBaReqDelayed;
- u8 bUsingBa;
- u8 bDisable_AddBa;
- struct timer_list TsAddBaTimer;
+ struct ts_common_info ts_common_info;
+ u16 tx_cur_seq;
+ struct ba_record tx_pending_ba_record;
+ struct ba_record tx_admitted_ba_record;
+ u8 add_ba_req_in_progress;
+ u8 add_ba_req_delayed;
+ u8 using_ba;
+ u8 disable_add_ba;
+ struct timer_list ts_add_ba_timer;
u8 num;
};
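
tx_ts_record embeds the renamed ts_common_info as its first member, and code further down (rtllib_get_ts() in rtl819x_TSProc.c) recovers the enclosing record from a struct ts_common_info * with container_of(). A minimal userspace model of that embed-and-recover pattern — container_of is re-derived locally and all other names are stand-ins:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct common { int id; };		/* plays ts_common_info */

struct tx_record {			/* plays tx_ts_record */
	struct common common;
	unsigned int tx_cur_seq;
};

int main(void)
{
	struct tx_record rec = { .common = { .id = 7 }, .tx_cur_seq = 42 };
	struct common *c = &rec.common;	/* what generic list code sees */
	struct tx_record *back = container_of(c, struct tx_record, common);

	printf("seq via recovered record: %u\n", back->tx_cur_seq);
	return 0;
}
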
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 3206fdb3e..7e73d31dc 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -12,7 +12,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
{
struct rx_ts_record *ts = from_timer(ts, t, rx_pkt_pending_timer);
struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
- RxTsRecord[ts->num]);
+ rx_ts_records[ts->num]);
struct rx_reorder_entry *pReorderEntry = NULL;
@@ -25,7 +25,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
while (!list_empty(&ts->rx_pending_pkt_list)) {
pReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
- struct rx_reorder_entry, List);
+ struct rx_reorder_entry, list);
if (index == 0)
ts->rx_indicate_seq = pReorderEntry->SeqNum;
@@ -33,7 +33,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
ts->rx_indicate_seq) ||
SN_EQUAL(pReorderEntry->SeqNum,
ts->rx_indicate_seq)) {
- list_del_init(&pReorderEntry->List);
+ list_del_init(&pReorderEntry->list);
if (SN_EQUAL(pReorderEntry->SeqNum,
ts->rx_indicate_seq))
@@ -47,7 +47,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
pReorderEntry->prxb;
index++;
- list_add_tail(&pReorderEntry->List,
+ list_add_tail(&pReorderEntry->list,
&ieee->RxReorder_Unused_List);
} else {
bPktInBuf = true;
@@ -82,31 +82,31 @@ static void RxPktPendingTimeout(struct timer_list *t)
static void TsAddBaProcess(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
- u8 num = pTxTs->num;
- struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
- TxTsRecord[num]);
+ struct tx_ts_record *ts = from_timer(ts, t, ts_add_ba_timer);
+ u8 num = ts->num;
+ struct rtllib_device *ieee = container_of(ts, struct rtllib_device,
+ tx_ts_records[num]);
- rtllib_ts_init_add_ba(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
+ rtllib_ts_init_add_ba(ieee, ts, BA_POLICY_IMMEDIATE, false);
netdev_dbg(ieee->dev, "%s(): ADDBA Req is started\n", __func__);
}
-static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
+static void ResetTsCommonInfo(struct ts_common_info *ts_common_info)
{
- eth_zero_addr(pTsCommonInfo->addr);
- memset(&pTsCommonInfo->TSpec, 0, sizeof(struct qos_tsinfo));
+ eth_zero_addr(ts_common_info->addr);
+ memset(&ts_common_info->tspec, 0, sizeof(struct qos_tsinfo));
}
static void ResetTxTsEntry(struct tx_ts_record *ts)
{
- ResetTsCommonInfo(&ts->TsCommonInfo);
- ts->TxCurSeq = 0;
- ts->bAddBaReqInProgress = false;
- ts->bAddBaReqDelayed = false;
- ts->bUsingBa = false;
- ts->bDisable_AddBa = false;
- rtllib_reset_ba_entry(&ts->TxAdmittedBARecord);
- rtllib_reset_ba_entry(&ts->TxPendingBARecord);
+ ResetTsCommonInfo(&ts->ts_common_info);
+ ts->tx_cur_seq = 0;
+ ts->add_ba_req_in_progress = false;
+ ts->add_ba_req_delayed = false;
+ ts->using_ba = false;
+ ts->disable_add_ba = false;
+ rtllib_reset_ba_entry(&ts->tx_admitted_ba_record);
+ rtllib_reset_ba_entry(&ts->tx_pending_ba_record);
}
static void ResetRxTsEntry(struct rx_ts_record *ts)
@@ -119,8 +119,8 @@ static void ResetRxTsEntry(struct rx_ts_record *ts)
void rtllib_ts_init(struct rtllib_device *ieee)
{
- struct tx_ts_record *pTxTS = ieee->TxTsRecord;
- struct rx_ts_record *rxts = ieee->RxTsRecord;
+ struct tx_ts_record *pTxTS = ieee->tx_ts_records;
+ struct rx_ts_record *rxts = ieee->rx_ts_records;
struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
u8 count = 0;
@@ -130,15 +130,15 @@ void rtllib_ts_init(struct rtllib_device *ieee)
for (count = 0; count < TOTAL_TS_NUM; count++) {
pTxTS->num = count;
- timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
+ timer_setup(&pTxTS->ts_add_ba_timer, TsAddBaProcess, 0);
- timer_setup(&pTxTS->TxPendingBARecord.timer, rtllib_ba_setup_timeout,
+ timer_setup(&pTxTS->tx_pending_ba_record.timer, rtllib_ba_setup_timeout,
0);
- timer_setup(&pTxTS->TxAdmittedBARecord.timer,
+ timer_setup(&pTxTS->tx_admitted_ba_record.timer,
rtllib_tx_ba_inact_timeout, 0);
ResetTxTsEntry(pTxTS);
- list_add_tail(&pTxTS->TsCommonInfo.List,
+ list_add_tail(&pTxTS->ts_common_info.list,
&ieee->Tx_TS_Unused_List);
pTxTS++;
}
@@ -155,13 +155,13 @@ void rtllib_ts_init(struct rtllib_device *ieee)
timer_setup(&rxts->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
ResetRxTsEntry(rxts);
- list_add_tail(&rxts->ts_common_info.List,
+ list_add_tail(&rxts->ts_common_info.list,
&ieee->Rx_TS_Unused_List);
rxts++;
}
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
for (count = 0; count < REORDER_ENTRY_NUM; count++) {
- list_add_tail(&pRxReorderEntry->List,
+ list_add_tail(&pRxReorderEntry->list,
&ieee->RxReorder_Unused_List);
if (count == (REORDER_ENTRY_NUM - 1))
break;
@@ -196,31 +196,31 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
for (dir = 0; dir <= DIR_BI_DIR; dir++) {
if (!search_dir[dir])
continue;
- list_for_each_entry(pRet, psearch_list, List) {
+ list_for_each_entry(pRet, psearch_list, list) {
if (memcmp(pRet->addr, addr, 6) == 0 &&
- pRet->TSpec.ucTSID == TID &&
- pRet->TSpec.ucDirection == dir)
+ pRet->tspec.ts_id == TID &&
+ pRet->tspec.ucDirection == dir)
break;
}
- if (&pRet->List != psearch_list)
+ if (&pRet->list != psearch_list)
break;
}
- if (pRet && &pRet->List != psearch_list)
+ if (pRet && &pRet->list != psearch_list)
return pRet;
return NULL;
}
-static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *addr,
+static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr,
struct qos_tsinfo *pTSPEC)
{
- if (!pTsCommonInfo)
+ if (!ts_common_info)
return;
- memcpy(pTsCommonInfo->addr, addr, 6);
+ memcpy(ts_common_info->addr, addr, 6);
if (pTSPEC)
- memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
+ memcpy((u8 *)(&(ts_common_info->tspec)), (u8 *)pTSPEC,
sizeof(struct qos_tsinfo));
}
@@ -228,8 +228,8 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
u8 *addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
{
u8 UP = 0;
- struct qos_tsinfo TSpec;
- struct qos_tsinfo *ts_info = &TSpec;
+ struct qos_tsinfo tspec;
+ struct qos_tsinfo *ts_info = &tspec;
struct list_head *pUnusedList;
struct list_head *pAddmitList;
enum direction_value Dir;
@@ -286,13 +286,13 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next,
- struct ts_common_info, List);
- list_del_init(&(*ppTS)->List);
+ struct ts_common_info, list);
+ list_del_init(&(*ppTS)->list);
if (TxRxSelect == TX_DIR) {
struct tx_ts_record *tmp =
container_of(*ppTS,
struct tx_ts_record,
- TsCommonInfo);
+ ts_common_info);
ResetTxTsEntry(tmp);
} else {
struct rx_ts_record *ts =
@@ -305,11 +305,11 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
netdev_dbg(ieee->dev,
"to init current TS, UP:%d, Dir:%d, addr: %pM ppTs=%p\n",
UP, Dir, addr, *ppTS);
- ts_info->ucTSID = UP;
+ ts_info->ts_id = UP;
ts_info->ucDirection = Dir;
- MakeTSEntry(*ppTS, addr, &TSpec);
- list_add_tail(&((*ppTS)->List), pAddmitList);
+ MakeTSEntry(*ppTS, addr, &tspec);
+ list_add_tail(&((*ppTS)->list), pAddmitList);
return true;
}
@@ -335,10 +335,10 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
while (!list_empty(&ts->rx_pending_pkt_list)) {
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
- struct rx_reorder_entry, List);
+ struct rx_reorder_entry, list);
netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n",
__func__, pRxReorderEntry->SeqNum);
- list_del_init(&pRxReorderEntry->List);
+ list_del_init(&pRxReorderEntry->list);
{
int i = 0;
struct rtllib_rxb *prxb = pRxReorderEntry->prxb;
@@ -350,13 +350,13 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
kfree(prxb);
prxb = NULL;
}
- list_add_tail(&pRxReorderEntry->List,
+ list_add_tail(&pRxReorderEntry->list,
&ieee->RxReorder_Unused_List);
}
} else {
struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
- del_timer_sync(&pTxTS->TsAddBaTimer);
+ del_timer_sync(&pTxTS->ts_add_ba_timer);
}
}
@@ -366,37 +366,37 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
netdev_info(ieee->dev, "===========>%s, %pM\n", __func__, addr);
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
if (memcmp(ts->addr, addr, 6) == 0) {
RemoveTsEntry(ieee, ts, TX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
}
}
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
if (memcmp(ts->addr, addr, 6) == 0) {
netdev_info(ieee->dev,
"====>remove Tx_TS_admin_list\n");
RemoveTsEntry(ieee, ts, TX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
}
}
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
if (memcmp(ts->addr, addr, 6) == 0) {
RemoveTsEntry(ieee, ts, RX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
}
}
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
if (memcmp(ts->addr, addr, 6) == 0) {
RemoveTsEntry(ieee, ts, RX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
}
}
}
@@ -406,43 +406,43 @@ void RemoveAllTS(struct rtllib_device *ieee)
{
struct ts_common_info *ts, *pTmpTS;
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Pending_List, list) {
RemoveTsEntry(ieee, ts, TX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
}
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Tx_TS_Admit_List, list) {
RemoveTsEntry(ieee, ts, TX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Tx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Tx_TS_Unused_List);
}
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Pending_List, list) {
RemoveTsEntry(ieee, ts, RX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
}
- list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
+ list_for_each_entry_safe(ts, pTmpTS, &ieee->Rx_TS_Admit_List, list) {
RemoveTsEntry(ieee, ts, RX_DIR);
- list_del_init(&ts->List);
- list_add_tail(&ts->List, &ieee->Rx_TS_Unused_List);
+ list_del_init(&ts->list);
+ list_add_tail(&ts->list, &ieee->Rx_TS_Unused_List);
}
}
void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
{
- if (pTxTS->bAddBaReqInProgress == false) {
- pTxTS->bAddBaReqInProgress = true;
+ if (pTxTS->add_ba_req_in_progress == false) {
+ pTxTS->add_ba_req_in_progress = true;
- if (pTxTS->bAddBaReqDelayed) {
+ if (pTxTS->add_ba_req_delayed) {
netdev_dbg(ieee->dev, "Start ADDBA after 60 sec!!\n");
- mod_timer(&pTxTS->TsAddBaTimer, jiffies +
+ mod_timer(&pTxTS->ts_add_ba_timer, jiffies +
msecs_to_jiffies(TS_ADDBA_DELAY));
} else {
netdev_dbg(ieee->dev, "Immediately Start ADDBA\n");
- mod_timer(&pTxTS->TsAddBaTimer, jiffies + 10);
+ mod_timer(&pTxTS->ts_add_ba_timer, jiffies + 10);
}
} else {
netdev_dbg(ieee->dev, "BA timer is already added\n");
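
TsAddBaProcess() above shows the timer idiom these renames run through: timer_setup() registers a callback that receives only the struct timer_list *, and from_timer() — container_of() in disguise — recovers the record embedding it. A minimal userspace model under that assumption (timer_list and both helpers below are stand-ins, not the kernel's; needs gcc/clang for typeof):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { void (*function)(struct timer_list *); };

#define from_timer(var, timer, field) \
	container_of(timer, typeof(*var), field)

struct tx_ts {				/* plays tx_ts_record */
	struct timer_list ts_add_ba_timer;
	unsigned char num;
};

static void add_ba_process(struct timer_list *t)
{
	struct tx_ts *ts = from_timer(ts, t, ts_add_ba_timer);

	printf("ADDBA request fires for TS %u\n", (unsigned int)ts->num);
}

int main(void)
{
	struct tx_ts ts = { { add_ba_process }, 3 };

	/* The kernel would invoke this on expiry after mod_timer(). */
	ts.ts_add_ba_timer.function(&ts.ts_add_ba_timer);
	return 0;
}
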
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index d2cf3cfaa..7b39a1987 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -103,9 +103,6 @@ struct cb_desc {
/* Tx Desc Related flags (8-9) */
u8 bLastIniPkt:1;
u8 bCmdOrInit:1;
- u8 bFirstSeg:1;
- u8 bLastSeg:1;
- u8 bEncrypt:1;
u8 tx_dis_rate_fallback:1;
u8 tx_use_drv_assinged_rate:1;
u8 bHwSec:1;
@@ -117,8 +114,8 @@ struct cb_desc {
u8 bRTSEnable:1;
u8 bUseShortGI:1;
u8 bUseShortPreamble:1;
- u8 bTxEnableFwCalcDur:1;
- u8 bAMPDUEnable:1;
+ u8 tx_enable_fw_calc_dur:1;
+ u8 ampdu_enable:1;
u8 bRTSSTBC:1;
u8 RTSSC:1;
@@ -139,7 +136,6 @@ struct cb_desc {
u8 bAMSDU:1;
u8 bFromAggrQ:1;
u8 reserved6:6;
- u8 macId;
u8 priority;
/* Tx firmware related element(20-27) */
@@ -471,7 +467,6 @@ enum _REG_PREAMBLE_MODE {
* any adverse affects.
*/
struct rtllib_rx_stats {
- u64 mac_time;
s8 rssi;
u8 signal;
u8 noise;
@@ -1047,7 +1042,7 @@ struct bandwidth_autoswitch {
#define REORDER_WIN_SIZE 128
#define REORDER_ENTRY_NUM 128
struct rx_reorder_entry {
- struct list_head List;
+ struct list_head list;
u16 SeqNum;
struct rtllib_rxb *prxb;
};
@@ -1123,8 +1118,8 @@ struct rt_link_detect {
u16 SlotNum;
u16 SlotIndex;
- u32 NumTxOkInPeriod;
- u32 NumRxOkInPeriod;
+ u32 num_tx_ok_in_period;
+ u32 num_rx_ok_in_period;
u32 NumRxUnicastOkInPeriod;
bool bBusyTraffic;
bool bHigherBusyTraffic;
@@ -1169,7 +1164,7 @@ struct rt_pmkid_list {
u8 Bssid[ETH_ALEN];
u8 PMKID[16];
u8 SsidBuf[33];
- u8 bUsed;
+ u8 used;
};
/*************** DRIVER STATUS *****/
@@ -1192,7 +1187,7 @@ struct rtllib_device {
unsigned long status;
u8 CntAfterLink;
- enum rt_op_mode OpMode;
+ enum rt_op_mode op_mode;
/* The last AssocReq/Resp IEs */
u8 *assocreq_ies, *assocresp_ies;
@@ -1224,17 +1219,17 @@ struct rtllib_device {
u8 HTHighestOperaRate;
u8 tx_dis_rate_fallback;
u8 tx_use_drv_assinged_rate;
- u8 bTxEnableFwCalcDur;
+ u8 tx_enable_fw_calc_dur;
atomic_t atm_swbw;
struct list_head Tx_TS_Admit_List;
struct list_head Tx_TS_Pending_List;
struct list_head Tx_TS_Unused_List;
- struct tx_ts_record TxTsRecord[TOTAL_TS_NUM];
+ struct tx_ts_record tx_ts_records[TOTAL_TS_NUM];
struct list_head Rx_TS_Admit_List;
struct list_head Rx_TS_Pending_List;
struct list_head Rx_TS_Unused_List;
- struct rx_ts_record RxTsRecord[TOTAL_TS_NUM];
+ struct rx_ts_record rx_ts_records[TOTAL_TS_NUM];
struct rx_reorder_entry RxReorderEntry[128];
struct list_head RxReorder_Unused_List;
@@ -1321,12 +1316,8 @@ struct rtllib_device {
u16 scan_watch_dog;
/* map of allowed channels. 0 is dummy */
- void *dot11d_info;
- bool global_domain;
u8 active_channel_map[MAX_CHANNEL_NUMBER+1];
- u8 bss_start_channel;
-
int rate; /* current rate */
int basic_rate;
@@ -1391,7 +1382,7 @@ struct rtllib_device {
int mgmt_queue_head;
int mgmt_queue_tail;
u8 AsocRetryCount;
- struct sk_buff_head skb_waitQ[MAX_QUEUE_SIZE];
+ struct sk_buff_head skb_waitq[MAX_QUEUE_SIZE];
bool bdynamic_txpower_enable;
@@ -1411,7 +1402,7 @@ struct rtllib_device {
bool FwRWRF;
struct rt_link_detect link_detect_info;
- bool bIsAggregateFrame;
+ bool is_aggregate_frame;
struct rt_pwr_save_ctrl pwr_save_ctrl;
/* used if IEEE_SOFTMAC_TX_QUEUE is set */
@@ -1421,7 +1412,6 @@ struct rtllib_device {
struct timer_list associate_timer;
/* used if IEEE_SOFTMAC_BEACONS is set */
- struct timer_list beacon_timer;
u8 need_sw_enc;
struct work_struct associate_complete_wq;
struct work_struct ips_leave_wq;
@@ -1469,7 +1459,7 @@ struct rtllib_device {
* This function can sleep. the driver should ensure
* the radio has been switched before return.
*/
- void (*set_chan)(struct net_device *dev, short ch);
+ void (*set_chan)(struct net_device *dev, u8 ch);
/* indicate the driver that the link state is changed
* for example it may indicate the card is associated now.
@@ -1687,8 +1677,8 @@ void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
void rtllib_start_protocol(struct rtllib_device *ieee);
void rtllib_stop_protocol(struct rtllib_device *ieee);
-void rtllib_EnableNetMonitorMode(struct net_device *dev, bool bInitState);
-void rtllib_DisableNetMonitorMode(struct net_device *dev, bool bInitState);
+void rtllib_enable_net_monitor_mode(struct net_device *dev, bool init_state);
+void rtllib_disable_net_monitor_mode(struct net_device *dev, bool init_state);
void rtllib_softmac_stop_protocol(struct rtllib_device *ieee);
void rtllib_softmac_start_protocol(struct rtllib_device *ieee);
@@ -1696,7 +1686,6 @@ void rtllib_softmac_start_protocol(struct rtllib_device *ieee);
void rtllib_reset_queue(struct rtllib_device *ieee);
void rtllib_wake_all_queues(struct rtllib_device *ieee);
void rtllib_stop_all_queues(struct rtllib_device *ieee);
-struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
void notify_wx_assoc_event(struct rtllib_device *ieee);
void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
@@ -1758,39 +1747,37 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
#define MAX_RECEIVE_BUFFER_SIZE 9100
-void HTSetConnectBwMode(struct rtllib_device *ieee,
+void ht_set_connect_bw_mode(struct rtllib_device *ieee,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void ht_update_default_setting(struct rtllib_device *ieee);
-void HTConstructCapabilityElement(struct rtllib_device *ieee,
+void ht_construct_capability_element(struct rtllib_device *ieee,
u8 *posHTCap, u8 *len,
u8 isEncrypt, bool bAssoc);
-void HTConstructInfoElement(struct rtllib_device *ieee,
- u8 *posHTInfo, u8 *len, u8 isEncrypt);
-void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
+void ht_construct_rt2rt_agg_element(struct rtllib_device *ieee,
u8 *posRT2RTAgg, u8 *len);
-void HTOnAssocRsp(struct rtllib_device *ieee);
-void HTInitializeHTInfo(struct rtllib_device *ieee);
-void HTInitializeBssDesc(struct bss_ht *pBssHT);
-void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+void ht_on_assoc_rsp(struct rtllib_device *ieee);
+void ht_initialize_ht_info(struct rtllib_device *ieee);
+void ht_initialize_bss_desc(struct bss_ht *pBssHT);
+void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
struct rtllib_network *pNetwork);
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
struct rtllib_network *pNetwork);
-u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+u8 ht_get_highest_mcs_rate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77];
-u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
-void HTResetIOTSetting(struct rt_hi_throughput *ht_info);
-bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
-u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
+u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame);
+void ht_reset_iot_setting(struct rt_hi_throughput *ht_info);
+bool is_ht_half_nmode_aps(struct rtllib_device *ieee);
+u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate);
int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
- u8 policy, u8 bOverwritePending);
+ u8 policy, u8 overwrite_pending);
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
- struct ts_common_info *pTsCommonInfo,
+ struct ts_common_info *ts_common_info,
enum tr_select TxRxSelect);
void rtllib_ba_setup_timeout(struct timer_list *t);
void rtllib_tx_ba_inact_timeout(struct timer_list *t);
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 95b6d6b94..e7af4a25b 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -123,7 +123,7 @@ struct net_device *alloc_rtllib(int sizeof_priv)
goto free_softmac;
ht_update_default_setting(ieee);
- HTInitializeHTInfo(ieee);
+ ht_initialize_ht_info(ieee);
rtllib_ts_init(ieee);
for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index ecaa4dec3..4df20f4d6 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -34,7 +34,6 @@
#include <linux/ctype.h>
#include "rtllib.h"
-#include "dot11d.h"
static void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *stats);
@@ -412,19 +411,19 @@ static bool AddReorderEntry(struct rx_ts_record *ts,
while (pList->next != &ts->rx_pending_pkt_list) {
if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)
list_entry(pList->next, struct rx_reorder_entry,
- List))->SeqNum))
+ list))->SeqNum))
pList = pList->next;
else if (SN_EQUAL(pReorderEntry->SeqNum,
((struct rx_reorder_entry *)list_entry(pList->next,
- struct rx_reorder_entry, List))->SeqNum))
+ struct rx_reorder_entry, list))->SeqNum))
return false;
else
break;
}
- pReorderEntry->List.next = pList->next;
- pReorderEntry->List.next->prev = &pReorderEntry->List;
- pReorderEntry->List.prev = pList;
- pList->next = &pReorderEntry->List;
+ pReorderEntry->list.next = pList->next;
+ pReorderEntry->list.next->prev = &pReorderEntry->list;
+ pReorderEntry->list.prev = pList;
+ pList->next = &pReorderEntry->list;
return true;
}
@@ -505,15 +504,15 @@ void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
- struct rx_reorder_entry, List);
+ struct rx_reorder_entry, list);
netdev_dbg(ieee->dev, "%s(): Indicate SeqNum %d!\n", __func__,
pRxReorderEntry->SeqNum);
- list_del_init(&pRxReorderEntry->List);
+ list_del_init(&pRxReorderEntry->list);
ieee->RfdArray[RfdCnt] = pRxReorderEntry->prxb;
RfdCnt = RfdCnt + 1;
- list_add_tail(&pRxReorderEntry->List,
+ list_add_tail(&pRxReorderEntry->list,
&ieee->RxReorder_Unused_List);
}
rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt);
@@ -602,8 +601,8 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
if (!list_empty(&ieee->RxReorder_Unused_List)) {
pReorderEntry = (struct rx_reorder_entry *)
list_entry(ieee->RxReorder_Unused_List.next,
- struct rx_reorder_entry, List);
- list_del_init(&pReorderEntry->List);
+ struct rx_reorder_entry, list);
+ list_del_init(&pReorderEntry->list);
/* Make a reorder entry and insert
* into a the packet list.
@@ -618,7 +617,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
"%s(): Duplicate packet is dropped. IndicateSeq: %d, NewSeq: %d\n",
__func__, ts->rx_indicate_seq,
SeqNum);
- list_add_tail(&pReorderEntry->List,
+ list_add_tail(&pReorderEntry->list,
&ieee->RxReorder_Unused_List);
for (i = 0; i < prxb->nr_subframes; i++)
@@ -658,7 +657,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
pReorderEntry = (struct rx_reorder_entry *)
list_entry(ts->rx_pending_pkt_list.prev,
struct rx_reorder_entry,
- List);
+ list);
if (SN_LESS(pReorderEntry->SeqNum, ts->rx_indicate_seq) ||
SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq)) {
/* This protect struct buffer from overflow. */
@@ -670,7 +669,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
break;
}
- list_del_init(&pReorderEntry->List);
+ list_del_init(&pReorderEntry->list);
if (SN_EQUAL(pReorderEntry->SeqNum, ts->rx_indicate_seq))
ts->rx_indicate_seq = (ts->rx_indicate_seq + 1) %
@@ -681,7 +680,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
__func__, pReorderEntry->SeqNum);
index++;
- list_add_tail(&pReorderEntry->List,
+ list_add_tail(&pReorderEntry->list,
&ieee->RxReorder_Unused_List);
} else {
bPktInBuf = true;
@@ -731,7 +730,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
u16 LLCOffset = sizeof(struct ieee80211_hdr_3addr);
u16 ChkLength;
- bool bIsAggregateFrame = false;
+ bool is_aggregate_frame = false;
u16 nSubframe_Length;
u8 nPadding_Length = 0;
u16 SeqNum = 0;
@@ -740,7 +739,7 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
if ((RTLLIB_QOS_HAS_SEQ(fc)) &&
(((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved))
- bIsAggregateFrame = true;
+ is_aggregate_frame = true;
if (RTLLIB_QOS_HAS_SEQ(fc))
LLCOffset += 2;
@@ -753,8 +752,8 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
return 0;
skb_pull(skb, LLCOffset);
- ieee->bIsAggregateFrame = bIsAggregateFrame;
- if (!bIsAggregateFrame) {
+ ieee->is_aggregate_frame = is_aggregate_frame;
+ if (!is_aggregate_frame) {
rxb->nr_subframes = 1;
/* altered by clark 3/30/2010
@@ -858,7 +857,7 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
size_t hdrlen;
hdrlen = rtllib_get_hdrlen(fc);
- if (HTCCheck(ieee, skb->data)) {
+ if (ht_c_check(ieee, skb->data)) {
if (net_ratelimit())
netdev_info(ieee->dev, "%s: find HTCControl!\n",
__func__);
@@ -1151,7 +1150,7 @@ static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
if (unicast) {
if (ieee->link_state == MAC80211_LINKED) {
if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
- ieee->link_detect_info.NumTxOkInPeriod) > 8) ||
+ ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
(ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) {
ieee->leisure_ps_leave(ieee->dev);
}
@@ -1286,7 +1285,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Update statstics for AP roaming */
ieee->link_detect_info.NumRecvDataInPeriod++;
- ieee->link_detect_info.NumRxOkInPeriod++;
+ ieee->link_detect_info.num_rx_ok_in_period++;
/* Data frame - extract src/dst addresses */
rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
@@ -1359,7 +1358,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Update WAPI PN */
/* Check if leave LPS */
- if (ieee->bIsAggregateFrame)
+ if (ieee->is_aggregate_frame)
nr_subframes = rxb->nr_subframes;
else
nr_subframes = 1;
@@ -1402,7 +1401,7 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
return 0;
}
- if (HTCCheck(ieee, skb->data)) {
+ if (ht_c_check(ieee, skb->data)) {
if (net_ratelimit())
netdev_info(ieee->dev, "%s: Find HTCControl!\n",
__func__);
@@ -1663,35 +1662,6 @@ static const char *get_info_element_string(u16 id)
}
}
-static inline void rtllib_extract_country_ie(
- struct rtllib_device *ieee,
- struct rtllib_info_element *info_element,
- struct rtllib_network *network,
- u8 *addr2)
-{
- if (IS_DOT11D_ENABLE(ieee)) {
- if (info_element->len != 0) {
- memcpy(network->CountryIeBuf, info_element->data,
- info_element->len);
- network->CountryIeLen = info_element->len;
-
- if (!IS_COUNTRY_IE_VALID(ieee)) {
- if (rtllib_act_scanning(ieee, false) &&
- ieee->FirstIe_InScan)
- netdev_info(ieee->dev,
- "Received beacon CountryIE, SSID: <%s>\n",
- network->ssid);
- dot11d_update_country(ieee, addr2,
- info_element->len,
- info_element->data);
- }
- }
-
- if (IS_EQUAL_CIE_SRC(ieee, addr2))
- UPDATE_CIE_WATCHDOG(ieee);
- }
-}
-
static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
struct rtllib_info_element *info_element,
struct rtllib_network *network,
@@ -2146,8 +2116,6 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
case MFIE_TYPE_COUNTRY:
netdev_dbg(ieee->dev, "MFIE_TYPE_COUNTRY: %d bytes\n",
info_element->len);
- rtllib_extract_country_ie(ieee, info_element, network,
- network->bssid);
break;
/* TODO */
default:
@@ -2221,7 +2189,7 @@ static inline int rtllib_network_init(
network->RSSI = stats->SignalStrength;
network->CountryIeLen = 0;
memset(network->CountryIeBuf, 0, MAX_IE_LEN);
- HTInitializeBssDesc(&network->bssht);
+ ht_initialize_bss_desc(&network->bssht);
network->flags |= NETWORK_HAS_CCK;
network->wpa_ie_len = 0;
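
AddReorderEntry() above splices the renamed ->list node into the pending-packet list by hand; its four pointer stores are essentially the body of the kernel's __list_add(). A standalone model of that insertion, same stores in the same order:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Insert @node between @prev and @next, mirroring the open-coded
 * block in the hunk above. */
static void list_insert(struct list_head *node,
			struct list_head *prev, struct list_head *next)
{
	node->next = next;
	node->next->prev = node;
	node->prev = prev;
	prev->next = node;
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty circular list */
	struct list_head a, b;

	list_insert(&a, &head, head.next);	/* a after head */
	list_insert(&b, &a, a.next);		/* b after a */
	printf("order intact: %d\n", head.next == &a && a.next == &b);
	return 0;
}
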
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 42d652fe8..b9278b26a 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -18,7 +18,6 @@
#include <linux/uaccess.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
-#include "dot11d.h"
static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
@@ -45,7 +44,7 @@ static unsigned int rtllib_MFIE_rate_len(struct rtllib_device *ieee)
* Then it updates the pointer so that
* it points after the new MFIE tag added.
*/
-static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_mfie_brate(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -62,7 +61,7 @@ static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_mfie_grate(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -83,7 +82,7 @@ static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_wmm_info(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -99,7 +98,7 @@ static void rtllib_WMM_Info(struct rtllib_device *ieee, u8 **tag_p)
*tag_p = tag;
}
-static void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_turbo_info(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
@@ -135,32 +134,33 @@ static void enqueue_mgmt(struct rtllib_device *ieee, struct sk_buff *skb)
static void init_mgmt_queue(struct rtllib_device *ieee)
{
- ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
+ ieee->mgmt_queue_tail = 0;
+ ieee->mgmt_queue_head = 0;
}
u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
{
u16 i;
- u8 QueryRate = 0;
- u8 BasicRate;
+ u8 query_rate = 0;
+ u8 basic_rate;
for (i = 0; i < ieee->current_network.rates_len; i++) {
- BasicRate = ieee->current_network.rates[i] & 0x7F;
- if (!rtllib_is_cck_rate(BasicRate)) {
- if (QueryRate == 0) {
- QueryRate = BasicRate;
+ basic_rate = ieee->current_network.rates[i] & 0x7F;
+ if (!rtllib_is_cck_rate(basic_rate)) {
+ if (query_rate == 0) {
+ query_rate = basic_rate;
} else {
- if (BasicRate < QueryRate)
- QueryRate = BasicRate;
+ if (basic_rate < query_rate)
+ query_rate = basic_rate;
}
}
}
- if (QueryRate == 0) {
- QueryRate = 12;
- netdev_info(ieee->dev, "No BasicRate found!!\n");
+ if (query_rate == 0) {
+ query_rate = 12;
+ netdev_info(ieee->dev, "No basic_rate found!!\n");
}
- return QueryRate;
+ return query_rate;
}
static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
@@ -173,12 +173,8 @@ static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
else
rate = ieee->basic_rate & 0x7f;
- if (rate == 0) {
- if (ieee->mode == WIRELESS_MODE_N_24G && !ht_info->bCurSuppCCK)
- rate = 0x0c;
- else
- rate = 0x02;
- }
+ if (rate == 0)
+ rate = 0x02;
return rate;
}
@@ -240,7 +236,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
/* check whether the managed packet queued greater than 5 */
if (!ieee->check_nic_enough_desc(ieee->dev,
tcb_desc->queue_index) ||
- skb_queue_len(&ieee->skb_waitQ[tcb_desc->queue_index]) ||
+ skb_queue_len(&ieee->skb_waitq[tcb_desc->queue_index]) ||
ieee->queue_stop) {
/* insert the skb packet to the management queue
*
@@ -250,7 +246,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
netdev_info(ieee->dev,
"%s():insert to waitqueue, queue_index:%d!\n",
__func__, tcb_desc->queue_index);
- skb_queue_tail(&ieee->skb_waitQ[tcb_desc->queue_index],
+ skb_queue_tail(&ieee->skb_waitq[tcb_desc->queue_index],
skb);
} else {
ieee->softmac_hard_start_xmit(skb, ieee->dev);
@@ -345,65 +341,34 @@ static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
memcpy(tag, ieee->current_network.ssid, len);
tag += len;
- rtllib_MFIE_Brate(ieee, &tag);
- rtllib_MFIE_Grate(ieee, &tag);
+ rtllib_mfie_brate(ieee, &tag);
+ rtllib_mfie_grate(ieee, &tag);
return skb;
}
-static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
-
-static void rtllib_send_beacon(struct rtllib_device *ieee)
-{
- struct sk_buff *skb;
-
- if (!ieee->ieee_up)
- return;
- skb = rtllib_get_beacon_(ieee);
-
- if (skb) {
- softmac_mgmt_xmit(skb, ieee);
- ieee->softmac_stats.tx_beacons++;
- }
-
- if (ieee->beacon_txing && ieee->ieee_up)
- mod_timer(&ieee->beacon_timer, jiffies +
- (msecs_to_jiffies(ieee->current_network.beacon_interval - 5)));
-}
-
-static void rtllib_send_beacon_cb(struct timer_list *t)
-{
- struct rtllib_device *ieee =
- from_timer(ieee, t, beacon_timer);
- unsigned long flags;
-
- spin_lock_irqsave(&ieee->beacon_lock, flags);
- rtllib_send_beacon(ieee);
- spin_unlock_irqrestore(&ieee->beacon_lock, flags);
-}
-
/* Enables network monitor mode, all rx packets will be received. */
-void rtllib_EnableNetMonitorMode(struct net_device *dev,
- bool bInitState)
+void rtllib_enable_net_monitor_mode(struct net_device *dev,
+ bool init_state)
{
struct rtllib_device *ieee = netdev_priv_rsl(dev);
netdev_info(dev, "========>Enter Monitor Mode\n");
- ieee->AllowAllDestAddrHandler(dev, true, !bInitState);
+ ieee->AllowAllDestAddrHandler(dev, true, !init_state);
}
/* Disables network monitor mode. Only packets destinated to
* us will be received.
*/
-void rtllib_DisableNetMonitorMode(struct net_device *dev,
- bool bInitState)
+void rtllib_disable_net_monitor_mode(struct net_device *dev,
+ bool init_state)
{
struct rtllib_device *ieee = netdev_priv_rsl(dev);
netdev_info(dev, "========>Exit Monitor Mode\n");
- ieee->AllowAllDestAddrHandler(dev, false, !bInitState);
+ ieee->AllowAllDestAddrHandler(dev, false, !init_state);
}
static void rtllib_send_probe(struct rtllib_device *ieee)
@@ -425,12 +390,6 @@ static void rtllib_send_probe_requests(struct rtllib_device *ieee)
}
}
-static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
-{
- memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
- MAX_CHANNEL_NUMBER + 1);
-}
-
/* this performs syncro scan blocking the caller until all channels
* in the allowed channel map has been checked.
*/
@@ -439,8 +398,6 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee)
union iwreq_data wrqu;
short ch = 0;
- rtllib_update_active_chan_map(ieee);
-
ieee->be_scan_inprogress = true;
mutex_lock(&ieee->scan_mutex);
@@ -492,10 +449,6 @@ out:
ieee->actscanning = false;
ieee->sync_scan_hurryup = 0;
- if (ieee->link_state >= MAC80211_LINKED) {
- if (IS_DOT11D_ENABLE(ieee))
- dot11d_scan_complete(ieee);
- }
mutex_unlock(&ieee->scan_mutex);
ieee->be_scan_inprogress = false;
@@ -510,8 +463,6 @@ static void rtllib_softmac_scan_wq(void *data)
struct rtllib_device, softmac_scan_wq);
u8 last_channel = ieee->current_network.channel;
- rtllib_update_active_chan_map(ieee);
-
if (!ieee->ieee_up)
return;
if (rtllib_act_scanning(ieee, true))
@@ -552,8 +503,6 @@ static void rtllib_softmac_scan_wq(void *data)
return;
out:
- if (IS_DOT11D_ENABLE(ieee))
- dot11d_scan_complete(ieee);
ieee->current_network.channel = last_channel;
out1:
@@ -609,10 +558,6 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
{
ieee->rtllib_ips_leave_wq(ieee->dev);
- if (IS_DOT11D_ENABLE(ieee)) {
- if (IS_COUNTRY_IE_VALID(ieee))
- RESET_CIE_WATCHDOG(ieee);
- }
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
if (ieee->scanning_continue == 0) {
ieee->actscanning = true;
@@ -625,10 +570,6 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
/* called with wx_mutex held */
void rtllib_start_scan_syncro(struct rtllib_device *ieee)
{
- if (IS_DOT11D_ENABLE(ieee)) {
- if (IS_COUNTRY_IE_VALID(ieee))
- RESET_CIE_WATCHDOG(ieee);
- }
ieee->sync_scan_hurryup = 0;
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
rtllib_softmac_scan_syncro(ieee);
@@ -677,152 +618,6 @@ rtllib_authentication_req(struct rtllib_network *beacon,
return skb;
}
-static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
- const u8 *dest)
-{
- u8 *tag;
- int beacon_size;
- struct rtllib_probe_response *beacon_buf;
- struct sk_buff *skb = NULL;
- int encrypt;
- int atim_len, erp_len;
- struct lib80211_crypt_data *crypt;
-
- char *ssid = ieee->current_network.ssid;
- int ssid_len = ieee->current_network.ssid_len;
- int rate_len = ieee->current_network.rates_len + 2;
- int rate_ex_len = ieee->current_network.rates_ex_len;
- int wpa_ie_len = ieee->wpa_ie_len;
- u8 erpinfo_content = 0;
-
- u8 *tmp_ht_cap_buf = NULL;
- u8 tmp_ht_cap_len = 0;
- u8 *tmp_ht_info_buf = NULL;
- u8 tmp_ht_info_len = 0;
- struct rt_hi_throughput *ht_info = ieee->ht_info;
- u8 *tmp_generic_ie_buf = NULL;
- u8 tmp_generic_ie_len = 0;
-
- if (rate_ex_len > 0)
- rate_ex_len += 2;
-
- if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
- atim_len = 4;
- else
- atim_len = 0;
-
- if ((ieee->current_network.mode == WIRELESS_MODE_G) ||
- (ieee->current_network.mode == WIRELESS_MODE_N_24G &&
- ieee->ht_info->bCurSuppCCK)) {
- erp_len = 3;
- erpinfo_content = 0;
- if (ieee->current_network.buseprotection)
- erpinfo_content |= ERP_UseProtection;
- } else {
- erp_len = 0;
- }
-
- crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- encrypt = crypt && crypt->ops &&
- ((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
- if (ieee->ht_info->current_ht_support) {
- tmp_ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
- tmp_ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
- tmp_ht_info_buf = (u8 *)&(ieee->ht_info->SelfHTInfo);
- tmp_ht_info_len = sizeof(ieee->ht_info->SelfHTInfo);
- HTConstructCapabilityElement(ieee, tmp_ht_cap_buf,
- &tmp_ht_cap_len, encrypt, false);
- HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len,
- encrypt);
-
- if (ht_info->reg_rt2rt_aggregation) {
- tmp_generic_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf;
- tmp_generic_ie_len =
- sizeof(ieee->ht_info->sz_rt2rt_agg_buf);
- HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf,
- &tmp_generic_ie_len);
- }
- }
-
- beacon_size = sizeof(struct rtllib_probe_response) + 2 +
- ssid_len + 3 + rate_len + rate_ex_len + atim_len + erp_len
- + wpa_ie_len + ieee->tx_headroom;
- skb = dev_alloc_skb(beacon_size);
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- beacon_buf = skb_put(skb, (beacon_size - ieee->tx_headroom));
- ether_addr_copy(beacon_buf->header.addr1, dest);
- ether_addr_copy(beacon_buf->header.addr2, ieee->dev->dev_addr);
- ether_addr_copy(beacon_buf->header.addr3, ieee->current_network.bssid);
-
- beacon_buf->header.duration_id = 0;
- beacon_buf->beacon_interval =
- cpu_to_le16(ieee->current_network.beacon_interval);
- beacon_buf->capability =
- cpu_to_le16(ieee->current_network.capability &
- WLAN_CAPABILITY_IBSS);
- beacon_buf->capability |=
- cpu_to_le16(ieee->current_network.capability &
- WLAN_CAPABILITY_SHORT_PREAMBLE);
-
- if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
- beacon_buf->capability |=
- cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
-
- crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- if (encrypt)
- beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- beacon_buf->header.frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
- beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
- beacon_buf->info_element[0].len = ssid_len;
-
- tag = (u8 *)beacon_buf->info_element[0].data;
-
- memcpy(tag, ssid, ssid_len);
-
- tag += ssid_len;
-
- *(tag++) = MFIE_TYPE_RATES;
- *(tag++) = rate_len - 2;
- memcpy(tag, ieee->current_network.rates, rate_len - 2);
- tag += rate_len - 2;
-
- *(tag++) = MFIE_TYPE_DS_SET;
- *(tag++) = 1;
- *(tag++) = ieee->current_network.channel;
-
- if (atim_len) {
- u16 val16;
- *(tag++) = MFIE_TYPE_IBSS_SET;
- *(tag++) = 2;
- val16 = ieee->current_network.atim_window;
- memcpy((u8 *)tag, (u8 *)&val16, 2);
- tag += 2;
- }
-
- if (erp_len) {
- *(tag++) = MFIE_TYPE_ERP;
- *(tag++) = 1;
- *(tag++) = erpinfo_content;
- }
- if (rate_ex_len) {
- *(tag++) = MFIE_TYPE_RATES_EX;
- *(tag++) = rate_ex_len - 2;
- memcpy(tag, ieee->current_network.rates_ex, rate_ex_len - 2);
- tag += rate_ex_len - 2;
- }
-
- if (wpa_ie_len) {
- memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
- tag += ieee->wpa_ie_len;
- }
- return skb;
-}
-
static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
{
struct sk_buff *skb;
@@ -875,7 +670,7 @@ static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
int i = 0;
do {
- if ((ieee->PMKIDList[i].bUsed) &&
+ if ((ieee->PMKIDList[i].used) &&
(memcmp(ieee->PMKIDList[i].Bssid, bssid, ETH_ALEN) == 0))
break;
i++;
@@ -933,15 +728,15 @@ rtllib_association_req(struct rtllib_network *beacon,
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
- ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
+ ht_cap_buf = (u8 *)&ieee->ht_info->SelfHTCap;
ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
- HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
+ ht_construct_capability_element(ieee, ht_cap_buf, &ht_cap_len,
encrypt, true);
if (ieee->ht_info->current_rt2rt_aggregation) {
realtek_ie_buf = ieee->ht_info->sz_rt2rt_agg_buf;
realtek_ie_len =
sizeof(ieee->ht_info->sz_rt2rt_agg_buf);
- HTConstructRT2RTAggElement(ieee, realtek_ie_buf,
+ ht_construct_rt2rt_agg_element(ieee, realtek_ie_buf,
&realtek_ie_len);
}
}
@@ -1071,7 +866,7 @@ rtllib_association_req(struct rtllib_network *beacon,
tag += osCcxVerNum.Length;
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
- if (ieee->ht_info->ePeerHTSpecVer != HT_SPEC_VER_EWC) {
+ if (ieee->ht_info->peer_ht_spec_ver != HT_SPEC_VER_EWC) {
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_HT_CAP;
*tag++ = ht_cap_len - 2;
@@ -1093,7 +888,7 @@ rtllib_association_req(struct rtllib_network *beacon,
}
if (wmm_info_len) {
tag = skb_put(skb, wmm_info_len);
- rtllib_WMM_Info(ieee, &tag);
+ rtllib_wmm_info(ieee, &tag);
}
if (wps_ie_len && ieee->wps_ie)
@@ -1101,11 +896,11 @@ rtllib_association_req(struct rtllib_network *beacon,
if (turbo_info_len) {
tag = skb_put(skb, turbo_info_len);
- rtllib_TURBO_Info(ieee, &tag);
+ rtllib_turbo_info(ieee, &tag);
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
- if (ieee->ht_info->ePeerHTSpecVer == HT_SPEC_VER_EWC) {
+ if (ieee->ht_info->peer_ht_spec_ver == HT_SPEC_VER_EWC) {
tag = skb_put(skb, ht_cap_len);
*tag++ = MFIE_TYPE_GENERIC;
*tag++ = ht_cap_len - 2;
@@ -1123,7 +918,7 @@ rtllib_association_req(struct rtllib_network *beacon,
kfree(ieee->assocreq_ies);
ieee->assocreq_ies = NULL;
- ies = &(hdr->info_element[0].id);
+ ies = &hdr->info_element[0].id;
ieee->assocreq_ies_len = (skb->data + skb->len) - ies;
ieee->assocreq_ies = kmemdup(ies, ieee->assocreq_ies_len, GFP_ATOMIC);
if (!ieee->assocreq_ies)
@@ -1269,7 +1064,7 @@ static void rtllib_associate_complete_wq(void *data)
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
netdev_info(ieee->dev, "Successfully associated, ht enabled\n");
- HTOnAssocRsp(ieee);
+ ht_on_assoc_rsp(ieee);
} else {
netdev_info(ieee->dev,
"Successfully associated, ht not enabled(%d, %d)\n",
@@ -1314,7 +1109,7 @@ static void rtllib_associate_procedure_wq(void *data)
mutex_lock(&ieee->wx_mutex);
rtllib_stop_scan(ieee);
- HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+ ht_set_connect_bw_mode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
if (ieee->rf_power_state == rf_off) {
ieee->rtllib_ips_leave_wq(ieee->dev);
mutex_unlock(&ieee->wx_mutex);
@@ -1417,21 +1212,20 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
!(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
rtllib_stop_scan_syncro(ieee);
- HTResetIOTSetting(ieee->ht_info);
+ ht_reset_iot_setting(ieee->ht_info);
ieee->wmm_acm = 0;
if (ieee->iw_mode == IW_MODE_INFRA) {
/* Join the network for the first time */
ieee->AsocRetryCount = 0;
if ((ieee->current_network.qos_data.supported == 1) &&
ieee->current_network.bssht.bd_support_ht)
- HTResetSelfAndSavePeerSetting(ieee,
+ ht_reset_self_and_save_peer_setting(ieee,
&(ieee->current_network));
else
ieee->ht_info->current_ht_support = false;
ieee->link_state = RTLLIB_ASSOCIATING;
- schedule_delayed_work(
- &ieee->associate_procedure_wq, 0);
+ schedule_delayed_work(&ieee->associate_procedure_wq, 0);
} else {
if (rtllib_is_54g(&ieee->current_network)) {
ieee->rate = 108;
@@ -1825,7 +1619,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
kfree(ieee->assocresp_ies);
ieee->assocresp_ies = NULL;
- ies = &(assoc_resp->info_element[0].id);
+ ies = &assoc_resp->info_element[0].id;
ieee->assocresp_ies_len = (skb->data + skb->len) - ies;
ieee->assocresp_ies = kmemdup(ies,
ieee->assocresp_ies_len,
@@ -1841,8 +1635,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
"Association response status code 0x%x\n",
errcode);
if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
- schedule_delayed_work(
- &ieee->associate_procedure_wq, 0);
+ schedule_delayed_work(&ieee->associate_procedure_wq, 0);
else
rtllib_associate_abort(ieee);
}
@@ -1872,7 +1665,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->softmac_stats.rx_auth_rs_ok++;
if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
- if (IsHTHalfNmodeAPs(ieee)) {
+ if (is_ht_half_nmode_aps(ieee)) {
bSupportNmode = true;
bHalfSupportNmode = true;
} else {
@@ -2030,7 +1823,7 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
* the wait queue
*/
for (i = 0; i < txb->nr_frags; i++) {
- queue_len = skb_queue_len(&ieee->skb_waitQ[queue_index]);
+ queue_len = skb_queue_len(&ieee->skb_waitq[queue_index]);
if ((queue_len != 0) ||
(!ieee->check_nic_enough_desc(ieee->dev, queue_index)) ||
(ieee->queue_stop)) {
@@ -2039,13 +1832,12 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
* to check it any more.
*/
if (queue_len < 200)
- skb_queue_tail(&ieee->skb_waitQ[queue_index],
+ skb_queue_tail(&ieee->skb_waitq[queue_index],
txb->fragments[i]);
else
kfree_skb(txb->fragments[i]);
} else {
- ieee->softmac_data_hard_start_xmit(
- txb->fragments[i],
+ ieee->softmac_data_hard_start_xmit(txb->fragments[i],
ieee->dev, ieee->rate);
}
}
@@ -2090,10 +1882,6 @@ static void rtllib_start_bss(struct rtllib_device *ieee)
{
unsigned long flags;
- if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
- if (!ieee->global_domain)
- return;
- }
/* check if we have already found the net we
* are interested in (if any).
* if not (we are disassociated and we are not
@@ -2121,6 +1909,7 @@ static void rtllib_link_change_wq(void *data)
struct rtllib_device, link_change_wq);
ieee->link_change(ieee->dev);
}
+
/* called only in userspace context */
void rtllib_disassociate(struct rtllib_device *ieee)
{
@@ -2128,8 +1917,6 @@ void rtllib_disassociate(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
rtllib_reset_queue(ieee);
- if (IS_DOT11D_ENABLE(ieee))
- dot11d_reset(ieee);
ieee->link_state = MAC80211_NOLINK;
ieee->is_set_key = false;
ieee->wap_set = 0;
@@ -2181,46 +1968,6 @@ exit:
mutex_unlock(&ieee->wx_mutex);
}
-static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
-{
- static const u8 broadcast_addr[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
- struct sk_buff *skb;
- struct rtllib_probe_response *b;
-
- skb = rtllib_probe_resp(ieee, broadcast_addr);
-
- if (!skb)
- return NULL;
-
- b = (struct rtllib_probe_response *)skb->data;
- b->header.frame_control = cpu_to_le16(IEEE80211_STYPE_BEACON);
-
- return skb;
-}
-
-struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
-{
- struct sk_buff *skb;
- struct rtllib_probe_response *b;
-
- skb = rtllib_get_beacon_(ieee);
- if (!skb)
- return NULL;
-
- b = (struct rtllib_probe_response *)skb->data;
- b->header.seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
-
- if (ieee->seq_ctrl[0] == 0xFFF)
- ieee->seq_ctrl[0] = 0;
- else
- ieee->seq_ctrl[0]++;
-
- return skb;
-}
-EXPORT_SYMBOL(rtllib_get_beacon);
-
void rtllib_softmac_stop_protocol(struct rtllib_device *ieee)
{
rtllib_stop_scan_syncro(ieee);
@@ -2279,8 +2026,6 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
short ch = 0;
int i = 0;
- rtllib_update_active_chan_map(ieee);
-
if (ieee->proto_started)
return;
@@ -2326,18 +2071,15 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
ieee->link_state = MAC80211_NOLINK;
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
- if (!ieee->dot11d_info)
- return -ENOMEM;
ieee->link_detect_info.SlotIndex = 0;
ieee->link_detect_info.SlotNum = 2;
ieee->link_detect_info.NumRecvBcnInPeriod = 0;
ieee->link_detect_info.NumRecvDataInPeriod = 0;
- ieee->link_detect_info.NumTxOkInPeriod = 0;
- ieee->link_detect_info.NumRxOkInPeriod = 0;
+ ieee->link_detect_info.num_tx_ok_in_period = 0;
+ ieee->link_detect_info.num_rx_ok_in_period = 0;
ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
- ieee->bIsAggregateFrame = false;
+ ieee->is_aggregate_frame = false;
ieee->assoc_id = 0;
ieee->queue_stop = 0;
ieee->scanning_continue = 0;
@@ -2369,8 +2111,6 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
timer_setup(&ieee->associate_timer, rtllib_associate_abort_cb, 0);
- timer_setup(&ieee->beacon_timer, rtllib_send_beacon_cb, 0);
-
INIT_DELAYED_WORK(&ieee->link_change_wq, (void *)rtllib_link_change_wq);
INIT_WORK(&ieee->associate_complete_wq, (void *)rtllib_associate_complete_wq);
INIT_DELAYED_WORK(&ieee->associate_procedure_wq, (void *)rtllib_associate_procedure_wq);
@@ -2404,9 +2144,6 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
cancel_work_sync(&ieee->ips_leave_wq);
cancel_work_sync(&ieee->wx_sync_scan_wq);
cancel_work_sync(&ieee->ps_task);
-
- kfree(ieee->dot11d_info);
- ieee->dot11d_info = NULL;
}
static inline struct sk_buff *
@@ -2493,7 +2230,7 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
return SEC_ALG_WEP;
} else if ((wpa_ie_len != 0)) {
if (((ieee->wpa_ie[0] == 0xdd) &&
- (!memcmp(&(ieee->wpa_ie[14]), ccmp_ie, 4))) ||
+ (!memcmp(&ieee->wpa_ie[14], ccmp_ie, 4))) ||
((ieee->wpa_ie[0] == 0x30) &&
(!memcmp(&ieee->wpa_ie[10], ccmp_rsn_ie, 4))))
return SEC_ALG_CCMP;
@@ -2508,7 +2245,7 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
u8 *asSta, u8 asRsn)
{
u8 i;
- u8 OpMode;
+ u8 op_mode;
RemovePeerTS(rtllib, asSta);
@@ -2517,10 +2254,10 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
for (i = 0; i < 6; i++)
rtllib->current_network.bssid[i] = 0x22;
- OpMode = RT_OP_MODE_NO_LINK;
- rtllib->OpMode = RT_OP_MODE_NO_LINK;
+ op_mode = RT_OP_MODE_NO_LINK;
+ rtllib->op_mode = RT_OP_MODE_NO_LINK;
rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS,
- (u8 *)(&OpMode));
+ (u8 *)(&op_mode));
rtllib_disassociate(rtllib);
rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
@@ -2528,11 +2265,7 @@ static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
}
}
-static void
-rtllib_MgntDisconnectAP(
- struct rtllib_device *rtllib,
- u8 asRsn
-)
+static void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn)
{
bool bFilterOutNonAssociatedBSSID = false;
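
The skb_waitQ -> skb_waitq rename above runs through the softmac back-pressure path: softmac_mgmt_xmit() and rtllib_softmac_xmit() hand a frame to hardware only when the ring has descriptors, nothing is already parked in the per-queue wait queue, and the queue is not stopped; otherwise the skb is queued, and the data path drops it past a cap of 200. A sketch of just that decision, all names stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define WAITQ_CAP 200

struct txq {
	unsigned int waitq_len;		/* skb_queue_len() stand-in */
	bool hw_has_desc;		/* check_nic_enough_desc() */
	bool stopped;			/* ieee->queue_stop */
};

/* Returns true if the frame went (notionally) straight to hardware. */
static bool xmit_or_park(struct txq *q)
{
	if (!q->hw_has_desc || q->waitq_len || q->stopped) {
		if (q->waitq_len < WAITQ_CAP)
			q->waitq_len++;		/* skb_queue_tail() */
		/* else: kfree_skb() -- dropped */
		return false;
	}
	return true;				/* hard_start_xmit() */
}

int main(void)
{
	struct txq q = { .hw_has_desc = true };

	printf("direct tx: %d\n", xmit_or_park(&q));	/* 1 */
	q.hw_has_desc = false;
	printf("parked:    %d\n", xmit_or_park(&q));	/* 0 */
	q.hw_has_desc = true;
	printf("parked:    %d\n", xmit_or_park(&q));	/* 0: waitq nonempty */
	return 0;
}
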
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index f32584291..2afa701e5 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -14,7 +14,6 @@
#include <linux/etherdevice.h>
#include "rtllib.h"
-#include "dot11d.h"
int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -208,7 +207,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
{
u32 tmp_rate;
- tmp_rate = TxCountToDataRate(ieee,
+ tmp_rate = tx_count_to_data_rate(ieee,
ieee->softmac_stats.CurrentShowTxate);
wrqu->bitrate.value = tmp_rate * 500000;
@@ -267,11 +266,11 @@ int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
if (wrqu->mode == IW_MODE_MONITOR) {
ieee->dev->type = ARPHRD_IEEE80211;
- rtllib_EnableNetMonitorMode(ieee->dev, false);
+ rtllib_enable_net_monitor_mode(ieee->dev, false);
} else {
ieee->dev->type = ARPHRD_ETHER;
if (ieee->iw_mode == IW_MODE_MONITOR)
- rtllib_DisableNetMonitorMode(ieee->dev, false);
+ rtllib_disable_net_monitor_mode(ieee->dev, false);
}
if (!ieee->proto_started) {
@@ -318,10 +317,10 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht &&
- ieee->ht_info->bCurBW40MHz) {
+ ieee->ht_info->cur_bw_40mhz) {
b40M = 1;
chan_offset = ieee->ht_info->CurSTAExtChnlOffset;
- bandwidth = (enum ht_channel_width)ieee->ht_info->bCurBW40MHz;
+ bandwidth = (enum ht_channel_width)ieee->ht_info->cur_bw_40mhz;
ieee->set_bw_mode_handler(ieee->dev, HT_CHANNEL_WIDTH_20,
HT_EXTCHNL_OFFSET_NO_EXT);
}
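
rtllib_wx_get_rate() above multiplies the value returned by the renamed tx_count_to_data_rate() by 500000 before reporting it through wireless extensions: 802.11 carries rates in 500 kbit/s units, so a rate count of 108 is 54 Mbit/s. A tiny check of that conversion:

#include <stdio.h>

int main(void)
{
	/* 802.11 rate codes in 500 kbit/s units: 1, 11 and 54 Mbit/s. */
	unsigned int units[] = { 2, 22, 108 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%3u units = %u bit/s\n", units[i], units[i] * 500000u);
	return 0;
}
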
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 9bf679438..f7098a2ba 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -267,7 +267,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
struct cb_desc *tcb_desc)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- struct tx_ts_record *pTxTs = NULL;
+ struct tx_ts_record *ts = NULL;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (rtllib_act_scanning(ieee, false))
@@ -288,52 +288,35 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
return;
- if (ht_info->bCurrentAMPDUEnable) {
- if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
+ if (ht_info->current_ampdu_enable) {
+ if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), hdr->addr1,
skb->priority, TX_DIR, true)) {
netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
return;
}
- if (!pTxTs->TxAdmittedBARecord.b_valid) {
+ if (!ts->tx_admitted_ba_record.b_valid) {
if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
KEY_TYPE_NA)) {
;
} else if (tcb_desc->bdhcp == 1) {
;
- } else if (!pTxTs->bDisable_AddBa) {
- TsStartAddBaProcess(ieee, pTxTs);
+ } else if (!ts->disable_add_ba) {
+ TsStartAddBaProcess(ieee, ts);
}
- goto FORCED_AGG_SETTING;
- } else if (!pTxTs->bUsingBa) {
- if (SN_LESS(pTxTs->TxAdmittedBARecord.ba_start_seq_ctrl.field.seq_num,
- (pTxTs->TxCurSeq + 1) % 4096))
- pTxTs->bUsingBa = true;
+ return;
+ } else if (!ts->using_ba) {
+ if (SN_LESS(ts->tx_admitted_ba_record.ba_start_seq_ctrl.field.seq_num,
+ (ts->tx_cur_seq + 1) % 4096))
+ ts->using_ba = true;
else
- goto FORCED_AGG_SETTING;
+ return;
}
if (ieee->iw_mode == IW_MODE_INFRA) {
- tcb_desc->bAMPDUEnable = true;
+ tcb_desc->ampdu_enable = true;
tcb_desc->ampdu_factor = ht_info->CurrentAMPDUFactor;
tcb_desc->ampdu_density = ht_info->current_mpdu_density;
}
}
-FORCED_AGG_SETTING:
- switch (ht_info->ForcedAMPDUMode) {
- case HT_AGG_AUTO:
- break;
-
- case HT_AGG_FORCE_ENABLE:
- tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_density = ht_info->forced_mpdu_density;
- tcb_desc->ampdu_factor = ht_info->forced_ampdu_factor;
- break;
-
- case HT_AGG_FORCE_DISABLE:
- tcb_desc->bAMPDUEnable = false;
- tcb_desc->ampdu_density = 0;
- tcb_desc->ampdu_factor = 0;
- break;
- }
}
static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
@@ -357,14 +340,9 @@ static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
if (!ht_info->current_ht_support || !ht_info->enable_ht)
return;
- if (ht_info->forced_short_gi) {
- tcb_desc->bUseShortGI = true;
- return;
- }
-
- if (ht_info->bCurBW40MHz && ht_info->bCurShortGI40MHz)
+ if (ht_info->cur_bw_40mhz && ht_info->cur_short_gi_40mhz)
tcb_desc->bUseShortGI = true;
- else if (!ht_info->bCurBW40MHz && ht_info->bCurShortGI20MHz)
+ else if (!ht_info->cur_bw_40mhz && ht_info->cur_short_gi_20mhz)
tcb_desc->bUseShortGI = true;
}
@@ -383,7 +361,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
if ((tcb_desc->data_rate & 0x80) == 0)
return;
- if (ht_info->bCurBW40MHz && ht_info->cur_tx_bw40mhz &&
+ if (ht_info->cur_bw_40mhz && ht_info->cur_tx_bw40mhz &&
!ieee->bandwidth_auto_switch.bforced_tx20Mhz)
tcb_desc->bPacketBW = true;
}
@@ -441,9 +419,9 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
if (ht_info->current_ht_support && ht_info->enable_ht) {
u8 HTOpMode = ht_info->current_op_mode;
- if ((ht_info->bCurBW40MHz && (HTOpMode == 2 ||
+ if ((ht_info->cur_bw_40mhz && (HTOpMode == 2 ||
HTOpMode == 3)) ||
- (!ht_info->bCurBW40MHz && HTOpMode == 3)) {
+ (!ht_info->cur_bw_40mhz && HTOpMode == 3)) {
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
break;
@@ -454,7 +432,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
tcb_desc->bRTSEnable = true;
break;
}
- if (tcb_desc->bAMPDUEnable) {
+ if (tcb_desc->ampdu_enable) {
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = false;
break;
@@ -500,8 +478,8 @@ static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), dst,
skb->priority, TX_DIR, true))
return 0;
- seqnum = ts->TxCurSeq;
- ts->TxCurSeq = (ts->TxCurSeq + 1) % 4096;
+ seqnum = ts->tx_cur_seq;
+ ts->tx_cur_seq = (ts->tx_cur_seq + 1) % 4096;
return seqnum;
}
return 0;
@@ -847,7 +825,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (txb) {
tcb_desc = (struct cb_desc *)
(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
- tcb_desc->bTxEnableFwCalcDur = 1;
+ tcb_desc->tx_enable_fw_calc_dur = 1;
tcb_desc->priority = skb->priority;
if (ether_type == ETH_P_PAE) {
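
The one behavioral change in rtllib_tx.c is the removal of the FORCED_AGG_SETTING label together with the ForcedAMPDUMode switch; every goto collapses into a plain return, presumably because nothing ever selects a forced mode. A condensed control-flow sketch of the resulting gate (field names from the hunks, helper name hypothetical, the TS lookup and secondary checks omitted):

    static void sketch_agg_gate(struct rt_hi_throughput *ht_info,
                                struct tx_ts_record *ts,
                                struct cb_desc *tcb_desc)
    {
            if (!ht_info->current_ampdu_enable)
                    return;
            if (!ts->tx_admitted_ba_record.b_valid)
                    return;         /* was: goto FORCED_AGG_SETTING */
            if (!ts->using_ba)
                    return;         /* likewise */
            tcb_desc->ampdu_enable = true;
    }
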
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index a37250de7..f92ec0faf 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -134,7 +134,7 @@ static inline char *rtl819x_translate_scan(struct rtllib_device *ieee,
((ht_cap->ShortGI40Mhz) ? 1 : 0) :
((ht_cap->ShortGI20Mhz) ? 1 : 0);
- max_mcs = HTGetHighestMCSRate(ieee, ht_cap->MCS,
+ max_mcs = ht_get_highest_mcs_rate(ieee, ht_cap->MCS,
MCS_FILTER_ALL);
rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
if (rate > max_rate)
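
rtllib_wx.c only picks up the ht_get_highest_mcs_rate rename, but the surrounding lookup deserves a gloss: MCS_DATA_RATE is indexed as [40 MHz?][short GI?][MCS index], with the 0x7f mask stripping the high flag bit from the returned MCS value before indexing:

    rate = MCS_DATA_RATE[is40M][isShortGI][max_mcs & 0x7f];
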
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index b18e6d9c8..7554613fe 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -221,8 +221,7 @@ struct net_device *r8712_init_netdev(void)
static u32 start_drv_threads(struct _adapter *padapter)
{
- padapter->cmd_thread = kthread_run(r8712_cmd_thread, padapter, "%s",
- padapter->pnetdev->name);
+ padapter->cmd_thread = kthread_run(r8712_cmd_thread, padapter, "%s", padapter->pnetdev->name);
if (IS_ERR(padapter->cmd_thread))
return _FAIL;
return _SUCCESS;
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index c9400e40a..a39d6c066 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -213,8 +213,8 @@ u16 r8712_efuse_get_current_size(struct _adapter *adapter)
u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
- while (bContinual && efuse_one_byte_read(adapter, efuse_addr,
- &efuse_data) && (efuse_addr < efuse_available_max_size)) {
+ while (bContinual && efuse_one_byte_read(adapter, efuse_addr, &efuse_data) &&
+ (efuse_addr < efuse_available_max_size)) {
if (efuse_data != 0xFF) {
hworden = efuse_data & 0x0F;
word_cnts = calculate_word_cnts(hworden);
@@ -252,9 +252,8 @@ u8 r8712_efuse_pg_packet_read(struct _adapter *adapter, u8 offset, u8 *data)
memset(tmpdata, 0xFF, PGPKT_DATA_SIZE);
for (tmpidx = 0; tmpidx < word_cnts * 2;
tmpidx++) {
- if (efuse_one_byte_read(adapter,
- efuse_addr + 1 + tmpidx,
- &efuse_data)) {
+ if (efuse_one_byte_read(adapter, efuse_addr + 1 + tmpidx,
+ &efuse_data)) {
tmpdata[tmpidx] = efuse_data;
} else {
ret = false;
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 7da014ab0..a3c4713c5 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -267,8 +267,7 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
/*the first fragment*/
if (!list_empty(&pdefrag_q->queue)) {
/*free current defrag_q */
- r8712_free_recvframe_queue(pdefrag_q,
- pfree_recv_queue);
+ r8712_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
}
}
/* Then enqueue the 0~(n-1) fragment to the defrag_q */
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 4cb01f590..d7d678b04 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -147,9 +147,8 @@ static u32 get_ff_hwaddr(struct xmit_frame *pxmitframe)
}
static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
- struct hw_xmit *phwxmit,
- struct tx_servq *ptxservq,
- struct __queue *pframe_queue)
+ struct hw_xmit *phwxmit, struct tx_servq *ptxservq,
+ struct __queue *pframe_queue)
{
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
@@ -167,7 +166,7 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
}
static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
- struct hw_xmit *phwxmit_i, sint entry)
+ struct hw_xmit *phwxmit_i, sint entry)
{
unsigned long irqL0;
struct list_head *sta_plist, *sta_phead;
@@ -197,11 +196,10 @@ static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
sta_phead = &phwxmit->sta_queue->queue;
sta_plist = sta_phead->next;
while (!end_of_queue_search(sta_phead, sta_plist)) {
- ptxservq = container_of(sta_plist, struct tx_servq,
- tx_pending);
+ ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
pframe_queue = &ptxservq->sta_pending;
- pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit,
- ptxservq, pframe_queue);
+ pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq,
+ pframe_queue);
if (pxmitframe) {
phwxmit->accnt--;
goto exit_dequeue_xframe_ex;
@@ -221,8 +219,7 @@ exit_dequeue_xframe_ex:
return pxmitframe;
}
-void r8712_do_queue_select(struct _adapter *padapter,
- struct pkt_attrib *pattrib)
+void r8712_do_queue_select(struct _adapter *padapter, struct pkt_attrib *pattrib)
{
unsigned int qsel = 0;
struct dvobj_priv *pdvobj = &padapter->dvobjpriv;
@@ -292,14 +289,12 @@ void r8712_append_mpdu_unit(struct xmit_buf *pxmitbuf,
r8712_xmit_complete(padapter, pxmitframe);
if (pxmitframe->attrib.ether_type != 0x0806) {
if ((pxmitframe->attrib.ether_type != 0x888e) &&
- (pxmitframe->attrib.dhcp_pkt != 1)) {
- r8712_issue_addbareq_cmd(padapter,
- pxmitframe->attrib.priority);
+ (pxmitframe->attrib.dhcp_pkt != 1)) {
+ r8712_issue_addbareq_cmd(padapter, pxmitframe->attrib.priority);
}
}
pxmitframe->last[0] = 1;
- update_txdesc(pxmitframe, (uint *)(pxmitframe->buf_addr),
- pxmitframe->attrib.last_txcmdsz);
+ update_txdesc(pxmitframe, (uint *)(pxmitframe->buf_addr), pxmitframe->attrib.last_txcmdsz);
/*padding zero */
last_txcmdsz = pxmitframe->attrib.last_txcmdsz;
padding_sz = (8 - (last_txcmdsz % 8));
@@ -333,8 +328,7 @@ void r8712_xmitframe_aggr_1st(struct xmit_buf *pxmitbuf,
pxmitbuf->aggr_nr = 1;
}
-u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf,
- struct xmit_frame *pxmitframe)
+u16 r8712_xmitframe_aggr_next(struct xmit_buf *pxmitbuf, struct xmit_frame *pxmitframe)
{
pxmitframe->pxmitbuf = pxmitbuf;
pxmitbuf->priv_data = pxmitframe;
@@ -374,9 +368,9 @@ void r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
pxmitframe->bpending[0] = false;
pxmitframe->mem_addr = pxmitbuf->pbuf;
- if ((pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) % 0x200) ==
- 0) || ((!pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) %
- 0x40) == 0))) {
+ if ((pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) % 0x200) == 0) ||
+ ((!pdvobj->ishighspeed && ((total_length + TXDESC_SIZE) %
+ 0x40) == 0))) {
ptxdesc->txdw0 |= cpu_to_le32
(((TXDESC_SIZE + OFFSET_SZ + 8) << OFFSET_SHT) &
0x00ff0000);
@@ -387,8 +381,8 @@ void r8712_dump_aggr_xframe(struct xmit_buf *pxmitbuf,
0x00ff0000);
/*default = 32 bytes for TX Desc*/
}
- r8712_write_port(pxmitframe->padapter, RTL8712_DMA_H2CCMD,
- total_length + TXDESC_SIZE, (u8 *)pxmitframe);
+ r8712_write_port(pxmitframe->padapter, RTL8712_DMA_H2CCMD, total_length + TXDESC_SIZE,
+ (u8 *)pxmitframe);
}
#endif
@@ -618,14 +612,12 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
pxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
/* need to remember the 1st frame */
if (pxmitframe) {
-
#ifdef CONFIG_R8712_TX_AGGR
/* 1. dequeue 2nd frame
* 2. aggr if 2nd xframe is dequeued, else dump directly
*/
if (AGGR_NR_HIGH_BOUND > 1)
- p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits,
- hwentry);
+ p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
if (pxmitframe->frame_tag != DATA_FRAMETAG) {
r8712_free_xmitbuf(pxmitpriv, pxmitbuf);
return false;
@@ -639,16 +631,12 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
if (p2ndxmitframe) {
u16 total_length;
- total_length = r8712_xmitframe_aggr_next(
- pxmitbuf, p2ndxmitframe);
+ total_length = r8712_xmitframe_aggr_next(pxmitbuf, p2ndxmitframe);
do {
- p2ndxmitframe = dequeue_xframe_ex(
- pxmitpriv, phwxmits, hwentry);
+ p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
if (p2ndxmitframe)
total_length =
- r8712_xmitframe_aggr_next(
- pxmitbuf,
- p2ndxmitframe);
+ r8712_xmitframe_aggr_next(pxmitbuf, p2ndxmitframe);
else
break;
} while (total_length <= 0x1800 &&
@@ -662,8 +650,8 @@ int r8712_xmitframe_complete(struct _adapter *padapter,
xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf);
if (pxmitframe->frame_tag == DATA_FRAMETAG) {
if (pxmitframe->attrib.priority <= 15)
- res = r8712_xmitframe_coalesce(padapter,
- pxmitframe->pkt, pxmitframe);
+ res = r8712_xmitframe_coalesce(padapter, pxmitframe->pkt,
+ pxmitframe);
/* always return ndis_packet after
* r8712_xmitframe_coalesce
*/
@@ -714,10 +702,10 @@ static void dump_xframe(struct _adapter *padapter,
ff_hwaddr = get_ff_hwaddr(pxmitframe);
#ifdef CONFIG_R8712_TX_AGGR
r8712_write_port(padapter, RTL8712_DMA_H2CCMD, w_sz,
- (unsigned char *)pxmitframe);
+ (unsigned char *)pxmitframe);
#else
r8712_write_port(padapter, ff_hwaddr, w_sz,
- (unsigned char *)pxmitframe);
+ (unsigned char *)pxmitframe);
#endif
mem_addr += w_sz;
mem_addr = (u8 *)RND4(((addr_t)(mem_addr)));
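
Everything in rtl8712_xmit.c above is a pure re-flow to the kernel's wider line-length allowance; no logic changes. For orientation, the aggregation loop those hunks reshape works roughly like this (condensed, names from the patch; the second loop clause is truncated in the hunk above and elided here):

    do {
            p2ndxmitframe = dequeue_xframe_ex(pxmitpriv, phwxmits, hwentry);
            if (!p2ndxmitframe)
                    break;
            total_length = r8712_xmitframe_aggr_next(pxmitbuf,
                                                     p2ndxmitframe);
    } while (total_length <= 0x1800);   /* plus a cap on pxmitbuf->aggr_nr */
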
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 4be96df5a..bbd4a13c7 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -242,8 +242,7 @@ void r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
kfree(ph2c);
return;
}
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
- GEN_CMD_CODE(_SetChannelPlan));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara, GEN_CMD_CODE(_SetChannelPlan));
psetchplanpara->ChannelPlan = chplan;
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
@@ -302,8 +301,7 @@ void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
padapter->mppriv.workparam.bcompleted = true;
}
-void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd)
+void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
{
kfree(pcmd->parmbuf);
kfree(pcmd);
@@ -374,11 +372,10 @@ int r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
psecuritypriv->authenticator_ie[0] = (unsigned char)
psecnetwork->IELength;
if ((psecnetwork->IELength - 12) < (256 - 1))
- memcpy(&psecuritypriv->authenticator_ie[1],
- &psecnetwork->IEs[12], psecnetwork->IELength - 12);
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12],
+ psecnetwork->IELength - 12);
else
- memcpy(&psecuritypriv->authenticator_ie[1],
- &psecnetwork->IEs[12], (256 - 1));
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256 - 1));
psecnetwork->IELength = 0;
/*
* If the driver wants to use the bssid to create the connection.
@@ -388,19 +385,15 @@ int r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
if (!pmlmepriv->assoc_by_bssid)
ether_addr_copy(&pmlmepriv->assoc_bssid[0],
&pnetwork->network.MacAddress[0]);
- psecnetwork->IELength = r8712_restruct_sec_ie(padapter,
- &pnetwork->network.IEs[0],
- &psecnetwork->IEs[0],
- pnetwork->network.IELength);
+ psecnetwork->IELength = r8712_restruct_sec_ie(padapter, &pnetwork->network.IEs[0],
+ &psecnetwork->IEs[0], pnetwork->network.IELength);
pqospriv->qos_option = 0;
if (pregistrypriv->wmm_enable) {
u32 tmp_len;
- tmp_len = r8712_restruct_wmm_ie(padapter,
- &pnetwork->network.IEs[0],
- &psecnetwork->IEs[0],
- pnetwork->network.IELength,
- psecnetwork->IELength);
+ tmp_len = r8712_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0],
+ &psecnetwork->IEs[0], pnetwork->network.IELength,
+ psecnetwork->IELength);
if (psecnetwork->IELength != tmp_len) {
psecnetwork->IELength = tmp_len;
pqospriv->qos_option = 1; /* WMM IE in beacon */
@@ -427,39 +420,28 @@ int r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
psecuritypriv->supplicant_ie[0] = (u8)psecnetwork->IELength;
if (psecnetwork->IELength < 255)
memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0],
- psecnetwork->IELength);
+ psecnetwork->IELength);
else
memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0],
- 255);
+ 255);
/* get cmdsz before endian conversion */
pcmd->cmdsz = r8712_get_wlan_bssid_ex_sz(psecnetwork);
#ifdef __BIG_ENDIAN
/* wlan_network endian conversion */
psecnetwork->Length = cpu_to_le32(psecnetwork->Length);
- psecnetwork->Ssid.SsidLength = cpu_to_le32(
- psecnetwork->Ssid.SsidLength);
+ psecnetwork->Ssid.SsidLength = cpu_to_le32(psecnetwork->Ssid.SsidLength);
psecnetwork->Privacy = cpu_to_le32(psecnetwork->Privacy);
psecnetwork->Rssi = cpu_to_le32(psecnetwork->Rssi);
- psecnetwork->NetworkTypeInUse = cpu_to_le32(
- psecnetwork->NetworkTypeInUse);
- psecnetwork->Configuration.ATIMWindow = cpu_to_le32(
- psecnetwork->Configuration.ATIMWindow);
- psecnetwork->Configuration.BeaconPeriod = cpu_to_le32(
- psecnetwork->Configuration.BeaconPeriod);
- psecnetwork->Configuration.DSConfig = cpu_to_le32(
- psecnetwork->Configuration.DSConfig);
- psecnetwork->Configuration.FHConfig.DwellTime = cpu_to_le32(
- psecnetwork->Configuration.FHConfig.DwellTime);
- psecnetwork->Configuration.FHConfig.HopPattern = cpu_to_le32(
- psecnetwork->Configuration.FHConfig.HopPattern);
- psecnetwork->Configuration.FHConfig.HopSet = cpu_to_le32(
- psecnetwork->Configuration.FHConfig.HopSet);
- psecnetwork->Configuration.FHConfig.Length = cpu_to_le32(
- psecnetwork->Configuration.FHConfig.Length);
- psecnetwork->Configuration.Length = cpu_to_le32(
- psecnetwork->Configuration.Length);
- psecnetwork->InfrastructureMode = cpu_to_le32(
- psecnetwork->InfrastructureMode);
+ psecnetwork->NetworkTypeInUse = cpu_to_le32(psecnetwork->NetworkTypeInUse);
+ psecnetwork->Configuration.ATIMWindow = cpu_to_le32(psecnetwork->Configuration.ATIMWindow);
+ psecnetwork->Configuration.BeaconPeriod = cpu_to_le32(psecnetwork->Configuration.BeaconPeriod);
+ psecnetwork->Configuration.DSConfig = cpu_to_le32(psecnetwork->Configuration.DSConfig);
+ psecnetwork->Configuration.FHConfig.DwellTime = cpu_to_le32(psecnetwork->Configuration.FHConfig.DwellTime);
+ psecnetwork->Configuration.FHConfig.HopPattern = cpu_to_le32(psecnetwork->Configuration.FHConfig.HopPattern);
+ psecnetwork->Configuration.FHConfig.HopSet = cpu_to_le32(psecnetwork->Configuration.FHConfig.HopSet);
+ psecnetwork->Configuration.FHConfig.Length = cpu_to_le32(psecnetwork->Configuration.FHConfig.Length);
+ psecnetwork->Configuration.Length = cpu_to_le32(psecnetwork->Configuration.Length);
+ psecnetwork->InfrastructureMode = cpu_to_le32(psecnetwork->InfrastructureMode);
psecnetwork->IELength = cpu_to_le32(psecnetwork->IELength);
#endif
INIT_LIST_HEAD(&pcmd->list);
@@ -485,13 +467,12 @@ void r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
kfree(pdisconnect_cmd);
return;
}
- init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect,
- _DisConnect_CMD_);
+ init_h2fwcmd_w_parm_no_rsp(pdisconnect_cmd, pdisconnect, _DisConnect_CMD_);
r8712_enqueue_cmd(pcmdpriv, pdisconnect_cmd);
}
void r8712_setopmode_cmd(struct _adapter *padapter,
- enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype)
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype)
{
struct cmd_obj *ph2c;
struct setopmode_parm *psetop;
@@ -543,14 +524,12 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
psetstakey_para->algorithm = (unsigned char)
psecuritypriv->PrivacyAlgrthm;
else
- GET_ENCRY_ALGO(psecuritypriv, sta,
- psetstakey_para->algorithm, false);
+ GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
if (unicast_key)
memcpy(&psetstakey_para->key, &sta->x_UncstKey, 16);
else
- memcpy(&psetstakey_para->key,
- &psecuritypriv->XGrpKey[
- psecuritypriv->XGrpKeyid - 1]. skey, 16);
+ memcpy(&psetstakey_para->key, &psecuritypriv->XGrpKey[psecuritypriv->XGrpKeyid - 1].
+ skey, 16);
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
@@ -568,8 +547,7 @@ void r8712_setMacAddr_cmd(struct _adapter *padapter, const u8 *mac_addr)
kfree(ph2c);
return;
}
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para,
- _SetMacAddress_CMD_);
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetMacAddr_para, _SetMacAddress_CMD_);
ether_addr_copy(psetMacAddr_para->MacAddr, mac_addr);
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
@@ -589,8 +567,7 @@ void r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
return;
}
paddbareq_parm->tid = tid;
- init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm,
- GEN_CMD_CODE(_AddBAReq));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
r8712_enqueue_cmd_ex(pcmdpriv, ph2c);
}
@@ -644,13 +621,11 @@ void r8712_joinbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (pcmd->res != H2C_SUCCESS)
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
+ mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1));
r8712_free_cmd_obj(pcmd);
}
-void r8712_createbss_cmd_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd)
+void r8712_createbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
{
unsigned long irqL;
struct sta_info *psta = NULL;
@@ -660,8 +635,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
if (pcmd->res != H2C_SUCCESS)
- mod_timer(&pmlmepriv->assoc_timer,
- jiffies + msecs_to_jiffies(1));
+ mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1));
del_timer(&pmlmepriv->assoc_timer);
#ifdef __BIG_ENDIAN
/* endian_convert */
@@ -670,31 +644,21 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy);
pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi);
pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse);
- pnetwork->Configuration.ATIMWindow =
- le32_to_cpu(pnetwork->Configuration.ATIMWindow);
- pnetwork->Configuration.DSConfig =
- le32_to_cpu(pnetwork->Configuration.DSConfig);
- pnetwork->Configuration.FHConfig.DwellTime =
- le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime);
- pnetwork->Configuration.FHConfig.HopPattern =
- le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern);
- pnetwork->Configuration.FHConfig.HopSet =
- le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet);
- pnetwork->Configuration.FHConfig.Length =
- le32_to_cpu(pnetwork->Configuration.FHConfig.Length);
- pnetwork->Configuration.Length =
- le32_to_cpu(pnetwork->Configuration.Length);
- pnetwork->InfrastructureMode =
- le32_to_cpu(pnetwork->InfrastructureMode);
+ pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork->Configuration.ATIMWindow);
+ pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork->Configuration.DSConfig);
+ pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime);
+ pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern);
+ pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet);
+ pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork->Configuration.FHConfig.Length);
+ pnetwork->Configuration.Length = le32_to_cpu(pnetwork->Configuration.Length);
+ pnetwork->InfrastructureMode = le32_to_cpu(pnetwork->InfrastructureMode);
pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
#endif
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if ((pmlmepriv->fw_state) & WIFI_AP_STATE) {
- psta = r8712_get_stainfo(&padapter->stapriv,
- pnetwork->MacAddress);
+ psta = r8712_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
if (!psta) {
- psta = r8712_alloc_stainfo(&padapter->stapriv,
- pnetwork->MacAddress);
+ psta = r8712_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
if (!psta)
goto createbss_cmd_fail;
}
@@ -702,20 +666,17 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
} else {
pwlan = _r8712_alloc_network(pmlmepriv);
if (!pwlan) {
- pwlan = r8712_get_oldest_wlan_network(
- &pmlmepriv->scanned_queue);
+ pwlan = r8712_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
if (!pwlan)
goto createbss_cmd_fail;
pwlan->last_scanned = jiffies;
} else {
- list_add_tail(&(pwlan->list),
- &pmlmepriv->scanned_queue.queue);
+ list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
}
pnetwork->Length = r8712_get_wlan_bssid_ex_sz(pnetwork);
memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
pwlan->fixed = true;
- memcpy(&tgt_network->network, pnetwork,
- (r8712_get_wlan_bssid_ex_sz(pnetwork)));
+ memcpy(&tgt_network->network, pnetwork, (r8712_get_wlan_bssid_ex_sz(pnetwork)));
if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
/*
@@ -728,14 +689,11 @@ createbss_cmd_fail:
r8712_free_cmd_obj(pcmd);
}
-void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd)
+void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd)
{
struct sta_priv *pstapriv = &padapter->stapriv;
- struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)
- (pcmd->rsp);
- struct sta_info *psta = r8712_get_stainfo(pstapriv,
- psetstakey_rsp->addr);
+ struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp);
+ struct sta_info *psta = r8712_get_stainfo(pstapriv, psetstakey_rsp->addr);
if (!psta)
goto exit;
@@ -750,27 +708,23 @@ void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter,
unsigned long irqL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct set_assocsta_parm *passocsta_parm =
- (struct set_assocsta_parm *)(pcmd->parmbuf);
- struct set_assocsta_rsp *passocsta_rsp =
- (struct set_assocsta_rsp *) (pcmd->rsp);
- struct sta_info *psta = r8712_get_stainfo(pstapriv,
- passocsta_parm->addr);
+ struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
+ struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp);
+ struct sta_info *psta = r8712_get_stainfo(pstapriv, passocsta_parm->addr);
if (!psta)
return;
psta->aid = psta->mac_id = passocsta_rsp->cam_id;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
- if ((check_fwstate(pmlmepriv, WIFI_MP_STATE)) &&
- (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)))
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE)) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)))
pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
set_fwstate(pmlmepriv, _FW_LINKED);
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
r8712_free_cmd_obj(pcmd);
}
-void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
- u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO)
+void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
+ u32 tryPktInterval, u32 firstStageTO)
{
struct cmd_obj *ph2c;
struct DisconnectCtrlEx_param *param;
@@ -790,7 +744,6 @@ void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
param->TryPktInterval = (unsigned char)tryPktInterval;
param->FirstStageTO = (unsigned int)firstStageTO;
- init_h2fwcmd_w_parm_no_rsp(ph2c, param,
- GEN_CMD_CODE(_DisconnectCtrlEx));
+ init_h2fwcmd_w_parm_no_rsp(ph2c, param, GEN_CMD_CODE(_DisconnectCtrlEx));
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
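
The rtl871x_cmd.c hunks are likewise re-wraps. They all instantiate the same firmware-command pattern, sketched here from the set_chplan hunk (NULL checks and kfree() on failure trimmed, allocation flags as used elsewhere in the driver):

    ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
    psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
    init_h2fwcmd_w_parm_no_rsp(ph2c, psetchplanpara,
                               GEN_CMD_CODE(_SetChannelPlan));
    psetchplanpara->ChannelPlan = chplan;
    r8712_enqueue_cmd(pcmdpriv, ph2c);
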
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 8453d8de8..2613b3c2a 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -716,39 +716,28 @@ struct DisconnectCtrlEx_param {
#define H2C_RESERVED 0x07
void r8712_setMacAddr_cmd(struct _adapter *padapter, const u8 *mac_addr);
-u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
- struct ndis_802_11_ssid *pssid);
+u8 r8712_sitesurvey_cmd(struct _adapter *padapter, struct ndis_802_11_ssid *pssid);
int r8712_createbss_cmd(struct _adapter *padapter);
void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key);
-int r8712_joinbss_cmd(struct _adapter *padapter,
- struct wlan_network *pnetwork);
+int r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork);
void r8712_disassoc_cmd(struct _adapter *padapter);
-void r8712_setopmode_cmd(struct _adapter *padapter,
- enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
+void r8712_setopmode_cmd(struct _adapter *padapter, enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
int r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset);
void r8712_set_chplan_cmd(struct _adapter *padapter, int chplan);
int r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval);
int r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val);
void r8712_addbareq_cmd(struct _adapter *padapter, u8 tid);
void r8712_wdg_wk_cmd(struct _adapter *padapter);
-void r8712_survey_cmd_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_disassoc_cmd_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_joinbss_cmd_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_createbss_cmd_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter,
- struct cmd_obj *pcmd);
-void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
- u32 tryPktCnt, u32 tryPktInterval, u32 firstStageTO);
+void r8712_survey_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_disassoc_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_joinbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_createbss_cmd_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_getbbrfreg_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_readtssi_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_setstaKey_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_setassocsta_cmdrsp_callback(struct _adapter *padapter, struct cmd_obj *pcmd);
+void r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl, u32 tryPktCnt,
+ u32 tryPktInterval, u32 firstStageTO);
struct _cmd_callback {
u32 cmd_code;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 36f6904d2..0653aa27b 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -59,8 +59,7 @@ void r8712_indicate_wx_assoc_event(struct _adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress,
- ETH_ALEN);
+ memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -111,11 +110,9 @@ static inline void handle_group_key(struct ieee_param *param,
memcpy(grk[param->u.crypt.idx - 1].skey,
&param->u.crypt.key[24], 8);
padapter->securitypriv.binstallGrpkey = true;
- r8712_set_key(padapter, &padapter->securitypriv,
- param->u.crypt.idx);
+ r8712_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx);
if (padapter->registrypriv.power_mgnt > PS_MODE_ACTIVE) {
- if (padapter->registrypriv.power_mgnt !=
- padapter->pwrctrlpriv.pwr_mode)
+ if (padapter->registrypriv.power_mgnt != padapter->pwrctrlpriv.pwr_mode)
mod_timer(&padapter->mlmepriv.dhcp_timer,
jiffies + msecs_to_jiffies(60000));
}
@@ -148,13 +145,11 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(iwe, 0, sizeof(*iwe));
iwe->cmd = IWEVCUSTOM;
iwe->u.data.length = (u16)strlen(buf);
- start = iwe_stream_add_point(info, start, stop,
- iwe, buf);
+ start = iwe_stream_add_point(info, start, stop, iwe, buf);
memset(iwe, 0, sizeof(*iwe));
iwe->cmd = IWEVGENIE;
iwe->u.data.length = (u16)wpa_len;
- start = iwe_stream_add_point(info, start, stop,
- iwe, wpa_ie);
+ start = iwe_stream_add_point(info, start, stop, iwe, wpa_ie);
}
if (rsn_len > 0) {
memset(buf, 0, MAX_WPA_IE_LEN);
@@ -168,13 +163,11 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(iwe, 0, sizeof(*iwe));
iwe->cmd = IWEVCUSTOM;
iwe->u.data.length = strlen(buf);
- start = iwe_stream_add_point(info, start, stop,
- iwe, buf);
+ start = iwe_stream_add_point(info, start, stop, iwe, buf);
memset(iwe, 0, sizeof(*iwe));
iwe->cmd = IWEVGENIE;
iwe->u.data.length = rsn_len;
- start = iwe_stream_add_point(info, start, stop, iwe,
- rsn_ie);
+ start = iwe_stream_add_point(info, start, stop, iwe, rsn_ie);
}
return start;
@@ -189,14 +182,11 @@ static noinline_for_stack char *translate_scan_wps(struct iw_request_info *info,
u8 wps_ie[512];
uint wps_ielen;
- if (r8712_get_wps_ie(pnetwork->network.IEs,
- pnetwork->network.IELength,
- wps_ie, &wps_ielen)) {
+ if (r8712_get_wps_ie(pnetwork->network.IEs, pnetwork->network.IELength, wps_ie, &wps_ielen)) {
if (wps_ielen > 2) {
iwe->cmd = IWEVGENIE;
iwe->u.data.length = (u16)wps_ielen;
- start = iwe_stream_add_point(info, start, stop,
- iwe, wps_ie);
+ start = iwe_stream_add_point(info, start, stop, iwe, wps_ie);
}
}
@@ -259,16 +249,14 @@ static char *translate_scan(struct _adapter *padapter,
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
- memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs),
- 2);
+ memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs), 2);
le16_to_cpus(&cap);
if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_ESS)) {
if (cap & WLAN_CAPABILITY_ESS)
iwe.u.mode = (u32)IW_MODE_MASTER;
else
iwe.u.mode = (u32)IW_MODE_ADHOC;
- start = iwe_stream_add_event(info, start, stop, &iwe,
- IW_EV_UINT_LEN);
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
}
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
@@ -276,28 +264,23 @@ static char *translate_scan(struct _adapter *padapter,
/* check legal index */
u8 dsconfig = pnetwork->network.Configuration.DSConfig;
- if (dsconfig >= 1 && dsconfig <= sizeof(
- ieee80211_wlan_frequencies) / sizeof(long))
- iwe.u.freq.m =
- (s32)(ieee80211_wlan_frequencies
- [dsconfig - 1] * 100000);
+ if (dsconfig >= 1 && dsconfig <= sizeof(ieee80211_wlan_frequencies) / sizeof(long))
+ iwe.u.freq.m = (s32)(ieee80211_wlan_frequencies[dsconfig - 1] * 100000);
else
iwe.u.freq.m = 0;
}
iwe.u.freq.e = (s16)1;
iwe.u.freq.i = (u8)pnetwork->network.Configuration.DSConfig;
start = iwe_stream_add_event(info, start, stop, &iwe,
- IW_EV_FREQ_LEN);
+ IW_EV_FREQ_LEN);
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
if (cap & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = (u16)(IW_ENCODE_ENABLED |
- IW_ENCODE_NOKEY);
+ iwe.u.data.flags = (u16)(IW_ENCODE_ENABLED | IW_ENCODE_NOKEY);
else
iwe.u.data.flags = (u16)(IW_ENCODE_DISABLED);
iwe.u.data.length = (u16)0;
- start = iwe_stream_add_point(info, start, stop, &iwe,
- pnetwork->network.Ssid.Ssid);
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
/*Add basic and extended rates */
current_val = start + iwe_stream_lcp_len(info);
iwe.cmd = SIOCGIWRATE;
@@ -307,10 +290,9 @@ static char *translate_scan(struct _adapter *padapter,
i = 0;
while (pnetwork->network.rates[i] != 0) {
/* Bit rate given in 500 kb/s units */
- iwe.u.bitrate.value = (pnetwork->network.rates[i++] &
- 0x7F) * 500000;
- current_val = iwe_stream_add_value(info, start, current_val,
- stop, &iwe, IW_EV_PARAM_LEN);
+ iwe.u.bitrate.value = (pnetwork->network.rates[i++] & 0x7F) * 500000;
+ current_val = iwe_stream_add_value(info, start, current_val, stop, &iwe,
+ IW_EV_PARAM_LEN);
}
/* Check if we added any event */
if ((current_val - start) > iwe_stream_lcp_len(info))
@@ -324,8 +306,7 @@ static char *translate_scan(struct _adapter *padapter,
iwe.cmd = IWEVQUAL;
rssi = r8712_signal_scale_mapping(pnetwork->network.Rssi);
/* we only update signal_level (signal strength) that is rssi. */
- iwe.u.qual.updated = (u8)(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED |
- IW_QUAL_NOISE_INVALID);
+ iwe.u.qual.updated = (u8)(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID);
iwe.u.qual.level = rssi; /* signal strength */
iwe.u.qual.qual = 0; /* signal quality */
iwe.u.qual.noise = 0; /* noise level */
@@ -490,71 +471,59 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
goto exit;
}
if (r8712_parse_wpa_ie(buf, ielen, &group_cipher,
- &pairwise_cipher) == 0) {
+ &pairwise_cipher) == 0) {
padapter->securitypriv.AuthAlgrthm = 2;
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeWPAPSK;
}
if (r8712_parse_wpa2_ie(buf, ielen, &group_cipher,
- &pairwise_cipher) == 0) {
+ &pairwise_cipher) == 0) {
padapter->securitypriv.AuthAlgrthm = 2;
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeWPA2PSK;
}
switch (group_cipher) {
case WPA_CIPHER_NONE:
- padapter->securitypriv.XGrpPrivacy =
- _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.XGrpPrivacy = _WEP40_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
break;
case WPA_CIPHER_TKIP:
padapter->securitypriv.XGrpPrivacy = _TKIP_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption2Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
break;
case WPA_CIPHER_CCMP:
padapter->securitypriv.XGrpPrivacy = _AES_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption3Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
break;
case WPA_CIPHER_WEP104:
padapter->securitypriv.XGrpPrivacy = _WEP104_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
break;
}
switch (pairwise_cipher) {
case WPA_CIPHER_NONE:
- padapter->securitypriv.PrivacyAlgrthm =
- _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
break;
case WPA_CIPHER_TKIP:
padapter->securitypriv.PrivacyAlgrthm = _TKIP_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption2Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
break;
case WPA_CIPHER_CCMP:
padapter->securitypriv.PrivacyAlgrthm = _AES_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption3Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
break;
case WPA_CIPHER_WEP104:
padapter->securitypriv.PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.ndisencryptstatus =
- Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
break;
}
padapter->securitypriv.wps_phase = false;
@@ -574,8 +543,8 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
(buf[cnt + 1] + 2) :
(MAX_WPA_IE_LEN << 2);
memcpy(padapter->securitypriv.wps_ie,
- &buf[cnt],
- padapter->securitypriv.wps_ie_len);
+ &buf[cnt],
+ padapter->securitypriv.wps_ie_len);
padapter->securitypriv.wps_phase =
true;
netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
@@ -592,8 +561,7 @@ exit:
return ret;
}
-static int r8711_wx_get_name(struct net_device *dev,
- struct iw_request_info *info,
+static int r8711_wx_get_name(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
@@ -604,8 +572,7 @@ static int r8711_wx_get_name(struct net_device *dev,
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
u8 *prates;
- if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE) ==
- true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE) == true) {
/* parsing HT_CAP_IE */
p = r8712_get_ie(&pcur_bss->IEs[12], WLAN_EID_HT_CAPABILITY,
&ht_ielen, pcur_bss->IELength - 12);
@@ -658,9 +625,7 @@ static int r8711_wx_set_freq(struct net_device *dev,
int rc = 0;
/* If setting by frequency, convert to a channel */
- if ((fwrq->e == 1) &&
- (fwrq->m >= 241200000) &&
- (fwrq->m <= 248700000)) {
+ if ((fwrq->e == 1) && (fwrq->m >= 241200000) && (fwrq->m <= 248700000)) {
int f = fwrq->m / 100000;
int c = 0;
@@ -685,8 +650,7 @@ static int r8711_wx_set_freq(struct net_device *dev,
return rc;
}
-static int r8711_wx_get_freq(struct net_device *dev,
- struct iw_request_info *info,
+static int r8711_wx_get_freq(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
@@ -745,7 +709,7 @@ static int r8711_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
wrqu->mode = IW_MODE_INFRA;
else if (check_fwstate(pmlmepriv,
- WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE))
+ WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE))
wrqu->mode = IW_MODE_ADHOC;
else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
wrqu->mode = IW_MODE_MASTER;
@@ -754,9 +718,8 @@ static int r8711_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
return 0;
}
-static int r871x_wx_set_pmkid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+static int r871x_wx_set_pmkid(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -828,7 +791,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
break;
case IW_PMKSA_FLUSH:
memset(psecuritypriv->PMKIDList, 0,
- sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
psecuritypriv->PMKIDIndex = 0;
intReturn = true;
break;
@@ -850,9 +813,8 @@ static int r8711_wx_get_sens(struct net_device *dev,
return 0;
}
-static int r8711_wx_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_range(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
u16 val;
@@ -912,9 +874,9 @@ static int r8711_wx_get_rate(struct net_device *dev,
union iwreq_data *wrqu, char *extra);
static int r871x_wx_set_priv(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
{
int ret = 0, len = 0;
char *ext;
@@ -995,12 +957,10 @@ static int r871x_wx_set_priv(struct net_device *dev,
);
sprintf(ext, "OK");
} else {
- netdev_info(dev, "r8712u: %s: unknown Command %s.\n",
- __func__, ext);
+ netdev_info(dev, "r8712u: %s: unknown Command %s.\n", __func__, ext);
goto FREE_EXT;
}
- if (copy_to_user(dwrq->pointer, ext,
- min(dwrq->length, (__u16)(strlen(ext) + 1))))
+ if (copy_to_user(dwrq->pointer, ext, min(dwrq->length, (__u16)(strlen(ext) + 1))))
ret = -EFAULT;
FREE_EXT:
@@ -1021,10 +981,8 @@ FREE_EXT:
* For this operation to succeed, there is no need for the interface to be up.
*
*/
-static int r8711_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
+static int r8711_wx_set_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *awrq, char *extra)
{
int ret = -EINPROGRESS;
struct _adapter *padapter = netdev_priv(dev);
@@ -1072,17 +1030,15 @@ static int r8711_wx_set_wap(struct net_device *dev,
return ret;
}
-static int r8711_wx_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_wap(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
- if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE |
- WIFI_AP_STATE))
+ if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE))
ether_addr_copy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress);
else
eth_zero_addr(wrqu->ap_addr.sa_data);
@@ -1122,9 +1078,8 @@ static int r871x_wx_set_mlme(struct net_device *dev,
* For this operation to succeed, the interface is brought Up beforehand.
*
*/
-static int r8711_wx_set_scan(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1150,8 +1105,7 @@ static int r8711_wx_set_scan(struct net_device *dev,
unsigned long irqL;
u32 len = min_t(u8, req->essid_len, IW_ESSID_MAX_SIZE);
- memset((unsigned char *)&ssid, 0,
- sizeof(struct ndis_802_11_ssid));
+ memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
memcpy(ssid.Ssid, req->essid, len);
ssid.SsidLength = len;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
@@ -1173,9 +1127,8 @@ static int r8711_wx_set_scan(struct net_device *dev,
return 0;
}
-static int r8711_wx_get_scan(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1189,8 +1142,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
if (padapter->driver_stopped)
return -EINVAL;
- while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY |
- _FW_UNDER_LINKING)) {
+ while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING)) {
msleep(30);
cnt++;
if (cnt > 100)
@@ -1228,9 +1180,8 @@ static int r8711_wx_get_scan(struct net_device *dev,
* For this operation to succeed, there is no need for the interface to be Up.
*
*/
-static int r8711_wx_set_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_essid(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1268,8 +1219,7 @@ static int r8711_wx_set_essid(struct net_device *dev,
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
&& (pnetwork->network.Ssid.SsidLength ==
ndis_ssid.SsidLength)) {
- if (check_fwstate(pmlmepriv,
- WIFI_ADHOC_STATE)) {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
if (pnetwork->network.
InfrastructureMode
!=
@@ -1291,9 +1241,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
return -EINPROGRESS;
}
-static int r8711_wx_get_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_essid(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1311,9 +1260,8 @@ static int r8711_wx_get_essid(struct net_device *dev,
return ret;
}
-static int r8711_wx_set_rate(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_rate(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
u32 target_rate = wrqu->bitrate.value;
@@ -1382,8 +1330,7 @@ set_rate:
return r8712_setdatarate_cmd(padapter, datarates);
}
-static int r8711_wx_get_rate(struct net_device *dev,
- struct iw_request_info *info,
+static int r8711_wx_get_rate(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
@@ -1437,9 +1384,8 @@ static int r8711_wx_get_rate(struct net_device *dev,
return 0;
}
-static int r8711_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_get_rts(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
@@ -1448,9 +1394,8 @@ static int r8711_wx_get_rts(struct net_device *dev,
return 0;
}
-static int r8711_wx_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+static int r8711_wx_set_frag(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = netdev_priv(dev);
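
rtl871x_ioctl_linux.c continues the re-flow, compacting the cipher-to-privacy switch bodies onto single lines. The joined condition in r8711_wx_set_freq encodes a worked conversion: wireless extensions pass frequency as m * 10^e Hz, so with e == 1 a mantissa of 241200000 means 2.412 GHz, and f = m / 100000 recovers the value in MHz:

    int f = 241200000 / 100000;     /* = 2412 (MHz) -> channel 1 */
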
diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
index a08c5d2f5..bb9f83d58 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
@@ -126,7 +126,7 @@
#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
#define rFPGA0_AnalogParameter4 0x88c
-#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Tranceiver LSSI Readback */
+#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
#define rFPGA0_XB_LSSIReadBack 0x8a4
#define rFPGA0_XC_LSSIReadBack 0x8a8
#define rFPGA0_XD_LSSIReadBack 0x8ac
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 79bcd5bd4..04c1b32a2 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -663,17 +663,54 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
return ret;
}
-static struct fb_ops lynxfb_ops = {
+static const struct fb_ops lynxfb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_check_var = lynxfb_ops_check_var,
.fb_set_par = lynxfb_ops_set_par,
.fb_setcolreg = lynxfb_ops_setcolreg,
.fb_blank = lynxfb_ops_blank,
- .fb_fillrect = cfb_fillrect,
- .fb_imageblit = cfb_imageblit,
- .fb_copyarea = cfb_copyarea,
- /* cursor */
+ .fb_pan_display = lynxfb_ops_pan_display,
+};
+
+static const struct fb_ops lynxfb_ops_with_cursor = {
+ .owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_cursor = lynxfb_ops_cursor,
+};
+
+static const struct fb_ops lynxfb_ops_accel = {
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_fillrect = lynxfb_ops_fillrect,
+ .fb_copyarea = lynxfb_ops_copyarea,
+ .fb_imageblit = lynxfb_ops_imageblit,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
+};
+
+static const struct fb_ops lynxfb_ops_accel_with_cursor = {
+ .owner = THIS_MODULE,
+ __FB_DEFAULT_IOMEM_OPS_RDWR,
+ .fb_check_var = lynxfb_ops_check_var,
+ .fb_set_par = lynxfb_ops_set_par,
+ .fb_setcolreg = lynxfb_ops_setcolreg,
+ .fb_blank = lynxfb_ops_blank,
+ .fb_pan_display = lynxfb_ops_pan_display,
+ .fb_fillrect = lynxfb_ops_fillrect,
+ .fb_copyarea = lynxfb_ops_copyarea,
+ .fb_imageblit = lynxfb_ops_imageblit,
.fb_cursor = lynxfb_ops_cursor,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
static int lynxfb_set_fbinfo(struct fb_info *info, int index)
@@ -714,7 +751,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
par->index = index;
output->channel = &crtc->channel;
sm750fb_set_drv(par);
- lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
/*
* set current cursor variable and proc pointer,
@@ -731,19 +767,22 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset;
memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
- if (!g_hwcursor) {
- lynxfb_ops.fb_cursor = NULL;
+ if (!g_hwcursor)
sm750_hw_cursor_disable(&crtc->cursor);
- }
/* set info->fbops, must be set before fb_find_mode */
if (!sm750_dev->accel_off) {
/* use 2d acceleration */
- lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
- lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
- lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
+ if (!g_hwcursor)
+ info->fbops = &lynxfb_ops_accel;
+ else
+ info->fbops = &lynxfb_ops_accel_with_cursor;
+ } else {
+ if (!g_hwcursor)
+ info->fbops = &lynxfb_ops;
+ else
+ info->fbops = &lynxfb_ops_with_cursor;
}
- info->fbops = &lynxfb_ops;
if (!g_fbmode[index]) {
g_fbmode[index] = g_def_fbmode;
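
sm750.c gets the one structural change in this batch: the single mutable struct fb_ops that used to be patched at probe time (pan, cursor, and 2D accel hooks assigned into it) is replaced by four const variants covering the accel and hardware-cursor combinations, so the ops tables can live in read-only memory. The selection reduces to:

    if (!sm750_dev->accel_off)
            info->fbops = g_hwcursor ? &lynxfb_ops_accel_with_cursor
                                     : &lynxfb_ops_accel;
    else
            info->fbops = g_hwcursor ? &lynxfb_ops_with_cursor
                                     : &lynxfb_ops;

This is equivalent to the if/else ladder in the hunk, just folded into conditional expressions.
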
diff --git a/drivers/staging/vc04_services/interface/TODO b/drivers/staging/vc04_services/interface/TODO
index 6d9d4a800..05eb5140d 100644
--- a/drivers/staging/vc04_services/interface/TODO
+++ b/drivers/staging/vc04_services/interface/TODO
@@ -23,11 +23,6 @@ should properly handle a module unload. This also includes that all
resources must be freed (kthreads, debugfs entries, ...) and global
variables avoided.
-* Cleanup logging mechanism
-
-The driver should probably be using the standard kernel logging mechanisms
-such as dev_info, dev_dbg, and friends.
-
* Documentation
A short top-down description of this driver's architecture (function of
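
The TODO entry can go because the vchiq_arm.c hunks that follow complete it: the driver-private vchiq_log_trace/vchiq_log_debug/vchiq_log_error wrappers give way to the standard dev_dbg()/dev_warn()/dev_err() helpers, with the old category argument folded into a string prefix. Before and after, taken from the first pagelist hunk:

    vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
                    "%s - %pK", __func__, pagelist);
    dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);
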
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 9fb8f657c..1579bd4e5 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -255,8 +255,7 @@ create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
GFP_KERNEL);
- vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
- "%s - %pK", __func__, pagelist);
+ dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);
if (!pagelist)
return NULL;
@@ -311,9 +310,8 @@ create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
type == PAGELIST_READ, pages);
if (actual_pages != num_pages) {
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "%s - only %d/%d pages locked",
- __func__, actual_pages, num_pages);
+ dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
+ actual_pages, num_pages);
/* This is probably due to the process being killed */
if (actual_pages > 0)
@@ -407,8 +405,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
struct page **pages = pagelistinfo->pages;
unsigned int num_pages = pagelistinfo->num_pages;
- vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
- "%s - %pK, %d", __func__, pagelistinfo->pagelist, actual);
+ dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);
/*
* NOTE: dma_unmap_sg must be called before the
@@ -556,8 +553,8 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return -ENXIO;
}
- vchiq_log_debug(&pdev->dev, VCHIQ_ARM, "vchiq_init - done (slots %pK, phys %pad)",
- vchiq_slot_zero, &slot_phys);
+ dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
+ vchiq_slot_zero, &slot_phys);
vchiq_call_connected_callbacks();
@@ -659,13 +656,9 @@ vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
bulk->actual);
}
-int vchiq_dump_platform_state(void *dump_context)
+void vchiq_dump_platform_state(struct seq_file *f)
{
- char buf[80];
- int len;
-
- len = snprintf(buf, sizeof(buf), " Platform: 2835 (VC master)");
- return vchiq_dump(dump_context, buf, len + 1);
+ seq_puts(f, " Platform: 2835 (VC master)\n");
}
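
vchiq_dump_platform_state changes shape as well: instead of formatting into an 80-byte stack buffer and pushing it through vchiq_dump() with hand-counted lengths, it now receives a struct seq_file and calls seq_puts(), letting the seq_file core handle buffering; the int return value accordingly becomes void.
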
#define VCHIQ_INIT_RETRIES 10
@@ -687,19 +680,17 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
usleep_range(500, 600);
}
if (i == VCHIQ_INIT_RETRIES) {
- vchiq_log_error(state->dev, VCHIQ_CORE, "%s: videocore not initialized\n",
- __func__);
+ dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
ret = -ENOTCONN;
goto failed;
} else if (i > 0) {
- vchiq_log_warning(state->dev, VCHIQ_CORE,
- "%s: videocore initialized after %d retries\n", __func__, i);
+ dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
+ __func__, i);
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "%s: error allocating vchiq instance\n", __func__);
+ dev_err(state->dev, "core: %s: Cannot allocate vchiq instance\n", __func__);
ret = -ENOMEM;
goto failed;
}
@@ -714,8 +705,7 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
ret = 0;
failed:
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%s(%p): returning %d", __func__, instance, ret);
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
return ret;
}
@@ -728,9 +718,9 @@ void free_bulk_waiter(struct vchiq_instance *instance)
list_for_each_entry_safe(waiter, next,
&instance->bulk_waiter_list, list) {
list_del(&waiter->list);
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "bulk_waiter - cleaned up %pK for pid %d",
- waiter, waiter->pid);
+ dev_dbg(instance->state->dev,
+ "arm: bulk_waiter - cleaned up %pK for pid %d\n",
+ waiter, waiter->pid);
kfree(waiter);
}
}
@@ -748,8 +738,7 @@ int vchiq_shutdown(struct vchiq_instance *instance)
mutex_unlock(&state->mutex);
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%s(%p): returning %d", __func__, instance, status);
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
free_bulk_waiter(instance);
kfree(instance);
@@ -769,8 +758,8 @@ int vchiq_connect(struct vchiq_instance *instance)
struct vchiq_state *state = instance->state;
if (mutex_lock_killable(&state->mutex)) {
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%s: call to mutex_lock failed", __func__);
+ dev_dbg(state->dev,
+ "core: call to mutex_lock failed\n");
status = -EAGAIN;
goto failed;
}
@@ -782,8 +771,7 @@ int vchiq_connect(struct vchiq_instance *instance)
mutex_unlock(&state->mutex);
failed:
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%s(%p): returning %d", __func__, instance, status);
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
return status;
}
@@ -814,8 +802,7 @@ vchiq_add_service(struct vchiq_instance *instance,
status = -EINVAL;
}
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%s(%p): returning %d", __func__, instance, status);
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
return status;
}
@@ -846,8 +833,7 @@ vchiq_open_service(struct vchiq_instance *instance,
}
failed:
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%s(%p): returning %d", __func__, instance, status);
+ dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
return status;
}
@@ -971,8 +957,7 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
} else {
waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
- vchiq_log_error(service->state->dev, VCHIQ_CORE,
- "%s - out of memory", __func__);
+ dev_err(service->state->dev, "core: %s: Out of memory\n", __func__);
return -ENOMEM;
}
}
@@ -995,9 +980,8 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "saved bulk_waiter %pK for pid %d", waiter,
- current->pid);
+ dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+ waiter, current->pid);
}
return status;
@@ -1017,16 +1001,13 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
/* Out of space - wait for the client */
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
- vchiq_log_trace(instance->state->dev, VCHIQ_CORE,
- "%s - completion queue full", __func__);
+ dev_dbg(instance->state->dev, "core: completion queue full\n");
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
if (wait_for_completion_interruptible(&instance->remove_event)) {
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "service_callback interrupted");
+ dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
return -EAGAIN;
} else if (instance->closing) {
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "service_callback closing");
+ dev_dbg(instance->state->dev, "arm: service_callback closing\n");
return 0;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1106,11 +1087,10 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
vchiq_service_get(service);
rcu_read_unlock();
- vchiq_log_trace(service->state->dev, VCHIQ_ARM,
- "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
- __func__, (unsigned long)user_service, service->localport,
- user_service->userdata, reason, (unsigned long)header,
- (unsigned long)instance, (unsigned long)bulk_userdata);
+ dev_dbg(service->state->dev,
+ "arm: service %p(%d,%p), reason %d, header %p, instance %p, bulk_userdata %p\n",
+ user_service, service->localport, user_service->userdata,
+ reason, header, instance, bulk_userdata);
if (header && user_service->is_vchi) {
spin_lock(&msg_queue_spinlock);
@@ -1119,8 +1099,7 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
- vchiq_log_trace(service->state->dev, VCHIQ_ARM,
- "%s - msg queue full", __func__);
+ dev_dbg(service->state->dev, "arm: msg queue full\n");
/*
* If there is no MESSAGE_AVAILABLE in the completion
* queue, add one
@@ -1129,8 +1108,8 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
instance->completion_remove) < 0) {
int status;
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "Inserting extra MESSAGE_AVAILABLE");
+ dev_dbg(instance->state->dev,
+ "arm: Inserting extra MESSAGE_AVAILABLE\n");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
status = add_completion(instance, reason, NULL, user_service,
bulk_userdata);
@@ -1143,14 +1122,12 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
if (wait_for_completion_interruptible(&user_service->remove_event)) {
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "%s interrupted", __func__);
+ dev_dbg(instance->state->dev, "arm: interrupted\n");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
return -EAGAIN;
} else if (instance->closing) {
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "%s closing", __func__);
+ dev_dbg(instance->state->dev, "arm: closing\n");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
return -EINVAL;
@@ -1190,56 +1167,13 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
bulk_userdata);
}
-int vchiq_dump(void *dump_context, const char *str, int len)
-{
- struct dump_context *context = (struct dump_context *)dump_context;
- int copy_bytes;
-
- if (context->actual >= context->space)
- return 0;
-
- if (context->offset > 0) {
- int skip_bytes = min_t(int, len, context->offset);
-
- str += skip_bytes;
- len -= skip_bytes;
- context->offset -= skip_bytes;
- if (context->offset > 0)
- return 0;
- }
- copy_bytes = min_t(int, len, context->space - context->actual);
- if (copy_bytes == 0)
- return 0;
- if (copy_to_user(context->buf + context->actual, str,
- copy_bytes))
- return -EFAULT;
- context->actual += copy_bytes;
- len -= copy_bytes;
-
- /*
- * If the terminating NUL is included in the length, then it
- * marks the end of a line and should be replaced with a
- * carriage return.
- */
- if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
- char cr = '\n';
-
- if (copy_to_user(context->buf + context->actual - 1,
- &cr, 1))
- return -EFAULT;
- }
- return 0;
-}
-
-int vchiq_dump_platform_instances(void *dump_context)
+void vchiq_dump_platform_instances(struct seq_file *f)
{
struct vchiq_state *state = vchiq_get_state();
- char buf[80];
- int len;
int i;
if (!state)
- return -ENOTCONN;
+ return;
/*
* There is no list of instances, so instead scan all services,
@@ -1264,7 +1198,6 @@ int vchiq_dump_platform_instances(void *dump_context)
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service;
struct vchiq_instance *instance;
- int err;
rcu_read_lock();
service = rcu_dereference(state->services[i]);
@@ -1280,43 +1213,35 @@ int vchiq_dump_platform_instances(void *dump_context)
}
rcu_read_unlock();
- len = snprintf(buf, sizeof(buf),
- "Instance %pK: pid %d,%s completions %d/%d",
- instance, instance->pid,
- instance->connected ? " connected, " :
- "",
- instance->completion_insert -
- instance->completion_remove,
- MAX_COMPLETIONS);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+ seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
+ instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
instance->mark = 1;
}
- return 0;
}
-int vchiq_dump_platform_service_state(void *dump_context,
- struct vchiq_service *service)
+void vchiq_dump_platform_service_state(struct seq_file *f,
+ struct vchiq_service *service)
{
struct user_service *user_service =
(struct user_service *)service->base.userdata;
- char buf[80];
- int len;
- len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
+ seq_printf(f, " instance %pK", service->instance);
if ((service->base.callback == service_callback) && user_service->is_vchi) {
- len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
- user_service->msg_insert - user_service->msg_remove,
- MSG_QUEUE_SIZE);
+ seq_printf(f, ", %d/%d messages",
+ user_service->msg_insert - user_service->msg_remove,
+ MSG_QUEUE_SIZE);
if (user_service->dequeue_pending)
- len += scnprintf(buf + len, sizeof(buf) - len,
- " (dequeue pending)");
+ seq_puts(f, " (dequeue pending)");
}
- return vchiq_dump(dump_context, buf, len + 1);
+ seq_puts(f, "\n");
}
struct vchiq_state *
@@ -1346,8 +1271,8 @@ vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
struct vchiq_header *header,
unsigned int service_user, void *bulk_user)
{
- vchiq_log_error(instance->state->dev, VCHIQ_SUSPEND,
- "%s callback reason %d", __func__, reason);
+ dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
+ __func__, reason);
return 0;
}
@@ -1371,22 +1296,20 @@ vchiq_keepalive_thread_func(void *v)
ret = vchiq_initialise(&instance);
if (ret) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "%s vchiq_initialise failed %d", __func__, ret);
+ dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
goto exit;
}
status = vchiq_connect(instance);
if (status) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "%s vchiq_connect failed %d", __func__, status);
+ dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, status);
goto shutdown;
}
status = vchiq_add_service(instance, &params, &ka_handle);
if (status) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "%s vchiq_open_service failed %d", __func__, status);
+ dev_err(state->dev, "suspend: %s: vchiq_add_service failed %d\n",
+ __func__, status);
goto shutdown;
}
@@ -1394,8 +1317,7 @@ vchiq_keepalive_thread_func(void *v)
long rc = 0, uc = 0;
if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "%s interrupted", __func__);
+ dev_err(state->dev, "suspend: %s: interrupted\n", __func__);
flush_signals(current);
continue;
}
@@ -1415,16 +1337,15 @@ vchiq_keepalive_thread_func(void *v)
atomic_inc(&arm_state->ka_use_ack_count);
status = vchiq_use_service(instance, ka_handle);
if (status) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "%s vchiq_use_service error %d", __func__, status);
+ dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
+ __func__, status);
}
}
while (rc--) {
status = vchiq_release_service(instance, ka_handle);
if (status) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "%s vchiq_release_service error %d", __func__,
- status);
+ dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
+ __func__, status);
}
}
}
@@ -1459,7 +1380,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
service->client_id);
entity_uc = &service->service_use_count;
} else {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND, "%s null service ptr", __func__);
+ dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
ret = -EINVAL;
goto out;
}
@@ -1468,8 +1389,8 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
local_uc = ++arm_state->videocore_use_count;
++(*entity_uc);
- vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s %s count %d, state count %d",
- __func__, entity, *entity_uc, local_uc);
+ dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
+ entity, *entity_uc, local_uc);
write_unlock_bh(&arm_state->susp_res_lock);
@@ -1488,7 +1409,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
out:
- vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s exit %d", __func__, ret);
+ dev_dbg(state->dev, "suspend: exit %d\n", ret);
return ret;
}
@@ -1526,14 +1447,14 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
--arm_state->videocore_use_count;
--(*entity_uc);
- vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s %s count %d, state count %d",
- __func__, entity, *entity_uc, arm_state->videocore_use_count);
+ dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
+ entity, *entity_uc, arm_state->videocore_use_count);
unlock:
write_unlock_bh(&arm_state->susp_res_lock);
out:
- vchiq_log_trace(state->dev, VCHIQ_SUSPEND, "%s exit %d", __func__, ret);
+ dev_dbg(state->dev, "suspend: exit %d\n", ret);
return ret;
}
@@ -1707,20 +1628,19 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
read_unlock_bh(&arm_state->susp_res_lock);
if (only_nonzero)
- vchiq_log_warning(state->dev, VCHIQ_SUSPEND,
- "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
- active_services, found);
+ dev_warn(state->dev,
+ "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
+ active_services, found);
for (i = 0; i < found; i++) {
- vchiq_log_warning(state->dev, VCHIQ_SUSPEND,
- "%p4cc:%d service count %d %s",
- &service_data[i].fourcc,
- service_data[i].clientid, service_data[i].use_count,
- service_data[i].use_count ? nz : "");
+ dev_warn(state->dev,
+ "suspend: %p4cc:%d service count %d %s\n",
+ &service_data[i].fourcc,
+ service_data[i].clientid, service_data[i].use_count,
+ service_data[i].use_count ? nz : "");
}
- vchiq_log_warning(state->dev, VCHIQ_SUSPEND, "VCHIQ use count %d", peer_count);
- vchiq_log_warning(state->dev, VCHIQ_SUSPEND, "Overall vchiq instance use count %d",
- vc_use_count);
+ dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
+ dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);
kfree(service_data);
}
@@ -1742,10 +1662,10 @@ vchiq_check_service(struct vchiq_service *service)
read_unlock_bh(&arm_state->susp_res_lock);
if (ret) {
- vchiq_log_error(service->state->dev, VCHIQ_SUSPEND,
- "%s ERROR - %p4cc:%d service count %d, state count %d", __func__,
- &service->base.fourcc, service->client_id,
- service->service_use_count, arm_state->videocore_use_count);
+ dev_err(service->state->dev,
+ "suspend: %s: %p4cc:%d service count %d, state count %d\n",
+ __func__, &service->base.fourcc, service->client_id,
+ service->service_use_count, arm_state->videocore_use_count);
vchiq_dump_service_use_state(service->state);
}
out:
@@ -1759,8 +1679,8 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
char threadname[16];
- vchiq_log_debug(state->dev, VCHIQ_SUSPEND, "%d: %s->%s", state->id,
- get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ dev_dbg(state->dev, "suspend: %d: %s->%s\n",
+ state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
return;
@@ -1778,9 +1698,8 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
(void *)state,
threadname);
if (IS_ERR(arm_state->ka_thread)) {
- vchiq_log_error(state->dev, VCHIQ_SUSPEND,
- "vchiq: FATAL: couldn't create thread %s",
- threadname);
+ dev_err(state->dev, "suspend: Couldn't create thread %s\n",
+ threadname);
} else {
wake_up_process(arm_state->ka_thread);
}
@@ -1825,9 +1744,8 @@ static int vchiq_probe(struct platform_device *pdev)
vchiq_debugfs_init();
- vchiq_log_debug(&pdev->dev, VCHIQ_ARM,
- "vchiq: platform initialised - version %d (min %d)",
- VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+ dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
/*
* Simply exit on error since the function handles cleanup in
@@ -1835,8 +1753,7 @@ static int vchiq_probe(struct platform_device *pdev)
*/
err = vchiq_register_chrdev(&pdev->dev);
if (err) {
- vchiq_log_warning(&pdev->dev, VCHIQ_ARM,
- "Failed to initialize vchiq cdev");
+ dev_warn(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
goto error_exit;
}
@@ -1846,7 +1763,7 @@ static int vchiq_probe(struct platform_device *pdev)
return 0;
failed_platform_init:
- vchiq_log_warning(&pdev->dev, VCHIQ_ARM, "could not initialize vchiq platform");
+ dev_warn(&pdev->dev, "arm: Could not initialize vchiq platform\n");
error_exit:
return err;
}
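Besides the logging conversion, this file also switches the state-dump path from the hand-rolled vchiq_dump() (which copied text straight into a user buffer) to seq_file: the dump helpers now take a struct seq_file * and emit output with seq_printf()/seq_puts(). A hedged sketch of how such a show function is typically exposed through debugfs — the real hook-up lives in vchiq_debugfs.c and is not part of this hunk, so the names below are illustrative:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int vchiq_state_show(struct seq_file *f, void *unused)
{
	struct vchiq_state *state = f->private; /* set by single_open() */

	vchiq_dump_state(f, state); /* seq_printf()s directly into f */
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(vchiq_state);

/* debugfs_create_file("state", 0444, parent, state, &vchiq_state_fops); */

With this shape, seq_file owns the buffering and read-offset bookkeeping, and the dump helpers can return void instead of threading error codes back up.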
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index 7cdc3d70b..7844ef765 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -69,13 +69,6 @@ struct vchiq_instance {
struct vchiq_debugfs_node debugfs_node;
};
-struct dump_context {
- char __user *buf;
- size_t actual;
- size_t space;
- loff_t offset;
-};
-
extern spinlock_t msg_queue_spinlock;
extern struct vchiq_state g_state;
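The struct dump_context deleted above was a hand-rolled pagination state machine for the old read() path; struct seq_file (include/linux/seq_file.h) already carries equivalent state, so callers only produce output. The correspondence, as a rough gloss (mine, not from the patch):

/*
 *   dump_context.buf    -> seq_file.buf   (kernel buffer; no longer __user)
 *   dump_context.actual -> seq_file.count (bytes produced so far)
 *   dump_context.space  -> seq_file.size  (buffer capacity)
 *   dump_context.offset -> seq_file.from  (resume point for partial reads)
 *
 * seq_read() performs the copy_to_user() and retries with a larger
 * buffer when the output overflows, which is why the -EFAULT plumbing
 * disappears from every dump helper in this patch.
 */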
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 21f9fa1a1..3cad13f09 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -39,9 +39,9 @@ void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(
callback();
} else {
if (g_num_deferred_callbacks >= MAX_CALLBACKS) {
- vchiq_log_error(&device->dev, VCHIQ_CORE,
- "There already %d callback registered - please increase MAX_CALLBACKS",
- g_num_deferred_callbacks);
+ dev_err(&device->dev,
+ "core: There already %d callback registered - please increase MAX_CALLBACKS\n",
+ g_num_deferred_callbacks);
} else {
g_deferred_callback[g_num_deferred_callbacks] =
callback;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 8a9eb0101..76c277781 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -217,10 +217,10 @@ static const char *msg_type_str(unsigned int msg_type)
static inline void
set_service_state(struct vchiq_service *service, int newstate)
{
- vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: srv:%d %s->%s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate],
- srvstate_names[newstate]);
+ dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate],
+ srvstate_names[newstate]);
service->srvstate = newstate;
}
@@ -245,8 +245,7 @@ find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
return service;
}
rcu_read_unlock();
- vchiq_log_debug(instance->state->dev, VCHIQ_CORE,
- "Invalid service handle 0x%x", handle);
+ dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
return NULL;
}
@@ -266,8 +265,7 @@ find_service_by_port(struct vchiq_state *state, unsigned int localport)
}
rcu_read_unlock();
}
- vchiq_log_debug(state->dev, VCHIQ_CORE,
- "Invalid port %u", localport);
+ dev_dbg(state->dev, "core: Invalid port %u\n", localport);
return NULL;
}
@@ -287,8 +285,7 @@ find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
return service;
}
rcu_read_unlock();
- vchiq_log_debug(instance->state->dev, VCHIQ_CORE,
- "Invalid service handle 0x%x", handle);
+ dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
return NULL;
}
@@ -310,8 +307,7 @@ find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int h
return service;
}
rcu_read_unlock();
- vchiq_log_debug(instance->state->dev, VCHIQ_CORE,
- "Invalid service handle 0x%x", handle);
+ dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
return service;
}
@@ -459,15 +455,15 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
{
int status;
- vchiq_log_trace(service->state->dev, VCHIQ_CORE, "%d: callback:%d (%s, %pK, %pK)",
- service->state->id, service->localport, reason_names[reason],
- header, bulk_userdata);
+ dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK)\n",
+ service->state->id, service->localport, reason_names[reason],
+ header, bulk_userdata);
status = service->base.callback(service->instance, reason, header, service->handle,
bulk_userdata);
if (status && (status != -EAGAIN)) {
- vchiq_log_warning(service->state->dev, VCHIQ_CORE,
- "%d: ignoring ERROR from callback to service %x",
- service->state->id, service->handle);
+ dev_warn(service->state->dev,
+ "core: %d: ignoring ERROR from callback to service %x\n",
+ service->state->id, service->handle);
status = 0;
}
@@ -482,8 +478,8 @@ vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
enum vchiq_connstate oldstate = state->conn_state;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: %s->%s", state->id, conn_state_names[oldstate],
- conn_state_names[newstate]);
+ dev_dbg(state->dev, "core: %d: %s->%s\n",
+ state->id, conn_state_names[oldstate], conn_state_names[newstate]);
state->conn_state = newstate;
vchiq_platform_conn_state_changed(state, oldstate, newstate);
}
@@ -741,10 +737,10 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
*/
complete(&quota->quota_event);
} else if (count == 0) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
- port, quota->message_use_count, header, msgid, header->msgid,
- header->size);
+ dev_err(state->dev,
+ "core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+ port, quota->message_use_count, header, msgid,
+ header->msgid, header->size);
WARN(1, "invalid message use count\n");
}
if (!BITSET_IS_SET(service_found, port)) {
@@ -763,12 +759,12 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
* it has dropped below its quota
*/
complete(&quota->quota_event);
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: pfq:%d %x@%pK - slot_use->%d",
- state->id, port, header->size, header, count - 1);
+ dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
+ state->id, port, header->size, header, count - 1);
} else {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
- port, count, header, msgid, header->msgid, header->size);
+ dev_err(state->dev,
+ "core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+ port, count, header, msgid, header->msgid, header->size);
WARN(1, "bad slot use count\n");
}
}
@@ -809,9 +805,9 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
*/
rmb();
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: pfq %d=%pK %x %x",
- state->id, slot_index, data, local->slot_queue_recycle,
- slot_queue_available);
+ dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
+ state->id, slot_index, data, local->slot_queue_recycle,
+ slot_queue_available);
/* Initialise the bitmask for services which have used this slot */
memset(service_found, 0, length);
@@ -831,9 +827,9 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
- pos, header, msgid, header->msgid, header->size);
+ dev_err(state->dev,
+ "core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+ pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -980,10 +976,10 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
((tx_end_index != quota->previous_tx_index) &&
(quota->slot_use_count == quota->slot_quota))) {
spin_unlock(&quota_spinlock);
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
- state->id, service->localport, msg_type_str(type), size,
- quota->message_use_count, quota->slot_use_count);
+ dev_dbg(state->dev,
+ "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n",
+ state->id, service->localport, msg_type_str(type), size,
+ quota->message_use_count, quota->slot_use_count);
VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
mutex_unlock(&state->slot_mutex);
if (wait_for_completion_interruptible(&quota->quota_event))
@@ -1023,9 +1019,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
int tx_end_index;
int slot_use_count;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
- VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+ dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
QMFLAGS_NO_MUTEX_UNLOCK));
@@ -1073,17 +1069,16 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
spin_unlock(&quota_spinlock);
if (slot_use_count)
- vchiq_log_trace(state->dev, VCHIQ_CORE,
- "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
- service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- size, slot_use_count, header);
+ dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n",
+ state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ size, slot_use_count, header);
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
- VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
+ dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
/*
* It is assumed for now that this code path
@@ -1111,11 +1106,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
? service->base.fourcc
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_debug(state->dev, VCHIQ_CORE_MSG,
- "Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu",
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
- &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid), size);
+ dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size);
}
/* Make sure the new header is visible to the peer. */
@@ -1167,15 +1160,13 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int oldmsgid = header->msgid;
if (oldmsgid != VCHIQ_MSGID_PADDING)
- vchiq_log_error(state->dev, VCHIQ_CORE, "%d: qms - msgid %x, not PADDING",
- state->id, oldmsgid);
+ dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n",
+ state->id, oldmsgid);
}
- vchiq_log_debug(state->dev, VCHIQ_SYNC,
- "%d: qms %s@%pK,%x (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- header, size, VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
+ dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
+ state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
callback_result =
copy_message_data(copy_callback, context,
@@ -1205,11 +1196,11 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
svc_fourcc = service ? service->base.fourcc
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_trace(state->dev, VCHIQ_SYNC,
- "Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d",
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
- &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid), size);
+ dev_dbg(state->dev,
+ "sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid), size);
remote_event_signal(&state->remote->sync_trigger);
@@ -1261,9 +1252,9 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
VCHIQ_SLOT_QUEUE_MASK] =
SLOT_INDEX_FROM_INFO(state, slot_info);
state->remote->slot_queue_recycle = slot_queue_recycle + 1;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: %s %d - recycle->%x",
- state->id, __func__, SLOT_INDEX_FROM_INFO(state, slot_info),
- state->remote->slot_queue_recycle);
+ dev_dbg(state->dev, "core: %d: %d - recycle->%x\n",
+ state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
+ state->remote->slot_queue_recycle);
/*
* A write barrier is necessary, but remote_event_signal
@@ -1298,11 +1289,11 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
{
int status = 0;
- vchiq_log_trace(service->state->dev, VCHIQ_CORE,
- "%d: nb:%d %cx - p=%x rn=%x r=%x",
- service->state->id, service->localport,
- (queue == &service->bulk_tx) ? 't' : 'r',
- queue->process, queue->remote_notify, queue->remove);
+ dev_dbg(service->state->dev,
+ "core: %d: nb:%d %cx - p=%x rn=%x r=%x\n",
+ service->state->id, service->localport,
+ (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->process, queue->remote_notify, queue->remove);
queue->remote_notify = queue->process;
@@ -1382,8 +1373,8 @@ poll_services_of_group(struct vchiq_state *state, int group)
service_flags = atomic_xchg(&service->poll_flags, 0);
if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: ps - remove %d<->%d",
- state->id, service->localport, service->remoteport);
+ dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n",
+ state->id, service->localport, service->remoteport);
/*
* Make it look like a client, because
@@ -1395,8 +1386,8 @@ poll_services_of_group(struct vchiq_state *state, int group)
if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
request_poll(state, service, VCHIQ_POLL_REMOVE);
} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: ps - terminate %d<->%d",
- state->id, service->localport, service->remoteport);
+ dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n",
+ state->id, service->localport, service->remoteport);
if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
request_poll(state, service, VCHIQ_POLL_TERMINATE);
}
@@ -1425,11 +1416,11 @@ abort_outstanding_bulks(struct vchiq_service *service,
{
int is_tx = (queue == &service->bulk_tx);
- vchiq_log_trace(service->state->dev, VCHIQ_CORE,
- "%d: aob:%d %cx - li=%x ri=%x p=%x",
- service->state->id, service->localport,
- is_tx ? 't' : 'r', queue->local_insert,
- queue->remote_insert, queue->process);
+ dev_dbg(service->state->dev,
+ "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n",
+ service->state->id, service->localport,
+ is_tx ? 't' : 'r', queue->local_insert,
+ queue->remote_insert, queue->process);
WARN_ON((int)(queue->local_insert - queue->process) < 0);
WARN_ON((int)(queue->remote_insert - queue->process) < 0);
@@ -1448,11 +1439,11 @@ abort_outstanding_bulks(struct vchiq_service *service,
if (queue->process != queue->local_insert) {
vchiq_complete_bulk(service->instance, bulk);
- vchiq_log_debug(service->state->dev, VCHIQ_CORE_MSG,
- "%s %p4cc d:%d ABORTED - tx len:%d, rx len:%d",
- is_tx ? "Send Bulk to" : "Recv Bulk from",
- &service->base.fourcc,
- service->remoteport, bulk->size, bulk->remote_size);
+ dev_dbg(service->state->dev,
+ "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n",
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
+ &service->base.fourcc,
+ service->remoteport, bulk->size, bulk->remote_size);
} else {
/* fabricate a matching dummy bulk */
bulk->data = 0;
@@ -1485,8 +1476,8 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
payload = (struct vchiq_open_payload *)header->data;
fourcc = payload->fourcc;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs OPEN@%pK (%d->'%p4cc')",
- state->id, header, localport, &fourcc);
+ dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
+ state->id, header, localport, &fourcc);
service = get_listening_service(state, fourcc);
if (!service)
@@ -1609,17 +1600,17 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
vchiq_service_put(service);
service = get_connected_service(state, remoteport);
if (service)
- vchiq_log_warning(state->dev, VCHIQ_CORE,
- "%d: prs %s@%pK (%d->%d) - found connected service %d",
- state->id, msg_type_str(type), header,
- remoteport, localport, service->localport);
+ dev_warn(state->dev,
+ "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
+ state->id, msg_type_str(type), header,
+ remoteport, localport, service->localport);
}
if (!service) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
- state->id, msg_type_str(type), header, remoteport,
- localport, localport);
+ dev_err(state->dev,
+ "core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, localport);
goto skip_message;
}
break;
@@ -1631,18 +1622,15 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
svc_fourcc = service ? service->base.fourcc
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
- vchiq_log_debug(state->dev, VCHIQ_CORE_MSG,
- "Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d",
- msg_type_str(type), type, &svc_fourcc,
- remoteport, localport, size);
+ dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n",
+ msg_type_str(type), type, &svc_fourcc, remoteport, localport, size);
if (size > 0)
vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
if (((unsigned long)header & VCHIQ_SLOT_MASK) +
calc_stride(size) > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "header %pK (msgid %x) - size %x too big for slot",
- header, (unsigned int)msgid, (unsigned int)size);
+ dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
+ header, (unsigned int)msgid, (unsigned int)size);
WARN(1, "oversized for slot\n");
}
@@ -1659,37 +1647,36 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
header->data;
service->peer_version = payload->version;
}
- vchiq_log_debug(state->dev, VCHIQ_CORE,
- "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
+ dev_dbg(state->dev,
+ "core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
set_service_state(service, VCHIQ_SRVSTATE_OPEN);
complete(&service->remove_event);
} else {
- vchiq_log_error(state->dev, VCHIQ_CORE, "OPENACK received in state %s",
- srvstate_names[service->srvstate]);
+ dev_err(state->dev, "core: OPENACK received in state %s\n",
+ srvstate_names[service->srvstate]);
}
break;
case VCHIQ_MSG_CLOSE:
WARN_ON(size); /* There should be no data */
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs CLOSE@%pK (%d->%d)",
- state->id, header, remoteport, localport);
+ dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
+ state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
goto bail_not_ready;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "Close Service %p4cc s:%u d:%d",
- &service->base.fourcc,
- service->localport, service->remoteport);
+ dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n",
+ &service->base.fourcc, service->localport, service->remoteport);
break;
case VCHIQ_MSG_DATA:
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs DATA@%pK,%x (%d->%d)",
- state->id, header, size, remoteport, localport);
+ dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
@@ -1708,8 +1695,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_CONNECT:
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: prs CONNECT@%pK",
- state->id, header);
+ dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
+ state->id, header);
state->version_common = ((struct vchiq_slot_zero *)
state->slot_data)->version;
complete(&state->connect);
@@ -1740,11 +1727,10 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
- state->id, msg_type_str(type), header, remoteport,
- localport, queue->remote_insert,
- queue->local_insert);
+ dev_err(state->dev,
+ "core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, queue->remote_insert, queue->local_insert);
mutex_unlock(&service->bulk_mutex);
break;
}
@@ -1761,15 +1747,14 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
bulk->actual = *(int *)header->data;
queue->remote_insert++;
- vchiq_log_debug(state->dev, VCHIQ_CORE,
- "%d: prs %s@%pK (%d->%d) %x@%pad",
- state->id, msg_type_str(type), header, remoteport,
- localport, bulk->actual, &bulk->data);
+ dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, bulk->actual, &bulk->data);
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs:%d %cx li=%x ri=%x p=%x",
- state->id, localport,
- (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
- queue->local_insert, queue->remote_insert, queue->process);
+ dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n",
+ state->id, localport,
+ (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
+ queue->local_insert, queue->remote_insert, queue->process);
DEBUG_TRACE(PARSE_LINE);
WARN_ON(queue->process == queue->local_insert);
@@ -1782,16 +1767,16 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_PADDING:
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs PADDING@%pK,%x",
- state->id, header, size);
+ dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
+ state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs PAUSE@%pK,%x",
- state->id, header, size);
+ dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
+ state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "%d: PAUSE received in state PAUSED", state->id);
+ dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
+ state->id);
break;
}
if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
@@ -1804,8 +1789,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
break;
case VCHIQ_MSG_RESUME:
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: prs RESUME@%pK,%x",
- state->id, header, size);
+ dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
+ state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
@@ -1821,8 +1806,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
break;
default:
- vchiq_log_error(state->dev, VCHIQ_CORE, "%d: prs invalid msgid %x@%pK,%x",
- state->id, msgid, header, size);
+ dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
+ state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
}
@@ -1932,7 +1917,7 @@ handle_poll(struct vchiq_state *state)
* since the PAUSE should have flushed
* through outstanding messages.
*/
- vchiq_log_error(state->dev, VCHIQ_CORE, "Failed to send RESUME message");
+ dev_err(state->dev, "core: Failed to send RESUME message\n");
}
break;
default:
@@ -2032,21 +2017,18 @@ sync_func(void *v)
service = find_service_by_port(state, localport);
if (!service) {
- vchiq_log_error(state->dev, VCHIQ_SYNC,
- "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
- state->id, msg_type_str(type), header,
- remoteport, localport, localport);
+ dev_err(state->dev,
+ "sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, localport);
release_message_sync(state, header);
continue;
}
- svc_fourcc = service ? service->base.fourcc
- : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
+ svc_fourcc = service->base.fourcc;
- vchiq_log_trace(state->dev, VCHIQ_SYNC,
- "Rcvd Msg %s from %p4cc s:%d d:%d len:%d",
- msg_type_str(type), &svc_fourcc,
- remoteport, localport, size);
+ dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n",
+ msg_type_str(type), &svc_fourcc, remoteport, localport, size);
if (size > 0)
vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
@@ -2058,9 +2040,9 @@ sync_func(void *v)
header->data;
service->peer_version = payload->version;
}
- vchiq_log_debug(state->dev, VCHIQ_SYNC, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
+ dev_dbg(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
@@ -2071,22 +2053,22 @@ sync_func(void *v)
break;
case VCHIQ_MSG_DATA:
- vchiq_log_trace(state->dev, VCHIQ_SYNC, "%d: sf DATA@%pK,%x (%d->%d)",
- state->id, header, size, remoteport, localport);
+ dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
NULL) == -EAGAIN)
- vchiq_log_error(state->dev, VCHIQ_SYNC,
- "synchronous callback to service %d returns -EAGAIN",
- localport);
+ dev_err(state->dev,
+ "sync: error: synchronous callback to service %d returns -EAGAIN\n",
+ localport);
}
break;
default:
- vchiq_log_error(state->dev, VCHIQ_SYNC, "%d: sf unexpected msgid %x@%pK,%x",
- state->id, msgid, header, size);
+ dev_err(state->dev, "sync: %d: sf unexpected msgid %x@%pK,%x\n",
+ state->id, msgid, header, size);
release_message_sync(state, header);
break;
}
@@ -2119,8 +2101,8 @@ vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
num_slots -= first_data_slot;
if (num_slots < 4) {
- vchiq_log_error(dev, VCHIQ_CORE, "%s - insufficient memory %x bytes",
- __func__, mem_size);
+ dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
+ __func__, mem_size);
return NULL;
}
@@ -2462,9 +2444,9 @@ vchiq_add_service_internal(struct vchiq_state *state,
/* Bring this service online */
set_service_state(service, srvstate);
- vchiq_log_debug(state->dev, VCHIQ_CORE_MSG, "%s Service %p4cc SrcPort:%d",
- (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
- &params->fourcc, service->localport);
+ dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
+ (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
+ &params->fourcc, service->localport);
/* Don't unlock the service - leave it with a ref_count of 1. */
@@ -2501,11 +2483,10 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
(service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
- vchiq_log_error(service->state->dev, VCHIQ_CORE,
- "%d: osi - srvstate = %s (ref %u)",
- service->state->id,
- srvstate_names[service->srvstate],
- kref_read(&service->ref_count));
+ dev_err(service->state->dev,
+ "core: %d: osi - srvstate = %s (ref %u)\n",
+ service->state->id, srvstate_names[service->srvstate],
+ kref_read(&service->ref_count));
status = -EINVAL;
VCHIQ_SERVICE_STATS_INC(service, error_count);
vchiq_release_service_internal(service);
@@ -2560,15 +2541,14 @@ release_service_messages(struct vchiq_service *service)
int port = VCHIQ_MSG_DSTPORT(msgid);
if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
- vchiq_log_debug(state->dev, VCHIQ_CORE,
- " fsi - hdr %pK", header);
+ dev_dbg(state->dev, "core: fsi - hdr %pK\n", header);
release_slot(state, slot_info, header, NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
- vchiq_log_error(state->dev, VCHIQ_CORE,
- "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
- pos, header, msgid, header->msgid, header->size);
+ dev_err(state->dev,
+ "core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+ pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -2622,8 +2602,8 @@ close_service_complete(struct vchiq_service *service, int failstate)
case VCHIQ_SRVSTATE_LISTENING:
break;
default:
- vchiq_log_error(service->state->dev, VCHIQ_CORE, "%s(%x) called in state %s",
- __func__, service->handle, srvstate_names[service->srvstate]);
+ dev_err(service->state->dev, "core: (%x) called in state %s\n",
+ service->handle, srvstate_names[service->srvstate]);
WARN(1, "%s in unexpected state\n", __func__);
return -EINVAL;
}
@@ -2669,8 +2649,9 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
int close_id = MAKE_CLOSE(service->localport,
VCHIQ_MSG_DSTPORT(service->remoteport));
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: csi:%d,%d (%s)", service->state->id,
- service->localport, close_recvd, srvstate_names[service->srvstate]);
+ dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
+ service->state->id, service->localport, close_recvd,
+ srvstate_names[service->srvstate]);
switch (service->srvstate) {
case VCHIQ_SRVSTATE_CLOSED:
@@ -2678,8 +2659,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
case VCHIQ_SRVSTATE_LISTENING:
case VCHIQ_SRVSTATE_CLOSEWAIT:
if (close_recvd) {
- vchiq_log_error(state->dev, VCHIQ_CORE, "%s(1) called in state %s",
- __func__, srvstate_names[service->srvstate]);
+ dev_err(state->dev, "core: (1) called in state %s\n",
+ srvstate_names[service->srvstate]);
} else if (is_server) {
if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
status = -EINVAL;
@@ -2766,8 +2747,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
break;
default:
- vchiq_log_error(state->dev, VCHIQ_CORE, "%s(%d) called in state %s", __func__,
- close_recvd, srvstate_names[service->srvstate]);
+ dev_err(state->dev, "core: (%d) called in state %s\n",
+ close_recvd, srvstate_names[service->srvstate]);
break;
}
@@ -2780,8 +2761,8 @@ vchiq_terminate_service_internal(struct vchiq_service *service)
{
struct vchiq_state *state = service->state;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: tsi - (%d<->%d)", state->id,
- service->localport, service->remoteport);
+ dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
+ state->id, service->localport, service->remoteport);
mark_service_closing(service);
@@ -2795,8 +2776,7 @@ vchiq_free_service_internal(struct vchiq_service *service)
{
struct vchiq_state *state = service->state;
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: fsi - (%d)",
- state->id, service->localport);
+ dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPENING:
@@ -2806,8 +2786,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
case VCHIQ_SRVSTATE_CLOSEWAIT:
break;
default:
- vchiq_log_error(state->dev, VCHIQ_CORE, "%d: fsi - (%d) in state %s", state->id,
- service->localport, srvstate_names[service->srvstate]);
+ dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
+ state->id, service->localport, srvstate_names[service->srvstate]);
return;
}
@@ -2876,8 +2856,8 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
if (!service)
return -EINVAL;
- vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: close_service:%d",
- service->state->id, service->localport);
+ dev_dbg(service->state->dev, "core: %d: close_service:%d\n",
+ service->state->id, service->localport);
if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
@@ -2907,10 +2887,10 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
(service->srvstate == VCHIQ_SRVSTATE_OPEN))
break;
- vchiq_log_warning(service->state->dev, VCHIQ_CORE,
- "%d: close_service:%d - waiting in state %s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate]);
+ dev_warn(service->state->dev,
+ "core: %d: close_service:%d - waiting in state %s\n",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
}
if (!status &&
@@ -2934,8 +2914,8 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
if (!service)
return -EINVAL;
- vchiq_log_debug(service->state->dev, VCHIQ_CORE, "%d: remove_service:%d",
- service->state->id, service->localport);
+ dev_dbg(service->state->dev, "core: %d: remove_service:%d\n",
+ service->state->id, service->localport);
if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
vchiq_service_put(service);
@@ -2968,10 +2948,10 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
(service->srvstate == VCHIQ_SRVSTATE_OPEN))
break;
- vchiq_log_warning(service->state->dev, VCHIQ_CORE,
- "%d: remove_service:%d - waiting in state %s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate]);
+ dev_warn(service->state->dev,
+ "core: %d: remove_service:%d - waiting in state %s\n",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
}
if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
@@ -3078,9 +3058,9 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
*/
wmb();
- vchiq_log_debug(state->dev, VCHIQ_CORE, "%d: bt (%d->%d) %cx %x@%pad %pK",
- state->id, service->localport, service->remoteport,
- dir_char, size, &bulk->data, userdata);
+ dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
+ state->id, service->localport, service->remoteport,
+ dir_char, size, &bulk->data, userdata);
/*
* The slot mutex must be held when the service is being closed, so
@@ -3115,9 +3095,9 @@ int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
mutex_unlock(&state->slot_mutex);
mutex_unlock(&service->bulk_mutex);
- vchiq_log_trace(state->dev, VCHIQ_CORE, "%d: bt:%d %cx li=%x ri=%x p=%x",
- state->id, service->localport, dir_char, queue->local_insert,
- queue->remote_insert, queue->process);
+ dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
+ state->id, service->localport, dir_char, queue->local_insert,
+ queue->remote_insert, queue->process);
waiting:
vchiq_service_put(service);
@@ -3372,8 +3352,8 @@ vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
return ret;
}
-static int
-vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
+static void
+vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
struct vchiq_shared_state *shared, const char *label)
{
static const char *const debug_names[] = {
@@ -3390,146 +3370,44 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
"COMPLETION_QUEUE_FULL_COUNT"
};
int i;
- char buf[80];
- int len;
- int err;
-
- len = scnprintf(buf, sizeof(buf), " %s: slots %d-%d tx_pos=%x recycle=%x",
- label, shared->slot_first, shared->slot_last,
- shared->tx_pos, shared->slot_queue_recycle);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
-
- len = scnprintf(buf, sizeof(buf), " Slots claimed:");
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+
+ seq_printf(f, " %s: slots %d-%d tx_pos=%x recycle=%x\n",
+ label, shared->slot_first, shared->slot_last,
+ shared->tx_pos, shared->slot_queue_recycle);
+
+ seq_puts(f, " Slots claimed:\n");
for (i = shared->slot_first; i <= shared->slot_last; i++) {
struct vchiq_slot_info slot_info =
*SLOT_INFO_FROM_INDEX(state, i);
if (slot_info.use_count != slot_info.release_count) {
- len = scnprintf(buf, sizeof(buf), " %d: %d/%d", i, slot_info.use_count,
- slot_info.release_count);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+ seq_printf(f, " %d: %d/%d\n", i, slot_info.use_count,
+ slot_info.release_count);
}
}
for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
- len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
- debug_names[i], shared->debug[i], shared->debug[i]);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
- }
- return 0;
-}
-
-int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
-{
- char buf[80];
- int len;
- int i;
- int err;
-
- len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
- conn_state_names[state->conn_state]);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
-
- len = scnprintf(buf, sizeof(buf), " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
- state->local->tx_pos,
- state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
- state->rx_pos,
- state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
-
- len = scnprintf(buf, sizeof(buf), " Version: %d (min %d)",
- VCHIQ_VERSION, VCHIQ_VERSION_MIN);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
-
- if (VCHIQ_ENABLE_STATS) {
- len = scnprintf(buf, sizeof(buf),
- " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
- state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
- state->stats.error_count);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
- }
-
- len = scnprintf(buf, sizeof(buf),
- " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
- ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
- state->local_tx_pos) / VCHIQ_SLOT_SIZE,
- state->data_quota - state->data_use_count,
- state->local->slot_queue_recycle - state->slot_queue_available,
- state->stats.slot_stalls, state->stats.data_stalls);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
-
- err = vchiq_dump_platform_state(dump_context);
- if (err)
- return err;
-
- err = vchiq_dump_shared_state(dump_context,
- state,
- state->local,
- "Local");
- if (err)
- return err;
- err = vchiq_dump_shared_state(dump_context,
- state,
- state->remote,
- "Remote");
- if (err)
- return err;
-
- err = vchiq_dump_platform_instances(dump_context);
- if (err)
- return err;
-
- for (i = 0; i < state->unused_service; i++) {
- struct vchiq_service *service = find_service_by_port(state, i);
-
- if (service) {
- err = vchiq_dump_service_state(dump_context, service);
- vchiq_service_put(service);
- if (err)
- return err;
- }
+ seq_printf(f, " DEBUG: %s = %d(%x)\n",
+ debug_names[i], shared->debug[i], shared->debug[i]);
}
- return 0;
}
-int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
+static void
+vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
{
- char buf[80];
- int len;
- int err;
unsigned int ref_count;
/*Don't include the lock just taken*/
ref_count = kref_read(&service->ref_count) - 1;
- len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
- service->localport, srvstate_names[service->srvstate],
- ref_count);
+ seq_printf(f, "Service %u: %s (ref %u)", service->localport,
+ srvstate_names[service->srvstate], ref_count);
if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
char remoteport[30];
struct vchiq_service_quota *quota =
&service->state->service_quotas[service->localport];
int fourcc = service->base.fourcc;
- int tx_pending, rx_pending;
+ int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
if (service->remoteport != VCHIQ_PORT_FREE) {
int len2 = scnprintf(remoteport, sizeof(remoteport),
@@ -3542,68 +3420,100 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
strscpy(remoteport, "n/a", sizeof(remoteport));
}
- len += scnprintf(buf + len, sizeof(buf) - len,
- " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)",
- &fourcc, remoteport,
- quota->message_use_count, quota->message_quota,
- quota->slot_use_count, quota->slot_quota);
-
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+ seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
+ &fourcc, remoteport,
+ quota->message_use_count, quota->message_quota,
+ quota->slot_use_count, quota->slot_quota);
tx_pending = service->bulk_tx.local_insert -
service->bulk_tx.remote_insert;
+ if (tx_pending) {
+ unsigned int i = BULK_INDEX(service->bulk_tx.remove);
+
+ tx_size = service->bulk_tx.bulks[i].size;
+ }
rx_pending = service->bulk_rx.local_insert -
service->bulk_rx.remote_insert;
+ if (rx_pending) {
+ unsigned int i = BULK_INDEX(service->bulk_rx.remove);
- len = scnprintf(buf, sizeof(buf),
- " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
- tx_pending,
- tx_pending ?
- service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
- 0, rx_pending, rx_pending ?
- service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
- 0);
+ rx_size = service->bulk_rx.bulks[i].size;
+ }
+
+ seq_printf(f, " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
+ tx_pending, tx_size, rx_pending, rx_size);
if (VCHIQ_ENABLE_STATS) {
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+ seq_printf(f, " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+ service->stats.ctrl_tx_count,
+ service->stats.ctrl_tx_bytes,
+ service->stats.ctrl_rx_count,
+ service->stats.ctrl_rx_bytes);
+
+ seq_printf(f, " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+ service->stats.bulk_tx_count,
+ service->stats.bulk_tx_bytes,
+ service->stats.bulk_rx_count,
+ service->stats.bulk_rx_bytes);
+
+ seq_printf(f, " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
+ service->stats.quota_stalls,
+ service->stats.slot_stalls,
+ service->stats.bulk_stalls,
+ service->stats.bulk_aborted_count,
+ service->stats.error_count);
+ }
+ }
- len = scnprintf(buf, sizeof(buf),
- " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
- service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
- service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+ vchiq_dump_platform_service_state(f, service);
+}
- len = scnprintf(buf, sizeof(buf),
- " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
- service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
- service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
+{
+ int i;
- len = scnprintf(buf, sizeof(buf),
- " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
- service->stats.quota_stalls, service->stats.slot_stalls,
- service->stats.bulk_stalls,
- service->stats.bulk_aborted_count,
- service->stats.error_count);
- }
+ seq_printf(f, "State %d: %s\n", state->id,
+ conn_state_names[state->conn_state]);
+
+ seq_printf(f, " tx_pos=%x(@%pK), rx_pos=%x(@%pK)\n",
+ state->local->tx_pos,
+ state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->rx_pos,
+ state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
+
+ seq_printf(f, " Version: %d (min %d)\n", VCHIQ_VERSION,
+ VCHIQ_VERSION_MIN);
+
+ if (VCHIQ_ENABLE_STATS) {
+ seq_printf(f, " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
+ state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+ state->stats.error_count);
}
- err = vchiq_dump(dump_context, buf, len + 1);
- if (err)
- return err;
+ seq_printf(f, " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
+ ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+ state->local_tx_pos) / VCHIQ_SLOT_SIZE,
+ state->data_quota - state->data_use_count,
+ state->local->slot_queue_recycle - state->slot_queue_available,
+ state->stats.slot_stalls, state->stats.data_stalls);
+
+ vchiq_dump_platform_state(f);
+
+ vchiq_dump_shared_state(f, state, state->local, "Local");
+
+ vchiq_dump_shared_state(f, state, state->remote, "Remote");
+
+ vchiq_dump_platform_instances(f);
- if (service->srvstate != VCHIQ_SRVSTATE_FREE)
- err = vchiq_dump_platform_service_state(dump_context, service);
- return err;
+ for (i = 0; i < state->unused_service; i++) {
+ struct vchiq_service *service = find_service_by_port(state, i);
+
+ if (service) {
+ vchiq_dump_service_state(f, service);
+ vchiq_service_put(service);
+ }
+ }
}
int vchiq_send_remote_use(struct vchiq_state *state)
@@ -3653,9 +3563,9 @@ void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
*s++ = '\0';
if (label && (*label != '\0'))
- vchiq_log_trace(dev, VCHIQ_CORE, "%s: %08x: %s", label, addr, line_buf);
+ dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
else
- vchiq_log_trace(dev, VCHIQ_CORE, "%s: %08x: %s", label, addr, line_buf);
+		dev_dbg(dev, "core: %08x: %s\n", addr, line_buf);
addr += 16;
mem += 16;
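
The seq_file interface adopted above grows its output buffer on demand, so the fixed scratch buffer, the scnprintf() length bookkeeping and the per-line error propagation all disappear. A minimal sketch of the pattern, using illustrative demo_* names rather than the driver's own:

	#include <linux/seq_file.h>

	struct demo_state {
		int id;
		int tx_pos;
		int rx_pos;
	};

	static int demo_dump_show(struct seq_file *f, void *unused)
	{
		struct demo_state *s = f->private;

		/* seq_printf() buffers internally; no bounds juggling */
		seq_printf(f, "State %d: tx_pos=%x rx_pos=%x\n",
			   s->id, s->tx_pos, s->rx_pos);
		return 0;
	}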
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 161358db4..c8527551b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -6,6 +6,7 @@
#include <linux/mutex.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/kthread.h>
#include <linux/kref.h>
@@ -31,44 +32,6 @@
#define VCHIQ_SLOT_SIZE 4096
#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))
-enum vchiq_log_category {
- VCHIQ_ARM,
- VCHIQ_CORE,
- VCHIQ_CORE_MSG,
- VCHIQ_SYNC,
- VCHIQ_SUSPEND,
-};
-
-static inline const char *log_category_str(enum vchiq_log_category c)
-{
- static const char * const strings[] = {
- "vchiq_arm",
- "vchiq_core",
- "vchiq_core_msg",
- "vchiq_sync",
- "vchiq_suspend",
- };
-
- return strings[c];
-};
-
-#ifndef vchiq_log_error
-#define vchiq_log_error(dev, cat, fmt, ...) \
- do { dev_dbg(dev, "%s error: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-#ifndef vchiq_log_warning
-#define vchiq_log_warning(dev, cat, fmt, ...) \
- do { dev_dbg(dev, "%s warning: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-#ifndef vchiq_log_debug
-#define vchiq_log_debug(dev, cat, fmt, ...) \
- do { dev_dbg(dev, "%s debug: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-#ifndef vchiq_log_trace
-#define vchiq_log_trace(dev, cat, fmt, ...) \
- do { dev_dbg(dev, "%s trace: " fmt, log_category_str(cat), ##__VA_ARGS__); } while (0)
-#endif
-
#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
@@ -504,11 +467,8 @@ vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *
void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
enum vchiq_bulk_dir dir);
-extern int
-vchiq_dump_state(void *dump_context, struct vchiq_state *state);
-
-extern int
-vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);
+extern void
+vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
extern void
vchiq_loud_error_header(void);
@@ -564,13 +524,11 @@ void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bul
void remote_event_signal(struct remote_event *event);
-int vchiq_dump(void *dump_context, const char *str, int len);
-
-int vchiq_dump_platform_state(void *dump_context);
+void vchiq_dump_platform_state(struct seq_file *f);
-int vchiq_dump_platform_instances(void *dump_context);
+void vchiq_dump_platform_instances(struct seq_file *f);
-int vchiq_dump_platform_service_state(void *dump_context, struct vchiq_service *service);
+void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);
int vchiq_use_service_internal(struct vchiq_service *service);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 58db78a9c..d833e4e29 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -40,6 +40,13 @@ static int debugfs_trace_show(struct seq_file *f, void *offset)
return 0;
}
+static int vchiq_dump_show(struct seq_file *f, void *offset)
+{
+ vchiq_dump_state(f, &g_state);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(vchiq_dump);
+
static int debugfs_trace_open(struct inode *inode, struct file *file)
{
return single_open(file, debugfs_trace_show, inode->i_private);
@@ -115,6 +122,9 @@ void vchiq_debugfs_init(void)
{
vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL);
vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir);
+
+ debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL,
+ &vchiq_dump_fops);
}
/* remove all the debugfs entries */
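
DEFINE_SHOW_ATTRIBUTE(vchiq_dump) expands to a vchiq_dump_fops structure whose open handler routes through single_open(), which is why no explicit file_operations boilerplate appears above. A self-contained sketch of the same wiring, with hypothetical demo_* names:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *f, void *unused)
	{
		seq_puts(f, "demo state\n");
		return 0;
	}
	/* Generates demo_fops, with demo_show() behind single_open() */
	DEFINE_SHOW_ATTRIBUTE(demo);

	static struct dentry *demo_dir;

	static void demo_debugfs_init(void)
	{
		demo_dir = debugfs_create_dir("demo", NULL);
		debugfs_create_file("state", 0444, demo_dir, NULL,
				    &demo_fops);
	}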
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 0bc93f48c..4d9deeeb6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -47,9 +47,8 @@ user_service_free(void *userdata)
static void close_delivered(struct user_service *user_service)
{
- vchiq_log_debug(user_service->service->state->dev, VCHIQ_ARM,
- "%s(handle=%x)",
- __func__, user_service->service->handle);
+ dev_dbg(user_service->service->state->dev,
+ "arm: (handle=%x)\n", user_service->service->handle);
if (user_service->close_pending) {
/* Allow the underlying service to be culled */
@@ -235,8 +234,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
if (wait_for_completion_interruptible(&user_service->insert_event)) {
- vchiq_log_debug(service->state->dev, VCHIQ_ARM,
- "DEQUEUE_MESSAGE interrupted");
+ dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n");
ret = -EINTR;
break;
}
@@ -271,9 +269,9 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
ret = -EFAULT;
}
} else {
- vchiq_log_error(service->state->dev, VCHIQ_ARM,
- "header %pK: bufsize %x < size %x",
- header, args->bufsize, header->size);
+ dev_err(service->state->dev,
+ "arm: header %pK: bufsize %x < size %x\n",
+ header, args->bufsize, header->size);
WARN(1, "invalid size\n");
ret = -EMSGSIZE;
}
@@ -318,13 +316,13 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
if (!waiter) {
- vchiq_log_error(service->state->dev, VCHIQ_ARM,
- "no bulk_waiter found for pid %d", current->pid);
+ dev_err(service->state->dev,
+ "arm: no bulk_waiter found for pid %d\n", current->pid);
ret = -ESRCH;
goto out;
}
- vchiq_log_debug(service->state->dev, VCHIQ_ARM,
- "found bulk_waiter %pK for pid %d", waiter, current->pid);
+ dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
+ waiter, current->pid);
userdata = &waiter->bulk_waiter;
} else {
userdata = args->userdata;
@@ -355,8 +353,8 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- vchiq_log_debug(service->state->dev, VCHIQ_ARM,
- "saved bulk_waiter %pK for pid %d", waiter, current->pid);
+ dev_dbg(service->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+ waiter, current->pid);
ret = put_user(mode_waiting, mode);
}
@@ -455,8 +453,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
mutex_lock(&instance->completion_mutex);
if (rc) {
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- "AWAIT_COMPLETION interrupted");
+ dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n");
ret = -EINTR;
goto out;
}
@@ -501,10 +498,10 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
msglen = header->size + sizeof(struct vchiq_header);
/* This must be a VCHIQ-style service */
if (args->msgbufsize < msglen) {
- vchiq_log_error(service->state->dev, VCHIQ_ARM,
- "header %pK: msgbufsize %x < msglen %x",
- header, args->msgbufsize, msglen);
- WARN(1, "invalid message size\n");
+ dev_err(service->state->dev,
+ "arm: header %pK: msgbufsize %x < msglen %x\n",
+ header, args->msgbufsize, msglen);
+ WARN(1, "invalid message size\n");
if (ret == 0)
ret = -EMSGSIZE;
break;
@@ -582,10 +579,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret = 0;
int i, rc;
- vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
- "%s - instance %pK, cmd %s, arg %lx", __func__, instance,
- ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
- ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+ dev_dbg(instance->state->dev, "arm: instance %pK, cmd %s, arg %lx\n", instance,
+ ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
switch (cmd) {
case VCHIQ_IOC_SHUTDOWN:
@@ -618,9 +614,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
rc = mutex_lock_killable(&instance->state->mutex);
if (rc) {
- vchiq_log_error(instance->state->dev, VCHIQ_ARM,
- "vchiq: connect: could not lock mutex for state %d: %d",
- instance->state->id, rc);
+ dev_err(instance->state->dev,
+ "arm: vchiq: connect: could not lock mutex for state %d: %d\n",
+ instance->state->id, rc);
ret = -EINTR;
break;
}
@@ -630,8 +626,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (!status)
instance->connected = 1;
else
- vchiq_log_error(instance->state->dev, VCHIQ_ARM,
- "vchiq: could not connect: %d", status);
+ dev_err(instance->state->dev,
+ "arm: vchiq: could not connect: %d\n", status);
break;
case VCHIQ_IOC_CREATE_SERVICE: {
@@ -700,13 +696,13 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
vchiq_use_service_internal(service) :
vchiq_release_service_internal(service);
if (ret) {
- vchiq_log_error(instance->state->dev, VCHIQ_SUSPEND,
- "%s: cmd %s returned error %ld for service %p4cc:%03d",
- __func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
- "VCHIQ_IOC_USE_SERVICE" :
- "VCHIQ_IOC_RELEASE_SERVICE",
- ret, &service->base.fourcc,
- service->client_id);
+ dev_err(instance->state->dev,
+ "suspend: cmd %s returned error %ld for service %p4cc:%03d\n",
+ (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ "VCHIQ_IOC_USE_SERVICE" :
+ "VCHIQ_IOC_RELEASE_SERVICE",
+ ret, &service->base.fourcc,
+ service->client_id);
}
} else {
ret = -EINVAL;
@@ -868,15 +864,15 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
- vchiq_log_debug(instance->state->dev, VCHIQ_ARM,
- " ioctl instance %pK, cmd %s -> status %d, %ld",
- instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
- ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+ dev_dbg(instance->state->dev,
+ "arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
} else {
- vchiq_log_trace(instance->state->dev, VCHIQ_ARM,
- " ioctl instance %pK, cmd %s -> status %d, %ld",
- instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
- ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+ dev_dbg(instance->state->dev,
+			"arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
}
return ret;
@@ -1170,11 +1166,10 @@ static int vchiq_open(struct inode *inode, struct file *file)
struct vchiq_state *state = vchiq_get_state();
struct vchiq_instance *instance;
- vchiq_log_debug(state->dev, VCHIQ_ARM, "vchiq_open");
+ dev_dbg(state->dev, "arm: vchiq open\n");
if (!state) {
- vchiq_log_error(state->dev, VCHIQ_ARM,
- "vchiq has no connection to VideoCore");
+ dev_err(state->dev, "arm: vchiq has no connection to VideoCore\n");
return -ENOTCONN;
}
@@ -1206,8 +1201,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
int ret = 0;
int i;
- vchiq_log_debug(state->dev, VCHIQ_ARM, "%s: instance=%lx", __func__,
- (unsigned long)instance);
+ dev_dbg(state->dev, "arm: instance=%p\n", instance);
if (!state) {
ret = -EPERM;
@@ -1306,26 +1300,6 @@ out:
return ret;
}
-static ssize_t
-vchiq_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
- struct dump_context context;
- int err;
-
- context.buf = buf;
- context.actual = 0;
- context.space = count;
- context.offset = *ppos;
-
- err = vchiq_dump_state(&context, &g_state);
- if (err)
- return err;
-
- *ppos += context.actual;
-
- return context.actual;
-}
-
static const struct file_operations
vchiq_fops = {
.owner = THIS_MODULE,
@@ -1335,7 +1309,6 @@ vchiq_fops = {
#endif
.open = vchiq_open,
.release = vchiq_release,
- .read = vchiq_read
};
static struct miscdevice vchiq_miscdev = {
diff --git a/drivers/staging/vme_user/Kconfig b/drivers/staging/vme_user/Kconfig
index d65cc5510..8e5df6ce3 100644
--- a/drivers/staging/vme_user/Kconfig
+++ b/drivers/staging/vme_user/Kconfig
@@ -3,18 +3,32 @@ menuconfig VME_BUS
bool "VME bridge support"
depends on PCI
help
- If you say Y here you get support for the VME bridge Framework.
+ Enable support for VME (VersaModular Eurocard bus) bridge modules.
+ The bridge allows connecting VME devices to systems with existing
+ interfaces (like USB or PCI) by means of translating VME protocol
+ operations.
+
+	  Note that this only enables the bridge framework. You'll also
+	  likely want to enable a driver for the specific bridge device you
+	  have in order to actually use it. If unsure, say N.
if VME_BUS
comment "VME Bridge Drivers"
config VME_TSI148
- tristate "Tempe"
+ tristate "Tundra TSI148 VME bridge support"
depends on HAS_DMA
help
- If you say Y here you get support for the Tundra TSI148 VME bridge
- chip.
+ If you say Y here you get support for the Tundra TSI148 VME-to-PCI/X
+ bridge chip (and pin-compatible clones).
+
+	  TSI148 is a high-performance, 2eSST- and VME64-compliant VME-to-PCI/X
+ interconnect bridge with support for PCI and PCI-X bus interface.
+ It is primarily used in industrial and embedded systems.
+
+	  To compile this driver as a module, say M here; the module will be
+	  called vme_tsi148. If unsure, say N.
config VME_FAKE
tristate "Fake"
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index 9bc2d3540..e9461a7a7 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -340,7 +340,7 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
if (!bridge->slave_set) {
- dev_err(bridge->parent, "Function not supported\n");
+ dev_err(bridge->parent, "%s not supported\n", __func__);
return -EINVAL;
}
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 350ab8f37..36183f2a6 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -10,12 +10,12 @@
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
- * CARDqGetTSFOffset - Calculate TSFOffset
+ * card_get_tsf_offset - Calculate TSFOffset
* vt6655_get_current_tsf - Read Current NIC TSF counter
- * CARDqGetNextTBTT - Calculate Next Beacon TSF counter
+ * card_get_next_tbtt - Calculate Next Beacon TSF counter
* CARDvSetFirstNextTBTT - Set NIC Beacon time
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
- * CARDbRadioPowerOff - Turn Off NIC Radio Power
+ * card_radio_power_off - Turn Off NIC Radio Power
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -280,7 +280,7 @@ bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
* In:
* priv - The adapter to be sync.
* rx_rate - data rate of receive beacon
- * qwBSSTimestamp - Rx BCN's TSF
+ * bss_timestamp - Rx BCN's TSF
* qwLocalTSF - Local TSF
* Out:
* none
@@ -288,20 +288,20 @@ bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
* Return Value: none
*/
bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
- u64 qwBSSTimestamp)
+ u64 bss_timestamp)
{
u64 local_tsf;
- u64 qwTSFOffset = 0;
+ u64 tsf_offset = 0;
local_tsf = vt6655_get_current_tsf(priv);
- if (qwBSSTimestamp != local_tsf) {
- qwTSFOffset = CARDqGetTSFOffset(rx_rate, qwBSSTimestamp,
+ if (bss_timestamp != local_tsf) {
+ tsf_offset = card_get_tsf_offset(rx_rate, bss_timestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
- qwTSFOffset = le64_to_cpu(qwTSFOffset);
- iowrite32((u32)qwTSFOffset, priv->port_offset + MAC_REG_TSFOFST);
- iowrite32((u32)(qwTSFOffset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4);
+ tsf_offset = le64_to_cpu(tsf_offset);
+ iowrite32((u32)tsf_offset, priv->port_offset + MAC_REG_TSFOFST);
+ iowrite32((u32)(tsf_offset >> 32), priv->port_offset + MAC_REG_TSFOFST + 4);
vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
}
return true;
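
card_update_tsf() above programs the 64-bit offset between the beacon's timestamp and the local TSF into a pair of 32-bit hardware registers. A hedged sketch of that split write, with a hypothetical register offset standing in for MAC_REG_TSFOFST:

	#include <linux/io.h>
	#include <linux/kernel.h>

	#define DEMO_REG_TSFOFST 0x28	/* hypothetical offset */

	static void demo_program_tsf_offset(void __iomem *base, u64 off)
	{
		iowrite32(lower_32_bits(off), base + DEMO_REG_TSFOFST);
		iowrite32(upper_32_bits(off), base + DEMO_REG_TSFOFST + 4);
	}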
@@ -314,28 +314,28 @@ bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
* Parameters:
* In:
* priv - The adapter to be set.
- * wBeaconInterval - Beacon Interval
+ * beacon_interval - Beacon Interval
* Out:
* none
*
* Return Value: true if succeed; otherwise false
*/
-bool CARDbSetBeaconPeriod(struct vnt_private *priv,
- unsigned short wBeaconInterval)
+bool card_set_beacon_period(struct vnt_private *priv,
+ unsigned short beacon_interval)
{
- u64 qwNextTBTT;
+ u64 next_tbtt;
- qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
+ next_tbtt = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
- qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
+ next_tbtt = card_get_next_tbtt(next_tbtt, beacon_interval);
/* set HW beacon interval */
- iowrite16(wBeaconInterval, priv->port_offset + MAC_REG_BI);
- priv->wBeaconInterval = wBeaconInterval;
+ iowrite16(beacon_interval, priv->port_offset + MAC_REG_BI);
+ priv->beacon_interval = beacon_interval;
/* Set NextTBTT */
- qwNextTBTT = le64_to_cpu(qwNextTBTT);
- iowrite32((u32)qwNextTBTT, priv->port_offset + MAC_REG_NEXTTBTT);
- iowrite32((u32)(qwNextTBTT >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4);
+ next_tbtt = le64_to_cpu(next_tbtt);
+ iowrite32((u32)next_tbtt, priv->port_offset + MAC_REG_NEXTTBTT);
+ iowrite32((u32)(next_tbtt >> 32), priv->port_offset + MAC_REG_NEXTTBTT + 4);
vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
return true;
@@ -351,7 +351,7 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
* none
*
*/
-void CARDbRadioPowerOff(struct vnt_private *priv)
+void card_radio_power_off(struct vnt_private *priv)
{
if (priv->radio_off)
return;
@@ -382,29 +382,29 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
}
-void CARDvSafeResetTx(struct vnt_private *priv)
+void card_safe_reset_tx(struct vnt_private *priv)
{
unsigned int uu;
- struct vnt_tx_desc *pCurrTD;
+ struct vnt_tx_desc *curr_td;
/* initialize TD index */
- priv->apTailTD[0] = &priv->apTD0Rings[0];
+ priv->tail_td[0] = &priv->apTD0Rings[0];
priv->apCurrTD[0] = &priv->apTD0Rings[0];
- priv->apTailTD[1] = &priv->apTD1Rings[0];
+ priv->tail_td[1] = &priv->apTD1Rings[0];
priv->apCurrTD[1] = &priv->apTD1Rings[0];
for (uu = 0; uu < TYPE_MAXTD; uu++)
priv->iTDUsed[uu] = 0;
for (uu = 0; uu < priv->opts.tx_descs[0]; uu++) {
- pCurrTD = &priv->apTD0Rings[uu];
- pCurrTD->td0.owner = OWNED_BY_HOST;
+ curr_td = &priv->apTD0Rings[uu];
+ curr_td->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < priv->opts.tx_descs[1]; uu++) {
- pCurrTD = &priv->apTD1Rings[uu];
- pCurrTD->td0.owner = OWNED_BY_HOST;
+ curr_td = &priv->apTD1Rings[uu];
+ curr_td->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
@@ -708,7 +708,7 @@ unsigned char card_get_pkt_type(struct vnt_private *priv)
*
* Return Value: TSF Offset value
*/
-u64 CARDqGetTSFOffset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2)
+u64 card_get_tsf_offset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2)
{
unsigned short wRxBcnTSFOffst;
@@ -764,11 +764,11 @@ u64 vt6655_get_current_tsf(struct vnt_private *priv)
*
* Return Value: TSF value of next Beacon
*/
-u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
+u64 card_get_next_tbtt(u64 qwTSF, unsigned short beacon_interval)
{
u32 beacon_int;
- beacon_int = wBeaconInterval * 1024;
+ beacon_int = beacon_interval * 1024;
if (beacon_int) {
do_div(qwTSF, beacon_int);
qwTSF += 1;
@@ -785,25 +785,25 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
* Parameters:
* In:
* iobase - IO Base
- * wBeaconInterval - Beacon Interval
+ * beacon_interval - Beacon Interval
* Out:
* none
*
* Return Value: none
*/
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
- unsigned short wBeaconInterval)
+ unsigned short beacon_interval)
{
void __iomem *iobase = priv->port_offset;
- u64 qwNextTBTT;
+ u64 next_tbtt;
- qwNextTBTT = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
+ next_tbtt = vt6655_get_current_tsf(priv); /* Get Local TSF counter */
- qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
+ next_tbtt = card_get_next_tbtt(next_tbtt, beacon_interval);
/* Set NextTBTT */
- qwNextTBTT = le64_to_cpu(qwNextTBTT);
- iowrite32((u32)qwNextTBTT, iobase + MAC_REG_NEXTTBTT);
- iowrite32((u32)(qwNextTBTT >> 32), iobase + MAC_REG_NEXTTBTT + 4);
+ next_tbtt = le64_to_cpu(next_tbtt);
+ iowrite32((u32)next_tbtt, iobase + MAC_REG_NEXTTBTT);
+ iowrite32((u32)(next_tbtt >> 32), iobase + MAC_REG_NEXTTBTT + 4);
vt6655_mac_reg_bits_on(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
}
@@ -815,18 +815,18 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
* In:
* priv - The adapter to be set
* qwTSF - Current TSF counter
- * wBeaconInterval - Beacon Interval
+ * beacon_interval - Beacon Interval
* Out:
* none
*
* Return Value: none
*/
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
- unsigned short wBeaconInterval)
+ unsigned short beacon_interval)
{
void __iomem *iobase = priv->port_offset;
- qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
+ qwTSF = card_get_next_tbtt(qwTSF, beacon_interval);
/* Set NextTBTT */
qwTSF = le64_to_cpu(qwTSF);
iowrite32((u32)qwTSF, iobase + MAC_REG_NEXTTBTT);
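
card_get_next_tbtt() rounds the current TSF up to the next beacon boundary, with the beacon interval expressed in time units of 1024 microseconds. A sketch of the arithmetic, assuming the multiply-back step that follows the do_div() shown above:

	#include <asm/div64.h>
	#include <linux/types.h>

	static u64 demo_next_tbtt(u64 tsf, unsigned short beacon_interval)
	{
		u32 beacon_int = beacon_interval * 1024;	/* TU -> us */

		if (beacon_int) {
			do_div(tsf, beacon_int);	/* intervals elapsed */
			tsf += 1;			/* next boundary */
			tsf *= beacon_int;		/* back to a TSF value */
		}
		return tsf;
	}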
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 19689a291..f52e42564 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -43,20 +43,20 @@ void card_set_rspinf(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
- unsigned short wBeaconInterval);
+ unsigned short beacon_interval);
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
- unsigned short wBeaconInterval);
+ unsigned short beacon_interval);
u64 vt6655_get_current_tsf(struct vnt_private *priv);
-u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
-u64 CARDqGetTSFOffset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2);
+u64 card_get_next_tbtt(u64 qwTSF, unsigned short beacon_interval);
+u64 card_get_tsf_offset(unsigned char rx_rate, u64 qwTSF1, u64 qwTSF2);
unsigned char card_get_pkt_type(struct vnt_private *priv);
-void CARDvSafeResetTx(struct vnt_private *priv);
+void card_safe_reset_tx(struct vnt_private *priv);
void CARDvSafeResetRx(struct vnt_private *priv);
-void CARDbRadioPowerOff(struct vnt_private *priv);
+void card_radio_power_off(struct vnt_private *priv);
bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type);
bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
- u64 qwBSSTimestamp);
-bool CARDbSetBeaconPeriod(struct vnt_private *priv,
- unsigned short wBeaconInterval);
+ u64 bss_timestamp);
+bool card_set_beacon_period(struct vnt_private *priv,
+ unsigned short beacon_interval);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index d9ee0b740..0212240ba 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -133,7 +133,7 @@ struct vnt_private {
volatile int iTDUsed[TYPE_MAXTD];
struct vnt_tx_desc *apCurrTD[TYPE_MAXTD];
- struct vnt_tx_desc *apTailTD[TYPE_MAXTD];
+ struct vnt_tx_desc *tail_td[TYPE_MAXTD];
struct vnt_tx_desc *apTD0Rings;
struct vnt_tx_desc *apTD1Rings;
@@ -281,7 +281,7 @@ struct vnt_private {
unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
- unsigned short wBeaconInterval;
+ unsigned short beacon_interval;
u16 wake_up_count;
struct work_struct interrupt_work;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 7d297526e..b0b262de6 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -454,7 +454,7 @@ static void device_init_registers(struct vnt_private *priv)
}
if (priv->hw_radio_off || priv->bRadioControlOff)
- CARDbRadioPowerOff(priv);
+ card_radio_power_off(priv);
/* get Permanent network address */
SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
@@ -463,7 +463,7 @@ static void device_init_registers(struct vnt_private *priv)
/* reset Tx pointer */
CARDvSafeResetRx(priv);
/* reset Rx pointer */
- CARDvSafeResetTx(priv);
+ card_safe_reset_tx(priv);
if (priv->local_id <= REV_ID_VT3253_A1)
vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
@@ -737,7 +737,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
if (i > 0)
priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
- priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
+ priv->tail_td[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
return 0;
@@ -777,7 +777,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
if (i > 0)
priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
- priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
+ priv->tail_td[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
return 0;
@@ -969,7 +969,7 @@ static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
unsigned char byTsr0;
unsigned char byTsr1;
- for (desc = priv->apTailTD[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
+ for (desc = priv->tail_td[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
if (desc->td0.owner == OWNED_BY_NIC)
break;
if (works++ > 15)
@@ -1007,7 +1007,7 @@ static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
}
}
- priv->apTailTD[idx] = desc;
+ priv->tail_td[idx] = desc;
return works;
}
@@ -1349,7 +1349,7 @@ static void vnt_stop(struct ieee80211_hw *hw)
MACbShutdown(priv);
MACbSoftwareReset(priv);
- CARDbRadioPowerOff(priv);
+ card_radio_power_off(priv);
device_free_td0_ring(priv);
device_free_td1_ring(priv);
@@ -1537,7 +1537,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
card_update_tsf(priv, conf->beacon_rate->hw_value,
conf->sync_tsf);
- CARDbSetBeaconPeriod(priv, conf->beacon_int);
+ card_set_beacon_period(priv, conf->beacon_int);
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
} else {
@@ -1712,7 +1712,7 @@ static int vnt_init(struct vnt_private *priv)
priv->mac_hw = true;
- CARDbRadioPowerOff(priv);
+ card_radio_power_off(priv);
return 0;
}
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 522d34ca9..5e5ed582c 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1456,7 +1456,7 @@ int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
- CARDbSetBeaconPeriod(priv, conf->beacon_int);
+ card_set_beacon_period(priv, conf->beacon_int);
return vnt_beacon_make(priv, vif);
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f589881..c1fbcdd16 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item,
return count;
}
+static int target_try_configure_unmap(struct se_device *dev,
+ const char *config_opt)
+{
+ if (!dev->transport->configure_unmap) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ if (!target_dev_configured(dev)) {
+ pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+ config_opt);
+ return -ENODEV;
+ }
+
+ if (!dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard setup for %s failed\n",
+ config_opt);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
@@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpu");
+ if (ret)
+ return ret;
}
da->emulate_tpu = flag;
@@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpws");
+ if (ret)
+ return ret;
}
da->emulate_tpws = flag;
@@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+ if (ret)
+ return ret;
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 4e4cf6c34..4d447520b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -332,13 +332,11 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
}
iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
- if (is_write) {
- file_start_write(fd);
+ if (is_write)
ret = vfs_iter_write(fd, &iter, &pos, 0);
- file_end_write(fd);
- } else {
+ else
ret = vfs_iter_read(fd, &iter, &pos, 0);
- }
+
if (is_write) {
if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
@@ -469,9 +467,7 @@ fd_execute_write_same(struct se_cmd *cmd)
}
iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
- file_start_write(fd_dev->fd_file);
ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
- file_end_write(fd_dev->fd_file);
kfree(bvec);
if (ret < 0 || ret != len) {
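
The file_start_write()/file_end_write() pairs are dropped above because vfs_iter_write() now takes the superblock write (freeze) protection itself. A hedged sketch of a bvec-backed write through that path; demo_write_page() is illustrative, not a helper in this driver:

	#include <linux/bvec.h>
	#include <linux/fs.h>
	#include <linux/uio.h>

	static ssize_t demo_write_page(struct file *file, struct page *page,
				       size_t len, loff_t pos)
	{
		struct bio_vec bv;
		struct iov_iter iter;

		bvec_set_page(&bv, page, len, 0);
		iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, len);
		/* No file_start_write() here; vfs_iter_write() handles it */
		return vfs_iter_write(file, &iter, &pos, 0);
	}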
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 49d9167bb..80b7d8503 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -470,6 +470,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
case INQUIRY:
case LOG_SENSE:
case SERVICE_ACTION_IN_12:
+ case READ_CAPACITY:
case REPORT_LUNS:
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 91ed015b5..4128631c9 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/rculist.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
index 70898bbd5..976928641 100644
--- a/drivers/tee/optee/Kconfig
+++ b/drivers/tee/optee/Kconfig
@@ -23,4 +23,4 @@ config OPTEE_INSECURE_LOAD_IMAGE
https://trustedfirmware-a.readthedocs.io/en/latest/threat_model/threat_model.html
Additional documentation on kernel security risks are at
- Documentation/staging/tee.rst.
+ Documentation/tee/op-tee.rst.
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index df5fb5410..a91e50be1 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
*/
#include <linux/device.h>
#include <linux/err.h>
@@ -39,9 +39,29 @@ struct optee_shm_arg_entry {
DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
};
+void optee_cq_init(struct optee_call_queue *cq, int thread_count)
+{
+ mutex_init(&cq->mutex);
+ INIT_LIST_HEAD(&cq->waiters);
+
+ /*
+ * If cq->total_thread_count is 0 then we're not trying to keep
+	 * If cq->total_thread_count is 0 then we're not trying to keep
+	 * track of how many free threads we have; instead we rely on the
+	 * secure world to tell us when we're out of threads and must
+	 * wait for another thread to become available.
+ */
+ cq->total_thread_count = thread_count;
+ cq->free_thread_count = thread_count;
+}
+
void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+ struct optee_call_waiter *w, bool sys_thread)
{
+ unsigned int free_thread_threshold;
+ bool need_wait = false;
+
+ memset(w, 0, sizeof(*w));
+
/*
* We're preparing to make a call to secure world. In case we can't
* allocate a thread in secure world we'll end up waiting in
@@ -60,8 +80,38 @@ void optee_cq_wait_init(struct optee_call_queue *cq,
*/
init_completion(&w->c);
list_add_tail(&w->list_node, &cq->waiters);
+ w->sys_thread = sys_thread;
+
+ if (cq->total_thread_count) {
+ if (sys_thread || !cq->sys_thread_req_count)
+ free_thread_threshold = 0;
+ else
+ free_thread_threshold = 1;
+
+ if (cq->free_thread_count > free_thread_threshold)
+ cq->free_thread_count--;
+ else
+ need_wait = true;
+ }
mutex_unlock(&cq->mutex);
+
+ while (need_wait) {
+ optee_cq_wait_for_completion(cq, w);
+ mutex_lock(&cq->mutex);
+
+ if (sys_thread || !cq->sys_thread_req_count)
+ free_thread_threshold = 0;
+ else
+ free_thread_threshold = 1;
+
+ if (cq->free_thread_count > free_thread_threshold) {
+ cq->free_thread_count--;
+ need_wait = false;
+ }
+
+ mutex_unlock(&cq->mutex);
+ }
}
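
The threshold logic above keeps one thread in reserve for system sessions: a normal caller may only claim a free thread while more than the reserved count remain. A simplified model of that admission rule, for illustration only:

	static bool demo_may_enter(int free_thread_count,
				   int sys_thread_req_count, bool sys_thread)
	{
		/*
		 * System callers, or queues with no registered system
		 * sessions, may drain the whole pool.
		 */
		int threshold = (sys_thread || !sys_thread_req_count) ? 0 : 1;

		return free_thread_count > threshold;
	}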
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
@@ -83,6 +133,14 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
{
struct optee_call_waiter *w;
+ /* Wake a waiting system session if any, prior to a normal session */
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (w->sys_thread && !completion_done(&w->c)) {
+ complete(&w->c);
+ return;
+ }
+ }
+
list_for_each_entry(w, &cq->waiters, list_node) {
if (!completion_done(&w->c)) {
complete(&w->c);
@@ -104,6 +162,8 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
/* Get out of the list */
list_del(&w->list_node);
+ cq->free_thread_count++;
+
/* Wake up one eventual waiting task */
optee_cq_complete_one(cq);
@@ -119,6 +179,28 @@ void optee_cq_wait_final(struct optee_call_queue *cq,
mutex_unlock(&cq->mutex);
}
+/* Count registered system sessions to decide whether to reserve a system thread */
+static bool optee_cq_incr_sys_thread_count(struct optee_call_queue *cq)
+{
+ if (cq->total_thread_count <= 1)
+ return false;
+
+ mutex_lock(&cq->mutex);
+ cq->sys_thread_req_count++;
+ mutex_unlock(&cq->mutex);
+
+ return true;
+}
+
+static void optee_cq_decr_sys_thread_count(struct optee_call_queue *cq)
+{
+ mutex_lock(&cq->mutex);
+ cq->sys_thread_req_count--;
+ /* If there's someone waiting, let it resume */
+ optee_cq_complete_one(cq);
+ mutex_unlock(&cq->mutex);
+}
+
/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
u32 session_id)
@@ -328,7 +410,8 @@ int optee_open_session(struct tee_context *ctx,
goto out;
}
- if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs,
+ sess->use_sys_thread)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -360,7 +443,29 @@ out:
return rc;
}
-int optee_close_session_helper(struct tee_context *ctx, u32 session)
+int optee_system_session(struct tee_context *ctx, u32 session)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_context_data *ctxdata = ctx->data;
+ struct optee_session *sess;
+ int rc = -EINVAL;
+
+ mutex_lock(&ctxdata->mutex);
+
+ sess = find_session(ctxdata, session);
+ if (sess && (sess->use_sys_thread ||
+ optee_cq_incr_sys_thread_count(&optee->call_queue))) {
+ sess->use_sys_thread = true;
+ rc = 0;
+ }
+
+ mutex_unlock(&ctxdata->mutex);
+
+ return rc;
+}
+
+int optee_close_session_helper(struct tee_context *ctx, u32 session,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_shm_arg_entry *entry;
@@ -374,10 +479,13 @@ int optee_close_session_helper(struct tee_context *ctx, u32 session)
msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
msg_arg->session = session;
- optee->ops->do_call_with_arg(ctx, shm, offs);
+ optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);
optee_free_msg_arg(ctx, entry, offs);
+ if (system_thread)
+ optee_cq_decr_sys_thread_count(&optee->call_queue);
+
return 0;
}
@@ -385,6 +493,7 @@ int optee_close_session(struct tee_context *ctx, u32 session)
{
struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
+ bool system_thread;
/* Check that the session is valid and remove it from the list */
mutex_lock(&ctxdata->mutex);
@@ -394,9 +503,10 @@ int optee_close_session(struct tee_context *ctx, u32 session)
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
+ system_thread = sess->use_sys_thread;
kfree(sess);
- return optee_close_session_helper(ctx, session);
+ return optee_close_session_helper(ctx, session, system_thread);
}
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
@@ -408,12 +518,15 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
struct tee_shm *shm;
+ bool system_thread;
u_int offs;
int rc;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
sess = find_session(ctxdata, arg->session);
+ if (sess)
+ system_thread = sess->use_sys_thread;
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
@@ -432,7 +545,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (rc)
goto out;
- if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
+ if (optee->ops->do_call_with_arg(ctx, shm, offs, system_thread)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -457,12 +570,15 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
struct optee_shm_arg_entry *entry;
struct optee_msg_arg *msg_arg;
struct optee_session *sess;
+ bool system_thread;
struct tee_shm *shm;
u_int offs;
/* Check that the session is valid */
mutex_lock(&ctxdata->mutex);
sess = find_session(ctxdata, session);
+ if (sess)
+ system_thread = sess->use_sys_thread;
mutex_unlock(&ctxdata->mutex);
if (!sess)
return -EINVAL;
@@ -474,7 +590,7 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
msg_arg->session = session;
msg_arg->cancel_id = cancel_id;
- optee->ops->do_call_with_arg(ctx, shm, offs);
+ optee->ops->do_call_with_arg(ctx, shm, offs, system_thread);
optee_free_msg_arg(ctx, entry, offs);
return 0;
@@ -524,3 +640,32 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
return rc;
}
+
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_shm_arg_entry *entry;
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+ u_int offs;
+
+ msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+ if (IS_ERR(msg_arg))
+ return PTR_ERR(msg_arg);
+
+ msg_arg->cmd = cmd;
+ optee->ops->do_call_with_arg(ctx, shm, offs, false);
+
+ optee_free_msg_arg(ctx, entry, offs);
+ return 0;
+}
+
+int optee_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+int optee_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 2a258bd3b..3aed554bc 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "optee_private.h"
int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
@@ -26,46 +25,46 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
size_t num_pages,
unsigned long start))
{
- unsigned int order = get_order(size);
- struct page *page;
+ size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+ struct page **pages;
+ unsigned int i;
int rc = 0;
/*
* Ignore alignment since this is already going to be page aligned
* and there's no need for any larger alignment.
*/
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!page)
+ shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!shm->kaddr)
return -ENOMEM;
- shm->kaddr = page_address(page);
- shm->paddr = page_to_phys(page);
- shm->size = PAGE_SIZE << order;
+ shm->paddr = virt_to_phys(shm->kaddr);
+ shm->size = nr_pages * PAGE_SIZE;
- if (shm_register) {
- unsigned int nr_pages = 1 << order, i;
- struct page **pages;
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
- pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto err;
- }
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
- for (i = 0; i < nr_pages; i++)
- pages[i] = page + i;
+ shm->pages = pages;
+ shm->num_pages = nr_pages;
+ if (shm_register) {
rc = shm_register(shm->ctx, shm, pages, nr_pages,
(unsigned long)shm->kaddr);
- kfree(pages);
if (rc)
goto err;
}
return 0;
-
err:
- free_pages((unsigned long)shm->kaddr, order);
+ free_pages_exact(shm->kaddr, shm->size);
+ shm->kaddr = NULL;
return rc;
}
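
alloc_pages_exact() is used above because alloc_pages(get_order(size)) rounds the allocation up to a power-of-two number of pages, wasting up to nearly half of it for odd sizes. A minimal sketch of the exact-size pattern; demo_alloc_shm() is illustrative:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *demo_alloc_shm(size_t size, size_t *real_size)
	{
		size_t aligned = PAGE_ALIGN(size);
		void *vaddr = alloc_pages_exact(aligned,
						GFP_KERNEL | __GFP_ZERO);

		if (vaddr)
			*real_size = aligned;
		/* Pair with free_pages_exact(vaddr, aligned) */
		return vaddr;
	}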
@@ -75,8 +74,10 @@ void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
{
if (shm_unregister)
shm_unregister(shm->ctx, shm);
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ free_pages_exact(shm->kaddr, shm->size);
shm->kaddr = NULL;
+ kfree(shm->pages);
+ shm->pages = NULL;
}
static void optee_bus_scan(struct work_struct *work)
@@ -110,12 +111,7 @@ int optee_open(struct tee_context *ctx, bool cap_memref_null)
if (!optee->scan_bus_done) {
INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
- optee->scan_bus_wq = create_workqueue("optee_bus_scan");
- if (!optee->scan_bus_wq) {
- kfree(ctxdata);
- return -ECHILD;
- }
- queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
+ schedule_work(&optee->scan_bus_work);
optee->scan_bus_done = true;
}
}
@@ -129,7 +125,8 @@ int optee_open(struct tee_context *ctx, bool cap_memref_null)
static void optee_release_helper(struct tee_context *ctx,
int (*close_session)(struct tee_context *ctx,
- u32 session))
+ u32 session,
+ bool system_thread))
{
struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
@@ -141,7 +138,7 @@ static void optee_release_helper(struct tee_context *ctx,
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
list_node) {
list_del(&sess->list_node);
- close_session(ctx, sess->session_id);
+ close_session(ctx, sess->session_id, sess->use_sys_thread);
kfree(sess);
}
kfree(ctxdata);
@@ -158,10 +155,7 @@ void optee_release_supp(struct tee_context *ctx)
struct optee *optee = tee_get_drvdata(ctx->teedev);
optee_release_helper(ctx, optee_close_session_helper);
- if (optee->scan_bus_wq) {
- destroy_workqueue(optee->scan_bus_wq);
- optee->scan_bus_wq = NULL;
- }
+
optee_supp_release(&optee->supp);
}
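
The dedicated "optee_bus_scan" workqueue is removed above in favour of schedule_work(), which queues on the shared system workqueue; a one-shot bus scan does not justify its own worker threads. A sketch of the pattern, with illustrative names:

	#include <linux/workqueue.h>

	static void demo_scan_fn(struct work_struct *work)
	{
		/* one-shot bus scan would run here */
	}
	static DECLARE_WORK(demo_scan_work, demo_scan_fn);

	static void demo_kick_scan(void)
	{
		schedule_work(&demo_scan_work);	/* shared system_wq */
	}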
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 0828240f2..ecb5eb079 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2021, 2023 Linaro Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -528,7 +528,8 @@ static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
static int optee_ffa_yielding_call(struct tee_context *ctx,
struct ffa_send_direct_data *data,
- struct optee_msg_arg *rpc_arg)
+ struct optee_msg_arg *rpc_arg,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
@@ -541,7 +542,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
int rc;
/* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, system_thread);
while (true) {
rc = msg_ops->sync_send_receive(ffa_dev, data);
if (rc)
@@ -604,6 +605,7 @@ done:
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
* @offs: offset of the message in @shm
+ * @system_thread: true if caller requests TEE system thread support
*
* Does a FF-A call to OP-TEE in secure world and handles eventual resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -612,7 +614,8 @@ done:
*/
static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm, u_int offs)
+ struct tee_shm *shm, u_int offs,
+ bool system_thread)
{
struct ffa_send_direct_data data = {
.data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
@@ -642,7 +645,7 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
if (IS_ERR(rpc_arg))
return PTR_ERR(rpc_arg);
- return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread);
}
/*
@@ -692,7 +695,8 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
const struct ffa_ops *ops,
u32 *sec_caps,
- unsigned int *rpc_param_count)
+ unsigned int *rpc_param_count,
+ unsigned int *max_notif_value)
{
struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
int rc;
@@ -709,10 +713,39 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
*rpc_param_count = (u8)data.data1;
*sec_caps = data.data2;
+ if (data.data3)
+ *max_notif_value = data.data3;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
return true;
}
+static void notif_callback(int notify_id, void *cb_data)
+{
+ struct optee *optee = cb_data;
+
+ if (notify_id == optee->ffa.bottom_half_value)
+ optee_do_bottom_half(optee->ctx);
+ else
+ optee_notif_send(optee, notify_id);
+}
+
+static int enable_async_notif(struct optee *optee)
+{
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_ENABLE_ASYNC_NOTIF,
+ .data1 = optee->ffa.bottom_half_value,
+ };
+ int rc;
+
+ rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ return rc;
+ return data.data0;
+}
+
static void optee_ffa_get_version(struct tee_device *teedev,
struct tee_ioctl_version_data *vers)
{
@@ -775,7 +808,11 @@ static const struct optee_ops optee_ffa_ops = {
static void optee_ffa_remove(struct ffa_device *ffa_dev)
{
struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
+ u32 bottom_half_id = optee->ffa.bottom_half_value;
+ if (bottom_half_id != U32_MAX)
+ ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+ bottom_half_id);
optee_remove_common(optee);
mutex_destroy(&optee->ffa.mutex);
@@ -784,9 +821,51 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
kfree(optee);
}
+static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev,
+ struct optee *optee)
+{
+ bool is_per_vcpu = false;
+ u32 notif_id = 0;
+ int rc;
+
+ while (true) {
+ rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev,
+ is_per_vcpu,
+ notif_callback,
+ optee,
+ notif_id);
+ if (!rc)
+ break;
+ /*
+ * -EACCES means that the notification ID was
+ * already bound, try the next one as long as we
+ * haven't reached the max. Any other error is a
+ * permanent error, so skip asynchronous
+ * notifications in that case.
+ */
+ if (rc != -EACCES)
+ return rc;
+ notif_id++;
+ if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE)
+ return rc;
+ }
+ optee->ffa.bottom_half_value = notif_id;
+
+ rc = enable_async_notif(optee);
+ if (rc < 0) {
+ ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev,
+ notif_id);
+ optee->ffa.bottom_half_value = U32_MAX;
+ }
+
+ return rc;
+}
+
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
+ const struct ffa_notifier_ops *notif_ops;
const struct ffa_ops *ffa_ops;
+ unsigned int max_notif_value;
unsigned int rpc_param_count;
struct tee_shm_pool *pool;
struct tee_device *teedev;
@@ -797,12 +876,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
int rc;
ffa_ops = ffa_dev->ops;
+ notif_ops = ffa_ops->notifier_ops;
if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
return -EINVAL;
if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
- &rpc_param_count))
+ &rpc_param_count, &max_notif_value))
return -EINVAL;
if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
@@ -820,6 +900,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
+ optee->ffa.bottom_half_value = U32_MAX;
optee->rpc_param_count = rpc_param_count;
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
@@ -850,8 +931,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (rc)
goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_cq_init(&optee->call_queue, 0);
optee_supp_init(&optee->supp);
optee_shm_arg_cache_init(optee, arg_cache_flags);
ffa_dev_set_drvdata(ffa_dev, optee);
@@ -864,6 +944,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
if (rc)
goto err_close_ctx;
+ if (sec_caps & OPTEE_FFA_SEC_CAP_ASYNC_NOTIF) {
+ rc = optee_ffa_async_notif_init(ffa_dev, optee);
+ if (rc < 0)
+			pr_err("Failed to initialize async notifications: %d\n",
+ rc);
+ }
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc)
@@ -874,6 +960,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
err_unregister_devices:
optee_unregister_devices();
+ if (optee->ffa.bottom_half_value != U32_MAX)
+ notif_ops->notify_relinquish(ffa_dev,
+ optee->ffa.bottom_half_value);
optee_notif_uninit(optee);
err_close_ctx:
teedev_close_context(ctx);
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
index 97266243d..5db779dc0 100644
--- a/drivers/tee/optee/optee_ffa.h
+++ b/drivers/tee/optee/optee_ffa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
- * Copyright (c) 2019-2021, Linaro Limited
+ * Copyright (c) 2019-2021, 2023 Linaro Limited
*/
/*
@@ -73,7 +73,7 @@
*
* Call register usage:
* w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
- * w4-w7: Note used (MBZ)
+ * w4-w7: Not used (MBZ)
*
* Return register usage:
* w3: Error code, 0 on success
@@ -82,14 +82,16 @@
* OPTEE_FFA_YIELDING_CALL_WITH_ARG.
* Bit[31:8]: Reserved (MBZ)
* w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
 *	unused bits MBZ.
- * w6-w7: Not used (MBZ)
+ * w6: The maximum secure world notification number
+ * w7: Not used (MBZ)
*/
/*
* Secure world supports giving an offset into the argument shared memory
* object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
*/
#define OPTEE_FFA_SEC_CAP_ARG_OFFSET BIT(0)
+/* OP-TEE supports asynchronous notification via FF-A */
+#define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1)
#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
@@ -109,6 +111,24 @@
#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
/*
+ * Inform OP-TEE that the normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF
+ * w4: Notification value to request bottom half processing, should be
+ * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE.
+ * w5-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5)
+
+#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64
+
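As a hedged illustration of this ABI (the real enable_async_notif() helper appears earlier in ffa_abi.c and is not shown in this hunk), the enable call reduces to a single blocking direct request:

static int enable_async_notif_sketch(struct optee *optee)
{
	struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
	struct ffa_send_direct_data data = {
		.data0 = OPTEE_FFA_ENABLE_ASYNC_NOTIF,	/* w3, service ID */
		.data1 = optee->ffa.bottom_half_value,	/* w4, notif value */
	};
	int rc;

	rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
	if (rc)
		return rc;
	/* On return, w3 carries the OP-TEE error code, 0 on success */
	return data.data0 ? -EACCES : 0;
}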
+/*
* Call with struct optee_msg_arg as argument in the supplied shared memory
* with a zero internal offset and normal cached memory attributes.
* Register usage:
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 6bb5cae09..7a5243c78 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
*/
#ifndef OPTEE_PRIVATE_H
@@ -40,15 +40,33 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
struct arm_smccc_res *);
+/*
+ * struct optee_call_waiter - TEE entry may need to wait for a free TEE thread
+ * @list_node Reference in waiters list
+ * @c Waiting completion reference
+ * @sys_thread True if waiter belongs to a system thread
+ */
struct optee_call_waiter {
struct list_head list_node;
struct completion c;
+ bool sys_thread;
};
+/*
+ * struct optee_call_queue - OP-TEE call queue management
+ * @mutex Serializes access to this struct
+ * @waiters List of threads waiting to enter OP-TEE
+ * @total_thread_count	Overall number of thread contexts in OP-TEE, or 0 if not known
+ * @free_thread_count	Number of free thread contexts in OP-TEE
+ * @sys_thread_req_count Number of registered system thread sessions
+ */
struct optee_call_queue {
/* Serializes access to this struct */
struct mutex mutex;
struct list_head waiters;
+ int total_thread_count;
+ int free_thread_count;
+ int sys_thread_req_count;
};
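A minimal sketch of the matching initializer, assuming the obvious semantics (the real optee_cq_init() is added to drivers/tee/optee/call.c, which is outside this excerpt):

void optee_cq_init(struct optee_call_queue *cq, int thread_count)
{
	mutex_init(&cq->mutex);
	INIT_LIST_HEAD(&cq->waiters);
	/*
	 * A thread_count of 0 means the number of thread contexts is
	 * unknown and secure world is trusted to report exhaustion.
	 */
	cq->total_thread_count = thread_count;
	cq->free_thread_count = thread_count;
}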
struct optee_notif {
@@ -129,12 +147,14 @@ struct optee_smc {
* struct optee_ffa_data - FFA communication struct
* @ffa_dev FFA device, contains the destination id, the id of
* OP-TEE in secure world
- * @ffa_ops FFA operations
+ * @bottom_half_value Notification ID used for bottom half signalling or
+ * U32_MAX if unused
* @mutex Serializes access to @global_ids
* @global_ids FF-A shared memory global handle translation
*/
struct optee_ffa {
struct ffa_device *ffa_dev;
+ u32 bottom_half_value;
/* Serializes access to @global_ids */
struct mutex mutex;
struct rhashtable global_ids;
@@ -154,7 +174,8 @@ struct optee;
*/
struct optee_ops {
int (*do_call_with_arg)(struct tee_context *ctx,
- struct tee_shm *shm_arg, u_int offs);
+ struct tee_shm *shm_arg, u_int offs,
+ bool system_thread);
int (*to_msg_param)(struct optee *optee,
struct optee_msg_param *msg_params,
size_t num_params, const struct tee_param *params);
@@ -178,7 +199,6 @@ struct optee_ops {
* @pool: shared memory pool
* @rpc_param_count: If > 0 number of RPC parameters to make room for
 * @scan_bus_done	flag if device registration was already done.
- * @scan_bus_wq workqueue to scan optee bus and register optee drivers
* @scan_bus_work workq to scan optee bus and register optee drivers
*/
struct optee {
@@ -197,13 +217,13 @@ struct optee {
struct tee_shm_pool *pool;
unsigned int rpc_param_count;
bool scan_bus_done;
- struct workqueue_struct *scan_bus_wq;
struct work_struct scan_bus_work;
};
struct optee_session {
struct list_head list_node;
u32 session_id;
+ bool use_sys_thread;
};
struct optee_context_data {
@@ -250,7 +270,9 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
-int optee_close_session_helper(struct tee_context *ctx, u32 session);
+int optee_system_session(struct tee_context *ctx, u32 session);
+int optee_close_session_helper(struct tee_context *ctx, u32 session,
+ bool system_thread);
int optee_close_session(struct tee_context *ctx, u32 session);
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
@@ -298,8 +320,9 @@ static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
mp->u.value.c = p->u.value.c;
}
+void optee_cq_init(struct optee_call_queue *cq, int thread_count);
void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w);
+ struct optee_call_waiter *w, bool sys_thread);
void optee_cq_wait_for_completion(struct optee_call_queue *cq,
struct optee_call_waiter *w);
void optee_cq_wait_final(struct optee_call_queue *cq,
@@ -323,6 +346,9 @@ void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg);
+int optee_do_bottom_half(struct tee_context *ctx);
+int optee_stop_async_notif(struct tee_context *ctx);
+
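These declarations indicate that the simple_call_with_arg() wrappers deleted from smc_abi.c below now live in common code; a plausible sketch of the relocated helpers (assumed, call.c is not part of this excerpt):

int optee_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

int optee_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}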
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index d5b28fd35..a37f87087 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2015-2021, 2023 Linaro Limited
* Copyright (c) 2016, EPAM Systems
*/
@@ -283,7 +283,7 @@ static void optee_enable_shm_cache(struct optee *optee)
struct optee_call_waiter w;
/* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, false);
while (true) {
struct arm_smccc_res res;
@@ -308,7 +308,7 @@ static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
struct optee_call_waiter w;
/* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, false);
while (true) {
union {
struct arm_smccc_res smccc;
@@ -507,7 +507,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
@@ -550,7 +550,7 @@ static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
- if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
+ if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
msg_arg->ret != TEEC_SUCCESS)
rc = -EINVAL;
out:
@@ -678,10 +678,11 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
- phys_addr_t pa;
struct tee_shm *shm;
size_t sz;
size_t n;
+ struct page **pages;
+ size_t page_count;
arg->ret_origin = TEEC_ORIGIN_COMMS;
@@ -716,32 +717,23 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
return;
}
- if (tee_shm_get_pa(shm, 0, &pa)) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- goto bad;
- }
-
- sz = tee_shm_get_size(shm);
-
- if (tee_shm_is_dynamic(shm)) {
- struct page **pages;
+ /*
+	 * If there are pages, it's dynamically allocated shared memory (not
+ * from the reserved shared memory pool) and needs to be
+ * registered.
+ */
+ pages = tee_shm_get_pages(shm, &page_count);
+ if (pages) {
u64 *pages_list;
- size_t page_num;
-
- pages = tee_shm_get_pages(shm, &page_num);
- if (!pages || !page_num) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
- pages_list = optee_allocate_pages_list(page_num);
+ pages_list = optee_allocate_pages_list(page_count);
if (!pages_list) {
arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
goto bad;
}
call_ctx->pages_list = pages_list;
- call_ctx->num_entries = page_num;
+ call_ctx->num_entries = page_count;
arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
OPTEE_MSG_ATTR_NONCONTIG;
@@ -752,17 +744,22 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
(tee_shm_get_page_offset(shm) &
(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- arg->params[0].u.tmem.size = tee_shm_get_size(shm);
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
- optee_fill_pages_list(pages_list, pages, page_num,
+ optee_fill_pages_list(pages_list, pages, page_count,
tee_shm_get_page_offset(shm));
} else {
+ phys_addr_t pa;
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
}
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
arg->ret = TEEC_SUCCESS;
return;
@@ -806,6 +803,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
/**
* optee_handle_rpc() - handle RPC from secure world
* @ctx: context doing the RPC
+ * @rpc_arg: pointer to RPC arguments if any, or NULL if none
* @param: value of registers for the RPC
* @call_ctx: call context. Preserved during one OP-TEE invocation
*
@@ -878,6 +876,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
* @ctx: calling context
* @shm: shared memory holding the message to pass to secure world
* @offs: offset of the message in @shm
+ * @system_thread: true if caller requests TEE system thread support
*
 * Does an SMC to OP-TEE in secure world and handles any resulting
* Remote Procedure Calls (RPC) from OP-TEE.
@@ -885,7 +884,8 @@ static void optee_handle_rpc(struct tee_context *ctx,
* Returns return code from secure world, 0 is OK
*/
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
- struct tee_shm *shm, u_int offs)
+ struct tee_shm *shm, u_int offs,
+ bool system_thread)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_call_waiter w;
@@ -926,7 +926,7 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
reg_pair_from_64(&param.a1, &param.a2, parg);
}
/* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
+ optee_cq_wait_init(&optee->call_queue, &w, system_thread);
while (true) {
struct arm_smccc_res res;
@@ -965,34 +965,6 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
-static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
-{
- struct optee_shm_arg_entry *entry;
- struct optee_msg_arg *msg_arg;
- struct tee_shm *shm;
- u_int offs;
-
- msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
- if (IS_ERR(msg_arg))
- return PTR_ERR(msg_arg);
-
- msg_arg->cmd = cmd;
- optee_smc_do_call_with_arg(ctx, shm, offs);
-
- optee_free_msg_arg(ctx, entry, offs);
- return 0;
-}
-
-static int optee_smc_do_bottom_half(struct tee_context *ctx)
-{
- return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
-}
-
-static int optee_smc_stop_async_notif(struct tee_context *ctx)
-{
- return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
-}
-
/*
* 5. Asynchronous notification
*/
@@ -1048,7 +1020,7 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->ctx);
+ optee_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
@@ -1086,7 +1058,7 @@ static void notif_pcpu_irq_work_fn(struct work_struct *work)
notif_pcpu_work);
struct optee *optee = container_of(optee_smc, struct optee, smc);
- optee_smc_do_bottom_half(optee->ctx);
+ optee_do_bottom_half(optee->ctx);
}
static int init_pcpu_irq(struct optee *optee, u_int irq)
@@ -1158,7 +1130,7 @@ static void uninit_pcpu_irq(struct optee *optee)
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
- optee_smc_stop_async_notif(optee->ctx);
+ optee_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
if (irq_is_percpu_devid(optee->smc.notif_irq))
uninit_pcpu_irq(optee);
@@ -1210,6 +1182,7 @@ static const struct tee_driver_ops optee_clnt_ops = {
.release = optee_release,
.open_session = optee_open_session,
.close_session = optee_close_session,
+ .system_session = optee_system_session,
.invoke_func = optee_invoke_func,
.cancel_req = optee_cancel_req,
.shm_register = optee_shm_register,
@@ -1357,6 +1330,16 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return true;
}
+static unsigned int optee_msg_get_thread_count(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ return 0;
+ return res.a1;
+}
+
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
@@ -1609,6 +1592,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
unsigned int rpc_param_count;
+ unsigned int thread_count;
struct tee_device *teedev;
struct tee_context *ctx;
u32 max_notif_value;
@@ -1636,6 +1620,7 @@ static int optee_probe(struct platform_device *pdev)
return -EINVAL;
}
+ thread_count = optee_msg_get_thread_count(invoke_fn);
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
&max_notif_value,
&rpc_param_count)) {
@@ -1725,8 +1710,7 @@ static int optee_probe(struct platform_device *pdev)
if (rc)
goto err_unreg_supp_teedev;
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_cq_init(&optee->call_queue, thread_count);
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 5ddfd5d9a..792d6fae4 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1173,6 +1173,14 @@ int tee_client_close_session(struct tee_context *ctx, u32 session)
}
EXPORT_SYMBOL_GPL(tee_client_close_session);
+int tee_client_system_session(struct tee_context *ctx, u32 session)
+{
+ if (!ctx->teedev->desc->ops->system_session)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->system_session(ctx, session);
+}
+EXPORT_SYMBOL_GPL(tee_client_system_session);
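A hedged usage sketch for the new export; the function name and error handling below are illustrative only, while the call itself follows the ops signature above:

static int demo_mark_system_session(struct tee_context *ctx, u32 session)
{
	int rc;

	/* Ask the TEE backend to reserve thread contexts for this session. */
	rc = tee_client_system_session(ctx, session);
	if (rc)
		pr_info("system sessions unsupported by this TEE: %d\n", rc);
	return rc;
}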
+
int tee_client_invoke_func(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param)
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 673cf0359..731d9028b 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -22,23 +22,12 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
put_page(pages[n]);
}
-static int shm_get_kernel_pages(unsigned long start, size_t page_count,
- struct page **pages)
+static void shm_get_kernel_pages(struct page **pages, size_t page_count)
{
- struct page *page;
size_t n;
- if (WARN_ON_ONCE(is_vmalloc_addr((void *)start) ||
- is_kmap_addr((void *)start)))
- return -EINVAL;
-
- page = virt_to_page((void *)start);
- for (n = 0; n < page_count; n++) {
- pages[n] = page + n;
+ for (n = 0; n < page_count; n++)
get_page(pages[n]);
- }
-
- return page_count;
}
static void release_registered_pages(struct tee_shm *shm)
@@ -214,13 +203,14 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
static struct tee_shm *
-register_shm_helper(struct tee_context *ctx, unsigned long addr,
- size_t length, u32 flags, int id)
+register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
+ int id)
{
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
- unsigned long start;
- size_t num_pages;
+ unsigned long start, addr;
+ size_t num_pages, off;
+ ssize_t len;
void *ret;
int rc;
@@ -245,31 +235,38 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
shm->flags = flags;
shm->ctx = ctx;
shm->id = id;
- addr = untagged_addr(addr);
+ addr = untagged_addr((unsigned long)iter_iov_addr(iter));
start = rounddown(addr, PAGE_SIZE);
- shm->offset = addr - start;
- shm->size = length;
- num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+ num_pages = iov_iter_npages(iter, INT_MAX);
+ if (!num_pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_ctx_put;
+ }
+
shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
if (!shm->pages) {
ret = ERR_PTR(-ENOMEM);
goto err_free_shm;
}
- if (flags & TEE_SHM_USER_MAPPED)
- rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
- shm->pages);
- else
- rc = shm_get_kernel_pages(start, num_pages, shm->pages);
- if (rc > 0)
- shm->num_pages = rc;
- if (rc != num_pages) {
- if (rc >= 0)
- rc = -ENOMEM;
- ret = ERR_PTR(rc);
- goto err_put_shm_pages;
+ len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
+ &off);
+ if (unlikely(len <= 0)) {
+ ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
+ goto err_free_shm_pages;
}
+ /*
+	 * iov_iter_extract_kvec_pages() does not take a reference on the
+	 * pages, so take one on them here.
+ */
+ if (iov_iter_is_kvec(iter))
+ shm_get_kernel_pages(shm->pages, num_pages);
+
+ shm->offset = off;
+ shm->size = len;
+ shm->num_pages = num_pages;
+
rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
shm->num_pages, start);
if (rc) {
@@ -279,10 +276,11 @@ register_shm_helper(struct tee_context *ctx, unsigned long addr,
return shm;
err_put_shm_pages:
- if (flags & TEE_SHM_USER_MAPPED)
+ if (!iov_iter_is_kvec(iter))
unpin_user_pages(shm->pages, shm->num_pages);
else
shm_put_kernel_pages(shm->pages, shm->num_pages);
+err_free_shm_pages:
kfree(shm->pages);
err_free_shm:
kfree(shm);
@@ -307,6 +305,7 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
struct tee_device *teedev = ctx->teedev;
struct tee_shm *shm;
+ struct iov_iter iter;
void *ret;
int id;
@@ -319,7 +318,8 @@ struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
if (id < 0)
return ERR_PTR(id);
- shm = register_shm_helper(ctx, addr, length, flags, id);
+ iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
+ shm = register_shm_helper(ctx, &iter, flags, id);
if (IS_ERR(shm)) {
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, id);
@@ -352,8 +352,14 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length)
{
u32 flags = TEE_SHM_DYNAMIC;
+ struct kvec kvec;
+ struct iov_iter iter;
+
+ kvec.iov_base = addr;
+ kvec.iov_len = length;
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);
- return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+ return register_shm_helper(ctx, &iter, flags, -1);
}
EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
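A hedged usage sketch (demo_register_page() is hypothetical): the helper still expects page-backed kernel memory, now wrapped internally in a single-entry kvec as shown above:

static struct tee_shm *demo_register_page(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *buf = (void *)__get_free_page(GFP_KERNEL);

	if (!buf)
		return ERR_PTR(-ENOMEM);

	shm = tee_shm_register_kernel_buf(ctx, buf, PAGE_SIZE);
	if (IS_ERR(shm))
		free_page((unsigned long)buf);
	return shm;
}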
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c81a00fbc..17a8ae5e9 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -33,6 +33,13 @@ config THERMAL_STATISTICS
If in doubt, say N.
+config THERMAL_DEBUGFS
+ bool "Thermal subsystem debug support"
+ depends on DEBUG_FS
+ help
+ Say Y to allow the thermal subsystem to collect diagnostic
+ information that can be accessed via debugfs.
+
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
default 0
@@ -76,10 +83,6 @@ config THERMAL_OF
Say 'Y' here if you need to build thermal infrastructure
based on device tree.
-config THERMAL_ACPI
- depends on ACPI
- bool
-
config THERMAL_WRITABLE_TRIPS
bool "Enable writable trip points"
help
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index c934cab30..d77d7fe99 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -10,10 +10,11 @@ thermal_sys-y += thermal_trip.o thermal_helpers.o
# netlink interface to manage the thermal framework
thermal_sys-$(CONFIG_THERMAL_NETLINK) += thermal_netlink.o
+thermal_sys-$(CONFIG_THERMAL_DEBUGFS) += thermal_debugfs.o
+
# interface to/from other layers providing sensors
thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
thermal_sys-$(CONFIG_THERMAL_OF) += thermal_of.o
-thermal_sys-$(CONFIG_THERMAL_ACPI) += thermal_acpi.o
# governors
CFLAGS_gov_power_allocator.o := -I$(src)
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 5877cde25..df7a5ed55 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -167,13 +167,11 @@ static int amlogic_thermal_enable(struct amlogic_thermal *data)
return 0;
}
-static int amlogic_thermal_disable(struct amlogic_thermal *data)
+static void amlogic_thermal_disable(struct amlogic_thermal *data)
{
regmap_update_bits(data->regmap, TSENSOR_CFG_REG1,
TSENSOR_CFG_REG1_ENABLE, 0);
clk_disable_unprepare(data->clk);
-
- return 0;
}
static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
@@ -298,27 +296,30 @@ static void amlogic_thermal_remove(struct platform_device *pdev)
amlogic_thermal_disable(data);
}
-static int __maybe_unused amlogic_thermal_suspend(struct device *dev)
+static int amlogic_thermal_suspend(struct device *dev)
{
struct amlogic_thermal *data = dev_get_drvdata(dev);
- return amlogic_thermal_disable(data);
+ amlogic_thermal_disable(data);
+
+ return 0;
}
-static int __maybe_unused amlogic_thermal_resume(struct device *dev)
+static int amlogic_thermal_resume(struct device *dev)
{
struct amlogic_thermal *data = dev_get_drvdata(dev);
return amlogic_thermal_enable(data);
}
-static SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops,
- amlogic_thermal_suspend, amlogic_thermal_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(amlogic_thermal_pm_ops,
+ amlogic_thermal_suspend,
+ amlogic_thermal_resume);
static struct platform_driver amlogic_thermal_driver = {
.driver = {
.name = "amlogic_thermal",
- .pm = &amlogic_thermal_pm_ops,
+ .pm = pm_ptr(&amlogic_thermal_pm_ops),
.of_match_table = of_amlogic_thermal_match,
},
.probe = amlogic_thermal_probe,
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
index 69f4c0a8d..f678c1281 100644
--- a/drivers/thermal/cpuidle_cooling.c
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -66,7 +66,7 @@ static unsigned int cpuidle_cooling_runtime(unsigned int idle_duration_us,
* @state : a pointer to the state variable to be filled
*
* The function always returns 100 as the injection ratio. It is
- * percentile based for consistency accross different platforms.
+ * percentile based for consistency across different platforms.
*
* Return: The function can not fail, it is always zero
*/
@@ -146,7 +146,7 @@ static int cpuidle_cooling_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-/**
+/*
* cpuidle_cooling_ops - thermal cooling device ops
*/
static struct thermal_cooling_device_ops cpuidle_cooling_ops = {
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 931cd8842..38581583a 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -47,6 +47,22 @@ static inline s64 div_frac(s64 x, s64 y)
}
/**
+ * struct power_actor - internal power information for power actor
+ * @req_power: requested power value (not weighted)
+ * @max_power: max allocatable power for this actor
+ * @granted_power: granted power for this actor
+ * @extra_actor_power: extra power that this actor can receive
+ * @weighted_req_power: weighted requested power as input to IPA
+ */
+struct power_actor {
+ u32 req_power;
+ u32 max_power;
+ u32 granted_power;
+ u32 extra_actor_power;
+ u32 weighted_req_power;
+};
+
+/**
* struct power_allocator_params - parameters for the power allocator governor
* @allocated_tzp: whether we have allocated tzp for this thermal zone and
* it needs to be freed on unbind
@@ -59,9 +75,12 @@ static inline s64 div_frac(s64 x, s64 y)
* governor switches on when this trip point is crossed.
* If the thermal zone only has one passive trip point,
* @trip_switch_on should be NULL.
- * @trip_max_desired_temperature: last passive trip point of the thermal
- * zone. The temperature we are
- * controlling for.
+ * @trip_max: last passive trip point of the thermal zone. The
+ * temperature we are controlling for.
+ * @total_weight: Sum of all thermal instances weights
+ * @num_actors: number of cooling devices supporting IPA callbacks
+ * @buffer_size: internal buffer size, to avoid runtime re-calculation
+ * @power: buffer for all power actors internal power information
*/
struct power_allocator_params {
bool allocated_tzp;
@@ -69,9 +88,20 @@ struct power_allocator_params {
s32 prev_err;
u32 sustainable_power;
const struct thermal_trip *trip_switch_on;
- const struct thermal_trip *trip_max_desired_temperature;
+ const struct thermal_trip *trip_max;
+ int total_weight;
+ unsigned int num_actors;
+ unsigned int buffer_size;
+ struct power_actor *power;
};
+static bool power_actor_is_valid(struct power_allocator_params *params,
+ struct thermal_instance *instance)
+{
+ return (instance->trip == params->trip_max &&
+ cdev_is_power_actor(instance->cdev));
+}
+
/**
* estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
* @tz: thermal zone we are operating in
@@ -85,20 +115,17 @@ struct power_allocator_params {
*/
static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
{
- u32 sustainable_power = 0;
- struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ struct thermal_cooling_device *cdev;
+ struct thermal_instance *instance;
+ u32 sustainable_power = 0;
+ u32 min_power;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- struct thermal_cooling_device *cdev = instance->cdev;
- u32 min_power;
-
- if (instance->trip != params->trip_max_desired_temperature)
- continue;
-
- if (!cdev_is_power_actor(cdev))
+ if (!power_actor_is_valid(params, instance))
continue;
+ cdev = instance->cdev;
if (cdev->ops->state2power(cdev, instance->upper, &min_power))
continue;
@@ -212,10 +239,10 @@ static u32 pid_controller(struct thermal_zone_device *tz,
int control_temp,
u32 max_allocatable_power)
{
+ struct power_allocator_params *params = tz->governor_data;
s64 p, i, d, power_range;
s32 err, max_power_frac;
u32 sustainable_power;
- struct power_allocator_params *params = tz->governor_data;
max_power_frac = int_to_frac(max_allocatable_power);
@@ -303,15 +330,10 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
/**
* divvy_up_power() - divvy the allocated power between the actors
- * @req_power: each actor's requested power
- * @max_power: each actor's maximum available power
- * @num_actors: size of the @req_power, @max_power and @granted_power's array
- * @total_req_power: sum of @req_power
+ * @power: buffer for all power actors internal power information
+ * @num_actors: number of power actors in this thermal zone
+ * @total_req_power: sum of all weighted requested power for all actors
* @power_range: total allocated power
- * @granted_power: output array: each actor's granted power
- * @extra_actor_power: an appropriately sized array to be used in the
- * function as temporary storage of the extra power given
- * to the actors
*
* This function divides the total allocated power (@power_range)
* fairly between the actors. It first tries to give each actor a
@@ -324,15 +346,12 @@ power_actor_set_power(struct thermal_cooling_device *cdev,
* If any actor received more than their maximum power, then that
* surplus is re-divvied among the actors based on how far they are
* from their respective maximums.
- *
- * Granted power for each actor is written to @granted_power, which
- * should've been allocated by the calling function.
*/
-static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
- u32 total_req_power, u32 power_range,
- u32 *granted_power, u32 *extra_actor_power)
+static void divvy_up_power(struct power_actor *power, int num_actors,
+ u32 total_req_power, u32 power_range)
{
- u32 extra_power, capped_extra_power;
+ u32 capped_extra_power = 0;
+ u32 extra_power = 0;
int i;
/*
@@ -341,24 +360,23 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
if (!total_req_power)
total_req_power = 1;
- capped_extra_power = 0;
- extra_power = 0;
for (i = 0; i < num_actors; i++) {
- u64 req_range = (u64)req_power[i] * power_range;
+ struct power_actor *pa = &power[i];
+ u64 req_range = (u64)pa->req_power * power_range;
- granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
- total_req_power);
+ pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
+ total_req_power);
- if (granted_power[i] > max_power[i]) {
- extra_power += granted_power[i] - max_power[i];
- granted_power[i] = max_power[i];
+ if (pa->granted_power > pa->max_power) {
+ extra_power += pa->granted_power - pa->max_power;
+ pa->granted_power = pa->max_power;
}
- extra_actor_power[i] = max_power[i] - granted_power[i];
- capped_extra_power += extra_actor_power[i];
+ pa->extra_actor_power = pa->max_power - pa->granted_power;
+ capped_extra_power += pa->extra_actor_power;
}
- if (!extra_power)
+ if (!extra_power || !capped_extra_power)
return;
/*
@@ -366,127 +384,95 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
* how far they are from the max
*/
extra_power = min(extra_power, capped_extra_power);
- if (capped_extra_power > 0)
- for (i = 0; i < num_actors; i++) {
- u64 extra_range = (u64)extra_actor_power[i] * extra_power;
- granted_power[i] += DIV_ROUND_CLOSEST_ULL(extra_range,
- capped_extra_power);
- }
+
+ for (i = 0; i < num_actors; i++) {
+ struct power_actor *pa = &power[i];
+ u64 extra_range = pa->extra_actor_power;
+
+ extra_range *= extra_power;
+ pa->granted_power += DIV_ROUND_CLOSEST_ULL(extra_range,
+ capped_extra_power);
+ }
}
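A worked example of the two passes above (numbers invented for illustration): two actors request 300 mW and 100 mW with maximums of 120 mW and 200 mW, and power_range is 200 mW. The proportional pass grants 150 mW and 50 mW; the first actor is capped at 120 mW, so extra_power becomes 30 mW while its extra_actor_power is 0, and the second actor's headroom makes capped_extra_power 150 mW. The redistribution pass then adds 150 * 30 / 150 = 30 mW to the second actor, for final grants of 120 mW and 80 mW, exactly consuming the 200 mW range.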
-static int allocate_power(struct thermal_zone_device *tz,
- int control_temp)
+static int allocate_power(struct thermal_zone_device *tz, int control_temp)
{
- struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
- const struct thermal_trip *trip_max_desired_temperature =
- params->trip_max_desired_temperature;
- u32 *req_power, *max_power, *granted_power, *extra_actor_power;
- u32 *weighted_req_power;
- u32 total_req_power, max_allocatable_power, total_weighted_req_power;
- u32 total_granted_power, power_range;
- int i, num_actors, total_weight, ret = 0;
-
- num_actors = 0;
- total_weight = 0;
- list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if ((instance->trip == trip_max_desired_temperature) &&
- cdev_is_power_actor(instance->cdev)) {
- num_actors++;
- total_weight += instance->weight;
- }
- }
+ unsigned int num_actors = params->num_actors;
+ struct power_actor *power = params->power;
+ struct thermal_cooling_device *cdev;
+ struct thermal_instance *instance;
+ u32 total_weighted_req_power = 0;
+ u32 max_allocatable_power = 0;
+ u32 total_granted_power = 0;
+ u32 total_req_power = 0;
+ u32 power_range, weight;
+ int i = 0, ret;
if (!num_actors)
return -ENODEV;
- /*
- * We need to allocate five arrays of the same size:
- * req_power, max_power, granted_power, extra_actor_power and
- * weighted_req_power. They are going to be needed until this
- * function returns. Allocate them all in one go to simplify
- * the allocation and deallocation logic.
- */
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
- BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
- req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
- if (!req_power)
- return -ENOMEM;
-
- max_power = &req_power[num_actors];
- granted_power = &req_power[2 * num_actors];
- extra_actor_power = &req_power[3 * num_actors];
- weighted_req_power = &req_power[4 * num_actors];
-
- i = 0;
- total_weighted_req_power = 0;
- total_req_power = 0;
- max_allocatable_power = 0;
+ /* Clean all buffers for new power estimations */
+ memset(power, 0, params->buffer_size);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- int weight;
- struct thermal_cooling_device *cdev = instance->cdev;
+ struct power_actor *pa = &power[i];
- if (instance->trip != trip_max_desired_temperature)
+ if (!power_actor_is_valid(params, instance))
continue;
- if (!cdev_is_power_actor(cdev))
- continue;
+ cdev = instance->cdev;
- if (cdev->ops->get_requested_power(cdev, &req_power[i]))
+ ret = cdev->ops->get_requested_power(cdev, &pa->req_power);
+ if (ret)
continue;
- if (!total_weight)
+ if (!params->total_weight)
weight = 1 << FRAC_BITS;
else
weight = instance->weight;
- weighted_req_power[i] = frac_to_int(weight * req_power[i]);
+ pa->weighted_req_power = frac_to_int(weight * pa->req_power);
- if (cdev->ops->state2power(cdev, instance->lower,
- &max_power[i]))
+ ret = cdev->ops->state2power(cdev, instance->lower,
+ &pa->max_power);
+ if (ret)
continue;
- total_req_power += req_power[i];
- max_allocatable_power += max_power[i];
- total_weighted_req_power += weighted_req_power[i];
+ total_req_power += pa->req_power;
+ max_allocatable_power += pa->max_power;
+ total_weighted_req_power += pa->weighted_req_power;
i++;
}
power_range = pid_controller(tz, control_temp, max_allocatable_power);
- divvy_up_power(weighted_req_power, max_power, num_actors,
- total_weighted_req_power, power_range, granted_power,
- extra_actor_power);
+ divvy_up_power(power, num_actors, total_weighted_req_power,
+ power_range);
- total_granted_power = 0;
i = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip_max_desired_temperature)
- continue;
+ struct power_actor *pa = &power[i];
- if (!cdev_is_power_actor(instance->cdev))
+ if (!power_actor_is_valid(params, instance))
continue;
power_actor_set_power(instance->cdev, instance,
- granted_power[i]);
- total_granted_power += granted_power[i];
+ pa->granted_power);
+ total_granted_power += pa->granted_power;
+ trace_thermal_power_actor(tz, i, pa->req_power,
+ pa->granted_power);
i++;
}
- trace_thermal_power_allocator(tz, req_power, total_req_power,
- granted_power, total_granted_power,
+ trace_thermal_power_allocator(tz, total_req_power, total_granted_power,
num_actors, power_range,
max_allocatable_power, tz->temperature,
control_temp - tz->temperature);
- kfree(req_power);
-
- return ret;
+ return 0;
}
/**
@@ -531,13 +517,13 @@ static void get_governor_trips(struct thermal_zone_device *tz,
if (last_passive) {
params->trip_switch_on = first_passive;
- params->trip_max_desired_temperature = last_passive;
+ params->trip_max = last_passive;
} else if (first_passive) {
params->trip_switch_on = NULL;
- params->trip_max_desired_temperature = first_passive;
+ params->trip_max = first_passive;
} else {
params->trip_switch_on = NULL;
- params->trip_max_desired_temperature = last_active;
+ params->trip_max = last_active;
}
}
@@ -549,19 +535,19 @@ static void reset_pid_controller(struct power_allocator_params *params)
static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
{
- struct thermal_instance *instance;
struct power_allocator_params *params = tz->governor_data;
+ struct thermal_cooling_device *cdev;
+ struct thermal_instance *instance;
u32 req_power;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- struct thermal_cooling_device *cdev = instance->cdev;
-
- if (instance->trip != params->trip_max_desired_temperature ||
- (!cdev_is_power_actor(instance->cdev)))
+ if (!power_actor_is_valid(params, instance))
continue;
+ cdev = instance->cdev;
+
instance->target = 0;
- mutex_lock(&instance->cdev->lock);
+ mutex_lock(&cdev->lock);
/*
* Call for updating the cooling devices local stats and avoid
* periods of dozen of seconds when those have not been
@@ -570,9 +556,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
cdev->ops->get_requested_power(cdev, &req_power);
if (update)
- __thermal_cdev_update(instance->cdev);
+ __thermal_cdev_update(cdev);
- mutex_unlock(&instance->cdev->lock);
+ mutex_unlock(&cdev->lock);
}
}
@@ -580,30 +566,99 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
* check_power_actors() - Check all cooling devices and warn when they are
* not power actors
* @tz: thermal zone to operate on
+ * @params: power allocator private data
*
* Check all cooling devices in the @tz and warn every time they are missing
* power actor API. The warning should help to investigate the issue, which
* could be e.g. lack of Energy Model for a given device.
*
- * Return: 0 on success, -EINVAL if any cooling device does not implement
- * the power actor API.
+ * If all of the cooling devices currently attached to @tz implement the power
+ * actor API, return the number of them (which may be 0, because some cooling
+ * devices may be attached later). Otherwise, return -EINVAL.
*/
-static int check_power_actors(struct thermal_zone_device *tz)
+static int check_power_actors(struct thermal_zone_device *tz,
+ struct power_allocator_params *params)
{
struct thermal_instance *instance;
int ret = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != params->trip_max)
+ continue;
+
if (!cdev_is_power_actor(instance->cdev)) {
dev_warn(&tz->device, "power_allocator: %s is not a power actor\n",
instance->cdev->type);
- ret = -EINVAL;
+ return -EINVAL;
}
+ ret++;
}
return ret;
}
+static int allocate_actors_buffer(struct power_allocator_params *params,
+ int num_actors)
+{
+ int ret;
+
+ kfree(params->power);
+
+ /* There might be no cooling devices yet. */
+ if (!num_actors) {
+ ret = 0;
+ goto clean_state;
+ }
+
+ params->power = kcalloc(num_actors, sizeof(struct power_actor),
+ GFP_KERNEL);
+ if (!params->power) {
+ ret = -ENOMEM;
+ goto clean_state;
+ }
+
+ params->num_actors = num_actors;
+ params->buffer_size = num_actors * sizeof(struct power_actor);
+
+ return 0;
+
+clean_state:
+ params->num_actors = 0;
+ params->buffer_size = 0;
+ params->power = NULL;
+ return ret;
+}
+
+static void power_allocator_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason)
+{
+ struct power_allocator_params *params = tz->governor_data;
+ struct thermal_instance *instance;
+ int num_actors = 0;
+
+ switch (reason) {
+ case THERMAL_TZ_BIND_CDEV:
+ case THERMAL_TZ_UNBIND_CDEV:
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+ if (power_actor_is_valid(params, instance))
+ num_actors++;
+
+ if (num_actors == params->num_actors)
+ return;
+
+ allocate_actors_buffer(params, num_actors);
+ break;
+ case THERMAL_INSTANCE_WEIGHT_CHANGED:
+ params->total_weight = 0;
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+ if (power_actor_is_valid(params, instance))
+ params->total_weight += instance->weight;
+ break;
+ default:
+ break;
+ }
+}
+
/**
* power_allocator_bind() - bind the power_allocator governor to a thermal zone
* @tz: thermal zone to bind it to
@@ -616,17 +671,29 @@ static int check_power_actors(struct thermal_zone_device *tz)
*/
static int power_allocator_bind(struct thermal_zone_device *tz)
{
- int ret;
struct power_allocator_params *params;
-
- ret = check_power_actors(tz);
- if (ret)
- return ret;
+ int ret;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
+ get_governor_trips(tz, params);
+
+ ret = check_power_actors(tz, params);
+ if (ret < 0) {
+ dev_warn(&tz->device, "power_allocator: binding failed\n");
+ kfree(params);
+ return ret;
+ }
+
+ ret = allocate_actors_buffer(params, ret);
+ if (ret) {
+ dev_warn(&tz->device, "power_allocator: allocation failed\n");
+ kfree(params);
+ return ret;
+ }
+
if (!tz->tzp) {
tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
if (!tz->tzp) {
@@ -640,14 +707,10 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
if (!tz->tzp->sustainable_power)
dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
- get_governor_trips(tz, params);
-
- if (params->trip_max_desired_temperature) {
- int temp = params->trip_max_desired_temperature->temperature;
-
+ if (params->trip_max)
estimate_pid_constants(tz, tz->tzp->sustainable_power,
- params->trip_switch_on, temp);
- }
+ params->trip_switch_on,
+ params->trip_max->temperature);
reset_pid_controller(params);
@@ -656,6 +719,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
free_params:
+ kfree(params->power);
kfree(params);
return ret;
@@ -672,6 +736,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
tz->tzp = NULL;
}
+ kfree(params->power);
kfree(tz->governor_data);
tz->governor_data = NULL;
}
@@ -688,7 +753,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz,
* We get called for every trip point but we only need to do
* our calculations once
*/
- if (trip != params->trip_max_desired_temperature)
+ if (trip != params->trip_max)
return 0;
trip = params->trip_switch_on;
@@ -702,7 +767,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz,
tz->passive = 1;
- return allocate_power(tz, params->trip_max_desired_temperature->temperature);
+ return allocate_power(tz, params->trip_max->temperature);
}
static struct thermal_governor thermal_gov_power_allocator = {
@@ -710,5 +775,6 @@ static struct thermal_governor thermal_gov_power_allocator = {
.bind_to_tz = power_allocator_bind,
.unbind_from_tz = power_allocator_unbind,
.throttle = power_allocator_throttle,
+ .update_tz = power_allocator_update_tz,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index ecd7e07ee..b43953b55 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -85,7 +85,7 @@ config INTEL_BXT_PMIC_THERMAL
config INTEL_PCH_THERMAL
tristate "Intel PCH Thermal Reporting Driver"
depends on X86 && PCI
- select THERMAL_ACPI if ACPI
+ select ACPI_THERMAL_LIB if ACPI
help
Enable this to support thermal reporting on certain intel PCHs.
Thermal reporting device will provide temperature reading,
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 300ea53e9..e76b13e44 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -9,7 +9,7 @@ config INT340X_THERMAL
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
- select THERMAL_ACPI
+ select ACPI_THERMAL_LIB
select INTEL_SOC_DTS_IOSF_CORE
select INTEL_TCC
select PROC_THERMAL_MMIO_RAPL if POWERCAP
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index a03b67579..3e4bfe817 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -225,7 +225,8 @@ EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
static int int340x_update_one_trip(struct thermal_trip *trip, void *arg)
{
- struct acpi_device *zone_adev = arg;
+ struct int34x_thermal_zone *int34x_zone = arg;
+ struct acpi_device *zone_adev = int34x_zone->adev;
int temp, err;
switch (trip->type) {
@@ -249,14 +250,15 @@ static int int340x_update_one_trip(struct thermal_trip *trip, void *arg)
if (err)
temp = THERMAL_TEMP_INVALID;
- trip->temperature = temp;
+ thermal_zone_set_trip_temp(int34x_zone->zone, trip, temp);
+
return 0;
}
void int340x_thermal_update_trips(struct int34x_thermal_zone *int34x_zone)
{
thermal_zone_for_each_trip(int34x_zone->zone, int340x_update_one_trip,
- int34x_zone->adev);
+ int34x_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_update_trips);
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index 1c5a429b2..3b04c6ec4 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -439,13 +439,12 @@ void intel_hfi_online(unsigned int cpu)
/*
* Now check if the HFI instance of the package/die of @cpu has been
 * initialized (by checking its header). In such a case, all we have to
- * do is to add @cpu to this instance's cpumask.
+ * do is to add @cpu to this instance's cpumask and enable the instance
+ * if needed.
*/
mutex_lock(&hfi_instance_lock);
- if (hfi_instance->hdr) {
- cpumask_set_cpu(cpu, hfi_instance->cpus);
- goto unlock;
- }
+ if (hfi_instance->hdr)
+ goto enable;
/*
* Hardware is programmed with the physical address of the first page
@@ -475,10 +474,14 @@ void intel_hfi_online(unsigned int cpu)
raw_spin_lock_init(&hfi_instance->table_lock);
raw_spin_lock_init(&hfi_instance->event_lock);
+enable:
cpumask_set_cpu(cpu, hfi_instance->cpus);
- hfi_set_hw_table(hfi_instance);
- hfi_enable();
+ /* Enable this HFI instance if this is its first online CPU. */
+ if (cpumask_weight(hfi_instance->cpus) == 1) {
+ hfi_set_hw_table(hfi_instance);
+ hfi_enable();
+ }
unlock:
mutex_unlock(&hfi_instance_lock);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 5ac5cb60b..bc6eb0dd6 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -49,7 +49,6 @@
*/
#define DEFAULT_DURATION_JIFFIES (6)
-static unsigned int target_mwait;
static struct dentry *debug_dir;
static bool poll_pkg_cstate_enable;
@@ -312,34 +311,6 @@ MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
"\twindow size results in slower response time but more smooth\n"
"\tclamping results. default to 2.");
-static void find_target_mwait(void)
-{
- unsigned int eax, ebx, ecx, edx;
- unsigned int highest_cstate = 0;
- unsigned int highest_subcstate = 0;
- int i;
-
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
-
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
-
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
- return;
-
- edx >>= MWAIT_SUBSTATE_SIZE;
- for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
- if (edx & MWAIT_SUBSTATE_MASK) {
- highest_cstate = i;
- highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
- }
- }
- target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
- (highest_subcstate - 1);
-
-}
-
struct pkg_cstate_info {
bool skip;
int msr_index;
@@ -759,9 +730,6 @@ static int __init powerclamp_probe(void)
return -ENODEV;
}
- /* find the deepest mwait value */
- find_target_mwait();
-
return 0;
}
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
index 99ca0c7bc..0f475fe46 100644
--- a/drivers/thermal/loongson2_thermal.c
+++ b/drivers/thermal/loongson2_thermal.c
@@ -8,9 +8,10 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/thermal.h>
#include <linux/units.h>
#include "thermal_hwmon.h"
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 123ec81e1..6482513bf 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -138,12 +138,10 @@ enum soc_type {
/**
* struct exynos_tmu_data : A structure to hold the private data of the TMU
* driver
- * @id: identifier of the one instance of the TMU controller.
* @base: base address of the single instance of the TMU controller.
* @base_second: base address of the common registers of the TMU controller.
* @irq: irq number of the TMU controller.
* @soc: id of the SOC type.
- * @irq_work: pointer to the irq work structure.
* @lock: lock to implement synchronization.
* @clk: pointer to the clock structure.
* @clk_sec: pointer to the clock structure for accessing the base_second.
@@ -159,13 +157,13 @@ enum soc_type {
* @reference_voltage: reference voltage of amplifier
* in the positive-TC generator block
* 0 < reference_voltage <= 31
- * @regulator: pointer to the TMU regulator structure.
- * @reg_conf: pointer to structure to register with core thermal.
* @tzd: pointer to thermal_zone_device structure
- * @ntrip: number of supported trip points.
* @enabled: current status of TMU device
- * @tmu_set_trip_temp: SoC specific method to set trip (rising threshold)
- * @tmu_set_trip_hyst: SoC specific to set hysteresis (falling threshold)
+ * @tmu_set_low_temp: SoC specific method to set trip (falling threshold)
+ * @tmu_set_high_temp: SoC specific method to set trip (rising threshold)
+ * @tmu_set_crit_temp: SoC specific method to set critical temperature
+ * @tmu_disable_low: SoC specific method to disable an interrupt (falling threshold)
+ * @tmu_disable_high: SoC specific method to disable an interrupt (rising threshold)
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -173,12 +171,10 @@ enum soc_type {
* @tmu_clear_irqs: SoC specific TMU interrupts clearing method
*/
struct exynos_tmu_data {
- int id;
void __iomem *base;
void __iomem *base_second;
int irq;
enum soc_type soc;
- struct work_struct irq_work;
struct mutex lock;
struct clk *clk, *clk_sec, *sclk;
u32 cal_type;
@@ -188,15 +184,14 @@ struct exynos_tmu_data {
u16 temp_error1, temp_error2;
u8 gain;
u8 reference_voltage;
- struct regulator *regulator;
struct thermal_zone_device *tzd;
- unsigned int ntrip;
bool enabled;
- void (*tmu_set_trip_temp)(struct exynos_tmu_data *data, int trip,
- u8 temp);
- void (*tmu_set_trip_hyst)(struct exynos_tmu_data *data, int trip,
- u8 temp, u8 hyst);
+ void (*tmu_set_low_temp)(struct exynos_tmu_data *data, u8 temp);
+ void (*tmu_set_high_temp)(struct exynos_tmu_data *data, u8 temp);
+ void (*tmu_set_crit_temp)(struct exynos_tmu_data *data, u8 temp);
+ void (*tmu_disable_low)(struct exynos_tmu_data *data);
+ void (*tmu_disable_high)(struct exynos_tmu_data *data);
void (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
int (*tmu_read)(struct exynos_tmu_data *data);
@@ -258,25 +253,8 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tzd = data->tzd;
- int num_trips = thermal_zone_get_num_trips(tzd);
unsigned int status;
- int ret = 0, temp;
-
- ret = thermal_zone_get_crit_temp(tzd, &temp);
- if (ret && data->soc != SOC_ARCH_EXYNOS5433) { /* FIXME */
- dev_err(&pdev->dev,
- "No CRITICAL trip point defined in device tree!\n");
- goto out;
- }
-
- if (num_trips > data->ntrip) {
- dev_info(&pdev->dev,
- "More trip points than supported by this TMU.\n");
- dev_info(&pdev->dev,
- "%d trip points should be configured in polling mode.\n",
- num_trips - data->ntrip);
- }
+ int ret = 0;
mutex_lock(&data->lock);
clk_enable(data->clk);
@@ -287,34 +265,44 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
if (!status) {
ret = -EBUSY;
} else {
- int i, ntrips =
- min_t(int, num_trips, data->ntrip);
-
data->tmu_initialize(pdev);
+ data->tmu_clear_irqs(data);
+ }
- /* Write temperature code for rising and falling threshold */
- for (i = 0; i < ntrips; i++) {
+ if (!IS_ERR(data->clk_sec))
+ clk_disable(data->clk_sec);
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
- struct thermal_trip trip;
+ return ret;
+}
- ret = thermal_zone_get_trip(tzd, i, &trip);
- if (ret)
- goto err;
+static int exynos_thermal_zone_configure(struct platform_device *pdev)
+{
+ struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tzd = data->tzd;
+ int ret, temp;
- data->tmu_set_trip_temp(data, i, trip.temperature / MCELSIUS);
- data->tmu_set_trip_hyst(data, i, trip.temperature / MCELSIUS,
- trip.hysteresis / MCELSIUS);
- }
+ ret = thermal_zone_get_crit_temp(tzd, &temp);
+ if (ret) {
+ /* FIXME: Remove this special case */
+ if (data->soc == SOC_ARCH_EXYNOS5433)
+ return 0;
- data->tmu_clear_irqs(data);
+ dev_err(&pdev->dev,
+ "No CRITICAL trip point defined in device tree!\n");
+ return ret;
}
-err:
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ data->tmu_set_crit_temp(data, temp / MCELSIUS);
+
clk_disable(data->clk);
mutex_unlock(&data->lock);
- if (!IS_ERR(data->clk_sec))
- clk_disable(data->clk_sec);
-out:
- return ret;
+
+ return 0;
}
static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
@@ -347,30 +335,74 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
mutex_unlock(&data->lock);
}
-static void exynos4210_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip_id, u8 temp)
+static void exynos_tmu_update_bit(struct exynos_tmu_data *data, int reg_off,
+ int bit_off, bool enable)
+{
+ u32 interrupt_en;
+
+ interrupt_en = readl(data->base + reg_off);
+ if (enable)
+ interrupt_en |= BIT(bit_off);
+ else
+ interrupt_en &= ~BIT(bit_off);
+ writel(interrupt_en, data->base + reg_off);
+}
+
+static void exynos_tmu_update_temp(struct exynos_tmu_data *data, int reg_off,
+ int bit_off, u8 temp)
{
- struct thermal_trip trip;
- u8 ref, th_code;
+ u16 tmu_temp_mask;
+ u32 th;
- if (thermal_zone_get_trip(data->tzd, 0, &trip))
- return;
+ tmu_temp_mask =
+ (data->soc == SOC_ARCH_EXYNOS7) ? EXYNOS7_TMU_TEMP_MASK
+ : EXYNOS_TMU_TEMP_MASK;
- ref = trip.temperature / MCELSIUS;
+ th = readl(data->base + reg_off);
+ th &= ~(tmu_temp_mask << bit_off);
+ th |= temp_to_code(data, temp) << bit_off;
+ writel(th, data->base + reg_off);
+}
- if (trip_id == 0) {
- th_code = temp_to_code(data, ref);
- writeb(th_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
- }
+static void exynos4210_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ /*
+ * Failing thresholds are not supported on Exynos 4210.
+	 * Falling thresholds are not supported on Exynos 4210.
+ */
+}
+
+static void exynos4210_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ temp = temp_to_code(data, temp);
+ writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + 4);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, true);
+}
- temp -= ref;
- writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + trip_id * 4);
+static void exynos4210_tmu_disable_low(struct exynos_tmu_data *data)
+{
+ /* Again, this is handled by polling. */
}
-/* failing thresholds are not supported on Exynos4210 */
-static void exynos4210_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos4210_tmu_disable_high(struct exynos_tmu_data *data)
{
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, false);
+}
+
+static void exynos4210_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ /*
+ * Hardware critical temperature handling is not supported on Exynos 4210.
+ * We still set the critical temperature threshold, but this is only to
+ * make sure it is handled as soon as possible. It is just a normal interrupt.
+ */
+
+ temp = temp_to_code(data, temp);
+ writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + 12);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 12, true);
}
static void exynos4210_tmu_initialize(struct platform_device *pdev)
@@ -378,35 +410,35 @@ static void exynos4210_tmu_initialize(struct platform_device *pdev)
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
+
+ writeb(0, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
}
-static void exynos4412_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+static void exynos4412_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
{
- u32 th, con;
-
- th = readl(data->base + EXYNOS_THD_TEMP_RISE);
- th &= ~(0xff << 8 * trip);
- th |= temp_to_code(data, temp) << 8 * trip;
- writel(th, data->base + EXYNOS_THD_TEMP_RISE);
+ exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_FALL, 0, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, true);
+}
- if (trip == 3) {
- con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
- con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
- writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
- }
+static void exynos4412_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_RISE, 8, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_RISE0_SHIFT + 4, true);
}
-static void exynos4412_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos4412_tmu_disable_low(struct exynos_tmu_data *data)
{
- u32 th;
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, false);
+}
- th = readl(data->base + EXYNOS_THD_TEMP_FALL);
- th &= ~(0xff << 8 * trip);
- if (hyst)
- th |= temp_to_code(data, temp - hyst) << 8 * trip;
- writel(th, data->base + EXYNOS_THD_TEMP_FALL);
+static void exynos4412_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS_THD_TEMP_RISE, 24, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_CONTROL,
+ EXYNOS_TMU_THERM_TRIP_EN_SHIFT, true);
}
static void exynos4412_tmu_initialize(struct platform_device *pdev)
@@ -436,44 +468,39 @@ static void exynos4412_tmu_initialize(struct platform_device *pdev)
sanitize_temp_error(data, trim_info);
}
-static void exynos5433_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+static void exynos5433_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
{
- unsigned int reg_off, j;
- u32 th;
-
- if (trip > 3) {
- reg_off = EXYNOS5433_THD_TEMP_RISE7_4;
- j = trip - 4;
- } else {
- reg_off = EXYNOS5433_THD_TEMP_RISE3_0;
- j = trip;
- }
+ exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_FALL3_0, 0, temp);
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, true);
+}
- th = readl(data->base + reg_off);
- th &= ~(0xff << j * 8);
- th |= (temp_to_code(data, temp) << j * 8);
- writel(th, data->base + reg_off);
+static void exynos5433_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_RISE3_0, 8, temp);
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, true);
}
-static void exynos5433_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos5433_tmu_disable_low(struct exynos_tmu_data *data)
{
- unsigned int reg_off, j;
- u32 th;
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT, false);
+}
- if (trip > 3) {
- reg_off = EXYNOS5433_THD_TEMP_FALL7_4;
- j = trip - 4;
- } else {
- reg_off = EXYNOS5433_THD_TEMP_FALL3_0;
- j = trip;
- }
+static void exynos5433_tmu_disable_high(struct exynos_tmu_data *data)
+{
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, false);
+}
- th = readl(data->base + reg_off);
- th &= ~(0xff << j * 8);
- th |= (temp_to_code(data, temp - hyst) << j * 8);
- writel(th, data->base + reg_off);
+static void exynos5433_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS5433_THD_TEMP_RISE7_4, 24, temp);
+ exynos_tmu_update_bit(data, EXYNOS_TMU_REG_CONTROL,
+ EXYNOS_TMU_THERM_TRIP_EN_SHIFT, true);
+ exynos_tmu_update_bit(data, EXYNOS5433_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 7, true);
}
static void exynos5433_tmu_initialize(struct platform_device *pdev)
@@ -509,34 +536,41 @@ static void exynos5433_tmu_initialize(struct platform_device *pdev)
cal_type ? 2 : 1);
}
-static void exynos7_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+static void exynos7_tmu_set_low_temp(struct exynos_tmu_data *data, u8 temp)
{
- unsigned int reg_off, bit_off;
- u32 th;
-
- reg_off = ((7 - trip) / 2) * 4;
- bit_off = ((8 - trip) % 2);
+ exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_FALL7_6 + 12, 0, temp);
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT + 0, true);
+}
- th = readl(data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
- th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
- th |= temp_to_code(data, temp) << (16 * bit_off);
- writel(th, data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
+static void exynos7_tmu_set_high_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_RISE7_6 + 12, 16, temp);
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, true);
}
-static void exynos7_tmu_set_trip_hyst(struct exynos_tmu_data *data,
- int trip, u8 temp, u8 hyst)
+static void exynos7_tmu_disable_low(struct exynos_tmu_data *data)
{
- unsigned int reg_off, bit_off;
- u32 th;
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS_TMU_INTEN_FALL0_SHIFT + 0, false);
+}
- reg_off = ((7 - trip) / 2) * 4;
- bit_off = ((8 - trip) % 2);
+static void exynos7_tmu_disable_high(struct exynos_tmu_data *data)
+{
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 1, false);
+}
- th = readl(data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
- th &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
- th |= temp_to_code(data, temp - hyst) << (16 * bit_off);
- writel(th, data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
+static void exynos7_tmu_set_crit_temp(struct exynos_tmu_data *data, u8 temp)
+{
+ /*
+ * Like Exynos 4210, Exynos 7 does not seem to support critical temperature
+ * handling in hardware. Again, we still set a separate interrupt for it.
+ */
+ exynos_tmu_update_temp(data, EXYNOS7_THD_TEMP_RISE7_6 + 0, 16, temp);
+ exynos_tmu_update_bit(data, EXYNOS7_TMU_REG_INTEN,
+ EXYNOS7_TMU_INTEN_RISE0_SHIFT + 7, true);
}
static void exynos7_tmu_initialize(struct platform_device *pdev)
@@ -551,95 +585,51 @@ static void exynos7_tmu_initialize(struct platform_device *pdev)
static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tz = data->tzd;
- struct thermal_trip trip;
- unsigned int con, interrupt_en = 0, i;
+ unsigned int con;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
- if (on) {
- for (i = 0; i < data->ntrip; i++) {
- if (thermal_zone_get_trip(tz, i, &trip))
- continue;
-
- interrupt_en |=
- (1 << (EXYNOS_TMU_INTEN_RISE0_SHIFT + i * 4));
- }
-
- if (data->soc != SOC_ARCH_EXYNOS4210)
- interrupt_en |=
- interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
-
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- } else {
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
- }
+ if (on)
+ con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ else
+ con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT);
- writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tz = data->tzd;
- struct thermal_trip trip;
- unsigned int con, interrupt_en = 0, pd_det_en, i;
+ unsigned int con, pd_det_en;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
- if (on) {
- for (i = 0; i < data->ntrip; i++) {
- if (thermal_zone_get_trip(tz, i, &trip))
- continue;
-
- interrupt_en |=
- (1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i));
- }
-
- interrupt_en |=
- interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
-
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- } else
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+ if (on)
+ con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ else
+ con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT);
pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
- writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tz = data->tzd;
- struct thermal_trip trip;
- unsigned int con, interrupt_en = 0, i;
+ unsigned int con;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
- for (i = 0; i < data->ntrip; i++) {
- if (thermal_zone_get_trip(tz, i, &trip))
- continue;
-
- interrupt_en |=
- (1 << (EXYNOS7_TMU_INTEN_RISE0_SHIFT + i));
- }
-
- interrupt_en |=
- interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
-
- con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
- con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
+ con |= BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ con |= BIT(EXYNOS7_PD_DET_EN_SHIFT);
} else {
- con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
- con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
+ con &= ~BIT(EXYNOS_TMU_CORE_EN_SHIFT);
+ con &= ~BIT(EXYNOS7_PD_DET_EN_SHIFT);
}
- writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
@@ -766,10 +756,9 @@ static int exynos7_tmu_read(struct exynos_tmu_data *data)
EXYNOS7_TMU_TEMP_MASK;
}
-static void exynos_tmu_work(struct work_struct *work)
+static irqreturn_t exynos_tmu_threaded_irq(int irq, void *id)
{
- struct exynos_tmu_data *data = container_of(work,
- struct exynos_tmu_data, irq_work);
+ struct exynos_tmu_data *data = id;
thermal_zone_device_update(data->tzd, THERMAL_EVENT_UNSPECIFIED);
@@ -781,7 +770,8 @@ static void exynos_tmu_work(struct work_struct *work)
clk_disable(data->clk);
mutex_unlock(&data->lock);
- enable_irq(data->irq);
+
+ return IRQ_HANDLED;
}
static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
@@ -815,16 +805,6 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
writel(val_irq, data->base + tmu_intclear);
}
-static irqreturn_t exynos_tmu_irq(int irq, void *id)
-{
- struct exynos_tmu_data *data = id;
-
- disable_irq_nosync(irq);
- schedule_work(&data->irq_work);
-
- return IRQ_HANDLED;
-}
-
static const struct of_device_id exynos_tmu_match[] = {
{
.compatible = "samsung,exynos3250-tmu",
@@ -866,10 +846,6 @@ static int exynos_map_dt_data(struct platform_device *pdev)
if (!data || !pdev->dev.of_node)
return -ENODEV;
- data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
- if (data->id < 0)
- data->id = 0;
-
data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (data->irq <= 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
@@ -891,13 +867,15 @@ static int exynos_map_dt_data(struct platform_device *pdev)
switch (data->soc) {
case SOC_ARCH_EXYNOS4210:
- data->tmu_set_trip_temp = exynos4210_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos4210_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos4210_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos4210_tmu_set_high_temp;
+ data->tmu_disable_low = exynos4210_tmu_disable_low;
+ data->tmu_disable_high = exynos4210_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos4210_tmu_set_crit_temp;
data->tmu_initialize = exynos4210_tmu_initialize;
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4210_tmu_read;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 4;
data->gain = 15;
data->reference_voltage = 7;
data->efuse_value = 55;
@@ -910,14 +888,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
case SOC_ARCH_EXYNOS5260:
case SOC_ARCH_EXYNOS5420:
case SOC_ARCH_EXYNOS5420_TRIMINFO:
- data->tmu_set_trip_temp = exynos4412_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos4412_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos4412_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos4412_tmu_set_high_temp;
+ data->tmu_disable_low = exynos4412_tmu_disable_low;
+ data->tmu_disable_high = exynos4210_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos4412_tmu_set_crit_temp;
data->tmu_initialize = exynos4412_tmu_initialize;
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 4;
data->gain = 8;
data->reference_voltage = 16;
data->efuse_value = 55;
@@ -929,14 +909,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->max_efuse_value = 100;
break;
case SOC_ARCH_EXYNOS5433:
- data->tmu_set_trip_temp = exynos5433_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos5433_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos5433_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos5433_tmu_set_high_temp;
+ data->tmu_disable_low = exynos5433_tmu_disable_low;
+ data->tmu_disable_high = exynos5433_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos5433_tmu_set_crit_temp;
data->tmu_initialize = exynos5433_tmu_initialize;
data->tmu_control = exynos5433_tmu_control;
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 8;
data->gain = 8;
if (res.start == EXYNOS5433_G3D_BASE)
data->reference_voltage = 23;
@@ -947,14 +929,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->max_efuse_value = 150;
break;
case SOC_ARCH_EXYNOS7:
- data->tmu_set_trip_temp = exynos7_tmu_set_trip_temp;
- data->tmu_set_trip_hyst = exynos7_tmu_set_trip_hyst;
+ data->tmu_set_low_temp = exynos7_tmu_set_low_temp;
+ data->tmu_set_high_temp = exynos7_tmu_set_high_temp;
+ data->tmu_disable_low = exynos7_tmu_disable_low;
+ data->tmu_disable_high = exynos7_tmu_disable_high;
+ data->tmu_set_crit_temp = exynos7_tmu_set_crit_temp;
data->tmu_initialize = exynos7_tmu_initialize;
data->tmu_control = exynos7_tmu_control;
data->tmu_read = exynos7_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
- data->ntrip = 8;
data->gain = 9;
data->reference_voltage = 17;
data->efuse_value = 75;
@@ -990,9 +974,32 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return 0;
}
+static int exynos_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct exynos_tmu_data *data = thermal_zone_device_priv(tz);
+
+ mutex_lock(&data->lock);
+ clk_enable(data->clk);
+
+ if (low > INT_MIN)
+ data->tmu_set_low_temp(data, low / MCELSIUS);
+ else
+ data->tmu_disable_low(data);
+ if (high < INT_MAX)
+ data->tmu_set_high_temp(data, high / MCELSIUS);
+ else
+ data->tmu_disable_high(data);
+
+ clk_disable(data->clk);
+ mutex_unlock(&data->lock);
+
+ return 0;
+}
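
For context, .set_trips is invoked by the thermal core with a low/high window in millidegrees Celsius, with INT_MIN/INT_MAX marking an absent bound, which is why the callback above falls back to the disable helpers. The following standalone sketch paraphrases how the core derives that window from the trip points (the real helper is __thermal_zone_set_trips(); the names below are illustrative, not kernel API):

#include <limits.h>
#include <stdio.h>

struct trip { int temperature, hysteresis; };	/* m°C */

/* Keep the tightest bounds around the current temperature. */
static void set_trips_window(const struct trip *trips, int n,
			     int cur_temp, int *low, int *high)
{
	*low = INT_MIN;
	*high = INT_MAX;

	for (int i = 0; i < n; i++) {
		int trip_low = trips[i].temperature - trips[i].hysteresis;

		if (trip_low < cur_temp && trip_low > *low)
			*low = trip_low;
		if (trips[i].temperature > cur_temp &&
		    trips[i].temperature < *high)
			*high = trips[i].temperature;
	}
}

int main(void)
{
	struct trip trips[] = { { 50000, 2000 }, { 70000, 5000 } };
	int low, high;

	set_trips_window(trips, 2, 55000, &low, &high);
	printf("low=%d high=%d\n", low, high);	/* low=48000 high=70000 */
	return 0;
}
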
+
static const struct thermal_zone_device_ops exynos_sensor_ops = {
.get_temp = exynos_get_temp,
.set_emul_temp = exynos_tmu_set_emulation,
+ .set_trips = exynos_set_trips,
};
static int exynos_tmu_probe(struct platform_device *pdev)
@@ -1013,44 +1020,40 @@ static int exynos_tmu_probe(struct platform_device *pdev)
* TODO: Add regulator as an SOC feature, so that regulator enable
* is a compulsory call.
*/
- data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
- if (!IS_ERR(data->regulator)) {
- ret = regulator_enable(data->regulator);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable vtmu\n");
- return ret;
- }
- } else {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vtmu");
+ switch (ret) {
+ case 0:
+ case -ENODEV:
+ break;
+ case -EPROBE_DEFER:
+ return -EPROBE_DEFER;
+ default:
+ dev_err(&pdev->dev, "Failed to get enabled regulator: %d\n",
+ ret);
+ return ret;
}
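
The switch above leans on the devm regulator API: devm_regulator_get_enable_optional() returns 0 on success, -ENODEV when the supply is simply absent, and another negative value on error, and it disables the regulator automatically on driver detach. That is why the manual regulator_disable() calls disappear from the error and remove paths later in this diff. A condensed sketch of the same pattern:

/* Sketch: devm replaces manual get/enable/disable bookkeeping. */
ret = devm_regulator_get_enable_optional(dev, "vtmu");
if (ret < 0 && ret != -ENODEV)
	return ret;	/* -EPROBE_DEFER and real errors propagate */
/* No regulator_disable() needed: devres releases it on detach. */
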
ret = exynos_map_dt_data(pdev);
if (ret)
- goto err_sensor;
-
- INIT_WORK(&data->irq_work, exynos_tmu_work);
+ return ret;
data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
if (IS_ERR(data->clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
- ret = PTR_ERR(data->clk);
- goto err_sensor;
+ return PTR_ERR(data->clk);
}
data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
if (IS_ERR(data->clk_sec)) {
if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
dev_err(&pdev->dev, "Failed to get triminfo clock\n");
- ret = PTR_ERR(data->clk_sec);
- goto err_sensor;
+ return PTR_ERR(data->clk_sec);
}
} else {
ret = clk_prepare(data->clk_sec);
if (ret) {
dev_err(&pdev->dev, "Failed to get clock\n");
- goto err_sensor;
+ return ret;
}
}
@@ -1080,10 +1083,12 @@ static int exynos_tmu_probe(struct platform_device *pdev)
break;
}
- /*
- * data->tzd must be registered before calling exynos_tmu_initialize(),
- * requesting irq and calling exynos_tmu_control().
- */
+ ret = exynos_tmu_initialize(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ goto err_sclk;
+ }
+
data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data,
&exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
@@ -1094,14 +1099,17 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_sclk;
}
- ret = exynos_tmu_initialize(pdev);
+ ret = exynos_thermal_zone_configure(pdev);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize TMU\n");
+ dev_err(&pdev->dev, "Failed to configure the thermal zone\n");
goto err_sclk;
}
- ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
- IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
+ ret = devm_request_threaded_irq(&pdev->dev, data->irq, NULL,
+ exynos_tmu_threaded_irq,
+ IRQF_TRIGGER_RISING
+ | IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&pdev->dev), data);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
goto err_sclk;
@@ -1117,10 +1125,6 @@ err_clk:
err_clk_sec:
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
-err_sensor:
- if (!IS_ERR(data->regulator))
- regulator_disable(data->regulator);
-
return ret;
}
@@ -1134,9 +1138,6 @@ static void exynos_tmu_remove(struct platform_device *pdev)
clk_unprepare(data->clk);
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
-
- if (!IS_ERR(data->regulator))
- regulator_disable(data->regulator);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index f989b55a8..6a8e386db 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -606,6 +606,18 @@ static const struct ths_thermal_chip sun50i_h6_ths = {
.calc_temp = sun8i_ths_calc_temp,
};
+static const struct ths_thermal_chip sun20i_d1_ths = {
+ .sensor_num = 1,
+ .has_bus_clk_reset = true,
+ .offset = 188552,
+ .scale = 673,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
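
The D1 entry reuses sun8i_ths_calc_temp() with new constants. Assuming that helper computes millidegrees as offset - raw * scale / 10 (an assumption about the shared helper, not something visible in this hunk), the constants above give, for example:

/*
 * Hypothetical worked example for sun20i_d1_ths, assuming
 * m°C = offset - raw * scale / 10:
 *
 *   raw = 2000  ->  188552 - 2000 * 673 / 10 = 53952 m°C (~54 °C)
 */
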
+
static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
{ .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
@@ -614,6 +626,7 @@ static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun50i-a100-ths", .data = &sun50i_a100_ths },
{ .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
{ .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
+ { .compatible = "allwinner,sun20i-d1-ths", .data = &sun20i_d1_ths },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_ths_match);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 45f517274..dfaa63416 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -201,9 +201,6 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *tz,
mutex_lock(&thermal_governor_lock);
mutex_lock(&tz->lock);
- if (!device_is_registered(&tz->device))
- goto exit;
-
gov = __find_governor(strim(policy));
if (!gov)
goto exit;
@@ -214,7 +211,7 @@ exit:
mutex_unlock(&tz->lock);
mutex_unlock(&thermal_governor_lock);
- thermal_notify_tz_gov_change(tz->id, policy);
+ thermal_notify_tz_gov_change(tz, policy);
return ret;
}
@@ -312,21 +309,43 @@ static void handle_non_critical_trips(struct thermal_zone_device *tz,
def_governor->throttle(tz, trip);
}
-void thermal_zone_device_critical(struct thermal_zone_device *tz)
+void thermal_governor_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason)
+{
+ if (!tz->governor || !tz->governor->update_tz)
+ return;
+
+ tz->governor->update_tz(tz, reason);
+}
+
+static void thermal_zone_device_halt(struct thermal_zone_device *tz, bool shutdown)
{
/*
* poweroff_delay_ms must be a carefully profiled positive value.
* It is required for forced_emergency_poweroff_work to be scheduled.
*/
int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS;
+ const char *msg = "Temperature too high";
- dev_emerg(&tz->device, "%s: critical temperature reached, "
- "shutting down\n", tz->type);
+ dev_emerg(&tz->device, "%s: critical temperature reached\n", tz->type);
- hw_protection_shutdown("Temperature too high", poweroff_delay_ms);
+ if (shutdown)
+ hw_protection_shutdown(msg, poweroff_delay_ms);
+ else
+ hw_protection_reboot(msg, poweroff_delay_ms);
+}
+
+void thermal_zone_device_critical(struct thermal_zone_device *tz)
+{
+ thermal_zone_device_halt(tz, true);
}
EXPORT_SYMBOL(thermal_zone_device_critical);
+void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz)
+{
+ thermal_zone_device_halt(tz, false);
+}
+
static void handle_critical_trips(struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
@@ -343,22 +362,49 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
}
static void handle_thermal_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip)
+ struct thermal_trip *trip)
{
if (trip->temperature == THERMAL_TEMP_INVALID)
return;
- if (tz->last_temperature != THERMAL_TEMP_INVALID) {
- if (tz->last_temperature < trip->temperature &&
- tz->temperature >= trip->temperature)
- thermal_notify_tz_trip_up(tz->id,
- thermal_zone_trip_id(tz, trip),
- tz->temperature);
- if (tz->last_temperature >= trip->temperature &&
- tz->temperature < trip->temperature - trip->hysteresis)
- thermal_notify_tz_trip_down(tz->id,
- thermal_zone_trip_id(tz, trip),
- tz->temperature);
+ if (tz->last_temperature == THERMAL_TEMP_INVALID) {
+ /* Initialization. */
+ trip->threshold = trip->temperature;
+ if (tz->temperature >= trip->threshold)
+ trip->threshold -= trip->hysteresis;
+ } else if (tz->last_temperature < trip->threshold) {
+ /*
+ * The trip threshold is equal to the trip temperature, unless
+ * the latter has changed in the meantime. In either case,
+ * the trip is crossed if the current zone temperature is at
+ * least equal to its temperature, but otherwise ensure that
+ * the threshold and the trip temperature will be equal.
+ */
+ if (tz->temperature >= trip->temperature) {
+ thermal_notify_tz_trip_up(tz, trip);
+ thermal_debug_tz_trip_up(tz, trip);
+ trip->threshold = trip->temperature - trip->hysteresis;
+ } else {
+ trip->threshold = trip->temperature;
+ }
+ } else {
+ /*
+ * The previous zone temperature was above or equal to the trip
+ * threshold, which would be equal to the "low temperature" of
+ * the trip (its temperature minus its hysteresis), unless the
+ * trip temperature or hysteresis had changed. In either case,
+ * the trip is crossed if the current zone temperature is below
+ * the low temperature of the trip, but otherwise ensure that
+ * the trip threshold will be equal to the low temperature of
+ * the trip.
+ */
+ if (tz->temperature < trip->temperature - trip->hysteresis) {
+ thermal_notify_tz_trip_down(tz, trip);
+ thermal_debug_tz_trip_down(tz, trip);
+ trip->threshold = trip->temperature;
+ } else {
+ trip->threshold = trip->temperature - trip->hysteresis;
+ }
}
if (trip->type == THERMAL_TRIP_CRITICAL || trip->type == THERMAL_TRIP_HOT)
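
The threshold bookkeeping above is a Schmitt trigger: a trip fires at trip->temperature on the way up and only re-arms once the temperature drops below trip->temperature - trip->hysteresis. The standalone sketch below replays the same state machine on a sample temperature sequence (types and names are illustrative, not kernel API):

#include <stdio.h>

struct trip_sketch {
	int temperature;	/* trip temperature, m°C */
	int hysteresis;		/* hysteresis, m°C */
	int threshold;		/* current crossing threshold */
};

static void step(struct trip_sketch *t, int last, int now)
{
	if (last < t->threshold) {
		if (now >= t->temperature) {
			printf("trip crossed up at %d\n", now);
			t->threshold = t->temperature - t->hysteresis;
		} else {
			t->threshold = t->temperature;
		}
	} else {
		if (now < t->temperature - t->hysteresis) {
			printf("trip crossed down at %d\n", now);
			t->threshold = t->temperature;
		} else {
			t->threshold = t->temperature - t->hysteresis;
		}
	}
}

int main(void)
{
	struct trip_sketch t = { 70000, 5000, 70000 };
	int temps[] = { 60000, 71000, 68000, 64000, 60000 };

	for (int i = 1; i < 5; i++)
		step(&t, temps[i - 1], temps[i]);
	/* prints: up at 71000, then down at 64000 (68000 stays armed) */
	return 0;
}
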
@@ -386,11 +432,23 @@ static void update_temperature(struct thermal_zone_device *tz)
trace_thermal_temperature(tz);
thermal_genl_sampling_temp(tz->id, temp);
+ thermal_debug_update_temp(tz);
+}
+
+static void thermal_zone_device_check(struct work_struct *work)
+{
+ struct thermal_zone_device *tz = container_of(work, struct
+ thermal_zone_device,
+ poll_queue.work);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
}
static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
+
+ INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
+
tz->temperature = THERMAL_TEMP_INVALID;
tz->prev_low_trip = -INT_MAX;
tz->prev_high_trip = INT_MAX;
@@ -401,16 +459,11 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
- const struct thermal_trip *trip;
+ struct thermal_trip *trip;
if (tz->suspended)
return;
- if (WARN_ONCE(!tz->ops->get_temp,
- "'%s' must not be called without 'get_temp' ops set\n",
- __func__))
- return;
-
if (!thermal_zone_device_is_enabled(tz))
return;
@@ -440,12 +493,6 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
return ret;
}
- if (!device_is_registered(&tz->device)) {
- mutex_unlock(&tz->lock);
-
- return -ENODEV;
- }
-
if (tz->ops->change_mode)
ret = tz->ops->change_mode(tz, mode);
@@ -457,9 +504,9 @@ static int thermal_zone_device_set_mode(struct thermal_zone_device *tz,
mutex_unlock(&tz->lock);
if (mode == THERMAL_DEVICE_ENABLED)
- thermal_notify_tz_enable(tz->id);
+ thermal_notify_tz_enable(tz);
else
- thermal_notify_tz_disable(tz->id);
+ thermal_notify_tz_disable(tz);
return ret;
}
@@ -483,24 +530,21 @@ int thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
return tz->mode == THERMAL_DEVICE_ENABLED;
}
+static bool thermal_zone_is_present(struct thermal_zone_device *tz)
+{
+ return !list_empty(&tz->node);
+}
+
void thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event)
{
mutex_lock(&tz->lock);
- if (device_is_registered(&tz->device))
+ if (thermal_zone_is_present(tz))
__thermal_zone_device_update(tz, event);
mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
-static void thermal_zone_device_check(struct work_struct *work)
-{
- struct thermal_zone_device *tz = container_of(work, struct
- thermal_zone_device,
- poll_queue.work);
- thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-}
-
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *data)
{
@@ -692,6 +736,8 @@ int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
list_add_tail(&dev->tz_node, &tz->thermal_instances);
list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
atomic_set(&tz->need_update, 1);
+
+ thermal_governor_update_tz(tz, THERMAL_TZ_BIND_CDEV);
}
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
@@ -750,6 +796,9 @@ int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
list_del(&pos->tz_node);
list_del(&pos->cdev_node);
+
+ thermal_governor_update_tz(tz, THERMAL_TZ_UNBIND_CDEV);
+
mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
goto unbind;
@@ -791,12 +840,12 @@ static void thermal_release(struct device *dev)
tz = to_thermal_zone(dev);
thermal_zone_destroy_device_groups(tz);
mutex_destroy(&tz->lock);
- kfree(tz);
+ complete(&tz->removal);
} else if (!strncmp(dev_name(dev), "cooling_device",
sizeof("cooling_device") - 1)) {
cdev = to_cooling_device(dev);
thermal_cooling_device_destroy_sysfs(cdev);
- kfree(cdev->type);
+ kfree_const(cdev->type);
ida_free(&thermal_cdev_ida, cdev->id);
kfree(cdev);
}
@@ -868,7 +917,7 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->id = ret;
id = ret;
- cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
+ cdev->type = kstrdup_const(type ? type : "", GFP_KERNEL);
if (!cdev->type) {
ret = -ENOMEM;
goto out_ida_remove;
@@ -914,12 +963,14 @@ __thermal_cooling_device_register(struct device_node *np,
mutex_unlock(&thermal_list_lock);
+ thermal_debug_cdev_add(cdev);
+
return cdev;
out_cooling_dev:
thermal_cooling_device_destroy_sysfs(cdev);
out_cdev_type:
- kfree(cdev->type);
+ kfree_const(cdev->type);
out_ida_remove:
ida_free(&thermal_cdev_ida, id);
out_kfree_cdev:
@@ -1120,6 +1171,8 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
if (!cdev)
return;
+ thermal_debug_cdev_remove(cdev);
+
mutex_lock(&thermal_list_lock);
if (!thermal_cooling_device_present(cdev)) {
@@ -1258,7 +1311,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (!ops) {
+ if (!ops || !ops->get_temp) {
pr_err("Thermal zone device ops not defined\n");
return ERR_PTR(-EINVAL);
}
@@ -1282,8 +1335,10 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
}
INIT_LIST_HEAD(&tz->thermal_instances);
+ INIT_LIST_HEAD(&tz->node);
ida_init(&tz->ida);
mutex_init(&tz->lock);
+ init_completion(&tz->removal);
id = ida_alloc(&thermal_tz_ida, GFP_KERNEL);
if (id < 0) {
result = id;
@@ -1346,20 +1401,22 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
}
mutex_lock(&thermal_list_lock);
+ mutex_lock(&tz->lock);
list_add_tail(&tz->node, &thermal_tz_list);
+ mutex_unlock(&tz->lock);
mutex_unlock(&thermal_list_lock);
/* Bind cooling devices for this zone */
bind_tz(tz);
- INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check);
-
thermal_zone_device_init(tz);
/* Update the new thermal zone and mark it as already updated. */
if (atomic_cmpxchg(&tz->need_update, 1, 0))
thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
- thermal_notify_tz_create(tz->id, tz->type);
+ thermal_notify_tz_create(tz);
+
+ thermal_debug_tz_add(tz);
return tz;
@@ -1418,14 +1475,13 @@ EXPORT_SYMBOL_GPL(thermal_zone_device);
*/
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
- int tz_id;
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
if (!tz)
return;
- tz_id = tz->id;
+ thermal_debug_tz_remove(tz);
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node)
@@ -1436,7 +1492,10 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
mutex_unlock(&thermal_list_lock);
return;
}
+
+ mutex_lock(&tz->lock);
list_del(&tz->node);
+ mutex_unlock(&tz->lock);
/* Unbind all cdevs associated with 'this' thermal zone */
list_for_each_entry(cdev, &thermal_cdev_list, node)
@@ -1453,15 +1512,16 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
ida_free(&thermal_tz_ida, tz->id);
ida_destroy(&tz->ida);
- mutex_lock(&tz->lock);
device_del(&tz->device);
- mutex_unlock(&tz->lock);
kfree(tz->tzp);
put_device(&tz->device);
- thermal_notify_tz_delete(tz_id);
+ thermal_notify_tz_delete(tz);
+
+ wait_for_completion(&tz->removal);
+ kfree(tz);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
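
The completion added in this diff reverses the old teardown ordering: the zone is no longer freed from thermal_release() (the struct device release callback), but from the unregister path once release has signalled that the last reference is gone. A sketch of the resulting sequence, as read from the hunks above:

/*
 * Teardown ordering (sketch):
 *
 *   thermal_release()                      device core, on last put
 *       complete(&tz->removal);
 *
 *   thermal_zone_device_unregister()
 *       device_del(&tz->device);
 *       put_device(&tz->device);           may or may not be the last put
 *       wait_for_completion(&tz->removal); blocks until release has run
 *       kfree(tz);                         now safe, no concurrent users
 */
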
@@ -1503,6 +1563,22 @@ exit:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
+static void thermal_zone_device_resume(struct work_struct *work)
+{
+ struct thermal_zone_device *tz;
+
+ tz = container_of(work, struct thermal_zone_device, poll_queue.work);
+
+ mutex_lock(&tz->lock);
+
+ tz->suspended = false;
+
+ thermal_zone_device_init(tz);
+ __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+
+ mutex_unlock(&tz->lock);
+}
+
static int thermal_pm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
@@ -1532,10 +1608,18 @@ static int thermal_pm_notify(struct notifier_block *nb,
list_for_each_entry(tz, &thermal_tz_list, node) {
mutex_lock(&tz->lock);
- tz->suspended = false;
+ cancel_delayed_work(&tz->poll_queue);
- thermal_zone_device_init(tz);
- __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ /*
+ * Replace the work function with the resume one, which
+ * will restore the original work function and schedule
+ * the polling work if needed.
+ */
+ INIT_DELAYED_WORK(&tz->poll_queue,
+ thermal_zone_device_resume);
+ /* Queue up the work without a delay. */
+ mod_delayed_work(system_freezable_power_efficient_wq,
+ &tz->poll_queue, 0);
mutex_unlock(&tz->lock);
}
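
The suspend/resume rework relies on a one-shot work-function swap: the delayed work is temporarily repointed at thermal_zone_device_resume(), and the resume handler calls thermal_zone_device_init(), which (per the hunk earlier in this file) re-runs INIT_DELAYED_WORK() with the normal thermal_zone_device_check(), restoring the usual polling handler. The generic shape of the pattern, as a sketch:

/* One-shot replacement work function (sketch of the pattern). */
INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume);
mod_delayed_work(wq, &tz->poll_queue, 0);	/* run it immediately */
/*
 * thermal_zone_device_resume() -> thermal_zone_device_init() re-runs
 * INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check), so
 * any later queueing uses the normal handler again.
 */
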
@@ -1556,6 +1640,8 @@ static int __init thermal_init(void)
{
int result;
+ thermal_debug_init();
+
result = thermal_netlink_init();
if (result)
goto error;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 0a3b3ec51..e9c099ecd 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -13,6 +13,7 @@
#include <linux/thermal.h>
#include "thermal_netlink.h"
+#include "thermal_debugfs.h"
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
@@ -114,16 +115,19 @@ int thermal_zone_device_set_policy(struct thermal_zone_device *, char *);
int thermal_build_list_of_policies(char *buf);
void __thermal_zone_device_update(struct thermal_zone_device *tz,
enum thermal_notify_event event);
+void thermal_zone_device_critical_reboot(struct thermal_zone_device *tz);
+void thermal_governor_update_tz(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
/* Helpers */
#define for_each_trip(__tz, __trip) \
for (__trip = __tz->trips; __trip - __tz->trips < __tz->num_trips; __trip++)
void __thermal_zone_set_trips(struct thermal_zone_device *tz);
-int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
-int thermal_zone_trip_id(struct thermal_zone_device *tz,
+int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip);
+void thermal_zone_trip_updated(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
/* sysfs I/F */
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
new file mode 100644
index 000000000..d78d54ae2
--- /dev/null
+++ b/drivers/thermal/thermal_debugfs.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 Linaro Limited
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ * Thermal subsystem debug support
+ */
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+static struct dentry *d_root;
+static struct dentry *d_cdev;
+static struct dentry *d_tz;
+
+/*
+ * Length of the string containing the thermal zone id or the cooling
+ * device id, including the ending nul character. We can reasonably
+ * assume there won't be more than 256 thermal zones as the maximum
+ * observed today is around 32.
+ */
+#define IDSLENGTH 4
+
+/*
+ * The cooling device transition list is stored in a hash table where
+ * the size is CDEVSTATS_HASH_SIZE. The majority of cooling devices
+ * have a dozen states, but some can have many more, so a hash table
+ * is more suitable in this case, because the cost of browsing an
+ * entire list when storing the transitions may not be negligible.
+ */
+#define CDEVSTATS_HASH_SIZE 16
+
+/**
+ * struct cdev_debugfs - per cooling device statistics structure
+ * A cooling device can have a high number of states. Showing the
+ * transitions on a matrix based representation can be overkill given
+ * most of the transitions won't happen and we end up with a matrix
+ * filled with zero. Instead, we show the transitions which actually
+ * happened.
+ *
+ * Every transition updates the current_state and the timestamp. The
+ * transitions and the durations are stored in lists.
+ *
+ * @total: the number of transitions for this cooling device
+ * @current_state: the current cooling device state
+ * @timestamp: the state change timestamp
+ * @transitions: an array of lists containing the state transitions
+ * @durations: an array of lists containing the residencies of each state
+ */
+struct cdev_debugfs {
+ u32 total;
+ int current_state;
+ ktime_t timestamp;
+ struct list_head transitions[CDEVSTATS_HASH_SIZE];
+ struct list_head durations[CDEVSTATS_HASH_SIZE];
+};
+
+/**
+ * struct cdev_record - Common structure for cooling device entry
+ *
+ * The following common structure stores the information related to
+ * the transitions and to the state residencies. Each entry is
+ * identified by an id associated with a value, and is used as a node
+ * in the "transitions" and "durations" lists above.
+ *
+ * @node: node to insert the structure in a list
+ * @id: identifier of the value which can be a state or a transition
+ * @residency: a ktime_t representing a state residency duration
+ * @count: a number of occurrences
+ */
+struct cdev_record {
+ struct list_head node;
+ int id;
+ union {
+ ktime_t residency;
+ u64 count;
+ };
+};
+
+/**
+ * struct trip_stats - Thermal trip statistics
+ *
+ * The trip_stats structure has the relevant information to show the
+ * statistics related to temperature going above a trip point.
+ *
+ * @timestamp: the trip crossing timestamp
+ * @duration: total time when the zone temperature was above the trip point
+ * @count: the number of times the zone temperature was above the trip point
+ * @max: maximum recorded temperature above the trip point
+ * @min: minimum recorded temperature above the trip point
+ * @avg: average temperature above the trip point
+ */
+struct trip_stats {
+ ktime_t timestamp;
+ ktime_t duration;
+ int count;
+ int max;
+ int min;
+ int avg;
+};
+
+/**
+ * struct tz_episode - A mitigation episode information
+ *
+ * The tz_episode structure describes a mitigation episode. A
+ * mitigation episode begins when the trip point with the lowest
+ * temperature is crossed on the way up and ends when it is crossed on
+ * the way down. During this episode, multiple trip points may be
+ * crossed on the way up and down if several trips are described in
+ * the firmware above the lowest temperature trip point.
+ *
+ * @timestamp: timestamp of the first trip point crossed on the way up
+ * @duration: total duration of the mitigation episode
+ * @node: a list element to be added to the list of tz events
+ * @trip_stats: per trip point statistics, flexible array
+ */
+struct tz_episode {
+ ktime_t timestamp;
+ ktime_t duration;
+ struct list_head node;
+ struct trip_stats trip_stats[];
+};
+
+/**
+ * struct tz_debugfs - Store all mitigation episodes for a thermal zone
+ *
+ * The tz_debugfs structure contains the list of the mitigation
+ * episodes and has to track which trip points have been crossed in
+ * order to correctly handle nested trip point mitigation episodes.
+ *
+ * We keep the history of the trip points crossed in an array and, as
+ * we can go back and forth inside this history, e.g. trip 0,1,2,1,2,1,0,
+ * we keep track of the current position in the history array.
+ *
+ * @tz_episodes: a list of thermal mitigation episodes
+ * @trips_crossed: an array of trip points crossed by id
+ * @nr_trips: the number of trip points currently being crossed
+ */
+struct tz_debugfs {
+ struct list_head tz_episodes;
+ int *trips_crossed;
+ int nr_trips;
+};
+
+/**
+ * struct thermal_debugfs - High level structure for a thermal object in debugfs
+ *
+ * The thermal_debugfs structure is the common structure used by the
+ * cooling device or the thermal zone to store the statistics.
+ *
+ * @d_top: top directory of the thermal object directory
+ * @lock: per object lock to protect the internals
+ *
+ * @cdev_dbg: a cooling device debug structure
+ * @tz_dbg: a thermal zone debug structure
+ */
+struct thermal_debugfs {
+ struct dentry *d_top;
+ struct mutex lock;
+ union {
+ struct cdev_debugfs cdev_dbg;
+ struct tz_debugfs tz_dbg;
+ };
+};
+
+void thermal_debug_init(void)
+{
+ d_root = debugfs_create_dir("thermal", NULL);
+ if (!d_root)
+ return;
+
+ d_cdev = debugfs_create_dir("cooling_devices", d_root);
+ if (!d_cdev)
+ return;
+
+ d_tz = debugfs_create_dir("thermal_zones", d_root);
+}
+
+static struct thermal_debugfs *thermal_debugfs_add_id(struct dentry *d, int id)
+{
+ struct thermal_debugfs *thermal_dbg;
+ char ids[IDSLENGTH];
+
+ thermal_dbg = kzalloc(sizeof(*thermal_dbg), GFP_KERNEL);
+ if (!thermal_dbg)
+ return NULL;
+
+ mutex_init(&thermal_dbg->lock);
+
+ snprintf(ids, IDSLENGTH, "%d", id);
+
+ thermal_dbg->d_top = debugfs_create_dir(ids, d);
+ if (!thermal_dbg->d_top) {
+ kfree(thermal_dbg);
+ return NULL;
+ }
+
+ return thermal_dbg;
+}
+
+static void thermal_debugfs_remove_id(struct thermal_debugfs *thermal_dbg)
+{
+ if (!thermal_dbg)
+ return;
+
+ debugfs_remove(thermal_dbg->d_top);
+
+ kfree(thermal_dbg);
+}
+
+static struct cdev_record *
+thermal_debugfs_cdev_record_alloc(struct thermal_debugfs *thermal_dbg,
+ struct list_head *lists, int id)
+{
+ struct cdev_record *cdev_record;
+
+ cdev_record = kzalloc(sizeof(*cdev_record), GFP_KERNEL);
+ if (!cdev_record)
+ return NULL;
+
+ cdev_record->id = id;
+ INIT_LIST_HEAD(&cdev_record->node);
+ list_add_tail(&cdev_record->node,
+ &lists[cdev_record->id % CDEVSTATS_HASH_SIZE]);
+
+ return cdev_record;
+}
+
+static struct cdev_record *
+thermal_debugfs_cdev_record_find(struct thermal_debugfs *thermal_dbg,
+ struct list_head *lists, int id)
+{
+ struct cdev_record *entry;
+
+ list_for_each_entry(entry, &lists[id % CDEVSTATS_HASH_SIZE], node)
+ if (entry->id == id)
+ return entry;
+
+ return NULL;
+}
+
+static struct cdev_record *
+thermal_debugfs_cdev_record_get(struct thermal_debugfs *thermal_dbg,
+ struct list_head *lists, int id)
+{
+ struct cdev_record *cdev_record;
+
+ cdev_record = thermal_debugfs_cdev_record_find(thermal_dbg, lists, id);
+ if (cdev_record)
+ return cdev_record;
+
+ return thermal_debugfs_cdev_record_alloc(thermal_dbg, lists, id);
+}
+
+static void thermal_debugfs_cdev_clear(struct cdev_debugfs *cdev_dbg)
+{
+ int i;
+ struct cdev_record *entry, *tmp;
+
+ for (i = 0; i < CDEVSTATS_HASH_SIZE; i++) {
+
+ list_for_each_entry_safe(entry, tmp,
+ &cdev_dbg->transitions[i], node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+
+ list_for_each_entry_safe(entry, tmp,
+ &cdev_dbg->durations[i], node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ }
+
+ cdev_dbg->total = 0;
+}
+
+static void *cdev_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct thermal_debugfs *thermal_dbg = s->private;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ return (*pos < CDEVSTATS_HASH_SIZE) ? pos : NULL;
+}
+
+static void *cdev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+
+ return (*pos < CDEVSTATS_HASH_SIZE) ? pos : NULL;
+}
+
+static void cdev_seq_stop(struct seq_file *s, void *v)
+{
+ struct thermal_debugfs *thermal_dbg = s->private;
+
+ mutex_unlock(&thermal_dbg->lock);
+}
+
+static int cdev_tt_seq_show(struct seq_file *s, void *v)
+{
+ struct thermal_debugfs *thermal_dbg = s->private;
+ struct cdev_debugfs *cdev_dbg = &thermal_dbg->cdev_dbg;
+ struct list_head *transitions = cdev_dbg->transitions;
+ struct cdev_record *entry;
+ int i = *(loff_t *)v;
+
+ if (!i)
+ seq_puts(s, "Transition\tOccurrences\n");
+
+ list_for_each_entry(entry, &transitions[i], node) {
+ /*
+ * Assuming the maximum number of cdev states is 1024, the longest
+ * string for a transition would be "1024->1024\0"
+ */
+ char buffer[11];
+
+ snprintf(buffer, ARRAY_SIZE(buffer), "%d->%d",
+ entry->id >> 16, entry->id & 0xFFFF);
+
+ seq_printf(s, "%-10s\t%-10llu\n", buffer, entry->count);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations tt_sops = {
+ .start = cdev_seq_start,
+ .next = cdev_seq_next,
+ .stop = cdev_seq_stop,
+ .show = cdev_tt_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(tt);
+
+static int cdev_dt_seq_show(struct seq_file *s, void *v)
+{
+ struct thermal_debugfs *thermal_dbg = s->private;
+ struct cdev_debugfs *cdev_dbg = &thermal_dbg->cdev_dbg;
+ struct list_head *durations = cdev_dbg->durations;
+ struct cdev_record *entry;
+ int i = *(loff_t *)v;
+
+ if (!i)
+ seq_puts(s, "State\tResidency\n");
+
+ list_for_each_entry(entry, &durations[i], node) {
+ s64 duration = ktime_to_ms(entry->residency);
+
+ if (entry->id == cdev_dbg->current_state)
+ duration += ktime_ms_delta(ktime_get(),
+ cdev_dbg->timestamp);
+
+ seq_printf(s, "%-5d\t%-10llu\n", entry->id, duration);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations dt_sops = {
+ .start = cdev_seq_start,
+ .next = cdev_seq_next,
+ .stop = cdev_seq_stop,
+ .show = cdev_dt_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(dt);
+
+static int cdev_clear_set(void *data, u64 val)
+{
+ struct thermal_debugfs *thermal_dbg = data;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
+
+ mutex_unlock(&thermal_dbg->lock);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdev_clear_fops, NULL, cdev_clear_set, "%llu\n");
+
+/**
+ * thermal_debug_cdev_state_update - Update a cooling device state change
+ *
+ * Computes a transition and the duration of the previous state residency.
+ *
+ * @cdev: a pointer to a cooling device
+ * @new_state: an integer corresponding to the new cooling device state
+ */
+void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+ int new_state)
+{
+ struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+ struct cdev_debugfs *cdev_dbg;
+ struct cdev_record *cdev_record;
+ int transition, old_state;
+
+ if (!thermal_dbg || (thermal_dbg->cdev_dbg.current_state == new_state))
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ cdev_dbg = &thermal_dbg->cdev_dbg;
+
+ old_state = cdev_dbg->current_state;
+
+ /*
+ * Get the old state information in the durations list. If
+ * this one does not exist, a newly allocated one will be
+ * returned. Recompute the total duration in the old state and
+ * get a new timestamp for the new state.
+ */
+ cdev_record = thermal_debugfs_cdev_record_get(thermal_dbg,
+ cdev_dbg->durations,
+ old_state);
+ if (cdev_record) {
+ ktime_t now = ktime_get();
+ ktime_t delta = ktime_sub(now, cdev_dbg->timestamp);
+ cdev_record->residency = ktime_add(cdev_record->residency, delta);
+ cdev_dbg->timestamp = now;
+ }
+
+ cdev_dbg->current_state = new_state;
+ transition = (old_state << 16) | new_state;
+
+ /*
+ * Get the transition in the transitions list. If this one
+ * does not exist, a newly allocated one will be returned.
+ * Increment the occurrence of this transition, which is stored
+ * in the value field.
+ */
+ cdev_record = thermal_debugfs_cdev_record_get(thermal_dbg,
+ cdev_dbg->transitions,
+ transition);
+ if (cdev_record)
+ cdev_record->count++;
+
+ cdev_dbg->total++;
+
+ mutex_unlock(&thermal_dbg->lock);
+}
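
The transition key packs both states into a single int, old state in the high 16 bits and new state in the low 16 bits, which is exactly how cdev_tt_seq_show() unpacks it for display and how the hash bucket is selected. A tiny standalone example:

#include <stdio.h>

int main(void)
{
	int old_state = 3, new_state = 5;
	int transition = (old_state << 16) | new_state;

	/* Decode the same way cdev_tt_seq_show() does. */
	printf("%d->%d (key %#x, bucket %d)\n",
	       transition >> 16, transition & 0xFFFF,
	       transition, transition % 16 /* CDEVSTATS_HASH_SIZE */);
	return 0;	/* prints: 3->5 (key 0x30005, bucket 5) */
}
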
+
+/**
+ * thermal_debug_cdev_add - Add a cooling device debugfs entry
+ *
+ * Allocates a cooling device object for debug, initializes the
+ * statistics and creates the entries in debugfs.
+ * @cdev: a pointer to a cooling device
+ */
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
+{
+ struct thermal_debugfs *thermal_dbg;
+ struct cdev_debugfs *cdev_dbg;
+ int i;
+
+ thermal_dbg = thermal_debugfs_add_id(d_cdev, cdev->id);
+ if (!thermal_dbg)
+ return;
+
+ cdev_dbg = &thermal_dbg->cdev_dbg;
+
+ for (i = 0; i < CDEVSTATS_HASH_SIZE; i++) {
+ INIT_LIST_HEAD(&cdev_dbg->transitions[i]);
+ INIT_LIST_HEAD(&cdev_dbg->durations[i]);
+ }
+
+ cdev_dbg->current_state = 0;
+ cdev_dbg->timestamp = ktime_get();
+
+ debugfs_create_file("trans_table", 0400, thermal_dbg->d_top,
+ thermal_dbg, &tt_fops);
+
+ debugfs_create_file("time_in_state_ms", 0400, thermal_dbg->d_top,
+ thermal_dbg, &dt_fops);
+
+ debugfs_create_file("clear", 0200, thermal_dbg->d_top,
+ thermal_dbg, &cdev_clear_fops);
+
+ debugfs_create_u32("total_trans", 0400, thermal_dbg->d_top,
+ &cdev_dbg->total);
+
+ cdev->debugfs = thermal_dbg;
+}
+
+/**
+ * thermal_debug_cdev_remove - Remove a cooling device debugfs entry
+ *
+ * Frees the statistics memory and removes the debugfs entry
+ *
+ * @cdev: a pointer to a cooling device
+ */
+void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev)
+{
+ struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+
+ if (!thermal_dbg)
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
+ cdev->debugfs = NULL;
+
+ mutex_unlock(&thermal_dbg->lock);
+
+ thermal_debugfs_remove_id(thermal_dbg);
+}
+
+static struct tz_episode *thermal_debugfs_tz_event_alloc(struct thermal_zone_device *tz,
+ ktime_t now)
+{
+ struct tz_episode *tze;
+ int i;
+
+ tze = kzalloc(struct_size(tze, trip_stats, tz->num_trips), GFP_KERNEL);
+ if (!tze)
+ return NULL;
+
+ INIT_LIST_HEAD(&tze->node);
+ tze->timestamp = now;
+
+ for (i = 0; i < tz->num_trips; i++) {
+ tze->trip_stats[i].min = INT_MAX;
+ tze->trip_stats[i].max = INT_MIN;
+ }
+
+ return tze;
+}
+
+void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
+{
+ struct tz_episode *tze;
+ struct tz_debugfs *tz_dbg;
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ int temperature = tz->temperature;
+ int trip_id = thermal_zone_trip_id(tz, trip);
+ ktime_t now = ktime_get();
+
+ if (!thermal_dbg)
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ tz_dbg = &thermal_dbg->tz_dbg;
+
+ /*
+ * The mitigation is starting. A mitigation episode can contain
+ * several trip crossings, each of them related to the
+ * temperature crossing a trip point. The crossings are
+ * nested: when the temperature crosses the first trip point,
+ * the duration begins to be measured, and if the temperature
+ * continues to increase and reaches the second trip point,
+ * the duration of the first trip continues to be accumulated
+ * as well.
+ *
+ * e.g.
+ *
+ * temp
+ * ^
+ * | --------
+ * trip 2 / \ ------
+ * | /| |\ /| |\
+ * trip 1 / | | `---- | | \
+ * | /| | | | | |\
+ * trip 0 / | | | | | | \
+ * | /| | | | | | | |\
+ * | / | | | | | | | | `--
+ * | / | | | | | | | |
+ * |----- | | | | | | | |
+ * | | | | | | | | |
+ * --------|-|-|--------|--------|------|-|-|------------------> time
+ * | | |<--t2-->| |<-t2'>| | |
+ * | | | |
+ * | |<------------t1------------>| |
+ * | |
+ * |<-------------t0--------------->|
+ *
+ */
+ if (!tz_dbg->nr_trips) {
+ tze = thermal_debugfs_tz_event_alloc(tz, now);
+ if (!tze)
+ goto unlock;
+
+ list_add(&tze->node, &tz_dbg->tz_episodes);
+ }
+
+ /*
+ * Each time a trip point is crossed on the way up, the trip_id
+ * is stored in the trips_crossed array and nr_trips is
+ * incremented. nr_trips equal to zero means we are entering
+ * a mitigation episode.
+ *
+ * The trip ids may not be in ascending order, but the
+ * resulting trips_crossed array will be in ascending
+ * temperature order. The function detecting when a trip point
+ * is crossed the way down will handle the very rare case when
+ * the trip points may have been reordered during this
+ * mitigation episode.
+ */
+ tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id;
+
+ tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+ tze->trip_stats[trip_id].timestamp = now;
+ tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
+ tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
+ tze->trip_stats[trip_id].count++;
+ tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
+ (temperature - tze->trip_stats[trip_id].avg) /
+ tze->trip_stats[trip_id].count;
+
+unlock:
+ mutex_unlock(&thermal_dbg->lock);
+}
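
The avg update at the end of the function is the standard incremental mean, avg_n = avg_(n-1) + (x_n - avg_(n-1)) / n, which avoids storing every sample; with integer arithmetic each step truncates, so the result can drift slightly from the exact mean. A minimal demonstration:

#include <stdio.h>

int main(void)
{
	int samples[] = { 50000, 52000, 55000 };	/* m°C */
	int avg = 0, count = 0;

	for (int i = 0; i < 3; i++) {
		count++;
		avg += (samples[i] - avg) / count;
	}
	printf("avg = %d\n", avg);	/* 52333; exact mean is 52333.33 */
	return 0;
}
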
+
+void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
+{
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct tz_episode *tze;
+ struct tz_debugfs *tz_dbg;
+ ktime_t delta, now = ktime_get();
+ int trip_id = thermal_zone_trip_id(tz, trip);
+ int i;
+
+ if (!thermal_dbg)
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ tz_dbg = &thermal_dbg->tz_dbg;
+
+ /*
+ * The temperature is crossing a trip point on the way down, but
+ * no mitigation was detected before. That may happen when the
+ * temperature is already greater than a trip point when the
+ * thermal zone is registered, which is a common case as the
+ * kernel has no mitigation mechanism yet at boot time.
+ */
+ if (!tz_dbg->nr_trips)
+ goto out;
+
+ for (i = tz_dbg->nr_trips - 1; i >= 0; i--) {
+ if (tz_dbg->trips_crossed[i] == trip_id)
+ break;
+ }
+
+ if (i < 0)
+ goto out;
+
+ tz_dbg->nr_trips--;
+
+ if (i < tz_dbg->nr_trips)
+ tz_dbg->trips_crossed[i] = tz_dbg->trips_crossed[tz_dbg->nr_trips];
+
+ tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+
+ delta = ktime_sub(now, tze->trip_stats[trip_id].timestamp);
+
+ tze->trip_stats[trip_id].duration =
+ ktime_add(delta, tze->trip_stats[trip_id].duration);
+
+ /*
+ * This event closes the mitigation as we are crossing the
+ * last trip point on the way down.
+ */
+ if (!tz_dbg->nr_trips)
+ tze->duration = ktime_sub(now, tze->timestamp);
+
+out:
+ mutex_unlock(&thermal_dbg->lock);
+}
+
+void thermal_debug_update_temp(struct thermal_zone_device *tz)
+{
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct tz_episode *tze;
+ struct tz_debugfs *tz_dbg;
+ int trip_id, i;
+
+ if (!thermal_dbg)
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ tz_dbg = &thermal_dbg->tz_dbg;
+
+ if (!tz_dbg->nr_trips)
+ goto out;
+
+ for (i = 0; i < tz_dbg->nr_trips; i++) {
+ trip_id = tz_dbg->trips_crossed[i];
+ tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
+ tze->trip_stats[trip_id].count++;
+ tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, tz->temperature);
+ tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, tz->temperature);
+ tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
+ (tz->temperature - tze->trip_stats[trip_id].avg) /
+ tze->trip_stats[trip_id].count;
+ }
+out:
+ mutex_unlock(&thermal_dbg->lock);
+}
+
+static void *tze_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct thermal_zone_device *tz = s->private;
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ return seq_list_start(&tz_dbg->tz_episodes, *pos);
+}
+
+static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct thermal_zone_device *tz = s->private;
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
+
+ return seq_list_next(v, &tz_dbg->tz_episodes, pos);
+}
+
+static void tze_seq_stop(struct seq_file *s, void *v)
+{
+ struct thermal_zone_device *tz = s->private;
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+
+ mutex_unlock(&thermal_dbg->lock);
+}
+
+static int tze_seq_show(struct seq_file *s, void *v)
+{
+ struct thermal_zone_device *tz = s->private;
+ struct thermal_trip *trip;
+ struct tz_episode *tze;
+ const char *type;
+ int trip_id;
+
+ tze = list_entry((struct list_head *)v, struct tz_episode, node);
+
+ seq_printf(s, ",-Mitigation at %lluus, duration=%llums\n",
+ ktime_to_us(tze->timestamp),
+ ktime_to_ms(tze->duration));
+
+ seq_printf(s, "| trip | type | temp(°mC) | hyst(°mC) | duration | avg(°mC) | min(°mC) | max(°mC) |\n");
+
+ for_each_trip(tz, trip) {
+ /*
+ * No mitigation can happen at the critical trip point,
+ * so the stats will always be zero; skip this trip
+ * point.
+ */
+ if (trip->type == THERMAL_TRIP_CRITICAL)
+ continue;
+
+ if (trip->type == THERMAL_TRIP_PASSIVE)
+ type = "passive";
+ else if (trip->type == THERMAL_TRIP_ACTIVE)
+ type = "active";
+ else
+ type = "hot";
+
+ trip_id = thermal_zone_trip_id(tz, trip);
+
+ seq_printf(s, "| %*d | %*s | %*d | %*d | %*lld | %*d | %*d | %*d |\n",
+ 4, trip_id,
+ 8, type,
+ 9, trip->temperature,
+ 9, trip->hysteresis,
+ 10, ktime_to_ms(tze->trip_stats[trip_id].duration),
+ 9, tze->trip_stats[trip_id].avg,
+ 9, tze->trip_stats[trip_id].min,
+ 9, tze->trip_stats[trip_id].max);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations tze_sops = {
+ .start = tze_seq_start,
+ .next = tze_seq_next,
+ .stop = tze_seq_stop,
+ .show = tze_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(tze);
+
+void thermal_debug_tz_add(struct thermal_zone_device *tz)
+{
+ struct thermal_debugfs *thermal_dbg;
+ struct tz_debugfs *tz_dbg;
+
+ thermal_dbg = thermal_debugfs_add_id(d_tz, tz->id);
+ if (!thermal_dbg)
+ return;
+
+ tz_dbg = &thermal_dbg->tz_dbg;
+
+ tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
+ if (!tz_dbg->trips_crossed) {
+ thermal_debugfs_remove_id(thermal_dbg);
+ return;
+ }
+
+ INIT_LIST_HEAD(&tz_dbg->tz_episodes);
+
+ debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, tz, &tze_fops);
+
+ tz->debugfs = thermal_dbg;
+}
+
+void thermal_debug_tz_remove(struct thermal_zone_device *tz)
+{
+ struct thermal_debugfs *thermal_dbg = tz->debugfs;
+
+ if (!thermal_dbg)
+ return;
+
+ mutex_lock(&thermal_dbg->lock);
+
+ tz->debugfs = NULL;
+
+ mutex_unlock(&thermal_dbg->lock);
+
+ thermal_debugfs_remove_id(thermal_dbg);
+}
diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h
new file mode 100644
index 000000000..155b9af5f
--- /dev/null
+++ b/drivers/thermal/thermal_debugfs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_THERMAL_DEBUGFS
+void thermal_debug_init(void);
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev);
+void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev);
+void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state);
+void thermal_debug_tz_add(struct thermal_zone_device *tz);
+void thermal_debug_tz_remove(struct thermal_zone_device *tz);
+void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+void thermal_debug_update_temp(struct thermal_zone_device *tz);
+#else
+static inline void thermal_debug_init(void) {}
+static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) {}
+static inline void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) {}
+static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
+ int state) {}
+static inline void thermal_debug_tz_add(struct thermal_zone_device *tz) {}
+static inline void thermal_debug_tz_remove(struct thermal_zone_device *tz) {}
+static inline void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip) {}
+static inline void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip) {}
+static inline void thermal_debug_update_temp(struct thermal_zone_device *tz) {}
+#endif /* CONFIG_THERMAL_DEBUGFS */
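The header above uses the usual kernel stub pattern: when the feature is compiled out, every hook collapses to an empty static inline so call sites need no #ifdefs of their own. A minimal standalone sketch of the idea (HAVE_FEATURE is a made-up toggle standing in for CONFIG_THERMAL_DEBUGFS):

#include <stdio.h>

#define HAVE_FEATURE 1

#if HAVE_FEATURE
static void feature_event(int id) { printf("event %d\n", id); }
#else
static inline void feature_event(int id) {}
#endif

int main(void)
{
	feature_event(42);	/* compiles either way, no #ifdef here */
	return 0;
}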
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 69e8ea4aa..0329f4a71 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -82,20 +82,18 @@ EXPORT_SYMBOL(get_thermal_instance);
*/
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
- int ret = -EINVAL;
- int count;
+ const struct thermal_trip *trip;
int crit_temp = INT_MAX;
- struct thermal_trip trip;
+ int ret = -EINVAL;
lockdep_assert_held(&tz->lock);
ret = tz->ops->get_temp(tz, temp);
if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
- for (count = 0; count < tz->num_trips; count++) {
- ret = __thermal_zone_get_trip(tz, count, &trip);
- if (!ret && trip.type == THERMAL_TRIP_CRITICAL) {
- crit_temp = trip.temperature;
+ for_each_trip(tz, trip) {
+ if (trip->type == THERMAL_TRIP_CRITICAL) {
+ crit_temp = trip->temperature;
break;
}
}
@@ -139,10 +137,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
goto unlock;
}
- if (device_is_registered(&tz->device))
- ret = __thermal_zone_get_temp(tz, temp);
- else
- ret = -ENODEV;
+ ret = __thermal_zone_get_temp(tz, temp);
unlock:
mutex_unlock(&tz->lock);
@@ -151,14 +146,23 @@ unlock:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
-static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
- int target)
+static int thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev, int state)
{
- if (cdev->ops->set_cur_state(cdev, target))
- return;
+ int ret;
- thermal_notify_cdev_state_update(cdev->id, target);
- thermal_cooling_device_stats_update(cdev, target);
+	/*
+	 * No check of ops->set_cur_state is needed here because the
+	 * registration code already verified that it is set.
+	 */
+ ret = cdev->ops->set_cur_state(cdev, state);
+ if (ret)
+ return ret;
+
+ thermal_notify_cdev_state_update(cdev, state);
+ thermal_cooling_device_stats_update(cdev, state);
+ thermal_debug_cdev_state_update(cdev, state);
+
+ return 0;
}
void __thermal_cdev_update(struct thermal_cooling_device *cdev)
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index c3ae44659..252116f1e 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -80,10 +80,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
mutex_lock(&tz->lock);
- if (device_is_registered(&tz->device))
- ret = tz->ops->get_crit_temp(tz, &temperature);
- else
- ret = -ENODEV;
+ ret = tz->ops->get_crit_temp(tz, &temperature);
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 08bc46c3e..76a231a29 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -13,9 +13,14 @@
#include "thermal_core.h"
+enum thermal_genl_multicast_groups {
+ THERMAL_GENL_SAMPLING_GROUP = 0,
+ THERMAL_GENL_EVENT_GROUP = 1,
+};
+
static const struct genl_multicast_group thermal_genl_mcgrps[] = {
- { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
- { .name = THERMAL_GENL_EVENT_GROUP_NAME, },
+ [THERMAL_GENL_SAMPLING_GROUP] = { .name = THERMAL_GENL_SAMPLING_GROUP_NAME, },
+ [THERMAL_GENL_EVENT_GROUP] = { .name = THERMAL_GENL_EVENT_GROUP_NAME, },
};
static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = {
@@ -71,6 +76,11 @@ typedef int (*cb_t)(struct param *);
static struct genl_family thermal_gnl_family;
+static int thermal_group_has_listeners(enum thermal_genl_multicast_groups group)
+{
+ return genl_has_listeners(&thermal_gnl_family, &init_net, group);
+}
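The helper above lets the notification paths skip building a netlink message entirely when nobody subscribed to the group. A minimal standalone sketch of that check-before-allocate optimization; the per-group counters and publish() are invented for illustration, where the kernel uses genl_has_listeners():

#include <stdio.h>
#include <stdlib.h>

static int listeners[2];	/* per-group subscriber counts */

static int publish(int group, int temp)
{
	char *msg;

	if (!listeners[group])	/* nobody cares: skip the allocation */
		return 0;

	msg = malloc(64);
	if (!msg)
		return -1;
	snprintf(msg, 64, "temp=%d", temp);
	printf("group %d <- %s\n", group, msg);
	free(msg);
	return 0;
}

int main(void)
{
	publish(0, 45000);	/* dropped cheaply, no listeners yet */
	listeners[0] = 1;
	publish(0, 46000);	/* now actually built and delivered */
	return 0;
}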
+
/************************** Sampling encoding *******************************/
int thermal_genl_sampling_temp(int id, int temp)
@@ -78,6 +88,9 @@ int thermal_genl_sampling_temp(int id, int temp)
struct sk_buff *skb;
void *hdr;
+ if (!thermal_group_has_listeners(THERMAL_GENL_SAMPLING_GROUP))
+ return 0;
+
skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -95,7 +108,7 @@ int thermal_genl_sampling_temp(int id, int temp)
genlmsg_end(skb, hdr);
- genlmsg_multicast(&thermal_gnl_family, skb, 0, 0, GFP_KERNEL);
+ genlmsg_multicast(&thermal_gnl_family, skb, 0, THERMAL_GENL_SAMPLING_GROUP, GFP_KERNEL);
return 0;
out_cancel:
@@ -135,7 +148,7 @@ static int thermal_genl_event_tz_trip_up(struct param *p)
return 0;
}
-static int thermal_genl_event_tz_trip_add(struct param *p)
+static int thermal_genl_event_tz_trip_change(struct param *p)
{
if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id) ||
@@ -147,15 +160,6 @@ static int thermal_genl_event_tz_trip_add(struct param *p)
return 0;
}
-static int thermal_genl_event_tz_trip_delete(struct param *p)
-{
- if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
- nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id))
- return -EMSGSIZE;
-
- return 0;
-}
-
static int thermal_genl_event_cdev_add(struct param *p)
{
if (nla_put_string(p->msg, THERMAL_GENL_ATTR_CDEV_NAME,
@@ -245,9 +249,6 @@ int thermal_genl_event_tz_disable(struct param *p)
int thermal_genl_event_tz_trip_down(struct param *p)
__attribute__((alias("thermal_genl_event_tz_trip_up")));
-int thermal_genl_event_tz_trip_change(struct param *p)
- __attribute__((alias("thermal_genl_event_tz_trip_add")));
-
static cb_t event_cb[] = {
[THERMAL_GENL_EVENT_TZ_CREATE] = thermal_genl_event_tz_create,
[THERMAL_GENL_EVENT_TZ_DELETE] = thermal_genl_event_tz_delete,
@@ -256,8 +257,6 @@ static cb_t event_cb[] = {
[THERMAL_GENL_EVENT_TZ_TRIP_UP] = thermal_genl_event_tz_trip_up,
[THERMAL_GENL_EVENT_TZ_TRIP_DOWN] = thermal_genl_event_tz_trip_down,
[THERMAL_GENL_EVENT_TZ_TRIP_CHANGE] = thermal_genl_event_tz_trip_change,
- [THERMAL_GENL_EVENT_TZ_TRIP_ADD] = thermal_genl_event_tz_trip_add,
- [THERMAL_GENL_EVENT_TZ_TRIP_DELETE] = thermal_genl_event_tz_trip_delete,
[THERMAL_GENL_EVENT_CDEV_ADD] = thermal_genl_event_cdev_add,
[THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete,
[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update,
@@ -275,6 +274,9 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
int ret = -EMSGSIZE;
void *hdr;
+ if (!thermal_group_has_listeners(THERMAL_GENL_EVENT_GROUP))
+ return 0;
+
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -290,7 +292,7 @@ static int thermal_genl_send_event(enum thermal_genl_event event,
genlmsg_end(msg, hdr);
- genlmsg_multicast(&thermal_gnl_family, msg, 0, 1, GFP_KERNEL);
+ genlmsg_multicast(&thermal_gnl_family, msg, 0, THERMAL_GENL_EVENT_GROUP, GFP_KERNEL);
return 0;
@@ -302,100 +304,93 @@ out_free_msg:
return ret;
}
-int thermal_notify_tz_create(int tz_id, const char *name)
+int thermal_notify_tz_create(const struct thermal_zone_device *tz)
{
- struct param p = { .tz_id = tz_id, .name = name };
+ struct param p = { .tz_id = tz->id, .name = tz->type };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_CREATE, &p);
}
-int thermal_notify_tz_delete(int tz_id)
+int thermal_notify_tz_delete(const struct thermal_zone_device *tz)
{
- struct param p = { .tz_id = tz_id };
+ struct param p = { .tz_id = tz->id };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DELETE, &p);
}
-int thermal_notify_tz_enable(int tz_id)
+int thermal_notify_tz_enable(const struct thermal_zone_device *tz)
{
- struct param p = { .tz_id = tz_id };
+ struct param p = { .tz_id = tz->id };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_ENABLE, &p);
}
-int thermal_notify_tz_disable(int tz_id)
+int thermal_notify_tz_disable(const struct thermal_zone_device *tz)
{
- struct param p = { .tz_id = tz_id };
+ struct param p = { .tz_id = tz->id };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DISABLE, &p);
}
-int thermal_notify_tz_trip_down(int tz_id, int trip_id, int temp)
+int thermal_notify_tz_trip_down(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };
+ struct param p = { .tz_id = tz->id,
+ .trip_id = thermal_zone_trip_id(tz, trip),
+ .temp = tz->temperature };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DOWN, &p);
}
-int thermal_notify_tz_trip_up(int tz_id, int trip_id, int temp)
+int thermal_notify_tz_trip_up(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };
+ struct param p = { .tz_id = tz->id,
+ .trip_id = thermal_zone_trip_id(tz, trip),
+ .temp = tz->temperature };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_UP, &p);
}
-int thermal_notify_tz_trip_add(int tz_id, int trip_id, int trip_type,
- int trip_temp, int trip_hyst)
+int thermal_notify_tz_trip_change(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id,
- .trip_type = trip_type, .trip_temp = trip_temp,
- .trip_hyst = trip_hyst };
-
- return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_ADD, &p);
-}
-
-int thermal_notify_tz_trip_delete(int tz_id, int trip_id)
-{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id };
-
- return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DELETE, &p);
-}
-
-int thermal_notify_tz_trip_change(int tz_id, int trip_id, int trip_type,
- int trip_temp, int trip_hyst)
-{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id,
- .trip_type = trip_type, .trip_temp = trip_temp,
- .trip_hyst = trip_hyst };
+ struct param p = { .tz_id = tz->id,
+ .trip_id = thermal_zone_trip_id(tz, trip),
+ .trip_type = trip->type,
+ .trip_temp = trip->temperature,
+ .trip_hyst = trip->hysteresis };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, &p);
}
-int thermal_notify_cdev_state_update(int cdev_id, int cdev_state)
+int thermal_notify_cdev_state_update(const struct thermal_cooling_device *cdev,
+ int state)
{
- struct param p = { .cdev_id = cdev_id, .cdev_state = cdev_state };
+ struct param p = { .cdev_id = cdev->id, .cdev_state = state };
return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, &p);
}
-int thermal_notify_cdev_add(int cdev_id, const char *name, int cdev_max_state)
+int thermal_notify_cdev_add(const struct thermal_cooling_device *cdev)
{
- struct param p = { .cdev_id = cdev_id, .name = name,
- .cdev_max_state = cdev_max_state };
+ struct param p = { .cdev_id = cdev->id, .name = cdev->type,
+ .cdev_max_state = cdev->max_state };
return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_ADD, &p);
}
-int thermal_notify_cdev_delete(int cdev_id)
+int thermal_notify_cdev_delete(const struct thermal_cooling_device *cdev)
{
- struct param p = { .cdev_id = cdev_id };
+ struct param p = { .cdev_id = cdev->id };
return thermal_genl_send_event(THERMAL_GENL_EVENT_CDEV_DELETE, &p);
}
-int thermal_notify_tz_gov_change(int tz_id, const char *name)
+int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
+ const char *name)
{
- struct param p = { .tz_id = tz_id, .name = name };
+ struct param p = { .tz_id = tz->id, .name = name };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p);
}
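All the thermal_notify_*() wrappers above follow one shape: fill a single parameter struct from the object, then hand it to a sender that dispatches through the event_cb[] table indexed by event id. A minimal standalone sketch of that table-driven dispatch, with event names and fields made up for illustration:

#include <stdio.h>

enum ev { EV_CREATE, EV_DELETE, EV_MAX };

struct param { int tz_id; const char *name; };

typedef int (*cb_t)(struct param *);

static int ev_create(struct param *p)
{
	return printf("create tz %d (%s)\n", p->tz_id, p->name);
}

static int ev_delete(struct param *p)
{
	return printf("delete tz %d\n", p->tz_id);
}

static const cb_t event_cb[EV_MAX] = {
	[EV_CREATE] = ev_create,
	[EV_DELETE] = ev_delete,
};

static int send_event(enum ev event, struct param *p)
{
	return event_cb[event](p);	/* one encoder per event id */
}

int main(void)
{
	struct param p = { .tz_id = 3, .name = "cpu" };

	send_event(EV_CREATE, &p);
	send_event(EV_DELETE, &p);
	return 0;
}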
@@ -450,10 +445,10 @@ out_cancel_nest:
static int thermal_genl_cmd_tz_get_trip(struct param *p)
{
struct sk_buff *msg = p->msg;
+ const struct thermal_trip *trip;
struct thermal_zone_device *tz;
struct nlattr *start_trip;
- struct thermal_trip trip;
- int ret, i, id;
+ int id;
if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
return -EINVAL;
@@ -470,16 +465,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
mutex_lock(&tz->lock);
- for (i = 0; i < tz->num_trips; i++) {
-
- ret = __thermal_zone_get_trip(tz, i, &trip);
- if (ret)
- goto out_cancel_nest;
-
- if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip.type) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip.temperature) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip.hysteresis))
+ for_each_trip(tz, trip) {
+ if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID,
+ thermal_zone_trip_id(tz, trip)) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip->type) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip->temperature) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip->hysteresis))
goto out_cancel_nest;
}
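The conversion above replaces indexed lookups with a for_each-style walk over the zone's own trip array, recovering the id by pointer arithmetic the way thermal_zone_trip_id() does. A minimal standalone sketch with simplified stand-in types:

#include <stdio.h>

struct trip { int temperature; int hysteresis; };
struct zone { struct trip trips[3]; int num_trips; };

#define for_each_trip(z, t) \
	for ((t) = (z)->trips; (t) < (z)->trips + (z)->num_trips; (t)++)

int main(void)
{
	struct zone z = {
		.trips = { { 55000, 2000 }, { 75000, 2000 }, { 95000, 0 } },
		.num_trips = 3,
	};
	const struct trip *t;

	for_each_trip(&z, t)	/* t - z.trips recovers the trip id */
		printf("trip %td: %d mC\n", t - z.trips, t->temperature);
	return 0;
}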
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index 0a9987c3b..93a927e14 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -10,25 +10,30 @@ struct thermal_genl_cpu_caps {
int efficiency;
};
+struct thermal_zone_device;
+struct thermal_trip;
+struct thermal_cooling_device;
+
/* Netlink notification function */
#ifdef CONFIG_THERMAL_NETLINK
int __init thermal_netlink_init(void);
void __init thermal_netlink_exit(void);
-int thermal_notify_tz_create(int tz_id, const char *name);
-int thermal_notify_tz_delete(int tz_id);
-int thermal_notify_tz_enable(int tz_id);
-int thermal_notify_tz_disable(int tz_id);
-int thermal_notify_tz_trip_down(int tz_id, int id, int temp);
-int thermal_notify_tz_trip_up(int tz_id, int id, int temp);
-int thermal_notify_tz_trip_delete(int tz_id, int id);
-int thermal_notify_tz_trip_add(int tz_id, int id, int type,
- int temp, int hyst);
-int thermal_notify_tz_trip_change(int tz_id, int id, int type,
- int temp, int hyst);
-int thermal_notify_cdev_state_update(int cdev_id, int state);
-int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state);
-int thermal_notify_cdev_delete(int cdev_id);
-int thermal_notify_tz_gov_change(int tz_id, const char *name);
+int thermal_notify_tz_create(const struct thermal_zone_device *tz);
+int thermal_notify_tz_delete(const struct thermal_zone_device *tz);
+int thermal_notify_tz_enable(const struct thermal_zone_device *tz);
+int thermal_notify_tz_disable(const struct thermal_zone_device *tz);
+int thermal_notify_tz_trip_down(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+int thermal_notify_tz_trip_up(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+int thermal_notify_tz_trip_change(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+int thermal_notify_cdev_state_update(const struct thermal_cooling_device *cdev,
+ int state);
+int thermal_notify_cdev_add(const struct thermal_cooling_device *cdev);
+int thermal_notify_cdev_delete(const struct thermal_cooling_device *cdev);
+int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
+ const char *name);
int thermal_genl_sampling_temp(int id, int temp);
int thermal_genl_cpu_capability_event(int count,
struct thermal_genl_cpu_caps *caps);
@@ -38,70 +43,62 @@ static inline int thermal_netlink_init(void)
return 0;
}
-static inline int thermal_notify_tz_create(int tz_id, const char *name)
-{
- return 0;
-}
-
-static inline int thermal_notify_tz_delete(int tz_id)
-{
- return 0;
-}
-
-static inline int thermal_notify_tz_enable(int tz_id)
+static inline int thermal_notify_tz_create(const struct thermal_zone_device *tz)
{
return 0;
}
-static inline int thermal_notify_tz_disable(int tz_id)
+static inline int thermal_notify_tz_delete(const struct thermal_zone_device *tz)
{
return 0;
}
-static inline int thermal_notify_tz_trip_down(int tz_id, int id, int temp)
+static inline int thermal_notify_tz_enable(const struct thermal_zone_device *tz)
{
return 0;
}
-static inline int thermal_notify_tz_trip_up(int tz_id, int id, int temp)
+static inline int thermal_notify_tz_disable(const struct thermal_zone_device *tz)
{
return 0;
}
-static inline int thermal_notify_tz_trip_delete(int tz_id, int id)
+static inline int thermal_notify_tz_trip_down(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
{
return 0;
}
-static inline int thermal_notify_tz_trip_add(int tz_id, int id, int type,
- int temp, int hyst)
+static inline int thermal_notify_tz_trip_up(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
{
return 0;
}
-static inline int thermal_notify_tz_trip_change(int tz_id, int id, int type,
- int temp, int hyst)
+static inline int thermal_notify_tz_trip_change(const struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
{
return 0;
}
-static inline int thermal_notify_cdev_state_update(int cdev_id, int state)
+static inline int thermal_notify_cdev_state_update(const struct thermal_cooling_device *cdev,
+ int state)
{
return 0;
}
-static inline int thermal_notify_cdev_add(int cdev_id, const char *name,
- int max_state)
+static inline int thermal_notify_cdev_add(const struct thermal_cooling_device *cdev)
{
return 0;
}
-static inline int thermal_notify_cdev_delete(int cdev_id)
+static inline int thermal_notify_cdev_delete(const struct thermal_cooling_device *cdev)
{
return 0;
}
-static inline int thermal_notify_tz_gov_change(int tz_id, const char *name)
+static inline int thermal_notify_tz_gov_change(const struct thermal_zone_device *tz,
+ const char *name)
{
return 0;
}
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 1e0655b63..61bbd42aa 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -225,14 +225,18 @@ static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdel
int ret;
ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay-passive property\n", np);
+ if (ret == -EINVAL) {
+ *pdelay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay-passive: %d\n", np, ret);
return ret;
}
ret = of_property_read_u32(np, "polling-delay", delay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay property\n", np);
+ if (ret == -EINVAL) {
+ *delay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay: %d\n", np, ret);
return ret;
}
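The hunk above changes the policy for the polling-delay properties: a property that is simply absent (-EINVAL from the OF core) now falls back to a default of 0, while a genuinely malformed property still fails the registration. A minimal standalone sketch of that defaulting pattern; read_u32() is an invented stand-in for of_property_read_u32():

#include <stdio.h>
#include <errno.h>

static int read_u32(const char *prop, unsigned int *val)
{
	(void)prop;
	(void)val;
	return -EINVAL;		/* pretend the property is absent */
}

static int monitor_init(unsigned int *delay)
{
	int ret = read_u32("polling-delay", delay);

	if (ret == -EINVAL) {
		*delay = 0;	/* absent: polling simply disabled */
	} else if (ret < 0) {
		fprintf(stderr, "couldn't get polling-delay: %d\n", ret);
		return ret;	/* malformed: still a hard error */
	}
	return 0;
}

int main(void)
{
	unsigned int delay;

	if (!monitor_init(&delay))
		printf("delay = %u\n", delay);
	return 0;
}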
@@ -475,6 +479,7 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
struct thermal_zone_params tzp = {};
struct thermal_zone_device_ops *of_ops;
struct device_node *np;
+ const char *action;
int delay, pdelay;
int ntrips, mask;
int ret;
@@ -511,6 +516,11 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node *
mask = GENMASK_ULL((ntrips) - 1, 0);
+ ret = of_property_read_string(np, "critical-action", &action);
+ if (!ret)
+ if (!of_ops->critical && !strcasecmp(action, "reboot"))
+ of_ops->critical = thermal_zone_device_critical_reboot;
+
tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
mask, data, of_ops, &tzp,
pdelay, delay);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index eef40d4f3..f4033865b 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -83,25 +83,12 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
- int trip_id, result;
+ int trip_id;
if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
return -EINVAL;
- mutex_lock(&tz->lock);
-
- if (device_is_registered(dev))
- result = __thermal_zone_get_trip(tz, trip_id, &trip);
- else
- result = -ENODEV;
-
- mutex_unlock(&tz->lock);
-
- if (result)
- return result;
-
- switch (trip.type) {
+ switch (tz->trips[trip_id].type) {
case THERMAL_TRIP_CRITICAL:
return sprintf(buf, "critical\n");
case THERMAL_TRIP_HOT:
@@ -120,28 +107,33 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
+ struct thermal_trip *trip;
int trip_id, ret;
+ int temp;
+
+ ret = kstrtoint(buf, 10, &temp);
+ if (ret)
+ return -EINVAL;
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
- if (!device_is_registered(dev)) {
- ret = -ENODEV;
- goto unlock;
- }
+ trip = &tz->trips[trip_id];
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- if (ret)
- goto unlock;
+ if (temp != trip->temperature) {
+ if (tz->ops->set_trip_temp) {
+ ret = tz->ops->set_trip_temp(tz, trip_id, temp);
+ if (ret)
+ goto unlock;
+ }
- ret = kstrtoint(buf, 10, &trip.temperature);
- if (ret)
- goto unlock;
+ thermal_zone_set_trip_temp(tz, trip, temp);
+
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+ }
- ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
@@ -153,25 +145,12 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
- int trip_id, ret;
+ int trip_id;
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
- mutex_lock(&tz->lock);
-
- if (device_is_registered(dev))
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- else
- ret = -ENODEV;
-
- mutex_unlock(&tz->lock);
-
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", trip.temperature);
+ return sprintf(buf, "%d\n", tz->trips[trip_id].temperature);
}
static ssize_t
@@ -179,28 +158,33 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
+ struct thermal_trip *trip;
int trip_id, ret;
+ int hyst;
+
+ ret = kstrtoint(buf, 10, &hyst);
+ if (ret || hyst < 0)
+ return -EINVAL;
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
- if (!device_is_registered(dev)) {
- ret = -ENODEV;
- goto unlock;
- }
+ trip = &tz->trips[trip_id];
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- if (ret)
- goto unlock;
+ if (hyst != trip->hysteresis) {
+ if (tz->ops->set_trip_hyst) {
+ ret = tz->ops->set_trip_hyst(tz, trip_id, hyst);
+ if (ret)
+ goto unlock;
+ }
- ret = kstrtoint(buf, 10, &trip.hysteresis);
- if (ret)
- goto unlock;
+ trip->hysteresis = hyst;
+
+ thermal_zone_trip_updated(tz, trip);
+ }
- ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
@@ -212,22 +196,12 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- struct thermal_trip trip;
- int trip_id, ret;
+ int trip_id;
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- mutex_lock(&tz->lock);
-
- if (device_is_registered(dev))
- ret = __thermal_zone_get_trip(tz, trip_id, &trip);
- else
- ret = -ENODEV;
-
- mutex_unlock(&tz->lock);
-
- return ret ? ret : sprintf(buf, "%d\n", trip.hysteresis);
+ return sprintf(buf, "%d\n", tz->trips[trip_id].hysteresis);
}
static ssize_t
@@ -276,11 +250,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&tz->lock);
- if (!device_is_registered(dev)) {
- ret = -ENODEV;
- goto unlock;
- }
-
if (!tz->ops->set_emul_temp)
tz->emul_temperature = temperature;
else
@@ -289,7 +258,6 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
if (!ret)
__thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
-unlock:
mutex_unlock(&tz->lock);
return ret ? ret : count;
@@ -968,7 +936,16 @@ ssize_t weight_store(struct device *dev, struct device_attribute *attr,
return ret;
instance = container_of(attr, struct thermal_instance, weight_attr);
+
+ /* Don't race with governors using the 'weight' value */
+ mutex_lock(&instance->tz->lock);
+
instance->weight = weight;
+ thermal_governor_update_tz(instance->tz,
+ THERMAL_INSTANCE_WEIGHT_CHANGED);
+
+ mutex_unlock(&instance->tz->lock);
+
return count;
}
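The weight_store() fix above takes the zone lock around the update so a governor reading the weight can never race with the write, then notifies the governor from inside the critical section. A minimal standalone sketch of the same discipline, with pthread standing in for the zone mutex and update_cb() for thermal_governor_update_tz():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tz_lock = PTHREAD_MUTEX_INITIALIZER;
static int weight;

static void update_cb(void)
{
	printf("governor sees weight=%d\n", weight);
}

static void weight_store(int new_weight)
{
	pthread_mutex_lock(&tz_lock);
	weight = new_weight;	/* no concurrent read can observe a race */
	update_cb();
	pthread_mutex_unlock(&tz_lock);
}

int main(void)
{
	weight_store(5);
	return 0;
}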
diff --git a/drivers/thermal/thermal_trace_ipa.h b/drivers/thermal/thermal_trace_ipa.h
index 84568db54..b16b5dd86 100644
--- a/drivers/thermal/thermal_trace_ipa.h
+++ b/drivers/thermal/thermal_trace_ipa.h
@@ -8,19 +8,14 @@
#include <linux/tracepoint.h>
TRACE_EVENT(thermal_power_allocator,
- TP_PROTO(struct thermal_zone_device *tz, u32 *req_power,
- u32 total_req_power, u32 *granted_power,
- u32 total_granted_power, size_t num_actors,
- u32 power_range, u32 max_allocatable_power,
- int current_temp, s32 delta_temp),
- TP_ARGS(tz, req_power, total_req_power, granted_power,
- total_granted_power, num_actors, power_range,
- max_allocatable_power, current_temp, delta_temp),
+ TP_PROTO(struct thermal_zone_device *tz, u32 total_req_power,
+ u32 total_granted_power, int num_actors, u32 power_range,
+ u32 max_allocatable_power, int current_temp, s32 delta_temp),
+ TP_ARGS(tz, total_req_power, total_granted_power, num_actors,
+ power_range, max_allocatable_power, current_temp, delta_temp),
TP_STRUCT__entry(
__field(int, tz_id )
- __dynamic_array(u32, req_power, num_actors )
__field(u32, total_req_power )
- __dynamic_array(u32, granted_power, num_actors)
__field(u32, total_granted_power )
__field(size_t, num_actors )
__field(u32, power_range )
@@ -30,11 +25,7 @@ TRACE_EVENT(thermal_power_allocator,
),
TP_fast_assign(
__entry->tz_id = tz->id;
- memcpy(__get_dynamic_array(req_power), req_power,
- num_actors * sizeof(*req_power));
__entry->total_req_power = total_req_power;
- memcpy(__get_dynamic_array(granted_power), granted_power,
- num_actors * sizeof(*granted_power));
__entry->total_granted_power = total_granted_power;
__entry->num_actors = num_actors;
__entry->power_range = power_range;
@@ -43,18 +34,35 @@ TRACE_EVENT(thermal_power_allocator,
__entry->delta_temp = delta_temp;
),
- TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
- __entry->tz_id,
- __print_array(__get_dynamic_array(req_power),
- __entry->num_actors, 4),
- __entry->total_req_power,
- __print_array(__get_dynamic_array(granted_power),
- __entry->num_actors, 4),
+ TP_printk("thermal_zone_id=%d total_req_power=%u total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%d delta_temperature=%d",
+ __entry->tz_id, __entry->total_req_power,
__entry->total_granted_power, __entry->power_range,
__entry->max_allocatable_power, __entry->current_temp,
__entry->delta_temp)
);
+TRACE_EVENT(thermal_power_actor,
+ TP_PROTO(struct thermal_zone_device *tz, int actor_id, u32 req_power,
+ u32 granted_power),
+ TP_ARGS(tz, actor_id, req_power, granted_power),
+ TP_STRUCT__entry(
+ __field(int, tz_id)
+ __field(int, actor_id)
+ __field(u32, req_power)
+ __field(u32, granted_power)
+ ),
+ TP_fast_assign(
+ __entry->tz_id = tz->id;
+ __entry->actor_id = actor_id;
+ __entry->req_power = req_power;
+ __entry->granted_power = granted_power;
+ ),
+
+ TP_printk("thermal_zone_id=%d actor_id=%d req_power=%u granted_power=%u",
+ __entry->tz_id, __entry->actor_id, __entry->req_power,
+ __entry->granted_power)
+);
+
TRACE_EVENT(thermal_power_allocator_pid,
TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral,
s64 p, s64 i, s64 d, s32 output),
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index e42456442..e6dcb8117 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -63,51 +63,32 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
*/
void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
- struct thermal_trip trip;
+ const struct thermal_trip *trip;
int low = -INT_MAX, high = INT_MAX;
- bool same_trip = false;
- int i, ret;
+ int ret;
lockdep_assert_held(&tz->lock);
if (!tz->ops->set_trips)
return;
- for (i = 0; i < tz->num_trips; i++) {
- bool low_set = false;
+ for_each_trip(tz, trip) {
int trip_low;
- ret = __thermal_zone_get_trip(tz, i , &trip);
- if (ret)
- return;
-
- trip_low = trip.temperature - trip.hysteresis;
+ trip_low = trip->temperature - trip->hysteresis;
- if (trip_low < tz->temperature && trip_low > low) {
+ if (trip_low < tz->temperature && trip_low > low)
low = trip_low;
- low_set = true;
- same_trip = false;
- }
-
- if (trip.temperature > tz->temperature &&
- trip.temperature < high) {
- high = trip.temperature;
- same_trip = low_set;
- }
+
+ if (trip->temperature > tz->temperature &&
+ trip->temperature < high)
+ high = trip->temperature;
}
/* No need to change trip points */
if (tz->prev_low_trip == low && tz->prev_high_trip == high)
return;
- /*
- * If "high" and "low" are the same, skip the change unless this is the
- * first time.
- */
- if (same_trip && (tz->prev_low_trip != -INT_MAX ||
- tz->prev_high_trip != INT_MAX))
- return;
-
tz->prev_low_trip = low;
tz->prev_high_trip = high;
@@ -147,46 +128,7 @@ int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
}
EXPORT_SYMBOL_GPL(thermal_zone_get_trip);
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
- const struct thermal_trip *trip)
-{
- struct thermal_trip t;
- int ret;
-
- if (!tz->ops->set_trip_temp && !tz->ops->set_trip_hyst && !tz->trips)
- return -EINVAL;
-
- ret = __thermal_zone_get_trip(tz, trip_id, &t);
- if (ret)
- return ret;
-
- if (t.type != trip->type)
- return -EINVAL;
-
- if (t.temperature != trip->temperature && tz->ops->set_trip_temp) {
- ret = tz->ops->set_trip_temp(tz, trip_id, trip->temperature);
- if (ret)
- return ret;
- }
-
- if (t.hysteresis != trip->hysteresis && tz->ops->set_trip_hyst) {
- ret = tz->ops->set_trip_hyst(tz, trip_id, trip->hysteresis);
- if (ret)
- return ret;
- }
-
- if (tz->trips && (t.temperature != trip->temperature || t.hysteresis != trip->hysteresis))
- tz->trips[trip_id] = *trip;
-
- thermal_notify_tz_trip_change(tz->id, trip_id, trip->type,
- trip->temperature, trip->hysteresis);
-
- __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
-
- return 0;
-}
-
-int thermal_zone_trip_id(struct thermal_zone_device *tz,
+int thermal_zone_trip_id(const struct thermal_zone_device *tz,
const struct thermal_trip *trip)
{
/*
@@ -195,3 +137,20 @@ int thermal_zone_trip_id(struct thermal_zone_device *tz,
*/
return trip - tz->trips;
}
+void thermal_zone_trip_updated(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip)
+{
+ thermal_notify_tz_trip_change(tz, trip);
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+}
+
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp)
+{
+ if (trip->temperature == temp)
+ return;
+
+ trip->temperature = temp;
+ thermal_notify_tz_trip_change(tz, trip);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp);
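thermal_zone_set_trip_temp() above centralizes the "write only if changed, then notify" logic so every caller behaves identically and no-op writes stay silent. A minimal standalone sketch of the helper's shape; notify() is an invented stand-in for thermal_notify_tz_trip_change():

#include <stdio.h>

struct trip { int temperature; };

static void notify(const struct trip *t)
{
	printf("trip changed to %d\n", t->temperature);
}

static void set_trip_temp(struct trip *t, int temp)
{
	if (t->temperature == temp)
		return;			/* no-op writes stay silent */

	t->temperature = temp;
	notify(t);
}

int main(void)
{
	struct trip t = { .temperature = 85000 };

	set_trip_temp(&t, 85000);	/* ignored */
	set_trip_temp(&t, 90000);	/* updated + notified */
	return 0;
}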
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index ec7b5f658..df0d845e0 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -307,7 +307,7 @@ static const struct attribute_group *domain_attr_groups[] = {
NULL,
};
-struct bus_type tb_bus_type = {
+const struct bus_type tb_bus_type = {
.name = "thunderbolt",
.match = tb_service_match,
.probe = tb_service_probe,
@@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
+ * @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this has been returned successfully. In order to remove
@@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
- ret = tb->cm_ops->start(tb);
+ ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index d8b9c734a..baf10d099 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
- 1, 10, 2000);
+ 1, 10, 250);
if (ret)
return ret;
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 633970fbe..63cb4b6af 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
+
#include "tb.h"
/**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success, negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 mode;
+
+ if (sw->generation < 2)
+ return -EINVAL;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode |= TB_LC_PORT_MODE_DPR;
+
+ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode &= ~TB_LC_PORT_MODE_DPR;
+
+ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
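tb_lc_reset_port() above pulses the DPR bit: read-modify-write to assert it, hold for a fixed time, then clear it again. A minimal standalone sketch of that register-pulse sequence; the "register" is a plain variable and reg_read()/reg_write() are invented stand-ins for tb_sw_read()/tb_sw_write():

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define PORT_MODE_DPR (1u << 0)		/* illustrative bit position */

static uint32_t port_mode_reg;

static uint32_t reg_read(void)    { return port_mode_reg; }
static void reg_write(uint32_t v) { port_mode_reg = v; }

static void reset_port(void)
{
	uint32_t mode = reg_read();

	reg_write(mode | PORT_MODE_DPR);	/* assert reset */
	usleep(10000);				/* let it take effect */
	mode = reg_read();
	reg_write(mode & ~PORT_MODE_DPR);	/* deassert reset */
}

int main(void)
{
	reset_port();
	printf("mode=%#x\n", port_mode_reg);
	return 0;
}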
+
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4b7bec74e..b22023fae 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1221,7 +1221,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
str_enabled_disabled(port_ok));
}
-static void nhi_reset(struct tb_nhi *nhi)
+static bool nhi_reset(struct tb_nhi *nhi)
{
ktime_t timeout;
u32 val;
@@ -1229,11 +1229,11 @@ static void nhi_reset(struct tb_nhi *nhi)
val = ioread32(nhi->iobase + REG_CAPS);
/* Reset only v2 and later routers */
if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
- return;
+ return false;
if (!host_reset) {
dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
- return;
+ return false;
}
iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
@@ -1244,12 +1244,14 @@ static void nhi_reset(struct tb_nhi *nhi)
val = ioread32(nhi->iobase + REG_RESET);
if (!(val & REG_RESET_HRR)) {
dev_warn(&nhi->pdev->dev, "host router reset successful\n");
- return;
+ return true;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
+
+ return false;
}
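nhi_reset() above is a classic bounded-poll loop: re-read a self-clearing bit, sleep briefly between reads, and give up after a deadline. A minimal standalone sketch of the same loop; the hardware bit is faked with a countdown where the driver polls REG_RESET_HRR:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static int busy_reads = 3;	/* pretend hw clears after 3 polls */

static bool hw_busy(void)
{
	return busy_reads-- > 0;
}

static bool wait_done(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (!hw_busy())
			return true;		/* reset completed */
		usleep(20);
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000L +
		 (now.tv_nsec - start.tv_nsec) / 1000000L < timeout_ms);

	return false;				/* timed out */
}

int main(void)
{
	printf("reset %s\n", wait_done(500) ? "successful" : "timed out");
	return 0;
}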
static int nhi_init_msi(struct tb_nhi *nhi)
@@ -1331,6 +1333,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
+ bool reset;
int res;
if (!nhi_imr_valid(pdev))
@@ -1365,7 +1368,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
- nhi_reset(nhi);
+	/*
+	 * Only USB4 v2 hosts support host reset, so if we already did
+	 * it here, don't do it again when the domain is initialized.
+	 */
+ reset = nhi_reset(nhi) ? false : host_reset;
res = nhi_init_msi(nhi);
if (res)
@@ -1392,7 +1399,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
@@ -1517,6 +1524,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 0f029ce75..7a07c7c1a 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -90,6 +90,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21
#define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e
#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
+#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833
+#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 091a81bbd..f760e54cd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 on success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+ return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index e6bfa63b4..e81de9c30 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index a3c68c808..7b086923c 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
+static int tb_port_reset(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ return port->cap_usb4 ? usb4_port_reset(port) : 0;
+ return tb_lc_reset_port(port);
+}
+
/*
* tb_init_port() - initialize a port
*
@@ -941,22 +948,6 @@ int tb_port_get_link_generation(struct tb_port *port)
}
}
-static const char *width_name(enum tb_link_width width)
-{
- switch (width) {
- case TB_LINK_WIDTH_SINGLE:
- return "symmetric, single lane";
- case TB_LINK_WIDTH_DUAL:
- return "symmetric, dual lanes";
- case TB_LINK_WIDTH_ASYM_TX:
- return "asymmetric, 3 transmitters, 1 receiver";
- case TB_LINK_WIDTH_ASYM_RX:
- return "asymmetric, 3 receivers, 1 transmitter";
- default:
- return "unknown";
- }
-}
-
/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
@@ -1550,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+ if (sw->generation > 1) {
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ int i, ret;
+
+ /*
+ * For lane adapters we issue downstream port
+ * reset and clear up path config spaces.
+ *
+ * For protocol adapters we disable the path and
+ * clear path config space one by one (from 8 to
+ * Max Input HopID of the adapter).
+ */
+ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+ ret = tb_port_reset(port);
+ if (ret)
+ return ret;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ tb_usb3_port_enable(port, false);
+ } else if (tb_port_is_dpin(port) ||
+ tb_port_is_dpout(port)) {
+ tb_dp_port_enable(port, false);
+ } else if (tb_port_is_pcie_down(port) ||
+ tb_port_is_pcie_up(port)) {
+ tb_pci_port_enable(port, false);
+ } else {
+ continue;
+ }
+
+ /* Cleanup path config space of protocol adapter */
+ for (i = TB_PATH_MIN_HOPID;
+ i <= port->config.max_in_hop_id; i++) {
+ ret = tb_path_deactivate_hop(port, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ struct tb_cfg_result res;
+
+ /* Thunderbolt 1 uses the "reset" config space packet */
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+ if (res.err > 0)
+ return -EIO;
+ else if (res.err < 0)
+ return res.err;
+ }
+
+ return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+ return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Read directly from the hardware because we use this also
+ * during system sleep where sw->config.enabled is already set
+ * by us.
+ */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ROUTER_CS_3_V);
+}
+
/**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
*
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues downstream port reset
+ * through the parent router, so as a side effect the device will be
+ * unplugged soon after this finishes.
+ *
+ * If the router is not enumerated, this does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
- struct tb_cfg_result res;
+ int ret;
- if (sw->generation > 1)
+ /*
+ * We cannot access the port config spaces unless the router is
+ * already enumerated. If the router is not enumerated it is
+ * equal to being reset so we can skip that here.
+ */
+ if (!tb_switch_enumerated(sw))
return 0;
- tb_sw_dbg(sw, "resetting switch\n");
+ tb_sw_dbg(sw, "resetting\n");
+
+ if (tb_route(sw))
+ ret = tb_switch_reset_device(sw);
+ else
+ ret = tb_switch_reset_host(sw);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to reset\n");
- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
- TB_CFG_SWITCH, 2, 2);
- if (res.err)
- return res.err;
- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
- if (res.err > 0)
- return -EIO;
- return res.err;
+ return ret;
}
/**
@@ -2772,7 +2858,7 @@ static void tb_switch_link_init(struct tb_switch *sw)
return;
tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
- tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
+ tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));
bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
@@ -2792,6 +2878,19 @@ static void tb_switch_link_init(struct tb_switch *sw)
if (down->dual_link_port)
down->dual_link_port->bonded = bonded;
tb_port_update_credits(down);
+
+ if (tb_port_get_link_generation(up) < 4)
+ return;
+
+ /*
+ * Set the Gen 4 preferred link width. This is what the router
+ * prefers when the link is brought up. If the router does not
+ * support asymmetric link configuration, this also will be set
+ * to TB_LINK_WIDTH_DUAL.
+ */
+ sw->preferred_link_width = sw->link_width;
+ tb_sw_dbg(sw, "preferred link width %s\n",
+ tb_width_name(sw->preferred_link_width));
}
/**
@@ -3032,7 +3131,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
tb_switch_update_link_attributes(sw);
- tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
+ tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
return ret;
}
@@ -3081,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure downstream port so that wake-on-connect can be
+ * configured after router unplug. No need to unconfigure upstream port
+ * since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3342,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children), if it is
+ * still plugged after suspend. Does not enumerate a device router whose
+ * UID changed during suspend. If this is a resume from system sleep,
+ * notifies the PM core about the wakes that occurred during suspend.
+ * Disables all wakes, except the USB4 wake of the upstream port, which
+ * must always stay enabled for USB4 routers.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3391,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3420,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index fd49f86e0..525f515e8 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -513,8 +513,6 @@ static void tb_port_unconfigure_xdomain(struct tb_port *port)
usb4_port_unconfigure_xdomain(port);
else
tb_lc_unconfigure_xdomain(port);
-
- tb_port_enable(port->dual_link_port);
}
static void tb_scan_xdomain(struct tb_port *port)
@@ -1087,15 +1085,14 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, int requested_up,
int requested_down)
{
+ bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
- bool clx, downstream;
struct tb_port *up;
int ret = 0;
if (!asym_threshold)
return 0;
- /* Disable CL states before doing any transitions */
downstream = tb_port_path_direction_downstream(src_port, dst_port);
/* Pick up router deepest in the hierarchy */
if (downstream)
@@ -1103,11 +1100,10 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
else
sw = src_port->sw;
- clx = tb_disable_clx(sw);
-
tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+ struct tb_port *down = tb_switch_downstream_port(up->sw);
+ enum tb_link_width width_up, width_down;
int consumed_up, consumed_down;
- enum tb_link_width width;
ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
&consumed_up, &consumed_down);
@@ -1128,7 +1124,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
if (consumed_down + requested_down < asym_threshold)
continue;
- width = TB_LINK_WIDTH_ASYM_RX;
+ width_up = TB_LINK_WIDTH_ASYM_RX;
+ width_down = TB_LINK_WIDTH_ASYM_TX;
} else {
/* Upstream, the opposite of above */
if (consumed_down + requested_down >= TB_ASYM_MIN) {
@@ -1138,22 +1135,34 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
if (consumed_up + requested_up < asym_threshold)
continue;
- width = TB_LINK_WIDTH_ASYM_TX;
+ width_up = TB_LINK_WIDTH_ASYM_TX;
+ width_down = TB_LINK_WIDTH_ASYM_RX;
}
- if (up->sw->link_width == width)
+ if (up->sw->link_width == width_up)
continue;
- if (!tb_port_width_supported(up, width))
+ if (!tb_port_width_supported(up, width_up) ||
+ !tb_port_width_supported(down, width_down))
continue;
+ /*
+ * Disable CL states before doing any transitions. We
+ * delayed it until now that we know there is a real
+ * transition taking place.
+ */
+ if (!clx_disabled) {
+ clx = tb_disable_clx(sw);
+ clx_disabled = true;
+ }
+
tb_sw_dbg(up->sw, "configuring asymmetric link\n");
/*
* Here requested + consumed > threshold so we need to
		 * transition the link into asymmetric now.
*/
- ret = tb_switch_set_link_width(up->sw, width);
+ ret = tb_switch_set_link_width(up->sw, width_up);
if (ret) {
tb_sw_warn(up->sw, "failed to set link width\n");
break;
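The restructuring above defers the expensive prerequisite (disabling CL states) until the loop knows a real width transition is needed, and pays it at most once. A minimal standalone sketch of that lazy, pay-once pattern; disable_clx() is an invented stand-in for tb_disable_clx():

#include <stdio.h>
#include <stdbool.h>

static bool disable_clx(void)
{
	printf("CLx disabled\n");
	return true;		/* report it was enabled before */
}

int main(void)
{
	bool clx = false, clx_disabled = false;
	int widths[] = { 2, 2, 1 };	/* 1 = needs transition */

	for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		if (widths[i] == 2)
			continue;	/* nothing to change here */

		if (!clx_disabled) {	/* pay the cost only now */
			clx = disable_clx();
			clx_disabled = true;
		}
		printf("transition link %u (restore CLx later: %d)\n", i, clx);
	}
	return 0;
}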
@@ -1174,24 +1183,24 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @dst_port: Destination adapter
* @requested_up: New lower bandwidth request upstream (Mb/s)
* @requested_down: New lower bandwidth request downstream (Mb/s)
+ * @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
* transition the link to symmetric if the currently consumed bandwidth
- * allows.
+ * allows. If @keep_asym is %true, a link whose router prefers an
+ * asymmetric width is left asymmetric.
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, int requested_up,
- int requested_down)
+ int requested_down, bool keep_asym)
{
+ bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
- bool clx, downstream;
struct tb_port *up;
int ret = 0;
if (!asym_threshold)
return 0;
- /* Disable CL states before doing any transitions */
downstream = tb_port_path_direction_downstream(src_port, dst_port);
/* Pick up router deepest in the hierarchy */
if (downstream)
@@ -1199,8 +1208,6 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
else
sw = src_port->sw;
- clx = tb_disable_clx(sw);
-
tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
int consumed_up, consumed_down;
@@ -1233,6 +1240,25 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
continue;
+ /*
+ * Here consumed < threshold so we can transition the
+ * link to symmetric.
+ *
+		 * However, if the router prefers an asymmetric link we
+		 * honor that when @keep_asym is %true.
+ */
+ if (keep_asym &&
+ up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
+ tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
+ continue;
+ }
+
+ /* Disable CL states before doing any transitions */
+ if (!clx_disabled) {
+ clx = tb_disable_clx(sw);
+ clx_disabled = true;
+ }
+
tb_sw_dbg(up->sw, "configuring symmetric link\n");
ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
@@ -1280,7 +1306,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0);
+ tb_configure_sym(tb, host_port, up, 0, 0, false);
}
/* Set the link configured */
@@ -1465,7 +1491,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0);
+ tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1691,6 +1717,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
+ /* Needs to be on different routers */
+ if (in->sw == port->sw) {
+ tb_port_dbg(port, "skipping DP OUT on same router\n");
+ continue;
+ }
+
tb_port_dbg(port, "DP OUT available\n");
/*
@@ -1861,6 +1893,49 @@ static void tb_tunnel_dp(struct tb *tb)
;
}
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
+}
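The redrive helpers above use the port->redrive flag to keep the runtime PM get/put strictly paired even if enter/exit are triggered repeatedly or out of order. A minimal standalone sketch of that guarded refcount pairing; the counter stands in for the host router's runtime PM usage count:

#include <stdio.h>
#include <stdbool.h>

static int pm_count;
static bool redrive;

static void enter_redrive(void)
{
	if (redrive)
		return;		/* already holding a reference */
	redrive = true;
	pm_count++;		/* pm_runtime_get() stand-in */
}

static void exit_redrive(void)
{
	if (!redrive)
		return;		/* never took the reference */
	redrive = false;
	pm_count--;		/* pm_runtime_put() stand-in */
}

int main(void)
{
	enter_redrive();
	enter_redrive();	/* harmless: guard prevents double get */
	exit_redrive();
	printf("pm_count=%d\n", pm_count);	/* back to 0 */
	return 0;
}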
+
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
struct tb_port *in, *out;
@@ -1877,7 +1952,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1901,9 +1979,10 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
return;
}
- tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_dbg(port, "DP %s resource available after hotplug\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -2287,7 +2366,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
* If bandwidth on a link is < asym_threshold transition
* the link to symmetric.
*/
- tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+ tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
/*
* If requested bandwidth is less or equal than what is
* currently allocated to that tunnel we simply change
@@ -2330,7 +2409,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0);
+ tb_configure_sym(tb, in, out, 0, 0, true);
return ret;
}
@@ -2338,7 +2417,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0);
+ tb_configure_sym(tb, in, out, 0, 0, true);
}
} else {
ret = -ENOBUFS;
@@ -2555,7 +2634,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
int ret;
@@ -2596,12 +2675,24 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
- /* Full scan to discover devices added before the driver was loaded. */
- tb_scan_switch(tb->root_switch);
- /* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb);
- /* Add DP resources from the DP tunnels created by the boot firmware */
- tb_discover_dp_resources(tb);
+
+ /*
+ * Boot firmware might have created tunnels of its own. Since we
+ * cannot be sure they are usable for us, tear them down and
+ * reset the ports to handle it as new hotplug for USB4 v1
+ * routers (for USB4 v2 and beyond we already do host reset).
+ */
+ if (reset && usb4_switch_version(tb->root_switch) == 1) {
+ tb_switch_reset(tb->root_switch);
+ } else {
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
+ }
+
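For orientation, a hedged sketch of how the new @reset flag reaches this function; the wiring is implied by the tb_domain_add() and struct tb_cm_ops changes later in this patch, not shown in full here:

	/*
	 * Assumed call chain: NHI probe -> tb_domain_add(tb, reset) ->
	 * tb->cm_ops->start(tb, reset), which is tb_start() for the
	 * software connection manager.
	 */
	static int example_domain_bringup(struct tb *tb, bool reset)
	{
		return tb->cm_ops->start(tb, reset);
	}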
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -2672,10 +2763,14 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
- /* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb->root_switch);
+ /*
+ * For non-USB4 hosts (Apple systems) remove any PCIe devices
+ * the firmware might have setup.
+ */
+ if (!tb_switch_is_usb4(tb->root_switch))
+ tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -2801,7 +2896,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index e299e5347..7706f8e08 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -125,6 +127,7 @@ struct tb_switch_tmu {
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the upstream facing link
+ * @preferred_link_width: Router's preferred link width (only set for Gen 4 links)
* @link_usb4: Upstream link is USB4
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
@@ -178,6 +181,7 @@ struct tb_switch {
const char *device_name;
unsigned int link_speed;
enum tb_link_width link_width;
+ enum tb_link_width preferred_link_width;
bool link_usb4;
unsigned int generation;
int cap_plug_events;
@@ -256,6 +260,7 @@ struct tb_bandwidth_group {
* @group_list: The adapter is linked to the group's list of ports through this
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, %true if the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -284,6 +289,7 @@ struct tb_port {
struct tb_bandwidth_group *group;
struct list_head group_list;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -481,7 +487,7 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
- int (*start)(struct tb *tb);
+ int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
@@ -568,6 +574,22 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
return &sw->ports[port];
}
+static inline const char *tb_width_name(enum tb_link_width width)
+{
+ switch (width) {
+ case TB_LINK_WIDTH_SINGLE:
+ return "symmetric, single lane";
+ case TB_LINK_WIDTH_DUAL:
+ return "symmetric, dual lanes";
+ case TB_LINK_WIDTH_ASYM_TX:
+ return "asymmetric, 3 transmitters, 1 receiver";
+ case TB_LINK_WIDTH_ASYM_RX:
+ return "asymmetric, 3 receivers, 1 transmitter";
+ default:
+ return "unknown";
+ }
+}
+
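Usage is straightforward; for example, the XDomain code later in this patch logs the negotiated width as:

	dev_dbg(&xd->dev, "current link width %s\n",
		tb_width_name(xd->link_width));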
/**
* tb_port_has_remote() - Does the port have a switch connected downstream
* @port: Port to check
@@ -728,7 +750,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@@ -795,7 +817,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1132,6 +1154,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@@ -1151,6 +1174,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1254,6 +1278,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
@@ -1283,6 +1308,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f798f6a2..4e43b47f9 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
+#define ROUTER_CS_3 0x03
+#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
+#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
+#define TB_LC_PORT_MODE 0x26
+#define TB_LC_PORT_MODE_DPR BIT(0)
+
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 11f2aec2a..9a259c72e 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -894,7 +894,7 @@ static int tb_switch_tmu_change_mode(struct tb_switch *sw)
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
- return ret;
+ goto out;
/* Program the new mode and the downstream router lane adapter */
switch (sw->tmu.mode_request) {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 7534cd3a8..4f09216b7 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -173,16 +173,28 @@ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
int ret;
/* Only supported if both routers are at least USB4 v2 */
- if (tb_port_get_link_generation(port) < 4)
+ if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
+ (usb4_switch_version(tunnel->dst_port->sw) < 2))
+ return 0;
+
+ if (enable && tb_port_get_link_generation(port) < 4)
return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
if (ret)
return ret;
+ /*
+ * The downstream router could be unplugged, so disabling
+ * encapsulation in the upstream router must still be possible.
+ */
ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
- if (ret)
- return ret;
+ if (ret) {
+ if (enable)
+ return ret;
+ if (ret != -ENODEV)
+ return ret;
+ }
tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
str_enabled_disabled(enable));
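The nested error checks above reduce to a single condition; a condensed sketch (assuming -ENODEV is what the config write returns once the downstream router is gone):

	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
	/* only a vanished router may fail the disable path */
	if (ret && (enable || ret != -ENODEV))
		return ret;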
@@ -199,14 +211,21 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
return res;
}
- res = tb_pci_port_enable(tunnel->src_port, activate);
+ if (activate)
+ res = tb_pci_port_enable(tunnel->dst_port, activate);
+ else
+ res = tb_pci_port_enable(tunnel->src_port, activate);
if (res)
return res;
- if (tb_port_is_pcie_up(tunnel->dst_port)) {
- res = tb_pci_port_enable(tunnel->dst_port, activate);
+
+ if (activate) {
+ res = tb_pci_port_enable(tunnel->src_port, activate);
if (res)
return res;
+ } else {
+ /* Downstream router could be unplugged */
+ tb_pci_port_enable(tunnel->dst_port, activate);
}
return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
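The reordering enables the downstream adapter before the upstream one when activating, and reverses that on teardown, where the downstream disable is best-effort because the router may already be unplugged. The same ordering as one helper (hypothetical name, sketch only):

	static int example_pci_enable_order(struct tb_tunnel *tunnel, bool activate)
	{
		struct tb_port *first = activate ? tunnel->dst_port : tunnel->src_port;
		struct tb_port *second = activate ? tunnel->src_port : tunnel->dst_port;
		int res;

		res = tb_pci_port_enable(first, activate);
		if (res)
			return res;
		res = tb_pci_port_enable(second, activate);
		/* on teardown the downstream router may already be gone */
		return activate ? res : 0;
	}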
@@ -1067,8 +1086,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
return 0;
}
-static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
- int timeout_msec)
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
struct tb_port *in = tunnel->src_port;
@@ -1087,15 +1105,13 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
return ret;
if (val & DP_COMMON_CAP_DPRX_DONE) {
- *rate = tb_dp_cap_get_rate(val);
- *lanes = tb_dp_cap_get_lanes(val);
-
tb_tunnel_dbg(tunnel, "DPRX read done\n");
return 0;
}
usleep_range(100, 150);
} while (ktime_before(ktime_get(), timeout));
+ tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
return -ETIMEDOUT;
}
@@ -1110,6 +1126,7 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
switch (cap) {
case DP_LOCAL_CAP:
case DP_REMOTE_CAP:
+ case DP_COMMON_CAP:
break;
default:
@@ -1179,17 +1196,16 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
+ * reduced one). According to the VESA spec, the DPRX
+ * negotiation shall complete within 5 seconds after the
+ * tunnel is established. We give it 100 ms extra just in case.
*/
- ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
- if (ret) {
- if (ret == -ETIMEDOUT)
- ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
- &rate, &lanes);
- if (ret)
- return ret;
- }
+ ret = tb_dp_wait_dprx(tunnel, 5100);
+ if (ret)
+ return ret;
+ ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+ if (ret)
+ return ret;
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
@@ -1313,8 +1329,6 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
"DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
- out = tunnel->dst_port;
-
if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
out->cap_adap + DP_LOCAL_CAP, 1))
return;
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1515eff8c..a74c9ea67 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
@@ -1113,6 +1114,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val |= PORT_CS_19_DPR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_DPR;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
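The body follows the USB4 downstream port reset (DPR) flow: set PORT_CS_19_DPR, keep it asserted for 10 ms, then clear it. A hedged sketch of a caller — the real call site is in the switch reset path, outside this excerpt:

	/* Assumed usage: reset every lane-0 USB4 port of a v1 host router */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port) && port->cap_usb4)
			usb4_port_reset(port);
	}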
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9803f0bbf..949574291 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1462,6 +1462,11 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
tb_port_disable(port->dual_link_port);
}
+ dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
+ xd->link_speed);
+ dev_dbg(&xd->dev, "current link width %s\n",
+ tb_width_name(xd->link_width));
+
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");
return -ENODEV;
@@ -1895,6 +1900,50 @@ struct device_type tb_xdomain_type = {
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);
+static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
+{
+ if (!down->dual_link_port)
+ return;
+
+ /*
+ * Gen 4 links already come up bonded, so only update the port
+ * structures here.
+ */
+ if (tb_port_get_link_generation(down) >= 4) {
+ down->bonded = true;
+ down->dual_link_port->bonded = true;
+ } else {
+ xd->bonding_possible = true;
+ }
+}
+
+static void tb_xdomain_link_exit(struct tb_xdomain *xd)
+{
+ struct tb_port *down = tb_xdomain_downstream_port(xd);
+
+ if (!down->dual_link_port)
+ return;
+
+ if (tb_port_get_link_generation(down) >= 4) {
+ down->bonded = false;
+ down->dual_link_port->bonded = false;
+ } else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
+ /*
+ * Just return the port structures back to the way they were
+ * and update the credits. No need to update userspace because
+ * the XDomain is removed soon anyway.
+ */
+ tb_port_lane_bonding_disable(down);
+ tb_port_update_credits(down);
+ } else if (down->dual_link_port) {
+ /*
+ * Re-enable the lane 1 adapter we disabled at the end
+ * of tb_xdomain_get_properties().
+ */
+ tb_port_enable(down->dual_link_port);
+ }
+}
+
/**
* tb_xdomain_alloc() - Allocate new XDomain object
* @tb: Domain where the XDomain belongs
@@ -1945,7 +1994,8 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
goto err_free_local_uuid;
} else {
xd->needs_uuid = true;
- xd->bonding_possible = !!down->dual_link_port;
+
+ tb_xdomain_link_init(xd, down);
}
device_initialize(&xd->dev);
@@ -2014,6 +2064,8 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+ tb_xdomain_link_exit(xd);
+
/*
* Undo runtime PM here explicitly because it is possible that
* the XDomain was never added to the bus and thus device_del()
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 785558c65..7716ce0d3 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -81,7 +81,7 @@ struct serial_state {
int quot;
int IER; /* Interrupt Enable Register */
int MCR; /* Modem control register */
- int x_char; /* xon/xoff character */
+ u8 x_char; /* xon/xoff character */
};
static struct tty_driver *serial_driver;
@@ -178,9 +178,9 @@ static void receive_chars(struct serial_state *info)
{
int status;
int serdatr;
- unsigned char ch, flag;
+ u8 ch, flag;
struct async_icount *icount;
- int oe = 0;
+ bool overrun = false;
icount = &info->icount;
@@ -230,7 +230,7 @@ static void receive_chars(struct serial_state *info)
* should be ignored.
*/
if (status & info->ignore_status_mask)
- goto out;
+ return;
status &= info->read_status_mask;
@@ -251,15 +251,13 @@ static void receive_chars(struct serial_state *info)
* reported immediately, and doesn't
* affect the current character
*/
- oe = 1;
+ overrun = true;
}
}
tty_insert_flip_char(&info->tport, ch, flag);
- if (oe == 1)
+ if (overrun)
tty_insert_flip_char(&info->tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(&info->tport);
-out:
- return;
}
static void transmit_chars(struct serial_state *info)
@@ -813,7 +811,7 @@ static void rs_flush_buffer(struct tty_struct *tty)
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
-static void rs_send_xchar(struct tty_struct *tty, char ch)
+static void rs_send_xchar(struct tty_struct *tty, u8 ch)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index a067628e0..69508d7a4 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -49,7 +49,7 @@ struct ehv_bc_data {
unsigned int tx_irq;
spinlock_t lock; /* lock for transmit buffer */
- unsigned char buf[BUF_SIZE]; /* transmit circular buffer */
+ u8 buf[BUF_SIZE]; /* transmit circular buffer */
unsigned int head; /* circular buffer head */
unsigned int tail; /* circular buffer tail */
@@ -138,14 +138,17 @@ static int find_console_handle(void)
static unsigned int local_ev_byte_channel_send(unsigned int handle,
unsigned int *count,
- const char *p)
+ const u8 *p)
{
- char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+ u8 buffer[EV_BYTE_CHANNEL_MAX_BYTES];
unsigned int c = *count;
+ /*
+ * ev_byte_channel_send() expects at least EV_BYTE_CHANNEL_MAX_BYTES
+ * (16 bytes) in the buffer. Fake it using a local buffer if needed.
+ */
if (c < sizeof(buffer)) {
- memcpy(buffer, p, c);
- memset(&buffer[c], 0, sizeof(buffer) - c);
+ memcpy_and_pad(buffer, sizeof(buffer), p, c, 0);
p = buffer;
}
return ev_byte_channel_send(handle, count, p);
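memcpy_and_pad() (from <linux/string.h>) copies @count bytes and fills the remainder of the destination with the pad byte, replacing the open-coded memcpy() plus memset() pair. For example:

	u8 buffer[EV_BYTE_CHANNEL_MAX_BYTES];	/* 16 bytes */

	/* copy 5 payload bytes, zero-fill the remaining 11 */
	memcpy_and_pad(buffer, sizeof(buffer), p, 5, 0);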
@@ -163,7 +166,7 @@ static unsigned int local_ev_byte_channel_send(unsigned int handle,
* has been sent, or if some error has occurred.
*
*/
-static void byte_channel_spin_send(const char data)
+static void byte_channel_spin_send(const u8 data)
{
int ret, count;
@@ -471,8 +474,7 @@ static ssize_t ehv_bc_tty_write(struct tty_struct *ttys, const u8 *s,
{
struct ehv_bc_data *bc = ttys->driver_data;
unsigned long flags;
- unsigned int len;
- unsigned int written = 0;
+ size_t len, written = 0;
while (1) {
spin_lock_irqsave(&bc->lock, flags);
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 4591f940b..d27979eab 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -50,10 +50,8 @@ static u32 goldfish_tty_line_count = 8;
static u32 goldfish_tty_current_line_count;
static struct goldfish_tty *goldfish_ttys;
-static void do_rw_io(struct goldfish_tty *qtty,
- unsigned long address,
- unsigned int count,
- int is_write)
+static void do_rw_io(struct goldfish_tty *qtty, unsigned long address,
+ size_t count, bool is_write)
{
unsigned long irq_flags;
void __iomem *base = qtty->base;
@@ -73,10 +71,8 @@ static void do_rw_io(struct goldfish_tty *qtty,
spin_unlock_irqrestore(&qtty->lock, irq_flags);
}
-static void goldfish_tty_rw(struct goldfish_tty *qtty,
- unsigned long addr,
- unsigned int count,
- int is_write)
+static void goldfish_tty_rw(struct goldfish_tty *qtty, unsigned long addr,
+ size_t count, bool is_write)
{
dma_addr_t dma_handle;
enum dma_data_direction dma_dir;
@@ -125,20 +121,18 @@ static void goldfish_tty_rw(struct goldfish_tty *qtty,
}
}
-static void goldfish_tty_do_write(int line, const u8 *buf, unsigned int count)
+static void goldfish_tty_do_write(int line, const u8 *buf, size_t count)
{
struct goldfish_tty *qtty = &goldfish_ttys[line];
- unsigned long address = (unsigned long)(void *)buf;
- goldfish_tty_rw(qtty, address, count, 1);
+ goldfish_tty_rw(qtty, (unsigned long)buf, count, true);
}
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
struct goldfish_tty *qtty = dev_id;
void __iomem *base = qtty->base;
- unsigned long address;
- unsigned char *buf;
+ u8 *buf;
u32 count;
count = gf_ioread32(base + GOLDFISH_TTY_REG_BYTES_READY);
@@ -147,8 +141,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
count = tty_prepare_flip_string(&qtty->port, &buf, count);
- address = (unsigned long)(void *)buf;
- goldfish_tty_rw(qtty, address, count, 0);
+ goldfish_tty_rw(qtty, (unsigned long)buf, count, false);
tty_flip_buffer_push(&qtty->port);
return IRQ_HANDLED;
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4f9264d00..c2a4e88b3 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV_SBI_V01
+ depends on RISCV_SBI && NONPORTABLE
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
- is normally used only during boot to output printk.
+ is normally used only during boot to output printk. This driver
+ conflicts with real console drivers and should not be enabled on
+ systems that directly access the console.
- If you don't know what do to here, say Y.
+ If you don't know what to do here, say N.
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 959fae54c..cd1f657f7 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -922,8 +922,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
return ERR_PTR(err);
}
- hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
- GFP_KERNEL);
+ hp = kzalloc(struct_size(hp, outbuf, outbuf_size), GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
@@ -931,7 +930,6 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
hp->data = data;
hp->ops = ops;
hp->outbuf_size = outbuf_size;
- hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
tty_port_init(&hp->port);
hp->port.ops = &hvc_port_ops;
@@ -976,7 +974,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
}
EXPORT_SYMBOL_GPL(hvc_alloc);
-int hvc_remove(struct hvc_struct *hp)
+void hvc_remove(struct hvc_struct *hp)
{
unsigned long flags;
struct tty_struct *tty;
@@ -1010,7 +1008,6 @@ int hvc_remove(struct hvc_struct *hp)
tty_vhangup(tty);
tty_kref_put(tty);
}
- return 0;
}
EXPORT_SYMBOL_GPL(hvc_remove);
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 9668f821d..cf4c1af08 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -37,7 +37,6 @@ struct hvc_struct {
spinlock_t lock;
int index;
int do_wakeup;
- char *outbuf;
int outbuf_size;
int n_outbuf;
uint32_t vtermno;
@@ -48,12 +47,13 @@ struct hvc_struct {
struct work_struct tty_resize;
struct list_head next;
unsigned long flags;
+ u8 outbuf[] __aligned(sizeof(long));
};
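With outbuf[] now a flexible array member, struct_size() (from <linux/overflow.h>) sizes the allocation with overflow checking, and the old ALIGN() arithmetic plus pointer fixup disappear. Equivalently (sketch):

	/*
	 * struct_size(hp, outbuf, outbuf_size) ==
	 *	sizeof(*hp) + outbuf_size * sizeof(hp->outbuf[0]),
	 * saturating at SIZE_MAX on overflow so kzalloc() fails cleanly.
	 * The __aligned(sizeof(long)) attribute preserves the alignment
	 * the old ALIGN() arithmetic provided.
	 */
	hp = kzalloc(struct_size(hp, outbuf, outbuf_size), GFP_KERNEL);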
/* implemented by a low level driver */
struct hv_ops {
- int (*get_chars)(uint32_t vtermno, char *buf, int count);
- int (*put_chars)(uint32_t vtermno, const char *buf, int count);
+ ssize_t (*get_chars)(uint32_t vtermno, u8 *buf, size_t count);
+ ssize_t (*put_chars)(uint32_t vtermno, const u8 *buf, size_t count);
int (*flush)(uint32_t vtermno, bool wait);
/* Callbacks for notification. Called in open, close and hangup */
@@ -77,7 +77,7 @@ extern int hvc_instantiate(uint32_t vtermno, int index,
extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data,
const struct hv_ops *ops, int outbuf_size);
/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
-extern int hvc_remove(struct hvc_struct *hp);
+extern void hvc_remove(struct hvc_struct *hp);
/* data available */
int hvc_poll(struct hvc_struct *hp);
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 1751108cf..dfc5c9c38 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -26,10 +26,10 @@
/* Lock to serialize access to DCC fifo */
static DEFINE_SPINLOCK(dcc_lock);
-static DEFINE_KFIFO(inbuf, unsigned char, DCC_INBUF_SIZE);
-static DEFINE_KFIFO(outbuf, unsigned char, DCC_OUTBUF_SIZE);
+static DEFINE_KFIFO(inbuf, u8, DCC_INBUF_SIZE);
+static DEFINE_KFIFO(outbuf, u8, DCC_OUTBUF_SIZE);
-static void dcc_uart_console_putchar(struct uart_port *port, unsigned char ch)
+static void dcc_uart_console_putchar(struct uart_port *port, u8 ch)
{
while (__dcc_getstatus() & DCC_STATUS_TX)
cpu_relax();
@@ -47,6 +47,14 @@ static void dcc_early_write(struct console *con, const char *s, unsigned n)
static int __init dcc_early_console_setup(struct earlycon_device *device,
const char *opt)
{
+ unsigned int count = 0x4000000;
+
+ while (--count && (__dcc_getstatus() & DCC_STATUS_TX))
+ cpu_relax();
+
+ if (__dcc_getstatus() & DCC_STATUS_TX)
+ return -ENODEV;
+
device->con->write = dcc_early_write;
return 0;
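The new guard bounds the wait for the DCC transmit channel to drain instead of hanging boot when no debugger is attached. The same check as a tiny helper (hypothetical; the patch open-codes it):

	static int __init dcc_wait_tx_drain(unsigned int spins)
	{
		while (--spins && (__dcc_getstatus() & DCC_STATUS_TX))
			cpu_relax();
		/* still busy after ~64M polls: nobody is draining the channel */
		return (__dcc_getstatus() & DCC_STATUS_TX) ? -ENODEV : 0;
	}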
@@ -54,9 +62,9 @@ static int __init dcc_early_console_setup(struct earlycon_device *device,
EARLYCON_DECLARE(dcc, dcc_early_console_setup);
-static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
+static ssize_t hvc_dcc_put_chars(uint32_t vt, const u8 *buf, size_t count)
{
- int i;
+ size_t i;
for (i = 0; i < count; i++) {
while (__dcc_getstatus() & DCC_STATUS_TX)
@@ -68,9 +76,9 @@ static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
return count;
}
-static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
+static ssize_t hvc_dcc_get_chars(uint32_t vt, u8 *buf, size_t count)
{
- int i;
+ size_t i;
for (i = 0; i < count; ++i)
if (__dcc_getstatus() & DCC_STATUS_RX)
@@ -149,8 +157,8 @@ static DECLARE_WORK(dcc_pwork, dcc_put_work);
*/
static void dcc_get_work(struct work_struct *work)
{
- unsigned char ch;
unsigned long irqflags;
+ u8 ch;
/*
* Read characters from DCC and put them into the input FIFO, as
@@ -172,10 +180,10 @@ static DECLARE_WORK(dcc_gwork, dcc_get_work);
* Write characters directly to the DCC if we're on core 0 and the FIFO
* is empty, or write them to the FIFO if we're not.
*/
-static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
+static ssize_t hvc_dcc0_put_chars(u32 vt, const u8 *buf, size_t count)
{
- int len;
unsigned long irqflags;
+ ssize_t len;
if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
return hvc_dcc_put_chars(vt, buf, count);
@@ -211,10 +219,10 @@ static int hvc_dcc0_put_chars(u32 vt, const char *buf, int count)
* Read characters directly from the DCC if we're on core 0 and the FIFO
* is empty, or read them from the FIFO if we're not.
*/
-static int hvc_dcc0_get_chars(u32 vt, char *buf, int count)
+static ssize_t hvc_dcc0_get_chars(u32 vt, u8 *buf, size_t count)
{
- int len;
unsigned long irqflags;
+ ssize_t len;
if (!IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP))
return hvc_dcc_get_chars(vt, buf, count);
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 543f35ddf..fdecc0d63 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -215,11 +215,11 @@ static void destroy_tty_buffer_list(struct list_head *list)
* If the IUCV path has been severed, then -EPIPE is returned to cause a
* hang up (that is issued by the HVC layer).
*/
-static int hvc_iucv_write(struct hvc_iucv_private *priv,
- char *buf, int count, int *has_more_data)
+static ssize_t hvc_iucv_write(struct hvc_iucv_private *priv,
+ u8 *buf, size_t count, int *has_more_data)
{
struct iucv_tty_buffer *rb;
- int written;
+ ssize_t written;
int rc;
/* immediately return if there is no IUCV connection */
@@ -312,10 +312,10 @@ out_written:
* the routine locks the struct hvc_iucv_private->lock to call
* helper functions.
*/
-static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_iucv_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
- int written;
+ ssize_t written;
int has_more_data;
if (count <= 0)
@@ -352,8 +352,8 @@ static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
* If an existing IUCV communication path has been severed, -EPIPE is returned
* (that can be passed to HVC layer to cause a tty hangup).
*/
-static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
- int count)
+static ssize_t hvc_iucv_queue(struct hvc_iucv_private *priv, const u8 *buf,
+ size_t count)
{
size_t len;
@@ -455,12 +455,12 @@ static void hvc_iucv_sndbuf_work(struct work_struct *work)
* Locking: The method gets called under an irqsave() spinlock; and
* locks struct hvc_iucv_private->lock.
*/
-static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_iucv_put_chars(uint32_t vtermno, const u8 *buf, size_t count)
{
struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
int queued;
- if (count <= 0)
+ if (!count)
return 0;
if (!priv)
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 992e199e0..095c33ad1 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -58,7 +58,7 @@ static const struct hv_ops hvc_opal_raw_ops = {
.notifier_hangup = notifier_hangup_irq,
};
-static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_opal_hvsi_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
@@ -68,7 +68,8 @@ static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
return hvsilib_get_chars(&pv->hvsi, buf, count);
}
-static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_opal_hvsi_put_chars(uint32_t vtermno, const u8 *buf,
+ size_t count)
{
struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
@@ -232,24 +233,21 @@ static int hvc_opal_probe(struct platform_device *dev)
return 0;
}
-static int hvc_opal_remove(struct platform_device *dev)
+static void hvc_opal_remove(struct platform_device *dev)
{
struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
- int rc, termno;
+ int termno;
termno = hp->vtermno;
- rc = hvc_remove(hp);
- if (rc == 0) {
- if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
- kfree(hvc_opal_privs[termno]);
- hvc_opal_privs[termno] = NULL;
- }
- return rc;
+ hvc_remove(hp);
+ if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
+ kfree(hvc_opal_privs[termno]);
+ hvc_opal_privs[termno] = NULL;
}
static struct platform_driver hvc_opal_driver = {
.probe = hvc_opal_probe,
- .remove = hvc_opal_remove,
+ .remove_new = hvc_opal_remove,
.driver = {
.name = hvc_opal_name,
.of_match_table = hvc_opal_match,
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index 31f53fa77..cede8a572 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -15,9 +15,9 @@
#include "hvc_console.h"
-static int hvc_sbi_tty_put(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_sbi_tty_put(uint32_t vtermno, const u8 *buf, size_t count)
{
- int i;
+ size_t i;
for (i = 0; i < count; i++)
sbi_console_putchar(buf[i]);
@@ -25,9 +25,10 @@ static int hvc_sbi_tty_put(uint32_t vtermno, const char *buf, int count)
return i;
}
-static int hvc_sbi_tty_get(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_sbi_tty_get(uint32_t vtermno, u8 *buf, size_t count)
{
- int i, c;
+ size_t i;
+ int c;
for (i = 0; i < count; i++) {
c = sbi_console_getchar();
@@ -39,21 +40,44 @@ static int hvc_sbi_tty_get(uint32_t vtermno, char *buf, int count)
return i;
}
-static const struct hv_ops hvc_sbi_ops = {
+static const struct hv_ops hvc_sbi_v01_ops = {
.get_chars = hvc_sbi_tty_get,
.put_chars = hvc_sbi_tty_put,
};
-static int __init hvc_sbi_init(void)
+static ssize_t hvc_sbi_dbcn_tty_put(uint32_t vtermno, const u8 *buf, size_t count)
{
- return PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_ops, 16));
+ return sbi_debug_console_write(buf, count);
}
-device_initcall(hvc_sbi_init);
-static int __init hvc_sbi_console_init(void)
+static ssize_t hvc_sbi_dbcn_tty_get(uint32_t vtermno, u8 *buf, size_t count)
{
- hvc_instantiate(0, 0, &hvc_sbi_ops);
+ return sbi_debug_console_read(buf, count);
+}
+
+static const struct hv_ops hvc_sbi_dbcn_ops = {
+ .put_chars = hvc_sbi_dbcn_tty_put,
+ .get_chars = hvc_sbi_dbcn_tty_get,
+};
+
+static int __init hvc_sbi_init(void)
+{
+ int err;
+
+ if (sbi_debug_console_available) {
+ err = PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_dbcn_ops, 256));
+ if (err)
+ return err;
+ hvc_instantiate(0, 0, &hvc_sbi_dbcn_ops);
+ } else if (IS_ENABLED(CONFIG_RISCV_SBI_V01)) {
+ err = PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_v01_ops, 256));
+ if (err)
+ return err;
+ hvc_instantiate(0, 0, &hvc_sbi_v01_ops);
+ } else {
+ return -ENODEV;
+ }
return 0;
}
-console_initcall(hvc_sbi_console_init);
+device_initcall(hvc_sbi_init);
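The rewritten init now selects a backend at runtime instead of at configure time; a summary of the order (sbi_debug_console_available and the read/write wrappers are assumed to be provided by the RISC-V SBI core):

	/*
	 * 1. SBI DBCN extension  -> hvc_sbi_dbcn_ops (bulk read/write)
	 * 2. SBI v0.1 console    -> hvc_sbi_v01_ops  (byte-at-a-time, legacy)
	 * 3. neither available   -> -ENODEV, no hvc device is created
	 */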
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 184d325ab..a0b90275b 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -31,10 +31,10 @@ static struct hvc_struct *hvc_rtas_dev;
static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
-static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
- int count)
+static ssize_t hvc_rtas_write_console(uint32_t vtermno, const u8 *buf,
+ size_t count)
{
- int i;
+ size_t i;
for (i = 0; i < count; i++) {
if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
@@ -44,9 +44,10 @@ static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
return i;
}
-static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_rtas_read_console(uint32_t vtermno, u8 *buf, size_t count)
{
- int i, c;
+ size_t i;
+ int c;
for (i = 0; i < count; i++) {
if (rtas_call(rtascons_get_char_token, 0, 2, &c))
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index ff0dcc564..fdc2699b7 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -19,9 +19,9 @@
static struct hvc_struct *hvc_udbg_dev;
-static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvc_udbg_put(uint32_t vtermno, const u8 *buf, size_t count)
{
- int i;
+ size_t i;
for (i = 0; i < count && udbg_putc; i++)
udbg_putc(buf[i]);
@@ -29,9 +29,10 @@ static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
return i;
}
-static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
+static ssize_t hvc_udbg_get(uint32_t vtermno, u8 *buf, size_t count)
{
- int i, c;
+ size_t i;
+ int c;
if (!udbg_getc_poll)
return 0;
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index 736b230f5..47930601a 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -58,20 +58,20 @@ struct hvterm_priv {
hv_protocol_t proto; /* Raw data or HVSI packets */
struct hvsi_priv hvsi; /* HVSI specific data */
spinlock_t buf_lock;
- char buf[SIZE_VIO_GET_CHARS];
- int left;
- int offset;
+ u8 buf[SIZE_VIO_GET_CHARS];
+ size_t left;
+ size_t offset;
};
static struct hvterm_priv *hvterm_privs[MAX_NR_HVC_CONSOLES];
/* For early boot console */
static struct hvterm_priv hvterm_priv0;
-static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvterm_raw_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
unsigned long i;
unsigned long flags;
- int got;
+ size_t got;
if (WARN_ON(!pv))
return 0;
@@ -115,7 +115,8 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
* you are sending fewer chars.
* @count: number of chars to send.
*/
-static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvterm_raw_put_chars(uint32_t vtermno, const u8 *buf,
+ size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
@@ -133,7 +134,7 @@ static const struct hv_ops hvterm_raw_ops = {
.notifier_hangup = notifier_hangup_irq,
};
-static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+static ssize_t hvterm_hvsi_get_chars(uint32_t vtermno, u8 *buf, size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
@@ -143,7 +144,8 @@ static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
return hvsilib_get_chars(&pv->hvsi, buf, count);
}
-static int hvterm_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+static ssize_t hvterm_hvsi_put_chars(uint32_t vtermno, const u8 *buf,
+ size_t count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 34c01874f..0e497501f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -84,13 +84,13 @@ static inline void notify_daemon(struct xencons_info *cons)
notify_remote_via_evtchn(cons->evtchn);
}
-static int __write_console(struct xencons_info *xencons,
- const char *data, int len)
+static ssize_t __write_console(struct xencons_info *xencons,
+ const u8 *data, size_t len)
{
XENCONS_RING_IDX cons, prod;
struct xencons_interface *intf = xencons->intf;
- int sent = 0;
unsigned long flags;
+ size_t sent = 0;
spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->out_cons;
@@ -115,10 +115,11 @@ static int __write_console(struct xencons_info *xencons,
return sent;
}
-static int domU_write_console(uint32_t vtermno, const char *data, int len)
+static ssize_t domU_write_console(uint32_t vtermno, const u8 *data, size_t len)
{
- int ret = len;
struct xencons_info *cons = vtermno_to_xencons(vtermno);
+ size_t ret = len;
+
if (cons == NULL)
return -EINVAL;
@@ -129,7 +130,7 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
* kernel is crippled.
*/
while (len) {
- int sent = __write_console(cons, data, len);
+ ssize_t sent = __write_console(cons, data, len);
if (sent < 0)
return sent;
@@ -144,14 +145,14 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
return ret;
}
-static int domU_read_console(uint32_t vtermno, char *buf, int len)
+static ssize_t domU_read_console(uint32_t vtermno, u8 *buf, size_t len)
{
struct xencons_interface *intf;
XENCONS_RING_IDX cons, prod;
- int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
unsigned int eoiflag = 0;
unsigned long flags;
+ size_t recv = 0;
if (xencons == NULL)
return -EINVAL;
@@ -209,7 +210,7 @@ static const struct hv_ops domU_hvc_ops = {
.notifier_hangup = notifier_hangup_irq,
};
-static int dom0_read_console(uint32_t vtermno, char *buf, int len)
+static ssize_t dom0_read_console(uint32_t vtermno, u8 *buf, size_t len)
{
return HYPERVISOR_console_io(CONSOLEIO_read, len, buf);
}
@@ -218,9 +219,9 @@ static int dom0_read_console(uint32_t vtermno, char *buf, int len)
* Either for a dom0 to write to the system console, or a domU with a
* debug version of Xen
*/
-static int dom0_write_console(uint32_t vtermno, const char *str, int len)
+static ssize_t dom0_write_console(uint32_t vtermno, const u8 *str, size_t len)
{
- int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (char *)str);
+ int rc = HYPERVISOR_console_io(CONSOLEIO_write, len, (u8 *)str);
if (rc < 0)
return rc;
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 09289c815..22e1bc4d8 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -12,7 +12,7 @@ static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
/* Assume this always succeeds; it works in practice */
- return pv->put_chars(pv->termno, (char *)packet, packet->len);
+ return pv->put_chars(pv->termno, (u8 *)packet, packet->len);
}
static void hvsi_start_handshake(struct hvsi_priv *pv)
@@ -178,9 +178,10 @@ static int hvsi_get_packet(struct hvsi_priv *pv)
return 0;
}
-int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
+ssize_t hvsilib_get_chars(struct hvsi_priv *pv, u8 *buf, size_t count)
{
- unsigned int tries, read = 0;
+ unsigned int tries;
+ size_t read = 0;
if (WARN_ON(!pv))
return -ENXIO;
@@ -199,7 +200,7 @@ int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
for (tries = 1; count && tries < 2; tries++) {
/* Consume existing data packet */
if (pv->inbuf_pktlen) {
- unsigned int l = min(count, (int)pv->inbuf_pktlen);
+ size_t l = min(count, pv->inbuf_pktlen);
memcpy(&buf[read], &pv->inbuf[pv->inbuf_cur], l);
pv->inbuf_cur += l;
pv->inbuf_pktlen -= l;
@@ -228,10 +229,11 @@ int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
return read;
}
-int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count)
+ssize_t hvsilib_put_chars(struct hvsi_priv *pv, const u8 *buf, size_t count)
{
struct hvsi_data dp;
- int rc, adjcount = min(count, HVSI_MAX_OUTGOING_DATA);
+ size_t adjcount = min_t(size_t, count, HVSI_MAX_OUTGOING_DATA);
+ int rc;
if (WARN_ON(!pv))
return -ENODEV;
@@ -411,9 +413,9 @@ void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp)
}
void hvsilib_init(struct hvsi_priv *pv,
- int (*get_chars)(uint32_t termno, char *buf, int count),
- int (*put_chars)(uint32_t termno, const char *buf,
- int count),
+ ssize_t (*get_chars)(uint32_t termno, u8 *buf, size_t count),
+ ssize_t (*put_chars)(uint32_t termno, const u8 *buf,
+ size_t count),
int termno, int is_console)
{
memset(pv, 0, sizeof(*pv));
diff --git a/drivers/tty/ipwireless/main.h b/drivers/tty/ipwireless/main.h
index 73818bb64..a5728a5b3 100644
--- a/drivers/tty/ipwireless/main.h
+++ b/drivers/tty/ipwireless/main.h
@@ -49,9 +49,6 @@ struct ipw_dev {
void __iomem *common_memory;
- /* Reference to attribute memory, containing CIS data */
- void *attribute_memory;
-
/* Hardware context */
struct ipw_hardware *hardware;
/* Network layer context */
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 369ec71c2..afbf7738c 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -213,16 +213,16 @@ struct fdc_word {
*/
/* ranges >= 1 && sizes[0] >= 1 */
-static struct fdc_word mips_ejtag_fdc_encode(const char **ptrs,
+static struct fdc_word mips_ejtag_fdc_encode(const u8 **ptrs,
unsigned int *sizes,
unsigned int ranges)
{
struct fdc_word word = { 0, 0 };
- const char **ptrs_end = ptrs + ranges;
+ const u8 **ptrs_end = ptrs + ranges;
for (; ptrs < ptrs_end; ++ptrs) {
- const char *ptr = *(ptrs++);
- const char *end = ptr + *(sizes++);
+ const u8 *ptr = *(ptrs++);
+ const u8 *end = ptr + *(sizes++);
for (; ptr < end; ++ptr) {
word.word |= (u8)*ptr << (8*word.bytes);
@@ -309,7 +309,7 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
unsigned int i, buf_len, cpu;
bool done_cr = false;
char buf[4];
- const char *buf_ptr = buf;
+ const u8 *buf_ptr = buf;
/* Number of bytes of input data encoded up to each byte in buf */
u8 inc[4];
@@ -417,7 +417,7 @@ static unsigned int mips_ejtag_fdc_put_chan(struct mips_ejtag_fdc_tty *priv,
{
struct mips_ejtag_fdc_tty_port *dport;
struct tty_struct *tty;
- const char *ptrs[2];
+ const u8 *ptrs[2];
unsigned int sizes[2] = { 0 };
struct fdc_word word = { .bytes = 0 };
unsigned long flags;
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index bf3f87ba3..ebaada8db 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -514,7 +514,7 @@ static void MoxaPortLineCtrl(struct moxa_port *, bool, bool);
static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int);
static int MoxaPortLineStatus(struct moxa_port *);
static void MoxaPortFlushData(struct moxa_port *, int);
-static int MoxaPortWriteData(struct tty_struct *, const unsigned char *, int);
+static ssize_t MoxaPortWriteData(struct tty_struct *, const u8 *, size_t);
static int MoxaPortReadData(struct moxa_port *);
static unsigned int MoxaPortTxQueue(struct moxa_port *);
static int MoxaPortRxQueue(struct moxa_port *);
@@ -1933,10 +1933,10 @@ static void MoxaPortFlushData(struct moxa_port *port, int mode)
*
* Function 20: Write data.
* Syntax:
- * int MoxaPortWriteData(int port, unsigned char * buffer, int length);
+ * ssize_t MoxaPortWriteData(int port, u8 *buffer, size_t length);
* int port : port number (0 - 127)
- * unsigned char * buffer : pointer to write data buffer.
- * int length : write data length
+ * u8 *buffer : pointer to write data buffer.
+ * size_t length : write data length
*
* return: 0 - length : real write data length
*
@@ -2163,11 +2163,12 @@ static int MoxaPortLineStatus(struct moxa_port *port)
return val;
}
-static int MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer, int len)
+static ssize_t MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer,
+ size_t len)
{
struct moxa_port *port = tty->driver_data;
void __iomem *baseAddr, *ofsAddr, *ofs;
- unsigned int c, total;
+ size_t c, total;
u16 head, tail, tx_mask, spage, epage;
u16 pageno, pageofs, bufhead;
@@ -2224,8 +2225,8 @@ static int MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer, int len)
static int MoxaPortReadData(struct moxa_port *port)
{
struct tty_struct *tty = port->port.tty;
- unsigned char *dst;
void __iomem *baseAddr, *ofsAddr, *ofs;
+ u8 *dst;
unsigned int count, len, total;
u16 tail, rx_mask, spage, epage;
u16 pageno, pageofs, bufhead, head;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 6ce7f2599..458bb1280 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -264,7 +264,7 @@ struct mxser_port {
u8 rx_low_water;
int type; /* UART type */
- unsigned char x_char; /* xon/xoff character */
+ u8 x_char; /* xon/xoff character */
u8 IER; /* Interrupt Enable Register */
u8 MCR; /* Modem control register */
u8 FCR; /* FIFO control register */
@@ -905,7 +905,7 @@ static ssize_t mxser_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
- int written;
+ size_t written;
bool is_empty;
spin_lock_irqsave(&info->slock, flags);
@@ -1521,7 +1521,7 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
if (++ignored > 100)
break;
} else {
- char flag = 0;
+ u8 flag = 0;
if (status & UART_LSR_BRK_ERROR_BITS) {
if (status & UART_LSR_BI) {
flag = TTY_BREAK;
@@ -1585,7 +1585,7 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
count = port->xmit_fifo_size;
do {
- unsigned char c;
+ u8 c;
if (!kfifo_get(&port->port.xmit_fifo, &c))
break;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a3ab3946e..4036566fe 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -124,8 +124,8 @@ struct gsm_msg {
u8 addr; /* DLCI address + flags */
u8 ctrl; /* Control byte + flags */
unsigned int len; /* Length of data block (can be zero) */
- unsigned char *data; /* Points into buffer but not at the start */
- unsigned char buffer[];
+ u8 *data; /* Points into buffer but not at the start */
+ u8 buffer[];
};
enum gsm_dlci_state {
@@ -283,7 +283,7 @@ struct gsm_mux {
/* Bits for GSM mode decoding */
/* Framing Layer */
- unsigned char *buf;
+ u8 *buf;
enum gsm_mux_state state;
unsigned int len;
unsigned int address;
@@ -2856,7 +2856,7 @@ invalid:
* Receive bytes in gsm mode 0
*/
-static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
+static void gsm0_receive(struct gsm_mux *gsm, u8 c)
{
unsigned int len;
@@ -2947,7 +2947,7 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
* Receive bytes in mode 1 (Advanced option)
*/
-static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
+static void gsm1_receive(struct gsm_mux *gsm, u8 c)
{
/* handle XON/XOFF */
if ((c & ISO_IEC_646_MASK) == XON) {
@@ -3541,7 +3541,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp,
const u8 *fp, size_t count)
{
struct gsm_mux *gsm = tty->disc_data;
- char flags = TTY_NORMAL;
+ u8 flags = TTY_NORMAL;
if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, cp, count);
@@ -3711,7 +3711,7 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
{
struct gsm_mux *gsm = tty->disc_data;
unsigned long flags;
- int space;
+ size_t space;
int ret;
if (!gsm)
@@ -3909,8 +3909,7 @@ static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
net->stats.tx_errors++;
}
-static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
- const unsigned char *in_buf, int size)
+static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, const u8 *in_buf, int size)
{
struct net_device *net = dlci->net;
struct sk_buff *skb;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index a670419ef..1615f074a 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -109,8 +109,8 @@
struct n_hdlc_buf {
struct list_head list_item;
- int count;
- char buf[];
+ size_t count;
+ u8 buf[];
};
struct n_hdlc_buf_list {
@@ -263,9 +263,9 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
*/
static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
{
- register int actual;
unsigned long flags;
struct n_hdlc_buf *tbuf;
+ ssize_t actual;
check_again:
@@ -281,7 +281,7 @@ check_again:
tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
while (tbuf) {
- pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);
+ pr_debug("sending frame %p, count=%zu\n", tbuf, tbuf->count);
/* Send the next block of data to device */
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
@@ -521,9 +521,9 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const u8 *data, size_t count)
{
struct n_hdlc *n_hdlc = tty->disc_data;
- int error = 0;
DECLARE_WAITQUEUE(wait, current);
struct n_hdlc_buf *tbuf;
+ ssize_t error = 0;
pr_debug("%s() called count=%zd\n", __func__, count);
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 02cd40147..e28a921c1 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -65,24 +65,8 @@ do { \
#define DBG3(args...) DBG_(0x04, ##args)
#define DBG4(args...) DBG_(0x08, ##args)
-/* TODO: rewrite to optimize macros... */
-
#define TMP_BUF_MAX 256
-#define DUMP(buf__, len__) \
- do { \
- char tbuf[TMP_BUF_MAX] = {0}; \
- if (len__ > 1) { \
- u32 data_len = min_t(u32, len__, TMP_BUF_MAX); \
- strscpy(tbuf, buf__, data_len); \
- if (tbuf[data_len - 2] == '\r') \
- tbuf[data_len - 2] = 'r'; \
- DBG1("SENDING: '%s' (%d+n)", tbuf, len__); \
- } else { \
- DBG1("SENDING: '%s' (%d)", tbuf, len__); \
- } \
- } while (0)
-
/* Defines */
#define NOZOMI_NAME "nozomi"
#define NOZOMI_NAME_TTY "nozomi_tty"
@@ -754,8 +738,6 @@ static int send_data(enum port_type index, struct nozomi *dc)
return 0;
}
- /* DUMP(buf, size); */
-
/* Write length + data */
write_mem32(addr, (u32 *) &size, 4);
write_mem32(addr + 4, (u32 *) dc->send_buf, size);
@@ -801,11 +783,10 @@ static int receive_data(enum port_type index, struct nozomi *dc)
tty_insert_flip_char(&port->port, buf[0], TTY_NORMAL);
size = 0;
} else if (size < RECEIVE_BUF_MAX) {
- size -= tty_insert_flip_string(&port->port,
- (char *)buf, size);
+ size -= tty_insert_flip_string(&port->port, buf, size);
} else {
- i = tty_insert_flip_string(&port->port,
- (char *)buf, RECEIVE_BUF_MAX);
+ i = tty_insert_flip_string(&port->port, buf,
+ RECEIVE_BUF_MAX);
size -= i;
offset += i;
}
@@ -1602,10 +1583,10 @@ static void ntty_hangup(struct tty_struct *tty)
static ssize_t ntty_write(struct tty_struct *tty, const u8 *buffer,
size_t count)
{
- int rval = -EINVAL;
struct nozomi *dc = get_dc_by_tty(tty);
struct port *port = tty->driver_data;
unsigned long flags;
+ size_t rval;
if (!dc || !port)
return -ENODEV;
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index a5fdaf5e1..822a5cd05 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -77,7 +77,7 @@ static bool is_serdev_device(const struct device *dev)
static void serdev_ctrl_release(struct device *dev)
{
struct serdev_controller *ctrl = to_serdev_controller(dev);
- ida_simple_remove(&ctrl_ida, ctrl->nr);
+ ida_free(&ctrl_ida, ctrl->nr);
kfree(ctrl);
}
@@ -225,8 +225,7 @@ EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
* Return: The number of bytes written (less than count if not enough room in
* the write buffer), or a negative errno on errors.
*/
-int serdev_device_write_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t count)
+int serdev_device_write_buf(struct serdev_device *serdev, const u8 *buf, size_t count)
{
struct serdev_controller *ctrl = serdev->ctrl;
@@ -259,13 +258,12 @@ EXPORT_SYMBOL_GPL(serdev_device_write_buf);
* -ETIMEDOUT or -ERESTARTSYS if interrupted before any bytes were written, or
* a negative errno on errors.
*/
-int serdev_device_write(struct serdev_device *serdev,
- const unsigned char *buf, size_t count,
- long timeout)
+ssize_t serdev_device_write(struct serdev_device *serdev, const u8 *buf,
+ size_t count, long timeout)
{
struct serdev_controller *ctrl = serdev->ctrl;
- int written = 0;
- int ret;
+ size_t written = 0;
+ ssize_t ret;
if (!ctrl || !ctrl->ops->write_buf || !serdev->ops->write_wakeup)
return -EINVAL;
@@ -468,6 +466,7 @@ EXPORT_SYMBOL_GPL(serdev_device_alloc);
/**
* serdev_controller_alloc() - Allocate a new serdev controller
+ * @host: serial port hardware controller device
* @parent: parent device
* @size: size of private data
*
@@ -476,8 +475,9 @@ EXPORT_SYMBOL_GPL(serdev_device_alloc);
* The allocated private data region may be accessed via
* serdev_controller_get_drvdata()
*/
-struct serdev_controller *serdev_controller_alloc(struct device *parent,
- size_t size)
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+ struct device *parent,
+ size_t size)
{
struct serdev_controller *ctrl;
int id;
@@ -489,7 +489,7 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
if (!ctrl)
return NULL;
- id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&ctrl_ida, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate serdev controller identifier.\n");
@@ -502,7 +502,8 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
ctrl->dev.type = &serdev_ctrl_type;
ctrl->dev.bus = &serdev_bus_type;
ctrl->dev.parent = parent;
- device_set_node(&ctrl->dev, dev_fwnode(parent));
+ ctrl->host = host;
+ device_set_node(&ctrl->dev, dev_fwnode(host));
serdev_controller_set_drvdata(ctrl, &ctrl[1]);
dev_set_name(&ctrl->dev, "serial%d", id);
@@ -665,7 +666,7 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
acpi_get_parent(adev->handle, &lookup.controller_handle);
/* Make sure controller and ResourceSource handle match */
- if (!device_match_acpi_handle(ctrl->dev.parent, lookup.controller_handle))
+ if (!device_match_acpi_handle(ctrl->host, lookup.controller_handle))
return -ENODEV;
return 0;
@@ -730,7 +731,7 @@ static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
bool skip;
int ret;
- if (!has_acpi_companion(ctrl->dev.parent))
+ if (!has_acpi_companion(ctrl->host))
return -ENODEV;
/*
@@ -739,7 +740,7 @@ static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
* succeed in this case, so that the proper serdev devices can be
* added "manually" later.
*/
- ret = acpi_quirk_skip_serdev_enumeration(ctrl->dev.parent, &skip);
+ ret = acpi_quirk_skip_serdev_enumeration(ctrl->host, &skip);
if (ret)
return ret;
if (skip)
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index e3856814c..e94e090cf 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -74,7 +74,7 @@ static const struct tty_port_client_operations client_ops = {
* Callback functions from the serdev core.
*/
-static int ttyport_write_buf(struct serdev_controller *ctrl, const unsigned char *data, size_t len)
+static ssize_t ttyport_write_buf(struct serdev_controller *ctrl, const u8 *data, size_t len)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty = serport->tty;
@@ -274,6 +274,7 @@ static const struct serdev_controller_ops ctrl_ops = {
};
struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx)
{
@@ -284,7 +285,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
if (!port || !drv || !parent)
return ERR_PTR(-ENODEV);
- ctrl = serdev_controller_alloc(parent, sizeof(struct serport));
+ ctrl = serdev_controller_alloc(host, parent, sizeof(struct serport));
if (!ctrl)
return ERR_PTR(-ENOMEM);
serport = serdev_controller_get_drvdata(ctrl);
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index d7482ae33..8c2aaf7af 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -566,7 +566,7 @@ err_sysfs_remove:
return rc;
}
-static int aspeed_vuart_remove(struct platform_device *pdev)
+static void aspeed_vuart_remove(struct platform_device *pdev)
{
struct aspeed_vuart *vuart = platform_get_drvdata(pdev);
@@ -574,8 +574,6 @@ static int aspeed_vuart_remove(struct platform_device *pdev)
aspeed_vuart_set_enabled(vuart, false);
serial8250_unregister_port(vuart->line);
sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
-
- return 0;
}
static const struct of_device_id aspeed_vuart_table[] = {
@@ -590,7 +588,7 @@ static struct platform_driver aspeed_vuart_driver = {
.of_match_table = aspeed_vuart_table,
},
.probe = aspeed_vuart_probe,
- .remove = aspeed_vuart_remove,
+ .remove_new = aspeed_vuart_remove,
};
module_platform_driver(aspeed_vuart_driver);
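This hunk and many below are the same mechanical conversion: platform .remove() callbacks that can only ever return 0 become void and are registered through .remove_new. A minimal sketch of the pattern, with hypothetical my_uart_* names:

#include <linux/platform_device.h>
#include <linux/serial_8250.h>

struct my_uart_data { int line; };

static int my_uart_probe(struct platform_device *pdev)
{
	return 0;	/* setup elided in this sketch */
}

static void my_uart_remove(struct platform_device *pdev)
{
	struct my_uart_data *data = platform_get_drvdata(pdev);

	serial8250_unregister_port(data->line);	/* teardown, no return value */
}

static struct platform_driver my_uart_driver = {
	.probe      = my_uart_probe,
	.remove_new = my_uart_remove,	/* void-returning remove callback */
	.driver     = { .name = "my-uart" },
};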
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 4f4502fb5..beac6b340 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -197,14 +197,12 @@ dis_clk:
return ret;
}
-static int bcm2835aux_serial_remove(struct platform_device *pdev)
+static void bcm2835aux_serial_remove(struct platform_device *pdev)
{
struct bcm2835aux_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk);
-
- return 0;
}
static const struct bcm2835_aux_serial_driver_data bcm2835_acpi_data = {
@@ -230,7 +228,7 @@ static struct platform_driver bcm2835aux_serial_driver = {
.acpi_match_table = bcm2835aux_serial_acpi_match,
},
.probe = bcm2835aux_serial_probe,
- .remove = bcm2835aux_serial_remove,
+ .remove_new = bcm2835aux_serial_remove,
};
module_platform_driver(bcm2835aux_serial_driver);
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 55dea2539..504c4c020 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1121,7 +1121,7 @@ release_dma:
return ret;
}
-static int brcmuart_remove(struct platform_device *pdev)
+static void brcmuart_remove(struct platform_device *pdev)
{
struct brcmuart_priv *priv = platform_get_drvdata(pdev);
@@ -1131,7 +1131,6 @@ static int brcmuart_remove(struct platform_device *pdev)
brcmuart_free_bufs(&pdev->dev, priv);
if (priv->dma_enabled)
brcmuart_arbitration(priv, 0);
- return 0;
}
static int __maybe_unused brcmuart_suspend(struct device *dev)
@@ -1207,7 +1206,7 @@ static struct platform_driver brcmuart_platform_driver = {
.of_match_table = brcmuart_dt_ids,
},
.probe = brcmuart_probe,
- .remove = brcmuart_remove,
+ .remove_new = brcmuart_remove,
};
static int __init brcmuart_init(void)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 912733151..b62ad9006 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -883,7 +883,7 @@ static int serial8250_probe(struct platform_device *dev)
/*
* Remove serial ports registered against a platform device.
*/
-static int serial8250_remove(struct platform_device *dev)
+static void serial8250_remove(struct platform_device *dev)
{
int i;
@@ -893,7 +893,6 @@ static int serial8250_remove(struct platform_device *dev)
if (up->port.dev == &dev->dev)
serial8250_unregister_port(i);
}
- return 0;
}
static int serial8250_suspend(struct platform_device *dev, pm_message_t state)
@@ -926,7 +925,7 @@ static int serial8250_resume(struct platform_device *dev)
static struct platform_driver serial8250_isa_driver = {
.probe = serial8250_probe,
- .remove = serial8250_remove,
+ .remove_new = serial8250_remove,
.suspend = serial8250_suspend,
.resume = serial8250_resume,
.driver = {
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index e6218766d..2d1f350a4 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -663,7 +663,7 @@ static int dw8250_probe(struct platform_device *pdev)
return 0;
}
-static int dw8250_remove(struct platform_device *pdev)
+static void dw8250_remove(struct platform_device *pdev)
{
struct dw8250_data *data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -680,8 +680,6 @@ static int dw8250_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
-
- return 0;
}
static int dw8250_suspend(struct device *dev)
@@ -790,7 +788,7 @@ static struct platform_driver dw8250_platform_driver = {
.acpi_match_table = dw8250_acpi_match,
},
.probe = dw8250_probe,
- .remove = dw8250_remove,
+ .remove_new = dw8250_remove,
};
module_platform_driver(dw8250_platform_driver);
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 84843e204..3e33ddf7b 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -259,17 +259,6 @@ void dw8250_setup_port(struct uart_port *p)
}
up->capabilities |= UART_CAP_NOTEMT;
- /*
- * If the Component Version Register returns zero, we know that
- * ADDITIONAL_FEATURES are not enabled. No need to go any further.
- */
- reg = dw8250_readl_ext(p, DW_UART_UCV);
- if (!reg)
- return;
-
- dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
- (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
-
/* Preserve value written by firmware or bootloader */
old_dlf = dw8250_readl_ext(p, DW_UART_DLF);
dw8250_writel_ext(p, DW_UART_DLF, ~0U);
@@ -282,6 +271,11 @@ void dw8250_setup_port(struct uart_port *p)
p->set_divisor = dw8250_set_divisor;
}
+ reg = dw8250_readl_ext(p, DW_UART_UCV);
+ if (reg)
+ dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
+ (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
+
reg = dw8250_readl_ext(p, DW_UART_CPR);
if (!reg) {
reg = data->pdata->cpr_val;
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index ef5019e94..a75475510 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -200,12 +200,11 @@ static int serial8250_em_probe(struct platform_device *pdev)
return 0;
}
-static int serial8250_em_remove(struct platform_device *pdev)
+static void serial8250_em_remove(struct platform_device *pdev)
{
struct serial8250_em_priv *priv = platform_get_drvdata(pdev);
serial8250_unregister_port(priv->line);
- return 0;
}
static const struct of_device_id serial8250_em_dt_ids[] = {
@@ -220,7 +219,7 @@ static struct platform_driver serial8250_em_platform_driver = {
.of_match_table = serial8250_em_dt_ids,
},
.probe = serial8250_em_probe,
- .remove = serial8250_em_remove,
+ .remove_new = serial8250_em_remove,
};
module_platform_driver(serial8250_em_platform_driver);
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index f522eb502..b4ed44208 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -51,7 +51,8 @@ int fsl8250_handle_irq(struct uart_port *port)
* immediately and interrupt the CPU again. The hardware clears LSR.BI
* when the next valid char is read.)
*/
- if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ if (unlikely((iir & UART_IIR_ID) == UART_IIR_RLSI &&
+ (up->lsr_saved_flags & UART_LSR_BI))) {
up->lsr_saved_flags &= ~UART_LSR_BI;
port->serial_in(port, UART_RX);
uart_port_unlock_irqrestore(&up->port, flags);
@@ -159,12 +160,11 @@ static int fsl8250_acpi_probe(struct platform_device *pdev)
return 0;
}
-static int fsl8250_acpi_remove(struct platform_device *pdev)
+static void fsl8250_acpi_remove(struct platform_device *pdev)
{
struct fsl8250_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
- return 0;
}
static const struct acpi_device_id fsl_8250_acpi_id[] = {
@@ -179,7 +179,7 @@ static struct platform_driver fsl8250_platform_driver = {
.acpi_match_table = ACPI_PTR(fsl_8250_acpi_id),
},
.probe = fsl8250_acpi_probe,
- .remove = fsl8250_acpi_remove,
+ .remove_new = fsl8250_acpi_remove,
};
module_platform_driver(fsl8250_platform_driver);
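The break-detect workaround above is narrowed so the stale BI character is only consumed when the pending interrupt really is a receiver-line-status event; an annotated restatement of the new guard, assuming the standard constants from <linux/serial_reg.h>:

/* Only act on an RLSI interrupt: UART_IIR_ID masks the interrupt
 * source field and UART_IIR_RLSI identifies line-status events.
 * Other sources (RX data ready, THR empty) must not have a byte
 * stolen from the RX FIFO.
 */
if ((iir & UART_IIR_ID) == UART_IIR_RLSI &&
    (up->lsr_saved_flags & UART_LSR_BI)) {
	up->lsr_saved_flags &= ~UART_LSR_BI;
	port->serial_in(port, UART_RX);	/* discard the stale break char */
	uart_port_unlock_irqrestore(&up->port, flags);
	/* the driver returns early here, reporting the IRQ as handled */
}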
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 4c4c4da73..a12f73792 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -320,14 +320,13 @@ out:
return err;
}
-static int ingenic_uart_remove(struct platform_device *pdev)
+static void ingenic_uart_remove(struct platform_device *pdev)
{
struct ingenic_uart_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk_module);
clk_disable_unprepare(data->clk_baud);
- return 0;
}
static const struct ingenic_uart_config jz4740_uart_config = {
@@ -368,7 +367,7 @@ static struct platform_driver ingenic_uart_platform_driver = {
.of_match_table = of_match,
},
.probe = ingenic_uart_probe,
- .remove = ingenic_uart_remove,
+ .remove_new = ingenic_uart_remove,
};
module_platform_driver(ingenic_uart_platform_driver);
diff --git a/drivers/tty/serial/8250/8250_ioc3.c b/drivers/tty/serial/8250/8250_ioc3.c
index d5a39e105..50c77c3da 100644
--- a/drivers/tty/serial/8250/8250_ioc3.c
+++ b/drivers/tty/serial/8250/8250_ioc3.c
@@ -75,17 +75,16 @@ static int serial8250_ioc3_probe(struct platform_device *pdev)
return 0;
}
-static int serial8250_ioc3_remove(struct platform_device *pdev)
+static void serial8250_ioc3_remove(struct platform_device *pdev)
{
struct ioc3_8250_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
- return 0;
}
static struct platform_driver serial8250_ioc3_driver = {
.probe = serial8250_ioc3_probe,
- .remove = serial8250_ioc3_remove,
+ .remove_new = serial8250_ioc3_remove,
.driver = {
.name = "ioc3-serial8250",
}
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 6dc85aaba..8d728a6a5 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -182,15 +182,13 @@ dis_clk_reg:
return ret;
}
-static int lpc18xx_serial_remove(struct platform_device *pdev)
+static void lpc18xx_serial_remove(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk_uart);
clk_disable_unprepare(data->clk_reg);
-
- return 0;
}
static const struct of_device_id lpc18xx_serial_match[] = {
@@ -201,7 +199,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_serial_match);
static struct platform_driver lpc18xx_serial_driver = {
.probe = lpc18xx_serial_probe,
- .remove = lpc18xx_serial_remove,
+ .remove_new = lpc18xx_serial_remove,
.driver = {
.name = "lpc18xx-uart",
.of_match_table = lpc18xx_serial_match,
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 0e43bdfb7..776ec1ef2 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -287,17 +287,14 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
return 0;
}
- rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
+ rx_param = devm_kmemdup(dev, &lpss->dma_param, sizeof(*rx_param), GFP_KERNEL);
if (!rx_param)
return -ENOMEM;
- tx_param = devm_kzalloc(dev, sizeof(*tx_param), GFP_KERNEL);
+ tx_param = devm_kmemdup(dev, &lpss->dma_param, sizeof(*tx_param), GFP_KERNEL);
if (!tx_param)
return -ENOMEM;
- *rx_param = lpss->dma_param;
- *tx_param = lpss->dma_param;
-
dma->fn = lpss8250_dma_filter;
dma->rx_param = rx_param;
dma->tx_param = tx_param;
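devm_kmemdup() collapses the allocate-then-copy pairs above into single calls. A minimal before/after sketch (dev, src, and dst are illustrative):

/* Before: zeroed allocation followed by a structure copy. */
dst = devm_kzalloc(dev, sizeof(*dst), GFP_KERNEL);
if (!dst)
	return -ENOMEM;
*dst = *src;

/* After: allocate and copy the template in one step. */
dst = devm_kmemdup(dev, src, sizeof(*dst), GFP_KERNEL);
if (!dst)
	return -ENOMEM;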
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 23457daae..9ff6bbe9c 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -581,7 +581,7 @@ static int mtk8250_probe(struct platform_device *pdev)
return 0;
}
-static int mtk8250_remove(struct platform_device *pdev)
+static void mtk8250_remove(struct platform_device *pdev)
{
struct mtk8250_data *data = platform_get_drvdata(pdev);
@@ -591,8 +591,6 @@ static int mtk8250_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
}
static int __maybe_unused mtk8250_suspend(struct device *dev)
@@ -652,7 +650,7 @@ static struct platform_driver mtk8250_platform_driver = {
.of_match_table = mtk8250_of_match,
},
.probe = mtk8250_probe,
- .remove = mtk8250_remove,
+ .remove_new = mtk8250_remove,
};
module_platform_driver(mtk8250_platform_driver);
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index ef3e745bd..9dcc17e33 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -4,7 +4,10 @@
*
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
*/
+
+#include <linux/bits.h>
#include <linux/console.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serial_core.h>
@@ -25,6 +28,36 @@ struct of_serial_info {
int line;
};
+/* Nuvoton NPCM timeout register */
+#define UART_NPCM_TOR 7
+#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
+
+static int npcm_startup(struct uart_port *port)
+{
+ /*
+ * Nuvoton calls the scratch register 'UART_TOR' (timeout
+ * register). Enable it, and set TIOC (timeout interrupt
+ * comparator) to be 0x20 for correct operation.
+ */
+ serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
+
+ return serial8250_do_startup(port);
+}
+
+/* Nuvoton NPCM UARTs have a custom divisor calculation */
+static unsigned int npcm_get_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int *frac)
+{
+ return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
+}
+
+static int npcm_setup(struct uart_port *port)
+{
+ port->get_divisor = npcm_get_divisor;
+ port->startup = npcm_startup;
+ return 0;
+}
+
/*
* Fill a struct uart_port for a given device node
*/
@@ -164,10 +197,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
switch (type) {
case PORT_RT2880:
ret = rt288x_setup(port);
- if (ret)
- goto err_pmruntime;
+ break;
+ case PORT_NPCM:
+ ret = npcm_setup(port);
+ break;
+ default:
+ /* Nothing to do */
+ ret = 0;
break;
}
+ if (ret)
+ goto err_pmruntime;
if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL) &&
(of_device_is_compatible(np, "fsl,ns16550") ||
@@ -251,7 +291,7 @@ err_free:
/*
* Release a line
*/
-static int of_platform_serial_remove(struct platform_device *ofdev)
+static void of_platform_serial_remove(struct platform_device *ofdev)
{
struct of_serial_info *info = platform_get_drvdata(ofdev);
@@ -261,7 +301,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
pm_runtime_put_sync(&ofdev->dev);
pm_runtime_disable(&ofdev->dev);
kfree(info);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -337,7 +376,7 @@ static struct platform_driver of_platform_serial_driver = {
.pm = &of_serial_pm_ops,
},
.probe = of_platform_serial_probe,
- .remove = of_platform_serial_remove,
+ .remove_new = of_platform_serial_remove,
};
module_platform_driver(of_platform_serial_driver);
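The NPCM quirks that later hunks delete from 8250_port.c land here as ordinary port hooks: npcm_setup() plugs npcm_get_divisor() into port->get_divisor (whose *frac argument NPCM simply ignores) and npcm_startup() into port->startup. The divisor works out as round(uartclk / (16 * baud + 2)) - 2; for example, a 24 MHz uartclk at 115200 baud gives DIV_ROUND_CLOSEST(24000000, 1843202) - 2 = 13 - 2 = 11. A standalone C sketch of that arithmetic (values illustrative):

#include <stdio.h>

/* Round-to-nearest unsigned division, as the kernel's
 * DIV_ROUND_CLOSEST does for positive operands.
 */
static unsigned int div_round_closest(unsigned int x, unsigned int d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	unsigned int uartclk = 24000000;	/* assumed example clock */
	unsigned int baud = 115200;

	printf("NPCM divisor: %u\n",
	       div_round_closest(uartclk, 16 * baud + 2) - 2);	/* prints 11 */
	return 0;
}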
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index dd2be7a81..6942990a3 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1586,7 +1586,7 @@ err:
return ret;
}
-static int omap8250_remove(struct platform_device *pdev)
+static void omap8250_remove(struct platform_device *pdev)
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
struct uart_8250_port *up;
@@ -1606,7 +1606,6 @@ static int omap8250_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
cpu_latency_qos_remove_request(&priv->pm_qos_request);
device_init_wakeup(&pdev->dev, false);
- return 0;
}
static int omap8250_prepare(struct device *dev)
@@ -1865,7 +1864,7 @@ static struct platform_driver omap8250_platform_driver = {
.of_match_table = omap8250_dt_ids,
},
.probe = omap8250_probe,
- .remove = omap8250_remove,
+ .remove_new = omap8250_remove,
};
module_platform_driver(omap8250_platform_driver);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 8ccf69193..0d35c77fa 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2039,12 +2039,13 @@ static int pci_moxa_init(struct pci_dev *dev)
unsigned short device = dev->device;
resource_size_t iobar_addr = pci_resource_start(dev, 2);
unsigned int num_ports = (device & 0x00F0) >> 4, i;
- u8 val;
+ u8 val, init_mode = MOXA_RS232;
if (!(pci_moxa_supported_rs(dev) & MOXA_SUPP_RS232)) {
- for (i = 0; i < num_ports; ++i)
- pci_moxa_set_interface(dev, i, MOXA_RS422);
+ init_mode = MOXA_RS422;
}
+ for (i = 0; i < num_ports; ++i)
+ pci_moxa_set_interface(dev, i, init_mode);
/*
* Enable hardware buffer to prevent break signal output when system boots up.
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 9f9e21981..2dda737b1 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -9,15 +9,21 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/units.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/8250_pci.h>
#include <asm/byteorder.h>
@@ -52,6 +58,17 @@
#define PCI_SUBDEVICE_ID_EFAR_PCI11400 PCI_DEVICE_ID_EFAR_PCI11400
#define PCI_SUBDEVICE_ID_EFAR_PCI11414 PCI_DEVICE_ID_EFAR_PCI11414
+#define UART_SYSTEM_ADDR_BASE 0x1000
+#define UART_DEV_REV_REG (UART_SYSTEM_ADDR_BASE + 0x00)
+#define UART_DEV_REV_MASK GENMASK(7, 0)
+#define UART_SYSLOCK_REG (UART_SYSTEM_ADDR_BASE + 0xA0)
+#define UART_SYSLOCK BIT(2)
+#define SYSLOCK_SLEEP_TIMEOUT 100
+#define SYSLOCK_RETRY_CNT 1000
+
+#define UART_RX_BYTE_FIFO 0x00
+#define UART_FIFO_CTL 0x02
+
#define UART_ACTV_REG 0x11
#define UART_BLOCK_SET_ACTIVE BIT(0)
@@ -82,8 +99,59 @@
#define UART_RESET_REG 0x94
#define UART_RESET_D3_RESET_DISABLE BIT(16)
+#define UART_BURST_STATUS_REG 0x9C
+#define UART_RX_BURST_FIFO 0xA4
+
#define MAX_PORTS 4
#define PORT_OFFSET 0x100
+#define RX_BUF_SIZE 512
+#define UART_BYTE_SIZE 1
+#define UART_BURST_SIZE 4
+
+#define UART_BST_STAT_RX_COUNT_MASK 0x00FF
+#define UART_BST_STAT_IIR_INT_PEND 0x100000
+#define UART_LSR_OVERRUN_ERR_CLR 0x43
+#define UART_BST_STAT_LSR_RX_MASK 0x9F000000
+#define UART_BST_STAT_LSR_RX_ERR_MASK 0x9E000000
+#define UART_BST_STAT_LSR_OVERRUN_ERR 0x2000000
+#define UART_BST_STAT_LSR_PARITY_ERR 0x4000000
+#define UART_BST_STAT_LSR_FRAME_ERR 0x8000000
+
+struct pci1xxxx_8250 {
+ unsigned int nr;
+ u8 dev_rev;
+ u8 pad[3];
+ void __iomem *membase;
+ int line[] __counted_by(nr);
+};
+
+static const struct serial_rs485 pci1xxxx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_after_send = 1,
+ /* Delay RTS before send is not supported */
+};
+
+static int pci1xxxx_set_sys_lock(struct pci1xxxx_8250 *port)
+{
+ writel(UART_SYSLOCK, port->membase + UART_SYSLOCK_REG);
+ return readl(port->membase + UART_SYSLOCK_REG);
+}
+
+static int pci1xxxx_acquire_sys_lock(struct pci1xxxx_8250 *port)
+{
+ u32 regval;
+
+ return readx_poll_timeout(pci1xxxx_set_sys_lock, port, regval,
+ (regval & UART_SYSLOCK),
+ SYSLOCK_SLEEP_TIMEOUT,
+ SYSLOCK_RETRY_CNT * SYSLOCK_SLEEP_TIMEOUT);
+}
+
+static void pci1xxxx_release_sys_lock(struct pci1xxxx_8250 *port)
+{
+ writel(0x0, port->membase + UART_SYSLOCK_REG);
+}
static const int logical_to_physical_port_idx[][MAX_PORTS] = {
{0, 1, 2, 3}, /* PCI12000, PCI11010, PCI11101, PCI11400, PCI11414 */
@@ -104,12 +172,6 @@ static const int logical_to_physical_port_idx[][MAX_PORTS] = {
{3, -1, -1, -1}, /* PCI1p3 */
};
-struct pci1xxxx_8250 {
- unsigned int nr;
- void __iomem *membase;
- int line[] __counted_by(nr);
-};
-
static int pci1xxxx_get_num_ports(struct pci_dev *dev)
{
switch (dev->subsystem_device) {
@@ -205,12 +267,102 @@ static int pci1xxxx_rs485_config(struct uart_port *port,
return 0;
}
-static const struct serial_rs485 pci1xxxx_rs485_supported = {
- .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
- SER_RS485_RTS_AFTER_SEND,
- .delay_rts_after_send = 1,
- /* Delay RTS before send is not supported */
-};
+static u32 pci1xxxx_read_burst_status(struct uart_port *port)
+{
+ u32 status;
+
+ status = readl(port->membase + UART_BURST_STATUS_REG);
+ if (status & UART_BST_STAT_LSR_RX_ERR_MASK) {
+ if (status & UART_BST_STAT_LSR_OVERRUN_ERR) {
+ writeb(UART_LSR_OVERRUN_ERR_CLR,
+ port->membase + UART_FIFO_CTL);
+ port->icount.overrun++;
+ }
+
+ if (status & UART_BST_STAT_LSR_FRAME_ERR)
+ port->icount.frame++;
+
+ if (status & UART_BST_STAT_LSR_PARITY_ERR)
+ port->icount.parity++;
+ }
+ return status;
+}
+
+static void pci1xxxx_process_read_data(struct uart_port *port,
+ unsigned char *rx_buff, u32 *buff_index,
+ u32 *valid_byte_count)
+{
+ u32 valid_burst_count = *valid_byte_count / UART_BURST_SIZE;
+ u32 *burst_buf;
+
+ /*
+ * Depending on the RX trigger level, the number of bytes that can be
+ * stored in the RX FIFO at a time varies. Each transaction reads data
+ * in DWORDs. If fewer than UART_BURST_SIZE valid bytes remain, they
+ * are received one byte at a time.
+ */
+ while (valid_burst_count--) {
+ if (*buff_index > (RX_BUF_SIZE - UART_BURST_SIZE))
+ break;
+ burst_buf = (u32 *)&rx_buff[*buff_index];
+ *burst_buf = readl(port->membase + UART_RX_BURST_FIFO);
+ *buff_index += UART_BURST_SIZE;
+ *valid_byte_count -= UART_BURST_SIZE;
+ }
+
+ while (*valid_byte_count) {
+ if (*buff_index >= RX_BUF_SIZE)
+ break;
+ rx_buff[*buff_index] = readb(port->membase +
+ UART_RX_BYTE_FIFO);
+ *buff_index += UART_BYTE_SIZE;
+ *valid_byte_count -= UART_BYTE_SIZE;
+ }
+}
+
+static void pci1xxxx_rx_burst(struct uart_port *port, u32 uart_status)
+{
+ u32 valid_byte_count = uart_status & UART_BST_STAT_RX_COUNT_MASK;
+ struct tty_port *tty_port = &port->state->port;
+ unsigned char rx_buff[RX_BUF_SIZE];
+ u32 buff_index = 0;
+ u32 copied_len;
+
+ if (valid_byte_count != 0 &&
+ valid_byte_count < RX_BUF_SIZE) {
+ pci1xxxx_process_read_data(port, rx_buff, &buff_index,
+ &valid_byte_count);
+
+ copied_len = (u32)tty_insert_flip_string(tty_port, rx_buff,
+ buff_index);
+
+ if (copied_len != buff_index)
+ port->icount.overrun += buff_index - copied_len;
+
+ port->icount.rx += buff_index;
+ tty_flip_buffer_push(tty_port);
+ }
+}
+
+static int pci1xxxx_handle_irq(struct uart_port *port)
+{
+ unsigned long flags;
+ u32 status;
+
+ status = pci1xxxx_read_burst_status(port);
+
+ if (status & UART_BST_STAT_IIR_INT_PEND)
+ return 0;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (status & UART_BST_STAT_LSR_RX_MASK)
+ pci1xxxx_rx_burst(port, status);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return 1;
+}
static bool pci1xxxx_port_suspend(int line)
{
@@ -323,7 +475,7 @@ static int pci1xxxx_resume(struct device *dev)
}
static int pci1xxxx_setup(struct pci_dev *pdev,
- struct uart_8250_port *port, int port_idx)
+ struct uart_8250_port *port, int port_idx, int rev)
{
int ret;
@@ -335,6 +487,10 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
port->port.rs485_config = pci1xxxx_rs485_config;
port->port.rs485_supported = pci1xxxx_rs485_supported;
+ /* Burst operation is supported from revision C0 onwards */
+ if (rev >= 0xC0)
+ port->port.handle_irq = pci1xxxx_handle_irq;
+
ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
if (ret < 0)
return ret;
@@ -370,6 +526,27 @@ static int pci1xxxx_logical_to_physical_port_translate(int subsys_dev, int port)
return logical_to_physical_port_idx[0][port];
}
+static int pci1xxxx_get_device_revision(struct pci1xxxx_8250 *priv)
+{
+ u32 regval;
+ int ret;
+
+ /*
+ * DEV_REV is a system register; the hardware Syslock bit
+ * must be acquired before accessing it.
+ */
+ ret = pci1xxxx_acquire_sys_lock(priv);
+ if (ret)
+ return ret;
+
+ regval = readl(priv->membase + UART_DEV_REV_REG);
+ priv->dev_rev = regval & UART_DEV_REV_MASK;
+
+ pci1xxxx_release_sys_lock(priv);
+
+ return 0;
+}
+
static int pci1xxxx_serial_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -381,6 +558,7 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
int num_vectors;
int subsys_dev;
int port_idx;
+ int ret;
int rc;
rc = pcim_enable_device(pdev);
@@ -397,6 +575,10 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
if (!priv->membase)
return -ENOMEM;
+ ret = pci1xxxx_get_device_revision(priv);
+ if (ret)
+ return ret;
+
pci_set_master(pdev);
priv->nr = nr_ports;
@@ -428,7 +610,7 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
else
uart.port.irq = pci_irq_vector(pdev, 0);
- rc = pci1xxxx_setup(pdev, &uart, port_idx);
+ rc = pci1xxxx_setup(pdev, &uart, port_idx, priv->dev_rev);
if (rc) {
dev_warn(dev, "Failed to setup port %u\n", i);
continue;
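Reading the new DEV_REV register requires holding a hardware syslock, which the driver acquires by polling with readx_poll_timeout(): the accessor is invoked repeatedly until the condition is true or the timeout elapses. A self-contained sketch of the idiom (register offsets copied from the hunks above; names and timing values illustrative):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EX_SYSLOCK_REG	0x10A0		/* UART_SYSTEM_ADDR_BASE + 0xA0 */
#define EX_SYSLOCK	BIT(2)

/* Accessor: request the lock, then read back the lock state. */
static u32 ex_read_syslock(void __iomem *base)
{
	writel(EX_SYSLOCK, base + EX_SYSLOCK_REG);
	return readl(base + EX_SYSLOCK_REG);
}

/* Sleep ~100 us between attempts; give up after 1000 attempts' worth. */
static int ex_acquire_syslock(void __iomem *base)
{
	u32 val;

	return readx_poll_timeout(ex_read_syslock, base, val,
				  val & EX_SYSLOCK, 100, 1000 * 100);
}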
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1d65055dd..c0c34e410 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -38,10 +38,6 @@
#include "8250.h"
-/* Nuvoton NPCM timeout register */
-#define UART_NPCM_TOR 7
-#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
-
/*
* Debugging.
*/
@@ -2229,15 +2225,6 @@ int serial8250_do_startup(struct uart_port *port)
UART_DA830_PWREMU_MGMT_FREE);
}
- if (port->type == PORT_NPCM) {
- /*
- * Nuvoton calls the scratch register 'UART_TOR' (timeout
- * register). Enable it, and set TIOC (timeout interrupt
- * comparator) to be 0x20 for correct operation.
- */
- serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
- }
-
#ifdef CONFIG_SERIAL_8250_RSA
/*
* If this is an RSA port, see if we can kick it up to the
@@ -2539,15 +2526,6 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
-/* Nuvoton NPCM UARTs have a custom divisor calculation */
-static unsigned int npcm_get_divisor(struct uart_8250_port *up,
- unsigned int baud)
-{
- struct uart_port *port = &up->port;
-
- return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
-}
-
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@@ -2592,8 +2570,6 @@ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
quot = 0x8001;
else if (magic_multiplier && baud >= port->uartclk / 12)
quot = 0x8002;
- else if (up->port.type == PORT_NPCM)
- quot = npcm_get_divisor(up, baud);
else
quot = uart_get_divisor(port, baud);
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index a5b3ea27f..77686da42 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -146,20 +146,18 @@ static int serial_pxa_probe(struct platform_device *pdev)
return ret;
}
-static int serial_pxa_remove(struct platform_device *pdev)
+static void serial_pxa_remove(struct platform_device *pdev)
{
struct pxa8250_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
clk_unprepare(data->clk);
-
- return 0;
}
static struct platform_driver serial_pxa_driver = {
.probe = serial_pxa_probe,
- .remove = serial_pxa_remove,
+ .remove_new = serial_pxa_remove,
.driver = {
.name = "pxa2xx-uart",
diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
index 89956bbf3..ba352262d 100644
--- a/drivers/tty/serial/8250/8250_tegra.c
+++ b/drivers/tty/serial/8250/8250_tegra.c
@@ -128,15 +128,13 @@ err_clkdisable:
return ret;
}
-static int tegra_uart_remove(struct platform_device *pdev)
+static void tegra_uart_remove(struct platform_device *pdev)
{
struct tegra_uart *uart = platform_get_drvdata(pdev);
serial8250_unregister_port(uart->line);
reset_control_assert(uart->rst);
clk_disable_unprepare(uart->clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -192,7 +190,7 @@ static struct platform_driver tegra_uart_driver = {
.acpi_match_table = ACPI_PTR(tegra_uart_acpi_match),
},
.probe = tegra_uart_probe,
- .remove = tegra_uart_remove,
+ .remove_new = tegra_uart_remove,
};
module_platform_driver(tegra_uart_driver);
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index a40515526..6399a38ec 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -241,14 +241,12 @@ static int uniphier_uart_probe(struct platform_device *pdev)
return 0;
}
-static int uniphier_uart_remove(struct platform_device *pdev)
+static void uniphier_uart_remove(struct platform_device *pdev)
{
struct uniphier8250_priv *priv = platform_get_drvdata(pdev);
serial8250_unregister_port(priv->line);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused uniphier_uart_suspend(struct device *dev)
@@ -293,7 +291,7 @@ MODULE_DEVICE_TABLE(of, uniphier_uart_match);
static struct platform_driver uniphier_uart_platform_driver = {
.probe = uniphier_uart_probe,
- .remove = uniphier_uart_remove,
+ .remove_new = uniphier_uart_remove,
.driver = {
.name = "uniphier-uart",
.of_match_table = uniphier_uart_match,
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index dc2ef05a1..2056aed46 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -90,12 +90,6 @@ struct serial_info {
const struct serial_quirk *quirk;
};
-struct serial_cfg_mem {
- tuple_t tuple;
- cisparse_t parse;
- u_char buf[256];
-};
-
/*
* vers_1 5.0, "Brain Boxes", "2-Port RS232 card", "r6"
* manfid 0x0160, 0x0104
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 732c893c8..ffcf4882b 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -87,7 +87,7 @@ config SERIAL_EARLYCON_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
- depends on RISCV_SBI_V01
+ depends on RISCV_SBI
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
@@ -532,6 +532,9 @@ config SERIAL_UARTLITE_NR_UARTS
help
Set this to the number of uartlites in your system, or the number
you think you might implement.
+ If the maximum number of uartlite serial ports is more than 4, the
+ driver allocates the major number dynamically instead of statically.
config SERIAL_SUNCORE
bool
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 7090b251d..effcba71e 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -425,7 +425,7 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
return 0;
}
-static int altera_jtaguart_remove(struct platform_device *pdev)
+static void altera_jtaguart_remove(struct platform_device *pdev)
{
struct uart_port *port;
int i = pdev->id;
@@ -436,8 +436,6 @@ static int altera_jtaguart_remove(struct platform_device *pdev)
port = &altera_jtaguart_ports[i];
uart_remove_one_port(&altera_jtaguart_driver, port);
iounmap(port->membase);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -451,7 +449,7 @@ MODULE_DEVICE_TABLE(of, altera_jtaguart_match);
static struct platform_driver altera_jtaguart_platform_driver = {
.probe = altera_jtaguart_probe,
- .remove = altera_jtaguart_remove,
+ .remove_new = altera_jtaguart_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(altera_jtaguart_match),
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 77835ac68..897f0995b 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -305,7 +305,7 @@ static int altera_uart_startup(struct uart_port *port)
int ret;
ret = request_irq(port->irq, altera_uart_interrupt, 0,
- DRV_NAME, port);
+ dev_name(port->dev), port);
if (ret) {
pr_err(DRV_NAME ": unable to attach Altera UART %d "
"interrupt vector=%d\n", port->line, port->irq);
@@ -595,7 +595,7 @@ static int altera_uart_probe(struct platform_device *pdev)
return 0;
}
-static int altera_uart_remove(struct platform_device *pdev)
+static void altera_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
@@ -604,8 +604,6 @@ static int altera_uart_remove(struct platform_device *pdev)
port->mapbase = 0;
iounmap(port->membase);
}
-
- return 0;
}
#ifdef CONFIG_OF
@@ -619,7 +617,7 @@ MODULE_DEVICE_TABLE(of, altera_uart_match);
static struct platform_driver altera_uart_platform_driver = {
.probe = altera_uart_probe,
- .remove = altera_uart_remove,
+ .remove_new = altera_uart_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(altera_uart_match),
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 474e2b7f4..cf2c890a5 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -50,8 +50,8 @@
#define AMBA_ISR_PASS_LIMIT 256
-#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
-#define UART_DUMMY_DR_RX (1 << 16)
+#define UART_DR_ERROR (UART011_DR_OE | UART011_DR_BE | UART011_DR_PE | UART011_DR_FE)
+#define UART_DUMMY_DR_RX BIT(16)
enum {
REG_DR,
@@ -125,7 +125,7 @@ static unsigned int get_fifosize_arm(struct amba_device *dev)
static struct vendor_data vendor_arm = {
.reg_offset = pl011_std_offsets,
- .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+ .ifls = UART011_IFLS_RX4_8 | UART011_IFLS_TX4_8,
.fr_busy = UART01x_FR_BUSY,
.fr_dsr = UART01x_FR_DSR,
.fr_cts = UART01x_FR_CTS,
@@ -203,7 +203,7 @@ static unsigned int get_fifosize_st(struct amba_device *dev)
static struct vendor_data vendor_st = {
.reg_offset = pl011_st_offsets,
- .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
+ .ifls = UART011_IFLS_RX_HALF | UART011_IFLS_TX_HALF,
.fr_busy = UART01x_FR_BUSY,
.fr_dsr = UART01x_FR_DSR,
.fr_cts = UART01x_FR_CTS,
@@ -277,13 +277,13 @@ struct uart_amba_port {
static unsigned int pl011_tx_empty(struct uart_port *port);
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
- unsigned int reg)
+ unsigned int reg)
{
return uap->reg_offset[reg];
}
static unsigned int pl011_read(const struct uart_amba_port *uap,
- unsigned int reg)
+ unsigned int reg)
{
void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
@@ -292,7 +292,7 @@ static unsigned int pl011_read(const struct uart_amba_port *uap,
}
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
- unsigned int reg)
+ unsigned int reg)
{
void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);
@@ -330,10 +330,11 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
uap->port.icount.brk++;
if (uart_handle_break(&uap->port))
continue;
- } else if (ch & UART011_DR_PE)
+ } else if (ch & UART011_DR_PE) {
uap->port.icount.parity++;
- else if (ch & UART011_DR_FE)
+ } else if (ch & UART011_DR_FE) {
uap->port.icount.frame++;
+ }
if (ch & UART011_DR_OE)
uap->port.icount.overrun++;
@@ -358,7 +359,6 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
return fifotaken;
}
-
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -369,7 +369,7 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE,
&db->dma, GFP_KERNEL);
@@ -381,7 +381,7 @@ static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db,
}
static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
if (db->buf) {
dma_free_coherent(chan->device->dev,
@@ -424,7 +424,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, plat->dma_filter,
- plat->dma_tx_param);
+ plat->dma_tx_param);
if (!chan) {
dev_err(uap->port.dev, "no TX DMA channel!\n");
return;
@@ -438,9 +438,9 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_chan_name(uap->dmatx.chan));
/* Optionally make use of an RX channel as well */
- chan = dma_request_slave_channel(dev, "rx");
+ chan = dma_request_chan(dev, "rx");
- if (!chan && plat && plat->dma_rx_param) {
+ if (IS_ERR(chan) && plat && plat->dma_rx_param) {
chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
if (!chan) {
@@ -449,7 +449,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
}
}
- if (chan) {
+ if (!IS_ERR(chan)) {
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase +
pl011_reg_to_offset(uap, REG_DR),
@@ -465,12 +465,12 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
* If the controller does, check for suitable residue processing
* otherwise assume all is well.
*/
- if (0 == dma_get_slave_caps(chan, &caps)) {
+ if (dma_get_slave_caps(chan, &caps) == 0) {
if (caps.residue_granularity ==
DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
dma_release_channel(chan);
dev_info(uap->port.dev,
- "RX DMA disabled - no residue processing\n");
+ "RX DMA disabled - no residue processing\n");
return;
}
}
@@ -499,18 +499,16 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
else
uap->dmarx.poll_timeout = 3000;
} else if (!plat && dev->of_node) {
- uap->dmarx.auto_poll_rate = of_property_read_bool(
- dev->of_node, "auto-poll");
+ uap->dmarx.auto_poll_rate =
+ of_property_read_bool(dev->of_node, "auto-poll");
if (uap->dmarx.auto_poll_rate) {
u32 x;
- if (0 == of_property_read_u32(dev->of_node,
- "poll-rate-ms", &x))
+ if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0)
uap->dmarx.poll_rate = x;
else
uap->dmarx.poll_rate = 100;
- if (0 == of_property_read_u32(dev->of_node,
- "poll-timeout-ms", &x))
+ if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0)
uap->dmarx.poll_timeout = x;
else
uap->dmarx.poll_timeout = 3000;
@@ -547,7 +545,7 @@ static void pl011_dma_tx_callback(void *data)
uart_port_lock_irqsave(&uap->port, &flags);
if (uap->dmatx.queued)
dma_unmap_single(dmatx->chan->device->dev, dmatx->dma,
- dmatx->len, DMA_TO_DEVICE);
+ dmatx->len, DMA_TO_DEVICE);
dmacr = uap->dmacr;
uap->dmacr = dmacr & ~UART011_TXDMAE;
@@ -618,9 +616,9 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
if (count > PL011_DMA_BUFFER_SIZE)
count = PL011_DMA_BUFFER_SIZE;
- if (xmit->tail < xmit->head)
+ if (xmit->tail < xmit->head) {
memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
- else {
+ } else {
size_t first = UART_XMIT_SIZE - xmit->tail;
size_t second;
@@ -643,7 +641,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
}
desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
@@ -754,8 +752,9 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
if (pl011_dma_tx_refill(uap) > 0) {
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC);
- } else
+ } else {
ret = false;
+ }
} else if (!(uap->dmacr & UART011_TXDMAE)) {
uap->dmacr |= UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
@@ -832,8 +831,8 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
dbuf = uap->dmarx.use_buf_b ?
&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
* If the DMA engine is busy and cannot prepare a
* channel, no big deal, the driver will fall back
@@ -889,14 +888,12 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
/* Pick the remain data from the DMA */
if (pending) {
-
/*
* First take all chars in the DMA pipe, then look in the FIFO.
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
- pending);
+ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending);
uap->port.icount.rx += dma_count;
if (dma_count < pending)
@@ -978,8 +975,8 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
/* Switch buffer & re-trigger DMA job */
dmarx->use_buf_b = !dmarx->use_buf_b;
if (pl011_dma_rx_trigger_dma(uap)) {
- dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
- "fall back to interrupt mode\n");
+ dev_dbg(uap->port.dev,
+ "could not retrigger RX DMA job fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
}
@@ -1026,8 +1023,8 @@ static void pl011_dma_rx_callback(void *data)
* get some IRQ immediately from RX.
*/
if (ret) {
- dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
- "fall back to interrupt mode\n");
+ dev_dbg(uap->port.dev,
+ "could not retrigger RX DMA job fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
}
@@ -1072,7 +1069,7 @@ static void pl011_dma_rx_poll(struct timer_list *t)
dmataken = dbuf->len - dmarx->last_residue;
size = dmarx->last_residue - state.residue;
dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
- size);
+ size);
if (dma_count == size)
dmarx->last_residue = state.residue;
dmarx->last_jiffies = jiffies;
@@ -1085,7 +1082,6 @@ static void pl011_dma_rx_poll(struct timer_list *t)
*/
if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
> uap->dmarx.poll_timeout) {
-
uart_port_lock_irqsave(&uap->port, &flags);
pl011_dma_rx_stop(uap);
uap->im |= UART011_RXIM;
@@ -1097,7 +1093,7 @@ static void pl011_dma_rx_poll(struct timer_list *t)
del_timer(&uap->dmarx.timer);
} else {
mod_timer(&uap->dmarx.timer,
- jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
}
}
@@ -1113,7 +1109,6 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
if (!uap->dmatx.buf) {
- dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
uap->port.fifosize = uap->fifosize;
return;
}
@@ -1129,7 +1124,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
/* Allocate and map DMA RX buffers */
ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer A", ret);
@@ -1137,12 +1132,12 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
}
ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer B", ret);
pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
goto skip_rx;
}
@@ -1164,13 +1159,12 @@ skip_rx:
if (uap->using_rx_dma) {
if (pl011_dma_rx_trigger_dma(uap))
- dev_dbg(uap->port.dev, "could not trigger initial "
- "RX DMA job, fall back to interrupt mode\n");
+ dev_dbg(uap->port.dev,
+ "could not trigger initial RX DMA job, fall back to interrupt mode\n");
if (uap->dmarx.poll_rate) {
timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
mod_timer(&uap->dmarx.timer,
- jiffies +
- msecs_to_jiffies(uap->dmarx.poll_rate));
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
uap->dmarx.last_jiffies = jiffies;
}
@@ -1389,8 +1383,8 @@ static void pl011_stop_rx(struct uart_port *port)
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
- UART011_PEIM|UART011_BEIM|UART011_OEIM);
+ uap->im &= ~(UART011_RXIM | UART011_RTIM | UART011_FEIM |
+ UART011_PEIM | UART011_BEIM | UART011_OEIM);
pl011_write(uap->im, uap, REG_IMSC);
pl011_dma_rx_stop(uap);
@@ -1410,7 +1404,7 @@ static void pl011_enable_ms(struct uart_port *port)
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
- uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
+ uap->im |= UART011_RIMIM | UART011_CTSMIM | UART011_DCDMIM | UART011_DSRMIM;
pl011_write(uap->im, uap, REG_IMSC);
}
@@ -1428,8 +1422,8 @@ __acquires(&uap->port.lock)
*/
if (pl011_dma_rx_available(uap)) {
if (pl011_dma_rx_trigger_dma(uap)) {
- dev_dbg(uap->port.dev, "could not trigger RX DMA job "
- "fall back to interrupt mode again\n");
+ dev_dbg(uap->port.dev,
+ "could not trigger RX DMA job fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
} else {
@@ -1439,8 +1433,7 @@ __acquires(&uap->port.lock)
uap->dmarx.last_jiffies = jiffies;
uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
mod_timer(&uap->dmarx.timer,
- jiffies +
- msecs_to_jiffies(uap->dmarx.poll_rate));
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
}
#endif
}
@@ -1557,18 +1550,17 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
do {
check_apply_cts_event_workaround(uap);
- pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
- UART011_RXIS),
+ pl011_write(status & ~(UART011_TXIS | UART011_RTIS | UART011_RXIS),
uap, REG_ICR);
- if (status & (UART011_RTIS|UART011_RXIS)) {
+ if (status & (UART011_RTIS | UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
pl011_dma_rx_irq(uap);
else
pl011_rx_chars(uap);
}
- if (status & (UART011_DSRMIS|UART011_DCDMIS|
- UART011_CTSMIS|UART011_RIMIS))
+ if (status & (UART011_DSRMIS | UART011_DCDMIS |
+ UART011_CTSMIS | UART011_RIMIS))
pl011_modem_status(uap);
if (status & UART011_TXIS)
pl011_tx_chars(uap, true);
@@ -1598,6 +1590,12 @@ static unsigned int pl011_tx_empty(struct uart_port *port)
0 : TIOCSER_TEMT;
}
+static void pl011_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask)
+{
+ if (cond)
+ *ptr |= mask;
+}
+
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
struct uart_amba_port *uap =
@@ -1605,18 +1603,22 @@ static unsigned int pl011_get_mctrl(struct uart_port *port)
unsigned int result = 0;
unsigned int status = pl011_read(uap, REG_FR);
-#define TIOCMBIT(uartbit, tiocmbit) \
- if (status & uartbit) \
- result |= tiocmbit
+ pl011_maybe_set_bit(status & UART01x_FR_DCD, &result, TIOCM_CAR);
+ pl011_maybe_set_bit(status & uap->vendor->fr_dsr, &result, TIOCM_DSR);
+ pl011_maybe_set_bit(status & uap->vendor->fr_cts, &result, TIOCM_CTS);
+ pl011_maybe_set_bit(status & uap->vendor->fr_ri, &result, TIOCM_RNG);
- TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
- TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
- TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
- TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
-#undef TIOCMBIT
return result;
}
+static void pl011_assign_bit(bool cond, unsigned int *ptr, unsigned int mask)
+{
+ if (cond)
+ *ptr |= mask;
+ else
+ *ptr &= ~mask;
+}
+
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_amba_port *uap =
@@ -1625,23 +1627,16 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
cr = pl011_read(uap, REG_CR);
-#define TIOCMBIT(tiocmbit, uartbit) \
- if (mctrl & tiocmbit) \
- cr |= uartbit; \
- else \
- cr &= ~uartbit
-
- TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
- TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
- TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
- TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
- TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
+ pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTS);
+ pl011_assign_bit(mctrl & TIOCM_DTR, &cr, UART011_CR_DTR);
+ pl011_assign_bit(mctrl & TIOCM_OUT1, &cr, UART011_CR_OUT1);
+ pl011_assign_bit(mctrl & TIOCM_OUT2, &cr, UART011_CR_OUT2);
+ pl011_assign_bit(mctrl & TIOCM_LOOP, &cr, UART011_CR_LBE);
if (port->status & UPSTAT_AUTORTS) {
/* We need to disable auto-RTS if we want to turn RTS off */
- TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
+ pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN);
}
-#undef TIOCMBIT
pl011_write(cr, uap, REG_CR);
}
@@ -1707,8 +1702,7 @@ static int pl011_get_poll_char(struct uart_port *port)
return pl011_read(uap, REG_DR);
}
-static void pl011_put_poll_char(struct uart_port *port,
- unsigned char ch)
+static void pl011_put_poll_char(struct uart_port *port, unsigned char ch)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -1909,14 +1903,13 @@ static int sbsa_uart_startup(struct uart_port *port)
return 0;
}
-static void pl011_shutdown_channel(struct uart_amba_port *uap,
- unsigned int lcrh)
+static void pl011_shutdown_channel(struct uart_amba_port *uap, unsigned int lcrh)
{
- unsigned long val;
+ unsigned long val;
- val = pl011_read(uap, lcrh);
- val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
- pl011_write(val, uap, lcrh);
+ val = pl011_read(uap, lcrh);
+ val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+ pl011_write(val, uap, lcrh);
}
/*
@@ -2065,7 +2058,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif
- if (baud > port->uartclk/16)
+ if (baud > port->uartclk / 16)
quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
else
quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
@@ -2147,9 +2140,9 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* else we see data corruption.
*/
if (uap->vendor->oversampling) {
- if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
+ if (baud >= 3000000 && baud < 3250000 && quot > 1)
quot -= 1;
- else if ((baud > 3250000) && (quot > 2))
+ else if (baud > 3250000 && quot > 2)
quot -= 2;
}
/* Set baud rate */
@@ -2218,13 +2211,14 @@ static void pl011_config_port(struct uart_port *port, int flags)
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
+
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
ret = -EINVAL;
if (ser->irq < 0 || ser->irq >= nr_irqs)
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
- if (port->mapbase != (unsigned long) ser->iomem_base)
+ if (port->mapbase != (unsigned long)ser->iomem_base)
ret = -EINVAL;
return ret;
}
@@ -2369,35 +2363,34 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
int *parity, int *bits)
{
- if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
- unsigned int lcr_h, ibrd, fbrd;
+ unsigned int lcr_h, ibrd, fbrd;
- lcr_h = pl011_read(uap, REG_LCRH_TX);
+ if (!(pl011_read(uap, REG_CR) & UART01x_CR_UARTEN))
+ return;
- *parity = 'n';
- if (lcr_h & UART01x_LCRH_PEN) {
- if (lcr_h & UART01x_LCRH_EPS)
- *parity = 'e';
- else
- *parity = 'o';
- }
+ lcr_h = pl011_read(uap, REG_LCRH_TX);
- if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
- *bits = 7;
+ *parity = 'n';
+ if (lcr_h & UART01x_LCRH_PEN) {
+ if (lcr_h & UART01x_LCRH_EPS)
+ *parity = 'e';
else
- *bits = 8;
+ *parity = 'o';
+ }
+
+ if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
+ *bits = 7;
+ else
+ *bits = 8;
- ibrd = pl011_read(uap, REG_IBRD);
- fbrd = pl011_read(uap, REG_FBRD);
+ ibrd = pl011_read(uap, REG_IBRD);
+ fbrd = pl011_read(uap, REG_FBRD);
- *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
+ *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
- if (uap->vendor->oversampling) {
- if (pl011_read(uap, REG_CR)
- & ST_UART011_CR_OVSFACT)
- *baud *= 2;
- }
- }
+ if (uap->vendor->oversampling &&
+ (pl011_read(uap, REG_CR) & ST_UART011_CR_OVSFACT))
+ *baud *= 2;
}
static int pl011_console_setup(struct console *co, char *options)
@@ -2533,7 +2526,7 @@ static void qdf2400_e44_putc(struct uart_port *port, unsigned char c)
cpu_relax();
}
-static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
+static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
@@ -2552,7 +2545,7 @@ static void pl011_putc(struct uart_port *port, unsigned char c)
cpu_relax();
}
-static void pl011_early_write(struct console *con, const char *s, unsigned n)
+static void pl011_early_write(struct console *con, const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
@@ -2613,7 +2606,9 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
return 0;
}
+
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
/*
@@ -2636,6 +2631,7 @@ qdf2400_e44_early_console_setup(struct earlycon_device *device,
device->con->write = qdf2400_e44_early_write;
return 0;
}
+
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
#else
@@ -2655,8 +2651,8 @@ static struct uart_driver amba_reg = {
static int pl011_probe_dt_alias(int index, struct device *dev)
{
struct device_node *np;
- static bool seen_dev_with_alias = false;
- static bool seen_dev_without_alias = false;
+ static bool seen_dev_with_alias;
+ static bool seen_dev_without_alias;
int ret = index;
if (!IS_ENABLED(CONFIG_OF))
@@ -2672,7 +2668,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
ret = index;
} else {
seen_dev_with_alias = true;
- if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
+ if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret]) {
dev_warn(dev, "requested serial port %d not available.\n", ret);
ret = index;
}
@@ -2706,7 +2702,7 @@ static int pl011_find_free_port(void)
int i;
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
- if (amba_ports[i] == NULL)
+ if (!amba_ports[i])
return i;
return -EBUSY;
@@ -2873,6 +2869,22 @@ static int pl011_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
+#ifdef CONFIG_ACPI_SPCR_TABLE
+static void qpdf2400_erratum44_workaround(struct device *dev,
+ struct uart_amba_port *uap)
+{
+ if (!qdf2400_e44_present)
+ return;
+
+ dev_info(dev, "working around QDF2400 SoC erratum 44\n");
+ uap->vendor = &vendor_qdt_qdf2400_e44;
+}
+#else
+static void qpdf2400_erratum44_workaround(struct device *dev,
+ struct uart_amba_port *uap)
+{ /* empty */ }
+#endif
+
static int sbsa_uart_probe(struct platform_device *pdev)
{
struct uart_amba_port *uap;
@@ -2908,13 +2920,8 @@ static int sbsa_uart_probe(struct platform_device *pdev)
return ret;
uap->port.irq = ret;
-#ifdef CONFIG_ACPI_SPCR_TABLE
- if (qdf2400_e44_present) {
- dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
- uap->vendor = &vendor_qdt_qdf2400_e44;
- } else
-#endif
- uap->vendor = &vendor_sbsa;
+ uap->vendor = &vendor_sbsa;
+ qpdf2400_erratum44_workaround(&pdev->dev, uap);
uap->reg_offset = uap->vendor->reg_offset;
uap->fifosize = 32;
@@ -2935,13 +2942,12 @@ static int sbsa_uart_probe(struct platform_device *pdev)
return pl011_register_port(uap);
}
-static int sbsa_uart_remove(struct platform_device *pdev)
+static void sbsa_uart_remove(struct platform_device *pdev)
{
struct uart_amba_port *uap = platform_get_drvdata(pdev);
uart_remove_one_port(&amba_reg, &uap->port);
pl011_unregister_port(uap);
- return 0;
}
static const struct of_device_id sbsa_uart_of_match[] = {
@@ -2959,7 +2965,7 @@ MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
static struct platform_driver arm_sbsa_uart_platform_driver = {
.probe = sbsa_uart_probe,
- .remove = sbsa_uart_remove,
+ .remove_new = sbsa_uart_remove,
.driver = {
.name = "sbsa-uart",
.pm = &pl011_dev_pm_ops,
@@ -2998,7 +3004,7 @@ static struct amba_driver pl011_driver = {
static int __init pl011_init(void)
{
- printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
+ pr_info("Serial: AMBA PL011 UART driver\n");
if (platform_driver_register(&arm_sbsa_uart_platform_driver))
pr_warn("could not register SBSA UART platform driver\n");
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index ffd234673..8d09ace06 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -818,7 +818,7 @@ err_disable_clk:
return ret;
}
-static int ar933x_uart_remove(struct platform_device *pdev)
+static void ar933x_uart_remove(struct platform_device *pdev)
{
struct ar933x_uart_port *up;
@@ -828,8 +828,6 @@ static int ar933x_uart_remove(struct platform_device *pdev)
uart_remove_one_port(&ar933x_uart_driver, &up->port);
clk_disable_unprepare(up->clk);
}
-
- return 0;
}
#ifdef CONFIG_OF
@@ -842,7 +840,7 @@ MODULE_DEVICE_TABLE(of, ar933x_uart_of_ids);
static struct platform_driver ar933x_uart_platform_driver = {
.probe = ar933x_uart_probe,
- .remove = ar933x_uart_remove,
+ .remove_new = ar933x_uart_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(ar933x_uart_of_ids),
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 1946fafc3..85667f709 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1013,14 +1013,18 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
struct device *mfd_dev = port->dev->parent;
dma_cap_mask_t mask;
struct dma_slave_config config;
+ struct dma_chan *chan;
int ret, nent;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
- if (atmel_port->chan_tx == NULL)
+ chan = dma_request_chan(mfd_dev, "tx");
+ if (IS_ERR(chan)) {
+ atmel_port->chan_tx = NULL;
goto chan_err;
+ }
+ atmel_port->chan_tx = chan;
dev_info(port->dev, "using %s for tx DMA transfers\n",
dma_chan_name(atmel_port->chan_tx));
@@ -1188,6 +1192,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
dma_cap_mask_t mask;
struct dma_slave_config config;
struct circ_buf *ring;
+ struct dma_chan *chan;
int ret, nent;
ring = &atmel_port->rx_ring;
@@ -1195,9 +1200,12 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_CYCLIC, mask);
- atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
- if (atmel_port->chan_rx == NULL)
+ chan = dma_request_chan(mfd_dev, "rx");
+ if (IS_ERR(chan)) {
+ atmel_port->chan_rx = NULL;
goto chan_err;
+ }
+ atmel_port->chan_rx = chan;
dev_info(port->dev, "using %s for rx DMA transfers\n",
dma_chan_name(atmel_port->chan_rx));
@@ -3001,7 +3009,7 @@ err:
* protocol that needs bitbanging on IO lines, but use the regular serial
* port in the normal case.
*/
-static int atmel_serial_remove(struct platform_device *pdev)
+static void atmel_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -3020,8 +3028,6 @@ static int atmel_serial_remove(struct platform_device *pdev)
clear_bit(port->line, atmel_ports_in_use);
pdev->dev.of_node = NULL;
-
- return 0;
}
static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
@@ -3029,7 +3035,7 @@ static SIMPLE_DEV_PM_OPS(atmel_serial_pm_ops, atmel_serial_suspend,
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
- .remove = atmel_serial_remove,
+ .remove_new = atmel_serial_remove,
.driver = {
.name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
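The atmel hunks above (and the imx and mxs-auart ones further down) replace dma_request_slave_channel(), which returns NULL on any failure, with dma_request_chan(), which returns an ERR_PTR() and so preserves the reason, -EPROBE_DEFER in particular. The drivers keep their "no DMA, fall back to PIO" behaviour by nulling the cached channel pointer on error. The shape of the conversion as a sketch, with a hypothetical demo_* helper:

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static struct dma_chan *demo_get_tx_chan(struct device *dev)
    {
            struct dma_chan *chan;

            chan = dma_request_chan(dev, "tx");
            if (IS_ERR(chan)) {
                    /* The cause is visible now, e.g. -EPROBE_DEFER. */
                    dev_dbg(dev, "no TX DMA channel: %pe\n", chan);
                    return NULL;    /* caller falls back to PIO */
            }
            return chan;
    }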
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 4a08fd5ee..a3cefa153 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -868,7 +868,7 @@ static int bcm_uart_probe(struct platform_device *pdev)
return 0;
}
-static int bcm_uart_remove(struct platform_device *pdev)
+static void bcm_uart_remove(struct platform_device *pdev)
{
struct uart_port *port;
@@ -876,7 +876,6 @@ static int bcm_uart_remove(struct platform_device *pdev)
uart_remove_one_port(&bcm_uart_driver, port);
/* mark port as free */
ports[pdev->id].membase = NULL;
- return 0;
}
static const struct of_device_id bcm63xx_of_match[] = {
@@ -890,7 +889,7 @@ MODULE_DEVICE_TABLE(of, bcm63xx_of_match);
*/
static struct platform_driver bcm_uart_platform_driver = {
.probe = bcm_uart_probe,
- .remove = bcm_uart_remove,
+ .remove_new = bcm_uart_remove,
.driver = {
.name = "bcm63xx_uart",
.of_match_table = bcm63xx_of_match,
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 55d19937e..7927725b8 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -510,13 +510,11 @@ static int uart_clps711x_probe(struct platform_device *pdev)
return ret;
}
-static int uart_clps711x_remove(struct platform_device *pdev)
+static void uart_clps711x_remove(struct platform_device *pdev)
{
struct clps711x_port *s = platform_get_drvdata(pdev);
uart_remove_one_port(&clps711x_uart, &s->port);
-
- return 0;
}
static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
@@ -531,7 +529,7 @@ static struct platform_driver clps711x_uart_platform = {
.of_match_table = of_match_ptr(clps711x_uart_dt_ids),
},
.probe = uart_clps711x_probe,
- .remove = uart_clps711x_remove,
+ .remove_new = uart_clps711x_remove,
};
static int __init uart_clps711x_init(void)
diff --git a/drivers/tty/serial/cpm_uart.c b/drivers/tty/serial/cpm_uart.c
index be4af6eda..df56c6c5a 100644
--- a/drivers/tty/serial/cpm_uart.c
+++ b/drivers/tty/serial/cpm_uart.c
@@ -1549,13 +1549,11 @@ static int cpm_uart_probe(struct platform_device *ofdev)
return ret;
}
-static int cpm_uart_remove(struct platform_device *ofdev)
+static void cpm_uart_remove(struct platform_device *ofdev)
{
struct uart_cpm_port *pinfo = platform_get_drvdata(ofdev);
uart_remove_one_port(&cpm_reg, &pinfo->port);
-
- return 0;
}
static const struct of_device_id cpm_uart_match[] = {
@@ -1581,7 +1579,7 @@ static struct platform_driver cpm_uart_driver = {
.of_match_table = cpm_uart_match,
},
.probe = cpm_uart_probe,
- .remove = cpm_uart_remove,
+ .remove_new = cpm_uart_remove,
};
static int __init cpm_uart_init(void)
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 5004125f3..e419c4bde 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -503,13 +503,11 @@ static int digicolor_uart_probe(struct platform_device *pdev)
return uart_add_one_port(&digicolor_uart, &dp->port);
}
-static int digicolor_uart_remove(struct platform_device *pdev)
+static void digicolor_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&digicolor_uart, port);
-
- return 0;
}
static const struct of_device_id digicolor_uart_dt_ids[] = {
@@ -524,7 +522,7 @@ static struct platform_driver digicolor_uart_platform = {
.of_match_table = of_match_ptr(digicolor_uart_dt_ids),
},
.probe = digicolor_uart_probe,
- .remove = digicolor_uart_remove,
+ .remove_new = digicolor_uart_remove,
};
static int __init digicolor_uart_init(void)
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
index 27afb0b74..0162155f0 100644
--- a/drivers/tty/serial/earlycon-riscv-sbi.c
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -15,17 +15,38 @@ static void sbi_putc(struct uart_port *port, unsigned char c)
sbi_console_putchar(c);
}
-static void sbi_console_write(struct console *con,
- const char *s, unsigned n)
+static void sbi_0_1_console_write(struct console *con,
+ const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, sbi_putc);
}
+static void sbi_dbcn_console_write(struct console *con,
+ const char *s, unsigned int n)
+{
+ int ret;
+
+ while (n) {
+ ret = sbi_debug_console_write(s, n);
+ if (ret < 0)
+ break;
+
+ s += ret;
+ n -= ret;
+ }
+}
+
static int __init early_sbi_setup(struct earlycon_device *device,
const char *opt)
{
- device->con->write = sbi_console_write;
+ if (sbi_debug_console_available)
+ device->con->write = sbi_dbcn_console_write;
+ else if (IS_ENABLED(CONFIG_RISCV_SBI_V01))
+ device->con->write = sbi_0_1_console_write;
+ else
+ return -ENODEV;
+
return 0;
}
EARLYCON_DECLARE(sbi, early_sbi_setup);
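sbi_debug_console_write() may consume fewer bytes than requested, returning the number actually written (or a negative error), so sbi_dbcn_console_write() above loops until the buffer is drained. The generic drain pattern, with write_fn standing in for the SBI call:

    /* write_fn() returns bytes consumed, or a negative error. */
    static void write_all(const char *s, unsigned int n,
                          int (*write_fn)(const char *s, unsigned int n))
    {
            int ret;

            while (n) {
                    ret = write_fn(s, n);
                    if (ret < 0)
                            break;          /* stop on error */
                    s += ret;               /* skip what was accepted */
                    n -= ret;
            }
    }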
diff --git a/drivers/tty/serial/esp32_acm.c b/drivers/tty/serial/esp32_acm.c
index cb28a8773..d4e8bdb1c 100644
--- a/drivers/tty/serial/esp32_acm.c
+++ b/drivers/tty/serial/esp32_acm.c
@@ -8,7 +8,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
@@ -413,18 +413,17 @@ static int esp32s3_acm_probe(struct platform_device *pdev)
return uart_add_one_port(&esp32s3_acm_reg, port);
}
-static int esp32s3_acm_remove(struct platform_device *pdev)
+static void esp32s3_acm_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&esp32s3_acm_reg, port);
- return 0;
}
static struct platform_driver esp32s3_acm_driver = {
.probe = esp32s3_acm_probe,
- .remove = esp32s3_acm_remove,
+ .remove_new = esp32s3_acm_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = esp32s3_acm_dt_ids,
diff --git a/drivers/tty/serial/esp32_uart.c b/drivers/tty/serial/esp32_uart.c
index 85c9c5ad7..6fc61f323 100644
--- a/drivers/tty/serial/esp32_uart.c
+++ b/drivers/tty/serial/esp32_uart.c
@@ -9,7 +9,8 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
@@ -678,16 +679,11 @@ static struct uart_driver esp32_uart_reg = {
static int esp32_uart_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- static const struct of_device_id *match;
struct uart_port *port;
struct esp32_port *sport;
struct resource *res;
int ret;
- match = of_match_device(esp32_uart_dt_ids, &pdev->dev);
- if (!match)
- return -ENODEV;
-
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
return -ENOMEM;
@@ -728,7 +724,7 @@ static int esp32_uart_probe(struct platform_device *pdev)
port->flags = UPF_BOOT_AUTOCONF;
port->has_sysrq = 1;
port->fifosize = ESP32_UART_TX_FIFO_SIZE;
- port->private_data = (void *)match->data;
+ port->private_data = (void *)device_get_match_data(&pdev->dev);
esp32_uart_ports[port->line] = sport;
@@ -737,19 +733,17 @@ static int esp32_uart_probe(struct platform_device *pdev)
return uart_add_one_port(&esp32_uart_reg, port);
}
-static int esp32_uart_remove(struct platform_device *pdev)
+static void esp32_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&esp32_uart_reg, port);
-
- return 0;
}
static struct platform_driver esp32_uart_driver = {
.probe = esp32_uart_probe,
- .remove = esp32_uart_remove,
+ .remove_new = esp32_uart_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = esp32_uart_dt_ids,
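device_get_match_data() replaces the open-coded of_match_device() lookup in esp32_uart_probe(): it returns the .data pointer of the matching table entry directly (NULL when nothing matches) and works for any firmware node, not only OF. A sketch of the pattern with hypothetical demo_* names:

    #include <linux/mod_devicetable.h>
    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct demo_variant {
            unsigned int fifo_size;         /* per-SoC data */
    };

    static const struct demo_variant demo_v1 = { .fifo_size = 128 };

    static const struct of_device_id demo_dt_ids[] = {
            { .compatible = "vendor,demo-uart", .data = &demo_v1 },
            { /* sentinel */ }
    };

    static int demo_probe(struct platform_device *pdev)
    {
            const struct demo_variant *v;

            /* .data of the matched entry, or NULL if none matched */
            v = device_get_match_data(&pdev->dev);
            if (!v)
                    return -ENODEV;
            return 0;
    }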
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 3bdaf1ddc..52c87876a 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -851,13 +851,11 @@ static int linflex_probe(struct platform_device *pdev)
return uart_add_one_port(&linflex_reg, sport);
}
-static int linflex_remove(struct platform_device *pdev)
+static void linflex_remove(struct platform_device *pdev)
{
struct uart_port *sport = platform_get_drvdata(pdev);
uart_remove_one_port(&linflex_reg, sport);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -884,7 +882,7 @@ static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume);
static struct platform_driver linflex_driver = {
.probe = linflex_probe,
- .remove = linflex_remove,
+ .remove_new = linflex_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = linflex_dt_ids,
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 71d0cbd74..bbcbc9148 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2962,7 +2962,7 @@ failed_reset:
return ret;
}
-static int lpuart_remove(struct platform_device *pdev)
+static void lpuart_remove(struct platform_device *pdev)
{
struct lpuart_port *sport = platform_get_drvdata(pdev);
@@ -2979,7 +2979,6 @@ static int lpuart_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- return 0;
}
static int lpuart_runtime_suspend(struct device *dev)
@@ -3213,7 +3212,7 @@ static const struct dev_pm_ops lpuart_pm_ops = {
static struct platform_driver lpuart_driver = {
.probe = lpuart_probe,
- .remove = lpuart_remove,
+ .remove_new = lpuart_remove,
.driver = {
.name = "fsl-lpuart",
.of_match_table = lpuart_dt_ids,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 5545bf4a7..e14813250 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1350,15 +1350,18 @@ static int imx_uart_dma_init(struct imx_port *sport)
{
struct dma_slave_config slave_config = {};
struct device *dev = sport->port.dev;
+ struct dma_chan *chan;
int ret;
/* Prepare for RX : */
- sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
- if (!sport->dma_chan_rx) {
+ chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(chan)) {
dev_dbg(dev, "cannot get the DMA channel.\n");
- ret = -EINVAL;
+ sport->dma_chan_rx = NULL;
+ ret = PTR_ERR(chan);
goto err;
}
+ sport->dma_chan_rx = chan;
slave_config.direction = DMA_DEV_TO_MEM;
slave_config.src_addr = sport->port.mapbase + URXD0;
@@ -1380,12 +1383,14 @@ static int imx_uart_dma_init(struct imx_port *sport)
sport->rx_ring.buf = sport->rx_buf;
/* Prepare for TX : */
- sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
- if (!sport->dma_chan_tx) {
+ chan = dma_request_chan(dev, "tx");
+ if (IS_ERR(chan)) {
dev_err(dev, "cannot get the TX DMA channel!\n");
- ret = -EINVAL;
+ sport->dma_chan_tx = NULL;
+ ret = PTR_ERR(chan);
goto err;
}
+ sport->dma_chan_tx = chan;
slave_config.direction = DMA_MEM_TO_DEV;
slave_config.dst_addr = sport->port.mapbase + URTX0;
@@ -2469,13 +2474,11 @@ err_clk:
return ret;
}
-static int imx_uart_remove(struct platform_device *pdev)
+static void imx_uart_remove(struct platform_device *pdev)
{
struct imx_port *sport = platform_get_drvdata(pdev);
uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
-
- return 0;
}
static void imx_uart_restore_context(struct imx_port *sport)
@@ -2644,7 +2647,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = {
static struct platform_driver imx_uart_platform_driver = {
.probe = imx_uart_probe,
- .remove = imx_uart_remove,
+ .remove_new = imx_uart_remove,
.driver = {
.name = "imx-uart",
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 8489c07f4..df55e5dc5 100644
--- a/drivers/tty/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -115,8 +115,6 @@ struct board_ops {
void (*send_start_character)(struct jsm_channel *ch);
void (*send_stop_character)(struct jsm_channel *ch);
void (*copy_data_from_queue_to_uart)(struct jsm_channel *ch);
- u32 (*get_uart_bytes_left)(struct jsm_channel *ch);
- void (*send_immediate_char)(struct jsm_channel *ch, unsigned char);
};
@@ -127,7 +125,6 @@ struct jsm_board
{
int boardnum; /* Board number: 0-32 */
- int type; /* Type of board */
u8 rev; /* PCI revision ID */
struct pci_dev *pci_dev;
u32 maxports; /* MAX ports this board can handle */
@@ -155,8 +152,6 @@ struct jsm_board
u32 bd_dividend; /* Board/UARTs specific dividend */
struct board_ops *bd_ops;
-
- struct list_head jsm_board_entry;
};
/************************************************************************
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 3fd57ac3a..1eda48964 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -878,28 +878,6 @@ static void cls_uart_off(struct jsm_channel *ch)
}
/*
- * cls_get_uart_bytes_left.
- * Returns 0 if nothing left in the FIFO, returns 1 otherwise.
- *
- * The channel lock MUST be held by the calling function.
- */
-static u32 cls_get_uart_bytes_left(struct jsm_channel *ch)
-{
- u8 left = 0;
- u8 lsr = readb(&ch->ch_cls_uart->lsr);
-
- /* Determine whether the Transmitter is empty or not */
- if (!(lsr & UART_LSR_TEMT))
- left = 1;
- else {
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- left = 0;
- }
-
- return left;
-}
-
-/*
* cls_send_break.
* Starts sending a break thru the UART.
*
@@ -916,18 +894,6 @@ static void cls_send_break(struct jsm_channel *ch)
}
}
-/*
- * cls_send_immediate_char.
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c)
-{
- writeb(c, &ch->ch_cls_uart->txrx);
-}
-
struct board_ops jsm_cls_ops = {
.intr = cls_intr,
.uart_init = cls_uart_init,
@@ -943,7 +909,5 @@ struct board_ops jsm_cls_ops = {
.send_start_character = cls_send_start_character,
.send_stop_character = cls_send_stop_character,
.copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
- .get_uart_bytes_left = cls_get_uart_bytes_left,
- .send_immediate_char = cls_send_immediate_char
};
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index 2bd640428..1fa10f193 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -1309,25 +1309,6 @@ static void neo_uart_off(struct jsm_channel *ch)
writeb(0, &ch->ch_neo_uart->ier);
}
-static u32 neo_get_uart_bytes_left(struct jsm_channel *ch)
-{
- u8 left = 0;
- u8 lsr = readb(&ch->ch_neo_uart->lsr);
-
- /* We must cache the LSR as some of the bits get reset once read... */
- ch->ch_cached_lsr |= lsr;
-
- /* Determine whether the Transmitter is empty or not */
- if (!(lsr & UART_LSR_TEMT))
- left = 1;
- else {
- ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
- left = 0;
- }
-
- return left;
-}
-
/* Channel lock MUST be held by the calling function! */
static void neo_send_break(struct jsm_channel *ch)
{
@@ -1348,25 +1329,6 @@ static void neo_send_break(struct jsm_channel *ch)
}
}
-/*
- * neo_send_immediate_char.
- *
- * Sends a specific character as soon as possible to the UART,
- * jumping over any bytes that might be in the write queue.
- *
- * The channel lock MUST be held by the calling function.
- */
-static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c)
-{
- if (!ch)
- return;
-
- writeb(c, &ch->ch_neo_uart->txrx);
-
- /* flush write operation */
- neo_pci_posting_flush(ch->ch_bd);
-}
-
struct board_ops jsm_neo_ops = {
.intr = neo_intr,
.uart_init = neo_uart_init,
@@ -1382,6 +1344,4 @@ struct board_ops jsm_neo_ops = {
.send_start_character = neo_send_start_character,
.send_stop_character = neo_send_stop_character,
.copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart,
- .get_uart_bytes_left = neo_get_uart_bytes_left,
- .send_immediate_char = neo_send_immediate_char
};
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 3adb60c68..a0731773c 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -887,13 +887,11 @@ static int lqasc_probe(struct platform_device *pdev)
return ret;
}
-static int lqasc_remove(struct platform_device *pdev)
+static void lqasc_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&lqasc_reg, port);
-
- return 0;
}
static const struct ltq_soc_data soc_data_lantiq = {
@@ -917,7 +915,7 @@ MODULE_DEVICE_TABLE(of, ltq_asc_match);
static struct platform_driver lqasc_driver = {
.probe = lqasc_probe,
- .remove = lqasc_remove,
+ .remove_new = lqasc_remove,
.driver = {
.name = DRVNAME,
.of_match_table = ltq_asc_match,
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index a25ab1efe..3ce369f76 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -336,15 +336,13 @@ err_erase_id:
return ret;
}
-static int liteuart_remove(struct platform_device *pdev)
+static void liteuart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
unsigned int line = port->line;
uart_remove_one_port(&liteuart_driver, port);
xa_erase(&liteuart_array, line);
-
- return 0;
}
static const struct of_device_id liteuart_of_match[] = {
@@ -355,7 +353,7 @@ MODULE_DEVICE_TABLE(of, liteuart_of_match);
static struct platform_driver liteuart_platform_driver = {
.probe = liteuart_probe,
- .remove = liteuart_remove,
+ .remove_new = liteuart_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = liteuart_of_match,
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 5149a947b..ec20329f0 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -659,13 +659,11 @@ static int serial_hs_lpc32xx_probe(struct platform_device *pdev)
/*
* Remove serial ports registered against a platform device.
*/
-static int serial_hs_lpc32xx_remove(struct platform_device *pdev)
+static void serial_hs_lpc32xx_remove(struct platform_device *pdev)
{
struct lpc32xx_hsuart_port *p = platform_get_drvdata(pdev);
uart_remove_one_port(&lpc32xx_hs_reg, &p->port);
-
- return 0;
}
@@ -702,7 +700,7 @@ MODULE_DEVICE_TABLE(of, serial_hs_lpc32xx_dt_ids);
static struct platform_driver serial_hs_lpc32xx_driver = {
.probe = serial_hs_lpc32xx_probe,
- .remove = serial_hs_lpc32xx_remove,
+ .remove_new = serial_hs_lpc32xx_remove,
.suspend = serial_hs_lpc32xx_suspend,
.resume = serial_hs_lpc32xx_resume,
.driver = {
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 21b574f78..19f0a305c 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -558,7 +558,7 @@ static void ma35d1serial_console_write(struct console *co, const char *s, u32 co
u32 ier;
if ((co->index < 0) || (co->index >= MA35_UART_NR)) {
- pr_warn("Failed to write on ononsole port %x, out of range\n",
+ pr_warn("Failed to write on console port %x, out of range\n",
co->index);
return;
}
@@ -754,14 +754,13 @@ err_iounmap:
/*
* Remove serial ports registered against a platform device.
*/
-static int ma35d1serial_remove(struct platform_device *dev)
+static void ma35d1serial_remove(struct platform_device *dev)
{
struct uart_port *port = platform_get_drvdata(dev);
struct uart_ma35d1_port *up = to_ma35d1_uart_port(port);
uart_remove_one_port(&ma35d1serial_reg, port);
clk_disable_unprepare(up->clk);
- return 0;
}
static int ma35d1serial_suspend(struct platform_device *dev, pm_message_t state)
@@ -794,7 +793,7 @@ static int ma35d1serial_resume(struct platform_device *dev)
static struct platform_driver ma35d1serial_driver = {
.probe = ma35d1serial_probe,
- .remove = ma35d1serial_remove,
+ .remove_new = ma35d1serial_remove,
.suspend = ma35d1serial_suspend,
.resume = ma35d1serial_resume,
.driver = {
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 1a4bb5565..198413df0 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -796,7 +796,7 @@ static void max310x_handle_tx(struct uart_port *port)
to_send = uart_circ_chars_pending(xmit);
until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (likely(to_send)) {
- /* Limit to size of TX FIFO */
+ /* Limit to space available in TX FIFO */
txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
txlen = port->fifosize - txlen;
to_send = (to_send > txlen) ? txlen : to_send;
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 91b15243f..8690a4523 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -627,7 +627,7 @@ static int mcf_probe(struct platform_device *pdev)
/****************************************************************************/
-static int mcf_remove(struct platform_device *pdev)
+static void mcf_remove(struct platform_device *pdev)
{
struct uart_port *port;
int i;
@@ -637,15 +637,13 @@ static int mcf_remove(struct platform_device *pdev)
if (port)
uart_remove_one_port(&mcf_driver, port);
}
-
- return 0;
}
/****************************************************************************/
static struct platform_driver mcf_platform_driver = {
.probe = mcf_probe,
- .remove = mcf_remove,
+ .remove_new = mcf_remove,
.driver = {
.name = "mcfuart",
},
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 8dd84617e..8395688f5 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -795,7 +795,7 @@ static int meson_uart_probe(struct platform_device *pdev)
return ret;
}
-static int meson_uart_remove(struct platform_device *pdev)
+static void meson_uart_remove(struct platform_device *pdev)
{
struct uart_driver *uart_driver;
struct uart_port *port;
@@ -807,12 +807,10 @@ static int meson_uart_remove(struct platform_device *pdev)
for (int id = 0; id < AML_UART_PORT_NUM; id++)
if (meson_ports[id])
- return 0;
+ return;
/* No more available uart ports, unregister uart driver */
uart_unregister_driver(uart_driver);
-
- return 0;
}
static struct meson_uart_data meson_g12a_uart_data = {
@@ -852,7 +850,7 @@ MODULE_DEVICE_TABLE(of, meson_uart_dt_match);
static struct platform_driver meson_uart_platform_driver = {
.probe = meson_uart_probe,
- .remove = meson_uart_remove,
+ .remove_new = meson_uart_remove,
.driver = {
.name = "meson_uart",
.of_match_table = meson_uart_dt_match,
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index db3b81f2a..da4c6f7e2 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -552,15 +552,13 @@ failed:
return ret;
}
-static int mlb_usio_remove(struct platform_device *pdev)
+static void mlb_usio_remove(struct platform_device *pdev)
{
struct uart_port *port = &mlb_usio_ports[pdev->id];
struct clk *clk = port->private_data;
uart_remove_one_port(&mlb_usio_uart_driver, port);
clk_disable_unprepare(clk);
-
- return 0;
}
static const struct of_device_id mlb_usio_dt_ids[] = {
@@ -571,7 +569,7 @@ MODULE_DEVICE_TABLE(of, mlb_usio_dt_ids);
static struct platform_driver mlb_usio_driver = {
.probe = mlb_usio_probe,
- .remove = mlb_usio_remove,
+ .remove_new = mlb_usio_remove,
.driver = {
.name = USIO_NAME,
.of_match_table = mlb_usio_dt_ids,
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index a252465e7..95dae5e27 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1765,15 +1765,12 @@ static int mpc52xx_uart_of_probe(struct platform_device *op)
return 0;
}
-static int
-mpc52xx_uart_of_remove(struct platform_device *op)
+static void mpc52xx_uart_of_remove(struct platform_device *op)
{
struct uart_port *port = platform_get_drvdata(op);
if (port)
uart_remove_one_port(&mpc52xx_uart_driver, port);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1846,7 +1843,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
static struct platform_driver mpc52xx_uart_of_driver = {
.probe = mpc52xx_uart_of_probe,
- .remove = mpc52xx_uart_of_remove,
+ .remove_new = mpc52xx_uart_of_remove,
#ifdef CONFIG_PM
.suspend = mpc52xx_uart_of_suspend,
.resume = mpc52xx_uart_of_resume,
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 597264b54..e24204ad3 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -1131,7 +1132,7 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
uart_port_unlock_irqrestore(port, flags);
entry = msm_find_best_baud(port, baud, &rate);
- clk_set_rate(msm_port->clk, rate);
+ dev_pm_opp_set_rate(port->dev, rate);
baud = rate / 16 / entry->divisor;
uart_port_lock_irqsave(port, &flags);
@@ -1186,6 +1187,7 @@ static void msm_init_clock(struct uart_port *port)
{
struct msm_port *msm_port = to_msm_port(port);
+ dev_pm_opp_set_rate(port->dev, port->uartclk);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
msm_serial_set_mnd_regs(port);
@@ -1239,6 +1241,7 @@ err_irq:
clk_disable_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
+ dev_pm_opp_set_rate(port->dev, 0);
return ret;
}
@@ -1254,6 +1257,7 @@ static void msm_shutdown(struct uart_port *port)
msm_release_dma(msm_port);
clk_disable_unprepare(msm_port->clk);
+ dev_pm_opp_set_rate(port->dev, 0);
free_irq(port->irq, port);
}
@@ -1419,11 +1423,13 @@ static void msm_power(struct uart_port *port, unsigned int state,
switch (state) {
case 0:
+ dev_pm_opp_set_rate(port->dev, port->uartclk);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
break;
case 3:
clk_disable_unprepare(msm_port->clk);
+ dev_pm_opp_set_rate(port->dev, 0);
clk_disable_unprepare(msm_port->pclk);
break;
default:
@@ -1789,7 +1795,7 @@ static int msm_serial_probe(struct platform_device *pdev)
struct resource *resource;
struct uart_port *port;
const struct of_device_id *id;
- int irq, line;
+ int irq, line, ret;
if (pdev->dev.of_node)
line = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -1824,6 +1830,15 @@ static int msm_serial_probe(struct platform_device *pdev)
return PTR_ERR(msm_port->pclk);
}
+ ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
+ if (ret)
+ return ret;
+
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(&pdev->dev);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&pdev->dev, ret, "invalid OPP table\n");
+
port->uartclk = clk_get_rate(msm_port->clk);
dev_info(&pdev->dev, "uartclk = %d\n", port->uartclk);
@@ -1843,13 +1858,11 @@ static int msm_serial_probe(struct platform_device *pdev)
return uart_add_one_port(&msm_uart_driver, port);
}
-static int msm_serial_remove(struct platform_device *pdev)
+static void msm_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&msm_uart_driver, port);
-
- return 0;
}
static const struct of_device_id msm_match_table[] = {
@@ -1882,7 +1895,7 @@ static const struct dev_pm_ops msm_serial_dev_pm_ops = {
};
static struct platform_driver msm_platform_driver = {
- .remove = msm_serial_remove,
+ .remove_new = msm_serial_remove,
.probe = msm_serial_probe,
.driver = {
.name = "msm_serial",
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 380a8b059..1e8853eae 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -907,21 +907,27 @@ static void mxs_auart_dma_exit(struct mxs_auart_port *s)
static int mxs_auart_dma_init(struct mxs_auart_port *s)
{
+ struct dma_chan *chan;
+
if (auart_dma_enabled(s))
return 0;
/* init for RX */
- s->rx_dma_chan = dma_request_slave_channel(s->dev, "rx");
- if (!s->rx_dma_chan)
+ chan = dma_request_chan(s->dev, "rx");
+ if (IS_ERR(chan))
goto err_out;
+ s->rx_dma_chan = chan;
+
s->rx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA);
if (!s->rx_dma_buf)
goto err_out;
/* init for TX */
- s->tx_dma_chan = dma_request_slave_channel(s->dev, "tx");
- if (!s->tx_dma_chan)
+ chan = dma_request_chan(s->dev, "tx");
+ if (IS_ERR(chan))
goto err_out;
+ s->tx_dma_chan = chan;
+
s->tx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA);
if (!s->tx_dma_buf)
goto err_out;
@@ -1080,11 +1086,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1120,6 +1128,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
@@ -1689,7 +1699,7 @@ out_disable_clks:
return ret;
}
-static int mxs_auart_remove(struct platform_device *pdev)
+static void mxs_auart_remove(struct platform_device *pdev)
{
struct mxs_auart_port *s = platform_get_drvdata(pdev);
@@ -1701,13 +1711,11 @@ static int mxs_auart_remove(struct platform_device *pdev)
clk_disable_unprepare(s->clk);
clk_disable_unprepare(s->clk_ahb);
}
-
- return 0;
}
static struct platform_driver mxs_auart_driver = {
.probe = mxs_auart_probe,
- .remove = mxs_auart_remove,
+ .remove_new = mxs_auart_remove,
.driver = {
.name = "mxs-auart",
.of_match_table = mxs_auart_dt_ids,
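The mxs-auart interrupt handler now holds the port lock across the whole STAT/INTR read-ack-handle sequence, closing races against termios and modem-control updates on other CPUs. uart_port_lock(), the non-irqsave variant, suffices because local interrupts are already disabled in hard-irq context. The locked-handler shape, as a sketch:

    #include <linux/interrupt.h>
    #include <linux/serial_core.h>

    static irqreturn_t demo_uart_irq(int irq, void *context)
    {
            struct uart_port *port = context;

            /* Non-irqsave variant: already in hard-irq context. */
            uart_port_lock(port);
            /* ... read status, ack the interrupt, push characters ... */
            uart_port_unlock(port);

            return IRQ_HANDLED;
    }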
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index f4c6ff806..f5a0b401a 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1659,7 +1659,7 @@ err_port_line:
return ret;
}
-static int serial_omap_remove(struct platform_device *dev)
+static void serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
@@ -1671,8 +1671,6 @@ static int serial_omap_remove(struct platform_device *dev)
pm_runtime_disable(up->dev);
cpu_latency_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
-
- return 0;
}
/*
@@ -1809,7 +1807,7 @@ MODULE_DEVICE_TABLE(of, omap_serial_of_match);
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
- .remove = serial_omap_remove,
+ .remove_new = serial_omap_remove,
.driver = {
.name = OMAP_SERIAL_DRIVER_NAME,
.pm = &serial_omap_dev_pm_ops,
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 919f5e5aa..d9fe85397 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -725,20 +725,18 @@ static int owl_uart_probe(struct platform_device *pdev)
return ret;
}
-static int owl_uart_remove(struct platform_device *pdev)
+static void owl_uart_remove(struct platform_device *pdev)
{
struct owl_uart_port *owl_port = platform_get_drvdata(pdev);
uart_remove_one_port(&owl_uart_driver, &owl_port->port);
owl_uart_ports[pdev->id] = NULL;
clk_disable_unprepare(owl_port->clk);
-
- return 0;
}
static struct platform_driver owl_uart_platform_driver = {
.probe = owl_uart_probe,
- .remove = owl_uart_remove,
+ .remove_new = owl_uart_remove,
.driver = {
.name = "owl-uart",
.of_match_table = owl_uart_dt_matches,
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 3a95bf5d5..bbb46e6e9 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -938,7 +938,7 @@ err:
return ret;
}
-static int pic32_uart_remove(struct platform_device *pdev)
+static void pic32_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct pic32_sport *sport = to_pic32_sport(port);
@@ -947,9 +947,6 @@ static int pic32_uart_remove(struct platform_device *pdev)
clk_disable_unprepare(sport->clk);
platform_set_drvdata(pdev, NULL);
pic32_sports[sport->idx] = NULL;
-
- /* automatic unroll of sport and gpios */
- return 0;
}
static const struct of_device_id pic32_serial_dt_ids[] = {
@@ -960,7 +957,7 @@ MODULE_DEVICE_TABLE(of, pic32_serial_dt_ids);
static struct platform_driver pic32_uart_platform_driver = {
.probe = pic32_uart_probe,
- .remove = pic32_uart_remove,
+ .remove_new = pic32_uart_remove,
.driver = {
.name = PIC32_DEV_NAME,
.of_match_table = of_match_ptr(pic32_serial_dt_ids),
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index c8bf08c19..77691fbbf 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
- return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 549909644..99e08737f 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1698,7 +1698,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
return 0;
}
-static int qcom_geni_serial_remove(struct platform_device *pdev)
+static void qcom_geni_serial_remove(struct platform_device *pdev)
{
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_driver *drv = port->private_data.drv;
@@ -1706,8 +1706,6 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
uart_remove_one_port(drv, &port->uport);
-
- return 0;
}
static int qcom_geni_serial_sys_suspend(struct device *dev)
@@ -1807,7 +1805,7 @@ static const struct of_device_id qcom_geni_serial_match_table[] = {
MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
static struct platform_driver qcom_geni_serial_platform_driver = {
- .remove = qcom_geni_serial_remove,
+ .remove_new = qcom_geni_serial_remove,
.probe = qcom_geni_serial_probe,
.driver = {
.name = "qcom_geni_serial",
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index d824c8318..13deb355c 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -780,19 +780,17 @@ static int rda_uart_probe(struct platform_device *pdev)
return ret;
}
-static int rda_uart_remove(struct platform_device *pdev)
+static void rda_uart_remove(struct platform_device *pdev)
{
struct rda_uart_port *rda_port = platform_get_drvdata(pdev);
uart_remove_one_port(&rda_uart_driver, &rda_port->port);
rda_uart_ports[pdev->id] = NULL;
-
- return 0;
}
static struct platform_driver rda_uart_platform_driver = {
.probe = rda_uart_probe,
- .remove = rda_uart_remove,
+ .remove_new = rda_uart_remove,
.driver = {
.name = "rda-uart",
.of_match_table = rda_uart_dt_matches,
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index d46a81cdd..4132fcff7 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -178,7 +178,6 @@ struct rp2_card;
struct rp2_uart_port {
struct uart_port port;
int idx;
- int ignore_rx;
struct rp2_card *card;
void __iomem *asic_base;
void __iomem *base;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index be7bcd75d..79c794fa6 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -870,19 +870,17 @@ static int sa1100_serial_probe(struct platform_device *dev)
return 0;
}
-static int sa1100_serial_remove(struct platform_device *pdev)
+static void sa1100_serial_remove(struct platform_device *pdev)
{
struct sa1100_port *sport = platform_get_drvdata(pdev);
if (sport)
uart_remove_one_port(&sa1100_reg, &sport->port);
-
- return 0;
}
static struct platform_driver sa11x0_serial_driver = {
.probe = sa1100_serial_probe,
- .remove = sa1100_serial_remove,
+ .remove_new = sa1100_serial_remove,
.suspend = sa1100_serial_suspend,
.resume = sa1100_serial_resume,
.driver = {
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 06d140c9f..6fdb32b83 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -2053,7 +2053,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
return 0;
}
-static int s3c24xx_serial_remove(struct platform_device *dev)
+static void s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
@@ -2062,8 +2062,6 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
}
uart_unregister_driver(&s3c24xx_uart_drv);
-
- return 0;
}
/* UART power management code */
@@ -2491,14 +2489,25 @@ static const struct s3c24xx_serial_drv_data exynos850_serial_drv_data = {
.fifosize = { 256, 64, 64, 64 },
};
+/*
+ * Common drv_data struct for platforms that specify samsung,uart-fifosize in
+ * device tree.
+ */
+static const struct s3c24xx_serial_drv_data exynos_fifoszdt_serial_drv_data = {
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ .fifosize = { 0 },
+};
+
#define EXYNOS4210_SERIAL_DRV_DATA (&exynos4210_serial_drv_data)
#define EXYNOS5433_SERIAL_DRV_DATA (&exynos5433_serial_drv_data)
#define EXYNOS850_SERIAL_DRV_DATA (&exynos850_serial_drv_data)
+#define EXYNOS_FIFOSZDT_DRV_DATA (&exynos_fifoszdt_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA NULL
#define EXYNOS5433_SERIAL_DRV_DATA NULL
#define EXYNOS850_SERIAL_DRV_DATA NULL
+#define EXYNOS_FIFOSZDT_DRV_DATA NULL
#endif
#ifdef CONFIG_ARCH_APPLE
@@ -2582,6 +2591,9 @@ static const struct platform_device_id s3c24xx_serial_driver_ids[] = {
}, {
.name = "artpec8-uart",
.driver_data = (kernel_ulong_t)ARTPEC8_SERIAL_DRV_DATA,
+ }, {
+ .name = "gs101-uart",
+ .driver_data = (kernel_ulong_t)EXYNOS_FIFOSZDT_DRV_DATA,
},
{ },
};
@@ -2603,6 +2615,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
.data = EXYNOS850_SERIAL_DRV_DATA },
{ .compatible = "axis,artpec8-uart",
.data = ARTPEC8_SERIAL_DRV_DATA },
+ { .compatible = "google,gs101-uart",
+ .data = EXYNOS_FIFOSZDT_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
@@ -2610,7 +2624,7 @@ MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
static struct platform_driver samsung_serial_driver = {
.probe = s3c24xx_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove_new = s3c24xx_serial_remove,
.id_table = s3c24xx_serial_driver_ids,
.driver = {
.name = "samsung-uart",
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index dba25e709..929206a9a 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -29,6 +29,7 @@
#define SC16IS7XX_NAME "sc16is7xx"
#define SC16IS7XX_MAX_DEVS 8
+#define SC16IS7XX_MAX_PORTS 2 /* Maximum number of UART ports per IC. */
/* SC16IS7XX register definitions */
#define SC16IS7XX_RHR_REG (0x00) /* RX FIFO */
@@ -329,8 +330,9 @@ struct sc16is7xx_one {
struct kthread_work reg_work;
struct kthread_delayed_work ms_work;
struct sc16is7xx_one_config config;
- bool irda_mode;
unsigned int old_mctrl;
+ u8 old_lcr; /* Value before EFR access. */
+ bool irda_mode;
};
struct sc16is7xx_port {
@@ -347,17 +349,15 @@ struct sc16is7xx_port {
struct sc16is7xx_one p[];
};
-static unsigned long sc16is7xx_lines;
+static DECLARE_BITMAP(sc16is7xx_lines, SC16IS7XX_MAX_DEVS);
static struct uart_driver sc16is7xx_uart = {
.owner = THIS_MODULE,
+ .driver_name = SC16IS7XX_NAME,
.dev_name = "ttySC",
.nr = SC16IS7XX_MAX_DEVS,
};
-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit);
-static void sc16is7xx_stop_tx(struct uart_port *port);
-
#define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
@@ -377,17 +377,15 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
regmap_write(one->regmap, reg, val);
}
-static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+static void sc16is7xx_fifo_read(struct uart_port *port, u8 *rxbuf, unsigned int rxlen)
{
- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
- regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, s->buf, rxlen);
+ regmap_noinc_read(one->regmap, SC16IS7XX_RHR_REG, rxbuf, rxlen);
}
-static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 *txbuf, u8 to_send)
{
- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
/*
@@ -397,7 +395,7 @@ static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
if (unlikely(!to_send))
return;
- regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
+ regmap_noinc_write(one->regmap, SC16IS7XX_THR_REG, txbuf, to_send);
}
static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
@@ -415,6 +413,85 @@ static void sc16is7xx_power(struct uart_port *port, int on)
on ? 0 : SC16IS7XX_IER_SLEEP_BIT);
}
+/*
+ * In an amazing feat of design, the Enhanced Features Register (EFR)
+ * shares the address of the Interrupt Identification Register (IIR).
+ * Access to EFR is switched on by writing a magic value (0xbf) to the
+ * Line Control Register (LCR). Any interrupt firing during this time will
+ * see the EFR where it expects the IIR to be, leading to
+ * "Unexpected interrupt" messages.
+ *
+ * Prevent this possibility by claiming a mutex while accessing the EFR,
+ * and claiming the same mutex from within the interrupt handler. This is
+ * similar to disabling the interrupt, but that doesn't work because the
+ * bulk of the interrupt processing is run as a workqueue job in thread
+ * context.
+ */
+static void sc16is7xx_efr_lock(struct uart_port *port)
+{
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ mutex_lock(&one->efr_lock);
+
+ /* Backup content of LCR. */
+ one->old_lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
+
+ /* Enable access to Enhanced register set */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_B);
+
+ /* Disable cache updates when writing to EFR registers */
+ regcache_cache_bypass(one->regmap, true);
+}
+
+static void sc16is7xx_efr_unlock(struct uart_port *port)
+{
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ /* Re-enable cache updates when writing to normal registers */
+ regcache_cache_bypass(one->regmap, false);
+
+ /* Restore original content of LCR */
+ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, one->old_lcr);
+
+ mutex_unlock(&one->efr_lock);
+}
+
+static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ lockdep_assert_held_once(&port->lock);
+
+ one->config.flags |= SC16IS7XX_RECONF_IER;
+ one->config.ier_mask |= bit;
+ one->config.ier_val &= ~bit;
+ kthread_queue_work(&s->kworker, &one->reg_work);
+}
+
+static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
+{
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+ lockdep_assert_held_once(&port->lock);
+
+ one->config.flags |= SC16IS7XX_RECONF_IER;
+ one->config.ier_mask |= bit;
+ one->config.ier_val |= bit;
+ kthread_queue_work(&s->kworker, &one->reg_work);
+}
+
+static void sc16is7xx_stop_tx(struct uart_port *port)
+{
+ sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
+}
+
+static void sc16is7xx_stop_rx(struct uart_port *port)
+{
+ sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
+}
+
static const struct sc16is7xx_devtype sc16is74x_devtype = {
.name = "SC16IS74X",
.nr_gpio = 0,
@@ -458,10 +535,8 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
case SC16IS7XX_IOCONTROL_REG:
return true;
default:
- break;
+ return false;
}
-
- return false;
}
static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
@@ -470,10 +545,8 @@ static bool sc16is7xx_regmap_precious(struct device *dev, unsigned int reg)
case SC16IS7XX_RHR_REG:
return true;
default:
- break;
+ return false;
}
-
- return false;
}
static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
@@ -488,50 +561,24 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
u8 prescaler = 0;
unsigned long clk = port->uartclk, div = clk / 16 / baud;
- if (div > 0xffff) {
+ if (div >= BIT(16)) {
prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
div /= 4;
}
- /* In an amazing feat of design, the Enhanced Features Register shares
- * the address of the Interrupt Identification Register, and is
- * switched in by writing a magic value (0xbf) to the Line Control
- * Register. Any interrupt firing during this time will see the EFR
- * where it expects the IIR to be, leading to "Unexpected interrupt"
- * messages.
- *
- * Prevent this possibility by claiming a mutex while accessing the
- * EFR, and claiming the same mutex from within the interrupt handler.
- * This is similar to disabling the interrupt, but that doesn't work
- * because the bulk of the interrupt processing is run as a workqueue
- * job in thread context.
- */
- mutex_lock(&one->efr_lock);
-
- lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
-
- /* Open the LCR divisors for configuration */
- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
- SC16IS7XX_LCR_CONF_MODE_B);
-
/* Enable enhanced features */
- regcache_cache_bypass(one->regmap, true);
+ sc16is7xx_efr_lock(port);
sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
SC16IS7XX_EFR_ENABLE_BIT,
SC16IS7XX_EFR_ENABLE_BIT);
-
- regcache_cache_bypass(one->regmap, false);
-
- /* Put LCR back to the normal mode */
- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
-
- mutex_unlock(&one->efr_lock);
+ sc16is7xx_efr_unlock(port);
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
prescaler);
- /* Open the LCR divisors for configuration */
+ /* Backup LCR and access special register set (DLL/DLH) */
+ lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_A);
@@ -541,7 +588,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
sc16is7xx_port_write(port, SC16IS7XX_DLL_REG, div % 256);
regcache_cache_bypass(one->regmap, false);
- /* Put LCR back to the normal mode */
+ /* Restore LCR and access to general register set */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
return DIV_ROUND_CLOSEST(clk / 16, div);
@@ -577,7 +624,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
bytes_read = 1;
} else {
- sc16is7xx_fifo_read(port, rxlen);
+ sc16is7xx_fifo_read(port, s->buf, rxlen);
bytes_read = rxlen;
}
@@ -650,7 +697,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
/* Get length of data pending in circular buffer */
to_send = uart_circ_chars_pending(xmit);
if (likely(to_send)) {
- /* Limit to size of TX FIFO */
+ /* Limit to space available in TX FIFO */
txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG);
if (txlen > SC16IS7XX_FIFO_SIZE) {
dev_err_ratelimited(port->dev,
@@ -666,7 +713,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
uart_xmit_advance(port, 1);
}
- sc16is7xx_fifo_write(port, to_send);
+ sc16is7xx_fifo_write(port, s->buf, to_send);
}
uart_port_lock_irqsave(port, &flags);
@@ -871,42 +918,6 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
sc16is7xx_reconf_rs485(&one->port);
}
-static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
-{
- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
- lockdep_assert_held_once(&port->lock);
-
- one->config.flags |= SC16IS7XX_RECONF_IER;
- one->config.ier_mask |= bit;
- one->config.ier_val &= ~bit;
- kthread_queue_work(&s->kworker, &one->reg_work);
-}
-
-static void sc16is7xx_ier_set(struct uart_port *port, u8 bit)
-{
- struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
- struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
-
- lockdep_assert_held_once(&port->lock);
-
- one->config.flags |= SC16IS7XX_RECONF_IER;
- one->config.ier_mask |= bit;
- one->config.ier_val |= bit;
- kthread_queue_work(&s->kworker, &one->reg_work);
-}
-
-static void sc16is7xx_stop_tx(struct uart_port *port)
-{
- sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
-}
-
-static void sc16is7xx_stop_rx(struct uart_port *port)
-{
- sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
-}
-
static void sc16is7xx_ms_proc(struct kthread_work *ws)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(ws, ms_work.work);
@@ -1056,17 +1067,7 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
- /* As above, claim the mutex while accessing the EFR. */
- mutex_lock(&one->efr_lock);
-
- sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
- SC16IS7XX_LCR_CONF_MODE_B);
-
/* Configure flow control */
- regcache_cache_bypass(one->regmap, true);
- sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
- sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
-
port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
if (termios->c_cflag & CRTSCTS) {
flow |= SC16IS7XX_EFR_AUTOCTS_BIT |
@@ -1078,16 +1079,16 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (termios->c_iflag & IXOFF)
flow |= SC16IS7XX_EFR_SWFLOW1_BIT;
- sc16is7xx_port_update(port,
- SC16IS7XX_EFR_REG,
- SC16IS7XX_EFR_FLOWCTRL_BITS,
- flow);
- regcache_cache_bypass(one->regmap, false);
-
/* Update LCR register */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
- mutex_unlock(&one->efr_lock);
+ /* Update EFR registers */
+ sc16is7xx_efr_lock(port);
+ sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, termios->c_cc[VSTART]);
+ sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, termios->c_cc[VSTOP]);
+ sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
+ SC16IS7XX_EFR_FLOWCTRL_BITS, flow);
+ sc16is7xx_efr_unlock(port);
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
@@ -1397,7 +1398,7 @@ static void sc16is7xx_setup_irda_ports(struct sc16is7xx_port *s)
int i;
int ret;
int count;
- u32 irda_port[2];
+ u32 irda_port[SC16IS7XX_MAX_PORTS];
struct device *dev = s->p[0].port.dev;
count = device_property_count_u32(dev, "irda-mode-ports");
@@ -1424,7 +1425,7 @@ static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s,
int i;
int ret;
int count;
- u32 mctrl_port[2];
+ u32 mctrl_port[SC16IS7XX_MAX_PORTS];
struct device *dev = s->p[0].port.dev;
count = device_property_count_u32(dev, "nxp,modem-control-line-ports");
@@ -1536,7 +1537,7 @@ static int sc16is7xx_probe(struct device *dev,
SC16IS7XX_IOCONTROL_SRESET_BIT);
for (i = 0; i < devtype->nr_uart; ++i) {
- s->p[i].port.line = find_first_zero_bit(&sc16is7xx_lines,
+ s->p[i].port.line = find_first_zero_bit(sc16is7xx_lines,
SC16IS7XX_MAX_DEVS);
if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
ret = -ERANGE;
@@ -1587,7 +1588,7 @@ static int sc16is7xx_probe(struct device *dev,
if (ret)
goto out_ports;
- set_bit(s->p[i].port.line, &sc16is7xx_lines);
+ set_bit(s->p[i].port.line, sc16is7xx_lines);
/* Enable EFR */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_LCR_REG,
@@ -1646,7 +1647,7 @@ static int sc16is7xx_probe(struct device *dev,
out_ports:
for (i = 0; i < devtype->nr_uart; i++)
- if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
+ if (test_and_clear_bit(s->p[i].port.line, sc16is7xx_lines))
uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
kthread_stop(s->kworker_task);
@@ -1669,7 +1670,7 @@ static void sc16is7xx_remove(struct device *dev)
for (i = 0; i < s->devtype->nr_uart; i++) {
kthread_cancel_delayed_work_sync(&s->p[i].ms_work);
- if (test_and_clear_bit(s->p[i].port.line, &sc16is7xx_lines))
+ if (test_and_clear_bit(s->p[i].port.line, sc16is7xx_lines))
uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
sc16is7xx_power(&s->p[i].port, 0);
}
@@ -1726,7 +1727,7 @@ static unsigned int sc16is7xx_regmap_port_mask(unsigned int port_id)
static int sc16is7xx_spi_probe(struct spi_device *spi)
{
const struct sc16is7xx_devtype *devtype;
- struct regmap *regmaps[2];
+ struct regmap *regmaps[SC16IS7XX_MAX_PORTS];
unsigned int i;
int ret;
@@ -1742,15 +1743,9 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- if (spi->dev.of_node) {
- devtype = device_get_match_data(&spi->dev);
- if (!devtype)
- return -ENODEV;
- } else {
- const struct spi_device_id *id_entry = spi_get_device_id(spi);
-
- devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
- }
+ devtype = spi_get_device_match_data(spi);
+ if (!devtype)
+ return dev_err_probe(&spi->dev, -ENODEV, "Failed to match device\n");
for (i = 0; i < devtype->nr_uart; i++) {
regcfg.name = sc16is7xx_regmap_name(i);
@@ -1795,25 +1790,18 @@ static struct spi_driver sc16is7xx_spi_uart_driver = {
.remove = sc16is7xx_spi_remove,
.id_table = sc16is7xx_spi_id_table,
};
-
-MODULE_ALIAS("spi:sc16is7xx");
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
static int sc16is7xx_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
const struct sc16is7xx_devtype *devtype;
- struct regmap *regmaps[2];
+ struct regmap *regmaps[SC16IS7XX_MAX_PORTS];
unsigned int i;
- if (i2c->dev.of_node) {
- devtype = device_get_match_data(&i2c->dev);
- if (!devtype)
- return -ENODEV;
- } else {
- devtype = (struct sc16is7xx_devtype *)id->driver_data;
- }
+ devtype = i2c_get_match_data(i2c);
+ if (!devtype)
+ return dev_err_probe(&i2c->dev, -ENODEV, "Failed to match device\n");
for (i = 0; i < devtype->nr_uart; i++) {
regcfg.name = sc16is7xx_regmap_name(i);
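Factoring the EFR window into sc16is7xx_efr_lock()/sc16is7xx_efr_unlock() turns every Enhanced-Features access into a small bracketed section: the mutex, the 0xbf LCR magic and the regcache bypass all live in one place. Callers then look like this, condensed from the set_termios() hunk above (demo_update_flow_chars is an illustrative wrapper, not a driver function):

    static void demo_update_flow_chars(struct uart_port *port, u8 xon, u8 xoff)
    {
            sc16is7xx_efr_lock(port);       /* mutex + LCR 0xbf + cache bypass */
            sc16is7xx_port_write(port, SC16IS7XX_XON1_REG, xon);
            sc16is7xx_port_write(port, SC16IS7XX_XOFF1_REG, xoff);
            sc16is7xx_efr_unlock(port);     /* restore LCR, drop mutex */
    }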
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 2be2c1098..f24217a56 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -1021,7 +1021,7 @@ err_out:
return ret;
}
-static int sccnxp_remove(struct platform_device *pdev)
+static void sccnxp_remove(struct platform_device *pdev)
{
int i;
struct sccnxp_port *s = platform_get_drvdata(pdev);
@@ -1036,10 +1036,11 @@ static int sccnxp_remove(struct platform_device *pdev)
uart_unregister_driver(&s->uart);
- if (!IS_ERR(s->regulator))
- return regulator_disable(s->regulator);
-
- return 0;
+ if (!IS_ERR(s->regulator)) {
+ int ret = regulator_disable(s->regulator);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to disable regulator\n");
+ }
}
static struct platform_driver sccnxp_uart_driver = {
@@ -1047,7 +1048,7 @@ static struct platform_driver sccnxp_uart_driver = {
.name = SCCNXP_NAME,
},
.probe = sccnxp_probe,
- .remove = sccnxp_remove,
+ .remove_new = sccnxp_remove,
.id_table = sccnxp_id_table,
};
module_platform_driver(sccnxp_uart_driver);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 6d4006b41..525f3a2f7 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1611,13 +1611,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
return ret;
}
-static int tegra_uart_remove(struct platform_device *pdev)
+static void tegra_uart_remove(struct platform_device *pdev)
{
struct tegra_uart_port *tup = platform_get_drvdata(pdev);
struct uart_port *u = &tup->uport;
uart_remove_one_port(&tegra_uart_driver, u);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1644,7 +1643,7 @@ static const struct dev_pm_ops tegra_uart_pm_ops = {
static struct platform_driver tegra_uart_platform_driver = {
.probe = tegra_uart_probe,
- .remove = tegra_uart_remove,
+ .remove_new = tegra_uart_remove,
.driver = {
.name = "serial-tegra",
.of_match_table = tegra_uart_of_match,
diff --git a/drivers/tty/serial/serial_base.h b/drivers/tty/serial/serial_base.h
index c74c548f0..b6c38d2ed 100644
--- a/drivers/tty/serial/serial_base.h
+++ b/drivers/tty/serial/serial_base.h
@@ -22,6 +22,7 @@ struct serial_ctrl_device {
struct serial_port_device {
struct device dev;
struct uart_port *port;
+ unsigned int tx_enabled:1;
};
int serial_base_ctrl_init(void);
@@ -30,6 +31,9 @@ void serial_base_ctrl_exit(void);
int serial_base_port_init(void);
void serial_base_port_exit(void);
+void serial_base_port_startup(struct uart_port *port);
+void serial_base_port_shutdown(struct uart_port *port);
+
int serial_base_driver_register(struct device_driver *driver);
void serial_base_driver_unregister(struct device_driver *driver);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index e383d9a10..c476d8843 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
@@ -323,16 +323,26 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
bool init_hw)
{
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int retval;
if (tty_port_initialized(port))
- return 0;
+ goto out_base_port_startup;
retval = uart_port_startup(tty, state, init_hw);
- if (retval)
+ if (retval) {
set_bit(TTY_IO_ERROR, &tty->flags);
+ return retval;
+ }
- return retval;
+out_base_port_startup:
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
+
+ serial_base_port_startup(uport);
+
+ return 0;
}
/*
@@ -355,6 +365,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
+ if (uport)
+ serial_base_port_shutdown(uport);
+
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
@@ -410,11 +423,10 @@ void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud)
{
- unsigned int size = tty_get_frame_size(cflag);
- u64 frame_time;
+ u64 temp = tty_get_frame_size(cflag);
- frame_time = (u64)size * NSEC_PER_SEC;
- port->frame_time = DIV64_U64_ROUND_UP(frame_time, baud);
+ temp *= NSEC_PER_SEC;
+ port->frame_time = (unsigned int)DIV64_U64_ROUND_UP(temp, baud);
}
EXPORT_SYMBOL(uart_update_timeout);
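The rewritten helper computes the nanosecond duration of one character frame; tty_get_frame_size() already accounts for start, stop and parity bits. As a worked example (a sketch, assuming 8N1 framing, which is 10 bits per frame):

    u64 temp = 10;                          /* 1 start + 8 data + 1 stop */
    u32 frame_time;

    temp *= NSEC_PER_SEC;                   /* 10 * 10^9 */
    frame_time = DIV64_U64_ROUND_UP(temp, 115200);  /* 86806 ns at 115200 baud */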
@@ -687,7 +699,7 @@ EXPORT_SYMBOL_GPL(uart_xchar_out);
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
-static void uart_send_xchar(struct tty_struct *tty, char ch)
+static void uart_send_xchar(struct tty_struct *tty, u8 ch)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
@@ -1371,6 +1383,12 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4
return;
}
+ /* If RS422 is being enabled, clear all other RS485 flags except SER_RS485_TERMINATE_BUS and return */
+ if (rs485->flags & SER_RS485_MODE_RS422) {
+ rs485->flags &= (SER_RS485_ENABLED | SER_RS485_MODE_RS422 | SER_RS485_TERMINATE_BUS);
+ return;
+ }
+
rs485->flags &= supported_flags;
/* Pick sane settings if the user hasn't */
@@ -1770,6 +1788,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
uport->ops->stop_rx(uport);
uart_port_unlock_irq(uport);
+ serial_base_port_shutdown(uport);
uart_port_shutdown(port);
/*
@@ -1783,6 +1802,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
* Free the transmit buffer.
*/
uart_port_lock_irq(uport);
+ uart_circ_clear(&state->xmit);
buf = state->xmit.buf;
state->xmit.buf = NULL;
uart_port_unlock_irq(uport);
@@ -2372,7 +2392,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
mutex_lock(&port->mutex);
- tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+ tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
if (tty_dev && device_may_wakeup(tty_dev)) {
enable_irq_wake(uport->irq);
put_device(tty_dev);
@@ -2453,7 +2473,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
mutex_lock(&port->mutex);
- tty_dev = device_find_child(uport->dev, &match, serial_match_port);
+ tty_dev = device_find_child(&uport->port_dev->dev, &match, serial_match_port);
if (!uport->suspended && device_may_wakeup(tty_dev)) {
if (irqd_is_wakeup_set(irq_get_irq_data((uport->irq))))
disable_irq_wake(uport->irq);
@@ -2675,7 +2695,8 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
mutex_lock(&tport->mutex);
port = uart_port_check(state);
- if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) {
+ if (!port || port->type == PORT_UNKNOWN ||
+ !(port->ops->poll_get_char && port->ops->poll_put_char)) {
ret = -1;
goto out;
}
@@ -3195,7 +3216,8 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
* setserial to be used to alter this port's parameters.
*/
tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver,
- uport->line, uport->dev, port, uport->tty_groups);
+ uport->line, uport->dev, &uport->port_dev->dev, port,
+ uport->tty_groups);
if (!IS_ERR(tty_dev)) {
device_set_wakeup_capable(tty_dev, 1);
} else {
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 72b6f4f32..7d51e66ec 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -36,8 +36,12 @@ static int serial_port_runtime_resume(struct device *dev)
/* Flush any pending TX for the port */
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled)
+ goto unlock;
if (__serial_port_busy(port))
port->ops->start_tx(port);
+
+unlock:
uart_port_unlock_irqrestore(port, flags);
out:
@@ -57,6 +61,11 @@ static int serial_port_runtime_suspend(struct device *dev)
return 0;
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled) {
+ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
busy = __serial_port_busy(port);
if (busy)
port->ops->start_tx(port);
@@ -68,6 +77,31 @@ static int serial_port_runtime_suspend(struct device *dev)
return busy ? -EBUSY : 0;
}
+static void serial_base_port_set_tx(struct uart_port *port,
+ struct serial_port_device *port_dev,
+ bool enabled)
+{
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ port_dev->tx_enabled = enabled;
+ uart_port_unlock_irqrestore(port, flags);
+}
+
+void serial_base_port_startup(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, true);
+}
+
+void serial_base_port_shutdown(struct uart_port *port)
+{
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, false);
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
serial_port_runtime_suspend,
serial_port_runtime_resume, NULL);
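Together with the serial_core.c hunks above, the new tx_enabled flag closes a window where the runtime PM callbacks could call start_tx() on a port whose hardware had already been shut down. The flag is only ever flipped under the port lock, so the PM paths observe a consistent value; condensed to a sketch:

    /* uart_shutdown() path */
    serial_base_port_shutdown(port);        /* tx_enabled = false, under port lock */

    /* later, in serial_port_runtime_resume() */
    uart_port_lock_irqsave(port, &flags);
    if (port_dev->tx_enabled && __serial_port_busy(port))
            port->ops->start_tx(port);      /* only while the port is started up */
    uart_port_unlock_irqrestore(port, flags);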
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index eaa980722..e1897894a 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1052,7 +1052,7 @@ static int serial_txx9_probe(struct platform_device *dev)
/*
* Remove serial ports registered against a platform device.
*/
-static int serial_txx9_remove(struct platform_device *dev)
+static void serial_txx9_remove(struct platform_device *dev)
{
int i;
@@ -1062,7 +1062,6 @@ static int serial_txx9_remove(struct platform_device *dev)
if (up->dev == &dev->dev)
serial_txx9_unregister_port(i);
}
- return 0;
}
#ifdef CONFIG_PM
@@ -1097,7 +1096,7 @@ static int serial_txx9_resume(struct platform_device *dev)
static struct platform_driver serial_txx9_plat_driver = {
.probe = serial_txx9_probe,
- .remove = serial_txx9_remove,
+ .remove_new = serial_txx9_remove,
#ifdef CONFIG_PM
.suspend = serial_txx9_suspend,
.resume = serial_txx9_resume,
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 84ab434c9..a85e7b9a2 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1558,10 +1558,9 @@ static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
struct dma_slave_config cfg;
int ret;
- chan = dma_request_slave_channel(port->dev,
- dir == DMA_MEM_TO_DEV ? "tx" : "rx");
- if (!chan) {
- dev_dbg(port->dev, "dma_request_slave_channel failed\n");
+ chan = dma_request_chan(port->dev, dir == DMA_MEM_TO_DEV ? "tx" : "rx");
+ if (IS_ERR(chan)) {
+ dev_dbg(port->dev, "dma_request_chan failed\n");
return NULL;
}
@@ -3190,7 +3189,7 @@ static struct uart_driver sci_uart_driver = {
.cons = SCI_CONSOLE,
};
-static int sci_remove(struct platform_device *dev)
+static void sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
unsigned int type = port->port.type; /* uart_remove_... clears it */
@@ -3204,8 +3203,6 @@ static int sci_remove(struct platform_device *dev)
device_remove_file(&dev->dev, &dev_attr_rx_fifo_trigger);
if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF)
device_remove_file(&dev->dev, &dev_attr_rx_fifo_timeout);
-
- return 0;
}
@@ -3470,7 +3467,7 @@ static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);
static struct platform_driver sci_driver = {
.probe = sci_probe,
- .remove = sci_remove,
+ .remove_new = sci_remove,
.driver = {
.name = "sh-sci",
.pm = &sci_dev_pm_ops,
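dma_request_slave_channel() returned NULL on failure and has been deprecated in favour of dma_request_chan(), which returns an ERR_PTR and can therefore convey -EPROBE_DEFER; hence the check flips from !chan to IS_ERR(chan). The general calling pattern, as a sketch:

    struct dma_chan *chan;

    chan = dma_request_chan(dev, "tx");
    if (IS_ERR(chan))
            return PTR_ERR(chan);   /* e.g. -EPROBE_DEFER if the controller is not ready */

sh-sci deliberately keeps returning NULL to its caller, since DMA is optional there and the driver falls back to PIO.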
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index b296e57a9..a4cc569a7 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -1007,7 +1007,7 @@ probe_out1:
return r;
}
-static int sifive_serial_remove(struct platform_device *dev)
+static void sifive_serial_remove(struct platform_device *dev)
{
struct sifive_serial_port *ssp = platform_get_drvdata(dev);
@@ -1015,8 +1015,6 @@ static int sifive_serial_remove(struct platform_device *dev)
uart_remove_one_port(&sifive_serial_uart_driver, &ssp->port);
free_irq(ssp->port.irq, ssp);
clk_notifier_unregister(ssp->clk, &ssp->clk_notifier);
-
- return 0;
}
static int sifive_serial_suspend(struct device *dev)
@@ -1033,8 +1031,8 @@ static int sifive_serial_resume(struct device *dev)
return uart_resume_port(&sifive_serial_uart_driver, &ssp->port);
}
-DEFINE_SIMPLE_DEV_PM_OPS(sifive_uart_pm_ops, sifive_serial_suspend,
- sifive_serial_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sifive_uart_pm_ops, sifive_serial_suspend,
+ sifive_serial_resume);
static const struct of_device_id sifive_serial_of_match[] = {
{ .compatible = "sifive,fu540-c000-uart0" },
@@ -1045,7 +1043,7 @@ MODULE_DEVICE_TABLE(of, sifive_serial_of_match);
static struct platform_driver sifive_serial_platform_driver = {
.probe = sifive_serial_probe,
- .remove = sifive_serial_remove,
+ .remove_new = sifive_serial_remove,
.driver = {
.name = SIFIVE_SERIAL_NAME,
.pm = pm_sleep_ptr(&sifive_uart_pm_ops),
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index f257525f9..15f14fa59 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -1076,7 +1076,7 @@ static struct uart_driver sprd_uart_driver = {
.cons = SPRD_CONSOLE,
};
-static int sprd_remove(struct platform_device *dev)
+static void sprd_remove(struct platform_device *dev)
{
struct sprd_uart_port *sup = platform_get_drvdata(dev);
@@ -1089,8 +1089,6 @@ static int sprd_remove(struct platform_device *dev)
if (!sprd_ports_num)
uart_unregister_driver(&sprd_uart_driver);
-
- return 0;
}
static bool sprd_uart_is_console(struct uart_port *uport)
@@ -1257,7 +1255,7 @@ MODULE_DEVICE_TABLE(of, serial_ids);
static struct platform_driver sprd_platform_driver = {
.probe = sprd_probe,
- .remove = sprd_remove,
+ .remove_new = sprd_remove,
.driver = {
.name = "sprd_serial",
.of_match_table = serial_ids,
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index a821f5d76..bbb5595d7 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -793,13 +793,11 @@ static int asc_serial_probe(struct platform_device *pdev)
return 0;
}
-static int asc_serial_remove(struct platform_device *pdev)
+static void asc_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
uart_remove_one_port(&asc_uart_driver, port);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -932,7 +930,7 @@ static const struct dev_pm_ops asc_serial_pm_ops = {
static struct platform_driver asc_serial_driver = {
.probe = asc_serial_probe,
- .remove = asc_serial_remove,
+ .remove_new = asc_serial_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &asc_serial_pm_ops,
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index e93e3a9b4..d103b07d1 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -857,6 +857,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -865,11 +866,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
@@ -878,6 +882,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
@@ -892,6 +897,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
@@ -899,6 +905,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
@@ -908,9 +915,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1080,6 +1088,7 @@ static int stm32_usart_startup(struct uart_port *port)
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
@@ -1820,7 +1829,7 @@ err_dma_rx:
return ret;
}
-static int stm32_usart_serial_remove(struct platform_device *pdev)
+static void stm32_usart_serial_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1859,8 +1868,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
}
stm32_usart_deinit_port(stm32_port);
-
- return 0;
}
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
@@ -2144,7 +2151,7 @@ static const struct dev_pm_ops stm32_serial_pm_ops = {
static struct platform_driver stm32_serial_driver = {
.probe = stm32_usart_serial_probe,
- .remove = stm32_usart_serial_remove,
+ .remove_new = stm32_usart_serial_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_serial_pm_ops,
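Returning IRQ_NONE when none of the status bits were serviced lets the kernel's spurious-interrupt detection throttle a misbehaving shared line instead of the handler silently claiming every interrupt. The canonical handler shape (a sketch, foo_* names hypothetical):

    static irqreturn_t foo_interrupt(int irq, void *ptr)
    {
            irqreturn_t ret = IRQ_NONE;
            u32 sr = foo_read_status(ptr);          /* hypothetical accessor */

            if (sr & FOO_SR_RX) {
                    foo_handle_rx(ptr);
                    ret = IRQ_HANDLED;
            }
            if (sr & FOO_SR_TX) {
                    foo_handle_tx(ptr);
                    ret = IRQ_HANDLED;
            }

            return ret;     /* IRQ_NONE if the interrupt was not ours */
    }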
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 5bfc0040f..8d612ab80 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -595,7 +595,7 @@ out_free_port:
return err;
}
-static int hv_remove(struct platform_device *dev)
+static void hv_remove(struct platform_device *dev)
{
struct uart_port *port = platform_get_drvdata(dev);
@@ -608,8 +608,6 @@ static int hv_remove(struct platform_device *dev)
kfree(con_write_page);
kfree(port);
sunhv_port = NULL;
-
- return 0;
}
static const struct of_device_id hv_match[] = {
@@ -630,7 +628,7 @@ static struct platform_driver hv_driver = {
.of_match_table = hv_match,
},
.probe = hv_probe,
- .remove = hv_remove,
+ .remove_new = hv_remove,
};
static int __init sunhv_init(void)
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 4251f4e1b..99f528581 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -662,13 +662,11 @@ static int sunplus_uart_probe(struct platform_device *pdev)
return ret;
}
-static int sunplus_uart_remove(struct platform_device *pdev)
+static void sunplus_uart_remove(struct platform_device *pdev)
{
struct sunplus_uart_port *sup = platform_get_drvdata(pdev);
uart_remove_one_port(&sunplus_uart_driver, &sup->port);
-
- return 0;
}
static int __maybe_unused sunplus_uart_suspend(struct device *dev)
@@ -703,7 +701,7 @@ MODULE_DEVICE_TABLE(of, sp_uart_of_match);
static struct platform_driver sunplus_uart_platform_driver = {
.probe = sunplus_uart_probe,
- .remove = sunplus_uart_remove,
+ .remove_new = sunplus_uart_remove,
.driver = {
.name = "sunplus_uart",
.of_match_table = sp_uart_of_match,
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 6aa51a6f8..1ea2f33a0 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -443,7 +443,7 @@ static void sunsab_start_tx(struct uart_port *port)
up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
writeb(up->interrupt_mask1, &up->regs->w.imr1);
-
+
if (!test_bit(SAB82532_XPR, &up->irqflags))
return;
@@ -549,7 +549,7 @@ static int sunsab_startup(struct uart_port *port)
(void) readb(&up->regs->r.isr1);
/*
- * Now, initialize the UART
+ * Now, initialize the UART
*/
writeb(0, &up->regs->w.ccr0); /* power-down */
writeb(SAB82532_CCR0_MCE | SAB82532_CCR0_SC_NRZ |
@@ -563,7 +563,7 @@ static int sunsab_startup(struct uart_port *port)
SAB82532_MODE_RAC);
writeb(up->cached_mode, &up->regs->w.mode);
writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc);
-
+
tmp = readb(&up->regs->rw.ccr0);
tmp |= SAB82532_CCR0_PU; /* power-up */
writeb(tmp, &up->regs->rw.ccr0);
@@ -607,7 +607,7 @@ static void sunsab_shutdown(struct uart_port *port)
up->cached_dafo &= ~SAB82532_DAFO_XBRK;
writeb(up->cached_dafo, &up->regs->rw.dafo);
- /* Disable Receiver */
+ /* Disable Receiver */
up->cached_mode &= ~SAB82532_MODE_RAC;
writeb(up->cached_mode, &up->regs->rw.mode);
@@ -622,7 +622,7 @@ static void sunsab_shutdown(struct uart_port *port)
* speed the chip was configured for when the port was open).
*/
#if 0
- /* Power Down */
+ /* Power Down */
tmp = readb(&up->regs->rw.ccr0);
tmp &= ~SAB82532_CCR0_PU;
writeb(tmp, &up->regs->rw.ccr0);
@@ -649,7 +649,7 @@ static void calc_ebrg(int baud, int *n_ret, int *m_ret)
*m_ret = 0;
return;
}
-
+
/*
* We scale numbers by 10 so that we get better accuracy
* without having to use floating point. Here we increment m
@@ -788,7 +788,7 @@ static const char *sunsab_type(struct uart_port *port)
{
struct uart_sunsab_port *up = (void *)port;
static char buf[36];
-
+
sprintf(buf, "SAB82532 %s", sab82532_version[up->type]);
return buf;
}
@@ -933,7 +933,7 @@ static int sunsab_console_setup(struct console *con, char *options)
sunsab_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
uart_port_unlock_irqrestore(&up->port, flags);
-
+
return 0;
}
@@ -1066,7 +1066,7 @@ out:
return err;
}
-static int sab_remove(struct platform_device *op)
+static void sab_remove(struct platform_device *op)
{
struct uart_sunsab_port *up = platform_get_drvdata(op);
@@ -1078,8 +1078,6 @@ static int sab_remove(struct platform_device *op)
of_iounmap(&op->resource[0],
up[0].port.membase,
sizeof(union sab82532_async_regs));
-
- return 0;
}
static const struct of_device_id sab_match[] = {
@@ -1100,7 +1098,7 @@ static struct platform_driver sab_driver = {
.of_match_table = sab_match,
},
.probe = sab_probe,
- .remove = sab_remove,
+ .remove_new = sab_remove,
};
static int __init sunsab_init(void)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 1e051cc25..c8b65f4b2 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1515,7 +1515,7 @@ out_unmap:
return err;
}
-static int su_remove(struct platform_device *op)
+static void su_remove(struct platform_device *op)
{
struct uart_sunsu_port *up = platform_get_drvdata(op);
bool kbdms = false;
@@ -1536,8 +1536,6 @@ static int su_remove(struct platform_device *op)
if (kbdms)
kfree(up);
-
- return 0;
}
static const struct of_device_id su_match[] = {
@@ -1565,7 +1563,7 @@ static struct platform_driver su_driver = {
.of_match_table = su_match,
},
.probe = su_probe,
- .remove = su_remove,
+ .remove_new = su_remove,
};
static int __init sunsu_init(void)
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index d3b5e864b..c99289c6c 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1513,7 +1513,7 @@ static void zs_remove_one(struct uart_sunzilog_port *up)
uart_remove_one_port(&sunzilog_reg, &up->port);
}
-static int zs_remove(struct platform_device *op)
+static void zs_remove(struct platform_device *op)
{
struct uart_sunzilog_port *up = platform_get_drvdata(op);
struct zilog_layout __iomem *regs;
@@ -1523,8 +1523,6 @@ static int zs_remove(struct platform_device *op)
regs = sunzilog_chip_regs[up[0].port.line / 2];
of_iounmap(&op->resource[0], regs, sizeof(struct zilog_layout));
-
- return 0;
}
static const struct of_device_id zs_match[] = {
@@ -1541,7 +1539,7 @@ static struct platform_driver zs_driver = {
.of_match_table = zs_match,
},
.probe = zs_probe,
- .remove = zs_remove,
+ .remove_new = zs_remove,
};
static int __init sunzilog_init(void)
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 65069daf3..d9c78320e 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -266,7 +266,7 @@ free_tx:
return err;
}
-static int tegra_tcu_remove(struct platform_device *pdev)
+static void tegra_tcu_remove(struct platform_device *pdev)
{
struct tegra_tcu *tcu = platform_get_drvdata(pdev);
@@ -277,8 +277,6 @@ static int tegra_tcu_remove(struct platform_device *pdev)
uart_remove_one_port(&tcu->driver, &tcu->port);
uart_unregister_driver(&tcu->driver);
mbox_free_channel(tcu->tx);
-
- return 0;
}
static const struct of_device_id tegra_tcu_match[] = {
@@ -293,7 +291,7 @@ static struct platform_driver tegra_tcu_driver = {
.of_match_table = tegra_tcu_match,
},
.probe = tegra_tcu_probe,
- .remove = tegra_tcu_remove,
+ .remove_new = tegra_tcu_remove,
};
module_platform_driver(tegra_tcu_driver);
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 0cc6524f5..4bc89a9b3 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -473,7 +473,7 @@ err_mem:
return err;
}
-static int timbuart_remove(struct platform_device *dev)
+static void timbuart_remove(struct platform_device *dev)
{
struct timbuart_port *uart = platform_get_drvdata(dev);
@@ -481,8 +481,6 @@ static int timbuart_remove(struct platform_device *dev)
uart_remove_one_port(&timbuart_driver, &uart->port);
uart_unregister_driver(&timbuart_driver);
kfree(uart);
-
- return 0;
}
static struct platform_driver timbuart_platform_driver = {
@@ -490,7 +488,7 @@ static struct platform_driver timbuart_platform_driver = {
.name = "timb-uart",
},
.probe = timbuart_probe,
- .remove = timbuart_remove,
+ .remove_new = timbuart_remove,
};
module_platform_driver(timbuart_platform_driver);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 404c14aca..10ba41b7b 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -24,8 +24,13 @@
#include <linux/pm_runtime.h>
#define ULITE_NAME "ttyUL"
+#if CONFIG_SERIAL_UARTLITE_NR_UARTS > 4
+#define ULITE_MAJOR 0 /* use dynamic node allocation */
+#define ULITE_MINOR 0
+#else
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
+#endif
#define ULITE_NR_UARTS CONFIG_SERIAL_UARTLITE_NR_UARTS
/* ---------------------------------------------------------------------
@@ -62,11 +67,11 @@ static struct uart_port *console_port;
#endif
/**
- * struct uartlite_data: Driver private data
- * reg_ops: Functions to read/write registers
- * clk: Our parent clock, if present
- * baud: The baud rate configured when this device was synthesized
- * cflags: The cflags for parity and data bits
+ * struct uartlite_data - Driver private data
+ * @reg_ops: Functions to read/write registers
+ * @clk: Our parent clock, if present
+ * @baud: The baud rate configured when this device was synthesized
+ * @cflags: The cflags for parity and data bits
*/
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
@@ -890,7 +895,7 @@ of_err:
return ret;
}
-static int ulite_remove(struct platform_device *pdev)
+static void ulite_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
struct uartlite_data *pdata = port->private_data;
@@ -900,7 +905,6 @@ static int ulite_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- return 0;
}
/* work with hotplug and coldplug */
@@ -908,7 +912,7 @@ MODULE_ALIAS("platform:uartlite");
static struct platform_driver ulite_platform_driver = {
.probe = ulite_probe,
- .remove = ulite_remove,
+ .remove_new = ulite_remove,
.driver = {
.name = "uartlite",
.of_match_table = of_match_ptr(ulite_of_match),
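ttyUL's static allocation (major 204, minors from 187) only has room for a handful of ports before colliding with neighbouring char-device assignments, so configurations with more than four UARTs now fall back to a dynamically allocated major: a major of 0 tells the tty core to pick a free one at registration time. A sketch of the uart_driver these macros feed, roughly matching the driver's existing definition:

    static struct uart_driver ulite_uart_driver = {
            .driver_name    = "uartlite",
            .dev_name       = ULITE_NAME,
            .major          = ULITE_MAJOR,  /* 0 => dynamically allocated */
            .minor          = ULITE_MINOR,
            .nr             = ULITE_NR_UARTS,
    };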
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index ed7a6bb55..397b95dff 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -189,10 +189,10 @@ struct uart_qe_port {
u16 tx_fifosize;
int wait_closing;
u32 flags;
- struct qe_bd *rx_bd_base;
- struct qe_bd *rx_cur;
- struct qe_bd *tx_bd_base;
- struct qe_bd *tx_cur;
+ struct qe_bd __iomem *rx_bd_base;
+ struct qe_bd __iomem *rx_cur;
+ struct qe_bd __iomem *tx_bd_base;
+ struct qe_bd __iomem *tx_cur;
unsigned char *tx_buf;
unsigned char *rx_buf;
void *bd_virt; /* virtual address of the BD buffers */
@@ -258,7 +258,7 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
- struct qe_bd *bdp = qe_port->tx_bd_base;
+ struct qe_bd __iomem *bdp = qe_port->tx_bd_base;
while (1) {
if (ioread16be(&bdp->status) & BD_SC_READY)
@@ -330,7 +330,7 @@ static void qe_uart_stop_tx(struct uart_port *port)
*/
static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
{
- struct qe_bd *bdp;
+ struct qe_bd __iomem *bdp;
unsigned char *p;
unsigned int count;
struct uart_port *port = &qe_port->port;
@@ -341,7 +341,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
/* Pick next descriptor and fill from buffer */
bdp = qe_port->tx_cur;
- p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
+ p = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);
*p++ = port->x_char;
iowrite16be(1, &bdp->length);
@@ -368,7 +368,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
while (!(ioread16be(&bdp->status) & BD_SC_READY) && !uart_circ_empty(xmit)) {
count = 0;
- p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
+ p = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);
while (count < qe_port->tx_fifosize) {
*p++ = xmit->buf[xmit->tail];
uart_xmit_advance(port, 1);
@@ -460,7 +460,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
unsigned char ch, *cp;
struct uart_port *port = &qe_port->port;
struct tty_port *tport = &port->state->port;
- struct qe_bd *bdp;
+ struct qe_bd __iomem *bdp;
u16 status;
unsigned int flg;
@@ -487,7 +487,7 @@ static void qe_uart_int_rx(struct uart_qe_port *qe_port)
}
/* get pointer */
- cp = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port);
+ cp = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);
/* loop through the buffer */
while (i-- > 0) {
@@ -590,7 +590,7 @@ static void qe_uart_initbd(struct uart_qe_port *qe_port)
{
int i;
void *bd_virt;
- struct qe_bd *bdp;
+ struct qe_bd __iomem *bdp;
/* Set the physical address of the host memory buffers in the buffer
* descriptors, and the virtual address for us to work with.
@@ -648,7 +648,7 @@ static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
{
u32 cecr_subblock;
struct ucc_slow __iomem *uccp = qe_port->uccp;
- struct ucc_uart_pram *uccup = qe_port->uccup;
+ struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
unsigned int i;
@@ -983,7 +983,7 @@ static int qe_uart_request_port(struct uart_port *port)
qe_port->us_private = uccs;
qe_port->uccp = uccs->us_regs;
- qe_port->uccup = (struct ucc_uart_pram *) uccs->us_pram;
+ qe_port->uccup = (struct ucc_uart_pram __iomem *)uccs->us_pram;
qe_port->rx_bd_base = uccs->rx_bd;
qe_port->tx_bd_base = uccs->tx_bd;
@@ -1156,7 +1156,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
firmware = (struct qe_firmware *) fw->data;
- if (firmware->header.length != fw->size) {
+ if (be32_to_cpu(firmware->header.length) != fw->size) {
dev_err(dev, "invalid firmware\n");
goto out;
}
@@ -1459,7 +1459,7 @@ out_free:
return ret;
}
-static int ucc_uart_remove(struct platform_device *ofdev)
+static void ucc_uart_remove(struct platform_device *ofdev)
{
struct uart_qe_port *qe_port = platform_get_drvdata(ofdev);
@@ -1470,8 +1470,6 @@ static int ucc_uart_remove(struct platform_device *ofdev)
of_node_put(qe_port->np);
kfree(qe_port);
-
- return 0;
}
static const struct of_device_id ucc_uart_match[] = {
@@ -1492,7 +1490,7 @@ static struct platform_driver ucc_uart_of_driver = {
.of_match_table = ucc_uart_match,
},
.probe = ucc_uart_probe,
- .remove = ucc_uart_remove,
+ .remove_new = ucc_uart_remove,
};
static int __init ucc_uart_init(void)
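The __iomem annotations plus the be32_to_cpu() to ioread32be() conversions make the buffer-descriptor accesses visible to sparse and route them through proper MMIO accessors rather than plain loads. Reduced to a before/after sketch:

    struct qe_bd __iomem *bdp = qe_port->tx_cur;
    unsigned char *p;

    /* before: direct dereference of MMIO, invisible to sparse */
    /* p = qe2cpu_addr(be32_to_cpu(bdp->buf), qe_port); */

    /* after: big-endian MMIO read through the accessor */
    p = qe2cpu_addr(ioread32be(&bdp->buf), qe_port);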
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 66a45a634..920762d7b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1663,10 +1663,8 @@ err_out_unregister_driver:
/**
* cdns_uart_remove - called when the platform driver is unregistered
* @pdev: Pointer to the platform device structure
- *
- * Return: 0 on success, negative errno otherwise
*/
-static int cdns_uart_remove(struct platform_device *pdev)
+static void cdns_uart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
struct cdns_uart *cdns_uart_data = port->private_data;
@@ -1692,12 +1690,11 @@ static int cdns_uart_remove(struct platform_device *pdev)
if (!--instances)
uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
- return 0;
}
static struct platform_driver cdns_uart_platform_driver = {
.probe = cdns_uart_probe,
- .remove = cdns_uart_remove,
+ .remove_new = cdns_uart_remove,
.driver = {
.name = CDNS_UART_NAME,
.of_match_table = cdns_uart_of_match,
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 6b4a28bcf..02217e3c9 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -1150,16 +1150,29 @@ EXPORT_SYMBOL(unregister_sysrq_key);
#ifdef CONFIG_PROC_FS
/*
* writing 'C' to /proc/sysrq-trigger is like sysrq-C
+ * Normally, only the first character written is processed.
+ * However, if the first character is an underscore,
+ * all characters are processed.
*/
static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (count) {
+ bool bulk = false;
+ size_t i;
+
+ for (i = 0; i < count; i++) {
char c;
- if (get_user(c, buf))
+ if (get_user(c, buf + i))
return -EFAULT;
- __handle_sysrq(c, false);
+
+ if (c == '_')
+ bulk = true;
+ else
+ __handle_sysrq(c, false);
+
+ if (!bulk)
+ break;
}
return count;
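With the underscore prefix a single write can now trigger a whole sequence of sysrq actions, while writes without it keep the old first-character-only behaviour. A hypothetical user-space example (assumes sysrq is enabled; 'w' dumps blocked tasks, 'm' dumps memory info):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/sysrq-trigger", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* '_' selects bulk mode; the following keys are handled in order */
            if (write(fd, "_wm", 3) != 3) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }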
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 96617f9af..407b0d87b 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -852,9 +852,9 @@ static ssize_t iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
{
void *cookie = NULL;
unsigned long offset = 0;
- char kernel_buf[64];
ssize_t retval = 0;
size_t copied, count = iov_iter_count(to);
+ u8 kernel_buf[64];
do {
ssize_t size = min(count, sizeof(kernel_buf));
@@ -995,7 +995,7 @@ static ssize_t iterate_tty_write(struct tty_ldisc *ld, struct tty_struct *tty,
/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
if (tty->write_cnt < chunk) {
- unsigned char *buf_chunk;
+ u8 *buf_chunk;
if (chunk < 1024)
chunk = 1024;
@@ -1047,6 +1047,7 @@ out:
return ret;
}
+#ifdef CONFIG_PRINT_QUOTA_WARNING
/**
* tty_write_message - write a message to a certain tty, not just the console.
* @tty: the destination tty_struct
@@ -1057,6 +1058,8 @@ out:
* needed.
*
* We must still hold the BTM and test the CLOSING flag for the moment.
+ *
+ * This function is DEPRECATED, do not use in new code.
*/
void tty_write_message(struct tty_struct *tty, char *msg)
{
@@ -1069,6 +1072,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
tty_write_unlock(tty);
}
}
+#endif
static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
{
@@ -1145,7 +1149,7 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
*
* Locking: none for xchar method, write ordering for write method.
*/
-int tty_send_xchar(struct tty_struct *tty, char ch)
+int tty_send_xchar(struct tty_struct *tty, u8 ch)
{
bool was_stopped = tty->flow.stopped;
@@ -2274,10 +2278,10 @@ static bool tty_legacy_tiocsti __read_mostly = IS_ENABLED(CONFIG_LEGACY_TIOCSTI)
* * Called functions take tty_ldiscs_lock
* * current->signal->tty check is safe without locks
*/
-static int tiocsti(struct tty_struct *tty, char __user *p)
+static int tiocsti(struct tty_struct *tty, u8 __user *p)
{
- char ch, mbz = 0;
struct tty_ldisc *ld;
+ u8 ch;
if (!tty_legacy_tiocsti && !capable(CAP_SYS_ADMIN))
return -EIO;
@@ -2292,7 +2296,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
return -EIO;
tty_buffer_lock_exclusive(tty->port);
if (ld->ops->receive_buf)
- ld->ops->receive_buf(tty, &ch, &mbz, 1);
+ ld->ops->receive_buf(tty, &ch, NULL, 1);
tty_buffer_unlock_exclusive(tty->port);
tty_ldisc_deref(ld);
return 0;
@@ -2502,6 +2506,24 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
}
/**
+ * tty_get_tiocm - get tiocm status register
+ * @tty: tty device
+ *
+ * Obtain the modem status bits from the tty driver if the feature
+ * is supported.
+ */
+int tty_get_tiocm(struct tty_struct *tty)
+{
+ int retval = -ENOTTY;
+
+ if (tty->ops->tiocmget)
+ retval = tty->ops->tiocmget(tty);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(tty_get_tiocm);
+
+/**
* tty_tiocmget - get modem status
* @tty: tty device
* @p: pointer to result
@@ -2513,14 +2535,12 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
*/
static int tty_tiocmget(struct tty_struct *tty, int __user *p)
{
- int retval = -ENOTTY;
+ int retval;
- if (tty->ops->tiocmget) {
- retval = tty->ops->tiocmget(tty);
+ retval = tty_get_tiocm(tty);
+ if (retval >= 0)
+ retval = put_user(retval, p);
- if (retval >= 0)
- retval = put_user(retval, p);
- }
return retval;
}
@@ -3141,7 +3161,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
*
* Return: the number of characters successfully output.
*/
-int tty_put_char(struct tty_struct *tty, unsigned char ch)
+int tty_put_char(struct tty_struct *tty, u8 ch)
{
if (tty->ops->put_char)
return tty->ops->put_char(tty, ch);
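tty_get_tiocm() exports the raw modem-status query so in-kernel callers can read the TIOCM bits without going through the ioctl path's put_user(). A hedged usage sketch (the callee is hypothetical):

    int status = tty_get_tiocm(tty);

    if (status < 0)
            return status;                  /* -ENOTTY if the driver has no tiocmget() */
    if (status & TIOCM_CAR)
            handle_carrier_present();       /* hypothetical */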
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 63c125250..14cca33d2 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -171,7 +171,8 @@ EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
- * @device: parent if exists, otherwise NULL
+ * @host: serial port hardware device
+ * @parent: parent if exists, otherwise NULL
* @drvdata: driver data for the device
* @attr_grp: attribute group for the device
*
@@ -180,20 +181,20 @@ EXPORT_SYMBOL_GPL(tty_port_register_device_attr);
*/
struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
+ struct device *host, struct device *parent, void *drvdata,
const struct attribute_group **attr_grp)
{
struct device *dev;
tty_port_link_device(port, driver, index);
- dev = serdev_tty_port_register(port, device, driver, index);
+ dev = serdev_tty_port_register(port, host, parent, driver, index);
if (PTR_ERR(dev) != -ENODEV) {
/* Skip creating cdev if we registered a serdev device */
return dev;
}
- return tty_register_device_attr(driver, index, device, drvdata,
+ return tty_register_device_attr(driver, index, parent, drvdata,
attr_grp);
}
EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
@@ -203,17 +204,18 @@ EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev);
* @port: tty_port of the device
* @driver: tty_driver for this device
* @index: index of the tty
- * @device: parent if exists, otherwise NULL
+ * @host: serial port hardware controller device
+ * @parent: parent if exists, otherwise NULL
*
* Register a serdev or tty device depending on whether the parent device has
* any defined serdev clients.
*/
struct device *tty_port_register_device_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
- struct device *device)
+ struct device *host, struct device *parent)
{
return tty_port_register_device_attr_serdev(port, driver, index,
- device, NULL, NULL);
+ host, parent, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tty_port_register_device_serdev);
@@ -245,7 +247,7 @@ int tty_port_alloc_xmit_buf(struct tty_port *port)
/* We may sleep in get_zeroed_page() */
mutex_lock(&port->buf_mutex);
if (port->xmit_buf == NULL) {
- port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
+ port->xmit_buf = (u8 *)get_zeroed_page(GFP_KERNEL);
if (port->xmit_buf)
kfifo_init(&port->xmit_fifo, port->xmit_buf, PAGE_SIZE);
}
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 5e39a4f43..82d70083f 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -644,7 +644,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
if (!ct)
return 0;
- unilist = vmemdup_user(list, array_size(sizeof(*unilist), ct));
+ unilist = vmemdup_array_user(list, ct, sizeof(*unilist));
if (IS_ERR(unilist))
return PTR_ERR(unilist);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 12a192e11..a2116e135 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1772,12 +1772,10 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
return -EINVAL;
if (ct) {
-
- dia = memdup_user(a->kbdiacr,
- sizeof(struct kbdiacr) * ct);
+ dia = memdup_array_user(a->kbdiacr,
+ ct, sizeof(struct kbdiacr));
if (IS_ERR(dia))
return PTR_ERR(dia);
-
}
spin_lock_irqsave(&kbd_event_lock, flags);
@@ -1811,8 +1809,8 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
return -EINVAL;
if (ct) {
- buf = memdup_user(a->kbdiacruc,
- ct * sizeof(struct kbdiacruc));
+ buf = memdup_array_user(a->kbdiacruc,
+ ct, sizeof(struct kbdiacruc));
if (IS_ERR(buf))
return PTR_ERR(buf);
}
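memdup_array_user() and vmemdup_array_user() fold the element-count multiplication into the helper, which rejects overflow instead of letting ct * sizeof(...) silently wrap on 32-bit. Internally the array variant is roughly:

    size_t nbytes;

    if (check_mul_overflow(ct, sizeof(struct kbdiacr), &nbytes))
            return ERR_PTR(-EOVERFLOW);
    return memdup_user(a->kbdiacr, nbytes);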
diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c
index 5b1184aac..169540417 100644
--- a/drivers/ufs/core/ufs-fault-injection.c
+++ b/drivers/ufs/core/ufs-fault-injection.c
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/fault-inject.h>
#include <linux/module.h>
+#include <ufs/ufshcd.h>
#include "ufs-fault-injection.h"
static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
@@ -59,12 +60,22 @@ static int ufs_fault_set(const char *val, const struct kernel_param *kp)
return 0;
}
-bool ufs_trigger_eh(void)
+void ufs_fault_inject_hba_init(struct ufs_hba *hba)
{
- return should_fail(&ufs_trigger_eh_attr, 1);
+ hba->trigger_eh_attr = ufs_trigger_eh_attr;
+ hba->timeout_attr = ufs_timeout_attr;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+ fault_create_debugfs_attr("trigger_eh_inject", hba->debugfs_root, &hba->trigger_eh_attr);
+ fault_create_debugfs_attr("timeout_inject", hba->debugfs_root, &hba->timeout_attr);
+#endif
}
-bool ufs_fail_completion(void)
+bool ufs_trigger_eh(struct ufs_hba *hba)
{
- return should_fail(&ufs_timeout_attr, 1);
+ return should_fail(&hba->trigger_eh_attr, 1);
+}
+
+bool ufs_fail_completion(struct ufs_hba *hba)
+{
+ return should_fail(&hba->timeout_attr, 1);
}
diff --git a/drivers/ufs/core/ufs-fault-injection.h b/drivers/ufs/core/ufs-fault-injection.h
index 6d0cd8e10..996a35769 100644
--- a/drivers/ufs/core/ufs-fault-injection.h
+++ b/drivers/ufs/core/ufs-fault-injection.h
@@ -7,15 +7,20 @@
#include <linux/types.h>
#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
-bool ufs_trigger_eh(void);
-bool ufs_fail_completion(void);
+void ufs_fault_inject_hba_init(struct ufs_hba *hba);
+bool ufs_trigger_eh(struct ufs_hba *hba);
+bool ufs_fail_completion(struct ufs_hba *hba);
#else
-static inline bool ufs_trigger_eh(void)
+static inline void ufs_fault_inject_hba_init(struct ufs_hba *hba)
+{
+}
+
+static inline bool ufs_trigger_eh(struct ufs_hba *hba)
{
return false;
}
-static inline bool ufs_fail_completion(void)
+static inline bool ufs_fail_completion(struct ufs_hba *hba)
{
return false;
}
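Moving the fault-injection attributes into struct ufs_hba makes the injection points per-controller and gives every HBA its own debugfs knobs under its debugfs_root. The call sites stay simple should_fail() checks; the transfer-completion path in ufshcd.c now reads, in effect:

    if (ufs_fail_completion(hba))           /* should_fail(&hba->timeout_attr, 1) */
            return IRQ_HANDLED;             /* pretend the completion never arrived */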
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index c95906443..e6d12289e 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -7,9 +7,56 @@
#include <asm/unaligned.h>
#include <ufs/ufs.h>
+#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"
+static const char *ufs_pa_pwr_mode_to_string(enum ufs_pa_pwr_mode mode)
+{
+ switch (mode) {
+ case FAST_MODE: return "FAST_MODE";
+ case SLOW_MODE: return "SLOW_MODE";
+ case FASTAUTO_MODE: return "FASTAUTO_MODE";
+ case SLOWAUTO_MODE: return "SLOWAUTO_MODE";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufs_hs_gear_rate_to_string(enum ufs_hs_gear_rate rate)
+{
+ switch (rate) {
+ case PA_HS_MODE_A: return "HS_RATE_A";
+ case PA_HS_MODE_B: return "HS_RATE_B";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufs_pwm_gear_to_string(enum ufs_pwm_gear_tag gear)
+{
+ switch (gear) {
+ case UFS_PWM_G1: return "PWM_GEAR1";
+ case UFS_PWM_G2: return "PWM_GEAR2";
+ case UFS_PWM_G3: return "PWM_GEAR3";
+ case UFS_PWM_G4: return "PWM_GEAR4";
+ case UFS_PWM_G5: return "PWM_GEAR5";
+ case UFS_PWM_G6: return "PWM_GEAR6";
+ case UFS_PWM_G7: return "PWM_GEAR7";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
+{
+ switch (gear) {
+ case UFS_HS_G1: return "HS_GEAR1";
+ case UFS_HS_G2: return "HS_GEAR2";
+ case UFS_HS_G3: return "HS_GEAR3";
+ case UFS_HS_G4: return "HS_GEAR4";
+ case UFS_HS_G5: return "HS_GEAR5";
+ default: return "UNKNOWN";
+ }
+}
+
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
@@ -255,6 +302,35 @@ out:
return res < 0 ? res : count;
}
+static ssize_t rtc_update_ms_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.rtc_update_period);
+}
+
+static ssize_t rtc_update_ms_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int ms;
+ bool resume_period_update = false;
+
+ if (kstrtouint(buf, 0, &ms))
+ return -EINVAL;
+
+ if (!hba->dev_info.rtc_update_period && ms > 0)
+ resume_period_update = true;
+ /* Minimum and maximum update frequency should be synchronized with all UFS vendors */
+ hba->dev_info.rtc_update_period = ms;
+
+ if (resume_period_update)
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(hba->dev_info.rtc_update_period));
+ return count;
+}
+
static ssize_t enable_wb_buf_flush_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -339,6 +415,7 @@ static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
+static DEVICE_ATTR_RW(rtc_update_ms);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -351,6 +428,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
+ &dev_attr_rtc_update_ms.attr,
NULL
};
@@ -628,6 +706,78 @@ static const struct attribute_group ufs_sysfs_monitor_group = {
.attrs = ufs_sysfs_monitor_attrs,
};
+static ssize_t lane_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", hba->pwr_info.lane_rx);
+}
+
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufs_pa_pwr_mode_to_string(hba->pwr_info.pwr_rx));
+}
+
+static ssize_t rate_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufs_hs_gear_rate_to_string(hba->pwr_info.hs_rate));
+}
+
+static ssize_t gear_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", hba->pwr_info.hs_rate ?
+ ufs_hs_gear_to_string(hba->pwr_info.gear_rx) :
+ ufs_pwm_gear_to_string(hba->pwr_info.gear_rx));
+}
+
+static ssize_t dev_pm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(hba->curr_dev_pwr_mode));
+}
+
+static ssize_t link_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(hba->uic_link_state));
+}
+
+static DEVICE_ATTR_RO(lane);
+static DEVICE_ATTR_RO(mode);
+static DEVICE_ATTR_RO(rate);
+static DEVICE_ATTR_RO(gear);
+static DEVICE_ATTR_RO(dev_pm);
+static DEVICE_ATTR_RO(link_state);
+
+static struct attribute *ufs_power_info_attrs[] = {
+ &dev_attr_lane.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_rate.attr,
+ &dev_attr_gear.attr,
+ &dev_attr_dev_pm.attr,
+ &dev_attr_link_state.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_power_info_group = {
+ .name = "power_info",
+ .attrs = ufs_power_info_attrs,
+};
+
static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
u8 desc_index,
@@ -1233,6 +1383,7 @@ static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
&ufs_sysfs_monitor_group,
+ &ufs_sysfs_power_info_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
&ufs_sysfs_geometry_descriptor_group,
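The new rtc_update_ms attribute takes the RTC update period in milliseconds; 0 leaves periodic updates disabled, and writing a non-zero value when the period was 0 re-arms the delayed work. A hypothetical user-space example (the sysfs path varies per host controller and is illustrative only):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/bus/platform/devices/example.ufs/rtc_update_ms",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, "10000", 5) != 5) {       /* update the device RTC every 10 s */
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }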
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index d2d760143..3b89c9d4a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -99,6 +99,9 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+/* Default RTC update every 10 seconds */
+#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
+
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;
@@ -235,6 +238,12 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
return UFS_PM_LVL_0;
}
+static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
+{
+ return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
+ hba->active_uic_cmd || hba->uic_async_done);
+}
+
static const struct ufs_dev_quirk ufs_fixups[] = {
/* UFS cards deviations table */
{ .wmanufacturerid = UFS_VENDOR_MICRON,
@@ -289,21 +298,23 @@ static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
-static inline void ufshcd_enable_irq(struct ufs_hba *hba)
+void ufshcd_enable_irq(struct ufs_hba *hba)
{
if (!hba->is_irq_enabled) {
enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
}
+EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
-static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
+EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
@@ -677,6 +688,8 @@ static void ufshcd_device_reset(struct ufs_hba *hba)
hba->dev_info.wb_enabled = false;
hba->dev_info.wb_buf_flush_enabled = false;
}
+ if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
+ hba->dev_info.rtc_time_baseline = 0;
}
if (err != -EOPNOTSUPP)
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
@@ -1917,10 +1930,7 @@ static void ufshcd_gate_work(struct work_struct *work)
goto rel_lock;
}
- if (hba->clk_gating.active_reqs
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->outstanding_reqs || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
+ if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
goto rel_lock;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -2721,6 +2731,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
};
+ WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
+
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
@@ -2993,7 +3005,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_send_command(hba, tag, hwq);
out:
- if (ufs_trigger_eh()) {
+ if (ufs_trigger_eh(hba)) {
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4404,40 +4416,32 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
{
- unsigned long flags;
- bool update = false;
-
if (!ufshcd_is_auto_hibern8_supported(hba))
return;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit) {
- hba->ahit = ahit;
- update = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+}
+
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+ const u32 cur_ahit = READ_ONCE(hba->ahit);
- if (update &&
- !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
+ if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
+ return;
+
+ WRITE_ONCE(hba->ahit, ahit);
+ if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba);
- ufshcd_auto_hibern8_enable(hba);
+ ufshcd_configure_auto_hibern8(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-}
-
/**
* ufshcd_init_pwr_info - setting the POR (power on reset)
* values in hba power info
@@ -5652,7 +5656,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- if (ufs_fail_completion())
+ if (ufs_fail_completion(hba))
return IRQ_HANDLED;
/*
@@ -8200,6 +8204,79 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba)
ufshcd_vops_fixup_dev_quirks(hba);
}
+static void ufshcd_update_rtc(struct ufs_hba *hba)
+{
+ struct timespec64 ts64;
+ int err;
+ u32 val;
+
+ ktime_get_real_ts64(&ts64);
+
+ if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
+ dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
+ return;
+ }
+
+ /*
+ * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
+ * 2146 is required, it is recommended to choose the relative RTC mode.
+ */
+ val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
+ 0, 0, &val);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
+ else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
+ hba->dev_info.rtc_time_baseline = ts64.tv_sec;
+}
+
+static void ufshcd_rtc_work(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+
+ hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
+
+ /* Update RTC only when there are no requests in progress and UFSHCI is operational */
+ if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
+ ufshcd_update_rtc(hba);
+
+ if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(hba->dev_info.rtc_update_period));
+}
+
+static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
+ dev_info->rtc_type = UFS_RTC_ABSOLUTE;
+
+ /*
+ * Linux measures time as the number of seconds elapsed since 00:00:00 UTC
+ * on January 1st 1970, whereas the UFS absolute RTC counts from January 1st
+ * 2010 00:00, so the baseline must be adjusted accordingly.
+ */
+ dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
+ mktime64(1970, 1, 1, 0, 0, 0);
+ } else {
+ dev_info->rtc_type = UFS_RTC_RELATIVE;
+ dev_info->rtc_time_baseline = 0;
+ }
+
+ /*
+ * Ignore the TIME_PERIOD field of wPeriodicRTCUpdate because the spec does
+ * not clearly state how to derive an update period for each time unit. Keep
+ * the periodic RTC update work disabled by default and let the user configure
+ * it through the sysfs node to suit their specific circumstances.
+ */
+ dev_info->rtc_update_period = 0;
+}
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
@@ -8252,6 +8329,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ ufs_init_rtc(hba, desc_buf);
+
if (hba->ext_iid_sup)
ufshcd_ext_iid_probe(hba, desc_buf);
@@ -8804,6 +8883,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_force_reset_auto_bkops(hba);
ufshcd_set_timestamp_attr(hba);
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -8878,8 +8959,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
+ ufshcd_configure_auto_hibern8(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -9359,6 +9439,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
goto out_disable_vreg;
ufs_debugfs_hba_init(hba);
+ ufs_fault_inject_hba_init(hba);
hba->is_powered = true;
goto out;
@@ -9757,6 +9838,8 @@ vops_suspend:
ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
if (ret)
goto set_link_active;
+
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
goto out;
set_link_active:
@@ -9851,6 +9934,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_old_link_state;
ufshcd_set_timestamp_attr(hba);
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
}
if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
@@ -9873,8 +9958,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
+ ufshcd_configure_auto_hibern8(hba);
goto out;
@@ -10547,8 +10631,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
- INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
- ufshcd_rpm_dev_flush_recheck_work);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
+ INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
/* Set the default auto-hiberate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 71bd6dbc0..734d40f99 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -765,7 +765,7 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
- struct ufs_dev_params ufs_exynos_cap;
+ struct ufs_host_params host_params;
int ret;
if (!dev_req_params) {
@@ -774,10 +774,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
goto out;
}
- ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
+ ufshcd_init_host_params(&host_params);
- ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
- dev_max_params, dev_req_params);
+ ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_err("%s: failed to determine capabilities\n", __func__);
goto out;
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 0229ac0a8..5ee73ff05 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -293,9 +293,9 @@ static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
return err;
}
-static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
+static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params)
{
- ufshcd_init_pwr_dev_param(hisi_param);
+ ufshcd_init_host_params(host_params);
}
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
@@ -365,7 +365,7 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
- struct ufs_dev_params ufs_hisi_cap;
+ struct ufs_host_params host_params;
int ret = 0;
if (!dev_req_params) {
@@ -377,9 +377,8 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ufs_hisi_set_dev_cap(&ufs_hisi_cap);
- ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
- dev_max_params, dev_req_params);
+ ufs_hisi_set_dev_cap(&host_params);
+ ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
dev_err(hba->dev,
"%s: failed to determine capabilities\n", __func__);
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index fc61790d2..776bca4f7 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -996,16 +996,14 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params host_cap;
+ struct ufs_host_params host_params;
int ret;
- ufshcd_init_pwr_dev_param(&host_cap);
- host_cap.hs_rx_gear = UFS_HS_G5;
- host_cap.hs_tx_gear = UFS_HS_G5;
+ ufshcd_init_host_params(&host_params);
+ host_params.hs_rx_gear = UFS_HS_G5;
+ host_params.hs_tx_gear = UFS_HS_G5;
- ret = ufshcd_get_pwr_dev_param(&host_cap,
- dev_max_params,
- dev_req_params);
+ ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
__func__);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 0d3886455..bcbcf7589 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -4,26 +4,26 @@
*/
#include <linux/acpi.h>
-#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/devfreq.h>
+#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
#include <linux/reset-controller.h>
-#include <linux/devfreq.h>
+#include <linux/time.h>
#include <soc/qcom/ice.h>
#include <ufs/ufshcd.h>
-#include "ufshcd-pltfrm.h"
-#include <ufs/unipro.h>
-#include "ufs-qcom.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-qcom.h"
#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
#define MCQ_QCFGPTR_UNIT 0x200
@@ -47,7 +47,7 @@ enum {
TSTBUS_MAX,
};
-#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_GEAR 5
#define QCOM_UFS_MAX_LANE 2
enum {
@@ -67,31 +67,35 @@ static const struct __ufs_qcom_bw_table {
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { 14752, 1000 },
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { 29504, 1000 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { 5836800, 409600 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { 5836800, 819200 },
[MODE_MAX][0][0] = { 7643136, 307200 },
};
-static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
@@ -194,52 +198,12 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
}
#endif
-static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out, bool optional)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (!IS_ERR(clk)) {
- *clk_out = clk;
- return 0;
- }
-
- err = PTR_ERR(clk);
-
- if (optional && err == -ENOENT) {
- *clk_out = NULL;
- return 0;
- }
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get %s err %d\n", name, err);
-
- return err;
-}
-
-static int ufs_qcom_host_clk_enable(struct device *dev,
- const char *name, struct clk *clk)
-{
- int err = 0;
-
- err = clk_prepare_enable(clk);
- if (err)
- dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
-
- return err;
-}
-
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
if (!host->is_lane_clks_enabled)
return;
- clk_disable_unprepare(host->tx_l1_sync_clk);
- clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
- clk_disable_unprepare(host->rx_l0_sync_clk);
+ clk_bulk_disable_unprepare(host->num_clks, host->clks);
host->is_lane_clks_enabled = false;
}
@@ -247,73 +211,29 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
int err;
- struct device *dev = host->hba->dev;
-
- if (host->is_lane_clks_enabled)
- return 0;
- err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
- host->rx_l0_sync_clk);
+ err = clk_bulk_prepare_enable(host->num_clks, host->clks);
if (err)
return err;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
- host->tx_l0_sync_clk);
- if (err)
- goto disable_rx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
-
host->is_lane_clks_enabled = true;
return 0;
-
-disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
-disable_tx_l0:
- clk_disable_unprepare(host->tx_l0_sync_clk);
-disable_rx_l0:
- clk_disable_unprepare(host->rx_l0_sync_clk);
-
- return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (has_acpi_companion(dev))
return 0;
- err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
- &host->rx_l0_sync_clk, false);
- if (err)
+ err = devm_clk_bulk_get_all(dev, &host->clks);
+ if (err <= 0)
return err;
- err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
- &host->tx_l0_sync_clk, false);
- if (err)
- return err;
-
- /* In case of single lane per direction, don't read lane1 clocks */
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk, false);
- if (err)
- return err;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk, true);
- }
+ host->num_clks = err;
return 0;
}
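
The conversion above replaces four hand-rolled clock get/enable/unwind paths with the clk_bulk API: devm_clk_bulk_get_all() fetches every clock listed in the device's DT node and returns the count (or a negative errno), while clk_bulk_prepare_enable() and clk_bulk_disable_unprepare() handle partial-failure unwinding internally. A generic sketch of the same idiom (struct foo is illustrative):

	#include <linux/clk.h>

	struct foo {
		struct clk_bulk_data *clks;
		int num_clks;
	};

	static int foo_clks_init(struct device *dev, struct foo *foo)
	{
		int ret;

		ret = devm_clk_bulk_get_all(dev, &foo->clks);
		if (ret < 0)
			return ret;	/* lookup failed */
		foo->num_clks = ret;	/* may legitimately be zero */

		/* enables all clocks; on failure, already-enabled ones are unwound */
		return clk_bulk_prepare_enable(foo->num_clks, foo->clks);
	}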
@@ -321,7 +241,7 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err;
- u32 tx_fsm_val = 0;
+ u32 tx_fsm_val;
unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
do {
@@ -360,9 +280,7 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
- ufshcd_rmwl(host->hba, QUNIPRO_SEL,
- ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
- REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
if (host->hw_ver.major >= 0x05)
ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
@@ -376,18 +294,15 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
*/
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- bool reenable_intr = false;
+ bool reenable_intr;
- if (!host->core_reset) {
- dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ if (!host->core_reset)
return 0;
- }
reenable_intr = hba->is_irq_enabled;
- disable_irq(hba->irq);
- hba->is_irq_enabled = false;
+ ufshcd_disable_irq(hba);
ret = reset_control_assert(host->core_reset);
if (ret) {
@@ -404,16 +319,16 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
usleep_range(200, 210);
ret = reset_control_deassert(host->core_reset);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
__func__, ret);
+ return ret;
+ }
usleep_range(1000, 1100);
- if (reenable_intr) {
- enable_irq(hba->irq);
- hba->is_irq_enabled = true;
- }
+ if (reenable_intr)
+ ufshcd_enable_irq(hba);
return 0;
}
@@ -422,18 +337,8 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- return UFS_HS_G2;
- } else if (host->hw_ver.major >= 0x4) {
+ if (host->hw_ver.major >= 0x4)
return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
- }
/* Default is HS-G3 */
return UFS_HS_G3;
@@ -442,14 +347,29 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
struct phy *phy = host->generic_phy;
+ enum phy_mode mode;
int ret;
+ /*
+ * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations.
+ * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
+ * so that the subsequent power mode change sticks to Rate-A.
+ */
+ if (host->hw_ver.major == 0x5) {
+ if (host->phy_gear == UFS_HS_G5)
+ host_params->hs_rate = PA_HS_MODE_A;
+ else
+ host_params->hs_rate = PA_HS_MODE_B;
+ }
+
+ mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
+
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
if (ret)
- dev_warn(hba->dev, "%s: host reset returned %d\n",
- __func__, ret);
+ return ret;
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
@@ -459,7 +379,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
return ret;
}
- phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);
+ ret = phy_set_mode_ext(phy, mode, host->phy_gear);
+ if (ret)
+ goto out_disable_phy;
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
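
The generic PHY calls here follow the usual init -> set_mode -> power_on ladder, where each completed step must be unwound if a later one fails. A condensed sketch of that sequence with illustrative error handling (the driver's actual unwind label appears in the next hunk):

	ret = phy_init(phy);		/* calibrate the PHY */
	if (ret)
		return ret;

	ret = phy_set_mode_ext(phy, PHY_MODE_UFS_HS_A, gear);
	if (ret)
		goto out_phy_exit;

	ret = phy_power_on(phy);	/* start serdes and PHY clocks */
	if (ret)
		goto out_phy_exit;

	return 0;

out_phy_exit:
	phy_exit(phy);
	return ret;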
@@ -489,9 +411,8 @@ out_disable_phy:
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
- REG_UFS_CFG2);
+ ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL,
+ REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
mb();
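
ufshcd_rmwl() used above is the UFS core's read-modify-write helper; conceptually it behaves roughly like the following sketch (see include/ufs/ufshcd.h for the real definition):

	static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
	{
		u32 tmp;

		tmp = ufshcd_readl(hba, reg);
		tmp &= ~mask;		/* clear the field */
		tmp |= (val & mask);	/* set the new value within the field */
		ufshcd_writel(hba, tmp, reg);
	}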
@@ -501,11 +422,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
+ int err;
switch (status) {
case PRE_CHANGE:
- ufs_qcom_power_up_sequence(hba);
+ err = ufs_qcom_power_up_sequence(hba);
+ if (err)
+ return err;
+
/*
* The PHY PLL output is the source of tx/rx lane symbol
* clocks, hence, enable the lane clocks only after PHY
@@ -544,41 +468,16 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
- u32 core_clk_period_in_ns;
- u32 tx_clk_cycles_per_us = 0;
unsigned long core_clk_rate = 0;
- u32 core_clk_cycles_per_us = 0;
-
- static u32 pwm_fr_table[][2] = {
- {UFS_PWM_G1, 0x1},
- {UFS_PWM_G2, 0x1},
- {UFS_PWM_G3, 0x1},
- {UFS_PWM_G4, 0x1},
- };
-
- static u32 hs_fr_table_rA[][2] = {
- {UFS_HS_G1, 0x1F},
- {UFS_HS_G2, 0x3e},
- {UFS_HS_G3, 0x7D},
- };
-
- static u32 hs_fr_table_rB[][2] = {
- {UFS_HS_G1, 0x24},
- {UFS_HS_G2, 0x49},
- {UFS_HS_G3, 0x92},
- };
+ u32 core_clk_cycles_per_us;
/*
- * The Qunipro controller does not use following registers:
- * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
- * UFS_REG_PA_LINK_STARTUP_TIMER.
- * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
+ * The UTP controller uses the SYS1CLK_1US_REG register for Interrupt
* Aggregation logic.
* It is mandatory to write SYS1CLK_1US_REG register on UFS host
* controller V4.0.0 onwards.
*/
- if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
- !ufshcd_is_intr_aggr_allowed(hba))
+ if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
if (gear == 0) {
@@ -611,79 +510,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
mb();
}
- if (ufs_qcom_cap_qunipro(host))
- return 0;
-
- core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
- core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
- core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
- switch (hs) {
- case FASTAUTO_MODE:
- case FAST_MODE:
- if (rate == PA_HS_MODE_A) {
- if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rA));
- return -EINVAL;
- }
- tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
- } else if (rate == PA_HS_MODE_B) {
- if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rB));
- return -EINVAL;
- }
- tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
- } else {
- dev_err(hba->dev, "%s: invalid rate = %d\n",
- __func__, rate);
- return -EINVAL;
- }
- break;
- case SLOWAUTO_MODE:
- case SLOW_MODE:
- if (gear > ARRAY_SIZE(pwm_fr_table)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(pwm_fr_table));
- return -EINVAL;
- }
- tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
- break;
- case UNCHANGED:
- default:
- dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- return -EINVAL;
- }
-
- if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
- (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
- /* this register 2 fields shall be written at once */
- ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
- REG_UFS_TX_SYMBOL_CLK_NS_US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
-
- if (update_link_startup_timer && host->hw_ver.major != 0x5) {
- ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_CFG0);
- /*
- * make sure that this configuration is applied before
- * we return
- */
- mb();
- }
-
return 0;
}
@@ -691,7 +517,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
switch (status) {
case PRE_CHANGE:
@@ -702,11 +527,9 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
return -EINVAL;
}
- if (ufs_qcom_cap_qunipro(host)) {
- err = ufs_qcom_set_core_clk_ctrl(hba, true);
- if (err)
- dev_err(hba->dev, "cfg core clk ctrl failed\n");
- }
+ err = ufs_qcom_set_core_clk_ctrl(hba, true);
+ if (err)
+ dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
* Some UFS devices (and may be host) have issues if LCC is
* enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -898,7 +721,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params ufs_qcom_cap;
+ struct ufs_host_params *host_params = &host->host_params;
int ret = 0;
if (!dev_req_params) {
@@ -908,15 +731,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
- ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
-
- /* This driver only supports symmetic gear setting i.e., hs_tx_gear == hs_rx_gear */
- ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
-
- ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
- dev_max_params,
- dev_req_params);
+ ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
if (ret) {
dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
@@ -924,12 +739,22 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
/*
- * Update phy_gear only when the gears are scaled to a higher value. This is
- * because, the PHY gear settings are backwards compatible and we only need to
- * change the PHY gear settings while scaling to higher gears.
+ * During UFS driver probe, always update the PHY gear to match the negotiated
+ * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
+ * the second init can program the optimal PHY settings. This allows one to start
+ * the first init with either the minimum or the maximum supported gear.
*/
- if (dev_req_params->gear_tx > host->phy_gear)
- host->phy_gear = dev_req_params->gear_tx;
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
+ /*
+ * Skip REINIT if the negotiated gear matches with the
+ * initial phy_gear. Otherwise, update the phy_gear to
+ * program the optimal gear setting during REINIT.
+ */
+ if (host->phy_gear == dev_req_params->gear_tx)
+ hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+ else
+ host->phy_gear = dev_req_params->gear_tx;
+ }
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -1005,12 +830,7 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (host->hw_ver.major == 0x1)
- return ufshci_version(1, 1);
- else
- return ufshci_version(2, 0);
+ return ufshci_version(2, 0);
}
/**
@@ -1026,46 +846,69 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x01) {
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+ if (host->hw_ver.major == 0x2)
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
- if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
- hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+ if (host->hw_ver.major > 0x3)
+ hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+}
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
- }
+static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
+{
+ struct ufs_host_params *host_params = &host->host_params;
+ u32 val, dev_major;
- if (host->hw_ver.major == 0x2) {
- hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
+ host->phy_gear = host_params->hs_tx_gear;
- if (!ufs_qcom_cap_qunipro(host))
- /* Legacy UniPro mode still need following quirks */
- hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
- }
+ if (host->hw_ver.major < 0x4) {
+ /*
+ * For controllers whose major HW version is < 4, power up the
+ * PHY using the minimum supported gear (UFS_HS_G2). Switching
+ * to the max gear will be performed during reinit if supported.
+ * For newer controllers, whose major HW version is >= 4, power
+ * up the PHY using the max supported gear.
+ */
+ host->phy_gear = UFS_HS_G2;
+ } else if (host->hw_ver.major >= 0x5) {
+ val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG);
+ dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);
- if (host->hw_ver.major > 0x3)
- hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+ /*
+ * If the UFS device version is populated, remove the REINIT
+ * quirk: the negotiated gear won't change during boot, so
+ * there is no need to reinit.
+ */
+ if (dev_major != 0x0)
+ host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
+
+ /*
+ * For UFS 3.1 devices and older, power up the PHY using the
+ * HS-G4 PHY gear to save power.
+ */
+ if (dev_major > 0x0 && dev_major < 0x4)
+ host->phy_gear = UFS_HS_G4;
+ }
}
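
The spare-config read above relies on the bitfield helpers from <linux/bitfield.h>: GENMASK(7, 4) builds the mask 0xf0, FIELD_GET() extracts and right-shifts the masked field, and FIELD_PREP() does the inverse (as used with ESI_VEC_MASK later in this patch). A tiny illustration:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define UFS_DEV_VER_MAJOR_MASK	GENMASK(7, 4)	/* bits 7..4, i.e. 0xf0 */

	u32 val   = 0x3c;					/* example register value */
	u32 major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);	/* (0x3c & 0xf0) >> 4 = 0x3 */
	u32 back  = FIELD_PREP(UFS_DEV_VER_MAJOR_MASK, major);	/* 0x3 << 4 = 0x30 */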
-static void ufs_qcom_set_caps(struct ufs_hba *hba)
+static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
+
+ ufshcd_init_host_params(host_params);
+
+ /* This driver only supports symmetric gear setting, i.e., hs_tx_gear == hs_rx_gear */
+ host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
+}
+static void ufs_qcom_set_caps(struct ufs_hba *hba)
+{
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
- if (host->hw_ver.major >= 0x2) {
- host->caps = UFS_QCOM_CAP_QUNIPRO |
- UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
- }
}
/**
@@ -1188,16 +1031,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
{
int err;
struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
struct ufs_qcom_host *host;
- struct resource *res;
struct ufs_clk_info *clki;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
+ if (!host)
return -ENOMEM;
- }
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
@@ -1235,34 +1074,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->device_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(host->device_reset)) {
- err = PTR_ERR(host->device_reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to acquire reset gpio: %d\n", err);
+ err = dev_err_probe(dev, PTR_ERR(host->device_reset),
+ "Failed to acquire device reset gpio\n");
goto out_variant_clear;
}
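
dev_err_probe() collapses the common probe-error idiom: it returns the error code unchanged, logs at error level for real failures, and stays quiet for -EPROBE_DEFER (recording the deferral reason for debugfs instead). The open-coded equivalent it replaces here looks roughly like this:

	err = PTR_ERR(host->device_reset);
	if (err != -EPROBE_DEFER)
		dev_err(dev, "Failed to acquire device reset gpio: %d\n", err);
	return err;

	/* versus the one-liner: */
	return dev_err_probe(dev, PTR_ERR(host->device_reset),
			     "Failed to acquire device reset gpio\n");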
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
- /*
- * for newer controllers, device reference clock control bit has
- * moved inside UFS controller register address space itself.
- */
- if (host->hw_ver.major >= 0x02) {
- host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
- host->dev_ref_clk_en_mask = BIT(26);
- } else {
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dev_ref_clk_ctrl_mem");
- if (res) {
- host->dev_ref_clk_ctrl_mmio =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
- host->dev_ref_clk_ctrl_mmio = NULL;
- host->dev_ref_clk_en_mask = BIT(5);
- }
- }
+ host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+ host->dev_ref_clk_en_mask = BIT(26);
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk_unipro"))
@@ -1275,6 +1096,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
+ ufs_qcom_set_host_params(hba);
+ ufs_qcom_set_phy_gear(host);
err = ufs_qcom_ice_init(host);
if (err)
@@ -1282,9 +1105,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
- if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
- ufs_qcom_hosts[hba->dev->id] = host;
-
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
if (err)
@@ -1292,12 +1112,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- /*
- * Power up the PHY using the minimum supported gear (UFS_HS_G2).
- * Switching to max gear will be performed during reinit if supported.
- */
- host->phy_gear = UFS_HS_G2;
-
return 0;
out_variant_clear:
@@ -1391,7 +1205,7 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
- u32 cycles_in_1us;
+ u32 cycles_in_1us = 0;
u32 core_clk_ctrl_reg;
int err;
@@ -1446,9 +1260,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int ret;
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
attr->hs_rate, false, true);
if (ret) {
@@ -1466,13 +1277,9 @@ static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
u32 core_clk_ctrl_reg;
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
@@ -1491,11 +1298,6 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
/* set unipro core clock attributes and clear clock divider */
return ufs_qcom_set_core_clk_ctrl(hba, false);
}
@@ -1504,7 +1306,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
+ int err;
/* check the host controller state before sending hibern8 cmd */
if (!ufshcd_is_hba_active(hba))
@@ -1777,7 +1579,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
struct platform_device *pdev = to_platform_device(hba->dev);
struct ufshcd_res_info *res;
struct resource *res_mem, *res_mcq;
- int i, ret = 0;
+ int i, ret;
memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
@@ -1931,7 +1733,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- goto out;
+ return ret;
}
msi_lock_descs(hba->dev);
@@ -1960,17 +1762,13 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
platform_msi_domain_free_irqs(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0) {
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
- REG_UFS_CFG3);
- }
+ host->hw_ver.step == 0)
+ ufshcd_rmwl(hba, ESI_VEC_MASK,
+ FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
ufshcd_mcq_enable_esi(hba);
- }
-
-out:
- if (!ret)
host->esi_enabled = true;
+ }
return ret;
}
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 9950a0089..9dd9a391e 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -10,22 +10,17 @@
#include <soc/qcom/ice.h>
#include <ufs/ufshcd.h>
-#define MAX_UFS_QCOM_HOSTS 1
-#define MAX_U32 (~(u32)0)
#define MPHY_TX_FSM_STATE 0x41
#define TX_FSM_HIBERN8 0x1
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
-#define BUS_VECTOR_NAME_LEN 32
#define MAX_SUPP_MAC 64
+#define MAX_ESI_VEC 32
#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
+#define UFS_DEV_VER_MAJOR_MASK GENMASK(7, 4)
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
@@ -56,6 +51,8 @@ enum {
UFS_AH8_CFG = 0xFC,
REG_UFS_CFG3 = 0x271C,
+
+ REG_UFS_DEBUG_SPARE_CFG = 0x284C,
};
/* QCOM UFS host controller vendor specific debug registers */
@@ -93,9 +90,6 @@ enum {
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
-#define UFS_PHY_RESET_ENABLE 1
-#define UFS_PHY_RESET_DISABLE 0
-
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN BIT(0)
#define UARM_HW_CGC_EN BIT(1)
@@ -106,6 +100,9 @@ enum {
#define TMRLUT_HW_CGC_EN BIT(6)
#define OCSC_HW_CGC_EN BIT(7)
+/* bit definitions for REG_UFS_CFG3 register */
+#define ESI_VEC_MASK GENMASK(22, 12)
+
/* bit definitions for REG_UFS_PARAM0 */
#define MAX_HS_GEAR_MASK GENMASK(6, 4)
#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
@@ -118,13 +115,6 @@ enum {
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
-/* bit offset */
-#define OFFSET_CLK_NS_REG 0xa
-
-/* bit masks */
-#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
-#define MASK_CLK_NS_REG GENMASK(23, 10)
-
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
@@ -158,8 +148,7 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
- REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
/*
* Make sure assertion of ufs phy reset is written to
@@ -170,8 +159,7 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
- REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
/*
* Make sure de-assertion of ufs phy reset is written to
@@ -195,28 +183,11 @@ struct ufs_qcom_testbus {
struct gpio_desc;
struct ufs_qcom_host {
- /*
- * Set this capability if host controller supports the QUniPro mode
- * and if driver wants the Host controller to operate in QUniPro mode.
- * Note: By default this capability will be kept enabled if host
- * controller supports the QUniPro mode.
- */
- #define UFS_QCOM_CAP_QUNIPRO 0x1
-
- /*
- * Set this capability if host controller can retain the secure
- * configuration even after UFS controller core power collapse.
- */
- #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
- u32 caps;
-
struct phy *generic_phy;
struct ufs_hba *hba;
struct ufs_pa_layer_attr dev_req_params;
- struct clk *rx_l0_sync_clk;
- struct clk *tx_l0_sync_clk;
- struct clk *rx_l1_sync_clk;
- struct clk *tx_l1_sync_clk;
+ struct clk_bulk_data *clks;
+ u32 num_clks;
bool is_lane_clks_enabled;
struct icc_path *icc_ddr;
@@ -240,6 +211,7 @@ struct ufs_qcom_host {
struct gpio_desc *device_reset;
+ struct ufs_host_params host_params;
u32 phy_gear;
bool esi_enabled;
@@ -261,9 +233,4 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
-static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
-{
- return host->caps & UFS_QCOM_CAP_QUNIPRO;
-}
-
#endif /* UFS_QCOM_H_ */
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index db9d9365f..a3e69ecaf 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -339,61 +339,60 @@ static int ufshcd_parse_operating_points(struct ufs_hba *hba)
}
/**
- * ufshcd_get_pwr_dev_param - get finally agreed attributes for
- * power mode change
- * @pltfrm_param: pointer to platform parameters
+ * ufshcd_negotiate_pwr_params - find power mode settings that are supported by
+ * both the controller and the device
+ * @host_params: pointer to host parameters
* @dev_max: pointer to device attributes
* @agreed_pwr: returned agreed attributes
*
* Return: 0 on success, non-zero value on failure.
*/
-int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
- const struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
+int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
+ const struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr)
{
- int min_pltfrm_gear;
+ int min_host_gear;
int min_dev_gear;
bool is_dev_sup_hs = false;
- bool is_pltfrm_max_hs = false;
+ bool is_host_max_hs = false;
if (dev_max->pwr_rx == FAST_MODE)
is_dev_sup_hs = true;
- if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
- is_pltfrm_max_hs = true;
- min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
- pltfrm_param->hs_tx_gear);
+ if (host_params->desired_working_mode == UFS_HS_MODE) {
+ is_host_max_hs = true;
+ min_host_gear = min_t(u32, host_params->hs_rx_gear,
+ host_params->hs_tx_gear);
} else {
- min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
- pltfrm_param->pwm_tx_gear);
+ min_host_gear = min_t(u32, host_params->pwm_rx_gear,
+ host_params->pwm_tx_gear);
}
/*
- * device doesn't support HS but
- * pltfrm_param->desired_working_mode is HS,
- * thus device and pltfrm_param don't agree
+ * device doesn't support HS but host_params->desired_working_mode is HS,
+ * thus device and host_params don't agree
*/
- if (!is_dev_sup_hs && is_pltfrm_max_hs) {
+ if (!is_dev_sup_hs && is_host_max_hs) {
pr_info("%s: device doesn't support HS\n",
__func__);
return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
+ } else if (is_dev_sup_hs && is_host_max_hs) {
/*
* since device supports HS, it supports FAST_MODE.
- * since pltfrm_param->desired_working_mode is also HS
+ * since host_params->desired_working_mode is also HS
* then final decision (FAST/FASTAUTO) is done according
* to pltfrm_params as it is the restricting factor
*/
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
+ agreed_pwr->pwr_rx = host_params->rx_pwr_hs;
agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
} else {
/*
- * here pltfrm_param->desired_working_mode is PWM.
+ * here host_params->desired_working_mode is PWM.
* it doesn't matter whether device supports HS or PWM,
- * in both cases pltfrm_param->desired_working_mode will
+ * in both cases host_params->desired_working_mode will
* determine the mode
*/
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
+ agreed_pwr->pwr_rx = host_params->rx_pwr_pwm;
agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
}
@@ -403,9 +402,9 @@ int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
* the same decision will be made for rx
*/
agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
- pltfrm_param->tx_lanes);
+ host_params->tx_lanes);
agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
- pltfrm_param->rx_lanes);
+ host_params->rx_lanes);
/* device maximum gear is the minimum between device rx and tx gears */
min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
@@ -418,26 +417,26 @@ int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
* what is the gear, as it is the one that also decided previously what
* pwr the device will be configured to.
*/
- if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
- (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
+ if ((is_dev_sup_hs && is_host_max_hs) ||
+ (!is_dev_sup_hs && !is_host_max_hs)) {
agreed_pwr->gear_rx =
- min_t(u32, min_dev_gear, min_pltfrm_gear);
+ min_t(u32, min_dev_gear, min_host_gear);
} else if (!is_dev_sup_hs) {
agreed_pwr->gear_rx = min_dev_gear;
} else {
- agreed_pwr->gear_rx = min_pltfrm_gear;
+ agreed_pwr->gear_rx = min_host_gear;
}
agreed_pwr->gear_tx = agreed_pwr->gear_rx;
- agreed_pwr->hs_rate = pltfrm_param->hs_rate;
+ agreed_pwr->hs_rate = host_params->hs_rate;
return 0;
}
-EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
+EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);
-void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
+void ufshcd_init_host_params(struct ufs_host_params *host_params)
{
- *dev_param = (struct ufs_dev_params){
+ *host_params = (struct ufs_host_params){
.tx_lanes = UFS_LANE_2,
.rx_lanes = UFS_LANE_2,
.hs_rx_gear = UFS_HS_G3,
@@ -452,7 +451,7 @@ void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
.desired_working_mode = UFS_HS_MODE,
};
}
-EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
+EXPORT_SYMBOL_GPL(ufshcd_init_host_params);
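
Taken together, the renamed helpers give vendor glue drivers a two-step idiom: seed defaults with ufshcd_init_host_params(), optionally tighten the limits, then negotiate against the device's capabilities. A sketch of a typical PRE_CHANGE handler, mirroring the mediatek/qcom callers updated above (the UFS_HS_G4 limit is an illustrative choice):

	struct ufs_host_params host_params;
	int ret;

	ufshcd_init_host_params(&host_params);
	host_params.hs_rx_gear = UFS_HS_G4;	/* example vendor limit */
	host_params.hs_tx_gear = UFS_HS_G4;

	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params,
					  dev_req_params);
	if (ret)
		dev_err(hba->dev, "%s: failed to determine capabilities\n",
			__func__);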
/**
* ufshcd_pltfrm_init - probe routine of the driver
diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h
index a86a3ada4..df387be52 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.h
+++ b/drivers/ufs/host/ufshcd-pltfrm.h
@@ -10,7 +10,7 @@
#define UFS_PWM_MODE 1
#define UFS_HS_MODE 2
-struct ufs_dev_params {
+struct ufs_host_params {
u32 pwm_rx_gear; /* pwm rx gear to work in */
u32 pwm_tx_gear; /* pwm tx gear to work in */
u32 hs_rx_gear; /* hs rx gear to work in */
@@ -25,10 +25,10 @@ struct ufs_dev_params {
u32 desired_working_mode;
};
-int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param,
- const struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr);
-void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
+int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
+ const struct ufs_pa_layer_attr *dev_max,
+ struct ufs_pa_layer_attr *agreed_pwr);
+void ufshcd_init_host_params(struct ufs_host_params *host_params);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
int ufshcd_populate_vreg(struct device *dev, const char *name,
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 5812f7ea7..16703815b 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -546,7 +546,7 @@ MODULE_PARM_DESC(annex,
#define uea_wait(sc, cond, timeo) \
({ \
- int _r = wait_event_interruptible_timeout(sc->sync_q, \
+ int _r = wait_event_freezable_timeout(sc->sync_q, \
(cond) || kthread_should_stop(), timeo); \
if (kthread_should_stop()) \
_r = -ENODEV; \
@@ -1896,7 +1896,6 @@ static int uea_kthread(void *data)
ret = sc->stat(sc);
if (ret != -EAGAIN)
uea_wait(sc, 0, msecs_to_jiffies(1000));
- try_to_freeze();
}
uea_leaves(INS_TO_USBDEV(sc));
return ret;
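
The switch to wait_event_freezable_timeout() folds the explicit try_to_freeze() call into the wait itself: the thread now enters the freezer while blocked instead of checking once per loop iteration. A minimal freezable kthread loop using the same idiom (struct my_ctx is illustrative, not the ueagle code):

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/wait.h>

	struct my_ctx {
		wait_queue_head_t wq;
		bool event_pending;
	};

	static int my_thread(void *data)
	{
		struct my_ctx *ctx = data;

		while (!kthread_should_stop()) {
			/* sleeps up to 1 s, and parks in the freezer during suspend */
			wait_event_freezable_timeout(ctx->wq,
						     ctx->event_pending ||
						     kthread_should_stop(), HZ);
			/* ... handle ctx->event_pending ... */
		}
		return 0;
	}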
@@ -2252,7 +2251,7 @@ static ssize_t stat_status_show(struct device *dev, struct device_attribute *att
sc = dev_to_uea(dev);
if (!sc)
goto out;
- ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state);
+ ret = sysfs_emit(buf, "%08x\n", sc->stats.phy.state);
out:
mutex_unlock(&uea_mutex);
return ret;
@@ -2318,19 +2317,19 @@ static ssize_t stat_human_status_show(struct device *dev,
switch (modem_state) {
case 0:
- ret = sprintf(buf, "Modem is booting\n");
+ ret = sysfs_emit(buf, "Modem is booting\n");
break;
case 1:
- ret = sprintf(buf, "Modem is initializing\n");
+ ret = sysfs_emit(buf, "Modem is initializing\n");
break;
case 2:
- ret = sprintf(buf, "Modem is operational\n");
+ ret = sysfs_emit(buf, "Modem is operational\n");
break;
case 3:
- ret = sprintf(buf, "Modem synchronization failed\n");
+ ret = sysfs_emit(buf, "Modem synchronization failed\n");
break;
default:
- ret = sprintf(buf, "Modem state is unknown\n");
+ ret = sysfs_emit(buf, "Modem state is unknown\n");
break;
}
out:
@@ -2364,7 +2363,7 @@ static ssize_t stat_delin_show(struct device *dev, struct device_attribute *attr
delin = "LOSS";
}
- ret = sprintf(buf, "%s\n", delin);
+ ret = sysfs_emit(buf, "%s\n", delin);
out:
mutex_unlock(&uea_mutex);
return ret;
@@ -2384,7 +2383,7 @@ static ssize_t stat_##name##_show(struct device *dev, \
sc = dev_to_uea(dev); \
if (!sc) \
goto out; \
- ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \
+ ret = sysfs_emit(buf, "%08x\n", sc->stats.phy.name); \
if (reset) \
sc->stats.phy.name = 0; \
out: \
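
sysfs_emit() is the preferred replacement for sprintf()/snprintf() in sysfs show() callbacks: it knows the buffer is a full PAGE_SIZE page, sanity-checks that buf is page-aligned, and can never overrun. A minimal show() callback in the same style (struct my_dev is illustrative):

	static ssize_t state_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		struct my_dev *mydev = dev_get_drvdata(dev);

		/* sysfs_emit() bounds the write to the sysfs PAGE_SIZE buffer */
		return sysfs_emit(buf, "%08x\n", mydev->state);
	}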
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index b1b46c7c6..fd1beb10b 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1132,6 +1132,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
u16 total_tdl = 0;
struct scatterlist *s = NULL;
bool sg_supported = !!(request->num_mapped_sgs);
+ u32 ioc = request->no_interrupt ? 0 : TRB_IOC;
num_trb_req = sg_supported ? request->num_mapped_sgs : 1;
@@ -1288,11 +1289,11 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
control |= pcs;
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
- control |= TRB_IOC | TRB_ISP;
+ control |= ioc | TRB_ISP;
} else {
/* for last element in TD or in SG list */
if (sg_iter == (num_trb - 1) && sg_iter != 0)
- control |= pcs | TRB_IOC | TRB_ISP;
+ control |= pcs | ioc | TRB_ISP;
}
if (sg_iter)
@@ -1323,7 +1324,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
priv_req->num_of_trb = num_trb;
if (sg_iter == 1)
- trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
+ trb->control |= cpu_to_le32(ioc | TRB_ISP);
if (priv_dev->dev_ver < DEV_VER_V2 &&
(priv_ep->flags & EP_TDLCHK_EN)) {
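
The new ioc variable implements usb_request->no_interrupt: when a gadget queues a request with no_interrupt set, the driver omits the TRB_IOC completion-interrupt flag from the TRBs it builds, so no event is raised when the transfer finishes. The pattern in isolation:

	/* honor the gadget's request not to be interrupted on completion */
	u32 ioc = request->no_interrupt ? 0 : TRB_IOC;

	control |= ioc | TRB_ISP;	/* was: control |= TRB_IOC | TRB_ISP */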
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index 2c1aca84f..3ef8e3c87 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -87,16 +87,20 @@ static int cdns3_plat_probe(struct platform_device *pdev)
cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
if (cdns->dev_irq < 0)
- return cdns->dev_irq;
+ return dev_err_probe(dev, cdns->dev_irq,
+ "Failed to get peripheral IRQ\n");
regs = devm_platform_ioremap_resource_byname(pdev, "dev");
if (IS_ERR(regs))
- return PTR_ERR(regs);
+ return dev_err_probe(dev, PTR_ERR(regs),
+ "Failed to get dev base\n");
+
cdns->dev_regs = regs;
cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
if (cdns->otg_irq < 0)
- return cdns->otg_irq;
+ return dev_err_probe(dev, cdns->otg_irq,
+ "Failed to get otg IRQ\n");
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
if (!res) {
@@ -119,7 +123,8 @@ static int cdns3_plat_probe(struct platform_device *pdev)
cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
if (IS_ERR(cdns->usb2_phy))
- return PTR_ERR(cdns->usb2_phy);
+ return dev_err_probe(dev, PTR_ERR(cdns->usb2_phy),
+ "Failed to get cdn3,usb2-phy\n");
ret = phy_init(cdns->usb2_phy);
if (ret)
@@ -127,7 +132,8 @@ static int cdns3_plat_probe(struct platform_device *pdev)
cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
if (IS_ERR(cdns->usb3_phy))
- return PTR_ERR(cdns->usb3_phy);
+ return dev_err_probe(dev, PTR_ERR(cdns->usb3_phy),
+ "Failed to get cdn3,usb3-phy\n");
ret = phy_init(cdns->usb3_phy);
if (ret)
diff --git a/drivers/usb/cdns3/cdns3-starfive.c b/drivers/usb/cdns3/cdns3-starfive.c
index a7265b86e..c04d196ab 100644
--- a/drivers/usb/cdns3/cdns3-starfive.c
+++ b/drivers/usb/cdns3/cdns3-starfive.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* cdns3-starfive.c - StarFive specific Glue layer for Cadence USB Controller
*
* Copyright (C) 2023 StarFive Technology Co., Ltd.
diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
index ad617b745..cd138acdc 100644
--- a/drivers/usb/cdns3/cdnsp-debug.h
+++ b/drivers/usb/cdns3/cdnsp-debug.h
@@ -187,202 +187,202 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
switch (type) {
case TRB_LINK:
- ret = snprintf(str, size,
- "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
- field1, field0, GET_INTR_TARGET(field2),
- cdnsp_trb_type_string(type),
- field3 & TRB_IOC ? 'I' : 'i',
- field3 & TRB_CHAIN ? 'C' : 'c',
- field3 & TRB_TC ? 'T' : 't',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+ field1, field0, GET_INTR_TARGET(field2),
+ cdnsp_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_TC ? 'T' : 't',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_TRANSFER:
case TRB_COMPLETION:
case TRB_PORT_STATUS:
case TRB_HC_EVENT:
- ret = snprintf(str, size,
- "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
- " len %ld slot %ld flags %c:%c",
- ep_num, ep_id % 2 ? "out" : "in",
- TRB_TO_EP_INDEX(field3),
- cdnsp_trb_type_string(type), field1, field0,
- cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
- EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
- field3 & EVENT_DATA ? 'E' : 'e',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+ " len %ld slot %ld flags %c:%c",
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3),
+ cdnsp_trb_type_string(type), field1, field0,
+ cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+ EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+ field3 & EVENT_DATA ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_MFINDEX_WRAP:
- ret = snprintf(str, size, "%s: flags %c",
- cdnsp_trb_type_string(type),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size, "%s: flags %c",
+ cdnsp_trb_type_string(type),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_SETUP:
- ret = snprintf(str, size,
- "type '%s' bRequestType %02x bRequest %02x "
- "wValue %02x%02x wIndex %02x%02x wLength %d "
- "length %ld TD size %ld intr %ld Setup ID %ld "
- "flags %c:%c:%c",
- cdnsp_trb_type_string(type),
- field0 & 0xff,
- (field0 & 0xff00) >> 8,
- (field0 & 0xff000000) >> 24,
- (field0 & 0xff0000) >> 16,
- (field1 & 0xff00) >> 8,
- field1 & 0xff,
- (field1 & 0xff000000) >> 16 |
- (field1 & 0xff0000) >> 16,
- TRB_LEN(field2), GET_TD_SIZE(field2),
- GET_INTR_TARGET(field2),
- TRB_SETUPID_TO_TYPE(field3),
- field3 & TRB_IDT ? 'D' : 'd',
- field3 & TRB_IOC ? 'I' : 'i',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "type '%s' bRequestType %02x bRequest %02x "
+ "wValue %02x%02x wIndex %02x%02x wLength %d "
+ "length %ld TD size %ld intr %ld Setup ID %ld "
+ "flags %c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field0 & 0xff,
+ (field0 & 0xff00) >> 8,
+ (field0 & 0xff000000) >> 24,
+ (field0 & 0xff0000) >> 16,
+ (field1 & 0xff00) >> 8,
+ field1 & 0xff,
+ (field1 & 0xff000000) >> 16 |
+ (field1 & 0xff0000) >> 16,
+ TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ TRB_SETUPID_TO_TYPE(field3),
+ field3 & TRB_IDT ? 'D' : 'd',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_DATA:
- ret = snprintf(str, size,
- "type '%s' Buffer %08x%08x length %ld TD size %ld "
- "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
- cdnsp_trb_type_string(type),
- field1, field0, TRB_LEN(field2),
- GET_TD_SIZE(field2),
- GET_INTR_TARGET(field2),
- field3 & TRB_IDT ? 'D' : 'i',
- field3 & TRB_IOC ? 'I' : 'i',
- field3 & TRB_CHAIN ? 'C' : 'c',
- field3 & TRB_NO_SNOOP ? 'S' : 's',
- field3 & TRB_ISP ? 'I' : 'i',
- field3 & TRB_ENT ? 'E' : 'e',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "type '%s' Buffer %08x%08x length %ld TD size %ld "
+ "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ field3 & TRB_IDT ? 'D' : 'i',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STATUS:
- ret = snprintf(str, size,
- "Buffer %08x%08x length %ld TD size %ld intr"
- "%ld type '%s' flags %c:%c:%c:%c",
- field1, field0, TRB_LEN(field2),
- GET_TD_SIZE(field2),
- GET_INTR_TARGET(field2),
- cdnsp_trb_type_string(type),
- field3 & TRB_IOC ? 'I' : 'i',
- field3 & TRB_CHAIN ? 'C' : 'c',
- field3 & TRB_ENT ? 'E' : 'e',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "Buffer %08x%08x length %ld TD size %ld intr"
+ "%ld type '%s' flags %c:%c:%c:%c",
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ cdnsp_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_NORMAL:
case TRB_ISOC:
case TRB_EVENT_DATA:
case TRB_TR_NOOP:
- ret = snprintf(str, size,
- "type '%s' Buffer %08x%08x length %ld "
- "TD size %ld intr %ld "
- "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
- cdnsp_trb_type_string(type),
- field1, field0, TRB_LEN(field2),
- GET_TD_SIZE(field2),
- GET_INTR_TARGET(field2),
- field3 & TRB_BEI ? 'B' : 'b',
- field3 & TRB_IDT ? 'T' : 't',
- field3 & TRB_IOC ? 'I' : 'i',
- field3 & TRB_CHAIN ? 'C' : 'c',
- field3 & TRB_NO_SNOOP ? 'S' : 's',
- field3 & TRB_ISP ? 'I' : 'i',
- field3 & TRB_ENT ? 'E' : 'e',
- field3 & TRB_CYCLE ? 'C' : 'c',
- !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+ ret = scnprintf(str, size,
+ "type '%s' Buffer %08x%08x length %ld "
+ "TD size %ld intr %ld "
+ "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ field3 & TRB_BEI ? 'B' : 'b',
+ field3 & TRB_IDT ? 'T' : 't',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c',
+ !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
break;
case TRB_CMD_NOOP:
case TRB_ENABLE_SLOT:
- ret = snprintf(str, size, "%s: flags %c",
- cdnsp_trb_type_string(type),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size, "%s: flags %c",
+ cdnsp_trb_type_string(type),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_DISABLE_SLOT:
- ret = snprintf(str, size, "%s: slot %ld flags %c",
- cdnsp_trb_type_string(type),
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size, "%s: slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_ADDR_DEV:
- ret = snprintf(str, size,
- "%s: ctx %08x%08x slot %ld flags %c:%c",
- cdnsp_trb_type_string(type), field1, field0,
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_BSR ? 'B' : 'b',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c:%c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_BSR ? 'B' : 'b',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_CONFIG_EP:
- ret = snprintf(str, size,
- "%s: ctx %08x%08x slot %ld flags %c:%c",
- cdnsp_trb_type_string(type), field1, field0,
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_DC ? 'D' : 'd',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c:%c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_DC ? 'D' : 'd',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_EVAL_CONTEXT:
- ret = snprintf(str, size,
- "%s: ctx %08x%08x slot %ld flags %c",
- cdnsp_trb_type_string(type), field1, field0,
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_RESET_EP:
case TRB_HALT_ENDPOINT:
- ret = snprintf(str, size,
- "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
- cdnsp_trb_type_string(type),
- ep_num, ep_id % 2 ? "out" : "in",
- TRB_TO_EP_INDEX(field3), field1, field0,
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STOP_RING:
- ret = snprintf(str, size,
- "%s: ep%d%s(%d) slot %ld sp %d flags %c",
- cdnsp_trb_type_string(type),
- ep_num, ep_id % 2 ? "out" : "in",
- TRB_TO_EP_INDEX(field3),
- TRB_TO_SLOT_ID(field3),
- TRB_TO_SUSPEND_PORT(field3),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ep%d%s(%d) slot %ld sp %d flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3),
+ TRB_TO_SLOT_ID(field3),
+ TRB_TO_SUSPEND_PORT(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_SET_DEQ:
- ret = snprintf(str, size,
- "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
- cdnsp_trb_type_string(type),
- ep_num, ep_id % 2 ? "out" : "in",
- TRB_TO_EP_INDEX(field3), field1, field0,
- TRB_TO_STREAM_ID(field2),
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), field1, field0,
+ TRB_TO_STREAM_ID(field2),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_RESET_DEV:
- ret = snprintf(str, size, "%s: slot %ld flags %c",
- cdnsp_trb_type_string(type),
- TRB_TO_SLOT_ID(field3),
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size, "%s: slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_ENDPOINT_NRDY:
temp = TRB_TO_HOST_STREAM(field2);
- ret = snprintf(str, size,
- "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
- cdnsp_trb_type_string(type),
- ep_num, ep_id % 2 ? "out" : "in",
- TRB_TO_EP_INDEX(field3), temp,
- temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
- temp == STREAM_REJECTED ? "(REJECTED)" : "",
- TRB_TO_DEV_STREAM(field0),
- field3 & TRB_STAT ? 'S' : 's',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ ret = scnprintf(str, size,
+ "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), temp,
+ temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+ temp == STREAM_REJECTED ? "(REJECTED)" : "",
+ TRB_TO_DEV_STREAM(field0),
+ field3 & TRB_STAT ? 'S' : 's',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
default:
- ret = snprintf(str, size,
- "type '%s' -> raw %08x %08x %08x %08x",
- cdnsp_trb_type_string(type),
- field0, field1, field2, field3);
+ ret = scnprintf(str, size,
+ "type '%s' -> raw %08x %08x %08x %08x",
+ cdnsp_trb_type_string(type),
+ field0, field1, field2, field3);
}
- if (ret >= size)
- pr_info("CDNSP: buffer overflowed.\n");
+ if (ret == size - 1)
+ pr_info("CDNSP: buffer may be truncated.\n");
return str;
}
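
The snprintf() to scnprintf() conversion matters for the chained "str + ret, size - ret" calls below: snprintf() returns the length the output would have had, so after truncation ret can exceed size and the next call in the chain is handed an out-of-bounds pointer and a bogus length; scnprintf() returns the number of characters actually written (excluding the NUL), so the running offset always stays inside the buffer. An illustration of the difference:

	char buf[8];
	int ret;

	ret = snprintf(buf, sizeof(buf), "0123456789");
	/* ret == 10: the would-be length; buf + ret points past the buffer */

	ret = scnprintf(buf, sizeof(buf), "0123456789");
	/* ret == 7: chars actually stored ("0123456", NUL-terminated) */

This is also why the overflow check above becomes "ret == size - 1": with scnprintf() the return value can never reach size, so a result of size - 1 merely suggests the output may have been truncated.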
@@ -465,32 +465,32 @@ static inline const char *cdnsp_decode_portsc(char *str, size_t size,
{
int ret;
- ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
- portsc & PORT_POWER ? "Powered" : "Powered-off",
- portsc & PORT_CONNECT ? "Connected" : "Not-connected",
- portsc & PORT_PED ? "Enabled" : "Disabled",
- cdnsp_portsc_link_state_string(portsc),
- DEV_PORT_SPEED(portsc));
+ ret = scnprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
+ portsc & PORT_POWER ? "Powered" : "Powered-off",
+ portsc & PORT_CONNECT ? "Connected" : "Not-connected",
+ portsc & PORT_PED ? "Enabled" : "Disabled",
+ cdnsp_portsc_link_state_string(portsc),
+ DEV_PORT_SPEED(portsc));
if (portsc & PORT_RESET)
- ret += snprintf(str + ret, size - ret, "In-Reset ");
+ ret += scnprintf(str + ret, size - ret, "In-Reset ");
- ret += snprintf(str + ret, size - ret, "Change: ");
+ ret += scnprintf(str + ret, size - ret, "Change: ");
if (portsc & PORT_CSC)
- ret += snprintf(str + ret, size - ret, "CSC ");
+ ret += scnprintf(str + ret, size - ret, "CSC ");
if (portsc & PORT_WRC)
- ret += snprintf(str + ret, size - ret, "WRC ");
+ ret += scnprintf(str + ret, size - ret, "WRC ");
if (portsc & PORT_RC)
- ret += snprintf(str + ret, size - ret, "PRC ");
+ ret += scnprintf(str + ret, size - ret, "PRC ");
if (portsc & PORT_PLC)
- ret += snprintf(str + ret, size - ret, "PLC ");
+ ret += scnprintf(str + ret, size - ret, "PLC ");
if (portsc & PORT_CEC)
- ret += snprintf(str + ret, size - ret, "CEC ");
- ret += snprintf(str + ret, size - ret, "Wake: ");
+ ret += scnprintf(str + ret, size - ret, "CEC ");
+ ret += scnprintf(str + ret, size - ret, "Wake: ");
if (portsc & PORT_WKCONN_E)
- ret += snprintf(str + ret, size - ret, "WCE ");
+ ret += scnprintf(str + ret, size - ret, "WCE ");
if (portsc & PORT_WKDISC_E)
- ret += snprintf(str + ret, size - ret, "WDE ");
+ ret += scnprintf(str + ret, size - ret, "WDE ");
return str;
}
@@ -562,20 +562,20 @@ static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
avg = EP_AVG_TRB_LENGTH(tx_info);
- ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
- cdnsp_ep_state_string(ep_state), mult,
- max_pstr, lsa ? "LSA " : "");
+ ret = scnprintf(str, size, "State %s mult %d max P. Streams %d %s",
+ cdnsp_ep_state_string(ep_state), mult,
+ max_pstr, lsa ? "LSA " : "");
- ret += snprintf(str + ret, size - ret,
- "interval %d us max ESIT payload %d CErr %d ",
- (1 << interval) * 125, esit, cerr);
+ ret += scnprintf(str + ret, size - ret,
+ "interval %d us max ESIT payload %d CErr %d ",
+ (1 << interval) * 125, esit, cerr);
- ret += snprintf(str + ret, size - ret,
- "Type %s %sburst %d maxp %d deq %016llx ",
- cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
- burst, maxp, deq);
+ ret += scnprintf(str + ret, size - ret,
+ "Type %s %sburst %d maxp %d deq %016llx ",
+ cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
+ burst, maxp, deq);
- ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
+ ret += scnprintf(str + ret, size - ret, "avg trb len %d", avg);
return str;
}
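
The snprintf()-to-scnprintf() conversions in this file all close the same latent hole: snprintf() returns the length the output *would* have needed, so chained "str + ret, size - ret" arithmetic can step past the buffer once truncation occurs, while scnprintf() reports only what was actually stored. A minimal userspace sketch of the distinction, using a stand-in rather than the kernel's real helper:

#include <stdarg.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's scnprintf(), for illustration only:
 * returns the number of characters actually stored (excluding the NUL),
 * so chained "buf + ret, size - ret" writes can never run past the buffer.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i < 0)
		return 0;	/* encoding error: nothing usable stored */
	return i >= (int)size ? (int)size - 1 : i;
}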
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index e28bb2f26..ae9a6a17e 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -96,6 +96,7 @@ struct ci_hdrc_imx_data {
struct usb_phy *phy;
struct platform_device *ci_pdev;
struct clk *clk;
+ struct clk *clk_wakeup;
struct imx_usbmisc_data *usbmisc_data;
bool supports_runtime_pm;
bool override_phy_control;
@@ -199,7 +200,7 @@ static int imx_get_clks(struct device *dev)
data->clk_ipg = devm_clk_get(dev, "ipg");
if (IS_ERR(data->clk_ipg)) {
- /* If the platform only needs one clocks */
+ /* If the platform only needs one primary clock */
data->clk = devm_clk_get(dev, NULL);
if (IS_ERR(data->clk)) {
ret = PTR_ERR(data->clk);
@@ -208,6 +209,13 @@ static int imx_get_clks(struct device *dev)
PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
return ret;
}
+ /*
+ * Get the wakeup clock. Not all platforms need to handle
+ * this clock, so make it optional.
+ */
+ data->clk_wakeup = devm_clk_get_optional(dev, "usb_wakeup_clk");
+ if (IS_ERR(data->clk_wakeup))
+ ret = dev_err_probe(dev, PTR_ERR(data->clk_wakeup),
+ "Failed to get wakeup clk\n");
return ret;
}
@@ -423,6 +431,10 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (ret)
goto disable_hsic_regulator;
+ ret = clk_prepare_enable(data->clk_wakeup);
+ if (ret)
+ goto err_wakeup_clk;
+
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
@@ -504,6 +516,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
err_clk:
+ clk_disable_unprepare(data->clk_wakeup);
+err_wakeup_clk:
imx_disable_unprepare_clks(dev);
disable_hsic_regulator:
if (data->hsic_pad_regulator)
@@ -530,6 +544,7 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
usb_phy_shutdown(data->phy);
if (data->ci_pdev) {
imx_disable_unprepare_clks(&pdev->dev);
+ clk_disable_unprepare(data->clk_wakeup);
if (data->plat_data->flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
if (data->hsic_pad_regulator)
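
The wakeup-clock hunk above leans on the optional-clock contract: devm_clk_get_optional() returns NULL, not an error pointer, when the clock is simply absent, and the clk API treats a NULL clk as a no-op. A hedged sketch of that pattern; the function name is invented, the API calls are real:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_wakeup_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	/* NULL (not an error) when the clock is simply absent. */
	clk = devm_clk_get_optional(dev, "usb_wakeup_clk");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Failed to get wakeup clk\n");

	*out = clk;	/* clk_prepare_enable(NULL) is a harmless no-op */
	return 0;
}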
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ca71df4f3..835bf2428 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -890,7 +890,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
if (ret)
return ERR_PTR(ret);
- id = ida_simple_get(&ci_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&ci_ida, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
@@ -920,7 +920,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
err:
platform_device_put(pdev);
put_id:
- ida_simple_remove(&ci_ida, id);
+ ida_free(&ci_ida, id);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ci_hdrc_add_device);
@@ -929,7 +929,7 @@ void ci_hdrc_remove_device(struct platform_device *pdev)
{
int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&ci_ida, id);
+ ida_free(&ci_ida, id);
}
EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
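
The ida_simple_get()/ida_simple_remove() pair above is converted to the modern equivalents: ida_alloc(ida, gfp) is shorthand for requesting any non-negative id and ida_free() releases it. A minimal sketch:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* ida_alloc(ida, gfp) == old ida_simple_get(ida, 0, 0, gfp). */
static int example_get_id(void)
{
	return ida_alloc(&example_ida, GFP_KERNEL);	/* id >= 0 or -errno */
}

static void example_put_id(int id)
{
	ida_free(&example_ida, id);	/* old ida_simple_remove() */
}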
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 0b7bd3c64..2d7f61627 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -688,7 +688,8 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
int n = hw_ep_bit(hwep->num, hwep->dir);
- if (ci->rev == CI_REVISION_24)
+ if (ci->rev == CI_REVISION_24 ||
+ ci->rev == CI_REVISION_22)
if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
reprime_dtd(ci, hwep, node);
hwreq->req.status = -EALREADY;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c8262e2f2..c553decb5 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -485,7 +485,6 @@ out_free_mem:
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
- int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@@ -500,10 +499,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
- used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
- if (used)
- goto out;
-
+ set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
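
For reference on the bitop swapped above: test_and_set_bit() atomically sets a bit and returns its previous value, which only matters when the caller branches on that prior state. The hunk drops the early return on an already-set WDM_RESPONDING, so a plain set_bit() expresses the remaining intent. An illustrative contrast:

#include <linux/bitops.h>

static unsigned long example_flags;

static void example(void)
{
	/* Branch on prior state: needs the atomic read-modify-write. */
	if (test_and_set_bit(0, &example_flags))
		return;		/* bit was already set; someone beat us */

	/* Prior value irrelevant: plain set_bit() says exactly that. */
	set_bit(1, &example_flags);
}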
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f58a0299f..e01b1913d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -189,13 +189,13 @@ static int usb_create_newid_files(struct usb_driver *usb_drv)
goto exit;
if (usb_drv->probe != NULL) {
- error = driver_create_file(&usb_drv->drvwrap.driver,
+ error = driver_create_file(&usb_drv->driver,
&driver_attr_new_id);
if (error == 0) {
- error = driver_create_file(&usb_drv->drvwrap.driver,
+ error = driver_create_file(&usb_drv->driver,
&driver_attr_remove_id);
if (error)
- driver_remove_file(&usb_drv->drvwrap.driver,
+ driver_remove_file(&usb_drv->driver,
&driver_attr_new_id);
}
}
@@ -209,9 +209,9 @@ static void usb_remove_newid_files(struct usb_driver *usb_drv)
return;
if (usb_drv->probe != NULL) {
- driver_remove_file(&usb_drv->drvwrap.driver,
+ driver_remove_file(&usb_drv->driver,
&driver_attr_remove_id);
- driver_remove_file(&usb_drv->drvwrap.driver,
+ driver_remove_file(&usb_drv->driver,
&driver_attr_new_id);
}
}
@@ -290,7 +290,10 @@ static int usb_probe_device(struct device *dev)
* specialised device drivers prior to setting the
* use_generic_driver bit.
*/
- error = udriver->probe(udev);
+ if (udriver->probe)
+ error = udriver->probe(udev);
+ else if (!udriver->generic_subclass)
+ error = -EINVAL;
if (error == -ENODEV && udriver != &usb_generic_driver &&
(udriver->id_table || udriver->match)) {
udev->use_generic_driver = 1;
@@ -549,7 +552,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (!iface->authorized)
return -ENODEV;
- dev->driver = &driver->drvwrap.driver;
+ dev->driver = &driver->driver;
usb_set_intfdata(iface, data);
iface->needs_binding = 0;
@@ -612,7 +615,7 @@ void usb_driver_release_interface(struct usb_driver *driver,
struct device *dev = &iface->dev;
/* this should never happen, don't release something that's not ours */
- if (!dev->driver || dev->driver != &driver->drvwrap.driver)
+ if (!dev->driver || dev->driver != &driver->driver)
return;
/* don't release from within disconnect() */
@@ -947,7 +950,7 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
int ret;
/* Don't reprobe if current driver isn't usb_generic_driver */
- if (dev->driver != &usb_generic_driver.drvwrap.driver)
+ if (dev->driver != &usb_generic_driver.driver)
return 0;
udev = to_usb_device(dev);
@@ -961,6 +964,11 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
return 0;
}
+bool is_usb_device_driver(const struct device_driver *drv)
+{
+ return drv->probe == usb_probe_device;
+}
+
/**
* usb_register_device_driver - register a USB device (not interface) driver
* @new_udriver: USB operations for the device driver
@@ -980,15 +988,14 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
if (usb_disabled())
return -ENODEV;
- new_udriver->drvwrap.for_devices = 1;
- new_udriver->drvwrap.driver.name = new_udriver->name;
- new_udriver->drvwrap.driver.bus = &usb_bus_type;
- new_udriver->drvwrap.driver.probe = usb_probe_device;
- new_udriver->drvwrap.driver.remove = usb_unbind_device;
- new_udriver->drvwrap.driver.owner = owner;
- new_udriver->drvwrap.driver.dev_groups = new_udriver->dev_groups;
+ new_udriver->driver.name = new_udriver->name;
+ new_udriver->driver.bus = &usb_bus_type;
+ new_udriver->driver.probe = usb_probe_device;
+ new_udriver->driver.remove = usb_unbind_device;
+ new_udriver->driver.owner = owner;
+ new_udriver->driver.dev_groups = new_udriver->dev_groups;
- retval = driver_register(&new_udriver->drvwrap.driver);
+ retval = driver_register(&new_udriver->driver);
if (!retval) {
pr_info("%s: registered new device driver %s\n",
@@ -1020,7 +1027,7 @@ void usb_deregister_device_driver(struct usb_device_driver *udriver)
pr_info("%s: deregistering device driver %s\n",
usbcore_name, udriver->name);
- driver_unregister(&udriver->drvwrap.driver);
+ driver_unregister(&udriver->driver);
}
EXPORT_SYMBOL_GPL(usb_deregister_device_driver);
@@ -1048,18 +1055,17 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
if (usb_disabled())
return -ENODEV;
- new_driver->drvwrap.for_devices = 0;
- new_driver->drvwrap.driver.name = new_driver->name;
- new_driver->drvwrap.driver.bus = &usb_bus_type;
- new_driver->drvwrap.driver.probe = usb_probe_interface;
- new_driver->drvwrap.driver.remove = usb_unbind_interface;
- new_driver->drvwrap.driver.owner = owner;
- new_driver->drvwrap.driver.mod_name = mod_name;
- new_driver->drvwrap.driver.dev_groups = new_driver->dev_groups;
+ new_driver->driver.name = new_driver->name;
+ new_driver->driver.bus = &usb_bus_type;
+ new_driver->driver.probe = usb_probe_interface;
+ new_driver->driver.remove = usb_unbind_interface;
+ new_driver->driver.owner = owner;
+ new_driver->driver.mod_name = mod_name;
+ new_driver->driver.dev_groups = new_driver->dev_groups;
spin_lock_init(&new_driver->dynids.lock);
INIT_LIST_HEAD(&new_driver->dynids.list);
- retval = driver_register(&new_driver->drvwrap.driver);
+ retval = driver_register(&new_driver->driver);
if (retval)
goto out;
@@ -1074,7 +1080,7 @@ out:
return retval;
out_newid:
- driver_unregister(&new_driver->drvwrap.driver);
+ driver_unregister(&new_driver->driver);
pr_err("%s: error %d registering interface driver %s\n",
usbcore_name, retval, new_driver->name);
@@ -1099,7 +1105,7 @@ void usb_deregister(struct usb_driver *driver)
usbcore_name, driver->name);
usb_remove_newid_files(driver);
- driver_unregister(&driver->drvwrap.driver);
+ driver_unregister(&driver->driver);
usb_free_dynids(driver);
}
EXPORT_SYMBOL_GPL(usb_deregister);
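
The drvwrap removal changes how usbcore tells device drivers from interface drivers: instead of a for_devices flag in a wrapper struct, the embedded struct device_driver is classified by its bus-level probe callback, as the new is_usb_device_driver() shows. A reduced sketch of the idea, with illustrative names:

#include <linux/device.h>

/* Illustrative bus-level probe shared by all "device" drivers. */
static int example_probe_device(struct device *dev)
{
	return 0;
}

static bool example_is_device_driver(const struct device_driver *drv)
{
	/* Classification by callback identity replaces the wrapper flag. */
	return drv->probe == example_probe_device;
}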
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 740342a28..b134bff5c 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -59,10 +59,26 @@ int usb_choose_configuration(struct usb_device *udev)
int num_configs;
int insufficient_power = 0;
struct usb_host_config *c, *best;
+ struct usb_device_driver *udriver;
+
+ /*
+ * If a USB device (not an interface) doesn't have a driver then the
+ * kernel has no business trying to select or install a configuration
+ * for it.
+ */
+ if (!udev->dev.driver)
+ return -1;
+ udriver = to_usb_device_driver(udev->dev.driver);
if (usb_device_is_owned(udev))
return 0;
+ if (udriver->choose_configuration) {
+ i = udriver->choose_configuration(udev);
+ if (i >= 0)
+ return i;
+ }
+
best = NULL;
c = udev->config;
num_configs = udev->descriptor.bNumConfigurations;
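
usb_choose_configuration() now consults an optional per-driver hook before falling back to the generic heuristic. A hedged sketch of a usb_device_driver supplying that hook; the selection policy here is invented purely for illustration:

#include <linux/usb.h>

static int example_choose_configuration(struct usb_device *udev)
{
	/* Invented policy: prefer the second configuration when present. */
	if (udev->descriptor.bNumConfigurations > 1)
		return udev->config[1].desc.bConfigurationValue;

	return -1;	/* negative: fall back to the generic heuristic */
}

static struct usb_device_driver example_udriver = {
	.name			= "example",
	.choose_configuration	= example_choose_configuration,
};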
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 80145bd4b..64e54163f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -60,6 +60,12 @@
#define USB_PING_RESPONSE_TIME 400 /* ns */
#define USB_REDUCE_FRAME_INTR_BINTERVAL 9
+/*
+ * The SET_ADDRESS request timeout will be 500 ms when
+ * USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT quirk flag is set.
+ */
+#define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -4667,7 +4673,12 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
+ unsigned int timeout_ms = USB_CTRL_SET_TIMEOUT;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+
+ if (hub->hdev->quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
+ timeout_ms = USB_SHORT_SET_ADDRESS_REQ_TIMEOUT;
/*
* The host controller will choose the device address,
@@ -4680,11 +4691,11 @@ static int hub_set_address(struct usb_device *udev, int devnum)
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
if (hcd->driver->address_device)
- retval = hcd->driver->address_device(hcd, udev);
+ retval = hcd->driver->address_device(hcd, udev, timeout_ms);
else
retval = usb_control_msg(udev, usb_sndaddr0pipe(),
USB_REQ_SET_ADDRESS, 0, devnum, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
+ NULL, 0, timeout_ms);
if (retval == 0) {
update_devnum(udev, devnum);
/* Device now using proper address. */
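
hub_set_address() now derives the control-transfer timeout from the parent hub's quirks. A condensed sketch of that selection, mirroring the values in the hunk (usb_sndaddr0pipe() is the hub.c helper used above):

#include <linux/usb.h>
#include <linux/usb/quirks.h>

static int example_set_address(struct usb_device *udev, int devnum,
			       u32 parent_quirks)
{
	unsigned int timeout_ms = USB_CTRL_SET_TIMEOUT;

	/* Shorten the timeout only when the parent hub carries the quirk. */
	if (parent_quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
		timeout_ms = 500;	/* USB_SHORT_SET_ADDRESS_REQ_TIMEOUT */

	return usb_control_msg(udev, usb_sndaddr0pipe(),
			       USB_REQ_SET_ADDRESS, 0, devnum, 0,
			       NULL, 0, timeout_ms);
}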
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index a5776531b..97ff2073c 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -448,8 +448,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
- if (port_dev->child)
+ if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
+ usb_unlocked_disable_lpm(port_dev->child);
+ }
}
static const struct dev_pm_ops usb_port_pm_ops = {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 15e9bd180..b4783574b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -138,6 +138,9 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp)
case 'o':
flags |= USB_QUIRK_HUB_SLOW_RESET;
break;
+ case 'p':
+ flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
+ break;
/* Ignore unrecognized flag characters */
}
}
@@ -527,6 +530,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+ /* APTIV AUTOMOTIVE HUB */
+ { USB_DEVICE(0x2c48, 0x0132), .driver_info =
+ USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
+
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 2a938cf47..dc8d9228a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -431,7 +431,7 @@ struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
struct device *dev;
argb.minor = minor;
- argb.drv = &drv->drvwrap.driver;
+ argb.drv = &drv->driver;
dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 60363153f..bfecb5077 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -175,13 +175,7 @@ static inline int is_root_hub(struct usb_device *udev)
return (udev->parent == NULL);
}
-/* Do the same for device drivers and interface drivers. */
-
-static inline int is_usb_device_driver(struct device_driver *drv)
-{
- return container_of(drv, struct usbdrv_wrap, driver)->
- for_devices;
-}
+extern bool is_usb_device_driver(const struct device_driver *drv);
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 79582b102..994a78ad0 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 frame_desc_idx;
- struct urb *usb_urb = qtd->urb->priv;
+ struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_urb = qtd->urb->priv;
+
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
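
The dwc2 hunk fixes a textbook use-before-check: the declaration initializer dereferenced qtd->urb before the NULL test that guards it. The generic shape of the bug and its fix, with illustrative types:

#include <linux/errno.h>

struct inner { int value; };
struct outer { struct inner *in; };

static int example_broken(struct outer *o)
{
	int v = o->in->value;	/* BUG: dereferences o->in before the check */

	if (!o->in)
		return -EINVAL;

	return v;
}

static int example_fixed(struct outer *o)
{
	int v;

	if (!o->in)
		return -EINVAL;

	v = o->in->value;	/* dereference only after validation */
	return v;
}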
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index fb03162ae..eb677c3cf 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -130,6 +130,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
p->lpm_clock_gating = false;
p->besl = false;
p->hird_threshold_en = false;
+ p->no_clock_gating = true;
}
static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 8de1ab851..31684cdaa 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1330,6 +1330,18 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_config_threshold(dwc);
+ /*
+ * Modify this for all supported Super Speed ports when
+ * multiport support is added.
+ */
+ if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
+ (DWC3_IP_IS(DWC31)) &&
+ dwc->maximum_speed == USB_SPEED_SUPER) {
+ reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
+ reg |= DWC3_LLUCTL_FORCE_GEN1;
+ dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
+ }
+
return 0;
err_power_off_phy:
@@ -2305,12 +2317,15 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+
ret = dwc3_resume_common(dwc, PMSG_RESUME);
- if (ret)
+ if (ret) {
+ pm_runtime_set_suspended(dev);
return ret;
+ }
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
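
The dwc3 resume reordering marks the device runtime-active before the resume work runs, so runtime-PM-aware code called during resume sees a consistent state, and it rolls the status back if resume fails. A sketch of the ordering, where do_resume_work() is a hypothetical placeholder:

#include <linux/pm_runtime.h>

static int do_resume_work(struct device *dev)
{
	return 0;	/* placeholder for the controller-specific resume */
}

static int example_resume(struct device *dev)
{
	int ret;

	/* Mark active first so runtime-PM-aware callees see the truth. */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);

	ret = do_resume_work(dev);
	if (ret) {
		pm_runtime_set_suspended(dev);	/* roll back on failure */
		return ret;
	}

	pm_runtime_enable(dev);
	return 0;
}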
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 80265ef60..893b1e694 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -172,6 +172,8 @@
#define DWC3_OEVTEN 0xcc0C
#define DWC3_OSTS 0xcc10
+#define DWC3_LLUCTL 0xd024
+
/* Bit fields */
/* Global SoC Bus Configuration INCRx Register 0 */
@@ -657,6 +659,9 @@
#define DWC3_OSTS_VBUSVLD BIT(1)
#define DWC3_OSTS_CONIDSTS BIT(0)
+/* Force Gen1 speed on Gen2 link */
+#define DWC3_LLUCTL_FORCE_GEN1 BIT(10)
+
/* Structures */
struct dwc3_trb;
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index a1e15f2ff..8ee448068 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -363,8 +363,10 @@ static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
}
ret = clk_prepare_enable(dwc3_imx->hsio_clk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
return ret;
+ }
ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index fdf6d5d3c..dbd6a5b2b 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -57,7 +57,7 @@ struct dwc3_acpi_pdata {
u32 qscratch_base_offset;
u32 qscratch_base_size;
u32 dwc3_core_base_size;
- int hs_phy_irq_index;
+ int qusb2_phy_irq_index;
int dp_hs_phy_irq_index;
int dm_hs_phy_irq_index;
int ss_phy_irq_index;
@@ -73,7 +73,7 @@ struct dwc3_qcom {
int num_clocks;
struct reset_control *resets;
- int hs_phy_irq;
+ int qusb2_phy_irq;
int dp_hs_phy_irq;
int dm_hs_phy_irq;
int ss_phy_irq;
@@ -372,7 +372,7 @@ static void dwc3_qcom_disable_wakeup_irq(int irq)
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
- dwc3_qcom_disable_wakeup_irq(qcom->hs_phy_irq);
+ dwc3_qcom_disable_wakeup_irq(qcom->qusb2_phy_irq);
if (qcom->usb2_speed == USB_SPEED_LOW) {
dwc3_qcom_disable_wakeup_irq(qcom->dm_hs_phy_irq);
@@ -389,7 +389,7 @@ static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
{
- dwc3_qcom_enable_wakeup_irq(qcom->hs_phy_irq, 0);
+ dwc3_qcom_enable_wakeup_irq(qcom->qusb2_phy_irq, 0);
/*
* Configure DP/DM line interrupts based on the USB2 device attached to
@@ -542,19 +542,19 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
int irq;
int ret;
- irq = dwc3_qcom_get_irq(pdev, "hs_phy_irq",
- pdata ? pdata->hs_phy_irq_index : -1);
+ irq = dwc3_qcom_get_irq(pdev, "qusb2_phy",
+ pdata ? pdata->qusb2_phy_irq_index : -1);
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
- "qcom_dwc3 HS", qcom);
+ "qcom_dwc3 QUSB2", qcom);
if (ret) {
- dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
+ dev_err(qcom->dev, "qusb2_phy_irq failed: %d\n", ret);
return ret;
}
- qcom->hs_phy_irq = irq;
+ qcom->qusb2_phy_irq = irq;
}
irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
@@ -1058,7 +1058,7 @@ static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
.qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
.qscratch_base_size = SDM845_QSCRATCH_SIZE,
.dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .hs_phy_irq_index = 1,
+ .qusb2_phy_irq_index = 1,
.dp_hs_phy_irq_index = 4,
.dm_hs_phy_irq_index = 3,
.ss_phy_irq_index = 2
@@ -1068,7 +1068,7 @@ static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
.qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
.qscratch_base_size = SDM845_QSCRATCH_SIZE,
.dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .hs_phy_irq_index = 1,
+ .qusb2_phy_irq_index = 1,
.dp_hs_phy_irq_index = 4,
.dm_hs_phy_irq_index = 3,
.ss_phy_irq_index = 2,
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 5b7e92f47..6095f4dee 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -293,11 +293,15 @@ static int dwc3_xlnx_probe(struct platform_device *pdev)
goto err_clk_put;
pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0)
+ goto err_pm_set_suspended;
+
pm_suspend_ignore_children(dev, false);
- pm_runtime_get_sync(dev);
+ return pm_runtime_resume_and_get(dev);
- return 0;
+err_pm_set_suspended:
+ pm_runtime_set_suspended(dev);
err_clk_put:
clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
@@ -315,7 +319,6 @@ static void dwc3_xlnx_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
priv_data->num_clocks = 0;
- pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 7bf810a0c..8c5aaf860 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -404,9 +404,9 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
else if (td->hw_alt_next != list_end)
mark = '/';
}
- temp = snprintf(next, size,
- "\n\t%p%c%s len=%d %08x urb %p",
- td, mark, ({ char *tmp;
+ temp = scnprintf(next, size,
+ "\n\t%p%c%s len=%d %08x urb %p",
+ td, mark, ({ char *tmp;
switch ((scratch>>8)&0x03) {
case 0:
tmp = "out";
@@ -424,15 +424,11 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
(scratch >> 16) & 0x7fff,
scratch,
td->urb);
- if (size < temp)
- temp = size;
size -= temp;
next += temp;
}
- temp = snprintf(next, size, "\n");
- if (size < temp)
- temp = size;
+ temp = scnprintf(next, size, "\n");
size -= temp;
next += temp;
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index f7ea84070..0bae12e34 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -1094,10 +1094,10 @@ static int fotg210_udc_stop(struct usb_gadget *g)
/**
* fotg210_vbus_session - Called by external transceiver to enable/disable udc
- * @_gadget: usb gadget
+ * @g: usb gadget
* @is_active: 0 if should disable UDC VBUS, 1 if should enable
*
- * Returns 0
+ * Returns: %0
*/
static int fotg210_vbus_session(struct usb_gadget *g, int is_active)
{
@@ -1122,7 +1122,7 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
*
* Called by the USB Phy when a cable connect or disconnect is sensed.
*
- * Returns NOTIFY_OK or NOTIFY_DONE
+ * Returns: NOTIFY_OK or NOTIFY_DONE
*/
static int fotg210_phy_event(struct notifier_block *nb, unsigned long action,
void *data)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 4c639e9dd..ce3cfa1f3 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -606,10 +606,11 @@ static struct config_group *function_make(
char *instance_name;
int ret;
- ret = snprintf(buf, MAX_NAME_LEN, "%s", name);
- if (ret >= MAX_NAME_LEN)
+ if (strlen(name) >= MAX_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
+ scnprintf(buf, MAX_NAME_LEN, "%s", name);
+
func_name = buf;
instance_name = strchr(func_name, '.');
if (!instance_name) {
@@ -701,10 +702,12 @@ static struct config_group *config_desc_make(
int ret;
gi = container_of(group, struct gadget_info, configs_group);
- ret = snprintf(buf, MAX_NAME_LEN, "%s", name);
- if (ret >= MAX_NAME_LEN)
+
+ if (strlen(name) >= MAX_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
+ scnprintf(buf, MAX_NAME_LEN, "%s", name);
+
num_str = strchr(buf, '.');
if (!num_str) {
pr_err("Unable to locate . in name.bConfigurationValue\n");
@@ -812,7 +815,7 @@ static ssize_t gadget_string_s_show(struct config_item *item, char *page)
struct gadget_string *string = to_gadget_string(item);
int ret;
- ret = snprintf(page, sizeof(string->string), "%s\n", string->string);
+ ret = sysfs_emit(page, "%s\n", string->string);
return ret;
}
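
gadget_string_s_show() previously bounded its output by the size of the *source* string rather than the destination page; sysfs_emit() bakes the PAGE_SIZE contract into show callbacks. A minimal sketch:

#include <linux/sysfs.h>

static char example_string[128];

/* show() callbacks receive a full PAGE_SIZE buffer; sysfs_emit()
 * enforces that contract instead of trusting a hand-picked length. */
static ssize_t example_show(char *page)
{
	return sysfs_emit(page, "%s\n", example_string);
}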
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index fdd0fc7b8..6bff6cb93 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2931,9 +2931,8 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
- memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
- ARRAY_SIZE(desc->CompatibleID) +
- ARRAY_SIZE(desc->SubCompatibleID));
+ memcpy(t->os_desc->ext_compat_id, &desc->IDs,
+ sizeof_field(struct usb_ext_compat_desc, IDs));
length = sizeof(*desc);
}
break;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 5335845d6..20c6fbd94 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1177,11 +1177,11 @@ F_MIDI_OPT(out_ports, true, MAX_PORTS);
static ssize_t f_midi_opts_id_show(struct config_item *item, char *page)
{
struct f_midi_opts *opts = to_f_midi_opts(item);
- int result;
+ ssize_t result;
mutex_lock(&opts->lock);
if (opts->id) {
- result = strlcpy(page, opts->id, PAGE_SIZE);
+ result = strscpy(page, opts->id, PAGE_SIZE);
} else {
page[0] = 0;
result = 0;
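
strscpy() differs from the deprecated strlcpy() in its return value: the number of bytes actually copied (excluding the NUL), or -E2BIG on truncation, instead of the source length. That makes the result safe for f_midi_opts_id_show() to return directly. A one-function sketch:

#include <linux/string.h>

static ssize_t example_copy(char *dst, size_t dst_size, const char *src)
{
	/* Negative (-E2BIG) if src did not fit, else bytes copied sans
	 * NUL; strlcpy() returned strlen(src) either way. */
	return strscpy(dst, src, dst_size);
}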
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index f3456b8bf..0acc32ed9 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -103,6 +103,16 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/* Delay before the transmit side sends an unfilled NTB frame. */
#define TX_TIMEOUT_NSECS 300000
+/*
+ * Although the max MTU dictated by u_ether is 15412 bytes, setting
+ * max_segment_size to 15426 would not be efficient. If the user chooses a
+ * segment size >= 8192, we cannot aggregate more than one buffer into each
+ * NTB (assuming each packet coming from the network layer is >= 8192 bytes),
+ * as the ep maxpacket limit is 16384. So limit max_segment_size to 8000 to
+ * allow at least two packets to be aggregated, reducing wastage of NTB
+ * buffer space.
+ */
+#define MAX_DATAGRAM_SIZE 8000
+
#define FORMATS_SUPPORTED (USB_CDC_NCM_NTB16_SUPPORTED | \
USB_CDC_NCM_NTB32_SUPPORTED)
@@ -179,7 +189,6 @@ static struct usb_cdc_ether_desc ecm_desc = {
/* this descriptor actually adds value, surprise! */
/* .iMACAddress = DYNAMIC */
.bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
- .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
.wNumberMCFilters = cpu_to_le16(0),
.bNumberPowerFilters = 0,
};
@@ -869,7 +878,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
DBG(cdev, "reset ncm\n");
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
@@ -1166,11 +1175,15 @@ static int ncm_unwrap_ntb(struct gether *port,
struct sk_buff *skb2;
int ret = -EINVAL;
unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
- unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
+ unsigned frame_max;
const struct ndp_parser_opts *opts = ncm->parser_opts;
unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
int dgram_counter;
int to_process = skb->len;
+ struct f_ncm_opts *ncm_opts;
+
+ ncm_opts = container_of(port->func.fi, struct f_ncm_opts, func_inst);
+ frame_max = ncm_opts->max_segment_size;
parse_ntb:
tmp = (__le16 *)ntb_ptr;
@@ -1354,7 +1367,7 @@ static void ncm_disable(struct usb_function *f)
DBG(cdev, "ncm deactivated\n");
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
}
@@ -1438,8 +1451,10 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
mutex_lock(&ncm_opts->lock);
gether_set_gadget(ncm_opts->net, cdev->gadget);
- if (!ncm_opts->bound)
+ if (!ncm_opts->bound) {
+ ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
status = gether_register_netdev(ncm_opts->net);
+ }
mutex_unlock(&ncm_opts->lock);
if (status)
@@ -1482,6 +1497,8 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm_data_intf.bInterfaceNumber = status;
ncm_union_desc.bSlaveInterface0 = status;
+ ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size);
+
status = -ENODEV;
/* allocate instance-specific endpoints */
@@ -1584,11 +1601,56 @@ USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
/* f_ncm_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
+static ssize_t ncm_opts_max_segment_size_show(struct config_item *item,
+ char *page)
+{
+ struct f_ncm_opts *opts = to_f_ncm_opts(item);
+ u16 segment_size;
+
+ mutex_lock(&opts->lock);
+ segment_size = opts->max_segment_size;
+ mutex_unlock(&opts->lock);
+
+ return sysfs_emit(page, "%u\n", segment_size);
+}
+
+static ssize_t ncm_opts_max_segment_size_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_ncm_opts *opts = to_f_ncm_opts(item);
+ u16 segment_size;
+ int ret;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = kstrtou16(page, 0, &segment_size);
+ if (ret)
+ goto out;
+
+ if (segment_size > MAX_DATAGRAM_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ opts->max_segment_size = segment_size;
+ ret = len;
+out:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(ncm_opts_, max_segment_size);
+
static struct configfs_attribute *ncm_attrs[] = {
&ncm_opts_attr_dev_addr,
&ncm_opts_attr_host_addr,
&ncm_opts_attr_qmult,
&ncm_opts_attr_ifname,
+ &ncm_opts_attr_max_segment_size,
NULL,
};
@@ -1631,6 +1693,7 @@ static struct usb_function_instance *ncm_alloc_inst(void)
kfree(opts);
return ERR_CAST(net);
}
+ opts->max_segment_size = ETH_FRAME_LEN;
INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
descs[0] = &opts->ncm_os_desc;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index ff33f31bc..37befd6db 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1504,8 +1504,8 @@ static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
ret = -ENODEV;
goto out;
}
- ret = snprintf(page, PAGE_SIZE, "%s\n",
- tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ ret = sysfs_emit(page, "%s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
out:
mutex_unlock(&tpg->tpg_mutex);
return ret;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 6f0e1d803..7de74a3dd 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -292,6 +292,77 @@ static struct usb_descriptor_header *f_audio_desc[] = {
NULL,
};
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor ss_as_out_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_as_out_ep_desc_comp = {
+ .bLength = sizeof(ss_as_out_ep_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ /* wBytesPerInterval = DYNAMIC */
+};
+
+/* Standard ISO IN Endpoint Descriptor */
+static struct usb_endpoint_descriptor ss_as_in_ep_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_SYNC_ASYNC
+ | USB_ENDPOINT_XFER_ISOC,
+ .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE),
+ .bInterval = 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_as_in_ep_desc_comp = {
+ .bLength = sizeof(ss_as_in_ep_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ /* wBytesPerInterval = DYNAMIC */
+};
+
+static struct usb_descriptor_header *f_audio_ss_desc[] = {
+ (struct usb_descriptor_header *)&ac_interface_desc,
+ (struct usb_descriptor_header *)&ac_header_desc,
+
+ (struct usb_descriptor_header *)&usb_out_it_desc,
+ (struct usb_descriptor_header *)&io_out_ot_desc,
+ (struct usb_descriptor_header *)&io_in_it_desc,
+ (struct usb_descriptor_header *)&usb_in_ot_desc,
+
+ (struct usb_descriptor_header *)&as_out_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_out_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_out_header_desc,
+
+ (struct usb_descriptor_header *)&as_out_type_i_desc,
+
+ //(struct usb_descriptor_header *)&as_out_ep_desc,
+ (struct usb_descriptor_header *)&ss_as_out_ep_desc,
+ (struct usb_descriptor_header *)&ss_as_out_ep_desc_comp,
+ (struct usb_descriptor_header *)&as_iso_out_desc,
+
+ (struct usb_descriptor_header *)&as_in_interface_alt_0_desc,
+ (struct usb_descriptor_header *)&as_in_interface_alt_1_desc,
+ (struct usb_descriptor_header *)&as_in_header_desc,
+
+ (struct usb_descriptor_header *)&as_in_type_i_desc,
+
+ //(struct usb_descriptor_header *)&as_in_ep_desc,
+ (struct usb_descriptor_header *)&ss_as_in_ep_desc,
+ (struct usb_descriptor_header *)&ss_as_in_ep_desc_comp,
+ (struct usb_descriptor_header *)&as_iso_in_desc,
+ NULL,
+};
+
enum {
STR_AC_IF,
STR_USB_OUT_IT,
@@ -1352,6 +1423,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
if (!ep)
goto err_free_fu;
+ ss_as_out_ep_desc.bEndpointAddress = as_out_ep_desc.bEndpointAddress;
audio->out_ep = ep;
audio->out_ep->desc = &as_out_ep_desc;
}
@@ -1360,6 +1432,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
ep = usb_ep_autoconfig(cdev->gadget, &as_in_ep_desc);
if (!ep)
goto err_free_fu;
+ ss_as_in_ep_desc.bEndpointAddress = as_in_ep_desc.bEndpointAddress;
audio->in_ep = ep;
audio->in_ep->desc = &as_in_ep_desc;
}
@@ -1367,8 +1440,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
setup_descriptor(audio_opts);
/* copy descriptors, and track endpoint copies */
- status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
- NULL);
+ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, f_audio_ss_desc,
+ f_audio_ss_desc);
if (status)
goto err_free_fu;
@@ -1561,7 +1634,7 @@ static ssize_t f_uac1_opts_##name##_show(struct config_item *item, \
int result; \
\
mutex_lock(&opts->lock); \
- result = snprintf(page, sizeof(opts->name), "%s", opts->name); \
+ result = scnprintf(page, sizeof(opts->name), "%s", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -1579,7 +1652,7 @@ static ssize_t f_uac1_opts_##name##_store(struct config_item *item, \
goto end; \
} \
\
- ret = snprintf(opts->name, min(sizeof(opts->name), len), \
+ ret = scnprintf(opts->name, min(sizeof(opts->name), len), \
"%s", page); \
\
end: \
@@ -1685,7 +1758,7 @@ static struct usb_function_instance *f_audio_alloc_inst(void)
opts->req_number = UAC1_DEF_REQ_NUM;
- snprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
+ scnprintf(opts->function_name, sizeof(opts->function_name), "AC Interface");
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index f9a0f07a7..383f6854c 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -2045,7 +2045,7 @@ static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
int result; \
\
mutex_lock(&opts->lock); \
- result = snprintf(page, sizeof(opts->name), "%s", opts->name); \
+ result = scnprintf(page, sizeof(opts->name), "%s", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -2063,7 +2063,7 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
goto end; \
} \
\
- ret = snprintf(opts->name, min(sizeof(opts->name), len), \
+ ret = scnprintf(opts->name, min(sizeof(opts->name), len), \
"%s", page); \
\
end: \
@@ -2187,7 +2187,7 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->req_number = UAC2_DEF_REQ_NUM;
opts->fb_max = FBACK_FAST_MAX;
- snprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+ scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
opts->p_terminal_type = UAC2_DEF_P_TERM_TYPE;
opts->c_terminal_type = UAC2_DEF_C_TERM_TYPE;
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index c6965e389..929666805 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -263,10 +263,13 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
return 0;
}
-void uvc_function_setup_continue(struct uvc_device *uvc)
+void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep)
{
struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ if (disable_ep && uvc->video.ep)
+ usb_ep_disable(uvc->video.ep);
+
usb_composite_setup_continue(cdev);
}
@@ -337,15 +340,11 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (uvc->state != UVC_STATE_STREAMING)
return 0;
- if (uvc->video.ep)
- usb_ep_disable(uvc->video.ep);
-
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMOFF;
v4l2_event_queue(&uvc->vdev, &v4l2_event);
- uvc->state = UVC_STATE_CONNECTED;
- return 0;
+ return USB_GADGET_DELAYED_STATUS;
case 1:
if (uvc->state != UVC_STATE_CONNECTED)
diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h
index 1db972d4b..083aef0c6 100644
--- a/drivers/usb/gadget/function/f_uvc.h
+++ b/drivers/usb/gadget/function/f_uvc.h
@@ -11,7 +11,7 @@
struct uvc_device;
-void uvc_function_setup_continue(struct uvc_device *uvc);
+void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep);
void uvc_function_connect(struct uvc_device *uvc);
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 5408854d8..49ec095cd 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -31,6 +31,8 @@ struct f_ncm_opts {
*/
struct mutex lock;
int refcnt;
+
+ u16 max_segment_size;
};
#endif /* U_NCM_H */
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 6751de8b6..cb35687b1 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -81,6 +81,7 @@ struct uvc_request {
struct sg_table sgt;
u8 header[UVCG_REQUEST_HEADER_LEN];
struct uvc_buffer *last_buf;
+ struct list_head list;
};
struct uvc_video {
@@ -101,9 +102,18 @@ struct uvc_video {
unsigned int uvc_num_requests;
/* Requests */
+ bool is_enabled; /* tracks whether video stream is enabled */
unsigned int req_size;
- struct uvc_request *ureq;
+ struct list_head ureqs; /* all uvc_requests allocated by uvc_video */
+
+ /* USB requests that the video pump thread can encode into */
struct list_head req_free;
+
+ /*
+ * USB requests the video pump thread has already encoded into. These are
+ * ready to be queued to the endpoint.
+ */
+ struct list_head req_ready;
spinlock_t req_lock;
unsigned int req_int_count;
@@ -177,7 +187,7 @@ struct uvc_file_handle {
* Functions
*/
-extern void uvc_function_setup_continue(struct uvc_device *uvc);
+extern void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep);
extern void uvc_function_connect(struct uvc_device *uvc);
extern void uvc_function_disconnect(struct uvc_device *uvc);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 9bf0e985a..7e704b2bc 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -3414,7 +3414,7 @@ static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\
int result; \
\
mutex_lock(&opts->lock); \
- result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\
+ result = scnprintf(page, sizeof(opts->aname), "%s", opts->aname);\
mutex_unlock(&opts->lock); \
\
return result; \
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 3f0a9795c..c7e5fa4f2 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -443,7 +443,7 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return -EINVAL;
/* Enable UVC video. */
- ret = uvcg_video_enable(video, 1);
+ ret = uvcg_video_enable(video);
if (ret < 0)
return ret;
@@ -451,7 +451,7 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
* Complete the alternate setting selection setup phase now that
* userspace is ready to provide video frames.
*/
- uvc_function_setup_continue(uvc);
+ uvc_function_setup_continue(uvc, 0);
uvc->state = UVC_STATE_STREAMING;
return 0;
@@ -463,11 +463,18 @@ uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
+ int ret = 0;
if (type != video->queue.queue.type)
return -EINVAL;
- return uvcg_video_enable(video, 0);
+ ret = uvcg_video_disable(video);
+ if (ret < 0)
+ return ret;
+
+ uvc->state = UVC_STATE_CONNECTED;
+ uvc_function_setup_continue(uvc, 1);
+ return 0;
}
static int
@@ -500,7 +507,7 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
static void uvc_v4l2_disable(struct uvc_device *uvc)
{
uvc_function_disconnect(uvc);
- uvcg_video_enable(&uvc->video, 0);
+ uvcg_video_disable(&uvc->video);
uvcg_free_buffers(&uvc->video.queue);
uvc->func_connected = false;
wake_up_interruptible(&uvc->func_connected_queue);
@@ -647,4 +654,3 @@ const struct v4l2_file_operations uvc_v4l2_fops = {
.get_unmapped_area = uvcg_v4l2_get_unmapped_area,
#endif
};
-
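
Taken together, the uvc_v4l2 hunks split enable and disable and push endpoint disabling behind the delayed-status handshake: set_alt(0) returns USB_GADGET_DELAYED_STATUS while requests may still be in flight, and only the later STREAMOFF path disables the endpoint and completes the held control transfer. An illustrative flow, using the prototypes shown in the hunks above:

#include "uvc.h"	/* struct uvc_device, uvcg_video_disable(), ... */

static int example_set_alt_to_zero(struct uvc_device *uvc)
{
	/* Host selected alt 0: defer the status stage instead of
	 * disabling the endpoint while requests may still complete. */
	return USB_GADGET_DELAYED_STATUS;
}

static int example_streamoff(struct uvc_device *uvc)
{
	int ret = uvcg_video_disable(&uvc->video);

	if (ret < 0)
		return ret;

	/* Queue drained: now disable the ep and release the held
	 * control transfer in one place. */
	uvc_function_setup_continue(uvc, 1 /* disable_ep */);
	return 0;
}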
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 91af3b1ef..53e4cd81e 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
data[1] = UVC_STREAM_EOH | video->fid;
+ if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
+ data[1] |= UVC_STREAM_ERR;
+
if (video->queue.buf_used == 0 && ts.tv_sec) {
/* dwClockFrequency is 48 MHz */
u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
@@ -227,6 +230,28 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
* Request handling
*/
+/*
+ * Callers must take care to hold req_lock when this function may be called
+ * from multiple threads. For example, when frames are streaming to the host.
+ */
+static void
+uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
+{
+ sg_free_table(&ureq->sgt);
+ if (ureq->req && ep) {
+ usb_ep_free_request(ep, ureq->req);
+ ureq->req = NULL;
+ }
+
+ kfree(ureq->req_buffer);
+ ureq->req_buffer = NULL;
+
+ if (!list_empty(&ureq->list))
+ list_del_init(&ureq->list);
+
+ kfree(ureq);
+}
+
static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
int ret;
@@ -247,14 +272,127 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
return ret;
}
+/* This function must be called with video->req_lock held. */
+static int uvcg_video_usb_req_queue(struct uvc_video *video,
+ struct usb_request *req, bool queue_to_ep)
+{
+ bool is_bulk = video->max_payload_size;
+ struct list_head *list = NULL;
+
+ if (!video->is_enabled)
+ return -ENODEV;
+
+ if (queue_to_ep) {
+ struct uvc_request *ureq = req->context;
+ /*
+ * With USB3 handling more requests at a higher speed, we can't
+ * afford to generate an interrupt for every request. Decide to
+ * interrupt:
+ *
+ * - When no more requests are available in the free queue, as
+ * this may be our last chance to refill the endpoint's
+ * request queue.
+ *
+ * - When this request is the last request for the video
+ * buffer, as we want to start sending the next video buffer
+ * ASAP in case it doesn't get started already in the next
+ * iteration of this loop.
+ *
+ * - Four times over the length of the requests queue (as
+ * indicated by video->uvc_num_requests), as a trade-off
+ * between latency and interrupt load.
+ */
+ if (list_empty(&video->req_free) || ureq->last_buf ||
+ !(video->req_int_count %
+ DIV_ROUND_UP(video->uvc_num_requests, 4))) {
+ video->req_int_count = 0;
+ req->no_interrupt = 0;
+ } else {
+ req->no_interrupt = 1;
+ }
+ video->req_int_count++;
+ return uvcg_video_ep_queue(video, req);
+ }
+ /*
+ * If we're not queuing to the ep, for isoc we're queuing
+ * to the req_ready list, otherwise req_free.
+ */
+ list = is_bulk ? &video->req_free : &video->req_ready;
+ list_add_tail(&req->list, list);
+ return 0;
+}
+
+/*
+ * Must only be called from uvcg_video_enable(); after that we only want to
+ * queue requests to the endpoint from the uvc_video_complete() handler.
+ * This function is needed to kick-start the flow of requests from the
+ * gadget driver to the USB controller.
+ */
+static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
+{
+ struct usb_request *req = NULL;
+ unsigned long flags = 0;
+ unsigned int count = 0;
+ int ret = 0;
+
+ /*
+ * We only queue half of the free list since we still want to have
+ * some free usb_requests in the free list for the video_pump async_wq
+ * thread to encode uvc buffers into. Otherwise we could get into a
+ * situation where the free list has no usb requests to encode into,
+ * and we always end up queueing 0 length requests to the endpoint.
+ */
+ unsigned int half_list_size = video->uvc_num_requests / 2;
+
+ spin_lock_irqsave(&video->req_lock, flags);
+ /*
+ * Take these requests off the free list and queue them all to the
+ * endpoint. Since we queue 0 length requests with the req_lock held,
+ * there isn't any 'data' race involved here with the complete handler.
+ */
+ while (count < half_list_size) {
+ req = list_first_entry(&video->req_free, struct usb_request,
+ list);
+ list_del(&req->list);
+ req->length = 0;
+ ret = uvcg_video_ep_queue(video, req);
+ if (ret < 0) {
+ uvcg_queue_cancel(&video->queue, 0);
+ break;
+ }
+ count++;
+ }
+ spin_unlock_irqrestore(&video->req_lock, flags);
+}
+
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
struct uvc_request *ureq = req->context;
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
- struct uvc_device *uvc = video->uvc;
+ struct uvc_buffer *last_buf;
unsigned long flags;
+ bool is_bulk = video->max_payload_size;
+ int ret = 0;
+
+ spin_lock_irqsave(&video->req_lock, flags);
+ if (!video->is_enabled) {
+ /*
+ * When is_enabled is false, uvcg_video_disable() ensures
+ * that in-flight uvc_buffers are returned, so we can
+ * safely call free_request without worrying about
+ * last_buf.
+ */
+ uvc_video_free_request(ureq, ep);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return;
+ }
+
+ last_buf = ureq->last_buf;
+ ureq->last_buf = NULL;
+ spin_unlock_irqrestore(&video->req_lock, flags);
switch (req->status) {
case 0:
@@ -277,44 +415,85 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
uvcg_queue_cancel(queue, 0);
}
- if (ureq->last_buf) {
- uvcg_complete_buffer(&video->queue, ureq->last_buf);
- ureq->last_buf = NULL;
+ if (last_buf) {
+ spin_lock_irqsave(&queue->irqlock, flags);
+ uvcg_complete_buffer(queue, last_buf);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
- list_add_tail(&req->list, &video->req_free);
+ /*
+ * The video stream might have been disabled while we were
+ * processing the current usb_request, so make sure we're
+ * still streaming before queueing the usb_request back
+ * to req_free.
+ */
+ if (video->is_enabled) {
+ /*
+ * Here we check whether any request is available in the ready
+ * list. If it is, queue it to the ep and add the current
+ * usb_request to the req_free list - for video_pump to fill in.
+ * Otherwise, just use the current usb_request to queue a 0
+ * length request to the ep. Since we always add to the req_free
+ * list if we dequeue from the ready list, there will never
+ * be a situation where the req_free list is completely out of
+ * requests and cannot recover.
+ */
+ struct usb_request *to_queue = req;
+
+ to_queue->length = 0;
+ if (!list_empty(&video->req_ready)) {
+ to_queue = list_first_entry(&video->req_ready,
+ struct usb_request, list);
+ list_del(&to_queue->list);
+ list_add_tail(&req->list, &video->req_free);
+ /*
+ * Queue work to the wq as well since it is possible that a
+ * buffer may not have been completely encoded with the set of
+ * in-flight usb requests for which the complete callbacks are
+ * firing.
+ * In that case, if we do not queue work to the worker thread,
+ * the buffer will never be marked as complete, and therefore
+ * not be returned to userspace. As a result, the
+ * dequeue -> queue -> dequeue flow of uvc buffers will not
+ * happen.
+ */
+ queue_work(video->async_wq, &video->pump);
+ }
+ /*
+ * Queue to the endpoint. The actual queueing to ep will
+ * only happen on one thread - the async_wq for bulk endpoints
+ * and this thread for isoc endpoints.
+ */
+ ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
+ if (ret < 0) {
+ /*
+ * Endpoint error, but the stream is still enabled.
+ * Put request back in req_free for it to be cleaned
+ * up later.
+ */
+ list_add_tail(&to_queue->list, &video->req_free);
+ }
+ } else {
+ uvc_video_free_request(ureq, ep);
+ ret = 0;
+ }
spin_unlock_irqrestore(&video->req_lock, flags);
-
- if (uvc->state == UVC_STATE_STREAMING)
- queue_work(video->async_wq, &video->pump);
+ if (ret < 0)
+ uvcg_queue_cancel(queue, 0);
}
static int
uvc_video_free_requests(struct uvc_video *video)
{
- unsigned int i;
-
- if (video->ureq) {
- for (i = 0; i < video->uvc_num_requests; ++i) {
- sg_free_table(&video->ureq[i].sgt);
+ struct uvc_request *ureq, *temp;
- if (video->ureq[i].req) {
- usb_ep_free_request(video->ep, video->ureq[i].req);
- video->ureq[i].req = NULL;
- }
-
- if (video->ureq[i].req_buffer) {
- kfree(video->ureq[i].req_buffer);
- video->ureq[i].req_buffer = NULL;
- }
- }
-
- kfree(video->ureq);
- video->ureq = NULL;
- }
+ list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
+ uvc_video_free_request(ureq, video->ep);
+ INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
+ INIT_LIST_HEAD(&video->req_ready);
video->req_size = 0;
return 0;
}
@@ -322,6 +501,7 @@ uvc_video_free_requests(struct uvc_video *video)
static int
uvc_video_alloc_requests(struct uvc_video *video)
{
+ struct uvc_request *ureq;
unsigned int req_size;
unsigned int i;
int ret = -ENOMEM;
@@ -332,29 +512,33 @@ uvc_video_alloc_requests(struct uvc_video *video)
* max_t(unsigned int, video->ep->maxburst, 1)
* (video->ep->mult);
- video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
- if (video->ureq == NULL)
- return -ENOMEM;
+ for (i = 0; i < video->uvc_num_requests; i++) {
+ ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
+ if (ureq == NULL)
+ goto error;
+
+ INIT_LIST_HEAD(&ureq->list);
- for (i = 0; i < video->uvc_num_requests; ++i) {
- video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
- if (video->ureq[i].req_buffer == NULL)
+ list_add_tail(&ureq->list, &video->ureqs);
+
+ ureq->req_buffer = kmalloc(req_size, GFP_KERNEL);
+ if (ureq->req_buffer == NULL)
goto error;
- video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
- if (video->ureq[i].req == NULL)
+ ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
+ if (ureq->req == NULL)
goto error;
- video->ureq[i].req->buf = video->ureq[i].req_buffer;
- video->ureq[i].req->length = 0;
- video->ureq[i].req->complete = uvc_video_complete;
- video->ureq[i].req->context = &video->ureq[i];
- video->ureq[i].video = video;
- video->ureq[i].last_buf = NULL;
+ ureq->req->buf = ureq->req_buffer;
+ ureq->req->length = 0;
+ ureq->req->complete = uvc_video_complete;
+ ureq->req->context = ureq;
+ ureq->video = video;
+ ureq->last_buf = NULL;
- list_add_tail(&video->ureq[i].req->list, &video->req_free);
+ list_add_tail(&ureq->req->list, &video->req_free);
/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
- sg_alloc_table(&video->ureq[i].sgt,
+ sg_alloc_table(&ureq->sgt,
DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
PAGE_SIZE) + 2, GFP_KERNEL);
}
@@ -387,16 +571,18 @@ static void uvcg_video_pump(struct work_struct *work)
struct usb_request *req = NULL;
struct uvc_buffer *buf;
unsigned long flags;
- bool buf_done;
- int ret;
+ int ret = 0;
+
+ while (true) {
+ if (!video->ep->enabled)
+ return;
- while (video->ep->enabled) {
/*
- * Retrieve the first available USB request, protected by the
- * request lock.
+ * Check is_enabled and retrieve the first available USB
+ * request, protected by the request lock.
*/
spin_lock_irqsave(&video->req_lock, flags);
- if (list_empty(&video->req_free)) {
+ if (!video->is_enabled || list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
@@ -411,19 +597,7 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
-
- if (buf != NULL) {
- video->encode(req, video, buf);
- buf_done = buf->state == UVC_BUF_STATE_DONE;
- } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
- /*
- * No video buffer available; the queue is still connected and
- * we're transferring over ISOC. Queue a 0 length request to
- * prevent missed ISOC transfers.
- */
- req->length = 0;
- buf_done = false;
- } else {
+ if (!buf) {
/*
* Either the queue has been disconnected or no video buffer
* available for bulk transfer. Either way, stop processing
@@ -433,82 +607,140 @@ static void uvcg_video_pump(struct work_struct *work)
break;
}
- /*
- * With USB3 handling more requests at a higher speed, we can't
- * afford to generate an interrupt for every request. Decide to
- * interrupt:
- *
- * - When no more requests are available in the free queue, as
- * this may be our last chance to refill the endpoint's
- * request queue.
- *
- * - When this is request is the last request for the video
- * buffer, as we want to start sending the next video buffer
- * ASAP in case it doesn't get started already in the next
- * iteration of this loop.
- *
- * - Four times over the length of the requests queue (as
- * indicated by video->uvc_num_requests), as a trade-off
- * between latency and interrupt load.
- */
- if (list_empty(&video->req_free) || buf_done ||
- !(video->req_int_count %
- DIV_ROUND_UP(video->uvc_num_requests, 4))) {
- video->req_int_count = 0;
- req->no_interrupt = 0;
- } else {
- req->no_interrupt = 1;
- }
+ video->encode(req, video, buf);
- /* Queue the USB request */
- ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&queue->irqlock, flags);
+ spin_lock_irqsave(&video->req_lock, flags);
+		/*
+		 * For bulk endpoints we queue from the worker thread, since we
+		 * would rather not wait for requests to become ready in the
+		 * uvcg_video_complete() handler. For isoc endpoints we add the
+		 * request to the ready list here and only queue it to the
+		 * endpoint from the complete handler.
+		 */
+ ret = uvcg_video_usb_req_queue(video, req, is_bulk);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
break;
}
- /* Endpoint now owns the request */
+ /* The request is owned by the endpoint / ready list. */
req = NULL;
- video->req_int_count++;
}
if (!req)
return;
spin_lock_irqsave(&video->req_lock, flags);
- list_add_tail(&req->list, &video->req_free);
+ if (video->is_enabled)
+ list_add_tail(&req->list, &video->req_free);
+ else
+ uvc_video_free_request(req->context, video->ep);
spin_unlock_irqrestore(&video->req_lock, flags);
- return;
}
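
At the end of uvcg_video_pump() the leftover request is handed back under req_lock, and is_enabled decides its fate: recycle it to req_free while streaming, or free it immediately if disable has already torn the lists down. A sketch of that ownership rule, assuming stand-in types (struct my_video mirrors the relevant uvc_video fields):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct my_video {			/* stand-in for struct uvc_video */
	spinlock_t req_lock;
	bool is_enabled;
	struct list_head req_free;
	struct usb_ep *ep;
};

static void my_free_request(struct usb_request *req, struct usb_ep *ep)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

/* Recycle an unused request while streaming; free it once the
 * stream has been disabled and the free list is gone. */
static void my_requeue_or_free(struct my_video *v, struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&v->req_lock, flags);
	if (v->is_enabled)
		list_add_tail(&req->list, &v->req_free);
	else
		my_free_request(req, v->ep);
	spin_unlock_irqrestore(&v->req_lock, flags);
}
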
/*
- * Enable or disable the video stream.
+ * Disable the video stream
*/
-int uvcg_video_enable(struct uvc_video *video, int enable)
+int
+uvcg_video_disable(struct uvc_video *video)
{
- unsigned int i;
- int ret;
+ unsigned long flags;
+ struct list_head inflight_bufs;
+ struct usb_request *req, *temp;
+ struct uvc_buffer *buf, *btemp;
+ struct uvc_request *ureq, *utemp;
if (video->ep == NULL) {
uvcg_info(&video->uvc->func,
- "Video enable failed, device is uninitialized.\n");
+ "Video disable failed, device is uninitialized.\n");
return -ENODEV;
}
- if (!enable) {
- cancel_work_sync(&video->pump);
- uvcg_queue_cancel(&video->queue, 0);
+ INIT_LIST_HEAD(&inflight_bufs);
+ spin_lock_irqsave(&video->req_lock, flags);
+ video->is_enabled = false;
+
+ /*
+ * Remove any in-flight buffers from the uvc_requests
+ * because we want to return them before cancelling the
+ * queue. This ensures that we aren't stuck waiting for
+ * all complete callbacks to come through before disabling
+	 * the vb2 queue.
+ */
+ list_for_each_entry(ureq, &video->ureqs, list) {
+ if (ureq->last_buf) {
+ list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
+ ureq->last_buf = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
+ cancel_work_sync(&video->pump);
+ uvcg_queue_cancel(&video->queue, 0);
+
+ spin_lock_irqsave(&video->req_lock, flags);
+ /*
+	 * Remove all uvc_requests from ureqs with list_del_init().
+	 * This lets uvc_video_free_request() correctly identify
+	 * whether a uvc_request is attached to a list when freeing
+	 * memory.
+ */
+ list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
+ list_del_init(&ureq->list);
+
+ list_for_each_entry_safe(req, temp, &video->req_free, list) {
+ list_del(&req->list);
+ uvc_video_free_request(req->context, video->ep);
+ }
+
+ list_for_each_entry_safe(req, temp, &video->req_ready, list) {
+ list_del(&req->list);
+ uvc_video_free_request(req->context, video->ep);
+ }
- for (i = 0; i < video->uvc_num_requests; ++i)
- if (video->ureq && video->ureq[i].req)
- usb_ep_dequeue(video->ep, video->ureq[i].req);
+ INIT_LIST_HEAD(&video->ureqs);
+ INIT_LIST_HEAD(&video->req_free);
+ INIT_LIST_HEAD(&video->req_ready);
+ video->req_size = 0;
+ spin_unlock_irqrestore(&video->req_lock, flags);
- uvc_video_free_requests(video);
- uvcg_queue_enable(&video->queue, 0);
- return 0;
+ /*
+ * Return all the video buffers before disabling the queue.
+ */
+ spin_lock_irqsave(&video->queue.irqlock, flags);
+ list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
+ list_del(&buf->queue);
+ uvcg_complete_buffer(&video->queue, buf);
}
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ uvcg_queue_enable(&video->queue, 0);
+ return 0;
+}
+
+/*
+ * Enable the video stream.
+ */
+int uvcg_video_enable(struct uvc_video *video)
+{
+ int ret;
+
+ if (video->ep == NULL) {
+ uvcg_info(&video->uvc->func,
+ "Video enable failed, device is uninitialized.\n");
+ return -ENODEV;
+ }
+
+ /*
+	 * Safe to access request-related fields without req_lock because
+	 * this is the only thread currently active, and no other
+	 * request-handling thread will become active until this function
+	 * returns.
+ */
+ video->is_enabled = true;
if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
return ret;
@@ -525,7 +757,7 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
video->req_int_count = 0;
- queue_work(video->async_wq, &video->pump);
+ uvc_video_ep_queue_initial_requests(video);
return ret;
}
@@ -535,7 +767,10 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
*/
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
+ video->is_enabled = false;
+ INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
+ INIT_LIST_HEAD(&video->req_ready);
spin_lock_init(&video->req_lock);
INIT_WORK(&video->pump, uvcg_video_pump);
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 03adeefa3..8ef625974 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -14,7 +14,8 @@
struct uvc_video;
-int uvcg_video_enable(struct uvc_video *video, int enable);
+int uvcg_video_enable(struct uvc_video *video);
+int uvcg_video_disable(struct uvc_video *video);
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 30ea4a9d5..e3bf17a98 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1924,7 +1924,7 @@ err_unprepare_fclk:
return retval;
}
-static int at91udc_remove(struct platform_device *pdev)
+static void at91udc_remove(struct platform_device *pdev)
{
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
@@ -1932,8 +1932,11 @@ static int at91udc_remove(struct platform_device *pdev)
DBG("remove\n");
usb_del_gadget_udc(&udc->gadget);
- if (udc->driver)
- return -EBUSY;
+ if (udc->driver) {
+ dev_err(&pdev->dev,
+ "Driver still in use but removing anyhow\n");
+ return;
+ }
spin_lock_irqsave(&udc->lock, flags);
pullup(udc, 0);
@@ -1943,8 +1946,6 @@ static int at91udc_remove(struct platform_device *pdev)
remove_debug_file(udc);
clk_unprepare(udc->fclk);
clk_unprepare(udc->iclk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -2001,7 +2002,7 @@ static int at91udc_resume(struct platform_device *pdev)
static struct platform_driver at91_udc_driver = {
.probe = at91udc_probe,
- .remove = at91udc_remove,
+ .remove_new = at91udc_remove,
.shutdown = at91udc_shutdown,
.suspend = at91udc_suspend,
.resume = at91udc_resume,
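
This at91 hunk is one instance of the tree-wide move to the void-returning remove callback: the platform core always ignored the old int return (it printed a warning and removed the device anyway), so drivers now log the problem themselves and release what they can. The shape of the convention, sketched as a hypothetical driver:

#include <linux/module.h>
#include <linux/platform_device.h>

struct my_priv {
	bool busy;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	platform_set_drvdata(pdev, priv);
	return 0;
}

/* .remove_new returns void: report problems, then keep going,
 * because the device is being removed regardless. */
static void my_remove(struct platform_device *pdev)
{
	struct my_priv *priv = platform_get_drvdata(pdev);

	if (priv->busy)
		dev_err(&pdev->dev, "still in use, removing anyhow\n");
	/* release resources here */
}

static struct platform_driver my_driver = {
	.probe		= my_probe,
	.remove_new	= my_remove,
	.driver		= {
		.name	= "my-example",
	},
};
module_platform_driver(my_driver);
MODULE_LICENSE("GPL");
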
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 02b1bef5e..b76885d78 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -94,7 +94,7 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
inode_lock(file_inode(file));
list_for_each_entry_safe(req, tmp_req, queue, queue) {
- len = snprintf(tmpbuf, sizeof(tmpbuf),
+ len = scnprintf(tmpbuf, sizeof(tmpbuf),
"%8p %08x %c%c%c %5d %c%c%c\n",
req->req.buf, req->req.length,
req->req.no_interrupt ? 'i' : 'I',
@@ -104,7 +104,6 @@ static ssize_t queue_dbg_read(struct file *file, char __user *buf,
req->submitted ? 'F' : 'f',
req->using_dma ? 'D' : 'd',
req->last_transaction ? 'L' : 'l');
- len = min(len, sizeof(tmpbuf));
if (len > nbytes)
break;
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-debug.h b/drivers/usb/gadget/udc/cdns2/cdns2-debug.h
index be9ae0d28..f5f330004 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-debug.h
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-debug.h
@@ -16,34 +16,34 @@ static inline const char *cdns2_decode_usb_irq(char *str, size_t size,
{
int ret;
- ret = snprintf(str, size, "usbirq: 0x%02x - ", usb_irq);
+ ret = scnprintf(str, size, "usbirq: 0x%02x - ", usb_irq);
if (usb_irq & USBIRQ_SOF)
- ret += snprintf(str + ret, size - ret, "SOF ");
+ ret += scnprintf(str + ret, size - ret, "SOF ");
if (usb_irq & USBIRQ_SUTOK)
- ret += snprintf(str + ret, size - ret, "SUTOK ");
+ ret += scnprintf(str + ret, size - ret, "SUTOK ");
if (usb_irq & USBIRQ_SUDAV)
- ret += snprintf(str + ret, size - ret, "SETUP ");
+ ret += scnprintf(str + ret, size - ret, "SETUP ");
if (usb_irq & USBIRQ_SUSPEND)
- ret += snprintf(str + ret, size - ret, "Suspend ");
+ ret += scnprintf(str + ret, size - ret, "Suspend ");
if (usb_irq & USBIRQ_URESET)
- ret += snprintf(str + ret, size - ret, "Reset ");
+ ret += scnprintf(str + ret, size - ret, "Reset ");
if (usb_irq & USBIRQ_HSPEED)
- ret += snprintf(str + ret, size - ret, "HS ");
+ ret += scnprintf(str + ret, size - ret, "HS ");
if (usb_irq & USBIRQ_LPM)
- ret += snprintf(str + ret, size - ret, "LPM ");
+ ret += scnprintf(str + ret, size - ret, "LPM ");
- ret += snprintf(str + ret, size - ret, ", EXT: 0x%02x - ", ext_irq);
+ ret += scnprintf(str + ret, size - ret, ", EXT: 0x%02x - ", ext_irq);
if (ext_irq & EXTIRQ_WAKEUP)
- ret += snprintf(str + ret, size - ret, "Wakeup ");
+ ret += scnprintf(str + ret, size - ret, "Wakeup ");
if (ext_irq & EXTIRQ_VBUSFAULT_FALL)
- ret += snprintf(str + ret, size - ret, "VBUS_FALL ");
+ ret += scnprintf(str + ret, size - ret, "VBUS_FALL ");
if (ext_irq & EXTIRQ_VBUSFAULT_RISE)
- ret += snprintf(str + ret, size - ret, "VBUS_RISE ");
+ ret += scnprintf(str + ret, size - ret, "VBUS_RISE ");
- if (ret >= size)
- pr_info("CDNS2: buffer overflowed.\n");
+ if (ret == size - 1)
+ pr_info("CDNS2: buffer may be truncated.\n");
return str;
}
@@ -54,28 +54,28 @@ static inline const char *cdns2_decode_dma_irq(char *str, size_t size,
{
int ret;
- ret = snprintf(str, size, "ISTS: %08x, %s: %08x ",
- ep_ists, ep_name, ep_sts);
+ ret = scnprintf(str, size, "ISTS: %08x, %s: %08x ",
+ ep_ists, ep_name, ep_sts);
if (ep_sts & DMA_EP_STS_IOC)
- ret += snprintf(str + ret, size - ret, "IOC ");
+ ret += scnprintf(str + ret, size - ret, "IOC ");
if (ep_sts & DMA_EP_STS_ISP)
- ret += snprintf(str + ret, size - ret, "ISP ");
+ ret += scnprintf(str + ret, size - ret, "ISP ");
if (ep_sts & DMA_EP_STS_DESCMIS)
- ret += snprintf(str + ret, size - ret, "DESCMIS ");
+ ret += scnprintf(str + ret, size - ret, "DESCMIS ");
if (ep_sts & DMA_EP_STS_TRBERR)
- ret += snprintf(str + ret, size - ret, "TRBERR ");
+ ret += scnprintf(str + ret, size - ret, "TRBERR ");
if (ep_sts & DMA_EP_STS_OUTSMM)
- ret += snprintf(str + ret, size - ret, "OUTSMM ");
+ ret += scnprintf(str + ret, size - ret, "OUTSMM ");
if (ep_sts & DMA_EP_STS_ISOERR)
- ret += snprintf(str + ret, size - ret, "ISOERR ");
+ ret += scnprintf(str + ret, size - ret, "ISOERR ");
if (ep_sts & DMA_EP_STS_DBUSY)
- ret += snprintf(str + ret, size - ret, "DBUSY ");
+ ret += scnprintf(str + ret, size - ret, "DBUSY ");
if (DMA_EP_STS_CCS(ep_sts))
- ret += snprintf(str + ret, size - ret, "CCS ");
+ ret += scnprintf(str + ret, size - ret, "CCS ");
- if (ret >= size)
- pr_info("CDNS2: buffer overflowed.\n");
+ if (ret == size - 1)
+ pr_info("CDNS2: buffer may be truncated.\n");
return str;
}
@@ -105,43 +105,43 @@ static inline const char *cdns2_raw_ring(struct cdns2_endpoint *pep,
int ret;
int i;
- ret = snprintf(str, size, "\n\t\tTR for %s:", pep->name);
+ ret = scnprintf(str, size, "\n\t\tTR for %s:", pep->name);
trb = &trbs[ring->dequeue];
dma = cdns2_trb_virt_to_dma(pep, trb);
- ret += snprintf(str + ret, size - ret,
- "\n\t\tRing deq index: %d, trb: V=%p, P=0x%pad\n",
- ring->dequeue, trb, &dma);
+ ret += scnprintf(str + ret, size - ret,
+ "\n\t\tRing deq index: %d, trb: V=%p, P=0x%pad\n",
+ ring->dequeue, trb, &dma);
trb = &trbs[ring->enqueue];
dma = cdns2_trb_virt_to_dma(pep, trb);
- ret += snprintf(str + ret, size - ret,
- "\t\tRing enq index: %d, trb: V=%p, P=0x%pad\n",
- ring->enqueue, trb, &dma);
+ ret += scnprintf(str + ret, size - ret,
+ "\t\tRing enq index: %d, trb: V=%p, P=0x%pad\n",
+ ring->enqueue, trb, &dma);
- ret += snprintf(str + ret, size - ret,
- "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
- ring->free_trbs, ring->ccs, ring->pcs);
+ ret += scnprintf(str + ret, size - ret,
+ "\t\tfree trbs: %d, CCS=%d, PCS=%d\n",
+ ring->free_trbs, ring->ccs, ring->pcs);
if (TRBS_PER_SEGMENT > 40) {
- ret += snprintf(str + ret, size - ret,
- "\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);
+ ret += scnprintf(str + ret, size - ret,
+ "\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);
return str;
}
dma = ring->dma;
for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
trb = &trbs[i];
- ret += snprintf(str + ret, size - ret,
- "\t\t@%pad %08x %08x %08x\n", &dma,
- le32_to_cpu(trb->buffer),
- le32_to_cpu(trb->length),
- le32_to_cpu(trb->control));
+ ret += scnprintf(str + ret, size - ret,
+ "\t\t@%pad %08x %08x %08x\n", &dma,
+ le32_to_cpu(trb->buffer),
+ le32_to_cpu(trb->length),
+ le32_to_cpu(trb->control));
dma += sizeof(*trb);
}
- if (ret >= size)
- pr_info("CDNS2: buffer overflowed.\n");
+ if (ret == size - 1)
+ pr_info("CDNS2: buffer may be truncated.\n");
return str;
}
@@ -166,36 +166,36 @@ static inline const char *cdns2_decode_trb(char *str, size_t size, u32 flags,
switch (type) {
case TRB_LINK:
- ret = snprintf(str, size,
- "LINK %08x type '%s' flags %c:%c:%c%c:%c",
- buffer, cdns2_trb_type_string(type),
- flags & TRB_CYCLE ? 'C' : 'c',
- flags & TRB_TOGGLE ? 'T' : 't',
- flags & TRB_CHAIN ? 'C' : 'c',
- flags & TRB_CHAIN ? 'H' : 'h',
- flags & TRB_IOC ? 'I' : 'i');
+ ret = scnprintf(str, size,
+ "LINK %08x type '%s' flags %c:%c:%c%c:%c",
+ buffer, cdns2_trb_type_string(type),
+ flags & TRB_CYCLE ? 'C' : 'c',
+ flags & TRB_TOGGLE ? 'T' : 't',
+ flags & TRB_CHAIN ? 'C' : 'c',
+ flags & TRB_CHAIN ? 'H' : 'h',
+ flags & TRB_IOC ? 'I' : 'i');
break;
case TRB_NORMAL:
- ret = snprintf(str, size,
- "type: '%s', Buffer: %08x, length: %ld, burst len: %ld, "
- "flags %c:%c:%c%c:%c",
- cdns2_trb_type_string(type),
- buffer, TRB_LEN(length),
- TRB_FIELD_TO_BURST(length),
- flags & TRB_CYCLE ? 'C' : 'c',
- flags & TRB_ISP ? 'I' : 'i',
- flags & TRB_CHAIN ? 'C' : 'c',
- flags & TRB_CHAIN ? 'H' : 'h',
- flags & TRB_IOC ? 'I' : 'i');
+ ret = scnprintf(str, size,
+ "type: '%s', Buffer: %08x, length: %ld, burst len: %ld, "
+ "flags %c:%c:%c%c:%c",
+ cdns2_trb_type_string(type),
+ buffer, TRB_LEN(length),
+ TRB_FIELD_TO_BURST(length),
+ flags & TRB_CYCLE ? 'C' : 'c',
+ flags & TRB_ISP ? 'I' : 'i',
+ flags & TRB_CHAIN ? 'C' : 'c',
+ flags & TRB_CHAIN ? 'H' : 'h',
+ flags & TRB_IOC ? 'I' : 'i');
break;
default:
- ret = snprintf(str, size, "type '%s' -> raw %08x %08x %08x",
- cdns2_trb_type_string(type),
- buffer, length, flags);
+ ret = scnprintf(str, size, "type '%s' -> raw %08x %08x %08x",
+ cdns2_trb_type_string(type),
+ buffer, length, flags);
}
- if (ret >= size)
- pr_info("CDNS2: buffer overflowed.\n");
+ if (ret == size - 1)
+ pr_info("CDNS2: buffer may be truncated.\n");
return str;
}
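
All of the cdns2 conversions above hinge on one difference between the helpers: snprintf() returns the length the output would have needed, so chained ret += snprintf(str + ret, size - ret, ...) can push ret past size and wrap the next size - ret, whereas scnprintf() returns the bytes actually written, never more than size - 1, which makes the chaining safe and makes ret == size - 1 the natural truncation hint. A self-contained sketch of the idiom:

#include <linux/kernel.h>

/* Decode @flags into @str; safe even when @size is small. */
static const char *decode_flags(char *str, size_t size, unsigned int flags)
{
	int ret;

	ret = scnprintf(str, size, "flags: 0x%02x -", flags);
	if (flags & 0x1)
		ret += scnprintf(str + ret, size - ret, " FIRST");
	if (flags & 0x2)
		ret += scnprintf(str + ret, size - ret, " SECOND");

	/* scnprintf() writes at most size - 1 bytes plus the NUL,
	 * so reaching that total means output may have been cut. */
	if (ret == size - 1)
		pr_info("decode_flags: buffer may be truncated\n");

	return str;
}
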
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 2693a10eb..e8042c158 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1360,7 +1360,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
udc->ep0_dir = USB_DIR_IN;
/* Borrow the per device status_req */
req = udc->status_req;
- /* Fill in the reqest structure */
+ /* Fill in the request structure */
*((u16 *) req->req.buf) = cpu_to_le16(tmp);
req->ep = ep;
@@ -2532,15 +2532,18 @@ err_kfree:
/* Driver removal function
* Free resources and finish pending transactions
*/
-static int fsl_udc_remove(struct platform_device *pdev)
+static void fsl_udc_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
DECLARE_COMPLETION_ONSTACK(done);
- if (!udc_controller)
- return -ENODEV;
+ if (!udc_controller) {
+		dev_err(&pdev->dev,
+			"No UDC controller registered, nothing to remove\n");
+ return;
+ }
udc_controller->done = &done;
usb_del_gadget_udc(&udc_controller->gadget);
@@ -2568,8 +2571,6 @@ static int fsl_udc_remove(struct platform_device *pdev)
*/
if (pdata->exit)
pdata->exit(pdev);
-
- return 0;
}
/*-----------------------------------------------------------------
@@ -2667,7 +2668,7 @@ static const struct platform_device_id fsl_udc_devtype[] = {
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
static struct platform_driver udc_driver = {
.probe = fsl_udc_probe,
- .remove = fsl_udc_remove,
+ .remove_new = fsl_udc_remove,
.id_table = fsl_udc_devtype,
/* these suspend and resume are not usb suspend and resume */
.suspend = fsl_udc_suspend,
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index c6dfa7ccc..fb901be5d 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2089,15 +2089,18 @@ static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
ep->tailbuf, ep->tailbuf_paddr);
}
-static int gr_remove(struct platform_device *pdev)
+static void gr_remove(struct platform_device *pdev)
{
struct gr_udc *dev = platform_get_drvdata(pdev);
int i;
if (dev->added)
usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
- if (dev->driver)
- return -EBUSY;
+ if (dev->driver) {
+ dev_err(&pdev->dev,
+ "Driver still in use but removing anyhow\n");
+ return;
+ }
gr_dfs_delete(dev);
dma_pool_destroy(dev->desc_pool);
@@ -2110,8 +2113,6 @@ static int gr_remove(struct platform_device *pdev)
gr_ep_remove(dev, i, 0);
for (i = 0; i < dev->nepi; i++)
gr_ep_remove(dev, i, 1);
-
- return 0;
}
static int gr_request_irq(struct gr_udc *dev, int irq)
{
@@ -2248,7 +2249,7 @@ static struct platform_driver gr_driver = {
.of_match_table = gr_match,
},
.probe = gr_probe,
- .remove = gr_remove,
+ .remove_new = gr_remove,
};
module_platform_driver(gr_driver);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index a917cc9a3..d5f29f8fe 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3174,13 +3174,16 @@ i2c_fail:
return retval;
}
-static int lpc32xx_udc_remove(struct platform_device *pdev)
+static void lpc32xx_udc_remove(struct platform_device *pdev)
{
struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
- if (udc->driver)
- return -EBUSY;
+ if (udc->driver) {
+ dev_err(&pdev->dev,
+ "Driver still in use but removing anyhow\n");
+ return;
+ }
udc_clk_set(udc, 1);
udc_disable(udc);
@@ -3194,8 +3197,6 @@ static int lpc32xx_udc_remove(struct platform_device *pdev)
udc->udca_v_base, udc->udca_p_base);
clk_disable_unprepare(udc->usb_slv_clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -3255,7 +3256,7 @@ MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
static struct platform_driver lpc32xx_udc_driver = {
.probe = lpc32xx_udc_probe,
- .remove = lpc32xx_udc_remove,
+ .remove_new = lpc32xx_udc_remove,
.shutdown = lpc32xx_udc_shutdown,
.suspend = lpc32xx_udc_suspend,
.resume = lpc32xx_udc_resume,
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 2d57786d3..89e8cf2a2 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1201,7 +1201,7 @@ static int max3420_probe(struct spi_device *spi)
int err, irq;
u8 reg[8];
- if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+ if (spi->master->flags & SPI_CONTROLLER_HALF_DUPLEX) {
dev_err(&spi->dev, "UDC needs full duplex to work\n");
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d888dcda2..78308b649 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1451,7 +1451,7 @@ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
req = udc->status_req;
- /* fill in the reqest structure */
+ /* fill in the request structure */
if (empty == false) {
*((u16 *) req->req.buf) = cpu_to_le16(status);
req->req.length = 2;
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 4f8617210..169f72665 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -274,7 +274,6 @@ struct pch_udc_cfg_data {
* @td_data: for data request
* @dev: reference to device struct
* @offset_addr: offset address of ep register
- * @desc: for this ep
* @queue: queue for requests
* @num: endpoint number
* @in: endpoint is IN
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index df0551ecc..1ac26cb49 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2397,12 +2397,15 @@ static void pxa25x_udc_shutdown(struct platform_device *_dev)
pullup_off();
}
-static int pxa25x_udc_remove(struct platform_device *pdev)
+static void pxa25x_udc_remove(struct platform_device *pdev)
{
struct pxa25x_udc *dev = platform_get_drvdata(pdev);
- if (dev->driver)
- return -EBUSY;
+ if (dev->driver) {
+ dev_err(&pdev->dev,
+ "Driver still in use but removing anyhow\n");
+ return;
+ }
usb_del_gadget_udc(&dev->gadget);
dev->pullup = 0;
@@ -2414,7 +2417,6 @@ static int pxa25x_udc_remove(struct platform_device *pdev)
dev->transceiver = NULL;
the_controller = NULL;
- return 0;
}
/*-------------------------------------------------------------------------*/
@@ -2472,7 +2474,7 @@ static int pxa25x_udc_resume(struct platform_device *dev)
static struct platform_driver udc_driver = {
.shutdown = pxa25x_udc_shutdown,
.probe = pxa25x_udc_probe,
- .remove = pxa25x_udc_remove,
+ .remove_new = pxa25x_udc_remove,
.suspend = pxa25x_udc_suspend,
.resume = pxa25x_udc_resume,
.driver = {
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 8508d37a2..6cdc3d805 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -288,7 +288,7 @@ static void fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
#define PHYCTRL_LSFE (1 << 1) /* Line State Filter Enable */
#define PHYCTRL_PXE (1 << 0) /* PHY oscillator enable */
-int fsl_usb2_mpc5121_init(struct platform_device *pdev)
+static int fsl_usb2_mpc5121_init(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index d152d72de..9fe4f48b1 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1158,12 +1158,12 @@ dump_eps(struct usb_hcd *hcd)
end = dp + sizeof(ubuf);
*dp = '\0';
list_for_each_entry(urb, &ep->urb_list, urb_list) {
- ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
- usb_pipetype(urb->pipe),
- usb_urb_dir_in(urb) ? "IN" : "OUT",
- urb->actual_length,
- urb->transfer_buffer_length);
- if (ret < 0 || ret >= end - dp)
+ ret = scnprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
+ usb_pipetype(urb->pipe),
+ usb_urb_dir_in(urb) ? "IN" : "OUT",
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ if (ret == end - dp - 1)
break; /* error or buffer full */
dp += ret;
}
@@ -1255,9 +1255,9 @@ max3421_handle_irqs(struct usb_hcd *hcd)
end = sbuf + sizeof(sbuf);
*dp = '\0';
for (i = 0; i < 16; ++i) {
- int ret = snprintf(dp, end - dp, " %lu",
- max3421_hcd->err_stat[i]);
- if (ret < 0 || ret >= end - dp)
+ int ret = scnprintf(dp, end - dp, " %lu",
+ max3421_hcd->err_stat[i]);
+ if (ret == end - dp - 1)
break; /* error or buffer full */
dp += ret;
}
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 0956495bb..2b871540b 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
finish_request(sl811, ep, urb, urbstat);
}
+#ifdef QUIRK2
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
#endif
return irqstat;
}
+#endif
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
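
checkdone() in sl811-hcd.c is only referenced from code compiled under QUIRK2, so builds without that define used to leave an unused static function behind, with the matching compiler warning; guarding the helper with the same symbol keeps definition and use in sync. The general pattern:

#ifdef QUIRK2
/* Only referenced from QUIRK2 code paths; guard the definition
 * with the same symbol so !QUIRK2 builds don't warn. */
static int helper(void)
{
	return 42;
}
#endif

int consumer(void)
{
#ifdef QUIRK2
	return helper();
#else
	return 0;
#endif
}
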
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index ac3fc5970..cfebb8336 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
static int uhci_grlib_init(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index b40d9238d..d82935d31 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -6,9 +6,24 @@
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
*/
+#include <linux/bug.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/nls.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include <asm/byteorder.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -28,7 +43,7 @@ static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
if (!ring)
return;
- if (ring->first_seg && ring->first_seg->trbs) {
+ if (ring->first_seg) {
dma_free_coherent(dev, TRB_SEGMENT_SIZE,
ring->first_seg->trbs,
ring->first_seg->dma);
@@ -374,13 +389,13 @@ static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
- memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
+ memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
}
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
struct xhci_erst *erst, gfp_t flags)
{
- erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
+ erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
&erst->erst_dma_addr, flags);
if (!erst->entries)
return -ENOMEM;
@@ -394,9 +409,8 @@ static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
- if (erst->entries)
- dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
- erst->entries, erst->erst_dma_addr);
+ dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
+ erst->erst_dma_addr);
erst->entries = NULL;
}
@@ -495,7 +509,7 @@ static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
goto ctx_fail;
/* Allocate the string table: */
- dbc->string_size = sizeof(struct dbc_str_descs);
+ dbc->string_size = sizeof(*dbc->string);
dbc->string = dma_alloc_coherent(dev, dbc->string_size,
&dbc->string_dma, flags);
if (!dbc->string)
@@ -543,11 +557,8 @@ static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
xhci_dbc_eps_exit(dbc);
- if (dbc->string) {
- dma_free_coherent(dbc->dev, dbc->string_size,
- dbc->string, dbc->string_dma);
- dbc->string = NULL;
- }
+ dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
+ dbc->string = NULL;
dbc_free_ctx(dbc->dev, dbc->ctx);
dbc->ctx = NULL;
@@ -597,7 +608,7 @@ static int xhci_do_dbc_start(struct xhci_dbc *dbc)
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
if (dbc->state == DS_DISABLED)
- return -1;
+ return -EINVAL;
writel(0, &dbc->regs->control);
dbc->state = DS_DISABLED;
@@ -650,11 +661,11 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
spin_lock_irqsave(&dbc->lock, flags);
ret = xhci_do_dbc_stop(dbc);
spin_unlock_irqrestore(&dbc->lock, flags);
-	if (!ret) {
-		xhci_dbc_mem_cleanup(dbc);
-		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
-	}
+	if (ret)
+		return;
+	xhci_dbc_mem_cleanup(dbc);
+	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
static void
@@ -914,41 +925,29 @@ static void xhci_dbc_handle_events(struct work_struct *work)
mod_delayed_work(system_wq, &dbc->event_work, 1);
}
+static const char * const dbc_state_strings[DS_MAX] = {
+ [DS_DISABLED] = "disabled",
+ [DS_INITIALIZED] = "initialized",
+ [DS_ENABLED] = "enabled",
+ [DS_CONNECTED] = "connected",
+ [DS_CONFIGURED] = "configured",
+ [DS_STALLED] = "stalled",
+};
+
static ssize_t dbc_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- const char *p;
struct xhci_dbc *dbc;
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
- switch (dbc->state) {
- case DS_DISABLED:
- p = "disabled";
- break;
- case DS_INITIALIZED:
- p = "initialized";
- break;
- case DS_ENABLED:
- p = "enabled";
- break;
- case DS_CONNECTED:
- p = "connected";
- break;
- case DS_CONFIGURED:
- p = "configured";
- break;
- case DS_STALLED:
- p = "stalled";
- break;
- default:
- p = "unknown";
- }
-	return sprintf(buf, "%s\n", p);
+	if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
+		return sysfs_emit(buf, "unknown\n");
+	return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
}
static ssize_t dbc_store(struct device *dev,
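
The dbc_state_strings[] table that replaces the switch works because enum dbc_state values are small consecutive integers and the new DS_MAX terminator sizes the array; the one thing the lookup must keep is a bounds check before indexing. The same pattern in general form (hypothetical names):

enum my_state { ST_OFF, ST_INIT, ST_RUN, ST_MAX };

static const char * const my_state_strings[ST_MAX] = {
	[ST_OFF]  = "off",
	[ST_INIT] = "initialized",
	[ST_RUN]  = "running",
};

static const char *my_state_name(enum my_state s)
{
	/* Designated initializers leave gaps as NULL, so check the
	 * range and the entry before returning it. */
	if (s >= ST_MAX || !my_state_strings[s])
		return "unknown";
	return my_state_strings[s];
}

The sysfs_emit() switch in the same hunk is the related hardening: unlike sprintf(), it checks the output against PAGE_SIZE, which is the contract for sysfs show callbacks.
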
@@ -961,9 +960,9 @@ static ssize_t dbc_store(struct device *dev,
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
- if (!strncmp(buf, "enable", 6))
+ if (sysfs_streq(buf, "enable"))
xhci_dbc_start(dbc);
- else if (!strncmp(buf, "disable", 7))
+ else if (sysfs_streq(buf, "disable"))
xhci_dbc_stop(dbc);
else
return -EINVAL;
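
sysfs_streq() is the right comparison for store callbacks: it tolerates one trailing newline (what `echo enable > dbc` produces) and requires a full match, whereas the strncmp() it replaces was a prefix test that also accepted strings like "enabled" or "disable-now". A typical store handler using it, with a hypothetical my_power() helper:

#include <linux/device.h>
#include <linux/string.h>

static void my_power(struct device *dev, bool on)
{
	/* ... */
}

static ssize_t power_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	/* Full-string match, one trailing '\n' tolerated. */
	if (sysfs_streq(buf, "on"))
		my_power(dev, true);
	else if (sysfs_streq(buf, "off"))
		my_power(dev, false);
	else
		return -EINVAL;

	return count;
}
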
@@ -981,7 +980,7 @@ static ssize_t dbc_idVendor_show(struct device *dev,
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
- return sprintf(buf, "%04x\n", dbc->idVendor);
+ return sysfs_emit(buf, "%04x\n", dbc->idVendor);
}
static ssize_t dbc_idVendor_store(struct device *dev,
@@ -993,9 +992,11 @@ static ssize_t dbc_idVendor_store(struct device *dev,
void __iomem *ptr;
u16 value;
u32 dev_info;
+ int ret;
- if (kstrtou16(buf, 0, &value))
- return -EINVAL;
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
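
This and the matching _store changes below stop collapsing every parse failure into -EINVAL: the kstrtox helpers already distinguish malformed input (-EINVAL) from values that do not fit the type (-ERANGE), so propagating their return is both shorter and more informative. Sketched on its own:

#include <linux/kstrtox.h>
#include <linux/types.h>

static int parse_u16_sketch(const char *buf, u16 *out)
{
	u16 value;
	int ret;

	/* -EINVAL for bad syntax, -ERANGE for numbers > 0xffff. */
	ret = kstrtou16(buf, 0, &value);
	if (ret)
		return ret;

	*out = value;
	return 0;
}
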
@@ -1021,7 +1022,7 @@ static ssize_t dbc_idProduct_show(struct device *dev,
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
- return sprintf(buf, "%04x\n", dbc->idProduct);
+ return sysfs_emit(buf, "%04x\n", dbc->idProduct);
}
static ssize_t dbc_idProduct_store(struct device *dev,
@@ -1033,9 +1034,11 @@ static ssize_t dbc_idProduct_store(struct device *dev,
void __iomem *ptr;
u32 dev_info;
u16 value;
+ int ret;
- if (kstrtou16(buf, 0, &value))
- return -EINVAL;
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
@@ -1060,7 +1063,7 @@ static ssize_t dbc_bcdDevice_show(struct device *dev,
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
- return sprintf(buf, "%04x\n", dbc->bcdDevice);
+ return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
}
static ssize_t dbc_bcdDevice_store(struct device *dev,
@@ -1072,9 +1075,11 @@ static ssize_t dbc_bcdDevice_store(struct device *dev,
void __iomem *ptr;
u32 dev_info;
u16 value;
+ int ret;
- if (kstrtou16(buf, 0, &value))
- return -EINVAL;
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
@@ -1100,7 +1105,7 @@ static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
xhci = hcd_to_xhci(dev_get_drvdata(dev));
dbc = xhci->dbc;
- return sprintf(buf, "%02x\n", dbc->bInterfaceProtocol);
+ return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
}
static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
@@ -1114,9 +1119,13 @@ static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
u8 value;
int ret;
- /* bInterfaceProtocol is 8 bit, but xhci only supports values 0 and 1 */
+	/* bInterfaceProtocol is 8-bit, but... */
ret = kstrtou8(buf, 0, &value);
- if (ret || value > 1)
+ if (ret)
+ return ret;
+
+ /* ...xhci only supports values 0 and 1 */
+ if (value > 1)
return -EINVAL;
xhci = hcd_to_xhci(dev_get_drvdata(dev));
@@ -1139,7 +1148,7 @@ static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);
-static struct attribute *dbc_dev_attributes[] = {
+static struct attribute *dbc_dev_attrs[] = {
&dev_attr_dbc.attr,
&dev_attr_dbc_idVendor.attr,
&dev_attr_dbc_idProduct.attr,
@@ -1147,10 +1156,7 @@ static struct attribute *dbc_dev_attributes[] = {
&dev_attr_dbc_bInterfaceProtocol.attr,
NULL
};
-
-static const struct attribute_group dbc_dev_attrib_grp = {
- .attrs = dbc_dev_attributes,
-};
+ATTRIBUTE_GROUPS(dbc_dev);
struct xhci_dbc *
xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
@@ -1176,7 +1182,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
spin_lock_init(&dbc->lock);
- ret = sysfs_create_group(&dev->kobj, &dbc_dev_attrib_grp);
+ ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
if (ret)
goto err;
@@ -1195,7 +1201,7 @@ void xhci_dbc_remove(struct xhci_dbc *dbc)
xhci_dbc_stop(dbc);
/* remove sysfs files */
- sysfs_remove_group(&dbc->dev->kobj, &dbc_dev_attrib_grp);
+ sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
kfree(dbc);
}
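
ATTRIBUTE_GROUPS(dbc_dev) generates roughly the boilerplate the patch removes, plus the NULL-terminated array that sysfs_create_groups()/sysfs_remove_groups() consume. Expanded by hand it is approximately:

#include <linux/sysfs.h>

/* What ATTRIBUTE_GROUPS(dbc_dev) expands to, approximately,
 * given the dbc_dev_attrs[] array defined above: */
static const struct attribute_group dbc_dev_group = {
	.attrs = dbc_dev_attrs,
};

static const struct attribute_group *dbc_dev_groups[] = {
	&dbc_dev_group,
	NULL,
};

A driver that later grows a second attribute group only needs to add it to the array; the create and remove calls stay unchanged.
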
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index 51a7ab3ba..e39e3ae16 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -82,6 +82,7 @@ enum dbc_state {
DS_CONNECTED,
DS_CONFIGURED,
DS_STALLED,
+ DS_MAX
};
struct dbc_ep {
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 6d142cd61..f8ba15e7c 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -693,7 +693,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
"command-ring",
xhci->debugfs_root);
- xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->interrupters[0]->event_ring,
"event-ring",
xhci->debugfs_root);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 621165868..450adaca6 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -323,6 +323,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
+EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
@@ -1739,6 +1740,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
}
command->status = 0;
+ /* set default timeout to 5000 ms */
+ command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
@@ -1853,6 +1856,31 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
kfree(ir);
}
+void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ unsigned int intr_num;
+
+ spin_lock_irq(&xhci->lock);
+
+ /* interrupter 0 is primary interrupter, don't touch it */
+ if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
+ xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
+ spin_unlock_irq(&xhci->lock);
+ return;
+ }
+
+ intr_num = ir->intr_num;
+
+ xhci_remove_interrupter(xhci, ir);
+ xhci->interrupters[intr_num] = NULL;
+
+ spin_unlock_irq(&xhci->lock);
+
+ xhci_free_interrupter(xhci, ir);
+}
+EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1860,10 +1888,14 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
- xhci_remove_interrupter(xhci, xhci->interrupter);
- xhci_free_interrupter(xhci, xhci->interrupter);
- xhci->interrupter = NULL;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
+ for (i = 0; i < xhci->max_interrupters; i++) {
+ if (xhci->interrupters[i]) {
+ xhci_remove_interrupter(xhci, xhci->interrupters[i]);
+ xhci_free_interrupter(xhci, xhci->interrupters[i]);
+ xhci->interrupters[i] = NULL;
+ }
+ }
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters");
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
@@ -1933,6 +1965,7 @@ no_bw:
for (i = 0; i < xhci->num_port_caps; i++)
kfree(xhci->port_caps[i].psi);
kfree(xhci->port_caps);
+ kfree(xhci->interrupters);
xhci->num_port_caps = 0;
xhci->usb2_rhub.ports = NULL;
@@ -1941,6 +1974,7 @@ no_bw:
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
xhci->port_caps = NULL;
+ xhci->interrupters = NULL;
xhci->page_size = 0;
xhci->page_shift = 0;
@@ -2246,18 +2280,20 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
}
static struct xhci_interrupter *
-xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags)
+xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
- unsigned int num_segs;
+ unsigned int num_segs = segs;
int ret;
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
- num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
+ /* number of ring segments should be greater than 0 */
+ if (segs <= 0)
+ num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
ERST_MAX_SEGS);
ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
@@ -2286,12 +2322,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
u64 erst_base;
u32 erst_size;
- if (intr_num > xhci->max_interrupters) {
+ if (intr_num >= xhci->max_interrupters) {
xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
intr_num, xhci->max_interrupters);
return -EINVAL;
}
+ if (xhci->interrupters[intr_num]) {
+ xhci_warn(xhci, "Interrupter %d\n already set up", intr_num);
+ return -EINVAL;
+ }
+
+ xhci->interrupters[intr_num] = ir;
+ ir->intr_num = intr_num;
ir->ir_set = &xhci->run_regs->ir_set[intr_num];
/* set ERST count with the number of entries in the segment table */
@@ -2311,10 +2354,52 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
return 0;
}
+struct xhci_interrupter *
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_interrupter *ir;
+ unsigned int i;
+ int err = -ENOSPC;
+
+ if (!xhci->interrupters || xhci->max_interrupters <= 1)
+ return NULL;
+
+ ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL);
+ if (!ir)
+ return NULL;
+
+ spin_lock_irq(&xhci->lock);
+
+ /* Find available secondary interrupter, interrupter 0 is reserved for primary */
+ for (i = 1; i < xhci->max_interrupters; i++) {
+ if (xhci->interrupters[i] == NULL) {
+ err = xhci_add_interrupter(xhci, ir, i);
+ break;
+ }
+ }
+
+ spin_unlock_irq(&xhci->lock);
+
+ if (err) {
+ xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n",
+ xhci->max_interrupters);
+ xhci_free_interrupter(xhci, ir);
+ return NULL;
+ }
+
+ xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
+ i, xhci->max_interrupters);
+
+ return ir;
+}
+EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+
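
Together with xhci_remove_secondary_interrupter() above, this gives in-kernel clients a way to claim an event ring of their own while interrupter 0 stays reserved for the driver itself. A hedged usage sketch, assuming a caller that already holds the usb_hcd and builds against the xhci headers:

#include <linux/usb/hcd.h>
#include "xhci.h"		/* prototypes shown in this patch */

static struct xhci_interrupter *my_ir;

static int my_claim_interrupter(struct usb_hcd *hcd)
{
	/* num_seg <= 0 lets xhci_alloc_interrupter() pick the
	 * event ring size itself, per the hunk above. */
	my_ir = xhci_create_secondary_interrupter(hcd, 0);
	if (!my_ir)
		return -ENOSPC;
	return 0;
}

static void my_release_interrupter(struct usb_hcd *hcd)
{
	xhci_remove_secondary_interrupter(hcd, my_ir);
	my_ir = NULL;
}
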
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
- dma_addr_t dma;
+ struct xhci_interrupter *ir;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ dma_addr_t dma;
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
@@ -2438,14 +2523,17 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Allocate and set up primary interrupter 0 with an event ring. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocating primary event ring");
- xhci->interrupter = xhci_alloc_interrupter(xhci, flags);
- if (!xhci->interrupter)
+	xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
+					  flags, dev_to_node(dev));
+	if (!xhci->interrupters)
+		goto fail;
+
+ ir = xhci_alloc_interrupter(xhci, 0, flags);
+ if (!ir)
goto fail;
- if (xhci_add_interrupter(xhci, xhci->interrupter, 0))
+ if (xhci_add_interrupter(xhci, ir, 0))
goto fail;
- xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
+ ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d6fc08e5d..b534ca975 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -95,10 +95,9 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
if (hcd->msix_enabled) {
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- int i;
- for (i = 0; i < xhci->msix_count; i++)
- synchronize_irq(pci_irq_vector(pdev, i));
+ /* for now, the driver only supports one primary interrupter */
+ synchronize_irq(pci_irq_vector(pdev, 0));
}
}
@@ -112,100 +111,18 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
if (hcd->irq > 0)
return;
- if (hcd->msix_enabled) {
- int i;
-
- for (i = 0; i < xhci->msix_count; i++)
- free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
- } else {
- free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
- }
-
+ free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
pci_free_irq_vectors(pdev);
hcd->msix_enabled = 0;
}
-/*
- * Set up MSI
- */
-static int xhci_setup_msi(struct xhci_hcd *xhci)
-{
- int ret;
- /*
- * TODO:Check with MSI Soc for sysdev
- */
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
-
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
- if (ret < 0) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "failed to allocate MSI entry");
- return ret;
- }
-
- ret = request_irq(pdev->irq, xhci_msi_irq,
- 0, "xhci_hcd", xhci_to_hcd(xhci));
- if (ret) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "disable MSI interrupt");
- pci_free_irq_vectors(pdev);
- }
-
- return ret;
-}
-
-/*
- * Set up MSI-X
- */
-static int xhci_setup_msix(struct xhci_hcd *xhci)
-{
- int i, ret;
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
-
- /*
- * calculate number of msi-x vectors supported.
- * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
- * with max number of interrupters based on the xhci HCSPARAMS1.
- * - num_online_cpus: maximum msi-x vectors per CPUs core.
- * Add additional 1 vector to ensure always available interrupt.
- */
- xhci->msix_count = min(num_online_cpus() + 1,
- HCS_MAX_INTRS(xhci->hcs_params1));
-
- ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
- PCI_IRQ_MSIX);
- if (ret < 0) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Failed to enable MSI-X");
- return ret;
- }
-
- for (i = 0; i < xhci->msix_count; i++) {
- ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
- "xhci_hcd", xhci_to_hcd(xhci));
- if (ret)
- goto disable_msix;
- }
-
- hcd->msix_enabled = 1;
- return ret;
-
-disable_msix:
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
- while (--i >= 0)
- free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
- pci_free_irq_vectors(pdev);
- return ret;
-}
-
+/* Try enabling MSI-X with MSI and legacy IRQ as fallback */
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev;
int ret;
- pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
/*
* Some Fresco Logic host controllers advertise MSI, but fail to
* generate interrupts. Don't even try to enable MSI.
@@ -218,32 +135,53 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
free_irq(hcd->irq, hcd);
hcd->irq = 0;
- ret = xhci_setup_msix(xhci);
- if (ret)
- /* fall back to msi*/
- ret = xhci_setup_msi(xhci);
+ /*
+ * calculate number of MSI-X vectors supported.
+ * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
+ * with max number of interrupters based on the xhci HCSPARAMS1.
+ * - num_online_cpus: maximum MSI-X vectors per CPUs core.
+ * Add additional 1 vector to ensure always available interrupt.
+ */
+ xhci->nvecs = min(num_online_cpus() + 1,
+ HCS_MAX_INTRS(xhci->hcs_params1));
- if (!ret) {
- hcd->msi_enabled = 1;
- return 0;
+	/* TODO: check with MSI SoC for sysdev */
+ xhci->nvecs = pci_alloc_irq_vectors(pdev, 1, xhci->nvecs,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (xhci->nvecs < 0) {
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "failed to allocate IRQ vectors");
+ goto legacy_irq;
}
+ ret = request_irq(pci_irq_vector(pdev, 0), xhci_msi_irq, 0, "xhci_hcd",
+ xhci_to_hcd(xhci));
+ if (ret)
+ goto free_irq_vectors;
+
+ hcd->msi_enabled = 1;
+ hcd->msix_enabled = pdev->msix_enabled;
+ return 0;
+
+free_irq_vectors:
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable %s interrupt",
+ pdev->msix_enabled ? "MSI-X" : "MSI");
+ pci_free_irq_vectors(pdev);
+
+legacy_irq:
if (!pdev->irq) {
xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
return -EINVAL;
}
- legacy_irq:
if (!strlen(hcd->irq_descr))
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
- /* fall back to legacy interrupt*/
- ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
- hcd->irq_descr, hcd);
+ /* fall back to legacy interrupt */
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, hcd->irq_descr, hcd);
if (ret) {
- xhci_err(xhci, "request interrupt %d failed\n",
- pdev->irq);
+ xhci_err(xhci, "request interrupt %d failed\n", pdev->irq);
return ret;
}
hcd->irq = pdev->irq;
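
The rewritten xhci_try_enable_msi() leans on pci_alloc_irq_vectors() accepting several interrupt types in one call: with PCI_IRQ_MSIX | PCI_IRQ_MSI the PCI core tries MSI-X first and falls back to MSI on its own, leaving only the shared legacy line for the driver to handle. The pattern for a generic PCI driver, sketched:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(struct pci_dev *pdev, void *ctx, int max_vecs)
{
	int nvecs, ret;

	/* One call: the PCI core tries MSI-X, then MSI. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, max_vecs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		goto legacy;

	ret = request_irq(pci_irq_vector(pdev, 0), my_irq, 0, "mydev", ctx);
	if (ret) {
		pci_free_irq_vectors(pdev);
		goto legacy;
	}
	return 0;

legacy:
	/* Last resort: the shared legacy line, if the device has one. */
	if (!pdev->irq)
		return -EINVAL;
	return request_irq(pdev->irq, my_irq, IRQF_SHARED, "mydev", ctx);
}
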
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d68e9abcd..3d071b875 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -131,6 +131,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "brcm,xhci-brcm-v2",
.data = &xhci_plat_brcm,
}, {
+ .compatible = "brcm,bcm2711-xhci",
+ .data = &xhci_plat_brcm,
+ }, {
.compatible = "brcm,bcm7445-xhci",
.data = &xhci_plat_brcm,
},
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 2647245d5..b2868217d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -372,9 +372,10 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
readl(&xhci->dba->doorbell[0]);
}
-static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
- return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+ return mod_delayed_work(system_wq, &xhci->cmd_timer,
+ msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}
static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
@@ -418,7 +419,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci);
xhci_ring_cmd_db(xhci);
}
}
@@ -1793,7 +1794,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
if (!list_is_singular(&xhci->cmd_list)) {
xhci->current_cmd = list_first_entry(&cmd->cmd_list,
struct xhci_command, cmd_list);
- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci);
} else if (xhci->current_cmd == cmd) {
xhci->current_cmd = NULL;
}
@@ -3021,9 +3022,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
return 0;
}
- /* Update SW event ring dequeue pointer */
- inc_deq(xhci, ir->event_ring);
-
/* Are there more items on the event ring? Caller will call us again to
* check.
*/
@@ -3037,30 +3035,26 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
- union xhci_trb *event_ring_deq,
bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != ir->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
- ir->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
- /*
- * Per 4.9.4, Software writes to the ERDP register shall
- * always advance the Event Ring Dequeue Pointer value.
- */
- if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK))
- return;
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall always advance
+ * the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
+ return;
- /* Update HC event ring dequeue pointer */
- temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
- temp_64 |= deq & ERST_PTR_MASK;
- }
+ /* Update HC event ring dequeue pointer */
+ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
+ temp_64 |= deq & ERST_PTR_MASK;
/* Clear the event handler busy flag (RW1C) */
if (clear_ehb)
@@ -3068,6 +3062,59 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
+/* Clear the interrupt pending bit for a specific interrupter. */
+static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir)
+{
+ if (!ir->ip_autoclear) {
+ u32 irq_pending;
+
+ irq_pending = readl(&ir->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &ir->ir_set->irq_pending);
+ }
+}
+
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ int event_loop = 0;
+ u64 temp;
+
+ xhci_clear_interrupt_pending(xhci, ir);
+
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
+ return -ENODEV;
+ }
+
+ while (xhci_handle_event(xhci, ir) > 0) {
+ /*
+ * If half a segment of events have been handled in one go then
+ * update ERDP, and force isoc trbs to interrupt more often
+ */
+ if (event_loop++ > TRBS_PER_SEGMENT / 2) {
+ xhci_update_erst_dequeue(xhci, ir, false);
+
+ if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+ ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
+
+ event_loop = 0;
+ }
+
+ /* Update SW event ring dequeue pointer */
+ inc_deq(xhci, ir->event_ring);
+ }
+
+ xhci_update_erst_dequeue(xhci, ir, true);
+
+ return 0;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3076,24 +3123,21 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- union xhci_trb *event_ring_deq;
- struct xhci_interrupter *ir;
- irqreturn_t ret = IRQ_NONE;
- u64 temp_64;
+ irqreturn_t ret = IRQ_HANDLED;
u32 status;
- int event_loop = 0;
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
status = readl(&xhci->op_regs->status);
if (status == ~(u32)0) {
xhci_hc_died(xhci);
- ret = IRQ_HANDLED;
goto out;
}
- if (!(status & STS_EINT))
+ if (!(status & STS_EINT)) {
+ ret = IRQ_NONE;
goto out;
+ }
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
@@ -3103,7 +3147,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
- ret = IRQ_HANDLED;
goto out;
}
@@ -3116,48 +3159,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
- ir = xhci->interrupter;
- if (!hcd->msi_enabled) {
- u32 irq_pending;
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
- }
-
- if (xhci->xhc_state & XHCI_STATE_DYING ||
- xhci->xhc_state & XHCI_STATE_HALTED) {
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- /* Clear the event handler busy flag (RW1C);
- * the event ring should be empty.
- */
- temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB,
- &ir->ir_set->erst_dequeue);
- ret = IRQ_HANDLED;
- goto out;
- }
-
- event_ring_deq = ir->event_ring->dequeue;
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- while (xhci_handle_event(xhci, ir) > 0) {
- if (event_loop++ < TRBS_PER_SEGMENT / 2)
- continue;
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
- event_ring_deq = ir->event_ring->dequeue;
-
- /* ring is half-full, force isoc trbs to interrupt more often */
- if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
- xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
-
- event_loop = 0;
- }
-
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
- ret = IRQ_HANDLED;
-
+ xhci_handle_events(xhci, xhci->interrupters[0]);
out:
spin_unlock(&xhci->lock);
@@ -4018,7 +4020,8 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
}
/* Check if we should generate event interrupt for a TD in an isoc URB */
-static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
+ struct xhci_interrupter *ir)
{
if (xhci->hci_version < 0x100)
return false;
@@ -4029,8 +4032,8 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
* If AVOID_BEI is set the host handles full event rings poorly,
* generate an event at least every 8th TD to clear the event ring
*/
- if (i && xhci->quirks & XHCI_AVOID_BEI)
- return !!(i % xhci->isoc_bei_interval);
+ if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
+ return !!(i % ir->isoc_bei_interval);
return true;
}
@@ -4039,6 +4042,7 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
+ struct xhci_interrupter *ir;
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
@@ -4056,6 +4060,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ir = xhci->interrupters[0];
num_tds = urb->number_of_packets;
if (num_tds < 1) {
@@ -4143,7 +4148,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
- if (trb_block_event_intr(xhci, num_tds, i))
+ if (trb_block_event_intr(xhci, num_tds, i, ir))
field |= TRB_BEI;
}
/* Calculate TRB length */
@@ -4343,7 +4348,7 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
/* if there are no other commands queued we start the timeout timer */
if (list_empty(&xhci->cmd_list)) {
xhci->current_cmd = cmd;
- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci);
}
list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
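
xhci_mod_cmd_timer() now derives the delay from the command itself: every command gets XHCI_CMD_DEFAULT_TIMEOUT at allocation (see the xhci-mem.c hunk earlier), and a caller may stretch timeout_ms before queueing. The underlying mechanism is just mod_delayed_work() with a per-object value; a reduced sketch:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_cmd {
	unsigned int timeout_ms;	/* defaulted at allocation */
};

/* Assumed initialized elsewhere with INIT_DELAYED_WORK(). */
static struct delayed_work my_cmd_timer;
static struct my_cmd *my_current_cmd;

/* (Re)arm the shared watchdog for whichever command is current;
 * returns true if a pending timer was modified. */
static bool my_mod_cmd_timer(void)
{
	return mod_delayed_work(system_wq, &my_cmd_timer,
				msecs_to_jiffies(my_current_cmd->timeout_ms));
}
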
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 943b87d15..afccd58c9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -480,7 +480,7 @@ static int xhci_init(struct usb_hcd *hcd)
static int xhci_run_finished(struct xhci_hcd *xhci)
{
- struct xhci_interrupter *ir = xhci->interrupter;
+ struct xhci_interrupter *ir = xhci->interrupters[0];
unsigned long flags;
u32 temp;
@@ -532,12 +532,15 @@ int xhci_run(struct usb_hcd *hcd)
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct xhci_interrupter *ir = xhci->interrupter;
+ struct xhci_interrupter *ir = xhci->interrupters[0];
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is setup.
*/
hcd->uses_new_polling = 1;
+ if (hcd->msi_enabled)
+ ir->ip_autoclear = true;
+
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
@@ -596,7 +599,7 @@ void xhci_stop(struct usb_hcd *hcd)
{
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct xhci_interrupter *ir = xhci->interrupter;
+ struct xhci_interrupter *ir = xhci->interrupters[0];
mutex_lock(&xhci->mutex);
@@ -692,36 +695,51 @@ EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
- struct xhci_interrupter *ir = xhci->interrupter;
+ struct xhci_interrupter *ir;
+ unsigned int i;
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
- if (!ir)
- return;
+ /* save both the primary and all secondary interrupters */
+ /* FIXME: should we lock to prevent a race with secondary interrupter removal? */
+ for (i = 0; i < xhci->max_interrupters; i++) {
+ ir = xhci->interrupters[i];
+ if (!ir)
+ continue;
- ir->s3_erst_size = readl(&ir->ir_set->erst_size);
- ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
- ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
- ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+ ir->s3_erst_size = readl(&ir->ir_set->erst_size);
+ ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
+ ir->s3_irq_control = readl(&ir->ir_set->irq_control);
+ }
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
- struct xhci_interrupter *ir = xhci->interrupter;
+ struct xhci_interrupter *ir;
+ unsigned int i;
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
- writel(ir->s3_erst_size, &ir->ir_set->erst_size);
- xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
- xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
- writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
- writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+
+ /* FIXME: should we lock to protect against freeing of interrupters? */
+ for (i = 0; i < xhci->max_interrupters; i++) {
+ ir = xhci->interrupters[i];
+ if (!ir)
+ continue;
+
+ writel(ir->s3_erst_size, &ir->ir_set->erst_size);
+ xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
+ xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
+ writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
+ writel(ir->s3_irq_control, &ir->ir_set->irq_control);
+ }
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -1084,7 +1102,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- xhci_disable_interrupter(xhci->interrupter);
+ xhci_disable_interrupter(xhci->interrupters[0]);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -1440,10 +1458,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
* descriptor. If the usb_device's max packet size changes after that point,
* we need to issue an evaluate context command and wait on it.
*/
-static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
+static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
- struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
struct xhci_command *command;
@@ -1451,11 +1467,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
int hw_max_packet_size;
int ret = 0;
- out_ctx = xhci->devs[slot_id]->out_ctx;
- ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
- max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
- if (hw_max_packet_size != max_packet_size) {
+ max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);
+
+ if (hw_max_packet_size == max_packet_size)
+ return 0;
+
+ switch (max_packet_size) {
+ case 8: case 16: case 32: case 64: case 9:
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Max Packet Size for ep 0 changed.");
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
@@ -1467,28 +1487,22 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Issuing evaluate context command.");
- /* Set up the input context flags for the command */
- /* FIXME: This won't work if a non-default control endpoint
- * changes max packet sizes.
- */
-
- command = xhci_alloc_command(xhci, true, mem_flags);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
- command->in_ctx = xhci->devs[slot_id]->in_ctx;
+ command->in_ctx = vdev->in_ctx;
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
__func__);
ret = -ENOMEM;
- goto command_cleanup;
+ break;
}
/* Set up the modified control endpoint 0 */
- xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, ep_index);
+ xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);
- ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
@@ -1496,17 +1510,20 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
ctrl_ctx->drop_flags = 0;
- ret = xhci_configure_endpoint(xhci, urb->dev, command,
- true, false);
-
- /* Clean up the input context for later use by bandwidth
- * functions.
- */
+ ret = xhci_configure_endpoint(xhci, vdev->udev, command,
+ true, false);
+ /* Clean up the input context for later use by bandwidth functions */
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
-command_cleanup:
- kfree(command->completion);
- kfree(command);
+ break;
+ default:
+ dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
+ max_packet_size);
+ return -EINVAL;
}
+
+ kfree(command->completion);
+ kfree(command);
+
return ret;
}
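
The valid-size switch above includes a seemingly stray 9: for SuperSpeed devices the descriptor's bMaxPacketSize0 is stored as an exponent, so a raw value of 9 denotes 2^9 = 512 bytes. A hypothetical decode helper, for illustration only (not part of the patch):

/* Hypothetical helper: decode ep0's max packet size as reported in the
 * device descriptor. */
static unsigned int decode_ep0_maxp(unsigned int bMaxPacketSize0, bool superspeed)
{
	/* SuperSpeed encodes an exponent: 9 -> 1 << 9 == 512 bytes */
	return superspeed ? (1u << bMaxPacketSize0) : bMaxPacketSize0;
}
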
@@ -1524,24 +1541,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct urb_priv *urb_priv;
int num_tds;
- if (!urb)
- return -EINVAL;
- ret = xhci_check_args(hcd, urb->dev, urb->ep,
- true, true, __func__);
- if (ret <= 0)
- return ret ? ret : -EINVAL;
-
- slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
-
- if (!HCD_HW_ACCESSIBLE(hcd))
- return -ESHUTDOWN;
-
- if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
- xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
- return -ENODEV;
- }
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
num_tds = urb->number_of_packets;
@@ -1563,22 +1563,27 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
trace_xhci_urb_enqueue(urb);
- if (usb_endpoint_xfer_control(&urb->ep->desc)) {
- /* Check to see if the max packet size for the default control
- * endpoint changed during FS device enumeration
- */
- if (urb->dev->speed == USB_SPEED_FULL) {
- ret = xhci_check_maxpacket(xhci, slot_id,
- ep_index, urb, mem_flags);
- if (ret < 0) {
- xhci_urb_free_priv(urb_priv);
- urb->hcpriv = NULL;
- return ret;
- }
- }
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ ret = xhci_check_args(hcd, urb->dev, urb->ep,
+ true, true, __func__);
+ if (ret <= 0) {
+ ret = ret ? ret : -EINVAL;
+ goto free_priv;
}
- spin_lock_irqsave(&xhci->lock, flags);
+ slot_id = urb->dev->slot_id;
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ ret = -ESHUTDOWN;
+ goto free_priv;
+ }
+
+ if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
+ xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
+ ret = -ENODEV;
+ goto free_priv;
+ }
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
@@ -1586,6 +1591,9 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
ret = -ESHUTDOWN;
goto free_priv;
}
+
+ ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
+
if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
*ep_state);
@@ -3089,6 +3097,9 @@ done:
* of an endpoint that isn't in the halted state this function will issue a
* configure endpoint command with the Drop and Add bits set for the target
* endpoint. Refer to the additional note in the xHCI specification, section 4.6.8.
+ *
+ * vdev may be lost due to xHC restore error and re-initialization during S3/S4
+ * resume. A new vdev will be allocated later by xhci_discover_or_reset_device()
*/
static void xhci_endpoint_reset(struct usb_hcd *hcd,
@@ -3106,19 +3117,37 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
int err;
xhci = hcd_to_xhci(hcd);
+ ep_index = xhci_get_endpoint_index(&host_ep->desc);
+
+ /*
+ * The USB core assumes a max packet value for ep0 on FS devices until the
+ * real value is read from the descriptor. The core resets ep0 if the values
+ * mismatch. Reconfigure the xHCI ep0 endpoint context here in that case.
+ */
+ if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
+
+ udev = container_of(host_ep, struct usb_device, ep0);
+ if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
+ return;
+
+ vdev = xhci->devs[udev->slot_id];
+ if (!vdev || vdev->udev != udev)
+ return;
+
+ xhci_check_ep0_maxpacket(xhci, vdev);
+
+ /* Nothing else should be done here for ep0 during ep reset */
+ return;
+ }
+
if (!host_ep->hcpriv)
return;
udev = (struct usb_device *) host_ep->hcpriv;
vdev = xhci->devs[udev->slot_id];
- /*
- * vdev may be lost due to xHC restore error and re-initialization
- * during S3/S4 resume. A new vdev will be allocated later by
- * xhci_discover_or_reset_device()
- */
if (!udev->slot_id || !vdev)
return;
- ep_index = xhci_get_endpoint_index(&host_ep->desc);
+
ep = &vdev->eps[ep_index];
/* Bail out if toggle is already being cleared by an endpoint reset */
@@ -4031,12 +4060,18 @@ disable_slot:
return 0;
}
-/*
- * Issue an Address Device command and optionally send a corresponding
- * SetAddress request to the device.
+/**
+ * xhci_setup_device - issues an Address Device command to assign a unique
+ * USB bus address.
+ * @hcd: USB host controller data structure.
+ * @udev: USB device structure representing the connected device.
+ * @setup: Enum specifying setup mode: address only or with context.
+ * @timeout_ms: Max wait time (ms) for the command operation to complete.
+ *
+ * Return: 0 if successful; otherwise, negative error code.
*/
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
- enum xhci_setup_dev setup)
+ enum xhci_setup_dev setup, unsigned int timeout_ms)
{
const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
unsigned long flags;
@@ -4093,6 +4128,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
}
command->in_ctx = virt_dev->in_ctx;
+ command->timeout_ms = timeout_ms;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
@@ -4219,14 +4255,16 @@ out:
return ret;
}
-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
+ unsigned int timeout_ms)
{
- return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
}
static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
- return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
+ XHCI_CMD_DEFAULT_TIMEOUT);
}
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6f53a950d..bf05103aa 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -791,6 +791,8 @@ struct xhci_command {
struct completion *completion;
union xhci_trb *command_trb;
struct list_head cmd_list;
+ /* xHCI command response timeout in milliseconds */
+ unsigned int timeout_ms;
};
/* drop context bitmasks */
@@ -1551,8 +1553,11 @@ struct xhci_td {
unsigned int num_trbs;
};
-/* xHCI command default timeout value */
-#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
+/*
+ * xHCI command default timeout value in milliseconds.
+ * USB 3.2 spec, section 9.2.6.1
+ */
+#define XHCI_CMD_DEFAULT_TIMEOUT 5000
/* command descriptor */
struct xhci_cd {
@@ -1683,6 +1688,8 @@ struct xhci_interrupter {
struct xhci_erst erst;
struct xhci_intr_reg __iomem *ir_set;
unsigned int intr_num;
+ bool ip_autoclear;
+ u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
u32 s3_irq_pending;
u32 s3_irq_control;
@@ -1755,14 +1762,13 @@ struct xhci_hcd {
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- u32 isoc_bei_interval;
int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
/* Valid values are 12 to 20, inclusive */
int page_shift;
- /* msi-x vectors */
- int msix_count;
+ /* MSI-X/MSI vectors */
+ int nvecs;
/* optional clocks */
struct clk *clk;
struct clk *reg_clk;
@@ -1770,7 +1776,7 @@ struct xhci_hcd {
struct reset_control *reset;
/* data structures */
struct xhci_device_context_array *dcbaa;
- struct xhci_interrupter *interrupter;
+ struct xhci_interrupter **interrupters;
struct xhci_ring *cmd_ring;
unsigned int cmd_ring_state;
#define CMD_RING_STATE_RUNNING (1 << 0)
@@ -2081,6 +2087,10 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags);
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
+struct xhci_interrupter *
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg);
+void xhci_remove_secondary_interrupter(struct usb_hcd *hcd,
+ struct xhci_interrupter *ir);
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
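
With timeout_ms stored per command, the timer-arming helper seen in queue_command() no longer takes a timeout argument; presumably it converts the stored millisecond value itself. A sketch under that assumption (xhci_mod_cmd_timer's real body is not shown in this diff; xhci->cmd_timer is a delayed_work):

/* Assumed shape of the no-argument helper used in queue_command() */
static void xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
	/* msecs_to_jiffies() converts the per-command ms value to jiffies */
	mod_delayed_work(system_wq, &xhci->cmd_timer,
			 msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}
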
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 1e3df27ba..6d28467ce 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -501,7 +501,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
dev->minor, cmd, arg);
retval = 0;
- io_res = 0;
switch (cmd) {
case IOW_WRITE:
if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 2b45404e9..e162838be 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -5,8 +5,10 @@
* Copyright (c) 2022, Google LLC
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/export.h>
+#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -60,24 +62,35 @@ struct onboard_hub {
bool going_away;
struct list_head udev_list;
struct mutex lock;
+ struct clk *clk;
};
static int onboard_hub_power_on(struct onboard_hub *hub)
{
int err;
- err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
+ err = clk_prepare_enable(hub->clk);
if (err) {
- dev_err(hub->dev, "failed to enable supplies: %d\n", err);
+ dev_err(hub->dev, "failed to enable clock: %pe\n", ERR_PTR(err));
return err;
}
+ err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
+ if (err) {
+ dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err));
+ goto disable_clk;
+ }
+
fsleep(hub->pdata->reset_us);
gpiod_set_value_cansleep(hub->reset_gpio, 0);
hub->is_powered_on = true;
return 0;
+
+disable_clk:
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int onboard_hub_power_off(struct onboard_hub *hub)
@@ -88,10 +101,12 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
err = regulator_bulk_disable(hub->pdata->num_supplies, hub->supplies);
if (err) {
- dev_err(hub->dev, "failed to disable supplies: %d\n", err);
+ dev_err(hub->dev, "failed to disable supplies: %pe\n", ERR_PTR(err));
return err;
}
+ clk_disable_unprepare(hub->clk);
+
hub->is_powered_on = false;
return 0;
@@ -233,9 +248,9 @@ static void onboard_hub_attach_usb_driver(struct work_struct *work)
{
int err;
- err = driver_attach(&onboard_hub_usbdev_driver.drvwrap.driver);
+ err = driver_attach(&onboard_hub_usbdev_driver.driver);
if (err)
- pr_err("Failed to attach USB driver: %d\n", err);
+ pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err));
}
static int onboard_hub_probe(struct platform_device *pdev)
@@ -262,10 +277,14 @@ static int onboard_hub_probe(struct platform_device *pdev)
err = devm_regulator_bulk_get(dev, hub->pdata->num_supplies, hub->supplies);
if (err) {
- dev_err(dev, "Failed to get regulator supplies: %d\n", err);
+ dev_err(dev, "Failed to get regulator supplies: %pe\n", ERR_PTR(err));
return err;
}
+ hub->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(hub->clk))
+ return dev_err_probe(dev, PTR_ERR(hub->clk), "failed to get clock\n");
+
hub->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(hub->reset_gpio))
@@ -426,6 +445,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 */
{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
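
devm_clk_get_optional() returns NULL rather than an error when the device has no clock, and the clk API treats a NULL clk as a no-op, which is why onboard_hub_power_on() needs no NULL check. Minimal sketch of that pattern, using the names from the patch:

hub->clk = devm_clk_get_optional(dev, NULL);	/* NULL if no clock is wired up */
if (IS_ERR(hub->clk))
	return dev_err_probe(dev, PTR_ERR(hub->clk), "failed to get clock\n");

err = clk_prepare_enable(hub->clk);	/* harmless no-op when hub->clk is NULL */
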
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index 292110e64..f360d5cf8 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -36,6 +36,11 @@ static const struct onboard_hub_pdata cypress_hx3_data = {
.num_supplies = 2,
};
+static const struct onboard_hub_pdata cypress_hx2vl_data = {
+ .reset_us = 1,
+ .num_supplies = 1,
+};
+
static const struct onboard_hub_pdata genesys_gl850g_data = {
.reset_us = 3,
.num_supplies = 1,
@@ -61,6 +66,7 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
{ .compatible = "usb4b4,6506", .data = &cypress_hx3_data, },
+ { .compatible = "usb4b4,6570", .data = &cypress_hx2vl_data, },
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
{ .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
{ .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index 7f371ea12..26e9b8749 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -205,6 +205,9 @@ static int eud_probe(struct platform_device *pdev)
return PTR_ERR(chip->mode_mgr);
chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0)
+ return chip->irq;
+
ret = devm_request_threaded_irq(&pdev->dev, chip->irq, handle_eud_irq,
handle_eud_irq_thread, IRQF_ONESHOT, NULL, chip);
if (ret)
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index c640f98d2..9a0649d23 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -34,6 +34,8 @@
#define YUREX_BUF_SIZE 8
#define YUREX_WRITE_TIMEOUT (HZ*2)
+#define MAX_S64_STRLEN 20 /* {-}922337203685477580{7,8} */
+
/* table of devices that work with this driver */
static struct usb_device_id yurex_table[] = {
{ USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) },
@@ -401,7 +403,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
{
struct usb_yurex *dev;
int len = 0;
- char in_buffer[20];
+ char in_buffer[MAX_S64_STRLEN];
unsigned long flags;
dev = file->private_data;
@@ -412,14 +414,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
return -ENODEV;
}
+ if (WARN_ON_ONCE(dev->bbu > S64_MAX || dev->bbu < S64_MIN)) {
+ mutex_unlock(&dev->io_mutex);
+ return -EIO;
+ }
+
spin_lock_irqsave(&dev->lock, flags);
- len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+ len = scnprintf(in_buffer, MAX_S64_STRLEN, "%lld\n", dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->io_mutex);
- if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
- return -EIO;
-
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
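
The conversion matters because the two return values differ: scnprintf() reports the bytes actually stored (excluding the NUL), which is always safe to hand to simple_read_from_buffer(), while snprintf() reports the length the untruncated string would have had. An illustrative comparison with a deliberately small 8-byte buffer:

char buf[8];
int a, b;

a = snprintf(buf, sizeof(buf), "%lld\n", 9223372036854775807LL);
/* a == 20: length of the full "9223372036854775807\n", despite truncation */
b = scnprintf(buf, sizeof(buf), "%lld\n", 9223372036854775807LL);
/* b == 7: bytes actually written, so b is a valid read length */
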
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 98ab0cc47..3c23805ab 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -35,9 +35,9 @@ static int mon_stat_open(struct inode *inode, struct file *file)
mbus = inode->i_private;
- sp->slen = snprintf(sp->str, STAT_BUF_SIZE,
- "nreaders %d events %u text_lost %u\n",
- mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost);
+ sp->slen = scnprintf(sp->str, STAT_BUF_SIZE,
+ "nreaders %d events %u text_lost %u\n",
+ mbus->nreaders, mbus->cnt_events, mbus->cnt_text_lost);
file->private_data = sp;
return 0;
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 39cb14164..2fe9b95ba 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -352,7 +352,7 @@ static int mon_text_open(struct inode *inode, struct file *file)
rp->r.rnf_error = mon_text_error;
rp->r.rnf_complete = mon_text_complete;
- snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
+ scnprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
rp->e_slab = kmem_cache_create(rp->slab_name,
sizeof(struct mon_event_text), sizeof(long), 0,
mon_text_ctor);
@@ -700,46 +700,28 @@ static const struct file_operations mon_fops_text_u = {
int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
- enum { NAMESZ = 10 };
+ enum { NAMESZ = 12 };
char name[NAMESZ];
int busnum = ubus ? ubus->busnum : 0;
- int rc;
if (mon_dir == NULL)
return 0;
if (ubus != NULL) {
- rc = snprintf(name, NAMESZ, "%dt", busnum);
- if (rc <= 0 || rc >= NAMESZ)
- goto err_print_t;
+ scnprintf(name, NAMESZ, "%dt", busnum);
mbus->dent_t = debugfs_create_file(name, 0600, mon_dir, mbus,
&mon_fops_text_t);
}
- rc = snprintf(name, NAMESZ, "%du", busnum);
- if (rc <= 0 || rc >= NAMESZ)
- goto err_print_u;
+ scnprintf(name, NAMESZ, "%du", busnum);
mbus->dent_u = debugfs_create_file(name, 0600, mon_dir, mbus,
&mon_fops_text_u);
- rc = snprintf(name, NAMESZ, "%ds", busnum);
- if (rc <= 0 || rc >= NAMESZ)
- goto err_print_s;
+ scnprintf(name, NAMESZ, "%ds", busnum);
mbus->dent_s = debugfs_create_file(name, 0600, mon_dir, mbus,
&mon_fops_stat);
return 1;
-
-err_print_s:
- debugfs_remove(mbus->dent_u);
- mbus->dent_u = NULL;
-err_print_u:
- if (ubus != NULL) {
- debugfs_remove(mbus->dent_t);
- mbus->dent_t = NULL;
- }
-err_print_t:
- return 0;
}
void mon_text_del(struct mon_bus *mbus)
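
NAMESZ grows from 10 to 12 to cover the worst case, after which scnprintf() cannot truncate and the old error unwinding became dead code. Assuming a non-negative bus number, an int is at most 10 digits, plus one suffix character and the NUL terminator:

enum { NAMESZ = 12 };
char name[NAMESZ];

/* worst case: "2147483647" (10) + 't' (1) + '\0' (1) == 12 bytes */
scnprintf(name, NAMESZ, "%dt", 2147483647);
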
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 770081b82..9ab50f26d 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -46,15 +46,21 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
static int nop_set_suspend(struct usb_phy *x, int suspend)
{
struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
+ int ret = 0;
- if (!IS_ERR(nop->clk)) {
- if (suspend)
+ if (suspend) {
+ if (!IS_ERR(nop->clk))
clk_disable_unprepare(nop->clk);
- else
+ if (!IS_ERR(nop->vcc) && !device_may_wakeup(x->dev))
+ ret = regulator_disable(nop->vcc);
+ } else {
+ if (!IS_ERR(nop->vcc) && !device_may_wakeup(x->dev))
+ ret = regulator_enable(nop->vcc);
+ if (!IS_ERR(nop->clk))
clk_prepare_enable(nop->clk);
}
- return 0;
+ return ret;
}
static void nop_reset(struct usb_phy_generic *nop)
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index c3ce6b105..da09cff55 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -179,16 +179,16 @@ static ssize_t vbus_show(struct device *dev,
switch (twl->linkstat) {
case MUSB_VBUS_VALID:
- ret = snprintf(buf, PAGE_SIZE, "vbus\n");
+ ret = sysfs_emit(buf, "vbus\n");
break;
case MUSB_ID_GROUND:
- ret = snprintf(buf, PAGE_SIZE, "id\n");
+ ret = sysfs_emit(buf, "id\n");
break;
case MUSB_VBUS_OFF:
- ret = snprintf(buf, PAGE_SIZE, "none\n");
+ ret = sysfs_emit(buf, "none\n");
break;
default:
- ret = snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
+ ret = sysfs_emit(buf, "UNKNOWN\n");
}
spin_unlock_irqrestore(&twl->lock, flags);
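
sysfs_emit() is the preferred replacement for snprintf() in sysfs show() callbacks: it knows the buffer is a full page, enforces the PAGE_SIZE bound, and warns if buf is not page-aligned. A minimal show() sketch (the attribute name here is hypothetical):

static ssize_t linkstat_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	/* sysfs_emit() returns the number of bytes written to buf */
	return sysfs_emit(buf, "%s\n", "vbus");
}
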
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 3eb8dc3a1..6c812d01b 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -113,7 +113,7 @@ static ssize_t new_id_store(struct device_driver *driver,
if (retval >= 0 && usb_drv->usb_driver != NULL)
retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
usb_drv->usb_driver->id_table,
- &usb_drv->usb_driver->drvwrap.driver,
+ &usb_drv->usb_driver->driver,
buf, count);
return retval;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 55a65d941..8a5846d4a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 17b09f03e..f1e91eb7f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1521,7 +1521,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
/* Now set udriver's id_table and look for matches */
udriver->id_table = id_table;
- rc = driver_attach(&udriver->drvwrap.driver);
+ rc = driver_attach(&udriver->driver);
return 0;
err_deregister_drivers:
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 0774ba22f..177fa6cd1 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -98,26 +98,26 @@ static ssize_t truinst_show(struct device *dev, struct device_attribute *attr,
struct usb_device *udev = interface_to_usbdev(intf);
int result;
if (swi_tru_install == TRU_FORCE_MS) {
- result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
+ result = sysfs_emit(buf, "Forced Mass Storage\n");
} else {
swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
if (!swocInfo) {
- snprintf(buf, PAGE_SIZE, "Error\n");
+ sysfs_emit(buf, "Error\n");
return -ENOMEM;
}
result = sierra_get_swoc_info(udev, swocInfo);
if (result < 0) {
dev_dbg(dev, "SWIMS: failed SWoC query\n");
kfree(swocInfo);
- snprintf(buf, PAGE_SIZE, "Error\n");
+ sysfs_emit(buf, "Error\n");
return -EIO;
}
debug_swoc(dev, swocInfo);
- result = snprintf(buf, PAGE_SIZE,
- "REV=%02d SKU=%04X VER=%04X\n",
- swocInfo->rev,
- swocInfo->LinuxSKU,
- swocInfo->LinuxVer);
+ result = sysfs_emit(buf,
+ "REV=%02d SKU=%04X VER=%04X\n",
+ swocInfo->rev,
+ swocInfo->LinuxSKU,
+ swocInfo->LinuxVer);
kfree(swocInfo);
}
return result;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index d002eead6..08953f0d4 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1251,7 +1251,7 @@ static struct usb_driver uas_driver = {
.suspend = uas_suspend,
.resume = uas_resume,
.reset_resume = uas_reset_resume,
- .drvwrap.driver.shutdown = uas_shutdown,
+ .driver.shutdown = uas_shutdown,
.id_table = uas_usb_ids,
};
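
All the .drvwrap.driver edits in this series follow from one layout change in 6.8: struct usb_driver embeds the generic device_driver directly instead of wrapping it. A before/after shape sketch (fields elided; not a compilable pair):

/* before 6.8 */
struct usb_driver {
	/* ... */
	struct usbdrv_wrap drvwrap;	/* contained a struct device_driver */
};

/* 6.8 */
struct usb_driver {
	/* ... */
	struct device_driver driver;	/* embedded directly */
};
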
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 2373cd39b..e6a100376 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -478,7 +478,7 @@ static int altmode_id_get(struct device *dev)
else
ids = &to_typec_port(dev)->mode_ids;
- return ida_simple_get(ids, 0, 0, GFP_KERNEL);
+ return ida_alloc(ids, GFP_KERNEL);
}
static void altmode_id_remove(struct device *dev, int id)
@@ -492,7 +492,7 @@ static void altmode_id_remove(struct device *dev, int id)
else
ids = &to_typec_port(dev)->mode_ids;
- ida_simple_remove(ids, id);
+ ida_free(ids, id);
}
static void typec_altmode_release(struct device *dev)
@@ -1805,7 +1805,7 @@ static void typec_release(struct device *dev)
{
struct typec_port *port = to_typec_port(dev);
- ida_simple_remove(&typec_index_ida, port->id);
+ ida_free(&typec_index_ida, port->id);
ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
@@ -2238,7 +2238,8 @@ void typec_port_register_altmodes(struct typec_port *port,
struct typec_altmode_desc desc;
struct typec_altmode *alt;
size_t index = 0;
- u32 svid, vdo;
+ u16 svid;
+ u32 vdo;
int ret;
altmodes_node = device_get_named_child_node(&port->dev, "altmodes");
@@ -2246,7 +2247,7 @@ void typec_port_register_altmodes(struct typec_port *port,
return; /* No altmodes specified */
fwnode_for_each_child_node(altmodes_node, child) {
- ret = fwnode_property_read_u32(child, "svid", &svid);
+ ret = fwnode_property_read_u16(child, "svid", &svid);
if (ret) {
dev_err(&port->dev, "Error reading svid for altmode %s\n",
fwnode_get_name(child));
@@ -2304,7 +2305,7 @@ struct typec_port *typec_register_port(struct device *parent,
if (!port)
return ERR_PTR(-ENOMEM);
- id = ida_simple_get(&typec_index_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&typec_index_ida, GFP_KERNEL);
if (id < 0) {
kfree(port);
return ERR_PTR(id);
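
The ida_simple_*() helpers are deprecated wrappers; with a max argument of 0, ida_simple_get() already meant "no upper bound", so the replacement here is mechanical. Equivalence sketch:

int id;

id = ida_simple_get(&typec_index_ida, 0, 0, GFP_KERNEL);	/* old: max 0 == no limit */
id = ida_alloc(&typec_index_ida, GFP_KERNEL);			/* new: allocates in [0, INT_MAX] */

ida_simple_remove(&typec_index_ida, id);	/* old */
ida_free(&typec_index_ida, id);			/* new */
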
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 816b9bd08..d2cb5e733 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -40,7 +40,7 @@ config TYPEC_MUX_NB7VPQ904M
tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
depends on I2C
depends on DRM || DRM=n
- select DRM_PANEL_BRIDGE if DRM
+ select DRM_AUX_BRIDGE if DRM_BRIDGE && OF
select REGMAP_I2C
help
Say Y or M if your system has a On Semiconductor NB7VPQ904M Type-C
@@ -56,4 +56,14 @@ config TYPEC_MUX_PTN36502
Say Y or M if your system has a NXP PTN36502 Type-C redriver chip
found on some devices with a Type-C port.
+config TYPEC_MUX_WCD939X_USBSS
+ tristate "Qualcomm WCD939x USBSS Analog Audio Switch driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Driver for the Qualcomm WCD939x Audio Codec USBSS domain which
+ provides support for muxing analog audio and sideband signals on a
+ common USB Type-C connector.
+ If compiled as a module, the module will be named wcd939x-usbss.
+
endmenu
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 9d6a5557b..57dc9ac6f 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
+obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS) += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
index cda206cf0..b17826713 100644
--- a/drivers/usb/typec/mux/nb7vpq904m.c
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -11,7 +11,7 @@
#include <linux/regmap.h>
#include <linux/bitfield.h>
#include <linux/of_graph.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
@@ -70,8 +70,6 @@ struct nb7vpq904m {
bool swap_data_lanes;
struct typec_switch *typec_switch;
- struct drm_bridge bridge;
-
struct mutex lock; /* protect non-concurrent retimer & switch */
enum typec_orientation orientation;
@@ -297,44 +295,6 @@ static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_re
return ret;
}
-#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
-static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge);
- struct drm_bridge *next_bridge;
-
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
- return -EINVAL;
-
- next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0);
- if (IS_ERR(next_bridge)) {
- dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
- return PTR_ERR(next_bridge);
- }
-
- return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = {
- .attach = nb7vpq904m_bridge_attach,
-};
-
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
- nb7->bridge.funcs = &nb7vpq904m_bridge_funcs;
- nb7->bridge.of_node = nb7->client->dev.of_node;
-
- return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge);
-}
-#else
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
- return 0;
-}
-#endif
-
static const struct regmap_config nb7_regmap = {
.max_register = 0x1f,
.reg_bits = 8,
@@ -461,7 +421,7 @@ static int nb7vpq904m_probe(struct i2c_client *client)
gpiod_set_value(nb7->enable_gpio, 1);
- ret = nb7vpq904m_register_bridge(nb7);
+ ret = drm_aux_bridge_register(dev);
if (ret)
goto err_disable_gpio;
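
drm_aux_bridge_register() (from CONFIG_DRM_AUX_BRIDGE, selected in the Kconfig hunk above) registers a transparent bridge bound to the device's OF node, which is why the roughly forty lines of open-coded drm_bridge plumbing could be dropped. In the probe path its use reduces to a single call:

ret = drm_aux_bridge_register(dev);	/* transparent DP bridge on dev's of_node */
if (ret)
	goto err_disable_gpio;
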
diff --git a/drivers/usb/typec/mux/wcd939x-usbss.c b/drivers/usb/typec/mux/wcd939x-usbss.c
new file mode 100644
index 000000000..d46c353df
--- /dev/null
+++ b/drivers/usb/typec/mux/wcd939x-usbss.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (C) 2023 Linaro Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/bitfield.h>
+#include <linux/gpio/consumer.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define WCD_USBSS_PMP_OUT1 0x2
+
+#define WCD_USBSS_DP_DN_MISC1 0x20
+
+#define WCD_USBSS_DP_DN_MISC1_DP_PCOMP_2X_DYN_BST_ON_EN BIT(3)
+#define WCD_USBSS_DP_DN_MISC1_DN_PCOMP_2X_DYN_BST_ON_EN BIT(0)
+
+#define WCD_USBSS_MG1_EN 0x24
+
+#define WCD_USBSS_MG1_EN_CT_SNS_EN BIT(1)
+
+#define WCD_USBSS_MG1_BIAS 0x25
+
+#define WCD_USBSS_MG1_BIAS_PCOMP_DYN_BST_EN BIT(3)
+
+#define WCD_USBSS_MG1_MISC 0x27
+
+#define WCD_USBSS_MG1_MISC_PCOMP_2X_DYN_BST_ON_EN BIT(5)
+
+#define WCD_USBSS_MG2_EN 0x28
+
+#define WCD_USBSS_MG2_EN_CT_SNS_EN BIT(1)
+
+#define WCD_USBSS_MG2_BIAS 0x29
+
+#define WCD_USBSS_MG2_BIAS_PCOMP_DYN_BST_EN BIT(3)
+
+#define WCD_USBSS_MG2_MISC 0x30
+
+#define WCD_USBSS_MG2_MISC_PCOMP_2X_DYN_BST_ON_EN BIT(5)
+
+#define WCD_USBSS_DISP_AUXP_THRESH 0x80
+
+#define WCD_USBSS_DISP_AUXP_THRESH_DISP_AUXP_OVPON_CM GENMASK(7, 5)
+
+#define WCD_USBSS_DISP_AUXP_CTL 0x81
+
+#define WCD_USBSS_DISP_AUXP_CTL_LK_CANCEL_TRK_COEFF GENMASK(2, 0)
+
+#define WCD_USBSS_CPLDO_CTL2 0xa1
+
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE 0x403
+
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DEVICE_ENABLE BIT(7)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXP_TO_MGX_SWITCHES BIT(6)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXM_TO_MGX_SWITCHES BIT(5)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES BIT(4)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES BIT(3)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_SENSE_SWITCHES BIT(2)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_MIC_SWITCHES BIT(1)
+#define WCD_USBSS_SWITCH_SETTINGS_ENABLE_AGND_SWITCHES BIT(0)
+
+#define WCD_USBSS_SWITCH_SELECT0 0x404
+
+#define WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES BIT(7) /* 1-> MG2 */
+#define WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES BIT(6) /* 1-> MG2 */
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES GENMASK(5, 4)
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES GENMASK(3, 2)
+#define WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES BIT(1) /* 1-> SBU2 */
+#define WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES BIT(0) /* 1-> MG2 */
+
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_L 0
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_DN 1
+#define WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_DN2 2
+
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_R 0
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_DP 1
+#define WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_DR2 2
+
+#define WCD_USBSS_SWITCH_SELECT1 0x405
+
+#define WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES BIT(0) /* 1-> MG2 */
+
+#define WCD_USBSS_DELAY_R_SW 0x40d
+#define WCD_USBSS_DELAY_MIC_SW 0x40e
+#define WCD_USBSS_DELAY_SENSE_SW 0x40f
+#define WCD_USBSS_DELAY_GND_SW 0x410
+#define WCD_USBSS_DELAY_L_SW 0x411
+
+#define WCD_USBSS_FUNCTION_ENABLE 0x413
+
+#define WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT GENMASK(1, 0)
+
+#define WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT_MANUAL 1
+#define WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT_AUDIO_FSM 2
+
+#define WCD_USBSS_EQUALIZER1 0x415
+
+#define WCD_USBSS_EQUALIZER1_EQ_EN BIT(7)
+#define WCD_USBSS_EQUALIZER1_BW_SETTINGS GENMASK(6, 3)
+
+#define WCD_USBSS_USB_SS_CNTL 0x419
+
+#define WCD_USBSS_USB_SS_CNTL_STANDBY_STATE BIT(4)
+#define WCD_USBSS_USB_SS_CNTL_RCO_EN BIT(3)
+#define WCD_USBSS_USB_SS_CNTL_USB_SS_MODE GENMASK(2, 0)
+
+#define WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_AATC 2
+#define WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_USB 5
+
+#define WCD_USBSS_AUDIO_FSM_START 0x433
+
+#define WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG BIT(0)
+
+#define WCD_USBSS_RATIO_SPKR_REXT_L_LSB 0x461
+#define WCD_USBSS_RATIO_SPKR_REXT_L_MSB 0x462
+#define WCD_USBSS_RATIO_SPKR_REXT_R_LSB 0x463
+#define WCD_USBSS_RATIO_SPKR_REXT_R_MSB 0x464
+#define WCD_USBSS_AUD_COEF_L_K0_0 0x475
+#define WCD_USBSS_AUD_COEF_L_K0_1 0x476
+#define WCD_USBSS_AUD_COEF_L_K0_2 0x477
+#define WCD_USBSS_AUD_COEF_L_K1_0 0x478
+#define WCD_USBSS_AUD_COEF_L_K1_1 0x479
+#define WCD_USBSS_AUD_COEF_L_K2_0 0x47a
+#define WCD_USBSS_AUD_COEF_L_K2_1 0x47b
+#define WCD_USBSS_AUD_COEF_L_K3_0 0x47c
+#define WCD_USBSS_AUD_COEF_L_K3_1 0x47d
+#define WCD_USBSS_AUD_COEF_L_K4_0 0x47e
+#define WCD_USBSS_AUD_COEF_L_K4_1 0x47f
+#define WCD_USBSS_AUD_COEF_L_K5_0 0x480
+#define WCD_USBSS_AUD_COEF_L_K5_1 0x481
+#define WCD_USBSS_AUD_COEF_R_K0_0 0x482
+#define WCD_USBSS_AUD_COEF_R_K0_1 0x483
+#define WCD_USBSS_AUD_COEF_R_K0_2 0x484
+#define WCD_USBSS_AUD_COEF_R_K1_0 0x485
+#define WCD_USBSS_AUD_COEF_R_K1_1 0x486
+#define WCD_USBSS_AUD_COEF_R_K2_0 0x487
+#define WCD_USBSS_AUD_COEF_R_K2_1 0x488
+#define WCD_USBSS_AUD_COEF_R_K3_0 0x489
+#define WCD_USBSS_AUD_COEF_R_K3_1 0x48a
+#define WCD_USBSS_AUD_COEF_R_K4_0 0x48b
+#define WCD_USBSS_AUD_COEF_R_K4_1 0x48c
+#define WCD_USBSS_AUD_COEF_R_K5_0 0x48d
+#define WCD_USBSS_AUD_COEF_R_K5_1 0x48e
+#define WCD_USBSS_GND_COEF_L_K0_0 0x48f
+#define WCD_USBSS_GND_COEF_L_K0_1 0x490
+#define WCD_USBSS_GND_COEF_L_K0_2 0x491
+#define WCD_USBSS_GND_COEF_L_K1_0 0x492
+#define WCD_USBSS_GND_COEF_L_K1_1 0x493
+#define WCD_USBSS_GND_COEF_L_K2_0 0x494
+#define WCD_USBSS_GND_COEF_L_K2_1 0x495
+#define WCD_USBSS_GND_COEF_L_K3_0 0x496
+#define WCD_USBSS_GND_COEF_L_K3_1 0x497
+#define WCD_USBSS_GND_COEF_L_K4_0 0x498
+#define WCD_USBSS_GND_COEF_L_K4_1 0x499
+#define WCD_USBSS_GND_COEF_L_K5_0 0x49a
+#define WCD_USBSS_GND_COEF_L_K5_1 0x49b
+#define WCD_USBSS_GND_COEF_R_K0_0 0x49c
+#define WCD_USBSS_GND_COEF_R_K0_1 0x49d
+#define WCD_USBSS_GND_COEF_R_K0_2 0x49e
+#define WCD_USBSS_GND_COEF_R_K1_0 0x49f
+#define WCD_USBSS_GND_COEF_R_K1_1 0x4a0
+#define WCD_USBSS_GND_COEF_R_K2_0 0x4a1
+#define WCD_USBSS_GND_COEF_R_K2_1 0x4a2
+#define WCD_USBSS_GND_COEF_R_K3_0 0x4a3
+#define WCD_USBSS_GND_COEF_R_K3_1 0x4a4
+#define WCD_USBSS_GND_COEF_R_K4_0 0x4a5
+#define WCD_USBSS_GND_COEF_R_K4_1 0x4a6
+#define WCD_USBSS_GND_COEF_R_K5_0 0x4a7
+#define WCD_USBSS_GND_COEF_R_K5_1 0x4a8
+
+#define WCD_USBSS_MAX_REGISTER 0x4c1
+
+struct wcd939x_usbss {
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vdd_supply;
+
+ /* used to serialize concurrent change requests */
+ struct mutex lock;
+
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+
+ struct regmap *regmap;
+
+ struct typec_mux *codec;
+ struct typec_switch *codec_switch;
+
+ enum typec_orientation orientation;
+ unsigned long mode;
+ unsigned int svid;
+};
+
+static const struct regmap_range_cfg wcd939x_usbss_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = WCD_USBSS_MAX_REGISTER,
+ .selector_reg = 0x0,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ },
+};
+
+static const struct regmap_config wcd939x_usbss_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = WCD_USBSS_MAX_REGISTER,
+ .ranges = wcd939x_usbss_ranges,
+ .num_ranges = ARRAY_SIZE(wcd939x_usbss_ranges),
+};
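
The range config implements register paging: addresses above the 8-bit I2C window are reached by first writing a page number to the selector register at 0x0. A worked example for WCD_USBSS_AUDIO_FSM_START (0x433):

/*
 * regmap splits the access transparently:
 *   page   = 0x433 / 0x100 = 4    -> written to selector reg 0x00
 *   offset = 0x433 % 0x100 = 0x33 -> the I2C register actually accessed
 */
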
+
+/* Linearizer coefficients for a 32 ohm load */
+static const struct {
+ unsigned int offset;
+ unsigned int mask;
+ unsigned int value;
+} wcd939x_usbss_coeff_init[] = {
+ { WCD_USBSS_AUD_COEF_L_K5_0, GENMASK(7, 0), 0x39 },
+ { WCD_USBSS_AUD_COEF_R_K5_0, GENMASK(7, 0), 0x39 },
+ { WCD_USBSS_GND_COEF_L_K2_0, GENMASK(7, 0), 0xe8 },
+ { WCD_USBSS_GND_COEF_L_K4_0, GENMASK(7, 0), 0x73 },
+ { WCD_USBSS_GND_COEF_R_K2_0, GENMASK(7, 0), 0xe8 },
+ { WCD_USBSS_GND_COEF_R_K4_0, GENMASK(7, 0), 0x73 },
+ { WCD_USBSS_RATIO_SPKR_REXT_L_LSB, GENMASK(7, 0), 0x00 },
+ { WCD_USBSS_RATIO_SPKR_REXT_L_MSB, GENMASK(6, 0), 0x04 },
+ { WCD_USBSS_RATIO_SPKR_REXT_R_LSB, GENMASK(7, 0), 0x00 },
+ { WCD_USBSS_RATIO_SPKR_REXT_R_MSB, GENMASK(6, 0), 0x04 },
+};
+
+static int wcd939x_usbss_set(struct wcd939x_usbss *usbss)
+{
+ bool reverse = (usbss->orientation == TYPEC_ORIENTATION_REVERSE);
+ bool enable_audio = false;
+ bool enable_usb = false;
+ bool enable_dp = false;
+ int ret;
+
+ /* USB Mode */
+ if (usbss->mode < TYPEC_STATE_MODAL ||
+ (!usbss->svid && (usbss->mode == TYPEC_MODE_USB2 ||
+ usbss->mode == TYPEC_MODE_USB3))) {
+ enable_usb = true;
+ } else if (usbss->svid) {
+ switch (usbss->mode) {
+ /* DP Only */
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ enable_dp = true;
+ break;
+
+ /* DP + USB */
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_F:
+ enable_usb = true;
+ enable_dp = true;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ } else if (usbss->mode == TYPEC_MODE_AUDIO) {
+ enable_audio = true;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ /* Disable all switches */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXP_TO_MGX_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXM_TO_MGX_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_SENSE_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_MIC_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_AGND_SWITCHES);
+ if (ret)
+ return ret;
+
+ /* Clear switches */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT1,
+ WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES);
+ if (ret)
+ return ret;
+
+ /* Enable OVP_MG1_BIAS PCOMP_DYN_BST_EN */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG1_BIAS,
+ WCD_USBSS_MG1_BIAS_PCOMP_DYN_BST_EN);
+ if (ret)
+ return ret;
+
+ /* Enable OVP_MG2_BIAS PCOMP_DYN_BST_EN */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG2_BIAS,
+ WCD_USBSS_MG2_BIAS_PCOMP_DYN_BST_EN);
+ if (ret)
+ return ret;
+
+ /* Disable Equalizer in safe mode */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_EQUALIZER1,
+ WCD_USBSS_EQUALIZER1_EQ_EN);
+ if (ret)
+ return ret;
+
+ /* Start FSM with all disabled, force write */
+ ret = regmap_write_bits(usbss->regmap, WCD_USBSS_AUDIO_FSM_START,
+ WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG,
+ WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG);
+
+ /* 35us to allow the SBU switch to turn off */
+ usleep_range(35, 1000);
+
+ /* Setup Audio Accessory mux/switch */
+ if (enable_audio) {
+ int i;
+
+ /*
+ * AATC switch configuration:
+ * "Normal":
+ * - R: DNR
+ * - L: DNL
+ * - Sense: GSBU2
+ * - Mic: MG1
+ * - AGND: MG2
+ * "Swapped":
+ * - R: DNR
+ * - L: DNL
+ * - Sense: GSBU1
+ * - Mic: MG2
+ * - AGND: MG1
+ * Swapped information is given by the codec MBHC logic
+ */
+
+ /* Set AATC mode */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+ WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+ FIELD_PREP(WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+ WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_AATC));
+ if (ret)
+ return ret;
+
+ /* Select L for DNL_SWITCHES and R for DPR_SWITCHES */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+ FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_L) |
+ FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_R));
+ if (ret)
+ return ret;
+
+ if (reverse)
+ /* Select MG2 for MIC, SBU1 for Sense */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_MIC_SWITCHES);
+ else
+ /* Select MG1 for MIC, SBU2 for Sense */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_SENSE_SWITCHES);
+ if (ret)
+ return ret;
+
+ if (reverse)
+ /* Disable OVP_MG1_BIAS PCOMP_DYN_BST_EN */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_MG1_BIAS,
+ WCD_USBSS_MG1_BIAS_PCOMP_DYN_BST_EN);
+ else
+ /* Disable OVP_MG2_BIAS PCOMP_DYN_BST_EN */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_MG2_BIAS,
+ WCD_USBSS_MG2_BIAS_PCOMP_DYN_BST_EN);
+ if (ret)
+ return ret;
+
+ /* Enable SENSE, MIC switches */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_SENSE_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_MIC_SWITCHES);
+ if (ret)
+ return ret;
+
+ if (reverse)
+ /* Select MG1 for AGND_SWITCHES */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT1,
+ WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES);
+ else
+ /* Select MG2 for AGND_SWITCHES */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT1,
+ WCD_USBSS_SWITCH_SELECT1_AGND_SWITCHES);
+ if (ret)
+ return ret;
+
+ /* Enable AGND switches */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_AGND_SWITCHES);
+ if (ret)
+ return ret;
+
+ /* Enable DPR, DNL switches */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES);
+ if (ret)
+ return ret;
+
+ /* Setup FSM delays */
+ ret = regmap_write(usbss->regmap, WCD_USBSS_DELAY_L_SW, 0x02);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(usbss->regmap, WCD_USBSS_DELAY_R_SW, 0x02);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(usbss->regmap, WCD_USBSS_DELAY_MIC_SW, 0x01);
+ if (ret)
+ return ret;
+
+ /* Start FSM, force write */
+ ret = regmap_write_bits(usbss->regmap, WCD_USBSS_AUDIO_FSM_START,
+ WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG,
+ WCD_USBSS_AUDIO_FSM_START_AUDIO_FSM_AUDIO_TRIG);
+ if (ret)
+ return ret;
+
+ /* Default linearizer coefficients */
+ for (i = 0; i < ARRAY_SIZE(wcd939x_usbss_coeff_init); ++i)
+ regmap_update_bits(usbss->regmap,
+ wcd939x_usbss_coeff_init[i].offset,
+ wcd939x_usbss_coeff_init[i].mask,
+ wcd939x_usbss_coeff_init[i].value);
+
+ return 0;
+ }
+
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+ WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+ FIELD_PREP(WCD_USBSS_USB_SS_CNTL_USB_SS_MODE,
+ WCD_USBSS_USB_SS_CNTL_USB_SS_MODE_USB));
+ if (ret)
+ return ret;
+
+ /* Enable USB muxes */
+ if (enable_usb) {
+ /* Do not enable Equalizer in safe mode */
+ if (usbss->mode != TYPEC_STATE_SAFE) {
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_EQUALIZER1,
+ WCD_USBSS_EQUALIZER1_EQ_EN);
+ if (ret)
+ return ret;
+ }
+
+ /* Select DN for DNL_SWITCHES and DP for DPR_SWITCHES */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+ FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DNL_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_DNL_SWITCH_DN) |
+ FIELD_PREP(WCD_USBSS_SWITCH_SELECT0_DPR_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_DPR_SWITCH_DP));
+ if (ret)
+ return ret;
+
+ /* Enable DNL_SWITCHES and DPR_SWITCHES */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DPR_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DNL_SWITCHES);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable DP AUX muxes */
+ if (enable_dp) {
+ /* Update Leakage Canceller Coefficient for AUXP pins */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_DISP_AUXP_CTL,
+ WCD_USBSS_DISP_AUXP_CTL_LK_CANCEL_TRK_COEFF,
+ FIELD_PREP(WCD_USBSS_DISP_AUXP_CTL_LK_CANCEL_TRK_COEFF,
+ 5));
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_DISP_AUXP_THRESH,
+ WCD_USBSS_DISP_AUXP_THRESH_DISP_AUXP_OVPON_CM);
+ if (ret)
+ return ret;
+
+ if (reverse)
+ /* Select MG2 for AUXP and MG1 for AUXM */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES);
+ else
+ /* Select MG1 for AUXP and MG2 for AUXM */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_SWITCH_SELECT0,
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXP_SWITCHES |
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES,
+ WCD_USBSS_SWITCH_SELECT0_DP_AUXM_SWITCHES);
+ if (ret)
+ return ret;
+
+ /* Enable DP_AUXP_TO_MGX and DP_AUXM_TO_MGX switches */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXP_TO_MGX_SWITCHES |
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DP_AUXM_TO_MGX_SWITCHES);
+
+ /* 15us to allow the SBU switch to turn on again */
+ usleep_range(15, 1000);
+ }
+
+ return 0;
+}
+
+static int wcd939x_usbss_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct wcd939x_usbss *usbss = typec_switch_get_drvdata(sw);
+ int ret = 0;
+
+ mutex_lock(&usbss->lock);
+
+ if (usbss->orientation != orientation) {
+ usbss->orientation = orientation;
+
+ ret = wcd939x_usbss_set(usbss);
+ }
+
+ mutex_unlock(&usbss->lock);
+
+ if (ret)
+ return ret;
+
+ /* Report orientation to codec after switch has been done */
+ return typec_switch_set(usbss->codec_switch, orientation);
+}
+
+static int wcd939x_usbss_mux_set(struct typec_mux_dev *mux,
+ struct typec_mux_state *state)
+{
+ struct wcd939x_usbss *usbss = typec_mux_get_drvdata(mux);
+ int ret = 0;
+
+ mutex_lock(&usbss->lock);
+
+ if (usbss->mode != state->mode) {
+ usbss->mode = state->mode;
+
+ if (state->alt)
+ usbss->svid = state->alt->svid;
+ else
+ usbss->svid = 0; /* no SVID */
+
+ ret = wcd939x_usbss_set(usbss);
+ }
+
+ mutex_unlock(&usbss->lock);
+
+ if (ret)
+ return ret;
+
+ /* Report event to codec after switch has been done */
+ return typec_mux_set(usbss->codec, state);
+}
+
+static int wcd939x_usbss_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct wcd939x_usbss *usbss;
+ int ret;
+
+ usbss = devm_kzalloc(dev, sizeof(*usbss), GFP_KERNEL);
+ if (!usbss)
+ return -ENOMEM;
+
+ usbss->client = client;
+ mutex_init(&usbss->lock);
+
+ usbss->regmap = devm_regmap_init_i2c(client, &wcd939x_usbss_regmap_config);
+ if (IS_ERR(usbss->regmap))
+ return dev_err_probe(dev, PTR_ERR(usbss->regmap), "failed to initialize regmap\n");
+
+ usbss->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(usbss->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(usbss->reset_gpio),
+ "unable to acquire reset gpio\n");
+
+ usbss->vdd_supply = devm_regulator_get_optional(dev, "vdd");
+ if (IS_ERR(usbss->vdd_supply))
+ return PTR_ERR(usbss->vdd_supply);
+
+ /* Get Codec's MUX & Switch devices */
+ usbss->codec = fwnode_typec_mux_get(dev->fwnode);
+ if (IS_ERR(usbss->codec))
+ return dev_err_probe(dev, PTR_ERR(usbss->codec),
+ "failed to acquire codec mode-switch\n");
+
+ usbss->codec_switch = fwnode_typec_switch_get(dev->fwnode);
+ if (IS_ERR(usbss->codec_switch)) {
+ ret = dev_err_probe(dev, PTR_ERR(usbss->codec_switch),
+ "failed to acquire codec orientation-switch\n");
+ goto err_mux_put;
+ }
+
+ usbss->mode = TYPEC_STATE_SAFE;
+ usbss->orientation = TYPEC_ORIENTATION_NONE;
+
+ gpiod_set_value(usbss->reset_gpio, 1);
+
+ ret = regulator_enable(usbss->vdd_supply);
+ if (ret) {
+ dev_err(dev, "Failed to enable vdd: %d\n", ret);
+ goto err_mux_switch;
+ }
+
+ msleep(20);
+
+ gpiod_set_value(usbss->reset_gpio, 0);
+
+ msleep(20);
+
+ /* Disable standby */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+ WCD_USBSS_USB_SS_CNTL_STANDBY_STATE);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Set manual mode by default */
+ ret = regmap_update_bits(usbss->regmap, WCD_USBSS_FUNCTION_ENABLE,
+ WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT,
+ FIELD_PREP(WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT,
+ WCD_USBSS_FUNCTION_ENABLE_SOURCE_SELECT_MANUAL));
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Enable dynamic boosting for DP and DN */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_DP_DN_MISC1,
+ WCD_USBSS_DP_DN_MISC1_DP_PCOMP_2X_DYN_BST_ON_EN |
+ WCD_USBSS_DP_DN_MISC1_DN_PCOMP_2X_DYN_BST_ON_EN);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Enable dynamic boosting for MG1 OVP */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG1_MISC,
+ WCD_USBSS_MG1_MISC_PCOMP_2X_DYN_BST_ON_EN);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Enable dynamic boosting for MG2 OVP */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_MG2_MISC,
+ WCD_USBSS_MG2_MISC_PCOMP_2X_DYN_BST_ON_EN);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Write 0xFF to WCD_USBSS_CPLDO_CTL2 */
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_CPLDO_CTL2, 0xff);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Set RCO_EN: WCD_USBSS_USB_SS_CNTL Bit<3> --> 0x0 --> 0x1 */
+ ret = regmap_clear_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+ WCD_USBSS_USB_SS_CNTL_RCO_EN);
+ if (ret)
+ goto err_regulator_disable;
+
+ ret = regmap_set_bits(usbss->regmap, WCD_USBSS_USB_SS_CNTL,
+ WCD_USBSS_USB_SS_CNTL_RCO_EN);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Disable all switches but enable the mux */
+ ret = regmap_write(usbss->regmap, WCD_USBSS_SWITCH_SETTINGS_ENABLE,
+ WCD_USBSS_SWITCH_SETTINGS_ENABLE_DEVICE_ENABLE);
+ if (ret)
+ goto err_regulator_disable;
+
+ /* Setup in SAFE mode */
+ ret = wcd939x_usbss_set(usbss);
+ if (ret)
+ goto err_regulator_disable;
+
+ sw_desc.drvdata = usbss;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = wcd939x_usbss_switch_set;
+
+ usbss->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(usbss->sw)) {
+ ret = dev_err_probe(dev, PTR_ERR(usbss->sw), "failed to register typec switch\n");
+ goto err_regulator_disable;
+ }
+
+ mux_desc.drvdata = usbss;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = wcd939x_usbss_mux_set;
+
+ usbss->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(usbss->mux)) {
+ ret = dev_err_probe(dev, PTR_ERR(usbss->mux), "failed to register typec mux\n");
+ goto err_switch_unregister;
+ }
+
+ i2c_set_clientdata(client, usbss);
+
+ return 0;
+
+err_switch_unregister:
+ typec_switch_unregister(usbss->sw);
+
+err_regulator_disable:
+ regulator_disable(usbss->vdd_supply);
+
+err_mux_switch:
+ typec_switch_put(usbss->codec_switch);
+
+err_mux_put:
+ typec_mux_put(usbss->codec);
+
+ return ret;
+}
+
+static void wcd939x_usbss_remove(struct i2c_client *client)
+{
+ struct wcd939x_usbss *usbss = i2c_get_clientdata(client);
+
+ typec_mux_unregister(usbss->mux);
+ typec_switch_unregister(usbss->sw);
+
+ regulator_disable(usbss->vdd_supply);
+
+ typec_switch_put(usbss->codec_switch);
+ typec_mux_put(usbss->codec);
+}
+
+static const struct i2c_device_id wcd939x_usbss_table[] = {
+ { "wcd9390-usbss" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wcd939x_usbss_table);
+
+static const struct of_device_id wcd939x_usbss_of_table[] = {
+ { .compatible = "qcom,wcd9390-usbss" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcd939x_usbss_of_table);
+
+static struct i2c_driver wcd939x_usbss_driver = {
+ .driver = {
+ .name = "wcd939x-usbss",
+ .of_match_table = wcd939x_usbss_of_table,
+ },
+ .probe = wcd939x_usbss_probe,
+ .remove = wcd939x_usbss_remove,
+ .id_table = wcd939x_usbss_table,
+};
+module_i2c_driver(wcd939x_usbss_driver);
+
+MODULE_DESCRIPTION("Qualcomm WCD939x USBSS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index 85d015cdb..b9cca2be7 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -468,7 +468,7 @@ static struct device_type pd_capabilities_type = {
/**
* usb_power_delivery_register_capabilities - Register a set of capabilities.
* @pd: The USB PD instance that the capabilities belong to.
- * @desc: Description of the Capablities Message.
+ * @desc: Description of the Capabilities Message.
*
* This function registers a Capabilities Message described in @desc. The
* capabilities will have their own sub-directory under @pd in sysfs.
@@ -571,7 +571,7 @@ static void pd_release(struct device *dev)
{
struct usb_power_delivery *pd = to_usb_power_delivery(dev);
- ida_simple_remove(&pd_ida, pd->id);
+ ida_free(&pd_ida, pd->id);
kfree(pd);
}
@@ -616,7 +616,7 @@ usb_power_delivery_register(struct device *parent, struct usb_power_delivery_des
if (!pd)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&pd_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&pd_ida, GFP_KERNEL);
if (ret < 0) {
kfree(pd);
return ERR_PTR(ret);
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 0b2993fef..8cdd84ca5 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -80,6 +80,7 @@ config TYPEC_QCOM_PMIC
tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
depends on ARCH_QCOM || COMPILE_TEST
depends on DRM || DRM=n
+ select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE && OF
help
A Type-C port and Power Delivery driver which aggregates two
discrete pieces of silicon in the PM8150b PMIC block: the
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 581199d37..1a2b4bdda 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -18,7 +18,7 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
@@ -36,7 +36,6 @@ struct pmic_typec {
struct pmic_typec_port *pmic_typec_port;
bool vbus_enabled;
struct mutex lock; /* VBUS state serialization */
- struct drm_bridge bridge;
};
#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
@@ -150,35 +149,6 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM)
-static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
-{
- return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
- .attach = qcom_pmic_typec_attach,
-};
-
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
- tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
-#ifdef CONFIG_OF
- tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
-#endif
- tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
- tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
- return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
-}
-#else
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
- return 0;
-}
-#endif
-
static int qcom_pmic_typec_probe(struct platform_device *pdev)
{
struct pmic_typec *tcpm;
@@ -186,6 +156,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct pmic_typec_resources *res;
struct regmap *regmap;
+ struct device *bridge_dev;
u32 base[2];
int ret;
@@ -241,14 +212,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
- ret = qcom_pmic_typec_init_drm(tcpm);
- if (ret)
- return ret;
-
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
if (!tcpm->tcpc.fwnode)
return -EINVAL;
+ bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
if (IS_ERR(tcpm->tcpm_port)) {
ret = PTR_ERR(tcpm->tcpm_port);
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 0ee3e6e29..711855182 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -889,6 +889,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
{ .compatible = "nxp,ptn5110", },
+ { .compatible = "tcpci", },
{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 9454b12a0..7fb966fd6 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -92,11 +92,16 @@ static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
return;
}
+ /* Vconn Over Current Protection */
+ ret = max_tcpci_write8(chip, TCPC_FAULT_STATUS_MASK, TCPC_FAULT_STATUS_MASK_VCONN_OC);
+ if (ret < 0)
+ return;
+
alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED |
TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS |
TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS |
/* Enable Extended alert for detecting Fast Role Swap Signal */
- TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS;
+ TCPC_ALERT_EXTND | TCPC_ALERT_EXTENDED_STATUS | TCPC_ALERT_FAULT;
ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask);
if (ret < 0) {
@@ -295,6 +300,19 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
}
}
+ if (status & TCPC_ALERT_FAULT) {
+ ret = max_tcpci_read8(chip, TCPC_FAULT_STATUS, &reg_status);
+ if (ret < 0)
+ return ret;
+
+ ret = max_tcpci_write8(chip, TCPC_FAULT_STATUS, reg_status);
+ if (ret < 0)
+ return ret;
+
+ if (reg_status & TCPC_FAULT_STATUS_VCONN_OC)
+ tcpm_port_error_recovery(chip->port);
+ }
+
if (status & TCPC_ALERT_EXTND) {
ret = max_tcpci_read8(chip, TCPC_ALERT_EXTENDED, &reg_status);
if (ret < 0)
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index c5776b3c9..df9a5d676 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -251,6 +251,7 @@ enum frs_typec_current {
#define TCPM_FRS_EVENT BIT(3)
#define TCPM_SOURCING_VBUS BIT(4)
#define TCPM_PORT_CLEAN BIT(5)
+#define TCPM_PORT_ERROR BIT(6)
#define LOG_BUFFER_ENTRIES 1024
#define LOG_BUFFER_ENTRY_SIZE 128
@@ -296,6 +297,15 @@ struct pd_pps_data {
bool active;
};
+struct pd_data {
+ struct usb_power_delivery *pd;
+ struct usb_power_delivery_capabilities *source_cap;
+ struct usb_power_delivery_capabilities_desc source_desc;
+ struct usb_power_delivery_capabilities *sink_cap;
+ struct usb_power_delivery_capabilities_desc sink_desc;
+ unsigned int operating_snk_mw;
+};
+
struct tcpm_port {
struct device *dev;
@@ -397,12 +407,14 @@ struct tcpm_port {
unsigned int rx_msgid;
/* USB PD objects */
- struct usb_power_delivery *pd;
+ struct usb_power_delivery **pds;
+ struct pd_data **pd_list;
struct usb_power_delivery_capabilities *port_source_caps;
struct usb_power_delivery_capabilities *port_sink_caps;
struct usb_power_delivery *partner_pd;
struct usb_power_delivery_capabilities *partner_source_caps;
struct usb_power_delivery_capabilities *partner_sink_caps;
+ struct usb_power_delivery *selected_pd;
/* Partner capabilities/requests */
u32 sink_request;
@@ -412,6 +424,7 @@ struct tcpm_port {
unsigned int nr_sink_caps;
/* Local capabilities */
+ unsigned int pd_count;
u32 src_pdo[PDO_MAX_OBJECTS];
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
@@ -2847,7 +2860,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
PD_MSG_CTRL_NOT_SUPP,
NONE_AMS);
} else {
- if (port->send_discover) {
+ if (port->send_discover && port->negotiated_rev < PD_REV30) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
@@ -2863,7 +2876,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
PD_MSG_CTRL_NOT_SUPP,
NONE_AMS);
} else {
- if (port->send_discover) {
+ if (port->send_discover && port->negotiated_rev < PD_REV30) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
@@ -2872,7 +2885,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
}
break;
case PD_CTRL_VCONN_SWAP:
- if (port->send_discover) {
+ if (port->send_discover && port->negotiated_rev < PD_REV30) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
@@ -4398,7 +4411,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
- mod_enable_frs_delayed_work(port, 0);
+ if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
+ mod_enable_frs_delayed_work(port, 0);
tcpm_pps_complete(port, port->pps_status);
if (port->ams != NONE_AMS)
@@ -5487,6 +5501,10 @@ static void tcpm_pd_event_handler(struct kthread_work *work)
tcpm_set_state(port, tcpm_default_state(port), 0);
}
}
+ if (events & TCPM_PORT_ERROR) {
+ tcpm_log(port, "port triggering error recovery");
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+ }
spin_lock(&port->pd_event_lock);
}
@@ -5554,6 +5572,15 @@ bool tcpm_port_is_toggling(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
+void tcpm_port_error_recovery(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events |= TCPM_PORT_ERROR;
+ spin_unlock(&port->pd_event_lock);
+ kthread_queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
+
static void tcpm_enable_frs_work(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
@@ -6045,12 +6072,116 @@ port_unlock:
return 0;
}
+static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
+{
+ int i;
+
+ for (i = 0; port->pd_list[i]; i++) {
+ if (port->pd_list[i]->pd == pd)
+ return port->pd_list[i];
+ }
+
+ return ERR_PTR(-ENODATA);
+}
+
+static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
+{
+ struct tcpm_port *port = typec_get_drvdata(p);
+
+ return port->pds;
+}
+
+static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
+{
+ struct tcpm_port *port = typec_get_drvdata(p);
+ struct pd_data *data;
+ int i, ret = 0;
+
+ mutex_lock(&port->lock);
+
+ if (port->selected_pd == pd)
+ goto unlock;
+
+ data = tcpm_find_pd_data(port, pd);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ goto unlock;
+ }
+
+ if (data->sink_desc.pdo[0]) {
+ for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
+ port->snk_pdo[i] = data->sink_desc.pdo[i];
+ port->nr_snk_pdo = i;
+ port->operating_snk_mw = data->operating_snk_mw;
+ }
+
+ if (data->source_desc.pdo[0]) {
+ for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
+ port->src_pdo[i] = data->source_desc.pdo[i];
+ port->nr_src_pdo = i;
+ }
+
+ switch (port->state) {
+ case SRC_UNATTACHED:
+ case SRC_ATTACH_WAIT:
+ case SRC_TRYWAIT:
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ break;
+ case SRC_SEND_CAPABILITIES:
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
+ case SRC_NEGOTIATE_CAPABILITIES:
+ case SRC_READY:
+ case SRC_WAIT_NEW_CAPABILITIES:
+ port->caps_count = 0;
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto unlock;
+ }
+ break;
+ case SNK_NEGOTIATE_CAPABILITIES:
+ case SNK_NEGOTIATE_PPS_CAPABILITIES:
+ case SNK_READY:
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ if (port->pps_data.active)
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ else if (port->pd_capable)
+ port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
+ else
+ break;
+
+ port->update_sink_caps = true;
+
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto unlock;
+ }
+ break;
+ default:
+ break;
+ }
+
+ port->port_source_caps = data->source_cap;
+ port->port_sink_caps = data->sink_cap;
+ typec_port_set_usb_power_delivery(p, NULL);
+ port->selected_pd = pd;
+ typec_port_set_usb_power_delivery(p, port->selected_pd);
+unlock:
+ mutex_unlock(&port->lock);
+ return ret;
+}
+
static const struct typec_operations tcpm_ops = {
.try_role = tcpm_try_role,
.dr_set = tcpm_dr_set,
.pr_set = tcpm_pr_set,
.vconn_set = tcpm_vconn_set,
- .port_type_set = tcpm_port_type_set
+ .port_type_set = tcpm_port_type_set,
+ .pd_get = tcpm_pd_get,
+ .pd_set = tcpm_pd_set
};
void tcpm_tcpc_reset(struct tcpm_port *port)
@@ -6064,58 +6195,61 @@ EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
static void tcpm_port_unregister_pd(struct tcpm_port *port)
{
- usb_power_delivery_unregister_capabilities(port->port_sink_caps);
+ int i;
+
port->port_sink_caps = NULL;
- usb_power_delivery_unregister_capabilities(port->port_source_caps);
port->port_source_caps = NULL;
- usb_power_delivery_unregister(port->pd);
- port->pd = NULL;
+ for (i = 0; i < port->pd_count; i++) {
+ usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
+ usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
+ devm_kfree(port->dev, port->pd_list[i]);
+ port->pd_list[i] = NULL;
+ usb_power_delivery_unregister(port->pds[i]);
+ port->pds[i] = NULL;
+ }
}
static int tcpm_port_register_pd(struct tcpm_port *port)
{
struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
- struct usb_power_delivery_capabilities_desc caps = { };
struct usb_power_delivery_capabilities *cap;
- int ret;
+ int ret, i;
if (!port->nr_src_pdo && !port->nr_snk_pdo)
return 0;
- port->pd = usb_power_delivery_register(port->dev, &desc);
- if (IS_ERR(port->pd)) {
- ret = PTR_ERR(port->pd);
- goto err_unregister;
- }
-
- if (port->nr_src_pdo) {
- memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->src_pdo,
- port->nr_src_pdo * sizeof(u32), 0);
- caps.role = TYPEC_SOURCE;
-
- cap = usb_power_delivery_register_capabilities(port->pd, &caps);
- if (IS_ERR(cap)) {
- ret = PTR_ERR(cap);
+ for (i = 0; i < port->pd_count; i++) {
+ port->pds[i] = usb_power_delivery_register(port->dev, &desc);
+ if (IS_ERR(port->pds[i])) {
+ ret = PTR_ERR(port->pds[i]);
goto err_unregister;
}
-
- port->port_source_caps = cap;
- }
-
- if (port->nr_snk_pdo) {
- memcpy_and_pad(caps.pdo, sizeof(caps.pdo), port->snk_pdo,
- port->nr_snk_pdo * sizeof(u32), 0);
- caps.role = TYPEC_SINK;
-
- cap = usb_power_delivery_register_capabilities(port->pd, &caps);
- if (IS_ERR(cap)) {
- ret = PTR_ERR(cap);
- goto err_unregister;
+ port->pd_list[i]->pd = port->pds[i];
+
+ if (port->pd_list[i]->source_desc.pdo[0]) {
+ cap = usb_power_delivery_register_capabilities(port->pds[i],
+ &port->pd_list[i]->source_desc);
+ if (IS_ERR(cap)) {
+ ret = PTR_ERR(cap);
+ goto err_unregister;
+ }
+ port->pd_list[i]->source_cap = cap;
}
- port->port_sink_caps = cap;
+ if (port->pd_list[i]->sink_desc.pdo[0]) {
+ cap = usb_power_delivery_register_capabilities(port->pds[i],
+ &port->pd_list[i]->sink_desc);
+ if (IS_ERR(cap)) {
+ ret = PTR_ERR(cap);
+ goto err_unregister;
+ }
+ port->pd_list[i]->sink_cap = cap;
+ }
}
+ port->port_source_caps = port->pd_list[0]->source_cap;
+ port->port_sink_caps = port->pd_list[0]->sink_cap;
+ port->selected_pd = port->pds[0];
return 0;
err_unregister:
@@ -6124,12 +6258,15 @@ err_unregister:
return ret;
}
-static int tcpm_fw_get_caps(struct tcpm_port *port,
- struct fwnode_handle *fwnode)
+static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
+ struct fwnode_handle *capabilities, *child, *caps = NULL;
+ unsigned int nr_src_pdo, nr_snk_pdo;
const char *opmode_str;
- int ret;
- u32 mw, frs_current;
+ u32 *src_pdo, *snk_pdo;
+ u32 uw, frs_current;
+ int ret = 0, i;
+ int mode;
if (!fwnode)
return -EINVAL;
@@ -6147,30 +6284,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
if (ret < 0)
return ret;
+ mode = 0;
+
+ if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
+ port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
+
+ if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
+ port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
+
port->port_type = port->typec_caps.type;
port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
-
port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
- if (port->port_type == TYPEC_PORT_SNK)
- goto sink;
-
- /* Get Source PDOs for the PD port or Source Rp value for the non-PD port */
- if (port->pd_supported) {
- ret = fwnode_property_count_u32(fwnode, "source-pdos");
- if (ret == 0)
- return -EINVAL;
- else if (ret < 0)
- return ret;
+ port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
- port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
- ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
- port->src_pdo, port->nr_src_pdo);
- if (ret)
- return ret;
- ret = tcpm_validate_caps(port, port->src_pdo, port->nr_src_pdo);
- if (ret)
- return ret;
- } else {
+ if (!port->pd_supported) {
ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
if (ret)
return ret;
@@ -6178,45 +6305,150 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
if (ret < 0)
return ret;
port->src_rp = tcpm_pwr_opmode_to_rp(ret);
- }
-
- if (port->port_type == TYPEC_PORT_SRC)
return 0;
+ }
-sink:
- port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
-
- if (!port->pd_supported)
- return 0;
-
- /* Get sink pdos */
- ret = fwnode_property_count_u32(fwnode, "sink-pdos");
- if (ret <= 0)
- return -EINVAL;
-
- port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
- ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
- port->snk_pdo, port->nr_snk_pdo);
- if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
- port->nr_snk_pdo))
- return -EINVAL;
-
- if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
- return -EINVAL;
- port->operating_snk_mw = mw / 1000;
+ /* The following code are applicable to pd-capable ports, i.e. pd_supported is true. */
/* FRS can only be supported by DRP ports */
if (port->port_type == TYPEC_PORT_DRP) {
ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
&frs_current);
- if (ret >= 0 && frs_current <= FRS_5V_3A)
+ if (!ret && frs_current <= FRS_5V_3A)
port->new_source_frs_current = frs_current;
+
+ if (ret)
+ ret = 0;
}
+ /* For the backward compatibility, "capabilities" node is optional. */
+ capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
+ if (!capabilities) {
+ port->pd_count = 1;
+ } else {
+ fwnode_for_each_child_node(capabilities, child)
+ port->pd_count++;
+
+ if (!port->pd_count) {
+ ret = -ENODATA;
+ goto put_capabilities;
+ }
+ }
+
+ port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
+ GFP_KERNEL);
+ if (!port->pds) {
+ ret = -ENOMEM;
+ goto put_capabilities;
+ }
+
+ port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
+ GFP_KERNEL);
+ if (!port->pd_list) {
+ ret = -ENOMEM;
+ goto put_capabilities;
+ }
+
+ for (i = 0; i < port->pd_count; i++) {
+ port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
+ if (!port->pd_list[i]) {
+ ret = -ENOMEM;
+ goto put_capabilities;
+ }
+
+ src_pdo = port->pd_list[i]->source_desc.pdo;
+ port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
+ snk_pdo = port->pd_list[i]->sink_desc.pdo;
+ port->pd_list[i]->sink_desc.role = TYPEC_SINK;
+
+ /* If "capabilities" is NULL, fall back to single pd cap population. */
+ if (!capabilities)
+ caps = fwnode;
+ else
+ caps = fwnode_get_next_child_node(capabilities, caps);
+
+ if (port->port_type != TYPEC_PORT_SNK) {
+ ret = fwnode_property_count_u32(caps, "source-pdos");
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto put_caps;
+ }
+ if (ret < 0)
+ goto put_caps;
+
+ nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
+ nr_src_pdo);
+ if (ret)
+ goto put_caps;
+
+ ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
+ if (ret)
+ goto put_caps;
+
+ if (i == 0) {
+ port->nr_src_pdo = nr_src_pdo;
+ memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
+ port->pd_list[0]->source_desc.pdo,
+ sizeof(u32) * nr_src_pdo,
+ 0);
+ }
+ }
+
+ if (port->port_type != TYPEC_PORT_SRC) {
+ ret = fwnode_property_count_u32(caps, "sink-pdos");
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto put_caps;
+ }
+
+ if (ret < 0)
+ goto put_caps;
+
+ nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
+ nr_snk_pdo);
+ if (ret)
+ goto put_caps;
+
+ ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
+ if (ret)
+ goto put_caps;
+
+ if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
+ ret = -EINVAL;
+ goto put_caps;
+ }
+
+ port->pd_list[i]->operating_snk_mw = uw / 1000;
+
+ if (i == 0) {
+ port->nr_snk_pdo = nr_snk_pdo;
+ memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
+ port->pd_list[0]->sink_desc.pdo,
+ sizeof(u32) * nr_snk_pdo,
+ 0);
+ port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
+ }
+ }
+ }
+
+put_caps:
+ if (caps != fwnode)
+ fwnode_handle_put(caps);
+put_capabilities:
+ fwnode_handle_put(capabilities);
+ return ret;
+}
+
+static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
+{
+ int ret;
+
/* sink-vdos is optional */
ret = fwnode_property_count_u32(fwnode, "sink-vdos");
if (ret < 0)
- ret = 0;
+ return 0;
port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
if (port->nr_snk_vdo) {
@@ -6584,10 +6816,12 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
err = tcpm_fw_get_caps(port, tcpc->fwnode);
if (err < 0)
goto out_destroy_wq;
+ err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
+ if (err < 0)
+ goto out_destroy_wq;
port->try_role = port->typec_caps.prefer_role;
- port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
port->typec_caps.svdm_version = SVDM_VER_2_0;
@@ -6596,7 +6830,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->typec_caps.orientation_aware = 1;
port->partner_desc.identity = &port->partner_ident;
- port->port_type = port->typec_caps.type;
port->role_sw = usb_role_switch_get(port->dev);
if (!port->role_sw)
@@ -6615,7 +6848,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
if (err)
goto out_role_sw_put;
- port->typec_caps.pd = port->pd;
+ if (port->pds)
+ port->typec_caps.pd = port->pds[0];
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (IS_ERR(port->typec_port)) {
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 196535ad9..0717cfcd9 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -8,6 +8,7 @@
#include <linux/i2c.h>
#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/power_supply.h>
@@ -64,6 +65,9 @@
#define TPS_PBMC_RC 0 /* Return code */
#define TPS_PBMC_DPCS 2 /* device patch complete status */
+/* reset de-assertion to ready for operation */
+#define TPS_SETUP_MS 1000
+
enum {
TPS_PORTINFO_SINK,
TPS_PORTINFO_SINK_ACCESSORY,
@@ -111,6 +115,8 @@ struct tipd_data {
void (*trace_power_status)(u16 status);
void (*trace_status)(u32 status);
int (*apply_patch)(struct tps6598x *tps);
+ int (*init)(struct tps6598x *tps);
+ int (*reset)(struct tps6598x *tps);
};
struct tps6598x {
@@ -119,6 +125,7 @@ struct tps6598x {
struct mutex lock; /* device lock */
u8 i2c_protocol:1;
+ struct gpio_desc *reset;
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
@@ -323,7 +330,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
}
static int tps6598x_exec_cmd_tmo(struct tps6598x *tps, const char *cmd,
- size_t in_len, u8 *in_data,
+ size_t in_len, const u8 *in_data,
size_t out_len, u8 *out_data,
u32 cmd_timeout_ms, u32 res_delay_ms)
{
@@ -389,7 +396,7 @@ static int tps6598x_exec_cmd_tmo(struct tps6598x *tps, const char *cmd,
}
static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
- size_t in_len, u8 *in_data,
+ size_t in_len, const u8 *in_data,
size_t out_len, u8 *out_data)
{
return tps6598x_exec_cmd_tmo(tps, cmd, in_len, in_data,
@@ -866,6 +873,30 @@ tps6598x_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
return 0;
}
+static int tps_request_firmware(struct tps6598x *tps, const struct firmware **fw)
+{
+ const char *firmware_name;
+ int ret;
+
+ ret = device_property_read_string(tps->dev, "firmware-name",
+ &firmware_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(fw, firmware_name, tps->dev);
+ if (ret) {
+ dev_err(tps->dev, "failed to retrieve \"%s\"\n", firmware_name);
+ return ret;
+ }
+
+ if ((*fw)->size == 0) {
+ release_firmware(*fw);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int
tps25750_write_firmware(struct tps6598x *tps,
u8 bpms_addr, const u8 *data, size_t len)
@@ -954,16 +985,9 @@ static int tps25750_start_patch_burst_mode(struct tps6598x *tps)
if (ret)
return ret;
- ret = request_firmware(&fw, firmware_name, tps->dev);
- if (ret) {
- dev_err(tps->dev, "failed to retrieve \"%s\"\n", firmware_name);
+ ret = tps_request_firmware(tps, &fw);
+ if (ret)
return ret;
- }
-
- if (fw->size == 0) {
- ret = -EINVAL;
- goto release_fw;
- }
ret = of_property_match_string(np, "reg-names", "patch-address");
if (ret < 0) {
@@ -1101,6 +1125,76 @@ wait_for_app:
return 0;
};
+static int tps6598x_apply_patch(struct tps6598x *tps)
+{
+ u8 in = TPS_PTCS_CONTENT_DEV | TPS_PTCS_CONTENT_APP;
+ u8 out[TPS_MAX_LEN] = {0};
+ size_t in_len = sizeof(in);
+ size_t copied_bytes = 0;
+ size_t bytes_left;
+ const struct firmware *fw;
+ const char *firmware_name;
+ int ret;
+
+ ret = device_property_read_string(tps->dev, "firmware-name",
+ &firmware_name);
+ if (ret)
+ return ret;
+
+ ret = tps_request_firmware(tps, &fw);
+ if (ret)
+ return ret;
+
+ ret = tps6598x_exec_cmd(tps, "PTCs", in_len, &in,
+ TPS_PTCS_OUT_BYTES, out);
+ if (ret || out[TPS_PTCS_STATUS] == TPS_PTCS_STATUS_FAIL) {
+ if (!ret)
+ ret = -EBUSY;
+ dev_err(tps->dev, "Update start failed (%d)\n", ret);
+ goto release_fw;
+ }
+
+ bytes_left = fw->size;
+ while (bytes_left) {
+ if (bytes_left < TPS_MAX_LEN)
+ in_len = bytes_left;
+ else
+ in_len = TPS_MAX_LEN;
+ ret = tps6598x_exec_cmd(tps, "PTCd", in_len,
+ fw->data + copied_bytes,
+ TPS_PTCD_OUT_BYTES, out);
+ if (ret || out[TPS_PTCD_TRANSFER_STATUS] ||
+ out[TPS_PTCD_LOADING_STATE] == TPS_PTCD_LOAD_ERR) {
+ if (!ret)
+ ret = -EBUSY;
+ dev_err(tps->dev, "Patch download failed (%d)\n", ret);
+ goto release_fw;
+ }
+ copied_bytes += in_len;
+ bytes_left -= in_len;
+ }
+
+ ret = tps6598x_exec_cmd(tps, "PTCc", 0, NULL, TPS_PTCC_OUT_BYTES, out);
+ if (ret || out[TPS_PTCC_DEV] || out[TPS_PTCC_APP]) {
+ if (!ret)
+ ret = -EBUSY;
+ dev_err(tps->dev, "Update completion failed (%d)\n", ret);
+ goto release_fw;
+ }
+ msleep(TPS_SETUP_MS);
+ dev_info(tps->dev, "Firmware update succeeded\n");
+
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+};
+
+static int cd321x_init(struct tps6598x *tps)
+{
+ return 0;
+}
+
static int tps25750_init(struct tps6598x *tps)
{
int ret;
@@ -1119,6 +1213,26 @@ static int tps25750_init(struct tps6598x *tps)
return 0;
}
+static int tps6598x_init(struct tps6598x *tps)
+{
+ return tps->data->apply_patch(tps);
+}
+
+static int cd321x_reset(struct tps6598x *tps)
+{
+ return 0;
+}
+
+static int tps25750_reset(struct tps6598x *tps)
+{
+ return tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
+}
+
+static int tps6598x_reset(struct tps6598x *tps)
+{
+ return 0;
+}
+
static int
tps25750_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
{
@@ -1182,7 +1296,6 @@ static int tps6598x_probe(struct i2c_client *client)
u32 vid;
int ret;
u64 mask1;
- bool is_tps25750;
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -1191,12 +1304,18 @@ static int tps6598x_probe(struct i2c_client *client)
mutex_init(&tps->lock);
tps->dev = &client->dev;
+ tps->reset = devm_gpiod_get_optional(tps->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(tps->reset))
+ return dev_err_probe(tps->dev, PTR_ERR(tps->reset),
+ "failed to get reset GPIO\n");
+ if (tps->reset)
+ msleep(TPS_SETUP_MS);
+
tps->regmap = devm_regmap_init_i2c(client, &tps6598x_regmap_config);
if (IS_ERR(tps->regmap))
return PTR_ERR(tps->regmap);
- is_tps25750 = device_is_compatible(tps->dev, "ti,tps25750");
- if (!is_tps25750) {
+ if (!device_is_compatible(tps->dev, "ti,tps25750")) {
ret = tps6598x_read32(tps, TPS_REG_VID, &vid);
if (ret < 0 || !vid)
return -ENODEV;
@@ -1239,8 +1358,8 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- if (is_tps25750 && ret == TPS_MODE_PTCH) {
- ret = tps25750_init(tps);
+ if (ret == TPS_MODE_PTCH) {
+ ret = tps->data->init(tps);
if (ret)
return ret;
}
@@ -1328,8 +1447,8 @@ err_clear_mask:
tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
err_reset_controller:
/* Reset PD controller to remove any applied patch */
- if (is_tps25750)
- tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
+ tps->data->reset(tps);
+
return ret;
}
@@ -1346,8 +1465,10 @@ static void tps6598x_remove(struct i2c_client *client)
usb_role_switch_put(tps->role_sw);
/* Reset PD controller to remove any applied patch */
- if (device_is_compatible(tps->dev, "ti,tps25750"))
- tps6598x_exec_cmd_tmo(tps, "GAID", 0, NULL, 0, NULL, 2000, 0);
+ tps->data->reset(tps);
+
+ if (tps->reset)
+ gpiod_set_value_cansleep(tps->reset, 1);
}
static int __maybe_unused tps6598x_suspend(struct device *dev)
@@ -1358,6 +1479,8 @@ static int __maybe_unused tps6598x_suspend(struct device *dev)
if (tps->wakeup) {
disable_irq(client->irq);
enable_irq_wake(client->irq);
+ } else if (tps->reset) {
+ gpiod_set_value_cansleep(tps->reset, 1);
}
if (!client->irq)
@@ -1376,8 +1499,8 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
if (ret < 0)
return ret;
- if (device_is_compatible(tps->dev, "ti,tps25750") && ret == TPS_MODE_PTCH) {
- ret = tps25750_init(tps);
+ if (ret == TPS_MODE_PTCH) {
+ ret = tps->data->init(tps);
if (ret)
return ret;
}
@@ -1385,6 +1508,9 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
if (tps->wakeup) {
disable_irq_wake(client->irq);
enable_irq(client->irq);
+ } else if (tps->reset) {
+ gpiod_set_value_cansleep(tps->reset, 0);
+ msleep(TPS_SETUP_MS);
}
if (!client->irq)
@@ -1403,6 +1529,8 @@ static const struct tipd_data cd321x_data = {
.register_port = tps6598x_register_port,
.trace_power_status = trace_tps6598x_power_status,
.trace_status = trace_tps6598x_status,
+ .init = cd321x_init,
+ .reset = cd321x_reset,
};
static const struct tipd_data tps6598x_data = {
@@ -1410,6 +1538,9 @@ static const struct tipd_data tps6598x_data = {
.register_port = tps6598x_register_port,
.trace_power_status = trace_tps6598x_power_status,
.trace_status = trace_tps6598x_status,
+ .apply_patch = tps6598x_apply_patch,
+ .init = tps6598x_init,
+ .reset = tps6598x_reset,
};
static const struct tipd_data tps25750_data = {
@@ -1418,6 +1549,8 @@ static const struct tipd_data tps25750_data = {
.trace_power_status = trace_tps25750_power_status,
.trace_status = trace_tps25750_status,
.apply_patch = tps25750_apply_patch,
+ .init = tps25750_init,
+ .reset = tps25750_reset,
};
static const struct of_device_id tps6598x_of_match[] = {
diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
index 01609bf50..89b245194 100644
--- a/drivers/usb/typec/tipd/tps6598x.h
+++ b/drivers/usb/typec/tipd/tps6598x.h
@@ -235,4 +235,22 @@
/* SLEEP CONF REG */
#define TPS_SLEEP_CONF_SLEEP_MODE_ALLOWED BIT(0)
+/* Start Patch Download Sequence */
+#define TPS_PTCS_CONTENT_APP BIT(0)
+#define TPS_PTCS_CONTENT_DEV BIT(1)
+#define TPS_PTCS_OUT_BYTES 4
+#define TPS_PTCS_STATUS 1
+
+#define TPS_PTCS_STATUS_FAIL 0x80
+/* Patch Download */
+#define TPS_PTCD_OUT_BYTES 10
+#define TPS_PTCD_TRANSFER_STATUS 1
+#define TPS_PTCD_LOADING_STATE 2
+
+#define TPS_PTCD_LOAD_ERR 0x09
+/* Patch Download Complete */
+#define TPS_PTCC_OUT_BYTES 4
+#define TPS_PTCC_DEV 2
+#define TPS_PTCC_APP 3
+
#endif /* __TPS6598X_H__ */
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 70d9f4eeb..cd415565b 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -36,6 +36,19 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
+static int ucsi_read_message_in(struct ucsi *ucsi, void *buf,
+ size_t buf_size)
+{
+ /*
+ * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
+ * reads here.
+ */
+ if (ucsi->version <= UCSI_VERSION_1_2)
+ buf_size = clamp(buf_size, 0, 16);
+
+ return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size);
+}
+
static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
u64 ctrl;
@@ -72,7 +85,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
if (ret < 0)
return ret;
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
+ ret = ucsi_read_message_in(ucsi, &error, sizeof(error));
if (ret)
return ret;
@@ -174,7 +187,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
length = ret;
if (data) {
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
+ ret = ucsi_read_message_in(ucsi, data, size);
if (ret)
goto out;
}
@@ -582,6 +595,9 @@ static int ucsi_read_pdos(struct ucsi_connector *con,
u64 command;
int ret;
+ if (ucsi->quirks & UCSI_NO_PARTNER_PDOS)
+ return 0;
+
command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
@@ -1593,6 +1609,15 @@ int ucsi_register(struct ucsi *ucsi)
if (!ucsi->version)
return -ENODEV;
+ /*
+ * Version format is JJ.M.N (JJ = Major version, M = Minor version,
+ * N = sub-minor version).
+ */
+ dev_dbg(ucsi->dev, "Registered UCSI interface with version %x.%x.%x",
+ UCSI_BCD_GET_MAJOR(ucsi->version),
+ UCSI_BCD_GET_MINOR(ucsi->version),
+ UCSI_BCD_GET_SUBMINOR(ucsi->version));
+
queue_delayed_work(system_long_wq, &ucsi->work, 0);
ucsi_debugfs_register(ucsi);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 13ec976b1..ede987648 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -23,6 +23,17 @@ struct dentry;
#define UCSI_CONTROL 8
#define UCSI_MESSAGE_IN 16
#define UCSI_MESSAGE_OUT 32
+#define UCSIv2_MESSAGE_OUT 272
+
+/* UCSI versions */
+#define UCSI_VERSION_1_2 0x0120
+#define UCSI_VERSION_2_0 0x0200
+#define UCSI_VERSION_2_1 0x0210
+#define UCSI_VERSION_3_0 0x0300
+
+#define UCSI_BCD_GET_MAJOR(_v_) (((_v_) >> 8) & 0xFF)
+#define UCSI_BCD_GET_MINOR(_v_) (((_v_) >> 4) & 0x0F)
+#define UCSI_BCD_GET_SUBMINOR(_v_) ((_v_) & 0x0F)
/* Command Status and Connector Change Indication (CCI) bits */
#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
@@ -317,6 +328,9 @@ struct ucsi {
#define EVENT_PENDING 0
#define COMMAND_PENDING 1
#define ACK_PENDING 2
+
+ unsigned long quirks;
+#define UCSI_NO_PARTNER_PDOS BIT(0) /* Don't read partner's PDOs */
};
#define UCSI_MAX_SVID 5
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 894622b65..ce08eb33e 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -6,6 +6,7 @@
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/usb/typec_mux.h>
@@ -310,11 +311,21 @@ static void pmic_glink_ucsi_destroy(void *data)
mutex_unlock(&ucsi->lock);
}
+static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
+ { .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ {}
+};
+
static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct pmic_glink_ucsi *ucsi;
struct device *dev = &adev->dev;
+ const struct of_device_id *match;
struct fwnode_handle *fwnode;
int ret;
@@ -341,6 +352,10 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
if (ret)
return ret;
+ match = of_match_device(pmic_glink_ucsi_of_quirks, dev->parent);
+ if (match)
+ ucsi->ucsi->quirks = (unsigned long)match->data;
+
ucsi_set_drvdata(ucsi->ucsi, ucsi);
device_for_each_child_node(dev, fwnode) {
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 0a6624d37..79110a69d 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -377,14 +377,14 @@ static int __init usbip_host_init(void)
goto err_usb_register;
}
- ret = driver_create_file(&stub_driver.drvwrap.driver,
+ ret = driver_create_file(&stub_driver.driver,
&driver_attr_match_busid);
if (ret) {
pr_err("driver_create_file failed\n");
goto err_create_file;
}
- ret = driver_create_file(&stub_driver.drvwrap.driver,
+ ret = driver_create_file(&stub_driver.driver,
&driver_attr_rebind);
if (ret) {
pr_err("driver_create_file failed\n");
@@ -402,10 +402,10 @@ err_usb_register:
static void __exit usbip_host_exit(void)
{
- driver_remove_file(&stub_driver.drvwrap.driver,
+ driver_remove_file(&stub_driver.driver,
&driver_attr_match_busid);
- driver_remove_file(&stub_driver.drvwrap.driver,
+ driver_remove_file(&stub_driver.driver,
&driver_attr_rebind);
/*
diff --git a/drivers/usb/usbip/vudc.h b/drivers/usb/usbip/vudc.h
index 1bd4bc005..faf61c9c6 100644
--- a/drivers/usb/usbip/vudc.h
+++ b/drivers/usb/usbip/vudc.h
@@ -173,6 +173,6 @@ struct vudc_device *alloc_vudc_device(int devid);
void put_vudc_device(struct vudc_device *udc_dev);
int vudc_probe(struct platform_device *pdev);
-int vudc_remove(struct platform_device *pdev);
+void vudc_remove(struct platform_device *pdev);
#endif /* __USBIP_VUDC_H */
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 44b04c54c..f11535020 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -628,12 +628,11 @@ out:
return ret;
}
-int vudc_remove(struct platform_device *pdev)
+void vudc_remove(struct platform_device *pdev)
{
struct vudc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
cleanup_vudc_hw(udc);
kfree(udc);
- return 0;
}
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 993e721cb..8bee553e4 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -19,7 +19,7 @@ MODULE_PARM_DESC(num, "number of emulated controllers");
static struct platform_driver vudc_driver = {
.probe = vudc_probe,
- .remove = vudc_remove,
+ .remove_new = vudc_remove,
.driver = {
.name = GADGET_NAME,
.dev_groups = vudc_groups,
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 84547d998..50aac8fe5 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -35,6 +35,9 @@ struct mlx5_vdpa_mr {
struct vhost_iotlb *iotlb;
bool user_mr;
+
+ refcount_t refcount;
+ struct list_head mr_list;
};
struct mlx5_vdpa_resources {
@@ -93,6 +96,7 @@ struct mlx5_vdpa_dev {
u32 generation;
struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+ struct list_head mr_list_head;
/* serialize mr access */
struct mutex mr_mtx;
struct mlx5_control_vq cvq;
@@ -118,8 +122,10 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
struct vhost_iotlb *iotlb);
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
- struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr);
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr,
unsigned int asid);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 2197c46e5..4758914cc 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -498,32 +498,54 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
+ if (WARN_ON(!mr))
+ return;
+
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
destroy_dma_mr(mvdev, mr);
vhost_iotlb_free(mr->iotlb);
+
+ list_del(&mr->mr_list);
+
+ kfree(mr);
}
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
- struct mlx5_vdpa_mr *mr)
+static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr)
{
if (!mr)
return;
+ if (refcount_dec_and_test(&mr->refcount))
+ _mlx5_vdpa_destroy_mr(mvdev, mr);
+}
+
+void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr)
+{
mutex_lock(&mvdev->mr_mtx);
+ _mlx5_vdpa_put_mr(mvdev, mr);
+ mutex_unlock(&mvdev->mr_mtx);
+}
- _mlx5_vdpa_destroy_mr(mvdev, mr);
+static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr)
+{
+ if (!mr)
+ return;
- for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) {
- if (mvdev->mr[i] == mr)
- mvdev->mr[i] = NULL;
- }
+ refcount_inc(&mr->refcount);
+}
+void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
+ struct mlx5_vdpa_mr *mr)
+{
+ mutex_lock(&mvdev->mr_mtx);
+ _mlx5_vdpa_get_mr(mvdev, mr);
mutex_unlock(&mvdev->mr_mtx);
-
- kfree(mr);
}
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
@@ -534,10 +556,23 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
mutex_lock(&mvdev->mr_mtx);
+ _mlx5_vdpa_put_mr(mvdev, old_mr);
mvdev->mr[asid] = new_mr;
- if (old_mr) {
- _mlx5_vdpa_destroy_mr(mvdev, old_mr);
- kfree(old_mr);
+
+ mutex_unlock(&mvdev->mr_mtx);
+}
+
+static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
+{
+ struct mlx5_vdpa_mr *mr;
+
+ mutex_lock(&mvdev->mr_mtx);
+
+ list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
+
+ mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
+ "mr: %p, mkey: 0x%x, refcount: %u\n",
+ mr, mr->mkey, refcount_read(&mr->refcount));
}
mutex_unlock(&mvdev->mr_mtx);
@@ -547,9 +582,11 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
- mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]);
+ mlx5_vdpa_update_mr(mvdev, NULL, i);
prune_iotlb(mvdev->cvq.iotlb);
+
+ mlx5_vdpa_show_mr_leaks(mvdev);
}
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
@@ -576,6 +613,8 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto err_iotlb;
+ list_add_tail(&mr->mr_list, &mvdev->mr_list_head);
+
return 0;
err_iotlb:
@@ -607,6 +646,8 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (err)
goto out_err;
+ refcount_set(&mr->refcount, 1);
+
return mr;
out_err:
@@ -651,7 +692,7 @@ int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
if (asid >= MLX5_VDPA_NUM_AS)
return -EINVAL;
- mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+ mlx5_vdpa_update_mr(mvdev, NULL, asid);
if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
if (mlx5_vdpa_create_dma_mr(mvdev))
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 7795d2b7f..ecfc16151 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -120,6 +120,12 @@ struct mlx5_vdpa_virtqueue {
u16 avail_idx;
u16 used_idx;
int fw_state;
+
+ u64 modified_fields;
+
+ struct mlx5_vdpa_mr *vq_mr;
+ struct mlx5_vdpa_mr *desc_mr;
+
struct msi_map map;
/* keep last in the struct */
@@ -941,6 +947,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
kfree(in);
mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_vdpa_get_mr(mvdev, vq_mr);
+ mvq->vq_mr = vq_mr;
+
+ if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) {
+ mlx5_vdpa_get_mr(mvdev, vq_desc_mr);
+ mvq->desc_mr = vq_desc_mr;
+ }
+
return 0;
err_cmd:
@@ -967,6 +981,12 @@ static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtq
}
mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
umems_destroy(ndev, mvq);
+
+ mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr);
+ mvq->vq_mr = NULL;
+
+ mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr);
+ mvq->desc_mr = NULL;
}
static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
@@ -1165,7 +1185,12 @@ err_cmd:
return err;
}
-static bool is_valid_state_change(int oldstate, int newstate)
+static bool is_resumable(struct mlx5_vdpa_net *ndev)
+{
+ return ndev->mvdev.vdev.config->resume;
+}
+
+static bool is_valid_state_change(int oldstate, int newstate, bool resumable)
{
switch (oldstate) {
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT:
@@ -1173,25 +1198,43 @@ static bool is_valid_state_change(int oldstate, int newstate)
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY:
return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND:
+ return resumable ? newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY : false;
case MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR:
default:
return false;
}
}
-static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state)
+static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq)
+{
+ /* Only state is always modifiable */
+ if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE)
+ return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT ||
+ mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
+
+ return true;
+}
+
+static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ int state)
{
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
+ struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+ struct mlx5_vdpa_mr *desc_mr = NULL;
+ struct mlx5_vdpa_mr *vq_mr = NULL;
+ bool state_change = false;
void *obj_context;
void *cmd_hdr;
+ void *vq_ctx;
void *in;
int err;
if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE)
return 0;
- if (!is_valid_state_change(mvq->fw_state, state))
+ if (!modifiable_virtqueue_fields(mvq))
return -EINVAL;
in = kzalloc(inlen, GFP_KERNEL);
@@ -1206,17 +1249,83 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
- MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select,
- MLX5_VIRTQ_MODIFY_MASK_STATE);
- MLX5_SET(virtio_net_q_object, obj_context, state, state);
+ vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
+ if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ MLX5_SET(virtio_net_q_object, obj_context, state, state);
+ state_change = true;
+ }
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) {
+ MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+ MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+ MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+ }
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX)
+ MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX)
+ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+ vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+
+ if (vq_mr)
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
+ else
+ mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
+ }
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+ desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+
+ if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
+ MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
+ else
+ mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
+ }
+
+ MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
- kfree(in);
- if (!err)
+ if (err)
+ goto done;
+
+ if (state_change)
mvq->fw_state = state;
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
+ mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
+ mlx5_vdpa_get_mr(mvdev, vq_mr);
+ mvq->vq_mr = vq_mr;
+ }
+
+ if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
+ mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
+ mlx5_vdpa_get_mr(mvdev, desc_mr);
+ mvq->desc_mr = desc_mr;
+ }
+
+ mvq->modified_fields = 0;
+
+done:
+ kfree(in);
return err;
}
+static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
+ struct mlx5_vdpa_virtqueue *mvq,
+ unsigned int state)
+{
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE;
+ return modify_virtqueue(ndev, mvq, state);
+}
+
static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
u32 in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {};
@@ -1345,7 +1454,7 @@ static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
goto err_vq;
if (mvq->ready) {
- err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
if (err) {
mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n",
idx, err);
@@ -1380,7 +1489,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
return;
- if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
+ if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND))
mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n");
if (query_virtqueue(ndev, mvq, &attr)) {
@@ -1399,12 +1508,31 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
suspend_vq(ndev, &ndev->vqs[i]);
}
+static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+ if (!mvq->initialized || !is_resumable(ndev))
+ return;
+
+ if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
+ return;
+
+ if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
+ mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index);
+}
+
+static void resume_vqs(struct mlx5_vdpa_net *ndev)
+{
+ for (int i = 0; i < ndev->mvdev.max_vqs; i++)
+ resume_vq(ndev, &ndev->vqs[i]);
+}
+
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
{
if (!mvq->initialized)
return;
suspend_vq(ndev, mvq);
+ mvq->modified_fields = 0;
destroy_virtqueue(ndev, mvq);
dealloc_vector(ndev, mvq);
counter_set_dealloc(ndev, mvq);
@@ -2136,6 +2264,7 @@ static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_
mvq->desc_addr = desc_area;
mvq->device_addr = device_area;
mvq->driver_addr = driver_area;
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS;
return 0;
}
@@ -2212,7 +2341,7 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
if (!ready) {
suspend_vq(ndev, mvq);
} else {
- err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
+ err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
if (err) {
mlx5_vdpa_warn(mvdev, "modify VQ %d to ready failed (%d)\n", idx, err);
ready = false;
@@ -2260,6 +2389,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
mvq->used_idx = state->split.avail_index;
mvq->avail_idx = state->split.avail_index;
+ mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX |
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX;
return 0;
}
@@ -2708,24 +2839,35 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
unsigned int asid)
{
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ bool teardown = !is_resumable(ndev);
int err;
suspend_vqs(ndev);
- err = save_channels_info(ndev);
- if (err)
- return err;
+ if (teardown) {
+ err = save_channels_info(ndev);
+ if (err)
+ return err;
- teardown_driver(ndev);
+ teardown_driver(ndev);
+ }
mlx5_vdpa_update_mr(mvdev, new_mr, asid);
+ for (int i = 0; i < ndev->cur_num_vqs; i++)
+ ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY |
+ MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
+
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
return 0;
- restore_channels_info(ndev);
- err = setup_driver(mvdev);
- if (err)
- return err;
+ if (teardown) {
+ restore_channels_info(ndev);
+ err = setup_driver(mvdev);
+ if (err)
+ return err;
+ }
+
+ resume_vqs(ndev);
return 0;
}
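
On resumable devices, mlx5_vdpa_change_map() now skips the expensive save/teardown/restore cycle: it suspends the queues, swaps the memory region, flags every queue's mkey fields as modified so the next modify command picks up the new mapping, and resumes. The stubbed flow below prints the two paths; all helper bodies are placeholders for illustration only.

#include <stdio.h>

static int is_resumable(void) { return 1; }    /* flip to 0 for old path */
static void step(const char *s) { printf("%s\n", s); }

static int change_map(void)
{
	int teardown = !is_resumable();

	step("suspend_vqs");
	if (teardown) {
		step("save_channels_info");
		step("teardown_driver");
	}
	step("update_mr; mark vq mkeys modified"); /* next modify remaps  */
	if (teardown) {
		step("restore_channels_info");
		step("setup_driver");              /* queues rebuilt ready */
	}
	step("resume_vqs");                /* early-out on non-resumable HW */
	return 0;
}

int main(void) { return change_map(); }
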
@@ -2809,8 +2951,10 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
{
int i;
- for (i = 0; i < ndev->mvdev.max_vqs; i++)
+ for (i = 0; i < ndev->mvdev.max_vqs; i++) {
ndev->vqs[i].ready = false;
+ ndev->vqs[i].modified_fields = 0;
+ }
ndev->mvdev.cvq.ready = false;
}
@@ -2987,7 +3131,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
out_err:
- mlx5_vdpa_destroy_mr(mvdev, new_mr);
+ mlx5_vdpa_put_mr(mvdev, new_mr);
return err;
}
@@ -3234,6 +3378,23 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
return 0;
}
+static int mlx5_vdpa_resume(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev;
+
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ mlx5_vdpa_info(mvdev, "resuming device\n");
+
+ down_write(&ndev->reslock);
+ mvdev->suspended = false;
+ resume_vqs(ndev);
+ register_link_notifier(ndev);
+ up_write(&ndev->reslock);
+ return 0;
+}
+
static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
unsigned int asid)
{
@@ -3290,6 +3451,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_dma_dev = mlx5_get_vq_dma_dev,
.free = mlx5_vdpa_free,
.suspend = mlx5_vdpa_suspend,
+ .resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
};
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -3565,6 +3727,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
if (err)
goto err_mpfs;
+ INIT_LIST_HEAD(&mvdev->mr_list_head);
+
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev);
if (err)
@@ -3661,6 +3825,9 @@ static int mlx5v_probe(struct auxiliary_device *adev,
if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
mgtdev->vdpa_ops.get_vq_desc_group = NULL;
+ if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
+ mgtdev->vdpa_ops.resume = NULL;
+
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
if (err)
goto reg_err;
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index a7612e078..d0695680b 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -131,7 +131,7 @@ static void vdpa_release_dev(struct device *d)
if (ops->free)
ops->free(vdev);
- ida_simple_remove(&vdpa_index_ida, vdev->index);
+ ida_free(&vdpa_index_ida, vdev->index);
kfree(vdev->driver_override);
kfree(vdev);
}
@@ -205,7 +205,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
return vdev;
err_name:
- ida_simple_remove(&vdpa_index_ida, vdev->index);
+ ida_free(&vdpa_index_ida, vdev->index);
err_ida:
kfree(vdev);
err:
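
These two hunks move vdpa off the deprecated ida_simple_*() wrappers onto the current ida_alloc()/ida_free() names; the semantics are unchanged. A minimal kernel-style usage sketch (example_ida and example() are hypothetical):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example(void)
{
	int id = ida_alloc(&example_ida, GFP_KERNEL); /* >= 0, or -errno */

	if (id < 0)
		return id;
	/* ... use id ... */
	ida_free(&example_ida, id);     /* was ida_simple_remove() */
	return 0;
}
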
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 6cb5ce4a8..1d24da79c 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1157,7 +1157,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
fput(f);
break;
}
- ret = receive_fd(f, perm_to_file_flags(entry.perm));
+ ret = receive_fd(f, NULL, perm_to_file_flags(entry.perm));
fput(f);
break;
}
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index 6bda6dbb4..ceae52fd7 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -80,6 +80,16 @@ config VFIO_VIRQFD
select EVENTFD
default n
+config VFIO_DEBUGFS
+ bool "Export VFIO internals in DebugFS"
+ depends on DEBUG_FS
+ help
+ Allows exposure of VFIO device internals. This option enables
+ the use of debugfs by VFIO drivers as required. When enabled,
+ the VFIO core creates a top-level vfio directory in debugfs
+ during initialization, and drivers can then populate per-device
+ subdirectories with entries as required.
+
source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
source "drivers/vfio/mdev/Kconfig"
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 68c057052..b2fc9fb49 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -7,6 +7,7 @@ vfio-$(CONFIG_VFIO_GROUP) += group.o
vfio-$(CONFIG_IOMMUFD) += iommufd.o
vfio-$(CONFIG_VFIO_CONTAINER) += container.o
vfio-$(CONFIG_VFIO_VIRQFD) += virqfd.o
+vfio-$(CONFIG_VFIO_DEBUGFS) += debugfs.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
diff --git a/drivers/vfio/debugfs.c b/drivers/vfio/debugfs.c
new file mode 100644
index 000000000..298bd866f
--- /dev/null
+++ b/drivers/vfio/debugfs.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, HiSilicon Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vfio.h>
+#include "vfio.h"
+
+static struct dentry *vfio_debugfs_root;
+
+static int vfio_device_state_read(struct seq_file *seq, void *data)
+{
+ struct device *vf_dev = seq->private;
+ struct vfio_device *vdev = container_of(vf_dev,
+ struct vfio_device, device);
+ enum vfio_device_mig_state state;
+ int ret;
+
+ BUILD_BUG_ON(VFIO_DEVICE_STATE_NR !=
+ VFIO_DEVICE_STATE_PRE_COPY_P2P + 1);
+
+ ret = vdev->mig_ops->migration_get_state(vdev, &state);
+ if (ret)
+ return -EINVAL;
+
+ switch (state) {
+ case VFIO_DEVICE_STATE_ERROR:
+ seq_puts(seq, "ERROR\n");
+ break;
+ case VFIO_DEVICE_STATE_STOP:
+ seq_puts(seq, "STOP\n");
+ break;
+ case VFIO_DEVICE_STATE_RUNNING:
+ seq_puts(seq, "RUNNING\n");
+ break;
+ case VFIO_DEVICE_STATE_STOP_COPY:
+ seq_puts(seq, "STOP_COPY\n");
+ break;
+ case VFIO_DEVICE_STATE_RESUMING:
+ seq_puts(seq, "RESUMING\n");
+ break;
+ case VFIO_DEVICE_STATE_RUNNING_P2P:
+ seq_puts(seq, "RUNNING_P2P\n");
+ break;
+ case VFIO_DEVICE_STATE_PRE_COPY:
+ seq_puts(seq, "PRE_COPY\n");
+ break;
+ case VFIO_DEVICE_STATE_PRE_COPY_P2P:
+ seq_puts(seq, "PRE_COPY_P2P\n");
+ break;
+ default:
+ seq_puts(seq, "Invalid\n");
+ }
+
+ return 0;
+}
+
+void vfio_device_debugfs_init(struct vfio_device *vdev)
+{
+ struct device *dev = &vdev->device;
+
+ vdev->debug_root = debugfs_create_dir(dev_name(vdev->dev),
+ vfio_debugfs_root);
+
+ if (vdev->mig_ops) {
+ struct dentry *vfio_dev_migration = NULL;
+
+ vfio_dev_migration = debugfs_create_dir("migration",
+ vdev->debug_root);
+ debugfs_create_devm_seqfile(dev, "state", vfio_dev_migration,
+ vfio_device_state_read);
+ }
+}
+
+void vfio_device_debugfs_exit(struct vfio_device *vdev)
+{
+ debugfs_remove_recursive(vdev->debug_root);
+}
+
+void vfio_debugfs_create_root(void)
+{
+ vfio_debugfs_root = debugfs_create_dir("vfio", NULL);
+}
+
+void vfio_debugfs_remove_root(void)
+{
+ debugfs_remove_recursive(vfio_debugfs_root);
+ vfio_debugfs_root = NULL;
+}
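
With CONFIG_VFIO_DEBUGFS enabled, every registered device that implements mig_ops gains a read-only migration/state node under the top-level vfio directory in debugfs. A small userspace check, assuming debugfs is mounted at /sys/kernel/debug and using a placeholder device name:

#include <stdio.h>

int main(void)
{
	char state[32];
	FILE *f = fopen("/sys/kernel/debug/vfio/0000:3b:00.1/migration/state",
			"r");

	if (!f)
		return 1;       /* debugfs off, no such device, or no perms */
	if (fgets(state, sizeof(state), f))
		printf("migration state: %s", state);  /* e.g. "RUNNING" */
	fclose(f);
	return 0;
}
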
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 8125e5f37..18c397df5 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -65,4 +65,6 @@ source "drivers/vfio/pci/hisilicon/Kconfig"
source "drivers/vfio/pci/pds/Kconfig"
+source "drivers/vfio/pci/virtio/Kconfig"
+
endmenu
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 45167be46..046139a4e 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/
obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
obj-$(CONFIG_PDS_VFIO_PCI) += pds/
+
+obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio/
diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c
index 27607d7b9..8ddf4346f 100644
--- a/drivers/vfio/pci/pds/dirty.c
+++ b/drivers/vfio/pci/pds/dirty.c
@@ -70,7 +70,7 @@ out_free_region_info:
kfree(region_info);
}
-static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_dirty *dirty,
+static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
unsigned long bytes)
{
unsigned long *host_seq_bmp, *host_ack_bmp;
@@ -85,47 +85,63 @@ static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_dirty *dirty,
return -ENOMEM;
}
- dirty->host_seq.bmp = host_seq_bmp;
- dirty->host_ack.bmp = host_ack_bmp;
+ region->host_seq = host_seq_bmp;
+ region->host_ack = host_ack_bmp;
+ region->bmp_bytes = bytes;
return 0;
}
static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
{
- vfree(dirty->host_seq.bmp);
- vfree(dirty->host_ack.bmp);
- dirty->host_seq.bmp = NULL;
- dirty->host_ack.bmp = NULL;
+ if (!dirty->regions)
+ return;
+
+ for (int i = 0; i < dirty->num_regions; i++) {
+ struct pds_vfio_region *region = &dirty->regions[i];
+
+ vfree(region->host_seq);
+ vfree(region->host_ack);
+ region->host_seq = NULL;
+ region->host_ack = NULL;
+ region->bmp_bytes = 0;
+ }
}
static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
- struct pds_vfio_bmp_info *bmp_info)
+ struct pds_vfio_region *region)
{
struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
struct device *pdsc_dev = &pci_physfn(pdev)->dev;
- dma_unmap_single(pdsc_dev, bmp_info->sgl_addr,
- bmp_info->num_sge * sizeof(struct pds_lm_sg_elem),
+ dma_unmap_single(pdsc_dev, region->sgl_addr,
+ region->num_sge * sizeof(struct pds_lm_sg_elem),
DMA_BIDIRECTIONAL);
- kfree(bmp_info->sgl);
+ kfree(region->sgl);
- bmp_info->num_sge = 0;
- bmp_info->sgl = NULL;
- bmp_info->sgl_addr = 0;
+ region->num_sge = 0;
+ region->sgl = NULL;
+ region->sgl_addr = 0;
}
static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
{
- if (pds_vfio->dirty.host_seq.sgl)
- __pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_seq);
- if (pds_vfio->dirty.host_ack.sgl)
- __pds_vfio_dirty_free_sgl(pds_vfio, &pds_vfio->dirty.host_ack);
+ struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+
+ if (!dirty->regions)
+ return;
+
+ for (int i = 0; i < dirty->num_regions; i++) {
+ struct pds_vfio_region *region = &dirty->regions[i];
+
+ if (region->sgl)
+ __pds_vfio_dirty_free_sgl(pds_vfio, region);
+ }
}
-static int __pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
- struct pds_vfio_bmp_info *bmp_info,
- u32 page_count)
+static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_region *region,
+ u32 page_count)
{
struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
struct device *pdsc_dev = &pci_physfn(pdev)->dev;
@@ -147,32 +163,81 @@ static int __pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
return -EIO;
}
- bmp_info->sgl = sgl;
- bmp_info->num_sge = max_sge;
- bmp_info->sgl_addr = sgl_addr;
+ region->sgl = sgl;
+ region->num_sge = max_sge;
+ region->sgl_addr = sgl_addr;
return 0;
}
-static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
- u32 page_count)
+static void pds_vfio_dirty_free_regions(struct pds_vfio_dirty *dirty)
{
+ vfree(dirty->regions);
+ dirty->regions = NULL;
+ dirty->num_regions = 0;
+}
+
+static int pds_vfio_dirty_alloc_regions(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_lm_dirty_region_info *region_info,
+ u64 region_page_size, u8 num_regions)
+{
+ struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+ u32 dev_bmp_offset_byte = 0;
int err;
- err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_seq,
- page_count);
- if (err)
- return err;
+ dirty->regions = vcalloc(num_regions, sizeof(struct pds_vfio_region));
+ if (!dirty->regions)
+ return -ENOMEM;
+ dirty->num_regions = num_regions;
+
+ for (int i = 0; i < num_regions; i++) {
+ struct pds_lm_dirty_region_info *ri = &region_info[i];
+ struct pds_vfio_region *region = &dirty->regions[i];
+ u64 region_size, region_start;
+ u32 page_count;
+
+ /* page_count might be adjusted by the device */
+ page_count = le32_to_cpu(ri->page_count);
+ region_start = le64_to_cpu(ri->dma_base);
+ region_size = page_count * region_page_size;
+
+ err = pds_vfio_dirty_alloc_bitmaps(region,
+ page_count / BITS_PER_BYTE);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
+ ERR_PTR(err));
+ goto out_free_regions;
+ }
- err = __pds_vfio_dirty_alloc_sgl(pds_vfio, &dirty->host_ack,
- page_count);
- if (err) {
- __pds_vfio_dirty_free_sgl(pds_vfio, &dirty->host_seq);
- return err;
+ err = pds_vfio_dirty_alloc_sgl(pds_vfio, region, page_count);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
+ ERR_PTR(err));
+ goto out_free_regions;
+ }
+
+ region->size = region_size;
+ region->start = region_start;
+ region->page_size = region_page_size;
+ region->dev_bmp_offset_start_byte = dev_bmp_offset_byte;
+
+ dev_bmp_offset_byte += page_count / BITS_PER_BYTE;
+ if (dev_bmp_offset_byte % BITS_PER_BYTE) {
+ dev_err(&pdev->dev, "Device bitmap offset is mis-aligned\n");
+ err = -EINVAL;
+ goto out_free_regions;
+ }
}
return 0;
+
+out_free_regions:
+ pds_vfio_dirty_free_bitmaps(dirty);
+ pds_vfio_dirty_free_sgl(pds_vfio);
+ pds_vfio_dirty_free_regions(dirty);
+
+ return err;
}
static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
@@ -181,16 +246,14 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
{
struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
struct device *pdsc_dev = &pci_physfn(pdev)->dev;
- struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
- u64 region_start, region_size, region_page_size;
struct pds_lm_dirty_region_info *region_info;
struct interval_tree_node *node = NULL;
+ u64 region_page_size = *page_size;
u8 max_regions = 0, num_regions;
dma_addr_t regions_dma = 0;
u32 num_ranges = nnodes;
- u32 page_count;
- u16 len;
int err;
+ u16 len;
dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n",
pds_vfio->vf_id);
@@ -217,39 +280,38 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
return -EOPNOTSUPP;
}
- /*
- * Only support 1 region for now. If there are any large gaps in the
- * VM's address regions, then this would be a waste of memory as we are
- * generating 2 bitmaps (ack/seq) from the min address to the max
- * address of the VM's address regions. In the future, if we support
- * more than one region in the device/driver we can split the bitmaps
- * on the largest address region gaps. We can do this split up to the
- * max_regions times returned from the dirty_status command.
- */
- max_regions = 1;
if (num_ranges > max_regions) {
vfio_combine_iova_ranges(ranges, nnodes, max_regions);
num_ranges = max_regions;
}
+ region_info = kcalloc(num_ranges, sizeof(*region_info), GFP_KERNEL);
+ if (!region_info)
+ return -ENOMEM;
+ len = num_ranges * sizeof(*region_info);
+
node = interval_tree_iter_first(ranges, 0, ULONG_MAX);
if (!node)
return -EINVAL;
+ for (int i = 0; i < num_ranges; i++) {
+ struct pds_lm_dirty_region_info *ri = &region_info[i];
+ u64 region_size = node->last - node->start + 1;
+ u64 region_start = node->start;
+ u32 page_count;
- region_size = node->last - node->start + 1;
- region_start = node->start;
- region_page_size = *page_size;
+ page_count = DIV_ROUND_UP(region_size, region_page_size);
- len = sizeof(*region_info);
- region_info = kzalloc(len, GFP_KERNEL);
- if (!region_info)
- return -ENOMEM;
+ ri->dma_base = cpu_to_le64(region_start);
+ ri->page_count = cpu_to_le32(page_count);
+ ri->page_size_log2 = ilog2(region_page_size);
- page_count = DIV_ROUND_UP(region_size, region_page_size);
+ dev_dbg(&pdev->dev,
+ "region_info[%d]: region_start 0x%llx region_end 0x%lx region_size 0x%llx page_count %u page_size %llu\n",
+ i, region_start, node->last, region_size, page_count,
+ region_page_size);
- region_info->dma_base = cpu_to_le64(region_start);
- region_info->page_count = cpu_to_le32(page_count);
- region_info->page_size_log2 = ilog2(region_page_size);
+ node = interval_tree_iter_next(node, 0, ULONG_MAX);
+ }
regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len,
DMA_BIDIRECTIONAL);
@@ -258,39 +320,20 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
goto out_free_region_info;
}
- err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, max_regions);
+ err = pds_vfio_dirty_enable_cmd(pds_vfio, regions_dma, num_ranges);
dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);
if (err)
goto out_free_region_info;
- /*
- * page_count might be adjusted by the device,
- * update it before freeing region_info DMA
- */
- page_count = le32_to_cpu(region_info->page_count);
-
- dev_dbg(&pdev->dev,
- "region_info: regions_dma 0x%llx dma_base 0x%llx page_count %u page_size_log2 %u\n",
- regions_dma, region_start, page_count,
- (u8)ilog2(region_page_size));
-
- err = pds_vfio_dirty_alloc_bitmaps(dirty, page_count / BITS_PER_BYTE);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n",
- ERR_PTR(err));
- goto out_free_region_info;
- }
-
- err = pds_vfio_dirty_alloc_sgl(pds_vfio, page_count);
+ err = pds_vfio_dirty_alloc_regions(pds_vfio, region_info,
+ region_page_size, num_ranges);
if (err) {
- dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n",
- ERR_PTR(err));
- goto out_free_bitmaps;
+ dev_err(&pdev->dev,
+ "Failed to allocate %d regions for tracking dirty regions: %pe\n",
+ num_regions, ERR_PTR(err));
+ goto out_dirty_disable;
}
- dirty->region_start = region_start;
- dirty->region_size = region_size;
- dirty->region_page_size = region_page_size;
pds_vfio_dirty_set_enabled(pds_vfio);
pds_vfio_print_guest_region_info(pds_vfio, max_regions);
@@ -299,8 +342,8 @@ static int pds_vfio_dirty_enable(struct pds_vfio_pci_device *pds_vfio,
return 0;
-out_free_bitmaps:
- pds_vfio_dirty_free_bitmaps(dirty);
+out_dirty_disable:
+ pds_vfio_dirty_disable_cmd(pds_vfio);
out_free_region_info:
kfree(region_info);
return err;
@@ -314,6 +357,7 @@ void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
pds_vfio_dirty_disable_cmd(pds_vfio);
pds_vfio_dirty_free_sgl(pds_vfio);
pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty);
+ pds_vfio_dirty_free_regions(&pds_vfio->dirty);
}
if (send_cmd)
@@ -321,8 +365,9 @@ void pds_vfio_dirty_disable(struct pds_vfio_pci_device *pds_vfio, bool send_cmd)
}
static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
- struct pds_vfio_bmp_info *bmp_info,
- u32 offset, u32 bmp_bytes, bool read_seq)
+ struct pds_vfio_region *region,
+ unsigned long *seq_ack_bmp, u32 offset,
+ u32 bmp_bytes, bool read_seq)
{
const char *bmp_type_str = read_seq ? "read_seq" : "write_ack";
u8 dma_dir = read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
@@ -339,7 +384,7 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
int err;
int i;
- bmp = (void *)((u64)bmp_info->bmp + offset);
+ bmp = (void *)((u64)seq_ack_bmp + offset);
page_offset = offset_in_page(bmp);
bmp -= page_offset;
@@ -375,7 +420,7 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
goto out_free_sg_table;
for_each_sgtable_dma_sg(&sg_table, sg, i) {
- struct pds_lm_sg_elem *sg_elem = &bmp_info->sgl[i];
+ struct pds_lm_sg_elem *sg_elem = &region->sgl[i];
sg_elem->addr = cpu_to_le64(sg_dma_address(sg));
sg_elem->len = cpu_to_le32(sg_dma_len(sg));
@@ -383,15 +428,16 @@ static int pds_vfio_dirty_seq_ack(struct pds_vfio_pci_device *pds_vfio,
num_sge = sg_table.nents;
size = num_sge * sizeof(struct pds_lm_sg_elem);
- dma_sync_single_for_device(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
- err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, bmp_info->sgl_addr, num_sge,
+ offset += region->dev_bmp_offset_start_byte;
+ dma_sync_single_for_device(pdsc_dev, region->sgl_addr, size, dma_dir);
+ err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, region->sgl_addr, num_sge,
offset, bmp_bytes, read_seq);
if (err)
dev_err(&pdev->dev,
"Dirty bitmap %s failed offset %u bmp_bytes %u num_sge %u DMA 0x%llx: %pe\n",
bmp_type_str, offset, bmp_bytes,
- num_sge, bmp_info->sgl_addr, ERR_PTR(err));
- dma_sync_single_for_cpu(pdsc_dev, bmp_info->sgl_addr, size, dma_dir);
+ num_sge, region->sgl_addr, ERR_PTR(err));
+ dma_sync_single_for_cpu(pdsc_dev, region->sgl_addr, size, dma_dir);
dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
out_free_sg_table:
@@ -403,32 +449,36 @@ out_free_pages:
}
static int pds_vfio_dirty_write_ack(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_region *region,
u32 offset, u32 len)
{
- return pds_vfio_dirty_seq_ack(pds_vfio, &pds_vfio->dirty.host_ack,
+ return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_ack,
offset, len, WRITE_ACK);
}
static int pds_vfio_dirty_read_seq(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_region *region,
u32 offset, u32 len)
{
- return pds_vfio_dirty_seq_ack(pds_vfio, &pds_vfio->dirty.host_seq,
+ return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_seq,
offset, len, READ_SEQ);
}
static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
+ struct pds_vfio_region *region,
struct iova_bitmap *dirty_bitmap,
u32 bmp_offset, u32 len_bytes)
{
- u64 page_size = pds_vfio->dirty.region_page_size;
- u64 region_start = pds_vfio->dirty.region_start;
+ u64 page_size = region->page_size;
+ u64 region_start = region->start;
u32 bmp_offset_bit;
__le64 *seq, *ack;
int dword_count;
dword_count = len_bytes / sizeof(u64);
- seq = (__le64 *)((u64)pds_vfio->dirty.host_seq.bmp + bmp_offset);
- ack = (__le64 *)((u64)pds_vfio->dirty.host_ack.bmp + bmp_offset);
+ seq = (__le64 *)((u64)region->host_seq + bmp_offset);
+ ack = (__le64 *)((u64)region->host_ack + bmp_offset);
bmp_offset_bit = bmp_offset * 8;
for (int i = 0; i < dword_count; i++) {
@@ -451,12 +501,28 @@ static int pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
return 0;
}
+static struct pds_vfio_region *
+pds_vfio_get_region(struct pds_vfio_pci_device *pds_vfio, unsigned long iova)
+{
+ struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+
+ for (int i = 0; i < dirty->num_regions; i++) {
+ struct pds_vfio_region *region = &dirty->regions[i];
+
+ if (iova >= region->start &&
+ iova < (region->start + region->size))
+ return region;
+ }
+
+ return NULL;
+}
+
static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
struct iova_bitmap *dirty_bitmap,
unsigned long iova, unsigned long length)
{
struct device *dev = &pds_vfio->vfio_coredev.pdev->dev;
- struct pds_vfio_dirty *dirty = &pds_vfio->dirty;
+ struct pds_vfio_region *region;
u64 bmp_offset, bmp_bytes;
u64 bitmap_size, pages;
int err;
@@ -469,25 +535,31 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
return -EINVAL;
}
- pages = DIV_ROUND_UP(length, pds_vfio->dirty.region_page_size);
+ region = pds_vfio_get_region(pds_vfio, iova);
+ if (!region) {
+ dev_err(dev, "vf%u: Failed to find region that contains iova 0x%lx length 0x%lx\n",
+ pds_vfio->vf_id, iova, length);
+ return -EINVAL;
+ }
+
+ pages = DIV_ROUND_UP(length, region->page_size);
bitmap_size =
round_up(pages, sizeof(u64) * BITS_PER_BYTE) / BITS_PER_BYTE;
dev_dbg(dev,
"vf%u: iova 0x%lx length %lu page_size %llu pages %llu bitmap_size %llu\n",
- pds_vfio->vf_id, iova, length, pds_vfio->dirty.region_page_size,
+ pds_vfio->vf_id, iova, length, region->page_size,
pages, bitmap_size);
- if (!length || ((iova - dirty->region_start + length) > dirty->region_size)) {
+ if (!length || ((iova - region->start + length) > region->size)) {
dev_err(dev, "Invalid iova 0x%lx and/or length 0x%lx to sync\n",
iova, length);
return -EINVAL;
}
/* bitmap is modified in 64 bit chunks */
- bmp_bytes = ALIGN(DIV_ROUND_UP(length / dirty->region_page_size,
- sizeof(u64)),
- sizeof(u64));
+ bmp_bytes = ALIGN(DIV_ROUND_UP(length / region->page_size,
+ sizeof(u64)), sizeof(u64));
if (bmp_bytes != bitmap_size) {
dev_err(dev,
"Calculated bitmap bytes %llu not equal to bitmap size %llu\n",
@@ -495,23 +567,30 @@ static int pds_vfio_dirty_sync(struct pds_vfio_pci_device *pds_vfio,
return -EINVAL;
}
- bmp_offset = DIV_ROUND_UP((iova - dirty->region_start) /
- dirty->region_page_size, sizeof(u64));
+ if (bmp_bytes > region->bmp_bytes) {
+ dev_err(dev,
+ "Calculated bitmap bytes %llu larger than region's cached bmp_bytes %llu\n",
+ bmp_bytes, region->bmp_bytes);
+ return -EINVAL;
+ }
+
+ bmp_offset = DIV_ROUND_UP((iova - region->start) /
+ region->page_size, sizeof(u64));
dev_dbg(dev,
"Syncing dirty bitmap, iova 0x%lx length 0x%lx, bmp_offset %llu bmp_bytes %llu\n",
iova, length, bmp_offset, bmp_bytes);
- err = pds_vfio_dirty_read_seq(pds_vfio, bmp_offset, bmp_bytes);
+ err = pds_vfio_dirty_read_seq(pds_vfio, region, bmp_offset, bmp_bytes);
if (err)
return err;
- err = pds_vfio_dirty_process_bitmaps(pds_vfio, dirty_bitmap, bmp_offset,
- bmp_bytes);
+ err = pds_vfio_dirty_process_bitmaps(pds_vfio, region, dirty_bitmap,
+ bmp_offset, bmp_bytes);
if (err)
return err;
- err = pds_vfio_dirty_write_ack(pds_vfio, bmp_offset, bmp_bytes);
+ err = pds_vfio_dirty_write_ack(pds_vfio, region, bmp_offset, bmp_bytes);
if (err)
return err;
diff --git a/drivers/vfio/pci/pds/dirty.h b/drivers/vfio/pci/pds/dirty.h
index f78da25d7..c8e23018b 100644
--- a/drivers/vfio/pci/pds/dirty.h
+++ b/drivers/vfio/pci/pds/dirty.h
@@ -4,20 +4,22 @@
#ifndef _DIRTY_H_
#define _DIRTY_H_
-struct pds_vfio_bmp_info {
- unsigned long *bmp;
- u32 bmp_bytes;
+struct pds_vfio_region {
+ unsigned long *host_seq;
+ unsigned long *host_ack;
+ u64 bmp_bytes;
+ u64 size;
+ u64 start;
+ u64 page_size;
struct pds_lm_sg_elem *sgl;
dma_addr_t sgl_addr;
+ u32 dev_bmp_offset_start_byte;
u16 num_sge;
};
struct pds_vfio_dirty {
- struct pds_vfio_bmp_info host_seq;
- struct pds_vfio_bmp_info host_ack;
- u64 region_size;
- u64 region_start;
- u64 region_page_size;
+ struct pds_vfio_region *regions;
+ u8 num_regions;
bool is_enabled;
};
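
The device still exposes one contiguous dirty bitmap; what changed is that each region remembers where its slice begins, so pds_vfio_dirty_seq_ack() adds dev_bmp_offset_start_byte to the caller's offset. The runnable sketch below shows how the per-region offsets accumulate for two hypothetical regions (the page counts are placeholders):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint32_t page_count[] = { 0x40000, 0x8000 };  /* placeholder values */
	uint32_t dev_off = 0;

	for (int i = 0; i < 2; i++) {
		uint32_t bmp_bytes = page_count[i] / BITS_PER_BYTE;

		printf("region %d: bmp_bytes=%u dev_bmp_offset_start_byte=%u\n",
		       i, bmp_bytes, dev_off);
		dev_off += bmp_bytes;  /* regions share one device bitmap */
	}
	return 0;
}
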
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index e27de61ac..07fea08ea 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -38,7 +38,7 @@
#define vfio_iowrite8 iowrite8
#define VFIO_IOWRITE(size) \
-static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev, \
+int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \
bool test_mem, u##size val, void __iomem *io) \
{ \
if (test_mem) { \
@@ -55,7 +55,8 @@ static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev, \
up_read(&vdev->memory_lock); \
\
return 0; \
-}
+} \
+EXPORT_SYMBOL_GPL(vfio_pci_core_iowrite##size);
VFIO_IOWRITE(8)
VFIO_IOWRITE(16)
@@ -65,7 +66,7 @@ VFIO_IOWRITE(64)
#endif
#define VFIO_IOREAD(size) \
-static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev, \
+int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
bool test_mem, u##size *val, void __iomem *io) \
{ \
if (test_mem) { \
@@ -82,7 +83,8 @@ static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev, \
up_read(&vdev->memory_lock); \
\
return 0; \
-}
+} \
+EXPORT_SYMBOL_GPL(vfio_pci_core_ioread##size);
VFIO_IOREAD(8)
VFIO_IOREAD(16)
@@ -119,13 +121,13 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
if (copy_from_user(&val, buf, 4))
return -EFAULT;
- ret = vfio_pci_iowrite32(vdev, test_mem,
- val, io + off);
+ ret = vfio_pci_core_iowrite32(vdev, test_mem,
+ val, io + off);
if (ret)
return ret;
} else {
- ret = vfio_pci_ioread32(vdev, test_mem,
- &val, io + off);
+ ret = vfio_pci_core_ioread32(vdev, test_mem,
+ &val, io + off);
if (ret)
return ret;
@@ -141,13 +143,13 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
if (copy_from_user(&val, buf, 2))
return -EFAULT;
- ret = vfio_pci_iowrite16(vdev, test_mem,
- val, io + off);
+ ret = vfio_pci_core_iowrite16(vdev, test_mem,
+ val, io + off);
if (ret)
return ret;
} else {
- ret = vfio_pci_ioread16(vdev, test_mem,
- &val, io + off);
+ ret = vfio_pci_core_ioread16(vdev, test_mem,
+ &val, io + off);
if (ret)
return ret;
@@ -163,13 +165,13 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
if (copy_from_user(&val, buf, 1))
return -EFAULT;
- ret = vfio_pci_iowrite8(vdev, test_mem,
- val, io + off);
+ ret = vfio_pci_core_iowrite8(vdev, test_mem,
+ val, io + off);
if (ret)
return ret;
} else {
- ret = vfio_pci_ioread8(vdev, test_mem,
- &val, io + off);
+ ret = vfio_pci_core_ioread8(vdev, test_mem,
+ &val, io + off);
if (ret)
return ret;
@@ -200,7 +202,7 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
return done;
}
-static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
+int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
@@ -223,6 +225,7 @@ static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
return 0;
}
+EXPORT_SYMBOL_GPL(vfio_pci_core_setup_barmap);
ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
@@ -262,7 +265,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
}
x_end = end;
} else {
- int ret = vfio_pci_setup_barmap(vdev, bar);
+ int ret = vfio_pci_core_setup_barmap(vdev, bar);
if (ret) {
done = ret;
goto out;
@@ -363,21 +366,21 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
{
switch (ioeventfd->count) {
case 1:
- vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
- ioeventfd->data, ioeventfd->addr);
+ vfio_pci_core_iowrite8(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
case 2:
- vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
- ioeventfd->data, ioeventfd->addr);
+ vfio_pci_core_iowrite16(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
case 4:
- vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
- ioeventfd->data, ioeventfd->addr);
+ vfio_pci_core_iowrite32(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
#ifdef iowrite64
case 8:
- vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
- ioeventfd->data, ioeventfd->addr);
+ vfio_pci_core_iowrite64(ioeventfd->vdev, test_mem,
+ ioeventfd->data, ioeventfd->addr);
break;
#endif
}
@@ -438,7 +441,7 @@ int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
return -EINVAL;
#endif
- ret = vfio_pci_setup_barmap(vdev, bar);
+ ret = vfio_pci_core_setup_barmap(vdev, bar);
if (ret)
return ret;
diff --git a/drivers/vfio/pci/virtio/Kconfig b/drivers/vfio/pci/virtio/Kconfig
new file mode 100644
index 000000000..bd80eca4a
--- /dev/null
+++ b/drivers/vfio/pci/virtio/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIRTIO_VFIO_PCI
+ tristate "VFIO support for VIRTIO NET PCI devices"
+ depends on VIRTIO_PCI && VIRTIO_PCI_ADMIN_LEGACY
+ select VFIO_PCI_CORE
+ help
+ This provides support for exposing VIRTIO NET VF devices that
+ support legacy I/O access, using the VFIO framework so that they
+ can be driven by a legacy virtio driver in the guest.
+ The PCIe specification does not allow VFs to implement I/O Space,
+ so this driver emulates an I/O BAR in software to let a VF be
+ seen as a transitional device by its users and work with a
+ legacy driver.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/virtio/Makefile b/drivers/vfio/pci/virtio/Makefile
new file mode 100644
index 000000000..7171105ba
--- /dev/null
+++ b/drivers/vfio/pci/virtio/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIRTIO_VFIO_PCI) += virtio-vfio-pci.o
+virtio-vfio-pci-y := main.o
diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c
new file mode 100644
index 000000000..d5af68383
--- /dev/null
+++ b/drivers/vfio/pci/virtio/main.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/virtio_pci.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_pci_admin.h>
+
+struct virtiovf_pci_core_device {
+ struct vfio_pci_core_device core_device;
+ u8 *bar0_virtual_buf;
+ /* synchronize access to the virtual buf */
+ struct mutex bar_mutex;
+ void __iomem *notify_addr;
+ u64 notify_offset;
+ __le32 pci_base_addr_0;
+ __le16 pci_cmd;
+ u8 bar0_virtual_buf_size;
+ u8 notify_bar;
+};
+
+static int
+virtiovf_issue_legacy_rw_cmd(struct virtiovf_pci_core_device *virtvdev,
+ loff_t pos, char __user *buf,
+ size_t count, bool read)
+{
+ bool msix_enabled =
+ (virtvdev->core_device.irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
+ struct pci_dev *pdev = virtvdev->core_device.pdev;
+ u8 *bar0_buf = virtvdev->bar0_virtual_buf;
+ bool common;
+ u8 offset;
+ int ret;
+
+ common = pos < VIRTIO_PCI_CONFIG_OFF(msix_enabled);
+ /* offset within the relevant configuration area */
+ offset = common ? pos : pos - VIRTIO_PCI_CONFIG_OFF(msix_enabled);
+ mutex_lock(&virtvdev->bar_mutex);
+ if (read) {
+ if (common)
+ ret = virtio_pci_admin_legacy_common_io_read(pdev, offset,
+ count, bar0_buf + pos);
+ else
+ ret = virtio_pci_admin_legacy_device_io_read(pdev, offset,
+ count, bar0_buf + pos);
+ if (ret)
+ goto out;
+ if (copy_to_user(buf, bar0_buf + pos, count))
+ ret = -EFAULT;
+ } else {
+ if (copy_from_user(bar0_buf + pos, buf, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (common)
+ ret = virtio_pci_admin_legacy_common_io_write(pdev, offset,
+ count, bar0_buf + pos);
+ else
+ ret = virtio_pci_admin_legacy_device_io_write(pdev, offset,
+ count, bar0_buf + pos);
+ }
+out:
+ mutex_unlock(&virtvdev->bar_mutex);
+ return ret;
+}
+
+static int
+virtiovf_pci_bar0_rw(struct virtiovf_pci_core_device *virtvdev,
+ loff_t pos, char __user *buf,
+ size_t count, bool read)
+{
+ struct vfio_pci_core_device *core_device = &virtvdev->core_device;
+ struct pci_dev *pdev = core_device->pdev;
+ u16 queue_notify;
+ int ret;
+
+ if (!(le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO))
+ return -EIO;
+
+ if (pos + count > virtvdev->bar0_virtual_buf_size)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret) {
+ pci_info_ratelimited(pdev, "runtime resume failed %d\n", ret);
+ return -EIO;
+ }
+
+ switch (pos) {
+ case VIRTIO_PCI_QUEUE_NOTIFY:
+ if (count != sizeof(queue_notify)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (read) {
+ ret = vfio_pci_core_ioread16(core_device, true, &queue_notify,
+ virtvdev->notify_addr);
+ if (ret)
+ goto end;
+ if (copy_to_user(buf, &queue_notify,
+ sizeof(queue_notify))) {
+ ret = -EFAULT;
+ goto end;
+ }
+ } else {
+ if (copy_from_user(&queue_notify, buf, count)) {
+ ret = -EFAULT;
+ goto end;
+ }
+ ret = vfio_pci_core_iowrite16(core_device, true, queue_notify,
+ virtvdev->notify_addr);
+ }
+ break;
+ default:
+ ret = virtiovf_issue_legacy_rw_cmd(virtvdev, pos, buf, count,
+ read);
+ }
+
+end:
+ pm_runtime_put(&pdev->dev);
+ return ret ? ret : count;
+}
+
+static bool range_intersect_range(loff_t range1_start, size_t count1,
+ loff_t range2_start, size_t count2,
+ loff_t *start_offset,
+ size_t *intersect_count,
+ size_t *register_offset)
+{
+ if (range1_start <= range2_start &&
+ range1_start + count1 > range2_start) {
+ *start_offset = range2_start - range1_start;
+ *intersect_count = min_t(size_t, count2,
+ range1_start + count1 - range2_start);
+ *register_offset = 0;
+ return true;
+ }
+
+ if (range1_start > range2_start &&
+ range1_start < range2_start + count2) {
+ *start_offset = 0;
+ *intersect_count = min_t(size_t, count1,
+ range2_start + count2 - range1_start);
+ *register_offset = range1_start - range2_start;
+ return true;
+ }
+
+ return false;
+}
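
range_intersect_range() decides which slice of a user config-space access overlaps an emulated register, and where that slice sits in both the user buffer and the register. The standalone restatement below exercises the common case, a 4-byte read at offset 0 overlapping the 2-byte device ID at offset 2; types are simplified for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Condensed restatement of range_intersect_range() for illustration. */
static bool intersect(long long s1, size_t c1, long long s2, size_t c2,
		      long long *buf_off, size_t *len, size_t *reg_off)
{
	if (s1 <= s2 && s1 + (long long)c1 > s2) {   /* access starts first */
		*buf_off = s2 - s1;
		*len = (size_t)(s1 + c1 - s2) < c2 ?
		       (size_t)(s1 + c1 - s2) : c2;
		*reg_off = 0;
		return true;
	}
	if (s1 > s2 && s1 < s2 + (long long)c2) {    /* register starts first */
		*buf_off = 0;
		*len = (size_t)(s2 + c2 - s1) < c1 ?
		       (size_t)(s2 + c2 - s1) : c1;
		*reg_off = s1 - s2;
		return true;
	}
	return false;
}

int main(void)
{
	long long off;
	size_t len, reg;

	/* 4-byte read at 0 vs the 2-byte PCI_DEVICE_ID at offset 2 */
	assert(intersect(0, 4, 2, 2, &off, &len, &reg));
	assert(off == 2 && len == 2 && reg == 0);
	return 0;
}
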
+
+static ssize_t virtiovf_pci_read_config(struct vfio_device *core_vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ size_t register_offset;
+ loff_t copy_offset;
+ size_t copy_count;
+ __le32 val32;
+ __le16 val16;
+ u8 val8;
+ int ret;
+
+ ret = vfio_pci_core_read(core_vdev, buf, count, ppos);
+ if (ret < 0)
+ return ret;
+
+ if (range_intersect_range(pos, count, PCI_DEVICE_ID, sizeof(val16),
+ &copy_offset, &copy_count, &register_offset)) {
+ val16 = cpu_to_le16(VIRTIO_TRANS_ID_NET);
+ if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset, copy_count))
+ return -EFAULT;
+ }
+
+ if ((le16_to_cpu(virtvdev->pci_cmd) & PCI_COMMAND_IO) &&
+ range_intersect_range(pos, count, PCI_COMMAND, sizeof(val16),
+ &copy_offset, &copy_count, &register_offset)) {
+ if (copy_from_user((void *)&val16 + register_offset, buf + copy_offset,
+ copy_count))
+ return -EFAULT;
+ val16 |= cpu_to_le16(PCI_COMMAND_IO);
+ if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
+ copy_count))
+ return -EFAULT;
+ }
+
+ if (range_intersect_range(pos, count, PCI_REVISION_ID, sizeof(val8),
+ &copy_offset, &copy_count, &register_offset)) {
+ /* A transitional device must have revision 0 */
+ val8 = 0;
+ if (copy_to_user(buf + copy_offset, &val8, copy_count))
+ return -EFAULT;
+ }
+
+ if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0, sizeof(val32),
+ &copy_offset, &copy_count, &register_offset)) {
+ u32 bar_mask = ~(virtvdev->bar0_virtual_buf_size - 1);
+ u32 pci_base_addr_0 = le32_to_cpu(virtvdev->pci_base_addr_0);
+
+ val32 = cpu_to_le32((pci_base_addr_0 & bar_mask) | PCI_BASE_ADDRESS_SPACE_IO);
+ if (copy_to_user(buf + copy_offset, (void *)&val32 + register_offset, copy_count))
+ return -EFAULT;
+ }
+
+ if (range_intersect_range(pos, count, PCI_SUBSYSTEM_ID, sizeof(val16),
+ &copy_offset, &copy_count, &register_offset)) {
+ /*
+ * Transitional devices use the PCI subsystem device ID as the
+ * virtio device ID, just as legacy drivers always did.
+ */
+ val16 = cpu_to_le16(VIRTIO_ID_NET);
+ if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
+ copy_count))
+ return -EFAULT;
+ }
+
+ if (range_intersect_range(pos, count, PCI_SUBSYSTEM_VENDOR_ID, sizeof(val16),
+ &copy_offset, &copy_count, &register_offset)) {
+ val16 = cpu_to_le16(PCI_VENDOR_ID_REDHAT_QUMRANET);
+ if (copy_to_user(buf + copy_offset, (void *)&val16 + register_offset,
+ copy_count))
+ return -EFAULT;
+ }
+
+ return count;
+}
+
+static ssize_t
+virtiovf_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (!count)
+ return 0;
+
+ if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+ return virtiovf_pci_read_config(core_vdev, buf, count, ppos);
+
+ if (index == VFIO_PCI_BAR0_REGION_INDEX)
+ return virtiovf_pci_bar0_rw(virtvdev, pos, buf, count, true);
+
+ return vfio_pci_core_read(core_vdev, buf, count, ppos);
+}
+
+static ssize_t virtiovf_pci_write_config(struct vfio_device *core_vdev,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ size_t register_offset;
+ loff_t copy_offset;
+ size_t copy_count;
+
+ if (range_intersect_range(pos, count, PCI_COMMAND, sizeof(virtvdev->pci_cmd),
+ &copy_offset, &copy_count,
+ &register_offset)) {
+ if (copy_from_user((void *)&virtvdev->pci_cmd + register_offset,
+ buf + copy_offset,
+ copy_count))
+ return -EFAULT;
+ }
+
+ if (range_intersect_range(pos, count, PCI_BASE_ADDRESS_0,
+ sizeof(virtvdev->pci_base_addr_0),
+ &copy_offset, &copy_count,
+ &register_offset)) {
+ if (copy_from_user((void *)&virtvdev->pci_base_addr_0 + register_offset,
+ buf + copy_offset,
+ copy_count))
+ return -EFAULT;
+ }
+
+ return vfio_pci_core_write(core_vdev, buf, count, ppos);
+}
+
+static ssize_t
+virtiovf_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (!count)
+ return 0;
+
+ if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+ return virtiovf_pci_write_config(core_vdev, buf, count, ppos);
+
+ if (index == VFIO_PCI_BAR0_REGION_INDEX)
+ return virtiovf_pci_bar0_rw(virtvdev, pos, (char __user *)buf, count, false);
+
+ return vfio_pci_core_write(core_vdev, buf, count, ppos);
+}
+
+static int
+virtiovf_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
+ unsigned int cmd, unsigned long arg)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ unsigned long minsz = offsetofend(struct vfio_region_info, offset);
+ void __user *uarg = (void __user *)arg;
+ struct vfio_region_info info = {};
+
+ if (copy_from_user(&info, uarg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = virtvdev->bar0_virtual_buf_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ return copy_to_user(uarg, &info, minsz) ? -EFAULT : 0;
+ default:
+ return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+ }
+}
+
+static long
+virtiovf_vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case VFIO_DEVICE_GET_REGION_INFO:
+ return virtiovf_pci_ioctl_get_region_info(core_vdev, cmd, arg);
+ default:
+ return vfio_pci_core_ioctl(core_vdev, cmd, arg);
+ }
+}
+
+static int
+virtiovf_set_notify_addr(struct virtiovf_pci_core_device *virtvdev)
+{
+ struct vfio_pci_core_device *core_device = &virtvdev->core_device;
+ int ret;
+
+ /*
+ * Set up the BAR where the 'notify' area lives so that vfio can
+ * use it as well. This lets us map it only once and reuse the
+ * mapping whenever it is needed.
+ */
+ ret = vfio_pci_core_setup_barmap(core_device,
+ virtvdev->notify_bar);
+ if (ret)
+ return ret;
+
+ virtvdev->notify_addr = core_device->barmap[virtvdev->notify_bar] +
+ virtvdev->notify_offset;
+ return 0;
+}
+
+static int virtiovf_pci_open_device(struct vfio_device *core_vdev)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ struct vfio_pci_core_device *vdev = &virtvdev->core_device;
+ int ret;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ if (virtvdev->bar0_virtual_buf) {
+ /*
+ * vfio_pci_core_disable() is called on close_device() and tears
+ * down all previous mmaps, so the 'notify' address is only
+ * valid within a single open/close cycle.
+ */
+ ret = virtiovf_set_notify_addr(virtvdev);
+ if (ret) {
+ vfio_pci_core_disable(vdev);
+ return ret;
+ }
+ }
+
+ vfio_pci_core_finish_enable(vdev);
+ return 0;
+}
+
+static int virtiovf_get_device_config_size(unsigned short device)
+{
+ /* Network card */
+ return offsetofend(struct virtio_net_config, status);
+}
+
+static int virtiovf_read_notify_info(struct virtiovf_pci_core_device *virtvdev)
+{
+ u64 offset;
+ int ret;
+ u8 bar;
+
+ ret = virtio_pci_admin_legacy_io_notify_info(virtvdev->core_device.pdev,
+ VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM,
+ &bar, &offset);
+ if (ret)
+ return ret;
+
+ virtvdev->notify_bar = bar;
+ virtvdev->notify_offset = offset;
+ return 0;
+}
+
+static int virtiovf_pci_init_device(struct vfio_device *core_vdev)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+ struct pci_dev *pdev;
+ int ret;
+
+ ret = vfio_pci_core_init_dev(core_vdev);
+ if (ret)
+ return ret;
+
+ pdev = virtvdev->core_device.pdev;
+ ret = virtiovf_read_notify_info(virtvdev);
+ if (ret)
+ return ret;
+
+ virtvdev->bar0_virtual_buf_size = VIRTIO_PCI_CONFIG_OFF(true) +
+ virtiovf_get_device_config_size(pdev->device);
+ BUILD_BUG_ON(!is_power_of_2(virtvdev->bar0_virtual_buf_size));
+ virtvdev->bar0_virtual_buf = kzalloc(virtvdev->bar0_virtual_buf_size,
+ GFP_KERNEL);
+ if (!virtvdev->bar0_virtual_buf)
+ return -ENOMEM;
+ mutex_init(&virtvdev->bar_mutex);
+ return 0;
+}
+
+static void virtiovf_pci_core_release_dev(struct vfio_device *core_vdev)
+{
+ struct virtiovf_pci_core_device *virtvdev = container_of(
+ core_vdev, struct virtiovf_pci_core_device, core_device.vdev);
+
+ kfree(virtvdev->bar0_virtual_buf);
+ vfio_pci_core_release_dev(core_vdev);
+}
+
+static const struct vfio_device_ops virtiovf_vfio_pci_tran_ops = {
+ .name = "virtio-vfio-pci-trans",
+ .init = virtiovf_pci_init_device,
+ .release = virtiovf_pci_core_release_dev,
+ .open_device = virtiovf_pci_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = virtiovf_vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = virtiovf_pci_core_read,
+ .write = virtiovf_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .bind_iommufd = vfio_iommufd_physical_bind,
+ .unbind_iommufd = vfio_iommufd_physical_unbind,
+ .attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static const struct vfio_device_ops virtiovf_vfio_pci_ops = {
+ .name = "virtio-vfio-pci",
+ .init = vfio_pci_core_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = virtiovf_pci_open_device,
+ .close_device = vfio_pci_core_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .device_feature = vfio_pci_core_ioctl_feature,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .bind_iommufd = vfio_iommufd_physical_bind,
+ .unbind_iommufd = vfio_iommufd_physical_unbind,
+ .attach_ioas = vfio_iommufd_physical_attach_ioas,
+ .detach_ioas = vfio_iommufd_physical_detach_ioas,
+};
+
+static bool virtiovf_bar0_exists(struct pci_dev *pdev)
+{
+ struct resource *res = pdev->resource;
+
+ return res->flags;
+}
+
+static int virtiovf_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const struct vfio_device_ops *ops = &virtiovf_vfio_pci_ops;
+ struct virtiovf_pci_core_device *virtvdev;
+ int ret;
+
+ if (pdev->is_virtfn && virtio_pci_admin_has_legacy_io(pdev) &&
+ !virtiovf_bar0_exists(pdev))
+ ops = &virtiovf_vfio_pci_tran_ops;
+
+ virtvdev = vfio_alloc_device(virtiovf_pci_core_device, core_device.vdev,
+ &pdev->dev, ops);
+ if (IS_ERR(virtvdev))
+ return PTR_ERR(virtvdev);
+
+ dev_set_drvdata(&pdev->dev, &virtvdev->core_device);
+ ret = vfio_pci_core_register_device(&virtvdev->core_device);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ vfio_put_device(&virtvdev->core_device.vdev);
+ return ret;
+}
+
+static void virtiovf_pci_remove(struct pci_dev *pdev)
+{
+ struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);
+
+ vfio_pci_core_unregister_device(&virtvdev->core_device);
+ vfio_put_device(&virtvdev->core_device.vdev);
+}
+
+static const struct pci_device_id virtiovf_pci_table[] = {
+ /* Only virtio-net is supported/tested so far */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1041) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, virtiovf_pci_table);
+
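
PCI_DRIVER_OVERRIDE_DEVICE_VFIO entries only match when userspace has set the device's driver_override to this driver, which keeps the VFIO variant from grabbing VFs by default. A userspace sketch of the binding step, with a placeholder BDF:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:3b:00.2/driver_override",
			"w");

	if (!f)
		return 1;
	fputs("virtio-vfio-pci", f);   /* KBUILD_MODNAME of this driver */
	fclose(f);
	/* then write the same BDF to /sys/bus/pci/drivers_probe */
	return 0;
}
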
+static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+ struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);
+
+ virtvdev->pci_cmd = 0;
+}
+
+static const struct pci_error_handlers virtiovf_err_handlers = {
+ .reset_done = virtiovf_pci_aer_reset_done,
+ .error_detected = vfio_pci_core_aer_err_detected,
+};
+
+static struct pci_driver virtiovf_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = virtiovf_pci_table,
+ .probe = virtiovf_pci_probe,
+ .remove = virtiovf_pci_remove,
+ .err_handler = &virtiovf_err_handlers,
+ .driver_managed_dma = true,
+};
+
+module_pci_driver(virtiovf_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yishai Hadas <yishaih@nvidia.com>");
+MODULE_DESCRIPTION(
+ "VIRTIO VFIO PCI - User Level meta-driver for VIRTIO NET devices");
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index 307e3f29b..bde84ad34 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -448,4 +448,18 @@ static inline void vfio_device_put_kvm(struct vfio_device *device)
}
#endif
+#ifdef CONFIG_VFIO_DEBUGFS
+void vfio_debugfs_create_root(void);
+void vfio_debugfs_remove_root(void);
+
+void vfio_device_debugfs_init(struct vfio_device *vdev);
+void vfio_device_debugfs_exit(struct vfio_device *vdev);
+#else
+static inline void vfio_debugfs_create_root(void) { }
+static inline void vfio_debugfs_remove_root(void) { }
+
+static inline void vfio_device_debugfs_init(struct vfio_device *vdev) { }
+static inline void vfio_device_debugfs_exit(struct vfio_device *vdev) { }
+#endif /* CONFIG_VFIO_DEBUGFS */
+
#endif
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index eacd6ec04..b2854d793 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1436,7 +1436,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
list_for_each_entry(d, &iommu->domain_list, next) {
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
npage << PAGE_SHIFT, prot | IOMMU_CACHE,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (ret)
goto unwind;
@@ -1750,7 +1750,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
ret = iommu_map(domain->domain, iova, phys, size,
- dma->prot | IOMMU_CACHE, GFP_KERNEL);
+ dma->prot | IOMMU_CACHE,
+ GFP_KERNEL_ACCOUNT);
if (ret) {
if (!dma->iommu_mapped) {
vfio_unpin_pages_remote(dma, iova,
@@ -1845,7 +1846,8 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *
continue;
ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
+ GFP_KERNEL_ACCOUNT);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 8d4995ada..1cc93aac9 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -311,6 +311,7 @@ static int __vfio_register_dev(struct vfio_device *device,
refcount_set(&device->refcount, 1);
vfio_device_group_register(device);
+ vfio_device_debugfs_init(device);
return 0;
err_out:
@@ -378,6 +379,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
}
}
+ vfio_device_debugfs_exit(device);
/* Balances vfio_device_set_group in register path */
vfio_device_remove_group(device);
}
@@ -1676,6 +1678,7 @@ static int __init vfio_init(void)
if (ret)
goto err_alloc_dev_chrdev;
+ vfio_debugfs_create_root();
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return 0;
@@ -1691,6 +1694,7 @@ err_virqfd:
static void __exit vfio_cleanup(void)
{
+ vfio_debugfs_remove_root();
ida_destroy(&vfio.device_ida);
vfio_cdev_cleanup();
class_destroy(vfio.device_class);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 173beda74..bc4a51e46 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -59,6 +59,7 @@ struct vhost_vdpa {
int in_batch;
struct vdpa_iova_range range;
u32 batch_asid;
+ bool suspended;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -232,6 +233,8 @@ static int _compat_vdpa_reset(struct vhost_vdpa *v)
struct vdpa_device *vdpa = v->vdpa;
u32 flags = 0;
+ v->suspended = false;
+
if (v->vdev.vqs) {
flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
VHOST_BACKEND_F_IOTLB_PERSIST) ?
@@ -590,11 +593,16 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ int ret;
if (!ops->suspend)
return -EOPNOTSUPP;
- return ops->suspend(vdpa);
+ ret = ops->suspend(vdpa);
+ if (!ret)
+ v->suspended = true;
+
+ return ret;
}
/* After a successful return of this ioctl the device resumes processing
@@ -605,11 +613,16 @@ static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ int ret;
if (!ops->resume)
return -EOPNOTSUPP;
- return ops->resume(vdpa);
+ ret = ops->resume(vdpa);
+ if (!ret)
+ v->suspended = false;
+
+ return ret;
}
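
Tracking v->suspended lets the vring ioctls below reject address/base changes on a running, non-suspended device. From userspace the pairing looks like this; /dev/vhost-vdpa-0 is a placeholder node:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VHOST_VDPA_SUSPEND))   /* -EOPNOTSUPP if unsupported */
		perror("suspend");
	/* ... safe to set vring base/address while suspended ... */
	if (ioctl(fd, VHOST_VDPA_RESUME))
		perror("resume");
	close(fd);
	return 0;
}
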
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
@@ -690,6 +703,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
switch (cmd) {
case VHOST_SET_VRING_ADDR:
+ if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+ return -EINVAL;
+
if (ops->set_vq_address(vdpa, idx,
(u64)(uintptr_t)vq->desc,
(u64)(uintptr_t)vq->avail,
@@ -698,6 +714,9 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
+ if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
+ return -EINVAL;
+
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
@@ -968,7 +987,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
r = ops->set_map(vdpa, asid, iotlb);
} else {
r = iommu_map(v->domain, iova, pa, size,
- perm_to_iommu_flags(perm), GFP_KERNEL);
+ perm_to_iommu_flags(perm),
+ GFP_KERNEL_ACCOUNT);
}
if (r) {
vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 045f666b4..32686c79c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2799,9 +2799,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return false;
+ }
- return vq->avail_idx == vq->last_avail_idx;
+ return true;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
@@ -2838,9 +2848,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
&vq->avail->idx, r);
return false;
}
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return true;
+ }
- return vq->avail_idx != vq->last_avail_idx;
+ return false;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
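
The smp_rmb() added in both helpers orders the avail_idx load against the subsequent reads of the ring entries, pairing with the guest's write barrier between filling a descriptor and publishing the new index. The C11 sketch below models the same pairing with acquire/release atomics; names are illustrative.

#include <stdatomic.h>

static int ring[256];
static _Atomic unsigned int avail_idx;

/* Guest side: fill the entry, then publish the index (smp_wmb + store). */
static void producer(unsigned int idx, int desc)
{
	ring[idx % 256] = desc;
	atomic_store_explicit(&avail_idx, idx + 1, memory_order_release);
}

/* Vhost side: load the index (load + smp_rmb), then read the entry. */
static int consumer(unsigned int last)
{
	unsigned int idx = atomic_load_explicit(&avail_idx,
						memory_order_acquire);

	if (idx == last)
		return -1;              /* ring is empty */
	return ring[last % 256];        /* entry write is guaranteed visible */
}

int main(void)
{
	producer(0, 42);
	return consumer(0) == 42 ? 0 : 1;
}
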
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 51387b1ef..ea2d0d69b 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -235,13 +235,6 @@ config BACKLIGHT_HP700
If you have an HP Jornada 700 series,
say Y to include backlight control driver.
-config BACKLIGHT_CARILLO_RANCH
- tristate "Intel Carillo Ranch Backlight Driver"
- depends on LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578
- help
- If you have a Intel LE80578 (Carillo Ranch) say Y to enable the
- backlight driver.
-
config BACKLIGHT_PWM
tristate "Generic PWM based Backlight Driver"
depends on PWM
@@ -402,6 +395,17 @@ config BACKLIGHT_LP8788
help
This supports TI LP8788 backlight driver.
+config BACKLIGHT_MP3309C
+ tristate "Backlight Driver for MPS MP3309C"
+ depends on I2C && PWM
+ select REGMAP_I2C
+ help
+ This supports MPS MP3309C backlight WLED driver in both PWM and
+ analog/I2C dimming modes.
+
+ To compile this driver as a module, choose M here: the module will
+ be called mp3309c.
+
config BACKLIGHT_PANDORA
tristate "Backlight driver for Pandora console"
depends on TWL4030_CORE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index f72e1c3c5..06966cb20 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o
obj-$(CONFIG_BACKLIGHT_BD6107) += bd6107.o
-obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o
@@ -44,6 +43,7 @@ obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
obj-$(CONFIG_BACKLIGHT_LV5207LP) += lv5207lp.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
+obj-$(CONFIG_BACKLIGHT_MP3309C) += mp3309c.o
obj-$(CONFIG_BACKLIGHT_MT6370) += mt6370-backlight.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
deleted file mode 100644
index 781aeecc4..000000000
--- a/drivers/video/backlight/cr_bllcd.c
+++ /dev/null
@@ -1,264 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) Intel Corp. 2007.
- * All Rights Reserved.
- *
- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
- * develop this driver.
- *
- * This file is part of the Carillo Ranch video subsystem driver.
- *
- * Authors:
- * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- * Alan Hourihane <alanh-at-tungstengraphics-dot-com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/lcd.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-/* The LVDS- and panel power controls sits on the
- * GPIO port of the ISA bridge.
- */
-
-#define CRVML_DEVICE_LPC 0x27B8
-#define CRVML_REG_GPIOBAR 0x48
-#define CRVML_REG_GPIOEN 0x4C
-#define CRVML_GPIOEN_BIT (1 << 4)
-#define CRVML_PANEL_PORT 0x38
-#define CRVML_LVDS_ON 0x00000001
-#define CRVML_PANEL_ON 0x00000002
-#define CRVML_BACKLIGHT_OFF 0x00000004
-
-/* The PLL Clock register sits on Host bridge */
-#define CRVML_DEVICE_MCH 0x5001
-#define CRVML_REG_MCHBAR 0x44
-#define CRVML_REG_MCHEN 0x54
-#define CRVML_MCHEN_BIT (1 << 28)
-#define CRVML_MCHMAP_SIZE 4096
-#define CRVML_REG_CLOCK 0xc3c
-#define CRVML_CLOCK_SHIFT 8
-#define CRVML_CLOCK_MASK 0x00000f00
-
-static struct pci_dev *lpc_dev;
-static u32 gpio_bar;
-
-struct cr_panel {
- struct backlight_device *cr_backlight_device;
- struct lcd_device *cr_lcd_device;
-};
-
-static int cr_backlight_set_intensity(struct backlight_device *bd)
-{
- u32 addr = gpio_bar + CRVML_PANEL_PORT;
- u32 cur = inl(addr);
-
- if (backlight_get_brightness(bd) == 0) {
- /* OFF */
- cur |= CRVML_BACKLIGHT_OFF;
- outl(cur, addr);
- } else {
- /* FULL ON */
- cur &= ~CRVML_BACKLIGHT_OFF;
- outl(cur, addr);
- }
-
- return 0;
-}
-
-static int cr_backlight_get_intensity(struct backlight_device *bd)
-{
- u32 addr = gpio_bar + CRVML_PANEL_PORT;
- u32 cur = inl(addr);
- u8 intensity;
-
- if (cur & CRVML_BACKLIGHT_OFF)
- intensity = 0;
- else
- intensity = 1;
-
- return intensity;
-}
-
-static const struct backlight_ops cr_backlight_ops = {
- .get_brightness = cr_backlight_get_intensity,
- .update_status = cr_backlight_set_intensity,
-};
-
-static void cr_panel_on(void)
-{
- u32 addr = gpio_bar + CRVML_PANEL_PORT;
- u32 cur = inl(addr);
-
- if (!(cur & CRVML_PANEL_ON)) {
- /* Make sure LVDS controller is down. */
- if (cur & 0x00000001) {
- cur &= ~CRVML_LVDS_ON;
- outl(cur, addr);
- }
- /* Power up Panel */
- schedule_timeout(HZ / 10);
- cur |= CRVML_PANEL_ON;
- outl(cur, addr);
- }
-
- /* Power up LVDS controller */
-
- if (!(cur & CRVML_LVDS_ON)) {
- schedule_timeout(HZ / 10);
- outl(cur | CRVML_LVDS_ON, addr);
- }
-}
-
-static void cr_panel_off(void)
-{
- u32 addr = gpio_bar + CRVML_PANEL_PORT;
- u32 cur = inl(addr);
-
- /* Power down LVDS controller first to avoid high currents */
- if (cur & CRVML_LVDS_ON) {
- cur &= ~CRVML_LVDS_ON;
- outl(cur, addr);
- }
- if (cur & CRVML_PANEL_ON) {
- schedule_timeout(HZ / 10);
- outl(cur & ~CRVML_PANEL_ON, addr);
- }
-}
-
-static int cr_lcd_set_power(struct lcd_device *ld, int power)
-{
- if (power == FB_BLANK_UNBLANK)
- cr_panel_on();
- if (power == FB_BLANK_POWERDOWN)
- cr_panel_off();
-
- return 0;
-}
-
-static struct lcd_ops cr_lcd_ops = {
- .set_power = cr_lcd_set_power,
-};
-
-static int cr_backlight_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct backlight_device *bdp;
- struct lcd_device *ldp;
- struct cr_panel *crp;
- u8 dev_en;
-
- lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- CRVML_DEVICE_LPC, NULL);
- if (!lpc_dev) {
- pr_err("INTEL CARILLO RANCH LPC not found.\n");
- return -ENODEV;
- }
-
- pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
- if (!(dev_en & CRVML_GPIOEN_BIT)) {
- pr_err("Carillo Ranch GPIO device was not enabled.\n");
- pci_dev_put(lpc_dev);
- return -ENODEV;
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- bdp = devm_backlight_device_register(&pdev->dev, "cr-backlight",
- &pdev->dev, NULL, &cr_backlight_ops,
- &props);
- if (IS_ERR(bdp)) {
- pci_dev_put(lpc_dev);
- return PTR_ERR(bdp);
- }
-
- ldp = devm_lcd_device_register(&pdev->dev, "cr-lcd", &pdev->dev, NULL,
- &cr_lcd_ops);
- if (IS_ERR(ldp)) {
- pci_dev_put(lpc_dev);
- return PTR_ERR(ldp);
- }
-
- pci_read_config_dword(lpc_dev, CRVML_REG_GPIOBAR,
- &gpio_bar);
- gpio_bar &= ~0x3F;
-
- crp = devm_kzalloc(&pdev->dev, sizeof(*crp), GFP_KERNEL);
- if (!crp) {
- pci_dev_put(lpc_dev);
- return -ENOMEM;
- }
-
- crp->cr_backlight_device = bdp;
- crp->cr_lcd_device = ldp;
- crp->cr_backlight_device->props.power = FB_BLANK_UNBLANK;
- crp->cr_backlight_device->props.brightness = 0;
- cr_backlight_set_intensity(crp->cr_backlight_device);
- cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_UNBLANK);
-
- platform_set_drvdata(pdev, crp);
-
- return 0;
-}
-
-static void cr_backlight_remove(struct platform_device *pdev)
-{
- struct cr_panel *crp = platform_get_drvdata(pdev);
-
- crp->cr_backlight_device->props.power = FB_BLANK_POWERDOWN;
- crp->cr_backlight_device->props.brightness = 0;
- crp->cr_backlight_device->props.max_brightness = 0;
- cr_backlight_set_intensity(crp->cr_backlight_device);
- cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_POWERDOWN);
- pci_dev_put(lpc_dev);
-}
-
-static struct platform_driver cr_backlight_driver = {
- .probe = cr_backlight_probe,
- .remove_new = cr_backlight_remove,
- .driver = {
- .name = "cr_backlight",
- },
-};
-
-static struct platform_device *crp;
-
-static int __init cr_backlight_init(void)
-{
- int ret = platform_driver_register(&cr_backlight_driver);
-
- if (ret)
- return ret;
-
- crp = platform_device_register_simple("cr_backlight", -1, NULL, 0);
- if (IS_ERR(crp)) {
- platform_driver_unregister(&cr_backlight_driver);
- return PTR_ERR(crp);
- }
-
- pr_info("Carillo Ranch Backlight Driver Initialized.\n");
-
- return 0;
-}
-
-static void __exit cr_backlight_exit(void)
-{
- platform_device_unregister(crp);
- platform_driver_unregister(&cr_backlight_driver);
-}
-
-module_init(cr_backlight_init);
-module_exit(cr_backlight_exit);
-
-MODULE_AUTHOR("Tungsten Graphics Inc.");
-MODULE_DESCRIPTION("Carillo Ranch Backlight Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index f76d2469d..bf18337ff 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -6,11 +6,11 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#define HX8357_NUM_IM_PINS 3
@@ -83,11 +83,10 @@
#define HX8369_SET_GAMMA_CURVE_RELATED 0xe0
struct hx8357_data {
- unsigned im_pins[HX8357_NUM_IM_PINS];
- unsigned reset;
+ struct gpio_descs *im_pins;
+ struct gpio_desc *reset;
struct spi_device *spi;
int state;
- bool use_im_pins;
};
static u8 hx8357_seq_power[] = {
@@ -321,11 +320,11 @@ static void hx8357_lcd_reset(struct lcd_device *lcdev)
struct hx8357_data *lcd = lcd_get_data(lcdev);
/* Reset the screen */
- gpio_set_value(lcd->reset, 1);
+ gpiod_set_value(lcd->reset, 0);
usleep_range(10000, 12000);
- gpio_set_value(lcd->reset, 0);
+ gpiod_set_value(lcd->reset, 1);
usleep_range(10000, 12000);
- gpio_set_value(lcd->reset, 1);
+ gpiod_set_value(lcd->reset, 0);
/* The controller needs 120ms to recover from reset */
msleep(120);
@@ -340,10 +339,10 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
* Set the interface selection pins to SPI mode, with three
* wires
*/
- if (lcd->use_im_pins) {
- gpio_set_value_cansleep(lcd->im_pins[0], 1);
- gpio_set_value_cansleep(lcd->im_pins[1], 0);
- gpio_set_value_cansleep(lcd->im_pins[2], 1);
+ if (lcd->im_pins) {
+ gpiod_set_value_cansleep(lcd->im_pins->desc[0], 1);
+ gpiod_set_value_cansleep(lcd->im_pins->desc[1], 0);
+ gpiod_set_value_cansleep(lcd->im_pins->desc[2], 1);
}
ret = hx8357_spi_write_array(lcdev, hx8357_seq_power,
@@ -580,6 +579,7 @@ MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
static int hx8357_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct lcd_device *lcdev;
struct hx8357_data *lcd;
const struct of_device_id *match;
@@ -601,48 +601,20 @@ static int hx8357_probe(struct spi_device *spi)
if (!match || !match->data)
return -EINVAL;
- lcd->reset = of_get_named_gpio(spi->dev.of_node, "gpios-reset", 0);
- if (!gpio_is_valid(lcd->reset)) {
- dev_err(&spi->dev, "Missing dt property: gpios-reset\n");
- return -EINVAL;
- }
+ lcd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->reset))
+ return dev_err_probe(dev, PTR_ERR(lcd->reset), "failed to request reset GPIO\n");
+ gpiod_set_consumer_name(lcd->reset, "hx8357-reset");
- ret = devm_gpio_request_one(&spi->dev, lcd->reset,
- GPIOF_OUT_INIT_HIGH,
- "hx8357-reset");
- if (ret) {
- dev_err(&spi->dev,
- "failed to request gpio %d: %d\n",
- lcd->reset, ret);
- return -EINVAL;
- }
+ lcd->im_pins = devm_gpiod_get_array_optional(dev, "im", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->im_pins))
+ return dev_err_probe(dev, PTR_ERR(lcd->im_pins), "failed to request im GPIOs\n");
+ if (lcd->im_pins) {
+ if (lcd->im_pins->ndescs < HX8357_NUM_IM_PINS)
+ return dev_err_probe(dev, -EINVAL, "not enough im GPIOs\n");
- if (of_property_present(spi->dev.of_node, "im-gpios")) {
- lcd->use_im_pins = 1;
-
- for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
- lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
- "im-gpios", i);
- if (lcd->im_pins[i] == -EPROBE_DEFER) {
- dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
- return -EPROBE_DEFER;
- }
- if (!gpio_is_valid(lcd->im_pins[i])) {
- dev_err(&spi->dev, "Missing dt property: im-gpios\n");
- return -EINVAL;
- }
-
- ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
- GPIOF_OUT_INIT_LOW,
- "im_pins");
- if (ret) {
- dev_err(&spi->dev, "failed to request gpio %d: %d\n",
- lcd->im_pins[i], ret);
- return -EINVAL;
- }
- }
- } else {
- lcd->use_im_pins = 0;
+ for (i = 0; i < HX8357_NUM_IM_PINS; i++)
+ gpiod_set_consumer_name(lcd->im_pins->desc[i], "im_pins");
}
lcdev = devm_lcd_device_register(&spi->dev, "mxsfb", &spi->dev, lcd,
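The hx8357 conversion replaces the legacy integer GPIO calls with descriptors. Two details are easy to miss: the reset values flip (1/0/1 becomes 0/1/0) because gpiod_set_value() takes the logical level and the descriptor absorbs any GPIO_ACTIVE_LOW flag from the device tree, and devm_gpiod_get_array_optional() returns NULL rather than an error when the "im" GPIOs are absent, which is why the old use_im_pins flag could be dropped. A minimal sketch of the same pattern for a hypothetical driver with a reset line:

    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>

    static int example_reset(struct device *dev)
    {
    	/* Matches a "reset-gpios" DT property; requested deasserted. */
    	struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

    	if (IS_ERR(reset))
    		return PTR_ERR(reset);

    	gpiod_set_value(reset, 1);	/* assert reset (logical level) */
    	usleep_range(10000, 12000);
    	gpiod_set_value(reset, 0);	/* release reset */
    	return 0;
    }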
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index e7b6bd827..c8e0e655d 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -81,7 +81,7 @@
#define START_RW_WRITE 0
#define START_RW_READ 1
-/**
+/*
* START_BYTE(id, rs, rw)
*
* Set the start byte according to the required operation.
@@ -100,7 +100,7 @@
#define START_BYTE(id, rs, rw) \
(0x70 | (((id) & 0x01) << 2) | (((rs) & 0x01) << 1) | ((rw) & 0x01))
-/**
+/*
* CHECK_FREQ_REG(spi_device s, spi_transfer x) - Check the frequency
* for the SPI transfer. According to the datasheet, the controller
* accept higher frequency for the GRAM transfer, but it requires
@@ -269,6 +269,10 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
spi_message_add_tail(&xfer_regindex, &msg);
ret = spi_sync(spi, &msg);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Error sending SPI message: %d\n", ret);
+ return ret;
+ }
spi_message_init(&msg);
tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG,
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 7115d7bb2..26ff4178c 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -180,7 +180,7 @@ static int lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
pchip->pwmd_state.enabled = pchip->pwmd_state.duty_cycle ? true : false;
- return pwm_apply_state(pchip->pwmd, &pchip->pwmd_state);
+ return pwm_apply_might_sleep(pchip->pwmd, &pchip->pwmd_state);
}
/* update and get brightness */
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index da1f124db..7075bfab5 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -234,7 +234,7 @@ static int lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
state.duty_cycle = div_u64(br * state.period, max_br);
state.enabled = state.duty_cycle;
- return pwm_apply_state(lp->pwm, &state);
+ return pwm_apply_might_sleep(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
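The one-line conversions in lm3630a_bl.c and lp855x_bl.c follow the 6.8 rename of pwm_apply_state() to pwm_apply_might_sleep(), which makes explicit that applying a PWM state may sleep (for example with I2C-attached PWM chips) and therefore must not be called from atomic context; the signature is unchanged. A minimal sketch of the call pattern these backlight drivers use:

    #include <linux/pwm.h>

    /* Sketch only: scale a 0..max brightness onto the PWM period. */
    static int example_set_brightness(struct pwm_device *pwm, int br, int max)
    {
    	struct pwm_state state;

    	pwm_init_state(pwm, &state);
    	pwm_set_relative_duty_cycle(&state, br, max);
    	state.enabled = br > 0;

    	/* Renamed from pwm_apply_state(); process context only. */
    	return pwm_apply_might_sleep(pwm, &state);
    }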
diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
new file mode 100644
index 000000000..34d71259f
--- /dev/null
+++ b/drivers/video/backlight/mp3309c.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for MPS MP3309C White LED driver with I2C interface
+ *
+ * This driver supports both analog (by I2C commands) and PWM dimming control
+ * modes.
+ *
+ * Copyright (C) 2023 ASEM Srl
+ * Author: Flavio Suligoi <f.suligoi@asem.it>
+ *
+ * Based on pwm_bl.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define REG_I2C_0 0x00
+#define REG_I2C_1 0x01
+
+#define REG_I2C_0_EN 0x80
+#define REG_I2C_0_D0 0x40
+#define REG_I2C_0_D1 0x20
+#define REG_I2C_0_D2 0x10
+#define REG_I2C_0_D3 0x08
+#define REG_I2C_0_D4 0x04
+#define REG_I2C_0_RSRV1 0x02
+#define REG_I2C_0_RSRV2 0x01
+
+#define REG_I2C_1_RSRV1 0x80
+#define REG_I2C_1_DIMS 0x40
+#define REG_I2C_1_SYNC 0x20
+#define REG_I2C_1_OVP0 0x10
+#define REG_I2C_1_OVP1 0x08
+#define REG_I2C_1_VOS 0x04
+#define REG_I2C_1_LEDO 0x02
+#define REG_I2C_1_OTP 0x01
+
+#define ANALOG_I2C_NUM_LEVELS 32 /* 0..31 */
+#define ANALOG_I2C_REG_MASK 0x7c
+
+#define MP3309C_PWM_DEFAULT_NUM_LEVELS 256 /* 0..255 */
+
+enum mp3309c_status_value {
+ FIRST_POWER_ON,
+ BACKLIGHT_OFF,
+ BACKLIGHT_ON,
+};
+
+enum mp3309c_dimming_mode_value {
+ DIMMING_PWM,
+ DIMMING_ANALOG_I2C,
+};
+
+struct mp3309c_platform_data {
+ unsigned int max_brightness;
+ unsigned int default_brightness;
+ unsigned int *levels;
+ u8 dimming_mode;
+ u8 over_voltage_protection;
+ bool sync_mode;
+ u8 status;
+};
+
+struct mp3309c_chip {
+ struct device *dev;
+ struct mp3309c_platform_data *pdata;
+ struct backlight_device *bl;
+ struct gpio_desc *enable_gpio;
+ struct regmap *regmap;
+ struct pwm_device *pwmd;
+};
+
+static const struct regmap_config mp3309c_regmap = {
+ .name = "mp3309c_regmap",
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .max_register = REG_I2C_1,
+};
+
+static int mp3309c_enable_device(struct mp3309c_chip *chip)
+{
+ u8 reg_val;
+ int ret;
+
+ /* I2C register #0 - Device enable */
+ ret = regmap_update_bits(chip->regmap, REG_I2C_0, REG_I2C_0_EN,
+ REG_I2C_0_EN);
+ if (ret)
+ return ret;
+
+ /*
+ * I2C register #1 - Set working mode:
+ * - set one of the two dimming modes:
+ * - PWM dimming using an external PWM dimming signal
+ * - analog dimming using I2C commands
+ * - enable/disable synchronous mode
+ * - set overvoltage protection (OVP)
+ */
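+ /*
+ * For example, PWM dimming with synchronous mode and the default
+ * 35.5V OVP setting composes:
+ *
+ *   reg_val = REG_I2C_1_DIMS | REG_I2C_1_SYNC | REG_I2C_1_OVP1
+ *           = 0x40 | 0x20 | 0x08 = 0x68
+ */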
+ reg_val = 0x00;
+ if (chip->pdata->dimming_mode == DIMMING_PWM)
+ reg_val |= REG_I2C_1_DIMS;
+ if (chip->pdata->sync_mode)
+ reg_val |= REG_I2C_1_SYNC;
+ reg_val |= chip->pdata->over_voltage_protection;
+ ret = regmap_write(chip->regmap, REG_I2C_1, reg_val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mp3309c_bl_update_status(struct backlight_device *bl)
+{
+ struct mp3309c_chip *chip = bl_get_data(bl);
+ int brightness = backlight_get_brightness(bl);
+ struct pwm_state pwmstate;
+ unsigned int analog_val, bits_val;
+ int i, ret;
+
+ if (chip->pdata->dimming_mode == DIMMING_PWM) {
+ /*
+ * PWM control mode
+ */
+ pwm_get_state(chip->pwmd, &pwmstate);
+ pwm_set_relative_duty_cycle(&pwmstate,
+ chip->pdata->levels[brightness],
+ chip->pdata->levels[chip->pdata->max_brightness]);
+ pwmstate.enabled = true;
+ ret = pwm_apply_state(chip->pwmd, &pwmstate);
+ if (ret)
+ return ret;
+
+ switch (chip->pdata->status) {
+ case FIRST_POWER_ON:
+ case BACKLIGHT_OFF:
+ /*
+ * After 20ms at a low PWM signal level, the chip turns
+ * off automatically. In this case, before enabling the
+ * chip again, we must wait about 10ms for the PWM signal
+ * to stabilize.
+ */
+ if (brightness > 0) {
+ msleep(10);
+ mp3309c_enable_device(chip);
+ chip->pdata->status = BACKLIGHT_ON;
+ } else {
+ chip->pdata->status = BACKLIGHT_OFF;
+ }
+ break;
+ case BACKLIGHT_ON:
+ if (brightness == 0)
+ chip->pdata->status = BACKLIGHT_OFF;
+ break;
+ }
+ } else {
+ /*
+ * Analog (by I2C command) control mode
+ *
+ * The first time, before setting the brightness, we must
+ * enable the device.
+ */
+ if (chip->pdata->status == FIRST_POWER_ON)
+ mp3309c_enable_device(chip);
+
+ /*
+ * Dimming mode I2C command (fixed dimming range 0..31)
+ *
+ * The 5 bits of the dimming analog value D4..D0 are allocated
+ * in the I2C register #0, in the following way:
+ *
+ * +--+--+--+--+--+--+--+--+
+ * |EN|D0|D1|D2|D3|D4|XX|XX|
+ * +--+--+--+--+--+--+--+--+
+ */
+ analog_val = brightness;
+ bits_val = 0;
+ for (i = 0; i <= 5; i++)
+ bits_val += ((analog_val >> i) & 0x01) << (6 - i);
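+ /*
+ * Example: brightness 21 = 0b10101 (D4..D0 = 1,0,1,0,1) yields
+ * bits_val = 0x40 | 0x10 | 0x04 = 0x54, i.e. the D0, D2 and D4
+ * positions of the reversed layout shown above.
+ */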
+ ret = regmap_update_bits(chip->regmap, REG_I2C_0,
+ ANALOG_I2C_REG_MASK, bits_val);
+ if (ret)
+ return ret;
+
+ if (brightness > 0)
+ chip->pdata->status = BACKLIGHT_ON;
+ else
+ chip->pdata->status = BACKLIGHT_OFF;
+ }
+
+ return 0;
+}
+
+static const struct backlight_ops mp3309c_bl_ops = {
+ .update_status = mp3309c_bl_update_status,
+};
+
+static int pm3309c_parse_dt_node(struct mp3309c_chip *chip,
+ struct mp3309c_platform_data *pdata)
+{
+ struct device_node *node = chip->dev->of_node;
+ struct property *prop_pwms;
+ struct property *prop_levels = NULL;
+ int length = 0;
+ int ret, i;
+ unsigned int num_levels, tmp_value;
+
+ if (!node) {
+ dev_err(chip->dev, "failed to get DT node\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Dimming mode: the MP3309C provides two dimming control modes:
+ *
+ * - PWM mode
+ * - Analog by I2C control mode (default)
+ *
+ * I2C control mode is the default but, if a pwms property is
+ * found in the backlight node, the driver switches to PWM mode.
+ */
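+ /*
+ * A hypothetical DT fragment selecting PWM mode (the address and
+ * PWM specifier are illustrative only):
+ *
+ *   backlight@17 {
+ *       compatible = "mps,mp3309c";
+ *       reg = <0x17>;
+ *       pwms = <&pwm1 0 3333333 0>;
+ *       default-brightness = <6>;
+ *   };
+ */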
+ pdata->dimming_mode = DIMMING_ANALOG_I2C;
+ prop_pwms = of_find_property(node, "pwms", &length);
+ if (prop_pwms) {
+ chip->pwmd = devm_pwm_get(chip->dev, NULL);
+ if (IS_ERR(chip->pwmd))
+ return dev_err_probe(chip->dev, PTR_ERR(chip->pwmd),
+ "error getting pwm data\n");
+ pdata->dimming_mode = DIMMING_PWM;
+ pwm_apply_args(chip->pwmd);
+ }
+
+ /*
+ * In I2C control mode the dimming levels (0..31) are fixed by the
+ * hardware, while in PWM control mode they can be chosen by the user,
+ * to allow nonlinear mappings.
+ */
+ if (pdata->dimming_mode == DIMMING_ANALOG_I2C) {
+ /*
+ * Analog (by I2C commands) control mode: fixed 0..31 brightness
+ * levels
+ */
+ num_levels = ANALOG_I2C_NUM_LEVELS;
+
+ /* Enable GPIO used in I2C dimming mode only */
+ chip->enable_gpio = devm_gpiod_get(chip->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->enable_gpio))
+ return dev_err_probe(chip->dev,
+ PTR_ERR(chip->enable_gpio),
+ "error getting enable gpio\n");
+ } else {
+ /*
+ * PWM control mode: check for brightness level in DT
+ */
+ prop_levels = of_find_property(node, "brightness-levels",
+ &length);
+ if (prop_levels) {
+ /* Read brightness levels from DT */
+ num_levels = length / sizeof(u32);
+ if (num_levels < 2)
+ return -EINVAL;
+ } else {
+ /* Use default brightness levels */
+ num_levels = MP3309C_PWM_DEFAULT_NUM_LEVELS;
+ }
+ }
+
+ /* Fill brightness levels array */
+ pdata->levels = devm_kcalloc(chip->dev, num_levels,
+ sizeof(*pdata->levels), GFP_KERNEL);
+ if (!pdata->levels)
+ return -ENOMEM;
+ if (prop_levels) {
+ ret = of_property_read_u32_array(node, "brightness-levels",
+ pdata->levels,
+ num_levels);
+ if (ret < 0)
+ return ret;
+ } else {
+ for (i = 0; i < num_levels; i++)
+ pdata->levels[i] = i;
+ }
+
+ pdata->max_brightness = num_levels - 1;
+
+ ret = of_property_read_u32(node, "default-brightness",
+ &pdata->default_brightness);
+ if (ret)
+ pdata->default_brightness = pdata->max_brightness;
+ if (pdata->default_brightness > pdata->max_brightness) {
+ dev_err(chip->dev,
+ "default brightness exceeds max brightness\n");
+ pdata->default_brightness = pdata->max_brightness;
+ }
+
+ /*
+ * Over-voltage protection (OVP)
+ *
+ * The possible values of this (optional) property are:
+ *
+ * - 13.5V
+ * - 24V
+ * - 35.5V (hardware default setting)
+ *
+ * If missing, the default value for OVP is 35.5V
+ */
+ pdata->over_voltage_protection = REG_I2C_1_OVP1;
+ if (!of_property_read_u32(node, "mps,overvoltage-protection-microvolt",
+ &tmp_value)) {
+ switch (tmp_value) {
+ case 13500000:
+ pdata->over_voltage_protection = 0x00;
+ break;
+ case 24000000:
+ pdata->over_voltage_protection = REG_I2C_1_OVP0;
+ break;
+ case 35500000:
+ pdata->over_voltage_protection = REG_I2C_1_OVP1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Synchronous (default) and non-synchronous mode */
+ pdata->sync_mode = true;
+ if (of_property_read_bool(node, "mps,no-sync-mode"))
+ pdata->sync_mode = false;
+
+ return 0;
+}
+
+static int mp3309c_probe(struct i2c_client *client)
+{
+ struct mp3309c_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct mp3309c_chip *chip;
+ struct backlight_properties props;
+ struct pwm_state pwmstate;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "failed to check i2c functionality\n");
+ return -EOPNOTSUPP;
+ }
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &client->dev;
+
+ chip->regmap = devm_regmap_init_i2c(client, &mp3309c_regmap);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->regmap),
+ "failed to allocate register map\n");
+
+ i2c_set_clientdata(client, chip);
+
+ if (!pdata) {
+ pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = pm3309c_parse_dt_node(chip, pdata);
+ if (ret)
+ return ret;
+ }
+ chip->pdata = pdata;
+
+ /* Backlight properties */
+ props.brightness = pdata->default_brightness;
+ props.max_brightness = pdata->max_brightness;
+ props.scale = BACKLIGHT_SCALE_LINEAR;
+ props.type = BACKLIGHT_RAW;
+ props.power = FB_BLANK_UNBLANK;
+ props.fb_blank = FB_BLANK_UNBLANK;
+ chip->bl = devm_backlight_device_register(chip->dev, "mp3309c",
+ chip->dev, chip,
+ &mp3309c_bl_ops, &props);
+ if (IS_ERR(chip->bl))
+ return dev_err_probe(chip->dev, PTR_ERR(chip->bl),
+ "error registering backlight device\n");
+
+ /* In PWM dimming mode, enable pwm device */
+ if (chip->pdata->dimming_mode == DIMMING_PWM) {
+ pwm_init_state(chip->pwmd, &pwmstate);
+ pwm_set_relative_duty_cycle(&pwmstate,
+ chip->pdata->default_brightness,
+ chip->pdata->max_brightness);
+ pwmstate.enabled = true;
+ ret = pwm_apply_state(chip->pwmd, &pwmstate);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "error setting pwm device\n");
+ }
+
+ chip->pdata->status = FIRST_POWER_ON;
+ backlight_update_status(chip->bl);
+
+ return 0;
+}
+
+static void mp3309c_remove(struct i2c_client *client)
+{
+ struct mp3309c_chip *chip = i2c_get_clientdata(client);
+ struct backlight_device *bl = chip->bl;
+
+ bl->props.power = FB_BLANK_POWERDOWN;
+ bl->props.brightness = 0;
+ backlight_update_status(chip->bl);
+}
+
+static const struct of_device_id mp3309c_match_table[] = {
+ { .compatible = "mps,mp3309c", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mp3309c_match_table);
+
+static const struct i2c_device_id mp3309c_id[] = {
+ { "mp3309c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mp3309c_id);
+
+static struct i2c_driver mp3309c_i2c_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = mp3309c_match_table,
+ },
+ .probe = mp3309c_probe,
+ .remove = mp3309c_remove,
+ .id_table = mp3309c_id,
+};
+
+module_i2c_driver(mp3309c_i2c_driver);
+
+MODULE_DESCRIPTION("Backlight Driver for MPS MP3309C");
+MODULE_AUTHOR("Flavio Suligoi <f.suligoi@asem.it>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 289bd9ce4..ffcebf6aa 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -103,7 +103,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
pwm_get_state(pb->pwm, &state);
state.duty_cycle = compute_duty_cycle(pb, brightness, &state);
state.enabled = true;
- pwm_apply_state(pb->pwm, &state);
+ pwm_apply_might_sleep(pb->pwm, &state);
pwm_backlight_power_on(pb);
} else {
@@ -120,7 +120,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
* inactive output.
*/
state.enabled = !pb->power_supply && !pb->enable_gpio;
- pwm_apply_state(pb->pwm, &state);
+ pwm_apply_might_sleep(pb->pwm, &state);
}
if (pb->notify_after)
@@ -461,10 +461,9 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (!data) {
ret = pwm_backlight_parse_dt(&pdev->dev, &defdata);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to find platform data\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to find platform data\n");
data = &defdata;
}
@@ -493,24 +492,27 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_ASIS);
if (IS_ERR(pb->enable_gpio)) {
- ret = PTR_ERR(pb->enable_gpio);
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pb->enable_gpio),
+ "failed to acquire enable GPIO\n");
goto err_alloc;
}
pb->power_supply = devm_regulator_get_optional(&pdev->dev, "power");
if (IS_ERR(pb->power_supply)) {
ret = PTR_ERR(pb->power_supply);
- if (ret == -ENODEV)
+ if (ret == -ENODEV) {
pb->power_supply = NULL;
- else
+ } else {
+ dev_err_probe(&pdev->dev, ret,
+ "failed to acquire power regulator\n");
goto err_alloc;
+ }
}
pb->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(pb->pwm)) {
- ret = PTR_ERR(pb->pwm);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "unable to request PWM\n");
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pb->pwm),
+ "unable to request PWM\n");
goto err_alloc;
}
@@ -528,10 +530,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (!state.period && (data->pwm_period_ns > 0))
state.period = data->pwm_period_ns;
- ret = pwm_apply_state(pb->pwm, &state);
+ ret = pwm_apply_might_sleep(pb->pwm, &state);
if (ret) {
- dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
- ret);
+ dev_err_probe(&pdev->dev, ret,
+ "failed to apply initial PWM state");
goto err_alloc;
}
@@ -568,8 +570,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
ret = pwm_backlight_brightness_default(&pdev->dev, data,
state.period);
if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to setup default brightness table\n");
+ dev_err_probe(&pdev->dev, ret,
+ "failed to setup default brightness table\n");
goto err_alloc;
}
@@ -597,8 +599,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
&pwm_backlight_ops, &props);
if (IS_ERR(bl)) {
- dev_err(&pdev->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(bl),
+ "failed to register backlight\n");
goto err_alloc;
}
@@ -633,7 +635,7 @@ static void pwm_backlight_remove(struct platform_device *pdev)
pwm_get_state(pb->pwm, &state);
state.duty_cycle = 0;
state.enabled = false;
- pwm_apply_state(pb->pwm, &state);
+ pwm_apply_might_sleep(pb->pwm, &state);
if (pb->exit)
pb->exit(&pdev->dev);
@@ -649,7 +651,7 @@ static void pwm_backlight_shutdown(struct platform_device *pdev)
pwm_get_state(pb->pwm, &state);
state.duty_cycle = 0;
state.enabled = false;
- pwm_apply_state(pb->pwm, &state);
+ pwm_apply_might_sleep(pb->pwm, &state);
}
#ifdef CONFIG_PM_SLEEP
@@ -673,7 +675,7 @@ static int pwm_backlight_suspend(struct device *dev)
pwm_get_state(pb->pwm, &state);
state.duty_cycle = 0;
state.enabled = false;
- pwm_apply_state(pb->pwm, &state);
+ pwm_apply_might_sleep(pb->pwm, &state);
if (pb->notify_after)
pb->notify_after(pb->dev, 0);
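The probe-path conversions in pwm_bl.c fold logging and error propagation into dev_err_probe(), which returns the error it is given and stays silent for -EPROBE_DEFER (the deferral reason is recorded for debugfs instead); that is why the explicit EPROBE_DEFER special-casing around devm_pwm_get() could be dropped. A minimal sketch of the pattern:

    #include <linux/device.h>
    #include <linux/pwm.h>

    /* Sketch only: acquire a PWM in probe with unified error handling. */
    static int example_get_pwm(struct device *dev, struct pwm_device **pwm)
    {
    	*pwm = devm_pwm_get(dev, NULL);
    	if (IS_ERR(*pwm))
    		/* Logs (unless deferring) and returns the errno. */
    		return dev_err_probe(dev, PTR_ERR(*pwm), "unable to request PWM\n");

    	return 0;
    }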
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 83c2d7329..bc31db6ef 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -7,7 +7,7 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EXPERT || !X86
- depends on ALPHA || IA64 || X86 || \
+ depends on ALPHA || X86 || \
(ARM && ARCH_FOOTBRIDGE) || \
(MIPS && (MIPS_MALTA || SIBYTE_BCM112X || SIBYTE_SB1250 || SIBYTE_BCM1x80 || SNI_RM))
select APERTURE_HELPERS if (DRM || FB || VFIO_PCI_CORE)
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 53693c826..ba4ab33f6 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -116,24 +116,6 @@ config FB_PM2_FIFO_DISCONNECT
help
Support the Permedia2 FIFO disconnect feature.
-config FB_ARMCLCD
- tristate "ARM PrimeCell PL110 support"
- depends on ARM || ARM64 || COMPILE_TEST
- depends on FB && ARM_AMBA && HAS_IOMEM
- select FB_IOMEM_HELPERS
- select FB_MODE_HELPERS if OF
- select VIDEOMODE_HELPERS if OF
- select BACKLIGHT_CLASS_DEVICE if OF
- help
- This framebuffer device driver is for the ARM PrimeCell PL110
- Colour LCD controller. ARM PrimeCells provide the building
- blocks for System on a Chip devices.
-
- If you want to compile this as a module (=code which can be
- inserted into and removed from the running kernel), say M
- here and read <file:Documentation/kbuild/modules.rst>. The module
- will be called amba-clcd.
-
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
@@ -146,10 +128,8 @@ config FB_ACORN
config FB_CLPS711X
tristate "CLPS711X LCD support"
depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+ select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select LCD_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
@@ -180,10 +160,7 @@ config FB_IMX
config FB_CYBER2000
tristate "CyberPro 2000/2010/5000 support"
depends on FB && PCI && (BROKEN || !SPARC64)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_IOMEM_FOPS
+ select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This enables support for the Integraphics CyberPro 20x0 and 5000
@@ -272,10 +249,7 @@ config FB_FM2
config FB_ARC
tristate "Arc Monochrome LCD board support"
depends on FB && (X86 || COMPILE_TEST)
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This enables support for the Arc Monochrome LCD board. The board
is based on the KS-108 lcd controller and is typically a matrix
@@ -527,6 +501,7 @@ config FB_SBUS_HELPERS
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
config FB_BW2
bool "BWtwo support"
@@ -547,6 +522,7 @@ config FB_CG6
depends on (FB = y) && (SPARC && FB_SBUS)
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
help
This is the frame buffer device driver for the CGsix (GX, TurboGX)
frame buffer.
@@ -556,6 +532,7 @@ config FB_FFB
depends on FB_SBUS && SPARC64
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_IOMEM_FOPS
help
This is the frame buffer device driver for the Creator, Creator3D,
and Elite3D graphics boards.
@@ -847,60 +824,6 @@ config FB_I810_I2C
If unsure, say Y.
-config FB_LE80578
- tristate "Intel LE80578 (Vermilion) support"
- depends on FB && PCI && X86
- select FB_IOMEM_HELPERS
- select FB_MODE_HELPERS
- select VIDEO_NOMODESET
- help
- This driver supports the LE80578 (Vermilion Range) chipset
-
-config FB_CARILLO_RANCH
- tristate "Intel Carillo Ranch support"
- depends on FB_LE80578 && FB && PCI && X86
- help
- This driver supports the LE80578 (Carillo Ranch) board
-
-config FB_INTEL
- tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support"
- depends on FB && PCI && X86 && AGP_INTEL && EXPERT
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_IOMEM_FOPS
- select FB_MODE_HELPERS
- select BOOT_VESA_SUPPORT if FB_INTEL = y
- select VIDEO_NOMODESET
- depends on !DRM_I915
- help
- This driver supports the on-board graphics built in to the Intel
- 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
- Say Y if you have and plan to use such a board.
-
- To make FB_INTEL=Y work you need to say AGP_INTEL=y too.
-
- To compile this driver as a module, choose M here: the
- module will be called intelfb.
-
- For more information, please read <file:Documentation/fb/intelfb.rst>
-
-config FB_INTEL_DEBUG
- bool "Intel driver Debug Messages"
- depends on FB_INTEL
- help
- Say Y here if you want the Intel driver to output all sorts
- of debugging information to provide to the maintainer when
- something goes wrong.
-
-config FB_INTEL_I2C
- bool "DDC/I2C for Intel framebuffer support"
- depends on FB_INTEL
- select FB_DDC
- default y
- help
- Say Y here if you want DDC/I2C support for your on-board Intel graphics.
-
config FB_MATROX
tristate "Matrox acceleration"
depends on FB && PCI
@@ -1460,10 +1383,7 @@ config FB_AU1100
config FB_AU1200
bool "Au1200/Au1300 LCD Driver"
depends on (FB = y) && MIPS_ALCHEMY
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_DMAMEM_HELPERS
help
This is the framebuffer driver for the Au1200/Au1300 SOCs.
It can drive various panels and CRTs by passing in kernel cmd line
@@ -1475,6 +1395,7 @@ config FB_VT8500
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1487,6 +1408,7 @@ config FB_WM8505
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1637,12 +1559,9 @@ config FB_SH_MOBILE_LCDC
depends on FB && HAVE_CLK && HAS_IOMEM
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on FB_DEVICE
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
select FB_BACKLIGHT
+ select FB_DEFERRED_IO
+ select FB_DMAMEM_HELPERS
help
Frame buffer driver for the on-chip SH-Mobile LCD controller.
@@ -1689,11 +1608,7 @@ config FB_SMSCUFX
tristate "SMSC UFX6000/7000 USB Framebuffer support"
depends on FB && USB
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This is a kernel framebuffer driver for SMSC UFX USB devices.
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1706,11 +1621,7 @@ config FB_UDL
depends on FB && USB
depends on FB_DEVICE
select FB_MODE_HELPERS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
- select FB_DEFERRED_IO
+ select FB_SYSMEM_HELPERS_DEFERRED
help
This is a kernel framebuffer driver for DisplayLink USB devices.
Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1732,10 +1643,7 @@ config FB_IBM_GXT4500
config FB_PS3
tristate "PS3 GPU framebuffer driver"
depends on FB && PS3_PS3AV
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
help
Include support for the virtual frame buffer in the PS3 platform.
@@ -1800,10 +1708,7 @@ config FB_DA8XX
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
+ select FB_SYSMEM_HELPERS
help
This is a `virtual' frame buffer device. It operates on a chunk of
unswappable kernel memory instead of on the memory of a graphics
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 8e1522015..3eecd5126 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_FB_IMSTT) += imsttfb.o
obj-$(CONFIG_FB_FM2) += fm2fb.o
obj-$(CONFIG_FB_VT8623) += vt8623fb.o
obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
-obj-$(CONFIG_FB_LE80578) += vermilion/
obj-$(CONFIG_FB_S3) += s3fb.o
obj-$(CONFIG_FB_ARK) += arkfb.o
obj-$(CONFIG_FB_STI) += stifb.o
@@ -75,7 +74,6 @@ obj-$(CONFIG_FB_HIT) += hitfb.o
obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
-obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
deleted file mode 100644
index 0399db369..000000000
--- a/drivers/video/fbdev/amba-clcd.c
+++ /dev/null
@@ -1,984 +0,0 @@
-/*
- * linux/drivers/video/amba-clcd.c
- *
- * Copyright (C) 2001 ARM Limited, by David A Rusling
- * Updated to 2.5, Deep Blue Solutions Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * ARM PrimeCell PL110 Color LCD Controller
- */
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/backlight.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <video/display_timing.h>
-#include <video/of_display_timing.h>
-#include <video/videomode.h>
-
-#define to_clcd(info) container_of(info, struct clcd_fb, fb)
-
-/* This is limited to 16 characters when displayed by X startup */
-static const char *clcd_name = "CLCD FB";
-
-static inline void clcdfb_set_start(struct clcd_fb *fb)
-{
- unsigned long ustart = fb->fb.fix.smem_start;
- unsigned long lstart;
-
- ustart += fb->fb.var.yoffset * fb->fb.fix.line_length;
- lstart = ustart + fb->fb.var.yres * fb->fb.fix.line_length / 2;
-
- writel(ustart, fb->regs + CLCD_UBAS);
- writel(lstart, fb->regs + CLCD_LBAS);
-}
-
-static void clcdfb_disable(struct clcd_fb *fb)
-{
- u32 val;
-
- if (fb->board->disable)
- fb->board->disable(fb);
-
- if (fb->panel->backlight) {
- fb->panel->backlight->props.power = FB_BLANK_POWERDOWN;
- backlight_update_status(fb->panel->backlight);
- }
-
- val = readl(fb->regs + fb->off_cntl);
- if (val & CNTL_LCDPWR) {
- val &= ~CNTL_LCDPWR;
- writel(val, fb->regs + fb->off_cntl);
-
- msleep(20);
- }
- if (val & CNTL_LCDEN) {
- val &= ~CNTL_LCDEN;
- writel(val, fb->regs + fb->off_cntl);
- }
-
- /*
- * Disable CLCD clock source.
- */
- if (fb->clk_enabled) {
- fb->clk_enabled = false;
- clk_disable(fb->clk);
- }
-}
-
-static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
-{
- /*
- * Enable the CLCD clock source.
- */
- if (!fb->clk_enabled) {
- fb->clk_enabled = true;
- clk_enable(fb->clk);
- }
-
- /*
- * Bring up by first enabling..
- */
- cntl |= CNTL_LCDEN;
- writel(cntl, fb->regs + fb->off_cntl);
-
- msleep(20);
-
- /*
- * and now apply power.
- */
- cntl |= CNTL_LCDPWR;
- writel(cntl, fb->regs + fb->off_cntl);
-
- /*
- * Turn on backlight
- */
- if (fb->panel->backlight) {
- fb->panel->backlight->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(fb->panel->backlight);
- }
-
- /*
- * finally, enable the interface.
- */
- if (fb->board->enable)
- fb->board->enable(fb);
-}
-
-static int
-clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- u32 caps;
- int ret = 0;
-
- if (fb->panel->caps && fb->board->caps)
- caps = fb->panel->caps & fb->board->caps;
- else {
- /* Old way of specifying what can be used */
- caps = fb->panel->cntl & CNTL_BGR ?
- CLCD_CAP_BGR : CLCD_CAP_RGB;
- /* But mask out 444 modes as they weren't supported */
- caps &= ~CLCD_CAP_444;
- }
-
- /* Only TFT panels can do RGB888/BGR888 */
- if (!(fb->panel->cntl & CNTL_LCDTFT))
- caps &= ~CLCD_CAP_888;
-
- memset(&var->transp, 0, sizeof(var->transp));
-
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
-
- switch (var->bits_per_pixel) {
- case 1:
- case 2:
- case 4:
- case 8:
- /* If we can't do 5551, reject */
- caps &= CLCD_CAP_5551;
- if (!caps) {
- ret = -EINVAL;
- break;
- }
-
- var->red.length = var->bits_per_pixel;
- var->red.offset = 0;
- var->green.length = var->bits_per_pixel;
- var->green.offset = 0;
- var->blue.length = var->bits_per_pixel;
- var->blue.offset = 0;
- break;
-
- case 16:
- /* If we can't do 444, 5551 or 565, reject */
- if (!(caps & (CLCD_CAP_444 | CLCD_CAP_5551 | CLCD_CAP_565))) {
- ret = -EINVAL;
- break;
- }
-
- /*
- * Green length can be 4, 5 or 6 depending whether
- * we're operating in 444, 5551 or 565 mode.
- */
- if (var->green.length == 4 && caps & CLCD_CAP_444)
- caps &= CLCD_CAP_444;
- if (var->green.length == 5 && caps & CLCD_CAP_5551)
- caps &= CLCD_CAP_5551;
- else if (var->green.length == 6 && caps & CLCD_CAP_565)
- caps &= CLCD_CAP_565;
- else {
- /*
- * PL110 officially only supports RGB555,
- * but may be wired up to allow RGB565.
- */
- if (caps & CLCD_CAP_565) {
- var->green.length = 6;
- caps &= CLCD_CAP_565;
- } else if (caps & CLCD_CAP_5551) {
- var->green.length = 5;
- caps &= CLCD_CAP_5551;
- } else {
- var->green.length = 4;
- caps &= CLCD_CAP_444;
- }
- }
-
- if (var->green.length >= 5) {
- var->red.length = 5;
- var->blue.length = 5;
- } else {
- var->red.length = 4;
- var->blue.length = 4;
- }
- break;
- case 32:
- /* If we can't do 888, reject */
- caps &= CLCD_CAP_888;
- if (!caps) {
- ret = -EINVAL;
- break;
- }
-
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- /*
- * >= 16bpp displays have separate colour component bitfields
- * encoded in the pixel data. Calculate their position from
- * the bitfield length defined above.
- */
- if (ret == 0 && var->bits_per_pixel >= 16) {
- bool bgr, rgb;
-
- bgr = caps & CLCD_CAP_BGR && var->blue.offset == 0;
- rgb = caps & CLCD_CAP_RGB && var->red.offset == 0;
-
- if (!bgr && !rgb)
- /*
- * The requested format was not possible, try just
- * our capabilities. One of BGR or RGB must be
- * supported.
- */
- bgr = caps & CLCD_CAP_BGR;
-
- if (bgr) {
- var->blue.offset = 0;
- var->green.offset = var->blue.offset + var->blue.length;
- var->red.offset = var->green.offset + var->green.length;
- } else {
- var->red.offset = 0;
- var->green.offset = var->red.offset + var->red.length;
- var->blue.offset = var->green.offset + var->green.length;
- }
- }
-
- return ret;
-}
-
-static int clcdfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct clcd_fb *fb = to_clcd(info);
- int ret = -EINVAL;
-
- if (fb->board->check)
- ret = fb->board->check(fb, var);
-
- if (ret == 0 &&
- var->xres_virtual * var->bits_per_pixel / 8 *
- var->yres_virtual > fb->fb.fix.smem_len)
- ret = -EINVAL;
-
- if (ret == 0)
- ret = clcdfb_set_bitfields(fb, var);
-
- return ret;
-}
-
-static int clcdfb_set_par(struct fb_info *info)
-{
- struct clcd_fb *fb = to_clcd(info);
- struct clcd_regs regs;
-
- fb->fb.fix.line_length = fb->fb.var.xres_virtual *
- fb->fb.var.bits_per_pixel / 8;
-
- if (fb->fb.var.bits_per_pixel <= 8)
- fb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
- else
- fb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
-
- fb->board->decode(fb, &regs);
-
- clcdfb_disable(fb);
-
- writel(regs.tim0, fb->regs + CLCD_TIM0);
- writel(regs.tim1, fb->regs + CLCD_TIM1);
- writel(regs.tim2, fb->regs + CLCD_TIM2);
- writel(regs.tim3, fb->regs + CLCD_TIM3);
-
- clcdfb_set_start(fb);
-
- clk_set_rate(fb->clk, (1000000000 / regs.pixclock) * 1000);
-
- fb->clcd_cntl = regs.cntl;
-
- clcdfb_enable(fb, regs.cntl);
-
-#ifdef DEBUG
- printk(KERN_INFO
- "CLCD: Registers set to\n"
- " %08x %08x %08x %08x\n"
- " %08x %08x %08x %08x\n",
- readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1),
- readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3),
- readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS),
- readl(fb->regs + fb->off_ienb), readl(fb->regs + fb->off_cntl));
-#endif
-
- return 0;
-}
-
-static inline u32 convert_bitfield(int val, struct fb_bitfield *bf)
-{
- unsigned int mask = (1 << bf->length) - 1;
-
- return (val >> (16 - bf->length) & mask) << bf->offset;
-}
-
-/*
- * Set a single color register. The values supplied have a 16 bit
- * magnitude. Return != 0 for invalid regno.
- */
-static int
-clcdfb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
- unsigned int blue, unsigned int transp, struct fb_info *info)
-{
- struct clcd_fb *fb = to_clcd(info);
-
- if (regno < 16)
- fb->cmap[regno] = convert_bitfield(transp, &fb->fb.var.transp) |
- convert_bitfield(blue, &fb->fb.var.blue) |
- convert_bitfield(green, &fb->fb.var.green) |
- convert_bitfield(red, &fb->fb.var.red);
-
- if (fb->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
- int hw_reg = CLCD_PALETTE + ((regno * 2) & ~3);
- u32 val, mask, newval;
-
- newval = (red >> 11) & 0x001f;
- newval |= (green >> 6) & 0x03e0;
- newval |= (blue >> 1) & 0x7c00;
-
- /*
- * 3.2.11: if we're configured for big endian
- * byte order, the palette entries are swapped.
- */
- if (fb->clcd_cntl & CNTL_BEBO)
- regno ^= 1;
-
- if (regno & 1) {
- newval <<= 16;
- mask = 0x0000ffff;
- } else {
- mask = 0xffff0000;
- }
-
- val = readl(fb->regs + hw_reg) & mask;
- writel(val | newval, fb->regs + hw_reg);
- }
-
- return regno > 255;
-}
-
-/*
- * Blank the screen if blank_mode != 0, else unblank. If blank == NULL
- * then the caller blanks by setting the CLUT (Color Look Up Table) to all
- * black. Return 0 if blanking succeeded, != 0 if un-/blanking failed due
- * to e.g. a video mode which doesn't support it. Implements VESA suspend
- * and powerdown modes on hardware that supports disabling hsync/vsync:
- * blank_mode == 2: suspend vsync
- * blank_mode == 3: suspend hsync
- * blank_mode == 4: powerdown
- */
-static int clcdfb_blank(int blank_mode, struct fb_info *info)
-{
- struct clcd_fb *fb = to_clcd(info);
-
- if (blank_mode != 0) {
- clcdfb_disable(fb);
- } else {
- clcdfb_enable(fb, fb->clcd_cntl);
- }
- return 0;
-}
-
-static int clcdfb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct clcd_fb *fb = to_clcd(info);
- unsigned long len, off = vma->vm_pgoff << PAGE_SHIFT;
- int ret = -EINVAL;
-
- len = info->fix.smem_len;
-
- if (off <= len && vma->vm_end - vma->vm_start <= len - off &&
- fb->board->mmap)
- ret = fb->board->mmap(fb, vma);
-
- return ret;
-}
-
-static const struct fb_ops clcdfb_ops = {
- .owner = THIS_MODULE,
- __FB_DEFAULT_IOMEM_OPS_RDWR,
- .fb_check_var = clcdfb_check_var,
- .fb_set_par = clcdfb_set_par,
- .fb_setcolreg = clcdfb_setcolreg,
- .fb_blank = clcdfb_blank,
- __FB_DEFAULT_IOMEM_OPS_DRAW,
- .fb_mmap = clcdfb_mmap,
-};
-
-static int clcdfb_register(struct clcd_fb *fb)
-{
- int ret;
-
- /*
- * ARM PL111 always has IENB at 0x1c; it's only PL110
- * which is reversed on some platforms.
- */
- if (amba_manf(fb->dev) == 0x41 && amba_part(fb->dev) == 0x111) {
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
- } else {
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
- }
-
- fb->clk = clk_get(&fb->dev->dev, NULL);
- if (IS_ERR(fb->clk)) {
- ret = PTR_ERR(fb->clk);
- goto out;
- }
-
- ret = clk_prepare(fb->clk);
- if (ret)
- goto free_clk;
-
- fb->fb.device = &fb->dev->dev;
-
- fb->fb.fix.mmio_start = fb->dev->res.start;
- fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
-
- fb->regs = ioremap(fb->fb.fix.mmio_start, fb->fb.fix.mmio_len);
- if (!fb->regs) {
- printk(KERN_ERR "CLCD: unable to remap registers\n");
- ret = -ENOMEM;
- goto clk_unprep;
- }
-
- fb->fb.fbops = &clcdfb_ops;
- fb->fb.pseudo_palette = fb->cmap;
-
- strncpy(fb->fb.fix.id, clcd_name, sizeof(fb->fb.fix.id));
- fb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
- fb->fb.fix.type_aux = 0;
- fb->fb.fix.xpanstep = 0;
- fb->fb.fix.ypanstep = 0;
- fb->fb.fix.ywrapstep = 0;
- fb->fb.fix.accel = FB_ACCEL_NONE;
-
- fb->fb.var.xres = fb->panel->mode.xres;
- fb->fb.var.yres = fb->panel->mode.yres;
- fb->fb.var.xres_virtual = fb->panel->mode.xres;
- fb->fb.var.yres_virtual = fb->panel->mode.yres;
- fb->fb.var.bits_per_pixel = fb->panel->bpp;
- fb->fb.var.grayscale = fb->panel->grayscale;
- fb->fb.var.pixclock = fb->panel->mode.pixclock;
- fb->fb.var.left_margin = fb->panel->mode.left_margin;
- fb->fb.var.right_margin = fb->panel->mode.right_margin;
- fb->fb.var.upper_margin = fb->panel->mode.upper_margin;
- fb->fb.var.lower_margin = fb->panel->mode.lower_margin;
- fb->fb.var.hsync_len = fb->panel->mode.hsync_len;
- fb->fb.var.vsync_len = fb->panel->mode.vsync_len;
- fb->fb.var.sync = fb->panel->mode.sync;
- fb->fb.var.vmode = fb->panel->mode.vmode;
- fb->fb.var.activate = FB_ACTIVATE_NOW;
- fb->fb.var.nonstd = 0;
- fb->fb.var.height = fb->panel->height;
- fb->fb.var.width = fb->panel->width;
- fb->fb.var.accel_flags = 0;
-
- fb->fb.monspecs.hfmin = 0;
- fb->fb.monspecs.hfmax = 100000;
- fb->fb.monspecs.vfmin = 0;
- fb->fb.monspecs.vfmax = 400;
- fb->fb.monspecs.dclkmin = 1000000;
- fb->fb.monspecs.dclkmax = 100000000;
-
- /*
- * Make sure that the bitfields are set appropriately.
- */
- clcdfb_set_bitfields(fb, &fb->fb.var);
-
- /*
- * Allocate colourmap.
- */
- ret = fb_alloc_cmap(&fb->fb.cmap, 256, 0);
- if (ret)
- goto unmap;
-
- /*
- * Ensure interrupts are disabled.
- */
- writel(0, fb->regs + fb->off_ienb);
-
- fb_set_var(&fb->fb, &fb->fb.var);
-
- dev_info(&fb->dev->dev, "%s hardware, %s display\n",
- fb->board->name, fb->panel->mode.name);
-
- ret = register_framebuffer(&fb->fb);
- if (ret == 0)
- goto out;
-
- printk(KERN_ERR "CLCD: cannot register framebuffer (%d)\n", ret);
-
- fb_dealloc_cmap(&fb->fb.cmap);
- unmap:
- iounmap(fb->regs);
- clk_unprep:
- clk_unprepare(fb->clk);
- free_clk:
- clk_put(fb->clk);
- out:
- return ret;
-}
-
-#ifdef CONFIG_OF
-static int clcdfb_of_get_dpi_panel_mode(struct device_node *node,
- struct clcd_panel *clcd_panel)
-{
- int err;
- struct display_timing timing;
- struct videomode video;
-
- err = of_get_display_timing(node, "panel-timing", &timing);
- if (err) {
- pr_err("%pOF: problems parsing panel-timing (%d)\n", node, err);
- return err;
- }
-
- videomode_from_timing(&timing, &video);
-
- err = fb_videomode_from_videomode(&video, &clcd_panel->mode);
- if (err)
- return err;
-
- /* Set up some inversion flags */
- if (timing.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- clcd_panel->tim2 |= TIM2_IPC;
- else if (!(timing.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE))
- /*
- * To preserve backwards compatibility, the IPC (inverted
- * pixel clock) flag needs to be set on any display that
- * doesn't explicitly specify that the pixel clock is
- * active on the negative or positive edge.
- */
- clcd_panel->tim2 |= TIM2_IPC;
-
- if (timing.flags & DISPLAY_FLAGS_HSYNC_LOW)
- clcd_panel->tim2 |= TIM2_IHS;
-
- if (timing.flags & DISPLAY_FLAGS_VSYNC_LOW)
- clcd_panel->tim2 |= TIM2_IVS;
-
- if (timing.flags & DISPLAY_FLAGS_DE_LOW)
- clcd_panel->tim2 |= TIM2_IOE;
-
- return 0;
-}
-
-static int clcdfb_snprintf_mode(char *buf, int size, struct fb_videomode *mode)
-{
- return snprintf(buf, size, "%ux%u@%u", mode->xres, mode->yres,
- mode->refresh);
-}
-
-static int clcdfb_of_get_backlight(struct device *dev,
- struct clcd_panel *clcd_panel)
-{
- struct backlight_device *backlight;
-
- /* Look up the optional backlight device */
- backlight = devm_of_find_backlight(dev);
- if (IS_ERR(backlight))
- return PTR_ERR(backlight);
-
- clcd_panel->backlight = backlight;
- return 0;
-}
-
-static int clcdfb_of_get_mode(struct device *dev, struct device_node *panel,
- struct clcd_panel *clcd_panel)
-{
- int err;
- struct fb_videomode *mode;
- char *name;
- int len;
-
- /* Only directly connected DPI panels supported for now */
- if (of_device_is_compatible(panel, "panel-dpi"))
- err = clcdfb_of_get_dpi_panel_mode(panel, clcd_panel);
- else
- err = -ENOENT;
- if (err)
- return err;
- mode = &clcd_panel->mode;
-
- len = clcdfb_snprintf_mode(NULL, 0, mode);
- name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- clcdfb_snprintf_mode(name, len + 1, mode);
- mode->name = name;
-
- return 0;
-}
-
-static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
-{
- static struct {
- unsigned int part;
- u32 r0, g0, b0;
- u32 caps;
- } panels[] = {
- { 0x110, 1, 7, 13, CLCD_CAP_5551 },
- { 0x110, 0, 8, 16, CLCD_CAP_888 },
- { 0x110, 16, 8, 0, CLCD_CAP_888 },
- { 0x111, 4, 14, 20, CLCD_CAP_444 },
- { 0x111, 3, 11, 19, CLCD_CAP_444 | CLCD_CAP_5551 },
- { 0x111, 3, 10, 19, CLCD_CAP_444 | CLCD_CAP_5551 |
- CLCD_CAP_565 },
- { 0x111, 0, 8, 16, CLCD_CAP_444 | CLCD_CAP_5551 |
- CLCD_CAP_565 | CLCD_CAP_888 },
- };
- int i;
-
- /* Bypass pixel clock divider */
- fb->panel->tim2 |= TIM2_BCD;
-
- /* TFT display, vert. comp. interrupt at the start of the back porch */
- fb->panel->cntl |= CNTL_LCDTFT | CNTL_LCDVCOMP(1);
-
- fb->panel->caps = 0;
-
- /* Match the setup with known variants */
- for (i = 0; i < ARRAY_SIZE(panels) && !fb->panel->caps; i++) {
- if (amba_part(fb->dev) != panels[i].part)
- continue;
- if (g0 != panels[i].g0)
- continue;
- if (r0 == panels[i].r0 && b0 == panels[i].b0)
- fb->panel->caps = panels[i].caps;
- }
-
- /*
- * If we actually physically connected the R lines to B and
- * vice versa
- */
- if (r0 != 0 && b0 == 0)
- fb->panel->bgr_connection = true;
-
- return fb->panel->caps ? 0 : -EINVAL;
-}
-
-static int clcdfb_of_init_display(struct clcd_fb *fb)
-{
- struct device_node *endpoint, *panel;
- int err;
- unsigned int bpp;
- u32 max_bandwidth;
- u32 tft_r0b0g0[3];
-
- fb->panel = devm_kzalloc(&fb->dev->dev, sizeof(*fb->panel), GFP_KERNEL);
- if (!fb->panel)
- return -ENOMEM;
-
- /*
- * Fetch the panel endpoint.
- */
- endpoint = of_graph_get_next_endpoint(fb->dev->dev.of_node, NULL);
- if (!endpoint)
- return -ENODEV;
-
- panel = of_graph_get_remote_port_parent(endpoint);
- if (!panel) {
- err = -ENODEV;
- goto out_endpoint_put;
- }
-
- err = clcdfb_of_get_backlight(&fb->dev->dev, fb->panel);
- if (err)
- goto out_panel_put;
-
- err = clcdfb_of_get_mode(&fb->dev->dev, panel, fb->panel);
- if (err)
- goto out_panel_put;
-
- err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
- &max_bandwidth);
- if (!err) {
- /*
- * max_bandwidth is in bytes per second and pixclock in
- * pico-seconds, so the maximum allowed bits per pixel is
- * 8 * max_bandwidth / (PICOS2KHZ(pixclock) * 1000)
- * Rearrange this calculation to avoid overflow and then ensure
- * result is a valid format.
- */
- bpp = max_bandwidth / (1000 / 8)
- / PICOS2KHZ(fb->panel->mode.pixclock);
- bpp = rounddown_pow_of_two(bpp);
- if (bpp > 32)
- bpp = 32;
- } else
- bpp = 32;
- fb->panel->bpp = bpp;
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- fb->panel->cntl |= CNTL_BEBO;
-#endif
- fb->panel->width = -1;
- fb->panel->height = -1;
-
- if (of_property_read_u32_array(endpoint,
- "arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0) {
- err = -ENOENT;
- goto out_panel_put;
- }
-
- of_node_put(panel);
- of_node_put(endpoint);
-
- return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
- tft_r0b0g0[1], tft_r0b0g0[2]);
-out_panel_put:
- of_node_put(panel);
-out_endpoint_put:
- of_node_put(endpoint);
- return err;
-}
-
-static int clcdfb_of_vram_setup(struct clcd_fb *fb)
-{
- int err;
- struct device_node *memory;
- u64 size;
-
- err = clcdfb_of_init_display(fb);
- if (err)
- return err;
-
- memory = of_parse_phandle(fb->dev->dev.of_node, "memory-region", 0);
- if (!memory)
- return -ENODEV;
-
- fb->fb.screen_base = of_iomap(memory, 0);
- if (!fb->fb.screen_base) {
- of_node_put(memory);
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = of_translate_address(memory,
- of_get_address(memory, 0, &size, NULL));
- fb->fb.fix.smem_len = size;
- of_node_put(memory);
-
- return 0;
-}
-
-static int clcdfb_of_vram_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- unsigned long off, user_size, kernel_size;
-
-
- off = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
- kernel_size = fb->fb.fix.smem_len;
-
- if (off >= kernel_size || user_size > (kernel_size - off))
- return -ENXIO;
-
- return remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(fb->fb.fix.smem_start) + vma->vm_pgoff,
- user_size,
- pgprot_writecombine(vma->vm_page_prot));
-}
-
-static void clcdfb_of_vram_remove(struct clcd_fb *fb)
-{
- iounmap(fb->fb.screen_base);
-}
-
-static int clcdfb_of_dma_setup(struct clcd_fb *fb)
-{
- unsigned long framesize;
- dma_addr_t dma;
- int err;
-
- err = clcdfb_of_init_display(fb);
- if (err)
- return err;
-
- framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres *
- fb->panel->bpp / 8);
- fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL);
- if (!fb->fb.screen_base)
- return -ENOMEM;
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-}
-
-static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
-
-static void clcdfb_of_dma_remove(struct clcd_fb *fb)
-{
- dma_free_coherent(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
-}
-
-static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
-{
- struct clcd_board *board = devm_kzalloc(&dev->dev, sizeof(*board),
- GFP_KERNEL);
- struct device_node *node = dev->dev.of_node;
-
- if (!board)
- return NULL;
-
- board->name = of_node_full_name(node);
- board->caps = CLCD_CAP_ALL;
- board->check = clcdfb_check;
- board->decode = clcdfb_decode;
- if (of_property_present(node, "memory-region")) {
- board->setup = clcdfb_of_vram_setup;
- board->mmap = clcdfb_of_vram_mmap;
- board->remove = clcdfb_of_vram_remove;
- } else {
- board->setup = clcdfb_of_dma_setup;
- board->mmap = clcdfb_of_dma_mmap;
- board->remove = clcdfb_of_dma_remove;
- }
-
- return board;
-}
-#else
-static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
-{
- return NULL;
-}
-#endif
-
-static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
-{
- struct clcd_board *board = dev_get_platdata(&dev->dev);
- struct clcd_fb *fb;
- int ret;
-
- if (!board)
- board = clcdfb_of_get_board(dev);
-
- if (!board)
- return -EINVAL;
-
- ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
- if (ret)
- goto out;
-
- ret = amba_request_regions(dev, NULL);
- if (ret) {
- printk(KERN_ERR "CLCD: unable to reserve regs region\n");
- goto out;
- }
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- ret = -ENOMEM;
- goto free_region;
- }
-
- fb->dev = dev;
- fb->board = board;
-
- dev_info(&fb->dev->dev, "PL%03x designer %02x rev%u at 0x%08llx\n",
- amba_part(dev), amba_manf(dev), amba_rev(dev),
- (unsigned long long)dev->res.start);
-
- ret = fb->board->setup(fb);
- if (ret)
- goto free_fb;
-
- ret = clcdfb_register(fb);
- if (ret == 0) {
- amba_set_drvdata(dev, fb);
- goto out;
- }
-
- fb->board->remove(fb);
- free_fb:
- kfree(fb);
- free_region:
- amba_release_regions(dev);
- out:
- return ret;
-}
-
-static void clcdfb_remove(struct amba_device *dev)
-{
- struct clcd_fb *fb = amba_get_drvdata(dev);
-
- clcdfb_disable(fb);
- unregister_framebuffer(&fb->fb);
- if (fb->fb.cmap.len)
- fb_dealloc_cmap(&fb->fb.cmap);
- iounmap(fb->regs);
- clk_unprepare(fb->clk);
- clk_put(fb->clk);
-
- fb->board->remove(fb);
-
- kfree(fb);
-
- amba_release_regions(dev);
-}
-
-static const struct amba_id clcdfb_id_table[] = {
- {
- .id = 0x00041110,
- .mask = 0x000ffffe,
- },
- { 0, 0 },
-};
-
-MODULE_DEVICE_TABLE(amba, clcdfb_id_table);
-
-static struct amba_driver clcd_driver = {
- .drv = {
- .name = "clcd-pl11x",
- },
- .probe = clcdfb_probe,
- .remove = clcdfb_remove,
- .id_table = clcdfb_id_table,
-};
-
-static int __init amba_clcdfb_init(void)
-{
- if (fb_get_options("ambafb", NULL))
- return -ENODEV;
-
- return amba_driver_register(&clcd_driver);
-}
-
-module_init(amba_clcdfb_init);
-
-static void __exit amba_clcdfb_exit(void)
-{
- amba_driver_unregister(&clcd_driver);
-}
-
-module_exit(amba_clcdfb_exit);
-
-MODULE_DESCRIPTION("ARM PrimeCell PL110 CLCD core driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index cff11cb04..b24085432 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -363,39 +363,6 @@ static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx,
}
}
-static void arcfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct arcfb_par *par = info->par;
-
- sys_fillrect(info, rect);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void arcfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct arcfb_par *par = info->par;
-
- sys_copyarea(info, area);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, area->dx, area->dy, area->width, area->height);
-}
-
-static void arcfb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- struct arcfb_par *par = info->par;
-
- sys_imageblit(info, image);
-
- /* update the physical lcd */
- arcfb_lcd_update(par, image->dx, image->dy, image->width,
- image->height);
-}
-
static int arcfb_ioctl(struct fb_info *info,
unsigned int cmd, unsigned long arg)
{
@@ -436,76 +403,48 @@ static int arcfb_ioctl(struct fb_info *info,
}
}
-/*
- * this is the access path from userspace. they can seek and write to
- * the fb. it's inefficient for them to do anything less than 64*8
- * writes since we update the lcd in each write() anyway.
- */
-static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
+static void arcfb_damage_range(struct fb_info *info, off_t off, size_t len)
{
- /* modded from epson 1355 */
-
- unsigned long p;
- int err;
- unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount;
- struct arcfb_par *par;
- unsigned int xres;
-
- if (!info->screen_buffer)
- return -ENODEV;
-
- p = *ppos;
- par = info->par;
- xres = info->var.xres;
- fbmemlength = (xres * info->var.yres)/8;
-
- if (p > fbmemlength)
- return -ENOSPC;
-
- err = 0;
- if ((count + p) > fbmemlength) {
- count = fbmemlength - p;
- err = -ENOSPC;
- }
-
- if (count) {
- char *base_addr;
-
- base_addr = info->screen_buffer;
- count -= copy_from_user(base_addr + p, buf, count);
- *ppos += count;
- err = -EFAULT;
- }
-
+ struct arcfb_par *par = info->par;
+ unsigned int xres = info->var.xres;
+ unsigned int bitppos, startpos, endpos, bitcount;
+ unsigned int x, y, width, height;
- bitppos = p*8;
+ bitppos = off * 8;
startpos = floorXres(bitppos, xres);
- endpos = ceilXres((bitppos + (count*8)), xres);
+ endpos = ceilXres((bitppos + (len * 8)), xres);
bitcount = endpos - startpos;
x = startpos % xres;
y = startpos / xres;
- w = xres;
- h = bitcount / xres;
- arcfb_lcd_update(par, x, y, w, h);
+ width = xres;
+ height = bitcount / xres;
+
+ arcfb_lcd_update(par, x, y, width, height);
+}
- if (count)
- return count;
- return err;
+static void arcfb_damage_area(struct fb_info *info, u32 x, u32 y,
+ u32 width, u32 height)
+{
+ struct arcfb_par *par = info->par;
+
+ /* update the physical lcd */
+ arcfb_lcd_update(par, x, y, width, height);
}
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(arcfb,
+ arcfb_damage_range,
+ arcfb_damage_area)
+
static const struct fb_ops arcfb_ops = {
.owner = THIS_MODULE,
.fb_open = arcfb_open,
- .fb_read = fb_sys_read,
- .fb_write = arcfb_write,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(arcfb),
.fb_release = arcfb_release,
.fb_pan_display = arcfb_pan_display,
- .fb_fillrect = arcfb_fillrect,
- .fb_copyarea = arcfb_copyarea,
- .fb_imageblit = arcfb_imageblit,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(arcfb),
.fb_ioctl = arcfb_ioctl,
+	// .fb_mmap requires deferred I/O
};
static int arcfb_probe(struct platform_device *dev)
@@ -529,6 +468,7 @@ static int arcfb_probe(struct platform_device *dev)
if (!info)
goto err_fb_alloc;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = videomemory;
info->fbops = &arcfb_ops;
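
The arcfb conversion above is the pattern this series applies to system-memory framebuffers: the hand-rolled fillrect/copyarea/imageblit and write() wrappers disappear, and FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS() generates them from the two damage callbacks. As a rough sketch, following the macro's conventions rather than quoting its literal expansion, the generated write helper behaves like this:

static ssize_t arcfb_defio_write(struct fb_info *info, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;		/* offset before the write */
	ssize_t ret = fb_sys_write(info, buf, count, ppos);

	if (ret > 0)
		arcfb_damage_range(info, pos, ret);	/* flush touched bytes to the LCD */

	return ret;
}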
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index a9c8d33a6..08109ce53 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -342,6 +342,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
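
This pgprot_decrypted() call repeats across the mmap hunks in this series (au1200fb, ep93xx-fb, gbefb, fb_defio): since the core fb_mmap() no longer strips SME/SEV memory encryption before calling into the driver, every driver-provided mmap must do it itself. A minimal sketch of the resulting pattern, with hypothetical names, assuming a coherent DMA framebuffer:

struct examplefb_par {
	void *cpu_addr;		/* kernel mapping of the framebuffer */
	dma_addr_t dma_addr;	/* device address of the framebuffer */
};

static int examplefb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct examplefb_par *par = info->par;	/* hypothetical driver state */

	/* the framebuffer must be accessed decrypted */
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return dma_mmap_coherent(info->device, vma, par->cpu_addr,
				 par->dma_addr, info->fix.smem_len);
}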
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 98afd385c..6f20efc66 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1236,6 +1236,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct au1200fb_device *fbdev = info->par;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(fbdev->dev, vma,
fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len);
}
@@ -1488,15 +1490,12 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
static const struct fb_ops au1200fb_fb_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_check_var = au1200fb_fb_check_var,
.fb_set_par = au1200fb_fb_set_par,
.fb_setcolreg = au1200fb_fb_setcolreg,
.fb_blank = au1200fb_fb_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_sync = NULL,
.fb_ioctl = au1200fb_ioctl,
.fb_mmap = au1200fb_fb_mmap,
@@ -1568,6 +1567,8 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
fbi->fix.mmio_len = 0;
fbi->fix.accel = FB_ACCEL_NONE;
+ fbi->flags |= FBINFO_VIRTFB;
+
fbi->screen_buffer = fbdev->fb_mem;
au1200fb_update_fbinfo(fbi);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index e956c90ef..dcfd1fbbc 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -155,13 +155,11 @@ static int clps711x_fb_blank(int blank, struct fb_info *info)
static const struct fb_ops clps711x_fb_ops = {
.owner = THIS_MODULE,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = clps711x_fb_setcolreg,
.fb_check_var = clps711x_fb_check_var,
.fb_set_par = clps711x_fb_set_par,
.fb_blank = clps711x_fb_blank,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
};
static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 7a3ed13be..21053bf00 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -4,7 +4,6 @@
#
config FB_CORE
- select FB_IOMEM_FOPS
select VIDEO_CMDLINE
tristate
@@ -129,7 +128,7 @@ config FB_LITTLE_ENDIAN
endchoice
-config FB_SYS_FOPS
+config FB_SYSMEM_FOPS
tristate
depends on FB_CORE
@@ -142,8 +141,8 @@ config FB_DMAMEM_HELPERS
depends on FB_CORE
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
- select FB_SYS_FOPS
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
config FB_IOMEM_FOPS
tristate
@@ -168,8 +167,8 @@ config FB_SYSMEM_HELPERS
depends on FB_CORE
select FB_SYS_COPYAREA
select FB_SYS_FILLRECT
- select FB_SYS_FOPS
select FB_SYS_IMAGEBLIT
+ select FB_SYSMEM_FOPS
config FB_SYSMEM_HELPERS_DEFERRED
bool
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index c1d657601..d15974759 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -32,6 +32,6 @@ obj-$(CONFIG_FB_IOMEM_FOPS) += fb_io_fops.o
obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
-obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+obj-$(CONFIG_FB_SYSMEM_FOPS) += fb_sys_fops.o
obj-$(CONFIG_FB_SVGALIB) += svgalib.o
obj-$(CONFIG_FB_DDC) += fb_ddc.o
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 5b80bf3da..a271f57d9 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -391,6 +391,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
	/* if the beginning of the target area might overlap with the end of
	   the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
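
This warn-once check, mirrored in cfbfillrect/cfbimgblt below and inverted in the sys_* helpers, enforces a simple contract: FBINFO_VIRTFB set means the framebuffer lives in kernel virtual memory (sys_* drawing ops, fb_sys_read/fb_sys_write), clear means I/O memory (cfb_* drawing ops, fb_io_read/fb_io_write). A hedged sketch, with illustrative names, of how a system-memory driver keeps these warnings silent:

static int examplefb_init_sysmem(struct fb_info *info, size_t vmem_size)
{
	void *vmem = vzalloc(vmem_size);	/* backing store in kernel VA */

	if (!vmem)
		return -ENOMEM;

	info->flags |= FBINFO_VIRTFB;		/* tells the core: not I/O memory */
	info->screen_buffer = vmem;
	info->fix.smem_len = vmem_size;
	return 0;
}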
diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index ba9f58b2a..cbaa4c9e2 100644
--- a/drivers/video/fbdev/core/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
@@ -287,6 +287,9 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index 9ebda4e0d..7d1d2f1a6 100644
--- a/drivers/video/fbdev/core/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
@@ -326,6 +326,9 @@ void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (p->flags & FBINFO_VIRTFB)
+ fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c
index 32a7315b4..4ebd16b7e 100644
--- a/drivers/video/fbdev/core/fb_chrdev.c
+++ b/drivers/video/fbdev/core/fb_chrdev.c
@@ -34,13 +34,13 @@ static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t
if (!info)
return -ENODEV;
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_read))
+ return -EINVAL;
+
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
- if (info->fbops->fb_read)
- return info->fbops->fb_read(info, buf, count, ppos);
-
- return fb_io_read(info, buf, count, ppos);
+ return info->fbops->fb_read(info, buf, count, ppos);
}
static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
@@ -50,13 +50,13 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count,
if (!info)
return -ENODEV;
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_write))
+ return -EINVAL;
+
if (info->state != FBINFO_STATE_RUNNING)
return -EPERM;
- if (info->fbops->fb_write)
- return info->fbops->fb_write(info, buf, count, ppos);
-
- return fb_io_write(info, buf, count, ppos);
+ return info->fbops->fb_write(info, buf, count, ppos);
}
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
@@ -314,61 +314,19 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
static int fb_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fb_info *info = file_fb_info(file);
- unsigned long mmio_pgoff;
- unsigned long start;
- u32 len;
+ int res;
if (!info)
return -ENODEV;
- mutex_lock(&info->mm_lock);
- if (info->fbops->fb_mmap) {
- int res;
-
- /*
- * The framebuffer needs to be accessed decrypted, be sure
- * SME protection is removed ahead of the call
- */
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
- res = info->fbops->fb_mmap(info, vma);
- mutex_unlock(&info->mm_lock);
- return res;
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
- } else if (info->fbdefio) {
- /*
- * FB deferred I/O wants you to handle mmap in your drivers. At a
- * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
- */
- dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
- mutex_unlock(&info->mm_lock);
+ if (fb_WARN_ON_ONCE(info, !info->fbops->fb_mmap))
return -ENODEV;
-#endif
- }
-
- /*
- * Ugh. This can be either the frame buffer mapping, or
- * if pgoff points past it, the mmio mapping.
- */
- start = info->fix.smem_start;
- len = info->fix.smem_len;
- mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
- if (vma->vm_pgoff >= mmio_pgoff) {
- if (info->var.accel_flags) {
- mutex_unlock(&info->mm_lock);
- return -EINVAL;
- }
- vma->vm_pgoff -= mmio_pgoff;
- start = info->fix.mmio_start;
- len = info->fix.mmio_len;
- }
+ mutex_lock(&info->mm_lock);
+ res = info->fbops->fb_mmap(info, vma);
mutex_unlock(&info->mm_lock);
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
- vma->vm_end, start);
-
- return vm_iomap_memory(vma, start, len);
+ return res;
}
static int fb_open(struct inode *inode, struct file *file)
diff --git a/drivers/video/fbdev/core/fb_ddc.c b/drivers/video/fbdev/core/fb_ddc.c
index 8bf5f2f54..e25143219 100644
--- a/drivers/video/fbdev/core/fb_ddc.c
+++ b/drivers/video/fbdev/core/fb_ddc.c
@@ -116,7 +116,6 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
algo_data->setsda(algo_data->data, 1);
algo_data->setscl(algo_data->data, 1);
- adapter->class |= I2C_CLASS_DDC;
return edid;
}
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 1ae1d35a5..806ecd322 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -196,7 +196,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
- unsigned long offset = vmf->address - vmf->vma->vm_start;
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
@@ -223,6 +223,8 @@ static const struct address_space_operations fb_deferred_io_aops = {
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
vma->vm_ops = &fb_deferred_io_vm_ops;
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
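
The page_mkwrite fix above matters for mappings created with a nonzero mmap() offset: the faulting page's position in the framebuffer is its file offset (vmf->pgoff), not its distance from vm_start, and the two differ exactly when vma->vm_pgoff != 0. An illustrative helper, not part of the patch:

static unsigned long examplefb_fault_offset(const struct vm_fault *vmf)
{
	/* equals (vmf->address - vmf->vma->vm_start) only when vm_pgoff == 0 */
	return vmf->pgoff << PAGE_SHIFT;
}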
diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c
index 871b82952..3408ff1b2 100644
--- a/drivers/video/fbdev/core/fb_io_fops.c
+++ b/drivers/video/fbdev/core/fb_io_fops.c
@@ -12,6 +12,9 @@ ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t
int c, cnt = 0, err = 0;
unsigned long total_size, trailing;
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
if (!info->screen_base)
return -ENODEV;
@@ -73,6 +76,9 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
int c, cnt = 0, err = 0;
unsigned long total_size, trailing;
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
if (!info->screen_base)
return -ENODEV;
@@ -132,5 +138,35 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
}
EXPORT_SYMBOL(fb_io_write);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ unsigned long start = info->fix.smem_start;
+ u32 len = info->fix.smem_len;
+ unsigned long mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+
+ if (info->flags & FBINFO_VIRTFB)
+ fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
+ /*
+ * This can be either the framebuffer mapping, or if pgoff points
+ * past it, the mmio mapping.
+ */
+ if (vma->vm_pgoff >= mmio_pgoff) {
+ if (info->var.accel_flags)
+ return -EINVAL;
+
+ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+ len = info->fix.mmio_len;
+ }
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
+ vma->vm_end, start);
+
+ return vm_iomap_memory(vma, start, len);
+}
+EXPORT_SYMBOL(fb_io_mmap);
+
MODULE_DESCRIPTION("Fbdev helpers for framebuffers in I/O memory");
MODULE_LICENSE("GPL");
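
fb_io_mmap() is the old fb_mmap() fallback moved out of the core (compare the fb_chrdev.c hunk above): it maps either the framebuffer or, for page offsets past it, the MMIO region. Drivers for I/O-memory framebuffers pick it up through the default-ops macros; a hedged sketch of an ops table using them, with an illustrative callback name:

static const struct fb_ops examplefb_ops = {
	.owner		= THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS,	/* fb_io_read/fb_io_write, cfb_* drawing,
				 * and fb_io_mmap */
	.fb_setcolreg	= examplefb_setcolreg,	/* hypothetical driver callback */
};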
diff --git a/drivers/video/fbdev/core/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c
index 0cb0989ab..a9aa6519a 100644
--- a/drivers/video/fbdev/core/fb_sys_fops.c
+++ b/drivers/video/fbdev/core/fb_sys_fops.c
@@ -22,6 +22,9 @@ ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count,
unsigned long total_size, c;
ssize_t ret;
+ if (!(info->flags & FBINFO_VIRTFB))
+ fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
if (!info->screen_buffer)
return -ENODEV;
@@ -64,6 +67,9 @@ ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
unsigned long total_size, c;
size_t ret;
+ if (!(info->flags & FBINFO_VIRTFB))
+ fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
if (!info->screen_buffer)
return -ENODEV;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index e6c45d585..46823c2e2 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -631,8 +631,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (logo_lines > vc->vc_bottom) {
logo_shown = FBCON_LOGO_CANSHOW;
- printk(KERN_INFO
- "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+ pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n");
} else {
logo_shown = FBCON_LOGO_DRAW;
vc->vc_top = logo_lines;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 79e5bfbdd..0a26399db 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode)
{
- unsigned int htotal, vtotal;
+ unsigned int htotal, vtotal, total;
fbmode->xres = vm->hactive;
fbmode->left_margin = vm->hback_porch;
@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
vm->vsync_len;
/* prevent division by zero */
- if (htotal && vtotal) {
- fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ total = htotal * vtotal;
+ if (total) {
+ fbmode->refresh = vm->pixelclock / total;
/* a mode must have htotal and vtotal != 0 or it is invalid */
} else {
fbmode->refresh = 0;
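
The rewritten guard closes an overflow hole: with the old "if (htotal && vtotal)" form, two nonzero factors whose 32-bit product wraps to zero would still reach the division. Checking the product itself, i.e. the actual divisor, cannot miss. A small sketch with illustrative values:

static u32 example_refresh(unsigned long pixelclock,
			   unsigned int htotal, unsigned int vtotal)
{
	unsigned int total = htotal * vtotal;	/* 65536 * 65536 wraps to 0 */

	return total ? pixelclock / total : 0;	/* guard the real divisor */
}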
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 7b8bd3a2b..75e7001e8 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -324,6 +324,9 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
	/* if the beginning of the target area might overlap with the end of
	   the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index bcdcaeae6..e49221a88 100644
--- a/drivers/video/fbdev/core/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
@@ -242,6 +242,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index 665ef7a0a..6949bbd51 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -296,6 +296,9 @@ void sys_imageblit(struct fb_info *p, const struct fb_image *image)
if (p->state != FBINFO_STATE_RUNNING)
return;
+ if (!(p->flags & FBINFO_VIRTFB))
+ fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 52105dc1a..986760b90 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -227,13 +227,6 @@ cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
CO_REG_CMD_H, cfb);
}
-static void
-cyber2000fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- cfb_imageblit(info, image);
- return;
-}
-
static int cyber2000fb_sync(struct fb_info *info)
{
struct cfb_info *cfb = container_of(info, struct cfb_info, fb);
@@ -1069,7 +1062,7 @@ static const struct fb_ops cyber2000fb_ops = {
.fb_pan_display = cyber2000fb_pan_display,
.fb_fillrect = cyber2000fb_fillrect,
.fb_copyarea = cyber2000fb_copyarea,
- .fb_imageblit = cyber2000fb_imageblit,
+ .fb_imageblit = cfb_imageblit,
.fb_sync = cyber2000fb_sync,
__FB_DEFAULT_IOMEM_OPS_MMAP,
};
@@ -1234,7 +1227,6 @@ static int cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
strscpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
- cfb->ddc_adapter.class = I2C_CLASS_DDC;
cfb->ddc_adapter.algo_data = &cfb->ddc_algo;
cfb->ddc_adapter.dev.parent = cfb->fb.device;
cfb->ddc_algo.setsda = cyber2000fb_ddc_setsda;
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index cae00deee..3e378874c 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -311,6 +311,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (offset < info->fix.smem_len) {
return dma_mmap_wc(info->device, vma, info->screen_base,
info->fix.smem_start, info->fix.smem_len);
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 0bced82fa..019114165 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -877,7 +877,7 @@ static int map_video_memory(struct fb_info *info)
}
mutex_lock(&info->mm_lock);
info->screen_base = p;
- info->fix.smem_start = virt_to_phys(info->screen_base);
+ info->fix.smem_start = virt_to_phys((__force const void *)info->screen_base);
info->fix.smem_len = smem_len;
mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index e89e55792..8463de833 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1000,6 +1000,8 @@ static int gbefb_mmap(struct fb_info *info,
unsigned long phys_addr, phys_size;
u16 *tile;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
/* check range */
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 10728259d..264c8cedb 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -364,6 +364,8 @@ error:
* hgafb_open - open the framebuffer device
* @info: pointer to fb_info object containing info for current hga board
* @init: open by console system or userland.
+ *
+ * Returns: %0
*/
static int hgafb_open(struct fb_info *info, int init)
@@ -378,6 +380,8 @@ static int hgafb_open(struct fb_info *info, int init)
 * hgafb_release - release the framebuffer device
* @info: pointer to fb_info object containing info for current hga board
* @init: open by console system or userland.
+ *
+ * Returns: %0
*/
static int hgafb_release(struct fb_info *info, int init)
@@ -399,6 +403,8 @@ static int hgafb_release(struct fb_info *info, int init)
* This callback function is used to set the color registers of a HGA
* board. Since we have only two fixed colors only @regno is checked.
* A zero is returned on success and 1 for failure.
+ *
+ * Returns: %0
*/
static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
@@ -410,14 +416,15 @@ static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
}
/**
- * hga_pan_display - pan or wrap the display
+ * hgafb_pan_display - pan or wrap the display
* @var:contains new xoffset, yoffset and vmode values
* @info:pointer to fb_info object containing info for current hga board
*
* This function looks only at xoffset, yoffset and the %FB_VMODE_YWRAP
* flag in @var. If input parameters are correct it calls hga_pan() to
* program the hardware. @info->var is updated to the new values.
- * A zero is returned on success and %-EINVAL for failure.
+ *
+ * Returns: %0 on success or %-EINVAL for failure.
*/
static int hgafb_pan_display(struct fb_var_screeninfo *var,
@@ -449,6 +456,8 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var,
* @blank_mode == 2 means suspend vsync,
* @blank_mode == 3 means suspend hsync,
* @blank_mode == 4 means powerdown.
+ *
+ * Returns: %0
*/
static int hgafb_blank(int blank_mode, struct fb_info *info)
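
These hgafb hunks are pure kernel-doc cleanup: the comment name is synced with the function it documents (hgafb_pan_display) and each callback gains the standard "Returns:" section. The shape they converge on, sketched with an illustrative function:

/**
 * example_op - one-line summary of the operation
 * @info: pointer to the fb_info object being operated on
 *
 * Longer description of what the callback does.
 *
 * Returns: %0 on success or %-EINVAL for failure.
 */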
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index a80939fe2..8fdccf033 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -48,7 +48,6 @@
#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/screen_info.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/completion.h>
@@ -927,8 +926,8 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
if (request_size == 0)
return -1;
- if (order <= MAX_ORDER) {
- /* Call alloc_pages if the size is less than 2^MAX_ORDER */
+ if (order <= MAX_PAGE_ORDER) {
+ /* Call alloc_pages if the size is less than 2^MAX_PAGE_ORDER */
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -1;
@@ -958,7 +957,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
{
unsigned int order = get_order(size);
- if (order <= MAX_ORDER)
+ if (order <= MAX_PAGE_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
dma_free_coherent(&hdev->device,
@@ -975,7 +974,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
- resource_size_t base, size;
+ resource_size_t base = 0;
+ resource_size_t size = 0;
phys_addr_t paddr;
int ret;
@@ -1010,11 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
- } else if (IS_ENABLED(CONFIG_SYSFB)) {
- base = screen_info.lfb_base;
- size = screen_info.lfb_size;
- } else {
- goto err1;
}
/*
@@ -1056,16 +1051,13 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
+ if (base && size)
+ aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
+ else
+ aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
- if (!gen2vm) {
+ if (!gen2vm)
pci_dev_put(pdev);
- } else if (IS_ENABLED(CONFIG_SYSFB)) {
- /* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
- screen_info.lfb_size = 0;
- screen_info.lfb_base = 0;
- screen_info.orig_video_isVGA = 0;
- }
return 0;
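
The MAX_ORDER to MAX_PAGE_ORDER edits are a mechanical rename tracking the mm API; the allocation split itself is unchanged: the buddy allocator serves requests up to 2^MAX_PAGE_ORDER pages, anything larger falls back to the DMA API. A hedged sketch of that split, with illustrative names and simplified address handling:

static void *example_alloc_fb(struct device *dev, size_t size, dma_addr_t *dma)
{
	unsigned int order = get_order(size);

	if (order <= MAX_PAGE_ORDER) {
		struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

		if (!page)
			return NULL;
		*dma = page_to_phys(page);	/* contiguous pages: phys used as bus addr here */
		return page_address(page);
	}

	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}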
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 1897e65ab..9b74dae71 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -163,7 +163,6 @@ static int i740fb_setup_ddc_bus(struct fb_info *info)
strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
- par->ddc_adapter.class = I2C_CLASS_DDC;
par->ddc_adapter.algo_data = &par->ddc_algo;
par->ddc_adapter.dev.parent = info->device;
par->ddc_algo.setsda = i740fb_ddc_setsda;
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 7042a43b8..a4dbc72f9 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Freescale i.MX Frame Buffer device driver
*
* Copyright (C) 2004 Sascha Hauer, Pengutronix
* Based on acornfb.c Copyright (C) Russell King.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
* Please direct your questions and comments on this driver to the following
* email address:
*
@@ -34,6 +31,7 @@
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/bitfield.h>
#include <linux/regulator/consumer.h>
@@ -41,13 +39,6 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
-#define PCR_TFT (1 << 31)
-#define PCR_COLOR (1 << 30)
-#define PCR_BPIX_8 (3 << 25)
-#define PCR_BPIX_12 (4 << 25)
-#define PCR_BPIX_16 (5 << 25)
-#define PCR_BPIX_18 (6 << 25)
-
struct imx_fb_videomode {
struct fb_videomode mode;
u32 pcr;
@@ -65,42 +56,50 @@ struct imx_fb_videomode {
#define LCDC_SSA 0x00
#define LCDC_SIZE 0x04
-#define SIZE_XMAX(x) ((((x) >> 4) & 0x3f) << 20)
+#define SIZE_XMAX_MASK GENMASK(25, 20)
-#define YMAX_MASK_IMX1 0x1ff
-#define YMAX_MASK_IMX21 0x3ff
+#define YMAX_MASK_IMX1 GENMASK(8, 0)
+#define YMAX_MASK_IMX21 GENMASK(9, 0)
#define LCDC_VPW 0x08
-#define VPW_VPW(x) ((x) & 0x3ff)
+#define VPW_VPW_MASK GENMASK(9, 0)
#define LCDC_CPOS 0x0C
-#define CPOS_CC1 (1<<31)
-#define CPOS_CC0 (1<<30)
-#define CPOS_OP (1<<28)
-#define CPOS_CXP(x) (((x) & 3ff) << 16)
+#define CPOS_CC1 BIT(31)
+#define CPOS_CC0 BIT(30)
+#define CPOS_OP BIT(28)
+#define CPOS_CXP_MASK GENMASK(25, 16)
#define LCDC_LCWHB 0x10
-#define LCWHB_BK_EN (1<<31)
-#define LCWHB_CW(w) (((w) & 0x1f) << 24)
-#define LCWHB_CH(h) (((h) & 0x1f) << 16)
-#define LCWHB_BD(x) ((x) & 0xff)
+#define LCWHB_BK_EN BIT(31)
+#define LCWHB_CW_MASK GENMASK(28, 24)
+#define LCWHB_CH_MASK GENMASK(20, 16)
+#define LCWHB_BD_MASK GENMASK(7, 0)
#define LCDC_LCHCC 0x14
#define LCDC_PCR 0x18
+#define PCR_TFT BIT(31)
+#define PCR_COLOR BIT(30)
+#define PCR_BPIX_MASK GENMASK(27, 25)
+#define PCR_BPIX_8 3
+#define PCR_BPIX_12 4
+#define PCR_BPIX_16 5
+#define PCR_BPIX_18 6
+#define PCR_PCD_MASK GENMASK(5, 0)
#define LCDC_HCR 0x1C
-#define HCR_H_WIDTH(x) (((x) & 0x3f) << 26)
-#define HCR_H_WAIT_1(x) (((x) & 0xff) << 8)
-#define HCR_H_WAIT_2(x) ((x) & 0xff)
+#define HCR_H_WIDTH_MASK GENMASK(31, 26)
+#define HCR_H_WAIT_1_MASK GENMASK(15, 8)
+#define HCR_H_WAIT_2_MASK GENMASK(7, 0)
#define LCDC_VCR 0x20
-#define VCR_V_WIDTH(x) (((x) & 0x3f) << 26)
-#define VCR_V_WAIT_1(x) (((x) & 0xff) << 8)
-#define VCR_V_WAIT_2(x) ((x) & 0xff)
+#define VCR_V_WIDTH_MASK GENMASK(31, 26)
+#define VCR_V_WAIT_1_MASK GENMASK(15, 8)
+#define VCR_V_WAIT_2_MASK GENMASK(7, 0)
#define LCDC_POS 0x24
-#define POS_POS(x) ((x) & 1f)
+#define POS_POS_MASK GENMASK(4, 0)
#define LCDC_LSCR1 0x28
/* bit fields in imxfb.h */
@@ -113,24 +112,24 @@ struct imx_fb_videomode {
#define LCDC_RMCR 0x34
-#define RMCR_LCDC_EN_MX1 (1<<1)
+#define RMCR_LCDC_EN_MX1 BIT(1)
-#define RMCR_SELF_REF (1<<0)
+#define RMCR_SELF_REF BIT(0)
#define LCDC_LCDICR 0x38
-#define LCDICR_INT_SYN (1<<2)
-#define LCDICR_INT_CON (1)
+#define LCDICR_INT_SYN BIT(2)
+#define LCDICR_INT_CON BIT(0)
#define LCDC_LCDISR 0x40
-#define LCDISR_UDR_ERR (1<<3)
-#define LCDISR_ERR_RES (1<<2)
-#define LCDISR_EOF (1<<1)
-#define LCDISR_BOF (1<<0)
+#define LCDISR_UDR_ERR BIT(3)
+#define LCDISR_ERR_RES BIT(2)
+#define LCDISR_EOF BIT(1)
+#define LCDISR_BOF BIT(0)
#define IMXFB_LSCR1_DEFAULT 0x00120300
#define LCDC_LAUSCR 0x80
-#define LAUSCR_AUS_MODE (1<<31)
+#define LAUSCR_AUS_MODE BIT(31)
/* Used fb-mode. Can be set on kernel command line, therefore file-static. */
static const char *fb_mode;
@@ -281,10 +280,10 @@ static int imxfb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
struct imxfb_info *fbi = info->par;
u_int val, ret = 1;
-#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
if (regno < fbi->palette_size) {
val = (CNVT_TOHW(red, 4) << 8) |
- (CNVT_TOHW(green,4) << 4) |
+ (CNVT_TOHW(green, 4) << 4) |
CNVT_TOHW(blue, 4);
writel(val, fbi->regs + 0x800 + (regno << 2));
@@ -413,23 +412,23 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
pcr = (unsigned int)tmp;
- if (--pcr > 0x3F) {
- pcr = 0x3F;
- printk(KERN_WARNING "Must limit pixel clock to %luHz\n",
- lcd_clk / pcr);
+ if (--pcr > PCR_PCD_MASK) {
+ pcr = PCR_PCD_MASK;
+ dev_warn(&fbi->pdev->dev, "Must limit pixel clock to %luHz\n",
+ lcd_clk / pcr);
}
switch (var->bits_per_pixel) {
case 32:
- pcr |= PCR_BPIX_18;
+ pcr |= FIELD_PREP(PCR_BPIX_MASK, PCR_BPIX_18);
rgb = &def_rgb_18;
break;
case 16:
default:
if (is_imx1_fb(fbi))
- pcr |= PCR_BPIX_12;
+ pcr |= FIELD_PREP(PCR_BPIX_MASK, PCR_BPIX_12);
else
- pcr |= PCR_BPIX_16;
+ pcr |= FIELD_PREP(PCR_BPIX_MASK, PCR_BPIX_16);
if (imxfb_mode->pcr & PCR_TFT)
rgb = &def_rgb_16_tft;
@@ -437,13 +436,13 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
rgb = &def_rgb_16_stn;
break;
case 8:
- pcr |= PCR_BPIX_8;
+ pcr |= FIELD_PREP(PCR_BPIX_MASK, PCR_BPIX_8);
rgb = &def_rgb_8;
break;
}
/* add sync polarities */
- pcr |= imxfb_mode->pcr & ~(0x3f | (7 << 25));
+ pcr |= imxfb_mode->pcr & ~(PCR_PCD_MASK | PCR_BPIX_MASK);
fbi->pcr = pcr;
/*
@@ -521,7 +520,7 @@ static int imxfb_enable_controller(struct imxfb_info *fbi)
writel(fbi->map_dma, fbi->regs + LCDC_SSA);
/* panning offset 0 (0 pixel offset) */
- writel(0x00000000, fbi->regs + LCDC_POS);
+ writel(FIELD_PREP(POS_POS_MASK, 0), fbi->regs + LCDC_POS);
/* disable hardware cursor */
writel(readl(fbi->regs + LCDC_CPOS) & ~(CPOS_CC0 | CPOS_CC1),
@@ -577,7 +576,7 @@ static int imxfb_blank(int blank, struct fb_info *info)
{
struct imxfb_info *fbi = info->par;
- pr_debug("imxfb_blank: blank=%d\n", blank);
+ pr_debug("%s: blank=%d\n", __func__, blank);
switch (blank) {
case FB_BLANK_POWERDOWN:
@@ -629,47 +628,50 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
#if DEBUG_VAR
if (var->xres < 16 || var->xres > 1024)
- printk(KERN_ERR "%s: invalid xres %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid xres %d\n",
info->fix.id, var->xres);
if (var->hsync_len < 1 || var->hsync_len > 64)
- printk(KERN_ERR "%s: invalid hsync_len %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid hsync_len %d\n",
info->fix.id, var->hsync_len);
if (var->left_margin < left_margin_low || var->left_margin > 255)
- printk(KERN_ERR "%s: invalid left_margin %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid left_margin %d\n",
info->fix.id, var->left_margin);
if (var->right_margin < 1 || var->right_margin > 255)
- printk(KERN_ERR "%s: invalid right_margin %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid right_margin %d\n",
info->fix.id, var->right_margin);
if (var->yres < 1 || var->yres > ymax_mask)
- printk(KERN_ERR "%s: invalid yres %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid yres %d\n",
info->fix.id, var->yres);
if (var->vsync_len > 100)
- printk(KERN_ERR "%s: invalid vsync_len %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid vsync_len %d\n",
info->fix.id, var->vsync_len);
if (var->upper_margin > 63)
- printk(KERN_ERR "%s: invalid upper_margin %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid upper_margin %d\n",
info->fix.id, var->upper_margin);
if (var->lower_margin > 255)
- printk(KERN_ERR "%s: invalid lower_margin %d\n",
+ dev_err(&fbi->pdev->dev, "%s: invalid lower_margin %d\n",
info->fix.id, var->lower_margin);
#endif
/* physical screen start address */
- writel(VPW_VPW(var->xres * var->bits_per_pixel / 8 / 4),
- fbi->regs + LCDC_VPW);
+ writel(FIELD_PREP(VPW_VPW_MASK,
+ var->xres * var->bits_per_pixel / 8 / 4),
+ fbi->regs + LCDC_VPW);
- writel(HCR_H_WIDTH(var->hsync_len - 1) |
- HCR_H_WAIT_1(var->right_margin - 1) |
- HCR_H_WAIT_2(var->left_margin - left_margin_low),
- fbi->regs + LCDC_HCR);
+ writel(FIELD_PREP(HCR_H_WIDTH_MASK, var->hsync_len - 1) |
+ FIELD_PREP(HCR_H_WAIT_1_MASK, var->right_margin - 1) |
+ FIELD_PREP(HCR_H_WAIT_2_MASK,
+ var->left_margin - left_margin_low),
+ fbi->regs + LCDC_HCR);
- writel(VCR_V_WIDTH(var->vsync_len) |
- VCR_V_WAIT_1(var->lower_margin) |
- VCR_V_WAIT_2(var->upper_margin),
- fbi->regs + LCDC_VCR);
+ writel(FIELD_PREP(VCR_V_WIDTH_MASK, var->vsync_len) |
+ FIELD_PREP(VCR_V_WAIT_1_MASK, var->lower_margin) |
+ FIELD_PREP(VCR_V_WAIT_2_MASK, var->upper_margin),
+ fbi->regs + LCDC_VCR);
- writel(SIZE_XMAX(var->xres) | (var->yres & ymax_mask),
- fbi->regs + LCDC_SIZE);
+ writel(FIELD_PREP(SIZE_XMAX_MASK, var->xres >> 4) |
+ (var->yres & ymax_mask),
+ fbi->regs + LCDC_SIZE);
writel(fbi->pcr, fbi->regs + LCDC_PCR);
if (fbi->pwmr)
@@ -692,8 +694,6 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
struct imxfb_info *fbi = info->par;
struct device_node *np;
- pr_debug("%s\n",__func__);
-
info->pseudo_palette = devm_kmalloc_array(&pdev->dev, 16,
sizeof(u32), GFP_KERNEL);
if (!info->pseudo_palette)
@@ -701,6 +701,7 @@ static int imxfb_init_fbinfo(struct platform_device *pdev)
memset(fbi, 0, sizeof(struct imxfb_info));
+ fbi->pdev = pdev;
fbi->devtype = pdev->id_entry->driver_data;
strscpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
@@ -945,8 +946,10 @@ static int imxfb_probe(struct platform_device *pdev)
if (ret)
goto failed_init;
- /* Calculate maximum bytes used per pixel. In most cases this should
- * be the same as m->bpp/8 */
+ /*
+ * Calculate maximum bytes used per pixel. In most cases this should
+ * be the same as m->bpp/8
+ */
m = &fbi->mode[0];
bytes_per_pixel = (m->bpp + 7) / 8;
for (i = 0; i < fbi->num_modes; i++, m++)
@@ -1044,7 +1047,6 @@ static int imxfb_probe(struct platform_device *pdev)
lcd->props.max_contrast = 0xff;
imxfb_enable_controller(fbi);
- fbi->pdev = pdev;
return 0;
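
The imxfb rework above swaps hand-rolled shift-and-mask macros (two of which, CPOS_CXP() and POS_POS(), were silently broken by missing 0x prefixes on their hex masks) for GENMASK()/FIELD_PREP(), where the shift is derived from the mask at compile time. A minimal sketch of the idiom, with an illustrative register field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_BPIX_MASK	GENMASK(27, 25)	/* field occupies bits 27..25 */

static u32 example_set_bpix(u32 reg, u32 bpix)
{
	reg &= ~EXAMPLE_BPIX_MASK;			/* clear the field */
	reg |= FIELD_PREP(EXAMPLE_BPIX_MASK, bpix);	/* place value at bit 25 */
	return reg;
}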
diff --git a/drivers/video/fbdev/intelfb/Makefile b/drivers/video/fbdev/intelfb/Makefile
deleted file mode 100644
index 7ff2debb3..000000000
--- a/drivers/video/fbdev/intelfb/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_FB_INTEL) += intelfb.o
-
-intelfb-y := intelfbdrv.o intelfbhw.o
-intelfb-$(CONFIG_FB_INTEL_I2C) += intelfb_i2c.o
-intelfb-objs := $(intelfb-y)
-
-ccflags-$(CONFIG_FB_INTEL_DEBUG) := -DDEBUG -DREGDUMP
diff --git a/drivers/video/fbdev/intelfb/intelfb.h b/drivers/video/fbdev/intelfb/intelfb.h
deleted file mode 100644
index 5de703902..000000000
--- a/drivers/video/fbdev/intelfb/intelfb.h
+++ /dev/null
@@ -1,382 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _INTELFB_H
-#define _INTELFB_H
-
-/* $DHD: intelfb/intelfb.h,v 1.40 2003/06/27 15:06:25 dawes Exp $ */
-
-#include <linux/agp_backend.h>
-#include <linux/fb.h>
-
-#ifdef CONFIG_FB_INTEL_I2C
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#endif
-
-/*** Version/name ***/
-#define INTELFB_VERSION "0.9.6"
-#define INTELFB_MODULE_NAME "intelfb"
-#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/945GME/965G/965GM"
-
-
-/*** Debug/feature defines ***/
-
-#ifndef DEBUG
-#define DEBUG 0
-#endif
-
-#ifndef VERBOSE
-#define VERBOSE 0
-#endif
-
-#ifndef REGDUMP
-#define REGDUMP 0
-#endif
-
-#ifndef DETECT_VGA_CLASS_ONLY
-#define DETECT_VGA_CLASS_ONLY 1
-#endif
-
-#ifndef ALLOCATE_FOR_PANNING
-#define ALLOCATE_FOR_PANNING 1
-#endif
-
-#ifndef PREFERRED_MODE
-#define PREFERRED_MODE "1024x768-32@70"
-#endif
-
-/*** hw-related values ***/
-
-/* Resource Allocation */
-#define INTELFB_FB_ACQUIRED 1
-#define INTELFB_MMIO_ACQUIRED 2
-
-/* PCI ids for supported devices */
-#define PCI_DEVICE_ID_INTEL_830M 0x3577
-#define PCI_DEVICE_ID_INTEL_845G 0x2562
-#define PCI_DEVICE_ID_INTEL_85XGM 0x3582
-#define PCI_DEVICE_ID_INTEL_854 0x358E
-#define PCI_DEVICE_ID_INTEL_865G 0x2572
-#define PCI_DEVICE_ID_INTEL_915G 0x2582
-#define PCI_DEVICE_ID_INTEL_915GM 0x2592
-#define PCI_DEVICE_ID_INTEL_945G 0x2772
-#define PCI_DEVICE_ID_INTEL_945GM 0x27A2
-#define PCI_DEVICE_ID_INTEL_945GME 0x27AE
-#define PCI_DEVICE_ID_INTEL_965G 0x29A2
-#define PCI_DEVICE_ID_INTEL_965GM 0x2A02
-
-/* Size of MMIO region */
-#define INTEL_REG_SIZE 0x80000
-
-#define STRIDE_ALIGNMENT 16
-#define STRIDE_ALIGNMENT_I9XX 64
-
-#define PALETTE_8_ENTRIES 256
-
-
-/*** Macros ***/
-
-/* basic arithmetic */
-#define KB(x) ((x) * 1024)
-#define MB(x) ((x) * 1024 * 1024)
-#define BtoKB(x) ((x) / 1024)
-#define BtoMB(x) ((x) / 1024 / 1024)
-
-#define GTT_PAGE_SIZE KB(4)
-
-#define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
-#define ROUND_DOWN_TO(x, y) ((x) / (y) * (y))
-#define ROUND_UP_TO_PAGE(x) ROUND_UP_TO((x), GTT_PAGE_SIZE)
-#define ROUND_DOWN_TO_PAGE(x) ROUND_DOWN_TO((x), GTT_PAGE_SIZE)
-
-/* messages */
-#define PFX INTELFB_MODULE_NAME ": "
-
-#define ERR_MSG(fmt, args...) printk(KERN_ERR PFX fmt, ## args)
-#define WRN_MSG(fmt, args...) printk(KERN_WARNING PFX fmt, ## args)
-#define NOT_MSG(fmt, args...) printk(KERN_NOTICE PFX fmt, ## args)
-#define INF_MSG(fmt, args...) printk(KERN_INFO PFX fmt, ## args)
-#if DEBUG
-#define DBG_MSG(fmt, args...) printk(KERN_DEBUG PFX fmt, ## args)
-#else
-#define DBG_MSG(fmt, args...) while (0) printk(fmt, ## args)
-#endif
-
-/* get commonly used pointers */
-#define GET_DINFO(info) (info)->par
-
-/* misc macros */
-#define ACCEL(d, i) \
- ((d)->accel && !(d)->ring_lockup && \
- ((i)->var.accel_flags & FB_ACCELF_TEXT))
-
-/*#define NOACCEL_CHIPSET(d) \
- ((d)->chipset != INTEL_865G)*/
-#define NOACCEL_CHIPSET(d) \
- (0)
-
-#define FIXED_MODE(d) ((d)->fixed_mode)
-
-/*** Driver parameters ***/
-
-#define RINGBUFFER_SIZE KB(64)
-#define HW_CURSOR_SIZE KB(4)
-
-/* Intel agpgart driver */
-#define AGP_PHYSICAL_MEMORY 2
-
-/* store information about an Ixxx DVO */
-/* The i830->i865 use multiple DVOs with multiple i2cs */
-/* the i915, i945 have a single sDVO i2c bus - which is different */
-#define MAX_OUTPUTS 6
-
-/* these are outputs from the chip - integrated only
- external chips are via DVO or SDVO output */
-#define INTELFB_OUTPUT_UNUSED 0
-#define INTELFB_OUTPUT_ANALOG 1
-#define INTELFB_OUTPUT_DVO 2
-#define INTELFB_OUTPUT_SDVO 3
-#define INTELFB_OUTPUT_LVDS 4
-#define INTELFB_OUTPUT_TVOUT 5
-
-#define INTELFB_DVO_CHIP_NONE 0
-#define INTELFB_DVO_CHIP_LVDS 1
-#define INTELFB_DVO_CHIP_TMDS 2
-#define INTELFB_DVO_CHIP_TVOUT 4
-
-#define INTELFB_OUTPUT_PIPE_NC 0
-#define INTELFB_OUTPUT_PIPE_A 1
-#define INTELFB_OUTPUT_PIPE_B 2
-
-/*** Data Types ***/
-
-/* supported chipsets */
-enum intel_chips {
- INTEL_830M,
- INTEL_845G,
- INTEL_85XGM,
- INTEL_852GM,
- INTEL_852GME,
- INTEL_854,
- INTEL_855GM,
- INTEL_855GME,
- INTEL_865G,
- INTEL_915G,
- INTEL_915GM,
- INTEL_945G,
- INTEL_945GM,
- INTEL_945GME,
- INTEL_965G,
- INTEL_965GM,
-};
-
-struct intelfb_hwstate {
- u32 vga0_divisor;
- u32 vga1_divisor;
- u32 vga_pd;
- u32 dpll_a;
- u32 dpll_b;
- u32 fpa0;
- u32 fpa1;
- u32 fpb0;
- u32 fpb1;
- u32 palette_a[PALETTE_8_ENTRIES];
- u32 palette_b[PALETTE_8_ENTRIES];
- u32 htotal_a;
- u32 hblank_a;
- u32 hsync_a;
- u32 vtotal_a;
- u32 vblank_a;
- u32 vsync_a;
- u32 src_size_a;
- u32 bclrpat_a;
- u32 htotal_b;
- u32 hblank_b;
- u32 hsync_b;
- u32 vtotal_b;
- u32 vblank_b;
- u32 vsync_b;
- u32 src_size_b;
- u32 bclrpat_b;
- u32 adpa;
- u32 dvoa;
- u32 dvob;
- u32 dvoc;
- u32 dvoa_srcdim;
- u32 dvob_srcdim;
- u32 dvoc_srcdim;
- u32 lvds;
- u32 pipe_a_conf;
- u32 pipe_b_conf;
- u32 disp_arb;
- u32 cursor_a_control;
- u32 cursor_b_control;
- u32 cursor_a_base;
- u32 cursor_b_base;
- u32 cursor_size;
- u32 disp_a_ctrl;
- u32 disp_b_ctrl;
- u32 disp_a_base;
- u32 disp_b_base;
- u32 cursor_a_palette[4];
- u32 cursor_b_palette[4];
- u32 disp_a_stride;
- u32 disp_b_stride;
- u32 vgacntrl;
- u32 add_id;
- u32 swf0x[7];
- u32 swf1x[7];
- u32 swf3x[3];
- u32 fence[8];
- u32 instpm;
- u32 mem_mode;
- u32 fw_blc_0;
- u32 fw_blc_1;
- u16 hwstam;
- u16 ier;
- u16 iir;
- u16 imr;
-};
-
-struct intelfb_heap_data {
- u32 physical;
- u8 __iomem *virtual;
- u32 offset; /* in GATT pages */
- u32 size; /* in bytes */
-};
-
-#ifdef CONFIG_FB_INTEL_I2C
-struct intelfb_i2c_chan {
- struct intelfb_info *dinfo;
- u32 reg;
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
-};
-#endif
-
-struct intelfb_output_rec {
- int type;
- int pipe;
- int flags;
-
-#ifdef CONFIG_FB_INTEL_I2C
- struct intelfb_i2c_chan i2c_bus;
- struct intelfb_i2c_chan ddc_bus;
-#endif
-};
-
-struct intelfb_vsync {
- wait_queue_head_t wait;
- unsigned int count;
- int pan_display;
- u32 pan_offset;
-};
-
-struct intelfb_info {
- struct fb_info *info;
- const struct fb_ops *fbops;
- struct pci_dev *pdev;
-
- struct intelfb_hwstate save_state;
-
- /* agpgart structs */
- struct agp_memory *gtt_fb_mem; /* use all stolen memory or vram */
- struct agp_memory *gtt_ring_mem; /* ring buffer */
- struct agp_memory *gtt_cursor_mem; /* hw cursor */
-
- /* use a gart reserved fb mem */
- u8 fbmem_gart;
-
- int wc_cookie;
-
- /* heap data */
- struct intelfb_heap_data aperture;
- struct intelfb_heap_data fb;
- struct intelfb_heap_data ring;
- struct intelfb_heap_data cursor;
-
- /* mmio regs */
- u32 mmio_base_phys;
- u8 __iomem *mmio_base;
-
- /* fb start offset (in bytes) */
- u32 fb_start;
-
- /* ring buffer */
- u32 ring_head;
- u32 ring_tail;
- u32 ring_tail_mask;
- u32 ring_space;
- u32 ring_lockup;
-
- /* palette */
- u32 pseudo_palette[16];
-
- /* chip info */
- int pci_chipset;
- int chipset;
- const char *name;
- int mobile;
-
- /* current mode */
- int bpp, depth;
- u32 visual;
- int xres, yres, pitch;
- int pixclock;
-
- /* current pipe */
- int pipe;
-
- /* some flags */
- int accel;
- int hwcursor;
- int fixed_mode;
- int ring_active;
- int flag;
- unsigned long irq_flags;
- int open;
-
- /* vsync */
- struct intelfb_vsync vsync;
- spinlock_t int_lock;
-
- /* hw cursor */
- int cursor_on;
- int cursor_blanked;
- u8 cursor_src[64];
-
- /* initial parameters */
- int initial_vga;
- struct fb_var_screeninfo initial_var;
- u32 initial_fb_base;
- u32 initial_video_ram;
- u32 initial_pitch;
-
- /* driver registered */
- int registered;
-
- /* index into plls */
- int pll_index;
-
- /* outputs */
- int num_outputs;
- struct intelfb_output_rec output[MAX_OUTPUTS];
-};
-
-#define IS_I9XX(dinfo) (((dinfo)->chipset == INTEL_915G) || \
- ((dinfo)->chipset == INTEL_915GM) || \
- ((dinfo)->chipset == INTEL_945G) || \
- ((dinfo)->chipset == INTEL_945GM) || \
- ((dinfo)->chipset == INTEL_945GME) || \
- ((dinfo)->chipset == INTEL_965G) || \
- ((dinfo)->chipset == INTEL_965GM))
-
-/*** function prototypes ***/
-
-extern int intelfb_var_to_depth(const struct fb_var_screeninfo *var);
-
-#ifdef CONFIG_FB_INTEL_I2C
-extern void intelfb_create_i2c_busses(struct intelfb_info *dinfo);
-extern void intelfb_delete_i2c_busses(struct intelfb_info *dinfo);
-#endif
-
-#endif /* _INTELFB_H */
diff --git a/drivers/video/fbdev/intelfb/intelfb_i2c.c b/drivers/video/fbdev/intelfb/intelfb_i2c.c
deleted file mode 100644
index 3300bd31d..000000000
--- a/drivers/video/fbdev/intelfb/intelfb_i2c.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**************************************************************************
-
- Copyright 2006 Dave Airlie <airlied@linux.ie>
-
-All Rights Reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the "Software"),
-to deal in the Software without restriction, including without limitation
-on the rights to use, copy, modify, merge, publish, distribute, sub
-license, and/or sell copies of the Software, and to permit persons to whom
-the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice (including the next
-paragraph) shall be included in all copies or substantial portions of the
-Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
-DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-**************************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/fb.h>
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-
-#include <asm/io.h>
-
-#include "intelfb.h"
-#include "intelfbhw.h"
-
-/* bit locations in the registers */
-#define SCL_DIR_MASK 0x0001
-#define SCL_DIR 0x0002
-#define SCL_VAL_MASK 0x0004
-#define SCL_VAL_OUT 0x0008
-#define SCL_VAL_IN 0x0010
-#define SDA_DIR_MASK 0x0100
-#define SDA_DIR 0x0200
-#define SDA_VAL_MASK 0x0400
-#define SDA_VAL_OUT 0x0800
-#define SDA_VAL_IN 0x1000
-
-static void intelfb_gpio_setscl(void *data, int state)
-{
- struct intelfb_i2c_chan *chan = data;
- struct intelfb_info *dinfo = chan->dinfo;
- u32 val;
-
- OUTREG(chan->reg, (state ? SCL_VAL_OUT : 0) |
- SCL_DIR | SCL_DIR_MASK | SCL_VAL_MASK);
- val = INREG(chan->reg);
-}
-
-static void intelfb_gpio_setsda(void *data, int state)
-{
- struct intelfb_i2c_chan *chan = data;
- struct intelfb_info *dinfo = chan->dinfo;
- u32 val;
-
- OUTREG(chan->reg, (state ? SDA_VAL_OUT : 0) |
- SDA_DIR | SDA_DIR_MASK | SDA_VAL_MASK);
- val = INREG(chan->reg);
-}
-
-static int intelfb_gpio_getscl(void *data)
-{
- struct intelfb_i2c_chan *chan = data;
- struct intelfb_info *dinfo = chan->dinfo;
- u32 val;
-
- OUTREG(chan->reg, SCL_DIR_MASK);
- OUTREG(chan->reg, 0);
- val = INREG(chan->reg);
- return ((val & SCL_VAL_IN) != 0);
-}
-
-static int intelfb_gpio_getsda(void *data)
-{
- struct intelfb_i2c_chan *chan = data;
- struct intelfb_info *dinfo = chan->dinfo;
- u32 val;
-
- OUTREG(chan->reg, SDA_DIR_MASK);
- OUTREG(chan->reg, 0);
- val = INREG(chan->reg);
- return ((val & SDA_VAL_IN) != 0);
-}
-
-static int intelfb_setup_i2c_bus(struct intelfb_info *dinfo,
- struct intelfb_i2c_chan *chan,
- const u32 reg, const char *name,
- int class)
-{
- int rc;
-
- chan->dinfo = dinfo;
- chan->reg = reg;
- snprintf(chan->adapter.name, sizeof(chan->adapter.name),
- "intelfb %s", name);
- chan->adapter.class = class;
- chan->adapter.owner = THIS_MODULE;
- chan->adapter.algo_data = &chan->algo;
- chan->adapter.dev.parent = &chan->dinfo->pdev->dev;
- chan->algo.setsda = intelfb_gpio_setsda;
- chan->algo.setscl = intelfb_gpio_setscl;
- chan->algo.getsda = intelfb_gpio_getsda;
- chan->algo.getscl = intelfb_gpio_getscl;
- chan->algo.udelay = 40;
- chan->algo.timeout = 20;
- chan->algo.data = chan;
-
- i2c_set_adapdata(&chan->adapter, chan);
-
- /* Raise SCL and SDA */
- intelfb_gpio_setsda(chan, 1);
- intelfb_gpio_setscl(chan, 1);
- udelay(20);
-
- rc = i2c_bit_add_bus(&chan->adapter);
- if (rc == 0)
- DBG_MSG("I2C bus %s registered.\n", name);
- else
- WRN_MSG("Failed to register I2C bus %s.\n", name);
- return rc;
-}
-
-void intelfb_create_i2c_busses(struct intelfb_info *dinfo)
-{
- int i = 0;
-
- /* everyone has at least a single analog output */
- dinfo->num_outputs = 1;
- dinfo->output[i].type = INTELFB_OUTPUT_ANALOG;
-
- /* setup the DDC bus for analog output */
- intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].ddc_bus, GPIOA,
- "CRTDDC_A", I2C_CLASS_DDC);
- i++;
-
- /* need to add the output busses for each device
- - this function is very incomplete
- - i915GM has LVDS and TVOUT for example
- */
- switch(dinfo->chipset) {
- case INTEL_830M:
- case INTEL_845G:
- case INTEL_854:
- case INTEL_855GM:
- case INTEL_865G:
- dinfo->output[i].type = INTELFB_OUTPUT_DVO;
- intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].ddc_bus,
- GPIOD, "DVODDC_D", I2C_CLASS_DDC);
- intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].i2c_bus,
- GPIOE, "DVOI2C_E", 0);
- i++;
- break;
- case INTEL_915G:
- case INTEL_915GM:
- /* has some LVDS + tv-out */
- case INTEL_945G:
- case INTEL_945GM:
- case INTEL_945GME:
- case INTEL_965G:
- case INTEL_965GM:
- /* SDVO ports have a single control bus - 2 devices */
- dinfo->output[i].type = INTELFB_OUTPUT_SDVO;
- intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].i2c_bus,
- GPIOE, "SDVOCTRL_E", 0);
- /* TODO: initialize the SDVO */
- /* I830SDVOInit(pScrn, i, DVOB); */
- i++;
-
- /* set up SDVOC */
- dinfo->output[i].type = INTELFB_OUTPUT_SDVO;
- dinfo->output[i].i2c_bus = dinfo->output[i - 1].i2c_bus;
- /* TODO: initialize the SDVO */
- /* I830SDVOInit(pScrn, i, DVOC); */
- i++;
- break;
- }
- dinfo->num_outputs = i;
-}
-
-void intelfb_delete_i2c_busses(struct intelfb_info *dinfo)
-{
- int i;
-
- for (i = 0; i < MAX_OUTPUTS; i++) {
- if (dinfo->output[i].i2c_bus.dinfo) {
- i2c_del_adapter(&dinfo->output[i].i2c_bus.adapter);
- dinfo->output[i].i2c_bus.dinfo = NULL;
- }
- if (dinfo->output[i].ddc_bus.dinfo) {
- i2c_del_adapter(&dinfo->output[i].ddc_bus.adapter);
- dinfo->output[i].ddc_bus.dinfo = NULL;
- }
- }
-}
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
deleted file mode 100644
index d29d80a16..000000000
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ /dev/null
@@ -1,1680 +0,0 @@
-/*
- * intelfb
- *
- * Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/
- * 945G/945GM/945GME/965G/965GM integrated graphics chips.
- *
- * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
- * 2004 Sylvain Meyer
- * 2006 David Airlie
- *
- * This driver consists of two parts. The first part (intelfbdrv.c) provides
- * the basic fbdev interfaces, is derived in part from the radeonfb and
- * vesafb drivers, and is covered by the GPL. The second part (intelfbhw.c)
- * provides the code to program the hardware. Most of it is derived from
- * the i810/i830 XFree86 driver. The HW-specific code is covered here
- * under a dual license (GPL and MIT/XFree86 license).
- *
- * Author: David Dawes
- *
- */
-
-/* $DHD: intelfb/intelfbdrv.c,v 1.20 2003/06/27 15:17:40 dawes Exp $ */
-
-/*
- * Changes:
- * 01/2003 - Initial driver (0.1.0), no mode switching, no acceleration.
- * This initial version is a basic core that works a lot like
- * the vesafb driver. It must be built-in to the kernel,
- * and the initial video mode must be set with vga=XXX at
- * boot time. (David Dawes)
- *
- * 01/2003 - Version 0.2.0: Mode switching added, colormap support
- * implemented, Y panning, and soft screen blanking implemented.
- * No acceleration yet. (David Dawes)
- *
- * 01/2003 - Version 0.3.0: fbcon acceleration support added. Module
- * option handling added. (David Dawes)
- *
- * 01/2003 - Version 0.4.0: fbcon HW cursor support added. (David Dawes)
- *
- * 01/2003 - Version 0.4.1: Add auto-generation of built-in modes.
- * (David Dawes)
- *
- * 02/2003 - Version 0.4.2: Add check for active non-CRT devices, and
- * mode validation checks. (David Dawes)
- *
- * 02/2003 - Version 0.4.3: Check when the VC is in graphics mode so that
- * acceleration is disabled while an XFree86 server is running.
- * (David Dawes)
- *
- * 02/2003 - Version 0.4.4: Monitor DPMS support. (David Dawes)
- *
- * 02/2003 - Version 0.4.5: Basic XFree86 + fbdev working. (David Dawes)
- *
- * 02/2003 - Version 0.5.0: Modify to work with the 2.5.32 kernel as well
- * as 2.4.x kernels. (David Dawes)
- *
- * 02/2003 - Version 0.6.0: Split out HW-specifics into a separate file.
- * (David Dawes)
- *
- * 02/2003 - Version 0.7.0: Test on 852GM/855GM. Acceleration and HW
- * cursor are disabled on this platform. (David Dawes)
- *
- * 02/2003 - Version 0.7.1: Test on 845G. Acceleration is disabled
- * on this platform. (David Dawes)
- *
- * 02/2003 - Version 0.7.2: Test on 830M. Acceleration and HW
- * cursor are disabled on this platform. (David Dawes)
- *
- * 02/2003 - Version 0.7.3: Fix 8-bit modes for mobile platforms
- * (David Dawes)
- *
- * 02/2003 - Version 0.7.4: Add checks for FB and FBCON_HAS_CFB* configured
- * in the kernel, and add mode bpp verification and default
- * bpp selection based on which FBCON_HAS_CFB* are configured.
- * (David Dawes)
- *
- * 02/2003 - Version 0.7.5: Add basic package/install scripts based on the
- * DRI packaging scripts. (David Dawes)
- *
- * 04/2003 - Version 0.7.6: Fix typo that affects builds with SMP-enabled
- * kernels. (David Dawes, reported by Anupam).
- *
- * 06/2003 - Version 0.7.7:
- * Fix Makefile.kernel build problem (Tsutomu Yasuda).
- * Fix mis-placed #endif (2.4.21 kernel).
- *
- * 09/2004 - Version 0.9.0 - by Sylvain Meyer
- * Port to linux 2.6 kernel fbdev
- * Fix HW accel and HW cursor on i845G
- * Use of agpgart for fb memory reservation
- * Add mtrr support
- *
- * 10/2004 - Version 0.9.1
- * Use module_param instead of old MODULE_PARM
- * Some cleanup
- *
- * 11/2004 - Version 0.9.2
- * Add vram option to reserve more memory than stolen by BIOS
- * Fix intelfbhw_pan_display typo
- * Add __initdata annotations
- *
- * 04/2008 - Version 0.9.5
- * Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>)
- *
- * 08/2008 - Version 0.9.6
- * Add support for 945GME. (Phil Endecott <spam_from_intelfb@chezphil.org>)
- */
-
-#include <linux/aperture.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/screen_info.h>
-
-#include <asm/io.h>
-
-#include "intelfb.h"
-#include "intelfbhw.h"
-#include "../edid.h"
-
-static void get_initial_mode(struct intelfb_info *dinfo);
-static void update_dinfo(struct intelfb_info *dinfo,
- struct fb_var_screeninfo *var);
-static int intelfb_open(struct fb_info *info, int user);
-static int intelfb_release(struct fb_info *info, int user);
-static int intelfb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int intelfb_set_par(struct fb_info *info);
-static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info);
-
-static int intelfb_blank(int blank, struct fb_info *info);
-static int intelfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-
-static void intelfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-static void intelfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region);
-static void intelfb_imageblit(struct fb_info *info,
- const struct fb_image *image);
-static int intelfb_cursor(struct fb_info *info,
- struct fb_cursor *cursor);
-
-static int intelfb_sync(struct fb_info *info);
-
-static int intelfb_ioctl(struct fb_info *info,
- unsigned int cmd, unsigned long arg);
-
-static int intelfb_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void intelfb_pci_unregister(struct pci_dev *pdev);
-static int intelfb_set_fbinfo(struct intelfb_info *dinfo);
-
-/*
- * Limiting the class to PCI_CLASS_DISPLAY_VGA prevents function 1 of the
- * mobile chipsets from being registered.
- */
-#if DETECT_VGA_CLASS_ONLY
-#define INTELFB_CLASS_MASK (~0 << 8)
-#else
-#define INTELFB_CLASS_MASK 0
-#endif
-
-static const struct pci_device_id intelfb_pci_table[] = {
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_830M, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_830M },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_865G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_865G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_854, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_854 },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GME, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GME },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
- { 0, }
-};
-
-/* Global data */
-static int num_registered = 0;
-
-/* fb ops */
-static const struct fb_ops intel_fb_ops = {
- .owner = THIS_MODULE,
- .fb_open = intelfb_open,
- .fb_release = intelfb_release,
- __FB_DEFAULT_IOMEM_OPS_RDWR,
- .fb_check_var = intelfb_check_var,
- .fb_set_par = intelfb_set_par,
- .fb_setcolreg = intelfb_setcolreg,
- .fb_blank = intelfb_blank,
- .fb_pan_display = intelfb_pan_display,
- .fb_fillrect = intelfb_fillrect,
- .fb_copyarea = intelfb_copyarea,
- .fb_imageblit = intelfb_imageblit,
- .fb_cursor = intelfb_cursor,
- .fb_sync = intelfb_sync,
- .fb_ioctl = intelfb_ioctl,
- __FB_DEFAULT_IOMEM_OPS_MMAP,
-};
-
-/* PCI driver module table */
-static struct pci_driver intelfb_driver = {
- .name = "intelfb",
- .id_table = intelfb_pci_table,
- .probe = intelfb_pci_register,
- .remove = intelfb_pci_unregister,
-};
-
-/* Module description/parameters */
-MODULE_AUTHOR("David Dawes <dawes@tungstengraphics.com>, "
- "Sylvain Meyer <sylvain.meyer@worldonline.fr>");
-MODULE_DESCRIPTION("Framebuffer driver for Intel(R) " SUPPORTED_CHIPSETS
- " chipsets");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
-
-static bool accel = 1;
-static int vram = 4;
-static bool hwcursor = 0;
-static bool mtrr = 1;
-static bool fixed = 0;
-static bool noinit = 0;
-static bool noregister = 0;
-static bool probeonly = 0;
-static bool idonly = 0;
-static int bailearly = 0;
-static int voffset = 48;
-static char *mode = NULL;
-
-module_param(accel, bool, S_IRUGO);
-MODULE_PARM_DESC(accel, "Enable hardware acceleration");
-module_param(vram, int, S_IRUGO);
-MODULE_PARM_DESC(vram, "System RAM to allocate to framebuffer in MiB");
-module_param(voffset, int, S_IRUGO);
-MODULE_PARM_DESC(voffset, "Offset of framebuffer in MiB");
-module_param(hwcursor, bool, S_IRUGO);
-MODULE_PARM_DESC(hwcursor, "Enable HW cursor");
-module_param(mtrr, bool, S_IRUGO);
-MODULE_PARM_DESC(mtrr, "Enable MTRR support");
-module_param(fixed, bool, S_IRUGO);
-MODULE_PARM_DESC(fixed, "Disable mode switching");
-module_param(noinit, bool, 0);
-MODULE_PARM_DESC(noinit, "Don't initialise graphics mode when loading");
-module_param(noregister, bool, 0);
-MODULE_PARM_DESC(noregister, "Don't register, just probe and exit (debug)");
-module_param(probeonly, bool, 0);
-MODULE_PARM_DESC(probeonly, "Do a minimal probe (debug)");
-module_param(idonly, bool, 0);
-MODULE_PARM_DESC(idonly, "Just identify without doing anything else (debug)");
-module_param(bailearly, int, 0);
-MODULE_PARM_DESC(bailearly, "Bail out early, depending on value (debug)");
-module_param(mode, charp, S_IRUGO);
-MODULE_PARM_DESC(mode,
- "Initial video mode \"<xres>x<yres>[-<depth>][@<refresh>]\"");
-
-#ifndef MODULE
-#define OPT_EQUAL(opt, name) (!strncmp(opt, name, strlen(name)))
-#define OPT_INTVAL(opt, name) simple_strtoul(opt + strlen(name) + 1, NULL, 0)
-#define OPT_STRVAL(opt, name) (opt + strlen(name))
-
-static __inline__ char * get_opt_string(const char *this_opt, const char *name)
-{
- const char *p;
- int i;
- char *ret;
-
- p = OPT_STRVAL(this_opt, name);
- i = 0;
- while (p[i] && p[i] != ' ' && p[i] != ',')
- i++;
- ret = kmalloc(i + 1, GFP_KERNEL);
- if (ret) {
- strncpy(ret, p, i);
- ret[i] = '\0';
- }
- return ret;
-}
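-
-/*
- * Note: the open-coded kmalloc()/strncpy() above predates kstrndup(); a
- * shorter equivalent sketch (assuming kstrndup() is available in the
- * target kernel) would simply be:
- *
- *	return kstrndup(p, i, GFP_KERNEL);
- */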
-
-static __inline__ int get_opt_int(const char *this_opt, const char *name,
- int *ret)
-{
- if (!ret)
- return 0;
-
- if (!OPT_EQUAL(this_opt, name))
- return 0;
-
- *ret = OPT_INTVAL(this_opt, name);
- return 1;
-}
-
-static __inline__ int get_opt_bool(const char *this_opt, const char *name,
- bool *ret)
-{
- if (!ret)
- return 0;
-
- if (OPT_EQUAL(this_opt, name)) {
- if (this_opt[strlen(name)] == '=')
- *ret = simple_strtoul(this_opt + strlen(name) + 1,
- NULL, 0);
- else
- *ret = 1;
- } else {
- if (OPT_EQUAL(this_opt, "no") && OPT_EQUAL(this_opt + 2, name))
- *ret = 0;
- else
- return 0;
- }
- return 1;
-}
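-
-/*
- * For example, with name "accel": "accel" and "accel=1" set *ret to true,
- * "accel=0" sets it to false, "noaccel" sets it to false, and anything
- * else leaves *ret untouched and makes the function return 0.
- */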
-
-static int __init intelfb_setup(char *options)
-{
- char *this_opt;
-
- DBG_MSG("intelfb_setup\n");
-
- if (!options || !*options) {
- DBG_MSG("no options\n");
- return 0;
- } else
- DBG_MSG("options: %s\n", options);
-
- /*
- * These are the built-in options analogous to the module parameters
- * defined above.
- *
- * The syntax is:
- *
- * video=intelfb:[mode][,<param>=<val>] ...
- *
- * e.g.,
- *
- * video=intelfb:1024x768-16@75,accel=0
- */
-
- while ((this_opt = strsep(&options, ","))) {
- if (!*this_opt)
- continue;
- if (get_opt_bool(this_opt, "accel", &accel))
- ;
- else if (get_opt_int(this_opt, "vram", &vram))
- ;
- else if (get_opt_bool(this_opt, "hwcursor", &hwcursor))
- ;
- else if (get_opt_bool(this_opt, "mtrr", &mtrr))
- ;
- else if (get_opt_bool(this_opt, "fixed", &fixed))
- ;
- else if (get_opt_bool(this_opt, "init", &noinit))
- noinit = !noinit;
- else if (OPT_EQUAL(this_opt, "mode="))
- mode = get_opt_string(this_opt, "mode=");
- else
- mode = this_opt;
- }
-
- return 0;
-}
-
-#endif
-
-static int __init intelfb_init(void)
-{
-#ifndef MODULE
- char *option = NULL;
-#endif
-
- DBG_MSG("intelfb_init\n");
-
- INF_MSG("Framebuffer driver for "
- "Intel(R) " SUPPORTED_CHIPSETS " chipsets\n");
- INF_MSG("Version " INTELFB_VERSION "\n");
-
- if (idonly)
- return -ENODEV;
-
- if (fb_modesetting_disabled("intelfb"))
- return -ENODEV;
-
-#ifndef MODULE
- if (fb_get_options("intelfb", &option))
- return -ENODEV;
- intelfb_setup(option);
-#endif
-
- return pci_register_driver(&intelfb_driver);
-}
-
-static void __exit intelfb_exit(void)
-{
- DBG_MSG("intelfb_exit\n");
- pci_unregister_driver(&intelfb_driver);
-}
-
-module_init(intelfb_init);
-module_exit(intelfb_exit);
-
-/***************************************************************
- * driver init / cleanup *
- ***************************************************************/
-
-static void cleanup(struct intelfb_info *dinfo)
-{
- DBG_MSG("cleanup\n");
-
- if (!dinfo)
- return;
-
- intelfbhw_disable_irq(dinfo);
-
- fb_dealloc_cmap(&dinfo->info->cmap);
- kfree(dinfo->info->pixmap.addr);
-
- if (dinfo->registered)
- unregister_framebuffer(dinfo->info);
-
- arch_phys_wc_del(dinfo->wc_cookie);
-
- if (dinfo->fbmem_gart && dinfo->gtt_fb_mem) {
- agp_unbind_memory(dinfo->gtt_fb_mem);
- agp_free_memory(dinfo->gtt_fb_mem);
- }
- if (dinfo->gtt_cursor_mem) {
- agp_unbind_memory(dinfo->gtt_cursor_mem);
- agp_free_memory(dinfo->gtt_cursor_mem);
- }
- if (dinfo->gtt_ring_mem) {
- agp_unbind_memory(dinfo->gtt_ring_mem);
- agp_free_memory(dinfo->gtt_ring_mem);
- }
-
-#ifdef CONFIG_FB_INTEL_I2C
- /* un-register I2C bus */
- intelfb_delete_i2c_busses(dinfo);
-#endif
-
- if (dinfo->mmio_base)
- iounmap((void __iomem *)dinfo->mmio_base);
- if (dinfo->aperture.virtual)
- iounmap((void __iomem *)dinfo->aperture.virtual);
-
- if (dinfo->flag & INTELFB_MMIO_ACQUIRED)
- release_mem_region(dinfo->mmio_base_phys, INTEL_REG_SIZE);
- if (dinfo->flag & INTELFB_FB_ACQUIRED)
- release_mem_region(dinfo->aperture.physical,
- dinfo->aperture.size);
- framebuffer_release(dinfo->info);
-}
-
-#define bailout(dinfo) do { \
- DBG_MSG("bailout\n"); \
- cleanup(dinfo); \
- INF_MSG("Not going to register framebuffer, exiting...\n"); \
- return -ENODEV; \
-} while (0)
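-
-/*
- * The do { ... } while (0) wrapper is the usual trick for making a
- * multi-statement macro behave like a single statement, so bailout()
- * can safely follow an un-braced if.
- */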
-
-
-static int intelfb_pci_register(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct fb_info *info;
- struct intelfb_info *dinfo;
- int i, err, dvo;
- int aperture_size, stolen_size = 0;
- struct agp_kern_info gtt_info;
- int agp_memtype;
- const char *s;
- struct agp_bridge_data *bridge;
- int aperture_bar = 0;
- int mmio_bar = 1;
- int offset;
-
- DBG_MSG("intelfb_pci_register\n");
-
- err = aperture_remove_conflicting_pci_devices(pdev, "intelfb");
- if (err)
- return err;
-
- num_registered++;
- if (num_registered != 1) {
- ERR_MSG("Attempted to register %d devices "
- "(should be only 1).\n", num_registered);
- return -ENODEV;
- }
-
- info = framebuffer_alloc(sizeof(struct intelfb_info), &pdev->dev);
- if (!info)
- return -ENOMEM;
-
- if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
- ERR_MSG("Could not allocate cmap for intelfb_info.\n");
- goto err_out_cmap;
- }
-
- dinfo = info->par;
- dinfo->info = info;
- dinfo->fbops = &intel_fb_ops;
- dinfo->pdev = pdev;
-
- /* Reserve pixmap space. */
- info->pixmap.addr = kzalloc(64 * 1024, GFP_KERNEL);
- if (info->pixmap.addr == NULL) {
- ERR_MSG("Cannot reserve pixmap memory.\n");
- goto err_out_pixmap;
- }
-
-	/* Set this option early because it could be changed by the TV
-	   encoder driver. */
- dinfo->fixed_mode = fixed;
-
- /* Enable device. */
- if ((err = pci_enable_device(pdev))) {
- ERR_MSG("Cannot enable device.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
- /* Set base addresses. */
- if ((ent->device == PCI_DEVICE_ID_INTEL_915G) ||
- (ent->device == PCI_DEVICE_ID_INTEL_915GM) ||
- (ent->device == PCI_DEVICE_ID_INTEL_945G) ||
- (ent->device == PCI_DEVICE_ID_INTEL_945GM) ||
- (ent->device == PCI_DEVICE_ID_INTEL_945GME) ||
- (ent->device == PCI_DEVICE_ID_INTEL_965G) ||
- (ent->device == PCI_DEVICE_ID_INTEL_965GM)) {
-
- aperture_bar = 2;
- mmio_bar = 0;
- }
- dinfo->aperture.physical = pci_resource_start(pdev, aperture_bar);
- dinfo->aperture.size = pci_resource_len(pdev, aperture_bar);
- dinfo->mmio_base_phys = pci_resource_start(pdev, mmio_bar);
- DBG_MSG("fb aperture: 0x%llx/0x%llx, MMIO region: 0x%llx/0x%llx\n",
- (unsigned long long)pci_resource_start(pdev, aperture_bar),
- (unsigned long long)pci_resource_len(pdev, aperture_bar),
- (unsigned long long)pci_resource_start(pdev, mmio_bar),
- (unsigned long long)pci_resource_len(pdev, mmio_bar));
-
- /* Reserve the fb and MMIO regions */
- if (!request_mem_region(dinfo->aperture.physical, dinfo->aperture.size,
- INTELFB_MODULE_NAME)) {
- ERR_MSG("Cannot reserve FB region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
- dinfo->flag |= INTELFB_FB_ACQUIRED;
-
- if (!request_mem_region(dinfo->mmio_base_phys,
- INTEL_REG_SIZE,
- INTELFB_MODULE_NAME)) {
- ERR_MSG("Cannot reserve MMIO region.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
- dinfo->flag |= INTELFB_MMIO_ACQUIRED;
-
- /* Get the chipset info. */
- dinfo->pci_chipset = pdev->device;
-
- if (intelfbhw_get_chipset(pdev, dinfo)) {
- cleanup(dinfo);
- return -ENODEV;
- }
-
- if (intelfbhw_get_memory(pdev, &aperture_size, &stolen_size)) {
- cleanup(dinfo);
- return -ENODEV;
- }
-
- INF_MSG("%02x:%02x.%d: %s, aperture size %dMB, "
- "stolen memory %dkB\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), dinfo->name,
- BtoMB(aperture_size), BtoKB(stolen_size));
-
- /* Set these from the options. */
- dinfo->accel = accel;
- dinfo->hwcursor = hwcursor;
-
- if (NOACCEL_CHIPSET(dinfo) && dinfo->accel == 1) {
- INF_MSG("Acceleration is not supported for the %s chipset.\n",
- dinfo->name);
- dinfo->accel = 0;
- }
-
- /* Framebuffer parameters - Use all the stolen memory if >= vram */
- if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
- dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
- dinfo->fbmem_gart = 0;
- } else {
- dinfo->fb.size = MB(vram);
- dinfo->fbmem_gart = 1;
- }
-
- /* Allocate space for the ring buffer and HW cursor if enabled. */
- if (dinfo->accel) {
- dinfo->ring.size = RINGBUFFER_SIZE;
- dinfo->ring_tail_mask = dinfo->ring.size - 1;
- }
- if (dinfo->hwcursor)
- dinfo->cursor.size = HW_CURSOR_SIZE;
-
- /* Use agpgart to manage the GATT */
- if (!(bridge = agp_backend_acquire(pdev))) {
- ERR_MSG("cannot acquire agp\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
- /* get the current gatt info */
- if (agp_copy_info(bridge, &gtt_info)) {
- ERR_MSG("cannot get agp info\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -ENODEV;
- }
-
- if (MB(voffset) < stolen_size)
- offset = (stolen_size >> 12);
- else
- offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
- /* set the mem offsets - set them after the already used pages */
- if (dinfo->accel)
- dinfo->ring.offset = offset + gtt_info.current_memory;
-	if (dinfo->hwcursor)
-		dinfo->cursor.offset = offset +
-			gtt_info.current_memory + (dinfo->ring.size >> 12);
-	if (dinfo->fbmem_gart)
-		dinfo->fb.offset = offset +
-			gtt_info.current_memory + (dinfo->ring.size >> 12) +
-			(dinfo->cursor.size >> 12);
-
- /* Allocate memories (which aren't stolen) */
- /* Map the fb and MMIO regions */
- /* ioremap only up to the end of used aperture */
- dinfo->aperture.virtual = (u8 __iomem *)ioremap_wc
- (dinfo->aperture.physical, ((offset + dinfo->fb.offset) << 12)
- + dinfo->fb.size);
- if (!dinfo->aperture.virtual) {
- ERR_MSG("Cannot remap FB region.\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -ENODEV;
- }
-
- dinfo->mmio_base =
- (u8 __iomem *)ioremap(dinfo->mmio_base_phys,
- INTEL_REG_SIZE);
- if (!dinfo->mmio_base) {
- ERR_MSG("Cannot remap MMIO region.\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -ENODEV;
- }
-
- if (dinfo->accel) {
- if (!(dinfo->gtt_ring_mem =
- agp_allocate_memory(bridge, dinfo->ring.size >> 12,
- AGP_NORMAL_MEMORY))) {
- ERR_MSG("cannot allocate ring buffer memory\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -ENOMEM;
- }
- if (agp_bind_memory(dinfo->gtt_ring_mem,
- dinfo->ring.offset)) {
- ERR_MSG("cannot bind ring buffer memory\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -EBUSY;
- }
- dinfo->ring.physical = dinfo->aperture.physical
- + (dinfo->ring.offset << 12);
- dinfo->ring.virtual = dinfo->aperture.virtual
- + (dinfo->ring.offset << 12);
- dinfo->ring_head = 0;
- }
- if (dinfo->hwcursor) {
- agp_memtype = dinfo->mobile ? AGP_PHYSICAL_MEMORY
- : AGP_NORMAL_MEMORY;
- if (!(dinfo->gtt_cursor_mem =
- agp_allocate_memory(bridge, dinfo->cursor.size >> 12,
- agp_memtype))) {
- ERR_MSG("cannot allocate cursor memory\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -ENOMEM;
- }
- if (agp_bind_memory(dinfo->gtt_cursor_mem,
- dinfo->cursor.offset)) {
- ERR_MSG("cannot bind cursor memory\n");
- agp_backend_release(bridge);
- cleanup(dinfo);
- return -EBUSY;
- }
- if (dinfo->mobile)
- dinfo->cursor.physical
- = dinfo->gtt_cursor_mem->physical;
- else
- dinfo->cursor.physical = dinfo->aperture.physical
- + (dinfo->cursor.offset << 12);
- dinfo->cursor.virtual = dinfo->aperture.virtual
- + (dinfo->cursor.offset << 12);
- }
- if (dinfo->fbmem_gart) {
- if (!(dinfo->gtt_fb_mem =
- agp_allocate_memory(bridge, dinfo->fb.size >> 12,
- AGP_NORMAL_MEMORY))) {
- WRN_MSG("cannot allocate framebuffer memory - use "
- "the stolen one\n");
- dinfo->fbmem_gart = 0;
-		} else if (agp_bind_memory(dinfo->gtt_fb_mem,
-					   dinfo->fb.offset)) {
- WRN_MSG("cannot bind framebuffer memory - use "
- "the stolen one\n");
- dinfo->fbmem_gart = 0;
- }
- }
-
- /* update framebuffer memory parameters */
- if (!dinfo->fbmem_gart)
- dinfo->fb.offset = 0; /* starts at offset 0 */
- dinfo->fb.physical = dinfo->aperture.physical
- + (dinfo->fb.offset << 12);
- dinfo->fb.virtual = dinfo->aperture.virtual + (dinfo->fb.offset << 12);
- dinfo->fb_start = dinfo->fb.offset << 12;
-
- /* release agpgart */
- agp_backend_release(bridge);
-
- if (mtrr)
- dinfo->wc_cookie = arch_phys_wc_add(dinfo->aperture.physical,
- dinfo->aperture.size);
-
- DBG_MSG("fb: 0x%x(+ 0x%x)/0x%x (0x%p)\n",
- dinfo->fb.physical, dinfo->fb.offset, dinfo->fb.size,
- dinfo->fb.virtual);
- DBG_MSG("MMIO: 0x%x/0x%x (0x%p)\n",
- dinfo->mmio_base_phys, INTEL_REG_SIZE,
- dinfo->mmio_base);
- DBG_MSG("ring buffer: 0x%x/0x%x (0x%p)\n",
- dinfo->ring.physical, dinfo->ring.size,
- dinfo->ring.virtual);
- DBG_MSG("HW cursor: 0x%x/0x%x (0x%p) (offset 0x%x) (phys 0x%x)\n",
- dinfo->cursor.physical, dinfo->cursor.size,
- dinfo->cursor.virtual, dinfo->cursor.offset,
- dinfo->cursor.physical);
-
- DBG_MSG("options: vram = %d, accel = %d, hwcursor = %d, fixed = %d, "
- "noinit = %d\n", vram, accel, hwcursor, fixed, noinit);
- DBG_MSG("options: mode = \"%s\"\n", mode ? mode : "");
-
- if (probeonly)
- bailout(dinfo);
-
- /*
- * Check if the LVDS port or any DVO ports are enabled. If so,
- * don't allow mode switching
- */
- dvo = intelfbhw_check_non_crt(dinfo);
- if (dvo) {
- dinfo->fixed_mode = 1;
- WRN_MSG("Non-CRT device is enabled ( ");
- i = 0;
- while (dvo) {
- if (dvo & 1) {
- s = intelfbhw_dvo_to_string(1 << i);
- if (s)
- printk("%s ", s);
- }
- dvo >>= 1;
- ++i;
- }
- printk("). Disabling mode switching.\n");
- }
-
- if (bailearly == 1)
- bailout(dinfo);
-
- if (FIXED_MODE(dinfo) &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) {
- ERR_MSG("Video mode must be programmed at boot time.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
- if (bailearly == 2)
- bailout(dinfo);
-
- /* Initialise dinfo and related data. */
- /* If an initial mode was programmed at boot time, get its details. */
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
- get_initial_mode(dinfo);
-
- if (bailearly == 3)
- bailout(dinfo);
-
- if (FIXED_MODE(dinfo)) /* remap fb address */
- update_dinfo(dinfo, &dinfo->initial_var);
-
- if (bailearly == 4)
- bailout(dinfo);
-
-
- if (intelfb_set_fbinfo(dinfo)) {
- cleanup(dinfo);
- return -ENODEV;
- }
-
- if (bailearly == 5)
- bailout(dinfo);
-
-#ifdef CONFIG_FB_INTEL_I2C
- /* register I2C bus */
- intelfb_create_i2c_busses(dinfo);
-#endif
-
- if (bailearly == 6)
- bailout(dinfo);
-
- pci_set_drvdata(pdev, dinfo);
-
- /* Save the initial register state. */
- i = intelfbhw_read_hw_state(dinfo, &dinfo->save_state,
- bailearly > 6 ? bailearly - 6 : 0);
- if (i != 0) {
- DBG_MSG("intelfbhw_read_hw_state returned %d\n", i);
- bailout(dinfo);
- }
-
- intelfbhw_print_hw_state(dinfo, &dinfo->save_state);
-
- if (bailearly == 18)
- bailout(dinfo);
-
- /* read active pipe */
- dinfo->pipe = intelfbhw_active_pipe(&dinfo->save_state);
-
- /* Cursor initialisation */
- if (dinfo->hwcursor) {
- intelfbhw_cursor_init(dinfo);
- intelfbhw_cursor_reset(dinfo);
- }
-
- if (bailearly == 19)
- bailout(dinfo);
-
- /* 2d acceleration init */
- if (dinfo->accel)
- intelfbhw_2d_start(dinfo);
-
- if (bailearly == 20)
- bailout(dinfo);
-
- if (noregister)
- bailout(dinfo);
-
- if (register_framebuffer(dinfo->info) < 0) {
- ERR_MSG("Cannot register framebuffer.\n");
- cleanup(dinfo);
- return -ENODEV;
- }
-
- dinfo->registered = 1;
- dinfo->open = 0;
-
- init_waitqueue_head(&dinfo->vsync.wait);
- spin_lock_init(&dinfo->int_lock);
- dinfo->irq_flags = 0;
- dinfo->vsync.pan_display = 0;
- dinfo->vsync.pan_offset = 0;
-
- return 0;
-
-err_out_pixmap:
- fb_dealloc_cmap(&info->cmap);
-err_out_cmap:
- framebuffer_release(info);
- return -ENODEV;
-}
-
-static void intelfb_pci_unregister(struct pci_dev *pdev)
-{
- struct intelfb_info *dinfo = pci_get_drvdata(pdev);
-
- DBG_MSG("intelfb_pci_unregister\n");
-
- if (!dinfo)
- return;
-
- cleanup(dinfo);
-}
-
-/***************************************************************
- * helper functions *
- ***************************************************************/
-
-__inline__ int intelfb_var_to_depth(const struct fb_var_screeninfo *var)
-{
- DBG_MSG("intelfb_var_to_depth: bpp: %d, green.length is %d\n",
- var->bits_per_pixel, var->green.length);
-
- switch (var->bits_per_pixel) {
- case 16:
- return (var->green.length == 6) ? 16 : 15;
- case 32:
- return 24;
- default:
- return var->bits_per_pixel;
- }
-}
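-
-/*
- * For example, a 16 bpp mode with green.length == 6 is RGB565 (depth 16),
- * while green.length == 5 means RGB555 (depth 15); 32 bpp is reported as
- * depth 24 since the top byte is unused (or alpha).
- */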
-
-
-static __inline__ int var_to_refresh(const struct fb_var_screeninfo *var)
-{
- int xtot = var->xres + var->left_margin + var->right_margin +
- var->hsync_len;
- int ytot = var->yres + var->upper_margin + var->lower_margin +
- var->vsync_len;
-
- return (1000000000 / var->pixclock * 1000 + 500) / xtot / ytot;
-}
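-
-/*
- * Worked example: a standard 1024x768@60 mode has pixclock = 15385 ps
- * (a 65 MHz dot clock), xtot = 1344 and ytot = 806, so the expression
- * above evaluates to 64998500 / 1344 / 806 = 60 (Hz).
- */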
-
-/***************************************************************
- * Various initialisation functions *
- ***************************************************************/
-
-static void get_initial_mode(struct intelfb_info *dinfo)
-{
- struct fb_var_screeninfo *var;
- int xtot, ytot;
-
- DBG_MSG("get_initial_mode\n");
-
- dinfo->initial_vga = 1;
- dinfo->initial_fb_base = screen_info.lfb_base;
- dinfo->initial_video_ram = screen_info.lfb_size * KB(64);
- dinfo->initial_pitch = screen_info.lfb_linelength;
-
- var = &dinfo->initial_var;
- memset(var, 0, sizeof(*var));
- var->xres = screen_info.lfb_width;
- var->yres = screen_info.lfb_height;
- var->bits_per_pixel = screen_info.lfb_depth;
- switch (screen_info.lfb_depth) {
- case 15:
- var->bits_per_pixel = 16;
- break;
- case 24:
- var->bits_per_pixel = 32;
- break;
- }
-
- DBG_MSG("Initial info: FB is 0x%x/0x%x (%d kByte)\n",
- dinfo->initial_fb_base, dinfo->initial_video_ram,
- BtoKB(dinfo->initial_video_ram));
-
- DBG_MSG("Initial info: mode is %dx%d-%d (%d)\n",
- var->xres, var->yres, var->bits_per_pixel,
- dinfo->initial_pitch);
-
- /* Dummy timing values (assume 60Hz) */
- var->left_margin = (var->xres / 8) & 0xf8;
- var->right_margin = 32;
- var->upper_margin = 16;
- var->lower_margin = 4;
- var->hsync_len = (var->xres / 8) & 0xf8;
- var->vsync_len = 4;
-
- xtot = var->xres + var->left_margin +
- var->right_margin + var->hsync_len;
- ytot = var->yres + var->upper_margin +
- var->lower_margin + var->vsync_len;
- var->pixclock = 10000000 / xtot * 1000 / ytot * 100 / 60;
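-	/*
-	 * pixclock is in picoseconds per pixel, i.e. roughly
-	 * 10^12 / (xtot * ytot * 60); the expression above is evaluated
-	 * stepwise to keep the intermediate values within 32 bits.
-	 */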
-
- var->height = -1;
- var->width = -1;
-
- if (var->bits_per_pixel > 8) {
- var->red.offset = screen_info.red_pos;
- var->red.length = screen_info.red_size;
- var->green.offset = screen_info.green_pos;
- var->green.length = screen_info.green_size;
- var->blue.offset = screen_info.blue_pos;
- var->blue.length = screen_info.blue_size;
- var->transp.offset = screen_info.rsvd_pos;
- var->transp.length = screen_info.rsvd_size;
- } else {
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- }
-}
-
-static int intelfb_init_var(struct intelfb_info *dinfo)
-{
- struct fb_var_screeninfo *var;
- int msrc = 0;
-
- DBG_MSG("intelfb_init_var\n");
-
- var = &dinfo->info->var;
- if (FIXED_MODE(dinfo)) {
- memcpy(var, &dinfo->initial_var,
- sizeof(struct fb_var_screeninfo));
- msrc = 5;
- } else {
- const u8 *edid_s = fb_firmware_edid(&dinfo->pdev->dev);
- u8 *edid_d = NULL;
-
- if (edid_s) {
- edid_d = kmemdup(edid_s, EDID_LENGTH, GFP_KERNEL);
-
- if (edid_d) {
- fb_edid_to_monspecs(edid_d,
- &dinfo->info->monspecs);
- kfree(edid_d);
- }
- }
-
- if (mode) {
- printk("intelfb: Looking for mode in private "
- "database\n");
- msrc = fb_find_mode(var, dinfo->info, mode,
- dinfo->info->monspecs.modedb,
- dinfo->info->monspecs.modedb_len,
- NULL, 0);
-
-			if (msrc > 1) {
-				printk("intelfb: No mode in private database, "
-				       "looking for mode in global "
-				       "database ");
- msrc = fb_find_mode(var, dinfo->info, mode,
- NULL, 0, NULL, 0);
-
- if (msrc)
- msrc |= 8;
- }
-
- }
-
- if (!msrc)
- msrc = fb_find_mode(var, dinfo->info, PREFERRED_MODE,
- NULL, 0, NULL, 0);
- }
-
- if (!msrc) {
- ERR_MSG("Cannot find a suitable video mode.\n");
- return 1;
- }
-
- INF_MSG("Initial video mode is %dx%d-%d@%d.\n", var->xres, var->yres,
- var->bits_per_pixel, var_to_refresh(var));
-
- DBG_MSG("Initial video mode is from %d.\n", msrc);
-
-#if ALLOCATE_FOR_PANNING
- /* Allow use of half of the video ram for panning */
- var->xres_virtual = var->xres;
- var->yres_virtual =
- dinfo->fb.size / 2 / (var->bits_per_pixel * var->xres);
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-#else
- var->yres_virtual = var->yres;
-#endif
-
- if (dinfo->accel)
- var->accel_flags |= FB_ACCELF_TEXT;
- else
- var->accel_flags &= ~FB_ACCELF_TEXT;
-
- return 0;
-}
-
-static int intelfb_set_fbinfo(struct intelfb_info *dinfo)
-{
- struct fb_info *info = dinfo->info;
-
- DBG_MSG("intelfb_set_fbinfo\n");
-
- info->fbops = &intel_fb_ops;
- info->pseudo_palette = dinfo->pseudo_palette;
-
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- if (intelfb_init_var(dinfo))
- return 1;
-
- info->pixmap.scan_align = 1;
- strcpy(info->fix.id, dinfo->name);
- info->fix.smem_start = dinfo->fb.physical;
- info->fix.smem_len = dinfo->fb.size;
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 8;
- info->fix.ypanstep = 1;
- info->fix.ywrapstep = 0;
- info->fix.mmio_start = dinfo->mmio_base_phys;
- info->fix.mmio_len = INTEL_REG_SIZE;
- info->fix.accel = FB_ACCEL_I830;
- update_dinfo(dinfo, &info->var);
-
- return 0;
-}
-
-/* Update dinfo to match the active video mode. */
-static void update_dinfo(struct intelfb_info *dinfo,
- struct fb_var_screeninfo *var)
-{
- DBG_MSG("update_dinfo\n");
-
- dinfo->bpp = var->bits_per_pixel;
- dinfo->depth = intelfb_var_to_depth(var);
- dinfo->xres = var->xres;
-	dinfo->yres = var->yres;
- dinfo->pixclock = var->pixclock;
-
- dinfo->info->fix.visual = dinfo->visual;
- dinfo->info->fix.line_length = dinfo->pitch;
-
- switch (dinfo->bpp) {
- case 8:
- dinfo->visual = FB_VISUAL_PSEUDOCOLOR;
- dinfo->pitch = var->xres_virtual;
- break;
- case 16:
- dinfo->visual = FB_VISUAL_TRUECOLOR;
- dinfo->pitch = var->xres_virtual * 2;
- break;
- case 32:
- dinfo->visual = FB_VISUAL_TRUECOLOR;
- dinfo->pitch = var->xres_virtual * 4;
- break;
- }
-
-	/* Make sure the line length is aligned correctly. */
- if (IS_I9XX(dinfo))
- dinfo->pitch = ROUND_UP_TO(dinfo->pitch, STRIDE_ALIGNMENT_I9XX);
- else
- dinfo->pitch = ROUND_UP_TO(dinfo->pitch, STRIDE_ALIGNMENT);
-
- if (FIXED_MODE(dinfo))
- dinfo->pitch = dinfo->initial_pitch;
-
- dinfo->info->screen_base = (char __iomem *)dinfo->fb.virtual;
- dinfo->info->fix.line_length = dinfo->pitch;
- dinfo->info->fix.visual = dinfo->visual;
-}
-
-/* fbops functions */
-
-/***************************************************************
- * fbdev interface *
- ***************************************************************/
-
-static int intelfb_open(struct fb_info *info, int user)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
-
- if (user)
- dinfo->open++;
-
- return 0;
-}
-
-static int intelfb_release(struct fb_info *info, int user)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
-
- if (user) {
- dinfo->open--;
- msleep(1);
- if (!dinfo->open)
- intelfbhw_disable_irq(dinfo);
- }
-
- return 0;
-}
-
-static int intelfb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- int change_var = 0;
- struct fb_var_screeninfo v;
- struct intelfb_info *dinfo;
- static int first = 1;
- int i;
- /* Good pitches to allow tiling. Don't care about pitches < 1024. */
- static const int pitches[] = {
- 128 * 8,
- 128 * 16,
- 128 * 32,
- 128 * 64,
- 0
- };
-
- DBG_MSG("intelfb_check_var: accel_flags is %d\n", var->accel_flags);
-
- dinfo = GET_DINFO(info);
-
- if (!var->pixclock)
- return -EINVAL;
-
- /* update the pitch */
- if (intelfbhw_validate_mode(dinfo, var) != 0)
- return -EINVAL;
-
- v = *var;
-
- for (i = 0; pitches[i] != 0; i++) {
- if (pitches[i] >= v.xres_virtual) {
- v.xres_virtual = pitches[i];
- break;
- }
- }
-
- /* Check for a supported bpp. */
- if (v.bits_per_pixel <= 8)
- v.bits_per_pixel = 8;
- else if (v.bits_per_pixel <= 16) {
- if (v.bits_per_pixel == 16)
- v.green.length = 6;
- v.bits_per_pixel = 16;
- } else if (v.bits_per_pixel <= 32)
- v.bits_per_pixel = 32;
- else
- return -EINVAL;
-
- change_var = ((info->var.xres != var->xres) ||
- (info->var.yres != var->yres) ||
- (info->var.xres_virtual != var->xres_virtual) ||
- (info->var.yres_virtual != var->yres_virtual) ||
- (info->var.bits_per_pixel != var->bits_per_pixel) ||
- memcmp(&info->var.red, &var->red, sizeof(var->red)) ||
- memcmp(&info->var.green, &var->green,
- sizeof(var->green)) ||
- memcmp(&info->var.blue, &var->blue, sizeof(var->blue)));
-
- if (FIXED_MODE(dinfo) &&
- (change_var ||
- var->yres_virtual > dinfo->initial_var.yres_virtual ||
- var->yres_virtual < dinfo->initial_var.yres ||
- var->xoffset || var->nonstd)) {
- if (first) {
- ERR_MSG("Changing the video mode is not supported.\n");
- first = 0;
- }
- return -EINVAL;
- }
-
- switch (intelfb_var_to_depth(&v)) {
- case 8:
- v.red.offset = v.green.offset = v.blue.offset = 0;
- v.red.length = v.green.length = v.blue.length = 8;
- v.transp.offset = v.transp.length = 0;
- break;
- case 15:
- v.red.offset = 10;
- v.green.offset = 5;
- v.blue.offset = 0;
- v.red.length = v.green.length = v.blue.length = 5;
- v.transp.offset = v.transp.length = 0;
- break;
- case 16:
- v.red.offset = 11;
- v.green.offset = 5;
- v.blue.offset = 0;
- v.red.length = 5;
- v.green.length = 6;
- v.blue.length = 5;
- v.transp.offset = v.transp.length = 0;
- break;
- case 24:
- v.red.offset = 16;
- v.green.offset = 8;
- v.blue.offset = 0;
- v.red.length = v.green.length = v.blue.length = 8;
- v.transp.offset = v.transp.length = 0;
- break;
- case 32:
- v.red.offset = 16;
- v.green.offset = 8;
- v.blue.offset = 0;
- v.red.length = v.green.length = v.blue.length = 8;
- v.transp.offset = 24;
- v.transp.length = 8;
- break;
- }
-
- if (v.xoffset > v.xres_virtual - v.xres)
- v.xoffset = v.xres_virtual - v.xres;
- if (v.yoffset > v.yres_virtual - v.yres)
- v.yoffset = v.yres_virtual - v.yres;
-
- v.red.msb_right = v.green.msb_right = v.blue.msb_right =
- v.transp.msb_right = 0;
-
- *var = v;
-
- return 0;
-}
-
-static int intelfb_set_par(struct fb_info *info)
-{
- struct intelfb_hwstate *hw;
- struct intelfb_info *dinfo = GET_DINFO(info);
-
- if (FIXED_MODE(dinfo)) {
- ERR_MSG("Changing the video mode is not supported.\n");
- return -EINVAL;
- }
-
- hw = kmalloc(sizeof(*hw), GFP_ATOMIC);
- if (!hw)
- return -ENOMEM;
-
- DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres,
- info->var.yres, info->var.bits_per_pixel);
-
- /*
- * Disable VCO prior to timing register change.
- */
- OUTREG(DPLL_A, INREG(DPLL_A) & ~DPLL_VCO_ENABLE);
-
- intelfb_blank(FB_BLANK_POWERDOWN, info);
-
- if (ACCEL(dinfo, info))
- intelfbhw_2d_stop(dinfo);
-
- memcpy(hw, &dinfo->save_state, sizeof(*hw));
- if (intelfbhw_mode_to_hw(dinfo, hw, &info->var))
- goto invalid_mode;
- if (intelfbhw_program_mode(dinfo, hw, 0))
- goto invalid_mode;
-
-#if REGDUMP > 0
- intelfbhw_read_hw_state(dinfo, hw, 0);
- intelfbhw_print_hw_state(dinfo, hw);
-#endif
-
- update_dinfo(dinfo, &info->var);
-
- if (ACCEL(dinfo, info))
- intelfbhw_2d_start(dinfo);
-
- intelfb_pan_display(&info->var, info);
-
- intelfb_blank(FB_BLANK_UNBLANK, info);
-
- if (ACCEL(dinfo, info)) {
- info->flags = FBINFO_HWACCEL_YPAN |
- FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
- FBINFO_HWACCEL_IMAGEBLIT;
- } else
- info->flags = FBINFO_HWACCEL_YPAN;
-
- kfree(hw);
- return 0;
-invalid_mode:
- kfree(hw);
- return -EINVAL;
-}
-
-static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
-
-#if VERBOSE > 0
- DBG_MSG("intelfb_setcolreg: regno %d, depth %d\n", regno, dinfo->depth);
-#endif
-
- if (regno > 255)
- return 1;
-
- if (dinfo->depth == 8) {
- red >>= 8;
- green >>= 8;
- blue >>= 8;
-
- intelfbhw_setcolreg(dinfo, regno, red, green, blue,
- transp);
- }
-
- if (regno < 16) {
- switch (dinfo->depth) {
- case 15:
- dinfo->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
- ((green & 0xf800) >> 6) |
- ((blue & 0xf800) >> 11);
- break;
- case 16:
- dinfo->pseudo_palette[regno] = (red & 0xf800) |
- ((green & 0xfc00) >> 5) |
- ((blue & 0xf800) >> 11);
- break;
- case 24:
- dinfo->pseudo_palette[regno] = ((red & 0xff00) << 8) |
- (green & 0xff00) |
- ((blue & 0xff00) >> 8);
- break;
- }
- }
-
- return 0;
-}
-
-static int intelfb_blank(int blank, struct fb_info *info)
-{
- intelfbhw_do_blank(blank, info);
- return 0;
-}
-
-static int intelfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- intelfbhw_pan_display(var, info);
- return 0;
-}
-
-/* When/if we have our own ioctls. */
-static int intelfb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- int retval = 0;
- struct intelfb_info *dinfo = GET_DINFO(info);
- u32 pipe = 0;
-
- switch (cmd) {
- case FBIO_WAITFORVSYNC:
- if (get_user(pipe, (__u32 __user *)arg))
- return -EFAULT;
-
- retval = intelfbhw_wait_for_vsync(dinfo, pipe);
- break;
- default:
- break;
- }
-
- return retval;
-}
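-
-/*
- * Userspace usage sketch for the FBIO_WAITFORVSYNC ioctl handled above
- * (error handling omitted; FBIO_WAITFORVSYNC comes from <linux/fb.h>):
- *
- *	int fd = open("/dev/fb0", O_RDWR);
- *	__u32 pipe = 0;		(0 selects pipe A)
- *	ioctl(fd, FBIO_WAITFORVSYNC, &pipe);
- */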
-
-static void intelfb_fillrect (struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
- u32 rop, color;
-
-#if VERBOSE > 0
- DBG_MSG("intelfb_fillrect\n");
-#endif
-
- if (!ACCEL(dinfo, info) || dinfo->depth == 4) {
- cfb_fillrect(info, rect);
- return;
- }
-
- if (rect->rop == ROP_COPY)
- rop = PAT_ROP_GXCOPY;
- else /* ROP_XOR */
- rop = PAT_ROP_GXXOR;
-
- if (dinfo->depth != 8)
- color = dinfo->pseudo_palette[rect->color];
- else
- color = rect->color;
-
- intelfbhw_do_fillrect(dinfo, rect->dx, rect->dy,
- rect->width, rect->height, color,
- dinfo->pitch, info->var.bits_per_pixel,
- rop);
-}
-
-static void intelfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
-
-#if VERBOSE > 0
- DBG_MSG("intelfb_copyarea\n");
-#endif
-
- if (!ACCEL(dinfo, info) || dinfo->depth == 4) {
- cfb_copyarea(info, region);
- return;
- }
-
- intelfbhw_do_bitblt(dinfo, region->sx, region->sy, region->dx,
- region->dy, region->width, region->height,
- dinfo->pitch, info->var.bits_per_pixel);
-}
-
-static void intelfb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
- u32 fgcolor, bgcolor;
-
-#if VERBOSE > 0
- DBG_MSG("intelfb_imageblit\n");
-#endif
-
- if (!ACCEL(dinfo, info) || dinfo->depth == 4
- || image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
-
- if (dinfo->depth != 8) {
- fgcolor = dinfo->pseudo_palette[image->fg_color];
- bgcolor = dinfo->pseudo_palette[image->bg_color];
- } else {
- fgcolor = image->fg_color;
- bgcolor = image->bg_color;
- }
-
- if (!intelfbhw_do_drawglyph(dinfo, fgcolor, bgcolor, image->width,
- image->height, image->data,
- image->dx, image->dy,
- dinfo->pitch, info->var.bits_per_pixel)) {
- cfb_imageblit(info, image);
- return;
- }
-}
-
-static int intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
- u32 physical;
-#if VERBOSE > 0
- DBG_MSG("intelfb_cursor\n");
-#endif
-
- if (!dinfo->hwcursor)
- return -ENODEV;
-
- intelfbhw_cursor_hide(dinfo);
-
- /* If XFree killed the cursor - restore it */
- physical = (dinfo->mobile || IS_I9XX(dinfo)) ? dinfo->cursor.physical :
- (dinfo->cursor.offset << 12);
-
- if (INREG(CURSOR_A_BASEADDR) != physical) {
- u32 fg, bg;
-
- DBG_MSG("the cursor was killed - restore it !!\n");
- DBG_MSG("size %d, %d pos %d, %d\n",
- cursor->image.width, cursor->image.height,
- cursor->image.dx, cursor->image.dy);
-
- intelfbhw_cursor_init(dinfo);
- intelfbhw_cursor_reset(dinfo);
- intelfbhw_cursor_setpos(dinfo, cursor->image.dx,
- cursor->image.dy);
-
- if (dinfo->depth != 8) {
- fg =dinfo->pseudo_palette[cursor->image.fg_color];
- bg =dinfo->pseudo_palette[cursor->image.bg_color];
- } else {
- fg = cursor->image.fg_color;
- bg = cursor->image.bg_color;
- }
- intelfbhw_cursor_setcolor(dinfo, bg, fg);
- intelfbhw_cursor_load(dinfo, cursor->image.width,
- cursor->image.height,
- dinfo->cursor_src);
-
- if (cursor->enable)
- intelfbhw_cursor_show(dinfo);
- return 0;
- }
-
- if (cursor->set & FB_CUR_SETPOS) {
- u32 dx, dy;
-
- dx = cursor->image.dx - info->var.xoffset;
- dy = cursor->image.dy - info->var.yoffset;
-
- intelfbhw_cursor_setpos(dinfo, dx, dy);
- }
-
- if (cursor->set & FB_CUR_SETSIZE) {
- if (cursor->image.width > 64 || cursor->image.height > 64)
- return -ENXIO;
-
- intelfbhw_cursor_reset(dinfo);
- }
-
- if (cursor->set & FB_CUR_SETCMAP) {
- u32 fg, bg;
-
- if (dinfo->depth != 8) {
- fg = dinfo->pseudo_palette[cursor->image.fg_color];
- bg = dinfo->pseudo_palette[cursor->image.bg_color];
- } else {
- fg = cursor->image.fg_color;
- bg = cursor->image.bg_color;
- }
-
- intelfbhw_cursor_setcolor(dinfo, bg, fg);
- }
-
- if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
- u32 s_pitch = (ROUND_UP_TO(cursor->image.width, 8) / 8);
- u32 size = s_pitch * cursor->image.height;
- u8 *dat = (u8 *) cursor->image.data;
- u8 *msk = (u8 *) cursor->mask;
- u8 src[64];
- u32 i;
-
- if (cursor->image.depth != 1)
-		if (cursor->image.depth != 1)
-			return -ENXIO;
-
-		/* Don't overflow the fixed-size staging buffer: size can
-		   reach 512 bytes for a 64x64 image, while src[] is only
-		   64 bytes. */
-		if (size > sizeof(src))
-			return -ENXIO;
- switch (cursor->rop) {
- case ROP_XOR:
- for (i = 0; i < size; i++)
- src[i] = dat[i] ^ msk[i];
- break;
- case ROP_COPY:
- default:
- for (i = 0; i < size; i++)
- src[i] = dat[i] & msk[i];
- break;
- }
-
-		/* Save the bitmap so it can be restored when XFree86
-		   dirties the cursor. */
- memcpy(dinfo->cursor_src, src, size);
-
- intelfbhw_cursor_load(dinfo, cursor->image.width,
- cursor->image.height, src);
- }
-
- if (cursor->enable)
- intelfbhw_cursor_show(dinfo);
-
- return 0;
-}
-
-static int intelfb_sync(struct fb_info *info)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
-
-#if VERBOSE > 0
- DBG_MSG("intelfb_sync\n");
-#endif
-
- if (dinfo->ring_lockup)
- return 0;
-
- intelfbhw_do_sync(dinfo);
- return 0;
-}
-
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
deleted file mode 100644
index 2086e0653..000000000
--- a/drivers/video/fbdev/intelfb/intelfbhw.c
+++ /dev/null
@@ -1,2115 +0,0 @@
-/*
- * intelfb
- *
- * Linux framebuffer driver for Intel(R) 865G integrated graphics chips.
- *
- * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
- * 2004 Sylvain Meyer
- *
- * This driver consists of two parts. The first part (intelfbdrv.c) provides
- * the basic fbdev interfaces, is derived in part from the radeonfb and
- * vesafb drivers, and is covered by the GPL. The second part (intelfbhw.c)
- * provides the code to program the hardware. Most of it is derived from
- * the i810/i830 XFree86 driver. The HW-specific code is covered here
- * under a dual license (GPL and MIT/XFree86 license).
- *
- * Author: David Dawes
- *
- */
-
-/* $DHD: intelfb/intelfbhw.c,v 1.9 2003/06/27 15:06:25 dawes Exp $ */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-
-#include "intelfb.h"
-#include "intelfbhw.h"
-
-struct pll_min_max {
- int min_m, max_m, min_m1, max_m1;
- int min_m2, max_m2, min_n, max_n;
- int min_p, max_p, min_p1, max_p1;
- int min_vco, max_vco, p_transition_clk, ref_clk;
- int p_inc_lo, p_inc_hi;
-};
-
-#define PLLS_I8xx 0
-#define PLLS_I9xx 1
-#define PLLS_MAX 2
-
-static struct pll_min_max plls[PLLS_MAX] = {
- { 108, 140, 18, 26,
- 6, 16, 3, 16,
- 4, 128, 0, 31,
- 930000, 1400000, 165000, 48000,
- 4, 2 }, /* I8xx */
-
- { 75, 120, 10, 20,
- 5, 9, 4, 7,
- 5, 80, 1, 8,
- 1400000, 2800000, 200000, 96000,
- 10, 5 } /* I9xx */
-};
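-
-/*
- * These limits bound the PLL parameter search. Roughly (as used by the
- * vclock calculation helpers elsewhere in this file):
- *
- *	m   = 5 * (m1 + 2) + (m2 + 2)
- *	vco = ref_clk * m / (n + 2)	(must lie in [min_vco, max_vco])
- *	dot clock = vco / p		(p built from p1/p2 per chip family)
- */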
-
-int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo)
-{
- u32 tmp;
- if (!pdev || !dinfo)
- return 1;
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_830M:
- dinfo->name = "Intel(R) 830M";
- dinfo->chipset = INTEL_830M;
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I8xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_845G:
- dinfo->name = "Intel(R) 845G";
- dinfo->chipset = INTEL_845G;
- dinfo->mobile = 0;
- dinfo->pll_index = PLLS_I8xx;
- return 0;
-	case PCI_DEVICE_ID_INTEL_854:
-		dinfo->mobile = 1;
-		dinfo->name = "Intel(R) 854";
-		dinfo->chipset = INTEL_854;
-		dinfo->pll_index = PLLS_I8xx;
-		return 0;
- case PCI_DEVICE_ID_INTEL_85XGM:
- tmp = 0;
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I8xx;
- pci_read_config_dword(pdev, INTEL_85X_CAPID, &tmp);
- switch ((tmp >> INTEL_85X_VARIANT_SHIFT) &
- INTEL_85X_VARIANT_MASK) {
- case INTEL_VAR_855GME:
- dinfo->name = "Intel(R) 855GME";
- dinfo->chipset = INTEL_855GME;
- return 0;
- case INTEL_VAR_855GM:
- dinfo->name = "Intel(R) 855GM";
- dinfo->chipset = INTEL_855GM;
- return 0;
- case INTEL_VAR_852GME:
- dinfo->name = "Intel(R) 852GME";
- dinfo->chipset = INTEL_852GME;
- return 0;
- case INTEL_VAR_852GM:
- dinfo->name = "Intel(R) 852GM";
- dinfo->chipset = INTEL_852GM;
- return 0;
- default:
- dinfo->name = "Intel(R) 852GM/855GM";
- dinfo->chipset = INTEL_85XGM;
- return 0;
- }
- break;
- case PCI_DEVICE_ID_INTEL_865G:
- dinfo->name = "Intel(R) 865G";
- dinfo->chipset = INTEL_865G;
- dinfo->mobile = 0;
- dinfo->pll_index = PLLS_I8xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_915G:
- dinfo->name = "Intel(R) 915G";
- dinfo->chipset = INTEL_915G;
- dinfo->mobile = 0;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_915GM:
- dinfo->name = "Intel(R) 915GM";
- dinfo->chipset = INTEL_915GM;
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_945G:
- dinfo->name = "Intel(R) 945G";
- dinfo->chipset = INTEL_945G;
- dinfo->mobile = 0;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_945GM:
- dinfo->name = "Intel(R) 945GM";
- dinfo->chipset = INTEL_945GM;
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_945GME:
- dinfo->name = "Intel(R) 945GME";
- dinfo->chipset = INTEL_945GME;
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_965G:
- dinfo->name = "Intel(R) 965G";
- dinfo->chipset = INTEL_965G;
- dinfo->mobile = 0;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- case PCI_DEVICE_ID_INTEL_965GM:
- dinfo->name = "Intel(R) 965GM";
- dinfo->chipset = INTEL_965GM;
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
- default:
- return 1;
- }
-}
-
-int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
- int *stolen_size)
-{
- struct pci_dev *bridge_dev;
- u16 tmp;
- int stolen_overhead;
-
- if (!pdev || !aperture_size || !stolen_size)
- return 1;
-
- /* Find the bridge device. It is always 0:0.0 */
- bridge_dev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0,
- PCI_DEVFN(0, 0));
- if (!bridge_dev) {
- ERR_MSG("cannot find bridge device\n");
- return 1;
- }
-
- /* Get the fb aperture size and "stolen" memory amount. */
- tmp = 0;
- pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
- pci_dev_put(bridge_dev);
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_915G:
- case PCI_DEVICE_ID_INTEL_915GM:
- case PCI_DEVICE_ID_INTEL_945G:
- case PCI_DEVICE_ID_INTEL_945GM:
- case PCI_DEVICE_ID_INTEL_945GME:
- case PCI_DEVICE_ID_INTEL_965G:
- case PCI_DEVICE_ID_INTEL_965GM:
- /*
- * 915, 945 and 965 chipsets support 64MB, 128MB or 256MB
- * aperture. Determine size from PCI resource length.
- */
- *aperture_size = pci_resource_len(pdev, 2);
- break;
- default:
- if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
- *aperture_size = MB(64);
- else
- *aperture_size = MB(128);
- break;
- }
-
- /* Stolen memory size is reduced by the GTT and the popup.
- GTT is 1K per MB of aperture size, and popup is 4K. */
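-	/* e.g. a 128 MB aperture costs 128 KB of GTT plus the 4 KB popup,
-	   so stolen_overhead comes out as 132 (in KB). */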
- stolen_overhead = (*aperture_size / MB(1)) + 4;
- switch(pdev->device) {
- case PCI_DEVICE_ID_INTEL_830M:
- case PCI_DEVICE_ID_INTEL_845G:
- switch (tmp & INTEL_830_GMCH_GMS_MASK) {
- case INTEL_830_GMCH_GMS_STOLEN_512:
- *stolen_size = KB(512) - KB(stolen_overhead);
- return 0;
- case INTEL_830_GMCH_GMS_STOLEN_1024:
- *stolen_size = MB(1) - KB(stolen_overhead);
- return 0;
- case INTEL_830_GMCH_GMS_STOLEN_8192:
- *stolen_size = MB(8) - KB(stolen_overhead);
- return 0;
- case INTEL_830_GMCH_GMS_LOCAL:
- ERR_MSG("only local memory found\n");
- return 1;
- case INTEL_830_GMCH_GMS_DISABLED:
- ERR_MSG("video memory is disabled\n");
- return 1;
- default:
- ERR_MSG("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_830_GMCH_GMS_MASK);
- return 1;
- }
- break;
- default:
- switch (tmp & INTEL_855_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- *stolen_size = MB(1) - KB(stolen_overhead);
- return 0;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- *stolen_size = MB(4) - KB(stolen_overhead);
- return 0;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- *stolen_size = MB(8) - KB(stolen_overhead);
- return 0;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- *stolen_size = MB(16) - KB(stolen_overhead);
- return 0;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- *stolen_size = MB(32) - KB(stolen_overhead);
- return 0;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- *stolen_size = MB(48) - KB(stolen_overhead);
- return 0;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- *stolen_size = MB(64) - KB(stolen_overhead);
- return 0;
- case INTEL_855_GMCH_GMS_DISABLED:
- ERR_MSG("video memory is disabled\n");
-			return 1;
- default:
- ERR_MSG("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_855_GMCH_GMS_MASK);
- return 1;
- }
- }
-}
-
-int intelfbhw_check_non_crt(struct intelfb_info *dinfo)
-{
- int dvo = 0;
-
- if (INREG(LVDS) & PORT_ENABLE)
- dvo |= LVDS_PORT;
- if (INREG(DVOA) & PORT_ENABLE)
- dvo |= DVOA_PORT;
- if (INREG(DVOB) & PORT_ENABLE)
- dvo |= DVOB_PORT;
- if (INREG(DVOC) & PORT_ENABLE)
- dvo |= DVOC_PORT;
-
- return dvo;
-}
-
-const char *intelfbhw_dvo_to_string(int dvo)
-{
- if (dvo & DVOA_PORT)
- return "DVO port A";
- else if (dvo & DVOB_PORT)
- return "DVO port B";
- else if (dvo & DVOC_PORT)
- return "DVO port C";
- else if (dvo & LVDS_PORT)
- return "LVDS port";
- else
- return NULL;
-}
-
-
-int intelfbhw_validate_mode(struct intelfb_info *dinfo,
- struct fb_var_screeninfo *var)
-{
- int bytes_per_pixel;
- int tmp;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_validate_mode\n");
-#endif
-
- bytes_per_pixel = var->bits_per_pixel / 8;
- if (bytes_per_pixel == 3)
- bytes_per_pixel = 4;
-
- /* Check if enough video memory. */
- tmp = var->yres_virtual * var->xres_virtual * bytes_per_pixel;
- if (tmp > dinfo->fb.size) {
- WRN_MSG("Not enough video ram for mode "
- "(%d KByte vs %d KByte).\n",
- BtoKB(tmp), BtoKB(dinfo->fb.size));
- return 1;
- }
-
- /* Check if x/y limits are OK. */
- if (var->xres - 1 > HACTIVE_MASK) {
- WRN_MSG("X resolution too large (%d vs %d).\n",
- var->xres, HACTIVE_MASK + 1);
- return 1;
- }
- if (var->yres - 1 > VACTIVE_MASK) {
- WRN_MSG("Y resolution too large (%d vs %d).\n",
- var->yres, VACTIVE_MASK + 1);
- return 1;
- }
- if (var->xres < 4) {
- WRN_MSG("X resolution too small (%d vs 4).\n", var->xres);
- return 1;
- }
- if (var->yres < 4) {
- WRN_MSG("Y resolution too small (%d vs 4).\n", var->yres);
- return 1;
- }
-
- /* Check for doublescan modes. */
- if (var->vmode & FB_VMODE_DOUBLE) {
- WRN_MSG("Mode is double-scan.\n");
- return 1;
- }
-
- if ((var->vmode & FB_VMODE_INTERLACED) && (var->yres & 1)) {
- WRN_MSG("Odd number of lines in interlaced mode\n");
- return 1;
- }
-
- /* Check if clock is OK. */
- tmp = 1000000000 / var->pixclock;
- if (tmp < MIN_CLOCK) {
- WRN_MSG("Pixel clock is too low (%d MHz vs %d MHz).\n",
- (tmp + 500) / 1000, MIN_CLOCK / 1000);
- return 1;
- }
- if (tmp > MAX_CLOCK) {
- WRN_MSG("Pixel clock is too high (%d MHz vs %d MHz).\n",
- (tmp + 500) / 1000, MAX_CLOCK / 1000);
- return 1;
- }
-
- return 0;
-}
-
-int intelfbhw_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
- u32 offset, xoffset, yoffset;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_pan_display\n");
-#endif
-
- xoffset = ROUND_DOWN_TO(var->xoffset, 8);
- yoffset = var->yoffset;
-
- if ((xoffset + info->var.xres > info->var.xres_virtual) ||
- (yoffset + info->var.yres > info->var.yres_virtual))
- return -EINVAL;
-
- offset = (yoffset * dinfo->pitch) +
- (xoffset * info->var.bits_per_pixel) / 8;
-
- offset += dinfo->fb.offset << 12;
-
- dinfo->vsync.pan_offset = offset;
- if ((var->activate & FB_ACTIVATE_VBL) &&
- !intelfbhw_enable_irq(dinfo))
- dinfo->vsync.pan_display = 1;
- else {
- dinfo->vsync.pan_display = 0;
- OUTREG(DSPABASE, offset);
- }
-
- return 0;
-}
-
-/* Blank the screen. */
-void intelfbhw_do_blank(int blank, struct fb_info *info)
-{
- struct intelfb_info *dinfo = GET_DINFO(info);
- u32 tmp;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_do_blank: blank is %d\n", blank);
-#endif
-
- /* Turn plane A on or off */
- tmp = INREG(DSPACNTR);
- if (blank)
- tmp &= ~DISPPLANE_PLANE_ENABLE;
- else
- tmp |= DISPPLANE_PLANE_ENABLE;
- OUTREG(DSPACNTR, tmp);
- /* Flush */
- tmp = INREG(DSPABASE);
- OUTREG(DSPABASE, tmp);
-
- /* Turn off/on the HW cursor */
-#if VERBOSE > 0
- DBG_MSG("cursor_on is %d\n", dinfo->cursor_on);
-#endif
- if (dinfo->cursor_on) {
- if (blank)
- intelfbhw_cursor_hide(dinfo);
- else
- intelfbhw_cursor_show(dinfo);
- dinfo->cursor_on = 1;
- }
- dinfo->cursor_blanked = blank;
-
- /* Set DPMS level */
- tmp = INREG(ADPA) & ~ADPA_DPMS_CONTROL_MASK;
- switch (blank) {
- case FB_BLANK_UNBLANK:
- case FB_BLANK_NORMAL:
- tmp |= ADPA_DPMS_D0;
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- tmp |= ADPA_DPMS_D1;
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- tmp |= ADPA_DPMS_D2;
- break;
- case FB_BLANK_POWERDOWN:
- tmp |= ADPA_DPMS_D3;
- break;
- }
- OUTREG(ADPA, tmp);
-
- return;
-}
-
-
-/* Check which pipe is connected to an active display plane. */
-int intelfbhw_active_pipe(const struct intelfb_hwstate *hw)
-{
- int pipe = -1;
-
- /* keep old default behaviour - prefer PIPE_A */
- if (hw->disp_b_ctrl & DISPPLANE_PLANE_ENABLE) {
- pipe = (hw->disp_b_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
- pipe &= PIPE_MASK;
- if (unlikely(pipe == PIPE_A))
- return PIPE_A;
- }
- if (hw->disp_a_ctrl & DISPPLANE_PLANE_ENABLE) {
- pipe = (hw->disp_a_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
- pipe &= PIPE_MASK;
- if (likely(pipe == PIPE_A))
- return PIPE_A;
- }
-	/* No pipe selected should be impossible - fall back to PIPE_A */
- WARN_ON(pipe == -1);
- if (unlikely(pipe == -1))
- pipe = PIPE_A;
-
- return pipe;
-}
-
-void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno,
- unsigned red, unsigned green, unsigned blue,
- unsigned transp)
-{
- u32 palette_reg = (dinfo->pipe == PIPE_A) ?
- PALETTE_A : PALETTE_B;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_setcolreg: %d: (%d, %d, %d)\n",
- regno, red, green, blue);
-#endif
-
- OUTREG(palette_reg + (regno << 2),
- (red << PALETTE_8_RED_SHIFT) |
- (green << PALETTE_8_GREEN_SHIFT) |
- (blue << PALETTE_8_BLUE_SHIFT));
-}
-
-
-int intelfbhw_read_hw_state(struct intelfb_info *dinfo,
- struct intelfb_hwstate *hw, int flag)
-{
- int i;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_read_hw_state\n");
-#endif
-
- if (!hw || !dinfo)
- return -1;
-
- /* Read in as much of the HW state as possible. */
- hw->vga0_divisor = INREG(VGA0_DIVISOR);
- hw->vga1_divisor = INREG(VGA1_DIVISOR);
- hw->vga_pd = INREG(VGAPD);
- hw->dpll_a = INREG(DPLL_A);
- hw->dpll_b = INREG(DPLL_B);
- hw->fpa0 = INREG(FPA0);
- hw->fpa1 = INREG(FPA1);
- hw->fpb0 = INREG(FPB0);
- hw->fpb1 = INREG(FPB1);
-
- if (flag == 1)
- return flag;
-
-#if 0
- /* This seems to be a problem with the 852GM/855GM */
- for (i = 0; i < PALETTE_8_ENTRIES; i++) {
- hw->palette_a[i] = INREG(PALETTE_A + (i << 2));
- hw->palette_b[i] = INREG(PALETTE_B + (i << 2));
- }
-#endif
-
- if (flag == 2)
- return flag;
-
- hw->htotal_a = INREG(HTOTAL_A);
- hw->hblank_a = INREG(HBLANK_A);
- hw->hsync_a = INREG(HSYNC_A);
- hw->vtotal_a = INREG(VTOTAL_A);
- hw->vblank_a = INREG(VBLANK_A);
- hw->vsync_a = INREG(VSYNC_A);
- hw->src_size_a = INREG(SRC_SIZE_A);
- hw->bclrpat_a = INREG(BCLRPAT_A);
- hw->htotal_b = INREG(HTOTAL_B);
- hw->hblank_b = INREG(HBLANK_B);
- hw->hsync_b = INREG(HSYNC_B);
- hw->vtotal_b = INREG(VTOTAL_B);
- hw->vblank_b = INREG(VBLANK_B);
- hw->vsync_b = INREG(VSYNC_B);
- hw->src_size_b = INREG(SRC_SIZE_B);
- hw->bclrpat_b = INREG(BCLRPAT_B);
-
- if (flag == 3)
- return flag;
-
- hw->adpa = INREG(ADPA);
- hw->dvoa = INREG(DVOA);
- hw->dvob = INREG(DVOB);
- hw->dvoc = INREG(DVOC);
- hw->dvoa_srcdim = INREG(DVOA_SRCDIM);
- hw->dvob_srcdim = INREG(DVOB_SRCDIM);
- hw->dvoc_srcdim = INREG(DVOC_SRCDIM);
- hw->lvds = INREG(LVDS);
-
- if (flag == 4)
- return flag;
-
- hw->pipe_a_conf = INREG(PIPEACONF);
- hw->pipe_b_conf = INREG(PIPEBCONF);
- hw->disp_arb = INREG(DISPARB);
-
- if (flag == 5)
- return flag;
-
- hw->cursor_a_control = INREG(CURSOR_A_CONTROL);
- hw->cursor_b_control = INREG(CURSOR_B_CONTROL);
- hw->cursor_a_base = INREG(CURSOR_A_BASEADDR);
- hw->cursor_b_base = INREG(CURSOR_B_BASEADDR);
-
- if (flag == 6)
- return flag;
-
- for (i = 0; i < 4; i++) {
- hw->cursor_a_palette[i] = INREG(CURSOR_A_PALETTE0 + (i << 2));
- hw->cursor_b_palette[i] = INREG(CURSOR_B_PALETTE0 + (i << 2));
- }
-
- if (flag == 7)
- return flag;
-
- hw->cursor_size = INREG(CURSOR_SIZE);
-
- if (flag == 8)
- return flag;
-
- hw->disp_a_ctrl = INREG(DSPACNTR);
- hw->disp_b_ctrl = INREG(DSPBCNTR);
- hw->disp_a_base = INREG(DSPABASE);
- hw->disp_b_base = INREG(DSPBBASE);
- hw->disp_a_stride = INREG(DSPASTRIDE);
- hw->disp_b_stride = INREG(DSPBSTRIDE);
-
- if (flag == 9)
- return flag;
-
- hw->vgacntrl = INREG(VGACNTRL);
-
- if (flag == 10)
- return flag;
-
- hw->add_id = INREG(ADD_ID);
-
- if (flag == 11)
- return flag;
-
- for (i = 0; i < 7; i++) {
- hw->swf0x[i] = INREG(SWF00 + (i << 2));
- hw->swf1x[i] = INREG(SWF10 + (i << 2));
- if (i < 3)
- hw->swf3x[i] = INREG(SWF30 + (i << 2));
- }
-
- for (i = 0; i < 8; i++)
- hw->fence[i] = INREG(FENCE + (i << 2));
-
- hw->instpm = INREG(INSTPM);
- hw->mem_mode = INREG(MEM_MODE);
- hw->fw_blc_0 = INREG(FW_BLC_0);
- hw->fw_blc_1 = INREG(FW_BLC_1);
-
- hw->hwstam = INREG16(HWSTAM);
- hw->ier = INREG16(IER);
- hw->iir = INREG16(IIR);
- hw->imr = INREG16(IMR);
-
- return 0;
-}
-
-
-static int calc_vclock3(int index, int m, int n, int p)
-{
- if (p == 0 || n == 0)
- return 0;
- return plls[index].ref_clk * m / n / p;
-}
-
-static int calc_vclock(int index, int m1, int m2, int n, int p1, int p2,
- int lvds)
-{
- struct pll_min_max *pll = &plls[index];
- u32 m, vco, p;
-
- m = (5 * (m1 + 2)) + (m2 + 2);
- n += 2;
- vco = pll->ref_clk * m / n;
-
- if (index == PLLS_I8xx)
- p = ((p1 + 2) * (1 << (p2 + 1)));
- else
- p = ((p1) * (p2 ? 5 : 10));
- return vco / p;
-}
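
calc_vclock() above encodes the usual i8xx/i9xx PLL relation: the VCO runs at ref * m / n with m = 5*(m1+2) + (m2+2) and an implicit +2 on n, and the dot clock is the VCO divided by a post divider p derived from p1/p2. A worked standalone sketch with assumed divisor values against the 48 MHz reference (PLL_REFCLK in intelfbhw.h):

#include <stdio.h>

/* Illustrative only: recompute the i8xx dot clock the way calc_vclock()
 * does, for one assumed set of divisors. */
int main(void)
{
	unsigned int ref_clk = 48000;                 /* kHz, PLL_REFCLK */
	unsigned int m1 = 14, m2 = 8, n = 4, p1 = 2, p2 = 0; /* assumed */
	unsigned int m = 5 * (m1 + 2) + (m2 + 2);     /* 90 */
	unsigned int vco = ref_clk * m / (n + 2);     /* 720000 kHz */
	unsigned int p = (p1 + 2) * (1 << (p2 + 1));  /* i8xx: 4 * 2 = 8 */

	printf("dot clock = %u kHz\n", vco / p);      /* 90000 kHz */
	return 0;
}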
-
-#if REGDUMP
-static void intelfbhw_get_p1p2(struct intelfb_info *dinfo, int dpll,
- int *o_p1, int *o_p2)
-{
- int p1, p2;
-
- if (IS_I9XX(dinfo)) {
- if (dpll & DPLL_P1_FORCE_DIV2)
- p1 = 1;
- else
- p1 = (dpll >> DPLL_P1_SHIFT) & 0xff;
-
- p1 = ffs(p1);
-
- p2 = (dpll >> DPLL_I9XX_P2_SHIFT) & DPLL_P2_MASK;
- } else {
- if (dpll & DPLL_P1_FORCE_DIV2)
- p1 = 0;
- else
- p1 = (dpll >> DPLL_P1_SHIFT) & DPLL_P1_MASK;
- p2 = (dpll >> DPLL_P2_SHIFT) & DPLL_P2_MASK;
- }
-
- *o_p1 = p1;
- *o_p2 = p2;
-}
-#endif
-
-
-void intelfbhw_print_hw_state(struct intelfb_info *dinfo,
- struct intelfb_hwstate *hw)
-{
-#if REGDUMP
- int i, m1, m2, n, p1, p2;
- int index = dinfo->pll_index;
- DBG_MSG("intelfbhw_print_hw_state\n");
-
- if (!hw)
- return;
-	/* Dump as much of the HW state as was read in. */
- printk("hw state dump start\n");
- printk(" VGA0_DIVISOR: 0x%08x\n", hw->vga0_divisor);
- printk(" VGA1_DIVISOR: 0x%08x\n", hw->vga1_divisor);
- printk(" VGAPD: 0x%08x\n", hw->vga_pd);
- n = (hw->vga0_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m1 = (hw->vga0_divisor >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m2 = (hw->vga0_divisor >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
-
- intelfbhw_get_p1p2(dinfo, hw->vga_pd, &p1, &p2);
-
- printk(" VGA0: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n",
- m1, m2, n, p1, p2);
- printk(" VGA0: clock is %d\n",
- calc_vclock(index, m1, m2, n, p1, p2, 0));
-
- n = (hw->vga1_divisor >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m1 = (hw->vga1_divisor >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m2 = (hw->vga1_divisor >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
-
- intelfbhw_get_p1p2(dinfo, hw->vga_pd, &p1, &p2);
- printk(" VGA1: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n",
- m1, m2, n, p1, p2);
- printk(" VGA1: clock is %d\n",
- calc_vclock(index, m1, m2, n, p1, p2, 0));
-
- printk(" DPLL_A: 0x%08x\n", hw->dpll_a);
- printk(" DPLL_B: 0x%08x\n", hw->dpll_b);
- printk(" FPA0: 0x%08x\n", hw->fpa0);
- printk(" FPA1: 0x%08x\n", hw->fpa1);
- printk(" FPB0: 0x%08x\n", hw->fpb0);
- printk(" FPB1: 0x%08x\n", hw->fpb1);
-
- n = (hw->fpa0 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m1 = (hw->fpa0 >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m2 = (hw->fpa0 >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
-
- intelfbhw_get_p1p2(dinfo, hw->dpll_a, &p1, &p2);
-
- printk(" PLLA0: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n",
- m1, m2, n, p1, p2);
- printk(" PLLA0: clock is %d\n",
- calc_vclock(index, m1, m2, n, p1, p2, 0));
-
- n = (hw->fpa1 >> FP_N_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m1 = (hw->fpa1 >> FP_M1_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
- m2 = (hw->fpa1 >> FP_M2_DIVISOR_SHIFT) & FP_DIVISOR_MASK;
-
- intelfbhw_get_p1p2(dinfo, hw->dpll_a, &p1, &p2);
-
- printk(" PLLA1: (m1, m2, n, p1, p2) = (%d, %d, %d, %d, %d)\n",
- m1, m2, n, p1, p2);
- printk(" PLLA1: clock is %d\n",
- calc_vclock(index, m1, m2, n, p1, p2, 0));
-
-#if 0
- printk(" PALETTE_A:\n");
-	for (i = 0; i < PALETTE_8_ENTRIES; i++)
- printk(" %3d: 0x%08x\n", i, hw->palette_a[i]);
- printk(" PALETTE_B:\n");
-	for (i = 0; i < PALETTE_8_ENTRIES; i++)
- printk(" %3d: 0x%08x\n", i, hw->palette_b[i]);
-#endif
-
- printk(" HTOTAL_A: 0x%08x\n", hw->htotal_a);
- printk(" HBLANK_A: 0x%08x\n", hw->hblank_a);
- printk(" HSYNC_A: 0x%08x\n", hw->hsync_a);
- printk(" VTOTAL_A: 0x%08x\n", hw->vtotal_a);
- printk(" VBLANK_A: 0x%08x\n", hw->vblank_a);
- printk(" VSYNC_A: 0x%08x\n", hw->vsync_a);
- printk(" SRC_SIZE_A: 0x%08x\n", hw->src_size_a);
- printk(" BCLRPAT_A: 0x%08x\n", hw->bclrpat_a);
- printk(" HTOTAL_B: 0x%08x\n", hw->htotal_b);
- printk(" HBLANK_B: 0x%08x\n", hw->hblank_b);
- printk(" HSYNC_B: 0x%08x\n", hw->hsync_b);
- printk(" VTOTAL_B: 0x%08x\n", hw->vtotal_b);
- printk(" VBLANK_B: 0x%08x\n", hw->vblank_b);
- printk(" VSYNC_B: 0x%08x\n", hw->vsync_b);
- printk(" SRC_SIZE_B: 0x%08x\n", hw->src_size_b);
- printk(" BCLRPAT_B: 0x%08x\n", hw->bclrpat_b);
-
- printk(" ADPA: 0x%08x\n", hw->adpa);
- printk(" DVOA: 0x%08x\n", hw->dvoa);
- printk(" DVOB: 0x%08x\n", hw->dvob);
- printk(" DVOC: 0x%08x\n", hw->dvoc);
- printk(" DVOA_SRCDIM: 0x%08x\n", hw->dvoa_srcdim);
- printk(" DVOB_SRCDIM: 0x%08x\n", hw->dvob_srcdim);
- printk(" DVOC_SRCDIM: 0x%08x\n", hw->dvoc_srcdim);
- printk(" LVDS: 0x%08x\n", hw->lvds);
-
- printk(" PIPEACONF: 0x%08x\n", hw->pipe_a_conf);
- printk(" PIPEBCONF: 0x%08x\n", hw->pipe_b_conf);
- printk(" DISPARB: 0x%08x\n", hw->disp_arb);
-
- printk(" CURSOR_A_CONTROL: 0x%08x\n", hw->cursor_a_control);
- printk(" CURSOR_B_CONTROL: 0x%08x\n", hw->cursor_b_control);
- printk(" CURSOR_A_BASEADDR: 0x%08x\n", hw->cursor_a_base);
- printk(" CURSOR_B_BASEADDR: 0x%08x\n", hw->cursor_b_base);
-
- printk(" CURSOR_A_PALETTE: ");
- for (i = 0; i < 4; i++) {
- printk("0x%08x", hw->cursor_a_palette[i]);
- if (i < 3)
- printk(", ");
- }
- printk("\n");
- printk(" CURSOR_B_PALETTE: ");
- for (i = 0; i < 4; i++) {
- printk("0x%08x", hw->cursor_b_palette[i]);
- if (i < 3)
- printk(", ");
- }
- printk("\n");
-
- printk(" CURSOR_SIZE: 0x%08x\n", hw->cursor_size);
-
- printk(" DSPACNTR: 0x%08x\n", hw->disp_a_ctrl);
- printk(" DSPBCNTR: 0x%08x\n", hw->disp_b_ctrl);
- printk(" DSPABASE: 0x%08x\n", hw->disp_a_base);
- printk(" DSPBBASE: 0x%08x\n", hw->disp_b_base);
- printk(" DSPASTRIDE: 0x%08x\n", hw->disp_a_stride);
- printk(" DSPBSTRIDE: 0x%08x\n", hw->disp_b_stride);
-
- printk(" VGACNTRL: 0x%08x\n", hw->vgacntrl);
- printk(" ADD_ID: 0x%08x\n", hw->add_id);
-
- for (i = 0; i < 7; i++) {
- printk(" SWF0%d 0x%08x\n", i,
- hw->swf0x[i]);
- }
- for (i = 0; i < 7; i++) {
- printk(" SWF1%d 0x%08x\n", i,
- hw->swf1x[i]);
- }
- for (i = 0; i < 3; i++) {
- printk(" SWF3%d 0x%08x\n", i,
- hw->swf3x[i]);
- }
- for (i = 0; i < 8; i++)
- printk(" FENCE%d 0x%08x\n", i,
- hw->fence[i]);
-
- printk(" INSTPM 0x%08x\n", hw->instpm);
- printk(" MEM_MODE 0x%08x\n", hw->mem_mode);
- printk(" FW_BLC_0 0x%08x\n", hw->fw_blc_0);
- printk(" FW_BLC_1 0x%08x\n", hw->fw_blc_1);
-
- printk(" HWSTAM 0x%04x\n", hw->hwstam);
- printk(" IER 0x%04x\n", hw->ier);
- printk(" IIR 0x%04x\n", hw->iir);
- printk(" IMR 0x%04x\n", hw->imr);
- printk("hw state dump end\n");
-#endif
-}
-
-
-
-/* Split the M parameter into M1 and M2. */
-static int splitm(int index, unsigned int m, unsigned int *retm1,
- unsigned int *retm2)
-{
- int m1, m2;
- int testm;
- struct pll_min_max *pll = &plls[index];
-
- /* no point optimising too much - brute force m */
- for (m1 = pll->min_m1; m1 < pll->max_m1 + 1; m1++) {
- for (m2 = pll->min_m2; m2 < pll->max_m2 + 1; m2++) {
- testm = (5 * (m1 + 2)) + (m2 + 2);
- if (testm == m) {
- *retm1 = (unsigned int)m1;
- *retm2 = (unsigned int)m2;
- return 0;
- }
- }
- }
- return 1;
-}
-
-/* Split the P parameter into P1 and P2. */
-static int splitp(int index, unsigned int p, unsigned int *retp1,
- unsigned int *retp2)
-{
- int p1, p2;
- struct pll_min_max *pll = &plls[index];
-
- if (index == PLLS_I9xx) {
- p2 = (p % 10) ? 1 : 0;
-
- p1 = p / (p2 ? 5 : 10);
-
- *retp1 = (unsigned int)p1;
- *retp2 = (unsigned int)p2;
- return 0;
- }
-
- if (p % 4 == 0)
- p2 = 1;
- else
- p2 = 0;
- p1 = (p / (1 << (p2 + 1))) - 2;
- if (p % 4 == 0 && p1 < pll->min_p1) {
- p2 = 0;
- p1 = (p / (1 << (p2 + 1))) - 2;
- }
- if (p1 < pll->min_p1 || p1 > pll->max_p1 ||
- (p1 + 2) * (1 << (p2 + 1)) != p) {
- return 1;
- } else {
- *retp1 = (unsigned int)p1;
- *retp2 = (unsigned int)p2;
- return 0;
- }
-}
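
On the pre-i9xx path, the encoding splitp() inverts is p = (p1 + 2) * 2^(p2 + 1); the i9xx branch instead treats p2 as a divide-by-5/divide-by-10 selector. A small round-trip sketch of the i8xx case (starting values assumed):

#include <stdio.h>

/* Illustrative round trip of the i8xx P encoding that splitp() inverts:
 * p = (p1 + 2) << (p2 + 1). */
int main(void)
{
	unsigned int p1 = 2, p2 = 1;
	unsigned int p = (p1 + 2) << (p2 + 1);     /* 4 * 4 = 16 */

	/* Invert it the way splitp() does when p % 4 == 0. */
	unsigned int rp2 = (p % 4 == 0) ? 1 : 0;
	unsigned int rp1 = (p >> (rp2 + 1)) - 2;

	printf("p=%u -> p1=%u, p2=%u\n", p, rp1, rp2); /* p1=2, p2=1 */
	return 0;
}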
-
-static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
- u32 *retn, u32 *retp1, u32 *retp2, u32 *retclock)
-{
- u32 m1, m2, n, p1, p2, n1, testm;
- u32 f_vco, p, p_best = 0, m, f_out = 0;
- u32 err_best = 10000000;
- u32 n_best = 0, m_best = 0, f_err;
- u32 p_min, p_max, p_inc, div_max;
- struct pll_min_max *pll = &plls[index];
-
- DBG_MSG("Clock is %d\n", clock);
-
- div_max = pll->max_vco / clock;
-
- p_inc = (clock <= pll->p_transition_clk) ? pll->p_inc_lo : pll->p_inc_hi;
- p_min = p_inc;
- p_max = ROUND_DOWN_TO(div_max, p_inc);
- if (p_min < pll->min_p)
- p_min = pll->min_p;
- if (p_max > pll->max_p)
- p_max = pll->max_p;
-
- DBG_MSG("p range is %d-%d (%d)\n", p_min, p_max, p_inc);
-
- p = p_min;
- do {
- if (splitp(index, p, &p1, &p2)) {
- WRN_MSG("cannot split p = %d\n", p);
- p += p_inc;
- continue;
- }
- n = pll->min_n;
- f_vco = clock * p;
-
- do {
- m = ROUND_UP_TO(f_vco * n, pll->ref_clk) / pll->ref_clk;
- if (m < pll->min_m)
- m = pll->min_m + 1;
- if (m > pll->max_m)
- m = pll->max_m - 1;
- for (testm = m - 1; testm <= m; testm++) {
- f_out = calc_vclock3(index, testm, n, p);
- if (splitm(index, testm, &m1, &m2)) {
- WRN_MSG("cannot split m = %d\n",
- testm);
- continue;
- }
- if (clock > f_out)
- f_err = clock - f_out;
-				else	/* slightly bias the error for bigger clocks */
- f_err = f_out - clock + 1;
-
- if (f_err < err_best) {
- m_best = testm;
- n_best = n;
- p_best = p;
- err_best = f_err;
- }
- }
- n++;
- } while ((n <= pll->max_n) && (f_out >= clock));
- p += p_inc;
-	} while (p <= p_max);
-
- if (!m_best) {
- WRN_MSG("cannot find parameters for clock %d\n", clock);
- return 1;
- }
- m = m_best;
- n = n_best;
- p = p_best;
- splitm(index, m, &m1, &m2);
- splitp(index, p, &p1, &p2);
- n1 = n - 2;
-
- DBG_MSG("m, n, p: %d (%d,%d), %d (%d), %d (%d,%d), "
- "f: %d (%d), VCO: %d\n",
- m, m1, m2, n, n1, p, p1, p2,
- calc_vclock3(index, m, n, p),
- calc_vclock(index, m1, m2, n1, p1, p2, 0),
- calc_vclock3(index, m, n, p) * p);
- *retm1 = m1;
- *retm2 = m2;
- *retn = n1;
- *retp1 = p1;
- *retp2 = p2;
- *retclock = calc_vclock(index, m1, m2, n1, p1, p2, 0);
-
- return 0;
-}
-
-static __inline__ int check_overflow(u32 value, u32 limit,
- const char *description)
-{
- if (value > limit) {
- WRN_MSG("%s value %d exceeds limit %d\n",
- description, value, limit);
- return 1;
- }
- return 0;
-}
-
-/* It is assumed that hw is filled in with the initial state information. */
-int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
- struct intelfb_hwstate *hw,
- struct fb_var_screeninfo *var)
-{
- int pipe = intelfbhw_active_pipe(hw);
- u32 *dpll, *fp0, *fp1;
- u32 m1, m2, n, p1, p2, clock_target, clock;
- u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive;
- u32 vsync_start, vsync_end, vblank_start, vblank_end, vtotal, vactive;
- u32 vsync_pol, hsync_pol;
- u32 *vs, *vb, *vt, *hs, *hb, *ht, *ss, *pipe_conf;
- u32 stride_alignment;
-
- DBG_MSG("intelfbhw_mode_to_hw\n");
-
- /* Disable VGA */
- hw->vgacntrl |= VGA_DISABLE;
-
- /* Set which pipe's registers will be set. */
- if (pipe == PIPE_B) {
- dpll = &hw->dpll_b;
- fp0 = &hw->fpb0;
- fp1 = &hw->fpb1;
- hs = &hw->hsync_b;
- hb = &hw->hblank_b;
- ht = &hw->htotal_b;
- vs = &hw->vsync_b;
- vb = &hw->vblank_b;
- vt = &hw->vtotal_b;
- ss = &hw->src_size_b;
- pipe_conf = &hw->pipe_b_conf;
- } else {
- dpll = &hw->dpll_a;
- fp0 = &hw->fpa0;
- fp1 = &hw->fpa1;
- hs = &hw->hsync_a;
- hb = &hw->hblank_a;
- ht = &hw->htotal_a;
- vs = &hw->vsync_a;
- vb = &hw->vblank_a;
- vt = &hw->vtotal_a;
- ss = &hw->src_size_a;
- pipe_conf = &hw->pipe_a_conf;
- }
-
- /* Use ADPA register for sync control. */
- hw->adpa &= ~ADPA_USE_VGA_HVPOLARITY;
-
- /* sync polarity */
- hsync_pol = (var->sync & FB_SYNC_HOR_HIGH_ACT) ?
- ADPA_SYNC_ACTIVE_HIGH : ADPA_SYNC_ACTIVE_LOW;
- vsync_pol = (var->sync & FB_SYNC_VERT_HIGH_ACT) ?
- ADPA_SYNC_ACTIVE_HIGH : ADPA_SYNC_ACTIVE_LOW;
- hw->adpa &= ~((ADPA_SYNC_ACTIVE_MASK << ADPA_VSYNC_ACTIVE_SHIFT) |
- (ADPA_SYNC_ACTIVE_MASK << ADPA_HSYNC_ACTIVE_SHIFT));
- hw->adpa |= (hsync_pol << ADPA_HSYNC_ACTIVE_SHIFT) |
- (vsync_pol << ADPA_VSYNC_ACTIVE_SHIFT);
-
- /* Connect correct pipe to the analog port DAC */
- hw->adpa &= ~(PIPE_MASK << ADPA_PIPE_SELECT_SHIFT);
- hw->adpa |= (pipe << ADPA_PIPE_SELECT_SHIFT);
-
- /* Set DPMS state to D0 (on) */
- hw->adpa &= ~ADPA_DPMS_CONTROL_MASK;
- hw->adpa |= ADPA_DPMS_D0;
-
- hw->adpa |= ADPA_DAC_ENABLE;
-
- *dpll |= (DPLL_VCO_ENABLE | DPLL_VGA_MODE_DISABLE);
- *dpll &= ~(DPLL_RATE_SELECT_MASK | DPLL_REFERENCE_SELECT_MASK);
- *dpll |= (DPLL_REFERENCE_DEFAULT | DPLL_RATE_SELECT_FP0);
-
- /* Desired clock in kHz */
- clock_target = 1000000000 / var->pixclock;
-
- if (calc_pll_params(dinfo->pll_index, clock_target, &m1, &m2,
- &n, &p1, &p2, &clock)) {
- WRN_MSG("calc_pll_params failed\n");
- return 1;
- }
-
- /* Check for overflow. */
- if (check_overflow(p1, DPLL_P1_MASK, "PLL P1 parameter"))
- return 1;
- if (check_overflow(p2, DPLL_P2_MASK, "PLL P2 parameter"))
- return 1;
- if (check_overflow(m1, FP_DIVISOR_MASK, "PLL M1 parameter"))
- return 1;
- if (check_overflow(m2, FP_DIVISOR_MASK, "PLL M2 parameter"))
- return 1;
- if (check_overflow(n, FP_DIVISOR_MASK, "PLL N parameter"))
- return 1;
-
- *dpll &= ~DPLL_P1_FORCE_DIV2;
- *dpll &= ~((DPLL_P2_MASK << DPLL_P2_SHIFT) |
- (DPLL_P1_MASK << DPLL_P1_SHIFT));
-
- if (IS_I9XX(dinfo)) {
- *dpll |= (p2 << DPLL_I9XX_P2_SHIFT);
- *dpll |= (1 << (p1 - 1)) << DPLL_P1_SHIFT;
- } else
- *dpll |= (p2 << DPLL_P2_SHIFT) | (p1 << DPLL_P1_SHIFT);
-
- *fp0 = (n << FP_N_DIVISOR_SHIFT) |
- (m1 << FP_M1_DIVISOR_SHIFT) |
- (m2 << FP_M2_DIVISOR_SHIFT);
- *fp1 = *fp0;
-
- hw->dvob &= ~PORT_ENABLE;
- hw->dvoc &= ~PORT_ENABLE;
-
- /* Use display plane A. */
- hw->disp_a_ctrl |= DISPPLANE_PLANE_ENABLE;
- hw->disp_a_ctrl &= ~DISPPLANE_GAMMA_ENABLE;
- hw->disp_a_ctrl &= ~DISPPLANE_PIXFORMAT_MASK;
- switch (intelfb_var_to_depth(var)) {
- case 8:
- hw->disp_a_ctrl |= DISPPLANE_8BPP | DISPPLANE_GAMMA_ENABLE;
- break;
- case 15:
- hw->disp_a_ctrl |= DISPPLANE_15_16BPP;
- break;
- case 16:
- hw->disp_a_ctrl |= DISPPLANE_16BPP;
- break;
- case 24:
- hw->disp_a_ctrl |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- }
- hw->disp_a_ctrl &= ~(PIPE_MASK << DISPPLANE_SEL_PIPE_SHIFT);
- hw->disp_a_ctrl |= (pipe << DISPPLANE_SEL_PIPE_SHIFT);
-
- /* Set CRTC registers. */
- hactive = var->xres;
- hsync_start = hactive + var->right_margin;
- hsync_end = hsync_start + var->hsync_len;
- htotal = hsync_end + var->left_margin;
- hblank_start = hactive;
- hblank_end = htotal;
-
- DBG_MSG("H: act %d, ss %d, se %d, tot %d bs %d, be %d\n",
- hactive, hsync_start, hsync_end, htotal, hblank_start,
- hblank_end);
-
- vactive = var->yres;
- if (var->vmode & FB_VMODE_INTERLACED)
- vactive--; /* the chip adds 2 halflines automatically */
- vsync_start = vactive + var->lower_margin;
- vsync_end = vsync_start + var->vsync_len;
- vtotal = vsync_end + var->upper_margin;
- vblank_start = vactive;
- vblank_end = vsync_end + 1;
-
- DBG_MSG("V: act %d, ss %d, se %d, tot %d bs %d, be %d\n",
- vactive, vsync_start, vsync_end, vtotal, vblank_start,
- vblank_end);
-
- /* Adjust for register values, and check for overflow. */
- hactive--;
- if (check_overflow(hactive, HACTIVE_MASK, "CRTC hactive"))
- return 1;
- hsync_start--;
- if (check_overflow(hsync_start, HSYNCSTART_MASK, "CRTC hsync_start"))
- return 1;
- hsync_end--;
- if (check_overflow(hsync_end, HSYNCEND_MASK, "CRTC hsync_end"))
- return 1;
- htotal--;
- if (check_overflow(htotal, HTOTAL_MASK, "CRTC htotal"))
- return 1;
- hblank_start--;
- if (check_overflow(hblank_start, HBLANKSTART_MASK, "CRTC hblank_start"))
- return 1;
- hblank_end--;
- if (check_overflow(hblank_end, HBLANKEND_MASK, "CRTC hblank_end"))
- return 1;
-
- vactive--;
- if (check_overflow(vactive, VACTIVE_MASK, "CRTC vactive"))
- return 1;
- vsync_start--;
- if (check_overflow(vsync_start, VSYNCSTART_MASK, "CRTC vsync_start"))
- return 1;
- vsync_end--;
- if (check_overflow(vsync_end, VSYNCEND_MASK, "CRTC vsync_end"))
- return 1;
- vtotal--;
- if (check_overflow(vtotal, VTOTAL_MASK, "CRTC vtotal"))
- return 1;
- vblank_start--;
- if (check_overflow(vblank_start, VBLANKSTART_MASK, "CRTC vblank_start"))
- return 1;
- vblank_end--;
- if (check_overflow(vblank_end, VBLANKEND_MASK, "CRTC vblank_end"))
- return 1;
-
- *ht = (htotal << HTOTAL_SHIFT) | (hactive << HACTIVE_SHIFT);
- *hb = (hblank_start << HBLANKSTART_SHIFT) |
- (hblank_end << HSYNCEND_SHIFT);
- *hs = (hsync_start << HSYNCSTART_SHIFT) | (hsync_end << HSYNCEND_SHIFT);
-
- *vt = (vtotal << VTOTAL_SHIFT) | (vactive << VACTIVE_SHIFT);
- *vb = (vblank_start << VBLANKSTART_SHIFT) |
- (vblank_end << VSYNCEND_SHIFT);
- *vs = (vsync_start << VSYNCSTART_SHIFT) | (vsync_end << VSYNCEND_SHIFT);
- *ss = (hactive << SRC_SIZE_HORIZ_SHIFT) |
- (vactive << SRC_SIZE_VERT_SHIFT);
-
- hw->disp_a_stride = dinfo->pitch;
- DBG_MSG("pitch is %d\n", hw->disp_a_stride);
-
- hw->disp_a_base = hw->disp_a_stride * var->yoffset +
- var->xoffset * var->bits_per_pixel / 8;
-
- hw->disp_a_base += dinfo->fb.offset << 12;
-
- /* Check stride alignment. */
- stride_alignment = IS_I9XX(dinfo) ? STRIDE_ALIGNMENT_I9XX :
- STRIDE_ALIGNMENT;
- if (hw->disp_a_stride % stride_alignment != 0) {
- WRN_MSG("display stride %d has bad alignment %d\n",
- hw->disp_a_stride, stride_alignment);
- return 1;
- }
-
- /* Set the palette to 8-bit mode. */
- *pipe_conf &= ~PIPECONF_GAMMA;
-
- if (var->vmode & FB_VMODE_INTERLACED)
- *pipe_conf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
- else
- *pipe_conf &= ~PIPECONF_INTERLACE_MASK;
-
- return 0;
-}
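
The horizontal timings above follow the standard fbdev-margin-to-CRTC translation: sync starts right_margin pixels after the active area, runs for hsync_len, and left_margin pads out to the total; the registers then take N-1 values packed as total<<16 | active (HTOTAL_SHIFT/HACTIVE_SHIFT in intelfbhw.h). A standalone sketch for an assumed 1024x768 mode:

#include <stdio.h>

/* Illustrative only: the fbdev-margin-to-CRTC translation above, for an
 * assumed 1024x768 mode (margin and sync-length values are examples). */
int main(void)
{
	unsigned int xres = 1024, right = 24, hslen = 136, left = 160;
	unsigned int hactive = xres;
	unsigned int hsync_start = hactive + right;   /* 1048 */
	unsigned int hsync_end = hsync_start + hslen; /* 1184 */
	unsigned int htotal = hsync_end + left;       /* 1344 */

	/* Registers take N-1 values, packed as (total-1)<<16 | (active-1). */
	unsigned int ht = ((htotal - 1) << 16) | (hactive - 1);

	printf("HTOTAL value = 0x%08x\n", ht);        /* 0x053f03ff */
	return 0;
}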
-
-/* Program a (non-VGA) video mode. */
-int intelfbhw_program_mode(struct intelfb_info *dinfo,
- const struct intelfb_hwstate *hw, int blank)
-{
- u32 tmp;
- const u32 *dpll, *fp0, *fp1, *pipe_conf;
- const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss;
- u32 dpll_reg, fp0_reg, fp1_reg, pipe_conf_reg, pipe_stat_reg;
- u32 hsync_reg, htotal_reg, hblank_reg;
- u32 vsync_reg, vtotal_reg, vblank_reg;
- u32 src_size_reg;
- u32 count, tmp_val[3];
-
- /* Assume single pipe */
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_program_mode\n");
-#endif
-
- /* Disable VGA */
- tmp = INREG(VGACNTRL);
- tmp |= VGA_DISABLE;
- OUTREG(VGACNTRL, tmp);
-
- dinfo->pipe = intelfbhw_active_pipe(hw);
-
- if (dinfo->pipe == PIPE_B) {
- dpll = &hw->dpll_b;
- fp0 = &hw->fpb0;
- fp1 = &hw->fpb1;
- pipe_conf = &hw->pipe_b_conf;
- hs = &hw->hsync_b;
- hb = &hw->hblank_b;
- ht = &hw->htotal_b;
- vs = &hw->vsync_b;
- vb = &hw->vblank_b;
- vt = &hw->vtotal_b;
- ss = &hw->src_size_b;
- dpll_reg = DPLL_B;
- fp0_reg = FPB0;
- fp1_reg = FPB1;
- pipe_conf_reg = PIPEBCONF;
- pipe_stat_reg = PIPEBSTAT;
- hsync_reg = HSYNC_B;
- htotal_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- vsync_reg = VSYNC_B;
- vtotal_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- src_size_reg = SRC_SIZE_B;
- } else {
- dpll = &hw->dpll_a;
- fp0 = &hw->fpa0;
- fp1 = &hw->fpa1;
- pipe_conf = &hw->pipe_a_conf;
- hs = &hw->hsync_a;
- hb = &hw->hblank_a;
- ht = &hw->htotal_a;
- vs = &hw->vsync_a;
- vb = &hw->vblank_a;
- vt = &hw->vtotal_a;
- ss = &hw->src_size_a;
- dpll_reg = DPLL_A;
- fp0_reg = FPA0;
- fp1_reg = FPA1;
- pipe_conf_reg = PIPEACONF;
- pipe_stat_reg = PIPEASTAT;
- hsync_reg = HSYNC_A;
- htotal_reg = HTOTAL_A;
- hblank_reg = HBLANK_A;
- vsync_reg = VSYNC_A;
- vtotal_reg = VTOTAL_A;
- vblank_reg = VBLANK_A;
- src_size_reg = SRC_SIZE_A;
- }
-
- /* turn off pipe */
- tmp = INREG(pipe_conf_reg);
- tmp &= ~PIPECONF_ENABLE;
- OUTREG(pipe_conf_reg, tmp);
-
- count = 0;
- do {
- tmp_val[count % 3] = INREG(PIPEA_DSL);
- if ((tmp_val[0] == tmp_val[1]) && (tmp_val[1] == tmp_val[2]))
- break;
- count++;
- udelay(1);
- if (count % 200 == 0) {
- tmp = INREG(pipe_conf_reg);
- tmp &= ~PIPECONF_ENABLE;
- OUTREG(pipe_conf_reg, tmp);
- }
- } while (count < 2000);
-
- OUTREG(ADPA, INREG(ADPA) & ~ADPA_DAC_ENABLE);
-
- /* Disable planes A and B. */
- tmp = INREG(DSPACNTR);
- tmp &= ~DISPPLANE_PLANE_ENABLE;
- OUTREG(DSPACNTR, tmp);
- tmp = INREG(DSPBCNTR);
- tmp &= ~DISPPLANE_PLANE_ENABLE;
- OUTREG(DSPBCNTR, tmp);
-
-	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms). */
- mdelay(20);
-
- OUTREG(DVOB, INREG(DVOB) & ~PORT_ENABLE);
- OUTREG(DVOC, INREG(DVOC) & ~PORT_ENABLE);
- OUTREG(ADPA, INREG(ADPA) & ~ADPA_DAC_ENABLE);
-
- /* Disable Sync */
- tmp = INREG(ADPA);
- tmp &= ~ADPA_DPMS_CONTROL_MASK;
- tmp |= ADPA_DPMS_D3;
- OUTREG(ADPA, tmp);
-
- /* do some funky magic - xyzzy */
- OUTREG(0x61204, 0xabcd0000);
-
- /* turn off PLL */
- tmp = INREG(dpll_reg);
- tmp &= ~DPLL_VCO_ENABLE;
- OUTREG(dpll_reg, tmp);
-
- /* Set PLL parameters */
- OUTREG(fp0_reg, *fp0);
- OUTREG(fp1_reg, *fp1);
-
- /* Enable PLL */
- OUTREG(dpll_reg, *dpll);
-
- /* Set DVOs B/C */
- OUTREG(DVOB, hw->dvob);
- OUTREG(DVOC, hw->dvoc);
-
- /* undo funky magic */
- OUTREG(0x61204, 0x00000000);
-
- /* Set ADPA */
- OUTREG(ADPA, INREG(ADPA) | ADPA_DAC_ENABLE);
- OUTREG(ADPA, (hw->adpa & ~(ADPA_DPMS_CONTROL_MASK)) | ADPA_DPMS_D3);
-
- /* Set pipe parameters */
- OUTREG(hsync_reg, *hs);
- OUTREG(hblank_reg, *hb);
- OUTREG(htotal_reg, *ht);
- OUTREG(vsync_reg, *vs);
- OUTREG(vblank_reg, *vb);
- OUTREG(vtotal_reg, *vt);
- OUTREG(src_size_reg, *ss);
-
- switch (dinfo->info->var.vmode & (FB_VMODE_INTERLACED |
- FB_VMODE_ODD_FLD_FIRST)) {
- case FB_VMODE_INTERLACED | FB_VMODE_ODD_FLD_FIRST:
- OUTREG(pipe_stat_reg, 0xFFFF | PIPESTAT_FLD_EVT_ODD_EN);
- break;
- case FB_VMODE_INTERLACED: /* even lines first */
- OUTREG(pipe_stat_reg, 0xFFFF | PIPESTAT_FLD_EVT_EVEN_EN);
- break;
- default: /* non-interlaced */
- OUTREG(pipe_stat_reg, 0xFFFF); /* clear all status bits only */
- }
- /* Enable pipe */
- OUTREG(pipe_conf_reg, *pipe_conf | PIPECONF_ENABLE);
-
- /* Enable sync */
- tmp = INREG(ADPA);
- tmp &= ~ADPA_DPMS_CONTROL_MASK;
- tmp |= ADPA_DPMS_D0;
- OUTREG(ADPA, tmp);
-
- /* setup display plane */
- if (dinfo->pdev->device == PCI_DEVICE_ID_INTEL_830M) {
- /*
- * i830M errata: the display plane must be enabled
- * to allow writes to the other bits in the plane
- * control register.
- */
- tmp = INREG(DSPACNTR);
- if ((tmp & DISPPLANE_PLANE_ENABLE) != DISPPLANE_PLANE_ENABLE) {
- tmp |= DISPPLANE_PLANE_ENABLE;
- OUTREG(DSPACNTR, tmp);
- OUTREG(DSPACNTR,
- hw->disp_a_ctrl|DISPPLANE_PLANE_ENABLE);
- mdelay(1);
- }
- }
-
- OUTREG(DSPACNTR, hw->disp_a_ctrl & ~DISPPLANE_PLANE_ENABLE);
- OUTREG(DSPASTRIDE, hw->disp_a_stride);
- OUTREG(DSPABASE, hw->disp_a_base);
-
- /* Enable plane */
- if (!blank) {
- tmp = INREG(DSPACNTR);
- tmp |= DISPPLANE_PLANE_ENABLE;
- OUTREG(DSPACNTR, tmp);
- OUTREG(DSPABASE, hw->disp_a_base);
- }
-
- return 0;
-}
-
-/* forward declarations */
-static void refresh_ring(struct intelfb_info *dinfo);
-static void reset_state(struct intelfb_info *dinfo);
-static void do_flush(struct intelfb_info *dinfo);
-
-static u32 get_ring_space(struct intelfb_info *dinfo)
-{
- u32 ring_space;
-
- if (dinfo->ring_tail >= dinfo->ring_head)
- ring_space = dinfo->ring.size -
- (dinfo->ring_tail - dinfo->ring_head);
- else
- ring_space = dinfo->ring_head - dinfo->ring_tail;
-
- if (ring_space > RING_MIN_FREE)
- ring_space -= RING_MIN_FREE;
- else
- ring_space = 0;
-
- return ring_space;
-}
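
get_ring_space() above is the standard circular-buffer free-space computation, less the RING_MIN_FREE pad that the tail must never consume (see the comment at the RING_MIN_FREE definition in intelfbhw.h). A standalone sketch of the same arithmetic with assumed head/tail offsets:

#include <stdio.h>

/* Illustrative sketch of get_ring_space(): free bytes in a circular
 * buffer, minus the RING_MIN_FREE pad the tail may never consume.
 * The size and offsets are assumed example values. */
int main(void)
{
	unsigned int size = 64 * 1024, min_free = 64;
	unsigned int head = 1024, tail = 512;   /* tail behind head */
	unsigned int space;

	if (tail >= head)
		space = size - (tail - head);
	else
		space = head - tail;

	space = (space > min_free) ? space - min_free : 0;
	printf("usable space = %u bytes\n", space);  /* 512 - 64 = 448 */
	return 0;
}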
-
-static int wait_ring(struct intelfb_info *dinfo, int n)
-{
- int i = 0;
- unsigned long end;
- u32 last_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK;
-
-#if VERBOSE > 0
- DBG_MSG("wait_ring: %d\n", n);
-#endif
-
- end = jiffies + (HZ * 3);
- while (dinfo->ring_space < n) {
- dinfo->ring_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK;
- dinfo->ring_space = get_ring_space(dinfo);
-
- if (dinfo->ring_head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = dinfo->ring_head;
- }
- i++;
- if (time_before(end, jiffies)) {
- if (!i) {
- /* Try again */
- reset_state(dinfo);
- refresh_ring(dinfo);
- do_flush(dinfo);
- end = jiffies + (HZ * 3);
- i = 1;
- } else {
- WRN_MSG("ring buffer : space: %d wanted %d\n",
- dinfo->ring_space, n);
- WRN_MSG("lockup - turning off hardware "
- "acceleration\n");
- dinfo->ring_lockup = 1;
- break;
- }
- }
- udelay(1);
- }
- return i;
-}
-
-static void do_flush(struct intelfb_info *dinfo)
-{
- START_RING(2);
- OUT_RING(MI_FLUSH | MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE);
- OUT_RING(MI_NOOP);
- ADVANCE_RING();
-}
-
-void intelfbhw_do_sync(struct intelfb_info *dinfo)
-{
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_do_sync\n");
-#endif
-
- if (!dinfo->accel)
- return;
-
- /*
- * Send a flush, then wait until the ring is empty. This is what
- * the XFree86 driver does, and actually it doesn't seem a lot worse
- * than the recommended method (both have problems).
- */
- do_flush(dinfo);
- wait_ring(dinfo, dinfo->ring.size - RING_MIN_FREE);
- dinfo->ring_space = dinfo->ring.size - RING_MIN_FREE;
-}
-
-static void refresh_ring(struct intelfb_info *dinfo)
-{
-#if VERBOSE > 0
- DBG_MSG("refresh_ring\n");
-#endif
-
- dinfo->ring_head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK;
- dinfo->ring_tail = INREG(PRI_RING_TAIL) & RING_TAIL_MASK;
- dinfo->ring_space = get_ring_space(dinfo);
-}
-
-static void reset_state(struct intelfb_info *dinfo)
-{
- int i;
- u32 tmp;
-
-#if VERBOSE > 0
- DBG_MSG("reset_state\n");
-#endif
-
- for (i = 0; i < FENCE_NUM; i++)
- OUTREG(FENCE + (i << 2), 0);
-
- /* Flush the ring buffer if it's enabled. */
- tmp = INREG(PRI_RING_LENGTH);
- if (tmp & RING_ENABLE) {
-#if VERBOSE > 0
- DBG_MSG("reset_state: ring was enabled\n");
-#endif
- refresh_ring(dinfo);
- intelfbhw_do_sync(dinfo);
- DO_RING_IDLE();
- }
-
- OUTREG(PRI_RING_LENGTH, 0);
- OUTREG(PRI_RING_HEAD, 0);
- OUTREG(PRI_RING_TAIL, 0);
- OUTREG(PRI_RING_START, 0);
-}
-
-/* Stop the 2D engine, and turn off the ring buffer. */
-void intelfbhw_2d_stop(struct intelfb_info *dinfo)
-{
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_2d_stop: accel: %d, ring_active: %d\n",
- dinfo->accel, dinfo->ring_active);
-#endif
-
- if (!dinfo->accel)
- return;
-
- dinfo->ring_active = 0;
- reset_state(dinfo);
-}
-
-/*
- * Enable the ring buffer, and initialise the 2D engine.
- * It is assumed that the graphics engine has been stopped by previously
- * calling intelfbhw_2d_stop().
- */
-void intelfbhw_2d_start(struct intelfb_info *dinfo)
-{
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_2d_start: accel: %d, ring_active: %d\n",
- dinfo->accel, dinfo->ring_active);
-#endif
-
- if (!dinfo->accel)
- return;
-
- /* Initialise the primary ring buffer. */
- OUTREG(PRI_RING_LENGTH, 0);
- OUTREG(PRI_RING_TAIL, 0);
- OUTREG(PRI_RING_HEAD, 0);
-
- OUTREG(PRI_RING_START, dinfo->ring.physical & RING_START_MASK);
- OUTREG(PRI_RING_LENGTH,
- ((dinfo->ring.size - GTT_PAGE_SIZE) & RING_LENGTH_MASK) |
- RING_NO_REPORT | RING_ENABLE);
- refresh_ring(dinfo);
- dinfo->ring_active = 1;
-}
-
-/* 2D fillrect (solid fill or invert) */
-void intelfbhw_do_fillrect(struct intelfb_info *dinfo, u32 x, u32 y, u32 w,
- u32 h, u32 color, u32 pitch, u32 bpp, u32 rop)
-{
- u32 br00, br09, br13, br14, br16;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_do_fillrect: (%d,%d) %dx%d, c 0x%06x, p %d bpp %d, "
- "rop 0x%02x\n", x, y, w, h, color, pitch, bpp, rop);
-#endif
-
- br00 = COLOR_BLT_CMD;
- br09 = dinfo->fb_start + (y * pitch + x * (bpp / 8));
- br13 = (rop << ROP_SHIFT) | pitch;
- br14 = (h << HEIGHT_SHIFT) | ((w * (bpp / 8)) << WIDTH_SHIFT);
- br16 = color;
-
- switch (bpp) {
- case 8:
- br13 |= COLOR_DEPTH_8;
- break;
- case 16:
- br13 |= COLOR_DEPTH_16;
- break;
- case 32:
- br13 |= COLOR_DEPTH_32;
- br00 |= WRITE_ALPHA | WRITE_RGB;
- break;
- }
-
- START_RING(6);
- OUT_RING(br00);
- OUT_RING(br13);
- OUT_RING(br14);
- OUT_RING(br09);
- OUT_RING(br16);
- OUT_RING(MI_NOOP);
- ADVANCE_RING();
-
-#if VERBOSE > 0
- DBG_MSG("ring = 0x%08x, 0x%08x (%d)\n", dinfo->ring_head,
- dinfo->ring_tail, dinfo->ring_space);
-#endif
-}
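
For reference, the COLOR_BLT operands built above pack as rop<<16 | pitch (br13) and height<<16 | width-in-bytes (br14). A standalone sketch of the packing for an assumed 100x50, 16 bpp fill with GXcopy (0xf0, PAT_ROP_GXCOPY in intelfbhw.h):

#include <stdio.h>

#define ROP_SHIFT    16
#define HEIGHT_SHIFT 16

/* Illustrative only: the COLOR_BLT operand packing used above, for an
 * assumed 100x50 fill at 16 bpp, pitch 2048, rop GXcopy (0xf0). */
int main(void)
{
	unsigned int w = 100, h = 50, bpp = 16, pitch = 2048, rop = 0xf0;
	unsigned int br13 = (rop << ROP_SHIFT) | pitch;
	unsigned int br14 = (h << HEIGHT_SHIFT) | (w * (bpp / 8));

	printf("br13=0x%08x br14=0x%08x\n", br13, br14);
	return 0;
}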
-
-void
-intelfbhw_do_bitblt(struct intelfb_info *dinfo, u32 curx, u32 cury,
- u32 dstx, u32 dsty, u32 w, u32 h, u32 pitch, u32 bpp)
-{
- u32 br00, br09, br11, br12, br13, br22, br23, br26;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_do_bitblt: (%d,%d)->(%d,%d) %dx%d, p %d bpp %d\n",
- curx, cury, dstx, dsty, w, h, pitch, bpp);
-#endif
-
- br00 = XY_SRC_COPY_BLT_CMD;
- br09 = dinfo->fb_start;
- br11 = (pitch << PITCH_SHIFT);
- br12 = dinfo->fb_start;
- br13 = (SRC_ROP_GXCOPY << ROP_SHIFT) | (pitch << PITCH_SHIFT);
- br22 = (dstx << WIDTH_SHIFT) | (dsty << HEIGHT_SHIFT);
- br23 = ((dstx + w) << WIDTH_SHIFT) |
- ((dsty + h) << HEIGHT_SHIFT);
- br26 = (curx << WIDTH_SHIFT) | (cury << HEIGHT_SHIFT);
-
- switch (bpp) {
- case 8:
- br13 |= COLOR_DEPTH_8;
- break;
- case 16:
- br13 |= COLOR_DEPTH_16;
- break;
- case 32:
- br13 |= COLOR_DEPTH_32;
- br00 |= WRITE_ALPHA | WRITE_RGB;
- break;
- }
-
- START_RING(8);
- OUT_RING(br00);
- OUT_RING(br13);
- OUT_RING(br22);
- OUT_RING(br23);
- OUT_RING(br09);
- OUT_RING(br26);
- OUT_RING(br11);
- OUT_RING(br12);
- ADVANCE_RING();
-}
-
-int intelfbhw_do_drawglyph(struct intelfb_info *dinfo, u32 fg, u32 bg, u32 w,
- u32 h, const u8* cdat, u32 x, u32 y, u32 pitch,
- u32 bpp)
-{
- int nbytes, ndwords, pad, tmp;
- u32 br00, br09, br13, br18, br19, br22, br23;
- int dat, ix, iy, iw;
- int i, j;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_do_drawglyph: (%d,%d) %dx%d\n", x, y, w, h);
-#endif
-
- /* size in bytes of a padded scanline */
- nbytes = ROUND_UP_TO(w, 16) / 8;
-
- /* Total bytes of padded scanline data to write out. */
- nbytes = nbytes * h;
-
- /*
- * Check if the glyph data exceeds the immediate mode limit.
- * It would take a large font (1K pixels) to hit this limit.
- */
- if (nbytes > MAX_MONO_IMM_SIZE)
- return 0;
-
- /* Src data is packaged a dword (32-bit) at a time. */
- ndwords = ROUND_UP_TO(nbytes, 4) / 4;
-
- /*
-	 * Ring has to be padded to a quad word. But because the command starts
-	 * with 7 dwords, pad only if there is an even number of ndwords.
- */
- pad = !(ndwords % 2);
-
- tmp = (XY_MONO_SRC_IMM_BLT_CMD & DW_LENGTH_MASK) + ndwords;
- br00 = (XY_MONO_SRC_IMM_BLT_CMD & ~DW_LENGTH_MASK) | tmp;
- br09 = dinfo->fb_start;
- br13 = (SRC_ROP_GXCOPY << ROP_SHIFT) | (pitch << PITCH_SHIFT);
- br18 = bg;
- br19 = fg;
- br22 = (x << WIDTH_SHIFT) | (y << HEIGHT_SHIFT);
- br23 = ((x + w) << WIDTH_SHIFT) | ((y + h) << HEIGHT_SHIFT);
-
- switch (bpp) {
- case 8:
- br13 |= COLOR_DEPTH_8;
- break;
- case 16:
- br13 |= COLOR_DEPTH_16;
- break;
- case 32:
- br13 |= COLOR_DEPTH_32;
- br00 |= WRITE_ALPHA | WRITE_RGB;
- break;
- }
-
- START_RING(8 + ndwords);
- OUT_RING(br00);
- OUT_RING(br13);
- OUT_RING(br22);
- OUT_RING(br23);
- OUT_RING(br09);
- OUT_RING(br18);
- OUT_RING(br19);
- ix = iy = 0;
- iw = ROUND_UP_TO(w, 8) / 8;
- while (ndwords--) {
- dat = 0;
- for (j = 0; j < 2; ++j) {
- for (i = 0; i < 2; ++i) {
- if (ix != iw || i == 0)
- dat |= cdat[iy*iw + ix++] << (i+j*2)*8;
- }
- if (ix == iw && iy != (h-1)) {
- ix = 0;
- ++iy;
- }
- }
- OUT_RING(dat);
- }
- if (pad)
- OUT_RING(MI_NOOP);
- ADVANCE_RING();
-
- return 1;
-}
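
The padding logic above is worth tracing once: scanlines are padded to 16 pixels (2 bytes), the byte total is rounded up to dwords, and since the command header is 7 dwords an extra MI_NOOP keeps the ring quadword-aligned exactly when ndwords is even. A standalone sketch for an assumed 12x22 glyph:

#include <stdio.h>

#define ROUND_UP_TO(x, y) ((((x) + (y) - 1) / (y)) * (y)) /* mirrors driver */

/* Illustrative only: the scanline padding math from
 * intelfbhw_do_drawglyph(), for an assumed 12x22 glyph. */
int main(void)
{
	unsigned int w = 12, h = 22;
	unsigned int nbytes = ROUND_UP_TO(w, 16) / 8 * h;  /* 2 * 22 = 44 */
	unsigned int ndwords = ROUND_UP_TO(nbytes, 4) / 4; /* 11 */
	unsigned int pad = !(ndwords % 2);                 /* 0: 11 is odd */

	/* 7 header dwords + 11 data dwords = 18, already quadword-aligned. */
	printf("%u bytes -> %u dwords, pad=%u\n", nbytes, ndwords, pad);
	return 0;
}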
-
-/* HW cursor functions. */
-void intelfbhw_cursor_init(struct intelfb_info *dinfo)
-{
- u32 tmp;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_init\n");
-#endif
-
- if (dinfo->mobile || IS_I9XX(dinfo)) {
- if (!dinfo->cursor.physical)
- return;
- tmp = INREG(CURSOR_A_CONTROL);
- tmp &= ~(CURSOR_MODE_MASK | CURSOR_MOBILE_GAMMA_ENABLE |
- CURSOR_MEM_TYPE_LOCAL |
- (1 << CURSOR_PIPE_SELECT_SHIFT));
- tmp |= CURSOR_MODE_DISABLE;
- OUTREG(CURSOR_A_CONTROL, tmp);
- OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
- } else {
- tmp = INREG(CURSOR_CONTROL);
- tmp &= ~(CURSOR_FORMAT_MASK | CURSOR_GAMMA_ENABLE |
- CURSOR_ENABLE | CURSOR_STRIDE_MASK);
- tmp |= CURSOR_FORMAT_3C;
- OUTREG(CURSOR_CONTROL, tmp);
- OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.offset << 12);
- tmp = (64 << CURSOR_SIZE_H_SHIFT) |
- (64 << CURSOR_SIZE_V_SHIFT);
- OUTREG(CURSOR_SIZE, tmp);
- }
-}
-
-void intelfbhw_cursor_hide(struct intelfb_info *dinfo)
-{
- u32 tmp;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_hide\n");
-#endif
-
- dinfo->cursor_on = 0;
- if (dinfo->mobile || IS_I9XX(dinfo)) {
- if (!dinfo->cursor.physical)
- return;
- tmp = INREG(CURSOR_A_CONTROL);
- tmp &= ~CURSOR_MODE_MASK;
- tmp |= CURSOR_MODE_DISABLE;
- OUTREG(CURSOR_A_CONTROL, tmp);
- /* Flush changes */
- OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
- } else {
- tmp = INREG(CURSOR_CONTROL);
- tmp &= ~CURSOR_ENABLE;
- OUTREG(CURSOR_CONTROL, tmp);
- }
-}
-
-void intelfbhw_cursor_show(struct intelfb_info *dinfo)
-{
- u32 tmp;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_show\n");
-#endif
-
- dinfo->cursor_on = 1;
-
- if (dinfo->cursor_blanked)
- return;
-
- if (dinfo->mobile || IS_I9XX(dinfo)) {
- if (!dinfo->cursor.physical)
- return;
- tmp = INREG(CURSOR_A_CONTROL);
- tmp &= ~CURSOR_MODE_MASK;
- tmp |= CURSOR_MODE_64_4C_AX;
- OUTREG(CURSOR_A_CONTROL, tmp);
- /* Flush changes */
- OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
- } else {
- tmp = INREG(CURSOR_CONTROL);
- tmp |= CURSOR_ENABLE;
- OUTREG(CURSOR_CONTROL, tmp);
- }
-}
-
-void intelfbhw_cursor_setpos(struct intelfb_info *dinfo, int x, int y)
-{
- u32 tmp;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_setpos: (%d, %d)\n", x, y);
-#endif
-
- /*
- * Sets the position. The coordinates are assumed to already
- * have any offset adjusted. Assume that the cursor is never
- * completely off-screen, and that x, y are always >= 0.
- */
-
- tmp = ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
- ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
- OUTREG(CURSOR_A_POSITION, tmp);
-
- if (IS_I9XX(dinfo))
- OUTREG(CURSOR_A_BASEADDR, dinfo->cursor.physical);
-}
-
-void intelfbhw_cursor_setcolor(struct intelfb_info *dinfo, u32 bg, u32 fg)
-{
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_setcolor\n");
-#endif
-
- OUTREG(CURSOR_A_PALETTE0, bg & CURSOR_PALETTE_MASK);
- OUTREG(CURSOR_A_PALETTE1, fg & CURSOR_PALETTE_MASK);
- OUTREG(CURSOR_A_PALETTE2, fg & CURSOR_PALETTE_MASK);
- OUTREG(CURSOR_A_PALETTE3, bg & CURSOR_PALETTE_MASK);
-}
-
-void intelfbhw_cursor_load(struct intelfb_info *dinfo, int width, int height,
- u8 *data)
-{
- u8 __iomem *addr = (u8 __iomem *)dinfo->cursor.virtual;
- int i, j, w = width / 8;
- int mod = width % 8, t_mask, d_mask;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_load\n");
-#endif
-
- if (!dinfo->cursor.virtual)
- return;
-
- t_mask = 0xff >> mod;
- d_mask = ~(0xff >> mod);
- for (i = height; i--; ) {
- for (j = 0; j < w; j++) {
- writeb(0x00, addr + j);
- writeb(*(data++), addr + j+8);
- }
- if (mod) {
- writeb(t_mask, addr + j);
- writeb(*(data++) & d_mask, addr + j+8);
- }
- addr += 16;
- }
-}
-
-void intelfbhw_cursor_reset(struct intelfb_info *dinfo)
-{
- u8 __iomem *addr = (u8 __iomem *)dinfo->cursor.virtual;
- int i, j;
-
-#if VERBOSE > 0
- DBG_MSG("intelfbhw_cursor_reset\n");
-#endif
-
- if (!dinfo->cursor.virtual)
- return;
-
- for (i = 64; i--; ) {
- for (j = 0; j < 8; j++) {
- writeb(0xff, addr + j+0);
- writeb(0x00, addr + j+8);
- }
- addr += 16;
- }
-}
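
cursor_load() and cursor_reset() above imply the 2-colour cursor layout: each 64-pixel line is 8 transparency-mask bytes followed by 8 image bytes, 16 bytes per line. A hedged sketch reproducing the reset pattern in plain C (the buffer here is ordinary memory, not the mapped cursor aperture):

#include <stdio.h>
#include <string.h>

/* Hedged sketch of the cursor layout implied by cursor_load() and
 * cursor_reset() above: each 64-pixel line is 8 transparency-mask bytes
 * followed by 8 image bytes. This writes the reset pattern (all
 * transparent) into ordinary memory. */
int main(void)
{
	unsigned char cursor[64 * 16];
	int i;

	for (i = 0; i < 64; i++) {
		memset(cursor + i * 16, 0xff, 8);     /* mask: transparent */
		memset(cursor + i * 16 + 8, 0x00, 8); /* image: no bits set */
	}
	printf("wrote %zu bytes\n", sizeof(cursor));
	return 0;
}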
-
-static irqreturn_t intelfbhw_irq(int irq, void *dev_id)
-{
- u16 tmp;
- struct intelfb_info *dinfo = dev_id;
-
- spin_lock(&dinfo->int_lock);
-
- tmp = INREG16(IIR);
- if (dinfo->info->var.vmode & FB_VMODE_INTERLACED)
- tmp &= PIPE_A_EVENT_INTERRUPT;
- else
- tmp &= VSYNC_PIPE_A_INTERRUPT; /* non-interlaced */
-
- if (tmp == 0) {
- spin_unlock(&dinfo->int_lock);
- return IRQ_RETVAL(0); /* not us */
- }
-
- /* clear status bits 0-15 ASAP and don't touch bits 16-31 */
- OUTREG(PIPEASTAT, INREG(PIPEASTAT));
-
- OUTREG16(IIR, tmp);
- if (dinfo->vsync.pan_display) {
- dinfo->vsync.pan_display = 0;
- OUTREG(DSPABASE, dinfo->vsync.pan_offset);
- }
-
- dinfo->vsync.count++;
- wake_up_interruptible(&dinfo->vsync.wait);
-
- spin_unlock(&dinfo->int_lock);
-
- return IRQ_RETVAL(1);
-}
-
-int intelfbhw_enable_irq(struct intelfb_info *dinfo)
-{
- u16 tmp;
- if (!test_and_set_bit(0, &dinfo->irq_flags)) {
- if (request_irq(dinfo->pdev->irq, intelfbhw_irq, IRQF_SHARED,
- "intelfb", dinfo)) {
- clear_bit(0, &dinfo->irq_flags);
- return -EINVAL;
- }
-
- spin_lock_irq(&dinfo->int_lock);
- OUTREG16(HWSTAM, 0xfffe); /* i830 DRM uses ffff */
- OUTREG16(IMR, 0);
- } else
- spin_lock_irq(&dinfo->int_lock);
-
- if (dinfo->info->var.vmode & FB_VMODE_INTERLACED)
- tmp = PIPE_A_EVENT_INTERRUPT;
- else
- tmp = VSYNC_PIPE_A_INTERRUPT; /* non-interlaced */
- if (tmp != INREG16(IER)) {
- DBG_MSG("changing IER to 0x%X\n", tmp);
- OUTREG16(IER, tmp);
- }
-
- spin_unlock_irq(&dinfo->int_lock);
- return 0;
-}
-
-void intelfbhw_disable_irq(struct intelfb_info *dinfo)
-{
- if (test_and_clear_bit(0, &dinfo->irq_flags)) {
- if (dinfo->vsync.pan_display) {
- dinfo->vsync.pan_display = 0;
- OUTREG(DSPABASE, dinfo->vsync.pan_offset);
- }
- spin_lock_irq(&dinfo->int_lock);
- OUTREG16(HWSTAM, 0xffff);
- OUTREG16(IMR, 0xffff);
- OUTREG16(IER, 0x0);
-
- OUTREG16(IIR, INREG16(IIR)); /* clear IRQ requests */
- spin_unlock_irq(&dinfo->int_lock);
-
- free_irq(dinfo->pdev->irq, dinfo);
- }
-}
-
-int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe)
-{
- struct intelfb_vsync *vsync;
- unsigned int count;
- int ret;
-
- switch (pipe) {
- case 0:
- vsync = &dinfo->vsync;
- break;
- default:
- return -ENODEV;
- }
-
- ret = intelfbhw_enable_irq(dinfo);
- if (ret)
- return ret;
-
- count = vsync->count;
- ret = wait_event_interruptible_timeout(vsync->wait,
- count != vsync->count, HZ / 10);
- if (ret < 0)
- return ret;
- if (ret == 0) {
- DBG_MSG("wait_for_vsync timed out!\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.h b/drivers/video/fbdev/intelfb/intelfbhw.h
deleted file mode 100644
index 216ca20f2..000000000
--- a/drivers/video/fbdev/intelfb/intelfbhw.h
+++ /dev/null
@@ -1,609 +0,0 @@
-#ifndef _INTELFBHW_H
-#define _INTELFBHW_H
-
-/* $DHD: intelfb/intelfbhw.h,v 1.5 2003/06/27 15:06:25 dawes Exp $ */
-
-
-/*** HW-specific data ***/
-
-/* Information about the 852GM/855GM variants */
-#define INTEL_85X_CAPID 0x44
-#define INTEL_85X_VARIANT_MASK 0x7
-#define INTEL_85X_VARIANT_SHIFT 5
-#define INTEL_VAR_855GME 0x0
-#define INTEL_VAR_855GM 0x4
-#define INTEL_VAR_852GME 0x2
-#define INTEL_VAR_852GM 0x5
-
-/* Information about DVO/LVDS Ports */
-#define DVOA_PORT 0x1
-#define DVOB_PORT 0x2
-#define DVOC_PORT 0x4
-#define LVDS_PORT 0x8
-
-/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- */
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_ENABLED 0x4
-#define INTEL_GMCH_MEM_MASK 0x1
-#define INTEL_GMCH_MEM_64M 0x1
-#define INTEL_GMCH_MEM_128M 0
-
-#define INTEL_830_GMCH_GMS_MASK (0x7 << 4)
-#define INTEL_830_GMCH_GMS_DISABLED (0x0 << 4)
-#define INTEL_830_GMCH_GMS_LOCAL (0x1 << 4)
-#define INTEL_830_GMCH_GMS_STOLEN_512 (0x2 << 4)
-#define INTEL_830_GMCH_GMS_STOLEN_1024 (0x3 << 4)
-#define INTEL_830_GMCH_GMS_STOLEN_8192 (0x4 << 4)
-
-#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
-#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-
-/* HW registers */
-
-/* Fence registers */
-#define FENCE 0x2000
-#define FENCE_NUM 8
-
-/* Primary ring buffer */
-#define PRI_RING_TAIL 0x2030
-#define RING_TAIL_MASK 0x001ffff8
-#define RING_INUSE 0x1
-
-#define PRI_RING_HEAD 0x2034
-#define RING_HEAD_WRAP_MASK 0x7ff
-#define RING_HEAD_WRAP_SHIFT 21
-#define RING_HEAD_MASK 0x001ffffc
-
-#define PRI_RING_START 0x2038
-#define RING_START_MASK 0xfffff000
-
-#define PRI_RING_LENGTH 0x203c
-#define RING_LENGTH_MASK 0x001ff000
-#define RING_REPORT_MASK (0x3 << 1)
-#define RING_NO_REPORT (0x0 << 1)
-#define RING_REPORT_64K (0x1 << 1)
-#define RING_REPORT_4K (0x2 << 1)
-#define RING_REPORT_128K (0x3 << 1)
-#define RING_ENABLE 0x1
-
-/*
- * Tail can't wrap to any closer than RING_MIN_FREE bytes of the head,
- * and the last RING_MIN_FREE bytes need to be padded with MI_NOOP
- */
-#define RING_MIN_FREE 64
-
-#define IPEHR 0x2088
-
-#define INSTDONE 0x2090
-#define PRI_RING_EMPTY 1
-
-#define HWSTAM 0x2098
-#define IER 0x20A0
-#define IIR 0x20A4
-#define IMR 0x20A8
-#define VSYNC_PIPE_A_INTERRUPT (1 << 7)
-#define PIPE_A_EVENT_INTERRUPT (1 << 6)
-#define VSYNC_PIPE_B_INTERRUPT (1 << 5)
-#define PIPE_B_EVENT_INTERRUPT (1 << 4)
-#define HOST_PORT_EVENT_INTERRUPT (1 << 3)
-#define CAPTURE_EVENT_INTERRUPT (1 << 2)
-#define USER_DEFINED_INTERRUPT (1 << 1)
-#define BREAKPOINT_INTERRUPT 1
-
-#define INSTPM 0x20c0
-#define SYNC_FLUSH_ENABLE (1 << 5)
-
-#define INSTPS 0x20c4
-
-#define MEM_MODE 0x20cc
-
-#define MASK_SHIFT 16
-
-#define FW_BLC_0 0x20d8
-#define FW_DISPA_WM_SHIFT 0
-#define FW_DISPA_WM_MASK 0x3f
-#define FW_DISPA_BL_SHIFT 8
-#define FW_DISPA_BL_MASK 0xf
-#define FW_DISPB_WM_SHIFT 16
-#define FW_DISPB_WM_MASK 0x1f
-#define FW_DISPB_BL_SHIFT 24
-#define FW_DISPB_BL_MASK 0x7
-
-#define FW_BLC_1 0x20dc
-#define FW_DISPC_WM_SHIFT 0
-#define FW_DISPC_WM_MASK 0x1f
-#define FW_DISPC_BL_SHIFT 8
-#define FW_DISPC_BL_MASK 0x7
-
-#define GPIOA 0x5010
-#define GPIOB 0x5014
-#define GPIOC 0x5018 /* this may be external DDC on i830 */
-#define GPIOD 0x501C /* this is DVO DDC */
-#define GPIOE 0x5020 /* this is DVO i2C */
-#define GPIOF 0x5024
-
-/* PLL registers */
-#define VGA0_DIVISOR 0x06000
-#define VGA1_DIVISOR 0x06004
-#define VGAPD 0x06010
-#define VGAPD_0_P1_SHIFT 0
-#define VGAPD_0_P1_FORCE_DIV2 (1 << 5)
-#define VGAPD_0_P2_SHIFT 7
-#define VGAPD_1_P1_SHIFT 8
-#define VGAPD_1_P1_FORCE_DIV2 (1 << 13)
-#define VGAPD_1_P2_SHIFT 15
-
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
-#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_2X_CLOCK_ENABLE (1 << 30)
-#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-#define DPLL_VGA_MODE_DISABLE (1 << 28)
-#define DPLL_P2_MASK 1
-#define DPLL_P2_SHIFT 23
-#define DPLL_I9XX_P2_SHIFT 24
-#define DPLL_P1_FORCE_DIV2 (1 << 21)
-#define DPLL_P1_MASK 0x1f
-#define DPLL_P1_SHIFT 16
-#define DPLL_REFERENCE_SELECT_MASK (0x3 << 13)
-#define DPLL_REFERENCE_DEFAULT (0x0 << 13)
-#define DPLL_REFERENCE_TVCLK (0x2 << 13)
-#define DPLL_RATE_SELECT_MASK (1 << 8)
-#define DPLL_RATE_SELECT_FP0 (0 << 8)
-#define DPLL_RATE_SELECT_FP1 (1 << 8)
-
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
-#define FP_DIVISOR_MASK 0x3f
-#define FP_N_DIVISOR_SHIFT 16
-#define FP_M1_DIVISOR_SHIFT 8
-#define FP_M2_DIVISOR_SHIFT 0
-
-/* PLL parameters (these are for 852GM/855GM/865G, check earlier chips). */
-/* Clock values are in units of kHz */
-#define PLL_REFCLK 48000
-#define MIN_CLOCK 25000
-#define MAX_CLOCK 350000
-
-/* Two pipes */
-#define PIPE_A 0
-#define PIPE_B 1
-#define PIPE_MASK 1
-
-/* palette registers */
-#define PALETTE_A 0x0a000
-#define PALETTE_B 0x0a800
-#ifndef PALETTE_8_ENTRIES
-#define PALETTE_8_ENTRIES 256
-#endif
-#define PALETTE_8_SIZE (PALETTE_8_ENTRIES * 4)
-#define PALETTE_10_ENTRIES 128
-#define PALETTE_10_SIZE (PALETTE_10_ENTRIES * 8)
-#define PALETTE_8_MASK 0xff
-#define PALETTE_8_RED_SHIFT 16
-#define PALETTE_8_GREEN_SHIFT 8
-#define PALETTE_8_BLUE_SHIFT 0
-
-/* CRTC registers */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define SRC_SIZE_A 0x6001c
-#define BCLRPAT_A 0x60020
-
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define SRC_SIZE_B 0x6101c
-#define BCLRPAT_B 0x61020
-
-#define HTOTAL_MASK 0xfff
-#define HTOTAL_SHIFT 16
-#define HACTIVE_MASK 0x7ff
-#define HACTIVE_SHIFT 0
-#define HBLANKEND_MASK 0xfff
-#define HBLANKEND_SHIFT 16
-#define HBLANKSTART_MASK 0xfff
-#define HBLANKSTART_SHIFT 0
-#define HSYNCEND_MASK 0xfff
-#define HSYNCEND_SHIFT 16
-#define HSYNCSTART_MASK 0xfff
-#define HSYNCSTART_SHIFT 0
-#define VTOTAL_MASK 0xfff
-#define VTOTAL_SHIFT 16
-#define VACTIVE_MASK 0x7ff
-#define VACTIVE_SHIFT 0
-#define VBLANKEND_MASK 0xfff
-#define VBLANKEND_SHIFT 16
-#define VBLANKSTART_MASK 0xfff
-#define VBLANKSTART_SHIFT 0
-#define VSYNCEND_MASK 0xfff
-#define VSYNCEND_SHIFT 16
-#define VSYNCSTART_MASK 0xfff
-#define VSYNCSTART_SHIFT 0
-#define SRC_SIZE_HORIZ_MASK 0x7ff
-#define SRC_SIZE_HORIZ_SHIFT 16
-#define SRC_SIZE_VERT_MASK 0x7ff
-#define SRC_SIZE_VERT_SHIFT 0
-
-#define ADPA 0x61100
-#define ADPA_DAC_ENABLE (1 << 31)
-#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_SHIFT 30
-#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
-#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_DPMS_CONTROL_MASK (0x3 << 10)
-#define ADPA_DPMS_D0 (0x0 << 10)
-#define ADPA_DPMS_D2 (0x1 << 10)
-#define ADPA_DPMS_D1 (0x2 << 10)
-#define ADPA_DPMS_D3 (0x3 << 10)
-#define ADPA_VSYNC_ACTIVE_SHIFT 4
-#define ADPA_HSYNC_ACTIVE_SHIFT 3
-#define ADPA_SYNC_ACTIVE_MASK 1
-#define ADPA_SYNC_ACTIVE_HIGH 1
-#define ADPA_SYNC_ACTIVE_LOW 0
-
-#define DVOA 0x61120
-#define DVOB 0x61140
-#define DVOC 0x61160
-#define LVDS 0x61180
-#define PORT_ENABLE (1 << 31)
-#define PORT_PIPE_SELECT_SHIFT 30
-#define PORT_TV_FLAGS_MASK 0xFF
-#define PORT_TV_FLAGS			0xC4	/* ripped from my BIOS; still
-						   to be understood and corrected */
-
-#define DVOA_SRCDIM 0x61124
-#define DVOB_SRCDIM 0x61144
-#define DVOC_SRCDIM 0x61164
-
-#define PIPEA_DSL 0x70000
-#define PIPEB_DSL 0x71000
-#define PIPEACONF 0x70008
-#define PIPEBCONF 0x71008
-#define PIPEASTAT 0x70024 /* bits 0-15 are "write 1 to clear" */
-#define PIPEBSTAT 0x71024
-
-#define PIPECONF_ENABLE (1 << 31)
-#define PIPECONF_DISABLE 0
-#define PIPECONF_DOUBLE_WIDE (1 << 30)
-#define PIPECONF_SINGLE_WIDE 0
-#define PIPECONF_LOCKED (1 << 25)
-#define PIPECONF_UNLOCKED 0
-#define PIPECONF_GAMMA (1 << 24)
-#define PIPECONF_PALETTE 0
-#define PIPECONF_PROGRESSIVE (0 << 21)
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
-#define PIPECONF_INTERLACE_MASK (7 << 21)
-
-/* enable bits, write 1 to enable */
-#define PIPESTAT_FIFO_UNDERRUN (1 << 31)
-#define PIPESTAT_CRC_ERROR_EN (1 << 29)
-#define PIPESTAT_CRC_DONE_EN (1 << 28)
-#define PIPESTAT_HOTPLUG_EN (1 << 26)
-#define PIPESTAT_VERTICAL_SYNC_EN (1 << 25)
-#define PIPESTAT_DISPLINE_COMP_EN (1 << 24)
-#define PIPESTAT_FLD_EVT_ODD_EN (1 << 21)
-#define PIPESTAT_FLD_EVT_EVEN_EN (1 << 20)
-#define PIPESTAT_TV_HOTPLUG_EN (1 << 18)
-#define PIPESTAT_VBLANK_EN (1 << 17)
-#define PIPESTAT_OVL_UPDATE_EN (1 << 16)
-/* status bits, write 1 to clear */
-#define PIPESTAT_HOTPLUG_STATE (1 << 15)
-#define PIPESTAT_CRC_ERROR (1 << 13)
-#define PIPESTAT_CRC_DONE (1 << 12)
-#define PIPESTAT_HOTPLUG (1 << 10)
-#define PIPESTAT_VSYNC (1 << 9)
-#define PIPESTAT_DISPLINE_COMP (1 << 8)
-#define PIPESTAT_FLD_EVT_ODD (1 << 5)
-#define PIPESTAT_FLD_EVT_EVEN (1 << 4)
-#define PIPESTAT_TV_HOTPLUG (1 << 2)
-#define PIPESTAT_VBLANK (1 << 1)
-#define PIPESTAT_OVL_UPDATE (1 << 0)
-
-#define DISPARB 0x70030
-#define DISPARB_AEND_MASK 0x1ff
-#define DISPARB_AEND_SHIFT 0
-#define DISPARB_BEND_MASK 0x3ff
-#define DISPARB_BEND_SHIFT 9
-
-/* Desktop HW cursor */
-#define CURSOR_CONTROL 0x70080
-#define CURSOR_ENABLE (1 << 31)
-#define CURSOR_GAMMA_ENABLE (1 << 30)
-#define CURSOR_STRIDE_MASK (0x3 << 28)
-#define CURSOR_STRIDE_256 (0x0 << 28)
-#define CURSOR_STRIDE_512 (0x1 << 28)
-#define CURSOR_STRIDE_1K (0x2 << 28)
-#define CURSOR_STRIDE_2K (0x3 << 28)
-#define CURSOR_FORMAT_MASK (0x7 << 24)
-#define CURSOR_FORMAT_2C (0x0 << 24)
-#define CURSOR_FORMAT_3C (0x1 << 24)
-#define CURSOR_FORMAT_4C (0x2 << 24)
-#define CURSOR_FORMAT_ARGB (0x4 << 24)
-#define CURSOR_FORMAT_XRGB (0x5 << 24)
-
-/* Mobile HW cursor (and i810) */
-#define CURSOR_A_CONTROL CURSOR_CONTROL
-#define CURSOR_B_CONTROL 0x700c0
-#define CURSOR_MODE_MASK 0x27
-#define CURSOR_MODE_DISABLE 0
-#define CURSOR_MODE_64_3C 0x04
-#define CURSOR_MODE_64_4C_AX 0x05
-#define CURSOR_MODE_64_4C 0x06
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_64_ARGB_AX 0x27
-#define CURSOR_PIPE_SELECT_SHIFT 28
-#define CURSOR_MOBILE_GAMMA_ENABLE (1 << 26)
-#define CURSOR_MEM_TYPE_LOCAL (1 << 25)
-
-/* All platforms (desktop has no pipe B) */
-#define CURSOR_A_BASEADDR 0x70084
-#define CURSOR_B_BASEADDR 0x700c4
-#define CURSOR_BASE_MASK 0xffffff00
-
-#define CURSOR_A_POSITION 0x70088
-#define CURSOR_B_POSITION 0x700c8
-#define CURSOR_POS_SIGN (1 << 15)
-#define CURSOR_POS_MASK 0x7ff
-#define CURSOR_X_SHIFT 0
-#define CURSOR_Y_SHIFT 16
-
-#define CURSOR_A_PALETTE0 0x70090
-#define CURSOR_A_PALETTE1 0x70094
-#define CURSOR_A_PALETTE2 0x70098
-#define CURSOR_A_PALETTE3 0x7009c
-#define CURSOR_B_PALETTE0 0x700d0
-#define CURSOR_B_PALETTE1 0x700d4
-#define CURSOR_B_PALETTE2 0x700d8
-#define CURSOR_B_PALETTE3 0x700dc
-#define CURSOR_COLOR_MASK 0xff
-#define CURSOR_RED_SHIFT 16
-#define CURSOR_GREEN_SHIFT 8
-#define CURSOR_BLUE_SHIFT 0
-#define CURSOR_PALETTE_MASK 0xffffff
-
-/* Desktop only */
-#define CURSOR_SIZE 0x700a0
-#define CURSOR_SIZE_MASK 0x3ff
-#define CURSOR_SIZE_H_SHIFT 0
-#define CURSOR_SIZE_V_SHIFT 12
-
-#define DSPACNTR 0x70180
-#define DSPBCNTR 0x71180
-#define DISPPLANE_PLANE_ENABLE (1 << 31)
-#define DISPPLANE_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
-#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
-#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_SHIFT 24
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
-#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
-#define DISPPLANE_NO_LINE_DOUBLE 0
-#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-/* plane B only */
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
-#define DISPPLANE_ALPHA_TRANS_DISABLE 0
-#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
-#define DISPPLANE_SPRITE_ABOVE_OVERLAY 1
-
-#define DSPABASE 0x70184
-#define DSPASTRIDE 0x70188
-
-#define DSPBBASE 0x71184
-#define DSPBSTRIDE 0x71188
-
-#define VGACNTRL 0x71400
-#define VGA_DISABLE (1 << 31)
-#define VGA_ENABLE 0
-#define VGA_PIPE_SELECT_SHIFT 29
-#define VGA_PALETTE_READ_SELECT 23
-#define VGA_PALETTE_A_WRITE_DISABLE (1 << 22)
-#define VGA_PALETTE_B_WRITE_DISABLE (1 << 21)
-#define VGA_LEGACY_PALETTE (1 << 20)
-#define VGA_6BIT_DAC 0
-#define VGA_8BIT_DAC (1 << 20)
-
-#define ADD_ID 0x71408
-#define ADD_ID_MASK 0xff
-
-/* BIOS scratch area registers (830M and 845G). */
-#define SWF0 0x71410
-#define SWF1 0x71414
-#define SWF2 0x71418
-#define SWF3 0x7141c
-#define SWF4 0x71420
-#define SWF5 0x71424
-#define SWF6 0x71428
-
-/* BIOS scratch area registers (852GM, 855GM, 865G). */
-#define SWF00 0x70410
-#define SWF01 0x70414
-#define SWF02 0x70418
-#define SWF03 0x7041c
-#define SWF04 0x70420
-#define SWF05 0x70424
-#define SWF06 0x70428
-
-#define SWF10 SWF0
-#define SWF11 SWF1
-#define SWF12 SWF2
-#define SWF13 SWF3
-#define SWF14 SWF4
-#define SWF15 SWF5
-#define SWF16 SWF6
-
-#define SWF30 0x72414
-#define SWF31 0x72418
-#define SWF32 0x7241c
-
-/* Memory Commands */
-#define MI_NOOP (0x00 << 23)
-#define MI_NOOP_WRITE_ID (1 << 22)
-#define MI_NOOP_ID_MASK ((1 << 22) - 1)
-
-#define MI_FLUSH (0x04 << 23)
-#define MI_WRITE_DIRTY_STATE (1 << 4)
-#define MI_END_SCENE (1 << 3)
-#define MI_INHIBIT_RENDER_CACHE_FLUSH (1 << 2)
-#define MI_INVALIDATE_MAP_CACHE (1 << 0)
-
-#define MI_STORE_DWORD_IMM ((0x20 << 23) | 1)
-
-/* 2D Commands */
-#define COLOR_BLT_CMD ((2 << 29) | (0x40 << 22) | 3)
-#define XY_COLOR_BLT_CMD ((2 << 29) | (0x50 << 22) | 4)
-#define XY_SETUP_CLIP_BLT_CMD ((2 << 29) | (0x03 << 22) | 1)
-#define XY_SRC_COPY_BLT_CMD ((2 << 29) | (0x53 << 22) | 6)
-#define SRC_COPY_BLT_CMD ((2 << 29) | (0x43 << 22) | 4)
-#define XY_MONO_PAT_BLT_CMD ((2 << 29) | (0x52 << 22) | 7)
-#define XY_MONO_SRC_BLT_CMD ((2 << 29) | (0x54 << 22) | 6)
-#define XY_MONO_SRC_IMM_BLT_CMD ((2 << 29) | (0x71 << 22) | 5)
-#define TXT_IMM_BLT_CMD ((2 << 29) | (0x30 << 22) | 2)
-#define SETUP_BLT_CMD ((2 << 29) | (0x00 << 22) | 6)
-
-#define DW_LENGTH_MASK 0xff
-
-#define WRITE_ALPHA (1 << 21)
-#define WRITE_RGB (1 << 20)
-#define VERT_SEED (3 << 8)
-#define HORIZ_SEED (3 << 12)
-
-#define COLOR_DEPTH_8 (0 << 24)
-#define COLOR_DEPTH_16 (1 << 24)
-#define COLOR_DEPTH_32 (3 << 24)
-
-#define SRC_ROP_GXCOPY 0xcc
-#define SRC_ROP_GXXOR 0x66
-
-#define PAT_ROP_GXCOPY 0xf0
-#define PAT_ROP_GXXOR 0x5a
-
-#define PITCH_SHIFT 0
-#define ROP_SHIFT 16
-#define WIDTH_SHIFT 0
-#define HEIGHT_SHIFT 16
-
-/* in bytes */
-#define MAX_MONO_IMM_SIZE 128
-
-
-/*** Macros ***/
-
-/* I/O macros */
-#define INREG8(addr) readb((u8 __iomem *)(dinfo->mmio_base + (addr)))
-#define INREG16(addr) readw((u16 __iomem *)(dinfo->mmio_base + (addr)))
-#define INREG(addr) readl((u32 __iomem *)(dinfo->mmio_base + (addr)))
-#define OUTREG8(addr, val) writeb((val),(u8 __iomem *)(dinfo->mmio_base + \
- (addr)))
-#define OUTREG16(addr, val) writew((val),(u16 __iomem *)(dinfo->mmio_base + \
- (addr)))
-#define OUTREG(addr, val) writel((val),(u32 __iomem *)(dinfo->mmio_base + \
- (addr)))
-
-/* Ring buffer macros */
-#define OUT_RING(n) do { \
- writel((n), (u32 __iomem *)(dinfo->ring.virtual + dinfo->ring_tail));\
- dinfo->ring_tail += 4; \
- dinfo->ring_tail &= dinfo->ring_tail_mask; \
-} while (0)
-
-#define START_RING(n) do { \
- if (dinfo->ring_space < (n) * 4) \
- wait_ring(dinfo,(n) * 4); \
- dinfo->ring_space -= (n) * 4; \
-} while (0)
-
-#define ADVANCE_RING() do { \
- OUTREG(PRI_RING_TAIL, dinfo->ring_tail); \
-} while (0)
-
-#define DO_RING_IDLE() do { \
- u32 head, tail; \
- do { \
- head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK; \
- tail = INREG(PRI_RING_TAIL) & RING_TAIL_MASK; \
- udelay(10); \
- } while (head != tail); \
-} while (0)
-
-
-/* function prototypes */
-extern int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo);
-extern int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
- int *stolen_size);
-extern int intelfbhw_check_non_crt(struct intelfb_info *dinfo);
-extern const char *intelfbhw_dvo_to_string(int dvo);
-extern int intelfbhw_validate_mode(struct intelfb_info *dinfo,
- struct fb_var_screeninfo *var);
-extern int intelfbhw_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-extern void intelfbhw_do_blank(int blank, struct fb_info *info);
-extern void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno,
- unsigned red, unsigned green, unsigned blue,
- unsigned transp);
-extern int intelfbhw_read_hw_state(struct intelfb_info *dinfo,
- struct intelfb_hwstate *hw, int flag);
-extern void intelfbhw_print_hw_state(struct intelfb_info *dinfo,
- struct intelfb_hwstate *hw);
-extern int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
- struct intelfb_hwstate *hw,
- struct fb_var_screeninfo *var);
-extern int intelfbhw_program_mode(struct intelfb_info *dinfo,
- const struct intelfb_hwstate *hw, int blank);
-extern void intelfbhw_do_sync(struct intelfb_info *dinfo);
-extern void intelfbhw_2d_stop(struct intelfb_info *dinfo);
-extern void intelfbhw_2d_start(struct intelfb_info *dinfo);
-extern void intelfbhw_do_fillrect(struct intelfb_info *dinfo, u32 x, u32 y,
- u32 w, u32 h, u32 color, u32 pitch, u32 bpp,
- u32 rop);
-extern void intelfbhw_do_bitblt(struct intelfb_info *dinfo, u32 curx, u32 cury,
- u32 dstx, u32 dsty, u32 w, u32 h, u32 pitch,
- u32 bpp);
-extern int intelfbhw_do_drawglyph(struct intelfb_info *dinfo, u32 fg, u32 bg,
- u32 w, u32 h, const u8* cdat, u32 x, u32 y,
- u32 pitch, u32 bpp);
-extern void intelfbhw_cursor_init(struct intelfb_info *dinfo);
-extern void intelfbhw_cursor_hide(struct intelfb_info *dinfo);
-extern void intelfbhw_cursor_show(struct intelfb_info *dinfo);
-extern void intelfbhw_cursor_setpos(struct intelfb_info *dinfo, int x, int y);
-extern void intelfbhw_cursor_setcolor(struct intelfb_info *dinfo, u32 bg,
- u32 fg);
-extern void intelfbhw_cursor_load(struct intelfb_info *dinfo, int width,
- int height, u8 *data);
-extern void intelfbhw_cursor_reset(struct intelfb_info *dinfo);
-extern int intelfbhw_enable_irq(struct intelfb_info *dinfo);
-extern void intelfbhw_disable_irq(struct intelfb_info *dinfo);
-extern int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe);
-extern int intelfbhw_active_pipe(const struct intelfb_hwstate *hw);
-
-#endif /* _INTELFBHW_H */
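
The header deleted above carried the i810/i815 register layout plus the driver's ring-buffer protocol: START_RING() reserves dwords (busy-waiting via wait_ring() when the ring is full), OUT_RING() writes one dword at the software tail, ADVANCE_RING() publishes the tail register so the hardware starts fetching, and DO_RING_IDLE() then polls until head catches up with tail. A minimal sketch of how a blit was emitted with these macros (illustrative only; dinfo->fb_start and the exact dword layout are assumptions, not the driver's verbatim sequence):

	static void example_solid_fill(struct intelfb_info *dinfo, u32 rop, u32 pitch,
				       u32 x, u32 y, u32 w, u32 h, u32 color)
	{
		START_RING(6);			/* reserve 6 dwords, wait if full */
		OUT_RING(XY_COLOR_BLT_CMD);	/* 2D fill opcode, length field 4 */
		OUT_RING((rop << ROP_SHIFT) | (pitch << PITCH_SHIFT) | COLOR_DEPTH_32);
		OUT_RING((y << HEIGHT_SHIFT) | (x << WIDTH_SHIFT));		/* top-left */
		OUT_RING(((y + h) << HEIGHT_SHIFT) | ((x + w) << WIDTH_SHIFT));	/* bottom-right */
		OUT_RING(dinfo->fb_start);	/* destination base (field name assumed) */
		OUT_RING(color);
		ADVANCE_RING();			/* write the tail; GPU starts fetching */
	}
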
diff --git a/drivers/video/fbdev/matrox/i2c-matroxfb.c b/drivers/video/fbdev/matrox/i2c-matroxfb.c
index e2e4705e3..bb048e14b 100644
--- a/drivers/video/fbdev/matrox/i2c-matroxfb.c
+++ b/drivers/video/fbdev/matrox/i2c-matroxfb.c
@@ -100,8 +100,7 @@ static const struct i2c_algo_bit_data matrox_i2c_algo_template =
};
static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
- unsigned int data, unsigned int clock, const char *name,
- int class)
+ unsigned int data, unsigned int clock, const char *name)
{
int err;
@@ -112,7 +111,6 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
snprintf(b->adapter.name, sizeof(b->adapter.name), name,
minfo->fbcon.node);
i2c_set_adapdata(&b->adapter, b);
- b->adapter.class = class;
b->adapter.algo_data = &b->bac;
b->adapter.dev.parent = &minfo->pcidev->dev;
b->bac = matrox_i2c_algo_template;
@@ -160,27 +158,24 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
case MGA_2164:
err = i2c_bus_reg(&m2info->ddc1, minfo,
DDC1B_DATA, DDC1B_CLK,
- "DDC:fb%u #0", I2C_CLASS_DDC);
+ "DDC:fb%u #0");
break;
default:
err = i2c_bus_reg(&m2info->ddc1, minfo,
DDC1_DATA, DDC1_CLK,
- "DDC:fb%u #0", I2C_CLASS_DDC);
+ "DDC:fb%u #0");
break;
}
if (err)
goto fail_ddc1;
if (minfo->devflags.dualhead) {
- err = i2c_bus_reg(&m2info->ddc2, minfo,
- DDC2_DATA, DDC2_CLK,
- "DDC:fb%u #1", I2C_CLASS_DDC);
+ err = i2c_bus_reg(&m2info->ddc2, minfo, DDC2_DATA, DDC2_CLK, "DDC:fb%u #1");
if (err == -ENODEV) {
printk(KERN_INFO "i2c-matroxfb: VGA->TV plug detected, DDC unavailable.\n");
} else if (err)
printk(KERN_INFO "i2c-matroxfb: Could not register secondary output i2c bus. Continuing anyway.\n");
/* Register maven bus even on G450/G550 */
- err = i2c_bus_reg(&m2info->maven, minfo,
- MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0);
+ err = i2c_bus_reg(&m2info->maven, minfo, MAT_DATA, MAT_CLK, "MAVEN:fb%u");
if (err)
printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n");
else {
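
This hunk tracks the removal of I2C_CLASS_DDC from the i2c core in 6.8: adapters no longer advertise a DDC probe class, so i2c_bus_reg() loses its class parameter outright. What remains is plain bit-banged adapter registration; condensed into a sketch (struct and template names as in the file above, error handling trimmed):

	static int ddc_bus_register(struct i2c_bit_adapter *b, struct device *parent,
				    const char *name)
	{
		snprintf(b->adapter.name, sizeof(b->adapter.name), "%s", name);
		i2c_set_adapdata(&b->adapter, b);
		/* no .class assignment anymore: I2C_CLASS_DDC is gone */
		b->adapter.algo_data = &b->bac;
		b->adapter.dev.parent = parent;
		b->bac = matrox_i2c_algo_template;	/* bit-banging callbacks */
		return i2c_bit_add_bus(&b->adapter);
	}

The same one-line deletion repeats below for s3fb, tdfxfb and tridentfb.
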
diff --git a/drivers/video/fbdev/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index 16401eb95..0f8f0312a 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
@@ -91,7 +91,7 @@ static int lcd_spi_setup(struct spi_device *spi)
writel(tmp, reg_base + LCD_SPU_SPI_CTRL);
/*
- * After set mode it need a time to pull up the spi singals,
+ * After setting the mode it needs some time to pull up the SPI signals,
 * or it would produce a wrong waveform when sending an SPI command,
* especially on pxa910h
*/
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 694cf6318..aa31c0d26 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1203,6 +1203,8 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
struct omapfb_device *fbdev = plane->fbdev;
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
omapfb_rqueue_lock(fbdev);
r = fbdev->ctrl->mmap(info, vma);
omapfb_rqueue_unlock(fbdev);
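
The added pgprot_decrypted() call is the recurring theme of the fbdev changes in this release: with memory encryption active (e.g. AMD SEV), userspace mappings of device memory must have the encryption bit cleared from vm_page_prot before the mapping is built, so each mmap handler applies the fixup first and only then hands off to its usual mapping helper. The common shape, as a generic sketch rather than any one driver's handler:

	static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
	{
		/* I/O memory is never encrypted: clear the encryption bit first. */
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

		/* ...then build the mapping as before. */
		return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
	}

Note the ordering in the sbuslib hunk further down: pgprot_decrypted() runs before pgprot_noncached() so both attribute changes compose.
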
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index c9fd0ad35..0db9c55fc 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1095,6 +1095,8 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
u32 len;
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
rg = omapfb_get_mem_region(ofbi->region);
start = omapfb_get_region_paddr(ofbi);
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 64d291d6b..dbcda307f 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -708,6 +708,8 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
int r;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
@@ -939,15 +941,12 @@ static const struct fb_ops ps3fb_ops = {
.owner = THIS_MODULE,
.fb_open = ps3fb_open,
.fb_release = ps3fb_release,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_check_var = ps3fb_check_var,
.fb_set_par = ps3fb_set_par,
.fb_setcolreg = ps3fb_setcolreg,
.fb_pan_display = ps3fb_pan_display,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = ps3fb_mmap,
.fb_blank = ps3fb_blank,
.fb_ioctl = ps3fb_ioctl,
@@ -1145,7 +1144,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
info->pseudo_palette = par->pseudo_palette;
- info->flags = FBINFO_READS_FAST |
+ info->flags = FBINFO_VIRTFB | FBINFO_READS_FAST |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
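
__FB_DEFAULT_SYSMEM_OPS_RDWR and __FB_DEFAULT_SYSMEM_OPS_DRAW are initializer macros from <linux/fb.h>; for a system-memory framebuffer they expand to exactly the callbacks the struct spelled out before, so the conversion is mechanical. Expanded by hand (per my reading of the 6.8 header):

	static const struct fb_ops example_sysmem_ops = {
		.owner		= THIS_MODULE,
		/* __FB_DEFAULT_SYSMEM_OPS_RDWR: */
		.fb_read	= fb_sys_read,
		.fb_write	= fb_sys_write,
		/* __FB_DEFAULT_SYSMEM_OPS_DRAW: */
		.fb_fillrect	= sys_fillrect,
		.fb_copyarea	= sys_copyarea,
		.fb_imageblit	= sys_imageblit,
	};

The new FBINFO_VIRTFB flag goes with it: it marks the buffer as kernel virtual memory rather than ioremap()ed I/O memory, which is what the sysmem helpers assume.
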
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 589b349cb..07722a5ea 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -252,7 +252,6 @@ static int s3fb_setup_ddc_bus(struct fb_info *info)
strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
- par->ddc_adapter.class = I2C_CLASS_DDC;
par->ddc_adapter.algo_data = &par->ddc_algo;
par->ddc_adapter.dev.parent = info->device;
par->ddc_algo.setsda = s3fb_ddc_setsda;
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index befd3fe2f..0d362d2bf 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -562,6 +562,8 @@ static int sa1100fb_mmap(struct fb_info *info,
container_of(info, struct sa1100fb_info, fb);
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (off < info->fix.smem_len) {
vma->vm_pgoff += 1; /* skip over the palette */
return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 21e9fd8e6..634e3d159 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -48,7 +48,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
unsigned long map_offset = 0;
unsigned long off;
int i;
-
+
if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
return -EINVAL;
@@ -60,6 +60,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* Each page, see which map applies */
@@ -72,7 +73,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
#define POFF_MASK (PAGE_MASK|0x1UL)
#else
#define POFF_MASK (PAGE_MASK)
-#endif
+#endif
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 1364dafaa..eb2297b37 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1482,19 +1482,18 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
ovl->dma_handle, ovl->fb_size);
}
static const struct fb_ops sh_mobile_lcdc_overlay_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_blank = sh_mobile_lcdc_overlay_blank,
.fb_pan_display = sh_mobile_lcdc_overlay_pan,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
.fb_ioctl = sh_mobile_lcdc_overlay_ioctl,
.fb_check_var = sh_mobile_lcdc_overlay_check_var,
.fb_set_par = sh_mobile_lcdc_overlay_set_par,
@@ -1567,6 +1566,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
info->fbops = &sh_mobile_lcdc_overlay_ops;
info->device = priv->dev;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = ovl->fb_mem;
info->par = ovl;
@@ -1958,6 +1958,8 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
ch->dma_handle, ch->fb_size);
}
@@ -1965,8 +1967,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
static const struct fb_ops sh_mobile_lcdc_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = sh_mobile_lcdc_setcolreg,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_fillrect = sh_mobile_lcdc_fillrect,
.fb_copyarea = sh_mobile_lcdc_copyarea,
.fb_imageblit = sh_mobile_lcdc_imageblit,
@@ -2053,6 +2054,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->fbops = &sh_mobile_lcdc_ops;
info->device = priv->dev;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = ch->fb_mem;
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
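
The DMA-memory variants used here resolve, as of this release, to the same fb_sys_read/fb_sys_write and sys_* drawing helpers, since a dma_alloc_coherent() buffer has a normal kernel virtual address; that is also why both fb_info instances gain FBINFO_VIRTFB. A sketch of the flag's intent (helper name hypothetical):

	static void example_init_virtfb(struct fb_info *info, void *cpu_buf)
	{
		/*
		 * FBINFO_VIRTFB: screen_buffer is ordinary kernel memory, so
		 * core helpers may dereference it directly instead of going
		 * through memcpy_fromio()/memcpy_toio().
		 */
		info->flags |= FBINFO_VIRTFB;
		info->screen_buffer = cpu_buf;
	}
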
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 62f99f6fc..028a56525 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -21,9 +21,11 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/parser.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
static const struct fb_fix_screeninfo simplefb_fix = {
@@ -77,6 +79,11 @@ struct simplefb_par {
unsigned int clk_count;
struct clk **clks;
#endif
+#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS
+ unsigned int num_genpds;
+ struct device **genpds;
+ struct device_link **genpd_links;
+#endif
#if defined CONFIG_OF && defined CONFIG_REGULATOR
bool regulators_enabled;
u32 regulator_count;
@@ -121,12 +128,13 @@ struct simplefb_params {
u32 height;
u32 stride;
struct simplefb_format *format;
+ struct resource memory;
};
static int simplefb_parse_dt(struct platform_device *pdev,
struct simplefb_params *params)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *mem;
int ret;
const char *format;
int i;
@@ -166,6 +174,23 @@ static int simplefb_parse_dt(struct platform_device *pdev,
return -EINVAL;
}
+ mem = of_parse_phandle(np, "memory-region", 0);
+ if (mem) {
+ ret = of_address_to_resource(mem, 0, &params->memory);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse memory-region\n");
+ of_node_put(mem);
+ return ret;
+ }
+
+ if (of_property_present(np, "reg"))
+ dev_warn(&pdev->dev, "preferring \"memory-region\" over \"reg\" property\n");
+
+ of_node_put(mem);
+ } else {
+ memset(&params->memory, 0, sizeof(params->memory));
+ }
+
return 0;
}
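
The new lookup lets simplefb take its framebuffer from a /reserved-memory node referenced by "memory-region", preferring it over a plain "reg" resource (see the probe hunk below). As a reusable helper the parse step would look roughly like this (a sketch; simplefb keeps it inline):

	static int example_get_reserved_mem(struct device *dev, struct resource *res)
	{
		struct device_node *mem;
		int ret;

		/* "memory-region" points at a /reserved-memory child node */
		mem = of_parse_phandle(dev->of_node, "memory-region", 0);
		if (!mem)
			return -ENOENT;

		ret = of_address_to_resource(mem, 0, res);	/* node's reg -> resource */
		of_node_put(mem);
		return ret;
	}
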
@@ -193,6 +218,8 @@ static int simplefb_parse_pd(struct platform_device *pdev,
return -EINVAL;
}
+ memset(&params->memory, 0, sizeof(params->memory));
+
return 0;
}
@@ -411,6 +438,93 @@ static void simplefb_regulators_enable(struct simplefb_par *par,
static void simplefb_regulators_destroy(struct simplefb_par *par) { }
#endif
+#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS
+static void simplefb_detach_genpds(void *res)
+{
+ struct simplefb_par *par = res;
+ unsigned int i = par->num_genpds;
+
+ if (par->num_genpds <= 1)
+ return;
+
+ while (i--) {
+ if (par->genpd_links[i])
+ device_link_del(par->genpd_links[i]);
+
+ if (!IS_ERR_OR_NULL(par->genpds[i]))
+ dev_pm_domain_detach(par->genpds[i], true);
+ }
+}
+
+static int simplefb_attach_genpds(struct simplefb_par *par,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int i;
+ int err;
+
+ err = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (err < 0) {
+ /* Nothing wrong if optional PDs are missing */
+ if (err == -ENOENT)
+ return 0;
+
+ dev_err(dev, "failed to parse power-domains: %d\n", err);
+ return err;
+ }
+
+ par->num_genpds = err;
+
+ /*
+ * Single power-domain devices are handled by the driver core, so
+ * nothing to do here.
+ */
+ if (par->num_genpds <= 1)
+ return 0;
+
+ par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds),
+ GFP_KERNEL);
+ if (!par->genpds)
+ return -ENOMEM;
+
+ par->genpd_links = devm_kcalloc(dev, par->num_genpds,
+ sizeof(*par->genpd_links),
+ GFP_KERNEL);
+ if (!par->genpd_links)
+ return -ENOMEM;
+
+ for (i = 0; i < par->num_genpds; i++) {
+ par->genpds[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(par->genpds[i])) {
+ err = PTR_ERR(par->genpds[i]);
+ if (err == -EPROBE_DEFER) {
+ simplefb_detach_genpds(par);
+ return err;
+ }
+
+ dev_warn(dev, "failed to attach domain %u: %d\n", i, err);
+ continue;
+ }
+
+ par->genpd_links[i] = device_link_add(dev, par->genpds[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!par->genpd_links[i])
+ dev_warn(dev, "failed to link power-domain %u\n", i);
+ }
+
+ return devm_add_action_or_reset(dev, simplefb_detach_genpds, par);
+}
+#else
+static int simplefb_attach_genpds(struct simplefb_par *par,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
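
The power-domain code above is the standard consumer pattern for devices listing more than one entry in "power-domains": the driver core auto-attaches only a single domain, so multi-domain consumers attach each by index and pin it with a device link so the domains power up and down with the device. Reduced to its core (the real code also records the handles for the devm cleanup action):

	static int example_attach_domains(struct device *dev, unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			struct device *pd = dev_pm_domain_attach_by_id(dev, i);

			if (IS_ERR(pd))
				return PTR_ERR(pd);	/* simplefb only bails on -EPROBE_DEFER */

			/* keep the domain active while the consumer is bound */
			if (!device_link_add(dev, pd, DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE))
				dev_warn(dev, "failed to link power-domain %u\n", i);
		}
		return 0;
	}
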
static int simplefb_probe(struct platform_device *pdev)
{
int ret;
@@ -431,10 +545,14 @@ static int simplefb_probe(struct platform_device *pdev)
if (ret)
return ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "No memory resource\n");
- return -EINVAL;
+ if (params.memory.start == 0 && params.memory.end == 0) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ return -EINVAL;
+ }
+ } else {
+ res = &params.memory;
}
mem = request_mem_region(res->start, resource_size(res), "simplefb");
@@ -493,6 +611,10 @@ static int simplefb_probe(struct platform_device *pdev)
if (ret < 0)
goto error_clocks;
+ ret = simplefb_attach_genpds(par, pdev);
+ if (ret < 0)
+ goto error_regulators;
+
simplefb_clocks_enable(par, pdev);
simplefb_regulators_enable(par, pdev);
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 431b1a138..009bf1d92 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -27,7 +27,6 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/screen_info.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/selection.h>
@@ -257,36 +256,6 @@ static void sisfb_search_mode(char *name, bool quiet)
printk(KERN_ERR "sisfb: Invalid mode '%s'\n", nameptr);
}
-#ifndef MODULE
-static void sisfb_get_vga_mode_from_kernel(void)
-{
-#ifdef CONFIG_X86
- char mymode[32];
- int mydepth = screen_info.lfb_depth;
-
- if(screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) return;
-
- if( (screen_info.lfb_width >= 320) && (screen_info.lfb_width <= 2048) &&
- (screen_info.lfb_height >= 200) && (screen_info.lfb_height <= 1536) &&
- (mydepth >= 8) && (mydepth <= 32) ) {
-
- if(mydepth == 24) mydepth = 32;
-
- sprintf(mymode, "%ux%ux%u", screen_info.lfb_width,
- screen_info.lfb_height,
- mydepth);
-
- printk(KERN_DEBUG
- "sisfb: Using vga mode %s pre-set by kernel as default\n",
- mymode);
-
- sisfb_search_mode(mymode, true);
- }
-#endif
- return;
-}
-#endif
-
static void __init
sisfb_search_crt2type(const char *name)
{
@@ -5903,12 +5872,6 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->subsysvendor = pdev->subsystem_vendor;
ivideo->subsysdevice = pdev->subsystem_device;
-#ifndef MODULE
- if(sisfb_mode_idx == -1) {
- sisfb_get_vga_mode_from_kernel();
- }
-#endif
-
ivideo->chip = chipinfo->chip;
ivideo->chip_real_id = chipinfo->chip;
ivideo->sisvga_engine = chipinfo->vgaengine;
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 90a77d19b..35d682b11 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -783,6 +783,8 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 5ae48e36f..1a4f90ea7 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -347,7 +347,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
pwm_init_state(par->pwm, &pwmstate);
pwm_set_relative_duty_cycle(&pwmstate, 50, 100);
- pwm_apply_state(par->pwm, &pwmstate);
+ pwm_apply_might_sleep(par->pwm, &pwmstate);
/* Enable the PWM */
pwm_enable(par->pwm);
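
pwm_apply_state() became pwm_apply_might_sleep() in 6.8; only the name changed, to make the sleeping context explicit ahead of an atomic variant. The usual sequence, as a standalone helper:

	static int example_pwm_half_duty(struct pwm_device *pwm)
	{
		struct pwm_state state;

		pwm_init_state(pwm, &state);			/* seed from current state */
		pwm_set_relative_duty_cycle(&state, 50, 100);	/* 50% duty cycle */
		return pwm_apply_might_sleep(pwm, &state);	/* may sleep (bus I/O) */
	}
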
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 548d992f8..8e5bac275 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -103,7 +103,7 @@ typedef struct {
} ngle_rom_t;
struct stifb_info {
- struct fb_info info;
+ struct fb_info *info;
unsigned int id;
ngle_rom_t ngle_rom;
struct sti_struct *sti;
@@ -153,15 +153,15 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
#define REG_44 0x210030
#define REG_45 0x210034
-#define READ_BYTE(fb,reg) gsc_readb((fb)->info.fix.mmio_start + (reg))
-#define READ_WORD(fb,reg) gsc_readl((fb)->info.fix.mmio_start + (reg))
+#define READ_BYTE(fb, reg) gsc_readb((fb)->info->fix.mmio_start + (reg))
+#define READ_WORD(fb, reg) gsc_readl((fb)->info->fix.mmio_start + (reg))
#ifndef DEBUG_STIFB_REGS
# define DEBUG_OFF()
# define DEBUG_ON()
-# define WRITE_BYTE(value,fb,reg) gsc_writeb((value),(fb)->info.fix.mmio_start + (reg))
-# define WRITE_WORD(value,fb,reg) gsc_writel((value),(fb)->info.fix.mmio_start + (reg))
+# define WRITE_BYTE(value, fb, reg) gsc_writeb((value), (fb)->info->fix.mmio_start + (reg))
+# define WRITE_WORD(value, fb, reg) gsc_writel((value), (fb)->info->fix.mmio_start + (reg))
#else
static int debug_on = 1;
# define DEBUG_OFF() debug_on=0
@@ -169,11 +169,11 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
# define WRITE_BYTE(value,fb,reg) do { if (debug_on) \
printk(KERN_DEBUG "%30s: WRITE_BYTE(0x%06x) = 0x%02x (old=0x%02x)\n", \
__func__, reg, value, READ_BYTE(fb,reg)); \
- gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)); } while (0)
+ gsc_writeb((value), (fb)->info->fix.mmio_start + (reg)); } while (0)
# define WRITE_WORD(value,fb,reg) do { if (debug_on) \
printk(KERN_DEBUG "%30s: WRITE_WORD(0x%06x) = 0x%08x (old=0x%08x)\n", \
__func__, reg, value, READ_WORD(fb,reg)); \
- gsc_writel((value),(fb)->info.fix.mmio_start + (reg)); } while (0)
+ gsc_writel((value), (fb)->info->fix.mmio_start + (reg)); } while (0)
#endif /* DEBUG_STIFB_REGS */
@@ -210,13 +210,13 @@ SETUP_FB(struct stifb_info *fb)
reg10_value = 0x13601000;
break;
case S9000_ID_A1439A:
- if (fb->info.var.bits_per_pixel == 32)
+ if (fb->info->var.bits_per_pixel == 32)
reg10_value = 0xBBA0A000;
else
reg10_value = 0x13601000;
break;
case S9000_ID_HCRX:
- if (fb->info.var.bits_per_pixel == 32)
+ if (fb->info->var.bits_per_pixel == 32)
reg10_value = 0xBBA0A000;
else
reg10_value = 0x13602000;
@@ -254,7 +254,7 @@ static void
FINISH_IMAGE_COLORMAP_ACCESS(struct stifb_info *fb)
{
WRITE_WORD(0x400, fb, REG_2);
- if (fb->info.var.bits_per_pixel == 32) {
+ if (fb->info->var.bits_per_pixel == 32) {
WRITE_WORD(0x83000100, fb, REG_1);
} else {
if (fb->id == S9000_ID_ARTIST || fb->id == CRT_ID_VISUALIZE_EG)
@@ -503,7 +503,7 @@ static void
ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber)
{
SETUP_ATTR_ACCESS(fb, BufferNumber);
- SET_ATTR_SIZE(fb, fb->info.var.xres, fb->info.var.yres);
+ SET_ATTR_SIZE(fb, fb->info->var.xres, fb->info->var.yres);
FINISH_ATTR_ACCESS(fb);
SETUP_FB(fb);
}
@@ -526,9 +526,9 @@ rattlerSetupPlanes(struct stifb_info *fb)
SETUP_FB(fb);
fb->id = saved_id;
- for (y = 0; y < fb->info.var.yres; ++y)
- fb_memset_io(fb->info.screen_base + y * fb->info.fix.line_length,
- 0xff, fb->info.var.xres * fb->info.var.bits_per_pixel/8);
+ for (y = 0; y < fb->info->var.yres; ++y)
+ fb_memset_io(fb->info->screen_base + y * fb->info->fix.line_length,
+ 0xff, fb->info->var.xres * fb->info->var.bits_per_pixel/8);
CRX24_SET_OVLY_MASK(fb);
SETUP_FB(fb);
@@ -607,7 +607,7 @@ setHyperLutBltCtl(struct stifb_info *fb, int offsetWithinLut, int length)
lutBltCtl.fields.lutType = HYPER_CMAP_TYPE;
/* Expect lutIndex to be 0 or 1 for image cmaps, 2 or 3 for overlay cmaps */
- if (fb->info.var.bits_per_pixel == 8)
+ if (fb->info->var.bits_per_pixel == 8)
lutBltCtl.fields.lutOffset = 2 * 256;
else
lutBltCtl.fields.lutOffset = 0 * 256;
@@ -688,7 +688,7 @@ ngleResetAttrPlanes(struct stifb_info *fb, unsigned int ctlPlaneReg)
DataDynamic, MaskOtc,
BGx(0), FGx(0)));
packed_dst = 0;
- packed_len = (fb->info.var.xres << 16) | fb->info.var.yres;
+ packed_len = (fb->info->var.xres << 16) | fb->info->var.yres;
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 2);
NGLE_SET_DSTXY(fb, packed_dst);
SET_LENXY_START_RECFILL(fb, packed_len);
@@ -738,7 +738,7 @@ ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data)
NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, mask);
packed_dst = 0;
- packed_len = (fb->info.var.xres << 16) | fb->info.var.yres;
+ packed_len = (fb->info->var.xres << 16) | fb->info->var.yres;
NGLE_SET_DSTXY(fb, packed_dst);
/* Write zeroes to overlay planes */
@@ -760,7 +760,7 @@ hyperResetPlanes(struct stifb_info *fb, int enable)
NGLE_LOCK(fb);
if (IS_24_DEVICE(fb))
- if (fb->info.var.bits_per_pixel == 32)
+ if (fb->info->var.bits_per_pixel == 32)
controlPlaneReg = 0x04000F00;
else
			controlPlaneReg = 0x00000F00; /* 0x00000800 should be enough, but let's clear all 4 bits */
@@ -890,7 +890,7 @@ SETUP_HCRX(struct stifb_info *fb)
GET_FIFO_SLOTS(fb, nFreeFifoSlots, 7);
if (IS_24_DEVICE(fb)) {
- hyperbowl = (fb->info.var.bits_per_pixel == 32) ?
+ hyperbowl = (fb->info->var.bits_per_pixel == 32) ?
HYPERBOWL_MODE01_8_24_LUT0_TRANSPARENT_LUT1_OPAQUE :
HYPERBOWL_MODE01_8_24_LUT0_OPAQUE_LUT1_OPAQUE;
@@ -924,21 +924,21 @@ SETUP_HCRX(struct stifb_info *fb)
static int
stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct stifb_info *fb = container_of(info, struct stifb_info, info);
+ struct stifb_info *fb = info->par;
- if (var->xres != fb->info.var.xres ||
- var->yres != fb->info.var.yres ||
- var->bits_per_pixel != fb->info.var.bits_per_pixel)
+ if (var->xres != fb->info->var.xres ||
+ var->yres != fb->info->var.yres ||
+ var->bits_per_pixel != fb->info->var.bits_per_pixel)
return -EINVAL;
var->xres_virtual = var->xres;
var->yres_virtual = var->yres;
var->xoffset = 0;
var->yoffset = 0;
- var->grayscale = fb->info.var.grayscale;
- var->red.length = fb->info.var.red.length;
- var->green.length = fb->info.var.green.length;
- var->blue.length = fb->info.var.blue.length;
+ var->grayscale = fb->info->var.grayscale;
+ var->red.length = fb->info->var.red.length;
+ var->green.length = fb->info->var.green.length;
+ var->blue.length = fb->info->var.blue.length;
return 0;
}
@@ -947,7 +947,7 @@ static int
stifb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp, struct fb_info *info)
{
- struct stifb_info *fb = container_of(info, struct stifb_info, info);
+ struct stifb_info *fb = info->par;
u32 color;
if (regno >= NR_PALETTE)
@@ -961,7 +961,7 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
START_IMAGE_COLORMAP_ACCESS(fb);
- if (unlikely(fb->info.var.grayscale)) {
+ if (unlikely(fb->info->var.grayscale)) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
color = ((red * 77) +
(green * 151) +
@@ -972,10 +972,10 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
(blue));
}
- if (fb->info.fix.visual == FB_VISUAL_DIRECTCOLOR) {
- struct fb_var_screeninfo *var = &fb->info.var;
+ if (fb->info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ struct fb_var_screeninfo *var = &fb->info->var;
if (regno < 16)
- ((u32 *)fb->info.pseudo_palette)[regno] =
+ ((u32 *)fb->info->pseudo_palette)[regno] =
regno << var->red.offset |
regno << var->green.offset |
regno << var->blue.offset;
@@ -1007,7 +1007,7 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
static int
stifb_blank(int blank_mode, struct fb_info *info)
{
- struct stifb_info *fb = container_of(info, struct stifb_info, info);
+ struct stifb_info *fb = info->par;
int enable = (blank_mode == 0) ? ENABLE : DISABLE;
switch (fb->id) {
@@ -1036,12 +1036,12 @@ stifb_blank(int blank_mode, struct fb_info *info)
static void
stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct stifb_info *fb = container_of(info, struct stifb_info, info);
+ struct stifb_info *fb = info->par;
SETUP_COPYAREA(fb);
SETUP_HW(fb);
- if (fb->info.var.bits_per_pixel == 32) {
+ if (fb->info->var.bits_per_pixel == 32) {
WRITE_WORD(0xBBA0A000, fb, REG_10);
NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
@@ -1075,15 +1075,15 @@ stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void
stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct stifb_info *fb = container_of(info, struct stifb_info, info);
+ struct stifb_info *fb = info->par;
if (rect->rop != ROP_COPY ||
- (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32))
+ (fb->id == S9000_ID_HCRX && fb->info->var.bits_per_pixel == 32))
return cfb_fillrect(info, rect);
SETUP_HW(fb);
- if (fb->info.var.bits_per_pixel == 32) {
+ if (fb->info->var.bits_per_pixel == 32) {
WRITE_WORD(0xBBA0A000, fb, REG_10);
NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
@@ -1141,7 +1141,7 @@ stifb_init_display(struct stifb_info *fb)
switch (id) {
case S9000_ID_A1659A:
case S9000_ID_A1439A:
- if (fb->info.var.bits_per_pixel == 32)
+ if (fb->info->var.bits_per_pixel == 32)
ngleSetupAttrPlanes(fb, BUFF1_CMAP3);
else {
ngleSetupAttrPlanes(fb, BUFF1_CMAP0);
@@ -1151,14 +1151,14 @@ stifb_init_display(struct stifb_info *fb)
break;
case S9000_ID_ARTIST:
case CRT_ID_VISUALIZE_EG:
- if (fb->info.var.bits_per_pixel == 32)
+ if (fb->info->var.bits_per_pixel == 32)
ngleSetupAttrPlanes(fb, BUFF1_CMAP3);
else {
ngleSetupAttrPlanes(fb, ARTIST_CMAP0);
}
break;
}
- stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */
+ stifb_blank(0, fb->info); /* 0=enable screen */
SETUP_FB(fb);
}
@@ -1193,11 +1193,11 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
char *dev_name;
int bpp, xres, yres;
- fb = kzalloc(sizeof(*fb), GFP_ATOMIC);
- if (!fb)
+ info = framebuffer_alloc(sizeof(*fb), sti->dev);
+ if (!info)
return -ENOMEM;
-
- info = &fb->info;
+ fb = info->par;
+ fb->info = info;
/* set struct to a known state */
fix = &info->fix;
@@ -1389,11 +1389,10 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
}
/* save for primary gfx device detection & unregister_framebuffer() */
- sti->info = info;
- if (register_framebuffer(&fb->info) < 0)
+ if (register_framebuffer(fb->info) < 0)
goto out_err4;
- fb_info(&fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
+ fb_info(fb->info, "%s %dx%d-%d frame buffer device, %s, id: %04x, mmio: 0x%04lx\n",
fix->id,
var->xres,
var->yres,
@@ -1402,6 +1401,8 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
fb->id,
fix->mmio_start);
+ dev_set_drvdata(sti->dev, info);
+
return 0;
@@ -1414,8 +1415,7 @@ out_err2:
out_err1:
iounmap(info->screen_base);
out_err0:
- kfree(fb);
- sti->info = NULL;
+ framebuffer_release(info);
return -ENXIO;
}
@@ -1480,17 +1480,20 @@ stifb_cleanup(void)
sti = sti_get_rom(i);
if (!sti)
break;
- if (sti->info) {
- struct fb_info *info = sti->info;
- unregister_framebuffer(sti->info);
+ if (sti->dev) {
+ struct fb_info *info = dev_get_drvdata(sti->dev);
+
+ if (!info)
+ continue;
+ unregister_framebuffer(info);
release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
release_mem_region(info->fix.smem_start, info->fix.smem_len);
if (info->screen_base)
iounmap(info->screen_base);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
+ dev_set_drvdata(sti->dev, NULL);
}
- sti->info = NULL;
}
}
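
The stifb rework is the canonical fbdev lifetime fix: struct fb_info must be allocated by framebuffer_alloc() and freed by framebuffer_release(), never embedded in a driver struct and kfree()d, because the core manages the device embedded in it. Driver state moves behind info->par, with a back-pointer for the register-access macros. The resulting pairing, condensed into a sketch:

	static int example_stifb_alloc(struct sti_struct *sti)
	{
		struct stifb_info *fb;
		struct fb_info *info;

		/* allocate fb_info with sizeof(*fb) bytes of driver data appended */
		info = framebuffer_alloc(sizeof(*fb), sti->dev);
		if (!info)
			return -ENOMEM;
		fb = info->par;		/* driver state lives behind fb_info now */
		fb->info = info;	/* back-pointer used by the READ_/WRITE_ macros */

		if (register_framebuffer(info) < 0) {
			framebuffer_release(info);	/* never kfree() an fb_info */
			return -ENXIO;
		}
		dev_set_drvdata(sti->dev, info);	/* replaces the old sti->info */
		return 0;
	}
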
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 22aa95313..51ebe7835 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -1267,7 +1267,6 @@ static int tdfxfb_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, const char *name,
strscpy(chan->adapter.name, name, sizeof(chan->adapter.name));
chan->adapter.owner = THIS_MODULE;
- chan->adapter.class = I2C_CLASS_DDC;
chan->adapter.algo_data = &chan->algo;
chan->adapter.dev.parent = dev;
chan->algo.setsda = tdfxfb_ddc_setsda;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 816d40b6f..516cf2a18 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -274,7 +274,6 @@ static int tridentfb_setup_ddc_bus(struct fb_info *info)
strscpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
- par->ddc_adapter.class = I2C_CLASS_DDC;
par->ddc_adapter.algo_data = &par->ddc_algo;
par->ddc_adapter.dev.parent = info->device;
if (is_oldclock(par->chip_id)) { /* not sure if this check is OK */
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 2460ff4ac..1514ddac4 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -331,6 +331,8 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (info->fbdefio)
return fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/vermilion/Makefile b/drivers/video/fbdev/vermilion/Makefile
deleted file mode 100644
index 22e9e4635..000000000
--- a/drivers/video/fbdev/vermilion/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_FB_LE80578) += vmlfb.o
-obj-$(CONFIG_FB_CARILLO_RANCH) += crvml.o
-
-vmlfb-objs := vermilion.o
-crvml-objs := cr_pll.o
diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c
deleted file mode 100644
index 79d42b23d..000000000
--- a/drivers/video/fbdev/vermilion/cr_pll.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) Intel Corp. 2007.
- * All Rights Reserved.
- *
- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
- * develop this driver.
- *
- * This file is part of the Carillo Ranch video subsystem driver.
- *
- * Authors:
- * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- * Alan Hourihane <alanh-at-tungstengraphics-dot-com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/fb.h>
-#include "vermilion.h"
-
-/* The PLL Clock register sits on Host bridge */
-#define CRVML_DEVICE_MCH 0x5001
-#define CRVML_REG_MCHBAR 0x44
-#define CRVML_REG_MCHEN 0x54
-#define CRVML_MCHEN_BIT (1 << 28)
-#define CRVML_MCHMAP_SIZE 4096
-#define CRVML_REG_CLOCK 0xc3c
-#define CRVML_CLOCK_SHIFT 8
-#define CRVML_CLOCK_MASK 0x00000f00
-
-static struct pci_dev *mch_dev;
-static u32 mch_bar;
-static void __iomem *mch_regs_base;
-static u32 saved_clock;
-
-static const unsigned crvml_clocks[] = {
- 6750,
- 13500,
- 27000,
- 29700,
- 37125,
- 54000,
- 59400,
- 74250,
- 120000
- /*
- * There are more clocks, but they are disabled on the CR board.
- */
-};
-
-static const u32 crvml_clock_bits[] = {
- 0x0a,
- 0x09,
- 0x08,
- 0x07,
- 0x06,
- 0x05,
- 0x04,
- 0x03,
- 0x0b
-};
-
-static const unsigned crvml_num_clocks = ARRAY_SIZE(crvml_clocks);
-
-static int crvml_sys_restore(struct vml_sys *sys)
-{
- void __iomem *clock_reg = mch_regs_base + CRVML_REG_CLOCK;
-
- iowrite32(saved_clock, clock_reg);
- ioread32(clock_reg);
-
- return 0;
-}
-
-static int crvml_sys_save(struct vml_sys *sys)
-{
- void __iomem *clock_reg = mch_regs_base + CRVML_REG_CLOCK;
-
- saved_clock = ioread32(clock_reg);
-
- return 0;
-}
-
-static int crvml_nearest_index(const struct vml_sys *sys, int clock)
-{
- int i;
- int cur_index = 0;
- int cur_diff;
- int diff;
-
- cur_diff = clock - crvml_clocks[0];
- cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
- for (i = 1; i < crvml_num_clocks; ++i) {
- diff = clock - crvml_clocks[i];
- diff = (diff < 0) ? -diff : diff;
- if (diff < cur_diff) {
- cur_index = i;
- cur_diff = diff;
- }
- }
- return cur_index;
-}
-
-static int crvml_nearest_clock(const struct vml_sys *sys, int clock)
-{
- return crvml_clocks[crvml_nearest_index(sys, clock)];
-}
-
-static int crvml_set_clock(struct vml_sys *sys, int clock)
-{
- void __iomem *clock_reg = mch_regs_base + CRVML_REG_CLOCK;
- int index;
- u32 clock_val;
-
- index = crvml_nearest_index(sys, clock);
-
- if (crvml_clocks[index] != clock)
- return -EINVAL;
-
- clock_val = ioread32(clock_reg) & ~CRVML_CLOCK_MASK;
- clock_val = crvml_clock_bits[index] << CRVML_CLOCK_SHIFT;
- iowrite32(clock_val, clock_reg);
- ioread32(clock_reg);
-
- return 0;
-}
-
-static struct vml_sys cr_pll_ops = {
- .name = "Carillo Ranch",
- .save = crvml_sys_save,
- .restore = crvml_sys_restore,
- .set_clock = crvml_set_clock,
- .nearest_clock = crvml_nearest_clock,
-};
-
-static int __init cr_pll_init(void)
-{
- int err;
- u32 dev_en;
-
- mch_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
- CRVML_DEVICE_MCH, NULL);
- if (!mch_dev) {
- printk(KERN_ERR
- "Could not find Carillo Ranch MCH device.\n");
- return -ENODEV;
- }
-
- pci_read_config_dword(mch_dev, CRVML_REG_MCHEN, &dev_en);
- if (!(dev_en & CRVML_MCHEN_BIT)) {
- printk(KERN_ERR
- "Carillo Ranch MCH device was not enabled.\n");
- pci_dev_put(mch_dev);
- return -ENODEV;
- }
-
- pci_read_config_dword(mch_dev, CRVML_REG_MCHBAR,
- &mch_bar);
- mch_regs_base =
- ioremap(mch_bar, CRVML_MCHMAP_SIZE);
- if (!mch_regs_base) {
- printk(KERN_ERR
- "Carillo Ranch MCH device was not enabled.\n");
- pci_dev_put(mch_dev);
- return -ENODEV;
- }
-
- err = vmlfb_register_subsys(&cr_pll_ops);
- if (err) {
- printk(KERN_ERR
- "Carillo Ranch failed to initialize vml_sys.\n");
- iounmap(mch_regs_base);
- pci_dev_put(mch_dev);
- return err;
- }
-
- return 0;
-}
-
-static void __exit cr_pll_exit(void)
-{
- vmlfb_unregister_subsys(&cr_pll_ops);
-
- iounmap(mch_regs_base);
- pci_dev_put(mch_dev);
-}
-
-module_init(cr_pll_init);
-module_exit(cr_pll_exit);
-
-MODULE_AUTHOR("Tungsten Graphics Inc.");
-MODULE_DESCRIPTION("Carillo Ranch PLL Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
deleted file mode 100644
index 840ead696..000000000
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ /dev/null
@@ -1,1173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) Intel Corp. 2007.
- * All Rights Reserved.
- *
- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
- * develop this driver.
- *
- * This file is part of the Vermilion Range fb driver.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Michel Dänzer <michel-at-tungstengraphics-dot-com>
- * Alan Hourihane <alanh-at-tungstengraphics-dot-com>
- */
-
-#include <linux/aperture.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/pci.h>
-#include <asm/set_memory.h>
-#include <asm/tlbflush.h>
-#include <linux/mmzone.h>
-
-/* #define VERMILION_DEBUG */
-
-#include "vermilion.h"
-
-#define MODULE_NAME "vmlfb"
-
-#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static struct mutex vml_mutex;
-static struct list_head global_no_mode;
-static struct list_head global_has_mode;
-static struct fb_ops vmlfb_ops;
-static struct vml_sys *subsys = NULL;
-static char *vml_default_mode = "1024x768@60";
-static const struct fb_videomode defaultmode = {
- NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
- 0, FB_VMODE_NONINTERLACED
-};
-
-static u32 vml_mem_requested = (10 * 1024 * 1024);
-static u32 vml_mem_contig = (4 * 1024 * 1024);
-static u32 vml_mem_min = (4 * 1024 * 1024);
-
-static u32 vml_clocks[] = {
- 6750,
- 13500,
- 27000,
- 29700,
- 37125,
- 54000,
- 59400,
- 74250,
- 120000,
- 148500
-};
-
-static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);
-
-/*
- * Allocate a contiguous vram area and make its linear kernel map
- * uncached.
- */
-
-static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
- unsigned min_order)
-{
- gfp_t flags;
- unsigned long i;
-
- max_order++;
- do {
- /*
- * Really try hard to get the needed memory.
- * We need memory below the first 32MB, so we
- * add the __GFP_DMA flag that guarantees that we are
- * below the first 16MB.
- */
-
- flags = __GFP_DMA | __GFP_HIGH | __GFP_KSWAPD_RECLAIM;
- va->logical =
- __get_free_pages(flags, --max_order);
- } while (va->logical == 0 && max_order > min_order);
-
- if (!va->logical)
- return -ENOMEM;
-
- va->phys = virt_to_phys((void *)va->logical);
- va->size = PAGE_SIZE << max_order;
- va->order = max_order;
-
- /*
- * It seems like __get_free_pages only ups the usage count
- * of the first page. This doesn't work with fault mapping, so
- * up the usage count once more (XXX: should use split_page or
- * compound page).
- */
-
- memset((void *)va->logical, 0x00, va->size);
- for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
- get_page(virt_to_page(i));
- }
-
- /*
- * Change caching policy of the linear kernel map to avoid
- * mapping type conflicts with user-space mappings.
- */
- set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
-
- printk(KERN_DEBUG MODULE_NAME
- ": Allocated %ld bytes vram area at 0x%08lx\n",
- va->size, va->phys);
-
- return 0;
-}
-
-/*
- * Free a contiguous vram area and reset its linear kernel map
- * mapping type.
- */
-
-static void vmlfb_free_vram_area(struct vram_area *va)
-{
- unsigned long j;
-
- if (va->logical) {
-
- /*
- * Reset the linear kernel map caching policy.
- */
-
- set_pages_wb(virt_to_page(va->logical),
- va->size >> PAGE_SHIFT);
-
- /*
- * Decrease the usage count on the pages we've used
- * to compensate for upping when allocating.
- */
-
- for (j = va->logical; j < va->logical + va->size;
- j += PAGE_SIZE) {
- (void)put_page_testzero(virt_to_page(j));
- }
-
- printk(KERN_DEBUG MODULE_NAME
- ": Freeing %ld bytes vram area at 0x%08lx\n",
- va->size, va->phys);
- free_pages(va->logical, va->order);
-
- va->logical = 0;
- }
-}
-
-/*
- * Free allocated vram.
- */
-
-static void vmlfb_free_vram(struct vml_info *vinfo)
-{
- int i;
-
- for (i = 0; i < vinfo->num_areas; ++i) {
- vmlfb_free_vram_area(&vinfo->vram[i]);
- }
- vinfo->num_areas = 0;
-}
-
-/*
- * Allocate vram. Currently we try to allocate contiguous areas from the
- * __GFP_DMA zone and puzzle them together. A better approach would be to
- * allocate one contiguous area for scanout and use one-page allocations for
- * offscreen areas. This requires user-space and GPU virtual mappings.
- */
-
-static int vmlfb_alloc_vram(struct vml_info *vinfo,
- size_t requested,
- size_t min_total, size_t min_contig)
-{
- int i, j;
- int order;
- int contiguous;
- int err;
- struct vram_area *va;
- struct vram_area *va2;
-
- vinfo->num_areas = 0;
- for (i = 0; i < VML_VRAM_AREAS; ++i) {
- va = &vinfo->vram[i];
- order = 0;
-
- while (requested > (PAGE_SIZE << order) && order <= MAX_ORDER)
- order++;
-
- err = vmlfb_alloc_vram_area(va, order, 0);
-
- if (err)
- break;
-
- if (i == 0) {
- vinfo->vram_start = va->phys;
- vinfo->vram_logical = (void __iomem *) va->logical;
- vinfo->vram_contig_size = va->size;
- vinfo->num_areas = 1;
- } else {
- contiguous = 0;
-
- for (j = 0; j < i; ++j) {
- va2 = &vinfo->vram[j];
- if (va->phys + va->size == va2->phys ||
- va2->phys + va2->size == va->phys) {
- contiguous = 1;
- break;
- }
- }
-
- if (contiguous) {
- vinfo->num_areas++;
- if (va->phys < vinfo->vram_start) {
- vinfo->vram_start = va->phys;
- vinfo->vram_logical =
- (void __iomem *)va->logical;
- }
- vinfo->vram_contig_size += va->size;
- } else {
- vmlfb_free_vram_area(va);
- break;
- }
- }
-
- if (requested < va->size)
- break;
- else
- requested -= va->size;
- }
-
- if (vinfo->vram_contig_size > min_total &&
- vinfo->vram_contig_size > min_contig) {
-
- printk(KERN_DEBUG MODULE_NAME
- ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
- (unsigned long)vinfo->vram_contig_size,
- (unsigned long)vinfo->vram_start);
-
- return 0;
- }
-
- printk(KERN_ERR MODULE_NAME
- ": Could not allocate requested minimal amount of vram.\n");
-
- vmlfb_free_vram(vinfo);
-
- return -ENOMEM;
-}
-
-/*
- * Find the GPU to use with our display controller.
- */
-
-static int vmlfb_get_gpu(struct vml_par *par)
-{
- mutex_lock(&vml_mutex);
-
- par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);
-
- if (!par->gpu) {
- mutex_unlock(&vml_mutex);
- return -ENODEV;
- }
-
- mutex_unlock(&vml_mutex);
-
- if (pci_enable_device(par->gpu) < 0) {
- pci_dev_put(par->gpu);
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
- * Find a contiguous vram area that contains a given offset from vram start.
- */
-static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
-{
- unsigned long aoffset;
- unsigned i;
-
- for (i = 0; i < vinfo->num_areas; ++i) {
- aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);
-
- if (aoffset < vinfo->vram[i].size) {
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-/*
- * Remap the MMIO register spaces of the VDC and the GPU.
- */
-
-static int vmlfb_enable_mmio(struct vml_par *par)
-{
- int err;
-
- par->vdc_mem_base = pci_resource_start(par->vdc, 0);
- par->vdc_mem_size = pci_resource_len(par->vdc, 0);
- if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
- printk(KERN_ERR MODULE_NAME
- ": Could not claim display controller MMIO.\n");
- return -EBUSY;
- }
- par->vdc_mem = ioremap(par->vdc_mem_base, par->vdc_mem_size);
- if (par->vdc_mem == NULL) {
- printk(KERN_ERR MODULE_NAME
- ": Could not map display controller MMIO.\n");
- err = -ENOMEM;
- goto out_err_0;
- }
-
- par->gpu_mem_base = pci_resource_start(par->gpu, 0);
- par->gpu_mem_size = pci_resource_len(par->gpu, 0);
- if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
- printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
- err = -EBUSY;
- goto out_err_1;
- }
- par->gpu_mem = ioremap(par->gpu_mem_base, par->gpu_mem_size);
- if (par->gpu_mem == NULL) {
- printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
- err = -ENOMEM;
- goto out_err_2;
- }
-
- return 0;
-
-out_err_2:
- release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
-out_err_1:
- iounmap(par->vdc_mem);
-out_err_0:
- release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
- return err;
-}
-
-/*
- * Unmap the VDC and GPU register spaces.
- */
-
-static void vmlfb_disable_mmio(struct vml_par *par)
-{
- iounmap(par->gpu_mem);
- release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
- iounmap(par->vdc_mem);
- release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
-}
-
-/*
- * Release and uninit the VDC and GPU.
- */
-
-static void vmlfb_release_devices(struct vml_par *par)
-{
- if (atomic_dec_and_test(&par->refcount)) {
- pci_disable_device(par->gpu);
- pci_disable_device(par->vdc);
- }
-}
-
-/*
- * Free up allocated resources for a device.
- */
-
-static void vml_pci_remove(struct pci_dev *dev)
-{
- struct fb_info *info;
- struct vml_info *vinfo;
- struct vml_par *par;
-
- info = pci_get_drvdata(dev);
- if (info) {
- vinfo = container_of(info, struct vml_info, info);
- par = vinfo->par;
- mutex_lock(&vml_mutex);
- unregister_framebuffer(info);
- fb_dealloc_cmap(&info->cmap);
- vmlfb_free_vram(vinfo);
- vmlfb_disable_mmio(par);
- vmlfb_release_devices(par);
- kfree(vinfo);
- kfree(par);
- mutex_unlock(&vml_mutex);
- }
-}
-
-static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
-{
- switch (var->bits_per_pixel) {
- case 16:
- var->blue.offset = 0;
- var->blue.length = 5;
- var->green.offset = 5;
- var->green.length = 5;
- var->red.offset = 10;
- var->red.length = 5;
- var->transp.offset = 15;
- var->transp.length = 1;
- break;
- case 32:
- var->blue.offset = 0;
- var->blue.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->red.offset = 16;
- var->red.length = 8;
- var->transp.offset = 24;
- var->transp.length = 0;
- break;
- default:
- break;
- }
-
- var->blue.msb_right = var->green.msb_right =
- var->red.msb_right = var->transp.msb_right = 0;
-}
-
-/*
- * Device initialization.
- * We initialize one vml_par struct per device and one vml_info
- * struct per pipe. Currently we have only one pipe.
- */
-
-static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct vml_info *vinfo;
- struct fb_info *info;
- struct vml_par *par;
- int err;
-
- err = aperture_remove_conflicting_pci_devices(dev, "vmlfb");
- if (err)
- return err;
-
- par = kzalloc(sizeof(*par), GFP_KERNEL);
- if (par == NULL)
- return -ENOMEM;
-
- vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
- if (vinfo == NULL) {
- err = -ENOMEM;
- goto out_err_0;
- }
-
- vinfo->par = par;
- par->vdc = dev;
- atomic_set(&par->refcount, 1);
-
- switch (id->device) {
- case VML_DEVICE_VDC:
- if ((err = vmlfb_get_gpu(par)))
- goto out_err_1;
- pci_set_drvdata(dev, &vinfo->info);
- break;
- default:
- err = -ENODEV;
- goto out_err_1;
- }
-
- info = &vinfo->info;
- info->flags = FBINFO_PARTIAL_PAN_OK;
-
- err = vmlfb_enable_mmio(par);
- if (err)
- goto out_err_2;
-
- err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
- vml_mem_contig, vml_mem_min);
- if (err)
- goto out_err_3;
-
- strcpy(info->fix.id, "Vermilion Range");
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
- info->fix.smem_start = vinfo->vram_start;
- info->fix.smem_len = vinfo->vram_contig_size;
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- info->fix.ypanstep = 1;
- info->fix.xpanstep = 1;
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_NONE;
- info->screen_base = vinfo->vram_logical;
- info->pseudo_palette = vinfo->pseudo_palette;
- info->par = par;
- info->fbops = &vmlfb_ops;
- info->device = &dev->dev;
-
- INIT_LIST_HEAD(&vinfo->head);
- vinfo->pipe_disabled = 1;
- vinfo->cur_blank_mode = FB_BLANK_UNBLANK;
-
- info->var.grayscale = 0;
- info->var.bits_per_pixel = 16;
- vmlfb_set_pref_pixel_format(&info->var);
-
- if (!fb_find_mode
- (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
- printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
- }
-
- if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
- err = -ENOMEM;
- goto out_err_4;
- }
-
- err = register_framebuffer(info);
- if (err) {
- printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
- goto out_err_5;
- }
-
- printk("Initialized vmlfb\n");
-
- return 0;
-
-out_err_5:
- fb_dealloc_cmap(&info->cmap);
-out_err_4:
- vmlfb_free_vram(vinfo);
-out_err_3:
- vmlfb_disable_mmio(par);
-out_err_2:
- vmlfb_release_devices(par);
-out_err_1:
- kfree(vinfo);
-out_err_0:
- kfree(par);
- return err;
-}
-
-static int vmlfb_open(struct fb_info *info, int user)
-{
- /*
- * Save registers here?
- */
- return 0;
-}
-
-static int vmlfb_release(struct fb_info *info, int user)
-{
- /*
- * Restore registers here.
- */
-
- return 0;
-}
-
-static int vml_nearest_clock(int clock)
-{
-
- int i;
- int cur_index;
- int cur_diff;
- int diff;
-
- cur_index = 0;
- cur_diff = clock - vml_clocks[0];
- cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
- for (i = 1; i < vml_num_clocks; ++i) {
- diff = clock - vml_clocks[i];
- diff = (diff < 0) ? -diff : diff;
- if (diff < cur_diff) {
- cur_index = i;
- cur_diff = diff;
- }
- }
- return vml_clocks[cur_index];
-}
-
-static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
- struct vml_info *vinfo)
-{
- u32 pitch;
- u64 mem;
- int nearest_clock;
- int clock;
- int clock_diff;
- struct fb_var_screeninfo v;
-
- v = *var;
- clock = PICOS2KHZ(var->pixclock);
-
- if (subsys && subsys->nearest_clock) {
- nearest_clock = subsys->nearest_clock(subsys, clock);
- } else {
- nearest_clock = vml_nearest_clock(clock);
- }
-
- /*
- * Accept a 20% diff.
- */
-
- clock_diff = nearest_clock - clock;
- clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
- if (clock_diff > clock / 5) {
-#if 0
- printk(KERN_DEBUG MODULE_NAME ": Diff failure. %d %d\n",clock_diff,clock);
-#endif
- return -EINVAL;
- }
-
- v.pixclock = KHZ2PICOS(nearest_clock);
-
- if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
- printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
- return -EINVAL;
- }
- if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
- printk(KERN_DEBUG MODULE_NAME
- ": Virtual resolution failure.\n");
- return -EINVAL;
- }
- switch (v.bits_per_pixel) {
- case 0 ... 16:
- v.bits_per_pixel = 16;
- break;
- case 17 ... 32:
- v.bits_per_pixel = 32;
- break;
- default:
- printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
- var->bits_per_pixel);
- return -EINVAL;
- }
-
- pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
- mem = (u64)pitch * var->yres_virtual;
- if (mem > vinfo->vram_contig_size) {
- return -ENOMEM;
- }
-
- switch (v.bits_per_pixel) {
- case 16:
- if (var->blue.offset != 0 ||
- var->blue.length != 5 ||
- var->green.offset != 5 ||
- var->green.length != 5 ||
- var->red.offset != 10 ||
- var->red.length != 5 ||
- var->transp.offset != 15 || var->transp.length != 1) {
- vmlfb_set_pref_pixel_format(&v);
- }
- break;
- case 32:
- if (var->blue.offset != 0 ||
- var->blue.length != 8 ||
- var->green.offset != 8 ||
- var->green.length != 8 ||
- var->red.offset != 16 ||
- var->red.length != 8 ||
- (var->transp.length != 0 && var->transp.length != 8) ||
- (var->transp.length == 8 && var->transp.offset != 24)) {
- vmlfb_set_pref_pixel_format(&v);
- }
- break;
- default:
- return -EINVAL;
- }
-
- *var = v;
-
- return 0;
-}
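
The mode check above boils down to one tolerance rule: snap the requested pixel clock to the nearest supported rate, but only if that rate is within 20% (the clock_diff > clock / 5 test), then normalize bpp and channel layout to the two formats the pipe can scan out. The clock part, isolated as a sketch:

	static int example_validate_clock(struct fb_var_screeninfo *v)
	{
		int clock = PICOS2KHZ(v->pixclock);	/* picoseconds -> kHz */
		int nearest = vml_nearest_clock(clock);

		/* accept at most 20% deviation from a supported clock */
		if (abs(nearest - clock) > clock / 5)
			return -EINVAL;

		v->pixclock = KHZ2PICOS(nearest);	/* snap to the supported rate */
		return 0;
	}
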
-
-static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct vml_info *vinfo = container_of(info, struct vml_info, info);
- int ret;
-
- mutex_lock(&vml_mutex);
- ret = vmlfb_check_var_locked(var, vinfo);
- mutex_unlock(&vml_mutex);
-
- return ret;
-}
-
-static void vml_wait_vblank(struct vml_info *vinfo)
-{
-	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms) */
- mdelay(20);
-}
-
-static void vmlfb_disable_pipe(struct vml_info *vinfo)
-{
- struct vml_par *par = vinfo->par;
-
- /* Disable the MDVO pad */
- VML_WRITE32(par, VML_RCOMPSTAT, 0);
- while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;
-
- /* Disable display planes */
- VML_WRITE32(par, VML_DSPCCNTR,
- VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
- (void)VML_READ32(par, VML_DSPCCNTR);
- /* Wait for vblank for the disable to take effect */
- vml_wait_vblank(vinfo);
-
- /* Next, disable display pipes */
- VML_WRITE32(par, VML_PIPEACONF, 0);
- (void)VML_READ32(par, VML_PIPEACONF);
-
- vinfo->pipe_disabled = 1;
-}
-
-#ifdef VERMILION_DEBUG
-static void vml_dump_regs(struct vml_info *vinfo)
-{
- struct vml_par *par = vinfo->par;
-
- printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
- printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_HTOTAL_A));
- printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_HBLANK_A));
- printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_HSYNC_A));
- printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_VTOTAL_A));
- printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_VBLANK_A));
- printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_VSYNC_A));
- printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
- printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_DSPCSIZE));
- printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_DSPCPOS));
- printk(KERN_DEBUG MODULE_NAME ": \tDSPARB : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_DSPARB));
- printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_DSPCADDR));
- printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_BCLRPAT_A));
- printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_CANVSCLR_A));
- printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_PIPEASRC));
- printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_PIPEACONF));
- printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_DSPCCNTR));
- printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT : 0x%08x\n",
- (unsigned)VML_READ32(par, VML_RCOMPSTAT));
- printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
-}
-#endif
-
-static int vmlfb_set_par_locked(struct vml_info *vinfo)
-{
- struct vml_par *par = vinfo->par;
- struct fb_info *info = &vinfo->info;
- struct fb_var_screeninfo *var = &info->var;
- u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
- u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
- u32 dspcntr;
- int clock;
-
- vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
- vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
- info->fix.line_length = vinfo->stride;
-
- if (!subsys)
- return 0;
-
- htotal =
- var->xres + var->right_margin + var->hsync_len + var->left_margin;
- hactive = var->xres;
- hblank_start = var->xres;
- hblank_end = htotal;
- hsync_start = hactive + var->right_margin;
- hsync_end = hsync_start + var->hsync_len;
-
- vtotal =
- var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
- vactive = var->yres;
- vblank_start = var->yres;
- vblank_end = vtotal;
- vsync_start = vactive + var->lower_margin;
- vsync_end = vsync_start + var->vsync_len;
-
- dspcntr = VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
- clock = PICOS2KHZ(var->pixclock);
-
- if (subsys->nearest_clock) {
- clock = subsys->nearest_clock(subsys, clock);
- } else {
- clock = vml_nearest_clock(clock);
- }
- printk(KERN_DEBUG MODULE_NAME
- ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
- ((clock / htotal) * 1000) / vtotal);
-
- switch (var->bits_per_pixel) {
- case 16:
- dspcntr |= VML_GFX_ARGB1555;
- break;
- case 32:
- if (var->transp.length == 8)
- dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
- else
- dspcntr |= VML_GFX_RGB0888;
- break;
- default:
- return -EINVAL;
- }
-
- vmlfb_disable_pipe(vinfo);
- mb();
-
- if (subsys->set_clock)
- subsys->set_clock(subsys, clock);
- else
- return -EINVAL;
-
- VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
- VML_WRITE32(par, VML_HBLANK_A,
- ((hblank_end - 1) << 16) | (hblank_start - 1));
- VML_WRITE32(par, VML_HSYNC_A,
- ((hsync_end - 1) << 16) | (hsync_start - 1));
- VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
- VML_WRITE32(par, VML_VBLANK_A,
- ((vblank_end - 1) << 16) | (vblank_start - 1));
- VML_WRITE32(par, VML_VSYNC_A,
- ((vsync_end - 1) << 16) | (vsync_start - 1));
- VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
- VML_WRITE32(par, VML_DSPCSIZE,
- ((var->yres - 1) << 16) | (var->xres - 1));
- VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
- VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
- VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
- VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
- VML_WRITE32(par, VML_PIPEASRC,
- ((var->xres - 1) << 16) | (var->yres - 1));
-
- wmb();
- VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
- wmb();
- VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
- wmb();
- VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
- var->yoffset * vinfo->stride +
- var->xoffset * vinfo->bytes_per_pixel);
-
- VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);
-
- while (!(VML_READ32(par, VML_RCOMPSTAT) &
- (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE))) ;
-
- vinfo->pipe_disabled = 0;
-#ifdef VERMILION_DEBUG
- vml_dump_regs(vinfo);
-#endif
-
- return 0;
-}
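
The timing arithmetic above is the standard CRTC mapping of fbdev margins: blank and sync intervals are derived from xres/yres plus the margin and sync-length fields, and each register packs (end - 1) in the high half-word and (active or start - 1) in the low one. A worked example with illustrative numbers (not a validated Vermilion mode):

/*
 * Assume xres = 1024, right_margin = 24, hsync_len = 136,
 * left_margin = 160. Then:
 *
 *   htotal      = 1024 + 24 + 136 + 160 = 1344
 *   hsync_start = 1024 + 24             = 1048
 *   hsync_end   = 1048 + 136            = 1184
 *
 * and the register values become:
 *
 *   VML_HTOTAL_A = ((1344 - 1) << 16) | (1024 - 1)
 *   VML_HSYNC_A  = ((1184 - 1) << 16) | (1048 - 1)
 */
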
-
-static int vmlfb_set_par(struct fb_info *info)
-{
- struct vml_info *vinfo = container_of(info, struct vml_info, info);
- int ret;
-
- mutex_lock(&vml_mutex);
- list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
- ret = vmlfb_set_par_locked(vinfo);
-
- mutex_unlock(&vml_mutex);
- return ret;
-}
-
-static int vmlfb_blank_locked(struct vml_info *vinfo)
-{
- struct vml_par *par = vinfo->par;
- u32 cur = VML_READ32(par, VML_PIPEACONF);
-
- switch (vinfo->cur_blank_mode) {
- case FB_BLANK_UNBLANK:
- if (vinfo->pipe_disabled) {
- vmlfb_set_par_locked(vinfo);
- }
- VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
- (void)VML_READ32(par, VML_PIPEACONF);
- break;
- case FB_BLANK_NORMAL:
- if (vinfo->pipe_disabled) {
- vmlfb_set_par_locked(vinfo);
- }
- VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
- (void)VML_READ32(par, VML_PIPEACONF);
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- if (!vinfo->pipe_disabled) {
- vmlfb_disable_pipe(vinfo);
- }
- break;
- case FB_BLANK_POWERDOWN:
- if (!vinfo->pipe_disabled) {
- vmlfb_disable_pipe(vinfo);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vmlfb_blank(int blank_mode, struct fb_info *info)
-{
- struct vml_info *vinfo = container_of(info, struct vml_info, info);
- int ret;
-
- mutex_lock(&vml_mutex);
- vinfo->cur_blank_mode = blank_mode;
- ret = vmlfb_blank_locked(vinfo);
- mutex_unlock(&vml_mutex);
- return ret;
-}
-
-static int vmlfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct vml_info *vinfo = container_of(info, struct vml_info, info);
- struct vml_par *par = vinfo->par;
-
- mutex_lock(&vml_mutex);
- VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
- var->yoffset * vinfo->stride +
- var->xoffset * vinfo->bytes_per_pixel);
- (void)VML_READ32(par, VML_DSPCADDR);
- mutex_unlock(&vml_mutex);
-
- return 0;
-}
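
Panning only rewrites the plane start address: the (xoffset, yoffset) pair is folded into a byte offset from the VRAM base using the stride computed in vmlfb_set_par_locked(). With assumed numbers:

/*
 * Illustrative only: a 1024-pixel-wide, 32 bpp virtual screen gives
 * stride = ALIGN(1024 * 4, 0x40) = 4096 bytes, so panning to
 * (xoffset = 8, yoffset = 16) programs
 *
 *   DSPCADDR = vram_start + 16 * 4096 + 8 * 4 = vram_start + 65568
 */
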
-
-static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- u32 v;
-
- if (regno >= 16)
- return -EINVAL;
-
- if (info->var.grayscale) {
- red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
- }
-
- if (info->fix.visual != FB_VISUAL_TRUECOLOR)
- return -EINVAL;
-
- red = VML_TOHW(red, info->var.red.length);
- blue = VML_TOHW(blue, info->var.blue.length);
- green = VML_TOHW(green, info->var.green.length);
- transp = VML_TOHW(transp, info->var.transp.length);
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- switch (info->var.bits_per_pixel) {
- case 16:
- ((u32 *) info->pseudo_palette)[regno] = v;
- break;
- case 24:
- case 32:
- ((u32 *) info->pseudo_palette)[regno] = v;
- break;
- }
- return 0;
-}
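
VML_TOHW, used above, rescales fbdev's 16-bit colour components to the channel width before the shifts pack them into the pseudo-palette word. A standalone sketch, assuming the usual fbdev CNVT_TOHW-style definition of the macro:

/* Assumed to match the driver's VML_TOHW: scale a 16-bit component
 * down to `width` bits with rounding. */
#define TOHW(val, width)  ((((val) << (width)) + 0x7FFF - (val)) >> 16)

/* Packing an opaque mid-grey for ARGB8888 (offsets 16/8/0, alpha at 24):
 * TOHW(0x8000, 8) = 0x7F and TOHW(0xFFFF, 8) = 0xFF, so the palette
 * entry becomes 0xFF7F7F7F. */
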
-
-static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
- struct vml_info *vinfo = container_of(info, struct vml_info, info);
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int ret;
- unsigned long prot;
-
- ret = vmlfb_vram_offset(vinfo, offset);
- if (ret)
- return -EINVAL;
-
- prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
- pgprot_val(vma->vm_page_prot) =
- prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
-
- return vm_iomap_memory(vma, vinfo->vram_start,
- vinfo->vram_contig_size);
-}
-
-static int vmlfb_sync(struct fb_info *info)
-{
- return 0;
-}
-
-static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
-{
- return -EINVAL; /* just to force soft_cursor() call */
-}
-
-static struct fb_ops vmlfb_ops = {
- .owner = THIS_MODULE,
- .fb_open = vmlfb_open,
- .fb_release = vmlfb_release,
- __FB_DEFAULT_IOMEM_OPS_RDWR,
- .fb_check_var = vmlfb_check_var,
- .fb_set_par = vmlfb_set_par,
- .fb_blank = vmlfb_blank,
- .fb_pan_display = vmlfb_pan_display,
- __FB_DEFAULT_IOMEM_OPS_DRAW,
- .fb_cursor = vmlfb_cursor,
- .fb_sync = vmlfb_sync,
- .fb_mmap = vmlfb_mmap,
- .fb_setcolreg = vmlfb_setcolreg
-};
-
-static const struct pci_device_id vml_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
- {0}
-};
-
-static struct pci_driver vmlfb_pci_driver = {
- .name = "vmlfb",
- .id_table = vml_ids,
- .probe = vml_pci_probe,
- .remove = vml_pci_remove,
-};
-
-static void __exit vmlfb_cleanup(void)
-{
- pci_unregister_driver(&vmlfb_pci_driver);
-}
-
-static int __init vmlfb_init(void)
-{
-
-#ifndef MODULE
- char *option = NULL;
-#endif
-
- if (fb_modesetting_disabled("vmlfb"))
- return -ENODEV;
-
-#ifndef MODULE
- if (fb_get_options(MODULE_NAME, &option))
- return -ENODEV;
-#endif
-
- printk(KERN_DEBUG MODULE_NAME ": initializing\n");
- mutex_init(&vml_mutex);
- INIT_LIST_HEAD(&global_no_mode);
- INIT_LIST_HEAD(&global_has_mode);
-
- return pci_register_driver(&vmlfb_pci_driver);
-}
-
-int vmlfb_register_subsys(struct vml_sys *sys)
-{
- struct vml_info *entry;
- struct list_head *list;
- u32 save_activate;
-
- mutex_lock(&vml_mutex);
- if (subsys != NULL) {
- subsys->restore(subsys);
- }
- subsys = sys;
- subsys->save(subsys);
-
- /*
- * We need to restart list traversal for each item, since we
- * release the list mutex in the loop.
- */
-
- list = global_no_mode.next;
- while (list != &global_no_mode) {
- list_del_init(list);
- entry = list_entry(list, struct vml_info, head);
-
- /*
- * First, try the current mode which might not be
- * completely validated with respect to the pixel clock.
- */
-
- if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
- vmlfb_set_par_locked(entry);
- list_add_tail(list, &global_has_mode);
- } else {
-
- /*
- * Didn't work. Try to find another mode,
- * that matches this subsys.
- */
-
- mutex_unlock(&vml_mutex);
- save_activate = entry->info.var.activate;
- entry->info.var.bits_per_pixel = 16;
- vmlfb_set_pref_pixel_format(&entry->info.var);
- if (fb_find_mode(&entry->info.var,
- &entry->info,
- vml_default_mode, NULL, 0, NULL, 16)) {
- entry->info.var.activate |=
- FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
- fb_set_var(&entry->info, &entry->info.var);
- } else {
- printk(KERN_ERR MODULE_NAME
- ": Sorry. no mode found for this subsys.\n");
- }
- entry->info.var.activate = save_activate;
- mutex_lock(&vml_mutex);
- }
- vmlfb_blank_locked(entry);
- list = global_no_mode.next;
- }
- mutex_unlock(&vml_mutex);
-
- printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
- subsys->name ? subsys->name : "unknown");
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(vmlfb_register_subsys);
-
-void vmlfb_unregister_subsys(struct vml_sys *sys)
-{
- struct vml_info *entry, *next;
-
- mutex_lock(&vml_mutex);
- if (subsys != sys) {
- mutex_unlock(&vml_mutex);
- return;
- }
- subsys->restore(subsys);
- subsys = NULL;
- list_for_each_entry_safe(entry, next, &global_has_mode, head) {
- printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
- vmlfb_disable_pipe(entry);
- list_move_tail(&entry->head, &global_no_mode);
- }
- mutex_unlock(&vml_mutex);
-}
-
-EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);
-
-module_init(vmlfb_init);
-module_exit(vmlfb_cleanup);
-
-MODULE_AUTHOR("Tungsten Graphics");
-MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
-MODULE_VERSION("1.0.0");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/vermilion/vermilion.h b/drivers/video/fbdev/vermilion/vermilion.h
deleted file mode 100644
index 19cbbe76a..000000000
--- a/drivers/video/fbdev/vermilion/vermilion.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) Intel Corp. 2007.
- * All Rights Reserved.
- *
- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
- * develop this driver.
- *
- * This file is part of the Vermilion Range fb driver.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef _VERMILION_H_
-#define _VERMILION_H_
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/atomic.h>
-#include <linux/mutex.h>
-
-#define VML_DEVICE_GPU 0x5002
-#define VML_DEVICE_VDC 0x5009
-
-#define VML_VRAM_AREAS 3
-#define VML_MAX_XRES 1024
-#define VML_MAX_YRES 768
-#define VML_MAX_XRES_VIRTUAL 1040
-
-/*
- * Display controller registers:
- */
-
-/* Display controller 10-bit color representation */
-
-#define VML_R_MASK 0x3FF00000
-#define VML_R_SHIFT 20
-#define VML_G_MASK 0x000FFC00
-#define VML_G_SHIFT 10
-#define VML_B_MASK 0x000003FF
-#define VML_B_SHIFT 0
-
-/* Graphics plane control */
-#define VML_DSPCCNTR 0x00072180
-#define VML_GFX_ENABLE 0x80000000
-#define VML_GFX_GAMMABYPASS 0x40000000
-#define VML_GFX_ARGB1555 0x0C000000
-#define VML_GFX_RGB0888 0x18000000
-#define VML_GFX_ARGB8888 0x1C000000
-#define VML_GFX_ALPHACONST 0x02000000
-#define VML_GFX_ALPHAMULT 0x01000000
-#define VML_GFX_CONST_ALPHA 0x000000FF
-
-/* Graphics plane start address. Pixel aligned. */
-#define VML_DSPCADDR 0x00072184
-
-/* Graphics plane stride register. */
-#define VML_DSPCSTRIDE 0x00072188
-
-/* Graphics plane position register. */
-#define VML_DSPCPOS 0x0007218C
-#define VML_POS_YMASK 0x0FFF0000
-#define VML_POS_YSHIFT 16
-#define VML_POS_XMASK 0x00000FFF
-#define VML_POS_XSHIFT 0
-
-/* Graphics plane height and width */
-#define VML_DSPCSIZE 0x00072190
-#define VML_SIZE_HMASK 0x0FFF0000
-#define VML_SIZE_HSHIFT 16
-#define VML_SISE_WMASK 0x00000FFF
-#define VML_SIZE_WSHIFT 0
-
-/* Graphics plane gamma correction lookup table registers (129 * 32 bits) */
-#define VML_DSPCGAMLUT 0x00072200
-
-/* Pixel video output configuration register */
-#define VML_PVOCONFIG 0x00061140
-#define VML_CONFIG_BASE 0x80000000
-#define VML_CONFIG_PIXEL_SWAP 0x04000000
-#define VML_CONFIG_DE_INV 0x01000000
-#define VML_CONFIG_HREF_INV 0x00400000
-#define VML_CONFIG_VREF_INV 0x00100000
-#define VML_CONFIG_CLK_INV 0x00040000
-#define VML_CONFIG_CLK_DIV2 0x00010000
-#define VML_CONFIG_ESTRB_INV 0x00008000
-
-/* Pipe A Horizontal total register */
-#define VML_HTOTAL_A 0x00060000
-#define VML_HTOTAL_MASK 0x1FFF0000
-#define VML_HTOTAL_SHIFT 16
-#define VML_HTOTAL_VAL 8192
-#define VML_HACTIVE_MASK 0x000007FF
-#define VML_HACTIVE_SHIFT 0
-#define VML_HACTIVE_VAL 4096
-
-/* Pipe A Horizontal Blank register */
-#define VML_HBLANK_A 0x00060004
-#define VML_HBLANK_END_MASK 0x1FFF0000
-#define VML_HBLANK_END_SHIFT 16
-#define VML_HBLANK_END_VAL 8192
-#define VML_HBLANK_START_MASK 0x00001FFF
-#define VML_HBLANK_START_SHIFT 0
-#define VML_HBLANK_START_VAL 8192
-
-/* Pipe A Horizontal Sync register */
-#define VML_HSYNC_A 0x00060008
-#define VML_HSYNC_END_MASK 0x1FFF0000
-#define VML_HSYNC_END_SHIFT 16
-#define VML_HSYNC_END_VAL 8192
-#define VML_HSYNC_START_MASK 0x00001FFF
-#define VML_HSYNC_START_SHIFT 0
-#define VML_HSYNC_START_VAL 8192
-
-/* Pipe A Vertical total register */
-#define VML_VTOTAL_A 0x0006000C
-#define VML_VTOTAL_MASK 0x1FFF0000
-#define VML_VTOTAL_SHIFT 16
-#define VML_VTOTAL_VAL 8192
-#define VML_VACTIVE_MASK 0x000007FF
-#define VML_VACTIVE_SHIFT 0
-#define VML_VACTIVE_VAL 4096
-
-/* Pipe A Vertical Blank register */
-#define VML_VBLANK_A 0x00060010
-#define VML_VBLANK_END_MASK 0x1FFF0000
-#define VML_VBLANK_END_SHIFT 16
-#define VML_VBLANK_END_VAL 8192
-#define VML_VBLANK_START_MASK 0x00001FFF
-#define VML_VBLANK_START_SHIFT 0
-#define VML_VBLANK_START_VAL 8192
-
-/* Pipe A Vertical Sync register */
-#define VML_VSYNC_A 0x00060014
-#define VML_VSYNC_END_MASK 0x1FFF0000
-#define VML_VSYNC_END_SHIFT 16
-#define VML_VSYNC_END_VAL 8192
-#define VML_VSYNC_START_MASK 0x00001FFF
-#define VML_VSYNC_START_SHIFT 0
-#define VML_VSYNC_START_VAL 8192
-
-/* Pipe A Source Image size (minus one - equal to active size)
- * Programmable while pipe is enabled.
- */
-#define VML_PIPEASRC 0x0006001C
-#define VML_PIPEASRC_HMASK 0x0FFF0000
-#define VML_PIPEASRC_HSHIFT 16
-#define VML_PIPEASRC_VMASK 0x00000FFF
-#define VML_PIPEASRC_VSHIFT 0
-
-/* Pipe A Border Color Pattern register (10 bit color) */
-#define VML_BCLRPAT_A 0x00060020
-
-/* Pipe A Canvas Color register (10 bit color) */
-#define VML_CANVSCLR_A 0x00060024
-
-/* Pipe A Configuration register */
-#define VML_PIPEACONF 0x00070008
-#define VML_PIPE_BASE 0x00000000
-#define VML_PIPE_ENABLE 0x80000000
-#define VML_PIPE_FORCE_BORDER 0x02000000
-#define VML_PIPE_PLANES_OFF 0x00080000
-#define VML_PIPE_ARGB_OUTPUT_MODE 0x00040000
-
-/* Pipe A FIFO setting */
-#define VML_DSPARB 0x00070030
-#define VML_FIFO_DEFAULT 0x00001D9C
-
-/* MDVO rcomp status & pads control register */
-#define VML_RCOMPSTAT 0x00070048
-#define VML_MDVO_VDC_I_RCOMP 0x80000000
-#define VML_MDVO_POWERSAVE_OFF 0x00000008
-#define VML_MDVO_PAD_ENABLE 0x00000004
-#define VML_MDVO_PULLDOWN_ENABLE 0x00000001
-
-struct vml_par {
- struct pci_dev *vdc;
- u64 vdc_mem_base;
- u64 vdc_mem_size;
- char __iomem *vdc_mem;
-
- struct pci_dev *gpu;
- u64 gpu_mem_base;
- u64 gpu_mem_size;
- char __iomem *gpu_mem;
-
- atomic_t refcount;
-};
-
-struct vram_area {
- unsigned long logical;
- unsigned long phys;
- unsigned long size;
- unsigned order;
-};
-
-struct vml_info {
- struct fb_info info;
- struct vml_par *par;
- struct list_head head;
- struct vram_area vram[VML_VRAM_AREAS];
- u64 vram_start;
- u64 vram_contig_size;
- u32 num_areas;
- void __iomem *vram_logical;
- u32 pseudo_palette[16];
- u32 stride;
- u32 bytes_per_pixel;
- atomic_t vmas;
- int cur_blank_mode;
- int pipe_disabled;
-};
-
-/*
- * Subsystem
- */
-
-struct vml_sys {
- char *name;
-
- /*
- * Save / Restore;
- */
-
- int (*save) (struct vml_sys * sys);
- int (*restore) (struct vml_sys * sys);
-
- /*
- * PLL programming;
- */
-
- int (*set_clock) (struct vml_sys * sys, int clock);
- int (*nearest_clock) (const struct vml_sys * sys, int clock);
-};
-
-extern int vmlfb_register_subsys(struct vml_sys *sys);
-extern void vmlfb_unregister_subsys(struct vml_sys *sys);
-
-#define VML_READ32(_par, _offset) \
- (ioread32((_par)->vdc_mem + (_offset)))
-#define VML_WRITE32(_par, _offset, _value) \
- iowrite32(_value, (_par)->vdc_mem + (_offset))
-
-#endif
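
struct vml_sys is the hook by which a board-specific module supplies PLL programming; in-tree, the Carillo Ranch module (cr_pll.c, removed alongside this file) was the only provider. A minimal hypothetical implementation of the interface:

/* Hypothetical vml_sys provider; names and clock table invented. */
static int my_sys_save(struct vml_sys *sys)    { return 0; }
static int my_sys_restore(struct vml_sys *sys) { return 0; }

static int my_sys_nearest_clock(const struct vml_sys *sys, int clock)
{
	return clock >= 65000 ? 65000 : 25175;	/* toy two-entry table */
}

static int my_sys_set_clock(struct vml_sys *sys, int clock)
{
	/* program the PLL for `clock` kHz here */
	return 0;
}

static struct vml_sys my_sys = {
	.name          = "my_subsys",
	.save          = my_sys_save,
	.restore       = my_sys_restore,
	.set_clock     = my_sys_set_clock,
	.nearest_clock = my_sys_nearest_clock,
};

/* On load: vmlfb_register_subsys(&my_sys);
 * on unload: vmlfb_unregister_subsys(&my_sys); */
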
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 1b7c338f9..f86149ba3 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -80,15 +80,12 @@ static int vfb_mmap(struct fb_info *info,
static const struct fb_ops vfb_ops = {
.owner = THIS_MODULE,
- .fb_read = fb_sys_read,
- .fb_write = fb_sys_write,
+ __FB_DEFAULT_SYSMEM_OPS_RDWR,
.fb_check_var = vfb_check_var,
.fb_set_par = vfb_set_par,
.fb_setcolreg = vfb_setcolreg,
.fb_pan_display = vfb_pan_display,
- .fb_fillrect = sys_fillrect,
- .fb_copyarea = sys_copyarea,
- .fb_imageblit = sys_imageblit,
+ __FB_DEFAULT_SYSMEM_OPS_DRAW,
.fb_mmap = vfb_mmap,
};
@@ -385,6 +382,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
}
@@ -440,6 +439,7 @@ static int vfb_probe(struct platform_device *dev)
if (!info)
goto err;
+ info->flags |= FBINFO_VIRTFB;
info->screen_buffer = videomemory;
info->fbops = &vfb_ops;
diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
index 0a1bc7a4d..1e04026f0 100644
--- a/drivers/video/fbdev/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
if (op != VIA_BITBLT_FILL) {
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
writel(tmp, engine + 0x18);
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
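
Both hunks fix the same copy-paste slip: the test read dst_addr while the warning (and the intent) concerned the freshly computed source address in tmp. The mask encodes the engine's constraint that addresses be 8-byte aligned and fit in the low 29 bits; hoisting it into a helper would make the check harder to mistype. A sketch (via_src_addr_ok() is invented, not viafb API):

/* Engine addresses must be 8-byte aligned and below 2^29 bytes. */
static bool via_src_addr_ok(u32 addr)
{
	return !(addr & 0xE0000007);
}

	/* in hw_bitblt_1()/hw_bitblt_2(): */
	tmp = src_mem ? 0 : src_addr;
	if (!via_src_addr_ok(tmp))
		return -EINVAL;
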
diff --git a/drivers/video/fbdev/via/via_i2c.c b/drivers/video/fbdev/via/via_i2c.c
index c35e530e0..582502810 100644
--- a/drivers/video/fbdev/via/via_i2c.c
+++ b/drivers/video/fbdev/via/via_i2c.c
@@ -201,7 +201,6 @@ static int create_i2c_bus(struct i2c_adapter *adapter,
sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
adap_cfg->ioport_index);
adapter->owner = THIS_MODULE;
- adapter->class = I2C_CLASS_DDC;
adapter->algo_data = algo;
if (pdev)
adapter->dev.parent = &pdev->dev;
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index 42d39a9d5..ac7393707 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -241,6 +241,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
static const struct fb_ops vt8500lcd_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_set_par = vt8500lcd_set_par,
.fb_setcolreg = vt8500lcd_setcolreg,
.fb_fillrect = wmt_ge_fillrect,
@@ -250,6 +251,7 @@ static const struct fb_ops vt8500lcd_ops = {
.fb_ioctl = vt8500lcd_ioctl,
.fb_pan_display = vt8500lcd_pan_display,
.fb_blank = vt8500lcd_blank,
+ // .fb_mmap needs DMA mmap
};
static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
@@ -357,7 +359,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
- fbi->fb.screen_base = fb_mem_virt;
+ fbi->fb.screen_buffer = fb_mem_virt;
fbi->palette_size = PAGE_ALIGN(512);
fbi->palette_cpu = dma_alloc_coherent(&pdev->dev,
@@ -372,7 +374,6 @@ static int vt8500lcd_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ defined\n");
ret = -ENODEV;
goto failed_free_palette;
}
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 5833147aa..00952e9c8 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -248,6 +248,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info)
static const struct fb_ops wm8505fb_ops = {
.owner = THIS_MODULE,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
.fb_set_par = wm8505fb_set_par,
.fb_setcolreg = wm8505fb_setcolreg,
.fb_fillrect = wmt_ge_fillrect,
@@ -256,6 +257,7 @@ static const struct fb_ops wm8505fb_ops = {
.fb_sync = wmt_ge_sync,
.fb_pan_display = wm8505fb_pan_display,
.fb_blank = wm8505fb_blank,
+ __FB_DEFAULT_IOMEM_OPS_MMAP,
};
static int wm8505fb_probe(struct platform_device *pdev)
diff --git a/drivers/video/logo/pnmtologo.c b/drivers/video/logo/pnmtologo.c
index ada5ef6e5..2434a25af 100644
--- a/drivers/video/logo/pnmtologo.c
+++ b/drivers/video/logo/pnmtologo.c
@@ -249,10 +249,10 @@ static void write_footer(void)
fputs("\n};\n\n", out);
fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
- fprintf(out, "\t.width\t\t= %d,\n", logo_width);
- fprintf(out, "\t.height\t\t= %d,\n", logo_height);
+ fprintf(out, "\t.width\t\t= %u,\n", logo_width);
+ fprintf(out, "\t.height\t\t= %u,\n", logo_height);
if (logo_type == LINUX_LOGO_CLUT224) {
- fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+ fprintf(out, "\t.clutsize\t= %u,\n", logo_clutsize);
fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
}
fprintf(out, "\t.data\t\t= %s_data\n", logoname);
diff --git a/drivers/video/sticore.c b/drivers/video/sticore.c
index c3765ad6e..7115b3258 100644
--- a/drivers/video/sticore.c
+++ b/drivers/video/sticore.c
@@ -1041,6 +1041,9 @@ static int __init sticore_pa_init(struct parisc_device *dev)
print_pa_hwpath(dev, sti->pa_path);
sticore_check_for_default_sti(sti, sti->pa_path);
+
+ sti->dev = &dev->dev;
+
return 0;
}
@@ -1084,6 +1087,8 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
pr_warn("Unable to handle STI device '%s'\n", pci_name(pd));
return -ENODEV;
}
+
+ sti->dev = &pd->dev;
#endif /* CONFIG_PCI */
return 0;
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index bc564adcf..87f241825 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -994,7 +994,7 @@ e_unmap:
return ret;
}
-static int __exit sev_guest_remove(struct platform_device *pdev)
+static void __exit sev_guest_remove(struct platform_device *pdev)
{
struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
@@ -1003,8 +1003,6 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
deinit_crypto(snp_dev->crypto);
misc_deregister(&snp_dev->misc);
-
- return 0;
}
/*
@@ -1013,7 +1011,7 @@ static int __exit sev_guest_remove(struct platform_device *pdev)
* with the SEV-SNP support, it is named "sev-guest".
*/
static struct platform_driver sev_guest_driver = {
- .remove = __exit_p(sev_guest_remove),
+ .remove_new = __exit_p(sev_guest_remove),
.driver = {
.name = "sev-guest",
},
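
This is part of the tree-wide move to the void-returning platform remove callback: the int returned by .remove was effectively ignored by the driver core (it only triggered a warning), so drivers switch to .remove_new, with the expectation that it is renamed back to .remove once every driver is converted. The shape of the conversion, on a hypothetical driver:

/* Before: the error return suggested a recovery path that never existed. */
static int example_remove_old(struct platform_device *pdev)
{
	/* teardown */
	return 0;
}

/* After: void return, no misleading "return 0;". */
static void example_remove(struct platform_device *pdev)
{
	/* same teardown */
}

static struct platform_driver example_driver = {
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
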
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index dfd69bd77..c6e985599 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -33,16 +33,15 @@
VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
/**
- * Reserves memory in which the VMM can relocate any guest mappings
- * that are floating around.
+ * vbg_guest_mappings_init - Reserves memory in which the VMM can
+ * relocate any guest mappings that are floating around.
+ * @gdev: The Guest extension device.
*
* This operation is a little bit tricky since the VMM might not accept
* just any address because of address clashes between the three contexts
* it operates in, so we try several times.
*
* Failure to reserve the guest mappings is ignored.
- *
- * @gdev: The Guest extension device.
*/
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
@@ -125,7 +124,7 @@ out:
}
/**
- * Undo what vbg_guest_mappings_init did.
+ * vbg_guest_mappings_exit - Undo what vbg_guest_mappings_init did.
*
* @gdev: The Guest extension device.
*/
@@ -166,9 +165,10 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
}
/**
- * Report the guest information to the host.
- * Return: 0 or negative errno value.
+ * vbg_report_guest_info - Report the guest information to the host.
* @gdev: The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
@@ -229,10 +229,11 @@ out_free:
}
/**
- * Report the guest driver status to the host.
- * Return: 0 or negative errno value.
+ * vbg_report_driver_status - Report the guest driver status to the host.
* @gdev: The Guest extension device.
* @active: Flag whether the driver is now active or not.
+ *
+ * Return: 0 or negative errno value.
*/
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
@@ -261,10 +262,12 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
}
/**
- * Inflate the balloon by one chunk. The caller owns the balloon mutex.
- * Return: 0 or negative errno value.
+ * vbg_balloon_inflate - Inflate the balloon by one chunk. The caller
+ * owns the balloon mutex.
* @gdev: The Guest extension device.
* @chunk_idx: Index of the chunk.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
@@ -312,10 +315,12 @@ out_error:
}
/**
- * Deflate the balloon by one chunk. The caller owns the balloon mutex.
- * Return: 0 or negative errno value.
+ * vbg_balloon_deflate - Deflate the balloon by one chunk. The caller
+ * owns the balloon mutex.
* @gdev: The Guest extension device.
* @chunk_idx: Index of the chunk.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
@@ -344,7 +349,7 @@ static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
return 0;
}
-/**
+/*
* Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
* the host wants the balloon to be and adjust accordingly.
*/
@@ -409,7 +414,7 @@ static void vbg_balloon_work(struct work_struct *work)
}
}
-/**
+/*
* Callback for heartbeat timer.
*/
static void vbg_heartbeat_timer(struct timer_list *t)
@@ -422,11 +427,12 @@ static void vbg_heartbeat_timer(struct timer_list *t)
}
/**
- * Configure the host to check guest's heartbeat
- * and get heartbeat interval from the host.
- * Return: 0 or negative errno value.
+ * vbg_heartbeat_host_config - Configure the host to check guest's heartbeat
+ * and get heartbeat interval from the host.
* @gdev: The Guest extension device.
* @enabled: Set true to enable guest heartbeat checks on host.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
@@ -449,9 +455,11 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
}
/**
- * Initializes the heartbeat timer. This feature may be disabled by the host.
- * Return: 0 or negative errno value.
+ * vbg_heartbeat_init - Initializes the heartbeat timer. This feature
+ * may be disabled by the host.
* @gdev: The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
@@ -481,7 +489,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
}
/**
- * Cleanup hearbeat code, stop HB timer and disable host heartbeat checking.
+ * vbg_heartbeat_exit - Cleanup heartbeat code, stop HB timer and disable
+ * host heartbeat checking.
* @gdev: The Guest extension device.
*/
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
@@ -493,11 +502,12 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
}
/**
- * Applies a change to the bit usage tracker.
- * Return: true if the mask changed, false if not.
+ * vbg_track_bit_usage - Applies a change to the bit usage tracker.
* @tracker: The bit usage tracker.
* @changed: The bits to change.
* @previous: The previous value of the bits.
+ *
+ * Return: %true if the mask changed, %false if not.
*/
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
u32 changed, u32 previous)
@@ -529,10 +539,12 @@ static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
}
/**
- * Init and termination worker for resetting the (host) event filter on the host
- * Return: 0 or negative errno value.
+ * vbg_reset_host_event_filter - Init and termination worker for
+ * resetting the (host) event filter on the host
* @gdev: The Guest extension device.
* @fixed_events: Fixed events (init time).
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
u32 fixed_events)
@@ -556,12 +568,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
}
/**
- * Changes the event filter mask for the given session.
- *
- * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
- * do session cleanup. Takes the session mutex.
- *
- * Return: 0 or negative errno value.
+ * vbg_set_session_event_filter - Changes the event filter mask for the
+ * given session.
* @gdev: The Guest extension device.
* @session: The session.
* @or_mask: The events to add.
@@ -570,6 +578,11 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
+ *
+ * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
+ * do session cleanup. Takes the session mutex.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
struct vbg_session *session,
@@ -637,9 +650,11 @@ out:
}
/**
- * Init and termination worker for set guest capabilities to zero on the host.
- * Return: 0 or negative errno value.
+ * vbg_reset_host_capabilities - Init and termination worker for set
+ * guest capabilities to zero on the host.
* @gdev: The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
@@ -662,12 +677,14 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
}
/**
- * Set guest capabilities on the host.
- * Must be called with gdev->session_mutex hold.
- * Return: 0 or negative errno value.
+ * vbg_set_host_capabilities - Set guest capabilities on the host.
* @gdev: The Guest extension device.
* @session: The session.
* @session_termination: Set if we're called by the session cleanup code.
+ *
+ * Must be called with gdev->session_mutex held.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_set_host_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
@@ -704,9 +721,8 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev,
}
/**
- * Acquire (get exclusive access) guest capabilities for a session.
- * Takes the session mutex.
- * Return: 0 or negative errno value.
+ * vbg_acquire_session_capabilities - Acquire (get exclusive access)
+ * guest capabilities for a session.
* @gdev: The Guest extension device.
* @session: The session.
* @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX).
@@ -716,6 +732,10 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev,
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
+ *
+ * Takes the session mutex.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
@@ -811,8 +831,8 @@ out:
}
/**
- * Sets the guest capabilities for a session. Takes the session mutex.
- * Return: 0 or negative errno value.
+ * vbg_set_session_capabilities - Sets the guest capabilities for a
+ * session. Takes the session mutex.
* @gdev: The Guest extension device.
* @session: The session.
* @or_mask: The capabilities to add.
@@ -821,6 +841,8 @@ out:
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
@@ -866,9 +888,10 @@ out:
}
/**
- * vbg_query_host_version get the host feature mask and version information.
- * Return: 0 or negative errno value.
+ * vbg_query_host_version - get the host feature mask and version information.
* @gdev: The Guest extension device.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_query_host_version(struct vbg_dev *gdev)
{
@@ -905,19 +928,18 @@ out:
}
/**
- * Initializes the VBoxGuest device extension when the
- * device driver is loaded.
+ * vbg_core_init - Initializes the VBoxGuest device extension when the
+ * device driver is loaded.
+ * @gdev: The Guest extension device.
+ * @fixed_events: Events that will be enabled upon init and no client
+ * will ever be allowed to mask.
*
* The native code locates the VMMDev on the PCI bus and retrieve
* the MMIO and I/O port ranges, this function will take care of
* mapping the MMIO memory (if present). Upon successful return
* the native code should set up the interrupt handler.
*
- * Return: 0 or negative errno value.
- *
- * @gdev: The Guest extension device.
- * @fixed_events: Events that will be enabled upon init and no client
- * will ever be allowed to mask.
+ * Return: %0 or negative errno value.
*/
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
@@ -1017,11 +1039,12 @@ err_free_reqs:
}
/**
- * Call this on exit to clean-up vboxguest-core managed resources.
+ * vbg_core_exit - Call this on exit to clean-up vboxguest-core managed
+ * resources.
+ * @gdev: The Guest extension device.
*
* The native code should call this before the driver is loaded,
* but don't call this on shutdown.
- * @gdev: The Guest extension device.
*/
void vbg_core_exit(struct vbg_dev *gdev)
{
@@ -1046,12 +1069,13 @@ void vbg_core_exit(struct vbg_dev *gdev)
}
/**
- * Creates a VBoxGuest user session.
+ * vbg_core_open_session - Creates a VBoxGuest user session.
+ * @gdev: The Guest extension device.
+ * @requestor: VMMDEV_REQUESTOR_* flags
*
* vboxguest_linux.c calls this when userspace opens the char-device.
+ *
* Return: A pointer to the new session or an ERR_PTR on error.
- * @gdev: The Guest extension device.
- * @requestor: VMMDEV_REQUESTOR_* flags
*/
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
@@ -1068,7 +1092,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
}
/**
- * Closes a VBoxGuest session.
+ * vbg_core_close_session - Closes a VBoxGuest session.
* @session: The session to close (and free).
*/
void vbg_core_close_session(struct vbg_session *session)
@@ -1250,11 +1274,13 @@ static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
}
/**
- * Checks if the VMM request is allowed in the context of the given session.
- * Return: 0 or negative errno value.
+ * vbg_req_allowed - Checks if the VMM request is allowed in the
+ * context of the given session.
* @gdev: The Guest extension device.
* @session: The calling session.
* @req: The request.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
const struct vmmdev_request_header *req)
@@ -1670,11 +1696,12 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
}
/**
- * Common IOCtl for user to kernel communication.
- * Return: 0 or negative errno value.
+ * vbg_core_ioctl - Common IOCtl for user to kernel communication.
* @session: The client session.
* @req: The requested function.
* @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
+ *
+ * Return: %0 or negative errno value.
*/
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
@@ -1744,11 +1771,12 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
}
/**
- * Report guest supported mouse-features to the host.
+ * vbg_core_set_mouse_status - Report guest supported mouse-features to the host.
*
- * Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @features: The set of features to report to the host.
+ *
+ * Return: %0 or negative errno value.
*/
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
@@ -1772,7 +1800,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
return vbg_status_code_to_errno(rc);
}
-/** Core interrupt service routine. */
+/* Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
struct vbg_dev *gdev = dev_id;
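
The churn in this file is almost entirely converting free-form /** comments into real kernel-doc: the opening line becomes "name - summary", @param lines follow immediately, explanatory prose moves below them, return values get a Return: section, and literal constants are marked up as %0, %true and so on; comments that don't document a specific symbol are demoted to plain /* so the kernel-doc parser skips them. The target shape, shown on a hypothetical function:

/**
 * example_frob - Frob the widget into the requested state.
 * @widget: The widget to frob.
 * @state: Target state.
 *
 * Must be called with widget->lock held.
 *
 * Return: %0 or a negative errno value.
 */
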
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index c47e62dc5..8c92ea5b7 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -81,10 +81,11 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
}
/**
- * Close device.
- * Return: 0 on success, negated errno on failure.
+ * vbg_misc_device_close - Close device.
* @inode: Pointer to inode info structure.
* @filp: Associated file pointer.
+ *
+ * Return: %0 on success, negated errno on failure.
*/
static int vbg_misc_device_close(struct inode *inode, struct file *filp)
{
@@ -94,11 +95,12 @@ static int vbg_misc_device_close(struct inode *inode, struct file *filp)
}
/**
- * Device I/O Control entry point.
- * Return: 0 on success, negated errno on failure.
+ * vbg_misc_device_ioctl - Device I/O Control entry point.
* @filp: Associated file pointer.
* @req: The request specified to ioctl().
* @arg: The argument specified to ioctl().
+ *
+ * Return: %0 on success, negated errno on failure.
*/
static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
unsigned long arg)
@@ -173,7 +175,7 @@ out:
return ret;
}
-/** The file_operations structures. */
+/* The file_operations structures. */
static const struct file_operations vbg_misc_device_fops = {
.owner = THIS_MODULE,
.open = vbg_misc_device_open,
@@ -193,7 +195,7 @@ static const struct file_operations vbg_misc_device_user_fops = {
#endif
};
-/**
+/*
* Called when the input device is first opened.
*
* Sets up absolute mouse reporting.
@@ -206,7 +208,7 @@ static int vbg_input_open(struct input_dev *input)
return vbg_core_set_mouse_status(gdev, feat);
}
-/**
+/*
* Called if all open handles to the input device are closed.
*
* Disables absolute reporting.
@@ -218,7 +220,7 @@ static void vbg_input_close(struct input_dev *input)
vbg_core_set_mouse_status(gdev, 0);
}
-/**
+/*
* Creates the kernel input device.
*
* Return: 0 on success, negated errno on failure.
@@ -277,7 +279,7 @@ static struct attribute *vbg_pci_attrs[] = {
};
ATTRIBUTE_GROUPS(vbg_pci);
-/**
+/*
* Does the PCI detection and init of the device.
*
* Return: 0 on success, negated errno on failure.
@@ -453,7 +455,7 @@ void vbg_put_gdev(struct vbg_dev *gdev)
}
EXPORT_SYMBOL(vbg_put_gdev);
-/**
+/*
* Callback for mouse events.
*
* This is called at the end of the ISR, after leaving the event spinlock, if
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 8d195e3f8..1c02b3c0d 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -237,14 +237,16 @@ static int hgcm_call_preprocess_linaddr(
}
/**
- * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
- * figure out how much extra storage we need for page lists.
- * Return: 0 or negative errno value.
+ * hgcm_call_preprocess - Preprocesses the HGCM call, validate parameters,
+ * alloc bounce buffers and figure out how much extra storage we need for
+ * page lists.
* @src_parm: Pointer to source function call parameters
* @parm_count: Number of function call parameters.
* @bounce_bufs_ret: Where to return the allocated bouncebuffer array
* @extra: Where to return the extra request space needed for
* physical page lists.
+ *
+ * Return: %0 or negative errno value.
*/
static int hgcm_call_preprocess(
const struct vmmdev_hgcm_function_parameter *src_parm,
@@ -301,10 +303,11 @@ static int hgcm_call_preprocess(
}
/**
- * Translates linear address types to page list direction flags.
+ * hgcm_call_linear_addr_type_to_pagelist_flags - Translates linear address
+ * types to page list direction flags.
+ * @type: The type.
*
* Return: page list flags.
- * @type: The type.
*/
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
enum vmmdev_hgcm_function_parameter_type type)
@@ -369,7 +372,8 @@ static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
}
/**
- * Initializes the call request that we're sending to the host.
+ * hgcm_call_init_call - Initializes the call request that we're sending
+ * to the host.
* @call: The call to initialize.
* @client_id: The client ID of the caller.
* @function: The function number of the function to call.
@@ -425,7 +429,9 @@ static void hgcm_call_init_call(
}
/**
- * Tries to cancel a pending HGCM call.
+ * hgcm_cancel_call - Tries to cancel a pending HGCM call.
+ * @gdev: The VBoxGuest device extension.
+ * @call: The call to cancel.
*
* Return: VBox status code
*/
@@ -459,13 +465,15 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
}
/**
- * Performs the call and completion wait.
- * Return: 0 or negative errno value.
+ * vbg_hgcm_do_call - Performs the call and completion wait.
* @gdev: The VBoxGuest device extension.
* @call: The call to execute.
* @timeout_ms: Timeout in ms.
+ * @interruptible: whether this call is interruptible
* @leak_it: Where to return the leak it / free it, indicator.
* Cancellation fun.
+ *
+ * Return: %0 or negative errno value.
*/
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
u32 timeout_ms, bool interruptible, bool *leak_it)
@@ -545,13 +553,14 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
}
/**
- * Copies the result of the call back to the caller info structure and user
- * buffers.
- * Return: 0 or negative errno value.
+ * hgcm_call_copy_back_result - Copies the result of the call back to
+ * the caller info structure and user buffers.
* @call: HGCM call request.
* @dst_parm: Pointer to function call parameters destination.
* @parm_count: Number of function call parameters.
* @bounce_bufs: The bouncebuffer array.
+ *
+ * Return: %0 or negative errno value.
*/
static int hgcm_call_copy_back_result(
const struct vmmdev_hgcm_call *call,
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 0a53a6123..c17193544 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,6 +60,11 @@ config VIRTIO_PCI
If unsure, say M.
+config VIRTIO_PCI_ADMIN_LEGACY
+ bool
+ depends on VIRTIO_PCI && (X86 || COMPILE_TEST)
+ default y
+
config VIRTIO_PCI_LEGACY
bool "Support for legacy virtio draft 0.9.X and older devices"
default y
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 8e98d2491..73ace62af 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
+virtio_pci-$(CONFIG_VIRTIO_PCI_ADMIN_LEGACY) += virtio_pci_admin_legacy_io.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 71dee622b..f513ee21b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -302,9 +302,15 @@ static int virtio_dev_probe(struct device *_d)
if (err)
goto err;
+ if (dev->config->create_avq) {
+ err = dev->config->create_avq(dev);
+ if (err)
+ goto err;
+ }
+
err = drv->probe(dev);
if (err)
- goto err;
+ goto err_probe;
/* If probe didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -316,6 +322,10 @@ static int virtio_dev_probe(struct device *_d)
virtio_config_enable(dev);
return 0;
+
+err_probe:
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -331,6 +341,9 @@ static void virtio_dev_remove(struct device *_d)
drv->remove(dev);
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
+
/* Driver should have reset device. */
WARN_ON_ONCE(dev->config->get_status(dev));
@@ -503,6 +516,9 @@ int virtio_device_freeze(struct virtio_device *dev)
}
}
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_freeze);
@@ -538,10 +554,16 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
+ if (dev->config->create_avq) {
+ ret = dev->config->create_avq(dev);
+ if (ret)
+ goto err;
+ }
+
if (drv->restore) {
ret = drv->restore(dev);
if (ret)
- goto err;
+ goto err_restore;
}
/* If restore didn't do it, mark device DRIVER_OK ourselves. */
@@ -552,6 +574,9 @@ int virtio_device_restore(struct virtio_device *dev)
return 0;
+err_restore:
+ if (dev->config->destroy_avq)
+ dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
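
The probe and restore paths follow the kernel's goto-unwind idiom: once create_avq() has succeeded, any later failure must jump to a label that destroys the admin virtqueue before falling through to the generic failure path, while freeze and remove destroy it unconditionally. The skeleton, reduced from the probe hunk above:

	if (dev->config->create_avq) {
		err = dev->config->create_avq(dev);
		if (err)
			goto err;	/* nothing extra to undo yet */
	}

	err = drv->probe(dev);
	if (err)
		goto err_probe;		/* must tear the avq down again */

	return 0;

err_probe:
	if (dev->config->destroy_avq)
		dev->config->destroy_avq(dev);
err:
	virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
	return err;
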
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1fe93e93f..1f5b3dd31 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -33,7 +33,7 @@
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
-#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_ORDER
+#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
@@ -119,6 +119,11 @@ struct virtio_balloon {
/* Free page reporting device */
struct virtqueue *reporting_vq;
struct page_reporting_dev_info pr_dev_info;
+
+ /* State for keeping the wakeup_source active while adjusting the balloon */
+ spinlock_t adjustment_lock;
+ bool adjustment_signal_pending;
+ bool adjustment_in_progress;
};
static const struct virtio_device_id id_table[] = {
@@ -437,6 +442,31 @@ static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
queue_work(vb->balloon_wq, &vb->report_free_page_work);
}
+static void start_update_balloon_size(struct virtio_balloon *vb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vb->adjustment_lock, flags);
+ vb->adjustment_signal_pending = true;
+ if (!vb->adjustment_in_progress) {
+ vb->adjustment_in_progress = true;
+ pm_stay_awake(vb->vdev->dev.parent);
+ }
+ spin_unlock_irqrestore(&vb->adjustment_lock, flags);
+
+ queue_work(system_freezable_wq, &vb->update_balloon_size_work);
+}
+
+static void end_update_balloon_size(struct virtio_balloon *vb)
+{
+ spin_lock_irq(&vb->adjustment_lock);
+ if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) {
+ vb->adjustment_in_progress = false;
+ pm_relax(vb->vdev->dev.parent);
+ }
+ spin_unlock_irq(&vb->adjustment_lock);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -444,8 +474,7 @@ static void virtballoon_changed(struct virtio_device *vdev)
spin_lock_irqsave(&vb->stop_update_lock, flags);
if (!vb->stop_update) {
- queue_work(system_freezable_wq,
- &vb->update_balloon_size_work);
+ start_update_balloon_size(vb);
virtio_balloon_queue_free_page_work(vb);
}
spin_unlock_irqrestore(&vb->stop_update_lock, flags);
@@ -476,19 +505,25 @@ static void update_balloon_size_func(struct work_struct *work)
vb = container_of(work, struct virtio_balloon,
update_balloon_size_work);
- diff = towards_target(vb);
- if (!diff)
- return;
+ spin_lock_irq(&vb->adjustment_lock);
+ vb->adjustment_signal_pending = false;
+ spin_unlock_irq(&vb->adjustment_lock);
- if (diff > 0)
- diff -= fill_balloon(vb, diff);
- else
- diff += leak_balloon(vb, -diff);
- update_balloon_size(vb);
+ diff = towards_target(vb);
+
+ if (diff) {
+ if (diff > 0)
+ diff -= fill_balloon(vb, diff);
+ else
+ diff += leak_balloon(vb, -diff);
+ update_balloon_size(vb);
+ }
if (diff)
queue_work(system_freezable_wq, work);
+ else
+ end_update_balloon_size(vb);
}
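
The two flags close the race between the config-change notification and this worker: every notification sets adjustment_signal_pending and, if needed, takes the wakeup source; the worker clears the flag before reading the target, so a notification landing mid-adjustment keeps end_update_balloon_size() from dropping the wakeup source until a pass completes with nothing new pending. The pattern in miniature, with hypothetical names:

/* Keep-awake-while-working pattern (hypothetical struct ctx). */
static void work_signal(struct ctx *c)		/* notification path */
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->signal_pending = true;
	if (!c->in_progress) {
		c->in_progress = true;
		pm_stay_awake(c->dev);		/* block system suspend */
	}
	spin_unlock_irqrestore(&c->lock, flags);
	queue_work(system_freezable_wq, &c->work);
}

static void work_done(struct ctx *c)		/* end of a worker pass */
{
	spin_lock_irq(&c->lock);
	if (!c->signal_pending && c->in_progress) {
		c->in_progress = false;
		pm_relax(c->dev);		/* fully caught up */
	}
	spin_unlock_irq(&c->lock);
}
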
static int init_vqs(struct virtio_balloon *vb)
@@ -992,6 +1027,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_unregister_oom;
}
+ spin_lock_init(&vb->adjustment_lock);
+
virtio_device_ready(vdev);
if (towards_target(vb))
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index fa5226c19..8e3223294 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1154,13 +1154,13 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- unsigned long order = MAX_ORDER;
+ unsigned long order = MAX_PAGE_ORDER;
unsigned long i;
/*
* We might get called for ranges that don't cover properly aligned
- * MAX_ORDER pages; however, we can only online properly aligned
- * pages with an order of MAX_ORDER at maximum.
+ * MAX_PAGE_ORDER pages; however, we can only online properly aligned
+ * pages with an order of MAX_PAGE_ORDER at maximum.
*/
while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
order--;
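
The loop lowers the order until both the starting pfn and the length are aligned to it, since a buddy page of a given order must start on a matching boundary. With illustrative numbers:

/*
 * pfn = 0x1234, nr_pages = 0x400, MAX_PAGE_ORDER assumed 10:
 *
 *   pfn | nr_pages = 0x1634
 *   order 10: 0x1634 & 0x3ff = 0x234  -> not aligned, order--
 *   ...
 *   order 2:  0x1634 & 0x3   = 0      -> aligned; online in
 *                                        4-page (order-2) chunks
 */
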
@@ -1280,7 +1280,7 @@ static void virtio_mem_online_page(struct virtio_mem *vm,
bool do_online;
/*
- * We can get called with any order up to MAX_ORDER. If our subblock
+ * We can get called with any order up to MAX_PAGE_ORDER. If our subblock
* size is smaller than that and we have a mixture of plugged and
* unplugged subblocks within such a page, we have to process in
* smaller granularity. In that case we'll adjust the order exactly once
diff --git a/drivers/virtio/virtio_pci_admin_legacy_io.c b/drivers/virtio/virtio_pci_admin_legacy_io.c
new file mode 100644
index 000000000..819cfbbc6
--- /dev/null
+++ b/drivers/virtio/virtio_pci_admin_legacy_io.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <linux/virtio_pci_admin.h>
+#include "virtio_pci_common.h"
+
+/*
+ * virtio_pci_admin_has_legacy_io - Checks whether the legacy IO
+ * commands are supported
+ * @pdev: VF pci_dev
+ *
+ * Returns true if the legacy IO admin commands are supported.
+ */
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_pci_device *vp_dev;
+
+ if (!virtio_dev)
+ return false;
+
+ if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ vp_dev = to_vp_device(virtio_dev);
+
+ if ((vp_dev->admin_vq.supported_cmds & VIRTIO_LEGACY_ADMIN_CMD_BITMAP) ==
+ VIRTIO_LEGACY_ADMIN_CMD_BITMAP)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_legacy_io);
+
+static int virtio_pci_admin_legacy_io_write(struct pci_dev *pdev, u16 opcode,
+ u8 offset, u8 size, u8 *buf)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_legacy_wr_data *data;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist data_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data) + size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->offset = offset;
+ memcpy(data->registers, buf, size);
+ sg_init_one(&data_sg, data, sizeof(*data) + size);
+ cmd.opcode = cpu_to_le16(opcode);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_common_io_write - Write legacy common configuration
+ * of a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_write(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_write);
+
+/*
+ * virtio_pci_admin_legacy_device_io_write - Write legacy device configuration
+ * of a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to write to
+ * @size: size of the data to write
+ * @buf: buffer which holds the data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_write(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_write);
+
+static int virtio_pci_admin_legacy_io_read(struct pci_dev *pdev, u16 opcode,
+ u8 offset, u8 size, u8 *buf)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_legacy_rd_data *data;
+ struct scatterlist data_sg, result_sg;
+ struct virtio_admin_cmd cmd = {};
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->offset = offset;
+ sg_init_one(&data_sg, data, sizeof(*data));
+ sg_init_one(&result_sg, buf, size);
+ cmd.opcode = cpu_to_le16(opcode);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+ kfree(data);
+ return ret;
+}
+
+/*
+ * virtio_pci_admin_legacy_device_io_read - Read legacy device configuration of
+ * a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the device configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_read(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_device_io_read);
+
+/*
+ * virtio_pci_admin_legacy_common_io_read - Read legacy common configuration of
+ * a member device
+ * @pdev: VF pci_dev
+ * @offset: starting byte offset within the common configuration area to read from
+ * @size: size of the data to be read
+ * @buf: buffer to hold the returned data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf)
+{
+ return virtio_pci_admin_legacy_io_read(pdev,
+ VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ,
+ offset, size, buf);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_common_io_read);
+
+/*
+ * virtio_pci_admin_legacy_io_notify_info - Read the queue notification
+ * information for legacy interface
+ * @pdev: VF pci_dev
+ * @req_bar_flags: requested bar flags
+ * @bar: on output the BAR number of the owner or member device
+ * @bar_offset: on output the offset within bar
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+ u8 req_bar_flags, u8 *bar,
+ u64 *bar_offset)
+{
+ struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+ struct virtio_admin_cmd_notify_info_result *result;
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ int vf_id;
+ int ret;
+
+ if (!virtio_dev)
+ return -ENODEV;
+
+ vf_id = pci_iov_vf_id(pdev);
+ if (vf_id < 0)
+ return vf_id;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ sg_init_one(&result_sg, result, sizeof(*result));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_member_id = cpu_to_le64(vf_id + 1);
+ cmd.result_sg = &result_sg;
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (!ret) {
+ struct virtio_admin_cmd_notify_info_data *entry;
+ int i;
+
+ ret = -ENOENT;
+ for (i = 0; i < VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO; i++) {
+ entry = &result->entries[i];
+ if (entry->flags == VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END)
+ break;
+ if (entry->flags != req_bar_flags)
+ continue;
+ *bar = entry->bar;
+ *bar_offset = le64_to_cpu(entry->offset);
+ ret = 0;
+ break;
+ }
+ }
+
+ kfree(result);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_legacy_io_notify_info);
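Taken together, the exported helpers above form the legacy I/O tunnel that a VF-bound driver can use. A minimal consumer sketch, assuming a VF pci_dev obtained elsewhere (the function below and its wiring are hypothetical and only illustrate the call pattern; the flag constant comes from the virtio uapi headers):

    /* Hypothetical consumer, not part of this patch. */
    static int demo_legacy_io(struct pci_dev *vf_pdev)
    {
            u64 bar_offset;
            u8 bar, status = 0;
            int ret;

            /* Ask where legacy queue notifications should be written. */
            ret = virtio_pci_admin_legacy_io_notify_info(vf_pdev,
                            VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM,
                            &bar, &bar_offset);
            if (ret)
                    return ret;

            /* Read one byte of legacy device config; the caller must
             * serialize access to the device, as noted above.
             */
            return virtio_pci_admin_legacy_device_io_read(vf_pdev, 0, 1,
                                                          &status);
    }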
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 7a5593997..b655fccaf 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -236,6 +236,9 @@ void vp_del_vqs(struct virtio_device *vdev)
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ if (vp_dev->is_avq(vdev, vq->index))
+ continue;
+
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
@@ -492,8 +495,40 @@ static int virtio_pci_restore(struct device *dev)
return virtio_device_restore(&vp_dev->vdev);
}
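+/*
+ * If the device preserves its state in D3 (PCI No_Soft_Reset set), system
+ * suspend/resume can skip the full virtio freeze/restore cycle below;
+ * hibernation (freeze/thaw/poweroff/restore) always performs it.
+ */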
+static bool vp_supports_pm_no_reset(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u16 pmcsr;
+
+ if (!pci_dev->pm_cap)
+ return false;
+
+ pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
+ dev_err(dev, "Unable to query pmcsr");
+ return false;
+ }
+
+ return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
+}
+
+static int virtio_pci_suspend(struct device *dev)
+{
+ return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
+}
+
+static int virtio_pci_resume(struct device *dev)
+{
+ return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
+}
+
static const struct dev_pm_ops virtio_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
+ .suspend = virtio_pci_suspend,
+ .resume = virtio_pci_resume,
+ .freeze = virtio_pci_freeze,
+ .thaw = virtio_pci_restore,
+ .poweroff = virtio_pci_freeze,
+ .restore = virtio_pci_restore,
};
#endif
@@ -642,6 +677,17 @@ static struct pci_driver virtio_pci_driver = {
.sriov_configure = virtio_pci_sriov_configure,
};
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
+{
+ struct virtio_pci_device *pf_vp_dev;
+
+ pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
+ if (IS_ERR(pf_vp_dev))
+ return NULL;
+
+ return &pf_vp_dev->vdev;
+}
+
module_pci_driver(virtio_pci_driver);
MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 4b773bd7c..7fef52bee 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -29,6 +29,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
struct virtio_pci_vq_info {
/* the actual virtqueue */
@@ -41,6 +42,17 @@ struct virtio_pci_vq_info {
unsigned int msix_vector;
};
+struct virtio_pci_admin_vq {
+ /* Virtqueue info associated with this admin queue. */
+ struct virtio_pci_vq_info info;
+ /* serializes admin command execution and virtqueue deletion */
+ struct mutex cmd_lock;
+ u64 supported_cmds;
+ /* Name of the admin queue: avq.$vq_index. */
+ char name[10];
+ u16 vq_index;
+};
+
/* Our device structure */
struct virtio_pci_device {
struct virtio_device vdev;
@@ -58,9 +70,13 @@ struct virtio_pci_device {
spinlock_t lock;
struct list_head virtqueues;
- /* array of all queues for house-keeping */
+ /* Array of all virtqueues reported in the
+ * PCI common config num_queues field
+ */
struct virtio_pci_vq_info **vqs;
+ struct virtio_pci_admin_vq admin_vq;
+
/* MSI-X support */
int msix_enabled;
int intx_enabled;
@@ -86,6 +102,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
+ bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
};
/* Constants for MSI-X */
@@ -139,4 +156,27 @@ static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);
+struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
+
+#define VIRTIO_LEGACY_ADMIN_CMD_BITMAP \
+ (BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
+ BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))
+
+/* Unlike modern drivers which support hardware virtio devices, legacy drivers
+ * assume software-based devices: e.g. they don't use proper memory barriers
+ * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
+ * or less by chance. For now, only support legacy IO on X86.
+ */
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_LEGACY_ADMIN_CMD_BITMAP
+#else
+#define VIRTIO_ADMIN_CMD_BITMAP 0
+#endif
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd);
+
#endif
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index ee6a386d2..f62b530aa 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -19,6 +19,8 @@
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
+#define VIRTIO_AVQ_SGS_MAX 4
+
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -26,6 +28,187 @@ static u64 vp_get_features(struct virtio_device *vdev)
return vp_modern_get_features(&vp_dev->mdev);
}
+static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return false;
+
+ return index == vp_dev->admin_vq.vq_index;
+}
+
+static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
+ u16 opcode,
+ struct scatterlist **sgs,
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data)
+{
+ struct virtqueue *vq;
+ int ret, len;
+
+ vq = admin_vq->info.vq;
+ if (!vq)
+ return -EIO;
+
+ if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
+ opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
+ !((1ULL << opcode) & admin_vq->supported_cmds))
+ return -EOPNOTSUPP;
+
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
+ if (ret < 0)
+ return -EIO;
+
+ if (unlikely(!virtqueue_kick(vq)))
+ return -EIO;
+
+ while (!virtqueue_get_buf(vq, &len) &&
+ !virtqueue_is_broken(vq))
+ cpu_relax();
+
+ if (virtqueue_is_broken(vq))
+ return -EIO;
+
+ return 0;
+}
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+ struct virtio_admin_cmd *cmd)
+{
+ struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_admin_cmd_status *va_status;
+ unsigned int out_num = 0, in_num = 0;
+ struct virtio_admin_cmd_hdr *va_hdr;
+ u16 status;
+ int ret;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return -EOPNOTSUPP;
+
+ va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
+ if (!va_status)
+ return -ENOMEM;
+
+ va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
+ if (!va_hdr) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ va_hdr->opcode = cmd->opcode;
+ va_hdr->group_type = cmd->group_type;
+ va_hdr->group_member_id = cmd->group_member_id;
+
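+ /*
+ * Descriptor layout on the admin vq: the header and optional command
+ * data go out (device-readable), followed by the status word and
+ * optional result buffer which come back in (device-writable).
+ */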
+ /* Add header */
+ sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
+ sgs[out_num] = &hdr;
+ out_num++;
+
+ if (cmd->data_sg) {
+ sgs[out_num] = cmd->data_sg;
+ out_num++;
+ }
+
+ /* Add return status */
+ sg_init_one(&stat, va_status, sizeof(*va_status));
+ sgs[out_num + in_num] = &stat;
+ in_num++;
+
+ if (cmd->result_sg) {
+ sgs[out_num + in_num] = cmd->result_sg;
+ in_num++;
+ }
+
+ mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
+ le16_to_cpu(cmd->opcode),
+ sgs, out_num, in_num, sgs);
+ mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+
+ if (ret) {
+ dev_err(&vdev->dev,
+ "Failed to execute command on admin vq: %d\n.", ret);
+ goto err_cmd_exec;
+ }
+
+ status = le16_to_cpu(va_status->status);
+ if (status != VIRTIO_ADMIN_STATUS_OK) {
+ dev_err(&vdev->dev,
+ "admin command error: status(%#x) qualifier(%#x)\n",
+ status, le16_to_cpu(va_status->status_qualifier));
+ ret = -status;
+ }
+
+err_cmd_exec:
+ kfree(va_hdr);
+err_alloc:
+ kfree(va_status);
+ return ret;
+}
+
+static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+ struct virtio_admin_cmd cmd = {};
+ struct scatterlist result_sg;
+ struct scatterlist data_sg;
+ __le64 *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ sg_init_one(&result_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.result_sg = &result_sg;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ *data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
+ sg_init_one(&data_sg, data, sizeof(*data));
+ cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
+ cmd.data_sg = &data_sg;
+ cmd.result_sg = NULL;
+
+ ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+ if (ret)
+ goto end;
+
+ vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
+end:
+ kfree(data);
+}
+
+static void vp_modern_avq_activate(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ __virtqueue_unbreak(admin_vq->info.vq);
+ virtio_pci_admin_cmd_list_init(vdev);
+}
+
+static void vp_modern_avq_deactivate(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
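+ /* Mark the admin vq broken so concurrent pollers bail out with -EIO */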
+ __virtqueue_break(admin_vq->info.vq);
+}
+
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -37,6 +220,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
if (features & BIT_ULL(VIRTIO_F_RING_RESET))
__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
+
+ if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
+ __virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
}
static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
@@ -69,6 +255,9 @@ static int vp_check_common_size(struct virtio_device *vdev)
if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
return -EINVAL;
+ if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
+ return -EINVAL;
+
return 0;
}
@@ -195,6 +384,8 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
/* We should never be setting status to 0. */
BUG_ON(status == 0);
vp_modern_set_status(&vp_dev->mdev, status);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ vp_modern_avq_activate(vdev);
}
static void vp_reset(struct virtio_device *vdev)
@@ -211,6 +402,9 @@ static void vp_reset(struct virtio_device *vdev)
*/
while (vp_modern_get_status(mdev))
msleep(1);
+
+ vp_modern_avq_deactivate(vdev);
+
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -345,6 +539,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
bool (*notify)(struct virtqueue *vq);
struct virtqueue *vq;
+ bool is_avq;
u16 num;
int err;
@@ -353,11 +548,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
else
notify = vp_notify;
- if (index >= vp_modern_get_num_queues(mdev))
+ is_avq = vp_is_avq(&vp_dev->vdev, index);
+ if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
+ num = is_avq ?
+ VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
/* Check if queue is either not available or already active. */
- num = vp_modern_get_queue_size(mdev, index);
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
@@ -383,6 +580,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
goto err;
}
+ if (is_avq) {
+ mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ vp_dev->admin_vq.info.vq = vq;
+ mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+ }
+
return vq;
err:
@@ -418,6 +621,12 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ if (vp_is_avq(&vp_dev->vdev, vq->index)) {
+ mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ vp_dev->admin_vq.info.vq = NULL;
+ mutex_unlock(&vp_dev->admin_vq.cmd_lock);
+ }
+
if (vp_dev->msix_enabled)
vp_modern_queue_vector(mdev, vq->index,
VIRTIO_MSI_NO_VECTOR);
@@ -527,6 +736,45 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
+static int vp_modern_create_avq(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq;
+ struct virtqueue *vq;
+ u16 admin_q_num;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return 0;
+
+ admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
+ if (!admin_q_num)
+ return -EINVAL;
+
+ avq = &vp_dev->admin_vq;
+ avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
+ avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
+ if (IS_ERR(vq)) {
+ dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
+ PTR_ERR(vq));
+ return PTR_ERR(vq);
+ }
+
+ vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
+ return 0;
+}
+
+static void vp_modern_destroy_avq(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ vp_dev->del_vq(&vp_dev->admin_vq.info);
+}
+
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -545,6 +793,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+ .create_avq = vp_modern_create_avq,
+ .destroy_avq = vp_modern_destroy_avq,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -565,6 +815,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
+ .create_avq = vp_modern_create_avq,
+ .destroy_avq = vp_modern_destroy_avq,
};
/* the PCI probing function */
@@ -588,9 +840,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
+ vp_dev->is_avq = vp_is_avq;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
+ mutex_init(&vp_dev->admin_vq.cmd_lock);
return 0;
}
@@ -598,5 +852,6 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+ mutex_destroy(&vp_dev->admin_vq.cmd_lock);
vp_modern_remove(mdev);
}
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index 7de8b1eba..0d3dbfaf4 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -207,6 +207,10 @@ static inline void check_offsets(void)
offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_IDX !=
+ offsetof(struct virtio_pci_modern_common_cfg, admin_queue_index));
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_ADM_Q_NUM !=
+ offsetof(struct virtio_pci_modern_common_cfg, admin_queue_num));
}
/*
@@ -296,7 +300,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4, 0,
offsetofend(struct virtio_pci_modern_common_cfg,
- queue_reset),
+ admin_queue_num),
&mdev->common_len, NULL);
if (!mdev->common)
goto err_map_common;
@@ -719,6 +723,24 @@ void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+ return vp_ioread16(&cfg->admin_queue_num);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_num);
+
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev)
+{
+ struct virtio_pci_modern_common_cfg __iomem *cfg;
+
+ cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
+ return vp_ioread16(&cfg->admin_queue_index);
+}
+EXPORT_SYMBOL_GPL(vp_modern_avq_index);
+
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index ad3165732..513c0b114 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -5,6 +5,17 @@
menu "1-wire Bus Masters"
+config W1_MASTER_AMD_AXI
+ tristate "AMD AXI 1-wire bus host"
+ help
+ Say Y here if you want to support the AMD AXI 1-wire IP core.
+ This driver makes use of the programmable logic IP to perform
+ correctly timed 1-wire transactions without relying on GPIO timing
+ through the kernel.
+
+ This driver can also be built as a module. If so, the module will be
+ called amd_axi_w1.
+
config W1_MASTER_MATROX
tristate "Matrox G400 transport layer for 1-wire"
depends on PCI
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index c5d85a827..6c5a21f9b 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -3,6 +3,7 @@
# Makefile for 1-wire bus master drivers.
#
+obj-$(CONFIG_W1_MASTER_AMD_AXI) += amd_axi_w1.o
obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o
obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
diff --git a/drivers/w1/masters/amd_axi_w1.c b/drivers/w1/masters/amd_axi_w1.c
new file mode 100644
index 000000000..4d3a68ca9
--- /dev/null
+++ b/drivers/w1/masters/amd_axi_w1.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * amd_axi_w1 - AMD 1-wire programmable logic bus host driver
+ *
+ * Copyright (C) 2022-2023 Advanced Micro Devices, Inc. All Rights Reserved.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <linux/w1.h>
+
+/* 1-wire AMD IP definition */
+#define AXIW1_IPID 0x10ee4453
+/* Registers offset */
+#define AXIW1_INST_REG 0x0
+#define AXIW1_CTRL_REG 0x4
+#define AXIW1_IRQE_REG 0x8
+#define AXIW1_STAT_REG 0xC
+#define AXIW1_DATA_REG 0x10
+#define AXIW1_IPVER_REG 0x18
+#define AXIW1_IPID_REG 0x1C
+/* Instructions */
+#define AXIW1_INITPRES 0x0800
+#define AXIW1_READBIT 0x0C00
+#define AXIW1_WRITEBIT 0x0E00
+#define AXIW1_READBYTE 0x0D00
+#define AXIW1_WRITEBYTE 0x0F00
+/* Status flag masks */
+#define AXIW1_DONE BIT(0)
+#define AXIW1_READY BIT(4)
+#define AXIW1_PRESENCE BIT(31)
+#define AXIW1_MAJORVER_MASK GENMASK(23, 8)
+#define AXIW1_MINORVER_MASK GENMASK(7, 0)
+/* Control flag */
+#define AXIW1_GO BIT(0)
+#define AXI_CLEAR 0
+#define AXI_RESET BIT(31)
+#define AXIW1_READDATA BIT(0)
+/* Interrupt Enable */
+#define AXIW1_READY_IRQ_EN BIT(4)
+#define AXIW1_DONE_IRQ_EN BIT(0)
+
+#define AXIW1_TIMEOUT msecs_to_jiffies(100)
+
+#define DRIVER_NAME "amd_axi_w1"
+
+struct amd_axi_w1_local {
+ struct device *dev;
+ void __iomem *base_addr;
+ int irq;
+ atomic_t flag; /* Set on IRQ, cleared once serviced */
+ wait_queue_head_t wait_queue;
+ struct w1_bus_master bus_host;
+};
+
+/**
+ * amd_axi_w1_wait_irq_interruptible_timeout() - Wait for IRQ with timeout.
+ *
+ * @amd_axi_w1_local: Pointer to device structure
+ * @IRQ: IRQ channel to wait on
+ *
+ * Return: %0 - OK, %-EINTR - Interrupted, %-EBUSY - Timed out
+ */
+static int amd_axi_w1_wait_irq_interruptible_timeout(struct amd_axi_w1_local *amd_axi_w1_local,
+ u32 IRQ)
+{
+ int ret;
+
+ /* Enable the IRQ requested and wait for flag to indicate it's been triggered */
+ iowrite32(IRQ, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG);
+ ret = wait_event_interruptible_timeout(amd_axi_w1_local->wait_queue,
+ atomic_read(&amd_axi_w1_local->flag) != 0,
+ AXIW1_TIMEOUT);
+ if (ret < 0) {
+ dev_err(amd_axi_w1_local->dev, "Wait IRQ Interrupted\n");
+ return -EINTR;
+ }
+
+ if (!ret) {
+ dev_err(amd_axi_w1_local->dev, "Wait IRQ Timeout\n");
+ return -EBUSY;
+ }
+
+ atomic_set(&amd_axi_w1_local->flag, 0);
+ return 0;
+}
+
+/**
+ * amd_axi_w1_touch_bit() - Perform the touch-bit function: write a 0 or 1 and read the level.
+ *
+ * @data: Pointer to device structure
+ * @bit: The level to write
+ *
+ * Return: The level read
+ */
+static u8 amd_axi_w1_touch_bit(void *data, u8 bit)
+{
+ struct amd_axi_w1_local *amd_axi_w1_local = data;
+ u8 val = 0;
+ int rc;
+
+ /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+ AXIW1_READY_IRQ_EN);
+ if (rc < 0)
+ return 1; /* Caller doesn't test for error. Return inactive bus state */
+ }
+
+ if (bit)
+ /* Read: write the read-bit command to the instruction register */
+ iowrite32(AXIW1_READBIT, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+ else
+ /* Write: write the tx-bit command to the instruction register with the bit to transmit */
+ iowrite32(AXIW1_WRITEBIT + (bit & 0x01),
+ amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+ /* Write Go signal and clear control reset signal in control register */
+ iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ /* Wait for done signal to be 1 */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN);
+ if (rc < 0)
+ return 1; /* Caller doesn't test for error. Return inactive bus state */
+ }
+
+ /* If reading, retrieve the data from the data register */
+ if (bit)
+ val = (u8)(ioread32(amd_axi_w1_local->base_addr + AXIW1_DATA_REG) & AXIW1_READDATA);
+
+ /* Clear Go signal in the control register */
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ return val;
+}
+
+/**
+ * amd_axi_w1_read_byte - Performs the read byte function.
+ *
+ * @data: Pointer to device structure
+ * Return: The value read
+ */
+static u8 amd_axi_w1_read_byte(void *data)
+{
+ struct amd_axi_w1_local *amd_axi_w1_local = data;
+ u8 val = 0;
+ int rc;
+
+ /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+ AXIW1_READY_IRQ_EN);
+ if (rc < 0)
+ return 0xFF; /* Return inactive bus state */
+ }
+
+ /* Write read-byte command in the instruction register */
+ iowrite32(AXIW1_READBYTE, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+ /* Write Go signal and clear control reset signal in control register */
+ iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ /* Wait for done signal to be 1 */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN);
+ if (rc < 0)
+ return 0xFF; /* Return inactive bus state */
+ }
+
+ /* Retrieve the low byte of the data register to get the RX byte */
+ val = (u8)(ioread32(amd_axi_w1_local->base_addr + AXIW1_DATA_REG) & 0x000000FF);
+
+ /* Clear Go signal in control register */
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ return val;
+}
+
+/**
+ * amd_axi_w1_write_byte - Performs the write byte function.
+ *
+ * @data: Pointer to device structure
+ * @val: The value to write
+ */
+static void amd_axi_w1_write_byte(void *data, u8 val)
+{
+ struct amd_axi_w1_local *amd_axi_w1_local = data;
+ int rc;
+
+ /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+ AXIW1_READY_IRQ_EN);
+ if (rc < 0)
+ return;
+ }
+
+ /* Write tx Byte command in instruction register with bit to transmit */
+ iowrite32(AXIW1_WRITEBYTE + val, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+ /* Write Go signal and clear control reset signal in the control register */
+ iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ /* Wait for done signal to be 1 */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+ AXIW1_DONE_IRQ_EN);
+ if (rc < 0)
+ return;
+ }
+
+ /* Clear Go signal in control register */
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+}
+
+/**
+ * amd_axi_w1_reset_bus() - Issues a reset bus sequence.
+ *
+ * @data: the bus host data struct
+ * Return: 0=Device present, 1=No device present or error
+ */
+static u8 amd_axi_w1_reset_bus(void *data)
+{
+ struct amd_axi_w1_local *amd_axi_w1_local = data;
+ u8 val = 0;
+ int rc;
+
+ /* Reset 1-wire Axi IP */
+ iowrite32(AXI_RESET, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local,
+ AXIW1_READY_IRQ_EN);
+ if (rc < 0)
+ return 1; /* Something went wrong with the hardware */
+ }
+ /* Write Initialization command in instruction register */
+ iowrite32(AXIW1_INITPRES, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+
+ /* Write Go signal and clear control reset signal in the control register */
+ iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ /* Wait for done signal to be 1 */
+ while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) {
+ rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN);
+ if (rc < 0)
+ return 1; /* Something went wrong with the hardware */
+ }
+ /* Retrieve the MSB of the status register to get the failure bit */
+ if ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_PRESENCE) != 0)
+ val = 1;
+
+ /* Clear Go signal in control register */
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+
+ return val;
+}
+
+/* Reset the 1-wire AXI IP. Put the IP in reset state and clear registers */
+static void amd_axi_w1_reset(struct amd_axi_w1_local *amd_axi_w1_local)
+{
+ iowrite32(AXI_RESET, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG);
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_INST_REG);
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG);
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_STAT_REG);
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_DATA_REG);
+}
+
+static irqreturn_t amd_axi_w1_irq(int irq, void *lp)
+{
+ struct amd_axi_w1_local *amd_axi_w1_local = lp;
+
+ /* Reset interrupt trigger */
+ iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG);
+
+ atomic_set(&amd_axi_w1_local->flag, 1);
+ wake_up_interruptible(&amd_axi_w1_local->wait_queue);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_axi_w1_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_axi_w1_local *lp;
+ struct clk *clk;
+ u32 ver_major, ver_minor;
+ int val, rc = 0;
+
+ lp = devm_kzalloc(dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ lp->dev = dev;
+ lp->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ lp->irq = platform_get_irq(pdev, 0);
+ if (lp->irq < 0)
+ return lp->irq;
+
+ rc = devm_request_irq(dev, lp->irq, &amd_axi_w1_irq, IRQF_TRIGGER_HIGH, DRIVER_NAME, lp);
+ if (rc)
+ return rc;
+
+ /* Initialize wait queue and flag */
+ init_waitqueue_head(&lp->wait_queue);
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* Verify IP presence in HW */
+ if (ioread32(lp->base_addr + AXIW1_IPID_REG) != AXIW1_IPID) {
+ dev_err(dev, "AMD 1-wire IP not detected in hardware\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Allow for future driver expansion supporting new hardware features.
+ * This driver currently only supports hardware 1.x, but includes logic
+ * to detect a potentially incompatible future version by reading the
+ * major version ID. It is highly undesirable for new IP versions to
+ * break the API, but this code at least allows for graceful failure
+ * should that happen. Future features can be enabled by hardware
+ * incrementing the minor version and augmenting the driver to detect
+ * capability using the minor version number.
+ */
+ val = ioread32(lp->base_addr + AXIW1_IPVER_REG);
+ ver_major = FIELD_GET(AXIW1_MAJORVER_MASK, val);
+ ver_minor = FIELD_GET(AXIW1_MINORVER_MASK, val);
+
+ if (ver_major != 1) {
+ dev_err(dev, "AMD AXI W1 host version %u.%u is not supported by this driver",
+ ver_major, ver_minor);
+ return -ENODEV;
+ }
+
+ lp->bus_host.data = lp;
+ lp->bus_host.touch_bit = amd_axi_w1_touch_bit;
+ lp->bus_host.read_byte = amd_axi_w1_read_byte;
+ lp->bus_host.write_byte = amd_axi_w1_write_byte;
+ lp->bus_host.reset_bus = amd_axi_w1_reset_bus;
+
+ amd_axi_w1_reset(lp);
+
+ platform_set_drvdata(pdev, lp);
+ rc = w1_add_master_device(&lp->bus_host);
+ if (rc) {
+ dev_err(dev, "Could not add host device\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void amd_axi_w1_remove(struct platform_device *pdev)
+{
+ struct amd_axi_w1_local *lp = platform_get_drvdata(pdev);
+
+ w1_remove_master_device(&lp->bus_host);
+}
+
+static const struct of_device_id amd_axi_w1_of_match[] = {
+ { .compatible = "amd,axi-1wire-host" },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, amd_axi_w1_of_match);
+
+static struct platform_driver amd_axi_w1_driver = {
+ .probe = amd_axi_w1_probe,
+ .remove_new = amd_axi_w1_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = amd_axi_w1_of_match,
+ },
+};
+module_platform_driver(amd_axi_w1_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kris Chaplin <kris.chaplin@amd.com>");
+MODULE_DESCRIPTION("Driver for AMD AXI 1 Wire IP core");
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 5f5b97e24..e1cac0730 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -98,6 +98,8 @@
#define ST_EPOF 0x80
/* Status transfer size, 16 bytes status, 16 byte result flags */
#define ST_SIZE 0x20
+/* 1-wire data i/o fifo size, 128 bytes */
+#define FIFO_SIZE 0x80
/* Result Register flags */
#define RR_DETECT 0xA5 /* New device detected */
@@ -614,14 +616,11 @@ static int ds_read_byte(struct ds_device *dev, u8 *byte)
return 0;
}
-static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
+static int read_block_chunk(struct ds_device *dev, u8 *buf, int len)
{
struct ds_status st;
int err;
- if (len > 64*1024)
- return -E2BIG;
-
memset(buf, 0xFF, len);
err = ds_send_data(dev, buf, len);
@@ -640,6 +639,24 @@ static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
return err;
}
+static int ds_read_block(struct ds_device *dev, u8 *buf, int len)
+{
+ int err, to_read, rem = len;
+
+ if (len > 64 * 1024)
+ return -E2BIG;
+
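+ /*
+ * Read in FIFO_SIZE chunks so a single transfer never exceeds the
+ * 128-byte 1-wire data FIFO; e.g. a 300-byte read is issued as
+ * chunks of 128, 128 and 44 bytes.
+ */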
+ do {
+ to_read = rem <= FIFO_SIZE ? rem : FIFO_SIZE;
+ err = read_block_chunk(dev, &buf[len - rem], to_read);
+ if (err < 0)
+ return err;
+ rem -= to_read;
+ } while (rem);
+
+ return err;
+}
+
static int ds_write_block(struct ds_device *dev, u8 *buf, int len)
{
int err;
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index e45acb6d9..05c67038e 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/w1-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
#include <linux/err.h>
@@ -18,27 +17,33 @@
#include <linux/w1.h>
+struct w1_gpio_ddata {
+ struct gpio_desc *gpiod;
+ struct gpio_desc *pullup_gpiod;
+ unsigned int pullup_duration;
+};
+
static u8 w1_gpio_set_pullup(void *data, int delay)
{
- struct w1_gpio_platform_data *pdata = data;
+ struct w1_gpio_ddata *ddata = data;
if (delay) {
- pdata->pullup_duration = delay;
+ ddata->pullup_duration = delay;
} else {
- if (pdata->pullup_duration) {
+ if (ddata->pullup_duration) {
/*
* This will OVERRIDE open drain emulation and force-pull
* the line high for some time.
*/
- gpiod_set_raw_value(pdata->gpiod, 1);
- msleep(pdata->pullup_duration);
+ gpiod_set_raw_value(ddata->gpiod, 1);
+ msleep(ddata->pullup_duration);
/*
* This will simply set the line as input since we are doing
* open drain emulation in the GPIO library.
*/
- gpiod_set_value(pdata->gpiod, 1);
+ gpiod_set_value(ddata->gpiod, 1);
}
- pdata->pullup_duration = 0;
+ ddata->pullup_duration = 0;
}
return 0;
@@ -46,16 +51,16 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
static void w1_gpio_write_bit(void *data, u8 bit)
{
- struct w1_gpio_platform_data *pdata = data;
+ struct w1_gpio_ddata *ddata = data;
- gpiod_set_value(pdata->gpiod, bit);
+ gpiod_set_value(ddata->gpiod, bit);
}
static u8 w1_gpio_read_bit(void *data)
{
- struct w1_gpio_platform_data *pdata = data;
+ struct w1_gpio_ddata *ddata = data;
- return gpiod_get_value(pdata->gpiod) ? 1 : 0;
+ return gpiod_get_value(ddata->gpiod) ? 1 : 0;
}
#if defined(CONFIG_OF)
@@ -69,58 +74,48 @@ MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
- struct w1_gpio_platform_data *pdata;
+ struct w1_gpio_ddata *ddata;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
/* Enforce open drain mode by default */
enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
int err;
- if (of_have_populated_dt()) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- /*
- * This parameter means that something else than the gpiolib has
- * already set the line into open drain mode, so we should just
- * driver it high/low like we are in full control of the line and
- * open drain will happen transparently.
- */
- if (of_property_present(np, "linux,open-drain"))
- gflags = GPIOD_OUT_LOW;
-
- pdev->dev.platform_data = pdata;
- }
- pdata = dev_get_platdata(dev);
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
- if (!pdata) {
- dev_err(dev, "No configuration data\n");
- return -ENXIO;
- }
+ /*
+ * This parameter means that something other than the gpiolib has
+ * already set the line into open drain mode, so we should just
+ * drive it high/low like we are in full control of the line and
+ * open drain will happen transparently.
+ */
+ if (of_property_present(np, "linux,open-drain"))
+ gflags = GPIOD_OUT_LOW;
master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
GFP_KERNEL);
if (!master)
return -ENOMEM;
- pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
- if (IS_ERR(pdata->gpiod)) {
+ ddata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
+ if (IS_ERR(ddata->gpiod)) {
dev_err(dev, "gpio_request (pin) failed\n");
- return PTR_ERR(pdata->gpiod);
+ return PTR_ERR(ddata->gpiod);
}
- pdata->pullup_gpiod =
+ ddata->pullup_gpiod =
devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
- if (IS_ERR(pdata->pullup_gpiod)) {
+ if (IS_ERR(ddata->pullup_gpiod)) {
dev_err(dev, "gpio_request_one "
"(ext_pullup_enable_pin) failed\n");
- return PTR_ERR(pdata->pullup_gpiod);
+ return PTR_ERR(ddata->pullup_gpiod);
}
- master->data = pdata;
+ master->data = ddata;
master->read_bit = w1_gpio_read_bit;
- gpiod_direction_output(pdata->gpiod, 1);
+ gpiod_direction_output(ddata->gpiod, 1);
master->write_bit = w1_gpio_write_bit;
/*
@@ -138,11 +133,8 @@ static int w1_gpio_probe(struct platform_device *pdev)
return err;
}
- if (pdata->enable_external_pullup)
- pdata->enable_external_pullup(1);
-
- if (pdata->pullup_gpiod)
- gpiod_set_value(pdata->pullup_gpiod, 1);
+ if (ddata->pullup_gpiod)
+ gpiod_set_value(ddata->pullup_gpiod, 1);
platform_set_drvdata(pdev, master);
@@ -152,45 +144,19 @@ static int w1_gpio_probe(struct platform_device *pdev)
static int w1_gpio_remove(struct platform_device *pdev)
{
struct w1_bus_master *master = platform_get_drvdata(pdev);
- struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
-
- if (pdata->enable_external_pullup)
- pdata->enable_external_pullup(0);
+ struct w1_gpio_ddata *ddata = master->data;
- if (pdata->pullup_gpiod)
- gpiod_set_value(pdata->pullup_gpiod, 0);
+ if (ddata->pullup_gpiod)
+ gpiod_set_value(ddata->pullup_gpiod, 0);
w1_remove_master_device(master);
return 0;
}
-static int __maybe_unused w1_gpio_suspend(struct device *dev)
-{
- struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
-
- if (pdata->enable_external_pullup)
- pdata->enable_external_pullup(0);
-
- return 0;
-}
-
-static int __maybe_unused w1_gpio_resume(struct device *dev)
-{
- struct w1_gpio_platform_data *pdata = dev_get_platdata(dev);
-
- if (pdata->enable_external_pullup)
- pdata->enable_external_pullup(1);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(w1_gpio_pm_ops, w1_gpio_suspend, w1_gpio_resume);
-
static struct platform_driver w1_gpio_driver = {
.driver = {
.name = "w1-gpio",
- .pm = &w1_gpio_pm_ops,
.of_match_table = of_match_ptr(w1_gpio_dt_ids),
},
.probe = w1_gpio_probe,
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 9f21fd98f..250b7f7ec 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * w1_ds2433.c - w1 family 23 (DS2433) driver
+ * w1_ds2433.c - w1 family 23 (DS2433) & 43 (DS28EC20) eeprom driver
*
* Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
+ * Copyright (c) 2023 Marc Ferland <marc.ferland@sonatest.com>
*/
#include <linux/kernel.h>
@@ -23,23 +24,45 @@
#include <linux/w1.h>
#define W1_EEPROM_DS2433 0x23
+#define W1_EEPROM_DS28EC20 0x43
+
+#define W1_EEPROM_DS2433_SIZE 512
+#define W1_EEPROM_DS28EC20_SIZE 2560
-#define W1_EEPROM_SIZE 512
-#define W1_PAGE_COUNT 16
#define W1_PAGE_SIZE 32
#define W1_PAGE_BITS 5
#define W1_PAGE_MASK 0x1F
-
-#define W1_F23_TIME 300
+#define W1_VALIDCRC_MAX 96
#define W1_F23_READ_EEPROM 0xF0
#define W1_F23_WRITE_SCRATCH 0x0F
#define W1_F23_READ_SCRATCH 0xAA
#define W1_F23_COPY_SCRATCH 0x55
+struct ds2433_config {
+ size_t eeprom_size; /* eeprom size in bytes */
+ unsigned int page_count; /* number of 256 bits pages */
+ unsigned int tprog; /* time in ms for page programming */
+};
+
+static const struct ds2433_config config_f23 = {
+ .eeprom_size = W1_EEPROM_DS2433_SIZE,
+ .page_count = 16,
+ .tprog = 5,
+};
+
+static const struct ds2433_config config_f43 = {
+ .eeprom_size = W1_EEPROM_DS28EC20_SIZE,
+ .page_count = 80,
+ .tprog = 10,
+};
+
struct w1_f23_data {
- u8 memory[W1_EEPROM_SIZE];
- u32 validcrc;
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
+ u8 *memory;
+ DECLARE_BITMAP(validcrc, W1_VALIDCRC_MAX);
+#endif
+ const struct ds2433_config *cfg;
};
/*
@@ -64,11 +87,11 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
u8 wrbuf[3];
int off = block * W1_PAGE_SIZE;
- if (data->validcrc & (1 << block))
+ if (test_bit(block, data->validcrc))
return 0;
if (w1_reset_select_slave(sl)) {
- data->validcrc = 0;
+ bitmap_zero(data->validcrc, data->cfg->page_count);
return -EIO;
}
@@ -80,7 +103,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
/* cache the block if the CRC is valid */
if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID)
- data->validcrc |= (1 << block);
+ set_bit(block, data->validcrc);
return 0;
}
@@ -98,7 +121,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
u8 wrbuf[3];
#endif
- count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE);
+ count = w1_f23_fix_count(off, count, bin_attr->size);
if (!count)
return 0;
@@ -153,9 +176,7 @@ out_up:
*/
static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
{
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *f23 = sl->family_data;
-#endif
u8 wrbuf[4];
u8 rdbuf[W1_PAGE_SIZE + 3];
u8 es = (addr + len - 1) & 0x1f;
@@ -191,13 +212,13 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
wrbuf[3] = es;
w1_write_block(sl->master, wrbuf, 4);
- /* Sleep for 5 ms to wait for the write to complete */
- msleep(5);
+ /* Sleep for tprog ms to wait for the write to complete */
+ msleep(f23->cfg->tprog);
/* Reset the bus to wake up the EEPROM (this may not be needed) */
w1_reset_bus(sl->master);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
- f23->validcrc &= ~(1 << (addr >> W1_PAGE_BITS));
+ clear_bit(addr >> W1_PAGE_BITS, f23->validcrc);
#endif
return 0;
}
@@ -209,7 +230,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len, idx;
- count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE);
+ count = w1_f23_fix_count(off, count, bin_attr->size);
if (!count)
return 0;
@@ -253,10 +274,22 @@ out_up:
return count;
}
-static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
+static struct bin_attribute bin_attr_f23_eeprom = {
+ .attr = { .name = "eeprom", .mode = 0644 },
+ .read = eeprom_read,
+ .write = eeprom_write,
+ .size = W1_EEPROM_DS2433_SIZE,
+};
+
+static struct bin_attribute bin_attr_f43_eeprom = {
+ .attr = { .name = "eeprom", .mode = 0644 },
+ .read = eeprom_read,
+ .write = eeprom_write,
+ .size = W1_EEPROM_DS28EC20_SIZE,
+};
static struct bin_attribute *w1_f23_bin_attributes[] = {
- &bin_attr_eeprom,
+ &bin_attr_f23_eeprom,
NULL,
};
@@ -269,26 +302,63 @@ static const struct attribute_group *w1_f23_groups[] = {
NULL,
};
+static struct bin_attribute *w1_f43_bin_attributes[] = {
+ &bin_attr_f43_eeprom,
+ NULL,
+};
+
+static const struct attribute_group w1_f43_group = {
+ .bin_attrs = w1_f43_bin_attributes,
+};
+
+static const struct attribute_group *w1_f43_groups[] = {
+ &w1_f43_group,
+ NULL,
+};
+
static int w1_f23_add_slave(struct w1_slave *sl)
{
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *data;
data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+
+ switch (sl->family->fid) {
+ case W1_EEPROM_DS2433:
+ data->cfg = &config_f23;
+ break;
+ case W1_EEPROM_DS28EC20:
+ data->cfg = &config_f43;
+ break;
+ }
+
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
+ if (data->cfg->page_count > W1_VALIDCRC_MAX) {
+ dev_err(&sl->dev, "page count too big for crc bitmap\n");
+ kfree(data);
+ return -EINVAL;
+ }
+ data->memory = kzalloc(data->cfg->eeprom_size, GFP_KERNEL);
+ if (!data->memory) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ bitmap_zero(data->validcrc, data->cfg->page_count);
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
sl->family_data = data;
-#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
return 0;
}
static void w1_f23_remove_slave(struct w1_slave *sl)
{
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
- kfree(sl->family_data);
+ struct w1_f23_data *data = sl->family_data;
sl->family_data = NULL;
-#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
+ kfree(data->memory);
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
+ kfree(data);
}
static const struct w1_family_ops w1_f23_fops = {
@@ -297,13 +367,53 @@ static const struct w1_family_ops w1_f23_fops = {
.groups = w1_f23_groups,
};
+static const struct w1_family_ops w1_f43_fops = {
+ .add_slave = w1_f23_add_slave,
+ .remove_slave = w1_f23_remove_slave,
+ .groups = w1_f43_groups,
+};
+
static struct w1_family w1_family_23 = {
.fid = W1_EEPROM_DS2433,
.fops = &w1_f23_fops,
};
-module_w1_family(w1_family_23);
+
+static struct w1_family w1_family_43 = {
+ .fid = W1_EEPROM_DS28EC20,
+ .fops = &w1_f43_fops,
+};
+
+static int __init w1_ds2433_init(void)
+{
+ int err;
+
+ err = w1_register_family(&w1_family_23);
+ if (err)
+ return err;
+
+ err = w1_register_family(&w1_family_43);
+ if (err)
+ goto err_43;
+
+ return 0;
+
+err_43:
+ w1_unregister_family(&w1_family_23);
+ return err;
+}
+
+static void __exit w1_ds2433_exit(void)
+{
+ w1_unregister_family(&w1_family_23);
+ w1_unregister_family(&w1_family_43);
+}
+
+module_init(w1_ds2433_init);
+module_exit(w1_ds2433_exit);
MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
-MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM");
+MODULE_AUTHOR("Marc Ferland <marc.ferland@sonatest.com>");
+MODULE_DESCRIPTION("w1 family 23/43 driver for DS2433 (4kb) and DS28EC20 (20kb)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433));
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS28EC20));
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index b111b28ac..2c6474cb8 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -324,7 +324,7 @@ static inline int of_at91wdt_init(struct device_node *np, struct at91wdt *wdt)
}
#endif
-static int __init at91wdt_probe(struct platform_device *pdev)
+static int at91wdt_probe(struct platform_device *pdev)
{
int err;
struct at91wdt *wdt;
@@ -372,15 +372,13 @@ static int __init at91wdt_probe(struct platform_device *pdev)
return 0;
}
-static int __exit at91wdt_remove(struct platform_device *pdev)
+static void at91wdt_remove(struct platform_device *pdev)
{
struct at91wdt *wdt = platform_get_drvdata(pdev);
watchdog_unregister_device(&wdt->wdd);
pr_warn("I quit now, hardware will probably reboot!\n");
del_timer(&wdt->timer);
-
- return 0;
}
#if defined(CONFIG_OF)
@@ -393,14 +391,14 @@ MODULE_DEVICE_TABLE(of, at91_wdt_dt_ids);
#endif
static struct platform_driver at91wdt_driver = {
- .remove = __exit_p(at91wdt_remove),
+ .probe = at91wdt_probe,
+ .remove_new = at91wdt_remove,
.driver = {
.name = "at91_wdt",
.of_match_table = of_match_ptr(at91_wdt_dt_ids),
},
};
-
-module_platform_driver_probe(at91wdt_driver, at91wdt_probe);
+module_platform_driver(at91wdt_driver);
MODULE_AUTHOR("Renaud CERRATO <r.cerrato@til-technologies.fr>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT91SAM9x processors");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 79ed1626d..138dc8d8c 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -33,7 +33,6 @@
#define DEFAULT_MARGIN 30
#define PRETIMEOUT_SEC 9
-static bool ilo5;
static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
static bool nowayout = WATCHDOG_NOWAYOUT;
static bool pretimeout = IS_ENABLED(CONFIG_HPWDT_NMI_DECODING);
@@ -181,9 +180,6 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
if (ulReason == NMI_UNKNOWN && !mynmi)
return NMI_DONE;
- if (ilo5 && !pretimeout && !mynmi)
- return NMI_DONE;
-
if (kdumptimeout < 0)
hpwdt_stop();
else if (kdumptimeout == 0)
@@ -363,9 +359,6 @@ static int hpwdt_init_one(struct pci_dev *dev,
pretimeout ? "on" : "off");
dev_info(&dev->dev, "kdumptimeout: %d.\n", kdumptimeout);
- if (dev->subsystem_vendor == PCI_VENDOR_ID_HP_3PAR)
- ilo5 = true;
-
return 0;
error_wd_register:
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index 8c1ee072f..9297a5891 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -13,9 +13,9 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8607, IT8613, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665,
- * IT8686, IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
- * IT8728, IT8772, IT8783 and IT8784.
+ * IT8607, IT8613, IT8620, IT8622, IT8625, IT8628, IT8655, IT8659,
+ * IT8665, IT8686, IT8702, IT8712, IT8716, IT8718, IT8720, IT8721,
+ * IT8726, IT8728, IT8772, IT8783, IT8784 and IT8786.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -56,6 +56,7 @@
#define IT8625_ID 0x8625
#define IT8628_ID 0x8628
#define IT8655_ID 0x8655
+#define IT8659_ID 0x8659
#define IT8665_ID 0x8665
#define IT8686_ID 0x8686
#define IT8702_ID 0x8702
@@ -146,6 +147,7 @@ static inline void superio_outb(int val, int reg)
static inline int superio_inw(int reg)
{
int val;
+
outb(reg++, REG);
val = inb(VAL) << 8;
outb(reg, REG);
@@ -274,10 +276,6 @@ static int __init it87_wdt_init(void)
case IT8712_ID:
max_units = (chip_rev < 8) ? 255 : 65535;
break;
- case IT8716_ID:
- case IT8726_ID:
- max_units = 65535;
- break;
case IT8607_ID:
case IT8613_ID:
case IT8620_ID:
@@ -285,11 +283,14 @@ static int __init it87_wdt_init(void)
case IT8625_ID:
case IT8628_ID:
case IT8655_ID:
+ case IT8659_ID:
case IT8665_ID:
case IT8686_ID:
+ case IT8716_ID:
case IT8718_ID:
case IT8720_ID:
case IT8721_ID:
+ case IT8726_ID:
case IT8728_ID:
case IT8772_ID:
case IT8783_ID:
diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c
index 667e2c5b3..5dc69363f 100644
--- a/drivers/watchdog/mlx_wdt.c
+++ b/drivers/watchdog/mlx_wdt.c
@@ -30,17 +30,15 @@
* struct mlxreg_wdt - wd private data:
*
* @wdd: watchdog device;
- * @device: basic device;
* @pdata: data received from platform driver;
* @regmap: register map of parent device;
- * @timeout: defined timeout in sec.;
* @action_idx: index for direct access to action register;
* @timeout_idx:index for direct access to TO register;
* @tleft_idx: index for direct access to time left register;
* @ping_idx: index for direct access to ping register;
* @reset_idx: index for direct access to reset cause register;
* @regmap_val_sz: size of value in register map;
- * @wd_type: watchdog HW type;
+ * @wdt_type: watchdog HW type;
*/
struct mlxreg_wdt {
struct watchdog_device wdd;
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index b2330b16b..c35f85ce8 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -58,9 +58,13 @@
#define WDT_SWSYSRST 0x18U
#define WDT_SWSYS_RST_KEY 0x88000000
+#define WDT_SWSYSRST_EN 0xfc
+
#define DRV_NAME "mtk-wdt"
#define DRV_VERSION "1.0"
+#define MT7988_TOPRGU_SW_RST_NUM 24
+
static bool nowayout = WATCHDOG_NOWAYOUT;
static unsigned int timeout;
@@ -71,10 +75,12 @@ struct mtk_wdt_dev {
struct reset_controller_dev rcdev;
bool disable_wdt_extrst;
bool reset_by_toprgu;
+ bool has_swsysrst_en;
};
struct mtk_wdt_data {
int toprgu_sw_rst_num;
+ bool has_swsysrst_en;
};
static const struct mtk_wdt_data mt2712_data = {
@@ -89,6 +95,11 @@ static const struct mtk_wdt_data mt7986_data = {
.toprgu_sw_rst_num = MT7986_TOPRGU_SW_RST_NUM,
};
+static const struct mtk_wdt_data mt7988_data = {
+ .toprgu_sw_rst_num = MT7988_TOPRGU_SW_RST_NUM,
+ .has_swsysrst_en = true,
+};
+
static const struct mtk_wdt_data mt8183_data = {
.toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM,
};
@@ -109,6 +120,28 @@ static const struct mtk_wdt_data mt8195_data = {
.toprgu_sw_rst_num = MT8195_TOPRGU_SW_RST_NUM,
};
+/**
+ * toprgu_reset_sw_en_unlocked() - enable/disable software control for reset bit
+ * @data: Pointer to instance of driver data.
+ * @id: Bit number identifying the reset to be enabled or disabled.
+ * @enable: If true, enable software control for that bit, disable otherwise.
+ *
+ * Context: The caller must hold the lock of struct mtk_wdt_dev.
+ */
+static void toprgu_reset_sw_en_unlocked(struct mtk_wdt_dev *data,
+ unsigned long id, bool enable)
+{
+ u32 tmp;
+
+ tmp = readl(data->wdt_base + WDT_SWSYSRST_EN);
+ if (enable)
+ tmp |= BIT(id);
+ else
+ tmp &= ~BIT(id);
+
+ writel(tmp, data->wdt_base + WDT_SWSYSRST_EN);
+}
+
static int toprgu_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
@@ -119,6 +152,9 @@ static int toprgu_reset_update(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
+ if (assert && data->has_swsysrst_en)
+ toprgu_reset_sw_en_unlocked(data, id, true);
+
tmp = readl(data->wdt_base + WDT_SWSYSRST);
if (assert)
tmp |= BIT(id);
@@ -127,6 +163,9 @@ static int toprgu_reset_update(struct reset_controller_dev *rcdev,
tmp |= WDT_SWSYS_RST_KEY;
writel(tmp, data->wdt_base + WDT_SWSYSRST);
+ if (!assert && data->has_swsysrst_en)
+ toprgu_reset_sw_en_unlocked(data, id, false);
+
spin_unlock_irqrestore(&data->lock, flags);
return 0;
@@ -406,6 +445,8 @@ static int mtk_wdt_probe(struct platform_device *pdev)
wdt_data->toprgu_sw_rst_num);
if (err)
return err;
+
+ mtk_wdt->has_swsysrst_en = wdt_data->has_swsysrst_en;
}
mtk_wdt->disable_wdt_extrst =
@@ -444,6 +485,7 @@ static const struct of_device_id mtk_wdt_dt_ids[] = {
{ .compatible = "mediatek,mt6589-wdt" },
{ .compatible = "mediatek,mt6795-wdt", .data = &mt6795_data },
{ .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data },
+ { .compatible = "mediatek,mt7988-wdt", .data = &mt7988_data },
{ .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data },
{ .compatible = "mediatek,mt8186-wdt", .data = &mt8186_data },
{ .compatible = "mediatek,mt8188-wdt", .data = &mt8188_data },
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 0b4bd883f..349d30462 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -9,6 +9,7 @@
* (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -34,9 +35,10 @@
#define S3C2410_WTCNT_MAXCNT 0xffff
-#define S3C2410_WTCON_RSTEN (1 << 0)
-#define S3C2410_WTCON_INTEN (1 << 2)
-#define S3C2410_WTCON_ENABLE (1 << 5)
+#define S3C2410_WTCON_RSTEN BIT(0)
+#define S3C2410_WTCON_INTEN BIT(2)
+#define S3C2410_WTCON_ENABLE BIT(5)
+#define S3C2410_WTCON_DBGACK_MASK BIT(16)
#define S3C2410_WTCON_DIV16 (0 << 3)
#define S3C2410_WTCON_DIV32 (1 << 3)
@@ -67,6 +69,13 @@
#define EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT 25
#define EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT 24
+#define GS_CLUSTER0_NONCPU_OUT 0x1220
+#define GS_CLUSTER1_NONCPU_OUT 0x1420
+#define GS_CLUSTER0_NONCPU_INT_EN 0x1244
+#define GS_CLUSTER1_NONCPU_INT_EN 0x1444
+#define GS_CLUSTER2_NONCPU_INT_EN 0x1644
+#define GS_RST_STAT_REG_OFFSET 0x3B44
+
/**
* DOC: Quirk flags for different Samsung watchdog IP-cores
*
@@ -100,12 +109,17 @@
* %QUIRK_HAS_PMU_CNT_EN: PMU block has some register (e.g. CLUSTERx_NONCPU_OUT)
* with "watchdog counter enable" bit. That bit should be set to make watchdog
* counter running.
+ *
+ * %QUIRK_HAS_DBGACK_BIT: WTCON register has DBGACK_MASK bit. Setting the
+ * DBGACK_MASK bit disables the watchdog outputs when the SoC is in debug mode.
+ * Debug mode is determined by the DBGACK CPU signal.
*/
-#define QUIRK_HAS_WTCLRINT_REG (1 << 0)
-#define QUIRK_HAS_PMU_MASK_RESET (1 << 1)
-#define QUIRK_HAS_PMU_RST_STAT (1 << 2)
-#define QUIRK_HAS_PMU_AUTO_DISABLE (1 << 3)
-#define QUIRK_HAS_PMU_CNT_EN (1 << 4)
+#define QUIRK_HAS_WTCLRINT_REG BIT(0)
+#define QUIRK_HAS_PMU_MASK_RESET BIT(1)
+#define QUIRK_HAS_PMU_RST_STAT BIT(2)
+#define QUIRK_HAS_PMU_AUTO_DISABLE BIT(3)
+#define QUIRK_HAS_PMU_CNT_EN BIT(4)
+#define QUIRK_HAS_DBGACK_BIT BIT(5)
/* These quirks require that we have a PMU register map */
#define QUIRKS_HAVE_PMUREG \
@@ -263,7 +277,35 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl1 = {
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
+static const struct s3c2410_wdt_variant drv_data_gs101_cl0 = {
+ .mask_reset_reg = GS_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = GS_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 0,
+ .cnt_en_reg = GS_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 8,
+ .quirks = QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_CNT_EN | QUIRK_HAS_WTCLRINT_REG |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
+static const struct s3c2410_wdt_variant drv_data_gs101_cl1 = {
+ .mask_reset_reg = GS_CLUSTER1_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = GS_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = 1,
+ .cnt_en_reg = GS_CLUSTER1_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_MASK_RESET |
+ QUIRK_HAS_PMU_CNT_EN | QUIRK_HAS_WTCLRINT_REG |
+ QUIRK_HAS_DBGACK_BIT,
+};
+
static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "google,gs101-wdt",
+ .data = &drv_data_gs101_cl0 },
{ .compatible = "samsung,s3c2410-wdt",
.data = &drv_data_s3c2410 },
{ .compatible = "samsung,s3c6410-wdt",
@@ -375,6 +417,19 @@ static int s3c2410wdt_enable(struct s3c2410_wdt *wdt, bool en)
return 0;
}
+/* Disable watchdog outputs if the CPU is in debug mode */
+static void s3c2410wdt_mask_dbgack(struct s3c2410_wdt *wdt)
+{
+ unsigned long wtcon;
+
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_DBGACK_BIT))
+ return;
+
+ wtcon = readl(wdt->reg_base + S3C2410_WTCON);
+ wtcon |= S3C2410_WTCON_DBGACK_MASK;
+ writel(wtcon, wdt->reg_base + S3C2410_WTCON);
+}
+
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -587,7 +642,8 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
#ifdef CONFIG_OF
/* Choose Exynos850/ExynosAutov9 driver data w.r.t. cluster index */
if (variant == &drv_data_exynos850_cl0 ||
- variant == &drv_data_exynosautov9_cl0) {
+ variant == &drv_data_exynosautov9_cl0 ||
+ variant == &drv_data_gs101_cl0) {
u32 index;
int err;
@@ -600,9 +656,12 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt)
case 0:
break;
case 1:
- variant = (variant == &drv_data_exynos850_cl0) ?
- &drv_data_exynos850_cl1 :
- &drv_data_exynosautov9_cl1;
+ if (variant == &drv_data_exynos850_cl0)
+ variant = &drv_data_exynos850_cl1;
+ else if (variant == &drv_data_exynosautov9_cl0)
+ variant = &drv_data_exynosautov9_cl1;
+ else if (variant == &drv_data_gs101_cl0)
+ variant = &drv_data_gs101_cl1;
break;
default:
return dev_err_probe(dev, -EINVAL, "wrong cluster index: %u\n", index);
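The code elided between these hunks reads the cluster index that drives the switch above. A hedged sketch of that probe-context fragment, assuming the "samsung,cluster-index" devicetree property used by the upstream driver:

u32 index;
int err;

/* assumed property name; cluster 0 vs 1 selects the *_cl0/*_cl1 variant */
err = of_property_read_u32(dev->of_node, "samsung,cluster-index", &index);
if (err)
	return dev_err_probe(dev, -EINVAL, "failed to get cluster index\n");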
@@ -700,6 +759,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
wdt->wdt_device.parent = dev;
+ s3c2410wdt_mask_dbgack(wdt);
+
/*
* If "tmr_atboot" param is non-zero, start the watchdog right now. Also
* set WDOG_HW_RUNNING bit, so that watchdog core can kick the watchdog.
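The comment above describes the tmr_atboot path; a minimal sketch of that pattern, with illustrative names (the exact probe code differs):

#include <linux/watchdog.h>

/* sketch: start at boot and tell the watchdog core the hardware is already
 * running, so the core pings the device until userspace opens it */
static void example_start_at_boot(struct device *dev, struct s3c2410_wdt *wdt)
{
	if (tmr_atboot) {
		dev_info(dev, "starting watchdog timer\n");
		s3c2410wdt_start(&wdt->wdt_device);	/* illustrative */
		set_bit(WDOG_HW_RUNNING, &wdt->wdt_device.status);
	}
}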
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index e4b344db3..df68ae4ac 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -511,7 +511,7 @@ err_exit:
return ret;
}
-static int starfive_wdt_remove(struct platform_device *pdev)
+static void starfive_wdt_remove(struct platform_device *pdev)
{
struct starfive_wdt *wdt = platform_get_drvdata(pdev);
@@ -523,8 +523,6 @@ static int starfive_wdt_remove(struct platform_device *pdev)
else
/* disable clock without PM */
starfive_wdt_disable_clock(wdt);
-
- return 0;
}
static void starfive_wdt_shutdown(struct platform_device *pdev)
@@ -594,7 +592,7 @@ MODULE_DEVICE_TABLE(of, starfive_wdt_match);
static struct platform_driver starfive_wdt_driver = {
.probe = starfive_wdt_probe,
- .remove = starfive_wdt_remove,
+ .remove_new = starfive_wdt_remove,
.shutdown = starfive_wdt_shutdown,
.driver = {
.name = "starfive-wdt",
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 89a54b664..8d5f67acb 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -98,7 +98,7 @@ static struct watchdog_device txx9wdt = {
.ops = &txx9wdt_ops,
};
-static int __init txx9wdt_probe(struct platform_device *dev)
+static int txx9wdt_probe(struct platform_device *dev)
{
int ret;
@@ -145,12 +145,11 @@ exit:
return ret;
}
-static int __exit txx9wdt_remove(struct platform_device *dev)
+static void txx9wdt_remove(struct platform_device *dev)
{
watchdog_unregister_device(&txx9wdt);
clk_disable_unprepare(txx9_imclk);
clk_put(txx9_imclk);
- return 0;
}
static void txx9wdt_shutdown(struct platform_device *dev)
@@ -159,14 +158,14 @@ static void txx9wdt_shutdown(struct platform_device *dev)
}
static struct platform_driver txx9wdt_driver = {
- .remove = __exit_p(txx9wdt_remove),
+ .probe = txx9wdt_probe,
+ .remove_new = txx9wdt_remove,
.shutdown = txx9wdt_shutdown,
.driver = {
.name = "txx9wdt",
},
};
-
-module_platform_driver_probe(txx9wdt_driver, txx9wdt_probe);
+module_platform_driver(txx9wdt_driver);
MODULE_DESCRIPTION("TXx9 Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 976c6cdf9..aaf2514fc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -672,7 +672,6 @@ EXPORT_SYMBOL(xen_free_ballooned_pages);
static void __init balloon_add_regions(void)
{
-#if defined(CONFIG_XEN_PV)
unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
unsigned int i;
@@ -696,7 +695,6 @@ static void __init balloon_add_regions(void)
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
-#endif
}
static int __init balloon_init(void)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 26ffb8755..f93f73ece 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -317,7 +317,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
rc = -EFAULT;
goto out_free;
}
- if (copy_to_user(arg->gref_ids, gref_ids,
+ if (copy_to_user(arg->gref_ids_flex, gref_ids,
sizeof(gref_ids[0]) * op.count)) {
rc = -EFAULT;
goto out_free;
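The copy destination changes from a one-element UAPI array to a flexible-array alias. A hedged sketch of the likely struct shape (the real definition lives in include/uapi/xen/gntalloc.h; the union trick preserves the old layout while giving bounds-checkable access):

#include <linux/stddef.h>
#include <linux/types.h>

struct example_alloc_gref {
	/* ... other fields, e.g. the requested count ... */
	__u32 count;
	union {
		__u32 gref_ids[1];	/* legacy 1-element layout, kept for ABI */
		__DECLARE_FLEX_ARRAY(__u32, gref_ids_flex);
	};
};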
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 508655273..c63f317e3 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -65,7 +65,7 @@ struct pcpu {
uint32_t flags;
};
-static struct bus_type xen_pcpu_subsys = {
+static const struct bus_type xen_pcpu_subsys = {
.name = "xen_cpu",
.dev_name = "xen_cpu",
};
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 35b6e3060..67dfa4778 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1223,18 +1223,13 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
mmap_write_unlock(mm);
- size = sizeof(*ports) * kioreq->vcpus;
- ports = kzalloc(size, GFP_KERNEL);
- if (!ports) {
- ret = -ENOMEM;
+ ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
+ kioreq->vcpus, sizeof(*ports));
+ if (IS_ERR(ports)) {
+ ret = PTR_ERR(ports);
goto error_kfree;
}
- if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
- ret = -EFAULT;
- goto error_kfree_ports;
- }
-
for (i = 0; i < kioreq->vcpus; i++) {
kioreq->ports[i].vcpu = i;
kioreq->ports[i].port = ports[i];
@@ -1256,7 +1251,7 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
error_unbind:
while (--i >= 0)
unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
-error_kfree_ports:
+
kfree(ports);
error_kfree:
kfree(kioreq);
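memdup_array_user() from <linux/string.h> folds the allocate-then-copy_from_user() sequence into one call with overflow-checked n * size arithmetic and returns an ERR_PTR() on failure. A minimal usage sketch, as a fragment with illustrative variable names:

#include <linux/err.h>
#include <linux/string.h>

u64 *ports;

ports = memdup_array_user(u64_to_user_ptr(user_ports), nr_ports,
			  sizeof(*ports));
if (IS_ERR(ports))
	return PTR_ERR(ports);

/* ... use ports[0..nr_ports-1] ... */
kfree(ports);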
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 8cd583db2..b293d7652 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -237,7 +237,7 @@ static const struct attribute_group *balloon_groups[] = {
NULL
};
-static struct bus_type balloon_subsys = {
+static const struct bus_type balloon_subsys = {
.name = BALLOON_CLASS_NAME,
.dev_name = BALLOON_CLASS_NAME,
};
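Both Xen bus definitions can become const because the driver core's registration paths now take pointers to const struct bus_type, letting the objects live in rodata. A minimal sketch, assuming the constified subsys_system_register() signature:

static const struct bus_type example_subsys = {
	.name     = "example",
	.dev_name = "example",
};

static int __init example_init(void)
{
	/* registration APIs accept const struct bus_type * */
	return subsys_system_register(&example_subsys, NULL);
}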
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d4b251925..51b3124b0 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -116,14 +116,17 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
* @dev: xenbus device
* @path: path to watch
* @watch: watch to register
+ * @will_handle: callback to determine whether an event should be queued
* @callback: callback to register
*
* Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. Return 0 on
- * success, or -errno on error. On success, the given @path will be saved as
- * @watch->node, and remains the caller's to free. On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
+ * for storage, the @will_handle function to determine whether each event
+ * needs to be queued, and the given @callback function as the callback.
+ * On success, the given @path will be saved as @watch->node, and remains the
+ * caller's to free. On error, @watch->node will be NULL, the device will
+ * switch to %XenbusStateClosing, and the error will be saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
*/
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
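These hunks normalize the comments to kernel-doc form: each gets a "name - summary" first line and a dedicated Returns: section that scripts/kernel-doc renders separately (comments that cannot fit the form are demoted from /** to /* later in this file). The target shape, sketched generically:

/**
 * example_func - one-line summary of what the function does
 * @dev: device the operation applies to
 * @arg: meaning of the argument
 *
 * Free-form description of behaviour, side effects and locking.
 *
 * Returns: %0 on success or -errno on error
 */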
@@ -156,16 +159,20 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
* xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
* @dev: xenbus device
* @watch: watch to register
+ * @will_handle: callback to determine whether an event should be queued
* @callback: callback to register
* @pathfmt: format of path to watch
*
* Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the callback.
- * Return 0 on success, or -errno on error. On success, the watched path
- * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
- * kfree(). On error, watch->node will be NULL, so the caller has nothing to
+ * structure for storage, the @will_handle function to determine whether each
+ * event needs to be queued, and the given @callback function as the
+ * callback. On success, the watched path (@path/@path2) will be saved
+ * as @watch->node, and becomes the caller's to kfree().
+ * On error, watch->node will be NULL, so the caller has nothing to
* free, the device will switch to %XenbusStateClosing, and the error will be
* saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
*/
int xenbus_watch_pathfmt(struct xenbus_device *dev,
struct xenbus_watch *watch,
@@ -255,13 +262,15 @@ abort:
}
/**
- * xenbus_switch_state
+ * xenbus_switch_state - save the new state of a driver
* @dev: xenbus device
* @state: new state
*
* Advertise in the store a change of the given driver to the given new_state.
- * Return 0 on success, or -errno on error. On error, the device will switch
- * to XenbusStateClosing, and the error will be saved in the store.
+ * On error, the device will switch to XenbusStateClosing, and the error
+ * will be saved in the store.
+ *
+ * Returns: %0 on success or -errno on error
*/
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
@@ -305,7 +314,7 @@ static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
}
/**
- * xenbus_dev_error
+ * xenbus_dev_error - place an error message into the store
* @dev: xenbus device
* @err: error to report
* @fmt: error message format
@@ -324,7 +333,7 @@ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
EXPORT_SYMBOL_GPL(xenbus_dev_error);
/**
- * xenbus_dev_fatal
+ * xenbus_dev_fatal - put an error message into the store and then shutdown
* @dev: xenbus device
* @err: error to report
* @fmt: error message format
@@ -346,7 +355,7 @@ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
-/**
+/*
* Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
* avoid recursion within xenbus_switch_state.
*/
@@ -453,7 +462,7 @@ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
}
EXPORT_SYMBOL_GPL(xenbus_teardown_ring);
-/**
+/*
* Allocate an event channel for the given xenbus_device, assigning the newly
* created local port to *port. Return 0 on success, or -errno on error. On
* error, the device will switch to XenbusStateClosing, and the error will be
@@ -479,7 +488,7 @@ int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
-/**
+/*
* Free an existing event channel. Returns 0 on success or -errno on error.
*/
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
@@ -499,7 +508,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
- * xenbus_map_ring_valloc
+ * xenbus_map_ring_valloc - allocate & map pages of VA space
* @dev: xenbus device
* @gnt_refs: grant reference array
* @nr_grefs: number of grant references
@@ -507,10 +516,11 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
*
* Map @nr_grefs pages of memory into this domain from another
* domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
- * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and -errno on
- * error. If an error is returned, device will switch to
+ * pages of virtual address space, maps the pages to that address, and sets
+ * *vaddr to that address. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
+ *
+ * Returns: %0 on success or -errno on error
*/
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
@@ -599,14 +609,15 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
}
/**
- * xenbus_unmap_ring
+ * xenbus_unmap_ring - unmap memory from another domain
* @dev: xenbus device
* @handles: grant handle array
* @nr_handles: number of handles in the array
* @vaddrs: addresses to unmap
*
* Unmap memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
+ *
+ * Returns: %0 on success or GNTST_* on error
* (see xen/include/interface/grant_table.h).
*/
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
@@ -712,7 +723,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
}
/**
- * xenbus_unmap_ring_vfree
+ * xenbus_unmap_ring_vfree - unmap a page of memory from another domain
* @dev: xenbus device
* @vaddr: addr to unmap
*
@@ -720,7 +731,8 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
* Unmap a page of memory in this domain that was imported from another domain.
* Use xenbus_unmap_ring_vfree if you mapped in your memory with
* xenbus_map_ring_valloc (it will free the virtual address space).
- * Returns 0 on success and returns GNTST_* on error
+ *
+ * Returns: %0 on success or GNTST_* on error
* (see xen/include/interface/grant_table.h).
*/
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
@@ -916,10 +928,10 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
}
/**
- * xenbus_read_driver_state
+ * xenbus_read_driver_state - read state from a store path
* @path: path for driver
*
- * Return the state of the driver rooted at the given store path, or
+ * Returns: the state of the driver rooted at the given store path, or
* XenbusStateUnknown if no state can be read.
*/
enum xenbus_state xenbus_read_driver_state(const char *path)